Merge remote-tracking branch 'common/android-3.10' into android-x86_64-fugu-3.10
diff --git a/Documentation/DocBook/media_api.tmpl b/Documentation/DocBook/media_api.tmpl
index 6a8b715..9c92bb8 100644
--- a/Documentation/DocBook/media_api.tmpl
+++ b/Documentation/DocBook/media_api.tmpl
@@ -1,6 +1,6 @@
 <?xml version="1.0"?>
-<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
-	"http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" [
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
+	"http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd" [
 <!ENTITY % media-entities SYSTEM "./media-entities.tmpl"> %media-entities;
 <!ENTITY media-indices SYSTEM "./media-indices.tmpl">
 
diff --git a/Documentation/hwmon/k10temp b/Documentation/hwmon/k10temp
index 90956b6..4dfdc8f 100644
--- a/Documentation/hwmon/k10temp
+++ b/Documentation/hwmon/k10temp
@@ -12,6 +12,7 @@
 * AMD Family 12h processors: "Llano" (E2/A4/A6/A8-Series)
 * AMD Family 14h processors: "Brazos" (C/E/G/Z-Series)
 * AMD Family 15h processors: "Bulldozer" (FX-Series), "Trinity"
+* AMD Family 16h processors: "Kabini"
 
   Prefix: 'k10temp'
   Addresses scanned: PCI space
diff --git a/Documentation/i2c/busses/i2c-piix4 b/Documentation/i2c/busses/i2c-piix4
index 1e6634f..a370b20 100644
--- a/Documentation/i2c/busses/i2c-piix4
+++ b/Documentation/i2c/busses/i2c-piix4
@@ -13,7 +13,7 @@
   * AMD SP5100 (SB700 derivative found on some server mainboards)
     Datasheet: Publicly available at the AMD website
     http://support.amd.com/us/Embedded_TechDocs/44413.pdf
-  * AMD Hudson-2
+  * AMD Hudson-2, CZ
     Datasheet: Not publicly available
   * Standard Microsystems (SMSC) SLC90E66 (Victory66) southbridge
     Datasheet: Publicly available at the SMSC website http://www.smsc.com
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 9d83a25..45f824c 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -497,6 +497,15 @@
 tcp_timestamps - BOOLEAN
 	Enable timestamps as defined in RFC1323.
 
+tcp_min_tso_segs - INTEGER
+	Minimal number of segments per TSO frame.
+	Since linux-3.12, TCP does an automatic sizing of TSO frames,
+	depending on flow rate, instead of filling 64Kbytes packets.
+	For specific usages, it's possible to force TCP to build big
+	TSO frames. Note that TCP stack might split too big TSO packets
+	if available window is too small.
+	Default: 2
+
 tcp_tso_win_divisor - INTEGER
 	This allows control over what percentage of the congestion window
 	can be consumed by a single TSO frame.
diff --git a/Documentation/parisc/registers b/Documentation/parisc/registers
index dd3cadd..10c7d17 100644
--- a/Documentation/parisc/registers
+++ b/Documentation/parisc/registers
@@ -78,6 +78,14 @@
 TOC enable bit			1
 
 =========================================================================
+
+The PA-RISC architecture defines 7 registers as "shadow registers".
+Those are used in RETURN FROM INTERRUPTION AND RESTORE instruction to reduce
+the state save and restore time by eliminating the need for general register
+(GR) saves and restores in interruption handlers.
+Shadow registers are the GRs 1, 8, 9, 16, 17, 24, and 25.
+
+=========================================================================
 Register usage notes, originally from John Marvin, with some additional
 notes from Randolph Chung.
 
diff --git a/MAINTAINERS b/MAINTAINERS
index 5106309..e600488 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7667,6 +7667,7 @@
 M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 L:	stable@vger.kernel.org
 S:	Supported
+F:	Documentation/stable_kernel_rules.txt
 
 STAGING SUBSYSTEM
 M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
diff --git a/Makefile b/Makefile
index e5e3ba0..9abd59d 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
 VERSION = 3
 PATCHLEVEL = 10
-SUBLEVEL = 0
+SUBLEVEL = 20
 EXTRAVERSION =
-NAME = Unicycling Gorilla
+NAME = TOSSUG Baby Fish
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"
@@ -369,7 +369,8 @@
 
 KBUILD_CPPFLAGS := -D__KERNEL__
 
-KBUILD_CFLAGS   := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
+KBUILD_CFLAGS   := $(ANDROID_TOOLCHAIN_FLAGS) \
+		   -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
 		   -fno-strict-aliasing -fno-common \
 		   -Werror-implicit-function-declaration \
 		   -Wno-format-security \
@@ -626,7 +627,7 @@
 
 ifdef CONFIG_FUNCTION_TRACER
 ifdef CONFIG_HAVE_FENTRY
-CC_USING_FENTRY	:= $(call cc-option, -mfentry -DCC_USING_FENTRY)
+CC_USING_FENTRY	:= $(call cc-option, -mfentry -DCC_USING_FENTRY -fno-pic)
 endif
 KBUILD_CFLAGS	+= -pg $(CC_USING_FENTRY)
 KBUILD_AFLAGS	+= $(CC_USING_FENTRY)
@@ -1267,7 +1268,7 @@
 clean: $(clean-dirs)
 	$(call cmd,rmdirs)
 	$(call cmd,rmfiles)
-	@find $(if $(KBUILD_EXTMOD), $(KBUILD_EXTMOD), .) $(RCS_FIND_IGNORE) \
+	@find -L $(if $(KBUILD_EXTMOD), $(KBUILD_EXTMOD), .) $(RCS_FIND_IGNORE) \
 		\( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
 		-o -name '*.ko.*' \
 		-o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
diff --git a/arch/Kconfig b/arch/Kconfig
index b7dd46e..6dbcfc4 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -473,6 +473,12 @@
 	help
 	  Architecture has the first two arguments of clone(2) swapped.
 
+config CLONE_BACKWARDS3
+	bool
+	help
+	  Architecture has tls passed as the 3rd argument of clone(2),
+	  not the 5th one.
+
 config ODD_RT_SIGACTION
 	bool
 	help
diff --git a/arch/arc/include/asm/delay.h b/arch/arc/include/asm/delay.h
index 442ce5d..43de302 100644
--- a/arch/arc/include/asm/delay.h
+++ b/arch/arc/include/asm/delay.h
@@ -53,11 +53,10 @@
 {
 	unsigned long loops;
 
-	/* (long long) cast ensures 64 bit MPY - real or emulated
+	/* (u64) cast ensures 64 bit MPY - real or emulated
 	 * HZ * 4295 is pre-evaluated by gcc - hence only 2 mpy ops
 	 */
-	loops = ((long long)(usecs * 4295 * HZ) *
-		 (long long)(loops_per_jiffy)) >> 32;
+	loops = ((u64) usecs * 4295 * HZ * loops_per_jiffy) >> 32;
 
 	__delay(loops);
 }
diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
index 6179de7..2046a89 100644
--- a/arch/arc/include/asm/ptrace.h
+++ b/arch/arc/include/asm/ptrace.h
@@ -52,12 +52,14 @@
 
 	/*to distinguish bet excp, syscall, irq */
 	union {
+		struct {
 #ifdef CONFIG_CPU_BIG_ENDIAN
 		/* so that assembly code is same for LE/BE */
 		unsigned long orig_r8:16, event:16;
 #else
 		unsigned long event:16, orig_r8:16;
 #endif
+		};
 		long orig_r8_word;
 	};
 };
diff --git a/arch/arc/include/asm/sections.h b/arch/arc/include/asm/sections.h
index 6fc1159..764f1e3 100644
--- a/arch/arc/include/asm/sections.h
+++ b/arch/arc/include/asm/sections.h
@@ -11,7 +11,6 @@
 
 #include <asm-generic/sections.h>
 
-extern char _int_vec_base_lds[];
 extern char __arc_dccm_base[];
 extern char __dtb_start[];
 
diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h
index f158197..b6a8c2d 100644
--- a/arch/arc/include/asm/spinlock.h
+++ b/arch/arc/include/asm/spinlock.h
@@ -45,7 +45,14 @@
 
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
-	lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
+	unsigned int tmp = __ARCH_SPIN_LOCK_UNLOCKED__;
+
+	__asm__ __volatile__(
+	"	ex  %0, [%1]		\n"
+	: "+r" (tmp)
+	: "r"(&(lock->slock))
+	: "memory");
+
 	smp_mb();
 }
 
diff --git a/arch/arc/include/asm/syscall.h b/arch/arc/include/asm/syscall.h
index 33ab304..29de098 100644
--- a/arch/arc/include/asm/syscall.h
+++ b/arch/arc/include/asm/syscall.h
@@ -18,7 +18,7 @@
 syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
 {
 	if (user_mode(regs) && in_syscall(regs))
-		return regs->orig_r8;
+		return regs->r8;
 	else
 		return -1;
 }
@@ -26,8 +26,7 @@
 static inline void
 syscall_rollback(struct task_struct *task, struct pt_regs *regs)
 {
-	/* XXX: I can't fathom how pt_regs->r8 will be clobbered ? */
-	regs->r8 = regs->orig_r8;
+	regs->r0 = regs->orig_r0;
 }
 
 static inline long
diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
index 3242082..30c9baf 100644
--- a/arch/arc/include/asm/uaccess.h
+++ b/arch/arc/include/asm/uaccess.h
@@ -43,7 +43,7 @@
  * Because it essentially checks if buffer end is within limit and @len is
  * non-ngeative, which implies that buffer start will be within limit too.
  *
- * The reason for rewriting being, for majorit yof cases, @len is generally
+ * The reason for rewriting being, for majority of cases, @len is generally
  * compile time constant, causing first sub-expression to be compile time
  * subsumed.
  *
@@ -53,7 +53,7 @@
  *
  */
 #define __user_ok(addr, sz)	(((sz) <= TASK_SIZE) && \
-				 (((addr)+(sz)) <= get_fs()))
+				 ((addr) <= (get_fs() - (sz))))
 #define __access_ok(addr, sz)	(unlikely(__kernel_ok) || \
 				 likely(__user_ok((addr), (sz))))
 
diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
index 0c6d664..6dbe359 100644
--- a/arch/arc/kernel/entry.S
+++ b/arch/arc/kernel/entry.S
@@ -498,7 +498,7 @@
 trap_with_param:
 
 	; stop_pc info by gdb needs this info
-	stw orig_r8_IS_BRKPT, [sp, PT_orig_r8]
+	st  orig_r8_IS_BRKPT, [sp, PT_orig_r8]
 
 	mov r0, r12
 	lr  r1, [efa]
@@ -723,7 +723,7 @@
 	; things to what they were, before returning from L2 context
 	;----------------------------------------------------------------
 
-	ldw  r9, [sp, PT_orig_r8]      ; get orig_r8 to make sure it is
+	ld   r9, [sp, PT_orig_r8]      ; get orig_r8 to make sure it is
 	brne r9, orig_r8_IS_IRQ2, 149f ; infact a L2 ISR ret path
 
 	ld r9, [sp, PT_status32]       ; get statu32_l2 (saved in pt_regs)
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
index 006dec3..0f944f0 100644
--- a/arch/arc/kernel/head.S
+++ b/arch/arc/kernel/head.S
@@ -27,11 +27,16 @@
 	; Don't clobber r0-r4 yet. It might have bootloader provided info
 	;-------------------------------------------------------------------
 
+	sr	@_int_vec_base_lds, [AUX_INTR_VEC_BASE]
+
 #ifdef CONFIG_SMP
 	; Only Boot (Master) proceeds. Others wait in platform dependent way
 	;	IDENTITY Reg [ 3  2  1  0 ]
 	;	(cpu-id)             ^^^	=> Zero for UP ARC700
 	;					=> #Core-ID if SMP (Master 0)
+	; Note that non-boot CPUs might not land here if halt-on-reset and
+	; instead breath life from @first_lines_of_secondary, but we still
+	; need to make sure only boot cpu takes this path.
 	GET_CPU_ID  r5
 	cmp	r5, 0
 	jnz	arc_platform_smp_wait_to_boot
@@ -96,6 +101,8 @@
 
 first_lines_of_secondary:
 
+	sr	@_int_vec_base_lds, [AUX_INTR_VEC_BASE]
+
 	; setup per-cpu idle task as "current" on this CPU
 	ld	r0, [@secondary_idle_tsk]
 	SET_CURR_TASK_ON_CPU  r0, r1
diff --git a/arch/arc/kernel/irq.c b/arch/arc/kernel/irq.c
index 8115fa5..a199471 100644
--- a/arch/arc/kernel/irq.c
+++ b/arch/arc/kernel/irq.c
@@ -24,7 +24,6 @@
  * -Needed for each CPU (hence not foldable into init_IRQ)
  *
  * what it does ?
- * -setup Vector Table Base Reg - in case Linux not linked at 0x8000_0000
  * -Disable all IRQs (on CPU side)
  * -Optionally, setup the High priority Interrupts as Level 2 IRQs
  */
@@ -32,8 +31,6 @@
 {
 	int level_mask = 0;
 
-	write_aux_reg(AUX_INTR_VEC_BASE, _int_vec_base_lds);
-
 	/* Disable all IRQs: enable them as devices request */
 	write_aux_reg(AUX_IENABLE, 0);
 
diff --git a/arch/arc/kernel/ptrace.c b/arch/arc/kernel/ptrace.c
index c6a81c5..0851604 100644
--- a/arch/arc/kernel/ptrace.c
+++ b/arch/arc/kernel/ptrace.c
@@ -92,7 +92,7 @@
 	REG_IN_CHUNK(scratch, callee, ptregs);	/* pt_regs[bta..orig_r8] */
 	REG_IN_CHUNK(callee, efa, cregs);	/* callee_regs[r25..r13] */
 	REG_IGNORE_ONE(efa);			/* efa update invalid */
-	REG_IN_ONE(stop_pc, &ptregs->ret);	/* stop_pc: PC update */
+	REG_IGNORE_ONE(stop_pc);			/* PC updated via @ret */
 
 	return ret;
 }
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index b2b3731..2d7786b 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -47,10 +47,7 @@
 	READ_BCR(AUX_IDENTITY, cpu->core);
 
 	cpu->timers = read_aux_reg(ARC_REG_TIMERS_BCR);
-
 	cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE);
-	if (cpu->vec_base == 0)
-		cpu->vec_base = (unsigned int)_int_vec_base_lds;
 
 	READ_BCR(ARC_REG_D_UNCACH_BCR, uncached_space);
 	cpu->uncached_base = uncached_space.start << 24;
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
index ee6ef2f..7e95e1a 100644
--- a/arch/arc/kernel/signal.c
+++ b/arch/arc/kernel/signal.c
@@ -101,7 +101,6 @@
 {
 	struct rt_sigframe __user *sf;
 	unsigned int magic;
-	int err;
 	struct pt_regs *regs = current_pt_regs();
 
 	/* Always make any pending restarted system calls return -EINTR */
@@ -119,15 +118,16 @@
 	if (!access_ok(VERIFY_READ, sf, sizeof(*sf)))
 		goto badframe;
 
-	err = restore_usr_regs(regs, sf);
-	err |= __get_user(magic, &sf->sigret_magic);
-	if (err)
+	if (__get_user(magic, &sf->sigret_magic))
 		goto badframe;
 
 	if (unlikely(is_do_ss_needed(magic)))
 		if (restore_altstack(&sf->uc.uc_stack))
 			goto badframe;
 
+	if (restore_usr_regs(regs, sf))
+		goto badframe;
+
 	/* Don't restart from sigreturn */
 	syscall_wont_restart(regs);
 
@@ -191,6 +191,15 @@
 		return 1;
 
 	/*
+	 * w/o SA_SIGINFO, struct ucontext is partially populated (only
+	 * uc_mcontext/uc_sigmask) for kernel's normal user state preservation
+	 * during signal handler execution. This works for SA_SIGINFO as well
+	 * although the semantics are now overloaded (the same reg state can be
+	 * inspected by userland: but are they allowed to fiddle with it ?
+	 */
+	err |= stash_usr_regs(sf, regs, set);
+
+	/*
 	 * SA_SIGINFO requires 3 args to signal handler:
 	 *  #1: sig-no (common to any handler)
 	 *  #2: struct siginfo
@@ -213,14 +222,6 @@
 		magic = MAGIC_SIGALTSTK;
 	}
 
-	/*
-	 * w/o SA_SIGINFO, struct ucontext is partially populated (only
-	 * uc_mcontext/uc_sigmask) for kernel's normal user state preservation
-	 * during signal handler execution. This works for SA_SIGINFO as well
-	 * although the semantics are now overloaded (the same reg state can be
-	 * inspected by userland: but are they allowed to fiddle with it ?
-	 */
-	err |= stash_usr_regs(sf, regs, set);
 	err |= __put_user(magic, &sf->sigret_magic);
 	if (err)
 		return err;
diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c
index 4cd8163..116d3e0 100644
--- a/arch/arc/kernel/unaligned.c
+++ b/arch/arc/kernel/unaligned.c
@@ -233,6 +233,12 @@
 		regs->status32 &= ~STATUS_DE_MASK;
 	} else {
 		regs->ret += state.instr_len;
+
+		/* handle zero-overhead-loop */
+		if ((regs->ret == regs->lp_end) && (regs->lp_count)) {
+			regs->ret = regs->lp_start;
+			regs->lp_count--;
+		}
 	}
 
 	return 0;
diff --git a/arch/arc/lib/strchr-700.S b/arch/arc/lib/strchr-700.S
index 99c1047..9c548c7 100644
--- a/arch/arc/lib/strchr-700.S
+++ b/arch/arc/lib/strchr-700.S
@@ -39,9 +39,18 @@
 	ld.a	r2,[r0,4]
 	sub	r12,r6,r7
 	bic	r12,r12,r6
+#ifdef __LITTLE_ENDIAN__
 	and	r7,r12,r4
 	breq	r7,0,.Loop ; For speed, we want this branch to be unaligned.
 	b	.Lfound_char ; Likewise this one.
+#else
+	and	r12,r12,r4
+	breq	r12,0,.Loop ; For speed, we want this branch to be unaligned.
+	lsr_s	r12,r12,7
+	bic 	r2,r7,r6
+	b.d	.Lfound_char_b
+	and_s	r2,r2,r12
+#endif
 ; /* We require this code address to be unaligned for speed...  */
 .Laligned:
 	ld_s	r2,[r0]
@@ -95,6 +104,7 @@
 	lsr	r7,r7,7
 
 	bic	r2,r7,r6
+.Lfound_char_b:
 	norm	r2,r2
 	sub_s	r0,r0,4
 	asr_s	r2,r2,3
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index 689ffd8..331a084 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -16,7 +16,7 @@
 #include <linux/kdebug.h>
 #include <asm/pgalloc.h>
 
-static int handle_vmalloc_fault(struct mm_struct *mm, unsigned long address)
+static int handle_vmalloc_fault(unsigned long address)
 {
 	/*
 	 * Synchronize this task's top level page-table
@@ -26,7 +26,7 @@
 	pud_t *pud, *pud_k;
 	pmd_t *pmd, *pmd_k;
 
-	pgd = pgd_offset_fast(mm, address);
+	pgd = pgd_offset_fast(current->active_mm, address);
 	pgd_k = pgd_offset_k(address);
 
 	if (!pgd_present(*pgd_k))
@@ -72,7 +72,7 @@
 	 * nothing more.
 	 */
 	if (address >= VMALLOC_START && address <= VMALLOC_END) {
-		ret = handle_vmalloc_fault(mm, address);
+		ret = handle_vmalloc_fault(address);
 		if (unlikely(ret))
 			goto bad_area_nosemaphore;
 		else
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 988334f..df8471b 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -19,7 +19,6 @@
 	select GENERIC_STRNCPY_FROM_USER
 	select GENERIC_STRNLEN_USER
 	select HARDIRQS_SW_RESEND
-	select HAVE_AOUT
 	select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_MMAP_RND_BITS if MMU
diff --git a/arch/arm/boot/compressed/atags_to_fdt.c b/arch/arm/boot/compressed/atags_to_fdt.c
index aabc02a..d1153c8 100644
--- a/arch/arm/boot/compressed/atags_to_fdt.c
+++ b/arch/arm/boot/compressed/atags_to_fdt.c
@@ -53,6 +53,17 @@
 	return fdt_getprop(fdt, offset, property, len);
 }
 
+static uint32_t get_cell_size(const void *fdt)
+{
+	int len;
+	uint32_t cell_size = 1;
+	const uint32_t *size_len =  getprop(fdt, "/", "#size-cells", &len);
+
+	if (size_len)
+		cell_size = fdt32_to_cpu(*size_len);
+	return cell_size;
+}
+
 static void merge_fdt_bootargs(void *fdt, const char *fdt_cmdline)
 {
 	char cmdline[COMMAND_LINE_SIZE];
@@ -95,9 +106,11 @@
 int atags_to_fdt(void *atag_list, void *fdt, int total_space)
 {
 	struct tag *atag = atag_list;
-	uint32_t mem_reg_property[2 * NR_BANKS];
+	/* In the case of 64 bits memory size, need to reserve 2 cells for
+	 * address and size for each bank */
+	uint32_t mem_reg_property[2 * 2 * NR_BANKS];
 	int memcount = 0;
-	int ret;
+	int ret, memsize;
 
 	/* make sure we've got an aligned pointer */
 	if ((u32)atag_list & 0x3)
@@ -137,8 +150,25 @@
 				continue;
 			if (!atag->u.mem.size)
 				continue;
-			mem_reg_property[memcount++] = cpu_to_fdt32(atag->u.mem.start);
-			mem_reg_property[memcount++] = cpu_to_fdt32(atag->u.mem.size);
+			memsize = get_cell_size(fdt);
+
+			if (memsize == 2) {
+				/* if memsize is 2, that means that
+				 * each data needs 2 cells of 32 bits,
+				 * so the data are 64 bits */
+				uint64_t *mem_reg_prop64 =
+					(uint64_t *)mem_reg_property;
+				mem_reg_prop64[memcount++] =
+					cpu_to_fdt64(atag->u.mem.start);
+				mem_reg_prop64[memcount++] =
+					cpu_to_fdt64(atag->u.mem.size);
+			} else {
+				mem_reg_property[memcount++] =
+					cpu_to_fdt32(atag->u.mem.start);
+				mem_reg_property[memcount++] =
+					cpu_to_fdt32(atag->u.mem.size);
+			}
+
 		} else if (atag->hdr.tag == ATAG_INITRD2) {
 			uint32_t initrd_start, initrd_size;
 			initrd_start = atag->u.initrd.start;
@@ -150,8 +180,10 @@
 		}
 	}
 
-	if (memcount)
-		setprop(fdt, "/memory", "reg", mem_reg_property, 4*memcount);
+	if (memcount) {
+		setprop(fdt, "/memory", "reg", mem_reg_property,
+			4 * memcount * memsize);
+	}
 
 	return fdt_pack(fdt);
 }
diff --git a/arch/arm/boot/dts/at91rm9200.dtsi b/arch/arm/boot/dts/at91rm9200.dtsi
index 5d3ed5a..0af879a 100644
--- a/arch/arm/boot/dts/at91rm9200.dtsi
+++ b/arch/arm/boot/dts/at91rm9200.dtsi
@@ -35,8 +35,12 @@
 		ssc2 = &ssc2;
 	};
 	cpus {
-		cpu@0 {
+		#address-cells = <0>;
+		#size-cells = <0>;
+
+		cpu {
 			compatible = "arm,arm920t";
+			device_type = "cpu";
 		};
 	};
 
diff --git a/arch/arm/boot/dts/at91sam9260.dtsi b/arch/arm/boot/dts/at91sam9260.dtsi
index 84c4bef..0dbdb84 100644
--- a/arch/arm/boot/dts/at91sam9260.dtsi
+++ b/arch/arm/boot/dts/at91sam9260.dtsi
@@ -32,8 +32,12 @@
 		ssc0 = &ssc0;
 	};
 	cpus {
-		cpu@0 {
-			compatible = "arm,arm926ejs";
+		#address-cells = <0>;
+		#size-cells = <0>;
+
+		cpu {
+			compatible = "arm,arm926ej-s";
+			device_type = "cpu";
 		};
 	};
 
@@ -340,6 +344,14 @@
 					};
 				};
 
+				i2c_gpio0 {
+					pinctrl_i2c_gpio0: i2c_gpio0-0 {
+						atmel,pins =
+							<0 23 0x0 0x3   /* PA23 gpio I2C_SDA pin */
+							 0 24 0x0 0x3>; /* PA24 gpio I2C_SCL pin */
+					};
+				};
+
 				pioA: gpio@fffff400 {
 					compatible = "atmel,at91rm9200-gpio";
 					reg = <0xfffff400 0x200>;
@@ -592,6 +604,8 @@
 		i2c-gpio,delay-us = <2>;	/* ~100 kHz */
 		#address-cells = <1>;
 		#size-cells = <0>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_i2c_gpio0>;
 		status = "disabled";
 	};
 };
diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi
index 94b58ab..fcd38f8 100644
--- a/arch/arm/boot/dts/at91sam9263.dtsi
+++ b/arch/arm/boot/dts/at91sam9263.dtsi
@@ -29,8 +29,12 @@
 		ssc1 = &ssc1;
 	};
 	cpus {
-		cpu@0 {
-			compatible = "arm,arm926ejs";
+		#address-cells = <0>;
+		#size-cells = <0>;
+
+		cpu {
+			compatible = "arm,arm926ej-s";
+			device_type = "cpu";
 		};
 	};
 
diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi
index bf18a73..479a062 100644
--- a/arch/arm/boot/dts/at91sam9g45.dtsi
+++ b/arch/arm/boot/dts/at91sam9g45.dtsi
@@ -35,8 +35,12 @@
 		ssc1 = &ssc1;
 	};
 	cpus {
-		cpu@0 {
-			compatible = "arm,arm926ejs";
+		#address-cells = <0>;
+		#size-cells = <0>;
+
+		cpu {
+			compatible = "arm,arm926ej-s";
+			device_type = "cpu";
 		};
 	};
 
diff --git a/arch/arm/boot/dts/at91sam9n12.dtsi b/arch/arm/boot/dts/at91sam9n12.dtsi
index 8d25f88..a92ec78 100644
--- a/arch/arm/boot/dts/at91sam9n12.dtsi
+++ b/arch/arm/boot/dts/at91sam9n12.dtsi
@@ -31,8 +31,12 @@
 		ssc0 = &ssc0;
 	};
 	cpus {
-		cpu@0 {
-			compatible = "arm,arm926ejs";
+		#address-cells = <0>;
+		#size-cells = <0>;
+
+		cpu {
+			compatible = "arm,arm926ej-s";
+			device_type = "cpu";
 		};
 	};
 
diff --git a/arch/arm/boot/dts/at91sam9n12ek.dts b/arch/arm/boot/dts/at91sam9n12ek.dts
index d30e48b..28ba798 100644
--- a/arch/arm/boot/dts/at91sam9n12ek.dts
+++ b/arch/arm/boot/dts/at91sam9n12ek.dts
@@ -14,11 +14,11 @@
 	compatible = "atmel,at91sam9n12ek", "atmel,at91sam9n12", "atmel,at91sam9";
 
 	chosen {
-		bootargs = "mem=128M console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=jffs2";
+		bootargs = "console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=jffs2";
 	};
 
 	memory {
-		reg = <0x20000000 0x10000000>;
+		reg = <0x20000000 0x8000000>;
 	};
 
 	clocks {
diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
index 1145ac3..2b2b692 100644
--- a/arch/arm/boot/dts/at91sam9x5.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5.dtsi
@@ -33,8 +33,12 @@
 		ssc0 = &ssc0;
 	};
 	cpus {
-		cpu@0 {
-			compatible = "arm,arm926ejs";
+		#address-cells = <0>;
+		#size-cells = <0>;
+
+		cpu {
+			compatible = "arm,arm926ej-s";
+			device_type = "cpu";
 		};
 	};
 
@@ -643,7 +647,7 @@
 			};
 
 			rtc@fffffeb0 {
-				compatible = "atmel,at91rm9200-rtc";
+				compatible = "atmel,at91sam9x5-rtc";
 				reg = <0xfffffeb0 0x40>;
 				interrupts = <1 4 7>;
 				status = "disabled";
diff --git a/arch/arm/boot/dts/imx23.dtsi b/arch/arm/boot/dts/imx23.dtsi
index 73fd7d0..587ceef 100644
--- a/arch/arm/boot/dts/imx23.dtsi
+++ b/arch/arm/boot/dts/imx23.dtsi
@@ -23,8 +23,12 @@
 	};
 
 	cpus {
-		cpu@0 {
-			compatible = "arm,arm926ejs";
+		#address-cells = <0>;
+		#size-cells = <0>;
+
+		cpu {
+			compatible = "arm,arm926ej-s";
+			device_type = "cpu";
 		};
 	};
 
diff --git a/arch/arm/boot/dts/imx28.dtsi b/arch/arm/boot/dts/imx28.dtsi
index 600f7cb..4c10a19 100644
--- a/arch/arm/boot/dts/imx28.dtsi
+++ b/arch/arm/boot/dts/imx28.dtsi
@@ -32,8 +32,12 @@
 	};
 
 	cpus {
-		cpu@0 {
-			compatible = "arm,arm926ejs";
+		#address-cells = <0>;
+		#size-cells = <0>;
+
+		cpu {
+			compatible = "arm,arm926ej-s";
+			device_type = "cpu";
 		};
 	};
 
diff --git a/arch/arm/boot/dts/imx6dl.dtsi b/arch/arm/boot/dts/imx6dl.dtsi
index 5bcdf3a..62dc781 100644
--- a/arch/arm/boot/dts/imx6dl.dtsi
+++ b/arch/arm/boot/dts/imx6dl.dtsi
@@ -18,12 +18,14 @@
 
 		cpu@0 {
 			compatible = "arm,cortex-a9";
+			device_type = "cpu";
 			reg = <0>;
 			next-level-cache = <&L2>;
 		};
 
 		cpu@1 {
 			compatible = "arm,cortex-a9";
+			device_type = "cpu";
 			reg = <1>;
 			next-level-cache = <&L2>;
 		};
diff --git a/arch/arm/boot/dts/imx6q.dtsi b/arch/arm/boot/dts/imx6q.dtsi
index 21e6758..dc54a72 100644
--- a/arch/arm/boot/dts/imx6q.dtsi
+++ b/arch/arm/boot/dts/imx6q.dtsi
@@ -18,6 +18,7 @@
 
 		cpu@0 {
 			compatible = "arm,cortex-a9";
+			device_type = "cpu";
 			reg = <0>;
 			next-level-cache = <&L2>;
 			operating-points = <
@@ -39,18 +40,21 @@
 
 		cpu@1 {
 			compatible = "arm,cortex-a9";
+			device_type = "cpu";
 			reg = <1>;
 			next-level-cache = <&L2>;
 		};
 
 		cpu@2 {
 			compatible = "arm,cortex-a9";
+			device_type = "cpu";
 			reg = <2>;
 			next-level-cache = <&L2>;
 		};
 
 		cpu@3 {
 			compatible = "arm,cortex-a9";
+			device_type = "cpu";
 			reg = <3>;
 			next-level-cache = <&L2>;
 		};
diff --git a/arch/arm/boot/dts/integratorcp.dts b/arch/arm/boot/dts/integratorcp.dts
index ff1aea0..72693a6 100644
--- a/arch/arm/boot/dts/integratorcp.dts
+++ b/arch/arm/boot/dts/integratorcp.dts
@@ -9,11 +9,6 @@
 	model = "ARM Integrator/CP";
 	compatible = "arm,integrator-cp";
 
-	aliases {
-		arm,timer-primary = &timer2;
-		arm,timer-secondary = &timer1;
-	};
-
 	chosen {
 		bootargs = "root=/dev/ram0 console=ttyAMA0,38400n8 earlyprintk";
 	};
@@ -24,14 +19,18 @@
 	};
 
 	timer0: timer@13000000 {
+		/* TIMER0 runs @ 25MHz */
 		compatible = "arm,integrator-cp-timer";
+		status = "disabled";
 	};
 
 	timer1: timer@13000100 {
+		/* TIMER1 runs @ 1MHz */
 		compatible = "arm,integrator-cp-timer";
 	};
 
 	timer2: timer@13000200 {
+		/* TIMER2 runs @ 1MHz */
 		compatible = "arm,integrator-cp-timer";
 	};
 
diff --git a/arch/arm/boot/dts/sama5d3.dtsi b/arch/arm/boot/dts/sama5d3.dtsi
index 5000e0d..642775d 100644
--- a/arch/arm/boot/dts/sama5d3.dtsi
+++ b/arch/arm/boot/dts/sama5d3.dtsi
@@ -35,8 +35,12 @@
 		ssc1 = &ssc1;
 	};
 	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
 		cpu@0 {
+			device_type = "cpu";
 			compatible = "arm,cortex-a5";
+			reg = <0x0>;
 		};
 	};
 
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
index e7ef619..06ef8b6 100644
--- a/arch/arm/boot/dts/sun4i-a10.dtsi
+++ b/arch/arm/boot/dts/sun4i-a10.dtsi
@@ -16,8 +16,12 @@
 	interrupt-parent = <&intc>;
 
 	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
 		cpu@0 {
+			device_type = "cpu";
 			compatible = "arm,cortex-a8";
+			reg = <0x0>;
 		};
 	};
 
diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi
index 31fa38f..d285254 100644
--- a/arch/arm/boot/dts/sun5i-a13.dtsi
+++ b/arch/arm/boot/dts/sun5i-a13.dtsi
@@ -17,8 +17,12 @@
 	interrupt-parent = <&intc>;
 
 	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
 		cpu@0 {
+			device_type = "cpu";
 			compatible = "arm,cortex-a8";
+			reg = <0x0>;
 		};
 	};
 
diff --git a/arch/arm/include/asm/a.out-core.h b/arch/arm/include/asm/a.out-core.h
deleted file mode 100644
index 92f10cb..0000000
--- a/arch/arm/include/asm/a.out-core.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/* a.out coredump register dumper
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-
-#ifndef _ASM_A_OUT_CORE_H
-#define _ASM_A_OUT_CORE_H
-
-#ifdef __KERNEL__
-
-#include <linux/user.h>
-#include <linux/elfcore.h>
-
-/*
- * fill in the user structure for an a.out core dump
- */
-static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
-{
-	struct task_struct *tsk = current;
-
-	dump->magic = CMAGIC;
-	dump->start_code = tsk->mm->start_code;
-	dump->start_stack = regs->ARM_sp & ~(PAGE_SIZE - 1);
-
-	dump->u_tsize = (tsk->mm->end_code - tsk->mm->start_code) >> PAGE_SHIFT;
-	dump->u_dsize = (tsk->mm->brk - tsk->mm->start_data + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	dump->u_ssize = 0;
-
-	memset(dump->u_debugreg, 0, sizeof(dump->u_debugreg));
-
-	if (dump->start_stack < 0x04000000)
-		dump->u_ssize = (0x04000000 - dump->start_stack) >> PAGE_SHIFT;
-
-	dump->regs = *regs;
-	dump->u_fpvalid = dump_fpu (regs, &dump->u_fp);
-}
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_A_OUT_CORE_H */
diff --git a/arch/arm/include/asm/jump_label.h b/arch/arm/include/asm/jump_label.h
index bfc198c..863c892 100644
--- a/arch/arm/include/asm/jump_label.h
+++ b/arch/arm/include/asm/jump_label.h
@@ -16,7 +16,7 @@
 
 static __always_inline bool arch_static_branch(struct static_key *key)
 {
-	asm goto("1:\n\t"
+	asm_volatile_goto("1:\n\t"
 		 JUMP_LABEL_NOP "\n\t"
 		 ".pushsection __jump_table,  \"aw\"\n\t"
 		 ".word 1b, %l[l_yes], %c0\n\t"
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index 18d5032..4bb08e3 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -37,16 +37,18 @@
 #define c5_AIFSR	15	/* Auxilary Instrunction Fault Status R */
 #define c6_DFAR		16	/* Data Fault Address Register */
 #define c6_IFAR		17	/* Instruction Fault Address Register */
-#define c9_L2CTLR	18	/* Cortex A15 L2 Control Register */
-#define c10_PRRR	19	/* Primary Region Remap Register */
-#define c10_NMRR	20	/* Normal Memory Remap Register */
-#define c12_VBAR	21	/* Vector Base Address Register */
-#define c13_CID		22	/* Context ID Register */
-#define c13_TID_URW	23	/* Thread ID, User R/W */
-#define c13_TID_URO	24	/* Thread ID, User R/O */
-#define c13_TID_PRIV	25	/* Thread ID, Privileged */
-#define c14_CNTKCTL	26	/* Timer Control Register (PL1) */
-#define NR_CP15_REGS	27	/* Number of regs (incl. invalid) */
+#define c7_PAR		18	/* Physical Address Register */
+#define c7_PAR_high	19	/* PAR top 32 bits */
+#define c9_L2CTLR	20	/* Cortex A15 L2 Control Register */
+#define c10_PRRR	21	/* Primary Region Remap Register */
+#define c10_NMRR	22	/* Normal Memory Remap Register */
+#define c12_VBAR	23	/* Vector Base Address Register */
+#define c13_CID		24	/* Context ID Register */
+#define c13_TID_URW	25	/* Thread ID, User R/W */
+#define c13_TID_URO	26	/* Thread ID, User R/O */
+#define c13_TID_PRIV	27	/* Thread ID, Privileged */
+#define c14_CNTKCTL	28	/* Timer Control Register (PL1) */
+#define NR_CP15_REGS	29	/* Number of regs (incl. invalid) */
 
 #define ARM_EXCEPTION_RESET	  0
 #define ARM_EXCEPTION_UNDEFINED   1
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index 7345e37..6f18da0 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -6,6 +6,8 @@
 typedef struct {
 #ifdef CONFIG_CPU_HAS_ASID
 	atomic64_t	id;
+#else
+	int		switch_pending;
 #endif
 	unsigned int	vmalloc_seq;
 	unsigned long	sigpage;
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index a7b85e0..e0b10f1 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -27,7 +27,15 @@
 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
 #define init_new_context(tsk,mm)	({ atomic64_set(&mm->context.id, 0); 0; })
 
-DECLARE_PER_CPU(atomic64_t, active_asids);
+#ifdef CONFIG_ARM_ERRATA_798181
+void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
+			     cpumask_t *mask);
+#else  /* !CONFIG_ARM_ERRATA_798181 */
+static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
+					   cpumask_t *mask)
+{
+}
+#endif /* CONFIG_ARM_ERRATA_798181 */
 
 #else	/* !CONFIG_CPU_HAS_ASID */
 
@@ -47,7 +55,7 @@
 		 * on non-ASID CPUs, the old mm will remain valid until the
 		 * finish_arch_post_lock_switch() call.
 		 */
-		set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
+		mm->context.switch_pending = 1;
 	else
 		cpu_switch_mm(mm->pgd, mm);
 }
@@ -56,9 +64,21 @@
 	finish_arch_post_lock_switch
 static inline void finish_arch_post_lock_switch(void)
 {
-	if (test_and_clear_thread_flag(TIF_SWITCH_MM)) {
-		struct mm_struct *mm = current->mm;
-		cpu_switch_mm(mm->pgd, mm);
+	struct mm_struct *mm = current->mm;
+
+	if (mm && mm->context.switch_pending) {
+		/*
+		 * Preemption must be disabled during cpu_switch_mm() as we
+		 * have some stateful cache flush implementations. Check
+		 * switch_pending again in case we were preempted and the
+		 * switch to this mm was already done.
+		 */
+		preempt_disable();
+		if (mm->context.switch_pending) {
+			mm->context.switch_pending = 0;
+			cpu_switch_mm(mm->pgd, mm);
+		}
+		preempt_enable_no_resched();
 	}
 }
 
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index 06e7d50..413f387 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -54,7 +54,6 @@
 
 #define start_thread(regs,pc,sp)					\
 ({									\
-	unsigned long *stack = (unsigned long *)sp;			\
 	memset(regs->uregs, 0, sizeof(regs->uregs));			\
 	if (current->personality & ADDR_LIMIT_32BIT)			\
 		regs->ARM_cpsr = USR_MODE;				\
@@ -65,9 +64,6 @@
 	regs->ARM_cpsr |= PSR_ENDSTATE;					\
 	regs->ARM_pc = pc & ~1;		/* pc */			\
 	regs->ARM_sp = sp;		/* sp */			\
-	regs->ARM_r2 = stack[2];	/* r2 (envp) */			\
-	regs->ARM_r1 = stack[1];	/* r1 (argv) */			\
-	regs->ARM_r0 = stack[0];	/* r0 (argc) */			\
 	nommu_start_thread(regs);					\
 })
 
diff --git a/arch/arm/include/asm/syscall.h b/arch/arm/include/asm/syscall.h
index 6db3caa..ed805f1 100644
--- a/arch/arm/include/asm/syscall.h
+++ b/arch/arm/include/asm/syscall.h
@@ -57,6 +57,9 @@
 					 unsigned int i, unsigned int n,
 					 unsigned long *args)
 {
+	if (n == 0)
+		return;
+
 	if (i + n > SYSCALL_MAX_ARGS) {
 		unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
 		unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
@@ -81,6 +84,9 @@
 					 unsigned int i, unsigned int n,
 					 const unsigned long *args)
 {
+	if (n == 0)
+		return;
+
 	if (i + n > SYSCALL_MAX_ARGS) {
 		pr_warning("%s called with max args %d, handling only %d\n",
 			   __func__, i + n, SYSCALL_MAX_ARGS);
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 1995d1a..f00b569 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -156,7 +156,6 @@
 #define TIF_USING_IWMMXT	17
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK	20
-#define TIF_SWITCH_MM		22	/* deferred switch_mm */
 
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index bdf2b84..aa9b4ac 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -43,6 +43,7 @@
 	struct mm_struct	*mm;
 	unsigned int		fullmm;
 	struct vm_area_struct	*vma;
+	unsigned long		start, end;
 	unsigned long		range_start;
 	unsigned long		range_end;
 	unsigned int		nr;
@@ -107,10 +108,12 @@
 }
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
 {
 	tlb->mm = mm;
-	tlb->fullmm = fullmm;
+	tlb->fullmm = !(start | (end+1));
+	tlb->start = start;
+	tlb->end = end;
 	tlb->vma = NULL;
 	tlb->max = ARRAY_SIZE(tlb->local);
 	tlb->pages = tlb->local;
diff --git a/arch/arm/include/uapi/asm/Kbuild b/arch/arm/include/uapi/asm/Kbuild
index 47bcb2d..18d76fd 100644
--- a/arch/arm/include/uapi/asm/Kbuild
+++ b/arch/arm/include/uapi/asm/Kbuild
@@ -1,7 +1,6 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
 
-header-y += a.out.h
 header-y += byteorder.h
 header-y += fcntl.h
 header-y += hwcap.h
diff --git a/arch/arm/include/uapi/asm/a.out.h b/arch/arm/include/uapi/asm/a.out.h
deleted file mode 100644
index 083894b..0000000
--- a/arch/arm/include/uapi/asm/a.out.h
+++ /dev/null
@@ -1,34 +0,0 @@
-#ifndef __ARM_A_OUT_H__
-#define __ARM_A_OUT_H__
-
-#include <linux/personality.h>
-#include <linux/types.h>
-
-struct exec
-{
-  __u32 a_info;		/* Use macros N_MAGIC, etc for access */
-  __u32 a_text;		/* length of text, in bytes */
-  __u32 a_data;		/* length of data, in bytes */
-  __u32 a_bss;		/* length of uninitialized data area for file, in bytes */
-  __u32 a_syms;		/* length of symbol table data in file, in bytes */
-  __u32 a_entry;	/* start address */
-  __u32 a_trsize;	/* length of relocation info for text, in bytes */
-  __u32 a_drsize;	/* length of relocation info for data, in bytes */
-};
-
-/*
- * This is always the same
- */
-#define N_TXTADDR(a)	(0x00008000)
-
-#define N_TRSIZE(a)	((a).a_trsize)
-#define N_DRSIZE(a)	((a).a_drsize)
-#define N_SYMSIZE(a)	((a).a_syms)
-
-#define M_ARM 103
-
-#ifndef LIBRARY_START_TEXT
-#define LIBRARY_START_TEXT	(0x00c00000)
-#endif
-
-#endif /* __A_OUT_GNU_H__ */
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 8c3094d..e19edc6 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -53,7 +53,12 @@
 static int
 armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
 {
-	int mapping = (*event_map)[config];
+	int mapping;
+
+	if (config >= PERF_COUNT_HW_MAX)
+		return -ENOENT;
+
+	mapping = (*event_map)[config];
 	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
 }
 
@@ -253,6 +258,9 @@
 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	struct pmu *leader_pmu = event->group_leader->pmu;
 
+	if (is_software_event(event))
+		return 1;
+
 	if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
 		return 1;
 
@@ -569,6 +577,7 @@
 		return;
 	}
 
+	perf_callchain_store(entry, regs->ARM_pc);
 	tail = (struct frame_tail __user *)regs->ARM_fp - 1;
 
 	while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index b8c8fbd..654eace 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -402,8 +402,9 @@
 			struct mm_struct *mm = current->mm;
 
 			/*
-			 * 32-bit code can use the new high-page
-			 * signal return code support.
+			 * 32-bit code can use the signal return page
+			 * except when the MPU has protected the vectors
+			 * page from PL0
 			 */
 			retcode = mm->context.sigpage + signal_return_offset +
 				  (idx << 2) + thumb;
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
index 9a52a07..a98b62d 100644
--- a/arch/arm/kernel/smp_tlb.c
+++ b/arch/arm/kernel/smp_tlb.c
@@ -103,7 +103,7 @@
 
 static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
 {
-	int cpu, this_cpu;
+	int this_cpu;
 	cpumask_t mask = { CPU_BITS_NONE };
 
 	if (!erratum_a15_798181())
@@ -111,21 +111,7 @@
 
 	dummy_flush_tlb_a15_erratum();
 	this_cpu = get_cpu();
-	for_each_online_cpu(cpu) {
-		if (cpu == this_cpu)
-			continue;
-		/*
-		 * We only need to send an IPI if the other CPUs are running
-		 * the same ASID as the one being invalidated. There is no
-		 * need for locking around the active_asids check since the
-		 * switch_mm() function has at least one dmb() (as required by
-		 * this workaround) in case a context switch happens on
-		 * another CPU after the condition below.
-		 */
-		if (atomic64_read(&mm->context.id) ==
-		    atomic64_read(&per_cpu(active_asids, cpu)))
-			cpumask_set_cpu(cpu, &mask);
-	}
+	a15_erratum_get_cpumask(this_cpu, mm, &mask);
 	smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1);
 	put_cpu();
 }
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index 90525d9..f6fd1d4 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -120,7 +120,7 @@
 	 * changing cpu.
 	 */
 	if (flags == POST_RATE_CHANGE)
-		smp_call_function(twd_update_frequency,
+		on_each_cpu(twd_update_frequency,
 				  (void *)&cnd->new_rate, 1);
 
 	return NOTIFY_OK;
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index 8eea97b..db9cf69 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -146,7 +146,11 @@
 #define access_pmintenclr pm_fake
 
 /* Architected CP15 registers.
- * Important: Must be sorted ascending by CRn, CRM, Op1, Op2
+ * CRn denotes the primary register number, but is copied to the CRm in the
+ * user space API for 64-bit register access in line with the terminology used
+ * in the ARM ARM.
+ * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit
+ *            registers preceding 32-bit ones.
  */
 static const struct coproc_reg cp15_regs[] = {
 	/* CSSELR: swapped by interrupt.S. */
@@ -154,8 +158,8 @@
 			NULL, reset_unknown, c0_CSSELR },
 
 	/* TTBR0/TTBR1: swapped by interrupt.S. */
-	{ CRm( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 },
-	{ CRm( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 },
+	{ CRm64( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 },
+	{ CRm64( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 },
 
 	/* TTBCR: swapped by interrupt.S. */
 	{ CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32,
@@ -180,6 +184,10 @@
 			NULL, reset_unknown, c6_DFAR },
 	{ CRn( 6), CRm( 0), Op1( 0), Op2( 2), is32,
 			NULL, reset_unknown, c6_IFAR },
+
+	/* PAR swapped by interrupt.S */
+	{ CRm64( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR },
+
 	/*
 	 * DC{C,I,CI}SW operations:
 	 */
@@ -395,12 +403,13 @@
 			      | KVM_REG_ARM_OPC1_MASK))
 			return false;
 		params->is_64bit = true;
-		params->CRm = ((id & KVM_REG_ARM_CRM_MASK)
+		/* CRm to CRn: see cp15_to_index for details */
+		params->CRn = ((id & KVM_REG_ARM_CRM_MASK)
 			       >> KVM_REG_ARM_CRM_SHIFT);
 		params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
 			       >> KVM_REG_ARM_OPC1_SHIFT);
 		params->Op2 = 0;
-		params->CRn = 0;
+		params->CRm = 0;
 		return true;
 	default:
 		return false;
@@ -894,7 +903,14 @@
 	if (reg->is_64) {
 		val |= KVM_REG_SIZE_U64;
 		val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
-		val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT);
+		/*
+		 * CRn always denotes the primary coproc. reg. nr. for the
+		 * in-kernel representation, but the user space API uses the
+		 * CRm for the encoding, because it is modelled after the
+		 * MRRC/MCRR instructions: see the ARM ARM rev. c page
+		 * B3-1445
+		 */
+		val |= (reg->CRn << KVM_REG_ARM_CRM_SHIFT);
 	} else {
 		val |= KVM_REG_SIZE_U32;
 		val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
diff --git a/arch/arm/kvm/coproc.h b/arch/arm/kvm/coproc.h
index b7301d3..0461d5c 100644
--- a/arch/arm/kvm/coproc.h
+++ b/arch/arm/kvm/coproc.h
@@ -135,6 +135,8 @@
 		return -1;
 	if (i1->CRn != i2->CRn)
 		return i1->CRn - i2->CRn;
+	if (i1->is_64 != i2->is_64)
+		return i2->is_64 - i1->is_64;
 	if (i1->CRm != i2->CRm)
 		return i1->CRm - i2->CRm;
 	if (i1->Op1 != i2->Op1)
@@ -145,6 +147,7 @@
 
 #define CRn(_x)		.CRn = _x
 #define CRm(_x) 	.CRm = _x
+#define CRm64(_x)       .CRn = _x, .CRm = 0
 #define Op1(_x) 	.Op1 = _x
 #define Op2(_x) 	.Op2 = _x
 #define is64		.is_64 = true
diff --git a/arch/arm/kvm/coproc_a15.c b/arch/arm/kvm/coproc_a15.c
index 685063a..cf93472 100644
--- a/arch/arm/kvm/coproc_a15.c
+++ b/arch/arm/kvm/coproc_a15.c
@@ -114,7 +114,11 @@
 
 /*
  * A15-specific CP15 registers.
- * Important: Must be sorted ascending by CRn, CRM, Op1, Op2
+ * CRn denotes the primary register number, but is copied to the CRm in the
+ * user space API for 64-bit register access in line with the terminology used
+ * in the ARM ARM.
+ * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit
+ *            registers preceding 32-bit ones.
  */
 static const struct coproc_reg a15_regs[] = {
 	/* MPIDR: we use VMPIDR for guest access. */
diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S
index f7793df..16cd4ba 100644
--- a/arch/arm/kvm/interrupts.S
+++ b/arch/arm/kvm/interrupts.S
@@ -49,6 +49,7 @@
 ENTRY(__kvm_tlb_flush_vmid_ipa)
 	push	{r2, r3}
 
+	dsb	ishst
 	add	r0, r0, #KVM_VTTBR
 	ldrd	r2, r3, [r0]
 	mcrr	p15, 6, r2, r3, c2	@ Write VTTBR
@@ -291,6 +292,7 @@
 	ldr	r2, =BSYM(panic)
 	msr	ELR_hyp, r2
 	ldr	r0, =\panic_str
+	clrex				@ Clear exclusive monitor
 	eret
 .endm
 
@@ -414,6 +416,10 @@
 	mrcne	p15, 4, r2, c6, c0, 4	@ HPFAR
 	bne	3f
 
+	/* Preserve PAR */
+	mrrc	p15, 0, r0, r1, c7	@ PAR
+	push	{r0, r1}
+
 	/* Resolve IPA using the xFAR */
 	mcr	p15, 0, r2, c7, c8, 0	@ ATS1CPR
 	isb
@@ -424,13 +430,20 @@
 	lsl	r2, r2, #4
 	orr	r2, r2, r1, lsl #24
 
+	/* Restore PAR */
+	pop	{r0, r1}
+	mcrr	p15, 0, r0, r1, c7	@ PAR
+
 3:	load_vcpu			@ Load VCPU pointer to r0
 	str	r2, [r0, #VCPU_HPFAR]
 
 1:	mov	r1, #ARM_EXCEPTION_HVC
 	b	__kvm_vcpu_return
 
-4:	pop	{r0, r1, r2}		@ Failed translation, return to guest
+4:	pop	{r0, r1}		@ Failed translation, return to guest
+	mcrr	p15, 0, r0, r1, c7	@ PAR
+	clrex
+	pop	{r0, r1, r2}
 	eret
 
 /*
@@ -456,6 +469,7 @@
 
 	pop	{r3-r7}
 	pop	{r0-r2}
+	clrex
 	eret
 #endif
 
diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S
index 3c8f2f0..2b44b95 100644
--- a/arch/arm/kvm/interrupts_head.S
+++ b/arch/arm/kvm/interrupts_head.S
@@ -302,11 +302,14 @@
 	.endif
 
 	mrc	p15, 0, r2, c14, c1, 0	@ CNTKCTL
+	mrrc	p15, 0, r4, r5, c7	@ PAR
 
 	.if \store_to_vcpu == 0
-	push	{r2}
+	push	{r2,r4-r5}
 	.else
 	str	r2, [vcpu, #CP15_OFFSET(c14_CNTKCTL)]
+	add	r12, vcpu, #CP15_OFFSET(c7_PAR)
+	strd	r4, r5, [r12]
 	.endif
 .endm
 
@@ -319,12 +322,15 @@
  */
 .macro write_cp15_state read_from_vcpu
 	.if \read_from_vcpu == 0
-	pop	{r2}
+	pop	{r2,r4-r5}
 	.else
 	ldr	r2, [vcpu, #CP15_OFFSET(c14_CNTKCTL)]
+	add	r12, vcpu, #CP15_OFFSET(c7_PAR)
+	ldrd	r4, r5, [r12]
 	.endif
 
 	mcr	p15, 0, r2, c14, c1, 0	@ CNTKCTL
+	mcrr	p15, 0, r4, r5, c7	@ PAR
 
 	.if \read_from_vcpu == 0
 	pop	{r2-r12}
diff --git a/arch/arm/mach-davinci/board-dm355-leopard.c b/arch/arm/mach-davinci/board-dm355-leopard.c
index dff4ddc..139e42d 100644
--- a/arch/arm/mach-davinci/board-dm355-leopard.c
+++ b/arch/arm/mach-davinci/board-dm355-leopard.c
@@ -75,6 +75,7 @@
 	.parts			= davinci_nand_partitions,
 	.nr_parts		= ARRAY_SIZE(davinci_nand_partitions),
 	.ecc_mode		= NAND_ECC_HW_SYNDROME,
+	.ecc_bits		= 4,
 	.bbt_options		= NAND_BBT_USE_FLASH,
 };
 
diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c
index a33686a..fa4bfaf 100644
--- a/arch/arm/mach-davinci/board-dm644x-evm.c
+++ b/arch/arm/mach-davinci/board-dm644x-evm.c
@@ -153,6 +153,7 @@
 	.parts		= davinci_evm_nandflash_partition,
 	.nr_parts	= ARRAY_SIZE(davinci_evm_nandflash_partition),
 	.ecc_mode	= NAND_ECC_HW,
+	.ecc_bits	= 1,
 	.bbt_options	= NAND_BBT_USE_FLASH,
 	.timing		= &davinci_evm_nandflash_timing,
 };
diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c
index fbb8e5a..0c005e8 100644
--- a/arch/arm/mach-davinci/board-dm646x-evm.c
+++ b/arch/arm/mach-davinci/board-dm646x-evm.c
@@ -90,6 +90,7 @@
 	.parts			= davinci_nand_partitions,
 	.nr_parts		= ARRAY_SIZE(davinci_nand_partitions),
 	.ecc_mode		= NAND_ECC_HW,
+	.ecc_bits		= 1,
 	.options		= 0,
 };
 
diff --git a/arch/arm/mach-davinci/board-neuros-osd2.c b/arch/arm/mach-davinci/board-neuros-osd2.c
index 2bc112a..808233b 100644
--- a/arch/arm/mach-davinci/board-neuros-osd2.c
+++ b/arch/arm/mach-davinci/board-neuros-osd2.c
@@ -88,6 +88,7 @@
 	.parts		= davinci_ntosd2_nandflash_partition,
 	.nr_parts	= ARRAY_SIZE(davinci_ntosd2_nandflash_partition),
 	.ecc_mode	= NAND_ECC_HW,
+	.ecc_bits	= 1,
 	.bbt_options	= NAND_BBT_USE_FLASH,
 };
 
diff --git a/arch/arm/mach-footbridge/dc21285.c b/arch/arm/mach-footbridge/dc21285.c
index a7cd2cf..3490a24 100644
--- a/arch/arm/mach-footbridge/dc21285.c
+++ b/arch/arm/mach-footbridge/dc21285.c
@@ -276,8 +276,6 @@
 
 	sys->mem_offset  = DC21285_PCI_MEM;
 
-	pci_ioremap_io(0, DC21285_PCI_IO);
-
 	pci_add_resource_offset(&sys->resources, &res[0], sys->mem_offset);
 	pci_add_resource_offset(&sys->resources, &res[1], sys->mem_offset);
 
diff --git a/arch/arm/mach-mxs/pm.h b/arch/arm/mach-mxs/pm.h
index f57e7cd..09d77b0 100644
--- a/arch/arm/mach-mxs/pm.h
+++ b/arch/arm/mach-mxs/pm.h
@@ -9,6 +9,10 @@
 #ifndef __ARCH_MXS_PM_H
 #define __ARCH_MXS_PM_H
 
+#ifdef CONFIG_PM
 void mxs_pm_init(void);
+#else
+#define mxs_pm_init NULL
+#endif
 
 #endif
diff --git a/arch/arm/mach-s3c24xx/clock-s3c2410.c b/arch/arm/mach-s3c24xx/clock-s3c2410.c
index 34fffdf..5645536 100644
--- a/arch/arm/mach-s3c24xx/clock-s3c2410.c
+++ b/arch/arm/mach-s3c24xx/clock-s3c2410.c
@@ -119,66 +119,101 @@
 	}
 };
 
-static struct clk init_clocks[] = {
-	{
-		.name		= "lcd",
-		.parent		= &clk_h,
-		.enable		= s3c2410_clkcon_enable,
-		.ctrlbit	= S3C2410_CLKCON_LCDC,
-	}, {
-		.name		= "gpio",
-		.parent		= &clk_p,
-		.enable		= s3c2410_clkcon_enable,
-		.ctrlbit	= S3C2410_CLKCON_GPIO,
-	}, {
-		.name		= "usb-host",
-		.parent		= &clk_h,
-		.enable		= s3c2410_clkcon_enable,
-		.ctrlbit	= S3C2410_CLKCON_USBH,
-	}, {
-		.name		= "usb-device",
-		.parent		= &clk_h,
-		.enable		= s3c2410_clkcon_enable,
-		.ctrlbit	= S3C2410_CLKCON_USBD,
-	}, {
-		.name		= "timers",
-		.parent		= &clk_p,
-		.enable		= s3c2410_clkcon_enable,
-		.ctrlbit	= S3C2410_CLKCON_PWMT,
-	}, {
-		.name		= "uart",
-		.devname	= "s3c2410-uart.0",
-		.parent		= &clk_p,
-		.enable		= s3c2410_clkcon_enable,
-		.ctrlbit	= S3C2410_CLKCON_UART0,
-	}, {
-		.name		= "uart",
-		.devname	= "s3c2410-uart.1",
-		.parent		= &clk_p,
-		.enable		= s3c2410_clkcon_enable,
-		.ctrlbit	= S3C2410_CLKCON_UART1,
-	}, {
-		.name		= "uart",
-		.devname	= "s3c2410-uart.2",
-		.parent		= &clk_p,
-		.enable		= s3c2410_clkcon_enable,
-		.ctrlbit	= S3C2410_CLKCON_UART2,
-	}, {
-		.name		= "rtc",
-		.parent		= &clk_p,
-		.enable		= s3c2410_clkcon_enable,
-		.ctrlbit	= S3C2410_CLKCON_RTC,
-	}, {
-		.name		= "watchdog",
-		.parent		= &clk_p,
-		.ctrlbit	= 0,
-	}, {
-		.name		= "usb-bus-host",
-		.parent		= &clk_usb_bus,
-	}, {
-		.name		= "usb-bus-gadget",
-		.parent		= &clk_usb_bus,
-	},
+static struct clk clk_lcd = {
+	.name		= "lcd",
+	.parent		= &clk_h,
+	.enable		= s3c2410_clkcon_enable,
+	.ctrlbit	= S3C2410_CLKCON_LCDC,
+};
+
+static struct clk clk_gpio = {
+	.name		= "gpio",
+	.parent		= &clk_p,
+	.enable		= s3c2410_clkcon_enable,
+	.ctrlbit	= S3C2410_CLKCON_GPIO,
+};
+
+static struct clk clk_usb_host = {
+	.name		= "usb-host",
+	.parent		= &clk_h,
+	.enable		= s3c2410_clkcon_enable,
+	.ctrlbit	= S3C2410_CLKCON_USBH,
+};
+
+static struct clk clk_usb_device = {
+	.name		= "usb-device",
+	.parent		= &clk_h,
+	.enable		= s3c2410_clkcon_enable,
+	.ctrlbit	= S3C2410_CLKCON_USBD,
+};
+
+static struct clk clk_timers = {
+	.name		= "timers",
+	.parent		= &clk_p,
+	.enable		= s3c2410_clkcon_enable,
+	.ctrlbit	= S3C2410_CLKCON_PWMT,
+};
+
+struct clk s3c24xx_clk_uart0 = {
+	.name		= "uart",
+	.devname	= "s3c2410-uart.0",
+	.parent		= &clk_p,
+	.enable		= s3c2410_clkcon_enable,
+	.ctrlbit	= S3C2410_CLKCON_UART0,
+};
+
+struct clk s3c24xx_clk_uart1 = {
+	.name		= "uart",
+	.devname	= "s3c2410-uart.1",
+	.parent		= &clk_p,
+	.enable		= s3c2410_clkcon_enable,
+	.ctrlbit	= S3C2410_CLKCON_UART1,
+};
+
+struct clk s3c24xx_clk_uart2 = {
+	.name		= "uart",
+	.devname	= "s3c2410-uart.2",
+	.parent		= &clk_p,
+	.enable		= s3c2410_clkcon_enable,
+	.ctrlbit	= S3C2410_CLKCON_UART2,
+};
+
+static struct clk clk_rtc = {
+	.name		= "rtc",
+	.parent		= &clk_p,
+	.enable		= s3c2410_clkcon_enable,
+	.ctrlbit	= S3C2410_CLKCON_RTC,
+};
+
+static struct clk clk_watchdog = {
+	.name		= "watchdog",
+	.parent		= &clk_p,
+	.ctrlbit	= 0,
+};
+
+static struct clk clk_usb_bus_host = {
+	.name		= "usb-bus-host",
+	.parent		= &clk_usb_bus,
+};
+
+static struct clk clk_usb_bus_gadget = {
+	.name		= "usb-bus-gadget",
+	.parent		= &clk_usb_bus,
+};
+
+static struct clk *init_clocks[] = {
+	&clk_lcd,
+	&clk_gpio,
+	&clk_usb_host,
+	&clk_usb_device,
+	&clk_timers,
+	&s3c24xx_clk_uart0,
+	&s3c24xx_clk_uart1,
+	&s3c24xx_clk_uart2,
+	&clk_rtc,
+	&clk_watchdog,
+	&clk_usb_bus_host,
+	&clk_usb_bus_gadget,
 };
 
 /* s3c2410_baseclk_add()
@@ -195,7 +230,6 @@
 {
 	unsigned long clkslow = __raw_readl(S3C2410_CLKSLOW);
 	unsigned long clkcon  = __raw_readl(S3C2410_CLKCON);
-	struct clk *clkp;
 	struct clk *xtal;
 	int ret;
 	int ptr;
@@ -207,8 +241,9 @@
 
 	/* register clocks from clock array */
 
-	clkp = init_clocks;
-	for (ptr = 0; ptr < ARRAY_SIZE(init_clocks); ptr++, clkp++) {
+	for (ptr = 0; ptr < ARRAY_SIZE(init_clocks); ptr++) {
+		struct clk *clkp = init_clocks[ptr];
+
 		/* ensure that we note the clock state */
 
 		clkp->usage = clkcon & clkp->ctrlbit ? 1 : 0;
diff --git a/arch/arm/mach-s3c24xx/clock-s3c2440.c b/arch/arm/mach-s3c24xx/clock-s3c2440.c
index 1069b56..aaf006d 100644
--- a/arch/arm/mach-s3c24xx/clock-s3c2440.c
+++ b/arch/arm/mach-s3c24xx/clock-s3c2440.c
@@ -166,6 +166,9 @@
 	CLKDEV_INIT(NULL, "clk_uart_baud1", &s3c24xx_uclk),
 	CLKDEV_INIT(NULL, "clk_uart_baud2", &clk_p),
 	CLKDEV_INIT(NULL, "clk_uart_baud3", &s3c2440_clk_fclk_n),
+	CLKDEV_INIT("s3c2440-uart.0", "uart", &s3c24xx_clk_uart0),
+	CLKDEV_INIT("s3c2440-uart.1", "uart", &s3c24xx_clk_uart1),
+	CLKDEV_INIT("s3c2440-uart.2", "uart", &s3c24xx_clk_uart2),
 	CLKDEV_INIT("s3c2440-camif", "camera", &s3c2440_clk_cam_upll),
 };
 
diff --git a/arch/arm/mach-shmobile/setup-emev2.c b/arch/arm/mach-shmobile/setup-emev2.c
index 899a86c..1ccddd2 100644
--- a/arch/arm/mach-shmobile/setup-emev2.c
+++ b/arch/arm/mach-shmobile/setup-emev2.c
@@ -287,14 +287,14 @@
 static struct resource gio3_resources[] = {
 	[0] = {
 		.name	= "GIO_096",
-		.start	= 0xe0050100,
-		.end	= 0xe005012b,
+		.start	= 0xe0050180,
+		.end	= 0xe00501ab,
 		.flags	= IORESOURCE_MEM,
 	},
 	[1] = {
 		.name	= "GIO_096",
-		.start	= 0xe0050140,
-		.end	= 0xe005015f,
+		.start	= 0xe00501c0,
+		.end	= 0xe00501df,
 		.flags	= IORESOURCE_MEM,
 	},
 	[2] = {
diff --git a/arch/arm/mach-shmobile/setup-r8a73a4.c b/arch/arm/mach-shmobile/setup-r8a73a4.c
index c5a75a7..7f45c2e 100644
--- a/arch/arm/mach-shmobile/setup-r8a73a4.c
+++ b/arch/arm/mach-shmobile/setup-r8a73a4.c
@@ -62,7 +62,7 @@
 static const struct plat_sci_port scif[] = {
 	SCIFA_DATA(SCIFA0, 0xe6c40000, gic_spi(144)), /* SCIFA0 */
 	SCIFA_DATA(SCIFA1, 0xe6c50000, gic_spi(145)), /* SCIFA1 */
-	SCIFB_DATA(SCIFB0, 0xe6c50000, gic_spi(145)), /* SCIFB0 */
+	SCIFB_DATA(SCIFB0, 0xe6c20000, gic_spi(148)), /* SCIFB0 */
 	SCIFB_DATA(SCIFB1, 0xe6c30000, gic_spi(149)), /* SCIFB1 */
 	SCIFB_DATA(SCIFB2, 0xe6ce0000, gic_spi(150)), /* SCIFB2 */
 	SCIFB_DATA(SCIFB3, 0xe6cf0000, gic_spi(151)), /* SCIFB3 */
diff --git a/arch/arm/mach-versatile/include/mach/platform.h b/arch/arm/mach-versatile/include/mach/platform.h
index ec08740..6f938cc 100644
--- a/arch/arm/mach-versatile/include/mach/platform.h
+++ b/arch/arm/mach-versatile/include/mach/platform.h
@@ -231,12 +231,14 @@
 /* PCI space */
 #define VERSATILE_PCI_BASE             0x41000000	/* PCI Interface */
 #define VERSATILE_PCI_CFG_BASE	       0x42000000
+#define VERSATILE_PCI_IO_BASE          0x43000000
 #define VERSATILE_PCI_MEM_BASE0        0x44000000
 #define VERSATILE_PCI_MEM_BASE1        0x50000000
 #define VERSATILE_PCI_MEM_BASE2        0x60000000
 /* Sizes of above maps */
 #define VERSATILE_PCI_BASE_SIZE	       0x01000000
 #define VERSATILE_PCI_CFG_BASE_SIZE    0x02000000
+#define VERSATILE_PCI_IO_BASE_SIZE     0x01000000
 #define VERSATILE_PCI_MEM_BASE0_SIZE   0x0c000000	/* 32Mb */
 #define VERSATILE_PCI_MEM_BASE1_SIZE   0x10000000	/* 256Mb */
 #define VERSATILE_PCI_MEM_BASE2_SIZE   0x10000000	/* 256Mb */
diff --git a/arch/arm/mach-versatile/pci.c b/arch/arm/mach-versatile/pci.c
index e92e5e0..c97be4e 100644
--- a/arch/arm/mach-versatile/pci.c
+++ b/arch/arm/mach-versatile/pci.c
@@ -43,9 +43,9 @@
 #define PCI_IMAP0		__IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x0)
 #define PCI_IMAP1		__IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x4)
 #define PCI_IMAP2		__IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x8)
-#define PCI_SMAP0		__IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x10)
-#define PCI_SMAP1		__IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x14)
-#define PCI_SMAP2		__IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x18)
+#define PCI_SMAP0		__IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x14)
+#define PCI_SMAP1		__IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x18)
+#define PCI_SMAP2		__IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x1c)
 #define PCI_SELFID		__IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0xc)
 
 #define DEVICE_ID_OFFSET		0x00
@@ -170,8 +170,8 @@
 	.write	= versatile_write_config,
 };
 
-static struct resource io_mem = {
-	.name	= "PCI I/O space",
+static struct resource unused_mem = {
+	.name	= "PCI unused",
 	.start	= VERSATILE_PCI_MEM_BASE0,
 	.end	= VERSATILE_PCI_MEM_BASE0+VERSATILE_PCI_MEM_BASE0_SIZE-1,
 	.flags	= IORESOURCE_MEM,
@@ -195,9 +195,9 @@
 {
 	int ret = 0;
 
-	ret = request_resource(&iomem_resource, &io_mem);
+	ret = request_resource(&iomem_resource, &unused_mem);
 	if (ret) {
-		printk(KERN_ERR "PCI: unable to allocate I/O "
+		printk(KERN_ERR "PCI: unable to allocate unused "
 		       "memory region (%d)\n", ret);
 		goto out;
 	}
@@ -205,7 +205,7 @@
 	if (ret) {
 		printk(KERN_ERR "PCI: unable to allocate non-prefetchable "
 		       "memory region (%d)\n", ret);
-		goto release_io_mem;
+		goto release_unused_mem;
 	}
 	ret = request_resource(&iomem_resource, &pre_mem);
 	if (ret) {
@@ -225,8 +225,8 @@
 
  release_non_mem:
 	release_resource(&non_mem);
- release_io_mem:
-	release_resource(&io_mem);
+ release_unused_mem:
+	release_resource(&unused_mem);
  out:
 	return ret;
 }
@@ -246,7 +246,7 @@
 		goto out;
 	}
 
-	ret = pci_ioremap_io(0, VERSATILE_PCI_MEM_BASE0);
+	ret = pci_ioremap_io(0, VERSATILE_PCI_IO_BASE);
 	if (ret)
 		goto out;
 
@@ -295,6 +295,19 @@
 	__raw_writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_2);
 
 	/*
+	 * For many years the kernel and QEMU were symbiotically buggy
+	 * in that they both assumed the same broken IRQ mapping.
+	 * QEMU therefore attempts to auto-detect old broken kernels
+	 * so that they still work on newer QEMU as they did on old
+	 * QEMU. Since we now use the correct (ie matching-hardware)
+	 * IRQ mapping we write a definitely different value to a
+	 * PCI_INTERRUPT_LINE register to tell QEMU that we expect
+	 * real hardware behaviour and it need not be backwards
+	 * compatible for us. This write is harmless on real hardware.
+	 */
+	__raw_writel(0, VERSATILE_PCI_VIRT_BASE+PCI_INTERRUPT_LINE);
+
+	/*
 	 * Do not to map Versatile FPGA PCI device into memory space
 	 */
 	pci_slot_ignore |= (1 << myslot);
@@ -327,13 +340,13 @@
 {
 	int irq;
 
-	/* slot,  pin,	irq
-	 *  24     1     IRQ_SIC_PCI0
-	 *  25     1     IRQ_SIC_PCI1
-	 *  26     1     IRQ_SIC_PCI2
-	 *  27     1     IRQ_SIC_PCI3
+	/*
+	 * Slot	INTA	INTB	INTC	INTD
+	 * 31	PCI1	PCI2	PCI3	PCI0
+	 * 30	PCI0	PCI1	PCI2	PCI3
+	 * 29	PCI3	PCI0	PCI1	PCI2
 	 */
-	irq = IRQ_SIC_PCI0 + ((slot - 24 + pin - 1) & 3);
+	irq = IRQ_SIC_PCI0 + ((slot + 2 + pin - 1) & 3);
 
 	return irq;
 }
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 2ac3737..eeab06e 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -39,19 +39,43 @@
  * non 64-bit operations.
  */
 #define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
-#define NUM_USER_ASIDS		(ASID_FIRST_VERSION - 1)
-
-#define ASID_TO_IDX(asid)	((asid & ~ASID_MASK) - 1)
-#define IDX_TO_ASID(idx)	((idx + 1) & ~ASID_MASK)
+#define NUM_USER_ASIDS		ASID_FIRST_VERSION
 
 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
 static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
 
-DEFINE_PER_CPU(atomic64_t, active_asids);
+static DEFINE_PER_CPU(atomic64_t, active_asids);
 static DEFINE_PER_CPU(u64, reserved_asids);
 static cpumask_t tlb_flush_pending;
 
+#ifdef CONFIG_ARM_ERRATA_798181
+void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
+			     cpumask_t *mask)
+{
+	int cpu;
+	unsigned long flags;
+	u64 context_id, asid;
+
+	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
+	context_id = mm->context.id.counter;
+	for_each_online_cpu(cpu) {
+		if (cpu == this_cpu)
+			continue;
+		/*
+		 * We only need to send an IPI if the other CPUs are
+		 * running the same ASID as the one being invalidated.
+		 */
+		asid = per_cpu(active_asids, cpu).counter;
+		if (asid == 0)
+			asid = per_cpu(reserved_asids, cpu);
+		if (context_id == asid)
+			cpumask_set_cpu(cpu, mask);
+	}
+	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
+}
+#endif
+
 #ifdef CONFIG_ARM_LPAE
 static void cpu_set_reserved_ttbr0(void)
 {
@@ -128,7 +152,16 @@
 			asid = 0;
 		} else {
 			asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
-			__set_bit(ASID_TO_IDX(asid), asid_map);
+			/*
+			 * If this CPU has already been through a
+			 * rollover, but hasn't run another task in
+			 * the meantime, we must preserve its reserved
+			 * ASID, as this is the only trace we have of
+			 * the process it is still running.
+			 */
+			if (asid == 0)
+				asid = per_cpu(reserved_asids, i);
+			__set_bit(asid & ~ASID_MASK, asid_map);
 		}
 		per_cpu(reserved_asids, i) = asid;
 	}
@@ -167,17 +200,19 @@
 		/*
 		 * Allocate a free ASID. If we can't find one, take a
 		 * note of the currently active ASIDs and mark the TLBs
-		 * as requiring flushes.
+		 * as requiring flushes. We always count from ASID #1,
+		 * as we reserve ASID #0 to switch via TTBR0 and indicate
+		 * rollover events.
 		 */
-		asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS);
+		asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
 		if (asid == NUM_USER_ASIDS) {
 			generation = atomic64_add_return(ASID_FIRST_VERSION,
 							 &asid_generation);
 			flush_context(cpu);
-			asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS);
+			asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
 		}
 		__set_bit(asid, asid_map);
-		asid = generation | IDX_TO_ASID(asid);
+		asid |= generation;
 		cpumask_clear(mm_cpumask(mm));
 	}
 
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 9a5cdc0..0ecc43f 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -600,7 +600,7 @@
 
 #ifdef CONFIG_SA1111
 	/* now that our DMA memory is actually so designated, we can free it */
-	free_reserved_area(__va(PHYS_PFN_OFFSET), swapper_pg_dir, 0, NULL);
+	free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, 0, NULL);
 #endif
 
 	free_highpages();
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
index 9704097..b3997c7 100644
--- a/arch/arm/mm/proc-v7-2level.S
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -110,7 +110,7 @@
  ARM(	str	r3, [r0, #2048]! )
  THUMB(	add	r0, r0, #2048 )
  THUMB(	str	r3, [r0] )
-	ALT_SMP(mov	pc,lr)
+	ALT_SMP(W(nop))
 	ALT_UP (mcr	p15, 0, r0, c7, c10, 1)		@ flush_pte
 #endif
 	mov	pc, lr
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
index 363027e..6ba4bd9 100644
--- a/arch/arm/mm/proc-v7-3level.S
+++ b/arch/arm/mm/proc-v7-3level.S
@@ -73,7 +73,7 @@
 	tst	r3, #1 << (55 - 32)		@ L_PTE_DIRTY
 	orreq	r2, #L_PTE_RDONLY
 1:	strd	r2, r3, [r0]
-	ALT_SMP(mov	pc, lr)
+	ALT_SMP(W(nop))
 	ALT_UP (mcr	p15, 0, r0, c7, c10, 1)		@ flush_pte
 #endif
 	mov	pc, lr
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index e35fec3..5fbccee 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -75,13 +75,14 @@
 ENDPROC(cpu_v7_do_idle)
 
 ENTRY(cpu_v7_dcache_clean_area)
-	ALT_SMP(mov	pc, lr)			@ MP extensions imply L1 PTW
-	ALT_UP(W(nop))
-	dcache_line_size r2, r3
-1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
+	ALT_SMP(W(nop))			@ MP extensions imply L1 PTW
+	ALT_UP_B(1f)
+	mov	pc, lr
+1:	dcache_line_size r2, r3
+2:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
 	add	r0, r0, r2
 	subs	r1, r1, r2
-	bhi	1b
+	bhi	2b
 	dsb
 	mov	pc, lr
 ENDPROC(cpu_v7_dcache_clean_area)
diff --git a/arch/arm/plat-samsung/include/plat/clock.h b/arch/arm/plat-samsung/include/plat/clock.h
index a62753d..df45d6e 100644
--- a/arch/arm/plat-samsung/include/plat/clock.h
+++ b/arch/arm/plat-samsung/include/plat/clock.h
@@ -83,6 +83,11 @@
 extern struct clksrc_clk clk_epllref;
 extern struct clksrc_clk clk_esysclk;
 
+/* S3C24XX UART clocks */
+extern struct clk s3c24xx_clk_uart0;
+extern struct clk s3c24xx_clk_uart1;
+extern struct clk s3c24xx_clk_uart2;
+
 /* S3C64XX specific clocks */
 extern struct clk clk_h2;
 extern struct clk clk_27m;
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 13609e0..81edd31 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -170,6 +170,7 @@
 	per_cpu(xen_vcpu, cpu) = vcpup;
 
 	enable_percpu_irq(xen_events_irq, 0);
+	put_cpu();
 }
 
 static void xen_restart(char str, const char *cmd)
@@ -272,12 +273,15 @@
 
 static int __init xen_pm_init(void)
 {
+	if (!xen_domain())
+		return -ENODEV;
+
 	pm_power_off = xen_power_off;
 	arm_pm_restart = xen_restart;
 
 	return 0;
 }
-subsys_initcall(xen_pm_init);
+late_initcall(xen_pm_init);
 
 static irqreturn_t xen_arm_callback(int irq, void *arg)
 {
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index 654f096..5546653 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -35,6 +35,7 @@
 	struct mm_struct	*mm;
 	unsigned int		fullmm;
 	struct vm_area_struct	*vma;
+	unsigned long		start, end;
 	unsigned long		range_start;
 	unsigned long		range_end;
 	unsigned int		nr;
@@ -97,10 +98,12 @@
 }
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
 {
 	tlb->mm = mm;
-	tlb->fullmm = fullmm;
+	tlb->fullmm = !(start | (end+1));
+	tlb->start = start;
+	tlb->end = end;
 	tlb->vma = NULL;
 	tlb->max = ARRAY_SIZE(tlb->local);
 	tlb->pages = tlb->local;
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 9ba33c4..cea1594 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -107,7 +107,12 @@
 static int
 armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
 {
-	int mapping = (*event_map)[config];
+	int mapping;
+
+	if (config >= PERF_COUNT_HW_MAX)
+		return -EINVAL;
+
+	mapping = (*event_map)[config];
 	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
 }
 
@@ -317,7 +322,13 @@
 	struct hw_perf_event fake_event = event->hw;
 	struct pmu *leader_pmu = event->group_leader->pmu;
 
-	if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
+	if (is_software_event(event))
+		return 1;
+
+	if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
+		return 1;
+
+	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
 		return 1;
 
 	return armpmu->get_event_idx(hw_events, &fake_event) >= 0;
@@ -773,7 +784,7 @@
 /*
  * PMXEVTYPER: Event selection reg
  */
-#define	ARMV8_EVTYPE_MASK	0xc00000ff	/* Mask for writable bits */
+#define	ARMV8_EVTYPE_MASK	0xc80000ff	/* Mask for writable bits */
 #define	ARMV8_EVTYPE_EVENT	0xff		/* Mask for EVENT bits */
 
 /*
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 916dbe1..7d2cf37 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -152,25 +152,8 @@
 #define ESR_CM			(1 << 8)
 #define ESR_LNX_EXEC		(1 << 24)
 
-/*
- * Check that the permissions on the VMA allow for the fault which occurred.
- * If we encountered a write fault, we must have write permission, otherwise
- * we allow any permission.
- */
-static inline bool access_error(unsigned int esr, struct vm_area_struct *vma)
-{
-	unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
-
-	if (esr & ESR_WRITE)
-		mask = VM_WRITE;
-	if (esr & ESR_LNX_EXEC)
-		mask = VM_EXEC;
-
-	return vma->vm_flags & mask ? false : true;
-}
-
 static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
-			   unsigned int esr, unsigned int flags,
+			   unsigned int mm_flags, unsigned long vm_flags,
 			   struct task_struct *tsk)
 {
 	struct vm_area_struct *vma;
@@ -188,12 +171,17 @@
 	 * it.
 	 */
 good_area:
-	if (access_error(esr, vma)) {
+	/*
+	 * Check that the permissions on the VMA allow for the fault which
+	 * occurred. If we encountered a write or exec fault, we must have
+	 * appropriate permissions, otherwise we allow any permission.
+	 */
+	if (!(vma->vm_flags & vm_flags)) {
 		fault = VM_FAULT_BADACCESS;
 		goto out;
 	}
 
-	return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);
+	return handle_mm_fault(mm, vma, addr & PAGE_MASK, mm_flags);
 
 check_stack:
 	if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
@@ -208,9 +196,15 @@
 	struct task_struct *tsk;
 	struct mm_struct *mm;
 	int fault, sig, code;
-	bool write = (esr & ESR_WRITE) && !(esr & ESR_CM);
-	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-		(write ? FAULT_FLAG_WRITE : 0);
+	unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
+	unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+
+	if (esr & ESR_LNX_EXEC) {
+		vm_flags = VM_EXEC;
+	} else if ((esr & ESR_WRITE) && !(esr & ESR_CM)) {
+		vm_flags = VM_WRITE;
+		mm_flags |= FAULT_FLAG_WRITE;
+	}
 
 	tsk = current;
 	mm  = tsk->mm;
@@ -248,7 +242,7 @@
 #endif
 	}
 
-	fault = __do_page_fault(mm, addr, esr, flags, tsk);
+	fault = __do_page_fault(mm, addr, mm_flags, vm_flags, tsk);
 
 	/*
 	 * If we need to retry but a fatal signal is pending, handle the
@@ -265,7 +259,7 @@
 	 */
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
-	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+	if (mm_flags & FAULT_FLAG_ALLOW_RETRY) {
 		if (fault & VM_FAULT_MAJOR) {
 			tsk->maj_flt++;
 			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs,
@@ -280,8 +274,8 @@
 			 * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk of
 			 * starvation.
 			 */
-			flags &= ~FAULT_FLAG_ALLOW_RETRY;
-			flags |= FAULT_FLAG_TRIED;
+			mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			mm_flags |= FAULT_FLAG_TRIED;
 			goto retry;
 		}
 	}
diff --git a/arch/avr32/kernel/time.c b/arch/avr32/kernel/time.c
index 869a1c6..12f828a 100644
--- a/arch/avr32/kernel/time.c
+++ b/arch/avr32/kernel/time.c
@@ -98,7 +98,14 @@
 	case CLOCK_EVT_MODE_SHUTDOWN:
 		sysreg_write(COMPARE, 0);
 		pr_debug("%s: stop\n", evdev->name);
-		cpu_idle_poll_ctrl(false);
+		if (evdev->mode == CLOCK_EVT_MODE_ONESHOT ||
+		    evdev->mode == CLOCK_EVT_MODE_RESUME) {
+			/*
+			 * Only disable idle poll if we have forced that
+			 * in a previous call.
+			 */
+			cpu_idle_poll_ctrl(false);
+		}
 		break;
 	default:
 		BUG();
diff --git a/arch/c6x/mm/init.c b/arch/c6x/mm/init.c
index a9fcd89..b74ccb5 100644
--- a/arch/c6x/mm/init.c
+++ b/arch/c6x/mm/init.c
@@ -18,6 +18,7 @@
 #include <linux/initrd.h>
 
 #include <asm/sections.h>
+#include <asm/uaccess.h>
 
 /*
  * ZERO_PAGE is a special page that is used for zero-initialized
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index ef3a9de..bc5efc7 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -22,7 +22,7 @@
  * unmapping a portion of the virtual address space, these hooks are called according to
  * the following template:
  *
- *	tlb <- tlb_gather_mmu(mm, full_mm_flush);	// start unmap for address space MM
+ *	tlb <- tlb_gather_mmu(mm, start, end);		// start unmap for address space MM
  *	{
  *	  for each vma that needs a shootdown do {
  *	    tlb_start_vma(tlb, vma);
@@ -58,6 +58,7 @@
 	unsigned int		max;
 	unsigned char		fullmm;		/* non-zero means full mm flush */
 	unsigned char		need_flush;	/* really unmapped some PTEs? */
+	unsigned long		start, end;
 	unsigned long		start_addr;
 	unsigned long		end_addr;
 	struct page		**pages;
@@ -155,13 +156,15 @@
 
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
 {
 	tlb->mm = mm;
 	tlb->max = ARRAY_SIZE(tlb->local);
 	tlb->pages = tlb->local;
 	tlb->nr = 0;
-	tlb->fullmm = full_mm_flush;
+	tlb->fullmm = !(start | (end+1));
+	tlb->start = start;
+	tlb->end = end;
 	tlb->start_addr = ~0UL;
 }
 
diff --git a/arch/m68k/emu/natfeat.c b/arch/m68k/emu/natfeat.c
index 2291a7d..fa277ae 100644
--- a/arch/m68k/emu/natfeat.c
+++ b/arch/m68k/emu/natfeat.c
@@ -18,9 +18,11 @@
 #include <asm/machdep.h>
 #include <asm/natfeat.h>
 
+extern long nf_get_id2(const char *feature_name);
+
 asm("\n"
-"	.global nf_get_id,nf_call\n"
-"nf_get_id:\n"
+"	.global nf_get_id2,nf_call\n"
+"nf_get_id2:\n"
 "	.short	0x7300\n"
 "	rts\n"
 "nf_call:\n"
@@ -29,12 +31,25 @@
 "1:	moveq.l	#0,%d0\n"
 "	rts\n"
 "	.section __ex_table,\"a\"\n"
-"	.long	nf_get_id,1b\n"
+"	.long	nf_get_id2,1b\n"
 "	.long	nf_call,1b\n"
 "	.previous");
-EXPORT_SYMBOL_GPL(nf_get_id);
 EXPORT_SYMBOL_GPL(nf_call);
 
+long nf_get_id(const char *feature_name)
+{
+	/* feature_name may be in vmalloc()ed memory, so make a copy */
+	char name_copy[32];
+	size_t n;
+
+	n = strlcpy(name_copy, feature_name, sizeof(name_copy));
+	if (n >= sizeof(name_copy))
+		return 0;
+
+	return nf_get_id2(name_copy);
+}
+EXPORT_SYMBOL_GPL(nf_get_id);
+
 void nfprint(const char *fmt, ...)
 {
 	static char buf[256];
diff --git a/arch/m68k/include/asm/div64.h b/arch/m68k/include/asm/div64.h
index 444ea8a..ef881cf 100644
--- a/arch/m68k/include/asm/div64.h
+++ b/arch/m68k/include/asm/div64.h
@@ -15,16 +15,17 @@
 		unsigned long long n64;				\
 	} __n;							\
 	unsigned long __rem, __upper;				\
+	unsigned long __base = (base);				\
 								\
 	__n.n64 = (n);						\
 	if ((__upper = __n.n32[0])) {				\
 		asm ("divul.l %2,%1:%0"				\
-			: "=d" (__n.n32[0]), "=d" (__upper)	\
-			: "d" (base), "0" (__n.n32[0]));	\
+		     : "=d" (__n.n32[0]), "=d" (__upper)	\
+		     : "d" (__base), "0" (__n.n32[0]));		\
 	}							\
 	asm ("divu.l %2,%1:%0"					\
-		: "=d" (__n.n32[1]), "=d" (__rem)		\
-		: "d" (base), "1" (__upper), "0" (__n.n32[1]));	\
+	     : "=d" (__n.n32[1]), "=d" (__rem)			\
+	     : "d" (__base), "1" (__upper), "0" (__n.n32[1]));	\
 	(n) = __n.n64;						\
 	__rem;							\
 })
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index d22a4ec..4fab522 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -28,7 +28,7 @@
 	select GENERIC_CLOCKEVENTS
 	select GENERIC_IDLE_POLL_SETUP
 	select MODULES_USE_ELF_RELA
-	select CLONE_BACKWARDS
+	select CLONE_BACKWARDS3
 
 config SWAP
 	def_bool n
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 7a58ab9..e53e2b4 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -27,6 +27,7 @@
 	select HAVE_GENERIC_HARDIRQS
 	select GENERIC_IRQ_PROBE
 	select GENERIC_IRQ_SHOW
+	select GENERIC_PCI_IOMAP
 	select HAVE_ARCH_JUMP_LABEL
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select IRQ_FORCED_THREADING
@@ -2412,7 +2413,6 @@
 	bool "Support for PCI controller"
 	depends on HW_HAS_PCI
 	select PCI_DOMAINS
-	select GENERIC_PCI_IOMAP
 	select NO_GENERIC_PCI_IOPORT_MAP
 	help
 	  Find out whether you have a PCI motherboard. PCI is the name of a
diff --git a/arch/mips/ath79/clock.c b/arch/mips/ath79/clock.c
index 765ef30..733017b 100644
--- a/arch/mips/ath79/clock.c
+++ b/arch/mips/ath79/clock.c
@@ -164,7 +164,7 @@
 		ath79_ahb_clk.rate = freq / t;
 	}
 
-	ath79_wdt_clk.rate = ath79_ref_clk.rate;
+	ath79_wdt_clk.rate = ath79_ahb_clk.rate;
 	ath79_uart_clk.rate = ath79_ref_clk.rate;
 }
 
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index 01b1b3f..2a75ff2 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -7,6 +7,7 @@
  * Copyright (C) 2008, 2009 Wind River Systems
  *   written by Ralf Baechle <ralf@linux-mips.org>
  */
+#include <linux/compiler.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/console.h>
@@ -712,7 +713,7 @@
 	if (cvmx_read_csr(CVMX_L2D_FUS3) & (3ull << 34)) {
 		pr_info("Skipping L2 locking due to reduced L2 cache size\n");
 	} else {
-		uint32_t ebase = read_c0_ebase() & 0x3ffff000;
+		uint32_t __maybe_unused ebase = read_c0_ebase() & 0x3ffff000;
 #ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_TLB
 		/* TLB refill */
 		cvmx_l2c_lock_mem_region(ebase, 0x100);
@@ -996,7 +997,7 @@
 	cvmx_bootmem_unlock();
 	/* Add the memory region for the kernel. */
 	kernel_start = (unsigned long) _text;
-	kernel_size = ALIGN(_end - _text, 0x100000);
+	kernel_size = _end - _text;
 
 	/* Adjust for physical offset. */
 	kernel_start &= ~0xffffffff80000000ULL;
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index b7e5985..b84e1fb 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -170,6 +170,11 @@
 extern void __iomem * __ioremap(phys_t offset, phys_t size, unsigned long flags);
 extern void __iounmap(const volatile void __iomem *addr);
 
+#ifndef CONFIG_PCI
+struct pci_dev;
+static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) {}
+#endif
+
 static inline void __iomem * __ioremap_mode(phys_t offset, unsigned long size,
 	unsigned long flags)
 {
diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h
index 4d6d77e..e194f95 100644
--- a/arch/mips/include/asm/jump_label.h
+++ b/arch/mips/include/asm/jump_label.h
@@ -22,7 +22,7 @@
 
 static __always_inline bool arch_static_branch(struct static_key *key)
 {
-	asm goto("1:\tnop\n\t"
+	asm_volatile_goto("1:\tnop\n\t"
 		"nop\n\t"
 		".pushsection __jump_table,  \"aw\"\n\t"
 		WORD_INSN " 1b, %l[l_yes], %0\n\t"
diff --git a/arch/parisc/include/asm/parisc-device.h b/arch/parisc/include/asm/parisc-device.h
index 9afdad6..eaf4dc1 100644
--- a/arch/parisc/include/asm/parisc-device.h
+++ b/arch/parisc/include/asm/parisc-device.h
@@ -23,6 +23,7 @@
 	/* generic info returned from pdc_pat_cell_module() */
 	unsigned long	mod_info;	/* PAT specific - Misc Module info */
 	unsigned long	pmod_loc;	/* physical Module location */
+	unsigned long	mod0;
 #endif
 	u64		dma_mask;	/* DMA mask for I/O */
 	struct device 	dev;
@@ -61,4 +62,6 @@
 
 extern struct bus_type parisc_bus_type;
 
+int iosapic_serial_irq(struct parisc_device *dev);
+
 #endif /*_ASM_PARISC_PARISC_DEVICE_H_*/
diff --git a/arch/parisc/include/asm/special_insns.h b/arch/parisc/include/asm/special_insns.h
index d306b75..e150930 100644
--- a/arch/parisc/include/asm/special_insns.h
+++ b/arch/parisc/include/asm/special_insns.h
@@ -32,9 +32,12 @@
 	cr;				\
 })
 
-#define mtsp(gr, cr) \
-	__asm__ __volatile__("mtsp %0,%1" \
+#define mtsp(val, cr) \
+	{ if (__builtin_constant_p(val) && ((val) == 0)) \
+	 __asm__ __volatile__("mtsp %%r0,%0" : : "i" (cr) : "memory"); \
+	else \
+	 __asm__ __volatile__("mtsp %0,%1" \
 		: /* no outputs */ \
-		: "r" (gr), "i" (cr) : "memory")
+		: "r" (val), "i" (cr) : "memory"); }
 
 #endif /* __PARISC_SPECIAL_INSNS_H */
diff --git a/arch/parisc/include/asm/tlbflush.h b/arch/parisc/include/asm/tlbflush.h
index 5273da9..9d086a5 100644
--- a/arch/parisc/include/asm/tlbflush.h
+++ b/arch/parisc/include/asm/tlbflush.h
@@ -63,13 +63,14 @@
 static inline void flush_tlb_page(struct vm_area_struct *vma,
 	unsigned long addr)
 {
-	unsigned long flags;
+	unsigned long flags, sid;
 
 	/* For one page, it's not worth testing the split_tlb variable */
 
 	mb();
-	mtsp(vma->vm_mm->context,1);
+	sid = vma->vm_mm->context;
 	purge_tlb_start(flags);
+	mtsp(sid, 1);
 	pdtlb(addr);
 	pitlb(addr);
 	purge_tlb_end(flags);
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 65fb4cb..c035673 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -71,18 +71,27 @@
 }
 EXPORT_SYMBOL(flush_cache_all_local);
 
+/* Virtual address of pfn.  */
+#define pfn_va(pfn)	__va(PFN_PHYS(pfn))
+
 void
 update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 {
-	struct page *page = pte_page(*ptep);
+	unsigned long pfn = pte_pfn(*ptep);
+	struct page *page;
 
-	if (pfn_valid(page_to_pfn(page)) && page_mapping(page) &&
-	    test_bit(PG_dcache_dirty, &page->flags)) {
+	/* We don't have pte special.  As a result, we can be called with
+	   an invalid pfn and we don't need to flush the kernel dcache page.
+	   This occurs with FireGL card in C8000.  */
+	if (!pfn_valid(pfn))
+		return;
 
-		flush_kernel_dcache_page(page);
+	page = pfn_to_page(pfn);
+	if (page_mapping(page) && test_bit(PG_dcache_dirty, &page->flags)) {
+		flush_kernel_dcache_page_addr(pfn_va(pfn));
 		clear_bit(PG_dcache_dirty, &page->flags);
 	} else if (parisc_requires_coherency())
-		flush_kernel_dcache_page(page);
+		flush_kernel_dcache_page_addr(pfn_va(pfn));
 }
 
 void
@@ -440,8 +449,8 @@
 	else {
 		unsigned long flags;
 
-		mtsp(sid, 1);
 		purge_tlb_start(flags);
+		mtsp(sid, 1);
 		if (split_tlb) {
 			while (npages--) {
 				pdtlb(start);
@@ -495,44 +504,42 @@
 
 void flush_cache_mm(struct mm_struct *mm)
 {
+	struct vm_area_struct *vma;
+	pgd_t *pgd;
+
 	/* Flushing the whole cache on each cpu takes forever on
 	   rp3440, etc.  So, avoid it if the mm isn't too big.  */
-	if (mm_total_size(mm) < parisc_cache_flush_threshold) {
-		struct vm_area_struct *vma;
+	if (mm_total_size(mm) >= parisc_cache_flush_threshold) {
+		flush_cache_all();
+		return;
+	}
 
-		if (mm->context == mfsp(3)) {
-			for (vma = mm->mmap; vma; vma = vma->vm_next) {
-				flush_user_dcache_range_asm(vma->vm_start,
-					vma->vm_end);
-				if (vma->vm_flags & VM_EXEC)
-					flush_user_icache_range_asm(
-					  vma->vm_start, vma->vm_end);
-			}
-		} else {
-			pgd_t *pgd = mm->pgd;
-
-			for (vma = mm->mmap; vma; vma = vma->vm_next) {
-				unsigned long addr;
-
-				for (addr = vma->vm_start; addr < vma->vm_end;
-				     addr += PAGE_SIZE) {
-					pte_t *ptep = get_ptep(pgd, addr);
-					if (ptep != NULL) {
-						pte_t pte = *ptep;
-						__flush_cache_page(vma, addr,
-						  page_to_phys(pte_page(pte)));
-					}
-				}
-			}
+	if (mm->context == mfsp(3)) {
+		for (vma = mm->mmap; vma; vma = vma->vm_next) {
+			flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
+			if ((vma->vm_flags & VM_EXEC) == 0)
+				continue;
+			flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
 		}
 		return;
 	}
 
-#ifdef CONFIG_SMP
-	flush_cache_all();
-#else
-	flush_cache_all_local();
-#endif
+	pgd = mm->pgd;
+	for (vma = mm->mmap; vma; vma = vma->vm_next) {
+		unsigned long addr;
+
+		for (addr = vma->vm_start; addr < vma->vm_end;
+		     addr += PAGE_SIZE) {
+			unsigned long pfn;
+			pte_t *ptep = get_ptep(pgd, addr);
+			if (!ptep)
+				continue;
+			pfn = pte_pfn(*ptep);
+			if (!pfn_valid(pfn))
+				continue;
+			__flush_cache_page(vma, addr, PFN_PHYS(pfn));
+		}
+	}
 }
 
 void
@@ -556,33 +563,32 @@
 void flush_cache_range(struct vm_area_struct *vma,
 		unsigned long start, unsigned long end)
 {
+	unsigned long addr;
+	pgd_t *pgd;
+
 	BUG_ON(!vma->vm_mm->context);
 
-	if ((end - start) < parisc_cache_flush_threshold) {
-		if (vma->vm_mm->context == mfsp(3)) {
-			flush_user_dcache_range_asm(start, end);
-			if (vma->vm_flags & VM_EXEC)
-				flush_user_icache_range_asm(start, end);
-		} else {
-			unsigned long addr;
-			pgd_t *pgd = vma->vm_mm->pgd;
-
-			for (addr = start & PAGE_MASK; addr < end;
-			     addr += PAGE_SIZE) {
-				pte_t *ptep = get_ptep(pgd, addr);
-				if (ptep != NULL) {
-					pte_t pte = *ptep;
-					flush_cache_page(vma,
-					   addr, pte_pfn(pte));
-				}
-			}
-		}
-	} else {
-#ifdef CONFIG_SMP
+	if ((end - start) >= parisc_cache_flush_threshold) {
 		flush_cache_all();
-#else
-		flush_cache_all_local();
-#endif
+		return;
+	}
+
+	if (vma->vm_mm->context == mfsp(3)) {
+		flush_user_dcache_range_asm(start, end);
+		if (vma->vm_flags & VM_EXEC)
+			flush_user_icache_range_asm(start, end);
+		return;
+	}
+
+	pgd = vma->vm_mm->pgd;
+	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
+		unsigned long pfn;
+		pte_t *ptep = get_ptep(pgd, addr);
+		if (!ptep)
+			continue;
+		pfn = pte_pfn(*ptep);
+		if (pfn_valid(pfn))
+			__flush_cache_page(vma, addr, PFN_PHYS(pfn));
 	}
 }
 
@@ -591,9 +597,10 @@
 {
 	BUG_ON(!vma->vm_mm->context);
 
-	flush_tlb_page(vma, vmaddr);
-	__flush_cache_page(vma, vmaddr, page_to_phys(pfn_to_page(pfn)));
-
+	if (pfn_valid(pfn)) {
+		flush_tlb_page(vma, vmaddr);
+		__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
+	}
 }
 
 #ifdef CONFIG_PARISC_TMPALIAS
diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
index 37aabd7..d2d5825 100644
--- a/arch/parisc/kernel/head.S
+++ b/arch/parisc/kernel/head.S
@@ -195,6 +195,8 @@
 	ldw             MEM_PDC_HI(%r0),%r6
 	depd            %r6, 31, 32, %r3        /* move to upper word */
 
+	mfctl		%cr30,%r6		/* PCX-W2 firmware bug */
+
 	ldo             PDC_PSW(%r0),%arg0              /* 21 */
 	ldo             PDC_PSW_SET_DEFAULTS(%r0),%arg1 /* 2 */
 	ldo             PDC_PSW_WIDE_BIT(%r0),%arg2     /* 2 */
@@ -203,6 +205,8 @@
 	copy            %r0,%arg3
 
 stext_pdc_ret:
+	mtctl		%r6,%cr30		/* restore task thread info */
+
 	/* restore rfi target address*/
 	ldd             TI_TASK-THREAD_SZ_ALGN(%sp), %r10
 	tophys_r1       %r10
diff --git a/arch/parisc/kernel/inventory.c b/arch/parisc/kernel/inventory.c
index 3295ef4..f0b6722 100644
--- a/arch/parisc/kernel/inventory.c
+++ b/arch/parisc/kernel/inventory.c
@@ -211,6 +211,7 @@
 	/* REVISIT: who is the consumer of this? not sure yet... */
 	dev->mod_info = pa_pdc_cell->mod_info;	/* pass to PAT_GET_ENTITY() */
 	dev->pmod_loc = pa_pdc_cell->mod_location;
+	dev->mod0 = pa_pdc_cell->mod[0];
 
 	register_parisc_device(dev);	/* advertise device */
 
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index 04e47c6..b3f87a3 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -805,14 +805,14 @@
 	else {
 
 	    /*
-	     * The kernel should never fault on its own address space.
+	     * The kernel should never fault on its own address space,
+	     * unless pagefault_disable() was called before.
 	     */
 
-	    if (fault_space == 0) 
+	    if (fault_space == 0 && !in_atomic())
 	    {
 		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
 		parisc_terminate("Kernel Fault", regs, code, fault_address);
-	
 	    }
 	}
 
diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c
index a49cc81..ac4370b 100644
--- a/arch/parisc/lib/memcpy.c
+++ b/arch/parisc/lib/memcpy.c
@@ -2,6 +2,7 @@
  *    Optimized memory copy routines.
  *
  *    Copyright (C) 2004 Randolph Chung <tausq@debian.org>
+ *    Copyright (C) 2013 Helge Deller <deller@gmx.de>
  *
  *    This program is free software; you can redistribute it and/or modify
  *    it under the terms of the GNU General Public License as published by
@@ -153,17 +154,21 @@
 #define prefetch_dst(addr) do { } while(0)
 #endif
 
+#define PA_MEMCPY_OK		0
+#define PA_MEMCPY_LOAD_ERROR	1
+#define PA_MEMCPY_STORE_ERROR	2
+
 /* Copy from a not-aligned src to an aligned dst, using shifts. Handles 4 words
  * per loop.  This code is derived from glibc. 
  */
-static inline unsigned long copy_dstaligned(unsigned long dst, unsigned long src, unsigned long len, unsigned long o_dst, unsigned long o_src, unsigned long o_len)
+static inline unsigned long copy_dstaligned(unsigned long dst,
+					unsigned long src, unsigned long len)
 {
 	/* gcc complains that a2 and a3 may be uninitialized, but actually
 	 * they cannot be.  Initialize a2/a3 to shut gcc up.
 	 */
 	register unsigned int a0, a1, a2 = 0, a3 = 0;
 	int sh_1, sh_2;
-	struct exception_data *d;
 
 	/* prefetch_src((const void *)src); */
 
@@ -197,7 +202,7 @@
 			goto do2;
 		case 0:
 			if (len == 0)
-				return 0;
+				return PA_MEMCPY_OK;
 			/* a3 = ((unsigned int *) src)[0];
 			   a0 = ((unsigned int *) src)[1]; */
 			ldw(s_space, 0, src, a3, cda_ldw_exc);
@@ -256,42 +261,35 @@
 	preserve_branch(handle_load_error);
 	preserve_branch(handle_store_error);
 
-	return 0;
+	return PA_MEMCPY_OK;
 
 handle_load_error:
 	__asm__ __volatile__ ("cda_ldw_exc:\n");
-	d = &__get_cpu_var(exception_data);
-	DPRINTF("cda_ldw_exc: o_len=%lu fault_addr=%lu o_src=%lu ret=%lu\n",
-		o_len, d->fault_addr, o_src, o_len - d->fault_addr + o_src);
-	return o_len * 4 - d->fault_addr + o_src;
+	return PA_MEMCPY_LOAD_ERROR;
 
 handle_store_error:
 	__asm__ __volatile__ ("cda_stw_exc:\n");
-	d = &__get_cpu_var(exception_data);
-	DPRINTF("cda_stw_exc: o_len=%lu fault_addr=%lu o_dst=%lu ret=%lu\n",
-		o_len, d->fault_addr, o_dst, o_len - d->fault_addr + o_dst);
-	return o_len * 4 - d->fault_addr + o_dst;
+	return PA_MEMCPY_STORE_ERROR;
 }
 
 
-/* Returns 0 for success, otherwise, returns number of bytes not transferred. */
-static unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
+/* Returns PA_MEMCPY_OK, PA_MEMCPY_LOAD_ERROR or PA_MEMCPY_STORE_ERROR.
+ * In case of an access fault the faulty address can be read from the per_cpu
+ * exception data struct. */
+static unsigned long pa_memcpy_internal(void *dstp, const void *srcp,
+					unsigned long len)
 {
 	register unsigned long src, dst, t1, t2, t3;
 	register unsigned char *pcs, *pcd;
 	register unsigned int *pws, *pwd;
 	register double *pds, *pdd;
-	unsigned long ret = 0;
-	unsigned long o_dst, o_src, o_len;
-	struct exception_data *d;
+	unsigned long ret;
 
 	src = (unsigned long)srcp;
 	dst = (unsigned long)dstp;
 	pcs = (unsigned char *)srcp;
 	pcd = (unsigned char *)dstp;
 
-	o_dst = dst; o_src = src; o_len = len;
-
 	/* prefetch_src((const void *)srcp); */
 
 	if (len < THRESHOLD)
@@ -401,7 +399,7 @@
 		len--;
 	}
 
-	return 0;
+	return PA_MEMCPY_OK;
 
 unaligned_copy:
 	/* possibly we are aligned on a word, but not on a double... */
@@ -438,8 +436,7 @@
 		src = (unsigned long)pcs;
 	}
 
-	ret = copy_dstaligned(dst, src, len / sizeof(unsigned int), 
-		o_dst, o_src, o_len);
+	ret = copy_dstaligned(dst, src, len / sizeof(unsigned int));
 	if (ret)
 		return ret;
 
@@ -454,17 +451,41 @@
 
 handle_load_error:
 	__asm__ __volatile__ ("pmc_load_exc:\n");
-	d = &__get_cpu_var(exception_data);
-	DPRINTF("pmc_load_exc: o_len=%lu fault_addr=%lu o_src=%lu ret=%lu\n",
-		o_len, d->fault_addr, o_src, o_len - d->fault_addr + o_src);
-	return o_len - d->fault_addr + o_src;
+	return PA_MEMCPY_LOAD_ERROR;
 
 handle_store_error:
 	__asm__ __volatile__ ("pmc_store_exc:\n");
+	return PA_MEMCPY_STORE_ERROR;
+}
+
+
+/* Returns 0 for success, otherwise, returns number of bytes not transferred. */
+static unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
+{
+	unsigned long ret, fault_addr, reference;
+	struct exception_data *d;
+
+	ret = pa_memcpy_internal(dstp, srcp, len);
+	if (likely(ret == PA_MEMCPY_OK))
+		return 0;
+
+	/* if a load or store fault occured we can get the faulty addr */
 	d = &__get_cpu_var(exception_data);
-	DPRINTF("pmc_store_exc: o_len=%lu fault_addr=%lu o_dst=%lu ret=%lu\n",
-		o_len, d->fault_addr, o_dst, o_len - d->fault_addr + o_dst);
-	return o_len - d->fault_addr + o_dst;
+	fault_addr = d->fault_addr;
+
+	/* error in load or store? */
+	if (ret == PA_MEMCPY_LOAD_ERROR)
+		reference = (unsigned long) srcp;
+	else
+		reference = (unsigned long) dstp;
+
+	DPRINTF("pa_memcpy: fault type = %lu, len=%lu fault_addr=%lu ref=%lu\n",
+		ret, len, fault_addr, reference);
+
+	if (fault_addr >= reference)
+		return len - (fault_addr - reference);
+	else
+		return len;
 }
 
 #ifdef __KERNEL__
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index c33e3ad..fe404e7 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -572,7 +572,7 @@
 config PPC_DENORMALISATION
 	bool "PowerPC denormalisation exception handling"
 	depends on PPC_BOOK3S_64
-	default "n"
+	default "y" if PPC_POWERNV
 	---help---
 	  Add support for handling denormalisation of single precision
 	  values.  Useful for bare metal only.  If unsure say Y here.
@@ -986,6 +986,7 @@
 	  must live at a different physical address than the primary
 	  kernel.
 
+# This value must have zeroes in the bottom 60 bits otherwise lots will break
 config PAGE_OFFSET
 	hex
 	default "0xc000000000000000"
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 46793b5..07ca627 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -358,12 +358,12 @@
 	/* No guest interrupts come through here */	\
 	SET_SCRATCH0(r13);		/* save r13 */	\
 	EXCEPTION_RELON_PROLOG_PSERIES(PACA_EXGEN, label##_common, \
-				       EXC_STD, KVMTEST_PR, vec)
+				       EXC_STD, NOTEST, vec)
 
 #define STD_RELON_EXCEPTION_PSERIES_OOL(vec, label)		\
 	.globl label##_relon_pSeries;				\
 label##_relon_pSeries:						\
-	EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_PR, vec);	\
+	EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, vec);		\
 	EXCEPTION_RELON_PROLOG_PSERIES_1(label##_common, EXC_STD)
 
 #define STD_RELON_EXCEPTION_HV(loc, vec, label)		\
@@ -374,12 +374,12 @@
 	/* No guest interrupts come through here */	\
 	SET_SCRATCH0(r13);	/* save r13 */		\
 	EXCEPTION_RELON_PROLOG_PSERIES(PACA_EXGEN, label##_common, \
-				       EXC_HV, KVMTEST, vec)
+				       EXC_HV, NOTEST, vec)
 
 #define STD_RELON_EXCEPTION_HV_OOL(vec, label)			\
 	.globl label##_relon_hv;				\
 label##_relon_hv:						\
-	EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST, vec);		\
+	EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, vec);		\
 	EXCEPTION_RELON_PROLOG_PSERIES_1(label##_common, EXC_HV)
 
 /* This associate vector numbers with bits in paca->irq_happened */
diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h
index ae098c4..f016bb6 100644
--- a/arch/powerpc/include/asm/jump_label.h
+++ b/arch/powerpc/include/asm/jump_label.h
@@ -19,7 +19,7 @@
 
 static __always_inline bool arch_static_branch(struct static_key *key)
 {
-	asm goto("1:\n\t"
+	asm_volatile_goto("1:\n\t"
 		 "nop\n\t"
 		 ".pushsection __jump_table,  \"aw\"\n\t"
 		 JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h
index c1df590..49fa55b 100644
--- a/arch/powerpc/include/asm/module.h
+++ b/arch/powerpc/include/asm/module.h
@@ -82,10 +82,9 @@
 void sort_ex_table(struct exception_table_entry *start,
 		   struct exception_table_entry *finish);
 
-#ifdef CONFIG_MODVERSIONS
+#if defined(CONFIG_MODVERSIONS) && defined(CONFIG_PPC64)
 #define ARCH_RELOCATES_KCRCTAB
-
-extern const unsigned long reloc_start[];
+#define reloc_start PHYSICAL_START
 #endif
 #endif /* __KERNEL__ */
 #endif	/* _ASM_POWERPC_MODULE_H */
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 988c812..b9f4262 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -211,9 +211,19 @@
 #define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + VIRT_PHYS_OFFSET))
 #define __pa(x) ((unsigned long)(x) - VIRT_PHYS_OFFSET)
 #else
+#ifdef CONFIG_PPC64
+/*
+ * gcc miscompiles (unsigned long)(&static_var) - PAGE_OFFSET
+ * with -mcmodel=medium, so we use & and | instead of - and + on 64-bit.
+ */
+#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) | PAGE_OFFSET))
+#define __pa(x) ((unsigned long)(x) & 0x0fffffffffffffffUL)
+
+#else /* 32-bit, non book E */
 #define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + PAGE_OFFSET - MEMORY_START))
 #define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + MEMORY_START)
 #endif
+#endif
 
 /*
  * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 14a6583..419e712 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -247,6 +247,10 @@
 	unsigned long	tm_orig_msr;	/* Thread's MSR on ctx switch */
 	struct pt_regs	ckpt_regs;	/* Checkpointed registers */
 
+	unsigned long	tm_tar;
+	unsigned long	tm_ppr;
+	unsigned long	tm_dscr;
+
 	/*
 	 * Transactional FP and VSX 0-31 register set.
 	 * NOTE: the sense of these is the opposite of the integer ckpt_regs!
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 4a9e408..e1fb161 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -254,19 +254,28 @@
 #define SPRN_HRMOR	0x139	/* Real mode offset register */
 #define SPRN_HSRR0	0x13A	/* Hypervisor Save/Restore 0 */
 #define SPRN_HSRR1	0x13B	/* Hypervisor Save/Restore 1 */
+/* HFSCR and FSCR bit numbers are the same */
+#define FSCR_TAR_LG	8	/* Enable Target Address Register */
+#define FSCR_EBB_LG	7	/* Enable Event Based Branching */
+#define FSCR_TM_LG	5	/* Enable Transactional Memory */
+#define FSCR_PM_LG	4	/* Enable prob/priv access to PMU SPRs */
+#define FSCR_BHRB_LG	3	/* Enable Branch History Rolling Buffer*/
+#define FSCR_DSCR_LG	2	/* Enable Data Stream Control Register */
+#define FSCR_VECVSX_LG	1	/* Enable VMX/VSX  */
+#define FSCR_FP_LG	0	/* Enable Floating Point */
 #define SPRN_FSCR	0x099	/* Facility Status & Control Register */
-#define   FSCR_TAR	(1 << (63-55)) /* Enable Target Address Register */
-#define   FSCR_EBB	(1 << (63-56)) /* Enable Event Based Branching */
-#define   FSCR_DSCR	(1 << (63-61)) /* Enable Data Stream Control Register */
+#define   FSCR_TAR	__MASK(FSCR_TAR_LG)
+#define   FSCR_EBB	__MASK(FSCR_EBB_LG)
+#define   FSCR_DSCR	__MASK(FSCR_DSCR_LG)
 #define SPRN_HFSCR	0xbe	/* HV=1 Facility Status & Control Register */
-#define   HFSCR_TAR	(1 << (63-55)) /* Enable Target Address Register */
-#define   HFSCR_EBB	(1 << (63-56)) /* Enable Event Based Branching */
-#define   HFSCR_TM	(1 << (63-58)) /* Enable Transactional Memory */
-#define   HFSCR_PM	(1 << (63-60)) /* Enable prob/priv access to PMU SPRs */
-#define   HFSCR_BHRB	(1 << (63-59)) /* Enable Branch History Rolling Buffer*/
-#define   HFSCR_DSCR	(1 << (63-61)) /* Enable Data Stream Control Register */
-#define   HFSCR_VECVSX	(1 << (63-62)) /* Enable VMX/VSX  */
-#define   HFSCR_FP	(1 << (63-63)) /* Enable Floating Point */
+#define   HFSCR_TAR	__MASK(FSCR_TAR_LG)
+#define   HFSCR_EBB	__MASK(FSCR_EBB_LG)
+#define   HFSCR_TM	__MASK(FSCR_TM_LG)
+#define   HFSCR_PM	__MASK(FSCR_PM_LG)
+#define   HFSCR_BHRB	__MASK(FSCR_BHRB_LG)
+#define   HFSCR_DSCR	__MASK(FSCR_DSCR_LG)
+#define   HFSCR_VECVSX	__MASK(FSCR_VECVSX_LG)
+#define   HFSCR_FP	__MASK(FSCR_FP_LG)
 #define SPRN_TAR	0x32f	/* Target Address Register */
 #define SPRN_LPCR	0x13E	/* LPAR Control Register */
 #define   LPCR_VPM0	(1ul << (63-0))
@@ -626,6 +635,7 @@
 #define   MMCR0_TRIGGER	0x00002000UL /* TRIGGER enable */
 #define   MMCR0_PMAO	0x00000080UL /* performance monitor alert has occurred, set to 0 after handling exception */
 #define   MMCR0_SHRFC	0x00000040UL /* SHRre freeze conditions between threads */
+#define   MMCR0_FC56	0x00000010UL /* freeze counters 5 and 6 */
 #define   MMCR0_FCTI	0x00000008UL /* freeze counters in tags inactive mode */
 #define   MMCR0_FCTA	0x00000004UL /* freeze counters in tags active mode */
 #define   MMCR0_FCWAIT	0x00000002UL /* freeze counter in WAIT state */
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index ffbaabe..48cfc85 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -145,6 +145,10 @@
 #define smp_setup_cpu_maps()
 static inline void inhibit_secondary_onlining(void) {}
 static inline void uninhibit_secondary_onlining(void) {}
+static inline const struct cpumask *cpu_sibling_mask(int cpu)
+{
+	return cpumask_of(cpu);
+}
 
 #endif /* CONFIG_SMP */
 
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index 200d763..685ecc8 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -15,6 +15,15 @@
 struct thread_struct;
 extern struct task_struct *_switch(struct thread_struct *prev,
 				   struct thread_struct *next);
+#ifdef CONFIG_PPC_BOOK3S_64
+static inline void save_tar(struct thread_struct *prev)
+{
+	if (cpu_has_feature(CPU_FTR_ARCH_207S))
+		prev->tar = mfspr(SPRN_TAR);
+}
+#else
+static inline void save_tar(struct thread_struct *prev) {}
+#endif
 
 extern void giveup_fpu(struct task_struct *);
 extern void load_up_fpu(void);
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index ee5b690..52e5758 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -764,6 +764,16 @@
 	nb = aligninfo[instr].len;
 	flags = aligninfo[instr].flags;
 
+	/* ldbrx/stdbrx overlap lfs/stfs in the DSISR unfortunately */
+	if (IS_XFORM(instruction) && ((instruction >> 1) & 0x3ff) == 532) {
+		nb = 8;
+		flags = LD+SW;
+	} else if (IS_XFORM(instruction) &&
+		   ((instruction >> 1) & 0x3ff) == 660) {
+		nb = 8;
+		flags = ST+SW;
+	}
+
 	/* Byteswap little endian loads and stores */
 	swiz = 0;
 	if (regs->msr & MSR_LE) {
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 6f16ffa..302886b 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -139,6 +139,9 @@
 	DEFINE(THREAD_TM_TFHAR, offsetof(struct thread_struct, tm_tfhar));
 	DEFINE(THREAD_TM_TEXASR, offsetof(struct thread_struct, tm_texasr));
 	DEFINE(THREAD_TM_TFIAR, offsetof(struct thread_struct, tm_tfiar));
+	DEFINE(THREAD_TM_TAR, offsetof(struct thread_struct, tm_tar));
+	DEFINE(THREAD_TM_PPR, offsetof(struct thread_struct, tm_ppr));
+	DEFINE(THREAD_TM_DSCR, offsetof(struct thread_struct, tm_dscr));
 	DEFINE(PT_CKPT_REGS, offsetof(struct thread_struct, ckpt_regs));
 	DEFINE(THREAD_TRANSACT_VR0, offsetof(struct thread_struct,
 					 transact_vr[0]));
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 8741c85..38847767 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -449,15 +449,6 @@
 
 #ifdef CONFIG_PPC_BOOK3S_64
 BEGIN_FTR_SECTION
-	/*
-	 * Back up the TAR across context switches.  Note that the TAR is not
-	 * available for use in the kernel.  (To provide this, the TAR should
-	 * be backed up/restored on exception entry/exit instead, and be in
-	 * pt_regs.  FIXME, this should be in pt_regs anyway (for debug).)
-	 */
-	mfspr	r0,SPRN_TAR
-	std	r0,THREAD_TAR(r3)
-
 	/* Event based branch registers */
 	mfspr	r0, SPRN_BESCR
 	std	r0, THREAD_BESCR(r3)
@@ -584,9 +575,34 @@
 	ld	r7,DSCR_DEFAULT@toc(2)
 	ld	r0,THREAD_DSCR(r4)
 	cmpwi	r6,0
+	li	r8, FSCR_DSCR
 	bne	1f
 	ld	r0,0(r7)
-1:	cmpd	r0,r25
+	b	3f
+1:
+  BEGIN_FTR_SECTION_NESTED(70)
+	mfspr	r6, SPRN_FSCR
+	or	r6, r6, r8
+	mtspr	SPRN_FSCR, r6
+    BEGIN_FTR_SECTION_NESTED(69)
+	mfspr	r6, SPRN_HFSCR
+	or	r6, r6, r8
+	mtspr	SPRN_HFSCR, r6
+    END_FTR_SECTION_NESTED(CPU_FTR_HVMODE, CPU_FTR_HVMODE, 69)
+	b	4f
+  END_FTR_SECTION_NESTED(CPU_FTR_ARCH_207S, CPU_FTR_ARCH_207S, 70)
+3:
+  BEGIN_FTR_SECTION_NESTED(70)
+	mfspr	r6, SPRN_FSCR
+	andc	r6, r6, r8
+	mtspr	SPRN_FSCR, r6
+    BEGIN_FTR_SECTION_NESTED(69)
+	mfspr	r6, SPRN_HFSCR
+	andc	r6, r6, r8
+	mtspr	SPRN_HFSCR, r6
+    END_FTR_SECTION_NESTED(CPU_FTR_HVMODE, CPU_FTR_HVMODE, 69)
+  END_FTR_SECTION_NESTED(CPU_FTR_ARCH_207S, CPU_FTR_ARCH_207S, 70)
+4:	cmpd	r0,r25
 	beq	2f
 	mtspr	SPRN_DSCR,r0
 2:
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 40e4a17..902ca3c 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -341,10 +341,17 @@
 	EXCEPTION_PROLOG_0(PACA_EXGEN)
 	b	vsx_unavailable_pSeries
 
+facility_unavailable_trampoline:
 	. = 0xf60
 	SET_SCRATCH0(r13)
 	EXCEPTION_PROLOG_0(PACA_EXGEN)
-	b	tm_unavailable_pSeries
+	b	facility_unavailable_pSeries
+
+hv_facility_unavailable_trampoline:
+	. = 0xf80
+	SET_SCRATCH0(r13)
+	EXCEPTION_PROLOG_0(PACA_EXGEN)
+	b	facility_unavailable_hv
 
 #ifdef CONFIG_CBE_RAS
 	STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
@@ -522,8 +529,10 @@
 	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf20)
 	STD_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
 	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40)
-	STD_EXCEPTION_PSERIES_OOL(0xf60, tm_unavailable)
+	STD_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
 	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf60)
+	STD_EXCEPTION_HV_OOL(0xf82, facility_unavailable)
+	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xf82)
 
 /*
  * An interrupt came in while soft-disabled. We set paca->irq_happened, then:
@@ -793,14 +802,10 @@
 	STD_RELON_EXCEPTION_PSERIES(0x4d00, 0xd00, single_step)
 
 	. = 0x4e00
-	SET_SCRATCH0(r13)
-	EXCEPTION_PROLOG_0(PACA_EXGEN)
-	b	h_data_storage_relon_hv
+	b	.	/* Can't happen, see v2.07 Book III-S section 6.5 */
 
 	. = 0x4e20
-	SET_SCRATCH0(r13)
-	EXCEPTION_PROLOG_0(PACA_EXGEN)
-	b	h_instr_storage_relon_hv
+	b	.	/* Can't happen, see v2.07 Book III-S section 6.5 */
 
 	. = 0x4e40
 	SET_SCRATCH0(r13)
@@ -808,9 +813,7 @@
 	b	emulation_assist_relon_hv
 
 	. = 0x4e60
-	SET_SCRATCH0(r13)
-	EXCEPTION_PROLOG_0(PACA_EXGEN)
-	b	hmi_exception_relon_hv
+	b	.	/* Can't happen, see v2.07 Book III-S section 6.5 */
 
 	. = 0x4e80
 	SET_SCRATCH0(r13)
@@ -835,11 +838,17 @@
 	EXCEPTION_PROLOG_0(PACA_EXGEN)
 	b	vsx_unavailable_relon_pSeries
 
-tm_unavailable_relon_pSeries_1:
+facility_unavailable_relon_trampoline:
 	. = 0x4f60
 	SET_SCRATCH0(r13)
 	EXCEPTION_PROLOG_0(PACA_EXGEN)
-	b	tm_unavailable_relon_pSeries
+	b	facility_unavailable_relon_pSeries
+
+hv_facility_unavailable_relon_trampoline:
+	. = 0x4f80
+	SET_SCRATCH0(r13)
+	EXCEPTION_PROLOG_0(PACA_EXGEN)
+	b	hv_facility_unavailable_relon_hv
 
 	STD_RELON_EXCEPTION_PSERIES(0x5300, 0x1300, instruction_breakpoint)
 #ifdef CONFIG_PPC_DENORMALISATION
@@ -1165,36 +1174,22 @@
 	bl	.vsx_unavailable_exception
 	b	.ret_from_except
 
-	.align	7
-	.globl tm_unavailable_common
-tm_unavailable_common:
-	EXCEPTION_PROLOG_COMMON(0xf60, PACA_EXGEN)
-	bl	.save_nvgprs
-	DISABLE_INTS
-	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.tm_unavailable_exception
-	b	.ret_from_except
+	STD_EXCEPTION_COMMON(0xf60, facility_unavailable, .facility_unavailable_exception)
+	STD_EXCEPTION_COMMON(0xf80, hv_facility_unavailable, .facility_unavailable_exception)
 
 	.align	7
 	.globl	__end_handlers
 __end_handlers:
 
 	/* Equivalents to the above handlers for relocation-on interrupt vectors */
-	STD_RELON_EXCEPTION_HV_OOL(0xe00, h_data_storage)
-	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe00)
-	STD_RELON_EXCEPTION_HV_OOL(0xe20, h_instr_storage)
-	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe20)
 	STD_RELON_EXCEPTION_HV_OOL(0xe40, emulation_assist)
-	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe40)
-	STD_RELON_EXCEPTION_HV_OOL(0xe60, hmi_exception)
-	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe60)
 	MASKABLE_RELON_EXCEPTION_HV_OOL(0xe80, h_doorbell)
-	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe80)
 
 	STD_RELON_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
 	STD_RELON_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
 	STD_RELON_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
-	STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, tm_unavailable)
+	STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
+	STD_RELON_EXCEPTION_HV_OOL(0xf80, hv_facility_unavailable)
 
 #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
 /*
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index a949bdf..f0b47d1 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -176,7 +176,7 @@
 		length_max = 512 ; /* 64 doublewords */
 		/* DAWR region can't cross 512 boundary */
 		if ((bp->attr.bp_addr >> 10) != 
-		    ((bp->attr.bp_addr + bp->attr.bp_len) >> 10))
+		    ((bp->attr.bp_addr + bp->attr.bp_len - 1) >> 10))
 			return -EINVAL;
 	}
 	if (info->len >
@@ -250,6 +250,7 @@
 	 * we still need to single-step the instruction, but we don't
 	 * generate an event.
 	 */
+	info->type &= ~HW_BRK_TYPE_EXTRANEOUS_IRQ;
 	if (!((bp->attr.bp_addr <= dar) &&
 	      (dar - bp->attr.bp_addr < bp->attr.bp_len)))
 		info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index c0d0dbd..93d8d96 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -658,7 +658,7 @@
 	/* number of bytes needed for the bitmap */
 	sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);
 
-	page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz));
+	page = alloc_pages_node(nid, GFP_KERNEL, get_order(sz));
 	if (!page)
 		panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
 	tbl->it_map = page_address(page);
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index d92f387..e2a0a16 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -35,7 +35,13 @@
 #include <asm/vdso_datapage.h>
 #include <asm/vio.h>
 #include <asm/mmu.h>
+#include <asm/machdep.h>
 
+
+/*
+ * This isn't a module but we expose that to userspace
+ * via /proc so leave the definitions here
+ */
 #define MODULE_VERS "1.9"
 #define MODULE_NAME "lparcfg"
 
@@ -418,7 +424,8 @@
 {
 	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
 
-	if (plpar_hcall(H_GET_EM_PARMS, retbuf) == H_SUCCESS)
+	if (firmware_has_feature(FW_FEATURE_LPAR) &&
+	    plpar_hcall(H_GET_EM_PARMS, retbuf) == H_SUCCESS)
 		seq_printf(m, "power_mode_data=%016lx\n", retbuf[0]);
 }
 
@@ -677,7 +684,6 @@
 }
 
 static const struct file_operations lparcfg_fops = {
-	.owner		= THIS_MODULE,
 	.read		= seq_read,
 	.write		= lparcfg_write,
 	.open		= lparcfg_open,
@@ -699,14 +705,4 @@
 	}
 	return 0;
 }
-
-static void __exit lparcfg_cleanup(void)
-{
-	remove_proc_subtree("powerpc/lparcfg", NULL);
-}
-
-module_init(lparcfg_init);
-module_exit(lparcfg_cleanup);
-MODULE_DESCRIPTION("Interface for LPAR configuration data");
-MODULE_AUTHOR("Dave Engebretsen");
-MODULE_LICENSE("GPL");
+machine_device_initcall(pseries, lparcfg_init);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 076d124..7baa27b 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -600,6 +600,16 @@
 	struct ppc64_tlb_batch *batch;
 #endif
 
+	/* Back up the TAR across context switches.
+	 * Note that the TAR is not available for use in the kernel.  (To
+	 * provide this, the TAR should be backed up/restored on exception
+	 * entry/exit instead, and be in pt_regs.  FIXME, this should be in
+	 * pt_regs anyway (for debug).)
+	 * Save the TAR here before we do treclaim/trecheckpoint as these
+	 * will change the TAR.
+	 */
+	save_tar(&prev->thread);
+
 	__switch_to_tm(prev);
 
 #ifdef CONFIG_SMP
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 98c2fc1..64f7bd5 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -1449,7 +1449,9 @@
 	 */
 	if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE) {
 		len = bp_info->addr2 - bp_info->addr;
-	} else if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) {
+	} else if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
+		len = 1;
+	else {
 		ptrace_put_breakpoints(child);
 		return -EINVAL;
 	}
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index e379d3f..389fb807 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -76,7 +76,7 @@
 #endif
 
 int boot_cpuid = 0;
-int __initdata spinning_secondaries;
+int spinning_secondaries;
 u64 ppc64_pft_size;
 
 /* Pick defaults since we might want to patch instructions
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 201385c..0f83122 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -407,7 +407,8 @@
  * altivec/spe instructions at some point.
  */
 static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
-		int sigret, int ctx_has_vsx_region)
+			  struct mcontext __user *tm_frame, int sigret,
+			  int ctx_has_vsx_region)
 {
 	unsigned long msr = regs->msr;
 
@@ -475,6 +476,12 @@
 
 	if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
 		return 1;
+	/* We need to write 0 the MSR top 32 bits in the tm frame so that we
+	 * can check it on the restore to see if TM is active
+	 */
+	if (tm_frame && __put_user(0, &tm_frame->mc_gregs[PT_MSR]))
+		return 1;
+
 	if (sigret) {
 		/* Set up the sigreturn trampoline: li r0,sigret; sc */
 		if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
@@ -747,7 +754,7 @@
 				 struct mcontext __user *tm_sr)
 {
 	long err;
-	unsigned long msr;
+	unsigned long msr, msr_hi;
 #ifdef CONFIG_VSX
 	int i;
 #endif
@@ -852,8 +859,11 @@
 	tm_enable();
 	/* This loads the checkpointed FP/VEC state, if used */
 	tm_recheckpoint(&current->thread, msr);
-	/* The task has moved into TM state S, so ensure MSR reflects this */
-	regs->msr = (regs->msr & ~MSR_TS_MASK) | MSR_TS_S;
+	/* Get the top half of the MSR */
+	if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
+		return 1;
+	/* Pull in MSR TM from user context */
+	regs->msr = (regs->msr & ~MSR_TS_MASK) | ((msr_hi<<32) & MSR_TS_MASK);
 
 	/* This loads the speculative FP/VEC state, if used */
 	if (msr & MSR_FP) {
@@ -952,6 +962,7 @@
 {
 	struct rt_sigframe __user *rt_sf;
 	struct mcontext __user *frame;
+	struct mcontext __user *tm_frame = NULL;
 	void __user *addr;
 	unsigned long newsp = 0;
 	int sigret;
@@ -985,23 +996,24 @@
 	}
 
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	tm_frame = &rt_sf->uc_transact.uc_mcontext;
 	if (MSR_TM_ACTIVE(regs->msr)) {
-		if (save_tm_user_regs(regs, &rt_sf->uc.uc_mcontext,
-				      &rt_sf->uc_transact.uc_mcontext, sigret))
+		if (save_tm_user_regs(regs, frame, tm_frame, sigret))
 			goto badframe;
 	}
 	else
 #endif
-		if (save_user_regs(regs, frame, sigret, 1))
+	{
+		if (save_user_regs(regs, frame, tm_frame, sigret, 1))
 			goto badframe;
+	}
 	regs->link = tramp;
 
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 	if (MSR_TM_ACTIVE(regs->msr)) {
 		if (__put_user((unsigned long)&rt_sf->uc_transact,
 			       &rt_sf->uc.uc_link)
-		    || __put_user(to_user_ptr(&rt_sf->uc_transact.uc_mcontext),
-				  &rt_sf->uc_transact.uc_regs))
+		    || __put_user((unsigned long)tm_frame, &rt_sf->uc_transact.uc_regs))
 			goto badframe;
 	}
 	else
@@ -1170,7 +1182,7 @@
 		mctx = (struct mcontext __user *)
 			((unsigned long) &old_ctx->uc_mcontext & ~0xfUL);
 		if (!access_ok(VERIFY_WRITE, old_ctx, ctx_size)
-		    || save_user_regs(regs, mctx, 0, ctx_has_vsx_region)
+		    || save_user_regs(regs, mctx, NULL, 0, ctx_has_vsx_region)
 		    || put_sigset_t(&old_ctx->uc_sigmask, &current->blocked)
 		    || __put_user(to_user_ptr(mctx), &old_ctx->uc_regs))
 			return -EFAULT;
@@ -1233,7 +1245,7 @@
 		if (__get_user(msr_hi, &mcp->mc_gregs[PT_MSR]))
 			goto bad;
 
-		if (MSR_TM_SUSPENDED(msr_hi<<32)) {
+		if (MSR_TM_ACTIVE(msr_hi<<32)) {
 			/* We only recheckpoint on return if we're
 			 * transaction.
 			 */
@@ -1392,6 +1404,7 @@
 {
 	struct sigcontext __user *sc;
 	struct sigframe __user *frame;
+	struct mcontext __user *tm_mctx = NULL;
 	unsigned long newsp = 0;
 	int sigret;
 	unsigned long tramp;
@@ -1425,6 +1438,7 @@
 	}
 
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	tm_mctx = &frame->mctx_transact;
 	if (MSR_TM_ACTIVE(regs->msr)) {
 		if (save_tm_user_regs(regs, &frame->mctx, &frame->mctx_transact,
 				      sigret))
@@ -1432,8 +1446,10 @@
 	}
 	else
 #endif
-		if (save_user_regs(regs, &frame->mctx, sigret, 1))
+	{
+		if (save_user_regs(regs, &frame->mctx, tm_mctx, sigret, 1))
 			goto badframe;
+	}
 
 	regs->link = tramp;
 
@@ -1481,16 +1497,22 @@
 long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
 		       struct pt_regs *regs)
 {
+	struct sigframe __user *sf;
 	struct sigcontext __user *sc;
 	struct sigcontext sigctx;
 	struct mcontext __user *sr;
 	void __user *addr;
 	sigset_t set;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	struct mcontext __user *mcp, *tm_mcp;
+	unsigned long msr_hi;
+#endif
 
 	/* Always make any pending restarted system calls return -EINTR */
 	current_thread_info()->restart_block.fn = do_no_restart_syscall;
 
-	sc = (struct sigcontext __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
+	sf = (struct sigframe __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
+	sc = &sf->sctx;
 	addr = sc;
 	if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
 		goto badframe;
@@ -1507,11 +1529,25 @@
 #endif
 	set_current_blocked(&set);
 
-	sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
-	addr = sr;
-	if (!access_ok(VERIFY_READ, sr, sizeof(*sr))
-	    || restore_user_regs(regs, sr, 1))
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	mcp = (struct mcontext __user *)&sf->mctx;
+	tm_mcp = (struct mcontext __user *)&sf->mctx_transact;
+	if (__get_user(msr_hi, &tm_mcp->mc_gregs[PT_MSR]))
 		goto badframe;
+	if (MSR_TM_ACTIVE(msr_hi<<32)) {
+		if (!cpu_has_feature(CPU_FTR_TM))
+			goto badframe;
+		if (restore_tm_user_regs(regs, mcp, tm_mcp))
+			goto badframe;
+	} else
+#endif
+	{
+		sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
+		addr = sr;
+		if (!access_ok(VERIFY_READ, sr, sizeof(*sr))
+		    || restore_user_regs(regs, sr, 1))
+			goto badframe;
+	}
 
 	set_thread_flag(TIF_RESTOREALL);
 	return 0;
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 3459473..887e99d 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -410,6 +410,10 @@
 
 	/* get MSR separately, transfer the LE bit if doing signal return */
 	err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
+	/* pull in MSR TM from user context */
+	regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK);
+
+	/* pull in MSR LE from user context */
 	regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
 
 	/* The following non-GPR non-FPR non-VR state is also checkpointed: */
@@ -505,8 +509,6 @@
 	tm_enable();
 	/* This loads the checkpointed FP/VEC state, if used */
 	tm_recheckpoint(&current->thread, msr);
-	/* The task has moved into TM state S, so ensure MSR reflects this: */
-	regs->msr = (regs->msr & ~MSR_TS_MASK) | __MASK(33);
 
 	/* This loads the speculative FP/VEC state, if used */
 	if (msr & MSR_FP) {
@@ -654,7 +656,7 @@
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 	if (__get_user(msr, &uc->uc_mcontext.gp_regs[PT_MSR]))
 		goto badframe;
-	if (MSR_TM_SUSPENDED(msr)) {
+	if (MSR_TM_ACTIVE(msr)) {
 		/* We recheckpoint on return. */
 		struct ucontext __user *uc_transact;
 		if (__get_user(uc_transact, &uc->uc_link))
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index e68a845..a15fd1a 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -17,6 +17,7 @@
 #include <asm/machdep.h>
 #include <asm/smp.h>
 #include <asm/pmc.h>
+#include <asm/firmware.h>
 
 #include "cacheinfo.h"
 
@@ -179,15 +180,25 @@
 SYSFS_PMCSETUP(dscr, SPRN_DSCR);
 SYSFS_PMCSETUP(pir, SPRN_PIR);
 
+/*
+  Lets only enable read for phyp resources and
+  enable write when needed with a separate function.
+  Lets be conservative and default to pseries.
+*/
 static DEVICE_ATTR(mmcra, 0600, show_mmcra, store_mmcra);
 static DEVICE_ATTR(spurr, 0400, show_spurr, NULL);
 static DEVICE_ATTR(dscr, 0600, show_dscr, store_dscr);
-static DEVICE_ATTR(purr, 0600, show_purr, store_purr);
+static DEVICE_ATTR(purr, 0400, show_purr, store_purr);
 static DEVICE_ATTR(pir, 0400, show_pir, NULL);
 
 unsigned long dscr_default = 0;
 EXPORT_SYMBOL(dscr_default);
 
+static void add_write_permission_dev_attr(struct device_attribute *attr)
+{
+	attr->attr.mode |= 0200;
+}
+
 static ssize_t show_dscr_default(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
@@ -394,8 +405,11 @@
 	if (cpu_has_feature(CPU_FTR_MMCRA))
 		device_create_file(s, &dev_attr_mmcra);
 
-	if (cpu_has_feature(CPU_FTR_PURR))
+	if (cpu_has_feature(CPU_FTR_PURR)) {
+		if (!firmware_has_feature(FW_FEATURE_LPAR))
+			add_write_permission_dev_attr(&dev_attr_purr);
 		device_create_file(s, &dev_attr_purr);
+	}
 
 	if (cpu_has_feature(CPU_FTR_SPURR))
 		device_create_file(s, &dev_attr_spurr);
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
index 2da67e7..f2abb21 100644
--- a/arch/powerpc/kernel/tm.S
+++ b/arch/powerpc/kernel/tm.S
@@ -79,6 +79,11 @@
 	TABORT(R3)
 	blr
 
+	.section	".toc","aw"
+DSCR_DEFAULT:
+	.tc dscr_default[TC],dscr_default
+
+	.section	".text"
 
 /* void tm_reclaim(struct thread_struct *thread,
  *                 unsigned long orig_msr,
@@ -178,11 +183,18 @@
 	std	r1, PACATMSCRATCH(r13)
 	ld	r1, PACAR1(r13)
 
+	/* Store the PPR in r11 and reset to decent value */
+	std	r11, GPR11(r1)			/* Temporary stash */
+	mfspr	r11, SPRN_PPR
+	HMT_MEDIUM
+
 	/* Now get some more GPRS free */
 	std	r7, GPR7(r1)			/* Temporary stash */
 	std	r12, GPR12(r1)			/* ''   ''    ''   */
 	ld	r12, STACK_PARAM(0)(r1)		/* Param 0, thread_struct * */
 
+	std	r11, THREAD_TM_PPR(r12)		/* Store PPR and free r11 */
+
 	addi	r7, r12, PT_CKPT_REGS		/* Thread's ckpt_regs */
 
 	/* Make r7 look like an exception frame so that we
@@ -194,15 +206,19 @@
 	SAVE_GPR(0, r7)				/* user r0 */
 	SAVE_GPR(2, r7)			/* user r2 */
 	SAVE_4GPRS(3, r7)			/* user r3-r6 */
-	SAVE_4GPRS(8, r7)			/* user r8-r11 */
+	SAVE_GPR(8, r7)				/* user r8 */
+	SAVE_GPR(9, r7)				/* user r9 */
+	SAVE_GPR(10, r7)			/* user r10 */
 	ld	r3, PACATMSCRATCH(r13)		/* user r1 */
 	ld	r4, GPR7(r1)			/* user r7 */
-	ld	r5, GPR12(r1)			/* user r12 */
-	GET_SCRATCH0(6)				/* user r13 */
+	ld	r5, GPR11(r1)			/* user r11 */
+	ld	r6, GPR12(r1)			/* user r12 */
+	GET_SCRATCH0(8)				/* user r13 */
 	std	r3, GPR1(r7)
 	std	r4, GPR7(r7)
-	std	r5, GPR12(r7)
-	std	r6, GPR13(r7)
+	std	r5, GPR11(r7)
+	std	r6, GPR12(r7)
+	std	r8, GPR13(r7)
 
 	SAVE_NVGPRS(r7)				/* user r14-r31 */
 
@@ -224,6 +240,14 @@
 	std	r5, _CCR(r7)
 	std	r6, _XER(r7)
 
+
+	/* ******************** TAR, DSCR ********** */
+	mfspr	r3, SPRN_TAR
+	mfspr	r4, SPRN_DSCR
+
+	std	r3, THREAD_TM_TAR(r12)
+	std	r4, THREAD_TM_DSCR(r12)
+
 	/* MSR and flags:  We don't change CRs, and we don't need to alter
 	 * MSR.
 	 */
@@ -239,7 +263,7 @@
 	std	r3, THREAD_TM_TFHAR(r12)
 	std	r4, THREAD_TM_TFIAR(r12)
 
-	/* AMR and PPR are checkpointed too, but are unsupported by Linux. */
+	/* AMR is checkpointed too, but is unsupported by Linux. */
 
 	/* Restore original MSR/IRQ state & clear TM mode */
 	ld	r14, TM_FRAME_L0(r1)		/* Orig MSR */
@@ -255,6 +279,12 @@
 	mtcr	r4
 	mtlr	r0
 	ld	r2, 40(r1)
+
+	/* Load system default DSCR */
+	ld	r4, DSCR_DEFAULT@toc(r2)
+	ld	r0, 0(r4)
+	mtspr	SPRN_DSCR, r0
+
 	blr
 
 
@@ -338,35 +368,51 @@
 	mtmsr	r6				/* FP/Vec off again! */
 
 restore_gprs:
-	/* ******************** CR,LR,CCR,MSR ********** */
-	ld	r3, _CTR(r7)
-	ld	r4, _LINK(r7)
-	ld	r5, _CCR(r7)
-	ld	r6, _XER(r7)
 
-	mtctr	r3
-	mtlr	r4
-	mtcr	r5
-	mtxer	r6
+	/* ******************** CR,LR,CCR,MSR ********** */
+	ld	r4, _CTR(r7)
+	ld	r5, _LINK(r7)
+	ld	r6, _CCR(r7)
+	ld	r8, _XER(r7)
+
+	mtctr	r4
+	mtlr	r5
+	mtcr	r6
+	mtxer	r8
+
+	/* ******************** TAR ******************** */
+	ld	r4, THREAD_TM_TAR(r3)
+	mtspr	SPRN_TAR,	r4
+
+	/* Load up the PPR and DSCR in GPRs only at this stage */
+	ld	r5, THREAD_TM_DSCR(r3)
+	ld	r6, THREAD_TM_PPR(r3)
 
 	/* MSR and flags:  We don't change CRs, and we don't need to alter
 	 * MSR.
 	 */
 
 	REST_4GPRS(0, r7)			/* GPR0-3 */
-	REST_GPR(4, r7)				/* GPR4-6 */
-	REST_GPR(5, r7)
-	REST_GPR(6, r7)
+	REST_GPR(4, r7)				/* GPR4 */
 	REST_4GPRS(8, r7)			/* GPR8-11 */
 	REST_2GPRS(12, r7)			/* GPR12-13 */
 
 	REST_NVGPRS(r7)				/* GPR14-31 */
 
-	ld	r7, GPR7(r7)			/* GPR7 */
+	/* Load up PPR and DSCR here so we don't run with user values for long
+	 */
+	mtspr	SPRN_DSCR, r5
+	mtspr	SPRN_PPR, r6
+
+	REST_GPR(5, r7)				/* GPR5-7 */
+	REST_GPR(6, r7)
+	ld	r7, GPR7(r7)
 
 	/* Commit register state as checkpointed state: */
 	TRECHKPT
 
+	HMT_MEDIUM
+
 	/* Our transactional state has now changed.
 	 *
 	 * Now just get out of here.  Transactional (current) state will be
@@ -385,6 +431,12 @@
 	mtcr	r4
 	mtlr	r0
 	ld	r2, 40(r1)
+
+	/* Load system default DSCR */
+	ld	r4, DSCR_DEFAULT@toc(r2)
+	ld	r0, 0(r4)
+	mtspr	SPRN_DSCR, r0
+
 	blr
 
 	/* ****************************************************************** */
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index c0e5caf..88929b1 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -44,9 +44,7 @@
 #include <asm/machdep.h>
 #include <asm/rtas.h>
 #include <asm/pmc.h>
-#ifdef CONFIG_PPC32
 #include <asm/reg.h>
-#endif
 #ifdef CONFIG_PMAC_BACKLIGHT
 #include <asm/backlight.h>
 #endif
@@ -1282,26 +1280,63 @@
 	die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
 }
 
-void tm_unavailable_exception(struct pt_regs *regs)
+#ifdef CONFIG_PPC64
+void facility_unavailable_exception(struct pt_regs *regs)
 {
+	static char *facility_strings[] = {
+		[FSCR_FP_LG] = "FPU",
+		[FSCR_VECVSX_LG] = "VMX/VSX",
+		[FSCR_DSCR_LG] = "DSCR",
+		[FSCR_PM_LG] = "PMU SPRs",
+		[FSCR_BHRB_LG] = "BHRB",
+		[FSCR_TM_LG] = "TM",
+		[FSCR_EBB_LG] = "EBB",
+		[FSCR_TAR_LG] = "TAR",
+	};
+	char *facility = "unknown";
+	u64 value;
+	u8 status;
+	bool hv;
+
+	hv = (regs->trap == 0xf80);
+	if (hv)
+		value = mfspr(SPRN_HFSCR);
+	else
+		value = mfspr(SPRN_FSCR);
+
+	status = value >> 56;
+	if (status == FSCR_DSCR_LG) {
+		/* User is acessing the DSCR.  Set the inherit bit and allow
+		 * the user to set it directly in future by setting via the
+		 * H/FSCR DSCR bit.
+		 */
+		current->thread.dscr_inherit = 1;
+		if (hv)
+			mtspr(SPRN_HFSCR, value | HFSCR_DSCR);
+		else
+			mtspr(SPRN_FSCR,  value | FSCR_DSCR);
+		return;
+	}
+
+	if ((status < ARRAY_SIZE(facility_strings)) &&
+	    facility_strings[status])
+		facility = facility_strings[status];
+
 	/* We restore the interrupt state now */
 	if (!arch_irq_disabled_regs(regs))
 		local_irq_enable();
 
-	/* Currently we never expect a TMU exception.  Catch
-	 * this and kill the process!
-	 */
-	printk(KERN_EMERG "Unexpected TM unavailable exception at %lx "
-	       "(msr %lx)\n",
-	       regs->nip, regs->msr);
+	pr_err("%sFacility '%s' unavailable, exception at 0x%lx, MSR=%lx\n",
+	       hv ? "Hypervisor " : "", facility, regs->nip, regs->msr);
 
 	if (user_mode(regs)) {
 		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
 		return;
 	}
 
-	die("Unexpected TM unavailable exception", regs, SIGABRT);
+	die("Unexpected facility unavailable exception", regs, SIGABRT);
 }
+#endif
 
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index 536016d..2d845d8 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -1529,11 +1529,15 @@
 	const char *cp;
 
 	dn = dev->of_node;
-	if (!dn)
-		return -ENODEV;
+	if (!dn) {
+		strcat(buf, "\n");
+		return strlen(buf);
+	}
 	cp = of_get_property(dn, "compatible", NULL);
-	if (!cp)
-		return -ENODEV;
+	if (!cp) {
+		strcat(buf, "\n");
+		return strlen(buf);
+	}
 
 	return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp);
 }
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 654e479..f096e72 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -38,9 +38,6 @@
 #endif
 SECTIONS
 {
-	. = 0;
-	reloc_start = .;
-
 	. = KERNELBASE;
 
 /*
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index b02f91e..7bcd4d6 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1054,7 +1054,7 @@
 BEGIN_FTR_SECTION
 	mfspr	r8, SPRN_DSCR
 	ld	r7, HSTATE_DSCR(r13)
-	std	r8, VCPU_DSCR(r7)
+	std	r8, VCPU_DSCR(r9)
 	mtspr	SPRN_DSCR, r7
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
index 94c1dd4..a3a5cb8 100644
--- a/arch/powerpc/kvm/book3s_xics.c
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -19,6 +19,7 @@
 #include <asm/hvcall.h>
 #include <asm/xics.h>
 #include <asm/debug.h>
+#include <asm/time.h>
 
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
diff --git a/arch/powerpc/lib/checksum_64.S b/arch/powerpc/lib/checksum_64.S
index 167f725..57a0720 100644
--- a/arch/powerpc/lib/checksum_64.S
+++ b/arch/powerpc/lib/checksum_64.S
@@ -226,19 +226,35 @@
 	blr
 
 
-	.macro source
+	.macro srcnr
 100:
 	.section __ex_table,"a"
 	.align 3
-	.llong 100b,.Lsrc_error
+	.llong 100b,.Lsrc_error_nr
+	.previous
+	.endm
+
+	.macro source
+150:
+	.section __ex_table,"a"
+	.align 3
+	.llong 150b,.Lsrc_error
+	.previous
+	.endm
+
+	.macro dstnr
+200:
+	.section __ex_table,"a"
+	.align 3
+	.llong 200b,.Ldest_error_nr
 	.previous
 	.endm
 
 	.macro dest
-200:
+250:
 	.section __ex_table,"a"
 	.align 3
-	.llong 200b,.Ldest_error
+	.llong 250b,.Ldest_error
 	.previous
 	.endm
 
@@ -269,16 +285,16 @@
 	rldicl. r6,r3,64-1,64-2		/* r6 = (r3 & 0x3) >> 1 */
 	beq	.Lcopy_aligned
 
-	li	r7,4
-	sub	r6,r7,r6
+	li	r9,4
+	sub	r6,r9,r6
 	mtctr	r6
 
 1:
-source;	lhz	r6,0(r3)		/* align to doubleword */
+srcnr;	lhz	r6,0(r3)		/* align to doubleword */
 	subi	r5,r5,2
 	addi	r3,r3,2
 	adde	r0,r0,r6
-dest;	sth	r6,0(r4)
+dstnr;	sth	r6,0(r4)
 	addi	r4,r4,2
 	bdnz	1b
 
@@ -392,10 +408,10 @@
 
 	mtctr	r6
 3:
-source;	ld	r6,0(r3)
+srcnr;	ld	r6,0(r3)
 	addi	r3,r3,8
 	adde	r0,r0,r6
-dest;	std	r6,0(r4)
+dstnr;	std	r6,0(r4)
 	addi	r4,r4,8
 	bdnz	3b
 
@@ -405,10 +421,10 @@
 	srdi.	r6,r5,2
 	beq	.Lcopy_tail_halfword
 
-source;	lwz	r6,0(r3)
+srcnr;	lwz	r6,0(r3)
 	addi	r3,r3,4
 	adde	r0,r0,r6
-dest;	stw	r6,0(r4)
+dstnr;	stw	r6,0(r4)
 	addi	r4,r4,4
 	subi	r5,r5,4
 
@@ -416,10 +432,10 @@
 	srdi.	r6,r5,1
 	beq	.Lcopy_tail_byte
 
-source;	lhz	r6,0(r3)
+srcnr;	lhz	r6,0(r3)
 	addi	r3,r3,2
 	adde	r0,r0,r6
-dest;	sth	r6,0(r4)
+dstnr;	sth	r6,0(r4)
 	addi	r4,r4,2
 	subi	r5,r5,2
 
@@ -427,10 +443,10 @@
 	andi.	r6,r5,1
 	beq	.Lcopy_finish
 
-source;	lbz	r6,0(r3)
+srcnr;	lbz	r6,0(r3)
 	sldi	r9,r6,8			/* Pad the byte out to 16 bits */
 	adde	r0,r0,r9
-dest;	stb	r6,0(r4)
+dstnr;	stb	r6,0(r4)
 
 .Lcopy_finish:
 	addze	r0,r0			/* add in final carry */
@@ -440,6 +456,11 @@
 	blr
 
 .Lsrc_error:
+	ld	r14,STK_REG(R14)(r1)
+	ld	r15,STK_REG(R15)(r1)
+	ld	r16,STK_REG(R16)(r1)
+	addi	r1,r1,STACKFRAMESIZE
+.Lsrc_error_nr:
 	cmpdi	0,r7,0
 	beqlr
 	li	r6,-EFAULT
@@ -447,6 +468,11 @@
 	blr
 
 .Ldest_error:
+	ld	r14,STK_REG(R14)(r1)
+	ld	r15,STK_REG(R15)(r1)
+	ld	r16,STK_REG(R16)(r1)
+	addi	r1,r1,STACKFRAMESIZE
+.Ldest_error_nr:
 	cmpdi	0,r8,0
 	beqlr
 	li	r6,-EFAULT
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 88c0425..cafad40 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -27,6 +27,7 @@
 #include <linux/seq_file.h>
 #include <linux/uaccess.h>
 #include <linux/slab.h>
+#include <asm/cputhreads.h>
 #include <asm/sparsemem.h>
 #include <asm/prom.h>
 #include <asm/smp.h>
@@ -1319,7 +1320,8 @@
 			}
 		}
 		if (changed) {
-			cpumask_set_cpu(cpu, changes);
+			cpumask_or(changes, changes, cpu_sibling_mask(cpu));
+			cpu = cpu_last_thread_sibling(cpu);
 		}
 	}
 
@@ -1427,17 +1429,15 @@
 	if (!data)
 		return -EINVAL;
 
-	cpu = get_cpu();
+	cpu = smp_processor_id();
 
 	for (update = data; update; update = update->next) {
 		if (cpu != update->cpu)
 			continue;
 
-		unregister_cpu_under_node(update->cpu, update->old_nid);
 		unmap_cpu_from_node(update->cpu);
 		map_cpu_to_node(update->cpu, update->new_nid);
 		vdso_getcpu_init();
-		register_cpu_under_node(update->cpu, update->new_nid);
 	}
 
 	return 0;
@@ -1449,12 +1449,12 @@
  */
 int arch_update_cpu_topology(void)
 {
-	unsigned int cpu, changed = 0;
+	unsigned int cpu, sibling, changed = 0;
 	struct topology_update_data *updates, *ud;
 	unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
 	cpumask_t updated_cpus;
 	struct device *dev;
-	int weight, i = 0;
+	int weight, new_nid, i = 0;
 
 	weight = cpumask_weight(&cpu_associativity_changes_mask);
 	if (!weight)
@@ -1467,24 +1467,54 @@
 	cpumask_clear(&updated_cpus);
 
 	for_each_cpu(cpu, &cpu_associativity_changes_mask) {
-		ud = &updates[i++];
-		ud->cpu = cpu;
+		/*
+		 * If siblings aren't flagged for changes, updates list
+		 * will be too short. Skip on this update and set for next
+		 * update.
+		 */
+		if (!cpumask_subset(cpu_sibling_mask(cpu),
+					&cpu_associativity_changes_mask)) {
+			pr_info("Sibling bits not set for associativity "
+					"change, cpu%d\n", cpu);
+			cpumask_or(&cpu_associativity_changes_mask,
+					&cpu_associativity_changes_mask,
+					cpu_sibling_mask(cpu));
+			cpu = cpu_last_thread_sibling(cpu);
+			continue;
+		}
+
+		/* Use associativity from first thread for all siblings */
 		vphn_get_associativity(cpu, associativity);
-		ud->new_nid = associativity_to_nid(associativity);
+		new_nid = associativity_to_nid(associativity);
+		if (new_nid < 0 || !node_online(new_nid))
+			new_nid = first_online_node;
 
-		if (ud->new_nid < 0 || !node_online(ud->new_nid))
-			ud->new_nid = first_online_node;
+		if (new_nid == numa_cpu_lookup_table[cpu]) {
+			cpumask_andnot(&cpu_associativity_changes_mask,
+					&cpu_associativity_changes_mask,
+					cpu_sibling_mask(cpu));
+			cpu = cpu_last_thread_sibling(cpu);
+			continue;
+		}
 
-		ud->old_nid = numa_cpu_lookup_table[cpu];
-		cpumask_set_cpu(cpu, &updated_cpus);
-
-		if (i < weight)
-			ud->next = &updates[i];
+		for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
+			ud = &updates[i++];
+			ud->cpu = sibling;
+			ud->new_nid = new_nid;
+			ud->old_nid = numa_cpu_lookup_table[sibling];
+			cpumask_set_cpu(sibling, &updated_cpus);
+			if (i < weight)
+				ud->next = &updates[i];
+		}
+		cpu = cpu_last_thread_sibling(cpu);
 	}
 
 	stop_machine(update_cpu_topology, &updates[0], &updated_cpus);
 
 	for (ud = &updates[0]; ud; ud = ud->next) {
+		unregister_cpu_under_node(ud->cpu, ud->old_nid);
+		register_cpu_under_node(ud->cpu, ud->new_nid);
+
 		dev = get_cpu_device(ud->cpu);
 		if (dev)
 			kobject_uevent(&dev->kobj, KOBJ_CHANGE);
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 29c6482..d3ee2e5 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -75,6 +75,8 @@
 
 #define MMCR0_FCHV		0
 #define MMCR0_PMCjCE		MMCR0_PMCnCE
+#define MMCR0_FC56		0
+#define MMCR0_PMAO		0
 
 #define SPRN_MMCRA		SPRN_MMCR2
 #define MMCRA_SAMPLE_ENABLE	0
@@ -852,7 +854,7 @@
 static void power_pmu_disable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw;
-	unsigned long flags;
+	unsigned long flags, val;
 
 	if (!ppmu)
 		return;
@@ -860,9 +862,6 @@
 	cpuhw = &__get_cpu_var(cpu_hw_events);
 
 	if (!cpuhw->disabled) {
-		cpuhw->disabled = 1;
-		cpuhw->n_added = 0;
-
 		/*
 		 * Check if we ever enabled the PMU on this cpu.
 		 */
@@ -872,6 +871,21 @@
 		}
 
 		/*
+		 * Set the 'freeze counters' bit, clear PMAO/FC56.
+		 */
+		val  = mfspr(SPRN_MMCR0);
+		val |= MMCR0_FC;
+		val &= ~(MMCR0_PMAO | MMCR0_FC56);
+
+		/*
+		 * The barrier is to make sure the mtspr has been
+		 * executed and the PMU has frozen the events etc.
+		 * before we return.
+		 */
+		write_mmcr0(cpuhw, val);
+		mb();
+
+		/*
 		 * Disable instruction sampling if it was enabled
 		 */
 		if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
@@ -880,14 +894,8 @@
 			mb();
 		}
 
-		/*
-		 * Set the 'freeze counters' bit.
-		 * The barrier is to make sure the mtspr has been
-		 * executed and the PMU has frozen the events
-		 * before we return.
-		 */
-		write_mmcr0(cpuhw, mfspr(SPRN_MMCR0) | MMCR0_FC);
-		mb();
+		cpuhw->disabled = 1;
+		cpuhw->n_added = 0;
 	}
 	local_irq_restore(flags);
 }
@@ -911,12 +919,18 @@
 
 	if (!ppmu)
 		return;
+
 	local_irq_save(flags);
+
 	cpuhw = &__get_cpu_var(cpu_hw_events);
-	if (!cpuhw->disabled) {
-		local_irq_restore(flags);
-		return;
+	if (!cpuhw->disabled)
+		goto out;
+
+	if (cpuhw->n_events == 0) {
+		ppc_set_pmu_inuse(0);
+		goto out;
 	}
+
 	cpuhw->disabled = 0;
 
 	/*
@@ -928,8 +942,6 @@
 	if (!cpuhw->n_added) {
 		mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
 		mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
-		if (cpuhw->n_events == 0)
-			ppc_set_pmu_inuse(0);
 		goto out_enable;
 	}
 
diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c
index f7d1c4f..9aefaeb 100644
--- a/arch/powerpc/perf/power8-pmu.c
+++ b/arch/powerpc/perf/power8-pmu.c
@@ -109,6 +109,16 @@
 #define EVENT_IS_MARKED		(EVENT_MARKED_MASK << EVENT_MARKED_SHIFT)
 #define EVENT_PSEL_MASK		0xff	/* PMCxSEL value */
 
+#define EVENT_VALID_MASK	\
+	((EVENT_THRESH_MASK    << EVENT_THRESH_SHIFT)		|	\
+	 (EVENT_SAMPLE_MASK    << EVENT_SAMPLE_SHIFT)		|	\
+	 (EVENT_CACHE_SEL_MASK << EVENT_CACHE_SEL_SHIFT)	|	\
+	 (EVENT_PMC_MASK       << EVENT_PMC_SHIFT)		|	\
+	 (EVENT_UNIT_MASK      << EVENT_UNIT_SHIFT)		|	\
+	 (EVENT_COMBINE_MASK   << EVENT_COMBINE_SHIFT)		|	\
+	 (EVENT_MARKED_MASK    << EVENT_MARKED_SHIFT)		|	\
+	  EVENT_PSEL_MASK)
+
 /* MMCRA IFM bits - POWER8 */
 #define	POWER8_MMCRA_IFM1		0x0000000040000000UL
 #define	POWER8_MMCRA_IFM2		0x0000000080000000UL
@@ -184,6 +194,7 @@
 #define MMCR1_UNIT_SHIFT(pmc)		(60 - (4 * ((pmc) - 1)))
 #define MMCR1_COMBINE_SHIFT(pmc)	(35 - ((pmc) - 1))
 #define MMCR1_PMCSEL_SHIFT(pmc)		(24 - (((pmc) - 1)) * 8)
+#define MMCR1_FAB_SHIFT			36
 #define MMCR1_DC_QUAL_SHIFT		47
 #define MMCR1_IC_QUAL_SHIFT		46
 
@@ -212,6 +223,9 @@
 
 	mask = value = 0;
 
+	if (event & ~EVENT_VALID_MASK)
+		return -1;
+
 	pmc   = (event >> EVENT_PMC_SHIFT)       & EVENT_PMC_MASK;
 	unit  = (event >> EVENT_UNIT_SHIFT)      & EVENT_UNIT_MASK;
 	cache = (event >> EVENT_CACHE_SEL_SHIFT) & EVENT_CACHE_SEL_MASK;
@@ -354,8 +368,8 @@
 		 * the threshold bits are used for the match value.
 		 */
 		if (event_is_fab_match(event[i])) {
-			mmcr1 |= (event[i] >> EVENT_THR_CTL_SHIFT) &
-				  EVENT_THR_CTL_MASK;
+			mmcr1 |= ((event[i] >> EVENT_THR_CTL_SHIFT) &
+				  EVENT_THR_CTL_MASK) << MMCR1_FAB_SHIFT;
 		} else {
 			val = (event[i] >> EVENT_THR_CTL_SHIFT) & EVENT_THR_CTL_MASK;
 			mmcra |= val << MMCRA_THR_CTL_SHIFT;
@@ -378,6 +392,10 @@
 	if (pmc_inuse & 0x7c)
 		mmcr[0] |= MMCR0_PMCjCE;
 
+	/* If we're not using PMC 5 or 6, freeze them */
+	if (!(pmc_inuse & 0x60))
+		mmcr[0] |= MMCR0_FC56;
+
 	mmcr[1] = mmcr1;
 	mmcr[2] = mmcra;
 
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 9c9d15e..7816bef 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -441,6 +441,17 @@
 	set_iommu_table_base(&pdev->dev, &pe->tce32_table);
 }
 
+static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus)
+{
+	struct pci_dev *dev;
+
+	list_for_each_entry(dev, &bus->devices, bus_list) {
+		set_iommu_table_base(&dev->dev, &pe->tce32_table);
+		if (dev->subordinate)
+			pnv_ioda_setup_bus_dma(pe, dev->subordinate);
+	}
+}
+
 static void pnv_pci_ioda1_tce_invalidate(struct iommu_table *tbl,
 					 u64 *startp, u64 *endp)
 {
@@ -596,6 +607,11 @@
 	}
 	iommu_init_table(tbl, phb->hose->node);
 
+	if (pe->pdev)
+		set_iommu_table_base(&pe->pdev->dev, tbl);
+	else
+		pnv_ioda_setup_bus_dma(pe, pe->pbus);
+
 	return;
  fail:
 	/* XXX Failure: Try to fallback to 64-bit only ? */
@@ -667,6 +683,11 @@
 	}
 	iommu_init_table(tbl, phb->hose->node);
 
+	if (pe->pdev)
+		set_iommu_table_base(&pe->pdev->dev, tbl);
+	else
+		pnv_ioda_setup_bus_dma(pe, pe->pbus);
+
 	return;
 fail:
 	if (pe->tce32_seg >= 0)
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index c11c823..54b998f 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -354,7 +354,7 @@
 }
 early_initcall(alloc_dispatch_log_kmem_cache);
 
-static void pSeries_idle(void)
+static void pseries_lpar_idle(void)
 {
 	/* This would call on the cpuidle framework, and the back-end pseries
 	 * driver to  go to idle states
@@ -362,10 +362,22 @@
 	if (cpuidle_idle_call()) {
 		/* On error, execute default handler
 		 * to go into low thread priority and possibly
-		 * low power mode.
+		 * low power mode by cedeing processor to hypervisor
 		 */
-		HMT_low();
-		HMT_very_low();
+
+		/* Indicate to hypervisor that we are idle. */
+		get_lppaca()->idle = 1;
+
+		/*
+		 * Yield the processor to the hypervisor.  We return if
+		 * an external interrupt occurs (which are driven prior
+		 * to returning here) or if a prod occurs from another
+		 * processor. When returning here, external interrupts
+		 * are enabled.
+		 */
+		cede_processor();
+
+		get_lppaca()->idle = 0;
 	}
 }
 
@@ -456,15 +468,14 @@
 
 	pSeries_nvram_init();
 
-	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
+	if (firmware_has_feature(FW_FEATURE_LPAR)) {
 		vpa_init(boot_cpuid);
-		ppc_md.power_save = pSeries_idle;
-	}
-
-	if (firmware_has_feature(FW_FEATURE_LPAR))
+		ppc_md.power_save = pseries_lpar_idle;
 		ppc_md.enable_pmcs = pseries_lpar_enable_pmcs;
-	else
+	} else {
+		/* No special idle routine */
 		ppc_md.enable_pmcs = power4_enable_pmcs;
+	}
 
 	ppc_md.pcibios_root_bridge_prepare = pseries_root_bridge_prepare;
 
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index da183c5..97dcbea 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -227,11 +227,12 @@
 	  not work on older machines.
 
 config MARCH_ZEC12
-	bool "IBM zEC12"
+	bool "IBM zBC12 and zEC12"
 	select HAVE_MARCH_ZEC12_FEATURES if 64BIT
 	help
-	  Select this to enable optimizations for IBM zEC12 (2827 series). The
-	  kernel will be slightly faster but will not work on older machines.
+	  Select this to enable optimizations for IBM zBC12 and zEC12 (2828 and
+	  2827 series). The kernel will be slightly faster but will not work on
+	  older machines.
 
 endchoice
 
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 4d8604e..7d46767 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -693,7 +693,7 @@
 	size -= offset;
 	p = addr + offset / BITS_PER_LONG;
 	if (bit) {
-		set = __flo_word(0, *p & (~0UL << bit));
+		set = __flo_word(0, *p & (~0UL >> bit));
 		if (set >= size)
 			return size + offset;
 		if (set < BITS_PER_LONG)
diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h
index 6c32190..346b1c8 100644
--- a/arch/s390/include/asm/jump_label.h
+++ b/arch/s390/include/asm/jump_label.h
@@ -15,7 +15,7 @@
 
 static __always_inline bool arch_static_branch(struct static_key *key)
 {
-	asm goto("0:	brcl 0,0\n"
+	asm_volatile_goto("0:	brcl 0,0\n"
 		".pushsection __jump_table, \"aw\"\n"
 		ASM_ALIGN "\n"
 		ASM_PTR " 0b, %l[label], %0\n"
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index b75d7d6..6d6d92b 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -32,6 +32,7 @@
 	struct mm_struct *mm;
 	struct mmu_table_batch *batch;
 	unsigned int fullmm;
+	unsigned long start, end;
 };
 
 struct mmu_table_batch {
@@ -48,10 +49,13 @@
 
 static inline void tlb_gather_mmu(struct mmu_gather *tlb,
 				  struct mm_struct *mm,
-				  unsigned int full_mm_flush)
+				  unsigned long start,
+				  unsigned long end)
 {
 	tlb->mm = mm;
-	tlb->fullmm = full_mm_flush;
+	tlb->start = start;
+	tlb->end = end;
+	tlb->fullmm = !(start | (end+1));
 	tlb->batch = NULL;
 	if (tlb->fullmm)
 		__tlb_flush_mm(mm);
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 4d5e6f8..32bb7bf 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -265,6 +265,7 @@
 	tm	__TI_flags+3(%r12),_TIF_SYSCALL
 	jno	sysc_return
 	lm	%r2,%r7,__PT_R2(%r11)	# load svc arguments
+	l	%r10,__TI_sysc_table(%r12)	# 31 bit system call table
 	xr	%r8,%r8			# svc 0 returns -ENOSYS
 	clc	__PT_INT_CODE+2(2,%r11),BASED(.Lnr_syscalls+2)
 	jnl	sysc_nr_ok		# invalid svc number -> do svc 0
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 4c17eec..2e3befd 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -293,6 +293,7 @@
 	tm	__TI_flags+7(%r12),_TIF_SYSCALL
 	jno	sysc_return
 	lmg	%r2,%r7,__PT_R2(%r11)	# load svc arguments
+	lg	%r10,__TI_sysc_table(%r12)	# address of system call table
 	lghi	%r8,0			# svc 0 returns -ENOSYS
 	llgh	%r1,__PT_INT_CODE+2(%r11)	# load new svc number
 	cghi	%r1,NR_syscalls
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 0a49095..8ad9413 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -998,6 +998,7 @@
 		strcpy(elf_platform, "z196");
 		break;
 	case 0x2827:
+	case 0x2828:
 		strcpy(elf_platform, "zEC12");
 		break;
 	}
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index c1c7c68..698fb82 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -622,14 +622,25 @@
 		kvm_s390_deliver_pending_interrupts(vcpu);
 
 	vcpu->arch.sie_block->icptcode = 0;
-	preempt_disable();
-	kvm_guest_enter();
-	preempt_enable();
 	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
 		   atomic_read(&vcpu->arch.sie_block->cpuflags));
 	trace_kvm_s390_sie_enter(vcpu,
 				 atomic_read(&vcpu->arch.sie_block->cpuflags));
+
+	/*
+	 * As PF_VCPU will be used in fault handler, between guest_enter
+	 * and guest_exit should be no uaccess.
+	 */
+	preempt_disable();
+	kvm_guest_enter();
+	preempt_enable();
 	rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
+	kvm_guest_exit();
+
+	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
+		   vcpu->arch.sie_block->icptcode);
+	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
+
 	if (rc) {
 		if (kvm_is_ucontrol(vcpu->kvm)) {
 			rc = SIE_INTERCEPT_UCONTROL;
@@ -639,10 +650,6 @@
 			rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 		}
 	}
-	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
-		   vcpu->arch.sie_block->icptcode);
-	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
-	kvm_guest_exit();
 
 	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
 	return rc;
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 89ebae4..eba15f1 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -69,6 +69,7 @@
 		order = 2;
 		break;
 	case 0x2827:	/* zEC12 */
+	case 0x2828:	/* zEC12 */
 	default:
 		order = 5;
 		break;
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
index ffeb17c..930783d 100644
--- a/arch/s390/oprofile/init.c
+++ b/arch/s390/oprofile/init.c
@@ -440,7 +440,7 @@
 		switch (id.machine) {
 		case 0x2097: case 0x2098: ops->cpu_type = "s390/z10"; break;
 		case 0x2817: case 0x2818: ops->cpu_type = "s390/z196"; break;
-		case 0x2827:              ops->cpu_type = "s390/zEC12"; break;
+		case 0x2827: case 0x2828: ops->cpu_type = "s390/zEC12"; break;
 		default: return -ENODEV;
 		}
 	}
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
index e61d43d..362192e 100644
--- a/arch/sh/include/asm/tlb.h
+++ b/arch/sh/include/asm/tlb.h
@@ -36,10 +36,12 @@
 }
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
 {
 	tlb->mm = mm;
-	tlb->fullmm = full_mm_flush;
+	tlb->start = start;
+	tlb->end = end;
+	tlb->fullmm = !(start | (end+1));
 
 	init_tlb_gather(tlb);
 }
diff --git a/arch/sparc/include/asm/jump_label.h b/arch/sparc/include/asm/jump_label.h
index 5080d16..ec2e2e2 100644
--- a/arch/sparc/include/asm/jump_label.h
+++ b/arch/sparc/include/asm/jump_label.h
@@ -9,7 +9,7 @@
 
 static __always_inline bool arch_static_branch(struct static_key *key)
 {
-		asm goto("1:\n\t"
+		asm_volatile_goto("1:\n\t"
 			 "nop\n\t"
 			 "nop\n\t"
 			 ".pushsection __jump_table,  \"aw\"\n\t"
diff --git a/arch/sparc/kernel/asm-offsets.c b/arch/sparc/kernel/asm-offsets.c
index 961b87f..f76389a 100644
--- a/arch/sparc/kernel/asm-offsets.c
+++ b/arch/sparc/kernel/asm-offsets.c
@@ -49,6 +49,8 @@
 	DEFINE(AOFF_task_thread, offsetof(struct task_struct, thread));
 	BLANK();
 	DEFINE(AOFF_mm_context, offsetof(struct mm_struct, context));
+	BLANK();
+	DEFINE(VMA_VM_MM,    offsetof(struct vm_area_struct, vm_mm));
 
 	/* DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28); */
 	return 0;
diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
index 5ef48da..252f876 100644
--- a/arch/sparc/kernel/ds.c
+++ b/arch/sparc/kernel/ds.c
@@ -842,9 +842,8 @@
 	if (boot_command && strlen(boot_command)) {
 		unsigned long len;
 
-		strcpy(full_boot_str, "boot ");
-		strlcpy(full_boot_str + strlen("boot "), boot_command,
-			sizeof(full_boot_str + strlen("boot ")));
+		snprintf(full_boot_str, sizeof(full_boot_str), "boot %s",
+			 boot_command);
 		len = strlen(full_boot_str);
 
 		if (reboot_data_supported) {
diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
index e2a0300..33c02b1 100644
--- a/arch/sparc/kernel/entry.S
+++ b/arch/sparc/kernel/entry.S
@@ -839,7 +839,7 @@
 	 nop
 
 	call	syscall_trace
-	 nop
+	 mov	1, %o1
 
 1:
 	/* We don't want to muck with user registers like a
diff --git a/arch/sparc/kernel/ktlb.S b/arch/sparc/kernel/ktlb.S
index 0746e5e..fde5a41 100644
--- a/arch/sparc/kernel/ktlb.S
+++ b/arch/sparc/kernel/ktlb.S
@@ -25,11 +25,10 @@
 	 */
 kvmap_itlb_4v:
 
-kvmap_itlb_nonlinear:
 	/* Catch kernel NULL pointer calls.  */
 	sethi		%hi(PAGE_SIZE), %g5
 	cmp		%g4, %g5
-	bleu,pn		%xcc, kvmap_dtlb_longpath
+	blu,pn		%xcc, kvmap_itlb_longpath
 	 nop
 
 	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_itlb_load)
diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
index 22a1098..73ec8a7 100644
--- a/arch/sparc/kernel/syscalls.S
+++ b/arch/sparc/kernel/syscalls.S
@@ -152,7 +152,7 @@
 	srl	%i4, 0, %o4
 	srl	%i1, 0, %o1
 	srl	%i2, 0, %o2
-	ba,pt	%xcc, 2f
+	ba,pt	%xcc, 5f
 	 srl	%i3, 0, %o3
 
 linux_syscall_trace:
@@ -182,13 +182,13 @@
 	srl	%i1, 0, %o1				! IEU0	Group
 	ldx	[%g6 + TI_FLAGS], %l0		! Load
 
-	srl	%i5, 0, %o5				! IEU1
+	srl	%i3, 0, %o3				! IEU0
 	srl	%i2, 0, %o2				! IEU0	Group
 	andcc	%l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
 	bne,pn	%icc, linux_syscall_trace32		! CTI
 	 mov	%i0, %l5				! IEU1
-	call	%l7					! CTI	Group brk forced
-	 srl	%i3, 0, %o3				! IEU0
+5:	call	%l7					! CTI	Group brk forced
+	 srl	%i5, 0, %o5				! IEU1
 	ba,a,pt	%xcc, 3f
 
 	/* Linux native system calls enter here... */
diff --git a/arch/sparc/kernel/trampoline_64.S b/arch/sparc/kernel/trampoline_64.S
index 2e973a2..3a43edb 100644
--- a/arch/sparc/kernel/trampoline_64.S
+++ b/arch/sparc/kernel/trampoline_64.S
@@ -131,7 +131,6 @@
 	clr		%l5
 	sethi		%hi(num_kernel_image_mappings), %l6
 	lduw		[%l6 + %lo(num_kernel_image_mappings)], %l6
-	add		%l6, 1, %l6
 
 	mov		15, %l7
 	BRANCH_IF_ANY_CHEETAH(g1,g5,2f)
@@ -224,7 +223,6 @@
 	clr		%l5
 	sethi		%hi(num_kernel_image_mappings), %l6
 	lduw		[%l6 + %lo(num_kernel_image_mappings)], %l6
-	add		%l6, 1, %l6
 
 1:
 	mov		HV_FAST_MMU_MAP_PERM_ADDR, %o5
diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
index 0c4e35e..323335b 100644
--- a/arch/sparc/lib/ksyms.c
+++ b/arch/sparc/lib/ksyms.c
@@ -98,15 +98,6 @@
 EXPORT_SYMBOL(___copy_in_user);
 EXPORT_SYMBOL(__clear_user);
 
-/* RW semaphores */
-EXPORT_SYMBOL(__down_read);
-EXPORT_SYMBOL(__down_read_trylock);
-EXPORT_SYMBOL(__down_write);
-EXPORT_SYMBOL(__down_write_trylock);
-EXPORT_SYMBOL(__up_read);
-EXPORT_SYMBOL(__up_write);
-EXPORT_SYMBOL(__downgrade_write);
-
 /* Atomic counter implementation. */
 EXPORT_SYMBOL(atomic_add);
 EXPORT_SYMBOL(atomic_add_ret);
diff --git a/arch/sparc/mm/hypersparc.S b/arch/sparc/mm/hypersparc.S
index 44aad32..969f964 100644
--- a/arch/sparc/mm/hypersparc.S
+++ b/arch/sparc/mm/hypersparc.S
@@ -74,7 +74,7 @@
 
 	/* The things we do for performance... */
 hypersparc_flush_cache_range:
-	ld	[%o0 + 0x0], %o0		/* XXX vma->vm_mm, GROSS XXX */
+	ld	[%o0 + VMA_VM_MM], %o0
 #ifndef CONFIG_SMP
 	ld	[%o0 + AOFF_mm_context], %g1
 	cmp	%g1, -1
@@ -163,7 +163,7 @@
 	 */
 	/* Verified, my ass... */
 hypersparc_flush_cache_page:
-	ld	[%o0 + 0x0], %o0		/* XXX vma->vm_mm, GROSS XXX */
+	ld	[%o0 + VMA_VM_MM], %o0
 	ld	[%o0 + AOFF_mm_context], %g2
 #ifndef CONFIG_SMP
 	cmp	%g2, -1
@@ -284,7 +284,7 @@
 	 sta	%g5, [%g1] ASI_M_MMUREGS
 
 hypersparc_flush_tlb_range:
-	ld	[%o0 + 0x00], %o0	/* XXX vma->vm_mm GROSS XXX */
+	ld	[%o0 + VMA_VM_MM], %o0
 	mov	SRMMU_CTX_REG, %g1
 	ld	[%o0 + AOFF_mm_context], %o3
 	lda	[%g1] ASI_M_MMUREGS, %g5
@@ -307,7 +307,7 @@
 	 sta	%g5, [%g1] ASI_M_MMUREGS
 
 hypersparc_flush_tlb_page:
-	ld	[%o0 + 0x00], %o0	/* XXX vma->vm_mm GROSS XXX */
+	ld	[%o0 + VMA_VM_MM], %o0
 	mov	SRMMU_CTX_REG, %g1
 	ld	[%o0 + AOFF_mm_context], %o3
 	andn	%o1, (PAGE_SIZE - 1), %o1
diff --git a/arch/sparc/mm/swift.S b/arch/sparc/mm/swift.S
index c801c39..5d2b88d 100644
--- a/arch/sparc/mm/swift.S
+++ b/arch/sparc/mm/swift.S
@@ -105,7 +105,7 @@
 
 	.globl	swift_flush_cache_range
 swift_flush_cache_range:
-	ld	[%o0 + 0x0], %o0		/* XXX vma->vm_mm, GROSS XXX */
+	ld	[%o0 + VMA_VM_MM], %o0
 	sub	%o2, %o1, %o2
 	sethi	%hi(4096), %o3
 	cmp	%o2, %o3
@@ -116,7 +116,7 @@
 
 	.globl	swift_flush_cache_page
 swift_flush_cache_page:
-	ld	[%o0 + 0x0], %o0		/* XXX vma->vm_mm, GROSS XXX */
+	ld	[%o0 + VMA_VM_MM], %o0
 70:
 	ld	[%o0 + AOFF_mm_context], %g2
 	cmp	%g2, -1
@@ -219,7 +219,7 @@
 	.globl	swift_flush_tlb_range
 	.globl	swift_flush_tlb_all
 swift_flush_tlb_range:
-	ld	[%o0 + 0x00], %o0	/* XXX vma->vm_mm GROSS XXX */
+	ld	[%o0 + VMA_VM_MM], %o0
 swift_flush_tlb_mm:
 	ld	[%o0 + AOFF_mm_context], %g2
 	cmp	%g2, -1
@@ -233,7 +233,7 @@
 
 	.globl	swift_flush_tlb_page
 swift_flush_tlb_page:
-	ld	[%o0 + 0x00], %o0	/* XXX vma->vm_mm GROSS XXX */
+	ld	[%o0 + VMA_VM_MM], %o0
 	mov	SRMMU_CTX_REG, %g1
 	ld	[%o0 + AOFF_mm_context], %o3
 	andn	%o1, (PAGE_SIZE - 1), %o1
diff --git a/arch/sparc/mm/tsunami.S b/arch/sparc/mm/tsunami.S
index 4e55e8f..bf10a34 100644
--- a/arch/sparc/mm/tsunami.S
+++ b/arch/sparc/mm/tsunami.S
@@ -24,7 +24,7 @@
 	/* Sliiick... */
 tsunami_flush_cache_page:
 tsunami_flush_cache_range:
-	ld	[%o0 + 0x0], %o0	/* XXX vma->vm_mm, GROSS XXX */
+	ld	[%o0 + VMA_VM_MM], %o0
 tsunami_flush_cache_mm:
 	ld	[%o0 + AOFF_mm_context], %g2
 	cmp	%g2, -1
@@ -46,7 +46,7 @@
 
 	/* More slick stuff... */
 tsunami_flush_tlb_range:
-	ld	[%o0 + 0x00], %o0	/* XXX vma->vm_mm GROSS XXX */
+	ld	[%o0 + VMA_VM_MM], %o0
 tsunami_flush_tlb_mm:
 	ld	[%o0 + AOFF_mm_context], %g2
 	cmp	%g2, -1
@@ -65,7 +65,7 @@
 
 	/* This one can be done in a fine grained manner... */
 tsunami_flush_tlb_page:
-	ld	[%o0 + 0x00], %o0	/* XXX vma->vm_mm GROSS XXX */
+	ld	[%o0 + VMA_VM_MM], %o0
 	mov	SRMMU_CTX_REG, %g1
 	ld	[%o0 + AOFF_mm_context], %o3
 	andn	%o1, (PAGE_SIZE - 1), %o1
diff --git a/arch/sparc/mm/viking.S b/arch/sparc/mm/viking.S
index bf8ee06..852257f 100644
--- a/arch/sparc/mm/viking.S
+++ b/arch/sparc/mm/viking.S
@@ -108,7 +108,7 @@
 viking_flush_cache_page:
 viking_flush_cache_range:
 #ifndef CONFIG_SMP
-	ld	[%o0 + 0x0], %o0		/* XXX vma->vm_mm, GROSS XXX */
+	ld	[%o0 + VMA_VM_MM], %o0
 #endif
 viking_flush_cache_mm:
 #ifndef CONFIG_SMP
@@ -148,7 +148,7 @@
 #endif
 
 viking_flush_tlb_range:
-	ld	[%o0 + 0x00], %o0	/* XXX vma->vm_mm GROSS XXX */
+	ld	[%o0 + VMA_VM_MM], %o0
 	mov	SRMMU_CTX_REG, %g1
 	ld	[%o0 + AOFF_mm_context], %o3
 	lda	[%g1] ASI_M_MMUREGS, %g5
@@ -173,7 +173,7 @@
 #endif
 
 viking_flush_tlb_page:
-	ld	[%o0 + 0x00], %o0	/* XXX vma->vm_mm GROSS XXX */
+	ld	[%o0 + VMA_VM_MM], %o0
 	mov	SRMMU_CTX_REG, %g1
 	ld	[%o0 + AOFF_mm_context], %o3
 	lda	[%g1] ASI_M_MMUREGS, %g5
@@ -239,7 +239,7 @@
 	tst	%g5
 	bne	3f
 	 mov	SRMMU_CTX_REG, %g1
-	ld	[%o0 + 0x00], %o0	/* XXX vma->vm_mm GROSS XXX */
+	ld	[%o0 + VMA_VM_MM], %o0
 	ld	[%o0 + AOFF_mm_context], %o3
 	lda	[%g1] ASI_M_MMUREGS, %g5
 	sethi	%hi(~((1 << SRMMU_PGDIR_SHIFT) - 1)), %o4
@@ -265,7 +265,7 @@
 	tst	%g5
 	bne	2f
 	 mov	SRMMU_CTX_REG, %g1
-	ld	[%o0 + 0x00], %o0	/* XXX vma->vm_mm GROSS XXX */
+	ld	[%o0 + VMA_VM_MM], %o0
 	ld	[%o0 + AOFF_mm_context], %o3
 	lda	[%g1] ASI_M_MMUREGS, %g5
 	and	%o1, PAGE_MASK, %o1
diff --git a/arch/tile/include/asm/percpu.h b/arch/tile/include/asm/percpu.h
index 63294f5..4f7ae39 100644
--- a/arch/tile/include/asm/percpu.h
+++ b/arch/tile/include/asm/percpu.h
@@ -15,9 +15,37 @@
 #ifndef _ASM_TILE_PERCPU_H
 #define _ASM_TILE_PERCPU_H
 
-register unsigned long __my_cpu_offset __asm__("tp");
-#define __my_cpu_offset __my_cpu_offset
-#define set_my_cpu_offset(tp) (__my_cpu_offset = (tp))
+register unsigned long my_cpu_offset_reg asm("tp");
+
+#ifdef CONFIG_PREEMPT
+/*
+ * For full preemption, we can't just use the register variable
+ * directly, since we need barrier() to hazard against it, causing the
+ * compiler to reload anything computed from a previous "tp" value.
+ * But we also don't want to use volatile asm, since we'd like the
+ * compiler to be able to cache the value across multiple percpu reads.
+ * So we use a fake stack read as a hazard against barrier().
+ * The 'U' constraint is like 'm' but disallows postincrement.
+ */
+static inline unsigned long __my_cpu_offset(void)
+{
+	unsigned long tp;
+	register unsigned long *sp asm("sp");
+	asm("move %0, tp" : "=r" (tp) : "U" (*sp));
+	return tp;
+}
+#define __my_cpu_offset __my_cpu_offset()
+#else
+/*
+ * We don't need to hazard against barrier() since "tp" doesn't ever
+ * change with PREEMPT_NONE, and with PREEMPT_VOLUNTARY it only
+ * changes at function call points, at which we are already re-reading
+ * the value of "tp" due to "my_cpu_offset_reg" being a global variable.
+ */
+#define __my_cpu_offset my_cpu_offset_reg
+#endif
+
+#define set_my_cpu_offset(tp) (my_cpu_offset_reg = (tp))
 
 #include <asm-generic/percpu.h>
 
diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h
index 4febacd..29b0301 100644
--- a/arch/um/include/asm/tlb.h
+++ b/arch/um/include/asm/tlb.h
@@ -45,10 +45,12 @@
 }
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
 {
 	tlb->mm = mm;
-	tlb->fullmm = full_mm_flush;
+	tlb->start = start;
+	tlb->end = end;
+	tlb->fullmm = !(start | (end+1));
 
 	init_tlb_gather(tlb);
 }
diff --git a/arch/um/include/shared/os.h b/arch/um/include/shared/os.h
index 95feaa4..c70a234 100644
--- a/arch/um/include/shared/os.h
+++ b/arch/um/include/shared/os.h
@@ -200,6 +200,7 @@
 extern int os_drop_memory(void *addr, int length);
 extern int can_drop_memory(void);
 extern void os_flush_stdout(void);
+extern int os_mincore(void *addr, unsigned long len);
 
 /* execvp.c */
 extern int execvp_noalloc(char *buf, const char *file, char *const argv[]);
diff --git a/arch/um/kernel/Makefile b/arch/um/kernel/Makefile
index babe218..d8b78a0 100644
--- a/arch/um/kernel/Makefile
+++ b/arch/um/kernel/Makefile
@@ -13,7 +13,7 @@
 obj-y = config.o exec.o exitcode.o irq.o ksyms.o mem.o \
 	physmem.o process.o ptrace.o reboot.o sigio.o \
 	signal.o smp.o syscall.o sysrq.o time.o tlb.o trap.o \
-	um_arch.o umid.o skas/
+	um_arch.o umid.o maccess.o skas/
 
 obj-$(CONFIG_BLK_DEV_INITRD) += initrd.o
 obj-$(CONFIG_GPROF)	+= gprof_syms.o
diff --git a/arch/um/kernel/exitcode.c b/arch/um/kernel/exitcode.c
index 829df49..41ebbfe 100644
--- a/arch/um/kernel/exitcode.c
+++ b/arch/um/kernel/exitcode.c
@@ -40,9 +40,11 @@
 		const char __user *buffer, size_t count, loff_t *pos)
 {
 	char *end, buf[sizeof("nnnnn\0")];
+	size_t size;
 	int tmp;
 
-	if (copy_from_user(buf, buffer, count))
+	size = min(count, sizeof(buf));
+	if (copy_from_user(buf, buffer, size))
 		return -EFAULT;
 
 	tmp = simple_strtol(buf, &end, 0);
diff --git a/arch/um/kernel/maccess.c b/arch/um/kernel/maccess.c
new file mode 100644
index 0000000..1f3d5c4
--- /dev/null
+++ b/arch/um/kernel/maccess.c
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2013 Richard Weinberger <richrd@nod.at>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/uaccess.h>
+#include <linux/kernel.h>
+#include <os.h>
+
+long probe_kernel_read(void *dst, const void *src, size_t size)
+{
+	void *psrc = (void *)rounddown((unsigned long)src, PAGE_SIZE);
+
+	if ((unsigned long)src < PAGE_SIZE || size <= 0)
+		return -EFAULT;
+
+	if (os_mincore(psrc, size + src - psrc) <= 0)
+		return -EFAULT;
+
+	return __probe_kernel_read(dst, src, size);
+}
diff --git a/arch/um/os-Linux/process.c b/arch/um/os-Linux/process.c
index b8f34c9..67b9c8f 100644
--- a/arch/um/os-Linux/process.c
+++ b/arch/um/os-Linux/process.c
@@ -4,6 +4,7 @@
  */
 
 #include <stdio.h>
+#include <stdlib.h>
 #include <unistd.h>
 #include <errno.h>
 #include <signal.h>
@@ -232,6 +233,57 @@
 	return ok;
 }
 
+static int os_page_mincore(void *addr)
+{
+	char vec[2];
+	int ret;
+
+	ret = mincore(addr, UM_KERN_PAGE_SIZE, vec);
+	if (ret < 0) {
+		if (errno == ENOMEM || errno == EINVAL)
+			return 0;
+		else
+			return -errno;
+	}
+
+	return vec[0] & 1;
+}
+
+int os_mincore(void *addr, unsigned long len)
+{
+	char *vec;
+	int ret, i;
+
+	if (len <= UM_KERN_PAGE_SIZE)
+		return os_page_mincore(addr);
+
+	vec = calloc(1, (len + UM_KERN_PAGE_SIZE - 1) / UM_KERN_PAGE_SIZE);
+	if (!vec)
+		return -ENOMEM;
+
+	ret = mincore(addr, UM_KERN_PAGE_SIZE, vec);
+	if (ret < 0) {
+		if (errno == ENOMEM || errno == EINVAL)
+			ret = 0;
+		else
+			ret = -errno;
+
+		goto out;
+	}
+
+	for (i = 0; i < ((len + UM_KERN_PAGE_SIZE - 1) / UM_KERN_PAGE_SIZE); i++) {
+		if (!(vec[i] & 1)) {
+			ret = 0;
+			goto out;
+		}
+	}
+
+	ret = 1;
+out:
+	free(vec);
+	return ret;
+}
+
 void init_new_thread_signals(void)
 {
 	set_handler(SIGSEGV);
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 2b6c572..e159dcd 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -13,9 +13,10 @@
 	select HAVE_UID16
 
 config X86_64
-	def_bool y
+	def_bool 64BIT
 	depends on 64BIT
 	select X86_DEV_DMA_OPS
+	select CLKSRC_I8253
 
 ### Arch settings
 config X86
@@ -82,6 +83,7 @@
 	select HAVE_ARCH_JUMP_LABEL
 	select HAVE_TEXT_POKE_SMP
 	select HAVE_GENERIC_HARDIRQS
+	select HARDIRQS_SW_RESEND
 	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
 	select SPARSE_IRQ
 	select GENERIC_FIND_FIRST_BIT
@@ -269,6 +271,11 @@
 config ARCH_SUPPORTS_UPROBES
 	def_bool y
 
+config NR_CPUS_PER_MODULE
+        int "number of cpu cores per module"
+        default "2"
+        depends on SMP
+
 source "init/Kconfig"
 source "kernel/Kconfig.freezer"
 
@@ -446,42 +453,39 @@
 	  This option compiles in support for the CE4100 SOC for settop
 	  boxes and media devices.
 
-config X86_WANT_INTEL_MID
-	bool "Intel MID platform support"
-	depends on X86_32
-	depends on X86_EXTENDED_PLATFORM
-	---help---
-	  Select to build a kernel capable of supporting Intel MID platform
-	  systems which do not have the PCI legacy interfaces (Moorestown,
-	  Medfield). If you are building for a PC class system say N here.
-
-if X86_WANT_INTEL_MID
-
 config X86_INTEL_MID
-	bool
-
-config X86_MDFLD
-       bool "Medfield MID platform"
+	bool "Intel MID platform"
 	depends on PCI
 	depends on PCI_GOANY
 	depends on X86_IO_APIC
-	select X86_INTEL_MID
+	depends on X86_32 || X86_64
+	depends on X86_EXTENDED_PLATFORM
 	select SFI
-	select DW_APB_TIMER
-	select APB_TIMER
-	select I2C
-	select SPI
 	select INTEL_SCU_IPC
 	select X86_PLATFORM_DEVICES
-	select MFD_INTEL_MSIC
 	---help---
-	  Medfield is Intel's Low Power Intel Architecture (LPIA) based Moblin
-	  Internet Device(MID) platform. 
-	  Unlike standard x86 PCs, Medfield does not have many legacy devices
-	  nor standard legacy replacement devices/features. e.g. Medfield does
-	  not contain i8259, i8254, HPET, legacy BIOS, most of the io ports.
+	  Intel MID is Intel's Low Power Intel Architecture (LPIA) based Mobile
+	  Internet Device(MID) platform.
+	  Unlike standard x86 PCs, Intel MID does not have many legacy devices
+	  nor standard legacy replacement devices/features. e.g. It does not
+	  contain i8259, i8254, HPET, legacy BIOS, most of the io ports.
 
-endif
+menu "fugu platform selection"
+source "arch/x86/platform/fugu/Kconfig"
+endmenu
+
+config ATOM_SOC_POWER
+	bool "Select Atom SOC Power"
+
+config INTEL_DEBUG_FEATURE
+       bool "Debug feature interface on Intel MID platform"
+       depends on X86_INTEL_MID
+       ---help---
+         Provides an interface to list the debug features
+	 that are enabled on an Intel MID platform. The
+	 enabling of the debug features depends on the mode
+	 the device is in (e.g. manufacturing, production,
+	 end user, etc...).
 
 config X86_INTEL_LPSS
 	bool "Intel Low Power Subsystem Support"
@@ -493,6 +497,13 @@
 	  things like clock tree (common clock framework) which are needed
 	  by the LPSS peripheral drivers.
 
+config X86_INTEL_OSC_CLK
+	bool "Intel OSC CLK Support"
+	select COMMON_CLK
+	---help---
+	  Select to build support for Intel OSC0~OSC4 clock. Selecting
+	  this option enables Linux common clock framework.
+
 config X86_RDC321X
 	bool "RDC R-321x SoC"
 	depends on X86_32
@@ -705,8 +716,8 @@
 source "arch/x86/Kconfig.cpu"
 
 config HPET_TIMER
+	prompt "HPET Timer Support"
 	def_bool X86_64
-	prompt "HPET Timer Support" if X86_32
 	---help---
 	  Use the IA-PC HPET (High Precision Event Timer) to manage
 	  time in preference to the PIT and RTC, if a HPET is
@@ -851,6 +862,26 @@
 	  making when dealing with multi-core CPU chips at a cost of slightly
 	  increased overhead in some places. If unsure say N here.
 
+config CPU_CONCURRENCY
+	bool "CPU ConCurency (CC)"
+	default n
+	depends on SMP
+	---help---
+	  CPU ConCurrency (CC) is a new CPU semantic that measures the CPU load
+	  by averaging the number of running tasks. Using CC, the scheduler can
+	  evaluate the load of CPUs to improve load balance for power efficiency
+	  without sacrificing performance. If unsure say N here.
+
+config WORKLOAD_CONSOLIDATION
+	bool "CPU Workload Consolidation"
+	default n
+	depends on CPU_CONCURRENCY
+	---help---
+	  CPU Workload Consolidation is a new CPU PM module, which uses the CPU
+	  concurrency of the CPU, and allows asymmetric concurrency across CPUs to
+	  reduce the SW and HW overhead to increase load balance efficiency and
+	  conserve energy. If unsure say N here.
+
 source "kernel/Kconfig.preempt"
 
 config X86_UP_APIC
@@ -2041,7 +2072,7 @@
 
 choice
 	prompt "PCI access mode"
-	depends on X86_32 && PCI
+	depends on (X86_32 || X86_64) && PCI
 	default PCI_GOANY
 	---help---
 	  On PCI systems, the BIOS can be used to detect the PCI devices and
@@ -2104,7 +2135,9 @@
 
 config PCI_MMCONFIG
 	bool "Support mmconfig PCI config space access"
-	depends on X86_64 && PCI && ACPI
+	depends on X86_64 && PCI && (ACPI || SFI)
+	help
+	  Provides Low-level direct PCI config space access via MMCONFIG
 
 config PCI_CNB20LE_QUIRK
 	bool "Read CNB20LE Host Bridge Windows" if EXPERT
@@ -2356,7 +2389,7 @@
 
 config X86_DEV_DMA_OPS
 	bool
-	depends on X86_64 || STA2X11
+	depends on X86_64 || STA2X11 || VIDEO_CSS2600
 
 config X86_DMA_REMAP
 	bool
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index c026cca..0a7e986 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -269,6 +269,15 @@
 	  accordingly optimized code. Use a recent GCC with specific Atom
 	  support in order to fully benefit from selecting this option.
 
+config MSLM
+	bool "Intel Silvermont (Atom)"
+	---help---
+
+	  Select this for the Intel Silvermont (Atom) platform. Intel Atom
+	  CPUs have an in-order pipelining architecture and thus can benefit
+	  from accordingly optimized code. Use a recent GCC with specific
+	  Atom support in order to fully benefit from selecting this option.
+
 config GENERIC_CPU
 	bool "Generic-x86-64"
 	depends on X86_64
@@ -300,7 +309,7 @@
 config X86_L1_CACHE_SHIFT
 	int
 	default "7" if MPENTIUM4 || MPSC
-	default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
+	default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MSLM || MVIAC7 || X86_GENERIC || GENERIC_CPU
 	default "4" if MELAN || M486 || MGEODEGX1
 	default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
 
@@ -335,7 +344,7 @@
 
 config X86_USE_PPRO_CHECKSUM
 	def_bool y
-	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM
+	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM || MSLM
 
 config X86_USE_3DNOW
 	def_bool y
@@ -363,17 +372,17 @@
 
 config X86_TSC
 	def_bool y
-	depends on ((MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM) && !X86_NUMAQ) || X86_64
+	depends on ((MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM || MSLM) && !X86_NUMAQ) || X86_64
 
 config X86_CMPXCHG64
 	def_bool y
-	depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM
+	depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM || MSLM
 
 # this should be set for all -march=.. options where the compiler
 # generates cmov.
 config X86_CMOV
 	def_bool y
-	depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
+	depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MSLM || MGEODE_LX)
 
 config X86_MINIMUM_CPU_FAMILY
 	int
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index c198b7e..f452fc2 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -304,4 +304,23 @@
 
 	  If unsure, say N.
 
+config INTEL_MID_PSTORE_RAM
+	bool "Enable Intel MID support for pstore RAM (ramoops, ram_console)"
+	depends on X86_INTEL_MID
+	select PSTORE
+	select PSTORE_RAM
+	select PSTORE_CONSOLE
+	----help---
+	  Select this option to enable pstore RAM backend support on
+	  Intel MID. It reserve ram and configure pstore ram
+	  (CONFIG_PSTORE_RAM). This enable panic and oops to be
+	  logged to pstore and make them available for next reboot.
+
+	  Pstore filesystem can be mounted to retrieve files :
+	  mount -t pstore none /logs/pstore
+	  For more information, see Documentation/ABI/testing/pstore
+
+	  Options could be enabled, see CONFIG_PSTORE_CONSOLE and
+	  CONFIG_PSTORE_FTRACE.
+
 endmenu
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 5c47726..9929783 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -31,6 +31,9 @@
 
         KBUILD_CFLAGS += -msoft-float -mregparm=3 -freg-struct-return
 
+        # Don't autogenerate MMX or SSE instructions
+        KBUILD_CFLAGS += -mno-mmx -mno-sse
+
         # Never want PIC in a 32-bit kernel, prevent breakage with GCC built
         # with nonstandard options
         KBUILD_CFLAGS += -fno-pic
@@ -55,10 +58,13 @@
         CHECKFLAGS += -D__x86_64__ -m64
 
         KBUILD_AFLAGS += -m64
-        KBUILD_CFLAGS += -m64
+        KBUILD_CFLAGS += -m64 -fno-pic
+
+        # Don't autogenerate MMX or SSE instructions
+        KBUILD_CFLAGS += -mno-mmx -mno-sse
 
 	# Use -mpreferred-stack-boundary=3 if supported.
-	KBUILD_CFLAGS += $(call cc-option,-mno-sse -mpreferred-stack-boundary=3)
+	KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=3)
 
         # FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
         cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
@@ -68,6 +74,8 @@
                 $(call cc-option,-march=core2,$(call cc-option,-mtune=generic))
 	cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom) \
 		$(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
+        cflags-$(CONFIG_MSLM) += $(call cc-option,-march=slm) \
+                $(call cc-option,-mtune=slm,$(call cc-option,-mtune=generic))
         cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
         KBUILD_CFLAGS += $(cflags-y)
 
@@ -134,6 +142,9 @@
 
 LDFLAGS := -m elf_$(UTS_MACHINE)
 
+# flag all warnings as errors
+KBUILD_CFLAGS += -Werror
+
 # Speed up the build
 KBUILD_CFLAGS += -pipe
 # Workaround for a gcc prelease that unfortunately was shipped in a suse release
diff --git a/arch/x86/Makefile_32.cpu b/arch/x86/Makefile_32.cpu
index 6647ed4..a2eb6c6 100644
--- a/arch/x86/Makefile_32.cpu
+++ b/arch/x86/Makefile_32.cpu
@@ -34,6 +34,8 @@
 cflags-$(CONFIG_MCORE2)		+= -march=i686 $(call tune,core2)
 cflags-$(CONFIG_MATOM)		+= $(call cc-option,-march=atom,$(call cc-option,-march=core2,-march=i686)) \
 	$(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
+cflags-$(CONFIG_MSLM)		+= $(call cc-option,-march=slm,$(call cc-option,-march=core2,-march=i686)) \
+	$(call cc-option,-mtune=slm,$(call cc-option,-mtune=generic))
 
 # AMD Elan support
 cflags-$(CONFIG_MELAN)		+= -march=i486
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index 379814b..6cf0111 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -53,18 +53,18 @@
 
 # How to compile the 16-bit code.  Note we always compile for -march=i386,
 # that way we can complain to the user if the CPU is insufficient.
-KBUILD_CFLAGS	:= $(USERINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
+KBUILD_CFLAGS	:= $(USERINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ \
 		   -DDISABLE_BRANCH_PROFILING \
 		   -Wall -Wstrict-prototypes \
 		   -march=i386 -mregparm=3 \
 		   -include $(srctree)/$(src)/code16gcc.h \
 		   -fno-strict-aliasing -fomit-frame-pointer -fno-pic \
+		   -mno-mmx -mno-sse \
 		   $(call cc-option, -ffreestanding) \
 		   $(call cc-option, -fno-toplevel-reorder,\
-			$(call cc-option, -fno-unit-at-a-time)) \
+		   $(call cc-option, -fno-unit-at-a-time)) \
 		   $(call cc-option, -fno-stack-protector) \
 		   $(call cc-option, -mpreferred-stack-boundary=2)
-KBUILD_CFLAGS	+= $(call cc-option, -m32)
 KBUILD_AFLAGS	:= $(KBUILD_CFLAGS) -D__ASSEMBLY__
 GCOV_PROFILE := n
 
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 5ef205c..7194d9f 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -12,6 +12,7 @@
 cflags-$(CONFIG_X86_32) := -march=i386
 cflags-$(CONFIG_X86_64) := -mcmodel=small
 KBUILD_CFLAGS += $(cflags-y)
+KBUILD_CFLAGS += -mno-mmx -mno-sse
 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
 
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index c205035..d606463 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -992,18 +992,20 @@
 	efi_memory_desc_t *mem_map;
 	efi_status_t status;
 	__u32 desc_version;
+	bool called_exit = false;
 	u8 nr_entries;
 	int i;
 
 	size = sizeof(*mem_map) * 32;
 
 again:
-	size += sizeof(*mem_map);
+	size += sizeof(*mem_map) * 2;
 	_size = size;
 	status = low_alloc(size, 1, (unsigned long *)&mem_map);
 	if (status != EFI_SUCCESS)
 		return status;
 
+get_map:
 	status = efi_call_phys5(sys_table->boottime->get_memory_map, &size,
 				mem_map, &key, &desc_size, &desc_version);
 	if (status == EFI_BUFFER_TOO_SMALL) {
@@ -1029,8 +1031,20 @@
 	/* Might as well exit boot services now */
 	status = efi_call_phys2(sys_table->boottime->exit_boot_services,
 				handle, key);
-	if (status != EFI_SUCCESS)
-		goto free_mem_map;
+	if (status != EFI_SUCCESS) {
+		/*
+		 * ExitBootServices() will fail if any of the event
+		 * handlers change the memory map. In which case, we
+		 * must be prepared to retry, but only once so that
+		 * we're guaranteed to exit on repeated failures instead
+		 * of spinning forever.
+		 */
+		if (called_exit)
+			goto free_mem_map;
+
+		called_exit = true;
+		goto get_map;
+	}
 
 	/* Historic? */
 	boot_params->alt_mem_k = 32 * 1024;
diff --git a/arch/x86/configs/fugu_defconfig b/arch/x86/configs/fugu_defconfig
new file mode 100644
index 0000000..2bd7f5d
--- /dev/null
+++ b/arch/x86/configs/fugu_defconfig
@@ -0,0 +1,478 @@
+CONFIG_KERNEL_BZIP2=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_RCU_FANOUT=32
+CONFIG_LOG_BUF_SHIFT=20
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+# CONFIG_MEMCG_SWAP_ENABLED is not set
+CONFIG_CGROUP_SCHED=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_PANIC_TIMEOUT=3
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_ARCH_MMAP_RND_BITS=32
+CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_SMP=y
+CONFIG_X86_INTEL_MID=y
+CONFIG_X86_INTEL_MID_FUGU=y
+CONFIG_FUGU_WIFI_PLATFORM_DATA=y
+CONFIG_FUGU_LED=y
+CONFIG_ATOM_SOC_POWER=y
+CONFIG_INTEL_DEBUG_FEATURE=y
+CONFIG_X86_INTEL_OSC_CLK=y
+# CONFIG_SCHED_OMIT_FRAME_POINTER is not set
+CONFIG_MSLM=y
+CONFIG_NR_CPUS=4
+CONFIG_SCHED_SMT=y
+CONFIG_CPU_CONCURRENCY=y
+CONFIG_WORKLOAD_CONSOLIDATION=y
+CONFIG_PREEMPT=y
+# CONFIG_X86_MCE_AMD is not set
+CONFIG_X86_MSR=y
+CONFIG_X86_CPUID=y
+# CONFIG_COMPACTION is not set
+CONFIG_KSM=y
+CONFIG_ZSMALLOC=y
+CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=1
+# CONFIG_ARCH_RANDOM is not set
+CONFIG_KEXEC=y
+CONFIG_PHYSICAL_START=0x1200000
+# CONFIG_RELOCATABLE is not set
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_RUNTIME=y
+CONFIG_PM_DEBUG=y
+CONFIG_PM_ADVANCED_DEBUG=y
+CONFIG_SUSPEND_TIME=y
+# CONFIG_ACPI is not set
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_STAT_DETAILS=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE=y
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_X86_SFI_CPUFREQ=y
+CONFIG_CPU_IDLE=y
+CONFIG_INTEL_IDLE=y
+CONFIG_PCI_MMCONFIG=y
+CONFIG_PCIEPORTBUS=y
+# CONFIG_PCIEAER is not set
+CONFIG_PCIEASPM_PERFORMANCE=y
+CONFIG_PCI_MSI=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=y
+CONFIG_IA32_EMULATION=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+CONFIG_INET_DIAG_DESTROY=y
+CONFIG_TCP_MD5SIG=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_MATCH_CLUSTER=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_TARGET_REJECT_SKERR=y
+CONFIG_NF_NAT_IPV4=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_AH=y
+CONFIG_IP6_NF_MATCH_EUI64=y
+CONFIG_IP6_NF_MATCH_FRAG=y
+CONFIG_IP6_NF_MATCH_OPTS=y
+CONFIG_IP6_NF_MATCH_HL=y
+CONFIG_IP6_NF_MATCH_IPV6HEADER=y
+CONFIG_IP6_NF_MATCH_MH=y
+CONFIG_IP6_NF_MATCH_RT=y
+CONFIG_IP6_NF_TARGET_HL=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_TARGET_REJECT_SKERR=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_L2TP=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_CLS_U32=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_DNS_RESOLVER=y
+CONFIG_CFG80211=y
+CONFIG_NL80211_TESTMODE=y
+CONFIG_MAC80211=y
+CONFIG_RFKILL=y
+CONFIG_DEBUG_DEVRES=y
+CONFIG_CONNECTOR=y
+CONFIG_ZRAM=y
+CONFIG_ZRAM_LZ4_COMPRESS=y
+CONFIG_ZRAM_DEBUG=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_UID_STAT=y
+CONFIG_UID_CPUTIME=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_UEVENT=y
+CONFIG_NETDEVICES=y
+CONFIG_TUN=y
+# CONFIG_ETHERNET is not set
+CONFIG_USB_KAWETH=y
+CONFIG_USB_PEGASUS=y
+CONFIG_USB_RTL8150=y
+CONFIG_USB_RTL8152=y
+CONFIG_USB_USBNET=y
+CONFIG_USB_NET_CDC_EEM=y
+# CONFIG_USB_NET_CDC_NCM is not set
+CONFIG_USB_NET_SMSC75XX=y
+CONFIG_USB_NET_SMSC95XX=y
+# CONFIG_USB_NET_NET1080 is not set
+CONFIG_USB_NET_PLUSB=y
+# CONFIG_USB_NET_CDC_SUBSET is not set
+# CONFIG_USB_NET_ZAURUS is not set
+CONFIG_WIFI_CONTROL_FUNC=y
+CONFIG_BCMDHD=y
+CONFIG_BCMDHD_SDIO=y
+CONFIG_BCM4354=y
+CONFIG_INPUT_SPARSEKMAP=y
+# CONFIG_INPUT_MOUSEDEV is not set
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_KEYRESET=y
+# CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_KEYBOARD_GPIO_POLLED=y
+# CONFIG_MOUSE_PS2 is not set
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_JOYSTICK_XPAD=y
+CONFIG_JOYSTICK_XPAD_FF=y
+CONFIG_JOYSTICK_XPAD_LEDS=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_KEYCHORD=y
+CONFIG_INPUT_UINPUT=y
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_TRACE_ROUTER=y
+CONFIG_TRACE_SINK=y
+# CONFIG_DEVMEM is not set
+# CONFIG_DEVKMEM is not set
+# CONFIG_HW_RANDOM_AMD is not set
+# CONFIG_HW_RANDOM_VIA is not set
+CONFIG_NVRAM=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_GPIO=y
+CONFIG_PTP_1588_CLOCK=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIODEBUG=y
+CONFIG_GPIO_PCA953X=y
+CONFIG_GPIO_LANGWELL=y
+CONFIG_SENSORS_CORETEMP=y
+CONFIG_SENSORS_CORETEMP_INTERRUPT=y
+CONFIG_MSIC_GPADC=y
+CONFIG_THERMAL=y
+CONFIG_THERMAL_EMULATION=y
+CONFIG_SOC_THERMAL=y
+CONFIG_DEBUG_THERMAL=y
+CONFIG_INTEL_MOOR_THERMAL=y
+CONFIG_WATCHDOG=y
+CONFIG_INTEL_SCU_WATCHDOG_EVO=y
+CONFIG_DISABLE_SCU_WATCHDOG=y
+CONFIG_MFD_INTEL_MSIC=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
+CONFIG_MEDIA_RC_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+# CONFIG_DVB_NET is not set
+# CONFIG_RC_DECODERS is not set
+CONFIG_MEDIA_USB_SUPPORT=y
+CONFIG_USB_VIDEO_CLASS=y
+CONFIG_VIDEO_CX231XX=y
+# CONFIG_VIDEO_CX231XX_RC is not set
+CONFIG_VIDEO_CX231XX_DVB=y
+CONFIG_DRM=y
+CONFIG_INTEL_NO_FB_PANIC_NOTIFY=y
+CONFIG_VIDEO_OUTPUT_CONTROL=y
+CONFIG_ITE_HDMI_CEC=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_TILEBLITTING=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+# CONFIG_SND_DRIVERS is not set
+# CONFIG_SND_PCI is not set
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_SOC=y
+CONFIG_SND_MOOR_PLATFORM=y
+CONFIG_HIDRAW=y
+CONFIG_UHID=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_BELKIN=y
+CONFIG_HID_LENOVO_TPKBD=y
+CONFIG_HID_LOGITECH=y
+CONFIG_HID_LOGITECH_DJ=y
+CONFIG_LOGITECH_FF=y
+CONFIG_LOGIRUMBLEPAD2_FF=y
+CONFIG_LOGIG940_FF=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_PS3REMOTE=y
+CONFIG_HID_SAMSUNG=y
+CONFIG_HID_SONY=y
+CONFIG_HID_STEELSERIES=y
+CONFIG_HID_TIVO=y
+CONFIG_HID_WACOM=y
+CONFIG_HID_WIIMOTE=y
+CONFIG_HID_ANDROIDTV_REMOTE=y
+CONFIG_HID_PID=y
+CONFIG_USB_HIDDEV=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_MON=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_STORAGE=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC3_GADGET=y
+# CONFIG_USB_DWC3_EXYNOS is not set
+# CONFIG_USB_DWC3_PCI is not set
+CONFIG_USB_DWC3_OTG=y
+CONFIG_USB_DWC3_INTEL_MRFL=y
+CONFIG_USB_DWC3_HOST_INTEL=y
+CONFIG_USB_SERIAL=y
+CONFIG_USB_SERIAL_FTDI_SIO=y
+CONFIG_USB_SERIAL_PL2303=y
+CONFIG_USB_TEST_MODE=y
+CONFIG_USB_PHY=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_G_ANDROID=y
+CONFIG_MMC=y
+CONFIG_MMC_UNSAFE_RESUME=y
+CONFIG_MMC_BLOCK_MINORS=10
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PCI=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_SWITCH=y
+CONFIG_SWITCH_GPIO=y
+CONFIG_EDAC=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS_WAKEUP_FROM_LPSTATES=y
+CONFIG_RTC_DRV_CMOS_DAYOFMONTH_ALARM=y
+# CONFIG_RTC_DRV_VRTC is not set
+CONFIG_DMADEVICES=y
+CONFIG_INTEL_MID_DMAC=y
+CONFIG_STAGING=y
+CONFIG_DX_SEP54=y
+CONFIG_ANDROID=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_SYNC=y
+CONFIG_SW_SYNC=y
+CONFIG_ION=y
+# CONFIG_NET_VENDOR_SILICOM is not set
+CONFIG_GPIO_INTEL_PMIC=y
+CONFIG_INTEL_MID_POWER_BUTTON=y
+CONFIG_INTEL_MID_REMOTEPROC=y
+CONFIG_RPMSG_IPC=y
+CONFIG_PM_DEVFREQ=y
+CONFIG_DEVFREQ_GOV_USERSPACE=y
+CONFIG_IIO=y
+CONFIG_IIO_BASINCOVE_GPADC=y
+CONFIG_SCU_LOGGING=y
+CONFIG_UUID=y
+CONFIG_EMMC_IPANIC=y
+CONFIG_KCT_DAEMON=y
+CONFIG_FPS_THROTTLE=y
+CONFIG_BCM_BT_LPM=y
+# CONFIG_RAWIO is not set
+CONFIG_TRACEPOINT_TO_EVENT=y
+CONFIG_I2C_DESIGNWARE_CORE_FORK=y
+CONFIG_I2C_DESIGNWARE_PCI_FORK=y
+CONFIG_I2C_PMIC=y
+CONFIG_PMIC_CCSM=y
+CONFIG_GPIO_INTEL_MSIC=y
+CONFIG_INTEL_PSH_IPC=y
+CONFIG_INTEL_REBOOT_TARGET=y
+CONFIG_SERIAL_MFD_HSU_EXT=y
+CONFIG_SERIAL_MFD_HSU_EXT_CONSOLE=y
+CONFIG_DRM_INTEL_MID=y
+CONFIG_SUPPORT_HDMI=y
+CONFIG_DRM_INTEL_HANDSET=y
+CONFIG_GFX_RGX_BVNC="1.72.4.12"
+CONFIG_DRM_MRFLD=y
+CONFIG_SUPPORT_VSP=y
+CONFIG_MOOREFIELD=y
+CONFIG_ANDROID_BINDER_IPC=y
+# CONFIG_DMIID is not set
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_FANOTIFY=y
+CONFIG_FUSE_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_UTF8=y
+CONFIG_PRINTK_TIME=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+CONFIG_DEBUG_KMEMLEAK=y
+CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_DEBUG_STACK_USAGE=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_DEBUG_LIST=y
+CONFIG_DEBUG_NOTIFIERS=y
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+CONFIG_RCU_CPU_STALL_INFO=y
+CONFIG_SCHED_TRACER=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_EARLY_PRINTK_INTEL_MID=y
+# CONFIG_DEBUG_RODATA_TEST is not set
+CONFIG_DEBUG_BOOT_PARAMS=y
+CONFIG_OPTIMIZE_INLINING=y
+CONFIG_INTEL_MID_PSTORE_RAM=y
+CONFIG_KEYS=y
+CONFIG_KEYS_DEBUG_PROC_KEYS=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM=y
+CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_CRYPTO_NULL=y
+CONFIG_CRYPTO_SHA1_SSSE3=y
+CONFIG_CRYPTO_SHA256_SSSE3=y
+CONFIG_CRYPTO_SHA512_SSSE3=y
+CONFIG_CRYPTO_AES_NI_INTEL=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_TWOFISH_AVX_X86_64=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_BIT=y
diff --git a/arch/x86/configs/moor_defconfig b/arch/x86/configs/moor_defconfig
new file mode 100644
index 0000000..909c7c9
--- /dev/null
+++ b/arch/x86/configs/moor_defconfig
@@ -0,0 +1,595 @@
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_RCU_FANOUT=32
+CONFIG_LOG_BUF_SHIFT=18
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_CGROUP_PERF=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_BLK_CGROUP=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=y
+CONFIG_BLK_DEV_THROTTLING=y
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_SMP=y
+CONFIG_X86_INTEL_MID=y
+CONFIG_ATOM_SOC_POWER=y
+CONFIG_INTEL_DEBUG_FEATURE=y
+CONFIG_X86_INTEL_OSC_CLK=y
+# CONFIG_SCHED_OMIT_FRAME_POINTER is not set
+CONFIG_MSLM=y
+CONFIG_NR_CPUS=4
+CONFIG_SCHED_SMT=y
+CONFIG_CPU_CONCURRENCY=y
+CONFIG_WORKLOAD_CONSOLIDATION=y
+CONFIG_PREEMPT=y
+# CONFIG_X86_MCE_AMD is not set
+CONFIG_X86_MSR=y
+CONFIG_X86_CPUID=y
+# CONFIG_COMPACTION is not set
+CONFIG_KSM=y
+CONFIG_X86_CHECK_BIOS_CORRUPTION=y
+# CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK is not set
+CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=1
+# CONFIG_ARCH_RANDOM is not set
+# CONFIG_SECCOMP is not set
+CONFIG_HZ_100=y
+CONFIG_KEXEC=y
+CONFIG_PHYSICAL_START=0x1200000
+# CONFIG_RELOCATABLE is not set
+CONFIG_FB_EARLYSUSPEND=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_RUNTIME=y
+CONFIG_PM_DEBUG=y
+CONFIG_PM_ADVANCED_DEBUG=y
+CONFIG_SUSPEND_TIME=y
+# CONFIG_ACPI is not set
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_STAT_DETAILS=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE=y
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_X86_SFI_CPUFREQ=y
+CONFIG_CPU_IDLE=y
+CONFIG_INTEL_IDLE=y
+CONFIG_PCI_MMCONFIG=y
+CONFIG_PCIEPORTBUS=y
+# CONFIG_PCIEAER is not set
+CONFIG_PCIEASPM_PERFORMANCE=y
+CONFIG_PCI_MSI=y
+CONFIG_BINFMT_MISC=y
+CONFIG_IA32_EMULATION=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_SUB_POLICY=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+CONFIG_TCP_MD5SIG=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_MATCH_CLUSTER=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_TARGET_REJECT_SKERR=y
+CONFIG_NF_NAT_IPV4=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_AH=y
+CONFIG_IP6_NF_MATCH_EUI64=y
+CONFIG_IP6_NF_MATCH_FRAG=y
+CONFIG_IP6_NF_MATCH_OPTS=y
+CONFIG_IP6_NF_MATCH_HL=y
+CONFIG_IP6_NF_MATCH_IPV6HEADER=y
+CONFIG_IP6_NF_MATCH_MH=y
+CONFIG_IP6_NF_MATCH_RT=y
+CONFIG_IP6_NF_TARGET_HL=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_TARGET_REJECT_SKERR=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_L2TP=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_CLS_U32=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_DNS_RESOLVER=y
+CONFIG_CFG80211=y
+CONFIG_CFG80211_ALLOW_RECONNECT=y
+CONFIG_MAC80211=y
+CONFIG_RFKILL=y
+CONFIG_NFC=y
+CONFIG_DEBUG_DEVRES=y
+CONFIG_CONNECTOR=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=16384
+CONFIG_INTEL_MID_PTI=y
+CONFIG_INTEL_PTI_STM=y
+CONFIG_MONZA_X=y
+CONFIG_UID_STAT=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_ATA_PIIX=y
+CONFIG_PATA_AMD=y
+CONFIG_PATA_OLDPIIX=y
+CONFIG_PATA_SCH=y
+CONFIG_PATA_MPIIX=y
+CONFIG_ATA_GENERIC=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_MIRROR=y
+CONFIG_DM_ZERO=y
+CONFIG_DM_UEVENT=y
+CONFIG_NETDEVICES=y
+CONFIG_NETCONSOLE=y
+CONFIG_TUN=y
+CONFIG_BNX2=y
+CONFIG_TIGON3=y
+CONFIG_NET_TULIP=y
+CONFIG_E100=y
+CONFIG_E1000=y
+CONFIG_E1000E=y
+# CONFIG_NET_VENDOR_MICROCHIP is not set
+CONFIG_NE2K_PCI=y
+CONFIG_FORCEDETH=y
+# CONFIG_NET_PACKET_ENGINE is not set
+CONFIG_8139TOO=y
+# CONFIG_8139TOO_PIO is not set
+CONFIG_R8169=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+CONFIG_USB_USBNET=y
+# CONFIG_USB_NET_AX88179_178A is not set
+# CONFIG_USB_NET_CDCETHER is not set
+# CONFIG_USB_NET_CDC_NCM is not set
+# CONFIG_USB_NET_NET1080 is not set
+# CONFIG_USB_NET_CDC_SUBSET is not set
+# CONFIG_USB_NET_ZAURUS is not set
+CONFIG_WIFI_CONTROL_FUNC=y
+CONFIG_WIFI_PLATFORM_DATA=y
+CONFIG_BCMDHD=y
+CONFIG_BCMDHD_SDIO=y
+CONFIG_BCM4339=y
+CONFIG_INPUT_SPARSEKMAP=y
+# CONFIG_INPUT_MOUSEDEV is not set
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_KEYRESET=y
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_KEYBOARD_GPIO_POLLED=y
+# CONFIG_MOUSE_PS2 is not set
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_JOYSTICK_XPAD=y
+CONFIG_JOYSTICK_XPAD_FF=y
+CONFIG_JOYSTICK_XPAD_LEDS=y
+CONFIG_INPUT_TABLET=y
+CONFIG_TABLET_USB_ACECAD=y
+CONFIG_TABLET_USB_AIPTEK=y
+CONFIG_TABLET_USB_GTCO=y
+CONFIG_TABLET_USB_HANWANG=y
+CONFIG_TABLET_USB_KBTAB=y
+CONFIG_TABLET_USB_WACOM=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_KEYCHORD=y
+CONFIG_INPUT_UINPUT=y
+CONFIG_VT_HW_CONSOLE_BINDING=y
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_NONSTANDARD=y
+CONFIG_N_GSM=y
+CONFIG_TRACE_ROUTER=y
+CONFIG_TRACE_SINK=y
+CONFIG_SERIAL_MRST_MAX3110=y
+CONFIG_NVRAM=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_GPIO=y
+CONFIG_SPI=y
+CONFIG_SPI_GPIO=y
+CONFIG_SPI_INTEL_MID_SSP=y
+CONFIG_SPI_DESIGNWARE=y
+CONFIG_SPI_DW_PCI=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIODEBUG=y
+CONFIG_GPIO_PCA953X=y
+CONFIG_GPIO_LANGWELL=y
+CONFIG_POWER_SUPPLY_CHARGING_ALGO_PSE=y
+CONFIG_BATTERY_MAX17042=y
+CONFIG_SENSORS_CORETEMP=y
+CONFIG_SENSORS_CORETEMP_INTERRUPT=y
+CONFIG_MSIC_GPADC=y
+CONFIG_THERMAL=y
+CONFIG_THERMAL_EMULATION=y
+CONFIG_SOC_THERMAL=y
+CONFIG_DEBUG_THERMAL=y
+CONFIG_INTEL_MOOR_THERMAL=y
+CONFIG_WATCHDOG=y
+CONFIG_INTEL_SCU_WATCHDOG_EVO=y
+CONFIG_DISABLE_SCU_WATCHDOG=y
+CONFIG_MFD_INTEL_MSIC=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_AGP=y
+CONFIG_AGP_AMD64=y
+CONFIG_AGP_INTEL=y
+CONFIG_DRM=y
+CONFIG_INTEL_NO_FB_PANIC_NOTIFY=y
+CONFIG_VIDEO_OUTPUT_CONTROL=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_TILEBLITTING=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+# CONFIG_LCD_CLASS_DEVICE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SOC=y
+CONFIG_SND_MOOR_MACHINE=y
+# CONFIG_SND_MOOR_DPCM is not set
+CONFIG_SST_DPCM=y
+CONFIG_HIDRAW=y
+CONFIG_UHID=y
+CONFIG_HID_A4TECH=y
+CONFIG_HID_ACRUX=y
+CONFIG_HID_ACRUX_FF=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_BELKIN=y
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CHICONY=y
+CONFIG_HID_PRODIKEYS=y
+CONFIG_HID_CYPRESS=y
+CONFIG_HID_DRAGONRISE=y
+CONFIG_DRAGONRISE_FF=y
+CONFIG_HID_EMS_FF=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_EZKEY=y
+CONFIG_HID_HOLTEK=y
+CONFIG_HID_KEYTOUCH=y
+CONFIG_HID_KYE=y
+CONFIG_HID_UCLOGIC=y
+CONFIG_HID_WALTOP=y
+CONFIG_HID_GYRATION=y
+CONFIG_HID_TWINHAN=y
+CONFIG_HID_KENSINGTON=y
+CONFIG_HID_LCPOWER=y
+CONFIG_HID_LOGITECH=y
+CONFIG_HID_LOGITECH_DJ=y
+CONFIG_LOGITECH_FF=y
+CONFIG_LOGIRUMBLEPAD2_FF=y
+CONFIG_LOGIG940_FF=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MONTEREY=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_HID_NTRIG=y
+CONFIG_HID_ORTEK=y
+CONFIG_HID_PANTHERLORD=y
+CONFIG_PANTHERLORD_FF=y
+CONFIG_HID_PETALYNX=y
+CONFIG_HID_PICOLCD=y
+CONFIG_HID_PRIMAX=y
+CONFIG_HID_ROCCAT=y
+CONFIG_HID_SAITEK=y
+CONFIG_HID_SAMSUNG=y
+CONFIG_HID_SONY=y
+CONFIG_HID_SPEEDLINK=y
+CONFIG_HID_SUNPLUS=y
+CONFIG_HID_GREENASIA=y
+CONFIG_GREENASIA_FF=y
+CONFIG_HID_SMARTJOYPLUS=y
+CONFIG_SMARTJOYPLUS_FF=y
+CONFIG_HID_TIVO=y
+CONFIG_HID_TOPSEED=y
+CONFIG_HID_THRUSTMASTER=y
+CONFIG_HID_WACOM=y
+CONFIG_HID_WIIMOTE=y
+CONFIG_HID_ZEROPLUS=y
+CONFIG_HID_ZYDACRON=y
+CONFIG_HID_PID=y
+CONFIG_USB_HIDDEV=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_MON=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_HCD_HSIC=y
+CONFIG_USB_ACM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC3_GADGET=y
+# CONFIG_USB_DWC3_OMAP is not set
+# CONFIG_USB_DWC3_EXYNOS is not set
+# CONFIG_USB_DWC3_PCI is not set
+CONFIG_USB_DWC3_OTG=y
+CONFIG_USB_DWC3_INTEL_MRFL=y
+CONFIG_USB_DWC3_HOST_INTEL=y
+CONFIG_USB_SERIAL=y
+CONFIG_USB_SERIAL_PL2303=y
+CONFIG_USB_TEST_MODE=y
+CONFIG_USB_PHY=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_G_ANDROID=y
+CONFIG_MMC=y
+CONFIG_MMC_UNSAFE_RESUME=y
+CONFIG_MMC_BLOCK_MINORS=10
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PCI=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_SWITCH=y
+CONFIG_SWITCH_GPIO=y
+CONFIG_EDAC=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS_WAKEUP_FROM_LPSTATES=y
+CONFIG_RTC_DRV_CMOS_DAYOFMONTH_ALARM=y
+# CONFIG_RTC_DRV_VRTC is not set
+CONFIG_DMADEVICES=y
+CONFIG_INTEL_MID_DMAC=y
+CONFIG_STAGING=y
+CONFIG_DX_SEP54=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_LOGGER=y
+CONFIG_ANDROID_LOGGER_PTI=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_ANDROID_INTF_ALARM_DEV=y
+CONFIG_SYNC=y
+CONFIG_SW_SYNC=y
+CONFIG_SW_SYNC_USER=y
+CONFIG_GPIO_INTEL_PMIC=y
+CONFIG_INTEL_MID_POWER_BUTTON=y
+CONFIG_INTEL_MODEM_NVRAM=y
+CONFIG_INTEL_MID_REMOTEPROC=y
+CONFIG_RPMSG_IPC=y
+CONFIG_PM_DEVFREQ=y
+CONFIG_DEVFREQ_GOV_USERSPACE=y
+CONFIG_IIO=y
+CONFIG_IIO_BASINCOVE_GPADC=y
+CONFIG_SCU_LOGGING=y
+CONFIG_UUID=y
+CONFIG_EMMC_IPANIC=y
+CONFIG_KCT_DAEMON=y
+CONFIG_FPS_THROTTLE=y
+CONFIG_BCM_BT_LPM=y
+# CONFIG_RAWIO is not set
+CONFIG_TRACEPOINT_TO_EVENT=y
+CONFIG_I2C_DESIGNWARE_CORE_FORK=y
+CONFIG_I2C_DESIGNWARE_PCI_FORK=y
+CONFIG_I2C_PMIC=y
+CONFIG_PMIC_CCSM=y
+CONFIG_BQ24261_CHARGER=y
+CONFIG_GPIO_INTEL_MSIC=y
+CONFIG_INTEL_PSH_IPC=y
+CONFIG_INTEL_REBOOT_TARGET=y
+CONFIG_SERIAL_MFD_HSU_EXT=y
+CONFIG_SERIAL_MFD_HSU_EXT_CONSOLE=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4_FORK=y
+CONFIG_DRM_INTEL_MID=y
+CONFIG_SUPPORT_HDMI=y
+CONFIG_SUPPORT_MIPI=y
+CONFIG_DRM_INTEL_HANDSET=y
+CONFIG_GFX_RGX_BVNC="1.72.4.12"
+CONFIG_DRM_MRFLD=y
+CONFIG_SUPPORT_VSP=y
+CONFIG_MOOREFIELD=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_FANOTIFY=y
+CONFIG_FUSE_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_PSTORE_FTRACE=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_UTF8=y
+CONFIG_PRINTK_TIME=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_SECTION_MISMATCH=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+CONFIG_DEBUG_KMEMLEAK=y
+CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_DEBUG_STACK_USAGE=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_DEBUG_LIST=y
+CONFIG_DEBUG_SG=y
+CONFIG_DEBUG_NOTIFIERS=y
+CONFIG_BOOT_PRINTK_DELAY=y
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+CONFIG_RCU_CPU_STALL_INFO=y
+CONFIG_FUNCTION_TRACER=y
+CONFIG_IRQSOFF_TRACER=y
+CONFIG_PREEMPT_TRACER=y
+CONFIG_SCHED_TRACER=y
+CONFIG_FTRACE_SYSCALLS=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_PROVIDE_OHCI1394_DMA_INIT=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_EARLY_PRINTK_INTEL_MID=y
+# CONFIG_DEBUG_RODATA_TEST is not set
+CONFIG_DEBUG_BOOT_PARAMS=y
+CONFIG_OPTIMIZE_INLINING=y
+CONFIG_INTEL_MID_PSTORE_RAM=y
+CONFIG_KEYS=y
+CONFIG_KEYS_DEBUG_PROC_KEYS=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM=y
+CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_CRYPTO_NULL=y
+CONFIG_CRYPTO_CCM=y
+CONFIG_CRYPTO_SHA1_SSSE3=y
+CONFIG_CRYPTO_SHA256_SSSE3=y
+CONFIG_CRYPTO_SHA512_SSSE3=y
+CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=y
+CONFIG_CRYPTO_AES_NI_INTEL=y
+CONFIG_CRYPTO_BLOWFISH_X86_64=y
+CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64=y
+CONFIG_CRYPTO_CAST5_AVX_X86_64=y
+CONFIG_CRYPTO_CAST6_AVX_X86_64=y
+CONFIG_CRYPTO_SALSA20_X86_64=y
+CONFIG_CRYPTO_SERPENT_SSE2_X86_64=y
+CONFIG_CRYPTO_SERPENT_AVX2_X86_64=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_TWOFISH_AVX_X86_64=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_ASYMMETRIC_KEY_TYPE=y
+CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y
+CONFIG_PUBLIC_KEY_ALGO_RSA=y
+CONFIG_X509_CERTIFICATE_PARSER=y
+CONFIG_CRC_CCITT=y
+CONFIG_CRC32_BIT=y
+CONFIG_XZ_DEC=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index cf1a471..10adb41 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -459,7 +459,7 @@
 		else
 			put_user_ex(0, &frame->uc.uc_flags);
 		put_user_ex(0, &frame->uc.uc_link);
-		err |= __compat_save_altstack(&frame->uc.uc_stack, regs->sp);
+		compat_save_altstack_ex(&frame->uc.uc_stack, regs->sp);
 
 		if (ksig->ka.sa.sa_flags & SA_RESTORER)
 			restorer = ksig->ka.sa.sa_restorer;
diff --git a/arch/x86/include/asm/apb_timer.h b/arch/x86/include/asm/apb_timer.h
index 0acbac2..2a67dda 100644
--- a/arch/x86/include/asm/apb_timer.h
+++ b/arch/x86/include/asm/apb_timer.h
@@ -44,6 +44,7 @@
 
 static inline unsigned long apbt_quick_calibrate(void) {return 0; }
 static inline void apbt_time_init(void) { }
+static inline void apbt_setup_secondary_clock(void) { }
 
 #endif
 #endif /* ASM_X86_APBT_H */
diff --git a/arch/x86/include/asm/bcm_bt_lpm.h b/arch/x86/include/asm/bcm_bt_lpm.h
new file mode 100644
index 0000000..248371f
--- /dev/null
+++ b/arch/x86/include/asm/bcm_bt_lpm.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2009 Google, Inc.
+*
+* This software is licensed under the terms of the GNU General Public
+* License version 2, as published by the Free Software Foundation, and
+* may be copied, distributed, and modified under those terms.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*/
+
+#ifndef BCM_BT_LMP_H
+#define BCM_BT_LMP_H
+
+struct bcm_bt_lpm_platform_data {
+	int gpio_wake;		/* CPU -> BCM wakeup gpio */
+	int gpio_host_wake;	/* BCM -> CPU wakeup gpio */
+	int int_host_wake;	/* BCM -> CPU wakeup irq */
+	int gpio_enable;	/* GPIO enable/disable BT/FM */
+
+	int port;		/* UART port to use with BT/FM */
+};
+
+#endif
diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h
index 653668d..4a8cb8d 100644
--- a/arch/x86/include/asm/bootparam_utils.h
+++ b/arch/x86/include/asm/bootparam_utils.h
@@ -35,9 +35,9 @@
 	 */
 	if (boot_params->sentinel) {
 		/* fields in boot_params are left uninitialized, clear them */
-		memset(&boot_params->olpc_ofw_header, 0,
+		memset(&boot_params->ext_ramdisk_image, 0,
 		       (char *)&boot_params->efi_info -
-			(char *)&boot_params->olpc_ofw_header);
+			(char *)&boot_params->ext_ramdisk_image);
 		memset(&boot_params->kbd_status, 0,
 		       (char *)&boot_params->hdr -
 		       (char *)&boot_params->kbd_status);
diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
index 46fc474..f50de69 100644
--- a/arch/x86/include/asm/checksum_32.h
+++ b/arch/x86/include/asm/checksum_32.h
@@ -49,9 +49,15 @@
 						 int len, __wsum sum,
 						 int *err_ptr)
 {
+	__wsum ret;
+
 	might_sleep();
-	return csum_partial_copy_generic((__force void *)src, dst,
-					 len, sum, err_ptr, NULL);
+	stac();
+	ret = csum_partial_copy_generic((__force void *)src, dst,
+					len, sum, err_ptr, NULL);
+	clac();
+
+	return ret;
 }
 
 /*
@@ -176,10 +182,16 @@
 					   int len, __wsum sum,
 					   int *err_ptr)
 {
+	__wsum ret;
+
 	might_sleep();
-	if (access_ok(VERIFY_WRITE, dst, len))
-		return csum_partial_copy_generic(src, (__force void *)dst,
-						 len, sum, NULL, err_ptr);
+	if (access_ok(VERIFY_WRITE, dst, len)) {
+		stac();
+		ret = csum_partial_copy_generic(src, (__force void *)dst,
+						len, sum, NULL, err_ptr);
+		clac();
+		return ret;
+	}
 
 	if (len)
 		*err_ptr = -EFAULT;
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index e99ac27..4af181d 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -365,7 +365,7 @@
 static __always_inline __pure bool __static_cpu_has(u16 bit)
 {
 #if __GNUC__ > 4 || __GNUC_MINOR__ >= 5
-		asm goto("1: jmp %l[t_no]\n"
+		asm_volatile_goto("1: jmp %l[t_no]\n"
 			 "2:\n"
 			 ".section .altinstructions,\"a\"\n"
 			 " .long 1b - .\n"
diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
index cccd07f..779c2ef 100644
--- a/arch/x86/include/asm/e820.h
+++ b/arch/x86/include/asm/e820.h
@@ -29,7 +29,7 @@
 extern int e820_search_gap(unsigned long *gapstart, unsigned long *gapsize,
 			unsigned long start_addr, unsigned long long end_addr);
 struct setup_data;
-extern void parse_e820_ext(struct setup_data *data);
+extern void parse_e820_ext(u64 phys_addr, u32 data_len);
 
 #if defined(CONFIG_X86_64) || \
 	(defined(CONFIG_X86_32) && defined(CONFIG_HIBERNATION))
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 0dc7d9e..b8e3120 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -119,6 +119,7 @@
 	FIX_TEXT_POKE0, /* first page is last, because allocation is backward */
 #ifdef	CONFIG_X86_INTEL_MID
 	FIX_LNW_VRTC,
+	FIX_CLOCK_CTL,
 #endif
 	__end_of_permanent_fixed_addresses,
 
diff --git a/arch/x86/include/asm/idle.h b/arch/x86/include/asm/idle.h
index 02bab09..22a60b1 100644
--- a/arch/x86/include/asm/idle.h
+++ b/arch/x86/include/asm/idle.h
@@ -1,14 +1,8 @@
 #ifndef _ASM_X86_IDLE_H
 #define _ASM_X86_IDLE_H
 
-#ifdef CONFIG_X86_64
 void enter_idle(void);
 void exit_idle(void);
-#else /* !CONFIG_X86_64 */
-static inline void enter_idle(void) { }
-static inline void exit_idle(void) { }
-static inline void __exit_idle(void) { }
-#endif /* CONFIG_X86_64 */
 
 void amd_e400_remove_cpu(int cpu);
 
diff --git a/arch/x86/include/asm/intel-mid.h b/arch/x86/include/asm/intel-mid.h
new file mode 100644
index 0000000..0d79773
--- /dev/null
+++ b/arch/x86/include/asm/intel-mid.h
@@ -0,0 +1,213 @@
+/*
+ * intel-mid.h: Intel MID specific setup code
+ *
+ * (C) Copyright 2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _ASM_X86_INTEL_MID_H
+#define _ASM_X86_INTEL_MID_H
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/sfi.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <asm/intel_mid_pcihelpers.h>
+
+#ifdef CONFIG_SFI
+extern int get_gpio_by_name(const char *name);
+extern void install_irq_resource(struct platform_device *pdev, int irq);
+#else
+static inline int get_gpio_by_name(const char *name) { return -ENODEV; }
+/* Dummy function to prevent compilation error in byt */
+static inline void install_irq_resource(struct platform_device *pdev, int irq)
+{};
+#endif
+
+extern int intel_mid_pci_init(void);
+extern void *get_oem0_table(void);
+extern void intel_delayed_device_register(void *dev,
+			void (*delayed_callback)(void *dev_desc));
+extern void intel_scu_device_register(struct platform_device *pdev);
+extern struct devs_id *get_device_id(u8 type, char *name);
+extern int __init sfi_parse_mrtc(struct sfi_table_header *table);
+extern int __init sfi_parse_mtmr(struct sfi_table_header *table);
+extern int sfi_mrtc_num;
+extern struct sfi_rtc_table_entry sfi_mrtc_array[];
+extern void *get_oem0_table(void);
+extern void register_rpmsg_service(char *name, int id, u32 addr);
+extern int sdhci_pci_request_regulators(void);
+extern unsigned int sfi_get_watchdog_irq(void);
+
+/* OEMB table */
+struct sfi_table_oemb {
+	struct sfi_table_header header;
+	u32 board_id;
+	u32 board_fab;
+	u8 iafw_major_version;
+	u8 iafw_main_version;
+	u8 val_hooks_major_version;
+	u8 val_hooks_minor_version;
+	u8 ia_suppfw_major_version;
+	u8 ia_suppfw_minor_version;
+	u8 scu_runtime_major_version;
+	u8 scu_runtime_minor_version;
+	u8 ifwi_major_version;
+	u8 ifwi_minor_version;
+} __packed;
+
+/*
+ * Here defines the array of devices platform data that IAFW would export
+ * through SFI "DEVS" table, we use name and type to match the device and
+ * its platform data.
+ */
+struct devs_id {
+	char name[SFI_NAME_LEN + 1];
+	u8 type;
+	u8 delay;
+	void *(*get_platform_data)(void *info);
+	void (*device_handler)(struct sfi_device_table_entry *pentry,
+				struct devs_id *dev);
+	/* Custom handler for devices */
+	u8 trash_itp;/* true if this driver uses pin muxed with XDB connector */
+};
+
+#define SD_NAME_SIZE 16
+/**
+ * struct sd_board_info - template for device creation
+ * @name: Initializes sdio_device.name; identifies the driver.
+ * @bus_num: board-specific identifier for a given SDIO controller.
+ * @board_ref_clock: Initializes sd_device.board_ref_clock;
+ * @platform_data: Initializes sd_device.platform_data; the particular
+ *      data stored there is driver-specific.
+ *
+ */
+struct sd_board_info {
+	char            name[SD_NAME_SIZE];
+	int             bus_num;
+	unsigned short  addr;
+	u32             board_ref_clock;
+	void            *platform_data;
+};
+
+
+/*
+ * Medfield is the follow-up of Moorestown, it combines two chip solution into
+ * one. Other than that it also added always-on and constant tsc and lapic
+ * timers. Medfield is the platform name, and the chip name is called Penwell
+ * we treat Medfield/Penwell as a variant of Moorestown. Penwell can be
+ * identified via MSRs.
+ */
+enum intel_mid_cpu_type {
+	INTEL_CPU_CHIP_NOTMID = 0,
+	/* 1 was Moorestown */
+	INTEL_MID_CPU_CHIP_PENWELL = 2,
+	INTEL_MID_CPU_CHIP_CLOVERVIEW,
+	INTEL_MID_CPU_CHIP_TANGIER,
+	INTEL_MID_CPU_CHIP_VALLEYVIEW2,
+	INTEL_MID_CPU_CHIP_ANNIEDALE,
+};
+
+extern enum intel_mid_cpu_type __intel_mid_cpu_chip;
+
+/**
+ * struct intel_mid_ops - Interface between intel-mid & sub archs
+ * @arch_setup: arch_setup function to re-initialize platform
+ *             structures (x86_init, x86_platform_init)
+ *
+ * This structure can be extended if any new interface is required
+ * between intel-mid & its sub arch files.
+ */
+struct intel_mid_ops {
+	void (*arch_setup)(void);
+};
+
+/* Helper API's for INTEL_MID_OPS_INIT */
+#define DECLARE_INTEL_MID_OPS_INIT(cpuname, cpuid)[cpuid] = \
+		get_##cpuname##_ops,
+
+/* Maximum number of CPU ops */
+#define MAX_CPU_OPS(a) (sizeof(a)/sizeof(void *))
+
+/*
+ * For every new cpu addition, a weak get_<cpuname>_ops() function needs be
+ * declared in arch/x86/platform/intel_mid/intel_mid_weak_decls.h.
+ */
+#define INTEL_MID_OPS_INIT {\
+	DECLARE_INTEL_MID_OPS_INIT(penwell, INTEL_MID_CPU_CHIP_PENWELL) \
+	DECLARE_INTEL_MID_OPS_INIT(cloverview, INTEL_MID_CPU_CHIP_CLOVERVIEW) \
+	DECLARE_INTEL_MID_OPS_INIT(tangier, INTEL_MID_CPU_CHIP_TANGIER) \
+	DECLARE_INTEL_MID_OPS_INIT(anniedale, INTEL_MID_CPU_CHIP_ANNIEDALE) \
+};
+
+static inline enum intel_mid_cpu_type intel_mid_identify_cpu(void)
+{
+#ifdef CONFIG_X86_INTEL_MID
+	return __intel_mid_cpu_chip;
+#else
+	return INTEL_CPU_CHIP_NOTMID;
+#endif
+}
+
+enum intel_mid_timer_options {
+	INTEL_MID_TIMER_DEFAULT,
+	INTEL_MID_TIMER_APBT_ONLY,
+	INTEL_MID_TIMER_LAPIC_APBT,
+};
+
+extern enum intel_mid_timer_options intel_mid_timer_options;
+
+/*
+ * Penwell uses spread spectrum clock, so the freq number is not exactly
+ * the same as reported by MSR based on SDM.
+ */
+#define FSB_FREQ_83SKU	83200
+#define FSB_FREQ_100SKU	99840
+#define FSB_FREQ_133SKU	133120
+
+#define FSB_FREQ_167SKU	167000
+#define FSB_FREQ_200SKU	200000
+#define FSB_FREQ_267SKU	267000
+#define FSB_FREQ_333SKU	333000
+#define FSB_FREQ_400SKU	400000
+
+/* Bus Select SoC Fuse value */
+#define BSEL_SOC_FUSE_MASK	0x7
+#define BSEL_SOC_FUSE_001	0x1 /* FSB 133MHz */
+#define BSEL_SOC_FUSE_101	0x5 /* FSB 100MHz */
+#define BSEL_SOC_FUSE_111	0x7 /* FSB 83MHz */
+
+#define SFI_MTMR_MAX_NUM 8
+#define SFI_MRTC_MAX	8
+
+extern struct console early_mrst_console;
+extern void mrst_early_console_init(void);
+
+extern struct console early_mrfld_console;
+extern void mrfld_early_console_init(void);
+
+extern struct console early_hsu_console;
+extern void hsu_early_console_init(const char *);
+
+extern struct console early_pti_console;
+
+extern void intel_scu_devices_create(void);
+extern void intel_scu_devices_destroy(void);
+extern void intel_psh_devices_create(void);
+extern void intel_psh_devices_destroy(void);
+
+/* VRTC timer */
+#define MRST_VRTC_MAP_SZ	(1024)
+/*#define MRST_VRTC_PGOFFSET	(0xc00) */
+
+extern void intel_mid_rtc_init(void);
+
+#define INTEL_MID_IRQ_OFFSET 0x100
+
+extern void pstore_ram_reserve_memory(void);
+
+#endif /* _ASM_X86_INTEL_MID_H */
diff --git a/arch/x86/include/asm/intel_basincove_gpadc.h b/arch/x86/include/asm/intel_basincove_gpadc.h
new file mode 100644
index 0000000..cb81c06
--- /dev/null
+++ b/arch/x86/include/asm/intel_basincove_gpadc.h
@@ -0,0 +1,102 @@
+#ifndef __INTEL_BASINCOVE_GPADC_H__
+#define __INTEL_BASINCOVE_GPADC_H__
+
+#define GPADC_VBAT	(1 << 0)
+#define GPADC_BATID	(1 << 1)
+#define GPADC_IBAT	(1 << 2)
+#define GPADC_PMICTEMP	(1 << 3)
+#define GPADC_BATTEMP0	(1 << 4)
+#define GPADC_BATTEMP1	(1 << 5)
+#define GPADC_SYSTEMP0	(1 << 6)
+#define GPADC_SYSTEMP1	(1 << 7)
+#define GPADC_SYSTEMP2	(1 << 8)
+#define GPADC_USBID	(1 << 9)
+#define GPADC_PEAK	(1 << 10)
+#define GPADC_ADC	(1 << 11)
+#define GPADC_VREF	(1 << 12)
+
+#define BCOVE_GPADC_CH_NUM	9
+#define SCOVE_GPADC_CH_NUM	12
+
+#define MUSBID		(1 << 0)
+#define MPEAK		(1 << 1)
+#define MBATTEMP	(1 << 2)
+#define MSYSTEMP	(1 << 3)
+#define MBATT		(1 << 4)
+#define MVIBATT		(1 << 5)
+#define MGPMEAS		(1 << 6)
+#define MCCTICK		(1 << 7)
+
+#define GPADC_RSL(channel, res) (res->data[ffs(channel)-1])
+
+#define PMIC_ID_ADDR    0x00
+#define PMIC_VENDOR_ID_MASK     (0x03 << 6)
+#define PMIC_MINOR_REV_MASK     0x07
+#define PMIC_MAJOR_REV_MASK     (0x07 << 3)
+#define BASINCOVE_VENDORID      (0x03 << 6)
+#define SHADYCOVE_VENDORID      0x00
+
+#define PMIC_MAJOR_REV_A0       0x00
+#define PMIC_MAJOR_REV_B0       (0x01 << 3)
+#define PMIC_MINOR_REV_X0       0x00
+#define PMIC_MINOR_REV_X1       (0x01 << 0)
+
+/* Register on I2C-dev2-0x6E */
+#define PMIC_SPARE03_ADDR	0x010B
+#define PMIC_PROVISIONED	(0x01 << 1)
+#define PMIC_PROV_MASK		(0x03 << 0)
+
+enum gpadc_channel_type {
+	PMIC_GPADC_CHANNEL_VBUS = 0,
+	PMIC_GPADC_CHANNEL_BATID,
+	PMIC_GPADC_CHANNEL_PMICTEMP,
+	PMIC_GPADC_CHANNEL_BATTEMP0,
+	PMIC_GPADC_CHANNEL_BATTEMP1,
+	PMIC_GPADC_CHANNEL_SYSTEMP0,
+	PMIC_GPADC_CHANNEL_SYSTEMP1,
+	PMIC_GPADC_CHANNEL_SYSTEMP2,
+	PMIC_GPADC_CHANNEL_USBID,
+	PMIC_GPADC_CHANNEL_PEAK,
+	PMIC_GPADC_CHANNEL_AGND,
+	PMIC_GPADC_CHANNEL_VREF,
+};
+
+struct gpadc_regmap_t {
+	char *name;
+	int cntl;       /* GPADC Conversion Control Bit indicator */
+	int rslth;      /* GPADC Conversion Result Register Addr High */
+	int rsltl;      /* GPADC Conversion Result Register Addr Low */
+};
+
+struct gpadc_regs_t {
+	u16 gpadcreq;
+	u16 gpadcreq_irqen;
+	u16 gpadcreq_busy;
+	u16 mirqlvl1;
+	u16 mirqlvl1_adc;
+	u16 adc1cntl;
+	u16 adcirq;
+	u16 madcirq;
+};
+
+struct iio_dev;
+
+struct intel_basincove_gpadc_platform_data {
+	int channel_num;
+	unsigned long intr;
+	u8 intr_mask;
+	struct iio_map *gpadc_iio_maps;
+	struct gpadc_regmap_t *gpadc_regmaps;
+	struct gpadc_regs_t *gpadc_regs;
+	const struct iio_chan_spec *gpadc_channels;
+};
+
+struct gpadc_result {
+	int data[SCOVE_GPADC_CH_NUM];
+};
+
+int iio_basincove_gpadc_sample(struct iio_dev *indio_dev,
+				int ch, struct gpadc_result *res);
+
+int intel_basincove_gpadc_sample(int ch, struct gpadc_result *res);
+#endif
diff --git a/arch/x86/include/asm/intel_basincove_ocd.h b/arch/x86/include/asm/intel_basincove_ocd.h
new file mode 100644
index 0000000..faba651
--- /dev/null
+++ b/arch/x86/include/asm/intel_basincove_ocd.h
@@ -0,0 +1,162 @@
+#ifndef __INTEL_BASINCOVE_OCD_H__
+#define __INTEL_BASINCOVE_OCD_H__
+
+#define DRIVER_NAME "bcove_bcu"
+#define DEVICE_NAME "mrfl_pmic_bcu"
+
+/* Generic bit representaion macros */
+#define B0	(1 << 0)
+#define B1	(1 << 1)
+#define B2	(1 << 2)
+#define B3	(1 << 3)
+#define B4	(1 << 4)
+#define B5	(1 << 5)
+#define B6	(1 << 6)
+#define B7	(1 << 7)
+
+/* 30 seconds delay macro for VWARN1 interrupt Unmask (enable) */
+#define VWARN2_INTR_EN_DELAY	(30 * HZ)
+
+/* IRQ registers */
+#define BCUIRQ                  0x05
+#define IRQLVL1                 0x01
+#define MIRQLVL1                0x0C
+
+/* Status registers */
+#define S_BCUINT                0x3B
+#define S_BCUCTRL               0x49
+
+/* PMIC SRAM address for BCU register */
+#define PMIC_SRAM_BCU_ADDR      0xFFFFF614
+#define IOMAP_LEN               1
+
+#define NUM_VOLT_LEVELS         3
+#define NUM_CURR_LEVELS         2
+
+#define VWARN_EN_MASK		B3
+#define ICCMAXVCC_EN_MASK	B6
+
+#define MVWARN1_MASK		B0
+#define MVWARN2_MASK		B1
+#define MVCRIT_MASK		B2
+
+#define MVCRIT			B2
+#define MVWARN2			B1
+#define MVWARN1			B0
+
+#define ICCMAXVCC_EN		(1 << 6)
+#define VWARN_EN		(1 << 3)
+#define VCRIT_SHUTDOWN		(1 << 4)
+
+#define BCU_ALERT               (1 << 3)
+#define VWARN1_IRQ              (1 << 0)
+#define VWARN2_IRQ              (1 << 1)
+#define VCRIT_IRQ               (1 << 2)
+#define GSMPULSE_IRQ            (1 << 3)
+#define TXPWRTH_IRQ             (1 << 4)
+
+/* Number of configurable thresholds for current and voltage */
+#define NUM_THRESHOLDS          8
+
+/* BCU real time status flags for corresponding input signals */
+#define SVWARN1                 (1<<0)
+#define SVWARN2                 (1<<1)
+#define SVCRIT                  (1<<2)
+
+/* S_BCUCTRL register status bits */
+#define S_CAMFLTORCH		B3
+#define S_CAMFLDIS		B2
+#define S_BCUDISW2		B1
+
+#define S_BCUDISW2_MASK		B1
+#define S_CAMFLDIS_MASK		B2
+#define S_CAMFLTORCH_MASK	B3
+
+/* check whether bit is sticky or not by checking 5th bit */
+#define IS_STICKY(data)         (!!(data & 0x10))
+
+/* check whether signal asserted for VW1/VW2/VC */
+#define IS_ASSRT_ON_VW1(data)   (!!(data & 0x01))
+#define IS_ASSRT_ON_VW2(data)   (!!(data & 0x02))
+#define IS_ASSRT_ON_VC(data)    (!!(data & 0x04))
+
+/* Configuration registers that monitor the voltage drop */
+#define VWARN1_CFG              0x3C
+#define VWARN2_CFG              0x3D
+#define VCRIT_CFG               0x3E
+#define ICCMAXVSYS_CFG          0x3F
+#define ICCMAXVCC_CFG           0x40
+#define ICCMAXVNN_CFG           0x41
+
+/* Behaviour registers */
+#define VFLEXSRC_BEH            0x42
+#define VFLEXDIS_BEH            0x43
+#define VIBDIS_BEH              0x44
+#define CAMFLTORCH_BEH          0x45
+#define CAMFLDIS_BEH            0x46
+#define BCUDISW2_BEH            0x47
+#define BCUDISCRIT_BEH          0x48
+
+/*IRQ Mask Register*/
+#define MBCUIRQ                 0x10
+
+#define MRFL_SMIP_SRAM_ADDR	0xFFFCE000
+#define MOFD_SMIP_SRAM_ADDR	0xFFFC5C00
+
+/* SMIP offset address from where the BCU related info should be read */
+#define BCU_SMIP_OFFSET		0x3BA
+
+/* No of Bytes we have to read from SMIP from BCU_SMIP_BASE*/
+#define NUM_SMIP_BYTES          14
+
+/* Max length of the register name string */
+#define MAX_REGNAME_LEN		15
+
+/* String to send the uevent along with env info to user space */
+#define EVT_STR	"BCUEVT="
+
+/* Macro to get the mode of acess for the BCU registers	*/
+#define MODE(m)	(((m != S_BCUINT) && (m != BCUIRQ) && (m != IRQLVL1))	\
+			? (S_IRUGO | S_IWUSR) : S_IRUGO)
+
+/* Generic macro to assign the parameters (reg name and address) */
+#define reg_info(x)	{ .name = #x, .addr = x, .mode = MODE(x) }
+
+/* Generic macro to fill the environmental data for bcu uevent */
+#define get_envp(evt)	(EVT_STR#evt)
+
+/*
+* These values are read from SMIP.
+* SMIP contains these entries - default register configurations
+* BCU is programmed to these default values during boot time.
+*/
+
+struct ocd_bcove_config_data {
+	uint8_t vwarn1_cfg;
+	uint8_t vwarn2_cfg;
+	uint8_t vcrit_cfg;
+	uint8_t iccmaxvsys_cfg;
+	uint8_t iccmaxvcc_cfg;
+	uint8_t iccmaxvnn_cfg;
+	uint8_t vflexsrc_beh;
+	uint8_t vflexdis_beh;
+	uint8_t vibdis_beh;
+	uint8_t camfltorch_beh;
+	uint8_t camfldis_beh;
+	uint8_t bcudisw2_beh;
+	uint8_t bcudiscrit_beh;
+	uint8_t mbcuirq;
+} __packed;
+
+struct ocd_platform_data {
+	int (*bcu_config_data) (struct ocd_bcove_config_data *);
+};
+
+struct bcu_reg_info {
+	char	name[MAX_REGNAME_LEN];	/* register name   */
+	u16	addr;			/* offset address  */
+	u16	mode;			/* permission mode */
+};
+
+#endif
+
diff --git a/arch/x86/include/asm/intel_mid_gpadc.h b/arch/x86/include/asm/intel_mid_gpadc.h
new file mode 100644
index 0000000..ba62f83
--- /dev/null
+++ b/arch/x86/include/asm/intel_mid_gpadc.h
@@ -0,0 +1,19 @@
+#ifndef __INTEL_MID_GPADC_H__
+#define __INTEL_MID_GPADC_H__
+
+struct intel_mid_gpadc_platform_data {
+	unsigned long intr;
+};
+
+#define CH_NEED_VREF		(1 << 8)
+#define CH_NEED_VCALIB		(1 << 9)
+#define CH_NEED_ICALIB		(1 << 10)
+
+int intel_mid_gpadc_gsmpulse_sample(int *vol, int *cur);
+int intel_mid_gpadc_sample(void *handle, int sample_count, ...);
+int get_gpadc_sample(void *handle, int sample_count, int *buffer);
+void intel_mid_gpadc_free(void *handle);
+void *intel_mid_gpadc_alloc(int count, ...);
+void *gpadc_alloc_channels(int count, int *channel_info);
+#endif
+
diff --git a/arch/x86/include/asm/intel_mid_hsu.h b/arch/x86/include/asm/intel_mid_hsu.h
new file mode 100644
index 0000000..962f041d
--- /dev/null
+++ b/arch/x86/include/asm/intel_mid_hsu.h
@@ -0,0 +1,78 @@
+#ifndef __INTEL_MID_HSU_H__
+#define __INTEL_MID_HSU_H__
+
+#define hsu_port_func_max 4
+
+enum hsu_core {
+	hsu_pnw,
+	hsu_clv,
+	hsu_tng,
+	hsu_vlv2,
+	hsu_chv,
+};
+
+enum {
+	hsu_port0,
+	hsu_port1,
+	hsu_port2,
+	hsu_port_share,
+	hsu_port_max,
+	hsu_dma,
+};
+
+enum {
+	bt_port,
+	modem_port,
+	gps_port,
+	debug_port,
+};
+
+enum {
+	hsu_intel,
+	hsu_dw,
+};
+
+struct hsu_port_cfg {
+	int type;
+	int hw_ip;
+	int index;
+	char *name;
+	int idle;
+	int has_alt;
+	int alt;
+	int force_suspend;
+	int preamble;
+	int hw_context_save;
+	int hw_ctrl_cts;
+	struct device *dev;
+	int (*hw_init)(struct device *dev, int port);
+	void(*hw_set_alt)(int port);
+	void(*hw_set_rts)(int port, int value);
+	void(*hw_suspend)(int port, struct device *dev, irq_handler_t wake_isr);
+	void(*hw_suspend_post)(int port);
+	void(*hw_resume)(int port, struct device *dev);
+	unsigned int (*hw_get_clk)(void);
+	void (*wake_peer)(struct device *tty);
+	void (*set_clk)(unsigned int m, unsigned int n,
+			void __iomem *addr);
+	void (*hw_reset)(void __iomem *addr);
+};
+
+
+void intel_mid_hsu_suspend(int port, struct device *dev,
+				irq_handler_t wake_isr);
+void intel_mid_hsu_resume(int port, struct device *dev);
+void intel_mid_hsu_rts(int port, int value);
+void intel_mid_hsu_switch(int port);
+int intel_mid_hsu_plat_init(int port, ulong plat, struct device *dev);
+int intel_mid_hsu_init(struct device *dev, int port);
+int intel_mid_hsu_func_to_port(unsigned int func);
+unsigned int intel_mid_hsu_get_clk(void);
+int hsu_register_board_info(void *inf);
+void intel_mid_hsu_suspend_post(int port);
+struct device *intel_mid_hsu_set_wake_peer(int port,
+			void (*wake_peer)(struct device *));
+void intel_mid_hsu_reset(void __iomem *addr);
+void intel_mid_hsu_set_clk(unsigned int m, unsigned int n,
+			void __iomem *addr);
+#endif
diff --git a/arch/x86/include/asm/intel_mid_pcihelpers.h b/arch/x86/include/asm/intel_mid_pcihelpers.h
new file mode 100644
index 0000000..b7c079f
--- /dev/null
+++ b/arch/x86/include/asm/intel_mid_pcihelpers.h
@@ -0,0 +1,35 @@
+/*
+ * Access to message bus through three registers
+ * in CUNIT(0:0:0) PCI configuration space.
+ * MSGBUS_CTRL_REG(0xD0):
+ *   31:24      = message bus opcode
+ *   23:16      = message bus port
+ *   15:8       = message bus address, low 8 bits.
+ *   7:4        = message bus byte enables
+ * MSGBUS_CTRL_EXT_REG(0xD8):
+ *   31:8       = message bus address, high 24 bits.
+ * MSGBUS_DATA_REG(0xD4):
+ *   hold the data for write or read
+ */
+#define PCI_ROOT_MSGBUS_CTRL_REG        0xD0
+#define PCI_ROOT_MSGBUS_DATA_REG        0xD4
+#define PCI_ROOT_MSGBUS_CTRL_EXT_REG    0xD8
+#define PCI_ROOT_MSGBUS_READ            0x10
+#define PCI_ROOT_MSGBUS_WRITE           0x11
+#define PCI_ROOT_MSGBUS_DWORD_ENABLE    0xf0
+
+/* In BYT platform for all internal PCI devices d3 delay
+ * of 3 ms is sufficient. Default value of 10 ms is overkill.
+ */
+#define INTERNAL_PCI_PM_D3_WAIT		3
+
+#define ISP_SUB_CLASS			0x80
+#define SUB_CLASS_MASK			0xFF00
+
+u32 intel_mid_msgbus_read32_raw(u32 cmd);
+u32 intel_mid_msgbus_read32(u8 port, u32 addr);
+void intel_mid_msgbus_write32_raw(u32 cmd, u32 data);
+void intel_mid_msgbus_write32(u8 port, u32 addr, u32 data);
+u32 intel_mid_msgbus_read32_raw_ext(u32 cmd, u32 cmd_ext);
+void intel_mid_msgbus_write32_raw_ext(u32 cmd, u32 cmd_ext, u32 data);
+u32 intel_mid_soc_stepping(void);
diff --git a/arch/x86/include/asm/intel_mid_powerbtn.h b/arch/x86/include/asm/intel_mid_powerbtn.h
new file mode 100644
index 0000000..cd5e4e5
--- /dev/null
+++ b/arch/x86/include/asm/intel_mid_powerbtn.h
@@ -0,0 +1,20 @@
+#ifndef __INTEL_MID_POWERBTN_H__
+#define __INTEL_MID_POWERBTN_H__
+
+struct intel_msic_power_btn_platform_data {
+	u32 pbstat;
+	u16 pb_level;
+	u16 irq_lvl1_mask;
+	int (*irq_ack)(struct intel_msic_power_btn_platform_data *);
+};
+
+#define MSIC_PB_LEN	1
+#define MSIC_PWRBTNM	(1 << 0)
+
+#define BCOVE_PBIRQ		0x02
+#define BCOVE_PBIRQMASK		0x0d
+
+#define SCOVE_PBIRQ		0x1002
+#define SCOVE_PBIRQMASK		0x100d
+
+#endif
diff --git a/arch/x86/include/asm/intel_mid_remoteproc.h b/arch/x86/include/asm/intel_mid_remoteproc.h
new file mode 100644
index 0000000..6faef10
--- /dev/null
+++ b/arch/x86/include/asm/intel_mid_remoteproc.h
@@ -0,0 +1,118 @@
+/*
+ * INTEL MID Remote Processor Head File
+ *
+ * Copyright (C) 2012 Intel, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _ASM_INTEL_MID_REMOTEPROC_H
+#define _ASM_INTEL_MID_REMOTEPROC_H
+
+#define RP_IPC_COMMAND		0xA0
+#define RP_IPC_SIMPLE_COMMAND	0xA1
+#define RP_IPC_RAW_COMMAND	0xA2
+
+#define	RP_PMIC_ACCESS		0xFF
+#define	RP_DFU_REQUEST		0xFE
+#define	RP_SET_WATCHDOG		0xF8
+#define	RP_FLIS_ACCESS		0xF5
+#define	RP_GET_FW_REVISION	0xF4
+#define	RP_COLD_BOOT		0xF3
+#define	RP_COLD_RESET		0xF1
+#define	RP_COLD_OFF		0x80
+#define	RP_MIP_ACCESS		0xEC
+#define RP_GET_HOBADDR		0xE5
+#define RP_OSC_CLK_CTRL		0xE6
+#define RP_S0IX_COUNTER		0xE8
+#define RP_WRITE_OSNIB		0xE4
+#define RP_FW_UPDATE		0xFE
+#define RP_VRTC			0xFA
+#define RP_PMDB			0xE0
+#define RP_INDIRECT_WRITE	0x05
+
+/*
+ * Assigning some temp ids for following devices
+ * TODO: Need to change it to some meaningful
+ *       values.
+ */
+#define RP_PMIC_GPIO		0X02
+#define RP_PMIC_AUDIO		0x03
+#define RP_MSIC_GPIO		0x05
+#define RP_MSIC_AUDIO		0x06
+#define RP_MSIC_OCD		0x07
+#define RP_MSIC_BATTERY		0XEF
+#define RP_MSIC_THERMAL		0x09
+#define RP_MSIC_POWER_BTN	0x10
+#define RP_IPC			0X11
+#define RP_IPC_UTIL		0X12
+#define RP_FW_ACCESS		0X13
+#define RP_UMIP_ACCESS		0x14
+#define RP_OSIP_ACCESS		0x15
+#define RP_MSIC_ADC		0x16
+#define RP_BQ24192		0x17
+#define RP_MSIC_CLV_AUDIO	0x18
+#define RP_PMIC_CCSM		0x19
+#define RP_PMIC_I2C		0x20
+#define RP_MSIC_MRFLD_AUDIO	0x21
+#define RP_MSIC_PWM		0x22
+#define RP_MSIC_KPD_LED		0x23
+#define RP_BCOVE_ADC		0x24
+#define RP_BCOVE_THERMAL	0x25
+#define RP_MRFL_OCD		0x26
+#define RP_FW_LOGGING		0x27
+#define RP_PMIC_CHARGER		0x28
+#define RP_SCOVE_THERMAL	0x29
+
+enum rproc_type {
+	RPROC_SCU = 0,
+	RPROC_PSH,
+	RPROC_NUM,
+};
+
+struct rproc_ops;
+struct platform_device;
+struct rpmsg_ns_msg;
+
+struct rpmsg_ns_info {
+	enum rproc_type type;
+	char name[RPMSG_NAME_SIZE];
+	u32 addr;
+	u32 flags;
+	struct list_head node;
+};
+
+struct rpmsg_ns_list {
+	struct list_head list;
+	struct mutex lock;
+};
+
+extern struct rpmsg_ns_info *rpmsg_ns_alloc(const char *name,
+						int id, u32 addr);
+extern void rpmsg_ns_add_to_list(struct rpmsg_ns_info *info,
+					struct rpmsg_ns_list *nslist);
+
+/*
+ * struct intel_mid_rproc_pdata - intel mid remoteproc's platform data
+ * @name: the remoteproc's name
+ * @firmware: name of firmware file to load
+ * @ops: start/stop rproc handlers
+ * @device_enable: handler for enabling a device
+ * @device_shutdown: handler for shutting down a device
+ */
+struct intel_mid_rproc_pdata {
+	const char *name;
+	const char *firmware;
+	const struct rproc_ops *ops;
+	int (*device_enable) (struct platform_device *pdev);
+	int (*device_shutdown) (struct platform_device *pdev);
+	struct rpmsg_ns_list *nslist;
+};
+
+#endif /* _ASM_INTEL_MID_REMOTEPROC_H */
diff --git a/arch/x86/include/asm/intel_mid_rpmsg.h b/arch/x86/include/asm/intel_mid_rpmsg.h
new file mode 100644
index 0000000..7691234
--- /dev/null
+++ b/arch/x86/include/asm/intel_mid_rpmsg.h
@@ -0,0 +1,74 @@
+#ifndef _INTEL_MID_RPMSG_H_
+#define _INTEL_MID_RPMSG_H_
+
+#include <asm/scu_ipc_rpmsg.h>
+#include <linux/wakelock.h>
+#include <linux/rpmsg.h>
+
+#define RPMSG_TX_TIMEOUT   (5 * HZ)
+
+struct rpmsg_instance {
+	struct rpmsg_channel *rpdev;
+	struct mutex instance_lock;
+	struct tx_ipc_msg *tx_msg;
+	struct rx_ipc_msg *rx_msg;
+	struct mutex rx_lock;
+	struct completion reply_arrived;
+	struct rpmsg_endpoint *endpoint;
+};
+
+struct rpmsg_lock {
+	struct mutex lock;
+	int locked_prev; /* locked prev flag */
+	atomic_t pending;
+};
+
+extern int rpmsg_send_command(struct rpmsg_instance *instance, u32 cmd,
+						u32 sub, u8 *in,
+						u32 *out, u32 inlen,
+						u32 outlen);
+
+extern int rpmsg_send_raw_command(struct rpmsg_instance *instance, u32 cmd,
+						u32 sub, u8 *in,
+						u32 *out, u32 inlen,
+						u32 outlen, u32 sptr,
+						u32 dptr);
+
+extern int rpmsg_send_simple_command(struct rpmsg_instance *instance, u32 cmd,
+						u32 sub);
+
+extern int alloc_rpmsg_instance(struct rpmsg_channel *rpdev,
+				struct rpmsg_instance **pInstance);
+
+extern void free_rpmsg_instance(struct rpmsg_channel *rpdev,
+				struct rpmsg_instance **pInstance);
+
+extern void init_rpmsg_instance(struct rpmsg_instance *instance);
+
+extern int rpmsg_send_generic_command(u32 cmd, u32 sub, u8 *in, u32 inlen,
+				u32 *out, u32 outlen);
+
+extern int rpmsg_send_generic_simple_command(u32 cmd, u32 sub);
+
+extern int rpmsg_send_generic_raw_command(u32 cmd, u32 sub,
+				   u8 *in, u32 inlen,
+				   u32 *out, u32 outlen,
+				   u32 dptr, u32 sptr);
+
+struct rpmsg_device_data {
+	char name[RPMSG_NAME_SIZE];
+	struct rpmsg_channel *rpdev;
+	struct rpmsg_instance *rpmsg_instance;
+};
+
+enum rpmsg_ipc_command_type {
+	RPMSG_IPC_COMMAND = 0,
+	RPMSG_IPC_SIMPLE_COMMAND,
+	RPMSG_IPC_RAW_COMMAND,
+	RPMSG_IPC_COMMAND_TYPE_NUM,
+};
+
+extern void rpmsg_global_lock(void);
+extern void rpmsg_global_unlock(void);
+
+#endif
diff --git a/arch/x86/include/asm/intel_mid_thermal.h b/arch/x86/include/asm/intel_mid_thermal.h
new file mode 100644
index 0000000..b6e648b
--- /dev/null
+++ b/arch/x86/include/asm/intel_mid_thermal.h
@@ -0,0 +1,81 @@
+#ifndef __INTEL_MID_THERMAL_H__
+#define __INTEL_MID_THERMAL_H__
+
+#include <linux/thermal.h>
+
+#define BPTHERM_NAME	"bptherm"
+#define SKIN0_NAME	"skin0"
+#define SKIN1_NAME	"skin1"
+#define MSIC_DIE_NAME	"msicdie"
+#define MSIC_SYS_NAME	"sys"
+#define SYSTHERM0       "systherm0"
+#define SYSTHERM1       "systherm1"
+#define SYSTHERM2       "systherm2"
+/**
+ * struct intel_mid_thermal_sensor - intel_mid_thermal sensor information
+ * @name:		name of the sensor
+ * @index:		index number of sensor
+ * @slope:		slope used for temp calculation
+ * @intercept:		intercept used for temp calculation
+ * @adc_channel:	adc channel id|flags
+ * @direct:		If true then direct conversion is used.
+ * @priv:		private sensor data
+ * @temp_correlation:	temp correlation function
+ */
+struct intel_mid_thermal_sensor {
+	char name[THERMAL_NAME_LENGTH];
+	int index;
+	long slope;
+	long intercept;
+	int adc_channel;
+	bool direct;
+	void *priv;
+	int (*temp_correlation)(void *info, long temp, long *res);
+};
+
+/**
+ * struct soc_throttle_data - SoC level power limits for thermal throttling
+ * @power_limit:	power limit value
+ * @floor_freq:		The CPU frequency may not go below this value
+ */
+struct soc_throttle_data {
+	int power_limit;
+	int floor_freq;
+};
+
+/**
+ * struct intel_mid_thermal_platform_data - Platform data for
+ *		intel mid thermal driver
+ *
+ * @num_sensors:	Maximum number of real sensors supported
+ * @num_virtual_sensors: Number of virtual sensors
+ * @sensors:		sensor info
+ * @gpu_cooling:	Whether to register a cdev for GPU throttling
+ */
+struct intel_mid_thermal_platform_data {
+	int num_sensors;
+	int num_virtual_sensors;
+	struct intel_mid_thermal_sensor *sensors;
+	bool gpu_cooling;
+};
+
+/**
+ * struct skin1_private_info - skin1 sensor private data
+ *
+ * @dependent:		dependency on other sensors
+			0   - no dependency,
+			> 0 - depends on other sensors
+ * @sensors:		dependent sensor address.
+ */
+struct skin1_private_info {
+	int dependent;
+	struct intel_mid_thermal_sensor **sensors;
+};
+
+/* skin0 sensor temperature correlation function*/
+int skin0_temp_correlation(void *info, long temp, long *res);
+/* skin1 sensor temperature correlation function*/
+int skin1_temp_correlation(void *info, long temp, long *res);
+/* bptherm sensor temperature correlation function*/
+int bptherm_temp_correlation(void *info, long temp, long *res);
+#endif
diff --git a/arch/x86/include/asm/intel_mid_vrtc.h b/arch/x86/include/asm/intel_mid_vrtc.h
new file mode 100644
index 0000000..11ababf
--- /dev/null
+++ b/arch/x86/include/asm/intel_mid_vrtc.h
@@ -0,0 +1,9 @@
+#ifndef _INTEL_MID_VRTC_H
+#define _INTEL_MID_VRTC_H
+
+extern unsigned char vrtc_cmos_read(unsigned char reg);
+extern void vrtc_cmos_write(unsigned char val, unsigned char reg);
+extern unsigned long vrtc_get_time(void);
+extern int vrtc_set_mmss(unsigned long nowtime);
+
+#endif
diff --git a/arch/x86/include/asm/intel_mip.h b/arch/x86/include/asm/intel_mip.h
new file mode 100644
index 0000000..f05fc05
--- /dev/null
+++ b/arch/x86/include/asm/intel_mip.h
@@ -0,0 +1,32 @@
+#ifndef _ASM_X86_INTEL_MIP_H_
+#define  _ASM_X86_INTEL_MIP_H_
+
+#include <asm/intel-mid.h>
+
+/* SMIP property related definitions */
+#define SCU_MIP_DEV_NAME		"intel_scu_mip"
+#define SMIP_NUM_CONFIG_PROPS		6
+#define SMIP_MAX_PROP_LEN		4
+
+enum platform_prop {
+	USB_COMPLIANCE,
+	CHARGE_TERMINATION,
+	SHUTDOWN_METHODOLOGY,
+	MOS_TRANS_CAPACITY,
+	NFC_RESV_CAPACITY,
+	TEMP_CRIT_SHUTDOWN,
+};
+
+struct smip_platform_prop {
+	unsigned int offset;
+	unsigned int len;
+	bool is_bit_field;
+	unsigned int mask;
+};
+
+struct scu_mip_platform_data {
+	struct smip_platform_prop smip_prop[SMIP_NUM_CONFIG_PROPS];
+};
+
+int get_smip_property_by_name(enum platform_prop);
+#endif
diff --git a/arch/x86/include/asm/intel_psh_ipc.h b/arch/x86/include/asm/intel_psh_ipc.h
new file mode 100644
index 0000000..ed4d3c14
--- /dev/null
+++ b/arch/x86/include/asm/intel_psh_ipc.h
@@ -0,0 +1,35 @@
+#ifndef _ASM_X86_INTEL_PSH_IPC_H_
+#define _ASM_X86_INTEL_PSH_IPC_H_
+
+#define CHANNEL_BUSY		(1 << 31)
+#define PSH_IPC_CONTINUE	(1 << 30)
+
+struct psh_msg {
+	u32 msg;
+	u32 param;
+};
+
+enum psh_channel {
+	PSH_SEND_CH0 = 0,
+	PSH_SEND_CH1,
+	PSH_SEND_CH2,
+	PSH_SEND_CH3,
+	NUM_IA2PSH_IPC,
+	PSH_RECV_CH0 = NUM_IA2PSH_IPC,
+	PSH_RECV_CH1,
+	PSH_RECV_CH2,
+	PSH_RECV_CH3,
+	PSH_RECV_END,
+	NUM_PSH2IA_IPC = PSH_RECV_END - PSH_RECV_CH0,
+	NUM_ALL_CH = NUM_IA2PSH_IPC + NUM_PSH2IA_IPC,
+};
+
+typedef void(*psh_channel_handle_t)(u32 msg, u32 param, void *data);
+int intel_ia2psh_command(struct psh_msg *in, struct psh_msg *out,
+			 int ch, int timeout);
+int intel_psh_ipc_bind(int ch, psh_channel_handle_t handle, void *data);
+void intel_psh_ipc_unbind(int ch);
+
+void intel_psh_ipc_disable_irq(void);
+void intel_psh_ipc_enable_irq(void);
+#endif
diff --git a/arch/x86/include/asm/intel_scu_flis.h b/arch/x86/include/asm/intel_scu_flis.h
new file mode 100644
index 0000000..c7f9b93
--- /dev/null
+++ b/arch/x86/include/asm/intel_scu_flis.h
@@ -0,0 +1,864 @@
+#ifndef _ASM_X86_INTEL_SCU_FLIS_H_
+#define _ASM_X86_INTEL_SCU_FLIS_H_
+
+enum flis_param_t {
+	PULL,
+	MUX,
+	OPEN_DRAIN,
+};
+
+#ifdef CONFIG_X86_CTP
+/* For CTP */
+/*
+ * Config value for PULL
+ */
+#define NONE		(0 << 0)
+#define DOWN_20K	(1 << 0)
+#define DOWN_2K		(1 << 1)
+/* DOWN_75K is reserved */
+#define UP_20K		(1 << 3)
+#define UP_2K		(1 << 4)
+/*910 Weak Pull-Up*/
+#define UP_910		(1 << 5)
+
+/*
+ * Config value for OPEN_DRAIN
+ */
+#define OD_DISABLE	(1 << 0)
+#define OD_ENABLE	(0 << 0)
+
+/*
+ * Config value for MUX
+ */
+/*Bit0: Mux Enable for Input Enable*/
+#define MUX_EN_INPUT_EN		(1<<0)
+/*Bit1: Input Enable for*/
+#define INPUT_EN		(1<<1)
+/*Bit2: Mux Enable for Output Enable*/
+#define MUX_EN_OUTPUT_EN	(1<<2)
+/*Bit3: Output Enable*/
+#define OUTPUT_EN		(1<<3)
+
+#define PULL_MASK		0x3F
+#define MUX_MASK		0xF
+#define OPEN_DRAIN_MASK		0x1
+
+#else
+/* For MERR & MOFD */
+
+#define PULL_MASK	((7 << 4) | (3 << 8))
+#define MUX_MASK	(0xF << 12)
+#define OPEN_DRAIN_MASK	BIT(21)
+
+#define PULL_UP		(1 << 8)
+#define PULL_DOWN	(2 << 8)
+#define R2Kohms		(0 << 4)
+#define R20Kohms	(1 << 4)
+#define R50Kohms	(2 << 4)
+#define R910ohms	(3 << 4)
+
+#define NONE		(0 << 8)
+#define UP_2K		(PULL_UP | R2Kohms)
+#define UP_20K		(PULL_UP | R20Kohms)
+#define UP_50K		(PULL_UP | R50Kohms)
+#define UP_910		(PULL_UP | R910ohms)
+#define DOWN_2K		(PULL_DOWN | R2Kohms)
+#define DOWN_20K	(PULL_DOWN | R20Kohms)
+#define DOWN_50K	(PULL_DOWN | R50Kohms)
+#define DOWN_910	(PULL_DOWN | R910ohms)
+
+#define OD_DISABLE	(0 << 21)
+#define OD_ENABLE	(1 << 21)
+
+#define MUX_EN_INPUT_EN		(2 << 12)
+#define INPUT_EN		(1 << 12)
+#define MUX_EN_OUTPUT_EN	(8 << 12)
+#define OUTPUT_EN		(4 << 12)
+
+#endif
+
+enum ctp_pinname_t {
+	i2s_2_clk = 0,
+	i2s_2_fs = 1,
+	i2s_2_rxd = 2,
+	i2s_2_txd = 3,
+	msic_reset_b = 4,
+	spi_0_clk = 5,
+	spi_0_sdi = 6,
+	spi_0_sdo = 7,
+	spi_0_ss = 8,
+	svid_clkout = 9,
+	svid_clksynch = 10,
+	svid_din = 11,
+	svid_dout = 12,
+	usb_ulpi_clk = 13,
+	usb_ulpi_data0 = 14,
+	usb_ulpi_data1 = 15,
+	usb_ulpi_data2 = 16,
+	usb_ulpi_data3 = 17,
+	usb_ulpi_data4 = 18,
+	usb_ulpi_data5 = 19,
+	usb_ulpi_data6 = 20,
+	usb_ulpi_data7 = 21,
+	usb_ulpi_dir = 22,
+	usb_ulpi_nxt = 23,
+	usb_ulpi_refclk = 24,
+	usb_ulpi_stp = 25,
+	ulpi1lpc_gpe_b = 26,
+	ulpi1lpc_lpc_ad0 = 27,
+	ulpi1lpc_lpc_ad1 = 28,
+	ulpi1lpc_lpc_ad2 = 29,
+	ulpi1lpc_lpc_ad3 = 30,
+	ulpi1lpc_lpc_clkout = 31,
+	ulpi1lpc_lpc_clkrun = 32,
+	ulpi1lpc_lpc_frame_b = 33,
+	ulpi1lpc_lpc_reset_b = 34,
+	ulpi1lpc_lpc_serirq = 35,
+	ulpi1lpc_lpc_smi_b = 36,
+	ulpi1lpc_usb_ulpi_1_clk = 37,
+	ulpi1lpc_usb_ulpi_1_data0 = 38,
+	ulpi1lpc_usb_ulpi_1_data1 = 39,
+	ulpi1lpc_usb_ulpi_1_data2 = 40,
+	ulpi1lpc_usb_ulpi_1_data3 = 41,
+	ulpi1lpc_usb_ulpi_1_data4 = 42,
+	ulpi1lpc_usb_ulpi_1_data5 = 43,
+	ulpi1lpc_usb_ulpi_1_data6 = 44,
+	ulpi1lpc_usb_ulpi_1_data7 = 45,
+	ulpi1lpc_usb_ulpi_1_dir = 46,
+	ulpi1lpc_usb_ulpi_1_nxt = 47,
+	ulpi1lpc_usb_ulpi_1_refclk = 48,
+	ulpi1lpc_usb_ulpi_1_stp = 49,
+	kbd_dkin0 = 50,
+	kbd_dkin1 = 51,
+	kbd_dkin2 = 52,
+	kbd_dkin3 = 53,
+	kbd_mkin0 = 54,
+	kbd_mkin1 = 55,
+	kbd_mkin2 = 56,
+	kbd_mkin3 = 57,
+	kbd_mkin4 = 58,
+	kbd_mkin5 = 59,
+	kbd_mkin6 = 60,
+	kbd_mkin7 = 61,
+	kbd_mkout0 = 62,
+	kbd_mkout1 = 63,
+	kbd_mkout2 = 64,
+	kbd_mkout3 = 65,
+	kbd_mkout4 = 66,
+	kbd_mkout5 = 67,
+	kbd_mkout6 = 68,
+	kbd_mkout7 = 69,
+	camerasb10 = 70,
+	camerasb4 = 71,
+	camerasb5 = 72,
+	camerasb6 = 73,
+	camerasb7 = 74,
+	camerasb8 = 75,
+	camerasb9 = 76,
+	i2c_4_scl = 77,
+	i2c_4_sda = 78,
+	i2c_5_scl = 79,
+	i2c_5_sda = 80,
+	intd_dsi_te1 = 81,
+	intd_dsi_te2 = 82,
+	stio_0_cd_b = 83,
+	stio_0_clk = 84,
+	stio_0_cmd = 85,
+	stio_0_dat0 = 86,
+	stio_0_dat1 = 87,
+	stio_0_dat2 = 88,
+	stio_0_dat3 = 89,
+	stio_0_dat4 = 90,
+	stio_0_dat5 = 91,
+	stio_0_dat6 = 92,
+	stio_0_dat7 = 93,
+	stio_0_wp_b = 94,
+	camerasb0 = 95,
+	camerasb1 = 96,
+	camerasb2 = 97,
+	camerasb3 = 98,
+	ded_gpio10 = 99,
+	ded_gpio11 = 100,
+	ded_gpio12 = 101,
+	ded_gpio13 = 102,
+	ded_gpio14 = 103,
+	ded_gpio15 = 104,
+	ded_gpio16 = 105,
+	ded_gpio17 = 106,
+	ded_gpio18 = 107,
+	ded_gpio19 = 108,
+	ded_gpio20 = 109,
+	ded_gpio21 = 110,
+	ded_gpio22 = 111,
+	ded_gpio23 = 112,
+	ded_gpio24 = 113,
+	ded_gpio25 = 114,
+	ded_gpio26 = 115,
+	ded_gpio27 = 116,
+	ded_gpio28 = 117,
+	ded_gpio29 = 118,
+	ded_gpio30 = 119,
+	ded_gpio8 = 120,
+	ded_gpio9 = 121,
+	mpti_nidnt_clk = 122,
+	mpti_nidnt_data0 = 123,
+	mpti_nidnt_data1 = 124,
+	mpti_nidnt_data2 = 125,
+	mpti_nidnt_data3 = 126,
+	stio_1_clk = 127,
+	stio_1_cmd = 128,
+	stio_1_dat0 = 129,
+	stio_1_dat1 = 130,
+	stio_1_dat2 = 131,
+	stio_1_dat3 = 132,
+	stio_2_clk = 133,
+	stio_2_cmd = 134,
+	stio_2_dat0 = 135,
+	stio_2_dat1 = 136,
+	stio_2_dat2 = 137,
+	stio_2_dat3 = 138,
+	coms_int0 = 139,
+	coms_int1 = 140,
+	coms_int2 = 141,
+	coms_int3 = 142,
+	ded_gpio4 = 143,
+	ded_gpio5 = 144,
+	ded_gpio6 = 145,
+	ded_gpio7 = 146,
+	i2s_0_clk = 147,
+	i2s_0_fs = 148,
+	i2s_0_rxd = 149,
+	i2s_0_txd = 150,
+	i2s_1_clk = 151,
+	i2s_1_fs = 152,
+	i2s_1_rxd = 153,
+	i2s_1_txd = 154,
+	mslim_1_bclk = 155,
+	mslim_1_bdat = 156,
+	resetout_b = 157,
+	spi_2_clk = 158,
+	spi_2_sdi = 159,
+	spi_2_sdo = 160,
+	spi_2_ss0 = 161,
+	spi_2_ss1 = 162,
+	spi_3_clk = 163,
+	spi_3_sdi = 164,
+	spi_3_sdo = 165,
+	spi_3_ss0 = 166,
+	spi_3_ss1 = 167,
+	uart_0_cts = 168,
+	uart_0_rts = 169,
+	uart_0_rx = 170,
+	uart_0_tx = 171,
+	uart_1_rx = 172,
+	uart_1_sd = 173,
+	uart_1_tx = 174,
+	uart_2_rx = 175,
+	uart_2_tx = 176,
+	aclkph = 177,
+	dclkph = 178,
+	dsiclkph = 179,
+	ierr = 180,
+	jtag_tckc = 181,
+	jtag_tdic = 182,
+	jtag_tdoc = 183,
+	jtag_tmsc = 184,
+	jtag_trst_b = 185,
+	lclkph = 186,
+	lfhclkph = 187,
+	osc_clk_ctrl0 = 188,
+	osc_clk_ctrl1 = 189,
+	osc_clk_out0 = 190,
+	osc_clk_out1 = 191,
+	osc_clk_out2 = 192,
+	osc_clk_out3 = 193,
+	prochot_b = 194,
+	thermtrip_b = 195,
+	uclkph = 196,
+	ded_gpio31 = 197,
+	ded_gpio32 = 198,
+	ded_gpio33 = 199,
+	hdmi_cec = 200,
+	i2c_3_scl_hdmi_ddc = 201,
+	i2c_3_sda_hdmi_ddc = 202,
+	i2c_0_scl = 203,
+	i2c_0_sda = 204,
+	i2c_1_scl = 205,
+	i2c_1_sda = 206,
+	i2c_2_scl = 207,
+	i2c_2_sda = 208,
+	spi_1_clk = 209,
+	spi_1_sdi = 210,
+	spi_1_sdo = 211,
+	spi_1_ss0 = 212,
+	spi_1_ss1 = 213,
+	spi_1_ss2 = 214,
+	spi_1_ss3 = 215,
+	spi_1_ss4 = 216,
+	CTP_PIN_NUM,
+};
+
+/* Add prefix "tng_" to avoid name duplication with ctp pins */
+enum tng_pinname_t {
+	tng_usb_ulpi_0_clk = 0,
+	tng_usb_ulpi_0_data_0 = 1,
+	tng_usb_ulpi_0_data_1 = 2,
+	tng_usb_ulpi_0_data_2 = 3,
+	tng_usb_ulpi_0_data_3 = 4,
+	tng_usb_ulpi_0_data_4 = 5,
+	tng_usb_ulpi_0_data_5 = 6,
+	tng_usb_ulpi_0_data_6 = 7,
+	tng_usb_ulpi_0_data_7 = 8,
+	tng_usb_ulpi_0_dir = 9,
+	tng_usb_ulpi_0_nxt = 10,
+	tng_usb_ulpi_0_refclk = 11,
+	tng_usb_ulpi_0_stp = 12,
+	tng_emmc_0_clk = 13,
+	tng_emmc_0_cmd = 14,
+	tng_emmc_0_d_0 = 15,
+	tng_emmc_0_d_1 = 16,
+	tng_emmc_0_d_2 = 17,
+	tng_emmc_0_d_3 = 18,
+	tng_emmc_0_d_4 = 19,
+	tng_emmc_0_d_5 = 20,
+	tng_emmc_0_d_6 = 21,
+	tng_emmc_0_d_7 = 22,
+	tng_emmc_0_rst_b = 23,
+	tng_gp_emmc_1_clk = 24,
+	tng_gp_emmc_1_cmd = 25,
+	tng_gp_emmc_1_d_0 = 26,
+	tng_gp_emmc_1_d_1 = 27,
+	tng_gp_emmc_1_d_2 = 28,
+	tng_gp_emmc_1_d_3 = 29,
+	tng_gp_emmc_1_d_4 = 30,
+	tng_gp_emmc_1_d_5 = 31,
+	tng_gp_emmc_1_d_6 = 32,
+	tng_gp_emmc_1_d_7 = 33,
+	tng_gp_emmc_1_rst_b = 34,
+	tng_gp_28 = 35,
+	tng_gp_29 = 36,
+	tng_gp_sdio_0_cd_b = 37,
+	tng_gp_sdio_0_clk = 38,
+	tng_gp_sdio_0_cmd = 39,
+	tng_gp_sdio_0_dat_0 = 40,
+	tng_gp_sdio_0_dat_1 = 41,
+	tng_gp_sdio_0_dat_2 = 42,
+	tng_gp_sdio_0_dat_3 = 43,
+	tng_gp_sdio_0_lvl_clk_fb = 44,
+	tng_gp_sdio_0_lvl_cmd_dir = 45,
+	tng_gp_sdio_0_lvl_dat_dir = 46,
+	tng_gp_sdio_0_lvl_sel = 47,
+	tng_gp_sdio_0_powerdown_b = 48,
+	tng_gp_sdio_0_wp = 49,
+	tng_gp_sdio_1_clk = 50,
+	tng_gp_sdio_1_cmd = 51,
+	tng_gp_sdio_1_dat_0 = 52,
+	tng_gp_sdio_1_dat_1 = 53,
+	tng_gp_sdio_1_dat_2 = 54,
+	tng_gp_sdio_1_dat_3 = 55,
+	tng_gp_sdio_1_powerdown_b = 56,
+	tng_mhsi_acdata = 57,
+	tng_mhsi_acflag = 58,
+	tng_mhsi_acready = 59,
+	tng_mhsi_acwake = 60,
+	tng_mhsi_cadata = 61,
+	tng_mhsi_caflag = 62,
+	tng_mhsi_caready = 63,
+	tng_mhsi_cawake = 64,
+	tng_gp_mslim_0_bclk = 65,
+	tng_gp_mslim_0_bdat = 66,
+	tng_gp_ssp_0_clk = 67,
+	tng_gp_ssp_0_fs = 68,
+	tng_gp_ssp_0_rxd = 69,
+	tng_gp_ssp_0_txd = 70,
+	tng_gp_ssp_1_clk = 71,
+	tng_gp_ssp_1_fs = 72,
+	tng_gp_ssp_1_rxd = 73,
+	tng_gp_ssp_1_txd = 74,
+	tng_gp_ssp_2_clk = 75,
+	tng_gp_ssp_2_fs = 76,
+	tng_gp_ssp_2_rxd = 77,
+	tng_gp_ssp_2_txd = 78,
+	tng_gp_ssp_3_clk = 79,
+	tng_gp_ssp_3_fs = 80,
+	tng_gp_ssp_3_rxd = 81,
+	tng_gp_ssp_3_txd = 82,
+	tng_gp_ssp_4_clk = 83,
+	tng_gp_ssp_4_fs_0 = 84,
+	tng_gp_ssp_4_fs_1 = 85,
+	tng_gp_ssp_4_fs_2 = 86,
+	tng_gp_ssp_4_fs_3 = 87,
+	tng_gp_ssp_4_rxd = 88,
+	tng_gp_ssp_4_txd = 89,
+	tng_gp_ssp_5_clk = 90,
+	tng_gp_ssp_5_fs_0 = 91,
+	tng_gp_ssp_5_fs_1 = 92,
+	tng_gp_ssp_5_fs_2 = 93,
+	tng_gp_ssp_5_fs_3 = 94,
+	tng_gp_ssp_5_rxd = 95,
+	tng_gp_ssp_5_txd = 96,
+	tng_gp_ssp_6_clk = 97,
+	tng_gp_ssp_6_fs = 98,
+	tng_gp_ssp_6_rxd = 99,
+	tng_gp_ssp_6_txd = 100,
+	tng_gp_i2c_1_scl = 101,
+	tng_gp_i2c_1_sda = 102,
+	tng_gp_i2c_2_scl = 103,
+	tng_gp_i2c_2_sda = 104,
+	tng_gp_i2c_3_scl = 105,
+	tng_gp_i2c_3_sda = 106,
+	tng_gp_i2c_4_scl = 107,
+	tng_gp_i2c_4_sda = 108,
+	tng_gp_i2c_5_scl = 109,
+	tng_gp_i2c_5_sda = 110,
+	tng_gp_i2c_6_scl = 111,
+	tng_gp_i2c_6_sda = 112,
+	tng_gp_i2c_7_scl = 113,
+	tng_gp_i2c_7_sda = 114,
+	tng_gp_uart_0_cts = 115,
+	tng_gp_uart_0_rts = 116,
+	tng_gp_uart_0_rx = 117,
+	tng_gp_uart_0_tx = 118,
+	tng_gp_uart_1_cts = 119,
+	tng_gp_uart_1_rts = 120,
+	tng_gp_uart_1_rx = 121,
+	tng_gp_uart_1_tx = 122,
+	tng_gp_uart_2_cts = 123,
+	tng_gp_uart_2_rts = 124,
+	tng_gp_uart_2_rx = 125,
+	tng_gp_uart_2_tx = 126,
+	tng_gp_13 = 127,
+	tng_gp_14 = 128,
+	tng_gp_15 = 129,
+	tng_gp_16 = 130,
+	tng_gp_17 = 131,
+	tng_gp_18 = 132,
+	tng_gp_19 = 133,
+	tng_gp_20 = 134,
+	tng_gp_21 = 135,
+	tng_gp_22 = 136,
+	tng_gp_23 = 137,
+	tng_gp_24 = 138,
+	tng_gp_25 = 139,
+	tng_gp_fast_int_0 = 140,
+	tng_gp_fast_int_1 = 141,
+	tng_gp_fast_int_2 = 142,
+	tng_gp_fast_int_3 = 143,
+	tng_gp_pwm_0 = 144,
+	tng_gp_pwm_1 = 145,
+	tng_gp_camerasb_0 = 146,
+	tng_gp_camerasb_1 = 147,
+	tng_gp_camerasb_2 = 148,
+	tng_gp_camerasb_3 = 149,
+	tng_gp_camerasb_4 = 150,
+	tng_gp_camerasb_5 = 151,
+	tng_gp_camerasb_6 = 152,
+	tng_gp_camerasb_7 = 153,
+	tng_gp_camerasb_8 = 154,
+	tng_gp_camerasb_9 = 155,
+	tng_gp_camerasb_10 = 156,
+	tng_gp_camerasb_11 = 157,
+	tng_gp_clkph_0 = 158,
+	tng_gp_clkph_1 = 159,
+	tng_gp_clkph_2 = 160,
+	tng_gp_clkph_3 = 161,
+	tng_gp_clkph_4 = 162,
+	tng_gp_clkph_5 = 163,
+	tng_gp_hdmi_hpd = 164,
+	tng_gp_intd_dsi_te1 = 165,
+	tng_gp_intd_dsi_te2 = 166,
+	tng_osc_clk_ctrl_0 = 167,
+	tng_osc_clk_ctrl_1 = 168,
+	tng_osc_clk_out_0 = 169,
+	tng_osc_clk_out_1 = 170,
+	tng_osc_clk_out_2 = 171,
+	tng_osc_clk_out_3 = 172,
+	tng_osc_clk_out_4 = 173,
+	tng_resetout_b = 174,
+	tng_xxpmode = 175,
+	tng_xxprdy = 176,
+	tng_xxpreq_b = 177,
+	tng_gp_26 = 178,
+	tng_gp_27 = 179,
+	tng_i2c_0_scl = 180,
+	tng_i2c_0_sda = 181,
+	tng_ierr_b = 182,
+	tng_jtag_tckc = 183,
+	tng_jtag_tdic = 184,
+	tng_jtag_tdoc = 185,
+	tng_jtag_tmsc = 186,
+	tng_jtag_trst_b = 187,
+	tng_prochot_b = 188,
+	tng_rtc_clk = 189,
+	tng_svid_vclk = 190,
+	tng_svid_vdio = 191,
+	tng_thermtrip_b = 192,
+	tng_standby = 193,
+	tng_gp_kbd_dkin_0 = 194,
+	tng_gp_kbd_dkin_1 = 195,
+	tng_gp_kbd_dkin_2 = 196,
+	tng_gp_kbd_dkin_3 = 197,
+	tng_gp_kbd_mkin_0 = 198,
+	tng_gp_kbd_mkin_1 = 199,
+	tng_gp_kbd_mkin_2 = 200,
+	tng_gp_kbd_mkin_3 = 201,
+	tng_gp_kbd_mkin_4 = 202,
+	tng_gp_kbd_mkin_5 = 203,
+	tng_gp_kbd_mkin_6 = 204,
+	tng_gp_kbd_mkin_7 = 205,
+	tng_gp_kbd_mkout_0 = 206,
+	tng_gp_kbd_mkout_1 = 207,
+	tng_gp_kbd_mkout_2 = 208,
+	tng_gp_kbd_mkout_3 = 209,
+	tng_gp_kbd_mkout_4 = 210,
+	tng_gp_kbd_mkout_5 = 211,
+	tng_gp_kbd_mkout_6 = 212,
+	tng_gp_kbd_mkout_7 = 213,
+	tng_gp_0 = 214,
+	tng_gp_1 = 215,
+	tng_gp_2 = 216,
+	tng_gp_3 = 217,
+	tng_gp_4 = 218,
+	tng_gp_5 = 219,
+	tng_gp_6 = 220,
+	tng_gp_7 = 221,
+	tng_gp_8 = 222,
+	tng_gp_9 = 223,
+	tng_gp_10 = 224,
+	tng_gp_11 = 225,
+	tng_gp_12 = 226,
+	tng_gp_mpti_clk = 227,
+	tng_gp_mpti_data_0 = 228,
+	tng_gp_mpti_data_1 = 229,
+	tng_gp_mpti_data_2 = 230,
+	tng_gp_mpti_data_3 = 231,
+	TNG_PIN_NUM,
+};
+
+/* If the same pin exists on TNG, use the same value as TNG,
+ * if the pin is new on ANN, allocate one value, starts from TNG_PIN_NUM.
+ */
+enum ann_pinname_t {
+	/* gpioclk */
+	ann_gp_clkph_0 = tng_gp_clkph_0,
+	ann_gp_clkph_1 = tng_gp_clkph_1,
+	ann_gp_clkph_2 = tng_gp_clkph_2,
+	ann_gp_clkph_3 = tng_gp_clkph_3,
+	ann_gp_clkph_4 = tng_gp_clkph_4,
+	ann_gp_clkph_5 = tng_gp_clkph_5,
+	ann_gp_clkph_6 = 232,
+	ann_osc_clk_ctrl_0 = tng_osc_clk_ctrl_0,
+	ann_osc_clk_ctrl_1 = tng_osc_clk_ctrl_1,
+	ann_osc_clk_out_0 = tng_osc_clk_out_0,
+	ann_osc_clk_out_1 = tng_osc_clk_out_1,
+	ann_osc_clk_out_2 = tng_osc_clk_out_2,
+	ann_osc_clk_out_3 = tng_osc_clk_out_3,
+	ann_osc_clk_out_4 = tng_osc_clk_out_4,
+
+	/* gpiocsb */
+	ann_gp_camerasb_0 = tng_gp_camerasb_0,
+	ann_gp_camerasb_1 = tng_gp_camerasb_1,
+	ann_gp_camerasb_2 = tng_gp_camerasb_2,
+	ann_gp_camerasb_3 = tng_gp_camerasb_3,
+	ann_gp_camerasb_4 = tng_gp_camerasb_4,
+	ann_gp_camerasb_5 = tng_gp_camerasb_5,
+	ann_gp_camerasb_6 = tng_gp_camerasb_6,
+	ann_gp_camerasb_7 = tng_gp_camerasb_7,
+	ann_gp_camerasb_8 = tng_gp_camerasb_8,
+	ann_gp_camerasb_9 = tng_gp_camerasb_9,
+	ann_gp_camerasb_10 = tng_gp_camerasb_10,
+	ann_gp_camerasb_11 = tng_gp_camerasb_11,
+	ann_gp_hdmi_hpd = tng_gp_hdmi_hpd,
+	ann_gp_intd_dsi_te1 = tng_gp_intd_dsi_te1,
+	ann_gp_intd_dsi_te2 = tng_gp_intd_dsi_te2,
+
+	/* gpioemmc */
+	ann_emmc_0_clk = tng_emmc_0_clk,
+	ann_emmc_0_cmd = tng_emmc_0_cmd,
+	ann_emmc_0_d_0 = tng_emmc_0_d_0,
+	ann_emmc_0_d_1 = tng_emmc_0_d_1,
+	ann_emmc_0_d_2 = tng_emmc_0_d_2,
+	ann_emmc_0_d_3 = tng_emmc_0_d_3,
+	ann_emmc_0_d_4 = tng_emmc_0_d_4,
+	ann_emmc_0_d_5 = tng_emmc_0_d_5,
+	ann_emmc_0_d_6 = tng_emmc_0_d_6,
+	ann_emmc_0_d_7 = tng_emmc_0_d_7,
+	ann_emmc_0_rst_b = tng_emmc_0_rst_b,
+	ann_emmc_0_rclk = 233,
+
+	/* gpiogpio */
+	ann_gp_12 = tng_gp_12,
+	ann_gp_13 = tng_gp_13,
+	ann_gp_14 = tng_gp_14,
+	ann_gp_15 = tng_gp_15,
+	ann_gp_16 = tng_gp_16,
+	ann_gp_17 = tng_gp_17,
+	ann_gp_18 = tng_gp_18,
+	ann_gp_19 = tng_gp_19,
+	ann_gp_20 = tng_gp_20,
+	ann_gp_21 = tng_gp_21,
+	ann_gp_22 = tng_gp_22,
+	ann_gp_23 = tng_gp_23,
+	ann_gp_24 = tng_gp_24,
+	ann_gp_25 = tng_gp_25,
+	ann_gp_26 = tng_gp_26,
+	ann_gp_27 = tng_gp_27,
+	ann_gp_28 = tng_gp_28,
+	ann_gp_29 = tng_gp_29,
+	ann_gp_30 = 234,
+	ann_gp_31 = 235,
+
+	/* gpiohsi */
+	ann_mhsi_acdata = tng_mhsi_acdata,
+	ann_mhsi_acflag = tng_mhsi_acflag,
+	ann_mhsi_acready = tng_mhsi_acready,
+	ann_mhsi_acwake = tng_mhsi_acwake,
+	ann_mhsi_cadata = tng_mhsi_cadata,
+	ann_mhsi_caflag = tng_mhsi_caflag,
+	ann_mhsi_caready = tng_mhsi_caready,
+	ann_mhsi_cawake = tng_mhsi_cawake,
+
+	/* gpioi2c */
+	ann_i2c_0_scl = tng_i2c_0_scl,
+	ann_i2c_0_sda = tng_i2c_0_sda,
+	ann_gp_i2c_1_scl = tng_gp_i2c_1_scl,
+	ann_gp_i2c_1_sda = tng_gp_i2c_1_sda,
+	ann_gp_i2c_2_scl = tng_gp_i2c_2_scl,
+	ann_gp_i2c_2_sda = tng_gp_i2c_2_sda,
+	ann_gp_i2c_3_scl = tng_gp_i2c_3_scl,
+	ann_gp_i2c_3_sda = tng_gp_i2c_3_sda,
+	ann_gp_i2c_4_scl = tng_gp_i2c_4_scl,
+	ann_gp_i2c_4_sda = tng_gp_i2c_4_sda,
+	ann_gp_i2c_5_scl = tng_gp_i2c_5_scl,
+	ann_gp_i2c_5_sda = tng_gp_i2c_5_sda,
+	ann_gp_i2c_6_scl = tng_gp_i2c_6_scl,
+	ann_gp_i2c_6_sda = tng_gp_i2c_6_sda,
+	ann_gp_i2c_7_scl = tng_gp_i2c_7_scl,
+	ann_gp_i2c_7_sda = tng_gp_i2c_7_sda,
+	ann_i2c_8_scl = 236,
+	ann_i2c_8_sda = 237,
+	ann_i2c_9_scl = 238,
+	ann_i2c_9_sda = 239,
+
+	/* gpiokbd */
+	ann_gp_kbd_dkin_0 = tng_gp_kbd_dkin_0,
+	ann_gp_kbd_dkin_1 = tng_gp_kbd_dkin_1,
+	ann_gp_kbd_dkin_2 = tng_gp_kbd_dkin_2,
+	ann_gp_kbd_dkin_3 = tng_gp_kbd_dkin_3,
+	ann_gp_kbd_mkin_0 = tng_gp_kbd_mkin_0,
+	ann_gp_kbd_mkin_1 = tng_gp_kbd_mkin_1,
+	ann_gp_kbd_mkin_2 = tng_gp_kbd_mkin_2,
+	ann_gp_kbd_mkin_3 = tng_gp_kbd_mkin_3,
+	ann_gp_kbd_mkin_4 = tng_gp_kbd_mkin_4,
+	ann_gp_kbd_mkin_5 = tng_gp_kbd_mkin_5,
+	ann_gp_kbd_mkin_6 = tng_gp_kbd_mkin_6,
+	ann_gp_kbd_mkin_7 = tng_gp_kbd_mkin_7,
+	ann_gp_kbd_mkout_0 = tng_gp_kbd_mkout_0,
+	ann_gp_kbd_mkout_1 = tng_gp_kbd_mkout_1,
+	ann_gp_kbd_mkout_2 = tng_gp_kbd_mkout_2,
+	ann_gp_kbd_mkout_3 = tng_gp_kbd_mkout_3,
+	ann_gp_kbd_mkout_4 = tng_gp_kbd_mkout_4,
+	ann_gp_kbd_mkout_5 = tng_gp_kbd_mkout_5,
+	ann_gp_kbd_mkout_6 = tng_gp_kbd_mkout_6,
+	ann_gp_kbd_mkout_7 = tng_gp_kbd_mkout_7,
+
+	/* gpiopmic */
+	ann_prochot_b = tng_prochot_b,
+	ann_resetout_b = tng_resetout_b,
+	ann_rtc_clk = tng_rtc_clk,
+	ann_standby = tng_standby,
+	ann_svid_alert_b = 240,
+	ann_svid_vclk = tng_svid_vclk,
+	ann_svid_vdio = tng_svid_vdio,
+	ann_thermtrip_b = tng_thermtrip_b,
+	ann_xxpmode = tng_xxpmode,
+	ann_xxprdy = tng_xxprdy,
+	ann_xxpreq_b = tng_xxpreq_b,
+	ann_gp_fast_int_0 = tng_gp_fast_int_0,
+	ann_gp_fast_int_1 = tng_gp_fast_int_1,
+	ann_gp_fast_int_2 = tng_gp_fast_int_2,
+	ann_gp_fast_int_3 = tng_gp_fast_int_3,
+
+	/* gpiopti */
+	ann_gp_mpti_clk = tng_gp_mpti_clk,
+	ann_gp_mpti_data_0 = tng_gp_mpti_data_0,
+	ann_gp_mpti_data_1 = tng_gp_mpti_data_1,
+	ann_gp_mpti_data_2 = tng_gp_mpti_data_2,
+	ann_gp_mpti_data_3 = tng_gp_mpti_data_3,
+	ann_gp_0 = tng_gp_0,
+	ann_gp_1 = tng_gp_1,
+	ann_gp_2 = tng_gp_2,
+	ann_gp_3 = tng_gp_3,
+	ann_gp_4 = tng_gp_4,
+	ann_gp_5 = tng_gp_5,
+	ann_gp_6 = tng_gp_6,
+	ann_gp_7 = tng_gp_7,
+	ann_gp_8 = tng_gp_8,
+	ann_gp_9 = tng_gp_9,
+	ann_gp_10 = tng_gp_10,
+	ann_gp_11 = tng_gp_11,
+	ann_jtag_tckc = tng_jtag_tckc,
+	ann_jtag_tdic = tng_jtag_tdic,
+	ann_jtag_tdoc = tng_jtag_tdoc,
+	ann_jtag_tmsc = tng_jtag_tmsc,
+	ann_jtag_trst_b = tng_jtag_trst_b,
+
+	/* gpiosdio */
+	ann_gp_sdio_0_cd_b = tng_gp_sdio_0_cd_b,
+	ann_gp_sdio_0_clk = tng_gp_sdio_0_clk,
+	ann_gp_sdio_0_cmd = tng_gp_sdio_0_cmd,
+	ann_gp_sdio_0_dat_0 = tng_gp_sdio_0_dat_0,
+	ann_gp_sdio_0_dat_1 = tng_gp_sdio_0_dat_1,
+	ann_gp_sdio_0_dat_2 = tng_gp_sdio_0_dat_2,
+	ann_gp_sdio_0_dat_3 = tng_gp_sdio_0_dat_3,
+	ann_gp_sdio_0_lvl_clk_fb = tng_gp_sdio_0_lvl_clk_fb,
+	ann_gp_sdio_0_lvl_cmd_dir = tng_gp_sdio_0_lvl_cmd_dir,
+	ann_gp_sdio_0_lvl_dat_dir = tng_gp_sdio_0_lvl_dat_dir,
+	ann_gp_sdio_0_lvl_sel = tng_gp_sdio_0_lvl_sel,
+	ann_gp_sdio_0_powerdown_b = tng_gp_sdio_0_powerdown_b,
+	ann_gp_sdio_0_wp = tng_gp_sdio_0_wp,
+	ann_gp_sdio_1_clk = tng_gp_sdio_1_clk,
+	ann_gp_sdio_1_cmd = tng_gp_sdio_1_cmd,
+	ann_gp_sdio_1_dat_0 = tng_gp_sdio_1_dat_0,
+	ann_gp_sdio_1_dat_1 = tng_gp_sdio_1_dat_1,
+	ann_gp_sdio_1_dat_2 = tng_gp_sdio_1_dat_2,
+	ann_gp_sdio_1_dat_3 = tng_gp_sdio_1_dat_3,
+	ann_gp_sdio_1_powerdown_b = tng_gp_sdio_1_powerdown_b,
+
+	/* gpiossp */
+	ann_gp_ssp_3_clk = tng_gp_ssp_3_clk,
+	ann_gp_ssp_3_fs = tng_gp_ssp_3_fs,
+	ann_gp_ssp_3_rxd = tng_gp_ssp_3_rxd,
+	ann_gp_ssp_3_txd = tng_gp_ssp_3_txd,
+	ann_gp_ssp_4_clk = tng_gp_ssp_4_clk,
+	ann_gp_ssp_4_fs_0 = tng_gp_ssp_4_fs_0,
+	ann_gp_ssp_4_fs_1 = tng_gp_ssp_4_fs_1,
+	ann_gp_ssp_4_fs_2 = tng_gp_ssp_4_fs_2,
+	ann_gp_ssp_4_fs_3 = tng_gp_ssp_4_fs_3,
+	ann_gp_ssp_4_rxd = tng_gp_ssp_4_rxd,
+	ann_gp_ssp_4_txd = tng_gp_ssp_4_txd,
+	ann_gp_ssp_5_clk = tng_gp_ssp_5_clk,
+	ann_gp_ssp_5_fs_0 = tng_gp_ssp_5_fs_0,
+	ann_gp_ssp_5_fs_1 = tng_gp_ssp_5_fs_1,
+	ann_gp_ssp_5_fs_2 = tng_gp_ssp_5_fs_2,
+	ann_gp_ssp_5_fs_3 = tng_gp_ssp_5_fs_3,
+	ann_gp_ssp_5_rxd = tng_gp_ssp_5_rxd,
+	ann_gp_ssp_5_txd = tng_gp_ssp_5_txd,
+	ann_gp_ssp_6_clk = tng_gp_ssp_6_clk,
+	ann_gp_ssp_6_fs = tng_gp_ssp_6_fs,
+	ann_gp_ssp_6_rxd = tng_gp_ssp_6_rxd,
+	ann_gp_ssp_6_txd = tng_gp_ssp_6_txd,
+
+	/* gpiosspa */
+	ann_gp_mslim_0_bclk = tng_gp_mslim_0_bclk,
+	ann_gp_mslim_0_bdat = tng_gp_mslim_0_bdat,
+	ann_gp_ssp_0_clk = tng_gp_ssp_0_clk,
+	ann_gp_ssp_0_fs = tng_gp_ssp_0_fs,
+	ann_gp_ssp_0_rxd = tng_gp_ssp_0_rxd,
+	ann_gp_ssp_0_txd = tng_gp_ssp_0_txd,
+	ann_gp_ssp_1_clk = tng_gp_ssp_1_clk,
+	ann_gp_ssp_1_fs = tng_gp_ssp_1_fs,
+	ann_gp_ssp_1_rxd = tng_gp_ssp_1_rxd,
+	ann_gp_ssp_1_txd = tng_gp_ssp_1_txd,
+	ann_gp_ssp_2_clk = tng_gp_ssp_2_clk,
+	ann_gp_ssp_2_fs = tng_gp_ssp_2_fs,
+	ann_gp_ssp_2_rxd = tng_gp_ssp_2_rxd,
+	ann_gp_ssp_2_txd = tng_gp_ssp_2_txd,
+
+	/* gpiouart */
+	ann_gp_uart_0_cts = tng_gp_uart_0_cts,
+	ann_gp_uart_0_rts = tng_gp_uart_0_rts,
+	ann_gp_uart_0_rx = tng_gp_uart_0_rx,
+	ann_gp_uart_0_tx = tng_gp_uart_0_tx,
+	ann_gp_uart_1_cts = tng_gp_uart_1_cts,
+	ann_gp_uart_1_rts = tng_gp_uart_1_rts,
+	ann_gp_uart_1_rx = tng_gp_uart_1_rx,
+	ann_gp_uart_1_tx = tng_gp_uart_1_tx,
+	ann_gp_uart_2_cts = tng_gp_uart_2_cts,
+	ann_gp_uart_2_rts = tng_gp_uart_2_rts,
+	ann_gp_uart_2_rx = tng_gp_uart_2_rx,
+	ann_gp_uart_2_tx = tng_gp_uart_2_tx,
+	ann_gp_32 = 241,
+	ann_gp_33 = 242,
+	ann_gp_34 = 243,
+	ann_gp_35 = 244,
+	ann_gp_36 = 245,
+	ann_gp_37 = 246,
+	ann_gp_38 = 247,
+	ann_gp_39 = 248,
+	ann_gp_40 = 249,
+	ann_gp_pwm_0 = tng_gp_pwm_0,
+	ann_gp_pwm_1 = tng_gp_pwm_1,
+
+	/* gpioulpi */
+	ann_gp_ulpi_0_clk = tng_usb_ulpi_0_clk,
+	ann_gp_ulpi_0_data_0 = tng_usb_ulpi_0_data_0,
+	ann_gp_ulpi_0_data_1 = tng_usb_ulpi_0_data_1,
+	ann_gp_ulpi_0_data_2 = tng_usb_ulpi_0_data_2,
+	ann_gp_ulpi_0_data_3 = tng_usb_ulpi_0_data_3,
+	ann_gp_ulpi_0_data_4 = tng_usb_ulpi_0_data_4,
+	ann_gp_ulpi_0_data_5 = tng_usb_ulpi_0_data_5,
+	ann_gp_ulpi_0_data_6 = tng_usb_ulpi_0_data_6,
+	ann_gp_ulpi_0_data_7 = tng_usb_ulpi_0_data_7,
+	ann_gp_ulpi_0_dir = tng_usb_ulpi_0_dir,
+	ann_gp_ulpi_0_nxt = tng_usb_ulpi_0_nxt,
+	ann_gp_ulpi_0_refclk = tng_usb_ulpi_0_refclk,
+	ann_gp_ulpi_0_stp = tng_usb_ulpi_0_stp,
+	ANN_PIN_TABLE_SIZE = 250,
+};
+
+struct pinstruct_t {
+	bool valid;	/* the pin is allowed to be configured or not */
+	u8 bus_address;
+	u8 pullup_offset;
+	u8 pullup_lsb_pos;
+	u8 direction_offset;
+	u8 direction_lsb_pos;
+	u8 open_drain_offset;
+	u8 open_drain_bit;
+};
+
+enum ACCESS_CTRL {
+	readonly = (1 << 0),
+	writable = (1 << 1),
+};
+
+struct pin_mmio_flis_t {
+	u8 access_ctrl; /* mmio flis access control */
+	u32 offset;	/* pin offset from flis base address */
+};
+
+struct intel_scu_flis_platform_data {
+	struct pinstruct_t *pin_t;
+	int pin_num;
+	u32 flis_base;
+	u32 flis_len;
+	struct pin_mmio_flis_t *mmio_flis_t;
+	bool shim_access;
+};
+
+#define I2C_FLIS_START	0x1D00
+#define I2C_FLIS_END	0x1D3C
+
+#define OPS_STR_LEN 10
+
+enum {
+	DBG_SHIM_FLIS_ADDR,
+	DBG_SHIM_OFFSET,
+	DBG_SHIM_DATA,
+
+	DBG_PARAM_VAL,
+	DBG_PARAM_TYPE,
+	DBG_PIN_NAME,
+};
+
+int intel_scu_ipc_write_shim(u32 data, u32 flis_addr, u32 offset);
+int intel_scu_ipc_read_shim(u32 *data, u32 flis_addr, u32 offset);
+int intel_scu_ipc_update_shim(u32 data, u32 mask, u32 flis_addr, u32 offset);
+int config_pin_flis(unsigned int name, enum flis_param_t param, u32 val);
+int get_pin_flis(unsigned int name, enum flis_param_t param, u32 *val);
+u32 get_flis_value(u32 offset);
+void set_flis_value(u32 value, u32 offset);
+
+extern struct pinstruct_t ctp_pin_table[];
+#endif
diff --git a/arch/x86/include/asm/intel_scu_ipc.h b/arch/x86/include/asm/intel_scu_ipc.h
index 925b605..7de4a28 100644
--- a/arch/x86/include/asm/intel_scu_ipc.h
+++ b/arch/x86/include/asm/intel_scu_ipc.h
@@ -2,53 +2,117 @@
 #define  _ASM_X86_INTEL_SCU_IPC_H_
 
 #include <linux/notifier.h>
+#include <asm/intel-mid.h>
 
+/* IPC defines the following message types */
+#define IPCMSG_GET_HOBADDR	0xE5 /* OSHOB access. */
+#define IPCMSG_BATTERY          0xEF /* Coulomb Counter Accumulator */
+#define IPCMSG_MIP_ACCESS       0xEC /* IA MIP access */
+#define IPCMSG_PMDB_CMD		0xE0
 #define IPCMSG_WARM_RESET	0xF0
 #define IPCMSG_COLD_RESET	0xF1
 #define IPCMSG_SOFT_RESET	0xF2
 #define IPCMSG_COLD_BOOT	0xF3
-
+#define IPCMSG_COLD_OFF		0x80 /* for TNG only */
+#define IPCMSG_FW_REVISION      0xF4 /* Get firmware revision */
+#define IPCMSG_SHIM_CONFIG	0xF5 /* Configure SHIM */
+#define IPCMSG_WATCHDOG_TIMER   0xF8 /* Set Kernel Watchdog Threshold */
 #define IPCMSG_VRTC		0xFA	 /* Set vRTC device */
-	/* Command id associated with message IPCMSG_VRTC */
-	#define IPC_CMD_VRTC_SETTIME      1 /* Set time */
-	#define IPC_CMD_VRTC_SETALARM     2 /* Set alarm */
+#define IPCMSG_FW_UPDATE        0xFE /* Firmware update */
+#define IPCMSG_PCNTRL           0xFF /* Power controller unit read/write */
+#define IPCMSG_OSC_CLK		0xE6 /* Turn on/off osc clock */
+#define IPCMSG_S0IX_COUNTER	0xEB /* Get S0ix residency */
+#define IPCMSG_CLEAR_FABERROR	0xE3 /* Clear fabric error log */
+#define IPCMSG_SCULOG_CTRL	0xE1 /* SCU logging control message */
+#define IPCMSG_STORE_NV_DATA	0xCD /* Store the Non Volatile data to RAM */
 
-/* Read single register */
-int intel_scu_ipc_ioread8(u16 addr, u8 *data);
+#define IPCMSG_SCULOG_TRACE	0x90 /* SCU trace logging message */
 
-/* Read two sequential registers */
-int intel_scu_ipc_ioread16(u16 addr, u16 *data);
+#define IPC_CMD_SCU_LOG_DISABLE 0x00
+#define IPC_CMD_SCU_LOG_ENABLE	0x01
+#define IPC_CMD_SCU_LOG_DUMP	0x02
+#define IPC_CMD_SCU_LOG_DIS_RB	0x03
+#define IPC_CMD_SCU_LOG_EN_RB	0x04
+#define IPC_CMD_SCU_LOG_ADDR	0x05
+#define IPC_CMD_SCU_LOG_IATRACE	0x06
+#define IPC_CMD_SCU_EN_STATUS	0x07
 
-/* Read four sequential registers */
-int intel_scu_ipc_ioread32(u16 addr, u32 *data);
+#define IPC_CMD_UMIP_RD     0
+#define IPC_CMD_UMIP_WR     1
+#define IPC_CMD_SMIP_RD     2
 
-/* Read a vector */
-int intel_scu_ipc_readv(u16 *addr, u8 *data, int len);
+/* Command id associated with message IPCMSG_PCNTRL */
+#define IPC_CMD_PCNTRL_W      0 /* Register write */
+#define IPC_CMD_PCNTRL_R      1 /* Register read */
+#define IPC_CMD_PCNTRL_M      2 /* Register read-modify-write */
 
-/* Write single register */
-int intel_scu_ipc_iowrite8(u16 addr, u8 data);
+#define IPC_ERR_NONE			0
+#define IPC_ERR_CMD_NOT_SUPPORTED	1
+#define IPC_ERR_CMD_NOT_SERVICED	2
+#define IPC_ERR_UNABLE_TO_SERVICE	3
+#define IPC_ERR_CMD_INVALID		4
+#define IPC_ERR_CMD_FAILED		5
+#define IPC_ERR_EMSECURITY		6
+#define IPC_ERR_UNSIGNEDKERNEL		7
 
-/* Write two sequential registers */
-int intel_scu_ipc_iowrite16(u16 addr, u16 data);
+#define MSIC_DEBUG_FILE "msic"
+#define MSIC_ALL_DEBUG_FILE "msic_all"
+#define MAX_MSIC_REG   0x3FF
+#define MIN_MSIC_REG   0x0
 
-/* Write four sequential registers */
-int intel_scu_ipc_iowrite32(u16 addr, u32 data);
+/* Command id associated with SCULOG_CTRL */
+#define IPC_CMD_SCU_LOG_SUSPEND	1
+#define IPC_CMD_SCU_LOG_RESUME	2
 
-/* Write a vector */
-int intel_scu_ipc_writev(u16 *addr, u8 *data, int len);
+/* Command id associated with message IPCMSG_VRTC */
+#define IPC_CMD_VRTC_SETTIME      1 /* Set time */
+#define IPC_CMD_VRTC_SETALARM     2 /* Set alarm */
+#define IPC_CMD_VRTC_SYNC_RTC     3 /* Sync MSIC/PMIC RTC to VRTC */
 
-/* Update single register based on the mask */
-int intel_scu_ipc_update_register(u16 addr, u8 data, u8 mask);
+/* Command id associated with message IPCMSG_SHIM_CONFIG */
+#define IPC_CMD_SHIM_RD		0 /* SHIM read */
+#define IPC_CMD_SHIM_WR		1 /* SHIM write */
 
-/* Issue commands to the SCU with or without data */
-int intel_scu_ipc_simple_command(int cmd, int sub);
-int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen,
-							u32 *out, int outlen);
+int intel_scu_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen,
+		u32 *out, u32 outlen, u32 dptr, u32 sptr);
+
+/* check ipc status */
+int intel_scu_ipc_check_status(void);
+
 /* I2C control api */
 int intel_scu_ipc_i2c_cntrl(u32 addr, u32 *data);
 
 /* Update FW version */
-int intel_scu_ipc_fw_update(u8 *buffer, u32 length);
+int intel_scu_ipc_fw_update(void);
+int intel_scu_ipc_mrstfw_update(u8 *buffer, u32 length);
+int intel_scu_ipc_medfw_prepare(void __user *arg);
+
+#ifdef CONFIG_INTEL_SCU_IPC
+int intel_scu_ipc_read_mip(u8 *data, int len, int offset, int issigned);
+int intel_scu_ipc_write_umip(u8 *data, int len, int offset);
+#else
+/* Dummy function to prevent compilation error in BYT */
+static int intel_scu_ipc_read_mip(u8 *data, int len, int offset, int issigned)
+{
+	return 0;
+}
+static int intel_scu_ipc_write_umip(u8 *data, int len, int offset)
+{
+	return 0;
+}
+#endif
+
+/* NVRAM access */
+u32 intel_scu_ipc_get_nvram_size(void);
+phys_addr_t intel_scu_ipc_get_nvram_addr(void);
+
+/* Penwell has 4 osc clocks */
+#define OSC_CLK_AUDIO	0	/* Audio */
+#define OSC_CLK_CAM0	1	/* Primary camera */
+#define OSC_CLK_CAM1	2	/* Secondary camera */
+#define OSC_CLK_DISP	3	/* Display buffer */
+
+int intel_scu_ipc_osc_clk(u8 clk, unsigned int khz);
 
 extern struct blocking_notifier_head intel_scu_notifier;
 
diff --git a/arch/x86/include/asm/intel_scu_ipcutil.h b/arch/x86/include/asm/intel_scu_ipcutil.h
new file mode 100644
index 0000000..bfd8d3e
--- /dev/null
+++ b/arch/x86/include/asm/intel_scu_ipcutil.h
@@ -0,0 +1,139 @@
+#ifndef _ASM_X86_INTEL_SCU_IPCUTIL_H_
+#define _ASM_X86_INTEL_SCU_IPCUTIL_H_
+
+#include <linux/types.h>
+
+/* ioctl commnds */
+#define INTEL_SCU_IPC_REGISTER_READ	0
+#define INTEL_SCU_IPC_REGISTER_WRITE	1
+#define INTEL_SCU_IPC_REGISTER_UPDATE	2
+#define INTEL_SCU_IPC_FW_UPDATE			    0xA2
+#define INTEL_SCU_IPC_MEDFIELD_FW_UPDATE	    0xA3
+#define INTEL_SCU_IPC_FW_REVISION_GET		    0xB0
+#define INTEL_SCU_IPC_FW_REVISION_EXT_GET	    0xB1
+#define INTEL_SCU_IPC_S0IX_RESIDENCY		    0xB8
+#define INTEL_SCU_IPC_READ_RR_FROM_OSNIB	    0xC1
+#define INTEL_SCU_IPC_WRITE_RR_TO_OSNIB		    0xC2
+#define INTEL_SCU_IPC_READ_VBATTCRIT		    0xC4
+#define INTEL_SCU_IPC_WRITE_ALARM_FLAG_TO_OSNIB	    0xC5
+#define INTEL_SCU_IPC_OSC_CLK_CNTL		    0xC6
+#define INTEL_SCU_IPC_PMDB_ACCESS		    0xD0
+
+#define SIGNED_MOS_ATTR		0x0
+#define SIGNED_COS_ATTR		0x0A
+#define SIGNED_RECOVERY_ATTR	0x0C
+#define SIGNED_POS_ATTR		0x0E
+#define SIGNED_FACTORY_ATTR	0x12
+#define SIGNED_FACTORY2_ATTR	0x18
+#define SIGNED_BOOTONESHOOT_ATTR 0x1A
+
+enum intel_scu_ipc_wake_src {
+	WAKE_BATT_INSERT,
+	WAKE_PWR_BUTTON_PRESS,
+	WAKE_RTC_TIMER,
+	WAKE_USB_CHRG_INSERT,
+	WAKE_RESERVED,
+	WAKE_REAL_RESET,
+	WAKE_COLD_BOOT,
+	WAKE_UNKNOWN,
+	WAKE_KERNEL_WATCHDOG_RESET,
+	WAKE_SECURITY_WATCHDOG_RESET,
+	WAKE_WATCHDOG_COUNTER_EXCEEDED,
+	WAKE_POWER_SUPPLY_DETECTED,
+	WAKE_FASTBOOT_BUTTONS_COMBO,
+	WAKE_NO_MATCHING_OSIP_ENTRY,
+	WAKE_CRITICAL_BATTERY,
+	WAKE_INVALID_CHECKSUM,
+	WAKE_FORCED_RESET,
+	WAKE_ACDC_CHRG_INSERT,
+	WAKE_PMIC_WATCHDOG_RESET,
+	WAKE_PLATFORM_WATCHDOG_RESET,
+	WAKE_SC_WATCHDOG_RESET,
+	WAKE_KERNEL_PANIC
+};
+
+struct scu_ipc_data {
+	u32	count;  /* No. of registers */
+	u16	addr[5]; /* Register addresses */
+	u8	data[5]; /* Register data */
+	u8	mask; /* Valid for read-modify-write */
+};
+
+struct scu_ipc_version {
+	u32	count;  /* length of version info */
+	u8	data[16]; /* version data */
+};
+
+struct osc_clk_t {
+	u32	id; /* clock id */
+	u32	khz; /* clock frequency */
+};
+
+/* PMDB buffer, cmd, and limits */
+#define PMDB_SIZE              512
+#define PMDB_WMDB_SIZE         76
+#define PMDB_OTPDB_SIZE        384
+#define PMDB_OTPCTL_SIZE       48
+#define PMDB_ACCESS_SIZE       16
+
+#define PMDB_SUB_CMD_R_WMDB    0
+#define PMDB_SUB_CMD_R_OTPDB   1
+#define PMDB_SUB_CMD_W_WMDB    2
+#define PMDB_SUB_CMD_W_OTPDB   3
+#define PMDB_SUB_CMD_R_OTPCTL  4
+
+struct scu_ipc_pmdb_buffer {
+	u32	sub; /* sub cmd of SCU's PMDB IPC commands */
+	u32	count; /* length of PMDB buffer */
+	u32	offset; /* buffer start offset for each PMDB component */
+	u8	data[PMDB_SIZE]; /* PMDB buffer */
+};
+
+/* Penwell has 4 osc clocks */
+#define OSC_CLK_AUDIO	0	/* Audio */
+#define OSC_CLK_CAM0	1	/* Primary camera */
+#define OSC_CLK_CAM1	2	/* Secondary camera */
+#define OSC_CLK_DISP	3	/* Display buffer */
+
+#ifdef __KERNEL__
+
+int intel_scu_ipc_osc_clk(u8 clk, unsigned int khz);
+
+enum clk0_mode {
+	CLK0_AUDIENCE = 0x4,
+	CLK0_VIBRA1 = 0x8,
+	CLK0_VIBRA2 = 0x10,
+	CLK0_MSIC = 0x20,
+	CLK0_DEBUG = 0x100,
+	CLK0_QUERY = 0x1000,
+};
+
+int intel_scu_ipc_set_osc_clk0(unsigned int enable, enum clk0_mode mode);
+
+/* Helpers to turn on/off msic vprog1 and vprog2 */
+int intel_scu_ipc_msic_vprog1(int on);
+int intel_scu_ipc_msic_vprog2(int on);
+int intel_scu_ipc_msic_vprog3(int on);
+
+/* OSHOB-OS Handoff Buffer read */
+phys_addr_t intel_scu_ipc_get_oshob_base(void);
+int intel_scu_ipc_get_oshob_size(void);
+
+/* SCU trace buffer interface */
+phys_addr_t intel_scu_ipc_get_scu_trace_buffer(void);
+u32 intel_scu_ipc_get_scu_trace_buffer_size(void);
+u32 intel_scu_ipc_get_fabricerror_buf1_offset(void);
+u32 intel_scu_ipc_get_fabricerror_buf2_offset(void);
+
+/* OSNIB interface. */
+int intel_scu_ipc_write_osnib(u8 *data, int len, int offset);
+int intel_scu_ipc_read_osnib(u8 *data, int len, int offset);
+int intel_scu_ipc_write_osnib_extend(u8 *data, int len, int offset);
+int intel_scu_ipc_read_osnib_extend(u8 *data, int len, int offset);
+int intel_scu_ipc_write_osnib_rr(u8 rr);
+int intel_scu_ipc_read_osnib_rr(u8 *rr);
+int intel_scu_ipc_read_osnib_wd(u8 *wd);
+int intel_scu_ipc_write_osnib_wd(u8 *wd);
+#endif
+
+#endif
diff --git a/arch/x86/include/asm/intel_scu_pmic.h b/arch/x86/include/asm/intel_scu_pmic.h
new file mode 100644
index 0000000..308afa6
--- /dev/null
+++ b/arch/x86/include/asm/intel_scu_pmic.h
@@ -0,0 +1,16 @@
+#ifndef __INTEL_SCU_PMIC_H__
+#define __INTEL_SCU_PMIC_H__
+
+#include <asm/types.h>
+
+#define KOBJ_PMIC_ATTR(_name, _mode, _show, _store) \
+	struct kobj_attribute _name##_attr = __ATTR(_name, _mode, _show, _store)
+
+extern int intel_scu_ipc_ioread8(u16 addr, u8 *data);
+extern int intel_scu_ipc_ioread32(u16 addr, u32 *data);
+extern int intel_scu_ipc_readv(u16 *addr, u8 *data, int len);
+extern int intel_scu_ipc_iowrite8(u16 addr, u8 data);
+extern int intel_scu_ipc_writev(u16 *addr, u8 *data, int len);
+extern int intel_scu_ipc_update_register(u16 addr, u8 data, u8 mask);
+
+#endif /*__INTEL_SCU_PMIC_H__ */
diff --git a/arch/x86/include/asm/intel_soc_debug.h b/arch/x86/include/asm/intel_soc_debug.h
new file mode 100644
index 0000000..9edb166
--- /dev/null
+++ b/arch/x86/include/asm/intel_soc_debug.h
@@ -0,0 +1,43 @@
+/*
+ * intel_soc_debug.h
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifndef INTEL_SOC_DEBUG_H
+#define INTEL_SOC_DEBUG_H
+
+#define DEBUG_FEATURE_PTI      0x00000001
+#define DEBUG_FEATURE_RTIT     0x00000002
+#define DEBUG_FEATURE_LAKEMORE 0x00000004
+#define DEBUG_FEATURE_SOCHAPS  0x00000008
+#define DEBUG_FEATURE_USB3DFX  0x00000010
+
+/* cpu_has_debug_feature checks whether the debug
+ * feature passed as parameter is enabled.
+ * The passed parameter shall be one (and only one)
+ * of the above values (DEBUG_FEATURE_XXX).
+ * The function returns 1 if the debug feature is
+ * enabled and 0 otherwise.
+ */
+
+#ifdef CONFIG_INTEL_DEBUG_FEATURE
+extern int cpu_has_debug_feature(u32 bit);
+#else
+static inline int cpu_has_debug_feature(u32 bit) { return 0; };
+#endif
+
+#endif
diff --git a/arch/x86/include/asm/intel_sst_mrfld.h b/arch/x86/include/asm/intel_sst_mrfld.h
new file mode 100644
index 0000000..041ff85
--- /dev/null
+++ b/arch/x86/include/asm/intel_sst_mrfld.h
@@ -0,0 +1,44 @@
+/* intel_sst_mrlfd.h - Common enum of the Merrifield platform
+ *
+ *  Copyright (C) 2013 Intel Corp
+ *  Author: Samreen Nilofer <samreen.nilofer@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+#ifndef _INTEL_SST_MRFLD_H
+#define _INTEL_SST_MRFLD_H
+
+enum {
+	MERR_SALTBAY_AUDIO = 0,
+	MERR_SALTBAY_COMPR,
+	MERR_SALTBAY_VOIP,
+	MERR_SALTBAY_PROBE,
+	MERR_SALTBAY_AWARE,
+	MERR_SALTBAY_VAD,
+	MERR_SALTBAY_POWER,
+};
+
+enum {
+	MERR_DPCM_AUDIO = 0,
+	MERR_DPCM_DB,
+	MERR_DPCM_LL,
+	MERR_DPCM_COMPR,
+	MERR_DPCM_VOIP,
+	MERR_DPCM_PROBE,
+};
+
+#endif
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index d8e8eef..34f69cb 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -345,4 +345,11 @@
 
 #define IO_SPACE_LIMIT 0xffff
 
+#ifdef CONFIG_MTRR
+extern int __must_check arch_phys_wc_add(unsigned long base,
+					 unsigned long size);
+extern void arch_phys_wc_del(int handle);
+#define arch_phys_wc_add arch_phys_wc_add
+#endif
+
 #endif /* _ASM_X86_IO_H */
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
index 3a16c14..0297669 100644
--- a/arch/x86/include/asm/jump_label.h
+++ b/arch/x86/include/asm/jump_label.h
@@ -13,7 +13,7 @@
 
 static __always_inline bool arch_static_branch(struct static_key *key)
 {
-	asm goto("1:"
+	asm_volatile_goto("1:"
 		STATIC_KEY_INITIAL_NOP
 		".pushsection __jump_table,  \"aw\" \n\t"
 		_ASM_ALIGN "\n\t"
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index fa5f71e..e6833c6 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -32,11 +32,20 @@
 #define MCI_STATUS_PCC   (1ULL<<57)  /* processor context corrupt */
 #define MCI_STATUS_S	 (1ULL<<56)  /* Signaled machine check */
 #define MCI_STATUS_AR	 (1ULL<<55)  /* Action required */
-#define MCACOD		  0xffff     /* MCA Error Code */
+
+/*
+ * Note that the full MCACOD field of IA32_MCi_STATUS MSR is
+ * bits 15:0.  But bit 12 is the 'F' bit, defined for corrected
+ * errors to indicate that errors are being filtered by hardware.
+ * We should mask out bit 12 when looking for specific signatures
+ * of uncorrected errors - so the F bit is deliberately skipped
+ * in this #define.
+ */
+#define MCACOD		  0xefff     /* MCA Error Code */
 
 /* Architecturally defined codes from SDM Vol. 3B Chapter 15 */
 #define MCACOD_SCRUB	0x00C0	/* 0xC0-0xCF Memory Scrubbing */
-#define MCACOD_SCRUBMSK	0xfff0
+#define MCACOD_SCRUBMSK	0xeff0	/* Skip bit 12 ('F' bit) */
 #define MCACOD_L3WB	0x017A	/* L3 Explicit Writeback */
 #define MCACOD_DATA	0x0134	/* Data Load */
 #define MCACOD_INSTR	0x0150	/* Instruction Fetch */
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index cdbf367..be12c53 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -45,22 +45,28 @@
 		/* Re-load page tables */
 		load_cr3(next->pgd);
 
-		/* stop flush ipis for the previous mm */
+		/* Stop flush ipis for the previous mm */
 		cpumask_clear_cpu(cpu, mm_cpumask(prev));
 
-		/*
-		 * load the LDT, if the LDT is different:
-		 */
+		/* Load the LDT, if the LDT is different: */
 		if (unlikely(prev->context.ldt != next->context.ldt))
 			load_LDT_nolock(&next->context);
 	}
 #ifdef CONFIG_SMP
-	else {
+	  else {
 		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
 		BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
 
-		if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next))) {
-			/* We were in lazy tlb mode and leave_mm disabled
+		if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
+			/*
+			 * On established mms, the mm_cpumask is only changed
+			 * from irq context, from ptep_clear_flush() while in
+			 * lazy tlb mode, and here. Irqs are blocked during
+			 * schedule, protecting us from simultaneous changes.
+			 */
+			cpumask_set_cpu(cpu, mm_cpumask(next));
+			/*
+			 * We were in lazy tlb mode and leave_mm disabled
 			 * tlb flush IPI delivery. We must reload CR3
 			 * to make sure to use no freed page tables.
 			 */
diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
index e3b7819..a11269b 100644
--- a/arch/x86/include/asm/module.h
+++ b/arch/x86/include/asm/module.h
@@ -15,7 +15,7 @@
 #define MODULE_PROC_FAMILY "586MMX "
 #elif defined CONFIG_MCORE2
 #define MODULE_PROC_FAMILY "CORE2 "
-#elif defined CONFIG_MATOM
+#elif (defined CONFIG_MATOM) || (defined CONFIG_MSLM)
 #define MODULE_PROC_FAMILY "ATOM "
 #elif defined CONFIG_M686
 #define MODULE_PROC_FAMILY "686 "
diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h
index e235582..f768f62 100644
--- a/arch/x86/include/asm/mtrr.h
+++ b/arch/x86/include/asm/mtrr.h
@@ -26,7 +26,10 @@
 #include <uapi/asm/mtrr.h>
 
 
-/*  The following functions are for use by other drivers  */
+/*
+ * The following functions are for use by other drivers that cannot use
+ * arch_phys_wc_add and arch_phys_wc_del.
+ */
 # ifdef CONFIG_MTRR
 extern u8 mtrr_type_lookup(u64 addr, u64 end);
 extern void mtrr_save_fixed_ranges(void *);
@@ -45,6 +48,7 @@
 extern void mtrr_bp_restore(void);
 extern int mtrr_trim_uncached_memory(unsigned long end_pfn);
 extern int amd_special_default_mtrr(void);
+extern int phys_wc_to_mtrr_index(int handle);
 #  else
 static inline u8 mtrr_type_lookup(u64 addr, u64 end)
 {
@@ -80,6 +84,10 @@
 static inline void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
 {
 }
+static inline int phys_wc_to_mtrr_index(int handle)
+{
+	return -1;
+}
 
 #define mtrr_ap_init() do {} while (0)
 #define mtrr_bp_init() do {} while (0)
diff --git a/arch/x86/include/asm/platform_mrfld_audio.h b/arch/x86/include/asm/platform_mrfld_audio.h
new file mode 100644
index 0000000..fd7673b
--- /dev/null
+++ b/arch/x86/include/asm/platform_mrfld_audio.h
@@ -0,0 +1,26 @@
+/*
+ * platform_mrfld_audio.h: MRFLD audio platform data header file
+ *
+ * (C) Copyright 2012 Intel Corporation
+ * Author: Vinod Koul <vinod.koul@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_MRFLD_AUDIO_H_
+#define _PLATFORM_MRFLD_AUDIO_H_
+
+#include <linux/sfi.h>
+
+struct mrfld_audio_platform_data {
+	const struct soft_platform_id *spid;
+	int codec_gpio;
+	int codec_rst;
+	int spk_gpio;
+};
+
+extern void __init *merfld_audio_platform_data(void *info) __attribute__((weak));
+extern void __init *merfld_wm8958_audio_platform_data(void *info) __attribute__((weak));
+#endif
diff --git a/arch/x86/include/asm/platform_sst.h b/arch/x86/include/asm/platform_sst.h
new file mode 100644
index 0000000..12fa1a6
--- /dev/null
+++ b/arch/x86/include/asm/platform_sst.h
@@ -0,0 +1,133 @@
+
+/*
+ * platform_sst.h:  sst audio platform data header file
+ *
+ * Copyright (C) 2013 Intel Corporation
+ * Author: Dharageswari R <dharageswari.r@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#ifndef _PLATFORM_SST_H_
+#define _PLATFORM_SST_H_
+
+#include <linux/sfi.h>
+
+#define MAX_NUM_STREAMS_CTP	5
+#define MAX_NUM_STREAMS_MRFLD	25
+#define MAX_NUM_STREAMS	MAX_NUM_STREAMS_MRFLD
+
+#define SST_MAX_SSP_PORTS 4
+#define SST_MAX_DMA 2
+
+enum {
+	SST_SSP_AUDIO = 0,
+	SST_SSP_MODEM,
+	SST_SSP_BT,
+	SST_SSP_FM,
+};
+
+struct sst_gpio_config {
+	u32 i2s_rx_alt;
+	u32 i2s_tx_alt;
+	u32 i2s_frame;
+	u32 i2s_clock;
+	u32 alt_function;
+};
+
+struct sst_ssp_info {
+	u32 base_add;
+	struct sst_gpio_config gpio;
+	bool gpio_in_use;
+};
+
+struct sst_info {
+	u32 iram_start;
+	u32 iram_end;
+	bool iram_use;
+	u32 dram_start;
+	u32 dram_end;
+	bool dram_use;
+	u32 imr_start;
+	u32 imr_end;
+	bool imr_use;
+	u32 mailbox_start;
+	bool use_elf;
+	bool lpe_viewpt_rqd;
+	unsigned int max_streams;
+	u32 dma_max_len;
+	u8 num_probes;
+};
+
+struct sst_ssp_platform_cfg {
+	u8 ssp_cfg_sst;
+	u8 port_number;
+	u8 is_master;
+	u8 pack_mode;
+	u8 num_slots_per_frame;
+	u8 num_bits_per_slot;
+	u8 active_tx_map;
+	u8 active_rx_map;
+	u8 ssp_frame_format;
+	u8 frame_polarity;
+	u8 serial_bitrate_clk_mode;
+	u8 frame_sync_width;
+	u8 dma_handshake_interface_tx;
+	u8 dma_handshake_interface_rx;
+	u8 network_mode;
+	u8 start_delay;
+	u32 ssp_base_add;
+} __packed;
+
+struct sst_board_config_data {
+	struct sst_ssp_platform_cfg ssp_platform_data[SST_MAX_SSP_PORTS];
+	u8 active_ssp_ports;
+	u8 platform_id;
+	u8 board_id;
+	u8 ihf_num_chan;
+	u32 osc_clk_freq;
+} __packed;
+
+struct sst_platform_config_data {
+	u32 sst_sram_buff_base;
+	u32 sst_dma_base[SST_MAX_DMA];
+} __packed;
+
+struct sst_platform_debugfs_data {
+	u32 ssp_reg_size;
+	u32 dma_reg_size;
+	u32 checkpoint_offset;
+	u32 checkpoint_size;
+	u8 num_ssp;
+	u8 num_dma;
+};
+
+struct sst_ipc_info {
+	int ipc_offset;
+	bool use_32bit_ops;
+	unsigned int mbox_recv_off;
+};
+
+struct sst_lib_dnld_info {
+	unsigned int mod_base;
+	unsigned int mod_end;
+	unsigned int mod_table_offset;
+	unsigned int mod_table_size;
+	bool mod_ddr_dnld;
+};
+
+struct sst_platform_info {
+	const struct sst_info *probe_data;
+	const struct sst_ssp_info *ssp_data;
+	const struct sst_board_config_data *bdata;
+	const struct sst_platform_config_data *pdata;
+	const struct sst_ipc_info *ipc_info;
+	const struct sst_platform_debugfs_data *debugfs_data;
+	const struct sst_lib_dnld_info *lib_info;
+	bool enable_recovery;
+};
+
+#endif
diff --git a/arch/x86/include/asm/platform_sst_audio.h b/arch/x86/include/asm/platform_sst_audio.h
new file mode 100644
index 0000000..6333631
--- /dev/null
+++ b/arch/x86/include/asm/platform_sst_audio.h
@@ -0,0 +1,161 @@
+/*
+ * platform_sst_audio.h:  sst audio platform data header file
+ *
+ * Copyright (C) 2012 Intel Corporation
+ * Author: Jeeja KP <jeeja.kp@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_SST_AUDIO_H_
+#define _PLATFORM_SST_AUDIO_H_
+
+#include <linux/sfi.h>
+
+/* The stream map status is used to dynamically assign
+ * device-id to a device, for example probe device. If
+ * a stream map entry is free for a device then the device-id
+ * for that device will be popluated when the device is
+ * opened and then the status set to IN_USE. When device
+ * is closed, the strm map status is set to FREE again.
+ */
+enum sst_strm_map_status {
+	SST_DEV_MAP_FREE = 0,
+	SST_DEV_MAP_IN_USE,
+};
+
+/* Device IDs for CTP are same as stream IDs */
+enum sst_audio_device_id_ctp {
+	SST_PCM_OUT0 = 1,
+	SST_PCM_OUT1 = 2,
+	SST_COMPRESSED_OUT = 3,
+	SST_CAPTURE_IN = 4,
+	SST_PROBE_IN = 5,
+};
+
+enum sst_audio_task_id_mrfld {
+	SST_TASK_ID_NONE = 0,
+	SST_TASK_ID_SBA = 1,
+	SST_TASK_ID_FBA_UL = 2,
+	SST_TASK_ID_MEDIA = 3,
+	SST_TASK_ID_AWARE = 4,
+	SST_TASK_ID_FBA_DL = 5,
+	SST_TASK_ID_MAX = SST_TASK_ID_FBA_DL,
+};
+
+/* Device IDs for Merrifield are Pipe IDs,
+ * ref: LPE DSP command interface spec v0.75 */
+enum sst_audio_device_id_mrfld {
+	/* Output pipeline IDs */
+	PIPE_ID_OUT_START = 0x0,
+	PIPE_MODEM_OUT = 0x0,
+	PIPE_BT_OUT = 0x1,
+	PIPE_CODEC_OUT0 = 0x2,
+	PIPE_CODEC_OUT1 = 0x3,
+	PIPE_SPROT_LOOP_OUT = 0x4,
+	PIPE_MEDIA_LOOP1_OUT = 0x5,
+	PIPE_MEDIA_LOOP2_OUT = 0x6,
+	PIPE_PROBE_OUT = 0x7,
+	PIPE_HF_SNS_OUT = 0x8, /* VOCIE_UPLINK_REF2 */
+	PIPE_HF_OUT = 0x9, /* VOICE_UPLINK_REF1 */
+	PIPE_SPEECH_OUT = 0xA, /* VOICE UPLINK */
+	PIPE_RxSPEECH_OUT = 0xB, /* VOICE_DOWNLINK */
+	PIPE_VOIP_OUT = 0xC,
+	PIPE_PCM0_OUT = 0xD,
+	PIPE_PCM1_OUT = 0xE,
+	PIPE_PCM2_OUT = 0xF,
+	PIPE_AWARE_OUT = 0x10,
+	PIPE_VAD_OUT = 0x11,
+	PIPE_MEDIA0_OUT = 0x12,
+	PIPE_MEDIA1_OUT = 0x13,
+	PIPE_FM_OUT = 0x14,
+	PIPE_PROBE1_OUT = 0x15,
+	PIPE_PROBE2_OUT = 0x16,
+	PIPE_PROBE3_OUT = 0x17,
+	PIPE_PROBE4_OUT = 0x18,
+	PIPE_PROBE5_OUT = 0x19,
+	PIPE_PROBE6_OUT = 0x1A,
+	PIPE_PROBE7_OUT = 0x1B,
+	PIPE_PROBE8_OUT = 0x1C,
+/* Input Pipeline IDs */
+	PIPE_ID_IN_START = 0x80,
+	PIPE_MODEM_IN = 0x80,
+	PIPE_BT_IN = 0x81,
+	PIPE_CODEC_IN0 = 0x82,
+	PIPE_CODEC_IN1 = 0x83,
+	PIPE_SPROT_LOOP_IN = 0x84,
+	PIPE_MEDIA_LOOP1_IN = 0x85,
+	PIPE_MEDIA_LOOP2_IN = 0x86,
+	PIPE_PROBE_IN = 0x87,
+	PIPE_SIDETONE_IN = 0x88,
+	PIPE_TxSPEECH_IN = 0x89,
+	PIPE_SPEECH_IN = 0x8A,
+	PIPE_TONE_IN = 0x8B,
+	PIPE_VOIP_IN = 0x8C,
+	PIPE_PCM0_IN = 0x8D,
+	PIPE_PCM1_IN = 0x8E,
+	PIPE_MEDIA0_IN = 0x8F,
+	PIPE_MEDIA1_IN = 0x90,
+	PIPE_MEDIA2_IN = 0x91,
+	PIPE_FM_IN = 0x92,
+	PIPE_PROBE1_IN = 0x93,
+	PIPE_PROBE2_IN = 0x94,
+	PIPE_PROBE3_IN = 0x95,
+	PIPE_PROBE4_IN = 0x96,
+	PIPE_PROBE5_IN = 0x97,
+	PIPE_PROBE6_IN = 0x98,
+	PIPE_PROBE7_IN = 0x99,
+	PIPE_PROBE8_IN = 0x9A,
+	PIPE_MEDIA3_IN = 0x9C,
+	PIPE_LOW_PCM0_IN = 0x9D,
+	PIPE_RSVD = 0xFF,
+};
+
+/* The stream map for each platform consists of an array of the below
+ * stream map structure. The array index is used as the static stream-id
+ * associated with a device and (dev_num,subdev_num,direction) tuple match
+ * gives the device_id for the device.
+ */
+struct sst_dev_stream_map {
+	u8 dev_num;
+	u8 subdev_num;
+	u8 direction;
+	u8 device_id;
+	u8 task_id;
+	u8 status;
+};
+
+#define MAX_DESCRIPTOR_SIZE 172
+
+struct sst_dev_effects_map {
+	char	uuid[16];
+	u16	algo_id;
+	char	descriptor[MAX_DESCRIPTOR_SIZE];
+};
+
+struct sst_dev_effects_resource_map {
+	char  uuid[16];
+	unsigned int flags;
+	u16 cpuLoad;
+	u16 memoryUsage;
+};
+
+struct sst_dev_effects {
+	struct sst_dev_effects_map *effs_map;
+	struct sst_dev_effects_resource_map *effs_res_map;
+	unsigned int effs_num_map;
+};
+
+struct sst_platform_data {
+	/* Intel software platform id*/
+	const struct soft_platform_id *spid;
+	struct sst_dev_stream_map *pdev_strm_map;
+	struct sst_dev_effects pdev_effs;
+	unsigned int strm_map_size;
+};
+
+int add_sst_platform_device(void);
+#endif
+
diff --git a/arch/x86/include/asm/pmic_pdata.h b/arch/x86/include/asm/pmic_pdata.h
new file mode 100644
index 0000000..5301711
--- /dev/null
+++ b/arch/x86/include/asm/pmic_pdata.h
@@ -0,0 +1,46 @@
+#ifndef __PMIC_PDATA_H__
+#define __PMIC_PDATA_H__
+
+struct temp_lookup {
+	int adc_val;
+	int temp;
+	int temp_err;
+};
+
+/*
+ * pmic cove charger driver info
+ */
+struct pmic_platform_data {
+	void (*cc_to_reg)(int, u8*);
+	void (*cv_to_reg)(int, u8*);
+	void (*inlmt_to_reg)(int, u8*);
+	int max_tbl_row_cnt;
+	struct temp_lookup *adc_tbl;
+};
+
+extern int pmic_get_status(void);
+extern int pmic_enable_charging(bool);
+extern int pmic_set_cc(int);
+extern int pmic_set_cv(int);
+extern int pmic_set_ilimma(int);
+extern int pmic_enable_vbus(bool enable);
+extern int pmic_handle_otgmode(bool enable);
+/* WA for ShadyCove VBUS removal detect issue */
+extern int pmic_handle_low_supply(void);
+
+extern void dump_pmic_regs(void);
+#ifdef CONFIG_PMIC_CCSM
+extern int pmic_get_health(void);
+extern int pmic_get_battery_pack_temp(int *);
+#else
+static int pmic_get_health(void)
+{
+	return 0;
+}
+static int pmic_get_battery_pack_temp(int *temp)
+{
+	return 0;
+}
+#endif
+
+#endif
diff --git a/arch/x86/include/asm/proto.h b/arch/x86/include/asm/proto.h
index 6fd3fd7..a8980e9 100644
--- a/arch/x86/include/asm/proto.h
+++ b/arch/x86/include/asm/proto.h
@@ -18,6 +18,12 @@
 void x86_report_nx(void);
 
 extern int reboot_force;
+enum {
+	REBOOT_FORCE_COLD_RESET = 1,
+	REBOOT_FORCE_COLD_BOOT,
+	REBOOT_FORCE_OFF,
+	REBOOT_FORCE_ON
+};
 
 long do_arch_prctl(struct task_struct *task, int code, unsigned long addr);
 
diff --git a/arch/x86/include/asm/required-features.h b/arch/x86/include/asm/required-features.h
index 5c6e4fb..ee177f7 100644
--- a/arch/x86/include/asm/required-features.h
+++ b/arch/x86/include/asm/required-features.h
@@ -47,7 +47,7 @@
 # define NEED_NOPL	0
 #endif
 
-#ifdef CONFIG_MATOM
+#if defined(CONFIG_MATOM) || defined(CONFIG_MSLM)
 # define NEED_MOVBE	(1<<(X86_FEATURE_MOVBE & 31))
 #else
 # define NEED_MOVBE	0
diff --git a/arch/x86/include/asm/scu_ipc_rpmsg.h b/arch/x86/include/asm/scu_ipc_rpmsg.h
new file mode 100644
index 0000000..f4aded0
--- /dev/null
+++ b/arch/x86/include/asm/scu_ipc_rpmsg.h
@@ -0,0 +1,19 @@
+#ifndef _SCU_IPC_RPMSG_H_
+#define _SCU_IPC_RPMSG_H_
+
+struct tx_ipc_msg {
+	u32 cmd;
+	u32 sub;
+	u8 *in;
+	u32 *out;
+	u32 inlen;	/* number of bytes to be written */
+	u32 outlen;	/* number of dwords to be read */
+	u32 sptr;	/* needed for raw ipc command */
+	u32 dptr;	/* needed for raw ipc command */
+};
+
+struct rx_ipc_msg {
+	u32 status;	/* Indicate IPC status, 0-success, 1-fail */
+};
+
+#endif
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index 2e327f1..e993660 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -49,9 +49,9 @@
 extern void setup_default_timer_irq(void);
 
 #ifdef CONFIG_X86_INTEL_MID
-extern void x86_mrst_early_setup(void);
+extern void x86_intel_mid_early_setup(void);
 #else
-static inline void x86_mrst_early_setup(void) { }
+static inline void x86_intel_mid_early_setup(void) { }
 #endif
 
 #ifdef CONFIG_X86_INTEL_CE
diff --git a/arch/x86/include/asm/xor_avx.h b/arch/x86/include/asm/xor_avx.h
index 7ea79c5..492b298 100644
--- a/arch/x86/include/asm/xor_avx.h
+++ b/arch/x86/include/asm/xor_avx.h
@@ -167,12 +167,12 @@
 
 #define AVX_XOR_SPEED \
 do { \
-	if (cpu_has_avx) \
+	if (cpu_has_avx && cpu_has_osxsave) \
 		xor_speed(&xor_block_avx); \
 } while (0)
 
 #define AVX_SELECT(FASTEST) \
-	(cpu_has_avx ? &xor_block_avx : FASTEST)
+	(cpu_has_avx && cpu_has_osxsave ? &xor_block_avx : FASTEST)
 
 #else
 
diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h
index c15ddaf..9c3733c 100644
--- a/arch/x86/include/uapi/asm/bootparam.h
+++ b/arch/x86/include/uapi/asm/bootparam.h
@@ -158,7 +158,7 @@
 	X86_SUBARCH_PC = 0,
 	X86_SUBARCH_LGUEST,
 	X86_SUBARCH_XEN,
-	X86_SUBARCH_MRST,
+	X86_SUBARCH_INTEL_MID,
 	X86_SUBARCH_CE4100,
 	X86_NR_SUBARCHS,
 };
diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
index 2af848d..65eb2c4 100644
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
@@ -310,6 +310,11 @@
 #define MSR_AMD_PERF_STATUS		0xc0010063
 #define MSR_AMD_PERF_CTL		0xc0010062
 
+#define MSR_IA32_POWER_MISC		0x00000120
+
+#define ENABLE_ULFM_AUTOCM		(1 << 2)
+#define ENABLE_INDP_AUTOCM		(1 << 3)
+
 #define MSR_IA32_MPERF			0x000000e7
 #define MSR_IA32_APERF			0x000000e8
 
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 111eb35..c5975ec 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -112,6 +112,6 @@
 	obj-$(CONFIG_GART_IOMMU)	+= amd_gart_64.o aperture_64.o
 	obj-$(CONFIG_CALGARY_IOMMU)	+= pci-calgary_64.o tce_64.o
 
-	obj-$(CONFIG_PCI_MMCONFIG)	+= mmconf-fam10h_64.o
+#	obj-$(CONFIG_PCI_MMCONFIG)	+= mmconf-fam10h_64.o
 	obj-y				+= vsmp_64.o
 endif
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index b44577b..ec94e11 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -48,9 +48,20 @@
 #ifndef CONFIG_64BIT
 	native_store_gdt((struct desc_ptr *)&header->pmode_gdt);
 
+	/*
+	 * We have to check that we can write back the value, and not
+	 * just read it.  At least on 90 nm Pentium M (Family 6, Model
+	 * 13), reading an invalid MSR is not guaranteed to trap, see
+	 * Erratum X4 in "Intel Pentium M Processor on 90 nm Process
+	 * with 2-MB L2 Cache and Intel® Processor A100 and A110 on 90
+	 * nm process with 512-KB L2 Cache Specification Update".
+	 */
 	if (!rdmsr_safe(MSR_EFER,
 			&header->pmode_efer_low,
-			&header->pmode_efer_high))
+			&header->pmode_efer_high) &&
+	    !wrmsr_safe(MSR_EFER,
+			header->pmode_efer_low,
+			header->pmode_efer_high))
 		header->pmode_behavior |= (1 << WAKEUP_BEHAVIOR_RESTORE_EFER);
 #endif /* !CONFIG_64BIT */
 
@@ -61,7 +72,10 @@
 	}
 	if (!rdmsr_safe(MSR_IA32_MISC_ENABLE,
 			&header->pmode_misc_en_low,
-			&header->pmode_misc_en_high))
+			&header->pmode_misc_en_high) &&
+	    !wrmsr_safe(MSR_IA32_MISC_ENABLE,
+			header->pmode_misc_en_low,
+			header->pmode_misc_en_high))
 		header->pmode_behavior |=
 			(1 << WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE);
 	header->realmode_flags = acpi_realmode_flags;
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index 3048ded..59554dc 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -20,6 +20,7 @@
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
 	{}
 };
@@ -27,6 +28,7 @@
 
 static const struct pci_device_id amd_nb_link_ids[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
 	{}
 };
@@ -81,13 +83,20 @@
 			next_northbridge(misc, amd_nb_misc_ids);
 		node_to_amd_nb(i)->link = link =
 			next_northbridge(link, amd_nb_link_ids);
-        }
+	}
 
+	/* GART present only on Fam15h upto model 0fh */
 	if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
-	    boot_cpu_data.x86 == 0x15)
+	    (boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model < 0x10))
 		amd_northbridges.flags |= AMD_NB_GART;
 
 	/*
+	 * Check for L3 cache presence.
+	 */
+	if (!cpuid_edx(0x80000006))
+		return 0;
+
+	/*
 	 * Some CPU families support L3 Cache Index Disable. There are some
 	 * limitations because of E382 and E388 on family 0x10.
 	 */
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index c9876ef..af5b08a 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -40,7 +40,7 @@
 
 #include <asm/fixmap.h>
 #include <asm/apb_timer.h>
-#include <asm/mrst.h>
+#include <asm/intel-mid.h>
 #include <asm/time.h>
 
 #define APBT_CLOCKEVENT_RATING		110
@@ -157,13 +157,13 @@
 
 	adev->num = smp_processor_id();
 	adev->timer = dw_apb_clockevent_init(smp_processor_id(), "apbt0",
-		mrst_timer_options == MRST_TIMER_LAPIC_APBT ?
+		intel_mid_timer_options == INTEL_MID_TIMER_LAPIC_APBT ?
 		APBT_CLOCKEVENT_RATING - 100 : APBT_CLOCKEVENT_RATING,
 		adev_virt_addr(adev), 0, apbt_freq);
 	/* Firmware does EOI handling for us. */
 	adev->timer->eoi = NULL;
 
-	if (mrst_timer_options == MRST_TIMER_LAPIC_APBT) {
+	if (intel_mid_timer_options == INTEL_MID_TIMER_LAPIC_APBT) {
 		global_clock_event = &adev->timer->ced;
 		printk(KERN_DEBUG "%s clockevent registered as global\n",
 		       global_clock_event->name);
@@ -253,7 +253,7 @@
 
 static __init int apbt_late_init(void)
 {
-	if (mrst_timer_options == MRST_TIMER_LAPIC_APBT ||
+	if (intel_mid_timer_options == INTEL_MID_TIMER_LAPIC_APBT ||
 		!apb_timer_block_enabled)
 		return 0;
 	/* This notifier should be called after workqueue is ready */
@@ -340,7 +340,7 @@
 	}
 #ifdef CONFIG_SMP
 	/* kernel cmdline disable apb timer, so we will use lapic timers */
-	if (mrst_timer_options == MRST_TIMER_LAPIC_APBT) {
+	if (intel_mid_timer_options == INTEL_MID_TIMER_LAPIC_APBT) {
 		printk(KERN_INFO "apbt: disabled per cpu timer\n");
 		return;
 	}
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 904611b..719487a 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -54,6 +54,7 @@
 #include <asm/mce.h>
 #include <asm/tsc.h>
 #include <asm/hypervisor.h>
+#include <asm/intel-mid.h>
 
 unsigned int num_processors;
 
@@ -700,7 +701,7 @@
 		lapic_clockevent.mult = div_sc(lapic_timer_frequency/APIC_DIVISOR,
 					TICK_NSEC, lapic_clockevent.shift);
 		lapic_clockevent.max_delta_ns =
-			clockevent_delta2ns(0x7FFFFF, &lapic_clockevent);
+			clockevent_delta2ns(0x7FFFFFFF, &lapic_clockevent);
 		lapic_clockevent.min_delta_ns =
 			clockevent_delta2ns(0xF, &lapic_clockevent);
 		lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
@@ -2224,6 +2225,24 @@
 	unsigned long flags;
 	int maxlvt;
 
+	/*
+	 * On intel_mid, the suspend flow is a bit different, and the lapic
+	 * hw implementation, and integration is not supporting standard
+	 * suspension.
+	 * This implementation is only putting high value to the timer, so that
+	 * AONT global timer will be updated with this big value at s0i3 entry,
+	 * and wont produce timer based wake up event.
+	 */
+	if ((intel_mid_identify_cpu() != 0) ||
+			(boot_cpu_data.x86_model == 0x37)) {
+		if (this_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
+			wrmsrl(MSR_IA32_TSC_DEADLINE, 0);
+		else
+			apic_write(APIC_TMICT, ~0);
+
+		return 0;
+	}
+
 	if (!apic_pm_state.active)
 		return 0;
 
@@ -2261,6 +2280,21 @@
 	unsigned int l, h;
 	unsigned long flags;
 	int maxlvt;
+	u64 tsc;
+
+	/*
+	 * On intel_mid, the resume flow is a bit different.
+	 * Refer explanation on lapic_suspend.
+	 */
+	if ((intel_mid_identify_cpu() != 0) ||
+			(boot_cpu_data.x86_model == 0x37)) {
+		if (this_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER)) {
+			rdtscll(tsc);
+			wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + 10);
+		} else
+			apic_write(APIC_TMICT, 10);
+		return;
+	}
 
 	if (!apic_pm_state.active)
 		return;
diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c
index a698d71..abe2841 100644
--- a/arch/x86/kernel/apic/hw_nmi.c
+++ b/arch/x86/kernel/apic/hw_nmi.c
@@ -55,6 +55,12 @@
 			break;
 		mdelay(1);
 	}
+	if (cpumask_empty(to_cpumask(backtrace_mask)))
+		printk(KERN_INFO "All CPUs responded to NMI.\n");
+	else
+		for_each_cpu(i, to_cpumask(backtrace_mask))
+			printk(KERN_INFO "CPU %d did not respond to NMI%s.\n",
+				i, (smp_processor_id() == i) ? " (itself)" : "");
 
 	clear_bit(0, &backtrace_flag);
 	smp_mb__after_clear_bit();
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 9ed796c..3f31f9e 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -60,6 +60,7 @@
 #include <asm/irq_remapping.h>
 #include <asm/hpet.h>
 #include <asm/hw_irq.h>
+#include <asm/intel-mid.h>
 
 #include <asm/apic.h>
 
@@ -316,6 +317,24 @@
 	writel(vector, &io_apic->eoi);
 }
 
+/*
+ * This index matches with 1024 - 4 address in SCU RTE table area.
+ * That is not used for anything. Works in CLVP only
+ */
+#define LAST_INDEX_IN_IO_APIC_SPACE 255
+#define KERNEL_TO_SCU_PANIC_REQUEST (0x0515dead)
+void apic_scu_panic_dump(void)
+{
+	unsigned long flags;
+
+	printk(KERN_ERR "Request SCU panic dump");
+	raw_spin_lock_irqsave(&ioapic_lock, flags);
+	io_apic_write(0, LAST_INDEX_IN_IO_APIC_SPACE,
+		      KERNEL_TO_SCU_PANIC_REQUEST);
+	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
+}
+EXPORT_SYMBOL_GPL(apic_scu_panic_dump);
+
 unsigned int native_io_apic_read(unsigned int apic, unsigned int reg)
 {
 	struct io_apic __iomem *io_apic = io_apic_base(apic);
@@ -2355,6 +2374,10 @@
 	return ret;
 }
 
+static int ioapic_set_wake(struct irq_data *data, unsigned int on)
+{
+	return 0;
+}
 static void ack_apic_edge(struct irq_data *data)
 {
 	irq_complete_move(data->chip_data);
@@ -2519,7 +2542,9 @@
 	.irq_ack		= ack_apic_edge,
 	.irq_eoi		= ack_apic_level,
 	.irq_set_affinity	= native_ioapic_set_affinity,
+	.irq_set_wake		= ioapic_set_wake,
 	.irq_retrigger		= ioapic_retrigger_irq,
+	.flags			= IRQCHIP_SKIP_SET_WAKE,
 };
 
 static inline void init_IO_APIC_traps(void)
@@ -2857,7 +2882,9 @@
 	sync_Arb_IDs();
 	setup_IO_APIC_irqs();
 	init_IO_APIC_traps();
-	if (legacy_pic->nr_legacy_irqs)
+
+	/* Skip the timer check for newer CPU with ARAT timer */
+	if (!boot_cpu_has(X86_FEATURE_ARAT) && legacy_pic->nr_legacy_irqs)
 		check_timer();
 }
 
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 794f6eb..b32dbb4 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -98,7 +98,7 @@
 		break;
 	case UV3_HUB_PART_NUMBER:
 	case UV3_HUB_PART_NUMBER_X:
-		uv_min_hub_revision_id += UV3_HUB_REVISION_BASE - 1;
+		uv_min_hub_revision_id += UV3_HUB_REVISION_BASE;
 		break;
 	}
 
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 5013a48..2017ac0 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -663,11 +663,13 @@
 
 #ifdef CONFIG_X86_64
 	if (c->x86 == 0x10) {
+#if 0
 		/* do this for boot cpu */
 		if (c == &boot_cpu_data)
 			check_enable_amd_mmconf_dmi();
 
 		fam10h_check_enable_mmcfg();
+#endif
 	}
 
 	if (c == &boot_cpu_data && c->x86 >= 0xf) {
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 9b0c441..c2647be 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -101,6 +101,8 @@
 		switch (c->x86_model) {
 		case 0x27:	/* Penwell */
 		case 0x35:	/* Cloverview */
+		case 0x37:	/* ValleyView */
+		case 0x4A:	/* Merrifield */
 			set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3);
 			break;
 		default:
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 47a1870..95db10f 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -30,8 +30,17 @@
 #include <asm/mce.h>
 #include <asm/msr.h>
 
-/* How long to wait between reporting thermal events */
+/*
+ * How long to wait between reporting thermal events ?
+ * If Interrupt is enabled for Coretemp driver, the BIOS
+ * takes care of hysteresis and thus there are no spurious
+ * interrupts expected. Hence making this interval to 0.
+ */
+#ifdef CONFIG_SENSORS_CORETEMP_INTERRUPT
+#define CHECK_INTERVAL         (0)
+#else
 #define CHECK_INTERVAL		(300 * HZ)
+#endif
 
 #define THERMAL_THROTTLING_EVENT	0
 #define POWER_LIMIT_EVENT		1
@@ -177,12 +186,12 @@
 	/* if we just entered the thermal event */
 	if (new_event) {
 		if (event == THERMAL_THROTTLING_EVENT)
-			printk(KERN_CRIT "CPU%d: %s temperature above threshold, cpu clock throttled (total events = %lu)\n",
+			pr_crit_ratelimited("CPU%d: %s temperature above threshold, cpu clock throttled (total events = %lu)\n",
 				this_cpu,
 				level == CORE_LEVEL ? "Core" : "Package",
 				state->count);
 		else
-			printk(KERN_CRIT "CPU%d: %s power limit notification (total events = %lu)\n",
+			pr_crit_ratelimited("CPU%d: %s power limit notification (total events = %lu)\n",
 				this_cpu,
 				level == CORE_LEVEL ? "Core" : "Package",
 				state->count);
@@ -190,11 +199,11 @@
 	}
 	if (old_event) {
 		if (event == THERMAL_THROTTLING_EVENT)
-			printk(KERN_INFO "CPU%d: %s temperature/speed normal\n",
+			pr_info_ratelimited("CPU%d: %s temperature/speed normal\n",
 				this_cpu,
 				level == CORE_LEVEL ? "Core" : "Package");
 		else
-			printk(KERN_INFO "CPU%d: %s power limit normal\n",
+			pr_info_ratelimited("CPU%d: %s power limit normal\n",
 				this_cpu,
 				level == CORE_LEVEL ? "Core" : "Package");
 		return 1;
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index fa72a39..3982357 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -510,8 +510,9 @@
 static void generic_get_mtrr(unsigned int reg, unsigned long *base,
 			     unsigned long *size, mtrr_type *type)
 {
-	unsigned int mask_lo, mask_hi, base_lo, base_hi;
-	unsigned int tmp, hi;
+	u32 mask_lo, mask_hi, base_lo, base_hi;
+	unsigned int hi;
+	u64 tmp, mask;
 
 	/*
 	 * get_mtrr doesn't need to update mtrr_state, also it could be called
@@ -532,18 +533,18 @@
 	rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);
 
 	/* Work out the shifted address mask: */
-	tmp = mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT;
-	mask_lo = size_or_mask | tmp;
+	tmp = (u64)mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT;
+	mask = size_or_mask | tmp;
 
 	/* Expand tmp with high bits to all 1s: */
-	hi = fls(tmp);
+	hi = fls64(tmp);
 	if (hi > 0) {
-		tmp |= ~((1<<(hi - 1)) - 1);
+		tmp |= ~((1ULL<<(hi - 1)) - 1);
 
-		if (tmp != mask_lo) {
+		if (tmp != mask) {
 			printk(KERN_WARNING "mtrr: your BIOS has configured an incorrect mask, fixing it.\n");
 			add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
-			mask_lo = tmp;
+			mask = tmp;
 		}
 	}
 
@@ -551,8 +552,8 @@
 	 * This works correctly if size is a power of two, i.e. a
 	 * contiguous range:
 	 */
-	*size = -mask_lo;
-	*base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
+	*size = -mask;
+	*base = (u64)base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
 	*type = base_lo & 0xff;
 
 out_put_cpu:
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 726bf96..f961de9 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -51,9 +51,13 @@
 #include <asm/e820.h>
 #include <asm/mtrr.h>
 #include <asm/msr.h>
+#include <asm/pat.h>
 
 #include "mtrr.h"
 
+/* arch_phys_wc_add returns an MTRR register index plus this offset. */
+#define MTRR_TO_PHYS_WC_OFFSET 1000
+
 u32 num_var_ranges;
 
 unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
@@ -305,7 +309,8 @@
 		return -EINVAL;
 	}
 
-	if (base & size_or_mask || size & size_or_mask) {
+	if ((base | (base + size - 1)) >>
+	    (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) {
 		pr_warning("mtrr: base or size exceeds the MTRR width\n");
 		return -EINVAL;
 	}
@@ -524,6 +529,73 @@
 }
 EXPORT_SYMBOL(mtrr_del);
 
+/**
+ * arch_phys_wc_add - add a WC MTRR and handle errors if PAT is unavailable
+ * @base: Physical base address
+ * @size: Size of region
+ *
+ * If PAT is available, this does nothing.  If PAT is unavailable, it
+ * attempts to add a WC MTRR covering size bytes starting at base and
+ * logs an error if this fails.
+ *
+ * Drivers must store the return value to pass to mtrr_del_wc_if_needed,
+ * but drivers should not try to interpret that return value.
+ */
+int arch_phys_wc_add(unsigned long base, unsigned long size)
+{
+	int ret;
+
+	if (pat_enabled)
+		return 0;  /* Success!  (We don't need to do anything.) */
+
+	ret = mtrr_add(base, size, MTRR_TYPE_WRCOMB, true);
+	if (ret < 0) {
+		pr_warn("Failed to add WC MTRR for [%p-%p]; performance may suffer.",
+			(void *)base, (void *)(base + size - 1));
+		return ret;
+	}
+	return ret + MTRR_TO_PHYS_WC_OFFSET;
+}
+EXPORT_SYMBOL(arch_phys_wc_add);
+
+/*
+ * arch_phys_wc_del - undoes arch_phys_wc_add
+ * @handle: Return value from arch_phys_wc_add
+ *
+ * This cleans up after mtrr_add_wc_if_needed.
+ *
+ * The API guarantees that mtrr_del_wc_if_needed(error code) and
+ * mtrr_del_wc_if_needed(0) do nothing.
+ */
+void arch_phys_wc_del(int handle)
+{
+	if (handle >= 1) {
+		WARN_ON(handle < MTRR_TO_PHYS_WC_OFFSET);
+		mtrr_del(handle - MTRR_TO_PHYS_WC_OFFSET, 0, 0);
+	}
+}
+EXPORT_SYMBOL(arch_phys_wc_del);
+
+/*
+ * phys_wc_to_mtrr_index - translates arch_phys_wc_add's return value
+ * @handle: Return value from arch_phys_wc_add
+ *
+ * This will turn the return value from arch_phys_wc_add into an mtrr
+ * index suitable for debugging.
+ *
+ * Note: There is no legitimate use for this function, except possibly
+ * in printk line.  Alas there is an illegitimate use in some ancient
+ * drm ioctls.
+ */
+int phys_wc_to_mtrr_index(int handle)
+{
+	if (handle < MTRR_TO_PHYS_WC_OFFSET)
+		return -1;
+	else
+		return handle - MTRR_TO_PHYS_WC_OFFSET;
+}
+EXPORT_SYMBOL_GPL(phys_wc_to_mtrr_index);
+
 /*
  * HACK ALERT!
  * These should be called implicitly, but we can't yet until all the initcall
@@ -583,6 +655,7 @@
 
 int __initdata changed_by_mtrr_cleanup;
 
+#define SIZE_OR_MASK_BITS(n)  (~((1ULL << ((n) - PAGE_SHIFT)) - 1))
 /**
  * mtrr_bp_init - initialize mtrrs on the boot CPU
  *
@@ -600,7 +673,7 @@
 
 	if (cpu_has_mtrr) {
 		mtrr_if = &generic_mtrr_ops;
-		size_or_mask = 0xff000000;			/* 36 bits */
+		size_or_mask = SIZE_OR_MASK_BITS(36);
 		size_and_mask = 0x00f00000;
 		phys_addr = 36;
 
@@ -619,7 +692,7 @@
 			     boot_cpu_data.x86_mask == 0x4))
 				phys_addr = 36;
 
-			size_or_mask = ~((1ULL << (phys_addr - PAGE_SHIFT)) - 1);
+			size_or_mask = SIZE_OR_MASK_BITS(phys_addr);
 			size_and_mask = ~size_or_mask & 0xfffff00000ULL;
 		} else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR &&
 			   boot_cpu_data.x86 == 6) {
@@ -627,7 +700,7 @@
 			 * VIA C* family have Intel style MTRRs,
 			 * but don't support PAE
 			 */
-			size_or_mask = 0xfff00000;		/* 32 bits */
+			size_or_mask = SIZE_OR_MASK_BITS(32);
 			size_and_mask = 0;
 			phys_addr = 32;
 		}
@@ -637,21 +710,21 @@
 			if (cpu_has_k6_mtrr) {
 				/* Pre-Athlon (K6) AMD CPU MTRRs */
 				mtrr_if = mtrr_ops[X86_VENDOR_AMD];
-				size_or_mask = 0xfff00000;	/* 32 bits */
+				size_or_mask = SIZE_OR_MASK_BITS(32);
 				size_and_mask = 0;
 			}
 			break;
 		case X86_VENDOR_CENTAUR:
 			if (cpu_has_centaur_mcr) {
 				mtrr_if = mtrr_ops[X86_VENDOR_CENTAUR];
-				size_or_mask = 0xfff00000;	/* 32 bits */
+				size_or_mask = SIZE_OR_MASK_BITS(32);
 				size_and_mask = 0;
 			}
 			break;
 		case X86_VENDOR_CYRIX:
 			if (cpu_has_cyrix_arr) {
 				mtrr_if = mtrr_ops[X86_VENDOR_CYRIX];
-				size_or_mask = 0xfff00000;	/* 32 bits */
+				size_or_mask = SIZE_OR_MASK_BITS(32);
 				size_and_mask = 0;
 			}
 			break;
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 1025f3c..db8df97 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -180,8 +180,8 @@
 
 static bool check_hw_exists(void)
 {
-	u64 val, val_fail, val_new= ~0;
-	int i, reg, reg_fail, ret = 0;
+	u64 val, val_fail = 0, val_new = ~0;
+	int i, reg, reg_fail = 0, ret = 0;
 	int bios_fail = 0;
 
 	/*
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 52441a2..8aac56b 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -314,8 +314,8 @@
 static struct uncore_event_desc snbep_uncore_qpi_events[] = {
 	INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
 	INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
-	INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x02,umask=0x08"),
-	INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x03,umask=0x04"),
+	INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x102,umask=0x08"),
+	INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x103,umask=0x04"),
 	{ /* end: all zeroes */ },
 };
 
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index d32abea..174da5f 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -658,15 +658,18 @@
  * boot_params.e820_map, others are passed via SETUP_E820_EXT node of
  * linked list of struct setup_data, which is parsed here.
  */
-void __init parse_e820_ext(struct setup_data *sdata)
+void __init parse_e820_ext(u64 phys_addr, u32 data_len)
 {
 	int entries;
 	struct e820entry *extmap;
+	struct setup_data *sdata;
 
+	sdata = early_memremap(phys_addr, data_len);
 	entries = sdata->len / sizeof(struct e820entry);
 	extmap = (struct e820entry *)(sdata->data);
 	__append_e820_map(extmap, entries);
 	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
+	early_iounmap(sdata, data_len);
 	printk(KERN_INFO "e820: extended physical RAM map:\n");
 	e820_print_map("extended");
 }
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 94ab6b9..63bdb29 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -196,15 +196,23 @@
 static void __init intel_remapping_check(int num, int slot, int func)
 {
 	u8 revision;
+	u16 device;
 
+	device = read_pci_config_16(num, slot, func, PCI_DEVICE_ID);
 	revision = read_pci_config_byte(num, slot, func, PCI_REVISION_ID);
 
 	/*
-	 * Revision 0x13 of this chipset supports irq remapping
-	 * but has an erratum that breaks its behavior, flag it as such
+ 	 * Revision 13 of all triggering devices id in this quirk have
+	 * a problem draining interrupts when irq remapping is enabled,
+	 * and should be flagged as broken.  Additionally revisions 0x12
+	 * and 0x22 of device id 0x3405 has this problem.
 	 */
 	if (revision == 0x13)
 		set_irq_remapping_broken();
+	else if ((device == 0x3405) &&
+	    ((revision == 0x12) ||
+	     (revision == 0x22)))
+		set_irq_remapping_broken();
 
 }
 
@@ -239,6 +247,8 @@
 	  PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs_contd },
 	{ PCI_VENDOR_ID_INTEL, 0x3403, PCI_CLASS_BRIDGE_HOST,
 	  PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
+	{ PCI_VENDOR_ID_INTEL, 0x3405, PCI_CLASS_BRIDGE_HOST,
+	  PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
 	{ PCI_VENDOR_ID_INTEL, 0x3406, PCI_CLASS_BRIDGE_HOST,
 	  PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
 	{}
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
index d15f575..3b78cb7 100644
--- a/arch/x86/kernel/early_printk.c
+++ b/arch/x86/kernel/early_printk.c
@@ -14,7 +14,7 @@
 #include <xen/hvc-console.h>
 #include <asm/pci-direct.h>
 #include <asm/fixmap.h>
-#include <asm/mrst.h>
+#include <asm/intel-mid.h>
 #include <asm/pgtable.h>
 #include <linux/usb/ehci_def.h>
 
@@ -228,11 +228,16 @@
 			mrst_early_console_init();
 			early_console_register(&early_mrst_console, keep);
 		}
-
+		if (!strncmp(buf, "mrfld", 5)) {
+			mrfld_early_console_init();
+			early_console_register(&early_mrfld_console, keep);
+		}
 		if (!strncmp(buf, "hsu", 3)) {
 			hsu_early_console_init(buf + 3);
 			early_console_register(&early_hsu_console, keep);
 		}
+		if (!strncmp(buf, "pti", 3))
+			early_console_register(&early_pti_console, keep);
 #endif
 		buf++;
 	}
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index 138463a..8f344e7 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -35,8 +35,8 @@
 
 	/* Call the subarch specific early setup function */
 	switch (boot_params.hdr.hardware_subarch) {
-	case X86_SUBARCH_MRST:
-		x86_mrst_early_setup();
+	case X86_SUBARCH_INTEL_MID:
+		x86_intel_mid_early_setup();
 		break;
 	case X86_SUBARCH_CE4100:
 		x86_ce4100_early_setup();
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 55b6761..2cd839a 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -190,5 +190,21 @@
 
 	reserve_ebda_region();
 
+	/* Call the subarch specific early setup function */
+	switch (boot_params.hdr.hardware_subarch) {
+	case X86_SUBARCH_INTEL_MID:
+		printk(KERN_CRIT "X86_SUBARCH_INTEL_MID\n");
+		x86_intel_mid_early_setup();
+		break;
+	case X86_SUBARCH_CE4100:
+		printk(KERN_CRIT "X86_SUBARCH_CE4100\n");
+		x86_ce4100_early_setup();
+		break;
+	default:
+		printk(KERN_CRIT "X86_SUBARCH default (%x)\n",
+			boot_params.hdr.hardware_subarch);
+		break;
+	}
+
 	start_kernel();
 }
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 321d65e..a836860 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -513,7 +513,7 @@
 #include "../../x86/xen/xen-head.S"
 	
 	.section .bss, "aw", @nobits
-	.align L1_CACHE_BYTES
+	.align PAGE_SIZE
 ENTRY(idt_table)
 	.skip IDT_ENTRIES * 16
 
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index cb33909..f7ea30d 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -116,7 +116,7 @@
 
 	if (cpu_has_fxsr) {
 		memset(&fx_scratch, 0, sizeof(struct i387_fxsave_struct));
-		asm volatile("fxsave %0" : : "m" (fx_scratch));
+		asm volatile("fxsave %0" : "+m" (fx_scratch));
 		mask = fx_scratch.mxcsr_mask;
 		if (mask == 0)
 			mask = 0x0000ffbf;
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index ac0631d..ff014de 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -273,7 +273,10 @@
 
 		data = irq_desc_get_irq_data(desc);
 		affinity = data->affinity;
-		if (!irq_has_action(irq) || irqd_is_per_cpu(data) ||
+		/* include IRQs who have no action, but are chained */
+		if ((!irq_has_action(irq) && !irq_is_chained(irq)) ||
+			irq_is_nested_thread(irq) ||
+			irqd_is_per_cpu(data) ||
 		    cpumask_subset(affinity, cpu_online_mask)) {
 			raw_spin_unlock(&desc->lock);
 			continue;
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index ce13049..e5aea3f 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -174,9 +174,6 @@
 	unsigned int cpu = iminor(file_inode(file));
 	struct cpuinfo_x86 *c;
 
-	if (!capable(CAP_SYS_RAWIO))
-		return -EPERM;
-
 	if (cpu >= nr_cpu_ids || !cpu_online(cpu))
 		return -ENXIO;	/* No such CPU */
 
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 1ce8966..d413b8f 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -38,9 +38,7 @@
  */
 DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
 
-#ifdef CONFIG_X86_64
 static DEFINE_PER_CPU(unsigned char, is_idle);
-#endif
 
 struct kmem_cache *task_xstate_cachep;
 EXPORT_SYMBOL_GPL(task_xstate_cachep);
@@ -240,7 +238,6 @@
 }
 #endif
 
-#ifdef CONFIG_X86_64
 void enter_idle(void)
 {
 	this_cpu_write(is_idle, 1);
@@ -262,7 +259,6 @@
 		return;
 	__exit_idle();
 }
-#endif
 
 void arch_cpu_idle_enter(void)
 {
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 76fa1e9..5c25b50 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -36,7 +36,7 @@
 EXPORT_SYMBOL(pm_power_off);
 
 static const struct desc_ptr no_idt = {};
-static int reboot_mode;
+static enum reboot_mode reboot_mode;
 enum reboot_type reboot_type = BOOT_ACPI;
 int reboot_force;
 
@@ -88,11 +88,11 @@
 
 		switch (*str) {
 		case 'w':
-			reboot_mode = 0x1234;
+			reboot_mode = REBOOT_WARM;
 			break;
 
 		case 'c':
-			reboot_mode = 0;
+			reboot_mode = REBOOT_COLD;
 			break;
 
 #ifdef CONFIG_SMP
@@ -447,6 +447,22 @@
 			DMI_MATCH(DMI_PRODUCT_NAME, "Precision M6600"),
 		},
 	},
+	{	/* Handle problems with rebooting on the Dell PowerEdge C6100. */
+		.callback = set_pci_reboot,
+		.ident = "Dell PowerEdge C6100",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "C6100"),
+		},
+	},
+	{	/* Some C6100 machines were shipped with vendor being 'Dell'. */
+		.callback = set_pci_reboot,
+		.ident = "Dell PowerEdge C6100",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "C6100"),
+		},
+	},
 	{ }
 };
 
@@ -536,6 +552,7 @@
 	int i;
 	int attempt = 0;
 	int orig_reboot_type = reboot_type;
+	unsigned short mode;
 
 	if (reboot_emergency)
 		emergency_vmx_disable_all();
@@ -543,7 +560,8 @@
 	tboot_shutdown(TB_SHUTDOWN_REBOOT);
 
 	/* Tell the BIOS if we want cold or warm reboot */
-	*((unsigned short *)__va(0x472)) = reboot_mode;
+	mode = reboot_mode == REBOOT_WARM ? 0x1234 : 0;
+	*((unsigned short *)__va(0x472)) = mode;
 
 	for (;;) {
 		/* Could also try the reset bit in the Hammer NB */
@@ -585,7 +603,7 @@
 
 		case BOOT_EFI:
 			if (efi_enabled(EFI_RUNTIME_SERVICES))
-				efi.reset_system(reboot_mode ?
+				efi.reset_system(reboot_mode == REBOOT_WARM ?
 						 EFI_RESET_WARM :
 						 EFI_RESET_COLD,
 						 EFI_SUCCESS, 0, NULL);
@@ -598,10 +616,13 @@
 
 		case BOOT_CF9_COND:
 			if (port_cf9_safe) {
-				u8 cf9 = inb(0xcf9) & ~6;
+				u8 reboot_code = reboot_mode == REBOOT_WARM ?
+					0x06 : 0x0E;
+				u8 cf9 = inb(0xcf9) & ~reboot_code;
 				outb(cf9|2, 0xcf9); /* Request hard reset */
 				udelay(50);
-				outb(cf9|6, 0xcf9); /* Actually do the reset */
+				/* Actually do the reset */
+				outb(cf9|reboot_code, 0xcf9);
 				udelay(50);
 			}
 			reboot_type = BOOT_KBD;
diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c
index 198eb20..bdf8460 100644
--- a/arch/x86/kernel/rtc.c
+++ b/arch/x86/kernel/rtc.c
@@ -11,9 +11,10 @@
 
 #include <asm/vsyscall.h>
 #include <asm/x86_init.h>
+#include <asm/intel-mid.h>
 #include <asm/time.h>
-#include <asm/mrst.h>
 #include <asm/rtc.h>
+#include <asm/io_apic.h>
 
 #ifdef CONFIG_X86_32
 /*
@@ -149,6 +150,26 @@
 	ts->tv_nsec = 0;
 }
 
+static int handle_mrfl_dev_ioapic(int irq)
+{
+	int ret = 0;
+	int ioapic;
+	struct io_apic_irq_attr irq_attr;
+
+	ioapic = mp_find_ioapic(irq);
+	if (ioapic >= 0) {
+		irq_attr.ioapic = ioapic;
+		irq_attr.ioapic_pin = irq;
+		irq_attr.trigger = 1;
+		irq_attr.polarity = 0; /* Active high */
+		io_apic_set_pci_routing(NULL, irq, &irq_attr);
+	} else {
+		pr_warn("can not find interrupt %d in ioapic\n", irq);
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
 
 static struct resource rtc_resources[] = {
 	[0] = {
@@ -172,6 +193,8 @@
 
 static __init int add_rtc_cmos(void)
 {
+	int ret;
+
 #ifdef CONFIG_PNP
 	static const char * const  const ids[] __initconst =
 	    { "PNP0b00", "PNP0b01", "PNP0b02", };
@@ -191,10 +214,18 @@
 	if (of_have_populated_dt())
 		return 0;
 
-	/* Intel MID platforms don't have ioport rtc */
-	if (mrst_identify_cpu())
+	/* Intel MID platforms don't have ioport rtc
+	 * except Tangier & Anniedale platform, which doesn't have vRTC
+	 */
+	if (intel_mid_identify_cpu() &&
+	    intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_TANGIER &&
+	    intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_ANNIEDALE)
 		return -ENODEV;
 
+	ret = handle_mrfl_dev_ioapic(RTC_IRQ);
+	if (ret)
+		return ret;
+
 	platform_device_register(&rtc_device);
 	dev_info(&rtc_device.dev,
 		 "registered platform RTC device (no PNP device found)\n");
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 56f7fcf..eb424a2 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -110,6 +110,7 @@
 #include <asm/mce.h>
 #include <asm/alternative.h>
 #include <asm/prom.h>
+#include <asm/intel-mid.h>
 
 /*
  * max_low_pfn_mapped: highest direct mapped pfn under 4GB
@@ -426,25 +427,23 @@
 static void __init parse_setup_data(void)
 {
 	struct setup_data *data;
-	u64 pa_data;
+	u64 pa_data, pa_next;
 
 	pa_data = boot_params.hdr.setup_data;
 	while (pa_data) {
-		u32 data_len, map_len;
+		u32 data_len, map_len, data_type;
 
 		map_len = max(PAGE_SIZE - (pa_data & ~PAGE_MASK),
 			      (u64)sizeof(struct setup_data));
 		data = early_memremap(pa_data, map_len);
 		data_len = data->len + sizeof(struct setup_data);
-		if (data_len > map_len) {
-			early_iounmap(data, map_len);
-			data = early_memremap(pa_data, data_len);
-			map_len = data_len;
-		}
+		data_type = data->type;
+		pa_next = data->next;
+		early_iounmap(data, map_len);
 
-		switch (data->type) {
+		switch (data_type) {
 		case SETUP_E820_EXT:
-			parse_e820_ext(data);
+			parse_e820_ext(pa_data, data_len);
 			break;
 		case SETUP_DTB:
 			add_dtb(pa_data);
@@ -452,8 +451,7 @@
 		default:
 			break;
 		}
-		pa_data = data->next;
-		early_iounmap(data, map_len);
+		pa_data = pa_next;
 	}
 }
 
@@ -495,6 +493,24 @@
 	}
 }
 
+#ifdef CONFIG_CRASH_DUMP
+static void __init e820_crashdump_remove_ram(void)
+{
+	/*
+	* We are doing a crash dump, so remove all RAM ranges
+	* as they are the ones that need to be dumped.
+	* We still need all non-RAM information in order to do I/O.
+	*/
+	/* NOTE: if you use old kexec, please remove memmap=exactmap
+	* which remove all ranges, not only RAM ranges.
+	*/
+	saved_max_pfn = e820_end_of_ram_pfn();
+	e820_remove_range(0, ULLONG_MAX, E820_RAM, 1);
+	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
+	e820_print_map("crash_dump");
+}
+#endif
+
 /*
  * --------- Crashkernel reservation ------------------------------
  */
@@ -929,6 +945,9 @@
 	parse_setup_data();
 	/* update the e820_saved too */
 	e820_reserve_setup_data();
+#ifdef CONFIG_CRASH_DUMP
+	e820_crashdump_remove_ram();
+#endif
 
 	copy_edd();
 
@@ -1199,6 +1218,10 @@
 
 	x86_init.resources.reserve_resources();
 
+#ifdef CONFIG_INTEL_MID_PSTORE_RAM
+	pstore_ram_reserve_memory();
+#endif
+
 	e820_setup_gap();
 
 #ifdef CONFIG_VT
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 6956299..087ab2a 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -364,7 +364,7 @@
 		else
 			put_user_ex(0, &frame->uc.uc_flags);
 		put_user_ex(0, &frame->uc.uc_link);
-		err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
+		save_altstack_ex(&frame->uc.uc_stack, regs->sp);
 
 		/* Set up to return from userspace.  */
 		restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
@@ -429,7 +429,7 @@
 		else
 			put_user_ex(0, &frame->uc.uc_flags);
 		put_user_ex(0, &frame->uc.uc_link);
-		err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
+		save_altstack_ex(&frame->uc.uc_stack, regs->sp);
 
 		/* Set up to return from userspace.  If provided, use a stub
 		   already in userspace.  */
@@ -496,7 +496,7 @@
 		else
 			put_user_ex(0, &frame->uc.uc_flags);
 		put_user_ex(0, &frame->uc.uc_link);
-		err |= __compat_save_altstack(&frame->uc.uc_stack, regs->sp);
+		compat_save_altstack_ex(&frame->uc.uc_stack, regs->sp);
 		put_user_ex(0, &frame->uc.uc__pad0);
 
 		if (ksig->ka.sa.sa_flags & SA_RESTORER) {
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index fe86275..4ffe6bb 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1322,21 +1322,25 @@
 	return 0;
 }
 
+/*
+ * We let cpus' idle tasks announce their own death to complete
+ * logical cpu unplug sequence.
+ */
+DECLARE_COMPLETION(cpu_die_comp);
+
 void native_cpu_die(unsigned int cpu)
 {
 	/* We don't do anything here: idle task is faking death itself. */
-	unsigned int i;
+	unsigned long timeout = HZ; /* 1 sec */
 
-	for (i = 0; i < 10; i++) {
-		/* They ack this in play_dead by setting CPU_DEAD */
-		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
-			if (system_state == SYSTEM_RUNNING)
-				pr_info("CPU %u is now offline\n", cpu);
-			return;
-		}
-		msleep(100);
-	}
-	pr_err("CPU %u didn't die...\n", cpu);
+	/* They ack this in play_dead by setting CPU_DEAD */
+	wait_for_completion_timeout(&cpu_die_comp, timeout);
+	if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
+		if (system_state == SYSTEM_RUNNING)
+			pr_info("CPU %u is now offline\n", cpu);
+		return;
+	} else
+		pr_err("CPU %u didn't die...\n", cpu);
 }
 
 void play_dead_common(void)
@@ -1348,6 +1352,7 @@
 	mb();
 	/* Ack it */
 	__this_cpu_write(cpu_state, CPU_DEAD);
+	complete(&cpu_die_comp);
 
 	/*
 	 * With physical CPU hotplug, we should halt the cpu
@@ -1400,8 +1405,15 @@
 				highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
 			}
 		}
-		eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
-			(highest_subcstate - 1);
+
+		if (highest_cstate < 6) {
+			eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
+				(highest_subcstate - 1);
+		} else {
+			/* For s0i3 substate code is 4 */
+			eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
+				((highest_subcstate - 1) * 2);
+		}
 	}
 
 	/*
@@ -1413,6 +1425,13 @@
 
 	wbinvd();
 
+	/*
+	 * FIXME: SCU will abort S3 entry with ACK C6 timeout
+	 * if the lapic timer value programmed is low.
+	 * Hence program a high value before offlineing the CPU
+	 */
+	apic_write(APIC_TMICT, ~0);
+
 	while (1) {
 		/*
 		 * The CLFLUSH is a workaround for erratum AAI65 for
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
index fdd0c64..17214ad 100644
--- a/arch/x86/kernel/stacktrace.c
+++ b/arch/x86/kernel/stacktrace.c
@@ -8,6 +8,7 @@
 #include <linux/module.h>
 #include <linux/uaccess.h>
 #include <asm/stacktrace.h>
+#include <linux/mm.h>
 
 static int save_stack_stack(void *data, char *name)
 {
@@ -89,22 +90,78 @@
 };
 
 static int
-copy_stack_frame(const void __user *fp, struct stack_frame_user *frame)
+__copy_stack_frame(const void __user *fp, void *frame, unsigned long framesize)
 {
 	int ret;
 
-	if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
+	if (!access_ok(VERIFY_READ, fp, framesize))
 		return 0;
 
 	ret = 1;
 	pagefault_disable();
-	if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
+	if (__copy_from_user_inatomic(frame, fp, framesize))
 		ret = 0;
 	pagefault_enable();
 
 	return ret;
 }
 
+#ifdef CONFIG_COMPAT
+
+#include <asm/compat.h>
+
+struct compat_stack_frame_user {
+	compat_uptr_t next_fp;
+	compat_ulong_t ret_addr;
+};
+
+static inline int copy_stack_frame(const void __user *fp,
+				   struct stack_frame_user *frame)
+{
+	struct compat_stack_frame_user frame32 = { 0 };
+
+	if (!test_thread_flag(TIF_IA32))
+		return __copy_stack_frame(fp, frame, sizeof(*frame));
+
+	if (!__copy_stack_frame(fp, &frame32, sizeof(frame32)))
+		return 0;
+
+	frame->next_fp = compat_ptr(frame32.next_fp);
+	frame->ret_addr = (unsigned long)frame32.ret_addr;
+	return 1;
+}
+
+static inline int access_frame(struct task_struct *task, unsigned long addr,
+			       struct stack_frame_user *frame)
+{
+	struct compat_stack_frame_user frame32 = { 0 };
+
+	if (!test_tsk_thread_flag(task, TIF_IA32))
+		return access_process_vm(task, addr,
+					 (void *)frame, sizeof(*frame), 0);
+
+	if (!access_process_vm(task, addr, (void *)&frame32,
+			       sizeof(frame32), 0))
+		return 0;
+
+	frame->next_fp = compat_ptr(frame32.next_fp);
+	frame->ret_addr = (unsigned long)frame32.ret_addr;
+	return 1;
+}
+#else
+static inline int copy_stack_frame(const void __user *fp,
+				   struct stack_frame_user *frame)
+{
+	return __copy_stack_frame(fp, frame, sizeof(*frame));
+}
+
+static inline int access_frame(struct task_struct *task, unsigned long addr,
+			       struct stack_frame_user *frame)
+{
+	return access_process_vm(task, addr, (void *)frame, sizeof(*frame), 0);
+}
+#endif
+
 static inline void __save_stack_trace_user(struct stack_trace *trace)
 {
 	const struct pt_regs *regs = task_pt_regs(current);
@@ -144,3 +201,56 @@
 		trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
 
+static inline void __save_stack_trace_user_task(struct task_struct *task,
+		struct stack_trace *trace)
+{
+	const struct pt_regs *regs = task_pt_regs(task);
+	const void __user *fp;
+	unsigned long addr;
+#ifdef CONFIG_SMP
+	if (task != current && task->state == TASK_RUNNING && task->on_cpu) {
+		/* To trap into kernel at least once */
+		smp_send_reschedule(task_cpu(task));
+	}
+#endif
+	fp = (const void __user *)regs->bp;
+	if (trace->nr_entries < trace->max_entries)
+		trace->entries[trace->nr_entries++] = regs->ip;
+
+	while (trace->nr_entries < trace->max_entries) {
+		struct stack_frame_user frame;
+
+		frame.next_fp = NULL;
+		frame.ret_addr = 0;
+
+		addr = (unsigned long)fp;
+		if (!access_frame(task, addr, &frame))
+			break;
+		if ((unsigned long)fp < regs->sp)
+			break;
+		if (frame.ret_addr) {
+			trace->entries[trace->nr_entries++] =
+				frame.ret_addr;
+		}
+		if (fp == frame.next_fp)
+			break;
+		fp = frame.next_fp;
+	}
+}
+
+void save_stack_trace_user_task(struct task_struct *task,
+		struct stack_trace *trace)
+{
+	if (task == current || !task) {
+		save_stack_trace_user(trace);
+		return;
+	}
+
+	if (task->mm)
+		__save_stack_trace_user_task(task, trace);
+
+	if (trace->nr_entries < trace->max_entries)
+		trace->entries[trace->nr_entries++] = ULONG_MAX;
+}
+EXPORT_SYMBOL_GPL(save_stack_trace_user_task);
+
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index dbded5a..30277e2 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -101,7 +101,7 @@
 				*begin = new_begin;
 		}
 	} else {
-		*begin = TASK_UNMAPPED_BASE;
+		*begin = current->mm->mmap_legacy_base;
 		*end = TASK_SIZE;
 	}
 }
diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
index 24d3c91..8bbc603 100644
--- a/arch/x86/kernel/time.c
+++ b/arch/x86/kernel/time.c
@@ -21,6 +21,7 @@
 #include <asm/timer.h>
 #include <asm/hpet.h>
 #include <asm/time.h>
+#include <asm/intel-mid.h>
 
 #ifdef CONFIG_X86_64
 DEFINE_VVAR(volatile unsigned long, jiffies) = INITIAL_JIFFIES;
@@ -76,7 +77,9 @@
 {
 	if (!hpet_enable())
 		setup_pit_timer();
-	setup_default_timer_irq();
+	/* Skip the lecacy timer setup for CPU with ARAT timer */
+	if (!boot_cpu_has(X86_FEATURE_ARAT))
+		setup_default_timer_irq();
 }
 
 static __init void x86_late_time_init(void)
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 00a2873..8a75efd 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -271,6 +271,36 @@
 }
 #endif
 
+#ifdef CONFIG_X86_64
+static unsigned long count_bits(unsigned long value)
+{
+	value = value - ((value >> 1) & 0x5555555555555555);
+	value = (value & 0x3333333333333333) + ((value >> 2) & 0x3333333333333333);
+	return (((value + (value >> 4)) & 0xF0F0F0F0F0F0F0F) * 0x101010101010101) >> 56;
+}
+
+/* Check that all the registers representing an address are in canonical form
+ if not, check that the bits differening are 2 or less to detect a bit flip */
+static void check_bit_flip(struct pt_regs *_regs, long error_code)
+{
+	int regs_nr = sizeof(struct pt_regs) / sizeof(unsigned long), idx;
+	unsigned long *regs = (unsigned long *)_regs;
+	unsigned long address;
+
+	for (idx = 0; idx < regs_nr; idx++) {
+		address = regs[idx];
+		if (((address & 0xffff800000000000) != 0xffff800000000000) &&
+		    ((address & 0xffff800000000000) != 0)) {
+			address ^= 0xffff800000000000;
+			address &= 0xffff800000000000;
+			if (count_bits(address) < 3) {
+				panic("Bit flip detected with register 0x%016lx during gpf\n", regs[idx]);
+			}
+		}
+	}
+}
+#endif
+
 dotraplinkage void __kprobes
 do_general_protection(struct pt_regs *regs, long error_code)
 {
@@ -297,6 +327,9 @@
 		tsk->thread.trap_nr = X86_TRAP_GP;
 		if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
 			       X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
+#ifdef CONFIG_X86_64
+			check_bit_flip(regs, error_code);
+#endif
 			die("general protection fault", regs, error_code);
 		goto exit;
 	}
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 098b3cf..0b19fd3 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -37,7 +37,7 @@
    erroneous rdtsc usage on !cpu_has_tsc processors */
 static int __read_mostly tsc_disabled = -1;
 
-int tsc_clocksource_reliable;
+int tsc_clocksource_reliable = 1;
 /*
  * Scheduler clock - returns current time in nanosec units.
  */
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 260a919..5402c94 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3399,15 +3399,22 @@
 	var->limit = vmx_read_guest_seg_limit(vmx, seg);
 	var->selector = vmx_read_guest_seg_selector(vmx, seg);
 	ar = vmx_read_guest_seg_ar(vmx, seg);
+	var->unusable = (ar >> 16) & 1;
 	var->type = ar & 15;
 	var->s = (ar >> 4) & 1;
 	var->dpl = (ar >> 5) & 3;
-	var->present = (ar >> 7) & 1;
+	/*
+	 * Some userspaces do not preserve unusable property. Since usable
+	 * segment has to be present according to VMX spec we can use present
+	 * property to amend userspace bug by making unusable segment always
+	 * nonpresent. vmx_segment_access_rights() already marks nonpresent
+	 * segment as unusable.
+	 */
+	var->present = !var->unusable;
 	var->avl = (ar >> 12) & 1;
 	var->l = (ar >> 13) & 1;
 	var->db = (ar >> 14) & 1;
 	var->g = (ar >> 15) & 1;
-	var->unusable = (ar >> 16) & 1;
 }
 
 static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
index 25b7ae8..7609e0e 100644
--- a/arch/x86/lib/csum-wrappers_64.c
+++ b/arch/x86/lib/csum-wrappers_64.c
@@ -6,6 +6,7 @@
  */
 #include <asm/checksum.h>
 #include <linux/module.h>
+#include <asm/smap.h>
 
 /**
  * csum_partial_copy_from_user - Copy and checksum from user space.
@@ -52,8 +53,10 @@
 			len -= 2;
 		}
 	}
+	stac();
 	isum = csum_partial_copy_generic((__force const void *)src,
 				dst, len, isum, errp, NULL);
+	clac();
 	if (unlikely(*errp))
 		goto out_err;
 
@@ -82,6 +85,8 @@
 csum_partial_copy_to_user(const void *src, void __user *dst,
 			  int len, __wsum isum, int *errp)
 {
+	__wsum ret;
+
 	might_sleep();
 
 	if (unlikely(!access_ok(VERIFY_WRITE, dst, len))) {
@@ -105,8 +110,11 @@
 	}
 
 	*errp = 0;
-	return csum_partial_copy_generic(src, (void __force *)dst,
-					 len, isum, NULL, errp);
+	stac();
+	ret = csum_partial_copy_generic(src, (void __force *)dst,
+					len, isum, NULL, errp);
+	clac();
+	return ret;
 }
 EXPORT_SYMBOL(csum_partial_copy_to_user);
 
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 654be4a..a5e12c7 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -268,8 +268,6 @@
 	if (!(address >= VMALLOC_START && address < VMALLOC_END))
 		return -1;
 
-	WARN_ON_ONCE(in_nmi());
-
 	/*
 	 * Synchronize this task's top level page-table
 	 * with the 'reference' page table.
@@ -572,6 +570,33 @@
 static const char nx_warning[] = KERN_CRIT
 "kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
 
+
+#ifdef CONFIG_X86_64
+static unsigned long count_bits(unsigned long value)
+{
+	value = value - ((value >> 1) & 0x5555555555555555);
+	value = (value & 0x3333333333333333) + ((value >> 2) & 0x3333333333333333);
+	return (((value + (value >> 4)) & 0xF0F0F0F0F0F0F0F) * 0x101010101010101) >> 56;
+}
+
+/* Check for all the registers to find one very close to the address
+	raising the fault, if one is found and the bits differing
+	is 1, we very probably have a bit flip */
+static void check_bit_flip(unsigned long address, struct pt_regs *_regs)
+{
+	int regs_nr = sizeof(struct pt_regs) / sizeof(unsigned long), idx;
+	unsigned long *regs = (unsigned long *)_regs;
+
+	for (idx = 0; idx < regs_nr; idx++) {
+		unsigned long reg = regs[idx] & PAGE_MASK;
+		if ((reg != address) && (count_bits(reg ^ address) == 1)) {
+			/* Found a bit flip*/
+			panic("Bit flip detected at address 0x%016lx with register 0x%016lx during pf\n", address, regs[idx]);
+		}
+	}
+}
+#endif
+
 static void
 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
 		unsigned long address)
@@ -588,6 +613,12 @@
 			printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
 	}
 
+#ifdef CONFIG_X86_64
+	/* Just ignore NULL pointer exceptions for bit flips */
+	if (address & PAGE_MASK)
+		check_bit_flip(address & PAGE_MASK, regs);
+#endif
+
 	printk(KERN_ALERT "BUG: unable to handle kernel ");
 	if (address < PAGE_SIZE)
 		printk(KERN_CONT "NULL pointer dereference");
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 1f34e92..7a5bf1b 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -78,8 +78,8 @@
 	return __va(pfn << PAGE_SHIFT);
 }
 
-/* need 4 4k for initial PMD_SIZE, 4k for 0-ISA_END_ADDRESS */
-#define INIT_PGT_BUF_SIZE	(5 * PAGE_SIZE)
+/* need 3 4k for initial PMD_SIZE,  3 4k for 0-ISA_END_ADDRESS */
+#define INIT_PGT_BUF_SIZE	(6 * PAGE_SIZE)
 RESERVE_BRK(early_pgt_alloc, INIT_PGT_BUF_SIZE);
 void  __init early_alloc_pgt_buf(void)
 {
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 1b3485a..d2afc87 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -112,12 +112,14 @@
  */
 void arch_pick_mmap_layout(struct mm_struct *mm)
 {
+	mm->mmap_legacy_base = mmap_legacy_base();
+	mm->mmap_base = mmap_base();
+
 	if (mmap_is_legacy()) {
-		mm->mmap_base = mmap_legacy_base();
+		mm->mmap_base = mm->mmap_legacy_base;
 		mm->get_unmapped_area = arch_get_unmapped_area;
 		mm->unmap_area = arch_unmap_area;
 	} else {
-		mm->mmap_base = mmap_base();
 		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
 		mm->unmap_area = arch_unmap_area_topdown;
 	}
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 981c2db..62d7295 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -20,6 +20,8 @@
 #include <asm/pci_x86.h>
 #include <asm/setup.h>
 
+#include <linux/module.h>
+
 unsigned int pci_probe = PCI_PROBE_BIOS | PCI_PROBE_CONF1 | PCI_PROBE_CONF2 |
 				PCI_PROBE_MMCONF;
 
@@ -80,6 +82,7 @@
  * configuration space.
  */
 DEFINE_RAW_SPINLOCK(pci_config_lock);
+EXPORT_SYMBOL(pci_config_lock);
 
 static int can_skip_ioresource_align(const struct dmi_system_id *d)
 {
diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
index 6eb18c4..b77fdf5 100644
--- a/arch/x86/pci/mrst.c
+++ b/arch/x86/pci/mrst.c
@@ -31,6 +31,7 @@
 #include <asm/pci_x86.h>
 #include <asm/hw_irq.h>
 #include <asm/io_apic.h>
+#include <asm/intel-mid.h>
 
 #define PCIE_CAP_OFFSET	0x100
 
@@ -203,7 +204,7 @@
 			       where, size, value);
 }
 
-static int mrst_pci_irq_enable(struct pci_dev *dev)
+static int intel_mid_pci_irq_enable(struct pci_dev *dev)
 {
 	u8 pin;
 	struct io_apic_irq_attr irq_attr;
@@ -214,31 +215,37 @@
 	 * IOAPIC RTE entries, so we just enable RTE for the device.
 	 */
 	irq_attr.ioapic = mp_find_ioapic(dev->irq);
+	if (irq_attr.ioapic < 0)
+		return -EINVAL;
 	irq_attr.ioapic_pin = dev->irq;
 	irq_attr.trigger = 1; /* level */
-	irq_attr.polarity = 1; /* active low */
+	if ((intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER) ||
+		(intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_ANNIEDALE))
+		irq_attr.polarity = 0; /* active high */
+	else
+		irq_attr.polarity = 1; /* active low */
 	io_apic_set_pci_routing(&dev->dev, dev->irq, &irq_attr);
 
 	return 0;
 }
 
-struct pci_ops pci_mrst_ops = {
+struct pci_ops intel_mid_pci_ops = {
 	.read = pci_read,
 	.write = pci_write,
 };
 
 /**
- * pci_mrst_init - installs pci_mrst_ops
+ * intel_mid_pci_init - installs intel_mid_pci_ops
  *
  * Moorestown has an interesting PCI implementation (see above).
  * Called when the early platform detection installs it.
  */
-int __init pci_mrst_init(void)
+int __init intel_mid_pci_init(void)
 {
 	printk(KERN_INFO "Intel MID platform detected, using MID PCI ops\n");
 	pci_mmcfg_late_init();
-	pcibios_enable_irq = mrst_pci_irq_enable;
-	pci_root_ops = pci_mrst_ops;
+	pcibios_enable_irq = intel_mid_pci_irq_enable;
+	pci_root_ops = intel_mid_pci_ops;
 	pci_soc_mode = 1;
 	/* Continue with standard init */
 	return 1;
@@ -259,6 +266,7 @@
 	if (type1_access_ok(dev->bus->number, dev->devfn, PCI_DEVICE_ID))
 		return;
 	dev->d3_delay = 0;
+	dev->d3cold_delay = 0;
 }
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_d3delay_fixup);
 
diff --git a/arch/x86/platform/Makefile b/arch/x86/platform/Makefile
index 01e0231..53a823a 100644
--- a/arch/x86/platform/Makefile
+++ b/arch/x86/platform/Makefile
@@ -4,10 +4,11 @@
 obj-y	+= geode/
 obj-y	+= goldfish/
 obj-y	+= iris/
-obj-y	+= mrst/
+obj-y	+= intel-mid/
 obj-y	+= olpc/
 obj-y	+= scx200/
 obj-y	+= sfi/
 obj-y	+= ts5500/
 obj-y	+= visws/
 obj-y	+= uv/
+obj-y	+= fugu/
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index d2fbced..91ac654 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -910,10 +910,13 @@
 
 	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
 		md = p;
-		if (!(md->attribute & EFI_MEMORY_RUNTIME) &&
-		    md->type != EFI_BOOT_SERVICES_CODE &&
-		    md->type != EFI_BOOT_SERVICES_DATA)
-			continue;
+		if (!(md->attribute & EFI_MEMORY_RUNTIME)) {
+#ifdef CONFIG_X86_64
+			if (md->type != EFI_BOOT_SERVICES_CODE &&
+			    md->type != EFI_BOOT_SERVICES_DATA)
+#endif
+				continue;
+		}
 
 		size = md->num_pages << EFI_PAGE_SHIFT;
 		end = md->phys_addr + size;
diff --git a/arch/x86/platform/fugu/Kconfig b/arch/x86/platform/fugu/Kconfig
new file mode 100644
index 0000000..cb61cb8
--- /dev/null
+++ b/arch/x86/platform/fugu/Kconfig
@@ -0,0 +1,26 @@
+config X86_INTEL_MID_FUGU
+	bool "ASUSTek FUGU platform"
+	depends on X86_INTEL_MID
+	select ASUSTEK_PCBID
+	---help---
+	  ASUSTek fugu platform is powered by Moorefield plaform.
+
+config ASUSTEK_PCBID
+	depends on X86_INTEL_MID_FUGU
+	bool "ASUSTek PCB_ID driver"
+	help
+	  Support to OEM1 SFI table parsing and pre-defined pins configuration
+	  of GPIO reading for PCB_ID layout, and export kernel APIs for
+	  querying board-specific information. (eg, hardware revision)
+
+config FUGU_WIFI_PLATFORM_DATA
+	depends on X86_INTEL_MID_FUGU
+	bool "Enable Fugu WiFi platform data"
+	---help---
+	  Enables platform_fugu_wifi
+
+config FUGU_LED
+	depends on X86_INTEL_MID_FUGU
+	bool "fugu led driver"
+	help
+	  This driver provides the access to control fugu led.
diff --git a/arch/x86/platform/fugu/Makefile b/arch/x86/platform/fugu/Makefile
new file mode 100644
index 0000000..70397dc
--- /dev/null
+++ b/arch/x86/platform/fugu/Makefile
@@ -0,0 +1,4 @@
+# ASUSTek fugu-specific drivers/devices (eg, ASUSTek GPIO pins)
+obj-$(CONFIG_ASUSTEK_PCBID) += asustek-pcbid.o devices_asustek.o
+obj-$(CONFIG_FUGU_WIFI_PLATFORM_DATA) += platform_fugu_wifi.o
+obj-$(CONFIG_FUGU_LED)      += fugu_led.o platform_fugu_led.o
diff --git a/arch/x86/platform/fugu/asustek-pcbid.c b/arch/x86/platform/fugu/asustek-pcbid.c
new file mode 100644
index 0000000..7ddd7b3
--- /dev/null
+++ b/arch/x86/platform/fugu/asustek-pcbid.c
@@ -0,0 +1,334 @@
+/*
+ * arch/x86/platform/intel-mid/fugu/asustek-pcbid.c
+ *
+ * Copyright (C) 2014 ASUSTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/bitops.h>
+#include <linux/gpio.h>
+#include <linux/string.h>
+#include <linux/board_asustek.h>
+#include <linux/lnw_gpio.h>
+#include <linux/sfi.h>
+#include "intel-mid-fugu.h"
+
+#define PCBID_VALUE_INVALID 0x4E2F4100 /* N/A */
+
+enum {
+	DEBUG_STATE = 1U << 0,
+	DEBUG_VERBOSE = 1U << 1,
+};
+
+static int debug_mask = DEBUG_STATE;
+module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+static unsigned int asustek_pcbid = PCBID_VALUE_INVALID;
+static unsigned int hw_rev_pcbid[] = {0, 1, 2};
+
+#ifdef CONFIG_SFI
+static bool sfi_pcbid_initialized = false;
+static unsigned int asustek_pcbid_oem1 = PCBID_VALUE_INVALID;
+#endif
+
+struct pcbid_maps {
+	unsigned char name[16];
+	unsigned int *pcbid;
+	unsigned int pcbid_num;
+} asustek_pcbid_maps[] = {
+	{"HW_REV", hw_rev_pcbid, ARRAY_SIZE(hw_rev_pcbid)},
+};
+
+#define NUM_MAPS (sizeof(asustek_pcbid_maps) / sizeof(asustek_pcbid_maps[0]))
+
+int get_pcbid_type(const char *func)
+{
+	int i = 0, ret = 0;
+	struct pcbid_maps *map = NULL;
+
+#ifdef CONFIG_SFI
+	if (!sfi_pcbid_initialized && (asustek_pcbid == PCBID_VALUE_INVALID)) {
+#else
+	if (asustek_pcbid == PCBID_VALUE_INVALID) {
+#endif
+		pr_err("ASUSTek PCBID was invalid\n");
+		return -ENODEV;
+	}
+
+	for (i = 0; i < NUM_MAPS; i++) {
+		if (!strcmp(func, asustek_pcbid_maps[i].name)) {
+			if (debug_mask & DEBUG_VERBOSE)
+				pr_info("%s was found\n", func);
+
+			map = &asustek_pcbid_maps[i];
+			break;
+		}
+	}
+
+	if (map) {
+		/* found */
+		for (i = 0; i < map->pcbid_num; i++) {
+#ifdef CONFIG_SFI
+			if (asustek_pcbid == PCBID_VALUE_INVALID)
+				ret += asustek_pcbid_oem1 & BIT(map->pcbid[i]);
+			else
+#endif
+				ret += asustek_pcbid & BIT(map->pcbid[i]);
+		}
+		ret = ret >> map->pcbid[0];
+	} else
+		ret = -ENODEV;
+
+	return ret;
+}
+
+hw_rev asustek_get_hw_rev(void)
+{
+	hw_rev ret = get_pcbid_type("HW_REV");
+
+	if (debug_mask & DEBUG_VERBOSE)
+		pr_info("%s: %d\n", __func__, ret);
+
+	if ((ret == -ENODEV) || (ret >= HW_REV_MAX))
+		ret = HW_REV_INVALID;
+
+	return ret;
+}
+EXPORT_SYMBOL(asustek_get_hw_rev);
+
+#define ASUSTEK_PCBID_ATTR(module) \
+static struct kobj_attribute module##_attr = { \
+	.attr = { \
+		.name = __stringify(module), \
+		.mode = 0444, \
+	}, \
+	.show = module##_show, \
+}
+
+static ssize_t asustek_pcbid_show(struct kobject *kobj,
+	struct kobj_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%03x\n", asustek_pcbid);
+}
+
+static ssize_t asustek_projectid_show(struct kobject *kobj,
+	struct kobj_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%02x\n", (asustek_pcbid >> 3) & 0x7);
+}
+
+ASUSTEK_PCBID_ATTR(asustek_pcbid);
+ASUSTEK_PCBID_ATTR(asustek_projectid);
+
+static struct attribute *attr_list[] = {
+	&asustek_pcbid_attr.attr,
+	&asustek_projectid_attr.attr,
+	NULL,
+};
+
+static struct attribute_group attr_group = {
+	.attrs = attr_list,
+};
+
+#ifdef CONFIG_SFI
+int sfi_parse_oem1(struct sfi_table_header *table)
+{
+	struct sfi_table_oem1 *oem1;
+	u8 sig[SFI_SIGNATURE_SIZE + 1] = {'\0'};
+	u8 oem_id[SFI_OEM_ID_SIZE + 1] = {'\0'};
+	u8 oem_table_id[SFI_OEM_TABLE_ID_SIZE + 1] = {'\0'};
+
+	oem1 = (struct sfi_table_oem1 *) table;
+
+	if (!oem1) {
+		pr_err("%s: fail to read SFI OEM1 Layout\n",
+			__func__);
+		return -ENODEV;
+	}
+
+	snprintf(sig, (SFI_SIGNATURE_SIZE + 1), "%s", oem1->header.sig);
+	snprintf(oem_id, (SFI_OEM_ID_SIZE + 1), "%s", oem1->header.oem_id);
+	snprintf(oem_table_id, (SFI_OEM_TABLE_ID_SIZE + 1), "%s",
+		 oem1->header.oem_table_id);
+
+	pr_info("SFI OEM1 Layout\n");
+	pr_info("\tOEM1 signature               : %s\n"
+		"\tOEM1 length                  : %d\n"
+		"\tOEM1 revision                : %d\n"
+		"\tOEM1 checksum                : 0x%X\n"
+		"\tOEM1 oem_id                  : %s\n"
+		"\tOEM1 oem_table_id            : %s\n"
+		"\tOEM1 ifwi_rc			: 0x%02X\n"
+		"\tPCBID hardware_id            : 0x%02X\n"
+		"\tPCBID project_id             : 0x%02X\n"
+		"\tPCBID ram_id                 : 0x%02X\n",
+		sig,
+		oem1->header.len,
+		oem1->header.rev,
+		oem1->header.csum,
+		oem_id,
+		oem_table_id,
+		oem1->ifwi_rc,
+		oem1->hardware_id,
+		oem1->project_id,
+		oem1->ram_id
+		);
+
+	asustek_pcbid_oem1 = oem1->hardware_id | oem1->project_id << 3 |
+			oem1->ram_id << 6;
+
+	return 0;
+}
+
+void sfi_parsing_done(bool initialized)
+{
+	sfi_pcbid_initialized = initialized;
+}
+#endif
+
+static int __init pcbid_driver_probe(struct platform_device *pdev)
+{
+	int i, ret = 0;
+	struct resource *res;
+	unsigned int value;
+	int gpio = -1;
+
+	if (!pdev)
+		return -EINVAL;
+
+#ifdef CONFIG_SFI
+	/* Get SFI OEM1 Layout first */
+	sfi_parsing_done(sfi_table_parse(SFI_SIG_OEM1, NULL, NULL,
+				sfi_parse_oem1) ? false : true);
+	if (!sfi_pcbid_initialized) {
+		pr_info("ASUSTek: Cannot parse PCB_ID layout from SFI OEM1.\n");
+		asustek_pcbid_oem1 = 0;
+	}
+#endif
+
+	asustek_pcbid = 0;
+
+	for (i = 0; i < pdev->num_resources; i++) {
+		res = platform_get_resource(pdev, IORESOURCE_IO, i);
+		if (!res)
+			return -ENODEV;
+
+		gpio = res->start;
+		/*
+		 * change necessary GPIO pin mode for PCB_ID module working.
+		 * This is something should be done in IA firmware.
+		 * But, anyway, just do it here in case IA firmware
+		 * forget to do so.
+		 */
+		lnw_gpio_set_alt(gpio, LNW_GPIO);
+
+		if (debug_mask & DEBUG_VERBOSE)
+			pr_info("ASUSTek: Requesting gpio%d\n", gpio);
+
+		ret = gpio_request(gpio, res->name);
+		if (ret) {
+			/* indicate invalid pcbid value when error happens */
+			pr_err("ASUSTek: Failed to request gpio%d\n", gpio);
+			asustek_pcbid = PCBID_VALUE_INVALID;
+			res = NULL;
+			break;
+		}
+
+		ret = gpio_direction_input(gpio);
+		if (ret) {
+			/* indicate invalid pcbid value when error happens */
+			pr_err("ASUSTek: Failed to configure direction for gpio%d\n",
+					gpio);
+			asustek_pcbid = PCBID_VALUE_INVALID;
+			res = NULL;
+			break;
+		}
+
+		/* read input value through gpio library directly */
+		value = gpio_get_value(gpio) ? 1 : 0;
+		if (debug_mask & DEBUG_VERBOSE)
+			pr_info("ASUSTek: Input value of gpio%d is %s\n", gpio,
+					value ? "high" : "low");
+
+		asustek_pcbid |= value << i;
+	}
+
+
+#ifdef CONFIG_SFI
+	if (sfi_pcbid_initialized && (asustek_pcbid_oem1 != asustek_pcbid)) {
+		if (debug_mask && DEBUG_STATE)
+			pr_info("ASUSTek: OEM1 PCBID=%03x\n",
+						asustek_pcbid_oem1);
+		WARN_ON(1);
+	}
+#endif
+
+	if (asustek_pcbid == PCBID_VALUE_INVALID) {
+
+#ifdef CONFIG_SFI
+		asustek_pcbid = asustek_pcbid_oem1;
+#endif
+
+		/* error handler to free allocated gpio resources */
+		while (i >= 0) {
+			res = platform_get_resource(pdev, IORESOURCE_IO, i);
+			if (!res)
+				return -ENODEV;
+
+			if (debug_mask & DEBUG_VERBOSE)
+				pr_info("ASUSTek: Freeing gpio%d\n", gpio);
+
+			gpio = res->start;
+			gpio_free(gpio);
+			i--;
+		}
+	} else {
+		/* report pcbid info to dmesg */
+		if (debug_mask && DEBUG_STATE)
+			pr_info("ASUSTek: PCBID=%05x\n", asustek_pcbid);
+
+		/* create a sysfs interface */
+		ret = sysfs_create_group(&pdev->dev.kobj, &attr_group);
+
+		if (ret)
+			pr_err("ASUSTek: Failed to create sysfs group\n");
+	}
+
+	return ret;
+}
+
+static int pcbid_driver_remove(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static struct platform_driver asustek_pcbid_driver __refdata = {
+	.probe = pcbid_driver_probe,
+	.remove = pcbid_driver_remove,
+	.driver = {
+		.name = "asustek_pcbid",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int asustek_pcbid_init(void)
+{
+	return platform_driver_register(&asustek_pcbid_driver);
+}
+
+rootfs_initcall(asustek_pcbid_init);
+
+MODULE_DESCRIPTION("ASUSTek PCBID driver");
+MODULE_AUTHOR("Paris Yeh <paris_yeh@asus.com>");
+MODULE_LICENSE("GPL");
diff --git a/arch/x86/platform/fugu/devices_asustek.c b/arch/x86/platform/fugu/devices_asustek.c
new file mode 100644
index 0000000..a5d2a33
--- /dev/null
+++ b/arch/x86/platform/fugu/devices_asustek.c
@@ -0,0 +1,77 @@
+/* Copyright (c) 2014, ASUSTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/board_asustek.h>
+
+static struct resource resources_asustek_pcbid[] = {
+	{
+		.start	= 163,
+		.end	= 163,
+		.name	= "PCB_ID0",
+		.flags	= IORESOURCE_IO,
+	},
+	{
+		.start	= 97,
+		.end	= 97,
+		.name	= "PCB_ID1",
+		.flags	= IORESOURCE_IO,
+	},
+	{
+		.start	= 154,
+		.end	= 154,
+		.name	= "PCB_ID2",
+		.flags	= IORESOURCE_IO,
+	},
+	{
+		.start	= 155,
+		.end	= 155,
+		.name	= "PCB_ID3",
+		.flags	= IORESOURCE_IO,
+	},
+	{
+		.start	= 156,
+		.end	= 156,
+		.name	= "PCB_ID4",
+		.flags	= IORESOURCE_IO,
+	},
+	{
+		.start	= 157,
+		.end	= 157,
+		.name	= "PCB_ID5",
+		.flags	= IORESOURCE_IO,
+	},
+	{
+		.start	= 159,
+		.end	= 159,
+		.name	= "PCB_ID6",
+		.flags	= IORESOURCE_IO,
+	},
+};
+
+static struct platform_device asustek_pcbid_device = {
+	.name		= "asustek_pcbid",
+	.id		= -1,
+	.num_resources	= ARRAY_SIZE(resources_asustek_pcbid),
+	.resource = resources_asustek_pcbid,
+};
+
+static int __init asustek_add_pcbid_devices(void)
+{
+	platform_device_register(&asustek_pcbid_device);
+	return 0;
+}
+
+rootfs_initcall(asustek_add_pcbid_devices);
diff --git a/arch/x86/platform/fugu/fugu_led.c b/arch/x86/platform/fugu/fugu_led.c
new file mode 100755
index 0000000..a934ca1
--- /dev/null
+++ b/arch/x86/platform/fugu/fugu_led.c
@@ -0,0 +1,323 @@
+/* Copyright (c) 2014, ASUSTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/rpmsg.h>
+#include <asm/intel_scu_pmic.h>
+#include <asm/intel_mid_rpmsg.h>
+
+#include <linux/leds.h>
+
+/* Charger LED Control Registr Definition */
+#define CHRLEDCTRL_REG 0x53
+/* Charger LED State Maching */
+#define CHRLEDFSM_REG 0x54
+/* Charger LED PWM Register Definiition */
+#define CHRLEDPWM_REG 0x55
+
+static struct rpmsg_instance *pmic_instance;
+
+static int fugu_led_level(enum led_brightness value)
+{
+	if ((value >= 0) && (value < 50))
+		return 1;
+	else if ((value >= 50) && (value < 150))
+		return 2;
+	else if ((value >= 150) && (value < 200))
+		return 3;
+	else
+		return 0;
+}
+
+static u8 is_enabled(void)
+{
+	u8 data;
+
+	intel_scu_ipc_ioread8(CHRLEDCTRL_REG, &data);
+
+	data &= BIT(1);
+
+	return !!data;
+}
+
+static void fugu_led_enable(bool enable)
+{
+	u8 data;
+
+	intel_scu_ipc_ioread8(CHRLEDCTRL_REG, &data);
+
+	if (enable)
+		data |= BIT(1);
+	else
+		data &= ~BIT(1);
+
+	intel_scu_ipc_iowrite8(CHRLEDCTRL_REG, data);
+}
+
+static void fugu_led_brightness_set(struct led_classdev *led_cdev,
+			     enum led_brightness value)
+{
+	s32 level;
+	u8 data;
+
+	level = fugu_led_level(value);
+
+	intel_scu_ipc_ioread8(CHRLEDCTRL_REG, &data);
+	data &= ~(BIT(3) | BIT(2));
+	data |= (level << 2);
+	intel_scu_ipc_iowrite8(CHRLEDCTRL_REG, data);
+	led_cdev->brightness = value;
+}
+
+static enum led_brightness
+fugu_led_brightness_get(struct led_classdev *led_cdev)
+{
+	return led_cdev->brightness;
+}
+
+static int fugu_led_freq_set(u8 freq)
+{
+	u8 data;
+
+	if (freq > 3)
+		return -EINVAL;
+
+	intel_scu_ipc_ioread8(CHRLEDCTRL_REG, &data);
+	data &= ~(BIT(5) | BIT(4));
+	data |= (freq << 4);
+	intel_scu_ipc_iowrite8(CHRLEDCTRL_REG, data);
+
+	return 0;
+}
+
+static u8 fugu_led_freq_get(void)
+{
+	u8 data;
+
+	intel_scu_ipc_ioread8(CHRLEDCTRL_REG, &data);
+	data = data | (BIT(5) | BIT(4));
+	data >>= 4;
+
+	return data;
+}
+
+static int fugu_led_set_lighting_effect(u8 lighting_effect)
+{
+	u8 data;
+	if (lighting_effect > 3)
+		return -EINVAL;
+
+	intel_scu_ipc_ioread8(CHRLEDFSM_REG, &data);
+	data &= ~(BIT(2) | BIT(1));
+	data |= (lighting_effect << 1);
+	intel_scu_ipc_iowrite8(CHRLEDFSM_REG, data);
+
+	return 0;
+}
+
+static u8 fugu_led_get_lighting_effect(void)
+{
+	u8 data;
+
+	intel_scu_ipc_ioread8(CHRLEDFSM_REG, &data);
+	data = data | (BIT(2) | BIT(1));
+	data >>= 1;
+
+	return data;
+}
+
+static ssize_t fugu_led_freq_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	u8 freq;
+
+	freq = fugu_led_freq_get();
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", freq);
+}
+
+static ssize_t fugu_led_freq_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	unsigned long val;
+	int ret;
+
+	ret = kstrtoul(buf, 10, &val);
+	if (ret)
+		return ret;
+
+	ret = fugu_led_freq_set(val);
+	if (ret)
+		dev_err(dev, "fail to set freq\n");
+	 else
+		ret = size;
+
+	return ret;
+}
+
+static ssize_t fugu_led_enable_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%s\n", is_enabled() ? "enabled" : "disabled");
+}
+
+static ssize_t fugu_led_enable_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	unsigned long val;
+	int ret;
+
+	ret = kstrtoul(buf, 10, &val);
+	if (ret)
+		return ret;
+
+	fugu_led_enable(!!val);
+
+	return size;
+}
+
+static ssize_t fugu_led_lighting_effect_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	u8 lighting_effect;
+
+	lighting_effect = fugu_led_get_lighting_effect();
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", lighting_effect);
+}
+
+static ssize_t fugu_led_lighting_effect_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	unsigned long val;
+	int ret;
+
+	ret = kstrtoul(buf, 10, &val);
+	if (ret)
+		return ret;
+
+	ret = fugu_led_set_lighting_effect(val);
+	if (ret)
+		dev_err(dev, "fail to set lighting effect\n");
+	else
+		ret = size;
+
+	return ret;
+}
+
+static DEVICE_ATTR(led_freq, 0644, fugu_led_freq_show, fugu_led_freq_store);
+static DEVICE_ATTR(led_enable, 0644,
+	fugu_led_enable_show, fugu_led_enable_store);
+static DEVICE_ATTR(led_lighting_effect, 0644,
+	fugu_led_lighting_effect_show, fugu_led_lighting_effect_store);
+
+static struct led_classdev fugu_white_led = {
+	.name			= "white",
+	.brightness		= LED_OFF,
+	.max_brightness	= LED_FULL,
+	.brightness_set		= fugu_led_brightness_set,
+	.brightness_get		= fugu_led_brightness_get,
+};
+
+static int fugu_led_probe(struct rpmsg_channel *rpdev)
+{
+	int ret = 0;
+
+	if (rpdev == NULL) {
+		pr_err("rpmsg channel not created\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	dev_info(&rpdev->dev, "Probed pmic rpmsg device\n");
+
+	/* Allocate rpmsg instance for pmic*/
+	ret = alloc_rpmsg_instance(rpdev, &pmic_instance);
+	if (!pmic_instance) {
+		dev_err(&rpdev->dev, "kzalloc pmic instance failed\n");
+		goto out;
+	}
+	/* Initialize rpmsg instance */
+	init_rpmsg_instance(pmic_instance);
+
+	ret = led_classdev_register(&rpdev->dev, &fugu_white_led);
+	if (ret < 0) {
+		dev_err(&rpdev->dev, "fail to register led class.");
+		goto out;
+	}
+
+	ret = device_create_file(&rpdev->dev, &dev_attr_led_freq);
+	if (ret)
+		dev_err(&rpdev->dev, "failed device_create_file(led_freq)\n");
+
+	ret = device_create_file(&rpdev->dev, &dev_attr_led_enable);
+	if (ret)
+		dev_err(&rpdev->dev, "failed device_create_fiaild(led_enable)\n");
+
+	ret = device_create_file(&rpdev->dev, &dev_attr_led_lighting_effect);
+	if (ret)
+		dev_err(&rpdev->dev, "failed device_create_file(led_lighting_effect)\n");
+
+	/* led initialization is done either by IAFW or init.rc */
+out:
+	return ret;
+}
+
+static void fugu_led_remove(struct rpmsg_channel *rpdev)
+{
+	free_rpmsg_instance(rpdev, &pmic_instance);
+	dev_info(&rpdev->dev, "Removed pmic rpmsg device\n");
+}
+
+static void fugu_led_cb(struct rpmsg_channel *rpdev, void *data,
+					int len, void *priv, u32 src)
+{
+	dev_warn(&rpdev->dev, "unexpected, message\n");
+
+	print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+		       data, len,  true);
+}
+
+static struct rpmsg_device_id fugu_led_id_table[] = {
+	{ .name	= "rpmsg_fugu_led" },
+	{ },
+};
+MODULE_DEVICE_TABLE(rpmsg, fugu_led_id_table);
+
+static struct rpmsg_driver fugu_led = {
+	.drv.name	= KBUILD_MODNAME,
+	.drv.owner	= THIS_MODULE,
+	.id_table	= fugu_led_id_table,
+	.probe		= fugu_led_probe,
+	.callback	= fugu_led_cb,
+	.remove		= fugu_led_remove,
+};
+
+static int __init fugu_led_init(void)
+{
+	return register_rpmsg_driver(&fugu_led);
+}
+
+module_init(fugu_led_init);
+
+static void __exit fugu_led_exit(void)
+{
+	return unregister_rpmsg_driver(&fugu_led);
+}
+module_exit(fugu_led_exit);
+
+MODULE_AUTHOR("Alan Lu<alan_lu@asus.com>");
+MODULE_DESCRIPTION("Fugu Led Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/arch/x86/platform/fugu/intel-mid-fugu.h b/arch/x86/platform/fugu/intel-mid-fugu.h
new file mode 100644
index 0000000..7b90e6d
--- /dev/null
+++ b/arch/x86/platform/fugu/intel-mid-fugu.h
@@ -0,0 +1,26 @@
+/*
+ * intel-mid-fugu.h: ASUSTek fugu specific setup code
+ *
+ * (C) Copyright 2014 ASUSTek Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _X86_INTEL_MID_FUGU_H
+#define _X86_INTEL_MID_FUGU_H
+
+/* Table signatures reserved by the SFI specification */
+#define SFI_SIG_OEM1 "OEM1"
+
+/* OEM1 table */
+struct sfi_table_oem1 {
+	struct sfi_table_header header;
+	u8	ifwi_rc;		/* 0: shipping version,
+					 * 0xff: factory version */
+	u8      hardware_id;            /* hardware revision */
+	u8      project_id;             /* project identification */
+	u8      ram_id;                 /* ram size variants */
+} __packed;
+#endif /* _X86_INTEL_MID_FUGU_H */
diff --git a/arch/x86/platform/fugu/platform_fugu_led.c b/arch/x86/platform/fugu/platform_fugu_led.c
new file mode 100644
index 0000000..69621cb
--- /dev/null
+++ b/arch/x86/platform/fugu/platform_fugu_led.c
@@ -0,0 +1,25 @@
+/*
+ * platform_fugu_led.c: Platform data for fugu LED driver.
+ *
+ * (C) Copyright 2014 ASUSTek Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <asm/intel-mid.h>
+#include <asm/pmic_pdata.h>
+#include <asm/intel_mid_remoteproc.h>
+
+static int __init fugu_led_init(void)
+{
+	register_rpmsg_service("rpmsg_fugu_led", RPROC_SCU,
+				RP_PMIC_ACCESS);
+	return 0;
+}
+
+postcore_initcall(fugu_led_init);
diff --git a/arch/x86/platform/fugu/platform_fugu_wifi.c b/arch/x86/platform/fugu/platform_fugu_wifi.c
new file mode 100644
index 0000000..6f552db
--- /dev/null
+++ b/arch/x86/platform/fugu/platform_fugu_wifi.c
@@ -0,0 +1,365 @@
+/*
+ * platform_fugu_wifi.c: fugu wifi platform data initilization file
+ *
+ * (C) Copyright 2011 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/gpio.h>
+#include <linux/lnw_gpio.h>
+#include <asm/intel-mid.h>
+#include <linux/wlan_plat.h>
+#include <linux/interrupt.h>
+#include <linux/mmc/sdhci.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/fs.h>
+#include <linux/board_asustek.h>
+#include "../intel-mid/device_libs/pci/platform_sdhci_pci.h"
+#include "platform_fugu_wifi.h"
+
+static int fugu_wifi_get_mac_addr(unsigned char *buf);
+static void *fugu_wifi_get_country_code(char *ccode, u32 flags);
+
+static struct wifi_platform_data fugu_wifi_control = {
+	.get_mac_addr	= fugu_wifi_get_mac_addr,
+	.get_country_code = fugu_wifi_get_country_code,
+};
+
+static struct resource wifi_res[] = {
+	{
+	.name = "wlan_irq",
+	.start = -1,
+	.end = -1,
+	.flags = IORESOURCE_IRQ | IRQF_TRIGGER_FALLING ,
+	},
+};
+
+static struct platform_device wifi_device = {
+	.name = "wlan",
+	.dev = {
+		.platform_data = &fugu_wifi_control,
+		},
+	.num_resources = ARRAY_SIZE(wifi_res),
+	.resource = wifi_res,
+};
+
+static const unsigned int sdhci_quirk = SDHCI_QUIRK2_NON_STD_CIS |
+		SDHCI_QUIRK2_ENABLE_MMC_PM_IGNORE_PM_NOTIFY;
+
+static void __init wifi_platform_data_init_sfi_fastirq(struct sfi_device_table_entry *pentry,
+						       bool should_register)
+{
+	/* If the GPIO mode was previously called, this code overloads
+	   the IRQ anyway */
+	wifi_res[0].start = wifi_res[0].end = pentry->irq;
+	wifi_res[0].flags = IORESOURCE_IRQ | IRQF_TRIGGER_HIGH;
+
+	pr_info("wifi_platform_data: IRQ == %d\n", pentry->irq);
+
+	if (should_register && platform_device_register(&wifi_device) < 0)
+		pr_err("platform_device_register failed for wifi_device\n");
+}
+
+/* Called if SFI device WLAN is present */
+void __init wifi_platform_data_fastirq(struct sfi_device_table_entry *pe,
+				       struct devs_id *dev)
+{
+	/* This is used in the driver to know if it is GPIO/FastIRQ */
+	fugu_wifi_control.use_fast_irq = true;
+
+	if (wifi_res[0].start == -1) {
+		pr_info("Using WiFi platform data (Fast IRQ)\n");
+
+		/* Set vendor specific SDIO quirks */
+		sdhci_pdata_set_quirks(sdhci_quirk);
+		wifi_platform_data_init_sfi_fastirq(pe, true);
+	} else {
+		pr_info("Using WiFi platform data (Fast IRQ, overloading GPIO mode set previously)\n");
+		/* We do not register platform device, as it's already been
+		   done by wifi_platform_data */
+		wifi_platform_data_init_sfi_fastirq(pe, false);
+	}
+
+}
+
+/* GPIO legacy code path */
+static void __init wifi_platform_data_init_sfi_gpio(void)
+{
+	int wifi_irq_gpio = -1;
+
+	/*Get GPIO numbers from the SFI table*/
+	wifi_irq_gpio = get_gpio_by_name(WIFI_SFI_GPIO_IRQ_NAME);
+	if (wifi_irq_gpio < 0) {
+		pr_err("%s: Unable to find " WIFI_SFI_GPIO_IRQ_NAME
+		       " WLAN-interrupt GPIO in the SFI table\n",
+		       __func__);
+		return;
+	}
+
+	wifi_res[0].start = wifi_res[0].end = wifi_irq_gpio;
+	pr_info("wifi_platform_data: GPIO == %d\n", wifi_irq_gpio);
+
+	if (platform_device_register(&wifi_device) < 0)
+		pr_err("platform_device_register failed for wifi_device\n");
+}
+
+/* Called from board.c */
+void __init *wifi_platform_data(void *info)
+{
+	/* When fast IRQ platform data has been called first, don't pursue */
+	if (wifi_res[0].start != -1)
+		return NULL;
+
+	pr_info("Using generic wifi platform data\n");
+
+	/* Set vendor specific SDIO quirks */
+#ifdef CONFIG_MMC_SDHCI_PCI
+	sdhci_pdata_set_quirks(sdhci_quirk);
+#endif
+
+#ifndef CONFIG_ACPI
+	/* We are SFI here, register platform device */
+	wifi_platform_data_init_sfi_gpio();
+#endif
+
+	return &wifi_device;
+}
+
+#define WIFI_MAC_ADDR_FILE	"/factory/wifi/mac.txt"
+#define EMMC_ID			"/proc/emmc0_id_entry"
+
+static int check_mac(char *str)
+{
+	int i;
+
+	if (strlen(str) != 12) {
+		pr_err("%s: bad mac address file len %zu < 12\n",
+				__func__, strlen(str));
+		return -1;
+	}
+	for (i = 0; i < strlen(str); i++) {
+		if (!strchr("1234567890abcdefABCDEF", str[i])) {
+			pr_err("%s: illegal wifi mac\n", __func__);
+			return -1;
+		}
+	}
+	return 0;
+}
+
+static void string_to_mac(char *str, unsigned char *buf)
+{
+	char temp[3]="\0";
+	int mac[6];
+	int i;
+
+	for (i = 0; i < 6; i++) {
+		strncpy(temp, str+(i*2), 2);
+		sscanf(temp, "%x", &mac[i]);
+	}
+	pr_info("%s: using wifi mac %02x:%02x:%02x:%02x:%02x:%02x\n",
+		__func__, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
+
+	buf[0] = (unsigned char) mac[0];
+	buf[1] = (unsigned char) mac[1];
+	buf[2] = (unsigned char) mac[2];
+	buf[3] = (unsigned char) mac[3];
+	buf[4] = (unsigned char) mac[4];
+	buf[5] = (unsigned char) mac[5];
+}
+
+static int fugu_wifi_get_mac_addr(unsigned char *buf)
+{
+	struct file *fp;
+	char str[32];
+	char default_mac[12] = "00904C";
+
+	pr_debug("%s\n", __func__);
+
+	/* open wifi mac address file */
+	fp = filp_open(WIFI_MAC_ADDR_FILE, O_RDONLY, 0);
+	if (IS_ERR(fp)) {
+		pr_err("%s: cannot open %s\n", __func__, WIFI_MAC_ADDR_FILE);
+		goto random_mac;
+	}
+
+	/* read wifi mac address file */
+	memset(str, 0, sizeof(str));
+	kernel_read(fp, fp->f_pos, str, 12);
+
+	if (check_mac(str)) {
+		filp_close(fp, NULL);
+		goto random_mac;
+	}
+	string_to_mac(str, buf);
+	filp_close(fp, NULL);
+	return 0;
+
+random_mac:
+	/* open wifi mac address file */
+	fp = filp_open(EMMC_ID, O_RDONLY, 0);
+	if (IS_ERR(fp)) {
+		pr_err("%s: cannot open %s\n", __func__, EMMC_ID);
+		return -1;
+	}
+
+	/* read wifi mac address file */
+	memset(str, 0, sizeof(str));
+	kernel_read(fp, fp->f_pos, str, 32);
+	strcat(default_mac, str+strlen(str)-6);
+
+	if (check_mac(default_mac)) {
+		filp_close(fp, NULL);
+		return -1;
+	}
+	string_to_mac(default_mac, buf);
+	filp_close(fp, NULL);
+	return 0;
+}
+
+const char *get_nvram_path(void)
+{
+	const char *fugu_sr2_wifi_nv = "/etc/wifi/bcmdhd_sr2.cal";
+	const char *fugu_wifi_nv = "/etc/wifi/bcmdhd.cal";
+	hw_rev revision;
+	struct file *fp;
+
+	revision = asustek_get_hw_rev();
+
+	switch (revision) {
+	case HW_REV_A:
+	case HW_REV_B:
+		fp = filp_open(fugu_sr2_wifi_nv, O_RDONLY, 0);
+		if (IS_ERR(fp))
+			return fugu_wifi_nv;
+
+		filp_close(fp, NULL);
+		return fugu_sr2_wifi_nv;
+	break;
+	default:
+		return fugu_wifi_nv;
+	}
+}
+EXPORT_SYMBOL(get_nvram_path);
+
+#define WLAN_PLAT_NODFS_FLAG	0x01
+#define COUNTRY_BUF_SZ	4
+#define DEFAULT_CCODE	"/factory/country"
+
+struct cntry_locales_custom {
+	char iso_abbrev[COUNTRY_BUF_SZ];
+	char custom_locale[COUNTRY_BUF_SZ];
+	int custom_locale_rev;
+};
+
+/* Customized Locale table (DFS band enabled) */
+static struct cntry_locales_custom translate_custom_dfs_table[] = {
+/* Table should be filled out based on custom platform regulatory requirement */
+	{"",   "XZ", 11},	/* Universal if Country code is unknown or empty */
+	{"US", "CA", 2},
+	{"CA", "CA", 2},
+	{"MX", "CA", 2},
+	{"DK", "GB", 6},
+	{"FI", "GB", 6},
+	{"NO", "GB", 6},
+	{"SE", "GB", 6},
+	{"AT", "GB", 6},
+	{"CH", "GB", 6},
+	{"DE", "GB", 6},
+	{"FR", "GB", 6},
+	{"IT", "GB", 6},
+	{"ES", "GB", 6},
+	{"PT", "GB", 6},
+	{"NL", "GB", 6},
+	{"BE", "GB", 6},
+	{"GB", "GB", 6},
+	{"IE", "GB", 6},
+	{"TW", "TW", 1},
+	{"HK", "SG", 12},
+	{"SG", "SG", 12},
+	{"KR", "KR", 57},
+	{"IN", "IN", 3},
+	{"TH", "TH", 5},
+	{"MY", "MY", 3},
+	{"RU", "RU", 13},
+	{"JP", "JP", 45},
+	{"AU", "AU", 6},
+	{"NZ", "NZ", 4},
+	{"BR", "BR", 4},
+	{"ID", "ID", 1},
+};
+
+/* Customized Locale table (DFS band disabled) */
+static struct cntry_locales_custom translate_custom_nodfs_table[] = {
+/* Table should be filled out based on custom platform regulatory requirement */
+	{"",   "XZ", 40},	/* Universal if Country code is unknown or empty */
+	{"US", "CA", 53},
+	{"CA", "CA", 53},
+	{"MX", "CA", 53},
+	{"DK", "GB", 22},
+	{"FI", "GB", 22},
+	{"NO", "GB", 22},
+	{"SE", "GB", 22},
+	{"AT", "GB", 22},
+	{"CH", "GB", 22},
+	{"DE", "GB", 22},
+	{"FR", "GB", 22},
+	{"IT", "GB", 22},
+	{"ES", "GB", 22},
+	{"PT", "GB", 22},
+	{"NL", "GB", 22},
+	{"BE", "GB", 22},
+	{"GB", "GB", 22},
+	{"IE", "GB", 22},
+	{"TW", "TW", 60},
+	{"HK", "SG", 17},
+	{"SG", "SG", 17},
+	{"KR", "KR", 79},
+	{"IN", "IN", 27},
+	{"TH", "TH", 9},
+	{"MY", "MY", 15},
+	{"RU", "RU", 20},
+	{"JP", "JP", 83},
+	{"AU", "AU", 37},
+	{"NZ", "TH", 9},
+	{"BR", "BR", 18},
+	{"ID", "ID", 1},
+};
+
+static void *fugu_wifi_get_country_code(char *ccode, u32 flags)
+{
+	int size, i;
+	struct file *fp;
+	static struct cntry_locales_custom* wifi_translate_custom_table;
+
+	if (flags & WLAN_PLAT_NODFS_FLAG) {
+		wifi_translate_custom_table = translate_custom_nodfs_table;
+		size = ARRAY_SIZE(translate_custom_nodfs_table);
+	} else {
+		wifi_translate_custom_table = translate_custom_dfs_table;
+		size = ARRAY_SIZE(translate_custom_dfs_table);
+	}
+
+	if ((size == 0) || (ccode == NULL))
+		return NULL;
+
+	if (!strcmp(ccode, "")){
+		fp = filp_open(DEFAULT_CCODE, O_RDONLY, 0);
+		if (!IS_ERR(fp)) {
+			memset(ccode, 0, COUNTRY_BUF_SZ);
+			kernel_read(fp, fp->f_pos, ccode, 2);
+			filp_close(fp, NULL);
+		}
+	}
+
+	for (i = 0; i < size; i++) {
+		if (!strcmp(ccode, wifi_translate_custom_table[i].iso_abbrev))
+			return &wifi_translate_custom_table[i];
+	}
+	return &wifi_translate_custom_table[0];
+}
diff --git a/arch/x86/platform/fugu/platform_fugu_wifi.h b/arch/x86/platform/fugu/platform_fugu_wifi.h
new file mode 100644
index 0000000..4813de2
--- /dev/null
+++ b/arch/x86/platform/fugu/platform_fugu_wifi.h
@@ -0,0 +1,22 @@
+/*
+ * platform_fugu_wifi.h: Fugu WiFi platform data header file
+ *
+ * (C) Copyright 2011 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_FUGU_WIFI_H_
+#define _PLATFORM_FUGU_WIFI_H_
+
+#define WIFI_SFI_GPIO_IRQ_NAME "WLAN-interrupt"
+#define WIFI_SFI_GPIO_ENABLE_NAME "WLAN-enable"
+
+extern void __init *wifi_platform_data(void *info) __attribute__((weak));
+extern void wifi_platform_data_fastirq(struct sfi_device_table_entry *pe,
+				       struct devs_id *dev) __attribute__((weak));
+
+#endif
diff --git a/arch/x86/platform/intel-mid/Makefile b/arch/x86/platform/intel-mid/Makefile
new file mode 100644
index 0000000..f4202ed
--- /dev/null
+++ b/arch/x86/platform/intel-mid/Makefile
@@ -0,0 +1,28 @@
+obj-$(CONFIG_X86_INTEL_MID) += intel-mid.o
+obj-$(CONFIG_X86_INTEL_MID)	+= intel_mid_vrtc.o
+obj-$(CONFIG_EARLY_PRINTK_INTEL_MID)	+= early_printk_intel_mid.o
+obj-$(CONFIG_INTEL_MID_PSTORE_RAM) += intel_mid_pstore_ram.o
+
+# SFI specific code
+obj-$(CONFIG_SFI) += intel_mid_sfi.o
+
+# platform configuration for board devices
+obj-y += device_libs/
+
+# SoC specific files
+obj-$(CONFIG_X86_INTEL_MID) += mrfl.o
+obj-$(CONFIG_X86_INTEL_MID) += intel_mid_pcihelpers.o
+obj-$(CONFIG_X86_INTEL_MID) += intel_mid_scu.o
+
+# BOARD files
+obj-$(CONFIG_X86_INTEL_MID) += board.o
+
+# PMU driver
+obj-$(CONFIG_ATOM_SOC_POWER) += intel_soc_pmu.o intel_soc_pm_debug.o intel_soc_dump.o
+obj-$(CONFIG_ATOM_SOC_POWER) += intel_soc_mrfld.o
+
+# pci acpi quirk driver
+obj-$(CONFIG_INTEL_SOC_PMC) += intel_soc_pci_acpi_quirk.o
+
+# Debug features driver
+obj-$(CONFIG_INTEL_DEBUG_FEATURE) += intel_soc_debug.o
diff --git a/arch/x86/platform/intel-mid/board.c b/arch/x86/platform/intel-mid/board.c
new file mode 100644
index 0000000..e7d7e9b
--- /dev/null
+++ b/arch/x86/platform/intel-mid/board.c
@@ -0,0 +1,230 @@
+/*
+ * board-blackbay.c: Intel Medfield based board (Blackbay)
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
+#include <linux/sfi.h>
+#include <linux/intel_pmic_gpio.h>
+#include <linux/spi/spi.h>
+#include <linux/i2c.h>
+#include <linux/i2c/pca953x.h>
+#include <linux/gpio.h>
+#include <linux/gpio_keys.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/intel_msic.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/i2c-gpio.h>
+
+#include <asm/setup.h>
+#include <asm/mpspec_def.h>
+#include <asm/hw_irq.h>
+#include <asm/apic.h>
+#include <asm/io_apic.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_mid_vrtc.h>
+#include <linux/io.h>
+#include <asm/i8259.h>
+#include <asm/intel_scu_ipc.h>
+#include <asm/apb_timer.h>
+#include <linux/reboot.h>
+
+/*
+ * IPC devices
+ */
+#include "device_libs/platform_ipc.h"
+#include "device_libs/platform_pmic_gpio.h"
+#include "device_libs/platform_msic.h"
+#include "device_libs/platform_msic_gpio.h"
+#include "device_libs/platform_msic_audio.h"
+#include "device_libs/platform_msic_power_btn.h"
+#include "device_libs/platform_msic_ocd.h"
+#include "device_libs/platform_mrfl_pmic.h"
+#include "device_libs/platform_mrfl_pmic_i2c.h"
+#include "device_libs/platform_mrfl_ocd.h"
+#include "device_libs/platform_soc_thermal.h"
+#include "device_libs/platform_msic_adc.h"
+#include "device_libs/platform_bcove_adc.h"
+#include <asm/platform_mrfld_audio.h>
+#include "device_libs/platform_moor_thermal.h"
+#include "device_libs/platform_scu_log.h"
+
+/*
+ * SPI devices
+ */
+#include "device_libs/platform_max3111.h"
+
+/*
+ * I2C devices
+ */
+#include "device_libs/platform_max7315.h"
+#include "device_libs/platform_tca6416.h"
+#include "device_libs/platform_tc35876x.h"
+#include "device_libs/platform_rmi4.h"
+#include "device_libs/platform_bq24192.h"
+#include "device_libs/platform_bq24261.h"
+#include "device_libs/platform_r69001.h"
+#include "device_libs/platform_pn544.h"
+#include "device_libs/platform_pca9574.h"
+
+/* SW devices */
+#include "device_libs/platform_panel.h"
+
+#include "device_libs/platform_wm8994.h"
+
+/*
+ * SPI devices
+ */
+#include "device_libs/platform_max17042.h"
+
+/* HSI devices */
+
+/* SW devices */
+
+/* WIFI devices */
+#include "device_libs/platform_wifi.h"
+
+/* USB devices */
+#include "device_libs/pci/platform_usb_otg.h"
+
+static void __init *no_platform_data(void *info)
+{
+	return NULL;
+}
+
+struct devs_id __initconst device_ids[] = {
+	/* SD devices */
+	{"bcm43xx_clk_vmmc", SFI_DEV_TYPE_SD, 0, &wifi_platform_data, NULL},
+	{"bcm43xx_vmmc", SFI_DEV_TYPE_SD, 0, &wifi_platform_data, NULL},
+	{"iwlwifi_clk_vmmc", SFI_DEV_TYPE_SD, 0, &wifi_platform_data, NULL},
+	{"WLAN_FAST_IRQ", SFI_DEV_TYPE_SD, 0, &no_platform_data,
+	 &wifi_platform_data_fastirq},
+
+	/* SPI devices */
+	{"bma023", SFI_DEV_TYPE_I2C, 1, &no_platform_data, NULL},
+	{"pmic_gpio", SFI_DEV_TYPE_SPI, 1, &pmic_gpio_platform_data, NULL},
+	{"pmic_gpio", SFI_DEV_TYPE_IPC, 1, &pmic_gpio_platform_data,
+					&ipc_device_handler},
+	{"spi_max3111", SFI_DEV_TYPE_SPI, 0, &max3111_platform_data, NULL},
+	{"i2c_max7315", SFI_DEV_TYPE_I2C, 1, &max7315_platform_data, NULL},
+	{"i2c_max7315_2", SFI_DEV_TYPE_I2C, 1, &max7315_platform_data, NULL},
+	{"tca6416", SFI_DEV_TYPE_I2C, 1, &tca6416_platform_data, NULL},
+	{"pmic_audio", SFI_DEV_TYPE_IPC, 1, &no_platform_data,
+					&ipc_device_handler},
+	{"i2c_disp_brig", SFI_DEV_TYPE_I2C, 0, &tc35876x_platform_data, NULL},
+	{"r69001-ts-i2c", SFI_DEV_TYPE_I2C, 0, &r69001_platform_data, NULL},
+	{"synaptics_3202", SFI_DEV_TYPE_I2C, 0, &rmi4_platform_data, NULL},
+	{"syn_3400_cgs", SFI_DEV_TYPE_I2C, 0, &rmi4_platform_data, NULL},
+	{"syn_3400_igzo", SFI_DEV_TYPE_I2C, 0, &rmi4_platform_data, NULL},
+	{"synaptics_3402", SFI_DEV_TYPE_I2C, 0, &rmi4_platform_data, NULL},
+
+	/* I2C devices*/
+	{"max17042", SFI_DEV_TYPE_I2C, 1, &max17042_platform_data, NULL},
+	{"max17047", SFI_DEV_TYPE_I2C, 1, &max17042_platform_data, NULL},
+	{"max17050", SFI_DEV_TYPE_I2C, 1, &max17042_platform_data, NULL},
+	{"bq24192", SFI_DEV_TYPE_I2C, 1, &bq24192_platform_data},
+	{"bq24261_charger", SFI_DEV_TYPE_I2C, 1, &bq24261_platform_data, NULL},
+	{"pn544", SFI_DEV_TYPE_I2C, 0, &pn544_platform_data, NULL},
+	{"MNZX8000", SFI_DEV_TYPE_I2C, 0, &no_platform_data, NULL},
+	{"pca953x", SFI_DEV_TYPE_I2C, 0, &nxp_pca9574_platform_data, NULL},
+	{"it8566_hdmi_cec", SFI_DEV_TYPE_I2C, 1, &no_platform_data, NULL},
+	{"it8566_flash_mod", SFI_DEV_TYPE_I2C, 1, &no_platform_data, NULL},
+
+	/* MSIC subdevices */
+	{"msic_adc", SFI_DEV_TYPE_IPC, 1, &msic_adc_platform_data,
+						&ipc_device_handler},
+	{"bcove_power_btn", SFI_DEV_TYPE_IPC, 1, &msic_power_btn_platform_data,
+					&ipc_device_handler},
+	{"scove_power_btn", SFI_DEV_TYPE_IPC, 1,
+					&msic_power_btn_platform_data,
+					&ipc_device_handler},
+	{"msic_gpio", SFI_DEV_TYPE_IPC, 1, &msic_gpio_platform_data,
+					&ipc_device_handler},
+	{"msic_audio", SFI_DEV_TYPE_IPC, 1, &msic_audio_platform_data,
+					&ipc_device_handler},
+	{"msic_power_btn", SFI_DEV_TYPE_IPC, 1, &msic_power_btn_platform_data,
+					&ipc_device_handler},
+	{"msic_ocd", SFI_DEV_TYPE_IPC, 1, &msic_ocd_platform_data,
+					&ipc_device_handler},
+	{"bcove_bcu", SFI_DEV_TYPE_IPC, 1, &mrfl_ocd_platform_data,
+					&ipc_device_handler},
+	{"bcove_adc", SFI_DEV_TYPE_IPC, 1, &bcove_adc_platform_data,
+					&ipc_device_handler},
+	{"scove_thrm", SFI_DEV_TYPE_IPC, 1, &moor_thermal_platform_data,
+					&ipc_device_handler},
+	{"scuLog", SFI_DEV_TYPE_IPC, 1, &scu_log_platform_data,
+					&ipc_device_handler},
+
+	/* Panel */
+	{"PANEL_CMI_CMD", SFI_DEV_TYPE_MDM, 0, &no_platform_data,
+		&panel_handler},
+	{"PANEL_JDI_VID", SFI_DEV_TYPE_MDM, 0, &no_platform_data,
+		&panel_handler},
+	{"PANEL_JDI_CMD", SFI_DEV_TYPE_MDM, 0, &no_platform_data,
+		&panel_handler},
+	/*  above 3 items will be removed
+	* after firmware changing
+	*/
+	{"PNC_CMI_7x12", SFI_DEV_TYPE_MDM, 0, &no_platform_data,
+		&panel_handler},
+	{"PNV_JDI_7x12", SFI_DEV_TYPE_MDM, 0, &no_platform_data,
+		&panel_handler},
+	{"PNC_JDI_7x12", SFI_DEV_TYPE_MDM, 0, &no_platform_data,
+		&panel_handler},
+	{"PNC_SHARP_10x19", SFI_DEV_TYPE_MDM, 0, &no_platform_data,
+		&panel_handler},
+	{"PNCD_SHARP_10x19", SFI_DEV_TYPE_MDM, 0, &no_platform_data,
+		&panel_handler},
+	{"PNV_SHARP_25x16", SFI_DEV_TYPE_MDM, 0, &no_platform_data,
+		&panel_handler},
+	{"PNC_SHARP_25x16", SFI_DEV_TYPE_MDM, 0, &no_platform_data,
+		&panel_handler},
+	{"PNV_JDI_25x16", SFI_DEV_TYPE_MDM, 0, &no_platform_data,
+		&panel_handler},
+	{"PNC_JDI_25x16", SFI_DEV_TYPE_MDM, 0, &no_platform_data,
+		&panel_handler},
+	{"PNC_SDC_16x25", SFI_DEV_TYPE_MDM, 0, &no_platform_data,
+		&panel_handler},
+	{"PNC_SDC_25x16", SFI_DEV_TYPE_MDM, 0, &no_platform_data,
+		&panel_handler},
+
+	/* IPC devices */
+	{"pmic_charger", SFI_DEV_TYPE_IPC, 1, &no_platform_data, NULL},
+	{"pmic_ccsm", SFI_DEV_TYPE_IPC, 1, &mrfl_pmic_ccsm_platform_data,
+						&ipc_device_handler},
+	{"i2c_pmic_adap", SFI_DEV_TYPE_IPC, 1, &mrfl_pmic_i2c_platform_data,
+						&ipc_device_handler},
+
+	/* IPC devices */
+	{"mrfld_lm49453", SFI_DEV_TYPE_IPC, 1, &merfld_audio_platform_data,
+						&ipc_device_handler},
+	{"mrfld_wm8958", SFI_DEV_TYPE_IPC, 1, &merfld_wm8958_audio_platform_data,
+						&ipc_device_handler},
+	{"soc_thrm", SFI_DEV_TYPE_IPC, 1, &no_platform_data,
+						&soc_thrm_device_handler},
+	{"wm8958", SFI_DEV_TYPE_I2C, 0, &wm8994_platform_data, NULL},
+	{"lm49453_codec", SFI_DEV_TYPE_I2C, 1, &no_platform_data, NULL},
+	/* USB */
+	{"ULPICAL_7F", SFI_DEV_TYPE_USB, 0, &no_platform_data,
+		&sfi_handle_usb},
+	{"ULPICAL_7D", SFI_DEV_TYPE_USB, 0, &no_platform_data,
+		&sfi_handle_usb},
+	{"UTMICAL_PEDE3TX0", SFI_DEV_TYPE_USB, 0, &no_platform_data,
+		&sfi_handle_usb},
+	{"UTMICAL_PEDE6TX7", SFI_DEV_TYPE_USB, 0, &no_platform_data,
+		&sfi_handle_usb},
+	{},
+};
diff --git a/arch/x86/platform/intel-mid/device_libs/Makefile b/arch/x86/platform/intel-mid/device_libs/Makefile
new file mode 100644
index 0000000..405a95c
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/Makefile
@@ -0,0 +1,57 @@
+# IPC Devices
+obj-y += platform_sst_audio.o
+obj-y += platform_mrfl_regulator.o
+obj-y += platform_soc_thermal.o
+obj-$(subst m,y,$(CONFIG_SND_MOOR_PLATFORM)) += platform_mrfld_audio.o
+obj-y += platform_ipc.o
+obj-y += platform_i2c_gpio.o
+obj-y += platform_msic.o
+obj-y += platform_msic_audio.o
+obj-y += platform_msic_gpio.o
+obj-y += platform_msic_ocd.o
+obj-y += platform_tc35876x.o
+obj-y += pci/
+obj-$(subst m,y,$(CONFIG_INTEL_MID_POWER_BUTTON)) += platform_msic_power_btn.o
+obj-$(subst m,y,$(CONFIG_GPIO_INTEL_PMIC)) += platform_pmic_gpio.o
+obj-$(subst m,y,$(CONFIG_SENSORS_MRFL_OCD)) += platform_mrfl_ocd.o
+obj-$(subst m,y,$(CONFIG_PMIC_CCSM)) += platform_mrfl_pmic.o
+obj-$(subst m,y,$(CONFIG_I2C_PMIC)) += platform_mrfl_pmic_i2c.o
+obj-$(subst m,y,$(CONFIG_INTEL_MOOR_THERMAL)) += platform_moor_thermal.o
+obj-$(subst m,y,$(CONFIG_INTEL_SCU_FLIS)) += platform_scu_flis.o
+
+# I2C Devices
+obj-$(subst m,y,$(CONFIG_I2C_DESIGNWARE_CORE_FORK)) += platform_dw_i2c.o
+obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_max7315.o
+obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_tca6416.o
+obj-$(subst m,y,$(CONFIG_CHARGER_BQ24192)) += platform_bq24192.o
+obj-$(subst m,y,$(CONFIG_BQ24261_CHARGER)) += platform_bq24261.o
+obj-$(subst m,y,$(CONFIG_SND_SOC_WM8994)) += platform_wm8994.o
+obj-$(subst m,y,$(CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4_FORK)) += platform_rmi4.o
+obj-$(subst m,y,$(CONFIG_TOUCHSCREEN_R69001_I2C)) += platform_r69001.o
+obj-$(subst m,y,$(CONFIG_NFC_PN544_PLATFORM_DATA)) += platform_pn544.o
+obj-$(subst m,y,$(CONFIG_SENSORS_LSM303_MAG)) += platform_lsm303.o
+obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_pca9574.o
+
+# SPI Devices
+obj-$(subst m,y,$(CONFIG_SERIAL_MRST_MAX3110)) += platform_max3111.o
+
+# MISC Devices
+obj-$(subst m,y,$(CONFIG_KEYBOARD_GPIO)) += platform_gpio_keys.o
+# ADC
+obj-$(subst m,y,$(CONFIG_MSIC_GPADC))	+= platform_msic_adc.o
+obj-$(subst m,y,$(CONFIG_IIO_BASINCOVE_GPADC))	+= platform_bcove_adc.o
+# UART Devices
+obj-$(subst m,y,$(CONFIG_SERIAL_MFD_HSU_EXT)) += platform_hsu.o
+#I2C Devices
+obj-$(subst m,y,$(CONFIG_BATTERY_MAX17042)) += platform_max17042.o
+
+# Panel Control Device
+obj-$(subst m,y,$(CONFIG_DRM_MRFLD)) += platform_panel.o
+# WIFI devices
+obj-$(subst m,y,$(CONFIG_WIFI_PLATFORM_DATA)) += platform_wifi.o
+# BT devices
+obj-$(subst m,y,$(CONFIG_BCM_BT_LPM)) += platform_btlpm.o
+# SCU log
+obj-$(subst m,y,$(CONFIG_SCU_LOGGING)) += platform_scu_log.o
+# Display Control Device
+obj-y += platform_display.o
diff --git a/arch/x86/platform/intel-mid/device_libs/pci/Makefile b/arch/x86/platform/intel-mid/device_libs/pci/Makefile
new file mode 100644
index 0000000..e97a167
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/pci/Makefile
@@ -0,0 +1,9 @@
+# MMC Sdhci pci host controller platform data
+obj-$(subst m,y,$(CONFIG_MMC_SDHCI_PCI))				+= platform_sdhci_pci.o
+# USB OTG controller platform data
+obj-$(subst m,y,$(CONFIG_USB_DWC3_OTG))        				+= platform_usb_otg.o
+obj-$(subst m,y,$(CONFIG_SND_INTEL_SST))                                += platform_sst_pci.o
+# USB EHCI/SPH controller platform data
+obj-$(subst m,y,$(CONFIG_USB_EHCI_HCD_SPH))				+= platform_ehci_sph.o
+# Vibra driver platform data
+obj-$(subst m,y,$(CONFIG_INPUT_INTEL_MID_VIBRA))                        += platform_mid_vibra.o
diff --git a/arch/x86/platform/intel-mid/device_libs/pci/platform_sdhci_pci.c b/arch/x86/platform/intel-mid/device_libs/pci/platform_sdhci_pci.c
new file mode 100644
index 0000000..1992c26
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/pci/platform_sdhci_pci.c
@@ -0,0 +1,415 @@
+/*
+ * platform_mmc_sdhci_pci.c: mmc sdhci pci platform data initilization file
+ *
+ * (C) Copyright 2012 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/intel-mid.h>
+#include <linux/mmc/sdhci-pci-data.h>
+#include <linux/gpio.h>
+#include <linux/lnw_gpio.h>
+#include <linux/delay.h>
+#include <asm/intel_scu_ipc.h>
+#include <asm/intel_scu_pmic.h>
+#include <linux/intel_mid_pm.h>
+#include <linux/hardirq.h>
+#include <linux/mmc/sdhci.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/fixed.h>
+#include <linux/acpi.h>
+#include <linux/acpi_gpio.h>
+
+#include "platform_sdhci_pci.h"
+
+#ifdef CONFIG_ATOM_SOC_POWER
+static int panic_mode_emmc0_power_up(void *data)
+{
+	int ret;
+	bool atomic_context;
+	/*
+	 * Since pmu_set_emmc_to_d0i0_atomic function can
+	 * only be used in atomic context, before call this
+	 * function, do a check first and make sure this function
+	 * is used in atomic context.
+	 */
+	atomic_context = (!preemptible() || in_atomic_preempt_off());
+
+	if (!atomic_context) {
+		pr_err("%s: not in atomic context!\n", __func__);
+		return -EPERM;
+	}
+
+	ret = pmu_set_emmc_to_d0i0_atomic();
+	if (ret) {
+		pr_err("%s: power up host failed with err %d\n",
+				__func__, ret);
+	}
+
+	return ret;
+}
+#else
+static int panic_mode_emmc0_power_up(void *data)
+{
+	return 0;
+}
+#endif
+
+static unsigned int sdhci_pdata_quirks =
+	SDHCI_QUIRK2_ENABLE_MMC_PM_IGNORE_PM_NOTIFY;
+
+int sdhci_pdata_set_quirks(const unsigned int quirks)
+{
+	sdhci_pdata_quirks = quirks;
+	return 0;
+}
+
+static int mrfl_sdio_setup(struct sdhci_pci_data *data);
+static void mrfl_sdio_cleanup(struct sdhci_pci_data *data);
+
+static void (*sdhci_embedded_control)(void *dev_id, void (*virtual_cd)
+					(void *dev_id, int card_present));
+
+/*****************************************************************************\
+ *                                                                           *
+ *  Regulator declaration for WLAN SDIO card                                 *
+ *                                                                           *
+\*****************************************************************************/
+
+#define DELAY_ONOFF 250
+
+static struct regulator_consumer_supply wlan_vmmc_supply = {
+	.supply		= "vmmc",
+};
+
+static struct regulator_init_data wlan_vmmc_data = {
+	.constraints = {
+		.valid_ops_mask	= REGULATOR_CHANGE_STATUS,
+	},
+	.num_consumer_supplies	= 1,
+	.consumer_supplies = &wlan_vmmc_supply,
+};
+
+static struct fixed_voltage_config vwlan = {
+	.supply_name		= "vwlan",
+	.microvolts		= 1800000,
+	.gpio			= -EINVAL,
+	.startup_delay		= 1000 * DELAY_ONOFF,
+	.enable_high		= 1,
+	.enabled_at_boot	= 0,
+	.init_data		= &wlan_vmmc_data,
+};
+
+static void vwlan_device_release(struct device *dev) {}
+
+static struct platform_device vwlan_device = {
+	.name		= "reg-fixed-voltage",
+	.id		= PLATFORM_DEVID_AUTO,
+	.dev = {
+		.platform_data	= &vwlan,
+		.release = vwlan_device_release,
+	},
+};
+
+#define TNG_EMMC_0_FLIS_ADDR		0xff0c0900
+#define TNG_EMMC_FLIS_SLEW		0x00000400
+#define TNG_EMMC_0_CLK_PULLDOWN		0x00000200
+
+static int mrfl_flis_slew_change(void __iomem *flis_addr, int slew)
+{
+	unsigned int reg;
+	int i;
+
+	/*
+	 * Change TNG gpio FLIS settings for all eMMC0
+	 * CLK/CMD/DAT pins.
+	 * That is, including emmc_0_clk, emmc_0_cmd,
+	 * emmc_0_d_0, emmc_0_d_1, emmc_0_d_2, emmc_0_d_3,
+	 * emmc_0_d_4, emmc_0_d_5, emmc_0_d_6, emmc_0_d_7
+	 */
+	for (i = 0; i < 10; i++) {
+		reg = readl(flis_addr + (i * 4));
+		if (slew)
+			reg |= TNG_EMMC_FLIS_SLEW; /* SLEW B */
+		else
+			reg &= ~TNG_EMMC_FLIS_SLEW; /* SLEW A */
+		writel(reg, flis_addr + (i * 4));
+	}
+
+	/* Disable PullDown for emmc_0_clk */
+	reg = readl(flis_addr);
+	reg &= ~TNG_EMMC_0_CLK_PULLDOWN;
+	writel(reg, flis_addr);
+
+	return 0;
+}
+
+static int mrfl_flis_dump(void __iomem *addr)
+{
+	int i, ret = 0;
+	unsigned int reg;
+
+	if (addr) {
+		/*
+		 * Dump TNG gpio FLIS settings for all eMMC0
+		 * CLK/CMD/DAT pins.
+		 * That is, including emmc_0_clk, emmc_0_cmd,
+		 * emmc_0_d_0, emmc_0_d_1, emmc_0_d_2, emmc_0_d_3,
+		 * emmc_0_d_4, emmc_0_d_5, emmc_0_d_6, emmc_0_d_7
+		 */
+		for (i = 0; i < 10; i++) {
+			reg = readl(addr + (i * 4));
+			pr_err("emmc0 FLIS reg[%d] dump: 0x%08x\n", i, reg);
+		}
+	}
+
+	return ret;
+}
+
+/* Board specific setup related to eMMC goes here */
+static int mrfl_emmc_setup(struct sdhci_pci_data *data)
+{
+	struct pci_dev *pdev = data->pdev;
+	int ret = 0;
+
+	data->flis_addr = ioremap_nocache(TNG_EMMC_0_FLIS_ADDR, 64);
+	if (!data->flis_addr) {
+		pr_err("emmc0 FLIS addr ioremap failed!\n");
+		ret = -ENOMEM;
+	} else {
+		pr_info("emmc0 mapped FLIS addr: %p\n", data->flis_addr);
+		if (pdev->revision == 0x01) /* TNB B0 stepping */
+			/* HS200 FLIS slew setting */
+			ret = mrfl_flis_slew_change(data->flis_addr, 1);
+	}
+
+	return ret;
+}
+
+/* Board specific cleanup related to eMMC goes here */
+static void mrfl_emmc_cleanup(struct sdhci_pci_data *data)
+{
+	if (data->flis_addr)
+		iounmap(data->flis_addr);
+}
+
+/* Board specific setup related to SD goes here */
+static int mrfl_sd_setup(struct sdhci_pci_data *data)
+{
+	u8 vldocnt = 0;
+	int err;
+
+	/*
+	 * Change necessary GPIO pin mode for SD card working.
+	 * This is something should be done in IA firmware.
+	 * But, anyway, just do it here in case IA firmware
+	 * forget to do so.
+	 */
+	lnw_gpio_set_alt(MRFLD_GPIO_SDIO_0_CD, 0);
+
+	err = intel_scu_ipc_ioread8(MRFLD_PMIC_VLDOCNT, &vldocnt);
+	if (err) {
+		pr_err("PMIC vldocnt IPC read error: %d\n", err);
+		return err;
+	}
+
+	vldocnt |= MRFLD_PMIC_VLDOCNT_VSWITCH_BIT;
+	err = intel_scu_ipc_iowrite8(MRFLD_PMIC_VLDOCNT, vldocnt);
+	if (err) {
+		pr_err("PMIC vldocnt IPC write error: %d\n", err);
+		return err;
+	}
+	msleep(20);
+
+	return 0;
+}
+
+/* Board specific cleanup related to SD goes here */
+static void mrfl_sd_cleanup(struct sdhci_pci_data *data)
+{
+	u8 vldocnt = 0;
+	int err;
+
+	err = intel_scu_ipc_ioread8(MRFLD_PMIC_VLDOCNT, &vldocnt);
+	if (err) {
+		pr_err("PMIC vldocnt IPC read error: %d\n", err);
+		return;
+	}
+
+	vldocnt &= MRFLD_PMIC_VLDOCNT_PW_OFF;
+	err = intel_scu_ipc_iowrite8(MRFLD_PMIC_VLDOCNT, vldocnt);
+	if (err)
+		pr_err("PMIC vldocnt IPC write error: %d\n", err);
+
+	return;
+}
+
+/* Board specific setup related to SDIO goes here */
+static int mrfl_sdio_setup(struct sdhci_pci_data *data)
+{
+	struct pci_dev *pdev = data->pdev;
+	/* Control card power through a regulator */
+	wlan_vmmc_supply.dev_name = dev_name(&pdev->dev);
+	vwlan.gpio = get_gpio_by_name("WLAN-enable");
+	if (vwlan.gpio < 0)
+		pr_err("%s: No WLAN-enable GPIO in SFI table\n",
+	       __func__);
+	pr_info("vwlan gpio %d\n", vwlan.gpio);
+	/* add a regulator to control wlan enable gpio */
+	if (platform_device_register(&vwlan_device))
+		pr_err("regulator register failed\n");
+	else
+		sdhci_pci_request_regulators();
+
+	return 0;
+}
+
+/* Board specific cleanup related to SDIO goes here */
+static void mrfl_sdio_cleanup(struct sdhci_pci_data *data)
+{
+}
+
+/* MRFL platform data */
+static struct sdhci_pci_data mrfl_sdhci_pci_data[] = {
+	[EMMC0_INDEX] = {
+			.pdev = NULL,
+			.slotno = EMMC0_INDEX,
+			.rst_n_gpio = -EINVAL,
+			.cd_gpio = -EINVAL,
+			.quirks = 0,
+			.platform_quirks = 0,
+			.setup = mrfl_emmc_setup,
+			.cleanup = mrfl_emmc_cleanup,
+			.power_up = panic_mode_emmc0_power_up,
+			.flis_dump = mrfl_flis_dump,
+	},
+	[SD_INDEX] = {
+			.pdev = NULL,
+			.slotno = SD_INDEX,
+			.rst_n_gpio = -EINVAL,
+			.cd_gpio = 77,
+			.quirks = 0,
+			.platform_quirks = 0,
+			.setup = mrfl_sd_setup,
+			.cleanup = mrfl_sd_cleanup,
+	},
+	[SDIO_INDEX] = {
+			.pdev = NULL,
+			.slotno = SDIO_INDEX,
+			.rst_n_gpio = -EINVAL,
+			.cd_gpio = -EINVAL,
+			.quirks = 0,
+			.platform_quirks = 0,
+			.setup = mrfl_sdio_setup,
+			.cleanup = mrfl_sdio_cleanup,
+	},
+};
+
+/* Moorefield platform data */
+static struct sdhci_pci_data moor_sdhci_pci_data[] = {
+	[EMMC0_INDEX] = {
+			.pdev = NULL,
+			.slotno = 0,
+			.rst_n_gpio = -EINVAL,
+			.cd_gpio = -EINVAL,
+			.quirks = 0,
+			.platform_quirks = 0,
+			.setup = 0,
+			.cleanup = 0,
+			.power_up = panic_mode_emmc0_power_up,
+	},
+	[SD_INDEX] = {
+			.pdev = NULL,
+			.slotno = 0,
+			.rst_n_gpio = -EINVAL,
+			.cd_gpio = 77,
+			.quirks = 0,
+			.platform_quirks = 0,
+			.setup = mrfl_sd_setup,
+			.cleanup = mrfl_sd_cleanup,
+			.power_up = 0,
+	},
+	[SDIO_INDEX] = {
+			.pdev = NULL,
+			.slotno = 0,
+			.rst_n_gpio = -EINVAL,
+			.cd_gpio = -EINVAL,
+			.quirks = 0,
+			.platform_quirks = 0,
+			.setup = mrfl_sdio_setup,
+			.cleanup = mrfl_sdio_cleanup,
+			.power_up = 0,
+	},
+};
+
+static struct sdhci_pci_data *get_sdhci_platform_data(struct pci_dev *pdev)
+{
+	struct sdhci_pci_data *pdata = NULL;
+
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_INTEL_MRFL_MMC:
+		switch (PCI_FUNC(pdev->devfn)) {
+		case 0:
+			pdata = &mrfl_sdhci_pci_data[EMMC0_INDEX];
+			break;
+		case 1:
+			pdata = &mrfl_sdhci_pci_data[EMMC1_INDEX];
+			break;
+		case 2:
+			pdata = &mrfl_sdhci_pci_data[SD_INDEX];
+			break;
+		case 3:
+			pdata = &mrfl_sdhci_pci_data[SDIO_INDEX];
+			break;
+		default:
+			pr_err("%s func %s: Invalid PCI Dev func no. (%d)\n",
+				__FILE__, __func__, PCI_FUNC(pdev->devfn));
+			break;
+		}
+		break;
+	case PCI_DEVICE_ID_INTEL_MOOR_EMMC:
+		pdata = &moor_sdhci_pci_data[EMMC0_INDEX];
+		break;
+	case PCI_DEVICE_ID_INTEL_MOOR_SD:
+		pdata = &moor_sdhci_pci_data[SD_INDEX];
+		break;
+	case PCI_DEVICE_ID_INTEL_MOOR_SDIO:
+		pdata = &moor_sdhci_pci_data[SDIO_INDEX];
+		pdata->quirks = sdhci_pdata_quirks;
+		break;
+	default:
+		break;
+	}
+	return pdata;
+}
+
+int sdhci_pdata_set_embedded_control(void (*fnp)
+			(void *dev_id, void (*virtual_cd)
+			(void *dev_id, int card_present)))
+{
+	WARN_ON(sdhci_embedded_control);
+	sdhci_embedded_control = fnp;
+	return 0;
+}
+
+struct sdhci_pci_data *mmc_sdhci_pci_get_data(struct pci_dev *pci_dev, int slotno)
+{
+	return get_sdhci_platform_data(pci_dev);
+}
+
+static int __init init_sdhci_get_data(void)
+{
+	sdhci_pci_get_data = mmc_sdhci_pci_get_data;
+
+	return 0;
+}
+
+arch_initcall(init_sdhci_get_data);
+
diff --git a/arch/x86/platform/intel-mid/device_libs/pci/platform_sdhci_pci.h b/arch/x86/platform/intel-mid/device_libs/pci/platform_sdhci_pci.h
new file mode 100644
index 0000000..fec04ba
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/pci/platform_sdhci_pci.h
@@ -0,0 +1,31 @@
+/*
+ * platform_mmc_sdhci_pci.h: mmc sdhci pci platform data header file
+ *
+ * (C) Copyright 2012 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_MMC_SDHCI_PCI_H_
+#define _PLATFORM_MMC_SDHCI_PCI_H_
+
+#define EMMC0_INDEX	0
+#define EMMC1_INDEX	1
+#define SD_INDEX	2
+#define SDIO_INDEX	3
+
+#define MRFLD_GPIO_SDIO_0_CD		77
+
+#define MRFLD_PMIC_VLDOCNT		0xaf
+#define MRFLD_PMIC_VLDOCNT_VSWITCH_BIT	0x02
+#define MRFLD_PMIC_VLDOCNT_PW_OFF	0xfd
+
+int sdhci_pdata_set_quirks(const unsigned int quirks);
+int sdhci_pdata_set_embedded_control(void (*fnp)
+			(void *dev_id, void (*virtual_cd)
+			(void *dev_id, int card_present)));
+#endif
+
diff --git a/arch/x86/platform/intel-mid/device_libs/pci/platform_sst_pci.c b/arch/x86/platform/intel-mid/device_libs/pci/platform_sst_pci.c
new file mode 100644
index 0000000..fe106bc
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/pci/platform_sst_pci.c
@@ -0,0 +1,139 @@
+/*
+ * platform_sst_pci.c: SST platform data initilization file
+ *
+ * (C) Copyright 2013 Intel Corporation
+ * Author:  Dharageswari R <dharageswari.r@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/lnw_gpio.h>
+#include <linux/delay.h>
+#include <linux/intel_mid_dma.h>
+#include <asm/intel-mid.h>
+#include <asm/platform_sst.h>
+
+#define CTP_SSP_BASE 0xffa23000
+#define CTP_DMA_BASE 0xffaf8000
+#define MRFLD_SSP_BASE 0xff2a0000
+#define MRFLD_DMA_BASE 0xff298000
+#define CTP_MAX_CONFIG_SIZE 500
+
+#define SST_CTP_IRAM_START	0
+#define SST_CTP_IRAM_END	0x80000
+#define SST_CTP_DRAM_START	0x400000
+#define SST_CTP_DRAM_END	0x480000
+#define SSP_SIZE 0x1000
+#define DMA_SIZE_CTP 0x1000
+#define DMA_SIZE_MRFLD 0x4000
+#define SST_CHECKPOINT_OFFSET 0x1C00
+#define SST_CHECKPOINT_OFFSET_MRFLD 0x0C00
+#define CHECKPOINT_DUMP_SZ 256
+
+#define SST_V1_MAILBOX_RECV	0x800
+#define SST_V2_MAILBOX_RECV	0x400
+
+#define MRFLD_FW_LSP_DDR_BASE 0xC5E00000
+#define MRFLD_FW_MOD_END (MRFLD_FW_LSP_DDR_BASE + 0x1FFFFF)
+#define MRFLD_FW_MOD_TABLE_OFFSET 0x80000
+#define MRFLD_FW_MOD_TABLE_SIZE 0x100
+
+struct sst_platform_info sst_data;
+
+static struct sst_ssp_info ssp_inf_mrfld = {
+	.base_add = MRFLD_SSP_BASE,
+	.gpio_in_use = false,
+};
+
+static struct sst_platform_config_data sst_mrfld_pdata = {
+	.sst_dma_base[0] = MRFLD_DMA_BASE,
+	.sst_dma_base[1] = 0x0,
+};
+
+static const struct sst_info mrfld_sst_info = {
+	.iram_start = 0,
+	.iram_end = 0,
+	.iram_use = false,
+	.dram_start = 0,
+	.dram_end = 0,
+	.dram_use = false,
+	.imr_start = 0,
+	.imr_end = 0,
+	.imr_use = false,
+	.mailbox_start = 0,
+	.use_elf = true,
+	.lpe_viewpt_rqd = false,
+	.max_streams = MAX_NUM_STREAMS_MRFLD,
+	.dma_max_len = SST_MAX_DMA_LEN_MRFLD,
+	.num_probes = 16,
+};
+
+static struct sst_platform_debugfs_data mrfld_debugfs_data = {
+	.ssp_reg_size = SSP_SIZE,
+	.dma_reg_size = DMA_SIZE_MRFLD,
+	.num_ssp = 3,
+	.num_dma = 2,
+	.checkpoint_offset = SST_CHECKPOINT_OFFSET_MRFLD,
+	.checkpoint_size = CHECKPOINT_DUMP_SZ,
+};
+
+static const struct sst_ipc_info mrfld_ipc_info = {
+	.use_32bit_ops = false,
+	.ipc_offset = 0,
+	.mbox_recv_off = SST_V2_MAILBOX_RECV,
+};
+
+static const struct sst_lib_dnld_info  mrfld_lib_dnld_info = {
+	.mod_base           = MRFLD_FW_LSP_DDR_BASE,
+	.mod_end            = MRFLD_FW_MOD_END,
+	.mod_table_offset   = MRFLD_FW_MOD_TABLE_OFFSET,
+	.mod_table_size     = MRFLD_FW_MOD_TABLE_SIZE,
+	.mod_ddr_dnld       = true,
+};
+
+static void set_mrfld_sst_config(struct sst_platform_info *sst_info)
+{
+	sst_info->ssp_data = &ssp_inf_mrfld;
+	sst_info->pdata = &sst_mrfld_pdata;
+	sst_info->bdata = NULL;
+	sst_info->probe_data = &mrfld_sst_info;
+	sst_info->ipc_info = &mrfld_ipc_info;
+	sst_info->debugfs_data = &mrfld_debugfs_data;
+	sst_info->lib_info = &mrfld_lib_dnld_info;
+	/* By default set recovery to true for all mrfld based devices */
+	sst_info->enable_recovery = 1;
+
+	return ;
+
+}
+
+static struct sst_platform_info *get_sst_platform_data(struct pci_dev *pdev)
+{
+	struct sst_platform_info *sst_pinfo = NULL;
+
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_INTEL_SST_MRFLD:
+	case PCI_DEVICE_ID_INTEL_SST_MOOR:
+		set_mrfld_sst_config(&sst_data);
+		sst_pinfo = &sst_data;
+		break;
+	default:
+		return NULL;
+	}
+	return sst_pinfo;
+}
+
+static void sst_pci_early_quirks(struct pci_dev *pci_dev)
+{
+	pci_dev->dev.platform_data = get_sst_platform_data(pci_dev);
+}
+
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SST_MRFLD,
+							sst_pci_early_quirks);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SST_MOOR,
+							sst_pci_early_quirks);
diff --git a/arch/x86/platform/intel-mid/device_libs/pci/platform_usb_otg.c b/arch/x86/platform/intel-mid/device_libs/pci/platform_usb_otg.c
new file mode 100644
index 0000000..96832a5
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/pci/platform_usb_otg.c
@@ -0,0 +1,126 @@
+/*
+ * platform_otg_pci.c: USB OTG platform data initilization file
+ *
+ * (C) Copyright 2013 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/gpio.h>
+#include <linux/sfi.h>
+#include <linux/pci.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_scu_ipc.h>
+#include <asm/intel-mid.h>
+#include <linux/dma-mapping.h>
+
+#ifdef CONFIG_USB_DWC3_OTG
+#include <linux/usb/dwc3-intel-mid.h>
+static struct intel_dwc_otg_pdata dwc_otg_pdata;
+
+struct {
+	u8 name[16];
+	u32 val;
+} usb2_el_cal[] = {
+	{"ULPICAL_7D", 0x7F},
+	{"ULPICAL_7F", 0x7D},
+	{"UTMICAL_PEDE3TX0", 0x51801},
+	{"UTMICAL_PEDE6TX7", 0x53f01},
+};
+#define USB_ULPI_SFI_PREFIX "ULPI"
+#define USB_UTMI_SFI_PREFIX "UTMI"
+
+void sfi_handle_usb(struct sfi_device_table_entry *pentry, struct devs_id *dev)
+{
+	int i;
+
+	if (!dev || !dev->name) {
+		pr_info("USB SFI entry is NULL!\n");
+		return;
+	}
+	for (i = 0; i < ARRAY_SIZE(usb2_el_cal); i++) {
+		if (!strncmp(dev->name, usb2_el_cal[i].name, strlen(dev->name))) {
+			if (!strncmp(dev->name, USB_ULPI_SFI_PREFIX, strlen(USB_ULPI_SFI_PREFIX)))
+				dwc_otg_pdata.ulpi_eye_calibration = usb2_el_cal[i].val;
+			else if (!strncmp(dev->name, USB_UTMI_SFI_PREFIX, strlen(USB_UTMI_SFI_PREFIX)))
+				dwc_otg_pdata.utmi_eye_calibration = usb2_el_cal[i].val;
+			else
+				pr_info("%s:is Invalid USB SFI Entry Name!\n", dev->name);
+
+			break;
+		}
+	}
+}
+
+static bool dwc_otg_get_usbspecoverride(u32 addr)
+{
+	void __iomem *usb_comp_iomap;
+	bool usb_spec_override;
+
+	/* Read MISCFLAGS byte */
+	usb_comp_iomap = ioremap_nocache(addr, 4);
+	/* MISCFLAGS.BIT[6] indicates USB spec override */
+	usb_spec_override = ioread8(usb_comp_iomap) & SMIP_VIOLATE_BC_MASK;
+	iounmap(usb_comp_iomap);
+
+	return usb_spec_override;
+}
+
+/* Read SCCB_USB_CFG.bit14 to get the current phy select setting */
+static enum usb_phy_intf get_usb2_phy_type(void)
+{
+	void __iomem *addr;
+	u32 val;
+
+	addr = ioremap_nocache(SCCB_USB_CFG, 4);
+	if (!addr)
+		return USB2_PHY_ULPI;
+
+	val = readl(addr) & SCCB_USB_CFG_SELECT_ULPI;
+	iounmap(addr);
+
+	if (val)
+		return USB2_PHY_ULPI;
+	else
+		return USB2_PHY_UTMI;
+}
+
+static struct intel_dwc_otg_pdata *get_otg_platform_data(struct pci_dev *pdev)
+{
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_INTEL_MRFL_DWC3_OTG:
+		dwc_otg_pdata.pmic_type = SHADY_COVE;
+		dwc_otg_pdata.charger_detect_enable = 0;
+		dwc_otg_pdata.usb2_phy_type = get_usb2_phy_type();
+		 dwc_otg_pdata.charging_compliance =
+			dwc_otg_get_usbspecoverride(MOFD_SMIP_VIOLATE_BC_ADDR);
+
+		if (dwc_otg_pdata.usb2_phy_type == USB2_PHY_ULPI) {
+			dwc_otg_pdata.charger_detect_enable = 1;
+			dwc_otg_pdata.using_vusbphy = 0;
+		} else {
+			dwc_otg_pdata.using_vusbphy = 1;
+			dwc_otg_pdata.utmi_fs_det_wa = 1;
+			dwc_otg_pdata.utmi_eye_calibration = 0x51801;
+		}
+		return &dwc_otg_pdata;
+	default:
+		break;
+	}
+
+	return NULL;
+}
+#endif
+
+static void otg_pci_early_quirks(struct pci_dev *pci_dev)
+{
+	pci_dev->dev.platform_data = get_otg_platform_data(pci_dev);
+}
+
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MRFL_DWC3_OTG,
+			otg_pci_early_quirks);
+
diff --git a/arch/x86/platform/intel-mid/device_libs/pci/platform_usb_otg.h b/arch/x86/platform/intel-mid/device_libs/pci/platform_usb_otg.h
new file mode 100644
index 0000000..765d014
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/pci/platform_usb_otg.h
@@ -0,0 +1,17 @@
+/*
+ * platform_usb_otg.h: USB OTG platform data initialization head file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_USB_OTG_H_
+#define _PLATFORM_USB_OTG_H_
+
+extern void sfi_handle_usb(struct sfi_device_table_entry *pentry, struct devs_id *dev);
+
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_bcove_adc.c b/arch/x86/platform/intel-mid/device_libs/platform_bcove_adc.c
new file mode 100644
index 0000000..23281bb
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_bcove_adc.c
@@ -0,0 +1,174 @@
+/*
+ * platform_bcove_adc.c: Platform data for Merrifield Basincove GPADC driver
+ *
+ * (C) Copyright 2012 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sfi.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/machine.h>
+#include <linux/iio/types.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_basincove_gpadc.h>
+#include <asm/intel_mid_remoteproc.h>
+
+#include "platform_bcove_adc.h"
+
+/* SRAM address where the GPADC interrupt register is cached */
+#define GPADC_SRAM_INTR_ADDR	0xfffff615
+
+static struct gpadc_regmap_t shadycove_gpadc_regmaps[SCOVE_GPADC_CH_NUM] = {
+	{"VBAT",        5, 0xE9, 0xEA, },
+	{"BATID",       4, 0xEC, 0xED, },
+	{"PMICTEMP",    3, 0xD5, 0xD6, },
+	{"BATTEMP0",    2, 0xD1, 0xD2, },
+	{"BATTEMP1",    2, 0xD3, 0xD4, },
+	{"SYSTEMP0",    3, 0xCB, 0xCC, },
+	{"SYSTEMP1",    3, 0xCD, 0xCE, },
+	{"SYSTEMP2",    3, 0xCF, 0xD0, },
+	{"USBID",       1, 0xEE, 0xEF, },
+	{"PEAK",        7, 0xF7, 0xF8, },
+	{"AGND",	6, 0xF0, 0xF1, },
+	{"VREF",	6, 0xF0, 0xF1, },
+};
+
+static struct gpadc_regs_t shadycove_gpadc_regs = {
+	.gpadcreq		= 0xDC,
+	.gpadcreq_irqen		= 0,
+	.gpadcreq_busy		= (1 << 0),
+	.mirqlvl1		= 0x0C,
+	.mirqlvl1_adc		= (1 << 4),
+	.adc1cntl		= 0xEB,
+	.adcirq			= 0x06,
+	.madcirq		= 0x11,
+};
+
+#define MSIC_ADC_MAP(_adc_channel_label,			\
+		     _consumer_dev_name,                        \
+		     _consumer_channel)                         \
+	{                                                       \
+		.adc_channel_label = _adc_channel_label,        \
+		.consumer_dev_name = _consumer_dev_name,        \
+		.consumer_channel = _consumer_channel,          \
+	}
+
+struct iio_map basincove_iio_maps[] = {
+	MSIC_ADC_MAP("CH0", "VIBAT", "VBAT"),
+	MSIC_ADC_MAP("CH1", "BATID", "BATID"),
+	MSIC_ADC_MAP("CH2", "VIBAT", "IBAT"),
+	MSIC_ADC_MAP("CH3", "PMICTEMP", "PMICTEMP"),
+	MSIC_ADC_MAP("CH4", "BATTEMP", "BATTEMP0"),
+	MSIC_ADC_MAP("CH5", "BATTEMP", "BATTEMP1"),
+	MSIC_ADC_MAP("CH6", "SYSTEMP", "SYSTEMP0"),
+	MSIC_ADC_MAP("CH7", "SYSTEMP", "SYSTEMP1"),
+	MSIC_ADC_MAP("CH8", "SYSTEMP", "SYSTEMP2"),
+	MSIC_ADC_MAP("CH6", "bcove_thrm", "SYSTEMP0"),
+	MSIC_ADC_MAP("CH7", "bcove_thrm", "SYSTEMP1"),
+	MSIC_ADC_MAP("CH8", "bcove_thrm", "SYSTEMP2"),
+	MSIC_ADC_MAP("CH3", "bcove_thrm", "PMICTEMP"),
+	{ },
+};
+
+struct iio_map shadycove_iio_maps[] = {
+	MSIC_ADC_MAP("CH0", "VIBAT", "VBAT"),
+	MSIC_ADC_MAP("CH1", "BATID", "BATID"),
+	MSIC_ADC_MAP("CH2", "PMICTEMP", "PMICTEMP"),
+	MSIC_ADC_MAP("CH3", "BATTEMP", "BATTEMP0"),
+	MSIC_ADC_MAP("CH4", "BATTEMP", "BATTEMP1"),
+	MSIC_ADC_MAP("CH5", "SYSTEMP", "SYSTEMP0"),
+	MSIC_ADC_MAP("CH6", "SYSTEMP", "SYSTEMP1"),
+	MSIC_ADC_MAP("CH7", "SYSTEMP", "SYSTEMP2"),
+	MSIC_ADC_MAP("CH8", "USBID", "USBID"),
+	MSIC_ADC_MAP("CH9", "PEAK", "PEAK"),
+	MSIC_ADC_MAP("CH10", "GPMEAS", "AGND"),
+	MSIC_ADC_MAP("CH11", "GPMEAS", "VREF"),
+	MSIC_ADC_MAP("CH5", "scove_thrm", "SYSTEMP0"),
+	MSIC_ADC_MAP("CH6", "scove_thrm", "SYSTEMP1"),
+	MSIC_ADC_MAP("CH7", "scove_thrm", "SYSTEMP2"),
+	MSIC_ADC_MAP("CH2", "scove_thrm", "PMICTEMP"),
+	{ },
+};
+
+#define MSIC_ADC_CHANNEL(_type, _channel, _datasheet_name) \
+	{                               \
+		.indexed = 1,           \
+		.type = _type,          \
+		.channel = _channel,    \
+		.datasheet_name = _datasheet_name,      \
+	}
+
+static const struct iio_chan_spec const basincove_adc_channels[] = {
+	MSIC_ADC_CHANNEL(IIO_VOLTAGE, 0, "CH0"),
+	MSIC_ADC_CHANNEL(IIO_RESISTANCE, 1, "CH1"),
+	MSIC_ADC_CHANNEL(IIO_CURRENT, 2, "CH2"),
+	MSIC_ADC_CHANNEL(IIO_TEMP, 3, "CH3"),
+	MSIC_ADC_CHANNEL(IIO_TEMP, 4, "CH4"),
+	MSIC_ADC_CHANNEL(IIO_TEMP, 5, "CH5"),
+	MSIC_ADC_CHANNEL(IIO_TEMP, 6, "CH6"),
+	MSIC_ADC_CHANNEL(IIO_TEMP, 7, "CH7"),
+	MSIC_ADC_CHANNEL(IIO_TEMP, 8, "CH8"),
+};
+
+static const struct iio_chan_spec const shadycove_adc_channels[] = {
+	MSIC_ADC_CHANNEL(IIO_VOLTAGE, 0, "CH0"),
+	MSIC_ADC_CHANNEL(IIO_RESISTANCE, 1, "CH1"),
+	MSIC_ADC_CHANNEL(IIO_TEMP, 2, "CH2"),
+	MSIC_ADC_CHANNEL(IIO_TEMP, 3, "CH3"),
+	MSIC_ADC_CHANNEL(IIO_TEMP, 4, "CH4"),
+	MSIC_ADC_CHANNEL(IIO_TEMP, 5, "CH5"),
+	MSIC_ADC_CHANNEL(IIO_TEMP, 6, "CH6"),
+	MSIC_ADC_CHANNEL(IIO_TEMP, 7, "CH7"),
+	MSIC_ADC_CHANNEL(IIO_RESISTANCE, 8, "CH8"),
+	MSIC_ADC_CHANNEL(IIO_VOLTAGE, 9, "CH9"),
+	MSIC_ADC_CHANNEL(IIO_VOLTAGE, 10, "CH10"),
+	MSIC_ADC_CHANNEL(IIO_VOLTAGE, 11, "CH11"),
+};
+
+static struct intel_basincove_gpadc_platform_data bcove_adc_pdata;
+
+void __init *bcove_adc_platform_data(void *info)
+{
+	struct platform_device *pdev = NULL;
+	struct sfi_device_table_entry *entry = info;
+	int ret;
+
+	pdev = platform_device_alloc(BCOVE_ADC_DEV_NAME, -1);
+
+	if (!pdev) {
+		pr_err("out of memory for SFI platform dev %s\n",
+					BCOVE_ADC_DEV_NAME);
+		goto out;
+	}
+
+	bcove_adc_pdata.channel_num = SCOVE_GPADC_CH_NUM;
+	bcove_adc_pdata.intr = GPADC_SRAM_INTR_ADDR;
+	bcove_adc_pdata.intr_mask = MUSBID | MPEAK | MBATTEMP
+		| MSYSTEMP | MBATT | MVIBATT | MGPMEAS | MCCTICK;
+	bcove_adc_pdata.gpadc_iio_maps = shadycove_iio_maps;
+	bcove_adc_pdata.gpadc_regmaps = shadycove_gpadc_regmaps;
+	bcove_adc_pdata.gpadc_regs = &shadycove_gpadc_regs;
+	bcove_adc_pdata.gpadc_channels = shadycove_adc_channels;
+
+	pdev->dev.platform_data = &bcove_adc_pdata;
+
+	ret = platform_device_add(pdev);
+	if (ret) {
+		pr_err("failed to add bcove adc platform device\n");
+		platform_device_put(pdev);
+		goto out;
+	}
+
+	install_irq_resource(pdev, entry->irq);
+
+	register_rpmsg_service("rpmsg_bcove_adc", RPROC_SCU,
+				RP_BCOVE_ADC);
+out:
+	return &bcove_adc_pdata;
+}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_bcove_adc.h b/arch/x86/platform/intel-mid/device_libs/platform_bcove_adc.h
new file mode 100644
index 0000000..044adc7
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_bcove_adc.h
@@ -0,0 +1,17 @@
+/*
+ * platform_bcove_adc.h: Head File for Merrifield Basincove GPADC driver
+ *
+ * (C) Copyright 2012 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_BCOVE_ADC_H_
+#define _PLATFORM_BCOVE_ADC_H_
+
+#define BCOVE_ADC_DEV_NAME	"bcove_adc"
+
+extern void __init *bcove_adc_platform_data(void *info) __attribute__((weak));
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_bq24192.h b/arch/x86/platform/intel-mid/device_libs/platform_bq24192.h
new file mode 100644
index 0000000..6b9c15f
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_bq24192.h
@@ -0,0 +1,53 @@
+/*
+ * platform_bq24192.h: bq24192 platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_BQ24192_H_
+#define _PLATFORM_BQ24192_H_
+
+#define CHGR_INT_N	93
+#define BQ24192_CHRG_OTG_GPIO	36
+
+#define BQ24192_CHRG_CUR_NOLIMIT	1500 /* in mA */
+#define BQ24192_CHRG_CUR_HIGH		900
+#define BQ24192_CHRG_CUR_MEDIUM		500
+#define BQ24192_CHRG_CUR_LOW		100
+
+/* ADC Channel Numbers */
+#define BATT_NUM_GPADC_SENSORS	1
+#define GPADC_BPTHERM_CHNUM	0x9
+#define GPADC_BPTHERM_SAMPLE_COUNT	1
+
+#define BATT_VMIN_THRESHOLD_DEF	3400	/* 3400mV */
+#define BATT_TEMP_MAX_DEF	60	/* 60 degrees */
+#define BATT_TEMP_MIN_DEF	0
+#define BATT_CRIT_CUTOFF_VOLT_DEF	3600	/* 3600 mV */
+
+#define BPTHERM_CURVE_MAX_SAMPLES	23
+#define BPTHERM_CURVE_MAX_VALUES	4
+
+/*CLT battery temperature  attributes*/
+#define BPTHERM_ADC_MIN	107
+#define BPTHERM_ADC_MAX	977
+
+/* SMIP related definitions */
+/* sram base address for smip accessing*/
+#define SMIP_SRAM_OFFSET_ADDR	0x44d
+#define SMIP_SRAM_BATT_PROP_OFFSET_ADDR	0x460
+#define TEMP_MON_RANGES	4
+
+/* Signature comparision of SRAM data for supportted Battery Char's */
+#define SBCT_REV	0x16
+#define RSYS_MOHMS	0xAA
+/* Master Charge control register */
+#define MSIC_CHRCRTL	0x188
+#define MSIC_CHRGENBL	0x40
+extern void *bq24192_platform_data(void *info) __attribute__((weak));
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_bq24261.c b/arch/x86/platform/intel-mid/device_libs/platform_bq24261.c
new file mode 100644
index 0000000..a55b720
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_bq24261.c
@@ -0,0 +1,80 @@
+/*
+ * platform_mrfl_bq24261.c: Platform data for bq24261 charger driver
+ *
+ * (C) Copyright 2012 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
+#include <linux/init.h>
+#include <linux/sfi.h>
+#include <linux/power_supply.h>
+#include <asm/pmic_pdata.h>
+#include <linux/power/bq24261_charger.h>
+#include <asm/intel-mid.h>
+
+#include "platform_ipc.h"
+#include "platform_bq24261.h"
+
+#define BOOST_CUR_LIM	500
+
+static struct power_supply_throttle bq24261_throttle_states[] = {
+	{
+		.throttle_action = PSY_THROTTLE_CC_LIMIT,
+		.throttle_val = BQ24261_CHRG_CUR_NOLIMIT,
+
+	},
+	{
+		.throttle_action = PSY_THROTTLE_CC_LIMIT,
+		.throttle_val = BQ24261_CHRG_CUR_MEDIUM,
+
+	},
+	{
+		.throttle_action = PSY_THROTTLE_DISABLE_CHARGING,
+	},
+	{
+		.throttle_action = PSY_THROTTLE_DISABLE_CHARGER,
+	},
+
+};
+
+char *bq24261_supplied_to[] = {
+				"max170xx_battery",
+				"max17047_battery",
+};
+
+
+void __init *bq24261_platform_data(void *info)
+{
+	static struct bq24261_plat_data bq24261_pdata;
+
+
+	bq24261_pdata.irq_map = PMIC_SRAM_INTR_MAP;
+	bq24261_pdata.irq_mask = PMIC_EXT_INTR_MASK;
+	bq24261_pdata.supplied_to = bq24261_supplied_to;
+	bq24261_pdata.num_supplicants = ARRAY_SIZE(bq24261_supplied_to);
+	bq24261_pdata.throttle_states = bq24261_throttle_states;
+	bq24261_pdata.num_throttle_states = ARRAY_SIZE(bq24261_throttle_states);
+	bq24261_pdata.enable_charger = NULL;
+#ifdef CONFIG_PMIC_CCSM
+	bq24261_pdata.enable_charging = pmic_enable_charging;
+	bq24261_pdata.set_inlmt = pmic_set_ilimma;
+	bq24261_pdata.set_cc = pmic_set_cc;
+	bq24261_pdata.set_cv = pmic_set_cv;
+	bq24261_pdata.dump_master_regs = dump_pmic_regs;
+	bq24261_pdata.enable_vbus = pmic_enable_vbus;
+	bq24261_pdata.handle_otgmode = pmic_handle_otgmode;
+	/* WA for ShadyCove host-mode WDT issue */
+	bq24261_pdata.is_wdt_kick_needed = true;
+#endif
+	bq24261_pdata.set_iterm = NULL;
+	bq24261_pdata.boost_mode_ma = BOOST_CUR_LIM;
+
+	return &bq24261_pdata;
+}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_bq24261.h b/arch/x86/platform/intel-mid/device_libs/platform_bq24261.h
new file mode 100644
index 0000000..6b97380
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_bq24261.h
@@ -0,0 +1,26 @@
+/*
+ * platform_mrfl_bq24261.h: platform data for bq24261 driver
+ *
+ * (C) Copyright 2012 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_MRFL_BQ24261_H_
+#define _PLATFORM_MRFL_BQ24261_H_
+
+#define MRFL_CHRGR_DEV_NAME	"bq24261_charger"
+
+#define PMIC_SRAM_INTR_MAP 0xFFFFF616
+#define PMIC_EXT_INTR_MASK 0x01
+
+#define BQ24261_CHRG_CUR_LOW		100	/* 100mA */
+#define BQ24261_CHRG_CUR_MEDIUM		500	/* 500mA */
+#define BQ24261_CHRG_CUR_HIGH		900	/* 900mA */
+#define BQ24261_CHRG_CUR_NOLIMIT	1500	/* 1500mA */
+
+extern void __init *bq24261_platform_data(
+			void *info) __attribute__((weak));
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_btlpm.c b/arch/x86/platform/intel-mid/device_libs/platform_btlpm.c
new file mode 100644
index 0000000..7e7494d
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_btlpm.c
@@ -0,0 +1,78 @@
+/*
+ * platform_btlpm: btlpm platform data initialization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#include <linux/init.h>
+#include <linux/pm_runtime.h>
+#include <asm/bcm_bt_lpm.h>
+#include <asm/intel-mid.h>
+#include <linux/gpio.h>
+
+#define UART_PORT_NO 0 /* Bluetooth is using UART port number 0 */
+
+static struct bcm_bt_lpm_platform_data bcm_bt_lpm_pdata = {
+	.gpio_wake = -EINVAL,
+	.gpio_host_wake = -EINVAL,
+	.int_host_wake = -EINVAL,
+	.gpio_enable = -EINVAL,
+	.port = UART_PORT_NO,
+};
+
+struct platform_device bcm_bt_lpm_device = {
+	.name = "bcm_bt_lpm",
+	.id = 0,
+	.dev = {
+		.platform_data = &bcm_bt_lpm_pdata,
+	},
+};
+
+static int __init bluetooth_init(void)
+{
+
+	int error_reg;
+
+	/* Get the GPIO numbers from the SFI table */
+
+	bcm_bt_lpm_pdata.gpio_enable = get_gpio_by_name("BT-reset");
+	if (!gpio_is_valid(bcm_bt_lpm_pdata.gpio_enable)) {
+		pr_err("%s: gpio %s not found\n", __func__, "BT-reset");
+		return -ENODEV;
+	}
+
+#ifndef BCM_BT_LPM_DBG
+	bcm_bt_lpm_pdata.gpio_host_wake = get_gpio_by_name("bt_uart_enable");
+	if (!gpio_is_valid(bcm_bt_lpm_pdata.gpio_host_wake)) {
+		pr_err("%s: gpio %s not found\n", __func__, "bt_uart_enable");
+		return -ENODEV;
+	}
+
+	bcm_bt_lpm_pdata.int_host_wake =
+				gpio_to_irq(bcm_bt_lpm_pdata.gpio_host_wake);
+
+	bcm_bt_lpm_pdata.gpio_wake = get_gpio_by_name("bt_wakeup");
+	if (!gpio_is_valid(bcm_bt_lpm_pdata.gpio_wake)) {
+		pr_err("%s: gpio %s not found\n", __func__, "bt_wakeup");
+		return -ENODEV;
+	}
+
+	pr_debug("%s: gpio_wake %d, gpio_host_wake %d\n", __func__,
+		bcm_bt_lpm_pdata.gpio_wake, bcm_bt_lpm_pdata.gpio_host_wake);
+#endif
+
+	error_reg = platform_device_register(&bcm_bt_lpm_device);
+	if (error_reg < 0) {
+		pr_err("%s: platform_device_register for %s failed\n",
+					__func__, bcm_bt_lpm_device.name);
+		return -ENODEV;
+	}
+	return 0;
+}
+
+rootfs_initcall(bluetooth_init);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_display.c b/arch/x86/platform/intel-mid/device_libs/platform_display.c
new file mode 100644
index 0000000..2d9056a
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_display.c
@@ -0,0 +1,54 @@
+/*
+ * platform_display.c: put platform display configuration in
+ * this file. If any platform level display related configuration
+ * has to be made, then that configuration shoule be in this
+ * file.
+ *
+ * (C) Copyright 2013 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/i2c.h>
+#include <linux/printk.h>
+#include <linux/platform_data/lp855x.h>
+
+static struct i2c_board_info __initdata lp8556_i2c_device = {
+	I2C_BOARD_INFO("lp8556", 0x2C),
+};
+
+struct lp855x_platform_data platform_data = {
+	.name = "lp8556",
+	.device_control = 0,
+	.initial_brightness = 0,
+	.period_ns = 5000000, /* 200 Hz */
+	.size_program = 0,
+	.rom_data = NULL,
+};
+
+void *lp8556_get_platform_data(void)
+{
+
+	return (void *)&platform_data;
+}
+
+static int __init platform_display_module_init(void)
+{
+
+	lp8556_i2c_device.platform_data = lp8556_get_platform_data();
+
+	if (lp8556_i2c_device.platform_data == NULL) {
+		pr_debug("failed to get platform data for lp8556.");
+		return -EINVAL;
+	}
+
+	return -EPERM;
+}
+
+module_init(platform_display_module_init);
+
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_dw_i2c.c b/arch/x86/platform/intel-mid/device_libs/platform_dw_i2c.c
new file mode 100644
index 0000000..6cf70ed
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_dw_i2c.c
@@ -0,0 +1,224 @@
+/*
+ * platform_dw_i2c.c: I2C platform data initilization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/lnw_gpio.h>
+#include <linux/gpio.h>
+#include <asm/intel-mid.h>
+#include <linux/intel_mid_pm.h>
+
+struct i2c_pin_cfg {
+	int scl_gpio;
+	int scl_alt;
+	int sda_gpio;
+	int sda_alt;
+};
+
+enum {
+	BOARD_NONE = 0,
+	BOARD_VTB,
+	BOARD_SALTBAY,
+};
+
+static struct i2c_pin_cfg dw_i2c_pin_cfgs[][10] = {
+	[BOARD_NONE] =  {},
+	[BOARD_VTB] =  {
+		[1] = {27, 1, 26, 1},
+	},
+	[BOARD_SALTBAY] =  {
+		[1] = {19, 1, 20, 1},
+	},
+};
+
+int intel_mid_dw_i2c_abort(int busnum)
+{
+	int i;
+	int ret = -EBUSY;
+	struct i2c_pin_cfg *pins = &dw_i2c_pin_cfgs[BOARD_NONE][busnum];
+
+	switch (intel_mid_identify_cpu()) {
+	case INTEL_MID_CPU_CHIP_CLOVERVIEW:
+		pins = &dw_i2c_pin_cfgs[BOARD_VTB][busnum];
+		break;
+	case INTEL_MID_CPU_CHIP_TANGIER:
+	case INTEL_MID_CPU_CHIP_ANNIEDALE:
+		pins = &dw_i2c_pin_cfgs[BOARD_SALTBAY][busnum];
+		break;
+	default:
+		break;
+	}
+
+	if (!pins->scl_gpio || !pins->sda_gpio) {
+		pr_err("i2c-%d: recovery ignore\n", busnum);
+		return 0;
+	}
+	pr_err("i2c-%d: try to abort xfer, scl_gpio %d, sda_gpio %d\n",
+			busnum, pins->scl_gpio, pins->sda_gpio);
+	gpio_request(pins->scl_gpio, "scl");
+	gpio_request(pins->sda_gpio, "sda");
+	lnw_gpio_set_alt(pins->scl_gpio, LNW_GPIO);
+	lnw_gpio_set_alt(pins->sda_gpio, LNW_GPIO);
+	gpio_direction_input(pins->scl_gpio);
+	gpio_direction_input(pins->sda_gpio);
+	usleep_range(10, 10);
+	pr_err("i2c-%d: scl_gpio val %d, sda_gpio val %d\n",
+			busnum,
+			gpio_get_value(pins->scl_gpio) ? 1 : 0,
+			gpio_get_value(pins->sda_gpio) ? 1 : 0);
+	gpio_direction_output(pins->scl_gpio, 1);
+	pr_err("i2c-%d: toggle begin\n", busnum);
+	for (i = 0; i < 9; i++) {
+		if (gpio_get_value(pins->sda_gpio)) {
+			if (gpio_get_value(pins->scl_gpio)) {
+				pr_err("i2c-%d: recovery success\n", busnum);
+				break;
+			} else {
+				gpio_direction_output(pins->scl_gpio, 0);
+				pr_err("i2c-%d: scl_gpio val 0, sda_gpio val 1\n",
+					busnum);
+			}
+		}
+		gpio_set_value(pins->scl_gpio, 0);
+		usleep_range(10, 20);
+		gpio_set_value(pins->scl_gpio, 1);
+		usleep_range(10, 20);
+		pr_err("i2c-%d: toggle SCL loop %d\n", busnum, i);
+	}
+	pr_err("i2c-%d: toggle end\n", busnum);
+	gpio_direction_output(pins->scl_gpio, 1);
+	gpio_direction_output(pins->sda_gpio, 0);
+	gpio_set_value(pins->scl_gpio, 0);
+	usleep_range(10, 20);
+	gpio_set_value(pins->scl_gpio, 1);
+	usleep_range(10, 20);
+	gpio_set_value(pins->sda_gpio, 0);
+	lnw_gpio_set_alt(pins->scl_gpio, pins->scl_alt);
+	lnw_gpio_set_alt(pins->sda_gpio, pins->sda_alt);
+	usleep_range(10, 10);
+	gpio_free(pins->scl_gpio);
+	gpio_free(pins->sda_gpio);
+
+	return ret;
+}
+EXPORT_SYMBOL(intel_mid_dw_i2c_abort);
+
+/* synchronization for sharing the I2C controller */
+#define PUNIT_PORT	0x04
+static DEFINE_SPINLOCK(msgbus_lock);
+static struct pci_dev *pci_root;
+#include <linux/pm_qos.h>
+static struct pm_qos_request pm_qos;
+int qos;
+
+static int intel_mid_msgbus_init(void)
+{
+	pci_root = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
+	if (!pci_root) {
+		pr_err("msgbus PCI handle NULL for I2C sharing\n");
+		return -ENODEV;
+	}
+
+	pm_qos_add_request(&pm_qos, PM_QOS_CPU_DMA_LATENCY,
+			PM_QOS_DEFAULT_VALUE);
+
+	return 0;
+}
+fs_initcall(intel_mid_msgbus_init);
+
+#define PUNIT_DOORBELL_OPCODE	(0xE0)
+#define PUNIT_DOORBELL_REG	(0x0)
+#define PUNIT_SEMAPHORE		(0x10E)
+
+#define GET_SEM() (intel_mid_msgbus_read32(PUNIT_PORT, PUNIT_SEMAPHORE) & 0x1)
+
+static void reset_semaphore(void)
+{
+	u32 data;
+
+	data = intel_mid_msgbus_read32(PUNIT_PORT, PUNIT_SEMAPHORE);
+	smp_mb();
+	data = data & 0xfffffffe;
+	intel_mid_msgbus_write32(PUNIT_PORT, PUNIT_SEMAPHORE, data);
+	smp_mb();
+
+	pm_qos_update_request(&pm_qos, PM_QOS_DEFAULT_VALUE);
+
+}
+
+int intel_mid_dw_i2c_acquire_ownership(void)
+{
+	u32 ret = 0;
+	u32 data = 0; /* data sent to PUNIT */
+	unsigned long irq_flags;
+	u32 cmd;
+	u32 cmdext;
+	int timeout = 100;
+
+	pm_qos_update_request(&pm_qos, CSTATE_EXIT_LATENCY_C1 - 1);
+
+	/* host driver writes 0x2 to side band register 0x7 */
+	intel_mid_msgbus_write32(PUNIT_PORT, PUNIT_SEMAPHORE, 0x2);
+	smp_mb();
+
+	/* host driver sends 0xE0 opcode to PUNIT and writes 0 register */
+	cmd = (PUNIT_DOORBELL_OPCODE << 24) | (PUNIT_PORT << 16) |
+	((PUNIT_DOORBELL_REG & 0xFF) << 8) | PCI_ROOT_MSGBUS_DWORD_ENABLE;
+	cmdext = PUNIT_DOORBELL_REG & 0xffffff00;
+
+	spin_lock_irqsave(&msgbus_lock, irq_flags);
+	pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_DATA_REG, data);
+
+	if (cmdext) {
+		/* This resets to 0 automatically, no need to write 0 */
+		pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_EXT_REG,
+				cmdext);
+	}
+
+	pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_REG, cmd);
+	spin_unlock_irqrestore(&msgbus_lock, irq_flags);
+
+	/* host driver waits for bit 0 to be set in side band 0x7 */
+	while (GET_SEM() != 0x1) {
+		usleep_range(1000, 2000);
+		timeout--;
+		if (timeout <= 0) {
+			pr_err("Timeout: semaphore timed out, reset sem\n");
+			ret = -ETIMEDOUT;
+			reset_semaphore();
+			pr_err("PUNIT SEM: %d\n",
+					intel_mid_msgbus_read32(PUNIT_PORT,
+						PUNIT_SEMAPHORE));
+			WARN_ON(1);
+			return ret;
+		}
+	}
+	smp_mb();
+
+	pr_devel("i2c-semaphore: acquired i2c\n");
+
+	return ret;
+}
+EXPORT_SYMBOL(intel_mid_dw_i2c_acquire_ownership);
+
+int intel_mid_dw_i2c_release_ownership(void)
+{
+	reset_semaphore();
+
+	pr_devel("i2c-semaphore: released i2c\n");
+
+	return 0;
+}
+EXPORT_SYMBOL(intel_mid_dw_i2c_release_ownership);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.c b/arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.c
new file mode 100644
index 0000000..003f2db
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.c
@@ -0,0 +1,243 @@
+/*
+ * platform_gpio_keys.c: gpio_keys platform data initilization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/input.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/gpio_keys.h>
+#include <linux/platform_device.h>
+#include <linux/acpi.h>
+#include <linux/acpi_gpio.h>
+#include <linux/gpio.h>
+#include <linux/gpio_keys.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/pnp.h>
+#include <asm/io_apic.h>
+#include <asm/acpi.h>
+#include <asm/hw_irq.h>
+#include <asm/intel-mid.h>
+#include "platform_gpio_keys.h"
+
+/*
+ * we will search these buttons in SFI GPIO table (by name)
+ * and register them dynamically. Please add all possible
+ * buttons here, we will shrink them if no GPIO found.
+ */
+static struct gpio_keys_button gpio_button[] = {
+	{KEY_POWER,		-1, 1, "power_btn",	EV_KEY, 0, 3000},
+	{KEY_PROG1,		-1, 1, "prog_btn1",	EV_KEY, 0, 20},
+	{KEY_PROG2,		-1, 1, "prog_btn2",	EV_KEY, 0, 20},
+	{SW_LID,		-1, 1, "lid_switch",	EV_SW,  0, 20},
+	{KEY_VOLUMEUP,		-1, 1, "vol_up",	EV_KEY, 0, 20},
+	{KEY_VOLUMEDOWN,	-1, 1, "vol_down",	EV_KEY, 0, 20},
+	{KEY_CAMERA,		-1, 1, "camera_full",	EV_KEY, 0, 20},
+	{KEY_CAMERA_FOCUS,	-1, 1, "camera_half",	EV_KEY, 0, 20},
+	{SW_KEYPAD_SLIDE,	-1, 1, "MagSw1",	EV_SW,  0, 20},
+	{SW_KEYPAD_SLIDE,	-1, 1, "MagSw2",	EV_SW,  0, 20},
+	{KEY_CAMERA,		-1, 1, "cam_capture",	EV_KEY, 0, 20},
+	{KEY_CAMERA_FOCUS,	-1, 1, "cam_focus",	EV_KEY, 0, 20},
+	{KEY_MENU,              -1, 1, "fp_menu_key",   EV_KEY, 0, 20},
+	{KEY_HOME,              -1, 1, "fp_home_key",   EV_KEY, 0, 20},
+	{KEY_SEARCH,            -1, 1, "fp_search_key", EV_KEY, 0, 20},
+	{KEY_BACK,              -1, 1, "fp_back_key",   EV_KEY, 0, 20},
+	{KEY_VOLUMEUP,          -1, 1, "volume_up",     EV_KEY, 0, 20},
+	{SW_MUTE_DEVICE,        -1, 1, "mute_enable",   EV_SW,  0, 20},
+	{KEY_CAMERA,            -1, 1, "camera0_sb1",   EV_KEY, 0, 20},
+	{KEY_CAMERA_FOCUS,      -1, 1, "camera0_sb2",   EV_KEY, 0, 20},
+	{KEY_CONNECT,		-1, 1, "key_connect",   EV_KEY, 0, 20},
+};
+
+static struct gpio_keys_platform_data gpio_keys = {
+	.buttons	= gpio_button,
+	.rep		= 0,
+	.nbuttons	= -1, /* will fill it after search */
+};
+
+static struct platform_device pb_device = {
+	.name		= DEVICE_NAME,
+	.id		= -1,
+	.dev		= {
+		.platform_data	= &gpio_keys,
+	},
+};
+
+/*
+ * Shrink the non-existent buttons, register the gpio button
+ * device if there is some
+ */
+static int __init pb_keys_init(void)
+{
+	struct gpio_keys_button *gb = gpio_button;
+	int i, num, good = 0;
+
+	num = sizeof(gpio_button) / sizeof(struct gpio_keys_button);
+	for (i = 0; i < num; i++) {
+		gb[i].gpio = get_gpio_by_name(gb[i].desc);
+		pr_info("info[%2d]: name = %s, gpio = %d\n",
+			 i, gb[i].desc, gb[i].gpio);
+		if (gb[i].gpio == -1)
+			continue;
+
+		if (i != good)
+			gb[good] = gb[i];
+		good++;
+	}
+
+	if (good) {
+		gpio_keys.nbuttons = good;
+		return platform_device_register(&pb_device);
+	}
+	return 0;
+}
+late_initcall(pb_keys_init);
+
+#ifdef	CONFIG_ACPI
+enum {
+	PWRBTN_KEY,
+	ROLOCK_KEY,
+	VOL_KEYS,
+	KEY_TYPE_NUMS,
+};
+
+static struct gpio_keys_button lesskey_button_powerbtn[] = {
+	{KEY_POWER,		-1, 1, "power_btn", EV_KEY, .acpi_idx = 0, 1},
+	{ },
+};
+
+static struct gpio_keys_button lesskey_button_rolock[] = {
+	{KEY_RO,		-1, 1, "rotationlock",	EV_KEY, .acpi_idx = 4},
+	{ },
+};
+
+static struct gpio_keys_button lesskey_button_vol[] = {
+	{KEY_VOLUMEUP,		-1, 1, "volume_up",	EV_KEY, .acpi_idx = 2},
+	{KEY_VOLUMEDOWN,	-1, 1, "volume_down",	EV_KEY, .acpi_idx = 3},
+};
+
+struct gpio_keys_init_data {
+	struct gpio_keys_button *keys_button;
+	int nkeys;
+};
+
+static struct gpio_keys_init_data lesskey_init_data[KEY_TYPE_NUMS] = {
+	{
+		.keys_button = lesskey_button_powerbtn,
+		.nkeys = 1,
+	}, {
+		.keys_button = lesskey_button_rolock,
+		.nkeys = 1,
+	}, {
+		.keys_button = lesskey_button_vol,
+		.nkeys = sizeof(lesskey_button_vol) /
+			 sizeof(struct gpio_keys_button),
+	},
+};
+
+static struct gpio_keys_platform_data lesskey_keys[KEY_TYPE_NUMS] = {
+	{
+		.buttons	= lesskey_button_powerbtn,
+		.rep		= 0,
+		.nbuttons	= 0,
+	}, {
+		.buttons	= lesskey_button_rolock,
+		.rep		= 0,
+		.nbuttons	= 0,
+	}, {
+		.buttons	= lesskey_button_vol,
+		.rep		= 1,
+		.nbuttons	= 0,
+	},
+};
+
+static struct platform_device lesskey_device[KEY_TYPE_NUMS] = {
+	{
+		.name		= "gpio-lesskey",
+		.id		= PLATFORM_DEVID_AUTO,
+		.dev		= {
+			.platform_data	= &lesskey_keys[PWRBTN_KEY],
+		},
+	}, {
+		.name		= "gpio-lesskey",
+		.id		= PLATFORM_DEVID_AUTO,
+		.dev		= {
+			.platform_data	= &lesskey_keys[ROLOCK_KEY],
+		},
+	}, {
+		.name		= "gpio-lesskey",
+		.id		= PLATFORM_DEVID_AUTO,
+		.dev		= {
+			.platform_data	= &lesskey_keys[VOL_KEYS],
+		},
+	},
+};
+
+static int
+lesskey_pnp_probe(struct pnp_dev *pdev, const struct pnp_device_id *id)
+{
+	int type, i, num, good;
+	struct gpio_keys_button *gb;
+	struct acpi_gpio_info info;
+	int ret = 0;
+
+	for (type = 0; type < KEY_TYPE_NUMS; type++) {
+		good = 0;
+		gb = lesskey_init_data[type].keys_button;
+		num = lesskey_init_data[type].nkeys;
+		pr_info("%s, num = %d\n", __func__, num);
+
+		for (i = 0; i < num; i++) {
+			gb[i].gpio = acpi_get_gpio_by_index(&pdev->dev,
+							gb[i].acpi_idx, &info);
+			pr_info("lesskey [%2d]: name = %s, gpio = %d\n",
+				 i, gb[i].desc, gb[i].gpio);
+			if (gb[i].gpio < 0)
+				continue;
+			if (i != good)
+				gb[good] = gb[i];
+			good++;
+		}
+
+		if (good) {
+			lesskey_keys[type].nbuttons = good;
+			ret = platform_device_register(&lesskey_device[type]);
+			if (ret) {
+				dev_err(&pdev->dev, "register platform device %s failed\n",
+					lesskey_device[type].name);
+				return ret;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static const struct pnp_device_id lesskey_pnp_match[] = {
+	{ "INTCFD9", 0},
+	{ }
+};
+MODULE_DEVICE_TABLE(pnp, lesskey_pnp_match);
+
+static struct pnp_driver lesskey_pnp_driver = {
+	.name		= "lesskey",
+	.id_table	= lesskey_pnp_match,
+	.probe          = lesskey_pnp_probe,
+};
+
+static int __init lesskey_init(void)
+{
+	return pnp_register_driver(&lesskey_pnp_driver);
+}
+
+late_initcall(lesskey_init);
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.h b/arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.h
new file mode 100644
index 0000000..5095329
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.h
@@ -0,0 +1,16 @@
+/*
+ * platform_gpio_keys.h: gpio_keys platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_GPIO_KEYS_H_
+#define _PLATFORM_GPIO_KEYS_H_
+
+#define DEVICE_NAME "gpio-keys"
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_hsu.c b/arch/x86/platform/intel-mid/device_libs/platform_hsu.c
new file mode 100644
index 0000000..ea502d2
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_hsu.c
@@ -0,0 +1,1018 @@
+/*
+ * platform_hsu.c: hsu platform data initilization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/lnw_gpio.h>
+#include <linux/gpio.h>
+#include <asm/setup.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_mid_hsu.h>
+
+#include "platform_hsu.h"
+
+#define TNG_CLOCK_CTL 0xFF00B830
+#define TNG_CLOCK_SC  0xFF00B868
+
+#define VLV_HSU_CLOCK	0x0800
+#define VLV_HSU_RESET	0x0804
+#define VLV_HSU_OVF_IRQ	0x0820	/* Overflow interrupt related */
+
+static unsigned int clock;
+static struct hsu_port_pin_cfg *hsu_port_gpio_mux;
+static struct hsu_port_cfg *platform_hsu_info;
+
+static struct
+hsu_port_pin_cfg hsu_port_pin_cfgs[][hsu_pid_max][hsu_port_max] = {
+	[hsu_pnw] = {
+		[hsu_pid_def] = {
+			[hsu_port0] = {
+				.id = 0,
+				.name = HSU_BT_PORT,
+				.wake_gpio = 13,
+				.rx_gpio = 96+26,
+				.rx_alt = 1,
+				.tx_gpio = 96+27,
+				.tx_alt = 1,
+				.cts_gpio = 96+28,
+				.cts_alt = 1,
+				.rts_gpio = 96+29,
+				.rts_alt = 1,
+			},
+			[hsu_port1] = {
+				.id = 1,
+				.name = HSU_MODEM_PORT,
+				.wake_gpio = 64,
+				.rx_gpio = 64,
+				.rx_alt = 1,
+				.tx_gpio = 65,
+				.tx_alt = 1,
+				.cts_gpio = 68,
+				.cts_alt = 1,
+				.rts_gpio = 66,
+				.rts_alt = 2,
+			},
+			[hsu_port2] = {
+				.id = 2,
+				.name = HSU_GPS_PORT,
+			},
+			[hsu_port_share] = {
+				.id = 1,
+				.name = HSU_DEBUG_PORT,
+				.wake_gpio = 96+30,
+				.rx_gpio = 96+30,
+				.rx_alt = 1,
+				.tx_gpio = 96+31,
+				.tx_alt = 1,
+			},
+		},
+	},
+	[hsu_clv] = {
+		[hsu_pid_rhb] = {
+			[hsu_port0] = {
+				.id = 0,
+				.name = HSU_BT_PORT,
+				.wake_gpio = 42,
+				.rx_gpio = 96+26,
+				.rx_alt = 1,
+				.tx_gpio = 96+27,
+				.tx_alt = 1,
+				.cts_gpio = 96+28,
+				.cts_alt = 1,
+				.rts_gpio = 96+29,
+				.rts_alt = 1,
+			},
+			[hsu_port1] = {
+				.id = 1,
+				.name = HSU_MODEM_PORT,
+				.wake_gpio = 64,
+				.rx_gpio = 64,
+				.rx_alt = 1,
+				.tx_gpio = 65,
+				.tx_alt = 1,
+				.cts_gpio = 68,
+				.cts_alt = 1,
+				.rts_gpio = 66,
+				.rts_alt = 2,
+			},
+			[hsu_port2] = {
+				.id = 2,
+				.name = HSU_DEBUG_PORT,
+				.wake_gpio = 67,
+				.rx_gpio = 67,
+				.rx_alt = 1,
+			},
+			[hsu_port_share] = {
+				.id = 1,
+				.name = HSU_GPS_PORT,
+				.wake_gpio = 96+30,
+				.rx_gpio = 96+30,
+				.rx_alt = 1,
+				.tx_gpio = 96+31,
+				.tx_alt = 1,
+				.cts_gpio = 96+33,
+				.cts_alt = 1,
+				.rts_gpio = 96+32,
+				.rts_alt = 2,
+			},
+		},
+		[hsu_pid_vtb_pro] = {
+			[hsu_port0] = {
+				.id = 0,
+				.name = HSU_BT_PORT,
+				.rx_gpio = 96+26,
+				.rx_alt = 1,
+				.tx_gpio = 96+27,
+				.tx_alt = 1,
+				.cts_gpio = 96+28,
+				.cts_alt = 1,
+				.rts_gpio = 96+29,
+				.rts_alt = 1,
+			},
+			[hsu_port1] = {
+				.id = 1,
+				.name = HSU_MODEM_PORT,
+				.wake_gpio = 96+30,
+				.rx_gpio = 96+30,
+				.rx_alt = 1,
+				.tx_gpio = 96+31,
+				.tx_alt = 1,
+				.cts_gpio = 96+33,
+				.cts_alt = 1,
+				.rts_gpio = 96+32,
+				.rts_alt = 2,
+			},
+			[hsu_port2] = {
+				.id = 2,
+				.name = HSU_DEBUG_PORT,
+				.wake_gpio = 67,
+				.rx_gpio = 67,
+				.rx_alt = 1,
+			},
+			[hsu_port_share] = {
+				.id = 1,
+				.name = HSU_GPS_PORT,
+				.wake_gpio = 64,
+				.rx_gpio = 64,
+				.rx_alt = 1,
+				.tx_gpio = 65,
+				.tx_alt = 1,
+				.cts_gpio = 68,
+				.cts_alt = 1,
+				.rts_gpio = 66,
+				.rts_alt = 2,
+			},
+		},
+		[hsu_pid_vtb_eng] = {
+			[hsu_port0] = {
+				.id = 0,
+				.name = HSU_BT_PORT,
+				.rx_gpio = 96+26,
+				.rx_alt = 1,
+				.tx_gpio = 96+27,
+				.tx_alt = 1,
+				.cts_gpio = 96+28,
+				.cts_alt = 1,
+				.rts_gpio = 96+29,
+				.rts_alt = 1,
+			},
+			[hsu_port1] = {
+				.id = 1,
+				.name = HSU_MODEM_PORT,
+				.wake_gpio = 64,
+				.rx_gpio = 64,
+				.rx_alt = 1,
+				.tx_gpio = 65,
+				.tx_alt = 1,
+				.cts_gpio = 68,
+				.cts_alt = 1,
+				.rts_gpio = 66,
+				.rts_alt = 2,
+			},
+			[hsu_port2] = {
+				.id = 2,
+				.name = HSU_DEBUG_PORT,
+				.wake_gpio = 67,
+				.rx_gpio = 67,
+				.rx_alt = 1,
+			},
+			[hsu_port_share] = {
+				.id = 1,
+				.name = HSU_GPS_PORT,
+				.wake_gpio = 96+30,
+				.rx_gpio = 96+30,
+				.rx_alt = 1,
+				.tx_gpio = 96+31,
+				.tx_alt = 1,
+				.cts_gpio = 96+33,
+				.cts_alt = 1,
+				.rts_gpio = 96+32,
+				.rts_alt = 2,
+			},
+		},
+	},
+	[hsu_tng] = {
+		[hsu_pid_def] = {
+			[hsu_port0] = {
+				.id = 0,
+				.name = HSU_BT_PORT,
+				.rx_gpio = 126,
+				.rx_alt = 1,
+				.tx_gpio = 127,
+				.tx_alt = 1,
+				.cts_gpio = 124,
+				.cts_alt = 1,
+				.rts_gpio = 125,
+				.rts_alt = 1,
+			},
+			[hsu_port1] = {
+				.id = 1,
+				.name = HSU_GPS_PORT,
+				.wake_gpio = 130,
+				.rx_gpio = 130,
+				.rx_alt = 1,
+				.cts_gpio = 128,
+				.cts_alt = 1,
+				.rts_gpio = 129,
+				.rts_alt = 1,
+			},
+			[hsu_port2] = {
+				.id = 2,
+				.name = HSU_DEBUG_PORT,
+				.wake_gpio = 134,
+				.rx_gpio = 134,
+				.rx_alt = 1,
+				.cts_gpio = 132,
+				.cts_alt = 1,
+				.rts_gpio = 133,
+				.rts_alt = 1,
+			},
+		},
+	},
+	[hsu_vlv2] = {
+		[hsu_pid_def] = {
+			[hsu_port0] = {
+				.id = 0,
+				.name = HSU_BT_PORT,
+				.rts_gpio = 72,
+				.rts_alt = 1,
+			},
+			[hsu_port1] = {
+				.id = 1,
+				.name = HSU_GPS_PORT,
+				.wake_gpio = 74,
+				.rx_gpio = 74,
+				.rx_alt = 1,
+				.rts_gpio = 76,
+				.rts_alt = 1,
+			},
+		},
+	},
+	[hsu_chv] = {
+		[hsu_pid_def] = {
+			[hsu_port0] = {
+				.id = 0,
+				.name = HSU_BT_PORT,
+				.rts_gpio = 0,
+				.rts_alt = 1,
+			},
+			[hsu_port1] = {
+				.id = 1,
+				.name = HSU_GPS_PORT,
+				.wake_gpio = 0,
+				.wake_src = hsu_rxd,
+				.rx_gpio = 0,
+				.rx_alt = 1,
+				.rts_gpio = 0,
+				.rts_alt = 1,
+			},
+		},
+	},
+
+};
+
+static struct hsu_port_cfg hsu_port_cfgs[][hsu_port_max] = {
+	[hsu_pnw] = {
+		[hsu_port0] = {
+			.type = bt_port,
+			.hw_ip = hsu_intel,
+			.index = 0,
+			.name = HSU_BT_PORT,
+			.idle = 20,
+			.hw_init = intel_mid_hsu_init,
+			.hw_set_alt = intel_mid_hsu_switch,
+			.hw_set_rts = intel_mid_hsu_rts,
+			.hw_suspend = intel_mid_hsu_suspend,
+			.hw_resume = intel_mid_hsu_resume,
+			.hw_get_clk = intel_mid_hsu_get_clk,
+		},
+		[hsu_port1] = {
+			.type = modem_port,
+			.hw_ip = hsu_intel,
+			.index = 1,
+			.name = HSU_MODEM_PORT,
+			.idle = 100,
+			.hw_init = intel_mid_hsu_init,
+			.hw_set_alt = intel_mid_hsu_switch,
+			.hw_set_rts = intel_mid_hsu_rts,
+			.hw_suspend = intel_mid_hsu_suspend,
+			.hw_resume = intel_mid_hsu_resume,
+			.hw_get_clk = intel_mid_hsu_get_clk,
+			.has_alt = 1,
+			.alt = hsu_port_share,
+			.force_suspend = 0,
+		},
+		[hsu_port2] = {
+			.type = gps_port,
+			.hw_ip = hsu_intel,
+			.index = 2,
+			.name = HSU_GPS_PORT,
+			.idle = 40,
+			.preamble = 1,
+			.hw_init = intel_mid_hsu_init,
+			.hw_set_alt = intel_mid_hsu_switch,
+			.hw_set_rts = intel_mid_hsu_rts,
+			.hw_suspend = intel_mid_hsu_suspend,
+			.hw_suspend_post = intel_mid_hsu_suspend_post,
+			.hw_resume = intel_mid_hsu_resume,
+			.hw_get_clk = intel_mid_hsu_get_clk,
+		},
+		[hsu_port_share] = {
+			.type = debug_port,
+			.hw_ip = hsu_intel,
+			.index = 3,
+			.name = HSU_DEBUG_PORT,
+			.idle = 2000,
+			.hw_init = intel_mid_hsu_init,
+			.hw_set_alt = intel_mid_hsu_switch,
+			.hw_suspend = intel_mid_hsu_suspend,
+			.hw_resume = intel_mid_hsu_resume,
+			.hw_get_clk = intel_mid_hsu_get_clk,
+			.has_alt = 1,
+			.alt = hsu_port1,
+			.force_suspend = 1,
+		},
+	},
+	[hsu_clv] = {
+		[hsu_port0] = {
+			.type = bt_port,
+			.hw_ip = hsu_intel,
+			.index = 0,
+			.name = HSU_BT_PORT,
+			.idle = 20,
+			.hw_init = intel_mid_hsu_init,
+			.hw_set_alt = intel_mid_hsu_switch,
+			.hw_set_rts = intel_mid_hsu_rts,
+			.hw_suspend = intel_mid_hsu_suspend,
+			.hw_resume = intel_mid_hsu_resume,
+			.hw_get_clk = intel_mid_hsu_get_clk,
+		},
+		[hsu_port1] = {
+			.type = modem_port,
+			.hw_ip = hsu_intel,
+			.index = 1,
+			.name = HSU_MODEM_PORT,
+			.idle = 100,
+			.hw_init = intel_mid_hsu_init,
+			.hw_set_alt = intel_mid_hsu_switch,
+			.hw_set_rts = intel_mid_hsu_rts,
+			.hw_suspend = intel_mid_hsu_suspend,
+			.hw_resume = intel_mid_hsu_resume,
+			.hw_get_clk = intel_mid_hsu_get_clk,
+			.has_alt = 1,
+			.alt = hsu_port_share,
+			.force_suspend = 0,
+		},
+		[hsu_port2] = {
+			.type = debug_port,
+			.hw_ip = hsu_intel,
+			.index = 2,
+			.name = HSU_DEBUG_PORT,
+			.idle = 2000,
+			.hw_init = intel_mid_hsu_init,
+			.hw_set_alt = intel_mid_hsu_switch,
+			.hw_suspend = intel_mid_hsu_suspend,
+			.hw_resume = intel_mid_hsu_resume,
+			.hw_get_clk = intel_mid_hsu_get_clk,
+		},
+		[hsu_port_share] = {
+			.type = gps_port,
+			.hw_ip = hsu_intel,
+			.index = 3,
+			.name = HSU_GPS_PORT,
+			.idle = 40,
+			.preamble = 1,
+			.hw_init = intel_mid_hsu_init,
+			.hw_set_alt = intel_mid_hsu_switch,
+			.hw_set_rts = intel_mid_hsu_rts,
+			.hw_suspend = intel_mid_hsu_suspend,
+			.hw_suspend_post = intel_mid_hsu_suspend_post,
+			.hw_resume = intel_mid_hsu_resume,
+			.hw_get_clk = intel_mid_hsu_get_clk,
+			.has_alt = 1,
+			.alt = hsu_port1,
+			.force_suspend = 1,
+		},
+	},
+	[hsu_tng] = {
+		[hsu_port0] = {
+			.type = bt_port,
+			.hw_ip = hsu_intel,
+			.index = 0,
+			.name = HSU_BT_PORT,
+			.idle = 20,
+			.hw_ctrl_cts = 1,
+			.hw_init = intel_mid_hsu_init,
+			.hw_set_alt = intel_mid_hsu_switch,
+			.hw_set_rts = intel_mid_hsu_rts,
+			.hw_suspend = intel_mid_hsu_suspend,
+			.hw_resume = intel_mid_hsu_resume,
+			.hw_get_clk = intel_mid_hsu_get_clk,
+			.hw_context_save = 1,
+		},
+		[hsu_port1] = {
+			.type = gps_port,
+			.hw_ip = hsu_intel,
+			.index = 1,
+			.name = HSU_GPS_PORT,
+			.idle = 40,
+			.preamble = 1,
+			.hw_init = intel_mid_hsu_init,
+			.hw_set_alt = intel_mid_hsu_switch,
+			.hw_set_rts = intel_mid_hsu_rts,
+			.hw_suspend = intel_mid_hsu_suspend,
+			.hw_suspend_post = intel_mid_hsu_suspend_post,
+			.hw_resume = intel_mid_hsu_resume,
+			.hw_get_clk = intel_mid_hsu_get_clk,
+			.hw_context_save = 1,
+		},
+		[hsu_port2] = {
+			.type = debug_port,
+			.hw_ip = hsu_intel,
+			.index = 2,
+			.name = HSU_DEBUG_PORT,
+			.idle = 2000,
+			.hw_init = intel_mid_hsu_init,
+			.hw_set_alt = intel_mid_hsu_switch,
+			.hw_suspend = intel_mid_hsu_suspend,
+			.hw_resume = intel_mid_hsu_resume,
+			.hw_get_clk = intel_mid_hsu_get_clk,
+			.hw_context_save = 1,
+		},
+	},
+	[hsu_vlv2] = {
+		[hsu_port0] = {
+			.type = bt_port,
+			.hw_ip = hsu_dw,
+			.index = 0,
+			.name = HSU_BT_PORT,
+			.idle = 100,
+			.hw_reset = intel_mid_hsu_reset,
+			.set_clk = intel_mid_hsu_set_clk,
+			.hw_ctrl_cts = 1,
+			.hw_init = intel_mid_hsu_init,
+			/* Trust FW has set it correctly */
+			.hw_set_alt = NULL,
+			.hw_set_rts = intel_mid_hsu_rts,
+			.hw_suspend = intel_mid_hsu_suspend,
+			.hw_resume = intel_mid_hsu_resume,
+			.hw_context_save = 1,
+		},
+		[hsu_port1] = {
+			.type = gps_port,
+			.hw_ip = hsu_dw,
+			.index = 1,
+			.name = HSU_GPS_PORT,
+			.idle = 40,
+			.preamble = 1,
+			.hw_reset = intel_mid_hsu_reset,
+			.set_clk = intel_mid_hsu_set_clk,
+			.hw_ctrl_cts = 1,
+			.hw_init = intel_mid_hsu_init,
+			/* Trust FW has set it correctly */
+			.hw_set_alt = NULL,
+			.hw_set_rts = intel_mid_hsu_rts,
+			.hw_suspend = intel_mid_hsu_suspend,
+			.hw_suspend_post = intel_mid_hsu_suspend_post,
+			.hw_resume = intel_mid_hsu_resume,
+			.hw_context_save = 1,
+		},
+	},
+	[hsu_chv] = {
+		[hsu_port0] = {
+			.type = bt_port,
+			.hw_ip = hsu_dw,
+			.index = 0,
+			.name = HSU_BT_PORT,
+			.idle = 100,
+			.hw_reset = intel_mid_hsu_reset,
+			.set_clk = intel_mid_hsu_set_clk,
+			.hw_ctrl_cts = 1,
+			.hw_init = intel_mid_hsu_init,
+			/* Trust FW has set it correctly */
+			.hw_set_alt = NULL,
+			.hw_set_rts = intel_mid_hsu_rts,
+			.hw_suspend = intel_mid_hsu_suspend,
+			.hw_resume = intel_mid_hsu_resume,
+			.hw_context_save = 1,
+		},
+		[hsu_port1] = {
+			.type = gps_port,
+			.hw_ip = hsu_dw,
+			.index = 1,
+			.name = HSU_GPS_PORT,
+			.idle = 40,
+			.preamble = 1,
+			.hw_reset = intel_mid_hsu_reset,
+			.set_clk = intel_mid_hsu_set_clk,
+			.hw_ctrl_cts = 1,
+			.hw_init = intel_mid_hsu_init,
+			/* Trust FW has set it correctly */
+			.hw_set_alt = NULL,
+			.hw_set_rts = intel_mid_hsu_rts,
+			.hw_suspend = intel_mid_hsu_suspend,
+			.hw_suspend_post = intel_mid_hsu_suspend_post,
+			.hw_resume = intel_mid_hsu_resume,
+			.hw_context_save = 1,
+		},
+	},
+
+};
+
+static struct hsu_func2port hsu_port_func_id_tlb[][hsu_port_func_max] = {
+	[hsu_pnw] = {
+		[0] = {
+			.func = 0,
+			.port = hsu_port0,
+		},
+		[1] = {
+			.func = 1,
+			.port = hsu_port1,
+		},
+		[2] = {
+			.func = 2,
+			.port = hsu_port2,
+		},
+		[3] = {
+			.func = -1,
+			.port = -1,
+		},
+	},
+	[hsu_clv] = {
+		[0] = {
+			.func = 0,
+			.port = hsu_port0,
+		},
+		[1] = {
+			.func = 1,
+			.port = hsu_port1,
+		},
+		[2] = {
+			.func = 2,
+			.port = hsu_port2,
+		},
+		[3] = {
+			.func = -1,
+			.port = -1,
+		},
+	},
+	[hsu_tng] = {
+		[0] = {
+			.func = 0,
+			.port = -1,
+		},
+		[1] = {
+			.func = 1,
+			.port = hsu_port0,
+		},
+		[2] = {
+			.func = 2,
+			.port = hsu_port1,
+		},
+		[3] = {
+			.func = 3,
+			.port = hsu_port2,
+		},
+	},
+	[hsu_vlv2] = {
+		[0] = {
+			.func = 3,
+			.port = hsu_port0,
+		},
+		[1] = {
+			.func = 4,
+			.port = hsu_port1,
+		},
+		[2] = {
+			.func = -1,
+			.port = -1,
+		},
+		[3] = {
+			.func = -1,
+			.port = -1,
+		},
+	},
+};
+
+static void hsu_port_enable(int port)
+{
+	struct hsu_port_pin_cfg *info = hsu_port_gpio_mux + port;
+
+	if (info->rx_gpio) {
+		lnw_gpio_set_alt(info->rx_gpio, info->rx_alt);
+		gpio_direction_input(info->rx_gpio);
+	}
+	if (info->tx_gpio) {
+		gpio_direction_output(info->tx_gpio, 1);
+		lnw_gpio_set_alt(info->tx_gpio, info->tx_alt);
+		usleep_range(10, 10);
+		gpio_direction_output(info->tx_gpio, 0);
+
+	}
+	if (info->cts_gpio) {
+		lnw_gpio_set_alt(info->cts_gpio, info->cts_alt);
+		gpio_direction_input(info->cts_gpio);
+	}
+	if (info->rts_gpio) {
+		gpio_direction_output(info->rts_gpio, 0);
+		lnw_gpio_set_alt(info->rts_gpio, info->rts_alt);
+	}
+}
+
+static void hsu_port_disable(int port)
+{
+	struct hsu_port_pin_cfg *info = hsu_port_gpio_mux + port;
+
+	if (info->rx_gpio) {
+		lnw_gpio_set_alt(info->rx_gpio, LNW_GPIO);
+		gpio_direction_input(info->rx_gpio);
+	}
+	if (info->tx_gpio) {
+		gpio_direction_output(info->tx_gpio, 1);
+		lnw_gpio_set_alt(info->tx_gpio, LNW_GPIO);
+		usleep_range(10, 10);
+		gpio_direction_input(info->tx_gpio);
+	}
+	if (info->cts_gpio) {
+		lnw_gpio_set_alt(info->cts_gpio, LNW_GPIO);
+		gpio_direction_input(info->cts_gpio);
+	}
+	if (info->rts_gpio) {
+		lnw_gpio_set_alt(info->rts_gpio, LNW_GPIO);
+		gpio_direction_input(info->rts_gpio);
+	}
+}
+
+void intel_mid_hsu_suspend(int port, struct device *dev, irq_handler_t wake_isr)
+{
+	int ret;
+	struct hsu_port_pin_cfg *info = hsu_port_gpio_mux + port;
+
+	info->dev = dev;
+	info->wake_isr = wake_isr;
+
+	if (info->wake_gpio) {
+		lnw_gpio_set_alt(info->wake_gpio, LNW_GPIO);
+		gpio_direction_input(info->wake_gpio);
+		udelay(10);
+		ret = request_irq(gpio_to_irq(info->wake_gpio), info->wake_isr,
+				IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING,
+				info->name, info->dev);
+		if (ret)
+			dev_err(info->dev, "failed to register wakeup irq\n");
+	}
+}
+
+void intel_mid_hsu_resume(int port, struct device *dev)
+{
+	struct hsu_port_pin_cfg *info = hsu_port_gpio_mux + port;
+
+	if (info->wake_gpio)
+		free_irq(gpio_to_irq(info->wake_gpio), info->dev);
+
+	if (info->rx_gpio) {
+		lnw_gpio_set_alt(info->rx_gpio, info->rx_alt);
+		gpio_direction_input(info->rx_gpio);
+	}
+	if (info->tx_gpio) {
+		gpio_direction_output(info->tx_gpio, 1);
+		lnw_gpio_set_alt(info->tx_gpio, info->tx_alt);
+		usleep_range(10, 10);
+		gpio_direction_output(info->tx_gpio, 0);
+
+	}
+	if (info->cts_gpio) {
+		lnw_gpio_set_alt(info->cts_gpio, info->cts_alt);
+		gpio_direction_input(info->cts_gpio);
+	}
+}
+
+void intel_mid_hsu_switch(int port)
+{
+	int i;
+	struct hsu_port_pin_cfg *tmp;
+	struct hsu_port_pin_cfg *info = hsu_port_gpio_mux + port;
+
+	for (i = 0; i < hsu_port_max; i++) {
+		tmp = hsu_port_gpio_mux + i;
+		if (tmp != info && tmp->id == info->id)
+			hsu_port_disable(i);
+	}
+	hsu_port_enable(port);
+}
+
+void intel_mid_hsu_rts(int port, int value)
+{
+	struct hsu_port_pin_cfg *info = hsu_port_gpio_mux + port;
+
+	if (!info->rts_gpio)
+		return;
+
+	if (value) {
+		gpio_direction_output(info->rts_gpio, 1);
+		lnw_gpio_set_alt(info->rts_gpio, LNW_GPIO);
+	} else
+		lnw_gpio_set_alt(info->rts_gpio, info->rts_alt);
+}
+
+void intel_mid_hsu_suspend_post(int port)
+{
+	struct hsu_port_pin_cfg *info = hsu_port_gpio_mux + port;
+
+	if (info->rts_gpio && info->wake_gpio
+		&& info->wake_gpio == info->rx_gpio) {
+		gpio_direction_output(info->rts_gpio, 0);
+		lnw_gpio_set_alt(info->rts_gpio, LNW_GPIO);
+	}
+}
+
+void intel_mid_hsu_set_clk(unsigned int m, unsigned int n,
+				void __iomem *addr)
+{
+	unsigned int param, update_bit;
+
+	switch (boot_cpu_data.x86_model) {
+	/* valleyview*/
+	case 0x37:
+	/* cherryview */
+	case 0x4C:
+		update_bit = 1 << 31;
+		param = (m << 1) | (n << 16) | 0x1;
+
+		writel(param, addr + VLV_HSU_CLOCK);
+		writel((param | update_bit), addr + VLV_HSU_CLOCK);
+		writel(param, addr + VLV_HSU_CLOCK);
+		break;
+	default:
+		break;
+	}
+}
+
+void intel_mid_hsu_reset(void __iomem *addr)
+{
+	switch (boot_cpu_data.x86_model) {
+	/* valleyview*/
+	case 0x37:
+	/* cherryview */
+	case 0x4C:
+		writel(0, addr + VLV_HSU_RESET);
+		writel(3, addr + VLV_HSU_RESET);
+		/* Disable the tx overflow IRQ */
+		writel(2, addr + VLV_HSU_OVF_IRQ);
+		break;
+	default:
+		break;
+	}
+}
+
+unsigned int intel_mid_hsu_get_clk(void)
+{
+	return clock;
+}
+
+int intel_mid_hsu_func_to_port(unsigned int func)
+{
+	int i;
+	struct hsu_func2port *tbl = NULL;
+
+	switch (boot_cpu_data.x86_model) {
+	/* penwell */
+	case 0x27:
+		tbl = &hsu_port_func_id_tlb[hsu_pnw][0];
+		break;
+	/* cloverview */
+	case 0x35:
+		tbl = &hsu_port_func_id_tlb[hsu_clv][0];
+		break;
+	/* tangier */
+	case 0x3C:
+	case 0x4A:
+		tbl = &hsu_port_func_id_tlb[hsu_tng][0];
+		break;
+	/* valleyview*/
+	case 0x37:
+		tbl = &hsu_port_func_id_tlb[hsu_vlv2][0];
+		break;
+	/* anniedale */
+	case 0x5A:
+		/* anniedale same config as tangier */
+		tbl = &hsu_port_func_id_tlb[hsu_tng][0];
+		break;
+	/* cherryview */
+	case 0x4C:
+	default:
+		return -1;
+	}
+
+	for (i = 0; i < hsu_port_func_max; i++) {
+		if (tbl->func == func)
+			return tbl->port;
+		tbl++;
+	}
+
+	return -1;
+}
+
+int intel_mid_hsu_init(struct device *dev, int port)
+{
+	struct hsu_port_cfg *port_cfg = platform_hsu_info + port;
+	struct hsu_port_pin_cfg *info;
+
+	if (port >= hsu_port_max)
+		return -ENODEV;
+
+	port_cfg->dev = dev;
+
+	info = hsu_port_gpio_mux + port;
+	if (info->wake_gpio)
+		gpio_request(info->wake_gpio, "hsu");
+	if (info->rx_gpio)
+		gpio_request(info->rx_gpio, "hsu");
+	if (info->tx_gpio)
+		gpio_request(info->tx_gpio, "hsu");
+	if (info->cts_gpio)
+		gpio_request(info->cts_gpio, "hsu");
+	if (info->rts_gpio)
+		gpio_request(info->rts_gpio, "hsu");
+
+	return 1;
+}
+
+static void hsu_platform_clk(enum intel_mid_cpu_type cpu_type, ulong plat)
+{
+	void __iomem *clkctl, *clksc;
+	u32 clk_src, clk_div;
+
+	switch (boot_cpu_data.x86_model) {
+	/* penwell */
+	case 0x27:
+	/* cloverview */
+	case 0x35:
+		clock = 50000;
+		break;
+	/* tangier */
+	case 0x3C:
+	case 0x4A:
+	/* anniedale */
+	case 0x5A:
+		clock = 100000;
+		clkctl = ioremap_nocache(TNG_CLOCK_CTL, 4);
+		if (!clkctl) {
+			pr_err("tng scu clk ctl ioremap error\n");
+			break;
+		}
+
+		clksc = ioremap_nocache(TNG_CLOCK_SC, 4);
+		if (!clksc) {
+			pr_err("tng scu clk sc ioremap error\n");
+			iounmap(clkctl);
+			break;
+		}
+
+		clk_src = readl(clkctl);
+		clk_div = readl(clksc);
+
+		if (clk_src & (1 << 16))
+			/* source SCU fabric 100M */
+			clock = clock / ((clk_div & 0x7) + 1);
+		else {
+			if (clk_src & (1 << 31))
+				/* source OSCX2 38.4M */
+				clock = 38400;
+			else
+				/* source OSC clock 19.2M */
+				clock = 19200;
+		}
+
+		iounmap(clkctl);
+		iounmap(clksc);
+		break;
+	/* valleyview*/
+	case 0x37:
+	/* cherryview */
+	case 0x4C:
+	default:
+		clock = 100000;
+		break;
+	}
+
+	pr_info("hsu core clock %u M\n", clock / 1000);
+}
+
+int intel_mid_hsu_plat_init(int port, ulong plat, struct device *dev)
+{
+#ifdef CONFIG_ACPI
+	struct acpi_gpio_info info;
+	struct hsu_port_pin_cfg *pin_cfg = NULL;
+	int gpio = -1;
+
+	switch (plat) {
+	case hsu_chv:
+		pin_cfg = &hsu_port_pin_cfgs[plat][hsu_pid_def][port];
+
+		if (!pin_cfg->rx_gpio) {
+			gpio = acpi_get_gpio_by_index(dev, rxd_acpi_idx, &info);
+			if (gpio >= 0)
+				pin_cfg->rx_gpio = gpio;
+		}
+
+		if (!pin_cfg->tx_gpio) {
+			gpio = acpi_get_gpio_by_index(dev, txd_acpi_idx, &info);
+			if (gpio >= 0)
+				pin_cfg->tx_gpio = gpio;
+		}
+
+		if (!pin_cfg->rts_gpio) {
+			gpio = acpi_get_gpio_by_index(dev, rts_acpi_idx, &info);
+			if (gpio >= 0)
+				pin_cfg->rts_gpio = gpio;
+		}
+
+		if (!pin_cfg->cts_gpio) {
+			gpio = acpi_get_gpio_by_index(dev, cts_acpi_idx, &info);
+			if (gpio >= 0)
+				pin_cfg->cts_gpio = gpio;
+		}
+		break;
+	default:
+		return 0;
+	}
+
+	if (pin_cfg) {
+		switch (pin_cfg->wake_src) {
+		case hsu_rxd:
+			pin_cfg->wake_gpio = pin_cfg->rx_gpio;
+			break;
+		default:
+			break;
+		}
+	}
+#endif
+	return 0;
+}
+
+static __init int hsu_dev_platform_data(void)
+{
+	switch (boot_cpu_data.x86_model) {
+	/* tangier */
+	case 0x3C:
+	case 0x4A:
+		platform_hsu_info = &hsu_port_cfgs[hsu_tng][0];
+		hsu_port_gpio_mux = &hsu_port_pin_cfgs[hsu_tng][hsu_pid_def][0];
+		break;
+	/* anniedale */
+	case 0x5A:
+		/* anniedale same config as tangier */
+		platform_hsu_info = &hsu_port_cfgs[hsu_tng][0];
+		hsu_port_gpio_mux = &hsu_port_pin_cfgs[hsu_tng][hsu_pid_def][0];
+		break;
+	default:
+		pr_err("HSU: cpu%x no platform config!\n", boot_cpu_data.x86_model);
+		return -ENODEV;
+	}
+
+	if (platform_hsu_info == NULL)
+		return -ENODEV;
+
+	if (hsu_port_gpio_mux == NULL)
+		return -ENODEV;
+
+	hsu_register_board_info(platform_hsu_info);
+	hsu_platform_clk(intel_mid_identify_cpu(), 0);
+
+	return 0;
+}
+
+fs_initcall(hsu_dev_platform_data);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_hsu.h b/arch/x86/platform/intel-mid/device_libs/platform_hsu.h
new file mode 100644
index 0000000..c4ffd86
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_hsu.h
@@ -0,0 +1,67 @@
+/*
+ * platform_hsu.h: hsu platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_HSU_H_
+#define _PLATFORM_HSU_H_
+
+#ifdef CONFIG_ACPI
+#include <linux/acpi.h>
+#include <linux/acpi_gpio.h>
+
+enum {
+	rxd_acpi_idx = 0,
+	txd_acpi_idx,
+	rts_acpi_idx,
+	cts_acpi_idx,
+};
+#endif
+
+enum {
+	ext_gpio = 0,
+	hsu_rxd,
+};
+
+#define HSU_BT_PORT "hsu_bt_port"
+#define HSU_MODEM_PORT "hsu_modem_port"
+#define HSU_GPS_PORT "hsu_gps_port"
+#define HSU_DEBUG_PORT "hsu_debug_port"
+
+enum hsu_pid {
+	hsu_pid_def = 0,
+	hsu_pid_rhb = 0,
+	hsu_pid_vtb_pro = 1,
+	hsu_pid_vtb_eng = 2,
+	hsu_pid_max,
+};
+
+struct hsu_func2port {
+	int func;
+	int port;
+};
+
+struct hsu_port_pin_cfg {
+	char *name;
+	int id;
+	int wake_gpio;
+	int wake_src;
+	int rx_gpio;
+	int rx_alt;
+	int tx_gpio;
+	int tx_alt;
+	int cts_gpio;
+	int cts_alt;
+	int rts_gpio;
+	int rts_alt;
+	struct device *dev;
+	irq_handler_t wake_isr;
+};
+
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_i2c_gpio.c b/arch/x86/platform/intel-mid/device_libs/platform_i2c_gpio.c
new file mode 100644
index 0000000..a2ca7e9
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_i2c_gpio.c
@@ -0,0 +1,67 @@
+/*
+ * platform_i2c_gpio.c: i2c_gpio platform data initilization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/input.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/i2c-gpio.h>
+#include <linux/platform_device.h>
+#include <linux/lnw_gpio.h>
+#include <asm/intel-mid.h>
+#include "platform_i2c_gpio.h"
+
+static int hdmi_i2c_workaround(void)
+{
+	struct platform_device *pdev;
+	struct i2c_gpio_platform_data *pdata;
+
+	/*
+	 * Hard code a gpio controller platform device to take over
+	 * the two gpio pins used to be controlled by i2c bus 3.
+	 * This is to support HDMI EDID extension block read, which
+	 * is not supported by the current i2c controller, so we use
+	 * GPIO pin the simulate an i2c bus.
+	 */
+
+	/* On Merrifield, bus number 8 is used for battery charger.
+	 *  use 10 across medfield/ctp/merrifield platforms.
+	 */
+	pdev = platform_device_alloc(DEVICE_NAME, 10);
+
+	if (!pdev) {
+		pr_err("i2c-gpio: failed to alloc platform device\n");
+		return -ENOMEM;
+	}
+
+	pdata = kzalloc(sizeof(struct i2c_gpio_platform_data), GFP_KERNEL);
+	if (!pdata) {
+		pr_err("i2c-gpio: failed to alloc platform data\n");
+		kfree(pdev);
+		return -ENOMEM;
+	}
+	/* Pins 17 and 18 are used in Merrifield/MOOR-PRH for HDMI i2c (bus3) */
+	pdata->scl_pin = 17;
+	pdata->sda_pin = 18;
+	pdata->sda_is_open_drain = 0;
+	pdata->scl_is_open_drain = 0;
+	pdev->dev.platform_data = pdata;
+
+	platform_device_add(pdev);
+
+	lnw_gpio_set_alt(pdata->sda_pin, LNW_GPIO);
+	lnw_gpio_set_alt(pdata->scl_pin, LNW_GPIO);
+
+	return 0;
+}
+rootfs_initcall(hdmi_i2c_workaround);
+
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_i2c_gpio.h b/arch/x86/platform/intel-mid/device_libs/platform_i2c_gpio.h
new file mode 100644
index 0000000..66ddd62
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_i2c_gpio.h
@@ -0,0 +1,16 @@
+/*
+ * platform_i2c_gpio.h: i2c_gpio platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_I2C_GPIO_H_
+#define _PLATFORM_I2C_GPIO_H_
+
+#define DEVICE_NAME "i2c-gpio"
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_ipc.c b/arch/x86/platform/intel-mid/device_libs/platform_ipc.c
new file mode 100644
index 0000000..5a71b82
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_ipc.c
@@ -0,0 +1,38 @@
+/*
+ * platform_ipc.c: IPC platform library file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/sfi.h>
+#include <linux/gpio.h>
+#include <asm/intel-mid.h>
+#include "platform_ipc.h"
+
+void ipc_device_handler(struct sfi_device_table_entry *pentry,
+				struct devs_id *dev) {
+	void *pdata = NULL;
+	/*
+	 * IPC device creation is handled by the MSIC
+	 * MFD driver so we don't need to do it here.
+	 */
+
+	/*
+	 * We need to call platform init of IPC devices to fill
+	 * misc_pdata structure. It will be used in msic_init for
+	 * initialization.
+	 */
+	pr_info("IPC bus, name = %16.16s, irq = 0x%2x\n",
+		pentry->name, pentry->irq);
+	if (dev != NULL)
+		pdata = dev->get_platform_data(pentry);
+}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_ipc.h b/arch/x86/platform/intel-mid/device_libs/platform_ipc.h
new file mode 100644
index 0000000..984b549
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_ipc.h
@@ -0,0 +1,17 @@
+/*
+ * platform_ipc.h: IPC platform library header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_IPC_H_
+#define _PLATFORM_IPC_H_
+
+extern void ipc_device_handler(struct sfi_device_table_entry *pentry,
+			struct devs_id *dev) __attribute__((weak));
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_lsm303.h b/arch/x86/platform/intel-mid/device_libs/platform_lsm303.h
new file mode 100644
index 0000000..6131921
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_lsm303.h
@@ -0,0 +1,16 @@
+/*
+ * platform_lsm303.h: lsm303 platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_LSM303_H_
+#define _PLATFORM_LSM303_H_
+
+extern void *lsm303dlhc_accel_platform_data(void *info) __attribute__((weak));
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_max17042.c b/arch/x86/platform/intel-mid/device_libs/platform_max17042.c
new file mode 100644
index 0000000..9269f8d
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_max17042.c
@@ -0,0 +1,279 @@
+/*
+ * platform_max17042.c: max17042 platform data initilization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/export.h>
+#include <linux/gpio.h>
+#include <asm/intel-mid.h>
+#include <linux/i2c.h>
+#include <linux/lnw_gpio.h>
+#include <linux/power_supply.h>
+#include <linux/power/max17042_battery.h>
+#include <linux/power/smb347-charger.h>
+#include <linux/power/bq24192_charger.h>
+#include <linux/power/bq24261_charger.h>
+#include <linux/power/battery_id.h>
+#include <asm/pmic_pdata.h>
+#include <asm/intel-mid.h>
+#include <asm/delay.h>
+#include <asm/intel_scu_ipc.h>
+#include "platform_max17042.h"
+#include "platform_bq24192.h"
+
+#define MRFL_SMIP_SRAM_ADDR		0xFFFCE000
+#define MOFD_SMIP_SRAM_ADDR		0xFFFC5C00
+#define MRFL_PLATFORM_CONFIG_OFFSET	0x3B3
+#define MRFL_SMIP_SHUTDOWN_OFFSET	1
+#define MRFL_SMIP_RESV_CAP_OFFSET	3
+
+#define MRFL_VOLT_SHUTDOWN_MASK (1 << 1)
+#define MRFL_NFC_RESV_MASK	(1 << 3)
+#define I2C_GPIO_PIN 		21
+
+static bool msic_battery_check(struct max17042_platform_data *pdata)
+{
+	struct sfi_table_simple *sb;
+	char *mrfl_batt_str = "INTN0001";
+#ifdef CONFIG_SFI
+	sb = (struct sfi_table_simple *)get_oem0_table();
+#else
+	sb = NULL;
+#endif
+	if (sb == NULL) {
+		pr_info("invalid battery detected\n");
+		snprintf(pdata->battid, BATTID_LEN + 1, "UNKNOWNB");
+		snprintf(pdata->serial_num, SERIAL_NUM_LEN + 1, "000000");
+		return false;
+	} else {
+		pr_info("valid battery detected\n");
+		/* First entry in OEM0 table is the BATTID. Read battid
+		 * if pentry is not NULL and header length is greater
+		 * than BATTID length*/
+		if (sb->pentry && sb->header.len >= BATTID_LEN) {
+			if (strncmp((char *)sb->pentry,
+				"PG000001", (BATTID_LEN)) == 0) {
+				snprintf(pdata->battid,
+					(BATTID_LEN + 1),
+					"%s", mrfl_batt_str);
+			} else {
+				snprintf(pdata->battid,
+					(BATTID_LEN + 1),
+					"%s", (char *)sb->pentry);
+			}
+
+			/* First 2 bytes represent the model name
+			 * and the remaining 6 bytes represent the
+			 * serial number. */
+			if (pdata->battid[0] == 'I' &&
+				pdata->battid[1] >= '0'
+					&& pdata->battid[1] <= '9') {
+				unsigned char tmp[SERIAL_NUM_LEN + 2];
+				int i;
+				snprintf(pdata->model_name,
+					(MODEL_NAME_LEN) + 1,
+						"%s", pdata->battid);
+				memcpy(tmp, sb->pentry, BATTID_LEN);
+				for (i = 0; i < SERIAL_NUM_LEN; i++) {
+					sprintf(pdata->serial_num + i*2,
+					"%02x", tmp[i + MODEL_NAME_LEN]);
+				}
+				if ((2 * SERIAL_NUM_LEN) <
+					ARRAY_SIZE(pdata->serial_num))
+					pdata->serial_num[2 * SERIAL_NUM_LEN]
+								 = '\0';
+			} else {
+				snprintf(pdata->model_name,
+						(MODEL_NAME_LEN + 1),
+						"%s", pdata->battid);
+				snprintf(pdata->serial_num,
+						(SERIAL_NUM_LEN + 1), "%s",
+				pdata->battid + MODEL_NAME_LEN);
+			}
+		}
+		return true;
+	}
+	return false;
+}
+
+#define UMIP_REF_FG_TBL			0x806	/* 2 bytes */
+#define BATT_FG_TBL_BODY		14	/* 144 bytes */
+/**
+ * mfld_fg_restore_config_data - restore config data
+ * @name : Power Supply name
+ * @data : config data output pointer
+ * @len : length of config data
+ *
+ */
+int mfld_fg_restore_config_data(const char *name, void *data, int len)
+{
+	int ret = 0;
+#ifdef CONFIG_X86_MDFLD
+	int mip_offset;
+	/* Read the fuel gauge config data from umip */
+	mip_offset = UMIP_REF_FG_TBL + BATT_FG_TBL_BODY;
+	ret = intel_scu_ipc_read_mip((u8 *)data, len, mip_offset, 0);
+#endif
+	return ret;
+}
+EXPORT_SYMBOL(mfld_fg_restore_config_data);
+
+/**
+ * mfld_fg_save_config_data - save config data
+ * @name : Power Supply name
+ * @data : config data input pointer
+ * @len : length of config data
+ *
+ */
+int mfld_fg_save_config_data(const char *name, void *data, int len)
+{
+	int ret = 0;
+#ifdef CONFIG_X86_MDFLD
+	int mip_offset;
+	/* write the fuel gauge config data to umip */
+	mip_offset = UMIP_REF_FG_TBL + BATT_FG_TBL_BODY;
+	ret = intel_scu_ipc_write_umip((u8 *)data, len, mip_offset);
+#endif
+	return ret;
+}
+EXPORT_SYMBOL(mfld_fg_save_config_data);
+
+static int mrfl_get_bat_health(void)
+{
+
+	int pbat_health = -ENODEV;
+	int bqbat_health = -ENODEV;
+#ifdef CONFIG_BQ24261_CHARGER
+	 bqbat_health = bq24261_get_bat_health();
+#endif
+#ifdef CONFIG_PMIC_CCSM
+	pbat_health = pmic_get_health();
+#endif
+
+	/*Battery temperature exceptions are reported to PMIC. ALl other
+	* exceptions are reported to bq24261 charger. Need to read the
+	* battery health reported by both drivers, before reporting
+	* the actual battery health
+	*/
+
+	/* FIXME: need to have a time stamp based implementation to
+	* report battery health
+	*/
+
+	if (pbat_health < 0 && bqbat_health < 0)
+		return pbat_health;
+	if (pbat_health > 0 && pbat_health != POWER_SUPPLY_HEALTH_GOOD)
+		return pbat_health;
+	else
+		return bqbat_health;
+}
+
+#define DEFAULT_VMIN	3400000		/* 3400mV */
+static int mrfl_get_vsys_min(void)
+{
+	struct ps_batt_chg_prof batt_profile;
+	int ret;
+	ret = get_batt_prop(&batt_profile);
+	if (!ret)
+		return ((struct ps_pse_mod_prof *)batt_profile.batt_prof)
+					->low_batt_mV * 1000;
+	return DEFAULT_VMIN;
+}
+#define DEFAULT_VMAX_LIM	4200000		/* 4200mV */
+static int mrfl_get_volt_max(void)
+{
+	struct ps_batt_chg_prof batt_profile;
+	int ret;
+	ret = get_batt_prop(&batt_profile);
+	if (!ret)
+		return ((struct ps_pse_mod_prof *)batt_profile.batt_prof)
+					->voltage_max * 1000;
+	return DEFAULT_VMAX_LIM;
+}
+
+static bool is_mapped;
+static void __iomem *smip;
+int get_smip_plat_config(int offset)
+{
+	unsigned long sram_addr;
+
+	sram_addr = MOFD_SMIP_SRAM_ADDR;
+
+	if (!is_mapped) {
+		smip = ioremap_nocache(sram_addr +
+				MRFL_PLATFORM_CONFIG_OFFSET, 8);
+		is_mapped = true;
+	}
+
+	return ioread8(smip + offset);
+}
+
+static void init_tgain_toff(struct max17042_platform_data *pdata)
+{
+	pdata->tgain = NTC_10K_MURATA_TGAIN;
+	pdata->toff = NTC_10K_MURATA_TOFF;
+}
+
+static void init_callbacks(struct max17042_platform_data *pdata)
+{
+	pdata->battery_health = mrfl_get_bat_health;
+	pdata->battery_pack_temp = pmic_get_battery_pack_temp;
+	pdata->get_vmin_threshold = mrfl_get_vsys_min;
+	pdata->get_vmax_threshold = mrfl_get_volt_max;
+}
+
+static void init_platform_params(struct max17042_platform_data *pdata)
+{
+	pdata->fg_algo_model = 100;
+
+	if (msic_battery_check(pdata)) {
+		pdata->enable_current_sense = true;
+		pdata->technology = POWER_SUPPLY_TECHNOLOGY_LION;
+		pdata->file_sys_storage_enabled = 1;
+		pdata->soc_intr_mode_enabled = true;
+		pdata->valid_battery = true;
+	}
+	pdata->is_init_done = 0;
+}
+
+static void init_platform_thresholds(struct max17042_platform_data *pdata)
+{
+	u8 shutdown_method;
+
+	/* Bit 1 of shutdown method determines if voltage based
+	 * shutdown in enabled.
+	 * Bit 3 specifies if capacity for NFC should be reserved.
+	 * Reserve capacity only if Bit 3 of shutdown method
+	 * is enabled.
+	 */
+	shutdown_method =
+		get_smip_plat_config(MRFL_SMIP_SHUTDOWN_OFFSET);
+	if (shutdown_method & MRFL_NFC_RESV_MASK)
+		pdata->resv_cap =
+			get_smip_plat_config
+				(MRFL_SMIP_RESV_CAP_OFFSET);
+
+	pdata->is_volt_shutdown = (shutdown_method &
+		MRFL_VOLT_SHUTDOWN_MASK) ? 1 : 0;
+}
+
+void *max17042_platform_data(void *info)
+{
+	static struct max17042_platform_data platform_data;
+
+	init_tgain_toff(&platform_data);
+	init_callbacks(&platform_data);
+	init_platform_params(&platform_data);
+	init_platform_thresholds(&platform_data);
+
+	if (smip)
+		iounmap(smip);
+	return &platform_data;
+}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_max17042.h b/arch/x86/platform/intel-mid/device_libs/platform_max17042.h
new file mode 100644
index 0000000..517b461
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_max17042.h
@@ -0,0 +1,27 @@
+/*
+ * platform_max17042.h: max17042 platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_MAX17042_H_
+#define _PLATFORM_MAX17042_H_
+
+#define	NTC_47K_TGAIN			0xE4E4
+#define	NTC_47K_TOFF			0x2F1D
+#define	NTC_47K_TH05_TGAIN		0xDA1F
+#define	NTC_47K_TH05_TOFF		0x38C7
+#define	NTC_10K_B3435K_TDK_TGAIN	0xE4E4
+#define	NTC_10K_B3435K_TDK_TOFF		0x2218
+#define	NTC_10K_NCP15X_TGAIN		0xE254
+#define	NTC_10K_NCP15X_TOFF		0x2ACF
+#define NTC_10K_MURATA_TGAIN		0xE39C
+#define NTC_10K_MURATA_TOFF		0x2673
+
+extern void *max17042_platform_data(void *info) __attribute__((weak));
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_max3111.c b/arch/x86/platform/intel-mid/device_libs/platform_max3111.c
new file mode 100644
index 0000000..0c3b501
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_max3111.c
@@ -0,0 +1,42 @@
+/*
+ * platform_max3111.c: max3111 platform data initilization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/gpio.h>
+#include <linux/spi/spi.h>
+#include <linux/lnw_gpio.h>
+#include <linux/serial_max3110.h>
+#include <linux/spi/intel_mid_ssp_spi.h>
+#include <asm/intel-mid.h>
+#include "platform_max3111.h"
+
+static struct intel_mid_ssp_spi_chip chip = {
+	.burst_size = DFLT_FIFO_BURST_SIZE,
+	.timeout = DFLT_TIMEOUT_VAL,
+	/* UART DMA is not supported in VP */
+	.dma_enabled = false,
+};
+
+void __init *max3111_platform_data(void *info)
+{
+	struct spi_board_info *spi_info = info;
+	static struct plat_max3110 max3110_pdata;
+
+	spi_info->mode = SPI_MODE_0;
+	spi_info->controller_data = &chip;
+	spi_info->bus_num = FORCE_SPI_BUS_NUM;
+
+	/*force polling for HVP and VP simulation platforms
+	 * on TANGIER AND ANNIEDALE.
+	 */
+
+	return &max3110_pdata;
+}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_max3111.h b/arch/x86/platform/intel-mid/device_libs/platform_max3111.h
new file mode 100644
index 0000000..fe2c361
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_max3111.h
@@ -0,0 +1,20 @@
+/*
+ * platform_max3111.h: max3111 platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_MAX3111_H_
+#define _PLATFORM_MAX3111_H_
+
+/* REVERT ME workaround[MRFL] for invalid bus number in IAFW .25 */
+#define FORCE_SPI_BUS_NUM	5
+#define FORCE_CHIP_SELECT	0
+
+extern void *max3111_platform_data(void *info) __attribute__((weak));
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_max7315.c b/arch/x86/platform/intel-mid/device_libs/platform_max7315.c
new file mode 100644
index 0000000..b682f0a
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_max7315.c
@@ -0,0 +1,64 @@
+/*
+ * platform_max7315.c: max7315 platform data initilization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/i2c/pca953x.h>
+#include <asm/intel-mid.h>
+#include "platform_max7315.h"
+
+
+void __init *max7315_platform_data(void *info)
+{
+	static struct pca953x_platform_data max7315_pdata[MAX7315_NUM];
+	static int nr;
+	struct pca953x_platform_data *max7315 = &max7315_pdata[nr];
+	struct i2c_board_info *i2c_info = info;
+	int gpio_base, intr;
+	char base_pin_name[SFI_NAME_LEN + 1];
+	char intr_pin_name[SFI_NAME_LEN + 1];
+
+	if (nr >= MAX7315_NUM) {
+		pr_err("too many max7315s, we only support %d\n",
+				MAX7315_NUM);
+		return NULL;
+	}
+	/* we have several max7315 on the board, we only need load several
+	 * instances of the same pca953x driver to cover them
+	 */
+	strcpy(i2c_info->type, "max7315");
+	if (nr++) {
+		snprintf(base_pin_name, sizeof(base_pin_name),
+							"max7315_%d_base", nr);
+		snprintf(intr_pin_name, sizeof(intr_pin_name),
+							"max7315_%d_int", nr);
+	} else {
+		strcpy(base_pin_name, "max7315_base");
+		strcpy(intr_pin_name, "max7315_int");
+	}
+
+	gpio_base = get_gpio_by_name(base_pin_name);
+	intr = get_gpio_by_name(intr_pin_name);
+
+	if (gpio_base == -1)
+		return NULL;
+	max7315->gpio_base = gpio_base;
+	if (intr != -1) {
+		i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET;
+		max7315->irq_base = gpio_base + INTEL_MID_IRQ_OFFSET;
+	} else {
+		i2c_info->irq = -1;
+		max7315->irq_base = -1;
+	}
+	return max7315;
+}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_max7315.h b/arch/x86/platform/intel-mid/device_libs/platform_max7315.h
new file mode 100644
index 0000000..d62daa5
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_max7315.h
@@ -0,0 +1,19 @@
+/*
+ * platform_max7315.h: max7315 platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_MAX7315_H_
+#define _PLATFORM_MAX7315_H_
+
+/* we have multiple max7315 on the board ... */
+#define MAX7315_NUM 2
+
+extern void __init *max7315_platform_data(void *info) __attribute__((weak));
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_moor_thermal.c b/arch/x86/platform/intel-mid/device_libs/platform_moor_thermal.c
new file mode 100644
index 0000000..41b9a51
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_moor_thermal.c
@@ -0,0 +1,98 @@
+/*
+ * platform_moor_thermal.c: Platform data initilization file for
+ *			Intel Moorefield Platform thermal driver
+ *
+ * (C) Copyright 2013 Intel Corporation
+ * Author: Sumeet R Pawnikar <sumeet.r.pawnikar@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/kernel.h>
+#include <linux/mfd/intel_msic.h>
+#include <linux/platform_device.h>
+#include <asm/intel_mid_thermal.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_mid_remoteproc.h>
+#include "platform_moor_thermal.h"
+
+/*
+ * Naming convention:
+ * skin0 -> front skin,
+ * skin1--> back skin
+ */
+/* Updated slope and intercept values as per received from
+ * Thermal HW Team. Updated the same value for skin0.
+ * Updated the best match slope and intercept values
+ * for skin1 same as merrifield platform. */
+static struct intel_mid_thermal_sensor moor_sensors[] = {
+	{
+		.name = "SYSTHERM0",
+		.index = 0,
+		.direct = false,
+	},
+	{
+		.name = "SYSTHERM1",
+		.index = 1,
+		.direct = false,
+	},
+	{
+		.name = "SYSTHERM2",
+		.index = 2,
+		.direct = false,
+	},
+	{
+		.name = "PMIC_DIE",
+		.index = 3,
+		.direct = true,
+	},
+	/* Virtual Sensors should always be at the end */
+	{
+		.name = "FrontSkin",
+		.index = 4,
+	},
+	{
+		.name = "BackSkin",
+		.index = 5,
+	},
+};
+
+static struct intel_mid_thermal_platform_data pdata[] = {
+	[moor_thermal] = {
+		.num_sensors = 4,
+		.sensors = moor_sensors,
+		.num_virtual_sensors = 2,
+	},
+};
+
+void __init *moor_thermal_platform_data(void *info)
+{
+	struct platform_device *pdev;
+	struct sfi_device_table_entry *entry = info;
+
+	pdev = platform_device_alloc(MOOR_THERM_DEV_NAME, -1);
+	if (!pdev) {
+		pr_err("out of memory for SFI platform dev %s\n",
+			MOOR_THERM_DEV_NAME);
+		return NULL;
+	}
+
+	if (platform_device_add(pdev)) {
+		pr_err("failed to add thermal platform device\n");
+		platform_device_put(pdev);
+		return NULL;
+	}
+	pdev->dev.platform_data = &pdata[moor_thermal];
+
+	install_irq_resource(pdev, entry->irq);
+
+	register_rpmsg_service("rpmsg_moor_thermal", RPROC_SCU,
+				RP_SCOVE_THERMAL);
+
+	return 0;
+}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_moor_thermal.h b/arch/x86/platform/intel-mid/device_libs/platform_moor_thermal.h
new file mode 100644
index 0000000..0444cf1
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_moor_thermal.h
@@ -0,0 +1,25 @@
+/*
+ * platform_moor_thermal.h: Platform data initilization file for
+ *			Intel Moorefield Platform thermal driver
+ *
+ * (C) Copyright 2013 Intel Corporation
+ * Author: Sumeet R Pawnikar <sumeet.r.pawnikar@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_MOOR_THERMAL_H_
+#define _PLATFORM_MOOR_THERMAL_H_
+
+#define MOOR_THERM_DEV_NAME "scove_thrm"
+
+extern void __init *moor_thermal_platform_data(void *)
+			__attribute__((weak));
+
+enum {
+	moor_thermal,
+};
+
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfl_ocd.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfl_ocd.c
new file mode 100644
index 0000000..77a8ae0
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfl_ocd.c
@@ -0,0 +1,70 @@
+/*
+ * platform_mrfl_ocd.c: Platform data for Merrifield Platform OCD  Driver
+ *
+ * (C) Copyright 2013 Intel Corporation
+ *  Author: Saranya Gopal <saranya.gopal@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_mid_remoteproc.h>
+#include <asm/intel_scu_ipc.h>
+#include <asm/intel_basincove_ocd.h>
+
+#include "platform_msic.h"
+#include "platform_mrfl_ocd.h"
+
+static int get_bcu_config(struct ocd_bcove_config_data *ocd_smip_data)
+{
+	int i;
+	void __iomem *bcu_smip_sram_addr;
+	u8 *plat_smip_data;
+	unsigned long sram_addr;
+
+	if (!ocd_smip_data)
+		return -ENXIO;
+
+	sram_addr = MOFD_SMIP_SRAM_ADDR;
+	plat_smip_data = (u8 *)ocd_smip_data;
+	bcu_smip_sram_addr = ioremap_nocache(sram_addr +
+					BCU_SMIP_OFFSET, NUM_SMIP_BYTES);
+
+	for (i = 0; i < NUM_SMIP_BYTES; i++)
+		*(plat_smip_data + i) = ioread8(bcu_smip_sram_addr + i);
+
+	return 0;
+}
+
+static struct ocd_platform_data ocd_data;
+
+void __init *mrfl_ocd_platform_data(void *info)
+{
+	struct sfi_device_table_entry *entry = info;
+	struct platform_device *pdev;
+
+	pdev = platform_device_alloc(MRFL_OCD_DEV_NAME, -1);
+	if (!pdev) {
+		pr_err("out of memory for SFI platform dev %s\n",
+			MRFL_OCD_DEV_NAME);
+		return NULL;
+	}
+
+	if (platform_device_add(pdev)) {
+		pr_err("failed to add merrifield ocd platform device\n");
+		platform_device_put(pdev);
+		return NULL;
+	}
+
+	install_irq_resource(pdev, entry->irq);
+	ocd_data.bcu_config_data = &get_bcu_config;
+	pdev->dev.platform_data	= &ocd_data;
+	register_rpmsg_service("rpmsg_mrfl_ocd", RPROC_SCU, RP_MRFL_OCD);
+
+	return &ocd_data;
+}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfl_ocd.h b/arch/x86/platform/intel-mid/device_libs/platform_mrfl_ocd.h
new file mode 100644
index 0000000..d817e7d
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfl_ocd.h
@@ -0,0 +1,19 @@
+/*
+ * platform_mrfl_ocd.h: msic_thermal platform data header file
+ *
+ * (C) Copyright 2013 Intel Corporation
+ * Author: Saranya Gopal <saranya.gopal@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_MRFL_OCD_H_
+#define _PLATFORM_MRFL_OCD_H_
+
+#define MRFL_OCD_DEV_NAME "bcove_bcu"
+
+extern void __init *mrfl_ocd_platform_data(void *info)
+			__attribute__((weak));
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfl_pmic.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfl_pmic.c
new file mode 100644
index 0000000..68ed17a
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfl_pmic.c
@@ -0,0 +1,113 @@
+/*
+ * platform_mrfl_pmic.c: Platform data for Merrifield PMIC driver
+ *
+ * (C) Copyright 2012 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
+#include <linux/init.h>
+#include <linux/sfi.h>
+#include <asm/intel-mid.h>
+#include <asm/pmic_pdata.h>
+#include <asm/intel_mid_remoteproc.h>
+#include <linux/power/bq24261_charger.h>
+#include <asm/intel_scu_pmic.h>
+
+#include "platform_ipc.h"
+#include "platform_mrfl_pmic.h"
+
+#define MCHGRIRQ0_ADDR			0x12
+#define MCHGRIRQ1_ADDR			0x13
+
+#define PMIC_ID_ADDR    0x00
+#define SHADYCOVE_A0	0x00
+#define SHADYCOVE_A1	0x01
+
+static struct temp_lookup shadycove_adc_tbl[] = {
+	{0x35, 125, 0}, {0x3C, 120, 0},
+	{0x43, 115, 0}, {0x4C, 110, 0},
+	{0x56, 105, 0}, {0x61, 100, 0},
+	{0x6F, 95, 0}, {0x7F, 90, 0},
+	{0x91, 85, 0}, {0xA7, 80, 0},
+	{0xC0, 75, 0}, {0xDF, 70, 0},
+	{0x103, 65, 0}, {0x12D, 60, 0},
+	{0x161, 55, 0}, {0x1A0, 50, 0},
+	{0x1EC, 45, 0}, {0x247, 40, 0},
+	{0x2B7, 35, 0}, {0x33F, 30, 0},
+	{0x3E8, 25, 0}, {0x4B8, 20, 0},
+	{0x5BB, 15, 0}, {0x700, 10, 0},
+	{0x89A, 5, 0}, {0xAA2, 0, 0},
+	{0xD3D, -5, 0}, {0x109B, -10, 0},
+	{0x14F5, -15, 0}, {0x1AA7, -20, 0},
+	{0x2234, -25, 0}, {0x2C47, -30, 0},
+	{0x39E4, -35, 0}, {0x4C6D, -40, 0},
+};
+
+void __init *mrfl_pmic_ccsm_platform_data(void *info)
+{
+	struct sfi_device_table_entry *entry = info;
+	static struct pmic_platform_data pmic_pdata;
+	struct platform_device *pdev = NULL;
+	int ret;
+
+	pdev = platform_device_alloc(entry->name, -1);
+	if (!pdev) {
+		pr_err("Out of memory for SFI platform dev %s\n", entry->name);
+		goto out;
+	}
+	pdev->dev.platform_data = &pmic_pdata;
+	ret = platform_device_add(pdev);
+	if (ret) {
+		pr_err("Failed to add adc platform device\n");
+		platform_device_put(pdev);
+		goto out;
+	}
+	install_irq_resource(pdev, entry->irq);
+
+	pmic_pdata.max_tbl_row_cnt = ARRAY_SIZE(shadycove_adc_tbl);
+	pmic_pdata.adc_tbl = shadycove_adc_tbl;
+
+#ifdef CONFIG_BQ24261_CHARGER
+	pmic_pdata.cc_to_reg = bq24261_cc_to_reg;
+	pmic_pdata.cv_to_reg = bq24261_cv_to_reg;
+	pmic_pdata.inlmt_to_reg = bq24261_inlmt_to_reg;
+#endif
+	register_rpmsg_service("rpmsg_pmic_ccsm", RPROC_SCU,
+				RP_PMIC_CCSM);
+out:
+	return &pmic_pdata;
+}
+
+/* WA for ShadyCove PMIC issue to reset MCHGRIRQ0/1 to default values
+ * as soon as the IPC driver is loaded.
+ * Issue is supposed to be fixed with A2-PMIC
+ */
+static int __init pmic_reset_value_wa(void)
+{
+	u8 id_val;
+	int ret;
+
+	ret = intel_scu_ipc_ioread8(PMIC_ID_ADDR, &id_val);
+	if (ret) {
+		pr_err("%s:%d Error(%d) reading PMIC ID register\n",
+				__func__, __LINE__, ret);
+		return 0;
+	}
+
+	pr_info("%s:%d ShadyCove ID_REG-val:%x\n",
+			__func__, __LINE__, id_val);
+	if ((id_val == SHADYCOVE_A0) || (id_val == SHADYCOVE_A1)) {
+		pr_info("%s:%d Reset MCHGRIRQs\n", __func__, __LINE__);
+		intel_scu_ipc_iowrite8(MCHGRIRQ0_ADDR, 0xFF);
+		intel_scu_ipc_iowrite8(MCHGRIRQ1_ADDR, 0x1F);
+	}
+	return 0;
+}
+rootfs_initcall(pmic_reset_value_wa);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfl_pmic.h b/arch/x86/platform/intel-mid/device_libs/platform_mrfl_pmic.h
new file mode 100644
index 0000000..5eb3ed5
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfl_pmic.h
@@ -0,0 +1,17 @@
+/*
+ * platform_mrfl_pmic.h: platform data for pmic driver
+ *
+ * (C) Copyright 2012 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_MRFL_PMIC_H_
+#define _PLATFORM_MRFL_PMIC_H_
+
+extern void __init *mrfl_pmic_ccsm_platform_data(
+				void *info) __attribute__((weak));
+
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfl_pmic_i2c.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfl_pmic_i2c.c
new file mode 100644
index 0000000..c6ad12b
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfl_pmic_i2c.c
@@ -0,0 +1,49 @@
+/*
+ * platform_mrfl_pmic_i2c.c: Platform data for Merrifield PMIC I2C
+ * adapter driver.
+ *
+ * (C) Copyright 2012 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
+#include <linux/init.h>
+#include <linux/sfi.h>
+#include <asm/intel-mid.h>
+#include <asm/pmic_pdata.h>
+#include <asm/intel_mid_remoteproc.h>
+#include <linux/power/bq24261_charger.h>
+
+#include "platform_ipc.h"
+#include "platform_mrfl_pmic_i2c.h"
+
+void __init *mrfl_pmic_i2c_platform_data(void *info)
+{
+	struct sfi_device_table_entry *entry = info;
+	struct platform_device *pdev = NULL;
+	int ret;
+
+	pdev = platform_device_alloc(entry->name, -1);
+	if (!pdev) {
+		pr_err("Out of memory for SFI platform dev %s\n", entry->name);
+		goto out;
+	}
+	pdev->dev.platform_data = NULL;
+	ret = platform_device_add(pdev);
+	if (ret) {
+		pr_err("Failed to add adc platform device\n");
+		platform_device_put(pdev);
+		goto out;
+	}
+	install_irq_resource(pdev, entry->irq);
+	register_rpmsg_service("rpmsg_i2c_pmic_adap", RPROC_SCU,
+				RP_PMIC_I2C);
+out:
+	return NULL;
+}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfl_pmic_i2c.h b/arch/x86/platform/intel-mid/device_libs/platform_mrfl_pmic_i2c.h
new file mode 100644
index 0000000..3034a7f
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfl_pmic_i2c.h
@@ -0,0 +1,17 @@
+/*
+ * platform_mrfl_pmic_i2c.h: platform data for pmic i2c adapter driver
+ *
+ * (C) Copyright 2012 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_MRFL_PMIC_I2C_H_
+#define _PLATFORM_MRFL_PMIC_I2C_H_
+
+extern void __init *mrfl_pmic_i2c_platform_data(
+				void *info) __attribute__((weak));
+
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfl_regulator.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfl_regulator.c
new file mode 100644
index 0000000..f7d38fa
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfl_regulator.c
@@ -0,0 +1,125 @@
+/*
+ * platform_mrfl_regulator.c - Merrifield regulator machine drvier
+ * Copyright (c) 2012, Intel Corporation.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/regulator/intel_basin_cove_pmic.h>
+#include <linux/regulator/machine.h>
+
+#include <asm/intel-mid.h>
+
+/***********VPROG1 REGUATOR platform data*************/
+static struct regulator_consumer_supply vprog1_consumer[] = {
+};
+static struct regulator_init_data vprog1_data = {
+	.constraints = {
+		.min_uV			= 1500000,
+		.max_uV			= 2800000,
+		.valid_ops_mask		= REGULATOR_CHANGE_STATUS
+			| REGULATOR_CHANGE_VOLTAGE,
+		.valid_modes_mask	= REGULATOR_MODE_NORMAL,
+	},
+	.num_consumer_supplies	= ARRAY_SIZE(vprog1_consumer),
+	.consumer_supplies	= vprog1_consumer,
+};
+
+static struct intel_pmic_info vprog1_info = {
+	.pmic_reg   = VPROG1CNT_ADDR,
+	.init_data  = &vprog1_data,
+	.table_len  = ARRAY_SIZE(VPROG1_VSEL_table),
+	.table      = VPROG1_VSEL_table,
+};
+static struct platform_device vprog1_device = {
+	.name = "intel_regulator",
+	.id = VPROG1,
+	.dev = {
+		.platform_data = &vprog1_info,
+	},
+};
+/***********VPROG2 REGUATOR platform data*************/
+static struct regulator_consumer_supply vprog2_consumer[] = {
+};
+static struct regulator_init_data vprog2_data = {
+	.constraints = {
+		.min_uV			= 1500000,
+		.max_uV			= 2850000,
+		.valid_ops_mask		= REGULATOR_CHANGE_STATUS
+			| REGULATOR_CHANGE_VOLTAGE,
+		.valid_modes_mask	= REGULATOR_MODE_NORMAL
+	},
+	.num_consumer_supplies	= ARRAY_SIZE(vprog2_consumer),
+	.consumer_supplies	= vprog2_consumer,
+};
+static struct intel_pmic_info vprog2_info = {
+	.pmic_reg   = VPROG2CNT_ADDR,
+	.init_data  = &vprog2_data,
+	.table_len  = ARRAY_SIZE(VPROG2_VSEL_table),
+	.table      = VPROG2_VSEL_table,
+};
+static struct platform_device vprog2_device = {
+	.name = "intel_regulator",
+	.id = VPROG2,
+	.dev = {
+		.platform_data = &vprog2_info,
+	},
+};
+
+/***********VPROG3 REGUATOR platform data*************/
+static struct regulator_consumer_supply vprog3_consumer[] = {
+};
+static struct regulator_init_data vprog3_data = {
+	.constraints = {
+		.min_uV			= 1050000,
+		.max_uV			= 2800000,
+		.valid_ops_mask		= REGULATOR_CHANGE_STATUS
+			| REGULATOR_CHANGE_VOLTAGE,
+		.valid_modes_mask	= REGULATOR_MODE_NORMAL
+	},
+	.num_consumer_supplies	= ARRAY_SIZE(vprog2_consumer),
+	.consumer_supplies	= vprog3_consumer,
+};
+static struct intel_pmic_info vprog3_info = {
+	.pmic_reg   = VPROG3CNT_ADDR,
+	.init_data  = &vprog3_data,
+	.table_len  = ARRAY_SIZE(VPROG3_VSEL_table),
+	.table      = VPROG3_VSEL_table,
+};
+static struct platform_device vprog3_device = {
+	.name = "intel_regulator",
+	.id = VPROG3,
+	.dev = {
+		.platform_data = &vprog3_info,
+	},
+};
+
+static struct platform_device *regulator_devices[] __initdata = {
+	&vprog1_device,
+	&vprog2_device,
+	&vprog3_device,
+};
+
+static int __init regulator_init(void)
+{
+	/* register the regulator only if SoC is Tangier */
+	if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER)
+		platform_add_devices(regulator_devices,
+				ARRAY_SIZE(regulator_devices));
+
+	return 0;
+}
+device_initcall(regulator_init);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_audio.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_audio.c
new file mode 100644
index 0000000..e96dc14
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_audio.c
@@ -0,0 +1,134 @@
+/*
+ * platform_mrfld_audio.c: MRFLD audio platform data initilization file
+ *
+ * (C) Copyright 2012 Intel Corporation
+ * Author: Dharageswari R <dharageswari.r@intel.com>
+ *	Vinod Koul <vinod.koul@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
+#include <linux/init.h>
+#include <linux/sfi.h>
+#include <linux/platform_device.h>
+#include <asm/intel-mid.h>
+#include <linux/platform_data/intel_mid_remoteproc.h>
+#include <asm/platform_sst_audio.h>
+#include <asm/platform_mrfld_audio.h>
+#include "platform_msic.h"
+
+static struct mrfld_audio_platform_data mrfld_audio_pdata;
+
+void *merfld_audio_platform_data(void *info)
+{
+	struct platform_device *pdev;
+	int ret;
+
+	pr_debug("in %s\n", __func__);
+
+	ret = add_sst_platform_device();
+	if (ret < 0) {
+		pr_err("%s failed to sst_platform device\n", __func__);
+		return NULL;
+	}
+
+	pdev = platform_device_alloc("hdmi-audio", -1);
+	if (!pdev) {
+		pr_err("failed to allocate hdmi-audio platform device\n");
+		return NULL;
+	}
+
+	ret = platform_device_add(pdev);
+	if (ret) {
+		pr_err("failed to add hdmi-audio platform device\n");
+		platform_device_put(pdev);
+		return NULL;
+	}
+
+	/* request the gpios for audio */
+	mrfld_audio_pdata.codec_gpio = get_gpio_by_name("audiocodec_int");
+	mrfld_audio_pdata.codec_rst = get_gpio_by_name("audiocodec_rst");
+
+	pdev = platform_device_alloc("mrfld_lm49453", -1);
+	if (!pdev) {
+		pr_err("failed to allocate mrfld_lm49453 platform device\n");
+		return NULL;
+	}
+
+	ret = platform_device_add(pdev);
+	if (ret) {
+		pr_err("failed to add mrfld_lm49453 platform device\n");
+		platform_device_put(pdev);
+		return NULL;
+	}
+	if (platform_device_add_data(pdev, &mrfld_audio_pdata,
+				     sizeof(mrfld_audio_pdata))) {
+		pr_err("failed to add mrfld_lm49453 platform data\n");
+		platform_device_put(pdev);
+		return NULL;
+	}
+
+	register_rpmsg_service("rpmsg_msic_mrfld_audio", RPROC_SCU,
+				RP_MSIC_MRFLD_AUDIO);
+
+	return NULL;
+}
+
+void *merfld_wm8958_audio_platform_data(void *info)
+{
+	struct platform_device *pdev;
+	int ret;
+
+	ret = add_sst_platform_device();
+	if (ret < 0) {
+		pr_err("%s failed to sst_platform device\n", __func__);
+		return NULL;
+	}
+
+	pdev = platform_device_alloc("hdmi-audio", -1);
+	if (!pdev) {
+		pr_err("failed to allocate hdmi-audio platform device\n");
+		return NULL;
+	}
+
+	ret = platform_device_add(pdev);
+	if (ret) {
+		pr_err("failed to add hdmi-audio platform device\n");
+		platform_device_put(pdev);
+		return NULL;
+	}
+
+	pdev = platform_device_alloc("mrfld_wm8958", -1);
+	if (!pdev) {
+		pr_err("failed to allocate mrfld_wm8958 platform device\n");
+		return NULL;
+	}
+
+	ret = platform_device_add(pdev);
+	if (ret) {
+		pr_err("failed to add mrfld_wm8958 platform device\n");
+		platform_device_put(pdev);
+		return NULL;
+	}
+	/* Speaker boost gpio is required on moorefield mofd_v0 PR1 phone
+	 * If its not populated get_gpio_by_name will return -1 */
+	mrfld_audio_pdata.spk_gpio = get_gpio_by_name("spkr_boost_en");
+	pr_info("Speaker boost gpio is %d\n", mrfld_audio_pdata.spk_gpio);
+	if (platform_device_add_data(pdev, &mrfld_audio_pdata,
+				     sizeof(mrfld_audio_pdata))) {
+		pr_err("failed to add mrfld_wm8958 platform data\n");
+		platform_device_put(pdev);
+		return NULL;
+	}
+
+	register_rpmsg_service("rpmsg_mrfld_wm8958_audio", RPROC_SCU,
+				RP_MSIC_MRFLD_AUDIO);
+
+	return NULL;
+}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic.c b/arch/x86/platform/intel-mid/device_libs/platform_msic.c
new file mode 100644
index 0000000..a601f2c
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_msic.c
@@ -0,0 +1,92 @@
+/*
+ * platform_msic.c: MSIC platform data initilization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
+#include <linux/init.h>
+#include <linux/sfi.h>
+#include <linux/mfd/intel_msic.h>
+#include <asm/intel_scu_ipc.h>
+#include <asm/intel-mid.h>
+#include "platform_msic.h"
+
+struct intel_msic_platform_data msic_pdata;
+
+static struct resource msic_resources[] = {
+	{
+		.start	= INTEL_MSIC_IRQ_PHYS_BASE,
+		.end	= INTEL_MSIC_IRQ_PHYS_BASE + 64 - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+};
+
+static struct platform_device msic_device = {
+	.name		= "intel_msic",
+	.id		= -1,
+	.dev		= {
+		.platform_data	= &msic_pdata,
+	},
+	.num_resources	= ARRAY_SIZE(msic_resources),
+	.resource	= msic_resources,
+};
+
+inline bool intel_mid_has_msic(void)
+{
+	return (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_PENWELL);
+}
+
+static int msic_scu_status_change(struct notifier_block *nb,
+				  unsigned long code, void *data)
+{
+	if (code == SCU_DOWN) {
+		platform_device_unregister(&msic_device);
+		return 0;
+	}
+
+	return platform_device_register(&msic_device);
+}
+
+static int __init msic_init(void)
+{
+	static struct notifier_block msic_scu_notifier = {
+		.notifier_call	= msic_scu_status_change,
+	};
+
+	/*
+	 * We need to be sure that the SCU IPC is ready before MSIC device
+	 * can be registered.
+	 */
+	if (intel_mid_has_msic())
+		intel_scu_notifier_add(&msic_scu_notifier);
+
+	return 0;
+}
+arch_initcall(msic_init);
+
+/*
+ * msic_generic_platform_data - sets generic platform data for the block
+ * @info: pointer to the SFI device table entry for this block
+ * @block: MSIC block
+ *
+ * Function sets IRQ number from the SFI table entry for given device to
+ * the MSIC platform data.
+ */
+void *msic_generic_platform_data(void *info, enum intel_msic_block block)
+{
+	struct sfi_device_table_entry *entry = info;
+
+	BUG_ON(block < 0 || block >= INTEL_MSIC_BLOCK_LAST);
+	msic_pdata.irq[block] = entry->irq;
+
+	return NULL;
+}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic.h b/arch/x86/platform/intel-mid/device_libs/platform_msic.h
new file mode 100644
index 0000000..6abcfc7
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_msic.h
@@ -0,0 +1,21 @@
+/*
+ * platform_msic.h: MSIC platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_MSIC_H_
+#define _PLATFORM_MSIC_H_
+
+#include <linux/mfd/intel_msic.h>
+
+extern struct intel_msic_platform_data msic_pdata;
+
+extern void *msic_generic_platform_data(void *info,
+			enum intel_msic_block block) __attribute__((weak));
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic_adc.c b/arch/x86/platform/intel-mid/device_libs/platform_msic_adc.c
new file mode 100644
index 0000000..f1ed88e
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_msic_adc.c
@@ -0,0 +1,56 @@
+/*
+ * platform_msic_adc.c: MSIC ADC platform data initilization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
+#include <linux/init.h>
+#include <linux/sfi.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_mid_gpadc.h>
+#include <asm/intel_mid_remoteproc.h>
+#include "platform_msic.h"
+#include "platform_msic_adc.h"
+
+void __init *msic_adc_platform_data(void *info)
+{
+	struct platform_device *pdev = NULL;
+	struct sfi_device_table_entry *entry = info;
+	static struct intel_mid_gpadc_platform_data msic_adc_pdata;
+	int ret = 0;
+
+	pdev = platform_device_alloc(ADC_DEVICE_NAME, -1);
+
+	if (!pdev) {
+		pr_err("out of memory for SFI platform dev %s\n",
+					ADC_DEVICE_NAME);
+		goto out;
+	}
+
+	msic_adc_pdata.intr = 0xffff7fc0;
+
+	pdev->dev.platform_data = &msic_adc_pdata;
+
+	ret = platform_device_add(pdev);
+	if (ret) {
+		pr_err("failed to add adc platform device\n");
+		platform_device_put(pdev);
+		goto out;
+	}
+
+	install_irq_resource(pdev, entry->irq);
+
+	register_rpmsg_service("rpmsg_msic_adc", RPROC_SCU,
+				RP_MSIC_ADC);
+out:
+	return &msic_adc_pdata;
+}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic_adc.h b/arch/x86/platform/intel-mid/device_libs/platform_msic_adc.h
new file mode 100644
index 0000000..8e1f901
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_msic_adc.h
@@ -0,0 +1,18 @@
+/*
+ * platform_msic_adc.h: MSIC ADC platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_MSIC_ADC_H_
+#define _PLATFORM_MSIC_ADC_H_
+
+#define ADC_DEVICE_NAME "msic_adc"
+
+extern void __init *msic_adc_platform_data(void *info) __attribute__((weak));
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic_audio.c b/arch/x86/platform/intel-mid/device_libs/platform_msic_audio.c
new file mode 100644
index 0000000..a8aadfb
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_msic_audio.c
@@ -0,0 +1,36 @@
+/*
+ * platform_msic_audio.c: MSIC audio platform data initilization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
+#include <linux/init.h>
+#include <linux/sfi.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/intel_msic.h>
+#include <asm/intel-mid.h>
+#include "platform_msic.h"
+#include "platform_msic_audio.h"
+
+void *msic_audio_platform_data(void *info)
+{
+	struct platform_device *pdev;
+
+	pdev = platform_device_register_simple("sst-platform", -1, NULL, 0);
+
+	if (IS_ERR(pdev)) {
+		pr_err("failed to create audio platform device\n");
+		return NULL;
+	}
+
+	return msic_generic_platform_data(info, INTEL_MSIC_BLOCK_AUDIO);
+}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic_audio.h b/arch/x86/platform/intel-mid/device_libs/platform_msic_audio.h
new file mode 100644
index 0000000..1fca68f
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_msic_audio.h
@@ -0,0 +1,16 @@
+/*
+ * platform_msic_audio.h: MSIC audio platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_MSIC_AUDIO_H_
+#define _PLATFORM_MSIC_AUDIO_H_
+
+extern void __init *msic_audio_platform_data(void *info) __attribute__((weak));
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic_gpio.c b/arch/x86/platform/intel-mid/device_libs/platform_msic_gpio.c
new file mode 100644
index 0000000..1b11038
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_msic_gpio.c
@@ -0,0 +1,79 @@
+/*
+ * platform_msic_gpio.c: MSIC GPIO platform data initilization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/mfd/intel_msic.h>
+#include <asm/intel-mid.h>
+#include <linux/platform_data/intel_mid_remoteproc.h>
+#include "platform_msic.h"
+#include "platform_msic_gpio.h"
+
+void __init *msic_gpio_platform_data(void *info)
+{
+	struct platform_device *pdev = NULL;
+	struct sfi_device_table_entry *entry = info;
+	static struct intel_msic_gpio_pdata msic_gpio_pdata;
+	int ret;
+	int gpio;
+	struct resource res;
+
+	pdev = platform_device_alloc(MSIC_GPIO_DEVICE_NAME, -1);
+
+	if (!pdev) {
+		pr_err("out of memory for SFI platform dev %s\n",
+					MSIC_GPIO_DEVICE_NAME);
+		return NULL;
+	}
+
+	gpio = get_gpio_by_name("msic_gpio_base");
+
+	if (gpio < 0)
+		return NULL;
+
+	/* Basincove PMIC GPIO has total 8 GPIO pins,
+	 * GPIO[5:2,0] support 1.8V, GPIO[7:6,1] support 1.8V and 3.3V,
+	 * We group GPIO[5:2] to low voltage and GPIO[7:6] to
+	 * high voltage. Because the CTL registers are contiguous,
+	 * this grouping method doesn't affect the driver usage but
+	 * easy for the driver sharing among multiple platforms.
+	 */
+	msic_gpio_pdata.ngpio_lv = 6;
+	msic_gpio_pdata.ngpio_hv = 2;
+	msic_gpio_pdata.gpio0_lv_ctlo = 0x7E;
+	msic_gpio_pdata.gpio0_lv_ctli = 0x8E;
+	msic_gpio_pdata.gpio0_hv_ctlo = 0x84;
+	msic_gpio_pdata.gpio0_hv_ctli = 0x94;
+
+	msic_gpio_pdata.can_sleep = 1;
+	msic_gpio_pdata.gpio_base = gpio;
+
+	pdev->dev.platform_data = &msic_gpio_pdata;
+
+	ret = platform_device_add(pdev);
+	if (ret) {
+		pr_err("failed to add msic gpio platform device\n");
+		platform_device_put(pdev);
+		return NULL;
+	}
+
+	res.name = "IRQ",
+	res.flags = IORESOURCE_IRQ,
+	res.start = entry->irq;
+	platform_device_add_resources(pdev, &res, 1);
+
+	register_rpmsg_service("rpmsg_msic_gpio", RPROC_SCU, RP_MSIC_GPIO);
+
+	return &msic_gpio_pdata;
+}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic_gpio.h b/arch/x86/platform/intel-mid/device_libs/platform_msic_gpio.h
new file mode 100644
index 0000000..b78b236
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_msic_gpio.h
@@ -0,0 +1,18 @@
+/*
+ * platform_msic_gpio.h: MSIC GPIO platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_MSIC_GPIO_H_
+#define _PLATFORM_MSIC_GPIO_H_
+
+#define MSIC_GPIO_DEVICE_NAME "msic_gpio"
+
+extern void __init *msic_gpio_platform_data(void *info) __attribute__((weak));
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic_ocd.c b/arch/x86/platform/intel-mid/device_libs/platform_msic_ocd.c
new file mode 100644
index 0000000..1782389
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_msic_ocd.c
@@ -0,0 +1,39 @@
+/*
+ * platform_msic_ocd.c: MSIC OCD platform data initilization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
+#include <linux/sfi.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/mfd/intel_msic.h>
+#include <asm/intel-mid.h>
+#include "platform_msic.h"
+#include "platform_msic_ocd.h"
+
+void __init *msic_ocd_platform_data(void *info)
+{
+	static struct intel_msic_ocd_pdata msic_ocd_pdata;
+	int gpio;
+
+	gpio = get_gpio_by_name("ocd_gpio");
+
+	if (gpio < 0)
+		return NULL;
+
+	msic_ocd_pdata.gpio = gpio;
+	msic_pdata.ocd = &msic_ocd_pdata;
+
+	return msic_generic_platform_data(info, INTEL_MSIC_BLOCK_OCD);
+}
+
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic_ocd.h b/arch/x86/platform/intel-mid/device_libs/platform_msic_ocd.h
new file mode 100644
index 0000000..7caa13b
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_msic_ocd.h
@@ -0,0 +1,16 @@
+/*
+ * platform_msic_ocd.h: MSIC OCD platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_MSIC_OCD_H_
+#define _PLATFORM_MSIC_OCD_H_
+
+extern void __init *msic_ocd_platform_data(void *info) __attribute__((weak));
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic_power_btn.c b/arch/x86/platform/intel-mid/device_libs/platform_msic_power_btn.c
new file mode 100644
index 0000000..b8e1732
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_msic_power_btn.c
@@ -0,0 +1,70 @@
+/*
+ * platform_msic_power_btn.c: MSIC power btn platform data initilization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
+#include <linux/sfi.h>
+#include <linux/init.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_mid_powerbtn.h>
+#include <asm/intel_scu_ipc.h>
+#include <asm/intel_scu_pmic.h>
+#include "platform_msic_power_btn.h"
+#include <linux/platform_data/intel_mid_remoteproc.h>
+
+static struct intel_msic_power_btn_platform_data msic_power_btn_pdata;
+
+static int moor_pb_irq_ack(struct intel_msic_power_btn_platform_data *pdata)
+{
+	intel_scu_ipc_update_register(BCOVE_PBIRQ, 0, MSIC_PWRBTNM);
+	intel_scu_ipc_update_register(BCOVE_PBIRQMASK, 0, MSIC_PWRBTNM);
+
+	return 0;
+}
+
+void __init *msic_power_btn_platform_data(void *info)
+{
+	int ret;
+	struct platform_device *pdev;
+	struct sfi_device_table_entry *entry = info;
+	struct resource res;
+
+	pdev = platform_device_alloc(INTEL_MID_POWERBTN_DEV_NAME, -1);
+	if (!pdev) {
+		pr_err("%s(): out of memory\n", __func__);
+		return NULL;
+	}
+
+	msic_power_btn_pdata.pbstat = 0xfffff61a;
+	msic_power_btn_pdata.pb_level = (1 << 4);
+	msic_power_btn_pdata.irq_lvl1_mask = 0x0c;
+	msic_power_btn_pdata.irq_ack = moor_pb_irq_ack;
+
+	pdev->dev.platform_data = &msic_power_btn_pdata;
+
+	ret = platform_device_add(pdev);
+	if (ret) {
+		pr_err("%s(): platform_device_add() failed\n", __func__);
+		platform_device_put(pdev);
+		return NULL;
+	}
+
+	res.name = "IRQ",
+	res.flags = IORESOURCE_IRQ,
+	res.start = entry->irq;
+	platform_device_add_resources(pdev, &res, 1);
+
+	register_rpmsg_service("rpmsg_mid_powerbtn",
+			RPROC_SCU, RP_MSIC_POWER_BTN);
+
+	return &msic_power_btn_pdata;
+}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic_power_btn.h b/arch/x86/platform/intel-mid/device_libs/platform_msic_power_btn.h
new file mode 100644
index 0000000..3de94ca
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_msic_power_btn.h
@@ -0,0 +1,19 @@
+/*
+ * platform_msic_power_btn.h: MSIC power btn platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_MSIC_POWER_BTN_H_
+#define _PLATFORM_MSIC_POWER_BTN_H_
+
+#define INTEL_MID_POWERBTN_DEV_NAME "mid_powerbtn"
+
+extern void __init *msic_power_btn_platform_data(void *info)
+		__attribute__((weak));
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_panel.c b/arch/x86/platform/intel-mid/device_libs/platform_panel.c
new file mode 100644
index 0000000..8a22235
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_panel.c
@@ -0,0 +1,68 @@
+/*
+ * platform_panel.c: panel platform data initilization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/intel-mid.h>
+#include <linux/string.h>
+#include <linux/sfi.h>
+#include <linux/panel_psb_drv.h>
+
+int PanelID = GCT_DETECT;
+EXPORT_SYMBOL(PanelID);
+
+struct support_panel_list_t {
+	enum panel_type panel_id;
+	char name[SFI_NAME_LEN];
+};
+static struct support_panel_list_t
+	support_panel_list[] = {
+		{CMI_7x12_CMD, "PANEL_CMI_CMD"},
+		{JDI_7x12_VID, "PANEL_JDI_VID"},
+		{JDI_7x12_CMD, "PANEL_JDI_CMD"},
+	/*  above 3 items will be removed
+	* after firmware changing
+	*/
+		{CMI_7x12_CMD, "PNC_CMI_7x12"},
+		{JDI_7x12_VID, "PNV_JDI_7x12"},
+		{JDI_7x12_CMD, "PNC_JDI_7x12"},
+		{SHARP_10x19_CMD, "PNC_SHARP_10x19"},
+		{SHARP_10x19_DUAL_CMD, "PNCD_SHARP_10x19"},
+		{SHARP_25x16_VID, "PNV_SHARP_25x16"},
+		{SHARP_25x16_CMD, "PNC_SHARP_25x16"},
+		{JDI_25x16_VID, "PNV_JDI_25x16"},
+		{JDI_25x16_CMD, "PNC_JDI_25x16"},
+		{SDC_16x25_CMD, "PNC_SDC_16x25"},
+		{SDC_25x16_CMD, "PNC_SDC_25x16"}
+	};
+
+#define NUM_SUPPORT_PANELS (sizeof( \
+		support_panel_list) \
+	/ sizeof(struct support_panel_list_t))
+
+void panel_handler(struct sfi_device_table_entry *pentry,
+				struct devs_id *dev) {
+	int i;
+
+	/* JDI_7x12_CMD will be used as default panel */
+	PanelID = JDI_7x12_CMD;
+	for (i = 0; i < NUM_SUPPORT_PANELS; i++)
+		if (strncmp(pentry->name, support_panel_list[i].name,
+						SFI_NAME_LEN) == 0) {
+			PanelID = support_panel_list[i].panel_id;
+			break;
+		}
+	if (i == NUM_SUPPORT_PANELS)
+		pr_err("Could not detected this panel, set to default panel\n");
+	pr_info("Panel name = %16.16s PanelID = %d\n", pentry->name, PanelID);
+}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_panel.h b/arch/x86/platform/intel-mid/device_libs/platform_panel.h
new file mode 100644
index 0000000..9d6aa58
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_panel.h
@@ -0,0 +1,17 @@
+/*
+ * platform_panel.h: Panel platform library header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_PANEL_H_
+#define _PLATFORM_PANEL_H_
+
+extern void panel_handler(struct sfi_device_table_entry *pentry,
+			struct devs_id *dev) __attribute__((weak));
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_pca9574.c b/arch/x86/platform/intel-mid/device_libs/platform_pca9574.c
new file mode 100644
index 0000000..a3a2e86
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_pca9574.c
@@ -0,0 +1,19 @@
+/*
+ * platform_pca9574.c: pca9574 platform data initilization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+void __init *nxp_pca9574_platform_data(void *info)
+{
+	pr_info("pca9574\n");
+	return NULL;
+}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_pca9574.h b/arch/x86/platform/intel-mid/device_libs/platform_pca9574.h
new file mode 100644
index 0000000..41591ee
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_pca9574.h
@@ -0,0 +1,15 @@
+/*
+ * platform_pca9574.h: pca9574 platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_PCA9574_H_
+#define _PLATFORM_PCA9574_H_
+extern void *nxp_pca9574_platform_data(void *info) __attribute__((weak));
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_pmic_gpio.c b/arch/x86/platform/intel-mid/device_libs/platform_pmic_gpio.c
new file mode 100644
index 0000000..1526663
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_pmic_gpio.c
@@ -0,0 +1,36 @@
+/*
+ * platform_pmic_gpio.c: PMIC GPIO platform data initilization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/sfi.h>
+#include <asm/intel-mid.h>
+#include <linux/intel_pmic_gpio.h>
+#include "platform_pmic_gpio.h"
+
+void __init *pmic_gpio_platform_data(void *info)
+{
+	static struct intel_pmic_gpio_platform_data pmic_gpio_pdata;
+	int gpio_base = get_gpio_by_name("pmic_gpio_base");
+
+	if (gpio_base == -1)
+		gpio_base = 64;
+	pmic_gpio_pdata.gpio_base = gpio_base;
+	pmic_gpio_pdata.irq_base = gpio_base + INTEL_MID_IRQ_OFFSET;
+	pmic_gpio_pdata.gpiointr = 0xffffeff8;
+
+	return &pmic_gpio_pdata;
+}
+
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_pmic_gpio.h b/arch/x86/platform/intel-mid/device_libs/platform_pmic_gpio.h
new file mode 100644
index 0000000..0bce0de
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_pmic_gpio.h
@@ -0,0 +1,16 @@
+/*
+ * platform_pmic_gpio.h: PMIC GPIO platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_PMIC_GPIO_H_
+#define _PLATFORM_PMIC_GPIO_H_
+
+extern void __init *pmic_gpio_platform_data(void *info) __attribute__((weak));
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_pn544.c b/arch/x86/platform/intel-mid/device_libs/platform_pn544.c
new file mode 100644
index 0000000..6652224
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_pn544.c
@@ -0,0 +1,114 @@
+/*
+ * platform_pn544.c: pn544 platform data initilization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/nfc/pn544.h>
+#include <asm/intel-mid.h>
+#include "platform_pn544.h"
+
+
+static unsigned int nfc_host_int_gpio, nfc_enable_gpio, nfc_fw_reset_gpio;
+
+static int pn544_nfc_request_resources(struct i2c_client *client)
+{
+	int ret;
+
+	ret = gpio_request(nfc_host_int_gpio, NFC_HOST_INT_GPIO);
+	if (ret) {
+		dev_err(&client->dev, "Request NFC INT GPIO fails %d\n", ret);
+		return -1;
+	}
+
+	ret = gpio_direction_input(nfc_host_int_gpio);
+	if (ret) {
+		dev_err(&client->dev, "Set GPIO Direction fails %d\n", ret);
+		goto err_int;
+	}
+
+	ret = gpio_request(nfc_enable_gpio, NFC_ENABLE_GPIO);
+	if (ret) {
+		dev_err(&client->dev,
+			"Request for NFC Enable GPIO fails %d\n", ret);
+		goto err_int;
+	}
+
+	ret = gpio_direction_output(nfc_enable_gpio, 0);
+	if (ret) {
+		dev_err(&client->dev, "Set GPIO Direction fails %d\n", ret);
+		goto err_enable;
+	}
+
+	ret = gpio_request(nfc_fw_reset_gpio, NFC_FW_RESET_GPIO);
+	if (ret) {
+		dev_err(&client->dev,
+			"Request for NFC FW Reset GPIO fails %d\n", ret);
+		goto err_enable;
+	}
+
+	ret = gpio_direction_output(nfc_fw_reset_gpio, 0);
+	if (ret) {
+		dev_err(&client->dev, "Set GPIO Direction fails %d\n", ret);
+		goto err_fw;
+	}
+
+	return 0;
+err_fw:
+	gpio_free(nfc_fw_reset_gpio);
+err_enable:
+	gpio_free(nfc_enable_gpio);
+err_int:
+	gpio_free(nfc_host_int_gpio);
+	return -1;
+}
+
+void *pn544_platform_data(void *info)
+{
+	struct i2c_board_info *i2c_info = (struct i2c_board_info *) info;
+	static struct pn544_i2c_platform_data pn544_nfc_platform_data;
+
+	memset(&pn544_nfc_platform_data, 0x00,
+		sizeof(struct pn544_i2c_platform_data));
+
+	nfc_host_int_gpio = get_gpio_by_name(NFC_HOST_INT_GPIO);
+	if (nfc_host_int_gpio == -1)
+		return NULL;
+	nfc_enable_gpio = get_gpio_by_name(NFC_ENABLE_GPIO);
+	if (nfc_enable_gpio  == -1)
+		return NULL;
+	nfc_fw_reset_gpio = get_gpio_by_name(NFC_FW_RESET_GPIO);
+	if (nfc_fw_reset_gpio == -1)
+		return NULL;
+
+	pn544_nfc_platform_data.irq_gpio = nfc_host_int_gpio;
+	pn544_nfc_platform_data.ven_gpio = nfc_enable_gpio;
+	pn544_nfc_platform_data.firm_gpio = nfc_fw_reset_gpio;
+	i2c_info->irq = nfc_host_int_gpio + INTEL_MID_IRQ_OFFSET;
+
+	/* On MFLD AND CLVT platforms, I2C xfers must be split
+	 * to avoid I2C FIFO underrun errors in I2C bus driver */
+	switch (intel_mid_identify_cpu()) {
+	case INTEL_MID_CPU_CHIP_PENWELL:
+		pn544_nfc_platform_data.max_i2c_xfer_size = 31;
+		break;
+	case INTEL_MID_CPU_CHIP_CLOVERVIEW:
+		pn544_nfc_platform_data.max_i2c_xfer_size = 255;
+		break;
+	default:
+		break;
+	}
+
+	pn544_nfc_platform_data.request_resources =
+		pn544_nfc_request_resources;
+
+	return &pn544_nfc_platform_data;
+}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_pn544.h b/arch/x86/platform/intel-mid/device_libs/platform_pn544.h
new file mode 100644
index 0000000..a5464e7
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_pn544.h
@@ -0,0 +1,20 @@
+/*
+ * platform_pn544.h: pn544 platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_PN544_H_
+#define _PLATFORM_PN544_H_
+
+/* MFLD NFC controller (PN544) platform init */
+#define NFC_HOST_INT_GPIO               "NFC-intr"
+#define NFC_ENABLE_GPIO                 "NFC-enable"
+#define NFC_FW_RESET_GPIO               "NFC-reset"
+extern void *pn544_platform_data(void *info) __attribute__((weak));
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_r69001.c b/arch/x86/platform/intel-mid/device_libs/platform_r69001.c
new file mode 100644
index 0000000..d463def
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_r69001.c
@@ -0,0 +1,39 @@
+/*
+ * platform_r69001.c: r69001 touch platform data initilization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/input.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/i2c.h>
+#include <linux/r69001-ts.h>
+#include <asm/intel-mid.h>
+#include "platform_r69001.h"
+
+void *r69001_platform_data(void *info)
+{
+	struct i2c_board_info *i2c_info = info;
+	static struct r69001_platform_data r69001_platform_data = {
+		.irq_type = IRQF_ONESHOT,
+		.gpio = -1,
+	};
+
+	if (!i2c_info->irq) { /* not a fast-int */
+		r69001_platform_data.gpio = get_gpio_by_name("jdi_touch_int");
+		if (r69001_platform_data.gpio == -1)
+			r69001_platform_data.gpio = 183;
+		r69001_platform_data.irq_type |= IRQ_TYPE_EDGE_FALLING;
+	}
+	return &r69001_platform_data;
+}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_r69001.h b/arch/x86/platform/intel-mid/device_libs/platform_r69001.h
new file mode 100644
index 0000000..6e49e30
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_r69001.h
@@ -0,0 +1,16 @@
+/*
+ * platform_r69001.h: r69001 touch platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_R69001_H_
+#define _PLATFORM_R69001_H_
+
+extern void *r69001_platform_data(void *info) __attribute__((weak));
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_rmi4.c b/arch/x86/platform/intel-mid/device_libs/platform_rmi4.c
new file mode 100644
index 0000000..c660d6a
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_rmi4.c
@@ -0,0 +1,79 @@
+/*
+ * platform_rmi4.c: Synaptics rmi4 platform data initilization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/lnw_gpio.h>
+#include <linux/synaptics_i2c_rmi4.h>
+#include <asm/intel-mid.h>
+#include "platform_rmi4.h"
+
+void *rmi4_platform_data(void *info)
+{
+	static struct rmi4_touch_calib calib[] = {
+		/* RMI4_S3202_OGS */
+		{
+			.swap_axes = true,
+			.customer_id = 20130123,
+			.fw_name = "s3202_ogs.img",
+			.key_dev_name = "rmi4_key",
+		},
+		/* RMI4_S3202_GFF */
+		{
+			.swap_axes = false,
+			.customer_id = 20130123,
+			.fw_name = "s3202_gff.img",
+			.key_dev_name = "rmi4_key_gff",
+		},
+		/* RMI4_S3400_CGS*/
+		{
+			.swap_axes = true,
+			.customer_id = 1358954496,
+			.fw_name = "s3400_cgs.img",
+			.key_dev_name = "rmi4_key",
+		},
+		/* RMI4_S3400_IGZO*/
+		{
+			.swap_axes = true,
+			.customer_id = 1358954496,
+			.fw_name = "s3400_igzo.img",
+			.key_dev_name = "rmi4_key",
+		},
+	};
+
+	static struct rmi4_platform_data pdata = {
+		.irq_type = IRQ_TYPE_EDGE_FALLING | IRQF_ONESHOT,
+		.regulator_en = false,
+		.regulator_name = "vemmc2",
+		.calib = calib,
+	};
+
+	if (intel_mid_identify_cpu() ==
+		INTEL_MID_CPU_CHIP_TANGIER) {
+		/* on Merrifield based platform, vprog2 is being used for
+		supplying power to the touch panel. Currently regulator
+		functions are not supported so we don't enable it now
+		(it is turned on by display driver.) */
+		pdata.regulator_en = false;
+		pdata.regulator_name = "vprog2";
+		/* on Merrifield based device, FAST-IRQ, not GPIO based,
+		is dedicated for touch interrupt put invalid GPIO number */
+		pdata.int_gpio_number = -1;
+	} else
+		pdata.int_gpio_number = get_gpio_by_name("ts_int");
+
+	pdata.rst_gpio_number = get_gpio_by_name("ts_rst");
+
+	return &pdata;
+}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_rmi4.h b/arch/x86/platform/intel-mid/device_libs/platform_rmi4.h
new file mode 100644
index 0000000..3e9668c
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_rmi4.h
@@ -0,0 +1,16 @@
+/*
+ * platform_rmi4.h: Synaptics rmi4 platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_RMI4_H_
+#define _PLATFORM_RMI4_H_
+
+extern void *rmi4_platform_data(void *info) __attribute__((weak));
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_scu_flis.c b/arch/x86/platform/intel-mid/device_libs/platform_scu_flis.c
new file mode 100644
index 0000000..3554b4b
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_scu_flis.c
@@ -0,0 +1,323 @@
+/*
+ * platform_scu_flis.c: scu_flis platform data initilization file
+ *
+ * (C) Copyright 2013 Intel Corporation
+ * Author: Ning Li <ning.li@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/input.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_mid_remoteproc.h>
+#include <asm/intel_scu_flis.h>
+#include "platform_scu_flis.h"
+
+static struct pin_mmio_flis_t ann_pin_mmio_flis_table[ANN_PIN_TABLE_SIZE] = {
+	/* gpioclk */
+	[ann_gp_clkph_0] = { writable, 0x2D00 },
+	[ann_gp_clkph_1] = { writable, 0x2D04 },
+	[ann_gp_clkph_2] = { writable, 0x2D08 },
+	[ann_gp_clkph_3] = { writable, 0x2D0C },
+	[ann_gp_clkph_4] = { writable, 0x2D10 },
+	[ann_gp_clkph_5] = { writable, 0x2D14 },
+	[ann_gp_clkph_6] = { writable, 0x2D18 },
+	[ann_osc_clk_ctrl_0] = { writable, 0x2D1C },
+	[ann_osc_clk_ctrl_1] = { writable, 0x2D20 },
+	[ann_osc_clk_out_0] = { writable, 0x2D24 },
+	[ann_osc_clk_out_1] = { writable, 0x2D28 },
+	[ann_osc_clk_out_2] = { writable, 0x2D2C },
+	[ann_osc_clk_out_3] = { writable, 0x2D30 },
+	[ann_osc_clk_out_4] = { writable, 0x2D34 },
+
+	/* gpiocsb */
+	[ann_gp_camerasb_0] = { writable, 0x2900 },
+	[ann_gp_camerasb_1] = { writable, 0x2904 },
+	[ann_gp_camerasb_2] = { writable, 0x2908 },
+	[ann_gp_camerasb_3] = { writable, 0x290C },
+	[ann_gp_camerasb_4] = { writable, 0x2910 },
+	[ann_gp_camerasb_5] = { writable, 0x2914 },
+	[ann_gp_camerasb_6] = { writable, 0x2918 },
+	[ann_gp_camerasb_7] = { writable, 0x291C },
+	[ann_gp_camerasb_8] = { writable, 0x2920 },
+	[ann_gp_camerasb_9] = { writable, 0x2924 },
+	[ann_gp_camerasb_10] = { writable, 0x2928 },
+	[ann_gp_camerasb_11] = { writable, 0x292C },
+	[ann_gp_hdmi_hpd] = { writable, 0x2930 },
+	[ann_gp_intd_dsi_te1] = { writable, 0x2934 },
+	[ann_gp_intd_dsi_te2] = { writable, 0x2938 },
+
+	/* gpioemmc */
+	[ann_emmc_0_clk] = { writable, 0x900 },
+	[ann_emmc_0_cmd] = { writable, 0x904 },
+	[ann_emmc_0_d_0] = { writable, 0x908 },
+	[ann_emmc_0_d_1] = { writable, 0x90C },
+	[ann_emmc_0_d_2] = { writable, 0x910 },
+	[ann_emmc_0_d_3] = { writable, 0x914 },
+	[ann_emmc_0_d_4] = { writable, 0x918 },
+	[ann_emmc_0_d_5] = { writable, 0x91C },
+	[ann_emmc_0_d_6] = { writable, 0x920 },
+	[ann_emmc_0_d_7] = { writable, 0x924 },
+	[ann_emmc_0_rst_b] = { writable, 0x928 },
+	[ann_emmc_0_rclk] = { writable, 0x92C },
+
+	/* gpiogpio */
+	[ann_gp_12] = { writable, 0x2500 },
+	[ann_gp_13] = { writable, 0x2504 },
+	[ann_gp_14] = { writable, 0x2508 },
+	[ann_gp_15] = { writable, 0x250C },
+	[ann_gp_16] = { writable, 0x2510 },
+	[ann_gp_17] = { writable, 0x2514 },
+	[ann_gp_18] = { writable, 0x2518 },
+	[ann_gp_19] = { writable, 0x251C },
+	[ann_gp_20] = { writable, 0x2520 },
+	[ann_gp_21] = { writable, 0x2524 },
+	[ann_gp_22] = { writable, 0x2528 },
+	[ann_gp_23] = { writable, 0x252C },
+	[ann_gp_24] = { writable, 0x2530 },
+	[ann_gp_25] = { writable, 0x2534 },
+	[ann_gp_26] = { writable, 0x2538 },
+	[ann_gp_27] = { writable, 0x253C },
+	[ann_gp_28] = { writable, 0x2540 },
+	[ann_gp_29] = { writable, 0x2544 },
+	[ann_gp_30] = { writable, 0x2548 },
+	[ann_gp_31] = { writable, 0x254C },
+
+	/* gpiohsi */
+	[ann_mhsi_acdata] = { writable, 0x1100 },
+	[ann_mhsi_acflag] = { writable, 0x1104 },
+	[ann_mhsi_acready] = { writable, 0x1108 },
+	[ann_mhsi_acwake] = { writable, 0x110C },
+	[ann_mhsi_cadata] = { writable, 0x1110 },
+	[ann_mhsi_caflag] = { writable, 0x1114 },
+	[ann_mhsi_caready] = { writable, 0x1118 },
+	[ann_mhsi_cawake] = { writable, 0x111C },
+
+	/* gpioi2c */
+	[ann_i2c_0_scl] = { writable, 0x1D00 },
+	[ann_i2c_0_sda] = { writable, 0x1D04 },
+	[ann_gp_i2c_1_scl] = { writable, 0x1D08 },
+	[ann_gp_i2c_1_sda] = { writable, 0x1D0C },
+	[ann_gp_i2c_2_scl] = { writable, 0x1D10 },
+	[ann_gp_i2c_2_sda] = { writable, 0x1D14 },
+	[ann_gp_i2c_3_scl] = { writable, 0x1D18 },
+	[ann_gp_i2c_3_sda] = { writable, 0x1D1C },
+	[ann_gp_i2c_4_scl] = { writable, 0x1D20 },
+	[ann_gp_i2c_4_sda] = { writable, 0x1D24 },
+	[ann_gp_i2c_5_scl] = { writable, 0x1D28 },
+	[ann_gp_i2c_5_sda] = { writable, 0x1D2C },
+	[ann_gp_i2c_6_scl] = { writable, 0x1D30 },
+	[ann_gp_i2c_6_sda] = { writable, 0x1D34 },
+	[ann_gp_i2c_7_scl] = { writable, 0x1D38 },
+	[ann_gp_i2c_7_sda] = { writable, 0x1D3C },
+	[ann_i2c_8_scl] = { writable, 0x1D40 },
+	[ann_i2c_8_sda] = { writable, 0x1D44 },
+	[ann_i2c_9_scl] = { writable, 0x1D48 },
+	[ann_i2c_9_sda] = { writable, 0x1D4C },
+
+	/* gpiokbd */
+	[ann_gp_kbd_dkin_0] = { writable, 0x3500 },
+	[ann_gp_kbd_dkin_1] = { writable, 0x3504 },
+	[ann_gp_kbd_dkin_2] = { writable, 0x3508 },
+	[ann_gp_kbd_dkin_3] = { writable, 0x350C },
+	[ann_gp_kbd_mkin_0] = { writable, 0x3510 },
+	[ann_gp_kbd_mkin_1] = { writable, 0x3514 },
+	[ann_gp_kbd_mkin_2] = { writable, 0x3518 },
+	[ann_gp_kbd_mkin_3] = { writable, 0x351C },
+	[ann_gp_kbd_mkin_4] = { writable, 0x3520 },
+	[ann_gp_kbd_mkin_5] = { writable, 0x3524 },
+	[ann_gp_kbd_mkin_6] = { writable, 0x3528 },
+	[ann_gp_kbd_mkin_7] = { writable, 0x352C },
+	[ann_gp_kbd_mkout_0] = { writable, 0x3530 },
+	[ann_gp_kbd_mkout_1] = { writable, 0x3534 },
+	[ann_gp_kbd_mkout_2] = { writable, 0x3538 },
+	[ann_gp_kbd_mkout_3] = { writable, 0x353C },
+	[ann_gp_kbd_mkout_4] = { writable, 0x3540 },
+	[ann_gp_kbd_mkout_5] = { writable, 0x3544 },
+	[ann_gp_kbd_mkout_6] = { writable, 0x3548 },
+	[ann_gp_kbd_mkout_7] = { writable, 0x354C },
+
+	/* gpiopmic */
+	[ann_prochot_b] = { writable, 0x3100 },
+	[ann_resetout_b] = { writable, 0x3104 },
+	[ann_rtc_clk] = { writable, 0x3108 },
+	[ann_standby] = { writable, 0x310C },
+	[ann_svid_alert_b] = { writable, 0x3110 },
+	[ann_svid_vclk] = { writable, 0x3114 },
+	[ann_svid_vdio] = { writable, 0x3118 },
+	[ann_thermtrip_b] = { writable, 0x311C },
+	[ann_xxpmode] = { writable, 0x3120 },
+	[ann_xxprdy] = { writable, 0x3124 },
+	[ann_xxpreq_b] = { writable, 0x3128 },
+	[ann_gp_fast_int_0] = { writable, 0x312C },
+	[ann_gp_fast_int_1] = { writable, 0x3130 },
+	[ann_gp_fast_int_2] = { writable, 0x3134 },
+	[ann_gp_fast_int_3] = { writable, 0x3138 },
+
+	/* gpiopti */
+	[ann_gp_mpti_clk] = { writable, 0x3D00 },
+	[ann_gp_mpti_data_0] = { writable, 0x3D04 },
+	[ann_gp_mpti_data_1] = { writable, 0x3D08 },
+	[ann_gp_mpti_data_2] = { writable, 0x3D0C },
+	[ann_gp_mpti_data_3] = { writable, 0x3D10 },
+	[ann_gp_0] = { writable, 0x3D14 },
+	[ann_gp_1] = { writable, 0x3D18 },
+	[ann_gp_2] = { writable, 0x3D1C },
+	[ann_gp_3] = { writable, 0x3D20 },
+	[ann_gp_4] = { writable, 0x3D24 },
+	[ann_gp_5] = { writable, 0x3D28 },
+	[ann_gp_6] = { writable, 0x3D2C },
+	[ann_gp_7] = { writable, 0x3D30 },
+	[ann_gp_8] = { writable, 0x3D34 },
+	[ann_gp_9] = { writable, 0x3D38 },
+	[ann_gp_10] = { writable, 0x3D3C },
+	[ann_gp_11] = { writable, 0x3D40 },
+	[ann_jtag_tckc] = { writable, 0x3D44 },
+	[ann_jtag_tdic] = { writable, 0x3D48 },
+	[ann_jtag_tdoc] = { writable, 0x3D4C },
+	[ann_jtag_tmsc] = { writable, 0x3D50 },
+	[ann_jtag_trst_b] = { writable, 0x3D54 },
+
+	/* gpiosdio */
+	[ann_gp_sdio_0_cd_b] = { writable, 0xD00 },
+	[ann_gp_sdio_0_clk] = { writable, 0xD04 },
+	[ann_gp_sdio_0_cmd] = { writable, 0xD08 },
+	[ann_gp_sdio_0_dat_0] = { writable, 0xD0C },
+	[ann_gp_sdio_0_dat_1] = { writable, 0xD10 },
+	[ann_gp_sdio_0_dat_2] = { writable, 0xD14 },
+	[ann_gp_sdio_0_dat_3] = { writable, 0xD18 },
+	[ann_gp_sdio_0_lvl_clk_fb] = { writable, 0xD1C },
+	[ann_gp_sdio_0_lvl_cmd_dir] = { writable, 0xD20 },
+	[ann_gp_sdio_0_lvl_dat_dir] = { writable, 0xD24 },
+	[ann_gp_sdio_0_lvl_sel] = { writable, 0xD28 },
+	[ann_gp_sdio_0_powerdown_b] = { writable, 0xD2C },
+	[ann_gp_sdio_0_wp] = { writable, 0xD30 },
+	[ann_gp_sdio_1_clk] = { writable, 0xD34 },
+	[ann_gp_sdio_1_cmd] = { writable, 0xD38 },
+	[ann_gp_sdio_1_dat_0] = { writable, 0xD3C },
+	[ann_gp_sdio_1_dat_1] = { writable, 0xD40 },
+	[ann_gp_sdio_1_dat_2] = { writable, 0xD44 },
+	[ann_gp_sdio_1_dat_3] = { writable, 0xD48 },
+	[ann_gp_sdio_1_powerdown_b] = { writable, 0xD4C },
+
+	/* gpiossp */
+	[ann_gp_ssp_3_clk] = { writable, 0x1900 },
+	[ann_gp_ssp_3_fs] = { writable, 0x1904 },
+	[ann_gp_ssp_3_rxd] = { writable, 0x1908 },
+	[ann_gp_ssp_3_txd] = { writable, 0x190C },
+	[ann_gp_ssp_4_clk] = { writable, 0x1910 },
+	[ann_gp_ssp_4_fs_0] = { writable, 0x1914 },
+	[ann_gp_ssp_4_fs_1] = { writable, 0x1918 },
+	[ann_gp_ssp_4_fs_2] = { writable, 0x191C },
+	[ann_gp_ssp_4_fs_3] = { writable, 0x1920 },
+	[ann_gp_ssp_4_rxd] = { writable, 0x1924 },
+	[ann_gp_ssp_4_txd] = { writable, 0x1928 },
+	[ann_gp_ssp_5_clk] = { writable, 0x192C },
+	[ann_gp_ssp_5_fs_0] = { writable, 0x1930 },
+	[ann_gp_ssp_5_fs_1] = { writable, 0x1934 },
+	[ann_gp_ssp_5_fs_2] = { writable, 0x1938 },
+	[ann_gp_ssp_5_fs_3] = { writable, 0x193C },
+	[ann_gp_ssp_5_rxd] = { writable, 0x1940 },
+	[ann_gp_ssp_5_txd] = { writable, 0x1944 },
+	[ann_gp_ssp_6_clk] = { writable, 0x1948 },
+	[ann_gp_ssp_6_fs] = { writable, 0x194C },
+	[ann_gp_ssp_6_rxd] = { writable, 0x1950 },
+	[ann_gp_ssp_6_txd] = { writable, 0x1954 },
+
+	/* gpiosspa */
+	[ann_gp_mslim_0_bclk] = { writable, 0x1500 },
+	[ann_gp_mslim_0_bdat] = { writable, 0x1504 },
+	[ann_gp_ssp_0_clk] = { writable, 0x1508 },
+	[ann_gp_ssp_0_fs] = { writable, 0x150C },
+	[ann_gp_ssp_0_rxd] = { writable, 0x1510 },
+	[ann_gp_ssp_0_txd] = { writable, 0x1514 },
+	[ann_gp_ssp_1_clk] = { writable, 0x1518 },
+	[ann_gp_ssp_1_fs] = { writable, 0x151C },
+	[ann_gp_ssp_1_rxd] = { writable, 0x1520 },
+	[ann_gp_ssp_1_txd] = { writable, 0x1524 },
+	[ann_gp_ssp_2_clk] = { writable, 0x1528 },
+	[ann_gp_ssp_2_fs] = { writable, 0x152C },
+	[ann_gp_ssp_2_rxd] = { writable, 0x1530 },
+	[ann_gp_ssp_2_txd] = { writable, 0x1534 },
+
+	/* gpiouart */
+	[ann_gp_uart_0_cts] = { writable, 0x2100 },
+	[ann_gp_uart_0_rts] = { writable, 0x2104 },
+	[ann_gp_uart_0_rx] = { writable, 0x2108 },
+	[ann_gp_uart_0_tx] = { writable, 0x210C },
+	[ann_gp_uart_1_cts] = { writable, 0x2110 },
+	[ann_gp_uart_1_rts] = { writable, 0x2114 },
+	[ann_gp_uart_1_rx] = { writable, 0x2118 },
+	[ann_gp_uart_1_tx] = { writable, 0x211C },
+	[ann_gp_uart_2_cts] = { writable, 0x2120 },
+	[ann_gp_uart_2_rts] = { writable, 0x2124 },
+	[ann_gp_uart_2_rx] = { writable, 0x2128 },
+	[ann_gp_uart_2_tx] = { writable, 0x212C },
+	[ann_gp_32] = { writable, 0x2130 },
+	[ann_gp_33] = { writable, 0x2134 },
+	[ann_gp_34] = { writable, 0x2138 },
+	[ann_gp_35] = { writable, 0x213C },
+	[ann_gp_36] = { writable, 0x2140 },
+	[ann_gp_37] = { writable, 0x2144 },
+	[ann_gp_38] = { writable, 0x2148 },
+	[ann_gp_39] = { writable, 0x214C },
+	[ann_gp_40] = { writable, 0x2150 },
+	[ann_gp_pwm_0] = { writable, 0x2154 },
+	[ann_gp_pwm_1] = { writable, 0x2158 },
+
+	/* gpioulpi */
+	[ann_gp_ulpi_0_clk] = { writable, 0x500 },
+	[ann_gp_ulpi_0_data_0] = { writable, 0x504 },
+	[ann_gp_ulpi_0_data_1] = { writable, 0x508 },
+	[ann_gp_ulpi_0_data_2] = { writable, 0x50C },
+	[ann_gp_ulpi_0_data_3] = { writable, 0x510 },
+	[ann_gp_ulpi_0_data_4] = { writable, 0x514 },
+	[ann_gp_ulpi_0_data_5] = { writable, 0x518 },
+	[ann_gp_ulpi_0_data_6] = { writable, 0x51C },
+	[ann_gp_ulpi_0_data_7] = { writable, 0x520 },
+	[ann_gp_ulpi_0_dir] = { writable, 0x524 },
+	[ann_gp_ulpi_0_nxt] = { writable, 0x528 },
+	[ann_gp_ulpi_0_refclk] = { writable, 0x52C },
+	[ann_gp_ulpi_0_stp] = { writable, 0x530 },
+};
+
+static int __init intel_scu_flis_init(void)
+{
+	int ret;
+	struct platform_device *pdev = NULL;
+	static struct intel_scu_flis_platform_data flis_pdata;
+
+	flis_pdata.pin_t = NULL;
+	flis_pdata.pin_num = ANN_PIN_TABLE_SIZE;
+	flis_pdata.flis_base = 0xFF0C0000;
+	flis_pdata.flis_len = 0x8000;
+	flis_pdata.mmio_flis_t = ann_pin_mmio_flis_table;
+
+	pdev = platform_device_alloc(FLIS_DEVICE_NAME, -1);
+	if (!pdev) {
+		pr_err("out of memory for platform dev %s\n", FLIS_DEVICE_NAME);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	pdev->dev.platform_data = &flis_pdata;
+
+	ret = platform_device_add(pdev);
+	if (ret) {
+		pr_err("failed to add flis platform device\n");
+		platform_device_put(pdev);
+		goto out;
+	}
+
+	pr_info("intel_scu_flis platform device created\n");
+out:
+	return ret;
+}
+fs_initcall(intel_scu_flis_init);
+
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_scu_flis.h b/arch/x86/platform/intel-mid/device_libs/platform_scu_flis.h
new file mode 100644
index 0000000..cf48ae5
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_scu_flis.h
@@ -0,0 +1,17 @@
+/*
+ * platform_scu_flis.h: scu_flis platform data header file
+ *
+ * (C) Copyright 2012 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_SCU_FLIS_H_
+#define _PLATFORM_SCU_FLIS_H_
+
+#define FLIS_DEVICE_NAME "intel_scu_flis"
+
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_scu_log.c b/arch/x86/platform/intel-mid/device_libs/platform_scu_log.c
new file mode 100644
index 0000000..ffd1348
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_scu_log.c
@@ -0,0 +1,39 @@
+/*
+ * platform_scu_log.c: Platform data for intel_fw_logging driver.
+ *
+ * (C) Copyright 2013 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sfi.h>
+#include <linux/irq.h>
+#include <asm/intel-mid.h>
+
+void __init *scu_log_platform_data(void *info)
+{
+	struct sfi_device_table_entry *entry = info;
+	struct platform_device *pdev;
+	int ret;
+
+	pdev = platform_device_alloc(entry->name, -1);
+	if (!pdev) {
+		pr_err("Out of memory for SFI platform dev %s\n", entry->name);
+		goto out;
+	}
+	ret = platform_device_add(pdev);
+	if (ret) {
+		pr_err("Failed to add platform device\n");
+		platform_device_put(pdev);
+		goto out;
+	}
+	irq_set_status_flags(entry->irq, IRQ_NOAUTOEN);
+	install_irq_resource(pdev, entry->irq);
+out:
+	return NULL;
+}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_scu_log.h b/arch/x86/platform/intel-mid/device_libs/platform_scu_log.h
new file mode 100644
index 0000000..2a04523
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_scu_log.h
@@ -0,0 +1,17 @@
+/*
+ * platform_scu_log.c: Platform data for intel_fw_logging driver.
+ *
+ * (C) Copyright 2013 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_SCU_LOG_H_
+#define _PLATFORM_SCU_LOG_H_
+
+extern void __init *scu_log_platform_data(
+	void *info) __attribute__((weak));
+
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_soc_thermal.c b/arch/x86/platform/intel-mid/device_libs/platform_soc_thermal.c
new file mode 100644
index 0000000..46b697b
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_soc_thermal.c
@@ -0,0 +1,68 @@
+/*
+ * platform_soc_thermal.c: Platform data for SoC DTS driver
+ *
+ * (C) Copyright 2013 Intel Corporation
+ * Author: Durgadoss R <durgadoss.r@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#define pr_fmt(fmt)  "intel_soc_thermal: " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+
+#include "platform_soc_thermal.h"
+
+#include <asm/intel-mid.h>
+#include <asm/intel_mid_thermal.h>
+
+static struct resource res = {
+		.flags = IORESOURCE_IRQ,
+};
+
+/* Annidale based MOFD platform for Phone FFD */
+static struct soc_throttle_data ann_mofd_soc_data[] = {
+	{
+		.power_limit = 0xbb, /* 6W */
+		.floor_freq = 0x00,
+	},
+	{
+		.power_limit = 0x41, /* 2.1W */
+		.floor_freq = 0x01,
+	},
+	{
+		.power_limit = 0x1C, /* 0.9W */
+		.floor_freq = 0x01,
+	},
+	{
+		.power_limit = 0x1C, /* 0.9W */
+		.floor_freq = 0x01,
+	},
+};
+
+void soc_thrm_device_handler(struct sfi_device_table_entry *pentry,
+				struct devs_id *dev)
+{
+	int ret;
+	struct platform_device *pdev;
+
+	pr_info("IPC bus = %d, name = %16.16s, irq = 0x%2x\n",
+		pentry->host_num, pentry->name, pentry->irq);
+
+	res.start = pentry->irq;
+
+	pdev = platform_device_register_simple(pentry->name, -1,
+					(const struct resource *)&res, 1);
+	if (IS_ERR(pdev)) {
+		ret = PTR_ERR(pdev);
+		pr_err("platform_soc_thermal:pdev_register failed: %d\n", ret);
+	}
+
+	pdev->dev.platform_data = &ann_mofd_soc_data;
+}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_soc_thermal.h b/arch/x86/platform/intel-mid/device_libs/platform_soc_thermal.h
new file mode 100644
index 0000000..c8ece32
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_soc_thermal.h
@@ -0,0 +1,20 @@
+/*
+ * platform_soc_thermal.h: platform SoC thermal driver library header file
+ *
+ * (C) Copyright 2013 Intel Corporation
+ * Author: Durgadoss R <durgadoss.r@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_SOC_THERMAL_H_
+#define _PLATFORM_SOC_THERMAL_H_
+
+#include <linux/sfi.h>
+#include <asm/intel-mid.h>
+
+extern void soc_thrm_device_handler(struct sfi_device_table_entry *,
+			struct devs_id *) __attribute__((weak));
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_sst_audio.c b/arch/x86/platform/intel-mid/device_libs/platform_sst_audio.c
new file mode 100644
index 0000000..323d8cc
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_sst_audio.c
@@ -0,0 +1,123 @@
+/*
+ * platform_sst_libs.c: SST platform  data initilization file
+ *
+ * Copyright (C) 2012 Intel Corporation
+ * Author: Jeeja KP <jeeja.kp@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sfi.h>
+#include <linux/platform_device.h>
+#include <asm/platform_sst_audio.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_sst_mrfld.h>
+#include <sound/asound.h>
+
+static struct sst_platform_data sst_platform_pdata;
+
+static struct sst_dev_stream_map mrfld_strm_map[] = {
+	{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, /* Reserved, not in use */
+	{MERR_SALTBAY_AUDIO, 0, SNDRV_PCM_STREAM_PLAYBACK, PIPE_RSVD, SST_TASK_ID_MEDIA, SST_DEV_MAP_FREE},
+	{MERR_SALTBAY_AUDIO, 1, SNDRV_PCM_STREAM_PLAYBACK, PIPE_RSVD, SST_TASK_ID_MEDIA, SST_DEV_MAP_FREE},
+	{MERR_SALTBAY_AUDIO, 2, SNDRV_PCM_STREAM_PLAYBACK, PIPE_RSVD, SST_TASK_ID_MEDIA, SST_DEV_MAP_FREE},
+	{MERR_SALTBAY_COMPR, 0, SNDRV_PCM_STREAM_PLAYBACK, PIPE_MEDIA0_IN, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+	{MERR_SALTBAY_VOIP, 0, SNDRV_PCM_STREAM_PLAYBACK, PIPE_VOIP_IN, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+	{MERR_SALTBAY_AUDIO, 0, SNDRV_PCM_STREAM_CAPTURE, PIPE_PCM1_OUT, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+	{MERR_SALTBAY_VOIP, 0, SNDRV_PCM_STREAM_CAPTURE, PIPE_VOIP_OUT, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+	{MERR_SALTBAY_PROBE, 0, SNDRV_PCM_STREAM_CAPTURE, PIPE_RSVD, SST_TASK_ID_MEDIA, SST_DEV_MAP_FREE},
+	{MERR_SALTBAY_PROBE, 1, SNDRV_PCM_STREAM_CAPTURE, PIPE_RSVD, SST_TASK_ID_MEDIA, SST_DEV_MAP_FREE},
+	{MERR_SALTBAY_PROBE, 2, SNDRV_PCM_STREAM_CAPTURE, PIPE_RSVD, SST_TASK_ID_MEDIA, SST_DEV_MAP_FREE},
+	{MERR_SALTBAY_PROBE, 3, SNDRV_PCM_STREAM_CAPTURE, PIPE_RSVD, SST_TASK_ID_MEDIA, SST_DEV_MAP_FREE},
+	{MERR_SALTBAY_PROBE, 4, SNDRV_PCM_STREAM_CAPTURE, PIPE_RSVD, SST_TASK_ID_MEDIA, SST_DEV_MAP_FREE},
+	{MERR_SALTBAY_PROBE, 5, SNDRV_PCM_STREAM_CAPTURE, PIPE_RSVD, SST_TASK_ID_MEDIA, SST_DEV_MAP_FREE},
+	{MERR_SALTBAY_PROBE, 6, SNDRV_PCM_STREAM_CAPTURE, PIPE_RSVD, SST_TASK_ID_MEDIA, SST_DEV_MAP_FREE},
+	{MERR_SALTBAY_PROBE, 7, SNDRV_PCM_STREAM_CAPTURE, PIPE_RSVD, SST_TASK_ID_MEDIA, SST_DEV_MAP_FREE},
+	{MERR_SALTBAY_PROBE, 0, SNDRV_PCM_STREAM_PLAYBACK, PIPE_RSVD, SST_TASK_ID_MEDIA, SST_DEV_MAP_FREE},
+	{MERR_SALTBAY_PROBE, 1, SNDRV_PCM_STREAM_PLAYBACK, PIPE_RSVD, SST_TASK_ID_MEDIA, SST_DEV_MAP_FREE},
+	{MERR_SALTBAY_PROBE, 2, SNDRV_PCM_STREAM_PLAYBACK, PIPE_RSVD, SST_TASK_ID_MEDIA, SST_DEV_MAP_FREE},
+	{MERR_SALTBAY_PROBE, 3, SNDRV_PCM_STREAM_PLAYBACK, PIPE_RSVD, SST_TASK_ID_MEDIA, SST_DEV_MAP_FREE},
+	{MERR_SALTBAY_PROBE, 4, SNDRV_PCM_STREAM_PLAYBACK, PIPE_RSVD, SST_TASK_ID_MEDIA, SST_DEV_MAP_FREE},
+	{MERR_SALTBAY_PROBE, 5, SNDRV_PCM_STREAM_PLAYBACK, PIPE_RSVD, SST_TASK_ID_MEDIA, SST_DEV_MAP_FREE},
+	{MERR_SALTBAY_PROBE, 6, SNDRV_PCM_STREAM_PLAYBACK, PIPE_RSVD, SST_TASK_ID_MEDIA, SST_DEV_MAP_FREE},
+	{MERR_SALTBAY_PROBE, 7, SNDRV_PCM_STREAM_PLAYBACK, PIPE_RSVD, SST_TASK_ID_MEDIA, SST_DEV_MAP_FREE},
+	{MERR_SALTBAY_AWARE, 0, SNDRV_PCM_STREAM_CAPTURE, PIPE_AWARE_OUT, SST_TASK_ID_AWARE, SST_DEV_MAP_IN_USE},
+	{MERR_SALTBAY_VAD, 0, SNDRV_PCM_STREAM_CAPTURE, PIPE_VAD_OUT, SST_TASK_ID_AWARE, SST_DEV_MAP_IN_USE},
+};
+
+#define EQ_EFFECT_ALGO_ID 0x99
+static struct sst_dev_effects_map mrfld_effs_map[] = {
+	{
+	  {0xc1, 0x47, 0xa2, 0xf7, 0x7b, 0x1a, 0xe0, 0x11, 0x0d, 0xbb, 0x2a, 0x30, 0xdf, 0xd7, 0x20, 0x45},/* uuid */
+	   EQ_EFFECT_ALGO_ID,										   /* algo id */
+	  {0x00, 0x43, 0xed, 0x0b, 0xd6, 0xdd, 0xdb, 0x11, 0x34, 0x8f, 0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b, /* descriptor */
+	   0xc1, 0x47, 0xa2, 0xf7, 0x7b, 0x1a, 0xe0, 0x11, 0x0d, 0xbb, 0x2a, 0x30, 0xdf, 0xd7, 0x20, 0x45,
+	   0x12, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x45, 0x71, 0x75, 0x61,
+	   0x6c, 0x69, 0x7a, 0x65, 0x72, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x49, 0x6e, 0x74, 0x65,
+	   0x6c, 0x20, 0x43, 0x6f, 0x72, 0x70, 0x6f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, 0x00,
+	   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+	  },
+	}
+};
+
+static struct sst_dev_effects_resource_map mrfld_effs_res_map[] = {
+	{
+	 {0xc1, 0x47, 0xa2, 0xf7, 0x7b, 0x1a, 0xe0, 0x11, 0x0d, 0xbb, 0x2a, 0x30, 0xdf, 0xd7, 0x20, 0x45}, /* uuid */
+	  0x50, /* Flags */
+	  0x00, /* Cpu load */
+	  0x01, /* Memory Usage */
+	 }
+};
+
+static void set_mrfld_platform_config(void)
+{
+	sst_platform_pdata.pdev_strm_map = mrfld_strm_map;
+	sst_platform_pdata.strm_map_size = ARRAY_SIZE(mrfld_strm_map);
+	sst_platform_pdata.pdev_effs.effs_map = mrfld_effs_map;
+	sst_platform_pdata.pdev_effs.effs_res_map = mrfld_effs_res_map;
+	sst_platform_pdata.pdev_effs.effs_num_map = ARRAY_SIZE(mrfld_effs_map);
+}
+
+static void  populate_platform_data(void)
+{
+	set_mrfld_platform_config();
+}
+
+int add_sst_platform_device(void)
+{
+	struct platform_device *pdev = NULL;
+	int ret;
+
+	populate_platform_data();
+
+	pdev = platform_device_alloc("sst-platform", -1);
+	if (!pdev) {
+		pr_err("failed to allocate audio platform device\n");
+		return -EINVAL;
+	}
+
+	ret = platform_device_add_data(pdev, &sst_platform_pdata,
+					sizeof(sst_platform_pdata));
+	if (ret) {
+		pr_err("failed to add sst platform data\n");
+		platform_device_put(pdev);
+		return  -EINVAL;
+	}
+	ret = platform_device_add(pdev);
+	if (ret) {
+		pr_err("failed to add audio platform device\n");
+		platform_device_put(pdev);
+		return  -EINVAL;
+	}
+	return ret;
+}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_tc35876x.c b/arch/x86/platform/intel-mid/device_libs/platform_tc35876x.c
new file mode 100644
index 0000000..127091d
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_tc35876x.c
@@ -0,0 +1,26 @@
+/*
+ * platform_tc35876x.c: tc35876x platform data initilization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/gpio.h>
+#include <linux/i2c/tc35876x.h>
+#include <asm/intel-mid.h>
+#include "platform_tc35876x.h"
+
+/*tc35876x DSI_LVDS bridge chip and panel platform data*/
+void *tc35876x_platform_data(void *data)
+{
+	static struct tc35876x_platform_data pdata;
+	pdata.gpio_bridge_reset = get_gpio_by_name("LCMB_RXEN");
+	pdata.gpio_panel_bl_en = get_gpio_by_name("6S6P_BL_EN");
+	pdata.gpio_panel_vadd = get_gpio_by_name("EN_VREG_LCD_V3P3");
+	return &pdata;
+}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_tc35876x.h b/arch/x86/platform/intel-mid/device_libs/platform_tc35876x.h
new file mode 100644
index 0000000..56a74eb
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_tc35876x.h
@@ -0,0 +1,16 @@
+/*
+ * platform_tc35876x.h: tc35876x platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_TC35876X_H_
+#define _PLATFORM_TC35876X_H_
+
+extern void *tc35876x_platform_data(void *info) __attribute__((weak));
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_tca6416.c b/arch/x86/platform/intel-mid/device_libs/platform_tca6416.c
new file mode 100644
index 0000000..98a6cd1
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_tca6416.c
@@ -0,0 +1,45 @@
+/*
+ * platform_tca6416.c: tca6416 platform data initilization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/i2c/pca953x.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <asm/intel-mid.h>
+#include "platform_tca6416.h"
+
+void *tca6416_platform_data(void *info)
+{
+	static struct pca953x_platform_data tca6416;
+	struct i2c_board_info *i2c_info = info;
+	int gpio_base, intr;
+	char base_pin_name[SFI_NAME_LEN + 1];
+	char intr_pin_name[SFI_NAME_LEN + 1];
+
+	strcpy(i2c_info->type, TCA6416_NAME);
+	strcpy(base_pin_name, TCA6416_BASE);
+	strcpy(intr_pin_name, TCA6416_INTR);
+
+	gpio_base = get_gpio_by_name(base_pin_name);
+	intr = get_gpio_by_name(intr_pin_name);
+
+	if (gpio_base == -1)
+		return NULL;
+	tca6416.gpio_base = gpio_base;
+	if (intr != -1) {
+		i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET;
+		tca6416.irq_base = gpio_base + INTEL_MID_IRQ_OFFSET;
+	} else {
+		i2c_info->irq = -1;
+		tca6416.irq_base = -1;
+	}
+	return &tca6416;
+}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_tca6416.h b/arch/x86/platform/intel-mid/device_libs/platform_tca6416.h
new file mode 100644
index 0000000..69802d6
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_tca6416.h
@@ -0,0 +1,20 @@
+/*
+ * platform_tca6416.h: tca6416 platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_TCA6416_H_
+#define _PLATFORM_TCA6416_H_
+
+#define TCA6416_NAME	"tca6416"
+#define TCA6416_BASE	"tca6416_base"
+#define TCA6416_INTR	"tca6416_int"
+
+extern void *tca6416_platform_data(void *info) __attribute__((weak));
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_wifi.c b/arch/x86/platform/intel-mid/device_libs/platform_wifi.c
new file mode 100644
index 0000000..bf2cc84
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_wifi.c
@@ -0,0 +1,124 @@
+/*
+ * platform_bcm43xx.c: bcm43xx platform data initilization file
+ *
+ * (C) Copyright 2011 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/gpio.h>
+#include <linux/lnw_gpio.h>
+#include <asm/intel-mid.h>
+#include <linux/wlan_plat.h>
+#include <linux/interrupt.h>
+#include <linux/mmc/sdhci.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include "pci/platform_sdhci_pci.h"
+#include "platform_wifi.h"
+
+static struct resource wifi_res[] = {
+	{
+	.name = "wlan_irq",
+	.start = -1,
+	.end = -1,
+	.flags = IORESOURCE_IRQ | IRQF_TRIGGER_FALLING ,
+	},
+};
+
+static struct wifi_platform_data pdata;
+
+static struct platform_device wifi_device = {
+	.name = "wlan",
+	.dev = {
+		.platform_data = &pdata,
+		},
+	.num_resources = ARRAY_SIZE(wifi_res),
+	.resource = wifi_res,
+};
+
+static const unsigned int sdhci_quirk = SDHCI_QUIRK2_NON_STD_CIS |
+		SDHCI_QUIRK2_ENABLE_MMC_PM_IGNORE_PM_NOTIFY;
+
+static void __init wifi_platform_data_init_sfi_fastirq(struct sfi_device_table_entry *pentry,
+						       bool should_register)
+{
+	/* If the GPIO mode was previously called, this code overloads
+	   the IRQ anyway */
+	wifi_res[0].start = wifi_res[0].end = pentry->irq;
+	wifi_res[0].flags = IORESOURCE_IRQ | IRQF_TRIGGER_HIGH;
+
+	pr_info("wifi_platform_data: IRQ == %d\n", pentry->irq);
+
+	if (should_register && platform_device_register(&wifi_device) < 0)
+		pr_err("platform_device_register failed for wifi_device\n");
+}
+
+/* Called if SFI device WLAN is present */
+void __init wifi_platform_data_fastirq(struct sfi_device_table_entry *pe,
+				       struct devs_id *dev)
+{
+	/* This is used in the driver to know if it is GPIO/FastIRQ */
+	pdata.use_fast_irq = true;
+
+	if (wifi_res[0].start == -1) {
+		pr_info("Using WiFi platform data (Fast IRQ)\n");
+
+		/* Set vendor specific SDIO quirks */
+		sdhci_pdata_set_quirks(sdhci_quirk);
+		wifi_platform_data_init_sfi_fastirq(pe, true);
+	} else {
+		pr_info("Using WiFi platform data (Fast IRQ, overloading GPIO mode set previously)\n");
+		/* We do not register platform device, as it's already been
+		   done by wifi_platform_data */
+		wifi_platform_data_init_sfi_fastirq(pe, false);
+	}
+
+}
+
+/* GPIO legacy code path */
+static void __init wifi_platform_data_init_sfi_gpio(void)
+{
+	int wifi_irq_gpio = -1;
+
+	/*Get GPIO numbers from the SFI table*/
+	wifi_irq_gpio = get_gpio_by_name(WIFI_SFI_GPIO_IRQ_NAME);
+	if (wifi_irq_gpio < 0) {
+		pr_err("%s: Unable to find " WIFI_SFI_GPIO_IRQ_NAME
+		       " WLAN-interrupt GPIO in the SFI table\n",
+		       __func__);
+		return;
+	}
+
+	wifi_res[0].start = wifi_res[0].end = wifi_irq_gpio;
+	pr_info("wifi_platform_data: GPIO == %d\n", wifi_irq_gpio);
+
+	if (platform_device_register(&wifi_device) < 0)
+		pr_err("platform_device_register failed for wifi_device\n");
+}
+
+/* Called from board.c */
+void __init *wifi_platform_data(void *info)
+{
+	/* When fast IRQ platform data has been called first, don't pursue */
+	if (wifi_res[0].start != -1)
+		return NULL;
+
+	pr_info("Using generic wifi platform data\n");
+
+	/* Set vendor specific SDIO quirks */
+#ifdef CONFIG_MMC_SDHCI_PCI
+	sdhci_pdata_set_quirks(sdhci_quirk);
+#endif
+
+#ifndef CONFIG_ACPI
+	/* We are SFI here, register platform device */
+	wifi_platform_data_init_sfi_gpio();
+#endif
+
+	return &wifi_device;
+}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_wifi.h b/arch/x86/platform/intel-mid/device_libs/platform_wifi.h
new file mode 100644
index 0000000..29920a7
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_wifi.h
@@ -0,0 +1,22 @@
+/*
+ * platform_wifi.h: WiFi platform data header file
+ *
+ * (C) Copyright 2011 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_WIFI_H_
+#define _PLATFORM_WIFI_H_
+
+#define WIFI_SFI_GPIO_IRQ_NAME "WLAN-interrupt"
+#define WIFI_SFI_GPIO_ENABLE_NAME "WLAN-enable"
+
+extern void __init *wifi_platform_data(void *info) __attribute__((weak));
+extern void wifi_platform_data_fastirq(struct sfi_device_table_entry *pe,
+				       struct devs_id *dev) __attribute__((weak));
+
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_wm8994.c b/arch/x86/platform/intel-mid/device_libs/platform_wm8994.c
new file mode 100644
index 0000000..0c24e8b
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_wm8994.c
@@ -0,0 +1,210 @@
+/*
+ * platform_wm8994.c: wm8994 platform data initilization file
+ *
+ * (C) Copyright 2013 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/gpio.h>
+#include <linux/lnw_gpio.h>
+#include <asm/intel-mid.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/fixed.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/mfd/wm8994/pdata.h>
+#include "platform_wm8994.h"
+
+/***********WM89941 REGUATOR platform data*************/
+static struct regulator_consumer_supply vwm89941_consumer[] = {
+	REGULATOR_SUPPLY("DBVDD", "1-001a"),
+	REGULATOR_SUPPLY("DBVDD1", "1-001a"),
+	REGULATOR_SUPPLY("DBVDD2", "1-001a"),
+	REGULATOR_SUPPLY("DBVDD3", "1-001a"),
+	REGULATOR_SUPPLY("AVDD2", "1-001a"),
+	REGULATOR_SUPPLY("CPVDD", "1-001a"),
+};
+
+static struct regulator_init_data vwm89941_data = {
+		.constraints = {
+			.always_on = 1,
+		},
+		.num_consumer_supplies	=	ARRAY_SIZE(vwm89941_consumer),
+		.consumer_supplies	=	vwm89941_consumer,
+};
+
+static struct fixed_voltage_config vwm89941_config = {
+	.supply_name	= "VCC_1.8V_PDA",
+	.microvolts	= 1800000,
+	.gpio		= -EINVAL,
+	.init_data	= &vwm89941_data,
+};
+
+static struct platform_device vwm89941_device = {
+	.name = "reg-fixed-voltage",
+	.id = PLATFORM_DEVID_AUTO,
+	.dev = {
+		.platform_data = &vwm89941_config,
+	},
+};
+
+/***********WM89942 REGUATOR platform data*************/
+static struct regulator_consumer_supply vwm89942_consumer[] = {
+	REGULATOR_SUPPLY("SPKVDD1", "1-001a"),
+	REGULATOR_SUPPLY("SPKVDD2", "1-001a"),
+};
+
+static struct regulator_init_data vwm89942_data = {
+		.constraints = {
+			.always_on = 1,
+		},
+		.num_consumer_supplies	=	ARRAY_SIZE(vwm89942_consumer),
+		.consumer_supplies	=	vwm89942_consumer,
+};
+
+static struct fixed_voltage_config vwm89942_config = {
+	.supply_name	= "V_BAT",
+	.microvolts	= 3700000,
+	.gpio		= -EINVAL,
+	.init_data  = &vwm89942_data,
+};
+
+static struct platform_device vwm89942_device = {
+	.name = "reg-fixed-voltage",
+	.id = PLATFORM_DEVID_AUTO,
+	.dev = {
+		.platform_data = &vwm89942_config,
+	},
+};
+
+static struct platform_device *wm8958_reg_devices[] __initdata = {
+	&vwm89941_device,
+	&vwm89942_device
+};
+
+static struct regulator_consumer_supply wm8994_avdd1_supply =
+	REGULATOR_SUPPLY("AVDD1", "1-001a");
+
+static struct regulator_consumer_supply wm8994_dcvdd_supply =
+	REGULATOR_SUPPLY("DCVDD", "1-001a");
+
+static struct regulator_init_data wm8994_ldo1_data = {
+	.constraints	= {
+		.always_on	= 1,
+		.name		= "AVDD1_3.0V",
+		.valid_ops_mask	= REGULATOR_CHANGE_STATUS,
+	},
+	.num_consumer_supplies	= 1,
+	.consumer_supplies	= &wm8994_avdd1_supply,
+};
+
+static struct regulator_init_data wm8994_ldo2_data = {
+	.constraints	= {
+		.always_on	= 1,
+		.name		= "DCVDD_1.0V",
+	},
+	.num_consumer_supplies	= 1,
+	.consumer_supplies	= &wm8994_dcvdd_supply,
+};
+
+static struct  wm8958_custom_config custom_config = {
+	.format = 6,
+	.rate = 48000,
+	.channels = 2,
+};
+
+static struct wm8994_pdata wm8994_pdata = {
+	/* configure gpio1 function: 0x0001(Logic level input/output) */
+	.gpio_defaults[0] = 0x0003,
+	.irq_flags = IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+	/* FIXME: Below are 1811A specfic, we need to use SPID for these */
+
+	/* configure gpio3/4/5/7 function for AIF2 voice */
+	.gpio_defaults[2] = 0x8100,
+	.gpio_defaults[3] = 0x8100,
+	.gpio_defaults[4] = 0x8100,
+	.gpio_defaults[6] = 0x0100,
+	/* configure gpio8/9/10/11 function for AIF3 BT */
+	/* gpio7 is codec intr pin for GV M2 */
+	.gpio_defaults[7] = 0x0003,
+	.gpio_defaults[8] = 0x0105,
+	.gpio_defaults[9] = 0x0100,
+	.gpio_defaults[10] = 0x0100,
+	.ldo[0]	= { 0, &wm8994_ldo1_data }, /* set actual value at wm8994_platform_data() */
+	.ldo[1]	= { 0, &wm8994_ldo2_data },
+	.ldo_ena_always_driven = 1,
+
+	.mic_id_delay = 300, /*300ms delay*/
+	.micdet_delay = 500,
+	.micb_en_delay = 5000, /* Keeps MICBIAS2 high for 5sec during jack insertion/removal */
+
+	.custom_cfg = &custom_config,
+};
+
+static struct wm8994_pdata wm8994_mofd_pr_pdata = {
+	/* configure gpio1 function as irq */
+	.gpio_defaults[0] = 0x0003,
+
+	/* configure gpio 6 as output to control DMIC clock */
+	/* Pull up, Invert, CMOS, default value=1 (driven low due to invert) */
+	.gpio_defaults[5] = 0x4441,
+	/* configure gpio8/9/10/11 function for AIF3 BT */
+	/* GPIO8 => DAC3 (Rx) pin, configure it as alt fn & i/p */
+	/* GPIO9 => ADC3 (Tx) pin, configure it as alt fn & o/p */
+	/* GPIO10 => LRCLK (FS) pin, configure it as alt fn & o/p */
+	/* GPIO11 => BCLK pin, configure it as alt fn & o/p */
+	.gpio_defaults[7] = 0x1000,
+	.gpio_defaults[8] = 0x0100,
+	.gpio_defaults[9] = 0x0100,
+	.gpio_defaults[10] = 0x0100,
+	.irq_flags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+
+	.mic_id_delay = 300, /*300ms delay*/
+	.micdet_delay = 500,
+	.micb_en_delay = 5000, /* Keeps MICBIAS2 high for 5sec during jack insertion/removal */
+
+	.custom_cfg = &custom_config,
+};
+
+static int wm8994_get_irq_data(struct wm8994_pdata *pdata,
+			struct i2c_board_info *i2c_info, char *name)
+{
+	int codec_gpio;
+
+	/* alek tells me that since driver is registering a new chip
+	 * irq we need to give it a base which is unused so put
+	 * 256+192 here */
+	pdata->irq_base = (256 + 192);
+	codec_gpio = get_gpio_by_name(name);
+	if (codec_gpio < 0) {
+		pr_err("%s failed for : %d\n", __func__, codec_gpio);
+		return -EINVAL;
+	}
+	i2c_info->irq = codec_gpio + INTEL_MID_IRQ_OFFSET;
+	return codec_gpio;
+}
+
+void __init *wm8994_platform_data(void *info)
+{
+	struct i2c_board_info *i2c_info = (struct i2c_board_info *)info;
+	int irq = 0;
+	struct wm8994_pdata *pdata = &wm8994_pdata;
+
+	platform_add_devices(wm8958_reg_devices,
+			ARRAY_SIZE(wm8958_reg_devices));
+
+	pdata = &wm8994_mofd_pr_pdata;
+	if (!pdata)
+		return NULL;
+
+	irq = wm8994_get_irq_data(pdata, i2c_info, "audiocodec_int");
+	if (irq < 0)
+		return NULL;
+
+	return pdata;
+}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_wm8994.h b/arch/x86/platform/intel-mid/device_libs/platform_wm8994.h
new file mode 100644
index 0000000..5abead7
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_wm8994.h
@@ -0,0 +1,5 @@
+#ifndef _PLATFORM_WM8994_H_
+#define _PLATFORM_WM8994_H_
+
+extern void *wm8994_platform_data(void *info) __attribute__((weak));
+#endif
diff --git a/arch/x86/platform/intel-mid/early_printk_intel_mid.c b/arch/x86/platform/intel-mid/early_printk_intel_mid.c
new file mode 100644
index 0000000..1daa0d6
--- /dev/null
+++ b/arch/x86/platform/intel-mid/early_printk_intel_mid.c
@@ -0,0 +1,619 @@
+/*
+ * early_printk_intel_mid.c - early consoles for Intel MID platforms
+ *
+ * Copyright (c) 2008-2010, Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+/*
+ * Currently we have 3 types of early printk consoles: PTI, HSU and
+ * MAX3110 SPI-UART.
+ * PTI is available for mdfld, clv and mrfld.
+ * HSU is available for mdfld, clv and mrfld. But it depends on board design.
+ * Some boards don't have HSU UART pins routed to the connector so we can't
+ * use it.
+ * Max3110 SPI-UART is a stand-alone chip with SPI interface located in the
+ * debug card. Drivers can access to this chip via Soc's SPI controller or SSP
+ * controller(working in SPI mode).
+ * Max3110 is available for mrst, mdfld, clv and mrfld. But for mrst, mdfld
+ * and clv, MAX3110 is connected to SPI controller, for mrfld, MAX3110 is
+ * connected to SSP controller.
+ */
+
+#include <linux/serial_reg.h>
+#include <linux/serial_mfd.h>
+#include <linux/kmsg_dump.h>
+#include <linux/console.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/sched.h>
+#include <linux/hardirq.h>
+#include <linux/pti.h>
+
+#include <asm/fixmap.h>
+#include <asm/pgtable.h>
+#include <asm/intel-mid.h>
+
+#define MRST_SPI_TIMEOUT		0x200000
+#define MRST_REGBASE_SPI0		0xff128000
+#define MRST_REGBASE_SPI1		0xff128400
+#define CLV_REGBASE_SPI1		0xff135000
+#define MRST_CLK_SPI0_REG		0xff11d86c
+#define MRFLD_SSP_TIMEOUT		0x200000
+#define MRFLD_REGBASE_SSP5		0xff189000
+
+/* Bit fields in CTRLR0 */
+#define SPI_DFS_OFFSET			0
+
+#define SPI_FRF_OFFSET			4
+#define SPI_FRF_SPI			0x0
+#define SPI_FRF_SSP			0x1
+#define SPI_FRF_MICROWIRE		0x2
+#define SPI_FRF_RESV			0x3
+
+#define SPI_MODE_OFFSET			6
+#define SPI_SCPH_OFFSET			6
+#define SPI_SCOL_OFFSET			7
+#define SPI_TMOD_OFFSET			8
+#define	SPI_TMOD_TR			0x0		/* xmit & recv */
+#define SPI_TMOD_TO			0x1		/* xmit only */
+#define SPI_TMOD_RO			0x2		/* recv only */
+#define SPI_TMOD_EPROMREAD		0x3		/* eeprom read mode */
+
+#define SPI_SLVOE_OFFSET		10
+#define SPI_SRL_OFFSET			11
+#define SPI_CFS_OFFSET			12
+
+/* Bit fields in SR, 7 bits */
+#define SR_MASK				0x7f		/* cover 7 bits */
+#define SR_BUSY				(1 << 0)
+#define SR_TF_NOT_FULL			(1 << 1)
+#define SR_TF_EMPT			(1 << 2)
+#define SR_RF_NOT_EMPT			(1 << 3)
+#define SR_RF_FULL			(1 << 4)
+#define SR_TX_ERR			(1 << 5)
+#define SR_DCOL				(1 << 6)
+
+/* SR bit fields for SSP*/
+#define SSP_SR_TF_NOT_FULL		(1 << 2)
+
+static int ssp_timing_wr; /* Tangier A0 SSP timing workaround */
+
+static unsigned int early_pti_console_channel;
+static unsigned int early_pti_control_channel;
+
+/* SPI controller registers */
+struct dw_spi_reg {
+	u32	ctrl0;
+	u32	ctrl1;
+	u32	ssienr;
+	u32	mwcr;
+	u32	ser;
+	u32	baudr;
+	u32	txfltr;
+	u32	rxfltr;
+	u32	txflr;
+	u32	rxflr;
+	u32	sr;
+	u32	imr;
+	u32	isr;
+	u32	risr;
+	u32	txoicr;
+	u32	rxoicr;
+	u32	rxuicr;
+	u32	msticr;
+	u32	icr;
+	u32	dmacr;
+	u32	dmatdlr;
+	u32	dmardlr;
+	u32	idr;
+	u32	version;
+
+	/* Currently operates as 32 bits, though only the low 16 bits matter */
+	u32	dr;
+} __packed;
+
+/* SSP controler registers */
+struct dw_ssp_reg {
+	u32 ctrl0;
+	u32 ctrl1;
+	u32 sr;
+	u32 ssitr;
+	u32 dr;
+} __packed;
+
+#define dw_readl(dw, name)		__raw_readl(&(dw)->name)
+#define dw_writel(dw, name, val)	__raw_writel((val), &(dw)->name)
+
+/* Default use SPI0 register for mrst, we will detect Penwell and use SPI1 */
+static unsigned long mrst_spi_paddr = MRST_REGBASE_SPI0;
+
+static u32 *pclk_spi0;
+/* Always contains an accessible address, start with 0 */
+static struct dw_spi_reg *pspi;
+static struct dw_ssp_reg *pssp;
+
+static struct kmsg_dumper dw_dumper;
+static int dumper_registered;
+
+static void dw_kmsg_dump(struct kmsg_dumper *dumper,
+			 enum kmsg_dump_reason reason)
+{
+	static char line[1024];
+	size_t len;
+
+	/* When run to this, we'd better re-init the HW */
+	mrst_early_console_init();
+
+	while (kmsg_dump_get_line(dumper, true, line, sizeof(line), &len))
+		early_mrst_console.write(&early_mrst_console, line, len);
+}
+
+/* Set the ratio rate to 115200, 8n1, IRQ disabled */
+static void max3110_spi_write_config(void)
+{
+	u16 config;
+
+	config = 0xc001;
+	dw_writel(pspi, dr, config);
+}
+
+/* Translate char to a eligible word and send to max3110 */
+static void max3110_spi_write_data(char c)
+{
+	u16 data;
+
+	data = 0x8000 | c;
+	dw_writel(pspi, dr, data);
+}
+
+/* similar to max3110_spi_write_data, but via SSP controller */
+static void max3110_ssp_write_data(char c)
+{
+	u16 data;
+
+	data = 0x8000 | c;
+	dw_writel(pssp, dr, data);
+	dw_readl(pssp, dr);
+	udelay(10);
+	return;
+}
+
+void mrst_early_console_init(void)
+{
+	u32 ctrlr0 = 0;
+	u32 spi0_cdiv;
+	u32 freq; /* Freqency info only need be searched once */
+
+	/* Base clk is 100 MHz, the actual clk = 100M / (clk_divider + 1) */
+	pclk_spi0 = (void *)set_fixmap_offset_nocache(FIX_EARLYCON_MEM_BASE,
+							MRST_CLK_SPI0_REG);
+	spi0_cdiv = ((*pclk_spi0) & 0xe00) >> 9;
+	freq = 100000000 / (spi0_cdiv + 1);
+
+	if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_PENWELL)
+		mrst_spi_paddr = MRST_REGBASE_SPI1;
+	else if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_CLOVERVIEW)
+		mrst_spi_paddr = CLV_REGBASE_SPI1;
+
+	pspi = (void *)set_fixmap_offset_nocache(FIX_EARLYCON_MEM_BASE,
+						mrst_spi_paddr);
+
+	/* Disable SPI controller */
+	dw_writel(pspi, ssienr, 0);
+
+	/* Set control param, 8 bits, transmit only mode */
+	ctrlr0 = dw_readl(pspi, ctrl0);
+
+	ctrlr0 &= 0xfcc0;
+	ctrlr0 |= 0xf | (SPI_FRF_SPI << SPI_FRF_OFFSET)
+		      | (SPI_TMOD_TO << SPI_TMOD_OFFSET);
+	dw_writel(pspi, ctrl0, ctrlr0);
+
+	/*
+	 * Change the spi0 clk to comply with 115200 bps, use 100000 to
+	 * calculate the clk dividor to make the clock a little slower
+	 * than real baud rate.
+	 */
+	dw_writel(pspi, baudr, freq/100000);
+
+	/* Disable all INT for early phase */
+	dw_writel(pspi, imr, 0x0);
+
+	/* Set the cs to spi-uart */
+	dw_writel(pspi, ser, 0x2);
+
+	/* Enable the HW, the last step for HW init */
+	dw_writel(pspi, ssienr, 0x1);
+
+	/* Set the default configuration */
+	max3110_spi_write_config();
+
+	/* Register the kmsg dumper */
+	if (!dumper_registered) {
+		dw_dumper.dump = dw_kmsg_dump;
+		kmsg_dump_register(&dw_dumper);
+		dumper_registered = 1;
+	}
+}
+
+/* Slave select should be called in the read/write function */
+static void early_mrst_spi_putc(char c)
+{
+	unsigned int timeout;
+	u32 sr;
+
+	timeout = MRST_SPI_TIMEOUT;
+	/* Early putc needs to make sure the TX FIFO is not full */
+	while (--timeout) {
+		sr = dw_readl(pspi, sr);
+		if (!(sr & SR_TF_NOT_FULL))
+			cpu_relax();
+		else
+			break;
+	}
+
+	if (!timeout)
+		pr_warn("MRST earlycon: timed out\n");
+	else
+		max3110_spi_write_data(c);
+}
+
+/* Early SPI only uses polling mode */
+static void early_mrst_spi_write(struct console *con, const char *str,
+				unsigned n)
+{
+	int i;
+
+	for (i = 0; i < n && *str; i++) {
+		if (*str == '\n')
+			early_mrst_spi_putc('\r');
+		early_mrst_spi_putc(*str);
+		str++;
+	}
+}
+
+struct console early_mrst_console = {
+	.name =		"earlymrst",
+	.write =	early_mrst_spi_write,
+	.flags =	CON_PRINTBUFFER,
+	.index =	-1,
+};
+
+void mrfld_early_console_init(void)
+{
+	u32 ctrlr0 = 0;
+
+	set_fixmap_nocache(FIX_EARLYCON_MEM_BASE, MRFLD_REGBASE_SSP5);
+
+	pssp = (void *)(__fix_to_virt(FIX_EARLYCON_MEM_BASE) +
+			(MRFLD_REGBASE_SSP5 & (PAGE_SIZE - 1)));
+
+	ssp_timing_wr = 1;
+
+	/* mask interrupts, clear enable and set DSS config */
+	/* SSPSCLK on active transfers only */
+	dw_writel(pssp, ctrl0, 0xc12c0f);
+	dw_writel(pssp, ctrl1, 0x0);
+
+	dw_readl(pssp, sr);
+
+	/* enable port */
+	ctrlr0 = dw_readl(pssp, ctrl0);
+	ctrlr0 |= 0x80;
+	dw_writel(pssp, ctrl0, ctrlr0);
+}
+
+/* slave select should be called in the read/write function */
+static int early_mrfld_putc(char c)
+{
+	unsigned int timeout;
+	u32 sr;
+
+	timeout = MRFLD_SSP_TIMEOUT;
+	/* early putc need make sure the TX FIFO is not full*/
+	while (timeout--) {
+		sr = dw_readl(pssp, sr);
+		if (ssp_timing_wr) {
+			if (sr & 0xF00)
+				cpu_relax();
+			else
+				break;
+		} else {
+			if (!(sr & SSP_SR_TF_NOT_FULL))
+				cpu_relax();
+			else
+				break;
+		}
+	}
+
+	if (timeout == 0xffffffff) {
+		pr_info("SSP: waiting timeout\n");
+		return -1;
+	}
+
+	max3110_ssp_write_data(c);
+	return 0;
+}
+
+static void early_mrfld_write(struct console *con,
+				const char *str, unsigned n)
+{
+	int  i;
+
+	for (i = 0; i < n && *str; i++) {
+		if (*str == '\n')
+			early_mrfld_putc('\r');
+		early_mrfld_putc(*str);
+
+		str++;
+	}
+}
+
+struct console early_mrfld_console = {
+	.name =		"earlymrfld",
+	.write =	early_mrfld_write,
+	.flags =	CON_PRINTBUFFER,
+	.index =	-1,
+};
+
+void mrfld_early_printk(const char *fmt, ...)
+{
+	char buf[512];
+	int n;
+	va_list ap;
+
+	va_start(ap, fmt);
+	n = vscnprintf(buf, 512, fmt, ap);
+	va_end(ap);
+
+	early_mrfld_console.write(&early_mrfld_console, buf, n);
+}
+
+/*
+ * Following is the early console based on High Speed UART device.
+ */
+#define MERR_HSU_PORT_BASE	0xff010180
+#define MERR_HSU_CLK_CTL	0xff00b830
+#define MFLD_HSU_PORT_BASE	0xffa28080
+
+static void __iomem *phsu;
+
+void hsu_early_console_init(const char *s)
+{
+	unsigned long paddr, port = 0;
+	u8 lcr;
+	int *clkctl;
+
+	if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER) {
+		paddr = MERR_HSU_PORT_BASE;
+		clkctl = (int *)set_fixmap_offset_nocache(FIX_CLOCK_CTL,
+							  MERR_HSU_CLK_CTL);
+	} else {
+		paddr = MFLD_HSU_PORT_BASE;
+		clkctl = NULL;
+	}
+
+	/*
+	 * Select the early HSU console port if specified by user in the
+	 * kernel command line.
+	 */
+	if (*s && !kstrtoul(s, 10, &port))
+		port = clamp_val(port, 0, 2);
+
+	paddr += port * 0x80;
+	phsu = (void *)set_fixmap_offset_nocache(FIX_EARLYCON_MEM_BASE, paddr);
+
+	/* Disable FIFO */
+	writeb(0x0, phsu + UART_FCR);
+
+	/* Set to default 115200 bps, 8n1 */
+	lcr = readb(phsu + UART_LCR);
+	writeb((0x80 | lcr), phsu + UART_LCR);
+	writeb(0x01, phsu + UART_DLL);
+	writeb(0x00, phsu + UART_DLM);
+	writeb(lcr,  phsu + UART_LCR);
+	writel(0x0010, phsu + UART_ABR * 4);
+	writel(0x0010, phsu + UART_PS * 4);
+
+	if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER) {
+		/* detect HSU clock is 50M or 19.2M */
+		if (clkctl && *clkctl & (1 << 16))
+			writel(0x0120, phsu + UART_MUL * 4); /* for 50M */
+		else
+			writel(0x05DC, phsu + UART_MUL * 4);  /* for 19.2M */
+	} else
+		writel(0x0240, phsu + UART_MUL * 4);
+
+	writel(0x3D09, phsu + UART_DIV * 4);
+
+	writeb(0x8, phsu + UART_MCR);
+	writeb(0x7, phsu + UART_FCR);
+	writeb(0x3, phsu + UART_LCR);
+
+	/* Clear IRQ status */
+	readb(phsu + UART_LSR);
+	readb(phsu + UART_RX);
+	readb(phsu + UART_IIR);
+	readb(phsu + UART_MSR);
+
+	/* Enable FIFO */
+	writeb(0x7, phsu + UART_FCR);
+}
+
+#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
+
+static void early_hsu_putc(char ch)
+{
+	unsigned int timeout = 10000; /* 10ms */
+	u8 status;
+
+	while (--timeout) {
+		status = readb(phsu + UART_LSR);
+		if (status & BOTH_EMPTY)
+			break;
+		udelay(1);
+	}
+
+	/* Only write the char when there was no timeout */
+	if (timeout)
+		writeb(ch, phsu + UART_TX);
+}
+
+static void early_hsu_write(struct console *con, const char *str, unsigned n)
+{
+	int i;
+
+	for (i = 0; i < n && *str; i++) {
+		if (*str == '\n')
+			early_hsu_putc('\r');
+		early_hsu_putc(*str);
+		str++;
+	}
+}
+
+struct console early_hsu_console = {
+	.name =		"earlyhsu",
+	.write =	early_hsu_write,
+	.flags =	CON_PRINTBUFFER,
+	.index =	-1,
+};
+
+void hsu_early_printk(const char *fmt, ...)
+{
+	char buf[512];
+	int n;
+	va_list ap;
+
+	va_start(ap, fmt);
+	n = vscnprintf(buf, 512, fmt, ap);
+	va_end(ap);
+
+	early_hsu_console.write(&early_hsu_console, buf, n);
+}
+
+#define PTI_ADDRESS		0xfd800000
+#define CONTROL_FRAME_LEN 32    /* PTI control frame maximum size */
+
+static void early_pti_write_to_aperture(struct pti_masterchannel *mc,
+					 u8 *buf, int len)
+{
+	int dwordcnt, final, i;
+	u32 ptiword;
+	u8 *p ;
+	u32 pti_phys_address ;
+	u32 __iomem *aperture;
+
+	p = buf;
+
+	/*
+	   calculate the aperture offset from the base using the master and
+	   channel id's.
+	*/
+	pti_phys_address = PTI_ADDRESS +
+				(mc->master << 15) + (mc->channel << 8);
+
+	set_fixmap_nocache(FIX_EARLYCON_MEM_BASE, pti_phys_address);
+	aperture = (void *)(__fix_to_virt(FIX_EARLYCON_MEM_BASE) +
+				(pti_phys_address & (PAGE_SIZE - 1)));
+
+	dwordcnt = len >> 2;
+	final = len - (dwordcnt << 2);		/* final = trailing bytes */
+	if (final == 0 && dwordcnt != 0) {	/* always have a final dword */
+		final += 4;
+		dwordcnt--;
+	}
+
+	for (i = 0; i < dwordcnt; i++) {
+		ptiword = be32_to_cpu(*(u32 *)p);
+		p += 4;
+		iowrite32(ptiword, aperture);
+	}
+
+	aperture += PTI_LASTDWORD_DTS;	/* adding DTS signals that is EOM */
+	ptiword = 0;
+
+	for (i = 0; i < final; i++)
+		ptiword |= *p++ << (24-(8*i));
+
+	iowrite32(ptiword, aperture);
+
+	return;
+}
+
+static int pti_early_console_init(void)
+{
+	early_pti_console_channel = 0;
+	early_pti_control_channel = 0;
+	return 0;
+}
+
+static void early_pti_write(struct console *con,
+			const char *str, unsigned n)
+{
+	static struct pti_masterchannel mccontrol = {.master = 72,
+						     .channel = 0};
+	static struct pti_masterchannel mcconsole = {.master = 73,
+						     .channel = 0};
+	const char *control_format = "%3d %3d %s";
+
+	/*
+	 * Since we access the comm member in current's task_struct,
+	 * we only need to be as large as what 'comm' in that
+	 * structure is.
+	 */
+	char comm[TASK_COMM_LEN];
+	u8 control_frame[CONTROL_FRAME_LEN];
+
+	/* task information */
+	if (in_irq())
+		strncpy(comm, "hardirq", sizeof(comm));
+	else if (in_softirq())
+		strncpy(comm, "softirq", sizeof(comm));
+	else
+		strncpy(comm, current->comm, sizeof(comm));
+
+	/* Absolutely ensure our buffer is zero terminated */
+	comm[TASK_COMM_LEN-1] = 0;
+
+	mccontrol.channel = early_pti_control_channel;
+	early_pti_control_channel = (early_pti_control_channel + 1) & 0x7f;
+
+	mcconsole.channel = early_pti_console_channel;
+	early_pti_console_channel = (early_pti_console_channel + 1) & 0x7f;
+
+	snprintf(control_frame, CONTROL_FRAME_LEN, control_format,
+		mcconsole.master, mcconsole.channel, comm);
+
+	early_pti_write_to_aperture(&mccontrol, control_frame,
+					strlen(control_frame));
+	early_pti_write_to_aperture(&mcconsole, (u8 *)str, n);
+
+}
+
+struct console early_pti_console = {
+	.name =		"earlypti",
+	.early_setup =  pti_early_console_init,
+	.write =	early_pti_write,
+	.flags =	CON_PRINTBUFFER,
+	.index =	-1,
+};
+
+void pti_early_printk(const char *fmt, ...)
+{
+	char buf[512];
+	int n;
+	va_list ap;
+
+	va_start(ap, fmt);
+	n = vscnprintf(buf, sizeof(buf), fmt, ap);
+	va_end(ap);
+
+	early_pti_console.write(&early_pti_console, buf, n);
+}
diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
new file mode 100644
index 0000000..255688f
--- /dev/null
+++ b/arch/x86/platform/intel-mid/intel-mid.c
@@ -0,0 +1,263 @@
+/*
+ * intel-mid.c: Intel MID platform setup code
+ *
+ * (C) Copyright 2012 Intel Corporation
+ * Author: Jacob Pan (jacob.jun.pan@intel.com)
+ * Author: Sathyanarayanan KN(sathyanarayanan.kuppuswamy@intel.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#define	SFI_SIG_OEM0	"OEM0"
+#define pr_fmt(fmt) "intel_mid: " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
+#include <linux/sfi.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/spinlock.h>
+
+#include <asm/setup.h>
+#include <asm/mpspec_def.h>
+#include <asm/hw_irq.h>
+#include <asm/apic.h>
+#include <asm/io_apic.h>
+#include <asm/intel-mid.h>
+#include <asm/io.h>
+#include <asm/i8259.h>
+#include <asm/intel_scu_ipc.h>
+#include <asm/intel_mid_rpmsg.h>
+#include <linux/platform_data/intel_mid_remoteproc.h>
+#include <asm/apb_timer.h>
+#include <asm/reboot.h>
+#include <asm/proto.h>
+#include "intel_mid_weak_decls.h"
+#include "intel_soc_pmu.h"
+
+/*
+ * the clockevent devices on Moorestown/Medfield can be APBT or LAPIC clock,
+ * cmdline option x86_intel_mid_timer can be used to override the configuration
+ * to prefer one or the other.
+ * at runtime, there are basically three timer configurations:
+ * 1. per cpu apbt clock only
+ * 2. per cpu always-on lapic clocks only, this is Penwell/Medfield only
+ * 3. per cpu lapic clock (C3STOP) and one apbt clock, with broadcast.
+ *
+ * by default (without cmdline option), platform code first detects cpu type
+ * to see if we are on lincroft or penwell, then set up both lapic or apbt
+ * clocks accordingly.
+ * i.e. by default, medfield uses configuration #2, moorestown uses #1.
+ * config #3 is supported but not recommended on medfield.
+ *
+ * rating and feature summary:
+ * lapic (with C3STOP) --------- 100
+ * apbt (always-on) ------------ 110
+ * lapic (always-on,ARAT) ------ 150
+ */
+__cpuinitdata enum intel_mid_timer_options intel_mid_timer_options;
+
+/* intel_mid_ops to store sub arch ops */
+struct intel_mid_ops *intel_mid_ops;
+/* getter function for sub arch ops*/
+static void *(*get_intel_mid_ops[])(void) = INTEL_MID_OPS_INIT;
+enum intel_mid_cpu_type __intel_mid_cpu_chip;
+EXPORT_SYMBOL_GPL(__intel_mid_cpu_chip);
+
+static int force_cold_boot;
+module_param(force_cold_boot, int, 0644);
+MODULE_PARM_DESC(force_cold_boot,
+		 "Set to Y to force a COLD BOOT instead of a COLD RESET "
+		 "on the next reboot system call.");
+u32 nbr_hsi_clients = 2;
+static void intel_mid_power_off(void)
+{
+	pmu_power_off();
+};
+
+#define RSTC_IO_PORT_ADDR 0xcf9
+#define RSTC_COLD_BOOT    0x8
+#define RSTC_COLD_RESET   0x4
+
+static void intel_mid_reboot(void)
+{
+	if (intel_scu_ipc_fw_update()) {
+		pr_debug("intel_scu_fw_update: IFWI upgrade failed...\n");
+	}
+
+	if (!reboot_force) {
+		/* system_state is SYSTEM_RESTART now,
+		 * polling to wait for SCU not busy.
+		 */
+		while (intel_scu_ipc_check_status())
+			udelay(10);
+	}
+
+	if (force_cold_boot)
+		outb(RSTC_COLD_BOOT, RSTC_IO_PORT_ADDR);
+	else {
+		switch (reboot_force) {
+		case REBOOT_FORCE_OFF:
+			/*  this will cause context execution error when rebooting */
+			/*  in panic, but this is the very last action we take     */
+			rpmsg_send_generic_simple_command(RP_COLD_OFF, 0);
+			break;
+		case REBOOT_FORCE_ON:
+			pr_info("***** INFO: reboot requested but forced to keep system on *****\n");
+			while(1); /* halt */
+			break;
+		case REBOOT_FORCE_COLD_RESET:
+			outb(RSTC_COLD_RESET, RSTC_IO_PORT_ADDR);
+			break;
+		case REBOOT_FORCE_COLD_BOOT:
+			outb(RSTC_COLD_BOOT, RSTC_IO_PORT_ADDR);
+			break;
+		default:
+			outb(RSTC_COLD_RESET, RSTC_IO_PORT_ADDR);
+		}
+	}
+}
+
+static unsigned long __init intel_mid_calibrate_tsc(void)
+{
+	return 0;
+}
+
+static void __init intel_mid_time_init(void)
+{
+
+#ifdef CONFIG_SFI
+	sfi_table_parse(SFI_SIG_MTMR, NULL, NULL, sfi_parse_mtmr);
+#endif
+	switch (intel_mid_timer_options) {
+	case INTEL_MID_TIMER_APBT_ONLY:
+		break;
+	case INTEL_MID_TIMER_LAPIC_APBT:
+		x86_init.timers.setup_percpu_clockev = setup_boot_APIC_clock;
+		x86_cpuinit.setup_percpu_clockev = setup_secondary_APIC_clock;
+		break;
+	default:
+		if (!boot_cpu_has(X86_FEATURE_ARAT))
+			break;
+		x86_init.timers.setup_percpu_clockev = setup_boot_APIC_clock;
+		x86_cpuinit.setup_percpu_clockev = setup_secondary_APIC_clock;
+		return;
+	}
+	/* we need at least one APB timer */
+	pre_init_apic_IRQ0();
+	apbt_time_init();
+}
+
+static void __cpuinit intel_mid_arch_setup(void)
+{
+	if (boot_cpu_data.x86 != 6) {
+		pr_err("Unknown Intel MID CPU (%d:%d), default to Penwell\n",
+			boot_cpu_data.x86, boot_cpu_data.x86_model);
+		__intel_mid_cpu_chip = INTEL_MID_CPU_CHIP_PENWELL;
+		return;
+	}
+	switch (boot_cpu_data.x86_model) {
+	case 0x5A:
+		__intel_mid_cpu_chip = INTEL_MID_CPU_CHIP_ANNIEDALE;
+		break;
+	case 0x27:
+	default:
+		__intel_mid_cpu_chip = INTEL_MID_CPU_CHIP_PENWELL;
+		break;
+	}
+
+	if (__intel_mid_cpu_chip < MAX_CPU_OPS(get_intel_mid_ops))
+		intel_mid_ops = get_intel_mid_ops[__intel_mid_cpu_chip]();
+	else {
+		intel_mid_ops = get_intel_mid_ops[INTEL_MID_CPU_CHIP_PENWELL]();
+		pr_info("ARCH: Uknown SoC, assuming PENWELL!\n");
+	}
+
+	if (intel_mid_ops->arch_setup)
+		intel_mid_ops->arch_setup();
+}
+
+/* MID systems don't have i8042 controller */
+static int intel_mid_i8042_detect(void)
+{
+	return 0;
+}
+
+/*
+ * Moorestown does not have external NMI source nor port 0x61 to report
+ * NMI status. The possible NMI sources are from pmu as a result of NMI
+ * watchdog or lock debug. Reading io port 0x61 results in 0xff which
+ * misled NMI handler.
+ */
+static unsigned char intel_mid_get_nmi_reason(void)
+{
+	return 0;
+}
+
+/*
+ * Moorestown specific x86_init function overrides and early setup
+ * calls.
+ */
+void __init x86_intel_mid_early_setup(void)
+{
+	x86_init.resources.probe_roms = x86_init_noop;
+	x86_init.resources.reserve_resources = x86_init_noop;
+	x86_init.oem.arch_setup = intel_mid_arch_setup;
+	x86_init.timers.setup_percpu_clockev = x86_init_noop;
+	x86_cpuinit.setup_percpu_clockev = apbt_setup_secondary_clock;
+
+	x86_init.timers.timer_init = intel_mid_time_init;
+
+	x86_init.resources.probe_roms = x86_init_noop;
+	x86_init.resources.reserve_resources = x86_init_noop;
+
+	x86_init.irqs.pre_vector_init = x86_init_noop;
+
+
+	x86_platform.calibrate_tsc = intel_mid_calibrate_tsc;
+	x86_platform.i8042_detect = intel_mid_i8042_detect;
+	x86_init.timers.wallclock_init = intel_mid_rtc_init;
+	x86_platform.get_nmi_reason = intel_mid_get_nmi_reason;
+
+	x86_init.pci.init = intel_mid_pci_init;
+	x86_init.pci.fixup_irqs = x86_init_noop;
+
+	legacy_pic = &null_legacy_pic;
+
+	pm_power_off = intel_mid_power_off;
+	machine_ops.emergency_restart  = intel_mid_reboot;
+
+	/* Avoid searching for BIOS MP tables */
+	x86_init.mpparse.find_smp_config = x86_init_noop;
+	x86_init.mpparse.get_smp_config = x86_init_uint_noop;
+	set_bit(MP_BUS_ISA, mp_bus_not_pci);
+}
+
+/*
+ * if user does not want to use per CPU apb timer, just give it a lower rating
+ * than local apic timer and skip the late per cpu timer init.
+ */
+static inline int __init setup_x86_intel_mid_timer(char *arg)
+{
+	if (!arg)
+		return -EINVAL;
+
+	if (strcmp("apbt_only", arg) == 0)
+		intel_mid_timer_options = INTEL_MID_TIMER_APBT_ONLY;
+	else if (strcmp("lapic_and_apbt", arg) == 0)
+		intel_mid_timer_options = INTEL_MID_TIMER_LAPIC_APBT;
+	else {
+		pr_warn("X86 INTEL_MID timer option %s not recognised"
+			   " use x86_intel_mid_timer=apbt_only or lapic_and_apbt\n",
+			   arg);
+		return -EINVAL;
+	}
+	return 0;
+}
+__setup("x86_intel_mid_timer=", setup_x86_intel_mid_timer);
+
diff --git a/arch/x86/platform/intel-mid/intel_mid_pcihelpers.c b/arch/x86/platform/intel-mid/intel_mid_pcihelpers.c
new file mode 100644
index 0000000..fe16e96
--- /dev/null
+++ b/arch/x86/platform/intel-mid/intel_mid_pcihelpers.c
@@ -0,0 +1,137 @@
+#include <linux/export.h>
+#include <linux/pci.h>
+#include <linux/intel_mid_pm.h>
+
+#include <asm/intel_mid_pcihelpers.h>
+
+/* Unified message bus read/write operation */
+static DEFINE_SPINLOCK(msgbus_lock);
+
+static struct pci_dev *pci_root;
+
+static int intel_mid_msgbus_init(void)
+{
+	pci_root = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
+	if (!pci_root) {
+		pr_err("%s: Error: msgbus PCI handle NULL\n", __func__);
+		return -ENODEV;
+	}
+	return 0;
+}
+fs_initcall(intel_mid_msgbus_init);
+
+u32 intel_mid_msgbus_read32_raw(u32 cmd)
+{
+	unsigned long irq_flags;
+	u32 data;
+
+	spin_lock_irqsave(&msgbus_lock, irq_flags);
+	pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_REG, cmd);
+	pci_read_config_dword(pci_root, PCI_ROOT_MSGBUS_DATA_REG, &data);
+	spin_unlock_irqrestore(&msgbus_lock, irq_flags);
+
+	return data;
+}
+EXPORT_SYMBOL(intel_mid_msgbus_read32_raw);
+
+/*
+ * GU: this function is only used by the VISA and 'VXD' drivers.
+ */
+u32 intel_mid_msgbus_read32_raw_ext(u32 cmd, u32 cmd_ext)
+{
+	unsigned long irq_flags;
+	u32 data;
+
+	spin_lock_irqsave(&msgbus_lock, irq_flags);
+	pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_EXT_REG, cmd_ext);
+	pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_REG, cmd);
+	pci_read_config_dword(pci_root, PCI_ROOT_MSGBUS_DATA_REG, &data);
+	spin_unlock_irqrestore(&msgbus_lock, irq_flags);
+
+	return data;
+}
+EXPORT_SYMBOL(intel_mid_msgbus_read32_raw_ext);
+
+void intel_mid_msgbus_write32_raw(u32 cmd, u32 data)
+{
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&msgbus_lock, irq_flags);
+	pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_DATA_REG, data);
+	pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_REG, cmd);
+	spin_unlock_irqrestore(&msgbus_lock, irq_flags);
+}
+EXPORT_SYMBOL(intel_mid_msgbus_write32_raw);
+
+/*
+ * GU: this function is only used by the VISA and 'VXD' drivers.
+ */
+void intel_mid_msgbus_write32_raw_ext(u32 cmd, u32 cmd_ext, u32 data)
+{
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&msgbus_lock, irq_flags);
+	pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_DATA_REG, data);
+	pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_EXT_REG, cmd_ext);
+	pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_REG, cmd);
+	spin_unlock_irqrestore(&msgbus_lock, irq_flags);
+}
+EXPORT_SYMBOL(intel_mid_msgbus_write32_raw_ext);
+
+u32 intel_mid_msgbus_read32(u8 port, u32 addr)
+{
+	unsigned long irq_flags;
+	u32 data;
+	u32 cmd;
+	u32 cmdext;
+
+	cmd = (PCI_ROOT_MSGBUS_READ << 24) | (port << 16) |
+		((addr & 0xff) << 8) | PCI_ROOT_MSGBUS_DWORD_ENABLE;
+	cmdext = addr & 0xffffff00;
+
+	spin_lock_irqsave(&msgbus_lock, irq_flags);
+
+	if (cmdext) {
+		/* This resets to 0 automatically, no need to write 0 */
+		pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_EXT_REG,
+					cmdext);
+	}
+
+	pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_REG, cmd);
+	pci_read_config_dword(pci_root, PCI_ROOT_MSGBUS_DATA_REG, &data);
+	spin_unlock_irqrestore(&msgbus_lock, irq_flags);
+
+	return data;
+}
+
+EXPORT_SYMBOL(intel_mid_msgbus_read32);
+void intel_mid_msgbus_write32(u8 port, u32 addr, u32 data)
+{
+	unsigned long irq_flags;
+	u32 cmd;
+	u32 cmdext;
+
+	cmd = (PCI_ROOT_MSGBUS_WRITE << 24) | (port << 16) |
+		((addr & 0xFF) << 8) | PCI_ROOT_MSGBUS_DWORD_ENABLE;
+	cmdext = addr & 0xffffff00;
+
+	spin_lock_irqsave(&msgbus_lock, irq_flags);
+	pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_DATA_REG, data);
+
+	if (cmdext) {
+		/* This resets to 0 automatically, no need to write 0 */
+		pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_EXT_REG,
+					cmdext);
+	}
+
+	pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_REG, cmd);
+	spin_unlock_irqrestore(&msgbus_lock, irq_flags);
+}
+EXPORT_SYMBOL(intel_mid_msgbus_write32);
+
+/* called only from where is later then fs_initcall */
+u32 intel_mid_soc_stepping(void)
+{
+	return pci_root->revision;
+}
+EXPORT_SYMBOL(intel_mid_soc_stepping);
diff --git a/arch/x86/platform/intel-mid/intel_mid_pstore_ram.c b/arch/x86/platform/intel-mid/intel_mid_pstore_ram.c
new file mode 100644
index 0000000..7e7e497
--- /dev/null
+++ b/arch/x86/platform/intel-mid/intel_mid_pstore_ram.c
@@ -0,0 +1,150 @@
+/*
+ * Intel mid pstore ram support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/platform_device.h>
+#include <linux/memblock.h>
+#include <linux/pstore_ram.h>
+#include <linux/bootmem.h>
+#include <linux/efi.h>
+#include <linux/nls.h>
+
+#define SZ_4K	0x00001000
+#define SZ_512K	0x00080000
+#define SZ_1_5M	0x00180000
+#define SZ_2_1M	0x00219000
+#define SZ_16M	0x01000000
+
+/* Board files use the following if they are ok with 16M start defaults */
+#define PSTORE_RAM_START_DEFAULT	SZ_16M
+#define PSTORE_RAM_SIZE_DEFAULT		SZ_2_1M
+
+#ifdef CONFIG_X86_32
+#define RAM_MAX_MEM (max_low_pfn << PAGE_SHIFT)
+#else
+#define RAM_MAX_MEM (1 << 28)
+#endif
+
+static struct ramoops_platform_data pstore_ram_data = {
+	.mem_size	= PSTORE_RAM_SIZE_DEFAULT,
+	.mem_address	= PSTORE_RAM_START_DEFAULT,
+	.record_size	= SZ_4K,
+	.console_size	= SZ_1_5M,
+	.pmsg_size	= SZ_512K,
+	.ftrace_size	= 2*SZ_4K,
+	.dump_oops	= 1,
+};
+
+static struct platform_device pstore_ram_dev = {
+	.name = "ramoops",
+	.dev = {
+		.platform_data = &pstore_ram_data,
+	},
+};
+
+static __initdata bool intel_mid_pstore_ram_inited;
+
+static const char EFIVAR_PSTORE_ADDR[] = "PstoreAddr";
+static const char EFIVAR_PSTORE_SIZE[] = "PstoreSize";
+
+static void uefi_set_pstore_buffer(unsigned long *addr, unsigned long *size)
+{
+	int ret;
+	wchar_t varname[sizeof(EFIVAR_PSTORE_ADDR)];
+	u32 attributes = EFI_VARIABLE_NON_VOLATILE
+		| EFI_VARIABLE_BOOTSERVICE_ACCESS
+		| EFI_VARIABLE_RUNTIME_ACCESS;
+
+	utf8s_to_utf16s(EFIVAR_PSTORE_ADDR, sizeof(EFIVAR_PSTORE_ADDR),
+			UTF16_LITTLE_ENDIAN, varname, sizeof(varname));
+	varname[sizeof(EFIVAR_PSTORE_ADDR) - 1] = 0;
+
+	ret = efivar_entry_set_safe(varname, EFI_GLOBAL_VARIABLE_GUID,
+				    attributes, true,
+				    sizeof(unsigned long), addr);
+	if (ret) {
+		pr_err("%s can't set variable %s (%d)\n",
+		       __func__, EFIVAR_PSTORE_ADDR, ret);
+		return;
+	}
+
+	utf8s_to_utf16s(EFIVAR_PSTORE_SIZE, sizeof(EFIVAR_PSTORE_SIZE),
+			UTF16_LITTLE_ENDIAN, varname, sizeof(varname));
+	varname[sizeof(EFIVAR_PSTORE_SIZE) - 1] = 0;
+
+	ret = efivar_entry_set_safe(varname, EFI_GLOBAL_VARIABLE_GUID,
+				    attributes, true,
+				    sizeof(unsigned long), size);
+	if (ret)
+		pr_err("%s can't set variable %s (%d)\n",
+		       __func__, EFIVAR_PSTORE_SIZE, ret);
+}
+
+/**
+ * intel_mid_pstore_ram_register() - device_initcall to register ramoops device
+ */
+static int __init intel_mid_pstore_ram_register(void)
+{
+	int ret;
+
+	if (!intel_mid_pstore_ram_inited)
+		return -ENODEV;
+
+	ret = platform_device_register(&pstore_ram_dev);
+	if (ret) {
+		pr_err("%s: unable to register pstore_ram device: "
+		       "start=0x%llx, size=0x%lx, ret=%d\n", __func__,
+		       (unsigned long long)pstore_ram_data.mem_address,
+		       pstore_ram_data.mem_size, ret);
+	}
+
+	if (efi_enabled(EFI_BOOT) && efi_enabled(EFI_RUNTIME_SERVICES))
+		uefi_set_pstore_buffer(&pstore_ram_data.mem_address,
+				       &pstore_ram_data.mem_size);
+
+	return ret;
+}
+device_initcall(intel_mid_pstore_ram_register);
+
+void __init pstore_ram_reserve_memory(void)
+{
+	phys_addr_t mem;
+	size_t size;
+	int ret;
+
+	size = PSTORE_RAM_SIZE_DEFAULT;
+	size = ALIGN(size, PAGE_SIZE);
+
+	mem = memblock_find_in_range(0, RAM_MAX_MEM, size, PAGE_SIZE);
+	if (!mem) {
+		pr_err("Cannot find memblock range for pstore_ram\n");
+		return;
+	}
+
+	ret = memblock_reserve(mem, size);
+	if (ret) {
+		pr_err("Failed to reserve memory from 0x%llx-0x%llx\n",
+		       (unsigned long long)mem,
+		       (unsigned long long)(mem + size - 1));
+		return;
+	}
+
+	pstore_ram_data.mem_address = mem;
+	pstore_ram_data.mem_size = size;
+
+	pr_info("reserved RAM buffer (0x%zx@0x%llx)\n",
+		size, (unsigned long long)mem);
+
+	intel_mid_pstore_ram_inited = true;
+}
diff --git a/arch/x86/platform/intel-mid/intel_mid_scu.c b/arch/x86/platform/intel-mid/intel_mid_scu.c
new file mode 100644
index 0000000..a8e633c
--- /dev/null
+++ b/arch/x86/platform/intel-mid/intel_mid_scu.c
@@ -0,0 +1,92 @@
+/*
+ * intel_mid_scu.c: Intel MID SCU platform initialization code
+ *
+ * (C) Copyright 2012 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
+#include <linux/sfi.h>
+#include <linux/intel_pmic_gpio.h>
+#include <linux/irq.h>
+#include <linux/rpmsg.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+
+#include <linux/platform_data/intel_mid_remoteproc.h>
+
+struct rpmsg_ns_list nslist = {
+	.list = LIST_HEAD_INIT(nslist.list),
+	.lock = __MUTEX_INITIALIZER(nslist.lock),
+};
+
+static struct intel_mid_rproc_pdata intel_scu_pdata = {
+	.name		= "intel_rproc_scu",
+	.firmware	= "intel_mid/intel_mid_remoteproc.fw",
+	.nslist		= &nslist,
+};
+
+static u64 intel_scu_dmamask = DMA_BIT_MASK(32);
+
+static struct platform_device intel_scu_device = {
+	.name		= "intel_rproc_scu",
+	.id		= -1,
+	.dev		= {
+		.platform_data = &intel_scu_pdata,
+		.dma_mask = &intel_scu_dmamask,
+	},
+};
+
+void register_rpmsg_service(char *name, int id, u32 addr)
+{
+	struct rpmsg_ns_info *info;
+	info = rpmsg_ns_alloc(name, id, addr);
+	rpmsg_ns_add_to_list(info, &nslist);
+}
+
+int intel_mid_rproc_init(void)
+{
+	int err;
+
+	/* generic rpmsg channels */
+	register_rpmsg_service("rpmsg_ipc_command", RPROC_SCU, RP_IPC_COMMAND);
+	register_rpmsg_service("rpmsg_ipc_simple_command",
+				RPROC_SCU, RP_IPC_SIMPLE_COMMAND);
+	register_rpmsg_service("rpmsg_ipc_raw_command",
+				RPROC_SCU, RP_IPC_RAW_COMMAND);
+
+	register_rpmsg_service("rpmsg_pmic", RPROC_SCU, RP_PMIC_ACCESS);
+	register_rpmsg_service("rpmsg_mip", RPROC_SCU, RP_MIP_ACCESS);
+	register_rpmsg_service("rpmsg_fw_update",
+					RPROC_SCU, RP_FW_ACCESS);
+	register_rpmsg_service("rpmsg_ipc_util",
+					RPROC_SCU, RP_IPC_UTIL);
+	register_rpmsg_service("rpmsg_flis", RPROC_SCU, RP_FLIS_ACCESS);
+	register_rpmsg_service("rpmsg_watchdog", RPROC_SCU, RP_SET_WATCHDOG);
+	register_rpmsg_service("rpmsg_umip", RPROC_SCU, RP_UMIP_ACCESS);
+	register_rpmsg_service("rpmsg_osip", RPROC_SCU, RP_OSIP_ACCESS);
+	register_rpmsg_service("rpmsg_vrtc", RPROC_SCU, RP_VRTC);
+	register_rpmsg_service("rpmsg_fw_logging", RPROC_SCU, RP_FW_LOGGING);
+	register_rpmsg_service("rpmsg_kpd_led", RPROC_SCU,
+				RP_MSIC_KPD_LED);
+	register_rpmsg_service("rpmsg_modem_nvram", RPROC_SCU,
+					RP_IPC_RAW_COMMAND);
+	register_rpmsg_service("rpmsg_mid_pwm", RPROC_SCU,
+				RP_MSIC_PWM);
+
+	err = platform_device_register(&intel_scu_device);
+	if (err < 0)
+		pr_err("Fail to register intel-mid-rproc platform device.\n");
+
+	return 0;
+}
+arch_initcall_sync(intel_mid_rproc_init);
diff --git a/arch/x86/platform/intel-mid/intel_mid_sfi.c b/arch/x86/platform/intel-mid/intel_mid_sfi.c
new file mode 100644
index 0000000..2904f65
--- /dev/null
+++ b/arch/x86/platform/intel-mid/intel_mid_sfi.c
@@ -0,0 +1,685 @@
+/*
+ * intel_mid_sfi.c: Intel MID SFI initialization code
+ *
+ * (C) Copyright 2012 Intel Corporation
+ * Author: Sathyanarayanan KN
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
+#include <linux/sfi.h>
+#include <linux/intel_pmic_gpio.h>
+#include <linux/spi/spi.h>
+#include <linux/i2c.h>
+#include <linux/skbuff.h>
+#include <linux/gpio.h>
+#include <linux/gpio_keys.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/hsi/hsi.h>
+#include <linux/mmc/core.h>
+#include <linux/mmc/card.h>
+#include <linux/blkdev.h>
+
+#include <asm/setup.h>
+#include <asm/mpspec_def.h>
+#include <asm/hw_irq.h>
+#include <asm/apic.h>
+#include <asm/io_apic.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_mid_vrtc.h>
+#include <linux/io.h>
+#include <asm/i8259.h>
+#include <asm/intel_scu_ipc.h>
+#include <asm/apb_timer.h>
+#include <linux/reboot.h>
+#include "intel_mid_weak_decls.h"
+
+#define	SFI_SIG_OEM0	"OEM0"
+#define MAX_IPCDEVS	24
+#define MAX_SCU_SPI	24
+#define MAX_SCU_I2C	24
+
+static struct platform_device *ipc_devs[MAX_IPCDEVS];
+static struct spi_board_info *spi_devs[MAX_SCU_SPI];
+static struct i2c_board_info *i2c_devs[MAX_SCU_I2C];
+static struct sfi_gpio_table_entry *gpio_table;
+static struct sfi_timer_table_entry sfi_mtimer_array[SFI_MTMR_MAX_NUM];
+static int ipc_next_dev;
+static int spi_next_dev;
+static int i2c_next_dev;
+static int i2c_bus[MAX_SCU_I2C];
+static int gpio_num_entry;
+static unsigned int watchdog_irq_num = 0xff;
+static u32 sfi_mtimer_usage[SFI_MTMR_MAX_NUM];
+int sfi_mrtc_num;
+int sfi_mtimer_num;
+
+struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
+EXPORT_SYMBOL_GPL(sfi_mrtc_array);
+
+struct blocking_notifier_head intel_scu_notifier =
+			BLOCKING_NOTIFIER_INIT(intel_scu_notifier);
+EXPORT_SYMBOL_GPL(intel_scu_notifier);
+
+unsigned int sfi_get_watchdog_irq(void)
+{
+	return watchdog_irq_num;
+}
+
+/* parse all the mtimer info to a static mtimer array */
+int __init sfi_parse_mtmr(struct sfi_table_header *table)
+{
+	struct sfi_table_simple *sb;
+	struct sfi_timer_table_entry *pentry;
+	struct mpc_intsrc mp_irq;
+	int totallen;
+
+	sb = (struct sfi_table_simple *)table;
+	if (!sfi_mtimer_num) {
+		sfi_mtimer_num = SFI_GET_NUM_ENTRIES(sb,
+					struct sfi_timer_table_entry);
+		pentry = (struct sfi_timer_table_entry *) sb->pentry;
+		totallen = sfi_mtimer_num * sizeof(*pentry);
+		memcpy(sfi_mtimer_array, pentry, totallen);
+	}
+
+	pr_debug("SFI MTIMER info (num = %d):\n", sfi_mtimer_num);
+	pentry = sfi_mtimer_array;
+	for (totallen = 0; totallen < sfi_mtimer_num; totallen++, pentry++) {
+		pr_debug("timer[%d]: paddr = 0x%08x, freq = %dHz, irq = %d\n",
+			totallen, (u32)pentry->phys_addr,
+			pentry->freq_hz, pentry->irq);
+			if (!pentry->irq)
+				continue;
+			mp_irq.type = MP_INTSRC;
+			mp_irq.irqtype = mp_INT;
+/* triggering mode edge bit 2-3, active high polarity bit 0-1 */
+			mp_irq.irqflag = 5;
+			mp_irq.srcbus = MP_BUS_ISA;
+			mp_irq.srcbusirq = pentry->irq;	/* IRQ */
+			mp_irq.dstapic = MP_APIC_ALL;
+			mp_irq.dstirq = pentry->irq;
+			mp_save_irq(&mp_irq);
+	}
+
+	return 0;
+}
+
+struct sfi_timer_table_entry *sfi_get_mtmr(int hint)
+{
+	int i;
+	if (hint < sfi_mtimer_num) {
+		if (!sfi_mtimer_usage[hint]) {
+			pr_debug("hint taken for timer %d irq %d\n",
+				hint, sfi_mtimer_array[hint].irq);
+			sfi_mtimer_usage[hint] = 1;
+			return &sfi_mtimer_array[hint];
+		}
+	}
+	/* take the first timer available */
+	for (i = 0; i < sfi_mtimer_num;) {
+		if (!sfi_mtimer_usage[i]) {
+			sfi_mtimer_usage[i] = 1;
+			return &sfi_mtimer_array[i];
+		}
+		i++;
+	}
+	return NULL;
+}
+
+void sfi_free_mtmr(struct sfi_timer_table_entry *mtmr)
+{
+	int i;
+	for (i = 0; i < sfi_mtimer_num;) {
+		if (mtmr->irq == sfi_mtimer_array[i].irq) {
+			sfi_mtimer_usage[i] = 0;
+			return;
+		}
+		i++;
+	}
+}
+
+/* parse all the mrtc info to a global mrtc array */
+int __init sfi_parse_mrtc(struct sfi_table_header *table)
+{
+	struct sfi_table_simple *sb;
+	struct sfi_rtc_table_entry *pentry;
+	struct mpc_intsrc mp_irq;
+
+	int totallen;
+
+	sb = (struct sfi_table_simple *)table;
+	if (!sfi_mrtc_num) {
+		sfi_mrtc_num = SFI_GET_NUM_ENTRIES(sb,
+						struct sfi_rtc_table_entry);
+		pentry = (struct sfi_rtc_table_entry *)sb->pentry;
+		totallen = sfi_mrtc_num * sizeof(*pentry);
+		memcpy(sfi_mrtc_array, pentry, totallen);
+	}
+
+	pr_debug("SFI RTC info (num = %d):\n", sfi_mrtc_num);
+	pentry = sfi_mrtc_array;
+	for (totallen = 0; totallen < sfi_mrtc_num; totallen++, pentry++) {
+		pr_debug("RTC[%d]: paddr = 0x%08x, irq = %d\n",
+			totallen, (u32)pentry->phys_addr, pentry->irq);
+		mp_irq.type = MP_INTSRC;
+		mp_irq.irqtype = mp_INT;
+		mp_irq.irqflag = 0xf;	/* level trigger and active low */
+		mp_irq.srcbus = MP_BUS_ISA;
+		mp_irq.srcbusirq = pentry->irq;	/* IRQ */
+		mp_irq.dstapic = MP_APIC_ALL;
+		mp_irq.dstirq = pentry->irq;
+		mp_save_irq(&mp_irq);
+	}
+	return 0;
+}
+
+
+/*
+ * Parsing GPIO table first, since the DEVS table will need this table
+ * to map the pin name to the actual pin.
+ */
+static int __init sfi_parse_gpio(struct sfi_table_header *table)
+{
+	struct sfi_table_simple *sb;
+	struct sfi_gpio_table_entry *pentry;
+	int num, i;
+
+	if (gpio_table)
+		return 0;
+	sb = (struct sfi_table_simple *)table;
+	num = SFI_GET_NUM_ENTRIES(sb, struct sfi_gpio_table_entry);
+	pentry = (struct sfi_gpio_table_entry *)sb->pentry;
+
+	gpio_table = (struct sfi_gpio_table_entry *)
+				kmalloc(num * sizeof(*pentry), GFP_KERNEL);
+	if (!gpio_table)
+		return -1;
+	memcpy(gpio_table, pentry, num * sizeof(*pentry));
+	gpio_num_entry = num;
+
+	pr_debug("GPIO pin info:\n");
+	for (i = 0; i < num; i++, pentry++)
+		pr_debug("info[%2d]: controller = %16.16s, pin_name = %16.16s, pin = %d\n",
+			i,
+			pentry->controller_name,
+			pentry->pin_name,
+			pentry->pin_no);
+	return 0;
+}
+
+int get_gpio_by_name(const char *name)
+{
+	struct sfi_gpio_table_entry *pentry = gpio_table;
+	int i;
+
+	if (!pentry)
+		return -1;
+	for (i = 0; i < gpio_num_entry; i++, pentry++) {
+		if (!strncmp(name, pentry->pin_name, SFI_NAME_LEN))
+			return pentry->pin_no;
+	}
+	return -1;
+}
+EXPORT_SYMBOL(get_gpio_by_name);
+
+void __init intel_scu_device_register(struct platform_device *pdev)
+{
+	if (ipc_next_dev == MAX_IPCDEVS)
+		pr_err("too many SCU IPC devices");
+	else
+		ipc_devs[ipc_next_dev++] = pdev;
+}
+
+static void __init intel_scu_spi_device_register(struct spi_board_info *sdev)
+{
+	struct spi_board_info *new_dev;
+
+	if (spi_next_dev == MAX_SCU_SPI) {
+		pr_err("too many SCU SPI devices");
+		return;
+	}
+
+	new_dev = kzalloc(sizeof(*sdev), GFP_KERNEL);
+	if (!new_dev) {
+		pr_err("failed to alloc mem for delayed spi dev %s\n",
+			sdev->modalias);
+		return;
+	}
+	memcpy(new_dev, sdev, sizeof(*sdev));
+
+	spi_devs[spi_next_dev++] = new_dev;
+}
+
+static void __init intel_scu_i2c_device_register(int bus,
+						struct i2c_board_info *idev)
+{
+	struct i2c_board_info *new_dev;
+
+	if (i2c_next_dev == MAX_SCU_I2C) {
+		pr_err("too many SCU I2C devices");
+		return;
+	}
+
+	new_dev = kzalloc(sizeof(*idev), GFP_KERNEL);
+	if (!new_dev) {
+		pr_err("failed to alloc mem for delayed i2c dev %s\n",
+			idev->type);
+		return;
+	}
+	memcpy(new_dev, idev, sizeof(*idev));
+
+	i2c_bus[i2c_next_dev] = bus;
+	i2c_devs[i2c_next_dev++] = new_dev;
+}
+
+/* Called by IPC driver */
+void intel_scu_devices_create(void)
+{
+	int i;
+
+	for (i = 0; i < ipc_next_dev; i++)
+		platform_device_add(ipc_devs[i]);
+
+	for (i = 0; i < spi_next_dev; i++)
+		spi_register_board_info(spi_devs[i], 1);
+
+	for (i = 0; i < i2c_next_dev; i++) {
+		struct i2c_adapter *adapter;
+		struct i2c_client *client;
+
+		adapter = i2c_get_adapter(i2c_bus[i]);
+		if (adapter) {
+			client = i2c_new_device(adapter, i2c_devs[i]);
+			if (!client)
+				pr_err("can't create i2c device %s\n",
+					i2c_devs[i]->type);
+		} else
+			i2c_register_board_info(i2c_bus[i], i2c_devs[i], 1);
+	}
+	intel_scu_notifier_post(SCU_AVAILABLE, NULL);
+}
+EXPORT_SYMBOL_GPL(intel_scu_devices_create);
+
+/* Called by IPC driver */
+void intel_scu_devices_destroy(void)
+{
+	int i;
+
+	intel_scu_notifier_post(SCU_DOWN, NULL);
+
+	for (i = 0; i < ipc_next_dev; i++)
+		platform_device_del(ipc_devs[i]);
+}
+EXPORT_SYMBOL_GPL(intel_scu_devices_destroy);
+
+static struct platform_device *psh_ipc;
+void intel_psh_devices_create(void)
+{
+	psh_ipc = platform_device_alloc("intel_psh_ipc", 0);
+	if (psh_ipc == NULL) {
+		pr_err("out of memory for platform device psh_ipc.\n");
+		return;
+	}
+
+	platform_device_add(psh_ipc);
+}
+EXPORT_SYMBOL_GPL(intel_psh_devices_create);
+
+void intel_psh_devices_destroy(void)
+{
+	if (psh_ipc)
+		platform_device_del(psh_ipc);
+}
+EXPORT_SYMBOL_GPL(intel_psh_devices_destroy);
+
+void __init install_irq_resource(struct platform_device *pdev, int irq)
+{
+	/* Single threaded */
+	static struct resource __initdata res = {
+		.name = "IRQ",
+		.flags = IORESOURCE_IRQ,
+	};
+	res.start = irq;
+	platform_device_add_resources(pdev, &res, 1);
+}
+
+static void __init sfi_handle_ipc_dev(struct sfi_device_table_entry *pentry,
+					struct devs_id *dev)
+{
+	struct platform_device *pdev;
+	void *pdata = NULL;
+	pr_info("IPC bus, name = %16.16s, irq = 0x%2x\n",
+		pentry->name, pentry->irq);
+	pdata = dev->get_platform_data(pentry);
+	pdev = platform_device_alloc(pentry->name, 0);
+	if (pdev == NULL) {
+		pr_err("out of memory for SFI platform device '%s'.\n",
+			pentry->name);
+		return;
+	}
+	install_irq_resource(pdev, pentry->irq);
+
+	pdev->dev.platform_data = pdata;
+	intel_scu_device_register(pdev);
+}
+
+static void __init sfi_handle_spi_dev(struct sfi_device_table_entry *pentry,
+					struct devs_id *dev)
+{
+	struct spi_board_info spi_info;
+	void *pdata = NULL;
+
+	memset(&spi_info, 0, sizeof(spi_info));
+	strncpy(spi_info.modalias, pentry->name, SFI_NAME_LEN);
+	spi_info.irq = ((pentry->irq == (u8)0xff) ? 0 : pentry->irq);
+	spi_info.bus_num = pentry->host_num;
+	spi_info.chip_select = pentry->addr;
+	spi_info.max_speed_hz = pentry->max_freq;
+	pr_info("SPI bus=%d, name=%16.16s, irq=0x%2x, max_freq=%d, cs=%d\n",
+		spi_info.bus_num,
+		spi_info.modalias,
+		spi_info.irq,
+		spi_info.max_speed_hz,
+		spi_info.chip_select);
+
+	pdata = dev->get_platform_data(&spi_info);
+
+	spi_info.platform_data = pdata;
+	if (dev->delay)
+		intel_scu_spi_device_register(&spi_info);
+	else
+		spi_register_board_info(&spi_info, 1);
+}
+
+static void __init sfi_handle_i2c_dev(struct sfi_device_table_entry *pentry,
+					struct devs_id *dev)
+{
+	struct i2c_board_info i2c_info;
+	void *pdata = NULL;
+
+	memset(&i2c_info, 0, sizeof(i2c_info));
+	strncpy(i2c_info.type, pentry->name, SFI_NAME_LEN);
+	i2c_info.irq = ((pentry->irq == (u8)0xff) ? 0 : pentry->irq);
+	i2c_info.addr = pentry->addr;
+	pr_info("I2C bus = %d, name = %16.16s, irq = 0x%2x, addr = 0x%x\n",
+		pentry->host_num,
+		i2c_info.type,
+		i2c_info.irq,
+		i2c_info.addr);
+	pdata = dev->get_platform_data(&i2c_info);
+	i2c_info.platform_data = pdata;
+
+	if (dev->delay)
+		intel_scu_i2c_device_register(pentry->host_num, &i2c_info);
+	else
+		i2c_register_board_info(pentry->host_num, &i2c_info, 1);
+}
+
+static void sfi_handle_hsu_dev(struct sfi_device_table_entry *pentry,
+					struct devs_id *dev)
+{
+	pr_info("HSU bus = %d, name = %16.16s port = %d\n",
+		pentry->host_num,
+		pentry->name,
+		pentry->addr);
+	dev->get_platform_data(pentry);
+}
+
+static void sfi_handle_hsi_dev(struct sfi_device_table_entry *pentry,
+					struct devs_id *dev)
+{
+	struct hsi_board_info hsi_info;
+	void *pdata = NULL;
+
+	pr_info("HSI bus = %d, name = %16.16s, port = %d\n",
+		pentry->host_num,
+		pentry->name,
+		pentry->addr);
+	memset(&hsi_info, 0, sizeof(hsi_info));
+	hsi_info.name = pentry->name;
+	hsi_info.hsi_id = pentry->host_num;
+	hsi_info.port = pentry->addr;
+
+	pdata = dev->get_platform_data(&hsi_info);
+	if (pdata) {
+		pr_info("SFI register platform data for HSI device %s\n",
+					dev->name);
+		hsi_register_board_info(pdata, 2);
+	}
+}
+
+static void __init sfi_handle_sd_dev(struct sfi_device_table_entry *pentry,
+					struct devs_id *dev)
+{
+	struct sd_board_info sd_info;
+	void *pdata = NULL;
+
+	memset(&sd_info, 0, sizeof(sd_info));
+	strncpy(sd_info.name, pentry->name, 16);
+	sd_info.bus_num = pentry->host_num;
+	sd_info.board_ref_clock = pentry->max_freq;
+	sd_info.addr = pentry->addr;
+	pr_info("SDIO bus = %d, name = %16.16s, ref_clock = %d, addr =0x%x\n",
+			sd_info.bus_num,
+			sd_info.name,
+			sd_info.board_ref_clock,
+			sd_info.addr);
+	pdata = dev->get_platform_data(&sd_info);
+	sd_info.platform_data = pdata;
+}
+
+struct devs_id __init *get_device_id(u8 type, char *name)
+{
+	struct devs_id *dev = device_ids;
+
+	if (device_ids == NULL)
+		return NULL;
+
+	while (dev->name[0]) {
+		if (dev->type == type &&
+			!strncmp(dev->name, name, SFI_NAME_LEN)) {
+			return dev;
+		}
+		dev++;
+	}
+
+	return NULL;
+}
+
+static int __init sfi_parse_devs(struct sfi_table_header *table)
+{
+	struct sfi_table_simple *sb;
+	struct sfi_device_table_entry *pentry;
+	struct devs_id *dev = NULL;
+	int num, i;
+	int ioapic;
+	struct io_apic_irq_attr irq_attr;
+	struct sfi_device_table_entry *hsi_modem_entry = NULL;
+	struct devs_id *hsi_device = NULL;
+	void (*hsi_sfi_handler)(struct sfi_device_table_entry *pentry,
+				struct devs_id *dev) = NULL;
+
+	sb = (struct sfi_table_simple *)table;
+	num = SFI_GET_NUM_ENTRIES(sb, struct sfi_device_table_entry);
+	pentry = (struct sfi_device_table_entry *)sb->pentry;
+
+	for (i = 0; i < num; i++, pentry++) {
+		int irq = pentry->irq;
+		if (irq != (u8)0xff) { /* native RTE case */
+			/* these SPI2 devices are not exposed to system as PCI
+			 * devices, but they have separate RTE entry in IOAPIC
+			 * so we have to enable them one by one here
+			 */
+			ioapic = mp_find_ioapic(irq);
+			if (ioapic >= 0) {
+				irq_attr.ioapic = ioapic;
+				irq_attr.ioapic_pin = irq;
+				irq_attr.trigger = 1;
+				if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER
+					|| intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_ANNIEDALE) {
+					if (!strncmp(pentry->name,
+							"r69001-ts-i2c", 13))
+						/* active low */
+						irq_attr.polarity = 1;
+					else if (!strncmp(pentry->name,
+							"synaptics_3202", 14))
+						/* active low */
+						irq_attr.polarity = 1;
+					else if (irq == 41)
+						/* fast_int_1 */
+						irq_attr.polarity = 1;
+					else
+						/* active high */
+						irq_attr.polarity = 0;
+					/* catch watchdog interrupt number */
+					if (!strncmp(pentry->name,
+							"watchdog", 8))
+						watchdog_irq_num = (unsigned int) irq;
+				} else {
+					/* PNW and CLV go with active low */
+					irq_attr.polarity = 1;
+				}
+				io_apic_set_pci_routing(NULL, irq, &irq_attr);
+			} else
+				pr_info("APIC entry not found for: name=%s, irq=%d, ioapic=%d\n",
+					pentry->name, irq, ioapic);
+		}
+		dev = get_device_id(pentry->type, pentry->name);
+
+		if ((dev == NULL) || (dev->get_platform_data == NULL))
+			continue;
+
+		if (dev->device_handler) {
+			dev->device_handler(pentry, dev);
+			if (pentry->type == SFI_DEV_TYPE_MDM)
+				hsi_modem_entry = pentry;
+			if (pentry->type == SFI_DEV_TYPE_HSI) {
+				hsi_sfi_handler = dev->device_handler;
+				hsi_device = dev;
+			}
+		} else {
+			switch (pentry->type) {
+			case SFI_DEV_TYPE_IPC:
+				sfi_handle_ipc_dev(pentry, dev);
+				break;
+			case SFI_DEV_TYPE_SPI:
+				sfi_handle_spi_dev(pentry, dev);
+				break;
+			case SFI_DEV_TYPE_I2C:
+				sfi_handle_i2c_dev(pentry, dev);
+				break;
+			case SFI_DEV_TYPE_SD:
+				sfi_handle_sd_dev(pentry, dev);
+				break;
+			case SFI_DEV_TYPE_HSI:
+				sfi_handle_hsi_dev(pentry, dev);
+				break;
+			case SFI_DEV_TYPE_UART:
+				sfi_handle_hsu_dev(pentry, dev);
+				break;
+			default:
+				break;
+			}
+		}
+	}
+	if (hsi_modem_entry)
+		if (hsi_sfi_handler)
+			hsi_sfi_handler(hsi_modem_entry, hsi_device);
+
+	return 0;
+}
+
+static int __init sfi_parse_oemb(struct sfi_table_header *table)
+{
+	struct sfi_table_oemb *oemb;
+	u32 board_id;
+	u8 sig[SFI_SIGNATURE_SIZE + 1] = {'\0'};
+	u8 oem_id[SFI_OEM_ID_SIZE + 1] = {'\0'};
+	u8 oem_table_id[SFI_OEM_TABLE_ID_SIZE + 1] = {'\0'};
+
+	oemb = (struct sfi_table_oemb *) table;
+	if (!oemb) {
+		pr_err("%s: fail to read SFI OEMB Layout\n",
+			__func__);
+		return -ENODEV;
+	}
+
+	board_id = oemb->board_id | (oemb->board_fab << 4);
+
+	snprintf(sig, (SFI_SIGNATURE_SIZE + 1), "%s", oemb->header.sig);
+	snprintf(oem_id, (SFI_OEM_ID_SIZE + 1), "%s", oemb->header.oem_id);
+	snprintf(oem_table_id, (SFI_OEM_TABLE_ID_SIZE + 1), "%s",
+		 oemb->header.oem_table_id);
+	pr_info("SFI OEMB Layout\n");
+	pr_info("\tOEMB signature               : %s\n"
+		"\tOEMB length                  : %d\n"
+		"\tOEMB revision                : %d\n"
+		"\tOEMB checksum                : 0x%X\n"
+		"\tOEMB oem_id                  : %s\n"
+		"\tOEMB oem_table_id            : %s\n"
+		"\tOEMB board_id                : 0x%02X\n"
+		"\tOEMB iafw version            : %03d.%03d\n"
+		"\tOEMB val_hooks version       : %03d.%03d\n"
+		"\tOEMB ia suppfw version       : %03d.%03d\n"
+		"\tOEMB scu runtime version     : %03d.%03d\n"
+		"\tOEMB ifwi version            : %03d.%03d\n",
+		sig,
+		oemb->header.len,
+		oemb->header.rev,
+		oemb->header.csum,
+		oem_id,
+		oem_table_id,
+		board_id,
+		oemb->iafw_major_version,
+		oemb->iafw_main_version,
+		oemb->val_hooks_major_version,
+		oemb->val_hooks_minor_version,
+		oemb->ia_suppfw_major_version,
+		oemb->ia_suppfw_minor_version,
+		oemb->scu_runtime_major_version,
+		oemb->scu_runtime_minor_version,
+		oemb->ifwi_major_version,
+		oemb->ifwi_minor_version
+		);
+	return 0;
+}
+
+/*
+ * Parsing OEM0 table.
+ */
+static struct sfi_table_header *oem0_table;
+
+static int __init sfi_parse_oem0(struct sfi_table_header *table)
+{
+	oem0_table = table;
+	return 0;
+}
+
+void *get_oem0_table(void)
+{
+	return oem0_table;
+}
+
+static int __init intel_mid_platform_init(void)
+{
+	/* Get SFI OEMB Layout */
+	sfi_table_parse(SFI_SIG_OEMB, NULL, NULL, sfi_parse_oemb);
+	sfi_table_parse(SFI_SIG_GPIO, NULL, NULL, sfi_parse_gpio);
+	sfi_table_parse(SFI_SIG_OEM0, NULL, NULL, sfi_parse_oem0);
+	sfi_table_parse(SFI_SIG_DEVS, NULL, NULL, sfi_parse_devs);
+
+	return 0;
+}
+arch_initcall(intel_mid_platform_init);
diff --git a/arch/x86/platform/intel-mid/intel_mid_vrtc.c b/arch/x86/platform/intel-mid/intel_mid_vrtc.c
new file mode 100644
index 0000000..80f3edd
--- /dev/null
+++ b/arch/x86/platform/intel-mid/intel_mid_vrtc.c
@@ -0,0 +1,169 @@
+/*
+ * intel_mid_vrtc.c: Driver for virtual RTC device on Intel MID platform
+ *
+ * (C) Copyright 2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ *
+ * Note:
+ * VRTC is emulated by system controller firmware, the real HW
+ * RTC is located in the PMIC device. SCU FW shadows PMIC RTC
+ * in a memory mapped IO space that is visible to the host IA
+ * processor.
+ *
+ * This driver is based on RTC CMOS driver.
+ */
+
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/sfi.h>
+#include <linux/platform_device.h>
+
+#include <asm/intel-mid.h>
+#include <asm/intel_mid_vrtc.h>
+#include <asm/time.h>
+#include <asm/fixmap.h>
+
+static unsigned char __iomem *vrtc_virt_base;
+
+unsigned char vrtc_cmos_read(unsigned char reg)
+{
+	unsigned char retval;
+
+	/* vRTC's registers range from 0x0 to 0xD */
+	if (reg > 0xd || !vrtc_virt_base)
+		return 0xff;
+
+	lock_cmos_prefix(reg);
+	retval = __raw_readb(vrtc_virt_base + (reg << 2));
+	lock_cmos_suffix(reg);
+	return retval;
+}
+EXPORT_SYMBOL_GPL(vrtc_cmos_read);
+
+void vrtc_cmos_write(unsigned char val, unsigned char reg)
+{
+	if (reg > 0xd || !vrtc_virt_base)
+		return;
+
+	lock_cmos_prefix(reg);
+	__raw_writeb(val, vrtc_virt_base + (reg << 2));
+	lock_cmos_suffix(reg);
+}
+EXPORT_SYMBOL_GPL(vrtc_cmos_write);
+
+unsigned long vrtc_get_time(void)
+{
+	u8 sec, min, hour, mday, mon;
+	unsigned long flags;
+	u32 year;
+
+	spin_lock_irqsave(&rtc_lock, flags);
+
+	while ((vrtc_cmos_read(RTC_FREQ_SELECT) & RTC_UIP))
+		cpu_relax();
+
+	sec = vrtc_cmos_read(RTC_SECONDS);
+	min = vrtc_cmos_read(RTC_MINUTES);
+	hour = vrtc_cmos_read(RTC_HOURS);
+	mday = vrtc_cmos_read(RTC_DAY_OF_MONTH);
+	mon = vrtc_cmos_read(RTC_MONTH);
+	year = vrtc_cmos_read(RTC_YEAR);
+
+	spin_unlock_irqrestore(&rtc_lock, flags);
+
+	/* vRTC YEAR reg contains the offset to 1972 */
+	year += 1972;
+
+	pr_info("vRTC: sec: %d min: %d hour: %d day: %d "
+		"mon: %d year: %d\n", sec, min, hour, mday, mon, year);
+
+	return mktime(year, mon, mday, hour, min, sec);
+}
+
+/* Only care about the minutes and seconds */
+int vrtc_set_mmss(unsigned long nowtime)
+{
+	int real_sec, real_min;
+	unsigned long flags;
+	int vrtc_min;
+
+	spin_lock_irqsave(&rtc_lock, flags);
+	vrtc_min = vrtc_cmos_read(RTC_MINUTES);
+
+	real_sec = nowtime % 60;
+	real_min = nowtime / 60;
+	if (((abs(real_min - vrtc_min) + 15)/30) & 1)
+		real_min += 30;
+	real_min %= 60;
+
+	vrtc_cmos_write(real_sec, RTC_SECONDS);
+	vrtc_cmos_write(real_min, RTC_MINUTES);
+	spin_unlock_irqrestore(&rtc_lock, flags);
+
+	return 0;
+}
+
+void __init intel_mid_rtc_init(void)
+{
+	unsigned long vrtc_paddr;
+
+	sfi_table_parse(SFI_SIG_MRTC, NULL, NULL, sfi_parse_mrtc);
+
+	vrtc_paddr = sfi_mrtc_array[0].phys_addr;
+	if (!sfi_mrtc_num || !vrtc_paddr)
+		return;
+
+	vrtc_virt_base = (void __iomem *)set_fixmap_offset_nocache(FIX_LNW_VRTC,
+								vrtc_paddr);
+	x86_platform.get_wallclock = vrtc_get_time;
+	x86_platform.set_wallclock = vrtc_set_mmss;
+}
+
+/*
+ * The Moorestown platform has a memory mapped virtual RTC device that emulates
+ * the programming interface of the RTC.
+ */
+
+static struct resource vrtc_resources[] = {
+	[0] = {
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.flags	= IORESOURCE_IRQ,
+	}
+};
+
+static struct platform_device vrtc_device = {
+	.name		= "rtc_mrst",
+	.id		= -1,
+	.resource	= vrtc_resources,
+	.num_resources	= ARRAY_SIZE(vrtc_resources),
+};
+
+/* Register the RTC device if appropriate */
+static int __init intel_mid_device_create(void)
+{
+	/* No Moorestown, no device */
+	if (!intel_mid_identify_cpu())
+		return -ENODEV;
+	/* No timer, no device */
+	if (!sfi_mrtc_num)
+		return -ENODEV;
+
+	/* iomem resource */
+	vrtc_resources[0].start = sfi_mrtc_array[0].phys_addr;
+	vrtc_resources[0].end = sfi_mrtc_array[0].phys_addr +
+				MRST_VRTC_MAP_SZ;
+	/* irq resource */
+	vrtc_resources[1].start = sfi_mrtc_array[0].irq;
+	vrtc_resources[1].end = sfi_mrtc_array[0].irq;
+
+	return platform_device_register(&vrtc_device);
+}
+
+module_init(intel_mid_device_create);
diff --git a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
new file mode 100644
index 0000000..015bf42
--- /dev/null
+++ b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
@@ -0,0 +1,22 @@
+/*
+ * intel_mid_weak_decls.h: Weak declarations of intel-mid.c
+ *
+ * (C) Copyright 2012 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+
+/* __attribute__((weak)) makes these declarations overridable */
+extern struct devs_id __initconst device_ids[] __attribute__((weak));
+/* For every CPU addition a new get_<cpuname>_ops interface needs
+ * to be added.
+ */
+extern void * __init get_penwell_ops(void) __attribute__((weak));
+extern void * __init get_cloverview_ops(void) __attribute__((weak));
+extern void * __init get_tangier_ops(void) __attribute__((weak));
+extern void * __init get_anniedale_ops(void) __attribute__((weak));
diff --git a/arch/x86/platform/intel-mid/intel_soc_debug.c b/arch/x86/platform/intel-mid/intel_soc_debug.c
new file mode 100644
index 0000000..d824e59
--- /dev/null
+++ b/arch/x86/platform/intel-mid/intel_soc_debug.c
@@ -0,0 +1,202 @@
+/*
+ * intel_soc_debug.c - This driver provides utility debug api's
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_soc_debug.h>
+
+/* This module currently only supports Intel Tangier
+ * and Anniedale SOCs (CONFIG_INTEL_DEBUG_FEATURE will
+ * only be set in i386_mrfl_defconfig and i386_moor_defconfig).
+ * In addition, a platform check is done in soc_debug_init()
+ * to make sure that this module is only used by appropriate
+ * platforms.
+ */
+#define PGRR_BASE           0xff03a0bc
+#define MAX_MODE_NUMBER     9
+#define MAX_DEBUG_NUMBER    5
+
+static struct dentry *dfs_entry;
+
+enum pgrr_mode {
+	manufacturing_mode = 0x0F,
+	production_mode = 0x07,
+	intel_production_mode = 0x04,
+	oem_production_mode = 0x05,
+	gfx_production_mode = 0x0E,
+	end_user_mode = 0x0B,
+	intel_end_user_mode = 0x08,
+	rma_mode = 0x03,
+	permanent_mode = 0x00
+};
+
+static struct debug_mode {
+	enum pgrr_mode mode;
+	u32 bitmask;
+	char *name;
+} asset_array[] = {
+	{ manufacturing_mode,
+	  DEBUG_FEATURE_PTI | DEBUG_FEATURE_RTIT | DEBUG_FEATURE_USB3DFX |
+	  DEBUG_FEATURE_SOCHAPS | DEBUG_FEATURE_LAKEMORE,
+	  "ManufacturingMode",
+	},
+	{ production_mode,
+	  DEBUG_FEATURE_SOCHAPS | DEBUG_FEATURE_LAKEMORE,
+	  "ProductionMode",
+	},
+	{ intel_production_mode,
+	  DEBUG_FEATURE_PTI | DEBUG_FEATURE_RTIT | DEBUG_FEATURE_USB3DFX |
+	  DEBUG_FEATURE_SOCHAPS | DEBUG_FEATURE_LAKEMORE,
+	  "IntelProductionMode",
+	},
+	{ oem_production_mode,
+	  DEBUG_FEATURE_PTI | DEBUG_FEATURE_RTIT | DEBUG_FEATURE_USB3DFX |
+	  DEBUG_FEATURE_SOCHAPS | DEBUG_FEATURE_LAKEMORE,
+	  "OemProductionMode",
+	},
+	{ gfx_production_mode,
+	  DEBUG_FEATURE_SOCHAPS | DEBUG_FEATURE_LAKEMORE,
+	  "GfxProductionMode",
+	},
+	{ intel_end_user_mode,
+	  DEBUG_FEATURE_PTI | DEBUG_FEATURE_RTIT | DEBUG_FEATURE_USB3DFX |
+	  DEBUG_FEATURE_SOCHAPS | DEBUG_FEATURE_LAKEMORE,
+	  "IntelEndUserMode",
+	},
+	{ end_user_mode,
+	  DEBUG_FEATURE_SOCHAPS | DEBUG_FEATURE_LAKEMORE,
+	  "EndUserMode",
+	},
+	{ rma_mode,
+	  DEBUG_FEATURE_PTI | DEBUG_FEATURE_RTIT | DEBUG_FEATURE_USB3DFX |
+	  DEBUG_FEATURE_SOCHAPS | DEBUG_FEATURE_LAKEMORE,
+	  "RmaMode",
+	},
+	{ permanent_mode,
+	  DEBUG_FEATURE_SOCHAPS | DEBUG_FEATURE_LAKEMORE,
+	  "PermanentMode",
+	}
+};
+
+static int debug_mode_idx; /* index in asset_array */
+
+static struct debug_feature {
+	u32 bit;
+	char *name;
+} debug_feature_array[] = {
+	{ DEBUG_FEATURE_PTI,
+	  "PTI",
+	},
+	{ DEBUG_FEATURE_RTIT,
+	  "RTIT",
+	},
+	{ DEBUG_FEATURE_LAKEMORE,
+	  "LAKERMORE",
+	},
+	{ DEBUG_FEATURE_SOCHAPS,
+	  "SOCHAPS",
+	},
+	{ DEBUG_FEATURE_USB3DFX,
+	  "USB3DFX",
+	},
+};
+
+int cpu_has_debug_feature(u32 bit)
+{
+	if (asset_array[debug_mode_idx].bitmask & bit)
+		return 1;
+
+	return  0;
+}
+EXPORT_SYMBOL(cpu_has_debug_feature);
+
+static int show_debug_feature(struct seq_file *s, void *unused)
+{
+	int i = 0;
+
+	if (debug_mode_idx >= 0 && (debug_mode_idx < MAX_MODE_NUMBER)) {
+		seq_printf(s, "Profile: %s\n",
+			   asset_array[debug_mode_idx].name);
+
+		for (i = 0; i < MAX_DEBUG_NUMBER; i++)
+			if (cpu_has_debug_feature(debug_feature_array[i].bit))
+				seq_printf(s, "%s\n",
+					   debug_feature_array[i].name);
+	}
+
+	return 0;
+}
+
+static int debug_feature_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, show_debug_feature, NULL);
+}
+
+static const struct file_operations debug_feature_ops = {
+	.open		= debug_feature_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+int __init soc_debug_init(void)
+{
+	u32 __iomem *pgrr;
+	int i = 0;
+	enum pgrr_mode soc_debug_setting = 0;
+
+	if ((intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_TANGIER) &&
+	    (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_ANNIEDALE))
+		return -EINVAL;
+
+	/* Read Policy Generator Result Register */
+	pgrr = ioremap_nocache(PGRR_BASE, sizeof(u32));
+	if (pgrr == NULL)
+		return -EFAULT;
+
+	pr_info("pgrr = %08x\n", *pgrr);
+	soc_debug_setting = *pgrr & 0x0F;
+	iounmap(pgrr);
+
+	for (i = 0; i < MAX_MODE_NUMBER; i++)
+		if (asset_array[i].mode == soc_debug_setting)
+			break;
+
+	if (i == MAX_MODE_NUMBER)
+		return -EFAULT;
+
+	debug_mode_idx = i;
+
+	dfs_entry = debugfs_create_file("debug_feature", S_IFREG | S_IRUGO,
+					NULL, NULL, &debug_feature_ops);
+
+	return 0;
+}
+arch_initcall(soc_debug_init);
+
+void __exit soc_debug_exit(void)
+{
+	debugfs_remove(dfs_entry);
+}
+module_exit(soc_debug_exit);
diff --git a/arch/x86/platform/intel-mid/intel_soc_dump.c b/arch/x86/platform/intel-mid/intel_soc_dump.c
new file mode 100644
index 0000000..2441b1c
--- /dev/null
+++ b/arch/x86/platform/intel-mid/intel_soc_dump.c
@@ -0,0 +1,1586 @@
+/*
+ * intel_soc_dump.c - This driver provides a debugfs interface to read or
+ * write any registers inside the SoC. Supported access methods are:
+ * mmio, msg_bus, pci and i2c.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ * Author: Bin Gao <bin.gao@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+/*
+ * Two files are created in debugfs root folder: dump_cmd and dump_output.
+ * Echo a dump command to the file dump_cmd, and then cat the file dump_output.
+ * Even for write command, you still have to run "cat dump_output", otherwise
+ * the data will not be really written.
+ *
+ * It works like this:
+ * $ echo "dump command" > dump_cmd
+ * $ cat dump_output
+ *
+ * I/O memory read: echo "r[1|2|4] mmio <addr> [<len>]" > dump_cmd
+ *     e.g.  echo "r mmio 0xff180000" > dump_cmd
+ *
+ * I/O memory write: echo "w[1|2|4] <addr> <val>" > dump_cmd
+ *     e.g.  echo "w mmio 0xff190000 0xf0107a08" > dump_cmd
+ *
+ * I/O port read: echo "r[1|2|4] port <port>" > dump_cmd
+ *     e.g.  echo "r port 0xcf8" > dump_cmd
+ *
+ * I/O port write: echo "w[1|2|4] <port> <val>" > dump_cmd
+ *     e.g.  echo "w4 port 0xcfc 0x80002188" > dump_cmd
+ *
+ * message bus read: echo "r msg_bus <port> <addr> [<len>]" > dump_cmd
+ *     e.g.  echo "r msg_bus 0x02 0x30" > dump_cmd
+ *
+ * message bus write: echo "w msg_bus <port> <addr> <val>" > dump_cmd
+ *     e.g.  echo "w msg_bus 0x02 0x30 0x1020003f" > dump_cmd
+ *
+ * pci config read: echo "r[1|2|4] pci <bus> <dev> <func> <reg> [<len>]" >
+ * dump_cmd
+ *     e.g.  echo "r1 pci 0 2 0 0x20" > dump_cmd
+ *
+ * pci config write: echo "w[1|2|4] pci <bus> <dev> <func> <reg> <value>" >
+ * dump_cmd
+ *     e.g.  echo "w pci 0 2 0 0x20 0x380020f3" > dump_cmd
+ *
+ * msr read: echo "r[4|8]  msr [<cpu>|all] <reg>" > dump_cmd
+ * read cab be 32bit(r4) or 64bit(r8), default is r8 (=r)
+ * cpu can be 0, 1, 2, 3, ... or all, default is all
+ *     e.g.  echo "r msr 0 0xcd" > dump_cmd
+ *     (read all cpu's msr reg 0xcd in 64bit mode)
+ *
+ * msr write: echo "w[4|8] msr [<cpu>|all] <reg> <val>" > dump_cmd
+ * write cab be 32bit(w4) or 64bit(w8), default is w8 (=w)
+ * cpu can be 0, 1, 2, 3, ... or all, default is all
+ *     e.g.  echo "w msr 1 289 0xf03090a0cc73be64" > dump_cmd
+ *     (write value 0xf03090a0cc73be64 to cpu 1's msr reg 289 in 64bit mode)
+ *
+ * i2c read:  echo "r i2c <bus> <addr>" > dump_cmd
+ *     e.g.  echo "r i2c 1 0x3e" > dump_cmd
+ *
+ * i2c write: echo "w i2c <bus> <addr> <val>" > dump_cmd
+ *      e.g.  echo "w i2c 2 0x70 0x0f" > dump_cmd
+ *
+ * SCU indirect memory read: echo "r[4] scu <addr>" > dump_cmd
+ *     e.g.  echo "r scu 0xff108194" > dump_cmd
+ *
+ * SCU indirect memory write: echo "w[4] scu <addr> <val>" > dump_cmd
+ *     e.g.  echo "w scu 0xff108194 0x03000001" > dump_cmd
+ *
+ *  SCU indirect read/write is limited to those addresses in
+ *  IndRdWrValidAddrRange array in SCU FW.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/debugfs.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/seq_file.h>
+#include <linux/i2c.h>
+#include <linux/pm_runtime.h>
+#include <asm/uaccess.h>
+#include <asm/intel-mid.h>
+#include <asm/processor.h>
+#include <asm/msr.h>
+#include <asm/intel_mid_rpmsg.h>
+
+#define MAX_CMDLEN		96
+#define MAX_ERRLEN		255
+#define MIN_ARGS_NUM		3
+#define MAX_ARGS_NUM		8
+#define MAX_MMIO_PCI_LEN	4096
+#define MAX_MSG_BUS_LEN		64
+
+#define ACCESS_WIDTH_DEFAULT	0
+#define ACCESS_WIDTH_8BIT	1
+#define ACCESS_WIDTH_16BIT	2
+#define ACCESS_WIDTH_32BIT	4
+#define ACCESS_WIDTH_64BIT	8
+
+#define ACCESS_BUS_MMIO		1 /* I/O memory */
+#define ACCESS_BUS_PORT		2 /* I/O port */
+#define ACCESS_BUS_MSG_BUS	3 /* message bus */
+#define ACCESS_BUS_PCI		4 /* PCI bus */
+#define ACCESS_BUS_MSR		5 /* MSR registers */
+#define ACCESS_BUS_I2C		6 /* I2C bus */
+#define ACCESS_BUS_SCU_INDRW	7 /* SCU indirect read/write */
+
+#define ACCESS_DIR_READ		1
+#define ACCESS_DIR_WRITE	2
+
+#define RP_INDIRECT_READ	0x02 /* MSG_ID for indirect read via SCU */
+#define RP_INDIRECT_WRITE	0x05 /* MSG_ID for indirect write via SCU */
+
+#define SHOW_NUM_PER_LINE	(32 / access_width)
+#define LINE_WIDTH		(access_width * SHOW_NUM_PER_LINE)
+#define IS_WHITESPACE(c)	((c) == ' ' || (c) == '\t' || (c) == '\n')
+#define ADDR_RANGE(start, size, addr) \
+	((addr >= start) && (addr < (start + size)))
+
+/* mmio <--> device map */
+struct mmio_pci_map {
+	u32 start;
+	size_t size;
+	u32 pci_bus:8;
+	u32 pci_dev:8;
+	u32 pci_func:8;
+	char name[24];
+};
+
+static struct dentry *dump_cmd_dentry, *dump_output_dentry;
+static int dump_cmd_was_set;
+static char dump_cmd_buf[MAX_CMDLEN], err_buf[MAX_ERRLEN + 1];
+
+static int access_dir, access_width, access_bus, access_len;
+static u32 access_value;
+static u64 access_value_64;
+
+/* I/O memory */
+static u32 mmio_addr;
+
+/* I/O port */
+static unsigned port_addr;
+
+/* msg_bus */
+static u8 msg_bus_port;
+static u32 msg_bus_addr;
+
+/* pci */
+static u8 pci_bus, pci_dev, pci_func;
+static u16 pci_reg;
+
+/* msr */
+static int msr_cpu;
+static u32 msr_reg;
+
+/* i2c */
+static u8 i2c_bus;
+static u32 i2c_addr;
+
+/* scu */
+static u32 scu_addr;
+
+static const struct mmio_pci_map soc_pnw_map[] = {
+	{ 0xff128000, 0x400, 0, 0, 1, "SPI0" },
+	{ 0xff128400, 0x400, 0, 0, 2, "SPI1" },
+	{ 0xff128800, 0x400, 0, 2, 4, "SPI2" },
+
+	{ 0xff12a000, 0x400, 0, 0, 3, "I2C0" },
+	{ 0xff12a400, 0x400, 0, 0, 4, "I2C1" },
+	{ 0xff12a800, 0x400, 0, 0, 5, "I2C2" },
+	{ 0xff12ac00, 0x400, 0, 3, 2, "I2C3" },
+	{ 0xff12b000, 0x400, 0, 3, 3, "I2C4" },
+	{ 0xff12b400, 0x400, 0, 3, 4, "I2C5" },
+
+	{ 0xffae5800, 0x400, 0, 2, 7, "SSP0" },
+	{ 0xffae6000, 0x400, 0, 1, 4, "SSP1" },
+	{ 0xffae6400, 0x400, 0, 1, 3, "SSP2" },
+	{ 0xffaf0000, 0x800, 0, 2, 6, "LPE DMA1" },
+
+	{ 0xff0d0000, 0x10000, 0, 1, 5, "SEP SECURITY" },
+	{ 0xff11c000, 0x400, 0, 1, 7, "SCU IPC1" },
+
+	{ 0xdff00000, 0x100000, 0, 2, 0, "GVD BAR0" },
+	{ 0x40000000, 0x10000000, 0, 2, 0, "GVD BAR2" },
+	{ 0xdfec0000, 0x40000, 0, 2, 0, "GVD BAR3" },
+
+	{ 0xff11d000, 0x1000, 0, 2, 2, "PMU" },
+	{ 0xffa60000, 0x20000, 0, 2, 3, "USB OTG" },
+
+	{ 0xdf800000, 0x400000, 0, 3, 0, "ISP" },
+
+	{ 0xff12c000, 0x800, 0, 2, 1, "GPIO0" },
+	{ 0xff12c800, 0x800, 0, 3, 5, "GPIO1" },
+	{ 0xff12b800, 0x800, 0, 2, 5, "GP DMA" },
+
+	{ 0xffa58000, 0x100, 0, 4, 0, "SDIO0(HC2)" },
+	{ 0xffa5c000, 0x100, 0, 4, 1, "SDIO1(HC1a)" },
+	{ 0xffa2a000, 0x100, 0, 4, 2, "SDIO3(HC1b)" },
+	{ 0xffa50000, 0x100, 0, 1, 0, "SDIO3/eMMC0(HC0a)" },
+	{ 0xffa54000, 0x100, 0, 1, 1, "SDIO4/eMMC1(HC0b)" },
+
+	{ 0xffa28080, 0x80, 0, 5, 0, "UART0" },
+	{ 0xffa28100, 0x80, 0, 5, 1, "UART1" },
+	{ 0xffa28180, 0x80, 0, 5, 2, "UART2" },
+	{ 0xffa28400, 0x400, 0, 5, 3, "UART DMA" },
+
+	{ 0xffa2e000, 0x400, 0, 6, 0, "PTI" },
+
+	/* no address assigned:	{ 0x0, 0, 0, 6, 1, "xx" }, */
+
+	{ 0xffa29000, 0x800, 0, 6, 3, "HSI" },
+	{ 0xffa29800, 0x800, 0, 6, 4, "HSI DMA" },
+};
+
+static const struct mmio_pci_map soc_clv_map[] = {
+	{ 0xff138000, 0x400, 0, 0, 3, "I2C0" },
+	{ 0xff139000, 0x400, 0, 0, 4, "I2C1" },
+	{ 0xff13a000, 0x400, 0, 0, 5, "I2C2" },
+	{ 0xff13b000, 0x400, 0, 3, 2, "I2C3" },
+	{ 0xff13c000, 0x400, 0, 3, 3, "I2C4" },
+	{ 0xff13d000, 0x400, 0, 3, 4, "I2C5" },
+
+	{ 0xff128000, 0x400, 0, 0, 1, "SPI0/MSIC" },
+	{ 0xff135000, 0x400, 0, 0, 2, "SPI1" },
+	{ 0xff136000, 0x400, 0, 2, 4, "SPI2" },
+	/* invisible to IA: { 0xff137000, 0, -1, -1, -1, "SPI3" }, */
+
+	{ 0xffa58000, 0x100, 0, 4, 0, "SDIO0 (HC2)" },
+	{ 0xffa48000, 0x100, 0, 4, 1, "SDIO1 (HC1a)" },
+	{ 0xffa4c000, 0x100, 0, 4, 2, "SDIO2 (HC1b)" },
+	{ 0xffa50000, 0x100, 0, 1, 0, "SDIO3/eMMC0 (HC0a)" },
+	{ 0xffa54000, 0x100, 0, 1, 1, "SDIO4/eMMC1 (HC0b)" },
+
+	{ 0xff119000, 0x800, 0, 2, 1, "GPIO0" },
+	{ 0xff13f000, 0x800, 0, 3, 5, "GPIO1" },
+	{ 0xff13e000, 0x800, 0, 2, 5, "GP DMA" },
+
+	{ 0xffa20000, 0x400, 0, 2, 7, "SSP0" },
+	{ 0xffa21000, 0x400, 0, 1, 4, "SSP1" },
+	{ 0xffa22000, 0x400, 0, 1, 3, "SSP2" },
+	/* invisible to IA: { 0xffa23000, 0, -1, -1, -1, "SSP3" }, */
+
+	/* invisible to IA: { 0xffaf8000, 0, -1, -1, -1, "LPE DMA0" }, */
+	{ 0xffaf0000, 0x800, 0, 2, 6, "LPE DMA1" },
+	{ 0xffae8000, 0x1000, 0, 1, 3, "LPE SHIM" },
+	/* { 0xffae9000, 0, 0, 6, 5, "VIBRA" }, LPE SHIM BASE + 0x1000 */
+
+	{ 0xffa28080, 0x80, 0, 5, 0, "UART0" },
+	{ 0xffa28100, 0x80, 0, 5, 1, "UART1" },
+	{ 0xffa28180, 0x80, 0, 5, 2, "UART2" },
+	{ 0xffa28400, 0x400, 0, 5, 3, "UART DMA" },
+
+	{ 0xffa29000, 0x800, 0, 6, 3, "HSI" },
+	{ 0xffa2a000, 0x800, 0, 6, 4, "HSI DMA" },
+
+	{ 0xffa60000, 0x20000, 0, 2, 3, "USB OTG" },
+	{ 0xffa80000, 0x60000, 0, 6, 5, "USB SPH" },
+
+	{ 0xff0d0000, 0x10000, 0, 1, 5, "SEP SECURITY" },
+
+	{ 0xdff00000, 0x100000, 0, 2, 0, "GVD BAR0" },
+	{ 0x40000000, 0x10000000, 0, 2, 0, "GVD BAR2" },
+	{ 0xdfec0000, 0x40000, 0, 2, 0, "GVD BAR3" },
+	/* No address assigned: { 0x0, 0, 0, 6, 1, "HDMI HOTPLUG" }, */
+
+	{ 0xdf800000, 0x400000, 0, 3, 0, "ISP" },
+
+	{ 0xffa2e000, 0x400, 0, 6, 0, "PTI" },
+	{ 0xff11c000, 0x400, 0, 1, 7, "SCU IPC1" },
+	{ 0xff11d000, 0x1000, 0, 2, 2, "PMU" },
+};
+
+static const struct mmio_pci_map soc_tng_map[] = {
+	/* I2C0 is reserved for SCU<-->PMIC communication */
+	{ 0xff18b000, 0x400, 0, 8, 0, "I2C1" },
+	{ 0xff18c000, 0x400, 0, 8, 1, "I2C2" },
+	{ 0xff18d000, 0x400, 0, 8, 2, "I2C3" },
+	{ 0xff18e000, 0x400, 0, 8, 3, "I2C4" },
+	{ 0xff18f000, 0x400, 0, 9, 0, "I2C5" },
+	{ 0xff190000, 0x400, 0, 9, 1, "I2C6" },
+	{ 0xff191000, 0x400, 0, 9, 2, "I2C7" },
+
+	/* SDIO controllers number: 4 (compared to 5 of PNW/CLV) */
+	{ 0xff3fa000, 0x100, 0, 1, 2, "SDIO0 (HC2)" },
+	{ 0xff3fb000, 0x100, 0, 1, 3, "SDIO1 (HC1a)" },
+	{ 0xff3fc000, 0x100, 0, 1, 0, "SDIO3/eMMC0 (HC0a)" },
+	{ 0xff3fd000, 0x100, 0, 1, 1, "SDIO4/eMMC1 (HC0b)" },
+
+	/* GPIO0 and GPIO1 are merged to one GPIO controller in TNG */
+	{ 0xff008000, 0x1000, 0, 12, 0, "GPIO" },
+	{ 0xff192000, 0x1000, 0, 21, 0, "GP DMA" },
+
+	/* SSP Audio: SSP0: Modem, SSP1: Audio Codec, SSP2: Bluetooth */
+
+	/* LPE */
+	{ 0xff340000, 0x4000, 0, 13, 0, "LPE SHIM" },
+	{ 0xff344000, 0x1000, 0, 13, 0, "MAILBOX RAM" },
+	{ 0xff2c0000, 0x14000, 0, 13, 0, "ICCM" },
+	{ 0xff300000, 0x28000, 0, 13, 0, "DCCM" },
+	{ 0xff298000, 0x4000, 0, 14, 0, "LPE DMA0" },
+	/* invisible to IA: { 0xff29c000, 0x4000, -1, -1, -1, "LPE DMA1" }, */
+
+
+	/* SSP SC: SSP4: used by SCU for SPI Debug Card */
+	/* invisible to IA: { 0xff00e000, 0x1000, -1, -1, -1, "SSP SC" }, */
+
+	/* SSP General Purpose */
+	{ 0xff188000, 0x1000, 0, 7, 0, "SSP3" },
+	{ 0xff189000, 0x1000, 0, 7, 1, "SSP5" },
+	{ 0xff18a000, 0x1000, 0, 7, 2, "SSP6" },
+
+	/* UART */
+	{ 0xff010080, 0x80, 0, 4, 1, "UART0" },
+	{ 0xff011000, 0x80, 0, 4, 2, "UART1" },
+	{ 0xff011080, 0x80, 0, 4, 3, "UART2" },
+	{ 0xff011400, 0x400, 0, 5, 0, "UART DMA" },
+
+	/* HSI */
+	{ 0xff3f8000, 0x1000, 0, 10, 0, "HSI" },
+
+	/* USB */
+	{ 0xf9040000, 0x20000, 0, 15, 0, "USB2 OTG" },
+	{ 0xf9060000, 0x20000, 0, 16, 0, "USB2 MPH/HSIC" },
+	{ 0xf9100000, 0x100000, 0, 17, 0, "USB3 OTG" },
+	/* { 0xf90f0000, 0x1000, -1, -1, -1, "USB3 PHY" }, */
+	/* { 0xf90a0000, 0x10000, -1, -1, -1, "USB3 DMA FETCH" }, */
+
+	/* Security/Chaabi */
+	{ 0xf9030000, 0x1000, 0, 11, 0, "SEP SECURITY" },
+
+	/* Graphics/Display */
+	{ 0xc0000000, 0x2000000, 0, 2, 0, "GVD BAR0" },
+	{ 0x80000000, 0x10000000, 0, 2, 0, "GVD BAR2" },
+
+	/* ISP */
+	{ 0xc2000000, 0x400000, 0, 3, 0, "ISP" },
+
+	/* PTI */
+	{ 0xf9009000, 0x1000, 0, 18, 0, "PTI STM" },
+	{ 0xf90a0000, 0x10000, 0, 18, 0, "PTI USB3 DMA FETCH" },
+	{ 0xfa000000, 0x1000000, 0, 18, 0, "PTI APERTURE A" },
+
+	{ 0xff009000, 0x1000, 0, 19, 0, "SCU-IA IPC" },
+	{ 0xff00b000, 0x1000, 0, 20, 0, "PMU" },
+};
+
+static struct pci_dev *mmio_to_pci(u32 addr, char **name)
+{
+	int i, count;
+	struct mmio_pci_map *map;
+
+	if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_PENWELL) {
+		count = ARRAY_SIZE(soc_pnw_map);
+		map = (struct mmio_pci_map *) &soc_pnw_map[0];
+	} else if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_CLOVERVIEW) {
+		count = ARRAY_SIZE(soc_clv_map);
+		map = (struct mmio_pci_map *) &soc_clv_map[0];
+	} else if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER) {
+		count = ARRAY_SIZE(soc_tng_map);
+		map = (struct mmio_pci_map *) &soc_tng_map[0];
+	} else {
+		return NULL;
+	}
+
+	for (i = 0; i < count; i++) {
+		if (ADDR_RANGE(map[i].start, map[i].size, addr))
+			break;
+	}
+
+	if (i >= count)
+		return NULL;
+
+	*name = &map[i].name[0];
+	return pci_get_bus_and_slot(map[i].pci_bus,
+		PCI_DEVFN(map[i].pci_dev, map[i].pci_func));
+}
+
+static int parse_argument(char *input, char **args)
+{
+	int count, located;
+	char *p = input;
+	int input_len = strlen(input);
+
+	count = 0;
+	located = 0;
+	while (*p != 0) {
+		if (p - input >= input_len)
+			break;
+
+		/* Locate the first character of a argument */
+		if (!IS_WHITESPACE(*p)) {
+			if (!located) {
+				located = 1;
+				args[count++] = p;
+				if (count > MAX_ARGS_NUM)
+					break;
+			}
+		} else {
+			if (located) {
+				*p = 0;
+				located = 0;
+			}
+		}
+		p++;
+	}
+
+	return count;
+}
+
+static int dump_cmd_show(struct seq_file *s, void *unused)
+{
+	seq_printf(s, dump_cmd_buf);
+	return 0;
+}
+
+static int dump_cmd_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, dump_cmd_show, NULL);
+}
+
+static int parse_mmio_args(char **arg_list, int arg_num)
+{
+	int ret;
+
+	if (arg_num < 3) {
+		snprintf(err_buf, MAX_ERRLEN, "too few arguments\n"
+			"usage: r[1|2|4] <mmio> <addr> [<len>]\n"
+			"       w[1|2|4] <mmio> <addr> <val>\n");
+		goto failed;
+	}
+
+	if (access_width == ACCESS_WIDTH_DEFAULT)
+		access_width = ACCESS_WIDTH_32BIT;
+
+	ret = kstrtou32(arg_list[2], 0, &mmio_addr);
+	if (ret) {
+		snprintf(err_buf, MAX_ERRLEN, "invalid mmio address %s\n",
+							 arg_list[2]);
+		goto failed;
+	}
+
+	if ((access_width == ACCESS_WIDTH_32BIT) &&
+		(mmio_addr % 4)) {
+		snprintf(err_buf, MAX_ERRLEN,
+			"addr %x is not 4 bytes aligned!\n",
+						mmio_addr);
+		goto failed;
+	}
+
+	if ((access_width == ACCESS_WIDTH_16BIT) &&
+		(mmio_addr % 2)) {
+		snprintf(err_buf, MAX_ERRLEN,
+			"addr %x is not 2 bytes aligned!\n",
+						mmio_addr);
+		goto failed;
+	}
+
+	if (access_dir == ACCESS_DIR_READ) {
+		if (arg_num == 4) {
+			ret = kstrtou32(arg_list[3], 0, &access_len);
+			if (ret) {
+				snprintf(err_buf, MAX_ERRLEN,
+					"invalid mmio read length %s\n",
+							arg_list[3]);
+				goto failed;
+			}
+		} else if (arg_num > 4) {
+			snprintf(err_buf, MAX_ERRLEN,
+				"usage: r[1|2|4] mmio <addr> "
+						"[<len>]\n");
+			goto failed;
+		}
+	}
+
+	if (access_dir == ACCESS_DIR_WRITE) {
+		if (arg_num != 4) {
+			snprintf(err_buf, MAX_ERRLEN,
+				"need exact 4 arguments for "
+					"mmio write.\n");
+			goto failed;
+		}
+		ret = kstrtou32(arg_list[3], 0, &access_value);
+		if (ret) {
+			snprintf(err_buf, MAX_ERRLEN,
+				"invalid mmio address %s\n",
+						arg_list[3]);
+			goto failed;
+		}
+	}
+
+	return 0;
+
+failed:
+	return -EINVAL;
+}
+
+static int parse_port_args(char **arg_list, int arg_num)
+{
+	int ret;
+
+	if (arg_num < 2) {
+		snprintf(err_buf, MAX_ERRLEN, "too few arguments\n"
+			"usage: r[1|2|4] port <port>\n"
+			"       w[1|2|4] port <port> <val>\n");
+		goto failed;
+	}
+
+	if (access_width == ACCESS_WIDTH_DEFAULT)
+		access_width = ACCESS_WIDTH_8BIT;
+
+	ret = kstrtou16(arg_list[2], 0, (u16 *)&port_addr);
+	if (ret) {
+		snprintf(err_buf, MAX_ERRLEN, "invalid port address %s\n",
+							 arg_list[2]);
+		goto failed;
+	}
+
+	if ((access_width == ACCESS_WIDTH_32BIT) &&
+		(port_addr % ACCESS_WIDTH_32BIT)) {
+		snprintf(err_buf, MAX_ERRLEN,
+			"port %x is not 4 bytes aligned!\n", port_addr);
+		goto failed;
+	}
+
+	if ((access_width == ACCESS_WIDTH_16BIT) &&
+		(port_addr % ACCESS_WIDTH_16BIT)) {
+		snprintf(err_buf, MAX_ERRLEN,
+			"port %x is not 2 bytes aligned!\n", port_addr);
+		goto failed;
+	}
+
+	if (access_dir == ACCESS_DIR_READ) {
+		if (arg_num != 3) {
+			snprintf(err_buf, MAX_ERRLEN,
+				"usage: r[1|2|4] port <port>\n");
+			goto failed;
+		}
+	}
+
+	if (access_dir == ACCESS_DIR_WRITE) {
+		if (arg_num != 4) {
+			snprintf(err_buf, MAX_ERRLEN,
+				"need exact 4 arguments for port write.\n");
+			goto failed;
+		}
+		ret = kstrtou32(arg_list[3], 0, &access_value);
+		if (ret) {
+			snprintf(err_buf, MAX_ERRLEN,
+				"invalid value %s\n", arg_list[3]);
+			goto failed;
+		}
+	}
+
+	return 0;
+
+failed:
+	return -EINVAL;
+}
+
+static int parse_msg_bus_args(char **arg_list, int arg_num)
+{
+	int ret;
+
+	if (arg_num < 4) {
+		snprintf(err_buf, MAX_ERRLEN, "too few arguments\n"
+			"usage: r msg_bus <port> <addr> [<len>]\n"
+			"       w msg_bus <port> <addr> <val>\n");
+		goto failed;
+	}
+
+	if (access_width == ACCESS_WIDTH_DEFAULT)
+		access_width = ACCESS_WIDTH_32BIT;
+
+	if (access_width != ACCESS_WIDTH_32BIT) {
+		snprintf(err_buf, MAX_ERRLEN,
+			"only 32bit read/write are supported.\n");
+		goto failed;
+	}
+
+	ret = kstrtou8(arg_list[2], 0, &msg_bus_port);
+	if (ret || msg_bus_port > 255) {
+		snprintf(err_buf, MAX_ERRLEN, "invalid msg_bus port %s\n",
+								arg_list[2]);
+		goto failed;
+	}
+
+	ret = kstrtou32(arg_list[3], 0, &msg_bus_addr);
+	if (ret) {
+		snprintf(err_buf, MAX_ERRLEN, "invalid msg_bus address %s\n",
+								arg_list[3]);
+		goto failed;
+	}
+
+	if (access_dir == ACCESS_DIR_READ) {
+		if (arg_num == 5) {
+			ret = kstrtou32(arg_list[4], 0, &access_len);
+			if (ret) {
+				snprintf(err_buf, MAX_ERRLEN,
+					"invalid msg_bus read length %s\n",
+								arg_list[4]);
+				goto failed;
+			}
+		} else if (arg_num > 5) {
+			snprintf(err_buf, MAX_ERRLEN, "too many arguments\n"
+						"usage: r[1|2|4] msg_bus "
+						"<port> <addr> [<len>]\n");
+			goto failed;
+		}
+	}
+
+	if (access_dir == ACCESS_DIR_WRITE) {
+		if (arg_num != 5) {
+			snprintf(err_buf, MAX_ERRLEN, "too few arguments\n"
+				"usage: w msg_bus <port> <addr> <val>]\n");
+			goto failed;
+		}
+		ret = kstrtou32(arg_list[4], 0, &access_value);
+		if (ret) {
+			snprintf(err_buf, MAX_ERRLEN,
+				"invalid value for msg_bus write %s\n",
+							 arg_list[4]);
+			goto failed;
+		}
+	}
+
+	return 0;
+
+failed:
+	return -EINVAL;
+}
+
+static int parse_pci_args(char **arg_list, int arg_num)
+{
+	int ret;
+
+	if (arg_num < 6) {
+		snprintf(err_buf, MAX_ERRLEN, "too few arguments\n"
+			"usage: r[1|2|4] pci <bus> <dev> <func> <reg> [<len>]\n"
+			"       w[1|2|4] pci <bus> <dev> <func> <reg> <val>\n");
+		goto failed;
+	}
+
+	if (access_width == ACCESS_WIDTH_DEFAULT)
+		access_width = ACCESS_WIDTH_32BIT;
+
+	ret = kstrtou8(arg_list[2], 0, &pci_bus);
+	if (ret || pci_bus > 255) {
+		snprintf(err_buf, MAX_ERRLEN, "invalid pci bus %s\n",
+							arg_list[2]);
+		goto failed;
+	}
+
+	ret = kstrtou8(arg_list[3], 0, &pci_dev);
+	if (ret || pci_dev > 255) {
+		snprintf(err_buf, MAX_ERRLEN, "invalid pci device %s\n",
+							arg_list[3]);
+		goto failed;
+	}
+
+	ret = kstrtou8(arg_list[4], 0, &pci_func);
+	if (ret || pci_func > 255) {
+		snprintf(err_buf, MAX_ERRLEN, "invalid pci function %s\n",
+							arg_list[4]);
+		goto failed;
+	}
+
+	ret = kstrtou16(arg_list[5], 0, &pci_reg);
+	if (ret || pci_reg > 4 * 1024) {
+		snprintf(err_buf, MAX_ERRLEN, "invalid pci register %s\n",
+							arg_list[5]);
+		goto failed;
+	}
+
+	if ((access_width == ACCESS_WIDTH_32BIT) && (pci_reg % 4)) {
+		snprintf(err_buf, MAX_ERRLEN, "reg %x is not 4 bytes aligned!\n"
+							 , (u32) pci_reg);
+		goto failed;
+	}
+
+	if ((access_width == ACCESS_WIDTH_16BIT) && (pci_reg % 2)) {
+		snprintf(err_buf, MAX_ERRLEN, "reg %x is not 2 bytes aligned\n",
+								pci_reg);
+		goto failed;
+	}
+
+	if (access_dir == ACCESS_DIR_READ) {
+		if (arg_num == 7) {
+			ret = kstrtou32(arg_list[6], 0, &access_len);
+			if (ret || access_len > 4 * 1024) {
+				snprintf(err_buf, MAX_ERRLEN,
+					"invalid pci read length %s\n",
+							arg_list[6]);
+				return ret;
+			}
+		} else if (arg_num > 7) {
+			snprintf(err_buf, MAX_ERRLEN,
+				"max 7 args are allowed for pci read\n"
+				"usage: r[1|2|4] pci <bus> <dev> <func> "
+							"<reg> [<len>]\n");
+			goto failed;
+		}
+	}
+
+	if (access_dir == ACCESS_DIR_WRITE) {
+		if (arg_num != 7) {
+			snprintf(err_buf, MAX_ERRLEN,
+				"need exact 7 args for pci write.\n");
+			goto failed;
+		}
+		ret = kstrtou32(arg_list[6], 0, &access_value);
+		if (ret) {
+			snprintf(err_buf, MAX_ERRLEN,
+				"invalid value for pci write %s\n",
+							 arg_list[6]);
+			goto failed;
+		}
+	}
+
+	return 0;
+
+failed:
+	return -EINVAL;
+}
+
+static int parse_msr_args(char **arg_list, int arg_num)
+{
+	int ret, arg_reg, arg_val;
+
+	if (((access_dir == ACCESS_DIR_READ) && (arg_num < 3)) ||
+		((access_dir == ACCESS_DIR_WRITE) && (arg_num < 4))) {
+		snprintf(err_buf, MAX_ERRLEN, "too few arguments\n"
+			"usage: r[4|8] msr [<cpu> | all] <reg>]\n"
+			"       w[4|8] msr [<cpu> | all] <reg> <val>]\n");
+		goto failed;
+	}
+
+	if (((access_dir == ACCESS_DIR_READ) && (arg_num > 4)) ||
+		((access_dir == ACCESS_DIR_WRITE) && (arg_num > 5))) {
+		snprintf(err_buf, MAX_ERRLEN, "too many arguments\n"
+			"usage: r[4|8] msr [<cpu> | all] <reg>]\n"
+			"       w[4|8] msr [<cpu> | all] <reg> <val>]\n");
+		goto failed;
+	}
+
+	if (access_width == ACCESS_WIDTH_DEFAULT)
+		access_width = ACCESS_WIDTH_64BIT;
+
+	if (!strncmp(arg_list[2], "all", 3)) {
+		msr_cpu = -1;
+		arg_reg = 3;
+		arg_val = 4;
+	} else if ((access_dir == ACCESS_DIR_READ && arg_num == 4) ||
+		(access_dir == ACCESS_DIR_WRITE && arg_num == 5)) {
+		ret = kstrtou32(arg_list[2], 0, &msr_cpu);
+		if (ret) {
+			snprintf(err_buf, MAX_ERRLEN, "invalid cpu: %s\n",
+							arg_list[2]);
+			goto failed;
+		}
+		arg_reg = 3;
+		arg_val = 4;
+	} else {
+		/* Default cpu for msr read is all, for msr write is 0 */
+		if (access_dir == ACCESS_DIR_READ)
+			msr_cpu = -1;
+		else
+			msr_cpu = 0;
+		arg_reg = 2;
+		arg_val = 3;
+	}
+
+
+	ret = kstrtou32(arg_list[arg_reg], 0, &msr_reg);
+	if (ret) {
+		snprintf(err_buf, MAX_ERRLEN, "invalid msr reg: %s\n",
+							arg_list[2]);
+		goto failed;
+	}
+	if (access_dir == ACCESS_DIR_WRITE) {
+		if (access_width == ACCESS_WIDTH_32BIT)
+			ret = kstrtou32(arg_list[arg_val], 0, &access_value);
+		else
+			ret = kstrtou64(arg_list[arg_val], 0, &access_value_64);
+		if (ret) {
+			snprintf(err_buf, MAX_ERRLEN, "invalid value: %s\n",
+							arg_list[arg_val]);
+			goto failed;
+		}
+	}
+
+	return 0;
+
+failed:
+	return -EINVAL;
+}
+
+static int parse_i2c_args(char **arg_list, int arg_num)
+{
+	int ret;
+
+	if ((access_dir == ACCESS_DIR_READ && arg_num != 4) ||
+		(access_dir == ACCESS_DIR_WRITE && arg_num != 5)) {
+		snprintf(err_buf, MAX_ERRLEN, "usage: r i2c <bus> <addr>\n"
+			"       w i2c <bus> <addr> <val>\n");
+		goto failed;
+	}
+
+	if (access_width == ACCESS_WIDTH_DEFAULT)
+		access_width = ACCESS_WIDTH_8BIT;
+
+	if (access_width != ACCESS_WIDTH_8BIT) {
+		snprintf(err_buf, MAX_ERRLEN, "only 8bit access is allowed\n");
+		goto failed;
+	}
+
+	ret = kstrtou8(arg_list[2], 0, &i2c_bus);
+	if (ret || i2c_bus > 9) {
+		snprintf(err_buf, MAX_ERRLEN, "invalid i2c bus %s\n",
+							arg_list[2]);
+		goto failed;
+	}
+
+	ret = kstrtou32(arg_list[3], 0, &i2c_addr);
+
+	pr_err("ret = %d, i2c_addr is 0x%x\n", ret, i2c_addr);
+	if (ret || (i2c_addr > 1024)) {
+		snprintf(err_buf, MAX_ERRLEN, "invalid i2c address %s\n",
+							arg_list[3]);
+		goto failed;
+	}
+
+	if (access_dir == ACCESS_DIR_WRITE) {
+		ret = kstrtou32(arg_list[4], 0, &access_value);
+		if (ret) {
+			snprintf(err_buf, MAX_ERRLEN,
+				"invalid value for i2c write %s\n",
+							 arg_list[4]);
+			goto failed;
+		}
+	}
+	return 0;
+
+failed:
+	return -EINVAL;
+}
+
+static int parse_scu_args(char **arg_list, int arg_num)
+{
+	int ret;
+
+	if (access_width != ACCESS_WIDTH_32BIT)
+		access_width = ACCESS_WIDTH_32BIT;
+
+	ret = kstrtou32(arg_list[2], 0, &scu_addr);
+	if (ret) {
+		snprintf(err_buf, MAX_ERRLEN, "invalid scu address %s\n",
+							arg_list[2]);
+		goto failed;
+	}
+
+	if (scu_addr % 4) {
+		snprintf(err_buf, MAX_ERRLEN,
+			"addr %x is not 4 bytes aligned!\n",
+						scu_addr);
+		goto failed;
+	}
+
+	if (access_dir == ACCESS_DIR_READ) {
+		if (arg_num != 3) {
+			snprintf(err_buf, MAX_ERRLEN,
+				"usage: r[4] scu <addr>\n");
+			goto failed;
+		}
+	}
+
+	if (access_dir == ACCESS_DIR_WRITE) {
+		if (arg_num != 4) {
+			snprintf(err_buf, MAX_ERRLEN,
+				"usage: w[4] scu <addr> <val>\n");
+			goto failed;
+		}
+		ret = kstrtou32(arg_list[3], 0, &access_value);
+		if (ret) {
+			snprintf(err_buf, MAX_ERRLEN,
+				"invalid scu write value %s\n",
+						arg_list[3]);
+			goto failed;
+		}
+	}
+
+	return 0;
+
+failed:
+	return -EINVAL;
+}
+
+static ssize_t dump_cmd_write(struct file *file, const char __user *buf,
+				size_t len, loff_t *offset)
+{
+	char cmd[MAX_CMDLEN];
+	char *arg_list[MAX_ARGS_NUM];
+	int arg_num, ret = -EINVAL;
+
+	err_buf[0] = 0;
+
+	if (len >= MAX_CMDLEN) {
+		snprintf(err_buf, MAX_ERRLEN, "input command is too long.\n"
+					"max allowed input length is %d\n",
+							MAX_CMDLEN);
+		goto done;
+	}
+
+	if (copy_from_user(cmd, buf, len)) {
+		snprintf(err_buf, MAX_ERRLEN, "copy_from_user() failed.\n");
+		goto done;
+	}
+	cmd[len] = 0;
+
+	dump_cmd_buf[0] = 0;
+	strncpy(dump_cmd_buf, cmd, len);
+	dump_cmd_buf[len] = 0;
+
+	arg_num = parse_argument(cmd, arg_list);
+	if (arg_num < MIN_ARGS_NUM) {
+		snprintf(err_buf, MAX_ERRLEN,
+			"invalid command(too few arguments): "
+					"%s\n", dump_cmd_buf);
+		goto done;
+	}
+	if (arg_num > MAX_ARGS_NUM) {
+		snprintf(err_buf, MAX_ERRLEN,
+			"invalid command(too many arguments): "
+					"%s\n", dump_cmd_buf);
+		goto done;
+	}
+
+	/* arg 1: direction(read/write) and mode (8/16/32/64 bit) */
+	if (!strncmp(arg_list[0], "r8", 2)) {
+		access_dir = ACCESS_DIR_READ;
+		access_width = ACCESS_WIDTH_64BIT;
+	} else if (!strncmp(arg_list[0], "r4", 2)) {
+		access_dir = ACCESS_DIR_READ;
+		access_width = ACCESS_WIDTH_32BIT;
+	} else if (!strncmp(arg_list[0], "r2", 2)) {
+		access_dir = ACCESS_DIR_READ;
+		access_width = ACCESS_WIDTH_16BIT;
+	} else if (!strncmp(arg_list[0], "r1", 2)) {
+		access_dir = ACCESS_DIR_READ;
+		access_width = ACCESS_WIDTH_8BIT;
+	} else if (!strncmp(arg_list[0], "r", 1)) {
+		access_dir = ACCESS_DIR_READ;
+		access_width = ACCESS_WIDTH_DEFAULT;
+	} else if (!strncmp(arg_list[0], "w8", 2)) {
+		access_dir = ACCESS_DIR_WRITE;
+		access_width = ACCESS_WIDTH_64BIT;
+	} else if (!strncmp(arg_list[0], "w4", 2)) {
+		access_dir = ACCESS_DIR_WRITE;
+		access_width = ACCESS_WIDTH_32BIT;
+	} else if (!strncmp(arg_list[0], "w2", 2)) {
+		access_dir = ACCESS_DIR_WRITE;
+		access_width = ACCESS_WIDTH_16BIT;
+	} else if (!strncmp(arg_list[0], "w1", 2)) {
+		access_dir = ACCESS_DIR_WRITE;
+		access_width = ACCESS_WIDTH_8BIT;
+	} else if (!strncmp(arg_list[0], "w", 1)) {
+		access_dir = ACCESS_DIR_WRITE;
+		access_width = ACCESS_WIDTH_DEFAULT;
+	} else {
+		snprintf(err_buf, MAX_ERRLEN, "unknown argument: %s\n",
+							arg_list[0]);
+		goto done;
+	}
+
+	/* arg2: bus type(mmio, msg_bus, pci or i2c) */
+	access_len = 1;
+	if (!strncmp(arg_list[1], "mmio", 4)) {
+		access_bus = ACCESS_BUS_MMIO;
+		ret = parse_mmio_args(arg_list, arg_num);
+	} else if (!strncmp(arg_list[1], "port", 4)) {
+		access_bus = ACCESS_BUS_PORT;
+		ret = parse_port_args(arg_list, arg_num);
+	} else if (!strncmp(arg_list[1], "msg_bus", 7)) {
+		access_bus = ACCESS_BUS_MSG_BUS;
+		ret = parse_msg_bus_args(arg_list, arg_num);
+	} else if (!strncmp(arg_list[1], "pci", 3)) {
+		access_bus = ACCESS_BUS_PCI;
+		ret = parse_pci_args(arg_list, arg_num);
+	} else if (!strncmp(arg_list[1], "msr", 3)) {
+		access_bus = ACCESS_BUS_MSR;
+		ret = parse_msr_args(arg_list, arg_num);
+	} else if (!strncmp(arg_list[1], "i2c", 3)) {
+		access_bus = ACCESS_BUS_I2C;
+		ret = parse_i2c_args(arg_list, arg_num);
+	} else if (!strncmp(arg_list[1], "scu", 3)) {
+		access_bus = ACCESS_BUS_SCU_INDRW;
+		ret = parse_scu_args(arg_list, arg_num);
+	} else {
+		snprintf(err_buf, MAX_ERRLEN, "unknown argument: %s\n",
+							arg_list[1]);
+	}
+
+	if (access_len == 0) {
+		snprintf(err_buf, MAX_ERRLEN,
+			"access length must be larger than 0\n");
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if ((access_bus == ACCESS_BUS_MMIO || access_bus == ACCESS_BUS_PCI) &&
+					 (access_len > MAX_MMIO_PCI_LEN)) {
+		snprintf(err_buf, MAX_ERRLEN,
+			"%d exceeds max mmio/pci read length(%d)\n",
+					access_len, MAX_MMIO_PCI_LEN);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if ((access_bus == ACCESS_BUS_MSG_BUS) &&
+		(access_len > MAX_MSG_BUS_LEN)) {
+		snprintf(err_buf, MAX_ERRLEN,
+			"%d exceeds max msg_bus read length(%d)\n",
+					access_len, MAX_MSG_BUS_LEN);
+		ret = -EINVAL;
+	}
+
+	if (access_bus == ACCESS_BUS_MSR) {
+		if ((access_width != ACCESS_WIDTH_32BIT) &&
+			(access_width != ACCESS_WIDTH_64BIT) &&
+			(access_width != ACCESS_WIDTH_DEFAULT)) {
+			snprintf(err_buf, MAX_ERRLEN,
+				"only 32bit or 64bit is allowed for msr\n");
+			ret = -EINVAL;
+		}
+	}
+
+done:
+	dump_cmd_was_set = ret ? 0 : 1;
+	return ret ? ret : len;
+}
+
+static int dump_output_show_mmio(struct seq_file *s)
+{
+	void __iomem *base;
+	int i, comp1, comp2;
+	u32 start, end, end_natural;
+	struct pci_dev *pdev;
+	char *name;
+
+	pdev = mmio_to_pci(mmio_addr, &name);
+	if (pdev && pm_runtime_get_sync(&pdev->dev) < 0) {
+		seq_printf(s, "can't put device %s into D0i0 state\n", name);
+		return 0;
+	}
+
+	if (access_dir == ACCESS_DIR_WRITE) {
+		base = ioremap_nocache(mmio_addr, access_width);
+		if (!base) {
+			seq_printf(s, "can't map physical address: %x\n",
+				mmio_addr);
+			if (pdev)
+				pm_runtime_put_sync(&pdev->dev);
+			return 0;
+		}
+		switch (access_width) {
+		case ACCESS_WIDTH_8BIT:
+			iowrite8((u8) access_value, base);
+			break;
+		case ACCESS_WIDTH_16BIT:
+			iowrite16((u16) access_value, base);
+			break;
+		case ACCESS_WIDTH_32BIT:
+		case ACCESS_WIDTH_DEFAULT:
+			iowrite32(access_value, base);
+			break;
+		default:
+			break; /* never happen */
+		}
+		seq_printf(s, "write succeeded\n");
+	} else {
+		start = (mmio_addr / LINE_WIDTH) * LINE_WIDTH;
+		end_natural = mmio_addr + (access_len - 1) * access_width;
+		end = (end_natural / LINE_WIDTH + 1) * LINE_WIDTH -
+						access_width;
+		comp1 = (mmio_addr - start) / access_width;
+		comp2 = (end - end_natural) / access_width;
+
+		base = ioremap_nocache(start, (comp1 + comp2 +
+			access_len) * access_width);
+		if (!base) {
+			seq_printf(s, "can't map physical address: %x\n",
+				mmio_addr);
+			if (pdev)
+				pm_runtime_put_sync(&pdev->dev);
+			return 0;
+		}
+
+		for (i = 0; i < comp1 + comp2 + access_len; i++) {
+			if ((i % SHOW_NUM_PER_LINE) == 0)
+					seq_printf(s, "[%08x]", start + i * 4);
+
+			if (i < comp1 || i >= access_len + comp1) {
+				switch (access_width) {
+				case ACCESS_WIDTH_32BIT:
+					seq_printf(s, "         ");
+					break;
+				case ACCESS_WIDTH_16BIT:
+					seq_printf(s, "     ");
+					break;
+				case ACCESS_WIDTH_8BIT:
+					seq_printf(s, "   ");
+					break;
+				}
+
+			} else {
+				switch (access_width) {
+				case ACCESS_WIDTH_32BIT:
+					seq_printf(s, " %08x",
+						ioread32(base + i * 4));
+					break;
+				case ACCESS_WIDTH_16BIT:
+					seq_printf(s, " %04x",
+						(u16) ioread16(base + i * 2));
+					break;
+				case ACCESS_WIDTH_8BIT:
+					seq_printf(s, " %02x",
+						(u8) ioread8(base + i));
+					break;
+				}
+			}
+
+			if ((i + 1) % SHOW_NUM_PER_LINE == 0)
+				seq_printf(s, "\n");
+		}
+	}
+
+	iounmap(base);
+	if (pdev)
+		pm_runtime_put_sync(&pdev->dev);
+	return 0;
+}
+
+static int dump_output_show_port(struct seq_file *s)
+{
+	if (access_dir == ACCESS_DIR_WRITE) {
+		switch (access_width) {
+		case ACCESS_WIDTH_8BIT:
+		case ACCESS_WIDTH_DEFAULT:
+			outb((u8) access_value, port_addr);
+			break;
+		case ACCESS_WIDTH_16BIT:
+			outw((u16) access_value, port_addr);
+			break;
+		case ACCESS_WIDTH_32BIT:
+			outl(access_value, port_addr);
+			break;
+		default:
+			break; /* never happen */
+		}
+		seq_printf(s, "write succeeded\n");
+	} else {
+		switch (access_width) {
+		case ACCESS_WIDTH_32BIT:
+			seq_printf(s, " %08x\n", inl(port_addr));
+			break;
+		case ACCESS_WIDTH_16BIT:
+			seq_printf(s, " %04x\n", (u16) inw(port_addr));
+			break;
+		case ACCESS_WIDTH_8BIT:
+			seq_printf(s, " %02x\n", (u8) inb(port_addr));
+			break;
+		default:
+			break;
+		}
+	}
+
+	return 0;
+}
+
+static int dump_output_show_msg_bus(struct seq_file *s)
+{
+	int i, comp1, comp2;
+	u32 start, end, end_natural;
+
+	if (access_dir == ACCESS_DIR_WRITE) {
+		intel_mid_msgbus_write32(msg_bus_port,
+			msg_bus_addr, access_value);
+		seq_printf(s, "write succeeded\n");
+	} else {
+		start = (msg_bus_addr / LINE_WIDTH) * LINE_WIDTH;
+		end_natural = msg_bus_addr + (access_len - 1) * access_width;
+		end = (end_natural / LINE_WIDTH + 1) * LINE_WIDTH -
+						access_width;
+		comp1 = (msg_bus_addr - start) / access_width;
+		comp2 = (end - end_natural) / access_width;
+
+	for (i = 0; i < comp1 + comp2 + access_len; i++) {
+			if ((i % SHOW_NUM_PER_LINE) == 0)
+					seq_printf(s, "[%08x]", start + i * 4);
+
+			if (i < comp1 || i >= access_len + comp1)
+				seq_printf(s, "         ");
+
+			else
+				seq_printf(s, " %08x", intel_mid_msgbus_read32(
+					msg_bus_port, msg_bus_addr + i));
+
+			if ((i + 1) % SHOW_NUM_PER_LINE == 0)
+				seq_printf(s, "\n");
+		}
+	}
+
+	return 0;
+}
+
+static int dump_output_show_pci(struct seq_file *s)
+{
+	int i, comp1, comp2;
+	u32 start, end, end_natural, val;
+	struct pci_dev *pdev;
+
+	pdev = pci_get_bus_and_slot(pci_bus, PCI_DEVFN(pci_dev, pci_func));
+	if (!pdev) {
+		seq_printf(s, "pci bus %d:%d:%d doesn't exist\n",
+			pci_bus, pci_dev, pci_func);
+		return 0;
+	}
+
+	if (pm_runtime_get_sync(&pdev->dev) < 0) {
+		seq_printf(s, "can't put pci device %d:%d:%d into D0i0 state\n",
+			pci_bus, pci_dev, pci_func);
+		return 0;
+	}
+
+	if (access_dir == ACCESS_DIR_WRITE) {
+		switch (access_width) {
+		case ACCESS_WIDTH_8BIT:
+			pci_write_config_byte(pdev, (int)pci_reg,
+					(u8)access_value);
+			break;
+		case ACCESS_WIDTH_16BIT:
+			pci_write_config_word(pdev, (int)pci_reg,
+				(u16)access_value);
+			break;
+		case ACCESS_WIDTH_32BIT:
+		case ACCESS_WIDTH_DEFAULT:
+			pci_write_config_dword(pdev, (int)pci_reg,
+				access_value);
+			break;
+		default:
+			break; /* never happen */
+		}
+		seq_printf(s, "write succeeded\n");
+	} else {
+		start = (pci_reg / LINE_WIDTH) * LINE_WIDTH;
+		end_natural = pci_reg + (access_len - 1) * access_width;
+		end = (end_natural / LINE_WIDTH + 1) * LINE_WIDTH -
+						access_width;
+		comp1 = (pci_reg - start) / access_width;
+		comp2 = (end - end_natural) / access_width;
+
+		for (i = 0; i < comp1 + comp2 + access_len; i++) {
+			if ((i % SHOW_NUM_PER_LINE) == 0)
+					seq_printf(s, "[%08x]", start + i * 4);
+
+			if (i < comp1 || i >= access_len + comp1) {
+				switch (access_width) {
+				case ACCESS_WIDTH_32BIT:
+					seq_printf(s, "         ");
+					break;
+				case ACCESS_WIDTH_16BIT:
+					seq_printf(s, "     ");
+					break;
+				case ACCESS_WIDTH_8BIT:
+					seq_printf(s, "   ");
+					break;
+				}
+
+			} else {
+				switch (access_width) {
+				case ACCESS_WIDTH_32BIT:
+					pci_read_config_dword(pdev,
+						start + i * 4, &val);
+					seq_printf(s, " %08x", val);
+					break;
+				case ACCESS_WIDTH_16BIT:
+					pci_read_config_word(pdev,
+						start + i * 2, (u16 *) &val);
+					seq_printf(s, " %04x", (u16)val);
+					break;
+				case ACCESS_WIDTH_8BIT:
+					pci_read_config_byte(pdev,
+						start + i, (u8 *) &val);
+					seq_printf(s, " %04x", (u8)val);
+					break;
+				}
+			}
+
+			if ((i + 1) % SHOW_NUM_PER_LINE == 0)
+				seq_printf(s, "\n");
+		}
+	}
+
+	return 0;
+}
+
+static int dump_output_show_msr(struct seq_file *s)
+{
+	int ret, i, count;
+	u32 data[2];
+
+	if (access_dir == ACCESS_DIR_READ) {
+		if (msr_cpu < 0) {
+			/* loop for all cpus */
+			i = 0;
+			count = nr_cpu_ids;
+		} else if (msr_cpu >= nr_cpu_ids || msr_cpu < 0) {
+			seq_printf(s, "cpu should be between 0 - %d\n",
+							nr_cpu_ids - 1);
+			return 0;
+		} else {
+			/* loop for one cpu */
+			i = msr_cpu;
+			count = msr_cpu + 1;
+		}
+		for (; i < count; i++) {
+			ret = rdmsr_safe_on_cpu(i, msr_reg, &data[0], &data[1]);
+			if (ret) {
+				seq_printf(s, "msr read error: %d\n", ret);
+				return 0;
+			} else {
+				if (access_width == ACCESS_WIDTH_32BIT)
+					seq_printf(s, "[cpu %1d] %08x\n",
+							i, data[0]);
+				else
+					seq_printf(s, "[cpu %1d] %08x%08x\n",
+						 i, data[1], data[0]);
+			}
+		}
+	} else {
+		if (access_width == ACCESS_WIDTH_32BIT) {
+			ret = rdmsr_safe_on_cpu(msr_cpu, msr_reg,
+					&data[0], &data[1]);
+			if (ret) {
+				seq_printf(s, "msr write error: %d\n", ret);
+				return 0;
+			}
+			data[0] = access_value;
+		} else {
+			data[0] = (u32)access_value_64;
+			data[1] = (u32)(access_value_64 >> 32);
+		}
+		if (msr_cpu < 0) {
+			/* loop for all cpus */
+			i = 0;
+			count = nr_cpu_ids;
+		} else {
+			if (msr_cpu >= nr_cpu_ids || msr_cpu < 0) {
+				seq_printf(s, "cpu should be between 0 - %d\n",
+						nr_cpu_ids - 1);
+				return 0;
+			}
+			/* loop for one cpu */
+			i = msr_cpu;
+			count = msr_cpu + 1;
+		}
+		for (; i < count; i++) {
+			ret = wrmsr_safe_on_cpu(i, msr_reg, data[0], data[1]);
+			if (ret) {
+				seq_printf(s, "msr write error: %d\n", ret);
+				return 0;
+			} else {
+				seq_printf(s, "write succeeded.\n");
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int dump_output_show_i2c(struct seq_file *s)
+{
+	int ret;
+	struct i2c_adapter *adap;
+	struct i2c_msg msg;
+	u8 val;
+
+	adap = i2c_get_adapter(i2c_bus);
+	if (!adap) {
+		seq_printf(s, "can't find bus adapter for i2c bus %d\n",
+							i2c_bus);
+		return 0;
+	}
+
+	if (access_dir == ACCESS_DIR_WRITE) {
+		msg.addr = i2c_addr;
+		msg.len = 1;
+		msg.buf = (u8 *) &access_value;
+		ret = i2c_transfer(adap, &msg, 1);
+		if (ret != 1)
+			seq_printf(s, "i2c write error: %d\n", ret);
+		else
+			seq_printf(s, "write succeeded.\n");
+	} else {
+		msg.flags |= I2C_M_RD;
+		msg.addr = i2c_addr;
+		msg.len = 1;
+		msg.buf = &val;
+		ret = i2c_transfer(adap, &msg, 1);
+		if (ret != 1)
+			seq_printf(s, "i2c read error: %d\n", ret);
+		else
+			seq_printf(s, "%02x\n", val);
+	}
+
+	return 0;
+}
+
+static int dump_output_show_scu(struct seq_file *s)
+{
+	struct pci_dev *pdev;
+	char *name;
+	int ret;
+	u32 cmd, sub = 0, dptr = 0, sptr = 0;
+	u8 wbuflen = 4, rbuflen = 4;
+	u8 wbuf[16];
+	u8 rbuf[16];
+
+	memset(wbuf, 0, 16);
+	memset(rbuf, 0, 16);
+
+	pdev = mmio_to_pci(scu_addr, &name);
+	if (pdev && pm_runtime_get_sync(&pdev->dev) < 0) {
+		seq_printf(s, "can't put device %s into D0i0 state\n", name);
+		return 0;
+	}
+
+	if (access_dir == ACCESS_DIR_WRITE) {
+		cmd = RP_INDIRECT_WRITE;
+		dptr = scu_addr;
+		wbuf[0] = (u8) (access_value & 0xff);
+		wbuf[1] = (u8) ((access_value >> 8) & 0xff);
+		wbuf[2] = (u8) ((access_value >> 16) & 0xff);
+		wbuf[3] = (u8) ((access_value >> 24) & 0xff);
+
+		ret = rpmsg_send_generic_raw_command(cmd, sub, wbuf, wbuflen,
+			(u32 *)rbuf, rbuflen, dptr, sptr);
+
+		if (ret) {
+			seq_printf(s,
+				"Indirect write failed (check dmesg): "
+						"[%08x]\n", scu_addr);
+		} else {
+			seq_printf(s, "write succeeded\n");
+		}
+	} else if (access_dir == ACCESS_DIR_READ) {
+		cmd = RP_INDIRECT_READ;
+		sptr = scu_addr;
+
+		ret = rpmsg_send_generic_raw_command(cmd, sub, wbuf, wbuflen,
+			(u32 *)rbuf, rbuflen, dptr, sptr);
+
+		if (ret) {
+			seq_printf(s,
+				"Indirect read failed (check dmesg): "
+						"[%08x]\n", scu_addr);
+		} else {
+			access_value = (rbuf[3] << 24) | (rbuf[2] << 16) |
+				(rbuf[1] << 8) | (rbuf[0]);
+			seq_printf(s, "[%08x] %08x\n", scu_addr, access_value);
+		}
+	}
+
+	if (pdev)
+		pm_runtime_put_sync(&pdev->dev);
+
+	return 0;
+}
+
+static int dump_output_show(struct seq_file *s, void *unused)
+{
+	int ret = 0;
+
+	if (!dump_cmd_was_set) {
+		seq_printf(s, "%s", err_buf);
+		return 0;
+	}
+
+	switch (access_bus) {
+	case ACCESS_BUS_MMIO:
+		ret = dump_output_show_mmio(s);
+		break;
+	case ACCESS_BUS_PORT:
+		ret = dump_output_show_port(s);
+		break;
+	case ACCESS_BUS_MSG_BUS:
+		ret = dump_output_show_msg_bus(s);
+		break;
+	case ACCESS_BUS_PCI:
+		ret = dump_output_show_pci(s);
+		break;
+	case ACCESS_BUS_MSR:
+		ret = dump_output_show_msr(s);
+		break;
+	case ACCESS_BUS_I2C:
+		ret = dump_output_show_i2c(s);
+		break;
+	case ACCESS_BUS_SCU_INDRW:
+		ret = dump_output_show_scu(s);
+		break;
+	default:
+		seq_printf(s, "unknow bus type: %d\n", access_bus);
+		break;
+
+	}
+
+	return ret;
+}
+
+static const struct file_operations dump_cmd_fops = {
+	.owner		= THIS_MODULE,
+	.open		= dump_cmd_open,
+	.read		= seq_read,
+	.write		= dump_cmd_write,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int dump_output_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, dump_output_show, NULL);
+}
+
+static const struct file_operations dump_output_fops = {
+	.owner		= THIS_MODULE,
+	.open		= dump_output_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int __init intel_mid_dump_init(void)
+{
+	dump_cmd_dentry = debugfs_create_file("dump_cmd",
+		S_IFREG | S_IRUGO | S_IWUSR, NULL, NULL, &dump_cmd_fops);
+	dump_output_dentry = debugfs_create_file("dump_output",
+		S_IFREG | S_IRUGO, NULL, NULL, &dump_output_fops);
+	if (!dump_cmd_dentry || !dump_output_dentry) {
+		pr_err("intel_mid_dump: can't create debugfs node\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+module_init(intel_mid_dump_init);
+
+static void __exit intel_mid_dump_exit(void)
+{
+	if (dump_cmd_dentry)
+		debugfs_remove(dump_cmd_dentry);
+	if (dump_output_dentry)
+		debugfs_remove(dump_output_dentry);
+}
+module_exit(intel_mid_dump_exit);
+
+MODULE_DESCRIPTION("Intel Atom SoC register dump driver");
+MODULE_VERSION("1.0");
+MODULE_AUTHOR("Bin Gao <bin.gao@intel.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/arch/x86/platform/intel-mid/intel_soc_mrfld.c b/arch/x86/platform/intel-mid/intel_soc_mrfld.c
new file mode 100644
index 0000000..1c648c9
--- /dev/null
+++ b/arch/x86/platform/intel-mid/intel_soc_mrfld.c
@@ -0,0 +1,422 @@
+/*
+ * intel_soc_mrfld.c - This driver provides utility api's for merrifield
+ * platform
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include "intel_soc_pmu.h"
+
+u8 __iomem *s0ix_counters;
+
+int s0ix_counter_reg_map[] = {0x0, 0xAC, 0xB0, 0xA8, 0xA4, 0xC0,
+	0xBC, 0xB8, 0xB4, 0x8C, 0x90, 0x98};
+
+int s0ix_residency_reg_map[] = {0x0, 0xD8, 0xE0, 0xD0, 0xC8, 0x100,
+	0xF8, 0xF0, 0xE8, 0x68, 0x70, 0x80};
+
+/* list of north complex devices */
+char *mrfl_nc_devices[] = {
+	"GFXSLC",
+	"GSDKCK",
+	"GRSCD",
+	"VED",
+	"VEC",
+	"DPA",
+	"DPB",
+	"DPC",
+	"VSP",
+	"ISP",
+	"MIO",
+	"HDMIO",
+	"GFXSLCLDO"
+};
+
+int mrfl_no_of_nc_devices =
+	sizeof(mrfl_nc_devices)/sizeof(mrfl_nc_devices[0]);
+
+static int mrfld_pmu_init(void)
+{
+	mid_pmu_cxt->s3_hint = MRFLD_S3_HINT;
+
+
+	/* Put all unused LSS in D0i3 */
+	mid_pmu_cxt->os_sss[0] = (SSMSK(D0I3_MASK, PMU_RESERVED_LSS_03)	|
+				SSMSK(D0I3_MASK, PMU_HSI_LSS_05)	|
+				SSMSK(D0I3_MASK, PMU_RESERVED_LSS_07)	|
+				SSMSK(D0I3_MASK, PMU_RESERVED_LSS_11)	|
+				SSMSK(D0I3_MASK, PMU_RESERVED_LSS_12)	|
+				SSMSK(D0I3_MASK, PMU_RESERVED_LSS_13)	|
+				SSMSK(D0I3_MASK, PMU_RESERVED_LSS_14)	|
+				SSMSK(D0I3_MASK, PMU_RESERVED_LSS_15));
+
+	mid_pmu_cxt->os_sss[1] = (SSMSK(D0I3_MASK, PMU_RESERVED_LSS_16-16)|
+				SSMSK(D0I3_MASK, PMU_SSP3_LSS_17-16)|
+				SSMSK(D0I3_MASK, PMU_SSP6_LSS_19-16)|
+				SSMSK(D0I3_MASK, PMU_USB_OTG_LSS_28-16)|
+				SSMSK(D0I3_MASK, PMU_RESERVED_LSS_29-16)|
+				SSMSK(D0I3_MASK, PMU_RESERVED_LSS_30-16));
+
+	/* Excpet for LSS 35 keep all in D0i3 */
+	mid_pmu_cxt->os_sss[2] = 0xFFFFFFFF;
+	mid_pmu_cxt->os_sss[3] = 0xFFFFFFFF;
+
+	mid_pmu_cxt->os_sss[2] &= ~SSMSK(D0I3_MASK, PMU_SSP4_LSS_35-32);
+
+	s0ix_counters = devm_ioremap_nocache(&mid_pmu_cxt->pmu_dev->dev,
+		S0IX_COUNTERS_BASE, S0IX_COUNTERS_SIZE);
+	if (!s0ix_counters)
+		goto err;
+
+	/* Keep PSH LSS's 00, 33, 34 in D0i0 if PM is disabled */
+	if (!enable_s0ix && !enable_s3) {
+		mid_pmu_cxt->os_sss[2] &=
+				~SSMSK(D0I3_MASK, PMU_I2C8_LSS_33-32);
+		mid_pmu_cxt->os_sss[2] &=
+				~SSMSK(D0I3_MASK, PMU_I2C9_LSS_34-32);
+	} else {
+		mid_pmu_cxt->os_sss[0] |= SSMSK(D0I3_MASK, PMU_PSH_LSS_00);
+	}
+
+	/* Disable the Interrupt Enable bit in PM ICS register */
+	pmu_clear_interrupt_enable();
+
+	return PMU_SUCCESS;
+
+err:
+	pr_err("Cannot map memory to read S0ix residency and count\n");
+	return PMU_FAILED;
+}
+
+/* This function checks north complex (NC) and
+ * south complex (SC) device status in MRFLD.
+ * returns TRUE if all NC and SC devices are in d0i3
+ * else FALSE.
+ */
+static bool mrfld_nc_sc_status_check(void)
+{
+	int i;
+	u32 val, nc_pwr_sts;
+	struct pmu_ss_states cur_pmsss;
+	bool nc_status, sc_status;
+
+	/* assuming nc and sc are good */
+	nc_status = true;
+	sc_status = true;
+
+	/* Check south complex device status */
+	pmu_read_sss(&cur_pmsss);
+
+	if (!(((cur_pmsss.pmu2_states[0] & S0IX_TARGET_SSS0_MASK) ==
+					 S0IX_TARGET_SSS0) &&
+		((cur_pmsss.pmu2_states[1] & S0IX_TARGET_SSS1_MASK) ==
+					 S0IX_TARGET_SSS1) &&
+		((cur_pmsss.pmu2_states[2] & S0IX_TARGET_SSS2_MASK) ==
+					 S0IX_TARGET_SSS2) &&
+		((cur_pmsss.pmu2_states[3] & S0IX_TARGET_SSS3_MASK) ==
+					 (S0IX_TARGET_SSS3)))) {
+		sc_status = false;
+		pr_warn("SC device/devices not in d0i3!!\n");
+		for (i = 0; i < 4; i++)
+			pr_warn("pmu2_states[%d] = %08lX\n", i,
+					cur_pmsss.pmu2_states[i]);
+	}
+
+	if (sc_status) {
+		/* Check north complex status */
+		nc_pwr_sts =
+			 intel_mid_msgbus_read32(PUNIT_PORT, NC_PM_SSS);
+		/* loop through the status to see if any of nc power island
+		 * is not in D0i3 state
+		 */
+		for (i = 0; i < mrfl_no_of_nc_devices; i++) {
+			val = nc_pwr_sts & 3;
+			if (val != 3) {
+				nc_status = false;
+				pr_warn("NC device (%s) is not in d0i3!!\n",
+							mrfl_nc_devices[i]);
+				pr_warn("nc_pm_sss = %08X\n", nc_pwr_sts);
+				break;
+			}
+			nc_pwr_sts >>= BITS_PER_LSS;
+		}
+	}
+
+	return nc_status & sc_status;
+}
+
+/* FIXME: Need to start the counter only if debug is
+ * needed. This will save SCU cycles if debug is
+ * disabled
+ */
+static int __init start_scu_s0ix_res_counters(void)
+{
+	int ret;
+
+	ret = intel_scu_ipc_simple_command(START_RES_COUNTER, 0);
+	if (ret) {
+		pr_err("IPC command to start res counter failed\n");
+		BUG();
+		return ret;
+	}
+	return 0;
+}
+late_initcall(start_scu_s0ix_res_counters);
+
+void platform_update_all_lss_states(struct pmu_ss_states *pmu_config,
+					int *PCIALLDEV_CFG)
+{
+	/* Overwrite the pmu_config values that we get */
+	pmu_config->pmu2_states[0] =
+				(SSMSK(D0I3_MASK, PMU_RESERVED_LSS_03)	|
+				SSMSK(D0I3_MASK, PMU_HSI_LSS_05)	|
+				SSMSK(D0I3_MASK, PMU_RESERVED_LSS_07)	|
+				SSMSK(D0I3_MASK, PMU_RESERVED_LSS_11)	|
+				SSMSK(D0I3_MASK, PMU_RESERVED_LSS_12)	|
+				SSMSK(D0I3_MASK, PMU_RESERVED_LSS_13)	|
+				SSMSK(D0I3_MASK, PMU_RESERVED_LSS_14)	|
+				SSMSK(D0I3_MASK, PMU_RESERVED_LSS_15));
+
+	pmu_config->pmu2_states[1] =
+				(SSMSK(D0I3_MASK, PMU_RESERVED_LSS_16-16)|
+				SSMSK(D0I3_MASK, PMU_SSP3_LSS_17-16)|
+				SSMSK(D0I3_MASK, PMU_SSP5_LSS_18-16)|
+				SSMSK(D0I3_MASK, PMU_SSP6_LSS_19-16)|
+				SSMSK(D0I3_MASK, PMU_USB_OTG_LSS_28-16)	|
+				SSMSK(D0I3_MASK, PMU_RESERVED_LSS_29-16)|
+				SSMSK(D0I3_MASK, PMU_RESERVED_LSS_30-16));
+	if (platform_is(INTEL_ATOM_MRFLD))
+		pmu_config->pmu2_states[1] |=
+				SSMSK(D0I3_MASK, PMU_SSP6_LSS_19-16);
+
+	pmu_config->pmu2_states[0] &= ~IGNORE_SSS0;
+	pmu_config->pmu2_states[1] &= ~IGNORE_SSS1;
+	pmu_config->pmu2_states[2] = ~IGNORE_SSS2;
+	pmu_config->pmu2_states[3] = ~IGNORE_SSS3;
+
+	/* Excpet for LSS 35 keep all in D0i3 */
+	pmu_config->pmu2_states[2] &= ~SSMSK(D0I3_MASK, PMU_SSP4_LSS_35-32);
+
+	/* Keep PSH LSS's 00, 33, 34 in D0i0 if PM is disabled */
+	if (!enable_s0ix && !enable_s3) {
+		pmu_config->pmu2_states[2] &=
+				~SSMSK(D0I3_MASK, PMU_I2C8_LSS_33-32);
+		pmu_config->pmu2_states[2] &=
+				~SSMSK(D0I3_MASK, PMU_I2C9_LSS_34-32);
+	} else {
+		pmu_config->pmu2_states[0] |= SSMSK(D0I3_MASK, PMU_PSH_LSS_00);
+	}
+}
+
+/*
+ * In MDFLD and CLV this callback is used to issue
+ * PM_CMD which is not required in MRFLD
+ */
+static bool mrfld_pmu_enter(int s0ix_state)
+{
+	mid_pmu_cxt->s0ix_entered = s0ix_state;
+	if (s0ix_state == MID_S3_STATE) {
+		mid_pmu_cxt->pmu_current_state = SYS_STATE_S3;
+		pmu_set_interrupt_enable();
+	}
+
+	return true;
+}
+
+/**
+ *      platform_set_pmu_ops - Set the global pmu method table.
+ *      @ops:   Pointer to ops structure.
+ */
+void platform_set_pmu_ops(void)
+{
+	pmu_ops = &mrfld_pmu_ops;
+}
+
+/*
+ * As of now since there is no sequential mapping between
+ * LSS abd WKS bits the following two calls are dummy
+ */
+
+bool mid_pmu_is_wake_source(u32 lss_number)
+{
+	return false;
+}
+
+/* return the last wake source id, and make statistics about wake sources */
+int pmu_get_wake_source(void)
+{
+	return INVALID_WAKE_SRC;
+}
+
+
+int set_extended_cstate_mode(const char *val, struct kernel_param *kp)
+{
+	return 0;
+}
+
+int get_extended_cstate_mode(char *buffer, struct kernel_param *kp)
+{
+	const char *default_string = "not supported";
+	strcpy(buffer, default_string);
+	return strlen(default_string);
+}
+
+static int wait_for_nc_pmcmd_complete(int verify_mask,
+				int status_mask, int state_type , int reg)
+{
+	int pwr_sts;
+	int count = 0;
+
+	while (true) {
+		pwr_sts = intel_mid_msgbus_read32(PUNIT_PORT, reg);
+		pwr_sts = pwr_sts >> SSS_SHIFT;
+		if (state_type == OSPM_ISLAND_DOWN ||
+					state_type == OSPM_ISLAND_SR) {
+			if ((pwr_sts & status_mask) ==
+						(verify_mask & status_mask))
+				break;
+			else
+				udelay(10);
+		} else if (state_type == OSPM_ISLAND_UP) {
+			if ((~pwr_sts & status_mask)  ==
+						(~verify_mask & status_mask))
+				break;
+			else
+				udelay(10);
+		}
+
+		count++;
+		if (count > 500000) {
+			pr_err("PUnit Timeout, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n",
+				verify_mask, status_mask, state_type, reg,
+				intel_mid_msgbus_read32(PUNIT_PORT, reg));
+			panic("punit timeout");
+		}
+	}
+	return 0;
+}
+
+static int mrfld_nc_set_power_state(int islands, int state_type,
+							int reg, int *change)
+{
+	u32 pwr_sts = 0;
+	u32 pwr_mask = 0;
+	int i, lss, mask;
+	int ret = 0;
+	int status_mask = 0;
+
+	*change = 0;
+	pwr_sts = intel_mid_msgbus_read32(PUNIT_PORT, reg);
+	pwr_mask = pwr_sts;
+
+	for (i = 0; i < OSPM_MAX_POWER_ISLANDS; i++) {
+		lss = islands & (0x1 << i);
+		if (lss) {
+			mask = D0I3_MASK << (BITS_PER_LSS * i);
+			status_mask = status_mask | mask;
+			if (state_type == OSPM_ISLAND_DOWN) {
+				pwr_mask |= mask;
+				mid_pmu_cxt->nc_d0i0_time[i] +=
+					(cpu_clock(0) - mid_pmu_cxt->nc_d0i0_prev_time[i]);
+			} else if (state_type == OSPM_ISLAND_UP) {
+				mid_pmu_cxt->nc_d0i0_count[i]++;
+				pwr_mask &= ~mask;
+				mid_pmu_cxt->nc_d0i0_prev_time[i] = cpu_clock(0);
+			/* Soft reset case */
+			} else if (state_type == OSPM_ISLAND_SR) {
+				pwr_mask &= ~mask;
+				mask = SR_MASK << (BITS_PER_LSS * i);
+				pwr_mask |= mask;
+			}
+		}
+	}
+
+	if (pwr_mask != pwr_sts) {
+		intel_mid_msgbus_write32(PUNIT_PORT, reg, pwr_mask);
+		ret = wait_for_nc_pmcmd_complete(pwr_mask,
+					status_mask, state_type, reg);
+		if (!ret)
+			*change = 1;
+		if (nc_report_power_state)
+			nc_report_power_state(pwr_mask, reg);
+	}
+
+	return ret;
+}
+
+/* Provide s0i1-display vote to display driver. We add this operation in pmu
+ * driver to sync operation with display island power on/off with touching
+ * the same register.
+ * register also defined:  linux/modules/intel_media/display/tng/drv/pmu_tng.h */
+#define DSP_SS_PM 0x36
+#define PUNIT_DSPSSPM_ENABLE_S0i1_DISPLAY     (1<<8)
+static void set_s0i1_disp_vote(bool enable)
+{
+	u32 dsp_ss_pm_val = intel_mid_msgbus_read32(PUNIT_PORT, DSP_SS_PM);
+
+	if (enable)
+		dsp_ss_pm_val |= PUNIT_DSPSSPM_ENABLE_S0i1_DISPLAY;
+	else
+		dsp_ss_pm_val &= ~PUNIT_DSPSSPM_ENABLE_S0i1_DISPLAY;
+
+	intel_mid_msgbus_write32(PUNIT_PORT, DSP_SS_PM, dsp_ss_pm_val);
+}
+
+void s0ix_complete(void)
+{
+	if (mid_pmu_cxt->s0ix_entered) {
+		log_wakeup_irq();
+
+		if (mid_pmu_cxt->s0ix_entered == SYS_STATE_S3)
+			pmu_clear_interrupt_enable();
+
+		mid_pmu_cxt->pmu_current_state	=
+		mid_pmu_cxt->s0ix_entered	= 0;
+	}
+}
+
+bool could_do_s0ix(void)
+{
+	bool ret = false;
+	if (unlikely(!pmu_initialized))
+		goto ret;
+
+	/* dont do s0ix if suspend in progress */
+	if (unlikely(mid_pmu_cxt->suspend_started))
+		goto ret;
+
+	/* dont do s0ix if shutdown in progress */
+	if (unlikely(mid_pmu_cxt->shutdown_started))
+		goto ret;
+
+	if (nc_device_state())
+		goto ret;
+
+	ret = true;
+ret:
+	return ret;
+}
+EXPORT_SYMBOL(could_do_s0ix);
+
+
+struct platform_pmu_ops mrfld_pmu_ops = {
+	.init	 = mrfld_pmu_init,
+	.enter	 = mrfld_pmu_enter,
+	.set_s0ix_complete = s0ix_complete,
+	.set_s0i1_disp_vote = set_s0i1_disp_vote,
+	.nc_set_power_state = mrfld_nc_set_power_state,
+	.check_nc_sc_status = mrfld_nc_sc_status_check,
+};
diff --git a/arch/x86/platform/intel-mid/intel_soc_mrfld.h b/arch/x86/platform/intel-mid/intel_soc_mrfld.h
new file mode 100644
index 0000000..beec768
--- /dev/null
+++ b/arch/x86/platform/intel-mid/intel_soc_mrfld.h
@@ -0,0 +1,153 @@
+/*
+ * intel_soc_mrfld.h
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifdef CONFIG_ATOM_SOC_POWER
+
+#define PM_SUPPORT		0x21
+
+#define ISP_POS			7
+#define ISP_SUB_CLASS		0x80
+
+#define PUNIT_PORT		0x04
+#define SSS_SHIFT		24
+
+/* Soft reset mask */
+#define SR_MASK			0x2
+
+#define PMU1_MAX_DEVS			8
+#define PMU2_MAX_DEVS			55
+
+#define MRFLD_S3_HINT			0x64
+
+#define PUNIT_PORT			0x04
+#define NC_PM_SSS			0x3F
+
+/* SRAM locations to get S0ix count and residency */
+#define S0IX_COUNTERS_BASE	0xFFFFF500
+#define S0IX_COUNTERS_SIZE	(0x608 - 0x500)
+
+/* IPC commands to start, stop and
+ * dump S0ix residency counters */
+#define START_RES_COUNTER	0x00EB
+#define STOP_RES_COUNTER	0x10EB
+#define DUMP_RES_COUNTER	0x20EB
+
+/* IPC commands to start/reset and
+ * dump S0ix count */
+#define START_S0IX_COUNT	0x00E1
+#define DUMP_S0IX_COUNT		0x10E1
+
+#define GFX_LSS_INDEX			1
+
+#define PMU_PSH_LSS_00			0
+#define PMU_SDIO0_LSS_01		1
+#define PMU_EMMC0_LSS_02		2
+#define PMU_RESERVED_LSS_03		3
+#define PMU_SDIO1_LSS_04		4
+#define PMU_HSI_LSS_05			5
+#define PMU_SECURITY_LSS_06		6
+#define PMU_RESERVED_LSS_07		7
+#define PMU_USB_MPH_LSS_08		8
+#define PMU_USB3_LSS_09			9
+#define PMU_AUDIO_LSS_10		10
+#define PMU_RESERVED_LSS_11		11
+#define PMU_RESERVED_LSS_12		12
+#define PMU_RESERVED_LSS_13		13
+#define PMU_RESERVED_LSS_14		14
+#define PMU_RESERVED_LSS_15		15
+#define PMU_RESERVED_LSS_16		16
+#define PMU_SSP3_LSS_17			17
+#define PMU_SSP5_LSS_18			18
+#define PMU_SSP6_LSS_19			19
+#define PMU_I2C1_LSS_20			20
+#define PMU_I2C2_LSS_21			21
+#define PMU_I2C3_LSS_22			22
+#define PMU_I2C4_LSS_23			23
+#define PMU_I2C5_LSS_24			24
+#define PMU_GP_DMA_LSS_25		25
+#define PMU_I2C6_LSS_26			26
+#define PMU_I2C7_LSS_27			27
+#define PMU_USB_OTG_LSS_28		28
+#define PMU_RESERVED_LSS_29		29
+#define PMU_RESERVED_LSS_30		30
+#define PMU_UART0_LSS_31		31
+#define PMU_UART1_LSS_31		31
+#define PMU_UART2_LSS_31		31
+
+#define PMU_I2C8_LSS_33			33
+#define PMU_I2C9_LSS_34			34
+#define PMU_SSP4_LSS_35			35
+#define PMU_PMW_LSS_36			36
+
+#define EMMC0_LSS			PMU_EMMC0_LSS_02
+
+#define IGNORE_SSS0			0
+#define IGNORE_SSS1			0
+#define IGNORE_SSS2			0
+#define IGNORE_SSS3			0
+
+#define PMU_WAKE_GPIO0      (1 << 0)
+#define PMU_WAKE_GPIO1      (1 << 1)
+#define PMU_WAKE_GPIO2      (1 << 2)
+#define PMU_WAKE_GPIO3      (1 << 3)
+#define PMU_WAKE_GPIO4      (1 << 4)
+#define PMU_WAKE_GPIO5      (1 << 5)
+#define PMU_WAKE_TIMERS     (1 << 6)
+#define PMU_WAKE_SECURITY   (1 << 7)
+#define PMU_WAKE_AONT32K    (1 << 8)
+#define PMU_WAKE_AONT       (1 << 9)
+#define PMU_WAKE_SVID_ALERT (1 << 10)
+#define PMU_WAKE_AUDIO      (1 << 11)
+#define PMU_WAKE_USB2       (1 << 12)
+#define PMU_WAKE_USB3       (1 << 13)
+#define PMU_WAKE_ILB        (1 << 14)
+#define PMU_WAKE_TAP        (1 << 15)
+#define PMU_WAKE_WATCHDOG   (1 << 16)
+#define PMU_WAKE_HSIC       (1 << 17)
+#define PMU_WAKE_PSH        (1 << 18)
+#define PMU_WAKE_PSH_GPIO   (1 << 19)
+#define PMU_WAKE_PSH_AONT   (1 << 20)
+#define PMU_WAKE_PSH_HALT   (1 << 21)
+#define PMU_GLBL_WAKE_MASK  (1 << 31)
+
+/* Ignore AONT WAKES and ALL from WKC1 */
+#define IGNORE_S3_WKC0 (PMU_WAKE_AONT32K | PMU_WAKE_AONT)
+#define IGNORE_S3_WKC1 (~0)
+
+#define S0IX_TARGET_SSS0_MASK (0xFFF3FFFF)
+#define S0IX_TARGET_SSS1_MASK (0xFFFFFFFF)
+#define S0IX_TARGET_SSS2_MASK (0xFFFFFFFF)
+#define S0IX_TARGET_SSS3_MASK (0xFFFFFFFF)
+
+#define S0IX_TARGET_SSS0 (0xFFF3FFFF)
+#define S0IX_TARGET_SSS1 (0xFFFFFFFF)
+#define S0IX_TARGET_SSS2 (0xFFFFFF3F)
+#define S0IX_TARGET_SSS3 (0xFFFFFFFF)
+
+#define LPMP3_TARGET_SSS0_MASK (0xFFF3FFFF)
+#define LPMP3_TARGET_SSS0 (0xFFC3FFFF)
+
+extern char *mrfl_nc_devices[];
+extern int mrfl_no_of_nc_devices;
+extern int intel_scu_ipc_simple_command(int, int);
+extern void log_wakeup_irq(void);
+extern void s0ix_complete(void);
+extern bool could_do_s0ix(void);
+
+#endif
diff --git a/arch/x86/platform/intel-mid/intel_soc_pm_debug.c b/arch/x86/platform/intel-mid/intel_soc_pm_debug.c
new file mode 100644
index 0000000..8aca582
--- /dev/null
+++ b/arch/x86/platform/intel-mid/intel_soc_pm_debug.c
@@ -0,0 +1,2549 @@
+/*
+ * intel_soc_pm_debug.c - This driver provides debug utilities across
+ * multiple platforms
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+#include <linux/time.h>
+#include <asm/intel_mid_rpmsg.h>
+#include <linux/cpuidle.h>
+#include "intel_soc_pm_debug.h"
+#include <asm-generic/io-64-nonatomic-hi-lo.h>
+#include <asm/tsc.h>
+
+#ifdef CONFIG_PM_DEBUG
+#define MAX_CSTATES_POSSIBLE	32
+
+static struct latency_stat *lat_stat;
+
+static void latency_measure_enable_disable(bool enable_measure)
+{
+	int err;
+	u32 sub;
+
+	if (enable_measure == lat_stat->latency_measure)
+		return;
+
+	if (enable_measure)
+		sub = IPC_SUB_MEASURE_START_CLVP;
+	else
+		sub = IPC_SUB_MEASURE_STOP_CLVP;
+
+	err = rpmsg_send_generic_command(IPC_CMD_S0IX_LATENCY_CLVP,
+						sub, NULL, 0, NULL, 0);
+	if (unlikely(err)) {
+		pr_err("IPC to %s S0IX Latency Measurement failed!\n",
+					enable_measure ? "start" : "stop");
+		return;
+	}
+
+	if (enable_measure) {
+		memset(lat_stat->scu_latency, 0, sizeof(lat_stat->scu_latency));
+		memset(lat_stat->os_latency, 0, sizeof(lat_stat->os_latency));
+		memset(lat_stat->s3_parts_lat, 0,
+				sizeof(lat_stat->s3_parts_lat));
+		memset(lat_stat->count, 0, sizeof(lat_stat->count));
+	}
+
+	lat_stat->latency_measure = enable_measure;
+}
+
+static void print_simple_stat(struct seq_file *s, int divisor, int rem_div,
+					int count, struct simple_stat stat)
+{
+	unsigned long long min, avg, max;
+	unsigned long min_rem = 0, avg_rem = 0, max_rem = 0;
+
+	min = stat.min;
+	max = stat.max;
+	avg = stat.total;
+
+	if (count)
+		do_div(avg, count);
+
+	if (divisor > 1) {
+		min_rem = do_div(min, divisor);
+		max_rem = do_div(max, divisor);
+		avg_rem = do_div(avg, divisor);
+	}
+
+	if (rem_div > 1) {
+		min_rem /= rem_div;
+		max_rem /= rem_div;
+		avg_rem /= rem_div;
+	}
+
+	seq_printf(s, " %5llu.%03lu/%5llu.%03lu/%5llu.%03lu",
+			min, min_rem, avg, avg_rem, max, max_rem);
+}
+
+static int show_pmu_s0ix_lat(struct seq_file *s, void *unused)
+{
+	int i = 0;
+
+	char *states[] = {
+		"S0I1",
+		"LPMP3",
+		"S0I3",
+		"S3"
+	};
+
+	char *s3_parts_names[] = {
+		"PROC_FRZ",
+		"DEV_SUS",
+		"NB_CPU_OFF",
+		"NB_CPU_ON",
+		"DEV_RES",
+		"PROC_UNFRZ"
+	};
+
+	seq_printf(s, "%29s %35s\n", "SCU Latency", "OS Latency");
+	seq_printf(s, "%33s %35s\n", "min/avg/max(msec)", "min/avg/max(msec)");
+
+	for (i = 0; i < ARRAY_SIZE(states); i++) {
+		seq_printf(s, "\n%s(%llu)", states[i], lat_stat->count[i]);
+
+		seq_printf(s, "\n%5s", "entry");
+		print_simple_stat(s, USEC_PER_MSEC, 1, lat_stat->count[i],
+						lat_stat->scu_latency[i].entry);
+		seq_printf(s, "      ");
+		print_simple_stat(s, NSEC_PER_MSEC, NSEC_PER_USEC,
+			lat_stat->count[i], lat_stat->os_latency[i].entry);
+
+		seq_printf(s, "\n%5s", "exit");
+		print_simple_stat(s, USEC_PER_MSEC, 1, lat_stat->count[i],
+						lat_stat->scu_latency[i].exit);
+		seq_printf(s, "      ");
+		print_simple_stat(s, NSEC_PER_MSEC, NSEC_PER_USEC,
+			lat_stat->count[i], lat_stat->os_latency[i].exit);
+
+	}
+
+	seq_printf(s, "\n\n");
+
+	if (!lat_stat->count[SYS_STATE_S3])
+		return 0;
+
+	seq_printf(s, "S3 Latency dissection:\n");
+	seq_printf(s, "%38s\n", "min/avg/max(msec)");
+
+	for (i = 0; i < MAX_S3_PARTS; i++) {
+		seq_printf(s, "%10s\t", s3_parts_names[i]);
+		print_simple_stat(s, NSEC_PER_MSEC, NSEC_PER_USEC,
+					lat_stat->count[SYS_STATE_S3],
+					lat_stat->s3_parts_lat[i]);
+		seq_printf(s, "\n");
+	}
+
+	return 0;
+}
+
+static int pmu_s0ix_lat_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, show_pmu_s0ix_lat, NULL);
+}
+
+static ssize_t pmu_s0ix_lat_write(struct file *file,
+		     const char __user *userbuf, size_t count, loff_t *ppos)
+{
+	char buf[32];
+	int buf_size = min(count, sizeof(buf)-1);
+
+	if (copy_from_user(buf, userbuf, buf_size))
+		return -EFAULT;
+
+
+	buf[buf_size] = 0;
+
+	if (((strlen("start") + 1) == buf_size) &&
+		!strncmp(buf, "start", strlen("start"))) {
+		latency_measure_enable_disable(true);
+	} else if (((strlen("stop") + 1) == buf_size) &&
+		!strncmp(buf, "stop", strlen("stop"))) {
+		latency_measure_enable_disable(false);
+	}
+
+	return buf_size;
+}
+
+static const struct file_operations s0ix_latency_ops = {
+	.open		= pmu_s0ix_lat_open,
+	.read		= seq_read,
+	.write		= pmu_s0ix_lat_write,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static void update_simple_stat(struct simple_stat *simple_stat, int count)
+{
+	u64 duration = simple_stat->curr;
+
+	if (!count) {
+		simple_stat->min =
+		simple_stat->max =
+		simple_stat->total = duration;
+	} else {
+		if (duration < simple_stat->min)
+			simple_stat->min = duration;
+		else if (duration > simple_stat->max)
+			simple_stat->max = duration;
+		simple_stat->total += duration;
+	}
+}
+
+void s0ix_scu_latency_stat(int type)
+{
+	if (!lat_stat || !lat_stat->latency_measure)
+		return;
+
+	if (type < SYS_STATE_S0I1 || type > SYS_STATE_S3)
+		return;
+
+	lat_stat->scu_latency[type].entry.curr =
+			readl(lat_stat->scu_s0ix_lat_addr);
+	lat_stat->scu_latency[type].exit.curr =
+			readl(lat_stat->scu_s0ix_lat_addr + 1);
+
+	update_simple_stat(&lat_stat->scu_latency[type].entry,
+					lat_stat->count[type]);
+	update_simple_stat(&lat_stat->scu_latency[type].exit,
+					lat_stat->count[type]);
+}
+
+void time_stamp_in_suspend_flow(int mark, bool start)
+{
+	if (!lat_stat || !lat_stat->latency_measure)
+		return;
+
+	if (start) {
+		lat_stat->s3_parts_lat[mark].curr = cpu_clock(0);
+		return;
+	}
+
+	lat_stat->s3_parts_lat[mark].curr = cpu_clock(0) -
+				lat_stat->s3_parts_lat[mark].curr;
+}
+
+static void collect_sleep_state_latency_stat(int sleep_state)
+{
+	int i;
+	if (sleep_state == SYS_STATE_S3)
+		for (i = 0; i < MAX_S3_PARTS; i++)
+			update_simple_stat(&lat_stat->s3_parts_lat[i],
+						lat_stat->count[sleep_state]);
+
+	update_simple_stat(&lat_stat->os_latency[sleep_state].entry,
+						lat_stat->count[sleep_state]);
+	update_simple_stat(&lat_stat->os_latency[sleep_state].exit,
+						lat_stat->count[sleep_state]);
+	lat_stat->count[sleep_state]++;
+}
+
+void time_stamp_for_sleep_state_latency(int sleep_state, bool start, bool entry)
+{
+	if (!lat_stat || !lat_stat->latency_measure)
+		return;
+
+	if (start) {
+		if (entry)
+			lat_stat->os_latency[sleep_state].entry.curr =
+								cpu_clock(0);
+		else
+			lat_stat->os_latency[sleep_state].exit.curr =
+								cpu_clock(0);
+		return;
+	}
+
+	if (entry)
+		lat_stat->os_latency[sleep_state].entry.curr = cpu_clock(0) -
+				lat_stat->os_latency[sleep_state].entry.curr;
+	else {
+		lat_stat->os_latency[sleep_state].exit.curr = cpu_clock(0) -
+				lat_stat->os_latency[sleep_state].exit.curr;
+		collect_sleep_state_latency_stat(sleep_state);
+	}
+}
+#else /* CONFIG_PM_DEBUG */
+void s0ix_scu_latency_stat(int type) {}
+void time_stamp_for_sleep_state_latency(int sleep_state, bool start,
+							bool entry) {}
+void time_stamp_in_suspend_flow(int mark, bool start) {}
+inline unsigned int pmu_get_new_cstate
+		(unsigned int cstate, int *index) { return cstate; };
+#endif /* CONFIG_PM_DEBUG */
+
+static char *dstates[] = {"D0", "D0i1", "D0i2", "D0i3"};
+
+/* This can be used to report NC power transitions */
+void (*nc_report_power_state) (u32, int);
+
+#if defined(CONFIG_INTEL_ATOM_SOC_POWER)
+
+#define PMU_DEBUG_PRINT_STATS	(1U << 0)
+static int debug_mask;
+module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+#define DEBUG_PRINT(logging_type, s, debug_level_mask, args...)		\
+	do {								\
+		if (logging_type)					\
+			seq_printf(s, args);				\
+		else if (debug_mask &					\
+			PMU_DEBUG_PRINT_##debug_level_mask)		\
+			pr_info(args);					\
+	} while (0)
+
+static struct island display_islands[] = {
+	{APM_REG_TYPE, APM_GRAPHICS_ISLAND, "GFX"},
+	{APM_REG_TYPE, APM_VIDEO_DEC_ISLAND, "Video Decoder"},
+	{APM_REG_TYPE, APM_VIDEO_ENC_ISLAND, "Video Encoder"},
+	{APM_REG_TYPE, APM_GL3_CACHE_ISLAND, "GL3 Cache"},
+	{OSPM_REG_TYPE, OSPM_DISPLAY_A_ISLAND, "Display A"},
+	{OSPM_REG_TYPE, OSPM_DISPLAY_B_ISLAND, "Display B"},
+	{OSPM_REG_TYPE, OSPM_DISPLAY_C_ISLAND, "Display C"},
+	{OSPM_REG_TYPE, OSPM_MIPI_ISLAND, "MIPI-DSI"}
+};
+
+static struct island camera_islands[] = {
+	{APM_REG_TYPE, APM_ISP_ISLAND, "ISP"},
+	{APM_REG_TYPE, APM_IPH_ISLAND, "Iunit PHY"}
+};
+
+static char *lss_device_status[4] = { "D0i0", "D0i1", "D0i2", "D0i3" };
+
+static int lsses_num =
+			sizeof(lsses)/sizeof(lsses[0]);
+
+#ifdef LOG_PMU_EVENTS
+static void pmu_log_timestamp(struct timespec *ts)
+{
+	if (timekeeping_suspended) {
+		ts->tv_sec = 0;
+		ts->tv_nsec = 0;
+	} else {
+		ktime_get_ts(ts);
+	}
+}
+
+void pmu_log_pmu_irq(int status)
+{
+	struct mid_pmu_pmu_irq_log *log =
+		&mid_pmu_cxt->pmu_irq_log[mid_pmu_cxt->pmu_irq_log_idx];
+
+	log->status = status;
+	pmu_log_timestamp(&log->ts);
+	mid_pmu_cxt->pmu_irq_log_idx =
+		(mid_pmu_cxt->pmu_irq_log_idx + 1) % LOG_SIZE;
+}
+
+static void pmu_dump_pmu_irq_log(void)
+{
+	struct mid_pmu_pmu_irq_log *log;
+	int i = mid_pmu_cxt->pmu_irq_log_idx, j;
+
+	printk(KERN_ERR"%d last pmu irqs:\n", LOG_SIZE);
+
+	for (j = 0; j  < LOG_SIZE; j++) {
+		i ? i-- : (i = LOG_SIZE - 1);
+		log = &mid_pmu_cxt->pmu_irq_log[i];
+		printk(KERN_ERR"Timestamp: %lu.%09lu\n",
+			log->ts.tv_sec, log->ts.tv_nsec);
+		printk(KERN_ERR"Status = 0x%02x", log->status);
+		printk(KERN_ERR"\n");
+	}
+}
+
+void pmu_log_ipc_irq(void)
+{
+	struct mid_pmu_ipc_irq_log *log =
+		&mid_pmu_cxt->ipc_irq_log[mid_pmu_cxt->ipc_irq_log_idx];
+
+	pmu_log_timestamp(&log->ts);
+	mid_pmu_cxt->ipc_irq_log_idx =
+	(mid_pmu_cxt->ipc_irq_log_idx + 1) % LOG_SIZE;
+}
+
+static void pmu_dump_ipc_irq_log(void)
+{
+	struct mid_pmu_ipc_irq_log *log;
+	int i = mid_pmu_cxt->ipc_irq_log_idx, j;
+
+	printk(KERN_ERR"%d last ipc irqs:\n", LOG_SIZE);
+
+	for (j = 0; j  < LOG_SIZE; j++) {
+		i ? i-- : (i = LOG_SIZE - 1);
+		log = &mid_pmu_cxt->ipc_irq_log[i];
+		printk(KERN_ERR"Timestamp: %lu.%09lu\n",
+			log->ts.tv_sec, log->ts.tv_nsec);
+		printk(KERN_ERR"\n");
+	}
+}
+
+void pmu_log_ipc(u32 command)
+{
+	struct mid_pmu_ipc_log *log =
+	&mid_pmu_cxt->ipc_log[mid_pmu_cxt->ipc_log_idx];
+
+	log->command = command;
+	pmu_log_timestamp(&log->ts);
+	mid_pmu_cxt->ipc_log_idx = (mid_pmu_cxt->ipc_log_idx + 1) % LOG_SIZE;
+}
+
+static void pmu_dump_ipc_log(void)
+{
+	struct mid_pmu_ipc_log *log;
+	int i = mid_pmu_cxt->ipc_log_idx, j;
+
+	printk(KERN_ERR"%d last ipc commands:\n", LOG_SIZE);
+
+	for (j = 0; j  < LOG_SIZE; j++) {
+		i  ? i-- : (i = LOG_SIZE - 1);
+		log = &mid_pmu_cxt->ipc_log[i];
+		printk(KERN_ERR"Timestamp: %lu.%09lu\n",
+			log->ts.tv_sec, log->ts.tv_nsec);
+		printk(KERN_ERR"Command: 0x%08x", log->command);
+		printk(KERN_ERR"\n");
+	}
+}
+
+void pmu_log_command(u32 command, struct pmu_ss_states *pm_ssc)
+{
+	struct mid_pmu_cmd_log *log =
+		&mid_pmu_cxt->cmd_log[mid_pmu_cxt->cmd_log_idx];
+
+	if (pm_ssc != NULL)
+		memcpy(&log->pm_ssc, pm_ssc, sizeof(struct pmu_ss_states));
+	else
+		memset(&log->pm_ssc, 0, sizeof(struct pmu_ss_states));
+	log->command = command;
+	pmu_log_timestamp(&log->ts);
+	mid_pmu_cxt->cmd_log_idx = (mid_pmu_cxt->cmd_log_idx + 1) % LOG_SIZE;
+}
+
+static void pmu_dump_command_log(void)
+{
+	struct mid_pmu_cmd_log *log;
+	int i = mid_pmu_cxt->cmd_log_idx, j, k;
+	u32 cmd_state;
+	printk(KERN_ERR"%d last pmu commands:\n", LOG_SIZE);
+
+	for (j = 0; j  < LOG_SIZE; j++) {
+		i ? i-- : (i = LOG_SIZE - 1);
+		log = &mid_pmu_cxt->cmd_log[i];
+		cmd_state = log->command;
+		printk(KERN_ERR"Timestamp: %lu.%09lu\n",
+			log->ts.tv_sec, log->ts.tv_nsec);
+		switch (cmd_state) {
+		case INTERACTIVE_VALUE:
+			printk(KERN_ERR"PM_CMD = Interactive_CMD IOC bit not set.\n");
+			break;
+		case INTERACTIVE_IOC_VALUE:
+			printk(KERN_ERR"PM_CMD = Interactive_CMD IOC bit set.\n");
+			break;
+		case S0I1_VALUE:
+			printk(KERN_ERR"PM_CMD = S0i1_CMD\n");
+			break;
+		case S0I3_VALUE:
+			printk(KERN_ERR"PM_CMD = S0i3_CMD\n");
+			break;
+		case LPMP3_VALUE:
+			printk(KERN_ERR"PM_CMD = LPMP3_CMD\n");
+			break;
+		default:
+			printk(KERN_ERR "Invalid PM_CMD\n");
+			break;
+		}
+		for (k = 0; k < 4; k++)
+			printk(KERN_ERR"pmu2_states[%d]: 0x%08lx\n",
+				k, log->pm_ssc.pmu2_states[k]);
+			printk(KERN_ERR"\n");
+	}
+}
+
+void pmu_dump_logs(void)
+{
+	struct timespec ts;
+
+	pmu_log_timestamp(&ts);
+	printk(KERN_ERR"Dumping out pmu logs\n");
+	printk(KERN_ERR"Timestamp: %lu.%09lu\n\n", ts.tv_sec, ts.tv_nsec);
+	printk(KERN_ERR"---------------------------------------\n\n");
+	pmu_dump_command_log();
+	printk(KERN_ERR"---------------------------------------\n\n");
+	pmu_dump_pmu_irq_log();
+	printk(KERN_ERR"---------------------------------------\n\n");
+	pmu_dump_ipc_log();
+	printk(KERN_ERR"---------------------------------------\n\n");
+	pmu_dump_ipc_irq_log();
+}
+#else
+void pmu_log_pmu_irq(int status) {}
+void pmu_log_command(u32 command, struct pmu_ss_states *pm_ssc) {}
+void pmu_dump_logs(void) {}
+#endif /* LOG_PMU_EVENTS */
+
+void pmu_stat_start(enum sys_state type)
+{
+	mid_pmu_cxt->pmu_current_state = type;
+	mid_pmu_cxt->pmu_stats[type].last_try = cpu_clock(smp_processor_id());
+}
+
+void pmu_stat_end(void)
+{
+	enum sys_state type = mid_pmu_cxt->pmu_current_state;
+
+	if (type > SYS_STATE_S0I0 && type < SYS_STATE_MAX) {
+		mid_pmu_cxt->pmu_stats[type].last_entry =
+			mid_pmu_cxt->pmu_stats[type].last_try;
+
+		if (!mid_pmu_cxt->pmu_stats[type].count)
+			mid_pmu_cxt->pmu_stats[type].first_entry =
+				mid_pmu_cxt->pmu_stats[type].last_entry;
+
+		mid_pmu_cxt->pmu_stats[type].time +=
+			cpu_clock(smp_processor_id())
+			- mid_pmu_cxt->pmu_stats[type].last_entry;
+
+		mid_pmu_cxt->pmu_stats[type].count++;
+
+		s0ix_scu_latency_stat(type);
+		if (type >= SYS_STATE_S0I1 && type <= SYS_STATE_S0I3)
+			/* time stamp for end of s0ix exit */
+			time_stamp_for_sleep_state_latency(type, false, false);
+	}
+
+	mid_pmu_cxt->pmu_current_state = SYS_STATE_S0I0;
+}
+
+void pmu_stat_error(u8 err_type)
+{
+	enum sys_state type = mid_pmu_cxt->pmu_current_state;
+	u8 err_index;
+
+	if (type > SYS_STATE_S0I0 && type < SYS_STATE_MAX) {
+		switch (err_type) {
+		case SUBSYS_POW_ERR_INT:
+			trace_printk("S0ix_POW_ERR_INT\n");
+			err_index = 0;
+			break;
+		case S0ix_MISS_INT:
+			trace_printk("S0ix_MISS_INT\n");
+			err_index = 1;
+			break;
+		case NO_ACKC6_INT:
+			trace_printk("S0ix_NO_ACKC6_INT\n");
+			err_index = 2;
+			break;
+		default:
+			err_index = 3;
+			break;
+		}
+
+		if (err_index < 3)
+			mid_pmu_cxt->pmu_stats[type].err_count[err_index]++;
+	}
+}
+
+static void pmu_stat_seq_printf(struct seq_file *s, int type, char *typestr)
+{
+	unsigned long long t;
+	unsigned long nanosec_rem, remainder;
+	unsigned long time, init_2_now_time;
+
+	seq_printf(s, "%s\t%5llu\t%10llu\t%9llu\t%9llu\t", typestr,
+		 mid_pmu_cxt->pmu_stats[type].count,
+		 mid_pmu_cxt->pmu_stats[type].err_count[0],
+		 mid_pmu_cxt->pmu_stats[type].err_count[1],
+		 mid_pmu_cxt->pmu_stats[type].err_count[2]);
+
+	t = mid_pmu_cxt->pmu_stats[type].time;
+	nanosec_rem = do_div(t, NANO_SEC);
+
+	/* convert time in secs */
+	time = (unsigned long)t;
+
+	seq_printf(s, "%5lu.%06lu\t",
+	   (unsigned long) t, nanosec_rem / 1000);
+
+	t = mid_pmu_cxt->pmu_stats[type].last_entry;
+	nanosec_rem = do_div(t, NANO_SEC);
+	seq_printf(s, "%5lu.%06lu\t",
+	   (unsigned long) t, nanosec_rem / 1000);
+
+	t = mid_pmu_cxt->pmu_stats[type].first_entry;
+	nanosec_rem = do_div(t, NANO_SEC);
+	seq_printf(s, "%5lu.%06lu\t",
+	   (unsigned long) t, nanosec_rem / 1000);
+
+	t =  cpu_clock(0);
+	t -= mid_pmu_cxt->pmu_init_time;
+	nanosec_rem = do_div(t, NANO_SEC);
+
+	init_2_now_time =  (unsigned long) t;
+
+	/* for calculating percentage residency */
+	t = (u64) time;
+	t *= 100;
+
+	/* take care of divide by zero */
+	if (init_2_now_time) {
+		remainder = do_div(t, init_2_now_time);
+		time = (unsigned long) t;
+
+		/* for getting 3 digit precision after
+		 * decimal dot */
+		t = (u64) remainder;
+		t *= 1000;
+		remainder = do_div(t, init_2_now_time);
+	} else
+		time = t = 0;
+
+	seq_printf(s, "%5lu.%03lu\n", time, (unsigned long) t);
+}
+
+static unsigned long pmu_dev_res_print(int index, unsigned long *precision,
+				unsigned long *sampled_time, bool dev_state)
+{
+	unsigned long long t, delta_time = 0;
+	unsigned long nanosec_rem, remainder;
+	unsigned long time, init_to_now_time;
+
+	t =  cpu_clock(0);
+
+	if (dev_state) {
+		/* print for d0ix */
+		if ((mid_pmu_cxt->pmu_dev_res[index].state != PCI_D0))
+			delta_time = t -
+				mid_pmu_cxt->pmu_dev_res[index].d0i3_entry;
+
+			delta_time += mid_pmu_cxt->pmu_dev_res[index].d0i3_acc;
+	} else {
+		/* print for d0i0 */
+		if ((mid_pmu_cxt->pmu_dev_res[index].state == PCI_D0))
+			delta_time = t -
+				mid_pmu_cxt->pmu_dev_res[index].d0i0_entry;
+
+		delta_time += mid_pmu_cxt->pmu_dev_res[index].d0i0_acc;
+	}
+
+	t -= mid_pmu_cxt->pmu_dev_res[index].start;
+	nanosec_rem = do_div(t, NANO_SEC);
+
+	init_to_now_time =  (unsigned long) t;
+
+	t = delta_time;
+	nanosec_rem = do_div(t, NANO_SEC);
+
+	/* convert time in secs */
+	time = (unsigned long)t;
+	*sampled_time = time;
+
+	/* for calculating percentage residency */
+	t = (u64) time;
+	t *= 100;
+
+	/* take care of divide by zero */
+	if (init_to_now_time) {
+		remainder = do_div(t, init_to_now_time);
+		time = (unsigned long) t;
+
+		/* for getting 3 digit precision after
+		* decimal dot */
+		t = (u64) remainder;
+		t *= 1000;
+		remainder = do_div(t, init_to_now_time);
+	} else
+		time = t = 0;
+
+	*precision = (unsigned long)t;
+
+	return time;
+}
+
+static void nc_device_state_show(struct seq_file *s, struct pci_dev *pdev)
+{
+	int off, i, islands_num, state;
+	struct island *islands;
+
+	if (PCI_SLOT(pdev->devfn) == DEV_GFX &&
+			PCI_FUNC(pdev->devfn) == FUNC_GFX) {
+		off = mid_pmu_cxt->display_off;
+		islands_num = ISLANDS_GFX;
+		islands = &display_islands[0];
+	} else if (PCI_SLOT(pdev->devfn) == DEV_ISP &&
+			PCI_FUNC(pdev->devfn) == FUNC_ISP) {
+		off = mid_pmu_cxt->camera_off;
+		islands_num = ISLANDS_ISP;
+		islands = &camera_islands[0];
+	} else {
+		return;
+	}
+
+	seq_printf(s, "pci %04x %04X %s %20s: %41s %s\n",
+		pdev->vendor, pdev->device, dev_name(&pdev->dev),
+		dev_driver_string(&pdev->dev),
+		"", off ? "" : "blocking s0ix");
+	for (i = 0; i < islands_num; i++) {
+		state = pmu_nc_get_power_state(islands[i].index,
+				islands[i].type);
+		seq_printf(s, "%52s %15s %17s %s\n",
+				 "|------->", islands[i].name, "",
+				(state >= 0) ? dstates[state & 3] : "ERR");
+	}
+}
+
+static int pmu_devices_state_show(struct seq_file *s, void *unused)
+{
+	struct pci_dev *pdev = NULL;
+	int index, i, pmu_num, ss_idx, ss_pos;
+	unsigned int base_class;
+	u32 target_mask, mask, val, needed;
+	struct pmu_ss_states cur_pmsss;
+
+	/* Acquire the scu_ready_sem */
+	down(&mid_pmu_cxt->scu_ready_sem);
+	_pmu2_wait_not_busy();
+	pmu_read_sss(&cur_pmsss);
+	up(&mid_pmu_cxt->scu_ready_sem);
+
+	seq_printf(s, "TARGET_CFG: ");
+	seq_printf(s, "SSS0:%08X ", S0IX_TARGET_SSS0_MASK);
+	seq_printf(s, "SSS1:%08X ", S0IX_TARGET_SSS1_MASK);
+	seq_printf(s, "SSS2:%08X ", S0IX_TARGET_SSS2_MASK);
+	seq_printf(s, "SSS3:%08X ", S0IX_TARGET_SSS3_MASK);
+
+	seq_printf(s, "\n");
+	seq_printf(s, "CONDITION FOR S0I3: ");
+	seq_printf(s, "SSS0:%08X ", S0IX_TARGET_SSS0);
+	seq_printf(s, "SSS1:%08X ", S0IX_TARGET_SSS1);
+	seq_printf(s, "SSS2:%08X ", S0IX_TARGET_SSS2);
+	seq_printf(s, "SSS3:%08X ", S0IX_TARGET_SSS3);
+
+	seq_printf(s, "\n");
+	seq_printf(s, "SSS: ");
+
+	for (i = 0; i < 4; i++)
+		seq_printf(s, "%08lX ", cur_pmsss.pmu2_states[i]);
+
+	if (!mid_pmu_cxt->display_off)
+		seq_printf(s, "display not suspended: blocking s0ix\n");
+	else if (!mid_pmu_cxt->camera_off)
+		seq_printf(s, "camera not suspended: blocking s0ix\n");
+	else if (mid_pmu_cxt->s0ix_possible & MID_S0IX_STATE)
+		seq_printf(s, "can enter s0i1 or s0i3\n");
+	else if (mid_pmu_cxt->s0ix_possible & MID_LPMP3_STATE)
+		seq_printf(s, "can enter lpmp3\n");
+	else
+		seq_printf(s, "blocking s0ix\n");
+
+	seq_printf(s, "cmd_error_int count: %d\n", mid_pmu_cxt->cmd_error_int);
+
+	seq_printf(s,
+	"\tcount\tsybsys_pow\ts0ix_miss\tno_ack_c6\ttime (secs)\tlast_entry");
+	seq_printf(s, "\tfirst_entry\tresidency(%%)\n");
+
+	pmu_stat_seq_printf(s, SYS_STATE_S0I1, "s0i1");
+	pmu_stat_seq_printf(s, SYS_STATE_S0I2, "lpmp3");
+	pmu_stat_seq_printf(s, SYS_STATE_S0I3, "s0i3");
+	pmu_stat_seq_printf(s, SYS_STATE_S3, "s3");
+
+	for_each_pci_dev(pdev) {
+		/* find the base class info */
+		base_class = pdev->class >> 16;
+
+		if (base_class == PCI_BASE_CLASS_BRIDGE)
+			continue;
+
+		if (pmu_pci_to_indexes(pdev, &index, &pmu_num, &ss_idx,
+								  &ss_pos))
+			continue;
+
+		if (pmu_num == PMU_NUM_1) {
+			nc_device_state_show(s, pdev);
+			continue;
+		}
+
+		mask	= (D0I3_MASK << (ss_pos * BITS_PER_LSS));
+		val	= (cur_pmsss.pmu2_states[ss_idx] & mask) >>
+						(ss_pos * BITS_PER_LSS);
+		switch (ss_idx) {
+		case 0:
+			target_mask = S0IX_TARGET_SSS0_MASK;
+			break;
+		case 1:
+			target_mask = S0IX_TARGET_SSS1_MASK;
+			break;
+		case 2:
+			target_mask = S0IX_TARGET_SSS2_MASK;
+			break;
+		case 3:
+			target_mask = S0IX_TARGET_SSS3_MASK;
+			break;
+		default:
+			target_mask = 0;
+			break;
+		}
+		needed	= ((target_mask &  mask) != 0);
+
+		seq_printf(s, "pci %04x %04X %s %20s: lss:%02d reg:%d"
+			"mask:%08X wk:%02d:%02d:%02d:%03d %s  %s\n",
+			pdev->vendor, pdev->device, dev_name(&pdev->dev),
+			dev_driver_string(&pdev->dev),
+			index - mid_pmu_cxt->pmu1_max_devs, ss_idx, mask,
+			mid_pmu_cxt->num_wakes[index][SYS_STATE_S0I1],
+			mid_pmu_cxt->num_wakes[index][SYS_STATE_S0I2],
+			mid_pmu_cxt->num_wakes[index][SYS_STATE_S0I3],
+			mid_pmu_cxt->num_wakes[index][SYS_STATE_S3],
+			dstates[val & 3],
+			(needed && !val) ? "blocking s0ix" : "");
+
+	}
+
+	return 0;
+}
+
+static int devices_state_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, pmu_devices_state_show, NULL);
+}
+
+static ssize_t devices_state_write(struct file *file,
+		     const char __user *userbuf, size_t count, loff_t *ppos)
+{
+	char buf[32];
+	int buf_size = min(count, sizeof(buf)-1);
+
+	if (copy_from_user(buf, userbuf, buf_size))
+		return -EFAULT;
+
+
+	buf[buf_size] = 0;
+
+	if (((strlen("clear")+1) == buf_size) &&
+		!strncmp(buf, "clear", strlen("clear"))) {
+		down(&mid_pmu_cxt->scu_ready_sem);
+		memset(mid_pmu_cxt->pmu_stats, 0,
+					sizeof(mid_pmu_cxt->pmu_stats));
+		memset(mid_pmu_cxt->num_wakes, 0,
+					sizeof(mid_pmu_cxt->num_wakes));
+		mid_pmu_cxt->pmu_current_state = SYS_STATE_S0I0;
+		mid_pmu_cxt->pmu_init_time =
+			cpu_clock(0);
+		clear_d0ix_stats();
+		up(&mid_pmu_cxt->scu_ready_sem);
+	}
+
+	return buf_size;
+}
+
+static const struct file_operations devices_state_operations = {
+	.open		= devices_state_open,
+	.read		= seq_read,
+	.write		= devices_state_write,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int show_pmu_lss_status(struct seq_file *s, void *unused)
+{
+	int sss_reg_index;
+	int offset;
+	int lss;
+	unsigned long status;
+	unsigned long sub_status;
+	unsigned long lss_status[4];
+	struct lss_definition *entry;
+
+	down(&mid_pmu_cxt->scu_ready_sem);
+
+	lss_status[0] = readl(&mid_pmu_cxt->pmu_reg->pm_sss[0]);
+	lss_status[1] = readl(&mid_pmu_cxt->pmu_reg->pm_sss[1]);
+	lss_status[2] = readl(&mid_pmu_cxt->pmu_reg->pm_sss[2]);
+	lss_status[3] = readl(&mid_pmu_cxt->pmu_reg->pm_sss[3]);
+
+	up(&mid_pmu_cxt->scu_ready_sem);
+
+	lss = 0;
+	seq_printf(s, "%5s\t%12s %35s %5s %4s %4s %4s %4s\n",
+			"lss", "block", "subsystem", "state", "D0i0", "D0i1",
+			"D0i2", "D0i3");
+	seq_printf(s, "====================================================="
+		      "=====================\n");
+	for (sss_reg_index = 0; sss_reg_index < 4; sss_reg_index++) {
+		status = lss_status[sss_reg_index];
+		for (offset = 0; offset < sizeof(unsigned long) * 8 / 2;
+								offset++) {
+			sub_status = status & 3;
+			if (lss >= lsses_num)
+				entry = &lsses[lsses_num - 1];
+			else
+				entry = &lsses[lss];
+			seq_printf(s, "%5s\t%12s %35s %4s %4d %4d %4d %4d\n",
+					entry->lss_name, entry->block,
+					entry->subsystem,
+					lss_device_status[sub_status],
+					get_d0ix_stat(lss, SS_STATE_D0I0),
+					get_d0ix_stat(lss, SS_STATE_D0I1),
+					get_d0ix_stat(lss, SS_STATE_D0I2),
+					get_d0ix_stat(lss, SS_STATE_D0I3));
+
+			status >>= 2;
+			lss++;
+		}
+	}
+
+	return 0;
+}
+
+static int pmu_sss_state_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, show_pmu_lss_status, NULL);
+}
+
+static const struct file_operations pmu_sss_state_operations = {
+	.open		= pmu_sss_state_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int show_pmu_dev_stats(struct seq_file *s, void *unused)
+{
+	struct pci_dev *pdev = NULL;
+	unsigned long sampled_time, precision;
+	int index, pmu_num, ss_idx, ss_pos;
+	unsigned int base_class;
+
+	seq_printf(s, "%5s\t%20s\t%10s\t%10s\t%s\n",
+		"lss", "Name", "D0_res", "D0ix_res", "Sampled_Time");
+	seq_printf(s,
+	"==================================================================\n");
+
+	for_each_pci_dev(pdev) {
+		/* find the base class info */
+		base_class = pdev->class >> 16;
+
+		if (base_class == PCI_BASE_CLASS_BRIDGE)
+			continue;
+
+		if (pmu_pci_to_indexes(pdev, &index, &pmu_num, &ss_idx,
+							&ss_pos))
+			continue;
+
+		if (pmu_num == PMU_NUM_1) {
+			seq_printf(s,
+			"%5s%20s\t%5lu.%03lu%%\t%5lu.%03lu%%\t%lu\n",
+			"NC", dev_driver_string(&pdev->dev),
+			pmu_dev_res_print(index, &precision,
+				 &sampled_time, false),
+			precision,
+			pmu_dev_res_print(index, &precision,
+				 &sampled_time, true),
+			precision, sampled_time);
+			continue;
+		}
+
+		/* Print for South Complex devices */
+		seq_printf(s, "%5d\t%20s\t%5lu.%03lu%%\t%5lu.%03lu%%\t%lu\n",
+		index - mid_pmu_cxt->pmu1_max_devs,
+		dev_driver_string(&pdev->dev),
+		pmu_dev_res_print(index, &precision, &sampled_time, false),
+		precision,
+		pmu_dev_res_print(index, &precision, &sampled_time, true),
+		precision, sampled_time);
+	}
+	return 0;
+}
+
+static int pmu_dev_stat_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, show_pmu_dev_stats, NULL);
+}
+
+static const struct file_operations pmu_dev_stat_operations = {
+	.open		= pmu_dev_stat_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+#ifdef CONFIG_PM_DEBUG
+static int pmu_stats_interval = PMU_LOG_INTERVAL_SECS;
+module_param_named(pmu_stats_interval, pmu_stats_interval,
+				int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+void pmu_s0ix_demotion_stat(int req_state, int grant_state)
+{
+	struct pmu_ss_states cur_pmsss;
+	int i, req_sys_state, offset;
+	unsigned long status, sub_status;
+	unsigned long s0ix_target_sss_mask[4] = {
+				S0IX_TARGET_SSS0_MASK,
+				S0IX_TARGET_SSS1_MASK,
+				S0IX_TARGET_SSS2_MASK,
+				S0IX_TARGET_SSS3_MASK};
+
+	unsigned long s0ix_target_sss[4] = {
+				S0IX_TARGET_SSS0,
+				S0IX_TARGET_SSS1,
+				S0IX_TARGET_SSS2,
+				S0IX_TARGET_SSS3};
+
+	unsigned long lpmp3_target_sss_mask[4] = {
+				LPMP3_TARGET_SSS0_MASK,
+				LPMP3_TARGET_SSS1_MASK,
+				LPMP3_TARGET_SSS2_MASK,
+				LPMP3_TARGET_SSS3_MASK};
+
+	unsigned long lpmp3_target_sss[4] = {
+				LPMP3_TARGET_SSS0,
+				LPMP3_TARGET_SSS1,
+				LPMP3_TARGET_SSS2,
+				LPMP3_TARGET_SSS3};
+
+	req_sys_state = mid_state_to_sys_state(req_state);
+	if ((grant_state >= C4_STATE_IDX) && (grant_state <= S0I3_STATE_IDX))
+		mid_pmu_cxt->pmu_stats
+			[req_sys_state].demote_count
+				[grant_state-C4_STATE_IDX]++;
+
+	if (down_trylock(&mid_pmu_cxt->scu_ready_sem))
+		return;
+
+	pmu_read_sss(&cur_pmsss);
+	up(&mid_pmu_cxt->scu_ready_sem);
+
+	if (!mid_pmu_cxt->camera_off)
+		mid_pmu_cxt->pmu_stats[req_sys_state].camera_blocker_count++;
+
+	if (!mid_pmu_cxt->display_off)
+		mid_pmu_cxt->pmu_stats[req_sys_state].display_blocker_count++;
+
+	if (!mid_pmu_cxt->s0ix_possible) {
+		for (i = 0; i < 4; i++) {
+			unsigned int lss_per_register;
+			if (req_state == MID_LPMP3_STATE)
+				status = lpmp3_target_sss[i] ^
+					(cur_pmsss.pmu2_states[i] &
+						lpmp3_target_sss_mask[i]);
+			else
+				status = s0ix_target_sss[i] ^
+					(cur_pmsss.pmu2_states[i] &
+						s0ix_target_sss_mask[i]);
+			if (!status)
+				continue;
+
+			lss_per_register =
+				(sizeof(unsigned long)*8)/BITS_PER_LSS;
+
+			for (offset = 0; offset < lss_per_register; offset++) {
+				sub_status = status & SS_IDX_MASK;
+				if (sub_status) {
+					mid_pmu_cxt->pmu_stats[req_sys_state].
+						blocker_count
+						[offset + lss_per_register*i]++;
+				}
+
+				status >>= BITS_PER_LSS;
+			}
+		}
+	}
+}
+EXPORT_SYMBOL(pmu_s0ix_demotion_stat);
+
+static void pmu_log_s0ix_status(int type, char *typestr,
+		struct seq_file *s, bool logging_type)
+{
+	unsigned long long t;
+	unsigned long time, remainder, init_2_now_time;
+
+	t = mid_pmu_cxt->pmu_stats[type].time;
+	remainder = do_div(t, NANO_SEC);
+
+	/* convert time in secs */
+	time = (unsigned long)t;
+
+	t =  cpu_clock(0);
+	t -= mid_pmu_cxt->pmu_init_time;
+	remainder = do_div(t, NANO_SEC);
+
+	init_2_now_time =  (unsigned long) t;
+
+	/* for calculating percentage residency */
+	t = (u64) time;
+	t *= 100;
+
+	/* take care of divide by zero */
+	if (init_2_now_time) {
+		remainder = do_div(t, init_2_now_time);
+		time = (unsigned long) t;
+
+		/* for getting 3 digit precision after
+		 * decimal dot */
+		t = (u64) remainder;
+		t *= 1000;
+		remainder = do_div(t, init_2_now_time);
+	} else
+		time = t = 0;
+	DEBUG_PRINT(logging_type, s, STATS,
+			"%s\t%5llu\t%9llu\t%9llu\t%5lu.%03lu\n"
+			, typestr, mid_pmu_cxt->pmu_stats[type].count,
+			mid_pmu_cxt->pmu_stats[type].err_count[1],
+			mid_pmu_cxt->pmu_stats[type].err_count[2],
+			time, (unsigned long) t);
+}
+
+static void pmu_log_s0ix_demotion(int type, char *typestr,
+		struct seq_file *s, bool logging_type)
+{
+	DEBUG_PRINT(logging_type, s, STATS, "%s:\t%6d\t%6d\t%6d\t%6d\t%6d\n",
+		typestr,
+		mid_pmu_cxt->pmu_stats[type].demote_count[0],
+		mid_pmu_cxt->pmu_stats[type].demote_count[1],
+		mid_pmu_cxt->pmu_stats[type].demote_count[2],
+		mid_pmu_cxt->pmu_stats[type].demote_count[3],
+		mid_pmu_cxt->pmu_stats[type].demote_count[4]);
+}
+
+static void pmu_log_s0ix_lss_blocked(int type, char *typestr,
+		struct seq_file *s, bool logging_type)
+{
+	int i, block_count;
+
+	DEBUG_PRINT(logging_type, s, STATS, "%s: Block Count\n", typestr);
+
+	block_count = mid_pmu_cxt->pmu_stats[type].display_blocker_count;
+
+	if (block_count)
+		DEBUG_PRINT(logging_type, s, STATS,
+			 "\tDisplay blocked: %d times\n", block_count);
+
+	block_count = mid_pmu_cxt->pmu_stats[type].camera_blocker_count;
+
+	if (block_count)
+		DEBUG_PRINT(logging_type, s, STATS,
+			"\tCamera blocked: %d times\n", block_count);
+
+	DEBUG_PRINT(logging_type, s, STATS, "\tLSS\t #blocked\n");
+
+	for  (i = 0; i < MAX_LSS_POSSIBLE; i++) {
+		block_count = mid_pmu_cxt->pmu_stats[type].blocker_count[i];
+		if (block_count)
+			DEBUG_PRINT(logging_type, s, STATS, "\t%02d\t %6d\n", i,
+						block_count);
+	}
+	DEBUG_PRINT(logging_type, s, STATS, "\n");
+}
+
+static void pmu_stats_logger(bool logging_type, struct seq_file *s)
+{
+
+	if (!logging_type)
+		DEBUG_PRINT(logging_type, s, STATS,
+			"\n----MID_PMU_STATS_LOG_BEGIN----\n");
+
+	DEBUG_PRINT(logging_type, s, STATS,
+			"\tcount\ts0ix_miss\tno_ack_c6\tresidency(%%)\n");
+	pmu_log_s0ix_status(SYS_STATE_S0I1, "s0i1", s, logging_type);
+	pmu_log_s0ix_status(SYS_STATE_S0I2, "lpmp3", s, logging_type);
+	pmu_log_s0ix_status(SYS_STATE_S0I3, "s0i3", s, logging_type);
+	pmu_log_s0ix_status(SYS_STATE_S3, "s3", s, logging_type);
+
+	DEBUG_PRINT(logging_type, s, STATS, "\nFrom:\tTo\n");
+	DEBUG_PRINT(logging_type, s, STATS,
+		"\t    C4\t   C6\t  S0i1\t  Lpmp3\t  S0i3\n");
+
+	/* storing C6 demotion info in S0I0 */
+	pmu_log_s0ix_demotion(SYS_STATE_S0I0, "  C6", s, logging_type);
+
+	pmu_log_s0ix_demotion(SYS_STATE_S0I1, "s0i1", s, logging_type);
+	pmu_log_s0ix_demotion(SYS_STATE_S0I2, "lpmp3", s, logging_type);
+	pmu_log_s0ix_demotion(SYS_STATE_S0I3, "s0i3", s, logging_type);
+
+	DEBUG_PRINT(logging_type, s, STATS, "\n");
+	pmu_log_s0ix_lss_blocked(SYS_STATE_S0I1, "s0i1", s, logging_type);
+	pmu_log_s0ix_lss_blocked(SYS_STATE_S0I2, "lpmp3", s, logging_type);
+	pmu_log_s0ix_lss_blocked(SYS_STATE_S0I3, "s0i3", s, logging_type);
+
+	if (!logging_type)
+		DEBUG_PRINT(logging_type, s, STATS,
+				"\n----MID_PMU_STATS_LOG_END----\n");
+}
+
+static void pmu_log_stat(struct work_struct *work)
+{
+
+	pmu_stats_logger(false, NULL);
+
+	schedule_delayed_work(&mid_pmu_cxt->log_work,
+			msecs_to_jiffies(pmu_stats_interval*1000));
+}
+
+static int show_pmu_stats_log(struct seq_file *s, void *unused)
+{
+	pmu_stats_logger(true, s);
+	return 0;
+}
+
+static int pmu_stats_log_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, show_pmu_stats_log, NULL);
+}
+
+static const struct file_operations pmu_stats_log_operations = {
+	.open		= pmu_stats_log_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+#else
+void pmu_s0ix_demotion_stat(int req_state, int grant_state) {}
+EXPORT_SYMBOL(pmu_s0ix_demotion_stat);
+#endif
+
+void pmu_stats_init(void)
+{
+	struct dentry *fentry;
+
+	/* /sys/kernel/debug/mid_pmu_states */
+	(void) debugfs_create_file("mid_pmu_states", S_IFREG | S_IRUGO,
+				NULL, NULL, &devices_state_operations);
+
+	/* /sys/kernel/debug/pmu_sss_states */
+	(void) debugfs_create_file("pmu_sss_states", S_IFREG | S_IRUGO,
+				NULL, NULL, &pmu_sss_state_operations);
+
+	/* /sys/kernel/debug/pmu_dev_stats */
+	(void) debugfs_create_file("pmu_dev_stats", S_IFREG | S_IRUGO,
+				NULL, NULL, &pmu_dev_stat_operations);
+
+#ifdef CONFIG_PM_DEBUG
+	/* dynamic debug tracing in every 5 mins */
+	INIT_DEFERRABLE_WORK(&mid_pmu_cxt->log_work, pmu_log_stat);
+	schedule_delayed_work(&mid_pmu_cxt->log_work,
+				msecs_to_jiffies(pmu_stats_interval*1000));
+
+	debug_mask = PMU_DEBUG_PRINT_STATS;
+
+	/* /sys/kernel/debug/pmu_stats_log */
+	fentry = debugfs_create_file("pmu_stats_log", S_IFREG | S_IRUGO,
+				NULL, NULL, &pmu_stats_log_operations);
+	if (fentry == NULL)
+		printk(KERN_ERR "Failed to create pmu_stats_log debugfs\n");
+#endif
+}
+
+void pmu_s3_stats_update(int enter)
+{
+
+}
+
+void pmu_stats_finish(void)
+{
+#ifdef CONFIG_PM_DEBUG
+	cancel_delayed_work_sync(&mid_pmu_cxt->log_work);
+#endif
+}
+
+#endif /*if CONFIG_X86_MDFLD_POWER || CONFIG_X86_CLV_POWER*/
+
+#ifdef CONFIG_ATOM_SOC_POWER
+
+static u32 prev_s0ix_cnt[SYS_STATE_MAX];
+static unsigned long long prev_s0ix_res[SYS_STATE_MAX];
+static unsigned long long cur_s0ix_res[SYS_STATE_MAX];
+static unsigned long long cur_s0ix_cnt[SYS_STATE_MAX];
+static u32 S3_count;
+static unsigned long long S3_res;
+
+static inline u32 s0ix_count_read(int state)
+{
+	return readl(s0ix_counters + s0ix_counter_reg_map[state]);
+}
+
+static inline u64 s0ix_residency_read(int state)
+{
+	return readq(s0ix_counters + s0ix_residency_reg_map[state]);
+}
+
+static void pmu_stat_seq_printf(struct seq_file *s, int type, char *typestr,
+							long long uptime)
+{
+	unsigned long long t;
+	u32 scu_val = 0, time = 0;
+	u32 remainder;
+	unsigned long init_2_now_time;
+	unsigned long long tsc_freq = 1330000;
+
+	/* If tsc calibration fails use the default as 1330Mhz */
+	if (tsc_khz)
+		tsc_freq = tsc_khz;
+
+	/* Print S0ix residency counter */
+	if (type == SYS_STATE_S0I0) {
+		for (t = SYS_STATE_S0I1; t <= SYS_STATE_S3; t++)
+			time += cur_s0ix_res[t];
+	} else if (type < SYS_STATE_S3) {
+		t = s0ix_residency_read(type);
+		if (t < prev_s0ix_res[type])
+			t += (((unsigned long long)~0) - prev_s0ix_res[type]);
+		else
+			t -= prev_s0ix_res[type];
+
+		if (type == SYS_STATE_S0I3)
+			t -= prev_s0ix_res[SYS_STATE_S3];
+	} else
+		t = prev_s0ix_res[SYS_STATE_S3];
+
+	if (type == SYS_STATE_S0I0) {
+		/* uptime(nanoS) - sum_res(miliSec) */
+		t = uptime;
+		do_div(t, MICRO_SEC);
+		time = t - time;
+	} else {
+		/* s0ix residency counters are in TSC cycle count domain
+		 * convert this to milli second time domain
+		 */
+		remainder = do_div(t, tsc_freq);
+
+		/* store time in millisecs */
+		time = (unsigned int)t;
+	}
+	cur_s0ix_res[type] = (unsigned int)time;
+
+	seq_printf(s, "%s\t%5lu.%03lu\t", typestr,
+		(unsigned long)(time/1000), (unsigned long)(time%1000));
+
+	t = uptime;
+	do_div(t, MICRO_SEC); /* time in milli secs */
+
+	/* Note: with millisecs accuracy we get more
+	 * precise residency percentages, but we have
+	 * to trade off with the max number of days
+	 * that we can run without clearing counters,
+	 * with 32bit counter this value is ~50days.
+	 */
+	init_2_now_time =  (unsigned long) t;
+
+	/* for calculating percentage residency */
+	t	= (u64)(time);
+	t	*= 100;
+
+	/* take care of divide by zero */
+	if (init_2_now_time) {
+		remainder = do_div(t, init_2_now_time);
+		time = (unsigned long) t;
+
+		/* for getting 3 digit precision after
+		 * decimal dot */
+		t = (u64) remainder;
+		t *= 1000;
+		remainder = do_div(t, init_2_now_time);
+	} else
+		time = t = 0;
+
+	seq_printf(s, "%5lu.%03lu\t", (unsigned long) time, (unsigned long) t);
+
+	/* Print S0ix counters */
+	if (type == SYS_STATE_S0I0) {
+		for (t = SYS_STATE_S0I1; t <= SYS_STATE_S3; t++)
+			scu_val += cur_s0ix_cnt[t];
+		if (scu_val == 0) /* S0I0 residency 100% */
+			scu_val = 1;
+	} else if (type < SYS_STATE_S3) {
+		scu_val = s0ix_count_read(type);
+		if (scu_val < prev_s0ix_cnt[type])
+			scu_val += (((u32)~0) - prev_s0ix_cnt[type]);
+		else
+			scu_val -= prev_s0ix_cnt[type];
+
+		if (type == SYS_STATE_S0I3)
+			scu_val -= prev_s0ix_cnt[SYS_STATE_S3];
+	} else
+			scu_val = prev_s0ix_cnt[SYS_STATE_S3];
+
+	if (type != SYS_STATE_S0I0)
+		cur_s0ix_cnt[type] = scu_val;
+
+	seq_printf(s, "%5lu\t", (unsigned long) scu_val);
+
+	remainder = 0;
+	t = cur_s0ix_res[type];
+	if (scu_val) { /* s0ix_time in millisecs */
+		do_div(t, scu_val);
+		remainder = do_div(t, 1000);
+	}
+	seq_printf(s, "%5lu.%03lu\n", (unsigned long) t,
+			(unsigned long) remainder);
+}
+
+static int pmu_devices_state_show(struct seq_file *s, void *unused)
+{
+	struct pci_dev *pdev = NULL;
+	int index, i, pmu_num, ss_idx, ss_pos;
+	unsigned int base_class;
+	u32 mask, val, nc_pwr_sts;
+	struct pmu_ss_states cur_pmsss;
+	long long uptime, uptime_t;
+	int ret;
+
+	if (!pmu_initialized)
+		return 0;
+
+	/* Acquire the scu_ready_sem */
+	down(&mid_pmu_cxt->scu_ready_sem);
+	_pmu2_wait_not_busy();
+	pmu_read_sss(&cur_pmsss);
+	up(&mid_pmu_cxt->scu_ready_sem);
+
+	seq_printf(s, "SSS: ");
+
+	for (i = 0; i < 4; i++)
+		seq_printf(s, "%08lX ", cur_pmsss.pmu2_states[i]);
+
+	seq_printf(s, "cmd_error_int count: %d\n", mid_pmu_cxt->cmd_error_int);
+
+	seq_printf(s, "\t\t\ttime(secs)\tresidency(%%)\tcount\tAvg.Res(Sec)\n");
+
+	down(&mid_pmu_cxt->scu_ready_sem);
+	/* Dump S0ix residency counters */
+	ret = intel_scu_ipc_simple_command(DUMP_RES_COUNTER, 0);
+	if (ret)
+		seq_printf(s, "IPC command to DUMP S0ix residency failed\n");
+
+	/* Dump number of interations of S0ix */
+	ret = intel_scu_ipc_simple_command(DUMP_S0IX_COUNT, 0);
+	if (ret)
+		seq_printf(s, "IPC command to DUMP S0ix count failed\n");
+	up(&mid_pmu_cxt->scu_ready_sem);
+
+	uptime =  cpu_clock(0);
+	uptime -= mid_pmu_cxt->pmu_init_time;
+	pmu_stat_seq_printf(s, SYS_STATE_S0I1, "s0i1             ", uptime);
+	pmu_stat_seq_printf(s, SYS_STATE_S0I1_LPMP3, "s0i1-lpe         ", uptime);
+	pmu_stat_seq_printf(s, SYS_STATE_S0I1_PSH, "s0i1-psh         ", uptime);
+	pmu_stat_seq_printf(s, SYS_STATE_S0I1_DISP, "s0i1-disp        ", uptime);
+	pmu_stat_seq_printf(s, SYS_STATE_S0I1_LPMP3_PSH, "s0i1-lpe-psh     ", uptime);
+	pmu_stat_seq_printf(s, SYS_STATE_S0I1_LPMP3_DISP, "s0i1-lpe-disp    ", uptime);
+	pmu_stat_seq_printf(s, SYS_STATE_S0I1_PSH, "s0i1-psh-disp    ", uptime);
+	pmu_stat_seq_printf(s, SYS_STATE_S0I1_LPMP3_PSH_DISP, "s0i1-lpe-psh-disp", uptime);
+	pmu_stat_seq_printf(s, SYS_STATE_S0I2, "s0i2             ", uptime);
+	pmu_stat_seq_printf(s, SYS_STATE_S0I3, "s0i3             ", uptime);
+	pmu_stat_seq_printf(s, SYS_STATE_S0I3_PSH_RET, "s0i3-psh-ret     ", uptime);
+	pmu_stat_seq_printf(s, SYS_STATE_S3, "s3               ", uptime);
+	pmu_stat_seq_printf(s, SYS_STATE_S0I0, "s0               ", uptime);
+
+	val = do_div(uptime, NANO_SEC);
+	seq_printf(s, "\n\nTotal time: %5lu.%03lu Sec\n", (unsigned long)uptime,
+		   (unsigned long) val/1000000);
+
+	seq_puts(s, "\nNORTH COMPLEX DEVICES :\n\n");
+	seq_puts(s, "  IP_NAME : State D0i0_Time D0i0\% Count\n");
+	seq_puts(s, "========================================\n");
+
+	nc_pwr_sts = intel_mid_msgbus_read32(PUNIT_PORT, NC_PM_SSS);
+	for (i = 0; i < mrfl_no_of_nc_devices; i++) {
+		unsigned long long t, t1;
+		u32 remainder, time, d0i0_time_secs;
+
+		val = nc_pwr_sts & 3;
+		nc_pwr_sts >>= BITS_PER_LSS;
+
+		/* For Islands after VED, we dont receive
+		 * requests for D0ix
+		 */
+		if (i <= VED) {
+			down(&mid_pmu_cxt->scu_ready_sem);
+
+			t = mid_pmu_cxt->nc_d0i0_time[i];
+			/* If in D0i0 add current time */
+			if (val == D0I0_MASK)
+				t += (cpu_clock(0) - mid_pmu_cxt->nc_d0i0_prev_time[i]);
+
+			uptime_t =  cpu_clock(0);
+			uptime_t -= mid_pmu_cxt->pmu_init_time;
+
+			up(&mid_pmu_cxt->scu_ready_sem);
+
+			t1 = t;
+			d0i0_time_secs = do_div(t1, NANO_SEC);
+
+			/* convert to usecs */
+			do_div(t, 10000);
+			do_div(uptime_t, 1000000);
+
+			if (uptime_t) {
+				remainder = do_div(t, uptime_t);
+
+				time = (unsigned long) t;
+
+				/* for getting 2 digit precision after
+				 * decimal dot */
+				t = (u64) remainder;
+				t *= 100;
+				remainder = do_div(t, uptime_t);
+			} else {
+				time = t = 0;
+			}
+		}
+
+		seq_printf(s, "%9s : %s", mrfl_nc_devices[i], dstates[val]);
+		if (i <= VED) {
+			seq_printf(s, " %5lu.%02lu", (unsigned long)t1,
+						   (unsigned long) d0i0_time_secs/10000000);
+			seq_printf(s, "   %3lu.%02lu", (unsigned long) time, (unsigned long) t);
+			seq_printf(s, " %5lu\n", (unsigned long) mid_pmu_cxt->nc_d0i0_count[i]);
+		} else
+			seq_puts(s, "\n");
+	}
+
+	seq_printf(s, "\nSOUTH COMPLEX DEVICES :\n\n");
+
+	seq_puts(s, "PCI VNDR DEVC DEVICE_NAME  DEVICE_DRIVER_STRING  LSS#");
+	seq_puts(s, "   State    D0i0_Time        D0i0\% Count\n");
+	seq_puts(s, "=====================================================");
+	seq_puts(s, "=========================================\n");
+	for_each_pci_dev(pdev) {
+		unsigned long long t, t1;
+		u32 remainder, time, d0i0_time_secs;
+		int lss;
+
+		/* find the base class info */
+		base_class = pdev->class >> 16;
+
+		if (base_class == PCI_BASE_CLASS_BRIDGE)
+			continue;
+
+		if (pmu_pci_to_indexes(pdev, &index, &pmu_num, &ss_idx,
+								  &ss_pos))
+			continue;
+
+		if (pmu_num == PMU_NUM_1)
+			continue;
+
+		mask	= (D0I3_MASK << (ss_pos * BITS_PER_LSS));
+		val	= (cur_pmsss.pmu2_states[ss_idx] & mask) >>
+						(ss_pos * BITS_PER_LSS);
+
+		lss = index - mid_pmu_cxt->pmu1_max_devs;
+
+		/* for calculating percentage residency */
+		down(&mid_pmu_cxt->scu_ready_sem);
+
+		t = mid_pmu_cxt->d0i0_time[lss];
+		/* If in D0i0 add current time */
+		if (val == D0I0_MASK)
+			t += (cpu_clock(0) - mid_pmu_cxt->d0i0_prev_time[lss]);
+
+		uptime_t =  cpu_clock(0);
+		uptime_t -= mid_pmu_cxt->pmu_init_time;
+
+		up(&mid_pmu_cxt->scu_ready_sem);
+
+		t1 = t;
+		d0i0_time_secs = do_div(t1, NANO_SEC);
+
+		/* convert to usecs */
+		do_div(t, 10000);
+		do_div(uptime_t, 1000000);
+
+		if (uptime_t) {
+			remainder = do_div(t, uptime_t);
+
+			time = (unsigned long) t;
+
+			/* for getting 2 digit precision after
+			 * decimal dot */
+			t = (u64) remainder;
+			t *= 100;
+			remainder = do_div(t, uptime_t);
+		} else {
+			time = t = 0;
+		}
+
+
+		seq_printf(s, "pci %04x %04X %s %20.20s: lss:%02d",
+			pdev->vendor, pdev->device, dev_name(&pdev->dev),
+			dev_driver_string(&pdev->dev), lss);
+		seq_printf(s, " %s", dstates[val & 3]);
+		seq_printf(s, "\t%5lu.%02lu", (unsigned long)t1,
+						   (unsigned long) d0i0_time_secs/10000000);
+		seq_printf(s, "\t%3lu.%02lu", (unsigned long) time, (unsigned long) t);
+		seq_printf(s, "\t%5lu\n", (unsigned long) mid_pmu_cxt->d0i0_count[lss]);
+	}
+
+	return 0;
+}
+
+static int devices_state_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, pmu_devices_state_show, NULL);
+}
+
+static ssize_t devices_state_write(struct file *file,
+		     const char __user *userbuf, size_t count, loff_t *ppos)
+{
+	char buf[32];
+	int ret;
+	int buf_size = min(count, sizeof(buf)-1);
+
+	if (copy_from_user(buf, userbuf, buf_size))
+		return -EFAULT;
+	buf[buf_size] = 0;
+
+	if (((strlen("clear")+1) == buf_size) &&
+		!strncmp(buf, "clear", strlen("clear"))) {
+		down(&mid_pmu_cxt->scu_ready_sem);
+
+		/* Dump S0ix residency counters */
+		ret = intel_scu_ipc_simple_command(DUMP_RES_COUNTER, 0);
+		if (ret)
+			printk(KERN_ERR "IPC command to DUMP S0ix residency failed\n");
+
+		/* Dump number of interations of S0ix */
+		ret = intel_scu_ipc_simple_command(DUMP_S0IX_COUNT, 0);
+		if (ret)
+			printk(KERN_ERR "IPC command to DUMP S0ix count failed\n");
+
+		mid_pmu_cxt->pmu_init_time = cpu_clock(0);
+		prev_s0ix_cnt[SYS_STATE_S0I1] = s0ix_count_read(SYS_STATE_S0I1);
+		prev_s0ix_cnt[SYS_STATE_S0I1_LPMP3] = s0ix_count_read(SYS_STATE_S0I1_LPMP3);
+		prev_s0ix_cnt[SYS_STATE_S0I1_PSH] = s0ix_count_read(SYS_STATE_S0I1_PSH);
+		prev_s0ix_cnt[SYS_STATE_S0I1_DISP] = s0ix_count_read(SYS_STATE_S0I1_DISP);
+		prev_s0ix_cnt[SYS_STATE_S0I1_LPMP3_PSH] = s0ix_count_read(SYS_STATE_S0I1_LPMP3_PSH);
+		prev_s0ix_cnt[SYS_STATE_S0I1_LPMP3_DISP] = s0ix_count_read(SYS_STATE_S0I1_LPMP3_DISP);
+		prev_s0ix_cnt[SYS_STATE_S0I1_PSH_DISP] = s0ix_count_read(SYS_STATE_S0I1_PSH_DISP);
+		prev_s0ix_cnt[SYS_STATE_S0I1_LPMP3_PSH_DISP] = s0ix_count_read(SYS_STATE_S0I1_LPMP3_PSH_DISP);
+		prev_s0ix_cnt[SYS_STATE_S0I2] = s0ix_count_read(SYS_STATE_S0I2);
+		prev_s0ix_cnt[SYS_STATE_S0I3] = s0ix_count_read(SYS_STATE_S0I3);
+		prev_s0ix_cnt[SYS_STATE_S0I3_PSH_RET] = s0ix_count_read(SYS_STATE_S0I3_PSH_RET);
+		prev_s0ix_cnt[SYS_STATE_S3] = 0;
+		prev_s0ix_res[SYS_STATE_S0I1] = s0ix_residency_read(SYS_STATE_S0I1);
+		prev_s0ix_res[SYS_STATE_S0I1_LPMP3] = s0ix_residency_read(SYS_STATE_S0I1_LPMP3);
+		prev_s0ix_res[SYS_STATE_S0I1_PSH] = s0ix_residency_read(SYS_STATE_S0I1_PSH);
+		prev_s0ix_res[SYS_STATE_S0I1_DISP] = s0ix_residency_read(SYS_STATE_S0I1_DISP);
+		prev_s0ix_res[SYS_STATE_S0I1_LPMP3_PSH] = s0ix_residency_read(SYS_STATE_S0I1_LPMP3_PSH);
+		prev_s0ix_res[SYS_STATE_S0I1_LPMP3_DISP] = s0ix_residency_read(SYS_STATE_S0I1_LPMP3_DISP);
+		prev_s0ix_res[SYS_STATE_S0I1_PSH_DISP] = s0ix_residency_read(SYS_STATE_S0I1_PSH_DISP);
+		prev_s0ix_res[SYS_STATE_S0I1_LPMP3_PSH_DISP] = s0ix_residency_read(SYS_STATE_S0I1_LPMP3_PSH_DISP);
+		prev_s0ix_res[SYS_STATE_S0I2] = s0ix_residency_read(SYS_STATE_S0I2);
+		prev_s0ix_res[SYS_STATE_S0I3] = s0ix_residency_read(SYS_STATE_S0I3);
+		prev_s0ix_res[SYS_STATE_S0I3_PSH_RET] = s0ix_residency_read(SYS_STATE_S0I3_PSH_RET);
+		prev_s0ix_res[SYS_STATE_S3] = 0 ;
+
+		/* D0i0 time stats clear */
+		{
+			int i;
+			for (i = 0; i < MAX_LSS_POSSIBLE; i++) {
+				mid_pmu_cxt->d0i0_count[i] = 0;
+				mid_pmu_cxt->d0i0_time[i] = 0;
+				mid_pmu_cxt->d0i0_prev_time[i] = cpu_clock(0);
+			}
+
+			for (i = 0; i < OSPM_MAX_POWER_ISLANDS; i++) {
+				mid_pmu_cxt->nc_d0i0_count[i] = 0;
+				mid_pmu_cxt->nc_d0i0_time[i] = 0;
+				mid_pmu_cxt->nc_d0i0_prev_time[i] = cpu_clock(0);
+			}
+		}
+
+		up(&mid_pmu_cxt->scu_ready_sem);
+	}
+
+	return buf_size;
+}
+
+
+static const struct file_operations devices_state_operations = {
+	.open		= devices_state_open,
+	.read		= seq_read,
+	.write		= devices_state_write,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+#ifdef CONFIG_PM_DEBUG
+static int ignore_lss_show(struct seq_file *s, void *unused)
+{
+	u32 local_ignore_lss[4];
+
+	/* Acquire the scu_ready_sem */
+	down(&mid_pmu_cxt->scu_ready_sem);
+	memcpy(local_ignore_lss, mid_pmu_cxt->ignore_lss, (sizeof(u32)*4));
+	up(&mid_pmu_cxt->scu_ready_sem);
+
+	seq_printf(s, "IGNORE_LSS[0]: %08X\n", local_ignore_lss[0]);
+	seq_printf(s, "IGNORE_LSS[1]: %08X\n", local_ignore_lss[1]);
+	seq_printf(s, "IGNORE_LSS[2]: %08X\n", local_ignore_lss[2]);
+	seq_printf(s, "IGNORE_LSS[3]: %08X\n", local_ignore_lss[3]);
+
+	return 0;
+}
+
+static int ignore_add_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ignore_lss_show, NULL);
+}
+
+static ssize_t ignore_add_write(struct file *file,
+		     const char __user *userbuf, size_t count, loff_t *ppos)
+{
+	int res;
+	int sub_sys_pos, sub_sys_index;
+	u32 lss, local_ignore_lss[4];
+	u32 pm_cmd_val;
+
+	res = kstrtou32_from_user(userbuf, count, 0, &lss);
+	if (res)
+		return -EINVAL;
+
+	if (lss > MAX_LSS_POSSIBLE)
+		return -EINVAL;
+
+	/* Acquire the scu_ready_sem */
+	down(&mid_pmu_cxt->scu_ready_sem);
+	memcpy(local_ignore_lss, mid_pmu_cxt->ignore_lss, (sizeof(u32)*4));
+	up(&mid_pmu_cxt->scu_ready_sem);
+
+	/* If set to MAX_LSS_POSSIBLE it means
+	 * ignore all.
+	 */
+	if (lss == MAX_LSS_POSSIBLE) {
+		local_ignore_lss[0] = 0xFFFFFFFF;
+		local_ignore_lss[1] = 0xFFFFFFFF;
+		local_ignore_lss[2] = 0xFFFFFFFF;
+		local_ignore_lss[3] = 0xFFFFFFFF;
+	} else {
+		sub_sys_index	= lss / mid_pmu_cxt->ss_per_reg;
+		sub_sys_pos	= lss % mid_pmu_cxt->ss_per_reg;
+
+		pm_cmd_val =
+			(D0I3_MASK << (sub_sys_pos * BITS_PER_LSS));
+		local_ignore_lss[sub_sys_index] |= pm_cmd_val;
+	}
+
+	/* Acquire the scu_ready_sem */
+	down(&mid_pmu_cxt->scu_ready_sem);
+	memcpy(mid_pmu_cxt->ignore_lss, local_ignore_lss, (sizeof(u32)*4));
+	up(&mid_pmu_cxt->scu_ready_sem);
+
+	return count;
+}
+
+static const struct file_operations ignore_add_ops = {
+	.open		= ignore_add_open,
+	.read		= seq_read,
+	.write		= ignore_add_write,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int ignore_remove_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ignore_lss_show, NULL);
+}
+
+static ssize_t ignore_remove_write(struct file *file,
+		     const char __user *userbuf, size_t count, loff_t *ppos)
+{
+	int res;
+	int sub_sys_pos, sub_sys_index;
+	u32 lss, local_ignore_lss[4];
+	u32 pm_cmd_val;
+
+	res = kstrtou32_from_user(userbuf, count, 0, &lss);
+	if (res)
+		return -EINVAL;
+
+	if (lss > MAX_LSS_POSSIBLE)
+		return -EINVAL;
+
+	/* Acquire the scu_ready_sem */
+	down(&mid_pmu_cxt->scu_ready_sem);
+	memcpy(local_ignore_lss, mid_pmu_cxt->ignore_lss, (sizeof(u32)*4));
+	up(&mid_pmu_cxt->scu_ready_sem);
+
+	/* If set to MAX_LSS_POSSIBLE it means
+	 * remove all from ignore list.
+	 */
+	if (lss == MAX_LSS_POSSIBLE) {
+		local_ignore_lss[0] = 0;
+		local_ignore_lss[1] = 0;
+		local_ignore_lss[2] = 0;
+		local_ignore_lss[3] = 0;
+	} else {
+		sub_sys_index	= lss / mid_pmu_cxt->ss_per_reg;
+		sub_sys_pos	= lss % mid_pmu_cxt->ss_per_reg;
+
+		pm_cmd_val =
+			(D0I3_MASK << (sub_sys_pos * BITS_PER_LSS));
+		local_ignore_lss[sub_sys_index] &= ~pm_cmd_val;
+	}
+
+	/* Acquire the scu_ready_sem */
+	down(&mid_pmu_cxt->scu_ready_sem);
+	memcpy(mid_pmu_cxt->ignore_lss, local_ignore_lss, (sizeof(u32)*4));
+	up(&mid_pmu_cxt->scu_ready_sem);
+
+	return count;
+}
+
+static const struct file_operations ignore_remove_ops = {
+	.open		= ignore_remove_open,
+	.read		= seq_read,
+	.write		= ignore_remove_write,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int pmu_sync_d0ix_show(struct seq_file *s, void *unused)
+{
+	int i;
+	u32 local_os_sss[4];
+	struct pmu_ss_states cur_pmsss;
+
+	/* Acquire the scu_ready_sem */
+	down(&mid_pmu_cxt->scu_ready_sem);
+	_pmu2_wait_not_busy();
+	/* Read SCU SSS */
+	pmu_read_sss(&cur_pmsss);
+	/* Read OS SSS */
+	memcpy(local_os_sss, mid_pmu_cxt->os_sss, (sizeof(u32)*4));
+	up(&mid_pmu_cxt->scu_ready_sem);
+
+	for (i = 0; i < 4; i++)
+		seq_printf(s, "OS_SSS[%d]: %08X\tSSS[%d]: %08lX\n", i,
+				local_os_sss[i], i, cur_pmsss.pmu2_states[i]);
+
+	return 0;
+}
+
+static int pmu_sync_d0ix_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, pmu_sync_d0ix_show, NULL);
+}
+
+static ssize_t pmu_sync_d0ix_write(struct file *file,
+		     const char __user *userbuf, size_t count, loff_t *ppos)
+{
+	int res, i;
+	bool send_cmd;
+	u32 lss, local_os_sss[4];
+	int sub_sys_pos, sub_sys_index;
+	u32 pm_cmd_val;
+	u32 temp_sss;
+
+	struct pmu_ss_states cur_pmsss;
+
+	res = kstrtou32_from_user(userbuf, count, 0, &lss);
+	if (res)
+		return -EINVAL;
+
+	if (lss > MAX_LSS_POSSIBLE)
+		return -EINVAL;
+
+	/* Acquire the scu_ready_sem */
+	down(&mid_pmu_cxt->scu_ready_sem);
+	_pmu2_wait_not_busy();
+	/* Read SCU SSS */
+	pmu_read_sss(&cur_pmsss);
+
+	for (i = 0; i < 4; i++)
+		local_os_sss[i] = mid_pmu_cxt->os_sss[i] &
+				~mid_pmu_cxt->ignore_lss[i];
+
+	send_cmd = false;
+	for (i = 0; i < 4; i++) {
+		if (local_os_sss[i] != cur_pmsss.pmu2_states[i]) {
+			send_cmd = true;
+			break;
+		}
+	}
+
+	if (send_cmd) {
+		int status;
+
+		if (lss == MAX_LSS_POSSIBLE) {
+			memcpy(cur_pmsss.pmu2_states, local_os_sss,
+							 (sizeof(u32)*4));
+		} else {
+			bool same;
+			sub_sys_index	= lss / mid_pmu_cxt->ss_per_reg;
+			sub_sys_pos	= lss % mid_pmu_cxt->ss_per_reg;
+			pm_cmd_val =
+				(D0I3_MASK << (sub_sys_pos * BITS_PER_LSS));
+
+			/* dont send d0ix request if its same */
+			same =
+			((cur_pmsss.pmu2_states[sub_sys_index] & pm_cmd_val)
+			== (mid_pmu_cxt->os_sss[sub_sys_index] & pm_cmd_val));
+
+			if (same)
+				goto unlock;
+
+			cur_pmsss.pmu2_states[sub_sys_index] &= ~pm_cmd_val;
+			temp_sss =
+				mid_pmu_cxt->os_sss[sub_sys_index] & pm_cmd_val;
+			cur_pmsss.pmu2_states[sub_sys_index] |= temp_sss;
+		}
+
+		/* Issue the pmu command to PMU 2
+		 * flag is needed to distinguish between
+		 * S0ix vs interactive command in pmu_sc_irq()
+		 */
+		status = pmu_issue_interactive_command(&cur_pmsss, false,
+							false);
+
+		if (unlikely(status != PMU_SUCCESS)) {
+			dev_dbg(&mid_pmu_cxt->pmu_dev->dev,
+				 "Failed to Issue a PM command to PMU2\n");
+			goto unlock;
+		}
+
+		/*
+		 * Wait for interactive command to complete.
+		 * If we dont wait, there is a possibility that
+		 * the driver may access the device before its
+		 * powered on in SCU.
+		 *
+		 */
+		status = _pmu2_wait_not_busy();
+		if (unlikely(status)) {
+			printk(KERN_CRIT "%s: D0ix transition failure\n",
+				__func__);
+		}
+	}
+
+unlock:
+	up(&mid_pmu_cxt->scu_ready_sem);
+
+	return count;
+}
+
+static const struct file_operations pmu_sync_d0ix_ops = {
+	.open		= pmu_sync_d0ix_open,
+	.read		= seq_read,
+	.write		= pmu_sync_d0ix_write,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int pmu_force_d0ix_show(struct seq_file *s, void *unused)
+{
+	int i;
+	u32 local_os_sss[4];
+
+	/* Acquire the scu_ready_sem */
+	down(&mid_pmu_cxt->scu_ready_sem);
+	/* Read OS SSS */
+	memcpy(local_os_sss, mid_pmu_cxt->os_sss, (sizeof(u32)*4));
+	up(&mid_pmu_cxt->scu_ready_sem);
+
+	for (i = 0; i < 4; i++)
+		seq_printf(s, "OS_SSS[%d]: %08X\n", i, local_os_sss[i]);
+
+	return 0;
+}
+
+static int pmu_force_d0ix_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, pmu_force_d0ix_show, NULL);
+}
+
+static ssize_t pmu_force_d0i3_write(struct file *file,
+		     const char __user *userbuf, size_t count, loff_t *ppos)
+{
+	int res;
+	u32 lss, local_os_sss[4];
+	int sub_sys_pos, sub_sys_index;
+	u32 pm_cmd_val;
+
+	res = kstrtou32_from_user(userbuf, count, 0, &lss);
+	if (res)
+		return -EINVAL;
+
+	if (lss > MAX_LSS_POSSIBLE)
+		return -EINVAL;
+
+	/* Acquire the scu_ready_sem */
+	down(&mid_pmu_cxt->scu_ready_sem);
+
+	if (lss == MAX_LSS_POSSIBLE) {
+		local_os_sss[0] =
+		local_os_sss[1] =
+		local_os_sss[2] =
+		local_os_sss[3] = 0xFFFFFFFF;
+	} else {
+		memcpy(local_os_sss, mid_pmu_cxt->os_sss, (sizeof(u32)*4));
+		sub_sys_index	= lss / mid_pmu_cxt->ss_per_reg;
+		sub_sys_pos	= lss % mid_pmu_cxt->ss_per_reg;
+		pm_cmd_val =
+			(D0I3_MASK << (sub_sys_pos * BITS_PER_LSS));
+
+		local_os_sss[sub_sys_index] |= pm_cmd_val;
+	}
+
+	memcpy(mid_pmu_cxt->os_sss, local_os_sss, (sizeof(u32)*4));
+
+	up(&mid_pmu_cxt->scu_ready_sem);
+
+	return count;
+}
+
+static const struct file_operations pmu_force_d0i3_ops = {
+	.open		= pmu_force_d0ix_open,
+	.read		= seq_read,
+	.write		= pmu_force_d0i3_write,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static ssize_t pmu_force_d0i0_write(struct file *file,
+		     const char __user *userbuf, size_t count, loff_t *ppos)
+{
+	int res;
+	u32 lss, local_os_sss[4];
+	int sub_sys_pos, sub_sys_index;
+	u32 pm_cmd_val;
+
+	res = kstrtou32_from_user(userbuf, count, 0, &lss);
+	if (res)
+		return -EINVAL;
+
+	if (lss > MAX_LSS_POSSIBLE)
+		return -EINVAL;
+
+	/* Acquire the scu_ready_sem */
+	down(&mid_pmu_cxt->scu_ready_sem);
+
+	if (lss == MAX_LSS_POSSIBLE) {
+		local_os_sss[0] =
+		local_os_sss[1] =
+		local_os_sss[2] =
+		local_os_sss[3] = 0;
+	} else {
+		memcpy(local_os_sss, mid_pmu_cxt->os_sss, (sizeof(u32)*4));
+		sub_sys_index	= lss / mid_pmu_cxt->ss_per_reg;
+		sub_sys_pos	= lss % mid_pmu_cxt->ss_per_reg;
+		pm_cmd_val =
+			(D0I3_MASK << (sub_sys_pos * BITS_PER_LSS));
+
+		local_os_sss[sub_sys_index] &= ~pm_cmd_val;
+	}
+
+	memcpy(mid_pmu_cxt->os_sss, local_os_sss, (sizeof(u32)*4));
+
+	up(&mid_pmu_cxt->scu_ready_sem);
+
+	return count;
+}
+
+static const struct file_operations pmu_force_d0i0_ops = {
+	.open		= pmu_force_d0ix_open,
+	.read		= seq_read,
+	.write		= pmu_force_d0i0_write,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int cstate_ignore_add_show(struct seq_file *s, void *unused)
+{
+	int i;
+	seq_printf(s, "CSTATES IGNORED: ");
+	for (i = 0; i < (CPUIDLE_STATE_MAX-1); i++)
+		if ((mid_pmu_cxt->cstate_ignore & (1 << i)))
+			seq_printf(s, "%d, ", i+1);
+
+	seq_printf(s, "\n");
+	return 0;
+}
+
+static int cstate_ignore_add_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, cstate_ignore_add_show, NULL);
+}
+
+static ssize_t cstate_ignore_add_write(struct file *file,
+		     const char __user *userbuf, size_t count, loff_t *ppos)
+{
+	int res;
+	int cstate;
+
+	res = kstrtou32_from_user(userbuf, count, 0, &cstate);
+	if (res)
+		return -EINVAL;
+
+	if (cstate > MAX_CSTATES_POSSIBLE)
+		return -EINVAL;
+
+	/* cannot add/remove C0, C1 */
+	if (((cstate == 0) || (cstate == 1))) {
+		printk(KERN_CRIT "C0 C1 state cannot be used.\n");
+		return -EINVAL;
+	}
+
+	if (!mid_pmu_cxt->cstate_qos)
+		return -EINVAL;
+
+	if (cstate == MAX_CSTATES_POSSIBLE) {
+		mid_pmu_cxt->cstate_ignore = ((1 << (CPUIDLE_STATE_MAX-1)) - 1);
+		pm_qos_update_request(mid_pmu_cxt->cstate_qos,
+					CSTATE_EXIT_LATENCY_C1 - 1);
+	} else {
+		u32 cstate_exit_latency[CPUIDLE_STATE_MAX];
+		u32 local_cstate_allowed;
+		int max_cstate_allowed;
+
+		/* 0 is C1 state */
+		cstate--;
+		mid_pmu_cxt->cstate_ignore |= (1 << cstate);
+
+		/* by default remove C1 from ignore list */
+		mid_pmu_cxt->cstate_ignore &= ~(1 << 0);
+
+		/* populate cstate latency table */
+		cstate_exit_latency[0] = CSTATE_EXIT_LATENCY_C1;
+		cstate_exit_latency[1] = CSTATE_EXIT_LATENCY_C2;
+		cstate_exit_latency[2] = CSTATE_EXIT_LATENCY_C2;
+		cstate_exit_latency[3] = CSTATE_EXIT_LATENCY_C2;
+		cstate_exit_latency[4] = CSTATE_EXIT_LATENCY_C2;
+		cstate_exit_latency[5] = CSTATE_EXIT_LATENCY_C6;
+		cstate_exit_latency[6] = CSTATE_EXIT_LATENCY_S0i1;
+		cstate_exit_latency[7] = CSTATE_EXIT_LATENCY_S0i2;
+		cstate_exit_latency[8] = CSTATE_EXIT_LATENCY_S0i3;
+		cstate_exit_latency[9] = PM_QOS_DEFAULT_VALUE;
+
+		local_cstate_allowed = ~mid_pmu_cxt->cstate_ignore;
+
+		/* restrict to max c-states */
+		local_cstate_allowed &= ((1<<(CPUIDLE_STATE_MAX-1))-1);
+
+		/* If no states allowed will return 0 */
+		max_cstate_allowed = fls(local_cstate_allowed);
+
+		printk(KERN_CRIT "max_cstate: %d local_cstate_allowed = %x\n",
+			max_cstate_allowed, local_cstate_allowed);
+		printk(KERN_CRIT "exit latency = %d\n",
+				(cstate_exit_latency[max_cstate_allowed]-1));
+		pm_qos_update_request(mid_pmu_cxt->cstate_qos,
+				(cstate_exit_latency[max_cstate_allowed]-1));
+	}
+
+	return count;
+}
+
+static const struct file_operations cstate_ignore_add_ops = {
+	.open		= cstate_ignore_add_open,
+	.read		= seq_read,
+	.write		= cstate_ignore_add_write,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int cstate_ignore_remove_show(struct seq_file *s, void *unused)
+{
+	int i;
+	seq_printf(s, "CSTATES ALLOWED: ");
+	for (i = 0; i < (CPUIDLE_STATE_MAX-1); i++)
+		if (!(mid_pmu_cxt->cstate_ignore & (1 << i)))
+			seq_printf(s, "%d, ", i+1);
+
+	seq_printf(s, "\n");
+
+	return 0;
+}
+
+static int cstate_ignore_remove_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, cstate_ignore_remove_show, NULL);
+}
+
+static ssize_t cstate_ignore_remove_write(struct file *file,
+		     const char __user *userbuf, size_t count, loff_t *ppos)
+{
+	int res;
+	int cstate;
+	res = kstrtou32_from_user(userbuf, count, 0, &cstate);
+	if (res)
+		return -EINVAL;
+
+	if (cstate > MAX_CSTATES_POSSIBLE)
+		return -EINVAL;
+
+	/* cannot add/remove C0, C1 */
+	if (((cstate == 0) || (cstate == 1))) {
+		printk(KERN_CRIT "C0 C1 state cannot be used.\n");
+		return -EINVAL;
+	}
+
+	if (!mid_pmu_cxt->cstate_qos)
+		return -EINVAL;
+
+	if (cstate == MAX_CSTATES_POSSIBLE) {
+		mid_pmu_cxt->cstate_ignore =
+				~((1 << (CPUIDLE_STATE_MAX-1)) - 1);
+		/* Ignore C2, C3, C5, C8 states */
+		mid_pmu_cxt->cstate_ignore |= (1 << 1);
+		mid_pmu_cxt->cstate_ignore |= (1 << 2);
+		mid_pmu_cxt->cstate_ignore |= (1 << 4);
+		mid_pmu_cxt->cstate_ignore |= (1 << 7);
+
+		pm_qos_update_request(mid_pmu_cxt->cstate_qos,
+						PM_QOS_DEFAULT_VALUE);
+	} else {
+		u32 cstate_exit_latency[CPUIDLE_STATE_MAX];
+		u32 local_cstate_allowed;
+		int max_cstate_allowed;
+
+		/* populate cstate latency table */
+		cstate_exit_latency[0] = CSTATE_EXIT_LATENCY_C1;
+		cstate_exit_latency[1] = CSTATE_EXIT_LATENCY_C2;
+		cstate_exit_latency[2] = CSTATE_EXIT_LATENCY_C2;
+		cstate_exit_latency[3] = CSTATE_EXIT_LATENCY_C2;
+		cstate_exit_latency[4] = CSTATE_EXIT_LATENCY_C2;
+		cstate_exit_latency[5] = CSTATE_EXIT_LATENCY_C6;
+		cstate_exit_latency[6] = CSTATE_EXIT_LATENCY_S0i1;
+		cstate_exit_latency[7] = CSTATE_EXIT_LATENCY_S0i2;
+		cstate_exit_latency[8] = CSTATE_EXIT_LATENCY_S0i3;
+		cstate_exit_latency[9] = PM_QOS_DEFAULT_VALUE;
+
+		/* 0 is C1 state */
+		cstate--;
+		mid_pmu_cxt->cstate_ignore &= ~(1 << cstate);
+
+		/* by default remove C1 from ignore list */
+		mid_pmu_cxt->cstate_ignore &= ~(1 << 0);
+
+		/* Ignore C2, C3, C5, C8 states */
+		mid_pmu_cxt->cstate_ignore |= (1 << 1);
+		mid_pmu_cxt->cstate_ignore |= (1 << 2);
+		mid_pmu_cxt->cstate_ignore |= (1 << 4);
+		mid_pmu_cxt->cstate_ignore |= (1 << 7);
+
+		local_cstate_allowed = ~mid_pmu_cxt->cstate_ignore;
+		/* restrict to max c-states */
+		local_cstate_allowed &= ((1<<(CPUIDLE_STATE_MAX-1))-1);
+
+		/* If no states allowed will return 0 */
+		max_cstate_allowed = fls(local_cstate_allowed);
+		printk(KERN_CRIT "max_cstate: %d local_cstate_allowed = %x\n",
+			max_cstate_allowed, local_cstate_allowed);
+		printk(KERN_CRIT "exit latency = %d\n",
+				(cstate_exit_latency[max_cstate_allowed]-1));
+		pm_qos_update_request(mid_pmu_cxt->cstate_qos,
+				(cstate_exit_latency[max_cstate_allowed]-1));
+	}
+
+	return count;
+}
+
+static const struct file_operations cstate_ignore_remove_ops = {
+	.open		= cstate_ignore_remove_open,
+	.read		= seq_read,
+	.write		= cstate_ignore_remove_write,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int s3_ctrl_show(struct seq_file *s, void *unused)
+{
+	seq_printf(s, "%d\n", enable_s3);
+	return 0;
+}
+
+static int s3_ctrl_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, s3_ctrl_show, NULL);
+}
+
+static ssize_t s3_ctrl_write(struct file *file,
+		     const char __user *userbuf, size_t count, loff_t *ppos)
+{
+	int res;
+	int local_s3_ctrl;
+
+	res = kstrtou32_from_user(userbuf, count, 0, &local_s3_ctrl);
+	if (res)
+		return -EINVAL;
+
+	enable_s3 = local_s3_ctrl ? 1 : 0;
+
+	if (enable_s3)
+		__pm_relax(mid_pmu_cxt->pmu_wake_lock);
+	else
+		__pm_stay_awake(mid_pmu_cxt->pmu_wake_lock);
+
+	return count;
+}
+
+static const struct file_operations s3_ctrl_ops = {
+	.open		= s3_ctrl_open,
+	.read		= seq_read,
+	.write		= s3_ctrl_write,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+/*
+ * cstate: c1=1, c2=2, ..., c6=6, c7=7, c8=7, c9=7
+ *         for s0i1/s0i2/s0i3 cstate=7.
+ * index: this is the index in cpuidle_driver cstates table
+ *        where c1 is the 2nd element of the table
+ */
+unsigned int pmu_get_new_cstate(unsigned int cstate, int *index)
+{
+	static int cstate_index_table[CPUIDLE_STATE_MAX-1] = {
+					1, 1, 1, 1, 1, 2, 3, 3, 4};
+	unsigned int new_cstate = cstate;
+	u32 local_cstate = (u32)(cstate);
+	u32 local_cstate_allowed = ~mid_pmu_cxt->cstate_ignore;
+	u32 cstate_mask;
+
+	if (platform_is(INTEL_ATOM_MRFLD) || platform_is(INTEL_ATOM_MOORFLD)) {
+		/* cstate is also 7 for C9 so correct */
+		/* this is supposing that C9 is the 4th cstate allowed */
+		if ((local_cstate == 7) && (*index == 4))
+			local_cstate = 9;
+
+		/* get next low cstate allowed */
+		cstate_mask = (u32)((1 << local_cstate)-1);
+		local_cstate_allowed	&= ((1<<(CPUIDLE_STATE_MAX-1))-1);
+		local_cstate_allowed	&= cstate_mask;
+
+		/* Make sure we dont end up with new_state == 0 */
+		local_cstate_allowed |= 1;
+		new_cstate	= fls(local_cstate_allowed);
+
+		*index	= cstate_index_table[new_cstate-1];
+	}
+
+	return new_cstate;
+}
+#endif
+
+DEFINE_PER_CPU(u64[NUM_CSTATES_RES_MEASURE], c_states_res);
+
+static int read_c_states_res(void)
+{
+	int cpu, i;
+	u32 lo, hi;
+
+	u32 c_states_res_msr[NUM_CSTATES_RES_MEASURE] = {
+		PUNIT_CR_CORE_C1_RES_MSR,
+		PUNIT_CR_CORE_C4_RES_MSR,
+		PUNIT_CR_CORE_C6_RES_MSR
+	};
+
+	for_each_online_cpu(cpu)
+		for (i = 0; i < NUM_CSTATES_RES_MEASURE; i++) {
+			u64 temp;
+			rdmsr_on_cpu(cpu, c_states_res_msr[i], &lo, &hi);
+			temp = hi;
+			temp <<= 32;
+			temp |= lo;
+			per_cpu(c_states_res, cpu)[i] = temp;
+		}
+
+	return 0;
+}
+
+static int c_states_stat_show(struct seq_file *s, void *unused)
+{
+	char *c_states_name[] = {
+		"C1",
+		"C4",
+		"C6"
+	};
+
+	int i, cpu;
+
+	seq_printf(s, "C STATES: %20s\n", "Residecy");
+	for_each_online_cpu(cpu)
+		seq_printf(s, "%18s %d", "Core", cpu);
+	seq_printf(s, "\n");
+
+	read_c_states_res();
+	for (i = 0; i < NUM_CSTATES_RES_MEASURE; i++) {
+		seq_printf(s, "%s", c_states_name[i]);
+		for_each_online_cpu(cpu)
+			seq_printf(s, "%18llu", per_cpu(c_states_res, cpu)[i]);
+		seq_printf(s, "\n");
+	}
+	return 0;
+}
+
+static int c_states_stat_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, c_states_stat_show, NULL);
+}
+
+static const struct file_operations c_states_stat_ops = {
+	.open		= c_states_stat_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+/*These are place holders and will be enabled in next patch*/
+
+void pmu_log_pmu_irq(int status) { return; };
+void pmu_log_ipc_irq(void) { return; };
+void pmu_log_ipc(u32 command) { return; };
+void pmu_log_command(u32 command, struct pmu_ss_states *pm_ssc) { return; };
+void pmu_dump_logs(void) { return; };
+void pmu_stat_start(enum sys_state type) { return; };
+void pmu_stat_end(void) { return; };
+void pmu_stat_error(u8 err_type) { return; };
+void pmu_s0ix_demotion_stat(int req_state, int grant_state) { return; };
+EXPORT_SYMBOL(pmu_s0ix_demotion_stat);
+
+void pmu_stats_finish(void)
+{
+#ifdef CONFIG_PM_DEBUG
+	if (mid_pmu_cxt->cstate_qos) {
+		pm_qos_remove_request(mid_pmu_cxt->cstate_qos);
+		kfree(mid_pmu_cxt->cstate_qos);
+		mid_pmu_cxt->cstate_qos = NULL;
+	}
+#endif
+
+	return;
+}
+
+void pmu_s3_stats_update(int enter)
+{
+#ifdef CONFIG_PM_DEBUG
+	int ret;
+
+	down(&mid_pmu_cxt->scu_ready_sem);
+	/* Dump S0ix residency counters */
+	ret = intel_scu_ipc_simple_command(DUMP_RES_COUNTER, 0);
+	if (ret)
+		printk(KERN_ERR "IPC command to DUMP S0ix residency failed\n");
+
+	/* Dump number of interations of S0ix */
+	ret = intel_scu_ipc_simple_command(DUMP_S0IX_COUNT, 0);
+	if (ret)
+		printk(KERN_ERR "IPC command to DUMP S0ix count failed\n");
+
+	up(&mid_pmu_cxt->scu_ready_sem);
+
+	if (enter == 1) {
+		S3_count  = s0ix_count_read(SYS_STATE_S0I3);
+		S3_res = s0ix_residency_read(SYS_STATE_S0I3);
+	} else {
+		prev_s0ix_cnt[SYS_STATE_S3] +=
+			(s0ix_count_read(SYS_STATE_S0I3)) - S3_count;
+		prev_s0ix_res[SYS_STATE_S3] += (s0ix_residency_read(SYS_STATE_S0I3)) - S3_res;
+	}
+
+#endif
+	return;
+}
+
+
+void pmu_stats_init(void)
+{
+	/* /sys/kernel/debug/mid_pmu_states */
+	(void) debugfs_create_file("mid_pmu_states", S_IFREG | S_IRUGO,
+				NULL, NULL, &devices_state_operations);
+
+	/* /sys/kernel/debug/c_p_states_stat */
+	(void) debugfs_create_file("c_states_stat", S_IFREG | S_IRUGO,
+				NULL, NULL, &c_states_stat_ops);
+#ifdef CONFIG_PM_DEBUG
+	if (platform_is(INTEL_ATOM_MRFLD) || platform_is(INTEL_ATOM_MOORFLD)) {
+		/* If s0ix is disabled then restrict to C6 */
+		if (!enable_s0ix) {
+			mid_pmu_cxt->cstate_ignore =
+				~((1 << (CPUIDLE_STATE_MAX-1)) - 1);
+
+			/* Ignore C2, C3, C5 states */
+			mid_pmu_cxt->cstate_ignore |= (1 << 1);
+			mid_pmu_cxt->cstate_ignore |= (1 << 2);
+			mid_pmu_cxt->cstate_ignore |= (1 << 4);
+
+			/* For now ignore C7, C8, C9 states */
+			mid_pmu_cxt->cstate_ignore |= (1 << 6);
+			mid_pmu_cxt->cstate_ignore |= (1 << 7);
+			mid_pmu_cxt->cstate_ignore |= (1 << 8);
+		} else {
+			mid_pmu_cxt->cstate_ignore =
+				~((1 << (CPUIDLE_STATE_MAX-1)) - 1);
+
+			/* Ignore C2, C3, C5, C8 states */
+			mid_pmu_cxt->cstate_ignore |= (1 << 1);
+			mid_pmu_cxt->cstate_ignore |= (1 << 2);
+			mid_pmu_cxt->cstate_ignore |= (1 << 4);
+			mid_pmu_cxt->cstate_ignore |= (1 << 7);
+		}
+
+		mid_pmu_cxt->cstate_qos =
+			kzalloc(sizeof(struct pm_qos_request), GFP_KERNEL);
+		if (mid_pmu_cxt->cstate_qos) {
+			pm_qos_add_request(mid_pmu_cxt->cstate_qos,
+				 PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
+		}
+
+		/* If s0ix is disabled then restrict to C6 */
+		if (!enable_s0ix) {
+			/* Restrict platform Cx state to C6 */
+			pm_qos_update_request(mid_pmu_cxt->cstate_qos,
+						(CSTATE_EXIT_LATENCY_S0i1-1));
+		}
+
+		/* D0i0 time stats clear */
+		{
+			int i;
+			for (i = 0; i < MAX_LSS_POSSIBLE; i++) {
+				mid_pmu_cxt->d0i0_time[i] = 0;
+				mid_pmu_cxt->d0i0_prev_time[i] = cpu_clock(0);
+			}
+
+			for (i = 0; i < OSPM_MAX_POWER_ISLANDS; i++) {
+				mid_pmu_cxt->nc_d0i0_time[i] = 0;
+				mid_pmu_cxt->nc_d0i0_prev_time[i] = cpu_clock(0);
+			}
+		}
+
+		/* /sys/kernel/debug/ignore_add */
+		(void) debugfs_create_file("ignore_add", S_IFREG | S_IRUGO,
+					NULL, NULL, &ignore_add_ops);
+		/* /sys/kernel/debug/ignore_remove */
+		(void) debugfs_create_file("ignore_remove", S_IFREG | S_IRUGO,
+					NULL, NULL, &ignore_remove_ops);
+		/* /sys/kernel/debug/pmu_sync_d0ix */
+		(void) debugfs_create_file("pmu_sync_d0ix", S_IFREG | S_IRUGO,
+					NULL, NULL, &pmu_sync_d0ix_ops);
+		/* /sys/kernel/debug/pmu_force_d0i0 */
+		(void) debugfs_create_file("pmu_force_d0i0", S_IFREG | S_IRUGO,
+					NULL, NULL, &pmu_force_d0i0_ops);
+		/* /sys/kernel/debug/pmu_force_d0i3 */
+		(void) debugfs_create_file("pmu_force_d0i3", S_IFREG | S_IRUGO,
+					NULL, NULL, &pmu_force_d0i3_ops);
+		/* /sys/kernel/debug/cstate_ignore_add */
+		(void) debugfs_create_file("cstate_ignore_add",
+			S_IFREG | S_IRUGO, NULL, NULL, &cstate_ignore_add_ops);
+		/* /sys/kernel/debug/cstate_ignore_remove */
+		(void) debugfs_create_file("cstate_ignore_remove",
+		S_IFREG | S_IRUGO, NULL, NULL, &cstate_ignore_remove_ops);
+		/* /sys/kernel/debug/cstate_ignore_remove */
+		(void) debugfs_create_file("s3_ctrl",
+		S_IFREG | S_IRUGO, NULL, NULL, &s3_ctrl_ops);
+	}
+#endif
+}
+
+#endif /*if CONFIG_ATOM_SOC_POWER */
diff --git a/arch/x86/platform/intel-mid/intel_soc_pm_debug.h b/arch/x86/platform/intel-mid/intel_soc_pm_debug.h
new file mode 100644
index 0000000..5907005
--- /dev/null
+++ b/arch/x86/platform/intel-mid/intel_soc_pm_debug.h
@@ -0,0 +1,162 @@
+/*
+ * intel_soc_pm_debug.h
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+#ifndef _INTEL_SOC_PM_DEBUG_H
+#define _INTEL_SOC_PM_DEBUG_H
+#include <linux/intel_mid_pm.h>
+
+#include "intel_soc_pmu.h"
+
+
+#define NANO_SEC		1000000000UL /* 10^9 in sec */
+#define MICRO_SEC		1000000UL /* 10^6 in sec */
+#define PMU_LOG_INTERVAL_SECS	(60*5) /* 5 mins in secs */
+
+#define S0IX_LAT_SRAM_ADDR_CLVP		0xFFFF7FD0
+#define S0IX_LAT_SRAM_SIZE_CLVP		8
+
+#define IPC_CMD_S0IX_LATENCY_CLVP	0xCE
+#define IPC_SUB_MEASURE_START_CLVP	0x00
+#define IPC_SUB_MEASURE_STOP_CLVP	0x01
+
+struct simple_stat {
+	u64 min;
+	u64 max;
+	u64 total;
+	u64 curr;
+};
+
+struct entry_exit_stat {
+	struct simple_stat entry;
+	struct simple_stat exit;
+};
+
+struct latency_stat {
+	struct entry_exit_stat scu_latency[SYS_STATE_MAX];
+	struct entry_exit_stat os_latency[SYS_STATE_MAX];
+	struct simple_stat s3_parts_lat[MAX_S3_PARTS];
+	u64 count[SYS_STATE_MAX];
+	u32 __iomem *scu_s0ix_lat_addr;
+	struct dentry *dentry;
+	bool latency_measure;
+};
+
+struct island {
+	int type;
+	int index;
+	char *name;
+};
+
+struct lss_definition {
+	char *lss_name;
+	char *block;
+	char *subsystem;
+};
+
+#ifdef CONFIG_ATOM_SOC_POWER
+#define PUNIT_CR_CORE_C1_RES_MSR	0x660
+#define PUNIT_CR_CORE_C4_RES_MSR	0x3fc
+#define PUNIT_CR_CORE_C6_RES_MSR	0x3fd
+
+#define NUM_CSTATES_RES_MEASURE		3
+
+extern unsigned int enable_s3;
+extern unsigned int enable_s0ix;
+
+extern u8 __iomem *s0ix_counters;
+extern int s0ix_counter_reg_map[];
+extern int s0ix_residency_reg_map[];
+
+#endif
+
+/* platform dependency starts */
+#ifdef CONFIG_ATOM_SOC_POWER
+
+#define DEV_GFX		2
+#define FUNC_GFX	0
+#define ISLANDS_GFX	8
+#define DEV_ISP		3
+#define FUNC_ISP	0
+#define ISLANDS_ISP	2
+#define NC_DEVS		2
+
+struct lss_definition lsses[] = {
+	{"Lss00", "Storage", "SDIO0 (HC2)"},
+	{"Lss01", "Storage", "eMMC0 (HC0a)"},
+	{"NA", "Storage", "ND_CTL (Note 5)"},
+	{"Lss03", "H S I", "H S I DMA"},
+	{"Lss04", "Security", "RNG"},
+	{"Lss05", "Storage", "eMMC1 (HC0b)"},
+	{"Lss06", "USB", "USB OTG (ULPI)"},
+	{"Lss07", "USB", "USB_SPH"},
+	{"Lss08", "Audio", ""},
+	{"Lss09", "Audio", ""},
+	{"Lss10", "SRAM", " SRAM CTL+SRAM_16KB"},
+	{"Lss11", "SRAM", " SRAM CTL+SRAM_16KB"},
+	{"Lss12", "SRAM", "SRAM BANK (16KB+3x32KBKB)"},
+	{"Lss13", "SRAM", "SRAM BANK(4x32KB)"},
+	{"Lss14", "SDIO COMMS", "SDIO2 (HC1b)"},
+	{"Lss15", "PTI, DAFCA", " DFX Blocks"},
+	{"Lss16", "SC", " DMA"},
+	{"NA", "SC", "SPI0/MSIC"},
+	{"Lss18", "GP", "SPI1"},
+	{"Lss19", "GP", " SPI2"},
+	{"Lss20", "GP", " I2C0"},
+	{"Lss21", "GP", " I2C1"},
+	{"NA", "Fabrics", " Main Fabric"},
+	{"NA", "Fabrics", " Secondary Fabric"},
+	{"NA", "SC", "SC Fabric"},
+	{"Lss25", "Audio", " I-RAM BANK1 (32 + 256KB)"},
+	{"NA", "SCU", " ROM BANK1 (18KB+18KB+18KB)"},
+	{"Lss27", "GP", "I2C2"},
+	{"NA", "SSC", "SSC (serial bus controller to FLIS)"},
+	{"Lss29", "Security", "Chaabi AON Registers"},
+	{"Lss30", "SDIO COMMS", "SDIO1 (HC1a)"},
+	{"NA", "SCU", "I-RAM BANK0 (32KB)"},
+	{"NA", "SCU", "I-RAM BANK1 (32KB)"},
+	{"Lss33", "GP", "I2C3 (HDMI)"},
+	{"Lss34", "GP", "I2C4"},
+	{"Lss35", "GP", "I2C5"},
+	{"Lss36", "GP", "SSP (SPI3)"},
+	{"Lss37", "GP", "GPIO1"},
+	{"NA", "GP", "GP Fabric"},
+	{"Lss39", "SC", "GPIO0"},
+	{"Lss40", "SC", "KBD"},
+	{"Lss41", "SC", "UART2:0"},
+	{"NA", "NA", "NA"},
+	{"NA", "NA", "NA"},
+	{"Lss44", "Security", " Security TAPC"},
+	{"NA", "MISC", "AON Timers"},
+	{"NA", "PLL", "LFHPLL and Spread Spectrum"},
+	{"NA", "PLL", "USB PLL"},
+	{"NA", "NA", "NA"},
+	{"NA", "Audio", "SLIMBUS CTL 1 (note 5)"},
+	{"NA", "Audio", "SLIMBUS CTL 2 (note 5)"},
+	{"Lss51", "Audio", "SSP0"},
+	{"Lss52", "Audio", "SSP1"},
+	{"NA", "Bridge", "IOSF to OCP Bridge"},
+	{"Lss54", "GP", "DMA"},
+	{"NA", "SC", "SVID (Serial Voltage ID)"},
+	{"NA", "SOC Fuse", "SoC Fuse Block (note 3)"},
+	{"NA", "NA", "NA"},
+};
+#endif
+
+/* platform dependency ends */
+
+#endif
diff --git a/arch/x86/platform/intel-mid/intel_soc_pmu.c b/arch/x86/platform/intel-mid/intel_soc_pmu.c
new file mode 100644
index 0000000..9426b84
--- /dev/null
+++ b/arch/x86/platform/intel-mid/intel_soc_pmu.c
@@ -0,0 +1,2069 @@
+/*
+ * intel_soc_pmu.c - This driver provides interface to configure the 2 pmu's
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include "intel_soc_pmu.h"
+#include <linux/cpuidle.h>
+#include <linux/proc_fs.h>
+#include <asm/stacktrace.h>
+#include <asm/intel_mid_rpmsg.h>
+
+#ifdef CONFIG_DRM_INTEL_MID
+#define GFX_ENABLE
+#endif
+
+bool pmu_initialized;
+
+DEFINE_MUTEX(pci_root_lock);
+
+/* mid_pmu context structure */
+struct mid_pmu_dev *mid_pmu_cxt;
+
+struct platform_pmu_ops *pmu_ops;
+/*
+ * Locking strategy::
+ *
+ * one semaphore (scu_ready sem) is used for accessing busy bit,
+ * issuing interactive cmd in the code.
+ * The entry points in pmu driver are pmu_pci_set_power_state()
+ * and PMU interrupt handler contexts, so here is the flow of how
+ * the semaphore is used.
+ *
+ * In D0ix command case::
+ * set_power_state process context:
+ * set_power_state()->acquire_scu_ready_sem()->issue_interactive_cmd->
+ * wait_for_interactive_complete->release scu_ready sem
+ *
+ * PMU Interrupt context:
+ * pmu_interrupt_handler()->release interactive_complete->return
+ *
+ * In Idle handler case::
+ * Idle context:
+ * idle_handler()->try_acquire_scu_ready_sem->if acquired->
+ * issue s0ix command->return
+ *
+ * PMU Interrupt context:
+ * pmu_Interrupt_handler()->release scu_ready_sem->return
+ *
+ */
+
+/* Maps pci power states to SCU D0ix mask */
+static int pci_to_platform_state(pci_power_t pci_state)
+{
+
+	static int mask[]  = {D0I0_MASK, D0I1_MASK,
+				D0I2_MASK, D0I3_MASK, D0I3_MASK};
+
+	int state = D0I0_MASK;
+
+	if (pci_state > 4)
+		WARN(1, "%s: wrong pci_state received.\n", __func__);
+
+	else
+		state = mask[pci_state];
+
+	return state;
+}
+
+/* Maps power states to pmu driver's internal indexes */
+int mid_state_to_sys_state(int mid_state)
+{
+	int sys_state = 0;
+	switch (mid_state) {
+	case MID_S0I1_STATE:
+		sys_state = SYS_STATE_S0I1;
+		break;
+	case MID_LPMP3_STATE:
+		sys_state = SYS_STATE_S0I2;
+		break;
+	case MID_S0I3_STATE:
+		sys_state = SYS_STATE_S0I3;
+		break;
+	case MID_S3_STATE:
+		sys_state = SYS_STATE_S3;
+		break;
+
+	case C6_HINT:
+		sys_state = SYS_STATE_S0I0;
+	}
+
+	return sys_state;
+}
+
+/* PCI Device Id structure */
+static DEFINE_PCI_DEVICE_TABLE(mid_pm_ids) = {
+	{PCI_VDEVICE(INTEL, MID_PMU_MRFL_DRV_DEV_ID), 0},
+	{}
+};
+
+MODULE_DEVICE_TABLE(pci, mid_pm_ids);
+
+char s0ix[5] = "s0ix";
+
+module_param_call(s0ix, set_extended_cstate_mode,
+		get_extended_cstate_mode, NULL, 0644);
+
+MODULE_PARM_DESC(s0ix,
+	"setup extended c state s0ix mode [s0i3|s0i1|lmp3|"
+				"i1i3|lpi1|lpi3|s0ix|none]");
+
+/* LSS without driver are powered up on the resume from standby
+ * preventing back to back S3/S0ix. IGNORE the PCI_DO transtion
+ * for such devices.
+ */
+static inline bool pmu_power_down_lss_without_driver(int index,
+			int sub_sys_index, int sub_sys_pos, pci_power_t state)
+{
+	/* Ignore NC devices */
+	if (index < PMU1_MAX_DEVS)
+		return false;
+
+	/* Only ignore D0i0 */
+	if (state != PCI_D0)
+		return false;
+
+	/* HSI not used in MRFLD. IGNORE HSI Transition to D0 for MRFLD.
+	 * Sometimes it is turned ON during resume in the absence of a driver
+	 */
+	if (platform_is(INTEL_ATOM_MRFLD))
+		return ((sub_sys_index == 0x0) && (sub_sys_pos == 0x5));
+
+	/* For MOFD ignore D0i0 on LSS 5, 7, 16, 17, 18, 19 */
+	if (platform_is(INTEL_ATOM_MOORFLD)) {
+		if (sub_sys_index == 0x0)
+			return ((sub_sys_pos == 0x5) || (sub_sys_pos == 0x7));
+		else if (sub_sys_index == 0x1)
+			return ((sub_sys_pos == 0x0) || (sub_sys_pos == 0x1)
+				|| (sub_sys_pos == 0x2) || (sub_sys_pos == 0x3));
+	}
+	return false;
+}
+
+/**
+ * This function set all devices in d0i0 and deactivates pmu driver.
+ * The function is used before IFWI update as it needs devices to be
+ * in d0i0 during IFWI update. Reboot is needed to work pmu
+ * driver properly again. After calling this function and IFWI
+ * update, system is always rebooted as IFWI update function,
+ * intel_scu_ipc_medfw_upgrade() is called from mrst_emergency_reboot().
+ */
+int pmu_set_devices_in_d0i0(void)
+{
+	int status;
+	struct pmu_ss_states cur_pmssc;
+
+	/* Ignore request until we have initialized */
+	if (unlikely((!pmu_initialized)))
+		return 0;
+
+	cur_pmssc.pmu2_states[0] = D0I0_MASK;
+	cur_pmssc.pmu2_states[1] = D0I0_MASK;
+	cur_pmssc.pmu2_states[2] = D0I0_MASK;
+	cur_pmssc.pmu2_states[3] = D0I0_MASK;
+
+	/* Restrict platform Cx state to C6 */
+	pm_qos_update_request(mid_pmu_cxt->s3_restrict_qos,
+				(CSTATE_EXIT_LATENCY_S0i1-1));
+
+	down(&mid_pmu_cxt->scu_ready_sem);
+
+	mid_pmu_cxt->shutdown_started = true;
+
+	/* Issue the pmu command to PMU 2
+	 * flag is needed to distinguish between
+	 * S0ix vs interactive command in pmu_sc_irq()
+	 */
+	status = pmu_issue_interactive_command(&cur_pmssc, false, false);
+
+	if (unlikely(status != PMU_SUCCESS)) {	/* pmu command failed */
+		printk(KERN_CRIT "%s: Failed to Issue a PM command to PMU2\n",
+								__func__);
+		mid_pmu_cxt->shutdown_started = false;
+
+		/* allow s0ix now */
+		pm_qos_update_request(mid_pmu_cxt->s3_restrict_qos,
+						PM_QOS_DEFAULT_VALUE);
+		goto unlock;
+	}
+
+	if (_pmu2_wait_not_busy()) {
+		pmu_dump_logs();
+		BUG();
+	}
+
+unlock:
+	up(&mid_pmu_cxt->scu_ready_sem);
+	return status;
+}
+EXPORT_SYMBOL(pmu_set_devices_in_d0i0);
+
+static int _pmu_read_status(int type)
+{
+	u32 temp;
+	union pmu_pm_status result;
+
+	temp = readl(&mid_pmu_cxt->pmu_reg->pm_sts);
+
+	/* extract the busy bit */
+	result.pmu_status_value = temp;
+
+	if (type == PMU_BUSY_STATUS)
+		return result.pmu_status_parts.pmu_busy;
+	else if (type == PMU_MODE_ID)
+		return result.pmu_status_parts.mode_id;
+
+	return 0;
+}
+
+int _pmu2_wait_not_busy(void)
+{
+	int pmu_busy_retry = PMU2_BUSY_TIMEOUT;
+
+	/* wait 500ms that the latest pmu command finished */
+	do {
+		if (_pmu_read_status(PMU_BUSY_STATUS) == 0)
+			return 0;
+
+		udelay(1);
+	} while (--pmu_busy_retry);
+
+	WARN(1, "pmu2 busy!");
+
+	return -EBUSY;
+}
+
+static int _pmu2_wait_not_busy_yield(void)
+{
+	int pmu_busy_retry = PMU2_BUSY_TIMEOUT;
+
+	/* wait max 500ms that the latest pmu command finished */
+	do {
+		if (_pmu_read_status(PMU_BUSY_STATUS) == 0)
+			return 0;
+
+		usleep_range(10, 12);
+		pmu_busy_retry -= 11;
+	} while (pmu_busy_retry > 0);
+
+	WARN(1, "pmu2 busy!");
+
+	return -EBUSY;
+}
+
+static void pmu_write_subsys_config(struct pmu_ss_states *pm_ssc)
+{
+	/* South complex in Penwell has multiple registers for
+	 * PM_SSC, etc.
+	 */
+	writel(pm_ssc->pmu2_states[0], &mid_pmu_cxt->pmu_reg->pm_ssc[0]);
+	writel(pm_ssc->pmu2_states[1], &mid_pmu_cxt->pmu_reg->pm_ssc[1]);
+	writel(pm_ssc->pmu2_states[2], &mid_pmu_cxt->pmu_reg->pm_ssc[2]);
+	writel(pm_ssc->pmu2_states[3], &mid_pmu_cxt->pmu_reg->pm_ssc[3]);
+}
+
+void log_wakeup_irq(void)
+{
+	unsigned int irr = 0, vector = 0;
+	int offset = 0, irq = 0;
+	struct irq_desc *desc;
+	const char *act_name;
+
+	if ((mid_pmu_cxt->pmu_current_state != SYS_STATE_S3)
+	    || !mid_pmu_cxt->suspend_started)
+		return;
+
+	for (offset = (FIRST_EXTERNAL_VECTOR/32);
+	offset < (NR_VECTORS/32); offset++) {
+		irr = apic->read(APIC_IRR + (offset * 0x10));
+
+		while (irr) {
+			vector = __ffs(irr);
+			irr &= ~(1 << vector);
+			irq = __this_cpu_read(
+					vector_irq[vector + (offset * 32)]);
+			if (irq < 0)
+				continue;
+			pr_info("wakeup from  IRQ %d\n", irq);
+
+			desc = irq_to_desc(irq);
+
+			if ((desc) && (desc->action)) {
+				act_name = desc->action->name;
+				pr_info("IRQ %d,action name:%s\n",
+					irq,
+					(act_name) ? (act_name) : "no action");
+			}
+		}
+	}
+	return;
+}
+
+static inline int pmu_interrupt_pending(void)
+{
+	u32 temp;
+	union pmu_pm_ics result;
+
+	/* read the pm interrupt status register */
+	temp = readl(&mid_pmu_cxt->pmu_reg->pm_ics);
+	result.pmu_pm_ics_value = temp;
+
+	/* return the pm interrupt status int pending bit info */
+	return result.pmu_pm_ics_parts.int_pend;
+}
+
+static inline void pmu_clear_pending_interrupt(void)
+{
+	u32 temp;
+
+	/* read the pm interrupt status register */
+	temp = readl(&mid_pmu_cxt->pmu_reg->pm_ics);
+
+	/* write into the PM_ICS register */
+	writel(temp, &mid_pmu_cxt->pmu_reg->pm_ics);
+}
+
+void pmu_set_interrupt_enable(void)
+{
+	u32 temp;
+	union pmu_pm_ics result;
+
+	/* read the pm interrupt status register */
+	temp = readl(&mid_pmu_cxt->pmu_reg->pm_ics);
+	result.pmu_pm_ics_value = temp;
+
+	/* Set the interrupt enable bit */
+	result.pmu_pm_ics_parts.int_enable = 1;
+
+	temp = result.pmu_pm_ics_value;
+
+	/* write into the PM_ICS register */
+	writel(temp, &mid_pmu_cxt->pmu_reg->pm_ics);
+}
+
+void pmu_clear_interrupt_enable(void)
+{
+	u32 temp;
+	union pmu_pm_ics result;
+
+	/* read the pm interrupt status register */
+	temp = readl(&mid_pmu_cxt->pmu_reg->pm_ics);
+	result.pmu_pm_ics_value = temp;
+
+	/* Clear the interrupt enable bit */
+	result.pmu_pm_ics_parts.int_enable = 0;
+
+	temp = result.pmu_pm_ics_value;
+
+	/* write into the PM_ICS register */
+	writel(temp, &mid_pmu_cxt->pmu_reg->pm_ics);
+}
+
+static inline int pmu_read_interrupt_status(void)
+{
+	u32 temp;
+	union pmu_pm_ics result;
+
+	/* read the pm interrupt status register */
+	temp = readl(&mid_pmu_cxt->pmu_reg->pm_ics);
+
+	result.pmu_pm_ics_value = temp;
+
+	if (result.pmu_pm_ics_parts.int_status == 0)
+		return PMU_FAILED;
+
+	/* return the pm interrupt status int pending bit info */
+	return result.pmu_pm_ics_parts.int_status;
+}
+
+/*This function is used for programming the wake capable devices*/
+static void pmu_prepare_wake(int s0ix_state)
+{
+
+	/* setup the wake capable devices */
+	if (s0ix_state == MID_S3_STATE) {
+		writel(~IGNORE_S3_WKC0, &mid_pmu_cxt->pmu_reg->pm_wkc[0]);
+		writel(~IGNORE_S3_WKC1, &mid_pmu_cxt->pmu_reg->pm_wkc[1]);
+	}
+}
+
+int mid_s0ix_enter(int s0ix_state)
+{
+	int ret = 0;
+
+	if (unlikely(!pmu_ops || !pmu_ops->enter))
+		goto ret;
+
+	/* check if we can acquire scu_ready_sem
+	 * if we are not able to then do a c6 */
+	if (down_trylock(&mid_pmu_cxt->scu_ready_sem))
+		goto ret;
+
+	/* If PMU is busy, we'll retry on next C6 */
+	if (unlikely(_pmu_read_status(PMU_BUSY_STATUS))) {
+		up(&mid_pmu_cxt->scu_ready_sem);
+		pr_debug("mid_pmu_cxt->scu_read_sem is up\n");
+		goto ret;
+	}
+
+	pmu_prepare_wake(s0ix_state);
+
+	/* no need to proceed if schedule pending */
+	if (unlikely(need_resched())) {
+		pmu_stat_clear();
+		/*set wkc to appropriate value suitable for s0ix*/
+		writel(mid_pmu_cxt->ss_config->wake_state.wake_enable[0],
+		       &mid_pmu_cxt->pmu_reg->pm_wkc[0]);
+		writel(mid_pmu_cxt->ss_config->wake_state.wake_enable[1],
+		       &mid_pmu_cxt->pmu_reg->pm_wkc[1]);
+		up(&mid_pmu_cxt->scu_ready_sem);
+		goto ret;
+	}
+
+	/* entry function for pmu driver ops */
+	if (pmu_ops->enter(s0ix_state))
+		ret = s0ix_state;
+	else  {
+		/*set wkc to appropriate value suitable for s0ix*/
+		writel(mid_pmu_cxt->ss_config->wake_state.wake_enable[0],
+		       &mid_pmu_cxt->pmu_reg->pm_wkc[0]);
+		writel(mid_pmu_cxt->ss_config->wake_state.wake_enable[1],
+		       &mid_pmu_cxt->pmu_reg->pm_wkc[1]);
+	}
+
+ret:
+	return ret;
+}
+
+/**
+ * pmu_sc_irq - pmu driver interrupt handler
+ * Context: interrupt context
+ */
+static irqreturn_t pmu_sc_irq(int irq, void *ignored)
+{
+	int status;
+	irqreturn_t ret = IRQ_NONE;
+	int wake_source;
+
+	/* check if interrup pending bit is set, if not ignore interrupt */
+	if (unlikely(!pmu_interrupt_pending())) {
+		goto ret_no_clear;
+	}
+
+	/* read the interrupt status */
+	status = pmu_read_interrupt_status();
+	if (unlikely(status == PMU_FAILED))
+		dev_dbg(&mid_pmu_cxt->pmu_dev->dev, "Invalid interrupt source\n");
+
+	switch (status) {
+	case INVALID_INT:
+		goto ret_no_clear;
+
+	case CMD_COMPLETE_INT:
+		break;
+
+	case CMD_ERROR_INT:
+		mid_pmu_cxt->cmd_error_int++;
+		break;
+
+	case SUBSYS_POW_ERR_INT:
+	case NO_ACKC6_INT:
+	case S0ix_MISS_INT:
+		pmu_stat_error(status);
+		break;
+
+	case WAKE_RECEIVED_INT:
+		wake_source = pmu_get_wake_source();
+		trace_printk("wake_from_lss%d\n",
+				wake_source);
+		pmu_stat_end();
+		break;
+	case TRIGGERERR:
+		pmu_dump_logs();
+		WARN(1, "%s: TRIGGERERR caused, but proceeding...\n", __func__);
+		break;
+	}
+
+	pmu_stat_clear();
+
+	/* clear the interrupt pending bit */
+	pmu_clear_pending_interrupt();
+
+	if (pmu_ops->wakeup)
+		pmu_ops->wakeup();
+
+	ret = IRQ_HANDLED;
+ret_no_clear:
+	/* clear interrupt enable bit */
+	pmu_clear_interrupt_enable();
+
+	return ret;
+}
+
+void pmu_set_s0ix_complete(void)
+{
+	if (pmu_ops->set_s0ix_complete)
+		pmu_ops->set_s0ix_complete();
+}
+EXPORT_SYMBOL(pmu_set_s0ix_complete);
+
+bool pmu_is_s0ix_in_progress(void)
+{
+	bool state = false;
+
+	if (pmu_initialized && mid_pmu_cxt->s0ix_entered)
+		state = true;
+
+	return state;
+}
+EXPORT_SYMBOL(pmu_is_s0ix_in_progress);
+
+static inline u32 find_index_in_hash(struct pci_dev *pdev, int *found)
+{
+	u32 h_index;
+	int i;
+
+	/* assuming pdev is not null */
+	WARN_ON(pdev == NULL);
+
+	/*assuming pdev pionter will not change from platfrom
+	 *boot to shutdown*/
+	h_index = jhash_1word((u32) (long) pdev,
+		 MID_PCI_INDEX_HASH_INITVALUE) & MID_PCI_INDEX_HASH_MASK;
+
+	/* assume not found */
+	*found = 0;
+
+	for (i = 0; i < MID_PCI_INDEX_HASH_SIZE; i++) {
+		if (likely(mid_pmu_cxt->pci_dev_hash[h_index].pdev == pdev)) {
+			*found = 1;
+			break;
+		}
+
+		/* assume no deletions, hence there shouldn't be any
+		 * gaps ie., NULL's */
+		if (unlikely(mid_pmu_cxt->pci_dev_hash[h_index].pdev == NULL)) {
+			/* found NULL, that means we wont have
+			 * it in hash */
+			break;
+		}
+
+		h_index = (h_index+1)%MID_PCI_INDEX_HASH_SIZE;
+	}
+
+	/* Assume hash table wont be full */
+	WARN_ON(i == MID_PCI_INDEX_HASH_SIZE);
+
+	return h_index;
+}
+
+static bool is_display_subclass(unsigned int sub_class)
+{
+	 /* On MRFLD, we have display PCI device class 0x38000 */
+
+	if (sub_class == 0x80 && (platform_is(INTEL_ATOM_MRFLD)
+				|| platform_is(INTEL_ATOM_MOORFLD)))
+		return true;
+
+	return false;
+}
+
+static int get_pci_to_pmu_index(struct pci_dev *pdev)
+{
+	int pm, type;
+	unsigned int base_class;
+	unsigned int sub_class;
+	u8 ss;
+	int index = PMU_FAILED;
+	u32 h_index;
+	int found;
+
+	h_index = find_index_in_hash(pdev, &found);
+
+	if (found)
+		return (int)mid_pmu_cxt->pci_dev_hash[h_index].index;
+
+	/* if not found, h_index would be where
+	 * we can insert this */
+
+	base_class = pdev->class >> 16;
+	sub_class  = (pdev->class & SUB_CLASS_MASK) >> 8;
+	pm = pci_find_capability(pdev, PCI_CAP_ID_VNDR);
+
+	/* read the logical sub system id & cap if present */
+	pci_read_config_byte(pdev, pm + 4, &ss);
+
+	type = ss & LOG_SS_MASK;
+	ss = ss & LOG_ID_MASK;
+
+	if ((base_class == PCI_BASE_CLASS_DISPLAY) &&
+			is_display_subclass(sub_class))
+		index = 1;
+	else if ((base_class == PCI_BASE_CLASS_MULTIMEDIA) &&
+			(sub_class == ISP_SUB_CLASS))
+				index = ISP_POS;
+	else if (type) {
+		WARN_ON(ss >= MAX_LSS_POSSIBLE);
+		index = mid_pmu_cxt->pmu1_max_devs + ss;
+	}
+
+	if (index != PMU_FAILED) {
+		/* insert into hash table */
+		mid_pmu_cxt->pci_dev_hash[h_index].pdev = pdev;
+
+		/* assume index never exceeds 0xff */
+		WARN_ON(index > 0xFF);
+
+		mid_pmu_cxt->pci_dev_hash[h_index].index = (u8)index;
+
+		if (index < mid_pmu_cxt->pmu1_max_devs) {
+			set_mid_pci_ss_idx(index, 0);
+			set_mid_pci_ss_pos(index, (u8)index);
+			set_mid_pci_pmu_num(index, PMU_NUM_1);
+		} else if (index >= mid_pmu_cxt->pmu1_max_devs &&
+			   index < (mid_pmu_cxt->pmu1_max_devs +
+						mid_pmu_cxt->pmu2_max_devs)) {
+			set_mid_pci_ss_idx(index,
+					(u8)(ss / mid_pmu_cxt->ss_per_reg));
+			set_mid_pci_ss_pos(index,
+					(u8)(ss % mid_pmu_cxt->ss_per_reg));
+			set_mid_pci_pmu_num(index, PMU_NUM_2);
+		} else {
+			index = PMU_FAILED;
+		}
+
+		WARN_ON(index == PMU_FAILED);
+	}
+
+	return index;
+}
+
+static void get_pci_lss_info(struct pci_dev *pdev)
+{
+	int index, pm;
+	unsigned int base_class;
+	unsigned int sub_class;
+	u8 ss, cap;
+	int i;
+	base_class = pdev->class >> 16;
+	sub_class  = (pdev->class & SUB_CLASS_MASK) >> 8;
+
+	pm = pci_find_capability(pdev, PCI_CAP_ID_VNDR);
+
+	/* read the logical sub system id & cap if present */
+	pci_read_config_byte(pdev, pm + 4, &ss);
+	pci_read_config_byte(pdev, pm + 5, &cap);
+
+	/* get the index for the copying of ss info */
+	index = get_pci_to_pmu_index(pdev);
+
+	if ((index == PMU_FAILED) || (index >= MAX_DEVICES))
+		return;
+
+	/* initialize gfx subsystem info */
+	if ((base_class == PCI_BASE_CLASS_DISPLAY) &&
+			is_display_subclass(sub_class)) {
+		set_mid_pci_log_id(index, (u32)index);
+		set_mid_pci_cap(index, PM_SUPPORT);
+	} else if ((base_class == PCI_BASE_CLASS_MULTIMEDIA) &&
+		(sub_class == ISP_SUB_CLASS)) {
+			set_mid_pci_log_id(index, (u32)index);
+			set_mid_pci_cap(index, PM_SUPPORT);
+	} else if (ss && cap) {
+		set_mid_pci_log_id(index, (u32)(ss & LOG_ID_MASK));
+		set_mid_pci_cap(index, cap);
+	}
+
+	for (i = 0; i < PMU_MAX_LSS_SHARE &&
+		get_mid_pci_drv(index, i); i++) {
+		/* do nothing */
+	}
+
+	WARN_ON(i >= PMU_MAX_LSS_SHARE);
+
+	if (i < PMU_MAX_LSS_SHARE) {
+		set_mid_pci_drv(index, i, pdev);
+		set_mid_pci_power_state(index, i, PCI_D3hot);
+	}
+}
+
+static void pmu_enumerate(void)
+{
+	struct pci_dev *pdev = NULL;
+	unsigned int base_class;
+
+	for_each_pci_dev(pdev) {
+		if ((platform_is(INTEL_ATOM_MRFLD) ||
+			platform_is(INTEL_ATOM_MOORFLD)) &&
+			pdev->device == MID_MRFL_HDMI_DRV_DEV_ID)
+			continue;
+
+		/* find the base class info */
+		base_class = pdev->class >> 16;
+
+		if (base_class == PCI_BASE_CLASS_BRIDGE)
+			continue;
+
+		get_pci_lss_info(pdev);
+	}
+}
+
+void pmu_read_sss(struct pmu_ss_states *pm_ssc)
+{
+	pm_ssc->pmu2_states[0] =
+			readl(&mid_pmu_cxt->pmu_reg->pm_sss[0]);
+	pm_ssc->pmu2_states[1] =
+			readl(&mid_pmu_cxt->pmu_reg->pm_sss[1]);
+	pm_ssc->pmu2_states[2] =
+			readl(&mid_pmu_cxt->pmu_reg->pm_sss[2]);
+	pm_ssc->pmu2_states[3] =
+			readl(&mid_pmu_cxt->pmu_reg->pm_sss[3]);
+}
+
+
+/*
+ * For all devices in this lss, we check what is the weakest power state
+ *
+ * Thus we dont power down if another device needs more power
+ */
+
+static pci_power_t  pmu_pci_get_weakest_state_for_lss(int lss_index,
+				struct pci_dev *pdev, pci_power_t state)
+{
+	int i;
+	pci_power_t weakest = state;
+
+	for (i = 0; i < PMU_MAX_LSS_SHARE; i++) {
+		if (get_mid_pci_drv(lss_index, i) == pdev)
+			set_mid_pci_power_state(lss_index, i, state);
+
+		if (get_mid_pci_drv(lss_index, i) &&
+			(get_mid_pci_power_state(lss_index, i) < weakest))
+			weakest = get_mid_pci_power_state(lss_index, i);
+	}
+	return weakest;
+}
+
+int pmu_pci_to_indexes(struct pci_dev *pdev, int *index,
+				int *pmu_num, int *ss_idx, int *ss_pos)
+{
+	int i;
+
+	i = get_pci_to_pmu_index(pdev);
+	if (i == PMU_FAILED)
+		return PMU_FAILED;
+
+	*index		= i;
+	*ss_pos		= get_mid_pci_ss_pos(i);
+	*ss_idx		= get_mid_pci_ss_idx(i);
+	*pmu_num	= get_mid_pci_pmu_num(i);
+
+	return PMU_SUCCESS;
+}
+
+static bool update_nc_device_states(int i, pci_power_t state)
+{
+	int status = 0;
+	int islands = 0;
+	int reg;
+
+	/* store the display status */
+	if (i == GFX_LSS_INDEX) {
+		mid_pmu_cxt->display_off = (state != PCI_D0);
+		return true;
+	}
+
+	/*Update the Camera status per ISP Driver Suspended/Resumed
+	* ISP power islands are also updated accordingly, otherwise Dx state
+	* in PMCSR refuses to change.
+	*/
+	else if (i == ISP_POS) {
+		if (platform_is(INTEL_ATOM_MRFLD) ||
+				platform_is(INTEL_ATOM_MOORFLD)) {
+			islands = TNG_ISP_ISLAND;
+			reg = ISP_SS_PM0;
+		} else
+			return false;
+		status = pmu_nc_set_power_state(islands,
+			(state != PCI_D0) ?
+			OSPM_ISLAND_DOWN : OSPM_ISLAND_UP,
+			reg);
+		if (status)
+			return false;
+		mid_pmu_cxt->camera_off = (state != PCI_D0);
+		return true;
+	}
+
+	return false;
+}
+
+void init_nc_device_states(void)
+{
+#if !IS_ENABLED(CONFIG_VIDEO_ATOMISP)
+	mid_pmu_cxt->camera_off = false;
+#endif
+
+#ifndef GFX_ENABLE
+	/* If Gfx is disabled
+	 * assume s0ix is not blocked
+	 * from gfx side
+	 */
+	mid_pmu_cxt->display_off = true;
+#endif
+
+	return;
+}
+
+/* FIXME::Currently HSI Modem 7060 (BZ# 28529) is having a issue and
+* it will not go to Low Power State on CVT. So Standby will not work
+* if HSI is enabled.
+* We can choose between Standby/HSI based on enable_stadby 1/0.
+*/
+unsigned int enable_standby __read_mostly;
+module_param(enable_standby, uint, 0000);
+
+/* FIXME:: We have issues with S0ix/S3 enabling by default
+ * with display lockup, HSIC etc., so have a boot time option
+ * to enable S0ix/S3
+ */
+unsigned int enable_s3 __read_mostly = 1;
+int set_enable_s3(const char *val, struct kernel_param *kp)
+{
+	int rv = param_set_int(val, kp);
+	if (rv)
+		return rv;
+
+	if (unlikely((!pmu_initialized)))
+		return 0;
+
+	if (platform_is(INTEL_ATOM_MRFLD) || platform_is(INTEL_ATOM_MOORFLD)) {
+		if (!enable_s3)
+			__pm_stay_awake(mid_pmu_cxt->pmu_wake_lock);
+		else
+			__pm_relax(mid_pmu_cxt->pmu_wake_lock);
+	}
+
+	return 0;
+}
+module_param_call(enable_s3, set_enable_s3, param_get_uint,
+				&enable_s3, S_IRUGO | S_IWUSR);
+
+/* FIXME:: We have issues with S0ix/S3 enabling by default
+ * with display lockup, HSIC etc., so have a boot time option
+ * to enable S0ix/S3
+ */
+unsigned int enable_s0ix __read_mostly = 1;
+int set_enable_s0ix(const char *val, struct kernel_param *kp)
+{
+	int rv = param_set_int(val, kp);
+	if (rv)
+		return rv;
+
+	if (unlikely((!pmu_initialized)))
+		return 0;
+
+	if (platform_is(INTEL_ATOM_MRFLD) || platform_is(INTEL_ATOM_MOORFLD)) {
+		if (!enable_s0ix) {
+			mid_pmu_cxt->cstate_ignore =
+				~((1 << (CPUIDLE_STATE_MAX-1)) - 1);
+
+			/* Ignore C2, C3, C5 states */
+			mid_pmu_cxt->cstate_ignore |= (1 << 1);
+			mid_pmu_cxt->cstate_ignore |= (1 << 2);
+			mid_pmu_cxt->cstate_ignore |= (1 << 4);
+
+			/* For now ignore C7, C8, C9 states */
+			mid_pmu_cxt->cstate_ignore |= (1 << 6);
+			mid_pmu_cxt->cstate_ignore |= (1 << 7);
+			mid_pmu_cxt->cstate_ignore |= (1 << 8);
+
+			/* Restrict platform Cx state to C6 */
+			pm_qos_update_request(mid_pmu_cxt->cstate_qos,
+						(CSTATE_EXIT_LATENCY_S0i1-1));
+		} else {
+			mid_pmu_cxt->cstate_ignore =
+				~((1 << (CPUIDLE_STATE_MAX-1)) - 1);
+
+			/* Ignore C2, C3, C5, C8 states */
+			mid_pmu_cxt->cstate_ignore |= (1 << 1);
+			mid_pmu_cxt->cstate_ignore |= (1 << 2);
+			mid_pmu_cxt->cstate_ignore |= (1 << 4);
+			mid_pmu_cxt->cstate_ignore |= (1 << 7);
+
+			pm_qos_update_request(mid_pmu_cxt->cstate_qos,
+							PM_QOS_DEFAULT_VALUE);
+		}
+	}
+
+	return 0;
+}
+module_param_call(enable_s0ix, set_enable_s0ix, param_get_uint,
+				&enable_s0ix, S_IRUGO | S_IWUSR);
+
+unsigned int pmu_ignore_lss0 __read_mostly = IGNORE_SSS0;
+module_param(pmu_ignore_lss0, uint, S_IRUGO | S_IWUSR);
+
+unsigned int pmu_ignore_lss1 __read_mostly = IGNORE_SSS1;
+module_param(pmu_ignore_lss1, uint, S_IRUGO | S_IWUSR);
+
+unsigned int pmu_ignore_lss2 __read_mostly = IGNORE_SSS2;
+module_param(pmu_ignore_lss2, uint, S_IRUGO | S_IWUSR);
+
+unsigned int pmu_ignore_lss3 __read_mostly = IGNORE_SSS3;
+module_param(pmu_ignore_lss3, uint, S_IRUGO | S_IWUSR);
+
+int pmu_set_emmc_to_d0i0_atomic(void)
+{
+	u32 pm_cmd_val;
+	u32 new_value;
+	int sub_sys_pos, sub_sys_index;
+	struct pmu_ss_states cur_pmssc;
+	int status = 0;
+
+	if (unlikely((!pmu_initialized)))
+		return 0;
+
+	/* LSS 01 is index = 0, pos = 1 */
+	sub_sys_index	= EMMC0_LSS / mid_pmu_cxt->ss_per_reg;
+	sub_sys_pos	= EMMC0_LSS % mid_pmu_cxt->ss_per_reg;
+
+	memset(&cur_pmssc, 0, sizeof(cur_pmssc));
+
+	/*
+	 * Give time for possible previous PMU operation to finish in
+	 * case where SCU is functioning normally. For SCU crashed case
+	 * PMU may stay busy but check if the emmc is accessible.
+	 */
+	status = _pmu2_wait_not_busy();
+	if (status) {
+		dev_err(&mid_pmu_cxt->pmu_dev->dev,
+			"PMU2 busy, ignoring as emmc might be already d0i0\n");
+		status = 0;
+	}
+
+	pmu_read_sss(&cur_pmssc);
+
+	/* set D0i0 the LSS bits */
+	pm_cmd_val =
+		(D0I3_MASK << (sub_sys_pos * BITS_PER_LSS));
+	new_value = cur_pmssc.pmu2_states[sub_sys_index] &
+						(~pm_cmd_val);
+	if (new_value == cur_pmssc.pmu2_states[sub_sys_index])
+		goto err;
+
+	status = _pmu2_wait_not_busy();
+	if (status)
+		goto err;
+
+	cur_pmssc.pmu2_states[sub_sys_index] = new_value;
+
+	/* Request SCU for PM interrupt enabling */
+	writel(PMU_PANIC_EMMC_UP_REQ_CMD, mid_pmu_cxt->emergency_emmc_up_addr);
+
+	status = pmu_issue_interactive_command(&cur_pmssc, false, false);
+
+	if (unlikely(status != PMU_SUCCESS)) {
+		dev_dbg(&mid_pmu_cxt->pmu_dev->dev,
+			 "Failed to Issue a PM command to PMU2\n");
+		goto err;
+
+	}
+
+	/*
+	 * Wait for interactive command to complete.
+	 * If we dont wait, there is a possibility that
+	 * the driver may access the device before its
+	 * powered on in SCU.
+	 *
+	 */
+	if (_pmu2_wait_not_busy()) {
+		pmu_dump_logs();
+		BUG();
+	}
+
+err:
+
+	return status;
+}
+
+
+#define SAVED_HISTORY_ADDRESS_NUM	10
+#define SAVED_HISTORY_NUM		20
+#define PCI_MAX_RECORD_NUM		10
+
+struct saved_nc_power_history {
+	unsigned long long ts;
+	unsigned short pci;
+	unsigned short cpu:4;
+	unsigned short state_type:8;
+	unsigned short real_change:2;
+	int reg_type;
+	int islands;
+	void *address[SAVED_HISTORY_ADDRESS_NUM];
+};
+
+static atomic_t saved_nc_power_history_current = ATOMIC_INIT(-1);
+static struct saved_nc_power_history all_history[SAVED_HISTORY_NUM];
+static struct saved_nc_power_history *get_new_record_history(void)
+{
+	unsigned int ret =
+		atomic_add_return(1, &saved_nc_power_history_current);
+	return &all_history[ret%SAVED_HISTORY_NUM];
+}
+
+static unsigned short pci_need_record[PCI_MAX_RECORD_NUM] = { 0x08c8, 0x0130, };
+static int num_pci_need_record = 2;
+module_param_array(pci_need_record, ushort, &num_pci_need_record, 0644);
+MODULE_PARM_DESC(pci_need_record,
+		"devices need be traced power state transition.");
+
+static bool pci_need_record_power_state(struct pci_dev *pdev)
+{
+	int i;
+	for (i = 0; i < num_pci_need_record; i++)
+		if (pdev->device == pci_need_record[i])
+			return true;
+
+	return false;
+}
+
+static void print_saved_record(struct saved_nc_power_history *record)
+{
+	int i;
+	unsigned long long ts = record->ts;
+	unsigned long nanosec_rem = do_div(ts, 1000000000);
+
+	printk(KERN_INFO "----\n");
+	printk(KERN_INFO "ts[%5lu.%06lu] cpu[%d] is pci[%04x] reg_type[%d] "
+			"state_type[%d] islands[%x] real_change[%d]\n",
+		(unsigned long)ts,
+		nanosec_rem / 1000,
+		record->cpu,
+		record->pci,
+		record->reg_type,
+		record->state_type,
+		record->islands,
+		record->real_change);
+	for (i = 0; i < SAVED_HISTORY_ADDRESS_NUM; i++) {
+		printk(KERN_INFO "%pf real_addr[%p]\n",
+			record->address[i],
+			record->address[i]);
+	}
+}
+
+#ifdef CONFIG_FRAME_POINTER
+size_t backtrace_safe(void **array, size_t max_size)
+{
+	unsigned long *bp;
+	unsigned long *caller;
+	unsigned int i;
+
+	get_bp(bp);
+
+	caller = (unsigned long *) *(bp+1);
+
+	for (i = 0; i < max_size; i++)
+		array[i] = 0;
+	for (i = 0; i < max_size; i++) {
+		array[i] = caller;
+		bp = (unsigned long *) *bp;
+		if (!object_is_on_stack(bp))
+			break;
+		caller = (unsigned long *) *(bp+1);
+	}
+
+	return i + 1;
+}
+#else
+size_t backtrace_safe(void **array, size_t max_size)
+{
+	memset(array, 0, max_size);
+	return 0;
+}
+#endif
+
+void dump_nc_power_history(void)
+{
+	int i, start;
+	unsigned int total = atomic_read(&saved_nc_power_history_current);
+
+	start = total % SAVED_HISTORY_NUM;
+	printk(KERN_INFO "<----current timestamp\n");
+	printk(KERN_INFO "start[%d] saved[%d]\n",
+			start, total);
+	for (i = start; i >= 0; i--)
+		print_saved_record(&all_history[i]);
+	for (i = SAVED_HISTORY_NUM - 1; i > start; i--)
+		print_saved_record(&all_history[i]);
+}
+EXPORT_SYMBOL(dump_nc_power_history);
+
+static ssize_t debug_read_history(struct file *file, char __user *buffer,
+			size_t count, loff_t *pos)
+{
+	dump_nc_power_history();
+
+	return 0;
+}
+
+static ssize_t debug_write_read_history_entry(struct file *file,
+		const char __user *buffer, size_t count, loff_t *pos)
+{
+	return count;
+}
+
+static const struct file_operations proc_debug_operations = {
+	.owner	= THIS_MODULE,
+	.read	= debug_read_history,
+	.write	= debug_write_read_history_entry,
+};
+
+static int __init debug_read_history_entry(void)
+{
+	struct proc_dir_entry *res = NULL;
+
+	res = proc_create("debug_read_history", S_IRUGO | S_IWUSR, NULL,
+		&proc_debug_operations);
+
+	if (!res)
+		return -ENOMEM;
+
+	return 0;
+}
+device_initcall(debug_read_history_entry);
+
+/**
+ * pmu_nc_set_power_state - Callback function is used by all the devices
+ * in north complex for a platform  specific device power on/shutdown.
+ * Following assumptions are made by this function
+ *
+ * Every new request starts from scratch with no assumptions
+ * on previous/pending request to Punit.
+ * Caller is responsible to retry if request fails.
+ * Avoids multiple requests to Punit if target state is
+ * already in the expected state.
+ * spin_locks guarantee serialized access to these registers
+ * and avoid concurrent access from 2d/3d, VED, VEC, ISP & IPH.
+ *
+ */
+int pmu_nc_set_power_state(int islands, int state_type, int reg)
+{
+	unsigned long flags;
+	struct saved_nc_power_history *record = NULL;
+	int ret = 0;
+	int change;
+
+	spin_lock_irqsave(&mid_pmu_cxt->nc_ready_lock, flags);
+
+	record = get_new_record_history();
+	record->cpu = raw_smp_processor_id();
+	record->ts = cpu_clock(record->cpu);
+	record->islands = islands;
+	record->pci = 0;
+	record->state_type = state_type;
+	backtrace_safe(record->address, SAVED_HISTORY_ADDRESS_NUM);
+	record->real_change = 0;
+	record->reg_type = reg;
+
+	if (pmu_ops->nc_set_power_state)	{
+		ret = pmu_ops->nc_set_power_state(islands, state_type,
+								reg, &change);
+		if (change) {
+			record->real_change = 1;
+			record->ts = cpu_clock(record->cpu);
+		}
+	}
+
+	spin_unlock_irqrestore(&mid_pmu_cxt->nc_ready_lock, flags);
+	return ret;
+}
+EXPORT_SYMBOL(pmu_nc_set_power_state);
+
+/**
+ * pmu_nc_get_power_state - Callback function is used to
+ * query power status of all the devices in north complex.
+ * Following assumptions are made by this function
+ *
+ * Every new request starts from scratch with no assumptions
+ * on previous/pending request to Punit.
+ * Caller is responsible to retry if request fails.
+ * Avoids multiple requests to Punit if target state is
+ * already in the expected state.
+ * spin_locks guarantee serialized access to these registers
+ * and avoid concurrent access from 2d/3d, VED, VEC, ISP & IPH.
+ *
+ */
+int pmu_nc_get_power_state(int island, int reg_type)
+{
+	return 0;
+}
+EXPORT_SYMBOL(pmu_nc_get_power_state);
+
+void pmu_set_s0i1_disp_vote(bool enable)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&mid_pmu_cxt->nc_ready_lock, flags);
+
+	if (pmu_ops->set_s0i1_disp_vote)
+		pmu_ops->set_s0i1_disp_vote(enable);
+
+	spin_unlock_irqrestore(&mid_pmu_cxt->nc_ready_lock, flags);
+}
+EXPORT_SYMBOL(pmu_set_s0i1_disp_vote);
+
+/*
+* update_dev_res - Calulates & Updates the device residency when
+* a device state change occurs.
+* Computation of respective device residency starts when
+* its first state tranisition happens after the pmu driver
+* is initialised.
+*
+*/
+void update_dev_res(int index, pci_power_t state)
+{
+	if (state != PCI_D0) {
+		if (mid_pmu_cxt->pmu_dev_res[index].start == 0) {
+			mid_pmu_cxt->pmu_dev_res[index].start = cpu_clock(0);
+			mid_pmu_cxt->pmu_dev_res[index].d0i3_entry =
+				mid_pmu_cxt->pmu_dev_res[index].start;
+				mid_pmu_cxt->pmu_dev_res[index].d0i0_acc = 0;
+		} else{
+			mid_pmu_cxt->pmu_dev_res[index].d0i3_entry =
+							cpu_clock(0);
+			mid_pmu_cxt->pmu_dev_res[index].d0i0_acc +=
+			(mid_pmu_cxt->pmu_dev_res[index].d0i3_entry -
+				 mid_pmu_cxt->pmu_dev_res[index].d0i0_entry);
+		}
+	} else {
+		if (mid_pmu_cxt->pmu_dev_res[index].start == 0) {
+			mid_pmu_cxt->pmu_dev_res[index].start =
+						 cpu_clock(0);
+			mid_pmu_cxt->pmu_dev_res[index].d0i0_entry
+				= mid_pmu_cxt->pmu_dev_res[index].start;
+			mid_pmu_cxt->pmu_dev_res[index].d0i3_acc = 0;
+		} else {
+			mid_pmu_cxt->pmu_dev_res[index].d0i0_entry =
+						 cpu_clock(0);
+			mid_pmu_cxt->pmu_dev_res[index].d0i3_acc +=
+			(mid_pmu_cxt->pmu_dev_res[index].d0i0_entry -
+			mid_pmu_cxt->pmu_dev_res[index].d0i3_entry);
+		}
+	}
+	mid_pmu_cxt->pmu_dev_res[index].state = state;
+}
+
+/**
+ * pmu_pci_set_power_state - Callback function is used by all the PCI devices
+ *			for a platform  specific device power on/shutdown.
+ *
+ */
+int __ref pmu_pci_set_power_state(struct pci_dev *pdev, pci_power_t state)
+{
+	u32 new_value;
+	int i = 0;
+	u32 pm_cmd_val, chk_val;
+	int sub_sys_pos, sub_sys_index;
+	int pmu_num;
+	struct pmu_ss_states cur_pmssc;
+	int status = 0;
+	int retry_times = 0;
+	ktime_t calltime, delta, rettime;
+	struct saved_nc_power_history *record = NULL;
+	bool d3_cold = false;
+
+	/* Ignore callback from devices until we have initialized */
+	if (unlikely((!pmu_initialized)))
+		return 0;
+
+	might_sleep();
+
+	/* Try to acquire the scu_ready_sem, if not
+	 * get blocked, until pmu_sc_irq() releases */
+	down(&mid_pmu_cxt->scu_ready_sem);
+
+	/*get LSS index corresponding to pdev, its position in
+	 *32 bit register and its register numer*/
+	status =
+		pmu_pci_to_indexes(pdev, &i, &pmu_num,
+				&sub_sys_index,  &sub_sys_pos);
+
+	if (status)
+		goto unlock;
+
+	/* Ignore D0i0 requests for LSS that have no drivers */
+	if (pmu_power_down_lss_without_driver(i, sub_sys_index,
+						sub_sys_pos, state))
+		goto unlock;
+
+	if (pci_need_record_power_state(pdev)) {
+		record = get_new_record_history();
+		record->cpu = raw_smp_processor_id();
+		record->ts = cpu_clock(record->cpu);
+		record->islands = 0;
+		record->reg_type = 0;
+		record->pci = pdev->device;
+		record->state_type = state;
+		backtrace_safe(record->address, SAVED_HISTORY_ADDRESS_NUM);
+		record->real_change = 0;
+	}
+
+	/* Ignore HDMI HPD driver d0ix on LSS 0 on MRFLD */
+	if ((platform_is(INTEL_ATOM_MRFLD) || platform_is(INTEL_ATOM_MOORFLD)) &&
+			pdev->device == MID_MRFL_HDMI_DRV_DEV_ID)
+			goto unlock;
+
+	/*in case a LSS is assigned to more than one pdev, we need
+	  *to find the shallowest state the LSS should be put into*/
+	state = pmu_pci_get_weakest_state_for_lss(i, pdev, state);
+
+	/*If the LSS corresponds to northcomplex device, update
+	  *the status and return*/
+	if (update_nc_device_states(i, state)) {
+		if (mid_pmu_cxt->pmu_dev_res[i].state == state)
+			goto nc_done;
+		else {
+			if (i < MAX_DEVICES)
+				update_dev_res(i, state);
+			goto nc_done;
+		}
+	}
+
+	/* initialize the current pmssc states */
+	memset(&cur_pmssc, 0, sizeof(cur_pmssc));
+
+	status = _pmu2_wait_not_busy();
+
+	if (status)
+		goto unlock;
+
+	pmu_read_sss(&cur_pmssc);
+
+	/* Read the pm_cmd val & update the value */
+	pm_cmd_val =
+		(D0I3_MASK << (sub_sys_pos * BITS_PER_LSS));
+
+	/* First clear the LSS bits */
+	new_value = cur_pmssc.pmu2_states[sub_sys_index] &
+						(~pm_cmd_val);
+	mid_pmu_cxt->os_sss[sub_sys_index] &= ~pm_cmd_val;
+
+	if (state != PCI_D0) {
+		pm_cmd_val =
+			(pci_to_platform_state(state) <<
+				(sub_sys_pos * BITS_PER_LSS));
+
+		new_value |= pm_cmd_val;
+
+		mid_pmu_cxt->os_sss[sub_sys_index] |= pm_cmd_val;
+	}
+
+	new_value &= ~mid_pmu_cxt->ignore_lss[sub_sys_index];
+
+	/* nothing to do, so dont do it... */
+	if (new_value == cur_pmssc.pmu2_states[sub_sys_index])
+		goto unlock;
+
+	cur_pmssc.pmu2_states[sub_sys_index] = new_value;
+
+	/* Check if the state is D3_cold or D3_Hot in TNG platform*/
+	if ((platform_is(INTEL_ATOM_MRFLD) || platform_is(INTEL_ATOM_MOORFLD))
+		&& (state == PCI_D3cold))
+		d3_cold = true;
+
+	/* Issue the pmu command to PMU 2
+	 * flag is needed to distinguish between
+	 * S0ix vs interactive command in pmu_sc_irq()
+	 */
+	status = pmu_issue_interactive_command(&cur_pmssc, false, d3_cold);
+
+	if (unlikely(status != PMU_SUCCESS)) {
+		dev_dbg(&mid_pmu_cxt->pmu_dev->dev,
+			 "Failed to Issue a PM command to PMU2\n");
+		goto unlock;
+	}
+
+	calltime = ktime_get();
+retry:
+	/*
+	 * Wait for interactive command to complete.
+	 * If we dont wait, there is a possibility that
+	 * the driver may access the device before its
+	 * powered on in SCU.
+	 *
+	 */
+	status = _pmu2_wait_not_busy_yield();
+	if (unlikely(status)) {
+		rettime = ktime_get();
+		delta = ktime_sub(rettime, calltime);
+		retry_times++;
+
+		printk(KERN_CRIT "%s: D0ix transition failure: %04x %04X %s %20s:\n",
+				__func__,
+				pdev->vendor, pdev->device,
+				dev_name(&pdev->dev),
+				dev_driver_string(&pdev->dev));
+		printk(KERN_CRIT "interrupt pending = %d\n",
+				pmu_interrupt_pending());
+		printk(KERN_CRIT "pmu_busy_status = %d\n",
+				_pmu_read_status(PMU_BUSY_STATUS));
+		printk(KERN_CRIT "suspend_started = %d\n",
+				mid_pmu_cxt->suspend_started);
+		printk(KERN_CRIT "shutdown_started = %d\n",
+				mid_pmu_cxt->shutdown_started);
+		printk(KERN_CRIT "camera_off = %d display_off = %d\n",
+				mid_pmu_cxt->camera_off,
+				mid_pmu_cxt->display_off);
+		printk(KERN_CRIT "s0ix_possible = 0x%x\n",
+				mid_pmu_cxt->s0ix_possible);
+		printk(KERN_CRIT "s0ix_entered = 0x%x\n",
+				mid_pmu_cxt->s0ix_entered);
+		printk(KERN_CRIT "pmu_current_state = %d\n",
+				mid_pmu_cxt->pmu_current_state);
+		printk(KERN_CRIT "PMU is BUSY! retry_times[%d] total_delay[%lli]ms. Retry ...\n",
+				retry_times, (long long) ktime_to_ms(delta));
+		pmu_dump_logs();
+
+		trigger_all_cpu_backtrace();
+		if (retry_times < 60)
+			goto retry;
+		else
+			BUG();
+	}
+	if (record) {
+		record->real_change = 1;
+		record->ts = cpu_clock(record->cpu);
+	}
+
+	if (pmu_ops->set_power_state_ops)
+		pmu_ops->set_power_state_ops(state);
+
+	/* update stats */
+	inc_d0ix_stat((i-mid_pmu_cxt->pmu1_max_devs),
+				pci_to_platform_state(state));
+
+	/* D0i0 stats */
+	{
+		int lss = i-mid_pmu_cxt->pmu1_max_devs;
+		if (state == PCI_D0) {
+			mid_pmu_cxt->d0i0_count[lss]++;
+			mid_pmu_cxt->d0i0_prev_time[lss] = cpu_clock(0);
+		} else {
+			mid_pmu_cxt->d0i0_time[lss] += (cpu_clock(0) -
+						mid_pmu_cxt->d0i0_prev_time[lss]);
+		}
+	}
+
+	/* check if tranisition to requested state has happened */
+	pmu_read_sss(&cur_pmssc);
+
+	chk_val = cur_pmssc.pmu2_states[sub_sys_index] &
+		(D0I3_MASK << (sub_sys_pos * BITS_PER_LSS));
+	new_value &= (D0I3_MASK << (sub_sys_pos * BITS_PER_LSS));
+
+	if ((chk_val == new_value) && (i < MAX_DEVICES))
+		update_dev_res(i, state);
+
+	WARN_ON(chk_val != new_value);
+
+nc_done:
+#if !IS_ENABLED(CONFIG_VIDEO_ATOMISP)
+	/* ATOMISP is always powered up on system-resume path. It needs
+	 * to be turned off here if there is no driver to do it. */
+	if (!mid_pmu_cxt->camera_off) {
+		/* power down isp */
+		pmu_nc_set_power_state(APM_ISP_ISLAND | APM_IPH_ISLAND,
+				       OSPM_ISLAND_DOWN, APM_REG_TYPE);
+		/* power down DPHY */
+		new_value = intel_mid_msgbus_read32(0x09, 0x03);
+		new_value |= 0x300;
+		intel_mid_msgbus_write32(0x09, 0x03, new_value);
+		mid_pmu_cxt->camera_off = true;
+	}
+#endif
+
+unlock:
+	up(&mid_pmu_cxt->scu_ready_sem);
+
+	return status;
+}
+
+pci_power_t platfrom_pmu_choose_state(int lss)
+{
+	pci_power_t state = PCI_D3hot;
+
+	if (pmu_ops->pci_choose_state)
+		state = pmu_ops->pci_choose_state(lss);
+
+	return state;
+}
+
+/* return platform specific deepest states that the device can enter */
+pci_power_t pmu_pci_choose_state(struct pci_dev *pdev)
+{
+	int i;
+	int sub_sys_pos, sub_sys_index;
+	int status;
+	int device_lss;
+	int pmu_num;
+
+	pci_power_t state = PCI_D3hot;
+
+	if (pmu_initialized) {
+		status =
+		pmu_pci_to_indexes(pdev, &i, &pmu_num,
+					&sub_sys_index,  &sub_sys_pos);
+
+		if ((status == PMU_SUCCESS) &&
+			(pmu_num == PMU_NUM_2)) {
+
+			device_lss =
+				(sub_sys_index * mid_pmu_cxt->ss_per_reg) +
+								sub_sys_pos;
+
+			state = platfrom_pmu_choose_state(device_lss);
+		}
+	}
+
+	return state;
+}
+
+int pmu_issue_interactive_command(struct pmu_ss_states *pm_ssc, bool ioc,
+					bool d3_cold)
+{
+	u32 command;
+
+	if (_pmu2_wait_not_busy()) {
+		dev_err(&mid_pmu_cxt->pmu_dev->dev,
+			"SCU BUSY. Operation not permitted\n");
+		return PMU_FAILED;
+	}
+
+	/* enable interrupts in PMU2 so that interrupts are
+	 * propagated when ioc bit for a particular set
+	 * command is set
+	 */
+	/* Enable the hardware interrupt */
+	if (ioc)
+		pmu_set_interrupt_enable();
+
+	/* Configure the sub systems for pmu2 */
+	pmu_write_subsys_config(pm_ssc);
+
+	command = (ioc) ? INTERACTIVE_IOC_VALUE : INTERACTIVE_VALUE;
+
+	 /* Special handling for PCI_D3cold in Tangier */
+	if (d3_cold)
+		command |= PM_CMD_D3_COLD;
+
+	/* send interactive command to SCU */
+	writel(command, &mid_pmu_cxt->pmu_reg->pm_cmd);
+
+	pmu_log_command(command, pm_ssc);
+
+	return 0;
+}
+
+/* Reads the status of each driver and updates the LSS values.
+ * To be called with scu_ready_sem mutex held, and pmu_config
+ * initialized with '0's
+ */
+static void update_all_lss_states(struct pmu_ss_states *pmu_config)
+{
+	u32 PCIALLDEV_CFG[4] = {0, 0, 0, 0};
+
+	platform_update_all_lss_states(pmu_config, PCIALLDEV_CFG);
+}
+
+static int pmu_init(void)
+{
+	int status;
+	struct pmu_ss_states pmu_config;
+	struct pmu_suspend_config *ss_config;
+	int ret = 0;
+	int retry_times = 0;
+
+
+	dev_dbg(&mid_pmu_cxt->pmu_dev->dev, "PMU Driver loaded\n");
+	spin_lock_init(&mid_pmu_cxt->nc_ready_lock);
+
+	/* enumerate the PCI configuration space */
+	pmu_enumerate();
+
+	/* initialize the stats for pmu driver */
+	pmu_stats_init();
+
+	/* register platform pmu ops */
+	platform_set_pmu_ops();
+
+	/* platform specific initialization */
+	if (pmu_ops->init) {
+		status = pmu_ops->init();
+		if (status) {
+			dev_dbg(&mid_pmu_cxt->pmu_dev->dev,
+				"pmu_ops->init failed\n");
+			goto out_err1;
+		}
+	}
+
+	/* initialize the state variables here */
+	ss_config = devm_kzalloc(&mid_pmu_cxt->pmu_dev->dev,
+			sizeof(struct pmu_suspend_config), GFP_KERNEL);
+	if (!ss_config) {
+		dev_dbg(&mid_pmu_cxt->pmu_dev->dev,
+			"Allocation of memory for ss_config has failed\n");
+		status = -ENOMEM;
+		goto out_err1;
+	}
+
+	memset(&pmu_config, 0, sizeof(pmu_config));
+
+	ss_config->ss_state = pmu_config;
+
+	/* initialize for the autonomous S0i3 */
+	mid_pmu_cxt->ss_config = ss_config;
+
+	/* setup the wake capable devices */
+	mid_pmu_cxt->ss_config->wake_state.wake_enable[0] = WAKE_ENABLE_0;
+	mid_pmu_cxt->ss_config->wake_state.wake_enable[1] = WAKE_ENABLE_1;
+
+	/* setup the ignore lss list */
+	mid_pmu_cxt->ignore_lss[0] = pmu_ignore_lss0;
+	mid_pmu_cxt->ignore_lss[1] = pmu_ignore_lss1;
+	mid_pmu_cxt->ignore_lss[2] = pmu_ignore_lss2;
+	mid_pmu_cxt->ignore_lss[3] = pmu_ignore_lss3;
+
+	/*set wkc to appropriate value suitable for s0ix*/
+	writel(mid_pmu_cxt->ss_config->wake_state.wake_enable[0],
+		       &mid_pmu_cxt->pmu_reg->pm_wkc[0]);
+	writel(mid_pmu_cxt->ss_config->wake_state.wake_enable[1],
+		       &mid_pmu_cxt->pmu_reg->pm_wkc[1]);
+
+	/* Acquire the scu_ready_sem */
+	down(&mid_pmu_cxt->scu_ready_sem);
+
+	/* Now we have initialized the driver
+	 * Allow drivers to get blocked in
+	 * pmu_pci_set_power_state(), until we finish
+	 * first interactive command.
+	 */
+
+	pmu_initialized = true;
+
+	/* get the current status of each of the driver
+	 * and update it in SCU
+	 */
+	update_all_lss_states(&pmu_config);
+
+	/* In MOFD LSS 16 is used by PTI and LSS 15 is used by DFX
+	 * and should not be powered down during init
+	 */
+	if (platform_is(INTEL_ATOM_MOORFLD)) {
+		pmu_config.pmu2_states[0] &=
+			~SSMSK(D0I3_MASK, 15);
+		pmu_config.pmu2_states[1] &=
+			~SSMSK(D0I3_MASK, 0);
+	}
+
+	status = pmu_issue_interactive_command(&pmu_config, false,
+						false);
+	if (status != PMU_SUCCESS) {
+		dev_dbg(&mid_pmu_cxt->pmu_dev->dev,\
+		 "Failure from pmu mode change to interactive."
+		" = %d\n", status);
+		status = PMU_FAILED;
+		up(&mid_pmu_cxt->scu_ready_sem);
+		goto out_err1;
+	}
+
+	/*
+	 * Wait for interactive command to complete.
+	 * If we dont wait, there is a possibility that
+	 * the driver may access the device before its
+	 * powered on in SCU.
+	 *
+	 */
+retry:
+	ret = _pmu2_wait_not_busy();
+	if (unlikely(ret)) {
+		retry_times++;
+		if (retry_times < 60) {
+			usleep_range(10, 500);
+			goto retry;
+		} else {
+			pmu_dump_logs();
+			BUG();
+		}
+	}
+
+	/* In cases were gfx is not enabled
+	 * this will enable s0ix immediately
+	 */
+	if (pmu_ops->set_power_state_ops)
+		pmu_ops->set_power_state_ops(PCI_D3hot);
+
+	up(&mid_pmu_cxt->scu_ready_sem);
+
+	return PMU_SUCCESS;
+
+out_err1:
+	return status;
+}
+
+/**
+ * mid_pmu_probe - This is the function where most of the PMU driver
+ *		   initialization happens.
+ */
+static int
+mid_pmu_probe(struct pci_dev *dev, const struct pci_device_id *pci_id)
+{
+	int ret;
+	struct mrst_pmu_reg __iomem *pmu;
+
+	mid_pmu_cxt->pmu_wake_lock =
+				wakeup_source_register("pmu_wake_lock");
+
+	if (!mid_pmu_cxt->pmu_wake_lock) {
+		pr_err("%s: unable to register pmu wake source.\n", __func__);
+		return -ENOMEM;
+	}
+
+	/* Init the device */
+	ret = pci_enable_device(dev);
+	if (ret) {
+		pr_err("Mid PM device cant be enabled\n");
+		goto out_err0;
+	}
+
+	/* store the dev */
+	mid_pmu_cxt->pmu_dev = dev;
+	dev_warn(&dev->dev, "PMU DRIVER Probe called\n");
+
+	ret = pci_request_regions(dev, PMU_DRV_NAME);
+	if (ret < 0) {
+		pr_err("pci request region has failed\n");
+		goto out_err1;
+	}
+
+	mid_pmu_cxt->pmu1_max_devs = PMU1_MAX_DEVS;
+	mid_pmu_cxt->pmu2_max_devs = PMU2_MAX_DEVS;
+	mid_pmu_cxt->ss_per_reg = 16;
+
+	/* Map the memory of pmu1 PMU reg base */
+	pmu = pci_iomap(dev, 0, 0);
+	if (pmu == NULL) {
+		dev_dbg(&mid_pmu_cxt->pmu_dev->dev,
+				"Unable to map the PMU2 address space\n");
+		ret = -EIO;
+		goto out_err2;
+	}
+
+	mid_pmu_cxt->pmu_reg = pmu;
+
+	/* CCU is in same PCI device with PMU, offset is 0x800 */
+	ccu_osc_clk_init((void __iomem *)pmu + 0x800);
+
+	/* Map the memory of emergency emmc up */
+	mid_pmu_cxt->emergency_emmc_up_addr =
+		devm_ioremap_nocache(&mid_pmu_cxt->pmu_dev->dev, PMU_PANIC_EMMC_UP_ADDR, 4);
+	if (!mid_pmu_cxt->emergency_emmc_up_addr) {
+		dev_dbg(&mid_pmu_cxt->pmu_dev->dev,
+		"Unable to map the emergency emmc up address space\n");
+		ret = -ENOMEM;
+		goto out_err3;
+	}
+
+	if (devm_request_irq(&mid_pmu_cxt->pmu_dev->dev, dev->irq, pmu_sc_irq,
+			IRQF_NO_SUSPEND, PMU_DRV_NAME, NULL)) {
+		dev_dbg(&mid_pmu_cxt->pmu_dev->dev, "Registering isr has failed\n");
+		ret = -ENOENT;
+		goto out_err3;
+	}
+
+	/* call pmu init() for initialization of pmu interface */
+	ret = pmu_init();
+	if (ret != PMU_SUCCESS) {
+		dev_dbg(&mid_pmu_cxt->pmu_dev->dev, "PMU initialization has failed\n");
+		goto out_err3;
+	}
+	dev_warn(&mid_pmu_cxt->pmu_dev->dev, "after pmu initialization\n");
+
+	mid_pmu_cxt->pmu_init_time = cpu_clock(0);
+
+#ifdef CONFIG_PM_DEBUG
+	/*
+	 * FIXME: Since S3 is not enabled yet we need to take
+	 * a wake lock here. Else S3 will be triggered on display
+	 * time out and platform will hang
+	 */
+	if ((platform_is(INTEL_ATOM_MRFLD) || platform_is(INTEL_ATOM_MOORFLD))
+								 && !enable_s3)
+		__pm_stay_awake(mid_pmu_cxt->pmu_wake_lock);
+#endif
+	return 0;
+
+out_err3:
+	iounmap(mid_pmu_cxt->pmu_reg);
+out_err2:
+	pci_release_region(dev, 0);
+out_err1:
+	pci_disable_device(dev);
+out_err0:
+	wakeup_source_unregister(mid_pmu_cxt->pmu_wake_lock);
+
+	return ret;
+}
+
+static void mid_pmu_remove(struct pci_dev *dev)
+{
+	/* Freeing up the irq */
+	free_irq(dev->irq, &pmu_sc_irq);
+
+	/* If CCU/OSC is inuse, don't remove PMU */
+	if (ccu_osc_clk_uninit() < 0) {
+		pr_warn("ccu/osc is in using. abort\n");
+		return;
+	}
+
+	if (pmu_ops->remove)
+		pmu_ops->remove();
+
+	pci_iounmap(dev, mid_pmu_cxt->pmu_reg);
+
+	/* disable the current PCI device */
+	pci_release_region(dev, 0);
+	pci_disable_device(dev);
+
+	wakeup_source_unregister(mid_pmu_cxt->pmu_wake_lock);
+}
+
+static void mid_pmu_shutdown(struct pci_dev *dev)
+{
+	dev_dbg(&mid_pmu_cxt->pmu_dev->dev, "Mid PM mid_pmu_shutdown called\n");
+
+	if (mid_pmu_cxt) {
+		/* Restrict platform Cx state to C6 */
+		pm_qos_update_request(mid_pmu_cxt->s3_restrict_qos,
+					(CSTATE_EXIT_LATENCY_S0i1-1));
+
+		down(&mid_pmu_cxt->scu_ready_sem);
+		mid_pmu_cxt->shutdown_started = true;
+		up(&mid_pmu_cxt->scu_ready_sem);
+	}
+}
+
+static struct pci_driver driver = {
+	.name = PMU_DRV_NAME,
+	.id_table = mid_pm_ids,
+	.probe = mid_pmu_probe,
+	.remove = mid_pmu_remove,
+	.shutdown = mid_pmu_shutdown
+};
+
+static int standby_enter(void)
+{
+	u32 temp = 0;
+	int s3_state = mid_state_to_sys_state(MID_S3_STATE);
+
+	if (mid_s0ix_enter(MID_S3_STATE) != MID_S3_STATE) {
+		pmu_set_s0ix_complete();
+		return -EINVAL;
+	}
+
+	/* time stamp for end of s3 entry */
+	time_stamp_for_sleep_state_latency(s3_state, false, true);
+
+	__monitor((void *) &temp, 0, 0);
+	smp_mb();
+	__mwait(mid_pmu_cxt->s3_hint, 1);
+	/* time stamp for start of s3 exit */
+	time_stamp_for_sleep_state_latency(s3_state, true, false);
+
+	pmu_set_s0ix_complete();
+
+	/*set wkc to appropriate value suitable for s0ix*/
+	writel(mid_pmu_cxt->ss_config->wake_state.wake_enable[0],
+		       &mid_pmu_cxt->pmu_reg->pm_wkc[0]);
+	writel(mid_pmu_cxt->ss_config->wake_state.wake_enable[1],
+		       &mid_pmu_cxt->pmu_reg->pm_wkc[1]);
+
+	mid_pmu_cxt->camera_off = 0;
+	mid_pmu_cxt->display_off = 0;
+
+	if (platform_is(INTEL_ATOM_MRFLD) || platform_is(INTEL_ATOM_MOORFLD))
+		up(&mid_pmu_cxt->scu_ready_sem);
+
+	return 0;
+}
+
+static int mid_suspend_begin(suspend_state_t state)
+{
+	mid_pmu_cxt->suspend_started = true;
+	pmu_s3_stats_update(1);
+
+	/* Restrict to C6 during suspend */
+	pm_qos_update_request(mid_pmu_cxt->s3_restrict_qos,
+					(CSTATE_EXIT_LATENCY_S0i1-1));
+	return 0;
+}
+
+static int mid_suspend_valid(suspend_state_t state)
+{
+	int ret = 0;
+
+	switch (state) {
+	case PM_SUSPEND_ON:
+	case PM_SUSPEND_MEM:
+		/* check if we are ready */
+		if (likely(pmu_initialized))
+			ret = 1;
+	break;
+	}
+
+	return ret;
+}
+
+static int mid_suspend_prepare(void)
+{
+	return 0;
+}
+
+static int mid_suspend_prepare_late(void)
+{
+	return 0;
+}
+
+static int mid_suspend_enter(suspend_state_t state)
+{
+	int ret;
+
+	if (state != PM_SUSPEND_MEM)
+		return -EINVAL;
+
+	/*FIXME:: On MOFD target mask is incorrect, hence avoid check for MOFD*/
+	if (!platform_is(INTEL_ATOM_MOORFLD)) {
+		/* one last check before entering standby */
+		if (pmu_ops->check_nc_sc_status) {
+			if (!(pmu_ops->check_nc_sc_status())) {
+				trace_printk("Device d0ix status check failed! Aborting Standby entry!\n");
+				WARN_ON(1);
+			}
+		}
+	}
+
+	trace_printk("s3_entry\n");
+	ret = standby_enter();
+	trace_printk("s3_exit %d\n", ret);
+	if (ret != 0)
+		dev_dbg(&mid_pmu_cxt->pmu_dev->dev,
+				"Failed to enter S3 status: %d\n", ret);
+
+	return ret;
+}
+
+static void mid_suspend_end(void)
+{
+	/* allow s0ix now */
+	pm_qos_update_request(mid_pmu_cxt->s3_restrict_qos,
+					PM_QOS_DEFAULT_VALUE);
+
+	pmu_s3_stats_update(0);
+	mid_pmu_cxt->suspend_started = false;
+}
+
+static const struct platform_suspend_ops mid_suspend_ops = {
+	.begin = mid_suspend_begin,
+	.valid = mid_suspend_valid,
+	.prepare = mid_suspend_prepare,
+	.prepare_late = mid_suspend_prepare_late,
+	.enter = mid_suspend_enter,
+	.end = mid_suspend_end,
+};
+
+/**
+ * mid_pci_register_init - register the PMU driver as PCI device
+ */
+static int __init mid_pci_register_init(void)
+{
+	int ret;
+
+	mid_pmu_cxt = kzalloc(sizeof(struct mid_pmu_dev), GFP_KERNEL);
+
+	if (mid_pmu_cxt == NULL)
+		return -ENOMEM;
+
+	mid_pmu_cxt->s3_restrict_qos =
+		kzalloc(sizeof(struct pm_qos_request), GFP_KERNEL);
+	if (mid_pmu_cxt->s3_restrict_qos) {
+		pm_qos_add_request(mid_pmu_cxt->s3_restrict_qos,
+			 PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
+	} else {
+		return -ENOMEM;
+	}
+
+	init_nc_device_states();
+
+	mid_pmu_cxt->nc_restrict_qos =
+		kzalloc(sizeof(struct pm_qos_request), GFP_KERNEL);
+	if (mid_pmu_cxt->nc_restrict_qos == NULL)
+		return -ENOMEM;
+
+	/* initialize the semaphores */
+	sema_init(&mid_pmu_cxt->scu_ready_sem, 1);
+
+	/* registering PCI device */
+	ret = pci_register_driver(&driver);
+	suspend_set_ops(&mid_suspend_ops);
+
+	return ret;
+}
+fs_initcall(mid_pci_register_init);
+
+void pmu_power_off(void)
+{
+	/* wait till SCU is ready */
+	if (!_pmu2_wait_not_busy())
+		writel(S5_VALUE, &mid_pmu_cxt->pmu_reg->pm_cmd);
+
+	else {
+		/* If PM_BUSY bit is not clear issue COLD_OFF */
+		WARN(1, "%s: pmu busy bit not cleared.\n", __func__);
+		rpmsg_send_generic_simple_command(IPCMSG_COLD_RESET, 1);
+	}
+}
+
+static void __exit mid_pci_cleanup(void)
+{
+	if (mid_pmu_cxt) {
+		if (mid_pmu_cxt->s3_restrict_qos)
+			pm_qos_remove_request(mid_pmu_cxt->s3_restrict_qos);
+
+		if (pm_qos_request_active(mid_pmu_cxt->nc_restrict_qos))
+			pm_qos_remove_request(mid_pmu_cxt->nc_restrict_qos);
+	}
+
+	suspend_set_ops(NULL);
+
+	/* registering PCI device */
+	pci_unregister_driver(&driver);
+
+	if (mid_pmu_cxt)
+		pmu_stats_finish();
+
+	kfree(mid_pmu_cxt);
+}
+module_exit(mid_pci_cleanup);
diff --git a/arch/x86/platform/intel-mid/intel_soc_pmu.h b/arch/x86/platform/intel-mid/intel_soc_pmu.h
new file mode 100644
index 0000000..73ab7fb
--- /dev/null
+++ b/arch/x86/platform/intel-mid/intel_soc_pmu.h
@@ -0,0 +1,540 @@
+/*
+ * intel_soc_pmu.h
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+#ifndef _MID_PMU_H_
+#define _MID_PMU_H_
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/pci.h>
+#include <linux/semaphore.h>
+#include <linux/completion.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <linux/jhash.h>
+#include <linux/suspend.h>
+#include <linux/workqueue.h>
+#include <linux/nmi.h>
+#include <linux/pm_qos.h>
+#include <linux/pm_wakeup.h>
+#include <asm/apic.h>
+#include <asm/intel_scu_ipc.h>
+#include <linux/intel_mid_pm.h>
+
+#include "intel_soc_mrfld.h"
+
+#define MID_PMU_MRFL_DRV_DEV_ID			0x11A1
+
+#define MID_MRFL_HDMI_DRV_DEV_ID		0x11A6
+
+/* SRAM address where PANIC START is written */
+#define PMU_PANIC_EMMC_UP_ADDR			0xFFFF3080
+#define PMU_PANIC_EMMC_UP_REQ_CMD		0xDEADBEEF
+
+#define MAX_DEVICES	(PMU1_MAX_DEVS + PMU2_MAX_DEVS)
+#define PMU_MAX_LSS_SHARE 4
+
+#define PMU2_BUSY_TIMEOUT			500000
+#define HSU0_PCI_ID				0x81c
+#define HSU1_PCI_ID				0x81b
+#define HSI_PCI_ID				0x833
+
+#define MODE_ID_MAGIC_NUM			1
+
+#define   LOG_ID_MASK				0x7F
+#define   SUB_CLASS_MASK			0xFF00
+
+
+/* Definition for C6 Offload MSR Address */
+#define MSR_C6OFFLOAD_CTL_REG			0x120
+
+#define MSR_C6OFFLOAD_SET_LOW			1
+#define MSR_C6OFFLOAD_SET_HIGH			0
+
+#define C6OFFLOAD_BIT_MASK			0x2
+#define C6OFFLOAD_BIT				0x2
+
+#define PMU_DRV_NAME				"intel_pmu_driver"
+
+#define MID_PCI_INDEX_HASH_BITS		8 /*size 256*/
+#define MID_PCI_INDEX_HASH_SIZE		(1<<MID_PCI_INDEX_HASH_BITS)
+#define MID_PCI_INDEX_HASH_MASK		(MID_PCI_INDEX_HASH_SIZE-1)
+
+/* some random number for initvalue */
+#define	MID_PCI_INDEX_HASH_INITVALUE	0x27041975
+
+/*
+ * Values for programming the PM_CMD register based on the PM
+ * architecture speci
+*/
+
+#define S5_VALUE	0x309D2601
+#define S0I1_VALUE	0X30992601
+#define LPMP3_VALUE	0X40492601
+#define S0I3_VALUE	0X309B2601
+#define FAST_ON_OFF_VALUE	0X309E2601
+#define INTERACTIVE_VALUE	0X00002201
+#define INTERACTIVE_IOC_VALUE	0X00002301
+
+#define WAKE_ENABLE_0		0xffffffff
+#define WAKE_ENABLE_1		0xffffffff
+#define INVALID_WAKE_SRC	0xFFFF
+
+#define LOG_SS_MASK		0x80
+
+#define D0I0_MASK		0
+#define D0I1_MASK		1
+#define D0I2_MASK		2
+#define D0I3_MASK		3
+
+#define BITS_PER_LSS		2
+#define MAX_LSS_POSSIBLE	64
+#define SS_IDX_MASK		0x3
+#define SS_POS_MASK		0xF
+
+#define SSMSK(mask, lss) ((mask) << ((lss) * 2))
+#define SSWKC(lss) (1 << (lss))
+
+/* North Complex Power management */
+#define OSPM_PUNIT_PORT         0x04
+#define OSPM_OSPMBA             0x78
+#define OSPM_PM_SSC             0x20
+#define OSPM_PM_SSS             0x30
+
+#define OSPM_APMBA              0x7a
+#define APM_CMD                 0x0
+#define APM_STS                 0x04
+#define PM_CMD_D3_COLD		(0x1 << 21)
+
+/* Size of command logging array */
+#define LOG_SIZE	5
+
+enum sys_state {
+	SYS_STATE_S0I0,
+	SYS_STATE_S0I1,
+	SYS_STATE_S0I1_LPMP3,
+	SYS_STATE_S0I1_PSH,
+	SYS_STATE_S0I1_DISP,
+	SYS_STATE_S0I1_LPMP3_PSH,
+	SYS_STATE_S0I1_LPMP3_DISP,
+	SYS_STATE_S0I1_PSH_DISP,
+	SYS_STATE_S0I1_LPMP3_PSH_DISP,
+	SYS_STATE_S0I2,
+	SYS_STATE_S0I3,
+	SYS_STATE_S0I3_PSH_RET,
+	SYS_STATE_S3,
+	SYS_STATE_S5,
+	SYS_STATE_MAX
+};
+
+enum int_status {
+	INVALID_INT = 0,
+	CMD_COMPLETE_INT = 1,
+	CMD_ERROR_INT = 2,
+	WAKE_RECEIVED_INT = 3,
+	SUBSYS_POW_ERR_INT = 4,
+	S0ix_MISS_INT = 5,
+	NO_ACKC6_INT = 6,
+	TRIGGERERR = 7,
+	INVALID_SRC_INT
+};
+
+enum pmu_number {
+	PMU_NUM_1,
+	PMU_NUM_2,
+	PMU_MAX_DEVS
+};
+
+enum pmu_ss_state {
+	SS_STATE_D0I0 = 0,
+	SS_STATE_D0I1 = 1,
+	SS_STATE_D0I2 = 2,
+	SS_STATE_D0I3 = 3
+};
+
+enum pmu_mrfl_nc_device_name {
+	GFXSLC = 0,
+	GSDKCK,
+	GRSCD,
+	VED,
+	VEC,
+	DPA,
+	DPB,
+	DPC,
+	VSP,
+	ISP,
+	MIO,
+	HDMIO,
+	GFXSLCLDO
+};
+
+
+struct pmu_ss_states {
+	unsigned long pmu1_states;
+	unsigned long pmu2_states[4];
+};
+
+struct pci_dev_info {
+	u8 ss_pos;
+	u8 ss_idx;
+	u8 pmu_num;
+
+	u32 log_id;
+	u32 cap;
+	struct pci_dev *drv[PMU_MAX_LSS_SHARE];
+	pci_power_t power_state[PMU_MAX_LSS_SHARE];
+};
+
+struct pmu_wake_ss_states {
+	unsigned long wake_enable[2];
+	unsigned long pmu1_wake_states;
+	unsigned long pmu2_wake_states[4];
+};
+
+struct pmu_suspend_config {
+	struct pmu_ss_states ss_state;
+	struct pmu_wake_ss_states wake_state;
+};
+
+struct pci_dev_index {
+	struct pci_dev	*pdev;
+	u8		index;
+};
+
+/* PMU register interface */
+struct mrst_pmu_reg {
+	u32 pm_sts;             /* 0x00 */
+	u32 pm_cmd;             /* 0x04 */
+	u32 pm_ics;             /* 0x08 */
+	u32 _resv1;
+	u32 pm_wkc[2];          /* 0x10 */
+	u32 pm_wks[2];          /* 0x18 */
+	u32 pm_ssc[4];          /* 0x20 */
+	u32 pm_sss[4];          /* 0x30 */
+	u32 pm_wssc[4];         /* 0x40 */
+	u32 pm_c3c4;            /* 0x50 */
+	u32 pm_c5c6;            /* 0x54 */
+	u32 pm_msic;            /* 0x58 */
+};
+
+struct mid_pmu_cmd_log {
+	struct timespec ts;
+	u32 command;
+	struct pmu_ss_states pm_ssc;
+};
+
+struct mid_pmu_irq_log {
+	struct timespec ts;
+	u32 status;
+};
+
+struct mid_pmu_ipc_log {
+	struct timespec ts;
+	u32 command;
+};
+
+struct mid_pmu_pmu_irq_log {
+	struct timespec ts;
+	u8 status;
+};
+
+struct mid_pmu_ipc_irq_log {
+	struct timespec ts;
+};
+
+union pmu_pm_status {
+	struct {
+		u32 pmu_rev:8;
+		u32 pmu_busy:1;
+		u32 mode_id:4;
+		u32 Reserved:19;
+	} pmu_status_parts;
+	u32 pmu_status_value;
+};
+
+union pmu_pm_ics {
+	struct {
+		u32 int_status:8;
+		u32 int_enable:1;
+		u32 int_pend:1;
+		/* New bit added in TNG to indicate device wakes*/
+		u32 sw_int_status:1;
+		u32 reserved:21;
+	} pmu_pm_ics_parts;
+	u32 pmu_pm_ics_value;
+};
+
+struct intel_mid_base_addr {
+	u32 *pm_table_base;
+	u32 __iomem *offload_reg;
+};
+
+#define MAX_PMU_LOG_STATES	(S0I3_STATE_IDX - C4_STATE_IDX + 1)
+
+struct mid_pmu_stats {
+	u64 err_count[3];
+	u64 count;
+	u64 time;
+	u64 last_entry;
+	u64 last_try;
+	u64 first_entry;
+	u32 demote_count[MAX_PMU_LOG_STATES];
+	u32 display_blocker_count;
+	u32 camera_blocker_count;
+	u32 blocker_count[MAX_LSS_POSSIBLE];
+};
+
+struct device_residency {
+	u64 d0i0_entry;
+	u64 d0i3_entry;
+	u64 d0i0_acc;
+	u64 d0i3_acc;
+	u64 start;
+	pci_power_t state;
+};
+
+struct mid_pmu_dev {
+	bool suspend_started;
+	bool shutdown_started;
+	bool camera_off;
+	bool display_off;
+
+	u32 apm_base;
+	u32 ospm_base;
+	u32 pmu1_max_devs;
+	u32 pmu2_max_devs;
+	u32 ss_per_reg;
+	u32 d0ix_stat[MAX_LSS_POSSIBLE][SS_STATE_D0I3+1];
+	u32 num_wakes[MAX_DEVICES][SYS_STATE_MAX];
+	u32 ignore_lss[4];
+	u32 os_sss[4];
+#ifdef CONFIG_PM_DEBUG
+	u32 cstate_ignore;
+	struct pm_qos_request *cstate_qos;
+#endif
+
+	u32 __iomem *emergency_emmc_up_addr;
+	u64 pmu_init_time;
+
+	u32 d0i0_count[MAX_LSS_POSSIBLE];
+	u64 d0i0_prev_time[MAX_LSS_POSSIBLE];
+	u64 d0i0_time[MAX_LSS_POSSIBLE];
+
+	u32 nc_d0i0_count[OSPM_MAX_POWER_ISLANDS];
+	u64 nc_d0i0_time[OSPM_MAX_POWER_ISLANDS];
+	u64 nc_d0i0_prev_time[OSPM_MAX_POWER_ISLANDS];
+
+	int cmd_error_int;
+	int s0ix_possible;
+	int s0ix_entered;
+
+#ifdef LOG_PMU_EVENTS
+	int cmd_log_idx;
+	int ipc_log_idx;
+	int ipc_irq_log_idx;
+	int pmu_irq_log_idx;
+#endif
+
+	enum sys_state  pmu_current_state;
+
+	struct pci_dev_info pci_devs[MAX_DEVICES];
+	struct pci_dev_index
+		pci_dev_hash[MID_PCI_INDEX_HASH_SIZE];
+	struct intel_mid_base_addr base_addr;
+	struct mrst_pmu_reg	__iomem *pmu_reg;
+	struct semaphore scu_ready_sem;
+	struct mid_pmu_stats pmu_stats[SYS_STATE_MAX];
+	struct device_residency pmu_dev_res[MAX_DEVICES];
+	struct delayed_work log_work;
+	struct pm_qos_request *s3_restrict_qos;
+
+#ifdef LOG_PMU_EVENTS
+	struct mid_pmu_cmd_log cmd_log[LOG_SIZE];
+	struct mid_pmu_ipc_log ipc_log[LOG_SIZE];
+	struct mid_pmu_ipc_irq_log ipc_irq_log[LOG_SIZE];
+	struct mid_pmu_pmu_irq_log pmu_irq_log[LOG_SIZE];
+#endif
+	struct wakeup_source *pmu_wake_lock;
+
+	struct pmu_suspend_config *ss_config;
+	struct pci_dev *pmu_dev;
+	struct pm_qos_request *nc_restrict_qos;
+
+	spinlock_t nc_ready_lock;
+
+	int s3_hint;
+};
+
+struct platform_pmu_ops {
+	int (*init)(void);
+	void (*prepare)(int);
+	bool (*enter)(int);
+	void (*wakeup)(void);
+	void (*remove)(void);
+	pci_power_t (*pci_choose_state) (int);
+	void (*set_power_state_ops) (int);
+	void (*set_s0ix_complete) (void);
+	void (*set_s0i1_disp_vote) (bool);
+	int (*nc_set_power_state) (int, int, int, int *);
+	bool (*check_nc_sc_status) (void);
+};
+
+extern char s0ix[5];
+extern struct platform_pmu_ops mfld_pmu_ops;
+extern struct platform_pmu_ops clv_pmu_ops;
+extern struct platform_pmu_ops mrfld_pmu_ops;
+extern struct platform_pmu_ops *get_platform_ops(void);
+extern void mfld_s0ix_sram_save_cleanup(void);
+extern void pmu_stats_init(void);
+extern void pmu_s3_stats_update(int enter);
+extern void pmu_stats_finish(void);
+extern void mfld_s0ix_sram_restore(u32 s0ix);
+extern void pmu_stat_error(u8 err_type);
+extern void pmu_stat_end(void);
+extern void pmu_stat_start(enum sys_state type);
+extern int pmu_pci_to_indexes(struct pci_dev *pdev, int *index,
+				int *pmu_num, int *ss_idx, int *ss_pos);
+extern struct mid_pmu_dev *mid_pmu_cxt;
+extern void platform_set_pmu_ops(void);
+extern void pmu_read_sss(struct pmu_ss_states *pm_ssc);
+extern int pmu_issue_interactive_command(struct pmu_ss_states *pm_ssc,
+				bool ioc, bool d3_cold);
+extern int _pmu2_wait_not_busy(void);
+extern u32 get_s0ix_val_set_pm_ssc(int);
+extern int pmu_get_wake_source(void);
+extern bool pmu_initialized;
+extern struct platform_pmu_ops *pmu_ops;
+extern void platform_update_all_lss_states(struct pmu_ss_states *, int *);
+extern int set_extended_cstate_mode(const char *val, struct kernel_param *kp);
+extern int get_extended_cstate_mode(char *buffer, struct kernel_param *kp);
+extern int byt_pmu_nc_set_power_state(int islands, int state_type, int reg);
+extern int byt_pmu_nc_get_power_state(int islands, int reg);
+extern void pmu_set_interrupt_enable(void);
+extern void pmu_clear_interrupt_enable(void);
+
+#ifdef LOG_PMU_EVENTS
+extern void pmu_log_pmu_irq(int status);
+extern void pmu_log_command(u32 command, struct pmu_ss_states *pm_ssc);
+extern void pmu_dump_logs(void);
+#endif
+
+/* Accessor function for pci_devs start */
+static inline void pmu_stat_clear(void)
+{
+	mid_pmu_cxt->pmu_current_state = SYS_STATE_S0I0;
+}
+
+static inline struct pci_dev *get_mid_pci_drv(int lss_index, int i)
+{
+	return mid_pmu_cxt->pci_devs[lss_index].drv[i];
+}
+
+static inline pci_power_t get_mid_pci_power_state(int lss_index, int i)
+{
+	return mid_pmu_cxt->pci_devs[lss_index].power_state[i];
+}
+
+static inline u8 get_mid_pci_ss_idx(int lss_index)
+{
+	return mid_pmu_cxt->pci_devs[lss_index].ss_idx & SS_IDX_MASK;
+}
+
+static inline u8 get_mid_pci_ss_pos(int lss_index)
+{
+	return mid_pmu_cxt->pci_devs[lss_index].ss_pos & SS_POS_MASK;
+}
+
+static inline u8 get_mid_pci_pmu_num(int lss_index)
+{
+	return mid_pmu_cxt->pci_devs[lss_index].pmu_num;
+}
+
+static inline void set_mid_pci_drv(int lss_index,
+					int i, struct pci_dev *pdev)
+{
+	mid_pmu_cxt->pci_devs[lss_index].drv[i] = pdev;
+}
+
+static inline void set_mid_pci_power_state(int lss_index,
+					int i, pci_power_t state)
+{
+	mid_pmu_cxt->pci_devs[lss_index].power_state[i] = state;
+}
+
+static inline void set_mid_pci_ss_idx(int lss_index, u8 ss_idx)
+{
+	mid_pmu_cxt->pci_devs[lss_index].ss_idx = ss_idx;
+}
+
+static inline void set_mid_pci_ss_pos(int lss_index, u8 ss_pos)
+{
+	mid_pmu_cxt->pci_devs[lss_index].ss_pos = ss_pos;
+}
+
+static inline void set_mid_pci_pmu_num(int lss_index, u8 pmu_num)
+{
+	mid_pmu_cxt->pci_devs[lss_index].pmu_num = pmu_num;
+}
+
+static inline void set_mid_pci_log_id(int lss_index, u32 log_id)
+{
+	mid_pmu_cxt->pci_devs[lss_index].log_id = log_id;
+}
+
+static inline void set_mid_pci_cap(int lss_index, u32 cap)
+{
+	mid_pmu_cxt->pci_devs[lss_index].cap = cap;
+}
+
+static inline u32 get_d0ix_stat(int lss_index, int state)
+{
+	return mid_pmu_cxt->d0ix_stat[lss_index][state];
+}
+
+static inline void inc_d0ix_stat(int lss_index, int state)
+{
+	mid_pmu_cxt->d0ix_stat[lss_index][state]++;
+}
+
+static inline void clear_d0ix_stats(void)
+{
+	memset(mid_pmu_cxt->d0ix_stat, 0, sizeof(mid_pmu_cxt->d0ix_stat));
+}
+
+/* Accessor functions for pci_devs end */
+
+static inline bool nc_device_state(void)
+{
+	return !mid_pmu_cxt->display_off || !mid_pmu_cxt->camera_off;
+}
+
+#ifdef CONFIG_X86_INTEL_OSC_CLK
+extern int ccu_osc_clk_init(void __iomem *ccubase);
+extern int ccu_osc_clk_uninit(void);
+#else
+static inline int ccu_osc_clk_init(void __iomem *ccubase) {return 0; }
+static inline int ccu_osc_clk_uninit(void) {return 0; }
+#endif /* CONFIG_X86_INTEL_OSC_CLK */
+#endif
diff --git a/arch/x86/platform/intel-mid/mrfl.c b/arch/x86/platform/intel-mid/mrfl.c
new file mode 100644
index 0000000..7eaa8b9
--- /dev/null
+++ b/arch/x86/platform/intel-mid/mrfl.c
@@ -0,0 +1,222 @@
+/*
+ * mrfl.c: Intel Merrifield platform specific setup code
+ *
+ * (C) Copyright 2012 Intel Corporation
+ * Author: Mark F. Brown <mark.f.brown@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/power_supply.h>
+#include <linux/power/battery_id.h>
+#include <asm/setup.h>
+#include <asm/intel-mid.h>
+#include <asm/processor.h>
+
+#define APIC_DIVISOR 16
+#define MRFL_I2_TERM_MA 120
+
+static void (*intel_mid_timer_init)(void);
+static struct ps_pse_mod_prof *battery_chrg_profile;
+static struct ps_batt_chg_prof *ps_batt_chrg_profile;
+
+static void tangier_arch_setup(void);
+
+/* tangier arch ops */
+static struct intel_mid_ops tangier_ops = {
+	.arch_setup = tangier_arch_setup,
+};
+
+static unsigned long __init tangier_calibrate_tsc(void)
+{
+	/* [REVERT ME] fast timer calibration method to be defined */
+	unsigned long fast_calibrate;
+	u32 lo, hi, ratio, fsb, bus_freq;
+
+	/* *********************** */
+	/* Compute TSC:Ratio * FSB */
+	/* *********************** */
+
+	/* Compute Ratio */
+	rdmsr(MSR_PLATFORM_INFO, lo, hi);
+	pr_debug("IA32 PLATFORM_INFO is 0x%x : %x\n", hi, lo);
+
+	ratio = (lo >> 8) & 0xFF;
+	pr_debug("ratio is %d\n", ratio);
+	if (!ratio) {
+		pr_err("Read a zero ratio, force tsc ratio to 4 ...\n");
+		ratio = 4;
+	}
+
+	/* Compute FSB */
+	rdmsr(MSR_FSB_FREQ, lo, hi);
+	pr_debug("Actual FSB frequency detected by SOC 0x%x : %x\n",
+		hi, lo);
+
+	bus_freq = lo & 0x7;
+	pr_debug("bus_freq = 0x%x\n", bus_freq);
+
+	if (bus_freq == 0)
+		fsb = FSB_FREQ_83SKU;
+	else if (bus_freq == 1)
+		fsb = FSB_FREQ_100SKU;
+	else if (bus_freq == 2)
+		fsb = FSB_FREQ_133SKU;
+	else if (bus_freq == 3)
+		fsb = FSB_FREQ_167SKU;
+	else if (bus_freq == 4)
+		fsb = FSB_FREQ_83SKU;
+	else if (bus_freq == 5)
+		fsb = FSB_FREQ_400SKU;
+	else if (bus_freq == 6)
+		fsb = FSB_FREQ_267SKU;
+	else if (bus_freq == 7)
+		fsb = FSB_FREQ_333SKU;
+	else {
+		BUG();
+		pr_err("Invalid bus_freq! Setting to minimal value!\n");
+		fsb = FSB_FREQ_100SKU;
+	}
+
+	/* TSC = FSB Freq * Resolved HFM Ratio */
+	fast_calibrate = ratio * fsb;
+	pr_debug("calculate tangier tsc %lu KHz\n", fast_calibrate);
+
+	/* ************************************ */
+	/* Calculate Local APIC Timer Frequency */
+	/* ************************************ */
+	lapic_timer_frequency = (fsb * 1000) / HZ;
+
+	pr_debug("Setting lapic_timer_frequency = %d\n",
+		lapic_timer_frequency);
+
+	/* mark tsc clocksource as reliable */
+	set_cpu_cap(&boot_cpu_data, X86_FEATURE_TSC_RELIABLE);
+
+	if (fast_calibrate)
+		return fast_calibrate;
+	return 0;
+}
+
+static void __init tangier_time_init(void)
+{
+	if (intel_mid_timer_init)
+		intel_mid_timer_init();
+}
+
+static void __init tangier_arch_setup(void)
+{
+	x86_platform.calibrate_tsc = tangier_calibrate_tsc;
+	intel_mid_timer_init = x86_init.timers.timer_init;
+	x86_init.timers.timer_init = tangier_time_init;
+}
+
+static void set_batt_chrg_prof(struct ps_pse_mod_prof *batt_prof,
+				struct ps_pse_mod_prof *pentry)
+{
+	int i, j;
+
+	if (batt_prof == NULL || pentry == NULL) {
+		pr_err("%s: Invalid Pointer\n", __func__);
+		return;
+	}
+
+	memcpy(batt_prof->batt_id, pentry->batt_id, BATTID_STR_LEN);
+	batt_prof->battery_type = pentry->battery_type;
+	batt_prof->capacity = pentry->capacity;
+	batt_prof->voltage_max = pentry->voltage_max;
+	if ((pentry->batt_id[0] == 'I') && (pentry->batt_id[1] == '2'))
+		batt_prof->chrg_term_ma = MRFL_I2_TERM_MA;
+	else
+		batt_prof->chrg_term_ma = pentry->chrg_term_ma;
+
+	batt_prof->low_batt_mV = pentry->low_batt_mV;
+	batt_prof->disch_tmp_ul = pentry->disch_tmp_ul;
+	batt_prof->disch_tmp_ll = pentry->disch_tmp_ll;
+	batt_prof->temp_low_lim = pentry->temp_low_lim;
+
+	for (i = 0, j = 0; i < pentry->temp_mon_ranges; i++) {
+		if (pentry->temp_mon_range[i].temp_up_lim != 0xff) {
+			memcpy(&batt_prof->temp_mon_range[j],
+			       &pentry->temp_mon_range[i],
+			       sizeof(struct ps_temp_chg_table));
+			j++;
+		}
+	}
+	batt_prof->temp_mon_ranges = j;
+	return;
+}
+
+static int __init mrfl_platform_init(void)
+{
+	struct sfi_table_simple *sb;
+	struct ps_pse_mod_prof *pentry;
+	int totentrs = 0, totlen = 0;
+	struct sfi_table_header *table;
+
+	table = get_oem0_table();
+
+	if (!table)
+		return 0;
+
+	sb = (struct sfi_table_simple *)table;
+	totentrs = SFI_GET_NUM_ENTRIES(sb, struct ps_pse_mod_prof);
+	if (totentrs) {
+		battery_chrg_profile = kzalloc(
+				sizeof(*battery_chrg_profile), GFP_KERNEL);
+		if (!battery_chrg_profile) {
+			pr_err("%s(): Error in kzalloc\n", __func__);
+			return -ENOMEM;
+		}
+		pentry = (struct ps_pse_mod_prof *)sb->pentry;
+		totlen = totentrs * sizeof(*pentry);
+		if (totlen <= sizeof(*battery_chrg_profile)) {
+			set_batt_chrg_prof(battery_chrg_profile, pentry);
+			ps_batt_chrg_profile = kzalloc(
+					sizeof(*ps_batt_chrg_profile),
+					GFP_KERNEL);
+			if (!ps_batt_chrg_profile) {
+				pr_err("%s(): Error in kzalloc\n", __func__);
+				kfree(battery_chrg_profile);
+				return -ENOMEM;
+			}
+			ps_batt_chrg_profile->chrg_prof_type =
+				PSE_MOD_CHRG_PROF;
+			ps_batt_chrg_profile->batt_prof = battery_chrg_profile;
+#ifdef CONFIG_POWER_SUPPLY_BATTID
+			battery_prop_changed(POWER_SUPPLY_BATTERY_INSERTED,
+					     ps_batt_chrg_profile);
+#endif
+		} else {
+			pr_err("%s: Error in copying batt charge profile\n",
+				__func__);
+			kfree(battery_chrg_profile);
+			return -ENOMEM;
+		}
+	} else {
+		pr_err("%s: Error in finding batt charge profile\n",
+			__func__);
+	}
+
+	return 0;
+}
+arch_initcall_sync(mrfl_platform_init);
+
+void *get_tangier_ops(void)
+{
+	return &tangier_ops;
+}
+
+/* piggy back on anniedale ops right now */
+void *get_anniedale_ops(void)
+{
+	return &tangier_ops;
+}
diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
index 8869287..9cac825 100644
--- a/arch/x86/realmode/rm/Makefile
+++ b/arch/x86/realmode/rm/Makefile
@@ -73,9 +73,10 @@
 		   -march=i386 -mregparm=3 \
 		   -include $(srctree)/$(src)/../../boot/code16gcc.h \
 		   -fno-strict-aliasing -fomit-frame-pointer -fno-pic \
+		   -mno-mmx -mno-sse \
 		   $(call cc-option, -ffreestanding) \
 		   $(call cc-option, -fno-toplevel-reorder,\
-			$(call cc-option, -fno-unit-at-a-time)) \
+		   $(call cc-option, -fno-unit-at-a-time)) \
 		   $(call cc-option, -fno-stack-protector) \
 		   $(call cc-option, -mpreferred-stack-boundary=2)
 KBUILD_AFLAGS	:= $(KBUILD_CFLAGS) -D__ASSEMBLY__
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
index c74436e..0611adc 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -60,7 +60,11 @@
 
 static notrace cycle_t vread_hpet(void)
 {
-	return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + HPET_COUNTER);
+	/*
+	 * This is a upstream bug, we should only use HPET_COUNTER
+	 * when CONFIG_HPET_TIMER==y
+	 */
+	return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
 }
 
 #ifdef CONFIG_PARAVIRT_CLOCK
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 94eac5c..0a9fb7a 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -313,6 +313,17 @@
 	e820_add_region(start, end - start, type);
 }
 
+void xen_ignore_unusable(struct e820entry *list, size_t map_size)
+{
+	struct e820entry *entry;
+	unsigned int i;
+
+	for (i = 0, entry = list; i < map_size; i++, entry++) {
+		if (entry->type == E820_UNUSABLE)
+			entry->type = E820_RAM;
+	}
+}
+
 /**
  * machine_specific_memory_setup - Hook for machine specific memory setup.
  **/
@@ -353,6 +364,17 @@
 	}
 	BUG_ON(rc);
 
+	/*
+	 * Xen won't allow a 1:1 mapping to be created to UNUSABLE
+	 * regions, so if we're using the machine memory map leave the
+	 * region as RAM as it is in the pseudo-physical map.
+	 *
+	 * UNUSABLE regions in domUs are not handled and will need
+	 * a patch in the future.
+	 */
+	if (xen_initial_domain())
+		xen_ignore_unusable(map, memmap.nr_entries);
+
 	/* Make sure the Xen-supplied memory map is well-ordered. */
 	sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries);
 
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index d99cae8..a1e58e1 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -667,8 +667,15 @@
 static int __cpuinit xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
 	int rc;
-	rc = native_cpu_up(cpu, tidle);
-	WARN_ON (xen_smp_intr_init(cpu));
+	/*
+	 * xen_smp_intr_init() needs to run before native_cpu_up()
+	 * so that IPI vectors are set up on the booting CPU before
+	 * it is marked online in native_cpu_up().
+	*/
+	rc = xen_smp_intr_init(cpu);
+	WARN_ON(rc);
+	if (!rc)
+		rc =  native_cpu_up(cpu, tidle);
 	return rc;
 }
 
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 3d88bfd..13e8935 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -36,9 +36,8 @@
 /* snapshots of runstate info */
 static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate_snapshot);
 
-/* unused ns of stolen and blocked time */
+/* unused ns of stolen time */
 static DEFINE_PER_CPU(u64, xen_residual_stolen);
-static DEFINE_PER_CPU(u64, xen_residual_blocked);
 
 /* return an consistent snapshot of 64-bit time/counter value */
 static u64 get64(const u64 *p)
@@ -115,7 +114,7 @@
 {
 	struct vcpu_runstate_info state;
 	struct vcpu_runstate_info *snap;
-	s64 blocked, runnable, offline, stolen;
+	s64 runnable, offline, stolen;
 	cputime_t ticks;
 
 	get_runstate_snapshot(&state);
@@ -125,7 +124,6 @@
 	snap = &__get_cpu_var(xen_runstate_snapshot);
 
 	/* work out how much time the VCPU has not been runn*ing*  */
-	blocked = state.time[RUNSTATE_blocked] - snap->time[RUNSTATE_blocked];
 	runnable = state.time[RUNSTATE_runnable] - snap->time[RUNSTATE_runnable];
 	offline = state.time[RUNSTATE_offline] - snap->time[RUNSTATE_offline];
 
@@ -141,17 +139,6 @@
 	ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
 	__this_cpu_write(xen_residual_stolen, stolen);
 	account_steal_ticks(ticks);
-
-	/* Add the appropriate number of ticks of blocked time,
-	   including any left-overs from last time. */
-	blocked += __this_cpu_read(xen_residual_blocked);
-
-	if (blocked < 0)
-		blocked = 0;
-
-	ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked);
-	__this_cpu_write(xen_residual_blocked, blocked);
-	account_idle_ticks(ticks);
 }
 
 /* Get the TSC speed from Xen */
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
index ef12c0e..7d740eb 100644
--- a/arch/xtensa/kernel/head.S
+++ b/arch/xtensa/kernel/head.S
@@ -68,6 +68,15 @@
 
 #ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
 	initialize_mmu
+#if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
+	rsr	a2, excsave1
+	movi	a3, 0x08000000
+	bgeu	a2, a3, 1f
+	movi	a3, 0xd0000000
+	add	a2, a2, a3
+	wsr	a2, excsave1
+1:
+#endif
 #endif
 	.end	no-absolute-literals
 
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 6dd25ec..14c6c3a 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -152,8 +152,8 @@
 {
 	meminfo_t* mi;
 	mi = (meminfo_t*)(tag->data);
-	initrd_start = (void*)(mi->start);
-	initrd_end = (void*)(mi->end);
+	initrd_start = __va(mi->start);
+	initrd_end = __va(mi->end);
 
 	return 0;
 }
@@ -164,7 +164,7 @@
 
 static int __init parse_tag_fdt(const bp_tag_t *tag)
 {
-	dtb_start = (void *)(tag->data[0]);
+	dtb_start = __va(tag->data[0]);
 	return 0;
 }
 
diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
index 718eca1..98b67d5 100644
--- a/arch/xtensa/kernel/signal.c
+++ b/arch/xtensa/kernel/signal.c
@@ -341,7 +341,7 @@
 
 	sp = regs->areg[1];
 
-	if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! on_sig_stack(sp)) {
+	if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && sas_ss_flags(sp) == 0) {
 		sp = current->sas_ss_sp + current->sas_ss_size;
 	}
 
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index d5cd3131..c410752 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1803,7 +1803,7 @@
 
 	if (samples) {
 		v = blkg_stat_read(&cfqg->stats.avg_queue_size_sum);
-		do_div(v, samples);
+		v = div64_u64(v, samples);
 	}
 	__blkg_prfill_u64(sf, pd, v);
 	return 0;
@@ -4347,18 +4347,28 @@
 	kfree(cfqd);
 }
 
-static int cfq_init_queue(struct request_queue *q)
+static int cfq_init_queue(struct request_queue *q, struct elevator_type *e)
 {
 	struct cfq_data *cfqd;
 	struct blkcg_gq *blkg __maybe_unused;
 	int i, ret;
+	struct elevator_queue *eq;
 
-	cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
-	if (!cfqd)
+	eq = elevator_alloc(q, e);
+	if (!eq)
 		return -ENOMEM;
 
+	cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
+	if (!cfqd) {
+		kobject_put(&eq->kobj);
+		return -ENOMEM;
+	}
+	eq->elevator_data = cfqd;
+
 	cfqd->queue = q;
-	q->elevator->elevator_data = cfqd;
+	spin_lock_irq(q->queue_lock);
+	q->elevator = eq;
+	spin_unlock_irq(q->queue_lock);
 
 	/* Init root service tree */
 	cfqd->grp_service_tree = CFQ_RB_ROOT;
@@ -4433,6 +4443,7 @@
 
 out_free:
 	kfree(cfqd);
+	kobject_put(&eq->kobj);
 	return ret;
 }
 
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index ba19a3a..20614a3 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -337,13 +337,21 @@
 /*
  * initialize elevator private data (deadline_data).
  */
-static int deadline_init_queue(struct request_queue *q)
+static int deadline_init_queue(struct request_queue *q, struct elevator_type *e)
 {
 	struct deadline_data *dd;
+	struct elevator_queue *eq;
+
+	eq = elevator_alloc(q, e);
+	if (!eq)
+		return -ENOMEM;
 
 	dd = kmalloc_node(sizeof(*dd), GFP_KERNEL | __GFP_ZERO, q->node);
-	if (!dd)
+	if (!dd) {
+		kobject_put(&eq->kobj);
 		return -ENOMEM;
+	}
+	eq->elevator_data = dd;
 
 	INIT_LIST_HEAD(&dd->fifo_list[READ]);
 	INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
@@ -355,7 +363,9 @@
 	dd->front_merges = 1;
 	dd->fifo_batch = fifo_batch;
 
-	q->elevator->elevator_data = dd;
+	spin_lock_irq(q->queue_lock);
+	q->elevator = eq;
+	spin_unlock_irq(q->queue_lock);
 	return 0;
 }
 
diff --git a/block/elevator.c b/block/elevator.c
index eba5b04..668394d 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -150,7 +150,7 @@
 
 static struct kobj_type elv_ktype;
 
-static struct elevator_queue *elevator_alloc(struct request_queue *q,
+struct elevator_queue *elevator_alloc(struct request_queue *q,
 				  struct elevator_type *e)
 {
 	struct elevator_queue *eq;
@@ -170,6 +170,7 @@
 	elevator_put(e);
 	return NULL;
 }
+EXPORT_SYMBOL(elevator_alloc);
 
 static void elevator_release(struct kobject *kobj)
 {
@@ -221,16 +222,7 @@
 		}
 	}
 
-	q->elevator = elevator_alloc(q, e);
-	if (!q->elevator)
-		return -ENOMEM;
-
-	err = e->ops.elevator_init_fn(q);
-	if (err) {
-		kobject_put(&q->elevator->kobj);
-		return err;
-	}
-
+	err = e->ops.elevator_init_fn(q, e);
 	return 0;
 }
 EXPORT_SYMBOL(elevator_init);
@@ -935,17 +927,10 @@
 	spin_unlock_irq(q->queue_lock);
 
 	/* allocate, init and register new elevator */
-	err = -ENOMEM;
-	q->elevator = elevator_alloc(q, new_e);
-	if (!q->elevator)
+	err = new_e->ops.elevator_init_fn(q, new_e);
+	if (err)
 		goto fail_init;
 
-	err = new_e->ops.elevator_init_fn(q);
-	if (err) {
-		kobject_put(&q->elevator->kobj);
-		goto fail_init;
-	}
-
 	if (registered) {
 		err = elv_register_queue(q);
 		if (err)
diff --git a/block/genhd.c b/block/genhd.c
index a534eaf..6f612a7 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -512,7 +512,7 @@
 
 	ddev->parent = disk->driverfs_dev;
 
-	dev_set_name(ddev, disk->disk_name);
+	dev_set_name(ddev, "%s", disk->disk_name);
 
 	/* delay uevents, until we scanned partition table */
 	dev_set_uevent_suppress(ddev, 1);
diff --git a/block/noop-iosched.c b/block/noop-iosched.c
index 5d1bf70..3de89d4 100644
--- a/block/noop-iosched.c
+++ b/block/noop-iosched.c
@@ -59,16 +59,27 @@
 	return list_entry(rq->queuelist.next, struct request, queuelist);
 }
 
-static int noop_init_queue(struct request_queue *q)
+static int noop_init_queue(struct request_queue *q, struct elevator_type *e)
 {
 	struct noop_data *nd;
+	struct elevator_queue *eq;
 
-	nd = kmalloc_node(sizeof(*nd), GFP_KERNEL, q->node);
-	if (!nd)
+	eq = elevator_alloc(q, e);
+	if (!eq)
 		return -ENOMEM;
 
+	nd = kmalloc_node(sizeof(*nd), GFP_KERNEL, q->node);
+	if (!nd) {
+		kobject_put(&eq->kobj);
+		return -ENOMEM;
+	}
+	eq->elevator_data = nd;
+
 	INIT_LIST_HEAD(&nd->queue);
-	q->elevator->elevator_data = nd;
+
+	spin_lock_irq(q->queue_lock);
+	q->elevator = eq;
+	spin_unlock_irq(q->queue_lock);
 	return 0;
 }
 
diff --git a/build.config b/build.config
new file mode 100644
index 0000000..e1127ee
--- /dev/null
+++ b/build.config
@@ -0,0 +1,12 @@
+ARCH=x86_64
+BRANCH=android-x86_64-fugu-3.10
+CROSS_COMPILE=x86_64-linux-android-
+DEFCONFIG=fugu_defconfig
+EXTRA_CMDS=''
+KERNEL_DIR=private/x86_64-asus
+LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/x86/x86_64-linux-android-4.8/bin
+FILES="
+arch/x86/boot/bzImage
+vmlinux
+System.map
+"
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 6149a6e..7a1ae87 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -495,7 +495,8 @@
 
 struct crypto_template *crypto_lookup_template(const char *name)
 {
-	return try_then_request_module(__crypto_lookup_template(name), name);
+	return try_then_request_module(__crypto_lookup_template(name), "%s",
+				       name);
 }
 EXPORT_SYMBOL_GPL(crypto_lookup_template);
 
diff --git a/crypto/api.c b/crypto/api.c
index 3b61803..37c4c72 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -34,6 +34,8 @@
 BLOCKING_NOTIFIER_HEAD(crypto_chain);
 EXPORT_SYMBOL_GPL(crypto_chain);
 
+static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg);
+
 struct crypto_alg *crypto_mod_get(struct crypto_alg *alg)
 {
 	return try_module_get(alg->cra_module) ? crypto_alg_get(alg) : NULL;
@@ -144,8 +146,11 @@
 	}
 	up_write(&crypto_alg_sem);
 
-	if (alg != &larval->alg)
+	if (alg != &larval->alg) {
 		kfree(larval);
+		if (crypto_is_larval(alg))
+			alg = crypto_larval_wait(alg);
+	}
 
 	return alg;
 }
diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c
index 06007f0..0cd9f45 100644
--- a/crypto/asymmetric_keys/x509_public_key.c
+++ b/crypto/asymmetric_keys/x509_public_key.c
@@ -154,8 +154,8 @@
 		     (now.tm_sec < cert->valid_from.tm_sec
 		      ))))))))))) {
 		pr_warn("Cert %s is not yet valid\n", cert->fingerprint);
-		ret = -EKEYREJECTED;
-		goto error_free_cert;
+		/* ret = -EKEYREJECTED;
+		 * goto error_free_cert; */
 	}
 	if (now.tm_year > cert->valid_to.tm_year ||
 	    (now.tm_year == cert->valid_to.tm_year &&
diff --git a/drivers/Kconfig b/drivers/Kconfig
index f1a9506..f9c78f2 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -168,6 +168,8 @@
 
 source "drivers/reset/Kconfig"
 
+source "drivers/external_drivers/Kconfig"
+
 source "drivers/android/Kconfig"
 
 endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index fe5799a..734b8af 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -153,4 +153,5 @@
 obj-$(CONFIG_VME_BUS)		+= vme/
 obj-$(CONFIG_IPACK_BUS)		+= ipack/
 obj-$(CONFIG_NTB)		+= ntb/
+obj-$(CONFIG_EXTERNAL_DRIVERS)	+= external_drivers/
 obj-$(CONFIG_ANDROID)		+= android/
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 536562c..97c949a 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -43,6 +43,7 @@
 acpi-y				+= power.o
 acpi-y				+= event.o
 acpi-y				+= sysfs.o
+acpi-$(CONFIG_X86)		+= acpi_cmos_rtc.o
 acpi-$(CONFIG_DEBUG_FS)		+= debugfs.o
 acpi-$(CONFIG_ACPI_NUMA)	+= numa.o
 acpi-$(CONFIG_ACPI_PROCFS_POWER) += cm_sbs.o
diff --git a/drivers/acpi/acpi_cmos_rtc.c b/drivers/acpi/acpi_cmos_rtc.c
new file mode 100644
index 0000000..84190ed
--- /dev/null
+++ b/drivers/acpi/acpi_cmos_rtc.c
@@ -0,0 +1,92 @@
+/*
+ * ACPI support for CMOS RTC Address Space access
+ *
+ * Copyright (C) 2013, Intel Corporation
+ * Authors: Lan Tianyu <tianyu.lan@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <asm-generic/rtc.h>
+
+#include "internal.h"
+
+#define PREFIX "ACPI: "
+
+ACPI_MODULE_NAME("cmos rtc");
+
+static const struct acpi_device_id acpi_cmos_rtc_ids[] = {
+	{ "PNP0B00" },
+	{ "PNP0B01" },
+	{ "PNP0B02" },
+	{}
+};
+
+static acpi_status
+acpi_cmos_rtc_space_handler(u32 function, acpi_physical_address address,
+		      u32 bits, u64 *value64,
+		      void *handler_context, void *region_context)
+{
+	int i;
+	u8 *value = (u8 *)&value64;
+
+	if (address > 0xff || !value64)
+		return AE_BAD_PARAMETER;
+
+	if (function != ACPI_WRITE && function != ACPI_READ)
+		return AE_BAD_PARAMETER;
+
+	spin_lock_irq(&rtc_lock);
+
+	for (i = 0; i < DIV_ROUND_UP(bits, 8); ++i, ++address, ++value)
+		if (function == ACPI_READ)
+			*value = CMOS_READ(address);
+		else
+			CMOS_WRITE(*value, address);
+
+	spin_unlock_irq(&rtc_lock);
+
+	return AE_OK;
+}
+
+static int acpi_install_cmos_rtc_space_handler(struct acpi_device *adev,
+		const struct acpi_device_id *id)
+{
+	acpi_status status;
+
+	status = acpi_install_address_space_handler(adev->handle,
+			ACPI_ADR_SPACE_CMOS,
+			&acpi_cmos_rtc_space_handler,
+			NULL, NULL);
+	if (ACPI_FAILURE(status)) {
+		pr_err(PREFIX "Error installing CMOS-RTC region handler\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static void acpi_remove_cmos_rtc_space_handler(struct acpi_device *adev)
+{
+	if (ACPI_FAILURE(acpi_remove_address_space_handler(adev->handle,
+			ACPI_ADR_SPACE_CMOS, &acpi_cmos_rtc_space_handler)))
+		pr_err(PREFIX "Error removing CMOS-RTC region handler\n");
+}
+
+static struct acpi_scan_handler cmos_rtc_handler = {
+	.ids = acpi_cmos_rtc_ids,
+	.attach = acpi_install_cmos_rtc_space_handler,
+	.detach = acpi_remove_cmos_rtc_space_handler,
+};
+
+void __init acpi_cmos_rtc_init(void)
+{
+	acpi_scan_add_handler(&cmos_rtc_handler);
+}
diff --git a/drivers/acpi/acpi_ipmi.c b/drivers/acpi/acpi_ipmi.c
index f40acef..a6977e1 100644
--- a/drivers/acpi/acpi_ipmi.c
+++ b/drivers/acpi/acpi_ipmi.c
@@ -39,6 +39,7 @@
 #include <linux/ipmi.h>
 #include <linux/device.h>
 #include <linux/pnp.h>
+#include <linux/spinlock.h>
 
 MODULE_AUTHOR("Zhao Yakui");
 MODULE_DESCRIPTION("ACPI IPMI Opregion driver");
@@ -57,7 +58,7 @@
 	struct list_head head;
 	/* the IPMI request message list */
 	struct list_head tx_msg_list;
-	struct mutex	tx_msg_lock;
+	spinlock_t	tx_msg_lock;
 	acpi_handle handle;
 	struct pnp_dev *pnp_dev;
 	ipmi_user_t	user_interface;
@@ -147,6 +148,7 @@
 	struct kernel_ipmi_msg *msg;
 	struct acpi_ipmi_buffer *buffer;
 	struct acpi_ipmi_device *device;
+	unsigned long flags;
 
 	msg = &tx_msg->tx_message;
 	/*
@@ -177,10 +179,10 @@
 
 	/* Get the msgid */
 	device = tx_msg->device;
-	mutex_lock(&device->tx_msg_lock);
+	spin_lock_irqsave(&device->tx_msg_lock, flags);
 	device->curr_msgid++;
 	tx_msg->tx_msgid = device->curr_msgid;
-	mutex_unlock(&device->tx_msg_lock);
+	spin_unlock_irqrestore(&device->tx_msg_lock, flags);
 }
 
 static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg,
@@ -242,6 +244,7 @@
 	int msg_found = 0;
 	struct acpi_ipmi_msg *tx_msg;
 	struct pnp_dev *pnp_dev = ipmi_device->pnp_dev;
+	unsigned long flags;
 
 	if (msg->user != ipmi_device->user_interface) {
 		dev_warn(&pnp_dev->dev, "Unexpected response is returned. "
@@ -250,7 +253,7 @@
 		ipmi_free_recv_msg(msg);
 		return;
 	}
-	mutex_lock(&ipmi_device->tx_msg_lock);
+	spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
 	list_for_each_entry(tx_msg, &ipmi_device->tx_msg_list, head) {
 		if (msg->msgid == tx_msg->tx_msgid) {
 			msg_found = 1;
@@ -258,7 +261,7 @@
 		}
 	}
 
-	mutex_unlock(&ipmi_device->tx_msg_lock);
+	spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
 	if (!msg_found) {
 		dev_warn(&pnp_dev->dev, "Unexpected response (msg id %ld) is "
 			"returned.\n", msg->msgid);
@@ -378,6 +381,7 @@
 	struct acpi_ipmi_device *ipmi_device = handler_context;
 	int err, rem_time;
 	acpi_status status;
+	unsigned long flags;
 	/*
 	 * IPMI opregion message.
 	 * IPMI message is firstly written to the BMC and system software
@@ -395,9 +399,9 @@
 		return AE_NO_MEMORY;
 
 	acpi_format_ipmi_msg(tx_msg, address, value);
-	mutex_lock(&ipmi_device->tx_msg_lock);
+	spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
 	list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list);
-	mutex_unlock(&ipmi_device->tx_msg_lock);
+	spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
 	err = ipmi_request_settime(ipmi_device->user_interface,
 					&tx_msg->addr,
 					tx_msg->tx_msgid,
@@ -413,9 +417,9 @@
 	status = AE_OK;
 
 end_label:
-	mutex_lock(&ipmi_device->tx_msg_lock);
+	spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
 	list_del(&tx_msg->head);
-	mutex_unlock(&ipmi_device->tx_msg_lock);
+	spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
 	kfree(tx_msg);
 	return status;
 }
@@ -457,7 +461,7 @@
 
 	INIT_LIST_HEAD(&ipmi_device->head);
 
-	mutex_init(&ipmi_device->tx_msg_lock);
+	spin_lock_init(&ipmi_device->tx_msg_lock);
 	INIT_LIST_HEAD(&ipmi_device->tx_msg_list);
 	ipmi_install_space_handler(ipmi_device);
 
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index cab13f2..7c451cb 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -155,12 +155,13 @@
 			pdata->mmio_size = resource_size(&rentry->res);
 			pdata->mmio_base = ioremap(rentry->res.start,
 						   pdata->mmio_size);
-			pdata->dev_desc = dev_desc;
 			break;
 		}
 
 	acpi_dev_free_resource_list(&resource_list);
 
+	pdata->dev_desc = dev_desc;
+
 	if (dev_desc->clk_required) {
 		ret = register_device_clock(adev, pdata);
 		if (ret) {
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index 5e6301e..2cf0244 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -283,6 +283,7 @@
 	/* Get the range from the _CRS */
 	result = acpi_memory_get_device_resources(mem_device);
 	if (result) {
+		device->driver_data = NULL;
 		kfree(mem_device);
 		return result;
 	}
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index 6357e93..6c95bba 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -44,6 +44,8 @@
 #ifndef __ACHWARE_H__
 #define __ACHWARE_H__
 
+#include "aclocal.h"
+
 /* Values for the _SST predefined method */
 
 #define ACPI_SST_INDICATOR_OFF  0
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
index 35eebda..09b06e2 100644
--- a/drivers/acpi/acpica/hwxfsleep.c
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -240,12 +240,14 @@
 	    &acpi_sleep_dispatch[function_id];
 
 #if (!ACPI_REDUCED_HARDWARE)
-
 	/*
 	 * If the Hardware Reduced flag is set (from the FADT), we must
-	 * use the extended sleep registers
+	 * use the extended sleep registers (FADT). Note: As per the ACPI
+	 * specification, these extended registers are to be used for HW-reduced
+	 * platforms only. They are not general-purpose replacements for the
+	 * legacy PM register sleep support.
 	 */
-	if (acpi_gbl_reduced_hardware || acpi_gbl_FADT.sleep_control.address) {
+	if (acpi_gbl_reduced_hardware) {
 		status = sleep_functions->extended_function(sleep_state);
 	} else {
 		/* Legacy sleep */
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index e710045..9533271 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -117,6 +117,7 @@
 	struct acpi_device *device;
 	struct notifier_block pm_nb;
 	unsigned long update_time;
+	int revision;
 	int rate_now;
 	int capacity_now;
 	int voltage_now;
@@ -359,6 +360,7 @@
 };
 
 static struct acpi_offsets extended_info_offsets[] = {
+	{offsetof(struct acpi_battery, revision), 0},
 	{offsetof(struct acpi_battery, power_unit), 0},
 	{offsetof(struct acpi_battery, design_capacity), 0},
 	{offsetof(struct acpi_battery, full_charge_capacity), 0},
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 31c217a..553527c 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -324,14 +324,27 @@
 	if (result)
 		return result;
 
-	if (state == ACPI_STATE_UNKNOWN)
+	if (state == ACPI_STATE_UNKNOWN) {
 		state = ACPI_STATE_D0;
-
-	result = acpi_device_set_power(device, state);
-	if (!result && state_p)
+		result = acpi_device_set_power(device, state);
+		if (result)
+			return result;
+	} else {
+		if (device->power.flags.power_resources) {
+			/*
+			 * We don't need to really switch the state, bu we need
+			 * to update the power resources' reference counters.
+			 */
+			result = acpi_power_transition(device, state);
+			if (result)
+				return result;
+		}
+		device->power.state = state;
+	}
+	if (state_p)
 		*state_p = state;
 
-	return result;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(acpi_bus_update_power);
 
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index edc0081..45af90a 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -983,6 +983,14 @@
 	ec_enlarge_storm_threshold, "CLEVO hardware", {
 	DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."),
 	DMI_MATCH(DMI_PRODUCT_NAME, "M720T/M730T"),}, NULL},
+	{
+	ec_skip_dsdt_scan, "HP Folio 13", {
+	DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+	DMI_MATCH(DMI_PRODUCT_NAME, "HP Folio 13"),}, NULL},
+	{
+	ec_validate_ecdt, "ASUS hardware", {
+	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek Computer Inc."),
+	DMI_MATCH(DMI_PRODUCT_NAME, "L4R"),}, NULL},
 	{},
 };
 
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 40a84cc..2384120 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -78,32 +78,99 @@
 	return ret;
 }
 
-static acpi_status do_acpi_find_child(acpi_handle handle, u32 lvl_not_used,
-				      void *addr_p, void **ret_p)
+static acpi_status acpi_dev_present(acpi_handle handle, u32 lvl_not_used,
+				  void *not_used, void **ret_p)
 {
-	unsigned long long addr;
-	acpi_status status;
+	struct acpi_device *adev = NULL;
 
-	status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &addr);
-	if (ACPI_SUCCESS(status) && addr == *((u64 *)addr_p)) {
+	acpi_bus_get_device(handle, &adev);
+	if (adev) {
 		*ret_p = handle;
 		return AE_CTRL_TERMINATE;
 	}
 	return AE_OK;
 }
 
-acpi_handle acpi_get_child(acpi_handle parent, u64 address)
+static bool acpi_extra_checks_passed(acpi_handle handle, bool is_bridge)
 {
-	void *ret = NULL;
+	unsigned long long sta;
+	acpi_status status;
 
-	if (!parent)
-		return NULL;
+	status = acpi_bus_get_status_handle(handle, &sta);
+	if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_ENABLED))
+		return false;
 
-	acpi_walk_namespace(ACPI_TYPE_DEVICE, parent, 1, NULL,
-			    do_acpi_find_child, &address, &ret);
-	return (acpi_handle)ret;
+	if (is_bridge) {
+		void *test = NULL;
+
+		/* Check if this object has at least one child device. */
+		acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
+				    acpi_dev_present, NULL, NULL, &test);
+		return !!test;
+	}
+	return true;
 }
-EXPORT_SYMBOL(acpi_get_child);
+
+struct find_child_context {
+	u64 addr;
+	bool is_bridge;
+	acpi_handle ret;
+	bool ret_checked;
+};
+
+static acpi_status do_find_child(acpi_handle handle, u32 lvl_not_used,
+				 void *data, void **not_used)
+{
+	struct find_child_context *context = data;
+	unsigned long long addr;
+	acpi_status status;
+
+	status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &addr);
+	if (ACPI_FAILURE(status) || addr != context->addr)
+		return AE_OK;
+
+	if (!context->ret) {
+		/* This is the first matching object.  Save its handle. */
+		context->ret = handle;
+		return AE_OK;
+	}
+	/*
+	 * There is more than one matching object with the same _ADR value.
+	 * That really is unexpected, so we are kind of beyond the scope of the
+	 * spec here.  We have to choose which one to return, though.
+	 *
+	 * First, check if the previously found object is good enough and return
+	 * its handle if so.  Second, check the same for the object that we've
+	 * just found.
+	 */
+	if (!context->ret_checked) {
+		if (acpi_extra_checks_passed(context->ret, context->is_bridge))
+			return AE_CTRL_TERMINATE;
+		else
+			context->ret_checked = true;
+	}
+	if (acpi_extra_checks_passed(handle, context->is_bridge)) {
+		context->ret = handle;
+		return AE_CTRL_TERMINATE;
+	}
+	return AE_OK;
+}
+
+acpi_handle acpi_find_child(acpi_handle parent, u64 addr, bool is_bridge)
+{
+	if (parent) {
+		struct find_child_context context = {
+			.addr = addr,
+			.is_bridge = is_bridge,
+		};
+
+		acpi_walk_namespace(ACPI_TYPE_DEVICE, parent, 1, do_find_child,
+				    NULL, &context, NULL);
+		return context.ret;
+	}
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(acpi_find_child);
 
 static int acpi_bind_one(struct device *dev, acpi_handle handle)
 {
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index c610a76..63a0854 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -50,6 +50,11 @@
 #else
 static inline void acpi_memory_hotplug_init(void) {}
 #endif
+#ifdef CONFIG_X86
+void acpi_cmos_rtc_init(void);
+#else
+static inline void acpi_cmos_rtc_init(void) {}
+#endif
 
 void acpi_sysfs_add_hotplug_profile(struct acpi_hotplug_profile *hotplug,
 				    const char *name);
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
index aa1227a..04a1378 100644
--- a/drivers/acpi/proc.c
+++ b/drivers/acpi/proc.c
@@ -311,6 +311,8 @@
 			   dev->pnp.bus_id,
 			   (u32) dev->wakeup.sleep_state);
 
+		mutex_lock(&dev->physical_node_lock);
+
 		if (!dev->physical_node_count) {
 			seq_printf(seq, "%c%-8s\n",
 				dev->wakeup.flags.run_wake ? '*' : ' ',
@@ -338,6 +340,8 @@
 				put_device(ldev);
 			}
 		}
+
+		mutex_unlock(&dev->physical_node_lock);
 	}
 	mutex_unlock(&acpi_device_lock);
 	return 0;
@@ -347,12 +351,16 @@
 {
 	struct acpi_device_physical_node *entry;
 
+	mutex_lock(&adev->physical_node_lock);
+
 	list_for_each_entry(entry,
 		&adev->physical_node_list, node)
 		if (entry->dev && device_can_wakeup(entry->dev)) {
 			bool enable = !device_may_wakeup(entry->dev);
 			device_set_wakeup_enable(entry->dev, enable);
 		}
+
+	mutex_unlock(&adev->physical_node_lock);
 }
 
 static ssize_t
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 27da630..af658b2 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -237,10 +237,12 @@
 
 	mutex_lock(&acpi_scan_lock);
 
-	acpi_bus_get_device(handle, &device);
-	if (device) {
-		dev_warn(&device->dev, "Attempt to re-insert\n");
-		goto out;
+	if (ost_source != ACPI_NOTIFY_BUS_CHECK) {
+		acpi_bus_get_device(handle, &device);
+		if (device) {
+			dev_warn(&device->dev, "Attempt to re-insert\n");
+			goto out;
+		}
 	}
 	acpi_evaluate_hotplug_ost(handle, ost_source,
 				  ACPI_OST_SC_INSERT_IN_PROGRESS, NULL);
@@ -1890,6 +1892,9 @@
 	if (acpi_bus_get_device(handle, &device))
 		return AE_CTRL_DEPTH;
 
+	if (device->handler)
+		return AE_OK;
+
 	ret = acpi_scan_attach_handler(device);
 	if (ret)
 		return ret > 0 ? AE_OK : AE_CTRL_DEPTH;
@@ -2040,6 +2045,7 @@
 	acpi_pci_link_init();
 	acpi_platform_init();
 	acpi_lpss_init();
+	acpi_cmos_rtc_init();
 	acpi_container_init();
 	acpi_memory_hotplug_init();
 	acpi_dock_init();
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 440eadf..0e4b96b 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -450,6 +450,14 @@
 	},
 	{
 	 .callback = video_ignore_initial_backlight,
+	 .ident = "Fujitsu E753",
+	 .matches = {
+		DMI_MATCH(DMI_BOARD_VENDOR, "FUJITSU"),
+		DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E753"),
+		},
+	},
+	{
+	 .callback = video_ignore_initial_backlight,
 	 .ident = "HP Pavilion dm4",
 	 .matches = {
 		DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index a5a3ebc..78eabff 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -107,7 +107,7 @@
 	  If unsure, say N.
 
 config SATA_INIC162X
-	tristate "Initio 162x SATA support"
+	tristate "Initio 162x SATA support (Very Experimental)"
 	depends on PCI
 	help
 	  This option enables support for Initio 162x Serial ATA.
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 2b50dfd..b112625 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -291,6 +291,7 @@
 	{ PCI_VDEVICE(INTEL, 0x8d64), board_ahci }, /* Wellsburg RAID */
 	{ PCI_VDEVICE(INTEL, 0x8d66), board_ahci }, /* Wellsburg RAID */
 	{ PCI_VDEVICE(INTEL, 0x8d6e), board_ahci }, /* Wellsburg RAID */
+	{ PCI_VDEVICE(INTEL, 0x23a3), board_ahci }, /* Coleto Creek AHCI */
 
 	/* JMicron 360/1/3/5/6, match class to avoid IDE function */
 	{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -310,6 +311,7 @@
 
 	/* AMD */
 	{ PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD Hudson-2 */
+	{ PCI_VDEVICE(AMD, 0x7900), board_ahci }, /* AMD CZ */
 	/* AMD is using RAID class only for ahci controllers */
 	{ PCI_VENDOR_ID_AMD, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
 	  PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci },
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 9a8a674..b92913a 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -330,7 +330,7 @@
 	/* SATA Controller IDE (Wellsburg) */
 	{ 0x8086, 0x8d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
 	/* SATA Controller IDE (Wellsburg) */
-	{ 0x8086, 0x8d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+	{ 0x8086, 0x8d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb },
 	/* SATA Controller IDE (Wellsburg) */
 	{ 0x8086, 0x8d60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
 	/* SATA Controller IDE (Wellsburg) */
@@ -338,6 +338,8 @@
 	/* SATA Controller IDE (BayTrail) */
 	{ 0x8086, 0x0F20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt },
 	{ 0x8086, 0x0F21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt },
+	/* SATA Controller IDE (Coleto Creek) */
+	{ 0x8086, 0x23a6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
 
 	{ }	/* terminate list */
 };
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index a70ff15..7b9bdd8 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -1560,8 +1560,7 @@
 		u32 fbs = readl(port_mmio + PORT_FBS);
 		int pmp = fbs >> PORT_FBS_DWE_OFFSET;
 
-		if ((fbs & PORT_FBS_SDE) && (pmp < ap->nr_pmp_links) &&
-		    ata_link_online(&ap->pmp_link[pmp])) {
+		if ((fbs & PORT_FBS_SDE) && (pmp < ap->nr_pmp_links)) {
 			link = &ap->pmp_link[pmp];
 			fbs_need_dec = true;
 		}
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index c69fcce..370462f 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1322,14 +1322,14 @@
  *	should be retried.  To be used from EH.
  *
  *	SCSI midlayer limits the number of retries to scmd->allowed.
- *	scmd->retries is decremented for commands which get retried
+ *	scmd->allowed is incremented for commands which get retried
  *	due to unrelated failures (qc->err_mask is zero).
  */
 void ata_eh_qc_retry(struct ata_queued_cmd *qc)
 {
 	struct scsi_cmnd *scmd = qc->scsicmd;
-	if (!qc->err_mask && scmd->retries)
-		scmd->retries--;
+	if (!qc->err_mask)
+		scmd->allowed++;
 	__ata_eh_qc_complete(qc);
 }
 
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index 61c59ee..20fd337 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -289,24 +289,24 @@
 
 	/* Disable sending Early R_OK.
 	 * With "cached read" HDD testing and multiple ports busy on a SATA
-	 * host controller, 3726 PMP will very rarely drop a deferred
+	 * host controller, 3x26 PMP will very rarely drop a deferred
 	 * R_OK that was intended for the host. Symptom will be all
 	 * 5 drives under test will timeout, get reset, and recover.
 	 */
-	if (vendor == 0x1095 && devid == 0x3726) {
+	if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) {
 		u32 reg;
 
 		err_mask = sata_pmp_read(&ap->link, PMP_GSCR_SII_POL, &reg);
 		if (err_mask) {
 			rc = -EIO;
-			reason = "failed to read Sil3726 Private Register";
+			reason = "failed to read Sil3x26 Private Register";
 			goto fail;
 		}
 		reg &= ~0x1;
 		err_mask = sata_pmp_write(&ap->link, PMP_GSCR_SII_POL, reg);
 		if (err_mask) {
 			rc = -EIO;
-			reason = "failed to write Sil3726 Private Register";
+			reason = "failed to write Sil3x26 Private Register";
 			goto fail;
 		}
 	}
@@ -383,15 +383,19 @@
 	u16 devid = sata_pmp_gscr_devid(gscr);
 	struct ata_link *link;
 
-	if (vendor == 0x1095 && devid == 0x3726) {
-		/* sil3726 quirks */
+	if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) {
+		/* sil3x26 quirks */
 		ata_for_each_link(link, ap, EDGE) {
 			/* link reports offline after LPM */
 			link->flags |= ATA_LFLAG_NO_LPM;
 
-			/* Class code report is unreliable. */
+			/*
+			 * Class code report is unreliable and SRST times
+			 * out under certain configurations.
+			 */
 			if (link->pmp < 5)
-				link->flags |= ATA_LFLAG_ASSUME_ATA;
+				link->flags |= ATA_LFLAG_NO_SRST |
+					       ATA_LFLAG_ASSUME_ATA;
 
 			/* port 5 is for SEMB device and it doesn't like SRST */
 			if (link->pmp == 5)
@@ -399,20 +403,17 @@
 					       ATA_LFLAG_ASSUME_SEMB;
 		}
 	} else if (vendor == 0x1095 && devid == 0x4723) {
-		/* sil4723 quirks */
-		ata_for_each_link(link, ap, EDGE) {
-			/* link reports offline after LPM */
-			link->flags |= ATA_LFLAG_NO_LPM;
-
-			/* class code report is unreliable */
-			if (link->pmp < 2)
-				link->flags |= ATA_LFLAG_ASSUME_ATA;
-
-			/* the config device at port 2 locks up on SRST */
-			if (link->pmp == 2)
-				link->flags |= ATA_LFLAG_NO_SRST |
-					       ATA_LFLAG_ASSUME_ATA;
-		}
+		/*
+		 * sil4723 quirks
+		 *
+		 * Link reports offline after LPM.  Class code report is
+		 * unreliable.  SIMG PMPs never got SRST reliable and the
+		 * config device at port 2 locks up on SRST.
+		 */
+		ata_for_each_link(link, ap, EDGE)
+			link->flags |= ATA_LFLAG_NO_LPM |
+				       ATA_LFLAG_NO_SRST |
+				       ATA_LFLAG_ASSUME_ATA;
 	} else if (vendor == 0x1095 && devid == 0x4726) {
 		/* sil4726 quirks */
 		ata_for_each_link(link, ap, EDGE) {
diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c
index 90b159b..cd8daf4 100644
--- a/drivers/ata/libata-zpodd.c
+++ b/drivers/ata/libata-zpodd.c
@@ -32,13 +32,14 @@
 
 static int eject_tray(struct ata_device *dev)
 {
-	struct ata_taskfile tf = {};
+	struct ata_taskfile tf;
 	const char cdb[] = {  GPCMD_START_STOP_UNIT,
 		0, 0, 0,
 		0x02,     /* LoEj */
 		0, 0, 0, 0, 0, 0, 0,
 	};
 
+	ata_tf_init(dev, &tf);
 	tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
 	tf.command = ATA_CMD_PACKET;
 	tf.protocol = ATAPI_PROT_NODATA;
@@ -52,8 +53,7 @@
 	char buf[16];
 	unsigned int ret;
 	struct rm_feature_desc *desc = (void *)(buf + 8);
-	struct ata_taskfile tf = {};
-
+	struct ata_taskfile tf;
 	char cdb[] = {  GPCMD_GET_CONFIGURATION,
 			2,      /* only 1 feature descriptor requested */
 			0, 3,   /* 3, removable medium feature */
@@ -62,6 +62,7 @@
 			0, 0, 0,
 	};
 
+	ata_tf_init(dev, &tf);
 	tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
 	tf.command = ATA_CMD_PACKET;
 	tf.protocol = ATAPI_PROT_PIO;
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index d40e403..8401061 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -293,6 +293,7 @@
 {
 	struct sata_fsl_host_priv *host_priv = host->private_data;
 	void __iomem *hcr_base = host_priv->hcr_base;
+	unsigned long flags;
 
 	if (count > ICC_MAX_INT_COUNT_THRESHOLD)
 		count = ICC_MAX_INT_COUNT_THRESHOLD;
@@ -305,12 +306,12 @@
 			(count > ICC_MIN_INT_COUNT_THRESHOLD))
 		ticks = ICC_SAFE_INT_TICKS;
 
-	spin_lock(&host->lock);
+	spin_lock_irqsave(&host->lock, flags);
 	iowrite32((count << 24 | ticks), hcr_base + ICC);
 
 	intr_coalescing_count = count;
 	intr_coalescing_ticks = ticks;
-	spin_unlock(&host->lock);
+	spin_unlock_irqrestore(&host->lock, flags);
 
 	DPRINTK("interrupt coalescing, count = 0x%x, ticks = %x\n",
 			intr_coalescing_count, intr_coalescing_ticks);
diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
index b20aa96..c846fd3 100644
--- a/drivers/ata/sata_highbank.c
+++ b/drivers/ata/sata_highbank.c
@@ -196,10 +196,26 @@
 	return 0;
 }
 
+/*
+ * The Calxeda SATA phy intermittently fails to bring up a link with Gen3
+ * Retrying the phy hard reset can work around the issue, but the drive
+ * may fail again. In less than 150 out of 15000 test runs, it took more
+ * than 10 tries for the link to be established (but never more than 35).
+ * Triple the maximum observed retry count to provide plenty of margin for
+ * rare events and to guarantee that the link is established.
+ *
+ * Also, the default 2 second time-out on a failed drive is too long in
+ * this situation. The uboot implementation of the same driver function
+ * uses a much shorter time-out period and never experiences a time out
+ * issue. Reducing the time-out to 500ms improves the responsiveness.
+ * The other timing constants were kept the same as the stock AHCI driver.
+ * This change was also tested 15000 times on 24 drives and none of them
+ * experienced a time out.
+ */
 static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class,
 				unsigned long deadline)
 {
-	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
+	static const unsigned long timing[] = { 5, 100, 500};
 	struct ata_port *ap = link->ap;
 	struct ahci_port_priv *pp = ap->private_data;
 	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
@@ -207,7 +223,7 @@
 	bool online;
 	u32 sstatus;
 	int rc;
-	int retry = 10;
+	int retry = 100;
 
 	ahci_stop_engine(ap);
 
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index 1e6827c..74456fa 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -6,6 +6,18 @@
  *
  * This file is released under GPL v2.
  *
+ * **** WARNING ****
+ *
+ * This driver never worked properly and unfortunately data corruption is
+ * relatively common.  There isn't anyone working on the driver and there's
+ * no support from the vendor.  Do not use this driver in any production
+ * environment.
+ *
+ * http://thread.gmane.org/gmane.linux.debian.devel.bugs.rc/378525/focus=54491
+ * https://bugzilla.kernel.org/show_bug.cgi?id=60565
+ *
+ * *****************
+ *
  * This controller is eccentric and easily locks up if something isn't
  * right.  Documentation is available at initio's website but it only
  * documents registers (not programming model).
@@ -807,6 +819,8 @@
 
 	ata_print_version_once(&pdev->dev, DRV_VERSION);
 
+	dev_alert(&pdev->dev, "inic162x support is broken with common data corruption issues and will be disabled by default, contact linux-ide@vger.kernel.org if in production use\n");
+
 	/* alloc host */
 	host = ata_host_alloc_pinfo(&pdev->dev, ppi, NR_PORTS);
 	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 4e22ce3..48029aa 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -10,7 +10,7 @@
 obj-y			+= power/
 obj-$(CONFIG_HAS_DMA)	+= dma-mapping.o
 obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
-obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf.o
+obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf.o reservation.o
 obj-$(CONFIG_ISA)	+= isa.o
 obj-$(CONFIG_FW_LOADER)	+= firmware_class.o
 obj-$(CONFIG_NUMA)	+= node.o
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 2499cef..ca4bcb8 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1839,7 +1839,7 @@
  */
 void device_shutdown(void)
 {
-	struct device *dev;
+	struct device *dev, *parent;
 
 	spin_lock(&devices_kset->list_lock);
 	/*
@@ -1856,7 +1856,7 @@
 		 * prevent it from being freed because parent's
 		 * lock is to be held
 		 */
-		get_device(dev->parent);
+		parent = get_device(dev->parent);
 		get_device(dev);
 		/*
 		 * Make sure the device is off the kset list, in the
@@ -1866,8 +1866,8 @@
 		spin_unlock(&devices_kset->list_lock);
 
 		/* hold lock to avoid race with probe/release */
-		if (dev->parent)
-			device_lock(dev->parent);
+		if (parent)
+			device_lock(parent);
 		device_lock(dev);
 
 		/* Don't allow any more runtime suspends */
@@ -1885,11 +1885,11 @@
 		}
 
 		device_unlock(dev);
-		if (dev->parent)
-			device_unlock(dev->parent);
+		if (parent)
+			device_unlock(parent);
 
 		put_device(dev);
-		put_device(dev->parent);
+		put_device(parent);
 
 		spin_lock(&devices_kset->list_lock);
 	}
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 14f8a69..86abbff 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -152,6 +152,8 @@
 		container_of(dev, struct memory_block, dev);
 
 	for (i = 0; i < sections_per_block; i++) {
+		if (!present_section_nr(mem->start_section_nr + i))
+			continue;
 		pfn = section_nr_to_pfn(mem->start_section_nr + i);
 		ret &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
 	}
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index f0d3054..e853b59 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -20,3 +20,6 @@
 
 config REGMAP_IRQ
 	bool
+
+config REGMAP_ALLOW_WRITE_DEBUGFS
+	tristate
diff --git a/drivers/base/regmap/Makefile b/drivers/base/regmap/Makefile
index cf12998..4c63174 100644
--- a/drivers/base/regmap/Makefile
+++ b/drivers/base/regmap/Makefile
@@ -5,3 +5,7 @@
 obj-$(CONFIG_REGMAP_SPI) += regmap-spi.o
 obj-$(CONFIG_REGMAP_MMIO) += regmap-mmio.o
 obj-$(CONFIG_REGMAP_IRQ) += regmap-irq.o
+
+ifeq (${TARGET_BUILD_VARIANT},$(filter ${TARGET_BUILD_VARIANT}, eng))
+ccflags-y += -DCONFIG_REGMAP_ALLOW_WRITE_DEBUGFS
+endif
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index 02f490b..844afef 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -23,14 +23,14 @@
 static int regcache_rbtree_exit(struct regmap *map);
 
 struct regcache_rbtree_node {
-	/* the actual rbtree node holding this block */
-	struct rb_node node;
-	/* base register handled by this block */
-	unsigned int base_reg;
 	/* block of adjacent registers */
 	void *block;
+	/* base register handled by this block */
+	unsigned int base_reg;
 	/* number of registers available in the block */
 	unsigned int blklen;
+	/* the actual rbtree node holding this block */
+	struct rb_node node;
 } __attribute__ ((packed));
 
 struct regcache_rbtree_ctx {
@@ -362,7 +362,7 @@
 		rbnode = kzalloc(sizeof *rbnode, GFP_KERNEL);
 		if (!rbnode)
 			return -ENOMEM;
-		rbnode->blklen = sizeof(*rbnode);
+		rbnode->blklen = 1;
 		rbnode->base_reg = reg;
 		rbnode->block = kmalloc(rbnode->blklen * map->cache_word_size,
 					GFP_KERNEL);
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index 507ee2d..46283fd 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -644,7 +644,8 @@
 		}
 	}
 
-	return regcache_sync_block_raw_flush(map, &data, base, regtmp);
+	return regcache_sync_block_raw_flush(map, &data, base, regtmp +
+			map->reg_stride);
 }
 
 int regcache_sync_block(struct regmap *map, void *block,
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 975719b..d75e2c1 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -248,8 +248,7 @@
 				   count, ppos);
 }
 
-#undef REGMAP_ALLOW_WRITE_DEBUGFS
-#ifdef REGMAP_ALLOW_WRITE_DEBUGFS
+#ifdef CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS
 /*
  * This can be dangerous especially when we have clients such as
  * PMICs, therefore don't provide any real compile time configuration option
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 1643e88..b233b23 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -35,6 +35,7 @@
 	int wake_count;
 
 	void *status_reg_buf;
+	void *mask_reg_buf;
 	unsigned int *status_buf;
 	unsigned int *mask_buf;
 	unsigned int *mask_buf_def;
@@ -199,7 +200,20 @@
 		u16 *buf16 = data->status_reg_buf;
 		u32 *buf32 = data->status_reg_buf;
 
+		u8 *mask8 = data->mask_reg_buf;
+		u16 *mask16 = data->mask_reg_buf;
+		u32 *mask32 = data->mask_reg_buf;
+
 		BUG_ON(!data->status_reg_buf);
+		BUG_ON(!data->mask_reg_buf);
+
+		ret = regmap_bulk_read(map, chip->mask_base, data->mask_reg_buf,
+				       chip->num_regs);
+		if (ret != 0) {
+			dev_err(map->dev, "Failed to read mask register: %d\n",
+				ret);
+			return IRQ_NONE;
+		}
 
 		ret = regmap_bulk_read(map, chip->status_base,
 				       data->status_reg_buf,
@@ -214,12 +228,18 @@
 			switch (map->format.val_bytes) {
 			case 1:
 				data->status_buf[i] = buf8[i];
+				data->mask_buf[i] = mask8[i];
+				data->mask_buf_def[i] = mask8[i];
 				break;
 			case 2:
 				data->status_buf[i] = buf16[i];
+				data->mask_buf[i] = mask16[i];
+				data->mask_buf_def[i] = mask16[i];
 				break;
 			case 4:
 				data->status_buf[i] = buf32[i];
+				data->mask_buf[i] = mask32[i];
+				data->mask_buf_def[i] = mask32[i];
 				break;
 			default:
 				BUG();
@@ -242,6 +262,21 @@
 					pm_runtime_put(map->dev);
 				return IRQ_NONE;
 			}
+
+			ret = regmap_read(map, chip->mask_base +
+					  (i * map->reg_stride
+					   * data->irq_reg_stride),
+					  &data->mask_buf[i]);
+
+			if (ret != 0) {
+				dev_err(map->dev,
+					"Failed to read mask reg: %d\n",
+					ret);
+				if (chip->runtime_pm)
+					pm_runtime_put(map->dev);
+				return IRQ_NONE;
+			}
+			data->mask_buf_def[i] = data->mask_buf[i];
 		}
 	}
 
@@ -394,6 +429,11 @@
 					    chip->num_regs, GFP_KERNEL);
 		if (!d->status_reg_buf)
 			goto err_alloc;
+
+		d->mask_reg_buf = kmalloc(map->format.val_bytes *
+					    chip->num_regs, GFP_KERNEL);
+		if (!d->mask_reg_buf)
+			goto err_alloc;
 	}
 
 	mutex_init(&d->lock);
@@ -475,6 +515,7 @@
 	kfree(d->mask_buf);
 	kfree(d->status_buf);
 	kfree(d->status_reg_buf);
+	kfree(d->mask_reg_buf);
 	kfree(d);
 	return ret;
 }
@@ -497,6 +538,7 @@
 	kfree(d->mask_buf_def);
 	kfree(d->mask_buf);
 	kfree(d->status_reg_buf);
+	kfree(d->mask_reg_buf);
 	kfree(d->status_buf);
 	kfree(d);
 }
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index a941dcf..d0c81d1 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -1717,7 +1717,7 @@
 	int ret;
 
 	/* Nothing to do with no async support */
-	if (!map->bus->async_write)
+	if (!map->bus || !map->bus->async_write)
 		return 0;
 
 	trace_regmap_async_complete_start(map->dev);
diff --git a/drivers/base/reservation.c b/drivers/base/reservation.c
new file mode 100644
index 0000000..a73fbf3
--- /dev/null
+++ b/drivers/base/reservation.c
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2012-2013 Canonical Ltd
+ *
+ * Based on bo.c which bears the following copyright notice,
+ * but is dual licensed:
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include <linux/reservation.h>
+#include <linux/export.h>
+
+DEFINE_WW_CLASS(reservation_ww_class);
+EXPORT_SYMBOL(reservation_ww_class);
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 62b6c2c..90a4e6b 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1189,6 +1189,7 @@
 	int err;
 	u32 cp;
 
+	memset(&arg64, 0, sizeof(arg64));
 	err = 0;
 	err |=
 	    copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index 639d26b..2b94403 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -1193,6 +1193,7 @@
 		ida_pci_info_struct pciinfo;
 
 		if (!arg) return -EINVAL;
+		memset(&pciinfo, 0, sizeof(pciinfo));
 		pciinfo.bus = host->pci_dev->bus->number;
 		pciinfo.dev_fn = host->pci_dev->devfn;
 		pciinfo.board_id = host->board_id;
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 037288e..cf1576d 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -623,8 +623,10 @@
 		if (!nbd->sock)
 			return -EINVAL;
 
+		nbd->disconnect = 1;
+
 		nbd_send_req(nbd, &sreq);
-                return 0;
+		return 0;
 	}
  
 	case NBD_CLEAR_SOCK: {
@@ -654,6 +656,7 @@
 				nbd->sock = SOCKET_I(inode);
 				if (max_part > 0)
 					bdev->bd_invalidated = 1;
+				nbd->disconnect = 0; /* we're connected now */
 				return 0;
 			} else {
 				fput(file);
@@ -714,7 +717,8 @@
 		else
 			blk_queue_flush(nbd->disk->queue, 0);
 
-		thread = kthread_create(nbd_thread, nbd, nbd->disk->disk_name);
+		thread = kthread_create(nbd_thread, nbd, "%s",
+					nbd->disk->disk_name);
 		if (IS_ERR(thread)) {
 			mutex_lock(&nbd->tx_lock);
 			return PTR_ERR(thread);
@@ -742,6 +746,8 @@
 		set_capacity(nbd->disk, 0);
 		if (max_part > 0)
 			ioctl_by_bdev(bdev, BLKRRPART, 0);
+		if (nbd->disconnect) /* user requested, ignore socket errors */
+			return 0;
 		return nbd->harderror;
 	}
 
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index aff789d..8c7421a 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -1565,11 +1565,12 @@
 		obj_request, obj_request->img_request, obj_request->result,
 		xferred, length);
 	/*
-	 * ENOENT means a hole in the image.  We zero-fill the
-	 * entire length of the request.  A short read also implies
-	 * zero-fill to the end of the request.  Either way we
-	 * update the xferred count to indicate the whole request
-	 * was satisfied.
+	 * ENOENT means a hole in the image.  We zero-fill the entire
+	 * length of the request.  A short read also implies zero-fill
+	 * to the end of the request.  An error requires the whole
+	 * length of the request to be reported finished with an error
+	 * to the block layer.  In each case we update the xferred
+	 * count to indicate the whole request was satisfied.
 	 */
 	rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
 	if (obj_request->result == -ENOENT) {
@@ -1578,14 +1579,13 @@
 		else
 			zero_pages(obj_request->pages, 0, length);
 		obj_request->result = 0;
-		obj_request->xferred = length;
 	} else if (xferred < length && !obj_request->result) {
 		if (obj_request->type == OBJ_REQUEST_BIO)
 			zero_bio_chain(obj_request->bio_list, xferred);
 		else
 			zero_pages(obj_request->pages, xferred, length);
-		obj_request->xferred = length;
 	}
+	obj_request->xferred = length;
 	obj_request_done_set(obj_request);
 }
 
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index dd5b2fe..d81dfca 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -647,7 +647,18 @@
 	int status = BLKIF_RSP_OKAY;
 	struct block_device *bdev = blkif->vbd.bdev;
 	unsigned long secure;
+	struct phys_req preq;
 
+	preq.sector_number = req->u.discard.sector_number;
+	preq.nr_sects      = req->u.discard.nr_sectors;
+
+	err = xen_vbd_translate(&preq, blkif, WRITE);
+	if (err) {
+		pr_warn(DRV_PFX "access denied: DISCARD [%llu->%llu] on dev=%04x\n",
+			preq.sector_number,
+			preq.sector_number + preq.nr_sects, blkif->vbd.pdevice);
+		goto fail_response;
+	}
 	blkif->st_ds_req++;
 
 	xen_blkif_get(blkif);
@@ -658,7 +669,7 @@
 	err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
 				   req->u.discard.nr_sectors,
 				   GFP_KERNEL, secure);
-
+fail_response:
 	if (err == -EOPNOTSUPP) {
 		pr_debug(DRV_PFX "discard op failed, not supported\n");
 		status = BLKIF_RSP_EOPNOTSUPP;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index d89ef86..69b45fc 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -75,6 +75,7 @@
 	struct blkif_request req;
 	struct request *request;
 	struct grant *grants_used[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+	struct scatterlist sg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 };
 
 static DEFINE_MUTEX(blkfront_mutex);
@@ -98,7 +99,6 @@
 	enum blkif_state connected;
 	int ring_ref;
 	struct blkif_front_ring ring;
-	struct scatterlist sg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 	unsigned int evtchn, irq;
 	struct request_queue *rq;
 	struct work_struct work;
@@ -422,11 +422,11 @@
 			ring_req->u.discard.flag = 0;
 	} else {
 		ring_req->u.rw.nr_segments = blk_rq_map_sg(req->q, req,
-							   info->sg);
+							   info->shadow[id].sg);
 		BUG_ON(ring_req->u.rw.nr_segments >
 		       BLKIF_MAX_SEGMENTS_PER_REQUEST);
 
-		for_each_sg(info->sg, sg, ring_req->u.rw.nr_segments, i) {
+		for_each_sg(info->shadow[id].sg, sg, ring_req->u.rw.nr_segments, i) {
 			fsect = sg->offset >> 9;
 			lsect = fsect + (sg->length >> 9) - 1;
 
@@ -867,12 +867,12 @@
 			     struct blkif_response *bret)
 {
 	int i = 0;
-	struct bio_vec *bvec;
-	struct req_iterator iter;
-	unsigned long flags;
+	struct scatterlist *sg;
 	char *bvec_data;
 	void *shared_data;
-	unsigned int offset = 0;
+	int nseg;
+
+	nseg = s->req.u.rw.nr_segments;
 
 	if (bret->operation == BLKIF_OP_READ) {
 		/*
@@ -881,19 +881,16 @@
 		 * than PAGE_SIZE, we have to keep track of the current offset,
 		 * to be sure we are copying the data from the right shared page.
 		 */
-		rq_for_each_segment(bvec, s->request, iter) {
-			BUG_ON((bvec->bv_offset + bvec->bv_len) > PAGE_SIZE);
-			if (bvec->bv_offset < offset)
-				i++;
-			BUG_ON(i >= s->req.u.rw.nr_segments);
+		for_each_sg(s->sg, sg, nseg, i) {
+			BUG_ON(sg->offset + sg->length > PAGE_SIZE);
 			shared_data = kmap_atomic(
 				pfn_to_page(s->grants_used[i]->pfn));
-			bvec_data = bvec_kmap_irq(bvec, &flags);
-			memcpy(bvec_data, shared_data + bvec->bv_offset,
-				bvec->bv_len);
-			bvec_kunmap_irq(bvec_data, &flags);
+			bvec_data = kmap_atomic(sg_page(sg));
+			memcpy(bvec_data   + sg->offset,
+			       shared_data + sg->offset,
+			       sg->length);
+			kunmap_atomic(bvec_data);
 			kunmap_atomic(shared_data);
-			offset = bvec->bv_offset + bvec->bv_len;
 		}
 	}
 	/* Add the persistent grant into the list of free grants */
@@ -1022,7 +1019,7 @@
 			 struct blkfront_info *info)
 {
 	struct blkif_sring *sring;
-	int err;
+	int err, i;
 
 	info->ring_ref = GRANT_INVALID_REF;
 
@@ -1034,7 +1031,8 @@
 	SHARED_RING_INIT(sring);
 	FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
 
-	sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST);
+	for (i = 0; i < BLK_RING_SIZE; i++)
+		sg_init_table(info->shadow[i].sg, BLKIF_MAX_SEGMENTS_PER_REQUEST);
 
 	/* Allocate memory for grants */
 	err = fill_grant_buffer(info, BLK_RING_SIZE *
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index 11f467c..0a327f4 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -85,12 +85,17 @@
 	{ USB_DEVICE(0x04CA, 0x3008) },
 	{ USB_DEVICE(0x13d3, 0x3362) },
 	{ USB_DEVICE(0x0CF3, 0xE004) },
+	{ USB_DEVICE(0x0CF3, 0xE005) },
 	{ USB_DEVICE(0x0930, 0x0219) },
 	{ USB_DEVICE(0x0489, 0xe057) },
 	{ USB_DEVICE(0x13d3, 0x3393) },
 	{ USB_DEVICE(0x0489, 0xe04e) },
 	{ USB_DEVICE(0x0489, 0xe056) },
 	{ USB_DEVICE(0x0489, 0xe04d) },
+	{ USB_DEVICE(0x04c5, 0x1330) },
+	{ USB_DEVICE(0x13d3, 0x3402) },
+	{ USB_DEVICE(0x0cf3, 0x3121) },
+	{ USB_DEVICE(0x0cf3, 0xe003) },
 
 	/* Atheros AR5BBU12 with sflash firmware */
 	{ USB_DEVICE(0x0489, 0xE02C) },
@@ -122,12 +127,17 @@
 	{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
 
 	/* Atheros AR5BBU22 with sflash firmware */
 	{ USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
@@ -193,24 +203,44 @@
 
 static int ath3k_get_state(struct usb_device *udev, unsigned char *state)
 {
-	int pipe = 0;
+	int ret, pipe = 0;
+	char *buf;
+
+	buf = kmalloc(sizeof(*buf), GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
 
 	pipe = usb_rcvctrlpipe(udev, 0);
-	return usb_control_msg(udev, pipe, ATH3K_GETSTATE,
-			USB_TYPE_VENDOR | USB_DIR_IN, 0, 0,
-			state, 0x01, USB_CTRL_SET_TIMEOUT);
+	ret = usb_control_msg(udev, pipe, ATH3K_GETSTATE,
+			      USB_TYPE_VENDOR | USB_DIR_IN, 0, 0,
+			      buf, sizeof(*buf), USB_CTRL_SET_TIMEOUT);
+
+	*state = *buf;
+	kfree(buf);
+
+	return ret;
 }
 
 static int ath3k_get_version(struct usb_device *udev,
 			struct ath3k_version *version)
 {
-	int pipe = 0;
+	int ret, pipe = 0;
+	struct ath3k_version *buf;
+	const int size = sizeof(*buf);
+
+	buf = kmalloc(size, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
 
 	pipe = usb_rcvctrlpipe(udev, 0);
-	return usb_control_msg(udev, pipe, ATH3K_GETVERSION,
-			USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, version,
-			sizeof(struct ath3k_version),
-			USB_CTRL_SET_TIMEOUT);
+	ret = usb_control_msg(udev, pipe, ATH3K_GETVERSION,
+			      USB_TYPE_VENDOR | USB_DIR_IN, 0, 0,
+			      buf, size, USB_CTRL_SET_TIMEOUT);
+
+	memcpy(version, buf, size);
+	kfree(buf);
+
+	return ret;
 }
 
 static int ath3k_load_fwfile(struct usb_device *udev,
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 7a7e5f8..58491f1 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -57,6 +57,9 @@
 	/* Apple-specific (Broadcom) devices */
 	{ USB_VENDOR_AND_INTERFACE_INFO(0x05ac, 0xff, 0x01, 0x01) },
 
+	/* MediaTek MT76x0E */
+	{ USB_DEVICE(0x0e8d, 0x763f) },
+
 	/* Broadcom SoftSailing reporting vendor specific */
 	{ USB_DEVICE(0x0a5c, 0x21e1) },
 
@@ -99,6 +102,7 @@
 
 	/* Broadcom BCM20702A0 */
 	{ USB_DEVICE(0x0b05, 0x17b5) },
+	{ USB_DEVICE(0x0b05, 0x17cb) },
 	{ USB_DEVICE(0x04ca, 0x2003) },
 	{ USB_DEVICE(0x0489, 0xe042) },
 	{ USB_DEVICE(0x413c, 0x8197) },
@@ -145,12 +149,17 @@
 	{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
 
 	/* Atheros AR5BBU12 with sflash firmware */
 	{ USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
@@ -1092,7 +1101,7 @@
 	if (IS_ERR(skb)) {
 		BT_ERR("%s sending Intel patch command (0x%4.4x) failed (%ld)",
 		       hdev->name, cmd->opcode, PTR_ERR(skb));
-		return -PTR_ERR(skb);
+		return PTR_ERR(skb);
 	}
 
 	/* It ensures that the returned event matches the event data read from
@@ -1144,7 +1153,7 @@
 	if (IS_ERR(skb)) {
 		BT_ERR("%s sending initial HCI reset command failed (%ld)",
 		       hdev->name, PTR_ERR(skb));
-		return -PTR_ERR(skb);
+		return PTR_ERR(skb);
 	}
 	kfree_skb(skb);
 
@@ -1158,7 +1167,7 @@
 	if (IS_ERR(skb)) {
 		BT_ERR("%s reading Intel fw version command failed (%ld)",
 		       hdev->name, PTR_ERR(skb));
-		return -PTR_ERR(skb);
+		return PTR_ERR(skb);
 	}
 
 	if (skb->len != sizeof(*ver)) {
@@ -1216,7 +1225,7 @@
 		BT_ERR("%s entering Intel manufacturer mode failed (%ld)",
 		       hdev->name, PTR_ERR(skb));
 		release_firmware(fw);
-		return -PTR_ERR(skb);
+		return PTR_ERR(skb);
 	}
 
 	if (skb->data[0]) {
@@ -1273,7 +1282,7 @@
 	if (IS_ERR(skb)) {
 		BT_ERR("%s exiting Intel manufacturer mode failed (%ld)",
 		       hdev->name, PTR_ERR(skb));
-		return -PTR_ERR(skb);
+		return PTR_ERR(skb);
 	}
 	kfree_skb(skb);
 
@@ -1289,7 +1298,7 @@
 	if (IS_ERR(skb)) {
 		BT_ERR("%s exiting Intel manufacturer mode failed (%ld)",
 		       hdev->name, PTR_ERR(skb));
-		return -PTR_ERR(skb);
+		return PTR_ERR(skb);
 	}
 	kfree_skb(skb);
 
@@ -1307,7 +1316,7 @@
 	if (IS_ERR(skb)) {
 		BT_ERR("%s exiting Intel manufacturer mode failed (%ld)",
 		       hdev->name, PTR_ERR(skb));
-		return -PTR_ERR(skb);
+		return PTR_ERR(skb);
 	}
 	kfree_skb(skb);
 
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index d620b44..8a3aff7 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2882,7 +2882,7 @@
 	if (lba < 0)
 		return -EINVAL;
 
-	cgc->buffer = kmalloc(blocksize, GFP_KERNEL);
+	cgc->buffer = kzalloc(blocksize, GFP_KERNEL);
 	if (cgc->buffer == NULL)
 		return -ENOMEM;
 
diff --git a/drivers/char/agp/parisc-agp.c b/drivers/char/agp/parisc-agp.c
index 94821ab..9576fad 100644
--- a/drivers/char/agp/parisc-agp.c
+++ b/drivers/char/agp/parisc-agp.c
@@ -129,7 +129,8 @@
 	off_t j, io_pg_start;
 	int io_pg_count;
 
-	if (type != 0 || mem->type != 0) {
+	if (type != mem->type ||
+		agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) {
 		return -EINVAL;
 	}
 
@@ -175,7 +176,8 @@
 	struct _parisc_agp_info *info = &parisc_agp_info;
 	int i, io_pg_start, io_pg_count;
 
-	if (type != 0 || mem->type != 0) {
+	if (type != mem->type ||
+		agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) {
 		return -EINVAL;
 	}
 
diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c
index eb7f147..43577ca 100644
--- a/drivers/char/hw_random/bcm2835-rng.c
+++ b/drivers/char/hw_random/bcm2835-rng.c
@@ -110,4 +110,4 @@
 
 MODULE_AUTHOR("Lubomir Rintel <lkundrak@v3.sk>");
 MODULE_DESCRIPTION("BCM2835 Random Number Generator (RNG) driver");
-MODULE_LICENSE("GPLv2");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 35487e8..81eefa1 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1462,12 +1462,11 @@
 
 static u32 random_int_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned;
 
-static int __init random_int_secret_init(void)
+int random_int_secret_init(void)
 {
 	get_random_bytes(random_int_secret, sizeof(random_int_secret));
 	return 0;
 }
-late_initcall(random_int_secret_init);
 
 /*
  * Get a random word for internal kernel use only. Similar to urandom but
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 1b456fe..fc45567 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -272,9 +272,12 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&portdev->ports_lock, flags);
-	list_for_each_entry(port, &portdev->ports, list)
-		if (port->cdev->dev == dev)
+	list_for_each_entry(port, &portdev->ports, list) {
+		if (port->cdev->dev == dev) {
+			kref_get(&port->kref);
 			goto out;
+		}
+	}
 	port = NULL;
 out:
 	spin_unlock_irqrestore(&portdev->ports_lock, flags);
@@ -746,6 +749,10 @@
 
 	port = filp->private_data;
 
+	/* Port is hot-unplugged. */
+	if (!port->guest_connected)
+		return -ENODEV;
+
 	if (!port_has_data(port)) {
 		/*
 		 * If nothing's connected on the host just return 0 in
@@ -762,7 +769,7 @@
 		if (ret < 0)
 			return ret;
 	}
-	/* Port got hot-unplugged. */
+	/* Port got hot-unplugged while we were waiting above. */
 	if (!port->guest_connected)
 		return -ENODEV;
 	/*
@@ -932,13 +939,25 @@
 	if (is_rproc_serial(port->out_vq->vdev))
 		return -EINVAL;
 
+	/*
+	 * pipe->nrbufs == 0 means there are no data to transfer,
+	 * so this returns just 0 for no data.
+	 */
+	pipe_lock(pipe);
+	if (!pipe->nrbufs) {
+		ret = 0;
+		goto error_out;
+	}
+
 	ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK);
 	if (ret < 0)
-		return ret;
+		goto error_out;
 
 	buf = alloc_buf(port->out_vq, 0, pipe->nrbufs);
-	if (!buf)
-		return -ENOMEM;
+	if (!buf) {
+		ret = -ENOMEM;
+		goto error_out;
+	}
 
 	sgl.n = 0;
 	sgl.len = 0;
@@ -946,12 +965,17 @@
 	sgl.sg = buf->sg;
 	sg_init_table(sgl.sg, sgl.size);
 	ret = __splice_from_pipe(pipe, &sd, pipe_to_sg);
+	pipe_unlock(pipe);
 	if (likely(ret > 0))
 		ret = __send_to_port(port, buf->sg, sgl.n, sgl.len, buf, true);
 
 	if (unlikely(ret <= 0))
 		free_buf(buf, true);
 	return ret;
+
+error_out:
+	pipe_unlock(pipe);
+	return ret;
 }
 
 static unsigned int port_fops_poll(struct file *filp, poll_table *wait)
@@ -1019,14 +1043,14 @@
 	struct port *port;
 	int ret;
 
+	/* We get the port with a kref here */
 	port = find_port_by_devt(cdev->dev);
+	if (!port) {
+		/* Port was unplugged before we could proceed */
+		return -ENXIO;
+	}
 	filp->private_data = port;
 
-	/* Prevent against a port getting hot-unplugged at the same time */
-	spin_lock_irq(&port->portdev->ports_lock);
-	kref_get(&port->kref);
-	spin_unlock_irq(&port->portdev->ports_lock);
-
 	/*
 	 * Don't allow opening of console port devices -- that's done
 	 * via /dev/hvc
@@ -1498,14 +1522,6 @@
 
 	port = container_of(kref, struct port, kref);
 
-	sysfs_remove_group(&port->dev->kobj, &port_attribute_group);
-	device_destroy(pdrvdata.class, port->dev->devt);
-	cdev_del(port->cdev);
-
-	kfree(port->name);
-
-	debugfs_remove(port->debugfs_file);
-
 	kfree(port);
 }
 
@@ -1539,12 +1555,14 @@
 	spin_unlock_irq(&port->portdev->ports_lock);
 
 	if (port->guest_connected) {
-		port->guest_connected = false;
-		port->host_connected = false;
-		wake_up_interruptible(&port->waitqueue);
-
 		/* Let the app know the port is going down. */
 		send_sigio_to_port(port);
+
+		/* Do this after sigio is actually sent */
+		port->guest_connected = false;
+		port->host_connected = false;
+
+		wake_up_interruptible(&port->waitqueue);
 	}
 
 	if (is_console_port(port)) {
@@ -1563,6 +1581,14 @@
 	 */
 	port->portdev = NULL;
 
+	sysfs_remove_group(&port->dev->kobj, &port_attribute_group);
+	device_destroy(pdrvdata.class, port->dev->devt);
+	cdev_del(port->cdev);
+
+	kfree(port->name);
+
+	debugfs_remove(port->debugfs_file);
+
 	/*
 	 * Locks around here are not necessary - a port can't be
 	 * opened after we removed the port struct from ports_list
diff --git a/drivers/clk/clk-wm831x.c b/drivers/clk/clk-wm831x.c
index 16ed068..917a3ab 100644
--- a/drivers/clk/clk-wm831x.c
+++ b/drivers/clk/clk-wm831x.c
@@ -360,6 +360,8 @@
 	if (!clkdata)
 		return -ENOMEM;
 
+	clkdata->wm831x = wm831x;
+
 	/* XTAL_ENA can only be set via OTP/InstantConfig so just read once */
 	ret = wm831x_reg_read(wm831x, WM831X_CLOCK_CONTROL_2);
 	if (ret < 0) {
diff --git a/drivers/clk/versatile/clk-icst.c b/drivers/clk/versatile/clk-icst.c
index 67ccf4a..f5e4c21 100644
--- a/drivers/clk/versatile/clk-icst.c
+++ b/drivers/clk/versatile/clk-icst.c
@@ -107,7 +107,7 @@
 
 	vco = icst_hz_to_vco(icst->params, rate);
 	icst->rate = icst_hz(icst->params, vco);
-	vco_set(icst->vcoreg, icst->lockreg, vco);
+	vco_set(icst->lockreg, icst->vcoreg, vco);
 	return 0;
 }
 
diff --git a/drivers/clk/x86/Makefile b/drivers/clk/x86/Makefile
index 0478138..0cfbd90 100644
--- a/drivers/clk/x86/Makefile
+++ b/drivers/clk/x86/Makefile
@@ -1,2 +1,3 @@
 clk-x86-lpss-objs		:= clk-lpt.o
 obj-$(CONFIG_X86_INTEL_LPSS)	+= clk-x86-lpss.o
+obj-$(CONFIG_X86_INTEL_OSC_CLK)	+= clk-osc.o
diff --git a/drivers/clk/x86/clk-osc.c b/drivers/clk/x86/clk-osc.c
new file mode 100644
index 0000000..2c7e701
--- /dev/null
+++ b/drivers/clk/x86/clk-osc.c
@@ -0,0 +1,254 @@
+/*
+ * Intel OSC clocks.
+ *
+ * Copyright (C) 2014, Intel Corporation
+ * Authors: Junxiao Chang <junxiao.chang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/module.h>
+
+#define CCU_OSC_CTL_OFF		0x404
+#define OSC_CLOCK_RATE		19200
+#define OSC_CLOCK_COUNT		5
+#define to_clk_osc(_hw) container_of(_hw, struct osc_clk_data, hw)
+
+struct osc_clk_data {
+	void __iomem *oscaddr;
+	int id;
+	unsigned int rate;
+	unsigned int div;
+	struct clk_hw hw;
+	struct clk *clk;
+};
+
+static unsigned long clk_osc_recalc_rate(struct clk_hw *hw,
+				      unsigned long parent_rate)
+{
+	struct osc_clk_data *pdata = to_clk_osc(hw);
+	unsigned int reg_osc_ctrl;
+	unsigned int div;
+	unsigned long rate;
+
+	reg_osc_ctrl = readl(pdata->oscaddr);
+	div = reg_osc_ctrl & (BIT(1) | BIT(0));
+
+	/*
+	 * Bit 0:1 is div ctrl:
+	 * 00: div 4
+	 * 01: div 2
+	 * 10: div 1
+	 * 11: x2
+	 */
+	if (div == 0)
+		rate = parent_rate / 4;
+	else if (div == 1)
+		rate = parent_rate / 2;
+	else if (div == 2)
+		rate = parent_rate;
+	else if (div == 3)
+		rate = parent_rate * 2;
+
+	return rate;
+}
+
+static long clk_osc_round_rate(struct clk_hw *hw, unsigned long rate,
+			    unsigned long *prate)
+{
+	struct osc_clk_data *pdata = to_clk_osc(hw);
+	unsigned long round;
+
+	/* OSC supports div4, div2, div1, x2. */
+	if (rate > *prate * 2)
+		pr_warn("%s req rate: %ld. Max osc clk rate: %ld\n",
+				__func__, rate, *prate * 2);
+
+	/*
+	 * Bit 0:1 is div ctrl:
+	 * 00: div 4
+	 * 01: div 2
+	 * 10: div 1
+	 * 11: x2
+	 */
+	if (rate <= *prate / 4) {
+		round = *prate / 4;
+		pdata->div = 0;
+	} else if (rate <= *prate / 2) {
+		round = *prate / 2;
+		pdata->div = 1;
+	} else if (rate <= *prate) {
+		round = *prate;
+		pdata->div = 2;
+	} else if (rate <= *prate * 2) {
+		round = *prate * 2;
+		pdata->div = 3;
+	} else {
+		/* Set to default 2, div 1 */
+		round = *prate;
+		pdata->div = 2;
+	}
+
+	pr_debug("%s: rate:%ld, parent rate:%ld, round rate:%ld.\n",
+			__func__, rate, *prate, round);
+
+	return round;
+}
+
+static void clk_osc_disable_unused(struct clk_hw *hw)
+{
+	/*
+	 * This API is to prevent clk framework disabling unused clks.
+	 * By default, some clks are turned on during BIOS init, if kernel
+	 * driver needs these clks and didn't turn on clks in its driver,
+	 * new clk framework might impact it.
+	 * Linux clk framework will turn off unused clks during bootup.
+	 * This hook is added to prevent clk framework turns off unused clks.
+	 * If all drivers could handle clks initialization well in future, we
+	 * could remove this "disabled_unused" hook.
+	 */
+	return;
+}
+
+static int clk_osc_set_rate(struct clk_hw *hw, unsigned long rate,
+			 unsigned long parent_rate)
+{
+	struct osc_clk_data *pdata = to_clk_osc(hw);
+	unsigned int reg_osc_ctrl;
+
+	reg_osc_ctrl = readl(pdata->oscaddr);
+	reg_osc_ctrl &= ~(BIT(0) | BIT(1));
+
+	/* div has been saved in round_rate */
+	reg_osc_ctrl |= pdata->div;
+
+	writel(reg_osc_ctrl, pdata->oscaddr);
+	return 0;
+}
+
+static int clk_osc_is_enabled(struct clk_hw *hw)
+{
+	struct osc_clk_data *pdata = to_clk_osc(hw);
+	unsigned int reg_osc_ctrl;
+
+	reg_osc_ctrl = readl(pdata->oscaddr);
+	if (reg_osc_ctrl & BIT(31))
+		return 1;
+
+	return 0;
+}
+
+static int clk_osc_enable(struct clk_hw *hw)
+{
+	struct osc_clk_data *pdata = to_clk_osc(hw);
+	unsigned int reg_osc_ctrl;
+
+	reg_osc_ctrl = readl(pdata->oscaddr);
+	reg_osc_ctrl |= BIT(31);
+
+	writel(reg_osc_ctrl, pdata->oscaddr);
+	return 0;
+}
+
+static void clk_osc_disable(struct clk_hw *hw)
+{
+	struct osc_clk_data *pdata = to_clk_osc(hw);
+	unsigned int reg_osc_ctrl;
+
+	reg_osc_ctrl = readl(pdata->oscaddr);
+	reg_osc_ctrl &= ~(BIT(31));
+
+	writel(reg_osc_ctrl, pdata->oscaddr);
+	return;
+}
+
+const struct clk_ops clk_osc_ops = {
+	.recalc_rate = clk_osc_recalc_rate,
+	.round_rate = clk_osc_round_rate,
+	.set_rate = clk_osc_set_rate,
+	.is_enabled = clk_osc_is_enabled,
+	.disable_unused = clk_osc_disable_unused,
+	.enable = clk_osc_enable,
+	.disable = clk_osc_disable,
+};
+
+struct clk *ccu_osc_clk_register(const char *name, const char *parent, void __iomem *oscaddr, int id)
+{
+	struct osc_clk_data *pdata;
+	struct clk *clk;
+	struct clk_init_data init;
+
+	pdata = kzalloc(sizeof(struct osc_clk_data), GFP_KERNEL);
+	if (pdata == NULL)
+		return NULL;
+
+	init.name = name;
+	init.ops = &clk_osc_ops;
+	init.flags = CLK_IS_BASIC;
+	init.parent_names = (parent ? &parent : NULL);
+	init.num_parents = (parent ? 1 : 0);
+
+	pdata->oscaddr = oscaddr;
+	pdata->id = id;
+	pdata->hw.init = &init;
+
+	clk = clk_register(NULL, &pdata->hw);
+	if (IS_ERR(clk)) {
+		kfree(pdata);
+		pr_err("%s:clk register error:%p\n", __func__, clk);
+	}
+
+	return clk;
+}
+
+/*
+ * ccu_osc_clk_init
+ * ccu is in same PCI device with PMU. CCU address is at offset 0x800.
+ * ccu's inialization is called from PMU init.
+ */
+int ccu_osc_clk_init(void __iomem *ccubase)
+{
+	struct clk *clk;
+	int i, ret;
+	char name[12];
+
+	pr_debug("%s entry\n", __func__);
+
+	clk = clk_register_fixed_rate(NULL, "clk-osc", NULL,
+			CLK_IS_ROOT, OSC_CLOCK_RATE);
+	if (IS_ERR(clk)) {
+		pr_err("%s:clk register fail.\n", __func__);
+		return -1;
+	}
+	clk_register_clkdev(clk, "clk-osc", NULL);
+
+	for (i = 0; i < OSC_CLOCK_COUNT; i++) {
+		memset(name, 0, sizeof(name));
+		sprintf(name, "osc.%d", i);
+		clk = ccu_osc_clk_register(name, "clk-osc", ccubase
+				+ CCU_OSC_CTL_OFF + i * 4, i);
+		if (!IS_ERR(clk))
+			ret = clk_register_clkdev(clk, name, NULL);
+	}
+
+	return 0;
+}
+
+/*
+ * ccu_osc_clk_uninit
+ * When kernel is trying to remove PMU driver, return -1 to prevent it.
+ * clk framework doesn't have a de-register function, so clk driver couldn't
+ * be stopped. Currently PMU driver will not be removed, this function is to
+ * double confirm CCU/OSC resource will not be free.
+ */
+int ccu_osc_clk_uninit(void)
+{
+	pr_warn("OSC clk resources couldn't be released.\n");
+	return -1;
+}
diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c
index ab09ed3..6b02edd 100644
--- a/drivers/clocksource/dw_apb_timer_of.c
+++ b/drivers/clocksource/dw_apb_timer_of.c
@@ -44,7 +44,7 @@
 	u32 irq, rate;
 
 	irq = irq_of_parse_and_map(event_timer, 0);
-	if (irq == NO_IRQ)
+	if (irq == 0)
 		panic("No IRQ for clock event timer");
 
 	timer_get_base_and_rate(event_timer, &iobase, &rate);
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index 08ae128..c73fc2b 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -65,6 +65,7 @@
 
 	msg = (struct cn_msg *)buffer;
 	ev = (struct proc_event *)msg->data;
+	memset(&ev->event_data, 0, sizeof(ev->event_data));
 	get_seq(&msg->seq, &ev->cpu);
 	ktime_get_ts(&ts); /* get high res monotonic timestamp */
 	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -80,6 +81,7 @@
 	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
 	msg->ack = 0; /* not used */
 	msg->len = sizeof(*ev);
+	msg->flags = 0; /* not used */
 	/*  If cn_netlink_send() failed, the data is not sent */
 	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }
@@ -96,6 +98,7 @@
 
 	msg = (struct cn_msg *)buffer;
 	ev = (struct proc_event *)msg->data;
+	memset(&ev->event_data, 0, sizeof(ev->event_data));
 	get_seq(&msg->seq, &ev->cpu);
 	ktime_get_ts(&ts); /* get high res monotonic timestamp */
 	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -106,6 +109,7 @@
 	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
 	msg->ack = 0; /* not used */
 	msg->len = sizeof(*ev);
+	msg->flags = 0; /* not used */
 	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }
 
@@ -122,6 +126,7 @@
 
 	msg = (struct cn_msg *)buffer;
 	ev = (struct proc_event *)msg->data;
+	memset(&ev->event_data, 0, sizeof(ev->event_data));
 	ev->what = which_id;
 	ev->event_data.id.process_pid = task->pid;
 	ev->event_data.id.process_tgid = task->tgid;
@@ -145,6 +150,7 @@
 	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
 	msg->ack = 0; /* not used */
 	msg->len = sizeof(*ev);
+	msg->flags = 0; /* not used */
 	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }
 
@@ -160,6 +166,7 @@
 
 	msg = (struct cn_msg *)buffer;
 	ev = (struct proc_event *)msg->data;
+	memset(&ev->event_data, 0, sizeof(ev->event_data));
 	get_seq(&msg->seq, &ev->cpu);
 	ktime_get_ts(&ts); /* get high res monotonic timestamp */
 	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -170,6 +177,7 @@
 	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
 	msg->ack = 0; /* not used */
 	msg->len = sizeof(*ev);
+	msg->flags = 0; /* not used */
 	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }
 
@@ -185,6 +193,7 @@
 
 	msg = (struct cn_msg *)buffer;
 	ev = (struct proc_event *)msg->data;
+	memset(&ev->event_data, 0, sizeof(ev->event_data));
 	get_seq(&msg->seq, &ev->cpu);
 	ktime_get_ts(&ts); /* get high res monotonic timestamp */
 	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -203,6 +212,7 @@
 	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
 	msg->ack = 0; /* not used */
 	msg->len = sizeof(*ev);
+	msg->flags = 0; /* not used */
 	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }
 
@@ -218,6 +228,7 @@
 
 	msg = (struct cn_msg *)buffer;
 	ev = (struct proc_event *)msg->data;
+	memset(&ev->event_data, 0, sizeof(ev->event_data));
 	get_seq(&msg->seq, &ev->cpu);
 	ktime_get_ts(&ts); /* get high res monotonic timestamp */
 	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -229,6 +240,7 @@
 	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
 	msg->ack = 0; /* not used */
 	msg->len = sizeof(*ev);
+	msg->flags = 0; /* not used */
 	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }
 
@@ -244,6 +256,7 @@
 
 	msg = (struct cn_msg *)buffer;
 	ev = (struct proc_event *)msg->data;
+	memset(&ev->event_data, 0, sizeof(ev->event_data));
 	get_seq(&msg->seq, &ev->cpu);
 	ktime_get_ts(&ts); /* get high res monotonic timestamp */
 	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -254,6 +267,7 @@
 	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
 	msg->ack = 0; /* not used */
 	msg->len = sizeof(*ev);
+	msg->flags = 0; /* not used */
 	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }
 
@@ -269,6 +283,7 @@
 
 	msg = (struct cn_msg *)buffer;
 	ev = (struct proc_event *)msg->data;
+	memset(&ev->event_data, 0, sizeof(ev->event_data));
 	get_seq(&msg->seq, &ev->cpu);
 	ktime_get_ts(&ts); /* get high res monotonic timestamp */
 	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -281,6 +296,7 @@
 	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
 	msg->ack = 0; /* not used */
 	msg->len = sizeof(*ev);
+	msg->flags = 0; /* not used */
 	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }
 
@@ -304,6 +320,7 @@
 
 	msg = (struct cn_msg *)buffer;
 	ev = (struct proc_event *)msg->data;
+	memset(&ev->event_data, 0, sizeof(ev->event_data));
 	msg->seq = rcvd_seq;
 	ktime_get_ts(&ts); /* get high res monotonic timestamp */
 	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -313,6 +330,7 @@
 	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
 	msg->ack = rcvd_ack + 1;
 	msg->len = sizeof(*ev);
+	msg->flags = 0; /* not used */
 	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }
 
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index 6ecfa75..0daa11e 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -157,17 +157,18 @@
 static void cn_rx_skb(struct sk_buff *__skb)
 {
 	struct nlmsghdr *nlh;
-	int err;
 	struct sk_buff *skb;
+	int len, err;
 
 	skb = skb_get(__skb);
 
 	if (skb->len >= NLMSG_HDRLEN) {
 		nlh = nlmsg_hdr(skb);
+		len = nlmsg_len(nlh);
 
-		if (nlh->nlmsg_len < sizeof(struct cn_msg) ||
+		if (len < (int)sizeof(struct cn_msg) ||
 		    skb->len < nlh->nlmsg_len ||
-		    nlh->nlmsg_len > CONNECTOR_MAX_MSG_SIZE) {
+		    len > CONNECTOR_MAX_MSG_SIZE) {
 			kfree_skb(skb);
 			return;
 		}
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index 6bd63d6..2a9d25e 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -58,6 +58,21 @@
 	  By enabling this option the acpi_cpufreq driver provides the old
 	  entry in addition to the new boost ones, for compatibility reasons.
 
+config X86_SFI_CPUFREQ
+	tristate "SFI Processor P-States driver"
+	select CPU_FREQ_TABLE
+	depends on SFI
+	help
+	  This driver adds a CPUFreq driver which utilizes the SFI
+	  Processor Performance States enumeration.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called sfi-cpufreq.
+
+	  For details, take a look at <file:Documentation/cpu-freq/>.
+
+	  If in doubt, say N.
+
 config ELAN_CPUFREQ
 	tristate "AMD Elan SC400 and SC410"
 	select CPU_FREQ_TABLE
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 52647cd..788bf12 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -26,6 +26,7 @@
 
 obj-$(CONFIG_X86_ACPI_CPUFREQ)		+= acpi-cpufreq.o mperf.o
 obj-$(CONFIG_X86_POWERNOW_K8)		+= powernow-k8.o
+obj-$(CONFIG_X86_SFI_CPUFREQ)		+= sfi-cpufreq.o mperf.o
 obj-$(CONFIG_X86_PCC_CPUFREQ)		+= pcc-cpufreq.o
 obj-$(CONFIG_X86_POWERNOW_K6)		+= powernow-k6.o
 obj-$(CONFIG_X86_POWERNOW_K7)		+= powernow-k7.o
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 450588a..8af6813 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -31,6 +31,7 @@
 #include <linux/completion.h>
 #include <linux/mutex.h>
 #include <linux/syscore_ops.h>
+#include <linux/pm_qos.h>
 
 #include <trace/events/power.h>
 
@@ -41,6 +42,7 @@
  */
 static struct cpufreq_driver *cpufreq_driver;
 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
+static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
 #ifdef CONFIG_HOTPLUG_CPU
 /* This one keeps track of the previously set governor of a removed CPU */
 static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
@@ -744,11 +746,8 @@
 				     struct cpufreq_policy *policy,
 				     struct device *dev)
 {
-	struct cpufreq_policy new_policy;
 	struct freq_attr **drv_attr;
-	unsigned long flags;
 	int ret = 0;
-	unsigned int j;
 
 	/* prepare interface data */
 	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
@@ -780,17 +779,23 @@
 			goto err_out_kobj_put;
 	}
 
-	write_lock_irqsave(&cpufreq_driver_lock, flags);
-	for_each_cpu(j, policy->cpus) {
-		per_cpu(cpufreq_cpu_data, j) = policy;
-		per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
-	}
-	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
 	ret = cpufreq_add_dev_symlink(cpu, policy);
 	if (ret)
 		goto err_out_kobj_put;
 
+	return ret;
+
+err_out_kobj_put:
+	kobject_put(&policy->kobj);
+	wait_for_completion(&policy->kobj_unregister);
+	return ret;
+}
+
+static void cpufreq_init_policy(struct cpufreq_policy *policy)
+{
+	struct cpufreq_policy new_policy;
+	int ret = 0;
+
 	memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
 	/* assure that the starting sequence is run in __cpufreq_set_policy */
 	policy->governor = NULL;
@@ -805,17 +810,11 @@
 		if (cpufreq_driver->exit)
 			cpufreq_driver->exit(policy);
 	}
-	return ret;
-
-err_out_kobj_put:
-	kobject_put(&policy->kobj);
-	wait_for_completion(&policy->kobj_unregister);
-	return ret;
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
 static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling,
-				  struct device *dev)
+				  struct device *dev, bool frozen)
 {
 	struct cpufreq_policy *policy;
 	int ret = 0, has_target = !!cpufreq_driver->target;
@@ -843,26 +842,68 @@
 		__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
 	}
 
-	ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
-	if (ret) {
+	/* Don't touch sysfs links during light-weight init */
+	if (frozen) {
+		/* Drop the extra refcount that we took above */
 		cpufreq_cpu_put(policy);
-		return ret;
+		return 0;
 	}
 
-	return 0;
+	ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
+	if (ret)
+		cpufreq_cpu_put(policy);
+
+	return ret;
 }
 #endif
 
-/**
- * cpufreq_add_dev - add a CPU device
- *
- * Adds the cpufreq interface for a CPU device.
- *
- * The Oracle says: try running cpufreq registration/unregistration concurrently
- * with with cpu hotplugging and all hell will break loose. Tried to clean this
- * mess up, but more thorough testing is needed. - Mathieu
- */
-static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
+static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
+{
+	struct cpufreq_policy *policy;
+	unsigned long flags;
+
+	write_lock_irqsave(&cpufreq_driver_lock, flags);
+
+	policy = per_cpu(cpufreq_cpu_data_fallback, cpu);
+
+	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+	return policy;
+}
+
+static struct cpufreq_policy *cpufreq_policy_alloc(void)
+{
+	struct cpufreq_policy *policy;
+
+	policy = kzalloc(sizeof(*policy), GFP_KERNEL);
+	if (!policy)
+		return NULL;
+
+	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
+		goto err_free_policy;
+
+	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
+		goto err_free_cpumask;
+
+	return policy;
+
+err_free_cpumask:
+	free_cpumask_var(policy->cpus);
+err_free_policy:
+	kfree(policy);
+
+	return NULL;
+}
+
+static void cpufreq_policy_free(struct cpufreq_policy *policy)
+{
+	free_cpumask_var(policy->related_cpus);
+	free_cpumask_var(policy->cpus);
+	kfree(policy);
+}
+
+static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
+			     bool frozen)
 {
 	unsigned int j, cpu = dev->id;
 	int ret = -ENOMEM;
@@ -876,13 +917,15 @@
 	if (cpu_is_offline(cpu))
 		return 0;
 
-	pr_debug("adding CPU %u\n", cpu);
+	pr_debug("adding CPU %u frozen %d\n", cpu, frozen);
 
 #ifdef CONFIG_SMP
 	/* check whether a different CPU already registered this
 	 * CPU because it is in the same boat. */
 	policy = cpufreq_cpu_get(cpu);
 	if (unlikely(policy)) {
+		/* according present policy to align all the cpus frequencies */
+		cpufreq_driver->target(policy, policy->cur, CPUFREQ_RELATION_H);
 		cpufreq_cpu_put(policy);
 		return 0;
 	}
@@ -894,7 +937,8 @@
 		struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
 		if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) {
 			read_unlock_irqrestore(&cpufreq_driver_lock, flags);
-			return cpufreq_add_policy_cpu(cpu, sibling, dev);
+			return cpufreq_add_policy_cpu(cpu, sibling, dev,
+						      frozen);
 		}
 	}
 	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
@@ -906,16 +950,15 @@
 		goto module_out;
 	}
 
-	policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
+	if (frozen)
+		/* Restore the saved policy when doing light-weight init */
+		policy = cpufreq_policy_restore(cpu);
+	else
+		policy = cpufreq_policy_alloc();
+
 	if (!policy)
 		goto nomem_out;
 
-	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
-		goto err_free_policy;
-
-	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
-		goto err_free_cpumask;
-
 	policy->cpu = cpu;
 	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
 	cpumask_copy(policy->cpus, cpumask_of(cpu));
@@ -959,9 +1002,20 @@
 	}
 #endif
 
-	ret = cpufreq_add_dev_interface(cpu, policy, dev);
-	if (ret)
-		goto err_out_unregister;
+	write_lock_irqsave(&cpufreq_driver_lock, flags);
+	for_each_cpu(j, policy->cpus) {
+		per_cpu(cpufreq_cpu_data, j) = policy;
+		per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
+	}
+	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+	if (!frozen) {
+		ret = cpufreq_add_dev_interface(cpu, policy, dev);
+		if (ret)
+			goto err_out_unregister;
+	}
+
+	cpufreq_init_policy(policy);
 
 	kobject_uevent(&policy->kobj, KOBJ_ADD);
 	module_put(cpufreq_driver->owner);
@@ -971,8 +1025,11 @@
 
 err_out_unregister:
 	write_lock_irqsave(&cpufreq_driver_lock, flags);
-	for_each_cpu(j, policy->cpus)
+	for_each_cpu(j, policy->cpus) {
 		per_cpu(cpufreq_cpu_data, j) = NULL;
+		if (j != cpu)
+			per_cpu(cpufreq_policy_cpu, j) = -1;
+	}
 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
 	kobject_put(&policy->kobj);
@@ -980,21 +1037,34 @@
 
 err_set_policy_cpu:
 	per_cpu(cpufreq_policy_cpu, cpu) = -1;
-	free_cpumask_var(policy->related_cpus);
-err_free_cpumask:
-	free_cpumask_var(policy->cpus);
-err_free_policy:
-	kfree(policy);
+	cpufreq_policy_free(policy);
 nomem_out:
 	module_put(cpufreq_driver->owner);
 module_out:
 	return ret;
 }
 
+/**
+ * cpufreq_add_dev - add a CPU device
+ *
+ * Adds the cpufreq interface for a CPU device.
+ *
+ * The Oracle says: try running cpufreq registration/unregistration concurrently
+ * with with cpu hotplugging and all hell will break loose. Tried to clean this
+ * mess up, but more thorough testing is needed. - Mathieu
+ */
+static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
+{
+	return __cpufreq_add_dev(dev, sif, false);
+}
+
 static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
 {
 	int j;
 
+	if (cpu == policy->cpu)
+		return;
+
 	policy->last_cpu = policy->cpu;
 	policy->cpu = cpu;
 
@@ -1008,6 +1078,47 @@
 			CPUFREQ_UPDATE_POLICY_CPU, policy);
 }
 
+static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *data,
+					   unsigned int old_cpu, bool frozen)
+{
+	struct device *cpu_dev;
+	unsigned long flags;
+	int ret;
+
+	/* first sibling now owns the new sysfs dir */
+	cpu_dev = get_cpu_device(cpumask_first(data->cpus));
+	if (!cpu_dev) {
+		pr_err("%s: unable to get the cpu device\n", __func__);
+		return -ENODEV;
+	}
+
+	/* Don't touch sysfs files during light-weight tear-down */
+	if (frozen)
+		return cpu_dev->id;
+
+	sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
+	ret = kobject_move(&data->kobj, &cpu_dev->kobj);
+	if (ret) {
+		pr_err("%s: Failed to move kobj: %d", __func__, ret);
+
+		WARN_ON(lock_policy_rwsem_write(old_cpu));
+		cpumask_set_cpu(old_cpu, data->cpus);
+
+		write_lock_irqsave(&cpufreq_driver_lock, flags);
+		per_cpu(cpufreq_cpu_data, old_cpu) = data;
+		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+		unlock_policy_rwsem_write(old_cpu);
+
+		ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj,
+					"cpufreq");
+
+		return -EINVAL;
+	}
+
+	return cpu_dev->id;
+}
+
 /**
  * __cpufreq_remove_dev - remove a CPU device
  *
@@ -1015,22 +1126,28 @@
  * Caller should already have policy_rwsem in write mode for this CPU.
  * This routine frees the rwsem before returning.
  */
-static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
+static int __cpufreq_remove_dev(struct device *dev,
+				struct subsys_interface *sif, bool frozen)
 {
-	unsigned int cpu = dev->id, ret, cpus;
+	unsigned int cpu = dev->id, cpus;
+	int new_cpu;
 	unsigned long flags;
 	struct cpufreq_policy *data;
 	struct kobject *kobj;
 	struct completion *cmp;
-	struct device *cpu_dev;
+	int ret;
 
-	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
+	pr_debug("%s: unregistering CPU %u frozen %d\n", __func__, cpu, frozen);
 
 	write_lock_irqsave(&cpufreq_driver_lock, flags);
 
 	data = per_cpu(cpufreq_cpu_data, cpu);
 	per_cpu(cpufreq_cpu_data, cpu) = NULL;
 
+	/* Save the policy somewhere when doing a light-weight tear-down */
+	if (frozen)
+		per_cpu(cpufreq_cpu_data_fallback, cpu) = data;
+
 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
 	if (!data) {
@@ -1054,68 +1171,143 @@
 		cpumask_clear_cpu(cpu, data->cpus);
 	unlock_policy_rwsem_write(cpu);
 
-	if (cpu != data->cpu) {
+	if (cpu != data->cpu && !frozen) {
+		/* off lining a non mater CPU */
 		sysfs_remove_link(&dev->kobj, "cpufreq");
-	} else if (cpus > 1) {
-		/* first sibling now owns the new sysfs dir */
-		cpu_dev = get_cpu_device(cpumask_first(data->cpus));
-		sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
-		ret = kobject_move(&data->kobj, &cpu_dev->kobj);
-		if (ret) {
-			pr_err("%s: Failed to move kobj: %d", __func__, ret);
-
+	} else if (cpus > 1 && ((data->shared_type != CPUFREQ_SHARED_TYPE_ALL) || !frozen)) {
+		/* when in a shared_type policy don't promote a CPU on suspend path */
+		new_cpu = cpufreq_nominate_new_policy_cpu(data, cpu, frozen);
+		if (new_cpu >= 0) {
+			if (data->shared_type == CPUFREQ_SHARED_TYPE_ALL) {
+				/* when in a shared_type policy on hotplug path need to exit the driver on old CPU */
+				if (cpufreq_driver->exit)
+					cpufreq_driver->exit(data);
+				}
+			}
 			WARN_ON(lock_policy_rwsem_write(cpu));
-			cpumask_set_cpu(cpu, data->cpus);
-
-			write_lock_irqsave(&cpufreq_driver_lock, flags);
-			per_cpu(cpufreq_cpu_data, cpu) = data;
-			write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
+			update_policy_cpu(data, new_cpu);
 			unlock_policy_rwsem_write(cpu);
+			if (data->shared_type == CPUFREQ_SHARED_TYPE_ALL) {
+				/* when in a shared_type policy on hotplug path need to init the driver on new master CPU */
+				if (cpufreq_driver->init) {
+					ret = cpufreq_driver->init(data);
+					if (ret)
+						pr_debug("initialization failed during promotion from CPU%d to CPU%d\n", data->cpu, new_cpu);
 
-			ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj,
-					"cpufreq");
-			return -EINVAL;
+					cpumask_clear_cpu(cpu, data->cpus); /* init will restore data->cpus to default while we just removed cpu */
+			}
+			if (!frozen)
+				pr_debug("policy_kobj moved to cpu:%d from:%d\n", new_cpu, cpu);
 		}
-
-		WARN_ON(lock_policy_rwsem_write(cpu));
-		update_policy_cpu(data, cpu_dev->id);
-		unlock_policy_rwsem_write(cpu);
-		pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
-				__func__, cpu_dev->id, cpu);
 	}
 
-	if ((cpus == 1) && (cpufreq_driver->target))
-		__cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
+	if (data->shared_type == CPUFREQ_SHARED_TYPE_ALL) {
+		/* we have related CPUs */
+		if (frozen) {
+			/* when on suspend path we need to exit master CPUs */
+			if (cpu == data->cpu) {
+				/* we are on a master CPU */
+				if (cpufreq_driver->target)
+					__cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
 
-	pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
-	cpufreq_cpu_put(data);
+				/*
+				 * Perform the ->exit() even during light-weight tear-down,
+				 * since this is a core component, and is essential for the
+				 * subsequent light-weight ->init() to succeed.
+				 */
+				if (cpufreq_driver->exit)
+					cpufreq_driver->exit(data);
 
-	/* If cpu is last user of policy, free policy */
-	if (cpus == 1) {
-		lock_policy_rwsem_read(cpu);
-		kobj = &data->kobj;
-		cmp = &data->kobj_unregister;
-		unlock_policy_rwsem_read(cpu);
-		kobject_put(kobj);
+			}
+		} else {
+			if (cpus == 1) {
+				/* If cpu is last user of policy, free policy */
+				if (cpufreq_driver->target)
+					__cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
 
-		/* we need to make sure that the underlying kobj is actually
-		 * not referenced anymore by anybody before we proceed with
-		 * unloading.
-		 */
-		pr_debug("waiting for dropping of refcount\n");
-		wait_for_completion(cmp);
-		pr_debug("wait complete\n");
+				lock_policy_rwsem_read(cpu);
+				kobj = &data->kobj;
+				cmp = &data->kobj_unregister;
+				unlock_policy_rwsem_read(cpu);
+				kobject_put(kobj);
 
-		if (cpufreq_driver->exit)
-			cpufreq_driver->exit(data);
+				/*
+				 * We need to make sure that the underlying kobj is
+				 * actually not referenced anymore by anybody before we
+				 * proceed with unloading.
+				 */
+				pr_debug("waiting for dropping of refcount\n");
+				wait_for_completion(cmp);
+				pr_debug("wait complete\n");
 
-		free_cpumask_var(data->related_cpus);
-		free_cpumask_var(data->cpus);
-		kfree(data);
-	} else if (cpufreq_driver->target) {
-		__cpufreq_governor(data, CPUFREQ_GOV_START);
-		__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
+				/*
+				 * Perform the ->exit() even during light-weight tear-down,
+				 * since this is a core component, and is essential for the
+				 * subsequent light-weight ->init() to succeed.
+				 */
+				if (cpufreq_driver->exit)
+					cpufreq_driver->exit(data);
+
+				cpufreq_policy_free(data);
+			} else {
+				if (data) {
+					pr_debug("%s: removing link, cpu: %d\n", __func__, data->cpu);
+					cpufreq_cpu_put(data);
+				}
+
+				if (cpufreq_driver->target) {
+						__cpufreq_governor(data, CPUFREQ_GOV_START);
+						__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
+
+				}
+			}
+		}
+	} else {
+		/* we have'nt related CPUs */
+		/* If cpu is last user of policy, free policy */
+		if (cpus == 1) {
+			if (cpufreq_driver->target)
+				__cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
+
+			if (!frozen) {
+				lock_policy_rwsem_read(cpu);
+				kobj = &data->kobj;
+				cmp = &data->kobj_unregister;
+				unlock_policy_rwsem_read(cpu);
+				kobject_put(kobj);
+
+				/*
+				 * We need to make sure that the underlying kobj is
+				 * actually not referenced anymore by anybody before we
+				 * proceed with unloading.
+				 */
+				pr_debug("waiting for dropping of refcount\n");
+				wait_for_completion(cmp);
+				pr_debug("wait complete\n");
+			}
+
+			/*
+			 * Perform the ->exit() even during light-weight tear-down,
+			 * since this is a core component, and is essential for the
+			 * subsequent light-weight ->init() to succeed.
+			 */
+			if (cpufreq_driver->exit)
+				cpufreq_driver->exit(data);
+
+			if (!frozen)
+				cpufreq_policy_free(data);
+		} else {
+
+			if (!frozen) {
+				pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
+				cpufreq_cpu_put(data);
+			}
+
+			if (cpufreq_driver->target) {
+				__cpufreq_governor(data, CPUFREQ_GOV_START);
+				__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
+			}
+		}
 	}
 
 	per_cpu(cpufreq_policy_cpu, cpu) = -1;
@@ -1131,7 +1323,7 @@
 	if (cpu_is_offline(cpu))
 		return 0;
 
-	retval = __cpufreq_remove_dev(dev, sif);
+	retval = __cpufreq_remove_dev(dev, sif, false);
 	return retval;
 }
 
@@ -1684,9 +1876,14 @@
 				struct cpufreq_policy *policy)
 {
 	int ret = 0, failed = 1;
+	unsigned int pmin = policy->min;
+	unsigned int qmin = pm_qos_request(PM_QOS_CPU_FREQ_MIN);
 
-	pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
-		policy->min, policy->max);
+	pr_debug("setting new policy for CPU %u: %u - %u (%u) kHz\n", policy->cpu,
+		pmin, policy->max, qmin);
+
+	/* clamp the new policy to PM QoS limits */
+	policy->min = max(pmin, qmin);
 
 	memcpy(&policy->cpuinfo, &data->cpuinfo,
 				sizeof(struct cpufreq_cpuinfo));
@@ -1780,6 +1977,8 @@
 	}
 
 error_out:
+	/* restore the limits that the policy requested */
+	policy->min = pmin;
 	return ret;
 }
 
@@ -1843,19 +2042,26 @@
 {
 	unsigned int cpu = (unsigned long)hcpu;
 	struct device *dev;
+	bool frozen = false;
 
 	dev = get_cpu_device(cpu);
 	if (dev) {
-		switch (action) {
+
+		if (action & CPU_TASKS_FROZEN)
+			frozen = true;
+
+		switch (action & ~CPU_TASKS_FROZEN) {
 		case CPU_ONLINE:
-			cpufreq_add_dev(dev, NULL);
+			__cpufreq_add_dev(dev, NULL, frozen);
+			cpufreq_update_policy(cpu);
 			break;
+
 		case CPU_DOWN_PREPARE:
-		case CPU_UP_CANCELED_FROZEN:
-			__cpufreq_remove_dev(dev, NULL);
+			__cpufreq_remove_dev(dev, NULL, frozen);
 			break;
+
 		case CPU_DOWN_FAILED:
-			cpufreq_add_dev(dev, NULL);
+			__cpufreq_add_dev(dev, NULL, frozen);
 			break;
 		}
 	}
@@ -1866,6 +2072,28 @@
     .notifier_call = cpufreq_cpu_callback,
 };
 
+static int __cpuinit cpu_freq_notify(struct notifier_block *b,
+		unsigned long l, void *v)
+{
+	int cpu;
+
+	for_each_online_cpu(cpu) {
+		struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+		if (policy) {
+			cpufreq_update_policy(policy->cpu);
+			cpufreq_cpu_put(policy);
+		}
+	}
+
+	pr_debug("%s: Min cpufreq updated with the value %lu\n", __func__, l);
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block __refdata min_freq_notifier = {
+	.notifier_call = cpu_freq_notify,
+};
+
 /*********************************************************************
  *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
  *********************************************************************/
@@ -1973,7 +2201,7 @@
 
 static int __init cpufreq_core_init(void)
 {
-	int cpu;
+	int cpu, rc;
 
 	if (cpufreq_disabled())
 		return -ENODEV;
@@ -1987,6 +2215,10 @@
 	BUG_ON(!cpufreq_global_kobject);
 	register_syscore_ops(&cpufreq_syscore_ops);
 
+	rc = pm_qos_add_notifier(PM_QOS_CPU_FREQ_MIN,
+			&min_freq_notifier);
+	BUG_ON(rc);
+
 	return 0;
 }
 core_initcall(cpufreq_core_init);
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 0ceb2ef..f97cb3d 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -221,8 +221,8 @@
 	return count;
 }
 
-static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
-		size_t count)
+static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
+		const char *buf, size_t count)
 {
 	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
 	unsigned int input, j;
@@ -235,10 +235,10 @@
 	if (input > 1)
 		input = 1;
 
-	if (input == cs_tuners->ignore_nice) /* nothing to do */
+	if (input == cs_tuners->ignore_nice_load) /* nothing to do */
 		return count;
 
-	cs_tuners->ignore_nice = input;
+	cs_tuners->ignore_nice_load = input;
 
 	/* we need to re-evaluate prev_cpu_idle */
 	for_each_online_cpu(j) {
@@ -246,7 +246,7 @@
 		dbs_info = &per_cpu(cs_cpu_dbs_info, j);
 		dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
 					&dbs_info->cdbs.prev_cpu_wall, 0);
-		if (cs_tuners->ignore_nice)
+		if (cs_tuners->ignore_nice_load)
 			dbs_info->cdbs.prev_cpu_nice =
 				kcpustat_cpu(j).cpustat[CPUTIME_NICE];
 	}
@@ -279,7 +279,7 @@
 show_store_one(cs, sampling_down_factor);
 show_store_one(cs, up_threshold);
 show_store_one(cs, down_threshold);
-show_store_one(cs, ignore_nice);
+show_store_one(cs, ignore_nice_load);
 show_store_one(cs, freq_step);
 declare_show_sampling_rate_min(cs);
 
@@ -287,7 +287,7 @@
 gov_sys_pol_attr_rw(sampling_down_factor);
 gov_sys_pol_attr_rw(up_threshold);
 gov_sys_pol_attr_rw(down_threshold);
-gov_sys_pol_attr_rw(ignore_nice);
+gov_sys_pol_attr_rw(ignore_nice_load);
 gov_sys_pol_attr_rw(freq_step);
 gov_sys_pol_attr_ro(sampling_rate_min);
 
@@ -297,7 +297,7 @@
 	&sampling_down_factor_gov_sys.attr,
 	&up_threshold_gov_sys.attr,
 	&down_threshold_gov_sys.attr,
-	&ignore_nice_gov_sys.attr,
+	&ignore_nice_load_gov_sys.attr,
 	&freq_step_gov_sys.attr,
 	NULL
 };
@@ -313,7 +313,7 @@
 	&sampling_down_factor_gov_pol.attr,
 	&up_threshold_gov_pol.attr,
 	&down_threshold_gov_pol.attr,
-	&ignore_nice_gov_pol.attr,
+	&ignore_nice_load_gov_pol.attr,
 	&freq_step_gov_pol.attr,
 	NULL
 };
@@ -338,7 +338,7 @@
 	tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
 	tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD;
 	tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
-	tuners->ignore_nice = 0;
+	tuners->ignore_nice_load = 0;
 	tuners->freq_step = DEF_FREQUENCY_STEP;
 
 	dbs_data->tuners = tuners;
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 1361bd4..031e92b 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -26,7 +26,6 @@
 #include <linux/tick.h>
 #include <linux/types.h>
 #include <linux/workqueue.h>
-#include <linux/cpu.h>
 
 #include "cpufreq_governor.h"
 
@@ -84,9 +83,9 @@
 	unsigned int j;
 
 	if (dbs_data->cdata->governor == GOV_ONDEMAND)
-		ignore_nice = od_tuners->ignore_nice;
+		ignore_nice = od_tuners->ignore_nice_load;
 	else
-		ignore_nice = cs_tuners->ignore_nice;
+		ignore_nice = cs_tuners->ignore_nice_load;
 
 	policy = cdbs->cur_policy;
 
@@ -173,10 +172,8 @@
 	if (!all_cpus) {
 		__gov_queue_work(smp_processor_id(), dbs_data, delay);
 	} else {
-		get_online_cpus();
 		for_each_cpu(i, policy->cpus)
 			__gov_queue_work(i, dbs_data, delay);
-		put_online_cpus();
 	}
 }
 EXPORT_SYMBOL_GPL(gov_queue_work);
@@ -331,12 +328,12 @@
 		cs_tuners = dbs_data->tuners;
 		cs_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
 		sampling_rate = cs_tuners->sampling_rate;
-		ignore_nice = cs_tuners->ignore_nice;
+		ignore_nice = cs_tuners->ignore_nice_load;
 	} else {
 		od_tuners = dbs_data->tuners;
 		od_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
 		sampling_rate = od_tuners->sampling_rate;
-		ignore_nice = od_tuners->ignore_nice;
+		ignore_nice = od_tuners->ignore_nice_load;
 		od_ops = dbs_data->cdata->gov_ops;
 		io_busy = od_tuners->io_is_busy;
 	}
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index e16a961..0d9e6be 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -165,7 +165,7 @@
 
 /* Per policy Governers sysfs tunables */
 struct od_dbs_tuners {
-	unsigned int ignore_nice;
+	unsigned int ignore_nice_load;
 	unsigned int sampling_rate;
 	unsigned int sampling_down_factor;
 	unsigned int up_threshold;
@@ -175,7 +175,7 @@
 };
 
 struct cs_dbs_tuners {
-	unsigned int ignore_nice;
+	unsigned int ignore_nice_load;
 	unsigned int sampling_rate;
 	unsigned int sampling_down_factor;
 	unsigned int up_threshold;
diff --git a/drivers/cpufreq/cpufreq_interactive.c b/drivers/cpufreq/cpufreq_interactive.c
index 0654e40..6943838 100644
--- a/drivers/cpufreq/cpufreq_interactive.c
+++ b/drivers/cpufreq/cpufreq_interactive.c
@@ -77,6 +77,10 @@
 	int usage_count;
 	/* Hi speed to bump to from lo speed when load burst (default max) */
 	unsigned int hispeed_freq;
+	/*
+	 * Frequency to which a touch boost takes the cpus to
+	 */
+	unsigned long touchboost_freq;
 	/* Go to hi speed when CPU load at or above this value. */
 #define DEFAULT_GO_HISPEED_LOAD 99
 	unsigned long go_hispeed_load;
@@ -107,6 +111,10 @@
 	int boostpulse_duration_val;
 	/* End time of boost pulse in ktime converted to usecs */
 	u64 boostpulse_endtime;
+	/* Duration of a touchboost pulse in usecs */
+	int touchboostpulse_duration_val;
+	/* End time of touchboost pulse in ktime converted to usecs */
+	u64 touchboostpulse_endtime;
 	bool boosted;
 	/*
 	 * Max additional time to wait in idle, beyond timer_rate, at speeds
@@ -375,6 +383,7 @@
 {
 	u64 now;
 	unsigned int delta_time;
+	unsigned int cur;
 	u64 cputime_speedadj;
 	int cpu_load;
 	struct cpufreq_interactive_cpuinfo *pcpu =
@@ -403,6 +412,9 @@
 	spin_lock_irqsave(&pcpu->target_freq_lock, flags);
 	do_div(cputime_speedadj, delta_time);
 	loadadjfreq = (unsigned int)cputime_speedadj * 100;
+	cur = pcpu->policy->cur;
+	if (cur == 0)
+		goto rearm;
 	cpu_load = loadadjfreq / pcpu->policy->cur;
 	tunables->boosted = tunables->boost_val || now < tunables->boostpulse_endtime;
 
@@ -417,6 +429,10 @@
 		}
 	} else {
 		new_freq = choose_freq(pcpu, loadadjfreq);
+		if (now < tunables->touchboostpulse_endtime) {
+			if (new_freq < tunables->touchboost_freq)
+				new_freq = tunables->touchboost_freq;
+			}
 		if (new_freq > tunables->hispeed_freq &&
 				pcpu->target_freq < tunables->hispeed_freq)
 			new_freq = tunables->hispeed_freq;
@@ -532,7 +548,8 @@
 		 * min indefinitely.  This should probably be a quirk of
 		 * the CPUFreq driver.
 		 */
-		if (!pending)
+		/* No need to reschdule on Merrifield platform */
+		if (!pending && (boot_cpu_data.x86_model != 0x4a))
 			cpufreq_interactive_timer_resched(pcpu);
 	}
 
@@ -569,7 +586,6 @@
 	cpumask_t tmp_mask;
 	unsigned long flags;
 	struct cpufreq_interactive_cpuinfo *pcpu;
-
 	while (1) {
 		set_current_state(TASK_INTERRUPTIBLE);
 		spin_lock_irqsave(&speedchange_cpumask_lock, flags);
@@ -609,14 +625,14 @@
 				if (pjcpu->target_freq > max_freq)
 					max_freq = pjcpu->target_freq;
 			}
-
-			if (max_freq != pcpu->policy->cur)
+			if (max_freq != pcpu->policy->cur) {
 				__cpufreq_driver_target(pcpu->policy,
 							max_freq,
 							CPUFREQ_RELATION_H);
-			trace_cpufreq_interactive_setspeed(cpu,
-						     pcpu->target_freq,
-						     pcpu->policy->cur);
+				trace_cpufreq_interactive_setspeed(cpu,
+							max_freq,
+							pcpu->policy->cur);
+			}
 
 			up_read(&pcpu->enable_sem);
 		}
@@ -666,6 +682,38 @@
 		wake_up_process(speedchange_task);
 }
 
+static void cpufreq_interactive_touchboost(void)
+{
+	int i;
+	int anyboost = 0;
+	unsigned long flags;
+	struct cpufreq_interactive_cpuinfo *pcpu;
+	struct cpufreq_interactive_tunables *tunables;
+
+	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
+	for_each_online_cpu(i) {
+		pcpu = &per_cpu(cpuinfo, i);
+		tunables = pcpu->policy->governor_data;
+
+	if (pcpu->target_freq < tunables->touchboost_freq) {
+			pcpu->target_freq = tunables->touchboost_freq;
+			cpumask_set_cpu(i, &speedchange_cpumask);
+			pcpu->hispeed_validate_time =
+					ktime_to_us(ktime_get());
+			anyboost = 1;
+		}
+	/* no need to set floor freq to touchboost freq as floor
+	freq is set only if the new_freq is more than
+	hispeed_freq which is not the case here*/
+
+	}
+
+	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
+	if (anyboost)
+		wake_up_process(speedchange_task);
+}
+
+
 static int cpufreq_interactive_notifier(
 	struct notifier_block *nb, unsigned long val, void *data)
 {
@@ -874,6 +922,25 @@
 	return count;
 }
 
+static ssize_t show_touchboost_freq(struct cpufreq_interactive_tunables
+		*tunables, char *buf)
+{
+	return sprintf(buf, "%lu\n", tunables->touchboost_freq);
+}
+
+static ssize_t store_touchboost_freq(struct cpufreq_interactive_tunables
+				*tunables, const char *buf, size_t count)
+{
+	int ret;
+	unsigned long val;
+
+	ret = kstrtoul(buf, 0, &val);
+	if (ret < 0)
+		return ret;
+	tunables->touchboost_freq = val;
+	return count;
+}
+
 static ssize_t show_min_sample_time(struct cpufreq_interactive_tunables
 		*tunables, char *buf)
 {
@@ -1000,6 +1067,44 @@
 	return count;
 }
 
+static ssize_t store_touchboostpulse(struct cpufreq_interactive_tunables
+		*tunables, const char *buf, size_t count)
+{
+	int ret;
+	unsigned long val;
+
+	ret = kstrtoul(buf, 0, &val);
+	if (ret < 0)
+		return ret;
+
+	tunables->touchboostpulse_endtime = ktime_to_us(ktime_get())
+				+ tunables->touchboostpulse_duration_val;
+	trace_cpufreq_interactive_boost("pulse");
+	cpufreq_interactive_touchboost();
+	return count;
+}
+
+static ssize_t show_touchboostpulse_duration(struct cpufreq_interactive_tunables
+		*tunables, char *buf)
+{
+	return sprintf(buf, "%d\n", tunables->touchboostpulse_duration_val);
+}
+
+static ssize_t store_touchboostpulse_duration(
+	struct cpufreq_interactive_tunables *tunables,
+	const char *buf, size_t count)
+{
+	int ret;
+	unsigned long val;
+
+	ret = kstrtoul(buf, 0, &val);
+	if (ret < 0)
+		return ret;
+
+	tunables->touchboostpulse_duration_val = val;
+	return count;
+}
+
 static ssize_t show_io_is_busy(struct cpufreq_interactive_tunables *tunables,
 		char *buf)
 {
@@ -1059,12 +1164,15 @@
 show_store_gov_pol_sys(above_hispeed_delay);
 show_store_gov_pol_sys(hispeed_freq);
 show_store_gov_pol_sys(go_hispeed_load);
+show_store_gov_pol_sys(touchboost_freq);
 show_store_gov_pol_sys(min_sample_time);
 show_store_gov_pol_sys(timer_rate);
 show_store_gov_pol_sys(timer_slack);
 show_store_gov_pol_sys(boost);
 store_gov_pol_sys(boostpulse);
 show_store_gov_pol_sys(boostpulse_duration);
+store_gov_pol_sys(touchboostpulse);
+show_store_gov_pol_sys(touchboostpulse_duration);
 show_store_gov_pol_sys(io_is_busy);
 
 #define gov_sys_attr_rw(_name)						\
@@ -1083,11 +1191,13 @@
 gov_sys_pol_attr_rw(above_hispeed_delay);
 gov_sys_pol_attr_rw(hispeed_freq);
 gov_sys_pol_attr_rw(go_hispeed_load);
+gov_sys_pol_attr_rw(touchboost_freq);
 gov_sys_pol_attr_rw(min_sample_time);
 gov_sys_pol_attr_rw(timer_rate);
 gov_sys_pol_attr_rw(timer_slack);
 gov_sys_pol_attr_rw(boost);
 gov_sys_pol_attr_rw(boostpulse_duration);
+gov_sys_pol_attr_rw(touchboostpulse_duration);
 gov_sys_pol_attr_rw(io_is_busy);
 
 static struct global_attr boostpulse_gov_sys =
@@ -1096,12 +1206,19 @@
 static struct freq_attr boostpulse_gov_pol =
 	__ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_pol);
 
+static struct global_attr touchboostpulse_gov_sys =
+	__ATTR(touchboostpulse, 0200, NULL, store_touchboostpulse_gov_sys);
+
+static struct freq_attr touchboostpulse_gov_pol =
+	__ATTR(touchboostpulse, 0200, NULL, store_touchboostpulse_gov_pol);
+
 /* One Governor instance for entire system */
 static struct attribute *interactive_attributes_gov_sys[] = {
 	&target_loads_gov_sys.attr,
 	&above_hispeed_delay_gov_sys.attr,
 	&hispeed_freq_gov_sys.attr,
 	&go_hispeed_load_gov_sys.attr,
+	&touchboost_freq_gov_sys.attr,
 	&min_sample_time_gov_sys.attr,
 	&timer_rate_gov_sys.attr,
 	&timer_slack_gov_sys.attr,
@@ -1109,6 +1226,8 @@
 	&boostpulse_gov_sys.attr,
 	&boostpulse_duration_gov_sys.attr,
 	&io_is_busy_gov_sys.attr,
+	&touchboostpulse_gov_sys.attr,
+	&touchboostpulse_duration_gov_sys.attr,
 	NULL,
 };
 
@@ -1123,6 +1242,7 @@
 	&above_hispeed_delay_gov_pol.attr,
 	&hispeed_freq_gov_pol.attr,
 	&go_hispeed_load_gov_pol.attr,
+	&touchboost_freq_gov_pol.attr,
 	&min_sample_time_gov_pol.attr,
 	&timer_rate_gov_pol.attr,
 	&timer_slack_gov_pol.attr,
@@ -1130,6 +1250,8 @@
 	&boostpulse_gov_pol.attr,
 	&boostpulse_duration_gov_pol.attr,
 	&io_is_busy_gov_pol.attr,
+	&touchboostpulse_gov_pol.attr,
+	&touchboostpulse_duration_gov_pol.attr,
 	NULL,
 };
 
@@ -1182,6 +1304,8 @@
 		tunables = common_tunables;
 
 	WARN_ON(!tunables && (event != CPUFREQ_GOV_POLICY_INIT));
+	if (!tunables && (event != CPUFREQ_GOV_POLICY_INIT))
+		return -EINVAL;
 
 	switch (event) {
 	case CPUFREQ_GOV_POLICY_INIT:
@@ -1209,6 +1333,8 @@
 		tunables->min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
 		tunables->timer_rate = DEFAULT_TIMER_RATE;
 		tunables->boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
+		tunables->touchboostpulse_duration_val =
+				DEFAULT_MIN_SAMPLE_TIME;
 		tunables->timer_slack_val = DEFAULT_TIMER_SLACK;
 
 		spin_lock_init(&tunables->target_loads_lock);
@@ -1260,6 +1386,8 @@
 		if (!tunables->hispeed_freq)
 			tunables->hispeed_freq = policy->max;
 
+		if (!tunables->touchboost_freq)
+			tunables->touchboost_freq = policy->max;
 		for_each_cpu(j, policy->cpus) {
 			pcpu = &per_cpu(cpuinfo, j);
 			pcpu->policy = policy;
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 93eb5cb..c087347 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -403,8 +403,8 @@
 	return count;
 }
 
-static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
-		size_t count)
+static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
+		const char *buf, size_t count)
 {
 	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
 	unsigned int input;
@@ -419,10 +419,10 @@
 	if (input > 1)
 		input = 1;
 
-	if (input == od_tuners->ignore_nice) { /* nothing to do */
+	if (input == od_tuners->ignore_nice_load) { /* nothing to do */
 		return count;
 	}
-	od_tuners->ignore_nice = input;
+	od_tuners->ignore_nice_load = input;
 
 	/* we need to re-evaluate prev_cpu_idle */
 	for_each_online_cpu(j) {
@@ -430,7 +430,7 @@
 		dbs_info = &per_cpu(od_cpu_dbs_info, j);
 		dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
 			&dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy);
-		if (od_tuners->ignore_nice)
+		if (od_tuners->ignore_nice_load)
 			dbs_info->cdbs.prev_cpu_nice =
 				kcpustat_cpu(j).cpustat[CPUTIME_NICE];
 
@@ -461,7 +461,7 @@
 show_store_one(od, io_is_busy);
 show_store_one(od, up_threshold);
 show_store_one(od, sampling_down_factor);
-show_store_one(od, ignore_nice);
+show_store_one(od, ignore_nice_load);
 show_store_one(od, powersave_bias);
 declare_show_sampling_rate_min(od);
 
@@ -469,7 +469,7 @@
 gov_sys_pol_attr_rw(io_is_busy);
 gov_sys_pol_attr_rw(up_threshold);
 gov_sys_pol_attr_rw(sampling_down_factor);
-gov_sys_pol_attr_rw(ignore_nice);
+gov_sys_pol_attr_rw(ignore_nice_load);
 gov_sys_pol_attr_rw(powersave_bias);
 gov_sys_pol_attr_ro(sampling_rate_min);
 
@@ -478,7 +478,7 @@
 	&sampling_rate_gov_sys.attr,
 	&up_threshold_gov_sys.attr,
 	&sampling_down_factor_gov_sys.attr,
-	&ignore_nice_gov_sys.attr,
+	&ignore_nice_load_gov_sys.attr,
 	&powersave_bias_gov_sys.attr,
 	&io_is_busy_gov_sys.attr,
 	NULL
@@ -494,7 +494,7 @@
 	&sampling_rate_gov_pol.attr,
 	&up_threshold_gov_pol.attr,
 	&sampling_down_factor_gov_pol.attr,
-	&ignore_nice_gov_pol.attr,
+	&ignore_nice_load_gov_pol.attr,
 	&powersave_bias_gov_pol.attr,
 	&io_is_busy_gov_pol.attr,
 	NULL
@@ -544,7 +544,7 @@
 	}
 
 	tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
-	tuners->ignore_nice = 0;
+	tuners->ignore_nice_load = 0;
 	tuners->powersave_bias = default_powersave_bias;
 	tuners->io_is_busy = should_io_be_busy();
 
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 05edbd0..620d3ea 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -713,19 +713,12 @@
 	unsigned int cpu = (unsigned long)hcpu;
 
 	switch (action) {
-	case CPU_ONLINE:
-		cpufreq_update_policy(cpu);
-		break;
 	case CPU_DOWN_PREPARE:
 		cpufreq_stats_free_sysfs(cpu);
 		break;
 	case CPU_DEAD:
 		cpufreq_stats_free_table(cpu);
 		break;
-	case CPU_UP_CANCELED_FROZEN:
-		cpufreq_stats_free_sysfs(cpu);
-		cpufreq_stats_free_table(cpu);
-		break;
 	case CPU_DOWN_FAILED:
 	case CPU_DOWN_FAILED_FROZEN:
 		cpufreq_stats_create_table_cpu(cpu);
@@ -760,8 +753,6 @@
 		return ret;
 
 	register_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
-	for_each_online_cpu(cpu)
-		cpufreq_update_policy(cpu);
 
 	ret = cpufreq_register_notifier(&notifier_trans_block,
 				CPUFREQ_TRANSITION_NOTIFIER);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 07f2840..9520e3b 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -103,10 +103,10 @@
 static struct pstate_adjust_policy default_policy = {
 	.sample_rate_ms = 10,
 	.deadband = 0,
-	.setpoint = 109,
-	.p_gain_pct = 17,
+	.setpoint = 97,
+	.p_gain_pct = 20,
 	.d_gain_pct = 0,
-	.i_gain_pct = 4,
+	.i_gain_pct = 0,
 };
 
 struct perf_limits {
@@ -468,12 +468,12 @@
 static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu)
 {
 	int32_t busy_scaled;
-	int32_t core_busy, turbo_pstate, current_pstate;
+	int32_t core_busy, max_pstate, current_pstate;
 
 	core_busy = int_tofp(cpu->samples[cpu->sample_ptr].core_pct_busy);
-	turbo_pstate = int_tofp(cpu->pstate.turbo_pstate);
+	max_pstate = int_tofp(cpu->pstate.max_pstate);
 	current_pstate = int_tofp(cpu->pstate.current_pstate);
-	busy_scaled = mul_fp(core_busy, div_fp(turbo_pstate, current_pstate));
+	busy_scaled = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
 
 	return fp_toint(busy_scaled);
 }
@@ -629,8 +629,8 @@
 
 static int __cpuinit intel_pstate_cpu_init(struct cpufreq_policy *policy)
 {
-	int rc, min_pstate, max_pstate;
 	struct cpudata *cpu;
+	int rc;
 
 	rc = intel_pstate_init_cpu(policy->cpu);
 	if (rc)
@@ -644,9 +644,8 @@
 	else
 		policy->policy = CPUFREQ_POLICY_POWERSAVE;
 
-	intel_pstate_get_min_max(cpu, &min_pstate, &max_pstate);
-	policy->min = min_pstate * 100000;
-	policy->max = max_pstate * 100000;
+	policy->min = cpu->pstate.min_pstate * 100000;
+	policy->max = cpu->pstate.turbo_pstate * 100000;
 
 	/* cpuinfo and default policy values */
 	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * 100000;
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index d539127..f92b02a 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -118,11 +118,6 @@
 		clk_put(cpuclk);
 		return -EINVAL;
 	}
-	ret = clk_set_rate(cpuclk, rate);
-	if (ret) {
-		clk_put(cpuclk);
-		return ret;
-	}
 
 	/* clock table init */
 	for (i = 2;
@@ -130,6 +125,12 @@
 	     i++)
 		loongson2_clockmod_table[i].frequency = (rate * i) / 8;
 
+	ret = clk_set_rate(cpuclk, rate);
+	if (ret) {
+		clk_put(cpuclk);
+		return ret;
+	}
+
 	policy->cur = loongson2_cpufreq_get(policy->cpu);
 
 	cpufreq_frequency_table_get_attr(&loongson2_clockmod_table[0],
diff --git a/drivers/cpufreq/sfi-cpufreq.c b/drivers/cpufreq/sfi-cpufreq.c
new file mode 100644
index 0000000..1da2601
--- /dev/null
+++ b/drivers/cpufreq/sfi-cpufreq.c
@@ -0,0 +1,677 @@
+/*
+ * sfi_cpufreq.c - sfi Processor P-States Driver
+ *
+ *
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * Author: Vishwesh M Rudramuni
+ * Contact information: Vishwesh Rudramuni <vishwesh.m.rudramuni@intel.com>
+ */
+
+/*
+ * This sfi Processor P-States Driver re-uses most part of the code available
+ * in acpi cpufreq driver.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/sched.h>
+#include <linux/cpufreq.h>
+#include <linux/compiler.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/sfi.h>
+#include <linux/io.h>
+
+#include <asm/msr.h>
+#include <asm/processor.h>
+#include <asm/cpufeature.h>
+
+#include "sfi-cpufreq.h"
+#include "mperf.h"
+
+MODULE_AUTHOR("Vishwesh Rudramuni");
+MODULE_DESCRIPTION("SFI Processor P-States Driver");
+MODULE_LICENSE("GPL");
+
+DEFINE_PER_CPU(struct sfi_processor *, sfi_processors);
+
+static DEFINE_MUTEX(performance_mutex);
+static int sfi_cpufreq_num;
+static u32 sfi_cpu_num;
+static bool battlow;
+
+#define SFI_FREQ_MAX		32
+#define INTEL_MSR_RANGE		0xffff
+#define INTEL_MSR_BUSRATIO_MASK	0xff00
+#define SFI_CPU_MAX		8
+
+#define X86_ATOM_ARCH_SLM	0x4a
+
+struct sfi_cpufreq_data {
+	struct sfi_processor_performance *sfi_data;
+	struct cpufreq_frequency_table *freq_table;
+	unsigned int max_freq;
+	unsigned int resume;
+};
+
+
+struct drv_cmd {
+	const struct cpumask *mask;
+	u32 msr;
+	u32 val;
+};
+
+static DEFINE_PER_CPU(struct sfi_cpufreq_data *, drv_data);
+struct sfi_freq_table_entry sfi_cpufreq_array[SFI_FREQ_MAX];
+static struct sfi_cpu_table_entry sfi_cpu_array[SFI_CPU_MAX];
+
+/* sfi_perf_data is a pointer to percpu data. */
+static struct sfi_processor_performance *sfi_perf_data;
+
+static struct cpufreq_driver sfi_cpufreq_driver;
+
+static int parse_freq(struct sfi_table_header *table)
+{
+	struct sfi_table_simple *sb;
+	struct sfi_freq_table_entry *pentry;
+	int totallen;
+
+	sb = (struct sfi_table_simple *)table;
+	if (!sb) {
+		printk(KERN_WARNING "SFI: Unable to map FREQ\n");
+		return -ENODEV;
+	}
+
+	if (!sfi_cpufreq_num) {
+		sfi_cpufreq_num = SFI_GET_NUM_ENTRIES(sb,
+			 struct sfi_freq_table_entry);
+		pentry = (struct sfi_freq_table_entry *)sb->pentry;
+		totallen = sfi_cpufreq_num * sizeof(*pentry);
+		memcpy(sfi_cpufreq_array, pentry, totallen);
+	}
+
+	return 0;
+}
+
+static void get_cpu_sibling_mask(int cpu, struct cpumask *sibling_mask)
+{
+	unsigned int base = (cpu/CONFIG_NR_CPUS_PER_MODULE) * CONFIG_NR_CPUS_PER_MODULE;
+	unsigned int i;
+
+	cpumask_clear(sibling_mask);
+	for (i = base; i < (base + CONFIG_NR_CPUS_PER_MODULE); i++)
+		cpumask_set_cpu(i, sibling_mask);
+}
+
+static int sfi_processor_get_performance_states(struct sfi_processor *pr)
+{
+	int result = 0;
+	int i;
+
+	pr->performance->state_count = sfi_cpufreq_num;
+	pr->performance->states =
+	    kmalloc(sizeof(struct sfi_processor_px) * sfi_cpufreq_num,
+		    GFP_KERNEL);
+	if (!pr->performance->states)
+		result = -ENOMEM;
+
+	printk(KERN_INFO "Num p-states %d\n", sfi_cpufreq_num);
+
+	/* Populate the P-states info from the SFI table here */
+	for (i = 0; i < sfi_cpufreq_num; i++) {
+		pr->performance->states[i].core_frequency =
+			sfi_cpufreq_array[i].freq_mhz;
+		pr->performance->states[i].transition_latency =
+			sfi_cpufreq_array[i].latency;
+		pr->performance->states[i].control =
+			sfi_cpufreq_array[i].ctrl_val;
+		printk(KERN_INFO "State [%d]: core_frequency[%d] transition_latency[%d] control[0x%x]\n",
+			i,
+			(u32) pr->performance->states[i].core_frequency,
+			(u32) pr->performance->states[i].transition_latency,
+			(u32) pr->performance->states[i].control);
+	}
+
+	return result;
+}
+
+static int sfi_processor_register_performance(struct sfi_processor_performance
+				    *performance, unsigned int cpu)
+{
+	struct sfi_processor *pr;
+
+	mutex_lock(&performance_mutex);
+
+	pr = per_cpu(sfi_processors, cpu);
+	if (!pr) {
+		mutex_unlock(&performance_mutex);
+		return -ENODEV;
+	}
+
+	if (pr->performance) {
+		mutex_unlock(&performance_mutex);
+		return -EBUSY;
+	}
+
+	WARN_ON(!performance);
+
+	pr->performance = performance;
+
+	/* parse the freq table from sfi */
+	sfi_cpufreq_num = 0;
+	sfi_table_parse(SFI_SIG_FREQ, NULL, NULL, parse_freq);
+
+	sfi_processor_get_performance_states(pr);
+
+	mutex_unlock(&performance_mutex);
+	return 0;
+}
+
+void sfi_processor_unregister_performance(struct sfi_processor_performance
+				      *performance, unsigned int cpu)
+{
+	struct sfi_processor *pr;
+
+
+	mutex_lock(&performance_mutex);
+
+	pr = per_cpu(sfi_processors, cpu);
+	if (!pr) {
+		mutex_unlock(&performance_mutex);
+		return;
+	}
+
+	if (pr->performance)
+		kfree(pr->performance->states);
+	pr->performance = NULL;
+
+	mutex_unlock(&performance_mutex);
+
+	return;
+}
+
+static unsigned extract_freq(u32 msr, struct sfi_cpufreq_data *data)
+{
+	int i;
+	struct sfi_processor_performance *perf;
+	u32 sfi_ctrl;
+
+	msr &= INTEL_MSR_BUSRATIO_MASK;
+	perf = data->sfi_data;
+
+	for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
+		sfi_ctrl = perf->states[data->freq_table[i].index].control
+			& INTEL_MSR_BUSRATIO_MASK;
+		if (sfi_ctrl == msr)
+			return data->freq_table[i].frequency;
+	}
+	return data->freq_table[0].frequency;
+}
+
+/* Called via smp_call_function_many(), on the target CPUs */
+static void do_drv_write(void *_cmd)
+{
+	struct drv_cmd *cmd = _cmd;
+	u32 lo, hi;
+
+	rdmsr(cmd->msr, lo, hi);
+	lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE);
+	wrmsr(cmd->msr, lo, hi);
+}
+
+static void drv_write(struct drv_cmd *cmd)
+{
+	int this_cpu;
+
+	this_cpu = get_cpu();
+	if (cpumask_test_cpu(this_cpu, cmd->mask))
+		do_drv_write(cmd);
+	smp_call_function_many(cmd->mask, do_drv_write, cmd, 1);
+	put_cpu();
+}
+
+static u32 get_cur_val(const struct cpumask *mask)
+{
+	u32 val, dummy;
+
+	if (unlikely(cpumask_empty(mask)))
+		return 0;
+
+	rdmsr_on_cpu(cpumask_any(mask), MSR_IA32_PERF_STATUS, &val, &dummy);
+
+	return val;
+}
+
+static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
+{
+	struct sfi_cpufreq_data *data = per_cpu(drv_data, cpu);
+	unsigned int freq;
+	unsigned int cached_freq;
+
+	pr_debug("get_cur_freq_on_cpu (%d)\n", cpu);
+
+	if (unlikely(data == NULL ||
+		data->sfi_data == NULL || data->freq_table == NULL)) {
+		return 0;
+	}
+
+	cached_freq = data->freq_table[data->sfi_data->state].frequency;
+	freq = extract_freq(get_cur_val(cpumask_of(cpu)), data);
+	if (freq != cached_freq) {
+		/*
+		 * The dreaded BIOS frequency change behind our back.
+		 * Force set the frequency on next target call.
+		 */
+		data->resume = 1;
+	}
+
+	pr_debug("cur freq = %u\n", freq);
+
+	return freq;
+}
+
+static int sfi_cpufreq_target(struct cpufreq_policy *policy,
+			       unsigned int target_freq, unsigned int relation)
+{
+	struct sfi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
+	struct sfi_processor_performance *perf;
+	struct cpufreq_freqs freqs;
+	unsigned int next_state = 0; /* Index into freq_table */
+	unsigned int next_perf_state = 0; /* Index into perf table */
+	int result = 0;
+	struct drv_cmd cmd;
+
+
+	pr_debug("sfi_cpufreq_target %d (%d)\n", target_freq, policy->cpu);
+
+	if (unlikely(data == NULL ||
+	     data->sfi_data == NULL || data->freq_table == NULL)) {
+		return -ENODEV;
+	}
+
+	perf = data->sfi_data;
+	result = cpufreq_frequency_table_target(policy,
+						data->freq_table,
+						target_freq,
+						relation, &next_state);
+	if (unlikely(result))
+		return -ENODEV;
+
+	next_perf_state = data->freq_table[next_state].index;
+	if (perf->state == next_perf_state) {
+		if (unlikely(data->resume)) {
+			pr_debug("Called after resume, resetting to P%d\n",
+				next_perf_state);
+			data->resume = 0;
+		} else {
+			pr_debug("Already at target state (P%d)\n",
+				next_perf_state);
+			return 0;
+		}
+	}
+
+	cmd.msr = MSR_IA32_PERF_CTL;
+	cmd.val = (u32) perf->states[next_perf_state].control;
+	cmd.mask = policy->cpus;
+
+	freqs.old = perf->states[perf->state].core_frequency * 1000;
+	freqs.new = data->freq_table[next_state].frequency;
+
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+
+	drv_write(&cmd);
+
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+	perf->state = next_perf_state;
+
+	return result;
+}
+
+static int sfi_cpufreq_verify(struct cpufreq_policy *policy)
+{
+	struct sfi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
+
+	pr_debug("sfi_cpufreq_verify\n");
+
+	return cpufreq_frequency_table_verify(policy, data->freq_table);
+}
+
+/*
+ * sfi_cpufreq_early_init - initialize SFI P-States library
+ *
+ * Initialize the SFI P-States library (drivers/sfi/processor_perflib.c)
+ * in order to cope with the correct frequency and voltage pairings.
+ */
+static int __init sfi_cpufreq_early_init(void)
+{
+	sfi_perf_data = alloc_percpu(struct sfi_processor_performance);
+	if (!sfi_perf_data) {
+		pr_debug("Memory allocation error for sfi_perf_data.\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+
+static int sfi_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+	unsigned int i;
+	unsigned int freq;
+	unsigned int cpufreqidx = 0;
+	unsigned int valid_states = 0;
+	unsigned int cpu = policy->cpu;
+	struct sfi_cpufreq_data *data;
+	unsigned int result = 0;
+	struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
+	struct sfi_processor_performance *perf;
+	struct cpumask sibling_mask;
+
+	pr_debug("sfi_cpufreq_cpu_init CPU:%d\n", policy->cpu);
+
+	data = kzalloc(sizeof(struct sfi_cpufreq_data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	data->sfi_data = per_cpu_ptr(sfi_perf_data, cpu);
+	per_cpu(drv_data, cpu) = data;
+
+	sfi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
+
+
+	result = sfi_processor_register_performance(data->sfi_data, cpu);
+	if (result)
+		goto err_free;
+
+	perf = data->sfi_data;
+	policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
+
+	get_cpu_sibling_mask(cpu, &sibling_mask);
+	cpumask_copy(policy->cpus, &sibling_mask);
+	cpumask_set_cpu(policy->cpu, policy->related_cpus);
+
+	/* capability check */
+	if (perf->state_count <= 1) {
+		pr_debug("No P-States\n");
+		result = -ENODEV;
+		goto err_unreg;
+	}
+
+	data->freq_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
+		    (perf->state_count+1), GFP_KERNEL);
+	if (!data->freq_table) {
+		result = -ENOMEM;
+		goto err_unreg;
+	}
+
+	/* detect transition latency */
+	policy->cpuinfo.transition_latency = 0;
+	for (i = 0; i < perf->state_count; i++) {
+		if ((perf->states[i].transition_latency * 1000) >
+		    policy->cpuinfo.transition_latency)
+			policy->cpuinfo.transition_latency =
+			    perf->states[i].transition_latency * 1000;
+	}
+
+	data->max_freq = perf->states[0].core_frequency * 1000;
+	/* table init */
+	for (i = 0; i < perf->state_count; i++) {
+		if (i > 0 && perf->states[i].core_frequency >=
+		    data->freq_table[valid_states-1].frequency / 1000)
+			continue;
+
+		data->freq_table[valid_states].index = i;
+		data->freq_table[valid_states].frequency =
+		    perf->states[i].core_frequency * 1000;
+		valid_states++;
+	}
+	cpufreqidx = valid_states - 1;
+	data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
+	perf->state = 0;
+
+	result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
+	if (result)
+		goto err_freqfree;
+
+	policy->cur = get_cur_freq_on_cpu(cpu);
+
+
+	/* Check for APERF/MPERF support in hardware */
+	if (cpu_has(c, X86_FEATURE_APERFMPERF))
+		sfi_cpufreq_driver.getavg = cpufreq_get_measured_perf;
+
+	pr_debug("CPU%u - SFI performance management activated.\n", cpu);
+	for (i = 0; i < perf->state_count; i++)
+		pr_debug("     %cP%d: %d MHz, %d uS\n",
+			(i == perf->state ? '*' : ' '), i,
+			(u32) perf->states[i].core_frequency,
+			(u32) perf->states[i].transition_latency);
+
+	cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);
+
+	/*
+	 * the first call to ->target() should result in us actually
+	 * writing something to the appropriate registers.
+	 */
+	data->resume = 1;
+
+	/**
+	 * Capping the cpu frequency to LFM during boot, if battery is detected
+	 * as critically low.
+	 */
+	if (battlow) {
+		freq = data->freq_table[cpufreqidx].frequency;
+		if (freq != CPUFREQ_ENTRY_INVALID) {
+			pr_info("CPU%u freq is capping to %uKHz\n", cpu, freq);
+			policy->max = freq;
+		} else {
+			pr_err("CPU%u table entry %u is invalid.\n",
+					cpu, cpufreqidx);
+			goto err_freqfree;
+		}
+	}
+
+	return result;
+
+err_freqfree:
+	kfree(data->freq_table);
+err_unreg:
+	sfi_processor_unregister_performance(perf, cpu);
+err_free:
+	kfree(data);
+	per_cpu(drv_data, cpu) = NULL;
+
+	return result;
+}
+
+static int sfi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+	struct sfi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
+
+	pr_debug("sfi_cpufreq_cpu_exit\n");
+
+	if (data) {
+		cpufreq_frequency_table_put_attr(policy->cpu);
+		per_cpu(drv_data, policy->cpu) = NULL;
+		sfi_processor_unregister_performance(data->sfi_data,
+							policy->cpu);
+		kfree(data->freq_table);
+		kfree(data);
+	}
+
+	return 0;
+}
+
+static int sfi_cpufreq_resume(struct cpufreq_policy *policy)
+{
+	struct sfi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
+
+	pr_debug("sfi_cpufreq_resume\n");
+
+	data->resume = 1;
+
+	return 0;
+}
+
+static struct freq_attr *sfi_cpufreq_attr[] = {
+	&cpufreq_freq_attr_scaling_available_freqs,
+	NULL,
+};
+
+static struct cpufreq_driver sfi_cpufreq_driver = {
+	.get = get_cur_freq_on_cpu,
+	.verify = sfi_cpufreq_verify,
+	.target = sfi_cpufreq_target,
+	.init = sfi_cpufreq_cpu_init,
+	.exit = sfi_cpufreq_cpu_exit,
+	.resume = sfi_cpufreq_resume,
+	.name = "sfi-cpufreq",
+	.owner = THIS_MODULE,
+	.attr = sfi_cpufreq_attr,
+};
+
+/**
+ * set_battlow_status - enables "battlow" to cap the max scaling cpu frequency.
+ */
+static int __init set_battlow_status(char *unused)
+{
+	pr_notice("Low Battery detected! Frequency shall be capped.\n");
+	battlow = true;
+	return 0;
+}
+/* Checking "battlow" param on boot, whether battery is critically low or not */
+early_param("battlow", set_battlow_status);
+
+static int __init parse_cpus(struct sfi_table_header *table)
+{
+	struct sfi_table_simple *sb;
+	struct sfi_cpu_table_entry *pentry;
+	int i;
+
+	sb = (struct sfi_table_simple *)table;
+
+	sfi_cpu_num = SFI_GET_NUM_ENTRIES(sb, struct sfi_cpu_table_entry);
+
+	pentry = (struct sfi_cpu_table_entry *) sb->pentry;
+	for (i = 0; i < sfi_cpu_num; i++) {
+		sfi_cpu_array[i].apic_id = pentry->apic_id;
+		printk(KERN_INFO "APIC ID: %d\n", pentry->apic_id);
+		pentry++;
+	}
+
+	return 0;
+
+}
+
+
+static int __init init_sfi_processor_list(void)
+{
+	struct sfi_processor *pr;
+	int i;
+	int result;
+
+	/* parse the cpus from the sfi table */
+	result = sfi_table_parse(SFI_SIG_CPUS, NULL, NULL, parse_cpus);
+
+	if (result < 0)
+		return result;
+
+	pr = kzalloc(sfi_cpu_num * sizeof(struct sfi_processor), GFP_KERNEL);
+	if (!pr)
+		return -ENOMEM;
+
+	for (i = 0; i < sfi_cpu_num; i++) {
+		pr->id = sfi_cpu_array[i].apic_id;
+		per_cpu(sfi_processors, i) = pr;
+		pr++;
+	}
+
+	return 0;
+}
+
+static int __init sfi_cpufreq_init(void)
+{
+	int ret;
+
+	pr_debug("sfi_cpufreq_init\n");
+
+	ret = init_sfi_processor_list();
+	if (ret)
+		return ret;
+
+	ret = sfi_cpufreq_early_init();
+	if (ret)
+		return ret;
+
+	return cpufreq_register_driver(&sfi_cpufreq_driver);
+}
+
+static void __exit sfi_cpufreq_exit(void)
+{
+
+	struct sfi_processor *pr;
+
+	pr_debug("sfi_cpufreq_exit\n");
+
+	pr = per_cpu(sfi_processors, 0);
+	kfree(pr);
+
+	cpufreq_unregister_driver(&sfi_cpufreq_driver);
+
+	free_percpu(sfi_perf_data);
+
+	return;
+}
+late_initcall(sfi_cpufreq_init);
+module_exit(sfi_cpufreq_exit);
+
+
+unsigned int turbo_enable  __read_mostly = 1; /* default enable */
+int set_turbo_feature(const char *val, struct kernel_param *kp)
+{
+	int i, nc;
+	u32 lo, hi;
+	int rv = param_set_int(val, kp);
+
+	if (rv)
+		return rv;
+
+	/* enable/disable Turbo */
+	nc = num_possible_cpus();
+	if (boot_cpu_data.x86_model == X86_ATOM_ARCH_SLM) {
+		for (i = 0; i < nc; i++) {
+			rdmsr_on_cpu(i, MSR_IA32_MISC_ENABLE, &lo, &hi);
+			if (turbo_enable)
+				hi = hi &
+				 (~(MSR_IA32_MISC_ENABLE_TURBO_DISABLE >> 32));
+			else
+				hi = hi |
+				(MSR_IA32_MISC_ENABLE_TURBO_DISABLE >> 32);
+			wrmsr_on_cpu(i, MSR_IA32_MISC_ENABLE, lo, hi);
+		}
+	}
+	return 0;
+}
+MODULE_PARM_DESC(turbo_enable, "to enable/disable turbo feature(1:Enable; 0:Disable)");
+module_param_call(turbo_enable, set_turbo_feature, param_get_uint,
+		  &turbo_enable, S_IRUGO | S_IWUSR);
+
+MODULE_ALIAS("sfi");
diff --git a/drivers/cpufreq/sfi-cpufreq.h b/drivers/cpufreq/sfi-cpufreq.h
new file mode 100644
index 0000000..7e01c1e
--- /dev/null
+++ b/drivers/cpufreq/sfi-cpufreq.h
@@ -0,0 +1,65 @@
+/*
+ * sfi_processor.h
+ * Copyright (c) 2010, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifndef __SFI_PROCESSOR_H__
+#define __SFI_PROCESSOR_H__
+
+#include <linux/sfi.h>
+#include <linux/cpuidle.h>
+
+struct sfi_processor_power {
+	struct cpuidle_device dev;
+	u32 default_state;
+	int count;
+	struct cpuidle_state *states;
+	struct sfi_cstate_table_entry *sfi_cstates;
+};
+
+struct sfi_processor_flags {
+	u8 valid;
+	u8 power;
+};
+
+struct sfi_processor {
+	u32 id;
+	struct sfi_processor_flags flags;
+	struct sfi_processor_power power;
+	struct sfi_processor_performance *performance;
+};
+
+/* Performance management */
+struct sfi_processor_px {
+	u32 core_frequency;	/* megahertz */
+	u32 transition_latency;	/* microseconds */
+	u32 control;	/* control value */
+};
+
+struct sfi_processor_performance {
+	unsigned int state;
+	unsigned int state_count;
+	struct sfi_processor_px *states;
+};
+
+/* for communication between multiple parts of the processor kernel module */
+DECLARE_PER_CPU(struct sfi_processor *, sfi_processors);
+
+int sfi_processor_power_init(struct sfi_processor *pr);
+int sfi_processor_power_exit(struct sfi_processor *pr);
+
+#endif /*__SFI_PROCESSOR_H__*/
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
index 2a297f8..fe853903 100644
--- a/drivers/cpuidle/coupled.c
+++ b/drivers/cpuidle/coupled.c
@@ -106,6 +106,7 @@
 	cpumask_t coupled_cpus;
 	int requested_state[NR_CPUS];
 	atomic_t ready_waiting_counts;
+	atomic_t abort_barrier;
 	int online_count;
 	int refcnt;
 	int prevent;
@@ -122,12 +123,19 @@
 static DEFINE_PER_CPU(struct call_single_data, cpuidle_coupled_poke_cb);
 
 /*
- * The cpuidle_coupled_poked_mask mask is used to avoid calling
+ * The cpuidle_coupled_poke_pending mask is used to avoid calling
  * __smp_call_function_single with the per cpu call_single_data struct already
  * in use.  This prevents a deadlock where two cpus are waiting for each others
  * call_single_data struct to be available
  */
-static cpumask_t cpuidle_coupled_poked_mask;
+static cpumask_t cpuidle_coupled_poke_pending;
+
+/*
+ * The cpuidle_coupled_poked mask is used to ensure that each cpu has been poked
+ * once to minimize entering the ready loop with a poke pending, which would
+ * require aborting and retrying.
+ */
+static cpumask_t cpuidle_coupled_poked;
 
 /**
  * cpuidle_coupled_parallel_barrier - synchronize all online coupled cpus
@@ -291,10 +299,11 @@
 	return state;
 }
 
-static void cpuidle_coupled_poked(void *info)
+static void cpuidle_coupled_handle_poke(void *info)
 {
 	int cpu = (unsigned long)info;
-	cpumask_clear_cpu(cpu, &cpuidle_coupled_poked_mask);
+	cpumask_set_cpu(cpu, &cpuidle_coupled_poked);
+	cpumask_clear_cpu(cpu, &cpuidle_coupled_poke_pending);
 }
 
 /**
@@ -313,7 +322,7 @@
 {
 	struct call_single_data *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu);
 
-	if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poked_mask))
+	if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poke_pending))
 		__smp_call_function_single(cpu, csd, 0);
 }
 
@@ -340,30 +349,19 @@
  * @coupled: the struct coupled that contains the current cpu
  * @next_state: the index in drv->states of the requested state for this cpu
  *
- * Updates the requested idle state for the specified cpuidle device,
- * poking all coupled cpus out of idle if necessary to let them see the new
- * state.
+ * Updates the requested idle state for the specified cpuidle device.
+ * Returns the number of waiting cpus.
  */
-static void cpuidle_coupled_set_waiting(int cpu,
+static int cpuidle_coupled_set_waiting(int cpu,
 		struct cpuidle_coupled *coupled, int next_state)
 {
-	int w;
-
 	coupled->requested_state[cpu] = next_state;
 
 	/*
-	 * If this is the last cpu to enter the waiting state, poke
-	 * all the other cpus out of their waiting state so they can
-	 * enter a deeper state.  This can race with one of the cpus
-	 * exiting the waiting state due to an interrupt and
-	 * decrementing waiting_count, see comment below.
-	 *
 	 * The atomic_inc_return provides a write barrier to order the write
 	 * to requested_state with the later write that increments ready_count.
 	 */
-	w = atomic_inc_return(&coupled->ready_waiting_counts) & WAITING_MASK;
-	if (w == coupled->online_count)
-		cpuidle_coupled_poke_others(cpu, coupled);
+	return atomic_inc_return(&coupled->ready_waiting_counts) & WAITING_MASK;
 }
 
 /**
@@ -410,19 +408,33 @@
  * been processed and the poke bit has been cleared.
  *
  * Other interrupts may also be processed while interrupts are enabled, so
- * need_resched() must be tested after turning interrupts off again to make sure
+ * need_resched() must be tested after this function returns to make sure
  * the interrupt didn't schedule work that should take the cpu out of idle.
  *
- * Returns 0 if need_resched was false, -EINTR if need_resched was true.
+ * Returns 0 if no poke was pending, 1 if a poke was cleared.
  */
 static int cpuidle_coupled_clear_pokes(int cpu)
 {
+	if (!cpumask_test_cpu(cpu, &cpuidle_coupled_poke_pending))
+		return 0;
+
 	local_irq_enable();
-	while (cpumask_test_cpu(cpu, &cpuidle_coupled_poked_mask))
+	while (cpumask_test_cpu(cpu, &cpuidle_coupled_poke_pending))
 		cpu_relax();
 	local_irq_disable();
 
-	return need_resched() ? -EINTR : 0;
+	return 1;
+}
+
+static bool cpuidle_coupled_any_pokes_pending(struct cpuidle_coupled *coupled)
+{
+	cpumask_t cpus;
+	int ret;
+
+	cpumask_and(&cpus, cpu_online_mask, &coupled->coupled_cpus);
+	ret = cpumask_and(&cpus, &cpuidle_coupled_poke_pending, &cpus);
+
+	return ret;
 }
 
 /**
@@ -449,12 +461,14 @@
 {
 	int entered_state = -1;
 	struct cpuidle_coupled *coupled = dev->coupled;
+	int w;
 
 	if (!coupled)
 		return -EINVAL;
 
 	while (coupled->prevent) {
-		if (cpuidle_coupled_clear_pokes(dev->cpu)) {
+		cpuidle_coupled_clear_pokes(dev->cpu);
+		if (need_resched()) {
 			local_irq_enable();
 			return entered_state;
 		}
@@ -465,15 +479,37 @@
 	/* Read barrier ensures online_count is read after prevent is cleared */
 	smp_rmb();
 
-	cpuidle_coupled_set_waiting(dev->cpu, coupled, next_state);
+reset:
+	cpumask_clear_cpu(dev->cpu, &cpuidle_coupled_poked);
+
+	w = cpuidle_coupled_set_waiting(dev->cpu, coupled, next_state);
+	/*
+	 * If this is the last cpu to enter the waiting state, poke
+	 * all the other cpus out of their waiting state so they can
+	 * enter a deeper state.  This can race with one of the cpus
+	 * exiting the waiting state due to an interrupt and
+	 * decrementing waiting_count, see comment below.
+	 */
+	if (w == coupled->online_count) {
+		cpumask_set_cpu(dev->cpu, &cpuidle_coupled_poked);
+		cpuidle_coupled_poke_others(dev->cpu, coupled);
+	}
 
 retry:
 	/*
 	 * Wait for all coupled cpus to be idle, using the deepest state
-	 * allowed for a single cpu.
+	 * allowed for a single cpu.  If this was not the poking cpu, wait
+	 * for at least one poke before leaving to avoid a race where
+	 * two cpus could arrive at the waiting loop at the same time,
+	 * but the first of the two to arrive could skip the loop without
+	 * processing the pokes from the last to arrive.
 	 */
-	while (!cpuidle_coupled_cpus_waiting(coupled)) {
-		if (cpuidle_coupled_clear_pokes(dev->cpu)) {
+	while (!cpuidle_coupled_cpus_waiting(coupled) ||
+			!cpumask_test_cpu(dev->cpu, &cpuidle_coupled_poked)) {
+		if (cpuidle_coupled_clear_pokes(dev->cpu))
+			continue;
+
+		if (need_resched()) {
 			cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
 			goto out;
 		}
@@ -487,12 +523,19 @@
 			dev->safe_state_index);
 	}
 
-	if (cpuidle_coupled_clear_pokes(dev->cpu)) {
+	cpuidle_coupled_clear_pokes(dev->cpu);
+	if (need_resched()) {
 		cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
 		goto out;
 	}
 
 	/*
+	 * Make sure final poke status for this cpu is visible before setting
+	 * cpu as ready.
+	 */
+	smp_wmb();
+
+	/*
 	 * All coupled cpus are probably idle.  There is a small chance that
 	 * one of the other cpus just became active.  Increment the ready count,
 	 * and spin until all coupled cpus have incremented the counter. Once a
@@ -511,6 +554,28 @@
 		cpu_relax();
 	}
 
+	/*
+	 * Make sure read of all cpus ready is done before reading pending pokes
+	 */
+	smp_rmb();
+
+	/*
+	 * There is a small chance that a cpu left and reentered idle after this
+	 * cpu saw that all cpus were waiting.  The cpu that reentered idle will
+	 * have sent this cpu a poke, which will still be pending after the
+	 * ready loop.  The pending interrupt may be lost by the interrupt
+	 * controller when entering the deep idle state.  It's not possible to
+	 * clear a pending interrupt without turning interrupts on and handling
+	 * it, and it's too late to turn on interrupts here, so reset the
+	 * coupled idle state of all cpus and retry.
+	 */
+	if (cpuidle_coupled_any_pokes_pending(coupled)) {
+		cpuidle_coupled_set_done(dev->cpu, coupled);
+		/* Wait for all cpus to see the pending pokes */
+		cpuidle_coupled_parallel_barrier(dev, &coupled->abort_barrier);
+		goto reset;
+	}
+
 	/* all cpus have acked the coupled state */
 	next_state = cpuidle_coupled_get_state(dev, coupled);
 
@@ -596,7 +661,7 @@
 	coupled->refcnt++;
 
 	csd = &per_cpu(cpuidle_coupled_poke_cb, dev->cpu);
-	csd->func = cpuidle_coupled_poked;
+	csd->func = cpuidle_coupled_handle_poke;
 	csd->info = (void *)(unsigned long)dev->cpu;
 
 	return 0;
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index c42a8a1..14ce6ab 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -20,6 +20,10 @@
 #include <linux/sched.h>
 #include <linux/math64.h>
 #include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
 
 #define BUCKETS 12
 #define INTERVALS 8
@@ -31,10 +35,23 @@
 /* 60 * 60 > STDDEV_THRESH * INTERVALS = 400 * 8 */
 #define MAX_DEVIATION 60
 
+#ifdef CONFIG_PM_DEBUG
+#define IDLE_HIST_BUCKET_WIDTH 10
+
+struct idle_hist {
+	u32 no_of_buckets;
+	u32 max_range;
+	u32 bucket_width;
+	u32 *buckets;
+	bool active;
+};
+static DEFINE_PER_CPU(struct idle_hist *, idle_hists);
+#endif
+
 static DEFINE_PER_CPU(struct hrtimer, menu_hrtimer);
 static DEFINE_PER_CPU(int, hrtimer_status);
 /* menu hrtimer mode */
-enum {MENU_HRTIMER_STOP, MENU_HRTIMER_REPEAT, MENU_HRTIMER_GENERAL};
+enum {MENU_HRTIMER_STOP, MENU_HRTIMER_REPEAT};
 
 /*
  * Concepts and ideas behind the menu governor
@@ -116,13 +133,6 @@
  *
  */
 
-/*
- * The C-state residency is so long that is is worthwhile to exit
- * from the shallow C-state and re-enter into a deeper C-state.
- */
-static unsigned int perfect_cstate_ms __read_mostly = 30;
-module_param(perfect_cstate_ms, uint, 0000);
-
 struct menu_device {
 	int		last_state_idx;
 	int             needs_update;
@@ -140,6 +150,15 @@
 #define LOAD_INT(x) ((x) >> FSHIFT)
 #define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
 
+/*
+ * Define a variable per CPU in order to indicate when to
+ * update the buckets or not. The buckets need to be updated
+ * only when the wakeup is destinated to the CPU otherwise
+ * consider a perfect prediction for the buckets.
+ */
+DEFINE_PER_CPU(int, update_buckets);
+
+/* not used anymore in performance_multiplier - keep it for reference
 static int get_loadavg(void)
 {
 	unsigned long this = this_cpu_load();
@@ -147,6 +166,7 @@
 
 	return LOAD_INT(this) * 10 + LOAD_FRAC(this) / 10;
 }
+*/
 
 static inline int which_bucket(unsigned int duration)
 {
@@ -228,16 +248,6 @@
 static enum hrtimer_restart menu_hrtimer_notify(struct hrtimer *hrtimer)
 {
 	int cpu = smp_processor_id();
-	struct menu_device *data = &per_cpu(menu_devices, cpu);
-
-	/* In general case, the expected residency is much larger than
-	 *  deepest C-state target residency, but prediction logic still
-	 *  predicts a small predicted residency, so the prediction
-	 *  history is totally broken if the timer is triggered.
-	 *  So reset the correction factor.
-	 */
-	if (per_cpu(hrtimer_status, cpu) == MENU_HRTIMER_GENERAL)
-		data->correction_factor[data->bucket] = RESOLUTION * DECAY;
 
 	per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_STOP;
 
@@ -323,6 +333,9 @@
 	int repeat = 0, low_predicted = 0;
 	int cpu = smp_processor_id();
 	struct hrtimer *hrtmr = &per_cpu(menu_hrtimer, cpu);
+#ifdef CONFIG_PM_DEBUG
+	struct idle_hist *idle_hist = per_cpu(idle_hists, cpu);
+#endif
 
 	if (data->needs_update) {
 		menu_update(drv, dev);
@@ -357,6 +370,22 @@
 	data->predicted_us = div_round64(data->expected_us * data->correction_factor[data->bucket],
 					 RESOLUTION * DECAY);
 
+#ifdef CONFIG_PM_DEBUG
+	/* Collect the idleness histogram data if it is activated */
+	if (idle_hist && idle_hist->active && idle_hist->buckets &&
+						idle_hist->bucket_width) {
+		u32 bucket = (u32)div_round64(data->predicted_us,
+					idle_hist->bucket_width);
+
+		/* Last bucket is used to collect the frequency of idleness
+		 * longer than target_residency of deepest C-state.
+		 */
+		if (bucket > idle_hist->no_of_buckets)
+			bucket = idle_hist->no_of_buckets;
+		idle_hist->buckets[bucket] += 1;
+	}
+#endif
+
 	repeat = get_typical_interval(data);
 
 	/*
@@ -394,7 +423,6 @@
 	/* not deepest C-state chosen for low predicted residency */
 	if (low_predicted) {
 		unsigned int timer_us = 0;
-		unsigned int perfect_us = 0;
 
 		/*
 		 * Set a timer to detect whether this sleep is much
@@ -405,28 +433,13 @@
 		 */
 		timer_us = 2 * (data->predicted_us + MAX_DEVIATION);
 
-		perfect_us = perfect_cstate_ms * 1000;
-
 		if (repeat && (4 * timer_us < data->expected_us)) {
 			RCU_NONIDLE(hrtimer_start(hrtmr,
 				ns_to_ktime(1000 * timer_us),
 				HRTIMER_MODE_REL_PINNED));
 			/* In repeat case, menu hrtimer is started */
 			per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_REPEAT;
-		} else if (perfect_us < data->expected_us) {
-			/*
-			 * The next timer is long. This could be because
-			 * we did not make a useful prediction.
-			 * In that case, it makes sense to re-enter
-			 * into a deeper C-state after some time.
-			 */
-			RCU_NONIDLE(hrtimer_start(hrtmr,
-				ns_to_ktime(1000 * timer_us),
-				HRTIMER_MODE_REL_PINNED));
-			/* In general case, menu hrtimer is started */
-			per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_GENERAL;
 		}
-
 	}
 
 	return data->last_state_idx;
@@ -486,7 +499,9 @@
 	new_factor = data->correction_factor[data->bucket]
 			* (DECAY - 1) / DECAY;
 
-	if (data->expected_us > 0 && measured_us < MAX_INTERESTING)
+	/* if its a fake wakeup just consider it has perfect wakeup */
+	if ((__get_cpu_var(update_buckets)) &&
+		(data->expected_us > 0 && measured_us < MAX_INTERESTING))
 		new_factor += RESOLUTION * measured_us / data->expected_us;
 	else
 		/*
@@ -505,9 +520,15 @@
 	data->correction_factor[data->bucket] = new_factor;
 
 	/* update the repeating-pattern data */
-	data->intervals[data->interval_ptr++] = last_idle_us;
+	if (__get_cpu_var(update_buckets))
+		data->intervals[data->interval_ptr++] = last_idle_us;
+	else
+		data->intervals[data->interval_ptr++] = data->expected_us;
+
 	if (data->interval_ptr >= INTERVALS)
 		data->interval_ptr = 0;
+
+	__get_cpu_var(update_buckets) = 1;
 }
 
 /**
@@ -520,11 +541,38 @@
 {
 	struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
 	struct hrtimer *t = &per_cpu(menu_hrtimer, dev->cpu);
+#ifdef CONFIG_PM_DEBUG
+	struct idle_hist *idle_hist = kzalloc(sizeof(struct idle_hist),
+							GFP_KERNEL);
+#endif
+
 	hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	t->function = menu_hrtimer_notify;
 
 	memset(data, 0, sizeof(struct menu_device));
 
+#ifdef CONFIG_PM_DEBUG
+	if (!idle_hist) {
+		pr_warn("Failed to allocate memory for idle_hist\n");
+		return 0;
+	}
+
+	/* Max range is the target_residency of deepest C-state */
+	idle_hist->max_range =
+		drv->states[drv->state_count - 1].target_residency;
+	idle_hist->bucket_width = IDLE_HIST_BUCKET_WIDTH;
+	idle_hist->no_of_buckets =
+		idle_hist->max_range / idle_hist->bucket_width;
+	idle_hist->buckets = kzalloc(
+		(idle_hist->no_of_buckets + 1) * sizeof(u32), GFP_KERNEL);
+	if (!idle_hist->buckets)
+		pr_warn("Failed to allocate memory for idle_hist buckets\n");
+	pr_info("cpu %d: max_range = %u, #buckets = %d, width = %d\n",
+		dev->cpu, idle_hist->max_range, idle_hist->no_of_buckets,
+		idle_hist->bucket_width);
+
+	per_cpu(idle_hists, dev->cpu) = idle_hist;
+#endif
 	return 0;
 }
 
@@ -537,11 +585,126 @@
 	.owner =	THIS_MODULE,
 };
 
+#ifdef CONFIG_PM_DEBUG
+static int idle_hist_show(struct seq_file *s, void *unused)
+{
+	int cpu, i, max_no_of_buckets = 0;
+	struct idle_hist *idle_hist = NULL;
+
+	for_each_online_cpu(cpu) {
+		idle_hist = per_cpu(idle_hists, cpu);
+		if (idle_hist && idle_hist->no_of_buckets > max_no_of_buckets)
+			max_no_of_buckets = idle_hist->no_of_buckets;
+		seq_printf(s, "\tCPU%d", cpu);
+	}
+
+	if (unlikely(idle_hist == NULL))
+		return 0;
+
+	seq_puts(s, "\n");
+	for (i = 0; i <= max_no_of_buckets; i++) {
+		seq_printf(s, "%d", i * idle_hist->bucket_width);
+		for_each_online_cpu(cpu) {
+			u32 freq = -1; /* -1 indicates invalid frquency */
+			idle_hist = per_cpu(idle_hists, cpu);
+			if (idle_hist && idle_hist->buckets &&
+				i <= idle_hist->no_of_buckets)
+				freq = idle_hist->buckets[i];
+			seq_printf(s, "\t%d", freq);
+		}
+		seq_puts(s, "\n");
+	}
+	return 0;
+}
+
+static ssize_t idle_hist_write(struct file *file,
+		const char __user *userbuf, size_t count, loff_t *ppos)
+{
+	char buf[32];
+	struct idle_hist *idle_hist;
+	int buf_size = min(count, sizeof(buf)-1);
+	int bucket_width = 0, no_of_buckets;
+	char *start_msg = "start";
+	char *stop_msg = "stop";
+	u32 *temp;
+	int cpu;
+
+	if (copy_from_user(buf, userbuf, buf_size))
+		return -EFAULT;
+
+	if (buf[0] != 's') {	/* If it is not 'start' or 'stop' most probably
+				 * it is a update of bucket_width
+				 */
+		if (sscanf(buf, "%u", &bucket_width) != 1)
+			return -EFAULT;
+
+		if (bucket_width <= 0)
+			return -EFAULT;
+
+		/* update bucket width */
+		for_each_online_cpu(cpu) {
+			idle_hist = per_cpu(idle_hists, cpu);
+			if (bucket_width == idle_hist->bucket_width)
+					/* No need to update */
+				continue;
+
+			idle_hist->active = false;
+			no_of_buckets =	idle_hist->max_range / bucket_width;
+			temp = krealloc(idle_hist->buckets,
+				(no_of_buckets + 1) * sizeof(u32), GFP_KERNEL);
+			if (!temp) {
+				pr_warn("Failed to update bucket_width\n");
+				continue;
+			}
+			memset(temp, 0, (no_of_buckets + 1) * sizeof(u32));
+			idle_hist->buckets = temp;
+			idle_hist->bucket_width = bucket_width;
+			idle_hist->no_of_buckets = no_of_buckets;
+		}
+	} else if (!strncmp(buf, start_msg, strlen(start_msg))) {
+		/* start data collecting */
+		for_each_online_cpu(cpu) {
+			idle_hist = per_cpu(idle_hists, cpu);
+			memset(idle_hist->buckets, 0,
+				(idle_hist->no_of_buckets + 1) * sizeof(u32));
+			idle_hist->active = true;
+		}
+	} else if (!strncmp(buf, stop_msg, strlen(stop_msg))) {
+		/* stop data collecting */
+		for_each_online_cpu(cpu) {
+			idle_hist = per_cpu(idle_hists, cpu);
+			idle_hist->active = false;
+		}
+	}
+
+	return count;
+}
+
+static int idle_hist_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, idle_hist_show, NULL);
+}
+
+static const struct file_operations idle_hist_ops = {
+	.open           = idle_hist_open,
+	.read           = seq_read,
+	.write		= idle_hist_write,
+	.llseek         = seq_lseek,
+	.release        = single_release,
+};
+#endif
+
 /**
  * init_menu - initializes the governor
  */
 static int __init init_menu(void)
 {
+#ifdef CONFIG_PM_DEBUG
+	struct dentry *d3 = debugfs_create_file("idle_hist", S_IFREG | S_IRUGO,
+				NULL, NULL, &idle_hist_ops);
+	if (!d3)
+		pr_warn("idle_hist: Failed to create debugfs for idle_hist\n");
+#endif
 	return cpuidle_register_governor(&menu_governor);
 }
 
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 5996521..84573b4 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -429,7 +429,7 @@
 	dma_addr_t src_dma, dst_dma;
 	int ret = 0;
 
-	desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
+	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
 	if (!desc) {
 		dev_err(jrdev, "unable to allocate key input memory\n");
 		return -ENOMEM;
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 3b36797..b0d9112 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -731,6 +731,12 @@
 out:
 	mutex_unlock(&devfreq_list_lock);
 
+	if (!ret) {
+		mutex_lock(&df->lock);
+		ret = update_devfreq(df);
+		mutex_unlock(&df->lock);
+	}
+
 	if (!ret)
 		ret = count;
 	return ret;
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index a2b0df5..4b82961 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -7,7 +7,7 @@
 obj-$(CONFIG_DMA_OF) += of-dma.o
 
 obj-$(CONFIG_NET_DMA) += iovlock.o
-obj-$(CONFIG_INTEL_MID_DMAC) += intel_mid_dma.o
+obj-$(CONFIG_INTEL_MID_DMAC) += intel_mid_dma.o intel_mid_dma_acpi.o
 obj-$(CONFIG_DMATEST) += dmatest.o
 obj-$(CONFIG_INTEL_IOATDMA) += ioat/
 obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index f285833..617d170 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -414,17 +414,18 @@
 	struct imxdma_engine *imxdma = imxdmac->imxdma;
 	int chno = imxdmac->channel;
 	struct imxdma_desc *desc;
+	unsigned long flags;
 
-	spin_lock(&imxdma->lock);
+	spin_lock_irqsave(&imxdma->lock, flags);
 	if (list_empty(&imxdmac->ld_active)) {
-		spin_unlock(&imxdma->lock);
+		spin_unlock_irqrestore(&imxdma->lock, flags);
 		goto out;
 	}
 
 	desc = list_first_entry(&imxdmac->ld_active,
 				struct imxdma_desc,
 				node);
-	spin_unlock(&imxdma->lock);
+	spin_unlock_irqrestore(&imxdma->lock, flags);
 
 	if (desc->sg) {
 		u32 tmp;
@@ -496,7 +497,6 @@
 {
 	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
 	struct imxdma_engine *imxdma = imxdmac->imxdma;
-	unsigned long flags;
 	int slot = -1;
 	int i;
 
@@ -504,7 +504,6 @@
 	switch (d->type) {
 	case IMXDMA_DESC_INTERLEAVED:
 		/* Try to get a free 2D slot */
-		spin_lock_irqsave(&imxdma->lock, flags);
 		for (i = 0; i < IMX_DMA_2D_SLOTS; i++) {
 			if ((imxdma->slots_2d[i].count > 0) &&
 			((imxdma->slots_2d[i].xsr != d->x) ||
@@ -514,10 +513,8 @@
 			slot = i;
 			break;
 		}
-		if (slot < 0) {
-			spin_unlock_irqrestore(&imxdma->lock, flags);
+		if (slot < 0)
 			return -EBUSY;
-		}
 
 		imxdma->slots_2d[slot].xsr = d->x;
 		imxdma->slots_2d[slot].ysr = d->y;
@@ -526,7 +523,6 @@
 
 		imxdmac->slot_2d = slot;
 		imxdmac->enabled_2d = true;
-		spin_unlock_irqrestore(&imxdma->lock, flags);
 
 		if (slot == IMX_DMA_2D_SLOT_A) {
 			d->config_mem &= ~CCR_MSEL_B;
@@ -602,18 +598,17 @@
 	struct imxdma_channel *imxdmac = (void *)data;
 	struct imxdma_engine *imxdma = imxdmac->imxdma;
 	struct imxdma_desc *desc;
+	unsigned long flags;
 
-	spin_lock(&imxdma->lock);
+	spin_lock_irqsave(&imxdma->lock, flags);
 
 	if (list_empty(&imxdmac->ld_active)) {
 		/* Someone might have called terminate all */
-		goto out;
+		spin_unlock_irqrestore(&imxdma->lock, flags);
+		return;
 	}
 	desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);
 
-	if (desc->desc.callback)
-		desc->desc.callback(desc->desc.callback_param);
-
 	/* If we are dealing with a cyclic descriptor, keep it on ld_active
 	 * and dont mark the descriptor as complete.
 	 * Only in non-cyclic cases it would be marked as complete
@@ -640,7 +635,11 @@
 				 __func__, imxdmac->channel);
 	}
 out:
-	spin_unlock(&imxdma->lock);
+	spin_unlock_irqrestore(&imxdma->lock, flags);
+
+	if (desc->desc.callback)
+		desc->desc.callback(desc->desc.callback_param);
+
 }
 
 static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
@@ -862,7 +861,7 @@
 	kfree(imxdmac->sg_list);
 
 	imxdmac->sg_list = kcalloc(periods + 1,
-			sizeof(struct scatterlist), GFP_KERNEL);
+			sizeof(struct scatterlist), GFP_ATOMIC);
 	if (!imxdmac->sg_list)
 		return NULL;
 
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index a0de82e..7f97339 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -28,34 +28,35 @@
 #include <linux/pm_runtime.h>
 #include <linux/intel_mid_dma.h>
 #include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
 
 #include "dmaengine.h"
 
-#define MAX_CHAN	4 /*max ch across controllers*/
 #include "intel_mid_dma_regs.h"
 
 #define INTEL_MID_DMAC1_ID		0x0814
 #define INTEL_MID_DMAC2_ID		0x0813
 #define INTEL_MID_GP_DMAC2_ID		0x0827
-#define INTEL_MFLD_DMAC1_ID		0x0830
-#define LNW_PERIPHRAL_MASK_BASE		0xFFAE8008
-#define LNW_PERIPHRAL_MASK_SIZE		0x10
-#define LNW_PERIPHRAL_STATUS		0x0
-#define LNW_PERIPHRAL_MASK		0x8
+#define INTEL_MRFLD_GP_DMAC2_ID         0x11A2
+#define INTEL_MRFLD_DMAC0_ID		0x119B
 
-struct intel_mid_dma_probe_info {
-	u8 max_chan;
-	u8 ch_base;
-	u16 block_size;
-	u32 pimr_mask;
-};
+#define LNW_PERIPHRAL_MASK_SIZE		0x20
+#define ENABLE_PARTITION_UPDATE		(BIT(26))
 
-#define INFO(_max_chan, _ch_base, _block_size, _pimr_mask) \
+#define INFO(_max_chan, _ch_base, _block_size, _pimr_mask,	\
+		_pimr_base, _dword_trf, _pimr_offset, _pci_id,	\
+			_pdma_ops)				\
 	((kernel_ulong_t)&(struct intel_mid_dma_probe_info) {	\
 		.max_chan = (_max_chan),			\
 		.ch_base = (_ch_base),				\
 		.block_size = (_block_size),			\
 		.pimr_mask = (_pimr_mask),			\
+		.pimr_base = (_pimr_base),			\
+		.dword_trf = (_dword_trf),			\
+		.pimr_offset = (_pimr_offset),			\
+		.pci_id = (_pci_id),				\
+		.pdma_ops = (_pdma_ops)				\
 	})
 
 /*****************************************************************************
@@ -65,32 +66,84 @@
  * @status: status mask
  * @base: dma ch base value
  *
- * Modify the status mask and return the channel index needing
- * attention (or -1 if neither)
+ * Returns the channel index by checking the status bits.
+ * If none of the bits in status are set, then returns -1.
  */
-static int get_ch_index(int *status, unsigned int base)
+static int get_ch_index(int status, unsigned int base)
 {
 	int i;
-	for (i = 0; i < MAX_CHAN; i++) {
-		if (*status & (1 << (i + base))) {
-			*status = *status & ~(1 << (i + base));
-			pr_debug("MDMA: index %d New status %x\n", i, *status);
+	for (i = 0; i < MID_MAX_CHAN; i++) {
+		if (status & (1 << (i + base)))
 			return i;
-		}
 	}
 	return -1;
 }
 
+static void dump_dma_reg(struct dma_chan *chan)
+{
+	struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(chan);
+	struct middma_device	*mid = to_middma_device(chan->device);
+
+	if (!mid->pimr_base)
+		return;
+
+	pr_debug("<<<<<<<<<<<< DMA Dump Start >>>>>>>>>>>>");
+	pr_debug("DMA Dump for Channel id:%d & Chnl Base:%p",
+					midc->ch_id, midc->ch_regs);
+	/* dump common DMA registers */
+	pr_debug("PIMR:\t%#x", readl(mid->mask_reg) - 8);
+	pr_debug("ISRX:\t%#x", readl(mid->mask_reg));
+	pr_debug("ISRD:\t%#x", readl(mid->mask_reg + 0x8));
+	pr_debug("IMRX:\t%#x", readl(mid->mask_reg + 0x10));
+	pr_debug("IMRD:\t%#x", readl(mid->mask_reg + 0x18));
+	pr_debug("DMA_CHAN_EN:\t%#x", readl(midc->dma_base + DMA_CHAN_EN));
+	pr_debug("DMA_CFG:\t%#x", readl(midc->dma_base + DMA_CFG));
+	pr_debug("INTR_STATUS:\t%#x", readl(midc->dma_base + INTR_STATUS));
+	pr_debug("MASK_TFR:\t%#x", readl(midc->dma_base + MASK_TFR));
+	pr_debug("MASK_BLOCK:\t%#x", readl(midc->dma_base + MASK_BLOCK));
+	pr_debug("MASK_ERR:\t%#x", readl(midc->dma_base + MASK_ERR));
+	pr_debug("RAW_TFR:\t%#x", readl(midc->dma_base + RAW_TFR));
+	pr_debug("RAW_BLOCK:\t%#x", readl(midc->dma_base + RAW_BLOCK));
+	pr_debug("RAW_ERR:\t%#x", readl(midc->dma_base + RAW_ERR));
+	pr_debug("STATUS_TFR:\t%#x", readl(midc->dma_base + STATUS_TFR));
+	pr_debug("STATUS_BLOCK:\t%#x", readl(midc->dma_base + STATUS_BLOCK));
+	pr_debug("STATUS_ERR:\t%#x", readl(midc->dma_base + STATUS_ERR));
+	if (!mid->dword_trf) {
+		pr_debug("FIFO_PARTITION0_LO:\t%#x",
+				readl(midc->dma_base + FIFO_PARTITION0_LO));
+		pr_debug("FIFO_PARTITION0_HI:\t%#x",
+				readl(midc->dma_base + FIFO_PARTITION0_HI));
+		pr_debug("FIFO_PARTITION1_LO:\t%#x",
+				readl(midc->dma_base + FIFO_PARTITION1_LO));
+		pr_debug("FIFO_PARTITION1_HI:\t%#x",
+				readl(midc->dma_base + FIFO_PARTITION1_HI));
+		pr_debug("CH_SAI_ERR:\t%#x", readl(midc->dma_base + CH_SAI_ERR));
+	}
+
+	/* dump channel specific registers */
+	pr_debug("SAR:\t%#x", readl(midc->ch_regs + SAR));
+	pr_debug("DAR:\t%#x", readl(midc->ch_regs + DAR));
+	pr_debug("LLP:\t%#x", readl(midc->ch_regs + LLP));
+	pr_debug("CTL_LOW:\t%#x", readl(midc->ch_regs + CTL_LOW));
+	pr_debug("CTL_HIGH:\t%#x", readl(midc->ch_regs + CTL_HIGH));
+	pr_debug("CFG_LOW:\t%#x", readl(midc->ch_regs + CFG_LOW));
+	pr_debug("CFG_HIGH:\t%#x", readl(midc->ch_regs + CFG_HIGH));
+	pr_debug("<<<<<<<<<<<< DMA Dump ends >>>>>>>>>>>>");
+}
+
 /**
  * get_block_ts	-	calculates dma transaction length
  * @len: dma transfer length
  * @tx_width: dma transfer src width
  * @block_size: dma controller max block size
+ * @dword_trf: is transfer dword size aligned and needs the data transfer to
+ *   be in terms of data items and not bytes
  *
  * Based on src width calculate the DMA trsaction length in data items
  * return data items or FFFF if exceeds max length for block
  */
-static int get_block_ts(int len, int tx_width, int block_size)
+static unsigned int get_block_ts(int len, int tx_width,
+				int block_size, int dword_trf)
 {
 	int byte_width = 0, block_ts = 0;
 
@@ -106,13 +159,46 @@
 		byte_width = 4;
 		break;
 	}
-
-	block_ts = len/byte_width;
+	if (dword_trf)
+		block_ts = len/byte_width;
+	else
+		block_ts = len;
 	if (block_ts > block_size)
 		block_ts = 0xFFFF;
 	return block_ts;
 }
 
+/**
+ * get_reg_width	-	computes the DMA sample width
+ * @kernel_width: Kernel DMA slave bus width
+ *
+ * converts the DMA kernel slave bus width in the Intel DMA
+ * bus width
+ */
+static int get_reg_width(enum dma_slave_buswidth kernel_width)
+{
+	int reg_width = -1;
+
+	switch (kernel_width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		reg_width = 0;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		reg_width = 1;
+		break;
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+		reg_width = 2;
+		break;
+	case DMA_SLAVE_BUSWIDTH_UNDEFINED:
+	case DMA_SLAVE_BUSWIDTH_8_BYTES:
+	default:
+		pr_err("ERR_MDMA: get_reg_width unsupported reg width\n");
+		break;
+	}
+	return reg_width;
+}
+
+
 /*****************************************************************************
 DMAC1 interrupt Functions*/
 
@@ -129,9 +215,9 @@
 	u32 pimr;
 
 	if (mid->pimr_mask) {
-		pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK);
+		pimr = readl(mid->mask_reg + mid->pimr_offset);
 		pimr |= mid->pimr_mask;
-		writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK);
+		writel(pimr, mid->mask_reg + mid->pimr_offset);
 	}
 	return;
 }
@@ -149,14 +235,37 @@
 	u32 pimr;
 	struct middma_device *mid = to_middma_device(midc->chan.device);
 
-	if (mid->pimr_mask) {
-		pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK);
+	if (mid->pimr_mask && mid->dword_trf) {
+		pimr = readl(mid->mask_reg + mid->pimr_offset);
 		pimr &= ~mid->pimr_mask;
-		writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK);
+		writel(pimr, mid->mask_reg + mid->pimr_offset);
+	}
+	if (mid->pimr_mask && !mid->dword_trf) {
+		pimr = readl(mid->mask_reg + mid->pimr_offset);
+		pimr &= ~(1 << (midc->ch_id + 16));
+		writel(pimr, mid->mask_reg + mid->pimr_offset);
 	}
 	return;
 }
 
+/*
+ * Some consumer may need to know how many bytes have been
+ * really transfered for one specific dma channel
+ */
+inline dma_addr_t intel_dma_get_src_addr(struct dma_chan *chan)
+{
+	struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(chan);
+	return readl(midc->ch_regs + SAR);
+}
+EXPORT_SYMBOL(intel_dma_get_src_addr);
+
+inline dma_addr_t intel_dma_get_dst_addr(struct dma_chan *chan)
+{
+	struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(chan);
+	return readl(midc->ch_regs + DAR);
+}
+EXPORT_SYMBOL(intel_dma_get_dst_addr);
+
 /**
  * enable_dma_interrupt -	enable the periphral interrupt
  * @midc: dma channel for which enable interrupt is required
@@ -167,10 +276,13 @@
  */
 static void enable_dma_interrupt(struct intel_mid_dma_chan *midc)
 {
+	struct middma_device *mid = to_middma_device(midc->chan.device);
+
 	dmac1_unmask_periphral_intr(midc);
 
 	/*en ch interrupts*/
 	iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR);
+	set_bit(midc->ch_id, &mid->tfr_intr_mask);
 	iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR);
 	return;
 }
@@ -185,10 +297,39 @@
  */
 static void disable_dma_interrupt(struct intel_mid_dma_chan *midc)
 {
+	struct middma_device *mid = to_middma_device(midc->chan.device);
+	u32 pimr;
+
 	/*Check LPE PISR, make sure fwd is disabled*/
 	iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_BLOCK);
+	clear_bit(midc->ch_id, &mid->block_intr_mask);
 	iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR);
+	clear_bit(midc->ch_id, &mid->tfr_intr_mask);
 	iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR);
+	if (mid->pimr_mask && !mid->dword_trf) {
+		pimr = readl(mid->mask_reg + mid->pimr_offset);
+		pimr |= (1 << (midc->ch_id + 16));
+		writel(pimr, mid->mask_reg + mid->pimr_offset);
+	}
+
+	return;
+}
+
+/**
+ * clear_dma_channel_interrupt - clear channel interrupt
+ * @midc: dma channel for which clear interrupt is required
+ *
+ */
+static void clear_dma_channel_interrupt(struct intel_mid_dma_chan *midc)
+{
+	struct middma_device *mid = to_middma_device(midc->chan.device);
+
+	/*clearing this interrupts first*/
+	iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_TFR);
+	iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_BLOCK);
+	iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_ERR);
+
+
 	return;
 }
 
@@ -243,7 +384,7 @@
  * Load a transaction into the engine. This must be called with midc->lock
  * held and bh disabled.
  */
-static void midc_dostart(struct intel_mid_dma_chan *midc,
+static int midc_dostart(struct intel_mid_dma_chan *midc,
 			struct intel_mid_dma_desc *first)
 {
 	struct middma_device *mid = to_middma_device(midc->chan.device);
@@ -253,7 +394,7 @@
 		/*error*/
 		pr_err("ERR_MDMA: channel is busy in start\n");
 		/* The tasklet will hopefully advance the queue... */
-		return;
+		return -EBUSY;
 	}
 	midc->busy = true;
 	/*write registers and en*/
@@ -264,12 +405,13 @@
 	iowrite32(first->cfg_lo, midc->ch_regs + CFG_LOW);
 	iowrite32(first->ctl_lo, midc->ch_regs + CTL_LOW);
 	iowrite32(first->ctl_hi, midc->ch_regs + CTL_HIGH);
-	pr_debug("MDMA:TX SAR %x,DAR %x,CFGL %x,CFGH %x,CTLH %x, CTLL %x\n",
+	pr_debug("MDMA:TX SAR %x,DAR %x,CFGH %x,CFGL %x,CTLH %x, CTLL %x LLI %x",
 		(int)first->sar, (int)first->dar, first->cfg_hi,
-		first->cfg_lo, first->ctl_hi, first->ctl_lo);
+		first->cfg_lo, first->ctl_hi, first->ctl_lo, (int)first->lli_phys);
 	first->status = DMA_IN_PROGRESS;
 
 	iowrite32(ENABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
+	return 0;
 }
 
 /**
@@ -290,7 +432,8 @@
 	struct intel_mid_dma_lli	*llitem;
 	void *param_txd = NULL;
 
-	dma_cookie_complete(txd);
+	pr_debug("tx cookie after complete = %d\n", txd->cookie);
+
 	callback_txd = txd->callback;
 	param_txd = txd->callback_param;
 
@@ -303,46 +446,66 @@
 		else
 			desc->current_lli = 0;
 	}
-	spin_unlock_bh(&midc->lock);
+	if (midc->raw_tfr) {
+		dma_cookie_complete(txd);
+		list_del(&desc->desc_node);
+		desc->status = DMA_SUCCESS;
+		if (desc->lli != NULL && desc->lli->llp != 0)
+			dma_pool_free(desc->lli_pool, desc->lli,
+						desc->lli_phys);
+		list_add(&desc->desc_node, &midc->free_list);
+		midc->busy = false;
+		midc->raw_tfr = 0;
+		spin_unlock_bh(&midc->lock);
+	} else {
+		spin_unlock_bh(&midc->lock);
+	}
 	if (callback_txd) {
 		pr_debug("MDMA: TXD callback set ... calling\n");
 		callback_txd(param_txd);
 	}
-	if (midc->raw_tfr) {
-		desc->status = DMA_SUCCESS;
-		if (desc->lli != NULL) {
-			pci_pool_free(desc->lli_pool, desc->lli,
-						desc->lli_phys);
-			pci_pool_destroy(desc->lli_pool);
-			desc->lli = NULL;
-		}
-		list_move(&desc->desc_node, &midc->free_list);
-		midc->busy = false;
-	}
-	spin_lock_bh(&midc->lock);
 
+	spin_lock_bh(&midc->lock);
 }
-/**
- * midc_scan_descriptors -		check the descriptors in channel
- *					mark completed when tx is completete
- * @mid: device
- * @midc: channel to scan
- *
- * Walk the descriptor chain for the device and process any entries
- * that are complete.
- */
-static void midc_scan_descriptors(struct middma_device *mid,
+
+static struct
+intel_mid_dma_desc *midc_first_queued(struct intel_mid_dma_chan *midc)
+{
+	return list_entry(midc->queue.next, struct intel_mid_dma_desc, desc_node);
+}
+
+static void midc_collect_descriptors(struct middma_device *mid,
 				struct intel_mid_dma_chan *midc)
 {
 	struct intel_mid_dma_desc *desc = NULL, *_desc = NULL;
-
 	/*tx is complete*/
 	list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
 		if (desc->status == DMA_IN_PROGRESS)
 			midc_descriptor_complete(midc, desc);
 	}
-	return;
+
+}
+
+/**
+ * midc_start_descriptors -		start the descriptors in queue
+ *
+ * @mid: device
+ * @midc: channel to scan
+ *
+ */
+static void midc_start_descriptors(struct middma_device *mid,
+				struct intel_mid_dma_chan *midc)
+{
+	if (!list_empty(&midc->queue)) {
+		pr_debug("MDMA: submitting txn in queue\n");
+		if (0 == midc_dostart(midc, midc_first_queued(midc)))
+			list_splice_init(&midc->queue, &midc->active_list);
+		else
+			pr_warn("Submit failed as ch is busy\n");
 	}
+	return;
+}
+
 /**
  * midc_lli_fill_sg -		Helper function to convert
  *				SG list to Linked List Items.
@@ -357,7 +520,8 @@
  */
 static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc,
 				struct intel_mid_dma_desc *desc,
-				struct scatterlist *sglist,
+				struct scatterlist *src_sglist,
+				struct scatterlist *dst_sglist,
 				unsigned int sglen,
 				unsigned int flags)
 {
@@ -366,18 +530,18 @@
 	dma_addr_t lli_next, sg_phy_addr;
 	struct intel_mid_dma_lli *lli_bloc_desc;
 	union intel_mid_dma_ctl_lo ctl_lo;
-	union intel_mid_dma_ctl_hi ctl_hi;
+	u32 ctl_hi;
 	int i;
 
-	pr_debug("MDMA: Entered midc_lli_fill_sg\n");
+	pr_debug("MDMA: Entered %s\n", __func__);
 	mids = midc->mid_slave;
 
 	lli_bloc_desc = desc->lli;
 	lli_next = desc->lli_phys;
 
 	ctl_lo.ctl_lo = desc->ctl_lo;
-	ctl_hi.ctl_hi = desc->ctl_hi;
-	for_each_sg(sglist, sg, sglen, i) {
+	ctl_hi = desc->ctl_hi;
+	for_each_sg(src_sglist, sg, sglen, i) {
 		/*Populate CTL_LOW and LLI values*/
 		if (i != sglen - 1) {
 			lli_next = lli_next +
@@ -389,14 +553,14 @@
 				lli_next = desc->lli_phys;
 			} else {
 				lli_next = 0;
-				ctl_lo.ctlx.llp_dst_en = 0;
-				ctl_lo.ctlx.llp_src_en = 0;
+				/* llp_dst_en = 0 llp_src_en = 0 */
+				ctl_lo.ctl_lo &= ~(1 << CTL_LO_BIT_LLP_DST_EN);
+				ctl_lo.ctl_lo &= ~(1 << CTL_LO_BIT_LLP_SRC_EN);
 			}
 		}
 		/*Populate CTL_HI values*/
-		ctl_hi.ctlx.block_ts = get_block_ts(sg_dma_len(sg),
-							desc->width,
-							midc->dma->block_size);
+		ctl_hi = get_block_ts(sg->length, desc->width,
+					midc->dma->block_size, midc->dma->dword_trf);
 		/*Populate SAR and DAR values*/
 		sg_phy_addr = sg_dma_address(sg);
 		if (desc->dirn ==  DMA_MEM_TO_DEV) {
@@ -405,13 +569,21 @@
 		} else if (desc->dirn ==  DMA_DEV_TO_MEM) {
 			lli_bloc_desc->sar  = mids->dma_slave.src_addr;
 			lli_bloc_desc->dar  = sg_phy_addr;
+		} else if (desc->dirn == DMA_MEM_TO_MEM && dst_sglist) {
+				lli_bloc_desc->sar = sg_phy_addr;
+				lli_bloc_desc->dar = sg_phys(dst_sglist);
 		}
 		/*Copy values into block descriptor in system memroy*/
 		lli_bloc_desc->llp = lli_next;
 		lli_bloc_desc->ctl_lo = ctl_lo.ctl_lo;
-		lli_bloc_desc->ctl_hi = ctl_hi.ctl_hi;
+		lli_bloc_desc->ctl_hi = ctl_hi;
 
+		pr_debug("MDMA:Calc CTL LO %x, CTL HI %x src: %x dest: %x sg->l:%x\n",
+					ctl_lo.ctl_lo, lli_bloc_desc->ctl_hi,
+					lli_bloc_desc->sar, lli_bloc_desc->dar, sg->length);
 		lli_bloc_desc++;
+		if (dst_sglist)
+			dst_sglist = sg_next(dst_sglist);
 	}
 	/*Copy very first LLI values to descriptor*/
 	desc->ctl_lo = desc->lli->ctl_lo;
@@ -421,13 +593,14 @@
 
 	return 0;
 }
+
 /*****************************************************************************
 DMA engine callback Functions*/
 /**
  * intel_mid_dma_tx_submit -	callback to submit DMA transaction
  * @tx: dma engine descriptor
  *
- * Submit the DMA transaction for this descriptor, start if ch idle
+ * Submit the DMA trasaction for this descriptor, start if ch idle
  */
 static dma_cookie_t intel_mid_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 {
@@ -436,7 +609,16 @@
 	dma_cookie_t		cookie;
 
 	spin_lock_bh(&midc->lock);
+
+	if (unlikely(!midc->in_use)) {
+		spin_unlock_bh(&midc->lock);
+		WARN(1, "chan[%d] gets new request after close",
+			tx->chan->chan_id);
+		return -EIO;
+	}
+
 	cookie = dma_cookie_assign(tx);
+	pr_debug("Allocated cookie = %d\n", cookie);
 
 	if (list_empty(&midc->active_list))
 		list_add_tail(&desc->desc_node, &midc->active_list);
@@ -461,11 +643,87 @@
 
 	spin_lock_bh(&midc->lock);
 	if (!list_empty(&midc->queue))
-		midc_scan_descriptors(to_middma_device(chan->device), midc);
+		midc_start_descriptors(to_middma_device(chan->device), midc);
 	spin_unlock_bh(&midc->lock);
 }
 
 /**
+ * dma_wait_for_suspend - performs following functionality
+ * 		1. Suspends channel using mask bits
+ * 		2. Wait till FIFO to get empty
+ * 		3. Disable channel
+ * 		4. restore the previous masked bits
+ *
+ * @chan: chan where pending trascation needs to be checked and submitted
+ * @mask: mask bits to be used for suspend operation
+ *
+ */
+static inline void dma_wait_for_suspend(struct dma_chan *chan, unsigned int mask)
+{
+	union intel_mid_dma_cfg_lo cfg_lo;
+	struct middma_device	*mid = to_middma_device(chan->device);
+	struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(chan);
+	int i;
+	const int max_loops = 100;
+
+	/* Suspend channel */
+	cfg_lo.cfg_lo = ioread32(midc->ch_regs + CFG_LOW);
+	cfg_lo.cfg_lo |= mask;
+	iowrite32(cfg_lo.cfg_lo, midc->ch_regs + CFG_LOW);
+	/* wait till FIFO gets empty */
+	/* FIFO should be cleared in a couple of milli secs,
+	   but most of the time after a 'cpu_relax' */
+	for (i = 0; i < max_loops; i++) {
+		cfg_lo.cfg_lo = ioread32(midc->ch_regs + CFG_LOW);
+		if (cfg_lo.cfgx.fifo_empty)
+			break;
+		/* use udelay since this might called from atomic context,
+		   and use incremental backoff time */
+		if (i)
+			udelay(i);
+		else
+			cpu_relax();
+	}
+
+	if (i == max_loops)
+		pr_info("Waited 5 ms for chan[%d] FIFO to get empty\n",
+			chan->chan_id);
+	else
+		pr_debug("waited for %d loops for chan[%d] FIFO to get empty",
+			i, chan->chan_id);
+
+	iowrite32(DISABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
+
+	cfg_lo.cfg_lo = ioread32(midc->ch_regs + CFG_LOW);
+	cfg_lo.cfg_lo &= ~mask;
+	iowrite32(cfg_lo.cfg_lo, midc->ch_regs + CFG_LOW);
+}
+/**
+ * intel_mid_dma_chan_suspend_v1 - suspends the given channel, waits
+ *		till FIFO is cleared and disables channel.
+ * @chan: chan where pending trascation needs to be checked and submitted
+ *
+ */
+static void intel_mid_dma_chan_suspend_v1(struct dma_chan *chan)
+{
+
+	pr_debug("%s", __func__);
+	dma_wait_for_suspend(chan, CH_SUSPEND);
+}
+
+/**
+ * intel_mid_dma_chan_suspend_v2 - suspends the given channel, waits
+ *		till FIFO is cleared and disables channel.
+ * @chan: chan where pending trascation needs to be checked and submitted
+ *
+ */
+static void intel_mid_dma_chan_suspend_v2(struct dma_chan *chan)
+{
+	pr_debug("%s", __func__);
+	dma_wait_for_suspend(chan, CH_SUSPEND | CH_DRAIN);
+}
+
+/**
  * intel_mid_dma_tx_status -	Return status of txn
  * @chan: chan for where status needs to be checked
  * @cookie: cookie for txn
@@ -483,7 +741,7 @@
 	ret = dma_cookie_status(chan, cookie, txstate);
 	if (ret != DMA_SUCCESS) {
 		spin_lock_bh(&midc->lock);
-		midc_scan_descriptors(to_middma_device(chan->device), midc);
+		midc_start_descriptors(to_middma_device(chan->device), midc);
 		spin_unlock_bh(&midc->lock);
 
 		ret = dma_cookie_status(chan, cookie, txstate);
@@ -509,6 +767,7 @@
 	midc->mid_slave = mid_slave;
 	return 0;
 }
+
 /**
  * intel_mid_dma_device_control -	DMA device control
  * @chan: chan for DMA control
@@ -523,11 +782,30 @@
 	struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(chan);
 	struct middma_device	*mid = to_middma_device(chan->device);
 	struct intel_mid_dma_desc	*desc, *_desc;
-	union intel_mid_dma_cfg_lo cfg_lo;
+	struct dma_async_tx_descriptor	*txd;
 
+	pr_debug("%s:CMD:%d for channel:%d\n", __func__, cmd, midc->ch_id);
 	if (cmd == DMA_SLAVE_CONFIG)
 		return dma_slave_control(chan, arg);
 
+	/*
+	 * Leverage the DMA_PAUSE/DMA_RESUME for tuntime PM managemnt.
+	 * DMA customer need make sure the channel is stopped before calling
+	 * the DMA_PAUSE here, and don't start DMA channel befor calling
+	 * DMA_RESUME.
+	 */
+	if (cmd == DMA_PAUSE) {
+		midc->in_use = 0;
+		pm_runtime_put_sync(mid->dev);
+		return 0;
+	}
+
+	if (cmd == DMA_RESUME) {
+		midc->in_use = 1;
+		pm_runtime_get_sync(mid->dev);
+		return 0;
+	}
+
 	if (cmd != DMA_TERMINATE_ALL)
 		return -ENXIO;
 
@@ -536,30 +814,30 @@
 		spin_unlock_bh(&midc->lock);
 		return 0;
 	}
-	/*Suspend and disable the channel*/
-	cfg_lo.cfg_lo = ioread32(midc->ch_regs + CFG_LOW);
-	cfg_lo.cfgx.ch_susp = 1;
-	iowrite32(cfg_lo.cfg_lo, midc->ch_regs + CFG_LOW);
-	iowrite32(DISABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
-	midc->busy = false;
-	/* Disable interrupts */
-	disable_dma_interrupt(midc);
-	midc->descs_allocated = 0;
 
-	spin_unlock_bh(&midc->lock);
+	/* Disable CH interrupts */
+	disable_dma_interrupt(midc);
+	/* clear channel interrupts */
+	clear_dma_channel_interrupt(midc);
+	mid->dma_ops.dma_chan_suspend(chan);
+	midc->busy = false;
+	midc->descs_allocated = 0;
 	list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
-		if (desc->lli != NULL) {
-			pci_pool_free(desc->lli_pool, desc->lli,
-						desc->lli_phys);
-			pci_pool_destroy(desc->lli_pool);
-			desc->lli = NULL;
+		if (desc->status == DMA_IN_PROGRESS) {
+			txd = &desc->txd;
+			dma_cookie_complete(txd);
 		}
-		list_move(&desc->desc_node, &midc->free_list);
+		list_del(&desc->desc_node);
+		if (desc->lli != NULL)
+			dma_pool_free(desc->lli_pool, desc->lli,
+						desc->lli_phys);
+		list_add(&desc->desc_node, &midc->free_list);
 	}
+	spin_unlock_bh(&midc->lock);
+
 	return 0;
 }
 
-
 /**
  * intel_mid_dma_prep_memcpy -	Prep memcpy txn
  * @chan: chan for DMA transfer
@@ -580,10 +858,12 @@
 	struct intel_mid_dma_desc *desc = NULL;
 	struct intel_mid_dma_slave *mids;
 	union intel_mid_dma_ctl_lo ctl_lo;
-	union intel_mid_dma_ctl_hi ctl_hi;
+	u32 ctl_hi;
 	union intel_mid_dma_cfg_lo cfg_lo;
 	union intel_mid_dma_cfg_hi cfg_hi;
 	enum dma_slave_buswidth width;
+	int dst_reg_width = 0;
+	int src_reg_width = 0;
 
 	pr_debug("MDMA: Prep for memcpy\n");
 	BUG_ON(!chan);
@@ -596,6 +876,11 @@
 	mids = midc->mid_slave;
 	BUG_ON(!mids);
 
+	if (unlikely(!midc->in_use)) {
+		pr_err("ERR_MDMA: %s: channel not in use", __func__);
+		return NULL;
+	}
+
 	pr_debug("MDMA:called for DMA %x CH %d Length %zu\n",
 				midc->dma->pci_id, midc->ch_id, len);
 	pr_debug("MDMA:Cfg passed Mode %x, Dirn %x, HS %x, Width %x\n",
@@ -635,35 +920,35 @@
 		} else {
 			cfg_hi.cfgx.protctl = 0x1; /*default value*/
 			cfg_hi.cfgx.src_per = cfg_hi.cfgx.dst_per =
-					midc->ch_id - midc->dma->chan_base;
+				midc->ch_id - midc->dma->chan_base;
 		}
 	}
-
 	/*calculate CTL_HI*/
-	ctl_hi.ctlx.reser = 0;
-	ctl_hi.ctlx.done  = 0;
 	width = mids->dma_slave.src_addr_width;
-
-	ctl_hi.ctlx.block_ts = get_block_ts(len, width, midc->dma->block_size);
+	ctl_hi = get_block_ts(len, width, midc->dma->block_size, midc->dma->dword_trf);
 	pr_debug("MDMA:calc len %d for block size %d\n",
-				ctl_hi.ctlx.block_ts, midc->dma->block_size);
+				ctl_hi, midc->dma->block_size);
 	/*calculate CTL_LO*/
 	ctl_lo.ctl_lo = 0;
 	ctl_lo.ctlx.int_en = 1;
+
+	dst_reg_width = get_reg_width(mids->dma_slave.dst_addr_width);
+	if (dst_reg_width < 0) {
+		pr_err("ERR_MDMA: Failed to get DST reg width\n");
+		return NULL;
+
+	}
+	ctl_lo.ctlx.dst_tr_width = dst_reg_width;
+
+	src_reg_width = get_reg_width(mids->dma_slave.src_addr_width);
+	if (src_reg_width < 0) {
+		pr_err("ERR_MDMA: Failed to get SRC reg width\n");
+				return NULL;
+	}
+	ctl_lo.ctlx.src_tr_width = src_reg_width;
 	ctl_lo.ctlx.dst_msize = mids->dma_slave.src_maxburst;
 	ctl_lo.ctlx.src_msize = mids->dma_slave.dst_maxburst;
 
-	/*
-	 * Here we need some translation from "enum dma_slave_buswidth"
-	 * to the format for our dma controller
-	 *		standard	intel_mid_dmac's format
-	 *		 1 Byte			0b000
-	 *		 2 Bytes		0b001
-	 *		 4 Bytes		0b010
-	 */
-	ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width / 2;
-	ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width / 2;
-
 	if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) {
 		ctl_lo.ctlx.tt_fc = 0;
 		ctl_lo.ctlx.sinc = 0;
@@ -681,7 +966,7 @@
 	}
 
 	pr_debug("MDMA:Calc CTL LO %x, CTL HI %x, CFG LO %x, CFG HI %x\n",
-		ctl_lo.ctl_lo, ctl_hi.ctl_hi, cfg_lo.cfg_lo, cfg_hi.cfg_hi);
+		ctl_lo.ctl_lo, ctl_hi, cfg_lo.cfg_lo, cfg_hi.cfg_hi);
 
 	enable_dma_interrupt(midc);
 
@@ -694,7 +979,7 @@
 	desc->cfg_hi = cfg_hi.cfg_hi;
 	desc->cfg_lo = cfg_lo.cfg_lo;
 	desc->ctl_lo = ctl_lo.ctl_lo;
-	desc->ctl_hi = ctl_hi.ctl_hi;
+	desc->ctl_hi = ctl_hi;
 	desc->width = width;
 	desc->dirn = mids->dma_slave.direction;
 	desc->lli_phys = 0;
@@ -707,6 +992,330 @@
 	midc_desc_put(midc, desc);
 	return NULL;
 }
+
+/**
+ * intel_mid_dma_prep_memcpy_v2 - Prep memcpy txn
+ * @chan: chan for DMA transfer
+ * @dest: destn address
+ * @src: src address
+ * @len: DMA transfer len
+ * @flags: DMA flags
+ *
+ * Perform a DMA memcpy. Note we support slave periphral DMA transfers only
+ * The periphral txn details should be filled in slave structure properly
+ * Returns the descriptor for this txn
+ */
+static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy_v2(
+			struct dma_chan *chan, dma_addr_t dest,
+			dma_addr_t src, size_t len, unsigned long flags)
+{
+	struct intel_mid_dma_chan *midc;
+	struct intel_mid_dma_desc *desc = NULL;
+	struct intel_mid_dma_slave *mids;
+	union intel_mid_dma_ctl_lo ctl_lo;
+	u32 ctl_hi;
+	union intel_mid_dma_cfg_lo cfg_lo;
+	union intel_mid_dma_cfg_hi cfg_hi;
+	enum dma_slave_buswidth width;
+	int dst_reg_width = 0;
+	int src_reg_width = 0;
+
+	pr_debug("MDMA:%s\n", __func__);
+	BUG_ON(!chan);
+	if (!len)
+		return NULL;
+
+	midc = to_intel_mid_dma_chan(chan);
+	BUG_ON(!midc);
+
+	mids = midc->mid_slave;
+	BUG_ON(!mids);
+
+	if (unlikely(!midc->in_use)) {
+		pr_err("ERR_MDMA: %s: channel not in use", __func__);
+		return NULL;
+	}
+
+	pr_debug("MDMA:called for DMA %x CH %d Length %zu\n",
+				midc->dma->pci_id, midc->ch_id, len);
+	pr_debug("MDMA:Cfg passed Mode %x, Dirn %x, HS %x, Width %x\n",
+			mids->cfg_mode, mids->dma_slave.direction,
+			mids->hs_mode, mids->dma_slave.src_addr_width);
+
+	/*calculate CFG_LO*/
+	cfg_lo.cfgx_v2.dst_burst_align = 1;
+	cfg_lo.cfgx_v2.src_burst_align = 1;
+
+	/* For  mem to mem transfer, it's SW HS only*/
+	cfg_hi.cfg_hi = 0;
+	/*calculate CFG_HI for mem to/from dev scenario */
+	if (mids->cfg_mode != LNW_DMA_MEM_TO_MEM) {
+		if (midc->dma->pimr_mask) {
+			/* device_instace => SSP0 = 0, SSP1 = 1, SSP2 = 2*/
+			if (mids->device_instance > 2) {
+				pr_err("Invalid SSP identifier\n");
+				return NULL;
+			}
+			cfg_hi.cfgx_v2.src_per = 0;
+			cfg_hi.cfgx_v2.dst_per = 0;
+			if (mids->dma_slave.direction == DMA_MEM_TO_DEV)
+				/* SSP DMA in Tx direction */
+				cfg_hi.cfgx_v2.dst_per = (2 * mids->device_instance) + 1;
+			else if (mids->dma_slave.direction == DMA_DEV_TO_MEM)
+				/* SSP DMA in Rx direction */
+				cfg_hi.cfgx_v2.src_per = (2 * mids->device_instance);
+			else
+				return NULL;
+
+		} else if (midc->dma->pci_id == INTEL_MRFLD_GP_DMAC2_ID) {
+			if (mids->dma_slave.direction == DMA_MEM_TO_DEV) {
+				cfg_hi.cfgx_v2.src_per = 0;
+
+				if (mids->device_instance ==
+					MRFL_INSTANCE_SPI3)
+					cfg_hi.cfgx_v2.dst_per = 0xF;
+				else if (mids->device_instance ==
+					MRFL_INSTANCE_SPI5)
+					cfg_hi.cfgx_v2.dst_per = 0xD;
+				else if (mids->device_instance ==
+					MRFL_INSTANCE_SPI6)
+					cfg_hi.cfgx_v2.dst_per = 0xB;
+				else
+					cfg_hi.cfgx_v2.dst_per = midc->ch_id
+						- midc->dma->chan_base;
+			} else if (mids->dma_slave.direction
+				== DMA_DEV_TO_MEM) {
+				if (mids->device_instance ==
+					MRFL_INSTANCE_SPI3)
+					cfg_hi.cfgx_v2.src_per = 0xE;
+				else if (mids->device_instance ==
+					MRFL_INSTANCE_SPI5)
+					cfg_hi.cfgx_v2.src_per = 0xC;
+				else if (mids->device_instance ==
+					MRFL_INSTANCE_SPI6)
+					cfg_hi.cfgx_v2.src_per = 0xA;
+				else
+					cfg_hi.cfgx_v2.src_per = midc->ch_id
+						- midc->dma->chan_base;
+
+				cfg_hi.cfgx_v2.dst_per = 0;
+			} else {
+				cfg_hi.cfgx_v2.dst_per =
+					cfg_hi.cfgx_v2.src_per = 0;
+			}
+		} else {
+			cfg_hi.cfgx_v2.src_per =
+				cfg_hi.cfgx_v2.dst_per =
+				midc->ch_id - midc->dma->chan_base;
+		}
+	}
+	/*calculate CTL_HI*/
+	width = mids->dma_slave.src_addr_width;
+	ctl_hi = get_block_ts(len, width, midc->dma->block_size, midc->dma->dword_trf);
+	pr_debug("MDMA:calc len %d for block size %d\n",
+				ctl_hi, midc->dma->block_size);
+	/*calculate CTL_LO*/
+	ctl_lo.ctl_lo = 0;
+	ctl_lo.ctlx_v2.int_en = 1;
+
+	dst_reg_width = get_reg_width(mids->dma_slave.dst_addr_width);
+	if (dst_reg_width < 0) {
+		pr_err("ERR_MDMA: Failed to get DST reg width\n");
+		return NULL;
+
+	}
+	ctl_lo.ctlx_v2.dst_tr_width = dst_reg_width;
+
+	src_reg_width = get_reg_width(mids->dma_slave.src_addr_width);
+	if (src_reg_width < 0) {
+		pr_err("ERR_MDMA: Failed to get SRC reg width\n");
+				return NULL;
+	}
+	ctl_lo.ctlx_v2.src_tr_width = src_reg_width;
+	ctl_lo.ctlx_v2.dst_msize = mids->dma_slave.src_maxburst;
+	ctl_lo.ctlx_v2.src_msize = mids->dma_slave.dst_maxburst;
+
+	if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) {
+		ctl_lo.ctlx_v2.tt_fc = 0;
+		ctl_lo.ctlx_v2.sinc = 0;
+		ctl_lo.ctlx_v2.dinc = 0;
+	} else {
+		if (mids->dma_slave.direction == DMA_MEM_TO_DEV) {
+			ctl_lo.ctlx_v2.sinc = 0;
+			ctl_lo.ctlx_v2.dinc = 1;
+			ctl_lo.ctlx_v2.tt_fc = 1;
+		} else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) {
+			ctl_lo.ctlx_v2.sinc = 1;
+			ctl_lo.ctlx_v2.dinc = 0;
+			ctl_lo.ctlx_v2.tt_fc = 2;
+		}
+	}
+
+	pr_debug("MDMA:Calc CTL LO %x, CTL HI %x, CFG LO %x, CFG HI %x\n",
+		ctl_lo.ctl_lo, ctl_hi, cfg_lo.cfg_lo, cfg_hi.cfg_hi);
+
+	enable_dma_interrupt(midc);
+
+	desc = midc_desc_get(midc);
+	if (desc == NULL)
+		goto err_desc_get;
+	desc->sar = src;
+	desc->dar = dest ;
+	desc->len = len;
+	desc->cfg_hi = cfg_hi.cfg_hi;
+	desc->cfg_lo = cfg_lo.cfg_lo;
+	desc->ctl_lo = ctl_lo.ctl_lo;
+	desc->ctl_hi = ctl_hi;
+	desc->width = width;
+	desc->dirn = mids->dma_slave.direction;
+	desc->lli_phys = 0;
+	desc->lli = NULL;
+	desc->lli_pool = NULL;
+	return &desc->txd;
+
+err_desc_get:
+	pr_err("ERR_MDMA: Failed to get desc\n");
+	midc_desc_put(midc, desc);
+	return NULL;
+}
+
+/**
+ * intel_mid_dma_chan_prep_desc
+ * @chan: chan for DMA transfer
+ * @src_sg: destination scatter gather list
+ * @dst_sg: source scatter gather list
+ * @flags: DMA flags
+ * @src_sg_len: length of src sg list
+ * @direction DMA transfer dirtn
+ *
+ * Prepares LLI based periphral transfer
+ */
+static struct dma_async_tx_descriptor *intel_mid_dma_chan_prep_desc(
+			struct dma_chan *chan, struct scatterlist *src_sg,
+			struct scatterlist *dst_sg, unsigned long flags,
+			unsigned long src_sg_len,
+			enum dma_transfer_direction direction)
+{
+	struct middma_device *mid = NULL;
+	struct intel_mid_dma_chan *midc = NULL;
+	struct intel_mid_dma_slave *mids = NULL;
+	struct intel_mid_dma_desc *desc = NULL;
+	struct dma_async_tx_descriptor *txd = NULL;
+	union intel_mid_dma_ctl_lo ctl_lo;
+	pr_debug("MDMA:intel_mid_dma_chan_prep_desc\n");
+
+	midc = to_intel_mid_dma_chan(chan);
+	BUG_ON(!midc);
+
+	mid = to_middma_device(midc->chan.device);
+	mids = midc->mid_slave;
+	BUG_ON(!mids);
+
+	if (!midc->dma->pimr_mask) {
+		pr_err("MDMA: SG list is not supported by this controller\n");
+		return  NULL;
+	}
+
+	txd = midc->dma->dma_ops.device_prep_dma_memcpy(chan, 0, 0, src_sg->length, flags);
+	if (NULL == txd) {
+		pr_err("MDMA: Prep memcpy failed\n");
+		return NULL;
+	}
+
+	desc = to_intel_mid_dma_desc(txd);
+	desc->dirn = direction;
+	ctl_lo.ctl_lo = desc->ctl_lo;
+	ctl_lo.ctl_lo |= (1 << CTL_LO_BIT_LLP_DST_EN);
+	ctl_lo.ctl_lo |= (1 << CTL_LO_BIT_LLP_SRC_EN);
+	desc->ctl_lo = ctl_lo.ctl_lo;
+	desc->lli_length = src_sg_len;
+	desc->current_lli = 0;
+	/* DMA coherent memory pool for LLI descriptors*/
+	desc->lli_pool = dma_pool_create("intel_mid_dma_lli_pool",
+				midc->dma->dev,
+				(sizeof(struct intel_mid_dma_lli)*src_sg_len),
+				32, 0);
+	if (NULL == desc->lli_pool) {
+		pr_err("MID_DMA:LLI pool create failed\n");
+		return NULL;
+	}
+	midc->lli_pool = desc->lli_pool;
+
+	desc->lli = dma_pool_alloc(desc->lli_pool, GFP_KERNEL, &desc->lli_phys);
+	if (!desc->lli) {
+		pr_err("MID_DMA: LLI alloc failed\n");
+		dma_pool_destroy(desc->lli_pool);
+		return NULL;
+	}
+
+	/*
+	 * dma_map_sg() helps to convert DMA address to comply to what the
+	 * device's dma mask asks for. On systems with 4+ GB DDR memory,
+	 * kmalloc() simply returns an address larger than 0x1 0000 0000.
+	 * For audio DMA(32-bit OCP master), obviously it can't handle
+	 * that address. So dma_map_sg() does the magic to re-assign a new
+	 * 32-bit DMA address(a memcpy actually on x86 no-IOMMU platforms)
+	 */
+	if (!dma_map_sg(mid->dev, src_sg, src_sg_len, DMA_MEM_TO_MEM)) {
+		pr_err("MID_DMA: dma_map_sg() failed\n");
+		dma_pool_free(desc->lli_pool, desc->lli, desc->lli_phys);
+		dma_pool_destroy(desc->lli_pool);
+		return NULL;
+	}
+
+	midc_lli_fill_sg(midc, desc, src_sg, dst_sg, src_sg_len, flags);
+	if (flags & DMA_PREP_INTERRUPT) {
+		/* Enable Block intr, disable TFR intr.
+		* It's not required to enable TFR, when Block intr is enabled
+		* Otherwise, for last block we will end up in invoking calltxd
+		* two times */
+
+		iowrite32(MASK_INTR_REG(midc->ch_id),
+					midc->dma_base + MASK_TFR);
+		clear_bit(midc->ch_id, &mid->tfr_intr_mask);
+		iowrite32(UNMASK_INTR_REG(midc->ch_id),
+					midc->dma_base + MASK_BLOCK);
+		set_bit(midc->ch_id, &mid->block_intr_mask);
+		midc->block_intr_status = true;
+		pr_debug("MDMA: Enabled Block Interrupt\n");
+	}
+	return &desc->txd;
+
+}
+
+/**
+ * intel_mid_dma_prep_sg -        Prep sg txn
+ * @chan: chan for DMA transfer
+ * @dst_sg: destination scatter gather list
+ * @dst_sg_len: length of dest sg list
+ * @src_sg: source scatter gather list
+ * @src_sg_len: length of src sg list
+ * @flags: DMA flags
+ *
+ * Prepares LLI based periphral transfer
+ */
+static struct dma_async_tx_descriptor *intel_mid_dma_prep_sg(
+			struct dma_chan *chan, struct scatterlist *dst_sg,
+			unsigned int dst_sg_len, struct scatterlist *src_sg,
+			unsigned int src_sg_len, unsigned long flags)
+{
+
+	pr_debug("MDMA: Prep for memcpy SG\n");
+
+	if ((dst_sg_len != src_sg_len) || (dst_sg == NULL) ||
+							(src_sg == NULL)) {
+		pr_err("MDMA: Invalid SG length\n");
+		return NULL;
+	}
+
+	pr_debug("MDMA: SG Length = %d, Flags = %#lx, src_sg->length = %d\n",
+				src_sg_len, flags, src_sg->length);
+
+	return intel_mid_dma_chan_prep_desc(chan, src_sg, dst_sg, flags,
+						src_sg_len, DMA_MEM_TO_MEM);
+
+}
+
 /**
  * intel_mid_dma_prep_slave_sg -	Prep slave sg txn
  * @chan: chan for DMA transfer
@@ -719,84 +1328,26 @@
  * Prepares LLI based periphral transfer
  */
 static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
-			struct dma_chan *chan, struct scatterlist *sgl,
+			struct dma_chan *chan, struct scatterlist *sg,
 			unsigned int sg_len, enum dma_transfer_direction direction,
 			unsigned long flags, void *context)
 {
-	struct intel_mid_dma_chan *midc = NULL;
-	struct intel_mid_dma_slave *mids = NULL;
-	struct intel_mid_dma_desc *desc = NULL;
-	struct dma_async_tx_descriptor *txd = NULL;
-	union intel_mid_dma_ctl_lo ctl_lo;
 
 	pr_debug("MDMA: Prep for slave SG\n");
 
-	if (!sg_len) {
+	if (!sg_len || sg == NULL) {
 		pr_err("MDMA: Invalid SG length\n");
 		return NULL;
 	}
-	midc = to_intel_mid_dma_chan(chan);
-	BUG_ON(!midc);
-
-	mids = midc->mid_slave;
-	BUG_ON(!mids);
-
-	if (!midc->dma->pimr_mask) {
-		/* We can still handle sg list with only one item */
-		if (sg_len == 1) {
-			txd = intel_mid_dma_prep_memcpy(chan,
-						mids->dma_slave.dst_addr,
-						mids->dma_slave.src_addr,
-						sg_dma_len(sgl),
-						flags);
-			return txd;
-		} else {
-			pr_warn("MDMA: SG list is not supported by this controller\n");
-			return  NULL;
-		}
-	}
-
 	pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n",
-			sg_len, direction, flags);
-
-	txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sg_dma_len(sgl), flags);
-	if (NULL == txd) {
-		pr_err("MDMA: Prep memcpy failed\n");
+				sg_len, direction, flags);
+	if (direction != DMA_MEM_TO_MEM) {
+		return intel_mid_dma_chan_prep_desc(chan, sg, NULL, flags,
+							sg_len, direction);
+	} else {
+		pr_err("MDMA: Invalid Direction\n");
 		return NULL;
 	}
-
-	desc = to_intel_mid_dma_desc(txd);
-	desc->dirn = direction;
-	ctl_lo.ctl_lo = desc->ctl_lo;
-	ctl_lo.ctlx.llp_dst_en = 1;
-	ctl_lo.ctlx.llp_src_en = 1;
-	desc->ctl_lo = ctl_lo.ctl_lo;
-	desc->lli_length = sg_len;
-	desc->current_lli = 0;
-	/* DMA coherent memory pool for LLI descriptors*/
-	desc->lli_pool = pci_pool_create("intel_mid_dma_lli_pool",
-				midc->dma->pdev,
-				(sizeof(struct intel_mid_dma_lli)*sg_len),
-				32, 0);
-	if (NULL == desc->lli_pool) {
-		pr_err("MID_DMA:LLI pool create failed\n");
-		return NULL;
-	}
-
-	desc->lli = pci_pool_alloc(desc->lli_pool, GFP_KERNEL, &desc->lli_phys);
-	if (!desc->lli) {
-		pr_err("MID_DMA: LLI alloc failed\n");
-		pci_pool_destroy(desc->lli_pool);
-		return NULL;
-	}
-
-	midc_lli_fill_sg(midc, desc, sgl, sg_len, flags);
-	if (flags & DMA_PREP_INTERRUPT) {
-		iowrite32(UNMASK_INTR_REG(midc->ch_id),
-				midc->dma_base + MASK_BLOCK);
-		pr_debug("MDMA:Enabled Block interrupt\n");
-	}
-	return &desc->txd;
 }
 
 /**
@@ -811,31 +1362,52 @@
 	struct middma_device	*mid = to_middma_device(chan->device);
 	struct intel_mid_dma_desc	*desc, *_desc;
 
+	pr_debug("entry:%s\n", __func__);
+	if (false == midc->in_use) {
+		pr_err("ERR_MDMA: try to free chnl already freed\n");
+		return;
+	}
 	if (true == midc->busy) {
 		/*trying to free ch in use!!!!!*/
 		pr_err("ERR_MDMA: trying to free ch in use\n");
+		dump_dma_reg(chan);
 	}
+
+	/* Disable CH interrupts */
+	disable_dma_interrupt(midc);
+	clear_dma_channel_interrupt(midc);
+
+	midc->block_intr_status = false;
+	midc->in_use = false;
+	midc->busy = false;
+
+	tasklet_unlock_wait(&mid->tasklet);
+
 	spin_lock_bh(&midc->lock);
 	midc->descs_allocated = 0;
 	list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
 		list_del(&desc->desc_node);
-		pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
+		dma_pool_free(mid->dma_pool, desc, desc->txd.phys);
 	}
 	list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) {
 		list_del(&desc->desc_node);
-		pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
+		dma_pool_free(mid->dma_pool, desc, desc->txd.phys);
 	}
 	list_for_each_entry_safe(desc, _desc, &midc->queue, desc_node) {
 		list_del(&desc->desc_node);
-		pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
+		dma_pool_free(mid->dma_pool, desc, desc->txd.phys);
 	}
+	midc->raw_tfr = 0;
 	spin_unlock_bh(&midc->lock);
-	midc->in_use = false;
-	midc->busy = false;
-	/* Disable CH interrupts */
-	iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK);
-	iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR);
-	pm_runtime_put(&mid->pdev->dev);
+
+	if (midc->lli_pool) {
+		dma_pool_destroy(midc->lli_pool);
+		midc->lli_pool = NULL;
+	}
+
+	/* Disable the channel */
+	iowrite32(DISABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
+	pm_runtime_put_sync(mid->dev);
 }
 
 /**
@@ -853,20 +1425,19 @@
 	dma_addr_t		phys;
 	int	i = 0;
 
-	pm_runtime_get_sync(&mid->pdev->dev);
+	pm_runtime_get_sync(mid->dev);
 
 	if (mid->state == SUSPENDED) {
-		if (dma_resume(&mid->pdev->dev)) {
+		if (dma_resume(mid->dev)) {
 			pr_err("ERR_MDMA: resume failed");
 			return -EFAULT;
 		}
 	}
 
 	/* ASSERT:  channel is idle */
-	if (test_ch_en(mid->dma_base, midc->ch_id)) {
-		/*ch is not idle*/
+	if (midc->in_use == true) {
 		pr_err("ERR_MDMA: ch not idle\n");
-		pm_runtime_put(&mid->pdev->dev);
+		pm_runtime_put_sync(mid->dev);
 		return -EIO;
 	}
 	dma_cookie_init(chan);
@@ -874,10 +1445,10 @@
 	spin_lock_bh(&midc->lock);
 	while (midc->descs_allocated < DESCS_PER_CHANNEL) {
 		spin_unlock_bh(&midc->lock);
-		desc = pci_pool_alloc(mid->dma_pool, GFP_KERNEL, &phys);
+		desc = dma_pool_alloc(mid->dma_pool, GFP_KERNEL, &phys);
 		if (!desc) {
 			pr_err("ERR_MDMA: desc failed\n");
-			pm_runtime_put(&mid->pdev->dev);
+			pm_runtime_put_sync(mid->dev);
 			return -ENOMEM;
 			/*check*/
 		}
@@ -889,9 +1460,10 @@
 		i = ++midc->descs_allocated;
 		list_add_tail(&desc->desc_node, &midc->free_list);
 	}
+	midc->busy = false;
 	spin_unlock_bh(&midc->lock);
 	midc->in_use = true;
-	midc->busy = false;
+	midc->block_intr_status = false;
 	pr_debug("MID_DMA: Desc alloc done ret: %d desc\n", i);
 	return i;
 }
@@ -906,7 +1478,8 @@
 static void midc_handle_error(struct middma_device *mid,
 		struct intel_mid_dma_chan *midc)
 {
-	midc_scan_descriptors(mid, midc);
+	midc_collect_descriptors(mid, midc);
+	midc_start_descriptors(mid, midc);
 }
 
 /**
@@ -920,26 +1493,27 @@
 {
 	struct middma_device *mid = NULL;
 	struct intel_mid_dma_chan *midc = NULL;
-	u32 status, raw_tfr, raw_block;
+	u32 status, raw_tfr, raw_block, raw_err;
 	int i;
-
 	mid = (struct middma_device *)data;
 	if (mid == NULL) {
 		pr_err("ERR_MDMA: tasklet Null param\n");
 		return;
 	}
-	pr_debug("MDMA: in tasklet for device %x\n", mid->pci_id);
 	raw_tfr = ioread32(mid->dma_base + RAW_TFR);
-	raw_block = ioread32(mid->dma_base + RAW_BLOCK);
-	status = raw_tfr | raw_block;
-	status &= mid->intr_mask;
+	status = raw_tfr & mid->tfr_intr_mask;
+	pr_debug("MDMA: in tasklet for device %x\n", mid->pci_id);
+	pr_debug("tfr_mask:%#lx, raw_tfr:%#x, status:%#x\n",
+			mid->tfr_intr_mask, raw_tfr, status);
 	while (status) {
 		/*txn interrupt*/
-		i = get_ch_index(&status, mid->chan_base);
+		i = get_ch_index(status, mid->chan_base);
 		if (i < 0) {
 			pr_err("ERR_MDMA:Invalid ch index %x\n", i);
 			return;
 		}
+		/* clear the status bit */
+		status = status & ~(1 << (i + mid->chan_base));
 		midc = &mid->ch[i];
 		if (midc == NULL) {
 			pr_err("ERR_MDMA:Null param midc\n");
@@ -947,40 +1521,72 @@
 		}
 		pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n",
 				status, midc->ch_id, i);
-		midc->raw_tfr = raw_tfr;
-		midc->raw_block = raw_block;
 		spin_lock_bh(&midc->lock);
+		midc->raw_tfr = raw_tfr;
 		/*clearing this interrupts first*/
 		iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_TFR);
-		if (raw_block) {
-			iowrite32((1 << midc->ch_id),
-				mid->dma_base + CLEAR_BLOCK);
+		if (likely(midc->in_use)) {
+			midc_collect_descriptors(mid, midc);
+			midc_start_descriptors(mid, midc);
 		}
-		midc_scan_descriptors(mid, midc);
 		pr_debug("MDMA:Scan of desc... complete, unmasking\n");
 		iowrite32(UNMASK_INTR_REG(midc->ch_id),
-				mid->dma_base + MASK_TFR);
-		if (raw_block) {
-			iowrite32(UNMASK_INTR_REG(midc->ch_id),
-				mid->dma_base + MASK_BLOCK);
-		}
+					mid->dma_base + MASK_TFR);
 		spin_unlock_bh(&midc->lock);
 	}
 
-	status = ioread32(mid->dma_base + RAW_ERR);
-	status &= mid->intr_mask;
+	raw_block = ioread32(mid->dma_base + RAW_BLOCK);
+	status = raw_block & mid->block_intr_mask;
+	pr_debug("MDMA: in tasklet for device %x\n", mid->pci_id);
+	pr_debug("block_mask:%#lx, raw_block%#x, status:%#x\n",
+			 mid->block_intr_mask, raw_block, status);
 	while (status) {
-		/*err interrupt*/
-		i = get_ch_index(&status, mid->chan_base);
+		/*txn interrupt*/
+		i = get_ch_index(status, mid->chan_base);
 		if (i < 0) {
 			pr_err("ERR_MDMA:Invalid ch index %x\n", i);
 			return;
 		}
+		/* clear the status bit */
+		status = status & ~(1 << (i + mid->chan_base));
 		midc = &mid->ch[i];
 		if (midc == NULL) {
 			pr_err("ERR_MDMA:Null param midc\n");
 			return;
 		}
+		pr_debug("MDMA:Tx complete interrupt raw block  %x, Ch No %d Index %d\n",
+				status, midc->ch_id, i);
+		spin_lock_bh(&midc->lock);
+		/*clearing this interrupts first*/
+
+		midc->raw_block = raw_block;
+		iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_BLOCK);
+		if (midc->block_intr_status) {
+			midc_collect_descriptors(mid, midc);
+			midc_start_descriptors(mid, midc);
+		}
+
+		iowrite32(UNMASK_INTR_REG(midc->ch_id),
+					mid->dma_base + MASK_BLOCK);
+		spin_unlock_bh(&midc->lock);
+	}
+
+	raw_err = ioread32(mid->dma_base + RAW_ERR);
+	status = raw_err & mid->intr_mask;
+	pr_debug("MDMA:raw error status:%#x\n", status);
+	while (status) {
+		/*err interrupt*/
+		i = get_ch_index(status, mid->chan_base);
+		if (i < 0) {
+			pr_err("ERR_MDMA:Invalid ch index %x (raw err)\n", i);
+			return;
+		}
+		status = status & ~(1 << (i + mid->chan_base));
+		midc = &mid->ch[i];
+		if (midc == NULL) {
+			pr_err("ERR_MDMA:Null param midc (raw err)\n");
+			return;
+		}
 		pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n",
 				status, midc->ch_id, i);
 
@@ -1018,33 +1624,55 @@
 static irqreturn_t intel_mid_dma_interrupt(int irq, void *data)
 {
 	struct middma_device *mid = data;
-	u32 tfr_status, err_status;
-	int call_tasklet = 0;
-
-	tfr_status = ioread32(mid->dma_base + RAW_TFR);
-	err_status = ioread32(mid->dma_base + RAW_ERR);
-	if (!tfr_status && !err_status)
-		return IRQ_NONE;
+	u32 tfr_status, err_status, block_status;
+	u32 isr;
 
 	/*DMA Interrupt*/
 	pr_debug("MDMA:Got an interrupt on irq %d\n", irq);
-	pr_debug("MDMA: Status %x, Mask %x\n", tfr_status, mid->intr_mask);
-	tfr_status &= mid->intr_mask;
+	if (!mid) {
+		pr_err("ERR_MDMA:null pointer mid\n");
+		return IRQ_NONE;
+	}
+
+	/* Read the interrupt status registers */
+	tfr_status = ioread32(mid->dma_base + STATUS_TFR);
+	err_status = ioread32(mid->dma_base + STATUS_ERR);
+	block_status = ioread32(mid->dma_base + STATUS_BLOCK);
+
+	/* Common case if the IRQ is shared with other devices */
+	if (!tfr_status && !err_status && !block_status)
+		return IRQ_NONE;
+
+	pr_debug("MDMA: trf_Status %x, Mask %x\n", tfr_status, mid->intr_mask);
 	if (tfr_status) {
 		/*need to disable intr*/
-		iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_TFR);
-		iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_BLOCK);
-		pr_debug("MDMA: Calling tasklet %x\n", tfr_status);
-		call_tasklet = 1;
+		iowrite32((tfr_status << INT_MASK_WE),
+						mid->dma_base + MASK_TFR);
 	}
-	err_status &= mid->intr_mask;
+	if (block_status) {
+		/*need to disable intr*/
+		iowrite32((block_status << INT_MASK_WE),
+						mid->dma_base + MASK_BLOCK);
+	}
 	if (err_status) {
 		iowrite32((err_status << INT_MASK_WE),
 			  mid->dma_base + MASK_ERR);
-		call_tasklet = 1;
 	}
-	if (call_tasklet)
-		tasklet_schedule(&mid->tasklet);
+	/* in mrlfd we need to clear the pisr bits to stop intr as well
+	 * so read the PISR register, see if we have pisr bits status and clear
+	 * them
+	 */
+	if (mid->pimr_mask && !mid->dword_trf) {
+		isr = readl(mid->mask_reg);
+		pr_debug("isr says: %x", isr);
+		if (isr) {
+			isr &= mid->pimr_mask;
+			pr_debug("writing isr: %x", isr);
+			writel(isr, mid->mask_reg);
+		}
+	}
+
+	tasklet_schedule(&mid->tasklet);
 
 	return IRQ_HANDLED;
 }
@@ -1059,6 +1687,42 @@
 	return intel_mid_dma_interrupt(irq, data);
 }
 
+static void config_dma_fifo_partition(struct middma_device *dma)
+{
+	/* program FIFO Partition registers - 128 bytes for each ch */
+	iowrite32(DMA_FIFO_SIZE, dma->dma_base + FIFO_PARTITION0_HI);
+	iowrite32(DMA_FIFO_SIZE, dma->dma_base + FIFO_PARTITION1_LO);
+	iowrite32(DMA_FIFO_SIZE, dma->dma_base + FIFO_PARTITION1_HI);
+	iowrite32(DMA_FIFO_SIZE | ENABLE_PARTITION_UPDATE,
+				dma->dma_base + FIFO_PARTITION0_LO);
+}
+
+/* v1 ops will be used for Medfield & CTP platforms */
+static struct intel_mid_dma_ops v1_dma_ops = {
+	.device_alloc_chan_resources	= intel_mid_dma_alloc_chan_resources,
+	.device_free_chan_resources	= intel_mid_dma_free_chan_resources,
+	.device_prep_dma_memcpy		= intel_mid_dma_prep_memcpy,
+	.device_prep_dma_sg		= intel_mid_dma_prep_sg,
+	.device_prep_slave_sg		= intel_mid_dma_prep_slave_sg,
+	.device_control			= intel_mid_dma_device_control,
+	.device_tx_status		= intel_mid_dma_tx_status,
+	.device_issue_pending		= intel_mid_dma_issue_pending,
+	.dma_chan_suspend		= intel_mid_dma_chan_suspend_v1,
+};
+
+/* v2 ops will be used in Merrifield and beyond platforms */
+static struct intel_mid_dma_ops v2_dma_ops = {
+	.device_alloc_chan_resources    = intel_mid_dma_alloc_chan_resources,
+	.device_free_chan_resources     = intel_mid_dma_free_chan_resources,
+	.device_prep_dma_memcpy         = intel_mid_dma_prep_memcpy_v2,
+	.device_prep_dma_sg             = intel_mid_dma_prep_sg,
+	.device_prep_slave_sg           = intel_mid_dma_prep_slave_sg,
+	.device_control                 = intel_mid_dma_device_control,
+	.device_tx_status               = intel_mid_dma_tx_status,
+	.device_issue_pending           = intel_mid_dma_issue_pending,
+	.dma_chan_suspend		= intel_mid_dma_chan_suspend_v2,
+};
+
 /**
  * mid_setup_dma -	Setup the DMA controller
  * @pdev: Controller PCI device structure
@@ -1066,33 +1730,33 @@
  * Initialize the DMA controller, channels, registers with DMA engine,
  * ISR. Initialize DMA controller channels.
  */
-static int mid_setup_dma(struct pci_dev *pdev)
+int mid_setup_dma(struct device *dev)
 {
-	struct middma_device *dma = pci_get_drvdata(pdev);
+	struct middma_device *dma = dev_get_drvdata(dev);
 	int err, i;
 
 	/* DMA coherent memory pool for DMA descriptor allocations */
-	dma->dma_pool = pci_pool_create("intel_mid_dma_desc_pool", pdev,
+	dma->dma_pool = dma_pool_create("intel_mid_dma_desc_pool", dev,
 					sizeof(struct intel_mid_dma_desc),
 					32, 0);
 	if (NULL == dma->dma_pool) {
-		pr_err("ERR_MDMA:pci_pool_create failed\n");
+		pr_err("ERR_MDMA:dma_pool_create failed\n");
 		err = -ENOMEM;
+		kfree(dma);
 		goto err_dma_pool;
 	}
 
 	INIT_LIST_HEAD(&dma->common.channels);
-	dma->pci_id = pdev->device;
 	if (dma->pimr_mask) {
-		dma->mask_reg = ioremap(LNW_PERIPHRAL_MASK_BASE,
-					LNW_PERIPHRAL_MASK_SIZE);
+		dma->mask_reg = devm_ioremap(dma->dev, dma->pimr_base, LNW_PERIPHRAL_MASK_SIZE);
 		if (dma->mask_reg == NULL) {
 			pr_err("ERR_MDMA:Can't map periphral intr space !!\n");
 			err = -ENOMEM;
-			goto err_ioremap;
+			goto err_setup;
 		}
-	} else
+	} else {
 		dma->mask_reg = NULL;
+	}
 
 	pr_debug("MDMA:Adding %d channel for this controller\n", dma->max_chan);
 	/*init CH structures*/
@@ -1137,18 +1801,17 @@
 	dma_cap_set(DMA_MEMCPY, dma->common.cap_mask);
 	dma_cap_set(DMA_SLAVE, dma->common.cap_mask);
 	dma_cap_set(DMA_PRIVATE, dma->common.cap_mask);
-	dma->common.dev = &pdev->dev;
+	dma->common.dev = dev;
 
-	dma->common.device_alloc_chan_resources =
-					intel_mid_dma_alloc_chan_resources;
-	dma->common.device_free_chan_resources =
-					intel_mid_dma_free_chan_resources;
+	dma->common.device_alloc_chan_resources = dma->dma_ops.device_alloc_chan_resources;
+	dma->common.device_free_chan_resources = dma->dma_ops.device_free_chan_resources;
 
-	dma->common.device_tx_status = intel_mid_dma_tx_status;
-	dma->common.device_prep_dma_memcpy = intel_mid_dma_prep_memcpy;
-	dma->common.device_issue_pending = intel_mid_dma_issue_pending;
-	dma->common.device_prep_slave_sg = intel_mid_dma_prep_slave_sg;
-	dma->common.device_control = intel_mid_dma_device_control;
+	dma->common.device_tx_status = dma->dma_ops.device_tx_status;
+	dma->common.device_prep_dma_memcpy = dma->dma_ops.device_prep_dma_memcpy;
+	dma->common.device_prep_dma_sg = dma->dma_ops.device_prep_dma_sg;
+	dma->common.device_issue_pending = dma->dma_ops.device_issue_pending;
+	dma->common.device_prep_slave_sg = dma->dma_ops.device_prep_slave_sg;
+	dma->common.device_control = dma->dma_ops.device_control;
 
 	/*enable dma cntrl*/
 	iowrite32(REG_BIT0, dma->dma_base + DMA_CFG);
@@ -1156,23 +1819,23 @@
 	/*register irq */
 	if (dma->pimr_mask) {
 		pr_debug("MDMA:Requesting irq shared for DMAC1\n");
-		err = request_irq(pdev->irq, intel_mid_dma_interrupt1,
+		err = devm_request_irq(dma->dev, dma->irq, intel_mid_dma_interrupt1,
 			IRQF_SHARED, "INTEL_MID_DMAC1", dma);
 		if (0 != err)
-			goto err_irq;
+			goto err_setup;
 	} else {
 		dma->intr_mask = 0x03;
 		pr_debug("MDMA:Requesting irq for DMAC2\n");
-		err = request_irq(pdev->irq, intel_mid_dma_interrupt2,
+		err = devm_request_irq(dma->dev, dma->irq, intel_mid_dma_interrupt2,
 			IRQF_SHARED, "INTEL_MID_DMAC2", dma);
 		if (0 != err)
-			goto err_irq;
+			goto err_setup;
 	}
 	/*register device w/ engine*/
 	err = dma_async_device_register(&dma->common);
 	if (0 != err) {
 		pr_err("ERR_MDMA:device_register failed: %d\n", err);
-		goto err_engine;
+		goto err_dma_pool;
 	}
 	if (dma->pimr_mask) {
 		pr_debug("setting up tasklet1 for DMAC1\n");
@@ -1181,15 +1844,15 @@
 		pr_debug("setting up tasklet2 for DMAC2\n");
 		tasklet_init(&dma->tasklet, dma_tasklet2, (unsigned long)dma);
 	}
+	if (!dma->dword_trf) {
+		config_dma_fifo_partition(dma);
+		/* Mask all interrupts from DMA controller to IA by default */
+		dmac1_mask_periphral_intr(dma);
+	}
 	return 0;
 
-err_engine:
-	free_irq(pdev->irq, dma);
-err_irq:
-	if (dma->mask_reg)
-		iounmap(dma->mask_reg);
-err_ioremap:
-	pci_pool_destroy(dma->dma_pool);
+err_setup:
+	dma_pool_destroy(dma->dma_pool);
 err_dma_pool:
 	pr_err("ERR_MDMA:setup_dma failed: %d\n", err);
 	return err;
@@ -1198,25 +1861,42 @@
 
 /**
  * middma_shutdown -	Shutdown the DMA controller
- * @pdev: Controller PCI device structure
+ * @dev: Controller device structure
  *
  * Called by remove
  * Unregister DMa controller, clear all structures and free interrupt
  */
-static void middma_shutdown(struct pci_dev *pdev)
+void middma_shutdown(struct device *dev)
 {
-	struct middma_device *device = pci_get_drvdata(pdev);
+	struct middma_device *device = dev_get_drvdata(dev);
 
 	dma_async_device_unregister(&device->common);
-	pci_pool_destroy(device->dma_pool);
-	if (device->mask_reg)
-		iounmap(device->mask_reg);
-	if (device->dma_base)
-		iounmap(device->dma_base);
-	free_irq(pdev->irq, device);
+	dma_pool_destroy(device->dma_pool);
 	return;
 }
 
+struct middma_device *mid_dma_setup_context(struct device *dev,
+					    struct intel_mid_dma_probe_info *info)
+{
+	struct middma_device *mid_device;
+	mid_device = devm_kzalloc(dev, sizeof(*mid_device), GFP_KERNEL);
+	if (!mid_device) {
+		pr_err("ERR_MDMA:kzalloc failed probe\n");
+		return NULL;
+	}
+	mid_device->dev = dev;
+	mid_device->max_chan = info->max_chan;
+	mid_device->chan_base = info->ch_base;
+	mid_device->block_size = info->block_size;
+	mid_device->pimr_mask = info->pimr_mask;
+	mid_device->pimr_base = info->pimr_base;
+	mid_device->dword_trf = info->dword_trf;
+	mid_device->pimr_offset = info->pimr_offset;
+	mid_device->pci_id = info->pci_id;
+	memcpy(&mid_device->dma_ops, info->pdma_ops, sizeof(struct intel_mid_dma_ops));
+	return mid_device;
+}
+
 /**
  * intel_mid_dma_probe -	PCI Probe
  * @pdev: Controller PCI device structure
@@ -1231,7 +1911,7 @@
 	struct middma_device *device;
 	u32 base_addr, bar_size;
 	struct intel_mid_dma_probe_info *info;
-	int err;
+	int err = -EINVAL;
 
 	pr_debug("MDMA: probe for %x\n", pdev->device);
 	info = (void *)id->driver_data;
@@ -1255,42 +1935,41 @@
 	if (err)
 		goto err_set_dma_mask;
 
-	device = kzalloc(sizeof(*device), GFP_KERNEL);
-	if (!device) {
-		pr_err("ERR_MDMA:kzalloc failed probe\n");
-		err = -ENOMEM;
+	pci_dev_get(pdev);
+	device = mid_dma_setup_context(&pdev->dev, info);
+	if (!device)
 		goto err_kzalloc;
-	}
-	device->pdev = pci_dev_get(pdev);
+
+	device->pci_id = pdev->device;
 
 	base_addr = pci_resource_start(pdev, 0);
 	bar_size  = pci_resource_len(pdev, 0);
-	device->dma_base = ioremap_nocache(base_addr, DMA_REG_SIZE);
+	device->dma_base = devm_ioremap_nocache(&pdev->dev, base_addr, DMA_REG_SIZE);
 	if (!device->dma_base) {
 		pr_err("ERR_MDMA:ioremap failed\n");
 		err = -ENOMEM;
 		goto err_ioremap;
 	}
+	device->irq = pdev->irq;
 	pci_set_drvdata(pdev, device);
 	pci_set_master(pdev);
-	device->max_chan = info->max_chan;
-	device->chan_base = info->ch_base;
-	device->block_size = info->block_size;
-	device->pimr_mask = info->pimr_mask;
 
-	err = mid_setup_dma(pdev);
+#ifdef CONFIG_PRH_TEMP_WA_FOR_SPID
+	/* PRH uses, ch 4,5,6,7 override the info table data */
+	pr_info("Device is Bodegabay\n");
+	device->max_chan = 4;
+	device->chan_base = 4;
+#endif
+	err = mid_setup_dma(&pdev->dev);
 	if (err)
-		goto err_dma;
+		goto err_ioremap;
 
 	pm_runtime_put_noidle(&pdev->dev);
 	pm_runtime_allow(&pdev->dev);
 	return 0;
 
-err_dma:
-	iounmap(device->dma_base);
 err_ioremap:
 	pci_dev_put(pdev);
-	kfree(device);
 err_kzalloc:
 err_set_dma_mask:
 	pci_release_regions(pdev);
@@ -1310,31 +1989,26 @@
  */
 static void intel_mid_dma_remove(struct pci_dev *pdev)
 {
-	struct middma_device *device = pci_get_drvdata(pdev);
-
 	pm_runtime_get_noresume(&pdev->dev);
 	pm_runtime_forbid(&pdev->dev);
-	middma_shutdown(pdev);
+	middma_shutdown(&pdev->dev);
 	pci_dev_put(pdev);
-	kfree(device);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
 }
 
 /* Power Management */
 /*
-* dma_suspend - PCI suspend function
+* dma_suspend - suspend function
 *
-* @pci: PCI device structure
-* @state: PM message
+* @dev: device structure
 *
 * This function is called by OS when a power event occurs
 */
-static int dma_suspend(struct device *dev)
+int dma_suspend(struct device *dev)
 {
-	struct pci_dev *pci = to_pci_dev(dev);
 	int i;
-	struct middma_device *device = pci_get_drvdata(pci);
+	struct middma_device *device = dev_get_drvdata(dev);
 	pr_debug("MDMA: dma_suspend called\n");
 
 	for (i = 0; i < device->max_chan; i++) {
@@ -1343,93 +2017,91 @@
 	}
 	dmac1_mask_periphral_intr(device);
 	device->state = SUSPENDED;
-	pci_save_state(pci);
-	pci_disable_device(pci);
-	pci_set_power_state(pci, PCI_D3hot);
+
 	return 0;
 }
 
 /**
-* dma_resume - PCI resume function
+* dma_resume - resume function
 *
-* @pci:	PCI device structure
+* @dev:	device structure
 *
 * This function is called by OS when a power event occurs
 */
 int dma_resume(struct device *dev)
 {
-	struct pci_dev *pci = to_pci_dev(dev);
-	int ret;
-	struct middma_device *device = pci_get_drvdata(pci);
+	struct middma_device *device = dev_get_drvdata(dev);
 
 	pr_debug("MDMA: dma_resume called\n");
-	pci_set_power_state(pci, PCI_D0);
-	pci_restore_state(pci);
-	ret = pci_enable_device(pci);
-	if (ret) {
-		pr_err("MDMA: device can't be enabled for %x\n", pci->device);
-		return ret;
-	}
 	device->state = RUNNING;
 	iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
+
+	if (!device->dword_trf)
+		config_dma_fifo_partition(device);
+
 	return 0;
 }
 
 static int dma_runtime_suspend(struct device *dev)
 {
-	struct pci_dev *pci_dev = to_pci_dev(dev);
-	struct middma_device *device = pci_get_drvdata(pci_dev);
-
-	device->state = SUSPENDED;
-	return 0;
+	return dma_suspend(dev);
 }
 
 static int dma_runtime_resume(struct device *dev)
 {
-	struct pci_dev *pci_dev = to_pci_dev(dev);
-	struct middma_device *device = pci_get_drvdata(pci_dev);
-
-	device->state = RUNNING;
-	iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
-	return 0;
+	return dma_resume(dev);
 }
 
 static int dma_runtime_idle(struct device *dev)
 {
-	struct pci_dev *pdev = to_pci_dev(dev);
-	struct middma_device *device = pci_get_drvdata(pdev);
+	struct middma_device *device = dev_get_drvdata(dev);
 	int i;
 
 	for (i = 0; i < device->max_chan; i++) {
 		if (device->ch[i].in_use)
 			return -EAGAIN;
 	}
-
-	return pm_schedule_suspend(dev, 0);
+	return pm_schedule_suspend(dev, 0);;
 }
 
 /******************************************************************************
 * PCI stuff
 */
 static struct pci_device_id intel_mid_dma_ids[] = {
-	{ PCI_VDEVICE(INTEL, INTEL_MID_DMAC1_ID),	INFO(2, 6, 4095, 0x200020)},
-	{ PCI_VDEVICE(INTEL, INTEL_MID_DMAC2_ID),	INFO(2, 0, 2047, 0)},
-	{ PCI_VDEVICE(INTEL, INTEL_MID_GP_DMAC2_ID),	INFO(2, 0, 2047, 0)},
-	{ PCI_VDEVICE(INTEL, INTEL_MFLD_DMAC1_ID),	INFO(4, 0, 4095, 0x400040)},
+	{ PCI_VDEVICE(INTEL, INTEL_MID_DMAC1_ID),
+		INFO(2, 6, SST_MAX_DMA_LEN, 0x200020, 0xFFAE8008, 1, 0x8, INTEL_MID_DMAC1_ID, &v1_dma_ops)},
+	{ PCI_VDEVICE(INTEL, INTEL_MID_DMAC2_ID),
+		INFO(2, 0, 2047, 0, 0, 1, 0, INTEL_MID_DMAC2_ID, &v1_dma_ops)},
+	{ PCI_VDEVICE(INTEL, INTEL_MID_GP_DMAC2_ID),
+		INFO(2, 0, 2047, 0, 0, 1, 0, INTEL_MID_GP_DMAC2_ID, &v1_dma_ops)},
+	/* Mrfld */
+	{ PCI_VDEVICE(INTEL, INTEL_MRFLD_GP_DMAC2_ID),
+		INFO(4, 0, SST_MAX_DMA_LEN_MRFLD, 0, 0, 0, 0, INTEL_MRFLD_GP_DMAC2_ID, &v2_dma_ops)},
+	{ PCI_VDEVICE(INTEL, INTEL_MRFLD_DMAC0_ID),
+		INFO(2, 6, SST_MAX_DMA_LEN_MRFLD, 0xFF0000, 0xFF340018, 0, 0x10, INTEL_MRFLD_DMAC0_ID, &v2_dma_ops)},
+
+	/* Moorfield */
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_GP_DMAC2_MOOR),
+		INFO(4, 0, SST_MAX_DMA_LEN_MRFLD, 0, 0, 0, 0,
+				PCI_DEVICE_ID_INTEL_GP_DMAC2_MOOR, &v2_dma_ops)},
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_AUDIO_DMAC0_MOOR),
+		INFO(2, 6, SST_MAX_DMA_LEN_MRFLD, 0xFF0000, 0xFF340018, 0, 0x10,
+				PCI_DEVICE_ID_INTEL_AUDIO_DMAC0_MOOR, &v2_dma_ops)},
+
 	{ 0, }
 };
 MODULE_DEVICE_TABLE(pci, intel_mid_dma_ids);
 
 static const struct dev_pm_ops intel_mid_dma_pm = {
-	.runtime_suspend = dma_runtime_suspend,
-	.runtime_resume = dma_runtime_resume,
-	.runtime_idle = dma_runtime_idle,
-	.suspend = dma_suspend,
-	.resume = dma_resume,
+	.suspend_late = dma_suspend,
+	.resume_early = dma_resume,
+	SET_RUNTIME_PM_OPS(dma_runtime_suspend,
+			dma_runtime_resume,
+			dma_runtime_idle)
 };
 
 static struct pci_driver intel_mid_dma_pci_driver = {
-	.name		=	"Intel MID DMA",
+	.name		=	"intel_mid_dma",
 	.id_table	=	intel_mid_dma_ids,
 	.probe		=	intel_mid_dma_probe,
 	.remove		=	intel_mid_dma_remove,
@@ -1442,11 +2114,17 @@
 
 static int __init intel_mid_dma_init(void)
 {
+	int ret;
+
 	pr_debug("INFO_MDMA: LNW DMA Driver Version %s\n",
 			INTEL_MID_DMA_DRIVER_VERSION);
-	return pci_register_driver(&intel_mid_dma_pci_driver);
+	ret = pci_register_driver(&intel_mid_dma_pci_driver);
+	if (ret)
+		pr_err("PCI dev registration failed");
+
+	return ret;
 }
-fs_initcall(intel_mid_dma_init);
+module_init(intel_mid_dma_init);
 
 static void __exit intel_mid_dma_exit(void)
 {
@@ -1458,3 +2136,5 @@
 MODULE_DESCRIPTION("Intel (R) MID DMAC Driver");
 MODULE_LICENSE("GPL v2");
 MODULE_VERSION(INTEL_MID_DMA_DRIVER_VERSION);
+MODULE_ALIAS("pci:intel_mid_dma");
+MODULE_ALIAS("acpi:intel_dma_acpi");
diff --git a/drivers/dma/intel_mid_dma_acpi.c b/drivers/dma/intel_mid_dma_acpi.c
new file mode 100644
index 0000000..f86f181
--- /dev/null
+++ b/drivers/dma/intel_mid_dma_acpi.c
@@ -0,0 +1,297 @@
+
+/* intel_mid_dma_acpi.c - Intel MID DMA driver init file for ACPI enumaration.
+ *
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ *  Authors:	Ramesh Babu K V <Ramesh.Babu@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/intel_mid_dma.h>
+#include <linux/pm_runtime.h>
+#include <acpi/acpi_bus.h>
+
+#include "intel_mid_dma_regs.h"
+
+#define HID_MAX_SIZE 8
+
+struct list_head dma_dev_list;
+
+LIST_HEAD(dma_dev_list);
+
+struct acpi_dma_dev_list {
+	struct list_head dmadev_list;
+	char dma_hid[HID_MAX_SIZE];
+	struct device *acpi_dma_dev;
+};
+
+struct device *intel_mid_get_acpi_dma(const char *hid)
+{
+	struct acpi_dma_dev_list *listnode;
+	if (list_empty(&dma_dev_list))
+		return NULL;
+
+	list_for_each_entry(listnode, &dma_dev_list, dmadev_list) {
+		if (!(strncmp(listnode->dma_hid, hid, HID_MAX_SIZE)))
+			return listnode->acpi_dma_dev;
+	}
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(intel_mid_get_acpi_dma);
+
+#if IS_ENABLED(CONFIG_ACPI)
+static int mid_get_and_map_rsrc(void **dest, struct platform_device *pdev,
+				unsigned int num)
+{
+	struct resource *rsrc;
+	rsrc = platform_get_resource(pdev, IORESOURCE_MEM, num);
+	if (!rsrc) {
+		pr_err("%s: Invalid resource - %d", __func__, num);
+		return -EIO;
+	}
+	pr_debug("rsrc #%d = %#x", num, (unsigned int) rsrc->start);
+	*dest = devm_ioremap_nocache(&pdev->dev, rsrc->start, resource_size(rsrc));
+	if (!*dest) {
+		pr_err("%s: unable to map resource: %#x", __func__, (unsigned int)rsrc->start);
+		return -EIO;
+	}
+	return 0;
+}
+
+static int mid_platform_get_resources_fdk(struct middma_device *mid_device,
+				      struct platform_device *pdev)
+{
+	int ret;
+	struct resource *rsrc;
+
+	pr_debug("%s", __func__);
+
+	/* All ACPI resource request here */
+	/* Get DDR addr from platform resource table */
+	ret = mid_get_and_map_rsrc(&mid_device->dma_base, pdev, 0);
+	if (ret)
+		return ret;
+	pr_debug("dma_base:%p", mid_device->dma_base);
+
+	/* only get the resource from device table
+		mapping is performed in common code */
+	rsrc = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (!rsrc) {
+		pr_warn("%s: Invalid resource for pimr", __func__);
+	} else {
+		/* add offset for ISRX register */
+		mid_device->pimr_base = rsrc->start + SHIM_ISRX_OFFSET;
+		pr_debug("pimr_base:%#x", mid_device->pimr_base);
+	}
+
+	mid_device->irq = platform_get_irq(pdev, 0);
+	if (mid_device->irq < 0) {
+		pr_err("invalid irq:%d", mid_device->irq);
+		return mid_device->irq;
+	}
+	pr_debug("irq from pdev is:%d", mid_device->irq);
+
+	return 0;
+}
+
+#define DMA_BASE_OFFSET 0x98000
+#define DMA_BASE_SIZE 0x4000
+
+static int mid_platform_get_resources_edk2(struct middma_device *mid_device,
+				      struct platform_device *pdev)
+{
+	struct resource *rsrc;
+	u32 dma_base_add;
+
+	pr_debug("%s", __func__);
+	/* All ACPI resource request here */
+	/* Get DDR addr from platform resource table */
+	rsrc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!rsrc) {
+		pr_warn("%s: Invalid resource for pimr", __func__);
+		return -EINVAL;
+	}
+
+	pr_debug("rsrc %#x", (unsigned int)rsrc->start);
+	dma_base_add = rsrc->start + DMA_BASE_OFFSET;
+	mid_device->dma_base = devm_ioremap_nocache(&pdev->dev, dma_base_add, DMA_BASE_SIZE);
+	if (!mid_device->dma_base) {
+		pr_err("%s: unable to map resource: %#x", __func__, dma_base_add);
+		return -EIO;
+	}
+	pr_debug("dma_base:%p", mid_device->dma_base);
+
+	/* add offset for ISRX register */
+	mid_device->pimr_base = rsrc->start + SHIM_OFFSET + SHIM_ISRX_OFFSET;
+	pr_debug("pimr_base:%#x", mid_device->pimr_base);
+
+	mid_device->irq = platform_get_irq(pdev, 0);
+	if (mid_device->irq < 0) {
+		pr_err("invalid irq:%d", mid_device->irq);
+		return mid_device->irq;
+	}
+	pr_debug("irq from pdev is:%d", mid_device->irq);
+
+	return 0;
+}
+
+static int mid_platform_get_resources_lpio(struct middma_device *mid_device,
+				      struct platform_device *pdev)
+{
+	int ret;
+
+	pr_debug("%s\n", __func__);
+
+	/* No need to request PIMR resource here */
+	ret = mid_get_and_map_rsrc(&mid_device->dma_base, pdev, 0);
+	if (ret)
+		return ret;
+	pr_debug("dma_base:%p\n", mid_device->dma_base);
+
+	mid_device->irq = platform_get_irq(pdev, 0);
+	if (mid_device->irq < 0) {
+		pr_err("invalid irq:%d\n", mid_device->irq);
+		return mid_device->irq;
+	}
+	pr_debug("irq from pdev is:%d\n", mid_device->irq);
+
+	return 0;
+}
+
+static int mid_platform_get_resources(const char *hid,
+		struct middma_device *mid_device, struct platform_device *pdev)
+{
+	if (!strncmp(hid, "DMA0F28", 7))
+		return mid_platform_get_resources_fdk(mid_device, pdev);
+	if (!strncmp(hid, "INTL9C60", 8))
+		return mid_platform_get_resources_lpio(mid_device, pdev);
+	if (!strncmp(hid, "ADMA0F28", 8))
+		return mid_platform_get_resources_edk2(mid_device, pdev);
+	else if ((!strncmp(hid, "ADMA22A8", 8))) {
+		return mid_platform_get_resources_edk2(mid_device, pdev);
+	} else if ((!strncmp(hid, "80862286", 8))) {
+		return mid_platform_get_resources_lpio(mid_device, pdev);
+	} else if ((!strncmp(hid, "808622C0", 8))) {
+		return mid_platform_get_resources_lpio(mid_device, pdev);
+	} else {
+		pr_err("Invalid device id..\n");
+		return -EINVAL;
+	}
+}
+
+int dma_acpi_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	acpi_handle handle = ACPI_HANDLE(dev);
+	struct acpi_device *device;
+	struct middma_device *mid_device;
+	struct intel_mid_dma_probe_info *info;
+	const char *hid;
+	int ret;
+	struct acpi_dma_dev_list *listnode;
+
+	ret = acpi_bus_get_device(handle, &device);
+	if (ret) {
+		pr_err("%s: could not get acpi device - %d\n", __func__, ret);
+		return -ENODEV;
+	}
+
+	if (acpi_bus_get_status(device) || !device->status.present) {
+		pr_err("%s: device has invalid status", __func__);
+		return -ENODEV;
+	}
+
+	hid = acpi_device_hid(device);
+	pr_info("%s for %s", __func__, hid);
+
+	/* Apply default dma_mask if needed */
+	if (!pdev->dev.dma_mask) {
+		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+		pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+	}
+
+	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+	if (ret) {
+		pr_err("dma_set_mask failed with err:%d", ret);
+		return ret;
+	}
+
+	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+	if (ret) {
+		pr_err("_coherent_mask failed with err:%d", ret);
+		return ret;
+	}
+	info = mid_get_acpi_driver_data(hid);
+	if (!info) {
+		pr_err("acpi driver data is null");
+		goto err_dma;
+	}
+
+	mid_device = mid_dma_setup_context(&pdev->dev, info);
+	if (!mid_device)
+		goto err_dma;
+
+	ret = mid_platform_get_resources(hid, mid_device, pdev);
+	if (ret) {
+		pr_err("Error while get resources:%d", ret);
+		goto err_dma;
+	}
+	platform_set_drvdata(pdev, mid_device);
+	ret = mid_setup_dma(&pdev->dev);
+	if (ret)
+		goto err_dma;
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+	pm_runtime_allow(&pdev->dev);
+
+	listnode = devm_kzalloc(&pdev->dev, sizeof(*listnode), GFP_KERNEL);
+	if (!listnode) {
+		pr_err("dma dev list alloc failed\n");
+		ret = -ENOMEM;
+		goto err_dma;
+	}
+
+	strncpy(listnode->dma_hid, hid, HID_MAX_SIZE);
+	listnode->acpi_dma_dev = &pdev->dev;
+	list_add_tail(&listnode->dmadev_list, &dma_dev_list);
+
+	pr_debug("%s:completed", __func__);
+	return 0;
+err_dma:
+	pr_err("ERR_MDMA:Probe failed %d\n", ret);
+	return ret;
+}
+#else
+int dma_acpi_probe(struct platform_device *pdev)
+{
+	return -EIO;
+}
+#endif
+
+int dma_acpi_remove(struct platform_device *pdev)
+{
+	pm_runtime_forbid(&pdev->dev);
+	middma_shutdown(&pdev->dev);
+	platform_set_drvdata(pdev, NULL);
+	return 0;
+}
diff --git a/drivers/dma/intel_mid_dma_regs.h b/drivers/dma/intel_mid_dma_regs.h
index 17b4219..89adc14 100644
--- a/drivers/dma/intel_mid_dma_regs.h
+++ b/drivers/dma/intel_mid_dma_regs.h
@@ -31,6 +31,8 @@
 
 #define INTEL_MID_DMA_DRIVER_VERSION "1.1.0"
 
+#define MID_MAX_CHAN	8 /*max ch across controllers*/
+
 #define	REG_BIT0		0x00000001
 #define	REG_BIT8		0x00000100
 #define INT_MASK_WE		0x8
@@ -45,11 +47,12 @@
 #define DISABLE_CHANNEL(chan_num) \
 	(REG_BIT8 << chan_num)
 
-#define DESCS_PER_CHANNEL	16
+#define DESCS_PER_CHANNEL	128
 /*DMA Registers*/
 /*registers associated with channel programming*/
 #define DMA_REG_SIZE		0x400
 #define DMA_CH_SIZE		0x58
+#define DMA_FIFO_SIZE 0x100080
 
 /*CH X REG = (DMA_CH_SIZE)*CH_NO + REG*/
 #define SAR			0x00 /* Source Address Register*/
@@ -83,6 +86,19 @@
 #define INTR_STATUS		0x360
 #define DMA_CFG			0x398
 #define DMA_CHAN_EN		0x3A0
+#define FIFO_PARTITION0_LO	0x400
+#define FIFO_PARTITION0_HI	0x404
+#define FIFO_PARTITION1_LO	0x408
+#define FIFO_PARTITION1_HI	0x40C
+#define CH_SAI_ERR		0x410
+
+#define SHIM_OFFSET 0x140000
+#define SHIM_ISRX_OFFSET	0x18
+#define CTL_LO_BIT_LLP_DST_EN	27
+#define CTL_LO_BIT_LLP_SRC_EN	28
+
+#define CH_SUSPEND	(BIT(8))
+#define CH_DRAIN	(BIT(10))
 
 /*DMA channel control registers*/
 union intel_mid_dma_ctl_lo {
@@ -111,6 +127,34 @@
 		u32	llp_src_en:1;	/*enable/disable source LLP = 0*/
 		u32	reser2:3;
 	} ctlx;
+	struct {
+		u32	int_en:1;	/*enable or disable interrupts*/
+					/*should be 0*/
+		u32	dst_tr_width:3;	/*destination transfer width*/
+					/*usually 32 bits = 010*/
+		u32	src_tr_width:3; /*source transfer width*/
+					/*usually 32 bits = 010*/
+		u32	rsvd4:1;
+		u32	dinc:1;		/*destination address inc/dec*/
+		u32	rsvd3:1;
+					/*For mem:INC=00, Periphral NoINC=11*/
+		u32	sinc:1;		/*source address inc or dec, as above*/
+		u32	dst_msize:3;	/*destination burst transaction length*/
+					/*always = 16 ie 011*/
+		u32	src_msize:3;	/*source burst transaction length*/
+					/*always = 16 ie 011*/
+		u32	src_gather_en:1;
+		u32	dst_scatter_en:1;
+		u32	rsvd2:1;
+		u32	tt_fc:2;	/*transfer type and flow controller*/
+					/*M-M = 000
+					  P-M = 010
+					  M-P = 001*/
+		u32	rsvd1:5;
+		u32	llp_dst_en:1;	/*enable/disable destination LLP = 0*/
+		u32	llp_src_en:1;	/*enable/disable source LLP = 0*/
+		u32	reser:3;
+	} ctlx_v2;
 	u32	ctl_lo;
 };
 
@@ -120,8 +164,13 @@
 		u32	done:1;		/*Done - updated by DMAC*/
 		u32	reser:19;	/*configured by DMAC*/
 	} ctlx;
+	struct {
+		u32	block_ts:12;	/*block transfer size*/
+		u32	done:1;		/*Done - updated by DMAC*/
+		u32	ch_weight:11;
+		u32	ch_class:2;
+	} ctlx_v2;
 	u32	ctl_hi;
-
 };
 
 /*DMA channel configuration registers*/
@@ -141,6 +190,33 @@
 		u32	reload_src:1;	/*auto reload src addr =1 if src is P*/
 		u32	reload_dst:1;	/*AR destn addr =1 if dstn is P*/
 	} cfgx;
+	struct {
+		u32	dst_burst_align:1;
+		u32	src_burst_align:1;
+		u32	all_np_wr:1;
+		u32	hshake_np_wr:1;
+		u32	rsvd4:1;
+		u32	ctl_hi_upd_en:1;
+		u32	ds_upd_en:1;
+		u32	ss_upd_en:1;
+		u32	ch_susp:1;
+		u32	fifo_empty:1;
+		u32	ch_drain:1;
+		u32	rsvd11:1;
+		u32	rd_snp:1;
+		u32	wr_snp:1;
+		u32	rd_llp_snp:1;
+		u32	rd_stat_snp:1;
+		u32	wr_stat_snp:1;
+		u32	wr_ctlhi_snp:1;
+		u32	dst_hs_pol:1;
+		u32	src_hs_pol:1;
+		u32	dst_opt_bl:1;
+		u32	src_opt_bl:1;
+		u32	rsvd_22_29:8;
+		u32	reload_src:1;
+		u32	reload_dst:1;
+	} cfgx_v2;
 	u32	cfg_lo;
 };
 
@@ -154,9 +230,43 @@
 		u32	dst_per:4;	/*dstn hw HS interface*/
 		u32	reser2:17;
 	} cfgx;
+	struct {
+		u32	src_per:4;	/*src hw HS interface*/
+		u32	dst_per:4;	/*dstn hw HS interface*/
+		u32	rd_issue_thd:10;
+		u32	wr_issue_thd:10;
+		u32	src_per_ext:2;
+		u32	dst_per_ext:2;
+	} cfgx_v2;
 	u32	cfg_hi;
 };
 
+struct intel_mid_dma_ops {
+	int (*device_alloc_chan_resources)(struct dma_chan *chan);
+	void (*device_free_chan_resources)(struct dma_chan *chan);
+
+	struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
+		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+		size_t len, unsigned long flags);
+	struct dma_async_tx_descriptor *(*device_prep_dma_sg)(
+		struct dma_chan *chan,
+		struct scatterlist *dst_sg, unsigned int dst_nents,
+		struct scatterlist *src_sg, unsigned int src_nents,
+		unsigned long flags);
+
+	struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
+		struct dma_chan *chan, struct scatterlist *sgl,
+		unsigned int sg_len, enum dma_transfer_direction direction,
+		unsigned long flags, void *context);
+	int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+		unsigned long arg);
+
+	enum dma_status (*device_tx_status)(struct dma_chan *chan,
+					    dma_cookie_t cookie,
+					    struct dma_tx_state *txstate);
+	void (*device_issue_pending)(struct dma_chan *chan);
+	void (*dma_chan_suspend)(struct dma_chan *chan);
+};
 
 /**
  * struct intel_mid_dma_chan - internal mid representation of a DMA channel
@@ -168,13 +278,14 @@
  * @active_list: current active descriptors
  * @queue: current queued up descriptors
  * @free_list: current free descriptors
- * @slave: dma slave structure
- * @descs_allocated: total number of descriptors allocated
- * @dma: dma device structure pointer
+ * @slave: dma slave struture
+ * @descs_allocated: total number of decsiptors allocated
+ * @dma: dma device struture pointer
  * @busy: bool representing if ch is busy (active txn) or not
  * @in_use: bool representing if ch is in use or not
  * @raw_tfr: raw trf interrupt received
  * @raw_block: raw block interrupt received
+ * @block_intr_status: bool representing if block intr is enabled or not
  */
 struct intel_mid_dma_chan {
 	struct dma_chan		chan;
@@ -192,6 +303,8 @@
 	u32			raw_tfr;
 	u32			raw_block;
 	struct intel_mid_dma_slave *mid_slave;
+	struct dma_pool		*lli_pool;
+	bool block_intr_status;
 };
 
 static inline struct intel_mid_dma_chan *to_intel_mid_dma_chan(
@@ -207,6 +320,8 @@
 /**
  * struct middma_device - internal representation of a DMA device
  * @pdev: PCI device
+ * @dev : pointer to current device struct
+ * @irq : holds irq for the device
  * @dma_base: MMIO register space pointer of DMA
  * @dma_pool: for allocating DMA descriptors
  * @common: embedded struct dma_device
@@ -220,14 +335,17 @@
  * @block_size: Block size of DMA transfer supported (from drv_data)
  * @pimr_mask: MMIO register addr for periphral interrupt (from drv_data)
  * @state: dma PM device state
+ * @tfr_intr_mask: hold the status of tfr intr mask register
+ * @block_intr_mask: hold the status of block intr mask register
  */
 struct middma_device {
-	struct pci_dev		*pdev;
+	struct device		*dev;
+	unsigned int		irq;
 	void __iomem		*dma_base;
 	struct pci_pool		*dma_pool;
 	struct dma_device	common;
 	struct tasklet_struct   tasklet;
-	struct intel_mid_dma_chan ch[MAX_CHAN];
+	struct intel_mid_dma_chan ch[MID_MAX_CHAN];
 	unsigned int		pci_id;
 	unsigned int		intr_mask;
 	void __iomem		*mask_reg;
@@ -235,7 +353,13 @@
 	int			max_chan;
 	int			block_size;
 	unsigned int		pimr_mask;
+	unsigned int		pimr_base;
+	unsigned int		dword_trf;
+	unsigned int		pimr_offset;
+	unsigned long		tfr_intr_mask;
+	unsigned long		block_intr_mask;
 	enum intel_mid_dma_state state;
+	struct intel_mid_dma_ops	dma_ops;
 };
 
 static inline struct middma_device *to_middma_device(struct dma_device *common)
@@ -266,15 +390,30 @@
 	enum intel_mid_dma_mode		cfg_mode; /*mode configuration*/
 
 };
-
+/* struct intel_mid_dma_lli is used to provide the DMA IP with SAR,DAR,LLP etc.
+   Use u32 for the elements of this structure irrespective
+   of whether dma_addr_t is u32 or u64.This is necessary because
+   the DMA IP expects these elements to be 32 bit wide */
 struct intel_mid_dma_lli {
-	dma_addr_t			sar;
-	dma_addr_t			dar;
-	dma_addr_t			llp;
+	u32				sar;
+	u32				dar;
+	u32				llp;
 	u32				ctl_lo;
 	u32				ctl_hi;
 } __attribute__ ((packed));
 
+struct intel_mid_dma_probe_info {
+	u8 max_chan;
+	u8 ch_base;
+	u32 block_size;
+	u32 pimr_mask;
+	u32 pimr_base;
+	u8 dword_trf;
+	u32 pimr_offset;
+	unsigned int		pci_id;
+	struct intel_mid_dma_ops *pdma_ops;
+};
+
 static inline int test_ch_en(void __iomem *dma, u32 ch_no)
 {
 	u32 en_reg = ioread32(dma + DMA_CHAN_EN);
@@ -294,6 +433,12 @@
 }
 
 
+struct middma_device *mid_dma_setup_context(struct device *dev,
+					    struct intel_mid_dma_probe_info *info);
 int dma_resume(struct device *dev);
-
+int dma_acpi_probe(struct platform_device *pdev);
+int dma_acpi_remove(struct platform_device *pdev);
+struct intel_mid_dma_probe_info *mid_get_acpi_driver_data(const char *hid);
+int mid_setup_dma(struct device *dev);
+void middma_shutdown(struct device *dev);
 #endif /*__INTEL_MID_DMAC_REGS_H__*/
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index a17553f..4c2f465 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2485,10 +2485,10 @@
 	struct dma_pl330_chan *pch = to_pchan(chan);
 	unsigned long flags;
 
-	spin_lock_irqsave(&pch->lock, flags);
-
 	tasklet_kill(&pch->task);
 
+	spin_lock_irqsave(&pch->lock, flags);
+
 	pl330_release_channel(pch->pl330_chid);
 	pch->pl330_chid = NULL;
 
@@ -2527,6 +2527,10 @@
 	/* Assign cookies to all nodes */
 	while (!list_empty(&last->node)) {
 		desc = list_entry(last->node.next, struct dma_pl330_desc, node);
+		if (pch->cyclic) {
+			desc->txd.callback = last->txd.callback;
+			desc->txd.callback_param = last->txd.callback_param;
+		}
 
 		dma_cookie_assign(&desc->txd);
 
@@ -2710,45 +2714,82 @@
 		size_t period_len, enum dma_transfer_direction direction,
 		unsigned long flags, void *context)
 {
-	struct dma_pl330_desc *desc;
+	struct dma_pl330_desc *desc = NULL, *first = NULL;
 	struct dma_pl330_chan *pch = to_pchan(chan);
+	struct dma_pl330_dmac *pdmac = pch->dmac;
+	unsigned int i;
 	dma_addr_t dst;
 	dma_addr_t src;
 
-	desc = pl330_get_desc(pch);
-	if (!desc) {
-		dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
-			__func__, __LINE__);
+	if (len % period_len != 0)
 		return NULL;
-	}
 
-	switch (direction) {
-	case DMA_MEM_TO_DEV:
-		desc->rqcfg.src_inc = 1;
-		desc->rqcfg.dst_inc = 0;
-		desc->req.rqtype = MEMTODEV;
-		src = dma_addr;
-		dst = pch->fifo_addr;
-		break;
-	case DMA_DEV_TO_MEM:
-		desc->rqcfg.src_inc = 0;
-		desc->rqcfg.dst_inc = 1;
-		desc->req.rqtype = DEVTOMEM;
-		src = pch->fifo_addr;
-		dst = dma_addr;
-		break;
-	default:
+	if (!is_slave_direction(direction)) {
 		dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n",
 		__func__, __LINE__);
 		return NULL;
 	}
 
-	desc->rqcfg.brst_size = pch->burst_sz;
-	desc->rqcfg.brst_len = 1;
+	for (i = 0; i < len / period_len; i++) {
+		desc = pl330_get_desc(pch);
+		if (!desc) {
+			dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
+				__func__, __LINE__);
+
+			if (!first)
+				return NULL;
+
+			spin_lock_irqsave(&pdmac->pool_lock, flags);
+
+			while (!list_empty(&first->node)) {
+				desc = list_entry(first->node.next,
+						struct dma_pl330_desc, node);
+				list_move_tail(&desc->node, &pdmac->desc_pool);
+			}
+
+			list_move_tail(&first->node, &pdmac->desc_pool);
+
+			spin_unlock_irqrestore(&pdmac->pool_lock, flags);
+
+			return NULL;
+		}
+
+		switch (direction) {
+		case DMA_MEM_TO_DEV:
+			desc->rqcfg.src_inc = 1;
+			desc->rqcfg.dst_inc = 0;
+			desc->req.rqtype = MEMTODEV;
+			src = dma_addr;
+			dst = pch->fifo_addr;
+			break;
+		case DMA_DEV_TO_MEM:
+			desc->rqcfg.src_inc = 0;
+			desc->rqcfg.dst_inc = 1;
+			desc->req.rqtype = DEVTOMEM;
+			src = pch->fifo_addr;
+			dst = dma_addr;
+			break;
+		default:
+			break;
+		}
+
+		desc->rqcfg.brst_size = pch->burst_sz;
+		desc->rqcfg.brst_len = 1;
+		fill_px(&desc->px, dst, src, period_len);
+
+		if (!first)
+			first = desc;
+		else
+			list_add_tail(&desc->node, &first->node);
+
+		dma_addr += period_len;
+	}
+
+	if (!desc)
+		return NULL;
 
 	pch->cyclic = true;
-
-	fill_px(&desc->px, dst, src, period_len);
+	desc->txd.flags = flags;
 
 	return &desc->txd;
 }
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 8b6a034..8b3d901 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -2470,8 +2470,15 @@
 	layers[0].size = pvt->csels[0].b_cnt;
 	layers[0].is_virt_csrow = true;
 	layers[1].type = EDAC_MC_LAYER_CHANNEL;
-	layers[1].size = pvt->channel_count;
+
+	/*
+	 * Always allocate two channels since we can have setups with DIMMs on
+	 * only one channel. Also, this simplifies handling later for the price
+	 * of a couple of KBs tops.
+	 */
+	layers[1].size = 2;
 	layers[1].is_virt_csrow = false;
+
 	mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0);
 	if (!mci)
 		goto err_siblings;
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 27e86d9..89e1090 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -48,6 +48,8 @@
  */
 static void const *edac_mc_owner;
 
+static struct bus_type mc_bus[EDAC_MAX_MCS];
+
 unsigned edac_dimm_info_location(struct dimm_info *dimm, char *buf,
 			         unsigned len)
 {
@@ -723,6 +725,11 @@
 	int ret = -EINVAL;
 	edac_dbg(0, "\n");
 
+	if (mci->mc_idx >= EDAC_MAX_MCS) {
+		pr_warn_once("Too many memory controllers: %d\n", mci->mc_idx);
+		return -ENODEV;
+	}
+
 #ifdef CONFIG_EDAC_DEBUG
 	if (edac_debug_level >= 3)
 		edac_mc_dump_mci(mci);
@@ -762,6 +769,8 @@
 	/* set load time so that error rate can be tracked */
 	mci->start_time = jiffies;
 
+	mci->bus = &mc_bus[mci->mc_idx];
+
 	if (edac_create_sysfs_mci_device(mci)) {
 		edac_mc_printk(mci, KERN_WARNING,
 			"failed to create sysfs device\n");
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 67610a6..c4d700a 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -370,7 +370,7 @@
 		return -ENODEV;
 
 	csrow->dev.type = &csrow_attr_type;
-	csrow->dev.bus = &mci->bus;
+	csrow->dev.bus = mci->bus;
 	device_initialize(&csrow->dev);
 	csrow->dev.parent = &mci->dev;
 	csrow->mci = mci;
@@ -605,7 +605,7 @@
 	dimm->mci = mci;
 
 	dimm->dev.type = &dimm_attr_type;
-	dimm->dev.bus = &mci->bus;
+	dimm->dev.bus = mci->bus;
 	device_initialize(&dimm->dev);
 
 	dimm->dev.parent = &mci->dev;
@@ -975,11 +975,13 @@
 	 * The memory controller needs its own bus, in order to avoid
 	 * namespace conflicts at /sys/bus/edac.
 	 */
-	mci->bus.name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx);
-	if (!mci->bus.name)
+	mci->bus->name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx);
+	if (!mci->bus->name)
 		return -ENOMEM;
-	edac_dbg(0, "creating bus %s\n", mci->bus.name);
-	err = bus_register(&mci->bus);
+
+	edac_dbg(0, "creating bus %s\n", mci->bus->name);
+
+	err = bus_register(mci->bus);
 	if (err < 0)
 		return err;
 
@@ -988,7 +990,7 @@
 	device_initialize(&mci->dev);
 
 	mci->dev.parent = mci_pdev;
-	mci->dev.bus = &mci->bus;
+	mci->dev.bus = mci->bus;
 	dev_set_name(&mci->dev, "mc%d", mci->mc_idx);
 	dev_set_drvdata(&mci->dev, mci);
 	pm_runtime_forbid(&mci->dev);
@@ -997,8 +999,8 @@
 	err = device_add(&mci->dev);
 	if (err < 0) {
 		edac_dbg(1, "failure: create device %s\n", dev_name(&mci->dev));
-		bus_unregister(&mci->bus);
-		kfree(mci->bus.name);
+		bus_unregister(mci->bus);
+		kfree(mci->bus->name);
 		return err;
 	}
 
@@ -1064,8 +1066,8 @@
 	}
 fail2:
 	device_unregister(&mci->dev);
-	bus_unregister(&mci->bus);
-	kfree(mci->bus.name);
+	bus_unregister(mci->bus);
+	kfree(mci->bus->name);
 	return err;
 }
 
@@ -1098,8 +1100,8 @@
 {
 	edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev));
 	device_unregister(&mci->dev);
-	bus_unregister(&mci->bus);
-	kfree(mci->bus.name);
+	bus_unregister(mci->bus);
+	kfree(mci->bus->name);
 }
 
 static void mc_attr_release(struct device *dev)
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
index 1b63517..157b934 100644
--- a/drivers/edac/i5100_edac.c
+++ b/drivers/edac/i5100_edac.c
@@ -974,7 +974,7 @@
 	if (!i5100_debugfs)
 		return -ENODEV;
 
-	priv->debugfs = debugfs_create_dir(mci->bus.name, i5100_debugfs);
+	priv->debugfs = debugfs_create_dir(mci->bus->name, i5100_debugfs);
 
 	if (!priv->debugfs)
 		return -ENOMEM;
diff --git a/drivers/extcon/extcon-class.c b/drivers/extcon/extcon-class.c
index 60adc04..dd87dc9 100644
--- a/drivers/extcon/extcon-class.c
+++ b/drivers/extcon/extcon-class.c
@@ -48,6 +48,11 @@
 	[EXTCON_FAST_CHARGER]	= "Fast-charger",
 	[EXTCON_SLOW_CHARGER]	= "Slow-charger",
 	[EXTCON_CHARGE_DOWNSTREAM]	= "Charge-downstream",
+	[EXTCON_SDP]		= "CHARGER_USB_SDP",
+	[EXTCON_DCP]		= "CHARGER_USB_DCP",
+	[EXTCON_CDP]		= "CHARGER_USB_CDP",
+	[EXTCON_ACA]		= "CHARGER_USB_ACA",
+	[EXTCON_AC]		= "CHARGER_AC",
 	[EXTCON_HDMI]		= "HDMI",
 	[EXTCON_MHL]		= "MHL",
 	[EXTCON_DVI]		= "DVI",
@@ -72,6 +77,8 @@
 static LIST_HEAD(extcon_dev_list);
 static DEFINE_MUTEX(extcon_dev_list_lock);
 
+static BLOCKING_NOTIFIER_HEAD(extcon_dev_notifier_list);
+
 /**
  * check_mutually_exclusive - Check if new_state violates mutually_exclusive
  *			    condition.
@@ -328,6 +335,30 @@
 EXPORT_SYMBOL_GPL(extcon_find_cable_index);
 
 /**
+ * extcon_find_cable_type() - Get the cable type based on the cable index.
+ * @edev:	the extcon device that has the cable.
+ * @idx:	cable idx to be searched.
+ *
+ * This function is useful if the notifee want to know the cable type
+ * equivalent value defined in extcon_cable_name enum.
+ */
+int extcon_find_cable_type(struct extcon_dev *edev, int index)
+{
+	int i;
+
+	if (edev->supported_cable) {
+		for (i = 0; extcon_cable_name[i]; i++) {
+			if (!strncmp(edev->supported_cable[index],
+				extcon_cable_name[i], CABLE_NAME_MAX))
+				return i;
+		}
+	}
+
+	return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(extcon_find_cable_type);
+
+/**
  * extcon_get_cable_state_() - Get the status of a specific cable.
  * @edev:	the extcon device that has the cable.
  * @index:	cable index that can be retrieved by extcon_find_cable_index().
@@ -412,6 +443,27 @@
 }
 EXPORT_SYMBOL_GPL(extcon_get_extcon_dev);
 
+/**
+ * extcon_num_of_extcon_devs() - number of extcon devices
+ * returns the total number of extcon registered devices.
+ */
+int extcon_num_of_cable_devs(const char *cable)
+{
+	struct extcon_dev *sd = NULL;
+	int i, j = 0;
+
+	mutex_lock(&extcon_dev_list_lock);
+	list_for_each_entry(sd, &extcon_dev_list, entry) {
+		for (i = 0; sd && i < sd->max_supported; i++) {
+			if (!strcmp(sd->supported_cable[i], cable))
+				j++;
+		}
+	}
+	mutex_unlock(&extcon_dev_list_lock);
+	return j;
+}
+EXPORT_SYMBOL_GPL(extcon_num_of_cable_devs);
+
 static int _call_per_cable(struct notifier_block *nb, unsigned long val,
 			   void *ptr)
 {
@@ -559,7 +611,7 @@
 			return PTR_ERR(extcon_class);
 		extcon_class->dev_attrs = extcon_attrs;
 
-#if defined(CONFIG_ANDROID)
+#if defined(CONFIG_ANDROID) && !defined(CONFIG_SWITCH)
 		switch_class = class_compat_register("switch");
 		if (WARN(!switch_class, "cannot allocate"))
 			return -ENOMEM;
@@ -579,6 +631,30 @@
 {
 }
 
+void extcon_dev_register_notify(struct notifier_block *nb)
+{
+	blocking_notifier_chain_register(&extcon_dev_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(extcon_dev_register_notify);
+
+void extcon_dev_unregister_notify(struct notifier_block *nb)
+{
+	blocking_notifier_chain_unregister(&extcon_dev_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(extcon_dev_unregister_notify);
+
+void extcon_dev_notify_add_device(struct extcon_dev *edev)
+{
+	blocking_notifier_call_chain(&extcon_dev_notifier_list,
+				EXTCON_DEVICE_ADD, edev);
+}
+
+void extcon_dev_notify_remove_device(struct extcon_dev *edev)
+{
+	blocking_notifier_call_chain(&extcon_dev_notifier_list,
+			EXTCON_DEVICE_REMOVE, edev);
+}
+
 /**
  * extcon_dev_register() - Register a new extcon device
  * @edev	: the new extcon device (should be allocated before calling)
@@ -764,6 +840,8 @@
 	list_add(&edev->entry, &extcon_dev_list);
 	mutex_unlock(&extcon_dev_list_lock);
 
+	extcon_dev_notify_add_device(edev);
+
 	return 0;
 
 err_dev:
@@ -799,6 +877,8 @@
 {
 	int index;
 
+	extcon_dev_notify_remove_device(edev);
+
 	mutex_lock(&extcon_dev_list_lock);
 	list_del(&edev->entry);
 	mutex_unlock(&extcon_dev_list_lock);
@@ -809,6 +889,8 @@
 		return;
 	}
 
+	device_unregister(edev->dev);
+
 	if (edev->mutually_exclusive && edev->max_supported) {
 		for (index = 0; edev->mutually_exclusive[index];
 				index++)
@@ -829,7 +911,6 @@
 	if (switch_class)
 		class_compat_remove_link(switch_class, edev->dev, NULL);
 #endif
-	device_unregister(edev->dev);
 	put_device(edev->dev);
 }
 EXPORT_SYMBOL_GPL(extcon_dev_unregister);
diff --git a/drivers/external_drivers/Kconfig b/drivers/external_drivers/Kconfig
new file mode 100644
index 0000000..87404c5
--- /dev/null
+++ b/drivers/external_drivers/Kconfig
@@ -0,0 +1,32 @@
+menuconfig EXTERNAL_DRIVERS
+	bool "Enable support to external drivers"
+	depends on X86
+	default y
+	help
+	  External drivers are drivers located outside this kernel tree
+	  but allowed to be configured as builtin.
+	  External drivers directories will behave as a regular directory
+	  inside kernel tree.
+
+if EXTERNAL_DRIVERS
+menuconfig EXTERNAL_MISC_DRIVERS
+	bool "Enable support to misc drivers"
+	depends on EXTERNAL_DRIVERS
+	default y
+	help
+	  Enable support to external misc drivers.
+
+source "drivers/external_drivers/drivers/Kconfig"
+
+menuconfig EXTERNAL_INTEL_MEDIA
+	bool "Enable support to intel media drivers"
+	depends on EXTERNAL_DRIVERS
+	default y
+	help
+	  Enable support to external intel media drivers.
+
+if EXTERNAL_INTEL_MEDIA
+source "drivers/external_drivers/intel_media/Kconfig"
+endif
+
+endif
diff --git a/drivers/external_drivers/Makefile b/drivers/external_drivers/Makefile
new file mode 100644
index 0000000..d61d4ca
--- /dev/null
+++ b/drivers/external_drivers/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_EXTERNAL_MISC_DRIVERS)	+= drivers/
+obj-$(CONFIG_EXTERNAL_INTEL_MEDIA)	+= intel_media/
diff --git a/drivers/external_drivers/drivers/Kconfig b/drivers/external_drivers/drivers/Kconfig
new file mode 100644
index 0000000..7baf7c3
--- /dev/null
+++ b/drivers/external_drivers/drivers/Kconfig
@@ -0,0 +1,9 @@
+source "drivers/external_drivers/drivers/hwmon/Kconfig"
+source "drivers/external_drivers/drivers/misc/Kconfig"
+source "drivers/external_drivers/drivers/i2c/busses/Kconfig"
+source "drivers/external_drivers/drivers/power/Kconfig"
+source "drivers/external_drivers/drivers/platform/x86/Kconfig"
+source "drivers/external_drivers/drivers/hsu/Kconfig"
+source "drivers/external_drivers/drivers/input/Kconfig"
+source "drivers/external_drivers/drivers/socwatch/socwatch_driver/Kconfig"
+source "drivers/external_drivers/drivers/socwatch/soc_perf_driver/Kconfig"
diff --git a/drivers/external_drivers/drivers/Makefile b/drivers/external_drivers/drivers/Makefile
new file mode 100644
index 0000000..6ae5b34
--- /dev/null
+++ b/drivers/external_drivers/drivers/Makefile
@@ -0,0 +1,8 @@
+obj-$(CONFIG_HWMON)         += hwmon/
+obj-y                       += i2c/
+obj-y                       += misc/
+obj-y                       += platform/x86/
+obj-$(CONFIG_POWER_SUPPLY)  += power/
+obj-y                       += hsu/
+obj-$(CONFIG_INPUT)         += input/
+obj-y                       += socwatch/
diff --git a/drivers/external_drivers/drivers/hsu/Kconfig b/drivers/external_drivers/drivers/hsu/Kconfig
new file mode 100644
index 0000000..90abeb6
--- /dev/null
+++ b/drivers/external_drivers/drivers/hsu/Kconfig
@@ -0,0 +1,11 @@
+config SERIAL_MFD_HSU_EXT
+	tristate "Medfield High Speed UART support"
+	depends on PCI
+	select SERIAL_CORE
+
+config SERIAL_MFD_HSU_EXT_CONSOLE
+	boolean "Medfile HSU serial console support"
+	depends on SERIAL_MFD_HSU_EXT=y
+	select SERIAL_CORE_CONSOLE
+
+
diff --git a/drivers/external_drivers/drivers/hsu/Makefile b/drivers/external_drivers/drivers/hsu/Makefile
new file mode 100644
index 0000000..c86ae93
--- /dev/null
+++ b/drivers/external_drivers/drivers/hsu/Makefile
@@ -0,0 +1,3 @@
+ccflags-y = -I$(src)
+
+obj-$(CONFIG_SERIAL_MFD_HSU_EXT)    += mfd_core.o mfd_dma.o mfd_pci.o mfd_plat.o
diff --git a/drivers/external_drivers/drivers/hsu/mfd.h b/drivers/external_drivers/drivers/hsu/mfd.h
new file mode 100644
index 0000000..22cb3f3
--- /dev/null
+++ b/drivers/external_drivers/drivers/hsu/mfd.h
@@ -0,0 +1,252 @@
+#ifndef _MFD_H
+#define _MFD_H
+
+#include <linux/serial_core.h>
+#include <linux/serial_reg.h>
+#include <linux/serial_mfd.h>
+#include <linux/intel_mid_dma.h>
+#include <linux/intel_mid_pm.h>
+#include <linux/dma-direction.h>
+#include <asm/intel_mid_hsu.h>
+
+#define HSU_PORT_MAX		8
+#define HSU_DMA_BUF_SIZE	2048
+#define HSU_Q_MAX		4096
+#define HSU_CL_BUF_LEN		(1 << CONFIG_LOG_BUF_SHIFT)
+#define HSU_DMA_BSR		32
+#define HSU_DMA_MOTSR		4
+#define HSU_PIO_RX_ERR		0x06
+#define HSU_PIO_RX_AVB		0x04
+#define HSU_PIO_RX_TMO		0x0C
+#define HSU_PIO_TX_REQ		0x02
+
+enum {
+	flag_console = 0,
+	flag_reopen,
+	flag_suspend,
+	flag_active,
+	flag_set_alt,
+	flag_tx_on,
+	flag_rx_on,
+	flag_rx_pending,
+	flag_startup,
+	flag_cmd_on,
+	flag_cmd_off,
+};
+
+enum {
+	qcmd_overflow = 0,
+	qcmd_get_msr,
+	qcmd_set_mcr,
+	qcmd_set_ier,
+	qcmd_start_rx,
+	qcmd_stop_rx,
+	qcmd_start_tx,
+	qcmd_stop_tx,
+	qcmd_cl,
+	qcmd_port_irq,
+	qcmd_dma_irq,
+	qcmd_enable_irq,
+	qcmd_cmd_off,
+	qcmd_max,
+};
+
+enum {
+	context_save,
+	context_load,
+};
+
+struct hsu_dma_buffer {
+	u8		*buf;
+	dma_addr_t	dma_addr;
+	u32		dma_size;
+	u32		ofs;
+};
+
+struct hsu_dma_chan {
+	u32	id;
+	enum dma_data_direction	dirt;
+	struct uart_hsu_port	*uport;
+	void __iomem		*reg;
+	u32	cr;
+	u32	dcr;
+	u32	sar;
+	u32	tsr;
+};
+
+struct dw_dma_priv {
+	struct intel_mid_dma_slave	txs;
+	struct intel_mid_dma_slave	rxs;
+
+	struct uart_hsu_port	*up;
+
+	struct dma_chan		*txchan;
+	struct dma_chan		*rxchan;
+
+	/* phy address of the Data register */
+	dma_addr_t		dma_addr;
+	struct pci_dev		*dmac;
+};
+
+struct intel_dma_priv {
+	unsigned int		tx_addr;
+	struct hsu_dma_chan	*txc;
+	struct hsu_dma_chan	*rxc;
+};
+
+struct hsu_dma_ops {
+	int (*init)(struct uart_hsu_port *up);
+	int (*exit)(struct uart_hsu_port *up);
+	int (*suspend)(struct uart_hsu_port *up);
+	int (*resume)(struct uart_hsu_port *up);
+	void (*start_tx)(struct uart_hsu_port *up);
+	void (*stop_tx)(struct uart_hsu_port *up);
+	void (*start_rx)(struct uart_hsu_port *up);
+	void (*stop_rx)(struct uart_hsu_port *up);
+	/* op will be context_save or context_load */
+	void (*context_op)(struct uart_hsu_port *up, int op);
+};
+
+struct uart_hsu_port {
+	struct uart_port        port;
+	struct mutex		q_mutex;
+	int			q_start;
+	struct workqueue_struct *workqueue;
+	struct work_struct	work;
+	struct tasklet_struct	tasklet;
+	struct circ_buf		qcirc;
+	int			qbuf[HSU_Q_MAX];
+	struct circ_buf		cl_circ;
+	spinlock_t		cl_lock;
+
+	/* Intel HSU or Designware */
+	int			hw_type;
+
+	unsigned char           msr;
+	unsigned char           ier;
+	unsigned char           lcr;
+	unsigned char           mcr;
+	unsigned char           lsr;
+	unsigned char           dll;
+	unsigned char           dlm;
+	unsigned char		fcr;
+	/* intel_hsu's clk param */
+	unsigned int		mul;
+	unsigned int		div;
+	unsigned int		ps;
+
+	/* Buffered value due to runtime PM and sharing IRQ */
+	unsigned char		iir;
+
+	/* intel_dw's clk param */
+	unsigned int		m;
+	unsigned int		n;
+
+	unsigned int            lsr_break_flag;
+	char			name[24];
+	int			index;
+	struct device		*dev;
+
+	unsigned int		tx_addr;
+	struct hsu_dma_chan	*txc;
+	struct hsu_dma_chan	*rxc;
+	struct hsu_dma_buffer	txbuf;
+	struct hsu_dma_buffer	rxbuf;
+
+	unsigned char		rxc_chcr_save;
+
+	unsigned long		flags;
+
+	unsigned int		qcmd_num;
+	unsigned int		qcmd_done;
+	unsigned int		port_irq_num;
+	unsigned int		port_irq_cmddone;
+	unsigned int		port_irq_no_alt;
+	unsigned int		port_irq_no_startup;
+	unsigned int		port_irq_pio_no_irq_pend;
+	unsigned int		port_irq_pio_tx_req;
+	unsigned int		port_irq_pio_rx_avb;
+	unsigned int		port_irq_pio_rx_err;
+	unsigned int		port_irq_pio_rx_timeout;
+	unsigned int		cts_status;
+	unsigned int		dma_irq_num;
+	unsigned int		dma_invalid_irq_num;
+	unsigned int		dma_irq_cmddone;
+	unsigned int		dma_tx_irq_cmddone;
+	unsigned int		dma_rx_irq_cmddone;
+	unsigned int		dma_rx_tmt_irq_cmddone;
+	unsigned int		tasklet_done;
+	unsigned int		workq_done;
+	unsigned int		in_workq;
+	unsigned int		in_tasklet;
+
+	unsigned int		byte_delay;
+
+	int			use_dma;	/* flag for DMA/PIO */
+	unsigned int		dma_irq;
+	unsigned int		port_dma_sts;
+
+	void			*dma_priv;
+	struct hsu_dma_ops	*dma_ops;
+	struct pm_qos_request   qos;
+	int			dma_inited;
+};
+
+struct hsu_port {
+	int dma_irq;
+	int port_num;
+	int irq_port_and_dma;
+	struct hsu_port_cfg	*configs[HSU_PORT_MAX];
+	void __iomem	*reg;
+	struct uart_hsu_port	port[HSU_PORT_MAX];
+	struct hsu_dma_chan	chans[HSU_PORT_MAX * 2];
+	spinlock_t		dma_lock;
+	struct dentry *debugfs;
+};
+
+#define chan_readl(chan, offset)	readl(chan->reg + offset)
+#define chan_writel(chan, offset, val)	writel(val, chan->reg + offset)
+
+#define mfd_readl(obj, offset)		readl(obj->reg + offset)
+#define mfd_writel(obj, offset, val)	writel(val, obj->reg + offset)
+
+static inline unsigned int serial_in(struct uart_hsu_port *up, int offset)
+{
+	unsigned int val;
+
+	if (offset > UART_MSR || up->hw_type == hsu_dw) {
+		offset <<= 2;
+		val = readl(up->port.membase + offset);
+	} else
+		val = (unsigned int)readb(up->port.membase + offset);
+
+	return val;
+}
+
+static inline void serial_out(struct uart_hsu_port *up, int offset, int value)
+{
+	if (offset > UART_MSR || up->hw_type == hsu_dw) {
+		offset <<= 2;
+		writel(value, up->port.membase + offset);
+	} else {
+		unsigned char val = value & 0xff;
+		writeb(val, up->port.membase + offset);
+	}
+}
+void serial_sched_cmd(struct uart_hsu_port *up, char cmd);
+extern struct hsu_dma_ops *pdw_dma_ops;
+extern struct hsu_dma_ops intel_dma_ops;
+
+struct uart_hsu_port *serial_hsu_port_setup(struct device *pdev, int port,
+	resource_size_t start, resource_size_t len, int irq);
+void serial_hsu_port_free(struct uart_hsu_port *up);
+void serial_hsu_port_shutdown(struct uart_hsu_port *up);
+int serial_hsu_dma_setup(struct device *pdev,
+	resource_size_t start, resource_size_t len, unsigned int irq, int share);
+void serial_hsu_dma_free(void);
+int serial_hsu_do_suspend(struct uart_hsu_port *up);
+int serial_hsu_do_resume(struct uart_hsu_port *up);
+int serial_hsu_do_runtime_idle(struct uart_hsu_port *up);
+
+#include "mfd_trace.h"
+#endif
diff --git a/drivers/external_drivers/drivers/hsu/mfd_core.c b/drivers/external_drivers/drivers/hsu/mfd_core.c
new file mode 100644
index 0000000..810f85a
--- /dev/null
+++ b/drivers/external_drivers/drivers/hsu/mfd_core.c
@@ -0,0 +1,2484 @@
+/*
+ * mfd_core.c: driver core for High Speed UART device of Intel Medfield platform
+ *
+ * Refer pxa.c, 8250.c and some other drivers in drivers/serial/
+ *
+ * (C) Copyright 2010 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+/* Notes:
+ * 1. DMA channel allocation: 0/1 channel are assigned to port 0,
+ *    2/3 chan to port 1, 4/5 chan to port 3. Even number chans
+ *    are used for RX, odd chans for TX
+ *
+ * 2. The RI/DSR/DCD/DTR are not pinned out, DCD & DSR are always
+ *    asserted, only when the HW is reset the DDCD and DDSR will
+ *    be triggered
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/sysrq.h>
+#include <linux/slab.h>
+#include <linux/circ_buf.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/debugfs.h>
+#include <linux/pm_runtime.h>
+#include <linux/irq.h>
+#include <linux/intel_mid_pm.h>
+#include <linux/pm_qos.h>
+
+#define CREATE_TRACE_POINTS
+#include "mfd.h"
+
+static int hsu_dma_enable = 0xff;
+module_param(hsu_dma_enable, int, 0);
+MODULE_PARM_DESC(hsu_dma_enable,
+		 "It is a bitmap to set working mode, if bit[x] is 1, then port[x] will work in DMA mode, otherwise in PIO mode.");
+
+static struct hsu_port hsu;
+static struct hsu_port *phsu = &hsu;
+static struct uart_driver serial_hsu_reg;
+static struct hsu_port_cfg *hsu_port_func_cfg;
+
+static void serial_hsu_command(struct uart_hsu_port *up);
+
+int hsu_register_board_info(void *inf)
+{
+	hsu_port_func_cfg = inf;
+	return 0;
+}
+
+static inline int check_qcmd(struct uart_hsu_port *up, char *cmd)
+{
+	struct circ_buf *circ = &up->qcirc;
+	char *buf;
+
+	buf = circ->buf + circ->tail;
+	*cmd = *buf;
+	return CIRC_CNT(circ->head, circ->tail, HSU_Q_MAX);
+}
+
+static inline void insert_qcmd(struct uart_hsu_port *up, char cmd)
+{
+	struct circ_buf *circ = &up->qcirc;
+	char *buf;
+	char last_cmd;
+
+	trace_hsu_cmd_insert(up->index, cmd);
+	if (check_qcmd(up, &last_cmd) && last_cmd == cmd &&
+		cmd != qcmd_enable_irq && cmd != qcmd_port_irq &&
+                cmd != qcmd_dma_irq)
+		return;
+	trace_hsu_cmd_add(up->index, cmd);
+	up->qcmd_num++;
+	buf = circ->buf + circ->head;
+	if (CIRC_SPACE(circ->head, circ->tail, HSU_Q_MAX) < 1)
+		*buf = qcmd_overflow;
+	else {
+		*buf = cmd;
+		circ->head++;
+		if (circ->head == HSU_Q_MAX)
+			circ->head = 0;
+	}
+}
+
+static inline int get_qcmd(struct uart_hsu_port *up, char *cmd)
+{
+	struct circ_buf *circ = &up->qcirc;
+	char *buf;
+
+	if (!CIRC_CNT(circ->head, circ->tail, HSU_Q_MAX))
+		return 0;
+	buf = circ->buf + circ->tail;
+	*cmd = *buf;
+	circ->tail++;
+	if (circ->tail == HSU_Q_MAX)
+		circ->tail = 0;
+	up->qcmd_done++;
+	return 1;
+}
+
+static inline void cl_put_char(struct uart_hsu_port *up, char c)
+{
+	struct circ_buf *circ = &up->cl_circ;
+	char *buf;
+	unsigned long flags;
+
+	spin_lock_irqsave(&up->cl_lock, flags);
+	buf = circ->buf + circ->head;
+	if (CIRC_SPACE(circ->head, circ->tail, HSU_CL_BUF_LEN) > 1) {
+		*buf = c;
+		circ->head++;
+		if (circ->head == HSU_CL_BUF_LEN)
+			circ->head = 0;
+	}
+	spin_unlock_irqrestore(&up->cl_lock, flags);
+}
+
+static inline int cl_get_char(struct uart_hsu_port *up, char *c)
+{
+	struct circ_buf *circ = &up->cl_circ;
+	char *buf;
+	unsigned long flags;
+
+	spin_lock_irqsave(&up->cl_lock, flags);
+	if (!CIRC_CNT(circ->head, circ->tail, HSU_CL_BUF_LEN)) {
+		spin_unlock_irqrestore(&up->cl_lock, flags);
+		return 0;
+	}
+	buf = circ->buf + circ->tail;
+	*c = *buf;
+	circ->tail++;
+	if (circ->tail == HSU_CL_BUF_LEN)
+		circ->tail = 0;
+	spin_unlock_irqrestore(&up->cl_lock, flags);
+	return 1;
+}
+
+
+
+void serial_sched_cmd(struct uart_hsu_port *up, char cmd)
+{
+	pm_runtime_get(up->dev);
+	insert_qcmd(up, cmd);
+	if (test_bit(flag_cmd_on, &up->flags)) {
+		if (up->use_dma)
+			tasklet_schedule(&up->tasklet);
+		else
+			queue_work(up->workqueue, &up->work);
+	}
+	pm_runtime_put(up->dev);
+}
+
+static inline void serial_sched_sync(struct uart_hsu_port *up)
+{
+	mutex_lock(&up->q_mutex);
+	if (up->q_start > 0) {
+		if (up->use_dma) {
+			tasklet_disable(&up->tasklet);
+			serial_hsu_command(up);
+			tasklet_enable(&up->tasklet);
+		} else {
+			flush_workqueue(up->workqueue);
+		}
+	}
+	mutex_unlock(&up->q_mutex);
+}
+
+static inline void serial_sched_start(struct uart_hsu_port *up)
+{
+	unsigned long flags;
+
+	mutex_lock(&up->q_mutex);
+	up->q_start++;
+	if (up->q_start == 1) {
+		clear_bit(flag_cmd_off, &up->flags);
+		spin_lock_irqsave(&up->port.lock, flags);
+		set_bit(flag_cmd_on, &up->flags);
+		spin_unlock_irqrestore(&up->port.lock, flags);
+		if (up->use_dma)
+			tasklet_schedule(&up->tasklet);
+		else
+			queue_work(up->workqueue, &up->work);
+	}
+	mutex_unlock(&up->q_mutex);
+}
+
+static inline void serial_sched_stop(struct uart_hsu_port *up)
+{
+	unsigned long flags;
+
+	mutex_lock(&up->q_mutex);
+	up->q_start--;
+	if (up->q_start == 0) {
+		spin_lock_irqsave(&up->port.lock, flags);
+		clear_bit(flag_cmd_on, &up->flags);
+		insert_qcmd(up, qcmd_cmd_off);
+		spin_unlock_irqrestore(&up->port.lock, flags);
+		if (up->use_dma) {
+			tasklet_schedule(&up->tasklet);
+			while (!test_bit(flag_cmd_off, &up->flags))
+				cpu_relax();
+		} else {
+			queue_work(up->workqueue, &up->work);
+			flush_workqueue(up->workqueue);
+		}
+	}
+	mutex_unlock(&up->q_mutex);
+}
+
+static void serial_set_alt(int index)
+{
+	struct uart_hsu_port *up = phsu->port + index;
+	struct hsu_dma_chan *txc = up->txc;
+	struct hsu_dma_chan *rxc = up->rxc;
+	struct hsu_port_cfg *cfg = phsu->configs[index];
+
+	if (test_bit(flag_set_alt, &up->flags))
+		return;
+
+	trace_hsu_func_start(up->index, __func__);
+	pm_runtime_get_sync(up->dev);
+	disable_irq(up->port.irq);
+	disable_irq(up->dma_irq);
+	serial_sched_stop(up);
+	if (up->use_dma && up->hw_type == hsu_intel) {
+		txc->uport = up;
+		rxc->uport = up;
+	}
+	dev_set_drvdata(up->dev, up);
+	if (cfg->hw_set_alt)
+		cfg->hw_set_alt(index);
+	if (cfg->hw_set_rts)
+		cfg->hw_set_rts(up->index, 0);
+	set_bit(flag_set_alt, &up->flags);
+	serial_sched_start(up);
+	enable_irq(up->dma_irq);
+	enable_irq(up->port.irq);
+	pm_runtime_put(up->dev);
+	trace_hsu_func_end(up->index, __func__, "");
+}
+
+static void serial_clear_alt(int index)
+{
+	struct uart_hsu_port *up = phsu->port + index;
+	struct hsu_port_cfg *cfg = phsu->configs[index];
+
+	if (!test_bit(flag_set_alt, &up->flags))
+		return;
+
+	pm_runtime_get_sync(up->dev);
+	disable_irq(up->port.irq);
+	disable_irq(up->dma_irq);
+	serial_sched_stop(up);
+	if (cfg->hw_set_rts)
+		cfg->hw_set_rts(up->index, 1);
+	clear_bit(flag_set_alt, &up->flags);
+	serial_sched_start(up);
+	enable_irq(up->dma_irq);
+	enable_irq(up->port.irq);
+	pm_runtime_put(up->dev);
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+#define HSU_DBGFS_BUFSIZE	8192
+
+static int hsu_show_regs_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t port_show_regs(struct file *file, char __user *user_buf,
+				size_t count, loff_t *ppos)
+{
+	struct uart_hsu_port *up = file->private_data;
+	char *buf;
+	u32 len = 0;
+	ssize_t ret;
+
+	buf = kzalloc(HSU_DBGFS_BUFSIZE, GFP_KERNEL);
+	if (!buf)
+		return 0;
+
+	pm_runtime_get_sync(up->dev);
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"MFD HSU port[%d] regs:\n", up->index);
+
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"=================================\n");
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"IER: \t\t0x%08x\n", serial_in(up, UART_IER));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"IIR: \t\t0x%08x\n", serial_in(up, UART_IIR));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"LCR: \t\t0x%08x\n", serial_in(up, UART_LCR));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"MCR: \t\t0x%08x\n", serial_in(up, UART_MCR));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"LSR: \t\t0x%08x\n", serial_in(up, UART_LSR));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"MSR: \t\t0x%08x\n", serial_in(up, UART_MSR));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"FOR: \t\t0x%08x\n", serial_in(up, UART_FOR));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"PS: \t\t0x%08x\n", serial_in(up, UART_PS));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"MUL: \t\t0x%08x\n", serial_in(up, UART_MUL));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"DIV: \t\t0x%08x\n", serial_in(up, UART_DIV));
+	pm_runtime_put(up->dev);
+
+	if (len > HSU_DBGFS_BUFSIZE)
+		len = HSU_DBGFS_BUFSIZE;
+
+	ret =  simple_read_from_buffer(user_buf, count, ppos, buf, len);
+	kfree(buf);
+	return ret;
+}
+
+static ssize_t dma_show_regs(struct file *file, char __user *user_buf,
+				size_t count, loff_t *ppos)
+{
+	struct hsu_dma_chan *chan = file->private_data;
+	char *buf;
+	u32 len = 0;
+	ssize_t ret;
+
+	buf = kzalloc(HSU_DBGFS_BUFSIZE, GFP_KERNEL);
+	if (!buf)
+		return 0;
+
+	pm_runtime_get_sync(chan->uport->dev);
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"MFD HSU DMA channel [%d] regs:\n", chan->id);
+
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"=================================\n");
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"CR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_CR));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"DCR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_DCR));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"BSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_BSR));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"MOTSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_MOTSR));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"D0SAR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D0SAR));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"D0TSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D0TSR));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"D0SAR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D1SAR));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"D0TSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D1TSR));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"D0SAR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D2SAR));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"D0TSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D2TSR));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"D0SAR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D3SAR));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"D0TSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D3TSR));
+	pm_runtime_put(chan->uport->dev);
+
+	if (len > HSU_DBGFS_BUFSIZE)
+		len = HSU_DBGFS_BUFSIZE;
+
+	ret =  simple_read_from_buffer(user_buf, count, ppos, buf, len);
+	kfree(buf);
+	return ret;
+}
+
+static ssize_t hsu_dump_show(struct file *file, char __user *user_buf,
+				size_t count, loff_t *ppos)
+{
+	struct uart_hsu_port *up;
+	struct hsu_port_cfg *cfg;
+	char *buf;
+	char cmd;
+	int i;
+	u32 len = 0;
+	ssize_t ret;
+	struct irq_desc *dma_irqdesc = irq_to_desc(phsu->dma_irq);
+	struct irq_desc *port_irqdesc;
+	struct circ_buf *xmit;
+
+	buf = kzalloc(HSU_DBGFS_BUFSIZE, GFP_KERNEL);
+	if (!buf)
+		return 0;
+
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+		"HSU status dump:\n");
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+		"\tdma irq (>0: disable): %d\n",
+		dma_irqdesc ? dma_irqdesc->depth : 0);
+	for (i = 0; i < phsu->port_num; i++) {
+		up = phsu->port + i;
+		cfg = hsu_port_func_cfg + i;
+		port_irqdesc = irq_to_desc(up->port.irq);
+		xmit = &up->port.state->xmit;
+
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"HSU port[%d] %s:\n", up->index, cfg->name);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"xmit empty[%d] xmit pending[%d]\n",
+			uart_circ_empty(xmit),
+			(int)uart_circ_chars_pending(xmit));
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tsuspend idle: %d\n", cfg->idle);
+		if (cfg->has_alt)
+			len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+				"\talt port: %d\n", cfg->alt);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+				"\tforce_suspend: %d\n", cfg->force_suspend);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+				"\tcts status: %d\n", up->cts_status);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tuse_dma: %s\n",
+			up->use_dma ? "yes" : "no");
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tflag_console: %s\n",
+			test_bit(flag_console, &up->flags) ? "yes" : "no");
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tflag_suspend: %s\n",
+			test_bit(flag_suspend, &up->flags) ? "yes" : "no");
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tflag_active: %s\n",
+			test_bit(flag_active, &up->flags) ? "yes" : "no");
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tflag_set_alt: %s\n",
+			test_bit(flag_set_alt, &up->flags) ? "yes" : "no");
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tflag_startup: %s\n",
+			test_bit(flag_startup, &up->flags) ? "yes" : "no");
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tqcmd q_start: %d\n", up->q_start);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tqcmd total count: %d\n", up->qcmd_num);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tqcmd done count: %d\n", up->qcmd_done);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tport irq count: %d\n", up->port_irq_num);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tport irq cmddone: %d\n", up->port_irq_cmddone);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tport irq cts: %d\n", up->port.icount.cts);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tport irq rng: %d\n", up->port.icount.rng);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tport irq dsr: %d\n", up->port.icount.dsr);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tport irq no irq pending: %d\n",
+			up->port_irq_pio_no_irq_pend);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tport irq no alt: %d\n",
+			up->port_irq_no_alt);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tport irq no startup: %d\n",
+			up->port_irq_no_startup);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tport irq pio rx error: %d\n",
+			up->port_irq_pio_rx_err);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tport irq pio rx available: %d\n",
+			up->port_irq_pio_rx_avb);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tport irq pio rx fifo timeout: %d\n",
+			up->port_irq_pio_rx_timeout);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tport irq pio tx request: %d\n",
+			up->port_irq_pio_tx_req);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tdma invalid irq count: %d\n",
+			up->dma_invalid_irq_num);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tdma irq count: %d\n", up->dma_irq_num);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tdma irq cmddone: %d\n", up->dma_irq_cmddone);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tdma tx irq cmddone: %d\n",
+			up->dma_tx_irq_cmddone);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tport&dma rx irq cmddone: %d\n",
+			up->dma_rx_irq_cmddone);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tport&dma rx timeout irq cmddone: %d\n",
+			up->dma_rx_tmt_irq_cmddone);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\ttasklet done: %d\n", up->tasklet_done);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tworkq done: %d\n", up->workq_done);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tqcmd pending count: %d\n", check_qcmd(up, &cmd));
+		if (check_qcmd(up, &cmd))
+			len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+				"\tqcmd pending next: %d\n", cmd);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tin tasklet: %d\n", up->in_tasklet);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tin workq: %d\n", up->in_workq);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tport irq (>0: disable): %d\n",
+			port_irqdesc ? port_irqdesc->depth : 0);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tbyte delay: %d\n", up->byte_delay);
+	}
+	if (len > HSU_DBGFS_BUFSIZE)
+		len = HSU_DBGFS_BUFSIZE;
+
+	ret =  simple_read_from_buffer(user_buf, count, ppos, buf, len);
+	kfree(buf);
+	return ret;
+}
+
+
+static const struct file_operations port_regs_ops = {
+	.owner		= THIS_MODULE,
+	.open		= hsu_show_regs_open,
+	.read		= port_show_regs,
+	.llseek		= default_llseek,
+};
+
+static const struct file_operations dma_regs_ops = {
+	.owner		= THIS_MODULE,
+	.open		= hsu_show_regs_open,
+	.read		= dma_show_regs,
+	.llseek		= default_llseek,
+};
+
+static const struct file_operations hsu_dump_ops = {
+	.owner		= THIS_MODULE,
+	.read		= hsu_dump_show,
+	.llseek		= default_llseek,
+};
+
+static int hsu_debugfs_init(struct hsu_port *hsu)
+{
+	int i;
+	char name[32];
+
+	hsu->debugfs = debugfs_create_dir("hsu", NULL);
+	if (!hsu->debugfs)
+		return -ENOMEM;
+
+	for (i = 0; i < 3; i++) {
+		snprintf(name, sizeof(name), "port_%d_regs", i);
+		debugfs_create_file(name, S_IRUSR,
+			hsu->debugfs, (void *)(&hsu->port[i]), &port_regs_ops);
+	}
+
+	for (i = 0; i < 6; i++) {
+		snprintf(name, sizeof(name), "dma_chan_%d_regs", i);
+		debugfs_create_file(name, S_IRUSR,
+			hsu->debugfs, (void *)&hsu->chans[i], &dma_regs_ops);
+	}
+
+	snprintf(name, sizeof(name), "dump_status");
+	debugfs_create_file(name, S_IRUSR,
+		hsu->debugfs, NULL, &hsu_dump_ops);
+
+	return 0;
+}
+
+static void hsu_debugfs_remove(struct hsu_port *hsu)
+{
+	if (hsu->debugfs)
+		debugfs_remove_recursive(hsu->debugfs);
+}
+
+#else
+static inline int hsu_debugfs_init(struct hsu_port *hsu)
+{
+	return 0;
+}
+
+static inline void hsu_debugfs_remove(struct hsu_port *hsu)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
+static void serial_hsu_enable_ms(struct uart_port *port)
+{
+	struct uart_hsu_port *up =
+		container_of(port, struct uart_hsu_port, port);
+
+	trace_hsu_func_start(up->index, __func__);
+	up->ier |= UART_IER_MSI;
+	serial_sched_cmd(up, qcmd_set_ier);
+	trace_hsu_func_end(up->index, __func__, "");
+}
+
+/* Protected by spin_lock_irqsave(port->lock) */
+static void serial_hsu_start_tx(struct uart_port *port)
+{
+	struct uart_hsu_port *up =
+		container_of(port, struct uart_hsu_port, port);
+
+	trace_hsu_func_start(up->index, __func__);
+	serial_sched_cmd(up, qcmd_start_tx);
+	trace_hsu_func_end(up->index, __func__, "");
+}
+
+static void serial_hsu_stop_tx(struct uart_port *port)
+{
+	struct uart_hsu_port *up =
+		container_of(port, struct uart_hsu_port, port);
+
+	trace_hsu_func_start(up->index, __func__);
+	serial_sched_cmd(up, qcmd_stop_tx);
+	trace_hsu_func_end(up->index, __func__, "");
+}
+
+static void hsu_stop_tx(struct uart_port *port)
+{
+	struct uart_hsu_port *up =
+		container_of(port, struct uart_hsu_port, port);
+
+	trace_hsu_func_start(up->index, __func__);
+	serial_sched_cmd(up, qcmd_stop_tx);
+	trace_hsu_func_end(up->index, __func__, "");
+}
+
+/* This is always called in spinlock protected mode, so
+ * modify timeout timer is safe here */
+void intel_dma_do_rx(struct uart_hsu_port *up, u32 int_sts)
+{
+	struct hsu_dma_buffer *dbuf = &up->rxbuf;
+	struct hsu_dma_chan *chan = up->rxc;
+	struct uart_port *port = &up->port;
+	struct tty_struct *tty;
+	struct tty_port *tport = &port->state->port;
+	int count;
+
+	trace_hsu_func_start(up->index, __func__);
+	tty = tty_port_tty_get(&up->port.state->port);
+	if (!tty) {
+		trace_hsu_func_end(up->index, __func__, "notty");
+		return;
+	}
+
+	/*
+	 * First need to know how many is already transferred,
+	 * then check if its a timeout DMA irq, and return
+	 * the trail bytes out, push them up and reenable the
+	 * channel
+	 */
+
+	/* Timeout IRQ, need wait some time, see Errata 2 */
+	if (int_sts & 0xf00) {
+		up->dma_rx_tmt_irq_cmddone++;
+		udelay(2);
+	} else
+		up->dma_rx_irq_cmddone++;
+
+	/* Stop the channel */
+	chan_writel(chan, HSU_CH_CR, 0x0);
+
+	count = chan_readl(chan, HSU_CH_D0SAR) - dbuf->dma_addr;
+	if (!count) {
+		/* Restart the channel before we leave */
+		chan_writel(chan, HSU_CH_CR, 0x3);
+		tty_kref_put(tty);
+		trace_hsu_func_end(up->index, __func__, "nodata");
+		return;
+	}
+
+	dma_sync_single_for_cpu(port->dev, dbuf->dma_addr,
+			dbuf->dma_size, DMA_FROM_DEVICE);
+
+	/*
+	 * Head will only wrap around when we recycle
+	 * the DMA buffer, and when that happens, we
+	 * explicitly set tail to 0. So head will
+	 * always be greater than tail.
+	 */
+	tty_insert_flip_string(tport, dbuf->buf, count);
+	port->icount.rx += count;
+
+	dma_sync_single_for_device(up->port.dev, dbuf->dma_addr,
+			dbuf->dma_size, DMA_FROM_DEVICE);
+
+	/* Reprogram the channel */
+	chan_writel(chan, HSU_CH_D0SAR, dbuf->dma_addr);
+	chan_writel(chan, HSU_CH_D0TSR, dbuf->dma_size);
+	chan_writel(chan, HSU_CH_DCR, 0x1
+					 | (0x1 << 8)
+					 | (0x1 << 16)
+					 | (0x1 << 24)	/* timeout bit, see HSU Errata 1 */
+					 );
+	tty_flip_buffer_push(tport);
+
+	chan_writel(chan, HSU_CH_CR, 0x3);
+	tty_kref_put(tty);
+	trace_hsu_func_end(up->index, __func__, "");
+
+}
+
+static void serial_hsu_stop_rx(struct uart_port *port)
+{
+	struct uart_hsu_port *up =
+		container_of(port, struct uart_hsu_port, port);
+
+	trace_hsu_func_start(up->index, __func__);
+	serial_sched_cmd(up, qcmd_stop_rx);
+	trace_hsu_func_end(up->index, __func__, "");
+}
+
+static inline void receive_chars(struct uart_hsu_port *up, int *status)
+{
+	struct tty_struct *tty = up->port.state->port.tty;
+	struct tty_port *tport = &up->port.state->port;
+	unsigned int ch, flag;
+	unsigned int max_count = 256;
+
+	if (!tty)
+		return;
+
+	trace_hsu_func_start(up->index, __func__);
+	do {
+		ch = serial_in(up, UART_RX);
+		flag = TTY_NORMAL;
+		up->port.icount.rx++;
+
+		if (unlikely(*status & (UART_LSR_BI | UART_LSR_PE |
+				       UART_LSR_FE | UART_LSR_OE))) {
+
+			dev_warn(up->dev,
+				"We really rush into ERR/BI case"
+				"status = 0x%02x\n", *status);
+			/* For statistics only */
+			if (*status & UART_LSR_BI) {
+				*status &= ~(UART_LSR_FE | UART_LSR_PE);
+				up->port.icount.brk++;
+				/*
+				 * We do the SysRQ and SAK checking
+				 * here because otherwise the break
+				 * may get masked by ignore_status_mask
+				 * or read_status_mask.
+				 */
+				if (uart_handle_break(&up->port))
+					goto ignore_char;
+			} else if (*status & UART_LSR_PE)
+				up->port.icount.parity++;
+			else if (*status & UART_LSR_FE)
+				up->port.icount.frame++;
+			if (*status & UART_LSR_OE)
+				up->port.icount.overrun++;
+
+			/* Mask off conditions which should be ignored. */
+			*status &= up->port.read_status_mask;
+
+#ifdef CONFIG_SERIAL_MFD_HSU_EXT_CONSOLE
+			if (up->port.cons &&
+				up->port.cons->index == up->port.line) {
+				/* Recover the break flag from console xmit */
+				*status |= up->lsr_break_flag;
+				up->lsr_break_flag = 0;
+			}
+#endif
+			if (*status & UART_LSR_BI) {
+				flag = TTY_BREAK;
+			} else if (*status & UART_LSR_PE)
+				flag = TTY_PARITY;
+			else if (*status & UART_LSR_FE)
+				flag = TTY_FRAME;
+		}
+
+		if (uart_handle_sysrq_char(&up->port, ch))
+			goto ignore_char;
+
+		uart_insert_char(&up->port, *status, UART_LSR_OE, ch, flag);
+	ignore_char:
+		*status = serial_in(up, UART_LSR);
+	} while ((*status & UART_LSR_DR) && max_count--);
+
+	tty_flip_buffer_push(tport);
+	trace_hsu_func_end(up->index, __func__, "");
+}
+
+static void transmit_chars(struct uart_hsu_port *up)
+{
+	struct circ_buf *xmit = &up->port.state->xmit;
+	unsigned long flags;
+	int count;
+
+	spin_lock_irqsave(&up->port.lock, flags);
+	trace_hsu_func_start(up->index, __func__);
+	if (up->port.x_char) {
+		serial_out(up, UART_TX, up->port.x_char);
+		up->port.icount.tx++;
+		up->port.x_char = 0;
+		trace_hsu_func_end(up->index, __func__, "x_char");
+		goto out;
+	}
+	if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) {
+		hsu_stop_tx(&up->port);
+		if (uart_circ_empty(xmit))
+			trace_hsu_func_end(up->index, __func__, "empty");
+		else
+			trace_hsu_func_end(up->index, __func__, "stop");
+		goto out;
+	}
+
+	/* The IRQ is for TX FIFO half-empty */
+	count = up->port.fifosize / 2;
+
+	do {
+		if (uart_tx_stopped(&up->port)) {
+			hsu_stop_tx(&up->port);
+			break;
+		}
+		serial_out(up, UART_TX, xmit->buf[xmit->tail]);
+		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+
+		up->port.icount.tx++;
+		if (uart_circ_empty(xmit))
+			break;
+	} while (--count > 0);
+
+	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+		uart_write_wakeup(&up->port);
+
+	if (uart_circ_empty(xmit)) {
+		hsu_stop_tx(&up->port);
+		trace_hsu_func_end(up->index, __func__, "tx_complete");
+	}
+	else
+		trace_hsu_func_end(up->index, __func__, "");
+
+out:
+	spin_unlock_irqrestore(&up->port.lock, flags);
+}
+
+static void check_modem_status(struct uart_hsu_port *up)
+{
+	struct uart_port *uport = &up->port;
+	struct tty_port *port = &uport->state->port;
+	struct tty_struct *tty = port->tty;
+	struct hsu_port_cfg *cfg = phsu->configs[up->index];
+	int status;
+	int delta_msr = 0;
+
+	trace_hsu_func_start(up->index, __func__);
+	status = serial_in(up, UART_MSR);
+	trace_hsu_mctrl(up->index, status);
+	if (port->flags & ASYNC_CTS_FLOW && !cfg->hw_ctrl_cts) {
+		if (tty->hw_stopped) {
+			if (status & UART_MSR_CTS) {
+				serial_sched_cmd(up, qcmd_start_tx);
+				tty->hw_stopped = 0;
+				up->cts_status = 0;
+				uport->icount.cts++;
+				delta_msr = 1;
+				uart_write_wakeup(uport);
+			}
+		} else {
+			if (!(status & UART_MSR_CTS)) {
+				/* Is this automitically controlled */
+				if (up->use_dma)
+					up->dma_ops->stop_tx(up);
+				clear_bit(flag_tx_on, &up->flags);
+				tty->hw_stopped = 1;
+				up->cts_status = 1;
+				delta_msr = 1;
+				uport->icount.cts++;
+			}
+		}
+	}
+
+	if ((status & UART_MSR_ANY_DELTA)) {
+		if (status & UART_MSR_TERI)
+			up->port.icount.rng++;
+		if (status & UART_MSR_DDSR)
+			up->port.icount.dsr++;
+		/* We may only get DDCD when HW init and reset */
+		if (status & UART_MSR_DDCD)
+			uart_handle_dcd_change(&up->port,
+					status & UART_MSR_DCD);
+		delta_msr = 1;
+	}
+
+	if (delta_msr)
+		wake_up_interruptible(&up->port.state->port.delta_msr_wait);
+	trace_hsu_func_end(up->index, __func__, "");
+}
+
+static void hsu_dma_chan_handler(struct hsu_port *hsu, int index)
+{
+	unsigned long flags;
+	struct uart_hsu_port *up = hsu->chans[index * 2].uport;
+	struct hsu_dma_chan *txc = up->txc;
+	struct hsu_dma_chan *rxc = up->rxc;
+
+	up->dma_irq_num++;
+	if (unlikely(!up->use_dma
+		|| !test_bit(flag_startup, &up->flags))) {
+		up->dma_invalid_irq_num++;
+		chan_readl(txc, HSU_CH_SR);
+		chan_readl(rxc, HSU_CH_SR);
+		return;
+	}
+	disable_irq_nosync(up->dma_irq);
+	spin_lock_irqsave(&up->port.lock, flags);
+	serial_sched_cmd(up, qcmd_dma_irq);
+	spin_unlock_irqrestore(&up->port.lock, flags);
+}
+
+/*
+ * This handles the interrupt from one port.
+ */
+static irqreturn_t hsu_port_irq(int irq, void *dev_id)
+{
+	struct uart_hsu_port *up = dev_id;
+	unsigned long flags;
+	u8 lsr;
+
+	trace_hsu_func_start(up->index, __func__);
+	up->port_irq_num++;
+
+	if (up->hw_type == hsu_intel) {
+		if (unlikely(!test_bit(flag_set_alt, &up->flags))) {
+			up->port_irq_no_alt++;
+			trace_hsu_func_end(up->index, __func__, "noalt");
+			return IRQ_NONE;
+		}
+	} else {
+		if (unlikely(test_bit(flag_suspend, &up->flags))) {
+			trace_hsu_func_end(up->index, __func__, "suspend");
+			return IRQ_NONE;
+		}
+
+		/* On BYT, this IRQ may be shared with other HW */
+		up->iir = serial_in(up, UART_IIR);
+		if (unlikely(up->iir & 0x1)) {
+			/*
+			 * Read  UART_BYTE_COUNT and UART_OVERFLOW
+			 * registers to clear the overrun error on
+			 * Tx. This is a HW issue on VLV2 B0.
+			 * more information on HSD 4683358.
+			 */
+			serial_in(up, 0x818 / 4);
+			serial_in(up, 0x820 / 4);
+			trace_hsu_func_end(up->index, __func__, "workaround");
+			return IRQ_NONE;
+		}
+	}
+
+	if (unlikely(!test_bit(flag_startup, &up->flags))) {
+		pr_err("recv IRQ when we are not startup yet\n");
+		/*SCU might forward it too late when it is closed already*/
+		serial_in(up, UART_LSR);
+		up->port_irq_no_startup++;
+		trace_hsu_func_end(up->index, __func__, "nostart");
+		return IRQ_HANDLED;
+	}
+
+	/* DesignWare HW's DMA mode still needs the port irq */
+	if (up->use_dma && up->hw_type == hsu_intel) {
+		lsr = serial_in(up, UART_LSR);
+		spin_lock_irqsave(&up->port.lock, flags);
+		check_modem_status(up);
+		spin_unlock_irqrestore(&up->port.lock, flags);
+		if (unlikely(lsr & (UART_LSR_BI | UART_LSR_PE |
+				UART_LSR_FE | UART_LSR_OE)))
+			dev_warn(up->dev,
+				"Got LSR irq(0x%02x) while using DMA", lsr);
+		trace_hsu_func_end(up->index, __func__, "lsr");
+		return IRQ_HANDLED;
+	}
+
+	disable_irq_nosync(up->port.irq);
+	spin_lock_irqsave(&up->port.lock, flags);
+	serial_sched_cmd(up, qcmd_port_irq);
+	spin_unlock_irqrestore(&up->port.lock, flags);
+
+	trace_hsu_func_end(up->index, __func__, "");
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t hsu_dma_irq(int irq, void *dev_id)
+{
+	struct uart_hsu_port *up;
+	unsigned long flags;
+	unsigned int dmairq;
+	int i;
+
+	spin_lock_irqsave(&phsu->dma_lock, flags);
+	dmairq = mfd_readl(phsu, HSU_GBL_DMAISR);
+	if (phsu->irq_port_and_dma) {
+		up = dev_id;
+		up->port_dma_sts = dmairq;
+		if (up->port_dma_sts & (3 << (up->index * 2)))
+			hsu_dma_chan_handler(phsu, up->index);
+	} else {
+		for (i = 0; i < 3; i++)
+			if (dmairq & (3 << (i * 2))) {
+				up = phsu->chans[i * 2].uport;
+				up->port_dma_sts = dmairq;
+				hsu_dma_chan_handler(phsu, i);
+			}
+	}
+	spin_unlock_irqrestore(&phsu->dma_lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+static unsigned int serial_hsu_tx_empty(struct uart_port *port)
+{
+	struct uart_hsu_port *up =
+		container_of(port, struct uart_hsu_port, port);
+	int ret = 1;
+
+	trace_hsu_func_start(up->index, __func__);
+	pm_runtime_get_sync(up->dev);
+	serial_sched_stop(up);
+
+	if (up->use_dma && test_bit(flag_tx_on, &up->flags))
+		ret = 0;
+	ret = ret &&
+		(serial_in(up, UART_LSR) & UART_LSR_TEMT ? TIOCSER_TEMT : 0);
+	serial_sched_start(up);
+	pm_runtime_put(up->dev);
+	trace_hsu_func_end(up->index, __func__, "");
+	return ret;
+}
+
+static unsigned int serial_hsu_get_mctrl(struct uart_port *port)
+{
+	struct uart_hsu_port *up =
+		container_of(port, struct uart_hsu_port, port);
+	unsigned char status = up->msr;
+	unsigned int ret = 0;
+
+	trace_hsu_func_start(up->index, __func__);
+	if (status & UART_MSR_DCD)
+		ret |= TIOCM_CAR;
+	if (status & UART_MSR_RI)
+		ret |= TIOCM_RNG;
+	if (status & UART_MSR_DSR)
+		ret |= TIOCM_DSR;
+	if (status & UART_MSR_CTS)
+		ret |= TIOCM_CTS;
+	trace_hsu_func_end(up->index, __func__, "");
+	return ret;
+}
+
+static void set_mctrl(struct uart_hsu_port *up, unsigned int mctrl)
+{
+	trace_hsu_func_start(up->index, __func__);
+	up->mcr &= ~(UART_MCR_RTS | UART_MCR_DTR | UART_MCR_OUT1 |
+		     UART_MCR_OUT2 | UART_MCR_LOOP);
+	if (mctrl & TIOCM_RTS)
+		up->mcr |= UART_MCR_RTS;
+	if (mctrl & TIOCM_DTR)
+		up->mcr |= UART_MCR_DTR;
+	if (mctrl & TIOCM_OUT1)
+		up->mcr |= UART_MCR_OUT1;
+	if (mctrl & TIOCM_OUT2)
+		up->mcr |= UART_MCR_OUT2;
+	if (mctrl & TIOCM_LOOP)
+		up->mcr |= UART_MCR_LOOP;
+	trace_hsu_mctrl(up->index, mctrl);
+	serial_out(up, UART_MCR, up->mcr);
+	udelay(100);
+	trace_hsu_func_end(up->index, __func__, "");
+}
+
+static void serial_hsu_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+	struct uart_hsu_port *up =
+		container_of(port, struct uart_hsu_port, port);
+
+	trace_hsu_func_start(up->index, __func__);
+	up->mcr &= ~(UART_MCR_RTS | UART_MCR_DTR | UART_MCR_OUT1 |
+		     UART_MCR_OUT2 | UART_MCR_LOOP);
+	if (mctrl & TIOCM_RTS)
+		up->mcr |= UART_MCR_RTS;
+	if (mctrl & TIOCM_DTR)
+		up->mcr |= UART_MCR_DTR;
+	if (mctrl & TIOCM_OUT1)
+		up->mcr |= UART_MCR_OUT1;
+	if (mctrl & TIOCM_OUT2)
+		up->mcr |= UART_MCR_OUT2;
+	if (mctrl & TIOCM_LOOP)
+		up->mcr |= UART_MCR_LOOP;
+	serial_sched_cmd(up, qcmd_set_mcr);
+	trace_hsu_func_end(up->index, __func__, "");
+}
+
+static void serial_hsu_break_ctl(struct uart_port *port, int break_state)
+{
+	struct uart_hsu_port *up =
+		container_of(port, struct uart_hsu_port, port);
+
+	trace_hsu_func_start(up->index, __func__);
+	pm_runtime_get_sync(up->dev);
+	serial_sched_stop(up);
+	if (break_state == -1)
+		up->lcr |= UART_LCR_SBC;
+	else
+		up->lcr &= ~UART_LCR_SBC;
+	serial_out(up, UART_LCR, up->lcr);
+	serial_sched_start(up);
+	pm_runtime_put(up->dev);
+	trace_hsu_func_end(up->index, __func__, "");
+}
+
+/*
+ * What special to do:
+ * 1. chose the 64B fifo mode
+ * 2. start dma or pio depends on configuration
+ * 3. we only allocate dma memory when needed
+ */
+static int serial_hsu_startup(struct uart_port *port)
+{
+	static int console_first_init = 1;
+	int ret = 0;
+	unsigned long flags;
+	static DEFINE_MUTEX(lock);
+	struct uart_hsu_port *up =
+		container_of(port, struct uart_hsu_port, port);
+	struct hsu_port_cfg *cfg = phsu->configs[up->index];
+
+	trace_hsu_func_start(up->index, __func__);
+	mutex_lock(&lock);
+
+	pm_runtime_get_sync(up->dev);
+
+	/* HW start it */
+	if (cfg->hw_reset)
+		cfg->hw_reset(up->port.membase);
+
+	if (console_first_init && test_bit(flag_console, &up->flags)) {
+		serial_sched_stop(up);
+		console_first_init = 0;
+	}
+	clear_bit(flag_reopen, &up->flags);
+	if (cfg->has_alt) {
+		struct hsu_port_cfg *alt_cfg = hsu_port_func_cfg + cfg->alt;
+		struct uart_hsu_port *alt_up = phsu->port + alt_cfg->index;
+
+		if (test_bit(flag_startup, &alt_up->flags) &&
+			alt_up->port.state->port.tty) {
+			if (alt_cfg->force_suspend) {
+				uart_suspend_port(&serial_hsu_reg,
+							&alt_up->port);
+				serial_clear_alt(alt_up->index);
+				set_bit(flag_reopen, &alt_up->flags);
+			} else {
+				int loop = 50;
+
+				while (test_bit(flag_startup,
+						&alt_up->flags) && --loop)
+					msleep(20);
+				if (test_bit(flag_startup, &alt_up->flags)) {
+					WARN(1, "Share port open timeout\n");
+					ret = -EBUSY;
+					goto out;
+				}
+			}
+		}
+	}
+	serial_set_alt(up->index);
+	serial_sched_start(up);
+	serial_sched_stop(up);
+
+	/*
+	 * Clear the FIFO buffers and disable them.
+	 * (they will be reenabled in set_termios())
+	 */
+	serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
+	serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
+			UART_FCR_CLEAR_RCVR |
+			UART_FCR_CLEAR_XMIT);
+	serial_out(up, UART_FCR, 0);
+
+	/* Clear the interrupt registers. */
+	(void) serial_in(up, UART_LSR);
+	(void) serial_in(up, UART_RX);
+	(void) serial_in(up, UART_IIR);
+	(void) serial_in(up, UART_MSR);
+
+	/* Now, initialize the UART, default is 8n1 */
+	serial_out(up, UART_LCR, UART_LCR_WLEN8);
+	up->port.mctrl |= TIOCM_OUT2;
+	set_mctrl(up, up->port.mctrl);
+
+	/* DMA init */
+	if (up->use_dma) {
+		ret = up->dma_ops->init ? up->dma_ops->init(up) : -ENODEV;
+		if (ret) {
+			dev_warn(up->dev, "Fail to init DMA, will use PIO\n");
+			up->use_dma = 0;
+		}
+	}
+
+	/*
+	 * Finally, enable interrupts.  Note: Modem status
+	 * interrupts are set via set_termios(), which will
+	 *  be occurring imminently
+	 * anyway, so we don't enable them here.
+	 */
+	/* bit 4 for DW is reserved, but SEG need it to be set */
+	if (!up->use_dma || up->hw_type == hsu_dw)
+		up->ier = UART_IER_RLSI | UART_IER_RDI | UART_IER_RTOIE;
+	else
+		up->ier = 0;
+	serial_out(up, UART_IER, up->ier);
+
+	/* And clear the interrupt registers again for luck. */
+	(void) serial_in(up, UART_LSR);
+	(void) serial_in(up, UART_RX);
+	(void) serial_in(up, UART_IIR);
+	(void) serial_in(up, UART_MSR);
+
+	set_bit(flag_startup, &up->flags);
+	serial_sched_start(up);
+	spin_lock_irqsave(&up->port.lock, flags);
+	serial_sched_cmd(up, qcmd_get_msr);
+	spin_unlock_irqrestore(&up->port.lock, flags);
+	serial_sched_sync(up);
+
+out:
+	pm_runtime_put(up->dev);
+	mutex_unlock(&lock);
+	trace_hsu_func_end(up->index, __func__, "");
+	return ret;
+}
+
+static void serial_hsu_shutdown(struct uart_port *port)
+{
+	static DEFINE_MUTEX(lock);
+	struct uart_hsu_port *up =
+		container_of(port, struct uart_hsu_port, port);
+	struct hsu_port_cfg *cfg = phsu->configs[up->index];
+
+	trace_hsu_func_start(up->index, __func__);
+	mutex_lock(&lock);
+	pm_runtime_get_sync(up->dev);
+	serial_sched_stop(up);
+	clear_bit(flag_startup, &up->flags);
+
+	/* Disable interrupts from this port */
+	up->ier = 0;
+	serial_out(up, UART_IER, 0);
+
+	clear_bit(flag_tx_on, &up->flags);
+
+	up->port.mctrl &= ~TIOCM_OUT2;
+	set_mctrl(up, up->port.mctrl);
+
+	/* Disable break condition and FIFOs */
+	serial_out(up, UART_LCR,
+			serial_in(up, UART_LCR) & ~UART_LCR_SBC);
+	serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
+			UART_FCR_CLEAR_RCVR |
+			UART_FCR_CLEAR_XMIT);
+	serial_out(up, UART_FCR, 0);
+
+	/* Free allocated dma buffer */
+	if (up->use_dma)
+		up->dma_ops->exit(up);
+
+	if (cfg->has_alt) {
+		struct hsu_port_cfg *alt_cfg = hsu_port_func_cfg + cfg->alt;
+		struct uart_hsu_port *alt_up = phsu->port + alt_cfg->index;
+
+		if (test_bit(flag_reopen, &alt_up->flags)) {
+			serial_clear_alt(up->index);
+			uart_resume_port(&serial_hsu_reg, &alt_up->port);
+		}
+	}
+
+	pm_runtime_put_sync(up->dev);
+	mutex_unlock(&lock);
+	trace_hsu_func_end(up->index, __func__, "");
+}
+
+/* calculate mul,div for low fref e.g. TNG B0 38.4M
+ * finally the fref will swith to high fref e.g. 100M
+*/
+static bool calc_for_low_fref(u32 clock, u32 baud, u32 *mul, u32 *div)
+{
+	if (clock == 38400) {
+		switch (baud) {
+		case 3500000:
+			/* ps: 10 */
+			*mul = 350;
+			*div = 384;
+			break;
+		case 3000000:
+			/* ps: 12 */
+			*mul = 360;
+			*div = 384;
+			break;
+		case 2500000:
+			/* ps: 12 */
+			*mul = 300;
+			*div = 384;
+			break;
+		case 2000000:
+			/* ps: 16 */
+			*mul = 320;
+			*div = 384;
+			break;
+		case 1843200:
+			/* ps: 16 */
+			*mul = 294912;
+			*div = 384000;
+			break;
+		case 1500000:
+			/* ps: 16 */
+			*mul = 240;
+			*div = 384;
+			break;
+		case 1000000:
+			/* ps: 16 */
+			*mul = 160;
+			*div = 384;
+			break;
+		case 500000:
+			/* ps: 16 */
+			*mul = 80;
+			*div = 384;
+			break;
+		}
+		return true;
+	} else
+		return false;
+}
+
+static void
+serial_hsu_set_termios(struct uart_port *port, struct ktermios *termios,
+		       struct ktermios *old)
+{
+	struct uart_hsu_port *up =
+			container_of(port, struct uart_hsu_port, port);
+	struct hsu_port_cfg *cfg = phsu->configs[up->index];
+	unsigned char cval, fcr = 0;
+	unsigned long flags;
+	unsigned int baud, quot, clock, bits;
+	/* 0x3d09 is default dividor value refer for deatils
+	 * please refer high speed UART HAS documents.
+	 */
+	u32 ps = 0, mul = 0, div = 0x3D09, m = 0, n = 0;
+
+	trace_hsu_func_start(up->index, __func__);
+	switch (termios->c_cflag & CSIZE) {
+	case CS5:
+		cval = UART_LCR_WLEN5;
+		bits = 7;
+		break;
+	case CS6:
+		cval = UART_LCR_WLEN6;
+		bits = 8;
+		break;
+	case CS7:
+		cval = UART_LCR_WLEN7;
+		bits = 9;
+		break;
+	default:
+	case CS8:
+		cval = UART_LCR_WLEN8;
+		bits = 10;
+		break;
+	}
+
+	/* CMSPAR isn't supported by this driver */
+	termios->c_cflag &= ~CMSPAR;
+
+	if (termios->c_cflag & CSTOPB) {
+		cval |= UART_LCR_STOP;
+		bits++;
+	}
+	if (termios->c_cflag & PARENB) {
+		cval |= UART_LCR_PARITY;
+		bits++;
+	}
+	if (!(termios->c_cflag & PARODD))
+		cval |= UART_LCR_EPAR;
+
+	baud = uart_get_baud_rate(port, termios, old, 0, 4000000);
+	trace_hsu_set_termios(up->index, baud, termios->c_cflag & CRTSCTS ? 1 : 0);
+
+	if (up->hw_type == hsu_intel) {
+		/*
+		 * If base clk is 50Mhz, and the baud rate come from:
+		 *	baud = 50M * MUL / (DIV * PS * DLAB)
+		 *
+		 * For those basic low baud rate we can get the direct
+		 * scalar from 2746800, like 115200 = 2746800/24. For those
+		 * higher baud rate, we handle them case by case, mainly by
+		 * adjusting the MUL/PS registers, and DIV register is kept
+		 * as default value 0x3d09 to make things simple
+		 */
+
+		if (cfg->hw_get_clk)
+			clock = cfg->hw_get_clk();
+		else
+			clock = 50000;
+		/* ps = 16 is prefered, if not have to use 12, l0 */
+		if (baud * 16 <= clock * 1000)
+			ps = 16;
+		else if (baud * 12 <= clock * 1000)
+			ps = 12;
+		else if (baud * 10 <= clock * 1000)
+			ps = 10;
+		else
+			pr_err("port:%d baud:%d is too high for clock:%u M\n",
+				up->index, baud, clock / 1000);
+
+		switch (baud) {
+		case 3500000:
+		case 3000000:
+		case 2500000:
+		case 2000000:
+		case 1843200:
+		case 1500000:
+		case 1000000:
+		case 500000:
+			quot = 1;
+			if (!calc_for_low_fref(clock, baud, &mul, &div))
+				/*
+				 * mul = baud * 0x3d09 * ps / 1000 / clock
+				 * change the formula order to avoid overflow
+				 */
+				mul = (0x3d09 * ps / 100) * (baud / 100)
+					* 10 / clock;
+			break;
+		default:
+			/* Use uart_get_divisor to get quot for other baud rates
+			 * avoid overflow: mul = uartclk * 0x3d09 / clock / 1000
+			 * uartclk is multiply of 115200 * n * 16 */
+			mul = (up->port.uartclk / 1600) * 0x3d09 /
+				clock * 16 / 10;
+			quot = 0;
+		}
+
+		if (!quot)
+			quot = uart_get_divisor(port, baud);
+
+		if ((up->port.uartclk / quot) < (2400 * 16))
+			fcr = UART_FCR_ENABLE_FIFO | UART_FCR_HSU_64_1B;
+		else if ((up->port.uartclk / quot) < (230400 * 16))
+			fcr = UART_FCR_ENABLE_FIFO | UART_FCR_HSU_64_16B;
+		else
+			fcr = UART_FCR_ENABLE_FIFO | UART_FCR_HSU_64_32B;
+
+		fcr |= UART_FCR_HSU_64B_FIFO;
+	} else {
+		/* need calc quot here */
+		switch (baud) {
+		case 3000000:
+		case 1500000:
+		case 1000000:
+		case 500000:
+			m = 48;
+			n = 100;
+			quot = 3000000 / baud;
+			break;
+		default:
+			m = 9216;
+			n = 15625;
+			quot = 0;
+		}
+		if (!quot)
+			quot = uart_get_divisor(port, baud);
+
+		fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10 |
+			UART_FCR_T_TRIG_11;
+		if (baud < 2400) {
+			fcr &= ~UART_FCR_TRIGGER_MASK;
+			fcr |= UART_FCR_TRIGGER_1;
+		}
+	}
+
+	/* one byte transfer duration unit microsecond */
+	up->byte_delay = (bits * 1000000 + baud - 1) / baud;
+
+	pm_runtime_get_sync(up->dev);
+	serial_sched_stop(up);
+	/*
+	 * Ok, we're now changing the port state.  Do it with
+	 * interrupts disabled.
+	 */
+	spin_lock_irqsave(&up->port.lock, flags);
+
+	/* Update the per-port timeout */
+	uart_update_timeout(port, termios->c_cflag, baud);
+
+	up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
+	if (termios->c_iflag & INPCK)
+		up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
+	if (termios->c_iflag & (BRKINT | PARMRK))
+		up->port.read_status_mask |= UART_LSR_BI;
+
+	/* Characters to ignore */
+	up->port.ignore_status_mask = 0;
+	if (termios->c_iflag & IGNPAR)
+		up->port.ignore_status_mask |= UART_LSR_PE | UART_LSR_FE;
+	if (termios->c_iflag & IGNBRK) {
+		up->port.ignore_status_mask |= UART_LSR_BI;
+		/*
+		 * If we're ignoring parity and break indicators,
+		 * ignore overruns too (for real raw support).
+		 */
+		if (termios->c_iflag & IGNPAR)
+			up->port.ignore_status_mask |= UART_LSR_OE;
+	}
+
+	/* Ignore all characters if CREAD is not set */
+	if ((termios->c_cflag & CREAD) == 0)
+		up->port.ignore_status_mask |= UART_LSR_DR;
+
+	/*
+	 * CTS flow control flag and modem status interrupts, disable
+	 * MSI by default
+	 */
+	up->ier &= ~UART_IER_MSI;
+	if (!cfg->hw_ctrl_cts && UART_ENABLE_MS(&up->port, termios->c_cflag))
+		up->ier |= UART_IER_MSI;
+
+	serial_out(up, UART_IER, up->ier);
+
+	if (termios->c_cflag & CRTSCTS)
+		up->mcr |= UART_MCR_AFE | UART_MCR_RTS;
+	else
+		up->mcr &= ~UART_MCR_AFE;
+
+	up->dll	= quot & 0xff;
+	up->dlm	= quot >> 8;
+	up->fcr	= fcr;
+	up->lcr = cval;					/* Save LCR */
+
+	serial_out(up, UART_LCR, cval | UART_LCR_DLAB);	/* set DLAB */
+	serial_out(up, UART_DLL, up->dll);		/* LS of divisor */
+	serial_out(up, UART_DLM, up->dlm);		/* MS of divisor */
+	serial_out(up, UART_LCR, cval);			/* reset DLAB */
+
+	if (up->hw_type == hsu_intel) {
+		up->mul	= mul;
+		up->div = div;
+		up->ps	= ps;
+		serial_out(up, UART_MUL, up->mul);	/* set MUL */
+		serial_out(up, UART_DIV, up->div);	/* set DIV */
+		serial_out(up, UART_PS, up->ps);	/* set PS */
+	} else {
+		if (m != up->m || n != up->n) {
+			if (cfg->set_clk)
+				cfg->set_clk(m, n, up->port.membase);
+			up->m = m;
+			up->n = n;
+		}
+	}
+
+	serial_out(up, UART_FCR, fcr);
+	set_mctrl(up, up->port.mctrl);
+	serial_sched_cmd(up, qcmd_get_msr);
+	spin_unlock_irqrestore(&up->port.lock, flags);
+	serial_sched_start(up);
+	serial_sched_sync(up);
+	pm_runtime_put(up->dev);
+	trace_hsu_func_end(up->index, __func__, "");
+}
+
+static void
+serial_hsu_pm(struct uart_port *port, unsigned int state,
+	      unsigned int oldstate)
+{
+}
+
+static void serial_hsu_release_port(struct uart_port *port)
+{
+}
+
+static int serial_hsu_request_port(struct uart_port *port)
+{
+	return 0;
+}
+
+static void serial_hsu_config_port(struct uart_port *port, int flags)
+{
+	struct uart_hsu_port *up =
+		container_of(port, struct uart_hsu_port, port);
+	up->port.type = PORT_MFD;
+}
+
+static int
+serial_hsu_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+	/* We don't want the core code to modify any port params */
+	return -EINVAL;
+}
+
+static const char *
+serial_hsu_type(struct uart_port *port)
+{
+	struct uart_hsu_port *up =
+		container_of(port, struct uart_hsu_port, port);
+	return up->name;
+}
+
+struct device *intel_mid_hsu_set_wake_peer(int port,
+	void (*wake_peer)(struct device *))
+{
+	struct hsu_port_cfg *cfg = phsu->configs[port];
+
+	cfg->wake_peer = wake_peer;
+	return cfg->dev;
+}
+EXPORT_SYMBOL(intel_mid_hsu_set_wake_peer);
+
+static void serial_hsu_wake_peer(struct uart_port *port)
+{
+	struct uart_hsu_port *up =
+			container_of(port, struct uart_hsu_port, port);
+	struct hsu_port_cfg *cfg = phsu->configs[up->index];
+
+	trace_hsu_func_start(up->index, __func__);
+	if (cfg->wake_peer)
+		cfg->wake_peer(cfg->dev);
+	trace_hsu_func_end(up->index, __func__, "");
+}
+
+#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
+/* Wait for transmitter & holding register to empty */
+static inline int wait_for_xmitr(struct uart_hsu_port *up)
+{
+	unsigned int status, tmout = 10000;
+
+	while (--tmout) {
+		status = serial_in(up, UART_LSR);
+		if (status & UART_LSR_BI)
+			up->lsr_break_flag = UART_LSR_BI;
+		udelay(1);
+		if (status & BOTH_EMPTY)
+			break;
+	}
+	if (tmout == 0)
+		return 0;
+
+	if (up->port.flags & UPF_CONS_FLOW) {
+		tmout = 10000;
+		while (--tmout &&
+		       ((serial_in(up, UART_MSR) & UART_MSR_CTS) == 0))
+			udelay(1);
+		if (tmout == 0)
+			return 0;
+	}
+	return 1;
+}
+
+#ifdef CONFIG_CONSOLE_POLL
+static int serial_hsu_get_poll_char(struct uart_port *port)
+{
+	struct uart_hsu_port *up =
+		container_of(port, struct uart_hsu_port, port);
+	u8 lsr;
+
+	lsr = serial_in(up, UART_LSR);
+	if (!(lsr & UART_LSR_DR))
+		return NO_POLL_CHAR;
+	return serial_in(up, UART_RX);
+}
+
+static void serial_hsu_put_poll_char(struct uart_port *port,
+			unsigned char c)
+{
+	struct uart_hsu_port *up =
+		container_of(port, struct uart_hsu_port, port);
+
+	serial_out(up, UART_IER, 0);
+	while (!wait_for_xmitr(up))
+		cpu_relax();
+	serial_out(up, UART_TX, c);
+	while (!wait_for_xmitr(up))
+		cpu_relax();
+	serial_out(up, UART_IER, up->ier);
+}
+#endif
+
+#ifdef CONFIG_SERIAL_MFD_HSU_EXT_CONSOLE
+static void serial_hsu_console_putchar(struct uart_port *port, int ch)
+{
+	struct uart_hsu_port *up =
+		container_of(port, struct uart_hsu_port, port);
+	cl_put_char(up, ch);
+}
+
+/*
+ * Print a string to the serial port trying not to disturb
+ * any possible real use of the port...
+ *
+ *	The console_lock must be held when we get here.
+ */
+static void
+serial_hsu_console_write(struct console *co, const char *s, unsigned int count)
+{
+	struct uart_hsu_port *up = phsu->port + co->index;
+	unsigned long flags;
+
+	uart_console_write(&up->port, s, count, serial_hsu_console_putchar);
+	spin_lock_irqsave(&up->cl_lock, flags);
+	serial_sched_cmd(up, qcmd_cl);
+	spin_unlock_irqrestore(&up->cl_lock, flags);
+}
+
+static struct console serial_hsu_console;
+
+static int __init
+serial_hsu_console_setup(struct console *co, char *options)
+{
+	struct uart_hsu_port *up = phsu->port + co->index;
+	int baud = 115200;
+	int bits = 8;
+	int parity = 'n';
+	int flow = 'n';
+	unsigned long flags;
+
+	if (co->index < 0 || co->index >= hsu_port_max)
+		return -ENODEV;
+
+	if (options)
+		uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+	pm_runtime_get_sync(up->dev);
+	set_bit(flag_console, &up->flags);
+	set_bit(flag_startup, &up->flags);
+	serial_set_alt(up->index);
+	serial_sched_start(up);
+	spin_lock_irqsave(&up->port.lock, flags);
+	serial_sched_cmd(up, qcmd_get_msr);
+	spin_unlock_irqrestore(&up->port.lock, flags);
+	serial_sched_sync(up);
+	pm_runtime_put(up->dev);
+	up->cl_circ.buf = kzalloc(HSU_CL_BUF_LEN, GFP_KERNEL);
+	if (up->cl_circ.buf == NULL)
+		return -ENOMEM;
+	return uart_set_options(&up->port, co, baud, parity, bits, flow);
+}
+
+static struct console serial_hsu_console = {
+	.name		= "ttyMFD",
+	.write		= serial_hsu_console_write,
+	.device		= uart_console_device,
+	.setup		= serial_hsu_console_setup,
+	.flags		= CON_PRINTBUFFER,
+	.index		= -1,
+	.data		= &serial_hsu_reg,
+};
+
+#define SERIAL_HSU_CONSOLE	(&serial_hsu_console)
+#else
+#define SERIAL_HSU_CONSOLE	NULL
+#endif
+
+struct uart_ops serial_hsu_pops = {
+	.tx_empty	= serial_hsu_tx_empty,
+	.set_mctrl	= serial_hsu_set_mctrl,
+	.get_mctrl	= serial_hsu_get_mctrl,
+	.stop_tx	= serial_hsu_stop_tx,
+	.start_tx	= serial_hsu_start_tx,
+	.stop_rx	= serial_hsu_stop_rx,
+	.enable_ms	= serial_hsu_enable_ms,
+	.break_ctl	= serial_hsu_break_ctl,
+	.startup	= serial_hsu_startup,
+	.shutdown	= serial_hsu_shutdown,
+	.set_termios	= serial_hsu_set_termios,
+	.pm		= serial_hsu_pm,
+	.type		= serial_hsu_type,
+	.release_port	= serial_hsu_release_port,
+	.request_port	= serial_hsu_request_port,
+	.config_port	= serial_hsu_config_port,
+	.verify_port	= serial_hsu_verify_port,
+	.wake_peer	= serial_hsu_wake_peer,
+#ifdef CONFIG_CONSOLE_POLL
+	.poll_get_char = serial_hsu_get_poll_char,
+	.poll_put_char = serial_hsu_put_poll_char,
+#endif
+};
+
+static struct uart_driver serial_hsu_reg = {
+	.owner		= THIS_MODULE,
+	.driver_name	= "MFD serial",
+	.dev_name	= "ttyMFD",
+	.major		= TTY_MAJOR,
+	.minor		= 128,
+	.nr		= HSU_PORT_MAX,
+};
+
+static irqreturn_t wakeup_irq(int irq, void *dev)
+{
+	struct uart_hsu_port *up = dev_get_drvdata(dev);
+	struct hsu_port_cfg *cfg = phsu->configs[up->index];
+
+	trace_hsu_func_start(up->index, __func__);
+	set_bit(flag_active, &up->flags);
+	if (cfg->preamble && cfg->hw_set_rts)
+		cfg->hw_set_rts(up->index, 1);
+	pm_runtime_get(dev);
+	pm_runtime_put(dev);
+	trace_hsu_func_end(up->index, __func__, "");
+	return IRQ_HANDLED;
+}
+
+#if defined(CONFIG_PM) || defined(CONFIG_PM_RUNTIME)
+static void hsu_flush_rxfifo(struct uart_hsu_port *up)
+{
+	unsigned int lsr, cnt;
+
+	if (up->hw_type == hsu_intel) {
+		cnt = serial_in(up, UART_FOR) & 0x7F;
+		if (cnt)
+			dev_dbg(up->dev,
+				"Warning: %d bytes are received"
+				" in RX fifo after RTS active for %d us\n",
+				cnt, up->byte_delay);
+		lsr = serial_in(up, UART_LSR);
+		if (lsr & UART_LSR_DR && cnt)
+			dev_dbg(up->dev,
+				"flush abnormal data in rx fifo\n");
+			while (cnt) {
+				serial_in(up, UART_RX);
+				cnt--;
+			}
+	}
+}
+
+static void hsu_regs_context(struct uart_hsu_port *up, int op)
+{
+	struct hsu_port_cfg *cfg = phsu->configs[up->index];
+
+	if (op == context_load) {
+		/*
+		 * Delay a while before HW get stable. Without this the
+		 * resume will just fail, as the value you write to the
+		 * HW register will not be really written.
+		 *
+		 * This is only needed for Tangier, which really powers gate
+		 * the HSU HW in runtime suspend. While in Penwell/CLV it is
+		 * only clock gated.
+		*/
+		usleep_range(500, 510);
+
+		if (cfg->hw_reset)
+			cfg->hw_reset(up->port.membase);
+
+		serial_out(up, UART_LCR, up->lcr);
+		serial_out(up, UART_LCR, up->lcr | UART_LCR_DLAB);
+		serial_out(up, UART_DLL, up->dll);
+		serial_out(up, UART_DLM, up->dlm);
+		serial_out(up, UART_LCR, up->lcr);
+
+		if (up->hw_type == hsu_intel) {
+			serial_out(up, UART_MUL, up->mul);
+			serial_out(up, UART_DIV, up->div);
+			serial_out(up, UART_PS, up->ps);
+		} else {
+			if (cfg->set_clk)
+				cfg->set_clk(up->m, up->n, up->port.membase);
+		}
+
+		serial_out(up, UART_MCR, up->mcr);
+		serial_out(up, UART_FCR, up->fcr);
+		serial_out(up, UART_IER, up->ier);
+	}
+
+	if (up->use_dma && up->dma_ops->context_op)
+		up->dma_ops->context_op(up, op);
+}
+
+int serial_hsu_do_suspend(struct uart_hsu_port *up)
+{
+	struct hsu_port_cfg *cfg = phsu->configs[up->index];
+	struct uart_port *uport = &up->port;
+	struct tty_port *tport = &uport->state->port;
+	struct tty_struct *tty = tport->tty;
+	struct circ_buf *xmit = &up->port.state->xmit;
+	char cmd;
+	unsigned long flags;
+
+	trace_hsu_func_start(up->index, __func__);
+
+	if (test_bit(flag_startup, &up->flags)) {
+		if (up->hw_type == hsu_intel &&
+			serial_in(up, UART_FOR) & 0x7F)
+			goto busy;
+		else if (up->hw_type == hsu_dw &&
+			serial_in(up, 0x7c / 4) & BIT(3))
+			goto busy;
+	}
+
+	if (up->use_dma) {
+		if (up->hw_type == hsu_intel) {
+			if (chan_readl(up->rxc, HSU_CH_D0SAR) >
+					up->rxbuf.dma_addr)
+				goto busy;
+		}
+	}
+
+	if (cfg->hw_set_rts)
+		cfg->hw_set_rts(up->index, 1);
+
+	disable_irq(up->port.irq);
+	disable_irq(up->dma_irq);
+
+	if (cfg->hw_set_rts)
+		usleep_range(up->byte_delay, up->byte_delay + 1);
+
+	serial_sched_stop(up);
+	set_bit(flag_suspend, &up->flags);
+
+	if (test_bit(flag_startup, &up->flags) && check_qcmd(up, &cmd)) {
+		dev_info(up->dev, "ignore suspend cmd: %d\n", cmd);
+		goto err;
+	}
+
+	if (test_bit(flag_tx_on, &up->flags)) {
+		dev_info(up->dev, "ignore suspend for tx on\n");
+		dev_info(up->dev,
+			"xmit pending:%d, stopped:%d, hw_stopped:%d, MSR:%x\n",
+			(int)uart_circ_chars_pending(xmit), tty->stopped,
+			tty->hw_stopped, serial_in(up, UART_MSR));
+		goto err;
+	}
+
+	if (test_bit(flag_startup, &up->flags) && !uart_circ_empty(xmit) &&
+		!uart_tx_stopped(&up->port)) {
+		dev_info(up->dev, "ignore suspend for xmit\n");
+		dev_info(up->dev,
+			"xmit pending:%d, stopped:%d, hw_stopped:%d, MSR:%x\n",
+			(int)uart_circ_chars_pending(xmit),
+			tty->stopped,
+			tty->hw_stopped,
+			serial_in(up, UART_MSR));
+		goto err;
+	}
+
+	if (up->use_dma) {
+		if (up->dma_ops->suspend(up))
+			goto err;
+	} else if (test_bit(flag_startup, &up->flags)) {
+		if (up->hw_type == hsu_intel &&
+			serial_in(up, UART_FOR) & 0x7F)
+			goto err;
+		else if (up->hw_type == hsu_dw &&
+			serial_in(up, 0x7c / 4) & BIT(3))
+			goto err;
+	}
+
+	if (cfg->hw_suspend)
+		cfg->hw_suspend(up->index, up->dev, wakeup_irq);
+	if (cfg->hw_context_save)
+		hsu_regs_context(up, context_save);
+	if (cfg->preamble && cfg->hw_suspend_post)
+		cfg->hw_suspend_post(up->index);
+	enable_irq(up->dma_irq);
+	if (up->hw_type == hsu_dw)
+		enable_irq(up->port.irq);
+
+	trace_hsu_func_end(up->index, __func__, "");
+	return 0;
+err:
+	if (cfg->hw_set_rts)
+		cfg->hw_set_rts(up->index, 0);
+	clear_bit(flag_suspend, &up->flags);
+	enable_irq(up->port.irq);
+	if (up->use_dma && up->hw_type == hsu_intel)
+		intel_dma_do_rx(up, 0);
+	enable_irq(up->dma_irq);
+	serial_sched_start(up);
+	spin_lock_irqsave(&up->port.lock, flags);
+	serial_sched_cmd(up, qcmd_get_msr);
+	spin_unlock_irqrestore(&up->port.lock, flags);
+	serial_sched_sync(up);
+busy:
+	pm_schedule_suspend(up->dev, cfg->idle);
+	trace_hsu_func_end(up->index, __func__, "busy");
+	return -EBUSY;
+}
+EXPORT_SYMBOL(serial_hsu_do_suspend);
+
+int serial_hsu_do_resume(struct uart_hsu_port *up)
+{
+	struct hsu_port_cfg *cfg = phsu->configs[up->index];
+	unsigned long flags;
+
+	trace_hsu_func_start(up->index, __func__);
+	if (!test_and_clear_bit(flag_suspend, &up->flags)) {
+		trace_hsu_func_end(up->index, __func__, "ignore");
+		return 0;
+	}
+	if (up->hw_type == hsu_dw)
+		disable_irq(up->port.irq);
+	if (cfg->hw_context_save)
+		hsu_regs_context(up, context_load);
+	if (cfg->hw_resume)
+		cfg->hw_resume(up->index, up->dev);
+	if (test_bit(flag_startup, &up->flags))
+		hsu_flush_rxfifo(up);
+	if (up->use_dma)
+		up->dma_ops->resume(up);
+	if (cfg->hw_set_rts)
+		cfg->hw_set_rts(up->index, 0);
+	enable_irq(up->port.irq);
+
+	serial_sched_start(up);
+	spin_lock_irqsave(&up->port.lock, flags);
+	serial_sched_cmd(up, qcmd_get_msr);
+	spin_unlock_irqrestore(&up->port.lock, flags);
+	serial_sched_sync(up);
+	trace_hsu_func_end(up->index, __func__, "");
+	return 0;
+}
+EXPORT_SYMBOL(serial_hsu_do_resume);
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+int serial_hsu_do_runtime_idle(struct uart_hsu_port *up)
+{
+	struct hsu_port_cfg *cfg = phsu->configs[up->index];
+
+	trace_hsu_func_start(up->index, __func__);
+	if (cfg->type == debug_port
+			&& system_state == SYSTEM_BOOTING)
+		/* if HSU is set as default console, but earlyprintk is not hsu,
+		 * then it will enter suspend and can not get back since system
+		 * is on boot up, no contex switch to let it resume, here just
+		 * postpone the suspend retry 30 seconds, then system should
+		 * have finished booting
+		 */
+		pm_schedule_suspend(up->dev, 30000);
+	else if (!test_and_clear_bit(flag_active, &up->flags))
+		pm_schedule_suspend(up->dev, 20);
+	else
+		pm_schedule_suspend(up->dev, cfg->idle);
+	trace_hsu_func_end(up->index, __func__, "");
+	return -EBUSY;
+}
+EXPORT_SYMBOL(serial_hsu_do_runtime_idle);
+#endif
+
+static void serial_hsu_command(struct uart_hsu_port *up)
+{
+	char cmd, c;
+	unsigned long flags;
+	unsigned int iir, lsr;
+	int status;
+	struct hsu_dma_chan *txc = up->txc;
+	struct hsu_dma_chan *rxc = up->rxc;
+	struct hsu_port_cfg *cfg = phsu->configs[up->index];
+
+	trace_hsu_func_start(up->index, __func__);
+	if (unlikely(test_bit(flag_cmd_off, &up->flags))) {
+		trace_hsu_func_end(up->index, __func__, "cmd_off");
+		return;
+	}
+	if (unlikely(test_bit(flag_suspend, &up->flags))) {
+		dev_err(up->dev,
+			"Error to handle cmd while port is suspended\n");
+		if (check_qcmd(up, &cmd))
+			dev_err(up->dev, "Command pending: %d\n", cmd);
+		trace_hsu_func_end(up->index, __func__, "suspend");
+		return;
+	}
+	set_bit(flag_active, &up->flags);
+	spin_lock_irqsave(&up->port.lock, flags);
+	while (get_qcmd(up, &cmd)) {
+		spin_unlock_irqrestore(&up->port.lock, flags);
+		trace_hsu_cmd_start(up->index, cmd);
+		switch (cmd) {
+		case qcmd_overflow:
+			dev_err(up->dev, "queue overflow!!\n");
+			break;
+		case qcmd_set_mcr:
+			serial_out(up, UART_MCR, up->mcr);
+			break;
+		case qcmd_set_ier:
+			serial_out(up, UART_IER, up->ier);
+			break;
+		case qcmd_start_rx:
+			/* use for DW DMA RX only */
+			if (test_and_clear_bit(flag_rx_pending, &up->flags)) {
+				if (up->use_dma)
+					up->dma_ops->start_rx(up);
+			}
+			break;
+		case qcmd_stop_rx:
+			if (!up->use_dma || up->hw_type == hsu_dw) {
+				up->ier &= ~UART_IER_RLSI;
+				up->port.read_status_mask &= ~UART_LSR_DR;
+				serial_out(up, UART_IER, up->ier);
+			}
+
+			if (up->use_dma)
+				up->dma_ops->stop_rx(up);
+			break;
+		case qcmd_start_tx:
+			if (up->use_dma) {
+				if (!test_bit(flag_tx_on, &up->flags))
+					up->dma_ops->start_tx(up);
+			} else if (!(up->ier & UART_IER_THRI)) {
+				up->ier |= UART_IER_THRI;
+				serial_out(up, UART_IER, up->ier);
+			}
+			break;
+		case qcmd_stop_tx:
+			if (up->use_dma) {
+				spin_lock_irqsave(&up->port.lock, flags);
+				up->dma_ops->stop_tx(up);
+				clear_bit(flag_tx_on, &up->flags);
+				spin_unlock_irqrestore(&up->port.lock, flags);
+			} else if (up->ier & UART_IER_THRI) {
+				up->ier &= ~UART_IER_THRI;
+				serial_out(up, UART_IER, up->ier);
+			}
+			break;
+		case qcmd_cl:
+			serial_out(up, UART_IER, 0);
+			while (cl_get_char(up, &c)) {
+				while (!wait_for_xmitr(up))
+					schedule();
+				serial_out(up, UART_TX, c);
+			}
+			serial_out(up, UART_IER, up->ier);
+			break;
+		case qcmd_port_irq:
+			up->port_irq_cmddone++;
+
+			/* Baytrail patform use shared IRQ and need more care */
+			if (up->hw_type == hsu_intel) {
+				iir = serial_in(up, UART_IIR);
+			} else {
+				if (up->iir & 0x1)
+					up->iir = serial_in(up, UART_IIR);
+				iir = up->iir;
+				up->iir = 1;
+			}
+
+			if (iir & UART_IIR_NO_INT) {
+				enable_irq(up->port.irq);
+				up->port_irq_pio_no_irq_pend++;
+				break;
+			}
+
+			if (iir & HSU_PIO_RX_ERR)
+				up->port_irq_pio_rx_err++;
+			if (iir & HSU_PIO_RX_AVB)
+				up->port_irq_pio_rx_avb++;
+			if (iir & HSU_PIO_RX_TMO)
+				up->port_irq_pio_rx_timeout++;
+			if (iir & HSU_PIO_TX_REQ)
+				up->port_irq_pio_tx_req++;
+
+			lsr = serial_in(up, UART_LSR);
+
+			/* We need to judge it's timeout or data available */
+			if (lsr & UART_LSR_DR) {
+				if (!up->use_dma) {
+					receive_chars(up, &lsr);
+				} else if (up->hw_type == hsu_dw) {
+					if ((iir & 0xf) == 0xc) {
+						/*
+						 * RX timeout IRQ, the DMA
+						 * channel may be stalled
+						 */
+						up->dma_ops->stop_rx(up);
+						receive_chars(up, &lsr);
+					} else
+						up->dma_ops->start_rx(up);
+				}
+			}
+
+			/* lsr will be renewed during the receive_chars */
+			if (!up->use_dma && (lsr & UART_LSR_THRE))
+				transmit_chars(up);
+
+			spin_lock_irqsave(&up->port.lock, flags);
+			serial_sched_cmd(up, qcmd_enable_irq);
+			spin_unlock_irqrestore(&up->port.lock, flags);
+			break;
+		case qcmd_enable_irq:
+			enable_irq(up->port.irq);
+			break;
+		case qcmd_dma_irq:
+			/* Only hsu_intel has this irq */
+			up->dma_irq_cmddone++;
+			if (up->port_dma_sts & (1 << txc->id)) {
+				up->dma_tx_irq_cmddone++;
+				status = chan_readl(txc, HSU_CH_SR);
+				up->dma_ops->start_tx(up);
+			}
+
+			if (up->port_dma_sts & (1 << rxc->id)) {
+				status = chan_readl(rxc, HSU_CH_SR);
+				intel_dma_do_rx(up, status);
+			}
+			enable_irq(up->dma_irq);
+			break;
+		case qcmd_cmd_off:
+			set_bit(flag_cmd_off, &up->flags);
+			break;
+		case qcmd_get_msr:
+			break;
+		default:
+			dev_err(up->dev, "invalid command!!\n");
+			break;
+		}
+		trace_hsu_cmd_end(up->index, cmd);
+		spin_lock_irqsave(&up->port.lock, flags);
+		if (unlikely(test_bit(flag_cmd_off, &up->flags)))
+			break;
+	}
+	up->msr = serial_in(up, UART_MSR);
+	if (cfg->hw_ctrl_cts)
+		up->msr |= UART_MSR_CTS;
+	check_modem_status(up);
+	spin_unlock_irqrestore(&up->port.lock, flags);
+	trace_hsu_func_end(up->index, __func__, "");
+}
+
+static void serial_hsu_tasklet(unsigned long data)
+{
+	struct uart_hsu_port *up = (struct uart_hsu_port *)data;
+
+	up->in_tasklet = 1;
+	serial_hsu_command(up);
+	up->tasklet_done++;
+	up->in_tasklet = 0;
+}
+
+static void serial_hsu_work(struct work_struct *work)
+{
+	struct uart_hsu_port *uport =
+		container_of(work, struct uart_hsu_port, work);
+
+	uport->in_workq = 1;
+	serial_hsu_command(uport);
+	uport->workq_done++;
+	uport->in_workq = 0;
+}
+
+static int serial_port_setup(struct uart_hsu_port *up,
+		struct hsu_port_cfg *cfg)
+{
+	int ret;
+	int index = cfg->index;
+
+	phsu->configs[index] = cfg;
+	up->port.line = index;
+	snprintf(up->name, sizeof(up->name) - 1, "%s_p", cfg->name);
+	up->index = index;
+
+	if ((hsu_dma_enable & (1 << index)) && up->dma_ops)
+		up->use_dma = 1;
+	else
+		up->use_dma = 0;
+
+	if (cfg->hw_init)
+		cfg->hw_init(up->dev, index);
+	mutex_init(&up->q_mutex);
+	tasklet_init(&up->tasklet, serial_hsu_tasklet,
+				(unsigned long)up);
+	up->workqueue =
+		create_singlethread_workqueue(up->name);
+	INIT_WORK(&up->work, serial_hsu_work);
+	up->qcirc.buf = (char *)up->qbuf;
+	spin_lock_init(&up->cl_lock);
+	set_bit(flag_cmd_off, &up->flags);
+
+	if (phsu->irq_port_and_dma) {
+		up->dma_irq = up->port.irq;
+		ret = request_irq(up->dma_irq, hsu_dma_irq, IRQF_SHARED,
+				"hsu dma", up);
+		if (ret) {
+			dev_err(up->dev, "can not get dma IRQ\n");
+			return ret;
+		}
+		ret = request_irq(up->port.irq, hsu_port_irq, IRQF_SHARED,
+				up->name, up);
+		if (ret) {
+			dev_err(up->dev, "can not get port IRQ\n");
+			return ret;
+		}
+	} else {
+		up->dma_irq = phsu->dma_irq;
+		ret = request_irq(up->port.irq, hsu_port_irq, IRQF_SHARED,
+				up->name, up);
+		if (ret) {
+			dev_err(up->dev, "can not get port IRQ\n");
+			return ret;
+		}
+	}
+
+	if (cfg->type == debug_port) {
+		serial_hsu_reg.cons = SERIAL_HSU_CONSOLE;
+		if (serial_hsu_reg.cons)
+			serial_hsu_reg.cons->index = index;
+		up->use_dma = 0;
+	} else
+		serial_hsu_reg.cons = NULL;
+	uart_add_one_port(&serial_hsu_reg, &up->port);
+	return 0;
+}
+
+struct uart_hsu_port *serial_hsu_port_setup(struct device *pdev, int port,
+	resource_size_t start, resource_size_t len, int irq)
+{
+	struct uart_hsu_port *up;
+	int index;
+	unsigned int uclk, clock;
+	struct hsu_port_cfg *cfg;
+
+	cfg = hsu_port_func_cfg + port;
+	if (!cfg)
+		return ERR_PTR(-EINVAL);
+
+	pr_info("Found a %s HSU\n", cfg->hw_ip ? "Designware" : "Intel");
+
+	index = cfg->index;
+	up = phsu->port + index;
+
+	up->dev = pdev;
+	up->port.type = PORT_MFD;
+	up->port.iotype = UPIO_MEM;
+	up->port.mapbase = start;
+	up->port.membase = ioremap_nocache(up->port.mapbase, len);
+	up->port.fifosize = 64;
+	up->port.ops = &serial_hsu_pops;
+	up->port.flags = UPF_IOREMAP;
+	up->hw_type = cfg->hw_ip;
+	/* calculate if DLAB=1, the ideal uartclk */
+	if (cfg->hw_get_clk)
+		clock = cfg->hw_get_clk();
+	else
+		clock = 50000;
+	uclk = clock * 1000 / (115200 * 16); /* 16 is default ps */
+	if (uclk >= 24)
+		uclk = 24;
+	else if (uclk >= 16)
+		uclk = 16;
+	else if (uclk >= 8)
+		uclk = 8;
+	else
+		uclk = 1;
+
+	if (up->hw_type == hsu_intel)
+		up->port.uartclk = 115200 * uclk * 16;
+	else
+		up->port.uartclk = 115200 * 32 * 16;
+
+	up->port.irq = irq;
+	up->port.dev = pdev;
+
+	if (up->hw_type == hsu_intel) {
+		up->txc = &phsu->chans[index * 2];
+		up->rxc = &phsu->chans[index * 2 + 1];
+		up->dma_ops = &intel_dma_ops;
+	} else {
+		up->dma_ops = pdw_dma_ops;
+	}
+
+	if (cfg->has_alt) {
+		struct hsu_port_cfg *alt_cfg =
+			hsu_port_func_cfg + cfg->alt;
+		struct uart_hsu_port *alt_up =
+			phsu->port + alt_cfg->index;
+
+		memcpy(alt_up, up, sizeof(*up));
+		serial_port_setup(alt_up, alt_cfg);
+		phsu->port_num++;
+	}
+
+	serial_port_setup(up, cfg);
+	phsu->port_num++;
+
+	return up;
+}
+EXPORT_SYMBOL(serial_hsu_port_setup);
+
+void serial_hsu_port_free(struct uart_hsu_port *up)
+{
+	struct hsu_port_cfg *cfg = phsu->configs[up->index];
+
+	uart_remove_one_port(&serial_hsu_reg, &up->port);
+	free_irq(up->port.irq, up);
+	if (cfg->has_alt) {
+		struct hsu_port_cfg *alt_cfg = phsu->configs[cfg->alt];
+		struct uart_hsu_port *alt_up =
+			phsu->port + alt_cfg->index;
+		uart_remove_one_port(&serial_hsu_reg, &alt_up->port);
+		free_irq(up->port.irq, alt_up);
+	}
+}
+EXPORT_SYMBOL(serial_hsu_port_free);
+
+void serial_hsu_port_shutdown(struct uart_hsu_port *up)
+{
+	uart_suspend_port(&serial_hsu_reg, &up->port);
+}
+EXPORT_SYMBOL(serial_hsu_port_shutdown);
+
+int serial_hsu_dma_setup(struct device *pdev,
+	resource_size_t start, resource_size_t len, unsigned int irq, int share)
+{
+	struct hsu_dma_chan *dchan;
+	int i, ret;
+
+	phsu->reg = ioremap_nocache(start, len);
+	dchan = phsu->chans;
+	for (i = 0; i < 6; i++) {
+		dchan->id = i;
+		dchan->dirt = (i & 0x1) ? DMA_FROM_DEVICE :
+			DMA_TO_DEVICE;
+		dchan->uport = &phsu->port[i/2];
+		dchan->reg = phsu->reg + HSU_DMA_CHANS_REG_OFFSET +
+			i * HSU_DMA_CHANS_REG_LENGTH;
+
+		dchan++;
+	}
+
+	/* will share irq with port if irq < 0 */
+	if (share)
+		phsu->irq_port_and_dma = 1;
+	else {
+		phsu->dma_irq = irq;
+		ret = request_irq(irq, hsu_dma_irq, 0, "hsu dma", phsu);
+		if (ret) {
+			dev_err(pdev, "can not get dma IRQ\n");
+			goto err;
+		}
+	}
+
+	dev_set_drvdata(pdev, phsu);
+
+	return 0;
+err:
+	iounmap(phsu->reg);
+	return ret;
+}
+EXPORT_SYMBOL(serial_hsu_dma_setup);
+
+void serial_hsu_dma_free(void)
+{
+	free_irq(phsu->dma_irq, phsu);
+}
+EXPORT_SYMBOL(serial_hsu_dma_free);
+
+static int __init hsu_init(void)
+{
+	int ret;
+
+	ret = uart_register_driver(&serial_hsu_reg);
+	if (ret)
+		return ret;
+
+	spin_lock_init(&phsu->dma_lock);
+	return hsu_debugfs_init(phsu);
+}
+
+static void __exit hsu_exit(void)
+{
+	uart_unregister_driver(&serial_hsu_reg);
+	hsu_debugfs_remove(phsu);
+}
+
+module_init(hsu_init);
+module_exit(hsu_exit);
+
+MODULE_AUTHOR("Yang Bin <bin.yang@intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:medfield-hsu");
diff --git a/drivers/external_drivers/drivers/hsu/mfd_dma.c b/drivers/external_drivers/drivers/hsu/mfd_dma.c
new file mode 100644
index 0000000..a9371fb
--- /dev/null
+++ b/drivers/external_drivers/drivers/hsu/mfd_dma.c
@@ -0,0 +1,753 @@
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/sysrq.h>
+#include <linux/slab.h>
+#include <linux/serial_reg.h>
+#include <linux/circ_buf.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial_core.h>
+#include <linux/serial_mfd.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/debugfs.h>
+#include <linux/pm_runtime.h>
+#include <linux/intel_mid_pm.h>
+#include <linux/intel_mid_dma.h>
+#include <linux/irq.h>
+#include <linux/acpi.h>
+#include <asm/intel_mid_hsu.h>
+#include <linux/intel_mid_pm.h>
+#include <linux/pm_qos.h>
+
+#include "mfd.h"
+
+static int dma_init_common(struct uart_hsu_port *up)
+{
+	struct hsu_dma_buffer *dbuf;
+	struct circ_buf *xmit = &up->port.state->xmit;
+
+	/* 1. Allocate the RX buffer */
+	dbuf = &up->rxbuf;
+	dbuf->buf = kzalloc(HSU_DMA_BUF_SIZE, GFP_KERNEL);
+	if (!dbuf->buf) {
+		up->use_dma = 0;
+		dev_err(up->dev, "allocate DMA buffer failed!!\n");
+		return -ENOMEM;
+	}
+
+	dbuf->dma_addr = dma_map_single(up->dev,
+			dbuf->buf,
+			HSU_DMA_BUF_SIZE,
+			DMA_FROM_DEVICE);
+	dbuf->dma_size = HSU_DMA_BUF_SIZE;
+
+	/* 2. prepare teh TX buffer */
+	dbuf = &up->txbuf;
+	dbuf->buf = xmit->buf;
+	dbuf->dma_addr = dma_map_single(up->dev,
+			dbuf->buf,
+			UART_XMIT_SIZE,
+			DMA_TO_DEVICE);
+	dbuf->dma_size = UART_XMIT_SIZE;
+	dbuf->ofs = 0;
+	return 0;
+}
+
+static void dma_exit_common(struct uart_hsu_port *up)
+{
+	struct hsu_dma_buffer *dbuf;
+	struct uart_port *port = &up->port;
+
+	/* Free and unmap rx dma buffer */
+	dbuf = &up->rxbuf;
+	dma_unmap_single(port->dev,
+			dbuf->dma_addr,
+			dbuf->dma_size,
+			DMA_FROM_DEVICE);
+	kfree(dbuf->buf);
+
+	/* Next unmap tx dma buffer*/
+	dbuf = &up->txbuf;
+	dma_unmap_single(port->dev,
+			dbuf->dma_addr,
+			dbuf->dma_size,
+			DMA_TO_DEVICE);
+}
+
+#ifdef CONFIG_INTEL_MID_DMAC
+static bool dw_dma_chan_filter(struct dma_chan *chan, void *param)
+{
+	struct dw_dma_priv *dw_dma = param;
+
+	if (dw_dma->dmac && (&dw_dma->dmac->dev == chan->device->dev))
+		return true;
+	else {
+#ifdef CONFIG_ACPI
+		acpi_handle handle = ACPI_HANDLE(chan->device->dev);
+		struct acpi_device *device;
+		int ret;
+		const char *hid;
+		ret = acpi_bus_get_device(handle, &device);
+		if (ret) {
+			pr_warn("DW HSU: no acpi entry\n");
+			return false;
+		}
+		hid = acpi_device_hid(device);
+		if (!strncmp(hid, "INTL9C60", strlen(hid))) {
+			acpi_status status;
+			unsigned long long tmp;
+			status = acpi_evaluate_integer(handle,
+					"_UID", NULL, &tmp);
+			if (!ACPI_FAILURE(status) && (tmp == 1))
+				return true;
+		}
+		if (!strncmp(hid, "80862286", strlen(hid))) {
+			return true;
+		}
+
+#endif
+		return false;
+	}
+}
+
+/* the RX/TX buffer init should be a common stuff */
+static int dw_dma_init(struct uart_hsu_port *up)
+{
+	struct dw_dma_priv *dw_dma;
+	struct intel_mid_dma_slave *rxs, *txs;
+	dma_cap_mask_t mask;
+	int ret = 0;
+
+	dw_dma = kzalloc(sizeof(*dw_dma), GFP_KERNEL);
+	if (!dw_dma) {
+		pr_warn("DW HSU: Can't alloc memory for dw_dm_priv\n");
+		return -1;
+	}
+
+	up->dma_priv = dw_dma;
+
+	/*
+	 * Get pci device for DMA controller, currently it could only
+	 * be the DMA controller of baytrail
+	 */
+	dw_dma->dmac = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0f06, NULL);
+	if (!dw_dma->dmac) {
+		/* still have chance to get from ACPI dev */
+		pr_warn("DW HSU: Can't find LPIO1 DMA controller by PCI, try ACPI\n");
+	}
+
+	ret = dma_init_common(up);
+	if (ret)
+		return ret;
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	/* 1. Init rx channel */
+	dw_dma->rxchan = dma_request_channel(mask, dw_dma_chan_filter, dw_dma);
+	if (!dw_dma->rxchan)
+		goto err_exit;
+	rxs = &dw_dma->rxs;
+	rxs->dma_slave.direction = DMA_FROM_DEVICE;
+	rxs->hs_mode = LNW_DMA_HW_HS;
+	rxs->cfg_mode = LNW_DMA_PER_TO_MEM;
+	rxs->dma_slave.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+
+	/* These are fixed HW info from Baytrail datasheet */
+	if (up->index == 0)
+		rxs->device_instance = 3;
+	else
+		rxs->device_instance = 5;
+	dw_dma->rxchan->private = rxs;
+
+	/* 2. Init tx channel */
+	dw_dma->txchan = dma_request_channel(mask, dw_dma_chan_filter, dw_dma);
+	if (!dw_dma->txchan)
+		goto free_rxchan;
+
+	txs = &dw_dma->txs;
+	txs->dma_slave.direction = DMA_TO_DEVICE;
+	txs->hs_mode = LNW_DMA_HW_HS;
+	txs->cfg_mode = LNW_DMA_MEM_TO_PER;
+	txs->dma_slave.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+	if (up->index == 0)
+		txs->device_instance = 2;
+	else
+		txs->device_instance = 4;
+	dw_dma->txchan->private = txs;
+
+	/* TX/RX reg share the same addr */
+	dw_dma->dma_addr = up->port.mapbase + UART_RX;
+
+	pm_qos_add_request(&up->qos, PM_QOS_CPU_DMA_LATENCY,
+			PM_QOS_DEFAULT_VALUE);
+
+	dw_dma->up = up;
+	up->dma_inited = 1;
+	return 0;
+
+free_rxchan:
+	dma_release_channel(dw_dma->rxchan);
+err_exit:
+	return -1;
+
+}
+
+static int dw_dma_suspend(struct uart_hsu_port *up)
+{
+	struct dw_dma_priv *dw_dma = up->dma_priv;
+	struct dma_chan *txchan;
+	struct dma_chan *rxchan;
+
+	if (!up->dma_inited)
+		return 0;
+
+	txchan = dw_dma->txchan;
+	rxchan = dw_dma->rxchan;
+
+	if (test_bit(flag_rx_on, &up->flags) ||
+		test_bit(flag_rx_pending, &up->flags)) {
+		dev_warn(up->dev, "ignore suspend for rx dma is running\n");
+		return -1;
+	}
+
+	txchan->device->device_control(txchan, DMA_TERMINATE_ALL, 0);
+	rxchan->device->device_control(rxchan, DMA_TERMINATE_ALL, 0);
+
+	txchan->device->device_control(txchan, DMA_PAUSE, 0);
+	rxchan->device->device_control(rxchan, DMA_PAUSE, 0);
+	pm_qos_update_request(&up->qos, PM_QOS_DEFAULT_VALUE);
+	return 0;
+}
+
+static int dw_dma_resume(struct uart_hsu_port *up)
+{
+	struct dw_dma_priv *dw_dma = up->dma_priv;
+	struct dma_chan *txchan;
+	struct dma_chan *rxchan;
+
+	if (!up->dma_inited)
+		return 0;
+
+	txchan = dw_dma->txchan;
+	rxchan = dw_dma->rxchan;
+
+	rxchan->device->device_control(rxchan, DMA_RESUME, 0);
+	txchan->device->device_control(txchan, DMA_RESUME, 0);
+	pm_qos_update_request(&up->qos, CSTATE_EXIT_LATENCY_C2);
+	return 0;
+}
+
+
+static int dw_dma_exit(struct uart_hsu_port *up)
+{
+	struct dw_dma_priv *dw_dma = up->dma_priv;
+	struct dma_chan *txchan = dw_dma->txchan;
+	struct dma_chan *rxchan = dw_dma->rxchan;
+
+	pm_qos_remove_request(&up->qos);
+	txchan->device->device_control(txchan, DMA_TERMINATE_ALL, 0);
+	rxchan->device->device_control(rxchan, DMA_TERMINATE_ALL, 0);
+	dma_release_channel(dw_dma->txchan);
+	dma_release_channel(dw_dma->rxchan);
+
+	dma_exit_common(up);
+
+	kfree(dw_dma);
+
+	up->dma_inited = 0;
+	up->dma_priv = NULL;
+	return 0;
+}
+
+static void dw_dma_tx_done(void *arg)
+{
+	struct dw_dma_priv *dw_dma = arg;
+	struct uart_hsu_port *up = dw_dma->up;
+	struct circ_buf *xmit = &up->port.state->xmit;
+	struct hsu_dma_buffer *dbuf = &up->txbuf;
+	unsigned long flags;
+	int count = 0;
+
+	count = intel_dma_get_src_addr(dw_dma->txchan) - dbuf->dma_addr
+			- xmit->tail;
+
+	/* Update the circ buf info */
+	xmit->tail += dbuf->ofs;
+	xmit->tail &= UART_XMIT_SIZE - 1;
+	up->port.icount.tx += dbuf->ofs;
+
+	dbuf->ofs = 0;
+
+	clear_bit(flag_tx_on, &up->flags);
+
+	if (!uart_circ_empty(xmit) && !uart_tx_stopped(&up->port)) {
+		spin_lock_irqsave(&up->port.lock, flags);
+		serial_sched_cmd(up, qcmd_start_tx);
+		spin_unlock_irqrestore(&up->port.lock, flags);
+	}
+
+	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+		uart_write_wakeup(&up->port);
+}
+
+static void dw_dma_start_tx(struct uart_hsu_port *up)
+{
+	struct dw_dma_priv *dw_dma = up->dma_priv;
+	struct dma_async_tx_descriptor *txdesc = NULL;
+	struct dma_chan *txchan;
+	struct dma_slave_config *txconf;
+	struct hsu_dma_buffer *dbuf = &up->txbuf;
+	struct circ_buf *xmit = &up->port.state->xmit;
+	int count;
+	enum dma_ctrl_flags		flag;
+
+	txchan = dw_dma->txchan;
+	txconf = &dw_dma->txs.dma_slave;
+
+	if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) {
+		if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+			uart_write_wakeup(&up->port);
+		return;
+	}
+
+	/*
+	 * Need to check if FCR is set, better to be set only once when
+	 * use_dma == 1
+	 */
+
+	set_bit(flag_tx_on, &up->flags);
+	count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
+
+	if (count >= 2000)
+		count = 2000;
+
+	dbuf->ofs = count;
+
+
+	if (!count) {
+		pr_err("we see a case of TX Len == 0!!!\n\n");
+		dump_stack();
+		clear_bit(flag_tx_on, &up->flags);
+		return;
+	}
+
+	/* 2. Prepare the TX dma transfer */
+	txconf->direction = DMA_TO_DEVICE;
+	txconf->dst_addr = dw_dma->dma_addr;
+	txconf->src_maxburst = LNW_DMA_MSIZE_8;
+	txconf->dst_maxburst = LNW_DMA_MSIZE_8;
+	txconf->src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+	txconf->dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+
+	txchan->device->device_control(txchan, DMA_SLAVE_CONFIG,
+				       (unsigned long) txconf);
+
+	dma_sync_single_for_device(up->port.dev,
+					   dbuf->dma_addr,
+					   dbuf->dma_size,
+					   DMA_TO_DEVICE);
+
+	flag = DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP | DMA_CTRL_ACK;
+
+	txdesc = txchan->device->device_prep_dma_memcpy(
+					txchan,		/* DMA Channel */
+					dw_dma->dma_addr,	/* DAR */
+					dbuf->dma_addr + xmit->tail, /* SAR */
+					count,		/* Data len */
+					flag);		/* Flag */
+	if (!txdesc) {
+		pr_warn("DW HSU: fail to prepare TX DMA operation\n");
+		return;
+	}
+
+	txdesc->callback = dw_dma_tx_done;
+	txdesc->callback_param = dw_dma;
+	txdesc->tx_submit(txdesc);
+}
+
+static void dw_dma_stop_tx(struct uart_hsu_port *up)
+{
+	struct dw_dma_priv *dw_dma = up->dma_priv;
+	struct dma_chan *txchan = dw_dma->txchan;
+	struct hsu_dma_buffer *dbuf = &up->txbuf;
+	int ret;
+	int count;
+
+	if (!test_bit(flag_tx_on, &up->flags))
+		return;
+
+	count = intel_dma_get_src_addr(dw_dma->txchan) - dbuf->dma_addr;
+
+	/* ? this may be sleepable */
+	ret = txchan->device->device_control(txchan, DMA_TERMINATE_ALL, 0);
+	if (ret)
+		dev_warn(up->dev, "Fail to stop DMA RX channel!\n");
+}
+
+static void dw_dma_rx_done(void *arg)
+{
+	struct dw_dma_priv *dw_dma = arg;
+	struct uart_hsu_port *up = dw_dma->up;
+	struct hsu_dma_buffer *dbuf = &up->rxbuf;
+	struct uart_port *port = &up->port;
+	struct tty_struct *tty;
+	struct tty_port *tport = &port->state->port;
+	int count;
+	unsigned long flags;
+
+	tty = tty_port_tty_get(&up->port.state->port);
+	if (!tty)
+		return;
+
+	dma_sync_single_for_cpu(port->dev, dbuf->dma_addr,
+			dbuf->dma_size, DMA_FROM_DEVICE);
+
+	count = dbuf->ofs;
+	tty_insert_flip_string(tport, dbuf->buf, count);
+	port->icount.rx += count;
+
+	/* Do we really need it for x86? */
+	dma_sync_single_for_device(up->port.dev, dbuf->dma_addr,
+			dbuf->dma_size, DMA_FROM_DEVICE);
+
+	tty_flip_buffer_push(tport);
+	tty_kref_put(tty);
+
+	clear_bit(flag_rx_on, &up->flags);
+
+	spin_lock_irqsave(&up->port.lock, flags);
+	if (test_bit(flag_rx_pending, &up->flags))
+		serial_sched_cmd(up, qcmd_start_rx);
+	spin_unlock_irqrestore(&up->port.lock, flags);
+
+}
+
+
+static void dw_dma_start_rx(struct uart_hsu_port *up)
+{
+	struct dma_async_tx_descriptor *rxdesc = NULL;
+	struct dw_dma_priv *dw_dma = up->dma_priv;
+	struct hsu_dma_buffer *dbuf = &up->rxbuf;
+	struct dma_chan *rxchan = dw_dma->rxchan;
+	struct dma_slave_config *rxconf = &dw_dma->rxs.dma_slave;
+	enum dma_ctrl_flags flag;
+
+	if (test_and_set_bit(flag_rx_on, &up->flags)) {
+		set_bit(flag_rx_pending, &up->flags);
+		return;
+	}
+
+	dbuf->ofs = 2048 - 64;
+
+	/* Prepare the RX dma transfer */
+	rxconf->direction = DMA_FROM_DEVICE;
+	rxconf->src_addr = dw_dma->dma_addr;
+	rxconf->dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+	rxconf->src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+
+	/* feng: better to calculate a best size */
+	rxconf->src_maxburst = LNW_DMA_MSIZE_8;
+	rxconf->dst_maxburst = LNW_DMA_MSIZE_8;
+
+	rxchan->device->device_control(rxchan, DMA_SLAVE_CONFIG,
+				       (unsigned long) rxconf);
+	flag = DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP | DMA_CTRL_ACK;
+	rxdesc = rxchan->device->device_prep_dma_memcpy(
+					rxchan,			/* DMA chan */
+					dbuf->dma_addr,		/* DAR */
+					dw_dma->dma_addr,	/* SAR */
+					dbuf->ofs,		/* data len */
+					flag);
+	if (!rxdesc) {
+		pr_warn("DW HSU: fail to prepare TX DMA operation\n");
+		return;
+	}
+
+	rxdesc->callback = dw_dma_rx_done;
+	rxdesc->callback_param = dw_dma;
+	rxdesc->tx_submit(rxdesc);
+}
+
+static void dw_dma_stop_rx(struct uart_hsu_port *up)
+{
+	struct dw_dma_priv *dw_dma = up->dma_priv;
+	struct hsu_dma_buffer *dbuf = &up->rxbuf;
+	struct dma_chan *rxchan = dw_dma->rxchan;
+	int count, ret;
+	struct uart_port *port = &up->port;
+	struct tty_struct *tty;
+	struct tty_port *tport = &port->state->port;
+
+	if (!test_bit(flag_rx_on, &up->flags)) {
+		clear_bit(flag_rx_pending, &up->flags);
+		return;
+	}
+
+	ret = rxchan->device->device_control(rxchan, DMA_TERMINATE_ALL, 0);
+	if (ret) {
+		WARN(1, "DMA TERMINATE of TX returns error\n");
+		return;
+	}
+
+	tty = tty_port_tty_get(&up->port.state->port);
+	if (!tty)
+		return;
+
+	count = intel_dma_get_dst_addr(rxchan) - dbuf->dma_addr;
+	if (!count)
+		goto exit;
+
+	dma_sync_single_for_cpu(port->dev, dbuf->dma_addr,
+		dbuf->dma_size, DMA_FROM_DEVICE);
+
+	tty_insert_flip_string(tport, dbuf->buf, count);
+	port->icount.rx += count;
+
+	/* Do we really need it for x86? */
+	dma_sync_single_for_device(up->port.dev, dbuf->dma_addr,
+			dbuf->dma_size, DMA_FROM_DEVICE);
+
+	tty_flip_buffer_push(tport);
+
+exit:
+	tty_kref_put(tty);
+	clear_bit(flag_rx_on, &up->flags);
+	clear_bit(flag_rx_pending, &up->flags);
+}
+
+struct hsu_dma_ops dw_dma_ops = {
+	.init =		dw_dma_init,
+	.exit =		dw_dma_exit,
+	.suspend =	dw_dma_suspend,
+	.resume	=	dw_dma_resume,
+	.start_tx =	dw_dma_start_tx,
+	.stop_tx =	dw_dma_stop_tx,
+	.start_rx =	dw_dma_start_rx,
+	.stop_rx =	dw_dma_stop_rx,
+};
+
+struct hsu_dma_ops *pdw_dma_ops = &dw_dma_ops;
+
+#else
+struct hsu_dma_ops *pdw_dma_ops = NULL;
+#endif
+
+/* Intel DMA ops */
+
+/* The buffer is already cache coherent */
+void hsu_dma_start_rx_chan(struct hsu_dma_chan *rxc,
+			struct hsu_dma_buffer *dbuf)
+{
+	dbuf->ofs = 0;
+
+	chan_writel(rxc, HSU_CH_BSR, HSU_DMA_BSR);
+	chan_writel(rxc, HSU_CH_MOTSR, HSU_DMA_MOTSR);
+
+	chan_writel(rxc, HSU_CH_D0SAR, dbuf->dma_addr);
+	chan_writel(rxc, HSU_CH_D0TSR, dbuf->dma_size);
+	chan_writel(rxc, HSU_CH_DCR, 0x1 | (0x1 << 8)
+					 | (0x1 << 16)
+					 | (0x1 << 24)	/* timeout, Errata 1 */
+					 );
+	chan_writel(rxc, HSU_CH_CR, 0x3);
+}
+
+static int intel_dma_init(struct uart_hsu_port *up)
+{
+	int ret;
+
+	clear_bit(flag_tx_on, &up->flags);
+
+	ret = dma_init_common(up);
+	if (ret)
+		return ret;
+
+	/* This should not be changed all around */
+	chan_writel(up->txc, HSU_CH_BSR, HSU_DMA_BSR);
+	chan_writel(up->txc, HSU_CH_MOTSR, HSU_DMA_MOTSR);
+
+	/* Start the RX channel right now */
+	hsu_dma_start_rx_chan(up->rxc, &up->rxbuf);
+
+	up->dma_inited = 1;
+	return 0;
+}
+
+static int intel_dma_exit(struct uart_hsu_port *up)
+{
+	chan_writel(up->txc, HSU_CH_CR, 0x0);
+	clear_bit(flag_tx_on, &up->flags);
+	chan_writel(up->rxc, HSU_CH_CR, 0x2);
+	dma_exit_common(up);
+
+	up->dma_inited = 0;
+	return 0;
+}
+
+
+static void intel_dma_start_tx(struct uart_hsu_port *up)
+{
+	struct circ_buf *xmit = &up->port.state->xmit;
+	struct hsu_dma_buffer *dbuf = &up->txbuf;
+	unsigned long flags;
+	int count;
+
+	spin_lock_irqsave(&up->port.lock, flags);
+	chan_writel(up->txc, HSU_CH_CR, 0x0);
+	while (chan_readl(up->txc, HSU_CH_CR))
+		cpu_relax();
+	clear_bit(flag_tx_on, &up->flags);
+	if (dbuf->ofs) {
+		u32 real = chan_readl(up->txc, HSU_CH_D0SAR) - up->tx_addr;
+
+		/* we found in flow control case, TX irq came without sending
+		 * all TX buffer
+		 */
+		if (real < dbuf->ofs)
+			dbuf->ofs = real; /* adjust to real chars sent */
+
+		/* Update the circ buf info */
+		xmit->tail += dbuf->ofs;
+		xmit->tail &= UART_XMIT_SIZE - 1;
+
+		up->port.icount.tx += dbuf->ofs;
+		dbuf->ofs = 0;
+	}
+
+	if (!uart_circ_empty(xmit) && !uart_tx_stopped(&up->port)) {
+		set_bit(flag_tx_on, &up->flags);
+		dma_sync_single_for_device(up->port.dev,
+					   dbuf->dma_addr,
+					   dbuf->dma_size,
+					   DMA_TO_DEVICE);
+
+		count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
+		dbuf->ofs = count;
+
+		/* Reprogram the channel */
+		up->tx_addr = dbuf->dma_addr + xmit->tail;
+		chan_writel(up->txc, HSU_CH_D0SAR, up->tx_addr);
+		chan_writel(up->txc, HSU_CH_D0TSR, count);
+
+		/* Reenable the channel */
+		chan_writel(up->txc, HSU_CH_DCR, 0x1
+						 | (0x1 << 8)
+						 | (0x1 << 16));
+		chan_writel(up->txc, HSU_CH_CR, 0x1);
+	}
+
+	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+		uart_write_wakeup(&up->port);
+
+	spin_unlock_irqrestore(&up->port.lock, flags);
+	return;
+}
+
+static void intel_dma_stop_tx(struct uart_hsu_port *up)
+{
+	chan_writel(up->txc, HSU_CH_CR, 0x0);
+	return;
+}
+
+static void intel_dma_start_rx(struct uart_hsu_port *up)
+{
+	return;
+}
+
+static void intel_dma_stop_rx(struct uart_hsu_port *up)
+{
+	chan_writel(up->rxc, HSU_CH_CR, 0x2);
+	return;
+}
+
+static void intel_dma_context_op(struct uart_hsu_port *up, int op)
+{
+	if (op == context_save) {
+		up->txc->cr  = chan_readl(up->txc, HSU_CH_CR);
+		up->txc->dcr = chan_readl(up->txc, HSU_CH_DCR);
+		up->txc->sar = chan_readl(up->txc, HSU_CH_D0SAR);
+		up->txc->tsr = chan_readl(up->txc, HSU_CH_D0TSR);
+
+		up->rxc->cr  = chan_readl(up->rxc, HSU_CH_CR);
+		up->rxc->dcr = chan_readl(up->rxc, HSU_CH_DCR);
+		up->rxc->sar = chan_readl(up->rxc, HSU_CH_D0SAR);
+		up->rxc->tsr = chan_readl(up->rxc, HSU_CH_D0TSR);
+	} else {
+		chan_writel(up->txc, HSU_CH_DCR, up->txc->dcr);
+		chan_writel(up->txc, HSU_CH_D0SAR, up->txc->sar);
+		chan_writel(up->txc, HSU_CH_D0TSR, up->txc->tsr);
+		chan_writel(up->txc, HSU_CH_BSR, HSU_DMA_BSR);
+		chan_writel(up->txc, HSU_CH_MOTSR, HSU_DMA_MOTSR);
+
+		chan_writel(up->rxc, HSU_CH_DCR, up->rxc->dcr);
+		chan_writel(up->rxc, HSU_CH_D0SAR, up->rxc->sar);
+		chan_writel(up->rxc, HSU_CH_D0TSR, up->rxc->tsr);
+		chan_writel(up->rxc, HSU_CH_BSR, HSU_DMA_BSR);
+		chan_writel(up->rxc, HSU_CH_MOTSR, HSU_DMA_MOTSR);
+	}
+}
+
+
+static int intel_dma_resume(struct uart_hsu_port *up)
+{
+	chan_writel(up->rxc, HSU_CH_CR, up->rxc_chcr_save);
+	return 0;
+}
+
+static int intel_dma_suspend(struct uart_hsu_port *up)
+{
+	int loop = 100000;
+	struct hsu_dma_chan *chan = up->rxc;
+
+	up->rxc_chcr_save = chan_readl(up->rxc, HSU_CH_CR);
+
+	if (test_bit(flag_startup, &up->flags)
+			&& serial_in(up, UART_FOR) & 0x7F) {
+		dev_err(up->dev, "ignore suspend for rx fifo\n");
+		return -1;
+	}
+
+	if (chan_readl(up->txc, HSU_CH_CR)) {
+		dev_info(up->dev, "ignore suspend for tx dma\n");
+		return -1;
+	}
+
+	chan_writel(up->rxc, HSU_CH_CR, 0x2);
+	while (--loop) {
+		if (chan_readl(up->rxc, HSU_CH_CR) == 0x2)
+			break;
+		cpu_relax();
+	}
+
+	if (!loop) {
+		dev_err(up->dev, "Can't stop rx dma\n");
+		return -1;
+	}
+
+	if (chan_readl(chan, HSU_CH_D0SAR) - up->rxbuf.dma_addr) {
+		dev_err(up->dev, "ignore suspend for dma pointer\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+struct hsu_dma_ops intel_dma_ops = {
+	.init =		intel_dma_init,
+	.exit =		intel_dma_exit,
+	.suspend =	intel_dma_suspend,
+	.resume	=	intel_dma_resume,
+	.start_tx =	intel_dma_start_tx,
+	.stop_tx =	intel_dma_stop_tx,
+	.start_rx =	intel_dma_start_rx,
+	.stop_rx =	intel_dma_stop_rx,
+	.context_op =	intel_dma_context_op,
+};
+
+
diff --git a/drivers/external_drivers/drivers/hsu/mfd_pci.c b/drivers/external_drivers/drivers/hsu/mfd_pci.c
new file mode 100644
index 0000000..90616f1
--- /dev/null
+++ b/drivers/external_drivers/drivers/hsu/mfd_pci.c
@@ -0,0 +1,297 @@
+/*
+ * mfd_pci.c: driver for High Speed UART device of Intel Medfield platform
+ *
+ * Refer pxa.c, 8250.c and some other drivers in drivers/serial/
+ *
+ * (C) Copyright 2010 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+/* Notes:
+ * 1. DMA channel allocation: 0/1 channel are assigned to port 0,
+ *    2/3 chan to port 1, 4/5 chan to port 3. Even number chans
+ *    are used for RX, odd chans for TX
+ *
+ * 2. The RI/DSR/DCD/DTR are not pinned out, DCD & DSR are always
+ *    asserted, only when the HW is reset the DDCD and DDSR will
+ *    be triggered
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/pm_runtime.h>
+#include <linux/intel_mid_pm.h>
+#include <linux/pm_qos.h>
+
+#include "mfd.h"
+
+#ifdef CONFIG_PM
+static int serial_hsu_pci_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+	struct uart_hsu_port *up = pci_get_drvdata(pdev);
+	int ret = 0;
+
+	if (up) {
+		trace_hsu_func_start(up->index, __func__);
+		ret = serial_hsu_do_suspend(up);
+		trace_hsu_func_end(up->index, __func__, "");
+	}
+	return ret;
+}
+
+static int serial_hsu_pci_resume(struct device *dev)
+{
+	struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+	struct uart_hsu_port *up = pci_get_drvdata(pdev);
+	int ret = 0;
+
+	if (up) {
+		trace_hsu_func_start(up->index, __func__);
+		ret = serial_hsu_do_resume(up);
+		trace_hsu_func_end(up->index, __func__, "");
+	}
+	return ret;
+}
+#else
+#define serial_hsu_pci_suspend	NULL
+#define serial_hsu_pci_resume	NULL
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int serial_hsu_pci_runtime_idle(struct device *dev)
+{
+	struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+	struct uart_hsu_port *up = pci_get_drvdata(pdev);
+
+	return serial_hsu_do_runtime_idle(up);
+}
+
+static int serial_hsu_pci_runtime_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+	struct uart_hsu_port *up = pci_get_drvdata(pdev);
+	int ret = 0;
+
+	trace_hsu_func_start(up->index, __func__);
+	ret = serial_hsu_do_suspend(up);
+	trace_hsu_func_end(up->index, __func__, "");
+	return ret;
+}
+
+static int serial_hsu_pci_runtime_resume(struct device *dev)
+{
+	struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+	struct uart_hsu_port *up = pci_get_drvdata(pdev);
+	int ret = 0;
+
+	trace_hsu_func_start(up->index, __func__);
+	ret = serial_hsu_do_resume(up);
+	trace_hsu_func_end(up->index, __func__, "");
+	return ret;
+}
+#else
+#define serial_hsu_pci_runtime_idle		NULL
+#define serial_hsu_pci_runtime_suspend	NULL
+#define serial_hsu_pci_runtime_resume	NULL
+#endif
+
+static const struct dev_pm_ops serial_hsu_pci_pm_ops = {
+
+	SET_SYSTEM_SLEEP_PM_OPS(serial_hsu_pci_suspend,
+				serial_hsu_pci_resume)
+	SET_RUNTIME_PM_OPS(serial_hsu_pci_runtime_suspend,
+				serial_hsu_pci_runtime_resume,
+				serial_hsu_pci_runtime_idle)
+};
+
+DEFINE_PCI_DEVICE_TABLE(hsuart_port_pci_ids) = {
+	{ PCI_VDEVICE(INTEL, 0x081B), hsu_port0 },
+	{ PCI_VDEVICE(INTEL, 0x081C), hsu_port1 },
+	{ PCI_VDEVICE(INTEL, 0x081D), hsu_port2 },
+	/* Cloverview support */
+	{ PCI_VDEVICE(INTEL, 0x08FC), hsu_port0 },
+	{ PCI_VDEVICE(INTEL, 0x08FD), hsu_port1 },
+	{ PCI_VDEVICE(INTEL, 0x08FE), hsu_port2 },
+	/* Tangier and Anniedale support */
+	{ PCI_VDEVICE(INTEL, 0x1191), hsu_port0 },
+	/* VLV2 support FDK only */
+	{ PCI_VDEVICE(INTEL, 0x0F0A), hsu_port0 },
+	{ PCI_VDEVICE(INTEL, 0x0F0C), hsu_port1 },
+	/* CHV support, enume by ACPI not PCI now */
+	{ PCI_VDEVICE(INTEL, 0x228A), hsu_port0 },
+	{ PCI_VDEVICE(INTEL, 0x228C), hsu_port1 },
+	{},
+};
+
+DEFINE_PCI_DEVICE_TABLE(hsuart_dma_pci_ids) = {
+	{ PCI_VDEVICE(INTEL, 0x081E), hsu_dma },
+	/* Cloverview support */
+	{ PCI_VDEVICE(INTEL, 0x08FF), hsu_dma },
+	/* Tangier and Anniedale support */
+	{ PCI_VDEVICE(INTEL, 0x1192), hsu_dma },
+	{},
+};
+
+static int serial_hsu_pci_port_probe(struct pci_dev *pdev,
+				const struct pci_device_id *ent)
+{
+	struct uart_hsu_port *up;
+	int ret, port;
+	resource_size_t start, len;
+
+	start = pci_resource_start(pdev, 0);
+	len = pci_resource_len(pdev, 0);
+
+	dev_info(&pdev->dev,
+		"FUNC: %d driver: %ld addr:%lx len:%lx\n",
+		PCI_FUNC(pdev->devfn), ent->driver_data,
+		(ulong) start, (ulong) len);
+
+	port = intel_mid_hsu_func_to_port(PCI_FUNC(pdev->devfn));
+	if (port == -1)
+		return 0;
+
+	ret = pci_enable_device(pdev);
+	if (ret)
+		return ret;
+
+	ret = pci_request_region(pdev, 0, "hsu");
+	if (ret)
+		goto err;
+
+	up = serial_hsu_port_setup(&pdev->dev, port, start, len,
+			pdev->irq);
+	if (IS_ERR(up))
+		goto err;
+
+	pci_set_drvdata(pdev, up);
+
+	pm_runtime_put_noidle(&pdev->dev);
+	pm_runtime_allow(&pdev->dev);
+	return 0;
+err:
+	pci_disable_device(pdev);
+	return ret;
+}
+
+static void serial_hsu_pci_port_remove(struct pci_dev *pdev)
+{
+	struct uart_hsu_port *up = pci_get_drvdata(pdev);
+
+	pm_runtime_forbid(&pdev->dev);
+	pm_runtime_get_noresume(&pdev->dev);
+	serial_hsu_port_free(up);
+	pci_set_drvdata(pdev, NULL);
+	pci_disable_device(pdev);
+}
+
+static void serial_hsu_pci_port_shutdown(struct pci_dev *pdev)
+{
+	struct uart_hsu_port *up = pci_get_drvdata(pdev);
+
+	if (!up)
+		return;
+
+	serial_hsu_port_shutdown(up);
+}
+
+static struct pci_driver hsu_port_pci_driver = {
+	.name =		"HSU serial",
+	.id_table =	hsuart_port_pci_ids,
+	.probe =	serial_hsu_pci_port_probe,
+	.remove =	serial_hsu_pci_port_remove,
+	.shutdown =	serial_hsu_pci_port_shutdown,
+/* Disable PM only when kgdb(poll mode uart) is enabled */
+#if defined(CONFIG_PM) && !defined(CONFIG_CONSOLE_POLL)
+	.driver = {
+		.pm = &serial_hsu_pci_pm_ops,
+	},
+#endif
+};
+
+static int serial_hsu_pci_dma_probe(struct pci_dev *pdev,
+				const struct pci_device_id *ent)
+{
+	int ret, share_irq = 0;
+	resource_size_t start, len;
+
+	start = pci_resource_start(pdev, 0);
+	len = pci_resource_len(pdev, 0);
+
+	dev_info(&pdev->dev,
+		"FUNC: %d driver: %ld addr:%lx len:%lx\n",
+		PCI_FUNC(pdev->devfn), ent->driver_data,
+		(ulong) pci_resource_start(pdev, 0),
+		(ulong) pci_resource_len(pdev, 0));
+
+	ret = pci_enable_device(pdev);
+	if (ret)
+		return ret;
+
+	ret = pci_request_region(pdev, 0, "hsu dma");
+	if (ret)
+		goto err;
+
+	/* share irq with port? ANN all and TNG chip from B0 stepping */
+	if ((intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER &&
+		pdev->revision >= 0x1) ||
+		intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_ANNIEDALE)
+		share_irq = 1;
+
+	ret = serial_hsu_dma_setup(&pdev->dev, start, len, pdev->irq, share_irq);
+	if (ret)
+		goto err;
+
+	return 0;
+err:
+	pci_disable_device(pdev);
+	return ret;
+}
+
+static void serial_hsu_pci_dma_remove(struct pci_dev *pdev)
+{
+	serial_hsu_dma_free();
+	pci_disable_device(pdev);
+	pci_unregister_driver(&hsu_port_pci_driver);
+}
+
+static struct pci_driver hsu_dma_pci_driver = {
+	.name =		"HSU DMA",
+	.id_table =	hsuart_dma_pci_ids,
+	.probe =	serial_hsu_pci_dma_probe,
+	.remove =	serial_hsu_pci_dma_remove,
+};
+
+static int __init hsu_pci_init(void)
+{
+	int ret;
+
+	ret = pci_register_driver(&hsu_dma_pci_driver);
+	if (!ret) {
+		ret = pci_register_driver(&hsu_port_pci_driver);
+		if (ret)
+			pci_unregister_driver(&hsu_dma_pci_driver);
+	}
+
+	return ret;
+}
+
+static void __exit hsu_pci_exit(void)
+{
+	pci_unregister_driver(&hsu_port_pci_driver);
+	pci_unregister_driver(&hsu_dma_pci_driver);
+}
+
+module_init(hsu_pci_init);
+module_exit(hsu_pci_exit);
+
+MODULE_AUTHOR("Yang Bin <bin.yang@intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:medfield-hsu");
diff --git a/drivers/external_drivers/drivers/hsu/mfd_plat.c b/drivers/external_drivers/drivers/hsu/mfd_plat.c
new file mode 100644
index 0000000..5ff35df
--- /dev/null
+++ b/drivers/external_drivers/drivers/hsu/mfd_plat.c
@@ -0,0 +1,247 @@
+/*
+ * mfd_plat.c: driver for High Speed UART device of Intel Medfield platform
+ *
+ * (C) Copyright 2013 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/pm_runtime.h>
+#include <linux/acpi.h>
+#include <linux/intel_mid_pm.h>
+#include <linux/pm_qos.h>
+#include <linux/pci.h>
+
+#include "mfd.h"
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id hsu_acpi_ids[] = {
+	{ "80860F0A", hsu_vlv2 },
+	{ "8086228A", hsu_chv },
+	{ }
+};
+MODULE_DEVICE_TABLE(acpi, hsu_acpi_ids);
+#endif
+
+#ifdef CONFIG_PM
+static int serial_hsu_plat_suspend(struct device *dev)
+{
+	struct uart_hsu_port *up = dev_get_drvdata(dev);
+	int ret = 0;
+
+	if (up) {
+		trace_hsu_func_start(up->index, __func__);
+		ret = serial_hsu_do_suspend(up);
+		trace_hsu_func_end(up->index, __func__, "");
+	}
+	return ret;
+}
+
+static int serial_hsu_plat_resume(struct device *dev)
+{
+	struct uart_hsu_port *up = dev_get_drvdata(dev);
+	int ret = 0;
+
+	if (up) {
+		trace_hsu_func_start(up->index, __func__);
+		ret = serial_hsu_do_resume(up);
+		trace_hsu_func_end(up->index, __func__, "");
+	}
+	return ret;
+}
+#else
+#define serial_hsu_plat_suspend	NULL
+#define serial_hsu_plat_resume	NULL
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int serial_hsu_plat_runtime_idle(struct device *dev)
+{
+	struct uart_hsu_port *up = dev_get_drvdata(dev);
+
+	return serial_hsu_do_runtime_idle(up);
+}
+
+static int serial_hsu_plat_runtime_suspend(struct device *dev)
+{
+	struct uart_hsu_port *up = dev_get_drvdata(dev);
+	int ret = 0;
+
+	trace_hsu_func_start(up->index, __func__);
+	ret = serial_hsu_do_suspend(up);
+	trace_hsu_func_end(up->index, __func__, "");
+	return ret;
+}
+
+static int serial_hsu_plat_runtime_resume(struct device *dev)
+{
+	struct uart_hsu_port *up = dev_get_drvdata(dev);
+	int ret = 0;
+
+	trace_hsu_func_start(up->index, __func__);
+	ret = serial_hsu_do_resume(up);
+	trace_hsu_func_end(up->index, __func__, "");
+	return ret;
+}
+#else
+#define serial_hsu_plat_runtime_idle		NULL
+#define serial_hsu_plat_runtime_suspend	NULL
+#define serial_hsu_plat_runtime_resume	NULL
+#endif
+
+static const struct dev_pm_ops serial_hsu_plat_pm_ops = {
+
+	SET_SYSTEM_SLEEP_PM_OPS(serial_hsu_plat_suspend,
+				serial_hsu_plat_resume)
+	SET_RUNTIME_PM_OPS(serial_hsu_plat_runtime_suspend,
+				serial_hsu_plat_runtime_resume,
+				serial_hsu_plat_runtime_idle)
+};
+
+static int serial_hsu_plat_port_probe(struct platform_device *pdev)
+{
+	struct uart_hsu_port *up;
+	int port = pdev->id, irq;
+	struct resource *mem, *ioarea;
+	resource_size_t start, len;
+
+#ifdef CONFIG_ACPI
+	const struct acpi_device_id *id;
+	for (id = hsu_acpi_ids; id->id[0]; id++)
+		if (!strncmp(id->id, dev_name(&pdev->dev), strlen(id->id))) {
+			acpi_status status;
+			unsigned long long tmp;
+
+			status = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
+					"_UID", NULL, &tmp);
+			if (ACPI_FAILURE(status))
+				return -ENODEV;
+			port = tmp - 1;
+			if (intel_mid_hsu_plat_init(port,
+				id->driver_data, &pdev->dev))
+				return -ENODEV;
+		}
+#endif
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!mem) {
+		dev_err(&pdev->dev, "no mem resource?\n");
+		return -EINVAL;
+	}
+	start = mem->start;
+	len = resource_size(mem);
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "no irq resource?\n");
+		return irq; /* -ENXIO */
+	}
+
+	ioarea = request_mem_region(mem->start, resource_size(mem),
+			pdev->name);
+	if (!ioarea) {
+		dev_err(&pdev->dev, "HSU region already claimed\n");
+		return -EBUSY;
+	}
+
+	up = serial_hsu_port_setup(&pdev->dev, port, start, len,
+			irq);
+	if (IS_ERR(up)) {
+		release_mem_region(mem->start, resource_size(mem));
+		dev_err(&pdev->dev, "failed to setup HSU\n");
+		return -EINVAL;
+	}
+
+	platform_set_drvdata(pdev, up);
+
+	if (!pdev->dev.dma_mask) {
+		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+		pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+	}
+	dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+	pm_runtime_allow(&pdev->dev);
+
+	return 0;
+}
+
+static int serial_hsu_plat_port_remove(struct platform_device *pdev)
+{
+	struct uart_hsu_port *up = platform_get_drvdata(pdev);
+	struct resource *mem;
+
+	pm_runtime_forbid(&pdev->dev);
+	serial_hsu_port_free(up);
+	platform_set_drvdata(pdev, NULL);
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (mem)
+		release_mem_region(mem->start, resource_size(mem));
+
+	return 0;
+}
+
+static void serial_hsu_plat_port_shutdown(struct platform_device *pdev)
+{
+	struct uart_hsu_port *up = platform_get_drvdata(pdev);
+
+	if (!up)
+		return;
+
+	serial_hsu_port_shutdown(up);
+}
+
+static struct platform_driver hsu_plat_driver = {
+	.remove		= serial_hsu_plat_port_remove,
+	.shutdown 	= serial_hsu_plat_port_shutdown,
+	.driver		= {
+		.name	= "HSU serial",
+		.owner	= THIS_MODULE,
+/* Disable PM only when kgdb(poll mode uart) is enabled */
+#if defined(CONFIG_PM) && !defined(CONFIG_CONSOLE_POLL)
+		.pm     = &serial_hsu_plat_pm_ops,
+#endif
+#ifdef CONFIG_ACPI
+		.acpi_match_table = ACPI_PTR(hsu_acpi_ids),
+#endif
+	},
+};
+
+static int __init hsu_plat_init(void)
+{
+	struct pci_dev *hsu_pci;
+
+	/*
+	 * Try to get pci device, if exist, then exit ACPI platform
+	 * register, On BYT FDK, include two enum mode: PCI, ACPI,
+	 * ignore ACPI enum mode.
+	 */
+	hsu_pci = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0F0A, NULL);
+	if (hsu_pci) {
+		pr_info("HSU serial: Find HSU controller in PCI device, "
+			"exit ACPI platform register!\n");
+		return 0;
+	}
+
+	return platform_driver_probe(&hsu_plat_driver, serial_hsu_plat_port_probe);
+}
+
+static void __exit hsu_plat_exit(void)
+{
+	platform_driver_unregister(&hsu_plat_driver);
+}
+
+module_init(hsu_plat_init);
+module_exit(hsu_plat_exit);
+
+MODULE_AUTHOR("Jason Chen <jason.cj.chen@intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:medfield-hsu-plat");
diff --git a/drivers/external_drivers/drivers/hsu/mfd_trace.h b/drivers/external_drivers/drivers/hsu/mfd_trace.h
new file mode 100644
index 0000000..49afd9b
--- /dev/null
+++ b/drivers/external_drivers/drivers/hsu/mfd_trace.h
@@ -0,0 +1,197 @@
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE mfd_trace
+
+#define TRACE_SYSTEM hsu
+
+#if !defined(_TRACE_HSU_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HSU_H
+
+#include <linux/tracepoint.h>
+
+#define hsucmd_name(cmd) { cmd, #cmd }
+#define show_hsucmd_name(val)			\
+	__print_symbolic(val,			\
+		hsucmd_name(qcmd_overflow),	\
+		hsucmd_name(qcmd_get_msr),	\
+		hsucmd_name(qcmd_set_mcr),	\
+		hsucmd_name(qcmd_set_ier),	\
+		hsucmd_name(qcmd_start_rx),	\
+		hsucmd_name(qcmd_stop_rx),	\
+		hsucmd_name(qcmd_start_tx),	\
+		hsucmd_name(qcmd_stop_tx),	\
+		hsucmd_name(qcmd_cl),		\
+		hsucmd_name(qcmd_port_irq),	\
+		hsucmd_name(qcmd_dma_irq),	\
+		hsucmd_name(qcmd_enable_irq),   \
+		hsucmd_name(qcmd_cmd_off))
+
+
+TRACE_EVENT(hsu_cmd_insert,
+
+	TP_PROTO(unsigned port, char cmd),
+
+	TP_ARGS(port, cmd),
+
+	TP_STRUCT__entry(
+		__field(unsigned, port)
+		__field(char, cmd)
+	),
+
+	TP_fast_assign(
+		__entry->port = port;
+		__entry->cmd = cmd;
+	),
+
+	TP_printk("port=%u cmd=%s", __entry->port,
+		show_hsucmd_name(__entry->cmd))
+);
+
+TRACE_EVENT(hsu_cmd_add,
+
+	TP_PROTO(unsigned port, char cmd),
+
+	TP_ARGS(port, cmd),
+
+	TP_STRUCT__entry(
+		__field(unsigned, port)
+		__field(char, cmd)
+	),
+
+	TP_fast_assign(
+		__entry->port = port;
+		__entry->cmd = cmd;
+	),
+
+	TP_printk("port=%u cmd=%s", __entry->port,
+		show_hsucmd_name(__entry->cmd))
+);
+
+TRACE_EVENT(hsu_cmd_start,
+
+	TP_PROTO(unsigned port, char cmd),
+
+	TP_ARGS(port, cmd),
+
+	TP_STRUCT__entry(
+		__field(unsigned, port)
+		__field(char, cmd)
+	),
+
+	TP_fast_assign(
+		__entry->port = port;
+		__entry->cmd = cmd;
+	),
+
+	TP_printk("port=%u cmd=%s", __entry->port,
+		show_hsucmd_name(__entry->cmd))
+);
+
+TRACE_EVENT(hsu_cmd_end,
+
+	TP_PROTO(unsigned port, char cmd),
+
+	TP_ARGS(port, cmd),
+
+	TP_STRUCT__entry(
+		__field(unsigned, port)
+		__field(char, cmd)
+	),
+
+	TP_fast_assign(
+		__entry->port = port;
+		__entry->cmd = cmd;
+	),
+
+	TP_printk("port=%u cmd=%s", __entry->port,
+		show_hsucmd_name(__entry->cmd))
+);
+
+TRACE_EVENT(hsu_func_start,
+
+	TP_PROTO(unsigned port, const char *func),
+
+	TP_ARGS(port, func),
+
+	TP_STRUCT__entry(
+		__field(unsigned, port)
+		__string(name, func)
+	),
+
+	TP_fast_assign(
+		__entry->port = port;
+		__assign_str(name, func);
+	),
+
+	TP_printk("port=%u func=%s", __entry->port,
+		__get_str(name))
+);
+
+TRACE_EVENT(hsu_func_end,
+
+	TP_PROTO(unsigned port, const char *func, char *err),
+
+	TP_ARGS(port, func, err),
+
+	TP_STRUCT__entry(
+		__field(unsigned, port)
+		__string(name, func)
+		__string(ret, err)
+	),
+
+	TP_fast_assign(
+		__entry->port = port;
+		__assign_str(name, func);
+		__assign_str(ret, err);
+	),
+
+	TP_printk("port=%u func=%s err=%s", __entry->port,
+		__get_str(name), __get_str(ret))
+);
+
+TRACE_EVENT(hsu_mctrl,
+
+	TP_PROTO(unsigned port, unsigned mctrl),
+
+	TP_ARGS(port, mctrl),
+
+	TP_STRUCT__entry(
+		__field(unsigned, port)
+		__field(unsigned, mctrl)
+	),
+
+	TP_fast_assign(
+		__entry->port = port;
+		__entry->mctrl = mctrl;
+	),
+
+	TP_printk("port=%u mctrl=%d", __entry->port, __entry->mctrl)
+);
+
+TRACE_EVENT(hsu_set_termios,
+
+	TP_PROTO(unsigned port, unsigned int baud, int ctsrts),
+
+	TP_ARGS(port, baud, ctsrts),
+
+	TP_STRUCT__entry(
+		__field(unsigned, port)
+		__field(unsigned int, baud)
+		__field(int, ctsrts)
+	),
+
+	TP_fast_assign(
+		__entry->port = port;
+		__entry->baud = baud;
+		__entry->ctsrts = ctsrts;
+	),
+
+	TP_printk("port=%u baud=%d ctsrts=%d", __entry->port,
+		__entry->baud, __entry->ctsrts)
+);
+
+#endif /* if !defined(_TRACE_HSU_H) || defined(TRACE_HEADER_MULTI_READ) */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/external_drivers/drivers/hwmon/Kconfig b/drivers/external_drivers/drivers/hwmon/Kconfig
new file mode 100644
index 0000000..91f1814
--- /dev/null
+++ b/drivers/external_drivers/drivers/hwmon/Kconfig
@@ -0,0 +1,19 @@
+config SENSORS_MRFL_OCD
+	tristate "Basincove BCU driver for Merrifield"
+	depends on INTEL_SCU_IPC
+	help
+	  Say yes here to enable current monitoring driver for
+	  merrifield platform.
+
+	  This driver is specific to basin cove PMIC used in the
+          Merrifield platform.
+
+config SENSORS_PSH_MRLD
+        tristate "Intel PSH driver for Merrifield"
+        depends on INTEL_PSH_IPC && INTEL_SCU_IPC_UTIL
+        help
+          Say Y here to enable PSH driver on Intel Merrifield Platform, and
+          this driver depends on PSH IPC and SCU IPC driver.
+
+          This driver can also be built as a module. If so, the module
+          will be called psh.
diff --git a/drivers/external_drivers/drivers/hwmon/Makefile b/drivers/external_drivers/drivers/hwmon/Makefile
new file mode 100644
index 0000000..4cdf5c2
--- /dev/null
+++ b/drivers/external_drivers/drivers/hwmon/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_SENSORS_MRFL_OCD)		+= intel_mrfl_ocd.o
+obj-$(CONFIG_SENSORS_PSH_MRLD)		+= psh_ia_common.o psh.o
diff --git a/drivers/external_drivers/drivers/hwmon/intel_mrfl_ocd.c b/drivers/external_drivers/drivers/hwmon/intel_mrfl_ocd.c
new file mode 100644
index 0000000..9658e5c
--- /dev/null
+++ b/drivers/external_drivers/drivers/hwmon/intel_mrfl_ocd.c
@@ -0,0 +1,1118 @@
+/*
+ * intel_mrfl_ocd.c - Intel Merrifield Platform Over Current Detection Driver
+ *
+ *
+ * Copyright (C) 2011 Intel Corporation
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * Author: Durgadoss R <durgadoss.r@intel.com>
+ *
+ * This driver monitors the voltage level of the system. When the voltage
+ * drops below a programmed threshold, it notifies the CPU of the drop.
+ * Also, the driver configures the HW to take some actions to prevent
+ * system crash due to sudden drop in voltage.
+ * DEVICE_NAME: Intel Merrifield platform - PMIC: Burst Control Unit
+ */
+
+#define pr_fmt(fmt)  "intel_mrfl_ocd: " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/hwmon.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/rpmsg.h>
+#include <linux/debugfs.h>
+#include <linux/power_supply.h>
+#include <asm/intel_scu_ipc.h>
+#include <asm/intel_scu_pmic.h>
+#include <asm/intel_basincove_ocd.h>
+#include <linux/version.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
+#include <linux/platform_data/intel_mid_remoteproc.h>
+#else
+#include <asm/intel_mid_remoteproc.h>
+#endif
+
+#define DRIVER_NAME "bcove_bcu"
+
+#define CAMFLASH_STATE_NORMAL	0
+#define CAMFLASH_STATE_CRITICAL	3
+
+/* 'enum' of BCU events */
+enum bcu_events { VWARN1, VWARN2, VCRIT, GSMPULSE, TXPWRTH, UNKNOWN, __COUNT };
+
+static DEFINE_MUTEX(ocd_update_lock);
+
+/* Warning levels for Voltage (in mV) */
+static const unsigned long volt_thresholds[NUM_THRESHOLDS] = {
+			2550, 2600, 2700, 2750, 2800, 2900, 3000, 3100 };
+
+/* Warning levels for Current (in mA) */
+static const unsigned long curr_thresholds[NUM_THRESHOLDS] = {
+			1600, 2000, 2400, 2600, 3000, 3200, 3400, 3600 };
+
+struct ocd_info {
+	struct device *dev;
+	struct platform_device *pdev;
+	struct delayed_work vwarn2_irq_work;
+	void *bcu_intr_addr;
+	int irq;
+};
+
+static uint8_t cam_flash_state;
+static uint32_t intr_count_lvl1;
+static uint32_t intr_count_lvl2;
+static uint32_t intr_count_lvl3;
+
+static void enable_volt_trip_points(void)
+{
+	int i;
+	int ret;
+
+	/*
+	 * Enable the Voltage comparator logic, so that the output
+	 * signals are asserted when a voltage drop occurs.
+	 */
+	for (i = 0; i < NUM_VOLT_LEVELS; i++) {
+		ret = intel_scu_ipc_update_register(VWARN1_CFG + i,
+						VWARN_EN,
+						VWARN_EN_MASK);
+		if (ret)
+			pr_err("EM_BCU: Error in %s updating register 0x%x\n",
+					__func__, (VWARN1_CFG + i));
+	}
+}
+
+static void enable_current_trip_points(void)
+{
+	int i;
+	int ret;
+
+	/*
+	 * Enable the Current comparator logic, so that the output
+	 * signals are asserted when the platform current surges.
+	 */
+	for (i = 0; i < NUM_CURR_LEVELS; i++) {
+		ret = intel_scu_ipc_update_register(ICCMAXVCC_CFG + i,
+						ICCMAXVCC_EN,
+						ICCMAXVCC_EN_MASK);
+		if (ret)
+			pr_err("EM_BCU: Error in %s updating reg 0x%0x\n",
+					__func__, (ICCMAXVCC_CFG + i));
+	}
+}
+
+static int find_threshold(const unsigned long *arr,
+				unsigned long value)
+{
+	int pos = 0;
+
+	if (value < arr[0] || value > arr[NUM_THRESHOLDS - 1])
+		return -EINVAL;
+
+	/* Find the index of 'value' in the thresholds array */
+	while (pos < NUM_THRESHOLDS && value >= arr[pos])
+		++pos;
+
+	return pos - 1;
+}
+
+static int set_threshold(u16 reg_addr, int pos)
+{
+	int ret;
+	uint8_t data;
+
+	mutex_lock(&ocd_update_lock);
+
+	ret = intel_scu_ipc_ioread8(reg_addr, &data);
+	if (ret)
+		goto ipc_fail;
+
+	/* Set bits [0-2] to value of pos */
+	data = (data & 0xF8) | pos;
+
+	ret = intel_scu_ipc_iowrite8(reg_addr, data);
+
+ipc_fail:
+	mutex_unlock(&ocd_update_lock);
+	return ret;
+}
+
+static int program_bcu(void *ocd_smip_addr)
+{
+	int ret, i;
+	u8 *smip_data;
+
+	if (!ocd_smip_addr)
+		return -ENXIO;
+
+	smip_data = (u8 *)ocd_smip_addr;
+	mutex_lock(&ocd_update_lock);
+
+	for (i = 0; i < NUM_SMIP_BYTES-1; i++, smip_data++) {
+		ret = intel_scu_ipc_iowrite8(VWARN1_CFG + i, *smip_data);
+		if (ret)
+			goto ipc_fail;
+	}
+
+	/* MBCUIRQ register address not consecutive with other BCU registers */
+	ret = intel_scu_ipc_iowrite8(MBCUIRQ, *smip_data);
+	if (ret) {
+		pr_err("EM_BCU: Inside %s error(%d) in writing addr 0x%02x\n",
+				__func__, ret, MBCUIRQ);
+		goto ipc_fail;
+	}
+	pr_debug("EM_BCU: Registers are programmed successfully.\n");
+
+ipc_fail:
+	mutex_unlock(&ocd_update_lock);
+	return ret;
+}
+
+static ssize_t store_curr_thres(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned long curnt;
+	int pos, ret;
+	struct sensor_device_attribute_2 *s_attr =
+					to_sensor_dev_attr_2(attr);
+
+	if (kstrtoul(buf, 10, &curnt))
+		return -EINVAL;
+
+	pos = find_threshold(curr_thresholds, curnt);
+	if (pos < 0)
+		return -EINVAL;
+
+	/*
+	 * Since VCC_CFG and VNN_CFG are consecutive registers, calculate the
+	 * required register address using s_attr->nr.
+	 */
+	ret = set_threshold(ICCMAXVCC_CFG + s_attr->nr, pos);
+
+	return ret ? ret : count;
+}
+
+static ssize_t show_curr_thres(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	int ret;
+	uint8_t data;
+	struct sensor_device_attribute_2 *s_attr =
+					to_sensor_dev_attr_2(attr);
+
+	mutex_lock(&ocd_update_lock);
+
+	ret = intel_scu_ipc_ioread8(ICCMAXVCC_CFG + s_attr->nr, &data);
+
+	mutex_unlock(&ocd_update_lock);
+
+	if (ret)
+		return ret;
+
+	/* Read bits [0-2] of data to get the index into the array */
+	return sprintf(buf, "%lu\n", curr_thresholds[data & 0x07]);
+}
+
+static ssize_t store_volt_thres(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned long volt;
+	int pos, ret;
+	struct sensor_device_attribute_2 *s_attr =
+					to_sensor_dev_attr_2(attr);
+
+	if (kstrtoul(buf, 10, &volt))
+		return -EINVAL;
+
+	pos = find_threshold(volt_thresholds, volt);
+	if (pos < 0)
+		return -EINVAL;
+
+	/*
+	 * The voltage thresholds are in descending order in VWARN*_CFG
+	 * registers. So calculate 'pos' by substracting from NUM_THRESHOLDS.
+	 */
+	pos = NUM_THRESHOLDS - pos - 1;
+
+	/*
+	 * Since VWARN*_CFG are consecutive registers, calculate the
+	 * required register address using s_attr->nr.
+	 */
+	ret = set_threshold(VWARN1_CFG + s_attr->nr, pos);
+
+	return ret ? ret : count;
+}
+
+static ssize_t show_volt_thres(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	int ret, index;
+	uint8_t data;
+	struct sensor_device_attribute_2 *s_attr =
+					to_sensor_dev_attr_2(attr);
+
+	mutex_lock(&ocd_update_lock);
+
+	ret = intel_scu_ipc_ioread8(VWARN1_CFG + s_attr->nr, &data);
+
+	mutex_unlock(&ocd_update_lock);
+
+	if (ret)
+		return ret;
+
+	/* Read bits [0-2] of data to get the index into the array */
+	index = NUM_THRESHOLDS - (data & 0x07) - 1;
+
+	return sprintf(buf, "%lu\n", volt_thresholds[index]);
+}
+
+static ssize_t store_crit_shutdown(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int ret;
+	uint8_t data;
+	unsigned long flag;
+
+	if (kstrtoul(buf, 10, &flag) || (flag != 0 && flag != 1))
+		return -EINVAL;
+
+	mutex_lock(&ocd_update_lock);
+
+	ret = intel_scu_ipc_ioread8(VCRIT_CFG, &data);
+	if (ret)
+		goto ipc_fail;
+	/*
+	 * flag:1 enables shutdown due to burst current
+	 * flag:0 disables shutdown due to burst current
+	 */
+	if (flag)
+		data |= VCRIT_SHUTDOWN;
+	else
+		data &= ~VCRIT_SHUTDOWN;
+
+	ret = intel_scu_ipc_iowrite8(VCRIT_CFG, data);
+	if (!ret)
+		ret = count;
+
+ipc_fail:
+	mutex_unlock(&ocd_update_lock);
+	return ret;
+}
+
+static ssize_t show_crit_shutdown(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	int flag, ret;
+	uint8_t data;
+
+	mutex_lock(&ocd_update_lock);
+
+	ret = intel_scu_ipc_ioread8(VCRIT_CFG, &data);
+	if (!ret) {
+		/* 'flag' is 1 if CRIT_SHUTDOWN is enabled, 0 otherwise */
+		flag = !!(data & VCRIT_SHUTDOWN);
+	}
+
+	mutex_unlock(&ocd_update_lock);
+
+	return ret ? ret : sprintf(buf, "%d\n", flag);
+}
+
+
+static ssize_t show_intr_count(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	uint32_t value;
+	int level = to_sensor_dev_attr(attr)->index;
+
+	switch (level) {
+	case VWARN1:
+		value = intr_count_lvl1;
+		break;
+	case VWARN2:
+		value = intr_count_lvl2;
+		break;
+	case VCRIT:
+		value = intr_count_lvl3;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return sprintf(buf, "%d\n", value);
+}
+
+static ssize_t store_camflash_ctrl(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	uint8_t value;
+	if (kstrtou8(buf, 10, &value))
+		return -EINVAL;
+
+	if ((value < CAMFLASH_STATE_NORMAL) ||
+		(value > CAMFLASH_STATE_CRITICAL))
+		return -EINVAL;
+
+	cam_flash_state = value;
+	return count;
+}
+
+static ssize_t show_camflash_ctrl(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", cam_flash_state);
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+struct dentry *bcbcu_dbgfs_root;
+
+static struct bcu_reg_info bcbcu_reg[] = {
+	reg_info(S_BCUINT),
+	reg_info(BCUIRQ),
+	reg_info(IRQLVL1),
+	reg_info(VWARN1_CFG),
+	reg_info(VWARN2_CFG),
+	reg_info(VCRIT_CFG),
+	reg_info(ICCMAXVSYS_CFG),
+	reg_info(ICCMAXVCC_CFG),
+	reg_info(ICCMAXVNN_CFG),
+	reg_info(VFLEXSRC_BEH),
+	reg_info(VFLEXDIS_BEH),
+	reg_info(VIBDIS_BEH),
+	reg_info(CAMFLTORCH_BEH),
+	reg_info(CAMFLDIS_BEH),
+	reg_info(BCUDISW2_BEH),
+	reg_info(BCUDISCRIT_BEH),
+	reg_info(S_BCUCTRL),
+	reg_info(MBCUIRQ),
+	reg_info(MIRQLVL1)
+};
+
+/**
+ * bcbcu_dbgfs_write - debugfs: write the new state to an endpoint.
+ * @file: The seq_file to write data to.
+ * @user_buf: the user data which is need to write to an endpoint
+ * @count: the size of the user data
+ * @pos: loff_t" is a "long offset", which is the current reading or writing
+ *       position.
+ *
+ * Send data to the device. If NULL,-EINVAL/-EFAULT return to the write call to
+ * the calling program if it is non-negative return value represents the number
+ * of bytes successfully written.
+ */
+static ssize_t bcbcu_dbgfs_write(struct file *file,
+			const char __user *user_buf, size_t count, loff_t *pos)
+{
+	char buf[count];
+	u8 data;
+	u16 addr;
+	int ret;
+	struct seq_file *s = file->private_data;
+
+	if (!s) {
+		ret = -EINVAL;
+		goto error;
+	}
+
+	addr = *((u16 *)s->private);
+	if ((addr == BCUIRQ) || (addr == S_BCUINT) || (addr == IRQLVL1)) {
+		pr_err("EM_BCU: DEBUGFS no permission to write Addr(0x%04x)\n",
+				addr);
+		ret = -EIO;
+		goto error;
+	}
+
+	if (copy_from_user(buf, user_buf, count)) {
+		pr_err("EM_BCU: DEBUGFS unable to copy the user data.\n");
+		ret = -EFAULT;
+		goto error;
+	}
+
+	buf[count-1] = '\0';
+	if (kstrtou8(buf, 16, &data)) {
+		pr_err("EM_BCU: DEBUGFS invalid user data.\n");
+		ret = -EINVAL;
+		goto error;
+	}
+
+	ret = intel_scu_ipc_iowrite8(addr, data);
+	if (ret < 0) {
+		pr_err("EM_BCU: Dbgfs write error Addr: 0x%04x Data: 0x%02x\n",
+				addr, data);
+		goto error;
+	}
+	pr_debug("EM_BCU: DEBUGFS written Data: 0x%02x Addr: 0x%04x\n",
+			data, addr);
+	return count;
+
+error:
+	return ret;
+}
+
+/**
+ * bcbcu_reg_show - debugfs: show the state of an endpoint.
+ * @s: The seq_file to read data from.
+ * @unused: not used
+ *
+ * This debugfs entry shows the content of the register
+ * given in the data parameter.
+ */
+static int bcbcu_reg_show(struct seq_file *s, void *unused)
+{
+	u16 addr = 0;
+	u8 data = 0;
+	int ret;
+
+	addr = *((u16 *)s->private);
+	ret = intel_scu_ipc_ioread8(addr, &data);
+	if (ret) {
+		pr_err("EM_BCU: Error in reading 0x%04x register!!\n", addr);
+		return ret;
+	}
+	seq_printf(s, "0x%02x\n", data);
+
+	return 0;
+}
+
+/**
+ * bcbcu_dbgfs_open - debugfs: to open the endpoint for read/write operation.
+ * @inode: inode structure is used by the kernel internally to represent files.
+ * @file: It is created by the kernel on open and is passed to any function
+ *        that operates on the file, until the last close. After all instances
+ *        of the file are closed, the kernel releases the data structure.
+ *
+ * This is the first operation of the files on the device, does not require the
+ * driver to declare a corresponding method. If this is NULL, the device is
+ * turned on has been successful, but the driver will not be notified
+ */
+static int bcbcu_dbgfs_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, bcbcu_reg_show, inode->i_private);
+}
+
+static const struct file_operations bcbcu_dbgfs_fops = {
+	.owner		= THIS_MODULE,
+	.open		= bcbcu_dbgfs_open,
+	.release	= single_release,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.write		= bcbcu_dbgfs_write,
+};
+
+static void bcbcu_create_debugfs(struct ocd_info *info)
+{
+	char reg_name[MAX_REGNAME_LEN] = {0};
+	u32 idx;
+	u32 max_dbgfs_num = ARRAY_SIZE(bcbcu_reg);
+	struct dentry *entry;
+
+	bcbcu_dbgfs_root = debugfs_create_dir(DRIVER_NAME, NULL);
+	if (IS_ERR(bcbcu_dbgfs_root)) {
+		dev_warn(info->dev, "DEBUGFS directory(%s) create failed!\n",
+				DRIVER_NAME);
+		return;
+	}
+
+	for (idx = 0; idx < max_dbgfs_num; idx++) {
+		snprintf(reg_name, MAX_REGNAME_LEN, "%s", bcbcu_reg[idx].name);
+		entry = debugfs_create_file(reg_name,
+						bcbcu_reg[idx].mode,
+						bcbcu_dbgfs_root,
+						&bcbcu_reg[idx].addr,
+						&bcbcu_dbgfs_fops);
+		if (IS_ERR(entry)) {
+			debugfs_remove_recursive(bcbcu_dbgfs_root);
+			bcbcu_dbgfs_root = NULL;
+			dev_warn(info->dev, "DEBUGFS %s creation failed!!\n",
+					reg_name);
+			return;
+		}
+	}
+	dev_info(info->dev, "DEBUGFS %s created successfully.\n", DRIVER_NAME);
+}
+
+static inline void bcbcu_remove_debugfs(struct ocd_info *info)
+{
+	if (bcbcu_dbgfs_root)
+		debugfs_remove_recursive(bcbcu_dbgfs_root);
+}
+#else
+static inline void bcbcu_create_debugfs(struct ocd_info *info) { }
+static inline void bcbcu_remove_debugfs(struct ocd_info *info) { }
+#endif /* CONFIG_DEBUG_FS */
+
+/**
+ * vwarn2_irq_enable_work: delayed work queue function, which is used to unmask
+ * (enable) the VWARN2 interrupt after the specified delay time while sceduling.
+ */
+static void vwarn2_irq_enable_work(struct work_struct *work)
+{
+	int ret = 0;
+	struct ocd_info *info = container_of(work,
+						struct ocd_info,
+						vwarn2_irq_work.work);
+
+	dev_dbg(info->dev, "EM_BCU: Inside %s\n", __func__);
+
+	/* Unmasking BCU MVWARN2 Interrupt, to see the interrupt occurrence */
+	ret = intel_scu_ipc_update_register(MBCUIRQ, ~MVWARN2, MVWARN2_MASK);
+	if (ret) {
+		dev_err(info->dev, "EM_BCU: Error in %s updating reg 0x%x\n",
+				__func__, MBCUIRQ);
+	}
+}
+
+static inline struct power_supply *get_psy_battery(void)
+{
+	struct class_dev_iter iter;
+	struct device *dev;
+	static struct power_supply *pst;
+
+	class_dev_iter_init(&iter, power_supply_class, NULL, NULL);
+	while ((dev = class_dev_iter_next(&iter))) {
+		pst = (struct power_supply *)dev_get_drvdata(dev);
+		if (pst->type == POWER_SUPPLY_TYPE_BATTERY) {
+			class_dev_iter_exit(&iter);
+			return pst;
+		}
+	}
+	class_dev_iter_exit(&iter);
+
+	return NULL;
+}
+
+/* Reading the Voltage now value of the battery */
+static inline int bcu_get_battery_voltage(int *volt)
+{
+	struct power_supply *psy;
+	union power_supply_propval val;
+	int ret;
+
+	psy = get_psy_battery();
+	if (!psy)
+		return -EINVAL;
+
+	ret = psy->get_property(psy, POWER_SUPPLY_PROP_VOLTAGE_NOW, &val);
+	if (!ret)
+		*volt = (val.intval);
+
+	return ret;
+}
+
+static void handle_VW1_event(void *dev_data)
+{
+	uint8_t irq_status;
+	struct ocd_info *cinfo = (struct ocd_info *)dev_data;
+	int ret;
+	char *bcu_envp[2];
+
+	dev_info(cinfo->dev, "EM_BCU: VWARN1 Event has occured\n");
+
+	/**
+	 * Notify using uevent along with env info. Here sending vwarn2 info
+	 * upon receiving vwarn1 interrupt since the vwarn1 & vwarn2 threshold
+	 * values is swapped.
+	 */
+	bcu_envp[0] = get_envp(VWARN2);
+	bcu_envp[1] = NULL;
+	kobject_uevent_env(&cinfo->dev->kobj, KOBJ_CHANGE, bcu_envp);
+
+	/**
+	 * Masking the BCU MVWARN1 Interrupt, since software does graceful
+	 * shutdown once VWARN1 interrupt occurs. So we never expect another
+	 * VWARN1 interrupt.
+	 */
+	ret = intel_scu_ipc_update_register(MBCUIRQ, MVWARN1, MVWARN1_MASK);
+	if (ret) {
+		dev_err(cinfo->dev, "EM_BCU: Error in %s updating reg 0x%x\n",
+				__func__, MBCUIRQ);
+		goto ipc_fail;
+	}
+
+	ret = intel_scu_ipc_ioread8(S_BCUINT, &irq_status);
+	if (ret)
+		goto ipc_fail;
+	dev_dbg(cinfo->dev, "EM_BCU: S_BCUINT: %x\n", irq_status);
+
+	if (!(irq_status & SVWARN1)) {
+		/* Vsys is above WARN1 level */
+		dev_info(cinfo->dev, "EM_BCU: Recovered from VWARN1 Level\n");
+	}
+
+	return;
+
+ipc_fail:
+	dev_err(cinfo->dev, "EM_BCU: ipc read/write failed:func:%s()\n",
+								__func__);
+	return;
+}
+
+static void handle_VW2_event(void *dev_data)
+{
+	uint8_t irq_status, beh_data;
+	struct ocd_info *cinfo = (struct ocd_info *)dev_data;
+	int ret;
+	char *bcu_envp[2];
+
+	dev_info(cinfo->dev, "EM_BCU: VWARN2 Event has occured\n");
+
+	/**
+	 * Notify using uevent along with env info. Here sending vwarn1 info
+	 * upon receiving vwarn2 interrupt since the vwarn1 & vwarn2 threshold
+	 * values is swapped.
+	 */
+	bcu_envp[0] = get_envp(VWARN1);
+	bcu_envp[1] = NULL;
+	kobject_uevent_env(&cinfo->dev->kobj, KOBJ_CHANGE, bcu_envp);
+
+	ret = intel_scu_ipc_ioread8(S_BCUINT, &irq_status);
+	if (ret)
+		goto ipc_fail;
+	dev_dbg(cinfo->dev, "EM_BCU: S_BCUINT: %x\n", irq_status);
+
+	/* If Vsys is below WARN2 level-No action required from driver */
+	if (!(irq_status & SVWARN2)) {
+		/* Vsys is above WARN2 level */
+		dev_info(cinfo->dev, "EM_BCU: Recovered from VWARN2 Level\n");
+
+		/* clearing BCUDISW2 signal if asserted */
+		ret = intel_scu_ipc_ioread8(BCUDISW2_BEH, &beh_data);
+		if (ret)
+			goto ipc_fail;
+		if (IS_ASSRT_ON_VW2(beh_data) && IS_STICKY(beh_data)) {
+			ret = intel_scu_ipc_update_register(S_BCUCTRL,
+					S_BCUDISW2, S_BCUDISW2_MASK);
+			if (ret)
+				goto ipc_fail;
+		}
+
+		/* clearing CAMFLDIS# signal if asserted */
+		ret = intel_scu_ipc_ioread8(CAMFLDIS_BEH, &beh_data);
+		if (ret)
+			goto ipc_fail;
+		if (IS_ASSRT_ON_VW2(beh_data) && IS_STICKY(beh_data)) {
+			ret = intel_scu_ipc_update_register(S_BCUCTRL,
+					S_CAMFLDIS, S_CAMFLDIS_MASK);
+			if (ret)
+				goto ipc_fail;
+		}
+
+		/* clearing CAMFLTORCH signal if asserted */
+		ret = intel_scu_ipc_ioread8(CAMFLTORCH_BEH, &beh_data);
+		if (ret)
+			goto ipc_fail;
+		if (IS_ASSRT_ON_VW2(beh_data) && IS_STICKY(beh_data)) {
+			ret = intel_scu_ipc_update_register(S_BCUCTRL,
+					S_CAMFLTORCH, S_CAMFLTORCH_MASK);
+			if (ret)
+				goto ipc_fail;
+		}
+	} else {
+		/**
+		 * Masking BCU VWARN2 Interrupt, to avoid multiple VWARN2
+		 * interrupt occurrence continuously.
+		 */
+		ret = intel_scu_ipc_update_register(MBCUIRQ,
+							MVWARN2,
+							MVWARN2_MASK);
+		if (ret) {
+			dev_err(cinfo->dev,
+				"EM_BCU: Error in %s updating reg 0x%x\n",
+				__func__, MBCUIRQ);
+		}
+
+		cancel_delayed_work_sync(&cinfo->vwarn2_irq_work);
+		/**
+		 * Schedule the work to re-enable the VWARN2 interrupt after
+		 * 30sec delay
+		 */
+		schedule_delayed_work(&cinfo->vwarn2_irq_work,
+					VWARN2_INTR_EN_DELAY);
+	}
+	return;
+
+ipc_fail:
+	dev_err(cinfo->dev, "EM_BCU: ipc read/write failed:func:%s()\n",
+								__func__);
+	return;
+}
+
+static void handle_VC_event(void *dev_data)
+{
+	struct ocd_info *cinfo = (struct ocd_info *)dev_data;
+	char *bcu_envp[2];
+	int ret = 0;
+
+	dev_info(cinfo->dev, "EM_BCU: VCRIT Event has occured\n");
+
+	/* Notify using uevent along with env info */
+	bcu_envp[0] = get_envp(VCRIT);
+	bcu_envp[1] = NULL;
+	kobject_uevent_env(&cinfo->dev->kobj, KOBJ_CHANGE, bcu_envp);
+
+	/**
+	 * Masking BCU VCRIT Interrupt, since hardware does critical hardware
+	 * shutdown once VCRIT interrupt occurs. So we never expect another
+	 * VCRIT interrupt.
+	 */
+	ret = intel_scu_ipc_update_register(MBCUIRQ, MVCRIT, MVCRIT_MASK);
+	if (ret)
+		dev_err(cinfo->dev, "EM_BCU: Error in %s updating reg 0x%x\n",
+			__func__, MBCUIRQ);
+	return;
+}
+
+static irqreturn_t ocd_intrpt_handler(int irq, void *dev_data)
+{
+	return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t ocd_intrpt_thread_handler(int irq, void *dev_data)
+{
+	int ret;
+	int bat_volt;
+	unsigned int irq_data;
+	struct ocd_info *cinfo = (struct ocd_info *)dev_data;
+
+	if (!cinfo)
+		return IRQ_NONE;
+
+	mutex_lock(&ocd_update_lock);
+
+	ret = bcu_get_battery_voltage(&bat_volt);
+	if (ret)
+		dev_err(cinfo->dev,
+			"EM_BCU: Error in getting battery voltage\n");
+	else
+		dev_info(cinfo->dev, "EM_BCU: Battery Volatge = %dmV\n",
+				(bat_volt/1000));
+
+	irq_data = ioread8(cinfo->bcu_intr_addr);
+
+	/* we are not handling(no action taken) GSMPULSE_IRQ and
+						TXPWRTH_IRQ event */
+	if (irq_data & VCRIT_IRQ) {
+		++intr_count_lvl3;
+		handle_VC_event(dev_data);
+	}
+	if (irq_data & VWARN2_IRQ) {
+		++intr_count_lvl2;
+		handle_VW2_event(dev_data);
+	}
+	if (irq_data & VWARN1_IRQ) {
+		++intr_count_lvl1;
+		handle_VW1_event(dev_data);
+	}
+	if (irq_data & GSMPULSE_IRQ) {
+		dev_info(cinfo->dev, "EM_BCU: GSMPULSE Event has occured\n");
+	}
+	if (irq_data & TXPWRTH_IRQ) {
+		dev_info(cinfo->dev, "EM_BCU: TXPWRTH Event has occured\n");
+	}
+
+	/* Unmask BCU Interrupt in the mask register */
+	ret = intel_scu_ipc_update_register(MIRQLVL1, 0x00, BCU_ALERT);
+	if (ret) {
+		dev_err(cinfo->dev,
+			"EM_BCU: Unmasking of BCU failed:%d\n", ret);
+		goto ipc_fail;
+	}
+
+	ret = IRQ_HANDLED;
+
+ipc_fail:
+	mutex_unlock(&ocd_update_lock);
+	return ret;
+}
+
+static SENSOR_DEVICE_ATTR_2(volt_warn1, S_IRUGO | S_IWUSR,
+				show_volt_thres, store_volt_thres, 0, 0);
+static SENSOR_DEVICE_ATTR_2(volt_warn2, S_IRUGO | S_IWUSR,
+				show_volt_thres, store_volt_thres, 1, 0);
+static SENSOR_DEVICE_ATTR_2(volt_crit, S_IRUGO | S_IWUSR,
+				show_volt_thres, store_volt_thres, 2, 0);
+
+static SENSOR_DEVICE_ATTR_2(core_current, S_IRUGO | S_IWUSR,
+				show_curr_thres, store_curr_thres, 0, 0);
+static SENSOR_DEVICE_ATTR_2(uncore_current, S_IRUGO | S_IWUSR,
+				show_curr_thres, store_curr_thres, 1, 0);
+
+static SENSOR_DEVICE_ATTR_2(enable_crit_shutdown, S_IRUGO | S_IWUSR,
+				show_crit_shutdown, store_crit_shutdown, 0, 0);
+
+static SENSOR_DEVICE_ATTR(intr_count_level1, S_IRUGO,
+				show_intr_count, NULL, 0);
+
+static SENSOR_DEVICE_ATTR(intr_count_level2, S_IRUGO,
+				show_intr_count, NULL, 1);
+
+static SENSOR_DEVICE_ATTR(intr_count_level3, S_IRUGO,
+				show_intr_count, NULL, 2);
+
+static SENSOR_DEVICE_ATTR(camflash_ctrl, S_IRUGO | S_IWUSR,
+				show_camflash_ctrl, store_camflash_ctrl, 0);
+
+static struct attribute *mrfl_ocd_attrs[] = {
+	&sensor_dev_attr_core_current.dev_attr.attr,
+	&sensor_dev_attr_uncore_current.dev_attr.attr,
+	&sensor_dev_attr_volt_warn1.dev_attr.attr,
+	&sensor_dev_attr_volt_warn2.dev_attr.attr,
+	&sensor_dev_attr_volt_crit.dev_attr.attr,
+	&sensor_dev_attr_enable_crit_shutdown.dev_attr.attr,
+	&sensor_dev_attr_intr_count_level1.dev_attr.attr,
+	&sensor_dev_attr_intr_count_level2.dev_attr.attr,
+	&sensor_dev_attr_intr_count_level3.dev_attr.attr,
+	&sensor_dev_attr_camflash_ctrl.dev_attr.attr,
+	NULL
+};
+
+static struct attribute_group mrfl_ocd_gr = {
+	.attrs = mrfl_ocd_attrs
+};
+
+static int mrfl_ocd_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct ocd_platform_data *ocd_plat_data;
+	struct ocd_bcove_config_data ocd_config_data;
+	struct ocd_info *cinfo = devm_kzalloc(&pdev->dev,
+			sizeof(struct ocd_info), GFP_KERNEL);
+
+	if (!cinfo) {
+		dev_err(&pdev->dev, "kzalloc failed\n");
+		return -ENOMEM;
+	}
+	cinfo->pdev = pdev;
+	cinfo->irq = platform_get_irq(pdev, 0);
+	platform_set_drvdata(pdev, cinfo);
+
+	/* Creating a sysfs group with mrfl_ocd_gr attributes */
+	ret = sysfs_create_group(&pdev->dev.kobj, &mrfl_ocd_gr);
+	if (ret) {
+		dev_err(&pdev->dev, "sysfs create group failed\n");
+		goto exit_free;
+	}
+
+	/* Registering with hwmon class */
+	cinfo->dev = hwmon_device_register(&pdev->dev);
+	if (IS_ERR(cinfo->dev)) {
+		ret = PTR_ERR(cinfo->dev);
+		cinfo->dev = NULL;
+		dev_err(&pdev->dev, "hwmon_dev_regs failed\n");
+		goto exit_sysfs;
+	}
+
+	cinfo->bcu_intr_addr = ioremap_nocache(PMIC_SRAM_BCU_ADDR, IOMAP_LEN);
+	if (!cinfo->bcu_intr_addr) {
+		ret = -ENOMEM;
+		dev_err(&pdev->dev, "ioremap_nocache failed\n");
+		goto exit_hwmon;
+	}
+
+	/* Unmask 1st level BCU interrupt in the mask register */
+	ret = intel_scu_ipc_update_register(MIRQLVL1, 0x00, BCU_ALERT);
+	if (ret) {
+		dev_err(&pdev->dev,
+			"EM_BCU: Unmasking of BCU failed:%d\n", ret);
+		goto exit_ioremap;
+	}
+
+	/* Register for Interrupt Handler */
+	ret = request_threaded_irq(cinfo->irq, ocd_intrpt_handler,
+						ocd_intrpt_thread_handler,
+						IRQF_NO_SUSPEND,
+						DRIVER_NAME, cinfo);
+	if (ret) {
+		dev_err(&pdev->dev,
+			"EM_BCU: request_threaded_irq failed:%d\n", ret);
+		goto exit_ioremap;
+	}
+
+	/*Read BCU configuration values from smip*/
+	ocd_plat_data = pdev->dev.platform_data;
+
+	ret = ocd_plat_data->bcu_config_data(&ocd_config_data);
+	if (ret) {
+		dev_err(&pdev->dev, "EM_BCU: Read SMIP failed:%d\n", ret);
+		goto exit_freeirq;
+	}
+
+	/* Program the BCU with default values read from the smip*/
+	ret = program_bcu(&ocd_config_data);
+	if (ret) {
+		dev_err(&pdev->dev, "EM_BCU: program_bcu() failed:%d\n", ret);
+		goto exit_freeirq;
+	}
+
+	enable_volt_trip_points();
+	enable_current_trip_points();
+	cam_flash_state = CAMFLASH_STATE_NORMAL;
+
+	/* Initializing delayed work for re-enabling vwarn1 interrupt */
+	INIT_DELAYED_WORK(&cinfo->vwarn2_irq_work, vwarn2_irq_enable_work);
+
+	/* Create debufs for the basincove bcu registers */
+	bcbcu_create_debugfs(cinfo);
+
+	return 0;
+
+exit_freeirq:
+	free_irq(cinfo->irq, cinfo);
+exit_ioremap:
+	iounmap(cinfo->bcu_intr_addr);
+exit_hwmon:
+	hwmon_device_unregister(cinfo->dev);
+exit_sysfs:
+	sysfs_remove_group(&pdev->dev.kobj, &mrfl_ocd_gr);
+exit_free:
+	kfree(cinfo);
+	return ret;
+}
+
+static int mrfl_ocd_resume(struct device *dev)
+{
+	dev_info(dev, "Resume called.\n");
+	return 0;
+}
+
+static int mrfl_ocd_suspend(struct device *dev)
+{
+	dev_info(dev, "Suspend called.\n");
+	return 0;
+}
+
+static int mrfl_ocd_remove(struct platform_device *pdev)
+{
+	struct ocd_info *cinfo = platform_get_drvdata(pdev);
+
+	if (cinfo) {
+		flush_scheduled_work();
+		free_irq(cinfo->irq, cinfo);
+		iounmap(cinfo->bcu_intr_addr);
+		bcbcu_remove_debugfs(cinfo);
+		hwmon_device_unregister(cinfo->dev);
+		sysfs_remove_group(&pdev->dev.kobj, &mrfl_ocd_gr);
+		kfree(cinfo);
+	}
+	return 0;
+}
+
+/*********************************************************************
+ *		Driver initialisation and finalization
+ *********************************************************************/
+
+static const struct dev_pm_ops mrfl_ocd_pm_ops = {
+	.suspend = mrfl_ocd_suspend,
+	.resume = mrfl_ocd_resume,
+};
+
+static const struct platform_device_id mrfl_ocd_table[] = {
+	{DRIVER_NAME, 1 },
+};
+
+static struct platform_driver mrfl_over_curr_detect_driver = {
+	.driver = {
+		.name = DRIVER_NAME,
+		.owner = THIS_MODULE,
+		.pm = &mrfl_ocd_pm_ops,
+		},
+	.probe = mrfl_ocd_probe,
+	.remove = mrfl_ocd_remove,
+	.id_table = mrfl_ocd_table,
+};
+
+static int mrfl_ocd_module_init(void)
+{
+	return platform_driver_register(&mrfl_over_curr_detect_driver);
+}
+
+static void mrfl_ocd_module_exit(void)
+{
+	platform_driver_unregister(&mrfl_over_curr_detect_driver);
+}
+
+/* RPMSG related functionality */
+static int mrfl_ocd_rpmsg_probe(struct rpmsg_channel *rpdev)
+{
+	int ret = 0;
+	if (rpdev == NULL) {
+		pr_err("rpmsg channel not created\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	dev_info(&rpdev->dev, "Probed mrfl_ocd rpmsg device\n");
+
+	ret = mrfl_ocd_module_init();
+out:
+	return ret;
+}
+
+static void mrfl_ocd_rpmsg_remove(struct rpmsg_channel *rpdev)
+{
+	mrfl_ocd_module_exit();
+	dev_info(&rpdev->dev, "Removed mrfl_ocd rpmsg device\n");
+}
+
+static void mrfl_ocd_rpmsg_cb(struct rpmsg_channel *rpdev, void *data,
+			int len, void *priv, u32 src)
+{
+	dev_warn(&rpdev->dev, "unexpected, message\n");
+
+	print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+				data, len, true);
+}
+
+static struct rpmsg_device_id mrfl_ocd_id_table[] = {
+	{ .name = "rpmsg_mrfl_ocd" },
+	{ },
+};
+
+MODULE_DEVICE_TABLE(rpmsg, mrfl_ocd_id_table);
+
+static struct rpmsg_driver mrfl_ocd_rpmsg = {
+	.drv.name	= DRIVER_NAME,
+	.drv.owner	= THIS_MODULE,
+	.probe		= mrfl_ocd_rpmsg_probe,
+	.callback	= mrfl_ocd_rpmsg_cb,
+	.remove		= mrfl_ocd_rpmsg_remove,
+	.id_table	= mrfl_ocd_id_table,
+};
+
+static int __init mrfl_ocd_rpmsg_init(void)
+{
+	return register_rpmsg_driver(&mrfl_ocd_rpmsg);
+}
+
+static void __init mrfl_ocd_rpmsg_exit(void)
+{
+	unregister_rpmsg_driver(&mrfl_ocd_rpmsg);
+}
+
+module_init(mrfl_ocd_rpmsg_init);
+module_exit(mrfl_ocd_rpmsg_exit);
+
+MODULE_AUTHOR("Durgadoss R <durgadoss.r@intel.com>");
+MODULE_DESCRIPTION("Intel Merrifield Over Current Detection Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/external_drivers/drivers/hwmon/psh.c b/drivers/external_drivers/drivers/hwmon/psh.c
new file mode 100644
index 0000000..5de8002
--- /dev/null
+++ b/drivers/external_drivers/drivers/hwmon/psh.c
@@ -0,0 +1,449 @@
+/*
+ *  psh.c - Merrifield PSH IA side driver
+ *
+ *  (C) Copyright 2012 Intel Corporation
+ *  Author: Alek Du <alek.du@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License v2 as published by the
+ * Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA	02110-1301, USA
+ */
+
+/*
+ * PSH IA side driver for Merrifield Platform
+ */
+
+#define VPROG2_SENSOR
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/pci.h>
+#include <linux/circ_buf.h>
+#include <linux/completion.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <asm/intel_psh_ipc.h>
+#include <linux/string.h>
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/pm_runtime.h>
+#include <linux/intel_mid_pm.h>
+#include "psh_ia_common.h"
+
+#ifdef VPROG2_SENSOR
+#include <asm/intel_scu_ipcutil.h>
+#endif
+
+#define APP_IMR_SIZE (1024 * 126)
+
+enum {
+	imr_allocate = 0,
+	imr_pci_shim = 1,
+};
+
+struct psh_plt_priv {
+	int imr_src;
+	struct device *hwmon_dev;
+	void *imr2;		/* IMR2 */
+	void *ddr;		/* IMR3 */
+	uintptr_t imr2_phy;
+	uintptr_t ddr_phy;
+	struct loop_buffer lbuf;
+};
+
+int process_send_cmd(struct psh_ia_priv *psh_ia_data,
+			int ch, struct ia_cmd *cmd, int len)
+{
+	int i, j;
+	int ret = 0;
+	u8 *pcmd = (u8 *)cmd;
+	struct psh_msg in;
+
+	if (ch == PSH2IA_CHANNEL0 && cmd->cmd_id == CMD_RESET) {
+		intel_psh_ipc_disable_irq();
+		ia_lbuf_read_reset(psh_ia_data->lbuf);
+	}
+
+	/* map from virtual channel to real channel */
+	ch = ch - PSH2IA_CHANNEL0 + PSH_SEND_CH0;
+
+	for (i = 0; i < len; i += 7) {
+		u8 left = len - i;
+		u8 *ptr = (u8 *)&in;
+
+		memset(&in, 0, sizeof(in));
+
+		if (left > 7) {
+			left = 7;
+			in.msg |= PSH_IPC_CONTINUE;
+		}
+
+		for (j = 0; j < left; j++) {
+			if (j == 3)
+				ptr++;
+			*ptr = *pcmd;
+			ptr++;
+			pcmd++;
+		}
+
+		ret = intel_ia2psh_command(&in, NULL, ch, 1000000);
+		if (ret) {
+			psh_err("sendcmd %d by IPC %d failed!, ret=%d\n",
+					cmd->cmd_id, ch, ret);
+			ret = -EIO;
+			goto f_out;
+		}
+	}
+
+f_out:
+	if (ch == PSH2IA_CHANNEL0 && cmd->cmd_id == CMD_RESET)
+		intel_psh_ipc_enable_irq();
+	return ret;
+}
+
+int do_setup_ddr(struct device *dev)
+{
+	struct psh_ia_priv *ia_data =
+			(struct psh_ia_priv *)dev_get_drvdata(dev);
+	struct psh_plt_priv *plt_priv =
+			(struct psh_plt_priv *)ia_data->platform_priv;
+	uintptr_t ddr_phy = plt_priv->ddr_phy;
+	uintptr_t imr2_phy = plt_priv->imr2_phy;
+	const struct firmware *fw_entry;
+	struct ia_cmd cmd_user = {
+		.cmd_id = CMD_SETUP_DDR,
+		.sensor_id = 0,
+		};
+	static int fw_load_done;
+	int load_default = 0;
+	char fname[40];
+
+	if (fw_load_done)
+		return 0;
+
+#ifdef VPROG2_SENSOR
+	intel_scu_ipc_msic_vprog2(1);
+	msleep(500);
+#endif
+again:
+	if (!request_firmware(&fw_entry, fname, dev)) {
+		if (!fw_entry)
+			return -ENOMEM;
+
+		psh_debug("psh fw size %d virt:0x%p\n",
+				(int)fw_entry->size, fw_entry->data);
+		if (fw_entry->size > APP_IMR_SIZE) {
+			psh_err("psh fw size too big\n");
+		} else {
+			struct ia_cmd cmd = {
+				.cmd_id = CMD_RESET,
+				.sensor_id = 0,
+				};
+
+			memcpy(plt_priv->imr2, fw_entry->data,
+				fw_entry->size);
+			*(uintptr_t *)(&cmd.param) = imr2_phy;
+			cmd.tran_id = 0x1;
+			if (process_send_cmd(ia_data, PSH2IA_CHANNEL3, &cmd, 7))
+				return -1;
+			ia_data->load_in_progress = 1;
+			wait_for_completion_timeout(&ia_data->cmd_load_comp,
+					3 * HZ);
+			fw_load_done = 1;
+		}
+		release_firmware(fw_entry);
+	} else {
+		psh_err("cannot find psh firmware(%s)\n", fname);
+		if (!load_default) {
+			psh_err("try to load default psh.bin\n");
+			snprintf(fname, 20, "psh.bin");
+			load_default = 1;
+			goto again;
+		}
+	}
+	ia_lbuf_read_reset(ia_data->lbuf);
+	*(unsigned long *)(&cmd_user.param) = ddr_phy;
+	return ia_send_cmd(ia_data, &cmd_user, 7);
+}
+
+static void psh2ia_channel_handle(u32 msg, u32 param, void *data)
+{
+	struct pci_dev *pdev = (struct pci_dev *)data;
+	struct psh_ia_priv *ia_data =
+			(struct psh_ia_priv *)dev_get_drvdata(&pdev->dev);
+	struct psh_plt_priv *plt_priv =
+			(struct psh_plt_priv *)ia_data->platform_priv;
+	u8 *dbuf = NULL;
+	u16 size = 0;
+
+	if (unlikely(ia_data->load_in_progress)) {
+		ia_data->load_in_progress = 0;
+		complete(&ia_data->cmd_load_comp);
+		return;
+	}
+
+	while (!ia_lbuf_read_next(ia_data,
+			&plt_priv->lbuf, &dbuf, &size)) {
+		ia_handle_frame(ia_data, dbuf, size);
+	}
+	sysfs_notify(&pdev->dev.kobj, NULL, "data_size");
+}
+
+static int psh_imr_init(struct pci_dev *pdev,
+			int imr_src, uintptr_t *phy_addr, void **virt_addr,
+			unsigned size, int bar)
+{
+	struct page *pg;
+	void __iomem *mem;
+	int ret = 0;
+	unsigned long start = 0, len;
+
+	if (imr_src == imr_allocate) {
+		/* dynamic alloct memory region */
+		pg = alloc_pages(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO,
+						get_order(size));
+		if (!pg) {
+			dev_err(&pdev->dev, "can not allocate app page imr buffer\n");
+			ret = -ENOMEM;
+			goto err;
+		}
+		*phy_addr = page_to_phys(pg);
+		*virt_addr = page_address(pg);
+	} else if (imr_src == imr_pci_shim) {
+		/* dedicate isolated memory region */
+		start = pci_resource_start(pdev, bar);
+		len = pci_resource_len(pdev, bar);
+		if (!start || !len) {
+			dev_err(&pdev->dev, "bar %d address not set\n", bar);
+			ret = -EINVAL;
+			goto err;
+		}
+
+		ret = pci_request_region(pdev, bar, "psh");
+		if (ret) {
+			dev_err(&pdev->dev, "failed to request psh region "
+				"0x%lx-0x%lx\n", start,
+				(unsigned long)pci_resource_end(pdev, bar));
+			goto err;
+		}
+
+		mem = ioremap_nocache(start, len);
+		if (!mem) {
+			dev_err(&pdev->dev, "can not ioremap app imr address\n");
+			ret = -EINVAL;
+			goto err_ioremap;
+		}
+
+		*phy_addr = start;
+		*virt_addr = (void *)mem;
+	} else {
+		dev_err(&pdev->dev, "Invalid chip imr source\n");
+		ret = -EINVAL;
+		goto err;
+	}
+
+	return 0;
+
+err_ioremap:
+	pci_release_region(pdev, bar);
+err:
+	return ret;
+}
+
+static void psh_imr_free(int imr_src, void *virt_addr, unsigned size)
+{
+	if (imr_src == imr_allocate)
+		__free_pages(virt_to_page(virt_addr), get_order(size));
+	else if (imr_src == imr_pci_shim)
+		iounmap((void __iomem *)virt_addr);
+}
+
+static int psh_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	int ret = -1;
+	struct psh_ia_priv *ia_data;
+	struct psh_plt_priv *plt_priv;
+
+	ret = pci_enable_device(pdev);
+	if (ret) {
+		dev_err(&pdev->dev, "fail to enable psh pci device\n");
+		goto pci_err;
+	}
+
+	plt_priv = kzalloc(sizeof(*plt_priv), GFP_KERNEL);
+	if (!plt_priv) {
+		dev_err(&pdev->dev, "can not allocate plt_priv\n");
+		goto plt_err;
+	}
+
+	switch (intel_mid_identify_cpu()) {
+	case INTEL_MID_CPU_CHIP_TANGIER:
+		if (intel_mid_soc_stepping() == 0)
+			plt_priv->imr_src = imr_allocate;
+		else
+			plt_priv->imr_src = imr_pci_shim;
+		break;
+	case INTEL_MID_CPU_CHIP_ANNIEDALE:
+		plt_priv->imr_src = imr_pci_shim;
+		break;
+	default:
+		dev_err(&pdev->dev, "error memory region\n");
+		goto psh_imr2_err;
+		break;
+	}
+
+	/* init IMR2 */
+	ret = psh_imr_init(pdev, plt_priv->imr_src,
+				&plt_priv->imr2_phy, &plt_priv->imr2,
+				APP_IMR_SIZE, 0);
+	if (ret)
+		goto psh_imr2_err;
+
+
+	/* init IMR3 */
+	ret = psh_imr_init(pdev, plt_priv->imr_src,
+				&plt_priv->ddr_phy, &plt_priv->ddr,
+				BUF_IA_DDR_SIZE, 1);
+	if (ret)
+		goto psh_ddr_err;
+
+	ret = psh_ia_common_init(&pdev->dev, &ia_data);
+	if (ret) {
+		dev_err(&pdev->dev, "fail to init psh_ia_common\n");
+		goto psh_ia_err;
+	}
+
+	ia_lbuf_read_init(&plt_priv->lbuf,
+				plt_priv->ddr,
+				BUF_IA_DDR_SIZE, NULL);
+	ia_data->lbuf = &plt_priv->lbuf;
+
+	plt_priv->hwmon_dev = hwmon_device_register(&pdev->dev);
+	if (!plt_priv->hwmon_dev) {
+		dev_err(&pdev->dev, "fail to register hwmon device\n");
+		goto hwmon_err;
+	}
+
+	ia_data->platform_priv = plt_priv;
+
+	ret = intel_psh_ipc_bind(PSH_RECV_CH0, psh2ia_channel_handle, pdev);
+	if (ret) {
+		dev_err(&pdev->dev, "fail to bind channel\n");
+		goto irq_err;
+	}
+
+	/* just put this dev into suspend status always, since this is fake */
+	pm_runtime_put_noidle(&pdev->dev);
+	pm_runtime_allow(&pdev->dev);
+	return 0;
+
+irq_err:
+	hwmon_device_unregister(plt_priv->hwmon_dev);
+hwmon_err:
+	psh_ia_common_deinit(&pdev->dev);
+psh_ia_err:
+	psh_imr_free(plt_priv->imr_src, plt_priv->ddr, BUF_IA_DDR_SIZE);
+psh_ddr_err:
+	psh_imr_free(plt_priv->imr_src, plt_priv->imr2, APP_IMR_SIZE);
+psh_imr2_err:
+	kfree(plt_priv);
+plt_err:
+	pci_dev_put(pdev);
+pci_err:
+	return ret;
+}
+
+static void psh_remove(struct pci_dev *pdev)
+{
+	struct psh_ia_priv *ia_data =
+			(struct psh_ia_priv *)dev_get_drvdata(&pdev->dev);
+	struct psh_plt_priv *plt_priv =
+			(struct psh_plt_priv *)ia_data->platform_priv;
+
+	psh_imr_free(plt_priv->imr_src, plt_priv->ddr, BUF_IA_DDR_SIZE);
+	psh_imr_free(plt_priv->imr_src, plt_priv->imr2, APP_IMR_SIZE);
+
+	intel_psh_ipc_unbind(PSH_RECV_CH0);
+
+	hwmon_device_unregister(plt_priv->hwmon_dev);
+
+	kfree(plt_priv);
+
+	psh_ia_common_deinit(&pdev->dev);
+}
+
+static int psh_suspend(struct device *dev)
+{
+	return psh_ia_comm_suspend(dev);
+}
+
+static int psh_resume(struct device *dev)
+{
+	return psh_ia_comm_resume(dev);
+}
+
+static int psh_runtime_suspend(struct device *dev)
+{
+	dev_dbg(dev, "runtime suspend called\n");
+	return 0;
+}
+
+static int psh_runtime_resume(struct device *dev)
+{
+	dev_dbg(dev, "runtime resume called\n");
+	return 0;
+}
+
+static const struct dev_pm_ops psh_drv_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(psh_suspend,
+			psh_resume)
+	SET_RUNTIME_PM_OPS(psh_runtime_suspend,
+			psh_runtime_resume, NULL)
+};
+
+static DEFINE_PCI_DEVICE_TABLE(pci_ids) = {
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x11a4)},
+	{ 0,}
+};
+
+MODULE_DEVICE_TABLE(pci, pci_ids);
+
+static struct pci_driver psh_driver = {
+	.name = "psh",
+	.driver = {
+		.pm = &psh_drv_pm_ops,
+	},
+	.id_table = pci_ids,
+	.probe	= psh_probe,
+	.remove	= psh_remove,
+};
+
+static int __init psh_init(void)
+{
+	return pci_register_driver(&psh_driver);
+}
+
+static void __exit psh_exit(void)
+{
+	pci_unregister_driver(&psh_driver);
+}
+
+module_init(psh_init);
+module_exit(psh_exit);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/external_drivers/drivers/hwmon/psh_ia_common.c b/drivers/external_drivers/drivers/hwmon/psh_ia_common.c
new file mode 100644
index 0000000..65160d5
--- /dev/null
+++ b/drivers/external_drivers/drivers/hwmon/psh_ia_common.c
@@ -0,0 +1,1027 @@
+/*
+ *  psh_ia_common.c  - Intel PSH IA side driver common lib
+ *
+ * Copyright 2012-2013 Intel Corporation All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License v2 as published by the
+ * Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA	02110-1301, USA
+ */
+
+#include <linux/ctype.h>
+#include "psh_ia_common.h"
+#include <asm/intel-mid.h>
+#include <linux/hrtimer.h>
+
+#define TOLOWER(x) ((x) | 0x20)
+/* translate string to unsigned long value */
+unsigned long _strtoul(const char *str, char **endp, unsigned int base)
+{
+	unsigned long value = 0;
+
+	if (base == 0) {
+		if (str[0] == '0') {
+			if (TOLOWER(str[1]) == 'x' && isxdigit(str[2]))
+				base = 16;
+			else
+				base = 8;
+		} else
+			base = 10;
+	}
+
+	if (base == 16 && str[0] == '0' && TOLOWER(str[1]) == 'x')
+		str += 2;
+
+	while (isxdigit(*str)) {
+		unsigned int temp_value;
+
+		if ('0' <= *str && *str <= '9')
+			temp_value = *str - '0';
+		else
+			temp_value = TOLOWER(*str) - 'a' + 10;
+
+		if (temp_value >= base)
+			break;
+
+		value = value * base + temp_value;
+		str++;
+	}
+
+	if (endp)
+		*endp = (char *)str;
+
+	return value;
+}
+
+long trans_strtol(const char *str, char **endp, unsigned int base)
+{
+	if (*str == '-')
+		return -_strtoul(str + 1, endp, base);
+
+	return _strtoul(str, endp, base);
+}
+
+void ia_lbuf_read_init(struct loop_buffer *lbuf,
+		u8 *buf, u16 size, update_finished_f uf)
+{
+	lbuf->addr = buf;
+	lbuf->length = size;
+	lbuf->off_head = lbuf->off_tail = 0;
+	lbuf->update_finished = uf;
+	lbuf->in_reading = 0;
+}
+
+void ia_lbuf_read_reset(struct loop_buffer *lbuf)
+{
+	if (lbuf) {
+		lbuf->off_head = lbuf->off_tail = 0;
+		lbuf->in_reading = 0;
+	}
+}
+
+int ia_lbuf_read_next(struct psh_ia_priv *psh_ia_data,
+			struct loop_buffer *lbuf,
+			u8 **buf, u16 *size)
+{
+	struct frame_head *fhead =
+			(struct frame_head *)(lbuf->addr + lbuf->off_head);
+
+	*buf = NULL;
+	*size = 0;
+
+	if (lbuf->in_reading) {
+		lbuf->in_reading = 0;
+
+		/* go over previous frame has been read */
+		lbuf->off_head += frame_size(fhead->length);
+		lbuf->off_tail = lbuf->off_head;
+		fhead = (struct frame_head *)(lbuf->addr + lbuf->off_head);
+	}
+
+	if (fhead->sign == LBUF_DISCARD_SIGN) {
+		fhead = (struct frame_head *)lbuf->addr;
+		lbuf->off_head = lbuf->off_tail = 0;
+	}
+
+	if (fhead->sign == LBUF_CELL_SIGN) {
+		if (fhead->length > LBUF_MAX_CELL_SIZE)
+			goto f_out;
+
+		*buf = lbuf->addr + lbuf->off_head + sizeof(*fhead);
+		*size = fhead->length;
+		lbuf->in_reading = 1;
+	}
+
+f_out:
+	if (lbuf->update_finished &&
+			(!lbuf->in_reading && lbuf->off_head)) {
+		/* no more data frame, inform FW to update its HEAD */
+		lbuf->update_finished(psh_ia_data, lbuf->off_head);
+	}
+
+	return !lbuf->in_reading;
+}
+
+void ia_circ_reset_off(struct circ_buf *circ)
+{
+	circ->head = 0;
+	circ->tail = 0;
+}
+
+void ia_circ_put_data(struct circ_buf *circ, const char *buf, u32 size)
+{
+	int tail_size, cnt;
+
+	if (CIRC_SPACE(circ->head, circ->tail, CIRC_SIZE) < size)
+		return;
+	tail_size = CIRC_SPACE_TO_END(circ->head, circ->tail, CIRC_SIZE);
+	cnt = size;
+	if (cnt > tail_size)
+		cnt = tail_size;
+	memcpy(circ->buf + circ->head, buf, cnt);
+	cnt = size - cnt;
+	if (cnt)
+		memcpy(circ->buf, buf + tail_size, cnt);
+	circ->head += size;
+	circ->head &= (CIRC_SIZE - 1);
+}
+
+void ia_circ_dbg_put_data(struct psh_ia_priv *psh_ia_data,
+					 const char *buf, u32 size)
+{
+	int temp_count;
+	struct circ_buf *circ;
+
+	if (size > CIRC_SIZE -1)
+		return;
+
+	temp_count = 0;
+	circ = &psh_ia_data->circ_dbg;
+
+	if (CIRC_SPACE(circ->head, circ->tail, CIRC_SIZE) < size) {
+
+		mutex_lock(&psh_ia_data->circ_dbg_mutex);
+
+		circ->tail = circ->head + size + 1;
+		circ->tail &= (CIRC_SIZE - 1);
+		while (circ->buf[circ->tail++] != '\n') {
+			circ->tail &= (CIRC_SIZE - 1);
+			temp_count++;
+			if (temp_count > (CIRC_SIZE -1))
+				break;
+		}
+
+		mutex_unlock(&psh_ia_data->circ_dbg_mutex);
+	}
+
+	ia_circ_put_data(circ, buf, size);
+}
+
+int ia_circ_get_data(struct circ_buf *circ, char *buf, u32 size)
+{
+	int avail, avail_tail, cnt;
+
+	avail = CIRC_CNT(circ->head, circ->tail, CIRC_SIZE);
+	if (!avail)
+		return 0;
+	avail_tail = CIRC_CNT_TO_END(circ->head, circ->tail, CIRC_SIZE);
+	if (avail_tail) {
+		cnt = size;
+		if (cnt > avail_tail)
+			cnt = avail_tail;
+		memcpy(buf, circ->buf + circ->tail, cnt);
+		size -= cnt;
+		avail -= cnt;
+		circ->tail += cnt;
+		if (!avail || !size)
+			return cnt;
+	}
+	cnt = size;
+	if (cnt > avail)
+		cnt = avail;
+	memcpy(buf + avail_tail, circ->buf, cnt);
+	circ->tail += cnt;
+	circ->tail &= (CIRC_SIZE - 1);
+	return avail_tail + cnt;
+}
+
+int ia_circ_dbg_get_data(struct psh_ia_priv *psh_ia_data, char *buf, u32 size)
+{
+	int cnt;
+	struct circ_buf *circ = &psh_ia_data->circ_dbg;
+
+	mutex_lock(&psh_ia_data->circ_dbg_mutex);
+	cnt = ia_circ_get_data(circ, buf, size);
+	mutex_unlock(&psh_ia_data->circ_dbg_mutex);
+
+	return cnt;
+}
+
+static int ia_sync_timestamp(struct psh_ia_priv *psh_ia_data, u8 check_interval)
+{
+	static u64 tick_old = 0;
+	struct ia_cmd cmd_timestamp = { 0 };
+	struct cmd_ia_notify_param *param = (struct cmd_ia_notify_param *)cmd_timestamp.param;
+	u8 *linux_base_ns = param->extra;
+	timestamp_t base_ns = 0;
+	int ret;
+
+	if (check_interval) {
+		if (!tick_old) {
+			tick_old = jiffies;
+			return 0;
+		} else {
+			if (jiffies - tick_old < (120 * HZ))
+				return 0;
+		}
+	}
+
+	cmd_timestamp.tran_id = 0;
+	cmd_timestamp.cmd_id = CMD_IA_NOTIFY;
+	cmd_timestamp.sensor_id = 0;
+	param->id = IA_NOTIFY_TIMESTAMP_SYNC;
+	base_ns = ktime_to_ns(ktime_get_boottime());
+	tick_old = jiffies;
+	*(timestamp_t *)linux_base_ns = base_ns;
+	ret = process_send_cmd(psh_ia_data, PSH2IA_CHANNEL0,
+			&cmd_timestamp, sizeof(struct ia_cmd) - CMD_PARAM_MAX_SIZE
+		+ sizeof(struct cmd_ia_notify_param) + sizeof(base_ns));
+	return ret;
+}
+
+int ia_send_cmd(struct psh_ia_priv *psh_ia_data,
+			struct ia_cmd *cmd, int len)
+{
+	int ret = 0;
+	static struct resp_cmd_ack cmd_ack;
+
+	ia_sync_timestamp(psh_ia_data, 0);
+
+	mutex_lock(&psh_ia_data->cmd_mutex);
+	if (cmd->cmd_id == CMD_RESET) {
+		cmd->tran_id = 0;
+		ia_circ_reset_off(&psh_ia_data->circ);
+	}
+
+	cmd_ack.cmd_id = cmd->cmd_id;
+	psh_ia_data->cmd_ack = &cmd_ack;
+
+	psh_ia_data->cmd_in_progress = cmd->cmd_id;
+	ret = process_send_cmd(psh_ia_data, PSH2IA_CHANNEL0,
+			cmd, len);
+	psh_ia_data->cmd_in_progress = CMD_INVALID;
+	if (ret) {
+		psh_err("send cmd (id = %d) failed, ret=%d\n",
+				cmd->cmd_id, ret);
+		goto f_out;
+	}
+	if (cmd->cmd_id == CMD_FW_UPDATE)
+		goto f_out;
+
+ack_wait:
+	if (!wait_for_completion_timeout(&psh_ia_data->cmd_comp,
+				5 * HZ)) {
+		psh_err("no CMD_ACK for %d back, timeout!\n",
+				cmd_ack.cmd_id);
+		ret = -ETIMEDOUT;
+	} else if (cmd_ack.ret) {
+		if (cmd_ack.ret == E_CMD_ASYNC)
+			goto ack_wait;
+		psh_err("CMD %d return error %d!\n", cmd_ack.cmd_id,
+				cmd_ack.ret);
+		ret = -EREMOTEIO;
+	}
+
+f_out:
+	psh_ia_data->cmd_ack = NULL;
+	mutex_unlock(&psh_ia_data->cmd_mutex);
+	return ret;
+}
+
+ssize_t ia_start_control(struct device *dev,
+			struct device_attribute *attr,
+			const char *str, size_t count)
+{
+	struct psh_ia_priv *psh_ia_data =
+			(struct psh_ia_priv *)dev_get_drvdata(dev);
+	struct ia_cmd cmd_user = { 0 };
+	u8 *ptr = (u8 *)&cmd_user;
+	char *s;
+	long val;
+	int token = 0;
+	int ret;
+
+	while (*str && (token < sizeof(cmd_user))) {
+		val = trans_strtol(str, &s, 0);
+		if (str == s) {
+			str++;
+			continue;
+		}
+		str = s;
+		*ptr++ = (u8)val;
+		token++;
+	}
+
+	if (cmd_user.cmd_id == CMD_SETUP_DDR) {
+		ret = do_setup_ddr(dev);
+		if (ret) {
+			psh_err("do_setup_ddr failed\n");
+			return ret;
+		} else {
+			return count;
+		}
+	}
+
+	ret = ia_send_cmd(psh_ia_data, &cmd_user, token);
+	if (ret)
+		return ret;
+	else
+		return count;
+}
+
+ssize_t ia_read_data_size(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct psh_ia_priv *psh_ia_data =
+			(struct psh_ia_priv *)dev_get_drvdata(dev);
+	int avail = CIRC_CNT(psh_ia_data->circ.head,
+				psh_ia_data->circ.tail, CIRC_SIZE);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", avail);
+}
+
+ssize_t ia_read_data(struct file *file, struct kobject *kobj,
+			struct bin_attribute *attr, char *buf,
+			loff_t off, size_t count)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct psh_ia_priv *psh_ia_data =
+			(struct psh_ia_priv *)dev_get_drvdata(dev);
+
+	return ia_circ_get_data(&psh_ia_data->circ, buf, count);
+}
+
+ssize_t ia_read_debug_data(struct file *file, struct kobject *kobj,
+			struct bin_attribute *attr, char *buf,
+			loff_t off, size_t count)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct psh_ia_priv *psh_ia_data =
+			(struct psh_ia_priv *)dev_get_drvdata(dev);
+
+	return ia_circ_dbg_get_data(psh_ia_data, buf, count);
+}
+
+ssize_t ia_set_dbg_mask(struct device *dev,
+			struct device_attribute *attr,
+			const char *str, size_t count)
+{
+	struct ia_cmd cmd;
+	struct cmd_debug_param *param = (struct cmd_debug_param *)cmd.param;
+	struct psh_ia_priv *psh_ia_data =
+			(struct psh_ia_priv *)dev_get_drvdata(dev);
+	int token = 0;
+	char *s;
+	long val;
+
+	while (*str) {
+		val = trans_strtol(str, &s, 0);
+		if (str == s) {
+			str++;
+			continue;
+		}
+		switch (token) {
+		case 0:
+			param->mask_out = val;
+			break;
+		case 1:
+			param->mask_level = val;
+			break;
+		default:
+			break;
+		}
+		str = s;
+		if (++token == 2)
+			break;
+	}
+
+	if (token == 2) {
+		int ret;
+		cmd.cmd_id = CMD_DEBUG;
+		cmd.sensor_id = 0;
+		param->sub_cmd = SCMD_DEBUG_SET_MASK;
+		ret = ia_send_cmd(psh_ia_data, &cmd, 10);
+		if (ret)
+			return ret;
+		else
+			return count;
+	} else {
+		psh_err("wrong input for \"<mask_out> <mask_level>\"\n");
+		return -EPERM;
+	}
+}
+
+ssize_t ia_get_dbg_mask(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct ia_cmd cmd;
+	struct cmd_debug_param *param = (struct cmd_debug_param *)cmd.param;
+	struct psh_ia_priv *psh_ia_data =
+			(struct psh_ia_priv *)dev_get_drvdata(dev);
+	int ret;
+
+	cmd.cmd_id = CMD_DEBUG;
+	cmd.sensor_id = 0;
+	param->sub_cmd = SCMD_DEBUG_GET_MASK;
+	ret = ia_send_cmd(psh_ia_data, &cmd, 8);
+	if (ret)
+		return snprintf(buf, PAGE_SIZE, "%s failed!\n", __func__);
+
+	return snprintf(buf, PAGE_SIZE, "mask_out:0x%x mask_level:0x%x\n",
+			psh_ia_data->dbg_mask.mask_out,
+			psh_ia_data->dbg_mask.mask_level);
+}
+
+static inline int is_port_sensor(enum sensor_type type)
+{
+	return ((type > PORT_SENSOR_BASE) && (type < PORT_SENSOR_MAX_NUM));
+}
+
+static void ia_handle_snr_info(struct psh_ia_priv *psh_ia_data,
+				const struct snr_info *sinfo)
+{
+	char buf[STR_BUFF_SIZE];
+	ssize_t str_size;
+	int i;
+	static int snr_info_start;
+	static int sensor_map_setup;
+
+	if (!snr_info_start) {
+		snr_info_start++;
+		str_size = snprintf(buf, STR_BUFF_SIZE,
+				"******** Start Sensor Status ********\n");
+		ia_circ_dbg_put_data(psh_ia_data, buf, str_size);
+	}
+
+	if (!sinfo) {
+		if (snr_info_start) {
+			snr_info_start = 0;
+			sensor_map_setup = 1;
+			str_size = snprintf(buf, STR_BUFF_SIZE,
+					"******** End Sensor Status ********\n");
+			ia_circ_dbg_put_data(psh_ia_data, buf, str_size);
+		}
+		return;
+	}
+
+	if (!sensor_map_setup && snr_info_start) {
+		int len = strlen(sinfo->name) + 1;
+		struct sensor_db *sensor_obj =
+				kmalloc(sizeof(struct sensor_db), GFP_KERNEL);
+
+		if (sensor_obj == NULL) {
+			psh_err("ia_handle_snr_info failed kmalloc sensor_obj\n");
+			BUG();
+			return;
+		}
+
+		sensor_obj->sid = sinfo->id;
+		memcpy(sensor_obj->sensor_name, sinfo->name,
+			len < SNR_NAME_MAX_LEN ? len : SNR_NAME_MAX_LEN);
+		sensor_obj->sensor_name[SNR_NAME_MAX_LEN - 1] = '\0';
+		list_add_tail(&sensor_obj->list, &psh_ia_data->sensor_list);
+
+	}
+
+	str_size = snprintf(buf, STR_BUFF_SIZE,
+			"***** Sensor %5s(%d) Status *****\n",
+			sinfo->name, sinfo->id);
+	ia_circ_dbg_put_data(psh_ia_data, buf, str_size);
+
+	str_size = snprintf(buf, STR_BUFF_SIZE,
+			"  freq=%d, freq_max=%d\n"
+			"  status=0x%x,  bit_cfg=0x%x\n"
+			"  data_cnt=%d,  priv=0x%x\n"
+			"  attri=0x%x, health=%d\n",
+			sinfo->freq, sinfo->freq_max,
+			sinfo->status, sinfo->bit_cfg,
+			sinfo->data_cnt, sinfo->priv,
+			sinfo->attri, sinfo->health);
+	ia_circ_dbg_put_data(psh_ia_data, buf, str_size);
+
+	for (i = 0; i < sinfo->link_num; i++) {
+		const struct link_info *linfo = &sinfo->linfo[i];
+		str_size = snprintf(buf, STR_BUFF_SIZE,
+			"    %s%s=%3d, rpt_freq=%d\n",
+			(linfo->ltype == LINK_AS_REPORTER) ?
+						"REPORTER" : "CLIENT",
+			(linfo->ltype == LINK_AS_MONITOR) ?
+						"(M)" : "",
+			linfo->sid,
+			linfo->rpt_freq);
+
+		ia_circ_dbg_put_data(psh_ia_data, buf, str_size);
+	}
+
+	str_size = snprintf(buf, STR_BUFF_SIZE,
+			"*****************************\n");
+	ia_circ_dbg_put_data(psh_ia_data, buf, str_size);
+}
+
+ssize_t ia_set_status_mask(struct device *dev,
+			struct device_attribute *attr,
+			const char *str, size_t count)
+{
+	char *s;
+	long val = 0;
+	struct psh_ia_priv *psh_ia_data =
+			(struct psh_ia_priv *)dev_get_drvdata(dev);
+
+	while (*str) {
+		val = trans_strtol(str, &s, 0);
+		if (str == s) {
+			str++;
+			continue;
+		} else
+			break;
+	}
+	psh_ia_data->status_bitmask = val;
+
+	pr_debug("set status_bitmask as 0x%x\n", psh_ia_data->status_bitmask);
+	return count;
+}
+
+ssize_t ia_get_status_mask(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct psh_ia_priv *psh_ia_data =
+			(struct psh_ia_priv *)dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "status_mask=0x%x\n",
+						psh_ia_data->status_bitmask);
+}
+
+ssize_t ia_trig_get_status(struct device *dev,
+			struct device_attribute *attr,
+			const char *str, size_t count)
+{
+	struct ia_cmd cmd;
+	struct get_status_param *param = (struct get_status_param *)cmd.param;
+	struct psh_ia_priv *psh_ia_data =
+			(struct psh_ia_priv *)dev_get_drvdata(dev);
+	int ret;
+
+	if (str[0] == 'm')
+		param->snr_bitmask = psh_ia_data->status_bitmask;
+	else if (str[0] == 'a')
+		param->snr_bitmask = (u32)-1;
+	else if (str[0] == 'r')
+		param->snr_bitmask = ((u32)-1) & ~SNR_RUNONLY_BITMASK;
+
+	cmd.cmd_id = CMD_GET_STATUS;
+	cmd.sensor_id = 0;
+	ret = ia_send_cmd(psh_ia_data, &cmd, 7);
+	if (ret)
+		return ret;
+
+	return count;
+}
+
+ssize_t ia_get_counter(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	int ret;
+	struct ia_cmd cmd;
+	struct cmd_counter_param *param = (struct cmd_counter_param *)cmd.param;
+	struct psh_ia_priv *psh_ia_data =
+			(struct psh_ia_priv *)dev_get_drvdata(dev);
+
+	cmd.cmd_id = CMD_COUNTER;
+	cmd.sensor_id = 0;
+	param->sub_cmd = SCMD_GET_COUNTER;
+
+	ret = ia_send_cmd(psh_ia_data, &cmd, 5);
+	if (ret)
+		return ret;
+
+	return snprintf(buf, PAGE_SIZE, "   Module        Counter(hex)\n"
+					"    GPIO          %x\n"
+					"    I2C           %x\n"
+					"    DMA           %x\n"
+					"    PRINT         %x\n",
+			psh_ia_data->counter.gpio_counter,
+			psh_ia_data->counter.i2c_counter,
+			psh_ia_data->counter.dma_counter,
+			psh_ia_data->counter.print_counter);
+}
+ssize_t ia_clear_counter(struct device *dev,
+			struct device_attribute *attr,
+			const char *str, size_t count)
+{
+	int ret;
+	struct ia_cmd cmd;
+	struct cmd_counter_param *param = (struct cmd_counter_param *)cmd.param;
+	struct psh_ia_priv *psh_ia_data =
+			(struct psh_ia_priv *)dev_get_drvdata(dev);
+
+	cmd.cmd_id = CMD_COUNTER;
+	cmd.sensor_id = 0;
+	param->sub_cmd = SCMD_CLEAR_COUNTER;
+
+	ret = ia_send_cmd(psh_ia_data, &cmd, 5);
+	if (ret)
+		return ret;
+	else
+		return count;
+}
+
+ssize_t ia_get_version(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	int ret;
+	struct ia_cmd cmd;
+	struct psh_ia_priv *psh_ia_data =
+			(struct psh_ia_priv *)dev_get_drvdata(dev);
+	cmd.cmd_id = CMD_GET_VERSION;
+	cmd.sensor_id = 0;
+
+	ret = ia_send_cmd(psh_ia_data, &cmd, 3);
+	if (ret)
+		return ret;
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", psh_ia_data->version_str);
+}
+static SENSOR_DEVICE_ATTR(status_mask, S_IRUSR | S_IWUSR,
+				ia_get_status_mask, ia_set_status_mask, 0);
+static SENSOR_DEVICE_ATTR(status_trig, S_IWUSR, NULL, ia_trig_get_status, 1);
+static SENSOR_DEVICE_ATTR(debug, S_IRUSR | S_IWUSR ,
+				ia_get_dbg_mask, ia_set_dbg_mask, 0);
+static SENSOR_DEVICE_ATTR(control, S_IWUSR, NULL, ia_start_control, 1);
+static SENSOR_DEVICE_ATTR(data_size, S_IRUSR, ia_read_data_size, NULL, 2);
+static SENSOR_DEVICE_ATTR(counter, S_IWUSR | S_IRUSR, ia_get_counter,
+				ia_clear_counter, 0);
+static SENSOR_DEVICE_ATTR(fw_version, S_IRUSR, ia_get_version, NULL, 0);
+
+static struct bin_attribute bin_attr = {
+	.attr = { .name = "data", .mode = S_IRUSR },
+	.read = ia_read_data
+};
+static struct bin_attribute dbg_attr = {
+	.attr = { .name = "trace", .mode = S_IRUSR },
+	.read = ia_read_debug_data
+};
+
+const char *sensor_get_name(u8 sid, struct psh_ia_priv *psh_ia_data)
+{
+	if (sid == PSH_ITSELF)
+		return "_PSH_";
+	else if (is_port_sensor(sid))
+		return sensor_port_str[sid - PORT_SENSOR_BASE - 1];
+	else {
+		struct sensor_db *sensor_obj;
+
+		list_for_each_entry(sensor_obj,
+				&psh_ia_data->sensor_list, list) {
+			if (sid == sensor_obj->sid)
+				return sensor_obj->sensor_name;
+		}
+	}
+
+	return "?????";
+}
+
+static const char level_str[8][6] = {
+	"FATAL",
+	"ERROR",
+	" WARN",
+	" INFO",
+	"DEBUG",
+	"C_TRC",
+	"D_TRC",
+	"M_TRC",
+};
+
+static const char ctrace_str[12][6] = {
+	"SCORE",
+	"S_CFG",
+	"HW_PD",
+	"HW_PU",
+	"HW_PI",
+	" C_IN",
+	"C_OUT",
+	"IIDLE",
+	"OIDLE",
+	"SUSPD",
+	"RESUM",
+};
+
+static const char dtrace_str[5][6] = {
+	" MISC",
+	"D_RDY",
+	" D_IN",
+	"D_OUT",
+	"DDUMP"
+};
+
+static const char mtrace_str[3][6] = {
+	"TLOCK",
+	" LOCK",
+	"ULOCK",
+};
+
+const char *_get_evt_str(u16 level, u16 evt)
+{
+	if (level == PSH_DBG_CTRACE)
+		return ctrace_str[evt];
+	else if (level == PSH_DBG_DTRACE)
+		return dtrace_str[evt];
+	if (level == PSH_DBG_MTRACE)
+		return mtrace_str[evt];
+	else {
+		int level_index = 0;
+		u16 mid_val = level;
+
+		while (mid_val > 1) {
+			mid_val /= 2;
+			level_index++;
+		}
+		return level_str[level_index];
+	}
+}
+
+/*
+ * return value = 0	no valid data frame
+ * return value > 0	data frame
+ * return value < 0	error data frame
+ */
+int ia_handle_frame(struct psh_ia_priv *psh_ia_data, void *dbuf, int size)
+{
+	struct cmd_resp *resp = dbuf;
+	const struct snr_info *sinfo;
+	const struct resp_version *version;
+	const struct resp_cmd_ack *cmd_ack;
+	u32 curtime;
+	int len;
+	const char *sensor_name;
+	const char *event_name;
+	const char *context_name;
+	struct trace_data *out_data;
+	char msg_str[STR_BUFF_SIZE];
+
+	ia_sync_timestamp(psh_ia_data, 1);
+
+	switch (resp->type) {
+	case RESP_CMD_ACK:
+		cmd_ack = (struct resp_cmd_ack *)resp->buf;
+		if (!psh_ia_data->cmd_ack)
+			psh_err("Unexpected CMD_ACK recevied, %d\n", cmd_ack->cmd_id);
+		else if (cmd_ack->cmd_id == psh_ia_data->cmd_ack->cmd_id) {
+			psh_ia_data->cmd_ack->ret = cmd_ack->ret;
+			complete(&psh_ia_data->cmd_comp);
+		} else
+			psh_err("Unmatched CMD_ACK recevied, %d(EXP: %d)\n",
+					cmd_ack->cmd_id,
+					psh_ia_data->cmd_ack->cmd_id);
+		return 0;
+	case RESP_BIST_RESULT:
+		break;
+	case RESP_DEBUG_MSG:
+		ia_circ_dbg_put_data(psh_ia_data,
+				resp->buf, resp->data_len);
+		return 0;
+	case RESP_GET_STATUS:
+		sinfo = (struct snr_info *)resp->buf;
+		if (!resp->data_len)
+			ia_handle_snr_info(psh_ia_data, NULL);
+		else if (SNR_INFO_SIZE(sinfo) == resp->data_len)
+			ia_handle_snr_info(psh_ia_data, sinfo);
+		else {
+			psh_err("Wrong RESP_GET_STATUS!\n");
+			return 0;
+		}
+		break;
+	case RESP_DEBUG_GET_MASK:
+		memcpy(&psh_ia_data->dbg_mask, resp->buf,
+				sizeof(psh_ia_data->dbg_mask));
+		return 0;
+	case RESP_COUNTER:
+		memcpy(&psh_ia_data->counter, resp->buf,
+				sizeof(psh_ia_data->counter));
+		return 0;
+	case RESP_GET_VERSION:
+		version = (struct resp_version *)resp->buf;
+		if (likely(version->str_len < VERSION_STR_MAX_SIZE))
+			memcpy(psh_ia_data->version_str, version->str,
+					version->str_len + 1);
+		else {
+			memcpy(psh_ia_data->version_str, version->str,
+					VERSION_STR_MAX_SIZE - 1);
+			psh_ia_data->version_str[VERSION_STR_MAX_SIZE - 1]
+				= '\0';
+		}
+		return 0;
+	case RESP_TRACE_MSG:
+		out_data = (struct trace_data *)resp->buf;
+		while ((char *)out_data < resp->buf + resp->data_len) {
+			curtime = out_data->timestamp;
+			sensor_name = sensor_get_name(out_data->sensor_id,
+								psh_ia_data);
+			event_name = _get_evt_str(out_data->type,
+					out_data->event);
+			context_name = sensor_get_name(out_data->sensor_cnt,
+								psh_ia_data);
+
+			len = snprintf(msg_str, STR_BUFF_SIZE,
+						"[%u,%s,%s,%s]\n",
+						curtime, sensor_name,
+						event_name, context_name);
+
+			ia_circ_dbg_put_data(psh_ia_data,
+					msg_str, len);
+			out_data++;
+		}
+		return 0;
+	default:
+		break;
+	}
+
+	pr_debug("one DDR frame, data of sensor %d, size %d\n",
+			resp->sensor_id, size);
+	ia_circ_put_data(&psh_ia_data->circ, dbuf, size);
+	return size;
+}
+
+int psh_ia_common_init(struct device *dev, struct psh_ia_priv **data)
+{
+	int ret = -1;
+	struct psh_ia_priv *psh_ia_data;
+
+	psh_ia_data = kzalloc(sizeof(*psh_ia_data), GFP_KERNEL);
+	if (!psh_ia_data) {
+		dev_err(dev, "can not allocate psh_ia_data\n");
+		goto priv_err;
+	}
+	*data = psh_ia_data;
+
+	mutex_init(&psh_ia_data->cmd_mutex);
+	psh_ia_data->cmd_in_progress = CMD_INVALID;
+	mutex_init(&psh_ia_data->circ_dbg_mutex);
+	init_completion(&psh_ia_data->cmd_load_comp);
+	init_completion(&psh_ia_data->cmd_comp);
+	INIT_LIST_HEAD(&psh_ia_data->sensor_list);
+
+	psh_ia_data->circ.buf = kmalloc(CIRC_SIZE, GFP_KERNEL);
+	if (!psh_ia_data->circ.buf) {
+		dev_err(dev, "can not allocate circ buffer\n");
+		goto circ_err;
+	}
+
+	psh_ia_data->circ_dbg.buf = kmalloc(CIRC_SIZE, GFP_KERNEL);
+	if (!psh_ia_data->circ_dbg.buf) {
+		dev_err(dev, "can not allocate circ buffer\n");
+		goto dbg_err;
+	}
+
+	psh_ia_data->version_str = kmalloc(VERSION_STR_MAX_SIZE, GFP_KERNEL);
+	if (!psh_ia_data->version_str) {
+		dev_err(dev, "can not allocate version string\n");
+		goto ver_err;
+	}
+
+	psh_ia_data->status_bitmask = ((u32)-1) & ~SNR_RUNONLY_BITMASK;
+
+	dev_set_drvdata(dev, psh_ia_data);
+
+	ret = sysfs_create_file(&dev->kobj,
+			&sensor_dev_attr_status_mask.dev_attr.attr);
+	ret += sysfs_create_file(&dev->kobj,
+			&sensor_dev_attr_status_trig.dev_attr.attr);
+	ret += sysfs_create_file(&dev->kobj,
+			&sensor_dev_attr_debug.dev_attr.attr);
+	ret += sysfs_create_file(&dev->kobj,
+			&sensor_dev_attr_control.dev_attr.attr);
+	ret += sysfs_create_file(&dev->kobj,
+			&sensor_dev_attr_data_size.dev_attr.attr);
+	ret += sysfs_create_file(&dev->kobj,
+			&sensor_dev_attr_counter.dev_attr.attr);
+	ret += sysfs_create_file(&dev->kobj,
+			&sensor_dev_attr_fw_version.dev_attr.attr);
+	ret += sysfs_create_bin_file(&dev->kobj, &bin_attr);
+	ret += sysfs_create_bin_file(&dev->kobj, &dbg_attr);
+	if (ret) {
+		dev_err(dev, "can not create sysfs\n");
+		goto sysfs_err;
+	}
+
+	return 0;
+
+sysfs_err:
+	sysfs_remove_file(&dev->kobj,
+		&sensor_dev_attr_status_mask.dev_attr.attr);
+	sysfs_remove_file(&dev->kobj,
+		&sensor_dev_attr_status_trig.dev_attr.attr);
+	sysfs_remove_file(&dev->kobj,
+		&sensor_dev_attr_debug.dev_attr.attr);
+	sysfs_remove_file(&dev->kobj,
+		&sensor_dev_attr_control.dev_attr.attr);
+	sysfs_remove_file(&dev->kobj,
+		&sensor_dev_attr_data_size.dev_attr.attr);
+	sysfs_remove_file(&dev->kobj,
+		&sensor_dev_attr_counter.dev_attr.attr);
+	sysfs_remove_file(&dev->kobj,
+		&sensor_dev_attr_fw_version.dev_attr.attr);
+	sysfs_remove_bin_file(&dev->kobj, &bin_attr);
+	sysfs_remove_bin_file(&dev->kobj, &dbg_attr);
+
+	kfree(psh_ia_data->version_str);
+ver_err:
+	kfree(psh_ia_data->circ_dbg.buf);
+dbg_err:
+	kfree(psh_ia_data->circ.buf);
+circ_err:
+	kfree(psh_ia_data);
+priv_err:
+	return ret;
+}
+
+void psh_ia_common_deinit(struct device *dev)
+{
+	struct sensor_db *sensor_obj, *sensor_tmp;
+	struct psh_ia_priv *psh_ia_data =
+			(struct psh_ia_priv *)dev_get_drvdata(dev);
+
+	sysfs_remove_file(&dev->kobj,
+		&sensor_dev_attr_status_mask.dev_attr.attr);
+	sysfs_remove_file(&dev->kobj,
+		&sensor_dev_attr_status_trig.dev_attr.attr);
+	sysfs_remove_file(&dev->kobj,
+		&sensor_dev_attr_debug.dev_attr.attr);
+	sysfs_remove_file(&dev->kobj,
+		&sensor_dev_attr_control.dev_attr.attr);
+	sysfs_remove_file(&dev->kobj,
+		&sensor_dev_attr_data_size.dev_attr.attr);
+	sysfs_remove_file(&dev->kobj,
+		&sensor_dev_attr_counter.dev_attr.attr);
+	sysfs_remove_file(&dev->kobj,
+		&sensor_dev_attr_fw_version.dev_attr.attr);
+	sysfs_remove_bin_file(&dev->kobj, &bin_attr);
+	sysfs_remove_bin_file(&dev->kobj, &dbg_attr);
+
+	list_for_each_entry_safe(sensor_obj, sensor_tmp,
+				&psh_ia_data->sensor_list, list) {
+		list_del(&sensor_obj->list);
+		kfree(sensor_obj);
+	}
+	kfree(psh_ia_data->version_str);
+
+	kfree(psh_ia_data->circ.buf);
+
+	kfree(psh_ia_data->circ_dbg.buf);
+
+	kfree(psh_ia_data);
+}
+
+int psh_ia_comm_suspend(struct device *dev)
+{
+	struct psh_ia_priv *psh_ia_data =
+			(struct psh_ia_priv *)dev_get_drvdata(dev);
+	struct ia_cmd cmd = {
+		.cmd_id = CMD_IA_NOTIFY,
+	};
+	struct cmd_ia_notify_param *param =
+			(struct cmd_ia_notify_param *)cmd.param;
+	int ret;
+
+	param->id = IA_NOTIFY_SUSPEND;
+	ret = ia_send_cmd(psh_ia_data, &cmd, 4);
+	if (ret)
+		dev_warn(dev, "PSH: IA_NOTIFY_SUSPEND ret=%d\n", ret);
+	return 0;
+}
+
+int psh_ia_comm_resume(struct device *dev)
+{
+	struct psh_ia_priv *psh_ia_data =
+			(struct psh_ia_priv *)dev_get_drvdata(dev);
+	struct ia_cmd cmd = {
+		.cmd_id = CMD_IA_NOTIFY,
+	};
+	struct cmd_ia_notify_param *param =
+			(struct cmd_ia_notify_param *)cmd.param;
+	int ret;
+
+	param->id = IA_NOTIFY_RESUME;
+	ret = ia_send_cmd(psh_ia_data, &cmd, 4);
+	if (ret)
+		dev_warn(dev, "PSH: IA_NOTIFY_RESUME ret=%d\n", ret);
+	return 0;
+}
diff --git a/drivers/external_drivers/drivers/hwmon/psh_ia_common.h b/drivers/external_drivers/drivers/hwmon/psh_ia_common.h
new file mode 100644
index 0000000..76959a6
--- /dev/null
+++ b/drivers/external_drivers/drivers/hwmon/psh_ia_common.h
@@ -0,0 +1,324 @@
+#ifndef _PSH_IA_COMMON_H_
+#define _PSH_IA_COMMON_H_
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/circ_buf.h>
+#include <linux/mutex.h>
+#include <linux/completion.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/string.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+
+#define E_GENERAL		((int)(-1))
+#define E_NOMEM			((int)(-2))
+#define E_PARAM			((int)(-3))
+#define E_BUSY			((int)(-4))
+#define E_HW			((int)(-5))
+#define E_NOSUPPORT		((int)(-6))
+#define E_RPC_COMM		((int)(-7))
+#define E_LPE_COMM		((int)(-8))
+#define E_CMD_ASYNC		((int)(-9))
+#define E_CMD_NOACK		((int)(-10))
+#define E_LBUF_COMM		((int)(-11))
+
+#ifndef _CMD_ENGINE_H_
+enum cmd_id {
+	CMD_RESET = 0,
+	CMD_SETUP_DDR,
+	CMD_GET_SINGLE,
+	CMD_CFG_STREAM,
+	CMD_STOP_STREAM,
+	CMD_ADD_EVENT = 5,
+	CMD_CLEAR_EVENT,
+	CMD_SELF_TEST,
+	CMD_DEBUG,
+	CMD_CALIBRATION,
+	CMD_UPDATE_DDR = 10,
+	CMD_GET_STATUS,
+	CMD_SET_PROPERTY,
+	CMD_COUNTER,
+	CMD_GET_VERSION,
+	CMD_IA_NOTIFY = 15,
+	CMD_ID_MAX,
+	CMD_INVALID = 244,
+	CMD_FW_UPDATE = 255,
+};
+
+enum resp_type {
+	RESP_CMD_ACK,
+	RESP_GET_TIME,
+	RESP_GET_SINGLE,
+	RESP_STREAMING,
+	RESP_DEBUG_MSG,
+	RESP_DEBUG_GET_MASK = 5,
+	RESP_GYRO_CAL_RESULT,
+	RESP_BIST_RESULT,
+	RESP_ADD_EVENT,
+	RESP_CLEAR_EVENT,
+	RESP_EVENT = 10,
+	RESP_GET_STATUS,
+	RESP_COMP_CAL_RESULT,
+	RESP_COUNTER,
+	RESP_GET_VERSION,
+	RESP_TRACE_MSG,
+};
+
+#define CMD_PARAM_MAX_SIZE ((u16)60)
+struct ia_cmd {
+	u8 tran_id;
+	u8 cmd_id;
+	u8 sensor_id;
+	char param[CMD_PARAM_MAX_SIZE];
+} __packed;
+
+struct cmd_resp {
+	u8 tran_id;
+	u8 type;
+	u8 sensor_id;
+	u16 data_len;
+	char buf[0];
+} __packed;
+
+#define IA_NOTIFY_SUSPEND ((u8)0x1)
+#define IA_NOTIFY_RESUME  ((u8)0x2)
+#define IA_NOTIFY_TIMESTAMP_SYNC ((u8)0x3)
+typedef s64 timestamp_t;
+struct cmd_ia_notify_param {
+	u8 id;
+	char extra[0];
+} __attribute__ ((packed));
+
+
+struct resp_cmd_ack {
+	u8 cmd_id;
+	int ret;
+	char extra[0];
+} __attribute__ ((packed));
+
+#define SCMD_DEBUG_SET_MASK ((u16)0x1)
+#define SCMD_DEBUG_GET_MASK ((u16)0x2)
+struct cmd_debug_param {
+	u16 sub_cmd;
+	u16 mask_out;
+	u16 mask_level;
+} __packed;
+
+struct get_status_param {
+	u32 snr_bitmask;
+} __packed;
+
+struct resp_debug_get_mask {
+	u16 mask_out;
+	u16 mask_level;
+} __packed;
+
+#define SCMD_GET_COUNTER ((u16)0x1)
+#define SCMD_CLEAR_COUNTER ((u16)0x2)
+struct cmd_counter_param {
+	u16 sub_cmd;
+} __packed;
+
+struct resp_counter {
+	u32 gpio_counter;
+	u32 dma_counter;
+	u32 i2c_counter;
+	u32 print_counter;
+} __packed;
+
+#define VERSION_STR_MAX_SIZE ((u16)256)
+struct resp_version {
+	u8 str_len;
+	char str[0];
+} __packed;
+
+#define LINK_AS_CLIENT		(0)
+#define LINK_AS_MONITOR		(1)
+#define LINK_AS_REPORTER	(2)
+struct link_info {
+	u8 sid;
+	u8 ltype;
+	u16 rpt_freq;
+} __packed;
+
+#define SNR_NAME_MAX_LEN 6
+struct snr_info {
+	u8 id;
+	u8 status;
+	u16 freq;
+	u16 data_cnt;
+	u16 bit_cfg;
+	u16 priv;
+	u16 attri;
+
+	u16 freq_max;
+	char name[SNR_NAME_MAX_LEN];
+
+	u8 health;
+	u8 link_num;
+	struct link_info linfo[0];
+} __packed;
+#define SNR_INFO_SIZE(sinfo) (sizeof(struct snr_info) \
+		+ sinfo->link_num * sizeof(struct link_info))
+#define SNR_INFO_MAX_SIZE 256
+
+#define BUF_IA_DDR_SIZE 8192
+
+#endif
+
+
+#ifndef _SENSOR_DEF_H
+struct sensor_cfg_param {
+	u16 sample_freq; /* HZ */
+	u16 buff_delay; /* max time(ms) for data bufferring */
+	u16 bit_cfg;
+	char extra[0];
+} __packed;
+
+#define SNR_RUNONLY_BITMASK ((u32)0x1 << 0)
+
+#endif
+
+
+#ifndef _LOOP_BUFFER_H_
+struct psh_ia_priv;
+typedef int (*update_finished_f)(struct psh_ia_priv *psh_ia_data,
+					u16 offset);
+
+struct loop_buffer {
+	int in_reading;
+	u8 *addr;
+	u16 length;
+
+	u16 off_head;
+	u16 off_tail;
+
+	update_finished_f update_finished;
+};
+
+#define LBUF_CELL_SIGN ((u16)0x4853)
+#define LBUF_DISCARD_SIGN ((u16)0x4944)
+
+struct frame_head {
+	u16 sign;
+	u16 length;
+};
+
+#define LBUF_MAX_CELL_SIZE ((u16)4096)
+#define LBUF_MAX_DATA_SIZE (LBUF_MAX_CELL_SIZE \
+	- 4 - 2 * sizeof(struct frame_head)\
+	- sizeof(struct cmd_resp))
+
+#define size_align(size) ((size % 4) ? (size + 4 - (size % 4)) : size)
+#define frame_size(size) (size_align(size) + \
+		sizeof(struct frame_head))
+#endif
+
+#define PSH2IA_CHANNEL0	0
+#define PSH2IA_CHANNEL1	1
+#define PSH2IA_CHANNEL2	2
+#define PSH2IA_CHANNEL3	3
+
+#define CIRC_SIZE (1024 * 64)
+
+#define STR_BUFF_SIZE 256
+
+struct psh_ia_priv {
+	struct loop_buffer *lbuf; /* loop bufer, if have */
+	struct circ_buf circ, circ_dbg;	/* circ buf for sysfs data node */
+	struct resp_debug_get_mask dbg_mask;
+	struct resp_counter counter;
+	struct resp_cmd_ack *cmd_ack;
+	char *version_str;
+	struct mutex cmd_mutex;
+	struct mutex circ_dbg_mutex;
+	struct completion cmd_load_comp;
+	struct completion cmd_comp;
+	struct list_head sensor_list;
+	u8 cmd_in_progress;
+	u32 load_in_progress;
+	u32 status_bitmask;
+
+	void *platform_priv;
+};
+
+/* exports */
+void ia_lbuf_read_init(struct loop_buffer *lbuf,
+		u8 *buf, u16 size, update_finished_f uf);
+void ia_lbuf_read_reset(struct loop_buffer *lbuf);
+int ia_lbuf_read_next(struct psh_ia_priv *psh_ia_data,
+			struct loop_buffer *lbuf,
+			u8 **buf, u16 *size);
+int ia_send_cmd(struct psh_ia_priv *psh_ia_data,
+		struct ia_cmd *cmd, int len);
+int psh_ia_common_init(struct device *dev, struct psh_ia_priv **data);
+void psh_ia_common_deinit(struct device *dev);
+int ia_handle_frame(struct psh_ia_priv *psh_ia_data, void *dbuf, int size);
+int psh_ia_comm_suspend(struct device *dev);
+int psh_ia_comm_resume(struct device *dev);
+
+
+
+/* imports */
+/* need implemented by user */
+int do_setup_ddr(struct device *dev);
+int process_send_cmd(struct psh_ia_priv *psh_ia_data,
+			int ch, struct ia_cmd *cmd, int len);
+
+#define PSH_ITSELF     (PHY_SENSOR_BASE) /* means PSH itself */
+#define PORT_SENSOR_NUM (PORT_SENSOR_MAX_NUM - PORT_SENSOR_BASE - 1)
+#define PORT_SENSOR_INDEX(x) ( \
+		(x > PORT_SENSOR_BASE && x < PORT_SENSOR_MAX_NUM) \
+		? (x - PORT_SENSOR_BASE - 1) : 0)
+
+
+#define PSH_DBG_ALL     ((u16)-1)
+#define PSH_DBG_FATAL   ((u16)(0x1 << 0x0))
+#define PSH_DBG_ERR     ((u16)(0x1 << 0x1))
+#define PSH_DBG_WARN    ((u16)(0x1 << 0x2))
+#define PSH_DBG_INFO    ((u16)(0x1 << 0x3))
+#define PSH_DBG_DBG     ((u16)(0x1 << 0x4))
+#define PSH_DBG_CTRACE  ((u16)(0x1 << 0x5))     /* config path tracing */
+#define PSH_DBG_DTRACE  ((u16)(0x1 << 0x6))     /* data path tracing */
+#define PSH_DBG_MTRACE  ((u16)(0x1 << 0x7))     /* mutex_exec tracing */
+
+/* port sensor is fixed, other sensor can be created dynamically */
+enum sensor_type {
+	PHY_SENSOR_BASE = 0,
+	PORT_SENSOR_BASE = 200,
+	CS_PORT,        /* port for streaming configuration and uploading */
+	GS_PORT,        /* port for get_single configuration and uploading */
+	EVT_PORT,       /* port for event configuration and uploading */
+	PORT_SENSOR_MAX_NUM,
+};
+
+static const char sensor_port_str[PORT_SENSOR_NUM][SNR_NAME_MAX_LEN] = {
+	"CSPRT",
+	"GSPRT",
+	"EVPRT",
+};
+
+struct sensor_db {
+	u8 sid;
+	char sensor_name[SNR_NAME_MAX_LEN];
+	struct list_head list;
+} __packed;
+
+struct trace_data {
+	u32 timestamp;
+	u16 type;
+	u16 event;
+	u8 sensor_id;
+	u8 sensor_cnt;
+} __packed;
+
+#define psh_err(fmt, arg...) pr_err("psh: "fmt, ## arg)
+#define psh_warn(fmt, arg...) pr_warn("psh: "fmt, ## arg)
+#define psh_debug(fmt, arg...) pr_debug("psh: "fmt, ## arg)
+#endif
diff --git a/drivers/external_drivers/drivers/i2c/Makefile b/drivers/external_drivers/drivers/i2c/Makefile
new file mode 100644
index 0000000..0717b75
--- /dev/null
+++ b/drivers/external_drivers/drivers/i2c/Makefile
@@ -0,0 +1 @@
+obj-y += busses/
diff --git a/drivers/external_drivers/drivers/i2c/busses/Kconfig b/drivers/external_drivers/drivers/i2c/busses/Kconfig
new file mode 100644
index 0000000..633041f
--- /dev/null
+++ b/drivers/external_drivers/drivers/i2c/busses/Kconfig
@@ -0,0 +1,40 @@
+#
+# I2C buses device configuration
+#
+
+config I2C_DESIGNWARE_CORE_FORK
+	tristate "Synopsys DesignWare Controller"
+	help
+	  If you say yes to this option, support will be included for the
+	  Synopsys DesignWare adapter core. Only master mode is supported.
+          You need choose platform or pci driver for its driver support.
+
+	  This driver can also be built as a module.  If so, the module
+	  will be called i2c-designware-core.
+
+config I2C_DESIGNWARE_PCI_FORK
+	tristate "Synopsys DesignWare PCI"
+	depends on PCI && I2C_DESIGNWARE_CORE_FORK
+	help
+	  If you say yes to this option, support will be included for the
+	  Synopsys DesignWare I2C adapter. Only master mode is supported.
+
+	  This driver can also be built as a module.  If so, the module
+	  will be called i2c-designware-pci.
+
+
+config I2C_DW_SPEED_MODE_DEBUG
+	bool "Designware I2C Speed Mode Debug"
+	depends on I2C_DESIGNWARE_CORE_FORK
+	help
+	  If you say yes to this option, you could runtime change the I2C
+	  controller bus speed mode.
+
+config I2C_PMIC
+	bool "PMIC I2C Adapter"
+	depends on INTEL_SCU_IPC
+	help
+	  Say Y here if you have PMIC I2C adapter.
+
+	  PMIC-I2C adapter driver to handle I2C transactions
+	  in the PMIC's I2C bus.
diff --git a/drivers/external_drivers/drivers/i2c/busses/Makefile b/drivers/external_drivers/drivers/i2c/busses/Makefile
new file mode 100644
index 0000000..803777c
--- /dev/null
+++ b/drivers/external_drivers/drivers/i2c/busses/Makefile
@@ -0,0 +1,12 @@
+#
+# Makefile for the i2c bus drivers.
+#
+
+obj-$(CONFIG_I2C_DESIGNWARE_CORE_FORK)	+= i2c-designware-core.o
+obj-$(CONFIG_I2C_DESIGNWARE_PCI_FORK)	+= i2c-designware-pci.o
+i2c-designware-pci-objs := i2c-designware-pcidrv.o
+i2c-designware-platform-objs := i2c-designware-platdrv.o
+
+obj-$(CONFIG_I2C_PMIC)          += i2c-pmic.o
+
+ccflags-$(CONFIG_I2C_DEBUG_BUS) := -DDEBUG
diff --git a/drivers/external_drivers/drivers/i2c/busses/i2c-designware-core.c b/drivers/external_drivers/drivers/i2c/busses/i2c-designware-core.c
new file mode 100644
index 0000000..742db98
--- /dev/null
+++ b/drivers/external_drivers/drivers/i2c/busses/i2c-designware-core.c
@@ -0,0 +1,1563 @@
+/*
+ * Synopsys DesignWare I2C adapter driver (master only).
+ *
+ * Based on the TI DAVINCI I2C adapter driver.
+ *
+ * Copyright (C) 2006 Texas Instruments.
+ * Copyright (C) 2007 MontaVista Software Inc.
+ * Copyright (C) 2009 Provigent Ltd.
+ *
+ * ----------------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * ----------------------------------------------------------------------------
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/pm_runtime.h>
+#include <linux/nmi.h>
+#include <linux/delay.h>
+#include <linux/semaphore.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/acpi.h>
+#include "i2c-designware-core.h"
+#include <linux/intel_mid_pm.h>
+
+
+int i2c_dw_init(struct dw_i2c_dev *dev);
+int i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num);
+u32 i2c_dw_func(struct i2c_adapter *adap);
+void i2c_dw_enable(struct dw_i2c_dev *dev);
+void i2c_dw_disable(struct dw_i2c_dev *dev);
+irqreturn_t i2c_dw_isr(int this_irq, void *dev_id);
+void i2c_dw_disable_int(struct dw_i2c_dev *dev);
+void i2c_dw_clear_int(struct dw_i2c_dev *dev);
+static void dw_i2c_acpi_setup(struct device *pdev, struct dw_i2c_dev *dev);
+u32 i2c_dw_read_comp_param(struct dw_i2c_dev *dev);
+
+static char *abort_sources[] = {
+	[ABRT_7B_ADDR_NOACK] =
+		"slave address not acknowledged (7bit mode)",
+	[ABRT_10ADDR1_NOACK] =
+		"first address byte not acknowledged (10bit mode)",
+	[ABRT_10ADDR2_NOACK] =
+		"second address byte not acknowledged (10bit mode)",
+	[ABRT_TXDATA_NOACK] =
+		"data not acknowledged",
+	[ABRT_GCALL_NOACK] =
+		"no acknowledgement for a general call",
+	[ABRT_GCALL_READ] =
+		"read after general call",
+	[ABRT_SBYTE_ACKDET] =
+		"start byte acknowledged",
+	[ABRT_SBYTE_NORSTRT] =
+		"trying to send start byte when restart is disabled",
+	[ABRT_10B_RD_NORSTRT] =
+		"trying to read when restart is disabled (10bit mode)",
+	[ABRT_MASTER_DIS] =
+		"trying to use disabled adapter",
+	[ARB_LOST] =
+		"lost arbitration",
+};
+
+u32 dw_readl(struct dw_i2c_dev *dev, int offset)
+{
+	u32 value = readl(dev->base + offset);
+
+	if (dev->swab)
+		return swab32(value);
+	else
+		return value;
+}
+
+void dw_writel(struct dw_i2c_dev *dev, u32 b, int offset)
+{
+	if (dev->swab)
+		b = swab32(b);
+
+	writel(b, dev->base + offset);
+}
+
+static void i2c_dw_dump(struct dw_i2c_dev *dev)
+{
+	u32 value;
+
+	dev_err(dev->dev, "===== REGISTER DUMP (i2c) =====\n");
+	value = dw_readl(dev, DW_IC_CON);
+	dev_err(dev->dev, "DW_IC_CON:               0x%x\n", value);
+	value = dw_readl(dev, DW_IC_TAR);
+	dev_err(dev->dev, "DW_IC_TAR:               0x%x\n", value);
+	value = dw_readl(dev, DW_IC_SS_SCL_HCNT);
+	dev_err(dev->dev, "DW_IC_SS_SCL_HCNT:       0x%x\n", value);
+	value = dw_readl(dev, DW_IC_SS_SCL_LCNT);
+	dev_err(dev->dev, "DW_IC_SS_SCL_LCNT:       0x%x\n", value);
+	value = dw_readl(dev, DW_IC_FS_SCL_HCNT);
+	dev_err(dev->dev, "DW_IC_FS_SCL_HCNT:       0x%x\n", value);
+	value = dw_readl(dev, DW_IC_FS_SCL_LCNT);
+	dev_err(dev->dev, "DW_IC_FS_SCL_LCNT:       0x%x\n", value);
+	value = dw_readl(dev, DW_IC_INTR_STAT);
+	dev_err(dev->dev, "DW_IC_INTR_STAT:         0x%x\n", value);
+	value = dw_readl(dev, DW_IC_INTR_MASK);
+	dev_err(dev->dev, "DW_IC_INTR_MASK:         0x%x\n", value);
+	value = dw_readl(dev, DW_IC_RAW_INTR_STAT);
+	dev_err(dev->dev, "DW_IC_RAW_INTR_STAT:     0x%x\n", value);
+	value = dw_readl(dev, DW_IC_RX_TL);
+	dev_err(dev->dev, "DW_IC_RX_TL:             0x%x\n", value);
+	value = dw_readl(dev, DW_IC_TX_TL);
+	dev_err(dev->dev, "DW_IC_TX_TL:             0x%x\n", value);
+	value = dw_readl(dev, DW_IC_ENABLE);
+	dev_err(dev->dev, "DW_IC_ENABLE:            0x%x\n", value);
+	value = dw_readl(dev, DW_IC_STATUS);
+	dev_err(dev->dev, "DW_IC_STATUS:            0x%x\n", value);
+	value = dw_readl(dev, DW_IC_TXFLR);
+	dev_err(dev->dev, "DW_IC_TXFLR:             0x%x\n", value);
+	value = dw_readl(dev, DW_IC_RXFLR);
+	dev_err(dev->dev, "DW_IC_RXFLR:             0x%x\n", value);
+	value = dw_readl(dev, DW_IC_TX_ABRT_SOURCE);
+	dev_err(dev->dev, "DW_IC_TX_ABRT_SOURCE:    0x%x\n", value);
+	value = dw_readl(dev, DW_IC_DATA_CMD);
+	dev_err(dev->dev, "DW_IC_DATA_CMD:          0x%x\n", value);
+	dev_err(dev->dev, "===============================\n");
+}
+
+/* VLV2 PCI config space memio access to the controller is
+* enabled by
+* 1. Reset 0x804 and 0x808 offset from base address.
+* 2. Set 0x804 offset from base address to 0x3.
+*/
+static void vlv2_reset(struct dw_i2c_dev *dev)
+{
+	int i;
+
+	for (i = 0; i < 10; i++) {
+		dw_writel(dev, 0, 0x804);
+		dw_writel(dev, 0, 0x808);
+		usleep_range(10, 100);
+
+		dw_writel(dev, 3, 0x804);
+		usleep_range(10, 100);
+
+		if (dw_readl(dev, DW_IC_COMP_TYPE) != DW_IC_COMP_TYPE_VALUE)
+			continue;
+
+		return;
+	}
+
+	dev_warn(dev->dev, "vlv2 I2C reset failed\n");
+}
+
+static int mfld_i2c_scl_cfg(struct dw_i2c_dev *dev)
+{
+	dw_writel(dev, PNW_SS_SCLK_HCNT, DW_IC_SS_SCL_HCNT);
+	dw_writel(dev, PNW_SS_SCLK_LCNT, DW_IC_SS_SCL_LCNT);
+
+	dw_writel(dev, PNW_FS_SCLK_HCNT, DW_IC_FS_SCL_HCNT);
+	dw_writel(dev, PNW_FS_SCLK_LCNT, DW_IC_FS_SCL_LCNT);
+
+	return 0;
+}
+
+static int ctp_i2c_scl_cfg(struct dw_i2c_dev *dev)
+{
+	dw_writel(dev, CLV_SS_SCLK_HCNT, DW_IC_SS_SCL_HCNT);
+	dw_writel(dev, CLV_SS_SCLK_LCNT, DW_IC_SS_SCL_LCNT);
+
+	dw_writel(dev, CLV_FS_SCLK_HCNT, DW_IC_FS_SCL_HCNT);
+	dw_writel(dev, CLV_FS_SCLK_LCNT, DW_IC_FS_SCL_LCNT);
+
+	return 0;
+}
+
+static int merr_i2c_scl_cfg(struct dw_i2c_dev *dev)
+{
+	dw_writel(dev, MERR_SS_SCLK_HCNT, DW_IC_SS_SCL_HCNT);
+	dw_writel(dev, MERR_SS_SCLK_LCNT, DW_IC_SS_SCL_LCNT);
+
+	dw_writel(dev, MERR_FS_SCLK_HCNT, DW_IC_FS_SCL_HCNT);
+	dw_writel(dev, MERR_FS_SCLK_LCNT, DW_IC_FS_SCL_LCNT);
+
+	dw_writel(dev, MERR_HS_SCLK_HCNT, DW_IC_HS_SCL_HCNT);
+	dw_writel(dev, MERR_HS_SCLK_LCNT, DW_IC_HS_SCL_LCNT);
+
+	return 0;
+}
+
+static int vlv2_i2c_scl_cfg(struct dw_i2c_dev *dev)
+{
+	dw_writel(dev, VLV2_SS_SCLK_HCNT, DW_IC_SS_SCL_HCNT);
+	dw_writel(dev, VLV2_SS_SCLK_LCNT, DW_IC_SS_SCL_LCNT);
+
+	if (dev->fast_plus) {
+		dw_writel(dev, VLV2_FS_P_SCLK_HCNT, DW_IC_FS_SCL_HCNT);
+		dw_writel(dev, VLV2_FS_P_SCLK_LCNT, DW_IC_FS_SCL_LCNT);
+	} else {
+		dw_writel(dev, VLV2_FS_SCLK_HCNT, DW_IC_FS_SCL_HCNT);
+		dw_writel(dev, VLV2_FS_SCLK_LCNT, DW_IC_FS_SCL_LCNT);
+	}
+
+	dw_writel(dev, VLV2_HS_SCLK_HCNT, DW_IC_HS_SCL_HCNT);
+	dw_writel(dev, VLV2_HS_SCLK_LCNT, DW_IC_HS_SCL_LCNT);
+
+	return 0;
+}
+
+static int chv_i2c_scl_cfg(struct dw_i2c_dev *dev)
+{
+	dw_writel(dev, CHV_SS_SCLK_HCNT, DW_IC_SS_SCL_HCNT);
+	dw_writel(dev, CHV_SS_SCLK_LCNT, DW_IC_SS_SCL_LCNT);
+
+	if (dev->fast_plus) {
+		dw_writel(dev, CHV_FS_P_SCLK_HCNT, DW_IC_FS_SCL_HCNT);
+		dw_writel(dev, CHV_FS_P_SCLK_LCNT, DW_IC_FS_SCL_LCNT);
+	} else {
+		dw_writel(dev, CHV_FS_SCLK_HCNT, DW_IC_FS_SCL_HCNT);
+		dw_writel(dev, CHV_FS_SCLK_LCNT, DW_IC_FS_SCL_LCNT);
+	}
+
+	return 0;
+}
+
+static struct  dw_controller  dw_controllers[] = {
+	[moorestown_0] = {
+		.bus_num     = 0,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.tx_fifo_depth = 32,
+		.rx_fifo_depth = 32,
+		.clk_khz      = 25000,
+	},
+	[moorestown_1] = {
+		.bus_num     = 1,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.tx_fifo_depth = 32,
+		.rx_fifo_depth = 32,
+		.clk_khz      = 25000,
+	},
+	[moorestown_2] = {
+		.bus_num     = 2,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.tx_fifo_depth = 32,
+		.rx_fifo_depth = 32,
+		.clk_khz      = 25000,
+	},
+	[medfield_0] = {
+		.bus_num     = 0,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.tx_fifo_depth = 256,
+		.rx_fifo_depth = 256,
+		.clk_khz      = 17000,
+		.scl_cfg = mfld_i2c_scl_cfg,
+	},
+	[medfield_1] = {
+		.bus_num     = 1,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_STD,
+		.tx_fifo_depth = 256,
+		.rx_fifo_depth = 256,
+		.clk_khz      = 20500,
+		.scl_cfg = mfld_i2c_scl_cfg,
+	},
+	[medfield_2] = {
+		.bus_num     = 2,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.tx_fifo_depth = 256,
+		.rx_fifo_depth = 256,
+		.clk_khz      = 17000,
+		.scl_cfg = mfld_i2c_scl_cfg,
+	},
+	[medfield_3] = {
+		.bus_num     = 3,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_STD,
+		.tx_fifo_depth = 256,
+		.rx_fifo_depth = 256,
+		.clk_khz      = 20500,
+		.scl_cfg = mfld_i2c_scl_cfg,
+	},
+	[medfield_4] = {
+		.bus_num     = 4,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.tx_fifo_depth = 256,
+		.rx_fifo_depth = 256,
+		.clk_khz      = 17000,
+		.scl_cfg = mfld_i2c_scl_cfg,
+	},
+	[medfield_5] = {
+		.bus_num     = 5,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.tx_fifo_depth = 256,
+		.rx_fifo_depth = 256,
+		.clk_khz      = 17000,
+		.scl_cfg = mfld_i2c_scl_cfg,
+	},
+
+	[cloverview_0] = {
+		.bus_num     = 0,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.tx_fifo_depth = 256,
+		.rx_fifo_depth = 256,
+		.clk_khz      = 17000,
+		.scl_cfg = ctp_i2c_scl_cfg,
+	},
+	[cloverview_1] = {
+		.bus_num     = 1,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.tx_fifo_depth = 256,
+		.rx_fifo_depth = 256,
+		.clk_khz      = 17000,
+		.scl_cfg = ctp_i2c_scl_cfg,
+	},
+	[cloverview_2] = {
+		.bus_num     = 2,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.tx_fifo_depth = 256,
+		.rx_fifo_depth = 256,
+		.clk_khz      = 17000,
+		.scl_cfg = ctp_i2c_scl_cfg,
+	},
+	[cloverview_3] = {
+		.bus_num     = 3,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_STD,
+		.tx_fifo_depth = 256,
+		.rx_fifo_depth = 256,
+		.clk_khz      = 20500,
+		.scl_cfg = ctp_i2c_scl_cfg,
+	},
+	[cloverview_4] = {
+		.bus_num     = 4,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.tx_fifo_depth = 256,
+		.rx_fifo_depth = 256,
+		.clk_khz      = 17000,
+		.scl_cfg = ctp_i2c_scl_cfg,
+	},
+	[cloverview_5] = {
+		.bus_num     = 5,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.tx_fifo_depth = 256,
+		.rx_fifo_depth = 256,
+		.clk_khz      = 17000,
+		.scl_cfg = ctp_i2c_scl_cfg,
+	},
+
+	[merrifield_0] = {
+		.bus_num     = 1,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.tx_fifo_depth = 64,
+		.rx_fifo_depth = 64,
+		.enable_stop = 1,
+		.scl_cfg = merr_i2c_scl_cfg,
+	},
+	[merrifield_1] = {
+		.bus_num     = 2,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_STD,
+		.tx_fifo_depth = 64,
+		.rx_fifo_depth = 64,
+		.enable_stop = 1,
+		.scl_cfg = merr_i2c_scl_cfg,
+	},
+	[merrifield_2] = {
+		.bus_num     = 3,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.tx_fifo_depth = 64,
+		.rx_fifo_depth = 64,
+		.enable_stop = 1,
+		.scl_cfg = merr_i2c_scl_cfg,
+	},
+	[merrifield_3] = {
+		.bus_num     = 4,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.tx_fifo_depth = 64,
+		.rx_fifo_depth = 64,
+		.enable_stop = 1,
+		.scl_cfg = merr_i2c_scl_cfg,
+	},
+	[merrifield_4] = {
+		.bus_num     = 5,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.tx_fifo_depth = 64,
+		.rx_fifo_depth = 64,
+		.enable_stop = 1,
+		.scl_cfg = merr_i2c_scl_cfg,
+	},
+	[merrifield_5] = {
+		.bus_num     = 6,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.tx_fifo_depth = 64,
+		.rx_fifo_depth = 64,
+		.enable_stop = 1,
+		.scl_cfg = merr_i2c_scl_cfg,
+	},
+	[merrifield_6] = {
+		.bus_num     = 7,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.tx_fifo_depth = 64,
+		.rx_fifo_depth = 64,
+		.enable_stop = 1,
+		.scl_cfg = merr_i2c_scl_cfg,
+	},
+	[valleyview_1] = {
+		.bus_num     = 1,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.enable_stop = 1,
+		.scl_cfg = vlv2_i2c_scl_cfg,
+		.reset = vlv2_reset,
+		.share_irq = 1,
+		.acpi_name = "\\_SB.I2C1"
+	},
+	[valleyview_2] = {
+		.bus_num     = 2,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.enable_stop = 1,
+		.scl_cfg = vlv2_i2c_scl_cfg,
+		.reset = vlv2_reset,
+		.share_irq = 1,
+		.acpi_name = "\\_SB.I2C2"
+	},
+	[valleyview_3] = {
+		.bus_num     = 3,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.enable_stop = 1,
+		.scl_cfg = vlv2_i2c_scl_cfg,
+		.reset = vlv2_reset,
+		.share_irq = 1,
+		.acpi_name = "\\_SB.I2C3"
+	},
+	[valleyview_4] = {
+		.bus_num     = 4,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.enable_stop = 1,
+		.scl_cfg = vlv2_i2c_scl_cfg,
+		.reset = vlv2_reset,
+		.share_irq = 1,
+		.acpi_name = "\\_SB.I2C4"
+	},
+	[valleyview_5] = {
+		.bus_num     = 5,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.enable_stop = 1,
+		.scl_cfg = vlv2_i2c_scl_cfg,
+		.reset = vlv2_reset,
+		.share_irq = 1,
+		.acpi_name = "\\_SB.I2C5"
+	},
+	[valleyview_6] = {
+		.bus_num     = 6,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.enable_stop = 1,
+		.scl_cfg = vlv2_i2c_scl_cfg,
+		.reset = vlv2_reset,
+		.share_irq = 1,
+		.acpi_name = "\\_SB.I2C6"
+	},
+	[valleyview_7] = {
+		.bus_num     = 7,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.enable_stop = 1,
+		.scl_cfg = vlv2_i2c_scl_cfg,
+		.reset = vlv2_reset,
+		.share_irq = 1,
+		.acpi_name = "\\_SB.I2C7"
+	},
+	[cherryview_1] = {
+		.bus_num     = 1,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.enable_stop = 1,
+		.scl_cfg = chv_i2c_scl_cfg,
+		.reset = vlv2_reset,
+		.share_irq = 1,
+	},
+	[cherryview_2] = {
+		.bus_num     = 2,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.enable_stop = 1,
+		.scl_cfg = chv_i2c_scl_cfg,
+		.reset = vlv2_reset,
+		.share_irq = 1,
+	},
+	[cherryview_3] = {
+		.bus_num     = 3,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.enable_stop = 1,
+		.scl_cfg = chv_i2c_scl_cfg,
+		.reset = vlv2_reset,
+		.share_irq = 1,
+	},
+	[cherryview_4] = {
+		.bus_num     = 4,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.enable_stop = 1,
+		.scl_cfg = chv_i2c_scl_cfg,
+		.reset = vlv2_reset,
+		.share_irq = 1,
+	},
+	[cherryview_5] = {
+		.bus_num     = 5,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.enable_stop = 1,
+		.scl_cfg = chv_i2c_scl_cfg,
+		.reset = vlv2_reset,
+		.share_irq = 1,
+	},
+	[cherryview_6] = {
+		.bus_num     = 6,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.enable_stop = 1,
+		.scl_cfg = chv_i2c_scl_cfg,
+		.reset = vlv2_reset,
+		.share_irq = 1,
+	},
+	[cherryview_7] = {
+		.bus_num     = 7,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.enable_stop = 1,
+		.scl_cfg = chv_i2c_scl_cfg,
+		.reset = vlv2_reset,
+		.share_irq = 1,
+	}
+};
+
+static struct i2c_algorithm i2c_dw_algo = {
+	.master_xfer	= i2c_dw_xfer,
+	.functionality	= i2c_dw_func,
+};
+
+int i2c_dw_suspend(struct dw_i2c_dev *dev, bool runtime)
+{
+	if (!runtime) {
+		if (down_trylock(&dev->lock))
+			return -EBUSY;
+		dev->status &= ~STATUS_POWERON;
+	}
+	if (!dev->shared_host)
+		i2c_dw_disable(dev);
+
+	return 0;
+}
+EXPORT_SYMBOL(i2c_dw_suspend);
+
+int i2c_dw_resume(struct dw_i2c_dev *dev, bool runtime)
+{
+	if (!dev->shared_host)
+		i2c_dw_init(dev);
+	if (!runtime) {
+		dev->status |= STATUS_POWERON;
+		up(&dev->lock);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(i2c_dw_resume);
+
+static u32 i2c_dw_get_clk_rate_khz(struct dw_i2c_dev *dev)
+{
+	return dev->controller->clk_khz;
+}
+
+static ssize_t show_bus_num(struct device *dev, struct device_attribute *attr,
+							char *buf)
+{
+	struct dw_i2c_dev *i2c = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", i2c->controller->bus_num);
+}
+
+#define MODE_NAME_SIZE	10
+
+static ssize_t store_mode(struct device *dev,
+			  struct device_attribute *attr,
+			  const char *buf, size_t size)
+{
+	struct dw_i2c_dev *i2c = dev_get_drvdata(dev);
+	int ret = 0;
+	char mode[MODE_NAME_SIZE];
+
+	if (sscanf(buf, "%9s", mode) != 1) {
+		dev_err(dev, "input I2C speed mode: std/fast\n");
+		return -EINVAL;
+	}
+
+	down(&i2c->lock);
+	pm_runtime_get_sync(i2c->dev);
+
+	if (!strncmp("std", mode, MODE_NAME_SIZE)) {
+		i2c->master_cfg &= ~DW_IC_SPEED_MASK;
+		i2c->master_cfg |= DW_IC_CON_SPEED_STD;
+	} else if (!strncmp("fast", mode, MODE_NAME_SIZE)) {
+		i2c->fast_plus = 0;
+		i2c->master_cfg &= ~DW_IC_SPEED_MASK;
+		i2c->master_cfg |= DW_IC_CON_SPEED_FAST;
+	} else if (!strncmp("fast+", mode, MODE_NAME_SIZE)) {
+		i2c->fast_plus = 1;
+		i2c->master_cfg &= ~DW_IC_SPEED_MASK;
+		i2c->master_cfg |= DW_IC_CON_SPEED_FAST;
+	} else if (!strncmp("high", mode, MODE_NAME_SIZE)) {
+		i2c->master_cfg &= ~DW_IC_SPEED_MASK;
+		i2c->master_cfg |= DW_IC_CON_SPEED_HIGH;
+	} else {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* init to configure the i2c master */
+	i2c_dw_init(i2c);
+
+	dev_info(dev, "I2C speed mode changed to %s\n", mode);
+
+out:
+	pm_runtime_mark_last_busy(i2c->dev);
+	pm_runtime_put_autosuspend(i2c->dev);
+	up(&i2c->lock);
+
+	return (ret < 0) ? ret : size;
+}
+
+static ssize_t show_mode(struct device *dev,
+			 struct device_attribute *attr,
+			 char *buf)
+{
+	struct dw_i2c_dev *i2c = dev_get_drvdata(dev);
+	int ret;
+
+	switch (i2c->master_cfg & DW_IC_SPEED_MASK) {
+	case DW_IC_CON_SPEED_STD:
+		ret = snprintf(buf, PAGE_SIZE, "%s\n", "std");
+		break;
+	case DW_IC_CON_SPEED_FAST:
+		if (i2c->fast_plus)
+			ret = snprintf(buf, PAGE_SIZE, "%s\n", "fast+");
+		else
+			ret = snprintf(buf, PAGE_SIZE, "%s\n", "fast");
+		break;
+	case DW_IC_CON_SPEED_HIGH:
+		ret = snprintf(buf, PAGE_SIZE, "%s\n", "high");
+		break;
+	default:
+		ret = snprintf(buf, PAGE_SIZE, "%s\n", "Not Supported\n");
+		break;
+	}
+
+	return ret;
+}
+
+static DEVICE_ATTR(bus_num, S_IRUGO, show_bus_num, NULL);
+static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR, show_mode, store_mode);
+
+static struct attribute *i2c_dw_attrs[] = {
+	&dev_attr_bus_num.attr,
+	&dev_attr_mode.attr,
+	NULL,
+};
+
+static struct attribute_group i2c_dw_attr_group = {
+	.name = "i2c_dw_sysnode",
+	.attrs = i2c_dw_attrs,
+};
+
+static ssize_t store_lock_xfer(struct device *dev,
+			  struct device_attribute *attr,
+			  const char *buf, size_t size)
+{
+	struct dw_i2c_dev *i2c = dev_get_drvdata(dev->parent);
+	ssize_t	status = -EINVAL;
+	long lock;
+
+
+	status = kstrtol(buf, 0, &lock);
+	if (status == 0) {
+		if (lock && !i2c->lock_flag) {
+			down(&i2c->lock);
+			pm_runtime_get_sync(i2c->dev);
+			i2c->lock_flag = 1;
+			dev_info(dev, "lock i2c xfer\n");
+		} else if (!lock && i2c->lock_flag) {
+			pm_runtime_mark_last_busy(i2c->dev);
+			pm_runtime_put_autosuspend(i2c->dev);
+			i2c->lock_flag = 0;
+			up(&i2c->lock);
+			dev_info(dev, "unlock i2c xfer\n");
+		} else
+			return -EINVAL;
+	}
+
+	return status ? : size;
+}
+
+static DEVICE_ATTR(lock_xfer, S_IWUSR, NULL, store_lock_xfer);
+
+struct dw_i2c_dev *i2c_dw_setup(struct device *pdev, int bus_idx,
+	unsigned long start, unsigned long len, int irq)
+{
+	struct dw_i2c_dev *dev;
+	struct i2c_adapter *adap;
+	void __iomem *base;
+	struct  dw_controller *controller;
+	int r;
+#ifdef CONFIG_ACPI
+	void *handle_save = ACPI_HANDLE(pdev);
+#endif
+	u32 param1;
+
+	if (bus_idx >= ARRAY_SIZE(dw_controllers)) {
+		dev_err(pdev, "invalid bus index %d\n",
+			bus_idx);
+		r = -EINVAL;
+		goto exit;
+	}
+
+	controller = &dw_controllers[bus_idx];
+
+	base = ioremap_nocache(start, len);
+	if (!base) {
+		dev_err(pdev, "I/O memory remapping failed\n");
+		r = -ENOMEM;
+		goto exit;
+	}
+
+	dev = kzalloc(sizeof(struct dw_i2c_dev), GFP_KERNEL);
+	if (!dev) {
+		r = -ENOMEM;
+		goto err_iounmap;
+	}
+
+	init_completion(&dev->cmd_complete);
+	sema_init(&dev->lock, 1);
+	dev->status = STATUS_IDLE;
+	dev->clk = NULL;
+	dev->controller = controller;
+	dev->get_clk_rate_khz = i2c_dw_get_clk_rate_khz;
+	dev->base = base;
+	dev->dev = get_device(pdev);
+	dev->functionality =
+		I2C_FUNC_I2C |
+		I2C_FUNC_SMBUS_BYTE |
+		I2C_FUNC_SMBUS_BYTE_DATA |
+		I2C_FUNC_SMBUS_WORD_DATA |
+		I2C_FUNC_SMBUS_I2C_BLOCK;
+	dev->master_cfg =  controller->bus_cfg;
+	dev->get_scl_cfg = controller->scl_cfg;
+	dev->enable_stop = controller->enable_stop;
+	dev->clk_khz = controller->clk_khz;
+	dev->speed_cfg = dev->master_cfg & DW_IC_SPEED_MASK;
+	dev->use_dyn_clk = 0;
+	dev->reset = controller->reset;
+	dev->irq = irq;
+	dev->share_irq = controller->share_irq;
+	dev->abort = intel_mid_dw_i2c_abort;
+	dev->tx_fifo_depth = controller->tx_fifo_depth;
+	dev->rx_fifo_depth = controller->rx_fifo_depth;
+	dev->fast_plus = controller->fast_plus;
+	dev_set_drvdata(pdev, dev);
+	dw_i2c_acpi_setup(pdev, dev);
+
+	if (!dev->tx_fifo_depth || !dev->rx_fifo_depth) {
+		param1 = i2c_dw_read_comp_param(dev);
+		dev->tx_fifo_depth = ((param1 >> 16) & 0xff) + 1;
+		dev->rx_fifo_depth = ((param1 >> 8)  & 0xff) + 1;
+	}
+
+	r = i2c_dw_init(dev);
+	if (r)
+		goto err_kfree;
+
+	adap = &dev->adapter;
+	i2c_set_adapdata(adap, dev);
+	adap->owner = THIS_MODULE;
+	adap->class = 0;
+	adap->algo = &i2c_dw_algo;
+	adap->dev.parent = pdev;
+	adap->nr = controller->bus_num;
+	snprintf(adap->name, sizeof(adap->name), "i2c-designware-%d",
+		adap->nr);
+
+	r = request_irq(irq, i2c_dw_isr, IRQF_SHARED, adap->name, dev);
+	if (r) {
+		dev_err(pdev, "failure requesting irq %i\n", irq);
+		goto err_kfree;
+	}
+
+	i2c_dw_disable_int(dev);
+	i2c_dw_clear_int(dev);
+	r = i2c_add_numbered_adapter(adap);
+	if (r) {
+		dev_err(pdev, "failure adding adapter\n");
+		goto err_free_irq;
+	}
+
+	r = sysfs_create_group(&pdev->kobj, &i2c_dw_attr_group);
+	if (r) {
+		dev_err(pdev,
+			"Unable to export sysfs interface, error: %d\n", r);
+		goto err_del_adap;
+	}
+
+	r = device_create_file(&adap->dev, &dev_attr_lock_xfer);
+	if (r < 0)
+		dev_err(&adap->dev,
+			"Failed to add lock_xfer sysfs files: %d\n", r);
+
+	acpi_i2c_register_devices(adap);
+	ACPI_HANDLE_SET(pdev, handle_save);
+
+	return dev;
+
+err_del_adap:
+	i2c_del_adapter(&dev->adapter);
+err_free_irq:
+	free_irq(irq, dev);
+err_kfree:
+	put_device(pdev);
+	kfree(dev);
+err_iounmap:
+	iounmap(base);
+exit:
+	return ERR_PTR(r);
+}
+EXPORT_SYMBOL(i2c_dw_setup);
+
+#ifdef CONFIG_ACPI
+static int dw_i2c_acpi_get_freq(struct acpi_resource *ares,
+					void *data)
+{
+	struct dw_i2c_dev *i2c = data;
+
+	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
+		struct acpi_resource_i2c_serialbus *sb;
+
+		sb = &ares->data.i2c_serial_bus;
+		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_I2C) {
+			i2c->freq = sb->connection_speed;
+			if (i2c->freq == DW_STD_SPEED) {
+				i2c->master_cfg &= ~DW_IC_SPEED_MASK;
+				i2c->master_cfg |= DW_IC_CON_SPEED_STD;
+			} else if (i2c->freq == DW_FAST_SPEED) {
+				i2c->master_cfg &= ~DW_IC_SPEED_MASK;
+				i2c->master_cfg |= DW_IC_CON_SPEED_FAST;
+			} else if (i2c->freq == DW_HIGH_SPEED) {
+				i2c->master_cfg &= ~DW_IC_SPEED_MASK;
+				i2c->master_cfg |= DW_IC_CON_SPEED_HIGH;
+			}
+
+			down(&i2c->lock);
+			i2c_dw_init(i2c);
+			up(&i2c->lock);
+		}
+	}
+
+	return 1;
+}
+
+static acpi_status acpi_i2c_find_device_speed(acpi_handle handle, u32 level,
+					void *data, void **return_value)
+{
+	struct dw_i2c_dev *i2c = data;
+	struct list_head resource_list;
+	struct acpi_device *adev;
+	int ret;
+
+	if (acpi_bus_get_device(handle, &adev))
+		return AE_OK;
+	if (acpi_bus_get_status(adev) || !adev->status.present)
+		return AE_OK;
+
+	INIT_LIST_HEAD(&resource_list);
+	ret = acpi_dev_get_resources(adev, &resource_list,
+				     dw_i2c_acpi_get_freq, i2c);
+	acpi_dev_free_resource_list(&resource_list);
+
+	if (ret < 0)
+		return AE_OK;
+
+	pr_debug("i2c device: %s, freq: %dkHz\n",
+			dev_name(&adev->dev), i2c->freq/1000);
+
+	return AE_OK;
+}
+
+static void dw_i2c_acpi_setup(struct device *pdev, struct dw_i2c_dev *dev)
+{
+	acpi_handle pdev_handle = ACPI_HANDLE(pdev);
+	acpi_handle handle = NULL;
+	acpi_status status;
+	unsigned long long shared_host;
+
+	if (pdev_handle) {
+		handle = pdev_handle;
+	} else if (dev->controller->acpi_name) {
+		acpi_get_handle(NULL,
+			dev->controller->acpi_name, &handle);
+
+		ACPI_HANDLE_SET(pdev, handle);
+	}
+
+	if (handle == NULL)
+		return;
+
+	status = acpi_evaluate_integer(handle, "_SEM", NULL, &shared_host);
+	if (ACPI_SUCCESS(status)) {
+		dev_info(pdev, "_SEM=%ld\n", shared_host);
+		if (shared_host != 0) {
+			dev_info(pdev, "Share controller with PUNIT\n");
+			dev->shared_host = 1;
+			dev->acquire_ownership = intel_mid_dw_i2c_acquire_ownership;
+			dev->release_ownership = intel_mid_dw_i2c_release_ownership;
+		}
+	}
+
+	/* Find I2C adapter bus frequency */
+	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
+				     acpi_i2c_find_device_speed, NULL,
+				     dev, NULL);
+	if (ACPI_FAILURE(status))
+		dev_warn(pdev, "failed to get I2C bus freq\n");
+}
+#else
+void dw_i2c_acpi_setup(struct device *pdev, struct dw_i2c_dev *dev) { }
+#endif
+
+void i2c_dw_free(struct device *pdev, struct dw_i2c_dev *dev)
+{
+	struct i2c_adapter *adap = &dev->adapter;
+
+	i2c_dw_disable(dev);
+
+	device_remove_file(&adap->dev, &dev_attr_lock_xfer);
+	sysfs_remove_group(&pdev->kobj, &i2c_dw_attr_group);
+
+	i2c_del_adapter(&dev->adapter);
+	put_device(pdev);
+	free_irq(dev->irq, dev);
+	kfree(dev);
+}
+EXPORT_SYMBOL(i2c_dw_free);
+
+static u32
+i2c_dw_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset)
+{
+	/*
+	 * DesignWare I2C core doesn't seem to have solid strategy to meet
+	 * the tHD;STA timing spec.  Configuring _HCNT based on tHIGH spec
+	 * will result in violation of the tHD;STA spec.
+	 */
+	if (cond)
+		/*
+		 * Conditional expression:
+		 *
+		 *   IC_[FS]S_SCL_HCNT + (1+4+3) >= IC_CLK * tHIGH
+		 *
+		 * This is based on the DW manuals, and represents an ideal
+		 * configuration.  The resulting I2C bus speed will be
+		 * faster than any of the others.
+		 *
+		 * If your hardware is free from tHD;STA issue, try this one.
+		 */
+		return (ic_clk * tSYMBOL + 5000) / 10000 - 8 + offset;
+	else
+		/*
+		 * Conditional expression:
+		 *
+		 *   IC_[FS]S_SCL_HCNT + 3 >= IC_CLK * (tHD;STA + tf)
+		 *
+		 * This is just experimental rule; the tHD;STA period turned
+		 * out to be proportinal to (_HCNT + 3).  With this setting,
+		 * we could meet both tHIGH and tHD;STA timing specs.
+		 *
+		 * If unsure, you'd better to take this alternative.
+		 *
+		 * The reason why we need to take into account "tf" here,
+		 * is the same as described in i2c_dw_scl_lcnt().
+		 */
+		return (ic_clk * (tSYMBOL + tf) + 5000) / 10000 - 3 + offset;
+}
+
+static u32 i2c_dw_scl_lcnt(u32 ic_clk, u32 tLOW, u32 tf, int offset)
+{
+	/*
+	 * Conditional expression:
+	 *
+	 *   IC_[FS]S_SCL_LCNT + 1 >= IC_CLK * (tLOW + tf)
+	 *
+	 * DW I2C core starts counting the SCL CNTs for the LOW period
+	 * of the SCL clock (tLOW) as soon as it pulls the SCL line.
+	 * In order to meet the tLOW timing spec, we need to take into
+	 * account the fall time of SCL signal (tf).  Default tf value
+	 * should be 0.3 us, for safety.
+	 */
+	return ((ic_clk * (tLOW + tf) + 5000) / 10000) - 1 + offset;
+}
+
+/**
+ * i2c_dw_init() - initialize the designware i2c master hardware
+ * @dev: device private data
+ *
+ * This functions configures and enables the I2C master.
+ * This function is called during I2C init function, and in case of timeout at
+ * run time.
+ */
+int i2c_dw_init(struct dw_i2c_dev *dev)
+{
+	u32 input_clock_khz;
+	u32 hcnt, lcnt;
+	u32 reg;
+
+	if (dev->reset)
+		dev->reset(dev);
+
+	input_clock_khz = dev->get_clk_rate_khz(dev);
+
+	/* Configure register endianess access */
+	reg = dw_readl(dev, DW_IC_COMP_TYPE);
+	if (reg == ___constant_swab32(DW_IC_COMP_TYPE_VALUE)) {
+		dev->swab = 1;
+		reg = DW_IC_COMP_TYPE_VALUE;
+	}
+
+	if (reg != DW_IC_COMP_TYPE_VALUE) {
+		dev_err(dev->dev, "Unknown Synopsys component type: "
+			"0x%08x\n", reg);
+		return -ENODEV;
+	}
+
+	/* Disable the adapter */
+	i2c_dw_disable(dev);
+
+	if (dev->get_scl_cfg)
+		dev->get_scl_cfg(dev);
+	else {
+		/* set standard and fast speed deviders for high/low periods */
+
+		/* Standard-mode */
+		hcnt = i2c_dw_scl_hcnt(input_clock_khz,
+					227,	/* tHD;STA = tHIGH = 22.7 us */
+					3,	/* tf = 0.3 us */
+					0,	/* 0: DW default, 1: Ideal */
+					23);	/* offset = 23 */
+		lcnt = i2c_dw_scl_lcnt(input_clock_khz,
+					227,	/* tLOW = 22.7 us */
+					3,	/* tf = 0.3 us */
+					28);	/* offset = 28 */
+		dw_writel(dev, hcnt, DW_IC_SS_SCL_HCNT);
+		dw_writel(dev, lcnt, DW_IC_SS_SCL_LCNT);
+		dev_dbg(dev->dev, "Standard-mode HCNT:LCNT = %d:%d\n",
+					hcnt, lcnt);
+
+		/* Fast-mode */
+		hcnt = i2c_dw_scl_hcnt(input_clock_khz,
+					52,	/* tHD;STA = tHIGH = 5.2 us */
+					3,	/* tf = 0.3 us */
+					0,	/* 0: DW default, 1: Ideal */
+					11);	/* offset = 11 */
+		lcnt = i2c_dw_scl_lcnt(input_clock_khz,
+					72,	/* tLOW = 7.2 us */
+					3,	/* tf = 0.3 us */
+					12);	/* offset = 12 */
+		dw_writel(dev, hcnt, DW_IC_FS_SCL_HCNT);
+		dw_writel(dev, lcnt, DW_IC_FS_SCL_LCNT);
+		dev_dbg(dev->dev, "Fast-mode HCNT:LCNT = %d:%d\n", hcnt, lcnt);
+	}
+
+	/* Configure Tx/Rx FIFO threshold levels */
+	dw_writel(dev, dev->tx_fifo_depth/2, DW_IC_TX_TL);
+	dw_writel(dev, dev->rx_fifo_depth/2, DW_IC_RX_TL);
+
+	/* configure the i2c master */
+	dw_writel(dev, dev->master_cfg , DW_IC_CON);
+
+	return 0;
+}
+EXPORT_SYMBOL(i2c_dw_init);
+
+/*
+ * Waiting for bus not busy
+ */
+static int i2c_dw_wait_bus_not_busy(struct dw_i2c_dev *dev)
+{
+	int timeout = TIMEOUT;
+
+	while (dw_readl(dev, DW_IC_STATUS) & DW_IC_STATUS_ACTIVITY) {
+		if (timeout <= 0) {
+			dev_warn(dev->dev, "timeout waiting for bus ready\n");
+			return -ETIMEDOUT;
+		}
+		timeout--;
+		mdelay(1);
+	}
+
+	return 0;
+}
+
+static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
+{
+	struct i2c_msg *msgs = dev->msgs;
+	u32 ic_con;
+
+	/* Disable the adapter */
+	i2c_dw_disable(dev);
+
+	/* set the slave (target) address */
+	dw_writel(dev, msgs[dev->msg_write_idx].addr, DW_IC_TAR);
+
+	/* if the slave address is ten bit address, enable 10BITADDR */
+	ic_con = dw_readl(dev, DW_IC_CON);
+	if (msgs[dev->msg_write_idx].flags & I2C_M_TEN)
+		ic_con |= DW_IC_CON_10BITADDR_MASTER;
+	else
+		ic_con &= ~DW_IC_CON_10BITADDR_MASTER;
+	dw_writel(dev, ic_con, DW_IC_CON);
+
+	/* Enable the adapter */
+	i2c_dw_enable(dev);
+
+	/* Clear and enable interrupts */
+	i2c_dw_clear_int(dev);
+	dw_writel(dev, DW_IC_INTR_DEFAULT_MASK, DW_IC_INTR_MASK);
+}
+/*
+ * Initiate (and continue) low level master read/write transaction.
+ * This function is only called from i2c_dw_isr, and pumping i2c_msg
+ * messages into the tx buffer.  Even if the size of i2c_msg data is
+ * longer than the size of the tx buffer, it handles everything.
+ */
+void
+i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
+{
+	struct i2c_msg *msgs = dev->msgs;
+	u32 intr_mask;
+	int tx_limit, rx_limit;
+	int cmd;
+	u32 addr = msgs[dev->msg_write_idx].addr;
+	u32 buf_len = dev->tx_buf_len;
+	u8 *buf = dev->tx_buf;
+	unsigned long flags;
+
+	intr_mask = DW_IC_INTR_DEFAULT_MASK;
+
+	raw_local_irq_save(flags);
+	/* if fifo only has one byte, it is not safe */
+	if (!dev->enable_stop && (dev->status & STATUS_WRITE_IN_PROGRESS) &&
+		(dw_readl(dev, DW_IC_TXFLR) < 1)) {
+		dev_err(dev->dev, "TX FIFO underrun, addr: 0x%x.\n", addr);
+		dev->msg_err = -EAGAIN;
+	}
+
+	for (; dev->msg_write_idx < dev->msgs_num; dev->msg_write_idx++) {
+		if (dev->msg_err)
+			break;
+
+		/*
+		 * if target address has changed, we need to
+		 * reprogram the target address in the i2c
+		 * adapter when we are done with this transfer
+		 */
+		if (msgs[dev->msg_write_idx].addr != addr) {
+			dev_err(dev->dev,
+				"%s: invalid target address\n", __func__);
+			dev->msg_err = -EINVAL;
+			break;
+		}
+
+		if (msgs[dev->msg_write_idx].len == 0) {
+			dev_err(dev->dev,
+				"%s: invalid message length\n", __func__);
+			dev->msg_err = -EINVAL;
+			break;
+		}
+
+		if (!(dev->status & STATUS_WRITE_IN_PROGRESS)) {
+			/* new i2c_msg */
+			buf = msgs[dev->msg_write_idx].buf;
+			buf_len = msgs[dev->msg_write_idx].len;
+		}
+
+		tx_limit = dev->tx_fifo_depth - dw_readl(dev, DW_IC_TXFLR);
+		rx_limit = dev->rx_fifo_depth - dw_readl(dev, DW_IC_RXFLR);
+
+		while (buf_len > 0 && tx_limit > 0 && rx_limit > 0) {
+			cmd = (dev->enable_stop && buf_len == 1
+				&& dev->msg_write_idx == dev->msgs_num - 1) ?
+				DW_IC_CMD_STOP : 0;
+			if (msgs[dev->msg_write_idx].flags & I2C_M_RD) {
+				dw_writel(dev, cmd | 0x100, DW_IC_DATA_CMD);
+				rx_limit--;
+			} else
+				dw_writel(dev, cmd | *buf++, DW_IC_DATA_CMD);
+			tx_limit--; buf_len--;
+		}
+
+		dev->tx_buf = buf;
+		dev->tx_buf_len = buf_len;
+
+		if (buf_len > 0) {
+			/* more bytes to be written */
+			dev->status |= STATUS_WRITE_IN_PROGRESS;
+			break;
+		} else
+			dev->status &= ~STATUS_WRITE_IN_PROGRESS;
+	}
+	raw_local_irq_restore(flags);
+
+	/*
+	 * If i2c_msg index search is completed, we don't need TX_EMPTY
+	 * interrupt any more.
+	 */
+	if (dev->msg_write_idx == dev->msgs_num)
+		intr_mask &= ~DW_IC_INTR_TX_EMPTY;
+
+	if (dev->msg_err)
+		intr_mask = 0;
+
+	dw_writel(dev, intr_mask,  DW_IC_INTR_MASK);
+}
+
+static void
+i2c_dw_read(struct dw_i2c_dev *dev)
+{
+	struct i2c_msg *msgs = dev->msgs;
+	int rx_valid;
+
+	for (; dev->msg_read_idx < dev->msgs_num; dev->msg_read_idx++) {
+		u32 len;
+		u8 *buf;
+
+		if (!(msgs[dev->msg_read_idx].flags & I2C_M_RD))
+			continue;
+
+		if (!(dev->status & STATUS_READ_IN_PROGRESS)) {
+			len = msgs[dev->msg_read_idx].len;
+			buf = msgs[dev->msg_read_idx].buf;
+		} else {
+			len = dev->rx_buf_len;
+			buf = dev->rx_buf;
+		}
+
+		rx_valid = dw_readl(dev, DW_IC_RXFLR);
+
+		for (; len > 0 && rx_valid > 0; len--, rx_valid--)
+			*buf++ = dw_readl(dev, DW_IC_DATA_CMD);
+
+		if (len > 0) {
+			dev->status |= STATUS_READ_IN_PROGRESS;
+			dev->rx_buf_len = len;
+			dev->rx_buf = buf;
+			return;
+		} else
+			dev->status &= ~STATUS_READ_IN_PROGRESS;
+	}
+}
+
+static int i2c_dw_handle_tx_abort(struct dw_i2c_dev *dev)
+{
+	unsigned long abort_source = dev->abort_source;
+	int i;
+
+	if (abort_source & DW_IC_TX_ABRT_NOACK) {
+		for_each_set_bit(i, &abort_source, ARRAY_SIZE(abort_sources))
+			dev_dbg(dev->dev,
+				"%s: %s\n", __func__, abort_sources[i]);
+		return -EREMOTEIO;
+	}
+
+	for_each_set_bit(i, &abort_source, ARRAY_SIZE(abort_sources))
+		dev_err(dev->dev, "%s: %s\n", __func__, abort_sources[i]);
+
+	if (abort_source & DW_IC_TX_ARB_LOST)
+		return -EAGAIN;
+	else if (abort_source & DW_IC_TX_ABRT_GCALL_READ)
+		return -EINVAL; /* wrong msgs[] data */
+	else
+		return -EIO;
+}
+
+/*
+ * Prepare controller for a transaction and call i2c_dw_xfer_msg
+ */
+int
+i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
+{
+	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
+	int ret = 0;
+	unsigned long timeout;
+
+	dev_dbg(dev->dev, "%s: msgs: %d\n", __func__, num);
+
+	down(&dev->lock);
+	pm_runtime_get_sync(dev->dev);
+
+	INIT_COMPLETION(dev->cmd_complete);
+	dev->msgs = msgs;
+	dev->msgs_num = num;
+	dev->cmd_err = 0;
+	dev->msg_write_idx = 0;
+	dev->msg_read_idx = 0;
+	dev->msg_err = 0;
+	dev->status = STATUS_IDLE;
+	dev->abort_source = 0;
+
+	/* if the host is shared between other units on the SoC */
+	if (dev->shared_host && dev->acquire_ownership) {
+		ret = dev->acquire_ownership();
+		if (ret < 0) {
+			dev_WARN(dev->dev, "couldnt acquire ownership\n");
+			goto done;
+		}
+	}
+
+	ret = i2c_dw_wait_bus_not_busy(dev);
+	if (ret < 0)
+		goto done;
+
+	/* start the transfers */
+	i2c_dw_xfer_init(dev);
+
+	/* wait for tx to complete */
+	timeout = wait_for_completion_timeout(&dev->cmd_complete, 3*HZ);
+	if (timeout == 0) {
+		dev_WARN(dev->dev, "controller timed out\n");
+		i2c_dw_dump(dev);
+		dump_stack();
+		if (dev->abort)
+			dev->abort(adap->nr);
+		i2c_dw_init(dev);
+		ret = -ETIMEDOUT;
+		goto done;
+	}
+
+	if (dev->msg_err) {
+		dev_WARN(dev->dev, "i2c msg error\n");
+		ret = dev->msg_err;
+		goto done;
+	}
+
+	/* no error */
+	if (likely(!dev->cmd_err)) {
+		/* Disable the adapter */
+		i2c_dw_disable(dev);
+		ret = num;
+		goto done;
+	}
+
+	/* We have an error */
+	if (dev->cmd_err == DW_IC_ERR_TX_ABRT) {
+		ret = i2c_dw_handle_tx_abort(dev);
+		goto done;
+	}
+	ret = -EIO;
+
+done:
+	if (dev->shared_host && dev->release_ownership)
+		dev->release_ownership();
+
+	pm_runtime_mark_last_busy(dev->dev);
+	pm_runtime_put_autosuspend(dev->dev);
+	up(&dev->lock);
+
+	return ret;
+}
+
+u32 i2c_dw_func(struct i2c_adapter *adap)
+{
+	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
+	return dev->functionality;
+}
+
+static u32 i2c_dw_read_clear_intrbits(struct dw_i2c_dev *dev)
+{
+	u32 stat;
+
+	/*
+	 * The IC_INTR_STAT register just indicates "enabled" interrupts.
+	 * Ths unmasked raw version of interrupt status bits are available
+	 * in the IC_RAW_INTR_STAT register.
+	 *
+	 * That is,
+	 *   stat = dw_readl(IC_INTR_STAT);
+	 * equals to,
+	 *   stat = dw_readl(IC_RAW_INTR_STAT) & dw_readl(IC_INTR_MASK);
+	 *
+	 * The raw version might be useful for debugging purposes.
+	 */
+	stat = dw_readl(dev, DW_IC_INTR_STAT);
+
+	/*
+	 * Do not use the IC_CLR_INTR register to clear interrupts, or
+	 * you'll miss some interrupts, triggered during the period from
+	 * dw_readl(IC_INTR_STAT) to dw_readl(IC_CLR_INTR).
+	 *
+	 * Instead, use the separately-prepared IC_CLR_* registers.
+	 */
+	if (stat & DW_IC_INTR_RX_UNDER)
+		dw_readl(dev, DW_IC_CLR_RX_UNDER);
+	if (stat & DW_IC_INTR_RX_OVER)
+		dw_readl(dev, DW_IC_CLR_RX_OVER);
+	if (stat & DW_IC_INTR_TX_OVER)
+		dw_readl(dev, DW_IC_CLR_TX_OVER);
+	if (stat & DW_IC_INTR_RD_REQ)
+		dw_readl(dev, DW_IC_CLR_RD_REQ);
+	if (stat & DW_IC_INTR_TX_ABRT) {
+		/*
+		 * The IC_TX_ABRT_SOURCE register is cleared whenever
+		 * the IC_CLR_TX_ABRT is read.  Preserve it beforehand.
+		 */
+		dev->abort_source = dw_readl(dev, DW_IC_TX_ABRT_SOURCE);
+		dw_readl(dev, DW_IC_CLR_TX_ABRT);
+	}
+	if (stat & DW_IC_INTR_RX_DONE)
+		dw_readl(dev, DW_IC_CLR_RX_DONE);
+	if (stat & DW_IC_INTR_ACTIVITY)
+		dw_readl(dev, DW_IC_CLR_ACTIVITY);
+	if (stat & DW_IC_INTR_STOP_DET)
+		dw_readl(dev, DW_IC_CLR_STOP_DET);
+	if (stat & DW_IC_INTR_START_DET)
+		dw_readl(dev, DW_IC_CLR_START_DET);
+	if (stat & DW_IC_INTR_GEN_CALL)
+		dw_readl(dev, DW_IC_CLR_GEN_CALL);
+
+	return stat;
+}
+
+/*
+ * Interrupt service routine. This gets called whenever an I2C interrupt
+ * occurs.
+ */
+irqreturn_t i2c_dw_isr(int this_irq, void *dev_id)
+{
+	struct dw_i2c_dev *dev = dev_id;
+	u32 stat, enabled;
+
+	pm_runtime_get(dev->dev);
+#ifdef CONFIG_PM_RUNTIME
+	if (!pm_runtime_active(dev->dev)) {
+		pm_runtime_put_autosuspend(dev->dev);
+		if (dev->share_irq)
+			return IRQ_NONE;
+		else
+			return IRQ_HANDLED;
+	}
+#endif
+	enabled = dw_readl(dev, DW_IC_ENABLE);
+	stat = dw_readl(dev, DW_IC_RAW_INTR_STAT);
+	dev_dbg(dev->dev, "%s:  %s enabled= 0x%x stat=0x%x\n", __func__,
+		dev->adapter.name, enabled, stat);
+	if (!enabled || !(stat & ~DW_IC_INTR_ACTIVITY)) {
+		pm_runtime_put_autosuspend(dev->dev);
+		if (dev->share_irq)
+			return IRQ_NONE;
+		else
+			return IRQ_HANDLED;
+	}
+
+	stat = i2c_dw_read_clear_intrbits(dev);
+
+	if (stat & DW_IC_INTR_RX_OVER)
+		dev_warn(dev->dev, "RX fifo overrun\n");
+
+	if (stat & DW_IC_INTR_TX_ABRT) {
+		dev->cmd_err |= DW_IC_ERR_TX_ABRT;
+		dev->status = STATUS_IDLE;
+
+		/*
+		 * Anytime TX_ABRT is set, the contents of the tx/rx
+		 * buffers are flushed.  Make sure to skip them.
+		 */
+		dw_writel(dev, 0, DW_IC_INTR_MASK);
+		goto tx_aborted;
+	}
+
+	if (stat & DW_IC_INTR_RX_FULL)
+		i2c_dw_read(dev);
+
+	if (stat & DW_IC_INTR_TX_EMPTY)
+		i2c_dw_xfer_msg(dev);
+
+	/*
+	 * No need to modify or disable the interrupt mask here.
+	 * i2c_dw_xfer_msg() will take care of it according to
+	 * the current transmit status.
+	 */
+
+tx_aborted:
+	if ((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET))
+					|| dev->msg_err) {
+		/*
+		 * Check DW_IC_RXFLR register,
+		 * read from the RX FIFO if it's not empty.
+		 */
+		if ((stat & DW_IC_INTR_STOP_DET) &&
+			dw_readl(dev, DW_IC_RXFLR) > 0)
+			i2c_dw_read(dev);
+
+		complete(&dev->cmd_complete);
+	}
+
+	pm_runtime_put_autosuspend(dev->dev);
+	return IRQ_HANDLED;
+}
+
+u32 i2c_dw_is_enabled(struct dw_i2c_dev *dev)
+{
+	return dw_readl(dev, DW_IC_ENABLE_STATUS);
+}
+
+static void __i2c_dw_enable(struct dw_i2c_dev *dev, bool enable)
+{
+	int timeout = 100;
+
+	do {
+		dw_writel(dev, enable, DW_IC_ENABLE);
+		if (i2c_dw_is_enabled(dev) == enable)
+			return;
+
+		usleep_range(25, 250);
+	} while (timeout-- > 0);
+
+	dev_warn(dev->dev, "timeout in %sabling adapter\n",
+		enable ? "en" : "dis");
+}
+
+void i2c_dw_enable(struct dw_i2c_dev *dev)
+{
+       /* Enable the adapter */
+	__i2c_dw_enable(dev, true);
+}
+
+void i2c_dw_disable(struct dw_i2c_dev *dev)
+{
+	/* Disable controller */
+	__i2c_dw_enable(dev, false);
+
+	/* Disable all interupts */
+	dw_writel(dev, 0, DW_IC_INTR_MASK);
+	dw_readl(dev, DW_IC_CLR_INTR);
+}
+
+void i2c_dw_clear_int(struct dw_i2c_dev *dev)
+{
+	dw_readl(dev, DW_IC_CLR_INTR);
+}
+
+void i2c_dw_disable_int(struct dw_i2c_dev *dev)
+{
+	dw_writel(dev, 0, DW_IC_INTR_MASK);
+}
+
+u32 i2c_dw_read_comp_param(struct dw_i2c_dev *dev)
+{
+	return dw_readl(dev, DW_IC_COMP_PARAM_1);
+}
diff --git a/drivers/external_drivers/drivers/i2c/busses/i2c-designware-core.h b/drivers/external_drivers/drivers/i2c/busses/i2c-designware-core.h
new file mode 100644
index 0000000..5ff823c
--- /dev/null
+++ b/drivers/external_drivers/drivers/i2c/busses/i2c-designware-core.h
@@ -0,0 +1,341 @@
+/*
+ * Synopsys DesignWare I2C adapter driver (master only).
+ *
+ * Based on the TI DAVINCI I2C adapter driver.
+ *
+ * Copyright (C) 2006 Texas Instruments.
+ * Copyright (C) 2007 MontaVista Software Inc.
+ * Copyright (C) 2009 Provigent Ltd.
+ *
+ * ----------------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * ----------------------------------------------------------------------------
+ *
+ */
+
+/*
+ * Registers offset
+ */
+#define DW_IC_CON		0x0
+#define DW_IC_TAR		0x4
+#define DW_IC_DATA_CMD		0x10
+#define DW_IC_SS_SCL_HCNT	0x14
+#define DW_IC_SS_SCL_LCNT	0x18
+#define DW_IC_FS_SCL_HCNT	0x1c
+#define DW_IC_FS_SCL_LCNT	0x20
+#define DW_IC_HS_SCL_HCNT	0x24
+#define DW_IC_HS_SCL_LCNT	0x28
+#define DW_IC_INTR_STAT		0x2c
+#define DW_IC_INTR_MASK		0x30
+#define DW_IC_RAW_INTR_STAT	0x34
+#define DW_IC_RX_TL		0x38
+#define DW_IC_TX_TL		0x3c
+#define DW_IC_CLR_INTR		0x40
+#define DW_IC_CLR_RX_UNDER	0x44
+#define DW_IC_CLR_RX_OVER	0x48
+#define DW_IC_CLR_TX_OVER	0x4c
+#define DW_IC_CLR_RD_REQ	0x50
+#define DW_IC_CLR_TX_ABRT	0x54
+#define DW_IC_CLR_RX_DONE	0x58
+#define DW_IC_CLR_ACTIVITY	0x5c
+#define DW_IC_CLR_STOP_DET	0x60
+#define DW_IC_CLR_START_DET	0x64
+#define DW_IC_CLR_GEN_CALL	0x68
+#define DW_IC_ENABLE		0x6c
+#define DW_IC_STATUS		0x70
+#define DW_IC_TXFLR		0x74
+#define DW_IC_RXFLR		0x78
+#define DW_IC_TX_ABRT_SOURCE	0x80
+#define DW_IC_ENABLE_STATUS	0x9c
+#define DW_IC_COMP_PARAM_1	0xf4
+#define DW_IC_COMP_TYPE		0xfc
+#define DW_IC_COMP_TYPE_VALUE	0x44570140
+
+#define DW_IC_CON_MASTER		0x1
+#define DW_IC_CON_SPEED_STD		0x2
+#define DW_IC_CON_SPEED_FAST		0x4
+#define DW_IC_CON_SPEED_HIGH		0x6
+#define DW_IC_CON_10BITADDR_MASTER	0x10
+#define DW_IC_CON_RESTART_EN		0x20
+#define DW_IC_CON_SLAVE_DISABLE		0x40
+
+#define INTEL_MID_STD_CFG  (DW_IC_CON_MASTER |			\
+				DW_IC_CON_SLAVE_DISABLE |	\
+				DW_IC_CON_RESTART_EN)
+
+#define DW_IC_INTR_RX_UNDER	0x001
+#define DW_IC_INTR_RX_OVER	0x002
+#define DW_IC_INTR_RX_FULL	0x004
+#define DW_IC_INTR_TX_OVER	0x008
+#define DW_IC_INTR_TX_EMPTY	0x010
+#define DW_IC_INTR_RD_REQ	0x020
+#define DW_IC_INTR_TX_ABRT	0x040
+#define DW_IC_INTR_RX_DONE	0x080
+#define DW_IC_INTR_ACTIVITY	0x100
+#define DW_IC_INTR_STOP_DET	0x200
+#define DW_IC_INTR_START_DET	0x400
+#define DW_IC_INTR_GEN_CALL	0x800
+
+#define DW_IC_INTR_DEFAULT_MASK		(DW_IC_INTR_RX_FULL | \
+					 DW_IC_INTR_TX_EMPTY | \
+					 DW_IC_INTR_TX_ABRT | \
+					 DW_IC_INTR_STOP_DET | \
+					 DW_IC_INTR_RX_OVER)
+
+#define DW_IC_STATUS_ACTIVITY	0x1
+
+#define DW_IC_ERR_TX_ABRT	0x1
+
+#define DW_IC_CMD_STOP		0x200
+
+#define DW_IC_SPEED_MASK	0x6
+
+/*
+ * status codes
+ */
+#define STATUS_POWERON			0x0
+#define STATUS_IDLE			STATUS_POWERON
+#define STATUS_WRITE_IN_PROGRESS	0x1
+#define STATUS_READ_IN_PROGRESS		0x2
+
+#define TIMEOUT			20 /* ms */
+
+/*
+ * hardware abort codes from the DW_IC_TX_ABRT_SOURCE register
+ *
+ * only expected abort codes are listed here
+ * refer to the datasheet for the full list
+ */
+#define ABRT_7B_ADDR_NOACK	0
+#define ABRT_10ADDR1_NOACK	1
+#define ABRT_10ADDR2_NOACK	2
+#define ABRT_TXDATA_NOACK	3
+#define ABRT_GCALL_NOACK	4
+#define ABRT_GCALL_READ		5
+#define ABRT_SBYTE_ACKDET	7
+#define ABRT_SBYTE_NORSTRT	9
+#define ABRT_10B_RD_NORSTRT	10
+#define ABRT_MASTER_DIS		11
+#define ARB_LOST		12
+
+#define DW_IC_TX_ABRT_7B_ADDR_NOACK	(1UL << ABRT_7B_ADDR_NOACK)
+#define DW_IC_TX_ABRT_10ADDR1_NOACK	(1UL << ABRT_10ADDR1_NOACK)
+#define DW_IC_TX_ABRT_10ADDR2_NOACK	(1UL << ABRT_10ADDR2_NOACK)
+#define DW_IC_TX_ABRT_TXDATA_NOACK	(1UL << ABRT_TXDATA_NOACK)
+#define DW_IC_TX_ABRT_GCALL_NOACK	(1UL << ABRT_GCALL_NOACK)
+#define DW_IC_TX_ABRT_GCALL_READ	(1UL << ABRT_GCALL_READ)
+#define DW_IC_TX_ABRT_SBYTE_ACKDET	(1UL << ABRT_SBYTE_ACKDET)
+#define DW_IC_TX_ABRT_SBYTE_NORSTRT	(1UL << ABRT_SBYTE_NORSTRT)
+#define DW_IC_TX_ABRT_10B_RD_NORSTRT	(1UL << ABRT_10B_RD_NORSTRT)
+#define DW_IC_TX_ABRT_MASTER_DIS	(1UL << ABRT_MASTER_DIS)
+#define DW_IC_TX_ARB_LOST		(1UL << ARB_LOST)
+
+#define DW_IC_TX_ABRT_NOACK		(DW_IC_TX_ABRT_7B_ADDR_NOACK | \
+					 DW_IC_TX_ABRT_10ADDR1_NOACK | \
+					 DW_IC_TX_ABRT_10ADDR2_NOACK | \
+					 DW_IC_TX_ABRT_TXDATA_NOACK | \
+					 DW_IC_TX_ABRT_GCALL_NOACK)
+
+/*
+ * i2c scl hcnt/lcnt setting
+ */
+#define PNW_SS_SCLK_HCNT		0x1EC
+#define PNW_SS_SCLK_LCNT		0x1F3
+#define PNW_FS_SCLK_HCNT		0x66
+#define PNW_FS_SCLK_LCNT		0x8B
+#define PNW_HS_SCLK_HCNT		0x9
+#define PNW_HS_SCLK_LCNT		0x17
+
+#define CLV_SS_SCLK_HCNT		0x1EC
+#define CLV_SS_SCLK_LCNT		0x1F3
+#define CLV_FS_SCLK_HCNT		0x59
+#define CLV_FS_SCLK_LCNT		0x98
+#define CLV_HS_SCLK_HCNT		0x8
+#define CLV_HS_SCLK_LCNT		0x17
+
+/* inofficial configuration
+#define MERR_SS_SCLK_HCNT 0x2c8
+#define MERR_SS_SCLK_LCNT 0x380
+#define MERR_FS_SCLK_HCNT 0x084
+#define MERR_FS_SCLK_LCNT 0x100
+*/
+#define MERR_SS_SCLK_HCNT 0x2f8
+#define MERR_SS_SCLK_LCNT 0x37b
+#define MERR_FS_SCLK_HCNT 0x087
+#define MERR_FS_SCLK_LCNT 0x10a
+#define MERR_HS_SCLK_HCNT 0x8
+#define MERR_HS_SCLK_LCNT 0x20
+
+#define VLV2_SS_SCLK_HCNT 0x214
+#define VLV2_SS_SCLK_LCNT 0x272
+#define VLV2_FS_SCLK_HCNT 0x50
+#define VLV2_FS_SCLK_LCNT 0xad
+#define VLV2_HS_SCLK_HCNT 0x6
+#define VLV2_HS_SCLK_LCNT 0x16
+#define VLV2_FS_P_SCLK_HCNT 0x1b
+#define VLV2_FS_P_SCLK_LCNT 0x3a
+
+#define CHV_SS_SCLK_HCNT 0x28f
+#define CHV_SS_SCLK_LCNT 0x2ba
+#define CHV_FS_SCLK_HCNT 0x71
+#define CHV_FS_SCLK_LCNT 0xce
+#define CHV_FS_P_SCLK_HCNT 0x24
+#define CHV_FS_P_SCLK_LCNT 0x53
+
+#define DW_STD_SPEED	100000
+#define DW_FAST_SPEED	400000
+#define DW_HIGH_SPEED	3400000
+
+struct dw_controller;
+/**
+ * struct dw_i2c_dev - private i2c-designware data
+ * @dev: driver model device node
+ * @base: IO registers pointer
+ * @cmd_complete: tx completion indicator
+ * @lock: protect this struct and IO registers
+ * @clk: input reference clock
+ * @cmd_err: run time hadware error code
+ * @msgs: points to an array of messages currently being transfered
+ * @msgs_num: the number of elements in msgs
+ * @msg_write_idx: the element index of the current tx message in the msgs
+ *	array
+ * @tx_buf_len: the length of the current tx buffer
+ * @tx_buf: the current tx buffer
+ * @msg_read_idx: the element index of the current rx message in the msgs
+ *	array
+ * @rx_buf_len: the length of the current rx buffer
+ * @rx_buf: the current rx buffer
+ * @msg_err: error status of the current transfer
+ * @status: i2c master status, one of STATUS_*
+ * @abort_source: copy of the TX_ABRT_SOURCE register
+ * @irq: interrupt number for the i2c master
+ * @adapter: i2c subsystem adapter node
+ * @tx_fifo_depth: depth of the hardware tx fifo
+ * @rx_fifo_depth: depth of the hardware rx fifo
+ * @shared_host: if this host is shared by other units on the SoC
+ */
+struct dw_i2c_dev {
+	struct device		*dev;
+	void __iomem		*base;
+	struct completion	cmd_complete;
+	struct semaphore	lock;
+	struct clk		*clk;
+	u32			(*get_clk_rate_khz) (struct dw_i2c_dev *dev);
+	int			(*get_scl_cfg) (struct dw_i2c_dev *dev);
+	void			(*reset)(struct dw_i2c_dev *dev);
+	int			(*abort)(int busnum);
+	struct dw_controller 	*controller;
+	int			enable_stop;
+	int			share_irq;
+	int			shared_host;
+	int			(*acquire_ownership) (void);
+	int			(*release_ownership) (void);
+	int			cmd_err;
+	struct i2c_msg		*msgs;
+	int			msgs_num;
+	int			msg_write_idx;
+	u32			tx_buf_len;
+	u8			*tx_buf;
+	int			msg_read_idx;
+	u32			rx_buf_len;
+	u8			*rx_buf;
+	int			msg_err;
+	unsigned int		status;
+	u32			abort_source;
+	int			irq;
+	int			swab;
+	struct i2c_adapter	adapter;
+	u32			functionality;
+	u32			master_cfg;
+	unsigned int		tx_fifo_depth;
+	unsigned int		rx_fifo_depth;
+	int			use_dyn_clk;	/* use dynamic clk setting */
+	u32			clk_khz;	/* input clock */
+	u32			speed_cfg;
+	u32			lock_flag;
+	u32			freq;
+	u32			fast_plus;
+};
+
+struct dw_controller {
+	u32 bus_num;
+	u32 bus_cfg;
+	u32 tx_fifo_depth;
+	u32 rx_fifo_depth;
+	u32 clk_khz;
+	u32 fast_plus;
+	int enable_stop;
+	int share_irq;
+	char *acpi_name;
+	int (*scl_cfg) (struct dw_i2c_dev *dev);
+	void (*reset)(struct dw_i2c_dev *dev);
+};
+
+enum dw_ctl_id_t {
+	moorestown_0,
+	moorestown_1,
+	moorestown_2,
+
+	medfield_0,
+	medfield_1,
+	medfield_2,
+	medfield_3,
+	medfield_4,
+	medfield_5,
+
+	cloverview_0,
+	cloverview_1,
+	cloverview_2,
+	cloverview_3,
+	cloverview_4,
+	cloverview_5,
+
+	merrifield_0,
+	merrifield_1,
+	merrifield_2,
+	merrifield_3,
+	merrifield_4,
+	merrifield_5,
+	merrifield_6,
+
+	valleyview_0,
+	valleyview_1,
+	valleyview_2,
+	valleyview_3,
+	valleyview_4,
+	valleyview_5,
+	valleyview_6,
+	valleyview_7,
+
+	cherryview_0,
+	cherryview_1,
+	cherryview_2,
+	cherryview_3,
+	cherryview_4,
+	cherryview_5,
+	cherryview_6,
+	cherryview_7,
+};
+
+extern int intel_mid_dw_i2c_abort(int busnum);
+int i2c_dw_init(struct dw_i2c_dev *dev);
+struct dw_i2c_dev *i2c_dw_setup(struct device *pdev, int bus_idx,
+	unsigned long start, unsigned long len, int irq);
+void i2c_dw_free(struct device *pdev, struct dw_i2c_dev *dev);
+int i2c_dw_suspend(struct dw_i2c_dev *dev, bool runtime);
+int i2c_dw_resume(struct dw_i2c_dev *dev, bool runtime);
+extern int intel_mid_dw_i2c_acquire_ownership(void);
+extern int intel_mid_dw_i2c_release_ownership(void);
diff --git a/drivers/external_drivers/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/external_drivers/drivers/i2c/busses/i2c-designware-pcidrv.c
new file mode 100644
index 0000000..74c4606
--- /dev/null
+++ b/drivers/external_drivers/drivers/i2c/busses/i2c-designware-pcidrv.c
@@ -0,0 +1,289 @@
+/*
+ * Synopsys DesignWare I2C adapter driver (master only).
+ *
+ * Based on the TI DAVINCI I2C adapter driver.
+ *
+ * Copyright (C) 2006 Texas Instruments.
+ * Copyright (C) 2007 MontaVista Software Inc.
+ * Copyright (C) 2009 Provigent Ltd.
+ * Copyright (C) 2011 Intel corporation.
+ *
+ * ----------------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * ----------------------------------------------------------------------------
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/semaphore.h>
+#include <linux/fs.h>
+#include <linux/acpi.h>
+#include "i2c-designware-core.h"
+
+#define DRIVER_NAME "i2c-designware-pci"
+#define DW_I2C_STATIC_BUS_NUM	10
+
+struct dw_probe_info {
+	enum dw_ctl_id_t ctl_id;
+	bool need_func;
+};
+
+#define DW_INFO(_ctl_id, _need_func)			\
+	((kernel_ulong_t)&(struct dw_probe_info) {	\
+		.ctl_id = (_ctl_id),			\
+		.need_func = (_need_func)		\
+	 })
+
+
+static int i2c_dw_pci_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+	struct dw_i2c_dev *i2c = pci_get_drvdata(pdev);
+
+	dev_dbg(dev, "suspend called\n");
+
+	return i2c_dw_suspend(i2c, false);
+}
+
+static int i2c_dw_pci_runtime_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+	struct dw_i2c_dev *i2c = pci_get_drvdata(pdev);
+	int err;
+
+	dev_dbg(dev, "runtime suspend called\n");
+	i2c_dw_suspend(i2c, true);
+
+	err = pci_save_state(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "pci_save_state failed\n");
+		return err;
+	}
+
+	err = pci_set_power_state(pdev, PCI_D3hot);
+	if (err) {
+		dev_err(&pdev->dev, "pci_set_power_state failed\n");
+		return err;
+	}
+
+	return 0;
+}
+
+static int i2c_dw_pci_resume(struct device *dev)
+{
+	struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+	struct dw_i2c_dev *i2c = pci_get_drvdata(pdev);
+
+	dev_dbg(dev, "resume called\n");
+
+	return i2c_dw_resume(i2c, false);
+}
+
+static int i2c_dw_pci_runtime_resume(struct device *dev)
+{
+	struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+	struct dw_i2c_dev *i2c = pci_get_drvdata(pdev);
+	int err;
+
+	dev_dbg(dev, "runtime resume called\n");
+	err = pci_set_power_state(pdev, PCI_D0);
+	if (err) {
+		dev_err(&pdev->dev, "pci_set_power_state() failed\n");
+		return err;
+	}
+	pci_restore_state(pdev);
+	i2c_dw_resume(i2c, true);
+
+	return 0;
+}
+
+static const struct dev_pm_ops i2c_dw_pm_ops = {
+	.suspend_late = i2c_dw_pci_suspend,
+	.resume_early = i2c_dw_pci_resume,
+	SET_RUNTIME_PM_OPS(i2c_dw_pci_runtime_suspend,
+			   i2c_dw_pci_runtime_resume,
+			   NULL)
+};
+
+static int i2c_dw_pci_probe(struct pci_dev *pdev,
+const struct pci_device_id *id)
+{
+	struct dw_i2c_dev *dev;
+	unsigned long start, len;
+	int r;
+	int bus_idx;
+	struct dw_probe_info *dw_info;
+
+	dw_info = (void *)id->driver_data;
+
+	bus_idx = dw_info->ctl_id;
+
+	if (dw_info->need_func)
+		bus_idx += PCI_FUNC(pdev->devfn);
+
+	r = pci_enable_device(pdev);
+	if (r) {
+		dev_err(&pdev->dev, "Failed to enable I2C PCI device (%d)\n",
+			r);
+		return r;
+	}
+
+	/* Determine the address of the I2C area */
+	start = pci_resource_start(pdev, 0);
+	len = pci_resource_len(pdev, 0);
+	if (!start || len == 0) {
+		dev_err(&pdev->dev, "base address not set\n");
+		return -ENODEV;
+	}
+
+	r = pci_request_region(pdev, 0, DRIVER_NAME);
+	if (r) {
+		dev_err(&pdev->dev, "failed to request I2C region "
+			"0x%lx-0x%lx\n", start,
+			(unsigned long)pci_resource_end(pdev, 0));
+		return r;
+	}
+
+	dev = i2c_dw_setup(&pdev->dev, bus_idx, start, len, pdev->irq);
+	if (IS_ERR(dev)) {
+		pci_release_region(pdev, 0);
+		dev_err(&pdev->dev, "failed to setup i2c\n");
+		return -EINVAL;
+	}
+
+	pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
+	pm_runtime_use_autosuspend(&pdev->dev);
+	pm_runtime_put_noidle(&pdev->dev);
+	if (dev->shared_host)
+		pm_runtime_forbid(&pdev->dev);
+	else
+		pm_runtime_allow(&pdev->dev);
+
+	return 0;
+}
+
+static void i2c_dw_pci_remove(struct pci_dev *pdev)
+{
+	struct dw_i2c_dev *dev = pci_get_drvdata(pdev);
+
+	pm_runtime_forbid(&pdev->dev);
+	i2c_dw_free(&pdev->dev, dev);
+	pci_set_drvdata(pdev, NULL);
+	pci_release_region(pdev, 0);
+}
+
+/* work with hotplug and coldplug */
+MODULE_ALIAS("i2c_designware-pci");
+
+DEFINE_PCI_DEVICE_TABLE(i2c_designware_pci_ids) = {
+	/* Moorestown */
+	{ PCI_VDEVICE(INTEL, 0x0802), DW_INFO(moorestown_0, false) },
+	{ PCI_VDEVICE(INTEL, 0x0803), DW_INFO(moorestown_1, false) },
+	{ PCI_VDEVICE(INTEL, 0x0804), DW_INFO(moorestown_2, false) },
+	/* Medfield */
+	{ PCI_VDEVICE(INTEL, 0x0817), DW_INFO(medfield_0, false) },
+	{ PCI_VDEVICE(INTEL, 0x0818), DW_INFO(medfield_1, false) },
+	{ PCI_VDEVICE(INTEL, 0x0819), DW_INFO(medfield_2, false) },
+	{ PCI_VDEVICE(INTEL, 0x082C), DW_INFO(medfield_3, false) },
+	{ PCI_VDEVICE(INTEL, 0x082D), DW_INFO(medfield_4, false) },
+	{ PCI_VDEVICE(INTEL, 0x082E), DW_INFO(medfield_5, false) },
+	/* Cloverview */
+	{ PCI_VDEVICE(INTEL, 0x08E2), DW_INFO(cloverview_0, false) },
+	{ PCI_VDEVICE(INTEL, 0x08E3), DW_INFO(cloverview_1, false) },
+	{ PCI_VDEVICE(INTEL, 0x08E4), DW_INFO(cloverview_2, false) },
+	{ PCI_VDEVICE(INTEL, 0x08F4), DW_INFO(cloverview_3, false) },
+	{ PCI_VDEVICE(INTEL, 0x08F5), DW_INFO(cloverview_4, false) },
+	{ PCI_VDEVICE(INTEL, 0x08F6), DW_INFO(cloverview_5, false) },
+	/* Merrifield */
+	{ PCI_VDEVICE(INTEL, 0x1195), DW_INFO(merrifield_0, true) },
+	{ PCI_VDEVICE(INTEL, 0x1196), DW_INFO(merrifield_4, true) },
+	/* Valleyview 2 */
+	{ PCI_VDEVICE(INTEL, 0x0F41), DW_INFO(valleyview_1, false) },
+	{ PCI_VDEVICE(INTEL, 0x0F42), DW_INFO(valleyview_2, false) },
+	{ PCI_VDEVICE(INTEL, 0x0F43), DW_INFO(valleyview_3, false) },
+	{ PCI_VDEVICE(INTEL, 0x0F44), DW_INFO(valleyview_4, false) },
+	{ PCI_VDEVICE(INTEL, 0x0F45), DW_INFO(valleyview_5, false) },
+	{ PCI_VDEVICE(INTEL, 0x0F46), DW_INFO(valleyview_6, false) },
+	{ PCI_VDEVICE(INTEL, 0x0F47), DW_INFO(valleyview_7, false) },
+	/* Cherryview */
+	{ PCI_VDEVICE(INTEL, 0x22C1), DW_INFO(cherryview_1, false) },
+	{ PCI_VDEVICE(INTEL, 0x22C2), DW_INFO(cherryview_2, false) },
+	{ PCI_VDEVICE(INTEL, 0x22C3), DW_INFO(cherryview_3, false) },
+	{ PCI_VDEVICE(INTEL, 0x22C4), DW_INFO(cherryview_4, false) },
+	{ PCI_VDEVICE(INTEL, 0x22C5), DW_INFO(cherryview_5, false) },
+	{ PCI_VDEVICE(INTEL, 0x22C6), DW_INFO(cherryview_6, false) },
+	{ PCI_VDEVICE(INTEL, 0x22C7), DW_INFO(cherryview_7, false) },
+	{ 0,}
+};
+MODULE_DEVICE_TABLE(pci, i2c_designware_pci_ids);
+
+static struct pci_driver dw_i2c_driver = {
+	.name		= DRIVER_NAME,
+	.id_table	= i2c_designware_pci_ids,
+	.probe		= i2c_dw_pci_probe,
+	.remove		= i2c_dw_pci_remove,
+	.driver         = {
+		.pm     = &i2c_dw_pm_ops,
+	},
+};
+
+static int __init dw_i2c_init_driver(void)
+{
+	return  pci_register_driver(&dw_i2c_driver);
+}
+module_init(dw_i2c_init_driver);
+
+static void __exit dw_i2c_exit_driver(void)
+{
+	pci_unregister_driver(&dw_i2c_driver);
+}
+module_exit(dw_i2c_exit_driver);
+
+#ifndef MODULE
+static int __init dw_i2c_reserve_static_bus(void)
+{
+	struct i2c_board_info dummy = {
+		I2C_BOARD_INFO("dummy", 0xff),
+	};
+
+	i2c_register_board_info(DW_I2C_STATIC_BUS_NUM, &dummy, 1);
+	return 0;
+}
+subsys_initcall(dw_i2c_reserve_static_bus);
+
+static void dw_i2c_pci_final_quirks(struct pci_dev *pdev)
+{
+	pdev->pm_cap = 0x80;
+}
+
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0F44,
+				dw_i2c_pci_final_quirks);
+#endif
+
+MODULE_AUTHOR("Baruch Siach <baruch@tkos.co.il>");
+MODULE_DESCRIPTION("Synopsys DesignWare PCI I2C bus adapter");
+MODULE_LICENSE("GPL");
diff --git a/drivers/external_drivers/drivers/i2c/busses/i2c-pmic-regs.h b/drivers/external_drivers/drivers/i2c/busses/i2c-pmic-regs.h
new file mode 100644
index 0000000..b9ce423
--- /dev/null
+++ b/drivers/external_drivers/drivers/i2c/busses/i2c-pmic-regs.h
@@ -0,0 +1,80 @@
+/*
+ * i2c-pmic-regs.h - PMIC I2C registers
+ *
+ * Copyright (C) 2011 Intel Corporation
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * Author: Yegnesh Iyer <yegnesh.s.iyer@intel.com>
+ */
+
+#ifndef __I2C_PMIC_REGS_H__
+#define __I2C_PMIC_REGS_H__
+
+#include <linux/mutex.h>
+#include <linux/wakelock.h>
+
+/*********************************************************************
+ *		Generic defines
+ *********************************************************************/
+
+#define D7 (1 << 7)
+#define D6 (1 << 6)
+#define D5 (1 << 5)
+#define D4 (1 << 4)
+#define D3 (1 << 3)
+#define D2 (1 << 2)
+#define D1 (1 << 1)
+#define D0 (1 << 0)
+
+#define PMIC_SRAM_INTR_ADDR 0xFFFFF616
+
+#define I2C_MSG_LEN		4
+
+#define I2COVRCTRL_ADDR		0x58
+#define I2COVRDADDR_ADDR	0x59
+#define I2COVROFFSET_ADDR	0x5A
+#define I2COVRWRDATA_ADDR	0x5B
+#define I2COVRRDDATA_ADDR	0x5C
+
+#define IRQLVL1_ADDR			0x01
+#define IRQLVL1_MASK_ADDR		0x0c
+#define IRQLVL1_CHRGR_MASK		D5
+
+#define MCHGRIRQ1_ADDR			0x13
+#define MCHGRIRQ0_ADDR			0x12
+
+#define PMIC_I2C_INTR_MASK ((u8)(D3|D2|D1))
+#define I2COVRCTRL_I2C_RD D1
+#define I2COVRCTRL_I2C_WR D0
+#define CHGRIRQ0_ADDR			0x07
+
+#define IRQ0_I2C_BIT_POS 1
+
+struct pmic_i2c_dev {
+	int irq;
+	u32 pmic_intr_sram_addr;
+	struct i2c_adapter adapter;
+	int i2c_rw;
+	wait_queue_head_t i2c_wait;
+	struct mutex i2c_pmic_rw_lock;
+	void __iomem *pmic_intr_map;
+	struct wake_lock i2c_wake_lock;
+	struct device *dev;
+};
+
+#endif
diff --git a/drivers/external_drivers/drivers/i2c/busses/i2c-pmic.c b/drivers/external_drivers/drivers/i2c/busses/i2c-pmic.c
new file mode 100644
index 0000000..b93bdb0
--- /dev/null
+++ b/drivers/external_drivers/drivers/i2c/busses/i2c-pmic.c
@@ -0,0 +1,456 @@
+/*
+ * i2c-pmic.c: PMIC I2C adapter driver.
+ *
+ * Copyright (C) 2011 Intel Corporation
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * Author: Yegnesh Iyer <yegnesh.s.iyer@intel.com>
+ */
+
+#include <linux/slab.h>
+#include <linux/rpmsg.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/power_supply.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/pm_runtime.h>
+#include <asm/intel_scu_pmic.h>
+#include <asm/intel_mid_rpmsg.h>
+#include <asm/intel_mid_remoteproc.h>
+#include "i2c-pmic-regs.h"
+
+#define DRIVER_NAME "i2c_pmic_adap"
+#define PMIC_I2C_ADAPTER 8
+
+enum I2C_STATUS {
+	I2C_WR = 1,
+	I2C_RD,
+	I2C_NACK = 4
+};
+
+static struct pmic_i2c_dev *pmic_dev;
+
+/* Function Definitions */
+
+/* PMIC I2C read-write completion interrupt handler */
+static irqreturn_t pmic_i2c_handler(int irq, void *data)
+{
+	u8 irq0_int;
+
+	irq0_int = ioread8(pmic_dev->pmic_intr_map);
+	irq0_int &= PMIC_I2C_INTR_MASK;
+
+	if (irq0_int) {
+		pmic_dev->i2c_rw = (irq0_int >> IRQ0_I2C_BIT_POS);
+		return IRQ_WAKE_THREAD;
+	}
+
+	return IRQ_NONE;
+}
+
+
+static irqreturn_t pmic_thread_handler(int id, void *data)
+{
+#define IRQLVL1_MASK_ADDR		0x0c
+#define IRQLVL1_CHRGR_MASK		D5
+
+	dev_dbg(pmic_dev->dev, "Clearing IRQLVL1_MASK_ADDR\n");
+
+	intel_scu_ipc_update_register(IRQLVL1_MASK_ADDR, 0x00,
+			IRQLVL1_CHRGR_MASK);
+	wake_up(&(pmic_dev->i2c_wait));
+	return IRQ_HANDLED;
+}
+
+/* PMIC i2c read msg */
+static inline int pmic_i2c_read_xfer(struct i2c_msg msg)
+{
+	int ret;
+	u16 i;
+	u8 mask = (I2C_RD | I2C_NACK);
+	u16 regs[I2C_MSG_LEN] = {0};
+	u8 data[I2C_MSG_LEN] = {0};
+
+	for (i = 0; i < msg.len ; i++) {
+		pmic_dev->i2c_rw = 0;
+		regs[0] = I2COVRDADDR_ADDR;
+		data[0] = msg.addr;
+		regs[1] = I2COVROFFSET_ADDR;
+		data[1] = msg.buf[0] + i;
+		/* intel_scu_ipc_function works fine for even number of bytes */
+		/* Hence adding a dummy byte transfer */
+		regs[2] = I2COVROFFSET_ADDR;
+		data[2] = msg.buf[0] + i;
+		regs[3] = I2COVRCTRL_ADDR;
+		data[3] = I2COVRCTRL_I2C_RD;
+		ret = intel_scu_ipc_writev(regs, data, I2C_MSG_LEN);
+		if (unlikely(ret))
+			return ret;
+
+		ret = wait_event_timeout(pmic_dev->i2c_wait,
+				(pmic_dev->i2c_rw & mask),
+				HZ);
+
+		if (ret == 0) {
+			ret = -ETIMEDOUT;
+			goto read_err_exit;
+		} else if (pmic_dev->i2c_rw == I2C_NACK) {
+			ret = -EIO;
+			goto read_err_exit;
+		} else {
+			ret = intel_scu_ipc_ioread8(I2COVRRDDATA_ADDR,
+					&(msg.buf[i]));
+			if (unlikely(ret)) {
+				ret = -EIO;
+				goto read_err_exit;
+			}
+		}
+	}
+	return 0;
+
+read_err_exit:
+	return ret;
+}
+
+/* PMIC i2c write msg */
+static inline int pmic_i2c_write_xfer(struct i2c_msg msg)
+{
+	int ret;
+	u16 i;
+	u8 mask = (I2C_WR | I2C_NACK);
+	u16 regs[I2C_MSG_LEN] = {0};
+	u8 data[I2C_MSG_LEN] = {0};
+
+	for (i = 1; i <= msg.len ; i++) {
+		pmic_dev->i2c_rw = 0;
+		regs[0] = I2COVRDADDR_ADDR;
+		data[0] = msg.addr;
+		regs[1] = I2COVRWRDATA_ADDR;
+		data[1] = msg.buf[i];
+		regs[2] = I2COVROFFSET_ADDR;
+		data[2] = msg.buf[0] + i - 1;
+		regs[3] = I2COVRCTRL_ADDR;
+		data[3] = I2COVRCTRL_I2C_WR;
+		ret = intel_scu_ipc_writev(regs, data, I2C_MSG_LEN);
+		if (unlikely(ret))
+			return ret;
+
+		ret = wait_event_timeout(pmic_dev->i2c_wait,
+				(pmic_dev->i2c_rw & mask),
+				HZ);
+		if (ret == 0)
+			return -ETIMEDOUT;
+		else if (pmic_dev->i2c_rw == I2C_NACK)
+			return -EIO;
+	}
+	return 0;
+}
+
+static int (*xfer_fn[]) (struct i2c_msg) = {
+	pmic_i2c_write_xfer,
+	pmic_i2c_read_xfer
+};
+
+/* PMIC I2C Master transfer algorithm function */
+static int pmic_master_xfer(struct i2c_adapter *adap,
+				struct i2c_msg msgs[],
+				int num)
+{
+	int ret = 0;
+	int i;
+	u8 index;
+
+	mutex_lock(&pmic_dev->i2c_pmic_rw_lock);
+	wake_lock(&pmic_dev->i2c_wake_lock);
+	pm_runtime_get_sync(pmic_dev->dev);
+	for (i = 0 ; i < num ; i++) {
+		index = msgs[i].flags & I2C_M_RD;
+		ret = (xfer_fn[index])(msgs[i]);
+
+		if (ret == -EACCES)
+			dev_info(pmic_dev->dev, "Blocked Access!\n");
+
+		/* If access is restricted, return true to
+		*  avoid extra error handling in client
+		*/
+
+		if (ret != 0 && ret != -EACCES)
+			goto transfer_err_exit;
+	}
+
+	ret = num;
+
+transfer_err_exit:
+	mutex_unlock(&pmic_dev->i2c_pmic_rw_lock);
+	pm_runtime_put_sync(pmic_dev->dev);
+	wake_unlock(&pmic_dev->i2c_wake_lock);
+	intel_scu_ipc_update_register(IRQLVL1_MASK_ADDR, 0x00,
+			IRQLVL1_CHRGR_MASK);
+	return ret;
+}
+
+/* PMIC I2C adapter capability function */
+static u32 pmic_master_func(struct i2c_adapter *adap)
+{
+	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_BYTE_DATA;
+}
+
+static int pmic_smbus_xfer(struct i2c_adapter *adap, u16 addr,
+				unsigned short flags, char read_write,
+				u8 command, int size,
+				union i2c_smbus_data *data)
+{
+	struct i2c_msg msg;
+	u8 buf[2];
+	int ret;
+
+	msg.addr = addr;
+	msg.flags = flags & I2C_M_TEN;
+	msg.buf = buf;
+	msg.buf[0] = command;
+	if (read_write == I2C_SMBUS_WRITE) {
+		msg.len = 1;
+		msg.buf[1] = data->byte;
+	} else {
+		msg.flags |= I2C_M_RD;
+		msg.len = 1;
+	}
+
+	ret = pmic_master_xfer(adap, &msg, 1);
+	if (ret == 1) {
+		if (read_write == I2C_SMBUS_READ)
+			data->byte = msg.buf[0];
+		return 0;
+	}
+	return ret;
+}
+
+
+static const struct i2c_algorithm pmic_i2c_algo = {
+	.master_xfer = pmic_master_xfer,
+	.functionality = pmic_master_func,
+	.smbus_xfer = pmic_smbus_xfer,
+};
+
+static int pmic_i2c_probe(struct platform_device *pdev)
+{
+	struct i2c_adapter *adap;
+	int ret;
+
+	pmic_dev = kzalloc(sizeof(struct pmic_i2c_dev), GFP_KERNEL);
+	if (!pmic_dev)
+		return -ENOMEM;
+
+	pmic_dev->dev = &pdev->dev;
+	pmic_dev->irq = platform_get_irq(pdev, 0);
+
+
+
+	mutex_init(&pmic_dev->i2c_pmic_rw_lock);
+	wake_lock_init(&pmic_dev->i2c_wake_lock, WAKE_LOCK_SUSPEND,
+			"pmic_i2c_wake_lock");
+	init_waitqueue_head(&(pmic_dev->i2c_wait));
+
+	pmic_dev->pmic_intr_map = ioremap_nocache(PMIC_SRAM_INTR_ADDR, 8);
+	if (!pmic_dev->pmic_intr_map) {
+		dev_err(&pdev->dev, "ioremap Failed\n");
+		ret = -ENOMEM;
+		goto ioremap_failed;
+	}
+	ret = request_threaded_irq(pmic_dev->irq, pmic_i2c_handler,
+					pmic_thread_handler,
+					IRQF_SHARED|IRQF_NO_SUSPEND,
+					DRIVER_NAME, pmic_dev);
+	if (ret)
+		goto err_irq_request;
+
+	ret = intel_scu_ipc_update_register(IRQLVL1_MASK_ADDR, 0x00,
+			IRQLVL1_CHRGR_MASK);
+	if (unlikely(ret))
+		goto unmask_irq_failed;
+	ret = intel_scu_ipc_update_register(MCHGRIRQ0_ADDR, 0x00,
+			PMIC_I2C_INTR_MASK);
+	if (unlikely(ret))
+		goto unmask_irq_failed;
+
+	/* Init runtime PM state*/
+	pm_runtime_put_noidle(pmic_dev->dev);
+
+	adap = &pmic_dev->adapter;
+	adap->owner = THIS_MODULE;
+	adap->class = I2C_CLASS_HWMON;
+	adap->algo = &pmic_i2c_algo;
+	strcpy(adap->name, "PMIC I2C Adapter");
+	adap->nr = PMIC_I2C_ADAPTER;
+	ret = i2c_add_numbered_adapter(adap);
+
+	if (ret) {
+		dev_err(&pdev->dev, "Error adding the adapter\n");
+		goto err_adap_add;
+	}
+
+	pm_schedule_suspend(pmic_dev->dev, MSEC_PER_SEC);
+	return 0;
+
+err_adap_add:
+	free_irq(pmic_dev->irq, pmic_dev);
+unmask_irq_failed:
+err_irq_request:
+	iounmap(pmic_dev->pmic_intr_map);
+ioremap_failed:
+	kfree(pmic_dev);
+	return ret;
+}
+
+static int pmic_i2c_remove(struct platform_device *pdev)
+{
+	iounmap(pmic_dev->pmic_intr_map);
+	free_irq(pmic_dev->irq, pmic_dev);
+	pm_runtime_get_noresume(pmic_dev->dev);
+	kfree(pmic_dev);
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int pmic_i2c_suspend(struct device *dev)
+{
+	dev_info(dev, "%s\n", __func__);
+	return 0;
+}
+
+static int pmic_i2c_resume(struct device *dev)
+{
+	dev_info(dev, "%s\n", __func__);
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int pmic_i2c_runtime_suspend(struct device *dev)
+{
+	dev_info(dev, "%s\n", __func__);
+	return 0;
+}
+
+static int pmic_i2c_runtime_resume(struct device *dev)
+{
+	dev_info(dev, "%s\n", __func__);
+	return 0;
+}
+
+static int pmic_i2c_runtime_idle(struct device *dev)
+{
+	dev_info(dev, "%s\n", __func__);
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops pmic_i2c_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(pmic_i2c_suspend,
+				pmic_i2c_resume)
+	SET_RUNTIME_PM_OPS(pmic_i2c_runtime_suspend,
+				pmic_i2c_runtime_resume,
+				pmic_i2c_runtime_idle)
+};
+
+struct platform_driver pmic_i2c_driver = {
+	.probe = pmic_i2c_probe,
+	.remove = pmic_i2c_remove,
+	.driver = {
+		.name = DRIVER_NAME,
+		.owner = THIS_MODULE,
+		.pm = &pmic_i2c_pm_ops,
+	},
+};
+
+static int pmic_i2c_init(void)
+{
+	return platform_driver_register(&pmic_i2c_driver);
+}
+
+static void pmic_i2c_exit(void)
+{
+	platform_driver_unregister(&pmic_i2c_driver);
+}
+
+static int pmic_i2c_rpmsg_probe(struct rpmsg_channel *rpdev)
+{
+	int ret = 0;
+
+	if (rpdev == NULL) {
+		pr_err("rpmsg channel not created\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	dev_info(&rpdev->dev, "Probed pmic_i2c rpmsg device\n");
+
+	ret = pmic_i2c_init();
+
+out:
+	return ret;
+}
+
+static void pmic_i2c_rpmsg_remove(struct rpmsg_channel *rpdev)
+{
+	pmic_i2c_exit();
+	dev_info(&rpdev->dev, "Removed pmic_i2c rpmsg device\n");
+}
+
+static void pmic_i2c_rpmsg_cb(struct rpmsg_channel *rpdev, void *data,
+					int len, void *priv, u32 src)
+{
+	dev_warn(&rpdev->dev, "unexpected, message\n");
+
+	print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+		       data, len,  true);
+}
+
+static struct rpmsg_device_id pmic_i2c_rpmsg_id_table[] = {
+	{ .name	= "rpmsg_i2c_pmic_adap" },
+	{ },
+};
+MODULE_DEVICE_TABLE(rpmsg, pmic_i2c_rpmsg_id_table);
+
+static struct rpmsg_driver pmic_i2c_rpmsg = {
+	.drv.name	= KBUILD_MODNAME,
+	.drv.owner	= THIS_MODULE,
+	.id_table	= pmic_i2c_rpmsg_id_table,
+	.probe		= pmic_i2c_rpmsg_probe,
+	.callback	= pmic_i2c_rpmsg_cb,
+	.remove		= pmic_i2c_rpmsg_remove,
+};
+
+static int __init pmic_i2c_rpmsg_init(void)
+{
+	return register_rpmsg_driver(&pmic_i2c_rpmsg);
+}
+
+static void __exit pmic_i2c_rpmsg_exit(void)
+{
+	return unregister_rpmsg_driver(&pmic_i2c_rpmsg);
+}
+module_init(pmic_i2c_rpmsg_init);
+module_exit(pmic_i2c_rpmsg_exit);
+
+MODULE_AUTHOR("Yegnesh Iyer <yegnesh.s.iyer@intel.com");
+MODULE_DESCRIPTION("PMIC I2C Master driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/external_drivers/drivers/input/Kconfig b/drivers/external_drivers/drivers/input/Kconfig
new file mode 100644
index 0000000..221b63f
--- /dev/null
+++ b/drivers/external_drivers/drivers/input/Kconfig
@@ -0,0 +1 @@
+source "drivers/external_drivers/drivers/input/touchscreen/Kconfig"
diff --git a/drivers/external_drivers/drivers/input/Makefile b/drivers/external_drivers/drivers/input/Makefile
new file mode 100644
index 0000000..a29c6c0
--- /dev/null
+++ b/drivers/external_drivers/drivers/input/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_INPUT_TOUCHSCREEN) += touchscreen/
diff --git a/drivers/external_drivers/drivers/input/touchscreen/Kconfig b/drivers/external_drivers/drivers/input/touchscreen/Kconfig
new file mode 100644
index 0000000..26f8816
--- /dev/null
+++ b/drivers/external_drivers/drivers/input/touchscreen/Kconfig
@@ -0,0 +1,9 @@
+config TOUCHSCREEN_R69001_I2C
+	tristate "JDI R69001 touchscreen"
+	depends on I2C
+	default n
+	help
+		Say Y here if you have a R69001 touchscreen
+		If unsure, say N.
+
+source "drivers/external_drivers/drivers/input/touchscreen/rmi4/Kconfig"
diff --git a/drivers/external_drivers/drivers/input/touchscreen/Makefile b/drivers/external_drivers/drivers/input/touchscreen/Makefile
new file mode 100644
index 0000000..1c24ef2
--- /dev/null
+++ b/drivers/external_drivers/drivers/input/touchscreen/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_TOUCHSCREEN_R69001_I2C)	+= r69001-ts-i2c.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4_FORK) += rmi4/
diff --git a/drivers/external_drivers/drivers/input/touchscreen/r69001-ts-i2c.c b/drivers/external_drivers/drivers/input/touchscreen/r69001-ts-i2c.c
new file mode 100644
index 0000000..b1bff7f
--- /dev/null
+++ b/drivers/external_drivers/drivers/input/touchscreen/r69001-ts-i2c.c
@@ -0,0 +1,636 @@
+/*
+ * R69001 Touchscreen Controller Driver
+ * Source file
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/version.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/input-polldev.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/jiffies.h>
+#include <linux/miscdevice.h>
+#include <linux/suspend.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/uaccess.h>
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+
+#define MODE_COUNT                  4
+#define MODE_STRING_MAX_LEN         30
+#define MODE_FILE_NAME              "mode"
+#endif
+
+#define CONFIG_R69001_POLLING_TIME 10
+#include <linux/r69001-ts.h>
+
+#define R69001_TS_NAME              "r69001-ts-i2c"
+
+/* Coordinates data register address */
+#define REG_COORDINATES_DATA        0x00    /* High */
+#define REG_INFO1                   0x00    /* Low */
+#define REG_INFO2                   0x01
+#define REG_DATA0                   0x02
+#define REG_DATA1                   0x0b
+#define REG_DATA2                   0x14
+#define REG_DATA3                   0x1d
+#define REG_DATA4                   0x26
+
+/* One set coordinates data size */
+#define ONE_SET_COORD_DATA_SIZE     9
+
+/* Boot Mode */
+#define BOOT_MODE_BOOT_ROM          0x80
+
+/* Commands */
+#define COMMAND_BOOT                0x10
+#define COMMAND_FIRMWARE_UPDATE     0x20
+
+/* Control register address */
+#define REG_CONTROL                 0x1c    /* High */
+#define REG_SCAN_MODE               0x00    /* Low */
+#define REG_SCAN_CYCLE              0x01
+#define REG_INT_POLL_CTRL           0x02
+#define REG_INT_SIGNAL_OUTPUT_CTRL  0x03
+#define REG_WRITE_DATA_CTRL         0x04
+#define REG_READY_DATA              0x05
+#define REG_SCAN_COUNTER            0x06
+#define REG_FUNC_CTRL               0x0b
+#define REG_LOW_POWER               0x17
+
+/* Ready data */
+#define READY_COORDINATES           0x01
+#define READY_RAW                   0x02
+#define READY_BASELINE              0x04
+#define READY_DIFF                  0x08
+#define READY_LABElMAP              0x10
+#define READY_CALIBRATION           0x20
+#define READY_GESTURE               0x40
+
+/* Scan Mode */
+#define SCAN_MODE_STOP              R69001_SCAN_MODE_STOP
+#define SCAN_MODE_LOW_POWER         R69001_SCAN_MODE_LOW_POWER
+#define SCAN_MODE_FULL_SCAN         R69001_SCAN_MODE_FULL_SCAN
+#define SCAN_MODE_CALIBRATION       R69001_SCAN_MODE_CALIBRATION
+
+/* Interrupt/Polling mode */
+#define INTERRUPT_MODE              R69001_TS_INTERRUPT_MODE
+#define POLLING_MODE                R69001_TS_POLLING_MODE
+#define POLLING_LOW_EDGE_MODE       R69001_TS_POLLING_LOW_EDGE_MODE
+#define CALIBRATION_INTERRUPT_MODE  R69001_TS_CALIBRATION_INTERRUPT_MODE
+#define UNKNOW_MODE                 255
+
+#define DEFAULT_INTERRUPT_MASK      0x0e
+#define CALIBRATION_INTERRUPT_MASK  0x08
+#define TOUCH_ID_MIN                1
+#define TOUCH_ID_INVALID            0xff
+
+struct r69001_ts_finger {
+	u16 x;
+	u16 y;
+	u8 z;
+	u8 t;
+};
+
+struct r69001_ts_before_regs {
+	u8 int_signal_output_ctrl;
+	u8 scan_cycle;
+};
+
+struct r69001_ts_data {
+	struct i2c_client *client;
+	struct input_dev *input_dev;
+	struct r69001_ts_finger finger[MAX_FINGERS];
+	struct r69001_io_data data;
+	struct r69001_ts_before_regs regs;
+	struct r69001_platform_data *pdata;
+	unsigned int finger_mask;
+	u8 mode;
+	u8 t_num;
+};
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *r69001_ts_dbgfs_root;
+static const char * const r69001_ts_modes[] = { "interrupt", "polling",
+		"polling low edge", "calibration interrupt" };
+#endif
+
+static void r69001_set_mode(struct r69001_ts_data *ts, u8 mode, u16 poll_time);
+
+static int r69001_ts_read_data(struct r69001_ts_data *ts,
+				u8 addr_h, u8 addr_l, u16 size, u8 *data)
+{
+	struct i2c_client *client = ts->client;
+	struct i2c_msg msg[2];
+	int error;
+	u8 buf[2];
+
+	buf[0] = addr_h;
+	buf[1] = addr_l;
+
+	/* Set data point */
+	msg[0].addr = client->addr;
+	msg[0].flags = 0;
+	msg[0].len = 2;
+	msg[0].buf = buf;
+
+	/* Byte read */
+	msg[1].addr = client->addr;
+	msg[1].flags = I2C_M_RD;
+	msg[1].len = size;
+	msg[1].buf = data;
+
+	error = i2c_transfer(client->adapter, msg, 1);
+	if (error > 0)
+		error = i2c_transfer(client->adapter, msg + 1, 1);
+	if (error < 0)
+		dev_err(&client->dev,
+			"I2C read error high: 0x%02x low:0x%02x size:%d ret:%d\n",
+			addr_h, addr_l, size, error);
+
+	return error;
+}
+
+static int
+r69001_ts_write_data(struct r69001_ts_data *ts, u8 addr_h, u8 addr_l, u8 data)
+{
+	struct i2c_client *client = ts->client;
+	struct i2c_msg msg;
+	int error;
+	u8 buf[3];
+
+	buf[0] = addr_h;
+	buf[1] = addr_l;
+	buf[2] = data;
+
+	/* Byte write */
+	msg.addr = client->addr;
+	msg.flags = 0;
+	msg.len = sizeof(buf);
+	msg.buf = buf;
+
+	error = i2c_transfer(client->adapter, &msg, 1);
+	if (error < 0)
+		dev_err(&client->dev,
+			"I2C write error high: 0x%02x low:0x%02x data:0x%02x ret:%d\n",
+			addr_h, addr_l, data, error);
+	return error;
+}
+
+static void
+r69001_ts_report_coordinates_data(struct r69001_ts_data *ts, int filter)
+{
+	struct r69001_ts_finger *finger = ts->finger;
+	struct input_dev *input_dev = ts->input_dev;
+	unsigned int mask = 0;
+	u8 i;
+
+	for (i = 0; i < ts->t_num; i++) {
+		if (finger[i].t < TOUCH_ID_MIN || finger[i].t == filter)
+			continue;
+		finger[i].t -= TOUCH_ID_MIN;
+		input_mt_slot(input_dev, finger[i].t);
+		input_mt_report_slot_state(input_dev, MT_TOOL_FINGER, true);
+		input_report_abs(input_dev, ABS_MT_POSITION_X, finger[i].x);
+		input_report_abs(input_dev, ABS_MT_POSITION_Y, finger[i].y);
+		input_report_abs(input_dev, ABS_MT_PRESSURE, finger[i].z);
+		mask |= (1 << finger[i].t);
+	}
+
+	/* Get the removed fingers */
+	ts->finger_mask &= ~mask;
+
+	/* Release the removed fingers */
+	for (i = 0; ts->finger_mask != 0; i++) {
+		if (ts->finger_mask & 0x01) {
+			input_mt_slot(input_dev, i);
+			input_mt_report_slot_state(input_dev,
+					MT_TOOL_FINGER, false);
+		}
+		ts->finger_mask >>= 1;
+	}
+
+	/* SYN_REPORT */
+	input_sync(input_dev);
+
+	ts->finger_mask = mask;
+	ts->t_num = 0;
+}
+
+static int r69001_ts_read_coordinates_data(struct r69001_ts_data *ts)
+{
+	struct r69001_ts_finger *finger = ts->finger;
+	u8 i;
+	u8 numt = 0;
+	u8 data[ONE_SET_COORD_DATA_SIZE] = { 0 };
+	u8 lowreg[5] = {REG_DATA0, REG_DATA1, REG_DATA2, REG_DATA3, REG_DATA4};
+	int error;
+	bool inval_id = false;
+
+	error = r69001_ts_read_data(ts,
+			REG_COORDINATES_DATA, REG_INFO1, 1, &numt);
+	if (error < 0)
+		return error;
+
+	numt &= 0x0f;
+	if (numt > MAX_FINGERS)
+		numt = MAX_FINGERS;
+
+	for (i = 0; i < numt; i++) {
+		if (i % 2) {
+			finger[i].x =
+				((u16)(data[7] & 0x0f) << 8) | (u16)(data[5]);
+			finger[i].y =
+				((u16)(data[7] & 0xf0) << 4) | (u16)(data[6]);
+			finger[i].z = data[8];
+			finger[i].t = (data[0] & 0xf0) >> 4;
+			if (finger[i].t < TOUCH_ID_MIN)
+				inval_id = true;
+
+		} else {
+			error = r69001_ts_read_data(ts,
+					REG_COORDINATES_DATA, lowreg[i / 2],
+					ONE_SET_COORD_DATA_SIZE, data);
+			if (error < 0)
+				return error;
+			finger[i].x =
+				((u16)(data[3] & 0x0f) << 8) | (u16)(data[1]);
+			finger[i].y =
+				((u16)(data[3] & 0xf0) << 4) | (u16)(data[2]);
+			finger[i].z = data[4];
+			finger[i].t = data[0] & 0x0f;
+			if (finger[i].t < TOUCH_ID_MIN)
+				inval_id = true;
+		}
+	}
+
+	/* Only update the number when there is no error happened */
+	ts->t_num = numt;
+	return inval_id ? TOUCH_ID_INVALID : 0;
+}
+
+static irqreturn_t r69001_ts_irq_handler(int irq, void *dev_id)
+{
+	struct r69001_ts_data *ts = dev_id;
+	struct i2c_client *client = ts->client;
+	u8 mode = 0;
+	int err = 0;
+	int filter = 0;
+
+	r69001_ts_read_data(ts, REG_CONTROL, REG_SCAN_MODE, 1, &mode);
+
+	if (ts->data.mode.mode == INTERRUPT_MODE) {
+		if (mode == SCAN_MODE_STOP) {
+			/* if receive a touchscreen interrupt, but the scan
+			 * mode is stop and we are in interrupt mode, that means
+			 * touch panel just power on, so re-init it
+			 */
+			ts->data.mode.mode = UNKNOW_MODE;
+			r69001_ts_write_data(ts, REG_CONTROL,
+						REG_SCAN_CYCLE, SCAN_TIME);
+			r69001_set_mode(ts, ts->mode, POLL_INTERVAL);
+		}
+
+		err = r69001_ts_read_coordinates_data(ts);
+		if (err < 0) {
+			dev_err(&client->dev,
+					"%s: Read coordinate data failed\n",
+					__func__);
+
+			return IRQ_HANDLED;
+		}
+		if (err == TOUCH_ID_INVALID)
+			filter = 1;
+		r69001_ts_report_coordinates_data(ts, filter);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Set Int Ctl
+ * mode : 0 = INT Mode, 1 = POLL Mode, 2 = POLL + INT Mode
+ * poll_time : Polling interval (msec, 1 - 1000)
+ *
+ * The msleep(100) in this driver comes directly from Vendor's
+ * driver, and can't find any explanation in the datasheet, so
+ * just keep it now.
+ */
+static void r69001_set_mode(struct r69001_ts_data *ts, u8 mode, u16 poll_time)
+{
+	struct i2c_client *client = ts->client;
+
+	if (ts->data.mode.mode == mode)
+		return;
+
+	switch (mode) {
+	case INTERRUPT_MODE:
+		r69001_ts_write_data(ts, REG_CONTROL,
+				REG_INT_POLL_CTRL, INTERRUPT_MODE);
+
+		r69001_ts_write_data(ts, REG_CONTROL,
+				REG_SCAN_MODE, SCAN_MODE_STOP);
+		r69001_ts_write_data(ts, REG_CONTROL,
+				REG_INT_SIGNAL_OUTPUT_CTRL,
+				DEFAULT_INTERRUPT_MASK);
+		msleep(100);
+		r69001_ts_write_data(ts, REG_CONTROL,
+				REG_SCAN_MODE, SCAN_MODE_FULL_SCAN);
+		ts->data.mode.mode = mode;
+		break;
+	case POLLING_MODE:
+	case POLLING_LOW_EDGE_MODE:
+		r69001_ts_write_data(ts, REG_CONTROL,
+				REG_INT_POLL_CTRL, POLLING_MODE);
+		if (mode == POLLING_LOW_EDGE_MODE)
+			r69001_ts_write_data(ts, REG_CONTROL,
+				REG_INT_SIGNAL_OUTPUT_CTRL, 0x01);
+		else
+			r69001_ts_write_data(ts, REG_CONTROL,
+				REG_INT_SIGNAL_OUTPUT_CTRL, 0x00);
+		r69001_ts_write_data(ts, REG_CONTROL,
+				REG_SCAN_MODE, SCAN_MODE_STOP);
+		msleep(100);
+		r69001_ts_write_data(ts, REG_CONTROL,
+				REG_SCAN_MODE, SCAN_MODE_FULL_SCAN);
+		r69001_ts_write_data(ts, REG_CONTROL,
+				REG_WRITE_DATA_CTRL, 0x01);
+		if (poll_time && poll_time <= POLL_INTERVAL_MAX)
+			ts->data.mode.poll_time = poll_time;
+		else
+			ts->data.mode.poll_time = POLL_INTERVAL;
+		ts->data.mode.mode = mode;
+		break;
+	case CALIBRATION_INTERRUPT_MODE:
+		r69001_ts_write_data(ts, REG_CONTROL,
+				REG_SCAN_MODE, SCAN_MODE_STOP);
+		r69001_ts_write_data(ts, REG_CONTROL,
+				REG_INT_SIGNAL_OUTPUT_CTRL,
+				CALIBRATION_INTERRUPT_MASK);
+		msleep(100);
+		r69001_ts_write_data(ts, REG_CONTROL,
+				REG_SCAN_MODE, SCAN_MODE_CALIBRATION);
+		ts->data.mode.mode = mode;
+		break;
+	default:
+		dev_err(&client->dev, "Set Int Ctl bad parameter = %d\n", mode);
+		break;
+	}
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int r69001_ts_dbgfs_show(struct seq_file *seq, void *unused)
+{
+	struct r69001_ts_data *ts;
+
+	ts = (struct r69001_ts_data *)seq->private;
+	if (!ts)
+		return -EFAULT;
+
+	if (ts->data.mode.mode >= MODE_COUNT)
+		return -EFAULT;
+
+	seq_printf(seq, "%s\n", r69001_ts_modes[ts->data.mode.mode]);
+
+	return 0;
+}
+
+static int r69001_ts_dbgfs_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, r69001_ts_dbgfs_show, inode->i_private);
+}
+
+static ssize_t r69001_ts_dbgfs_write(struct file *file,
+		const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	int i;
+	struct seq_file *seq;
+	struct r69001_ts_data *ts;
+	char buf[MODE_STRING_MAX_LEN] = {0};
+
+	if (count > sizeof(buf) - 1)
+		return -EINVAL;
+
+	if (copy_from_user(buf, user_buf, count - 1))
+		return -EFAULT;
+
+	seq = (struct seq_file *)file->private_data;
+	if (!seq) {
+		pr_warn("r69001-touchscreen: Failed to get seq_file\n");
+		return -EFAULT;
+	}
+
+	ts = (struct r69001_ts_data *)seq->private;
+	if (!ts) {
+		pr_warn("r69001-touchscreen: Failed to get private data\n");
+		return -EFAULT;
+	}
+
+	for (i = 0; i < MODE_COUNT; i++) {
+		if (!strncmp(buf, r69001_ts_modes[i], MODE_STRING_MAX_LEN)) {
+			r69001_set_mode(ts, i, POLL_INTERVAL);
+			break;
+		}
+	}
+
+	if (i == MODE_COUNT) {
+		pr_warn("r69001-touchscreen: Invalid mode: %s\n", buf);
+		return -EINVAL;
+	}
+
+	return count;
+}
+
+static const struct file_operations r69001_ts_dbgfs_fops = {
+	.owner		= THIS_MODULE,
+	.open		= r69001_ts_dbgfs_open,
+	.read		= seq_read,
+	.write		= r69001_ts_dbgfs_write,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int r69001_ts_create_dbgfs(struct r69001_ts_data *ts)
+{
+	struct dentry *entry;
+
+	r69001_ts_dbgfs_root = debugfs_create_dir(R69001_TS_NAME, NULL);
+	if (!r69001_ts_dbgfs_root) {
+		dev_warn(&ts->client->dev, "debugfs_create_dir failed\n");
+		return -ENOMEM;
+	}
+
+	entry = debugfs_create_file(MODE_FILE_NAME, S_IRUSR,
+			r69001_ts_dbgfs_root,
+			(void *)ts, &r69001_ts_dbgfs_fops);
+	if (!entry) {
+		debugfs_remove_recursive(r69001_ts_dbgfs_root);
+		r69001_ts_dbgfs_root = NULL;
+		dev_warn(&ts->client->dev, "%s debugfs entry creation failed\n",
+				MODE_FILE_NAME);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void r69001_ts_remove_dbgfs(void)
+{
+	debugfs_remove_recursive(r69001_ts_dbgfs_root);
+}
+#endif
+
+static int
+r69001_ts_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+	struct r69001_ts_data *ts;
+	struct input_dev *input_dev;
+	struct r69001_platform_data *pdata = client->dev.platform_data;
+	int error;
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		dev_err(&client->dev, "Not I2C_FUNC_I2C\n");
+		return -EIO;
+	}
+
+	ts = kzalloc(sizeof(struct r69001_ts_data), GFP_KERNEL);
+	if (!ts) {
+		dev_err(&client->dev, "Out of memory\n");
+		return -ENOMEM;
+	}
+	if (!pdata) {
+		dev_err(&client->dev, "No touch platform data\n");
+		error = -EINVAL;
+		goto err1;
+	}
+	ts->client = client;
+	ts->pdata = pdata;
+
+	if (!client->irq) /* not fast irq but a gpio one */
+		client->irq = gpio_to_irq(pdata->gpio);
+
+	input_dev = input_allocate_device();
+	if (!input_dev) {
+		dev_err(&client->dev, "Unable to allocated input device\n");
+		error =  -ENOMEM;
+		goto err2;
+	}
+
+	ts->input_dev = input_dev;
+
+	input_dev->name = "r69001-touchscreen";
+	input_dev->id.bustype = BUS_I2C;
+	input_dev->dev.parent = &client->dev;
+
+	__set_bit(EV_SYN, input_dev->evbit);
+	__set_bit(EV_ABS, input_dev->evbit);
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3,6,0))
+	input_mt_init_slots(input_dev, MAX_FINGERS, 0);
+#else
+	input_mt_init_slots(input_dev, MAX_FINGERS);
+#endif
+	input_set_abs_params(input_dev, ABS_MT_POSITION_X, MIN_X, MAX_X, 0, 0);
+	input_set_abs_params(input_dev, ABS_MT_POSITION_Y, MIN_Y, MAX_Y, 0, 0);
+	input_set_abs_params(input_dev, ABS_MT_PRESSURE, MIN_Z, MAX_Z, 0, 0);
+
+	error = input_register_device(ts->input_dev);
+	if (error) {
+		dev_err(&client->dev, "Failed to register %s input device\n",
+							input_dev->name);
+		goto err3;
+	}
+
+	i2c_set_clientdata(client, ts);
+
+	ts->mode = INTERRUPT_MODE;
+
+	error = request_threaded_irq(client->irq, NULL, r69001_ts_irq_handler,
+			pdata->irq_type, client->name, ts);
+	if (error) {
+		dev_err(&client->dev, "Failed to register interrupt\n");
+		goto err4;
+	}
+
+#ifdef CONFIG_DEBUG_FS
+	error = r69001_ts_create_dbgfs(ts);
+	if (error)
+		dev_warn(&client->dev, "Failed to create debugfs\n");
+#endif
+
+	return 0;
+
+err4:
+	input_unregister_device(ts->input_dev);
+err3:
+	input_free_device(ts->input_dev);
+err2:
+err1:
+	kfree(ts);
+	return error;
+}
+
+static int r69001_ts_remove(struct i2c_client *client)
+{
+	struct r69001_ts_data *ts = i2c_get_clientdata(client);
+
+#ifdef CONFIG_DEBUG_FS
+	r69001_ts_remove_dbgfs();
+#endif
+
+	input_unregister_device(ts->input_dev);
+	if (client->irq)
+		free_irq(client->irq, ts);
+	kfree(ts);
+	return 0;
+}
+
+static const struct i2c_device_id r69001_ts_id[] = {
+	{ R69001_TS_NAME, 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, r69001_ts_id);
+
+static struct i2c_driver r69001_ts_driver = {
+	.probe = r69001_ts_probe,
+	.remove = r69001_ts_remove,
+	.id_table = r69001_ts_id,
+	.driver = {
+		.name = R69001_TS_NAME,
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init r69001_ts_init(void)
+{
+	return i2c_add_driver(&r69001_ts_driver);
+}
+
+static void __exit r69001_ts_exit(void)
+{
+	i2c_del_driver(&r69001_ts_driver);
+}
+
+module_init(r69001_ts_init);
+module_exit(r69001_ts_exit);
+
+MODULE_DESCRIPTION("Renesas SP Driver R69001 Touchscreen Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/external_drivers/drivers/input/touchscreen/rmi4/Kconfig b/drivers/external_drivers/drivers/input/touchscreen/rmi4/Kconfig
new file mode 100644
index 0000000..987019c
--- /dev/null
+++ b/drivers/external_drivers/drivers/input/touchscreen/rmi4/Kconfig
@@ -0,0 +1,9 @@
+config TOUCHSCREEN_SYNAPTICS_I2C_RMI4_FORK
+	tristate "Synaptics i2c rmi4 touchscreen"
+	depends on I2C && INPUT
+	help
+	  Say Y here if you have a Synaptics RMI4 and
+	  want to enable support for the built-in touchscreen.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called rmi4.
diff --git a/drivers/external_drivers/drivers/input/touchscreen/rmi4/Makefile b/drivers/external_drivers/drivers/input/touchscreen/rmi4/Makefile
new file mode 100644
index 0000000..884a125
--- /dev/null
+++ b/drivers/external_drivers/drivers/input/touchscreen/rmi4/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the RMI4 touchscreen driver.
+#
+rmi4-objs = synaptics_i2c_rmi4.o rmi4_fw_reflash.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4_FORK) += rmi4.o
+
+ccflags-y += -Wno-packed-bitfield-compat
\ No newline at end of file
diff --git a/drivers/external_drivers/drivers/input/touchscreen/rmi4/rmi4_fw_reflash.c b/drivers/external_drivers/drivers/input/touchscreen/rmi4/rmi4_fw_reflash.c
new file mode 100644
index 0000000..9e7580f
--- /dev/null
+++ b/drivers/external_drivers/drivers/input/touchscreen/rmi4/rmi4_fw_reflash.c
@@ -0,0 +1,852 @@
+/*
+ * Copyright (c) 2012 Synaptics Incorporated
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/ihex.h>
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/time.h>
+#include <linux/i2c.h>
+#include <linux/stat.h>
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif
+#include <linux/synaptics_i2c_rmi4.h>
+#include <linux/interrupt.h>
+#include "synaptics_i2c_rmi4.h"
+
+#define HAS_BSR_MASK 0x20
+
+#define CHECKSUM_OFFSET                 0
+#define BOOTLOADER_VERSION_OFFSET       0x07
+#define IMAGE_SIZE_OFFSET               0x08
+#define CONFIG_SIZE_OFFSET              0x0C
+#define PRODUCT_ID_OFFSET               0x10
+#define PRODUCT_INFO_OFFSET             0x1E
+#define PRODUCT_INFO_SIZE               2
+#define PRODUCT_ID_SIZE                 10
+
+/* F34 image file offsets. */
+#define F34_FW_IMAGE_OFFSET         0x100
+
+/* F34 register offsets. */
+#define F34_BLOCK_DATA_OFFSET 2
+#define F34_BLOCK_DATA_OFFSET_V1 1
+
+/* F34 commands */
+#define F34_WRITE_FW_BLOCK          0x2
+#define F34_ERASE_ALL               0x3
+#define F34_READ_CONFIG_BLOCK       0x5
+#define F34_WRITE_CONFIG_BLOCK      0x6
+#define F34_ERASE_CONFIG            0x7
+#define F34_ENABLE_FLASH_PROG       0xf
+#define F34_STATUS_IN_PROGRESS      0xff
+#define F34_STATUS_IDLE             0x80
+#define F34_IDLE_WAIT_MS            500
+#define F34_ENABLE_WAIT_MS          300
+#define F34_ERASE_WAIT_MS           (5 * 1000)
+
+#define IS_IDLE(ctl_ptr)      ((!ctl_ptr->status) && (!ctl_ptr->command))
+#define extract_u32(ptr)      (le32_to_cpu(*(__le32 *)(ptr)))
+
+#define FIRMWARE_NAME_FORCE	"rmi4.img"
+#define PID_S3202_GFF 0
+#define PID_S3202_OGS "TM2178"
+#define PID_S3408 "s3408_ver5"
+#define PID_S3402 "s3402"
+
+/** Image file V5, Option 0
+ */
+struct image_header {
+	u32 checksum;
+	unsigned int	image_size;
+	unsigned int	config_size;
+	unsigned char	options;
+	unsigned char	bootloader_version;
+	u8		product_id[PRODUCT_ID_SIZE + 1];
+	unsigned char	product_info[PRODUCT_INFO_SIZE];
+};
+
+struct reflash_data {
+	struct rmi4_data		*rmi4_dev;
+	struct rmi4_fn_desc		*f01_pdt;
+	union f01_basic_queries		f01_queries;
+	union f01_device_control_0	f01_controls;
+	char				product_id[PRODUCT_ID_SIZE + 1];
+	struct rmi4_fn_desc		*f34_pdt;
+	u8				bootloader_id[2];
+	union f34_query_regs		f34_queries;
+	union f34_control_status	f34_controls;
+	const u8			*firmware_data;
+	const u8			*config_data;
+	unsigned int			int_count;
+};
+
+/* If this parameter is true, we will update the firmware regardless of
+ * the versioning info.
+ */
+static bool force;
+module_param(force, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(param, "Force reflash of RMI4 devices");
+
+/* If this parameter is not NULL, we'll use that name for the firmware image,
+ * instead of getting it from the F01 queries.
+ */
+static char *img_name;
+module_param(img_name, charp, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(param, "Name of the RMI4 firmware image");
+
+#define RMI4_IMAGE_FILE_REV1_OFFSET	30
+#define RMI4_IMAGE_FILE_REV2_OFFSET	31
+#define IMAGE_FILE_CHECKSUM_SIZE	4
+#define FIRMWARE_IMAGE_AREA_OFFSET	0x100
+
+static void
+extract_header(const u8 *data, int pos, struct image_header *header)
+{
+	header->checksum = extract_u32(&data[pos + CHECKSUM_OFFSET]);
+	header->bootloader_version = data[pos + BOOTLOADER_VERSION_OFFSET];
+	header->image_size = extract_u32(&data[pos + IMAGE_SIZE_OFFSET]);
+	header->config_size = extract_u32(&data[pos + CONFIG_SIZE_OFFSET]);
+	memcpy(header->product_id,
+			&data[pos + PRODUCT_ID_OFFSET], PRODUCT_ID_SIZE);
+	header->product_id[PRODUCT_ID_SIZE] = 0;
+	memcpy(header->product_info,
+			&data[pos + PRODUCT_INFO_OFFSET], PRODUCT_INFO_SIZE);
+}
+
+static int rescan_pdt(struct reflash_data *data)
+{
+	int i, retval;
+	bool f01_found = false, f34_found = false;
+	struct rmi4_fn_desc rmi4_fn_desc;
+	struct rmi4_data *rmi4_dev = data->rmi4_dev;
+	struct rmi4_fn_desc *f34_pdt = data->f34_pdt;
+	struct rmi4_fn_desc *f01_pdt = data->f01_pdt;
+	struct i2c_client *client = rmi4_dev->i2c_client;
+
+	/* Per spec, once we're in reflash we only need to look at the first
+	 * PDT page for potentially changed F01 and F34 information.
+	 */
+	for (i = PDT_START_SCAN_LOCATION; i >= PDT_END_SCAN_LOCATION;
+			i -= sizeof(rmi4_fn_desc)) {
+		retval = rmi4_i2c_block_read(rmi4_dev, i, (u8 *)&rmi4_fn_desc,
+					sizeof(rmi4_fn_desc));
+		if (retval != sizeof(rmi4_fn_desc)) {
+			dev_err(&client->dev,
+				"Read PDT entry at %#06x failed: %d.\n",
+				i, retval);
+			return retval;
+		}
+
+		if (RMI4_END_OF_PDT(rmi4_fn_desc.fn_number))
+			break;
+
+		if (rmi4_fn_desc.fn_number == 0x01) {
+			memcpy(f01_pdt, &rmi4_fn_desc, sizeof(rmi4_fn_desc));
+			f01_found = true;
+		} else if (rmi4_fn_desc.fn_number == 0x34) {
+			memcpy(f34_pdt, &rmi4_fn_desc, sizeof(rmi4_fn_desc));
+			f34_found = true;
+		}
+	}
+
+	if (!f01_found) {
+		dev_err(&client->dev, "Failed to find F01 PDT entry.\n");
+		retval = -ENODEV;
+	} else if (!f34_found) {
+		dev_err(&client->dev, "Failed to find F34 PDT entry.\n");
+		retval = -ENODEV;
+	} else {
+		retval = 0;
+	}
+
+	return retval;
+}
+
+static int read_f34_controls(struct reflash_data *data)
+{
+	int retval;
+	union f34_control_status_v1 f34ctrlsts1;
+	if (data->bootloader_id[1] > '5') {
+		retval = rmi4_i2c_block_read(data->rmi4_dev,
+			data->f34_controls.address, f34ctrlsts1.regs,
+			sizeof(f34ctrlsts1.regs));
+
+		data->f34_controls.command = f34ctrlsts1.command;
+		data->f34_controls.status = f34ctrlsts1.status;
+		data->f34_controls.program_enabled =
+			f34ctrlsts1.program_enabled;
+
+		return retval;
+	} else
+		return rmi4_i2c_byte_read(data->rmi4_dev,
+			data->f34_controls.address, data->f34_controls.regs);
+}
+
+static int read_f01_status(struct reflash_data *data,
+			   union f01_device_status *device_status)
+{
+	return rmi4_i2c_byte_read(data->rmi4_dev,
+			data->f01_pdt->data_base_addr, device_status->regs);
+}
+
+static int read_f01_controls(struct reflash_data *data)
+{
+	return rmi4_i2c_byte_read(data->rmi4_dev,
+			data->f01_pdt->ctrl_base_addr,
+			data->f01_controls.regs);
+}
+
+static int write_f01_controls(struct reflash_data *data)
+{
+	return rmi4_i2c_byte_write(data->rmi4_dev,
+			data->f01_pdt->ctrl_base_addr,
+			data->f01_controls.regs[0]);
+}
+
+#define SLEEP_TIME_MS 20
+/* Wait until the status is idle and we're ready to continue */
+static int wait_for_idle(struct reflash_data *data, int timeout_ms)
+{
+	int timeout_count = timeout_ms / SLEEP_TIME_MS + 1;
+	int count = 0;
+	union f34_control_status *controls = &data->f34_controls;
+	int retval;
+	struct i2c_client *client = data->rmi4_dev->i2c_client;
+
+	do {
+		if (count || timeout_count == 1)
+			msleep(SLEEP_TIME_MS);
+		count++;
+		retval = read_f34_controls(data);
+		if (retval < 0) {
+			dev_warn(&client->dev,
+				"Waiting for idle, check times: %d\n", count);
+			continue;
+		} else if (IS_IDLE(controls)) {
+			if (!data->f34_controls.program_enabled) {
+				/* TODO: Kill this whole if block once
+				 * FW-39000 is resolved. */
+				dev_warn(&client->dev,
+					"Yikes!  We're not enabled!\n");
+				msleep(1000);
+				read_f34_controls(data);
+			}
+			return 0;
+		}
+	} while (count < timeout_count);
+
+	dev_err(&client->dev,
+		"Timeout waiting for idle status, last status: %#04x.\n",
+		controls->regs[0]);
+	dev_err(&client->dev, "Command: %#04x\n", controls->command);
+	dev_err(&client->dev, "Status:  %#04x\n", controls->status);
+	dev_err(&client->dev, "Enabled: %d\n", controls->program_enabled);
+	dev_err(&client->dev, "Idle:    %d\n", IS_IDLE(controls));
+	return -ETIMEDOUT;
+}
+
+#define INT_SLEEP_TIME_MS 1
+static int wait_for_interrupt(struct reflash_data *data, int timeout_ms)
+{
+	int timeout_count = timeout_ms / INT_SLEEP_TIME_MS + 1;
+	int count = 0;
+	int int_count = data->int_count;
+	struct i2c_client *client = data->rmi4_dev->i2c_client;
+
+	do {
+		if (count || timeout_count == 1)
+			msleep(INT_SLEEP_TIME_MS);
+		count++;
+		if (data->int_count != int_count)
+			return 0;
+	} while (count < timeout_count);
+
+	dev_err(&client->dev, "%s, interrupt not detected within %d ms.\n",
+		__func__, timeout_ms);
+
+	return -ETIMEDOUT;
+}
+
+static irqreturn_t f34_irq_thread(int irq, void *data)
+{
+	u8 intr_status;
+	int retval;
+	struct reflash_data *pdata = data;
+	struct i2c_client *client = pdata->rmi4_dev->i2c_client;
+
+	pdata->int_count++;
+
+	/* Assuming we're in bootloader mode, we only read one interrupt
+	   register */
+	retval = rmi4_i2c_block_read(pdata->rmi4_dev,
+		pdata->f01_pdt->data_base_addr + 1,
+		&intr_status,
+		1);
+
+	if (retval != 1) {
+		dev_err(&client->dev,
+			"could not read interrupt status register\n");
+		return IRQ_NONE;
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int read_f01_queries(struct reflash_data *data)
+{
+	int retval;
+	u16 addr = data->f01_pdt->query_base_addr;
+	struct i2c_client *client = data->rmi4_dev->i2c_client;
+
+	retval = rmi4_i2c_block_read(data->rmi4_dev,
+					addr, data->f01_queries.regs,
+					ARRAY_SIZE(data->f01_queries.regs));
+	if (retval < 0) {
+		dev_err(&client->dev,
+			"Failed to read F34 queries (code %d).\n", retval);
+		return retval;
+	}
+	addr += ARRAY_SIZE(data->f01_queries.regs);
+
+	retval = rmi4_i2c_block_read(data->rmi4_dev,
+				addr, data->product_id, PRODUCT_ID_SIZE);
+	if (retval < 0) {
+		dev_err(&client->dev,
+			"Failed to read product ID (code %d).\n", retval);
+		return retval;
+	}
+	data->product_id[PRODUCT_ID_SIZE] = 0;
+	dev_info(&client->dev, "F01 Product id:   %s\n",
+			data->product_id);
+	dev_info(&client->dev, "F01 product info: %#04x %#04x\n",
+				data->f01_queries.productinfo_1,
+				data->f01_queries.productinfo_2);
+
+	return 0;
+}
+
+static int read_f34_queries(struct reflash_data *data)
+{
+	int retval;
+	u8 id_str[3];
+	struct i2c_client *client = data->rmi4_dev->i2c_client;
+
+	retval = rmi4_i2c_block_read(data->rmi4_dev,
+				data->f34_pdt->query_base_addr,
+				data->bootloader_id, 2);
+	if (retval < 0) {
+		dev_err(&client->dev,
+			"Failed to read F34 bootloader_id (code %d).\n",
+			retval);
+		return retval;
+	}
+
+	if (data->bootloader_id[1] > '5')
+		retval = rmi4_i2c_block_read(data->rmi4_dev,
+			data->f34_pdt->query_base_addr+1,
+			data->f34_queries.regs,
+			ARRAY_SIZE(data->f34_queries.regs));
+	else
+		retval = rmi4_i2c_block_read(data->rmi4_dev,
+			data->f34_pdt->query_base_addr+2,
+			data->f34_queries.regs,
+			ARRAY_SIZE(data->f34_queries.regs));
+	if (retval < 0) {
+		dev_err(&client->dev,
+			"Failed to read F34 queries (code %d).\n", retval);
+		return retval;
+	}
+	data->f34_queries.block_size =
+			le16_to_cpu(data->f34_queries.block_size);
+	data->f34_queries.fw_block_count =
+			le16_to_cpu(data->f34_queries.fw_block_count);
+	data->f34_queries.config_block_count =
+			le16_to_cpu(data->f34_queries.config_block_count);
+	id_str[0] = data->bootloader_id[0];
+	id_str[1] = data->bootloader_id[1];
+	id_str[2] = 0;
+
+	dev_dbg(&client->dev, "Got F34 data->f34_queries.\n");
+	dev_dbg(&client->dev, "F34 bootloader id: %s (%#04x %#04x)\n",
+		id_str, data->bootloader_id[0], data->bootloader_id[1]);
+	dev_dbg(&client->dev, "F34 has config id: %d\n",
+			data->f34_queries.has_config_id);
+	dev_dbg(&client->dev, "F34 unlocked:      %d\n",
+			data->f34_queries.unlocked);
+	dev_dbg(&client->dev, "F34 regMap:        %d\n",
+			data->f34_queries.reg_map);
+	dev_dbg(&client->dev, "F34 block size:    %d\n",
+			data->f34_queries.block_size);
+	dev_dbg(&client->dev, "F34 fw blocks:     %d\n",
+			data->f34_queries.fw_block_count);
+	dev_dbg(&client->dev, "F34 config blocks: %d\n",
+			data->f34_queries.config_block_count);
+
+	if (data->bootloader_id[1] > '5')
+		data->f34_controls.address = data->f34_pdt->data_base_addr +
+			F34_BLOCK_DATA_OFFSET_V1 + 1;
+	else
+		data->f34_controls.address = data->f34_pdt->data_base_addr +
+			F34_BLOCK_DATA_OFFSET + data->f34_queries.block_size;
+
+	return 0;
+}
+
+static int write_bootloader_id(struct reflash_data *data)
+{
+	int retval;
+	struct rmi4_data *rmi4_dev = data->rmi4_dev;
+	struct rmi4_fn_desc *f34_pdt = data->f34_pdt;
+	struct i2c_client *client = data->rmi4_dev->i2c_client;
+
+	if (data->bootloader_id[1] > '5') {
+		retval = rmi4_i2c_block_write(rmi4_dev,
+			f34_pdt->data_base_addr + F34_BLOCK_DATA_OFFSET_V1,
+			data->bootloader_id, ARRAY_SIZE(data->bootloader_id));
+	} else
+		retval = rmi4_i2c_block_write(rmi4_dev,
+			f34_pdt->data_base_addr + F34_BLOCK_DATA_OFFSET,
+			data->bootloader_id, ARRAY_SIZE(data->bootloader_id));
+	if (retval < 0) {
+		dev_err(&client->dev,
+			"Failed to write bootloader ID. Code: %d.\n", retval);
+		return retval;
+	}
+
+	return 0;
+}
+
+static int write_f34_command(struct reflash_data *data, u8 command)
+{
+	int retval;
+	struct rmi4_data *rmi4_dev = data->rmi4_dev;
+	struct i2c_client *client = data->rmi4_dev->i2c_client;
+
+	retval = rmi4_i2c_byte_write(rmi4_dev,
+				data->f34_controls.address, command);
+	if (retval < 0) {
+		dev_err(&client->dev,
+			"Failed to write F34 command %#04x. Code: %d.\n",
+			command, retval);
+		return retval;
+	}
+
+	return 0;
+}
+
+static int enter_flash_programming(struct reflash_data *data)
+{
+	int retval;
+	union f01_device_status device_status;
+	struct i2c_client *client = data->rmi4_dev->i2c_client;
+
+	retval = write_bootloader_id(data);
+	if (retval < 0)
+		return retval;
+
+	dev_info(&client->dev, "Enabling flash programming.\n");
+	retval = write_f34_command(data, F34_ENABLE_FLASH_PROG);
+	if (retval < 0)
+		return retval;
+
+	wait_for_interrupt(data, F34_ENABLE_WAIT_MS);
+
+	retval = wait_for_idle(data, F34_ENABLE_WAIT_MS);
+	if (retval < 0) {
+		dev_err(&client->dev,
+			"Did not reach idle state after %d ms. Code: %d.\n",
+			F34_ENABLE_WAIT_MS, retval);
+		return retval;
+	}
+	if (!data->f34_controls.program_enabled) {
+		dev_err(&client->dev,
+			"Reached idle, but programming not enabled, "
+			" current status register: %#04x.\n",
+			data->f34_controls.regs[0]);
+		return -EINVAL;
+	}
+	dev_info(&client->dev, "HOORAY! Programming is enabled!\n");
+
+	retval = rescan_pdt(data);
+	if (retval < 0) {
+		dev_err(&client->dev,
+				"Failed to rescan pdt.  Code: %d.\n", retval);
+		return retval;
+	}
+
+	retval = read_f01_status(data, &device_status);
+	if (retval < 0) {
+		dev_err(&client->dev,
+			"Failed to read F01 status after enabling reflash."
+			"Code: %d\n", retval);
+		return retval;
+	}
+	if (!(device_status.flash_prog)) {
+		dev_err(&client->dev,
+			"Device reports as not in flash programming mode.\n");
+		return -EINVAL;
+	}
+
+	retval = read_f34_queries(data);
+	if (retval < 0) {
+		dev_err(&client->dev, "F34 queries failed, code = %d.\n",
+			retval);
+		return retval;
+	}
+
+	retval = read_f01_controls(data);
+	if (retval < 0) {
+		dev_err(&client->dev,
+			"F01 controls read failed, code = %d.\n", retval);
+		return retval;
+	}
+	data->f01_controls.nosleep = true;
+	data->f01_controls.sleep_mode = RMI4_SLEEP_MODE_NORMAL;
+
+	retval = write_f01_controls(data);
+	if (retval < 0) {
+		dev_err(&client->dev,
+			"F01 controls write failed, code = %d.\n", retval);
+		return retval;
+	}
+
+	return 0;
+}
+
+static void reset_device(struct reflash_data *data)
+{
+	int retval;
+	struct i2c_client *client = data->rmi4_dev->i2c_client;
+
+	dev_info(&client->dev, "Resetting...\n");
+	retval = rmi4_i2c_byte_write(data->rmi4_dev,
+			data->f01_pdt->cmd_base_addr, RMI4_DEVICE_RESET_CMD);
+	if (retval < 0)
+		dev_warn(&client->dev,
+			 "WARNING - post-flash reset failed, code: %d.\n",
+			 retval);
+	msleep(RMI4_RESET_DELAY);
+	dev_info(&client->dev, "Reset completed.\n");
+}
+
+/*
+ * Send data to the device one block at a time.
+ */
+static int write_blocks(struct reflash_data *data, u8 *block_ptr,
+			u16 block_count, u8 cmd)
+{
+	int block_num;
+	u8 zeros[] = {0, 0};
+	int retval;
+	u16 addr = data->f34_pdt->data_base_addr + F34_BLOCK_DATA_OFFSET;
+	struct i2c_client *client = data->rmi4_dev->i2c_client;
+
+	if (data->bootloader_id[1] > '5')
+		addr = data->f34_pdt->data_base_addr + F34_BLOCK_DATA_OFFSET_V1;
+
+	retval = rmi4_i2c_block_write(data->rmi4_dev,
+					data->f34_pdt->data_base_addr,
+					zeros, ARRAY_SIZE(zeros));
+	if (retval < 0) {
+		dev_err(&client->dev,
+			"Failed to write initial zeros. Code=%d.\n", retval);
+		return retval;
+	}
+
+	for (block_num = 0; block_num < block_count; ++block_num) {
+		retval = rmi4_i2c_block_write(data->rmi4_dev, addr, block_ptr,
+					 data->f34_queries.block_size);
+		if (retval < 0) {
+			dev_err(&client->dev,
+				"Failed to write block %d. Code=%d.\n",
+						block_num, retval);
+			return retval;
+		}
+
+		retval = write_f34_command(data, cmd);
+		if (retval < 0) {
+			dev_err(&client->dev,
+			"Failed to write command for block %d. Code=%d.\n",
+							block_num, retval);
+			return retval;
+		}
+
+		retval = wait_for_idle(data, F34_IDLE_WAIT_MS);
+		if (retval < 0) {
+			dev_err(&client->dev,
+			"Failed to go idle after writing block %d. Code=%d.\n",
+							block_num, retval);
+			return retval;
+		}
+
+		block_ptr += data->f34_queries.block_size;
+	}
+
+	return 0;
+}
+
+static int write_firmware(struct reflash_data *data)
+{
+	return write_blocks(data, (u8 *) data->firmware_data,
+		data->f34_queries.fw_block_count, F34_WRITE_FW_BLOCK);
+}
+
+static int write_configuration(struct reflash_data *data)
+{
+	return write_blocks(data, (u8 *)data->config_data,
+		data->f34_queries.config_block_count, F34_WRITE_CONFIG_BLOCK);
+}
+
+static void reflash_firmware(struct reflash_data *data)
+{
+	struct timespec start;
+	struct timespec end;
+	s64 duration_ns;
+	int retval = 0;
+	struct i2c_client *client = data->rmi4_dev->i2c_client;
+
+	retval = enter_flash_programming(data);
+	if (retval < 0)
+		return;
+
+	retval = write_bootloader_id(data);
+	if (retval < 0)
+		return;
+
+	dev_info(&client->dev, "Erasing FW...\n");
+	getnstimeofday(&start);
+
+	retval = write_f34_command(data, F34_ERASE_ALL);
+	if (retval < 0)
+		return;
+
+	wait_for_interrupt(data, F34_ERASE_WAIT_MS);
+
+	dev_info(&client->dev, "Waiting for idle...\n");
+	retval = wait_for_idle(data, F34_ERASE_WAIT_MS);
+	if (retval < 0) {
+		dev_err(&client->dev,
+			"Failed to reach idle state. Code: %d.\n", retval);
+		return;
+	}
+
+	getnstimeofday(&end);
+	duration_ns = timespec_to_ns(&end) - timespec_to_ns(&start);
+	dev_info(&client->dev,
+		 "Erase complete, time: %lld ns.\n", duration_ns);
+
+	if (data->firmware_data) {
+		dev_info(&client->dev, "Writing firmware...\n");
+		getnstimeofday(&start);
+		retval = write_firmware(data);
+		if (retval < 0)
+			return;
+		getnstimeofday(&end);
+		duration_ns = timespec_to_ns(&end) - timespec_to_ns(&start);
+		dev_info(&client->dev,
+			 "Done writing FW, time: %lld ns.\n", duration_ns);
+	}
+
+	if (data->config_data) {
+		dev_info(&client->dev, "Writing configuration...\n");
+		getnstimeofday(&start);
+		retval = write_configuration(data);
+		if (retval < 0)
+			return;
+		getnstimeofday(&end);
+		duration_ns = timespec_to_ns(&end) - timespec_to_ns(&start);
+		dev_info(&client->dev,
+			 "Done writing config, time: %lld ns.\n", duration_ns);
+	}
+}
+
+/* Returns false if the firmware should not be reflashed.
+ */
+static bool
+go_nogo(struct reflash_data *data, const struct rmi4_touch_calib *calib)
+{
+	int retval;
+	u8 customer_id[4] = { 0 };
+	u32 id;
+	struct i2c_client *client = data->rmi4_dev->i2c_client;
+
+	retval = rmi4_i2c_block_read(data->rmi4_dev,
+				data->f34_pdt->ctrl_base_addr,
+				customer_id, sizeof(customer_id));
+	if (retval < 0) {
+		dev_err(&client->dev, "Failed to read customer congfig ID (code %d).\n",
+								retval);
+		return true;
+	}
+	dev_info(&client->dev, "Customer ID HEX: 0x%x %x %x %x\n",
+				customer_id[0], customer_id[1],
+				customer_id[2], customer_id[3]);
+
+	id = *(u32 *)customer_id;
+	dev_info(&client->dev, "Customer ID: %d\n", id);
+
+	if (id != calib->customer_id)
+		return true;
+	return false || force;
+}
+
+static void
+print_image_info(struct i2c_client *client, struct image_header *header,
+					const struct firmware *fw_entry)
+{
+	dev_info(&client->dev, "Img checksum:           %#08X\n",
+			header->checksum);
+	dev_info(&client->dev, "Img image size:         %d\n",
+			header->image_size);
+	dev_info(&client->dev, "Img config size:        %d\n",
+			header->config_size);
+	dev_info(&client->dev, "Img bootloader version: %d\n",
+			header->bootloader_version);
+	dev_info(&client->dev, "Img product id:         %s\n",
+			header->product_id);
+	dev_info(&client->dev, "Img product info:       %#04x %#04x\n",
+			header->product_info[0], header->product_info[1]);
+	dev_info(&client->dev, "Got firmware, size: %zd.\n", fw_entry->size);
+}
+
+int rmi4_fw_update(struct rmi4_data *pdata,
+		struct rmi4_fn_desc *f01_pdt, struct rmi4_fn_desc *f34_pdt)
+{
+#ifdef DEBUG
+	struct timespec start;
+	struct timespec end;
+	s64 duration_ns;
+#endif
+	int retval, touch_type = 0;
+	char *firmware_name;
+	const struct firmware *fw_entry = NULL;
+	struct i2c_client *client = pdata->i2c_client;
+	union pdt_properties pdt_props;
+	struct image_header header = { 0 };
+	struct reflash_data data = {
+		.rmi4_dev = pdata,
+		.f01_pdt = f01_pdt,
+		.f34_pdt = f34_pdt,
+	};
+	const struct rmi4_touch_calib *calib = pdata->board->calib;
+	const struct rmi4_platform_data *platformdata =
+		client->dev.platform_data;
+
+	dev_info(&client->dev, "Enter %s.\n", __func__);
+#ifdef	DEBUG
+	getnstimeofday(&start);
+#endif
+
+	retval = rmi4_i2c_byte_read(pdata,
+				PDT_PROPERTIES_LOCATION, pdt_props.regs);
+	if (retval < 0) {
+		dev_warn(&client->dev,
+			 "Failed to read PDT props at %#06x (code %d).\n",
+			 PDT_PROPERTIES_LOCATION, retval);
+	}
+	if (pdt_props.has_bsr) {
+		dev_warn(&client->dev,
+			 "Firmware update for LTS not currently supported.\n");
+		return -1;
+	}
+
+	retval = read_f01_queries(&data);
+	if (retval) {
+		dev_err(&client->dev, "F01 queries failed, code = %d.\n",
+			retval);
+		return -1;
+	}
+
+	if (data.product_id[0] == PID_S3202_GFF)
+		touch_type = RMI4_S3202_GFF;
+	else if (strcmp(data.product_id, PID_S3202_OGS) == 0)
+		touch_type = RMI4_S3202_OGS;
+	else if (strcmp(data.product_id, PID_S3402) == 0) {
+		if (!strncmp(client->name, S3400_CGS_DEV_ID, I2C_NAME_SIZE))
+			touch_type = RMI4_S3400_CGS;
+		else
+			touch_type = RMI4_S3400_IGZO;
+	} else {
+		dev_err(&client->dev, "Unsupported touch screen type, product ID: %s\n",
+				data.product_id);
+		if (!force) {
+			dev_err(&client->dev, "Use S3202 OGS as default type\n");
+			return RMI4_S3202_OGS;
+		}
+	}
+
+	if (force)
+		firmware_name = FIRMWARE_NAME_FORCE;
+	else
+		firmware_name = calib[touch_type].fw_name;
+	dev_info(&client->dev,
+			"Firmware name:%s, hardware type:%d, client name:%s\n",
+			firmware_name, touch_type, client->name);
+
+	retval = read_f34_queries(&data);
+	if (retval) {
+		dev_err(&client->dev, "F34 queries failed, code = %d.\n",
+			retval);
+		return touch_type;
+	}
+	if (!go_nogo(&data, &calib[touch_type])) {
+		dev_info(&client->dev, "Don't need to reflash firmware.\n");
+		return touch_type;
+	}
+	dev_info(&client->dev, "Requesting %s.\n", firmware_name);
+	retval = request_firmware(&fw_entry, firmware_name, &client->dev);
+	if (retval != 0 || !fw_entry) {
+		dev_err(&client->dev,
+				"Firmware %s not available, code = %d\n",
+				firmware_name, retval);
+		return touch_type;
+	}
+
+	extract_header(fw_entry->data, 0, &header);
+	print_image_info(client, &header, fw_entry);
+
+	if (header.image_size)
+		data.firmware_data = fw_entry->data + F34_FW_IMAGE_OFFSET;
+	if (header.config_size)
+		data.config_data = fw_entry->data + F34_FW_IMAGE_OFFSET +
+			header.image_size;
+
+	retval = request_threaded_irq(pdata->irq, NULL,
+		f34_irq_thread,
+		platformdata->irq_type,
+		"rmi4_f34", &data);
+	if (retval < 0) {
+		dev_err(&client->dev, "Unable to get attn irq %d\n",
+		pdata->irq);
+	}
+
+	reflash_firmware(&data);
+	reset_device(&data);
+	release_firmware(fw_entry);
+
+	free_irq(pdata->irq, &data);
+#ifdef	DEBUG
+	getnstimeofday(&end);
+	duration_ns = timespec_to_ns(&end) - timespec_to_ns(&start);
+	dev_info(&client->dev, "Time to reflash: %lld ns.\n", duration_ns);
+#endif
+	return touch_type;
+}
diff --git a/drivers/external_drivers/drivers/input/touchscreen/rmi4/synaptics_i2c_rmi4.c b/drivers/external_drivers/drivers/input/touchscreen/rmi4/synaptics_i2c_rmi4.c
new file mode 100644
index 0000000..8a6254b
--- /dev/null
+++ b/drivers/external_drivers/drivers/input/touchscreen/rmi4/synaptics_i2c_rmi4.c
@@ -0,0 +1,2560 @@
+/**
+ *
+ * Synaptics Register Mapped Interface (RMI4) I2C Physical Layer Driver.
+ * Copyright (c) 2007-2010, Synaptics Incorporated
+ *
+ * Author: Js HA <js.ha@stericsson.com> for ST-Ericsson
+ * Author: Naveen Kumar G <naveen.gaddipati@stericsson.com> for ST-Ericsson
+ * Copyright 2010 (c) ST-Ericsson AB
+ */
+/*
+ * This file is licensed under the GPL2 license.
+ *
+ *#############################################################################
+ * GPL
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ *
+ *#############################################################################
+ */
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/input.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/input/mt.h>
+#include <linux/regulator/consumer.h>
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif
+#include <linux/synaptics_i2c_rmi4.h>
+#include <linux/early_suspend_sysfs.h>
+
+/* TODO: for multiple device support will need a per-device mutex */
+#define DRIVER_NAME "rmi4_ts"
+
+#define MAX_TOUCH_MAJOR		15
+#define MAX_TOUCH_MINOR		15
+#define MIN_TRACKING_ID		1
+#define MAX_TRACKING_ID		10
+#define MAX_RETRY_COUNT		5
+#define STD_QUERY_LEN		21
+#define PAGE_LEN		2
+#define DATA_BUF_LEN		32
+#define BUF_LEN			37
+#define QUERY_LEN		9
+#define DATA_LEN		12
+#define HAS_TAP			0x01
+#define HAS_PALMDETECT		0x01
+#define HAS_ROTATE		0x02
+#define HAS_TAPANDHOLD		0x02
+#define HAS_DOUBLETAP		0x04
+#define HAS_EARLYTAP		0x08
+#define HAS_RELEASE		0x08
+#define HAS_FLICK		0x10
+#define HAS_PRESS		0x20
+#define HAS_PINCH		0x40
+
+#define MASK_16BIT		0xFFFF
+#define MASK_8BIT		0xFF
+#define MASK_7BIT		0x7F
+#define MASK_5BIT		0x1F
+#define MASK_4BIT		0x0F
+#define MASK_3BIT		0x07
+#define MASK_2BIT		0x03
+#define TOUCHPAD_CTRL_INTR	0x8
+
+#define DELTA_XPOS_THRESH	3
+#define DELTA_YPOS_THRESH	3
+#define TOUCH_REDUCE_MODE	1
+
+#define F01_CTRL0_CONFIGURED (1 << 7)
+#define F01_CTRL0_SLEEP      (1 << 0)
+#define F01_CTRL0_NOSLEEP    (1 << 2)
+
+#define BOOT_MODE_MOS	0
+#define BOOT_MODE_COS	1
+
+static int boot_mode;
+module_param(boot_mode, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(param, "The boot mode of system");
+
+#ifdef CONFIG_DEBUG_FS
+#define F54_FIFO_DATA_OFFSET 3
+#include <linux/debugfs.h>
+
+static struct dentry *rmi4_debugfs_root;
+static char *retbuf;
+#define RAW_VALUE_SIZE 2
+#endif
+
+#include "synaptics_i2c_rmi4.h"
+
+/*
+#define REPORT_2D_Z
+*/
+
+#define RPT_TYPE (1 << 0)
+#define RPT_X_LSB (1 << 1)
+#define RPT_X_MSB (1 << 2)
+#define RPT_Y_LSB (1 << 3)
+#define RPT_Y_MSB (1 << 4)
+#define RPT_Z (1 << 5)
+#define RPT_WX (1 << 6)
+#define RPT_WY (1 << 7)
+#define RPT_DEFAULT (RPT_TYPE | RPT_X_LSB | RPT_X_MSB | RPT_Y_LSB | RPT_Y_MSB)
+
+static struct rmi4_fn_ops supported_fn_ops[] = {
+	{
+		.fn_number = RMI4_TOUCHPAD_FUNC_NUM,
+		.detect = rmi4_touchpad_detect,
+		.config = rmi4_touchpad_config,
+		.irq_handler = rmi4_touchpad_irq_handler,
+		.remove = rmi4_touchpad_remove,
+	},
+#ifdef CONFIG_DEBUG_FS
+	{
+		.fn_number = RMI4_ANALOG_FUNC_NUM,
+		.detect  = rmi4_ana_data_detect,
+		.config = NULL,
+		.irq_handler = rmi4_ana_data_irq_handler,
+		.remove = rmi4_ana_data_remove,
+	},
+#endif
+	{
+		.fn_number = RMI4_TOUCHPAD_F12_FUNC_NUM,
+		.detect = rmi4_touchpad_f12_detect,
+		.config = rmi4_touchpad_f12_config,
+		.irq_handler = rmi4_touchpad_f12_irq_handler,
+		.remove = rmi4_touchpad_f12_remove,
+	},
+	{
+		.fn_number = RMI4_BUTTON_FUNC_NUM,
+		.detect  = rmi4_button_detect,
+		.config = NULL,
+		.irq_handler = rmi4_button_irq_handler,
+		.remove = rmi4_button_remove,
+	},
+	{
+		.fn_number = RMI4_DEV_CTL_FUNC_NUM,
+		.detect = rmi4_dev_ctl_detect,
+		.irq_handler = rmi4_dev_ctl_irq_handler,
+	},
+#ifdef DEBUG
+	{
+		.fn_number = RMI4_FLASH_FW_FUNC_NUM,
+	}
+#endif
+};
+
+/**
+ * rmi4_set_page() - sets the page
+ * @pdata: pointer to rmi4_data structure
+ * @address: set the address of the page
+ *
+ * This function is used to set the page and returns integer.
+ */
+static int rmi4_set_page(struct rmi4_data *pdata, u16 address)
+{
+	unsigned char	txbuf[PAGE_LEN];
+	int		retval;
+	unsigned int	page;
+	struct i2c_client *i2c = pdata->i2c_client;
+
+	page	= ((address >> 8) & MASK_8BIT);
+	if (page != pdata->current_page) {
+		txbuf[0] = RMI4_PAGE_SELECT_REG;
+		txbuf[1] = page;
+		retval	= i2c_master_send(i2c, txbuf, PAGE_LEN);
+		if (retval != PAGE_LEN)
+			dev_err(&i2c->dev, "%s:failed:%d\n", __func__, retval);
+		else
+			pdata->current_page = page;
+	} else
+		retval = PAGE_LEN;
+	return retval;
+}
+
+int rmi4_i2c_block_read(struct rmi4_data *pdata,
+					u16 address, u8 *valp, int size)
+{
+	int retval = 0;
+	int retry_count = 0;
+	unsigned char txbuf;
+	struct i2c_client *client = pdata->i2c_client;
+
+	mutex_lock(&(pdata->rmi4_page_mutex));
+	retval = rmi4_set_page(pdata, address);
+	if (retval != PAGE_LEN) {
+		retval = -1;
+		goto exit;
+	}
+	txbuf = address & MASK_8BIT;
+retry:
+	retval = i2c_master_send(client, &txbuf, sizeof(txbuf));
+	if (retval < 0) {
+		dev_err(&client->dev, "%s: Write failed\n", __func__);
+		goto exit;
+	}
+	retval = i2c_master_recv(client, valp, size);
+	if (retval != size) {
+		if (++retry_count == MAX_RETRY_COUNT)
+			dev_err(&client->dev,
+				"%s: address 0x%04x size %d failed:%d\n",
+					__func__, address, size, retval);
+		else {
+			rmi4_set_page(pdata, address);
+			goto retry;
+		}
+	}
+exit:
+	mutex_unlock(&(pdata->rmi4_page_mutex));
+	return retval;
+}
+
+int rmi4_i2c_byte_read(struct rmi4_data *pdata, u16 address, u8 *valp)
+{
+	return rmi4_i2c_block_read(pdata, address, valp, 1);
+}
+
+int rmi4_i2c_block_write(struct rmi4_data *pdata,
+					u16 address, u8 *valp, int size)
+{
+	int retval = 0;
+	int retry_count = 0;
+	unsigned char txbuf[size + 1];
+	struct i2c_client *client = pdata->i2c_client;
+
+	memcpy(txbuf + 1, valp, size);
+
+	mutex_lock(&(pdata->rmi4_page_mutex));
+	retval = rmi4_set_page(pdata, address);
+	if (retval != PAGE_LEN) {
+		retval = -1;
+		goto exit;
+	}
+	txbuf[0] = address & MASK_8BIT;
+retry:
+	retval = i2c_master_send(client, txbuf, sizeof(txbuf));
+	if (retval < 0) {
+		if (++retry_count == MAX_RETRY_COUNT)
+			dev_err(&client->dev,
+				"%s: address 0x%04x size %d failed:%d\n",
+					__func__, address, size, retval);
+		else {
+			rmi4_set_page(pdata, address);
+			goto retry;
+		}
+	}
+exit:
+	mutex_unlock(&(pdata->rmi4_page_mutex));
+	return retval;
+}
+
+int rmi4_i2c_byte_write(struct rmi4_data *pdata, u16 address, u8 data)
+{
+	return rmi4_i2c_block_write(pdata, address, &data, 1);
+}
+
+static int rmi4_i2c_set_bits(struct rmi4_data *pdata, u16 addr, u8 bits)
+{
+	int retval;
+	u8 reg = 0;
+	struct i2c_client *client = pdata->i2c_client;
+
+	retval = rmi4_i2c_byte_read(pdata, addr, &reg);
+	if (retval < 0) {
+		dev_err(&client->dev, "%s: read 0x%x failed!\n",
+						__func__, addr);
+		return retval;
+	}
+	reg |= bits;
+	retval = rmi4_i2c_byte_write(pdata, addr, reg);
+	if (retval < 0) {
+		dev_err(&client->dev, "%s: write 0x%x failed!\n",
+						__func__, addr);
+		return retval;
+	}
+	return 0;
+}
+
+static int rmi4_i2c_clear_bits(struct rmi4_data *pdata, u16 addr, u8 bits)
+{
+	u8 reg = 0;
+	int retval;
+	struct i2c_client *client = pdata->i2c_client;
+
+	retval = rmi4_i2c_byte_read(pdata, addr, &reg);
+	if (retval < 0) {
+		dev_err(&client->dev, "%s: read 0x%x failed!\n",
+						__func__, addr);
+		return retval;
+	}
+	reg &= ~bits;
+	retval = rmi4_i2c_byte_write(pdata, addr, reg);
+	if (retval < 0) {
+		dev_err(&client->dev, "%s: write 0x%x failed!\n",
+						__func__, addr);
+		return retval;
+	}
+	return 0;
+}
+
+static struct rmi4_fn_ops *get_supported_fn_ops(int id)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(supported_fn_ops); i++) {
+		if (id == supported_fn_ops[i].fn_number)
+			return &supported_fn_ops[i];
+	}
+	return NULL;
+}
+
+/**
+ * rmi4_touchpad_report() - reports for the rmi4 touchpad device
+ * @pdata: pointer to rmi4_data structure
+ * @rfi: pointer to rmi4_fn structure
+ *
+ * This function calls to reports for the rmi4 touchpad device
+ */
+int rmi4_touchpad_irq_handler(struct rmi4_data *pdata, struct rmi4_fn *rfi)
+{
+	/* number of touch points - fingers down in this case */
+	int retval;
+	int touch_count = 0;
+	int finger, fingers_supported, finger_registers;
+	int reg;
+	int finger_shift;
+	int x, y, wx, wy;
+	enum finger_state finger_status;
+	u16 data_base_addr, data_offset;
+	u8 *data;
+	struct rmi4_touchpad_data *touch_data;
+	struct i2c_client *client = pdata->i2c_client;
+	const struct rmi4_touch_calib *calib =
+				&pdata->board->calib[pdata->touch_type];
+
+	/* get 2D sensor finger data */
+	/*
+	 * First get the finger status field - the size of the finger status
+	 * field is determined by the number of finger supporte - 2 bits per
+	 * finger, so the number of registers to read is:
+	 * registerCount = ceil(numberOfFingers/4).
+	 * Read the required number of registers and check each 2 bit field to
+	 * determine if a finger is down.
+	 */
+	touch_data		= rfi->fn_data;
+	fingers_supported	= rfi->num_of_data_points;
+	finger_registers	= (fingers_supported + 3)/4;
+	data_base_addr		= rfi->data_base_addr;
+
+	/* Read all the finger registers data in one i2c read, twice i2c read
+	 * in irq handler may cause i2c controller timeout */
+	retval = rmi4_i2c_block_read(pdata, data_base_addr,
+					touch_data->buffer, touch_data->size);
+	if (retval != touch_data->size) {
+		dev_err(&client->dev, "%s:read touch registers failed\n",
+								__func__);
+		return 0;
+	}
+
+	pdata->touch_counter++;
+	for (finger = 0; finger < fingers_supported; finger++) {
+		/* determine which data byte the finger status is in */
+		reg = finger / 4;
+		/* bit shift to get finger's status */
+		finger_shift = (finger % 4) * 2;
+		finger_status =
+			(touch_data->buffer[reg] >> finger_shift) & MASK_2BIT;
+		/*
+		 * if finger status indicates a finger is present then
+		 * read the finger data and report it
+		 */
+		if (finger_status == F11_PRESENT ||
+					finger_status == F11_INACCURATE) {
+			data_offset = finger_registers +
+				finger * rfi->size_of_data_register_block;
+			data = touch_data->buffer + data_offset;
+
+			x = (data[0] << 4) | (data[2] & MASK_4BIT);
+			y = (data[1] << 4) | ((data[2] >> 4) & MASK_4BIT);
+			wx = (data[3] & MASK_4BIT);
+			wy = (data[3] >> 4) & MASK_4BIT;
+
+			if (calib->swap_axes)
+				swap(x, y);
+			if (calib->x_flip)
+				x = pdata->sensor_max_x - x;
+			if (calib->y_flip)
+				y = pdata->sensor_max_y - y;
+
+			input_mt_slot(pdata->input_ts_dev, finger);
+			input_mt_report_slot_state(pdata->input_ts_dev,
+					MT_TOOL_FINGER, true);
+
+			input_report_abs(pdata->input_ts_dev,
+					ABS_MT_TOUCH_MAJOR, max(wx , wy));
+			input_report_abs(pdata->input_ts_dev,
+					ABS_MT_TOUCH_MINOR, min(wx , wy));
+			input_report_abs(pdata->input_ts_dev,
+					ABS_MT_POSITION_X, x);
+			input_report_abs(pdata->input_ts_dev,
+					ABS_MT_POSITION_Y, y);
+
+			pdata->finger_status[finger] = F11_PRESENT;
+
+		} else if (pdata->finger_status[finger] == F11_PRESENT) {
+			input_mt_slot(pdata->input_ts_dev, finger);
+			input_mt_report_slot_state(pdata->input_ts_dev,
+					MT_TOOL_FINGER, false);
+			pdata->finger_status[finger] = F11_NO_FINGER;
+		}
+
+	}
+
+	/* sync after groups of events */
+	input_sync(pdata->input_ts_dev);
+
+	return touch_count;
+}
+
+/**
+ * rmi4_touchpad_f12_irq_handler() - reports for the rmi4 touchpad device
+ * @pdata: pointer to rmi4_data structure
+ * @rfi: pointer to rmi4_fn structure
+ *
+ * This function acts upon touch interrupts by reading the relevant touch data
+ * from the device and thereafter reporting the realized event.
+ */
+int rmi4_touchpad_f12_irq_handler(struct rmi4_data *pdata, struct rmi4_fn *rfi)
+{
+	int retval;
+	int touch_count = 0;
+	int finger, fingers_supported, finger_registers;
+	int x, y, wx, wy;
+	enum finger_state finger_status;
+	u16 data_base_addr;
+	struct rmi4_touchpad_data *touch_data;
+	struct i2c_client *client = pdata->i2c_client;
+	struct synaptics_rmi4_f12_finger_data *data;
+	struct synaptics_rmi4_f12_finger_data *finger_data;
+	const struct rmi4_touch_calib *calib =
+				&pdata->board->calib[pdata->touch_type];
+
+	/* get 2D sensor finger data */
+	/*
+	 * First get the finger status field - the size of the finger status
+	 * field is determined by the number of finger supporte - 2 bits per
+	 * finger, so the number of registers to read is:
+	 * registerCount = ceil(numberOfFingers/4).
+	 * Read the required number of registers and check each 2 bit field to
+	 * determine if a finger is down.
+	 */
+	touch_data		= rfi->fn_data;
+	fingers_supported	= rfi->num_of_data_points;
+	finger_registers	= (fingers_supported + 3)/4;
+	data_base_addr		= rfi->data_base_addr + rfi->data1_offset;
+
+	/* Read all the finger registers data in one i2c read, twice i2c read
+	 * in irq handler may cause i2c controller timeout */
+	retval = rmi4_i2c_block_read(pdata, data_base_addr,
+					(unsigned char *)rfi->fn_data,
+					rfi->data_size);
+	if (retval != rfi->data_size) {
+		dev_err(&client->dev, "%s:read touch registers failed\n",
+								__func__);
+		return 0;
+	}
+
+	data = (struct synaptics_rmi4_f12_finger_data *)rfi->fn_data;
+
+	pdata->touch_counter++;
+	for (finger = 0; finger < fingers_supported; finger++) {
+		finger_data = data + finger;
+		finger_status = finger_data->object_type_and_status & MASK_2BIT;
+
+		if (finger_status) {
+			x = (finger_data->x_msb << 8) | (finger_data->x_lsb);
+			y = (finger_data->y_msb << 8) | (finger_data->y_lsb);
+			wx = finger_data->wx;
+			wy = finger_data->wy;
+			if (calib->swap_axes)
+				swap(x, y);
+			if (calib->x_flip)
+				x = pdata->sensor_max_x - x;
+			if (calib->y_flip)
+				y = pdata->sensor_max_y - y;
+
+			input_mt_slot(pdata->input_ts_dev, finger);
+			input_mt_report_slot_state(pdata->input_ts_dev,
+					MT_TOOL_FINGER, true);
+
+			input_report_abs(pdata->input_ts_dev,
+					ABS_MT_TOUCH_MAJOR, max(wx , wy));
+			input_report_abs(pdata->input_ts_dev,
+					ABS_MT_TOUCH_MINOR, min(wx , wy));
+			input_report_abs(pdata->input_ts_dev,
+					ABS_MT_POSITION_X, x);
+			input_report_abs(pdata->input_ts_dev,
+					ABS_MT_POSITION_Y, y);
+
+			pdata->finger_status[finger] = F11_PRESENT;
+
+		} else if (pdata->finger_status[finger] == F11_PRESENT) {
+			input_mt_slot(pdata->input_ts_dev, finger);
+			input_mt_report_slot_state(pdata->input_ts_dev,
+					MT_TOOL_FINGER, false);
+			pdata->finger_status[finger] = F11_NO_FINGER;
+		}
+	}
+
+	/* sync after groups of events */
+	input_sync(pdata->input_ts_dev);
+
+	return touch_count;
+}
+
+int rmi4_button_irq_handler(struct rmi4_data *pdata, struct rmi4_fn *rfi)
+{
+	int i;
+	int retval = 0;
+	bool bttn_down;
+	u8 bttns_status;
+	struct rmi4_button_data *button_data;
+	struct i2c_client *client = pdata->i2c_client;
+
+	retval = rmi4_i2c_byte_read(pdata, rfi->data_base_addr, &bttns_status);
+	if (retval < 0) {
+		dev_err(&client->dev, "%s: read data error!\n", __func__);
+		return retval;
+	}
+
+	dev_dbg(&client->dev, "%s\n", __func__);
+	button_data = rfi->fn_data;
+	pdata->key_counter++;
+	for (i = 0; i < button_data->num_of_bttns; i++) {
+		bttn_down = (bttns_status >> i) & 0x01;
+		if (bttn_down != button_data->status[i]) {
+			dev_dbg(&client->dev, "%s: button %d - %d",
+					__func__, i, bttn_down);
+			input_report_key(pdata->input_key_dev,
+					button_data->bttns_map[i], bttn_down);
+			button_data->status[i] = bttn_down;
+		}
+	}
+	input_sync(pdata->input_key_dev);
+
+	return retval;
+}
+
+#ifdef CONFIG_DEBUG_FS
+int rmi4_ana_data_irq_handler(struct rmi4_data *pdata, struct rmi4_fn *rfi)
+{
+	int retval = 0;
+	struct rmi4_ana_data *ana_data = rfi->fn_data;
+	struct i2c_client *client = pdata->i2c_client;
+
+	if (!ana_data->buffer) {
+		dev_warn(&client->dev, "Raw sensor buffer not yet ready !\n");
+		return -ENOMEM;
+	}
+
+	retval = rmi4_i2c_block_read(pdata,
+			rfi->data_base_addr + F54_FIFO_DATA_OFFSET,
+			ana_data->buffer, ana_data->size);
+	if (retval < 0) {
+		dev_err(&client->dev, "%s: read data error!\n", __func__);
+		ana_data->status = retval;
+		return retval;
+	}
+	dev_info(&client->dev, "%s: ana buffer %p, size=%d, retval=%d\n",
+		__func__, ana_data->buffer, ana_data->size, retval);
+
+	ana_data->status = retval;
+
+	return 0;
+}
+#endif
+
+static irqreturn_t rmi4_irq_thread(int irq, void *data)
+{
+	u8 intr_status[4];
+	int retval;
+	struct rmi4_fn *rfi;
+	struct rmi4_device_info	*rmi;
+	struct rmi4_data *pdata = data;
+	struct i2c_client *client = pdata->i2c_client;
+
+	/*
+	 * Get the interrupt status from the function $01
+	 * control register+1 to find which source(s) were interrupting
+	 * so we can read the data from the source(s) (2D sensor, buttons..)
+	 */
+	retval = rmi4_i2c_block_read(pdata, pdata->fn01_data_base_addr + 1,
+					intr_status,
+					pdata->number_of_interrupt_register);
+	if (retval != pdata->number_of_interrupt_register) {
+		dev_err(&client->dev,
+			"could not read interrupt status registers\n");
+		return IRQ_NONE;
+	}
+	dev_dbg(&client->dev,
+		"%s: number_of_interrupt_register:%d, intr_status:0x%x",
+		__func__, pdata->number_of_interrupt_register, intr_status[0]);
+	/*
+	 * check each function that has data sources and if the interrupt for
+	 * that triggered then call the function's irq handler to
+	 * gather data and report it to the input subsystem
+	 */
+	rmi = &(pdata->rmi4_mod_info);
+	list_for_each_entry(rfi, &rmi->support_fn_list, link) {
+		if ((intr_status[rfi->index_to_intr_reg] & rfi->intr_mask) &&
+						rfi->ops->irq_handler)
+			rfi->ops->irq_handler(pdata, rfi);
+	}
+	return IRQ_HANDLED;
+}
+
+int rmi4_dev_ctl_detect(struct rmi4_data *pdata, struct rmi4_fn *rfi,
+						unsigned int interruptcount)
+{
+	unsigned short	intr_offset;
+	int	i;
+	struct	i2c_client *client = pdata->i2c_client;
+
+	dev_info(&client->dev, "%s\n", __func__);
+
+	/* Need to get interrupt info for handling interrupts */
+	rfi->index_to_intr_reg = (interruptcount + 7)/8;
+	if (rfi->index_to_intr_reg != 0)
+		rfi->index_to_intr_reg -= 1;
+	/*
+	 * loop through interrupts for each source in fn $01
+	 * and or in a bit to the interrupt mask for each.
+	 */
+	intr_offset = interruptcount % 8;
+	rfi->intr_mask = 0;
+	for (i = intr_offset;
+		i < ((rfi->intr_src_count & MASK_3BIT) + intr_offset); i++)
+		rfi->intr_mask |= 1 << i;
+
+	return 0;
+}
+
+/**
+ * rmi4_rmi4_touchpad_detect() - detects the rmi4 touchpad device
+ * @pdata: pointer to rmi4_data structure
+ * @rfi: pointer to rmi4_fn structure
+ * @interruptcount: count the number of interrupts
+ *
+ * This function calls to detects the rmi4 touchpad device
+ */
+int rmi4_touchpad_detect(struct rmi4_data *pdata, struct rmi4_fn *rfi,
+						unsigned int interruptcount)
+{
+	u8 queries[QUERY_LEN];
+	unsigned short	intr_offset;
+	unsigned char	abs_data_size;
+	unsigned char	abs_data_blk_size;
+	unsigned char	egr_0, egr_1;
+	unsigned int	all_data_blk_size;
+	int	has_pinch, has_flick, has_tap;
+	int	has_tapandhold, has_doubletap;
+	int	has_earlytap, has_press;
+	int	has_palmdetect, has_rotate;
+	int	has_rel;
+	int	i;
+	int	retval;
+	struct	rmi4_touchpad_data *touch_data;
+	struct	i2c_client *client = pdata->i2c_client;
+
+	/*
+	 * need to get number of fingers supported, data size, etc.
+	 * to be used when getting data since the number of registers to
+	 * read depends on the number of fingers supported and data size.
+	 */
+	retval = rmi4_i2c_block_read(pdata, rfi->query_base_addr, queries,
+							sizeof(queries));
+	if (retval != sizeof(queries)) {
+		dev_err(&client->dev, "%s:read function query registers\n",
+							__func__);
+		return retval;
+	}
+	/*
+	 * 2D data sources have only 3 bits for the number of fingers
+	 * supported - so the encoding is a bit weird.
+	 */
+	if ((queries[1] & MASK_3BIT) <= 4)
+		/* add 1 since zero based */
+		rfi->num_of_data_points = (queries[1] & MASK_3BIT) + 1;
+	else {
+		/*
+		 * a value of 5 is up to 10 fingers - 6 and 7 are reserved
+		 * (shouldn't get these i int retval;n a normal 2D source).
+		 */
+		if ((queries[1] & MASK_3BIT) == 5)
+			rfi->num_of_data_points = 10;
+	}
+	/* Need to get interrupt info for handling interrupts */
+	rfi->index_to_intr_reg = (interruptcount + 7)/8;
+	if (rfi->index_to_intr_reg != 0)
+		rfi->index_to_intr_reg -= 1;
+	/*
+	 * loop through interrupts for each source in fn $11
+	 * and or in a bit to the interrupt mask for each.
+	 */
+	intr_offset = interruptcount % 8;
+	rfi->intr_mask = 0;
+	for (i = intr_offset;
+		i < ((rfi->intr_src_count & MASK_3BIT) + intr_offset); i++)
+		rfi->intr_mask |= 1 << i;
+
+	/* Size of just the absolute data for one finger */
+	abs_data_size	= queries[5] & MASK_2BIT;
+	/* One each for X and Y, one for LSB for X & Y, one for W, one for Z */
+	abs_data_blk_size = 3 + (2 * (abs_data_size == 0 ? 1 : 0));
+	rfi->size_of_data_register_block = abs_data_blk_size;
+
+	/*
+	 * need to determine the size of data to read - this depends on
+	 * conditions such as whether Relative data is reported and if Gesture
+	 * data is reported.
+	 */
+	egr_0 = queries[7];
+	egr_1 = queries[8];
+
+	/*
+	 * Get info about what EGR data is supported, whether it has
+	 * Relative data supported, etc.
+	 */
+	has_pinch	= egr_0 & HAS_PINCH;
+	has_flick	= egr_0 & HAS_FLICK;
+	has_tap		= egr_0 & HAS_TAP;
+	has_earlytap	= egr_0 & HAS_EARLYTAP;
+	has_press	= egr_0 & HAS_PRESS;
+	has_rotate	= egr_1 & HAS_ROTATE;
+	has_rel		= queries[1] & HAS_RELEASE;
+	has_tapandhold	= egr_0 & HAS_TAPANDHOLD;
+	has_doubletap	= egr_0 & HAS_DOUBLETAP;
+	has_palmdetect	= egr_1 & HAS_PALMDETECT;
+
+	/*
+	 * Size of all data including finger status, absolute data for each
+	 * finger, relative data and EGR data
+	 */
+	all_data_blk_size =
+		/* finger status, four fingers per register */
+		((rfi->num_of_data_points + 3) / 4) +
+		/* absolute data, per finger times number of fingers */
+		(abs_data_blk_size * rfi->num_of_data_points) +
+		/*
+		 * two relative registers (if relative is being reported)
+		 */
+		2 * has_rel +
+		/*
+		 * F11_2D_data8 is only present if the egr_0
+		 * register is non-zero.
+		 */
+		!!(egr_0) +
+		/*
+		 * F11_2D_data9 is only present if either egr_0 or
+		 * egr_1 registers are non-zero.
+		 */
+		(egr_0 || egr_1) +
+		/*
+		 * F11_2D_data10 is only present if EGR_PINCH or EGR_FLICK of
+		 * egr_0 reports as 1.
+		 */
+		!!(has_pinch | has_flick) +
+		/*
+		 * F11_2D_data11 and F11_2D_data12 are only present if
+		 * EGR_FLICK of egr_0 reports as 1.
+		 */
+		2 * !!(has_flick);
+
+	touch_data = kzalloc(sizeof(*touch_data), GFP_KERNEL);
+	if (!touch_data) {
+		dev_err(&client->dev, "kzalloc touchpad data failed\n");
+		return -ENOMEM;
+	}
+	touch_data->buffer = kzalloc(all_data_blk_size, GFP_KERNEL);
+	if (!touch_data->buffer) {
+		dev_err(&client->dev, "kzalloc touchpad buffer failed\n");
+		retval = -ENOMEM;
+		goto alloc_buf_err;
+	}
+	touch_data->size = all_data_blk_size;
+	rfi->fn_data = touch_data;
+	return 0;
+
+alloc_buf_err:
+	kfree(touch_data);
+	return retval;
+}
+
+/**
+ * rmi4_rmi4_touchpad_detect() - detects the rmi4 touchpad device
+ * @pdata: pointer to rmi4_data structure
+ * @rfi: pointer to rmi4_fn structure
+ * @interruptcount: number of interrupts
+ *
+ * This function detects the rmi4 touchpad device
+ */
+int rmi4_touchpad_f12_detect(struct rmi4_data *pdata, struct rmi4_fn *rfi,
+						unsigned int interruptcount)
+{
+	unsigned short	intr_offset;
+	int	i;
+	int	retval;
+	struct	i2c_client *client = pdata->i2c_client;
+
+	unsigned char ctrl_8_offset;
+	unsigned char ctrl_23_offset;
+	unsigned char ctrl_28_offset;
+	struct f12_query_5 query_5;
+	struct f12_query_8 query_8;
+	struct f12_ctrl_8 ctrl_8;
+	struct f12_ctrl_23 ctrl_23;
+	unsigned char fingers_to_support = MAX_FINGERS;
+	unsigned char enable_mask;
+	unsigned char size_of_2d_data;
+	const struct rmi4_touch_calib *calib =
+				&pdata->board->calib[pdata->touch_type];
+
+
+	/*
+	 * need to get number of fingers supported, data size, etc.
+	 * to be used when getting data since the number of registers to
+	 * read depends on the number of fingers supported and data size.
+	 */
+	retval = rmi4_i2c_block_read(pdata,
+			rfi->query_base_addr+5,
+			(u8 *)&query_5,
+			sizeof(query_5));
+	if (retval != sizeof(query_5)) {
+		dev_err(&client->dev, "%s:read function query registers 1\n",
+							__func__);
+		return retval;
+	}
+
+	ctrl_8_offset = query_5.ctrl0_is_present +
+			query_5.ctrl1_is_present +
+			query_5.ctrl2_is_present +
+			query_5.ctrl3_is_present +
+			query_5.ctrl4_is_present +
+			query_5.ctrl5_is_present +
+			query_5.ctrl6_is_present +
+			query_5.ctrl7_is_present;
+
+	ctrl_23_offset = ctrl_8_offset +
+			query_5.ctrl8_is_present +
+			query_5.ctrl9_is_present +
+			query_5.ctrl10_is_present +
+			query_5.ctrl11_is_present +
+			query_5.ctrl12_is_present +
+			query_5.ctrl13_is_present +
+			query_5.ctrl14_is_present +
+			query_5.ctrl15_is_present +
+			query_5.ctrl16_is_present +
+			query_5.ctrl17_is_present +
+			query_5.ctrl18_is_present +
+			query_5.ctrl19_is_present +
+			query_5.ctrl20_is_present +
+			query_5.ctrl21_is_present +
+			query_5.ctrl22_is_present;
+
+	ctrl_28_offset = ctrl_23_offset +
+			query_5.ctrl23_is_present +
+			query_5.ctrl24_is_present +
+			query_5.ctrl25_is_present +
+			query_5.ctrl26_is_present +
+			query_5.ctrl27_is_present;
+
+	retval = rmi4_i2c_block_read(pdata,
+			rfi->ctrl_base_addr + ctrl_23_offset,
+			(u8 *)&ctrl_23,
+			sizeof(ctrl_23));
+	if (retval != sizeof(ctrl_23)) {
+		dev_err(&client->dev, "%s:read function query registers 2\n",
+							__func__);
+		return retval;
+	}
+
+	/* Maximum number of fingers supported */
+	rfi->num_of_data_points = min(ctrl_23.max_reported_objects,
+			fingers_to_support);
+
+	enable_mask = RPT_DEFAULT;
+#ifdef REPORT_2D_Z
+	enable_mask |= RPT_Z;
+#endif
+	enable_mask |= (RPT_WX | RPT_WY);
+
+	retval = rmi4_i2c_block_write(pdata,
+			rfi->ctrl_base_addr + ctrl_28_offset,
+			&enable_mask,
+			sizeof(enable_mask));
+	if (retval < 0)
+		return retval;
+
+	retval = rmi4_i2c_block_read(pdata,
+			rfi->query_base_addr + 8,
+			(u8 *)&query_8,
+			sizeof(query_8));
+	if (retval < 0)
+		return retval;
+
+	/* Determine the presence of Data0 register */
+	rfi->data1_offset = query_8.data0_is_present;
+
+	retval = rmi4_i2c_block_read(pdata,
+			rfi->ctrl_base_addr + ctrl_8_offset,
+			(u8 *)&ctrl_8,
+			sizeof(ctrl_8));
+	if (retval < 0)
+		return retval;
+
+	/* Maximum x and y */
+	pdata->sensor_max_x =
+			((unsigned short)ctrl_8.max_x_coord_lsb << 0) |
+			((unsigned short)ctrl_8.max_x_coord_msb << 8);
+	pdata->sensor_max_y =
+			((unsigned short)ctrl_8.max_y_coord_lsb << 0) |
+			((unsigned short)ctrl_8.max_y_coord_msb << 8);
+	if (calib->swap_axes)
+		swap(pdata->sensor_max_x, pdata->sensor_max_y);
+	dev_info(&pdata->i2c_client->dev,
+			"%s: Function %02x max x = %d max y = %d\n",
+			__func__, rfi->fn_number,
+			pdata->sensor_max_x,
+			pdata->sensor_max_y);
+
+#ifdef CONFIG_DEBUG_FS
+	pdata->num_rx = ctrl_8.num_of_rx;
+	pdata->num_tx = ctrl_8.num_of_tx;
+	dev_info(&pdata->i2c_client->dev,
+		"%s: Function %02x rx = %d tx = %d\n",
+		__func__, rfi->fn_number, ctrl_8.num_of_rx, ctrl_8.num_of_tx);
+#endif
+
+	/* Need to get interrupt info for handling interrupts */
+	rfi->index_to_intr_reg = (interruptcount + 7)/8;
+	if (rfi->index_to_intr_reg != 0)
+		rfi->index_to_intr_reg -= 1;
+	/*
+	 * loop through interrupts for each source in fn $12
+	 * and or in a bit to the interrupt mask for each.
+	 */
+	intr_offset = interruptcount % 8;
+	rfi->intr_mask = 0;
+	for (i = intr_offset;
+		i < ((rfi->intr_src_count & MASK_3BIT) + intr_offset); i++)
+		rfi->intr_mask |= 1 << i;
+
+	size_of_2d_data = sizeof(struct synaptics_rmi4_f12_finger_data);
+
+	/* Allocate memory for finger data storage space */
+	rfi->data_size = rfi->num_of_data_points * size_of_2d_data;
+	rfi->fn_data = kmalloc(rfi->data_size, GFP_KERNEL);
+
+	if (!rfi->fn_data) {
+		dev_err(&client->dev, "kzalloc touchpad buffer failed\n");
+		retval = -ENOMEM;
+		goto alloc_buf_err;
+	}
+
+	return 0;
+
+alloc_buf_err:
+	return retval;
+}
+
+int rmi4_button_detect(struct rmi4_data *pdata, struct rmi4_fn *rfi,
+						unsigned int interruptcount)
+{
+	int i, retval, bttn_cnt;
+	u8 queries[2];
+	unsigned short intr_offset;
+	struct rmi4_button_data *button_data;
+	struct i2c_client *client = pdata->i2c_client;
+
+	retval = rmi4_i2c_block_read(pdata, rfi->query_base_addr, queries,
+							sizeof(queries));
+	if (retval != sizeof(queries)) {
+		dev_err(&client->dev, "%s:read query failed\n", __func__);
+		return retval;
+	}
+
+	button_data = kzalloc(sizeof(*button_data), GFP_KERNEL);
+	if (!button_data) {
+		dev_err(&client->dev, "kzalloc button data failed\n");
+		return -ENOMEM;
+	}
+	/* The value of the register is one less than the actual number
+	 * of buttons supported */
+	bttn_cnt = (queries[0] & MASK_3BIT) + 1;
+	dev_dbg(&client->dev, "%d buttons detected\n", bttn_cnt);
+
+	button_data->status = kcalloc(bttn_cnt, sizeof(bool), GFP_KERNEL);
+	if (!button_data->status) {
+		dev_err(&client->dev, "kcalloc button status failed\n");
+		retval = -ENOMEM;
+		goto alloc_status_err;
+	}
+	button_data->bttns_map =
+			kcalloc(bttn_cnt, sizeof(unsigned char), GFP_KERNEL);
+	if (!button_data->bttns_map) {
+		dev_err(&client->dev, "kcalloc button map table failed\n");
+		retval = -ENOMEM;
+		goto alloc_map_err;
+	}
+	/* Set the button map table as 1, 2, 3 ... */
+	for (i = 0; i < bttn_cnt; i++) {
+		button_data->bttns_map[i] = i + 1;
+		set_bit(i + 1, pdata->input_key_dev->keybit);
+	}
+	button_data->num_of_bttns = bttn_cnt;
+	rfi->fn_data = button_data;
+
+	rfi->index_to_intr_reg = (interruptcount + 7) / 8;
+	if (rfi->index_to_intr_reg != 0)
+		rfi->index_to_intr_reg -= 1;
+	/*
+	 * loop through interrupts for each source
+	 * and or in a bit to the interrupt mask for each.
+	 */
+	intr_offset = interruptcount % 8;
+	rfi->intr_mask = 0;
+	for (i = intr_offset;
+		i < ((rfi->intr_src_count & MASK_3BIT) + intr_offset); i++)
+		rfi->intr_mask |= 1 << i;
+	return 0;
+
+alloc_map_err:
+	kfree(button_data->status);
+alloc_status_err:
+	kfree(button_data);
+	return retval;
+}
+
+#ifdef CONFIG_DEBUG_FS
+int rmi4_ana_data_detect(struct rmi4_data *pdata, struct rmi4_fn *rfi,
+			 unsigned int interruptcount)
+{
+	int i;
+	unsigned short intr_offset;
+	struct rmi4_ana_data *ana_data;
+	struct i2c_client *client = pdata->i2c_client;
+
+	rfi->index_to_intr_reg = (interruptcount + 7) / 8;
+	if (rfi->index_to_intr_reg != 0)
+		rfi->index_to_intr_reg -= 1;
+	/*
+	 * loop through interrupts for each source
+	 * and or in a bit to the interrupt mask for each.
+	 */
+	intr_offset = interruptcount % 8;
+	rfi->intr_mask = 0;
+	for (i = intr_offset;
+		i < ((rfi->intr_src_count & MASK_3BIT) + intr_offset); i++)
+		rfi->intr_mask |= 1 << i;
+
+	ana_data = kzalloc(sizeof(*ana_data), GFP_KERNEL);
+	if (!ana_data) {
+		dev_err(&client->dev, "kzalloc ana data failed\n");
+		return -ENOMEM;
+	}
+	ana_data->i2c_client = client;
+	rfi->fn_data = ana_data;
+	/* touch + button area rx/tx */
+	ana_data->rx = pdata->num_rx;
+	ana_data->tx = pdata->num_tx;
+
+	/* Report type = 3 Raw 15-bit Image report
+	 * Each pixel's raw capacitance is represented by 16-bit signed value.
+	 * The number of bytes reported is:
+	 * NumberofTransmitterElectrodes * NumberofReceiverElectrodes * 2
+	 */
+	ana_data->size = RAW_VALUE_SIZE * ana_data->rx * ana_data->tx;
+
+	return 0;
+}
+
+void rmi4_ana_data_remove(struct rmi4_fn *rfi)
+{
+	struct rmi4_ana_data *ana_data;
+
+	if (!rfi->fn_data)
+		return;
+
+	ana_data = rfi->fn_data;
+	kfree(ana_data);
+}
+#endif
+
+void rmi4_button_remove(struct rmi4_fn *rfi)
+{
+	struct rmi4_button_data *bttn_data;
+
+	if (!rfi->fn_data)
+		return;
+	bttn_data = rfi->fn_data;
+	kfree(bttn_data->status);
+	kfree(bttn_data->bttns_map);
+	kfree(bttn_data);
+}
+
+void rmi4_touchpad_remove(struct rmi4_fn *rfi)
+{
+	struct rmi4_touchpad_data *touch_data;
+
+	if (!rfi->fn_data)
+		return;
+	touch_data = rfi->fn_data;
+	kfree(touch_data->buffer);
+	kfree(touch_data);
+}
+
+void rmi4_touchpad_f12_remove(struct rmi4_fn *rfi)
+{
+	if (!rfi->fn_data)
+		return;
+	kfree(rfi->fn_data);
+}
+
+/**
+ * rmi4_rmi4_touchpad_config() - confiures the rmi4 touchpad device
+ * @pdata: pointer to rmi4_data structure
+ * @rfi: pointer to rmi4_fn structure
+ *
+ * This function calls to confiures the rmi4 touchpad device
+ */
+int rmi4_touchpad_config(struct rmi4_data *pdata, struct rmi4_fn *rfi)
+{
+	/*
+	 * For the data source - print info and do any
+	 * source specific configuration.
+	 */
+	u8 ctrl0, data[BUF_LEN];
+	int retval = 0;
+	u8 pos_delta[] = { DELTA_XPOS_THRESH, DELTA_YPOS_THRESH };
+	struct	i2c_client *client = pdata->i2c_client;
+	const struct rmi4_touch_calib *calib =
+				&pdata->board->calib[pdata->touch_type];
+
+	/* Get and print some info about the data source... */
+	/* To Query 2D devices we need to read from the address obtained
+	 * from the function descriptor stored in the RMI function info.
+	 */
+	retval = rmi4_i2c_block_read(pdata,
+				rfi->query_base_addr, data, QUERY_LEN);
+	if (retval != QUERY_LEN) {
+		dev_err(&client->dev, "%s:read query registers failed\n",
+								__func__);
+		return retval;
+	}
+
+	retval = rmi4_i2c_byte_read(pdata, rfi->ctrl_base_addr, &ctrl0);
+	if (retval < 0) {
+		dev_err(&client->dev, "read control 0 failed\n");
+		return retval;
+	}
+
+	retval = rmi4_i2c_byte_write(pdata, rfi->ctrl_base_addr,
+				(ctrl0 & ~MASK_3BIT) | TOUCH_REDUCE_MODE);
+	if (retval < 0) {
+		dev_err(&client->dev, "Set touch report mode failed\n");
+		return retval;
+	}
+
+	retval = rmi4_i2c_block_write(pdata, rfi->ctrl_base_addr + 2,
+					pos_delta, sizeof(pos_delta));
+	if (retval < 0) {
+		dev_err(&client->dev, "Write DELTA_POS_THRESH failed\n");
+		return retval;
+	}
+	retval = rmi4_i2c_block_read(pdata,
+				rfi->ctrl_base_addr, data, DATA_BUF_LEN);
+	if (retval != DATA_BUF_LEN) {
+		dev_err(&client->dev, "%s:read control registers failed\n",
+								__func__);
+		return retval;
+	}
+	/* Store these for use later*/
+	pdata->sensor_max_x = ((data[6] & MASK_8BIT) << 0) |
+					((data[7] & MASK_4BIT) << 8);
+	pdata->sensor_max_y = ((data[8] & MASK_8BIT) << 0) |
+					((data[9] & MASK_4BIT) << 8);
+	if (calib->swap_axes)
+		swap(pdata->sensor_max_x, pdata->sensor_max_y);
+	dev_info(&client->dev, "sensor_max_x=%d, sensor_max_y=%d\n",
+				pdata->sensor_max_x, pdata->sensor_max_y);
+	return retval;
+}
+
+/**
+ * rmi4_rmi4_touchpad_f12_config() - confiures the rmi4 touchpad device
+ * @pdata: pointer to rmi4_data structure
+ * @rfi: pointer to rmi4_fn structure
+ *
+ * This function calls to confiures the rmi4 touchpad device
+ */
+int rmi4_touchpad_f12_config(struct rmi4_data *pdata, struct rmi4_fn *rfi)
+{
+	int retval = 0;
+	struct	i2c_client *client = pdata->i2c_client;
+	struct f12_query_5 query_5;
+	struct f12_ctrl_20 ctrl_20;
+	unsigned char ctrl_20_offset;
+
+	retval = rmi4_i2c_block_read(pdata,
+			rfi->query_base_addr+5,
+			(u8 *)&query_5,
+			sizeof(query_5));
+	if (retval != sizeof(query_5)) {
+		dev_err(&client->dev, "%s:read query 5 failed 1\n",
+							__func__);
+		return retval;
+	}
+
+	ctrl_20_offset = query_5.ctrl0_is_present +
+			query_5.ctrl1_is_present +
+			query_5.ctrl2_is_present +
+			query_5.ctrl3_is_present +
+			query_5.ctrl4_is_present +
+			query_5.ctrl5_is_present +
+			query_5.ctrl6_is_present +
+			query_5.ctrl7_is_present +
+			query_5.ctrl8_is_present +
+			query_5.ctrl9_is_present +
+			query_5.ctrl10_is_present +
+			query_5.ctrl11_is_present +
+			query_5.ctrl12_is_present +
+			query_5.ctrl13_is_present +
+			query_5.ctrl14_is_present +
+			query_5.ctrl15_is_present +
+			query_5.ctrl16_is_present +
+			query_5.ctrl17_is_present +
+			query_5.ctrl18_is_present +
+			query_5.ctrl19_is_present;
+
+	retval = rmi4_i2c_block_read(pdata,
+			rfi->ctrl_base_addr + ctrl_20_offset,
+			(u8 *)&ctrl_20,
+			sizeof(ctrl_20));
+	if (retval != sizeof(ctrl_20)) {
+		dev_err(&client->dev, "%s:read control 20 failed 2\n",
+							__func__);
+		return retval;
+	}
+
+	ctrl_20.x_suppression = DELTA_XPOS_THRESH;
+	ctrl_20.y_suppression = DELTA_YPOS_THRESH;
+	ctrl_20.report_always = 0;
+
+	retval = rmi4_i2c_block_write(pdata,
+			rfi->ctrl_base_addr + ctrl_20_offset,
+			(u8 *)&ctrl_20,
+			sizeof(ctrl_20));
+	if (retval < 0) {
+		dev_err(&client->dev, "%s:write control 20 failed 3\n",
+							__func__);
+	}
+
+	return retval;
+}
+
+static int
+rmi4_process_func(struct rmi4_data *pdata, struct rmi4_fn_desc *rmi_fd,
+						int page_start, int intr_cnt)
+{
+	int retval, id;
+	struct i2c_client *client = pdata->i2c_client;
+	struct rmi4_fn *rfi = NULL;
+	struct rmi4_fn_ops *fn_ops = NULL;
+
+	dev_info(&client->dev,
+			"fn 0x%x detected: query=0x%x, cmd=0x%x, ctrl=0x%x, data=0x%x, intr=0x%x\n",
+			rmi_fd->fn_number, rmi_fd->query_base_addr,
+			rmi_fd->cmd_base_addr, rmi_fd->ctrl_base_addr,
+			rmi_fd->data_base_addr, rmi_fd->intr_src_count);
+
+	id = rmi_fd->fn_number;
+	if (id == RMI4_DEV_CTL_FUNC_NUM) {
+		pdata->fn01_query_base_addr = rmi_fd->query_base_addr;
+		pdata->fn01_ctrl_base_addr = rmi_fd->ctrl_base_addr;
+		pdata->fn01_data_base_addr = rmi_fd->data_base_addr;
+
+		retval = rmi4_i2c_set_bits(pdata, pdata->fn01_ctrl_base_addr,
+							F01_CTRL0_CONFIGURED);
+		if (retval < 0) {
+			dev_err(&client->dev, "Set F01_CONFIGURED failed\n");
+			return retval;
+		}
+		if (boot_mode == BOOT_MODE_COS) {
+			retval = rmi4_i2c_set_bits(pdata,
+					pdata->fn01_ctrl_base_addr,
+					F01_CTRL0_SLEEP);
+			if (retval < 0) {
+				dev_err(&client->dev,
+					"set F01_CTRL0_SLEEP failed\n");
+				return retval;
+			}
+			msleep(RMI4_RESET_DELAY);
+			dev_info(&client->dev,
+				"System is in charger mode, "
+				"touch screen should be always in sleep mode, "
+				"we don't need to go on anymore\n");
+			return -1;
+
+		} else {
+			retval = rmi4_i2c_clear_bits(pdata,
+					pdata->fn01_ctrl_base_addr,
+					F01_CTRL0_NOSLEEP);
+			if (retval < 0) {
+				dev_err(&client->dev,
+					"clear F01_CTRL0_SLEEP failed\n");
+				return retval;
+			}
+		}
+	}
+
+	fn_ops = get_supported_fn_ops(id);
+	if (!fn_ops)
+		return 0;
+	rfi = kzalloc(sizeof(*rfi), GFP_KERNEL);
+	if (!rfi) {
+		dev_err(&client->dev, "kzalloc fn%d rfi failed\n", id);
+		return -ENOMEM;
+	}
+	rfi->fn_number = rmi_fd->fn_number;
+	rfi->intr_src_count = rmi_fd->intr_src_count;
+	rfi->query_base_addr = page_start + rmi_fd->query_base_addr;
+	rfi->cmd_base_addr = page_start + rmi_fd->cmd_base_addr;
+	rfi->ctrl_base_addr = page_start + rmi_fd->ctrl_base_addr;
+	rfi->data_base_addr = page_start + rmi_fd->data_base_addr;
+	rfi->ops = fn_ops;
+
+	if (rfi->ops->detect) {
+		retval = rfi->ops->detect(pdata, rfi, intr_cnt);
+		if (retval < 0) {
+			dev_err(&client->dev, "fn 0x%x init failed\n", id);
+			goto init_err;
+		}
+	}
+	/* link this function info to the RMI module */
+	list_add_tail(&rfi->link, &pdata->rmi4_mod_info.support_fn_list);
+	return 0;
+
+init_err:
+	kfree(rfi);
+	return retval;
+}
+
+static void rmi4_free_funcs(struct rmi4_data *rmi4_data)
+{
+	struct rmi4_fn *rfi, *next;
+	struct list_head *fn_list;
+
+	fn_list = &(rmi4_data->rmi4_mod_info.support_fn_list);
+	list_for_each_entry_safe(rfi, next, fn_list, link) {
+		if (rfi->ops->remove)
+			rfi->ops->remove(rfi);
+		list_del(&rfi->link);
+		kfree(rfi);
+	}
+}
+
+static int do_init_reset(struct rmi4_data *pdata)
+{
+	bool has_f01 = false;
+	bool has_f34 = false;
+	int i, retval;
+	int page, page_start, pdt_start, pdt_end;
+	struct rmi4_fn_desc rmi_fd, f34_fd, f01_fd;
+	struct i2c_client *client = pdata->i2c_client;
+
+	for (page = 0; page <= RMI4_MAX_PAGE; page++) {
+		page_start = page * RMI4_PAGE_SIZE;
+		pdt_start = page_start + PDT_START_SCAN_LOCATION;
+		pdt_end = page_start + PDT_END_SCAN_LOCATION;
+		for (i = pdt_start; i >= pdt_end; i -= PDT_ENTRY_SIZE) {
+			retval = rmi4_i2c_block_read(pdata, i,
+						(u8 *)&rmi_fd,
+						sizeof(rmi_fd));
+			if (retval < 0) {
+				dev_err(&client->dev, "%s: read 0x%x failed\n",
+								__func__, i);
+				return retval;
+			}
+			if (RMI4_END_OF_PDT(rmi_fd.fn_number))
+				break;
+
+			if (rmi_fd.fn_number == RMI4_DEV_CTL_FUNC_NUM) {
+				u16 addr = page_start + rmi_fd.cmd_base_addr;
+				u8 cmd = RMI4_DEVICE_RESET_CMD;
+				retval = rmi4_i2c_byte_write(pdata, addr, cmd);
+				if (retval < 0) {
+					dev_err(&client->dev,
+							"reset cmd failed.\n");
+					return retval;
+				}
+				msleep(RMI4_RESET_DELAY);
+				memcpy(&f01_fd, &rmi_fd, sizeof(rmi_fd));
+				has_f01 = true;
+			} else if (rmi_fd.fn_number == RMI4_FLASH_FW_FUNC_NUM) {
+				memcpy(&f34_fd, &rmi_fd, sizeof(rmi_fd));
+				has_f34 = true;
+			}
+		}
+		if (has_f01 && has_f34)
+			break;
+	}
+
+	if (!has_f01 || !has_f34) {
+		dev_err(&client->dev,
+			"%s: Failed to find F01/F34 for init reset.\n",
+			__func__);
+		return -ENODEV;
+	}
+	retval = rmi4_fw_update(pdata, &f01_fd, &f34_fd);
+	if (retval < 0) {
+		dev_err(&client->dev,
+				"%s: update firmware failed!\n", __func__);
+		return retval;
+	}
+	pdata->touch_type = retval;
+
+	return 0;
+}
+
+/**
+ * rmi4_i2c_query_device() - query the rmi4 device
+ * @pdata: pointer to rmi4_data structure
+ *
+ * This function is used to query the rmi4 device.
+ */
+static int rmi4_i2c_query_device(struct rmi4_data *pdata)
+{
+	int i, retval;
+	int page, page_start, pdt_start, pdt_end;
+	int data_sources = 0;
+	u8 std_queries[STD_QUERY_LEN];
+	unsigned char intr_count = 0;
+	unsigned int ctrl_offset;
+	struct rmi4_fn_desc rmi_fd;
+	struct rmi4_fn *rfi;
+	struct rmi4_device_info *rmi;
+	struct	i2c_client *client = pdata->i2c_client;
+
+	/*
+	 * init the physical drivers RMI module
+	 * info list of functions
+	 */
+	INIT_LIST_HEAD(&pdata->rmi4_mod_info.support_fn_list);
+
+	/*
+	 * Read the Page Descriptor Table to determine what functions
+	 * are present
+	 */
+	for (page = 0; page <= RMI4_MAX_PAGE; page++) {
+		page_start = page * RMI4_PAGE_SIZE;
+		pdt_start = page_start + PDT_START_SCAN_LOCATION;
+		pdt_end = page_start + PDT_END_SCAN_LOCATION;
+		for (i = pdt_start; i >= pdt_end; i -= PDT_ENTRY_SIZE) {
+			retval = rmi4_i2c_block_read(pdata, i,
+						(u8 *)&rmi_fd,
+						sizeof(rmi_fd));
+			if (retval < 0) {
+				dev_err(&client->dev, "%s: read 0x%x failed",
+								__func__, i);
+				goto failed;
+			}
+			if (RMI4_END_OF_PDT(rmi_fd.fn_number))
+				break;
+
+			retval = rmi4_process_func(pdata, &rmi_fd,
+						page_start, intr_count);
+			if (retval < 0) {
+				dev_err(&client->dev,
+						"%s: process fn%x failed",
+						__func__, rmi_fd.fn_number);
+				goto failed;
+			}
+			/* interrupt count for next iteration */
+			intr_count += rmi_fd.intr_src_count & MASK_3BIT;
+		}
+	}
+	dev_dbg(&client->dev, "End of PDT, intr_count=%d\n", intr_count);
+	/*
+	 * calculate the interrupt register count - used in the
+	 * ISR to read the correct number of interrupt registers
+	 */
+	pdata->number_of_interrupt_register = (intr_count + 7) / 8;
+	/*
+	 * Function $01 will be used to query the product properties,
+	 * and product ID  so we had to read the PDT above first to get
+	 * the Fn $01 query address and prior to filling in the product
+	 * info. NOTE: Even an unflashed device will still have FN $01.
+	 */
+
+	/* Load up the standard queries and get the RMI4 module info */
+	retval = rmi4_i2c_block_read(pdata, pdata->fn01_query_base_addr,
+					std_queries, sizeof(std_queries));
+	if (retval != sizeof(std_queries)) {
+		dev_err(&client->dev,
+				"%s: Failed reading queries\n", __func__);
+		retval = -EIO;
+		goto failed;
+	}
+
+	/* Currently supported RMI version is 4.0 */
+	pdata->rmi4_mod_info.version_major	= 4;
+	pdata->rmi4_mod_info.version_minor	= 0;
+	/*
+	 * get manufacturer id, product_props, product info,
+	 * date code, tester id, serial num and product id (name)
+	 */
+	pdata->rmi4_mod_info.manufacturer_id	= std_queries[0];
+	pdata->rmi4_mod_info.product_props	= std_queries[1];
+	pdata->rmi4_mod_info.product_info[0]	= std_queries[2];
+	pdata->rmi4_mod_info.product_info[1]	= std_queries[3];
+	/* year - 2001-2032 */
+	pdata->rmi4_mod_info.date_code[0]	= std_queries[4] & MASK_5BIT;
+	/* month - 1-12 */
+	pdata->rmi4_mod_info.date_code[1]	= std_queries[5] & MASK_4BIT;
+	/* day - 1-31 */
+	pdata->rmi4_mod_info.date_code[2]	= std_queries[6] & MASK_5BIT;
+	pdata->rmi4_mod_info.tester_id = ((std_queries[7] & MASK_7BIT) << 8) |
+						(std_queries[8] & MASK_7BIT);
+	pdata->rmi4_mod_info.serial_number =
+		((std_queries[9] & MASK_7BIT) << 8) |
+				(std_queries[10] & MASK_7BIT);
+	memcpy(pdata->rmi4_mod_info.product_id_string, &std_queries[11], 10);
+
+	/* Check if this is a Synaptics device - report if not. */
+	if (pdata->rmi4_mod_info.manufacturer_id != 1)
+		dev_err(&client->dev, "%s: non-Synaptics mfg id:%d\n",
+			__func__, pdata->rmi4_mod_info.manufacturer_id);
+
+	list_for_each_entry(rfi, &pdata->rmi4_mod_info.support_fn_list, link)
+		data_sources += rfi->intr_src_count;
+	if (!data_sources)
+		return 0;
+
+	rmi = &(pdata->rmi4_mod_info);
+	/* Disable all the interrupt source before we enable
+	 * the supported function's interrupt source. */
+	for (i = 0; i < pdata->number_of_interrupt_register; i++)
+		rmi4_i2c_byte_write(pdata,
+				pdata->fn01_ctrl_base_addr + 1 + i, 0);
+	list_for_each_entry(rfi, &rmi->support_fn_list, link) {
+		if (rfi->ops->config) {
+			retval = rfi->ops->config(pdata, rfi);
+			if (retval < 0) {
+				dev_err(&client->dev,
+						"fn 0x%x config failed\n",
+						rfi->fn_number);
+				goto failed;
+			}
+		}
+		/* Turn on interrupt for this function data source. */
+		ctrl_offset = pdata->fn01_ctrl_base_addr + 1 +
+						rfi->index_to_intr_reg;
+		retval = rmi4_i2c_set_bits(pdata, ctrl_offset, rfi->intr_mask);
+		if (retval < 0) {
+			dev_err(&client->dev,
+					"fn 0x%x enable interrupt failed\n",
+					rfi->fn_number);
+			goto failed;
+		}
+	}
+	return 0;
+failed:
+	return retval;
+}
+
+static int do_sw_reset(struct rmi4_data *pdata)
+{
+	int	retval = 0;
+	struct rmi4_fn *rfi;
+	struct list_head *fn_list;
+	struct i2c_client *client = pdata->i2c_client;
+
+	fn_list = &(pdata->rmi4_mod_info.support_fn_list);
+	list_for_each_entry(rfi, fn_list, link) {
+		if (rfi->ops->fn_number == RMI4_DEV_CTL_FUNC_NUM) {
+			u16 addr = rfi->cmd_base_addr;
+			u8 cmd = RMI4_DEVICE_RESET_CMD;
+			dev_info(&client->dev, "%s: reset\n", __func__);
+			retval = rmi4_i2c_byte_write(pdata, addr, cmd);
+			if (retval < 0) {
+				dev_err(&client->dev, "reset cmd failed.\n");
+				return retval;
+			}
+			msleep(RMI4_RESET_DELAY);
+		}
+	}
+
+	return 0;
+}
+
+int rmi4_dev_ctl_irq_handler(struct rmi4_data *pdata, struct rmi4_fn *rfi)
+{
+	/* number of touch points - fingers down in this case */
+	int retval;
+	u16 data_base_addr;
+	u8 data;
+	struct i2c_client *client = pdata->i2c_client;
+
+	dev_info(&client->dev, "%s\n", __func__);
+
+	data_base_addr = rfi->data_base_addr;
+
+	retval = rmi4_i2c_block_read(pdata, data_base_addr,
+					&data, 1);
+	if (retval != 1) {
+		dev_err(&client->dev, "%s:read touch registers failed\n",
+								__func__);
+		return retval;
+	}
+
+	/* Check device status & act upon */
+	if ((data & 0x0F)) {
+		dev_info(&client->dev, "%s: reset & init!\n", __func__);
+		/* reset & init */
+		retval = do_sw_reset(pdata);
+		if (retval) {
+			dev_err(&client->dev, "%s: Soft reset failed!\n",
+					__func__);
+			return retval;
+		}
+		retval = rmi4_i2c_query_device(pdata);
+		if (retval) {
+			dev_err(&client->dev, "rmi4 query device failed\n");
+			return retval;
+		}
+	}
+
+	return 0;
+}
+
+static int rmi4_config_gpio(struct rmi4_data *pdata)
+{
+	int ret, int_gpio, rst_gpio;
+
+	int_gpio = pdata->board->int_gpio_number;
+	rst_gpio = pdata->board->rst_gpio_number;
+
+	/* if there's GPIO assigned for touch interrupt in the platform data */
+	if (int_gpio > -1) {
+		ret = gpio_request(int_gpio, "rmi4_int");
+		if (ret < 0) {
+			pr_err("Failed to request INT GPIO %d\n", int_gpio);
+			goto err_out;
+		}
+		ret = gpio_direction_input(int_gpio);
+		if (ret < 0) {
+			pr_err("Failed to config INT GPIO %d\n", int_gpio);
+			goto err_int;
+		}
+		ret = gpio_to_irq(int_gpio);
+		if (ret < 0) {
+			pr_err("Config GPIO %d to IRQ Error!\n", int_gpio);
+			goto err_int;
+		}
+		pdata->irq = ret;
+	}
+
+	ret = gpio_request(rst_gpio, "rmi4_rst");
+	if (ret < 0) {
+		pr_err("Failed to request RST GPIO %d\n", rst_gpio);
+		goto err_int;
+	}
+	ret = gpio_direction_output(rst_gpio, 1);
+	if (ret < 0) {
+		pr_err("Failed to config GPIO %d\n", rst_gpio);
+		goto err_rst;
+	}
+	gpio_set_value(rst_gpio, 1);
+	msleep(RMI4_RESET_DELAY);
+
+	return 0;
+
+err_rst:
+	gpio_free(rst_gpio);
+err_int:
+	gpio_free(int_gpio);
+err_out:
+	return ret;
+}
+
+#ifdef DEBUG
+/* sysfs entries for debug */
+static ssize_t attr_ctrl_reg_set(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	unsigned long val;
+	struct rmi4_fn *rfi;
+	struct list_head *fn_list;
+	struct rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+	if (kstrtoul(buf, 10, &val))
+		return -EINVAL;
+	fn_list = &(rmi4_data->rmi4_mod_info.support_fn_list);
+	list_for_each_entry(rfi, fn_list, link) {
+		if (rfi->ops->fn_number == rmi4_data->dbg_fn_num) {
+			rmi4_i2c_byte_write(rmi4_data,
+				rfi->ctrl_base_addr + rmi4_data->dbg_reg_addr,
+				(u8)val);
+			return size;
+		}
+	}
+	return -EINVAL;
+}
+
+static ssize_t attr_ctrl_reg_get(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	u8 val;
+	struct rmi4_fn *rfi;
+	struct list_head *fn_list;
+	struct rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+	fn_list = &(rmi4_data->rmi4_mod_info.support_fn_list);
+	list_for_each_entry(rfi, fn_list, link) {
+		if (rfi->ops->fn_number == rmi4_data->dbg_fn_num) {
+			rmi4_i2c_byte_read(rmi4_data,
+				rfi->ctrl_base_addr + rmi4_data->dbg_reg_addr,
+				&val);
+			return sprintf(buf, "%d(0x%x)\n", val, val);
+		}
+	}
+	return -EINVAL;
+}
+static DEVICE_ATTR(ctrl_reg, S_IRUSR | S_IWUSR,
+		attr_ctrl_reg_get, attr_ctrl_reg_set);
+
+static ssize_t attr_query_reg_set(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	unsigned long val;
+	struct rmi4_fn *rfi;
+	struct list_head *fn_list;
+	struct rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+	if (kstrtoul(buf, 10, &val))
+		return -EINVAL;
+	fn_list = &(rmi4_data->rmi4_mod_info.support_fn_list);
+	list_for_each_entry(rfi, fn_list, link) {
+		if (rfi->ops->fn_number == rmi4_data->dbg_fn_num) {
+			rmi4_i2c_byte_write(rmi4_data,
+				rfi->query_base_addr + rmi4_data->dbg_reg_addr,
+				(u8)val);
+			return size;
+		}
+	}
+	return -EINVAL;
+}
+
+static ssize_t attr_query_reg_get(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	u8 val;
+	struct rmi4_fn *rfi;
+	struct list_head *fn_list;
+	struct rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+	fn_list = &(rmi4_data->rmi4_mod_info.support_fn_list);
+	list_for_each_entry(rfi, fn_list, link) {
+		if (rfi->ops->fn_number == rmi4_data->dbg_fn_num) {
+			rmi4_i2c_byte_read(rmi4_data,
+				rfi->query_base_addr + rmi4_data->dbg_reg_addr,
+				&val);
+			return sprintf(buf, "%d(0x%x)\n", val, val);
+		}
+	}
+	return -EINVAL;
+}
+static DEVICE_ATTR(query_reg, S_IRUSR | S_IWUSR,
+		attr_query_reg_get, attr_query_reg_set);
+
+static ssize_t attr_data_reg_set(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	unsigned long val;
+	struct rmi4_fn *rfi;
+	struct list_head *fn_list;
+	struct rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+	if (kstrtoul(buf, 10, &val))
+		return -EINVAL;
+	fn_list = &(rmi4_data->rmi4_mod_info.support_fn_list);
+	list_for_each_entry(rfi, fn_list, link) {
+		if (rfi->ops->fn_number == rmi4_data->dbg_fn_num) {
+			rmi4_i2c_byte_write(rmi4_data,
+				rfi->data_base_addr + rmi4_data->dbg_reg_addr,
+				(u8)val);
+			return size;
+		}
+	}
+	return -EINVAL;
+}
+
+static ssize_t attr_data_reg_get(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	u8 val;
+	struct rmi4_fn *rfi;
+	struct list_head *fn_list;
+	struct rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+	fn_list = &(rmi4_data->rmi4_mod_info.support_fn_list);
+	list_for_each_entry(rfi, fn_list, link) {
+		if (rfi->ops->fn_number == rmi4_data->dbg_fn_num) {
+			rmi4_i2c_byte_read(rmi4_data,
+				rfi->data_base_addr + rmi4_data->dbg_reg_addr,
+				&val);
+			return sprintf(buf, "%d(0x%x)\n", val, val);
+		}
+	}
+	return -EINVAL;
+}
+static DEVICE_ATTR(data_reg, S_IRUSR | S_IWUSR,
+		attr_data_reg_get, attr_data_reg_set);
+
+static ssize_t attr_reg_addr_set(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	unsigned long val;
+	struct rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+	if (kstrtoul(buf, 10, &val))
+		return -EINVAL;
+	rmi4_data->dbg_reg_addr = val;
+
+	return size;
+}
+
+static ssize_t attr_reg_addr_get(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%d(0x%x)\n",
+			rmi4_data->dbg_reg_addr, rmi4_data->dbg_reg_addr);
+}
+static DEVICE_ATTR(reg_addr, S_IRUSR | S_IWUSR,
+			attr_reg_addr_get, attr_reg_addr_set);
+
+static ssize_t attr_fn_num_set(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	unsigned long val;
+	struct rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	rmi4_data->dbg_fn_num = val;
+
+	return size;
+}
+
+static ssize_t attr_fn_num_get(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+	return sprintf(buf, "0x%x\n", rmi4_data->dbg_fn_num);
+}
+static DEVICE_ATTR(fn_num, S_IRUSR | S_IWUSR,
+		attr_fn_num_get, attr_fn_num_set);
+
+static ssize_t attr_reg_set(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	unsigned long val;
+	struct rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+	if (kstrtoul(buf, 10, &val))
+		return -EINVAL;
+	rmi4_i2c_byte_write(rmi4_data, rmi4_data->dbg_reg_addr, (u8)val);
+	return size;
+}
+
+static ssize_t attr_reg_get(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	u8 val;
+	struct rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+	rmi4_i2c_byte_read(rmi4_data, rmi4_data->dbg_reg_addr, &val);
+	return sprintf(buf, "%d(0x%x)\n", val, val);
+}
+static DEVICE_ATTR(reg, S_IRUSR | S_IWUSR, attr_reg_get, attr_reg_set);
+
+static struct attribute *rmi4_attrs[] = {
+	&dev_attr_ctrl_reg.attr,
+	&dev_attr_query_reg.attr,
+	&dev_attr_data_reg.attr,
+	&dev_attr_reg_addr.attr,
+	&dev_attr_fn_num.attr,
+	&dev_attr_reg.attr,
+	NULL
+};
+
+static struct attribute_group rmi4_attr_dbg = {
+	.name = "rmi4",
+	.attrs = rmi4_attrs
+};
+#endif
+
+void rmi4_suspend(struct rmi4_data *pdata)
+{
+	int retval, i;
+	bool need_sync = false;
+	struct i2c_client *client = pdata->i2c_client;
+
+	dev_info(&client->dev, "Enter %s, touch counter=%ld, key counter=%ld",
+			__func__, pdata->touch_counter, pdata->key_counter);
+	disable_irq(pdata->irq);
+
+	rmi4_i2c_byte_read(pdata, pdata->fn01_ctrl_base_addr,
+			&pdata->fn01_ctrl_reg_saved);
+
+	retval = rmi4_i2c_set_bits(pdata,
+			pdata->fn01_ctrl_base_addr, F01_CTRL0_SLEEP);
+	if (retval < 0)
+		dev_err(&client->dev, "set F01_CTRL0_SLEEP failed\n");
+
+	if (pdata->regulator)
+		regulator_disable(pdata->regulator);
+
+	/* swipe all the touch points before suspend */
+	for (i = 0; i < MAX_FINGERS; i++) {
+		if (pdata->finger_status[i] == F11_PRESENT) {
+			need_sync = true;
+			input_mt_slot(pdata->input_ts_dev, i);
+			input_mt_report_slot_state(pdata->input_ts_dev,
+					MT_TOOL_FINGER, false);
+			pdata->finger_status[i] = F11_NO_FINGER;
+		}
+	}
+	if (need_sync)
+		input_sync(pdata->input_ts_dev);
+
+	pdata->touch_counter = 0;
+	pdata->key_counter = 0;
+}
+
+void rmi4_resume(struct rmi4_data *pdata)
+{
+	struct i2c_client *client = pdata->i2c_client;
+	u8 intr_status[4];
+
+	dev_info(&client->dev, "Enter %s", __func__);
+
+	if (pdata->regulator) {
+		/*need wait to stable if regulator first output*/
+		int needwait = !regulator_is_enabled(pdata->regulator);
+		if (regulator_enable(pdata->regulator))
+			dev_err(&client->dev, "Failed to enable regulator\n");
+
+		if (needwait)
+			msleep(50);
+	}
+	enable_irq(pdata->irq);
+	rmi4_i2c_byte_write(pdata, pdata->fn01_ctrl_base_addr,
+			pdata->fn01_ctrl_reg_saved);
+
+	/* Clear interrupts */
+	rmi4_i2c_block_read(pdata,
+			pdata->fn01_data_base_addr + 1,
+			intr_status, pdata->number_of_interrupt_register);
+}
+
+static ssize_t early_suspend_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+	if (!strncmp(buf, EARLY_SUSPEND_ON, EARLY_SUSPEND_STATUS_LEN))
+		rmi4_suspend(rmi4_data);
+	else if (!strncmp(buf, EARLY_SUSPEND_OFF, EARLY_SUSPEND_STATUS_LEN))
+		rmi4_resume(rmi4_data);
+
+	return count;
+}
+
+static DEVICE_EARLY_SUSPEND_ATTR(early_suspend_store);
+
+#ifdef CONFIG_DEBUG_FS
+static struct rmi4_fn *find_ana_rfi(struct rmi4_data *pdata)
+{
+	struct rmi4_fn *rfi;
+	struct device *dev = &pdata->i2c_client->dev;
+
+	list_for_each_entry(rfi, &pdata->rmi4_mod_info.support_fn_list, link)
+		if (rfi->fn_number == RMI4_ANALOG_FUNC_NUM)
+			break;
+	if (!rfi || rfi->fn_number != RMI4_ANALOG_FUNC_NUM) {
+		dev_err(dev, "%s: rfi not found !\n", __func__);
+		rfi = NULL;
+	}
+
+	return rfi;
+}
+
+static int rmi4_debugfs_raw_sensor_data_show(struct seq_file *seq, void *unused)
+{
+	return 0;
+}
+
+static int rmi4_debugfs_raw_sensor_data_open(struct inode *inode,
+		struct file *file)
+{
+	return single_open(file, rmi4_debugfs_raw_sensor_data_show,
+			inode->i_private);
+}
+
+#define  rmi4_debugfs_error_check(dev, ret, addr, size)					\
+do {												\
+	if (ret < 0) {										\
+		dev_err(dev, "%s: Could not write data to 0x%x\n",				\
+				__func__, addr);						\
+		return ret;									\
+	}											\
+	dev_err(dev, "%s: Unexpected number written to 0x%x; Wrote: %d\t Expected: %d\n",	\
+			__func__, addr, ret, (int)size);					\
+	return -EIO;										\
+} while (0)
+
+static ssize_t rmi4_debugfs_raw_sensor_data_read(struct file *file,
+			char __user *buf, size_t count, loff_t *ppos)
+{
+	struct seq_file *seq;
+	struct rmi4_data *rmi4_data;
+	struct rmi4_fn *rfi;
+	struct rmi4_ana_data *ana_data;
+	struct device *dev;
+	unsigned char tx, rx;
+	unsigned short pixel;
+	int rst_gpio, ret, i;
+	size_t size;
+
+	seq = (struct seq_file *)file->private_data;
+	if (!seq) {
+		pr_err("rmi4_ts: Failed to get seq_file\n");
+		return -EFAULT;
+	}
+
+	rmi4_data = (struct rmi4_data *)seq->private;
+	if (!rmi4_data) {
+		pr_err("rmi4_ts: Failed to get private data\n");
+		return -EFAULT;
+	}
+
+	rfi = find_ana_rfi(rmi4_data);
+
+	if (!rfi) {
+		pr_err("rmi4_ts: Failed to get F54 rmi4 function\n");
+		return -EFAULT;
+	}
+
+	ana_data = rfi->fn_data;
+
+	dev = &rmi4_data->i2c_client->dev;
+
+	if (ana_data->status == 0) {
+
+		/* Reset using GPIO to get fresh data */
+		rst_gpio = rmi4_data->board->rst_gpio_number;
+
+		gpio_set_value(rst_gpio, 0);
+		msleep(RMI4_RESET_DELAY);
+		gpio_set_value(rst_gpio, 1);
+		/* Longer delay is needed here */
+		msleep(RMI4_RESET_DELAY * 6);
+
+		ana_data->buffer = kzalloc(ana_data->size, GFP_KERNEL);
+
+		if (!ana_data->buffer) {
+			dev_err(dev, "%s Failed to create buffer for sensor",
+					__func__);
+			ret = -ENOMEM;
+			goto dbgfs_exit;
+		}
+
+		/* Supports only Report type 3 (Raw 16-bit Image report) as of now */
+		ana_data->reporttype = F54_RAW_16BIT_IMAGE;
+
+		/* Set the Report Type 3 in the first block DATA registers F54_AD_Data0 */
+
+		dev_dbg(dev, "%s: Writing 0x%x: 0x%x\n", __func__, rfi->data_base_addr,
+					ana_data->reporttype);
+		ret = rmi4_i2c_byte_write(rmi4_data, rfi->data_base_addr,
+						ana_data->reporttype);
+		if (ret != 2)
+			rmi4_debugfs_error_check(dev, ret, rfi->data_base_addr,
+					sizeof(ana_data->reporttype));
+
+		/* Fn $54 command GET_REPORT */
+		ana_data->cmd = GET_REPORT;
+
+		/* Write the command to the command register */
+		dev_dbg(dev, "%s; Writing 0x%x: 0x%x\n", __func__,
+			rfi->cmd_base_addr, ana_data->cmd);
+		ret = rmi4_i2c_byte_write(rmi4_data, rfi->cmd_base_addr,
+					ana_data->cmd);
+		if (ret != 2)
+			rmi4_debugfs_error_check(dev, ret, rfi->cmd_base_addr, count);
+
+		ana_data->status = -EAGAIN;
+		ret = -EAGAIN;
+
+	} else if (ana_data->status == -EAGAIN) {
+
+		ret = -EAGAIN;
+
+	} else if (ana_data->status == ana_data->size) {
+
+		ret = 0;
+		if (*ppos > 0) {
+			ana_data->status = 0;
+			goto dbgfs_exit;
+		}
+
+		/* Create a buffer to hold the readable form of data. It
+		* consists of an array of numbers in hexadecimal format.
+		* Each row represents the number of receivers and each
+		* column represents the number of transmitters. Size of
+		* each value is entirely depended on the hardware. In this
+		* specific case it is 16 bits
+		* i.e  0x07df 0x7df ...
+		*      ... ... ...
+		*      ... ...
+		* size = (2 + RAW_VALUE_SIZE * 2 + 2) * Tx * RX + TX
+		*/
+		size = 2 + RAW_VALUE_SIZE * 2 + 2;
+		size *=	ana_data->tx * ana_data->rx;
+		size += ana_data->tx;
+		retbuf = kzalloc(size, GFP_KERNEL);
+
+		if (!retbuf) {
+			dev_err(dev, "%s: Failed to create return buffer\n",
+				__func__);
+			ret = -ENOMEM;
+			kfree(ana_data->buffer);
+			goto dbgfs_exit;
+		}
+
+		size = 0;
+		i = 0;
+
+		/* Processing raw binary data to readable form */
+		for (tx = 0; tx < ana_data->tx; tx++) {
+			for (rx = 0; rx < ana_data->rx; rx++) {
+				pixel = 0;
+				pixel = 0xff & ana_data->buffer[i++];
+				pixel = (ana_data->buffer[i++] << 8) | pixel;
+				size += sprintf(retbuf + size, "0x%04x  ", pixel);
+			}
+			size += sprintf(retbuf + size, "\n");
+		}
+
+		if (copy_to_user(buf, retbuf, size)) {
+			dev_err(dev, "%s: copy_to_user failed\n",
+				__func__);
+			ret = -EFAULT;
+		}
+
+		kfree(ana_data->buffer);
+		kfree(retbuf);
+		retbuf = NULL;
+		ana_data->buffer = NULL;
+
+		if (ret < 0)
+			goto dbgfs_exit;
+
+		*ppos = *ppos + size;
+		ret = size;
+	} else {
+		ret = -EFAULT;
+	}
+
+dbgfs_exit:
+	return ret;
+
+}
+
+static const struct file_operations rmi4_debugfs_raw_senor_data_fops = {
+	.owner			= THIS_MODULE,
+	.open			= rmi4_debugfs_raw_sensor_data_open,
+	.read			= rmi4_debugfs_raw_sensor_data_read,
+	.release		= single_release,
+};
+
+static void rmi4_debugfs_remove(void)
+{
+	debugfs_remove_recursive(rmi4_debugfs_root);
+}
+
+static int rmi4_debugfs_create(struct rmi4_data *rmi4_data)
+{
+	struct dentry *entry;
+	struct rmi4_fn *rfi = find_ana_rfi(rmi4_data);
+
+	if (!rfi)
+		return -EFAULT;
+
+	rmi4_debugfs_root = debugfs_create_dir(DRIVER_NAME, NULL);
+	if (!rmi4_debugfs_root) {
+		dev_warn(&rmi4_data->i2c_client->dev,
+			"%s: debugfs_create_dir failed\n", DRIVER_NAME);
+		return -ENOMEM;
+	} else {
+		entry = debugfs_create_file("raw_sensor_data",
+				S_IRUGO | S_IWUSR, rmi4_debugfs_root,
+				(void *)rmi4_data,
+				&rmi4_debugfs_raw_senor_data_fops);
+
+		if (!entry)
+			goto err_dbgfs;
+
+		return 0;
+
+err_dbgfs:
+		dev_warn(&rmi4_data->i2c_client->dev,
+			"%s: Creating debugfs entries failed !\n", DRIVER_NAME);
+		rmi4_debugfs_remove();
+		return -ENOMEM;
+	}
+}
+#endif /* CONFIG_DEBUG_FS */
+
+/**
+ * rmi4_probe() - Initialze the i2c-client touchscreen driver
+ * @client: i2c client structure pointer
+ * @dev_id:i2c device id pointer
+ *
+ * This function will allocate and initialize the instance
+ * data and request the irq and set the instance data as the clients
+ * platform data then register the physical driver which will do a scan of
+ * the rmi4 Physical Device Table and enumerate any rmi4 functions that
+ * have data sources associated with them.
+ */
+static int rmi4_probe(struct i2c_client *client,
+					const struct i2c_device_id *dev_id)
+{
+	int retval;
+	u8 intr_status[4];
+	struct rmi4_data *rmi4_data;
+	const struct rmi4_platform_data *platformdata =
+						client->dev.platform_data;
+	const struct rmi4_touch_calib *calib;
+
+	if (!platformdata) {
+		dev_err(&client->dev, "%s: no platform data\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Allocate and initialize the instance data for this client */
+	rmi4_data = kzalloc(sizeof(struct rmi4_data), GFP_KERNEL);
+	if (!rmi4_data) {
+		dev_err(&client->dev, "%s: no memory allocated\n", __func__);
+		return -ENOMEM;
+	}
+
+	rmi4_data->input_ts_dev = input_allocate_device();
+	if (rmi4_data->input_ts_dev == NULL) {
+		dev_err(&client->dev, "ts input device alloc failed\n");
+		retval = -ENOMEM;
+		goto err_input_ts;
+	}
+	rmi4_data->input_key_dev = input_allocate_device();
+	if (rmi4_data->input_key_dev == NULL) {
+		dev_err(&client->dev, "key input device alloc failed\n");
+		retval = -ENOMEM;
+		goto err_input_key;
+	}
+
+	if (platformdata->regulator_en && platformdata->regulator_name) {
+		rmi4_data->regulator = regulator_get(&client->dev,
+					platformdata->regulator_name);
+		if (IS_ERR(rmi4_data->regulator)) {
+			dev_err(&client->dev, "get regulator %s failed\n",
+					platformdata->regulator_name);
+			retval = PTR_ERR(rmi4_data->regulator);
+			goto err_regulator;
+		}
+		retval = regulator_enable(rmi4_data->regulator);
+		if (retval < 0) {
+			dev_err(&client->dev,
+				"enable regulator %s failed with ret %d\n",
+				platformdata->regulator_name, retval);
+			regulator_put(rmi4_data->regulator);
+			goto err_regulator;
+		}
+	}
+
+	/*
+	 * Copy i2c_client pointer into RTID's i2c_client pointer for
+	 * later use in rmi4_read, rmi4_write, etc.
+	 */
+	rmi4_data->i2c_client		= client;
+	/* So we set the page correctly the first time */
+	rmi4_data->current_page		= MASK_16BIT;
+	rmi4_data->board		= platformdata;
+	rmi4_data->irq			= client->irq;
+
+	mutex_init(&(rmi4_data->rmi4_page_mutex));
+
+	retval = rmi4_config_gpio(rmi4_data);
+	if (retval < 0) {
+		dev_err(&client->dev, "GPIO config failed!\n");
+		goto err_config_gpio;
+	}
+
+	retval = do_init_reset(rmi4_data);
+	if (retval)
+		dev_warn(&client->dev, "Init reset failed! Soldiering on!\n");
+	calib = &rmi4_data->board->calib[rmi4_data->touch_type];
+	/*
+	 * Register physical driver - this will call the detect function that
+	 * will then scan the device and determine the supported
+	 * rmi4 functions.
+	 */
+	retval = rmi4_i2c_query_device(rmi4_data);
+	if (retval) {
+		dev_err(&client->dev, "rmi4 query device failed\n");
+		goto err_query_dev;
+	}
+
+	/* Store the instance data in the i2c_client */
+	i2c_set_clientdata(client, rmi4_data);
+
+	/*initialize the input device parameters */
+	rmi4_data->input_ts_dev->name	= DRIVER_NAME;
+	rmi4_data->input_ts_dev->phys	= "Synaptics_Clearpad";
+	rmi4_data->input_ts_dev->id.bustype = BUS_I2C;
+	rmi4_data->input_ts_dev->dev.parent = &client->dev;
+	input_set_drvdata(rmi4_data->input_ts_dev, rmi4_data);
+
+	rmi4_data->input_key_dev->name	= calib->key_dev_name;
+	rmi4_data->input_key_dev->phys	= "Synaptics_Clearpad";
+	rmi4_data->input_key_dev->id.bustype = BUS_I2C;
+	rmi4_data->input_key_dev->dev.parent = &client->dev;
+	input_set_drvdata(rmi4_data->input_key_dev, rmi4_data);
+
+	/* Initialize the function handlers for rmi4 */
+	set_bit(EV_SYN, rmi4_data->input_ts_dev->evbit);
+	set_bit(EV_ABS, rmi4_data->input_ts_dev->evbit);
+	set_bit(EV_KEY, rmi4_data->input_key_dev->evbit);
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3,6,0))
+	input_mt_init_slots(rmi4_data->input_ts_dev, MAX_FINGERS, 0);
+#else
+	input_mt_init_slots(rmi4_data->input_ts_dev, MAX_FINGERS);
+#endif
+	input_set_abs_params(rmi4_data->input_ts_dev,
+		ABS_MT_POSITION_X, 0, rmi4_data->sensor_max_x, 0, 0);
+	input_set_abs_params(rmi4_data->input_ts_dev,
+		ABS_MT_POSITION_Y, 0, rmi4_data->sensor_max_y, 0, 0);
+	input_set_abs_params(rmi4_data->input_ts_dev,
+			ABS_MT_TOUCH_MAJOR, 0, MAX_TOUCH_MAJOR, 0, 0);
+	input_set_abs_params(rmi4_data->input_ts_dev,
+			ABS_MT_TOUCH_MINOR, 0, MAX_TOUCH_MINOR, 0, 0);
+
+	/* Clear interrupts */
+	retval = rmi4_i2c_block_read(rmi4_data,
+			rmi4_data->fn01_data_base_addr + 1,
+			intr_status, rmi4_data->number_of_interrupt_register);
+	if (retval < 0) {
+		dev_err(&client->dev, "Clear interrupt failed\n");
+		goto err_clear_irq;
+	}
+	retval = request_threaded_irq(rmi4_data->irq, NULL,
+					rmi4_irq_thread,
+					platformdata->irq_type,
+					DRIVER_NAME, rmi4_data);
+	if (retval < 0) {
+		dev_err(&client->dev,
+			"Unable to get attn irq %d\n", rmi4_data->irq);
+		goto err_req_irq;
+	}
+
+	retval = input_register_device(rmi4_data->input_ts_dev);
+	if (retval) {
+		dev_err(&client->dev, "ts input register failed\n");
+		goto err_reg_input_ts;
+	}
+
+	retval = input_register_device(rmi4_data->input_key_dev);
+	if (retval) {
+		dev_err(&client->dev, "key input register failed\n");
+		goto err_reg_input_key;
+	}
+
+	device_create_file(&client->dev, &dev_attr_early_suspend);
+
+#ifdef DEBUG
+	retval = sysfs_create_group(&client->dev.kobj, &rmi4_attr_dbg);
+	if (retval < 0) {
+		dev_err(&client->dev, "rmi4 sysfs register failed\n");
+		goto err_reg_input;
+	}
+#endif
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	rmi4_data->es.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1;
+	rmi4_data->es.suspend = rmi4_early_suspend;
+	rmi4_data->es.resume = rmi4_late_resume;
+	register_early_suspend(&rmi4_data->es);
+#endif
+
+	register_early_suspend_device(&client->dev);
+
+#ifdef CONFIG_DEBUG_FS
+	if (rmi4_debugfs_create(rmi4_data))
+		dev_warn(&client->dev, "%s: Debugfs support failed !\n", DRIVER_NAME);
+#endif
+
+	return retval;
+
+#ifdef DEBUG
+err_reg_input:
+	input_unregister_device(rmi4_data->input_key_dev);
+	rmi4_data->input_key_dev = NULL;
+#endif
+err_reg_input_key:
+	input_unregister_device(rmi4_data->input_ts_dev);
+	rmi4_data->input_ts_dev = NULL;
+err_reg_input_ts:
+	free_irq(rmi4_data->irq, rmi4_data);
+err_req_irq:
+err_clear_irq:
+err_query_dev:
+	gpio_free(rmi4_data->board->int_gpio_number);
+	gpio_free(rmi4_data->board->rst_gpio_number);
+	rmi4_free_funcs(rmi4_data);
+err_config_gpio:
+	if (rmi4_data->regulator) {
+		regulator_disable(rmi4_data->regulator);
+		regulator_put(rmi4_data->regulator);
+	}
+err_regulator:
+	if (rmi4_data->input_key_dev)
+		input_free_device(rmi4_data->input_key_dev);
+err_input_key:
+	if (rmi4_data->input_ts_dev)
+		input_free_device(rmi4_data->input_ts_dev);
+err_input_ts:
+	kfree(rmi4_data);
+
+	return retval;
+}
+
+/**
+ * rmi4_remove() - Removes the i2c-client touchscreen driver
+ * @client: i2c client structure pointer
+ *
+ * This function uses to remove the i2c-client
+ * touchscreen driver and returns integer.
+ */
+static int rmi4_remove(struct i2c_client *client)
+{
+	struct rmi4_data *rmi4_data = i2c_get_clientdata(client);
+	const struct rmi4_platform_data *pdata = rmi4_data->board;
+
+	free_irq(rmi4_data->irq, rmi4_data);
+	gpio_free(pdata->int_gpio_number);
+	gpio_free(pdata->rst_gpio_number);
+
+	device_remove_file(&client->dev, &dev_attr_early_suspend);
+
+#ifdef DEBUG
+	sysfs_remove_group(&client->dev.kobj, &rmi4_attr_dbg);
+#endif
+#ifdef CONFIG_DEBUG_FS
+	rmi4_debugfs_remove();
+#endif
+	input_unregister_device(rmi4_data->input_ts_dev);
+	input_unregister_device(rmi4_data->input_key_dev);
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	unregister_early_suspend(&rmi4_data->es);
+#endif
+	unregister_early_suspend_device(&client->dev);
+	if (rmi4_data->regulator) {
+		regulator_disable(rmi4_data->regulator);
+		regulator_put(rmi4_data->regulator);
+	}
+	rmi4_free_funcs(rmi4_data);
+	kfree(rmi4_data);
+
+	return 0;
+}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+void rmi4_early_suspend(struct early_suspend *h)
+{
+	struct rmi4_data *pdata  = container_of(h, struct rmi4_data, es);
+
+	rmi4_suspend(pdata);
+}
+
+void rmi4_late_resume(struct early_suspend *h)
+{
+	struct rmi4_data *pdata  = container_of(h, struct rmi4_data, es);
+
+	rmi4_resume(pdata);
+}
+#endif
+
+static const struct i2c_device_id rmi4_id_table[] = {
+	{ S3202_DEV_ID, 0 },
+	{ S3402_DEV_ID, 0 },
+	{ S3400_CGS_DEV_ID, 0 },
+	{ S3400_IGZO_DEV_ID, 0 },
+	{ },
+};
+MODULE_DEVICE_TABLE(i2c, rmi4_id_table);
+
+static struct i2c_driver rmi4_driver = {
+	.driver = {
+		.name	=	DRIVER_NAME,
+		.owner	=	THIS_MODULE,
+	},
+	.probe		=	rmi4_probe,
+	.remove		=	rmi4_remove,
+	.id_table	=	rmi4_id_table,
+};
+
+static int __init rmi4_init(void)
+{
+	return i2c_add_driver(&rmi4_driver);
+}
+
+static void __exit rmi4_exit(void)
+{
+	i2c_del_driver(&rmi4_driver);
+}
+
+module_init(rmi4_init);
+module_exit(rmi4_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("naveen.gaddipati@stericsson.com, js.ha@stericsson.com");
+MODULE_DESCRIPTION("synaptics rmi4 i2c touch Driver");
+MODULE_ALIAS("i2c:synaptics_rmi4_ts");
diff --git a/drivers/external_drivers/drivers/input/touchscreen/rmi4/synaptics_i2c_rmi4.h b/drivers/external_drivers/drivers/input/touchscreen/rmi4/synaptics_i2c_rmi4.h
new file mode 100644
index 0000000..1f609e1
--- /dev/null
+++ b/drivers/external_drivers/drivers/input/touchscreen/rmi4/synaptics_i2c_rmi4.h
@@ -0,0 +1,510 @@
+/**
+ *
+ * Synaptics Register Mapped Interface (RMI4) I2C Physical Layer Driver.
+ * Copyright (c) 2007-2010, Synaptics Incorporated
+ *
+ * Author: Js HA <js.ha@stericsson.com> for ST-Ericsson
+ * Author: Naveen Kumar G <naveen.gaddipati@stericsson.com> for ST-Ericsson
+ * Copyright 2010 (c) ST-Ericsson AB
+ */
+/*
+ * This file is licensed under the GPL2 license.
+ *
+ *#############################################################################
+ * GPL
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ *
+ *#############################################################################
+ */
+
+#ifndef _SYNAPTICS_RMI4_H_
+#define _SYNAPTICS_RMI4_H_
+
+#define RMI4_TOUCHPAD_FUNC_NUM      0x11
+#define RMI4_TOUCHPAD_F12_FUNC_NUM  0x12
+#define RMI4_BUTTON_FUNC_NUM        0x1a
+#define RMI4_DEV_CTL_FUNC_NUM       0x01
+#define RMI4_ANALOG_FUNC_NUM        0x54
+#define RMI4_FLASH_FW_FUNC_NUM      0x34
+#define RMI4_FW_VERSION             0x4
+#define RMI4_PAGE_SIZE              0x100
+#define RMI4_PAGE_SELECT_REG        0xff
+#define RMI4_MAX_PAGE               0xff
+#define RMI4_RESET_DELAY            50
+
+#define PDT_START_SCAN_LOCATION     0x00E9
+#define PDT_END_SCAN_LOCATION       0x000A
+#define PDT_ENTRY_SIZE              0x0006
+#define PDT_PROPERTIES_LOCATION     0x00EF
+
+#define RMI4_DEVICE_RESET_CMD       (0x01)
+#define RMI4_SLEEP_MODE_NORMAL      (0x00)
+#define RMI4_END_OF_PDT(id)         ((id) == 0x00 || (id) == 0xff)
+#define MAX_FINGERS                 10
+
+struct rmi4_fn_ops;
+/**
+ * struct rmi4_fn_desc - contains the function descriptor information
+ * @query_base_addr: base address for query
+ * @cmd_base_addr: base address for command
+ * @ctrl_base_addr: base address for control
+ * @data_base_addr: base address for data
+ * @intr_src_count: count for the interrupt source
+ * @fn_number: function number
+ *
+ * This structure is used to gives the function descriptor information
+ * of the particular functionality.
+ */
+struct rmi4_fn_desc {
+	u8	query_base_addr;
+	u8	cmd_base_addr;
+	u8	ctrl_base_addr;
+	u8	data_base_addr;
+	u8	intr_src_count:3;
+	u8	reserved_1:2;
+	u8	func_version:2;
+	u8	reserved_2:1;
+	u8	fn_number;
+} __packed;
+
+/**
+ * struct rmi4_fn - contains the function information
+ * @query_base_addr: base address for query with page number
+ * @cmd_base_addr: base address for command with page number
+ * @ctrl_base_addr: base address for control with page number
+ * @data_base_addr: base address for data
+ * @intr_src_count: count for the interrupt source
+ * @fn_number: function number
+ * @num_of_data_points: number of fingers touched
+ * @size_of_data_register_block: data register block size
+ * @index_to_intr_reg: index for interrupt register
+ * @intr_mask: interrupt mask value
+ * @link: linked list for function descriptors
+ * @ops: rmi4 function's operation methods
+ * @fn_data: function's specific data
+ */
+struct rmi4_fn {
+	u16 query_base_addr;
+	u16 cmd_base_addr;
+	u16 ctrl_base_addr;
+	u16 data_base_addr;
+	unsigned char intr_src_count;
+	unsigned char fn_number;
+	unsigned char num_of_data_points;
+	unsigned char size_of_data_register_block;
+	unsigned char index_to_intr_reg;
+	unsigned char intr_mask;
+	unsigned char data1_offset;
+	struct list_head    link;
+	struct rmi4_fn_ops  *ops;
+	void                *fn_data;
+	int data_size;
+};
+
+struct synaptics_rmi4_finger_state {
+	int x;
+	int y;
+	int wx;
+	int wy;
+	unsigned char status;
+};
+
+/**
+ * struct rmi4_device_info - contains the rmi4 device information
+ * @version_major: protocol major version number
+ * @version_minor: protocol minor version number
+ * @manufacturer_id: manufacturer identification byte
+ * @product_props: product properties information
+ * @product_info: product info array
+ * @date_code: device manufacture date
+ * @tester_id: tester id array
+ * @serial_number: serial number for that device
+ * @product_id_string: product id for the device
+ * @support_fn_list: linked list for device information
+ *
+ * This structure gives information about the number of data sources and
+ * the number of data registers associated with the function.
+ */
+struct rmi4_device_info {
+	unsigned int		version_major;
+	unsigned int		version_minor;
+	unsigned char		manufacturer_id;
+	unsigned char		product_props;
+	unsigned char		product_info[2];
+	unsigned char		date_code[3];
+	unsigned short		tester_id;
+	unsigned short		serial_number;
+	unsigned char		product_id_string[11];
+	struct list_head	support_fn_list;
+};
+
+/**
+ * struct rmi4_data - contains the rmi4 device data
+ * @rmi4_mod_info: structure variable for rmi4 device info
+ * @input_ts_dev: pointer for input touch device
+ * @input_key_dev: pointer for input key device
+ * @i2c_client: pointer for i2c client
+ * @board: constant pointer for touch platform data
+ * @rmi4_page_mutex: mutex for rmi4 page
+ * @current_page: variable for integer
+ * @number_of_interrupt_register: interrupt registers count
+ * @fn01_ctrl_base_addr: control base address for fn01
+ * @fn01_query_base_addr: query base address for fn01
+ * @fn01_data_base_addr: data base address for fn01
+ * @sensor_max_x: sensor maximum x value
+ * @sensor_max_y: sensor maximum y value
+ * @regulator: pointer to the regulator structure
+ * @irq: irq number
+ * @es: early suspend hooks
+ *
+ * This structure gives the device data information.
+ */
+struct rmi4_data {
+	const struct rmi4_platform_data *board;
+	struct rmi4_device_info rmi4_mod_info;
+	struct input_dev	*input_ts_dev;
+	struct input_dev	*input_key_dev;
+	struct i2c_client	*i2c_client;
+	struct mutex		rmi4_page_mutex;
+	unsigned int		number_of_interrupt_register;
+	u16		        fn01_ctrl_base_addr;
+	u16		        fn01_query_base_addr;
+	u16                     fn01_data_base_addr;
+	u8			fn01_ctrl_reg_saved;
+	int			current_page;
+	int			sensor_max_x;
+	int			sensor_max_y;
+	int			touch_type;
+	struct regulator	*regulator;
+	int			irq;
+	int			finger_status[MAX_FINGERS];
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	struct early_suspend	es;
+#endif
+	unsigned long		touch_counter;
+	unsigned long		key_counter;
+#ifdef DEBUG
+	u16 dbg_reg_addr;
+	unsigned short dbg_fn_num;
+#endif
+#ifdef CONFIG_DEBUG_FS
+	u8 num_rx;
+	u8 num_tx;
+#endif
+};
+
+/**
+ * struct rmi4_fn_ops - contains the function's operation methods
+ */
+struct rmi4_fn_ops {
+	unsigned char fn_number;
+	int (*detect)(struct rmi4_data *pdata, struct rmi4_fn *rfi,
+			unsigned int intr_cnt);
+	int (*config)(struct rmi4_data *pdata, struct rmi4_fn *rfi);
+	int (*irq_handler)(struct rmi4_data *pdata, struct rmi4_fn *rfi);
+	void (*remove)(struct rmi4_fn *rfi);
+};
+
+/**
+ * struct rmi4_button_data - contains the button function data
+ * @num_of_bttns: number of buttons that supported
+ * @status: button down/up status
+ * @bttns_map: key code for each button that reported by input device
+ */
+struct rmi4_button_data {
+	int num_of_bttns;
+	bool *status;
+	u8 *bttns_map;
+};
+
+/**
+ * struct rmi4_touchpad_data - contains the touch function data
+ * @buffer: buffer to store finger registers
+ * @size: size of the buffer
+ */
+struct rmi4_touchpad_data {
+	u8 *buffer;
+	int size;
+};
+
+#ifdef CONFIG_DEBUG_FS
+/* Supported Fn $54 commands */
+#define GET_REPORT		1
+
+/*Supported Fn $54  Report types*/
+#define F54_RAW_16BIT_IMAGE	3
+
+/**
+ * struct rmi4_ana_data - contains analog data reporting function data
+ * @i2c_client: pointer for i2c client (for sysfs)
+ * @rx: number of receiver electrodes
+ * @tx: number of transmitter electrodes
+ * @cmd: f54 command
+ * @reporttype: f54 report type
+ * @buffer: buffer to store values
+ * @status: status of the operation
+ * @size: size of the buffer
+ */
+struct rmi4_ana_data {
+	struct i2c_client *i2c_client;
+	u8 rx;
+	u8 tx;
+	u8 cmd;
+	u32  reporttype;
+	u8 *buffer;
+	int status;
+	int size;
+};
+#endif
+
+union pdt_properties {
+	struct {
+		u8 reserved_1:6;
+		u8 has_bsr:1;
+		u8 reserved_2:1;
+	} __packed;
+	u8 regs[1];
+};
+
+union f01_basic_queries {
+	struct {
+		u8 manufacturer_id;
+
+		u8 custom_map:1;
+		u8 non_compliant:1;
+		u8 has_lts:1;
+		u8 has_sensor_id:1;
+		u8 has_charger_input:1;
+		u8 has_adjustable_doze:1;
+		u8 has_adjustable_doze_holdoff:1;
+		u8 has_product_properties_2:1;
+
+		u8 productinfo_1:7;
+		u8 q2_bit_7:1;
+		u8 productinfo_2:7;
+		u8 q3_bit_7:1;
+
+		u8 year:5;
+		u8 month:4;
+		u8 day:5;
+		u8 cp1:1;
+		u8 cp2:1;
+		u8 wafer_id1_lsb;
+		u8 wafer_id1_msb;
+		u8 wafer_id2_lsb;
+		u8 wafer_id2_msb;
+		u8 wafer_id3_lsb;
+	} __packed;
+	u8 regs[11];
+};
+
+union f01_device_status {
+	struct {
+		u8 status_code:4;
+		u8 reserved:2;
+		u8 flash_prog:1;
+		u8 unconfigured:1;
+	} __packed;
+	u8 regs[1];
+};
+
+union f01_device_control_0 {
+	struct {
+		u8 sleep_mode:2;
+		u8 nosleep:1;
+		u8 reserved:2;
+		u8 charger_input:1;
+		u8 report_rate:1;
+		u8 configured:1;
+	} __packed;
+	u8 regs[1];
+};
+
+union f34_query_regs {
+	struct {
+		u16 reg_map:1;
+		u16 unlocked:1;
+		u16 has_config_id:1;
+		u16 reserved:5;
+		u16 block_size;
+		u16 fw_block_count;
+		u16 config_block_count;
+	} __packed;
+	struct {
+		u8 regs[7];
+		u16 address;
+	};
+};
+
+union f34_control_status {
+	struct {
+		u8 command:4;
+		u8 status:3;
+		u8 program_enabled:1;
+	} __packed;
+	struct {
+		u8 regs[1];
+		u16 address;
+	};
+};
+
+union f34_control_status_v1 {
+	struct {
+		u8 command:6;
+		u8 reserved_2:2;
+		u8 status:6;
+		u8 reserved_1:1;
+		u8 program_enabled:1;
+	} __packed;
+	struct {
+		u8 regs[2];
+		u16 address;
+	};
+};
+
+struct f12_query_5 {
+	u8 size_of_query6;
+	u8 ctrl0_is_present:1;
+	u8 ctrl1_is_present:1;
+	u8 ctrl2_is_present:1;
+	u8 ctrl3_is_present:1;
+	u8 ctrl4_is_present:1;
+	u8 ctrl5_is_present:1;
+	u8 ctrl6_is_present:1;
+	u8 ctrl7_is_present:1;
+	u8 ctrl8_is_present:1;
+	u8 ctrl9_is_present:1;
+	u8 ctrl10_is_present:1;
+	u8 ctrl11_is_present:1;
+	u8 ctrl12_is_present:1;
+	u8 ctrl13_is_present:1;
+	u8 ctrl14_is_present:1;
+	u8 ctrl15_is_present:1;
+	u8 ctrl16_is_present:1;
+	u8 ctrl17_is_present:1;
+	u8 ctrl18_is_present:1;
+	u8 ctrl19_is_present:1;
+	u8 ctrl20_is_present:1;
+	u8 ctrl21_is_present:1;
+	u8 ctrl22_is_present:1;
+	u8 ctrl23_is_present:1;
+	u8 ctrl24_is_present:1;
+	u8 ctrl25_is_present:1;
+	u8 ctrl26_is_present:1;
+	u8 ctrl27_is_present:1;
+	u8 ctrl28_is_present:1;
+	u8 ctrl29_is_present:1;
+	u8 ctrl30_is_present:1;
+	u8 ctrl31_is_present:1;
+} __packed;
+
+struct f12_query_8 {
+	u8 size_of_query9;
+	u8 data0_is_present:1;
+	u8 data1_is_present:1;
+	u8 data2_is_present:1;
+	u8 data3_is_present:1;
+	u8 data4_is_present:1;
+	u8 data5_is_present:1;
+	u8 data6_is_present:1;
+	u8 data7_is_present:1;
+} __packed;
+
+struct f12_ctrl_8 {
+	u8 max_x_coord_lsb;
+	u8 max_x_coord_msb;
+	u8 max_y_coord_lsb;
+	u8 max_y_coord_msb;
+	u8 rx_pitch_lsb;
+	u8 rx_pitch_msb;
+	u8 tx_pitch_lsb;
+	u8 tx_pitch_msb;
+	u8 low_rx_clip;
+	u8 high_rx_clip;
+	u8 low_tx_clip;
+	u8 high_tx_clip;
+	u8 num_of_rx;
+	u8 num_of_tx;
+};
+
+struct f12_ctrl_20 {
+	u8 x_suppression;
+	u8 y_suppression;
+	u8 report_always:1;
+	u8 reserved:7;
+} __packed;
+
+struct f12_ctrl_23 {
+	u8 obj_type_enable;
+	u8 max_reported_objects;
+};
+
+struct synaptics_rmi4_f12_finger_data {
+	unsigned char object_type_and_status;
+	unsigned char x_lsb;
+	unsigned char x_msb;
+	unsigned char y_lsb;
+	unsigned char y_msb;
+#ifdef REPORT_2D_Z
+	unsigned char z;
+#endif
+	unsigned char wx;
+	unsigned char wy;
+};
+
+enum finger_state {
+	F11_NO_FINGER = 0,
+	F11_PRESENT = 1,
+	F11_INACCURATE = 2,
+	F11_RESERVED = 3
+};
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+void rmi4_early_suspend(struct early_suspend *h);
+void rmi4_late_resume(struct early_suspend *h);
+#endif
+
+int rmi4_i2c_block_read(struct rmi4_data *pdata, u16 addr, u8 *val, int size);
+int rmi4_i2c_byte_read(struct rmi4_data *pdata, u16 addr, u8 *val);
+int rmi4_i2c_block_write(struct rmi4_data *pdata, u16 addr, u8 *val, int size);
+int rmi4_i2c_byte_write(struct rmi4_data *pdata, u16 addr, u8 data);
+
+int rmi4_dev_ctl_detect(struct rmi4_data *pdata,
+				struct rmi4_fn *rfi, unsigned int cnt);
+int rmi4_dev_ctl_irq_handler(struct rmi4_data *pdata, struct rmi4_fn *rfi);
+
+int rmi4_touchpad_detect(struct rmi4_data *pdata,
+				struct rmi4_fn *rfi, unsigned int cnt);
+int rmi4_touchpad_config(struct rmi4_data *pdata, struct rmi4_fn *rfi);
+int rmi4_touchpad_irq_handler(struct rmi4_data *pdata, struct rmi4_fn *rfi);
+void rmi4_touchpad_remove(struct rmi4_fn *rfi);
+
+int rmi4_touchpad_f12_detect(struct rmi4_data *pdata,
+				struct rmi4_fn *rfi, unsigned int cnt);
+int rmi4_touchpad_f12_config(struct rmi4_data *pdata, struct rmi4_fn *rfi);
+int rmi4_touchpad_f12_irq_handler(struct rmi4_data *pdata, struct rmi4_fn *rfi);
+void rmi4_touchpad_f12_remove(struct rmi4_fn *rfi);
+
+int rmi4_button_detect(struct rmi4_data *pdata,
+				struct rmi4_fn *rfi, unsigned int cnt);
+int rmi4_button_irq_handler(struct rmi4_data *pdata, struct rmi4_fn *rfi);
+void rmi4_button_remove(struct rmi4_fn *);
+
+int rmi4_fw_update(struct rmi4_data *pdata,
+		struct rmi4_fn_desc *f01_pdt, struct rmi4_fn_desc *f34_pdt);
+
+#ifdef CONFIG_DEBUG_FS
+int rmi4_ana_data_detect(struct rmi4_data *pdata,
+				struct rmi4_fn *rfi, unsigned int intr_cnt);
+int rmi4_ana_data_irq_handler(struct rmi4_data *pdata, struct rmi4_fn *rfi);
+void rmi4_ana_data_remove(struct rmi4_fn *rfi);
+#endif
+#endif
diff --git a/drivers/external_drivers/drivers/main_before_4.3_r b/drivers/external_drivers/drivers/main_before_4.3_r
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/drivers/external_drivers/drivers/main_before_4.3_r
diff --git a/drivers/external_drivers/drivers/misc/Kconfig b/drivers/external_drivers/drivers/misc/Kconfig
new file mode 100644
index 0000000..cbab3286
--- /dev/null
+++ b/drivers/external_drivers/drivers/misc/Kconfig
@@ -0,0 +1,41 @@
+config SCU_LOGGING
+	tristate "Intel SCU fabric debug driver"
+	default n
+	---help---
+	 Driver that enable for debugging Intel SCU firmware fabric related error.
+
+config UUID
+	tristate "get uuid"
+	default n
+	---help---
+	 Driver for get UUID.
+
+config EMMC_IPANIC
+	bool "Intel kernel panic diagnostics driver FOR EMMC"
+	default n
+	---help---
+	  Driver which handles kernel panics and attempts to write
+	  critical debugging data to a dedicated partition on EMMC.
+
+config EMMC_IPANIC_PLABEL
+	string "Intel kernel panic driver (EMMC_IPANIC) partition label"
+	depends on EMMC_IPANIC
+	default "panic"
+	---help---
+	  Set the default mmc partition label for EMMC_IPANIC driver.
+
+config KCT_DAEMON
+	tristate "Intel Kernel Crash Tool daemon"
+	default n
+	---help---
+	 Driver that enables the kct daemon to handle Intel crashtool events.
+
+config FPS_THROTTLE
+	tristate "register a new cooling device for FPS throttling"
+	depends on THERMAL
+	---help---
+	  register a new cooling device for FPS throttling.
+
+source "drivers/external_drivers/drivers/misc/bcm-lpm/Kconfig"
+source "drivers/external_drivers/drivers/misc/rawio/Kconfig"
+source "drivers/external_drivers/drivers/misc/tp2e/Kconfig"
diff --git a/drivers/external_drivers/drivers/misc/Makefile b/drivers/external_drivers/drivers/misc/Makefile
new file mode 100644
index 0000000..2059187
--- /dev/null
+++ b/drivers/external_drivers/drivers/misc/Makefile
@@ -0,0 +1,12 @@
+ccflags-y = -I$(src)
+
+intel_fabric_logging-objs  := intel_fw_logging.o intel_fabricerr_status.o
+
+obj-$(CONFIG_BCM_BT_LPM)        += bcm-lpm/
+obj-$(CONFIG_SCU_LOGGING)	+= intel_fabric_logging.o
+obj-$(CONFIG_UUID)		+= uuid.o
+obj-$(CONFIG_EMMC_IPANIC)	+= emmc_ipanic.o
+obj-$(CONFIG_RAWIO)             += rawio/
+obj-$(CONFIG_KCT_DAEMON)	+= kct_daemon.o
+obj-y				+= tp2e/
+obj-$(CONFIG_FPS_THROTTLE)	+= fps_throttle.o
diff --git a/drivers/external_drivers/drivers/misc/bcm-lpm/Kconfig b/drivers/external_drivers/drivers/misc/bcm-lpm/Kconfig
new file mode 100644
index 0000000..c4e5be2
--- /dev/null
+++ b/drivers/external_drivers/drivers/misc/bcm-lpm/Kconfig
@@ -0,0 +1,13 @@
+config BCM_BT_LPM
+	tristate "Broadcom Bluetooth Low Power Mode"
+	depends on SERIAL_MFD_HSU || SERIAL_MFD_HSU_EXT
+	default n
+	help
+	   Select this module for Broadcom Bluetooth low power management.
+
+config BCM_BT_LPM_DBG
+	bool "Set this flag to disable LPM, for debug purpose."
+	depends on BCM_BT_LPM
+	default n
+	help
+	   Set this flag to disable LPM, for debug purpose.
diff --git a/drivers/external_drivers/drivers/misc/bcm-lpm/Makefile b/drivers/external_drivers/drivers/misc/bcm-lpm/Makefile
new file mode 100644
index 0000000..6dd43fd
--- /dev/null
+++ b/drivers/external_drivers/drivers/misc/bcm-lpm/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_BCM_BT_LPM) += bcm_bt_lpm.o
diff --git a/drivers/external_drivers/drivers/misc/bcm-lpm/bcm_bt_lpm.c b/drivers/external_drivers/drivers/misc/bcm-lpm/bcm_bt_lpm.c
new file mode 100644
index 0000000..7e62352
--- /dev/null
+++ b/drivers/external_drivers/drivers/misc/bcm-lpm/bcm_bt_lpm.c
@@ -0,0 +1,613 @@
+/*
+ * Bluetooth Broadcomm  and low power control via GPIO
+ *
+ *  Copyright (C) 2011 Google, Inc.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/hrtimer.h>
+#include <linux/irq.h>
+#include <linux/rfkill.h>
+#include <linux/platform_device.h>
+#include <linux/wakelock.h>
+#include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
+#include <linux/delay.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_mid_hsu.h>
+
+#ifndef CONFIG_ACPI
+#include <asm/bcm_bt_lpm.h>
+#else
+#include <linux/acpi.h>
+#include <linux/acpi_gpio.h>
+
+enum {
+	gpio_wake_acpi_idx,
+	gpio_enable_bt_acpi_idx,
+	host_wake_acpi_idx
+};
+#endif
+
+
+static struct rfkill *bt_rfkill;
+static bool bt_enabled;
+
+#ifndef BCM_BT_LPM_DBG
+static bool host_wake_uart_enabled;
+static bool wake_uart_enabled;
+static bool int_handler_enabled;
+#endif
+
+
+#ifndef BCM_BT_LPM_DBG
+static void activate_irq_handler(void);
+#endif
+
+struct bcm_bt_lpm {
+#ifndef BCM_BT_LPM_DBG
+	unsigned int gpio_wake;
+	unsigned int gpio_host_wake;
+	unsigned int int_host_wake;
+#endif
+	unsigned int gpio_enable_bt;
+#ifndef BCM_BT_LPM_DBG
+	int wake;
+	int host_wake;
+#endif
+	struct hrtimer enter_lpm_timer;
+	ktime_t enter_lpm_delay;
+
+	struct device *tty_dev;
+#ifndef BCM_BT_LPM_DBG
+	struct wake_lock wake_lock;
+	char wake_lock_name[100];
+#endif
+	int port;
+} bt_lpm;
+
+#ifndef BCM_BT_LPM_DBG
+static void uart_enable(struct device *tty)
+{
+	pr_debug("%s: runtime get\n", __func__);
+	/* Tell PM runtime to power on the tty device and block s0i3 */
+	pm_runtime_get(tty);
+}
+
+static void uart_disable(struct device *tty)
+{
+	pr_debug("%s: runtime put\n", __func__);
+	/* Tell PM runtime to release tty device and allow s0i3 */
+	pm_runtime_put(tty);
+}
+#endif
+
+#ifdef CONFIG_ACPI
+static int bcm_bt_lpm_acpi_probe(struct platform_device *pdev)
+{
+	struct acpi_gpio_info info;
+	acpi_handle handle;
+	acpi_integer port;
+
+	/*
+	 * Handle ACPI specific initializations.
+	 */
+	dev_dbg(&pdev->dev, "ACPI specific probe\n");
+
+	bt_lpm.gpio_enable_bt = acpi_get_gpio_by_index(&pdev->dev,
+						gpio_enable_bt_acpi_idx, &info);
+	if (!gpio_is_valid(bt_lpm.gpio_enable_bt)) {
+		pr_err("%s: gpio %d for gpio_enable_bt not valid\n", __func__,
+							bt_lpm.gpio_enable_bt);
+		return -EINVAL;
+	}
+
+#ifndef BCM_BT_LPM_DBG
+	bt_lpm.gpio_wake = acpi_get_gpio_by_index(&pdev->dev,
+						gpio_wake_acpi_idx, &info);
+	if (!gpio_is_valid(bt_lpm.gpio_wake)) {
+		pr_err("%s: gpio %d for gpio_wake not valid\n", __func__,
+							bt_lpm.gpio_wake);
+		return -EINVAL;
+	}
+
+	bt_lpm.gpio_host_wake = acpi_get_gpio_by_index(&pdev->dev,
+						host_wake_acpi_idx, &info);
+	if (!gpio_is_valid(bt_lpm.gpio_host_wake)) {
+		pr_err("%s: gpio %d for gpio_host_wake not valid\n", __func__,
+							bt_lpm.gpio_host_wake);
+		return -EINVAL;
+	}
+
+	bt_lpm.int_host_wake = gpio_to_irq(bt_lpm.gpio_host_wake);
+
+	pr_debug("%s: gpio_wake %d, gpio_host_wake %d, int_host_wake %d\n",
+							__func__,
+							bt_lpm.gpio_wake,
+							bt_lpm.gpio_host_wake,
+							bt_lpm.int_host_wake);
+#endif
+
+	handle = DEVICE_ACPI_HANDLE(&pdev->dev);
+
+	if (ACPI_FAILURE(acpi_evaluate_integer(handle, "UART", NULL, &port))) {
+		dev_err(&pdev->dev, "Error evaluating UART port number\n");
+
+		/* FIXME - Force port 0 if the information is missing from the
+		 * ACPI table.
+		 * That will be removed once the ACPI tables will all have been
+		 * updated.
+		 */
+		 port = 0;
+	}
+
+	bt_lpm.port = port;
+	pr_debug("%s: UART port %d\n", __func__, bt_lpm.port);
+
+	return 0;
+}
+#endif /* CONFIG_ACPI */
+
+static int bcm43xx_bt_rfkill_set_power(void *data, bool blocked)
+{
+	/* rfkill_ops callback. Turn transmitter on when blocked is false */
+
+	if (!blocked) {
+#ifndef BCM_BT_LPM_DBG
+		gpio_set_value(bt_lpm.gpio_wake, 1);
+		/*
+		* Delay advice by BRCM is min 2.5ns,
+		* setting it between 10 and 50us for more confort
+		*/
+		usleep_range(10, 50);
+#endif
+		gpio_set_value(bt_lpm.gpio_enable_bt, 1);
+		pr_debug("%s: turn BT on\n", __func__);
+	} else {
+		gpio_set_value(bt_lpm.gpio_enable_bt, 0);
+		pr_debug("%s: turn BT off\n", __func__);
+	}
+
+	bt_enabled = !blocked;
+
+	return 0;
+}
+
+static const struct rfkill_ops bcm43xx_bt_rfkill_ops = {
+	.set_block = bcm43xx_bt_rfkill_set_power,
+};
+
+#ifndef BCM_BT_LPM_DBG
+static void set_wake_locked(int wake)
+{
+	bt_lpm.wake = wake;
+
+	if (!wake)
+		wake_unlock(&bt_lpm.wake_lock);
+
+	if (!wake_uart_enabled && wake) {
+		WARN_ON(!bt_lpm.tty_dev);
+		uart_enable(bt_lpm.tty_dev);
+	}
+
+	gpio_set_value(bt_lpm.gpio_wake, wake);
+
+	if (wake_uart_enabled && !wake) {
+		WARN_ON(!bt_lpm.tty_dev);
+		uart_disable(bt_lpm.tty_dev);
+	}
+	wake_uart_enabled = wake;
+}
+
+static enum hrtimer_restart enter_lpm(struct hrtimer *timer)
+{
+	pr_debug("%s\n", __func__);
+
+	set_wake_locked(0);
+
+	return HRTIMER_NORESTART;
+}
+
+
+static void update_host_wake_locked(int host_wake)
+{
+	if (host_wake == bt_lpm.host_wake)
+		return;
+
+	bt_lpm.host_wake = host_wake;
+
+	if (host_wake) {
+		wake_lock(&bt_lpm.wake_lock);
+		if (!host_wake_uart_enabled) {
+			WARN_ON(!bt_lpm.tty_dev);
+			uart_enable(bt_lpm.tty_dev);
+		}
+	} else  {
+		if (host_wake_uart_enabled) {
+			WARN_ON(!bt_lpm.tty_dev);
+			uart_disable(bt_lpm.tty_dev);
+		}
+		/*
+		 * Take a timed wakelock, so that upper layers can take it.
+		 * The chipset deasserts the hostwake lock, when there is no
+		 * more data to send.
+		 */
+		wake_lock_timeout(&bt_lpm.wake_lock, HZ/2);
+	}
+
+	host_wake_uart_enabled = host_wake;
+
+}
+
+static irqreturn_t host_wake_isr(int irq, void *dev)
+{
+	int host_wake;
+
+	host_wake = gpio_get_value(bt_lpm.gpio_host_wake);
+
+	pr_debug("%s: lpm %s\n", __func__, host_wake ? "off" : "on");
+
+	irq_set_irq_type(irq, host_wake ? IRQF_TRIGGER_FALLING :
+							IRQF_TRIGGER_RISING);
+
+	if (!bt_lpm.tty_dev) {
+		bt_lpm.host_wake = host_wake;
+		return IRQ_HANDLED;
+	}
+
+	update_host_wake_locked(host_wake);
+
+	return IRQ_HANDLED;
+}
+
+static void activate_irq_handler(void)
+{
+	int ret;
+
+	pr_debug("%s\n", __func__);
+
+	ret = request_irq(bt_lpm.int_host_wake, host_wake_isr,
+				IRQF_TRIGGER_RISING, "bt_host_wake", NULL);
+
+	if (ret < 0) {
+		pr_err("Error lpm request IRQ");
+		gpio_free(bt_lpm.gpio_wake);
+		gpio_free(bt_lpm.gpio_host_wake);
+	}
+}
+
+
+static void bcm_bt_lpm_wake_peer(struct device *dev)
+{
+	bt_lpm.tty_dev = dev;
+
+	/*
+	 * the irq is enabled after the first host wake up signal.
+	 * in the original code, the irq should be in levels but, since mfld
+	 * does not support them, irq is triggering with edges.
+	 */
+
+	if (!int_handler_enabled) {
+		int_handler_enabled = true;
+		activate_irq_handler();
+	}
+
+	hrtimer_try_to_cancel(&bt_lpm.enter_lpm_timer);
+
+	set_wake_locked(1);
+
+	hrtimer_start(&bt_lpm.enter_lpm_timer, bt_lpm.enter_lpm_delay,
+		HRTIMER_MODE_REL);
+
+}
+
+static int bcm_bt_lpm_init(struct platform_device *pdev)
+{
+	int ret;
+	struct device *tty_dev;
+
+	hrtimer_init(&bt_lpm.enter_lpm_timer, CLOCK_MONOTONIC,
+							HRTIMER_MODE_REL);
+	bt_lpm.enter_lpm_delay = ktime_set(1, 0);  /* 1 sec */
+	bt_lpm.enter_lpm_timer.function = enter_lpm;
+
+	bt_lpm.host_wake = 0;
+
+	if (bt_lpm.gpio_host_wake < 0) {
+		pr_err("Error bt_lpm.gpio_host_wake\n");
+		return -ENODEV;
+	}
+
+	ret = irq_set_irq_wake(bt_lpm.int_host_wake, 1);
+	if (ret < 0) {
+		pr_err("Error lpm set irq IRQ");
+		gpio_free(bt_lpm.gpio_wake);
+		gpio_free(bt_lpm.gpio_host_wake);
+		return ret;
+	}
+
+	tty_dev = intel_mid_hsu_set_wake_peer(bt_lpm.port,
+			bcm_bt_lpm_wake_peer);
+	if (!tty_dev) {
+		pr_err("Error no tty dev");
+		gpio_free(bt_lpm.gpio_wake);
+		gpio_free(bt_lpm.gpio_host_wake);
+		return -ENODEV;
+	}
+
+	snprintf(bt_lpm.wake_lock_name, sizeof(bt_lpm.wake_lock_name),
+			"BTLowPower");
+	wake_lock_init(&bt_lpm.wake_lock, WAKE_LOCK_SUSPEND,
+			 bt_lpm.wake_lock_name);
+
+	bcm_bt_lpm_wake_peer(tty_dev);
+	return 0;
+}
+#endif
+
+#ifndef CONFIG_ACPI
+static int bcm43xx_bluetooth_pdata_probe(struct platform_device *pdev)
+{
+	struct bcm_bt_lpm_platform_data *pdata = pdev->dev.platform_data;
+
+	if (pdata == NULL) {
+		pr_err("Cannot register bcm_bt_lpm drivers, pdata is NULL\n");
+		return -EINVAL;
+	}
+
+	if (!gpio_is_valid(pdata->gpio_enable)) {
+		pr_err("%s: gpio not valid\n", __func__);
+		return -EINVAL;
+	}
+
+#ifndef BCM_BT_LPM_DBG
+	if (!gpio_is_valid(pdata->gpio_wake) ||
+		!gpio_is_valid(pdata->gpio_host_wake)) {
+		pr_err("%s: gpio not valid\n", __func__);
+		return -EINVAL;
+	}
+
+
+	bt_lpm.gpio_wake = pdata->gpio_wake;
+	bt_lpm.gpio_host_wake = pdata->gpio_host_wake;
+	bt_lpm.int_host_wake = pdata->int_host_wake;
+#endif
+	bt_lpm.gpio_enable_bt = pdata->gpio_enable;
+	bt_lpm.port = pdata->port;
+
+	return 0;
+}
+#endif /* !CONFIG_ACPI */
+
+static int bcm43xx_bluetooth_probe(struct platform_device *pdev)
+{
+	bool default_state = true;	/* off */
+	int ret = 0;
+#ifndef BCM_BT_LPM_DBG
+	int_handler_enabled = false;
+#endif
+
+#ifdef CONFIG_ACPI
+	if (ACPI_HANDLE(&pdev->dev)) {
+		/*
+		 * acpi specific probe
+		 */
+		pr_debug("%s for ACPI device %s\n", __func__,
+							dev_name(&pdev->dev));
+		if (bcm_bt_lpm_acpi_probe(pdev) < 0)
+			ret = -EINVAL;
+	} else
+		ret = -ENODEV;
+#else
+	ret = bcm43xx_bluetooth_pdata_probe(pdev);
+#endif
+
+	if (ret < 0) {
+		pr_err("%s: Cannot register platform data\n", __func__);
+		goto err_data_probe;
+	}
+
+	ret = gpio_request(bt_lpm.gpio_enable_bt, pdev->name);
+	if (ret < 0) {
+		pr_err("%s: Unable to request gpio %d\n", __func__,
+							bt_lpm.gpio_enable_bt);
+		goto err_gpio_enable_req;
+	}
+
+	ret = gpio_direction_output(bt_lpm.gpio_enable_bt, 0);
+	if (ret < 0) {
+		pr_err("%s: Unable to set int direction for gpio %d\n",
+					__func__, bt_lpm.gpio_enable_bt);
+		goto err_gpio_enable_dir;
+	}
+
+#ifndef BCM_BT_LPM_DBG
+	ret = gpio_request(bt_lpm.gpio_host_wake, pdev->name);
+	if (ret < 0) {
+		pr_err("%s: Unable to request gpio %d\n",
+					__func__, bt_lpm.gpio_host_wake);
+		goto err_gpio_host_wake_req;
+	}
+
+	ret = gpio_direction_input(bt_lpm.gpio_host_wake);
+	if (ret < 0) {
+		pr_err("%s: Unable to set direction for gpio %d\n", __func__,
+							bt_lpm.gpio_host_wake);
+		goto err_gpio_host_wake_dir;
+	}
+
+	ret = gpio_request(bt_lpm.gpio_wake, pdev->name);
+	if (ret < 0) {
+		pr_err("%s: Unable to request gpio %d\n", __func__,
+							bt_lpm.gpio_wake);
+		goto err_gpio_wake_req;
+	}
+
+	ret =  gpio_direction_output(bt_lpm.gpio_wake, 0);
+	if (ret < 0) {
+		pr_err("%s: Unable to set direction for gpio %d\n", __func__,
+							bt_lpm.gpio_wake);
+		goto err_gpio_wake_dir;
+	}
+
+	pr_debug("%s: gpio_enable=%d, gpio_wake=%d, gpio_host_wake=%d\n",
+							__func__,
+							bt_lpm.gpio_enable_bt,
+							bt_lpm.gpio_wake,
+							bt_lpm.gpio_host_wake);
+#endif
+
+	bt_rfkill = rfkill_alloc("bcm43xx Bluetooth", &pdev->dev,
+				RFKILL_TYPE_BLUETOOTH, &bcm43xx_bt_rfkill_ops,
+				NULL);
+	if (unlikely(!bt_rfkill)) {
+		ret = -ENOMEM;
+		goto err_rfkill_alloc;
+	}
+
+	bcm43xx_bt_rfkill_set_power(NULL, default_state);
+	rfkill_init_sw_state(bt_rfkill, default_state);
+
+	ret = rfkill_register(bt_rfkill);
+	if (unlikely(ret))
+		goto err_rfkill_register;
+
+#ifndef BCM_BT_LPM_DBG
+	ret = bcm_bt_lpm_init(pdev);
+	if (ret)
+		goto err_lpm_init;
+#endif
+
+	return ret;
+
+#ifndef BCM_BT_LPM_DBG
+err_lpm_init:
+#endif
+	rfkill_unregister(bt_rfkill);
+err_rfkill_register:
+	rfkill_destroy(bt_rfkill);
+err_rfkill_alloc:
+#ifndef BCM_BT_LPM_DBG
+err_gpio_wake_dir:
+	gpio_free(bt_lpm.gpio_wake);
+err_gpio_wake_req:
+err_gpio_host_wake_dir:
+	gpio_free(bt_lpm.gpio_host_wake);
+err_gpio_host_wake_req:
+#endif
+err_gpio_enable_dir:
+	gpio_free(bt_lpm.gpio_enable_bt);
+err_gpio_enable_req:
+err_data_probe:
+	return ret;
+}
+
+static int bcm43xx_bluetooth_remove(struct platform_device *pdev)
+{
+	rfkill_unregister(bt_rfkill);
+	rfkill_destroy(bt_rfkill);
+
+	gpio_free(bt_lpm.gpio_enable_bt);
+#ifndef BCM_BT_LPM_DBG
+	gpio_free(bt_lpm.gpio_wake);
+	gpio_free(bt_lpm.gpio_host_wake);
+	wake_lock_destroy(&bt_lpm.wake_lock);
+#endif
+	return 0;
+}
+#ifndef BCM_BT_LPM_DBG
+int bcm43xx_bluetooth_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	int host_wake;
+
+	pr_debug("%s\n", __func__);
+
+	if (!bt_enabled)
+		return 0;
+
+	disable_irq(bt_lpm.int_host_wake);
+	host_wake = gpio_get_value(bt_lpm.gpio_host_wake);
+	if (host_wake) {
+		enable_irq(bt_lpm.int_host_wake);
+		pr_err("%s suspend error, gpio %d set\n", __func__,
+							bt_lpm.gpio_host_wake);
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+int bcm43xx_bluetooth_resume(struct platform_device *pdev)
+{
+	pr_debug("%s\n", __func__);
+
+	if (bt_enabled)
+		enable_irq(bt_lpm.int_host_wake);
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_ACPI
+static struct acpi_device_id bcm_id_table[] = {
+	/* ACPI IDs here */
+	{ "BCM2E1A", 0 },
+	{ "BCM2E3A", 0 },
+	{ "OBDA8723", 0},
+	{ }
+};
+
+MODULE_DEVICE_TABLE(acpi, bcm_id_table);
+#endif
+
+static struct platform_driver bcm43xx_bluetooth_platform_driver = {
+	.probe = bcm43xx_bluetooth_probe,
+	.remove = bcm43xx_bluetooth_remove,
+#ifndef BCM_BT_LPM_DBG
+	.suspend = bcm43xx_bluetooth_suspend,
+	.resume = bcm43xx_bluetooth_resume,
+#endif
+	.driver = {
+		   .name = "bcm_bt_lpm",
+		   .owner = THIS_MODULE,
+#ifdef CONFIG_ACPI
+		.acpi_match_table = ACPI_PTR(bcm_id_table),
+#endif
+		   },
+};
+
+static int __init bcm43xx_bluetooth_init(void)
+{
+	bt_enabled = false;
+	return platform_driver_register(&bcm43xx_bluetooth_platform_driver);
+}
+
+static void __exit bcm43xx_bluetooth_exit(void)
+{
+	platform_driver_unregister(&bcm43xx_bluetooth_platform_driver);
+}
+
+
+late_initcall(bcm43xx_bluetooth_init);
+module_exit(bcm43xx_bluetooth_exit);
+
+MODULE_ALIAS("platform:bcm43xx");
+MODULE_DESCRIPTION("bcm43xx_bluetooth");
+MODULE_AUTHOR("Jaikumar Ganesh <jaikumar@google.com>");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/external_drivers/drivers/misc/emmc_ipanic.c b/drivers/external_drivers/drivers/misc/emmc_ipanic.c
new file mode 100644
index 0000000..c7e1b39
--- /dev/null
+++ b/drivers/external_drivers/drivers/misc/emmc_ipanic.c
@@ -0,0 +1,1173 @@
+/*
+ * drivers/misc/emmc_ipanic.c
+ *
+ * Copyright (C) 2011 Intel Corp
+ * Author: dongxing.zhang@intel.com
+ * Author: jun.zhang@intel.com
+ * Author: chuansheng.liu@intel.com
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/wakelock.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/notifier.h>
+#include <linux/mmc/host.h>
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/preempt.h>
+#include <linux/pci.h>
+#include <linux/nmi.h>
+#include <linux/blkdev.h>
+#include <linux/genhd.h>
+#include <linux/panic_gbuffer.h>
+#include "emmc_ipanic.h"
+
+#include <linux/kmsg_dump.h>
+#include <asm/proto.h>
+
+static char *part_label = "";
+module_param(part_label, charp, 0);
+MODULE_PARM_DESC(part_label, "IPanic mmc partition device label (panic)");
+
+static u32 disable_emmc_ipanic;
+core_param(disable_emmc_ipanic, disable_emmc_ipanic, uint, 0644);
+
+static struct mmc_emergency_info emmc_info = {
+	.init = mmc_emergency_init,
+	.write = mmc_emergency_write,
+	.part_label = CONFIG_EMMC_IPANIC_PLABEL,
+};
+
+static unsigned char *ipanic_proc_entry_name[PROC_MAX_ENTRIES] = {
+	"emmc_ipanic_header",
+	"emmc_ipanic_console",
+	"emmc_ipanic_threads",
+	"emmc_ipanic_gbuffer"
+};
+
+static int in_panic;
+static struct emmc_ipanic_data drv_ctx;
+static struct work_struct proc_removal_work;
+static int log_offset[IPANIC_LOG_MAX];
+static int log_len[IPANIC_LOG_MAX];	/* sector count */
+static int log_size[IPANIC_LOG_MAX];	/* byte count */
+static size_t log_head[IPANIC_LOG_MAX];
+static size_t log_woff[IPANIC_LOG_MAX];
+static unsigned char last_chunk_buf[SECTOR_SIZE];
+static int last_chunk_buf_len;
+static DEFINE_MUTEX(drv_mutex);
+static void (*func_stream_emmc) (void);
+
+static struct kmsg_dumper ipanic_dumper;
+
+static void emmc_panic_erase(unsigned char *buffer, Sector *sect)
+{
+	struct emmc_ipanic_data *ctx = &drv_ctx;
+	struct mmc_emergency_info *emmc = ctx->emmc;
+	unsigned char *read_buf_ptr = buffer;
+	Sector new_sect;
+	int rc;
+
+	if (!emmc) {
+		pr_err("%s:invalid emmc infomation\n", __func__);
+		return;
+	}
+
+	if (!read_buf_ptr || !sect) {
+		sect = &new_sect;
+		if (!emmc->bdev) {
+			pr_err("%s:invalid emmc block device\n",
+				__func__);
+			goto out;
+		}
+		/* make sure the block device is open rw */
+		rc = blkdev_get(emmc->bdev, FMODE_READ | FMODE_WRITE, emmc_panic_erase);
+		if (rc < 0) {
+			pr_err("%s: blk_dev_get failed!\n", __func__);
+			goto out;
+		}
+
+		/*read panic header */
+		read_buf_ptr =
+		    read_dev_sector(emmc->bdev, emmc->start_block, sect);
+		if (!read_buf_ptr) {
+			pr_err("%s: read sector error(%llu)!\n",
+				__func__, (u64) emmc->start_block);
+			goto out;
+		}
+	}
+
+	/*write all zero to panic header */
+	lock_page(sect->v);
+	memset(read_buf_ptr, 0, SECTOR_SIZE);
+	set_page_dirty(sect->v);
+	unlock_page(sect->v);
+	sync_blockdev(emmc->bdev);
+
+	if (!read_buf_ptr)
+		put_dev_sector(*sect);
+out:
+	memset(&ctx->hdr, 0, SECTOR_SIZE);
+	return;
+}
+
+static int emmc_read(struct mmc_emergency_info *emmc, void *holder,
+		     char *buffer, off_t offset, int count, bool to_user)
+{
+	unsigned char *read_ptr;
+	unsigned int sector_no;
+	off_t sector_offset;
+	Sector sect;
+	int rc;
+
+	if (!emmc) {
+		pr_err("%s:invalid emmc infomation\n", __func__);
+		return 0;
+	}
+	if (!emmc->bdev) {
+		pr_err("%s:invalid emmc block device\n", __func__);
+		return 0;
+	}
+
+	sector_no = offset >> SECTOR_SIZE_SHIFT;
+	sector_offset = offset & (SECTOR_SIZE - 1);
+	if (sector_no >= emmc->block_count) {
+		pr_err("%s: reading an invalid address\n", __func__);
+		return -EINVAL;
+	}
+
+	/* make sure the block device is open rw */
+	rc = blkdev_get(emmc->bdev, FMODE_READ | FMODE_WRITE, holder);
+	if (rc < 0) {
+		pr_err("%s: blk_dev_get failed!\n", __func__);
+		return 0;
+	}
+
+	read_ptr = read_dev_sector(emmc->bdev, sector_no + emmc->start_block,
+				   &sect);
+	if (!read_ptr) {
+		put_dev_sector(sect);
+		return -EINVAL;
+	}
+	/* count and read_ptr are updated to match flash page size */
+	if (count + sector_offset > SECTOR_SIZE)
+		count = SECTOR_SIZE - sector_offset;
+
+	if (sector_offset)
+		read_ptr += sector_offset;
+
+	if (to_user) {
+		if (copy_to_user(buffer, read_ptr, count)) {
+			pr_err( "%s: Failed to copy buffer to User\n",
+				__func__);
+			return 0;
+		}
+	}
+	else
+		memcpy(buffer, read_ptr, count);
+
+	put_dev_sector(sect);
+
+	return count;
+}
+
+static ssize_t emmc_ipanic_gbuffer_proc_read(struct file *file, char __user *buffer,
+			     size_t count, loff_t *ppos)
+{
+	struct emmc_ipanic_data *ctx = &drv_ctx;
+	size_t log_len, log_head;
+	off_t log_off;
+	int rc;
+
+	if (!ctx) {
+		pr_err("%s:invalid panic handler\n", __func__);
+		return 0;
+	}
+
+	if (!count)
+		return 0;
+
+	mutex_lock(&drv_mutex);
+
+	log_off = ctx->curr.log_offset[IPANIC_LOG_GBUFFER];
+	log_len = ctx->curr.log_length[IPANIC_LOG_GBUFFER];
+	log_head = ctx->curr.log_head[IPANIC_LOG_GBUFFER];
+
+	if (*ppos >= log_len) {
+		mutex_unlock(&drv_mutex);
+		return 0;
+	}
+
+	if (*ppos < log_len - log_head) {
+		/* No overflow (log_head == 0)
+		 * or
+		 * overflow 2nd part buf (log_head = log_woff)
+		 * |-------w--------|
+		 *           off^
+		 *         |--------|
+		 */
+		log_off += log_head;
+		log_len -= log_head;
+	} else {
+		/* 1st part buf
+		 * |-------w--------|
+		 *   off^
+		 * |-------|
+		 */
+		*ppos -= (log_len - log_head);
+		log_len = log_head;
+	}
+
+	if ((*ppos + count) > log_len)
+		count = log_len - *ppos;
+
+	rc = emmc_read(ctx->emmc, emmc_ipanic_gbuffer_proc_read,
+			  buffer, log_off + *ppos, count, true);
+	if (rc <= 0) {
+		mutex_unlock(&drv_mutex);
+		pr_err("%s: emmc_read: invalid args: offset:0x%08llx, count:%zd",
+		       __func__, (u64)(log_off + *ppos), count);
+		return rc;
+	}
+
+	*ppos += rc;
+
+	mutex_unlock(&drv_mutex);
+
+	return rc;
+}
+
+static ssize_t emmc_ipanic_proc_read_by_log(struct file *file, char __user *buffer,
+			     size_t count, loff_t *ppos, int log)
+{
+	struct emmc_ipanic_data *ctx = &drv_ctx;
+	size_t file_length;
+	off_t file_offset;
+	int rc;
+
+	if (!ctx) {
+		pr_err("%s:invalid panic handler\n", __func__);
+		return 0;
+	}
+
+	if (!count)
+		return 0;
+
+	if (log < 0 || log > IPANIC_LOG_MAX) {
+		pr_err("%s: Bad log number (%d)\n", __func__, log);
+		return -EINVAL;
+	}
+
+	mutex_lock(&drv_mutex);
+
+	if (log == IPANIC_LOG_HEADER) {
+		file_length = ctx->hdr.log_size;
+		file_offset = offsetof(struct panic_header, panic);
+	}
+	else {
+		file_length = ctx->curr.log_length[log];
+		file_offset = ctx->curr.log_offset[log];
+	}
+
+	if (*ppos >= file_length) {
+		mutex_unlock(&drv_mutex);
+		return 0;
+	}
+
+	if ((*ppos + count) > file_length)
+		count = file_length - *ppos;
+
+	rc= emmc_read(ctx->emmc, emmc_ipanic_proc_read_by_log,
+		       buffer, file_offset + *ppos, count, true);
+	if (rc <= 0) {
+		mutex_unlock(&drv_mutex);
+		pr_err("%s: emmc_read: invalid args: offset:0x%08llx, count:%zd",
+		       __func__, (u64)(file_offset + *ppos), count);
+		return rc;
+	}
+
+	*ppos += rc;
+
+	mutex_unlock(&drv_mutex);
+
+	return rc;
+}
+
+static ssize_t emmc_ipanic_proc_read_hdr(struct file *file, char __user *buffer,
+			     size_t count, loff_t *ppos)
+{
+	return emmc_ipanic_proc_read_by_log(file, buffer, count, ppos, IPANIC_LOG_HEADER);
+}
+
+static ssize_t emmc_ipanic_proc_read0(struct file *file, char __user *buffer,
+			     size_t count, loff_t *ppos)
+{
+	return emmc_ipanic_proc_read_by_log(file, buffer, count, ppos, IPANIC_LOG_CONSOLE);
+}
+
+static ssize_t emmc_ipanic_proc_read1(struct file *file, char __user *buffer,
+			     size_t count, loff_t *ppos)
+{
+	return emmc_ipanic_proc_read_by_log(file, buffer, count, ppos, IPANIC_LOG_THREADS);
+}
+
+static void emmc_ipanic_remove_proc_work(struct work_struct *work)
+{
+	struct emmc_ipanic_data *ctx = &drv_ctx;
+	int log;
+
+	mutex_lock(&drv_mutex);
+	emmc_panic_erase(NULL, NULL);
+
+	for (log = 0; log < PROC_MAX_ENTRIES; log++) {
+		if (ctx->ipanic_proc_entry[log]) {
+			remove_proc_entry(ctx->ipanic_proc_entry_name
+					  [log], NULL);
+			ctx->ipanic_proc_entry[log] = NULL;
+		}
+	}
+	mutex_unlock(&drv_mutex);
+}
+
+static ssize_t emmc_ipanic_proc_write(struct file *file,
+					const char __user *buffer,
+					size_t count, loff_t *ppos)
+{
+	schedule_work(&proc_removal_work);
+	return count;
+}
+
+/* In section order inside panic partition : */
+static const struct file_operations ipanic_emmc_read_header_fops = {
+	.read = emmc_ipanic_proc_read_hdr,
+	.write = emmc_ipanic_proc_write,
+};
+
+static const struct file_operations ipanic_emmc0_fops = {
+	.read = emmc_ipanic_proc_read0,
+	.write = emmc_ipanic_proc_write,
+};
+
+static const struct file_operations ipanic_emmc1_fops = {
+	.read = emmc_ipanic_proc_read1,
+	.write = emmc_ipanic_proc_write,
+};
+
+static const struct file_operations ipanic_emmc_gbuffer_fops = {
+	.read = emmc_ipanic_gbuffer_proc_read,
+	.write = emmc_ipanic_proc_write
+};
+
+static void emmc_panic_notify_add(void)
+{
+	struct emmc_ipanic_data *ctx = &drv_ctx;
+	struct mmc_emergency_info *emmc;
+	unsigned char *read_buf_ptr;
+	Sector sect;
+	int rc, idx_log, idx_proc;
+	int proc_entry_created = 0;
+
+	if (!ctx) {
+		pr_err("%s:invalid panic handler\n", __func__);
+		return;
+	}
+
+	emmc = ctx->emmc;
+	if (!emmc) {
+		pr_err("%s:invalid emmc infomation\n", __func__);
+		goto out_err;
+	}
+
+	if (!emmc->bdev) {
+		pr_err("%s:invalid emmc block device\n", __func__);
+		goto out_err;
+	}
+
+	/* make sure the block device is open rw */
+	rc = blkdev_get(emmc->bdev, FMODE_READ | FMODE_WRITE, emmc_panic_notify_add);
+	if (rc < 0) {
+		pr_err("%s: blk_dev_get failed!\n", __func__);
+		goto out_err;
+	}
+
+	/* read panic header */
+	read_buf_ptr = read_dev_sector(emmc->bdev, emmc->start_block, &sect);
+	if (!read_buf_ptr) {
+		pr_err("%s: read sector error(%llu)!\n", __func__,
+			(u64) emmc->start_block);
+		return;
+	}
+
+	memcpy(&ctx->hdr, read_buf_ptr, sizeof(struct panic_header));
+
+	if (ctx->hdr.magic != PANIC_MAGIC) {
+		pr_info("%s: bad magic %x, no data available\n",
+			__func__, ctx->hdr.magic);
+		emmc_panic_erase(read_buf_ptr, &sect);
+		goto put_sector;
+	}
+
+	pr_info("%s: Data available in panic partition\n", __func__);
+
+	if (ctx->hdr.version != PHDR_VERSION) {
+		pr_err("%s: Version mismatch (%d != %d)\n",
+			__func__, ctx->hdr.version, PHDR_VERSION);
+		emmc_panic_erase(read_buf_ptr, &sect);
+		goto put_sector;
+	}
+
+	/* Create proc entry for the panic header */
+	ctx->ipanic_proc_entry[PROC_HEADER_INDEX] =
+		proc_create(ctx->ipanic_proc_entry_name
+			[PROC_HEADER_INDEX], S_IFREG | S_IRUGO, NULL,
+			&ipanic_emmc_read_header_fops);
+
+	if (!ctx->ipanic_proc_entry[PROC_HEADER_INDEX])
+		pr_err("%s: failed creating proc file\n", __func__);
+	else {
+		proc_entry_created = 1;
+		pr_info("%s: proc entry created: %s\n", __func__,
+			ctx->ipanic_proc_entry_name[PROC_HEADER_INDEX]);
+	}
+
+	/* read log_info to retrieve block numbers and offsets */
+	read_buf_ptr = read_dev_sector(emmc->bdev, emmc->start_block+1, &sect);
+	if (!read_buf_ptr) {
+		pr_err("%s: read sector error(%llu)!\n", __func__,
+			(u64)emmc->start_block + 1);
+		return;
+	}
+
+	memcpy(&ctx->curr, read_buf_ptr, sizeof(struct log_info));
+
+	/* Log files other than header */
+	for (idx_log = 0; idx_log < IPANIC_LOG_MAX; idx_log++) {
+
+		pr_info("%s: log file %u(%u, %u)\n", __func__, idx_log,
+			ctx->curr.log_offset[idx_log],
+			ctx->curr.log_length[idx_log]);
+
+		/* Skip empty file. */
+		if (ctx->curr.log_length[idx_log] == 0) {
+			pr_info("%s: empty log file %u\n", __func__, idx_log);
+			continue;
+		}
+
+		/* Create proc entry for console, threads and gbuffer log. */
+		if (idx_log == IPANIC_LOG_CONSOLE) {
+			idx_proc = PROC_CONSOLE_INDEX;
+			ctx->ipanic_proc_entry[PROC_CONSOLE_INDEX] =
+				proc_create(ctx->ipanic_proc_entry_name
+					[PROC_CONSOLE_INDEX], S_IFREG | S_IRUGO,
+					NULL,
+					&ipanic_emmc0_fops);
+		} else if (idx_log == IPANIC_LOG_THREADS) {
+			idx_proc = PROC_THREADS_INDEX;
+			ctx->ipanic_proc_entry[PROC_THREADS_INDEX] =
+				proc_create(ctx->ipanic_proc_entry_name
+					[PROC_THREADS_INDEX], S_IFREG | S_IRUGO,
+					NULL,
+					&ipanic_emmc1_fops);
+		} else if (idx_log == IPANIC_LOG_GBUFFER) {
+			idx_proc = PROC_GBUFFER_INDEX;
+			ctx->ipanic_proc_entry[PROC_GBUFFER_INDEX] =
+				proc_create(ctx->ipanic_proc_entry_name
+					[PROC_GBUFFER_INDEX], S_IFREG | S_IRUGO,
+					NULL,
+					&ipanic_emmc_gbuffer_fops);
+		} else {
+			/* No proc entry for this index */
+			idx_proc = 0;
+			continue;
+		}
+		if (!ctx->ipanic_proc_entry[idx_proc])
+			pr_err("%s: failed creating proc file\n",
+				__func__);
+		else {
+			proc_set_size(ctx->ipanic_proc_entry[idx_proc],
+				ctx->curr.log_length[idx_log]);
+			proc_entry_created = 1;
+			pr_info("%s: proc entry created: %s\n",
+				__func__,
+				ctx->ipanic_proc_entry_name[idx_proc]);
+		}
+	}
+
+	if (!proc_entry_created)
+		emmc_panic_erase(read_buf_ptr, &sect);
+
+put_sector:
+	put_dev_sector(sect);
+	return;
+out_err:
+	ctx->emmc = NULL;
+}
+
+static void emmc_panic_notify_remove(void)
+{
+	struct emmc_ipanic_data *ctx = &drv_ctx;
+
+	if (ctx->emmc && ctx->emmc->part_dev) {
+		put_device(ctx->emmc->part_dev);
+		ctx->emmc->bdev = NULL;
+	}
+
+	ctx->emmc = NULL;
+}
+
+static int emmc_ipanic_writeflashpage(struct mmc_emergency_info *emmc,
+				      loff_t to, const u_char *buf)
+{
+	int rc;
+	size_t wlen = SECTOR_SIZE;
+
+	if (to >= emmc->start_block + emmc->block_count) {
+		pr_emerg("%s: panic partition is full.\n", __func__);
+		return 0;
+	}
+
+	rc = emmc->write((char *)buf, (unsigned int)to);
+	if (rc) {
+		pr_emerg("%s: Error writing data to flash (%d)\n",
+			__func__, rc);
+		return rc;
+	}
+
+	return wlen;
+}
+
+/*
+ * Writes the contents of the console to the specified offset in flash.
+ * Returns number of bytes written
+ */
+static int emmc_ipanic_write_console(struct mmc_emergency_info *emmc,
+				     unsigned int off, int *actual_size)
+{
+	struct emmc_ipanic_data *ctx = &drv_ctx;
+	int saved_oip, rc, block_shift = 0, bounce_idx = 0;
+	size_t line_len = 0;
+	bool ret;
+
+	static unsigned char line[SECTOR_SIZE];
+
+	*actual_size = 0;
+	while (1) {
+		saved_oip = oops_in_progress;
+		oops_in_progress = 1;
+		bounce_idx = 0;
+
+		if (last_chunk_buf_len) {
+			memcpy(ctx->bounce, last_chunk_buf, last_chunk_buf_len);
+			bounce_idx += last_chunk_buf_len;
+			last_chunk_buf_len = 0;
+		}
+
+		do {
+			ret = kmsg_dump_get_line(&ipanic_dumper, false,
+						 line, SECTOR_SIZE, &line_len);
+
+			if (ret) {
+				if (bounce_idx + line_len < SECTOR_SIZE) {
+					memcpy(ctx->bounce + bounce_idx,
+					       line, line_len);
+					bounce_idx += line_len;
+				} else {
+					int len = SECTOR_SIZE - bounce_idx;
+					memcpy(ctx->bounce + bounce_idx,
+					       line, len);
+					bounce_idx = SECTOR_SIZE;
+					memcpy(last_chunk_buf,
+					       line + len, line_len - len);
+					last_chunk_buf_len = line_len - len;
+				}
+			}
+		} while (ret && (bounce_idx != SECTOR_SIZE));
+
+		oops_in_progress = saved_oip;
+
+		/* If it is the last chunk, just copy it to last chunk
+		 * buffer and exit loop.
+		 */
+		if (!ret) {
+			/* Leave the last chunk for next writing */
+			memcpy(last_chunk_buf, ctx->bounce, bounce_idx);
+			last_chunk_buf_len = bounce_idx;
+			break;
+		}
+
+		rc = emmc_ipanic_writeflashpage(emmc, off + block_shift,
+						 ctx->bounce);
+		if (rc <= 0) {
+			pr_emerg("%s: Flash write failed (%d)\n",
+				__func__, rc);
+			return block_shift;
+		}
+
+		block_shift++;
+		*actual_size += SECTOR_SIZE;
+	}
+
+	return block_shift;
+}
+
+static void emmc_ipanic_flush_lastchunk_emmc(loff_t to,
+					     int *size_written,
+					     int *sector_written)
+{
+	struct emmc_ipanic_data *ctx = &drv_ctx;
+	struct mmc_emergency_info *emmc = ctx->emmc;
+	int rc = 0;
+
+	if (last_chunk_buf_len) {
+		memset(last_chunk_buf + last_chunk_buf_len, 0,
+		       SECTOR_SIZE - last_chunk_buf_len);
+
+		rc = emmc_ipanic_writeflashpage(emmc, to, last_chunk_buf);
+		if (rc <= 0) {
+			pr_emerg("emmc_ipanic: write last chunk failed (%d)\n",
+				rc);
+			return;
+		}
+
+		*size_written += last_chunk_buf_len;
+		(*sector_written)++;
+		last_chunk_buf_len = 0;
+	}
+	return;
+}
+
+static void emmc_ipanic_write_thread_func(void)
+{
+	struct emmc_ipanic_data *ctx = &drv_ctx;
+	struct mmc_emergency_info *emmc = ctx->emmc;
+	int size_written;
+	int thread_sector_count;
+
+	thread_sector_count =
+	    emmc_ipanic_write_console(emmc,
+				      log_offset[IPANIC_LOG_THREADS] +
+				      log_len[IPANIC_LOG_THREADS],
+				      &size_written);
+	if (thread_sector_count < 0) {
+		pr_emerg("Error writing threads to panic log! (%d)\n",
+			log_len[IPANIC_LOG_THREADS]);
+		return;
+	}
+	log_size[IPANIC_LOG_THREADS] += size_written;
+	log_len[IPANIC_LOG_THREADS] += thread_sector_count;
+
+	/*reset the log buffer */
+	log_buf_clear();
+	kmsg_dump_rewind(&ipanic_dumper);
+}
+
+static void emmc_ipanic_write_logbuf(struct mmc_emergency_info *emmc, int log)
+{
+	/*
+	 * Write the log data from the third block :
+	 * - the first block is reserved for panic header
+	 * - the second one is reserved for offset information
+	 */
+	log_offset[log] = emmc->start_block + 2;
+	log_len[log] = emmc_ipanic_write_console(emmc, log_offset[log],
+			&log_size[log]);
+	if (log_size[log] < 0) {
+		pr_emerg("Error writing console to panic log! (%d)\n",
+			log_len[log]);
+		log_size[log] = 0;
+		log_len[log] = 0;
+	}
+	/* flush last chunk buffer for console */
+	emmc_ipanic_flush_lastchunk_emmc(log_offset[log] +
+					 log_len[log],
+					 &log_size[log], &log_len[log]);
+}
+
+static void emmc_ipanic_write_calltrace(struct mmc_emergency_info *emmc,
+					int log)
+{
+	log_offset[log] = log_offset[log - 1] + log_len[log - 1];
+	/*
+	 * config func_stream_emmc to emmc_ipanic_write_thread_func to
+	 * stream thread call trace.
+	 */
+	log_buf_clear();
+	kmsg_dump_rewind(&ipanic_dumper);
+	func_stream_emmc = emmc_ipanic_write_thread_func;
+	show_state_filter(0);
+
+	/* flush last chunk buffer */
+	emmc_ipanic_flush_lastchunk_emmc(log_offset[log] +
+					 log_len[log],
+					 &log_size[log], &log_len[log]);
+}
+
+static int emmc_ipanic_write_gbuffer_data(struct mmc_emergency_info *emmc,
+					 struct g_buffer_header *gbuffer,
+					 unsigned int off,
+					 int *actual_size)
+{
+	int rc, block_shift = 0;
+	size_t log_off = 0;
+	size_t log_size;
+	unsigned char *buf = gbuffer->base;
+
+	if (gbuffer->head)
+		/* has overflow */
+		log_size = gbuffer->size;
+	else
+		/* no overflow */
+		log_size = gbuffer->woff;
+
+	while (log_off < log_size) {
+		size_t size_copy = log_size - log_off;
+		if (size_copy < SECTOR_SIZE) {
+			/*
+			 * flash page not complete, flushed with
+			 * emmc_ipanic_flush_lastchunk_emmc
+			 */
+			memcpy(last_chunk_buf, buf + log_off, size_copy);
+			last_chunk_buf_len = size_copy;
+			break;
+		}
+		rc = emmc_ipanic_writeflashpage(emmc, off + block_shift,
+						buf + log_off);
+		if (rc <= 0) {
+			pr_emerg("%s: Flash write failed (%d)\n",
+				__func__, rc);
+			return 0;
+		}
+		log_off += rc;
+		block_shift++;
+	}
+	*actual_size = log_off;
+
+	return block_shift;
+}
+
+static struct g_buffer_header gbuffer = {
+	.base = NULL,
+};
+
+static void emmc_ipanic_write_gbuffer(struct mmc_emergency_info *emmc,
+				    int log)
+{
+	struct g_buffer_header *m_gbuffer = &gbuffer;
+
+	log_offset[log] = log_offset[log - 1] + log_len[log - 1];
+
+	pr_info("write gbuffer data\n");
+	if (!m_gbuffer->base) {
+		pr_err("Ipanic error, no gbuffer data\n");
+		return;
+	}
+
+	log_len[log] = emmc_ipanic_write_gbuffer_data(emmc, m_gbuffer,
+						    log_offset[log],
+						    &log_size[log]);
+	if (log_len[log] < 0) {
+		pr_emerg("Error writing gbuffer to panic log! (%d)\n",
+			log_len[log]);
+		log_size[log] = 0;
+		log_len[log] = 0;
+	}
+	/* flush last chunk buffer */
+	emmc_ipanic_flush_lastchunk_emmc(log_offset[log] + log_len[log],
+					 &log_size[log], &log_len[log]);
+	log_head[log] = m_gbuffer->head;
+	log_woff[log] = m_gbuffer->woff;
+	pr_info("write gbuffer data END\n");
+}
+
+/*
+ * Exported in <linux/panic_gbuffer.h>
+ */
+void panic_set_gbuffer(struct g_buffer_header *buf)
+{
+	if (gbuffer.base) {
+		pr_err("%s: gbuffer already set to 0x%p, can not set again",
+		       __func__, gbuffer.base);
+		return;
+	}
+
+	gbuffer.base = buf->base;
+	gbuffer.size = buf->size;
+	gbuffer.woff = buf->woff;
+	gbuffer.head = buf->head;
+}
+EXPORT_SYMBOL(panic_set_gbuffer);
+
+static void emmc_ipanic_write_pageheader(struct mmc_emergency_info *emmc)
+{
+	struct emmc_ipanic_data *ctx = &drv_ctx;
+	struct panic_header *hdr = (struct panic_header *)ctx->bounce;
+	int wc;
+	size_t len, total, max;
+
+	memset(ctx->bounce, 0, SECTOR_SIZE);
+	hdr->magic = PANIC_MAGIC;
+	hdr->version = PHDR_VERSION;
+
+	total = snprintf(hdr->panic, SECTOR_SIZE,
+			"###Kernel panic###\n");
+
+	max =  SECTOR_SIZE - offsetof(struct panic_header, panic) - total;
+	kmsg_dump_get_buffer(&ipanic_dumper, false,  last_chunk_buf, max, &len);
+	kmsg_dump_rewind(&ipanic_dumper);
+
+	memcpy(hdr->panic + total, last_chunk_buf, len);
+	hdr->log_size = len + total;
+
+	/* Write header block */
+	wc = emmc_ipanic_writeflashpage(emmc, emmc->start_block, ctx->bounce);
+	if (wc <= 0) {
+		pr_emerg("emmc_ipanic: Info write failed (%d)\n", wc);
+		/* let the watchdog expire to reset the platform */
+		reboot_force = REBOOT_FORCE_ON;
+		return;
+	}
+}
+
+static void emmc_ipanic_clean_loginfo(struct mmc_emergency_info *emmc)
+{
+	struct emmc_ipanic_data *ctx = &drv_ctx;
+	int rc;
+
+	memset(log_offset, 0, IPANIC_LOG_MAX * sizeof(int));
+	memset(log_len, 0, IPANIC_LOG_MAX * sizeof(int));
+	memset(log_size, 0, IPANIC_LOG_MAX * sizeof(int));
+
+	memset(ctx->bounce, 0, SECTOR_SIZE);
+
+	rc = emmc_ipanic_writeflashpage(emmc, emmc->start_block+1, ctx->bounce);
+	if (rc <= 0) {
+		pr_emerg("emmc_ipanic: Header write failed (%d)\n",
+			rc);
+		return;
+	}
+}
+
+static void emmc_ipanic_write_loginfo(struct mmc_emergency_info *emmc, int newlog)
+{
+	struct emmc_ipanic_data *ctx = &drv_ctx;
+	struct log_info *info = (struct log_info *)ctx->bounce;
+	int log = IPANIC_LOG_CONSOLE;
+	int rc;
+
+	if ((newlog < 0) || (newlog >= IPANIC_LOG_MAX))
+		return;
+
+	if (log_size[newlog] == 0)
+		return;
+
+	memset(ctx->bounce, 0, SECTOR_SIZE);
+	/*Fill up log offset and size */
+	while (log < IPANIC_LOG_MAX) {
+		/*Configurate log offset and log size */
+		info->log_offset[log] = (log_offset[log] - emmc->start_block)
+		    << SECTOR_SIZE_SHIFT;
+		info->log_length[log] = log_size[log];
+		info->log_head[log] = log_head[log];
+		info->log_woff[log] = log_woff[log];
+		log++;
+	}
+	rc = emmc_ipanic_writeflashpage(emmc, emmc->start_block+1, ctx->bounce);
+	if (rc <= 0) {
+		pr_emerg("emmc_ipanic: Header write failed (%d)\n",
+			rc);
+		return;
+	}
+}
+
+static int emmc_ipanic(struct notifier_block *this, unsigned long event,
+		       void *ptr)
+{
+	struct emmc_ipanic_data *ctx = &drv_ctx;
+	struct mmc_emergency_info *emmc;
+	int rc, log;
+
+	pr_emerg("panic notified\n");
+
+	if (in_panic)
+		/* if panic while executing this panic handler we trig */
+		/* a watchdog event to be sure to report an issue */
+		reboot_force = REBOOT_FORCE_ON;
+
+	if (in_panic || disable_emmc_ipanic)
+		return NOTIFY_DONE;
+
+	in_panic = 1;
+
+#ifdef CONFIG_PREEMPT
+	/* Ensure that cond_resched() won't try to preempt anybody */
+	add_preempt_count(PREEMPT_ACTIVE);
+#endif
+	touch_nmi_watchdog();
+
+	if (!ctx)
+		goto emmc_error;
+	emmc = ctx->emmc;
+	if (!emmc)
+		goto emmc_error;
+	if (ctx->hdr.magic) {
+		pr_emerg("Crash partition in use!\n");
+		goto emmc_error;
+	}
+
+	rc = emmc->init();
+	if (rc) {
+		/* String too long to fit on 1 80-char line */
+		pr_emerg("%s %s, rc=%d\n",
+			"Emmc emergency driver is",
+			"not initialized successfully!",
+			rc);
+		goto emmc_error;
+	}
+
+	/* Prepare kmsg dumper */
+	ipanic_dumper.active = 1;
+	/* Rewind kmsg dumper */
+	kmsg_dump_rewind(&ipanic_dumper);
+
+	/* Write emmc ipanic partition header */
+	emmc_ipanic_write_pageheader(emmc);
+	/* Clean emmc ipanic sections offsets */
+	emmc_ipanic_clean_loginfo(emmc);
+
+	/*Write all buffer into emmc */
+	log = IPANIC_LOG_CONSOLE;
+	while (log < IPANIC_LOG_MAX) {
+		/* Clear temporary buffer */
+		memset(ctx->bounce, 0, SECTOR_SIZE);
+		/* Log every buffer into emmc */
+		switch (log) {
+		case IPANIC_LOG_CONSOLE:
+			emmc_ipanic_write_logbuf(emmc, log);
+			break;
+		case IPANIC_LOG_THREADS:
+			emmc_ipanic_write_calltrace(emmc, log);
+			break;
+		case IPANIC_LOG_GBUFFER:
+			emmc_ipanic_write_gbuffer(emmc, log);
+			break;
+		default:
+			break;
+		}
+		/* Update emmc ipanic sections offsets */
+		emmc_ipanic_write_loginfo(emmc, log);
+		log++;
+	}
+	pr_info("Panic log data written done!\n");
+
+	ipanic_dumper.active = 0;
+
+	goto out;
+
+emmc_error:
+	/* let the watchdog expire to reset the platform */
+	reboot_force = REBOOT_FORCE_ON;
+
+out:
+#ifdef CONFIG_PREEMPT
+	sub_preempt_count(PREEMPT_ACTIVE);
+#endif
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block panic_blk = {
+	.notifier_call = emmc_ipanic,
+	.priority = 100,
+};
+
+static int panic_dbg_set(void *data, u64 val)
+{
+	BUG();
+	return -1;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(panic_dbg_fops, NULL, panic_dbg_set, "%llu\n");
+
+static int match_dev_panic_part(struct device *dev, const void *data)
+{
+	struct hd_struct *part;
+	const char *name = (char *)data;
+
+	if (!name || !dev || dev->class != &block_class)
+		return 0;
+
+	part = dev_to_part(dev);
+
+	return part->info && part->info->volname &&
+		!strcmp(name, part->info->volname);
+}
+
+static int emmc_panic_partition_notify(struct notifier_block *nb,
+				       unsigned long action, void *data)
+{
+	struct device *dev = data;
+	struct emmc_ipanic_data *ctx = &drv_ctx;
+	struct mmc_emergency_info *emmc;
+	struct gendisk *disk;
+
+	if (!ctx) {
+		pr_err("%s:invalid panic handler\n", __func__);
+		return 0;
+	}
+
+	emmc = ctx->emmc;
+	if (!emmc) {
+		pr_err("%s:invalid emmc information\n", __func__);
+		return 0;
+	}
+
+	switch (action) {
+	case BUS_NOTIFY_ADD_DEVICE:
+	case BUS_NOTIFY_BOUND_DRIVER:
+		/* if emmc already found, exit the function */
+		if (emmc->bdev)
+			return 0;
+
+		emmc->part_dev = class_find_device(&block_class, NULL,
+						   emmc->part_label,
+						   &match_dev_panic_part);
+		if (emmc->part_dev) {
+			emmc->part = dev_to_part(emmc->part_dev);
+			if (!emmc->part) {
+				pr_err("unable to get partition\n");
+				goto put_dev;
+			}
+
+			disk = part_to_disk(emmc->part);
+			if (!disk) {
+				pr_err("unable to get disk\n");
+				goto put_dev;
+			}
+
+			/* get whole disk */
+			emmc->bdev = bdget_disk(disk, 0);
+			if (!emmc->bdev) {
+				pr_err("unable to get emmc block device\n");
+				goto put_dev;
+			}
+
+			emmc->start_block = emmc->part->start_sect;
+			emmc->block_count = emmc->part->nr_sects;
+
+			pr_info("panic partition found, label:%s, device:%s\n",
+				emmc->part_label, dev_name(emmc->part_dev));
+
+			/* notify to add the panic device */
+			emmc_panic_notify_add();
+
+			atomic_notifier_chain_register(&panic_notifier_list, &panic_blk);
+
+			INIT_WORK(&proc_removal_work, emmc_ipanic_remove_proc_work);
+		}
+		break;
+	case BUS_NOTIFY_DEL_DEVICE:
+	case BUS_NOTIFY_UNBIND_DRIVER:
+		if (match_dev_panic_part(dev, emmc->part_label)) {
+			pr_info("bus notify removed device '%s', cleaning.\n", dev_name(dev));
+			flush_scheduled_work();
+			atomic_notifier_chain_unregister(&panic_notifier_list, &panic_blk);
+			emmc_panic_notify_remove();
+		}
+		break;
+	case BUS_NOTIFY_BIND_DRIVER:
+	case BUS_NOTIFY_UNBOUND_DRIVER:
+		/* Nothing to do here, but we don't want
+		 * these actions to generate error messages,
+		 * so we need to catch them
+		 */
+		break;
+	default:
+		pr_err("Unknown action (%lu) on %s\n",
+			action, dev_name(dev));
+		return 0;
+	}
+	return 1;
+
+put_dev:
+	put_device(emmc->part_dev);
+	return 0;
+}
+
+static struct notifier_block panic_partition_notifier = {
+	.notifier_call = emmc_panic_partition_notify,
+};
+
+void emmc_ipanic_stream_emmc(void)
+{
+	if (func_stream_emmc)
+		(*func_stream_emmc) ();
+}
+
+EXPORT_SYMBOL(emmc_ipanic_stream_emmc);
+
+static struct dentry *emmc_ipanic_d;
+static struct dentry *emmc_ipanic_disable_d;
+
+static int __init emmc_ipanic_init(void)
+{
+	/* initialization of drv_ctx */
+	memset(&drv_ctx, 0, sizeof(drv_ctx));
+	drv_ctx.emmc = &emmc_info;
+
+	if (*part_label)
+		strcpy(emmc_info.part_label, part_label);
+
+	drv_ctx.ipanic_proc_entry_name = ipanic_proc_entry_name;
+	drv_ctx.bounce = (void *)__get_free_page(GFP_KERNEL);
+
+	bus_register_notifier(&pci_bus_type, &panic_partition_notifier);
+
+	emmc_ipanic_d = debugfs_create_file("emmc_ipanic", 0644, NULL, NULL,
+					    &panic_dbg_fops);
+	emmc_ipanic_disable_d = debugfs_create_u32("disable_emmc_ipanic", 0644,
+						   NULL, &disable_emmc_ipanic);
+
+	pr_info("init success\n");
+
+	return 0;
+}
+
+static void __exit emmc_ipanic_exit(void)
+{
+	debugfs_remove(emmc_ipanic_d);
+	debugfs_remove(emmc_ipanic_disable_d);
+	bus_unregister_notifier(&pci_bus_type, &panic_partition_notifier);
+	flush_scheduled_work();
+	atomic_notifier_chain_unregister(&panic_notifier_list, &panic_blk);
+	emmc_panic_notify_remove();
+}
+
+module_init(emmc_ipanic_init);
+module_exit(emmc_ipanic_exit);
diff --git a/drivers/external_drivers/drivers/misc/emmc_ipanic.h b/drivers/external_drivers/drivers/misc/emmc_ipanic.h
new file mode 100644
index 0000000..f2a9815
--- /dev/null
+++ b/drivers/external_drivers/drivers/misc/emmc_ipanic.h
@@ -0,0 +1,99 @@
+/*
+ * drivers/misc/emmc_ipanic.h
+ *
+ * Copyright (C) 2011 Intel Corp
+ * Author: dongxing.zhang@intel.com
+ * Author: jun.zhang@intel.com
+ * Author: chuansheng.liu@intel.com
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#ifndef _LINUX_EMMC_IPANIC_H
+#define _LINUX_EMMC_IPANIC_H
+
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/blkdev.h>
+#include <linux/genhd.h>
+#include <linux/version.h>
+
+#if !(LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+extern int log_buf_copy(char *dest, int idx, int len);
+#endif
+extern void log_buf_clear(void);
+
+#define SECTOR_SIZE_SHIFT (9)
+
+#define PROC_HEADER_INDEX        0
+#define PROC_CONSOLE_INDEX       1
+#define PROC_THREADS_INDEX       2
+#define PROC_GBUFFER_INDEX       3
+#define PROC_MAX_ENTRIES         4
+
+#define IPANIC_LOG_CONSOLE       0
+#define IPANIC_LOG_THREADS       1
+#define IPANIC_LOG_GBUFFER       2
+#define IPANIC_LOG_MAX           3
+#define IPANIC_LOG_HEADER        IPANIC_LOG_MAX
+
+
+struct mmc_emergency_info {
+#define DISK_NAME_LENGTH 20
+	/* emmc panic partition label */
+	char part_label[PARTITION_META_INFO_VOLNAMELTH];
+
+	struct block_device *bdev;
+	struct device *part_dev;
+	struct hd_struct *part;
+
+	/*panic partition start block */
+	sector_t start_block;
+	/*panic partition block count */
+	sector_t block_count;
+
+	int (*init) (void);
+	int (*write) (char *, unsigned int);
+	int (*read) (char *, unsigned int);
+};
+
+struct panic_header {
+	u32 magic;
+#define PANIC_MAGIC 0xdeadf00d
+
+	u32 version;
+#define PHDR_VERSION   0x01
+	u32 log_size;
+
+	char panic[SECTOR_SIZE];
+};
+
+struct log_info {
+	u32 log_offset[IPANIC_LOG_MAX];
+	u32 log_length[IPANIC_LOG_MAX];
+
+	/* For logcat and generic buffer log status */
+	size_t log_head[IPANIC_LOG_MAX];
+	size_t log_woff[IPANIC_LOG_MAX];
+};
+
+struct emmc_ipanic_data {
+	struct mmc_emergency_info *emmc;
+	struct panic_header hdr;
+	struct log_info curr;
+	void *bounce;
+	struct proc_dir_entry *ipanic_proc_entry[PROC_MAX_ENTRIES];
+	unsigned char **ipanic_proc_entry_name;
+};
+
+#endif /* _LINUX_EMMC_IPANIC_H */
diff --git a/drivers/external_drivers/drivers/misc/fps_throttle.c b/drivers/external_drivers/drivers/misc/fps_throttle.c
new file mode 100644
index 0000000..de973b7
--- /dev/null
+++ b/drivers/external_drivers/drivers/misc/fps_throttle.c
@@ -0,0 +1,245 @@
+/*
+ * Support for camera module fps thermal throttling.
+ *
+ * Copyright (c) 2014 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/thermal.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+
+#define STATE_NUM 4
+#define HANDSHAKE_TIMEOUT 500
+
+static wait_queue_head_t wait;
+static unsigned int cur_state = 0;
+static int state_list[STATE_NUM] = {100, 75, 50, 25};
+static struct kobject *adapter_kobj;
+struct thermal_cooling_device *tcd_fps;
+
+struct adapter_attr {
+	struct attribute attr;
+	int value;
+};
+
+enum fps_throttling_state {
+	FPS_THROTTLE_DISABLE = 0,
+	FPS_THROTTLE_ENABLE,
+	FPS_THROTTLE_SUCCESS
+};
+
+static struct adapter_attr notify = {
+	.attr.name = "notify",
+	.attr.mode = 0664,
+	.value = 100,
+};
+
+static struct adapter_attr handshake = {
+	.attr.name = "handshake",
+	.attr.mode = 0664,
+	.value = FPS_THROTTLE_DISABLE,
+};
+
+static struct attribute *throttle_attr[] = {
+	&notify.attr,
+	&handshake.attr,
+	NULL
+};
+
+static void set_fps_scaling(int fs)
+{
+	notify.value = fs;
+	handshake.value = FPS_THROTTLE_ENABLE;
+	sysfs_notify(adapter_kobj, NULL, "notify");
+}
+
+static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
+{
+	struct adapter_attr *a = container_of(attr, struct adapter_attr, attr);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", a->value);
+}
+
+static ssize_t store(struct kobject *kobj, struct attribute *attr,
+				const char *buf, size_t len)
+{
+	struct adapter_attr *a = container_of(attr, struct adapter_attr, attr);
+	sscanf(buf, "%d", &a->value);
+
+	if (strcmp(a->attr.name, notify.attr.name) == 0) {
+		set_fps_scaling(a->value);
+	} else {
+		handshake.value = a->value;
+		if (handshake.value == FPS_THROTTLE_SUCCESS)
+			wake_up(&wait);
+	}
+	return len;
+}
+
+static struct sysfs_ops throttle_ops = {
+	.show = show,
+	.store = store,
+};
+
+static struct kobj_type throttle_type = {
+	.sysfs_ops = &throttle_ops,
+	.default_attrs = throttle_attr,
+};
+
+static int thermal_get_max_state(struct thermal_cooling_device *tcd,
+				unsigned long *pms)
+{
+	*pms = STATE_NUM;
+	return 0;
+}
+
+static int thermal_get_cur_state(struct thermal_cooling_device *tcd,
+				unsigned long *pcs)
+{
+	int i;
+	if (handshake.value == FPS_THROTTLE_DISABLE)
+		return -EPERM;
+
+	for (i = 0; i < 4; i++) {
+		if (notify.value == state_list[i]) {
+			*pcs = i;
+			break;
+		}
+	}
+
+	return 0;
+}
+
+static int thermal_set_cur_state(struct thermal_cooling_device *tcd,
+				unsigned long pcs)
+{
+	int ret;
+
+	if (pcs >= STATE_NUM || pcs < 0)
+		return -EINVAL;
+	if (handshake.value == FPS_THROTTLE_DISABLE) {
+		notify.value = state_list[(int)pcs];
+		pr_err("fps change request rcvd from thermal, ignored.\n");
+		return -EPERM;
+	}
+
+	set_fps_scaling(state_list[(int)pcs]);
+	/* Wait the fps seting by HAL. */
+	ret = wait_event_interruptible_timeout(wait,
+				(handshake.value == FPS_THROTTLE_SUCCESS),
+				HANDSHAKE_TIMEOUT);
+	if (ret > 0) {
+		cur_state = (int)pcs;
+		handshake.value = FPS_THROTTLE_ENABLE;
+	} else {
+		/* fps change request is not replied by camera HAL, remain the value as
+		 * the previous one.
+		 */
+		notify.value = state_list[cur_state];
+	}
+
+	return ret;
+}
+
+static int thermal_set_force_state_override(struct thermal_cooling_device *tcd,
+				char *fps_state)
+{
+	sscanf(fps_state, "%d %d %d %d\n", &state_list[0],
+			 &state_list[1],
+			 &state_list[2],
+			 &state_list[3]);
+	return 0;
+}
+
+static int thermal_get_force_state_override(struct thermal_cooling_device *tcd,
+				char *fps_state)
+{
+	return sprintf(fps_state, "%d %d %d %d\n", state_list[0], state_list[1],
+			state_list[2], state_list[3]);
+}
+
+static int thermal_get_available_states(struct thermal_cooling_device *tcd,
+				char *fps_state)
+{
+	return sprintf(fps_state, "%d %d %d %d\n", state_list[0], state_list[1],
+			state_list[2], state_list[3]);
+}
+
+static const struct thermal_cooling_device_ops thermal_fps_ops = {
+	.get_max_state = thermal_get_max_state,
+	.get_cur_state = thermal_get_cur_state,
+	.set_cur_state = thermal_set_cur_state,
+	.set_force_state_override = thermal_set_force_state_override,
+	.get_force_state_override = thermal_get_force_state_override,
+	.get_available_states = thermal_get_available_states,
+};
+
+static int thermal_adapter_init(void)
+{
+	int err;
+
+	adapter_kobj = kzalloc(sizeof(*adapter_kobj), GFP_KERNEL);
+	if (!adapter_kobj)
+		return -ENOMEM;
+	kobject_init(adapter_kobj, &throttle_type);
+	err = kobject_add(adapter_kobj, NULL, "%s", "fps_throttle");
+	if (err)
+		goto adapter_failed;
+
+	tcd_fps = thermal_cooling_device_register("CameraFps", NULL,
+			&thermal_fps_ops);
+	if (IS_ERR(tcd_fps)) {
+		err = PTR_ERR(tcd_fps);
+		goto thermal_failed;
+	}
+
+	init_waitqueue_head(&wait);
+	return 0;
+
+thermal_failed:
+	kobject_del(adapter_kobj);
+adapter_failed:
+	kfree(adapter_kobj);
+	adapter_kobj = NULL;
+	return err;
+}
+
+static void thermal_adapter_exit(void)
+{
+	kobject_del(adapter_kobj);
+	kfree(adapter_kobj);
+	adapter_kobj = NULL;
+
+	thermal_cooling_device_unregister(tcd_fps);
+	tcd_fps = NULL;
+}
+
+static int __init fps_thermal_adapter_init(void)
+{
+	return thermal_adapter_init();
+}
+
+static void __exit fps_thermal_adapter_exit(void)
+{
+	thermal_adapter_exit();
+}
+
+module_init(fps_thermal_adapter_init);
+module_exit(fps_thermal_adapter_exit);
+
+MODULE_AUTHOR("Zhu,Shaoping <shaopingx.zhu@intel.com>");
+MODULE_DESCRIPTION("FPS throttling thermal adapter device driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/external_drivers/drivers/misc/intel_fabricerr_status.c b/drivers/external_drivers/drivers/misc/intel_fabricerr_status.c
new file mode 100644
index 0000000..bad2b63
--- /dev/null
+++ b/drivers/external_drivers/drivers/misc/intel_fabricerr_status.c
@@ -0,0 +1,1634 @@
+/*
+ * drivers/misc/intel_fabricerr_status.c
+ *
+ * Copyright (C) 2011 Intel Corp
+ * Author: winson.w.yung@intel.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/types.h>
+#include <asm/intel-mid.h>
+
+#include "intel_fabricid_def.h"
+
+static char *FullChip_FlagStatusLow32_pnw[] = {
+	"cdmi_iocp (IA Burst Timeout)",		/* bit 0 */
+	"cha_iahb (IA Burst Timeout)",		/* bit 1 */
+	"nand_iaxi (IA Burst Timeout)",		/* bit 2 */
+	"otg_iahb (IA Burst Timeout)",		/* bit 3 */
+	"usb_iahb (IA Burst Timeout)",		/* bit 4 */
+	"usc0a_iahb (IA Burst Timeout)",	/* bit 5 */
+	"usc0b_iahb (IA Burst Timeout)",	/* bit 6 */
+	"usc2_iahb (IA Burst Timeout)",		/* bit 7 */
+	"tra0_iocp (IA Burst Timeout)",		/* bit 8 */
+	"",					/* bit 9 */
+	"",					/* bit 10 */
+	"",					/* bit 11 */
+	"",					/* bit 12 */
+	"",					/* bit 13 */
+	"",					/* bit 14 */
+	"",					/* bit 15 */
+	"cdmi_iocp (IA Resp Timeout)",		/* bit 16 */
+	"cha_iahb (IA Resp Timeout)",		/* bit 17 */
+	"nand_iaxi (IA Resp Timeout)",		/* bit 18 */
+	"otg_iahb (IA Resp Timeout)",		/* bit 19 */
+	"usb_iahb (IA Resp Timeout)",		/* bit 20 */
+	"usc0a_iahb (IA Resp Timeout)",		/* bit 21 */
+	"usc0b_iahb (IA Resp Timeout)",		/* bit 22 */
+	"usc2_iahb (IA Resp Timeout)",		/* bit 23 */
+	"tra0_iocp (IA Resp Timeout)",		/* bit 24 */
+	"",					/* bit 25 */
+	"",					/* bit 26 */
+	"",					/* bit 27 */
+	"",					/* bit 28 */
+	"",					/* bit 29 */
+	"",					/* bit 30 */
+	""					/* bit 31 */
+};
+
+static char *FullChip_FlagStatusLow32_clv[] = {
+	"cdmi_iocp (IA Burst Timeout)",		/* bit 0 */
+	"cha_iahb (IA Burst Timeout)",		/* bit 1 */
+	"",					/* bit 2 */
+	"otg_iahb (IA Burst Timeout)",		/* bit 3 */
+	"usb_iahb (IA Burst Timeout)",		/* bit 4 */
+	"usc0a_iahb (IA Burst Timeout)",	/* bit 5 */
+	"usc0b_iahb (IA Burst Timeout)",	/* bit 6 */
+	"usc2_iahb (IA Burst Timeout)",		/* bit 7 */
+	"tra0_iocp (IA Burst Timeout)",		/* bit 8 */
+	"",					/* bit 9 */
+	"",					/* bit 10 */
+	"",					/* bit 11 */
+	"",					/* bit 12 */
+	"",					/* bit 13 */
+	"",					/* bit 14 */
+	"",					/* bit 15 */
+	"cdmi_iocp (IA Resp Timeout)",		/* bit 16 */
+	"cha_iahb (IA Resp Timeout)",		/* bit 17 */
+	"",					/* bit 18 */
+	"otg_iahb (IA Resp Timeout)",		/* bit 19 */
+	"usb_iahb (IA Resp Timeout)",		/* bit 20 */
+	"usc0a_iahb (IA Resp Timeout)",		/* bit 21 */
+	"usc0b_iahb (IA Resp Timeout)",		/* bit 22 */
+	"usc2_iahb (IA Resp Timeout)",		/* bit 23 */
+	"tra0_iocp (IA Resp Timeout)",		/* bit 24 */
+	"",					/* bit 25 */
+	"",					/* bit 26 */
+	"",					/* bit 27 */
+	"",					/* bit 28 */
+	"",					/* bit 29 */
+	"",					/* bit 30 */
+	""					/* bit 31 */
+};
+
+static char *FullChip_FlagStatusLow32_tng[] = {
+	"iosf2ocp_i0 (IA Burst Timeout)",	/* bit 0 */
+	"usb3_i0 (IA Burst Timeout)",		/* bit 1 */
+	"usb3_i1 (IA Burst Timeout)",		/* bit 2 */
+	"mfth_i0 (IA Burst Timeout)",		/* bit 3 */
+	"cha_i0 (IA Burst Timeout)",		/* bit 4 */
+	"otg_i0 (IA Burst Timeout)",		/* bit 5 */
+	"",					/* bit 6 */
+	"",					/* bit 7 */
+	"",					/* bit 8 */
+	"",					/* bit 9 */
+	"iosf2ocp_i0 (IA Response Timeout)",	/* bit 10 */
+	"usb3_i0 (IA Response Timeout)",	/* bit 11 */
+	"usb3_i1 (IA Response Timeout)",	/* bit 12 */
+	"mfth_i0 (IA Response Timeout)",	/* bit 13 */
+	"cha_i0 (IA Response Timeout)",		/* bit 14 */
+	"otg_i0 (IA Response Timeout)",		/* bit 15 */
+	"",					/* bit 16 */
+	"",					/* bit 17 */
+	"",					/* bit 18 */
+	"",					/* bit 19 */
+	"iosf2ocp_i0 (IA InBand Error)",	/* bit 20 */
+	"usb3_i0 (IA InBand Error)",		/* bit 21 */
+	"usb3_i1 (IA InBand Error)",		/* bit 22 */
+	"mfth_i0 (IA InBand Error)",		/* bit 23 */
+	"cha_i0 (IA InBand Error)",		/* bit 24 */
+	"otg_i0 (IA InBand Error)",		/* bit 25 */
+	"",					/* bit 26 */
+	"",					/* bit 27 */
+	"",					/* bit 28 */
+	"",					/* bit 29 */
+	"iosf2ocp_t0 (TA Request Timeout)",	/* bit 30 */
+	"usb3_t0 (TA Request Timeout)"		/* bit 31 */
+};
+
+static char *FullChip_FlagStatusHi32_pnw[] = {
+	"cdmi_iocp (IA Inband Error)",		/* bit 32 */
+	"cha_iahb (IA Inband Error)",		/* bit 33 */
+	"nand_iaxi (IA Inband Error)",		/* bit 34 */
+	"otg_iahb (IA Inband Error)",		/* bit 35 */
+	"usb_iahb (IA Inband Error)",		/* bit 36 */
+	"usc0a_iahb (IA Inband Error)",		/* bit 37 */
+	"usc0b_iahb (IA Inband Error)",		/* bit 38 */
+	"usc2_iahb (IA Inband Error)",		/* bit 39 */
+	"tra0_iocp (IA Inband Error)",		/* bit 40 */
+	"",					/* bit 41 */
+	"",					/* bit 42 */
+	"",					/* bit 43 */
+	"",					/* bit 44 */
+	"",					/* bit 45 */
+	"",					/* bit 46 */
+	"",					/* bit 47 */
+	"cdmi_tocp (TA Req Timeout)",		/* bit 48 */
+	"cha_tahb (TA Req Timeout)",		/* bit 49 */
+	"nand_taxi (TA Req Timeout)",		/* bit 50 */
+	"nandreg_taxi (TA Req Timeout)",	/* bit 51 */
+	"otg_tahb (TA Req Timeout)",		/* bit 52 */
+	"usb_tahb (TA Req Timeout)",		/* bit 53 */
+	"usc0a_tahb (TA Req Timeout)",		/* bit 54 */
+	"usc0b_tahb (TA Req Timeout)",		/* bit 55 */
+	"usc2_tahb (TA Req Timeout)",		/* bit 56 */
+	"pti_tocp (TA Req Timeout)",		/* bit 57 */
+	"tra0_tocp (TA Req Timeout)",		/* bit 58 */
+	"",					/* bit 59 */
+	"",					/* bit 60 */
+	"",					/* bit 61 */
+	"",					/* bit 62 */
+	""					/* bit 63 */
+};
+
+static char *FullChip_FlagStatusHi32_clv[] = {
+	"cdmi_iocp (IA Inband Error)",		/* bit 32 */
+	"cha_iahb (IA Inband Error)",		/* bit 33 */
+	"",					/* bit 34 */
+	"otg_iahb (IA Inband Error)",		/* bit 35 */
+	"usb_iahb (IA Inband Error)",		/* bit 36 */
+	"usc0a_iahb (IA Inband Error)",		/* bit 37 */
+	"usc0b_iahb (IA Inband Error)",		/* bit 38 */
+	"usc2_iahb (IA Inband Error)",		/* bit 39 */
+	"tra0_iocp (IA Inband Error)",		/* bit 40 */
+	"",					/* bit 41 */
+	"",					/* bit 42 */
+	"",					/* bit 43 */
+	"",					/* bit 44 */
+	"",					/* bit 45 */
+	"",					/* bit 46 */
+	"",					/* bit 47 */
+	"cdmi_tocp (TA Req Timeout)",		/* bit 48 */
+	"cha_tahb (TA Req Timeout)",		/* bit 49 */
+	"",					/* bit 50 */
+	"",					/* bit 51 */
+	"otg_tahb (TA Req Timeout)",		/* bit 52 */
+	"usb_tahb (TA Req Timeout)",		/* bit 53 */
+	"usc0a_tahb (TA Req Timeout)",		/* bit 54 */
+	"usc0b_tahb (TA Req Timeout)",		/* bit 55 */
+	"usc2_tahb (TA Req Timeout)",		/* bit 56 */
+	"pti_tocp (TA Req Timeout)",		/* bit 57 */
+	"tra0_tocp (TA Req Timeout)",		/* bit 58 */
+	"",					/* bit 59 */
+	"",					/* bit 60 */
+	"",					/* bit 61 */
+	"",					/* bit 62 */
+	""					/* bit 63 */
+};
+
+static char *FullChip_FlagStatusHi32_tng[] = {
+	"ptistm_t0 (TA Request Timeout)",	/* bit 32 */
+	"ptistm_t1 (TA Request Timeout)",	/* bit 33 */
+	"ptistm_t2 (TA Request Timeout)",	/* bit 34 */
+	"mfth_t0 (TA Request Timeout)",		/* bit 35 */
+	"cha_t0 (TA Request Timeout)",		/* bit 36 */
+	"otg_t0 (TA Request Timeout)",		/* bit 37 */
+	"runctl_t0 (TA Request Timeout)",	/* bit 38 */
+	"usb3phy_t0 (TA Request Timeout)",	/* bit 39 */
+	"",					/* bit 40 */
+	"",					/* bit 41 */
+	"",					/* bit 42 */
+	"",					/* bit 43 */
+	"",					/* bit 44 */
+	"",					/* bit 45 */
+	"",					/* bit 46 */
+	"",					/* bit 47 */
+	"",					/* bit 48 */
+	"",					/* bit 49 */
+	"iosf2ocp_t0 (Access Control Violation)",/* bit 50 */
+	"usb3_t0 (Access Control Violation)",	/* bit 51 */
+	"ptistm_t0 (Access Control Violation)",	/* bit 52 */
+	"ptistm_t1 (Access Control Violation)",	/* bit 53 */
+	"ptistm_t2 (Access Control Violation)",	/* bit 54 */
+	"mfth_t0 (Access Control Violation)",	/* bit 55 */
+	"cha_t0 (Access Control Violation)",	/* bit 56 */
+	"otg_t0 (Access Control Violation)",	/* bit 57 */
+	"runctl_t0 (Access Control Violation)",	/* bit 58 */
+	"usb3phy_t0 (Access Control Violation)",/* bit 59 */
+	"",					/* bit 60 */
+	"",					/* bit 61 */
+	"",					/* bit 62 */
+	""					/* bit 63 */
+};
+
+static char *Secondary_FlagStatusLow32[] = {
+	"usc1a_iahb (IA Burst Timeout)",	/* bit 0 */
+	"usc1b_iahb (IA Burst Timeout)",	/* bit 1 */
+	"hsidma_iahb (IA Burst Timeout)",	/* bit 2 */
+	"tra1_iocp (IA Burst Timeout)",		/* bit 3 */
+	"dfx_iahb (IA Burst Timeout)",		/* bit 4 */
+	"",					/* bit 5 */
+	"",					/* bit 6 */
+	"",					/* bit 7 */
+	"",					/* bit 8 */
+	"",					/* bit 9 */
+	"",					/* bit 10 */
+	"",					/* bit 11 */
+	"",					/* bit 12 */
+	"",					/* bit 13 */
+	"",					/* bit 14 */
+	"",					/* bit 15 */
+	"usc1a_iahb (IA Resp Timeout)",		/* bit 16 */
+	"usc1b_iahb (IA Resp Timeout)",		/* bit 17 */
+	"hsidma_iahb (IA Resp Timeout)",	/* bit 18 */
+	"tra1_iocp (IA Resp Timeout)",		/* bit 19 */
+	"dfx_iahb (IA Resp Timeout)",		/* bit 20 */
+	"",					/* bit 21 */
+	"",					/* bit 22 */
+	"",					/* bit 23 */
+	"",					/* bit 24 */
+	"",					/* bit 25 */
+	"",					/* bit 26 */
+	"",					/* bit 27 */
+	"",					/* bit 28 */
+	"",					/* bit 29 */
+	"",					/* bit 30 */
+	""					/* bit 31 */
+};
+
+static char *Secondary_FlagStatusLow32_tng[] = {
+	"sdio0_i0 (IA Burst Timeout)",		/* bit 0 */
+	"emmc01_i0 (IA Burst Timeout)",		/* bit 1 */
+	"",					/* bit 2 */
+	"sdio1_i0 (IA Burst Timeout)",		/* bit 3 */
+	"hsi_i0 (IA Burst Timeout)",		/* bit 4 */
+	"mph_i0 (IA Burst Timeout)",		/* bit 5 */
+	"sfth_i0 (IA Burst Timeout)",		/* bit 6 */
+	"dfxsctap_i0 (IA Burst Timeout)",	/* bit 7 */
+	"",					/* bit 8 */
+	"",					/* bit 9 */
+	"sdio0_i0 (IA Response Timeout)",	/* bit 10 */
+	"emmc01_i0 (IA Response Timeout)",	/* bit 11 */
+	"",					/* bit 12 */
+	"sdio1_i0 (IA Response Timeout)",	/* bit 13 */
+	"hsi_i0 (IA Response Timeout)",		/* bit 14 */
+	"mph_i0 (IA Response Timeout)",		/* bit 15 */
+	"sfth_i0 (IA Response Timeout)",	/* bit 16 */
+	"dfxsctap_i0 (IA Response Timeout)",	/* bit 17 */
+	"",					/* bit 18 */
+	"",					/* bit 19 */
+	"sdio0_i0 (IA InBand Error)",		/* bit 20 */
+	"emmc01_i0 (IA InBand Error)",		/* bit 21 */
+	"",					/* bit 22 */
+	"sdio1_i0 (IA InBand Error)",		/* bit 23 */
+	"hsi_i0 (IA InBand Error)",		/* bit 24 */
+	"mph_i0 (IA InBand Error)",		/* bit 25 */
+	"sfth_i0 (IA InBand Error)",		/* bit 26 */
+	"dfxsctap_i0 (IA InBand Error)",	/* bit 27 */
+	"",					/* bit 28 */
+	"",					/* bit 29 */
+	"sram_t0 (TA Request Timeout)",		/* bit 30 */
+	"sdio0_t0 (TA Request Timeout)"		/* bit 31 */
+};
+
+static char *Secondary_FlagStatusHi32[] = {
+	"usc1a_iahb (IA Inband Error)",		/* bit 32 */
+	"usc1b_iahb (IA Inband Error)",		/* bit 33 */
+	"hsidma_iahb (IA Inband Error)",	/* bit 34 */
+	"tra1_iocp (IA Inband Error)",		/* bit 35 */
+	"dfx_iahb (IA Inband Error)",		/* bit 36 */
+	"",					/* bit 37 */
+	"",					/* bit 38 */
+	"",					/* bit 39 */
+	"",					/* bit 40 */
+	"",					/* bit 41 */
+	"",					/* bit 42 */
+	"",					/* bit 43 */
+	"",					/* bit 44 */
+	"",					/* bit 45 */
+	"",					/* bit 46 */
+	"",					/* bit 47 */
+	"usc1a_tahb (TA Req Timeout)",		/* bit 48 */
+	"usc1b_tahb (TA Req Timeout)",		/* bit 49 */
+	"hsi_tocp (TA Req Timeout)",		/* bit 50 */
+	"hsidma_tahb (TA Req Timeout)",		/* bit 51 */
+	"sram_tocp (TA Req Timeout)",		/* bit 52 */
+	"tra1_tocp (TA Req Timeout)",		/* bit 53 */
+	"i2c3ssc_tocp (TA Req Timeout)",	/* bit 54 */
+	"",					/* bit 55 */
+	"",					/* bit 56 */
+	"",					/* bit 57 */
+	"",					/* bit 58 */
+	"",					/* bit 59 */
+	"",					/* bit 60 */
+	"",					/* bit 61 */
+	"",					/* bit 62 */
+	""					/* bit 63 */
+};
+
+static char *Secondary_FlagStatusHi32_tng[] = {
+	"emmc01_t0 (TA Request Timeout)",	/* bit 32 */
+	"",					/* bit 33 */
+	"sdio1_t0 (TA Request Timeout)",	/* bit 34 */
+	"hsi_t0 (TA Request Timeout)",		/* bit 35 */
+	"mph_t0 (TA Request Timeout)",		/* bit 36 */
+	"sfth_t0 (TA Request Timeout)",		/* bit 37 */
+	"",					/* bit 38 */
+	"",					/* bit 39 */
+	"",					/* bit 40 */
+	"",					/* bit 41 */
+	"",					/* bit 42 */
+	"",					/* bit 43 */
+	"",					/* bit 44 */
+	"",					/* bit 45 */
+	"",					/* bit 46 */
+	"",					/* bit 47 */
+	"",					/* bit 48 */
+	"",					/* bit 49 */
+	"sram_t0 (Access Control Violation)",	/* bit 50 */
+	"sdio0_t0 (Access Control Violation)",	/* bit 51 */
+	"emmc01_t0 (Access Control Violation)",	/* bit 52 */
+	"",					/* bit 53 */
+	"sdio1_t0 (Access Control Violation)",	/* bit 54 */
+	"hsi_t0 (Access Control Violation)",	/* bit 55 */
+	"mph_t0 (Access Control Violation)",	/* bit 56 */
+	"sfth_t0 (Access Control Violation)",	/* bit 57 */
+	"",					/* bit 58 */
+	"",					/* bit 59 */
+	"",					/* bit 60 */
+	"",					/* bit 61 */
+	"",					/* bit 62 */
+	""					/* bit 63 */
+};
+
+static char *Audio_FlagStatusLow32[] = {
+	"aes_iahb (IA Burst Timeout)",		/* bit 0 */
+	"adma_iahb (IA Burst Timeout)",		/* bit 1 */
+	"adma2_iahb (IA Burst Timeout)",	/* bit 2 */
+	"",					/* bit 3 */
+	"",					/* bit 4 */
+	"",					/* bit 5 */
+	"",					/* bit 6 */
+	"",					/* bit 7 */
+	"",					/* bit 8 */
+	"",					/* bit 9 */
+	"",					/* bit 10 */
+	"",					/* bit 11 */
+	"",					/* bit 12 */
+	"",					/* bit 13 */
+	"",					/* bit 14 */
+	"",					/* bit 15 */
+	"aes_iahb (IA Resp Timeout)",		/* bit 16 */
+	"adma_iahb (IA Resp Timeout)",		/* bit 17 */
+	"adma2_iahb (IA Resp Timeout)",		/* bit 18 */
+	"",					/* bit 19 */
+	"",					/* bit 20 */
+	"",					/* bit 21 */
+	"",					/* bit 22 */
+	"",					/* bit 23 */
+	"",					/* bit 24 */
+	"",					/* bit 25 */
+	"",					/* bit 26 */
+	"",					/* bit 27 */
+	"",					/* bit 28 */
+	"",					/* bit 29 */
+	"",					/* bit 30 */
+	""					/* bit 31 */
+};
+
+static char *Audio_FlagStatusLow32_tng[] = {
+	"pifocp_i0 (IA Burst Timeout)",		/* bit 0 */
+	"adma0_i0 (IA Burst Timeout)",		/* bit 1 */
+	"adma0_i1 (IA Burst Timeout)",		/* bit 2 */
+	"adma1_i0 (IA Burst Timeout)",		/* bit 3 */
+	"adma1_i1 (IA Burst Timeout)",		/* bit 4 */
+	"",					/* bit 5 */
+	"",					/* bit 6 */
+	"",					/* bit 7 */
+	"",					/* bit 8 */
+	"",					/* bit 9 */
+	"pifocp_i0 (IA Response Timeout)",	/* bit 10 */
+	"adma0_i0 (IA Response Timeout)",	/* bit 11 */
+	"adma0_i1 (IA Response Timeout)",	/* bit 12 */
+	"adma1_i0 (IA Response Timeout)",	/* bit 13 */
+	"adma1_i1 (IA Response Timeout)",	/* bit 14 */
+	"",					/* bit 15 */
+	"",					/* bit 16 */
+	"",					/* bit 17 */
+	"",					/* bit 18 */
+	"",					/* bit 19 */
+	"pifocp_i0 (IA InBand Error)",		/* bit 20 */
+	"adma0_i0 (IA InBand Error)",		/* bit 21 */
+	"adma0_i1 (IA InBand Error)",		/* bit 22 */
+	"adma1_i0 (IA InBand Error)",		/* bit 23 */
+	"adma1_i1 (IA InBand Error)",		/* bit 24 */
+	"",					/* bit 25 */
+	"",					/* bit 26 */
+	"",					/* bit 27 */
+	"",					/* bit 28 */
+	"",					/* bit 29 */
+	"ssp0_t0 (TA Request Timeout)",		/* bit 30 */
+	"ssp1_t0 (TA Request Timeout)"		/* bit 31 */
+};
+
+static char *Audio_FlagStatusHi32_pnw[] = {
+	"aes_iahb (IA Inband Error)",		/* bit 32 */
+	"adma_iahb (IA Inband Error)",		/* bit 33 */
+	"adma2_iahb (IA Inband Error)",		/* bit 34 */
+	"",					/* bit 35 */
+	"",					/* bit 36 */
+	"",					/* bit 37 */
+	"",					/* bit 38 */
+	"",					/* bit 39 */
+	"",					/* bit 40 */
+	"",					/* bit 41 */
+	"",					/* bit 42 */
+	"",					/* bit 43 */
+	"",					/* bit 44 */
+	"",					/* bit 45 */
+	"",					/* bit 46 */
+	"",					/* bit 47 */
+	"aes_tahb (TA Req Timeout)",		/* bit 48 */
+	"adma_tahb (TA Req Timeout)",		/* bit 49 */
+	"adram2_tocp (TA Req Timeout)",		/* bit 50 */
+	"adram_tocp (TA Req Timeout)",		/* bit 51 */
+	"airam_tocp (TA Req Timeout)",		/* bit 52 */
+	"assp1_1_tapb (TA Req Timeout)",	/* bit 53 */
+	"assp2_2_tahb (TA Req Timeout)",	/* bit 54 */
+	"adma2_tahb (TA Req Timeout)",		/* bit 55 */
+	"slim0_iocp (TA Req Timeout)",		/* bit 56 */
+	"slim1_iocp (TA Req Timeout)",		/* bit 57 */
+	"slim2_iocp (TA Req Timeout)",		/* bit 58 */
+	"",					/* bit 59 */
+	"",					/* bit 60 */
+	"",					/* bit 61 */
+	"",					/* bit 62 */
+	""					/* bit 63 */
+};
+
+static char *Audio_FlagStatusHi32_clv[] = {
+	"aes_iahb (IA Inband Error)",		/* bit 32 */
+	"adma_iahb (IA Inband Error)",		/* bit 33 */
+	"adma2_iahb (IA Inband Error)",		/* bit 34 */
+	"",					/* bit 35 */
+	"",					/* bit 36 */
+	"",					/* bit 37 */
+	"",					/* bit 38 */
+	"",					/* bit 39 */
+	"",					/* bit 40 */
+	"",					/* bit 41 */
+	"",					/* bit 42 */
+	"",					/* bit 43 */
+	"",					/* bit 44 */
+	"",					/* bit 45 */
+	"",					/* bit 46 */
+	"",					/* bit 47 */
+	"aes_tahb (TA Req Timeout)",		/* bit 48 */
+	"adma_tahb (TA Req Timeout)",		/* bit 49 */
+	"adram2_tocp (TA Req Timeout)",		/* bit 50 */
+	"adram_tocp (TA Req Timeout)",		/* bit 51 */
+	"airam_tocp (TA Req Timeout)",		/* bit 52 */
+	"assp1_1_tapb (TA Req Timeout)",	/* bit 53 */
+	"assp_2_tapb (TA Req Timeout)",		/* bit 54 */
+	"adma2_tahb (TA Req Timeout)",		/* bit 55 */
+	"assp_3_tapb (TA Req Timeout)",		/* bit 56 */
+	"",					/* bit 57 */
+	"",					/* bit 58 */
+	"assp_4_tapb (TA Req Timeout)",		/* bit 59 */
+	"",					/* bit 60 */
+	"",					/* bit 61 */
+	"",					/* bit 62 */
+	""					/* bit 63 */
+};
+
+static char *Audio_FlagStatusHi32_tng[] = {
+	"ssp2_t0 (TA Request Timeout)",		/* bit 32 */
+	"slim1_t0 (TA Request Timeout)",	/* bit 33 */
+	"pifocp_t0 (TA Request Timeout)",	/* bit 34 */
+	"adma0_t0 (TA Request Timeout)",	/* bit 35 */
+	"adma1_t0 (TA Request Timeout)",	/* bit 36 */
+	"mboxram_t0 (TA Request Timeout)",	/* bit 37 */
+	"",					/* bit 38 */
+	"",					/* bit 39 */
+	"",					/* bit 40 */
+	"",					/* bit 41 */
+	"",					/* bit 42 */
+	"",					/* bit 43 */
+	"",					/* bit 44 */
+	"",					/* bit 45 */
+	"",					/* bit 46 */
+	"",					/* bit 47 */
+	"",					/* bit 48 */
+	"",					/* bit 49 */
+	"ssp0_t0 (Access Control Violation)",	/* bit 50 */
+	"ssp1_t0 (Access Control Violation)",	/* bit 51 */
+	"ssp2_t0 (Access Control Violation)",	/* bit 52 */
+	"slim1_t0 (Access Control Violation)",	/* bit 53 */
+	"pifocp_t0 (Access Control Violation)",	/* bit 54 */
+	"adma0_t0 (Access Control Violation)",	/* bit 55 */
+	"adma1_t0 (Access Control Violation)",	/* bit 56 */
+	"mboxram_t0 (Access Control Violation)",/* bit 57 */
+	"",					/* bit 58 */
+	"",					/* bit 59 */
+	"",					/* bit 60 */
+	"",					/* bit 61 */
+	"",					/* bit 62 */
+	""					/* bit 63 */
+};
+
+static char *GP_FlagStatusLow32[] = {
+	"gpdma_iahb (IA Burst Timeout)",	/* bit 0 */
+	"",					/* bit 1 */
+	"",					/* bit 2 */
+	"",					/* bit 3 */
+	"",					/* bit 4 */
+	"",					/* bit 5 */
+	"",					/* bit 6 */
+	"",					/* bit 7 */
+	"",					/* bit 8 */
+	"",					/* bit 9 */
+	"",					/* bit 10 */
+	"",					/* bit 11 */
+	"",					/* bit 12 */
+	"",					/* bit 13 */
+	"",					/* bit 14 */
+	"",					/* bit 15 */
+	"gpdma_iahb (IA Resp Timeout)",		/* bit 16 */
+	"",					/* bit 17 */
+	"",					/* bit 18 */
+	"",					/* bit 19 */
+	"",					/* bit 20 */
+	"",					/* bit 21 */
+	"",					/* bit 22 */
+	"",					/* bit 23 */
+	"",					/* bit 24 */
+	"",					/* bit 25 */
+	"",					/* bit 26 */
+	"",					/* bit 27 */
+	"",					/* bit 28 */
+	"",					/* bit 29 */
+	"",					/* bit 30 */
+	""					/* bit 31 */
+};
+
+static char *GP_FlagStatusLow32_tng[] = {
+	"gpdma_i0 (IA Burst Timeout)",		/* bit 0 */
+	"gpdma_i1 (IA Burst Timeout)",		/* bit 1 */
+	"",					/* bit 2 */
+	"",					/* bit 3 */
+	"",					/* bit 4 */
+	"",					/* bit 5 */
+	"",					/* bit 6 */
+	"",					/* bit 7 */
+	"",					/* bit 8 */
+	"",					/* bit 9 */
+	"gpdma_i0 (IA Response Timeout)",	/* bit 10 */
+	"gpdma_i1 (IA Response Timeout)",	/* bit 11 */
+	"",					/* bit 12 */
+	"",					/* bit 13 */
+	"",					/* bit 14 */
+	"",					/* bit 15 */
+	"",					/* bit 16 */
+	"",					/* bit 17 */
+	"",					/* bit 18 */
+	"",					/* bit 19 */
+	"gpdma_i0 (IA InBand Error)",		/* bit 20 */
+	"gpdma_i1 (IA InBand Error)",		/* bit 21 */
+	"",					/* bit 22 */
+	"",					/* bit 23 */
+	"",					/* bit 24 */
+	"",					/* bit 25 */
+	"",					/* bit 26 */
+	"",					/* bit 27 */
+	"",					/* bit 28 */
+	"",					/* bit 29 */
+	"spi5_t0 (TA Request Timeout)",		/* bit 30 */
+	"ssp6_t0 (TA Request Timeout)"		/* bit 31 */
+};
+
+static char *GP_FlagStatusHi32[] = {
+	"gpdma_iahb (IA Inband Error)",		/* bit 32 */
+	"",					/* bit 33 */
+	"",					/* bit 34 */
+	"",					/* bit 35 */
+	"",					/* bit 36 */
+	"",					/* bit 37 */
+	"",					/* bit 38 */
+	"",					/* bit 39 */
+	"",					/* bit 40 */
+	"",					/* bit 41 */
+	"",					/* bit 42 */
+	"",					/* bit 43 */
+	"",					/* bit 44 */
+	"",					/* bit 45 */
+	"",					/* bit 46 */
+	"",					/* bit 47 */
+	"gpio1_tocp (TA Req Timeout)",		/* bit 48 */
+	"i2c0_tocp (TA Req Timeout)",		/* bit 49 */
+	"i2c1_tocp (TA Req Timeout)",		/* bit 50 */
+	"i2c2_tocp (TA Req Timeout)",		/* bit 51 */
+	"i2c3hdmi_tocp (TA Req Timeout)",	/* bit 52 */
+	"i2c4_tocp (TA Req Timeout)",		/* bit 53 */
+	"i2c5_tocp (TA Req Timeout)",		/* bit 54 */
+	"spi1_tocp (TA Req Timeout)",		/* bit 55 */
+	"spi2_tocp (TA Req Timeout)",		/* bit 56 */
+	"spi3_tocp (TA Req Timeout)",		/* bit 57 */
+	"gpdma_tahb (TA Req Timeout)",		/* bit 58 */
+	"i2c3scc_tocp (TA Req Timeout)",	/* bit 59 */
+	"",					/* bit 60 */
+	"",					/* bit 61 */
+	"",					/* bit 62 */
+	""					/* bit 63 */
+};
+
+static char *GP_FlagStatusHi32_tng[] = {
+	"gpdma_t0 (TA Request Timeout)",	/* bit 32 */
+	"i2c12_t0 (TA Request Timeout)",	/* bit 33 */
+	"i2c12_t1 (TA Request Timeout)",	/* bit 34 */
+	"i2c3_t0 (TA Request Timeout)",		/* bit 35 */
+	"i2c45_t0 (TA Request Timeout)",	/* bit 36 */
+	"i2c45_t1 (TA Request Timeout)",	/* bit 37 */
+	"i2c67_t0 (TA Request Timeout)",	/* bit 38 */
+	"i2c67_t1 (TA Request Timeout)",	/* bit 39 */
+	"ssp3_t0 (TA Request Timeout)",		/* bit 40 */
+	"",					/* bit 41 */
+	"",					/* bit 42 */
+	"",					/* bit 43 */
+	"",					/* bit 44 */
+	"",					/* bit 45 */
+	"",					/* bit 46 */
+	"",					/* bit 47 */
+	"",					/* bit 48 */
+	"",					/* bit 49 */
+	"spi5_t0 (Access Control Violation)",	/* bit 50 */
+	"ssp6_t0 (Access Control Violation)",	/* bit 51 */
+	"gpdma_t0 (Access Control Violation)",	/* bit 52 */
+	"i2c12_t0 (Access Control Violation)",	/* bit 53 */
+	"i2c12_t1 (Access Control Violation)",	/* bit 54 */
+	"i2c3_t0 (Access Control Violation)",	/* bit 55 */
+	"i2c45_t0 (Access Control Violation)",	/* bit 56 */
+	"i2c45_t1 Access Control Violation)",	/* bit 57 */
+	"i2c67_t0 (Access Control Violation)",	/* bit 58 */
+	"i2c67_t1 (Access Control Violation)",	/* bit 59 */
+	"ssp3_t0 (Access Control Violation)",	/* bit 60 */
+	"",					/* bit 61 */
+	"",					/* bit 62 */
+	""					/* bit 63 */
+};
+
+static char *SC_FlagStatusLow32_pnw[] = {
+	"MFlag0 (Audio)",			/* bit 0 */
+	"MFlag1 (Secondary)",			/* bit 1 */
+	"MFlag2 (FullChip)",			/* bit 2 */
+	"MFlag3 (GP)",				/* bit 3 */
+	"",					/* bit 4 */
+	"",					/* bit 5 */
+	"",					/* bit 6 */
+	"",					/* bit 7 */
+	"arc_iocp (IA Burst Timeout)",		/* bit 8 */
+	"scdma_iocp (IA Burst Timeout)",	/* bit 9 */
+	"uart_iocp (IA Burst Timeout)",		/* bit 10 */
+	"",					/* bit 11 */
+	"",					/* bit 12 */
+	"",					/* bit 13 */
+	"",					/* bit 14 */
+	"",					/* bit 15 */
+	"arc_iocp (IA Resp Timeout)",		/* bit 16 */
+	"scdma_iocp (IA Resp Timeout)",		/* bit 17 */
+	"uart_iocp (IA Resp Timeout)",		/* bit 18 */
+	"",					/* bit 19 */
+	"",					/* bit 20 */
+	"",					/* bit 21 */
+	"",					/* bit 22 */
+	"",					/* bit 23 */
+	"arc_iocp (IA Inband Error)",		/* bit 24 */
+	"scdma_iocp (IA Inband Error)",		/* bit 25 */
+	"uart_iocp (IA Inband Error)",		/* bit 26 */
+	"",					/* bit 27 */
+	"",					/* bit 28 */
+	"",					/* bit 29 */
+	"",					/* bit 30 */
+	""					/* bit 31 */
+};
+
+static char *SC_FlagStatusLow32_clv[] = {
+	"MFlag0 (Audio)",			/* bit 0 */
+	"MFlag1 (Secondary)",			/* bit 1 */
+	"MFlag2 (FullChip)",			/* bit 2 */
+	"MFlag3 (GP)",				/* bit 3 */
+	"",					/* bit 4 */
+	"",					/* bit 5 */
+	"",					/* bit 6 */
+	"ilb_iocp (IA Burst Timeout)",		/* bit 7 */
+	"arc_iocp (IA Burst Timeout)",		/* bit 8 */
+	"scdma_iocp (IA Burst Timeout)",	/* bit 9 */
+	"uart_iocp (IA Burst Timeout)",		/* bit 10 */
+	"",					/* bit 11 */
+	"",					/* bit 12 */
+	"",					/* bit 13 */
+	"",					/* bit 14 */
+	"",					/* bit 15 */
+	"arc_iocp (IA Resp Timeout)",		/* bit 16 */
+	"scdma_iocp (IA Resp Timeout)",		/* bit 17 */
+	"uart_iocp (IA Resp Timeout)",		/* bit 18 */
+	"ilb_iocp (IA Resp Timeout)",		/* bit 19 */
+	"",					/* bit 20 */
+	"",					/* bit 21 */
+	"",					/* bit 22 */
+	"",					/* bit 23 */
+	"arc_iocp (IA Inband Error)",		/* bit 24 */
+	"scdma_iocp (IA Inband Error)",		/* bit 25 */
+	"uart_iocp (IA Inband Error)",		/* bit 26 */
+	"ilb_iocp (IA Inband Error)",		/* bit 27 */
+	"",					/* bit 28 */
+	"",					/* bit 29 */
+	"",					/* bit 30 */
+	""					/* bit 31 */
+};
+
+static char *SC_FlagStatusLow32_tng[] = {
+	"ADF Flag Status",			/* bit 0 */
+	"SDF Flag Status",			/* bit 1 */
+	"MNF Flag Status",			/* bit 2 */
+	"GPF Flag Status",			/* bit 3 */
+	"ilb_i0 (IA Burst Timeout)",		/* bit 4 */
+	"scdma_i0 (IA Burst Timeout)",		/* bit 5 */
+	"scdma_i1 (IA Burst Timeout)",		/* bit 6 */
+	"arc_i0 (IA Burst Timeout)",		/* bit 7 */
+	"uart_i0 (IA Burst Timeout)",		/* bit 8 */
+	"psh_i0 (IA Burst Timeout)",		/* bit 9 */
+	"ilb_i0 (IA Response Timeout)",		/* bit 10 */
+	"scdma_i0 (IA Response Timeout)",	/* bit 11 */
+	"scdma_i1 (IA Response Timeout)",	/* bit 12 */
+	"arc_i0 (IA Response Timeout)",		/* bit 13 */
+	"uart_i0 (IA Response Timeout)",	/* bit 14 */
+	"psh_i0 (IA Response Timeout)",		/* bit 15 */
+	"",					/* bit 16 */
+	"",					/* bit 17 */
+	"",					/* bit 18 */
+	"",					/* bit 19 */
+	"ilb_i0 (IA InBand Error)",		/* bit 20 */
+	"scdma_i0 (IA InBand Error)",		/* bit 21 */
+	"scdma_i1 (IA InBand Error)",		/* bit 22 */
+	"arc_i0 (IA InBand Error)",		/* bit 23 */
+	"uart_i0 (IA InBand Error)",		/* bit 24 */
+	"psh_i0 (IA InBand Error)",		/* bit 25 */
+	"",					/* bit 26 */
+	"",					/* bit 27 */
+	"",					/* bit 28 */
+	"",					/* bit 29 */
+	"ilb_t0 (TA Request Timeout)",		/* bit 30 */
+	"ipc1_t0 (TA Request Timeout)"		/* bit 31 */
+};
+
+static char *SC_FlagStatusHi32_pnw[] = {
+	"",					/* bit 32 */
+	"",					/* bit 33 */
+	"",					/* bit 34 */
+	"",					/* bit 35 */
+	"",					/* bit 36 */
+	"",					/* bit 37 */
+	"",					/* bit 38 */
+	"",					/* bit 39 */
+	"",					/* bit 40 */
+	"",					/* bit 41 */
+	"",					/* bit 42 */
+	"",					/* bit 43 */
+	"",					/* bit 44 */
+	"",					/* bit 45 */
+	"",					/* bit 46 */
+	"",					/* bit 47 */
+	"gpio_tocp (TA Req Timeout)",		/* bit 48 */
+	"uart_tocp (TA Req Timeout)",		/* bit 49 */
+	"ipc1_tocp (TA Req Timeout)",		/* bit 50 */
+	"ipc2_tocp (TA Req Timeout)",		/* bit 51 */
+	"kbd_tocp (TA Req Timeout)",		/* bit 52 */
+	"pmu_tocp (TA Req Timeout)",		/* bit 53 */
+	"scdma_tocp (TA Req Timeout)",		/* bit 54 */
+	"spi0_tocp (TA Req Timeout)",		/* bit 55 */
+	"tim_ocp (TA Req Timeout)",		/* bit 56 */
+	"vrtc_tocp (TA Req Timeout)",		/* bit 57 */
+	"arcs_tocp (TA Req Timeout)",		/* bit 58 */
+	"",					/* bit 59 */
+	"",					/* bit 60 */
+	"",					/* bit 61 */
+	"",					/* bit 62 */
+	""					/* bit 63 */
+};
+
+static char *SC_FlagStatusHi32_clv[] = {
+	"",					/* bit 32 */
+	"",					/* bit 33 */
+	"",					/* bit 34 */
+	"",					/* bit 35 */
+	"",					/* bit 36 */
+	"",					/* bit 37 */
+	"",					/* bit 38 */
+	"",					/* bit 39 */
+	"",					/* bit 40 */
+	"",					/* bit 41 */
+	"",					/* bit 42 */
+	"",					/* bit 43 */
+	"",					/* bit 44 */
+	"",					/* bit 45 */
+	"",					/* bit 46 */
+	"",					/* bit 47 */
+	"gpio_tocp (TA Req Timeout)",		/* bit 48 */
+	"uart_tocp (TA Req Timeout)",		/* bit 49 */
+	"ipc1_tocp (TA Req Timeout)",		/* bit 50 */
+	"ipc2_tocp (TA Req Timeout)",		/* bit 51 */
+	"kbd_tocp (TA Req Timeout)",		/* bit 52 */
+	"pmu_tocp (TA Req Timeout)",		/* bit 53 */
+	"scdma_tocp (TA Req Timeout)",		/* bit 54 */
+	"spi0_tocp (TA Req Timeout)",		/* bit 55 */
+	"tim_ocp (TA Req Timeout)",		/* bit 56 */
+	"vrtc_tocp (TA Req Timeout)",		/* bit 57 */
+	"arcs_tocp (TA Req Timeout)",		/* bit 58 */
+	"ilb_tocp (TA Req Timeout)",		/* bit 59 */
+	"ilbmb0_tocp (TA Req Timeout)",		/* bit 60 */
+	"",					/* bit 61 */
+	"",					/* bit 62 */
+	""					/* bit 63 */
+};
+
+static char *SC_FlagStatusHi32_tng[] = {
+	"ipc2_t0 (TA Request Timeout)",		/* bit 32 */
+	"mbb_t0 (TA Request Timeout)",		/* bit 33 */
+	"spi4_t0 (TA Request Timeout)",		/* bit 34 */
+	"scdma_t0 (TA Request Timeout)",	/* bit 35 */
+	"kbd_t0 (TA Request Timeout)",		/* bit 36 */
+	"sccb_t0 (TA Request Timeout)",		/* bit 37 */
+	"timers_t0 (TA Request Timeout)",	/* bit 38 */
+	"pmu_t0 (TA Request Timeout)",		/* bit 39 */
+	"arc_t0 (TA Request Timeout)",		/* bit 40 */
+	"gpio192_t0 (TA Request Timeout)",	/* bit 41 */
+	"i2c0_t0 (TA Request Timeout)",		/* bit 42 */
+	"uart_t0 (TA Request Timeout)",		/* bit 43 */
+	"ssc_t0 (TA Request Timeout)",		/* bit 44 */
+	"pwm_t0 (TA Request Timeout)",		/* bit 45 */
+	"psh_t0 (TA Request Timeout)",		/* bit 46 */
+	"pcache_t0 (TA Request Timeout)",	/* bit 47 */
+	"i2c89_t0 (TA Request Timeout)",	/* bit 48 */
+	"i2c89_t1 (TA Request Timeout)",	/* bit 49 */
+	"ilb_t0 (Access Control Violation)",	/* bit 50 */
+	"ipc1_t0 (Access Control Violation)",	/* bit 51 */
+	"ipc2_t0 (Access Control Violation)",	/* bit 52 */
+	"spi4_t0 (Access Control Violation)",	/* bit 53 */
+	"sccb_t0 (Access Control Violation)",	/* bit 54 */
+	"timers_t0 (Access Control Violation)",	/* bit 55 */
+	"pmu_t0 (Access Control Violation)",	/* bit 56 */
+	"arc_t0 (Access Control Violation)",	/* bit 57 */
+	"gpio192_t0 (Access Control Violation)",/* bit 58 */
+	"i2c0_t0 (Access Control Violation)",	/* bit 59 */
+	"ssc_t0 (Access Control Violation)",	/* bit 60 */
+	"pcache_t0 (Access Control Violation)",	/* bit 61 */
+	"i2c89_t0 (Access Control Violation)",	/* bit 62 */
+	"i2c89_t1 (Access Control Violation)"	/* bit 63 */
+};
+
+static char *SC_FlagStatus1Low32_tng[] = {
+	"mbb_t0 (Access Control Violation)",	/* bit 0 */
+	"scdma_t0 (Access Control Violation)",	/* bit 1 */
+	"kbd_t0 (Access Control Violation)",	/* bit 2 */
+	"uart_t0 (Access Control Violation)",	/* bit 3 */
+	"pwm_t0 (Access Control Violation)",	/* bit 4 */
+	"psh_t0 (Access Control Violation)",	/* bit 5 */
+	"",					/* bit 6 */
+	"",					/* bit 7 */
+	"",					/* bit 8 */
+	"",					/* bit 9 */
+	"",					/* bit 10 */
+	"",					/* bit 11 */
+	"",					/* bit 12 */
+	"",					/* bit 13 */
+	"",					/* bit 14 */
+	"",					/* bit 15 */
+	"",					/* bit 16 */
+	"",					/* bit 17 */
+	"",					/* bit 18 */
+	"",					/* bit 19 */
+	"",					/* bit 20 */
+	"",					/* bit 21 */
+	"",					/* bit 22 */
+	"",					/* bit 23 */
+	"",					/* bit 24 */
+	"",					/* bit 25 */
+	"",					/* bit 26 */
+	"",					/* bit 27 */
+	"",					/* bit 28 */
+	"",					/* bit 29 */
+	"",					/* bit 30 */
+	""					/* bit 31 */
+};
+
+char *fabric_error_lookup(u32 fab_id, u32 error_index, int use_hidword)
+{
+	if (error_index > 31) /* Out of range of 32bit */
+		return NULL;
+
+	switch (fab_id) {
+	case FAB_ID_FULLCHIP:
+		switch (intel_mid_identify_cpu()) {
+		case INTEL_MID_CPU_CHIP_PENWELL:
+			return use_hidword ?
+				FullChip_FlagStatusHi32_pnw[error_index] :
+				FullChip_FlagStatusLow32_pnw[error_index];
+		case INTEL_MID_CPU_CHIP_CLOVERVIEW:
+			return use_hidword ?
+				FullChip_FlagStatusHi32_clv[error_index] :
+				FullChip_FlagStatusLow32_clv[error_index];
+		case INTEL_MID_CPU_CHIP_TANGIER:
+		case INTEL_MID_CPU_CHIP_ANNIEDALE:
+			return use_hidword ?
+				FullChip_FlagStatusHi32_tng[error_index] :
+				FullChip_FlagStatusLow32_tng[error_index];
+		default:
+			return NULL;
+		}
+
+	case FAB_ID_SECONDARY:
+		switch (intel_mid_identify_cpu()) {
+		case INTEL_MID_CPU_CHIP_PENWELL:
+		case INTEL_MID_CPU_CHIP_CLOVERVIEW:
+			return use_hidword ?
+				Secondary_FlagStatusHi32[error_index] :
+				Secondary_FlagStatusLow32[error_index];
+		case INTEL_MID_CPU_CHIP_TANGIER:
+		case INTEL_MID_CPU_CHIP_ANNIEDALE:
+			return use_hidword ?
+				Secondary_FlagStatusHi32_tng[error_index] :
+				Secondary_FlagStatusLow32_tng[error_index];
+		default:
+			return NULL;
+		}
+
+	case FAB_ID_AUDIO:
+		switch (intel_mid_identify_cpu()) {
+		case INTEL_MID_CPU_CHIP_PENWELL:
+			return use_hidword ?
+				Audio_FlagStatusHi32_pnw[error_index] :
+				Audio_FlagStatusLow32[error_index];
+		case INTEL_MID_CPU_CHIP_CLOVERVIEW:
+			return use_hidword ?
+				Audio_FlagStatusHi32_clv[error_index] :
+				Audio_FlagStatusLow32[error_index];
+		case INTEL_MID_CPU_CHIP_TANGIER:
+		case INTEL_MID_CPU_CHIP_ANNIEDALE:
+			return use_hidword ?
+				Audio_FlagStatusHi32_tng[error_index] :
+				Audio_FlagStatusLow32_tng[error_index];
+		default:
+			return NULL;
+		}
+
+	case FAB_ID_GP:
+		switch (intel_mid_identify_cpu()) {
+		case INTEL_MID_CPU_CHIP_PENWELL:
+		case INTEL_MID_CPU_CHIP_CLOVERVIEW:
+			return use_hidword ?
+				GP_FlagStatusHi32[error_index] :
+				GP_FlagStatusLow32[error_index];
+		case INTEL_MID_CPU_CHIP_TANGIER:
+		case INTEL_MID_CPU_CHIP_ANNIEDALE:
+			return use_hidword ?
+				GP_FlagStatusHi32_tng[error_index] :
+				GP_FlagStatusLow32_tng[error_index];
+		default:
+			return NULL;
+		}
+
+	case FAB_ID_SC:
+		switch (intel_mid_identify_cpu()) {
+		case INTEL_MID_CPU_CHIP_PENWELL:
+			return use_hidword ?
+				SC_FlagStatusHi32_pnw[error_index] :
+				SC_FlagStatusLow32_pnw[error_index];
+		case INTEL_MID_CPU_CHIP_CLOVERVIEW:
+			return use_hidword ?
+				SC_FlagStatusHi32_clv[error_index] :
+				SC_FlagStatusLow32_clv[error_index];
+		case INTEL_MID_CPU_CHIP_TANGIER:
+		case INTEL_MID_CPU_CHIP_ANNIEDALE:
+			return use_hidword ?
+				SC_FlagStatusHi32_tng[error_index] :
+				SC_FlagStatusLow32_tng[error_index];
+		default:
+			return NULL;
+		}
+
+	case FAB_ID_SC1:
+		switch (intel_mid_identify_cpu()) {
+		case INTEL_MID_CPU_CHIP_TANGIER:
+		case INTEL_MID_CPU_CHIP_ANNIEDALE:
+			return use_hidword ?
+				NULL :
+				SC_FlagStatus1Low32_tng[error_index];
+		default:
+			return NULL;
+		}
+
+	default:
+		return NULL;
+	}
+
+	return NULL;
+}
+
+static char *ScuBoot_ErrorTypes[] = {
+	"Unknown",
+	"Memory error",
+	"Instruction error",
+	"Fabric error",
+	"Shared SRAM ECC error",
+	"Unknown",
+	"North Fuses failure",
+	"Unknown",
+	"Unknown",
+	"Unknown",
+	"Kernel DLT expired",
+	"Kernel WDT expired",
+	"SCU CHAABI watchdog expired",
+	"FabricError xml request reset",
+};
+
+static char *ScuRuntime_ErrorTypes[] = {
+	"PLL Lock Slip",
+	"Unknown",
+	"Undefined L1 Interrupt"
+	"Punit Interrupt MBB Timeout_reset",
+	"Volt attack violation reset",
+	"Volt attack/SAI violation reset",
+	"LPE unknown interrupt",
+	"PSH Unknown interrupt",
+	"Fuse unknown interrupt",
+	"IPC2 unsupported error",
+	"Invalid KWDT IPC"
+};
+
+static char *ScuFabric_ErrorTypes[] = {
+	"Unknown",				/* 00 */
+	"Unknown",				/* 01 */
+	"Unknown",				/* 02 */
+	"Unknown",				/* 03 */
+	"Unknown",				/* 04 */
+	"Unknown",				/* 05 */
+	"Punit force reset",			/* 06 */
+	"Unknown",				/* 07 */
+	"Unknown",				/* 08 */
+	"Unknown",				/* 09 */
+	"Unsupported command error",		/* 0A */
+	"Address hole error",			/* 0B */
+	"Protection error",			/* 0C */
+	"Memory error assertion detected",	/* 0D */
+	"Request Timeout, Not acccepted",	/* 0E */
+	"Request Timeout, No response",		/* 0F */
+	"Request Timeout, Data not accepted",	/* 10 */
+};
+
+#define BEGIN_MAIN_FABRIC_REGID		16
+#define SC_FABRIC_ARC_I0_REGID		18
+#define SC_FABRIC_PSH_I0_REGID		17
+#define END_MAIN_FABRIC_REGID		24
+#define BEGIN_SC_FABRIC_REGID		25
+#define END_SC_FABRIC_REGID		29
+#define BEGIN_GP_FABRIC_REGID		30
+#define END_GP_FABRIC_REGID		32
+#define BEGIN_AUDIO_FABRIC_REGID	33
+#define END_AUDIO_FABRIC_REGID		38
+#define BEGIN_SEC_FABRIC_REGID		39
+#define END_SEC_FABRIC_REGID		48
+#define BEGIN_PM_MAIN_FABRIC_REGID	49
+#define END_PM_MAIN_FABRIC_REGID	59
+#define BEGIN_PM_AUDIO_FABRIC_REGID	60
+#define END_PM_AUDIO_FABRIC_REGID	68
+#define BEGIN_PM_SEC_FABRIC_REGID	69
+#define END_PM_SEC_FABRIC_REGID		76
+#define BEGIN_PM_SC_FABRIC_REGID	77
+#define END_PM_SC_FABRIC_REGID		97
+#define BEGIN_PM_GP_FABRIC_REGID	98
+#define END_PM_GP_FABRIC_REGID		109
+#define END_FABRIC_REGID		110
+
+static char *FabricFlagStatusErrLogDetail_tng[] = {
+	"Main Fabric Flag Status",
+	"Audio Fabric Flag Status",
+	"Secondary Fabric Flag Status",
+	"GP Fabric Flag Status",
+	"Lower 64bit part SC Fabric Flag Status",
+	"Upper 64bit part SC Fabric Flag Status",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"IA ERROR LOG Register for the initiator iosf2ocp_i0 in Main Fabric @200MHz{mnf}",	/*16*/
+	"IA ERROR LOG Register for the initiator psh_i0 in SC Fabric @100MHz{scf}",		/*17*/
+	"IA ERROR LOG Register for the initiator arc_i0 in SC Fabric @100MHz{scf}",		/*18*/
+	"IA ERROR LOG Register for the initiator usb3_i0 in Main Fabric @200MHz{mnf}",		/*19*/
+	"IA ERROR LOG Register for the initiator usb3_i1 in Main Fabric @200MHz{mnf}",		/*20*/
+	"IA ERROR LOG Register for the initiator mfth_i0 in Main Fabric @200MHz{mnf}",		/*21*/
+	"IA ERROR LOG Register for the initiator cha_i0 in Main Fabric @200MHz{mnf}",		/*22*/
+	"IA ERROR LOG Register for the initiator otg_i0 in Main Fabric @200MHz{mnf}",		/*23*/
+	"IA ERROR LOG Register for the initiator sdf2mnf_i0 in Main Fabric @200MHz{mnf}",	/*24*/
+	"IA ERROR LOG Register for the initiator sdf2scf_i0 in SC Fabric @100MHz{scf}",		/*25*/
+	"IA ERROR LOG Register for the initiator ilb_i0 in SC Fabric @100MHz{scf}",		/*26*/
+	"IA ERROR LOG Register for the initiator scdma_i0 in SC Fabric @100MHz{scf}",		/*27*/
+	"IA ERROR LOG Register for the initiator scdma_i1 in SC Fabric @100MHz{scf}",		/*28*/
+	"IA ERROR LOG Register for the initiator uart_i0 in SC Fabric @100MHz{scf}",		/*29*/
+	"IA ERROR LOG Register for the initiator gpdma_i0 in GP Fabric @100MHz{gpf}",		/*30*/
+	"IA ERROR LOG Register for the initiator gpdma_i1 in GP Fabric @100MHz{gpf}",		/*31*/
+	"IA ERROR LOG Register for the initiator sdf2gpf_i0 in GP Fabric @100MHz{gpf}",		/*32*/
+	"IA ERROR LOG Register for the initiator pifocp_i0 in Audio Fabric @50MHz{adf}",	/*33*/
+	"IA ERROR LOG Register for the initiator adma0_i0 in Audio Fabric @50MHz{adf}",		/*34*/
+	"IA ERROR LOG Register for the initiator adma0_i1 in Audio Fabric @50MHz{adf}",		/*35*/
+	"IA ERROR LOG Register for the initiator adma1_i0 in Audio Fabric @50MHz{adf}",		/*36*/
+	"IA ERROR LOG Register for the initiator adma1_i1 in Audio Fabric @50MHz{adf}",		/*37*/
+	"IA ERROR LOG Register for the initiator sdf2adf_i0 in Audio Fabric @50MHz{adf}",	/*38*/
+	"IA ERROR LOG Register for the initiator sdio0_i0 in Secondary Fabric @100MHz{sdf}",	/*39*/
+	"IA ERROR LOG Register for the initiator emmc01_i0 in Secondary Fabric @100MHz{sdf}",	/*40*/
+	"IA ERROR LOG Register for the initiator sdio1_i0 in Secondary Fabric @100MHz{sdf}",	/*41*/
+	"IA ERROR LOG Register for the initiator hsi_i0 in Secondary Fabric @100MHz{sdf}",	/*42*/
+	"IA ERROR LOG Register for the initiator mph_i0 in Secondary Fabric @100MHz{sdf}",	/*43*/
+	"IA ERROR LOG Register for the initiator sfth_i0 in Secondary Fabric @100MHz{sdf}",	/*44*/
+	"IA ERROR LOG Register for the initiator mnf2sdf_i0 in Secondary Fabric @100MHz{sdf}",	/*45*/
+	"IA ERROR LOG Register for the initiator gpf2sdf_i0 in Secondary Fabric @100MHz{sdf}",	/*46*/
+	"IA ERROR LOG Register for the initiator scf2sdf_i0 in Secondary Fabric @100MHz{sdf}",	/*47*/
+	"IA ERROR LOG Register for the initiator adf2sdf_i0 in Secondary Fabric @100MHz{sdf}",	/*48*/
+	"PM ERROR LOG register mnf_rt in Main Fabric @200MHz{mnf}",
+	"PM ERROR LOG register iosf2ocp_t0 in Main Fabric @200MHz{mnf}",			/*50*/
+	"PM ERROR LOG Register usb3_t0 in Main Fabric @200MHz{mnf}",
+	"PM ERROR LOG Register ptistm_t0 in Main Fabric @200MHz{mnf}",
+	"PM ERROR LOG Register ptistm_t0_regon0 in Main Fabric @200MHz{mnf}",
+	"PM ERROR LOG Register ptistm_t2 in Main Fabric @200MHz{mnf}",
+	"PM ERROR LOG Register mfth_t0 in Main Fabric @200MHz{mnf}",				/*55*/
+	"PM ERROR LOG Register cha_t0 in Main Fabric @200MHz{mnf}",
+	"PM ERROR LOG Register otg_t0 in Main Fabric @200MHz{mnf}",
+	"PM ERROR LOG Register runctl_t0 in Main Fabric @200MHz{mnf}",
+	"PM ERROR LOG Register UBS3PHY_t0 in Main Fabric @200MHz{mnf}",
+	"PM ERROR LOG Register adf_rt in Audio Fabric @50MHz{adf}",				/*60*/
+	"PM ERROR LOG Register ssp0_t0 in Audio Fabric @50MHz{adf}",
+	"PM ERROR LOG Register ssp1_t0 in Audio Fabric @50MHz{adf}",
+	"PM ERROR LOG Register ssp2_t0 in Audio Fabric @50MHz{adf}",
+	"PM ERROR LOG Register slim1_t0 in Audio Fabric @50MHz{adf}",
+	"PM ERROR LOG Register pifocp_t0 in Audio Fabric @50MHz{adf}",				/*65*/
+	"PM ERROR LOG Register adma0_t0 in Audio Fabric @50MHz{adf}",
+	"PM ERROR LOG Register adma1_t0 in Audio Fabric @50MHz{adf}",
+	"PM ERROR LOG Register mboxram_t0 in Audio Fabric @50MHz{adf}",
+	"PM ERROR LOG Register sdf_rt in Secondary Fabric @100MHz{adf}",
+	"PM ERROR LOG Register sram_t0 in Secondary Fabric @100MHz{adf}",			/*70*/
+	"PM ERROR LOG Register sdio0_t0 in Secondary Fabric @100MHz{adf}",
+	"PM ERROR LOG Register emmc01_t1 in Secondary Fabric @100MHz{adf}",
+	"PM ERROR LOG Register sdio1_t0 in Secondary Fabric @100MHz{adf}",
+	"PM ERROR LOG Register hsi_t0 in Secondary Fabric @100MHz{adf}",
+	"PM ERROR LOG Register mph_t0 in Secondary Fabric @100MHz{adf}",			/*75*/
+	"PM ERROR LOG Register sfth_t0 in Secondary Fabric @100MHz{adf}",
+	"PM ERROR LOG Register scf_rt in SC Fabric @100MHz{adf}",
+	"PM ERROR LOG Register ilb_t0 in SC Fabric @100MHz{adf}",
+	"PM ERROR LOG Register ipc1_t0 in SC Fabric @100MHz{adf}",
+	"PM ERROR LOG Register ipc2_t0 in SC Fabric @100MHz{adf}",				/*80*/
+	"PM ERROR LOG Register mbb_t0 in SC Fabric @100MHz{adf}",
+	"PM ERROR LOG Register spi4_t0 in SC Fabric @100MHz{adf}",
+	"PM ERROR LOG Register scdma_t0 in SC Fabric @100MHz{adf}",
+	"PM ERROR LOG Register kbd_t0 in SC Fabric @100MHz{adf}",
+	"PM ERROR LOG Register sccb_t0 in SC Fabric @100MHz{adf}",				/*85*/
+	"PM ERROR LOG Register timers_t0 in SC Fabric @100MHz{adf}",
+	"PM ERROR LOG Register pmu_t0 in SC Fabric @100MHz{adf}",
+	"PM ERROR LOG Register arc_t0 in SC Fabric @100MHz{adf}",
+	"PM ERROR LOG Register gpio192_t0 in SC Fabric @100MHz{adf}",
+	"PM ERROR LOG Register i2c0_t0 in SC Fabric @100MHz{adf}",				/*90*/
+	"PM ERROR LOG Register uart_t0 in SC Fabric @100MHz{adf}",
+	"PM ERROR LOG Register ssc_t0 in SC Fabric @100MHz{adf}",
+	"PM ERROR LOG Register pwm_t0 in SC Fabric @100MHz{adf}",
+	"PM ERROR LOG Register psh_t0 in SC Fabric @100MHz{adf}",
+	"PM ERROR LOG Register pcache_t0 in SC Fabric @100MHz{adf}",				/*95*/
+	"PM ERROR LOG Register ii2c89_t0 in SC Fabric @100MHz{adf}",
+	"PM ERROR LOG Register ii2c89_t1 in SC Fabric @100MHz{adf}",
+	"PM ERROR LOG Register gpf_rt in GP Fabric @100MHz{adf}",
+	"PM ERROR LOG Register spi5_t0 in GP Fabric @100MHz{adf}",
+	"PM ERROR LOG Register ssp6_t0 in GP Fabric @100MHz{adf}",				/*100*/
+	"PM ERROR LOG Register gpdma_t0 in GP Fabric @100MHz{adf}",
+	"PM ERROR LOG Register i2c12_t0 in GP Fabric @100MHz{adf}",
+	"PM ERROR LOG Register i2c12_t1 in GP Fabric @100MHz{adf}",
+	"PM ERROR LOG Register i2c3_t0 in GP Fabric @100MHz{adf}",
+	"PM ERROR LOG Register i2c45_t0 in GP Fabric @100MHz{adf}",				/*105*/
+	"PM ERROR LOG Register i2c45_t1 in GP Fabric @100MHz{adf}",
+	"PM ERROR LOG Register i2c67_t0 in GP Fabric @100MHz{adf}",
+	"PM ERROR LOG Register ic267_t1 in GP Fabric @100MHz{adf}",
+	"PM ERROR LOG Register ssp3_t0 in GP Fabric @100MHz{adf}",				/*109*/
+	""											/*110*/
+};
+
+#define CLV_BEGIN_MAIN_FABRIC_REGID		16
+#define CLV_END_MAIN_FABRIC_REGID		35
+#define CLV_BEGIN_SC_FABRIC_REGID		36
+#define CLV_END_SC_FABRIC_REGID			57
+#define CLV_BEGIN_GP_FABRIC_REGID		58
+#define CLV_END_GP_FABRIC_REGID			70
+#define CLV_BEGIN_AUDIO_FABRIC_REGID		71
+#define CLV_END_AUDIO_FABRIC_REGID		83
+#define CLV_BEGIN_SEC_FABRIC_REGID		84
+#define CLV_END_SEC_FABRIC_REGID		99
+#define CLV_END_FABRIC_REGID			100
+
+#define CLV_SC_FABRIC_PSH_T0_REGID		18
+#define CLV_SC_FABRIC_PSH_I0_REGID		19
+#define CLV_SC_FABRIC_ARC_T0_REGID		20
+#define CLV_SC_FABRIC_ARC_I0_REGID		21
+
+static char *FabricFlagStatusErrLogDetail_pnw_clv[] = {
+	"Main Fabric Flag Status",
+	"Audio Fabric Flag Status",
+	"Secondary Fabric Flag Status",
+	"GP Fabric Flag Status",
+	"Lower 64bit part SC Fabric Flag Status",
+	"Upper 64bit part SC Fabric Flag Status",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"",
+	"TA ERROR LOG register for initiator iosf2ocp_t0 in Main Fabric @200MHz{mnf}",
+	"IA ERROR LOG Register for initiator iosf2ocp_i0 in Main Fabric @200MHz{mnf}",
+	"TA ERROR LOG register for initiator psh_t0 in SC Fabric @100MHz{scf}",
+	"IA ERROR LOG Register for initiator psh_i0 in SC Fabric @100MHz{scf}",
+	"TA ERROR LOG register for initiator arc_t0 in SC Fabric @100MHz{scf}",
+	"IA ERROR LOG Register for initiator arc_i0 in SC Fabric @100MHz{scf}",
+	"IA ERROR LOG Register for initiator usb3_i0 in Main Fabric @200MHz{mnf}",
+	"IA ERROR LOG Register for initiator usb3_i1 in Main Fabric @200MHz{mnf}",
+	"IA ERROR LOG Register for initiator mfth_i0 in Main Fabric @200MHz{mnf}",
+	"IA ERROR LOG Register for initiator cha_i0 in Main Fabric @200MHz{mnf}",
+	"IA ERROR LOG Register for initiator otg_i0 in Main Fabric @200MHz{mnf}",
+	"TA ERROR LOG register for initiator usb3_t0 in Main Fabric @200MHz{mnf}",
+	"TA ERROR LOG register for initiator ptistm_t0 in Main Fabric @200MHz{mnf}",
+	"TA ERROR LOG register for initiator ptistm_t1 in Main Fabric @200MHz{mnf}",
+	"TA ERROR LOG register for initiator ptistm_t2 in Main Fabric @200MHz{mnf}",
+	"TA ERROR LOG register for initiator mfth_t0 in Main Fabric @200MHz{mnf}",
+	"TA ERROR LOG register for initiator cha_t0 in Main Fabric @200MHz{mnf}",
+	"TA ERROR LOG register for initiator otg_t0 in Main Fabric @200MHz{mnf}",
+	"TA ERROR LOG register for initiator runctl_t0 in Main Fabric @200MHz{mnf}",
+	"TA ERROR LOG register for initiator usb3phy_t0 in Main Fabric @200MHz{mnf}",
+	"IA ERROR LOG Register for initiator ilb_i0 in SC Fabric @100MHz{scf}",
+	"IA ERROR LOG Register for initiator scdma_i0 in SC Fabric @100MHz{scf}",
+	"IA ERROR LOG Register for initiator scdma_i1 in SC Fabric @100MHz{scf}",
+	"IA ERROR LOG Register for initiator uart_i0 in SC Fabric @100MHz{scf}",
+	"TA ERROR LOG register for initiator ilb_t0 in SC Fabric @100MHz{scf}",
+	"TA ERROR LOG register for initiator ipc1_t0 in SC Fabric @100MHz{scf}",
+	"TA ERROR LOG register for initiator ipc2_t0 in SC Fabric @100MHz{scf}",
+	"TA ERROR LOG register for initiator mbb_t0 in SC Fabric @100MHz{scf}",
+	"TA ERROR LOG register for initiator spi4_t0 in SC Fabric @100MHz{scf}",
+	"TA ERROR LOG register for initiator scdma_t0 in SC Fabric @100MHz{scf}",
+	"TA ERROR LOG register for initiator kbd_t0 in SC Fabric @100MHz{scf}",
+	"TA ERROR LOG register for initiator sccb_t0 in SC Fabric @100MHz{scf}",
+	"TA ERROR LOG register for initiator timers_t0 in SC Fabric @100MHz{scf}",
+	"TA ERROR LOG register for initiator pmu_t0 in SC Fabric @100MHz{scf}",
+	"TA ERROR LOG register for initiator gpio192_t0 in SC Fabric @100MHz{scf}",
+	"TA ERROR LOG register for initiator i2c0_t0 in SC Fabric @100MHz{scf}",
+	"TA ERROR LOG register for initiator uart_t0 in SC Fabric @100MHz{scf}",
+	"TA ERROR LOG register for initiator ssc_t0 in SC Fabric @100MHz{scf}",
+	"TA ERROR LOG register for initiator pwm_t0 in SC Fabric @100MHz{scf}",
+	"TA ERROR LOG register for initiator pcache_t0 in SC Fabric @100MHz{scf}",
+	"TA ERROR LOG register for initiator i2c89_t0 in SC Fabric @100MHz{scf}",
+	"TA ERROR LOG register for initiator i2c89_t1 in SC Fabric @100MHz{scf}",
+	"IA ERROR LOG Register for initiator gpdma_i0 in GP Fabric @100MHz{gpf}",
+	"IA ERROR LOG Register for initiator gpdma_i1 in GP Fabric @100MHz{gpf}",
+	"TA ERROR LOG register for initiator spi5_t0 in GP Fabric @100MHz{gpf}",
+	"TA ERROR LOG register for initiator ssp6_t0 in GP Fabric @100MHz{gpf}",
+	"TA ERROR LOG register for initiator gpdma_t0 in GP Fabric @100MHz{gpf}",
+	"TA ERROR LOG register for initiator i2c12_t0 in GP Fabric @100MHz{gpf}",
+	"TA ERROR LOG register for initiator i2c12_t1 in GP Fabric @100MHz{gpf}",
+	"TA ERROR LOG register for initiator i2c3_t0 in GP Fabric @100MHz{gpf}",
+	"TA ERROR LOG register for initiator i2c45_t0 in GP Fabric @100MHz{gpf}",
+	"TA ERROR LOG register for initiator i2c45_t1 in GP Fabric @100MHz{gpf}",
+	"TA ERROR LOG register for initiator i2c67_t0 in GP Fabric @100MHz{gpf}",
+	"TA ERROR LOG register for initiator i2c67_t1 in GP Fabric @100MHz{gpf}",
+	"TA ERROR LOG register for initiator ssp3_t0 in GP Fabric @100MHz{gpf}",
+	"IA ERROR LOG Register for initiator pifocp_i0 in Audio Fabric @50MHz{adf}",
+	"IA ERROR LOG Register for initiator adma0_i0 in Audio Fabric @50MHz{adf}",
+	"IA ERROR LOG Register for initiator adma0_i1 in Audio Fabric @50MHz{adf}",
+	"IA ERROR LOG Register for initiator adma1_i0 in Audio Fabric @50MHz{adf}",
+	"IA ERROR LOG Register for initiator adma1_i1 in Audio Fabric @50MHz{adf}",
+	"TA ERROR LOG register for initiator ssp0_t0 in Audio Fabric @50MHz{adf}",
+	"TA ERROR LOG register for initiator ssp1_t0 in Audio Fabric @50MHz{adf}",
+	"TA ERROR LOG register for initiator ssp2_t0 in Audio Fabric @50MHz{adf}",
+	"TA ERROR LOG register for initiator slim1_t0 in Audio Fabric @50MHz{adf}",
+	"TA ERROR LOG register for initiator pifocp_t0 in Audio Fabric @50MHz{adf}",
+	"TA ERROR LOG register for initiator adma0_t0 in Audio Fabric @50MHz{adf}",
+	"TA ERROR LOG register for initiator adma1_t0 in Audio Fabric @50MHz{adf}",
+	"TA ERROR LOG register for initiator mboxram_t0 in Audio Fabric @50MHz{adf}",
+	"IA ERROR LOG Register for initiator sdio0_i0 in Secondary Fabric @100MHz{sdf}",
+	"IA ERROR LOG Register for initiator emmc01_i0 in Secondary Fabric @100MHz{sdf}",
+	"IA ERROR LOG Register for initiator emmc01_i1 in Secondary Fabric @100MHz{sdf}",
+	"IA ERROR LOG Register for initiator sdio1_i0 in Secondary Fabric @100MHz{sdf}",
+	"IA ERROR LOG Register for initiator hsi_i0 in Secondary Fabric @100MHz{sdf}",
+	"IA ERROR LOG Register for initiator mph_i0 in Secondary Fabric @100MHz{sdf}",
+	"IA ERROR LOG Register for initiator sfth_i0 in Secondary Fabric @100MHz{sdf}",
+	"IA ERROR LOG Register for initiator dfxsctap_i0 in Secondary Fabric @100MHz{sdf}",
+	"TA ERROR LOG register for initiator sram_t0 in Secondary Fabric @100MHz{sdf}",
+	"TA ERROR LOG register for initiator sdio0_t0 in Secondary Fabric @100MHz{sdf}",
+	"TA ERROR LOG register for initiator emmc01_t0 in Secondary Fabric @100MHz{sdf}",
+	"TA ERROR LOG register for initiator emmc01_t1 in Secondary Fabric @100MHz{sdf}",
+	"TA ERROR LOG register for initiator sdio1_t0 in Secondary Fabric @100MHz{sdf}",
+	"TA ERROR LOG register for initiator hsi_t0 in Secondary Fabric @100MHz{sdf}",
+	"TA ERROR LOG register for initiator mph_t0 in Secondary Fabric @100MHz{sdf}",
+	"TA ERROR LOG register for initiator sfth_t0 in Secondary Fabric @100MHz{sdf}",
+	"",
+};
+
+#define MAX_FULLCHIP_INITID_VAL		16
+#define MAX_SECONDARY_INITID_VAL	18
+#define MAX_AUDIO_INITID_VAL		5
+#define MAX_SC_INITID_VAL			9
+#define MAX_GP_INITID_VAL			2
+
+static char *init_id_str_fullchip_tng[] = {
+	"iosf2ocp_i0 (thread 0)",
+	"iosf2ocp_i0 (thread 1)",
+	"iosf2ocp_i0 (thread 2)",
+	"iosf2ocp_i0 (thread 3)",
+	"sdf2mnf_i0 (thread 0)",
+	"sdf2mnf_i0 (thread 1)",
+	"sdf2mnf_i0 (thread 2)",
+	"sdf2mnf_i0 (thread 3)",
+	"sdf2mnf_i0 (thread 4)",
+	"sdf2mnf_i0 (thread 5)",
+	"sdf2mnf_i0 (thread 6)",
+	"sdf2mnf_i0 (thread 7)",
+	"usb3_i0",
+	"usb3_i1",
+	"mfth_i0",
+	"cha_i0",
+	"otg_i0"
+};
+
+static char *init_id_str_secondary_tng[] = {
+	"mnf2sdf_i0 (thread 0)",
+	"mnf2sdf_i0 (thread 1)",
+	"sdio0_i0",
+	"gpf2sdf_i0 (thread 0)",
+	"gpf2sdf_i0 (thread 1)",
+	"emmc01_i0",
+	"scf2sdf_i0 (thread 0)",
+	"scf2sdf_i0 (thread 1)",
+	"scf2sdf_i0 (thread 2)",
+	"scf2sdf_i0 (thread 3)",
+	"sdio1_i0",
+	"adf2sdf_i0 (thread 0)",
+	"adf2sdf_i0 (thread 1)",
+	"adf2sdf_i0 (thread 2)",
+	"hsi_i0 (thread 0)",
+	"hsi_i0 (thread 1)",
+	"mph_i0",
+	"sfth_i0",
+	"dfxsctap_i0"
+};
+
+static char *init_id_str_audio_tng[] = {
+	"sdfadf_i0",
+	"pif2ocp_i0",
+	"adma0_i0",
+	"adma0_i1",
+	"adma1_i0",
+	"adma1_i1"
+};
+
+static char *init_id_str_sc_tng[] = {
+	"ilb_i0",
+	"sdf2scf_i0 (thread 0)",
+	"sdf2scf_i0 (thread 1)",
+	"scdma_i0",
+	"scdma_i1",
+	"arc_i0 (thread 0)",
+	"arc_i0 (thread 1)",
+	"arc_i0 (thread 2)",
+	"uart_i0",
+	"psh_i0"
+};
+
+static char *init_id_str_gp_tng[] = {
+	"sd2gpf_i0",
+	"gpdma_i0",
+	"gpdma_i1"
+};
+
+static char *init_id_str_unknown = "unknown";
+
+char *get_element_flagsts_detail(u8 id)
+{
+	if ((intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_PENWELL) ||
+		(intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_CLOVERVIEW)) {
+		if (id < CLV_END_FABRIC_REGID)
+			return FabricFlagStatusErrLogDetail_pnw_clv[id];
+		else
+			return FabricFlagStatusErrLogDetail_pnw_clv[CLV_END_FABRIC_REGID];
+	}
+	if (id < END_FABRIC_REGID)
+		return FabricFlagStatusErrLogDetail_tng[id];
+
+	return FabricFlagStatusErrLogDetail_tng[END_FABRIC_REGID];
+}
+
+char *get_element_errorlog_detail_pnw_clv(u8 id, u32 *fabric_type)
+{
+	if (id >= CLV_BEGIN_MAIN_FABRIC_REGID && id <= CLV_END_MAIN_FABRIC_REGID &&
+		id != CLV_SC_FABRIC_PSH_T0_REGID && id != CLV_SC_FABRIC_PSH_I0_REGID &&
+		id != CLV_SC_FABRIC_ARC_T0_REGID && id != CLV_SC_FABRIC_ARC_I0_REGID)
+		*fabric_type = FAB_ID_FULLCHIP;
+
+	else if (id >= CLV_BEGIN_SEC_FABRIC_REGID &&
+		 id <= CLV_END_SEC_FABRIC_REGID)
+		*fabric_type = FAB_ID_SECONDARY;
+
+	else if (id >= CLV_BEGIN_AUDIO_FABRIC_REGID &&
+		 id <= CLV_END_AUDIO_FABRIC_REGID)
+		*fabric_type = FAB_ID_AUDIO;
+
+	else if (id >= CLV_BEGIN_GP_FABRIC_REGID &&
+			id <= CLV_END_GP_FABRIC_REGID)
+		*fabric_type = FAB_ID_GP;
+
+	else if ((id >= CLV_BEGIN_SC_FABRIC_REGID &&
+			id <= CLV_END_SC_FABRIC_REGID) ||
+			id == CLV_SC_FABRIC_PSH_T0_REGID ||
+			id == CLV_SC_FABRIC_PSH_I0_REGID ||
+			id == CLV_SC_FABRIC_ARC_T0_REGID ||
+			id == CLV_SC_FABRIC_ARC_I0_REGID)
+		*fabric_type = FAB_ID_SC;
+	else
+		*fabric_type = FAB_ID_UNKNOWN;
+	return get_element_flagsts_detail(id);
+}
+
+char *get_element_errorlog_detail_tng(u8 id, u32 *fabric_type)
+{
+	if (id >= BEGIN_MAIN_FABRIC_REGID && id <= END_MAIN_FABRIC_REGID &&
+		id != SC_FABRIC_PSH_I0_REGID && id != SC_FABRIC_ARC_I0_REGID)
+		*fabric_type = FAB_ID_FULLCHIP;
+
+	else if (id >= BEGIN_SEC_FABRIC_REGID &&
+		 id <= END_SEC_FABRIC_REGID)
+		*fabric_type = FAB_ID_SECONDARY;
+
+	else if (id >= BEGIN_AUDIO_FABRIC_REGID &&
+		 id <= END_AUDIO_FABRIC_REGID)
+		*fabric_type = FAB_ID_AUDIO;
+
+	else if (id >= BEGIN_GP_FABRIC_REGID &&
+			id <= END_GP_FABRIC_REGID)
+		*fabric_type = FAB_ID_GP;
+
+	else if ((id >= BEGIN_SC_FABRIC_REGID &&
+			id <= END_SC_FABRIC_REGID) ||
+			id == SC_FABRIC_PSH_I0_REGID ||
+			id == SC_FABRIC_ARC_I0_REGID)
+		*fabric_type = FAB_ID_SC;
+
+	else if (id >= BEGIN_PM_MAIN_FABRIC_REGID &&
+		 id <= END_PM_MAIN_FABRIC_REGID)
+		*fabric_type = FAB_ID_PM_FULLCHIP;
+
+	else if (id >= BEGIN_PM_AUDIO_FABRIC_REGID &&
+		 id <= END_PM_AUDIO_FABRIC_REGID)
+		*fabric_type = FAB_ID_PM_AUDIO;
+
+	else if (id >= BEGIN_PM_SEC_FABRIC_REGID &&
+		 id <= END_PM_SEC_FABRIC_REGID)
+		*fabric_type = FAB_ID_PM_SECONDARY;
+
+	else if (id >= BEGIN_PM_SC_FABRIC_REGID &&
+		 id <= END_PM_SC_FABRIC_REGID)
+		*fabric_type = FAB_ID_PM_SC;
+
+	else if (id >= BEGIN_PM_GP_FABRIC_REGID &&
+		 id <= END_PM_GP_FABRIC_REGID)
+		*fabric_type = FAB_ID_PM_GP;
+
+	else
+		*fabric_type = FAB_ID_UNKNOWN;
+
+	return get_element_flagsts_detail(id);
+}
+
+char *get_element_errorlog_detail(u8 id, u32 *fabric_type)
+{
+	if ((intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_PENWELL) ||
+		(intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_CLOVERVIEW))
+			return get_element_errorlog_detail_pnw_clv(id, fabric_type);
+	return get_element_errorlog_detail_tng(id, fabric_type);
+}
+
+char *get_errortype_str(u16 error_type)
+{
+	u16 error = error_type & 0xFF;
+
+	if (!error_type)
+		return "Not set";
+
+	switch(error_type & 0xFF00) {
+
+	case 0xE100 :
+
+		if (error < ARRAY_SIZE(ScuBoot_ErrorTypes))
+			return ScuBoot_ErrorTypes[error];
+		return "Unknown";
+
+	case 0xE600 :
+
+		if (error < ARRAY_SIZE(ScuRuntime_ErrorTypes))
+			return ScuRuntime_ErrorTypes[error];
+		return "Unknown";
+
+	case 0xF000 :
+
+		if (error < ARRAY_SIZE(ScuFabric_ErrorTypes))
+			return ScuFabric_ErrorTypes[error];
+		return "Unknown";
+
+	default :
+
+		return "Unknown";
+	}
+}
+
+char *get_initiator_id_str(int init_id, u32 fabric_id)
+{
+	switch (fabric_id) {
+
+	case FAB_ID_PM_FULLCHIP:
+	case FAB_ID_FULLCHIP:
+
+		if (init_id > MAX_FULLCHIP_INITID_VAL)
+			return init_id_str_unknown;
+
+		return init_id_str_fullchip_tng[init_id];
+
+	case FAB_ID_PM_AUDIO:
+	case FAB_ID_AUDIO:
+
+		if (init_id > MAX_AUDIO_INITID_VAL)
+			return init_id_str_unknown;
+
+		return init_id_str_audio_tng[init_id];
+
+	case FAB_ID_PM_SECONDARY:
+	case FAB_ID_SECONDARY:
+
+		if (init_id > MAX_SECONDARY_INITID_VAL)
+			return init_id_str_unknown;
+
+		return init_id_str_secondary_tng[init_id];
+
+	case FAB_ID_PM_GP:
+	case FAB_ID_GP:
+
+		if (init_id > MAX_GP_INITID_VAL)
+			return init_id_str_unknown;
+
+		return init_id_str_gp_tng[init_id];
+
+	case FAB_ID_PM_SC:
+	case FAB_ID_SC:
+
+		if (init_id > MAX_SC_INITID_VAL)
+			return init_id_str_unknown;
+
+		return init_id_str_sc_tng[init_id];
+
+	default:
+		return init_id_str_unknown;
+	}
+}
+
+int errorlog_element_type(u8 id_type)
+{
+	if (id_type >= 0 && id_type <= 15)
+		return 0; /* flag_status */
+	else
+		return 1; /* ia_errorlog */
+}
diff --git a/drivers/external_drivers/drivers/misc/intel_fabricid_def.h b/drivers/external_drivers/drivers/misc/intel_fabricid_def.h
new file mode 100644
index 0000000..669296f
--- /dev/null
+++ b/drivers/external_drivers/drivers/misc/intel_fabricid_def.h
@@ -0,0 +1,82 @@
+/*
+ * drivers/misc/intel_fabricid_def.h
+ *
+ * Copyright (C) 2011 Intel Corp
+ * Author: winson.w.yung@intel.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#ifndef __INTEL_FABRICID_DEF_H
+#define __INTEL_FABRICID_DEF_H
+
+#define FAB_ID_FULLCHIP					0
+#define FAB_ID_AUDIO					1
+#define FAB_ID_SECONDARY				2
+#define FAB_ID_GP					3
+#define FAB_ID_SC					4
+#define FAB_ID_SC1					5
+#define FAB_ID_UNKNOWN					6
+#define FAB_ID_PM_FULLCHIP				7
+#define FAB_ID_PM_AUDIO					8
+#define FAB_ID_PM_SECONDARY				9
+#define FAB_ID_PM_GP					10
+#define FAB_ID_PM_SC					11
+
+enum scu_cold_boot_err {
+	ERR_MEM_ERR		= 0xE101,
+	ERR_INST_ERR,
+	ERR_FABRIC_ERR,
+	ERR_SRAM_ECC_ERR	= 0xE104,
+	ERR_NORTH_FUSES_ERR	= 0xE106,
+	ERR_KERNEL_HANG_ERR	= 0xE10A,
+	ERR_KERNEL_WDT_ERR,
+	ERR_SCUWDT_CHAABIWDT,
+	ERR_FABRIC_XML_ERR
+};
+
+enum scu_runtime_err {
+	ERR_PLL_LOCKSLIP_ERR	= 0xE601,
+	ERR_UNDEFINED_L1_ERR	= 0xE603,
+	ERR_PUINT_INT_MBB_TMOUT_ERR,
+	ERR_FUSE_VOLTATK_ERR,
+	ERR_FUSE_VOLT_SAIATK_ERR,
+	ERR_LPE_INT_ERR,
+	ERR_PSH_INT_ERR,
+	ERR_FUSE_INT_ERR,
+	ERR_UNSUPPORTED_IPC2_ERR,
+	ERR_KWDT_IPC_ERR
+};
+
+enum scu_recoverable_fab_err {
+	ERR_UNSUPPORTED_CMD_ERR	= 0xF00A,
+	ERR_ADDR_HOLE_ERR,
+	ERR_PROTECTION_ERR,
+	ERR_MEM_ASSERTION_ERR
+};
+
+enum scu_fatal_fab_err {
+	ERR_REQ_TIMEOUT_NOT_ACCEPTED	= 0xF00E,
+	ERR_REQ_TIMEOUT_NO_RESPONSE,
+	ERR_REQ_TIMEOUT_DATA_NOT_ACCEPTED
+};
+
+char *fabric_error_lookup(u32 fab_id, u32 error_index, int use_hidword);
+char *get_errortype_str(u16 error_type);
+int errorlog_element_type(u8 id_type);
+char *get_element_errorlog_detail(u8 id, u32 *fabric_type);
+char *get_element_flagsts_detail(u8 id);
+char *get_initiator_id_str(int init_id, u32 fabric_id);
+
+#endif /* __INTEL_FABRICID_DEF_H */
diff --git a/drivers/external_drivers/drivers/misc/intel_fw_logging.c b/drivers/external_drivers/drivers/misc/intel_fw_logging.c
new file mode 100644
index 0000000..5c89ea8e
--- /dev/null
+++ b/drivers/external_drivers/drivers/misc/intel_fw_logging.c
@@ -0,0 +1,2636 @@
+/*
+ * drivers/misc/intel_fw_logging.c
+ *
+ * Copyright (C) 2011 Intel Corp
+ * Author: winson.w.yung@intel.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/rpmsg.h>
+#include <linux/notifier.h>
+#include <linux/delay.h>
+#include <linux/ctype.h>
+#include <linux/intel_mid_pm.h>
+#include <asm/intel_mid_rpmsg.h>
+
+#include <linux/io.h>
+#include <asm/intel_scu_ipc.h>
+#include <asm/intel_scu_ipcutil.h>
+#ifdef CONFIG_TRACEPOINT_TO_EVENT
+#include <trace/events/tp2e.h>
+#endif
+
+#include "intel_fabricid_def.h"
+#include "intel_fw_trace.h"
+
+/*
+   OSHOB - OS Handoff Buffer
+
+   This buffer contains the 32-byte value that is persists across cold and warm
+   resets only, but loses context on a cold boot.
+
+   More info about OSHOB, OSNIB could be found in FAS Section 2.8.
+   We use the first byte in OSNIB to store and pass the Reboot/boot Reason.
+   The attribute of OS image is selected for Reboot/boot reason.
+*/
+
+#define RECOVERABLE_FABERR_INT		9
+#define MAX_FID_REG_LEN			32
+
+#define USE_LEGACY()							\
+	(intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_PENWELL ||	\
+	 intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_CLOVERVIEW)
+
+#define NON_LEGACY()					\
+	((intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER &&	\
+	  intel_mid_soc_stepping() == 1) ||				\
+	 (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_ANNIEDALE))
+
+/* The legacy fabric error logging struct (e.g. Clovertrail) takes 12 dwords
+ * of basic, and 9 additional dwords of extension.
+ */
+#define MAX_NUM_LOGDWORDS		12
+#define MAX_NUM_LOGDWORDS_EXTENDED	9
+#define MAX_NUM_ALL_LOGDWORDS_LEGACY	(MAX_NUM_LOGDWORDS +		\
+					 MAX_NUM_LOGDWORDS_EXTENDED)
+#define SIZE_ALL_LOGDWORDS_LEGACY	(MAX_NUM_ALL_LOGDWORDS_LEGACY * \
+					 sizeof(u32))
+
+/* The new fabric error logging struct (e.g. Tangier) takes a maximum
+ * of 50 dwords.
+ */
+#define MAX_NUM_ALL_LOGDWORDS		50
+#define SIZE_ALL_LOGDWORDS		(MAX_NUM_ALL_LOGDWORDS *	\
+					 sizeof(u32))
+
+#define SCULOG_MAGIC			0x5343554c /* SCUL */
+#define SCULOG_DUMP_MAGIC		0x0515dead
+#define FABERR_INDICATOR		0x15
+#define FABERR_INDICATOR1		0x0dec0ded
+#define FWERR_INDICATOR			0x7
+#define UNDEFLVL1ERR_IND		0x11
+#define UNDEFLVL2ERR_IND		0x22
+#define SWDTERR_IND			0xdd
+#define MEMERR_IND			0xf501
+#define INSTERR_IND			0xf502
+#define ECCERR_IND			0xf504
+#define FATALERR_IND			0xf505
+#define INFORMATIVE_MSG_IND		0xf506
+#define FLAG_HILOW_MASK			8
+#define FAB_ID_MASK			7
+#define MAX_AGENT_IDX			15
+
+#define MAX_INPUT_LENGTH		32
+#define DWORDS_PER_LINE			2
+
+/* Safety limits for SCU extra trace dump */
+#define LOWEST_PHYS_SRAM_ADDRESS	0xFFFC0000
+#define MAX_SCU_EXTRA_DUMP_SIZE		4096
+
+/* Special indexes in error data */
+#define FABRIC_ERR_STS_IDX		0
+#define FABRIC_ERR_SIGNATURE_IDX	10
+
+/* For new fabric error logging layout */
+#define FABRIC_ERR_HEADER		0
+#define FABRIC_ERR_SIGNATURE_IDX1	1
+#define FABRIC_ERR_SCU_VERSIONINFO	2
+#define FABRIC_ERR_ERRORTYPE		3
+#define FABRIC_ERR_REGID0		4
+#define FABRIC_ERR_RECV_DUMP_START	5
+#define FABRIC_ERR_RECV_DUMP_LENGTH	6
+#define FABRIC_ERR_RECV_DUMP_START2	(FABRIC_ERR_RECV_DUMP_START + \
+					 FABRIC_ERR_RECV_DUMP_LENGTH)
+#define FABRIC_ERR_MAXIMUM_TXT		2048
+
+/* Timeout in ms we wait SCU to generate dump on panic */
+#define SCU_PANIC_DUMP_TOUT		1
+#define SCU_PANIC_DUMP_RECHECK		5
+
+/* The SCU_PANIC_DUMP_RECHECK value doesn't*/
+/* work for MRFLD, we need a longer delay. */
+#define SCU_PANIC_DUMP_RECHECK1		100
+
+#define output_str(ret, out, size, a...)				\
+	do {								\
+		if (out && (size) - (ret) > 1) {			\
+			(ret) += snprintf((out) + (ret),		\
+					  (size) - (ret), ## a);	\
+			if ((size) - (ret) <= 0)			\
+				ret = size - 1;				\
+		} else {						\
+			pr_info(a);					\
+		}							\
+	} while (0)
+
+union error_log {
+	struct {
+		u32 cmd:3;
+		u32 signature:5;
+		u32 initid:8;
+		u32 num_err_logs:4;
+		u32 agent_idx:4;
+		u32 err_code:4;
+		u32 fw_err_ind:3;
+		u32 multi_err:1;
+	} fields;
+	u32 data;
+};
+
+union fabric_status {
+	struct {
+		u32 status_has_hilo:11;
+		u32 flag_status_cnt:5;
+		u32 status_has_hilo1:12;
+		u32 regidx:4;
+	} fields;
+	u32 data;
+};
+
+union flag_status_hilo {
+	struct {
+		/* Maps to flag_status [10..0] or [42..32] */
+		u32 bits_rang0:11;
+		u32 reserved1:5;
+
+		/* Maps to flag_status [27..16] or [59..48] */
+		u32 bits_rang1:12;
+		u32 reserved:4;
+	} fields;
+	u32 data;
+};
+
+/* For new fabric error log format layout */
+
+union error_header {
+	struct {
+		u32 num_of_recv_err:6;
+		u32 recv_err_count_overflow:1;
+		u32 logging_buf_full:1;
+		u32 num_flag_regs:8;
+		u32 num_err_regs:8;
+		u32 checksum:8;
+	} fields;
+	u32 data;
+};
+
+union error_scu_version {
+	struct {
+		u32 scu_rt_minor_ver:16;
+		u32 scu_rt_major_ver:16;
+	} fields;
+	u32 data;
+};
+
+union scu_error_type {
+	struct {
+		u32 postcode_err_type:16;
+		u32 protect_err_type:16;
+	} fields;
+	u32 data;
+};
+
+
+union reg_ids {
+	struct {
+		u32 reg_id0:8;
+		u32 reg_id1:8;
+		u32 reg_id2:8;
+		u32 reg_id3:8;
+	} fields;
+	u32 data;
+};
+
+static void __iomem *oshob_base;
+static u32 *log_buffer;
+static u32 log_buffer_sz;
+
+static char *parsed_fab_err;
+static u32 parsed_fab_err_sz;
+static u32 parsed_fab_err_length;
+
+static void __iomem *tmp_ia_trace_buf;
+static void __iomem *fabric_err_buf1;
+static void __iomem *fabric_err_buf2;
+static void __iomem *sram_trace_buf;
+
+static struct scu_trace_hdr_t trace_hdr;
+
+static bool global_scutrace_enable;
+static bool global_unsolicit_scutrace_enable;
+
+static u32 *scu_trace_buffer;
+static int scu_trace_buffer_size;
+
+static struct kobject *scutrace_kobj;
+
+static u32 *new_scu_trace_buffer;
+static u32 new_scu_trace_buffer_size;
+static u32 new_scu_trace_buffer_rb_size;
+
+static u32 new_sculog_offline_size;
+static u32 *new_sculog_offline_buf;
+
+static struct sculog_list {
+	struct list_head list;
+	char *data;
+	u32 size;
+	u32 curpos;
+} pending_sculog_list;
+
+/* Structure of the most important data of SCU Recoverable FE to save */
+static struct recovfe_list {
+	struct list_head list;
+	union error_header	header;
+	union error_scu_version	scuversion;
+	union scu_error_type	errortype;
+	union reg_ids		regid0;
+	u32			dumpDw1[FABRIC_ERR_RECV_DUMP_LENGTH];
+	u32			dumpDw2[FABRIC_ERR_RECV_DUMP_LENGTH];
+} pending_recovfe_list;
+
+static DEFINE_SPINLOCK(parsed_faberr_lock);
+static DEFINE_SPINLOCK(pending_list_lock);
+
+/* This lock protects the list of SCU Recoverable FE information, */
+/* stacked during the SCU Recoverable FE hard-irq, and unstacked  */
+/* during the interruptible thread (soft-irq) */
+static DEFINE_SPINLOCK(pending_recovfe_lock);
+
+static int scu_trace_irq;
+static int recoverable_irq;
+
+static struct rpmsg_instance *fw_logging_instance;
+
+static char *fabric_names[] = {
+	"\nFull Chip Fabric [error]\n\n",
+	"\nAudio Fabric [error]\n\n",
+	"\nSecondary Chip Fabric [error]\n\n",
+	"\nGP Fabric [error]\n\n",
+	"\nSC Fabric [error]\n\n",
+	"\nSC1 Fabric [error]\n\n",
+	"\nUnknown Fabric [error]\n\n"
+};
+
+static char *agent_names[] = {
+	"FULLFAB_FLAG_STATUS",
+	"AUDIO",
+	"SECONDARY",
+	"GP",
+	"SC",
+	"CDMI_TOCP_TA",
+	"CDMI_IOCP_IA",
+	"FCSF_IOCP_IA",
+	"FCGF_IOCP_IA",
+	"AFSF_IOCP_IA",
+	"SFFC_IOCP_IA",
+	"SFAF_IOCP_IA",
+	"SFSC_IOCP_IA",
+	"GFFC_IOCP_IA",
+	"ARC_IOCP_IA",
+	"SCSF_TOCP_TA"
+};
+
+static bool disable_scu_tracing;
+static int set_disable_scu_tracing(const char *val,
+				   const struct kernel_param *kp)
+{
+	int err;
+	bool saved_value;
+
+	if (!USE_LEGACY()) {
+		pr_err("Unsupported option, use sysfs"
+			" scutrace_status instead.\n");
+
+		return -EINVAL;
+	}
+
+	saved_value = kp->arg;
+	err = param_set_bool(val, kp);
+
+	if (err || ((bool)kp->arg == saved_value))
+		return err;
+
+	if (disable_scu_tracing)
+		disable_irq(scu_trace_irq);
+	else
+		enable_irq(scu_trace_irq);
+
+	return 0;
+}
+
+static struct kernel_param_ops disable_scu_tracing_ops = {
+	.set = set_disable_scu_tracing,
+	.get = param_get_bool,
+};
+
+module_param_cb(disable_scu_tracing, &disable_scu_tracing_ops,
+		&disable_scu_tracing,  S_IRUSR | S_IWUSR);
+MODULE_PARM_DESC(disable_scu_tracing,
+		"Disable scu tracing"
+		 "Set to 1 to prevent SCU tracing messages in dmesg");
+
+static irqreturn_t fw_logging_irq_thread(int irq, void *ignored)
+{
+	char *trace, *end, prefix[20];
+	unsigned int count;
+	int i, len;
+	u32 size;
+
+	i = snprintf(prefix, sizeof(prefix), "SCU TRACE ");
+	switch (trace_hdr.cmd & TRACE_ID_MASK) {
+	case TRACE_ID_INFO:
+		i += snprintf(prefix + i, sizeof(prefix) - i, "INFO");
+		break;
+	case TRACE_ID_ERROR:
+		i += snprintf(prefix + i, sizeof(prefix) - i, "ERROR");
+		break;
+	default:
+		pr_err("Invalid message ID!\n");
+		break;
+	}
+
+	snprintf(prefix + i, sizeof(prefix) - i, ": ");
+
+	if (trace_hdr.cmd & TRACE_IS_ASCII) {
+		size = trace_hdr.size;
+		trace = (char *)scu_trace_buffer;
+		end = trace + trace_hdr.size;
+		while (trace < end) {
+			len = strnlen(trace, size);
+			if (!len) {
+				trace++;
+				continue;
+			}
+			pr_info("%s%s\n", prefix, trace);
+			trace += len + 1;
+			size -= len;
+		}
+	} else {
+		count = trace_hdr.size / sizeof(u32);
+
+		for (i = 0; i < count; i++)
+			pr_info("%s[%d]:0x%08x\n", prefix, i,
+				scu_trace_buffer[i]);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void read_scu_trace_hdr(struct scu_trace_hdr_t *hdr)
+{
+	unsigned int count;
+	u32 *buf = (u32 *) hdr;
+	int i;
+
+	if (!hdr)
+		return;
+
+	count = sizeof(struct scu_trace_hdr_t) / sizeof(u32);
+
+	if (!fabric_err_buf1) {
+		pr_err("Invalid Fabric Error buf1 offset\n");
+		return;
+	}
+
+	for (i = 0; i < count; i++)
+		*(buf + i)  = readl(fabric_err_buf1 + i * sizeof(u32));
+}
+
+static void read_sram_trace_buf(u32 *buf, u8 *scubuffer, unsigned int size)
+{
+	int i;
+	unsigned int count;
+
+	if (!buf || !scubuffer || !size)
+		return;
+
+	count = size / sizeof(u32);
+	for (i = 0; i < count; i++)
+		buf[i] = readl(scubuffer + i * sizeof(u32));
+}
+
+static irqreturn_t fw_logging_irq(int irq, void *ignored)
+{
+	read_scu_trace_hdr(&trace_hdr);
+
+	if (trace_hdr.magic != TRACE_MAGIC ||
+	    trace_hdr.offset + trace_hdr.size > scu_trace_buffer_size) {
+		pr_err("Invalid SCU trace!\n");
+		return IRQ_HANDLED;
+	}
+
+	read_sram_trace_buf(scu_trace_buffer,
+			    (u8 *) sram_trace_buf + trace_hdr.offset,
+			    trace_hdr.size);
+
+	return IRQ_WAKE_THREAD;
+}
+
+static void __iomem *get_oshob_addr(void)
+{
+	u32 oshob_base_addr = 0;
+	u16 oshob_size;
+	void __iomem *oshob_addr;
+
+	oshob_base_addr = intel_scu_ipc_get_oshob_base();
+	if (oshob_base_addr == 0) {
+		pr_err("Invalid OSHOB address!!\n");
+		return NULL;
+	}
+
+	oshob_size = intel_scu_ipc_get_oshob_size();
+	if (oshob_size == 0) {
+		pr_err("Size of oshob is null!!\n");
+		return NULL;
+	}
+
+	pr_debug("OSHOB addr is 0x%x size is %d\n",
+		 oshob_base_addr, oshob_size);
+
+	oshob_addr = ioremap_nocache(
+			(resource_size_t)oshob_base_addr,
+			(unsigned long)oshob_size);
+	if (oshob_addr == NULL) {
+		pr_err("ioremap of oshob address failed!!\n");
+		return NULL;
+	}
+
+	return oshob_addr; /* Return OSHOB base address */
+}
+
+static u8 caculate_checksum(u32 length)
+{
+	int i;
+	u8 checksum = 0;
+	u8 *array = (u8 *)log_buffer;
+
+	for (i = 0; i < length; i++)
+		checksum += array[i];
+
+	return ~checksum + 1;
+}
+
+static bool fw_error_found(bool use_legacytype, int *only_sculog)
+{
+	u8 checksum = 0;
+	union error_log err_log;
+	union error_header err_header;
+
+	if (use_legacytype) {
+
+		err_log.data = log_buffer[FABRIC_ERR_SIGNATURE_IDX];
+
+		/* No SCU/fabric error if tenth
+		 * DW signature field is not 10101 */
+		if (err_log.fields.signature != FABERR_INDICATOR)
+			return false;
+	} else {
+		*only_sculog = sram_trace_buf ? 1 : 0;
+
+		if (log_buffer[FABRIC_ERR_SIGNATURE_IDX1] !=
+			FABERR_INDICATOR1) {
+			return false;
+		}
+
+		err_header.data = log_buffer[FABRIC_ERR_HEADER];
+		checksum = err_header.fields.checksum;
+		err_header.fields.checksum = 0;
+		log_buffer[FABRIC_ERR_HEADER] = err_header.data;
+
+		if (caculate_checksum(MAX_NUM_ALL_LOGDWORDS << 2) !=
+		    checksum) {
+			pr_info("fw_error_found: new checksum error\n");
+			return false;
+		}
+	}
+
+	return true;
+}
+
+static int get_fabric_error_cause_detail(char *buf, u32 size, u32 fabid,
+					 u32 *fid_status, int ishidword)
+{
+	int index = 0, ret = 0;
+	char *ptr;
+	u32 fid_mask = 1;
+
+	while (index < MAX_FID_REG_LEN) {
+
+		if ((*fid_status) & fid_mask) {
+			ptr = fabric_error_lookup(fabid, index, ishidword);
+
+			if (ptr && *ptr)
+				output_str(ret, buf, size, "%s\n", ptr);
+		}
+
+		index++;
+		fid_mask <<= 1;
+	}
+
+	output_str(ret, buf, size, "\n");
+	return ret;
+}
+
+static int get_additional_error(char *buf, int size, int num_err_log,
+			u32 *faberr_dwords, int max_dw_left)
+{
+	int i = 0, ret = 0;
+	union error_log log;
+
+	output_str(ret, buf, size,
+		   "\nAdditional logs associated with error(s): ");
+
+	if (num_err_log) {
+
+		while (i < num_err_log && i < max_dw_left) {
+
+			output_str(ret, buf, size, "\nerror_log: 0x%X\n",
+				*(faberr_dwords + i));
+
+			output_str(ret, buf, size, "error_addr: 0x%X\n",
+				   *(faberr_dwords + i + 1));
+
+			log.data = *(faberr_dwords + i);
+
+			output_str(ret, buf, size,
+				   "\nDecoded error log detail\n");
+			output_str(ret, buf, size,
+				   "---------------------------\n\n");
+
+			output_str(ret, buf, size, "Agent Index:");
+			if (log.fields.agent_idx > MAX_AGENT_IDX) {
+				output_str(ret, buf, size,
+					   "Unknown agent index (%d)\n\n",
+					   log.fields.agent_idx);
+			} else {
+				output_str(ret, buf, size, "%s\n\n",
+					   agent_names[log.fields.agent_idx]);
+			}
+
+			output_str(ret, buf, size,
+				   "Cmd initiator ID: %d\n",
+				   log.fields.initid);
+
+			output_str(ret, buf, size, "Command: %d\n",
+				   log.fields.cmd);
+
+			output_str(ret, buf, size, "Code: %d\n",
+				   log.fields.err_code);
+
+			if (log.fields.multi_err)
+				output_str(ret, buf, size,
+					   "\n Multiple errors detected!\n");
+			i += 2; /* Skip one error_log/addr pair */
+		}
+	} else {
+		output_str(ret, buf, size, "Not present\n");
+	}
+
+	return ret;
+}
+
+char *get_fabric_name(u32 fabric_idx, u32 *fab_id)
+{
+	switch (fabric_idx) {
+	case 0: /* REGIDX [31..28] is x000 */
+	case 1: /* REGIDX [31..28] is x001 */
+	case 2: /* REGIDX [31..28] is x010 */
+	case 3: /* REGIDX [31..28] is x011 */
+	case 4: /* REGIDX [31..28] is x100 */
+		*fab_id = fabric_idx;
+		break;
+	default:
+		*fab_id = FAB_ID_UNKNOWN;
+		break;
+	}
+
+	return fabric_names[*fab_id];
+}
+
+static bool read_fwerr_log(u32 *buf, void __iomem *oshob_ptr)
+{
+	int count;
+	bool use_legacy = USE_LEGACY();
+	void __iomem *fabric_err_dump_offset = oshob_ptr +
+		intel_scu_ipc_get_fabricerror_buf1_offset();
+
+	if (fabric_err_dump_offset == oshob_ptr) {
+		pr_err("Invalid Fabric error buf1 offset\n");
+		return use_legacy;
+	}
+
+	if (!use_legacy) {
+		for (count = 0; count < MAX_NUM_ALL_LOGDWORDS; count++)
+			buf[count] = readl(fabric_err_dump_offset +
+					   count * sizeof(u32));
+	} else {
+		for (count = 0; count < MAX_NUM_LOGDWORDS; count++)
+			buf[count] = readl(fabric_err_dump_offset +
+					   count * sizeof(u32));
+
+		/* Get 9 additional DWORDS */
+		fabric_err_dump_offset = oshob_ptr +
+			intel_scu_ipc_get_fabricerror_buf2_offset();
+
+		if (fabric_err_dump_offset == oshob_ptr) {
+			/* Fabric error buf2 not available on all platforms. */
+			pr_warn("No Fabric Error buf2 offset available\n");
+			return use_legacy;
+		}
+
+		for (count = 0; count < MAX_NUM_LOGDWORDS_EXTENDED; count++)
+			buf[count + MAX_NUM_LOGDWORDS] =
+				readl(fabric_err_dump_offset +
+				      sizeof(u32) * count);
+	}
+
+	return use_legacy;
+}
+
+static int dump_fwerr_log(char *buf, int size)
+{
+	char *ptr = NULL;
+	union error_log err_log;
+	union flag_status_hilo flag_status;
+	union fabric_status err_status;
+	u32 id = FAB_ID_UNKNOWN;
+	int count, num_flag_status, num_err_logs;
+	int prev_id = FAB_ID_UNKNOWN, offset = 0, ret = 0;
+
+	err_status.data = log_buffer[FABRIC_ERR_STS_IDX];
+	err_log.data = log_buffer[FABRIC_ERR_SIGNATURE_IDX];
+
+	/* FW error if tenth DW reserved field is 111 */
+	if ((((err_status.data & 0xFFFF) == SWDTERR_IND) ||
+	     ((err_status.data & 0xFFFF) == UNDEFLVL1ERR_IND) ||
+	     ((err_status.data & 0xFFFF) == UNDEFLVL2ERR_IND) ||
+	     ((err_status.data & 0xFFFF) == MEMERR_IND) ||
+	     ((err_status.data & 0xFFFF) == INSTERR_IND) ||
+	     ((err_status.data & 0xFFFF) == ECCERR_IND) ||
+	     ((err_status.data & 0xFFFF) == FATALERR_IND) ||
+	     ((err_status.data & 0xFFFF) == INFORMATIVE_MSG_IND)) &&
+	    (err_log.fields.fw_err_ind == FWERR_INDICATOR)) {
+
+		output_str(ret, buf, size, "HW WDT expired");
+
+		switch (err_status.data & 0xFFFF) {
+		case SWDTERR_IND:
+			output_str(ret, buf, size,
+				   " without facing any exception.\n\n");
+			break;
+		case MEMERR_IND:
+			output_str(ret, buf, size,
+				   " following a Memory Error exception.\n\n");
+			break;
+		case INSTERR_IND:
+			output_str(ret, buf, size,
+				   " following an Instruction Error exception.\n\n");
+			break;
+		case ECCERR_IND:
+			output_str(ret, buf, size,
+				   " following a SRAM ECC Error exception.\n\n");
+			break;
+		case FATALERR_IND:
+			output_str(ret, buf, size,
+				   " following a FATAL Error exception.\n\n");
+			break;
+		case INFORMATIVE_MSG_IND:
+			output_str(ret, buf, size,
+				   " following an Informative Message.\n\n");
+			break;
+		default:
+			output_str(ret, buf, size, ".\n\n");
+			break;
+		}
+		output_str(ret, buf, size, "HW WDT debug data:\n");
+		output_str(ret, buf, size, "===================\n");
+		for (count = 0;
+			count < MAX_NUM_LOGDWORDS + MAX_NUM_LOGDWORDS_EXTENDED;
+			count++) {
+			output_str(ret, buf, size, "DW%d:0x%08x\n",
+				   count, log_buffer[count]);
+		}
+		goto out;
+	}
+
+	num_flag_status = err_status.fields.flag_status_cnt;
+	/* num_err_logs indicates num of error_log/addr pairs */
+	num_err_logs = err_log.fields.num_err_logs * 2;
+
+	output_str(ret, buf, size,
+		   "HW WDT fired following a Fabric Error exception.\n\n");
+	output_str(ret, buf, size, "Fabric Error debug data:\n");
+	output_str(ret, buf, size, "===================\n");
+
+	for (count = 0; count < num_flag_status; count++) {
+
+		err_status.data = log_buffer[count];
+		ptr = get_fabric_name(
+			err_status.fields.regidx & FAB_ID_MASK, &id);
+
+		/*
+		 * Only print the fabric name if is unknown
+		 * type, or we haven't printed out it yet.
+		 */
+
+		if (prev_id != id || id == FAB_ID_UNKNOWN) {
+			output_str(ret, buf, size, "%s", ptr);
+			prev_id = id;
+		}
+
+		flag_status.data = 0;
+		flag_status.fields.bits_rang0 =
+				err_status.fields.status_has_hilo;
+
+		flag_status.fields.bits_rang1 =
+				err_status.fields.status_has_hilo1;
+
+		/*
+		 * The most significant bit in REGIDX field is set
+		 * the flag_status has the high 32bit of the dword
+		 * otherwise the flag_status has the low 32bit of
+		 * the dword
+		 */
+
+		if (err_status.fields.regidx & FLAG_HILOW_MASK)
+			ret += get_fabric_error_cause_detail(buf + ret,
+							     size - ret,
+							     id,
+							     &flag_status.data,
+							     1);
+		else
+			ret += get_fabric_error_cause_detail(buf + ret,
+							     size - ret,
+							     id,
+							     &flag_status.data,
+							     0);
+
+		offset++; /* Use this to track error_log/address offset */
+	}
+
+	if (offset & 1)
+		offset++; /* If offset is odd number, adjust to even offset */
+
+	ret += get_additional_error(buf + ret, size - ret, num_err_logs,
+				    &log_buffer[offset],
+				    MAX_NUM_LOGDWORDS - offset);
+
+	output_str(ret, buf, size, "\n\n\nAdditional debug data:\n\n");
+	for (count = 0;
+		count < MAX_NUM_LOGDWORDS + MAX_NUM_LOGDWORDS_EXTENDED;
+		count++) {
+		output_str(ret, buf, size, "DW%d:0x%08x\n",
+			   count, log_buffer[count]);
+	}
+out:
+	return ret;
+}
+
+static int dump_scu_extented_trace(char *buf, int size,
+				   int log_offset, int *read)
+{
+	int ret = 0;
+	int i;
+	unsigned int end, start;
+
+	*read = 0;
+
+	/* Title for error dump */
+	if ((USE_LEGACY() && (log_offset == SIZE_ALL_LOGDWORDS_LEGACY)) ||
+	    (!USE_LEGACY() && (log_offset == SIZE_ALL_LOGDWORDS)))
+		output_str(ret, buf, size, "SCU Extra trace\n");
+
+	start = log_offset / sizeof(u32);
+	end = log_buffer_sz / sizeof(u32);
+	for (i = start; i < end; i++) {
+		/* Make sure we get only full lines */
+		if (buf && (size - ret < 18))
+			break;
+		/*
+		 * "EW:" to separate lines from "DW:" lines
+		 * elsewhere in this file.
+		 */
+		output_str(ret, buf, size, "EW%d:0x%08x\n", i,
+			   *(log_buffer + i));
+		*read += sizeof(u32);
+	}
+
+	return ret;
+}
+
+static char *cmd_type_str[] = {
+	"Idle",
+	"Write",
+	"Read",
+	"ReadEx",
+	"ReadLinked",
+	"WriteNonPost",
+	"WriteConditional",
+	"Broadcast"
+};
+
+static char *error_type_str[] = {
+	"Unknown",
+	"Unsupported Command",
+	"Address Hole",
+	"Unknown",
+	"Inband Error",
+	"Unknown",
+	"Unknown",
+	"Request Timeout, Not Accepted",
+	"Request Timeout, No Response",
+	"Request Timeout, Data not accepted",
+	"Unknown",
+	"Unknown",
+	"Unknown",
+	"Unknown",
+	"Unknown",
+	"Unknown"
+};
+
+#define ALLOC_UNIT_SIZE 1024
+#define MAX_LINE_SIZE   132
+#define MAX_ALLOC_SIZE  (40 * ALLOC_UNIT_SIZE)
+#define fab_err_snprintf(str, sz, format, a...)			\
+	do {								\
+		char _buffer[MAX_LINE_SIZE];				\
+		int  _n, _current_size;				\
+		if ((str) == NULL)					\
+			break;						\
+		_n = snprintf(_buffer, MAX_LINE_SIZE, (format), ## a);	\
+		_n = min(_n, MAX_LINE_SIZE - 1);			\
+		_current_size = strlen(str);				\
+		if ((_current_size + _n + 1) > (sz)) {			\
+			if (((sz) + ALLOC_UNIT_SIZE) > MAX_ALLOC_SIZE)	\
+				break;					\
+			(str) = krealloc(				\
+				(str),					\
+				(sz) + ALLOC_UNIT_SIZE,		\
+				GFP_ATOMIC);				\
+			if ((str) != NULL)				\
+				(sz) += ALLOC_UNIT_SIZE;		\
+			else {						\
+				(sz) = 0;				\
+				pr_err("krealloc failed\n");		\
+			}						\
+		}							\
+		if ((str) != NULL) {					\
+			memcpy((str)+_current_size, _buffer, _n+1);	\
+		}							\
+	} while (0)
+
+static int parse_fab_err_log(
+	char **parsed_fab_err_log, u32 *parsed_fab_err_log_sz)
+{
+	u8 id = 0;
+	char *ptr;
+	u32 fabric_id;
+	union error_header err_header;
+	union error_scu_version err_scu_ver;
+	union scu_error_type scu_err_type;
+	int error_type, cmd_type, init_id, is_multi, is_secondary;
+	u16 scu_minor_ver, scu_major_ver;
+	u32 reg_ids = 0;
+	int i, need_new_regid, num_flag_status,
+		num_err_logs, offset, total;
+
+	err_header.data = log_buffer[FABRIC_ERR_HEADER];
+	err_scu_ver.data = log_buffer[FABRIC_ERR_SCU_VERSIONINFO];
+	scu_err_type.data = log_buffer[FABRIC_ERR_ERRORTYPE];
+
+	scu_minor_ver = err_scu_ver.fields.scu_rt_minor_ver;
+	scu_major_ver = err_scu_ver.fields.scu_rt_major_ver;
+
+	num_flag_status = err_header.fields.num_flag_regs;
+	num_err_logs = err_header.fields.num_err_regs;
+
+	if (*parsed_fab_err_log != NULL)
+		kfree(*parsed_fab_err_log);
+
+	*parsed_fab_err_log = kzalloc(ALLOC_UNIT_SIZE, GFP_ATOMIC);
+	if (*parsed_fab_err_log == NULL) {
+		*parsed_fab_err_log_sz = 0;
+		return 0;
+	}
+
+	*parsed_fab_err_log_sz = ALLOC_UNIT_SIZE;
+
+	fab_err_snprintf(
+		*parsed_fab_err_log, *parsed_fab_err_log_sz,
+		"Fabric Error debug data:\n");
+	fab_err_snprintf(
+		*parsed_fab_err_log, *parsed_fab_err_log_sz,
+		"========================\n");
+	fab_err_snprintf(
+		*parsed_fab_err_log, *parsed_fab_err_log_sz,
+		"SCU runtime major version: %X\n",
+		scu_major_ver);
+	fab_err_snprintf(
+		*parsed_fab_err_log, *parsed_fab_err_log_sz,
+		"SCU runtime minor version: %X\n",
+		scu_minor_ver);
+	fab_err_snprintf(
+		*parsed_fab_err_log, *parsed_fab_err_log_sz,
+		"Total Errlog reg recorded: %d\n",
+		num_err_logs);
+	fab_err_snprintf(
+		*parsed_fab_err_log, *parsed_fab_err_log_sz,
+		"Total Flag Status reg recorded: %d\n",
+		num_flag_status);
+	fab_err_snprintf(
+		*parsed_fab_err_log, *parsed_fab_err_log_sz,
+		"Recoverable error counter overflowed: %s\n",
+		err_header.fields.recv_err_count_overflow ? "Yes" : "No");
+	fab_err_snprintf(
+		*parsed_fab_err_log, *parsed_fab_err_log_sz,
+		"Logging structure ran out of space: %s\n",
+		err_header.fields.logging_buf_full ? "Yes" : "No");
+	fab_err_snprintf(
+		*parsed_fab_err_log, *parsed_fab_err_log_sz,
+		"# of recoverable error since last fatal: %d\n",
+		err_header.fields.num_of_recv_err);
+	fab_err_snprintf(
+		*parsed_fab_err_log, *parsed_fab_err_log_sz,
+		"Fabric error type: %s\n",
+		get_errortype_str(scu_err_type.fields.postcode_err_type));
+	fab_err_snprintf(
+		*parsed_fab_err_log, *parsed_fab_err_log_sz,
+		"Protection violation type: %s\n\n",
+		get_errortype_str(scu_err_type.fields.protect_err_type));
+	fab_err_snprintf(
+		*parsed_fab_err_log, *parsed_fab_err_log_sz,
+		"Summary of Fabric Error detail:\n");
+	fab_err_snprintf(
+		*parsed_fab_err_log, *parsed_fab_err_log_sz,
+		"-------------------------------\n");
+
+	i = 0;
+	total = 0;
+	offset = 4;
+	need_new_regid = 1;
+
+	reg_ids = 0;
+	while ((num_flag_status + num_err_logs) > 0) {
+		if (!reg_ids)
+			reg_ids = log_buffer[offset++];
+		id = (reg_ids & 0xFF);
+		reg_ids >>= 8;
+		if (num_flag_status) {
+			unsigned long flag_status;
+			int hi;
+			ptr = get_element_flagsts_detail(id);
+			fab_err_snprintf(
+				*parsed_fab_err_log, *parsed_fab_err_log_sz,
+				"\n* %s\n",
+				ptr);
+			for (hi = 0; hi < 2; hi++) {
+				flag_status = log_buffer[offset+hi];
+				while (flag_status) {
+					unsigned long idx =
+						__ffs(flag_status);
+					ptr = fabric_error_lookup(id, idx, hi);
+					if (ptr && *ptr)
+						fab_err_snprintf(
+							*parsed_fab_err_log,
+							*parsed_fab_err_log_sz,
+							"%s\n", ptr);
+					flag_status &= ~(1<<idx);
+				}
+			}
+			offset += 2;
+			num_flag_status--;
+		} else {
+			ptr = get_element_errorlog_detail(id, &fabric_id);
+			fab_err_snprintf(
+				*parsed_fab_err_log, *parsed_fab_err_log_sz,
+				"\n* %s\n",
+				ptr);
+			fab_err_snprintf(
+				*parsed_fab_err_log, *parsed_fab_err_log_sz,
+				"Lower ErrLog DW: 0x%08X\n",
+				log_buffer[offset]);
+
+			cmd_type = log_buffer[offset] & 7;
+			fab_err_snprintf(
+				*parsed_fab_err_log, *parsed_fab_err_log_sz,
+				"\tCommand Type: %s\n",
+				cmd_type_str[cmd_type]);
+
+			init_id = (log_buffer[offset] >> 8) & 0xFF;
+			ptr = get_initiator_id_str(init_id, fabric_id);
+			fab_err_snprintf(
+				*parsed_fab_err_log, *parsed_fab_err_log_sz,
+				"\tInit ID: %s\n", ptr);
+
+			error_type = (log_buffer[offset] >> 24) & 0xF;
+			fab_err_snprintf(
+				*parsed_fab_err_log, *parsed_fab_err_log_sz,
+				"\tError Type: %s\n",
+				error_type_str[error_type]);
+
+			is_secondary = (log_buffer[offset] >> 30) & 1;
+			fab_err_snprintf(
+				*parsed_fab_err_log, *parsed_fab_err_log_sz,
+				"\tSecondary error: %s\n",
+				is_secondary ? "Yes" : "No");
+
+			is_multi = (log_buffer[offset] >> 31) & 1;
+			fab_err_snprintf(
+				*parsed_fab_err_log, *parsed_fab_err_log_sz,
+				"\tMultiple errors: %s\n",
+				is_multi ? "Yes" : "No");
+
+			fab_err_snprintf(
+				*parsed_fab_err_log, *parsed_fab_err_log_sz,
+				"Upper ErrLog DW: 0x%08X\n",
+				log_buffer[offset + 1]);
+			fab_err_snprintf(
+				*parsed_fab_err_log, *parsed_fab_err_log_sz,
+				"Associated 32bit Address: 0x%08X\n",
+				log_buffer[offset + 2]);
+
+			offset += 3;
+			num_err_logs--;
+		}
+	}
+
+	fab_err_snprintf(
+		*parsed_fab_err_log, *parsed_fab_err_log_sz,
+		"\n\n");
+
+	for (i = 0; i < MAX_NUM_ALL_LOGDWORDS; i++)
+		fab_err_snprintf(
+			*parsed_fab_err_log, *parsed_fab_err_log_sz,
+			"DW%d:0x%08x\n",
+			i, log_buffer[i]);
+
+	if (*parsed_fab_err_log != NULL) {
+		char *footer = "\nLength of fabric error file: %5dB\n";
+		fab_err_snprintf(
+			*parsed_fab_err_log,
+			*parsed_fab_err_log_sz,
+			footer,
+			strlen(*parsed_fab_err_log) + strlen(footer) + 2);
+	}
+
+	if (*parsed_fab_err_log != NULL)
+		return strlen(*parsed_fab_err_log);
+	else
+		return 0;
+}
+
+static int dump_sculog_to_ascii_raw(void *output, int max,
+				u32 *input_trace_buffer,
+				u32 input_scu_trace_buffer_size)
+{
+	int i, length = 0;
+	char buf[MAX_INPUT_LENGTH] = {0};
+
+	if (output == NULL) /* Output to kernel log */
+		pr_info("SCU trace logging data:\n");
+
+	for (i = 0; i < input_scu_trace_buffer_size / sizeof(u32); i++) {
+
+		sprintf(buf, "EW%d:0x%08x\n", i, *(input_trace_buffer + i));
+
+		if (output == NULL)
+			pr_info("%s", buf);
+		else if (max > (strlen(buf) + length))
+			strcat(output, buf);
+		else
+			break;
+
+		length += strlen(buf);
+	}
+
+	return length;
+}
+
+#ifdef CONFIG_PROC_FS
+struct proc_dir_entry *ipanic_faberr;
+struct proc_dir_entry *online_scu_log;
+struct proc_dir_entry *offline_scu_log;
+struct proc_dir_entry *ipanic_faberr_recoverable;
+
+static ssize_t intel_fw_logging_recoverable_proc_read(struct file *file,
+						  char __user *buffer,
+						  size_t count, loff_t *ppos)
+{
+	unsigned long flags;
+	char *temp_kbuffer;
+	spin_lock_irqsave(&parsed_faberr_lock, flags);
+
+	if ((parsed_fab_err == NULL) || (*ppos >= parsed_fab_err_length)) {
+		spin_unlock_irqrestore(&parsed_faberr_lock, flags);
+		return 0; /* We finished to read, return 0 */
+	}
+
+	if ((*ppos + count) >= parsed_fab_err_length)
+		count = parsed_fab_err_length - *ppos;
+
+	temp_kbuffer = kzalloc(count, GFP_ATOMIC);
+	if (!temp_kbuffer) {
+		spin_unlock_irqrestore(&parsed_faberr_lock, flags);
+		return -ENOMEM;
+	}
+	memcpy(temp_kbuffer, parsed_fab_err + *ppos, count);
+
+	spin_unlock_irqrestore(&parsed_faberr_lock, flags);
+
+	if (copy_to_user(buffer, temp_kbuffer, count)) {
+		kfree(temp_kbuffer);
+		return -EFAULT;
+	}
+
+	kfree(temp_kbuffer);
+	*ppos += count;
+	return count;
+}
+
+static ssize_t intel_fw_logging_proc_read(struct file *file,
+					  char __user *buffer, size_t count,
+					  loff_t *ppos)
+{
+	int ret = 0;
+	u32 read;
+
+	if (!USE_LEGACY()) {
+		return intel_fw_logging_recoverable_proc_read(file, buffer,
+							      count, ppos);
+	} else {
+		if (!*ppos) {
+			/* Fill the buffer, return the buffer size*/
+			ret = dump_fwerr_log(buffer, count);
+			read = SIZE_ALL_LOGDWORDS_LEGACY;
+			*ppos = read;
+		} else {
+			if (*ppos >= SIZE_ALL_LOGDWORDS_LEGACY +
+			    scu_trace_buffer_size)
+				return 0;
+
+			ret = dump_scu_extented_trace(buffer, count,
+						      *ppos, &read);
+			*ppos += read;
+		}
+	}
+
+	return ret;
+}
+
+static ssize_t offline_scu_log_proc_read(struct file *file,
+					  char __user *buffer, size_t count,
+					  loff_t *ppos)
+{
+	void *output_buf;
+	int max_size, len;
+	unsigned long ret;
+
+	if (USE_LEGACY()) {
+		if (*ppos >= log_buffer_sz) {
+			/* outside of offline SCU trace buffer bounds */
+			return 0;
+		}
+
+		if ((*ppos + count) >= log_buffer_sz - SIZE_ALL_LOGDWORDS_LEGACY)
+			count = (log_buffer_sz - SIZE_ALL_LOGDWORDS_LEGACY -
+				 *ppos);
+
+		if (copy_to_user(buffer,
+			log_buffer + MAX_NUM_ALL_LOGDWORDS_LEGACY + *ppos,
+			count))
+			return -EFAULT;
+	} else {
+		if (!new_sculog_offline_buf) {
+			pr_info("No offline SCU trace log found!\n");
+			return 0;
+		}
+
+		max_size = (new_sculog_offline_size / 4 + 1) * 20;
+		output_buf = kzalloc(max_size, GFP_KERNEL);
+
+		if (!output_buf) {
+			pr_err("Memory error in offline proc read\n");
+			return -EFAULT;
+		}
+
+		/* Calling new SCU log trace decode/dump function */
+		len = dump_sculog_to_ascii_raw(output_buf, max_size,
+						new_sculog_offline_buf,
+						new_sculog_offline_size);
+
+		if ((*ppos + count) >= len)
+			count = (len - *ppos);
+
+		ret = copy_to_user(buffer, output_buf + *ppos, count);
+		kfree(output_buf);
+
+		if (ret)
+			return -EFAULT;
+	}
+
+	*ppos += count;
+	return count;
+}
+
+static ssize_t online_scu_log_proc_read(struct file *file,
+					char __user *buffer, size_t count,
+					loff_t *ppos)
+{
+	int ret, max_size;
+	ssize_t result = 0;
+	char *ptr = NULL;
+
+	if (!global_scutrace_enable) {
+		pr_info("FW trace function disabled, enable it first\n");
+		return -EFAULT;
+
+	} else if (new_scu_trace_buffer && new_scu_trace_buffer_size) {
+
+		ret = rpmsg_send_simple_command(fw_logging_instance,
+				IPCMSG_SCULOG_TRACE, IPC_CMD_SCU_LOG_DUMP);
+
+		if (ret || (!ret && *new_scu_trace_buffer != SCULOG_MAGIC)) {
+			pr_info("Fail getting SCU trace via IPC\n");
+			return -EFAULT;
+		}
+
+		/* We convert each DW in the SCU log buffer to raw */
+		/* ascii EW%d:0x08%X, each EW takes max of 20 bytes*/
+		/* as ascii - Winson Yung */
+
+		max_size = (new_scu_trace_buffer_size / 4 + 1) * 20;
+		ptr = kzalloc(max_size, GFP_KERNEL);
+
+		if (!ptr) {
+			pr_err("Memory error to get SCU log\n");
+			goto proc_exit;
+		}
+
+		ret = dump_sculog_to_ascii_raw(ptr, max_size,
+					new_scu_trace_buffer,
+					new_scu_trace_buffer_size);
+
+		/*Zero out SCUL signature*/
+		*new_scu_trace_buffer = 0;
+
+		if (!ret) { /* "ret" has the actual size */
+			result = -EFAULT;
+			goto proc_exit1;
+		}
+
+		if ((*ppos + count) >= ret)
+			count = (ret - *ppos);
+
+		if (copy_to_user(buffer, ptr + *ppos, count)) {
+			result = -EFAULT;
+			goto proc_exit1;
+		}
+
+		*ppos += count;
+		result = count;
+proc_exit1:
+		kfree(ptr);
+proc_exit:
+		return result;
+	} else {
+		pr_info("FW trace function has invalid SRAM location\n");
+			return -EFAULT;
+	}
+}
+
+static const struct file_operations ipanic_fabric_err_fops = {
+	.read = intel_fw_logging_proc_read
+};
+
+static const struct file_operations offline_scu_log_fops = {
+	.read = offline_scu_log_proc_read
+};
+
+static const struct file_operations online_scu_log_fops = {
+	.read = online_scu_log_proc_read
+};
+
+static const struct file_operations ipanic_fab_recoverable_fops = {
+	.read = intel_fw_logging_recoverable_proc_read
+};
+#endif /* CONFIG_PROC_FS */
+
+/* ASCII messages in the unsolicited ring buffer are terminated/deliminated */
+/* by one NULL. Two ways to determine that we are at the end of the strings,*/
+/* either if we found double NULL terminators, or we wrap around where the  */
+/* index pos == curpos - Winson Yung */
+
+static void dump_unsolicited_scutrace_ascii(char *data,
+					u32 data_maxsize, u32 curpos)
+{
+	u32 tmp, len, last_pos = data_maxsize - 1;
+	bool single_null = false, seen_char = false;
+	u32 begin_pos = curpos ? curpos - 1 : last_pos;
+
+	char *output_all = kzalloc(data_maxsize * 2, GFP_ATOMIC);
+	char *output_tmp = kzalloc(data_maxsize * 2, GFP_ATOMIC);
+	char *output_str = kzalloc(data_maxsize, GFP_ATOMIC);
+
+	if (!output_str || !output_all || !output_tmp) {
+		pr_err("Memory allocate error for unsolicited SCU trace\n");
+		kfree(output_str);
+		kfree(output_tmp);
+		kfree(output_all);
+		return;
+	}
+
+	do {
+		/* If we find we loop back to where we start, */
+		/* we reach the end of the ring buffer as well*/
+
+		if (curpos == begin_pos) {
+
+			/* Check we ran out of space in ring */
+			/* buffer, and string starts to over-*/
+			/* write the beginning of the buffer */
+
+			if (seen_char) { /* Need to print last msg in case */
+				/* not double null terminators are found */
+				tmp = begin_pos;
+				len = strlen(output_str);
+
+				while (data[tmp]) {
+
+					output_str[len++] = data[tmp];
+					if (tmp == last_pos)
+						tmp = 0;
+					else
+						tmp++;
+				}
+
+				output_str[len++] = '\n';
+				output_str[len] = 0;
+			}
+
+			/* We need to re-order ouput of the message */
+			/* inside ring buffer in an event there are */
+			/* multiple messages. Inside ring buffer, we*/
+			/* traverse the messages backwards using the*/
+			/* index position SCU fw passes to IA which */
+			/* will pickup the last message first, and  */
+			/* first message last. We need output first */
+			/* message first and last message last.     */
+
+			if (strlen(output_str)) {
+				strcpy(output_tmp, "[SCU log] ");
+				strcat(output_tmp, output_str);
+				strcat(output_tmp, output_all);
+				strcpy(output_all, output_tmp);
+			}
+
+			break;
+		}
+
+		if (single_null) {
+
+			/* If we find double NULL terminators, */
+			/* we reach the end of the ring buffer */
+
+			if (!data[begin_pos])
+				break;  /* Saw the end of all messages */
+
+			seen_char = true; /* Beginning of next msg */
+			single_null = 0;
+		}
+
+		if (!data[begin_pos]) {
+
+			if (seen_char) { /*Dump the scanned string to kernel log*/
+				tmp = (begin_pos == last_pos) ? 0 : begin_pos + 1;
+				len = strlen(output_str);
+
+				while (data[tmp]) {
+
+					output_str[len++] = data[tmp];
+					if (tmp == last_pos)
+						tmp = 0;
+					else
+						tmp++;
+				}
+
+				output_str[len++] = '\n';
+				output_str[len] = 0;
+
+				strcpy(output_tmp, "[SCU log] ");
+				strcat(output_tmp, output_str);
+				strcat(output_tmp, output_all);
+				strcpy(output_all, output_tmp);
+
+				seen_char = false;
+				output_str[0] = 0;
+			}
+
+			single_null = 1;
+		}
+
+		if (begin_pos == 0)
+			begin_pos = last_pos;
+		else
+			begin_pos--;
+	} while (1);
+
+	if (strlen(output_all))
+		pr_info("%s", output_all);
+
+	kfree(output_all);
+	kfree(output_tmp);
+	kfree(output_str);
+}
+
+static irqreturn_t recoverable_faberror_irq(int irq, void *ignored)
+{
+	struct sculog_list *new_sculog_struct = NULL;
+	struct recovfe_list *new_recovfe_struct = NULL;
+	u32 i, *tmp_unsolicit_sram_data = new_scu_trace_buffer + 1;
+	void *new_sculog_data = NULL;
+
+	bool use_legacytype = read_fwerr_log(log_buffer, oshob_base);
+	bool has_recoverable_fe = fw_error_found(use_legacytype, &i);
+
+	if (use_legacytype) {
+		pr_info("No valid SCU errors found, bogus interrupt\n");
+		return IRQ_HANDLED;
+	}
+
+	if (has_recoverable_fe) {
+		pr_err("A recoverable fabric error intr was captured!!!\n");
+
+		/* Updating /proc/ipanic_fab_recv_err */
+		spin_lock(&parsed_faberr_lock);
+		parsed_fab_err_length = parse_fab_err_log(&parsed_fab_err,
+							&parsed_fab_err_sz);
+		spin_unlock(&parsed_faberr_lock);
+
+		/* Stack of Recoverable events (hopefully only one!)    */
+		/* This is needed in case another hard-irq fires before */
+		/* the first soft-irq thread is finished.               */
+		new_recovfe_struct = kzalloc(sizeof(struct recovfe_list),
+					    GFP_ATOMIC);
+
+		if (!new_recovfe_struct) {
+			pr_err("Fail to allocate memory for "
+				"SCU recoverable fabric error copy\n");
+			return IRQ_HANDLED;
+		}
+		/* We take a snapshot of the buffer's characteristic DWords  */
+		/* in case the attached file becomes obsolete (when multiple */
+		/* IRQ occur in quick succession). That way, we can check    */
+		/* the file integrity and still have data if it fails        */
+		new_recovfe_struct->header.data =
+				log_buffer[FABRIC_ERR_HEADER];
+		new_recovfe_struct->scuversion.data =
+				log_buffer[FABRIC_ERR_SCU_VERSIONINFO];
+		new_recovfe_struct->errortype.data =
+				log_buffer[FABRIC_ERR_ERRORTYPE];
+		new_recovfe_struct->regid0.data =
+				log_buffer[FABRIC_ERR_REGID0];
+		memcpy(new_recovfe_struct->dumpDw1,
+				log_buffer + FABRIC_ERR_RECV_DUMP_START,
+				FABRIC_ERR_RECV_DUMP_LENGTH *
+					sizeof(log_buffer[0]));
+		memcpy(new_recovfe_struct->dumpDw2,
+				log_buffer + FABRIC_ERR_RECV_DUMP_START2,
+				FABRIC_ERR_RECV_DUMP_LENGTH *
+					sizeof(log_buffer[0]));
+
+		/* Push on the stack of SCU recoverable events */
+		spin_lock(&pending_recovfe_lock);
+		list_add_tail(&(new_recovfe_struct->list),
+					&(pending_recovfe_list.list));
+
+		spin_unlock(&pending_recovfe_lock);
+
+		return IRQ_WAKE_THREAD;
+
+	} else if (global_unsolicit_scutrace_enable) {
+		pr_info("A un-solicited SCU trace dump intr was captured!!!\n");
+		new_sculog_struct = kzalloc(sizeof(struct sculog_list),
+					    GFP_ATOMIC);
+
+		/* The ring buffer size in new_scu_trace_buffer_rb_size is  */
+		/* the maximum data buffer size for holding the data (which */
+		/* is 256) plus the 32bit index position for the ring buffer*/
+		/* put at the beginning of data, so the value is 256 + 4    */
+
+		new_sculog_data = kzalloc(new_scu_trace_buffer_rb_size,
+								GFP_ATOMIC);
+
+		if ((new_sculog_struct != NULL) && (new_sculog_data != NULL)) {
+			new_sculog_struct->data = new_sculog_data;
+			new_sculog_struct->size =
+					new_scu_trace_buffer_rb_size - 4;
+			new_sculog_struct->curpos = *new_scu_trace_buffer;
+
+			/* Only copy the 256 data portion to location memory */
+			memcpy(new_sculog_data, (char *)tmp_unsolicit_sram_data,
+					new_scu_trace_buffer_rb_size - 4);
+
+			spin_lock(&pending_list_lock);
+			list_add_tail(&(new_sculog_struct->list),
+						&(pending_sculog_list.list));
+
+			spin_unlock(&pending_list_lock);
+		} else {
+			kfree(new_sculog_struct);
+			kfree(new_sculog_data);
+
+			pr_err("Fail to allocate memory for SCU trace copy\n");
+			return IRQ_HANDLED;
+		}
+	} else {
+		pr_err("Why are we still getting interrupt from SCU???\n");
+		return IRQ_HANDLED;
+	}
+
+	return IRQ_WAKE_THREAD;
+}
+
+#define LENGTH_HEX_VALUE	18	/* > sizeof("DW12:0x12345678") +1 */
+static irqreturn_t recoverable_faberror_thread(int irq, void *ignored)
+{
+	struct list_head *pos, *q;
+	struct sculog_list *tmp;
+	struct recovfe_list *iter;
+	unsigned long flags;
+
+#ifdef CONFIG_TRACEPOINT_TO_EVENT
+	char sData0[LENGTH_HEX_VALUE];
+	char sData1[LENGTH_HEX_VALUE];
+	char sData2[LENGTH_HEX_VALUE];
+	char sData3[LENGTH_HEX_VALUE*FABRIC_ERR_RECV_DUMP_LENGTH];
+	char sData4[LENGTH_HEX_VALUE*FABRIC_ERR_RECV_DUMP_LENGTH];
+	char sData5[LENGTH_HEX_VALUE];
+	int  idx;
+	int  len;
+#endif
+
+	spin_lock_irqsave(&pending_recovfe_lock, flags);
+
+	/* Flush remaining SCU trace log if any left */
+	list_for_each_safe(pos, q, &pending_recovfe_list.list) {
+		iter = list_entry(pos, struct recovfe_list, list);
+
+#ifdef CONFIG_TRACEPOINT_TO_EVENT
+		snprintf(sData0, sizeof(sData0), "DW3:0x%08x",
+			iter->errortype.data);
+		snprintf(sData1, sizeof(sData1), "DW0:0x%08x",
+			iter->header.data);
+		snprintf(sData2, sizeof(sData2), "DW4:0x%08x",
+			iter->regid0.data);
+
+		len = snprintf(sData3, sizeof(sData3), "DW%d:0x%08x",
+				FABRIC_ERR_RECV_DUMP_START, iter->dumpDw1[0]);
+		for (idx = 1; idx < FABRIC_ERR_RECV_DUMP_LENGTH; idx++) {
+			len += snprintf(sData3 + len, sizeof(sData3) - len,
+				" DW%d:0x%08x",
+				FABRIC_ERR_RECV_DUMP_START + idx,
+				iter->dumpDw1[idx]);
+		}
+
+		len = snprintf(sData4, sizeof(sData4), "DW%d:0x%08x",
+				FABRIC_ERR_RECV_DUMP_START2, iter->dumpDw2[0]);
+		for (idx = 1; idx < FABRIC_ERR_RECV_DUMP_LENGTH; idx++) {
+			len += snprintf(sData4 + len, sizeof(sData4) - len,
+				" DW%d:0x%08x",
+				FABRIC_ERR_RECV_DUMP_START2 + idx,
+				iter->dumpDw2[idx]);
+		}
+
+		snprintf(sData5, sizeof(sData5), "DW2:0x%08x",
+			iter->scuversion.data);
+
+		/* function added to create a crashtool event on condition */
+		trace_tp2e_scu_recov_event(TP2E_EV_ERROR,
+					"Fabric", "Recov",
+					sData0, sData1, sData2,
+					sData3, sData4, sData5,
+					"/proc/ipanic_fabric_recv_err");
+#else
+		pr_info("SCU IRQ: TP2E not enabled\n");
+#endif
+
+		list_del(pos);
+		kfree(iter);
+	}
+
+	spin_unlock_irqrestore(&pending_recovfe_lock, flags);
+
+	if (global_unsolicit_scutrace_enable) {
+
+		spin_lock_irqsave(&pending_list_lock, flags);
+
+		if (list_empty(&(pending_sculog_list.list))) {
+			spin_unlock_irqrestore(&pending_list_lock, flags);
+
+			/* The interrupt must for recoverable FE, clear FE */
+			pr_info("Issue IPCMSG_CLEAR_FABERROR\n");
+			rpmsg_send_simple_command(fw_logging_instance,
+						IPCMSG_CLEAR_FABERROR, 0);
+
+			return IRQ_HANDLED;
+		}
+
+		/* Flush remaining SCU trace log if any left */
+		list_for_each_safe(pos, q, &pending_sculog_list.list) {
+			tmp = list_entry(pos, struct sculog_list, list);
+			dump_unsolicited_scutrace_ascii(tmp->data, tmp->size,
+								tmp->curpos);
+			list_del(pos);
+			kfree(tmp->data);
+			kfree(tmp);
+		}
+
+		spin_unlock_irqrestore(&pending_list_lock, flags);
+	} else {
+		/* The interrupt must for recoverable FE, clear FE */
+		pr_info("Issue IPCMSG_CLEAR_FABERROR\n");
+		rpmsg_send_simple_command(fw_logging_instance,
+					IPCMSG_CLEAR_FABERROR, 0);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int fw_logging_crash_on_boot(void)
+{
+	int length = 0;
+	int err = 0;
+	bool ret, use_legacytype = USE_LEGACY();
+	u32 read, has_onlysculog = 0;
+
+	if (use_legacytype)
+		log_buffer_sz =
+			SIZE_ALL_LOGDWORDS_LEGACY + scu_trace_buffer_size;
+	else
+		log_buffer_sz =
+			SIZE_ALL_LOGDWORDS + scu_trace_buffer_size;
+
+	log_buffer = kzalloc(log_buffer_sz, GFP_KERNEL);
+	if (!log_buffer) {
+		pr_err("Failed to allocate memory for log buffer\n");
+		err = -ENOMEM;
+		goto out1;
+	}
+
+	read_fwerr_log(log_buffer, oshob_base);
+	ret = fw_error_found(use_legacytype, &has_onlysculog);
+
+	/* No error and no trace to display */
+	if (!ret && !has_onlysculog) {
+		pr_info("No valid stored SCU errors found in SRAM\n");
+		goto out1;
+	}
+
+	if (use_legacytype) {
+		if (ret)
+			length = dump_fwerr_log(NULL, 0);
+
+		if (sram_trace_buf) {
+			/*
+			 * SCU gives pointer via oshob. Address is a physical
+			 * address somewhere in shared sram
+			 */
+			read_sram_trace_buf(
+				log_buffer + MAX_NUM_ALL_LOGDWORDS_LEGACY,
+				sram_trace_buf, scu_trace_buffer_size);
+
+			length += dump_scu_extented_trace(NULL, 0,
+					      SIZE_ALL_LOGDWORDS_LEGACY, &read);
+		}
+	} else {
+		if (ret)
+			parsed_fab_err_length = parse_fab_err_log(
+					&parsed_fab_err, &parsed_fab_err_sz);
+
+		if (sram_trace_buf) {
+			new_sculog_offline_size = scu_trace_buffer_size;
+			new_sculog_offline_buf = kzalloc(scu_trace_buffer_size,
+								GFP_KERNEL);
+
+			if (!new_sculog_offline_buf) {
+				pr_err("Memory allocation error to get SCU log\n");
+				goto out1;
+			}
+
+			/*Save a copy for /proc/offline_scu_log to view later*/
+			memcpy(new_sculog_offline_buf, sram_trace_buf,
+							scu_trace_buffer_size);
+
+			/* Call new SCU log trace dump to dump to kernel */
+			/* dmesg because there is a valid SCU trace log. */
+
+			dump_sculog_to_ascii_raw(NULL, 0,
+				sram_trace_buf, scu_trace_buffer_size);
+		}
+	}
+
+#ifdef CONFIG_PROC_FS
+	if (ret) {
+		ipanic_faberr = proc_create("ipanic_fabric_err",
+					    S_IFREG | S_IRUGO, NULL,
+					    &ipanic_fabric_err_fops);
+		if (ipanic_faberr == 0) {
+			pr_err("Fail creating procfile ipanic_fabric_err\n");
+			kfree(new_sculog_offline_buf);
+			new_sculog_offline_buf = NULL;
+			err = -ENODEV;
+			goto out1;
+		}
+	}
+
+	if (has_onlysculog) {
+		offline_scu_log = proc_create("offline_scu_log",
+					      S_IFREG | S_IRUGO, NULL,
+					      &offline_scu_log_fops);
+		if (offline_scu_log == 0) {
+			pr_err("Fail creating procfile offline_scu_log "
+				"for SCU log\n");
+			kfree(new_sculog_offline_buf);
+			new_sculog_offline_buf = NULL;
+			err = -ENODEV;
+			goto out1;
+		}
+	}
+#endif /* CONFIG_PROC_FS */
+
+out1:
+	return err;
+}
+
+static int intel_fw_logging_panic_handler(struct notifier_block *this,
+					  unsigned long event, void *unused)
+{
+	u32 *mbox_addr = (u32 *)tmp_ia_trace_buf;
+	struct list_head *pos, *q;
+	unsigned long flags;
+	unsigned int timeout = 0, count;
+	struct sculog_list *tmp;
+	int i;
+
+	if (!USE_LEGACY()) { /* Branch out for Merrifield SCU trace log*/
+
+		if (!global_scutrace_enable) {
+			pr_info("Global SCU trace logging is disabled\n");
+			goto out; /* Global SCU trace disabled */
+		}
+
+		if (!tmp_ia_trace_buf || !new_scu_trace_buffer ||
+			!new_scu_trace_buffer_size) {
+			pr_info("Invalid SRAM address or size\n");
+			goto out; /* Invalid SRAM address/size info */
+		}
+
+		/* We use the first DW from temp IA trace shared SRAM as */
+		/* the mail box to notify SCU to dump trace log instead  */
+		/* using IPC inside the kernel panic routine.            */
+
+		*mbox_addr = SCULOG_DUMP_MAGIC; /* Notify SCU to dump log */
+
+		do {
+			mdelay(SCU_PANIC_DUMP_TOUT);
+		} while (*mbox_addr != 0 &&
+			timeout++ < SCU_PANIC_DUMP_RECHECK1);
+
+		if (timeout > SCU_PANIC_DUMP_RECHECK1) {
+			pr_info("Waiting for trace from SCU timed out!\n");
+			goto out;
+		}
+
+		pr_info("SCU trace on Kernel panic:\n");
+		dump_sculog_to_ascii_raw(NULL, 0, new_scu_trace_buffer,
+						new_scu_trace_buffer_size);
+
+		spin_lock_irqsave(&pending_list_lock, flags);
+
+		/* Flush remaining SCU trace log if any left */
+		list_for_each_safe(pos, q, &pending_sculog_list.list) {
+			tmp = list_entry(pos, struct sculog_list, list);
+			dump_unsolicited_scutrace_ascii(tmp->data, tmp->size,
+								tmp->curpos);
+			list_del(pos);
+			kfree(tmp->data);
+			kfree(tmp);
+		}
+
+		spin_unlock_irqrestore(&pending_list_lock, flags);
+
+		*new_scu_trace_buffer = 0; /* Zero out SCUL signature */
+		goto out;
+	}
+
+	/* The rest of this function supports legacy  */
+	/* SCU trace on platforms prior to Merrifield */
+
+	apic_scu_panic_dump();
+
+	do {
+		mdelay(SCU_PANIC_DUMP_TOUT);
+		read_scu_trace_hdr(&trace_hdr);
+	} while (trace_hdr.magic != TRACE_MAGIC &&
+		 timeout++ < SCU_PANIC_DUMP_RECHECK);
+
+	if (timeout > SCU_PANIC_DUMP_RECHECK) {
+		pr_info("Waiting for trace from SCU timed out!\n");
+		goto out;
+	}
+
+	pr_info("SCU trace on Kernel panic:\n");
+	count = scu_trace_buffer_size / sizeof(u32);
+
+	for (i = 0; i < count; i += DWORDS_PER_LINE) {
+		/* EW111:0xdeadcafe EW112:0xdeadcafe \0 */
+		char dword_line[DWORDS_PER_LINE * 17 + 1] = {0};
+		/* abcdefgh\0 */
+		char ascii_line[DWORDS_PER_LINE * sizeof(u32) + 1] = {0};
+		int ascii_offset = 0, dword_offset = 0, j;
+
+		for (j = 0; i + j < count && j < DWORDS_PER_LINE; j++) {
+			int k;
+			u32 dword = readl(sram_trace_buf + (i + j) *
+					  sizeof(u32));
+			char *c = (char *) &dword;
+
+			dword_offset = sprintf(dword_line + dword_offset,
+					       "EW%d:0x%08x ", i + j, dword);
+			for (k = 0; k < sizeof(dword); k++)
+				if (isascii(*(c + k)) && isalnum(*(c + k)) &&
+				    *(c + k) != 0)
+					ascii_line[ascii_offset++] = *(c + k);
+				else
+					ascii_line[ascii_offset++] = '.';
+		}
+
+		ascii_line[ascii_offset++] = '\0';
+		pr_info("%s %s\n", dword_line, ascii_line);
+	}
+
+out:
+	return 0;
+}
+
+#ifdef CONFIG_ATOM_SOC_POWER
+static void __iomem *ia_trace_buf;
+static void intel_fw_logging_report_nc_pwr(u32 value, int reg_type)
+{
+	struct ia_trace_t *ia_trace = ia_trace_buf;
+
+	switch (reg_type) {
+	case APM_REG_TYPE:
+		ia_trace->apm_cmd[1] = ia_trace->apm_cmd[0];
+		ia_trace->apm_cmd[0] = value;
+		break;
+	case OSPM_REG_TYPE:
+		ia_trace->ospm_pm_ssc[1] = ia_trace->ospm_pm_ssc[0];
+		ia_trace->ospm_pm_ssc[0] = value;
+		break;
+	default:
+		break;
+	}
+}
+
+static int intel_fw_logging_start_nc_pwr_reporting(void)
+{
+	u32 rbuf[4];
+	int ret, rbuflen = 4;
+
+	if (USE_LEGACY()) {
+
+		if (scu_trace_buffer_size <  sizeof(struct ia_trace_t)) {
+			pr_warn("Sram_buf_sz is smaller than expected\n");
+			return 0;
+		} else if (!sram_trace_buf) {
+			pr_err("Failed to map ia trace buffer\n");
+			return -ENOMEM;
+		}
+
+		ia_trace_buf = sram_trace_buf +
+			(scu_trace_buffer_size - sizeof(struct ia_trace_t));
+	} else {
+		memset(rbuf, 0, sizeof(rbuf));
+		ret = rpmsg_send_command(fw_logging_instance, IPCMSG_SCULOG_TRACE,
+			IPC_CMD_SCU_LOG_IATRACE, NULL, (u32 *)rbuf, 0, rbuflen);
+
+		if (ret || (!ret && rbuf[2] != 0)) {
+			pr_err("Fail getting shared SRAM addr for IA trace\n");
+			return -EINVAL;
+		}
+
+		tmp_ia_trace_buf = ioremap_nocache((resource_size_t)rbuf[0],
+						(unsigned long)rbuf[1]);
+		if (!tmp_ia_trace_buf) {
+			pr_err("Failed to map ia trace buffer\n");
+			return -ENOMEM;
+		}
+
+		ia_trace_buf = tmp_ia_trace_buf + (rbuf[1] -
+				sizeof(struct ia_trace_t));
+	}
+
+	nc_report_power_state =  intel_fw_logging_report_nc_pwr;
+	return 0;
+}
+
+static void intel_fw_logging_stop_nc_pwr_reporting(void)
+{
+	if (!USE_LEGACY() && tmp_ia_trace_buf)
+		iounmap(tmp_ia_trace_buf);
+
+	nc_report_power_state = NULL;
+}
+
+#else /* !CONFIG_ATOM_SOC_POWER */
+
+static int intel_fw_logging_start_nc_pwr_reporting(void)
+{
+	return 0;
+}
+
+static void intel_fw_logging_stop_nc_pwr_reporting(void)
+{
+}
+
+#endif /* CONFIG_ATOM_SOC_POWER */
+
+static struct notifier_block fw_logging_panic_notifier = {
+	.notifier_call	= intel_fw_logging_panic_handler,
+	.next		= NULL,
+	.priority	= INT_MAX
+};
+
+static int intel_fw_logging_probe(struct platform_device *pdev)
+{
+	int err;
+
+	if (!sram_trace_buf) {
+		pr_err("No sram trace buf available, skip SCU tracing init\n");
+		err = -ENODEV;
+		goto err1;
+	}
+
+	err = atomic_notifier_chain_register(
+		&panic_notifier_list,
+		&fw_logging_panic_notifier);
+	if (err) {
+		pr_err("Failed to register notifier!\n");
+		goto err1;
+	}
+
+	err = intel_fw_logging_start_nc_pwr_reporting();
+	if (err) {
+		pr_err("Failed to start nc power reporting!\n");
+		goto err2;
+	}
+
+	scu_trace_irq = platform_get_irq(pdev, 0);
+	if (scu_trace_irq < 0) {
+		pr_info("No irq available, SCU tracing not available\n");
+		err = scu_trace_irq;
+		goto err3;
+	}
+
+	err = request_threaded_irq(scu_trace_irq, fw_logging_irq,
+					fw_logging_irq_thread,
+					IRQF_ONESHOT, "fw_logging",
+					&pdev->dev);
+	if (err) {
+		pr_err("Requesting irq for logging trace failed\n");
+		goto err3;
+	}
+
+	if (!disable_scu_tracing)
+			enable_irq(scu_trace_irq);
+
+	return err;
+
+err3:
+	intel_fw_logging_stop_nc_pwr_reporting();
+err2:
+	atomic_notifier_chain_unregister(&panic_notifier_list,
+					 &fw_logging_panic_notifier);
+err1:
+	return err;
+}
+
+static int intel_fw_logging_remove(struct platform_device *pdev)
+{
+	free_irq(scu_trace_irq, &pdev->dev);
+	free_irq(recoverable_irq, &pdev->dev);
+	intel_fw_logging_stop_nc_pwr_reporting();
+	return atomic_notifier_chain_unregister(&panic_notifier_list,
+						&fw_logging_panic_notifier);
+}
+
+#ifdef CONFIG_PM
+static int intel_fw_logging_suspend(struct platform_device *dev,
+					pm_message_t state)
+{
+	if (USE_LEGACY()) {
+		rpmsg_send_simple_command(fw_logging_instance,
+						IPCMSG_SCULOG_CTRL,
+						IPC_CMD_SCU_LOG_SUSPEND);
+	}
+	return 0;
+}
+
+static int intel_fw_logging_resume(struct platform_device *dev)
+{
+	if (USE_LEGACY()) {
+		rpmsg_send_simple_command(fw_logging_instance,
+						IPCMSG_SCULOG_CTRL,
+						IPC_CMD_SCU_LOG_RESUME);
+	}
+	return 0;
+}
+#endif
+
+static ssize_t scutrace_status_show(struct device *dev,
+			struct device_attribute *attr, char *buffer)
+{
+	return sprintf(buffer, "%s\n", global_scutrace_enable ?
+					"enabled" : "disabled");
+}
+
+static ssize_t scutrace_status_store(struct device *dev,
+			struct device_attribute *attr,
+			const char *buffer, size_t count)
+{
+	u32 rbuf[4];
+	int ret = 0, rbuflen = 4;
+
+	char action[MAX_INPUT_LENGTH];
+	char format[10];
+	snprintf(format, sizeof(format), "%%%ds", MAX_INPUT_LENGTH-1);
+	sscanf(buffer, format, action);
+
+	if (!strcmp(action, "enabled")) {
+
+		if (global_scutrace_enable)
+			return count; /* Already enabled */
+
+		ret = rpmsg_send_command(fw_logging_instance,
+					IPCMSG_SCULOG_TRACE,
+					IPC_CMD_SCU_LOG_ENABLE, NULL,
+					(u32 *)rbuf, 0, rbuflen);
+
+		if (ret || (!ret && rbuf[0])) {
+			pr_err("Fail enable SCU trace logging via IPC\n");
+			return ret;
+		}
+
+		global_scutrace_enable = true;
+		return count;
+	} else if (!strcmp(action, "disabled")) {
+
+		if (!global_scutrace_enable)
+			return count; /* Already disabled */
+
+		ret = rpmsg_send_command(fw_logging_instance,
+					IPCMSG_SCULOG_TRACE,
+					IPC_CMD_SCU_LOG_DISABLE, NULL,
+					(u32 *)rbuf, 0, rbuflen);
+
+		if (ret || (!ret && rbuf[0])) {
+			pr_err("Fail disable SCU trace logging via IPC\n");
+			return ret;
+		}
+
+		global_scutrace_enable = false;
+		global_unsolicit_scutrace_enable = false;
+		return count;
+	} else {
+		pr_err("Invalid parameter for SCU trace logging sysfs\n");
+		return -EINVAL;
+	}
+}
+
+static ssize_t unsolicit_scutrace_show(struct device *dev,
+			struct device_attribute *attr, char *buffer)
+{
+	return sprintf(buffer, "%s\n", global_unsolicit_scutrace_enable ?
+						"enabled" : "disabled");
+}
+
+static ssize_t unsolicit_scutrace_store(struct device *dev,
+			struct device_attribute *attr,
+			const char *buffer, size_t count)
+{
+	u32 rbuf[4];
+	int ret = 0, rbuflen = 4;
+
+	char action[MAX_INPUT_LENGTH];
+	char format[10];
+	snprintf(format, sizeof(format), "%%%ds", MAX_INPUT_LENGTH-1);
+	sscanf(buffer, format, action);
+
+	if (!strcmp(action, "enabled")) {
+
+		if (!global_scutrace_enable) {
+			pr_err("Enable global SCU trace logging first\n");
+			return -EINVAL;
+		} else if (global_unsolicit_scutrace_enable) {
+			return count; /* Already enabled */
+		} else {
+			ret = rpmsg_send_command(fw_logging_instance,
+					IPCMSG_SCULOG_TRACE,
+						IPC_CMD_SCU_LOG_EN_RB, NULL,
+						(u32 *)rbuf, 0, rbuflen);
+
+			if (ret || (!ret && rbuf[0])) {
+				pr_err("Fail enable unsolicit SCU trace log\n");
+				return ret;
+			}
+
+			global_unsolicit_scutrace_enable = true;
+			return count;
+		}
+	} else if (!strcmp(action, "disabled")) {
+		if (!global_scutrace_enable) {
+			pr_err("Global SCU trace logging is disabled already\n");
+			return count;
+		} else if (!global_unsolicit_scutrace_enable) {
+			return count; /* Already disabled */
+		} else {
+			ret = rpmsg_send_command(fw_logging_instance,
+						IPCMSG_SCULOG_TRACE,
+						IPC_CMD_SCU_LOG_DIS_RB, NULL,
+						(u32 *)rbuf, 0, rbuflen);
+
+			if (ret || (!ret && rbuf[0])) {
+				pr_err("Fail disable unsolicit SCU trace log\n");
+				return ret;
+			}
+
+			global_unsolicit_scutrace_enable = false;
+			return count;
+		}
+	} else
+		return -EINVAL;
+}
+
+/* Attach the sysfs read/write methods */
+DEVICE_ATTR(scutrace_status, S_IRUGO|S_IWUSR,
+		scutrace_status_show, scutrace_status_store);
+
+DEVICE_ATTR(unsolicit_scutrace, S_IRUGO|S_IWUSR,
+		unsolicit_scutrace_show, unsolicit_scutrace_store);
+
+/* Attribute Descriptor */
+static struct attribute *scutrace_attrs[] = {
+	&dev_attr_scutrace_status.attr,
+	&dev_attr_unsolicit_scutrace.attr,
+	NULL
+};
+
+/* Attribute Group */
+static struct attribute_group scutrace_attrs_group = {
+	.attrs = scutrace_attrs,
+};
+
+static const struct platform_device_id intel_fw_logging_table[] = {
+	{"scuLog", 1 },
+};
+
+static struct platform_driver intel_fw_logging_driver = {
+	.driver = {
+		.name = "scuLog",
+		.owner = THIS_MODULE,
+		},
+	.probe = intel_fw_logging_probe,
+	.remove = intel_fw_logging_remove,
+	.id_table = intel_fw_logging_table,
+	.suspend = intel_fw_logging_suspend,
+	.resume = intel_fw_logging_resume,
+};
+
+static int intel_fw_logging_init(void)
+{
+	u32 rbuf[4], rbuflen = 4;
+	u32 scu_trace_buffer_addr, *tmp_addr;
+	int ioapic, ret, err = 0;
+	struct io_apic_irq_attr irq_attr;
+
+	memset(rbuf, 0, sizeof(rbuf));
+	oshob_base = get_oshob_addr();
+	if (oshob_base == NULL) {
+		pr_err("Failed to get OSHOB address\n");
+		err = -EINVAL;
+		goto err0;
+	}
+
+	scu_trace_buffer_addr = intel_scu_ipc_get_scu_trace_buffer();
+	scu_trace_buffer_size = intel_scu_ipc_get_scu_trace_buffer_size();
+
+	if (USE_LEGACY()) { /*Legacy support*/
+		if (scu_trace_buffer_addr &&
+			(scu_trace_buffer_addr >= LOWEST_PHYS_SRAM_ADDRESS)) {
+
+			/* Calculate size of SCU extra trace buffer. Size of
+			* the buffer is given by SCU. Make sanity check in
+			* case of incorrect data. */
+
+			if (scu_trace_buffer_size > MAX_SCU_EXTRA_DUMP_SIZE) {
+				pr_err("Failed to get scu trace buffer size\n");
+				err = -ENODEV;
+				goto err1;
+			}
+
+			/* Looks that we have valid buffer and size. */
+			sram_trace_buf =
+				ioremap_nocache(scu_trace_buffer_addr,
+						scu_trace_buffer_size);
+
+			if (!sram_trace_buf) {
+				pr_err("Failed to map scu trace buffer\n");
+				err =  -ENOMEM;
+				goto err1;
+			}
+
+			scu_trace_buffer = kzalloc(
+						scu_trace_buffer_size, GFP_KERNEL);
+			if (!scu_trace_buffer) {
+				pr_err("Failed to allocate memory for trace buffer\n");
+				err = -ENOMEM;
+				goto err2;
+			}
+		} else {
+			pr_info("No extended trace buffer available\n");
+		}
+	} else {
+		if (NON_LEGACY() == 1) {
+			if (scu_trace_buffer_addr && scu_trace_buffer_size) {
+				sram_trace_buf =
+					ioremap_nocache(scu_trace_buffer_addr,
+							scu_trace_buffer_size);
+				if (!sram_trace_buf) {
+					pr_err("Failed to map SCU trace buffer\n");
+					err = -ENOMEM;
+					goto err1;
+				}
+
+				tmp_addr = (u32 *)sram_trace_buf;
+				if (*tmp_addr != SCULOG_MAGIC) {
+					/* No SCU log detected */
+					iounmap(sram_trace_buf);
+					sram_trace_buf = NULL;
+					pr_info("No valid SCU log magic found!\n");
+				}
+			}
+		} else {
+			pr_err("Unsupported platform (stepping value %d)!\n",
+						intel_mid_soc_stepping());
+			err = -EINVAL;
+			goto err1;
+		}
+	}
+
+	fabric_err_buf1 = oshob_base +
+		intel_scu_ipc_get_fabricerror_buf1_offset();
+
+	if (fabric_err_buf1 == oshob_base) {
+		pr_err("OSHOB Fabric error buf1 offset NULL\n");
+		goto err3;
+	}
+
+	fabric_err_buf2 = oshob_base +
+		intel_scu_ipc_get_fabricerror_buf2_offset();
+
+	if (fabric_err_buf2 == oshob_base) {
+		/* Fabric buffer buf2 not available on all plaforms. */
+		pr_warn("OSHOB Fabric error buf2 not present (not available on all platforms)\n");
+	}
+
+	/* Check and report existing error logs */
+	err = fw_logging_crash_on_boot();
+	if (err) {
+		pr_err("Logging SCU errors stored in SRAM failed\n");
+		goto err3;
+	}
+
+	if (USE_LEGACY()) {
+		ipanic_faberr_recoverable = 0;
+		goto non_recover;
+
+	} else if (sram_trace_buf) { /* Done sram_trace_buf */
+		iounmap(sram_trace_buf);
+		sram_trace_buf = NULL;
+	}
+
+	ioapic = mp_find_ioapic(RECOVERABLE_FABERR_INT);
+	if (ioapic < 0) {
+		pr_err("Finding ioapic for recoverable fabric error interrupt failed\n");
+		goto err1;
+	}
+
+	irq_attr.ioapic = ioapic;
+	irq_attr.ioapic_pin = RECOVERABLE_FABERR_INT;
+	irq_attr.trigger = 1;
+	irq_attr.polarity = 0; /* Active High */
+	io_apic_set_pci_routing(NULL, RECOVERABLE_FABERR_INT, &irq_attr);
+
+	INIT_LIST_HEAD(&pending_sculog_list.list);
+	INIT_LIST_HEAD(&pending_recovfe_list.list);
+
+	recoverable_irq = RECOVERABLE_FABERR_INT;
+	err = request_threaded_irq(RECOVERABLE_FABERR_INT,
+				   recoverable_faberror_irq,
+				   recoverable_faberror_thread,
+				   IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+				   "faberr_int",
+				   0);
+	if (err) {
+		pr_err("Requesting irq for recoverable fabric error failed\n");
+		goto err1;
+	}
+
+	/* Create a permanent sysfs for hosting recoverable error log */
+#ifdef CONFIG_PROC_FS
+	ipanic_faberr_recoverable = proc_create("ipanic_fabric_recv_err",
+						S_IFREG | S_IRUGO, NULL,
+						&ipanic_fab_recoverable_fops);
+
+	if (ipanic_faberr_recoverable == 0) {
+		pr_err("Fail creating procfile ipanic_fabric_recv_err for recoverable fabric err\n");
+		err = -ENODEV;
+		goto err1;
+	}
+
+	online_scu_log = proc_create("online_scu_log", S_IFREG | S_IRUGO,
+					NULL, &online_scu_log_fops);
+	if (online_scu_log == 0) {
+		pr_err("Fail creating procfile online_scu_log for SCU log\n");
+		remove_proc_entry("ipanic_fabric_recv_err", NULL);
+		err = -ENODEV;
+		goto err1;
+	}
+#endif /* CONFIG_PROC_FS */
+
+	ret = rpmsg_send_command(fw_logging_instance, IPCMSG_SCULOG_TRACE,
+			IPC_CMD_SCU_LOG_ADDR, NULL, (u32 *)rbuf, 0, rbuflen);
+
+	if (ret || (!ret && rbuf[3] != 0)) {
+		pr_err("Fail getting new SCU log shared SRAM location via IPC!\n");
+		global_scutrace_enable = false;
+		global_unsolicit_scutrace_enable = false;
+	} else {
+		new_scu_trace_buffer = ioremap_nocache((resource_size_t)rbuf[0],
+							(unsigned long)rbuf[1]);
+
+		if (!rbuf[1] || !rbuf[2] || !new_scu_trace_buffer) {
+
+			if (new_scu_trace_buffer)
+				iounmap(new_scu_trace_buffer);
+
+#ifdef CONFIG_PROC_FS
+			remove_proc_entry("ipanic_fabric_recv_err", NULL);
+			remove_proc_entry("online_scu_log", NULL);
+#endif /* CONFIG_PROC_FS */
+
+			pr_err("Failed to map SCU trace buffer\n");
+			err = -ENODEV;
+			goto err1;
+		}
+
+		new_scu_trace_buffer_size = rbuf[1];
+		new_scu_trace_buffer_rb_size = rbuf[2];
+
+		pr_info("New SCU trace buffer SRAM addr is: 0x%08X\n", rbuf[0]);
+		pr_info("New SCU trace buffer size (via IPC) is: 0x%08X\n", rbuf[1]);
+		pr_info("New SCU trace ring buffer (via IPC) size is: 0x%08X\n", rbuf[2]);
+
+		ret = rpmsg_send_command(fw_logging_instance, IPCMSG_SCULOG_TRACE,
+				IPC_CMD_SCU_EN_STATUS, NULL, (u32 *)rbuf, 0, rbuflen);
+
+		if (ret || (!ret && rbuf[0] == 0)) {
+			global_scutrace_enable = false;
+			pr_info("SCU trace logging is disabled\n");
+		} else {
+			global_scutrace_enable = true;
+			pr_info("SCU trace logging is enabled\n");
+		}
+
+		/*Disable unsolicit SCU trace by default*/
+		global_unsolicit_scutrace_enable = false;
+
+		scutrace_kobj = kobject_create_and_add(
+					"scutrace_kobj", kernel_kobj);
+		if (scutrace_kobj) {
+			ret = sysfs_create_group(scutrace_kobj,
+						&scutrace_attrs_group);
+			if (ret) {
+				pr_err("SCU log sysfs create group error\n");
+				kobject_put(scutrace_kobj);
+				scutrace_kobj = NULL;
+			}
+		} else
+			pr_err("SCU log sysfs kobject_create_and_add error\n");
+	}
+
+	if (atomic_notifier_chain_register(&panic_notifier_list,
+				&fw_logging_panic_notifier)) {
+		pr_err("Fail to register intel_fw_logging panic notifier!\n");
+		iounmap(new_scu_trace_buffer);
+
+#ifdef CONFIG_PROC_FS
+		remove_proc_entry("ipanic_fabric_recv_err", NULL);
+		remove_proc_entry("online_scu_log", NULL);
+#endif /* CONFIG_PROC_FS */
+		err = -ENODEV;
+		goto err1;
+	}
+
+	if (intel_fw_logging_start_nc_pwr_reporting())
+		pr_err("Fail to start north cluster power reporting!\n");
+
+non_recover:
+	/* Clear fabric error region inside OSHOB if neccessary */
+	rpmsg_send_simple_command(fw_logging_instance,
+				  IPCMSG_CLEAR_FABERROR, 0);
+
+	err = platform_driver_register(&intel_fw_logging_driver);
+
+	if (err) {
+#ifdef CONFIG_PROC_FS
+		if (ipanic_faberr_recoverable) {
+			remove_proc_entry("ipanic_fabric_recv_err", NULL);
+			remove_proc_entry("online_scu_log", NULL);
+			ipanic_faberr_recoverable = 0;
+		}
+#endif /* CONFIG_PROC_FS */
+
+		if (scutrace_kobj)
+			sysfs_remove_group(scutrace_kobj, &scutrace_attrs_group);
+
+		pr_err("Failed to register platform driver\n");
+
+		if (!USE_LEGACY()) {
+			atomic_notifier_chain_unregister(&panic_notifier_list,
+						&fw_logging_panic_notifier);
+
+			intel_fw_logging_stop_nc_pwr_reporting();
+			iounmap(new_scu_trace_buffer);
+
+			goto err1; /* For Merrifield platform(s) */
+		} else
+			goto err3; /* For other legacy platforms */
+	}
+
+	return err;
+err3:
+	kfree(scu_trace_buffer);
+err2:
+	iounmap(sram_trace_buf);
+err1:
+	iounmap(oshob_base);
+err0:
+	return err;
+}
+
+static void intel_fw_logging_exit(void)
+{
+	platform_driver_unregister(&intel_fw_logging_driver);
+	kfree(scu_trace_buffer);
+
+	iounmap(oshob_base);
+	iounmap(sram_trace_buf);
+	iounmap(new_scu_trace_buffer);
+
+	if (!USE_LEGACY()) { /* Does this for Merrifield platform */
+		/* only since legacy platforms hook panic handler & */
+		/* IA trace reporting inside intel_fw_logging_probe */
+		atomic_notifier_chain_unregister(&panic_notifier_list,
+					&fw_logging_panic_notifier);
+
+		intel_fw_logging_stop_nc_pwr_reporting();
+	}
+
+#ifdef CONFIG_PROC_FS
+	if (ipanic_faberr)
+		remove_proc_entry("ipanic_fabric_err", NULL);
+
+	if (offline_scu_log)
+		remove_proc_entry("offline_scu_log", NULL);
+
+	if (online_scu_log)
+		remove_proc_entry("online_scu_log", NULL);
+
+	if (ipanic_faberr_recoverable)
+		remove_proc_entry("ipanic_fabric_recv_err", NULL);
+#endif /* CONFIG_PROC_FS */
+
+	if (scutrace_kobj) {
+		sysfs_remove_group(scutrace_kobj, &scutrace_attrs_group);
+		kobject_put(scutrace_kobj);
+	}
+
+	kfree(new_sculog_offline_buf);
+	kfree(log_buffer);
+}
+
+static int fw_logging_rpmsg_probe(struct rpmsg_channel *rpdev)
+{
+	int ret = 0;
+
+	if (rpdev == NULL) {
+		pr_err("fw_logging rpmsg channel not created\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	dev_info(&rpdev->dev, "Probed fw_logging rpmsg device\n");
+
+	/* Allocate rpmsg instance for fw_logging*/
+	ret = alloc_rpmsg_instance(rpdev, &fw_logging_instance);
+	if (!fw_logging_instance) {
+		dev_err(&rpdev->dev, "kzalloc fw_logging instance failed\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	/* Initialize rpmsg instance */
+	init_rpmsg_instance(fw_logging_instance);
+
+	/* Init scu fw_logging */
+	ret = intel_fw_logging_init();
+
+	if (ret)
+		free_rpmsg_instance(rpdev, &fw_logging_instance);
+out:
+	return ret;
+}
+
+static void fw_logging_rpmsg_remove(struct rpmsg_channel *rpdev)
+{
+	intel_fw_logging_exit();
+	free_rpmsg_instance(rpdev, &fw_logging_instance);
+	dev_info(&rpdev->dev, "Removed fw_logging rpmsg device\n");
+}
+
+static void fw_logging_rpmsg_cb(struct rpmsg_channel *rpdev, void *data,
+					int len, void *priv, u32 src)
+{
+	dev_warn(&rpdev->dev, "unexpected, message\n");
+
+	print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+		       data, len,  true);
+}
+
+static struct rpmsg_device_id fw_logging_rpmsg_id_table[] = {
+	{ .name	= "rpmsg_fw_logging" },
+	{ },
+};
+MODULE_DEVICE_TABLE(rpmsg, fw_logging_rpmsg_id_table);
+
+static struct rpmsg_driver fw_logging_rpmsg = {
+	.drv.name	= KBUILD_MODNAME,
+	.drv.owner	= THIS_MODULE,
+	.id_table	= fw_logging_rpmsg_id_table,
+	.probe		= fw_logging_rpmsg_probe,
+	.callback	= fw_logging_rpmsg_cb,
+	.remove		= fw_logging_rpmsg_remove,
+};
+
+static int __init fw_logging_rpmsg_init(void)
+{
+	return register_rpmsg_driver(&fw_logging_rpmsg);
+}
+module_init(fw_logging_rpmsg_init);
+
+static void __exit fw_logging_rpmsg_exit(void)
+{
+	return unregister_rpmsg_driver(&fw_logging_rpmsg);
+}
+module_exit(fw_logging_rpmsg_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Utility driver for getting intel scu fw debug info");
+MODULE_AUTHOR("Winson Yung <winson.w.yung@intel.com>");
diff --git a/drivers/external_drivers/drivers/misc/intel_fw_trace.h b/drivers/external_drivers/drivers/misc/intel_fw_trace.h
new file mode 100644
index 0000000..980784a
--- /dev/null
+++ b/drivers/external_drivers/drivers/misc/intel_fw_trace.h
@@ -0,0 +1,47 @@
+/*
+ * drivers/misc/intel_fw_trace.h
+ *
+ * Copyright (C) 2013 Intel Corp
+ * Author: jouni.hogander@intel.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#ifndef __INTEL_FW_TRACE_H
+#define __INTEL_FW_TRACE_H
+
+struct scu_trace_hdr_t {
+	u32 magic;
+	u8 majorrev;
+	u8 minorrev;
+	u16 cmd;
+	u32 offset;
+	u32 size;
+};
+
+struct ia_trace_t {
+	u32 apm_cmd[2];
+	u32 ospm_pm_ssc[2];
+};
+
+#define TRACE_MAGIC 0x53435554
+
+#define TRACE_ID_INFO    0x0100
+#define TRACE_ID_ERROR   0x0200
+#define TRACE_ID_MASK    (0x3 << 8)
+
+#define TRACE_IS_ASCII   0x0001
+
+void apic_scu_panic_dump(void);
+#endif /* __INTEL_FABRICID_DEF_H */
diff --git a/drivers/external_drivers/drivers/misc/jhash_uuid.h b/drivers/external_drivers/drivers/misc/jhash_uuid.h
new file mode 100644
index 0000000..71336a7
--- /dev/null
+++ b/drivers/external_drivers/drivers/misc/jhash_uuid.h
@@ -0,0 +1,86 @@
+#ifndef _LINUX_JHASH_UUID_H
+#define _LINUX_JHASH_UUID_H
+
+/* jhash.h: Jenkins hash support.
+ *
+ * Copyright (C) 1996 Bob Jenkins (bob_jenkins@burtleburtle.net)
+ *
+ * http://burtleburtle.net/bob/hash/
+ *
+ * These are the credits from Bob's sources:
+ *
+ * lookup2.c, by Bob Jenkins, December 1996, Public Domain.
+ * hash(), hash2(), hash3, and mix() are externally useful functions.
+ * Routines to test the hash are included if SELF_TEST is defined.
+ * You can use this free for any purpose.  It has no warranty.
+ *
+ * Copyright (C) 2003 David S. Miller (davem@redhat.com)
+ *
+ * I've modified Bob's hash to be useful in the Linux kernel, and
+ * any bugs present are surely my fault.  -DaveM
+ */
+
+/* NOTE: Arguments are modified.
+   NOTE 2 : specific version for uid calculation
+*/
+#define __jhash_mix(a, b, c) \
+{ \
+  a -= b; a -= c; a ^= (c>>13); \
+  b -= c; b -= a; b ^= (a<<8); \
+  c -= a; c -= b; c ^= (b>>13); \
+  a -= b; a -= c; a ^= (c>>12);  \
+  b -= c; b -= a; b ^= (a<<16); \
+  c -= a; c -= b; c ^= (b>>5); \
+  a -= b; a -= c; a ^= (c>>3);  \
+  b -= c; b -= a; b ^= (a<<10); \
+  c -= a; c -= b; c ^= (b>>15); \
+}
+
+/* The golden ration: an arbitrary value */
+#define JHASH_GOLDEN_RATIO	0x9e3779b9
+
+/* The most generic version, hashes an arbitrary sequence
+ * of bytes.  No alignment or length assumptions are made about
+ * the input key.
+ */
+static inline u32 jhash(const void *key, u32 length, u32 initval)
+{
+	u32 a, b, c, len;
+	const u8 *k = key;
+
+	len = length;
+	a = b = JHASH_GOLDEN_RATIO;
+	c = initval;
+
+	while (len >= 12) {
+		a += (k[0] +((u32)k[1]<<8) +((u32)k[2]<<16) +((u32)k[3]<<24));
+		b += (k[4] +((u32)k[5]<<8) +((u32)k[6]<<16) +((u32)k[7]<<24));
+		c += (k[8] +((u32)k[9]<<8) +((u32)k[10]<<16)+((u32)k[11]<<24));
+
+		__jhash_mix(a,b,c);
+
+		k += 12;
+		len -= 12;
+	}
+
+	c += length;
+	switch (len) {
+	case 11: c += ((u32)k[10]<<24);
+	case 10: c += ((u32)k[9]<<16);
+	case 9 : c += ((u32)k[8]<<8);
+	case 8 : b += ((u32)k[7]<<24);
+	case 7 : b += ((u32)k[6]<<16);
+	case 6 : b += ((u32)k[5]<<8);
+	case 5 : b += k[4];
+	case 4 : a += ((u32)k[3]<<24);
+	case 3 : a += ((u32)k[2]<<16);
+	case 2 : a += ((u32)k[1]<<8);
+	case 1 : a += k[0];
+	};
+
+	__jhash_mix(a,b,c);
+
+	return c;
+}
+
+#endif /* _LINUX_JHASH_UUID_H */
diff --git a/drivers/external_drivers/drivers/misc/kct_daemon.c b/drivers/external_drivers/drivers/misc/kct_daemon.c
new file mode 100644
index 0000000..461d7d3
--- /dev/null
+++ b/drivers/external_drivers/drivers/misc/kct_daemon.c
@@ -0,0 +1,271 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/kthread.h>
+#include <linux/version.h>
+
+#include <net/sock.h>
+#include <net/netlink.h>
+
+#include <linux/kct.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 1))
+#define PORTID(skb) (NETLINK_CB(skb).pid)
+#else
+#define PORTID(skb) (NETLINK_CB(skb).portid)
+#endif
+
+/**
+ * FIFO of events
+ *
+ * The kct_daemon pops() events from this queue and sends them to the ct_agent
+ * through the Netlink socket ct_nl_sk. We push in this queue each time we
+ * trace an event and we cannot directly send it at that time (if in atomic
+ * context for example).
+ */
+static struct sk_buff_head kct_skb_queue;
+
+/** The Netlink socket */
+static struct sock *kct_nl_sk;
+
+/** The kct_daemon kthread */
+static struct task_struct *kctd;
+
+/** Waitqueue used to wake kct_daemon */
+static DECLARE_WAIT_QUEUE_HEAD(kct_wq);
+
+/* monitor port ID, used to identify the netlink socket to send events to */
+int monitor_pid;
+
+/** Sequence ID of packet sent to userland */
+atomic_t kct_seq = ATOMIC_INIT(0);
+
+/** Protocol number for netlink socket */
+static int kctunit = NETLINK_CRASHTOOL;
+module_param(kctunit, int, 0);
+
+/* keep ONE return to ensure inline */
+inline struct ct_event *kct_alloc_event(const char *submitter_name,
+					const char *ev_name,
+					enum ct_ev_type ev_type,
+					gfp_t flags,
+					uint ev_flags)
+{
+	struct ct_event *ev = NULL;
+	struct timespec t;
+
+	if (submitter_name && ev_name) {
+		ev = kzalloc(sizeof(*ev), flags);
+		if (ev) {
+			strlcpy(ev->submitter_name,
+				submitter_name,
+				sizeof(ev->submitter_name));
+			strlcpy(ev->ev_name, ev_name, sizeof(ev->ev_name));
+
+			getnstimeofday(&t);
+			ev->timestamp = (time_t)t.tv_sec;
+			ev->type = ev_type;
+			ev->flags = ev_flags;
+		}
+	}
+
+	return ev;
+}
+EXPORT_SYMBOL(kct_alloc_event);
+
+inline int kct_add_attchmt(struct ct_event **ev,
+			   enum ct_attchmt_type at_type,
+			   unsigned int size,
+			   char *data, gfp_t flags)
+{
+	struct ct_attchmt *new_attchmt = NULL;
+	struct ct_event *new_ev = NULL;
+	u32 new_size = sizeof(*new_ev) + (*ev)->attchmt_size +
+		ALIGN(size + sizeof(*new_attchmt), ATTCHMT_ALIGNMENT);
+
+	pr_debug("%s: size %u\n", __func__, new_size);
+
+	new_ev = krealloc(*ev, new_size, flags);
+	if (!new_ev) {
+		pr_warn("%s: krealloc() failed.\n", __func__);
+		return -ENOMEM;
+	}
+
+	new_attchmt = (struct ct_attchmt *)
+		(((char *) new_ev->attachments) + new_ev->attchmt_size);
+
+	WARN_ON(!IS_ALIGNED((size_t)new_attchmt, 4));
+
+	new_attchmt->size = size;
+	new_attchmt->type = at_type;
+	memcpy(new_attchmt->data, data, size);
+
+	new_ev->attchmt_size = new_size - sizeof(*new_ev);
+
+	*ev = new_ev;
+
+	return 0;
+}
+EXPORT_SYMBOL(kct_add_attchmt);
+
+void kct_free_event(struct ct_event *ev)
+{
+	kfree(ev);
+}
+EXPORT_SYMBOL(kct_free_event);
+
+int kct_log_event(struct ct_event *ev, gfp_t flags)
+{
+	struct nlmsghdr *nlh = NULL;
+	struct sk_buff *skb = NULL;
+	u32 seq;
+
+	skb = nlmsg_new(sizeof(*ev) + ev->attchmt_size, flags);
+	if (!skb)
+		return -ENOMEM;
+
+	seq = atomic_inc_return(&kct_seq);
+	/** TODO: atomic monitor_pid or spinlock */
+	nlh = nlmsg_put(skb, monitor_pid, seq, KCT_EVENT,
+			sizeof(*ev) + ev->attchmt_size, 0);
+	if (nlh == NULL) {
+		nlmsg_free(skb);
+		return -EMSGSIZE;
+	}
+
+	memcpy(nlmsg_data(nlh), ev, sizeof(*ev) + ev->attchmt_size);
+
+	kct_free_event(ev);
+
+	skb_queue_tail(&kct_skb_queue, skb);
+	wake_up(&kct_wq);
+
+	return 0;
+}
+EXPORT_SYMBOL(kct_log_event);
+
+/**
+ * Daemon responsible to empty the FIFO of events inside the Netlink
+ */
+static int kct_daemon(void *unused)
+{
+	struct sk_buff *skb = NULL;
+
+	pr_debug("%s: started!\n", __func__);
+
+	while (!kthread_should_stop()) {
+
+		pr_debug("%s: loop.\n", __func__);
+
+		if (skb_queue_len(&kct_skb_queue) && monitor_pid) {
+			skb = skb_dequeue(&kct_skb_queue);
+			if (skb) {
+				/* pid might not have been set in kct_log_event;
+				 * ensure it's ok now
+				 **/
+				PORTID(skb) = monitor_pid;
+				netlink_unicast(kct_nl_sk, skb, monitor_pid, 1);
+			}
+		} else {
+			wait_event_interruptible(kct_wq,
+						 (skb_queue_len(&kct_skb_queue)
+						  && monitor_pid) ||
+						 kthread_should_stop());
+		}
+	}
+
+	pr_debug("%s: daemon terminated.\n", __func__);
+
+	skb_queue_purge(&kct_skb_queue);
+
+	return 0;
+}
+
+int kct_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+{
+	if (nlh->nlmsg_type != KCT_SET_PID) {
+		pr_warn("%s: Wrong command received.\n", __func__);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: KCT_SET_PID received: %d\n", __func__, PORTID(skb));
+	monitor_pid = PORTID(skb);
+
+	wake_up(&kct_wq);
+
+	return 0;
+}
+
+/**
+ * Netlink handler, called when a user space process writes into the netlink.
+ *
+ * Note:
+ * - The socket buffer skb given as argument might contain multiple Netlink
+ *   packet. We ack each of them.
+ */
+static void kct_receive_skb(struct sk_buff *skb)
+{
+	struct nlmsghdr *nlh = NULL;
+	int len = 0;
+	int err = 0;
+
+	pr_debug("%s: message received on the socket.\n", __func__);
+
+	nlh = nlmsg_hdr(skb);
+	len = skb->len;
+
+	while (nlmsg_ok(nlh, len)) {
+		err = kct_receive_msg(skb, nlh);
+		 if (err || (nlh->nlmsg_flags & NLM_F_ACK))
+			netlink_ack(skb, nlh, err);
+
+		nlh = nlmsg_next(nlh, &len);
+	}
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
+#define CREATENL(init, unit, rcv)					\
+	(netlink_kernel_create(&init, unit,				\
+			       &((struct netlink_kernel_cfg){ .input = rcv })))
+#else
+#define CREATENL(init, unit, rcv)					\
+	(netlink_kernel_create(&init, unit, 0, &rcv, NULL, THIS_MODULE))
+#endif
+
+/**
+ * Initializes the module by:
+ * - creating a netlink socket with our protocol number (NETLINK_CRASHTOOL)
+ * - initializing the FIFO of events to pass on to userspace
+ * - starting the kthread that will consume those events from the internal FIFO
+ *    to the netlink.
+ */
+static int __init kct_init(void)
+{
+	kct_nl_sk = CREATENL(init_net, kctunit, kct_receive_skb);
+
+	if (!kct_nl_sk) {
+		pr_err("%s: Can't create netlink socket.\n", __func__);
+		return -1;
+	}
+
+	skb_queue_head_init(&kct_skb_queue);
+
+	kctd = kthread_run(kct_daemon, NULL, "kct_daemon");
+
+	return 0;
+}
+
+static void __exit kct_exit(void)
+{
+	netlink_kernel_release(kct_nl_sk);
+
+	kthread_stop(kctd);
+
+	/* Wake up kct_daemon kthread, so it can terminate */
+	wake_up(&kct_wq);
+}
+
+fs_initcall(kct_init);
+module_exit(kct_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/external_drivers/drivers/misc/rawio/Kconfig b/drivers/external_drivers/drivers/misc/rawio/Kconfig
new file mode 100644
index 0000000..fb64fc3
--- /dev/null
+++ b/drivers/external_drivers/drivers/misc/rawio/Kconfig
@@ -0,0 +1,81 @@
+#
+# rawio utility drivers
+#
+
+menuconfig RAWIO
+	tristate "Debug fs based raw io device read/write framework "
+	depends on DEBUG_FS
+	default y 
+	help
+	  This option enables support for reading or writing registers/memory
+	  region in a io device via debug fs.
+	  With this option and related rawio driver options enabled, you could
+	  read configuration space of a PCI device, registers of a memory
+	  mapped or port mapped device, registers of a i2c device, etc.
+	  This is the just the framework driver. You need enable more
+	  options to support specific device types.
+
+	  To compile this driver as a module, choose M: the module will
+	  be called rawio.
+
+	  If you are not sure, say N here.
+
+if RAWIO
+
+config RAWIO_PCI
+	tristate "rawio PCI driver"
+	depends on RAWIO && PCI
+	default y
+	help
+	  This option enables the rawio PCI driver.
+	  With this driver, you can read or write any PCI device's
+	  configuration space via debugfs.
+	  To compile this driver as a module, choose M: the module will
+	  be called rawio_pci.
+
+config RAWIO_IOMEM
+	tristate "rawio I/O memory driver"
+	depends on RAWIO
+	default y
+	help
+	  This option enables the rawio I/O memory driver.
+	  With this driver, you can read or write registers of a memory
+	  mapped I/O devices.
+
+	  To compile this driver as a module, choose M: the module will
+	  be called rawio_iomem.
+
+config RAWIO_I2C
+	tristate "rawio I2C driver"
+	depends on RAWIO && I2C
+	default y
+	help
+	  This option enables the rawio I2C driver.
+	  With this driver, you can read or write any I2C device's
+	  register debugfs interface.
+	  To compile this driver as a module, choose M: the module will
+	  be called rawio_i2c.
+
+config RAWIO_MSGBUS
+	tristate "rawio Message Bus driver"
+	depends on RAWIO && X86_WANT_INTEL_MID
+	default y
+	help
+	  This option enables the rawio Message Bus driver.
+	  With this driver, you can read or write any message bus
+	  register via the rawio debugfs interface.
+	  To compile this driver as a module, choose M: the module will
+	  be called rawio_msgbus.
+
+config RAWIO_MSR
+	tristate "rawio MSR driver"
+	depends on RAWIO && X86
+	default y
+	help
+	  This option enables the rawio MSR driver.
+	  With this driver, you can read or write any MSR register
+	  on X86 platform via the rawio debugfs interface.
+	  To compile this driver as a module, choose M: the module will
+	  be called rawio_msr.
+
+endif # RAWIO
diff --git a/drivers/external_drivers/drivers/misc/rawio/Makefile b/drivers/external_drivers/drivers/misc/rawio/Makefile
new file mode 100644
index 0000000..3d9f938
--- /dev/null
+++ b/drivers/external_drivers/drivers/misc/rawio/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_RAWIO)		+= rawio.o
+obj-$(CONFIG_RAWIO_PCI)		+= rawio_pci.o
+obj-$(CONFIG_RAWIO_IOMEM)	+= rawio_iomem.o
+obj-$(CONFIG_RAWIO_I2C)		+= rawio_i2c.o
+obj-$(CONFIG_RAWIO_MSGBUS)	+= rawio_msgbus.o
+obj-$(CONFIG_RAWIO_MSR)		+= rawio_msr.o
diff --git a/drivers/external_drivers/drivers/misc/rawio/rawio.c b/drivers/external_drivers/drivers/misc/rawio/rawio.c
new file mode 100644
index 0000000..513010f
--- /dev/null
+++ b/drivers/external_drivers/drivers/misc/rawio/rawio.c
@@ -0,0 +1,514 @@
+/*
+ * rawio.c - a debugfs based framework for reading/writing registers
+ * from a I/O device.
+ * With pluggable rawio drivers, it can support PCI devices, I2C devices,
+ * memory mapped I/O devices, etc.
+ * It's designed for helping debug Linux device drivers on embedded system or
+ * SoC platforms.
+ *
+ * Copyright (c) 2013 Bin Gao <bin.gao@intel.com>
+ *
+ * This file is released under the GPLv2
+ *
+ *
+ * Two files are created in debugfs root folder: rawio_cmd and rawio_output.
+ * To read or write via the rawio debugfs interface, first echo a rawio
+ * command to the file rawio_cmd, then cat the file rawio_output:
+ * $ echo "<rawio command>" > /sys/kernel/debug/rawio_cmd
+ * $ cat /sys/kernel/debug/rawio_output
+ * The cat command is required for both read and write operations.
+ * For details of rawio command format, see specific rawio drivers.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include "rawio.h"
+
+#define SHOW_NUM_PER_LINE	(32 / active_width)
+#define LINE_WIDTH		32
+#define IS_WHITESPACE(c)	((c) == ' ' || (c) == '\t' || (c) == '\n')
+
+static struct dentry *rawio_cmd_dentry, *rawio_output_dentry;
+static char rawio_cmd_buf[RAWIO_CMD_LEN], rawio_err_buf[RAWIO_ERR_LEN + 1];
+static DEFINE_MUTEX(rawio_lock);
+static LIST_HEAD(rawio_driver_head);
+static struct rawio_driver *active_driver;
+static enum width active_width;
+static enum ops active_ops;
+static u64 args_val[RAWIO_ARGS_MAX];
+static u8 args_postfix[RAWIO_ARGS_MAX];
+static int num_args_val;
+
+static void store_value(u64 *where, void *value, enum type type)
+{
+	switch (type) {
+	case TYPE_U8:
+		*(u8 *)where = *(u8 *)value;
+		break;
+	case TYPE_U16:
+		*(u16 *)where = *(u16 *)value;
+		break;
+	case TYPE_U32:
+		*(u32 *)where = *(u32 *)value;
+		break;
+	case TYPE_U64:
+		*where = *(u64 *)value;
+		break;
+	case TYPE_S8:
+		*(s8 *)where = *(s8 *)value;
+		break;
+	case TYPE_S16:
+		*(s16 *)where = *(s16 *)value;
+		break;
+	case TYPE_S32:
+		*(s32 *)where = *(s32 *)value;
+		break;
+	case TYPE_S64:
+		*(s64 *)where = *(s64 *)value;
+		break;
+	default:
+		break;
+	}
+}
+
+int rawio_register_driver(struct rawio_driver *driver)
+{
+	mutex_lock(&rawio_lock);
+	list_add_tail(&driver->list, &rawio_driver_head);
+	mutex_unlock(&rawio_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rawio_register_driver);
+
+int rawio_unregister_driver(struct rawio_driver *driver)
+{
+	mutex_lock(&rawio_lock);
+	list_del(&driver->list);
+	mutex_unlock(&rawio_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rawio_unregister_driver);
+
+void rawio_err(const char *fmt, ...)
+{
+	va_list args;
+	va_start(args, fmt);
+	vsnprintf(rawio_err_buf, RAWIO_ERR_LEN, fmt, args);
+	va_end(args);
+}
+EXPORT_SYMBOL_GPL(rawio_err);
+
+static int parse_arguments(char *input, char **args)
+{
+	int count, located;
+	char *p = input;
+	int input_len = strlen(input);
+
+	count = 0;
+	located = 0;
+	while (*p != 0) {
+		if (p - input >= input_len)
+			break;
+
+		/* Locate the first character of a argument */
+		if (!IS_WHITESPACE(*p)) {
+			if (!located) {
+				located = 1;
+				args[count++] = p;
+				if (count > RAWIO_ARGS_MAX)
+					break;
+			}
+		} else {
+			if (located) {
+				*p = 0;
+				located = 0;
+			}
+		}
+		p++;
+	}
+
+	return count;
+}
+
+static int parse_driver_args(struct rawio_driver *driver, char **arg_list,
+		int num_args, enum ops ops, u64 *arg_val, u8 *postfix)
+{
+	int i;
+	size_t str_len;
+	enum type type;
+	u64 value;
+	char *str;
+	char *args_postfix;
+
+	for (i = 0; i < num_args; i++) {
+		switch (ops) {
+		case OPS_RD:
+			type = driver->args_rd_types[i];
+			args_postfix = driver->args_rd_postfix;
+			break;
+		case OPS_WR:
+			type = driver->args_wr_types[i];
+			args_postfix = driver->args_wr_postfix;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		if (args_postfix[i]) {
+			str = (char *) arg_list[i];
+			str_len = strlen(str);
+			if (str[str_len - 1] == args_postfix[i]) {
+				postfix[i] = 1;
+				str[str_len - 1] = 0;
+			} else {
+				postfix[i] = 0;
+			}
+		}
+
+		if (kstrtou64(arg_list[i], 0, &value))
+				goto failed;
+		store_value(arg_val + i, &value, type);
+	}
+
+	return 0;
+
+failed:
+	snprintf(rawio_err_buf, RAWIO_ERR_LEN,
+		"invalid argument %s, usage:\n", arg_list[i]);
+	strncat(rawio_err_buf, driver->help, RAWIO_ERR_LEN -
+					strlen(rawio_err_buf));
+	return -EINVAL;
+}
+
+static struct rawio_driver *find_driver(const char *name)
+{
+	struct rawio_driver *driver;
+
+	mutex_lock(&rawio_lock);
+	list_for_each_entry(driver, &rawio_driver_head, list) {
+		if (!strncmp(driver->name, name, strlen(name))) {
+			mutex_unlock(&rawio_lock);
+			return driver;
+		}
+	}
+	mutex_unlock(&rawio_lock);
+
+	return NULL;
+}
+
+static ssize_t rawio_cmd_write(struct file *file, const char __user *buf,
+				size_t len, loff_t *offset)
+{
+	char cmd[RAWIO_CMD_LEN];
+	char *arg_list[RAWIO_ARGS_MAX];
+	int num_args;
+	enum ops ops;
+	enum width width;
+	struct rawio_driver *driver;
+
+	rawio_err_buf[0] = 0;
+
+	if (len >= RAWIO_CMD_LEN) {
+		snprintf(rawio_err_buf, RAWIO_ERR_LEN, "command is too long.\n"
+					"max allowed command length is %d\n",
+							RAWIO_CMD_LEN);
+		goto done;
+	}
+
+	if (copy_from_user(cmd, buf, len)) {
+		snprintf(rawio_err_buf, RAWIO_ERR_LEN,
+			"copy_from_user() failed.\n");
+		goto done;
+	}
+	cmd[len] = 0;
+
+	rawio_cmd_buf[0] = 0;
+	strncpy(rawio_cmd_buf, cmd, len);
+	rawio_cmd_buf[len] = 0;
+
+	num_args = parse_arguments(cmd, arg_list);
+	if (num_args < RAWIO_ARGS_MIN) {
+		snprintf(rawio_err_buf, RAWIO_ERR_LEN,
+			"invalid command(too few arguments)\n");
+		goto done;
+	}
+	if (num_args > RAWIO_ARGS_MAX) {
+		snprintf(rawio_err_buf, RAWIO_ERR_LEN,
+			"invalid command(too many arguments)\n");
+		goto done;
+	}
+
+	/* arg 0: ops(read/write) and width (8/16/32/64 bit) */
+	if (arg_list[0][0] == 'r')
+		ops = OPS_RD;
+	else if (arg_list[0][0] == 'w')
+		ops = OPS_WR;
+	else {
+		snprintf(rawio_err_buf, RAWIO_ERR_LEN,
+			"invalid operation: %c, only r and w are supported\n",
+							 arg_list[0][0]);
+		goto done;
+	}
+
+	if (strlen(arg_list[0]) >= 3) {
+		snprintf(rawio_err_buf, RAWIO_ERR_LEN,
+			"invalid bus width: %s, only 1 2 4 8 are supported\n",
+							 arg_list[0] + 1);
+		goto done;
+	}
+
+	if (strlen(arg_list[0]) == 1)
+		width = WIDTH_DEFAULT;
+	else {
+		switch (arg_list[0][1]) {
+		case '1':
+			width = WIDTH_1;
+			break;
+		case '2':
+			width = WIDTH_2;
+			break;
+		case '4':
+			width = WIDTH_4;
+			break;
+		case '8':
+			width = WIDTH_8;
+			break;
+		default:
+			snprintf(rawio_err_buf, RAWIO_ERR_LEN,
+				"invalid bus width: %c, only 1 2 4 8 are supported\n",
+								arg_list[0][1]);
+			goto done;
+		}
+	}
+
+	/* arg1: driver name */
+	driver = find_driver(arg_list[1]);
+	if (!driver) {
+		snprintf(rawio_err_buf, RAWIO_ERR_LEN,
+			"unsupported driver type: %s\n", arg_list[1]);
+		goto done;
+	}
+
+	if (width == WIDTH_DEFAULT)
+		width = driver->default_width;
+
+	if (!(width & driver->supported_width)) {
+		snprintf(rawio_err_buf, RAWIO_ERR_LEN,
+			"unsupported driver width: %s\n", arg_list[0]);
+		goto done;
+	}
+
+	/* arg2, ..., argn: driver specific arguments */
+	num_args = num_args - 2;
+	if (((ops == OPS_RD) && (num_args > driver->args_rd_max_num)) ||
+		((ops == OPS_WR) && (num_args > driver->args_wr_max_num))) {
+		snprintf(rawio_err_buf, RAWIO_ERR_LEN,
+			"too many arguments, usage:\n");
+		strncat(rawio_err_buf, driver->help, RAWIO_ERR_LEN -
+						strlen(rawio_err_buf));
+		goto done;
+	}
+	if (((ops == OPS_RD) && (num_args < driver->args_rd_min_num)) ||
+		((ops == OPS_WR) && (num_args < driver->args_wr_min_num))) {
+		snprintf(rawio_err_buf, RAWIO_ERR_LEN,
+			"too few arguments, usage:\n");
+		strncat(rawio_err_buf, driver->help, RAWIO_ERR_LEN -
+						strlen(rawio_err_buf));
+		goto done;
+	}
+
+	if (parse_driver_args(driver, arg_list + 2, num_args, ops,
+				args_val, args_postfix))
+		goto done;
+
+	active_driver = driver;
+	active_width = width;
+	active_ops = ops;
+	num_args_val = num_args;
+done:
+	return len;
+}
+
+static int rawio_output_show(struct seq_file *s, void *unused)
+{
+	u32 start, end, start_nature, end_nature;
+	int ret, i, comp1, comp2, output_len;
+	void *output;
+	char seq_buf[16];
+
+	mutex_lock(&rawio_lock);
+
+	if (strlen(rawio_err_buf) > 0) {
+		seq_puts(s, rawio_err_buf);
+		mutex_unlock(&rawio_lock);
+		return 0;
+	}
+
+	active_driver->s = s;
+
+	if (active_ops == OPS_WR) {
+		ret = active_driver->ops->write(active_driver, active_width,
+				args_val, args_postfix, num_args_val);
+		if (ret)
+			seq_puts(s, rawio_err_buf);
+		else
+			seq_puts(s, "write succeeded.\n");
+
+		mutex_unlock(&rawio_lock);
+		return 0;
+	}
+
+	if (active_driver->ops->read_and_show) {
+		ret = active_driver->ops->read_and_show(active_driver,
+			active_width, args_val, args_postfix, num_args_val);
+		if (ret)
+			seq_puts(s, rawio_err_buf);
+		mutex_unlock(&rawio_lock);
+		return 0;
+	}
+
+	ret = active_driver->ops->read(active_driver, active_width, args_val,
+			args_postfix, num_args_val, &output, &output_len);
+	if (ret) {
+		seq_puts(s, rawio_err_buf);
+		mutex_unlock(&rawio_lock);
+		return 0;
+	}
+
+	start_nature = (u32)args_val[active_driver->addr_pos];
+	start = (start_nature / LINE_WIDTH) * LINE_WIDTH;
+	end_nature = start_nature + (output_len - 1) * active_width;
+	end = (end_nature / LINE_WIDTH + 1) * LINE_WIDTH - active_width;
+	comp1 = (start_nature - start) / active_width;
+	comp2 = (end - end_nature) / active_width;
+
+	mutex_unlock(&rawio_lock);
+
+	for (i = 0; i < comp1 + comp2 + output_len; i++) {
+		if ((i % SHOW_NUM_PER_LINE) == 0) {
+			snprintf(seq_buf, sizeof(seq_buf), "[%08x]",
+						(u32)start + i * 4);
+			seq_puts(s, seq_buf);
+		}
+		if (i < comp1 || i >= output_len + comp1) {
+			switch (active_width) {
+			case WIDTH_8:
+				seq_puts(s, " ****************");
+				break;
+			case WIDTH_4:
+				seq_puts(s, " ********");
+				break;
+			case WIDTH_2:
+				seq_puts(s, " ****");
+				break;
+			case WIDTH_1:
+				seq_puts(s, " **");
+				break;
+			default:
+				break;
+			}
+		} else {
+			switch (active_width) {
+			case WIDTH_8:
+				snprintf(seq_buf, sizeof(seq_buf), "[%016llx]",
+					*((u64 *)output + i - comp1));
+				seq_puts(s, seq_buf);
+				break;
+			case WIDTH_4:
+				snprintf(seq_buf, sizeof(seq_buf), " %08x",
+					*((u32 *)output + i - comp1));
+				seq_puts(s, seq_buf);
+				break;
+			case WIDTH_2:
+				snprintf(seq_buf, sizeof(seq_buf), " %04x",
+					*((u16 *)output + i - comp1));
+				seq_puts(s, seq_buf);
+				break;
+			case WIDTH_1:
+				snprintf(seq_buf, sizeof(seq_buf), " %02x",
+					*((u8 *)output + i - comp1));
+				seq_puts(s, seq_buf);
+				break;
+			default:
+				break;
+			}
+		}
+
+		if ((i + 1) % SHOW_NUM_PER_LINE == 0)
+			seq_puts(s, "\n");
+	}
+
+	kfree(output);
+	return 0;
+}
+
+static int rawio_cmd_show(struct seq_file *s, void *unused)
+{
+	seq_puts(s, rawio_cmd_buf);
+	return 0;
+}
+
+static int rawio_cmd_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, rawio_cmd_show, NULL);
+}
+
+static const struct file_operations rawio_cmd_fops = {
+	.owner		= THIS_MODULE,
+	.open		= rawio_cmd_open,
+	.read		= seq_read,
+	.write		= rawio_cmd_write,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int rawio_output_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, rawio_output_show, NULL);
+}
+
+static const struct file_operations rawio_output_fops = {
+	.owner		= THIS_MODULE,
+	.open		= rawio_output_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int __init rawio_init(void)
+{
+	rawio_cmd_dentry = debugfs_create_file("rawio_cmd",
+		S_IFREG | S_IRUGO | S_IWUSR, NULL, NULL, &rawio_cmd_fops);
+	rawio_output_dentry = debugfs_create_file("rawio_output",
+		S_IFREG | S_IRUGO, NULL, NULL, &rawio_output_fops);
+	if (!rawio_cmd_dentry || !rawio_output_dentry) {
+		pr_err("rawio: can't create debugfs node\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+module_init(rawio_init);
+
+static void __exit rawio_exit(void)
+{
+	debugfs_remove(rawio_cmd_dentry);
+	debugfs_remove(rawio_output_dentry);
+}
+module_exit(rawio_exit);
+
+MODULE_DESCRIPTION("Raw IO read/write utility framework driver");
+MODULE_VERSION("1.0");
+MODULE_AUTHOR("Bin Gao <bin.gao@intel.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/external_drivers/drivers/misc/rawio/rawio.h b/drivers/external_drivers/drivers/misc/rawio/rawio.h
new file mode 100644
index 0000000..8f62851
--- /dev/null
+++ b/drivers/external_drivers/drivers/misc/rawio/rawio.h
@@ -0,0 +1,78 @@
+#ifndef RAWIO_H
+#define RAWIO_H
+
+#define RAWIO_DRVNAME_LEN	8
+#define RAWIO_ERR_LEN		256
+#define RAWIO_CMD_LEN		96
+#define RAWIO_HELP_LEN		128
+#define RAWIO_ARGS_MIN		3
+#define RAWIO_ARGS_MAX		10
+
+enum type {
+	TYPE_STR = 0,
+	TYPE_U8,
+	TYPE_U16,
+	TYPE_U32,
+	TYPE_U64,
+	TYPE_S8,
+	TYPE_S16,
+	TYPE_S32,
+	TYPE_S64,
+};
+
+/* read/write width: 1, 2, 4 or 8 bytes */
+enum width {
+	WIDTH_DEFAULT = 0,
+	WIDTH_1 = 1,
+	WIDTH_2 = 2,
+	WIDTH_4 = 4,
+	WIDTH_8 = 8,
+};
+
+enum ops {
+	OPS_RD = 1,	/* read */
+	OPS_WR,		/* write */
+};
+
+struct rawio_driver {
+	struct list_head list;
+	char name[RAWIO_DRVNAME_LEN];
+
+	int args_rd_max_num; /* max args for read(including optional args) */
+	enum type args_rd_types[RAWIO_ARGS_MAX]; /* type of each arg */
+	int args_rd_min_num; /* min args for read */
+	char args_rd_postfix[RAWIO_ARGS_MAX]; /* read args postfix */
+
+	int args_wr_max_num; /* max args for write(including optional args) */
+	enum type args_wr_types[RAWIO_ARGS_MAX]; /* type of each arg */
+	int args_wr_min_num; /* min args for write */
+	char args_wr_postfix[RAWIO_ARGS_MAX]; /* write args postfix */
+
+	/* index of argument that specifies the register or memory address */
+	int addr_pos;
+
+	unsigned int supported_width;
+	enum width default_width;
+	char help[RAWIO_HELP_LEN];
+	struct rawio_ops *ops;
+	struct seq_file *s;
+};
+
+struct rawio_ops {
+	/* driver reads io device and returns the data to framework */
+	int (*read) (struct rawio_driver *drv, int width,
+		u64 *input, u8 *postfix, int input_num,
+		void **output, int *output_num);
+	/* driver reads io device and shows the data */
+	int (*read_and_show) (struct rawio_driver *drv, int width,
+		u64 *input, u8 *postfix, int input_num);
+	/* driver writes data passed from framework to io device */
+	int (*write) (struct rawio_driver *driver, int width,
+		u64 *input, u8 *postfix, int input_num);
+};
+
+int rawio_register_driver(struct rawio_driver *drv);
+int rawio_unregister_driver(struct rawio_driver *drv);
+void rawio_err(const char *fmt, ...);
+
+#endif
diff --git a/drivers/external_drivers/drivers/misc/rawio/rawio_i2c.c b/drivers/external_drivers/drivers/misc/rawio/rawio_i2c.c
new file mode 100644
index 0000000..e603b17
--- /dev/null
+++ b/drivers/external_drivers/drivers/misc/rawio/rawio_i2c.c
@@ -0,0 +1,224 @@
+/*
+ * rawio_i2c.c - rawio I2C driver.
+ * Read or write a I2C device's register, based on the rawio framework.
+ *
+ * Copyright (c) 2013 Bin Gao <bin.gao@intel.com>
+ *
+ * This file is released under the GPLv2
+ *
+ *
+ * read i2c registers:
+ * echo "r i2c <bus_num> <slace_addr> <reg> [<len>]" >
+ *			/sys/kernel/debug/rawio_cmd
+ * cat /sys/kernel/debug/rawio_output
+ * e.g. echo "r i2c 3 0x6b 0x84 5" > /sys/kernel/debug/rawio_cmd
+ *      cat /sys/kernel/debug/rawio_output
+ *
+ * write a i2c register:
+ * echo "w i2c <bus_num> <slace_addr> <reg> <val>" >
+ *			/sys/kernel/debug/rawio_cmd
+ * cat /sys/kernel/debug/rawio_output
+ * e.g. echo "w i2c 4 0x70 0x4 0xfa" > /sys/kernel/debug/rawio_cmd
+ *      cat /sys/kernel/debug/rawio_output
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/i2c.h>
+#include "rawio.h"
+
+static int i2c_prepare(u8 i2c_bus, u16 i2c_addr, u16 i2c_reg, u16 len,
+			int ten_bit_addr, struct i2c_adapter **ppadap)
+{
+	struct i2c_adapter *adap;
+
+	adap = i2c_get_adapter((int)i2c_bus);
+	if (!adap) {
+		rawio_err("can't find bus adapter for i2c bus %d\n",
+			i2c_bus);
+		return -ENODEV;
+	}
+
+	if ((!ten_bit_addr && (i2c_addr > 128)) || (i2c_addr > 1024)) {
+		rawio_err("register address is out of range, forgot 't' for 10bit addr?\n");
+		return -EINVAL;
+	}
+
+	if ((!ten_bit_addr && ((i2c_addr + len) > 128)) ||
+			((i2c_addr + len) > 1024)) {
+		rawio_err("register address is out of range, forgot 't' for 10bit addr?\n");
+		return -EINVAL;
+	}
+
+	*ppadap = adap;
+	return 0;
+}
+
+static int rawio_i2c_read(struct rawio_driver *driver, int width, u64 *input,
+	u8 *postfix, int input_num, void **output, int *output_num)
+{
+	int ret, len;
+	struct i2c_adapter *adap;
+	u16 i2c_addr, i2c_reg;
+	struct i2c_msg msg[2];
+	u8 i2c_bus, buf[2], *out_buf, ten_bit_addr, sixteen_bit_reg;
+
+	i2c_bus = (u8)input[0];
+	i2c_addr = (u16)input[1];
+	i2c_reg = (u16)input[2];
+
+	len = 1;
+	if (input_num == 4)
+		len = (u16)input[3];
+
+	ten_bit_addr = postfix[1];
+	sixteen_bit_reg = postfix[2];
+
+	ret = i2c_prepare(i2c_bus, i2c_addr, i2c_reg, len, ten_bit_addr, &adap);
+	if (ret)
+		return ret;
+
+	out_buf = kzalloc(sizeof(u8) * len, GFP_KERNEL);
+	if (buf == NULL) {
+		rawio_err("can't alloc memory\n");
+		return -ENOMEM;
+	}
+	buf[0] = i2c_reg & 0xff;
+	buf[1] = (i2c_reg >> 8) & 0xff;
+
+	/* write i2c reg address */
+	msg[0].addr = i2c_addr;
+	msg[0].flags = ten_bit_addr ? I2C_M_TEN : 0;
+	msg[0].len = sixteen_bit_reg ? 2 : 1;
+	msg[0].buf = buf;
+
+	/* read i2c reg */
+	msg[1].addr = i2c_addr;
+	msg[1].flags = I2C_M_RD | (ten_bit_addr ? I2C_M_TEN : 0);
+	msg[1].len = len;
+	msg[1].buf = out_buf;
+
+	ret = i2c_transfer(adap, msg, 2);
+	if (ret != 2) {
+		rawio_err("i2c_transfer() failed, ret = %d\n", ret);
+		kfree(out_buf);
+		return -EIO;
+	}
+
+	*output = out_buf;
+	*output_num = len;
+	return 0;
+}
+
+static int rawio_i2c_write(struct rawio_driver *driver, int width, u64 *input,
+			u8 *postfix, int input_num)
+{
+	int ret;
+	struct i2c_adapter *adap;
+	u16 i2c_addr, i2c_reg;
+	struct i2c_msg msg;
+	u8 value, i2c_bus, buf[2], buf16[3], ten_bit_addr, sixteen_bit_reg;
+
+	i2c_bus = (u8)input[0];
+	i2c_addr = (u8)input[1];
+	i2c_reg = (u16)input[2];
+	value = (u8)input[3];
+
+	ten_bit_addr = postfix[1];
+	sixteen_bit_reg = postfix[2];
+
+	ret = i2c_prepare(i2c_bus, i2c_addr, i2c_reg, 0, ten_bit_addr, &adap);
+	if (ret)
+		return ret;
+
+	if (sixteen_bit_reg) {
+		buf16[0] = (i2c_reg >> 8) & 0xff; /* high 8 bit reg addr */
+		buf16[1] = i2c_reg & 0xff; /* low 8 bit reg addr */
+		buf16[2] = value;
+		msg.len = 3;
+		msg.buf = buf16;
+	} else {
+		buf[0] = i2c_reg & 0xff; /* low 8 bit reg addr */
+		buf[1] = value;
+		msg.len = 2;
+		msg.buf = buf;
+	}
+
+	msg.addr = i2c_addr;
+	msg.flags = ten_bit_addr ? I2C_M_TEN : 0;
+
+	ret = i2c_transfer(adap, &msg, 1);
+	if (ret != 1) {
+		rawio_err("i2c_transfer() failed, ret = %d\n", ret);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static struct rawio_ops rawio_i2c_ops = {
+	rawio_i2c_read,
+	NULL,
+	rawio_i2c_write,
+};
+
+static struct rawio_driver rawio_i2c = {
+	{NULL, NULL}, /* list node */
+	"i2c", /* driver name */
+
+	/* read */
+	4, /* max args */
+	{TYPE_U8, TYPE_U16, TYPE_U16, TYPE_U8}, /* type of read args */
+	3, /* min args */
+	{ 0, 't', 's', 0 }, /* args postfix */
+
+	/* write */
+	4, /* max args */
+	{TYPE_U8, TYPE_U16, TYPE_U16, TYPE_U8}, /* type of write args */
+	4, /* min args */
+	{ 0, 't', 's', 0 }, /* args postfix */
+
+	2, /* index of arg that specifies the register or memory address */
+
+	WIDTH_1, /* supported width */
+	WIDTH_1, /* default width */
+
+	/*
+	 * Slave address with postfix 't' indicates 10 bit slave address,
+	 * otherwise it's 7 bit address by default.
+	 * Register offset with postfix 's' indicates register offset is
+	 * 16 bit, otherwise it's 8 bit offset by default.
+	 * For example "r i2c 5 0x108t 0x0394s 2" means 10 bit slave address
+	 * and 16 bit register offset, and "r i2c 5 0x28 0x74" means 7 bit
+	 * slave address and 8 bit register offset.
+	 */
+	"r i2c <bus> <addr>[t] <reg>[s] [<len>]\n"
+	"w i2c <bus> <addr>[t] <reg>[s] <val>\n",
+	&rawio_i2c_ops,
+	NULL
+};
+
+static int __init rawio_i2c_init(void)
+{
+	if (rawio_register_driver(&rawio_i2c))
+		return -ENODEV;
+
+	return 0;
+}
+module_init(rawio_i2c_init);
+
+static void __exit rawio_i2c_exit(void)
+{
+	rawio_unregister_driver(&rawio_i2c);
+}
+module_exit(rawio_i2c_exit);
+
+MODULE_DESCRIPTION("Rawio I2C driver");
+MODULE_VERSION("1.0");
+MODULE_AUTHOR("Bin Gao <bin.gao@intel.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/external_drivers/drivers/misc/rawio/rawio_iomem.c b/drivers/external_drivers/drivers/misc/rawio/rawio_iomem.c
new file mode 100644
index 0000000..85d46c1
--- /dev/null
+++ b/drivers/external_drivers/drivers/misc/rawio/rawio_iomem.c
@@ -0,0 +1,401 @@
+/*
+ * rawio_iomem.c - a driver to read or write a device's I/O memory, based on
+ *                 the rawio debugfs framework.
+ *
+ * Copyright (c) 2013 Bin Gao <bin.gao@intel.com>
+ *
+ * This file is released under the GPLv2
+ *
+ *
+ * 1: byte, 2: word, 4: dword
+ *
+ * I/O mem read:
+ * echo "r[1|2|4] iomem <physical_addr> [<len>]" > /sys/kernel/debug/rawio_cmd
+ * cat /sys/kernel/debug/rawio_output
+ * e.g. echo "r iomem 0xff003040 20" > /sys/kernel/debug/rawio_cmd
+ *      cat /sys/kernel/debug/rawio_output
+ *
+ * I/O mem write:
+ * echo "w[1|2|4] iomem <physical_addr> <val>" > /sys/kernel/debug/rawio_cmd
+ * cat /sys/kernel/debug/rawio_output
+ * e.g. echo "w2 iomem 0xff003042 0xb03f" > /sys/kernel/debug/rawio_cmd
+ *      cat /sys/kernel/debug/rawio_output
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/pm_runtime.h>
+#include <linux/pci.h>
+#include <linux/pnp.h>
+#include <linux/platform_device.h>
+#include <linux/acpi.h>
+#include "rawio.h"
+
+/*
+ * On some platforms, a read or write to a device which is in a low power
+ * state will cause a system error, or even cause a system reboot.
+ * To address this, we use runtime PM APIs to bring up the device to
+ * runnint state before use and put back to original state after use.
+ *
+ * We could use lookup_resource() to map an physical address to a device,
+ * but there are some problems:
+ * 1) lookup_resource() is not exported, so a kernel module can't use it;
+ * 2) To use the 'name' field of 'struct resource' to match a device is
+ *    not reliable;
+ * So we rather walk known device types than look up the resrouce list
+ * to map a physical address(I/O memory address) to a device.
+ */
+
+/* return true if range(start: m2, size: n2) is in range(start: m1, size: n1) */
+#define IN_RANGE(m1, n1, m2, n2) \
+	(((m2 >= m1) && (m2 < (m1 + n1))) && \
+	(((m2 + n2) >= m1) && ((m2 + n2) < (m1 + n1))))
+
+struct dev_walker {
+	resource_size_t addr;
+	resource_size_t size;
+	struct device *dev;
+	int error;
+};
+
+#ifdef CONFIG_PCI
+int walk_pci_devices(struct device *dev, void *data)
+{
+	int i;
+	resource_size_t start, len;
+	struct pci_dev *pdev = (struct pci_dev *) to_pci_dev(dev);
+	struct dev_walker *walker = (struct dev_walker *) data;
+
+	if (!pdev)
+		return -ENODEV;
+
+	walker->dev = NULL;
+	for (i = 0; i < 6; i++) {
+		start = pci_resource_start(pdev, i);
+		len = pci_resource_len(pdev, i);
+		if (IN_RANGE(start, len, walker->addr, walker->size)) {
+			walker->dev = dev;
+			return 1;
+		}
+	}
+
+	return 0;
+}
+#endif
+
+int walk_platform_devices(struct device *dev, void *data)
+{
+	int i;
+	struct resource *r;
+	resource_size_t start, len;
+	struct platform_device *plat_dev = to_platform_device(dev);
+	struct dev_walker *walker = (struct dev_walker *) data;
+
+	walker->dev = NULL;
+	for (i = 0; i < plat_dev->num_resources; i++) {
+		r = platform_get_resource(plat_dev, IORESOURCE_MEM, i);
+		if (!r)
+			continue;
+		start = r->start;
+		len = r->end - r->start + 1;
+		if (IN_RANGE(start, len, walker->addr, walker->size)) {
+			walker->dev = dev;
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
+#if defined(CONFIG_PNP) && !defined(MODULE)
+int walk_pnp_devices(struct device *dev, void *data)
+{
+	int i;
+	struct resource *r;
+	resource_size_t start, len;
+	struct pnp_dev *pnp_dev = (struct pnp_dev *) to_pnp_dev(dev);
+	struct dev_walker *walker = (struct dev_walker *) data;
+
+	walker->dev = NULL;
+	for (i = 0; (r = pnp_get_resource(pnp_dev, IORESOURCE_MEM, i)); i++) {
+		if (!pnp_resource_valid(r))
+			continue;
+
+		start = r->start;
+		len = r->end - r->start + 1;
+		if (IN_RANGE(start, len, walker->addr, walker->size)) {
+			walker->dev = dev;
+			return 1;
+		}
+	}
+
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_ACPI
+static acpi_status do_walk_acpi_device(acpi_handle handle, u32 nesting_level,
+				void *context, void **ret)
+{
+	struct dev_walker *walker = (struct dev_walker *) context;
+	struct acpi_device *adev = NULL;
+	resource_size_t start, len;
+	struct resource_list_entry *rentry;
+	struct list_head resource_list;
+	struct resource *resources;
+	int i, count;
+
+	acpi_bus_get_device(handle, &adev);
+	if (!adev)
+		return AE_CTRL_DEPTH;
+
+	/*
+	 * Simply skip this acpi device if it's already attached to
+	 * other bus types(e.g. platform dev or a pnp dev).
+	 */
+	if (adev->physical_node_count)
+		return 0;
+
+	INIT_LIST_HEAD(&resource_list);
+	count = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
+	if (count <= 0)
+		return AE_CTRL_DEPTH;
+
+	resources = kmalloc(count * sizeof(struct resource), GFP_KERNEL);
+	if (!resources) {
+		pr_err("rawio: No memory for resources\n");
+		acpi_dev_free_resource_list(&resource_list);
+		walker->error = -ENOMEM;
+		return AE_CTRL_TERMINATE;
+	}
+
+	count = 0;
+	list_for_each_entry(rentry, &resource_list, node)
+		resources[count++] = rentry->res;
+
+	acpi_dev_free_resource_list(&resource_list);
+
+	for (i = 0; i < count; i++) {
+		start = resources[i].start;
+		len = resources[i].end - resources[i].start + 1;
+		if (IN_RANGE(start, len, walker->addr, walker->size)) {
+			walker->dev = &adev->dev;
+			kfree(resources);
+			return AE_CTRL_TERMINATE;
+		}
+	}
+
+	kfree(resources);
+	return AE_CTRL_DEPTH;
+}
+#endif
+
+static struct device *walk_devices(resource_size_t addr, resource_size_t size)
+{
+	int ret;
+	struct dev_walker walker;
+
+	walker.addr = addr;
+	walker.size = size;
+	walker.dev = NULL;
+	walker.error = 0;
+
+#ifdef CONFIG_PCI
+	ret = bus_for_each_dev(&pci_bus_type, NULL, (void *)&walker,
+						walk_pci_devices);
+	if (ret == 1)
+		return  walker.dev;
+#endif
+
+	ret = bus_for_each_dev(&platform_bus_type, NULL, (void *)&walker,
+						walk_platform_devices);
+	if (ret == 1)
+		return walker.dev;
+
+#if defined(CONFIG_PNP) && !defined(MODULE)
+	ret = bus_for_each_dev(&pnp_bus_type, NULL, (void *)&walker,
+						walk_pnp_devices);
+	if (ret == 1)
+		return walker.dev;
+#endif
+
+#ifdef CONFIG_ACPI
+	acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
+			NULL, do_walk_acpi_device, &walker, NULL);
+
+	if (walker.error)
+		return NULL;
+
+	if (walker.dev)
+		return walker.dev;
+#endif
+
+	return NULL;
+}
+
+
+static int rawio_iomem_read(struct rawio_driver *driver, int width,
+			u64 *input, u8 *postfix, int input_num,
+			void **output, int *output_num)
+{
+	int i, size, count;
+	phys_addr_t addr;
+	void *buf;
+	void __iomem *va;
+	struct device *dev = NULL;
+
+	addr = (phys_addr_t)input[0];
+	count = 1;
+	if (input_num == 2)
+		count = (int)input[1];
+	size = width * count;
+
+	if (((width == WIDTH_2) && (addr & 0x1)) ||
+		((width == WIDTH_4) && (addr & 0x3)) ||
+		((width == WIDTH_8) && (addr & 0x7))) {
+		rawio_err("address requires 2 bytes aligned for 16 bit access, 4 bytes aligned for 32 bit access and 8 bytes aligned for 64 bit access\n");
+		return -EINVAL;
+	}
+
+	va = ioremap_nocache(addr, size);
+	if (!va) {
+		rawio_err("can't map physical address %llx\n", addr);
+		return -EIO;
+	}
+
+	buf = kzalloc(size, GFP_KERNEL);
+	if (buf == NULL) {
+		rawio_err("can't alloc memory\n");
+		iounmap(va);
+		return -ENOMEM;
+	}
+
+	dev = walk_devices(addr, size);
+	if (dev)
+		pm_runtime_get_sync(dev);
+
+	for (i = 0; i < count; i++) {
+		switch (width) {
+		case WIDTH_1:
+			*((u8 *)buf + i) = ioread8(va + i);
+			break;
+		case WIDTH_2:
+			*((u16 *)buf + i) = ioread16(va + i * 2);
+			break;
+		case WIDTH_4:
+			*((u32 *)buf + i) = ioread32(va + i * 4);
+			break;
+		default:
+			break;
+		}
+	}
+
+	if (dev)
+		pm_runtime_put_sync(dev);
+	iounmap(va);
+	*output = buf;
+	*output_num = count;
+	return 0;
+}
+
+static int rawio_iomem_write(struct rawio_driver *driver, int width,
+			u64 *input, u8 *postfix, int input_num)
+{
+	unsigned long val;
+	phys_addr_t addr;
+	void __iomem *va;
+	struct device *dev;
+
+	addr = (phys_addr_t)input[0];
+	val = (u64)input[1];
+
+	if (((width == WIDTH_2) && (addr & 0x1)) ||
+		((width == WIDTH_4) && (addr & 0x3)) ||
+		((width == WIDTH_8) && (addr & 0x7))) {
+		rawio_err("address requires 2 bytes aligned for 16 bit access, 4 bytes aligned for 32 bit access and 8 bytes aligned for 64 bit access\n");
+		return -EINVAL;
+	}
+
+	va = ioremap_nocache(addr, width);
+	if (!va) {
+		rawio_err("can't map physical address %llx\n", addr);
+		return -EIO;
+	}
+
+	dev = walk_devices(addr, 0);
+	if (dev)
+		pm_runtime_get_sync(dev);
+
+	switch (width) {
+	case WIDTH_1:
+		 *(u8 *)va = (u8)val;
+		break;
+	case WIDTH_2:
+		 *(u16 *)va = (u16)val;
+		break;
+	case WIDTH_4:
+		 *(u32 *)va = (u32)val;
+		break;
+	default:
+		break;
+	}
+
+	if (dev)
+		pm_runtime_put_sync(dev);
+	iounmap(va);
+	return 0;
+}
+
+static struct rawio_ops rawio_iomem_ops = {
+	rawio_iomem_read,
+	NULL,
+	rawio_iomem_write,
+};
+
+static struct rawio_driver rawio_iomem = {
+	{NULL, NULL},
+	"iomem",
+
+	/* read */
+	2, /* max args */
+	{TYPE_U64, TYPE_S16}, /* args type */
+	1, /* min args */
+	{ 0, 0, }, /* args postfix */
+
+	/* write */
+	2, /* max args */
+	{TYPE_U64, TYPE_U64},
+	2, /* min args */
+	{ 0, 0, },
+
+	0, /* index to address arg */
+
+	WIDTH_1 | WIDTH_2 | WIDTH_4, /* supported access width*/
+	WIDTH_4, /* default access width */
+	"r[1|2|4] iomem <addr> [<len>]\nw[1|2|4] iomem <addr> <val>\n",
+	&rawio_iomem_ops,
+	NULL
+};
+
+static int __init rawio_iomem_init(void)
+{
+	return rawio_register_driver(&rawio_iomem);
+}
+module_init(rawio_iomem_init);
+
+static void __exit rawio_iomem_exit(void)
+{
+	rawio_unregister_driver(&rawio_iomem);
+}
+module_exit(rawio_iomem_exit);
+
+MODULE_DESCRIPTION("Rawio I/O memory driver");
+MODULE_VERSION("1.0");
+MODULE_AUTHOR("Bin Gao <bin.gao@intel.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/external_drivers/drivers/misc/rawio/rawio_msgbus.c b/drivers/external_drivers/drivers/misc/rawio/rawio_msgbus.c
new file mode 100644
index 0000000..fe7aca7
--- /dev/null
+++ b/drivers/external_drivers/drivers/misc/rawio/rawio_msgbus.c
@@ -0,0 +1,131 @@
+/*
+ * rawio_msgbus.c - rawio Message Bus driver.
+ * Read or write message bus registers, based on the rawio framework.
+ *
+ * Copyright (c) 2013 Bin Gao <bin.gao@intel.com>
+ *
+ * This file is released under the GPLv2
+ *
+ *
+ * read message bus registers:
+ * echo "r msgbus <port> <addr> [<len>]" >
+ *			/sys/kernel/debug/rawio_cmd
+ * cat /sys/kernel/debug/rawio_output
+ * e.g. echo "r msgbus 2 0x30 4" > /sys/kernel/debug/rawio_cmd
+ *      cat /sys/kernel/debug/rawio_output
+ *
+ * write a message bus register:
+ * echo "w msgbus <port> <addr> <val>" >
+ *			/sys/kernel/debug/rawio_cmd
+ * cat /sys/kernel/debug/rawio_output
+ * e.g. echo "w msgbus 2 0x30 0x20f8" > /sys/kernel/debug/rawio_cmd
+ *      cat /sys/kernel/debug/rawio_output
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <asm/intel_mid_pcihelpers.h>
+#include "rawio.h"
+
+static int rawio_msgbus_read(struct rawio_driver *driver, int width, u64 *input,
+	u8 *postfix, int input_num, void **output, int *output_num)
+{
+	int i, len;
+	u8 msgbus_port;
+	u16 msgbus_addr;
+	u32 *buf;
+
+	msgbus_port = (u8)input[0];
+	msgbus_addr = (u16)input[1];
+
+	len = 1;
+	if (input_num == 3)
+		len = (u16)input[2];
+
+	buf = kzalloc(width * len, GFP_KERNEL);
+	if (buf == NULL) {
+		rawio_err("can't alloc memory\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < len; i++)
+		*(buf + i) = intel_mid_msgbus_read32(msgbus_port,
+						msgbus_addr + i);
+
+	*output = buf;
+	*output_num = len;
+	return 0;
+}
+
+static int rawio_msgbus_write(struct rawio_driver *driver, int width, u64 *input,
+			u8 *postfix, int input_num)
+{
+	u8 msgbus_port;
+	u16 msgbus_addr;
+	u32 value;
+
+	msgbus_port = (u8)input[0];
+	msgbus_addr = (u16)input[1];
+	value = (u32)input[2];
+
+	intel_mid_msgbus_write32(msgbus_port, msgbus_addr, value);
+	return 0;
+}
+
+static struct rawio_ops rawio_msgbus_ops = {
+	rawio_msgbus_read,
+	NULL,
+	rawio_msgbus_write,
+};
+
+static struct rawio_driver rawio_msgbus = {
+	{NULL, NULL}, /* list node */
+	"msgbus", /* driver name */
+
+	/* read */
+	3, /* max args */
+	{TYPE_U8, TYPE_U16, TYPE_U16, TYPE_U8}, /* type of read args */
+	2, /* min args */
+	{ 0, }, /* args postfix */
+
+	/* write */
+	3, /* max args */
+	{TYPE_U8, TYPE_U16, TYPE_U16, TYPE_U8}, /* type of write args */
+	3, /* min args */
+	{ 0, }, /* args postfix */
+
+	1, /* index of arg that specifies the register or memory address */
+
+	WIDTH_4, /* supported width */
+	WIDTH_4, /* default width */
+
+	"r msgbus <port> <addr> [<len>]\n"
+	"w msgbus <port> <addr> <val>\n",
+	&rawio_msgbus_ops,
+	NULL
+};
+
+static int __init rawio_msgbus_init(void)
+{
+	if (rawio_register_driver(&rawio_msgbus))
+		return -ENODEV;
+
+	return 0;
+}
+module_init(rawio_msgbus_init);
+
+static void __exit rawio_msgbus_exit(void)
+{
+	rawio_unregister_driver(&rawio_msgbus);
+}
+module_exit(rawio_msgbus_exit);
+
+MODULE_DESCRIPTION("Rawio Message Bus driver");
+MODULE_VERSION("1.0");
+MODULE_AUTHOR("Bin Gao <bin.gao@intel.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/external_drivers/drivers/misc/rawio/rawio_msr.c b/drivers/external_drivers/drivers/misc/rawio/rawio_msr.c
new file mode 100644
index 0000000..e42a18e
--- /dev/null
+++ b/drivers/external_drivers/drivers/misc/rawio/rawio_msr.c
@@ -0,0 +1,193 @@
+/*
+ * rawio_msr.c - rawio MSR driver.
+ * Read or write x86 MSR registers, based on the rawio framework.
+ *
+ * Copyright (c) 2013 Bin Gao <bin.gao@intel.com>
+ *
+ * This file is released under the GPLv2
+ *
+ *
+ * read MSR registers:
+ * echo "r[4|8] msr <addr> [<cpu>]" >
+ *			/sys/kernel/debug/rawio_cmd
+ * cat /sys/kernel/debug/rawio_output
+ *      cat /sys/kernel/debug/rawio_output
+ * By default it's 64bit read(r8) on all cpus.
+ * e.g. echo "r msr 0x198" > /sys/kernel/debug/rawio_cmd
+ *
+ * write a message bus register:
+ * echo "w msr <addr> <value> [<cpu>]" >
+ *			/sys/kernel/debug/rawio_cmd
+ * cat /sys/kernel/debug/rawio_output
+ * e.g. echo "w msr 0x198 0xff002038102299a0 2" > /sys/kernel/debug/rawio_cmd
+ *      cat /sys/kernel/debug/rawio_output
+ * This is a 64bit write toward cpu 2(cpu index starts from 0).
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/seq_file.h>
+#include <asm/msr.h>
+#include "rawio.h"
+
+static int rawio_msr_read_and_show(struct rawio_driver *driver, int width,
+				u64 *input, u8 *postfix, int input_num)
+{
+	int cpu, ret, i, count;
+	u32 msr_addr, data[2];
+	char seq_buf[32];
+
+	msr_addr = (u32)input[0];
+
+	if (input_num == 2) {
+		cpu = (int)input[1];
+		if ((cpu < 0) || (cpu >= nr_cpu_ids)) {
+			rawio_err("cpu should be between 0 - %d\n",
+						nr_cpu_ids - 1);
+			return -EINVAL;
+		}
+	} else
+		cpu = -1;
+
+	if (cpu < 0) {
+		/* loop for all cpus */
+		i = 0;
+		count = nr_cpu_ids;
+	} else {
+		/* loop for one cpu */
+		i = cpu;
+		count = cpu + 1;
+	}
+
+	for (; i < count; i++) {
+		ret = rdmsr_safe_on_cpu(i, msr_addr, &data[0], &data[1]);
+		if (ret) {
+			rawio_err("msr read error: %d\n", ret);
+			return -EIO;
+		} else {
+			if (width == WIDTH_4) {
+				snprintf(seq_buf, 31, "[cpu %2d] %08x\n",
+							i, data[0]);
+				seq_puts(driver->s, seq_buf);
+			} else {
+				snprintf(seq_buf, 32, "[cpu %2d] %08x%08x\n",
+					 i, data[1], data[0]);
+				seq_puts(driver->s, seq_buf);
+			}
+		}
+	}
+
+	return 0;
+}
+static int rawio_msr_write(struct rawio_driver *driver, int width, u64 *input,
+			u8 *postfix, int input_num)
+{
+	int cpu, ret, i, count;
+	u32 msr_addr, data[2];
+	u64 value;
+
+	msr_addr = (u32)input[0];
+	value = (u64)input[1];
+
+	if (input_num == 3) {
+		cpu = (int)input[2];
+		if ((cpu < 0) || (cpu >= nr_cpu_ids)) {
+			rawio_err("cpu should be between 0 - %d\n",
+						nr_cpu_ids - 1);
+			return -EINVAL;
+		}
+	} else
+		cpu = -1;
+
+	if (width == WIDTH_4) {
+		ret = rdmsr_safe_on_cpu(cpu, msr_addr,
+				&data[0], &data[1]);
+		if (ret) {
+			rawio_err("msr write error: %d\n", ret);
+			return -EIO;
+		}
+		data[0] = (u32)value;
+	} else {
+		data[0] = (u32)value;
+		data[1] = (u32)(value >> 32);
+	}
+
+	if (cpu < 0) {
+		/* loop for all cpus */
+		i = 0;
+		count = nr_cpu_ids;
+	} else {
+		/* loop for one cpu */
+		i = cpu;
+		count = cpu + 1;
+	}
+
+	for (; i < count; i++) {
+		ret = wrmsr_safe_on_cpu(i, msr_addr, data[0], data[1]);
+		if (ret) {
+			rawio_err("msr write error: %d\n", ret);
+			return -EIO;
+		} else
+			seq_puts(driver->s, "write succeeded.\n");
+	}
+
+	return 0;
+}
+
+static struct rawio_ops rawio_msr_ops = {
+	NULL,
+	rawio_msr_read_and_show,
+	rawio_msr_write,
+};
+
+static struct rawio_driver rawio_msr = {
+	{NULL, NULL}, /* list node */
+	"msr", /* driver name */
+
+	/* read */
+	2, /* max args */
+	{TYPE_U32, TYPE_U8}, /* type of read args */
+	1, /* min args */
+	{ 0, }, /* args postfix */
+
+	/* write */
+	3, /* max args */
+	{TYPE_U32, TYPE_U64, TYPE_U8}, /* type of write args */
+	2, /* min args */
+	{ 0, }, /* args postfix */
+
+	0, /* index of arg that specifies the register or memory address */
+
+	WIDTH_4 | WIDTH_8, /* supported width */
+	WIDTH_8, /* default width */
+
+	"r msr <addr> [<cpu>]\n"
+	"w msr <addr> <val> [<cpu>]\n",
+	&rawio_msr_ops,
+	NULL
+};
+
+static int __init rawio_msr_init(void)
+{
+	if (rawio_register_driver(&rawio_msr))
+		return -ENODEV;
+
+	return 0;
+}
+module_init(rawio_msr_init);
+
+static void __exit rawio_msr_exit(void)
+{
+	rawio_unregister_driver(&rawio_msr);
+}
+module_exit(rawio_msr_exit);
+
+MODULE_DESCRIPTION("Rawio MSR driver");
+MODULE_VERSION("1.0");
+MODULE_AUTHOR("Bin Gao <bin.gao@intel.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/external_drivers/drivers/misc/rawio/rawio_pci.c b/drivers/external_drivers/drivers/misc/rawio/rawio_pci.c
new file mode 100644
index 0000000..785da80
--- /dev/null
+++ b/drivers/external_drivers/drivers/misc/rawio/rawio_pci.c
@@ -0,0 +1,235 @@
+/*
+ * rawio_pci.c - a driver to read/write pci configuration space registers based
+ *               on the rawio framework.
+ *
+ * 1: byte, 2: word, 4: dword
+ *
+ * read pci config space registers
+ * echo "r[1|2|4] pci <domain> <bus> <dev> <func> <reg> [<len>]" >
+ *				/sys/kernel/debug/rawio_cmd
+ * cat /sys/kernel/debug/rawio_output
+ * e.g. echo "r1 pci 0 0 3 0 8 12" > /sys/kernel/debug/rawio_cmd
+ *      cat /sys/kernel/debug/rawio_output
+ *
+ * write a pci config space register:
+ * echo "w[1|2|4] pci <domain> <bus> <dev> <func> <reg> <val>" >
+ *				/sys/kernel/debug/rawio_output
+ * cat /sys/kernel/debug/rawio_output
+ * e.g. echo "w pci 0 0 0x11 2 0x10 0xffffffff" > /sys/kernel/debug/rawio_cmd
+ *      cat /sys/kernel/debug/rawio_output
+ *
+ *
+ * Copyright (c) 2013 Bin Gao <bin.gao@intel.com>
+ *
+ * This file is released under the GPLv2
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/pm_runtime.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include "rawio.h"
+
+static int pci_prepare(int pci_domain, unsigned int pci_bus,
+		u8 pci_dev, u8 pci_func, enum width width,
+		u16 pci_reg, u16 len, struct pci_dev **ppdev)
+{
+	struct pci_dev *pdev;
+	int ret;
+
+	if (((width == WIDTH_2) && (pci_reg & 0x1)) ||
+		((width == WIDTH_4) && (pci_reg & 0x3))) {
+		rawio_err("register address requires 2 bytes aligned for 16 bit access, and 4 bytes aligned for 32 bit access\n");
+		return -EINVAL;
+	}
+
+	pdev = pci_get_domain_bus_and_slot(pci_domain, pci_bus,
+				PCI_DEVFN(pci_dev, pci_func));
+	if (!pdev) {
+		rawio_err("pci device %04x:%02x:%02x.%01x doesn't exist\n",
+				pci_domain, pci_bus, pci_dev, pci_func);
+		return -ENODEV;
+	}
+
+	if (((pci_reg >= 0x100) && !pci_is_pcie(pdev)) ||
+				(pci_reg >= 0x1000)) {
+		rawio_err("register address is out of range\n");
+		return -EINVAL;
+	}
+
+	if ((((pci_reg + len * width) >= 0x100) && !pci_is_pcie(pdev)) ||
+				((pci_reg + len * width) >= 0x1000)) {
+		rawio_err("register address is out of range\n");
+		return -EINVAL;
+	}
+
+	ret = pm_runtime_get_sync(&pdev->dev);
+	if ((ret >= 0) || (ret == -EACCES))
+		goto out;
+
+	rawio_err("can't put pci device %04x:%02x:%02x.%01xinto running state, pm_runtime_get_sync() returned %d\n",
+			pci_domain, pci_bus, pci_dev, pci_func, ret);
+	return -EBUSY;
+
+out:
+	*ppdev = pdev;
+	return 0;
+}
+
+static void pci_finish(struct pci_dev *pdev)
+{
+	pm_runtime_put_sync(&pdev->dev);
+}
+
+static int rawio_pci_read(struct rawio_driver *driver, int width,
+			u64 *input, u8 *postfix, int input_num,
+			void **output, int *output_num)
+{
+	int i, ret, pci_domain;
+	struct pci_dev *pdev;
+	unsigned int pci_bus;
+	u8 pci_dev, pci_func;
+	u16 pci_reg, len;
+	void *buf;
+
+	pci_domain = (int)input[0];
+	pci_bus = (unsigned int)input[1];
+	pci_dev = (u8)input[2];
+	pci_func = (u8)input[3];
+	pci_reg = (u16)input[4];
+	len = 1;
+	if (input_num == 6)
+		len = (u16)input[5];
+
+	ret = pci_prepare(pci_domain, pci_bus, pci_dev, pci_func,
+				width, pci_reg, len, &pdev);
+	if (ret)
+		return ret;
+
+	buf = kzalloc(width * len, GFP_KERNEL);
+	if (buf == NULL) {
+		rawio_err("can't alloc memory\n");
+		pci_finish(pdev);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < len; i++) {
+		switch (width) {
+		case WIDTH_1:
+			pci_read_config_byte(pdev, pci_reg + i, (u8 *)buf + i);
+			break;
+		case WIDTH_2:
+			pci_read_config_word(pdev, pci_reg + i * 2,
+						(u16 *)buf + i);
+			break;
+		case WIDTH_4:
+			pci_read_config_dword(pdev, pci_reg + i * 4,
+						(u32 *)buf + i);
+			break;
+		default:
+			break;
+		}
+	}
+
+	pci_finish(pdev);
+	*output = buf;
+	*output_num = len;
+	return 0;
+}
+
+static int rawio_pci_write(struct rawio_driver *driver, int width,
+			u64 *input, u8 *postfix, int input_num)
+{
+	int ret, pci_domain;
+	struct pci_dev *pdev;
+	unsigned int pci_bus;
+	u8 pci_dev, pci_func;
+	u16 pci_reg;
+	u32 value;
+
+	pci_domain = (int)input[0];
+	pci_bus = (unsigned int)input[1];
+	pci_dev = (u8)input[2];
+	pci_func = (u8)input[3];
+	pci_reg = (u16)input[4];
+	value = (u32) input[5];
+
+	ret = pci_prepare(pci_domain, pci_bus, pci_dev, pci_func,
+					width, pci_reg, 1, &pdev);
+	if (ret)
+		return ret;
+
+	switch (width) {
+	case WIDTH_1:
+		pci_write_config_byte(pdev, pci_reg, (u8) value);
+		break;
+	case WIDTH_2:
+		pci_write_config_word(pdev, pci_reg, (u16) value);
+		break;
+	case WIDTH_4:
+		pci_write_config_dword(pdev, pci_reg, value);
+		break;
+	default:
+		break;
+	}
+
+	pci_finish(pdev);
+	return 0;
+}
+
+static struct rawio_ops rawio_pci_ops = {
+	rawio_pci_read,
+	NULL,
+	rawio_pci_write,
+};
+
+static struct rawio_driver rawio_pci = {
+	{NULL, NULL},
+	"pci",
+
+	/* read */
+	6, /* max args */
+	{TYPE_S32, TYPE_U32, TYPE_U8, TYPE_U8, TYPE_U16, TYPE_S16}, /* types */
+	5, /* min args */
+
+	{ 0, }, /* postfix */
+
+	/* write */
+	6, /* max args */
+	{TYPE_S32, TYPE_U32, TYPE_U8, TYPE_U8, TYPE_U16, TYPE_U32}, /* types */
+	6, /* min args */
+	{ 0, },
+
+	4, /* index of address arg */
+
+	WIDTH_1 | WIDTH_2 | WIDTH_4, /* supported width */
+	WIDTH_4, /* default width */
+	"r[1|2|4] pci <domain> <bus> <dev> <func> <reg> [<len>]\n"
+	"w[1|2|4] pci <domain> <bus> <dev> <func> <reg> <val>\n",
+	&rawio_pci_ops,
+	NULL
+};
+
+static int __init rawio_pci_init(void)
+{
+	if (rawio_register_driver(&rawio_pci))
+		return -ENODEV;
+
+	return 0;
+}
+module_init(rawio_pci_init);
+
+static void __exit rawio_pci_exit(void)
+{
+	rawio_unregister_driver(&rawio_pci);
+}
+module_exit(rawio_pci_exit);
+
+MODULE_DESCRIPTION("Rawio PCI driver");
+MODULE_VERSION("1.0");
+MODULE_AUTHOR("Bin Gao <bin.gao@intel.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/external_drivers/drivers/misc/tp2e/Kconfig b/drivers/external_drivers/drivers/misc/tp2e/Kconfig
new file mode 100644
index 0000000..8ed6f18
--- /dev/null
+++ b/drivers/external_drivers/drivers/misc/tp2e/Kconfig
@@ -0,0 +1,10 @@
+config TRACEPOINT_TO_EVENT
+	tristate "Tracepoint To Event (tp2e)"
+	depends on TRACEPOINTS && KCT_DAEMON
+	help
+	  This module enable bindings between tracepoints and specific
+	  events.
+	  Tracepoint probes (bindings) are defined in headers doing
+	  matching between tracepoint elements and event
+	  fields. Including those headers in the module declares and
+	  registers the probe.
diff --git a/drivers/external_drivers/drivers/misc/tp2e/Makefile b/drivers/external_drivers/drivers/misc/tp2e/Makefile
new file mode 100644
index 0000000..6e38929
--- /dev/null
+++ b/drivers/external_drivers/drivers/misc/tp2e/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_TRACEPOINT_TO_EVENT) += tp2e.o
\ No newline at end of file
diff --git a/drivers/external_drivers/drivers/misc/tp2e/tp2e.c b/drivers/external_drivers/drivers/misc/tp2e/tp2e.c
new file mode 100644
index 0000000..12564ad
--- /dev/null
+++ b/drivers/external_drivers/drivers/misc/tp2e/tp2e.c
@@ -0,0 +1,451 @@
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include "tp2e.h"
+
+/* Create generic TP2E tracepoint event */
+#define CREATE_TRACE_POINTS
+#include <trace/events/tp2e.h>
+
+EXPORT_TRACEPOINT_SYMBOL(tp2e_generic_event);
+
+/* uncomment to compile test */
+/* #define TP2E_TEST */
+#ifdef TP2E_TEST
+#include "tp2e_test.c"
+#endif
+
+
+struct tp2e_element {
+	char *system;
+	char *name;
+	void *probe_fn;
+	struct list_head list;
+};
+
+/* List of tp2e_element objects */
+static struct list_head tp2e_list;
+
+/* A tp2e_system_dir element is associated to each system directory
+ * in the tp2e debugfs, e.g. /sys/kernel/debug/tp2e/events/my_system/
+ * Each system includes one or several tracepoint events (list of
+ * tp2e_event_file elements).
+ */
+struct tp2e_system_dir {
+	struct list_head list;
+	const char *name;
+	struct dentry *dir;
+	struct list_head event_files;
+};
+
+/* A tp2e_event_file element is associated to each tp2e element
+ * (i.e. tracepoint event supported by tp2e) in the tp2e debugfs,
+ * e.g. /sys/kernel/debug/tp2e/events/my_system/my_event/
+ */
+struct tp2e_event_file {
+	struct list_head list;
+	struct tp2e_element *elt;
+	bool enabled;
+};
+
+/* This is the root dir in the tp2e debugfs, i.e. /sys/kernel/debug/tp2e */
+static struct dentry *d_tp2e;
+
+/* List of tp2e_system_dir elements (lists all the systems supported by tp2e) */
+static struct list_head tp2e_systems_list;
+
+
+
+#define DECLARE_TP2E_ELT
+# include "tp2e_probes.h"
+#undef DECLARE_TP2E_ELT
+
+
+
+/* This method is used to enable/disable the tp2e probe for a given
+ * tracepoint event.
+ */
+static int tp2e_event_enable_disable(struct tp2e_event_file *file,
+				     int enable)
+{
+	if (!file->elt)
+		return -EINVAL;
+
+	if (enable && !file->enabled) {
+		tracepoint_probe_register(file->elt->name,
+					  file->elt->probe_fn,
+					  NULL);
+		file->enabled = true;
+	} else if (!enable && file->enabled) {
+		tracepoint_probe_unregister(file->elt->name,
+					    file->elt->probe_fn,
+					    NULL);
+		file->enabled = false;
+	}
+
+	return 0;
+}
+
+static int tp2e_generic_open(struct inode *inode, struct file *filp)
+{
+	filp->private_data = inode->i_private;
+	return 0;
+}
+
+/* Write method that is associated to the 'enable' debugfs file
+ * associated to each tp2e element in the tp2e debugfs, e.g.
+ * /sys/kernel/debug/tp2e/events/my_system/my_event/enable
+ *
+ * Writing '0' (resp. '1') in this file disables (resp. enables) the
+ * tp2e probe for the tracepoint event.
+ */
+static ssize_t event_enable_write(struct file *filp, const char __user *ubuf,
+				  size_t cnt, loff_t *ppos)
+{
+	struct tp2e_event_file *event_file = filp->private_data;
+	unsigned long val;
+	int ret;
+
+	if (!event_file)
+		return -EINVAL;
+
+	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+	if (ret)
+		return ret;
+
+	switch (val) {
+	case 0:
+	case 1:
+		ret = tp2e_event_enable_disable(event_file, val);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	*ppos += cnt;
+
+	return ret ? ret : cnt;
+}
+
+/* read method that is associated to the 'enable' file associated
+ * to each tp2e element in the tp2e debugfs, e.g.
+ * /sys/kernel/debug/tp2e/events/my_system/my_event/enable
+ *
+ * Reading '0' (resp. '1') means that the tp2e probe for the tracepoint
+ * event is disabled (resp. enabled).
+ */
+static ssize_t event_enable_read(struct file *filp, char __user *ubuf,
+				  size_t cnt, loff_t *ppos)
+{
+	struct tp2e_event_file *event_file = filp->private_data;
+	char *buf;
+
+	if (!event_file)
+		return -EINVAL;
+
+	if (event_file->enabled)
+		buf = "1\n";
+	else
+		buf = "0\n";
+
+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));
+}
+
+/* Write method that is associated to the 'enable' debugfs file
+ * located in each system directory in the tp2e debugfs, e.g.
+ * /sys/kernel/debug/tp2e/events/my_system/enable
+ *
+ * Writing '0' (resp. '1') in this file disables (resp. enables) the
+ * tp2e probe for the all the tracepoint events of the system.
+ */
+static ssize_t system_enable_write(struct file *filp, const char __user *ubuf,
+				  size_t cnt, loff_t *ppos)
+{
+	struct tp2e_system_dir *system_dir = filp->private_data;
+	struct tp2e_event_file *event_file;
+	unsigned long val;
+	int ret;
+
+	if (!system_dir)
+		return -EINVAL;
+
+	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+	if (ret)
+		return ret;
+
+	if ((val != 0) && (val != 1))
+		return -EINVAL;
+
+	list_for_each_entry(event_file, &system_dir->event_files, list) {
+		ret = tp2e_event_enable_disable(event_file, val);
+		if (ret)
+			return ret;
+	}
+
+	*ppos += cnt;
+
+	return cnt;
+}
+
+/* read method that is associated to the 'enable' debugfs file
+ * located in each system directory in the tp2e debugfs, e.g.
+ * /sys/kernel/debug/tp2e/events/my_system/enable
+ *
+ * Reading '0' (resp. '1') means that the tp2e probe of all the
+ * tracepoint events of the system are disabled (resp. enabled).
+ */
+static ssize_t system_enable_read(struct file *filp, char __user *ubuf,
+				  size_t cnt, loff_t *ppos)
+{
+	const char set_to_char[4] = { '?', '0', '1', 'X' };
+	struct tp2e_system_dir *system_dir = filp->private_data;
+	struct tp2e_event_file *event_file;
+	char buf[2];
+	int set = 0;
+
+	if (!system_dir)
+		return -EINVAL;
+
+	list_for_each_entry(event_file, &system_dir->event_files, list) {
+		set |= (1 << !!(event_file->enabled));
+		if (set == 3)
+			break;
+	}
+
+	buf[0] = set_to_char[set];
+	buf[1] = '\n';
+
+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
+}
+
+/* Write method that is associated to the 'enable' debugfs file
+ * located in the 'events'directory, i.e.
+ * /sys/kernel/debug/tp2e/events/enable
+ *
+ * Writing '0' (resp. '1') in this file disables (resp. enables) the
+ * tp2e probe of all the tracepoint events that are supported by tp2e
+ */
+static ssize_t tp2e_enable_write(struct file *filp, const char __user *ubuf,
+				 size_t cnt, loff_t *ppos)
+{
+	struct tp2e_system_dir *system_dir;
+	struct tp2e_event_file *event_file;
+	unsigned long val;
+	int ret;
+
+	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+	if (ret)
+		return ret;
+
+	if ((val != 0) && (val != 1))
+		return -EINVAL;
+
+	list_for_each_entry(system_dir, &tp2e_systems_list, list) {
+		list_for_each_entry(event_file, &system_dir->event_files,
+				    list) {
+			ret = tp2e_event_enable_disable(event_file, val);
+			if (ret)
+				return ret;
+		}
+	}
+
+	*ppos += cnt;
+
+	return cnt;
+}
+
+/* read method that is associated to the 'enable' debugfs file
+ * located in the 'events'directory, i.e.
+ * /sys/kernel/debug/tp2e/events/enable
+ *
+ * Reading '0' (resp. '1') means that the tp2e probe of all the
+ * tracepoint events supported by tp2e are disabled (resp. enabled).
+ */
+static ssize_t tp2e_enable_read(struct file *filp, char __user *ubuf,
+				size_t cnt, loff_t *ppos)
+{
+	const char set_to_char[4] = { '?', '0', '1', 'X' };
+	struct tp2e_system_dir *system_dir;
+	struct tp2e_event_file *event_file;
+	char buf[2];
+	int set = 0;
+
+	list_for_each_entry(system_dir, &tp2e_systems_list, list) {
+		list_for_each_entry(event_file, &system_dir->event_files,
+				    list) {
+			set |= (1 << !!(event_file->enabled));
+			if (set == 3)
+				break;
+		}
+	}
+
+	buf[0] = set_to_char[set];
+	buf[1] = '\n';
+
+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
+}
+
+static const struct file_operations tp2e_enable_fops = {
+	.open = tp2e_generic_open,
+	.read = tp2e_enable_read,
+	.write = tp2e_enable_write,
+};
+
+static const struct file_operations system_enable_fops = {
+	.open = tp2e_generic_open,
+	.read = system_enable_read,
+	.write = system_enable_write,
+};
+
+static const struct file_operations event_enable_fops = {
+	.open = tp2e_generic_open,
+	.read = event_enable_read,
+	.write = event_enable_write,
+};
+
+
+/* Get the system directory in debugfs for a given tp2e element,
+ * or create a system directory if not already existing.
+ */
+static struct tp2e_system_dir *tp2e_system_dir(struct dentry *parent,
+					       struct tp2e_element *elt)
+{
+	struct dentry *dir;
+	struct tp2e_system_dir *system;
+
+	/* Check if this dir has already been created */
+	list_for_each_entry(system, &tp2e_systems_list, list)
+		if (strcmp(system->name, elt->system) == 0)
+			return system;
+
+	/* If not, create a new system dir */
+	system = kmalloc(sizeof(*system), GFP_KERNEL);
+	if (!system)
+		goto out_fail;
+
+	dir = debugfs_create_dir(elt->system, parent);
+	if (!dir)
+		goto out_free;
+
+	system->name = elt->system;
+	system->dir = dir;
+	INIT_LIST_HEAD(&system->event_files);
+	list_add_tail(&system->list, &tp2e_systems_list);
+
+	if (!debugfs_create_file("enable", 0644, dir, system,
+				 &system_enable_fops))
+		pr_warn("Could not create debugfs '%s/enable' entry\n",
+			   elt->system);
+
+	return system;
+
+out_free:
+	kfree(dir);
+out_fail:
+	pr_warn("No memory to create event system '%s'",
+		elt->system);
+	return NULL;
+}
+
+/* Create the tp2e debugfs */
+static int tp2e_create_dirs(void)
+{
+	struct dentry *d_events, *d_system, *d_event;
+	struct tp2e_system_dir *system_dir;
+	struct tp2e_event_file *event_file;
+	struct tp2e_element *elt;
+
+	d_tp2e = debugfs_create_dir("tp2e", NULL);
+
+	d_events = debugfs_create_dir("events", d_tp2e);
+
+	/* Create the debugfs directory(ies) required for
+	 * each tp2e element.
+	 */
+	list_for_each_entry(elt, &tp2e_list, list) {
+		system_dir = tp2e_system_dir(d_events, elt);
+		if ((!system_dir) || (!system_dir->dir))
+			continue;
+
+		d_system = system_dir->dir;
+		d_event = debugfs_create_dir(elt->name, d_system);
+		if (!d_event) {
+			pr_warn("Could not create debugfs '%s' directory\n",
+				elt->name);
+			continue;
+		}
+
+		event_file = kmalloc(sizeof(*event_file), GFP_KERNEL);
+		if (!event_file) {
+			pr_warn("No memory to create event '%s'",
+				   elt->name);
+			return -ENOMEM;
+		}
+		event_file->enabled = false;
+		event_file->elt = elt;
+
+		if (!debugfs_create_file("enable", 0644, d_event, event_file,
+					 &event_enable_fops)) {
+			pr_warn("Could not create debugfs '%s/enable' entry\n",
+				elt->system);
+			kfree(event_file);
+			continue;
+		}
+
+		list_add_tail(&event_file->list, &system_dir->event_files);
+	}
+
+	if (!debugfs_create_file("enable", 0644, d_events, NULL,
+				 &tp2e_enable_fops))
+		pr_warn("Could not create debugfs '%s/enable' entry\n",
+			   elt->system);
+
+	return 0;
+}
+
+/* Clean-up the tp2e debugfs */
+static void tp2e_remove_dirs(void)
+{
+	struct tp2e_system_dir *current_system, *next_system;
+	struct tp2e_event_file *current_event, *next_event;
+
+	list_for_each_entry_safe(current_system, next_system,
+				 &tp2e_systems_list, list) {
+		list_for_each_entry_safe(current_event, next_event,
+					 &current_system->event_files, list) {
+			list_del(&current_event->list);
+			kfree(current_event);
+		}
+		list_del(&current_system->list);
+		kfree(current_system);
+	}
+
+	debugfs_remove_recursive(d_tp2e);
+}
+
+
+
+static int __init tp2e_init(void)
+{
+	INIT_LIST_HEAD(&tp2e_list);
+	INIT_LIST_HEAD(&tp2e_systems_list);
+
+/* Including tp2e_includes.h file with ADD_TP2E_ELEMENT being defined
+ * results in adding all the tp2e elements to the tp2e_list
+ */
+#define ADD_TP2E_ELT
+#include "tp2e_probes.h"
+#undef ADD_TP2E_ELT
+
+	tp2e_create_dirs();
+	return 0;
+}
+
+static void __exit tp2e_exit(void)
+{
+	tp2e_remove_dirs();
+}
+
+fs_initcall(tp2e_init);
+module_exit(tp2e_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/external_drivers/drivers/misc/tp2e/tp2e.h b/drivers/external_drivers/drivers/misc/tp2e/tp2e.h
new file mode 100644
index 0000000..c0049ae
--- /dev/null
+++ b/drivers/external_drivers/drivers/misc/tp2e/tp2e.h
@@ -0,0 +1,29 @@
+#ifndef _TP2E_H_
+#define _TP2E_H_
+
+#include <linux/kct.h>
+
+
+#define DEFINE_PROBE(event, probe)
+
+#endif /* _TP2E_H_ */
+
+#ifdef DECLARE_TP2E_ELT
+#undef DEFINE_PROBE
+#define DEFINE_PROBE(event, probe)				\
+	static struct tp2e_element tp2e_##event = {			\
+		.system = __stringify(TRACE_SYSTEM),			\
+		.name = __stringify(event),				\
+		.probe_fn = (void *)probe,				\
+	};
+#endif /* DECLARE_TP2E_ELT */
+
+#ifdef ADD_TP2E_ELT
+#undef DEFINE_PROBE
+#define DEFINE_PROBE(event, probe)				\
+	do {								\
+		INIT_LIST_HEAD(&tp2e_##event.list);			\
+		list_add_tail(&tp2e_##event.list, &tp2e_list);		\
+	} while (0)
+#endif /* ADD_TP2E_ELT */
+
diff --git a/drivers/external_drivers/drivers/misc/tp2e/tp2e_probe_generic_event.h b/drivers/external_drivers/drivers/misc/tp2e/tp2e_probe_generic_event.h
new file mode 100644
index 0000000..b8b8891
--- /dev/null
+++ b/drivers/external_drivers/drivers/misc/tp2e/tp2e_probe_generic_event.h
@@ -0,0 +1,47 @@
+#include "tp2e.h"
+
+#ifndef _TP2E_PROBE_GENERIC_EVENT_
+#define _TP2E_PROBE_GENERIC_EVENT_
+
+/* Below are the names of the system and event as they
+ * are defined in include/trace/events/tp2e.h
+ */
+#define TRACE_SYSTEM tp2e
+#define TP2E_GENERIC_EVENT_NAME tp2e_generic_event
+#define TP2E_SCU_RECOV_EVENT_NAME tp2e_scu_recov_event
+
+static void tp2e_probe_generic_event(void *cb_data,
+				     enum tp2e_ev_type tp2e_ev_type,
+				     char *submitter_name, char *ev_name,
+				     char *data0, char *data1, char *data2,
+				     char *data3, char *data4, char *data5,
+				     char *filelist)
+{
+	enum ct_ev_type ev_type;
+
+	switch (tp2e_ev_type) {
+	case TP2E_EV_STAT:
+		ev_type = CT_EV_STAT;
+		break;
+	case TP2E_EV_INFO:
+		ev_type = CT_EV_INFO;
+		break;
+	case TP2E_EV_ERROR:
+		ev_type = CT_EV_ERROR;
+		break;
+	case TP2E_EV_CRASH:
+		ev_type = CT_EV_CRASH;
+		break;
+	default:
+		ev_type = CT_EV_INFO;
+		break;
+	}
+	kct_log(ev_type, submitter_name, ev_name, 0, data0, data1, data2,
+		data3, data4, data5, filelist);
+}
+
+#endif /* _TP2E_PROBE_GENERIC_EVENT_H_ */
+
+DEFINE_PROBE(TP2E_GENERIC_EVENT_NAME, tp2e_probe_generic_event);
+
+DEFINE_PROBE(TP2E_SCU_RECOV_EVENT_NAME, tp2e_probe_generic_event);
diff --git a/drivers/external_drivers/drivers/misc/tp2e/tp2e_probes.h b/drivers/external_drivers/drivers/misc/tp2e/tp2e_probes.h
new file mode 100644
index 0000000..f668399
--- /dev/null
+++ b/drivers/external_drivers/drivers/misc/tp2e/tp2e_probes.h
@@ -0,0 +1,5 @@
+# include "tp2e_probe_generic_event.h"
+
+/* one can add here additional header files
+ * describing new probes for TP2E
+ */
diff --git a/drivers/external_drivers/drivers/misc/tp2e/tp2e_test.c b/drivers/external_drivers/drivers/misc/tp2e/tp2e_test.c
new file mode 100644
index 0000000..d11d5da
--- /dev/null
+++ b/drivers/external_drivers/drivers/misc/tp2e/tp2e_test.c
@@ -0,0 +1,32 @@
+#define TP2E_CRASH_DATA_LEN 10
+
+static char trace_tp2e_crash_str[4 * (TP2E_CRASH_DATA_LEN + 1)] = {'\0',};
+
+static int set_tp2e_crash(const char *val, struct kernel_param *kp)
+{
+	char ev_name[TP2E_CRASH_DATA_LEN],
+	  data0[TP2E_CRASH_DATA_LEN],
+	  data1[TP2E_CRASH_DATA_LEN],
+	  data2[TP2E_CRASH_DATA_LEN];
+	int ret = -EINVAL;
+
+	if (sscanf(val, "%s %s %s %s", ev_name, data0, data1, data2) != 4)
+		return ret;
+
+	memcpy(trace_tp2e_crash_str, val, strlen(val));
+
+	trace_tp2e_generic_event(TP2E_EV_INFO, "tp2e_test", ev_name,
+				 data0, data1, data2, "", "", "", "");
+
+	return 0;
+}
+
+static int get_tp2e_crash(char *buf, struct kernel_param *kp)
+{
+	size_t len = strlen(trace_tp2e_crash_str);
+	memcpy(buf, trace_tp2e_crash_str, len);
+	return len;
+}
+
+module_param_call(trace_tp2e_crash_str, set_tp2e_crash, get_tp2e_crash, NULL, 0644);
+MODULE_PARM_DESC(trace_tp2e_crash_str, "log trace tp2e crash <ev_name> <data0> <data1> <data2>");
diff --git a/drivers/external_drivers/drivers/misc/uuid.c b/drivers/external_drivers/drivers/misc/uuid.c
new file mode 100644
index 0000000..5cd6571
--- /dev/null
+++ b/drivers/external_drivers/drivers/misc/uuid.c
@@ -0,0 +1,206 @@
+/*
+ * drivers/misc/uuid.c
+ *
+ * Copyright (C) 2011 Intel Corp
+ * Author: jun.zhang@intel.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/pti.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/sdhci.h>
+#include <linux/mmc/sdio_ids.h>
+#include <linux/mmc/card.h>
+#include <linux/genhd.h>
+#include <jhash_uuid.h>
+#include <linux/io.h>
+#include <asm/intel_scu_ipc.h>
+#include <linux/version.h>
+
+#define EMMC0_ID_LENGTH            17
+static char emmc0_id[EMMC0_ID_LENGTH];
+struct proc_dir_entry *emmc0_id_entry;
+static struct mmc_cid_legacy cid_legacy;
+
+/* legacy structure to guarantee hash calculation */
+struct mmc_cid_legacy {
+	unsigned int		manfid;
+	char			prod_name[8];
+	unsigned int		serial;
+	unsigned short		oemid;
+	unsigned short		year;
+	unsigned char		hwrev;
+	unsigned char		fwrev;
+	unsigned char		month;
+};
+
+static ssize_t emmc0_id_read(struct file *file, char __user *buffer,
+			     size_t count, loff_t *ppos)
+{
+	if (*ppos > 0) {
+		/* We have finished to read, return 0 */
+		return 0;
+	} else {
+		/* Fill the buffer, return the buffer size */
+		memcpy(buffer, emmc0_id, sizeof(emmc0_id)-1);
+		*ppos += sizeof(emmc0_id)-1;
+		return sizeof(emmc0_id)-1;
+	}
+}
+
+
+static const struct file_operations emmc0_id_entry_fops = {
+	.read = emmc0_id_read
+};
+
+
+static int mmcblk0_match(struct device *dev, const void *data)
+{
+	if (strcmp(dev_name(dev), "mmcblk0") == 0)
+		return 1;
+	return 0;
+}
+
+static int get_emmc0_cid(void)
+{
+	struct device *emmc_disk;
+	/*
+	 * Automation people are needing proper serial number for ADB
+	 * lets derivate from the serial number of the emmc card.
+	 */
+	emmc_disk = class_find_device(&block_class, NULL, NULL, mmcblk0_match);
+	if (emmc_disk) {
+		struct gendisk *disk = dev_to_disk(emmc_disk);
+		struct mmc_card *card = mmc_dev_to_card(disk->driverfs_dev);
+		if (card) {
+			cid_legacy.manfid = card->cid.manfid;
+			cid_legacy.prod_name[0] = card->cid.prod_name[0];
+			cid_legacy.prod_name[1] = card->cid.prod_name[1];
+			cid_legacy.prod_name[2] = card->cid.prod_name[2];
+			cid_legacy.prod_name[3] = card->cid.prod_name[3];
+			cid_legacy.prod_name[4] = card->cid.prod_name[4];
+			cid_legacy.prod_name[5] = card->cid.prod_name[5];
+			cid_legacy.prod_name[6] = card->cid.prod_name[6];
+			cid_legacy.prod_name[7] = card->cid.prod_name[7];
+			cid_legacy.serial = card->cid.serial;
+			cid_legacy.oemid = card->cid.oemid;
+			cid_legacy.year = card->cid.year;
+			cid_legacy.hwrev = card->cid.hwrev;
+			cid_legacy.fwrev = card->cid.fwrev;
+			cid_legacy.month = card->cid.month;
+			snprintf(emmc0_id, sizeof(emmc0_id),
+				 "%08X",
+				 jhash(&cid_legacy, sizeof(cid_legacy), 0));
+			return 1;
+		}
+	}
+	return 0;
+}
+
+
+static void set_cmdline_serialno(void)
+{
+	char *start;
+	char *serialno;
+	char *end_of_field;
+	int serialno_len;
+	int value_length;
+	char SERIALNO_CMDLINE[] = "androidboot.serialno=";
+
+	if (strlen(emmc0_id))
+		serialno = emmc0_id;
+	else {
+		pr_err("Failed to get SSN or emmc0 ID\n");
+		goto error;
+	}
+
+	start = strstr(saved_command_line, SERIALNO_CMDLINE);
+	if (!start) {
+		pr_err("Could not find %s in cmdline\n", SERIALNO_CMDLINE);
+		goto error;
+	}
+
+	serialno_len = strlen(serialno);
+
+	start += sizeof(SERIALNO_CMDLINE) - 1;
+
+	end_of_field = strstr(start, " ");
+	if (end_of_field)
+		value_length = end_of_field - start;
+	else
+		value_length = strlen(start);
+
+	if (value_length < serialno_len) {
+		pr_err("Pre-filled serialno cmdline value is too small\n");
+		goto error;
+	}
+
+	memcpy(start, serialno, serialno_len);
+	memset(start + serialno_len, ' ', value_length - serialno_len);
+
+	return;
+error:
+	pr_err("serialno will not be updated in cmdline!\n");
+	return;
+}
+
+static int __init uuid_init(void)
+{
+
+	memset(emmc0_id, 0x00, sizeof(emmc0_id));
+	if (get_emmc0_cid()) {
+		emmc0_id_entry = proc_create("emmc0_id_entry",
+					     S_IFREG | S_IRUGO, NULL,
+					     &emmc0_id_entry_fops);
+		if (emmc0_id_entry == 0) {
+			pr_err("Fail creating procfile emmc0_id_entry\n");
+			return -ENOMEM;
+		}
+	}
+
+	set_cmdline_serialno();
+
+	return 0;
+}
+
+static void __exit uuid_exit(void)
+{
+
+	if (emmc0_id_entry)
+		remove_proc_entry("emmc0_id_entry", NULL);
+
+}
+
+late_initcall(uuid_init);
+module_exit(uuid_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("get uuid");
+MODULE_AUTHOR("Zhang Jun<jun.zhang@intel.com>");
+
diff --git a/drivers/external_drivers/drivers/platform/x86/Kconfig b/drivers/external_drivers/drivers/platform/x86/Kconfig
new file mode 100644
index 0000000..08b79e9
--- /dev/null
+++ b/drivers/external_drivers/drivers/platform/x86/Kconfig
@@ -0,0 +1,55 @@
+#
+# X86 Platform Specific Drivers
+#
+config GPIO_INTEL_MSIC
+	bool "Intel MSIC GPIO support"
+	depends on INTEL_SCU_IPC && GPIOLIB
+	---help---
+	  MSIC provides a GPIO subsystem containing several input
+	  output pins.
+	  Say Y here to support GPIO via the SCU IPC interface
+	  on Intel MID platforms.
+
+config INTEL_MID_OSIP
+	tristate "osip driver for Intel MID platforms"
+	default y
+	help
+	  This driver modifies the osip header when rebooting,
+	  so that one can type 'reboot bootloader' to fall back
+	  in kboot mode.
+
+	  If unsure, say N.
+
+config INTEL_MID_OSIP_DEBUG_FS
+	tristate "osip driver debug fs"
+	depends on INTEL_MID_OSIP && DEBUG_FS
+	default y
+	help
+	  This driver adds debugfs interface.
+	  The main feature of that interface is to be able to modify the kernel
+	  cmdline directly in the OSIP.
+	  This has to be disabled if we have secure boot enabled by default.
+
+	  If unsure, say N.
+
+config INTEL_PSH_IPC
+	bool "Intel PSH IPC Support"
+	depends on X86_INTEL_MID
+	---help---
+	  PSH(Platform Services Hub) is a low frequence IA core on Tangier Platform,
+	  whose power consumption is quite low. PSH runs RTOS software inside itself,
+	  which independently controls and collects sensor data, pre-processes the data,
+	  and communicates with Atom. Thus ATOM side could be put into low power mode
+	  with more time, while all the sensor data are collected without any lost.
+
+	  PSH IPC is used as a  bridge for OS sensor service to control and access PSH
+	  sensors communications between kernel and PSH. This is not needed for PC-type
+	  machines.
+
+	  Say Y here to get Intel PSH IPC support.
+
+config INTEL_REBOOT_TARGET
+	bool "Intel Reboot Target"
+	---help---
+	  This driver provides a generic implementation for reboot target setting at
+	  reset time.
diff --git a/drivers/external_drivers/drivers/platform/x86/Makefile b/drivers/external_drivers/drivers/platform/x86/Makefile
new file mode 100644
index 0000000..4468c50
--- /dev/null
+++ b/drivers/external_drivers/drivers/platform/x86/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_GPIO_INTEL_MSIC)	+= intel_msic_gpio.o
+obj-$(CONFIG_INTEL_MID_OSIP)	+= intel_mid_osip.o
+obj-$(CONFIG_INTEL_PSH_IPC)	+= intel_psh_ipc.o
+obj-$(CONFIG_INTEL_REBOOT_TARGET)	+= reboot_target.o
diff --git a/drivers/external_drivers/drivers/platform/x86/intel_mid_osip.c b/drivers/external_drivers/drivers/platform/x86/intel_mid_osip.c
new file mode 100644
index 0000000..851315f
--- /dev/null
+++ b/drivers/external_drivers/drivers/platform/x86/intel_mid_osip.c
@@ -0,0 +1,777 @@
+/*
+ * OSIP driver for Medfield.
+ *
+ * Copyright (C) 2011 Intel Corp
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/reboot.h>
+#include <linux/pagemap.h>
+#include <linux/blkdev.h>
+#include <linux/debugfs.h>
+#include <linux/genhd.h>
+#include <linux/seq_file.h>
+#include <linux/rpmsg.h>
+#include <linux/version.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 1))
+#include <linux/platform_data/intel_mid_remoteproc.h>
+#else
+#include <asm/intel_mid_remoteproc.h>
+#endif
+#include <linux/delay.h>
+#include <asm/intel_scu_ipcutil.h>
+#include <asm/intel_mid_rpmsg.h>
+#include <asm/intel-mid.h>
+
+#include "reboot_target.h"
+
+/* change to "loop0" and use losetup for safe testing */
+#define EMMC_OSIP_BLKDEVICE "mmcblk0"
+#define HDD_OSIP_BLKDEVICE "sda"
+#include <asm/intel_scu_ipc.h>
+#include <linux/power_supply.h>
+
+/* OSIP backup will be stored with this offset in the first sector */
+#define OSIP_BACKUP_OFFSET 0xE0
+#define MAX_OSII (7)
+#define VALLEYVIEW2_FAMILY	0x30670
+#define CPUID_MASK		0xffff0
+
+#define DRV_VERSION	"1.00"
+
+struct OSII {                   /* os image identifier */
+	uint16_t os_rev_minor;
+	uint16_t os_rev_major;
+	uint32_t logical_start_block;
+
+	uint32_t ddr_load_address;
+	uint32_t entry_point;
+	uint32_t size_of_os_image;
+
+	uint8_t attribute;
+	uint8_t reserved[3];
+};
+
+struct OSIP_header {            /* os image profile */
+	uint32_t sig;
+	uint8_t intel_reserved;
+	uint8_t header_rev_minor;
+	uint8_t header_rev_major;
+	uint8_t header_checksum;
+	uint8_t num_pointers;
+	uint8_t num_images;
+	uint16_t header_size;
+	uint32_t reserved[5];
+
+	struct OSII desc[MAX_OSII];
+};
+
+#ifdef CONFIG_INTEL_SCU_IPC
+/* A boolean variable, that is set, when wants to make the platform
+    force shuts down */
+static int force_shutdown_occured;
+
+module_param(force_shutdown_occured, int, 0644);
+MODULE_PARM_DESC(force_shutdown_occured,
+		"Variable to be set by user space"
+		" when a force shudown condition occurs, to allow"
+		" system shut down even with charger connected");
+
+
+int get_force_shutdown_occured(void)
+{
+	pr_info("%s, force_shutdown_occured=%d\n",
+		__func__, force_shutdown_occured);
+	return force_shutdown_occured;
+}
+EXPORT_SYMBOL(get_force_shutdown_occured);
+#endif
+
+int emmc_match(struct device *dev, const void *data)
+{
+	if (strcmp(dev_name(dev), EMMC_OSIP_BLKDEVICE) == 0)
+		return 1;
+	return 0;
+}
+int hdd_match(struct device *dev, const void *data)
+{
+	if (strcmp(dev_name(dev), HDD_OSIP_BLKDEVICE) == 0)
+		return 1;
+	return 0;
+}
+static struct block_device *get_bdev(void)
+{
+	struct block_device *bdev;
+	struct device *block_disk;
+
+	block_disk = class_find_device(&block_class, NULL, NULL,
+						emmc_match);
+	if (block_disk == 0) {
+		block_disk = class_find_device(&block_class, NULL, NULL,
+						hdd_match);
+		if (block_disk == 0) {
+			pr_err("block disk not found!\n");
+			return NULL;
+		}
+	}
+	/* partition 0 means raw disk */
+	bdev = bdget_disk(dev_to_disk(block_disk), 0);
+	if (bdev == NULL) {
+		dev_err(block_disk, "unable to get disk\n");
+		return NULL;
+	}
+	/* Note: this bdev ref will be freed after first
+	   bdev_get/bdev_put cycle */
+	return bdev;
+}
+static uint8_t calc_checksum(void *_buf, int size)
+{
+	int i;
+	uint8_t checksum = 0;
+	uint8_t *buf = (uint8_t *)_buf;
+	for (i = 0; i < size; i++)
+		checksum = checksum ^ (buf[i]);
+	return checksum;
+}
+/*
+  Allows to access the osip image. Callback is passed for user to
+  implement actual usage.
+  This function takes care of the blkdev housekeeping
+
+  how to do proper block access is got from:
+  fs/partitions/check.c
+  mtd/devices/block2mtd.c
+*/
+/* callbacks returns whether the OSIP was modified */
+typedef int (*osip_callback_t)(struct OSIP_header *osip, void *data);
+
+static int access_osip_record(osip_callback_t callback, void *cb_data)
+{
+	Sector sect;
+	struct block_device *bdev;
+	char *buffer;
+	struct OSIP_header *osip;
+	struct OSIP_header *osip_backup;
+	int ret = 0;
+	int dirty = 0;
+
+	bdev = get_bdev();
+	if (bdev == NULL) {
+		pr_err("%s: get_bdev failed!\n", __func__);
+		return -ENODEV;
+	}
+	/* make sure the block device is open rw */
+	ret = blkdev_get(bdev, FMODE_READ|FMODE_WRITE, NULL);
+	if (ret < 0) {
+		pr_err("%s: blk_dev_get failed!\n", __func__);
+		return -ret;
+	}
+	/* get memmap of the OSIP header */
+	buffer = read_dev_sector(bdev, 0, &sect);
+
+	if (buffer == NULL) {
+		ret = -ENODEV;
+		goto bd_put;
+	}
+	osip = (struct OSIP_header *) buffer;
+	/* some sanity checks */
+	if (osip->header_size <= 0 || osip->header_size > PAGE_SIZE) {
+		pr_err("%s: corrupted osip!\n", __func__);
+		ret = -EINVAL;
+		goto put_sector;
+	}
+	if (calc_checksum(osip, osip->header_size) != 0) {
+		pr_err("%s: corrupted osip!\n", __func__);
+		ret = -EINVAL;
+		goto put_sector;
+	}
+	/* store the OSIP backup which will be used to recover in PrOS */
+	osip_backup = kmalloc(sizeof(struct OSIP_header), GFP_KERNEL);
+	if (osip_backup == NULL)
+		goto put_sector;
+	memcpy(osip_backup, osip, sizeof(struct OSIP_header));
+
+	lock_page(sect.v);
+	dirty = callback(osip, cb_data);
+	if (dirty) {
+		memcpy(buffer + OSIP_BACKUP_OFFSET, osip_backup,
+		       sizeof(struct OSIP_header));
+		osip->header_checksum = 0;
+		osip->header_checksum = calc_checksum(osip, osip->header_size);
+		set_page_dirty(sect.v);
+	}
+	unlock_page(sect.v);
+	sync_blockdev(bdev);
+	kfree(osip_backup);
+put_sector:
+	put_dev_sector(sect);
+bd_put:
+	blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
+	return 0;
+}
+
+/* find OSII index for a given OS attribute */
+
+static int get_osii_index_cb(struct OSIP_header *osip, void *data)
+{
+	int attr = *(int *)data;
+	int i;
+
+	*(int *)data = 0;
+	for (i = 0; i < osip->num_pointers; i++) {
+		if ((osip->desc[i].attribute & ~0x1) == (attr & ~0x1) && i < MAX_OSII) {
+			*(int *)data = i;
+			break;
+		}
+	}
+	return 0;
+}
+static int get_osii_index(int attribute)
+{
+	int data = attribute;
+
+	access_osip_record(get_osii_index_cb, (void *)(&data));
+	return data;
+}
+
+#ifdef CONFIG_INTEL_SCU_IPC
+/*
+   OSHOB - OS Handoff Buffer
+   OSNIB - OS No Init Buffer
+   This buffer contains the 32-byte value that is persists across cold and warm
+   resets only, but loses context on a cold boot.
+   More info about OSHOB, OSNIB could be found in FAS Section 2.8.
+   We use the first byte in OSNIB to store and pass the Reboot/boot Reason.
+   The attribute of OS image is selected for Reboot/boot reason.
+*/
+
+static int osip_invalidate(struct OSIP_header *osip, void *data)
+{
+	unsigned int id = (unsigned int)(uintptr_t)data;
+	osip->desc[id].ddr_load_address = 0;
+	osip->desc[id].entry_point = 0;
+	return 1;
+}
+
+static int osip_restore(struct OSIP_header *osip, void *data)
+{
+	unsigned int id = (unsigned int)(uintptr_t)data;
+	/* hardcoding addresses. According to the FAS, this is how
+	   the OS image blob has to be loaded, and where is the
+	   bootstub entry point.
+	*/
+	osip->desc[id].ddr_load_address = 0x1100000;
+	osip->desc[id].entry_point = 0x1101000;
+	return 1;
+
+}
+
+/* Cold off sequence is initiated 4 sec after power button long press starts.    */
+/* In case of force shutdown, we delay cold off IPC sending by 5 seconds to make */
+/* sure PMIC fault timer has a chance to elapsed after power button is held      */
+/* down for 8 seconds */
+#define FORCE_SHUTDOWN_DELAY_IN_MSEC 5000
+
+static int osip_shutdown_notifier_call(struct notifier_block *notifier,
+				     unsigned long what, void *data)
+{
+	int ret = NOTIFY_DONE;
+	char *cmd = (char *)data;
+
+	if (what == SYS_HALT || what == SYS_POWER_OFF) {
+		pr_info("%s: notified [%s] command\n", __func__, cmd);
+		pr_info("%s(): sys power off ...\n", __func__);
+
+		if (get_force_shutdown_occured()) {
+			pr_warn("[SHTDWN] %s: Force shutdown occured, delaying ...\n",
+				__func__);
+			mdelay(FORCE_SHUTDOWN_DELAY_IN_MSEC);
+		}
+		else
+			pr_warn("[SHTDWN] %s, Not in force shutdown\n",
+				__func__);
+		/*
+		* PNW and CLVP depend on watchdog driver to
+		* send COLD OFF message to SCU.
+		* TNG and ANN use COLD_OFF IPC message to shut
+		* down the system.
+		*/
+		if ((intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER) ||
+				(intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_ANNIEDALE)) {
+			pr_err("[SHTDWN] %s, executing COLD_OFF...\n", __func__);
+			ret = rpmsg_send_generic_simple_command(RP_COLD_OFF, 0);
+			if (ret)
+				pr_err("%s(): COLD_OFF ipc failed\n", __func__);
+		}
+	}
+	/* Reboot actions will be handled by osip_reboot_target_call */
+	return NOTIFY_DONE;
+}
+
+static int osip_reboot_target_call(const char *target, int id)
+{
+	int ret_ipc;
+#ifdef DEBUG
+	u8 rbt_reason;
+#endif
+
+	pr_info("%s: notified [%s] target\n", __func__, target);
+
+	pr_warn("[REBOOT] %s, rebooting into %s\n", __func__, target);
+#ifdef DEBUG
+	if (id == SIGNED_RECOVERY_ATTR)
+		intel_scu_ipc_read_osnib_rr(&rbt_reason);
+#endif
+
+	ret_ipc = intel_scu_ipc_write_osnib_rr(id);
+	if (ret_ipc < 0)
+		pr_err("%s cannot write %s reboot reason in OSNIB\n",
+			__func__, target);
+	if (id == SIGNED_MOS_ATTR || id == SIGNED_POS_ATTR) {
+		/* If device is already in RECOVERY we must be able */
+		/* to reboot in MOS if given target is MOS or POS.  */
+		pr_warn("[REBOOT] %s, restoring OSIP\n", __func__);
+		access_osip_record(osip_restore, (void *)(uintptr_t)
+				   (get_osii_index(SIGNED_MOS_ATTR)));
+	}
+	if (id == SIGNED_RECOVERY_ATTR && ret_ipc >= 0) {
+		pr_warn("[REBOOT] %s, invalidating osip\n", __func__);
+		access_osip_record(osip_invalidate, (void *)(uintptr_t)
+				   (get_osii_index(SIGNED_MOS_ATTR)));
+	}
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block osip_shutdown_notifier = {
+	.notifier_call = osip_shutdown_notifier_call,
+};
+
+static struct reboot_target osip_reboot_target = {
+	.set_reboot_target = osip_reboot_target_call,
+};
+#endif
+
+/* useful for engineering, not for product */
+#ifdef CONFIG_INTEL_MID_OSIP_DEBUG_FS
+/* show and edit boot.bin's cmdline */
+#define OSIP_MAX_CMDLINE_SIZE 0x400
+/* number N of sectors (512bytes) needed for the cmdline, N+1 needed */
+#define OSIP_MAX_CMDLINE_SECTOR ((OSIP_MAX_CMDLINE_SIZE >> 9) + 1)
+
+/* Size used by signature is not the same for valleyview */
+#define OSIP_SIGNATURE_SIZE 		0x2D8
+#define OSIP_VALLEYVIEW_SIGNATURE_SIZE 	0x400
+
+struct cmdline_priv {
+	Sector sect[OSIP_MAX_CMDLINE_SECTOR];
+	struct block_device *bdev;
+	int lba;
+	char *cmdline;
+	unsigned int osip_id;
+	uint8_t attribute;
+};
+
+static int osip_find_cmdline(struct OSIP_header *osip, void *data)
+{
+	struct cmdline_priv *p = (struct cmdline_priv *) data;
+	if (p->osip_id < MAX_OSII) {
+		p->attribute = osip->desc[p->osip_id].attribute;
+		p->lba = osip->desc[p->osip_id].logical_start_block;
+	}
+	return 0;
+}
+int open_cmdline(struct inode *i, struct file *f)
+{
+	struct cmdline_priv *p;
+	int ret, j;
+	p = kzalloc(sizeof(struct cmdline_priv), GFP_KERNEL);
+	if (!p) {
+		pr_err("%s: unable to allocate p!\n", __func__);
+		ret = -ENOMEM;
+		goto end;
+	}
+	if (i->i_private)
+		p->osip_id = (unsigned int)(uintptr_t) i->i_private;
+	f->private_data = 0;
+	access_osip_record(osip_find_cmdline, (void *)p);
+	if (!p->lba) {
+		pr_err("%s: osip_find_cmdline failed!\n", __func__);
+		ret = -ENODEV;
+		goto free;
+	}
+	/* need to open it again */
+	p->bdev = get_bdev();
+	if (!p->bdev) {
+		pr_err("%s: access_osip_record failed!\n", __func__);
+		ret = -ENODEV;
+		goto free;
+	}
+	ret = blkdev_get(p->bdev, f->f_mode, NULL);
+	if (ret < 0) {
+		pr_err("%s: blk_dev_get failed!\n", __func__);
+		ret = -ENODEV;
+		goto free;
+	}
+	if (p->lba >= get_capacity(p->bdev->bd_disk)) {
+		pr_err("%s: %d out of disk bound!\n", __func__, p->lba);
+		ret = -EINVAL;
+		goto put;
+	}
+	for (j = (OSIP_MAX_CMDLINE_SECTOR - 1); j >= 0; j--) {
+		p->cmdline = read_dev_sector(p->bdev,
+				     p->lba + j,
+				     &p->sect[j]);
+	}
+	if (!p->cmdline) {
+		pr_err("%s: read_dev_sector failed!\n", __func__);
+		ret = -ENODEV;
+		goto put;
+	}
+	if (!(p->attribute & 1))
+		/* even number: signed add size of signature header. */
+#ifdef CONFIG_INTEL_SCU_IPC
+		p->cmdline += OSIP_SIGNATURE_SIZE;
+#else
+		p->cmdline += OSIP_VALLEYVIEW_SIGNATURE_SIZE;
+#endif
+
+	f->private_data = p;
+	return 0;
+put:
+	blkdev_put(p->bdev, f->f_mode);
+free:
+	kfree(p);
+end:
+	return ret;
+}
+
+static ssize_t read_cmdline(struct file *file, char __user *buf,
+			    size_t count, loff_t *ppos)
+{
+	struct cmdline_priv *p =
+		(struct cmdline_priv *)file->private_data;
+	if (!p)
+		return -ENODEV;
+	return simple_read_from_buffer(buf, count, ppos,
+			p->cmdline, strnlen(p->cmdline, OSIP_MAX_CMDLINE_SIZE));
+}
+
+static ssize_t write_cmdline(struct file *file, const char __user *buf,
+			     size_t count, loff_t *ppos)
+{
+	int ret, i;
+	struct cmdline_priv *p;
+
+	if (!file)
+		return -ENODEV;
+
+	p = (struct cmdline_priv *)file->private_data;
+	if (!p)
+		return -ENODEV;
+	/* @todo detect if image is signed, and prevent write */
+	lock_page(p->sect[0].v);
+	for (i = 1; i < OSIP_MAX_CMDLINE_SECTOR; i++) {
+		if (p->sect[i-1].v != p->sect[i].v)
+			lock_page(p->sect[i].v);
+	}
+	ret = simple_write_to_buffer(p->cmdline, OSIP_MAX_CMDLINE_SIZE-1,
+				     ppos,
+				     buf, count);
+	if (ret < 0)
+		goto unlock;
+	/* make sure we zero terminate the cmdline */
+	if (file->f_pos + count < OSIP_MAX_CMDLINE_SIZE)
+		p->cmdline[file->f_pos + count] = '\0';
+
+unlock:
+	set_page_dirty(p->sect[0].v);
+	unlock_page(p->sect[0].v);
+	for (i = 1; i < OSIP_MAX_CMDLINE_SECTOR; i++) {
+		if (p->sect[i-1].v != p->sect[i].v) {
+			set_page_dirty(p->sect[i].v);
+			unlock_page(p->sect[i].v);
+		}
+	}
+	return ret;
+}
+int release_cmdline(struct inode *i, struct file *f)
+{
+	int j;
+	struct cmdline_priv *p =
+		(struct cmdline_priv *)f->private_data;
+	if (!p)
+		return -ENOMEM;
+	put_dev_sector(p->sect[0]);
+	for (j = 1; j < OSIP_MAX_CMDLINE_SECTOR; j++)
+		if (p->sect[j-1].v != p->sect[j].v)
+			put_dev_sector(p->sect[j]);
+	blkdev_put(p->bdev, f->f_mode);
+	kfree(p);
+	return 0;
+}
+int fsync_cmdline(struct file *f, loff_t start, loff_t end, int datasync)
+{
+	struct cmdline_priv *p =
+		(struct cmdline_priv *)f->private_data;
+	if (!p)
+		return -ENOMEM;
+	sync_blockdev(p->bdev);
+	return 0;
+}
+static const struct file_operations osip_cmdline_fops = {
+	.open =         open_cmdline,
+	.read =         read_cmdline,
+	.write =        write_cmdline,
+	.release =      release_cmdline,
+	.fsync =        fsync_cmdline
+};
+
+/* decode the osip */
+
+static int decode_show_cb(struct OSIP_header *osip, void *data)
+{
+	struct seq_file *s = (struct seq_file *) data;
+	int i;
+
+	seq_printf(s, "HEADER:\n"
+		   "\tsig              = 0x%x\n"
+		   "\theader_size      = 0x%hx\n"
+		   "\theader_rev_minor = 0x%hhx\n"
+		   "\theader_rev_major = 0x%hhx\n"
+		   "\theader_checksum  = 0x%hhx\n"
+		   "\tnum_pointers     = 0x%hhx\n"
+		   "\tnum_images       = 0x%hhx\n",
+		   osip->sig,
+		   osip->header_size,
+		   osip->header_rev_minor,
+		   osip->header_rev_major,
+		   osip->header_checksum,
+		   osip->num_pointers,
+		   osip->num_images);
+
+	for (i = 0; i < osip->num_pointers; i++)
+		seq_printf(s, "image%d\n"
+			   "\tos_rev              =  0x%0hx\n"
+			   "\tos_rev              = 0x%hx\n"
+			   "\tlogical_start_block = 0x%x\n"
+			   "\tddr_load_address    = 0x%0x\n"
+			   "\tentry_point         = 0x%0x\n"
+			   "\tsize_of_os_image    = 0x%x\n"
+			   "\tattribute           = 0x%02x\n"
+			   "\treserved            = %02x%02x%02x\n",
+			   i,
+			   osip->desc[i].os_rev_minor,
+			   osip->desc[i].os_rev_major,
+			   osip->desc[i].logical_start_block,
+			   osip->desc[i].ddr_load_address,
+			   osip->desc[i].entry_point,
+			   osip->desc[i].size_of_os_image,
+			   osip->desc[i].attribute,
+			   osip->desc[i].reserved[0],
+			   osip->desc[i].reserved[1],
+			   osip->desc[i].reserved[2]);
+	return 0;
+}
+static int decode_show(struct seq_file *s, void *unused)
+{
+	access_osip_record(decode_show_cb, (void *)s);
+	return 0;
+}
+static int decode_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, decode_show, NULL);
+}
+
+static const struct file_operations osip_decode_fops = {
+	.open           = decode_open,
+	.read           = seq_read,
+	.llseek         = seq_lseek,
+	.release        = single_release,
+};
+
+static struct dentry *osip_dir;
+static void create_debugfs_files(void)
+{
+	/* /sys/kernel/debug/osip */
+	osip_dir = debugfs_create_dir("osip", NULL);
+	/* /sys/kernel/debug/osip/cmdline */
+	(void) debugfs_create_file("cmdline",
+				   S_IFREG | S_IRUGO | S_IWUSR | S_IWGRP,
+				   osip_dir, (void *)(uintptr_t)
+				   (get_osii_index(SIGNED_MOS_ATTR)),
+				   &osip_cmdline_fops);
+	/* /sys/kernel/debug/osip/cmdline_ros */
+	(void) debugfs_create_file("cmdline_ros",
+				S_IFREG | S_IRUGO | S_IWUSR | S_IWGRP,
+				   osip_dir, (void *)(uintptr_t)
+				   (get_osii_index(SIGNED_RECOVERY_ATTR)),
+				   &osip_cmdline_fops);
+	/* /sys/kernel/debug/osip/cmdline_pos */
+	(void) debugfs_create_file("cmdline_pos",
+				   S_IFREG | S_IRUGO | S_IWUSR | S_IWGRP,
+				   osip_dir, (void *)(uintptr_t)
+				   (get_osii_index(SIGNED_POS_ATTR)),
+				   &osip_cmdline_fops);
+	/* /sys/kernel/debug/osip/decode */
+	(void) debugfs_create_file("decode",
+				   S_IFREG | S_IRUGO,
+				   osip_dir, NULL, &osip_decode_fops);
+}
+static void remove_debugfs_files(void)
+{
+	debugfs_remove_recursive(osip_dir);
+}
+#else /* defined(CONFIG_INTEL_MID_OSIP_DEBUG_FS) */
+static void create_debugfs_files(void)
+{
+}
+static void remove_debugfs_files(void)
+{
+}
+#endif
+
+static int osip_init(void)
+{
+#ifdef CONFIG_INTEL_SCU_IPC
+	pr_info("%s: shutdown_notifier registered\n", __func__);
+	if (register_reboot_notifier(&osip_shutdown_notifier))
+		pr_warning("osip: unable to register shutdown notifier");
+	pr_info("%s: reboot_target registered\n", __func__);
+	if (reboot_target_register(&osip_reboot_target))
+		pr_warning("osip: unable to register reboot notifier");
+
+#endif
+	create_debugfs_files();
+	return 0;
+}
+
+static void osip_exit(void)
+{
+#ifdef CONFIG_INTEL_SCU_IPC
+	pr_info("%s: shutdown_notifier unregistered\n", __func__);
+	unregister_reboot_notifier(&osip_shutdown_notifier);
+	pr_info("%s: reboot_target unregistered\n", __func__);
+	reboot_target_unregister(&osip_reboot_target);
+
+#endif
+	remove_debugfs_files();
+}
+
+#ifdef CONFIG_INTEL_SCU_IPC
+static int osip_rpmsg_probe(struct rpmsg_channel *rpdev)
+{
+	if (rpdev == NULL) {
+		pr_err("rpmsg channel not created\n");
+		return -ENODEV;
+	}
+
+	dev_info(&rpdev->dev, "Probed OSIP rpmsg device\n");
+
+	return osip_init();
+}
+
+static void osip_rpmsg_cb(struct rpmsg_channel *rpdev, void *data,
+				int len, void *priv, u32 src)
+{
+	dev_warn(&rpdev->dev, "unexpected, message\n");
+
+	print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+		       data, len,  true);
+}
+
+static void osip_rpmsg_remove(struct rpmsg_channel *rpdev)
+{
+	osip_exit();
+	dev_info(&rpdev->dev, "Removed OSIP rpmsg device\n");
+}
+
+static struct rpmsg_device_id osip_rpmsg_id_table[] = {
+	{ .name = "rpmsg_osip" },
+	{ },
+};
+MODULE_DEVICE_TABLE(rpmsg, osip_rpmsg_id_table);
+
+static struct rpmsg_driver osip_rpmsg_driver = {
+	.drv.name	= KBUILD_MODNAME,
+	.drv.owner	= THIS_MODULE,
+	.id_table	= osip_rpmsg_id_table,
+	.probe		= osip_rpmsg_probe,
+	.callback	= osip_rpmsg_cb,
+	.remove		= osip_rpmsg_remove,
+};
+
+static int __init osip_rpmsg_init(void)
+{
+	return register_rpmsg_driver(&osip_rpmsg_driver);
+}
+module_init(osip_rpmsg_init);
+
+static void __exit osip_rpmsg_exit(void)
+{
+	return unregister_rpmsg_driver(&osip_rpmsg_driver);
+}
+module_exit(osip_rpmsg_exit);
+
+#else /* ! defined(CONFIG_INTEL_SCU_IPC) */
+
+static int __init osip_probe(struct platform_device *dev)
+{
+	return osip_init();
+}
+
+static int osip_remove(struct platform_device *dev)
+{
+	osip_exit();
+	return 0;
+}
+
+static struct platform_driver osip_driver = {
+	.remove         = osip_remove,
+	.driver         = {
+		.owner  = THIS_MODULE,
+		.name   = KBUILD_MODNAME,
+	},
+};
+
+static int __init osip_init_module(void)
+{
+	int err=0;
+
+	pr_info("Intel OSIP Driver v%s\n", DRV_VERSION);
+
+        platform_device_register_simple(KBUILD_MODNAME, -1, NULL, 0);
+	err =  platform_driver_probe(&osip_driver,osip_probe);
+
+	return err;
+}
+
+static void __exit osip_cleanup_module(void)
+{
+	platform_driver_unregister(&osip_driver);
+	pr_info("OSIP Module Unloaded\n");
+}
+module_init(osip_init_module);
+module_exit(osip_cleanup_module);
+#endif
+
+MODULE_AUTHOR("Pierre Tardy <pierre.tardy@intel.com>");
+MODULE_AUTHOR("Xiaokang Qin <xiaokang.qin@intel.com>");
+MODULE_DESCRIPTION("Intel Medfield OSIP Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/external_drivers/drivers/platform/x86/intel_msic_gpio.c b/drivers/external_drivers/drivers/platform/x86/intel_msic_gpio.c
new file mode 100644
index 0000000..ccdd8bb
--- /dev/null
+++ b/drivers/external_drivers/drivers/platform/x86/intel_msic_gpio.c
@@ -0,0 +1,241 @@
+/* MSIC GPIO (access through IPC) driver for Cloverview
+ * (C) Copyright 2011 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/rpmsg.h>
+#include <linux/mfd/intel_msic.h>
+#include <asm/intel_mid_rpmsg.h>
+#include <asm/intel_scu_pmic.h>
+
+#define DRIVER_NAME "msic_gpio"
+
+#define CTLO_DOUT_MASK		(1 << 0)
+#define CTLO_DOUT_H		(1 << 0)
+#define CTLO_DOUT_L		(0 << 0)
+#define CTLO_DIR_MASK		(1 << 5)
+#define CTLO_DIR_O		(1 << 5)
+#define CTLO_DIR_I		(0 << 5)
+#define CTLO_OUT_DEF		(0x38)
+#define CTLO_IN_DEF		(0x18)
+
+#define CTL_VALUE_MASK		(1 << 0)
+
+struct msic_gpio {
+	struct gpio_chip chip;
+	int ngpio_lv; /* number of low voltage gpio */
+	u16 gpio0_lv_ctlo;
+	u16 gpio0_lv_ctli;
+	u16 gpio0_hv_ctlo;
+	u16 gpio0_hv_ctli;
+};
+
+static struct msic_gpio msic_gpio;
+
+static int msic_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+	struct msic_gpio *mg = &msic_gpio;
+
+	u16 ctlo = offset < mg->ngpio_lv ? mg->gpio0_lv_ctlo + offset
+			: mg->gpio0_hv_ctlo + (offset - mg->ngpio_lv);
+
+	return intel_scu_ipc_iowrite8(ctlo, CTLO_IN_DEF);
+}
+
+static int msic_gpio_direction_output(struct gpio_chip *chip,
+			unsigned offset, int value)
+{
+	struct msic_gpio *mg = &msic_gpio;
+
+	u16 ctlo = offset < mg->ngpio_lv ? mg->gpio0_lv_ctlo + offset
+			: mg->gpio0_hv_ctlo + (offset - mg->ngpio_lv);
+
+	return intel_scu_ipc_iowrite8(ctlo,
+			CTLO_OUT_DEF | (value ? CTLO_DOUT_H : CTLO_DOUT_L));
+}
+
+static int msic_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+	struct msic_gpio *mg = &msic_gpio;
+	u8 value;
+	int ret;
+	u16 ctlo, ctli, reg;
+
+	ctlo = offset < mg->ngpio_lv ? mg->gpio0_lv_ctlo + offset
+			: mg->gpio0_hv_ctlo + (offset - mg->ngpio_lv);
+	ctli = offset < mg->ngpio_lv ? mg->gpio0_lv_ctli + offset
+			: mg->gpio0_hv_ctli + (offset - mg->ngpio_lv);
+
+	/* First get pin direction */
+	ret = intel_scu_ipc_ioread8(ctlo, &value);
+	if (ret)
+		return -EIO;
+
+	/* The pin values for output and input direction
+	 * are stored in different registers.
+	 */
+	reg = (value & CTLO_DIR_O) ? ctlo : ctli;
+
+	ret = intel_scu_ipc_ioread8(reg, &value);
+	if (ret)
+		return -EIO;
+
+	return value & CTL_VALUE_MASK;
+}
+
+static void msic_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+	struct msic_gpio *mg = &msic_gpio;
+
+	u16 ctlo = offset < mg->ngpio_lv ? mg->gpio0_lv_ctlo + offset
+			: mg->gpio0_hv_ctlo + (offset - mg->ngpio_lv);
+
+	intel_scu_ipc_update_register(ctlo,
+			value ? CTLO_DOUT_H : CTLO_DOUT_L, CTLO_DOUT_MASK);
+}
+
+static int msic_gpio_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct intel_msic_gpio_pdata *pdata = dev->platform_data;
+	struct msic_gpio *mg = &msic_gpio;
+	int retval;
+
+	dev_dbg(dev, "base %d\n", pdata->gpio_base);
+
+	if (!pdata || !pdata->gpio_base) {
+		dev_err(dev, "incorrect or missing platform data\n");
+		return -ENOMEM;
+	}
+
+	dev_set_drvdata(dev, mg);
+
+	mg->ngpio_lv = pdata->ngpio_lv;
+	mg->gpio0_lv_ctlo = pdata->gpio0_lv_ctlo;
+	mg->gpio0_lv_ctli = pdata->gpio0_lv_ctli;
+	mg->gpio0_hv_ctlo = pdata->gpio0_hv_ctlo;
+	mg->gpio0_hv_ctli = pdata->gpio0_hv_ctli;
+	mg->chip.label = dev_name(&pdev->dev);
+	mg->chip.direction_input = msic_gpio_direction_input;
+	mg->chip.direction_output = msic_gpio_direction_output;
+	mg->chip.get = msic_gpio_get;
+	mg->chip.set = msic_gpio_set;
+	mg->chip.base = pdata->gpio_base;
+	mg->chip.ngpio = pdata->ngpio_lv + pdata->ngpio_hv;
+	mg->chip.can_sleep = pdata->can_sleep;
+	mg->chip.dev = dev;
+
+	retval = gpiochip_add(&mg->chip);
+	if (retval)
+		dev_err(dev, "%s: Can not add msic gpio chip.\n", __func__);
+
+	return retval;
+}
+
+static int msic_gpio_remove(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+
+	dev_set_drvdata(dev, NULL);
+	return 0;
+}
+
+static struct platform_driver msic_gpio_driver = {
+	.driver = {
+		.name		= DRIVER_NAME,
+		.owner		= THIS_MODULE,
+	},
+	.probe		= msic_gpio_probe,
+	.remove		= msic_gpio_remove,
+};
+
+static int msic_gpio_init(void)
+{
+	return platform_driver_register(&msic_gpio_driver);
+}
+
+static void msic_gpio_exit(void)
+{
+	return platform_driver_unregister(&msic_gpio_driver);
+}
+
+static int msic_gpio_rpmsg_probe(struct rpmsg_channel *rpdev)
+{
+	int ret = 0;
+
+	if (rpdev == NULL) {
+		pr_err("msic_gpio rpmsg channel not created\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	dev_info(&rpdev->dev, "Probed msic_gpio rpmsg device\n");
+
+	ret = msic_gpio_init();
+
+out:
+	return ret;
+}
+
+static void msic_gpio_rpmsg_remove(struct rpmsg_channel *rpdev)
+{
+	msic_gpio_exit();
+	dev_info(&rpdev->dev, "Removed msic_gpio rpmsg device\n");
+}
+
+static void msic_gpio_rpmsg_cb(struct rpmsg_channel *rpdev, void *data,
+					int len, void *priv, u32 src)
+{
+	dev_warn(&rpdev->dev, "unexpected, message\n");
+
+	print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+		       data, len,  true);
+}
+
+static struct rpmsg_device_id msic_gpio_rpmsg_id_table[] = {
+	{ .name	= "rpmsg_msic_gpio" },
+	{ },
+};
+MODULE_DEVICE_TABLE(rpmsg, msic_gpio_rpmsg_id_table);
+
+static struct rpmsg_driver msic_gpio_rpmsg = {
+	.drv.name	= KBUILD_MODNAME,
+	.drv.owner	= THIS_MODULE,
+	.id_table	= msic_gpio_rpmsg_id_table,
+	.probe		= msic_gpio_rpmsg_probe,
+	.callback	= msic_gpio_rpmsg_cb,
+	.remove		= msic_gpio_rpmsg_remove,
+};
+
+static int __init msic_gpio_rpmsg_init(void)
+{
+	return register_rpmsg_driver(&msic_gpio_rpmsg);
+}
+fs_initcall(msic_gpio_rpmsg_init);
+
+static void __exit msic_gpio_rpmsg_exit(void)
+{
+	return unregister_rpmsg_driver(&msic_gpio_rpmsg);
+}
+module_exit(msic_gpio_rpmsg_exit);
+
+MODULE_AUTHOR("Bin Yang <bin.yang@intel.com>");
+MODULE_DESCRIPTION("Intel MSIC GPIO driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/external_drivers/drivers/platform/x86/intel_psh_ipc.c b/drivers/external_drivers/drivers/platform/x86/intel_psh_ipc.c
new file mode 100644
index 0000000..211d55f
--- /dev/null
+++ b/drivers/external_drivers/drivers/platform/x86/intel_psh_ipc.c
@@ -0,0 +1,672 @@
+/*
+ * intel_psh_ipc.c: Driver for the Intel PSH IPC mechanism
+ *
+ * (C) Copyright 2012 Intel Corporation
+ * Author: Yang Bin (bin.yang@intel.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/semaphore.h>
+#include <linux/workqueue.h>
+#include <linux/pm_runtime.h>
+#include <asm/intel_psh_ipc.h>
+#include <asm/intel-mid.h>
+#include <linux/fs.h>
+#include <linux/intel_mid_pm.h>
+
+#define PSH_ERR(fmt, arg...)	dev_err(&ipc_ctrl.pdev->dev, fmt, ##arg)
+#define PSH_DBG(fmt, arg...)	dev_dbg(&ipc_ctrl.pdev->dev, fmt, ##arg)
+
+#define STATUS_PSH2IA(x)	(1 << ((x) + 6))
+#define FLAG_BIND		(1 << 0)
+
+#define IS_A_STEP		(ipc_ctrl.reg_map == 0)
+
+#define PIMR_A_STEP(x)		(ipc_ctrl.psh_regs->psh_regs_a_step.pimr##x)
+#define PIMR_B_STEP(x)		(ipc_ctrl.psh_regs->psh_regs_b_step.pimr##x)
+
+#define PIMR_ADDR(x)		((ipc_ctrl.reg_map & 1) ?	\
+				&PIMR_B_STEP(x) : &PIMR_A_STEP(x))
+
+#define PSH_REG_A_STEP(x)	(ipc_ctrl.psh_regs->psh_regs_a_step.x)
+#define PSH_REG_B_STEP(x)	(ipc_ctrl.psh_regs->psh_regs_b_step.x)
+
+#define PSH_REG_ADDR(x)		((ipc_ctrl.reg_map & 1) ?	\
+				&PSH_REG_B_STEP(x) : &PSH_REG_A_STEP(x))
+
+#define PSH_CH_HANDLE(x)	(ipc_ctrl.channel_handle[x])
+#define PSH_CH_DATA(x)		(ipc_ctrl.channel_data[x])
+#define PSH_CH_FLAG(x)		(ipc_ctrl.flags[x])
+
+/* PSH registers */
+union psh_registers {
+	/* reg mem map A */
+	struct {
+		u32		csr;	/* 00h */
+		u32		res1;	/* padding */
+		u32		pisr;	/* 08h */
+		u32		pimr0;	/* 0Ch */
+		u32		pimr1;	/* 10h */
+		u32		pimr2;	/* 14h */
+		u32		pimr3;	/* 18h */
+		u32		pmctl;	/* 1Ch */
+		u32		pmstat;	/* 20h */
+		u32		res2;	/* padding */
+		struct psh_msg	ia2psh[NUM_IA2PSH_IPC];/* 28h ~ 44h + 3 */
+		struct psh_msg	cry2psh;/* 48h ~ 4Ch + 3 */
+		struct psh_msg	scu2psh;/* 50h ~ 54h + 3 */
+		u32		res3[2];/* padding */
+		struct psh_msg	psh2ia[NUM_PSH2IA_IPC];/* 60h ~ 7Ch + 3 */
+		struct psh_msg	psh2cry;/* 80h ~ 84h + 3 */
+		struct psh_msg  psh2scu;/* 88h */
+		u32		msi_dir;/* 90h */
+		u32		res4[3];
+		u32		scratchpad[2];/* A0 */
+	} __packed psh_regs_a_step;
+	/* reg mem map B */
+	struct {
+		u32		pimr0;		/* 00h */
+		u32		csr;		/* 04h */
+		u32		pmctl;		/* 08h */
+		u32		pmstat;		/* 0Ch */
+		u32		psh_msi_direct;	/* 10h */
+		u32		res1[59];	/* 14h ~ FCh + 3, padding */
+		u32		pimr3;		/* 100h */
+		struct psh_msg	scu2psh;	/* 104h ~ 108h + 3 */
+		struct psh_msg	psh2scu;	/* 10Ch ~ 110h + 3 */
+		u32		res2[187];	/* 114h ~ 3FCh + 3, padding */
+		u32		pisr;		/* 400h */
+		u32		scratchpad[2];	/* 404h ~ 407h */
+		u32		res3[61];	/* 40Ch ~ 4FCh + 3, padding */
+		u32		pimr1;		/* 500h */
+		struct psh_msg	ia2psh[NUM_IA2PSH_IPC];	/* 504h ~ 520h + 3 */
+		struct psh_msg	psh2ia[NUM_PSH2IA_IPC];	/* 524h ~ 540h + 3 */
+		u32		res4[175];	/* 544h ~ 7FCh + 3, padding */
+		u32		pimr2;		/* 800h */
+		struct psh_msg	cry2psh;	/* 804h ~ 808h + 3 */
+		struct psh_msg	psh2cry;	/* 80Ch ~ 810h + 3 */
+	} __packed psh_regs_b_step;
+} __packed;
+
+static struct ipc_controller_t {
+	int			reg_map;
+	int			initialized;
+	struct pci_dev		*pdev;
+	spinlock_t		lock;
+	int			flags[NUM_ALL_CH];
+	union psh_registers	*psh_regs;
+	struct semaphore	ch_lock[NUM_ALL_CH];
+	struct mutex		psh_mutex;
+	psh_channel_handle_t	channel_handle[NUM_PSH2IA_IPC];
+	void			*channel_data[NUM_PSH2IA_IPC];
+} ipc_ctrl;
+
+
+/**
+ * intel_ia2psh_command - send IA to PSH command
+ * Send ia2psh command and return psh message and status
+ *
+ * @in: input psh message
+ * @out: output psh message
+ * @ch: psh channel
+ * @timeout: timeout for polling busy bit, in us
+ */
+int intel_ia2psh_command(struct psh_msg *in, struct psh_msg *out,
+			 int ch, int timeout)
+{
+	int ret = 0;
+	u32 status;
+
+	might_sleep();
+
+	if (!ipc_ctrl.initialized)
+		return -ENODEV;
+
+	if (ch < PSH_SEND_CH0 || ch > PSH_SEND_CH0 + NUM_IA2PSH_IPC - 1
+		|| in == NULL)
+		return -EINVAL;
+
+	if (in->msg & CHANNEL_BUSY)
+		return -EINVAL;
+
+	pm_runtime_get_sync(&ipc_ctrl.pdev->dev);
+	down(&ipc_ctrl.ch_lock[ch]);
+
+	in->msg |= CHANNEL_BUSY;
+	/* Check if channel is ready for IA sending command */
+
+	if (IS_A_STEP) {
+		/* PSH_CSR_WORKAROUND */
+		int tm = 10000;
+
+		/* wait either D0i0 got ack'ed by PSH, or scratchpad set */
+		usleep_range(1000, 2000);
+		while (readl(PSH_REG_ADDR(scratchpad[0])) && --tm)
+			usleep_range(100, 101);
+		if (!tm)
+			PSH_ERR("psh wait for scratchpad timeout\n");
+
+		tm = 10000;
+		while ((readl(PSH_REG_ADDR(ia2psh[ch].msg)) & CHANNEL_BUSY)
+				&& --tm)
+			usleep_range(100, 101);
+		if (!tm) {
+			PSH_ERR("psh ch[%d] wait for busy timeout\n", ch);
+			ret = -EBUSY;
+			goto end;
+		}
+	} else {
+		if (readl(PSH_REG_ADDR(ia2psh[ch].msg)) & CHANNEL_BUSY) {
+			ret = -EBUSY;
+			goto end;
+		}
+	}
+
+	writel(in->param, PSH_REG_ADDR(ia2psh[ch].param));
+	writel(in->msg, PSH_REG_ADDR(ia2psh[ch].msg));
+
+	/* Input timeout is zero, do not check channel status */
+	if (timeout == 0)
+		goto end;
+
+	/* Input timeout is nonzero, check channel status */
+	while (((status = readl(PSH_REG_ADDR(ia2psh[ch].msg))) & CHANNEL_BUSY)
+		&& timeout) {
+		usleep_range(100, 101);
+		timeout -= 100;
+	}
+
+	if (timeout <= 0) {
+		ret = -ETIMEDOUT;
+		PSH_ERR("ia2psh channel %d is always busy!\n", ch);
+		goto end;
+	} else {
+		if (out == NULL)
+			goto end;
+
+		out->param = readl(PSH_REG_ADDR(ia2psh[ch].param));
+		out->msg = status;
+	}
+
+end:
+	up(&ipc_ctrl.ch_lock[ch]);
+	pm_runtime_put(&ipc_ctrl.pdev->dev);
+
+	return ret;
+}
+EXPORT_SYMBOL(intel_ia2psh_command);
+
+/**
+ * intel_psh_ipc_bind - bind a handler to a psh channel
+ *
+ * @ch: psh channel
+ * @handle: handle function called when IA received psh interrupt
+ * @data: data passed to handle
+ */
+int intel_psh_ipc_bind(int ch, psh_channel_handle_t handle, void *data)
+{
+	unsigned long flags;
+
+	if (!ipc_ctrl.initialized)
+		return -ENODEV;
+
+	if (!handle || ch < PSH_RECV_CH0
+			|| ch > PSH_RECV_CH0 + NUM_PSH2IA_IPC - 1)
+		return -EINVAL;
+
+	mutex_lock(&ipc_ctrl.psh_mutex);
+	down(&ipc_ctrl.ch_lock[ch]);
+	if (PSH_CH_HANDLE(ch - PSH_RECV_CH0) != NULL) {
+		up(&ipc_ctrl.ch_lock[ch]);
+		mutex_unlock(&ipc_ctrl.psh_mutex);
+		return -EBUSY;
+	} else {
+		PSH_CH_DATA(ch - PSH_RECV_CH0) = data;
+		PSH_CH_HANDLE(ch - PSH_RECV_CH0) = handle;
+	}
+	up(&ipc_ctrl.ch_lock[ch]);
+
+	pm_runtime_get_sync(&ipc_ctrl.pdev->dev);
+	spin_lock_irqsave(&ipc_ctrl.lock, flags);
+	PSH_CH_FLAG(ch) |= FLAG_BIND;
+	writel(readl(PIMR_ADDR(1)) | (1 << (ch - PSH_RECV_CH0)), PIMR_ADDR(1));
+	spin_unlock_irqrestore(&ipc_ctrl.lock, flags);
+	pm_runtime_put(&ipc_ctrl.pdev->dev);
+	mutex_unlock(&ipc_ctrl.psh_mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL(intel_psh_ipc_bind);
+
+/**
+ * intel_psh_ipc_unbind - unbind a handler to a psh channel
+ *
+ * @ch: psh channel
+ */
+void intel_psh_ipc_unbind(int ch)
+{
+	unsigned long flags;
+
+	if (!ipc_ctrl.initialized)
+		return;
+
+	if (ch < PSH_RECV_CH0 || ch > PSH_RECV_CH0 + NUM_PSH2IA_IPC - 1)
+		return;
+
+	if (!(PSH_CH_FLAG(ch) & FLAG_BIND))
+		return;
+
+	mutex_lock(&ipc_ctrl.psh_mutex);
+	pm_runtime_get_sync(&ipc_ctrl.pdev->dev);
+	spin_lock_irqsave(&ipc_ctrl.lock, flags);
+	PSH_CH_FLAG(ch) &= ~FLAG_BIND;
+	writel(readl(PIMR_ADDR(1)) & (~(1 << (ch - PSH_RECV_CH0))),
+						PIMR_ADDR(1));
+	spin_unlock_irqrestore(&ipc_ctrl.lock, flags);
+	pm_runtime_put(&ipc_ctrl.pdev->dev);
+
+	down(&ipc_ctrl.ch_lock[ch]);
+	PSH_CH_HANDLE(ch - PSH_RECV_CH0) = NULL;
+	up(&ipc_ctrl.ch_lock[ch]);
+	mutex_unlock(&ipc_ctrl.psh_mutex);
+}
+EXPORT_SYMBOL(intel_psh_ipc_unbind);
+
+void intel_psh_ipc_disable_irq(void)
+{
+	disable_irq(ipc_ctrl.pdev->irq);
+}
+EXPORT_SYMBOL(intel_psh_ipc_disable_irq);
+
+void intel_psh_ipc_enable_irq(void)
+{
+	enable_irq(ipc_ctrl.pdev->irq);
+}
+EXPORT_SYMBOL(intel_psh_ipc_enable_irq);
+
+static void psh_recv_handle(int i)
+{
+	int msg, param;
+
+	down(&ipc_ctrl.ch_lock[i + PSH_RECV_CH0]);
+
+	msg = readl(PSH_REG_ADDR(psh2ia[i].msg)) & (~CHANNEL_BUSY);
+	param = readl(PSH_REG_ADDR(psh2ia[i].param));
+
+	if (PSH_CH_HANDLE(i) == NULL) {
+		PSH_ERR("Ignore message from channel %d\n", i+PSH_RECV_CH0);
+		goto end;
+	}
+
+	/* write back to clear the busy bit */
+	writel(msg, PSH_REG_ADDR(psh2ia[i].msg));
+	PSH_CH_HANDLE(i)(msg, param, PSH_CH_DATA(i));
+end:
+	up(&ipc_ctrl.ch_lock[i+PSH_RECV_CH0]);
+}
+
+static irqreturn_t psh_ipc_irq(int irq, void *data)
+{
+	int i;
+	u32 status;
+
+	pm_runtime_get_sync(&ipc_ctrl.pdev->dev);
+	status = readl(PSH_REG_ADDR(pisr));
+
+	for (i = 0; i < NUM_PSH2IA_IPC; i++) {
+		if (status & STATUS_PSH2IA(i))
+			psh_recv_handle(i);
+	}
+
+	pm_runtime_put(&ipc_ctrl.pdev->dev);
+	return IRQ_HANDLED;
+}
+
+static void psh_regs_dump(void)
+{
+	int i;
+
+	pm_runtime_get_sync(&ipc_ctrl.pdev->dev);
+	PSH_ERR("\n<-------------start------------>\n");
+
+	PSH_ERR("csr:\t%#x\n", readl(PSH_REG_ADDR(csr)));
+	PSH_ERR("pisr:\t%#x\n", readl(PSH_REG_ADDR(pisr)));
+
+	PSH_ERR("pimr0:\t%#x\n", readl(PIMR_ADDR(0)));
+	PSH_ERR("pimr1:\t%#x\n", readl(PIMR_ADDR(1)));
+	PSH_ERR("pimr2:\t%#x\n", readl(PIMR_ADDR(2)));
+	PSH_ERR("pimr3:\t%#x\n", readl(PIMR_ADDR(3)));
+
+	PSH_ERR("pmctl:\t%#x\n", readl(PSH_REG_ADDR(pmctl)));
+	PSH_ERR("pmstat:\t%#x\n", readl(PSH_REG_ADDR(pmstat)));
+	PSH_ERR("scratchpad0:\t%#x\n", readl(PSH_REG_ADDR(scratchpad[0])));
+	PSH_ERR("scratchpad1:\t%#x\n", readl(PSH_REG_ADDR(scratchpad[1])));
+
+	for (i = 0; i < NUM_IA2PSH_IPC; i++) {
+		PSH_ERR("ia2psh[%d].msg:\t%#x\n", i,
+				readl(PSH_REG_ADDR(ia2psh[i].msg)));
+		PSH_ERR("ia2psh[%d].param:\t%#x\n", i,
+				readl(PSH_REG_ADDR(ia2psh[i].param)));
+	}
+
+	PSH_ERR("cry2psh.msg:\t%#x\n", readl(PSH_REG_ADDR(cry2psh.msg)));
+	PSH_ERR("cry2psh.param:\t%#x\n", readl(PSH_REG_ADDR(cry2psh.param)));
+	PSH_ERR("scu2psh.msg:\t%#x\n", readl(PSH_REG_ADDR(scu2psh.msg)));
+	PSH_ERR("scu2psh.param:\t%#x\n", readl(PSH_REG_ADDR(scu2psh.param)));
+
+	for (i = 0; i < NUM_PSH2IA_IPC; i++) {
+		PSH_ERR("psh2ia[%d].msg:\t%#x\n", i,
+				readl(PSH_REG_ADDR(psh2ia[i].msg)));
+		PSH_ERR("psh2ia[%d].param:\t%#x\n", i,
+				readl(PSH_REG_ADDR(psh2ia[i].param)));
+	}
+
+	PSH_ERR("psh2cry.msg:\t%#x\n", readl(PSH_REG_ADDR(psh2cry.msg)));
+	PSH_ERR("psh2cry.param:\t%#x\n", readl(PSH_REG_ADDR(psh2cry.param)));
+
+	PSH_ERR("\n<-------------end------------>\n");
+	pm_runtime_put(&ipc_ctrl.pdev->dev);
+}
+
+static struct psh_msg psh_dbg_msg;
+static int psh_ch;
+
+static ssize_t psh_msg_show(struct device *dev,
+			    struct device_attribute *attr,
+			    char *buf)
+{
+	return snprintf(buf, PAGE_SIZE,
+			"\nLast ia2psh command with msg: %#x\nparam: %#x\n",
+			psh_dbg_msg.msg, psh_dbg_msg.param);
+}
+
+static ssize_t psh_msg_store(struct device *dev,
+			     struct device_attribute *attr,
+			     const char *buf, size_t size)
+{
+	int ret;
+	u32 msg, param;
+
+	memset(&psh_dbg_msg, 0, sizeof(psh_dbg_msg));
+
+	ret = sscanf(buf, "%x %x", &msg, &param);
+	if (ret != 2) {
+		PSH_ERR("Input two arguments as psh msg and param\n");
+		return -EINVAL;
+	}
+
+	psh_dbg_msg.msg = msg;
+	psh_dbg_msg.param = param;
+
+	return size;
+}
+
+static ssize_t psh_ch_show(struct device *dev,
+			   struct device_attribute *attr,
+			   char *buf)
+{
+	return snprintf(buf, PAGE_SIZE,
+			"\nLast psh channel: %d\n", psh_ch);
+}
+
+static ssize_t psh_ch_store(struct device *dev,
+			    struct device_attribute *attr,
+			    const char *buf, size_t size)
+{
+	int ret;
+
+	ret = sscanf(buf, "%d", &psh_ch);
+	if (ret != 1) {
+		PSH_ERR("Input one argument as psh channel\n");
+		return -EINVAL;
+	}
+
+	return size;
+}
+
+static ssize_t psh_send_cmd_store(struct device *dev,
+			    struct device_attribute *attr,
+			    const char *buf, size_t size)
+{
+	int psh_dbg_err;
+	struct psh_msg out_msg;
+
+	memset(&out_msg, 0, sizeof(out_msg));
+
+	psh_dbg_err = intel_ia2psh_command(&psh_dbg_msg, &out_msg,
+					psh_ch, 3000000);
+	if (psh_dbg_err) {
+		PSH_ERR("Send ia2psh command failed, err %d\n", psh_dbg_err);
+		psh_regs_dump();
+		return psh_dbg_err;
+	}
+
+	return size;
+}
+
+static DEVICE_ATTR(psh_msg, S_IRUSR | S_IWUSR, psh_msg_show, psh_msg_store);
+static DEVICE_ATTR(psh_ch, S_IRUSR | S_IWUSR, psh_ch_show, psh_ch_store);
+static DEVICE_ATTR(ia2psh_cmd, S_IWUSR, NULL, psh_send_cmd_store);
+
+static struct attribute *psh_attrs[] = {
+	&dev_attr_psh_msg.attr,
+	&dev_attr_psh_ch.attr,
+	&dev_attr_ia2psh_cmd.attr,
+	NULL,
+};
+
+static struct attribute_group psh_attr_group = {
+	.name = "psh_debug",
+	.attrs = psh_attrs,
+};
+
+static int intel_psh_debug_sysfs_create(struct pci_dev *pdev)
+{
+	return sysfs_create_group(&pdev->dev.kobj, &psh_attr_group);
+}
+
+static void pmic_sysfs_remove(struct pci_dev *pdev)
+{
+	sysfs_remove_group(&pdev->dev.kobj, &psh_attr_group);
+}
+
+#ifdef CONFIG_PM
+static int psh_ipc_suspend_noirq(struct device *dev)
+{
+	int i;
+	int ret = 0;
+
+	for (i = 0; i < NUM_ALL_CH; i++) {
+		if (down_trylock(&ipc_ctrl.ch_lock[i])) {
+			ret = -EBUSY;
+			break;
+		}
+	}
+
+	if (ret) {
+		for (; i > 0; i--)
+			up(&ipc_ctrl.ch_lock[i - 1]);
+	}
+
+	return ret;
+}
+
+static int psh_ipc_resume_noirq(struct device *dev)
+{
+	int i;
+
+	for (i = 0; i < NUM_ALL_CH; i++)
+		up(&ipc_ctrl.ch_lock[i]);
+
+	return 0;
+}
+
+#else
+
+#define psh_ipc_suspend_noirq	NULL
+#define psh_ipc_resume_noirq	NULL
+
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int psh_ipc_runtime_suspend(struct device *dev)
+{
+	dev_dbg(dev, "runtime suspend called\n");
+	return 0;
+}
+
+static int psh_ipc_runtime_resume(struct device *dev)
+{
+	dev_dbg(dev, "runtime resume called\n");
+	return 0;
+}
+
+#else
+
+#define psh_ipc_runtime_suspend	NULL
+#define psh_ipc_runtime_resume	NULL
+
+#endif
+
+static int psh_ipc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	int i, ret;
+	unsigned long start, len;
+
+	ipc_ctrl.pdev = pci_dev_get(pdev);
+	ret = pci_enable_device(pdev);
+	if (ret)
+		goto err1;
+
+	start = pci_resource_start(pdev, 0);
+	len = pci_resource_len(pdev, 0);
+	if (!start || !len) {
+		ret = -ENODEV;
+		goto err1;
+	}
+
+	ret = pci_request_regions(pdev, "intel_psh_ipc");
+	if (ret)
+		goto err1;
+
+	switch (intel_mid_identify_cpu()) {
+	case INTEL_MID_CPU_CHIP_TANGIER:
+		if (intel_mid_soc_stepping() == 0)
+			ipc_ctrl.reg_map = 0;
+		else
+			ipc_ctrl.reg_map = 1;
+		break;
+	case INTEL_MID_CPU_CHIP_ANNIEDALE:
+		ipc_ctrl.reg_map = 1;
+		break;
+	default:
+		dev_err(&pdev->dev, "error register map\n");
+		ret = -EINVAL;
+		goto err2;
+		break;
+	}
+
+	ipc_ctrl.psh_regs = (union psh_registers *)ioremap_nocache(start, len);
+	if (!ipc_ctrl.psh_regs) {
+		ret = -ENOMEM;
+		goto err2;
+	}
+
+	ret = request_threaded_irq(pdev->irq, NULL, psh_ipc_irq, IRQF_ONESHOT,
+			"intel_psh_ipc", NULL);
+	if (ret) {
+		dev_err(&pdev->dev, "Unable to register irq %d\n", pdev->irq);
+		goto err3;
+	}
+
+	irq_set_irq_wake(pdev->irq, 1);
+
+	spin_lock_init(&ipc_ctrl.lock);
+	mutex_init(&ipc_ctrl.psh_mutex);
+
+	for (i = 0; i < NUM_ALL_CH; i++)
+		sema_init(&ipc_ctrl.ch_lock[i], 1);
+
+	intel_psh_devices_create();
+
+	intel_psh_debug_sysfs_create(pdev);
+
+	ipc_ctrl.initialized = 1;
+
+	pm_runtime_put_noidle(&pdev->dev);
+	pm_runtime_allow(&pdev->dev);
+
+	return 0;
+
+err3:
+	iounmap(ipc_ctrl.psh_regs);
+err2:
+	pci_release_regions(pdev);
+err1:
+	pci_dev_put(pdev);
+
+	return ret;
+}
+
+static void psh_ipc_remove(struct pci_dev *pdev)
+{
+	pm_runtime_forbid(&pdev->dev);
+	pm_runtime_get_noresume(&pdev->dev);
+	free_irq(pdev->irq, NULL);
+	iounmap(ipc_ctrl.psh_regs);
+	pci_release_regions(pdev);
+	pci_dev_put(pdev);
+	intel_psh_devices_destroy();
+	pmic_sysfs_remove(pdev);
+	ipc_ctrl.initialized = 0;
+}
+
+static const struct dev_pm_ops psh_ipc_drv_pm_ops = {
+	.suspend_noirq		= psh_ipc_suspend_noirq,
+	.resume_noirq		= psh_ipc_resume_noirq,
+	.runtime_suspend	= psh_ipc_runtime_suspend,
+	.runtime_resume		= psh_ipc_runtime_resume,
+};
+
+static DEFINE_PCI_DEVICE_TABLE(pci_ids) = {
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x11a3)},
+	{ 0,}
+};
+MODULE_DEVICE_TABLE(pci, pci_ids);
+
+static struct pci_driver psh_ipc_driver = {
+	.name = "intel_psh_ipc",
+	.driver = {
+		.pm = &psh_ipc_drv_pm_ops,
+	},
+	.id_table = pci_ids,
+	.probe = psh_ipc_probe,
+	.remove = psh_ipc_remove,
+};
+
+static int __init psh_ipc_init(void)
+{
+	return  pci_register_driver(&psh_ipc_driver);
+}
+
+static void __exit psh_ipc_exit(void)
+{
+	pci_unregister_driver(&psh_ipc_driver);
+}
+
+MODULE_AUTHOR("bin.yang@intel.com");
+MODULE_DESCRIPTION("Intel PSH IPC driver");
+MODULE_LICENSE("GPL v2");
+
+fs_initcall(psh_ipc_init);
+module_exit(psh_ipc_exit);
diff --git a/drivers/external_drivers/drivers/platform/x86/reboot_target.c b/drivers/external_drivers/drivers/platform/x86/reboot_target.c
new file mode 100644
index 0000000..3c9696c
--- /dev/null
+++ b/drivers/external_drivers/drivers/platform/x86/reboot_target.c
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2013 Intel Corp
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/reboot.h>
+#include <linux/module.h>
+
+#include "reboot_target.h"
+
+/* Currently registered Reboot Target setter  */
+static struct reboot_target *var;
+
+struct name2id {
+	const char *name;
+	int id;
+};
+
+static const unsigned int DEFAULT_TARGET_INDEX = 0;
+
+static const struct name2id NAME2ID[] = {
+	{ "main",        0x00 },
+	{ "android",     0x00 },
+	{ "charging",    0x0A },
+	{ "recovery",    0x0C },
+	{ "fastboot",    0x0E },
+	{ "bootloader",  0x0E },
+	{ "factory",     0x12 },
+	{ "dnx",         0x14 },
+	{ "ramconsole",  0x16 },
+	{ "factory2",    0x18 },
+	{ "bootoneshot", 0x1A },
+};
+
+#define ALLOW_FACTORY_PARAM_NAME "allow_factory="
+
+static int reboot_target_name2id(const char *name)
+{
+	int i;
+	char *allow_factory;
+
+	allow_factory = strstr(saved_command_line, ALLOW_FACTORY_PARAM_NAME);
+	if (!allow_factory && strstr(name, "factory"))
+		return NAME2ID[DEFAULT_TARGET_INDEX].id;
+
+	for (i = 0; i < ARRAY_SIZE(NAME2ID); i++)
+		if (!strcmp(NAME2ID[i].name, name))
+			return NAME2ID[i].id;
+
+	return -EINVAL;
+}
+
+const char *reboot_target_id2name(int id)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(NAME2ID); i++)
+		if (NAME2ID[i].id == id)
+			return NAME2ID[i].name;
+
+	return "";
+}
+
+static int set_reboot_target(const char *name)
+{
+	int id;
+
+	if (name == NULL) {
+		pr_err("Error in %s: NULL target\n", __func__);
+		return -EINVAL;
+	}
+
+	id  = reboot_target_name2id(name);
+	if (id < 0) {
+		pr_err("Error in %s: '%s' is not a valid target\n",
+		       __func__, name );
+		return -EINVAL;
+	}
+
+	return var ? var->set_reboot_target(name, id) : -ENODEV;
+}
+
+static int reboot_target_notify(struct notifier_block *notifier,
+				unsigned long what, void *data)
+{
+	const char *target = (const char *)data;
+	int ret;
+
+	if (what != SYS_RESTART)
+		goto out;
+
+	if (!target || target[0] == '\0')
+		target = NAME2ID[DEFAULT_TARGET_INDEX].name;
+
+	ret = set_reboot_target(target);
+	if (ret)
+		pr_err("%s: Failed to set the reboot target, return=%d\n",
+		       __func__, ret);
+
+out:
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block reboot_target_notifier = {
+	.notifier_call = reboot_target_notify,
+};
+
+int reboot_target_register(struct reboot_target *new)
+{
+	if (var)
+		return -EBUSY;
+
+	var = new;
+	return 0;
+}
+EXPORT_SYMBOL(reboot_target_register);
+
+int reboot_target_unregister(struct reboot_target *old)
+{
+	if (old && old == var) {
+		var = NULL;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+EXPORT_SYMBOL(reboot_target_unregister);
+
+int __init reboot_target_init(void)
+{
+	int ret = 0;
+
+	ret = register_reboot_notifier(&reboot_target_notifier);
+	if (ret)
+		pr_err("%s: failed to register reboot_notifier\n", __func__);
+
+	return ret;
+}
+
+void __exit reboot_target_exit(void)
+{
+	unregister_reboot_notifier(&reboot_target_notifier);
+}
+
+module_init(reboot_target_init);
+module_exit(reboot_target_exit);
+
+MODULE_AUTHOR("Jeremy Compostella <jeremy.compostella@intel.com>");
+MODULE_DESCRIPTION("Intel Reboot Target");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/external_drivers/drivers/platform/x86/reboot_target.h b/drivers/external_drivers/drivers/platform/x86/reboot_target.h
new file mode 100644
index 0000000..023d998
--- /dev/null
+++ b/drivers/external_drivers/drivers/platform/x86/reboot_target.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2013 Intel Corp
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#ifndef _REBOOT_TARGET_H_
+#define _REBOOT_TARGET_H_
+
+struct reboot_target {
+	int (*set_reboot_target)(const char *name, const int id);
+};
+
+extern const char *reboot_target_id2name(int id);
+
+extern int reboot_target_register(struct reboot_target *);
+extern int reboot_target_unregister(struct reboot_target *);
+
+#endif	/* _REBOOT_TARGET_H_ */
diff --git a/drivers/external_drivers/drivers/power/Kconfig b/drivers/external_drivers/drivers/power/Kconfig
new file mode 100644
index 0000000..c3c32f6
--- /dev/null
+++ b/drivers/external_drivers/drivers/power/Kconfig
@@ -0,0 +1,19 @@
+config PMIC_CCSM
+	tristate "PMIC CCSM driver"
+	select POWER_SUPPLY_BATTID
+	depends on INTEL_SCU_IPC && IIO
+	help
+	  Say Y to include support for PMIC Charger Control State Machine driver
+	  Driver for initializing and monitoring the CCSM in PMIC
+	  This driver sets the CCSM registers and handles the PMIC
+	  charger interrupts.
+
+config BQ24261_CHARGER
+	tristate "BQ24261 charger driver"
+	select POWER_SUPPLY_CHARGER
+	depends on I2C
+	help
+	  Say Y to include support for BQ24261 Charger driver. This driver
+	  makes use of power supply charging framework. So the driver gives
+	  the charger hardware abstraction only. Charging logic is abstracted
+	  in the charging framework.
diff --git a/drivers/external_drivers/drivers/power/Makefile b/drivers/external_drivers/drivers/power/Makefile
new file mode 100644
index 0000000..18c2c84
--- /dev/null
+++ b/drivers/external_drivers/drivers/power/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_BQ24261_CHARGER)	+= bq24261_charger.o
+obj-$(CONFIG_PMIC_CCSM) += pmic_ccsm.o
diff --git a/drivers/external_drivers/drivers/power/bq24261_charger.c b/drivers/external_drivers/drivers/power/bq24261_charger.c
new file mode 100644
index 0000000..981c705
--- /dev/null
+++ b/drivers/external_drivers/drivers/power/bq24261_charger.c
@@ -0,0 +1,1923 @@
+/*
+ * bq24261_charger.c - BQ24261 Charger I2C client driver
+ *
+ * Copyright (C) 2011 Intel Corporation
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * Author: Jenny TC <jenny.tc@intel.com>
+ */
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/power_supply.h>
+#include <linux/pm_runtime.h>
+#include <linux/io.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/usb/otg.h>
+#include <linux/power/bq24261_charger.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/wakelock.h>
+
+#include <asm/intel_scu_ipc.h>
+
+#define DEV_NAME "bq24261_charger"
+#define DEV_MANUFACTURER "TI"
+#define MODEL_NAME_SIZE 8
+#define DEV_MANUFACTURER_NAME_SIZE 4
+
+#define CHRG_TERM_WORKER_DELAY (30 * HZ)
+#define EXCEPTION_MONITOR_DELAY (60 * HZ)
+#define WDT_RESET_DELAY (15 * HZ)
+
+/* BQ24261 registers */
+#define BQ24261_STAT_CTRL0_ADDR		0x00
+#define BQ24261_CTRL_ADDR		0x01
+#define BQ24261_BATT_VOL_CTRL_ADDR	0x02
+#define BQ24261_VENDOR_REV_ADDR		0x03
+#define BQ24261_TERM_FCC_ADDR		0x04
+#define BQ24261_VINDPM_STAT_ADDR	0x05
+#define BQ24261_ST_NTC_MON_ADDR		0x06
+
+#define BQ24261_RESET_MASK		(0x01 << 7)
+#define BQ24261_RESET_ENABLE		(0x01 << 7)
+
+#define BQ24261_FAULT_MASK		0x07
+#define BQ24261_STAT_MASK		(0x03 << 4)
+#define BQ24261_BOOST_MASK		(0x01 << 6)
+#define BQ24261_TMR_RST_MASK		(0x01 << 7)
+#define BQ24261_TMR_RST			(0x01 << 7)
+
+#define BQ24261_ENABLE_BOOST		(0x01 << 6)
+
+#define BQ24261_VOVP			0x01
+#define BQ24261_LOW_SUPPLY		0x02
+#define BQ24261_THERMAL_SHUTDOWN	0x03
+#define BQ24261_BATT_TEMP_FAULT		0x04
+#define BQ24261_TIMER_FAULT		0x05
+#define BQ24261_BATT_OVP		0x06
+#define BQ24261_NO_BATTERY		0x07
+#define BQ24261_STAT_READY		0x00
+
+#define BQ24261_STAT_CHRG_PRGRSS	(0x01 << 4)
+#define BQ24261_STAT_CHRG_DONE		(0x02 << 4)
+#define BQ24261_STAT_FAULT		(0x03 << 4)
+
+#define BQ24261_CE_MASK			(0x01 << 1)
+#define BQ24261_CE_DISABLE		(0x01 << 1)
+
+#define BQ24261_HZ_MASK			(0x01)
+#define BQ24261_HZ_ENABLE		(0x01)
+
+#define BQ24261_ICHRG_MASK		(0x1F << 3)
+
+#define BQ24261_ITERM_MASK		(0x03)
+#define BQ24261_MIN_ITERM 50 /* 50 mA */
+#define BQ24261_MAX_ITERM 300 /* 300 mA */
+
+#define BQ24261_VBREG_MASK		(0x3F << 2)
+
+#define BQ24261_INLMT_MASK		(0x03 << 4)
+#define BQ24261_INLMT_100		0x00
+#define BQ24261_INLMT_150		(0x01 << 4)
+#define BQ24261_INLMT_500		(0x02 << 4)
+#define BQ24261_INLMT_900		(0x03 << 4)
+#define BQ24261_INLMT_1500		(0x04 << 4)
+#define BQ24261_INLMT_2500		(0x06 << 4)
+
+#define BQ24261_TE_MASK			(0x01 << 2)
+#define BQ24261_TE_ENABLE		(0x01 << 2)
+#define BQ24261_STAT_ENABLE_MASK	(0x01 << 3)
+#define BQ24261_STAT_ENABLE		(0x01 << 3)
+
+#define BQ24261_VENDOR_MASK		(0x07 << 5)
+#define BQ24261_VENDOR			(0x02 << 5)
+#define BQ24261_REV_MASK		(0x07)
+#define BQ24261_2_3_REV			(0x06)
+#define BQ24261_REV			(0x02)
+#define BQ24260_REV			(0x01)
+
+#define BQ24261_TS_MASK			(0x01 << 3)
+#define BQ24261_TS_ENABLED		(0x01 << 3)
+#define BQ24261_BOOST_ILIM_MASK		(0x01 << 4)
+#define BQ24261_BOOST_ILIM_500ma	(0x0)
+#define BQ24261_BOOST_ILIM_1A		(0x01 << 4)
+
+#define BQ24261_SAFETY_TIMER_MASK	(0x03 << 5)
+#define BQ24261_SAFETY_TIMER_40MIN	0x00
+#define BQ24261_SAFETY_TIMER_6HR	(0x01 << 5)
+#define BQ24261_SAFETY_TIMER_9HR	(0x02 << 5)
+#define BQ24261_SAFETY_TIMER_DISABLED	(0x03 << 5)
+
+/* 1% above voltage max design to report over voltage */
+#define BQ24261_OVP_MULTIPLIER			1010
+#define BQ24261_OVP_RECOVER_MULTIPLIER		990
+#define BQ24261_DEF_BAT_VOLT_MAX_DESIGN		4200000
+
+/* Settings for Voltage / DPPM Register (05) */
+#define BQ24261_VBATT_LEVEL1		3700000
+#define BQ24261_VBATT_LEVEL2		3960000
+#define BQ24261_VINDPM_MASK		(0x07)
+#define BQ24261_VINDPM_320MV		(0x01 << 2)
+#define BQ24261_VINDPM_160MV		(0x01 << 1)
+#define BQ24261_VINDPM_80MV		(0x01 << 0)
+#define BQ24261_CD_STATUS_MASK		(0x01 << 3)
+#define BQ24261_DPM_EN_MASK		(0x01 << 4)
+#define BQ24261_DPM_EN_FORCE		(0x01 << 4)
+#define BQ24261_LOW_CHG_MASK		(0x01 << 5)
+#define BQ24261_LOW_CHG_EN		(0x01 << 5)
+#define BQ24261_LOW_CHG_DIS		(~BQ24261_LOW_CHG_EN)
+#define BQ24261_DPM_STAT_MASK		(0x01 << 6)
+#define BQ24261_MINSYS_STAT_MASK	(0x01 << 7)
+
+#define BQ24261_MIN_CC			500 /* 500mA */
+#define BQ24261_MAX_CC			3000 /* 3A */
+
+u16 bq24261_sfty_tmr[][2] = {
+	{0, BQ24261_SAFETY_TIMER_DISABLED}
+	,
+	{40, BQ24261_SAFETY_TIMER_40MIN}
+	,
+	{360, BQ24261_SAFETY_TIMER_6HR}
+	,
+	{540, BQ24261_SAFETY_TIMER_9HR}
+	,
+};
+
+
+u16 bq24261_inlmt[][2] = {
+	{100, BQ24261_INLMT_100}
+	,
+	{150, BQ24261_INLMT_150}
+	,
+	{500, BQ24261_INLMT_500}
+	,
+	{900, BQ24261_INLMT_900}
+	,
+	{1500, BQ24261_INLMT_1500}
+	,
+	{2500, BQ24261_INLMT_2500}
+	,
+};
+
+#define BQ24261_MIN_CV 3500
+#define BQ24261_MAX_CV 4440
+#define BQ24261_CV_DIV 20
+#define BQ24261_CV_BIT_POS 2
+
+static enum power_supply_property bq24261_usb_props[] = {
+	POWER_SUPPLY_PROP_PRESENT,
+	POWER_SUPPLY_PROP_ONLINE,
+	POWER_SUPPLY_PROP_TYPE,
+	POWER_SUPPLY_PROP_HEALTH,
+	POWER_SUPPLY_PROP_MAX_CHARGE_CURRENT,
+	POWER_SUPPLY_PROP_MAX_CHARGE_VOLTAGE,
+	POWER_SUPPLY_PROP_CHARGE_CURRENT,
+	POWER_SUPPLY_PROP_CHARGE_VOLTAGE,
+	POWER_SUPPLY_PROP_INLMT,
+	POWER_SUPPLY_PROP_ENABLE_CHARGING,
+	POWER_SUPPLY_PROP_ENABLE_CHARGER,
+	POWER_SUPPLY_PROP_CHARGE_TERM_CUR,
+	POWER_SUPPLY_PROP_CABLE_TYPE,
+	POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT,
+	POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX,
+	POWER_SUPPLY_PROP_MODEL_NAME,
+	POWER_SUPPLY_PROP_MANUFACTURER,
+	POWER_SUPPLY_PROP_MAX_TEMP,
+	POWER_SUPPLY_PROP_MIN_TEMP,
+};
+
+enum bq24261_chrgr_stat {
+	BQ24261_CHRGR_STAT_UNKNOWN,
+	BQ24261_CHRGR_STAT_READY,
+	BQ24261_CHRGR_STAT_CHARGING,
+	BQ24261_CHRGR_STAT_BAT_FULL,
+	BQ24261_CHRGR_STAT_FAULT,
+};
+
+struct bq24261_otg_event {
+	struct list_head node;
+	bool is_enable;
+};
+
+struct bq24261_charger {
+
+	struct mutex stat_lock;
+	struct i2c_client *client;
+	struct bq24261_plat_data *pdata;
+	struct power_supply psy_usb;
+	struct delayed_work sw_term_work;
+	struct delayed_work wdt_work;
+	struct delayed_work low_supply_fault_work;
+	struct delayed_work exception_mon_work;
+	struct notifier_block otg_nb;
+	struct usb_phy *transceiver;
+	struct work_struct otg_work;
+	struct work_struct irq_work;
+	struct list_head otg_queue;
+	struct list_head irq_queue;
+	wait_queue_head_t wait_ready;
+	spinlock_t otg_queue_lock;
+	void __iomem *irq_iomap;
+
+	int chrgr_health;
+	int bat_health;
+	int cc;
+	int cv;
+	int inlmt;
+	int max_cc;
+	int max_cv;
+	int iterm;
+	int cable_type;
+	int cntl_state;
+	int max_temp;
+	int min_temp;
+	int revision;
+	enum bq24261_chrgr_stat chrgr_stat;
+	bool online;
+	bool present;
+	bool is_charging_enabled;
+	bool is_charger_enabled;
+	bool is_vsys_on;
+	bool boost_mode;
+	bool is_hw_chrg_term;
+	char model_name[MODEL_NAME_SIZE];
+	char manufacturer[DEV_MANUFACTURER_NAME_SIZE];
+	struct wake_lock chrgr_en_wakelock;
+};
+
+enum bq2426x_model_num {
+	BQ2426X = 0,
+	BQ24260,
+	BQ24261,
+};
+
+struct bq2426x_model {
+	char model_name[MODEL_NAME_SIZE];
+	enum bq2426x_model_num model;
+};
+
+static struct bq2426x_model bq24261_model_name[] = {
+	{ "bq2426x", BQ2426X },
+	{ "bq24260", BQ24260 },
+	{ "bq24261", BQ24261 },
+};
+
+struct i2c_client *bq24261_client;
+static inline int get_battery_voltage(int *volt);
+static inline int get_battery_current(int *cur);
+static int bq24261_handle_irq(struct bq24261_charger *chip, u8 stat_reg);
+static inline int bq24261_set_iterm(struct bq24261_charger *chip, int iterm);
+
+enum power_supply_type get_power_supply_type(
+		enum power_supply_charger_cable_type cable)
+{
+
+	switch (cable) {
+
+	case POWER_SUPPLY_CHARGER_TYPE_USB_DCP:
+		return POWER_SUPPLY_TYPE_USB_DCP;
+	case POWER_SUPPLY_CHARGER_TYPE_USB_CDP:
+		return POWER_SUPPLY_TYPE_USB_CDP;
+	case POWER_SUPPLY_CHARGER_TYPE_USB_ACA:
+	case POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK:
+		return POWER_SUPPLY_TYPE_USB_ACA;
+	case POWER_SUPPLY_CHARGER_TYPE_AC:
+		return POWER_SUPPLY_TYPE_MAINS;
+	case POWER_SUPPLY_CHARGER_TYPE_SE1:
+		return POWER_SUPPLY_TYPE_USB_DCP;
+	case POWER_SUPPLY_CHARGER_TYPE_NONE:
+	case POWER_SUPPLY_CHARGER_TYPE_USB_SDP:
+	default:
+		return POWER_SUPPLY_TYPE_USB;
+	}
+
+	return POWER_SUPPLY_TYPE_USB;
+}
+
+static void lookup_regval(u16 tbl[][2], size_t size, u16 in_val, u8 *out_val)
+{
+	int i;
+	for (i = 1; i < size; ++i)
+		if (in_val < tbl[i][0])
+			break;
+
+	*out_val = (u8) tbl[i - 1][1];
+}
+
+void bq24261_cc_to_reg(int cc, u8 *reg_val)
+{
+	/* Ichrg bits are B3-B7
+	 * Icharge = 500mA + IchrgCode * 100mA
+	 */
+	cc = clamp_t(int, cc, BQ24261_MIN_CC, BQ24261_MAX_CC);
+	cc = cc - BQ24261_MIN_CC;
+	*reg_val = (cc / 100) << 3;
+}
+
+void bq24261_cv_to_reg(int cv, u8 *reg_val)
+{
+	int val;
+
+	val = clamp_t(int, cv, BQ24261_MIN_CV, BQ24261_MAX_CV);
+	*reg_val =
+		(((val - BQ24261_MIN_CV) / BQ24261_CV_DIV)
+			<< BQ24261_CV_BIT_POS);
+}
+
+void bq24261_inlmt_to_reg(int inlmt, u8 *regval)
+{
+	return lookup_regval(bq24261_inlmt, ARRAY_SIZE(bq24261_inlmt),
+			     inlmt, regval);
+}
+
+static inline void bq24261_iterm_to_reg(int iterm, u8 *regval)
+{
+	/* Iterm bits are B0-B2
+	 * Icharge = 50mA + ItermCode * 50mA
+	 */
+	iterm = clamp_t(int, iterm, BQ24261_MIN_ITERM,  BQ24261_MAX_ITERM);
+	iterm = iterm - BQ24261_MIN_ITERM;
+	*regval =  iterm / 50;
+}
+
+static inline void bq24261_sfty_tmr_to_reg(int tmr, u8 *regval)
+{
+	return lookup_regval(bq24261_sfty_tmr, ARRAY_SIZE(bq24261_sfty_tmr),
+			     tmr, regval);
+}
+
+static inline int bq24261_read_reg(struct i2c_client *client, u8 reg)
+{
+	int ret;
+
+	ret = i2c_smbus_read_byte_data(client, reg);
+	if (ret < 0)
+		dev_err(&client->dev, "Error(%d) in reading reg %d\n", ret,
+			reg);
+
+	return ret;
+}
+
+
+static inline void bq24261_dump_regs(bool dump_master)
+{
+	int i;
+	int ret;
+	int bat_cur, bat_volt;
+	struct bq24261_charger *chip;
+	char buf[1024] = {0};
+	int used = 0;
+
+	if (!bq24261_client)
+		return;
+
+	chip = i2c_get_clientdata(bq24261_client);
+
+	dev_info(&bq24261_client->dev, "*======================*\n");
+	ret = get_battery_current(&bat_cur);
+	if (ret)
+		dev_err(&bq24261_client->dev,
+			"%s: Error in getting battery current", __func__);
+	else
+		dev_info(&bq24261_client->dev, "Battery Current=%dma\n",
+				(bat_cur/1000));
+
+	ret = get_battery_voltage(&bat_volt);
+	if (ret)
+		dev_err(&bq24261_client->dev,
+			"%s: Error in getting battery voltage", __func__);
+	else
+		dev_info(&bq24261_client->dev, "Battery VOlatge=%dmV\n",
+			(bat_volt/1000));
+
+
+	dev_info(&bq24261_client->dev, "BQ24261 Register dump:\n");
+
+	for (i = 0; i < 7; ++i) {
+		ret = bq24261_read_reg(bq24261_client, i);
+		if (ret < 0)
+			dev_err(&bq24261_client->dev,
+				"Error in reading REG 0x%X\n", i);
+		else
+			used += snprintf(buf + used, sizeof(buf) - used,
+					" 0x%X=0x%X,", i, ret);
+	}
+	dev_info(&bq24261_client->dev, "%s\n", buf);
+	dev_info(&bq24261_client->dev, "*======================*\n");
+
+	if (chip->pdata->dump_master_regs && dump_master)
+			chip->pdata->dump_master_regs();
+
+}
+
+
+#ifdef CONFIG_DEBUG_FS
+static int bq24261_reg_show(struct seq_file *seq, void *unused)
+{
+	int val;
+	u8 reg;
+
+	reg = *((u8 *)seq->private);
+	val = bq24261_read_reg(bq24261_client, reg);
+
+	seq_printf(seq, "0x%02x\n", val);
+	return 0;
+}
+
+static int bq24261_dbgfs_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, bq24261_reg_show, inode->i_private);
+}
+
+static u32 bq24261_register_set[] = {
+	BQ24261_STAT_CTRL0_ADDR,
+	BQ24261_CTRL_ADDR,
+	BQ24261_BATT_VOL_CTRL_ADDR,
+	BQ24261_VENDOR_REV_ADDR,
+	BQ24261_TERM_FCC_ADDR,
+	BQ24261_VINDPM_STAT_ADDR,
+	BQ24261_ST_NTC_MON_ADDR,
+};
+
+static struct dentry *bq24261_dbgfs_dir;
+
+static const struct file_operations bq24261_dbg_fops = {
+	.open = bq24261_dbgfs_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release
+};
+
+static void bq24261_debugfs_init(void)
+{
+	struct dentry *fentry;
+	u32 count = ARRAY_SIZE(bq24261_register_set);
+	u32 i;
+	char name[6] = {0};
+
+	bq24261_dbgfs_dir = debugfs_create_dir(DEV_NAME, NULL);
+	if (bq24261_dbgfs_dir == NULL)
+		goto debugfs_root_exit;
+
+	for (i = 0; i < count; i++) {
+		snprintf(name, 6, "%02x", bq24261_register_set[i]);
+		fentry = debugfs_create_file(name, S_IRUGO,
+						bq24261_dbgfs_dir,
+						&bq24261_register_set[i],
+						&bq24261_dbg_fops);
+		if (fentry == NULL)
+			goto debugfs_err_exit;
+	}
+	dev_err(&bq24261_client->dev, "Debugfs created successfully!!\n");
+	return;
+
+debugfs_err_exit:
+	debugfs_remove_recursive(bq24261_dbgfs_dir);
+debugfs_root_exit:
+	dev_err(&bq24261_client->dev, "Error Creating debugfs!!\n");
+	return;
+}
+
+static void bq24261_debugfs_exit(void)
+{
+	if (bq24261_dbgfs_dir)
+		debugfs_remove_recursive(bq24261_dbgfs_dir);
+
+	return;
+}
+
+#else
+static void bq24261_debugfs_init(void)
+{
+	return;
+}
+
+static void bq24261_debugfs_exit(void)
+{
+	return;
+}
+#endif
+
+static inline int bq24261_write_reg(struct i2c_client *client, u8 reg, u8 data)
+{
+	int ret;
+
+	ret = i2c_smbus_write_byte_data(client, reg, data);
+	if (ret < 0)
+		dev_err(&client->dev, "Error(%d) in writing %d to reg %d\n",
+			ret, data, reg);
+
+	return ret;
+}
+
+static inline int bq24261_read_modify_reg(struct i2c_client *client, u8 reg,
+					  u8 mask, u8 val)
+{
+	int ret;
+
+	ret = bq24261_read_reg(client, reg);
+	if (ret < 0)
+		return ret;
+	ret = (ret & ~mask) | (mask & val);
+	return bq24261_write_reg(client, reg, ret);
+}
+
+static inline int bq24261_tmr_ntc_init(struct bq24261_charger *chip)
+{
+	u8 reg_val;
+	int ret;
+
+	bq24261_sfty_tmr_to_reg(chip->pdata->safety_timer, &reg_val);
+
+	if (chip->pdata->is_ts_enabled)
+		reg_val |= BQ24261_TS_ENABLED;
+
+	/* Check if boost mode current configuration is above 1A*/
+	if (chip->pdata->boost_mode_ma >= 1000)
+		reg_val |= BQ24261_BOOST_ILIM_1A;
+
+	ret = bq24261_read_modify_reg(chip->client, BQ24261_ST_NTC_MON_ADDR,
+			BQ24261_TS_MASK|BQ24261_SAFETY_TIMER_MASK|
+			BQ24261_BOOST_ILIM_MASK, reg_val);
+
+	return ret;
+}
+
+static inline int bq24261_enable_charging(
+	struct bq24261_charger *chip, bool val)
+{
+	int ret;
+	u8 reg_val;
+	bool is_ready;
+
+	dev_dbg(&chip->client->dev, "%s=%d\n", __func__, val);
+	ret = bq24261_read_reg(chip->client,
+					BQ24261_STAT_CTRL0_ADDR);
+	if (ret < 0) {
+		dev_err(&chip->client->dev,
+			"Error(%d) in reading BQ24261_STAT_CTRL0_ADDR\n", ret);
+	}
+
+	is_ready =  (ret & BQ24261_STAT_MASK) != BQ24261_STAT_FAULT;
+
+	/* If status is fault, wait for READY before enabling the charging */
+	if (!is_ready && val) {
+		ret = wait_event_timeout(chip->wait_ready,
+			(chip->chrgr_stat == BQ24261_CHRGR_STAT_READY),
+				HZ);
+		dev_info(&chip->client->dev,
+			"chrgr_stat=%x\n", chip->chrgr_stat);
+		if (ret == 0) {
+			dev_err(&chip->client->dev,
+				"ChgrReady timeout, enable charging anyway\n");
+		}
+	}
+
+	if (chip->pdata->enable_charging) {
+		ret = chip->pdata->enable_charging(val);
+		if (ret) {
+			dev_err(&chip->client->dev,
+				"Error(%d) in master enable-charging\n", ret);
+		}
+	}
+
+	if (val) {
+		reg_val = (~BQ24261_CE_DISABLE & BQ24261_CE_MASK);
+		if (chip->is_hw_chrg_term)
+			reg_val |= BQ24261_TE_ENABLE;
+	} else {
+		reg_val = BQ24261_CE_DISABLE;
+	}
+
+	reg_val |=  BQ24261_STAT_ENABLE;
+
+	ret = bq24261_read_modify_reg(chip->client, BQ24261_CTRL_ADDR,
+		       BQ24261_STAT_ENABLE_MASK|BQ24261_RESET_MASK|
+				BQ24261_CE_MASK|BQ24261_TE_MASK,
+					reg_val);
+	if (ret || !val)
+		return ret;
+
+	bq24261_set_iterm(chip, chip->iterm);
+	ret = bq24261_tmr_ntc_init(chip);
+	if (ret) {
+		dev_err(&chip->client->dev,
+			"Error(%d) in tmr_ntc_init\n", ret);
+	}
+
+	dev_info(&chip->client->dev, "Completed %s=%d\n", __func__, val);
+	bq24261_dump_regs(false);
+
+	return ret;
+}
+
+static inline int bq24261_reset_timer(struct bq24261_charger *chip)
+{
+	return bq24261_read_modify_reg(chip->client, BQ24261_STAT_CTRL0_ADDR,
+			BQ24261_TMR_RST_MASK, BQ24261_TMR_RST);
+}
+
+static inline int bq24261_enable_charger(
+	struct bq24261_charger *chip, int val)
+{
+
+	/* TODO: Implement enable/disable HiZ mode to enable/
+	*  disable charger
+	*/
+	u8 reg_val;
+	int ret;
+
+	dev_dbg(&chip->client->dev, "%s=%d\n", __func__, val);
+	reg_val = val ? (~BQ24261_HZ_ENABLE & BQ24261_HZ_MASK)  :
+			BQ24261_HZ_ENABLE;
+
+	ret = bq24261_read_modify_reg(chip->client, BQ24261_CTRL_ADDR,
+		       BQ24261_HZ_MASK|BQ24261_RESET_MASK, reg_val);
+	if (ret)
+		return ret;
+
+	return bq24261_reset_timer(chip);
+}
+
+static inline int bq24261_set_cc(struct bq24261_charger *chip, int cc)
+{
+	u8 reg_val;
+	int ret;
+
+	dev_dbg(&chip->client->dev, "%s=%d\n", __func__, cc);
+	if (chip->pdata->set_cc) {
+		ret = chip->pdata->set_cc(cc);
+		if (unlikely(ret))
+			return ret;
+	}
+
+	if (cc && (cc < BQ24261_MIN_CC)) {
+		dev_dbg(&chip->client->dev, "Set LOW_CHG bit\n");
+		reg_val = BQ24261_LOW_CHG_EN;
+		ret = bq24261_read_modify_reg(chip->client,
+				BQ24261_VINDPM_STAT_ADDR,
+				BQ24261_LOW_CHG_MASK, reg_val);
+	} else {
+		dev_dbg(&chip->client->dev, "Clear LOW_CHG bit\n");
+		reg_val = BQ24261_LOW_CHG_DIS;
+		ret = bq24261_read_modify_reg(chip->client,
+				BQ24261_VINDPM_STAT_ADDR,
+				BQ24261_LOW_CHG_MASK, reg_val);
+	}
+
+	/* cc setting will be done by platform specific hardware
+	 * but, in case of error-conditions or if the setting fails,
+	 * the following will be a fail-safe mechanism.
+	 */
+
+	bq24261_cc_to_reg(cc, &reg_val);
+
+	return bq24261_read_modify_reg(chip->client, BQ24261_TERM_FCC_ADDR,
+			BQ24261_ICHRG_MASK, reg_val);
+}
+
+static inline int bq24261_set_cv(struct bq24261_charger *chip, int cv)
+{
+	int bat_volt;
+	int ret;
+	u8 reg_val;
+	u8 vindpm_val = 0x0;
+
+	dev_dbg(&chip->client->dev, "%s=%d\n", __func__, cv);
+	/*
+	* Setting VINDPM value as per the battery voltage
+	*  VBatt           Vindpm     Register Setting
+	*  < 3.7v           4.2v       0x0 (default)
+	*  3.71v - 3.96v    4.36v      0x2
+	*  > 3.96v          4.6v       0x5
+	*/
+	ret = get_battery_voltage(&bat_volt);
+	if (ret) {
+		dev_err(&chip->client->dev,
+			"Error getting battery voltage!!\n");
+	} else {
+		if (bat_volt > BQ24261_VBATT_LEVEL2)
+			vindpm_val =
+				(BQ24261_VINDPM_320MV | BQ24261_VINDPM_80MV);
+		else if (bat_volt > BQ24261_VBATT_LEVEL1)
+			vindpm_val = BQ24261_VINDPM_160MV;
+	}
+
+	ret = bq24261_read_modify_reg(chip->client,
+			BQ24261_VINDPM_STAT_ADDR,
+			BQ24261_VINDPM_MASK,
+			vindpm_val);
+	if (ret) {
+		dev_err(&chip->client->dev,
+			"Error setting VINDPM setting!!\n");
+		return ret;
+	}
+
+	if (chip->pdata->set_cv)
+		chip->pdata->set_cv(cv);
+
+	/* cv setting will be done by platform specific hardware
+	 * but, in case of error-conditions or if the setting fails,
+	 * the following will be a fail-safe mechanism.
+	 */
+	bq24261_cv_to_reg(cv, &reg_val);
+
+	return bq24261_read_modify_reg(chip->client, BQ24261_BATT_VOL_CTRL_ADDR,
+				       BQ24261_VBREG_MASK, reg_val);
+}
+
+static inline int bq24261_set_inlmt(struct bq24261_charger *chip, int inlmt)
+{
+	u8 reg_val;
+
+	dev_dbg(&chip->client->dev, "%s=%d\n", __func__, inlmt);
+	if (chip->pdata->set_inlmt)
+		return chip->pdata->set_inlmt(inlmt);
+
+	bq24261_inlmt_to_reg(inlmt, &reg_val);
+
+	return bq24261_read_modify_reg(chip->client, BQ24261_CTRL_ADDR,
+		       BQ24261_RESET_MASK|BQ24261_INLMT_MASK, reg_val);
+
+}
+
+static inline void resume_charging(struct bq24261_charger *chip)
+{
+
+	if (chip->is_charger_enabled)
+		bq24261_enable_charger(chip, true);
+	if (chip->inlmt)
+		bq24261_set_inlmt(chip, chip->inlmt);
+	if (chip->cc)
+		bq24261_set_cc(chip, chip->cc);
+	if (chip->cv)
+		bq24261_set_cv(chip, chip->cv);
+	if (chip->is_charging_enabled)
+		bq24261_enable_charging(chip, true);
+}
+
+static inline int bq24261_set_iterm(struct bq24261_charger *chip, int iterm)
+{
+	u8 reg_val;
+
+	if (chip->pdata->set_iterm)
+		return chip->pdata->set_iterm(iterm);
+
+	bq24261_iterm_to_reg(iterm, &reg_val);
+
+	return bq24261_read_modify_reg(chip->client, BQ24261_TERM_FCC_ADDR,
+				       BQ24261_ITERM_MASK, reg_val);
+}
+
+static inline int bq24261_enable_hw_charge_term(
+	struct bq24261_charger *chip, bool val)
+{
+	u8 data;
+	int ret;
+
+	data = val ? BQ24261_TE_ENABLE : (~BQ24261_TE_ENABLE & BQ24261_TE_MASK);
+
+
+	ret = bq24261_read_modify_reg(chip->client, BQ24261_CTRL_ADDR,
+			       BQ24261_RESET_MASK|BQ24261_TE_MASK, data);
+
+	if (ret)
+		return ret;
+
+	chip->is_hw_chrg_term = val ? true : false;
+
+	return ret;
+}
+
+static inline int bq24261_enable_boost_mode(
+	struct bq24261_charger *chip, int val)
+{
+	int ret = 0;
+
+
+	if (val) {
+
+		if (((chip->revision & BQ24261_REV_MASK) == BQ24261_REV) ||
+				chip->pdata->is_wdt_kick_needed) {
+			if (chip->pdata->enable_vbus)
+				chip->pdata->enable_vbus(true);
+		}
+
+		if (chip->pdata->handle_otgmode)
+			chip->pdata->handle_otgmode(true);
+
+		/* TODO: Support different Host Mode Current limits */
+
+		bq24261_enable_charger(chip, true);
+		ret =
+		    bq24261_read_modify_reg(chip->client,
+					    BQ24261_STAT_CTRL0_ADDR,
+					    BQ24261_BOOST_MASK,
+					    BQ24261_ENABLE_BOOST);
+		if (unlikely(ret))
+			return ret;
+
+		ret = bq24261_tmr_ntc_init(chip);
+		if (unlikely(ret))
+			return ret;
+		chip->boost_mode = true;
+
+		if (((chip->revision & BQ24261_REV_MASK) == BQ24261_REV) ||
+				chip->pdata->is_wdt_kick_needed)
+			schedule_delayed_work(&chip->wdt_work, 0);
+
+		dev_info(&chip->client->dev, "Boost Mode enabled\n");
+	} else {
+
+		ret =
+		    bq24261_read_modify_reg(chip->client,
+					    BQ24261_STAT_CTRL0_ADDR,
+					    BQ24261_BOOST_MASK,
+					    ~BQ24261_ENABLE_BOOST);
+
+		if (unlikely(ret))
+			return ret;
+		/* if charging need not to be enabled, disable
+		* the charger else keep the charger on
+		*/
+		if (!chip->is_charging_enabled)
+			bq24261_enable_charger(chip, false);
+		chip->boost_mode = false;
+		dev_info(&chip->client->dev, "Boost Mode disabled\n");
+
+		if (((chip->revision & BQ24261_REV_MASK) == BQ24261_REV) ||
+				chip->pdata->is_wdt_kick_needed) {
+			cancel_delayed_work_sync(&chip->wdt_work);
+
+			if (chip->pdata->enable_vbus)
+				chip->pdata->enable_vbus(false);
+		}
+
+		if (chip->pdata->handle_otgmode)
+			chip->pdata->handle_otgmode(false);
+
+		/* Notify power supply subsystem to enable charging
+		 * if needed. Eg. if DC adapter is connected
+		 */
+		power_supply_changed(&chip->psy_usb);
+	}
+
+	return ret;
+}
+
+static inline bool bq24261_is_vsys_on(struct bq24261_charger *chip)
+{
+	int ret;
+	struct i2c_client *client = chip->client;
+
+	ret = bq24261_read_reg(client, BQ24261_CTRL_ADDR);
+	if (ret < 0) {
+		dev_err(&client->dev,
+			"Error(%d) in reading BQ24261_CTRL_ADDR\n", ret);
+		return false;
+	}
+
+	if (((ret & BQ24261_HZ_MASK) == BQ24261_HZ_ENABLE) &&
+			chip->is_charger_enabled) {
+		dev_err(&client->dev, "Charger in Hi Z Mode\n");
+		bq24261_dump_regs(true);
+		return false;
+	}
+
+	ret = bq24261_read_reg(client, BQ24261_VINDPM_STAT_ADDR);
+	if (ret < 0) {
+		dev_err(&client->dev,
+			"Error(%d) in reading BQ24261_VINDPM_STAT_ADDR\n", ret);
+		return false;
+	}
+
+	if (ret & BQ24261_CD_STATUS_MASK) {
+		dev_err(&client->dev, "CD line asserted\n");
+		bq24261_dump_regs(true);
+		return false;
+	}
+
+	return true;
+}
+
+
+static inline bool bq24261_is_online(struct bq24261_charger *chip)
+{
+	if (chip->cable_type == POWER_SUPPLY_CHARGER_TYPE_NONE)
+		return false;
+	else if (!chip->is_charger_enabled)
+		return false;
+	/* BQ24261 gives interrupt only on stop/resume charging.
+	 * If charging is already stopped, we need to query the hardware
+	 * to see charger is still active and can supply vsys or not.
+	 */
+	else if ((chip->chrgr_stat == BQ24261_CHRGR_STAT_FAULT) ||
+		 (!chip->is_charging_enabled))
+		return bq24261_is_vsys_on(chip);
+	else
+		return chip->is_vsys_on;
+}
+
+static int bq24261_usb_set_property(struct power_supply *psy,
+				    enum power_supply_property psp,
+				    const union power_supply_propval *val)
+{
+	struct bq24261_charger *chip = container_of(psy,
+						    struct bq24261_charger,
+						    psy_usb);
+	int ret = 0;
+
+
+	mutex_lock(&chip->stat_lock);
+
+
+	switch (psp) {
+
+	case POWER_SUPPLY_PROP_PRESENT:
+		chip->present = val->intval;
+		/*If charging capable cable is present, then
+		hold the charger wakelock so that the target
+		does not enter suspend mode when charging is
+		in progress.
+		If charging cable has been removed, then
+		unlock the wakelock to allow the target to
+		enter the sleep mode*/
+		if (!wake_lock_active(&chip->chrgr_en_wakelock) &&
+					val->intval)
+			wake_lock(&chip->chrgr_en_wakelock);
+		else if (wake_lock_active(&chip->chrgr_en_wakelock) &&
+					!val->intval)
+			wake_unlock(&chip->chrgr_en_wakelock);
+		break;
+	case POWER_SUPPLY_PROP_ONLINE:
+		chip->online = val->intval;
+		break;
+	case POWER_SUPPLY_PROP_ENABLE_CHARGING:
+
+		/* Reset charging to avoid issues of not starting
+		 * charging when we're recovering from fault-cases.
+		 */
+		if (val->intval) {
+			dev_info(&chip->client->dev, "Charging reset");
+			ret = bq24261_enable_charging(chip, false);
+			if (ret)
+				dev_err(&chip->client->dev,
+					"Error(%d) in charging reset", ret);
+		}
+
+		ret = bq24261_enable_charging(chip, val->intval);
+
+		if (ret)
+			dev_err(&chip->client->dev,
+				"Error(%d) in %s charging", ret,
+				(val->intval ? "enable" : "disable"));
+		else
+			chip->is_charging_enabled = val->intval;
+
+		if (val->intval)
+			bq24261_enable_hw_charge_term(chip, true);
+		else
+			cancel_delayed_work_sync(&chip->sw_term_work);
+
+		break;
+	case POWER_SUPPLY_PROP_ENABLE_CHARGER:
+
+		/* Don't enable the charger unless overvoltage is recovered */
+
+		if (chip->bat_health != POWER_SUPPLY_HEALTH_OVERVOLTAGE) {
+			ret = bq24261_enable_charger(chip, val->intval);
+
+			if (ret)
+				dev_err(&chip->client->dev,
+					"Error(%d) in %s charger", ret,
+					(val->intval ? "enable" : "disable"));
+			else
+				chip->is_charger_enabled = val->intval;
+		} else {
+			dev_info(&chip->client->dev, "Battery Over Voltage. Charger will be disabled\n");
+		}
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_CURRENT:
+		ret = bq24261_set_cc(chip, val->intval);
+		if (!ret)
+			chip->cc = val->intval;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_VOLTAGE:
+		ret = bq24261_set_cv(chip, val->intval);
+		if (!ret)
+			chip->cv = val->intval;
+		break;
+	case POWER_SUPPLY_PROP_MAX_CHARGE_CURRENT:
+		chip->max_cc = val->intval;
+		break;
+	case POWER_SUPPLY_PROP_MAX_CHARGE_VOLTAGE:
+		chip->max_cv = val->intval;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_TERM_CUR:
+		ret = bq24261_set_iterm(chip, val->intval);
+		if (!ret)
+			chip->iterm = val->intval;
+		break;
+	case POWER_SUPPLY_PROP_CABLE_TYPE:
+
+		chip->cable_type = val->intval;
+		chip->psy_usb.type = get_power_supply_type(chip->cable_type);
+		if (chip->cable_type != POWER_SUPPLY_CHARGER_TYPE_NONE) {
+			chip->chrgr_health = POWER_SUPPLY_HEALTH_GOOD;
+			chip->chrgr_stat = BQ24261_CHRGR_STAT_UNKNOWN;
+
+			/* Adding this processing in order to check
+			for any faults during connect */
+
+			ret = bq24261_read_reg(chip->client,
+						BQ24261_STAT_CTRL0_ADDR);
+			if (ret < 0)
+				dev_err(&chip->client->dev,
+				"Error (%d) in reading status register(0x00)\n",
+				ret);
+			else
+				bq24261_handle_irq(chip, ret);
+		} else {
+			chip->chrgr_stat = BQ24261_CHRGR_STAT_UNKNOWN;
+			chip->chrgr_health = POWER_SUPPLY_HEALTH_UNKNOWN;
+			cancel_delayed_work_sync(&chip->low_supply_fault_work);
+		}
+
+
+		break;
+	case POWER_SUPPLY_PROP_INLMT:
+		ret = bq24261_set_inlmt(chip, val->intval);
+		if (!ret)
+			chip->inlmt = val->intval;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
+		chip->cntl_state = val->intval;
+		break;
+	case POWER_SUPPLY_PROP_MAX_TEMP:
+		chip->max_temp = val->intval;
+		break;
+	case POWER_SUPPLY_PROP_MIN_TEMP:
+		chip->min_temp = val->intval;
+		break;
+	default:
+		ret = -ENODATA;
+	}
+
+	mutex_unlock(&chip->stat_lock);
+	return ret;
+}
+
+static int bq24261_usb_get_property(struct power_supply *psy,
+				    enum power_supply_property psp,
+				    union power_supply_propval *val)
+{
+	struct bq24261_charger *chip = container_of(psy,
+						    struct bq24261_charger,
+						    psy_usb);
+
+	mutex_lock(&chip->stat_lock);
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_PRESENT:
+		val->intval = chip->present;
+		break;
+	case POWER_SUPPLY_PROP_ONLINE:
+		val->intval = chip->online;
+		break;
+	case POWER_SUPPLY_PROP_HEALTH:
+		val->intval = chip->chrgr_health;
+		break;
+	case POWER_SUPPLY_PROP_MAX_CHARGE_CURRENT:
+		val->intval = chip->max_cc;
+		break;
+	case POWER_SUPPLY_PROP_MAX_CHARGE_VOLTAGE:
+		val->intval = chip->max_cv;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_CURRENT:
+		val->intval = chip->cc;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_VOLTAGE:
+		val->intval = chip->cv;
+		break;
+	case POWER_SUPPLY_PROP_INLMT:
+		val->intval = chip->inlmt;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_TERM_CUR:
+		val->intval = chip->iterm;
+		break;
+	case POWER_SUPPLY_PROP_CABLE_TYPE:
+		val->intval = chip->cable_type;
+		break;
+	case POWER_SUPPLY_PROP_ENABLE_CHARGING:
+		if (chip->boost_mode)
+			val->intval = false;
+		else
+			val->intval = (chip->is_charging_enabled &&
+			(chip->chrgr_stat == BQ24261_CHRGR_STAT_CHARGING));
+		break;
+	case POWER_SUPPLY_PROP_ENABLE_CHARGER:
+		val->intval = bq24261_is_online(chip);
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
+		val->intval = chip->cntl_state;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX:
+		val->intval = chip->pdata->num_throttle_states;
+		break;
+	case POWER_SUPPLY_PROP_MODEL_NAME:
+		val->strval = chip->model_name;
+		break;
+	case POWER_SUPPLY_PROP_MANUFACTURER:
+		val->strval = chip->manufacturer;
+		break;
+	case POWER_SUPPLY_PROP_MAX_TEMP:
+		val->intval = chip->max_temp;
+		break;
+	case POWER_SUPPLY_PROP_MIN_TEMP:
+		val->intval = chip->min_temp;
+		break;
+	default:
+		mutex_unlock(&chip->stat_lock);
+		return -EINVAL;
+	}
+
+	mutex_unlock(&chip->stat_lock);
+	return 0;
+}
+
+static inline struct power_supply *get_psy_battery(void)
+{
+	struct class_dev_iter iter;
+	struct device *dev;
+	static struct power_supply *pst;
+
+	class_dev_iter_init(&iter, power_supply_class, NULL, NULL);
+	while ((dev = class_dev_iter_next(&iter))) {
+		pst = (struct power_supply *)dev_get_drvdata(dev);
+		if (pst->type == POWER_SUPPLY_TYPE_BATTERY) {
+			class_dev_iter_exit(&iter);
+			return pst;
+		}
+	}
+	class_dev_iter_exit(&iter);
+
+	return NULL;
+}
+
+static inline int get_battery_voltage(int *volt)
+{
+	struct power_supply *psy;
+	union power_supply_propval val;
+	int ret;
+
+	psy = get_psy_battery();
+	if (!psy)
+		return -EINVAL;
+
+	ret = psy->get_property(psy, POWER_SUPPLY_PROP_VOLTAGE_NOW, &val);
+	if (!ret)
+		*volt = (val.intval);
+
+	return ret;
+}
+
+static inline int get_battery_volt_max_design(int *volt)
+{
+	struct power_supply *psy;
+	union power_supply_propval val;
+	int ret;
+
+	psy = get_psy_battery();
+	if (!psy)
+		return -EINVAL;
+
+	ret = psy->get_property(psy,
+		POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN, &val);
+	if (!ret)
+		(*volt = val.intval);
+	return ret;
+}
+
+static inline int get_battery_current(int *cur)
+{
+	struct power_supply *psy;
+	union power_supply_propval val;
+	int ret;
+
+	psy = get_psy_battery();
+	if (!psy)
+		return -EINVAL;
+
+	ret = psy->get_property(psy, POWER_SUPPLY_PROP_CURRENT_NOW, &val);
+	if (!ret)
+		*cur = val.intval;
+
+	return ret;
+}
+
+static void bq24261_wdt_reset_worker(struct work_struct *work)
+{
+
+	struct bq24261_charger *chip = container_of(work,
+			    struct bq24261_charger, wdt_work.work);
+	int ret;
+	ret = bq24261_reset_timer(chip);
+
+	if (ret)
+		dev_err(&chip->client->dev, "Error (%d) in WDT reset\n", ret);
+	else
+		dev_info(&chip->client->dev, "WDT reset\n");
+
+	schedule_delayed_work(&chip->wdt_work, WDT_RESET_DELAY);
+}
+
+static void bq24261_sw_charge_term_worker(struct work_struct *work)
+{
+
+	struct bq24261_charger *chip = container_of(work,
+						    struct bq24261_charger,
+						    sw_term_work.work);
+
+	power_supply_changed(NULL);
+
+	schedule_delayed_work(&chip->sw_term_work,
+			      CHRG_TERM_WORKER_DELAY);
+
+}
+
+int bq24261_get_bat_health(void)
+{
+
+	struct bq24261_charger *chip;
+
+	if (!bq24261_client)
+		return -ENODEV;
+
+	chip = i2c_get_clientdata(bq24261_client);
+
+	return chip->bat_health;
+}
+
+
+static void bq24261_low_supply_fault_work(struct work_struct *work)
+{
+	struct bq24261_charger *chip = container_of(work,
+						    struct bq24261_charger,
+						    low_supply_fault_work.work);
+
+	if (chip->chrgr_stat == BQ24261_CHRGR_STAT_FAULT) {
+		dev_err(&chip->client->dev, "Low Supply Fault detected!!\n");
+		chip->chrgr_health = POWER_SUPPLY_HEALTH_DEAD;
+		power_supply_changed(&chip->psy_usb);
+		schedule_delayed_work(&chip->exception_mon_work,
+					EXCEPTION_MONITOR_DELAY);
+		bq24261_dump_regs(true);
+	}
+	return;
+}
+
+
+/* is_bat_over_voltage: check battery is over voltage or not
+*  @chip: bq24261_charger context
+*
+*  This function is used to verify the over voltage condition.
+*  In some scenarios, HW generates Over Voltage exceptions when
+*  battery voltage is normal. This function uses the over voltage
+*  condition (voltage_max_design * 1.01) to verify battery is really
+*  over charged or not.
+*/
+
+static bool is_bat_over_voltage(struct bq24261_charger *chip,
+		bool verify_recovery)
+{
+
+	int bat_volt, bat_volt_max_des, ret;
+
+	ret = get_battery_voltage(&bat_volt);
+	if (ret)
+		return verify_recovery ? false : true;
+
+	ret = get_battery_volt_max_design(&bat_volt_max_des);
+
+	if (ret)
+		bat_volt_max_des = BQ24261_DEF_BAT_VOLT_MAX_DESIGN;
+
+	dev_info(&chip->client->dev, "bat_volt=%d Voltage Max Design=%d OVP_VOLT=%d OVP recover volt=%d\n",
+			bat_volt, bat_volt_max_des,
+			(bat_volt_max_des/1000 * BQ24261_OVP_MULTIPLIER),
+			(bat_volt_max_des/1000 *
+				BQ24261_OVP_RECOVER_MULTIPLIER));
+	if (verify_recovery) {
+		if ((bat_volt) <= (bat_volt_max_des / 1000 *
+				BQ24261_OVP_RECOVER_MULTIPLIER))
+			return true;
+		else
+			return false;
+	} else {
+		if ((bat_volt) >= (bat_volt_max_des / 1000 *
+					BQ24261_OVP_MULTIPLIER))
+			return true;
+		else
+			return false;
+	}
+
+	return false;
+}
+
+#define IS_BATTERY_OVER_VOLTAGE(chip) \
+	is_bat_over_voltage(chip , false)
+
+#define IS_BATTERY_OVER_VOLTAGE_RECOVERED(chip) \
+	is_bat_over_voltage(chip , true)
+
+static void handle_battery_over_voltage(struct bq24261_charger *chip)
+{
+	/* Set Health to Over Voltage. Disable charger to discharge
+	*  battery to reduce the battery voltage.
+	*/
+	chip->bat_health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+	bq24261_enable_charger(chip, false);
+	chip->is_charger_enabled = false;
+	cancel_delayed_work_sync(&chip->exception_mon_work);
+	schedule_delayed_work(&chip->exception_mon_work,
+			EXCEPTION_MONITOR_DELAY);
+}
+
+static void bq24261_exception_mon_work(struct work_struct *work)
+{
+	struct bq24261_charger *chip = container_of(work,
+			struct bq24261_charger,
+			exception_mon_work.work);
+	int ret;
+
+	if (chip->bat_health == POWER_SUPPLY_HEALTH_OVERVOLTAGE) {
+		if (IS_BATTERY_OVER_VOLTAGE_RECOVERED(chip)) {
+			dev_info(&chip->client->dev,
+					"Battery OVP Exception Recovered\n");
+			chip->bat_health = POWER_SUPPLY_HEALTH_GOOD;
+			bq24261_enable_charger(chip, true);
+			chip->is_charger_enabled = true;
+			power_supply_changed(&chip->psy_usb);
+		} else {
+			schedule_delayed_work(&chip->exception_mon_work,
+					EXCEPTION_MONITOR_DELAY);
+		}
+	}
+
+	if ((chip->chrgr_health == POWER_SUPPLY_HEALTH_OVERVOLTAGE) ||
+		(chip->chrgr_health == POWER_SUPPLY_HEALTH_DEAD)) {
+		ret = bq24261_read_reg(chip->client, BQ24261_STAT_CTRL0_ADDR);
+		if (ret < 0) {
+			dev_err(&chip->client->dev, "Error reading reg %x\n",
+					BQ24261_STAT_CTRL0_ADDR);
+		} else {
+			mutex_lock(&chip->stat_lock);
+			bq24261_handle_irq(chip, ret);
+			mutex_unlock(&chip->stat_lock);
+			if ((ret & BQ24261_STAT_MASK) == BQ24261_STAT_READY) {
+				dev_info(&chip->client->dev,
+				"Charger OVP/Low Supply Exception recovered\n");
+				power_supply_changed(&chip->psy_usb);
+			}
+		}
+	}
+}
+
+static int bq24261_handle_irq(struct bq24261_charger *chip, u8 stat_reg)
+{
+	struct i2c_client *client = chip->client;
+	bool notify = true;
+
+	dev_info(&client->dev, "%s:%d stat=0x%x\n",
+			__func__, __LINE__, stat_reg);
+
+	switch (stat_reg & BQ24261_STAT_MASK) {
+	case BQ24261_STAT_READY:
+		chip->chrgr_stat = BQ24261_CHRGR_STAT_READY;
+		chip->chrgr_health = POWER_SUPPLY_HEALTH_GOOD;
+		chip->bat_health = POWER_SUPPLY_HEALTH_GOOD;
+		dev_info(&client->dev, "Charger Status: Ready\n");
+		notify = false;
+		break;
+	case BQ24261_STAT_CHRG_PRGRSS:
+		chip->chrgr_stat = BQ24261_CHRGR_STAT_CHARGING;
+		chip->chrgr_health = POWER_SUPPLY_HEALTH_GOOD;
+		chip->bat_health = POWER_SUPPLY_HEALTH_GOOD;
+		dev_info(&client->dev, "Charger Status: Charge Progress\n");
+		bq24261_dump_regs(false);
+		break;
+	case BQ24261_STAT_CHRG_DONE:
+		chip->chrgr_health = POWER_SUPPLY_HEALTH_GOOD;
+		chip->bat_health = POWER_SUPPLY_HEALTH_GOOD;
+		dev_info(&client->dev, "Charger Status: Charge Done\n");
+
+		bq24261_enable_hw_charge_term(chip, false);
+		resume_charging(chip);
+		schedule_delayed_work(&chip->sw_term_work, 0);
+		break;
+
+	case BQ24261_STAT_FAULT:
+		break;
+	}
+
+	if (stat_reg & BQ24261_BOOST_MASK)
+		dev_info(&client->dev, "Boost Mode\n");
+
+	if ((stat_reg & BQ24261_STAT_MASK) == BQ24261_STAT_FAULT) {
+		bool dump_master = true;
+		chip->chrgr_stat = BQ24261_CHRGR_STAT_FAULT;
+
+		switch (stat_reg & BQ24261_FAULT_MASK) {
+		case BQ24261_VOVP:
+			chip->chrgr_health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+			schedule_delayed_work(&chip->exception_mon_work,
+					EXCEPTION_MONITOR_DELAY);
+			dev_err(&client->dev, "Charger OVP Fault\n");
+			break;
+
+		case BQ24261_LOW_SUPPLY:
+			notify = false;
+
+			if (chip->pdata->handle_low_supply)
+				chip->pdata->handle_low_supply();
+
+			if (chip->cable_type !=
+					POWER_SUPPLY_CHARGER_TYPE_NONE) {
+				schedule_delayed_work
+					(&chip->low_supply_fault_work,
+					5*HZ);
+				dev_dbg(&client->dev,
+					"Schedule Low Supply Fault work!!\n");
+			}
+			break;
+
+		case BQ24261_THERMAL_SHUTDOWN:
+			chip->chrgr_health = POWER_SUPPLY_HEALTH_OVERHEAT;
+			dev_err(&client->dev, "Charger Thermal Fault\n");
+			break;
+
+		case BQ24261_BATT_TEMP_FAULT:
+			chip->bat_health = POWER_SUPPLY_HEALTH_OVERHEAT;
+			dev_err(&client->dev, "Battery Temperature Fault\n");
+			break;
+
+		case BQ24261_TIMER_FAULT:
+			chip->bat_health = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+			chip->chrgr_health = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+			dev_err(&client->dev, "Charger Timer Fault\n");
+			break;
+
+		case BQ24261_BATT_OVP:
+			notify = false;
+			if (chip->bat_health !=
+					POWER_SUPPLY_HEALTH_OVERVOLTAGE) {
+				if (!IS_BATTERY_OVER_VOLTAGE(chip)) {
+					chip->chrgr_stat =
+						BQ24261_CHRGR_STAT_UNKNOWN;
+					resume_charging(chip);
+				} else {
+					dev_err(&client->dev, "Battery Over Voltage Fault\n");
+					handle_battery_over_voltage(chip);
+					notify = true;
+				}
+			}
+			break;
+		case BQ24261_NO_BATTERY:
+			dev_err(&client->dev, "No Battery Connected\n");
+			break;
+
+		}
+
+		if (chip->chrgr_stat == BQ24261_CHRGR_STAT_FAULT && notify)
+			bq24261_dump_regs(dump_master);
+	}
+
+	wake_up(&chip->wait_ready);
+
+	chip->is_vsys_on = bq24261_is_vsys_on(chip);
+	if (notify)
+		power_supply_changed(&chip->psy_usb);
+
+	return 0;
+}
+
+static void bq24261_irq_worker(struct work_struct *work)
+{
+	struct bq24261_charger *chip =
+	    container_of(work, struct bq24261_charger, irq_work);
+	int ret;
+
+	/*Lock to ensure that interrupt register readings are done
+	* and processed sequentially. The interrupt Fault registers
+	* are read on clear and without sequential processing double
+	* fault interrupts or fault recovery cannot be handlled propely
+	*/
+
+	mutex_lock(&chip->stat_lock);
+
+	dev_dbg(&chip->client->dev, "%s\n", __func__);
+
+	ret = bq24261_read_reg(chip->client, BQ24261_STAT_CTRL0_ADDR);
+	if (ret < 0)
+		dev_err(&chip->client->dev,
+			"Error (%d) in reading BQ24261_STAT_CTRL0_ADDR\n", ret);
+	else
+		bq24261_handle_irq(chip, ret);
+
+	mutex_unlock(&chip->stat_lock);
+}
+
+static irqreturn_t bq24261_thread_handler(int id, void *data)
+{
+	struct bq24261_charger *chip = (struct bq24261_charger *)data;
+
+	queue_work(system_nrt_wq, &chip->irq_work);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t bq24261_irq_handler(int irq, void *data)
+{
+	struct bq24261_charger *chip = (struct bq24261_charger *)data;
+	u8 intr_stat;
+
+	if (chip->irq_iomap) {
+		intr_stat = ioread8(chip->irq_iomap);
+		if ((intr_stat & chip->pdata->irq_mask)) {
+			dev_dbg(&chip->client->dev, "%s\n", __func__);
+			return IRQ_WAKE_THREAD;
+		}
+	}
+
+	return IRQ_NONE;
+}
+
+static void bq24261_boostmode_worker(struct work_struct *work)
+{
+	struct bq24261_charger *chip =
+	    container_of(work, struct bq24261_charger, otg_work);
+	struct bq24261_otg_event *evt, *tmp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&chip->otg_queue_lock, flags);
+	list_for_each_entry_safe(evt, tmp, &chip->otg_queue, node) {
+		list_del(&evt->node);
+		spin_unlock_irqrestore(&chip->otg_queue_lock, flags);
+
+		dev_info(&chip->client->dev,
+			"%s:%d state=%d\n", __FILE__, __LINE__,
+				evt->is_enable);
+		mutex_lock(&chip->stat_lock);
+		if (evt->is_enable)
+			bq24261_enable_boost_mode(chip, 1);
+		else
+			bq24261_enable_boost_mode(chip, 0);
+
+		mutex_unlock(&chip->stat_lock);
+		spin_lock_irqsave(&chip->otg_queue_lock, flags);
+		kfree(evt);
+
+	}
+	spin_unlock_irqrestore(&chip->otg_queue_lock, flags);
+}
+
+static int otg_handle_notification(struct notifier_block *nb,
+				   unsigned long event, void *param)
+{
+
+	struct bq24261_charger *chip =
+	    container_of(nb, struct bq24261_charger, otg_nb);
+	struct bq24261_otg_event *evt;
+
+	dev_dbg(&chip->client->dev, "OTG notification: %lu\n", event);
+	if (!param || event != USB_EVENT_DRIVE_VBUS)
+		return NOTIFY_DONE;
+
+	evt = kzalloc(sizeof(*evt), GFP_ATOMIC);
+	if (!evt) {
+		dev_err(&chip->client->dev,
+			"failed to allocate memory for OTG event\n");
+		return NOTIFY_DONE;
+	}
+
+	evt->is_enable = *(int *)param;
+	INIT_LIST_HEAD(&evt->node);
+
+	spin_lock(&chip->otg_queue_lock);
+	list_add_tail(&evt->node, &chip->otg_queue);
+	spin_unlock(&chip->otg_queue_lock);
+
+	queue_work(system_nrt_wq, &chip->otg_work);
+	return NOTIFY_OK;
+}
+
+static inline int register_otg_notifications(struct bq24261_charger *chip)
+{
+
+	int retval;
+
+	INIT_LIST_HEAD(&chip->otg_queue);
+	INIT_WORK(&chip->otg_work, bq24261_boostmode_worker);
+	spin_lock_init(&chip->otg_queue_lock);
+
+	chip->otg_nb.notifier_call = otg_handle_notification;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0))
+	chip->transceiver = usb_get_transceiver();
+#else
+	chip->transceiver = usb_get_phy(USB_PHY_TYPE_USB2);
+#endif
+	if (!chip->transceiver || IS_ERR(chip->transceiver)) {
+		dev_err(&chip->client->dev, "failed to get otg transceiver\n");
+		return -EINVAL;
+	}
+	retval = usb_register_notifier(chip->transceiver, &chip->otg_nb);
+	if (retval) {
+		dev_err(&chip->client->dev,
+			"failed to register otg notifier\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static enum bq2426x_model_num bq24261_get_model(int bq24261_rev_reg)
+{
+	switch (bq24261_rev_reg & BQ24261_REV_MASK) {
+	case BQ24260_REV:
+		return BQ24260;
+	case BQ24261_REV:
+	case BQ24261_2_3_REV:
+		return BQ24261;
+	default:
+		return BQ2426X;
+	}
+}
+
+static int bq24261_probe(struct i2c_client *client,
+			 const struct i2c_device_id *id)
+{
+	struct i2c_adapter *adapter;
+	struct bq24261_charger *chip;
+	int ret;
+	int bq2426x_rev;
+	enum bq2426x_model_num bq24261_rev_index;
+
+	adapter = to_i2c_adapter(client->dev.parent);
+
+	if (!client->dev.platform_data) {
+		dev_err(&client->dev, "platform data is null");
+		return -EFAULT;
+	}
+
+	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
+		dev_err(&client->dev,
+			"I2C adapter %s doesn'tsupport BYTE DATA transfer\n",
+			adapter->name);
+		return -EIO;
+	}
+
+	bq2426x_rev = bq24261_read_reg(client, BQ24261_VENDOR_REV_ADDR);
+	if (bq2426x_rev < 0) {
+		dev_err(&client->dev,
+			"Error (%d) in reading BQ24261_VENDOR_REV_ADDR\n", bq2426x_rev);
+		return bq2426x_rev;
+	}
+	dev_info(&client->dev, "bq2426x revision: 0x%x found!!\n", bq2426x_rev);
+
+	bq24261_rev_index = bq24261_get_model(bq2426x_rev);
+	if ((bq2426x_rev & BQ24261_VENDOR_MASK) != BQ24261_VENDOR) {
+		dev_err(&client->dev,
+			"Invalid Vendor/Revision number in BQ24261_VENDOR_REV_ADDR: %d",
+			bq2426x_rev);
+		return -ENODEV;
+	}
+
+	chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
+	if (!chip) {
+		dev_err(&client->dev, "mem alloc failed\n");
+		return -ENOMEM;
+	}
+
+	init_waitqueue_head(&chip->wait_ready);
+	i2c_set_clientdata(client, chip);
+	chip->pdata = client->dev.platform_data;
+
+	/* Remap IRQ map address to read the IRQ status */
+	if ((chip->pdata->irq_map) && (chip->pdata->irq_mask)) {
+		chip->irq_iomap = ioremap_nocache(chip->pdata->irq_map, 8);
+		if (!chip->irq_iomap) {
+			dev_err(&client->dev, "Failed: ioremap_nocache\n");
+			return -EFAULT;
+		}
+	}
+
+	chip->client = client;
+	chip->pdata = client->dev.platform_data;
+
+	chip->psy_usb.name = DEV_NAME;
+	chip->psy_usb.type = POWER_SUPPLY_TYPE_USB;
+	chip->psy_usb.properties = bq24261_usb_props;
+	chip->psy_usb.num_properties = ARRAY_SIZE(bq24261_usb_props);
+	chip->psy_usb.get_property = bq24261_usb_get_property;
+	chip->psy_usb.set_property = bq24261_usb_set_property;
+	chip->psy_usb.supplied_to = chip->pdata->supplied_to;
+	chip->psy_usb.num_supplicants = chip->pdata->num_supplicants;
+	chip->psy_usb.throttle_states = chip->pdata->throttle_states;
+	chip->psy_usb.num_throttle_states = chip->pdata->num_throttle_states;
+	chip->psy_usb.supported_cables = POWER_SUPPLY_CHARGER_TYPE_USB;
+	chip->max_cc = 1500;
+	chip->max_cv = 4350;
+	chip->chrgr_stat = BQ24261_CHRGR_STAT_UNKNOWN;
+	chip->chrgr_health = POWER_SUPPLY_HEALTH_UNKNOWN;
+	chip->revision = bq2426x_rev;
+
+	strncpy(chip->model_name,
+		bq24261_model_name[bq24261_rev_index].model_name,
+		MODEL_NAME_SIZE);
+	strncpy(chip->manufacturer, DEV_MANUFACTURER,
+		DEV_MANUFACTURER_NAME_SIZE);
+
+	mutex_init(&chip->stat_lock);
+	wake_lock_init(&chip->chrgr_en_wakelock,
+			WAKE_LOCK_SUSPEND, "chrgr_en_wakelock");
+	ret = power_supply_register(&client->dev, &chip->psy_usb);
+	if (ret) {
+		dev_err(&client->dev, "Failed: power supply register (%d)\n",
+			ret);
+		iounmap(chip->irq_iomap);
+		return ret;
+	}
+
+	INIT_DELAYED_WORK(&chip->sw_term_work, bq24261_sw_charge_term_worker);
+	INIT_DELAYED_WORK(&chip->low_supply_fault_work,
+				bq24261_low_supply_fault_work);
+	INIT_DELAYED_WORK(&chip->exception_mon_work,
+				bq24261_exception_mon_work);
+	if (((chip->revision & BQ24261_REV_MASK) == BQ24261_REV) ||
+			chip->pdata->is_wdt_kick_needed) {
+		INIT_DELAYED_WORK(&chip->wdt_work,
+					bq24261_wdt_reset_worker);
+	}
+
+	INIT_WORK(&chip->irq_work, bq24261_irq_worker);
+	if (chip->client->irq) {
+		ret = request_threaded_irq(chip->client->irq,
+					   bq24261_irq_handler,
+					   bq24261_thread_handler,
+					   IRQF_SHARED|IRQF_NO_SUSPEND,
+					   DEV_NAME, chip);
+		if (ret) {
+			dev_err(&client->dev, "Failed: request_irq (%d)\n",
+				ret);
+			iounmap(chip->irq_iomap);
+			power_supply_unregister(&chip->psy_usb);
+			return ret;
+		}
+	}
+
+	if (IS_BATTERY_OVER_VOLTAGE(chip))
+		handle_battery_over_voltage(chip);
+	else
+		chip->bat_health = POWER_SUPPLY_HEALTH_GOOD;
+
+	if (register_otg_notifications(chip))
+		dev_err(&client->dev, "Error in registering OTG notifications. Unable to supply power to Host\n");
+
+	bq24261_client = client;
+	power_supply_changed(&chip->psy_usb);
+	bq24261_debugfs_init();
+
+	return 0;
+}
+
+static int bq24261_remove(struct i2c_client *client)
+{
+	struct bq24261_charger *chip = i2c_get_clientdata(client);
+
+	if (client->irq)
+		free_irq(client->irq, chip);
+
+	flush_scheduled_work();
+	wake_lock_destroy(&chip->chrgr_en_wakelock);
+	if (chip->irq_iomap)
+		iounmap(chip->irq_iomap);
+	if (chip->transceiver)
+		usb_unregister_notifier(chip->transceiver, &chip->otg_nb);
+
+	power_supply_unregister(&chip->psy_usb);
+	bq24261_debugfs_exit();
+	return 0;
+}
+
+static int bq24261_suspend(struct device *dev)
+{
+	struct bq24261_charger *chip = dev_get_drvdata(dev);
+
+	if (((chip->revision & BQ24261_REV_MASK) == BQ24261_REV) ||
+			chip->pdata->is_wdt_kick_needed) {
+		if (chip->boost_mode)
+			cancel_delayed_work_sync(&chip->wdt_work);
+	}
+	dev_dbg(&chip->client->dev, "bq24261 suspend\n");
+	return 0;
+}
+
+static int bq24261_resume(struct device *dev)
+{
+	struct bq24261_charger *chip = dev_get_drvdata(dev);
+
+	if (((chip->revision & BQ24261_REV_MASK) == BQ24261_REV) ||
+			chip->pdata->is_wdt_kick_needed) {
+		if (chip->boost_mode)
+			bq24261_enable_boost_mode(chip, 1);
+	}
+
+	dev_dbg(&chip->client->dev, "bq24261 resume\n");
+	return 0;
+}
+
+static int bq24261_runtime_suspend(struct device *dev)
+{
+	dev_dbg(dev, "%s called\n", __func__);
+	return 0;
+}
+
+static int bq24261_runtime_resume(struct device *dev)
+{
+	dev_dbg(dev, "%s called\n", __func__);
+	return 0;
+}
+
+static int bq24261_runtime_idle(struct device *dev)
+{
+
+	dev_dbg(dev, "%s called\n", __func__);
+	return 0;
+}
+
+static const struct dev_pm_ops bq24261_pm_ops = {
+	.suspend = bq24261_suspend,
+	.resume = bq24261_resume,
+	.runtime_suspend = bq24261_runtime_suspend,
+	.runtime_resume = bq24261_runtime_resume,
+	.runtime_idle = bq24261_runtime_idle,
+};
+
+static const struct i2c_device_id bq24261_id[] = {
+	{DEV_NAME, 0},
+	{},
+};
+
+MODULE_DEVICE_TABLE(i2c, bq24261_id);
+
+static struct i2c_driver bq24261_driver = {
+	.driver = {
+		   .name = DEV_NAME,
+		   .pm = &bq24261_pm_ops,
+		   },
+	.probe = bq24261_probe,
+	.remove = bq24261_remove,
+	.id_table = bq24261_id,
+};
+
+static int __init bq24261_init(void)
+{
+	return i2c_add_driver(&bq24261_driver);
+}
+
+module_init(bq24261_init);
+
+static void __exit bq24261_exit(void)
+{
+	i2c_del_driver(&bq24261_driver);
+}
+
+module_exit(bq24261_exit);
+
+MODULE_AUTHOR("Jenny TC <jenny.tc@intel.com>");
+MODULE_DESCRIPTION("BQ24261 Charger Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/external_drivers/drivers/power/pmic_ccsm.c b/drivers/external_drivers/drivers/power/pmic_ccsm.c
new file mode 100644
index 0000000..44a2f5f
--- /dev/null
+++ b/drivers/external_drivers/drivers/power/pmic_ccsm.c
@@ -0,0 +1,2212 @@
+/*
+ * pmic_ccsm.c - Intel MID PMIC Charger Driver
+ *
+ * Copyright (C) 2011 Intel Corporation
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * Author: Jenny TC <jenny.tc@intel.com>
+ * Author: Yegnesh Iyer <yegnesh.s.iyer@intel.com>
+ */
+
+/* Includes */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/jiffies.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <linux/kfifo.h>
+#include <linux/param.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/usb/otg.h>
+#include <linux/power_supply.h>
+#include <linux/wakelock.h>
+#include <linux/power_supply.h>
+#include <linux/rpmsg.h>
+#include <linux/version.h>
+#include <asm/intel_basincove_gpadc.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
+#include <linux/iio/consumer.h>
+#else
+#include "../../../kernel/drivers/staging/iio/consumer.h"
+#endif
+#include <asm/intel_scu_pmic.h>
+#include <asm/intel_mid_rpmsg.h>
+#include <asm/intel_mid_remoteproc.h>
+#include <linux/io.h>
+#include <linux/sched.h>
+#include <linux/pm_runtime.h>
+#include <linux/sfi.h>
+#include <linux/async.h>
+#include <linux/reboot.h>
+#include <linux/notifier.h>
+#include <linux/power/battery_id.h>
+#include <linux/gpio.h>
+#include "pmic_ccsm.h"
+
+/* Macros */
+#define DRIVER_NAME "pmic_ccsm"
+#define PMIC_SRAM_INTR_ADDR 0xFFFFF616
+#define ADC_TO_TEMP 1
+#define TEMP_TO_ADC 0
+#define is_valid_temp(tmp)\
+	(!(tmp > chc.pdata->adc_tbl[0].temp ||\
+	tmp < chc.pdata->adc_tbl[chc.pdata->max_tbl_row_cnt - 1].temp))
+#define is_valid_adc_code(val)\
+	(!(val < chc.pdata->adc_tbl[0].adc_val ||\
+	val > chc.pdata->adc_tbl[chc.pdata->max_tbl_row_cnt - 1].adc_val))
+#define CONVERT_ADC_TO_TEMP(adc_val, temp)\
+	adc_temp_conv(adc_val, temp, ADC_TO_TEMP)
+#define CONVERT_TEMP_TO_ADC(temp, adc_val)\
+	adc_temp_conv(temp, adc_val, TEMP_TO_ADC)
+#define NEED_ZONE_SPLIT(bprof)\
+	 ((bprof->temp_mon_ranges < MIN_BATT_PROF))
+
+#define USB_WAKE_LOCK_TIMEOUT	(5 * HZ)
+
+/* 100mA value definition for setting the inlimit in bq24261 */
+#define USBINPUTICC100VAL	100
+
+#define SOC_ACOK 179
+static struct workqueue_struct *soc_acok_wq;
+
+/* Type definitions */
+static void pmic_bat_zone_changed(void);
+static void pmic_battery_overheat_handler(bool);
+
+/* Extern definitions */
+
+/* Global declarations */
+static DEFINE_MUTEX(pmic_lock);
+static struct pmic_chrgr_drv_context chc;
+static struct interrupt_info chgrirq0_info[] = {
+	{
+		CHGIRQ0_BZIRQ_MASK,
+		0,
+		"Battery temperature zone changed",
+		NULL,
+		NULL,
+		pmic_bat_zone_changed,
+		NULL,
+	},
+	{
+		CHGIRQ0_BAT_CRIT_MASK,
+		SCHGIRQ0_SBAT_CRIT_MASK,
+		NULL,
+		"Battery Over heat exception",
+		"Battery Over heat exception Recovered",
+		NULL,
+		pmic_battery_overheat_handler
+	},
+	{
+		CHGIRQ0_BAT0_ALRT_MASK,
+		SCHGIRQ0_SBAT0_ALRT_MASK,
+		NULL,
+		"Battery0 temperature inside boundary",
+		"Battery0 temperature outside boundary",
+		NULL,
+		pmic_battery_overheat_handler
+	},
+	{
+		CHGIRQ0_BAT1_ALRT_MASK,
+		SCHGIRQ0_SBAT1_ALRT_MASK,
+		NULL,
+		"Battery1 temperature inside boundary",
+		"Battery1 temperature outside boundary",
+		NULL,
+		NULL
+	},
+};
+
+u16 pmic_inlmt[][2] = {
+	{ 100, CHGRCTRL1_FUSB_INLMT_100},
+	{ 150, CHGRCTRL1_FUSB_INLMT_150},
+	{ 500, CHGRCTRL1_FUSB_INLMT_500},
+	{ 900, CHGRCTRL1_FUSB_INLMT_900},
+	{ 1500, CHGRCTRL1_FUSB_INLMT_1500},
+};
+
+
+static inline struct power_supply *get_psy_battery(void)
+{
+	struct class_dev_iter iter;
+	struct device *dev;
+	struct power_supply *pst;
+
+	class_dev_iter_init(&iter, power_supply_class, NULL, NULL);
+	while ((dev = class_dev_iter_next(&iter))) {
+		pst = (struct power_supply *)dev_get_drvdata(dev);
+		if (pst->type == POWER_SUPPLY_TYPE_BATTERY) {
+			class_dev_iter_exit(&iter);
+			return pst;
+		}
+	}
+	class_dev_iter_exit(&iter);
+
+	return NULL;
+}
+
+
+/* Function definitions */
+static void lookup_regval(u16 tbl[][2], size_t size, u16 in_val, u8 *out_val)
+{
+	int i;
+	for (i = 1; i < size; ++i)
+		if (in_val < tbl[i][0])
+			break;
+
+	*out_val = (u8)tbl[i-1][1];
+}
+
+static int interpolate_y(int dx1x0, int dy1y0, int dxx0, int y0)
+{
+	return y0 + DIV_ROUND_CLOSEST((dxx0 * dy1y0), dx1x0);
+}
+
+static int interpolate_x(int dy1y0, int dx1x0, int dyy0, int x0)
+{
+	return x0 + DIV_ROUND_CLOSEST((dyy0 * dx1x0), dy1y0);
+}
+
+static int adc_temp_conv(int in_val, int *out_val, int conv)
+{
+	int tbl_row_cnt, i;
+	struct temp_lookup *adc_temp_tbl;
+
+	if (!chc.pdata) {
+		dev_err(chc.dev, "ADC-lookup table not yet available\n");
+		return -ERANGE;
+	}
+
+	tbl_row_cnt = chc.pdata->max_tbl_row_cnt;
+	adc_temp_tbl = chc.pdata->adc_tbl;
+
+	if (conv == ADC_TO_TEMP) {
+		if (!is_valid_adc_code(in_val))
+			return -ERANGE;
+
+		if (in_val == adc_temp_tbl[tbl_row_cnt-1].adc_val)
+			i = tbl_row_cnt - 1;
+		else {
+			for (i = 0; i < tbl_row_cnt; ++i)
+				if (in_val < adc_temp_tbl[i].adc_val)
+					break;
+		}
+
+		*out_val =
+		    interpolate_y((adc_temp_tbl[i].adc_val
+					- adc_temp_tbl[i - 1].adc_val),
+				  (adc_temp_tbl[i].temp
+				   - adc_temp_tbl[i - 1].temp),
+				  (in_val - adc_temp_tbl[i - 1].adc_val),
+				  adc_temp_tbl[i - 1].temp);
+	} else {
+		if (!is_valid_temp(in_val))
+			return -ERANGE;
+
+		if (in_val == adc_temp_tbl[tbl_row_cnt-1].temp)
+			i = tbl_row_cnt - 1;
+		else {
+			for (i = 0; i < tbl_row_cnt; ++i)
+				if (in_val > adc_temp_tbl[i].temp)
+					break;
+		}
+
+		*((short int *)out_val) =
+		    interpolate_x((adc_temp_tbl[i].temp
+					- adc_temp_tbl[i - 1].temp),
+				  (adc_temp_tbl[i].adc_val
+				   - adc_temp_tbl[i - 1].adc_val),
+				  (in_val - adc_temp_tbl[i - 1].temp),
+				  adc_temp_tbl[i - 1].adc_val);
+	}
+	return 0;
+}
+
+static int pmic_read_reg(u16 addr, u8 *val)
+{
+	int ret;
+
+	ret = intel_scu_ipc_ioread8(addr, val);
+	if (ret) {
+		dev_err(chc.dev,
+			"Error in intel_scu_ipc_ioread8 0x%.4x\n", addr);
+		return -EIO;
+	}
+	return 0;
+}
+
+
+static int __pmic_write_tt(u8 addr, u8 data)
+{
+	int ret;
+
+	ret = intel_scu_ipc_iowrite8(CHRTTADDR_ADDR, addr);
+	if (unlikely(ret))
+		return ret;
+
+	return intel_scu_ipc_iowrite8(CHRTTDATA_ADDR, data);
+}
+
+static inline int pmic_write_tt(u8 addr, u8 data)
+{
+	int ret;
+
+	mutex_lock(&pmic_lock);
+	ret = __pmic_write_tt(addr, data);
+	mutex_unlock(&pmic_lock);
+
+	/* If access is blocked return success to avoid additional
+	*  error handling at client side
+	*/
+	if (ret == -EACCES) {
+		dev_warn(chc.dev, "IPC write blocked due to unsigned kernel/invalid battery\n");
+		ret = 0;
+	}
+	return ret;
+}
+
+static int __pmic_read_tt(u8 addr, u8 *data)
+{
+	int ret;
+
+	ret = intel_scu_ipc_iowrite8(CHRTTADDR_ADDR, addr);
+	if (ret)
+		return ret;
+
+	usleep_range(2000, 3000);
+
+	return intel_scu_ipc_ioread8(CHRTTDATA_ADDR, data);
+}
+
+static inline int pmic_read_tt(u8 addr, u8 *data)
+{
+	int ret;
+
+	mutex_lock(&pmic_lock);
+	ret = __pmic_read_tt(addr, data);
+	mutex_unlock(&pmic_lock);
+
+	return ret;
+}
+
+static int pmic_update_tt(u8 addr, u8 mask, u8 data)
+{
+	u8 tdata;
+	int ret;
+
+	mutex_lock(&pmic_lock);
+	ret = __pmic_read_tt(addr, &tdata);
+	if (unlikely(ret))
+		goto exit;
+
+	tdata = (tdata & ~mask) | (data & mask);
+	ret = __pmic_write_tt(addr, tdata);
+exit:
+	mutex_unlock(&pmic_lock);
+	return ret;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int pmic_chrgr_reg_show(struct seq_file *seq, void *unused)
+{
+	int ret;
+	u16 addr;
+	u16 val1;
+	u8 val;
+
+	addr = *((u8 *)seq->private);
+
+	if (addr == CHRGRIRQ1_ADDR) {
+		val1 = ioread16(chc.pmic_intr_iomap);
+		val = (u8)(val1 >> 8);
+	} else if (addr == CHGRIRQ0_ADDR) {
+		val1 = ioread16(chc.pmic_intr_iomap);
+		val = (u8)val1;
+	} else {
+		ret = pmic_read_reg(addr, &val);
+		if (ret != 0) {
+			dev_err(chc.dev,
+				"Error reading tt register 0x%2x\n",
+				addr);
+			return -EIO;
+		}
+	}
+
+	seq_printf(seq, "0x%x\n", val);
+	return 0;
+}
+
+static int pmic_chrgr_tt_reg_show(struct seq_file *seq, void *unused)
+{
+	int ret;
+	u8 addr;
+	u8 val;
+
+	addr = *((u8 *)seq->private);
+
+	ret = pmic_read_tt(addr, &val);
+	if (ret != 0) {
+		dev_err(chc.dev,
+			"Error reading tt register 0x%2x\n",
+			addr);
+		return -EIO;
+	}
+
+	seq_printf(seq, "0x%x\n", val);
+	return 0;
+}
+
+static int pmic_chrgr_tt_reg_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, pmic_chrgr_tt_reg_show, inode->i_private);
+}
+
+static int pmic_chrgr_reg_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, pmic_chrgr_reg_show, inode->i_private);
+}
+
+static struct dentry *charger_debug_dir;
+static struct pmic_regs_def pmic_regs_bc[] = {
+	PMIC_REG_DEF(PMIC_ID_ADDR),
+	PMIC_REG_DEF(IRQLVL1_ADDR),
+	PMIC_REG_DEF(IRQLVL1_MASK_ADDR),
+	PMIC_REG_DEF(CHGRIRQ0_ADDR),
+	PMIC_REG_DEF(SCHGRIRQ0_ADDR),
+	PMIC_REG_DEF(MCHGRIRQ0_ADDR),
+	PMIC_REG_DEF(LOWBATTDET0_ADDR),
+	PMIC_REG_DEF(LOWBATTDET1_ADDR),
+	PMIC_REG_DEF(BATTDETCTRL_ADDR),
+	PMIC_REG_DEF(VBUSDETCTRL_ADDR),
+	PMIC_REG_DEF(VDCINDETCTRL_ADDR),
+	PMIC_REG_DEF(CHRGRIRQ1_ADDR),
+	PMIC_REG_DEF(SCHGRIRQ1_ADDR),
+	PMIC_REG_DEF(MCHGRIRQ1_ADDR),
+	PMIC_REG_DEF(CHGRCTRL0_ADDR),
+	PMIC_REG_DEF(CHGRCTRL1_ADDR),
+	PMIC_REG_DEF(CHGRSTATUS_ADDR),
+	PMIC_REG_DEF(USBIDCTRL_ADDR),
+	PMIC_REG_DEF(USBIDSTAT_ADDR),
+	PMIC_REG_DEF(WAKESRC_ADDR),
+	PMIC_REG_DEF(THRMBATZONE_ADDR_BC),
+	PMIC_REG_DEF(THRMZN0L_ADDR_BC),
+	PMIC_REG_DEF(THRMZN0H_ADDR_BC),
+	PMIC_REG_DEF(THRMZN1L_ADDR_BC),
+	PMIC_REG_DEF(THRMZN1H_ADDR_BC),
+	PMIC_REG_DEF(THRMZN2L_ADDR_BC),
+	PMIC_REG_DEF(THRMZN2H_ADDR_BC),
+	PMIC_REG_DEF(THRMZN3L_ADDR_BC),
+	PMIC_REG_DEF(THRMZN3H_ADDR_BC),
+	PMIC_REG_DEF(THRMZN4L_ADDR_BC),
+	PMIC_REG_DEF(THRMZN4H_ADDR_BC),
+};
+
+static struct pmic_regs_def pmic_regs_sc[] = {
+	PMIC_REG_DEF(PMIC_ID_ADDR),
+	PMIC_REG_DEF(IRQLVL1_ADDR),
+	PMIC_REG_DEF(IRQLVL1_MASK_ADDR),
+	PMIC_REG_DEF(CHGRIRQ0_ADDR),
+	PMIC_REG_DEF(SCHGRIRQ0_ADDR),
+	PMIC_REG_DEF(MCHGRIRQ0_ADDR),
+	PMIC_REG_DEF(LOWBATTDET0_ADDR),
+	PMIC_REG_DEF(LOWBATTDET1_ADDR),
+	PMIC_REG_DEF(BATTDETCTRL_ADDR),
+	PMIC_REG_DEF(VBUSDETCTRL_ADDR),
+	PMIC_REG_DEF(VDCINDETCTRL_ADDR),
+	PMIC_REG_DEF(CHRGRIRQ1_ADDR),
+	PMIC_REG_DEF(SCHGRIRQ1_ADDR),
+	PMIC_REG_DEF(MCHGRIRQ1_ADDR),
+	PMIC_REG_DEF(CHGRCTRL0_ADDR),
+	PMIC_REG_DEF(CHGRCTRL1_ADDR),
+	PMIC_REG_DEF(CHGRSTATUS_ADDR),
+	PMIC_REG_DEF(USBIDCTRL_ADDR),
+	PMIC_REG_DEF(USBIDSTAT_ADDR),
+	PMIC_REG_DEF(WAKESRC_ADDR),
+	PMIC_REG_DEF(USBPATH_ADDR),
+	PMIC_REG_DEF(USBSRCDETSTATUS_ADDR),
+	PMIC_REG_DEF(THRMBATZONE_ADDR_SC),
+	PMIC_REG_DEF(THRMZN0L_ADDR_SC),
+	PMIC_REG_DEF(THRMZN0H_ADDR_SC),
+	PMIC_REG_DEF(THRMZN1L_ADDR_SC),
+	PMIC_REG_DEF(THRMZN1H_ADDR_SC),
+	PMIC_REG_DEF(THRMZN2L_ADDR_SC),
+	PMIC_REG_DEF(THRMZN2H_ADDR_SC),
+	PMIC_REG_DEF(THRMZN3L_ADDR_SC),
+	PMIC_REG_DEF(THRMZN3H_ADDR_SC),
+	PMIC_REG_DEF(THRMZN4L_ADDR_SC),
+	PMIC_REG_DEF(THRMZN4H_ADDR_SC),
+};
+
+static struct pmic_regs_def pmic_tt_regs[] = {
+	PMIC_REG_DEF(TT_I2CDADDR_ADDR),
+	PMIC_REG_DEF(TT_CHGRINIT0OS_ADDR),
+	PMIC_REG_DEF(TT_CHGRINIT1OS_ADDR),
+	PMIC_REG_DEF(TT_CHGRINIT2OS_ADDR),
+	PMIC_REG_DEF(TT_CHGRINIT3OS_ADDR),
+	PMIC_REG_DEF(TT_CHGRINIT4OS_ADDR),
+	PMIC_REG_DEF(TT_CHGRINIT5OS_ADDR),
+	PMIC_REG_DEF(TT_CHGRINIT6OS_ADDR),
+	PMIC_REG_DEF(TT_CHGRINIT7OS_ADDR),
+	PMIC_REG_DEF(TT_USBINPUTICCOS_ADDR),
+	PMIC_REG_DEF(TT_USBINPUTICCMASK_ADDR),
+	PMIC_REG_DEF(TT_CHRCVOS_ADDR),
+	PMIC_REG_DEF(TT_CHRCVMASK_ADDR),
+	PMIC_REG_DEF(TT_CHRCCOS_ADDR),
+	PMIC_REG_DEF(TT_CHRCCMASK_ADDR),
+	PMIC_REG_DEF(TT_LOWCHROS_ADDR),
+	PMIC_REG_DEF(TT_LOWCHRMASK_ADDR),
+	PMIC_REG_DEF(TT_WDOGRSTOS_ADDR),
+	PMIC_REG_DEF(TT_WDOGRSTMASK_ADDR),
+	PMIC_REG_DEF(TT_CHGRENOS_ADDR),
+	PMIC_REG_DEF(TT_CHGRENMASK_ADDR),
+	PMIC_REG_DEF(TT_CUSTOMFIELDEN_ADDR),
+	PMIC_REG_DEF(TT_CHGRINIT0VAL_ADDR),
+	PMIC_REG_DEF(TT_CHGRINIT1VAL_ADDR),
+	PMIC_REG_DEF(TT_CHGRINIT2VAL_ADDR),
+	PMIC_REG_DEF(TT_CHGRINIT3VAL_ADDR),
+	PMIC_REG_DEF(TT_CHGRINIT4VAL_ADDR),
+	PMIC_REG_DEF(TT_CHGRINIT5VAL_ADDR),
+	PMIC_REG_DEF(TT_CHGRINIT6VAL_ADDR),
+	PMIC_REG_DEF(TT_CHGRINIT7VAL_ADDR),
+	PMIC_REG_DEF(TT_USBINPUTICC100VAL_ADDR),
+	PMIC_REG_DEF(TT_USBINPUTICC150VAL_ADDR),
+	PMIC_REG_DEF(TT_USBINPUTICC500VAL_ADDR),
+	PMIC_REG_DEF(TT_USBINPUTICC900VAL_ADDR),
+	PMIC_REG_DEF(TT_USBINPUTICC1500VAL_ADDR),
+	PMIC_REG_DEF(TT_CHRCVEMRGLOWVAL_ADDR),
+	PMIC_REG_DEF(TT_CHRCVCOLDVAL_ADDR),
+	PMIC_REG_DEF(TT_CHRCVCOOLVAL_ADDR),
+	PMIC_REG_DEF(TT_CHRCVWARMVAL_ADDR),
+	PMIC_REG_DEF(TT_CHRCVHOTVAL_ADDR),
+	PMIC_REG_DEF(TT_CHRCVEMRGHIVAL_ADDR),
+	PMIC_REG_DEF(TT_CHRCCEMRGLOWVAL_ADDR),
+	PMIC_REG_DEF(TT_CHRCCCOLDVAL_ADDR),
+	PMIC_REG_DEF(TT_CHRCCCOOLVAL_ADDR),
+	PMIC_REG_DEF(TT_CHRCCWARMVAL_ADDR),
+	PMIC_REG_DEF(TT_CHRCCHOTVAL_ADDR),
+	PMIC_REG_DEF(TT_CHRCCEMRGHIVAL_ADDR),
+	PMIC_REG_DEF(TT_LOWCHRENVAL_ADDR),
+	PMIC_REG_DEF(TT_LOWCHRDISVAL_ADDR),
+};
+
+void dump_pmic_regs(void)
+{
+	int vendor_id = chc.pmic_id & PMIC_VENDOR_ID_MASK;
+	u32 pmic_reg_cnt = 0;
+	u32 reg_index;
+	u8 data;
+	int retval;
+	struct pmic_regs_def *pmic_regs = NULL;
+
+	if (vendor_id == BASINCOVE_VENDORID) {
+		pmic_reg_cnt = ARRAY_SIZE(pmic_regs_bc);
+		pmic_regs = pmic_regs_bc;
+	} else if (vendor_id == SHADYCOVE_VENDORID) {
+		pmic_reg_cnt = ARRAY_SIZE(pmic_regs_sc);
+		pmic_regs = pmic_regs_sc;
+	}
+
+	dev_info(chc.dev, "PMIC Register dump\n");
+	dev_info(chc.dev, "====================\n");
+
+	for (reg_index = 0; reg_index < pmic_reg_cnt; reg_index++) {
+
+		retval = intel_scu_ipc_ioread8(pmic_regs[reg_index].addr,
+				&data);
+		if (retval)
+			dev_err(chc.dev, "Error in reading %x\n",
+				pmic_regs[reg_index].addr);
+		else
+			dev_info(chc.dev, "0x%x=0x%x\n",
+				pmic_regs[reg_index].addr, data);
+	}
+	dev_info(chc.dev, "====================\n");
+}
+
+void dump_pmic_tt_regs(void)
+{
+	u32 pmic_tt_reg_cnt = ARRAY_SIZE(pmic_tt_regs);
+	u32 reg_index;
+	u8 data;
+	int retval;
+
+	dev_info(chc.dev, "PMIC CHRGR TT dump\n");
+	dev_info(chc.dev, "====================\n");
+
+	for (reg_index = 0; reg_index < pmic_tt_reg_cnt; reg_index++) {
+
+		retval = pmic_read_tt(pmic_tt_regs[reg_index].addr, &data);
+		if (retval)
+			dev_err(chc.dev, "Error in reading %x\n",
+				pmic_tt_regs[reg_index].addr);
+		else
+			dev_info(chc.dev, "0x%x=0x%x\n",
+				pmic_tt_regs[reg_index].addr, data);
+	}
+
+	dev_info(chc.dev, "====================\n");
+}
+static const struct file_operations pmic_chrgr_reg_fops = {
+	.open = pmic_chrgr_reg_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release
+};
+
+static const struct file_operations pmic_chrgr_tt_reg_fops = {
+	.open = pmic_chrgr_tt_reg_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release
+};
+
+static void pmic_debugfs_init(void)
+{
+	struct dentry *fentry;
+	struct dentry *pmic_regs_dir;
+	struct dentry *pmic_tt_regs_dir;
+
+	u32 reg_index;
+	int vendor_id = chc.pmic_id & PMIC_VENDOR_ID_MASK;
+	u32 pmic_reg_cnt = 0;
+	u32 pmic_tt_reg_cnt = ARRAY_SIZE(pmic_tt_regs);
+	char name[PMIC_REG_NAME_LEN] = {0};
+	struct pmic_regs_def *pmic_regs = NULL;
+
+	if (vendor_id == BASINCOVE_VENDORID) {
+		pmic_reg_cnt = ARRAY_SIZE(pmic_regs_bc);
+		pmic_regs = pmic_regs_bc;
+	} else if (vendor_id == SHADYCOVE_VENDORID) {
+		pmic_reg_cnt = ARRAY_SIZE(pmic_regs_sc);
+		pmic_regs = pmic_regs_sc;
+	}
+
+	/* Creating a directory under debug fs for charger */
+	charger_debug_dir = debugfs_create_dir(DRIVER_NAME , NULL) ;
+	if (charger_debug_dir == NULL)
+		goto debugfs_root_exit;
+
+	/* Create a directory for pmic charger registers */
+	pmic_regs_dir = debugfs_create_dir("pmic_ccsm_regs",
+			charger_debug_dir);
+
+	if (pmic_regs_dir == NULL)
+		goto debugfs_err_exit;
+
+	for (reg_index = 0; reg_index < pmic_reg_cnt; reg_index++) {
+
+		sprintf(name, "%s",
+			pmic_regs[reg_index].reg_name);
+
+		fentry = debugfs_create_file(name,
+				S_IRUGO,
+				pmic_regs_dir,
+				&pmic_regs[reg_index].addr,
+				&pmic_chrgr_reg_fops);
+
+		if (fentry == NULL)
+			goto debugfs_err_exit;
+	}
+
+	/* Create a directory for pmic tt charger registers */
+	pmic_tt_regs_dir = debugfs_create_dir("pmic_ccsm_tt_regs",
+			charger_debug_dir);
+
+	if (pmic_tt_regs_dir == NULL)
+		goto debugfs_err_exit;
+
+	for (reg_index = 0; reg_index < pmic_tt_reg_cnt; reg_index++) {
+
+		sprintf(name, "%s", pmic_tt_regs[reg_index].reg_name);
+
+		fentry = debugfs_create_file(name,
+				S_IRUGO,
+				pmic_tt_regs_dir,
+				&pmic_tt_regs[reg_index].addr,
+				&pmic_chrgr_tt_reg_fops);
+
+		if (fentry == NULL)
+			goto debugfs_err_exit;
+	}
+
+	dev_dbg(chc.dev, "Debugfs created successfully!!");
+	return;
+
+debugfs_err_exit:
+	debugfs_remove_recursive(charger_debug_dir);
+debugfs_root_exit:
+	dev_err(chc.dev, "Error creating debugfs entry!!");
+	return;
+}
+
+static void pmic_debugfs_exit(void)
+{
+	if (charger_debug_dir != NULL)
+		debugfs_remove_recursive(charger_debug_dir);
+}
+#endif
+
+static void pmic_get_bat_zone(int *bat_zone)
+{
+	u8 data = 0;
+	u16 addr = 0;
+	int vendor_id, ret;
+
+	vendor_id = chc.pmic_id & PMIC_VENDOR_ID_MASK;
+	if (vendor_id == BASINCOVE_VENDORID)
+		addr = THRMBATZONE_ADDR_BC;
+	else if (vendor_id == SHADYCOVE_VENDORID)
+		addr = THRMBATZONE_ADDR_SC;
+
+	ret = intel_scu_ipc_ioread8(addr, &data);
+	if (ret) {
+		dev_err(chc.dev, "Error:%d in reading battery zone\n", ret);
+		/* Return undetermined zone in case of IPC failure */
+		*bat_zone = PMIC_BZONE_UNKNOWN;
+		return;
+	}
+
+	*bat_zone = (data & THRMBATZONE_MASK);
+}
+
+static void pmic_bat_zone_changed(void)
+{
+	int cur_zone;
+	struct power_supply *psy_bat;
+
+	pmic_get_bat_zone(&cur_zone);
+	dev_info(chc.dev, "Battery Zone changed. Current zone is %d\n",
+			cur_zone);
+
+	/* if current zone is the top and bottom zones then report OVERHEAT
+	 */
+	if ((cur_zone == PMIC_BZONE_LOW) || (cur_zone == PMIC_BZONE_HIGH))
+		chc.health = POWER_SUPPLY_HEALTH_OVERHEAT;
+	else if (cur_zone == PMIC_BZONE_UNKNOWN)
+		chc.health = POWER_SUPPLY_HEALTH_UNKNOWN;
+	else
+		chc.health = POWER_SUPPLY_HEALTH_GOOD;
+
+	psy_bat = get_psy_battery();
+
+	if (psy_bat && psy_bat->external_power_changed)
+		psy_bat->external_power_changed(psy_bat);
+
+	return;
+}
+
+static void pmic_battery_overheat_handler(bool stat)
+{
+	if (stat)
+		chc.health = POWER_SUPPLY_HEALTH_OVERHEAT;
+	else
+		chc.health = POWER_SUPPLY_HEALTH_GOOD;
+	return;
+}
+
+int pmic_get_health(void)
+{
+	return chc.health;
+}
+
+int pmic_enable_vbus(bool enable)
+{
+	int ret = 0;
+
+	if (enable)
+		ret = intel_scu_ipc_update_register(CHGRCTRL0_ADDR,
+				WDT_NOKICK_ENABLE, CHGRCTRL0_WDT_NOKICK_MASK);
+	else
+		ret = intel_scu_ipc_update_register(CHGRCTRL0_ADDR,
+				WDT_NOKICK_DISABLE, CHGRCTRL0_WDT_NOKICK_MASK);
+
+	/* If access is blocked return success to avoid additional
+	*  error handling at client side
+	*/
+	if (ret == -EACCES) {
+		dev_warn(chc.dev, "IPC blocked due to unsigned kernel/invalid battery\n");
+		ret = 0;
+	}
+
+	return ret;
+}
+
+int pmic_handle_otgmode(bool enable)
+{
+	int ret = 0;
+	int vendor_id;
+
+	vendor_id = chc.pmic_id & PMIC_VENDOR_ID_MASK;
+
+	if (vendor_id != SHADYCOVE_VENDORID) {
+		dev_err(chc.dev, "Ignore otg-mode event received\n");
+		return 0;
+	}
+
+	if (enable) {
+		ret = intel_scu_ipc_update_register(CHGRCTRL1_ADDR,
+				CHGRCTRL1_OTGMODE_MASK,
+				CHGRCTRL1_OTGMODE_MASK);
+
+		/* ShadyCove PMIC doesn’t kick charger-WDT during host-mode.
+		 * Driver does this regularly as a w/a. But, during suspend,
+		 * since this driver code doesn’t run, VBUS drops, DUT wakes
+		 * up, and re-enumerates again.
+		 * Hence, during host-mode, driver shall hold a wakelock.
+		 */
+		dev_info(chc.dev, "Hold wakelock for host-mode WDT-kick\n");
+		if (!wake_lock_active(&chc.otg_wa_wakelock)) {
+			wake_lock(&chc.otg_wa_wakelock);
+		}
+	} else {
+		ret = intel_scu_ipc_update_register(CHGRCTRL1_ADDR,
+				0x0, CHGRCTRL1_OTGMODE_MASK);
+
+		dev_info(chc.dev, "Release wakelock for host-mode WDT-kick\n");
+		if (wake_lock_active(&chc.otg_wa_wakelock)) {
+			wake_unlock(&chc.otg_wa_wakelock);
+		}
+	}
+
+	/* If access is blocked return success to avoid additional
+	*  error handling at client side
+	*/
+	if (ret == -EACCES) {
+		dev_warn(chc.dev, "IPC blocked due to unsigned kernel/invalid battery\n");
+		ret = 0;
+	}
+
+	return ret;
+}
+
+int pmic_enable_charging(bool enable)
+{
+	int ret;
+	u8 val;
+
+	if (enable) {
+		ret = intel_scu_ipc_update_register(CHGRCTRL1_ADDR,
+			CHGRCTRL1_FTEMP_EVENT_MASK, CHGRCTRL1_FTEMP_EVENT_MASK);
+		if (ret)
+			return ret;
+	}
+
+	val = (enable) ? 0 : EXTCHRDIS_ENABLE;
+
+	ret = intel_scu_ipc_update_register(CHGRCTRL0_ADDR,
+			val, CHGRCTRL0_EXTCHRDIS_MASK);
+	/* If access is blocked return success to avoid additional
+	*  error handling at client side
+	*/
+	if (ret == -EACCES) {
+		dev_warn(chc.dev, "IPC blocked due to unsigned kernel/invalid battery\n");
+		ret = 0;
+	}
+
+	return ret;
+}
+
+static inline int update_zone_cc(int zone, u8 reg_val)
+{
+	u8 addr_cc = TT_CHRCCHOTVAL_ADDR - zone;
+	dev_dbg(chc.dev, "%s:%X=%X\n", __func__, addr_cc, reg_val);
+	return pmic_write_tt(addr_cc, reg_val);
+}
+
+static inline int update_zone_cv(int zone, u8 reg_val)
+{
+	u8 addr_cv = TT_CHRCVHOTVAL_ADDR - zone;
+	dev_dbg(chc.dev, "%s:%X=%X\n", __func__, addr_cv, reg_val);
+	return pmic_write_tt(addr_cv, reg_val);
+}
+
+static inline int update_zone_temp(int zone, u16 adc_val)
+{
+	int ret;
+	u16 addr_tzone;
+	int vendor_id = chc.pmic_id & PMIC_VENDOR_ID_MASK;
+
+	if (vendor_id == BASINCOVE_VENDORID)
+		addr_tzone = THRMZN4H_ADDR_BC - (2 * zone);
+	else if (vendor_id == SHADYCOVE_VENDORID) {
+		/* to take care of address-discontinuity of zone-registers */
+		int offset_zone = zone;
+		if (zone >= 3)
+			offset_zone += 1;
+
+		addr_tzone = THRMZN4H_ADDR_SC - (2 * offset_zone);
+
+		/*
+		 * Override the adc values received from the LUT with the
+		 * values received from the PMIC hatdware team. SC pmic gets
+		 * 12-bit of ADC results however only 9-bits of the values can
+		 * be programmed into the temperature zone registers.
+		 */
+		switch (zone) {
+		case 0:
+			adc_val = THRMZN4_SC_ADCVAL;
+			break;
+		case 1:
+			adc_val = THRMZN3_SC_ADCVAL;
+			break;
+		case 2:
+			adc_val = THRMZN2_SC_ADCVAL;
+			break;
+		case 3:
+			adc_val = THRMZN1_SC_ADCVAL;
+			break;
+		case 4:
+			adc_val = THRMZN0_SC_ADCVAL;
+			break;
+		default:
+			dev_err(chc.dev, "no ADC default values\n");
+		}
+	} else {
+		dev_err(chc.dev, "%s: invalid vendor id %X\n", __func__, vendor_id);
+		return -EINVAL;
+	}
+
+	ret = intel_scu_ipc_iowrite8(addr_tzone, (u8)(adc_val >> 8));
+	if (unlikely(ret))
+		return ret;
+	dev_dbg(chc.dev, "%s:%X:%X=%X\n", __func__, addr_tzone,
+				(addr_tzone+1), adc_val);
+
+	return intel_scu_ipc_iowrite8(addr_tzone+1, (u8)(adc_val & 0xFF));
+}
+
+int pmic_set_cc(int new_cc)
+{
+	struct ps_pse_mod_prof *bcprof = chc.actual_bcprof;
+	struct ps_pse_mod_prof *r_bcprof = chc.runtime_bcprof;
+	int temp_mon_ranges;
+	int new_cc1;
+	int ret;
+	int i, cur_zone;
+	u8 reg_val = 0;
+
+	pmic_get_bat_zone(&cur_zone);
+	dev_info(chc.dev, "%s: Battery Zone:%d\n", __func__, cur_zone);
+
+	/* No need to write PMIC if CC = 0 */
+	if (!new_cc)
+		return 0;
+
+	temp_mon_ranges = min_t(u16, bcprof->temp_mon_ranges,
+			BATT_TEMP_NR_RNG);
+
+	for (i = 0; i < temp_mon_ranges; ++i) {
+		new_cc1 = min_t(int, new_cc,
+				bcprof->temp_mon_range[i].full_chrg_cur);
+
+		if (new_cc1 != r_bcprof->temp_mon_range[i].full_chrg_cur) {
+			if (chc.pdata->cc_to_reg) {
+				chc.pdata->cc_to_reg(new_cc1, &reg_val);
+				ret = update_zone_cc(i, reg_val);
+				if (unlikely(ret))
+					return ret;
+			}
+			r_bcprof->temp_mon_range[i].full_chrg_cur = new_cc1;
+		}
+	}
+
+	/* send the new CC and CV */
+	intel_scu_ipc_update_register(CHGRCTRL1_ADDR,
+		CHGRCTRL1_FTEMP_EVENT_MASK, CHGRCTRL1_FTEMP_EVENT_MASK);
+
+	return 0;
+}
+
+int pmic_set_cv(int new_cv)
+{
+	struct ps_pse_mod_prof *bcprof = chc.actual_bcprof;
+	struct ps_pse_mod_prof *r_bcprof = chc.runtime_bcprof;
+	int temp_mon_ranges;
+	int new_cv1;
+	int ret;
+	int i, cur_zone;
+	u8 reg_val = 0;
+
+	pmic_get_bat_zone(&cur_zone);
+	dev_info(chc.dev, "%s: Battery Zone:%d\n", __func__, cur_zone);
+
+	/* No need to write PMIC if CV = 0 */
+	if (!new_cv)
+		return 0;
+
+	temp_mon_ranges = min_t(u16, bcprof->temp_mon_ranges,
+			BATT_TEMP_NR_RNG);
+
+	for (i = 0; i < temp_mon_ranges; ++i) {
+		new_cv1 = min_t(int, new_cv,
+				bcprof->temp_mon_range[i].full_chrg_vol);
+
+		if (new_cv1 != r_bcprof->temp_mon_range[i].full_chrg_vol) {
+			if (chc.pdata->cv_to_reg) {
+				chc.pdata->cv_to_reg(new_cv1, &reg_val);
+				ret = update_zone_cv(i, reg_val);
+				if (unlikely(ret))
+					return ret;
+			}
+			r_bcprof->temp_mon_range[i].full_chrg_vol = new_cv1;
+		}
+	}
+
+	/* send the new CC and CV */
+	intel_scu_ipc_update_register(CHGRCTRL1_ADDR,
+		CHGRCTRL1_FTEMP_EVENT_MASK, CHGRCTRL1_FTEMP_EVENT_MASK);
+
+	return 0;
+}
+
+int pmic_set_ilimma(int ilim_ma)
+{
+	u8 reg_val;
+	int ret;
+
+	lookup_regval(pmic_inlmt, ARRAY_SIZE(pmic_inlmt),
+			ilim_ma, &reg_val);
+	dev_dbg(chc.dev, "Setting inlmt %d in register %x=%x\n", ilim_ma,
+		CHGRCTRL1_ADDR, reg_val);
+	ret = intel_scu_ipc_iowrite8(CHGRCTRL1_ADDR, reg_val);
+
+	/* If access is blocked return success to avoid additional
+	*  error handling at client side
+	*/
+	if (ret == -EACCES) {
+		dev_warn(chc.dev, "IPC blocked due to unsigned kernel/invalid battery\n");
+		ret = 0;
+	}
+
+	return ret;
+}
+
+/**
+ * pmic_read_adc_val - read ADC value of specified sensors
+ * @channel: channel of the sensor to be sampled
+ * @sensor_val: pointer to the charger property to hold sampled value
+ * @chc :  battery info pointer
+ *
+ * Returns 0 if success
+ */
+static int pmic_read_adc_val(int channel, int *sensor_val,
+			      struct pmic_chrgr_drv_context *chc)
+{
+	int val;
+	int ret;
+	struct iio_channel *indio_chan;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0))
+	indio_chan = iio_st_channel_get("BATTEMP", "BATTEMP0");
+#else
+	indio_chan = iio_channel_get(NULL, "BATTEMP0");
+#endif
+	if (IS_ERR_OR_NULL(indio_chan)) {
+		ret = PTR_ERR(indio_chan);
+		goto exit;
+	}
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0))
+	ret = iio_st_read_channel_raw(indio_chan, &val);
+#else
+	ret = iio_read_channel_raw(indio_chan, &val);
+#endif
+	if (ret) {
+		dev_err(chc->dev, "IIO channel read error\n");
+		goto err_exit;
+	}
+
+	switch (channel) {
+	case GPADC_BATTEMP0:
+		ret = CONVERT_ADC_TO_TEMP(val, sensor_val);
+		break;
+	default:
+		dev_err(chc->dev, "invalid sensor%d", channel);
+		ret = -EINVAL;
+	}
+	dev_dbg(chc->dev, "pmic_ccsm pmic_ccsm.0: %s adc val=%x, %d temp=%d\n",
+		__func__, val, val, *sensor_val);
+
+err_exit:
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0))
+	iio_st_channel_release(indio_chan);
+#else
+	iio_channel_release(indio_chan);
+#endif
+exit:
+	return ret;
+}
+
+int pmic_get_battery_pack_temp(int *temp)
+{
+	if (chc.invalid_batt)
+		return -ENODEV;
+	return pmic_read_adc_val(GPADC_BATTEMP0, temp, &chc);
+}
+
+static int scove_get_usbid(void)
+{
+	int ret;
+	struct iio_channel *indio_chan;
+	int rid, id = RID_UNKNOWN;
+	u8 val;
+
+	ret = pmic_read_reg(SCHGRIRQ1_ADDR, &val);
+	if (ret) {
+		dev_err(chc.dev,
+			"Error reading SCHGRIRQ1-register 0x%2x\n",
+			SCHGRIRQ1_ADDR);
+		return ret;
+	}
+
+	/* SCHGRIRQ1_REG SUSBIDDET bit definition:
+	 * 00 = RID_A/B/C ; 01 = RID_GND ; 10 = RID_FLOAT */
+	if ((val & SCHRGRIRQ1_SUSBIDGNDDET_MASK) == SHRT_FLT_DET)
+		return RID_FLOAT;
+	else if ((val & SCHRGRIRQ1_SUSBIDGNDDET_MASK) == SHRT_GND_DET)
+		return RID_GND;
+
+	indio_chan = iio_channel_get(NULL, "USBID");
+	if (IS_ERR_OR_NULL(indio_chan)) {
+		dev_err(chc.dev, "Failed to get IIO channel USBID\n");
+		ret = PTR_ERR(indio_chan);
+		goto exit;
+	}
+
+	ret = iio_read_channel_raw(indio_chan, &rid);
+	if (ret) {
+		dev_err(chc.dev, "IIO channel read error for USBID\n");
+		goto err_exit;
+	}
+
+	if ((rid > 11150) && (rid < 13640))
+		id = RID_A;
+	else if ((rid > 6120) && (rid < 7480))
+		id = RID_B;
+	else if ((rid > 3285) && (rid < 4015))
+		id = RID_C;
+
+err_exit:
+	iio_channel_release(indio_chan);
+exit:
+	return id;
+}
+
+static int get_charger_type(void)
+{
+	int ret, i = 0;
+	u8 val;
+	int chgr_type, rid;
+
+	do {
+		ret = pmic_read_reg(USBSRCDETSTATUS_ADDR, &val);
+		if (ret != 0) {
+			dev_err(chc.dev,
+				"Error reading USBSRCDETSTAT-register 0x%2x\n",
+				USBSRCDETSTATUS_ADDR);
+			return 0;
+		}
+
+		i++;
+		dev_info(chc.dev, "Read USBSRCDETSTATUS val: %x\n", val);
+
+		if (val & USBSRCDET_SUSBHWDET_DETSUCC)
+			break;
+		else
+			msleep(USBSRCDET_SLEEP_TIME);
+	} while (i < USBSRCDET_RETRY_CNT);
+
+	if (!(val & USBSRCDET_SUSBHWDET_DETSUCC)) {
+		dev_err(chc.dev, "Charger detection unsuccessful after %dms\n",
+			i * USBSRCDET_SLEEP_TIME);
+		return 0;
+	}
+
+	chgr_type = (val & USBSRCDET_USBSRCRSLT_MASK) >> 2;
+	dev_info(chc.dev, "Charger type after detection complete: %d\n",
+			(val & USBSRCDET_USBSRCRSLT_MASK) >> 2);
+
+	switch (chgr_type) {
+	case PMIC_CHARGER_TYPE_SDP:
+	case PMIC_CHARGER_TYPE_FLOAT_DP_DN:
+		return POWER_SUPPLY_CHARGER_TYPE_USB_SDP;
+	case PMIC_CHARGER_TYPE_DCP:
+		return POWER_SUPPLY_CHARGER_TYPE_USB_DCP;
+	case PMIC_CHARGER_TYPE_CDP:
+		return POWER_SUPPLY_CHARGER_TYPE_USB_CDP;
+	case PMIC_CHARGER_TYPE_ACA:
+		rid = scove_get_usbid();
+		if (rid == RID_A)
+			return POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK;
+		else if (rid != RID_UNKNOWN)
+			return POWER_SUPPLY_CHARGER_TYPE_USB_ACA;
+	case PMIC_CHARGER_TYPE_SE1:
+		return POWER_SUPPLY_CHARGER_TYPE_SE1;
+	case PMIC_CHARGER_TYPE_MHL:
+		return POWER_SUPPLY_CHARGER_TYPE_MHL;
+	default:
+		return POWER_SUPPLY_CHARGER_TYPE_NONE;
+	}
+}
+
+static void handle_internal_usbphy_notifications(int mask)
+{
+	struct power_supply_cable_props cap = {0};
+
+	if (mask) {
+		cap.chrg_evt = POWER_SUPPLY_CHARGER_EVENT_CONNECT;
+		cap.chrg_type = get_charger_type();
+		chc.charger_type = cap.chrg_type;
+	} else {
+		cap.chrg_evt = POWER_SUPPLY_CHARGER_EVENT_DISCONNECT;
+		cap.chrg_type = chc.charger_type;
+	}
+
+	if (cap.chrg_type == POWER_SUPPLY_CHARGER_TYPE_USB_SDP)
+		cap.ma = 0;
+	else if ((cap.chrg_type == POWER_SUPPLY_CHARGER_TYPE_USB_DCP)
+			|| (cap.chrg_type == POWER_SUPPLY_CHARGER_TYPE_USB_CDP)
+			|| (cap.chrg_type == POWER_SUPPLY_CHARGER_TYPE_SE1)
+			|| (cap.chrg_type == POWER_SUPPLY_CHARGER_TYPE_USB_ACA)
+			|| (cap.chrg_type == POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK))
+		cap.ma = 1500;
+
+	dev_info(chc.dev, "Notifying OTG ev:%d, evt:%d, chrg_type:%d, mA:%d\n",
+			USB_EVENT_CHARGER, cap.chrg_evt, cap.chrg_type,
+			cap.ma);
+	atomic_notifier_call_chain(&chc.otg->notifier,
+			USB_EVENT_CHARGER, &cap);
+}
+
+/* ShadyCove-WA for VBUS removal detect issue */
+int pmic_handle_low_supply(void)
+{
+	int ret;
+	u8 val;
+	int vendor_id = chc.pmic_id & PMIC_VENDOR_ID_MASK;
+
+	dev_info(chc.dev, "Low-supply event received from external-charger\n");
+	if (vendor_id == BASINCOVE_VENDORID || !chc.vbus_connect_status) {
+		dev_err(chc.dev, "Ignore Low-supply event received\n");
+		return 0;
+	}
+
+	msleep(200);
+	ret = pmic_read_reg(SCHGRIRQ1_ADDR, &val);
+	if (ret) {
+		dev_err(chc.dev,
+			"Error reading SCHGRIRQ1-register 0x%2x\n",
+			SCHGRIRQ1_ADDR);
+		return ret;
+	}
+
+	if (!(val & SCHRGRIRQ1_SVBUSDET_MASK)) {
+		int mask = 0;
+
+		dev_info(chc.dev, "USB VBUS Removed. Notifying OTG driver\n");
+		mutex_lock(&chc.evt_queue_lock);
+		chc.vbus_connect_status = false;
+		mutex_unlock(&chc.evt_queue_lock);
+
+		if (chc.is_internal_usb_phy && !chc.otg_mode_enabled)
+			handle_internal_usbphy_notifications(mask);
+		else {
+			atomic_notifier_call_chain(&chc.otg->notifier,
+					USB_EVENT_VBUS, &mask);
+			mutex_lock(&chc.evt_queue_lock);
+			chc.otg_mode_enabled = false;
+			mutex_unlock(&chc.evt_queue_lock);
+		}
+	}
+
+	return ret;
+}
+
+static void handle_level0_interrupt(u8 int_reg, u8 stat_reg,
+				struct interrupt_info int_info[],
+				int int_info_size)
+{
+	int i;
+	bool int_stat;
+	char *log_msg;
+
+	for (i = 0; i < int_info_size; ++i) {
+
+		/*continue if interrupt register bit is not set */
+		if (!(int_reg & int_info[i].int_reg_mask))
+			continue;
+
+		/*log message if interrupt bit is set */
+		if (int_info[i].log_msg_int_reg_true)
+			dev_err(chc.dev, "%s",
+					int_info[i].log_msg_int_reg_true);
+
+		/* interrupt bit is set.call int handler. */
+		if (int_info[i].int_handle)
+			int_info[i].int_handle();
+
+		/* continue if stat_reg_mask is zero which
+		 *  means ignore status register
+		 */
+		if (!(int_info[i].stat_reg_mask))
+			continue;
+
+		dev_dbg(chc.dev,
+				"stat_reg=%X int_info[i].stat_reg_mask=%X",
+				stat_reg, int_info[i].stat_reg_mask);
+
+		/* check if the interrupt status is true */
+		int_stat = (stat_reg & int_info[i].stat_reg_mask);
+
+		/* log message */
+		log_msg = int_stat ? int_info[i].log_msg_stat_true :
+			int_info[i].log_msg_stat_false;
+
+		if (log_msg)
+			dev_err(chc.dev, "%s", log_msg);
+
+		/* call status handler function */
+		if (int_info[i].stat_handle)
+			int_info[i].stat_handle(int_stat);
+
+	}
+
+	return ;
+}
+
+
+static void pmic_ccsm_check_host_connect(u8 int_reg, u8 stat_reg)
+{
+	int __maybe_unused mask;
+	int vendor_id = chc.pmic_id & PMIC_VENDOR_ID_MASK;
+	int __maybe_unused ret;
+	u8 __maybe_unused val;
+
+	if (vendor_id == SHADYCOVE_VENDORID) {
+		if (int_reg & CHRGRIRQ1_SUSBIDFLTDET_MASK)
+			dev_info(chc.dev,
+				"USBID-FLT interrupt received\n");
+
+		ret = pmic_read_reg(CHGRCTRL1_ADDR, &val);
+		mask = ((stat_reg & SCHRGRIRQ1_SUSBIDGNDDET_MASK)
+				== SHRT_GND_DET) ? 1 : 0;
+
+		if (int_reg & CHRGRIRQ1_SUSBIDGNDDET_MASK) {
+			if (mask) {
+				dev_info(chc.dev,
+				"USBID-GND Detected. Notifying OTG\n");
+				val |= (1<<6);
+				ret = intel_scu_ipc_iowrite8(
+				CHGRCTRL1_ADDR,
+				val);
+			} else {
+				dev_info(chc.dev,
+				"USBID-GND Removed. Notifying OTG\n");
+				val &= ~(1<<6);
+				ret = intel_scu_ipc_iowrite8(
+				CHGRCTRL1_ADDR,
+				val);
+				chc.otg_mode_enabled = false;
+			}
+
+			atomic_notifier_call_chain(&chc.otg->notifier,
+					USB_EVENT_ID, &mask);
+		}
+	}
+}
+
+static void handle_level1_interrupt(u8 int_reg, u8 stat_reg)
+{
+	int mask;
+	u8 val;
+	u16 val1;
+	int ret;
+	int vendor_id = chc.pmic_id & PMIC_VENDOR_ID_MASK;
+
+	if (!int_reg)
+		return;
+
+	pmic_ccsm_check_host_connect(int_reg, stat_reg);
+	mask = !!(int_reg & stat_reg);
+	if ((vendor_id == BASINCOVE_VENDORID) &&
+			(int_reg & CHRGRIRQ1_SUSBIDDET_MASK)) {
+		if (mask)
+			dev_info(chc.dev,
+				"USB ID Detected. Notifying OTG driver\n");
+		else
+			dev_info(chc.dev,
+				"USB ID Removed. Notifying OTG driver\n");
+
+		atomic_notifier_call_chain(&chc.otg->notifier,
+				USB_EVENT_ID, &mask);
+	}
+
+	if (int_reg & CHRGRIRQ1_SVBUSDET_MASK) {
+		if (mask) {
+			dev_info(chc.dev,
+				"USB VBUS Detected. Notifying OTG driver\n");
+			chc.vbus_connect_status = true;
+
+			msleep(100);
+			val1 = ioread16(chc.pmic_intr_iomap);
+			int_reg = (u8)(val1 >> 8);
+			pmic_read_reg(SCHGRIRQ1_ADDR, &stat_reg);
+			pmic_ccsm_check_host_connect(int_reg, stat_reg);
+			ret = pmic_read_reg(CHGRCTRL1_ADDR, &val);
+			if (ret != 0) {
+				dev_err(chc.dev,
+				"Error reading CHGRCTRL1-register 0x%2x\n",
+				CHGRCTRL1_ADDR);
+				return;
+			}
+
+			if (val & CHGRCTRL1_OTGMODE_MASK)
+				chc.otg_mode_enabled = true;
+		} else {
+			dev_info(chc.dev, "USB VBUS Removed. Notifying OTG driver\n");
+			chc.vbus_connect_status = false;
+		}
+
+		/* Avoid charger-detection flow in case of host-mode */
+		if (chc.is_internal_usb_phy && !chc.otg_mode_enabled)
+			handle_internal_usbphy_notifications(mask);
+		else {
+			atomic_notifier_call_chain(&chc.otg->notifier,
+					USB_EVENT_VBUS, &mask);
+			if (!mask)
+				chc.otg_mode_enabled = false;
+		}
+	}
+
+	return;
+}
+
+static irqreturn_t soc_acok_isr(int irq, void *dev_id)
+{
+	pr_info("%s, SOC_ACOK = %d\n", __func__,
+		gpio_get_value(SOC_ACOK) ? 1 : 0);
+
+	if ((gpio_get_value(SOC_ACOK) ? 1 : 0))
+		queue_delayed_work(soc_acok_wq, &chc.acok_irq_work, 0.5 * HZ);
+
+	return IRQ_HANDLED;
+}
+
+static void acok_irq_work_function(struct work_struct *work)
+{
+	pmic_handle_low_supply();
+}
+
+static int soc_acok_init(struct platform_device *pdev)
+{
+	int err = 0;
+	unsigned gpio = SOC_ACOK;
+	unsigned irq_num = gpio_to_irq(gpio);
+
+	soc_acok_wq = create_singlethread_workqueue("soc_acok_wq");
+	INIT_DELAYED_WORK(&chc.acok_irq_work, acok_irq_work_function);
+
+	err = gpio_request(gpio, "soc_acok");
+	if (err)
+		pr_info("Fail to request GPIO %d.\n", gpio);
+
+	err = gpio_direction_input(gpio);
+	if (err)
+		pr_info("Fail to configure GPIO %d as input.\n", gpio);
+
+	err = request_irq(irq_num, soc_acok_isr,
+		IRQF_TRIGGER_RISING | IRQF_SHARED, "soc_acok", pdev);
+	if (err < 0)
+		pr_info("Fail to request IRQ %d.\n", irq_num);
+
+	return 0;
+}
+
+static void pmic_event_worker(struct work_struct *work)
+{
+	struct pmic_event *evt, *tmp;
+
+	dev_dbg(chc.dev, "%s\n", __func__);
+
+	mutex_lock(&chc.evt_queue_lock);
+	list_for_each_entry_safe(evt, tmp, &chc.evt_queue, node) {
+		list_del(&evt->node);
+
+		dev_info(chc.dev, "CHGRIRQ0=%X SCHGRIRQ0=%X CHGRIRQ1=%x SCHGRIRQ1=%X\n",
+				evt->chgrirq0_int, evt->chgrirq0_stat,
+				evt->chgrirq1_int, evt->chgrirq1_stat);
+		if (evt->chgrirq0_int)
+			handle_level0_interrupt(evt->chgrirq0_int,
+				evt->chgrirq0_stat, chgrirq0_info,
+				ARRAY_SIZE(chgrirq0_info));
+
+		if (evt->chgrirq1_stat)
+			handle_level1_interrupt(evt->chgrirq1_int,
+							evt->chgrirq1_stat);
+		kfree(evt);
+	}
+
+	mutex_unlock(&chc.evt_queue_lock);
+}
+
+static irqreturn_t pmic_isr(int irq, void *data)
+{
+	u16 pmic_intr;
+	u8 chgrirq0_int;
+	u8 chgrirq1_int;
+	u8 mask = 0;
+	int vendor_id = chc.pmic_id & PMIC_VENDOR_ID_MASK;
+
+	if (vendor_id == BASINCOVE_VENDORID)
+		mask = ((CHRGRIRQ1_SVBUSDET_MASK) |
+				(CHRGRIRQ1_SUSBIDDET_MASK));
+	else if (vendor_id == SHADYCOVE_VENDORID)
+		mask = ((CHRGRIRQ1_SVBUSDET_MASK) |
+				(CHRGRIRQ1_SUSBIDFLTDET_MASK) |
+				(CHRGRIRQ1_SUSBIDGNDDET_MASK));
+
+	pmic_intr = ioread16(chc.pmic_intr_iomap);
+	chgrirq0_int = (u8)pmic_intr;
+	chgrirq1_int = (u8)(pmic_intr >> 8);
+
+	if (!chgrirq1_int && !(chgrirq0_int & PMIC_CHRGR_INT0_MASK))
+		return IRQ_NONE;
+
+	if ((chgrirq1_int & mask) && (!wake_lock_active(&chc.wakelock))) {
+		/*
+		Setting the Usb wake lock hold timeout to a safe value of 5s.
+		*/
+		wake_lock_timeout(&chc.wakelock, USB_WAKE_LOCK_TIMEOUT);
+	}
+
+	dev_dbg(chc.dev, "%s", __func__);
+
+	return IRQ_WAKE_THREAD;
+}
+static irqreturn_t pmic_thread_handler(int id, void *data)
+{
+	u16 pmic_intr;
+	struct pmic_event *evt;
+	int ret;
+
+	evt = kzalloc(sizeof(*evt), GFP_ATOMIC);
+	if (evt == NULL) {
+		dev_dbg(chc.dev, "Error allocating evt structure in fn:%s\n",
+			__func__);
+		return IRQ_NONE;
+	}
+
+	pmic_intr = ioread16(chc.pmic_intr_iomap);
+	evt->chgrirq0_int = (u8)pmic_intr;
+	evt->chgrirq1_int = (u8)(pmic_intr >> 8);
+	dev_dbg(chc.dev, "irq0=%x irq1=%x\n",
+		evt->chgrirq0_int, evt->chgrirq1_int);
+
+	/*
+	In case this is an external charger interrupt, we are
+	clearing the level 1 irq register and let external charger
+	driver handle the interrupt.
+	 */
+
+	if (!(evt->chgrirq1_int) &&
+		!(evt->chgrirq0_int & PMIC_CHRGR_CCSM_INT0_MASK)) {
+		intel_scu_ipc_update_register(IRQLVL1_MASK_ADDR, 0x00,
+				IRQLVL1_CHRGR_MASK);
+		if ((chc.invalid_batt) &&
+			(evt->chgrirq0_int & PMIC_CHRGR_EXT_CHRGR_INT_MASK)) {
+			dev_dbg(chc.dev, "Handling external charger interrupt!!\n");
+			kfree(evt);
+			return IRQ_HANDLED;
+		}
+		kfree(evt);
+		dev_dbg(chc.dev, "Unhandled interrupt!!\n");
+		return IRQ_NONE;
+	}
+
+	if (evt->chgrirq0_int & PMIC_CHRGR_CCSM_INT0_MASK) {
+		ret = intel_scu_ipc_ioread8(SCHGRIRQ0_ADDR,
+				&evt->chgrirq0_stat);
+		if (ret) {
+			dev_err(chc.dev,
+				"%s: Error(%d) in intel_scu_ipc_ioread8. Faile to read SCHGRIRQ0_ADDR\n",
+					__func__, ret);
+			kfree(evt);
+			goto end;
+		}
+	}
+	if (evt->chgrirq1_int) {
+		ret = intel_scu_ipc_ioread8(SCHGRIRQ1_ADDR,
+				&evt->chgrirq1_stat);
+		if (ret) {
+			dev_err(chc.dev,
+				"%s: Error(%d) in intel_scu_ipc_ioread8. Faile to read SCHGRIRQ1_ADDR\n",
+					__func__, ret);
+			kfree(evt);
+			goto end;
+		}
+	}
+
+	INIT_LIST_HEAD(&evt->node);
+
+	mutex_lock(&chc.evt_queue_lock);
+	list_add_tail(&evt->node, &chc.evt_queue);
+	mutex_unlock(&chc.evt_queue_lock);
+
+	queue_work(system_nrt_wq, &chc.evt_work);
+
+end:
+	/*clear first level IRQ */
+	dev_dbg(chc.dev, "Clearing IRQLVL1_MASK_ADDR\n");
+	intel_scu_ipc_update_register(IRQLVL1_MASK_ADDR, 0x00,
+			IRQLVL1_CHRGR_MASK);
+
+	return IRQ_HANDLED;
+}
+static int pmic_init(void)
+{
+	int ret = 0, i, temp_mon_ranges;
+	u16 adc_val;
+	u8 reg_val;
+	struct ps_pse_mod_prof *bcprof = chc.actual_bcprof;
+	temp_mon_ranges = min_t(u16, bcprof->temp_mon_ranges,
+			BATT_TEMP_NR_RNG);
+	for (i = 0; i < temp_mon_ranges; ++i) {
+		ret =
+		CONVERT_TEMP_TO_ADC(bcprof->temp_mon_range[i].temp_up_lim,
+				(int *)&adc_val);
+		if (unlikely(ret)) {
+			dev_err(chc.dev,
+				"Error converting temperature for zone %d!!\n",
+				i);
+			return ret;
+		}
+
+		ret = update_zone_temp(i, adc_val);
+		if (unlikely(ret)) {
+			dev_err(chc.dev,
+				"Error updating zone temp for zone %d\n",
+				i);
+			return ret;
+		}
+
+		if (chc.pdata->cc_to_reg)
+			chc.pdata->cc_to_reg(bcprof->temp_mon_range[i].
+					full_chrg_cur, &reg_val);
+
+		ret = update_zone_cc(i, reg_val);
+		if (unlikely(ret)) {
+			dev_err(chc.dev,
+				"Error updating zone cc for zone %d\n",
+				i);
+			return ret;
+		}
+
+		if (chc.pdata->cv_to_reg)
+			chc.pdata->cv_to_reg(bcprof->temp_mon_range[i].
+					full_chrg_vol, &reg_val);
+
+		ret = update_zone_cv(i, reg_val);
+		if (unlikely(ret)) {
+			dev_err(chc.dev,
+				"Error updating zone cv for zone %d\n",
+				i);
+			return ret;
+		}
+
+		/* Write lowest temp limit */
+		if (i == (bcprof->temp_mon_ranges - 1)) {
+			ret = CONVERT_TEMP_TO_ADC(bcprof->temp_low_lim,
+							(int *)&adc_val);
+			if (unlikely(ret)) {
+				dev_err(chc.dev,
+					"Error converting low lim temp!!\n");
+				return ret;
+			}
+
+			ret = update_zone_temp(i+1, adc_val);
+
+			if (unlikely(ret)) {
+				dev_err(chc.dev,
+					"Error updating last temp for zone %d\n",
+					i+1);
+				return ret;
+			}
+		}
+	}
+	ret = pmic_update_tt(TT_CUSTOMFIELDEN_ADDR,
+				TT_HOT_COLD_LC_MASK,
+				TT_HOT_COLD_LC_DIS);
+
+	if (unlikely(ret)) {
+		dev_err(chc.dev, "Error updating TT_CUSTOMFIELD_EN reg\n");
+		return ret;
+	}
+
+	if (chc.pdata->inlmt_to_reg)
+		chc.pdata->inlmt_to_reg(USBINPUTICC100VAL, &reg_val);
+
+	ret = pmic_write_tt(TT_USBINPUTICC100VAL_ADDR, reg_val);
+	return ret;
+}
+
+static inline void print_ps_pse_mod_prof(struct ps_pse_mod_prof *bcprof)
+{
+	int i, temp_mon_ranges;
+
+	dev_info(chc.dev, "ChrgProf: batt_id:%s\n", bcprof->batt_id);
+	dev_info(chc.dev, "ChrgProf: battery_type:%u\n", bcprof->battery_type);
+	dev_info(chc.dev, "ChrgProf: capacity:%u\n", bcprof->capacity);
+	dev_info(chc.dev, "ChrgProf: voltage_max:%u\n", bcprof->voltage_max);
+	dev_info(chc.dev, "ChrgProf: chrg_term_ma:%u\n", bcprof->chrg_term_ma);
+	dev_info(chc.dev, "ChrgProf: low_batt_mV:%u\n", bcprof->low_batt_mV);
+	dev_info(chc.dev, "ChrgProf: disch_tmp_ul:%d\n", bcprof->disch_tmp_ul);
+	dev_info(chc.dev, "ChrgProf: disch_tmp_ll:%d\n", bcprof->disch_tmp_ll);
+	dev_info(chc.dev, "ChrgProf: temp_mon_ranges:%u\n",
+			bcprof->temp_mon_ranges);
+	temp_mon_ranges = min_t(u16, bcprof->temp_mon_ranges,
+			BATT_TEMP_NR_RNG);
+
+	for (i = 0; i < temp_mon_ranges; ++i) {
+		dev_info(chc.dev, "ChrgProf: temp_up_lim[%d]:%d\n",
+				i, bcprof->temp_mon_range[i].temp_up_lim);
+		dev_info(chc.dev, "ChrgProf: full_chrg_vol[%d]:%d\n",
+				i, bcprof->temp_mon_range[i].full_chrg_vol);
+		dev_info(chc.dev, "ChrgProf: full_chrg_cur[%d]:%d\n",
+				i, bcprof->temp_mon_range[i].full_chrg_cur);
+		dev_info(chc.dev, "ChrgProf: maint_chrgr_vol_ll[%d]:%d\n",
+				i, bcprof->temp_mon_range[i].maint_chrg_vol_ll);
+		dev_info(chc.dev, "ChrgProf: maint_chrgr_vol_ul[%d]:%d\n",
+				i, bcprof->temp_mon_range[i].maint_chrg_vol_ul);
+		dev_info(chc.dev, "ChrgProf: maint_chrg_cur[%d]:%d\n",
+				i, bcprof->temp_mon_range[i].maint_chrg_cur);
+	}
+	dev_info(chc.dev, "ChrgProf: temp_low_lim:%d\n", bcprof->temp_low_lim);
+}
+
+static int find_tempzone_index(short int *interval,
+				int *num_zones,
+				short int *temp_up_lim)
+{
+	struct ps_pse_mod_prof *bprof = chc.sfi_bcprof->batt_prof;
+	int up_lim_index = 0, low_lim_index = -1;
+	int diff = 0;
+	int i;
+
+	*num_zones = MIN_BATT_PROF - bprof->temp_mon_ranges + 1;
+	if ((*num_zones) <= 0)
+		return 0;
+
+	for (i = 0 ; i < bprof->temp_mon_ranges ; i++) {
+		if (bprof->temp_mon_range[i].temp_up_lim == BATT_TEMP_WARM)
+			up_lim_index = i;
+	}
+
+	low_lim_index = up_lim_index + 1;
+
+	if (low_lim_index == bprof->temp_mon_ranges)
+		diff = bprof->temp_low_lim -
+			bprof->temp_mon_range[up_lim_index].temp_up_lim;
+	else
+		diff = bprof->temp_mon_range[low_lim_index].temp_up_lim -
+			bprof->temp_mon_range[up_lim_index].temp_up_lim;
+
+	*interval = diff / (*num_zones);
+	*temp_up_lim = bprof->temp_mon_range[up_lim_index].temp_up_lim;
+
+	return up_lim_index;
+}
+
+
+static void set_pmic_batt_prof(struct ps_pse_mod_prof *new_prof,
+				struct ps_pse_mod_prof *bprof)
+{
+	int num_zones;
+	int split_index;
+	int i, j = 0;
+	short int temp_up_lim = 0;
+	short int interval = 0;
+
+	if ((new_prof == NULL) || (bprof == NULL))
+		return;
+
+	if (!NEED_ZONE_SPLIT(bprof)) {
+		dev_info(chc.dev, "No need to split the zones!!\n");
+		memcpy(new_prof, bprof, sizeof(struct ps_pse_mod_prof));
+		return;
+	}
+
+	strcpy(&(new_prof->batt_id[0]), &(bprof->batt_id[0]));
+	new_prof->battery_type = bprof->battery_type;
+	new_prof->capacity = bprof->capacity;
+	new_prof->voltage_max =  bprof->voltage_max;
+	new_prof->chrg_term_ma = bprof->chrg_term_ma;
+	new_prof->low_batt_mV =  bprof->low_batt_mV;
+	new_prof->disch_tmp_ul = bprof->disch_tmp_ul;
+	new_prof->disch_tmp_ll = bprof->disch_tmp_ll;
+
+	split_index = find_tempzone_index(&interval, &num_zones, &temp_up_lim);
+
+	for (i = 0 ; i < bprof->temp_mon_ranges; i++) {
+		if ((i == split_index) && (num_zones > 0)) {
+			for (j = 0; j < num_zones; j++,
+					temp_up_lim += interval) {
+				memcpy(&new_prof->temp_mon_range[i+j],
+					&bprof->temp_mon_range[i],
+					sizeof(bprof->temp_mon_range[i]));
+				new_prof->temp_mon_range[i+j].temp_up_lim =
+					temp_up_lim;
+			}
+			j--;
+		} else {
+			memcpy(&new_prof->temp_mon_range[i+j],
+				&bprof->temp_mon_range[i],
+				sizeof(bprof->temp_mon_range[i]));
+		}
+	}
+
+	new_prof->temp_mon_ranges = i+j;
+	new_prof->temp_low_lim = bprof->temp_low_lim;
+
+	return;
+}
+
+
+static int pmic_check_initial_events(void)
+{
+	struct pmic_event *evt;
+	int ret;
+	u8 mask = (CHRGRIRQ1_SVBUSDET_MASK);
+	int vendor_id = chc.pmic_id & PMIC_VENDOR_ID_MASK;
+
+	evt = kzalloc(sizeof(struct pmic_event), GFP_KERNEL);
+	if (evt == NULL) {
+		dev_dbg(chc.dev, "Error allocating evt structure in fn:%s\n",
+			__func__);
+		return -1;
+	}
+
+	ret = intel_scu_ipc_ioread8(SCHGRIRQ0_ADDR, &evt->chgrirq0_stat);
+	evt->chgrirq0_int = evt->chgrirq0_stat;
+	ret = intel_scu_ipc_ioread8(SCHGRIRQ1_ADDR, &evt->chgrirq1_stat);
+	evt->chgrirq1_int = evt->chgrirq1_stat;
+
+	/* For ShadyCove, CHGRIRQ1_REG & SCHGRIRQ1_REG cannot be directly
+	 * mapped. If status has (01 = Short to ground detected), it means
+	 * USBIDGNDDET should be handled. If status has (10 = Floating pin
+	 * detected), it means USBIDFLTDET should be handled.
+	 */
+	if (vendor_id == SHADYCOVE_VENDORID) {
+		if ((evt->chgrirq1_stat & SCHRGRIRQ1_SUSBIDGNDDET_MASK)
+				== SHRT_FLT_DET) {
+			evt->chgrirq1_int |= CHRGRIRQ1_SUSBIDFLTDET_MASK;
+			evt->chgrirq1_int &= ~CHRGRIRQ1_SUSBIDGNDDET_MASK;
+		} else if ((evt->chgrirq1_stat & SCHRGRIRQ1_SUSBIDGNDDET_MASK)
+				== SHRT_GND_DET)
+			evt->chgrirq1_int |= CHRGRIRQ1_SUSBIDGNDDET_MASK;
+	}
+
+	if (evt->chgrirq1_stat || evt->chgrirq0_int) {
+		INIT_LIST_HEAD(&evt->node);
+		mutex_lock(&chc.evt_queue_lock);
+		list_add_tail(&evt->node, &chc.evt_queue);
+		mutex_unlock(&chc.evt_queue_lock);
+		schedule_work(&chc.evt_work);
+	}
+
+	if ((evt->chgrirq1_stat & mask) && !wake_lock_active(&chc.wakelock)) {
+		/*
+		Setting the Usb wake lock hold timeout to a safe value of 5s.
+		*/
+		wake_lock_timeout(&chc.wakelock, USB_WAKE_LOCK_TIMEOUT);
+	}
+
+	pmic_bat_zone_changed();
+
+	return ret;
+}
+
+/**
+ * pmic_charger_probe - PMIC charger probe function
+ * @pdev: pmic platform device structure
+ * Context: can sleep
+ *
+ * pmic charger driver initializes its internal data
+ * structure and other  infrastructure components for it
+ * to work as expected.
+ */
+static int pmic_chrgr_probe(struct platform_device *pdev)
+{
+	int retval = 0;
+	u8 val;
+
+	if (!pdev)
+		return -ENODEV;
+
+	chc.health = POWER_SUPPLY_HEALTH_UNKNOWN;
+	chc.dev = &pdev->dev;
+	chc.irq = platform_get_irq(pdev, 0);
+	chc.pdata = pdev->dev.platform_data;
+	platform_set_drvdata(pdev, &chc);
+
+	if (chc.pdata == NULL) {
+		dev_err(chc.dev, "Platform data not initialized\n");
+		return -EFAULT;
+	}
+
+	retval = intel_scu_ipc_ioread8(PMIC_ID_ADDR, &chc.pmic_id);
+	if (retval) {
+		dev_err(chc.dev,
+			"Error reading PMIC ID register\n");
+		return retval;
+	}
+
+	dev_info(chc.dev, "PMIC-ID: %x\n", chc.pmic_id);
+	if ((chc.pmic_id & PMIC_VENDOR_ID_MASK) == SHADYCOVE_VENDORID) {
+		retval = pmic_read_reg(USBPATH_ADDR, &val);
+		if (retval) {
+			dev_err(chc.dev,
+				"Error reading CHGRSTATUS-register 0x%2x\n",
+				CHGRSTATUS_ADDR);
+			return retval;
+		}
+
+		if (val & USBPATH_USBSEL_MASK) {
+			dev_info(chc.dev, "SOC-Internal-USBPHY used\n");
+			chc.is_internal_usb_phy = true;
+		} else
+			dev_info(chc.dev, "External-USBPHY used\n");
+	}
+
+	chc.sfi_bcprof = kzalloc(sizeof(struct ps_batt_chg_prof),
+				GFP_KERNEL);
+	if (chc.sfi_bcprof == NULL) {
+		dev_err(chc.dev,
+			"Error allocating memeory SFI battery profile\n");
+		return -ENOMEM;
+	}
+
+	retval = get_batt_prop(chc.sfi_bcprof);
+	if (retval) {
+		dev_err(chc.dev,
+			"Error reading battery profile from battid frmwrk\n");
+		kfree(chc.sfi_bcprof);
+		chc.invalid_batt = true;
+		chc.sfi_bcprof = NULL;
+	}
+
+	retval = intel_scu_ipc_update_register(CHGRCTRL0_ADDR, SWCONTROL_ENABLE,
+			CHGRCTRL0_SWCONTROL_MASK);
+	if (retval)
+		dev_err(chc.dev, "Error enabling sw control. Charging may continue in h/w control mode\n");
+
+	if (!chc.invalid_batt) {
+		chc.actual_bcprof = kzalloc(sizeof(struct ps_pse_mod_prof),
+					GFP_KERNEL);
+		if (chc.actual_bcprof == NULL) {
+			dev_err(chc.dev,
+				"Error allocating mem for local battery profile\n");
+			kfree(chc.sfi_bcprof);
+			return -ENOMEM;
+		}
+
+		chc.runtime_bcprof = kzalloc(sizeof(struct ps_pse_mod_prof),
+					GFP_KERNEL);
+		if (chc.runtime_bcprof == NULL) {
+			dev_err(chc.dev,
+			"Error allocating mem for runtime batt profile\n");
+			kfree(chc.sfi_bcprof);
+			kfree(chc.actual_bcprof);
+			return -ENOMEM;
+		}
+
+		set_pmic_batt_prof(chc.actual_bcprof,
+				chc.sfi_bcprof->batt_prof);
+		print_ps_pse_mod_prof(chc.actual_bcprof);
+		retval = pmic_init();
+		if (retval)
+			dev_err(chc.dev, "Error in Initializing PMIC. Continue in h/w charging mode\n");
+
+		memcpy(chc.runtime_bcprof, chc.actual_bcprof,
+			sizeof(struct ps_pse_mod_prof));
+	}
+
+	chc.pmic_intr_iomap = ioremap_nocache(PMIC_SRAM_INTR_ADDR, 8);
+	if (!chc.pmic_intr_iomap) {
+		dev_err(&pdev->dev, "ioremap Failed\n");
+		retval = -ENOMEM;
+		goto ioremap_failed;
+	}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0))
+	chc.otg = usb_get_transceiver();
+#else
+	chc.otg = usb_get_phy(USB_PHY_TYPE_USB2);
+#endif
+	if (!chc.otg || IS_ERR(chc.otg)) {
+		dev_err(&pdev->dev, "Failed to get otg transceiver!!\n");
+		retval = -ENOMEM;
+		goto otg_req_failed;
+	}
+
+	INIT_WORK(&chc.evt_work, pmic_event_worker);
+	INIT_LIST_HEAD(&chc.evt_queue);
+	mutex_init(&chc.evt_queue_lock);
+	wake_lock_init(&chc.wakelock, WAKE_LOCK_SUSPEND, "pmic_wakelock");
+	wake_lock_init(&chc.otg_wa_wakelock, WAKE_LOCK_SUSPEND,
+			"pmic_otg_wa_wakelock");
+
+	/* register interrupt */
+	retval = request_threaded_irq(chc.irq, pmic_isr,
+			pmic_thread_handler,
+			IRQF_SHARED|IRQF_NO_SUSPEND ,
+			DRIVER_NAME, &chc);
+	if (retval) {
+		dev_err(&pdev->dev,
+			"Error in request_threaded_irq(irq(%d)!!\n",
+			chc.irq);
+		goto otg_req_failed;
+	}
+
+	retval = gpio_request(57, "SOC_5V_EN");
+	if (retval)
+		pr_info("Failed to request GPIO 57.\n");
+	retval = gpio_direction_output(57, 0);
+	if (retval)
+		pr_info("Failed to configure GPIO 57 as output low.\n");
+
+	retval = pmic_check_initial_events();
+	if (unlikely(retval)) {
+		dev_err(&pdev->dev,
+			"Error posting initial events\n");
+		goto req_irq_failed;
+	}
+
+	/* unmask charger interrupts in second level IRQ register*/
+	retval = intel_scu_ipc_update_register(MCHGRIRQ0_ADDR, 0x00,
+			PMIC_CHRGR_INT0_MASK);
+	/* unmask charger interrupts in second level IRQ register*/
+	retval = intel_scu_ipc_iowrite8(MCHGRIRQ1_ADDR, 0x00);
+	if (unlikely(retval))
+		goto unmask_irq_failed;
+
+
+	/* unmask IRQLVL1 register */
+	retval = intel_scu_ipc_update_register(IRQLVL1_MASK_ADDR, 0x00,
+			IRQLVL1_CHRGR_MASK);
+	if (unlikely(retval))
+		goto unmask_irq_failed;
+
+	retval = intel_scu_ipc_update_register(USBIDCTRL_ADDR,
+			 ACADETEN_MASK | USBIDEN_MASK,
+			ACADETEN_MASK | USBIDEN_MASK);
+	if (unlikely(retval))
+		goto unmask_irq_failed;
+
+	chc.health = POWER_SUPPLY_HEALTH_GOOD;
+#ifdef CONFIG_DEBUG_FS
+	pmic_debugfs_init();
+#endif
+
+	soc_acok_init(pdev);
+	return 0;
+
+unmask_irq_failed:
+req_irq_failed:
+	free_irq(chc.irq, &chc);
+otg_req_failed:
+	iounmap(chc.pmic_intr_iomap);
+ioremap_failed:
+	kfree(chc.sfi_bcprof);
+	kfree(chc.actual_bcprof);
+	kfree(chc.runtime_bcprof);
+	return retval;
+}
+
+static void pmic_chrgr_do_exit_ops(struct pmic_chrgr_drv_context *chc)
+{
+	/*TODO:
+	 * If charger is connected send IPC message to SCU to continue charging
+	 */
+#ifdef CONFIG_DEBUG_FS
+	pmic_debugfs_exit();
+#endif
+}
+
+/**
+ * pmic_charger_remove - PMIC Charger driver remove
+ * @pdev: PMIC charger platform device structure
+ * Context: can sleep
+ *
+ * PMIC charger finalizes its internal data structure and other
+ * infrastructure components that it initialized in
+ * pmic_chrgr_probe.
+ */
+static int pmic_chrgr_remove(struct platform_device *pdev)
+{
+	struct pmic_chrgr_drv_context *chc = platform_get_drvdata(pdev);
+
+	if (chc) {
+		pmic_chrgr_do_exit_ops(chc);
+		wake_lock_destroy(&chc->wakelock);
+		wake_lock_destroy(&chc->otg_wa_wakelock);
+		free_irq(chc->irq, chc);
+		iounmap(chc->pmic_intr_iomap);
+		kfree(chc->sfi_bcprof);
+		kfree(chc->actual_bcprof);
+		kfree(chc->runtime_bcprof);
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int pmic_chrgr_suspend(struct device *dev)
+{
+	dev_dbg(dev, "%s called\n", __func__);
+	return 0;
+}
+
+static int pmic_chrgr_resume(struct device *dev)
+{
+	dev_dbg(dev, "%s called\n", __func__);
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int pmic_chrgr_runtime_suspend(struct device *dev)
+{
+	dev_dbg(dev, "%s called\n", __func__);
+	return 0;
+}
+
+static int pmic_chrgr_runtime_resume(struct device *dev)
+{
+	dev_dbg(dev, "%s called\n", __func__);
+	return 0;
+}
+
+static int pmic_chrgr_runtime_idle(struct device *dev)
+{
+	dev_dbg(dev, "%s called\n", __func__);
+	return 0;
+}
+#endif
+
+/*********************************************************************
+ *		Driver initialisation and finalization
+ *********************************************************************/
+
+static const struct dev_pm_ops pmic_chrgr_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(pmic_chrgr_suspend,
+				pmic_chrgr_resume)
+	SET_RUNTIME_PM_OPS(pmic_chrgr_runtime_suspend,
+				pmic_chrgr_runtime_resume,
+				pmic_chrgr_runtime_idle)
+};
+
+static struct platform_driver pmic_chrgr_driver = {
+	.driver = {
+		   .name = DRIVER_NAME,
+		   .owner = THIS_MODULE,
+		   .pm = &pmic_chrgr_pm_ops,
+		   },
+	.probe = pmic_chrgr_probe,
+	.remove = pmic_chrgr_remove,
+};
+
+static int pmic_chrgr_init(void)
+{
+	return platform_driver_register(&pmic_chrgr_driver);
+}
+
+static void pmic_chrgr_exit(void)
+{
+	platform_driver_unregister(&pmic_chrgr_driver);
+}
+
+static int pmic_ccsm_rpmsg_probe(struct rpmsg_channel *rpdev)
+{
+	int ret = 0;
+
+	if (rpdev == NULL) {
+		pr_err("rpmsg channel not created\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	dev_info(&rpdev->dev, "Probed pmic_ccsm rpmsg device\n");
+
+	ret = pmic_chrgr_init();
+
+out:
+	return ret;
+}
+
+static void pmic_ccsm_rpmsg_remove(struct rpmsg_channel *rpdev)
+{
+	pmic_chrgr_exit();
+	dev_info(&rpdev->dev, "Removed pmic_ccsm rpmsg device\n");
+}
+
+static void pmic_ccsm_rpmsg_cb(struct rpmsg_channel *rpdev, void *data,
+					int len, void *priv, u32 src)
+{
+	dev_warn(&rpdev->dev, "unexpected, message\n");
+
+	print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+		       data, len,  true);
+}
+
+static struct rpmsg_device_id pmic_ccsm_rpmsg_id_table[] = {
+	{ .name	= "rpmsg_pmic_ccsm" },
+	{ },
+};
+MODULE_DEVICE_TABLE(rpmsg, pmic_ccsm_rpmsg_id_table);
+
+static struct rpmsg_driver pmic_ccsm_rpmsg = {
+	.drv.name	= KBUILD_MODNAME,
+	.drv.owner	= THIS_MODULE,
+	.id_table	= pmic_ccsm_rpmsg_id_table,
+	.probe		= pmic_ccsm_rpmsg_probe,
+	.callback	= pmic_ccsm_rpmsg_cb,
+	.remove		= pmic_ccsm_rpmsg_remove,
+};
+
+static int __init pmic_ccsm_rpmsg_init(void)
+{
+	return register_rpmsg_driver(&pmic_ccsm_rpmsg);
+}
+
+static void __exit pmic_ccsm_rpmsg_exit(void)
+{
+	return unregister_rpmsg_driver(&pmic_ccsm_rpmsg);
+}
+/* Defer init call so that dependant drivers will be loaded. Using  async
+ * for parallel driver initialization */
+late_initcall(pmic_ccsm_rpmsg_init);
+module_exit(pmic_ccsm_rpmsg_exit);
+
+MODULE_AUTHOR("Jenny TC <jenny.tc@intel.com>");
+MODULE_DESCRIPTION("PMIC Charger  Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/external_drivers/drivers/power/pmic_ccsm.h b/drivers/external_drivers/drivers/power/pmic_ccsm.h
new file mode 100644
index 0000000..3237f98
--- /dev/null
+++ b/drivers/external_drivers/drivers/power/pmic_ccsm.h
@@ -0,0 +1,383 @@
+/*
+ * pmic_ccsm.h - Intel MID PMIC CCSM Driver header file
+ *
+ * Copyright (C) 2011 Intel Corporation
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * Author: Jenny TC <jenny.tc@intel.com>
+ */
+
+#ifndef __PMIC_CCSM_H__
+#define __PMIC_CCSM_H__
+
+#include <asm/pmic_pdata.h>
+/*********************************************************************
+ *		Generic defines
+ *********************************************************************/
+
+#define D7 (1 << 7)
+#define D6 (1 << 6)
+#define D5 (1 << 5)
+#define D4 (1 << 4)
+#define D3 (1 << 3)
+#define D2 (1 << 2)
+#define D1 (1 << 1)
+#define D0 (1 << 0)
+
+#define PMIC_ID_ADDR	0x00
+
+#define PMIC_VENDOR_ID_MASK	(0x03 << 6)
+#define PMIC_MINOR_REV_MASK	0x07
+#define PMIC_MAJOR_REV_MASK	(0x07 << 3)
+
+#define BASINCOVE_VENDORID	(0x03 << 6)
+#define SHADYCOVE_VENDORID	0x00
+
+#define BC_PMIC_MAJOR_REV_A0	0x00
+#define BC_PMIC_MAJOR_REV_B0	(0x01 << 3)
+
+#define PMIC_BZONE_LOW 0
+#define PMIC_BZONE_HIGH 5
+#define PMIC_BZONE_UNKNOWN 7
+
+#define IRQLVL1_ADDR			0x01
+#define IRQLVL1_MASK_ADDR		0x0c
+#define IRQLVL1_CHRGR_MASK		D5
+
+#define THRMZN0H_ADDR_BC		0xCE
+#define THRMZN0L_ADDR_BC		0xCF
+#define THRMZN1H_ADDR_BC		0xD0
+#define THRMZN1L_ADDR_BC		0xD1
+#define THRMZN2H_ADDR_BC		0xD2
+#define THRMZN2L_ADDR_BC		0xD3
+#define THRMZN3H_ADDR_BC		0xD4
+#define THRMZN3L_ADDR_BC		0xD5
+#define THRMZN4H_ADDR_BC		0xD6
+#define THRMZN4L_ADDR_BC		0xD7
+
+#define THRMZN0H_ADDR_SC		0xD7
+#define THRMZN0L_ADDR_SC		0xD8
+#define THRMZN1H_ADDR_SC		0xD9
+#define THRMZN1L_ADDR_SC		0xDA
+#define THRMZN2H_ADDR_SC		0xDD
+#define THRMZN2L_ADDR_SC		0xDE
+#define THRMZN3H_ADDR_SC		0xDF
+#define THRMZN3L_ADDR_SC		0xE0
+#define THRMZN4H_ADDR_SC		0xE1
+#define THRMZN4L_ADDR_SC		0xE2
+
+#define THRMZN0_SC_ADCVAL		0x25A1
+#define THRMZN1_SC_ADCVAL		0x3512
+#define THRMZN2_SC_ADCVAL		0x312D
+#define THRMZN3_SC_ADCVAL		0x20FE
+#define THRMZN4_SC_ADCVAL		0x10B8
+
+#define CHGRIRQ0_ADDR			0x07
+#define CHGIRQ0_BZIRQ_MASK		D7
+#define CHGIRQ0_BAT_CRIT_MASK		D6
+#define CHGIRQ0_BAT1_ALRT_MASK		D5
+#define CHGIRQ0_BAT0_ALRT_MASK		D4
+
+#define MCHGRIRQ0_ADDR			0x12
+#define MCHGIRQ0_RSVD_MASK		D7
+#define MCHGIRQ0_MBAT_CRIT_MASK		D6
+#define MCHGIRQ0_MBAT1_ALRT_MASK	D5
+#define MCHGIRQ0_MBAT0_ALRT_MASK	D4
+
+#define SCHGRIRQ0_ADDR			0x4E
+#define SCHGIRQ0_RSVD_MASK		D7
+#define SCHGIRQ0_SBAT_CRIT_MASK		D6
+#define SCHGIRQ0_SBAT1_ALRT_MASK	D5
+#define SCHGIRQ0_SBAT0_ALRT_MASK	D4
+
+#define LOWBATTDET0_ADDR		0x2C
+#define LOWBATTDET1_ADDR		0x2D
+#define BATTDETCTRL_ADDR		0x2E
+#define VBUSDETCTRL_ADDR		0x50
+#define VDCINDETCTRL_ADDR		0x51
+
+#define CHRGRIRQ1_ADDR			0x08
+#define CHRGRIRQ1_SUSBIDGNDDET_MASK	D4
+#define CHRGRIRQ1_SUSBIDFLTDET_MASK	D3
+#define CHRGRIRQ1_SUSBIDDET_MASK	D3
+#define CHRGRIRQ1_SBATTDET_MASK		D2
+#define CHRGRIRQ1_SDCDET_MASK		D1
+#define CHRGRIRQ1_SVBUSDET_MASK		D0
+#define MCHGRIRQ1_ADDR			0x13
+#define MCHRGRIRQ1_SUSBIDGNDDET_MASK	D4
+#define MCHRGRIRQ1_SUSBIDFLTDET_MASK	D3
+#define MCHRGRIRQ1_SUSBIDDET_MASK	D3
+#define MCHRGRIRQ1_SBATTDET_MAS		D2
+#define MCHRGRIRQ1_SDCDET_MASK		D1
+#define MCHRGRIRQ1_SVBUSDET_MASK	D0
+#define SCHGRIRQ1_ADDR			0x4F
+#define SCHRGRIRQ1_SUSBIDGNDDET_MASK	(D3|D4)
+#define SCHRGRIRQ1_SUSBIDDET_MASK	D3
+#define SCHRGRIRQ1_SBATTDET_MASK	D2
+#define SCHRGRIRQ1_SDCDET_MASK		D1
+#define SCHRGRIRQ1_SVBUSDET_MASK	D0
+#define SHRT_GND_DET			(0x01 << 3)
+#define SHRT_FLT_DET			(0x01 << 4)
+
+#define PMIC_CHRGR_INT0_MASK		0xB1
+#define PMIC_CHRGR_CCSM_INT0_MASK	0xB0
+#define PMIC_CHRGR_EXT_CHRGR_INT_MASK	0x01
+
+#define CHGRCTRL0_ADDR			0x4B
+#define CHGRCTRL0_WDT_NOKICK_MASK	D7
+#define CHGRCTRL0_DBPOFF_MASK		D6
+#define CHGRCTRL0_CCSM_OFF_MASK		D5
+#define CHGRCTRL0_TTLCK_MASK		D4
+#define CHGRCTRL0_SWCONTROL_MASK	D3
+#define CHGRCTRL0_EXTCHRDIS_MASK	D2
+#define	CHRCTRL0_EMRGCHREN_MASK		D1
+#define	CHRCTRL0_CHGRRESET_MASK		D0
+
+#define WDT_NOKICK_ENABLE		(0x01 << 7)
+#define WDT_NOKICK_DISABLE		(~WDT_NOKICK_ENABLE & 0xFF)
+
+#define EXTCHRDIS_ENABLE		(0x01 << 2)
+#define EXTCHRDIS_DISABLE		(~EXTCHRDIS_ENABLE & 0xFF)
+#define SWCONTROL_ENABLE		(0x01 << 3)
+#define EMRGCHREN_ENABLE		(0x01 << 1)
+
+#define CHGRCTRL1_ADDR			0x4C
+#define CHGRCTRL1_DBPEN_MASK		D7
+#define CHGRCTRL1_OTGMODE_MASK		D6
+#define CHGRCTRL1_FTEMP_EVENT_MASK	D5
+#define CHGRCTRL1_FUSB_INLMT_1500	D4
+#define CHGRCTRL1_FUSB_INLMT_900	D3
+#define CHGRCTRL1_FUSB_INLMT_500	D2
+#define CHGRCTRL1_FUSB_INLMT_150	D1
+#define CHGRCTRL1_FUSB_INLMT_100	D0
+
+#define CHGRSTATUS_ADDR			0x4D
+#define CHGRSTATUS_RSVD_MASK		(D7|D6|D5|D3)
+#define CHGRSTATUS_SDPB_MASK		D4
+#define CHGRSTATUS_CHGDISLVL_MASK	D2
+#define CHGRSTATUS_CHGDETB_LATCH_MASK	D1
+#define CHGDETB_MASK			D0
+
+#define THRMBATZONE_ADDR_BC		0xB5
+#define THRMBATZONE_ADDR_SC		0xB6
+#define THRMBATZONE_MASK		(D0|D1|D2)
+
+#define USBIDCTRL_ADDR		0x19
+#define USBIDEN_MASK		0x01
+#define ACADETEN_MASK		(0x01 << 1)
+
+#define USBIDSTAT_ADDR		0x1A
+#define ID_SHORT		D4
+#define ID_SHORT_VBUS		(1 << 4)
+#define ID_NOT_SHORT_VBUS	0
+#define ID_FLOAT_STS		D3
+#define R_ID_FLOAT_DETECT	(1 << 3)
+#define R_ID_FLOAT_NOT_DETECT	0
+#define ID_RAR_BRC_STS		((D2 | D1))
+#define ID_ACA_NOT_DETECTED	0
+#define R_ID_A			(1 << 1)
+#define R_ID_B			(2 << 1)
+#define R_ID_C			(3 << 1)
+#define ID_GND			D0
+#define ID_TYPE_A		0
+#define ID_TYPE_B		1
+#define is_aca(x) ((x & R_ID_A) || (x & R_ID_B) || (x & R_ID_C))
+
+#define WAKESRC_ADDR		0x24
+
+#define CHRTTADDR_ADDR		0x56
+#define CHRTTDATA_ADDR		0x57
+
+#define USBSRCDET_RETRY_CNT		5
+#define USBSRCDET_SLEEP_TIME		200
+#define USBSRCDETSTATUS_ADDR		0x5D
+#define USBSRCDET_SUSBHWDET_MASK	(D0|D1)
+#define USBSRCDET_USBSRCRSLT_MASK	(D2|D3|D4|D5)
+#define USBSRCDET_SDCD_MASK		(D6|D7)
+#define USBSRCDET_SUSBHWDET_DETON	(0x01 << 0)
+#define USBSRCDET_SUSBHWDET_DETSUCC	(0x01 << 1)
+#define USBSRCDET_SUSBHWDET_DETFAIL	(0x03 << 0)
+
+/* Register on I2C-dev2-0x6E */
+#define USBPATH_ADDR		0x011C
+#define USBPATH_USBSEL_MASK	D3
+
+#define TT_I2CDADDR_ADDR		0x00
+#define TT_CHGRINIT0OS_ADDR		0x01
+#define TT_CHGRINIT1OS_ADDR		0x02
+#define TT_CHGRINIT2OS_ADDR		0x03
+#define TT_CHGRINIT3OS_ADDR		0x04
+#define TT_CHGRINIT4OS_ADDR		0x05
+#define TT_CHGRINIT5OS_ADDR		0x06
+#define TT_CHGRINIT6OS_ADDR		0x07
+#define TT_CHGRINIT7OS_ADDR		0x08
+#define TT_USBINPUTICCOS_ADDR		0x09
+#define TT_USBINPUTICCMASK_ADDR		0x0A
+#define TT_CHRCVOS_ADDR			0X0B
+#define TT_CHRCVMASK_ADDR		0X0C
+#define TT_CHRCCOS_ADDR			0X0D
+#define TT_CHRCCMASK_ADDR		0X0E
+#define TT_LOWCHROS_ADDR		0X0F
+#define TT_LOWCHRMASK_ADDR		0X10
+#define TT_WDOGRSTOS_ADDR		0X11
+#define TT_WDOGRSTMASK_ADDR		0X12
+#define TT_CHGRENOS_ADDR		0X13
+#define TT_CHGRENMASK_ADDR		0X14
+
+#define TT_CUSTOMFIELDEN_ADDR		0X15
+#define TT_HOT_LC_EN			D1
+#define TT_COLD_LC_EN			D0
+#define TT_HOT_COLD_LC_MASK		(TT_HOT_LC_EN | TT_COLD_LC_EN)
+#define TT_HOT_COLD_LC_EN		(TT_HOT_LC_EN | TT_COLD_LC_EN)
+#define TT_HOT_COLD_LC_DIS		0
+
+#define TT_CHGRINIT0VAL_ADDR		0X20
+#define TT_CHGRINIT1VAL_ADDR		0X21
+#define TT_CHGRINIT2VAL_ADDR		0X22
+#define TT_CHGRINIT3VAL_ADDR		0X23
+#define TT_CHGRINIT4VAL_ADDR		0X24
+#define TT_CHGRINIT5VAL_ADDR		0X25
+#define TT_CHGRINIT6VAL_ADDR		0X26
+#define TT_CHGRINIT7VAL_ADDR		0X27
+#define TT_USBINPUTICC100VAL_ADDR	0X28
+#define TT_USBINPUTICC150VAL_ADDR	0X29
+#define TT_USBINPUTICC500VAL_ADDR	0X2A
+#define TT_USBINPUTICC900VAL_ADDR	0X2B
+#define TT_USBINPUTICC1500VAL_ADDR	0X2C
+#define TT_CHRCVEMRGLOWVAL_ADDR		0X2D
+#define TT_CHRCVCOLDVAL_ADDR		0X2E
+#define TT_CHRCVCOOLVAL_ADDR		0X2F
+#define TT_CHRCVWARMVAL_ADDR		0X30
+#define TT_CHRCVHOTVAL_ADDR		0X31
+#define TT_CHRCVEMRGHIVAL_ADDR		0X32
+#define TT_CHRCCEMRGLOWVAL_ADDR		0X33
+#define TT_CHRCCCOLDVAL_ADDR		0X34
+#define TT_CHRCCCOOLVAL_ADDR		0X35
+#define TT_CHRCCWARMVAL_ADDR		0X36
+#define TT_CHRCCHOTVAL_ADDR		0X37
+#define TT_CHRCCEMRGHIVAL_ADDR		0X38
+#define TT_LOWCHRENVAL_ADDR		0X39
+#define TT_LOWCHRDISVAL_ADDR		0X3A
+#define TT_WDOGRSTVAL_ADDR		0X3B
+#define TT_CHGRENVAL_ADDR		0X3C
+#define TT_CHGRDISVAL_ADDR		0X3D
+
+/*Interrupt registers*/
+#define BATT_CHR_BATTDET_MASK	D2
+/*Status registers*/
+#define BATT_PRESENT		1
+#define BATT_NOT_PRESENT	0
+
+#define BATT_STRING_MAX		8
+#define BATTID_STR_LEN		8
+
+#define CHARGER_PRESENT		1
+#define CHARGER_NOT_PRESENT	0
+
+/*FIXME: Modify default values */
+#define BATT_DEAD_CUTOFF_VOLT		3400	/* 3400 mV */
+#define BATT_CRIT_CUTOFF_VOLT		3700	/* 3700 mV */
+
+#define MSIC_BATT_TEMP_MAX		60	/* 60 degrees */
+#define MSIC_BATT_TEMP_MIN		0
+
+#define BATT_TEMP_WARM			45	/* 45 degrees */
+#define MIN_BATT_PROF			4
+
+#define PMIC_REG_NAME_LEN		28
+#define PMIC_REG_DEF(x) { .reg_name = #x, .addr = x }
+
+struct interrupt_info {
+	/* Interrupt register mask*/
+	u8 int_reg_mask;
+	/* interrupt status register mask */
+	u8 stat_reg_mask;
+	/* log message if interrupt is set */
+	char *log_msg_int_reg_true;
+	/* log message if stat is true or false */
+	char *log_msg_stat_true;
+	char *log_msg_stat_false;
+	/* handle if interrupt bit is set */
+	void (*int_handle) (void);
+	/* interrupt status handler */
+	void (*stat_handle) (bool);
+};
+
+enum pmic_charger_aca_type {
+	RID_UNKNOWN = 0,
+	RID_A,
+	RID_B,
+	RID_C,
+	RID_FLOAT,
+	RID_GND,
+};
+
+enum pmic_charger_cable_type {
+	PMIC_CHARGER_TYPE_NONE = 0,
+	PMIC_CHARGER_TYPE_SDP,
+	PMIC_CHARGER_TYPE_DCP,
+	PMIC_CHARGER_TYPE_CDP,
+	PMIC_CHARGER_TYPE_ACA,
+	PMIC_CHARGER_TYPE_SE1,
+	PMIC_CHARGER_TYPE_MHL,
+	PMIC_CHARGER_TYPE_FLOAT_DP_DN,
+	PMIC_CHARGER_TYPE_OTHER,
+	PMIC_CHARGER_TYPE_DCP_EXTPHY,
+};
+
+struct pmic_chrgr_drv_context {
+	bool invalid_batt;
+	bool is_batt_present;
+	bool current_sense_enabled;
+	unsigned int irq;		/* GPE_ID or IRQ# */
+	void __iomem *pmic_intr_iomap;
+	struct device *dev;
+	int health;
+	u8 pmic_id;
+	bool is_internal_usb_phy;
+	enum pmic_charger_cable_type charger_type;
+	/* ShadyCove-WA for VBUS removal detect issue */
+	bool vbus_connect_status;
+	bool otg_mode_enabled;
+	struct ps_batt_chg_prof *sfi_bcprof;
+	struct ps_pse_mod_prof *actual_bcprof;
+	struct ps_pse_mod_prof *runtime_bcprof;
+	struct pmic_platform_data *pdata;
+	struct usb_phy *otg;
+	struct list_head evt_queue;
+	struct work_struct evt_work;
+	struct delayed_work acok_irq_work;
+	struct mutex evt_queue_lock;
+	struct wake_lock wakelock;
+	struct wake_lock otg_wa_wakelock;
+};
+
+struct pmic_event {
+	struct list_head node;
+	u8 chgrirq0_int;
+	u8 chgrirq1_int;
+	u8 chgrirq0_stat;
+	u8 chgrirq1_stat;
+};
+
+struct pmic_regs_def {
+	char reg_name[PMIC_REG_NAME_LEN];
+	u16 addr;
+};
+
+#endif
diff --git a/drivers/external_drivers/drivers/socwatch/Makefile b/drivers/external_drivers/drivers/socwatch/Makefile
new file mode 100644
index 0000000..843a2d4
--- /dev/null
+++ b/drivers/external_drivers/drivers/socwatch/Makefile
@@ -0,0 +1,2 @@
+obj-y += soc_perf_driver/
+obj-y += socwatch_driver/
diff --git a/drivers/external_drivers/drivers/socwatch/soc_perf_driver/Kconfig b/drivers/external_drivers/drivers/socwatch/soc_perf_driver/Kconfig
new file mode 100644
index 0000000..8de3a7c
--- /dev/null
+++ b/drivers/external_drivers/drivers/socwatch/soc_perf_driver/Kconfig
@@ -0,0 +1,3 @@
+config INTEL_SOCWATCH_PERF_DRV
+	boolean "Intel SoCWatch Perf driver support"
+	depends on X86_INTEL_MID=y
diff --git a/drivers/external_drivers/drivers/socwatch/soc_perf_driver/Makefile b/drivers/external_drivers/drivers/socwatch/soc_perf_driver/Makefile
new file mode 100644
index 0000000..446d279
--- /dev/null
+++ b/drivers/external_drivers/drivers/socwatch/soc_perf_driver/Makefile
@@ -0,0 +1,8 @@
+# By default, build the EMON portions of the driver
+ccflags-y += -DEMON -DEMON_INTERNAL -DDRV_ANDROID -DDRV_ATOM_ONLY -DPCI_HELPERS_API
+
+obj-$(CONFIG_INTEL_SOCWATCH_PERF_DRV) +=	\
+		control.o	\
+		pci.o	\
+		socperfdrv.o	\
+		soc_uncore.o
diff --git a/drivers/external_drivers/drivers/socwatch/soc_perf_driver/control.c b/drivers/external_drivers/drivers/socwatch/soc_perf_driver/control.c
new file mode 100644
index 0000000..9b56c4e
--- /dev/null
+++ b/drivers/external_drivers/drivers/socwatch/soc_perf_driver/control.c
@@ -0,0 +1,751 @@
+/* ***********************************************************************************************
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2005-2014 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  BSD LICENSE
+
+  Copyright(c) 2005-2014 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+  ***********************************************************************************************
+*/
+
+#include "lwpmudrv_defines.h"
+#include <linux/version.h>
+#include <linux/mm.h>
+#include <linux/mempool.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include "lwpmudrv_types.h"
+#include "rise_errors.h"
+#include "lwpmudrv_ecb.h"
+#include "socperfdrv.h"
+#include "control.h"
+#include <linux/sched.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+#define SMP_CALL_FUNCTION(func,ctx,retry,wait)    smp_call_function((func),(ctx),(wait))
+#else
+#define SMP_CALL_FUNCTION(func,ctx,retry,wait)    smp_call_function((func),(ctx),(retry),(wait))
+#endif
+
+/*
+ *  Global State Nodes - keep here for now.  Abstract out when necessary.
+ */
+GLOBAL_STATE_NODE driver_state;
+MSR_DATA msr_data = NULL;
+MEM_TRACKER mem_tr_head = NULL;	/* start of the mem tracker list*/
+MEM_TRACKER mem_tr_tail = NULL;	/* end of mem tracker list*/
+spinlock_t mem_tr_lock;		/* spinlock for mem tracker list*/
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn       VOID CONTROL_Invoke_Cpu (func, ctx, arg)
+ *
+ * @brief    Set up a DPC call and insert it into the queue
+ *
+ * @param    IN cpu_idx  - the core id to dispatch this function to
+ *           IN func     - function to be invoked by the specified core(s)
+ *           IN ctx      - pointer to the parameter block for each function
+ *                         invocation
+ *
+ * @return   None
+ *
+ * <I>Special Notes:</I>
+ *
+ */
+extern VOID CONTROL_Invoke_Cpu(int cpu_idx, VOID(*func) (PVOID), PVOID ctx)
+{
+	CONTROL_Invoke_Parallel(func, ctx);
+
+	return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*
+ * @fn VOID CONTROL_Invoke_Parallel_Service(func, ctx, blocking, exclude)
+ *
+ * @param    func     - function to be invoked by each core in the system
+ * @param    ctx      - pointer to the parameter block for each function invocation
+ * @param    blocking - Wait for invoked function to complete
+ * @param    exclude  - exclude the current core from executing the code
+ *
+ * @returns  None
+ *
+ * @brief    Service routine to handle all kinds of parallel invoke on all CPU calls
+ *
+ * <I>Special Notes:</I>
+ *           Invoke the function provided in parallel in either a blocking or
+ *           non-blocking mode.  The current core may be excluded if desired.
+ *           NOTE - Do not call this function directly from source code.
+ *           Use the aliases CONTROL_Invoke_Parallel(), CONTROL_Invoke_Parallel_NB(),
+ *           or CONTROL_Invoke_Parallel_XS().
+ *
+ */
+extern VOID
+CONTROL_Invoke_Parallel_Service(VOID(*func) (PVOID),
+				PVOID ctx, int blocking, int exclude)
+{
+	GLOBAL_STATE_cpu_count(driver_state) = 0;
+	GLOBAL_STATE_dpc_count(driver_state) = 0;
+
+	preempt_disable();
+	SMP_CALL_FUNCTION(func, ctx, 0, blocking);
+
+	if (!exclude) {
+		func(ctx);
+	}
+	preempt_enable();
+
+	return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*
+ * @fn VOID control_Memory_Tracker_Delete_Node(mem_tr)
+ *
+ * @param    IN mem_tr    - memory tracker node to delete
+ *
+ * @returns  None
+ *
+ * @brief    Delete specified node in the memory tracker
+ *
+ * <I>Special Notes:</I>
+ *           Assumes mem_tr_lock is already held while calling this function!
+ */
+static VOID control_Memory_Tracker_Delete_Node(MEM_TRACKER mem_tr)
+{
+	MEM_TRACKER prev_tr = NULL;
+	MEM_TRACKER next_tr = NULL;
+	U32 size = MEM_EL_MAX_ARRAY_SIZE * sizeof(MEM_EL_NODE);
+
+	if (!mem_tr) {
+		return;
+	}
+	/* free the allocated mem_el array (if any)*/
+	if (MEM_TRACKER_mem(mem_tr)) {
+		if (size < MAX_KMALLOC_SIZE) {
+			kfree(MEM_TRACKER_mem(mem_tr));
+		} else {
+			free_pages((unsigned long)MEM_TRACKER_mem(mem_tr),
+				   get_order(size));
+		}
+	}
+	/* update the linked list*/
+	prev_tr = MEM_TRACKER_prev(mem_tr);
+	next_tr = MEM_TRACKER_next(mem_tr);
+	if (prev_tr) {
+		MEM_TRACKER_next(prev_tr) = next_tr;
+	}
+	if (next_tr) {
+		MEM_TRACKER_prev(next_tr) = prev_tr;
+	}
+	/* free the mem_tracker node*/
+	kfree(mem_tr);
+
+	return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*
+ * @fn VOID control_Memory_Tracker_Create_Node(void)
+ *
+ * @param    None    - size of the memory to allocate
+ *
+ * @returns  OS_SUCCESS if successful, otherwise error
+ *
+ * @brief    Initialize the memory tracker
+ *
+ * <I>Special Notes:</I>
+ *           Assumes mem_tr_lock is already held while calling this function!
+ *
+ *           Since this function can be called within either GFP_KERNEL or
+ *           GFP_ATOMIC contexts, the most restrictive allocation is used
+ *           (viz., GFP_ATOMIC).
+ */
+static U32 control_Memory_Tracker_Create_Node(void
+    )
+{
+	U32 size = MEM_EL_MAX_ARRAY_SIZE * sizeof(MEM_EL_NODE);
+	PVOID location = NULL;
+	MEM_TRACKER mem_tr = NULL;
+
+	/* create a mem tracker node*/
+	mem_tr = (MEM_TRACKER) kmalloc(sizeof(MEM_TRACKER_NODE), GFP_ATOMIC);
+	if (!mem_tr) {
+		SOCPERF_PRINT_ERROR
+		    ("control_Initialize_Memory_Tracker: failed to allocate mem tracker node\n");
+		return OS_FAULT;
+	}
+	/* create an initial array of mem_el's inside the mem tracker node*/
+	if (size < MAX_KMALLOC_SIZE) {
+		location = (PVOID) kmalloc(size, GFP_ATOMIC);
+		SOCPERF_PRINT_DEBUG
+		    ("control_Memory_Tracker_Create_Node: allocated small memory (0x%p, %d)\n",
+		     location, (S32) size);
+	} else {
+		location =
+		    (PVOID) __get_free_pages(GFP_ATOMIC, get_order(size));
+		SOCPERF_PRINT_DEBUG
+		    ("control_Memory_Tracker_Create_Node: allocated large memory (0x%p, %d)\n",
+		     location, (S32) size);
+	}
+
+	/* initialize new mem tracker node*/
+	MEM_TRACKER_mem(mem_tr) = location;
+	MEM_TRACKER_prev(mem_tr) = NULL;
+	MEM_TRACKER_next(mem_tr) = NULL;
+
+	/* if mem_el array allocation failed, then remove node*/
+	if (!MEM_TRACKER_mem(mem_tr)) {
+		control_Memory_Tracker_Delete_Node(mem_tr);
+		SOCPERF_PRINT_ERROR
+		    ("control_Memory_Tracker_Create_Node: failed to allocate mem_el array in tracker node ... deleting node\n");
+		return OS_FAULT;
+	}
+	/* initialize mem_tracker's mem_el array*/
+	MEM_TRACKER_max_size(mem_tr) = MEM_EL_MAX_ARRAY_SIZE;
+	memset(MEM_TRACKER_mem(mem_tr), 0, size);
+
+	/* update the linked list*/
+	if (!mem_tr_head) {
+		mem_tr_head = mem_tr;
+	} else {
+		MEM_TRACKER_prev(mem_tr) = mem_tr_tail;
+		MEM_TRACKER_next(mem_tr_tail) = mem_tr;
+	}
+	mem_tr_tail = mem_tr;
+	SOCPERF_PRINT_DEBUG
+	    ("control_Memory_Tracker_Create_node: allocating new node=0x%p, max_elements=%d, size=%d\n",
+	     MEM_TRACKER_mem(mem_tr_tail), MEM_EL_MAX_ARRAY_SIZE, size);
+
+	return OS_SUCCESS;
+}
+
+/* ------------------------------------------------------------------------- */
+/*
+ * @fn VOID control_Memory_Tracker_Add(location, size, vmalloc_flag)
+ *
+ * @param    IN location     - memory location
+ * @param    IN size         - size of the memory to allocate
+ * @param    IN vmalloc_flag - flag that indicates if the allocation was done with vmalloc
+ *
+ * @returns  None
+ *
+ * @brief    Keep track of allocated memory with memory tracker
+ *
+ * <I>Special Notes:</I>
+ *           Starting from first mem_tracker node, the algorithm
+ *           finds the first "hole" in the mem_tracker list and
+ *           tracks the memory allocation there.
+ */
+static U32
+control_Memory_Tracker_Add(PVOID location, ssize_t size, DRV_BOOL vmalloc_flag)
+{
+	S32 i, n;
+	U32 status;
+	DRV_BOOL found;
+	MEM_TRACKER mem_tr;
+
+	spin_lock(&mem_tr_lock);
+
+	/* check if there is space in ANY of mem_tracker's nodes for the memory item*/
+	mem_tr = mem_tr_head;
+	found = FALSE;
+	status = OS_SUCCESS;
+	i = n = 0;
+	while (mem_tr && (!found)) {
+		for (i = 0; i < MEM_TRACKER_max_size(mem_tr); i++) {
+			if (!MEM_TRACKER_mem_address(mem_tr, i)) {
+				SOCPERF_PRINT_DEBUG
+				    ("CONTROL_Memory_Tracker_Add: found index %d of %d available\n",
+				     i, MEM_TRACKER_max_size(mem_tr) - 1);
+				n = i;
+				found = TRUE;
+			}
+		}
+		if (!found) {
+			mem_tr = MEM_TRACKER_next(mem_tr);
+		}
+	}
+
+	if (!found) {
+		/* extend into (i.e., create new) mem_tracker node ...*/
+		status = control_Memory_Tracker_Create_Node();
+		if (status != OS_SUCCESS) {
+			SOCPERF_PRINT_ERROR
+			    ("Unable to create mem tracker node\n");
+			goto finish_add;
+		}
+		/* use mem tracker tail node and first available entry in mem_el array*/
+		mem_tr = mem_tr_tail;
+		n = 0;
+	}
+	/* we now have a location in mem tracker to keep track of the memory item*/
+	MEM_TRACKER_mem_address(mem_tr, n) = location;
+	MEM_TRACKER_mem_size(mem_tr, n) = size;
+	MEM_TRACKER_mem_vmalloc(mem_tr, n) = vmalloc_flag;
+	SOCPERF_PRINT_DEBUG
+	    ("control_Memory_Tracker_Add: tracking (0x%p, %d) in node %d of %d\n",
+	     location, (S32) size, n, MEM_TRACKER_max_size(mem_tr) - 1);
+
+finish_add:
+	spin_unlock(&mem_tr_lock);
+
+	return status;
+}
+
+/* ------------------------------------------------------------------------- */
+/*
+ * @fn VOID CONTROL_Memory_Tracker_Init(void)
+ *
+ * @param    None
+ *
+ * @returns  None
+ *
+ * @brief    Initializes Memory Tracker
+ *
+ * <I>Special Notes:</I>
+ *           This should only be called when the driver is being loaded.
+ */
+extern VOID CONTROL_Memory_Tracker_Init(VOID)
+{
+	SOCPERF_PRINT_DEBUG
+	    ("CONTROL_Memory_Tracker_Init: initializing mem tracker\n");
+
+	mem_tr_head = NULL;
+	mem_tr_tail = NULL;
+
+	spin_lock_init(&mem_tr_lock);
+
+	return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*
+ * @fn VOID CONTROL_Memory_Tracker_Free(void)
+ *
+ * @param    None
+ *
+ * @returns  None
+ *
+ * @brief    Frees memory used by Memory Tracker
+ *
+ * <I>Special Notes:</I>
+ *           This should only be called when the driver is being unloaded.
+ */
+extern VOID CONTROL_Memory_Tracker_Free(VOID)
+{
+	S32 i;
+	MEM_TRACKER temp;
+
+	SOCPERF_PRINT_DEBUG
+	    ("CONTROL_Memory_Tracker_Free: destroying mem tracker\n");
+
+	spin_lock(&mem_tr_lock);
+
+	/* check for any memory that was not freed, and free it*/
+	while (mem_tr_head) {
+		for (i = 0; i < MEM_TRACKER_max_size(mem_tr_head); i++) {
+			if (MEM_TRACKER_mem_address(mem_tr_head, i)) {
+				SOCPERF_PRINT_WARNING
+				    ("CONTROL_Memory_Tracker_Free: index %d of %d, not freed (0x%p, %d) ... freeing now\n",
+				     i, MEM_TRACKER_max_size(mem_tr_head) - 1,
+				     MEM_TRACKER_mem_address(mem_tr_head, i),
+				     MEM_TRACKER_mem_size(mem_tr_head, i));
+				free_pages((unsigned long)
+					   MEM_TRACKER_mem_address(mem_tr_head,
+								   i),
+					   get_order(MEM_TRACKER_mem_size
+						     (mem_tr_head, i)));
+				MEM_TRACKER_mem_address(mem_tr_head, i) = NULL;
+				MEM_TRACKER_mem_size(mem_tr_head, i) = 0;
+				MEM_TRACKER_mem_vmalloc(mem_tr_head, i) = FALSE;
+			}
+		}
+		temp = MEM_TRACKER_next(mem_tr_head);
+		control_Memory_Tracker_Delete_Node(mem_tr_head);
+		mem_tr_head = temp;
+	}
+
+	spin_unlock(&mem_tr_lock);
+
+	SOCPERF_PRINT_DEBUG
+	    ("CONTROL_Memory_Tracker_Free: mem tracker destruction complete\n");
+
+	return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*
+ * @fn VOID CONTROL_Memory_Tracker_Compaction(void)
+ *
+ * @param    None
+ *
+ * @returns  None
+ *
+ * @brief    Compacts the memory allocator if holes are detected
+ *
+ * <I>Special Notes:</I>
+ *           The algorithm compacts mem_tracker nodes such that
+ *           node entries are full starting from mem_tr_head
+ *           up until the first empty node is detected, after
+ *           which nodes up to mem_tr_tail will be empty.
+ *           At end of collection (or at other safe sync point),
+ *           we reclaim/compact space used by mem tracker.
+ */
+extern VOID CONTROL_Memory_Tracker_Compaction(void
+    )
+{
+	S32 i, j, n, m, c, d;
+	DRV_BOOL found, overlap;
+	MEM_TRACKER mem_tr1, mem_tr2;
+
+	spin_lock(&mem_tr_lock);
+
+	mem_tr1 = mem_tr_head;
+	mem_tr2 = mem_tr_tail;
+
+	/* if memory tracker was never used, then no need to compact*/
+	if (!mem_tr1 || !mem_tr2) {
+		goto finish_compact;
+	}
+
+	i = j = n = c = d = 0;
+	m = MEM_TRACKER_max_size(mem_tr2) - 1;
+	overlap = FALSE;
+	while (!overlap) {
+		/* find an empty node*/
+		found = FALSE;
+		while (!found && !overlap && mem_tr1) {
+			SOCPERF_PRINT_DEBUG
+			    ("CONTROL_Memory_Tracker_Compaction: looking at mem_tr1 0x%p, index=%d\n",
+			     mem_tr1, n);
+			for (i = n; i < MEM_TRACKER_max_size(mem_tr1); i++) {
+				if (!MEM_TRACKER_mem_address(mem_tr1, i)) {
+					SOCPERF_PRINT_DEBUG
+					    ("CONTROL_Memory_Tracker_Compaction: found index %d of %d empty\n",
+					     i,
+					     MEM_TRACKER_max_size(mem_tr1) - 1);
+					found = TRUE;
+				}
+			}
+			/* check for overlap*/
+			overlap = (mem_tr1 == mem_tr2) && (i >= m);
+
+			/* if no overlap and an empty node was not found, then advance to next node*/
+			if (!found && !overlap) {
+				mem_tr1 = MEM_TRACKER_next(mem_tr1);
+				n = 0;
+			}
+		}
+		/* all nodes going in forward direction are full, so exit*/
+		if (!found || overlap) {
+			goto finish_compact;
+		}
+		/* find a non-empty node*/
+		found = FALSE;
+		while (!found && !overlap && mem_tr2) {
+			SOCPERF_PRINT_DEBUG
+			    ("CONTROL_Memory_Tracker_Compaction: looking at mem_tr2 0x%p, index=%d\n",
+			     mem_tr2, m);
+			for (j = m; j >= 0; j--) {
+				if (MEM_TRACKER_mem_address(mem_tr2, j)) {
+					SOCPERF_PRINT_DEBUG
+					    ("CONTROL_Memory_Tracker_Compaction: found index %d of %d non-empty\n",
+					     j,
+					     MEM_TRACKER_max_size(mem_tr2) - 1);
+					found = TRUE;
+				}
+			}
+			/* check for overlap*/
+			overlap = (mem_tr1 == mem_tr2) && (j <= i);
+
+			/* if no overlap and no non-empty node was found, then retreat to prev node*/
+			if (!found && !overlap) {
+				MEM_TRACKER empty_tr = mem_tr2;	/* keep track of empty node*/
+				mem_tr2 = MEM_TRACKER_prev(mem_tr2);
+				m = MEM_TRACKER_max_size(mem_tr2) - 1;
+				mem_tr_tail = mem_tr2;	/* keep track of new tail*/
+				/* reclaim empty mem_tracker node*/
+				control_Memory_Tracker_Delete_Node(empty_tr);
+				/* keep track of number of node deletions performed*/
+				d++;
+			}
+		}
+		/* all nodes going in reverse direction are empty, so exit*/
+		if (!found || overlap) {
+			goto finish_compact;
+		}
+		/* swap empty node with non-empty node so that "holes" get bubbled towards the end of list*/
+		MEM_TRACKER_mem_address(mem_tr1, i) =
+		    MEM_TRACKER_mem_address(mem_tr2, j);
+		MEM_TRACKER_mem_size(mem_tr1, i) =
+		    MEM_TRACKER_mem_size(mem_tr2, j);
+		MEM_TRACKER_mem_vmalloc(mem_tr1, i) =
+		    MEM_TRACKER_mem_vmalloc(mem_tr2, j);
+
+		MEM_TRACKER_mem_address(mem_tr2, j) = NULL;
+		MEM_TRACKER_mem_size(mem_tr2, j) = 0;
+		MEM_TRACKER_mem_vmalloc(mem_tr2, j) = FALSE;
+
+		/* keep track of number of memory compactions performed*/
+		c++;
+
+		/* start new search starting from next element in mem_tr1*/
+		n = i + 1;
+
+		/* start new search starting from prev element in mem_tr2*/
+		m = j - 1;
+	}
+
+finish_compact:
+	spin_unlock(&mem_tr_lock);
+
+	SOCPERF_PRINT_DEBUG
+	    ("CONTROL_Memory_Tracker_Compaction: number of elements compacted = %d, nodes deleted = %d\n",
+	     c, d);
+
+	return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*
+ * @fn PVOID CONTROL_Allocate_Memory(size)
+ *
+ * @param    IN size     - size of the memory to allocate
+ *
+ * @returns  char*       - pointer to the allocated memory block
+ *
+ * @brief    Allocate and zero memory
+ *
+ * <I>Special Notes:</I>
+ *           Allocate memory in the GFP_KERNEL pool.
+ *
+ *           Use this if memory is to be allocated within a context where
+ *           the allocator can block the allocation (e.g., by putting
+ *           the caller to sleep) while it tries to free up memory to
+ *           satisfy the request.  Otherwise, if the allocation must
+ *           occur atomically (e.g., caller cannot sleep), then use
+ *           CONTROL_Allocate_KMemory instead.
+ */
+extern PVOID CONTROL_Allocate_Memory(size_t size)
+{
+	U32 status;
+	PVOID location;
+
+	if (size <= 0) {
+		return NULL;
+	}
+	/* determine whether to use mem_tracker or not*/
+	if (size < MAX_KMALLOC_SIZE) {
+		location = (PVOID) kmalloc(size, GFP_KERNEL);
+		SOCPERF_PRINT_DEBUG
+		    ("CONTROL_Allocate_Memory: allocated small memory (0x%p, %d)\n",
+		     location, (S32) size);
+	} else {
+		location = (PVOID) vmalloc(size);
+		if (location) {
+			status =
+			    control_Memory_Tracker_Add(location, size, TRUE);
+			SOCPERF_PRINT_DEBUG
+			    ("CONTROL_Allocate_Memory: - allocated *large* memory (0x%p, %d)\n",
+			     location, (S32) size);
+			if (status != OS_SUCCESS) {
+				/* failed to track in mem_tracker, so free up memory and return NULL*/
+				vfree(location);
+				SOCPERF_PRINT_ERROR
+				    ("CONTROL_Allocate_Memory: - able to allocate, but failed to track via MEM_TRACKER ... freeing\n");
+				return NULL;
+			}
+		}
+	}
+
+	if (!location) {
+		SOCPERF_PRINT_ERROR
+		    ("CONTROL_Allocate_Memory: failed for size %d bytes\n",
+		     (S32) size);
+		return NULL;
+	}
+
+	memset(location, 0, size);
+
+	return location;
+}
+
+/* ------------------------------------------------------------------------- */
+/*
+ * @fn PVOID CONTROL_Allocate_KMemory(size)
+ *
+ * @param    IN size     - size of the memory to allocate
+ *
+ * @returns  char*       - pointer to the allocated memory block
+ *
+ * @brief    Allocate and zero memory
+ *
+ * <I>Special Notes:</I>
+ *           Allocate memory in the GFP_ATOMIC pool.
+ *
+ *           Use this if memory is to be allocated within a context where
+ *           the allocator cannot block the allocation (e.g., by putting
+ *           the caller to sleep) as it tries to free up memory to
+ *           satisfy the request.  Examples include interrupt handlers,
+ *           process context code holding locks, etc.
+ */
+extern PVOID CONTROL_Allocate_KMemory(size_t size)
+{
+	U32 status;
+	PVOID location;
+
+	if (size <= 0) {
+		return NULL;
+	}
+
+	if (size < MAX_KMALLOC_SIZE) {
+		location = (PVOID) kmalloc(size, GFP_ATOMIC);
+		SOCPERF_PRINT_DEBUG
+		    ("CONTROL_Allocate_KMemory: allocated small memory (0x%p, %d)\n",
+		     location, (S32) size);
+	} else {
+		location =
+		    (PVOID) __get_free_pages(GFP_ATOMIC, get_order(size));
+		status = control_Memory_Tracker_Add(location, size, FALSE);
+		SOCPERF_PRINT_DEBUG
+		    ("CONTROL_Allocate_KMemory: allocated large memory (0x%p, %d)\n",
+		     location, (S32) size);
+		if (status != OS_SUCCESS) {
+			/* failed to track in mem_tracker, so free up memory and return NULL*/
+			free_pages((unsigned long)location, get_order(size));
+			SOCPERF_PRINT_ERROR
+			    ("CONTROL_Allocate_KMemory: - able to allocate, but failed to track via MEM_TRACKER ... freeing\n");
+			return NULL;
+		}
+	}
+
+	if (!location) {
+		SOCPERF_PRINT_ERROR
+		    ("CONTROL_Allocate_KMemory: failed for size %d bytes\n",
+		     (S32) size);
+		return NULL;
+	}
+
+	memset(location, 0, size);
+
+	return location;
+}
+
+/* ------------------------------------------------------------------------- */
+/*
+ * @fn PVOID CONTROL_Free_Memory(location)
+ *
+ * @param    IN location  - size of the memory to allocate
+ *
+ * @returns  pointer to the allocated memory block
+ *
+ * @brief    Frees the memory block
+ *
+ * <I>Special Notes:</I>
+ *           Does not try to free memory if fed with a NULL pointer
+ *           Expected usage:
+ *               ptr = CONTROL_Free_Memory(ptr);
+ *           Does not do compaction ... can have "holes" in
+ *           mem_tracker list after this operation.
+ */
+extern PVOID CONTROL_Free_Memory(PVOID location)
+{
+	S32 i;
+	DRV_BOOL found;
+	MEM_TRACKER mem_tr;
+
+	if (!location) {
+		return NULL;
+	}
+
+	spin_lock(&mem_tr_lock);
+
+	/* scan through mem_tracker nodes for matching entry (if any)*/
+	mem_tr = mem_tr_head;
+	found = FALSE;
+	while (mem_tr) {
+		for (i = 0; i < MEM_TRACKER_max_size(mem_tr); i++) {
+			if (location == MEM_TRACKER_mem_address(mem_tr, i)) {
+				SOCPERF_PRINT_DEBUG
+				    ("CONTROL_Free_Memory: freeing large memory location 0x%p\n",
+				     location);
+				found = TRUE;
+				if (MEM_TRACKER_mem_vmalloc(mem_tr, i)) {
+					vfree(location);
+				} else {
+					free_pages((unsigned long)location,
+						   get_order
+						   (MEM_TRACKER_mem_size
+						    (mem_tr, i)));
+				}
+				MEM_TRACKER_mem_address(mem_tr, i) = NULL;
+				MEM_TRACKER_mem_size(mem_tr, i) = 0;
+				MEM_TRACKER_mem_vmalloc(mem_tr, i) = FALSE;
+				goto finish_free;
+			}
+		}
+		mem_tr = MEM_TRACKER_next(mem_tr);
+	}
+
+finish_free:
+	spin_unlock(&mem_tr_lock);
+
+	/* must have been of smaller than the size limit for mem tracker nodes*/
+	if (!found) {
+		SOCPERF_PRINT_DEBUG
+		    ("CONTROL_Free_Memory: freeing small memory location 0x%p\n",
+		     location);
+		kfree(location);
+	}
+
+	return NULL;
+}
diff --git a/drivers/external_drivers/drivers/socwatch/soc_perf_driver/control.h b/drivers/external_drivers/drivers/socwatch/soc_perf_driver/control.h
new file mode 100644
index 0000000..10d3caf
--- /dev/null
+++ b/drivers/external_drivers/drivers/socwatch/soc_perf_driver/control.h
@@ -0,0 +1,469 @@
+/* ***********************************************************************************************
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2005-2014 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  BSD LICENSE
+
+  Copyright(c) 2005-2014 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+  ***********************************************************************************************
+*/
+
+#ifndef _CONTROL_H_
+#define _CONTROL_H_
+
+#include <linux/smp.h>
+#include <linux/timer.h>
+#if defined(DRV_IA32)
+#include <asm/apic.h>
+#endif
+#include <asm/io.h>
+#if defined(DRV_IA32)
+#include <asm/msr.h>
+#endif
+#include <asm/atomic.h>
+
+#include "lwpmudrv_defines.h"
+#include "socperfdrv.h"
+#include "lwpmudrv_types.h"
+
+/* large memory allocation will be used if the requested size (in bytes) is*/
+/* above this threshold*/
+#define  MAX_KMALLOC_SIZE ((1<<17)-1)
+
+/* check whether Linux driver should use unlocked ioctls (not protected by BKL)*/
+#if defined(HAVE_UNLOCKED_IOCTL)
+#define DRV_USE_UNLOCKED_IOCTL
+#endif
+#if defined(DRV_USE_UNLOCKED_IOCTL)
+#define IOCTL_OP .unlocked_ioctl
+#define IOCTL_OP_TYPE long
+#define IOCTL_USE_INODE
+#else
+#define IOCTL_OP .ioctl
+#define IOCTL_OP_TYPE S32
+#define IOCTL_USE_INODE struct   inode  *inode,
+#endif
+
+/* Information about the state of the driver*/
+typedef struct GLOBAL_STATE_NODE_S GLOBAL_STATE_NODE;
+typedef GLOBAL_STATE_NODE *GLOBAL_STATE;
+struct GLOBAL_STATE_NODE_S {
+	volatile S32 cpu_count;
+	volatile S32 dpc_count;
+
+	S32 num_cpus;		/* Number of CPUs in the system*/
+	S32 active_cpus;	/* Number of active CPUs - some cores can be*/
+	/* deactivated by the user / admin*/
+	S32 num_em_groups;
+	S32 num_descriptors;
+	volatile S32 current_phase;
+};
+
+/* Access Macros*/
+#define  GLOBAL_STATE_num_cpus(x)          ((x).num_cpus)
+#define  GLOBAL_STATE_active_cpus(x)       ((x).active_cpus)
+#define  GLOBAL_STATE_cpu_count(x)         ((x).cpu_count)
+#define  GLOBAL_STATE_dpc_count(x)         ((x).dpc_count)
+#define  GLOBAL_STATE_num_em_groups(x)     ((x).num_em_groups)
+#define  GLOBAL_STATE_num_descriptors(x)   ((x).num_descriptors)
+#define  GLOBAL_STATE_current_phase(x)     ((x).current_phase)
+#define  GLOBAL_STATE_sampler_id(x)        ((x).sampler_id)
+
+/*
+ *
+ *
+ * CPU State data structure and access macros
+ *
+ */
+typedef struct CPU_STATE_NODE_S CPU_STATE_NODE;
+typedef CPU_STATE_NODE *CPU_STATE;
+struct CPU_STATE_NODE_S {
+	S32 apic_id;		/* Processor ID on the system bus*/
+	PVOID apic_linear_addr;	/* linear address of local apic*/
+	PVOID apic_physical_addr;	/* physical address of local apic*/
+
+	PVOID idt_base;		/* local IDT base address*/
+	atomic_t in_interrupt;
+
+#if defined(DRV_IA32)
+	U64 saved_ih;		/* saved perfvector to restore*/
+#endif
+#if defined(DRV_EM64T)
+	PVOID saved_ih;		/* saved perfvector to restore*/
+#endif
+
+	S64 *em_tables;		/* holds the data that is saved/restored*/
+	/* during event multiplexing*/
+
+	struct timer_list *em_timer;
+	U32 current_group;
+	S32 trigger_count;
+	S32 trigger_event_num;
+
+	DISPATCH dispatch;
+	PVOID lbr_area;
+	PVOID old_dts_buffer;
+	PVOID dts_buffer;
+	U32 initial_mask;
+	U32 accept_interrupt;
+
+#if defined(BUILD_CHIPSET)
+	/* Chipset counter stuff*/
+	U32 chipset_count_init;	/* flag to initialize the last MCH and ICH arrays below.*/
+	U64 last_mch_count[8];
+	U64 last_ich_count[8];
+	U64 last_gmch_count[MAX_CHIPSET_COUNTERS];
+	U64 last_mmio_count[32];	/* it's only 9 now but the next generation may have 29.*/
+#endif
+
+	U64 *pmu_state;		/* holds PMU state (e.g., MSRs) that will be*/
+	/* saved before and restored after collection*/
+	S32 socket_master;
+	S32 core_master;
+	S32 thr_master;
+	U64 num_samples;
+	U64 reset_mask;
+	U64 group_swap;
+	U64 last_uncore_count[16];
+};
+
+#define CPU_STATE_apic_id(cpu)              (cpu)->apic_id
+#define CPU_STATE_apic_linear_addr(cpu)     (cpu)->apic_linear_addr
+#define CPU_STATE_apic_physical_addr(cpu)   (cpu)->apic_physical_addr
+#define CPU_STATE_idt_base(cpu)             (cpu)->idt_base
+#define CPU_STATE_in_interrupt(cpu)         (cpu)->in_interrupt
+#define CPU_STATE_saved_ih(cpu)             (cpu)->saved_ih
+#define CPU_STATE_saved_ih_hi(cpu)          (cpu)->saved_ih_hi
+#define CPU_STATE_dpc(cpu)                  (cpu)->dpc
+#define CPU_STATE_em_tables(cpu)            (cpu)->em_tables
+#define CPU_STATE_pmu_state(cpu)            (cpu)->pmu_state
+#define CPU_STATE_em_dpc(cpu)               (cpu)->em_dpc
+#define CPU_STATE_em_timer(cpu)             (cpu)->em_timer
+#define CPU_STATE_current_group(cpu)        (cpu)->current_group
+#define CPU_STATE_trigger_count(cpu)        (cpu)->trigger_count
+#define CPU_STATE_trigger_event_num(cpu)    (cpu)->trigger_event_num
+#define CPU_STATE_dispatch(cpu)             (cpu)->dispatch
+#define CPU_STATE_lbr(cpu)                  (cpu)->lbr
+#define CPU_STATE_old_dts_buffer(cpu)       (cpu)->old_dts_buffer
+#define CPU_STATE_dts_buffer(cpu)           (cpu)->dts_buffer
+#define CPU_STATE_initial_mask(cpu)         (cpu)->initial_mask
+#define CPU_STATE_accept_interrupt(cpu)     (cpu)->accept_interrupt
+#define CPU_STATE_msr_value(cpu)            (cpu)->msr_value
+#define CPU_STATE_msr_addr(cpu)             (cpu)->msr_addr
+#define CPU_STATE_socket_master(cpu)        (cpu)->socket_master
+#define CPU_STATE_core_master(cpu)          (cpu)->core_master
+#define CPU_STATE_thr_master(cpu)           (cpu)->thr_master
+#define CPU_STATE_num_samples(cpu)          (cpu)->num_samples
+#define CPU_STATE_reset_mask(cpu)           (cpu)->reset_mask
+#define CPU_STATE_group_swap(cpu)           (cpu)->group_swap
+
+/*
+ * For storing data for --read/--write-msr command line options
+ */
+typedef struct MSR_DATA_NODE_S MSR_DATA_NODE;
+typedef MSR_DATA_NODE *MSR_DATA;
+struct MSR_DATA_NODE_S {
+	U64 value;		/* Used for emon, for read/write-msr value*/
+	U64 addr;
+};
+
+#define MSR_DATA_value(md)   (md)->value
+#define MSR_DATA_addr(md)    (md)->addr
+
+/*
+ * Memory Allocation tracker
+ *
+ * Currently used to track large memory allocations
+ */
+
+typedef struct MEM_EL_NODE_S MEM_EL_NODE;
+typedef MEM_EL_NODE *MEM_EL;
+struct MEM_EL_NODE_S {
+	char *address;		/* pointer to piece of memory we're tracking*/
+	S32 size;		/* size (bytes) of the piece of memory*/
+	DRV_BOOL is_addr_vmalloc;	/* flag to check if the memory is allocated using vmalloc*/
+};
+
+/* accessors for MEM_EL defined in terms of MEM_TRACKER below*/
+
+#define MEM_EL_MAX_ARRAY_SIZE  32	/* minimum is 1, nominal is 64*/
+
+typedef struct MEM_TRACKER_NODE_S MEM_TRACKER_NODE;
+typedef MEM_TRACKER_NODE *MEM_TRACKER;
+struct MEM_TRACKER_NODE_S {
+	S32 max_size;		/* number of elements in the array (default: MEM_EL_MAX_ARRAY_SIZE)*/
+	MEM_EL mem;		/* array of large memory items we're tracking*/
+	MEM_TRACKER prev, next;	/* enables bi-directional scanning of linked list*/
+};
+#define MEM_TRACKER_max_size(mt)         (mt)->max_size
+#define MEM_TRACKER_mem(mt)              (mt)->mem
+#define MEM_TRACKER_prev(mt)             (mt)->prev
+#define MEM_TRACKER_next(mt)             (mt)->next
+#define MEM_TRACKER_mem_address(mt, i)   (MEM_TRACKER_mem(mt)[(i)].address)
+#define MEM_TRACKER_mem_size(mt, i)      (MEM_TRACKER_mem(mt)[(i)].size)
+#define MEM_TRACKER_mem_vmalloc(mt, i)   (MEM_TRACKER_mem(mt)[(i)].is_addr_vmalloc)
+
+/****************************************************************************
+ ** Global State variables exported
+ ***************************************************************************/
+extern CPU_STATE pcb;
+extern U64 *tsc_info;
+extern GLOBAL_STATE_NODE driver_state;
+extern MSR_DATA msr_data;
+extern U32 *core_to_package_map;
+extern U32 num_packages;
+extern U64 *restore_bl_bypass;
+extern U32 **restore_ha_direct2core;
+extern U32 **restore_qpi_direct2core;
+/****************************************************************************
+ **  Handy Short cuts
+ ***************************************************************************/
+
+/*
+ * CONTROL_THIS_CPU()
+ *     Parameters
+ *         None
+ *     Returns
+ *         CPU number of the processor being executed on
+ *
+ */
+#define CONTROL_THIS_CPU()     smp_processor_id()
+
+/****************************************************************************
+ **  Interface definitions
+ ***************************************************************************/
+
+/*
+ *  Execution Control Functions
+ */
+
+extern VOID CONTROL_Invoke_Cpu(S32 cpuid, VOID(*func) (PVOID), PVOID ctx);
+
+/*
+ * @fn VOID CONTROL_Invoke_Parallel_Service(func, ctx, blocking, exclude)
+ *
+ * @param    func     - function to be invoked by each core in the system
+ * @param    ctx      - pointer to the parameter block for each function invocation
+ * @param    blocking - Wait for invoked function to complete
+ * @param    exclude  - exclude the current core from executing the code
+ *
+ * @returns  none
+ *
+ * @brief    Service routine to handle all kinds of parallel invoke on all CPU calls
+ *
+ * <I>Special Notes:</I>
+ *         Invoke the function provided in parallel in either a blocking/non-blocking mode.
+ *         The current core may be excluded if desired.
+ *         NOTE - Do not call this function directly from source code.  Use the aliases
+ *         CONTROL_Invoke_Parallel(), CONTROL_Invoke_Parallel_NB(), CONTROL_Invoke_Parallel_XS().
+ *
+ */
+extern VOID
+CONTROL_Invoke_Parallel_Service(VOID(*func) (PVOID),
+				PVOID ctx, S32 blocking, S32 exclude);
+
+/*
+ * @fn VOID CONTROL_Invoke_Parallel(func, ctx)
+ *
+ * @param    func     - function to be invoked by each core in the system
+ * @param    ctx      - pointer to the parameter block for each function invocation
+ *
+ * @returns  none
+ *
+ * @brief    Invoke the named function in parallel. Wait for all the functions to complete.
+ *
+ * <I>Special Notes:</I>
+ *        Invoke the function named in parallel, including the CPU that the control is
+ *        being invoked on
+ *        Macro built on the service routine
+ *
+ */
+#define CONTROL_Invoke_Parallel(a,b)      CONTROL_Invoke_Parallel_Service((a),(b),TRUE,FALSE)
+
+/*
+ * @fn VOID CONTROL_Invoke_Parallel_NB(func, ctx)
+ *
+ * @param    func     - function to be invoked by each core in the system
+ * @param    ctx      - pointer to the parameter block for each function invocation
+ *
+ * @returns  none
+ *
+ * @brief    Invoke the named function in parallel. DO NOT Wait for all the functions to complete.
+ *
+ * <I>Special Notes:</I>
+ *        Invoke the function named in parallel, including the CPU that the control is
+ *        being invoked on
+ *        Macro built on the service routine
+ *
+ */
+#define CONTROL_Invoke_Parallel_NB(a,b)   CONTROL_Invoke_Parallel_Service((a),(b),FALSE,FALSE)
+
+/*
+ * @fn VOID CONTROL_Invoke_Parallel_XS(func, ctx)
+ *
+ * @param    func     - function to be invoked by each core in the system
+ * @param    ctx      - pointer to the parameter block for each function invocation
+ *
+ * @returns  none
+ *
+ * @brief    Invoke the named function in parallel. Wait for all the functions to complete.
+ *
+ * <I>Special Notes:</I>
+ *        Invoke the function named in parallel, excluding the CPU that the control is
+ *        being invoked on
+ *        Macro built on the service routine
+ *
+ */
+#define CONTROL_Invoke_Parallel_XS(a,b)   CONTROL_Invoke_Parallel_Service((a),(b),TRUE,TRUE)
+
+/*
+ * @fn VOID CONTROL_Memory_Tracker_Init(void)
+ *
+ * @param    None
+ *
+ * @returns  None
+ *
+ * @brief    Initializes Memory Tracker
+ *
+ * <I>Special Notes:</I>
+ *           This should only be called when the
+ *           the driver is being loaded.
+ */
+extern VOID CONTROL_Memory_Tracker_Init(VOID);
+
+/*
+ * @fn VOID CONTROL_Memory_Tracker_Free(void)
+ *
+ * @param    None
+ *
+ * @returns  None
+ *
+ * @brief    Frees memory used by Memory Tracker
+ *
+ * <I>Special Notes:</I>
+ *           This should only be called when the
+ *           driver is being unloaded.
+ */
+extern VOID CONTROL_Memory_Tracker_Free(VOID);
+
+/*
+ * @fn VOID CONTROL_Memory_Tracker_Compaction(void)
+ *
+ * @param    None
+ *
+ * @returns  None
+ *
+ * @brief    Compacts the memory allocator if holes are detected
+ *
+ * <I>Special Notes:</I>
+ *           At end of collection (or at other safe sync point),
+ *           reclaim/compact space used by mem tracker
+ */
+extern VOID CONTROL_Memory_Tracker_Compaction(void
+    );
+
+/*
+ * @fn PVOID CONTROL_Allocate_Memory(size)
+ *
+ * @param    IN size     - size of the memory to allocate
+ *
+ * @returns  char*       - pointer to the allocated memory block
+ *
+ * @brief    Allocate and zero memory
+ *
+ * <I>Special Notes:</I>
+ *           Allocate memory in the GFP_KERNEL pool.
+ *
+ *           Use this if memory is to be allocated within a context where
+ *           the allocator can block the allocation (e.g., by putting
+ *           the caller to sleep) while it tries to free up memory to
+ *           satisfy the request.  Otherwise, if the allocation must
+ *           occur atomically (e.g., caller cannot sleep), then use
+ *           CONTROL_Allocate_KMemory instead.
+ */
+extern PVOID CONTROL_Allocate_Memory(size_t size);
+
+/*
+ * @fn PVOID CONTROL_Allocate_KMemory(size)
+ *
+ * @param    IN size     - size of the memory to allocate
+ *
+ * @returns  char*       - pointer to the allocated memory block
+ *
+ * @brief    Allocate and zero memory
+ *
+ * <I>Special Notes:</I>
+ *           Allocate memory in the GFP_ATOMIC pool.
+ *
+ *           Use this if memory is to be allocated within a context where
+ *           the allocator cannot block the allocation (e.g., by putting
+ *           the caller to sleep) as it tries to free up memory to
+ *           satisfy the request.  Examples include interrupt handlers,
+ *           process context code holding locks, etc.
+ */
+extern PVOID CONTROL_Allocate_KMemory(size_t size);
+
+/*
+ * @fn PVOID CONTROL_Free_Memory(location)
+ *
+ * @param    IN location  - size of the memory to allocate
+ *
+ * @returns  pointer to the allocated memory block
+ *
+ * @brief    Frees the memory block
+ *
+ * <I>Special Notes:</I>
+ *           Does not try to free memory if fed with a NULL pointer
+ *           Expected usage:
+ *               ptr = CONTROL_Free_Memory(ptr);
+ */
+extern PVOID CONTROL_Free_Memory(PVOID location);
+
+#endif
diff --git a/drivers/external_drivers/drivers/socwatch/soc_perf_driver/ecb_iterators.h b/drivers/external_drivers/drivers/socwatch/soc_perf_driver/ecb_iterators.h
new file mode 100644
index 0000000..ff6378f
--- /dev/null
+++ b/drivers/external_drivers/drivers/socwatch/soc_perf_driver/ecb_iterators.h
@@ -0,0 +1,293 @@
+/* ***********************************************************************************************
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2005-2014 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  BSD LICENSE
+
+  Copyright(c) 2005-2014 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+  ***********************************************************************************************
+*/
+
+#ifndef _ECB_ITERATORS_H_
+#define _ECB_ITERATORS_H_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/**/
+/* Loop macros to walk through the event control block*/
+/* Use for access only in the kernel mode*/
+/* To Do - Control access from kernel mode by a macro*/
+/**/
+#define FOR_EACH_CCCR_REG(pecb,idx) {                                                  \
+    U32        (idx);                                                                  \
+    U32        this_cpu__ = CONTROL_THIS_CPU();                                        \
+    CPU_STATE  pcpu__  = &pcb[this_cpu__];                                             \
+    ECB        (pecb) = PMU_register_data[CPU_STATE_current_group(pcpu__)];            \
+    if ((pecb)) {                                                                      \
+        for ((idx) = ECB_cccr_start(pecb);                                             \
+             (idx) < ECB_cccr_start(pecb)+ECB_cccr_pop(pecb);                          \
+             (idx)++) {                                                                \
+            if (ECB_entries_reg_id((pecb),(idx)) == 0) {                               \
+                continue;                                                              \
+            }
+
+#define END_FOR_EACH_CCCR_REG  }}}
+
+#define FOR_EACH_CCCR_GP_REG(pecb,idx) {                                               \
+    U32        (idx);                                                                  \
+    U32        this_cpu__ = CONTROL_THIS_CPU();                                        \
+    CPU_STATE  pcpu__  = &pcb[this_cpu__];                                             \
+    ECB        (pecb) = PMU_register_data[CPU_STATE_current_group(pcpu__)];            \
+    if ((pecb)) {                                                                      \
+        for ((idx) = ECB_cccr_start(pecb);                                             \
+             (idx) < ECB_cccr_start(pecb)+ECB_cccr_pop(pecb);                          \
+             (idx)++) {                                                                \
+            if (ECB_entries_is_gp_reg_get((pecb),(idx)) == 0) {                        \
+                continue;                                                              \
+            }
+
+#define END_FOR_EACH_CCCR_GP_REG  }}}
+
+#define FOR_EACH_ESCR_REG(pecb,idx) {                                                  \
+    U32        (idx);                                                                  \
+    U32        this_cpu__ = CONTROL_THIS_CPU();                                        \
+    CPU_STATE  pcpu__  = &pcb[this_cpu__];                                             \
+    ECB        (pecb) = PMU_register_data[CPU_STATE_current_group(pcpu__)];            \
+    if ((pecb)) {                                                                      \
+        for ((idx) = ECB_escr_start(pecb);                                             \
+             (idx) < ECB_escr_start(pecb)+ECB_escr_pop(pecb);                          \
+             (idx)++) {                                                                \
+            if (ECB_entries_reg_id((pecb),(idx)) == 0) {                               \
+                continue;                                                              \
+            }
+
+#define END_FOR_EACH_ESCR_REG  }}}
+
+#define FOR_EACH_DATA_REG(pecb,idx) {                                                  \
+    U32        (idx);                                                                  \
+    U32        this_cpu__ = CONTROL_THIS_CPU();                                        \
+    CPU_STATE  pcpu__  = &pcb[this_cpu__];                                             \
+    ECB        (pecb) = PMU_register_data[CPU_STATE_current_group(pcpu__)];            \
+    if ((pecb)) {                                                                      \
+        for ((idx) = ECB_data_start(pecb);                                             \
+             (idx) < ECB_data_start(pecb)+ECB_data_pop(pecb);                          \
+             (idx)++) {                                                                \
+            if (ECB_entries_reg_id((pecb),(idx)) == 0) {                               \
+                continue;                                                              \
+            }
+
+#define END_FOR_EACH_DATA_REG  }}}
+
+#define FOR_EACH_DATA_REG_UNC(pecb,device_idx,idx) {                                      \
+    U32        (idx);                                                                     \
+    U32        (cur_grp) = LWPMU_DEVICE_cur_group(&devices[(device_idx)]);                \
+    ECB        (pecb) = LWPMU_DEVICE_PMU_register_data(&devices[(device_idx)])[cur_grp];  \
+    if ((pecb)) {                                                                         \
+      for ((idx) = ECB_data_start(pecb);                                                  \
+           (idx) < ECB_data_start(pecb)+ECB_data_pop(pecb);                               \
+           (idx)++) {                                                                     \
+          if (ECB_entries_reg_id((pecb),(idx)) == 0) {                                    \
+              continue;                                                                   \
+    }
+
+#define END_FOR_EACH_DATA_REG_UNC  }}}
+
+#define FOR_EACH_DATA_GP_REG(pecb,idx) {                                               \
+    U32        (idx);                                                                  \
+    U32        this_cpu__ = CONTROL_THIS_CPU();                                        \
+    CPU_STATE  pcpu__  = &pcb[this_cpu__];                                             \
+    ECB        (pecb) = PMU_register_data[CPU_STATE_current_group(pcpu__)];            \
+    if ((pecb)) {                                                                      \
+        for ((idx) = ECB_data_start(pecb);                                             \
+             (idx) < ECB_data_start(pecb)+ECB_data_pop(pecb);                          \
+             (idx)++) {                                                                \
+            if (ECB_entries_is_gp_reg_get((pecb),(idx)) == 0) {                        \
+                continue;                                                              \
+            }
+
+#define END_FOR_EACH_DATA_GP_REG  }}}
+
+#define FOR_EACH_DATA_GENERIC_REG(pecb,idx) {                                          \
+    U32        (idx);                                                                  \
+    U32        this_cpu__ = CONTROL_THIS_CPU();                                        \
+    CPU_STATE  pcpu__  = &pcb[this_cpu__];                                             \
+    ECB        (pecb) = PMU_register_data[CPU_STATE_current_group(pcpu__)];            \
+    if ((pecb)) {                                                                      \
+        for ((idx) = ECB_data_start(pecb);                                             \
+             (idx) < ECB_data_start(pecb)+ECB_data_pop(pecb);                          \
+             (idx)++) {                                                                \
+            if (ECB_entries_is_generic_reg_get((pecb),(idx)) == 0) {                   \
+                continue;                                                              \
+            }
+
+#define END_FOR_EACH_DATA_GENERIC_REG  }}}
+
+#define FOR_EACH_REG_ENTRY(pecb,idx) {                                                 \
+    U32        (idx);                                                                  \
+    U32        this_cpu__ = CONTROL_THIS_CPU();                                        \
+    CPU_STATE  pcpu__  = &pcb[this_cpu__];                                             \
+    ECB        (pecb) = PMU_register_data[CPU_STATE_current_group(pcpu__)];            \
+    if ((pecb)) {                                                                      \
+    for ((idx) = 0; (idx) < ECB_num_entries(pecb); (idx)++) {                          \
+        if (ECB_entries_reg_id((pecb),(idx)) == 0) {                                   \
+            continue;                                                                  \
+        }
+
+#define END_FOR_EACH_REG_ENTRY  }}}
+
+#define FOR_EACH_REG_ENTRY_UNC(pecb,device_idx,idx) {                                          \
+    U32        (idx);                                                                          \
+    U32        (cur_grp) = LWPMU_DEVICE_cur_group(&devices[(device_idx)]);                     \
+    ECB        (pecb)    = LWPMU_DEVICE_PMU_register_data(&devices[(device_idx)])[(cur_grp)];  \
+    if ((pecb)) {                                                                              \
+        for ((idx) = 0; (idx) < ECB_num_entries(pecb); (idx)++) {                              \
+            if (ECB_entries_reg_id((pecb),(idx)) == 0) {                                       \
+                continue;                                                                      \
+            }
+
+#define END_FOR_EACH_REG_ENTRY_UNC  }}}
+
+#define FOR_EACH_PCI_DATA_REG(pecb,i, device_idx, offset_delta) {                                       \
+    U32                 (i)    = 0;                                                                     \
+    U32              (cur_grp) = LWPMU_DEVICE_cur_group(&devices[(device_idx)]);                        \
+    ECB                 (pecb) = LWPMU_DEVICE_PMU_register_data(&devices[(device_idx)])[(cur_grp)];     \
+    if ((pecb)) {                                                                                       \
+        for ((i) = ECB_data_start(pecb);                                                                \
+             (i) < ECB_data_start(pecb)+ECB_data_pop(pecb);                                             \
+             (i)++) {                                                                                   \
+            if (ECB_entries_pci_id_offset((pecb),(i)) == 0) {                                           \
+                continue;                                                                               \
+            }                                                                                           \
+            (offset_delta) =  ECB_entries_pci_id_offset(pecb,i) -                                       \
+                              DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio(&ECB_pcidev_entry_node(pecb));
+
+#define END_FOR_EACH_PCI_DATA_REG    } } }
+
+#define FOR_EACH_PCI_DATA_REG_RAW(pecb,i, device_idx ) {                                                \
+    U32                 (i)       = 0;                                                                  \
+    U32                 (cur_grp) = LWPMU_DEVICE_cur_group(&devices[(dev_idx)]);                        \
+    ECB                 (pecb)    = LWPMU_DEVICE_PMU_register_data(&devices[(device_idx)])[(cur_grp)];  \
+    if ((pecb)) {                                                                                       \
+        for ((i) = ECB_data_start(pecb);                                                                \
+             (i) < ECB_data_start(pecb)+ECB_data_pop(pecb);                                             \
+             (i)++) {                                                                                   \
+            if (ECB_entries_pci_id_offset((pecb),(i)) == 0) {                                           \
+                continue;                                                                               \
+            }
+
+#define END_FOR_EACH_PCI_DATA_REG_RAW    } } }
+
+#define FOR_EACH_PCI_CCCR_REG_RAW(pecb,i, device_idx ) {                                            \
+    U32              (i)       = 0;                                                                 \
+    U32              (cur_grp) = LWPMU_DEVICE_cur_group(&devices[(dev_idx)]);                       \
+    ECB              (pecb)    = LWPMU_DEVICE_PMU_register_data(&devices[(device_idx)])[(cur_grp)]; \
+    if ((pecb)) {                                                                                   \
+        for ((i) = ECB_cccr_start(pecb);                                                            \
+             (i) < ECB_cccr_start(pecb)+ECB_cccr_pop(pecb);                                         \
+             (i)++) {                                                                               \
+            if (ECB_entries_pci_id_offset((pecb),(i)) == 0) {                                       \
+                continue;                                                                           \
+            }
+
+#define END_FOR_EACH_PCI_CCCR_REG_RAW   } } }
+
+#define FOR_EACH_PCI_REG_RAW(pecb, i, device_idx ) {                                                   \
+    U32                 (i)       = 0;                                                                 \
+    U32                 (cur_grp) = LWPMU_DEVICE_cur_group(device_uncore);                       \
+    ECB                 (pecb)    = LWPMU_DEVICE_PMU_register_data(device_uncore)[(cur_grp)]; \
+    if ((pecb)) {                                                                                      \
+        for ((i) = 0;                                                                                  \
+             (i) < ECB_num_entries(pecb);                                                              \
+             (i)++) {                                                                                  \
+            if (ECB_entries_pci_id_offset((pecb),(i)) == 0) {                                          \
+                continue;                                                                              \
+            }
+
+#define END_FOR_EACH_PCI_REG_RAW   } } }
+
+#define FOR_EACH_DATA_REG_UNC_VER2(pecb,i, device_idx ) {                                                     \
+    U32            (i)    = 0;                                                                                \
+    U32            (cur_grp) = LWPMU_DEVICE_cur_group(&devices[(device_idx)]);                                \
+    ECB            (pecb) = LWPMU_DEVICE_PMU_register_data(&devices[(device_idx)])[cur_grp];                  \
+    if ((pecb)) {                                                                                             \
+        for ((i) = ECB_data_start(pecb);                                                                      \
+             (i) < ECB_data_start(pecb)+ECB_data_pop(pecb);                                                   \
+             (i)++) {                                                                                         \
+             if ((ECB_flags(pecb) & ECB_pci_id_offset_bit)  && (ECB_entries_pci_id_offset(pecb,i) == 0) ){    \
+                  continue;                                                                                   \
+             }                                                                                                \
+             else if (ECB_entries_reg_id(pecb,i) == 0) {                                                      \
+                 continue;                                                                                    \
+             }                                                                                                \
+             if (ECB_entries_emon_event_id_index_local(pecb_unc, k)) {                                        \
+                 continue;                                                                                    \
+             }
+
+#define END_FOR_EACH_DATA_REG_UNC_VER2    } } }
+
+#define CHECK_SAVE_RESTORE_EVENT_INDEX(prev_ei, cur_ei, evt_index)  {                                   \
+        if (prev_ei == -1) {                                                                            \
+            prev_ei = cur_ei;                                                                           \
+        }                                                                                               \
+        if (prev_ei < cur_ei) {                                                                         \
+            prev_ei = cur_ei;                                                                           \
+            evt_index++;                                                                                \
+        }                                                                                               \
+        else {                                                                                          \
+             evt_index = 0;                                                                             \
+             prev_ei = cur_ei;                                                                          \
+        }}
+
+#if defined(__cplusplus)
+}
+#endif
+#endif
diff --git a/drivers/external_drivers/drivers/socwatch/soc_perf_driver/lwpmudrv_defines.h b/drivers/external_drivers/drivers/socwatch/soc_perf_driver/lwpmudrv_defines.h
new file mode 100644
index 0000000..d0d8023
--- /dev/null
+++ b/drivers/external_drivers/drivers/socwatch/soc_perf_driver/lwpmudrv_defines.h
@@ -0,0 +1,394 @@
+/* ***********************************************************************************************
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2007-2014 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  BSD LICENSE
+
+  Copyright(c) 2007-2014 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+  ***********************************************************************************************
+*/
+
+#ifndef  _LWPMUDRV_DEFINES_H_
+#define  _LWPMUDRV_DEFINES_H_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+/**/
+/* Start off with none of the OS'es are defined*/
+/**/
+#undef DRV_OS_WINDOWS
+#undef DRV_OS_LINUX
+#undef DRV_OS_SOLARIS
+#undef DRV_OS_MAC
+#undef DRV_OS_ANDROID
+
+/**/
+/* Make sure none of the architectures is defined here*/
+/**/
+#undef DRV_IA32
+#undef DRV_EM64T
+#undef DRV_IA64
+
+/**/
+/* Make sure one (and only one) of the OS'es gets defined here*/
+/**/
+/* Unfortunately entirex defines _WIN32 so we need to check for linux*/
+/* first.  The definition of these flags is one and only one*/
+/* _OS_xxx is allowed to be defined.*/
+/**/
+#if defined(__ANDROID__)
+#define DRV_OS_ANDROID
+#elif defined(__linux__)
+#define DRV_OS_LINUX
+#elif defined(sun)
+#define DRV_OS_SOLARIS
+#elif defined(_WIN32)
+#define DRV_OS_WINDOWS
+#elif defined(__APPLE__)
+#define DRV_OS_MAC
+#elif defined(__FreeBSD__)
+#define DRV_OS_FREEBSD
+#else
+#error "Compiling for an unknown OS"
+#endif
+
+/**/
+/* Make sure one (and only one) architecture is defined here*/
+/* as well as one (and only one) pointer__ size*/
+/**/
+#if defined(_M_IX86) || defined(__i386__)
+#define DRV_IA32
+#elif defined(_M_IA64) || defined(__ia64__)
+#define DRV_IA64
+#elif defined(_M_AMD64) || defined(__x86_64__)
+#define DRV_EM64T
+#else
+#error "Unknown architecture for compilation"
+#endif
+
+/**/
+/* Add a well defined definition of compiling for release (free) vs.*/
+/* debug (checked). Once again, don't assume these are the only two values,*/
+/* always have an else clause in case we want to expand this.*/
+/**/
+#if defined(DRV_OS_LINUX) || defined(DRV_OS_SOLARIS) || defined(DRV_OS_MAC) || defined(DRV_OS_ANDROID) || defined(DRV_OS_FREEBSD)
+#define WINAPI
+#endif
+
+/*
+ *  Add OS neutral defines for file processing.  This is needed in both
+ *  the user code and the kernel code for cleanliness
+ */
+#undef DRV_FILE_DESC
+#undef DRV_INVALID_FILE_DESC_VALUE
+#define DRV_ASSERT  assert
+
+#if defined(DRV_OS_WINDOWS)
+
+#define DRV_FILE_DESC                HANDLE
+#define DRV_INVALID_FILE_DESC_VALUE  INVALID_HANDLE_VALUE
+
+#define OUT
+#define IN
+#define INOUT
+
+#elif defined(DRV_OS_LINUX) || defined(DRV_OS_SOLARIS) || defined(DRV_OS_ANDROID)
+
+#if defined(DRV_IA64)
+#define DRV_IOCTL_FILE_DESC                 S32
+#else
+#define DRV_IOCTL_FILE_DESC                 SIOP
+#endif
+#define DRV_FILE_DESC                       SIOP
+#define DRV_INVALID_FILE_DESC_VALUE   -1
+
+#define OUT
+#define IN
+#define INOUT
+
+#elif defined(DRV_OS_MAC) || defined(DRV_OS_FREEBSD)
+
+#define DRV_IOCTL_FILE_DESC                 S64
+#define DRV_FILE_DESC                       S64
+#define DRV_INVALID_FILE_DESC_VALUE   -1
+
+#define OUT
+#define IN
+#define INOUT
+
+#else
+
+#error "Compiling for an unknown OS"
+
+#endif
+
+#if defined(DRV_OS_WINDOWS)
+#define DRV_DLLIMPORT      __declspec(dllimport)
+#define DRV_DLLEXPORT      __declspec(dllexport)
+#endif
+#if defined(DRV_OS_LINUX) || defined(DRV_OS_SOLARIS) || defined(DRV_OS_MAC) || defined(DRV_OS_ANDROID) || defined(DRV_OS_FREEBSD)
+#define DRV_DLLIMPORT
+#define DRV_DLLEXPORT
+#endif
+
+#if defined(DRV_OS_WINDOWS)
+#define FSI64RAW              "I64"
+#define FSS64                 "%"FSI64RAW"d"
+#define FSU64                 "%"FSI64RAW"u"
+#define FSX64                 "%"FSI64RAW"x"
+#define DRV_PATH_SEPARATOR    "\\"
+#define L_DRV_PATH_SEPARATOR L"\\"
+#endif
+#if defined(DRV_OS_LINUX) || defined(DRV_OS_SOLARIS) || defined(DRV_OS_MAC) || defined(DRV_OS_ANDROID) || defined(DRV_OS_FREEBSD)
+#define FSI64RAW              "ll"
+#define FSS64                 "%"FSI64RAW"d"
+#define FSU64                 "%"FSI64RAW"u"
+#define FSX64                 "%"FSI64RAW"x"
+#define DRV_PATH_SEPARATOR    "/"
+#define L_DRV_PATH_SEPARATOR L"/"
+#endif
+
+#if defined(DRV_OS_WINDOWS) || defined(DRV_OS_FREEBSD)
+#define DRV_RTLD_NOW    0
+#endif
+#if defined(DRV_OS_LINUX) || defined(DRV_OS_SOLARIS) || defined(DRV_OS_MAC) || defined(DRV_OS_ANDROID)
+#define DRV_RTLD_NOW    RTLD_NOW
+#endif
+
+#define DRV_STRLEN                       strlen
+#define DRV_WCSLEN                       wcslen
+#define DRV_STRCSPN                      strcspn
+#define DRV_STRCHR                       strchr
+#define DRV_STRRCHR                      strrchr
+#define DRV_WCSRCHR                      wcsrchr
+#if defined(DRV_OS_WINDOWS)
+#define DRV_STRCPY                       strcpy_s
+#define DRV_STRNCPY                      strncpy_s
+#define DRV_STRICMP                     _stricmp
+#define DRV_STRNCMP                      strncmp
+#define DRV_STRNICMP                    _strnicmp
+#define DRV_STRDUP                      _strdup
+#define DRV_WCSDUP                      _wcsdup
+#define DRV_STRCMP                       strcmp
+#define DRV_WCSCMP                       wcscmp
+#define DRV_SNPRINTF                    _snprintf_s
+#define DRV_SNWPRINTF                   _snwprintf_s
+#define DRV_VSNPRINTF                   _vsnprintf_s
+#define DRV_SSCANF                       sscanf_s
+#define DRV_STRCAT                       strcat_s
+#define DRV_MEMCPY                       memcpy_s
+#define DRV_STRTOK                       strtok_s
+#define DRV_STRTOUL                      strtoul
+#define DRV_STRTOULL                     strtoull
+#define DRV_STRTOQ                      _strtoui64
+#define DRV_FOPEN(fp,name,mode)          fopen_s(&(fp),(name),(mode))
+#define DRV_WFOPEN(fp,name,mode)        _wfopen_s(&(fp),(name),(mode))
+#define DRV_FCLOSE(fp)                   if ((fp) != NULL) { fclose((fp)); }
+#define DRV_WCSCPY                       wcscpy_s
+#define DRV_WCSNCPY                      wcsncpy_s
+#define DRV_WCSCAT                       wcscat_s
+#define DRV_WCSTOK                       wcstok_s
+#define DRV_STRERROR                     strerror_s
+#define DRV_VSPRINTF                     vsprintf_s
+#define DRV_VSWPRINTF                    vswprintf_s
+#define DRV_GETENV_S                     getenv_s
+#define DRV_WGETENV_S                    wgetenv_s
+#define DRV_PUTENV(name)                _putenv(name)
+#define DRV_USTRCMP(X, Y)                DRV_WCSCMP(X, Y)
+#define DRV_USTRDUP(X)                   DRV_WCSDUP(X)
+#if defined(DRV_EM64T)
+#define DRV_GETENV(buf,buf_size,name)   {(buf)  = getenv((name)); \
+                                         (buf_size) = ((buf) == NULL) ? 0 : DRV_STRLEN((buf));}
+#define DRV_WGETENV(buf,buf_size,name)  {(buf)  = _wgetenv((name)); \
+                                         (buf_size) = ((buf) == NULL) ? 0 : DRV_WCSLEN((buf));}
+#else
+#define DRV_GETENV(buf,buf_size,name)   _dupenv_s(&(buf),&(buf_size),(name))
+#define DRV_WGETENV(buf,buf_size,name)  _wdupenv_s(&(buf),&(buf_size),(name))
+#endif
+#endif
+
+#if defined(DRV_OS_LINUX) || defined(DRV_OS_SOLARIS) || defined(DRV_OS_MAC) || defined(DRV_OS_ANDROID) || defined(DRV_OS_FREEBSD)
+/*
+   Note: Many of the following macros have a "size" as the second argument.  Generally
+         speaking, this is for compatibility with the _s versions available on Windows.
+         On Linux/Solaris/Mac, it is ignored.  On Windows, it is the size of the destination
+         buffer and is used wrt memory checking features available in the C runtime in debug
+         mode.  Do not confuse it with the number of bytes to be copied, or such.
+
+         On Windows, this size should correspond to the number of allocated characters
+         (char or wchar_t) pointed to by the first argument.  See MSDN for more details.
+*/
+#define DRV_STRICMP                              strcasecmp
+#define DRV_STRDUP                               strdup
+#define DRV_STRCMP                               strcmp
+#define DRV_STRNCMP                              strncmp
+#define DRV_SNPRINTF(buf,buf_size,length,args...)   snprintf((buf),(length),##args)
+#define DRV_SNWPRINTF(buf,buf_size,length,args...)  snwprintf((buf),(length),##args)
+#define DRV_VSNPRINTF(buf,buf_size,length,args...)  vsnprintf((buf),(length),##args)
+#define DRV_SSCANF                               sscanf
+#define DRV_STRCPY(dst,dst_size,src)             strcpy((dst),(src))
+#define DRV_STRNCPY(dst,dst_size,src,n)          strncpy((dst),(src),(n))
+#define DRV_STRCAT(dst,dst_size,src)             strcat((dst),(src))
+#define DRV_MEMCPY(dst,dst_size,src,n)           memcpy((dst),(src), (n))
+#define DRV_STRTOK(tok,delim,context)            strtok((tok),(delim))
+#define DRV_STRTOUL                              strtoul
+#define DRV_STRTOULL                             strtoull
+#define DRV_STRTOL                               strtol
+#define DRV_FOPEN(fp,name,mode)                  (fp) = fopen((name),(mode))
+#define DRV_FCLOSE(fp)                           if ((fp) != NULL) { fclose((fp)); }
+#define DRV_WCSCPY(dst,dst_size,src)             wcscpy((dst),(src))
+#define DRV_WCSNCPY(dst,dst_size,src,count)      wcsncpy((dst),(src),(count))
+#define DRV_WCSCAT(dst,dst_size,src)             wcscat((dst),(src))
+#define DRV_WCSTOK(tok,delim,context)            wcstok((tok),(delim))
+#define DRV_STRERROR                             strerror
+#define DRV_VSPRINTF(dst,dst_size,length,args...)    vsprintf((dst),(length),##args)
+#define DRV_VSWPRINTF(dst,dst_size,length,args...)   vswprintf((dst),(length),##args)
+#define DRV_GETENV_S(dst,dst_size)               getenv(dst)
+#define DRV_WGETENV_S(dst,dst_size)              wgetenv(dst)
+#define DRV_PUTENV(name)                         putenv(name)
+#define DRV_GETENV(buf,buf_size,name)            ((buf)=getenv((name)))
+#define DRV_USTRCMP(X, Y)                        DRV_STRCMP(X, Y)
+#define DRV_USTRDUP(X)                           DRV_STRDUP(X)
+#endif
+
+#if defined(DRV_OS_LINUX) || defined(DRV_OS_MAC) || defined(DRV_OS_FREEBSD)
+#define DRV_STRTOQ                               strtoq
+#endif
+
+#if defined(DRV_OS_ANDROID)
+#define DRV_STRTOQ                               strtol
+#endif
+
+#if defined(DRV_OS_SOLARIS)
+#define DRV_STRTOQ                               strtoll
+#endif
+
+#if defined(DRV_OS_LINUX)
+#define DRV_WCSDUP                               wcsdup
+#endif
+
+#if defined(DRV_OS_SOLARIS) || defined(DRV_OS_MAC) || defined(DRV_OS_ANDROID) || defined(DRV_OS_FREEBSD)
+#define DRV_WCSDUP                               solaris_wcsdup
+#endif
+
+/*
+ * Windows uses wchar_t and linux uses char for strings.
+ * Need an extra level of abstraction to standardize it.
+ */
+#if defined(DRV_OS_WINDOWS)
+#define DRV_STDUP                               DRV_WCSDUP
+#define DRV_PRINT_STRING(stream, ...)           fwprintf((stream), L"%s", __VA_ARGS__)
+#else
+#define DRV_STDUP                               DRV_STRDUP
+#define DRV_PRINT_STRING(stream, ...)           fprintf((stream), "%s", __VA_ARGS__)
+#endif
+
+/*
+ * OS return types
+ */
+#if defined(DRV_OS_LINUX) || defined(DRV_OS_ANDROID) || defined(DRV_OS_FREEBSD) || defined(DRV_OS_SOLARIS)
+#define OS_STATUS           int
+#define OS_SUCCESS          0
+#define OS_ILLEGAL_IOCTL    -ENOTTY
+#define OS_NO_MEM           -ENOMEM
+#define OS_FAULT            -EFAULT
+#define OS_INVALID          -EINVAL
+#define OS_NO_SYSCALL       -ENOSYS
+#define OS_RESTART_SYSCALL  -ERESTARTSYS
+#define OS_IN_PROGRESS      -EALREADY
+#endif
+
+/****************************************************************************
+ **  Driver State defintions
+ ***************************************************************************/
+#define  DRV_STATE_UNINITIALIZED       0
+#define  DRV_STATE_RESERVED            1
+#define  DRV_STATE_IDLE                2
+#define  DRV_STATE_PAUSED              3
+#define  DRV_STATE_STOPPED             4
+#define  DRV_STATE_RUNNING             5
+#define  DRV_STATE_PAUSING             6
+#define  DRV_STATE_PREPARE_STOP        7
+
+/*
+ *  Stop codes
+ */
+#define DRV_STOP_BASE      0
+#define DRV_STOP_NORMAL    1
+#define DRV_STOP_ASYNC     2
+#define DRV_STOP_CANCEL    3
+
+#define SEP_FREE(loc)   if ((loc)) { free(loc); loc = NULL; }
+
+#define MAX_EVENTS 128		/* Limiting maximum multiplexing events although up to 256 events can be supported.*/
+#if defined(DRV_OS_LINUX) || defined(DRV_OS_SOLARIS) || defined(DRV_OS_MAC) || defined(DRV_OS_ANDROID) || defined(DRV_OS_FREEBSD)
+#define UNREFERENCED_PARAMETER(p)       ((p) = (p))
+#endif
+
+/*
+ * Global marker names
+ */
+#define START_MARKER_NAME       "SEP_START_MARKER"
+#define PAUSE_MARKER_NAME       "SEP_PAUSE_MARKER"
+#define RESUME_MARKER_NAME      "SEP_RESUME_MARKER"
+
+#define DRV_SOC_STRING_LEN      100 + MAX_MARKER_LENGTH
+
+#if defined(DRV_OS_ANDROID)
+#define TEMP_PATH               "/data"
+#else
+#define TEMP_PATH               "/tmp"
+#endif
+
+#if defined(__cplusplus)
+}
+#endif
+#endif
diff --git a/drivers/external_drivers/drivers/socwatch/soc_perf_driver/lwpmudrv_ecb.h b/drivers/external_drivers/drivers/socwatch/soc_perf_driver/lwpmudrv_ecb.h
new file mode 100644
index 0000000..8670dd9
--- /dev/null
+++ b/drivers/external_drivers/drivers/socwatch/soc_perf_driver/lwpmudrv_ecb.h
@@ -0,0 +1,768 @@
+/* ***********************************************************************************************
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2007-2014 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  BSD LICENSE
+
+  Copyright(c) 2007-2014 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+  ***********************************************************************************************
+*/
+
+#ifndef _LWPMUDRV_ECB_H_
+#define _LWPMUDRV_ECB_H_
+
+#if defined(DRV_OS_WINDOWS)
+#pragma warning (disable:4200)
+#endif
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* control register types*/
+#define CCCR                1	/* counter configuration control register*/
+#define ESCR                2	/* event selection control register*/
+#define DATA                4	/* collected as snapshot of current value*/
+#define DATA_RO_DELTA       8	/* read-only counter collected as current-previous*/
+#define DATA_RO_SS          16	/* read-only counter collected as snapshot of current value*/
+
+/* event multiplexing modes*/
+#define EM_DISABLED                -1
+#define EM_TIMER_BASED              0
+#define EM_EVENT_BASED_PROFILING    1
+#define EM_TRIGGER_BASED            2
+
+/* ****************************************************************************/
+
+/*!\struct EVENT_DESC_NODE
+ * \var    sample_size                   - size of buffer in bytes to hold the sample + extras
+ * \var    max_gp_events                 - max number of General Purpose events per EM group
+ * \var    pebs_offset                   - offset in the sample to locate the pebs capture information
+ * \var    lbr_offset                    - offset in the sample to locate the lbr information
+ * \var    lbr_num_regs                  - offset in the sample to locate the number of lbr register information
+ * \var    latency_offset_in_sample      - offset in the sample to locate the latency information
+ * \var    latency_size_in_sample        - size of latency records in the sample
+ * \var    latency_size_from_pebs_record - size of the latency data from pebs record in the sample
+ * \var    latency_offset_in_pebs_record - offset in the sample to locate the latency information
+ *                                         in pebs record
+ * \var    power_offset_in_sample        - offset in the sample to locate the power information
+ * \var    ebc_offset                    - offset in the sample to locate the ebc count information
+ * \var    uncore_ebc_offset             - offset in the sample to locate the uncore ebc count information
+ *
+ * \var    ro_offset                     - offset of RO data in the sample
+ * \var    ro_count                      - total number of RO entries (including all of IEAR/DEAR/BTB/IPEAR)
+ * \var    iear_offset                   - offset into RO data at which IEAR entries begin
+ * \var    dear_offset                   - offset into RO data at which DEAR entries begin
+ * \var    btb_offset                    - offset into RO data at which BTB entries begin (these use the same PMDs)
+ * \var    ipear_offset                  - offset into RO data at which IPEAR entries begin (these use the same PMDs)
+ * \var    iear_count                    - number of IEAR entries
+ * \var    dear_count                    - number of DEAR entries
+ * \var    btb_count                     - number of BTB entries
+ * \var    ipear_count                   - number of IPEAR entries
+ *
+ * \var    gfx_offset                    - offset in the sample to locate the gfx count information
+ * \var    pwr_offset                    - offset in the sample to locate the pwr count information
+ *
+ * \brief  Data structure to describe the events and the mode
+ *
+ */
+
+	typedef struct EVENT_DESC_NODE_S EVENT_DESC_NODE;
+	typedef EVENT_DESC_NODE *EVENT_DESC;
+
+	struct EVENT_DESC_NODE_S {
+		U32 sample_size;
+#if defined(DRV_IA32) || defined(DRV_EM64T)
+		U32 pebs_offset;
+		U32 pebs_size;
+		U32 lbr_offset;
+		U32 lbr_num_regs;
+		U32 latency_offset_in_sample;
+		U32 latency_size_in_sample;
+		U32 latency_size_from_pebs_record;
+		U32 latency_offset_in_pebs_record;
+		U32 power_offset_in_sample;
+		U32 ebc_offset;
+		U32 uncore_ebc_offset;
+		U32 eventing_ip_offset;
+		U32 hle_offset;
+		U32 gfx_offset;
+		U32 pwr_offset;
+		U32 callstack_offset;
+		U32 callstack_size;
+#endif
+#if defined(DRV_IA64)
+		U32 ro_offset;
+		U32 ro_count;
+		U32 iear_offset;
+		U32 dear_offset;
+		U32 btb_offset;
+		U32 ipear_offset;
+		U32 iear_count;
+		U32 dear_count;
+		U32 btb_count;
+		U32 ipear_count;
+#endif
+	};
+
+/**/
+/* Accessor macros for EVENT_DESC node*/
+/**/
+#define EVENT_DESC_sample_size(ec)                        (ec)->sample_size
+#if defined(DRV_IA32) || defined(DRV_EM64T)
+#define EVENT_DESC_pebs_offset(ec)                        (ec)->pebs_offset
+#define EVENT_DESC_pebs_size(ec)                          (ec)->pebs_size
+#define EVENT_DESC_lbr_offset(ec)                         (ec)->lbr_offset
+#define EVENT_DESC_lbr_num_regs(ec)                       (ec)->lbr_num_regs
+#define EVENT_DESC_latency_offset_in_sample(ec)           (ec)->latency_offset_in_sample
+#define EVENT_DESC_latency_size_from_pebs_record(ec)      (ec)->latency_size_from_pebs_record
+#define EVENT_DESC_latency_offset_in_pebs_record(ec)      (ec)->latency_offset_in_pebs_record
+#define EVENT_DESC_latency_size_in_sample(ec)             (ec)->latency_size_in_sample
+#define EVENT_DESC_power_offset_in_sample(ec)             (ec)->power_offset_in_sample
+#define EVENT_DESC_ebc_offset(ec)                         (ec)->ebc_offset
+#define EVENT_DESC_uncore_ebc_offset(ec)                  (ec)->uncore_ebc_offset
+#define EVENT_DESC_eventing_ip_offset(ec)                 (ec)->eventing_ip_offset
+#define EVENT_DESC_hle_offset(ec)                         (ec)->hle_offset
+#define EVENT_DESC_gfx_offset(ec)                         (ec)->gfx_offset
+#define EVENT_DESC_pwr_offset(ec)                         (ec)->pwr_offset
+#define EVENT_DESC_callstack_offset(ec)                   (ec)->callstack_offset
+#define EVENT_DESC_callstack_size(ec)                     (ec)->callstack_size
+#endif
+#if defined(DRV_IA64)
+#define EVENT_DESC_ro_offset(ec)                          (ec)->ro_offset
+#define EVENT_DESC_ro_count(ec)                           (ec)->ro_count
+#define EVENT_DESC_iear_offset(ec)                        (ec)->iear_offset
+#define EVENT_DESC_dear_offset(ec)                        (ec)->dear_offset
+#define EVENT_DESC_btb_offset(ec)                         (ec)->btb_offset
+#define EVENT_DESC_ipear_offset(ec)                       (ec)->ipear_offset
+#define EVENT_DESC_iear_count(ec)                         (ec)->iear_count
+#define EVENT_DESC_dear_count(ec)                         (ec)->dear_count
+#define EVENT_DESC_btb_count(ec)                          (ec)->btb_count
+#define EVENT_DESC_ipear_count(ec)                        (ec)->ipear_count
+#endif
+
+/* ****************************************************************************/
+
+/*!\struct EVENT_CONFIG_NODE
+ * \var    num_groups      -  The number of groups being programmed
+ * \var    em_mode         -  Is EM valid?  If so how?
+ * \var    em_time_slice   -  EM valid?  time slice in milliseconds
+ * \var    sample_size     -  size of buffer in bytes to hold the sample + extras
+ * \var    max_gp_events   -  Max number of General Purpose events per EM group
+ * \var    pebs_offset     -  offset in the sample to locate the pebs capture information
+ * \var    lbr_offset      -  offset in the sample to locate the lbr information
+ * \var    lbr_num_regs    -  offset in the sample to locate the lbr information
+ * \var    latency_offset_in_sample      -  offset in the sample to locate the latency information
+ * \var    latency_size_in_sample        -  size of latency records in the sample
+ * \var    latency_size_from_pebs_record -  offset in the sample to locate the latency
+ *                                          size from pebs record
+ * \var    latency_offset_in_pebs_record -  offset in the sample to locate the latency information
+ *                                          in pebs record
+ * \var    power_offset_in_sample        -  offset in the sample to locate the power information
+ * \var    ebc_offset                    -  offset in the sample to locate the ebc count information
+ *
+ * \var    gfx_offset                    -  offset in the sample to locate the gfx count information
+ * \var    pwr_offset                    -  offset in the sample to locate the pwr count information
+ *
+ * \brief  Data structure to describe the events and the mode
+ *
+ */
+
+	typedef struct EVENT_CONFIG_NODE_S EVENT_CONFIG_NODE;
+	typedef EVENT_CONFIG_NODE *EVENT_CONFIG;
+
+	struct EVENT_CONFIG_NODE_S {
+		U32 num_groups;
+		S32 em_mode;
+		S32 em_factor;
+		S32 em_event_num;
+		U32 sample_size;
+		U32 max_gp_events;
+#if defined(DRV_IA32) || defined(DRV_EM64T)
+		U32 max_fixed_counters;
+		U32 max_ro_counters;	/* maximum read-only counters*/
+		U32 pebs_offset;
+		U32 pebs_size;
+		U32 lbr_offset;
+		U32 lbr_num_regs;
+		U32 latency_offset_in_sample;
+		U32 latency_size_in_sample;
+		U32 latency_size_from_pebs_record;
+		U32 latency_offset_in_pebs_record;
+		U32 power_offset_in_sample;
+		U32 ebc_offset;
+		U32 num_groups_unc;
+		U32 ebc_offset_unc;
+		U32 sample_size_unc;
+		U32 eventing_ip_offset;
+		U32 hle_offset;
+		U32 gfx_offset;
+		U32 pwr_offset;
+		U32 callstack_offset;
+		U32 callstack_size;
+#endif
+#if defined(DRV_IA64)
+		U32 ro_offset;
+		U32 ro_count;
+		U32 iear_offset;
+		U32 dear_offset;
+		U32 btb_offset;
+		U32 ipear_offset;
+		U32 iear_count;
+		U32 dear_count;
+		U32 btb_count;
+		U32 ipear_count;
+#endif
+	};
+
+/**/
+/* Accessor macros for EVENT_CONFIG node*/
+/**/
+#define EVENT_CONFIG_num_groups(ec)                         (ec)->num_groups
+#define EVENT_CONFIG_mode(ec)                               (ec)->em_mode
+#define EVENT_CONFIG_em_factor(ec)                          (ec)->em_factor
+#define EVENT_CONFIG_em_event_num(ec)                       (ec)->em_event_num
+#define EVENT_CONFIG_sample_size(ec)                        (ec)->sample_size
+#define EVENT_CONFIG_max_gp_events(ec)                      (ec)->max_gp_events
+#if defined(DRV_IA32) || defined(DRV_EM64T)
+#define EVENT_CONFIG_max_fixed_counters(ec)                 (ec)->max_fixed_counters
+#define EVENT_CONFIG_max_ro_counters(ec)                    (ec)->max_ro_counters
+#define EVENT_CONFIG_pebs_offset(ec)                        (ec)->pebs_offset
+#define EVENT_CONFIG_pebs_size(ec)                          (ec)->pebs_size
+#define EVENT_CONFIG_lbr_offset(ec)                         (ec)->lbr_offset
+#define EVENT_CONFIG_lbr_num_regs(ec)                       (ec)->lbr_num_regs
+#define EVENT_CONFIG_latency_offset_in_sample(ec)           (ec)->latency_offset_in_sample
+#define EVENT_CONFIG_latency_size_from_pebs_record(ec)      (ec)->latency_size_from_pebs_record
+#define EVENT_CONFIG_latency_offset_in_pebs_record(ec)      (ec)->latency_offset_in_pebs_record
+#define EVENT_CONFIG_latency_size_in_sample(ec)             (ec)->latency_size_in_sample
+#define EVENT_CONFIG_power_offset_in_sample(ec)             (ec)->power_offset_in_sample
+#define EVENT_CONFIG_ebc_offset(ec)                         (ec)->ebc_offset
+#define EVENT_CONFIG_num_groups_unc(ec)                     (ec)->num_groups_unc
+#define EVENT_CONFIG_ebc_offset_unc(ec)                     (ec)->ebc_offset_unc
+#define EVENT_CONFIG_sample_size_unc(ec)                    (ec)->sample_size_unc
+#define EVENT_CONFIG_eventing_ip_offset(ec)                 (ec)->eventing_ip_offset
+#define EVENT_CONFIG_hle_offset(ec)                         (ec)->hle_offset
+#define EVENT_CONFIG_gfx_offset(ec)                         (ec)->gfx_offset
+#define EVENT_CONFIG_pwr_offset(ec)                         (ec)->pwr_offset
+#define EVENT_CONFIG_callstack_offset(ec)                   (ec)->callstack_offset
+#define EVENT_CONFIG_callstack_size(ec)                     (ec)->callstack_size
+#endif
+#if defined(DRV_IA64)
+#define EVENT_CONFIG_ro_offset(ec)                          (ec)->ro_offset
+#define EVENT_CONFIG_ro_count(ec)                           (ec)->ro_count
+#define EVENT_CONFIG_iear_offset(ec)                        (ec)->iear_offset
+#define EVENT_CONFIG_dear_offset(ec)                        (ec)->dear_offset
+#define EVENT_CONFIG_btb_offset(ec)                         (ec)->btb_offset
+#define EVENT_CONFIG_ipear_offset(ec)                       (ec)->ipear_offset
+#define EVENT_CONFIG_iear_count(ec)                         (ec)->iear_count
+#define EVENT_CONFIG_dear_count(ec)                         (ec)->dear_count
+#define EVENT_CONFIG_btb_count(ec)                          (ec)->btb_count
+#define EVENT_CONFIG_ipear_count(ec)                        (ec)->ipear_count
+#endif
+
+	typedef enum {
+		UNC_MUX = 1,
+		UNC_COUNTER
+	} UNC_SA_PROG_TYPE;
+
+	typedef enum {
+		UNC_PCICFG = 1,
+		UNC_MMIO,
+		UNC_STOP,
+		UNC_MEMORY,
+		UNC_STATUS
+	} UNC_SA_CONFIG_TYPE;
+
+	typedef enum {
+		UNC_MCHBAR = 1,
+		UNC_DMIBAR,
+		UNC_PCIEXBAR,
+		UNC_GTTMMADR,
+		UNC_GDXCBAR,
+		UNC_CHAPADR,
+		UNC_SOCPCI
+	} UNC_SA_BAR_TYPE;
+
+	typedef enum {
+		UNC_OP_READ = 1,
+		UNC_OP_WRITE,
+		UNC_OP_RMW
+	} UNC_SA_OPERATION;
+
+	typedef enum {
+		STATIC_COUNTER = 1,
+		FREERUN_COUNTER
+	} COUNTER_TYPES;
+
+	typedef enum {
+		PACKAGE_EVENT = 1,
+		MODULE_EVENT,
+		THREAD_EVENT
+	} EVENT_SCOPE_TYPES;
+
+/* ****************************************************************************/
+
+/*!\struct PCI_ID_NODE
+ * \var    offset      -  PCI offset to start the read/write
+ * \var    data size      Number of bytes to operate on
+ */
+
+	typedef struct PCI_ID_NODE_S PCI_ID_NODE;
+	typedef PCI_ID_NODE *PCI_ID;
+
+	struct PCI_ID_NODE_S {
+		U32 offset;
+		U32 data_size;
+	};
+#define PCI_ID_offset(x)      (x)->offset
+#define PCI_ID_data_size(x)   (x)->data_size
+
+/* ****************************************************************************/
+
+/*!\struct EVENT_REG_ID_NODE
+ * \var    reg_id      -  MSR index to r/w
+ * \var    pci_id     PCI based register and its details to operate on
+ */
+	typedef union EVENT_REG_ID_NODE_S EVENT_REG_ID_NODE;
+	typedef EVENT_REG_ID_NODE *EVENT_REG_ID;
+
+	union EVENT_REG_ID_NODE_S {
+		U16 reg_id;
+		PCI_ID_NODE pci_id;
+	};
+
+/* ****************************************************************************/
+
+/*!\struct EVENT_REG_NODE
+ * \var    reg_type             - register type
+ * \var    event_id_index       - event ID index
+ * \var    event_id_index_local - event ID index within the device
+ * \var    event_reg_id         - register ID/pci register details
+ * \var    desc_id              - desc ID
+ * \var    flags                - flags
+ * \var    reg_value            - register value
+ * \var    max_bits             - max bits
+ * \var    scheduled            - boolean to specify if this event node has been scheduled already
+ * \var    bus_no               - PCI bus number
+ * \var    dev_no               - PCI device number
+ * \var    func_no              - PCI function number
+ * \var    counter_type         - Event counter type - static/freerun
+ * \var    event_scope          - Event scope - package/module/thread
+ * \var
+ * \brief  Data structure to describe the event registers
+ *
+ */
+
+	typedef struct EVENT_REG_NODE_S EVENT_REG_NODE;
+	typedef EVENT_REG_NODE *EVENT_REG;
+
+	struct EVENT_REG_NODE_S {
+		U8 reg_type;
+		U8 event_id_index;	/* U8 must be changed if MAX_EVENTS > 256*/
+		U8 event_id_index_local;	/* U8 must be changed if MAX_EVENTS > 256*/
+		U8 emon_event_id_index_local;
+		U8 group_index;
+		U8 reserved0;
+		U16 reserved1;
+		EVENT_REG_ID_NODE event_reg_id;
+		U16 desc_id;
+		U16 flags;
+		U32 reserved2;
+		U64 reg_value;
+		U64 max_bits;
+		U8 scheduled;
+		U8 reserved3;
+		U16 reserved4;
+		/* PCI config-specific fields*/
+		U32 bus_no;
+		U32 dev_no;
+		U32 func_no;
+		U32 counter_type;
+		U32 event_scope;
+	};
+
+/**/
+/* Accessor macros for EVENT_REG node*/
+/* Note: the flags field is not directly addressible to prevent hackery*/
+/**/
+#define EVENT_REG_reg_type(x,i)                    (x)[(i)].reg_type
+#define EVENT_REG_event_id_index(x,i)              (x)[(i)].event_id_index
+#define EVENT_REG_event_id_index_local(x,i)        (x)[(i)].event_id_index_local
+#define EVENT_REG_emon_event_id_index_local(x,i)   (x)[(i)].emon_event_id_index_local
+#define EVENT_REG_group_index(x,i)                 (x)[(i)].group_index
+#define EVENT_REG_reg_id(x,i)                      (x)[(i)].event_reg_id.reg_id
+#define EVENT_REG_pci_id(x,i)                      (x)[(i)].event_reg_id.pci_id
+#define EVENT_REG_pci_id_offset(x,i)               (x)[(i)].event_reg_id.pci_id.offset
+#define EVENT_REG_pci_id_size(x,i)                 (x)[(i)].event_reg_id.pci_id.data_size
+#define EVENT_REG_desc_id(x,i)                     (x)[(i)].desc_id
+#define EVENT_REG_reg_value(x,i)                   (x)[(i)].reg_value
+#define EVENT_REG_max_bits(x,i)                    (x)[(i)].max_bits
+#define EVENT_REG_scheduled(x,i)                   (x)[(i)].scheduled
+/* PCI config-specific fields*/
+#define EVENT_REG_bus_no(x,i)                      (x)[(i)].bus_no
+#define EVENT_REG_dev_no(x,i)                      (x)[(i)].dev_no
+#define EVENT_REG_func_no(x,i)                     (x)[(i)].func_no
+
+#define EVENT_REG_counter_type(x,i)                (x)[(i)].counter_type
+#define EVENT_REG_event_scope(x,i)                 (x)[(i)].event_scope
+
+/**/
+/* Config bits*/
+/**/
+#if defined(DRV_IA32) || defined(DRV_EM64T)
+
+#define EVENT_REG_precise_bit               0x00000001
+#define EVENT_REG_tag_bit                   0x00000002
+#define EVENT_REG_uncore_bit                0x00000004
+#define EVENT_REG_uncore_q_rst_bit          0x00000008
+#define EVENT_REG_latency_bit               0x00000010
+#define EVENT_REG_is_gp_reg_bit             0x00000020
+#define EVENT_REG_clean_up_bit              0x00000040
+#define EVENT_REG_em_trigger_bit            0x00000080
+#define EVENT_REG_lbr_value_bit             0x00000100
+#define EVENT_REG_fixed_reg_bit             0x00000200
+#define EVENT_REG_compound_ctr_sub_bit      0x00000400
+#define EVENT_REG_compound_ctr_bit          0x00000800
+#define EVENT_REG_multi_pkg_evt_bit         0x00001000
+
+#else				/* DRV_IA64 */
+
+#define EVENT_REG_clean_up_bit              0x00000001
+#define EVENT_REG_em_trigger_bit            0x00000002
+#define EVENT_REG_dear_value_bit            0x00000004
+#define EVENT_REG_iear_value_bit            0x00000008
+#define EVENT_REG_btb_value_bit             0x00000010
+#define EVENT_REG_is_generic_reg_bit        0x00000020
+#define EVENT_REG_ipear_value_bit           0x00000040
+
+#endif				/* DRV_IA32 || DRV_EM64T */
+
+/**/
+/* Accessor macros for config bits*/
+/**/
+#if defined(DRV_IA32) || defined(DRV_EM64T)
+
+#define EVENT_REG_precise_get(x,i)          ((x)[(i)].flags &   EVENT_REG_precise_bit)
+#define EVENT_REG_precise_set(x,i)          ((x)[(i)].flags |=  EVENT_REG_precise_bit)
+#define EVENT_REG_precise_clear(x,i)        ((x)[(i)].flags &= ~EVENT_REG_precise_bit)
+
+#define EVENT_REG_tag_get(x,i)              ((x)[(i)].flags &   EVENT_REG_tag_bit)
+#define EVENT_REG_tag_set(x,i)              ((x)[(i)].flags |=  EVENT_REG_tag_bit)
+#define EVENT_REG_tag_clear(x,i)            ((x)[(i)].flags &= ~EVENT_REG_tag_bit)
+
+#define EVENT_REG_uncore_get(x,i)           ((x)[(i)].flags &   EVENT_REG_uncore_bit)
+#define EVENT_REG_uncore_set(x,i)           ((x)[(i)].flags |=  EVENT_REG_uncore_bit)
+#define EVENT_REG_uncore_clear(x,i)         ((x)[(i)].flags &= ~EVENT_REG_uncore_bit)
+
+#define EVENT_REG_uncore_q_rst_get(x,i)     ((x)[(i)].flags &   EVENT_REG_uncore_q_rst_bit)
+#define EVENT_REG_uncore_q_rst_set(x,i)     ((x)[(i)].flags |=  EVENT_REG_uncore_q_rst_bit)
+#define EVENT_REG_uncore_q_rst_clear(x,i)   ((x)[(i)].flags &= ~EVENT_REG_uncore_q_rst_bit)
+
+#define EVENT_REG_latency_get(x,i)          ((x)[(i)].flags &   EVENT_REG_latency_bit)
+#define EVENT_REG_latency_set(x,i)          ((x)[(i)].flags |=  EVENT_REG_latency_bit)
+#define EVENT_REG_latency_clear(x,i)        ((x)[(i)].flags &= ~EVENT_REG_latency_bit)
+
+#define EVENT_REG_is_gp_reg_get(x,i)        ((x)[(i)].flags &   EVENT_REG_is_gp_reg_bit)
+#define EVENT_REG_is_gp_reg_set(x,i)        ((x)[(i)].flags |=  EVENT_REG_is_gp_reg_bit)
+#define EVENT_REG_is_gp_reg_clear(x,i)      ((x)[(i)].flags &= ~EVENT_REG_is_gp_reg_bit)
+
+#define EVENT_REG_lbr_value_get(x,i)        ((x)[(i)].flags &   EVENT_REG_lbr_value_bit)
+#define EVENT_REG_lbr_value_set(x,i)        ((x)[(i)].flags |=  EVENT_REG_lbr_value_bit)
+#define EVENT_REG_lbr_value_clear(x,i)      ((x)[(i)].flags &= ~EVENT_REG_lbr_value_bit)
+
+#define EVENT_REG_fixed_reg_get(x,i)        ((x)[(i)].flags &   EVENT_REG_fixed_reg_bit)
+#define EVENT_REG_fixed_reg_set(x,i)        ((x)[(i)].flags |=  EVENT_REG_fixed_reg_bit)
+#define EVENT_REG_fixed_reg_clear(x,i)      ((x)[(i)].flags &= ~EVENT_REG_fixed_reg_bit)
+
+#define EVENT_REG_compound_ctr_bit_get(x,i)   ((x)[(i)].flags &   EVENT_REG_compound_ctr_bit)
+#define EVENT_REG_compound_ctr_bit_set(x,i)   ((x)[(i)].flags |=  EVENT_REG_compound_ctr_bit)
+#define EVENT_REG_compound_ctr_bit_clear(x,i) ((x)[(i)].flags &= ~EVENT_REG_compound_ctr_bit)
+
+#define EVENT_REG_compound_ctr_sub_bit_get(x,i)   ((x)[(i)].flags &   EVENT_REG_compound_ctr_sub_bit)
+#define EVENT_REG_compound_ctr_sub_bit_set(x,i)   ((x)[(i)].flags |=  EVENT_REG_compound_ctr_sub_bit)
+#define EVENT_REG_compound_ctr_sub_bit_clear(x,i) ((x)[(i)].flags &= ~EVENT_REG_compound_ctr_sub_bit)
+
+#define EVENT_REG_multi_pkg_evt_bit_get(x,i)   ((x)[(i)].flags &   EVENT_REG_multi_pkg_evt_bit)
+#define EVENT_REG_multi_pkg_evt_bit_set(x,i)   ((x)[(i)].flags |=  EVENT_REG_multi_pkg_evt_bit)
+#define EVENT_REG_multi_pkg_evt_bit_clear(x,i) ((x)[(i)].flags &= ~EVENT_REG_multi_pkg_evt_bit)
+
+#else				/* DRV_IA64 */
+
+#define EVENT_REG_dear_value_get(x,i)       ((x)[(i)].flags &   EVENT_REG_dear_value_bit)
+#define EVENT_REG_dear_value_set(x,i)       ((x)[(i)].flags |=  EVENT_REG_dear_value_bit)
+#define EVENT_REG_dear_value_clear(x,i)     ((x)[(i)].flags &= ~EVENT_REG_dear_value_bit)
+
+#define EVENT_REG_iear_value_get(x,i)       ((x)[(i)].flags &   EVENT_REG_iear_value_bit)
+#define EVENT_REG_iear_value_set(x,i)       ((x)[(i)].flags |=  EVENT_REG_iear_value_bit)
+#define EVENT_REG_iear_value_clear(x,i)     ((x)[(i)].flags &= ~EVENT_REG_iear_value_bit)
+
+#define EVENT_REG_btb_value_get(x,i)        ((x)[(i)].flags &   EVENT_REG_btb_value_bit)
+#define EVENT_REG_btb_value_set(x,i)        ((x)[(i)].flags |=  EVENT_REG_btb_value_bit)
+#define EVENT_REG_btb_value_clear(x,i)      ((x)[(i)].flags &= ~EVENT_REG_btb_value_bit)
+
+#define EVENT_REG_ipear_value_get(x,i)      ((x)[(i)].flags &   EVENT_REG_ipear_value_bit)
+#define EVENT_REG_ipear_value_set(x,i)      ((x)[(i)].flags |=  EVENT_REG_ipear_value_bit)
+#define EVENT_REG_ipear_value_clear(x,i)    ((x)[(i)].flags &= ~EVENT_REG_ipear_value_bit)
+
+#define EVENT_REG_is_generic_reg_get(x,i)   ((x)[(i)].flags &   EVENT_REG_is_generic_reg_bit)
+#define EVENT_REG_is_generic_reg_set(x,i)   ((x)[(i)].flags |=  EVENT_REG_is_generic_reg_bit)
+#define EVENT_REG_is_generic_reg_clear(x,i) ((x)[(i)].flags &= ~EVENT_REG_is_generic_reg_bit)
+
+#endif				/* DRV_IA32 || DRV_EM64T */
+
+#define EVENT_REG_clean_up_get(x,i)         ((x)[(i)].flags &   EVENT_REG_clean_up_bit)
+#define EVENT_REG_clean_up_set(x,i)         ((x)[(i)].flags |=  EVENT_REG_clean_up_bit)
+#define EVENT_REG_clean_up_clear(x,i)       ((x)[(i)].flags &= ~EVENT_REG_clean_up_bit)
+
+#define EVENT_REG_em_trigger_get(x,i)       ((x)[(i)].flags &   EVENT_REG_em_trigger_bit)
+#define EVENT_REG_em_trigger_set(x,i)       ((x)[(i)].flags |=  EVENT_REG_em_trigger_bit)
+#define EVENT_REG_em_trigger_clear(x,i)     ((x)[(i)].flags &= ~EVENT_REG_em_trigger_bit)
+
+/* ****************************************************************************/
+
+/*!\struct DRV_PCI_DEVICE_ENTRY_NODE_S
+ * \var    bus_no          -  PCI bus no to read
+ * \var    dev_no          -  PCI device no to read
+ * \var    func_no            PCI device no to read
+ * \var    bar_offset         BASE Address Register offset of the PCI based PMU
+ * \var    bit_offset         Bit offset of the same
+ * \var    size               size of read/write
+ * \var    bar_address        the actual BAR present
+ * \var    enable_offset      Offset info to enable/disable
+ * \var    enabled            Status of enable/disable
+ * \brief  Data structure to describe the PCI Device
+ *
+ */
+
+	typedef struct DRV_PCI_DEVICE_ENTRY_NODE_S DRV_PCI_DEVICE_ENTRY_NODE;
+	typedef DRV_PCI_DEVICE_ENTRY_NODE *DRV_PCI_DEVICE_ENTRY;
+
+	struct DRV_PCI_DEVICE_ENTRY_NODE_S {
+		U32 bus_no;
+		U32 dev_no;
+		U32 func_no;
+		U32 bar_offset;
+		U32 bit_offset;
+		U32 size;
+		U64 bar_address;
+		U32 enable_offset;
+		U32 enabled;
+		U32 base_offset_for_mmio;
+		U32 operation;
+		U32 bar_name;
+		U32 prog_type;
+		U32 config_type;
+		U32 reserved0;
+		U64 value;
+		U64 mask;
+		U64 virtual_address;
+		U32 port_id;
+		U32 op_code;
+	};
+
+/**/
+/* Accessor macros for DRV_PCI_DEVICE_NODE node*/
+/**/
+#define DRV_PCI_DEVICE_ENTRY_bus_no(x)                (x)->bus_no
+#define DRV_PCI_DEVICE_ENTRY_dev_no(x)                (x)->dev_no
+#define DRV_PCI_DEVICE_ENTRY_func_no(x)               (x)->func_no
+#define DRV_PCI_DEVICE_ENTRY_bar_offset(x)            (x)->bar_offset
+#define DRV_PCI_DEVICE_ENTRY_bit_offset(x)            (x)->bit_offset
+#define DRV_PCI_DEVICE_ENTRY_size(x)                  (x)->size
+#define DRV_PCI_DEVICE_ENTRY_bar_address(x)           (x)->bar_address
+#define DRV_PCI_DEVICE_ENTRY_enable_offset(x)         (x)->enable_offset
+#define DRV_PCI_DEVICE_ENTRY_enable(x)                (x)->enabled
+#define DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio(x)  (x)->base_offset_for_mmio
+#define DRV_PCI_DEVICE_ENTRY_operation(x)             (x)->operation
+#define DRV_PCI_DEVICE_ENTRY_bar_name(x)              (x)->bar_name
+#define DRV_PCI_DEVICE_ENTRY_prog_type(x)             (x)->prog_type
+#define DRV_PCI_DEVICE_ENTRY_config_type(x)           (x)->config_type
+#define DRV_PCI_DEVICE_ENTRY_value(x)                 (x)->value
+#define DRV_PCI_DEVICE_ENTRY_mask(x)                  (x)->mask
+#define DRV_PCI_DEVICE_ENTRY_virtual_address(x)       (x)->virtual_address
+#define DRV_PCI_DEVICE_ENTRY_port_id(x)               (x)->port_id
+#define DRV_PCI_DEVICE_ENTRY_op_code(x)               (x)->op_code
+
+/* ****************************************************************************/
+
+/*!\struct ECB_NODE_S
+ * \var    num_entries -       Total number of entries in "entries".
+ * \var    group_id    -       Group ID.
+ * \var    num_events  -       Number of events in this group.
+ * \var    cccr_start  -       Starting index of counter configuration control registers in "entries".
+ * \var    cccr_pop    -       Number of counter configuration control registers in "entries".
+ * \var    escr_start  -       Starting index of event selection control registers in "entries".
+ * \var    escr_pop    -       Number of event selection control registers in "entries".
+ * \var    data_start  -       Starting index of data registers in "entries".
+ * \var    data_pop    -       Number of data registers in "entries".
+ * \var    pcidev_entry_node   PCI device details for one device
+ * \var    entries     - .     All the register nodes required for programming
+ *
+ * \brief
+ */
+
+	typedef struct ECB_NODE_S ECB_NODE;
+	typedef ECB_NODE *ECB;
+
+	struct ECB_NODE_S {
+		U32 num_entries;
+		U32 group_id;
+		U32 num_events;
+		U32 cccr_start;
+		U32 cccr_pop;
+		U32 escr_start;
+		U32 escr_pop;
+		U32 data_start;
+		U32 data_pop;
+		U16 flags;
+		U8 pmu_timer_interval;
+		U8 reserved0;	/* added for alignment reasons*/
+		DRV_PCI_DEVICE_ENTRY_NODE pcidev_entry_node;
+		U32 num_pci_devices;
+		U32 pcidev_list_offset;
+		DRV_PCI_DEVICE_ENTRY pcidev_entry_list;
+#if defined(DRV_IA32)
+		U32 reserved1;
+#endif
+		EVENT_REG_NODE entries[];
+	};
+
+/**/
+/* Accessor macros for ECB node*/
+/**/
+#define ECB_num_entries(x)                (x)->num_entries
+#define ECB_group_id(x)                   (x)->group_id
+#define ECB_num_events(x)                 (x)->num_events
+#define ECB_cccr_start(x)                 (x)->cccr_start
+#define ECB_cccr_pop(x)                   (x)->cccr_pop
+#define ECB_escr_start(x)                 (x)->escr_start
+#define ECB_escr_pop(x)                   (x)->escr_pop
+#define ECB_data_start(x)                 (x)->data_start
+#define ECB_data_pop(x)                   (x)->data_pop
+#define ECB_pcidev_entry_node(x)          (x)->pcidev_entry_node
+#define ECB_num_pci_devices(x)            (x)->num_pci_devices
+#define ECB_pcidev_list_offset(x)         (x)->pcidev_list_offset
+#define ECB_pcidev_entry_list(x)          (x)->pcidev_entry_list
+#define ECB_flags(x)                      (x)->flags
+#define ECB_pmu_timer_interval(x)         (x)->pmu_timer_interval
+#define ECB_entries(x)                    (x)->entries
+
+/* for flag bit field*/
+#define ECB_direct2core_bit                0x0001
+#define ECB_bl_bypass_bit                  0x0002
+#define ECB_pci_id_offset_bit              0x0003
+
+#define ECB_CONSTRUCT(x,num_entries,group_id,cccr_start,escr_start,data_start)    \
+                                           ECB_num_entries((x)) = (num_entries);  \
+                                           ECB_group_id((x)) = (group_id);        \
+                                           ECB_cccr_start((x)) = (cccr_start);    \
+                                           ECB_cccr_pop((x)) = 0;                 \
+                                           ECB_escr_start((x)) = (escr_start);    \
+                                           ECB_escr_pop((x)) = 0;                 \
+                                           ECB_data_start((x)) = (data_start);    \
+                                           ECB_data_pop((x)) = 0;                 \
+                                           ECB_num_pci_devices((x)) = 0;
+
+#define ECB_CONSTRUCT1(x,num_entries,group_id,cccr_start,escr_start,data_start,num_pci_devices)    \
+                                           ECB_num_entries((x)) = (num_entries);  \
+                                           ECB_group_id((x)) = (group_id);        \
+                                           ECB_cccr_start((x)) = (cccr_start);    \
+                                           ECB_cccr_pop((x)) = 0;                 \
+                                           ECB_escr_start((x)) = (escr_start);    \
+                                           ECB_escr_pop((x)) = 0;                 \
+                                           ECB_data_start((x)) = (data_start);    \
+                                           ECB_data_pop((x)) = 0;                 \
+                                           ECB_num_pci_devices((x)) = (num_pci_devices);
+
+/**/
+/* Accessor macros for ECB node entries*/
+/**/
+#define ECB_entries_reg_type(x,i)                    EVENT_REG_reg_type((ECB_entries(x)),(i))
+#define ECB_entries_event_id_index(x,i)              EVENT_REG_event_id_index((ECB_entries(x)),(i))
+#define ECB_entries_event_id_index_local(x,i)        EVENT_REG_event_id_index_local((ECB_entries(x)),(i))
+#define ECB_entries_emon_event_id_index_local(x,i)   EVENT_REG_emon_event_id_index_local((ECB_entries(x)),(i))
+#define ECB_entries_reg_id(x,i)                      EVENT_REG_reg_id((ECB_entries(x)),(i))
+#define ECB_entries_pci_id(x,i)                      EVENT_REG_pci_id((ECB_entries(x)),(i))
+#define ECB_entries_pci_id_offset(x,i)               EVENT_REG_pci_id_offset((ECB_entries(x)),(i))
+#define ECB_entries_reg_value(x,i)                   EVENT_REG_reg_value((ECB_entries(x)),(i))
+#define ECB_entries_max_bits(x,i)                    EVENT_REG_max_bits((ECB_entries(x)),(i))
+#define ECB_entries_scheduled(x,i)                   EVENT_REG_scheduled((ECB_entries(x)),(i))
+#define ECB_entries_group_index(x,i)                 EVENT_REG_group_index((ECB_entries(x)),(i))
+/* PCI config-specific fields*/
+#define ECB_entries_bus_no(x,i)                      EVENT_REG_bus_no((ECB_entries(x)),(i))
+#define ECB_entries_dev_no(x,i)                      EVENT_REG_dev_no((ECB_entries(x)),(i))
+#define ECB_entries_func_no(x,i)                     EVENT_REG_func_no((ECB_entries(x)),(i))
+#define ECB_entries_counter_type(x,i)                EVENT_REG_counter_type((ECB_entries(x)),(i))
+#define ECB_entries_event_scope(x,i)                 EVENT_REG_event_scope((ECB_entries(x)),(i))
+#if defined(DRV_IA32) || defined(DRV_EM64T)
+#define ECB_entries_precise_get(x,i)                    EVENT_REG_precise_get((ECB_entries(x)),(i))
+#define ECB_entries_tag_get(x,i)                        EVENT_REG_tag_get((ECB_entries(x)),(i))
+#define ECB_entries_uncore_get(x,i)                     EVENT_REG_uncore_get((ECB_entries(x)),(i))
+#define ECB_entries_uncore_q_rst_get(x,i)               EVENT_REG_uncore_q_rst_get((ECB_entries(x)),(i))
+#define ECB_entries_is_gp_reg_get(x,i)                  EVENT_REG_is_gp_reg_get((ECB_entries(x)),(i))
+#define ECB_entries_lbr_value_get(x,i)                  EVENT_REG_lbr_value_get((ECB_entries(x)),(i))
+#define ECB_entries_fixed_reg_get(x,i)                  EVENT_REG_fixed_reg_get((ECB_entries(x)),(i))
+#define ECB_entries_is_compound_ctr_bit_set(x,i)        EVENT_REG_compound_ctr_bit_get((ECB_entries(x)),(i))
+#define ECB_entries_is_compound_ctr_sub_bit_set(x,i)    EVENT_REG_compound_ctr_sub_bit_get((ECB_entries(x)),(i))
+#define ECB_entries_is_multi_pkg_bit_set(x,i)           EVENT_REG_multi_pkg_evt_bit_get((ECB_entries(x)),(i))
+#else				/* DRV_IA64 */
+#define ECB_entries_dear_value_get(x,i)       EVENT_REG_dear_value_get((ECB_entries(x)),(i))
+#define ECB_entries_iear_value_get(x,i)       EVENT_REG_iear_value_get((ECB_entries(x)),(i))
+#define ECB_entries_btb_value_get(x,i)        EVENT_REG_btb_value_get((ECB_entries(x)),(i))
+#define ECB_entries_ipear_value_get(x,i)      EVENT_REG_ipear_value_get((ECB_entries(x)),(i))
+#define ECB_entries_is_generic_reg_get(x,i)   EVENT_REG_is_generic_reg_get((ECB_entries(x)),(i))
+#endif				/* DRV_IA32 || DRV_EM64T */
+#define ECB_entries_clean_up_get(x,i)         EVENT_REG_clean_up_get((ECB_entries(x)),(i))
+#define ECB_entries_em_trigger_get(x,i)       EVENT_REG_em_trigger_get((ECB_entries(x)),(i))
+
+#if defined(__cplusplus)
+}
+#endif
+#endif
diff --git a/drivers/external_drivers/drivers/socwatch/soc_perf_driver/lwpmudrv_ioctl.h b/drivers/external_drivers/drivers/socwatch/soc_perf_driver/lwpmudrv_ioctl.h
new file mode 100644
index 0000000..28812ce
--- /dev/null
+++ b/drivers/external_drivers/drivers/socwatch/soc_perf_driver/lwpmudrv_ioctl.h
@@ -0,0 +1,299 @@
+/* ***********************************************************************************************
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2007-2014 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  BSD LICENSE
+
+  Copyright(c) 2007-2014 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+  ***********************************************************************************************
+*/
+
+#ifndef _LWPMUDRV_IOCTL_H_
+#define _LWPMUDRV_IOCTL_H_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/*SEP Driver Operation defines*/
+/**/
+#define DRV_OPERATION_START                          1
+#define DRV_OPERATION_STOP                           2
+#define DRV_OPERATION_INIT_PMU                       3
+#define DRV_OPERATION_GET_NORMALIZED_TSC             4
+#define DRV_OPERATION_TSC_SKEW_INFO                  5
+#define DRV_OPERATION_PAUSE                          6
+#define DRV_OPERATION_RESUME                         7
+#define DRV_OPERATION_TERMINATE                      8
+#define DRV_OPERATION_RESERVE                        9
+#define DRV_OPERATION_VERSION                        10
+#define DRV_OPERATION_SWITCH_GROUP                   11
+#define DRV_OPERATION_GET_DRIVER_STATE               12
+#define DRV_OPERATION_INIT_UNCORE                    13
+#define DRV_OPERATION_EM_GROUPS_UNCORE               14
+#define DRV_OPERATION_EM_CONFIG_NEXT_UNCORE          15
+#define DRV_OPERATION_READ_UNCORE_DATA               16
+#define DRV_OPERATION_STOP_MEM                       17
+#define DRV_OPERATION_CREATE_MEM                     18
+#define DRV_OPERATION_READ_MEM                       19
+#define DRV_OPERATION_CHECK_STATUS                   20
+#define DRV_OPERATION_TIMER_TRIGGER_READ             21
+
+/* IOCTL_SETUP*/
+/**/
+
+#if defined(DRV_OS_WINDOWS)
+
+/**/
+/* NtDeviceIoControlFile IoControlCode values for this device.*/
+/**/
+/* Warning:  Remember that the low two bits of the code specify how the*/
+/*           buffers are passed to the driver!*/
+/**/
+/* 16 bit device type. 12 bit function codes*/
+#define LWPMUDRV_IOCTL_DEVICE_TYPE  0xA000	/* values 0-32768 reserved for Microsoft*/
+#define LWPMUDRV_IOCTL_FUNCTION     0x0A00	/* values 0-2047  reserved for Microsoft*/
+
+/**/
+/* Basic CTL CODE macro to reduce typographical errors*/
+/* Use for FILE_READ_ACCESS*/
+/**/
+#define LWPMUDRV_CTL_READ_CODE(x)    CTL_CODE(LWPMUDRV_IOCTL_DEVICE_TYPE,  \
+                                              LWPMUDRV_IOCTL_FUNCTION+(x), \
+                                              METHOD_BUFFERED,             \
+                                              FILE_READ_ACCESS)
+
+#define LWPMUDRV_IOCTL_START                        LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_START)
+#define LWPMUDRV_IOCTL_STOP                         LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_STOP)
+#define LWPMUDRV_IOCTL_INIT_PMU                     LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_INIT_PMU)
+#define LWPMUDRV_IOCTL_GET_NORMALIZED_TSC           LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_GET_NORMALIZED_TSC)
+#define LWPMUDRV_IOCTL_TSC_SKEW_INFO                LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_TSC_SKEW_INFO)
+#define LWPMUDRV_IOCTL_PAUSE                        LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_PAUSE)
+#define LWPMUDRV_IOCTL_RESUME                       LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_RESUME)
+#define LWPMUDRV_IOCTL_TERMINATE                    LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_TERMINATE)
+#define LWPMUDRV_IOCTL_RESERVE                      LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_RESERVE)
+#define LWPMUDRV_IOCTL_VERSION                      LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_VERSION)
+#define LWPMUDRV_IOCTL_SWITCH_GROUP                 LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_SWITCH_GROUP)
+#define LWPMUDRV_IOCTL_GET_DRIVER_STATE             LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_GET_DRIVER_STATE)
+#define LWPMUDRV_IOCTL_INIT_UNCORE                  LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_INIT_UNCORE)
+#define LWPMUDRV_IOCTL_EM_GROUPS_UNCORE             LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_EM_GROUPS_UNCORE)
+#define LWPMUDRV_IOCTL_EM_CONFIG_NEXT_UNCORE        LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_EM_CONFIG_NEXT_UNCORE)
+#define LWPMUDRV_IOCTL_READ_UNCORE_DATA             LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_READ_UNCORE_DATA)
+#define LWPMUDRV_IOCTL_STOP_MEM                     LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_STOP_MEM)
+#define LWPMUDRV_IOCTL_CREATE_MEM                   LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_CREATE_MEM)
+#define LWPMUDRV_IOCTL_READ_MEM                     LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_READ_MEM)
+#define LWPMUDRV_IOCTL_CHECK_STATUS                 LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_CHECK_STATUS)
+#define LWPMUDRV_IOCTL_TIMER_TRIGGER_READ           LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_TIMER_TRIGGER_READ)
+
+#elif defined(DRV_OS_LINUX) || defined(DRV_OS_SOLARIS) || defined (DRV_OS_ANDROID)
+/* IOCTL_ARGS*/
+	typedef struct IOCTL_ARGS_NODE_S IOCTL_ARGS_NODE;
+	typedef IOCTL_ARGS_NODE *IOCTL_ARGS;
+	struct IOCTL_ARGS_NODE_S {
+		U64 r_len;
+		U64 w_len;
+		char *r_buf;
+		char *w_buf;
+	};
+
+/* COMPAT IOCTL_ARGS*/
+#if defined (CONFIG_COMPAT) && defined(DRV_EM64T)
+	typedef struct IOCTL_COMPAT_ARGS_NODE_S IOCTL_COMPAT_ARGS_NODE;
+	typedef IOCTL_COMPAT_ARGS_NODE *IOCTL_COMPAT_ARGS;
+	struct IOCTL_COMPAT_ARGS_NODE_S {
+		U64 r_len;
+		U64 w_len;
+		compat_uptr_t r_buf;
+		compat_uptr_t w_buf;
+	};
+#endif
+
+#define LWPMU_IOC_MAGIC   99
+
+/* IOCTL_SETUP*/
+/**/
+#define LWPMUDRV_IOCTL_START                  _IO (LWPMU_IOC_MAGIC, DRV_OPERATION_START)
+#define LWPMUDRV_IOCTL_STOP                   _IO (LWPMU_IOC_MAGIC, DRV_OPERATION_STOP)
+#define LWPMUDRV_IOCTL_INIT_PMU               _IOW(LWPMU_IOC_MAGIC, DRV_OPERATION_INIT_PMU, IOCTL_ARGS)
+#define LWPMUDRV_IOCTL_GET_NORMALIZED_TSC     _IOW(LWPMU_IOC_MAGIC, DRV_OPERATION_GET_NORMALIZED_TSC, int)
+#define LWPMUDRV_IOCTL_TSC_SKEW_INFO          _IOW(LWPMU_IOC_MAGIC, DRV_OPERATION_TSC_SKEW_INFO, IOCTL_ARGS)
+#define LWPMUDRV_IOCTL_PAUSE                  _IO (LWPMU_IOC_MAGIC, DRV_OPERATION_PAUSE)
+#define LWPMUDRV_IOCTL_RESUME                 _IO (LWPMU_IOC_MAGIC, DRV_OPERATION_RESUME)
+#define LWPMUDRV_IOCTL_TERMINATE              _IO (LWPMU_IOC_MAGIC, DRV_OPERATION_TERMINATE)
+#define LWPMUDRV_IOCTL_RESERVE                _IOR(LWPMU_IOC_MAGIC, DRV_OPERATION_RESERVE, IOCTL_ARGS)
+#define LWPMUDRV_IOCTL_VERSION                _IOR(LWPMU_IOC_MAGIC, DRV_OPERATION_VERSION, IOCTL_ARGS)
+#define LWPMUDRV_IOCTL_SWITCH_GROUP           _IO (LWPMU_IOC_MAGIC, DRV_OPERATION_SWITCH_GROUP)
+#define LWPMUDRV_IOCTL_GET_DRIVER_STATE       _IOW(LWPMU_IOC_MAGIC, DRV_OPERATION_GET_DRIVER_STATE, IOCTL_ARGS)
+#define LWPMUDRV_IOCTL_INIT_UNCORE            _IOW(LWPMU_IOC_MAGIC, DRV_OPERATION_INIT_UNCORE, IOCTL_ARGS)
+#define LWPMUDRV_IOCTL_EM_GROUPS_UNCORE       _IOW(LWPMU_IOC_MAGIC, DRV_OPERATION_EM_GROUPS_UNCORE, IOCTL_ARGS)
+#define LWPMUDRV_IOCTL_EM_CONFIG_NEXT_UNCORE  _IOW(LWPMU_IOC_MAGIC, DRV_OPERATION_EM_CONFIG_NEXT_UNCORE, IOCTL_ARGS)
+#define LWPMUDRV_IOCTL_READ_UNCORE_DATA       _IOR(LWPMU_IOC_MAGIC, DRV_OPERATION_READ_UNCORE_DATA, IOCTL_ARGS)
+#define LWPMUDRV_IOCTL_STOP_MEM               _IO (LWPMU_IOC_MAGIC, DRV_OPERATION_STOP_MEM)
+#define LWPMUDRV_IOCTL_CREATE_MEM             _IOW(LWPMU_IOC_MAGIC, DRV_OPERATION_CREATE_MEM, IOCTL_ARGS)
+#define LWPMUDRV_IOCTL_READ_MEM               _IOW(LWPMU_IOC_MAGIC, DRV_OPERATION_READ_MEM, IOCTL_ARGS)
+#define LWPMUDRV_IOCTL_CHECK_STATUS           _IOR(LWPMU_IOC_MAGIC, DRV_OPERATION_CHECK_STATUS, IOCTL_ARGS)
+#define LWPMUDRV_IOCTL_TIMER_TRIGGER_READ     _IO (LWPMU_IOC_MAGIC, DRV_OPERATION_TIMER_TRIGGER_READ)
+
+#elif defined(DRV_OS_FREEBSD)
+
+/* IOCTL_ARGS*/
+	typedef struct IOCTL_ARGS_NODE_S IOCTL_ARGS_NODE;
+	typedef IOCTL_ARGS_NODE *IOCTL_ARGS;
+	struct IOCTL_ARGS_NODE_S {
+		U64 r_len;
+		char *r_buf;
+		U64 w_len;
+		char *w_buf;
+	};
+
+/* IOCTL_SETUP*/
+/**/
+#define LWPMU_IOC_MAGIC   99
+
+/* FreeBSD is very strict about IOR/IOW/IOWR specifications on IOCTLs.
+ * Since these IOCTLs all pass down the real read/write buffer lengths
+ *  and addresses inside of an IOCTL_ARGS_NODE data structure, we
+ *  need to specify all of these as _IOW so that the kernel will
+ *  view it as userspace passing the data to the driver, rather than
+ *  the reverse.  There are also some cases where Linux is passing
+ *  a smaller type than IOCTL_ARGS_NODE, even though its really
+ *  passing an IOCTL_ARGS_NODE.  These needed to be fixed for FreeBSD.
+ */
+#define LWPMUDRV_IOCTL_START                  _IO (LWPMU_IOC_MAGIC, DRV_OPERATION_START)
+#define LWPMUDRV_IOCTL_STOP                   _IO (LWPMU_IOC_MAGIC, DRV_OPERATION_STOP)
+#define LWPMUDRV_IOCTL_INIT_PMU               _IO (LWPMU_IOC_MAGIC, DRV_OPERATION_INIT_PMU)
+#define LWPMUDRV_IOCTL_GET_NORMALIZED_TSC     _IOW(LWPMU_IOC_MAGIC, DRV_OPERATION_GET_NORMALIZED_TSC, IOCTL_ARGS_NODE)
+#define LWPMUDRV_IOCTL_TSC_SKEW_INFO          _IOW(LWPMU_IOC_MAGIC, DRV_OPERATION_TSC_SKEW_INFO, IOCTL_ARGS_NODE)
+#define LWPMUDRV_IOCTL_PAUSE                  _IO (LWPMU_IOC_MAGIC, DRV_OPERATION_PAUSE)
+#define LWPMUDRV_IOCTL_RESUME                 _IO (LWPMU_IOC_MAGIC, DRV_OPERATION_RESUME)
+#define LWPMUDRV_IOCTL_TERMINATE              _IO (LWPMU_IOC_MAGIC, DRV_OPERATION_TERMINATE)
+#define LWPMUDRV_IOCTL_RESERVE                _IOW(LWPMU_IOC_MAGIC, DRV_OPERATION_RESERVE, IOCTL_ARGS_NODE)
+#define LWPMUDRV_IOCTL_VERSION                _IOW(LWPMU_IOC_MAGIC, DRV_OPERATION_VERSION, IOCTL_ARGS_NODE)
+#define LWPMUDRV_IOCTL_SWITCH_GROUP           _IO (LWPMU_IOC_MAGIC, DRV_OPERATION_SWITCH_GROUP)
+#define LWPMUDRV_IOCTL_GET_DRIVER_STATE       _IOW(LWPMU_IOC_MAGIC, DRV_OPERATION_GET_DRIVER_STATE, IOCTL_ARGS_NODE)
+#define LWPMUDRV_IOCTL_INIT_UNCORE            _IO (LWPMU_IOC_MAGIC, DRV_OPERATION_INIT_UNCORE, IOCTL_ARGS)
+#define LWPMUDRV_IOCTL_EM_GROUPS_UNCORE       _IO (LWPMU_IOC_MAGIC, DRV_OPERATION_EM_GROUPS_UNCORE, IOCTL_ARGS)
+#define LWPMUDRV_IOCTL_EM_CONFIG_NEXT_UNCORE  _IO (LWPMU_IOC_MAGIC, DRV_OPERATION_EM_CONFIG_NEXT_UNCORE, IOCTL_ARGS)
+#define LWPMUDRV_IOCTL_READ_UNCORE_DATA       _IOR(LWPMU_IOC_MAGIC, DRV_OPERATION_READ_UNCORE_DATA, IOCTL_ARGS)
+#define LWPMUDRV_IOCTL_STOP_MEM               _IO (LWPMU_IOC_MAGIC, DRV_OPERATION_STOP_MEM)
+#define LWPMUDRV_IOCTL_CREATE_MEM             _IOW(LWPMU_IOC_MAGIC, DRV_OPERATION_CREATE_MEM, IOCTL_ARGS_NODE)
+#define LWPMUDRV_IOCTL_READ_MEM               _IOW(LWPMU_IOC_MAGIC, DRV_OPERATION_READ_MEM, IOCTL_ARGS_NODE)
+#define LWPMUDRV_IOCTL_CHECK_STATUS           _IOR(LWPMU_IOC_MAGIC, DRV_OPERATION_CHECK_STATUS, IOCTL_ARGS_NODE)
+#define LWPMUDRV_IOCTL_TIMER_TRIGGER_READ     _IO (LWPMU_IOC_MAGIC, DRV_OPERATION_TIMER_TRIGGER_READ)
+
+#elif defined(DRV_OS_MAC)
+
+/* IOCTL_ARGS*/
+	typedef struct IOCTL_ARGS_NODE_S IOCTL_ARGS_NODE;
+	typedef IOCTL_ARGS_NODE *IOCTL_ARGS;
+	struct IOCTL_ARGS_NODE_S {
+		U64 r_len;
+		char *r_buf;
+		U64 w_len;
+		char *w_buf;
+		U32 command;
+	};
+
+	typedef struct CPU_ARGS_NODE_S CPU_ARGS_NODE;
+	typedef CPU_ARGS_NODE *CPU_ARGS;
+	struct CPU_ARGS_NODE_S {
+		U64 r_len;
+		char *r_buf;
+		U32 command;
+		U32 CPU_ID;
+		U32 BUCKET_ID;
+	};
+
+/* IOCTL_SETUP*/
+/**/
+#define LWPMU_IOC_MAGIC    99
+#define OS_SUCCESS         0
+#define OS_STATUS          int
+#define OS_ILLEGAL_IOCTL  -ENOTTY
+#define OS_NO_MEM         -ENOMEM
+#define OS_FAULT          -EFAULT
+
+/* Task file Opcodes.*/
+/* keeping the definitions as IOCTL but in MAC OSX*/
+/* these are really OpCodes consumed by Execute command.*/
+#define LWPMUDRV_IOCTL_START                  DRV_OPERATION_START
+#define LWPMUDRV_IOCTL_STOP                   DRV_OPERATION_STOP
+#define LWPMUDRV_IOCTL_INIT_PMU               DRV_OPERATION_INIT_PMU
+#define LWPMUDRV_IOCTL_GET_NORMALIZED_TSC     DRV_OPERATION_GET_NORMALIZED_TSC
+#define LWPMUDRV_IOCTL_TSC_SKEW_INFO          DRV_OPERATION_TSC_SKEW_INFO
+#define LWPMUDRV_IOCTL_PAUSE                  DRV_OPERATION_PAUSE
+#define LWPMUDRV_IOCTL_RESUME                 DRV_OPERATION_RESUME
+#define LWPMUDRV_IOCTL_TERMINATE              DRV_OPERATION_TERMINATE
+#define LWPMUDRV_IOCTL_RESERVE                DRV_OPERATION_RESERVE
+#define LWPMUDRV_IOCTL_VERSION                DRV_OPERATION_VERSION
+#define LWPMUDRV_IOCTL_SWITCH_GROUP           DRV_OPERATION_SWITCH_GROUP
+#define LWPMUDRV_IOCTL_GET_DRIVER_STATE       DRV_OPERATION_GET_DRIVER_STATE
+#define LWPMUDRV_IOCTL_INIT_UNCORE            DRV_OPERATION_INIT_UNCORE
+#define LWPMUDRV_IOCTL_EM_GROUPS_UNCORE       DRV_OPERATION_EM_GROUPS_UNCORE
+#define LWPMUDRV_IOCTL_EM_CONFIG_NEXT_UNCORE  DRV_OPERATION_EM_CONFIG_NEXT_UNCORE
+#define LWPMUDRV_IOCTL_READ_UNCORE_DATA       DRV_OPERATION_READ_UNCORE_DATA
+#define LWPMUDRV_IOCTL_STOP_MEM               DRV_OPERATION_STOP_MEM
+#define LWPMUDRV_IOCTL_CREATE_MEM             DRV_OPERATION_CREATE_MEM
+#define LWPMUDRV_IOCTL_READ_MEM               DRV_OPERATION_READ_MEM
+#define LWPMUDRV_IOCTL_CHECK_STATUS           DRV_OPERATION_CHECK_STATUS
+#define LWPMUDRV_IOCTL_TIMER_TRIGGER_READ     DRV_OPERATION_TIMER_TRIGGER_READ
+
+/* This is only for MAC OSX*/
+#define LWPMUDRV_IOCTL_SET_OSX_VERSION        998
+#define LWPMUDRV_IOCTL_PROVIDE_FUNCTION_PTRS  999
+
+#else
+#error "unknown OS in lwpmudrv_ioctl.h"
+#endif
+
+#if defined(__cplusplus)
+}
+#endif
+#endif
diff --git a/drivers/external_drivers/drivers/socwatch/soc_perf_driver/lwpmudrv_struct.h b/drivers/external_drivers/drivers/socwatch/soc_perf_driver/lwpmudrv_struct.h
new file mode 100644
index 0000000..0b9dcd7
--- /dev/null
+++ b/drivers/external_drivers/drivers/socwatch/soc_perf_driver/lwpmudrv_struct.h
@@ -0,0 +1,379 @@
+/* ***********************************************************************************************
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2007-2014 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  BSD LICENSE
+
+  Copyright(c) 2007-2014 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+  ***********************************************************************************************
+*/
+
+#ifndef _LWPMUDRV_STRUCT_H_
+#define _LWPMUDRV_STRUCT_H_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* processor execution modes*/
+#define MODE_UNKNOWN    99
+/* the following defines must start at 0*/
+#define MODE_64BIT      3
+#define MODE_32BIT      2
+#define MODE_16BIT      1
+#define MODE_V86        0
+
+/* sampling methods*/
+#define SM_RTC          2020	/* real time clock*/
+#define SM_VTD          2021	/* OS Virtual Timer Device*/
+#define SM_NMI          2022	/* non-maskable interrupt time based*/
+#define SM_EBS          2023	/* event based*/
+
+/* sampling mechanism bitmap definitions*/
+#define INTERRUPT_RTC   0x1
+#define INTERRUPT_VTD   0x2
+#define INTERRUPT_NMI   0x4
+#define INTERRUPT_EBS   0x8
+
+/* eflags defines*/
+#define EFLAGS_VM       0x00020000	/* V86 mode*/
+#define EFLAGS_IOPL0    0
+#define EFLAGS_IOPL1    0x00001000
+#define EFLAGS_IOPL2    0x00002000
+#define EFLAGS_IOPL3    0x00003000
+#define MAX_DEVICES     30
+#define MAX_EMON_GROUPS 1000
+
+	extern float freq_multiplier;
+
+/* Enumeration for invoking dispatch on multiple cpus or not*/
+	typedef enum {
+		DRV_MULTIPLE_INSTANCE = 0,
+		DRV_SINGLE_INSTANCE
+	} DRV_PROG_TYPE;
+
+	typedef struct DRV_CONFIG_NODE_S DRV_CONFIG_NODE;
+	typedef DRV_CONFIG_NODE *DRV_CONFIG;
+
+	struct DRV_CONFIG_NODE_S {
+		U32 size;
+		U32 num_events;
+		DRV_BOOL start_paused;
+		DRV_BOOL counting_mode;
+		U32 dispatch_id;
+		DRV_BOOL enable_chipset;
+		U32 num_chipset_events;
+		U32 chipset_offset;
+		DRV_BOOL enable_gfx;
+		DRV_BOOL enable_pwr;
+		DRV_BOOL emon_mode;
+		U32 pebs_mode;
+		U32 pebs_capture;
+		DRV_BOOL collect_lbrs;
+		DRV_BOOL collect_callstacks;
+		DRV_BOOL debug_inject;
+		DRV_BOOL virt_phys_translation;
+		DRV_BOOL latency_capture;
+		U32 max_gp_counters;
+		DRV_BOOL htoff_mode;
+		DRV_BOOL power_capture;
+		U32 results_offset;	/* this is to store the offset for this device's results*/
+		DRV_BOOL eventing_ip_capture;
+		DRV_BOOL hle_capture;
+		U32 emon_unc_offset[MAX_EMON_GROUPS];
+		S32 seed_name_len;
+		U32 reserved0;
+		U64 target_pid;
+		DRV_BOOL use_pcl;
+		DRV_BOOL enable_ebc;
+		DRV_BOOL enable_tbc;
+		U32 reserved1;
+		union {
+			S8 *seed_name;
+			U64 dummy1;
+		} u1;
+		union {
+			S8 *cpu_mask;
+			U64 dummy2;
+		} u2;
+		U32 device_type;
+		U32 reserved2;
+	};
+
+#define DRV_CONFIG_size(cfg)                      (cfg)->size
+#define DRV_CONFIG_num_events(cfg)                (cfg)->num_events
+#define DRV_CONFIG_start_paused(cfg)              (cfg)->start_paused
+#define DRV_CONFIG_counting_mode(cfg)             (cfg)->counting_mode
+#define DRV_CONFIG_dispatch_id(cfg)               (cfg)->dispatch_id
+#define DRV_CONFIG_enable_chipset(cfg)            (cfg)->enable_chipset
+#define DRV_CONFIG_num_chipset_events(cfg)        (cfg)->num_chipset_events
+#define DRV_CONFIG_chipset_offset(cfg)            (cfg)->chipset_offset
+#define DRV_CONFIG_enable_gfx(cfg)                (cfg)->enable_gfx
+#define DRV_CONFIG_enable_pwr(cfg)                (cfg)->enable_pwr
+#define DRV_CONFIG_emon_mode(cfg)                 (cfg)->emon_mode
+#if defined(DRV_IA32) || defined(DRV_EM64T)
+#define DRV_CONFIG_pebs_mode(cfg)                 (cfg)->pebs_mode
+#define DRV_CONFIG_pebs_capture(cfg)              (cfg)->pebs_capture
+#define DRV_CONFIG_collect_lbrs(cfg)              (cfg)->collect_lbrs
+#define DRV_CONFIG_collect_callstacks(cfg)        (cfg)->collect_callstacks
+#define DRV_CONFIG_debug_inject(cfg)              (cfg)->debug_inject
+#define DRV_CONFIG_virt_phys_translation(cfg)     (cfg)->virt_phys_translation
+#define DRV_CONFIG_latency_capture(cfg)           (cfg)->latency_capture
+#define DRV_CONFIG_max_gp_counters(cfg)           (cfg)->max_gp_counters
+#define DRV_CONFIG_htoff_mode(cfg)                (cfg)->htoff_mode
+#define DRV_CONFIG_power_capture(cfg)             (cfg)->power_capture
+#define DRV_CONFIG_results_offset(cfg)            (cfg)->results_offset
+#define DRV_CONFIG_eventing_ip_capture(cfg)       (cfg)->eventing_ip_capture
+#define DRV_CONFIG_hle_capture(cfg)               (cfg)->hle_capture
+
+#define DRV_CONFIG_emon_unc_offset(cfg,grp_num)   (cfg)->emon_unc_offset[grp_num]
+#else
+#define DRV_CONFIG_collect_ro(cfg)                (cfg)->collect_ro
+#endif
+#define DRV_CONFIG_seed_name(cfg)                 (cfg)->u1.seed_name
+#define DRV_CONFIG_seed_name_len(cfg)             (cfg)->seed_name_len
+#define DRV_CONFIG_cpu_mask(cfg)                  (cfg)->u2.cpu_mask
+#define DRV_CONFIG_target_pid(cfg)                (cfg)->target_pid
+#define DRV_CONFIG_use_pcl(cfg)                   (cfg)->use_pcl
+#define DRV_CONFIG_event_based_counts(cfg)        (cfg)->enable_ebc
+#define DRV_CONFIG_timer_based_counts(cfg)        (cfg)->enable_tbc
+#define DRV_CONFIG_device_type(cfg)               (cfg)->device_type
+
+/*
+ * @macro SOCPERF_VERSION_NODE_S
+ * @brief
+ * This structure supports versioning in Sep. The field major indicates the major version,
+ * minor indicates the minor version and api indicates the api version for the current
+ * sep build. This structure is initialized at the time when the driver is loaded.
+ */
+
+	typedef struct SOCPERF_VERSION_NODE_S SOCPERF_VERSION_NODE;
+	typedef SOCPERF_VERSION_NODE *SOCPERF_VERSION;
+
+	struct SOCPERF_VERSION_NODE_S {
+		union {
+			U32 socperf_version;
+			struct {
+				S32 major:8;
+				S32 minor:8;
+				S32 api:16;
+			} s1;
+		} u1;
+	};
+
+#define SOCPERF_VERSION_NODE_socperf_version(version) (version)->u1.socperf_version
+#define SOCPERF_VERSION_NODE_major(version)           (version)->u1.s1.major
+#define SOCPERF_VERSION_NODE_minor(version)           (version)->u1.s1.minor
+#define SOCPERF_VERSION_NODE_api(version)             (version)->u1.s1.api
+
+	typedef struct DEVICE_INFO_NODE_S DEVICE_INFO_NODE;
+	typedef DEVICE_INFO_NODE *DEVICE_INFO;
+
+	struct DEVICE_INFO_NODE_S {
+		S8 *dll_name;
+		PVOID dll_handle;
+		S8 *cpu_name;
+		S8 *pmu_name;
+		S8 *event_db_file_name;
+		/* this is undefined right now. Please take this as structure containing U64*/
+		/*PLATFORM_IDENTITY plat_identity;  */
+		U32 plat_type;	/* device type (e.g., DEVICE_INFO_CORE, etc. ... see enum below)*/
+		U32 plat_sub_type;	/* cti_type (e.g., CTI_Sandybridge, etc., ... see env_info_types.h)*/
+		U32 dispatch_id;	/* this will be set in user mode dlls and will be unique across all IPF, IA32 (including MIDS).*/
+		ECB *ecb;
+		EVENT_CONFIG ec;
+		DRV_CONFIG pcfg;
+		U32 num_of_groups;
+		U32 size_of_alloc;	/* size of each event control block*/
+		PVOID drv_event;
+		U32 num_events;
+		U32 event_id_index;	/* event id index of device (basically how many events processed before this device)*/
+		U32 num_counters;
+		U32 group_index;
+		U32 num_packages;
+		U32 num_units;
+	};
+
+#define MAX_EVENT_NAME_LENGTH 64
+
+#define DEVICE_INFO_dll_name(pdev)                  (pdev)->dll_name
+#define DEVICE_INFO_dll_handle(pdev)                (pdev)->dll_handle
+#define DEVICE_INFO_cpu_name(pdev)                  (pdev)->cpu_name
+#define DEVICE_INFO_pmu_name(pdev)                  (pdev)->pmu_name
+#define DEVICE_INFO_event_db_file_name(pdev)        (pdev)->event_db_file_name
+#define DEVICE_INFO_plat_type(pdev)                 (pdev)->plat_type
+#define DEVICE_INFO_plat_sub_type(pdev)             (pdev)->plat_sub_type
+#define DEVICE_INFO_ecb(pdev)                       (pdev)->ecb
+#define DEVICE_INFO_ec(pdev)                        (pdev)->ec
+#define DEVICE_INFO_pcfg(pdev)                      (pdev)->pcfg
+#define DEVICE_INFO_num_groups(pdev)                (pdev)->num_of_groups
+#define DEVICE_INFO_size_of_alloc(pdev)             (pdev)->size_of_alloc
+#define DEVICE_INFO_drv_event(pdev)                 (pdev)->drv_event
+#define DEVICE_INFO_num_events(pdev)                (pdev)->num_events
+#define DEVICE_INFO_event_id_index(pdev)            (pdev)->event_id_index
+#define DEVICE_INFO_num_counters(pdev)              (pdev)->num_counters
+#define DEVICE_INFO_group_index(pdev)               (pdev)->group_index
+#define DEVICE_INFO_num_packages(pdev)              (pdev)->num_packages
+#define DEVICE_INFO_num_units(pdev)                 (pdev)->num_units
+
+	typedef struct DEVICE_INFO_DATA_NODE_S DEVICE_INFO_DATA_NODE;
+	typedef DEVICE_INFO_DATA_NODE *DEVICE_INFO_DATA;
+
+	struct DEVICE_INFO_DATA_NODE_S {
+		DEVICE_INFO pdev_info;
+		U32 num_elements;
+		U32 num_allocated;
+	};
+
+#define DEVICE_INFO_DATA_pdev_info(d)           (d)->pdev_info
+#define DEVICE_INFO_DATA_num_elements(d)        (d)->num_elements
+#define DEVICE_INFO_DATA_num_allocated(d)       (d)->num_allocated
+
+	typedef enum {
+		DEVICE_INFO_CORE = 0,
+		DEVICE_INFO_UNCORE = 1,
+		DEVICE_INFO_CHIPSET = 2,
+		DEVICE_INFO_GFX = 3,
+		DEVICE_INFO_PWR = 4,
+		DEVICE_INFO_TELEMETRY = 5
+	} DEVICE_INFO_TYPE;
+
+#if defined(__cplusplus)
+}
+#endif
+typedef struct DRV_EVENT_MASK_NODE_S DRV_EVENT_MASK_NODE;
+typedef DRV_EVENT_MASK_NODE *DRV_EVENT_MASK;
+
+struct DRV_EVENT_MASK_NODE_S {
+	U8 event_idx;		/* 0 <= index < MAX_EVENTS*/
+	union {
+		U8 bitFields1;
+		struct {
+			U8 precise:1;
+			U8 lbr_capture:1;
+			U8 dear_capture:1;	/* Indicates which events need to have additional registers read*/
+			/* because they are DEAR events.*/
+			U8 iear_capture:1;	/* Indicates which events need to have additional registers read*/
+			/* because they are IEAR events.*/
+			U8 btb_capture:1;	/* Indicates which events need to have additional registers read*/
+			/* because they are BTB events.*/
+			U8 ipear_capture:1;	/* Indicates which events need to have additional registers read*/
+			/* because they are IPEAR events.*/
+			U8 uncore_capture:1;
+			U8 reserved0:2;
+		} s1;
+	} u1;
+};
+
+#define DRV_EVENT_MASK_event_idx(d)             (d)->event_idx
+#define DRV_EVENT_MASK_bitFields1(d)            (d)->u1.bitFields1
+#define DRV_EVENT_MASK_precise(d)               (d)->u1.s1.precise
+#define DRV_EVENT_MASK_lbr_capture(d)           (d)->u1.s1.lbr_capture
+#define DRV_EVENT_MASK_dear_capture(d)          (d)->u1.s1.dear_capture
+#define DRV_EVENT_MASK_iear_capture(d)          (d)->u1.s1.iear_capture
+#define DRV_EVENT_MASK_btb_capture(d)           (d)->u1.s1.btb_capture
+#define DRV_EVENT_MASK_ipear_capture(d)         (d)->u1.s1.ipear_capture
+#define DRV_EVENT_MASK_uncore_capture(d)        (d)->u1.s1.uncore_capture
+
+#define MAX_OVERFLOW_EVENTS 11	/* This defines the maximum number of overflow events per interrupt.*/
+				  /* In order to reduce memory footprint, the value should be at least*/
+				  /* the number of fixed and general PMU registers.*/
+				  /* Sandybridge with HT off has 11 PMUs(3 fixed and 8 generic)*/
+
+typedef struct DRV_MASKS_NODE_S DRV_MASKS_NODE;
+typedef DRV_MASKS_NODE *DRV_MASKS;
+
+/*
+ * @macro DRV_EVENT_MASK_NODE_S
+ * @brief
+ * The structure is used to store overflow events when handling PMU interrupt.
+ * This approach should be more efficient than checking all event masks
+ * if there are many events to be monitored
+ * and only a few events among them have overflow per interrupt.
+ */
+struct DRV_MASKS_NODE_S {
+	DRV_EVENT_MASK_NODE eventmasks[MAX_OVERFLOW_EVENTS];
+	U8 masks_num;		/* 0 <= mask_num <= MAX_OVERFLOW_EVENTS*/
+	U8 padding;		/* data structure alignment*/
+};
+
+#define DRV_MASKS_masks_num(d)           (d)->masks_num
+#define DRV_MASKS_eventmasks(d)          (d)->eventmasks
+
+typedef struct EMON_SCHED_INFO_NODE_S EMON_SCHED_INFO_NODE;
+typedef EMON_SCHED_INFO_NODE *EMON_SCHED_INFO;
+
+struct EMON_SCHED_INFO_NODE_S {
+	U32 max_counters_for_all_pmus;
+	U32 num_cpus;
+	U32 group_index[MAX_EMON_GROUPS];
+	U32 offset_for_next_device[MAX_EMON_GROUPS];
+	U32 device_id;
+	U32 num_packages;
+	U32 num_units;
+	U32 user_scheduled;
+};
+
+#define EMON_SCHED_INFO_max_counters_for_all_pmus(x)           (x)->max_counters_for_all_pmus
+#define EMON_SCHED_INFO_num_cpus(x)                            (x)->num_cpus
+#define EMON_SCHED_INFO_group_index(x,grp_num)                 (x)->group_index[grp_num]
+#define EMON_SCHED_INFO_offset_for_next_device(x, grp_num)     (x)->offset_for_next_device[grp_num]
+#define EMON_SCHED_INFO_device_id(x)                           (x)->device_id
+#define EMON_SCHED_INFO_num_packages(x)                        (x)->num_packages
+#define EMON_SCHED_INFO_num_units(x)                           (x)->num_units
+#define EMON_SCHED_INFO_user_scheduled(x)                      (x)->user_scheduled
+
+#define INITIALIZE_Emon_Sched_Info(x,j)                                                            \
+    for((j) =0; (j) < MAX_EMON_GROUPS; (j)++) {                                                    \
+        EMON_SCHED_INFO_group_index((x),(j))             = 0;                                      \
+        EMON_SCHED_INFO_offset_for_next_device((x), (j)) = 0;                                      \
+    }
+
+#endif
diff --git a/drivers/external_drivers/drivers/socwatch/soc_perf_driver/lwpmudrv_types.h b/drivers/external_drivers/drivers/socwatch/soc_perf_driver/lwpmudrv_types.h
new file mode 100644
index 0000000..72dacebf
--- /dev/null
+++ b/drivers/external_drivers/drivers/socwatch/soc_perf_driver/lwpmudrv_types.h
@@ -0,0 +1,180 @@
+/* ***********************************************************************************************
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2007-2014 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  BSD LICENSE
+
+  Copyright(c) 2007-2014 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+  ***********************************************************************************************
+*/
+
+#ifndef _LWPMUDRV_TYPES_H_
+#define _LWPMUDRV_TYPES_H_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+	typedef unsigned char U8;
+	typedef char S8;
+	typedef short S16;
+	typedef unsigned short U16;
+	typedef unsigned int U32;
+	typedef int S32;
+#if defined(DRV_OS_WINDOWS)
+	typedef unsigned __int64 U64;
+	typedef __int64 S64;
+#elif defined (DRV_OS_LINUX) || defined(DRV_OS_SOLARIS) || defined(DRV_OS_MAC) || defined (DRV_OS_ANDROID) || defined(DRV_OS_FREEBSD)
+	typedef unsigned long long U64;
+	typedef long long S64;
+	typedef unsigned long ULONG;
+	typedef void VOID;
+	typedef void *LPVOID;
+#else
+#error "Undefined OS"
+#endif
+
+#if defined(DRV_IA32)
+
+	typedef S32 SIOP;
+	typedef U32 UIOP;
+
+#elif defined(DRV_EM64T) || defined(DRV_IA64)
+
+	typedef S64 SIOP;
+	typedef U64 UIOP;
+
+#else
+#error "Unexpected Architecture seen"
+#endif
+
+	typedef U32 DRV_BOOL;
+	typedef void *PVOID;
+
+#if defined(UNICODE)
+	typedef wchar_t STCHAR;
+#define VTSA_T(x)           L ## x
+#else
+	typedef char STCHAR;
+#define VTSA_T(x)           x
+#endif
+
+#if defined(DRV_OS_WINDOWS)
+#include <wchar.h>
+	typedef wchar_t DRV_STCHAR;
+	typedef wchar_t VTSA_CHAR;
+#else
+	typedef char DRV_STCHAR;
+#endif
+
+/**/
+/* Handy Defines*/
+/**/
+	typedef U32 DRV_STATUS;
+
+#define   MAX_STRING_LENGTH             1024
+#define   MAXNAMELEN                     256
+
+#if defined(DRV_OS_WINDOWS)
+#define   UNLINK                        _unlink
+#define   RENAME                        rename
+#define   WCSDUP                        _wcsdup
+#endif
+#if defined(DRV_OS_LINUX) || defined(DRV_OS_SOLARIS) || defined(DRV_OS_MAC) || defined (DRV_OS_ANDROID) || defined(DRV_OS_FREEBSD)
+#define   UNLINK                        unlink
+#define   RENAME                        rename
+#endif
+
+#if (defined(DRV_OS_SOLARIS) || defined(DRV_OS_MAC) || defined (DRV_OS_ANDROID) || defined(DRV_OS_FREEBSD))&& !defined(__KERNEL__)
+/*wcsdup is missing on Solaris*/
+#include <stdlib.h>
+#include <wchar.h>
+
+	static inline wchar_t *solaris_wcsdup(const wchar_t * wc) {
+		wchar_t *tmp =
+		    (wchar_t *) malloc((wcslen(wc) + 1) * sizeof(wchar_t));
+		 wcscpy(tmp, wc);
+		 return tmp;
+	}
+#define   WCSDUP                        solaris_wcsdup
+#endif
+#if defined(DRV_OS_LINUX)
+#define   WCSDUP                        wcsdup
+#endif
+#if !defined(_WCHAR_T_DEFINED)
+#if defined(DRV_OS_LINUX) || defined(DRV_OS_ANDROID) || defined(DRV_OS_SOLARIS)
+#if !defined(_GNU_SOURCE)
+#define _GNU_SOURCE
+#endif
+#endif
+#endif
+#if (defined(DRV_OS_LINUX) || defined(DRV_OS_ANDROID)) && !defined(__KERNEL__)
+#include <wchar.h>
+	typedef wchar_t VTSA_CHAR;
+#endif
+
+#if (defined(DRV_OS_MAC) || defined(DRV_OS_FREEBSD) || defined(DRV_OS_SOLARIS)) && !defined(__KERNEL__)
+#include <wchar.h>
+	typedef wchar_t VTSA_CHAR;
+#endif
+
+#define   TRUE                          1
+#define   FALSE                         0
+
+#define ALIGN_4(x)    (((x) +  3) &  ~3)
+#define ALIGN_8(x)    (((x) +  7) &  ~7)
+#define ALIGN_16(x)   (((x) + 15) & ~15)
+#define ALIGN_32(x)   (((x) + 31) & ~31)
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
diff --git a/drivers/external_drivers/drivers/socwatch/soc_perf_driver/lwpmudrv_version.h b/drivers/external_drivers/drivers/socwatch/soc_perf_driver/lwpmudrv_version.h
new file mode 100644
index 0000000..6d73173
--- /dev/null
+++ b/drivers/external_drivers/drivers/socwatch/soc_perf_driver/lwpmudrv_version.h
@@ -0,0 +1,107 @@
+/* ***********************************************************************************************
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  BSD LICENSE
+
+  Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+  ***********************************************************************************************
+*/
+/*
+ *  File  : lwpmudrv_version.h
+ */
+
+#ifndef _LWPMUDRV_VERSION_H_
+#define _LWPMUDRV_VERSION_H_
+
+/* SOCPERF VERSIONING*/
+
+#define _STRINGIFY(x)     #x
+#define STRINGIFY(x)      _STRINGIFY(x)
+#define _STRINGIFY_W(x)   L#x
+#define STRINGIFY_W(x)    _STRINGIFY_W(x)
+
+#define SOCPERF_MAJOR_VERSION 1
+#define SOCPERF_MINOR_VERSION 2
+#define SOCPERF_API_VERSION   0
+
+#define SOCPERF_NAME          "socperf"
+#define SOCPERF_NAME_W        L"socperf"
+
+#define SOCPERF_MSG_PREFIX    SOCPERF_NAME""STRINGIFY(SOCPERF_MAJOR_VERSION)"_"STRINGIFY(SOCPERF_MINOR_VERSION)":"
+#define SOCPERF_VERSION_STR   STRINGIFY(SOCPERF_MAJOR_VERSION)"."STRINGIFY(SOCPERF_MINOR_VERSION)"."STRINGIFY(SOCPERF_API_VERSION)
+
+#if defined(DRV_OS_WINDOWS)
+#define SOCPERF_DRIVER_NAME   SOCPERF_NAME"drv"STRINGIFY(SOCPERF_MAJOR_VERSION)"_"STRINGIFY(SOCPERF_MINOR_VERSION)
+#define SOCPERF_DRIVER_NAME_W SOCPERF_NAME_W L"drv" STRINGIFY_W(SOCPERF_MAJOR_VERSION) L"_" STRINGIFY_W(SOCPERF_MINOR_VERSION)
+#define SOCPERF_DEVICE_NAME   SOCPERF_DRIVER_NAME
+#endif
+
+#if defined(DRV_OS_LINUX) || defined(DRV_OS_SOLARIS) || defined(DRV_OS_ANDROID) || defined(DRV_OS_FREEBSD)
+#define SOCPERF_DRIVER_NAME   SOCPERF_NAME""STRINGIFY(SOCPERF_MAJOR_VERSION)"_"STRINGIFY(SOCPERF_MINOR_VERSION)
+#define SOCPERF_SAMPLES_NAME  SOCPERF_DRIVER_NAME"_s"
+#define SOCPERF_DEVICE_NAME   "/dev/"SOCPERF_DRIVER_NAME
+#endif
+
+#if defined(DRV_OS_MAC)
+#define SOCPERF_DRIVER_NAME   SOCPERF_NAME""STRINGIFY(SOCPERF_MAJOR_VERSION)"_"STRINGIFY(SOCPERF_MINOR_VERSION)
+#define SOCPERF_SAMPLES_NAME  SOCPERF_DRIVER_NAME"_s"
+#define SOCPERF_DEVICE_NAME   SOCPERF_DRIVER_NAME
+#endif
+
+#if defined(EMON_INTERNAL)
+#define SOCPERF_DRIVER_MODE " (SOCPERF INTERNAL)"
+#elif defined(EMON)
+#define SOCPERF_DRIVER_MODE " (ESOCPERFMON)"
+#else
+#define SOCPERF_DRIVER_MODE ""
+#endif
+
+#endif
diff --git a/drivers/external_drivers/drivers/socwatch/soc_perf_driver/pci.c b/drivers/external_drivers/drivers/socwatch/soc_perf_driver/pci.c
new file mode 100644
index 0000000..384aaa4
--- /dev/null
+++ b/drivers/external_drivers/drivers/socwatch/soc_perf_driver/pci.c
@@ -0,0 +1,197 @@
+/* ***********************************************************************************************
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2005-2014 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  BSD LICENSE
+
+  Copyright(c) 2005-2014 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+  ***********************************************************************************************
+*/
+
+#include "lwpmudrv_defines.h"
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <asm/page.h>
+#include <asm/io.h>
+
+#include "lwpmudrv_types.h"
+#include "rise_errors.h"
+#include "lwpmudrv_ecb.h"
+#include "socperfdrv.h"
+#include "pci.h"
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn extern int PCI_Read_From_Memory_Address(addr, val)
+ *
+ * @param    addr    - physical address in mmio
+ * @param   *value  - value at this address
+ *
+ * @return  status
+ *
+ * @brief   Read memory mapped i/o physical location
+ *
+ */
+extern int PCI_Read_From_Memory_Address(U32 addr, U32 * val)
+{
+	U32 aligned_addr, offset, value;
+	PVOID base;
+
+	if (addr <= 0) {
+		return OS_INVALID;
+	}
+
+	SOCPERF_PRINT_DEBUG
+	    ("PCI_Read_From_Memory_Address: reading physical address:%x\n",
+	     addr);
+	offset = addr & ~PAGE_MASK;
+	aligned_addr = addr & PAGE_MASK;
+	SOCPERF_PRINT_DEBUG
+	    ("PCI_Read_From_Memory_Address: aligned physical address:%x,offset:%x\n",
+	     aligned_addr, offset);
+
+	base = ioremap_nocache(aligned_addr, PAGE_SIZE);
+	if (base == NULL) {
+		return OS_INVALID;
+	}
+
+	value = readl(base + offset);
+	*val = value;
+	SOCPERF_PRINT_DEBUG
+	    ("PCI_Read_From_Memory_Address: value at this physical address:%x\n",
+	     value);
+
+	iounmap(base);
+
+	return OS_SUCCESS;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn extern int PCI_Write_To_Memory_Address(addr, val)
+ *
+ * @param   addr   - physical address in mmio
+ * @param   value  - value to be written
+ *
+ * @return  status
+ *
+ * @brief   Write to memory mapped i/o physical location
+ *
+ */
+extern int PCI_Write_To_Memory_Address(U32 addr, U32 val)
+{
+	U32 aligned_addr, offset;
+	PVOID base;
+
+	if (addr <= 0) {
+		return OS_INVALID;
+	}
+
+	SOCPERF_PRINT_DEBUG
+	    ("PCI_Write_To_Memory_Address: writing physical address:%x with value:%x\n",
+	     addr, val);
+	offset = addr & ~PAGE_MASK;
+	aligned_addr = addr & PAGE_MASK;
+	SOCPERF_PRINT_DEBUG
+	    ("PCI_Write_To_Memory_Address: aligned physical address:%x,offset:%x\n",
+	     aligned_addr, offset);
+
+	base = ioremap_nocache(aligned_addr, PAGE_SIZE);
+	if (base == NULL) {
+		return OS_INVALID;
+	}
+
+	writel(val, base + offset);
+
+	iounmap(base);
+
+	return OS_SUCCESS;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn extern int PCI_Read_Ulong(pci_address)
+ *
+ * @param    pci_address - PCI configuration address
+ *
+ * @return  value at this location
+ *
+ * @brief   Reads a ULONG from PCI configuration space
+ *
+ */
+extern int PCI_Read_Ulong(U32 pci_address)
+{
+	U32 temp_ulong = 0;
+
+	outl(pci_address, PCI_ADDR_IO);
+	temp_ulong = inl(PCI_DATA_IO);
+
+	return temp_ulong;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn extern int PCI_Write_Ulong(addr, val)
+ *
+ * @param    pci_address - PCI configuration address
+ * @param    value - Value to be written
+ *
+ * @return  status
+ *
+ * @brief   Writes a ULONG to PCI configuration space
+ *
+ */
+extern void PCI_Write_Ulong(U32 pci_address, U32 value)
+{
+	outl(pci_address, PCI_ADDR_IO);
+	outl(value, PCI_DATA_IO);
+
+	return;
+}
diff --git a/drivers/external_drivers/drivers/socwatch/soc_perf_driver/pci.h b/drivers/external_drivers/drivers/socwatch/soc_perf_driver/pci.h
new file mode 100644
index 0000000..14b5543
--- /dev/null
+++ b/drivers/external_drivers/drivers/socwatch/soc_perf_driver/pci.h
@@ -0,0 +1,113 @@
+/* ***********************************************************************************************
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2013-2014 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  BSD LICENSE
+
+  Copyright(c) 2013-2014 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+  ***********************************************************************************************
+*/
+
+#ifndef _PCI_H_
+#define _PCI_H_
+
+#include "lwpmudrv_defines.h"
+
+/*
+ * PCI Config Address macros
+ */
+#define PCI_ENABLE                          0x80000000
+
+#define PCI_ADDR_IO                         0xCF8
+#define PCI_DATA_IO                         0xCFC
+
+#define BIT0                                0x1
+#define BIT1                                0x2
+
+/*
+ * Macro for forming a PCI configuration address
+ */
+#define FORM_PCI_ADDR(bus,dev,fun,off)     (((PCI_ENABLE))          |   \
+                                            ((bus & 0xFF) << 16)    |   \
+                                            ((dev & 0x1F) << 11)    |   \
+                                            ((fun & 0x07) <<  8)    |   \
+                                            ((off & 0xFF) <<  0))
+
+#define VENDOR_ID_MASK                        0x0000FFFF
+#define DEVICE_ID_MASK                        0xFFFF0000
+#define DEVICE_ID_BITSHIFT                    16
+#define LOWER_4_BYTES_MASK                    0x00000000FFFFFFFF
+#define MAX_BUSNO                             256
+#define NEXT_ADDR_OFFSET                      4
+#define NEXT_ADDR_SHIFT                       32
+#define DRV_IS_PCI_VENDOR_ID_INTEL            0x8086
+
+#define CHECK_IF_GENUINE_INTEL_DEVICE(value, vendor_id, device_id)    \
+    {                                                                 \
+        vendor_id = value & VENDOR_ID_MASK;                           \
+        device_id = (value & DEVICE_ID_MASK) >> DEVICE_ID_BITSHIFT;   \
+                                                                      \
+        if (vendor_id != DRV_IS_PCI_VENDOR_ID_INTEL) {                \
+            continue;                                                 \
+        }                                                             \
+                                                                      \
+    }
+
+#if defined(DRV_IA32) || defined(DRV_EM64T)
+extern int PCI_Read_From_Memory_Address(U32 addr, U32 * val);
+
+extern int PCI_Write_To_Memory_Address(U32 addr, U32 val);
+
+extern int PCI_Read_Ulong(U32 pci_address);
+
+extern void PCI_Write_Ulong(U32 pci_address, U32 value);
+#endif
+
+#endif
diff --git a/drivers/external_drivers/drivers/socwatch/soc_perf_driver/rise_errors.h b/drivers/external_drivers/drivers/socwatch/soc_perf_driver/rise_errors.h
new file mode 100644
index 0000000..537773d
--- /dev/null
+++ b/drivers/external_drivers/drivers/socwatch/soc_perf_driver/rise_errors.h
@@ -0,0 +1,352 @@
+/* ***********************************************************************************************
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2004-2014 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  BSD LICENSE
+
+  Copyright(c) 2004-2014 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+  ***********************************************************************************************
+*/
+
+#ifndef _RISE_ERRORS_H_
+#define _RISE_ERRORS_H_
+
+/**/
+/* NOTE:*/
+/**/
+/* 1) Before adding an error code, first make sure the error code doesn't*/
+/* already exist. If it does, use that, don't create a new one just because...*/
+/**/
+/* 2) When adding an error code, add it to the end of the list. Don't insert*/
+/* error numbers in the middle of the list! For backwards compatibility,*/
+/* we don't want the numbers changing unless we really need them*/
+/* to for some reason (like we want to switch to negative error numbers)*/
+/**/
+/* 3) Change the VT_LAST_ERROR_CODE macro to point to the (newly added)*/
+/* last error. This is done so SW can verify the number of error codes*/
+/* possible matches the number of error strings it has*/
+/**/
+/* 4) Don't forget to update the error string table to include your*/
+/* error code (rise_class.cpp). Since the goal is something human readable*/
+/* you don't need to use abbreviations in there (ie. don't say "bad param",*/
+/* say "bad parameter" or "illegal parameter passed in")*/
+/**/
+/* 5) Compile and run the test_rise app (in the test_rise directory) to*/
+/* verify things are still working*/
+/**/
+/**/
+
+#define VT_SUCCESS                      0
+
+/*************************************************************/
+
+#define VT_INVALID_MAX_SAMP                   1
+#define VT_INVALID_SAMP_PER_BUFF              2
+#define VT_INVALID_SAMP_INTERVAL              3
+#define VT_INVALID_PATH                       4
+#define VT_TB5_IN_USE                         5
+#define VT_INVALID_NUM_EVENTS                 6
+#define VT_INTERNAL_ERROR                     8
+#define VT_BAD_EVENT_NAME                     9
+#define VT_NO_SAMP_SESSION                   10
+#define VT_NO_EVENTS                         11
+#define VT_MULTIPLE_RUNS                     12
+#define VT_NO_SAM_PARAMS                     13
+#define VT_SDB_ALREADY_EXISTS                14
+#define VT_SAMPLING_ALREADY_STARTED          15
+#define VT_TBS_NOT_SUPPORTED                 16
+#define VT_INVALID_SAMPARAMS_SIZE            17
+#define VT_INVALID_EVENT_SIZE                18
+#define VT_ALREADY_PROCESSES                 19
+#define VT_INVALID_EVENTS_PATH               20
+#define VT_INVALID_LICENSE                   21
+
+/******************************************************/
+/*SEP error codes*/
+
+#define VT_SAM_ERROR                    22
+#define VT_SAMPLE_FILE_ALREADY_MAPPED   23
+#define VT_INVALID_SAMPLE_FILE          24
+#define VT_UNKNOWN_SECTION_NUMBER       25
+#define VT_NO_MEMORY                    26
+#define VT_ENV_VAR_NOT_FOUND            27
+#define VT_SAMPLE_FILE_NOT_MAPPED       28
+#define VT_BUFFER_OVERFLOW              29
+#define VT_USER_OP_COMPLETED            30
+#define VT_BINARY_NOT_FOUND             31
+#define VT_ISM_NOT_INITIALIZED          32
+#define VT_NO_SYMBOLS                   33
+#define VT_SAMPLE_FILE_MAPPING_ERROR    34
+#define VT_BUFFER_NULL                  35
+#define VT_UNEXPECTED_NULL_PTR          36
+#define VT_BINARY_LOAD_FAILED           37
+#define VT_FUNCTION_NOT_FOUND_IN_BINARY 38
+#define VT_ENTRY_NOT_FOUND              39
+#define VT_SEP_SYNTAX_ERROR             40
+#define VT_SEP_OPTIONS_ERROR            41
+#define VT_BAD_EVENT_MODIFIER           42
+#define VT_INCOMPATIBLE_PARAMS          43
+#define VT_FILE_OPEN_FAILED             44
+#define VT_EARLY_EXIT                   45
+#define VT_TIMEOUT_RETURN               46
+#define VT_NO_CHILD_PROCESS             47
+#define VT_DRIVER_RUNNING               48
+#define VT_DRIVER_STOPPED               49
+#define VT_MULTIPLE_RUNS_NEEDED         50
+#define VT_QUIT_IMMEDIATE               51
+#define VT_DRIVER_INIT_FAILED           52
+#define VT_NO_TB5_CREATED               53
+#define VT_NO_WRITE_PERMISSION          54
+#define VT_DSA_INIT_FAILED              55
+#define VT_INVALID_CPU_MASK             56
+#define VT_SAMP_IN_RUNNING_STATE        57
+#define VT_SAMP_IN_PAUSE_STATE          58
+#define VT_SAMP_IN_STOP_STATE           59
+#define VT_SAMP_NO_SESSION              60
+#define VT_NOT_CONFIGURED               61
+#define VT_LAUNCH_BUILD64_FAILED        62
+#define VT_BAD_PARAMETER                63
+#define VT_ISM_INIT_FAILED              64
+#define VT_INVALID_STATE_TRANS          65
+#define VT_EARLY_EXIT_N_CANCEL          66
+#define VT_EVT_MGR_NOT_INIT             67
+#define VT_ISM_SECTION_ENUM_FAILED      68
+#define VT_VG_PARSER_ERROR              69
+#define VT_MISSING_VALUE_FOR_TOKEN      70
+#define VT_EMPTY_SAMPLE_FILE_NAME       71
+#define VT_UNEXPECTED_VALUE             72
+#define VT_NOT_IMPLEMENTED              73
+#define VT_MISSING_COL_DEPNDNCIES       74
+#define VT_DEP_COL_NOT_LIB_DEFINED      75
+#define VT_COL_NOT_REG_WITH_LIB         76
+#define VT_SECTION_ALREADY_IN_USE       77
+#define VT_SECTION_NOT_EXIST            78
+#define VT_STREAM_NOT_EXIST             79
+#define VT_INVALID_STREAM               80
+#define VT_STREAM_ALREADY_IN_USE        81
+#define VT_DATA_DESC_NOT_EXIST          82
+#define VT_INVALID_ERROR_CODE           83
+#define VT_INCOMPATIBLE_VERSION         84
+#define VT_LEGACY_DATA_NOT_EXIST        85
+#define VT_INVALID_READ_START           86
+#define VT_DRIVER_OPEN_FAILED           87
+#define VT_DRIVER_IOCTL_FAILED          88
+#define VT_SAMP_FILE_CREATE_FAILED      89
+#define VT_MODULE_FILE_CREATE_FAILED    90
+#define VT_INVALID_SAMPLE_FILE_NAME     91
+#define VT_INVALID_MODULE_FILE_NAME     92
+#define VT_FORK_CHILD_PROCESS_FAILED    93
+#define VT_UNEXPECTED_MISMATCH_IN_STRING_TYPES    94
+#define VT_INCOMPLETE_TB5_ENCOUNTERED   95
+#define VT_ERR_CONVERSION_FROM_STRING_2_NUMBER 96
+#define VT_INVALID_STRING               97
+#define VT_UNSUPPORTED_DATA_SIZE        98
+#define VT_TBRW_INIT_FAILED             99
+#define VT_PLUGIN_UNLOAD                100
+#define VT_PLUGIN_ENTRY_NULL            101
+#define VT_UNKNOWN_PLUGIN               102
+#define VT_BUFFER_TOO_SMALL             103
+#define VT_CANNOT_MODIFY_COLUMN         104
+#define VT_MULT_FILTERS_NOT_ALLOWED     105
+#define VT_ADDRESS_IN_USE               106
+#define VT_NO_MORE_MMAPS                107
+#define VT_MAX_PAGES_IN_DS_EXCEEDED     108
+#define VT_INVALID_COL_TYPE_IN_GROUP_INFO 109
+#define VT_AGG_FN_ON_VARCHAR_NOT_SUPP   110
+#define VT_INVALID_ACCESS_PERMS         111
+#define VT_NO_DATA_TO_DISPLAY           112
+#define VT_TB5_IS_NOT_BOUND             113
+#define VT_MISSING_GROUP_BY_COLUMN      114
+#define VT_SMRK_MAX_STREAMS_EXCEEDED    115
+#define VT_SMRK_STREAM_NOT_CREATED      116
+#define VT_SMRK_NOT_IMPL                117
+#define VT_SMRK_TYPE_NOT_IMPL           118
+#define VT_SMRK_TYPE_ALREADY_SET        119
+#define VT_SMRK_NO_STREAM               120
+#define VT_SMRK_INVALID_STREAM_TYPE     121
+#define VT_SMRK_STREAM_NOT_FOUND        122
+#define VT_SMRK_FAIL                    123
+#define VT_SECTION_NOT_READABLE         124
+#define VT_SECTION_NOT_WRITEABLE        125
+#define VT_GLOBAL_SECTION_NOT_CLOSED    126
+#define VT_STREAM_SECTION_NOT_CLOSED    127
+#define VT_STREAM_NOT_CLOSED            128
+#define VT_STREAM_NOT_BOUND             129
+#define VT_NO_COLS_SPECIFIED            130
+#define VT_NOT_ALL_SECTIONS_CLOSED      131
+#define VT_SMRK_INVALID_PTR             132
+#define VT_UNEXPECTED_BIND_MISMATCH     133
+#define VT_WIN_TIMER_ERROR              134
+#define VT_ONLY_SNGL_DEPNDT_COL_ALLWD   135
+#define VT_BAD_MODULE                   136
+#define VT_INPUT_SOURCE_INFO_NOT_SET    137
+#define VT_UNSUPPORTED_TIME_GRAN        138
+#define VT_NO_SAMPLES_COLLECTED         139
+#define VT_INVALID_CPU_TYPE_VERSION     140
+#define VT_BIND_UNEXPECTED_1STMODREC    141
+#define VT_BIND_MODULES_NOT_SORTED      142
+#define VT_UNEXPECTED_NUM_CPUIDS        143
+#define VT_UNSUPPORTED_ARCH_TYPE        144
+#define VT_NO_DATA_TO_WRITE             145
+#define VT_EM_TIME_SLICE_TOO_SMALL      146
+#define VT_EM_TOO_MANY_EVENT_GROUPS     147
+#define VT_EM_ZERO_GROUPS               148
+#define VT_EM_NOT_SUPPORTED             149
+#define VT_PMU_IN_USE                   150
+#define VT_TOO_MANY_INTERRUPTS          151
+#define VT_MAX_SAMPLES_REACHED          152
+#define VT_MODULE_COLLECTION_FAILED     153
+#define VT_INCOMPATIBLE_DRIVER          154
+#define VT_UNABLE_LOCATE_TRIGGER_EVENT  155
+#define VT_COMMAND_NOT_HANDLED          156
+#define VT_DRIVER_VERSION_MISMATCH      157
+#define VT_MAX_MARKERS                  158
+#define VT_DRIVER_COMM_FAILED           159
+#define VT_CHIPSET_CONFIG_FAILED        160
+#define VT_BAD_DATA_BASE                161
+#define VT_PAX_SERVICE_NOT_CONNECTED    162
+#define VT_PAX_SERVICE_ERROR            163
+#define VT_PAX_PMU_RESERVE_FAILED       164
+#define VT_INVALID_CPU_INFO_TYPE        165
+#define VT_CACHE_DOESNT_EXIST           166
+#define VT_UNSUPPORTED_UNCORE_ARCH_TYPE 167
+#define VT_EXCEEDED_MAX_EVENTS          168
+#define VT_MARKER_TIMER_FAILED          169
+#define VT_PAX_PMU_UNRESERVE_FAILED     170
+#define VT_MULTIPLE_PROCESSES_FOUND     171
+#define VT_NO_SUCH_PROCESS_FOUND        172
+#define VT_PCL_NOT_ENABLED              173
+#define VT_PCL_UID_CHECK                174
+#define VT_DEL_RESULTS_DIR_FAILED       175
+#define VT_NO_VALID_EVENTS              176
+#define VT_INVALID_EVENT                177
+#define VT_EVENTS_COUNTED               178
+#define VT_EVENTS_COLLECTED             179
+#define VT_UNSUPPORTED_GFX_ARCH_TYPE    180
+#define VT_GFX_CONFIG_FAILED            181
+#define VT_UNSUPPORTED_NON_NATIVE_MODE  182
+#define VT_INVALID_DEVICE               183
+#define VT_ENV_SETUP_FAILED             184
+#define VT_RESUME_NOT_RECEIVED          185
+#define VT_UNSUPPORTED_PWR_ARCH_TYPE    186
+#define VT_PWR_CONFIG_FAILED            187
+#define VT_NMI_WATCHDOG_FOUND           188
+#define VT_NO_PMU_RESOURCES             189
+#define VT_MIC_CARD_NOT_ONLINE          190
+#define VT_FREEZE_ON_PMI_NOT_AVAIL      191
+#define VT_FLUSH_FAILED                 192
+#define VT_FLUSH_SUCCESS                193
+
+/*
+ * define error code for checking on async marker request
+ */
+#define VT_INVALID_MARKER_ID           -1
+
+/*
+ * ************************************************************
+ * NOTE: after adding new error code(s), remember to also
+ *       update the following:
+ *           1) VT_LAST_ERROR_CODE below
+ *           2) viewer/sampling_utils/src/rise.c
+ *           3) collector/runsa/sep_msg_catalog.xmc
+ *
+ * ************************************************************
+ */
+
+/**/
+/* To make error checking easier, the special VT_LAST_ERROR_CODE*/
+/* should be set to whatever is the last error on the list above*/
+/**/
+#define VT_LAST_ERROR_CODE         VT_MIC_CARD_NOT_ONLINE+1
+
+/**/
+/* Define a macro to determine success or failure. Users of this*/
+/* error header file should use the macros instead of direct*/
+/* checks so that we can change the error numbers in the future*/
+/* (such as making negative numbers be an error indication and positive*/
+/* numbers being a success with a value indication)*/
+/**/
+#define VTSA_SUCCESS(x)   ((x) == VT_SUCCESS)
+#define VTSA_FAILED(x)    (!VTSA_SUCCESS(x))
+
+/**/
+/* These should be deprecated, but we'll keep them here just in case*/
+/**/
+#define SEP_IS_SUCCESS(x)  VTSA_SUCCESS(x)
+#define SEP_IS_FAILED(x)   VTSA_FAILED(x)
+
+/*************************************************************
+ * API Error Codes
+ *************************************************************/
+#define VTAPI_INVALID_MAX_SAMP               VT_INVALID_MAX_SAMP
+#define VTAPI_INVALID_SAMP_PER_BUFF          VT_INVALID_SAMP_PER_BUFF
+#define VTAPI_INVALID_SAMP_INTERVAL          VT_INVALID_SAMP_INTERVAL
+#define VTAPI_INVALID_PATH                   VT_INVALID_PATH
+#define VTAPI_TB5_IN_USE                     VT_TB5_IN_USE
+#define VTAPI_INVALID_NUM_EVENTS             VT_INVALID_NUM_EVENTS
+#define VTAPI_INTERNAL_ERROR                 VT_INTERNAL_ERROR
+#define VTAPI_BAD_EVENT_NAME                 VT_BAD_EVENT_NAME
+#define VTAPI_NO_SAMP_SESSION                VT_NO_SAMP_SESSION
+#define VTAPI_NO_EVENTS                      VT_NO_EVENTS
+#define VTAPI_MULTIPLE_RUNS                  VT_MULTIPLE_RUNS
+#define VTAPI_NO_SAM_PARAMS                  VT_NO_SAM_PARAMS
+#define VTAPI_SDB_ALREADY_EXISTS             VT_SDB_ALREADY_EXISTS
+#define VTAPI_SAMPLING_ALREADY_STARTED       VT_SAMPLING_ALREADY_STARTED
+#define VTAPI_TBS_NOT_SUPPORTED              VT_TBS_NOT_SUPPORTED
+#define VTAPI_INVALID_SAMPARAMS_SIZE         VT_INVALID_SAMPARAMS_SIZE
+#define VTAPI_INVALID_EVENT_SIZE             VT_INVALID_EVENT_SIZE
+#define VTAPI_ALREADY_PROCESSES              VT_ALREADY_PROCESSES
+#define VTAPI_INVALID_EVENTS_PATH            VT_INVALID_EVENTS_PATH
+#define VTAPI_INVALID_LICENSE                VT_INVALID_LICENSE
+
+typedef int RISE_ERROR;
+typedef void *RISE_PTR;
+
+#endif
diff --git a/drivers/external_drivers/drivers/socwatch/soc_perf_driver/soc_uncore.c b/drivers/external_drivers/drivers/socwatch/soc_perf_driver/soc_uncore.c
new file mode 100644
index 0000000..46f1cf4
--- /dev/null
+++ b/drivers/external_drivers/drivers/socwatch/soc_perf_driver/soc_uncore.c
@@ -0,0 +1,1187 @@
+/* ***********************************************************************************************
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2013-2014 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  BSD LICENSE
+
+  Copyright(c) 2013-2014 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+  ***********************************************************************************************
+*/
+
+#include "lwpmudrv_defines.h"
+#include <linux/version.h>
+#include <linux/fs.h>
+
+#include "lwpmudrv_types.h"
+#include "lwpmudrv_ecb.h"
+#include "lwpmudrv_struct.h"
+
+#include "socperfdrv.h"
+#include "control.h"
+#include "soc_uncore.h"
+#include "ecb_iterators.h"
+#include "pci.h"
+
+#if defined (PCI_HELPERS_API)
+#include <asm/intel_mid_pcihelpers.h>
+#elif defined(DRV_CHROMEOS)
+#include <linux/pci.h>
+static struct pci_dev *pci_root = NULL;
+#define PCI_DEVFN(slot, func)   ((((slot) & 0x1f) << 3) | ((func) & 0x07))
+#endif
+
+static U32 counter_overflow[UNCORE_MAX_COUNTERS];
+static U32 counter_port_id = 0;
+extern U64 *read_unc_ctr_info;
+/*global variable for reading  counter values*/
+static U64 *uncore_current_data = NULL;
+static U64 *uncore_to_read_data = NULL;
+static U32 device_id = 0;
+static U64 trace_virtual_address = 0;
+
+#if defined(DRV_CHROMEOS)
+/*!
+ * @fn          static VOID get_pci_device_handle(U32   bus_no,
+                                                  U32   dev_no,
+                                                  U32   func_no)
+ *
+ * @brief       Get PCI device handle to be able to read/write
+ *
+ * @param       bus_no      - bus number
+ *              dev_no      - device number
+ *              func_no     - function number
+ *
+ * @return      None
+ *
+ * <I>Special Notes:</I>
+ */
+static void get_pci_device_handle(U32 bus_no, U32 dev_no, U32 func_no)
+{
+	if (!pci_root) {
+		pci_root =
+		    pci_get_bus_and_slot(bus_no, PCI_DEVFN(dev_no, func_no));
+		if (!pci_root) {
+			SOCPERF_PRINT_DEBUG("Unable to get pci device handle");
+		}
+	}
+
+	return;
+}
+#endif
+
+/*!
+ * @fn          static VOID write_To_Register(U32   bus_no,
+                                              U32   dev_no,
+                                              U32   func_no,
+                                              U32   port_id,
+                                              U32   op_code,
+                                              U64   mmio_offset,
+                                              ULONG value)
+ *
+ * @brief       Reads Uncore programming
+ *
+ * @param       bus_no      - bus number
+ *              dev_no      - device number
+ *              func_no     - function number
+ *              port_id     - port id
+ *              op_code     - operation code
+ *              mmio_offset - mmio offset
+ *              value       - data to be written to the register
+ *
+ * @return      None
+ *
+ * <I>Special Notes:</I>
+ */
+static void
+write_To_Register(U32 bus_no,
+		  U32 dev_no,
+		  U32 func_no,
+		  U32 port_id, U32 op_code, U64 mmio_offset, ULONG value)
+{
+	U32 cmd = 0;
+	U32 mmio_offset_lo;
+	U32 mmio_offset_hi;
+#if !defined(DRV_CHROMEOS) && !defined(PCI_HELPERS_API)
+	U32 pci_address;
+#endif
+
+	mmio_offset_hi = mmio_offset & SOC_UNCORE_OFFSET_HI_MASK;
+	mmio_offset_lo = mmio_offset & SOC_UNCORE_OFFSET_LO_MASK;
+	cmd = (op_code << SOC_UNCORE_OP_CODE_SHIFT) +
+	    (port_id << SOC_UNCORE_PORT_ID_SHIFT) +
+	    (mmio_offset_lo << 8) + (SOC_UNCORE_BYTE_ENABLES << 4);
+	SOCPERF_PRINT_DEBUG("write off=%llx value=%x\n", mmio_offset, value);
+
+#if defined (PCI_HELPERS_API)
+	intel_mid_msgbus_write32_raw_ext(cmd, mmio_offset_hi, value);
+#elif defined(DRV_CHROMEOS)
+	if (!pci_root) {
+		get_pci_device_handle(bus_no, dev_no, func_no);
+	}
+	pci_write_config_dword(pci_root, SOC_UNCORE_MDR_REG_OFFSET, value);
+	pci_write_config_dword(pci_root, SOC_UNCORE_MCRX_REG_OFFSET,
+			       mmio_offset_hi);
+	pci_write_config_dword(pci_root, SOC_UNCORE_MCR_REG_OFFSET, cmd);
+#else
+	pci_address =
+	    FORM_PCI_ADDR(bus_no, dev_no, func_no, SOC_UNCORE_MDR_REG_OFFSET);
+	PCI_Write_Ulong((ULONG) pci_address, (ULONG) value);
+	pci_address =
+	    FORM_PCI_ADDR(bus_no, dev_no, func_no, SOC_UNCORE_MCRX_REG_OFFSET);
+	PCI_Write_Ulong((ULONG) pci_address, mmio_offset_hi);
+	pci_address =
+	    FORM_PCI_ADDR(bus_no, dev_no, func_no, SOC_UNCORE_MCR_REG_OFFSET);
+	PCI_Write_Ulong((ULONG) pci_address, cmd);
+#endif
+
+	return;
+}
+
+/*!
+ * @fn          static ULONG read_From_Register(U32 bus_no,
+                                                U32 dev_no,
+                                                U32 func_no,
+                                                U32 port_id,
+                                                U32 op_code,
+                                                U64 mmio_offset)
+ *
+ * @brief       Reads Uncore programming info
+ *
+ * @param       bus_no      - bus number
+ *              dev_no      - device number
+ *              func_no     - function number
+ *              port_id     - port id
+ *              op_code     - operation code
+ *              mmio_offset - mmio offset
+ *
+ * @return      data from the counter
+ *
+ * <I>Special Notes:</I>
+ */
+static void
+read_From_Register(U32 bus_no,
+		   U32 dev_no,
+		   U32 func_no,
+		   U32 port_id, U32 op_code, U64 mmio_offset, U32 * data_val)
+{
+	U32 data = 0;
+	U32 cmd = 0;
+	U32 mmio_offset_hi;
+	U32 mmio_offset_lo;
+#if !defined(DRV_CHROMEOS) && !defined(PCI_HELPERS_API)
+	U32 pci_address;
+#endif
+
+	mmio_offset_hi = mmio_offset & SOC_UNCORE_OFFSET_HI_MASK;
+	mmio_offset_lo = mmio_offset & SOC_UNCORE_OFFSET_LO_MASK;
+	cmd = (op_code << SOC_UNCORE_OP_CODE_SHIFT) +
+	    (port_id << SOC_UNCORE_PORT_ID_SHIFT) +
+	    (mmio_offset_lo << 8) + (SOC_UNCORE_BYTE_ENABLES << 4);
+
+#if defined (PCI_HELPERS_API)
+	data = intel_mid_msgbus_read32_raw_ext(cmd, mmio_offset_hi);
+#elif defined(DRV_CHROMEOS)
+	if (!pci_root) {
+		get_pci_device_handle(bus_no, dev_no, func_no);
+	}
+	pci_write_config_dword(pci_root, SOC_UNCORE_MCRX_REG_OFFSET,
+			       mmio_offset_hi);
+	pci_write_config_dword(pci_root, SOC_UNCORE_MCR_REG_OFFSET, cmd);
+	pci_read_config_dword(pci_root, SOC_UNCORE_MDR_REG_OFFSET, &data);
+#else
+	pci_address =
+	    FORM_PCI_ADDR(bus_no, dev_no, func_no, SOC_UNCORE_MCRX_REG_OFFSET);
+	PCI_Write_Ulong((ULONG) pci_address, mmio_offset_hi);
+	pci_address =
+	    FORM_PCI_ADDR(bus_no, dev_no, func_no, SOC_UNCORE_MCR_REG_OFFSET);
+	PCI_Write_Ulong((ULONG) pci_address, cmd);
+	pci_address =
+	    FORM_PCI_ADDR(bus_no, dev_no, func_no, SOC_UNCORE_MDR_REG_OFFSET);
+	data = PCI_Read_Ulong(pci_address);
+#endif
+	SOCPERF_PRINT_DEBUG("read off=%llx value=%x\n", mmio_offset, data);
+	if (data_val) {
+		*data_val = data;
+	}
+
+	return;
+}
+
+/*!
+ * @fn          static VOID uncore_Reset_Counters(U32 dev_idx)
+ *
+ * @brief       Reset counters
+ *
+ * @param       dev_idx - device index
+ *
+ * @return      None
+ *
+ * <I>Special Notes:</I>
+ */
+static VOID uncore_Reset_Counters(U32 dev_idx)
+{
+	U32 data_reg = 0;
+
+	if (counter_port_id != 0) {
+		FOR_EACH_PCI_REG_RAW(pecb, i, dev_idx) {
+			if (ECB_entries_reg_type(pecb, i) == CCCR) {
+				data_reg = i + ECB_cccr_pop(pecb);
+				if (ECB_entries_reg_type(pecb, data_reg) ==
+				    DATA) {
+					write_To_Register(ECB_entries_bus_no
+							  (pecb, data_reg),
+							  ECB_entries_dev_no
+							  (pecb, data_reg),
+							  ECB_entries_func_no
+							  (pecb, data_reg),
+							  counter_port_id,
+							  SOC_COUNTER_WRITE_OP_CODE,
+							  ECB_entries_pci_id_offset
+							  (pecb, data_reg),
+							  (ULONG) 0);
+				}
+				write_To_Register(ECB_entries_bus_no(pecb, i),
+						  ECB_entries_dev_no(pecb, i),
+						  ECB_entries_func_no(pecb, i),
+						  counter_port_id,
+						  SOC_COUNTER_WRITE_OP_CODE,
+						  ECB_entries_pci_id_offset
+						  (pecb, i),
+						  (ULONG) SOC_UNCORE_STOP);
+			}
+		}
+		END_FOR_EACH_PCI_REG_RAW;
+	}
+
+	return;
+}
+
+/*!
+ * @fn          static VOID uncore_Write_PMU(VOID*)
+ *
+ * @brief       Initial write of PMU registers
+ *              Walk through the entries and write the value of the register accordingly.
+ *              When current_group = 0, then this is the first time this routine is called,
+ *
+ * @param       param - device index
+ *
+ * @return      None
+ *
+ * <I>Special Notes:</I>
+ */
+static VOID uncore_Write_PMU(VOID * param)
+{
+	U32 dev_idx;
+	ECB pecb;
+	DRV_PCI_DEVICE_ENTRY dpden;
+	U32 pci_address;
+	U32 bar_lo;
+	U64 bar_hi;
+	U64 final_bar;
+	U64 physical_address;
+	U32 dev_index = 0;
+	S32 bar_list[SOC_UNCORE_MAX_PCI_DEVICES];
+	U32 bar_index = 0;
+	U32 map_size = 0;
+	U64 virtual_address = 0;
+	U32 bar_name = 0;
+	DRV_PCI_DEVICE_ENTRY curr_pci_entry = NULL;
+	U32 next_bar_offset = 0;
+	U64 mmio_offset = 0;
+	U64 map_base = 0;
+	U32 i = 0;
+	/*U32                        data_val        = 0;*/
+
+	dev_idx = *((U32 *) param);
+	if (device_uncore == NULL) {
+		SOCPERF_PRINT_ERROR("ERROR: NULL device_uncore!\n");
+		return;
+	}
+	pecb = (ECB) LWPMU_DEVICE_PMU_register_data(device_uncore)[0];
+	if (pecb == NULL) {
+		SOCPERF_PRINT_ERROR("ERROR: null pecb!\n");
+		return;
+	}
+
+	device_id = dev_idx;
+	for (dev_index = 0; dev_index < SOC_UNCORE_MAX_PCI_DEVICES; dev_index++) {
+		bar_list[dev_index] = -1;
+	}
+
+	/* initialize the per-counter overflow numbers*/
+	for (i = 0; i < UNCORE_MAX_COUNTERS; i++) {
+		counter_overflow[i] = 0;
+		pcb[0].last_uncore_count[i] = 0;
+	}
+
+	/* Allocate memory for reading GMCH counter values + the group id*/
+	if (!uncore_current_data) {
+		uncore_current_data =
+		    CONTROL_Allocate_Memory((UNCORE_MAX_COUNTERS +
+					     1) * sizeof(U64));
+		if (!uncore_current_data) {
+			return;
+		}
+	}
+	if (!uncore_to_read_data) {
+		uncore_to_read_data =
+		    CONTROL_Allocate_Memory((UNCORE_MAX_COUNTERS +
+					     1) * sizeof(U64));
+		if (!uncore_to_read_data) {
+			return;
+		}
+	}
+
+	ECB_pcidev_entry_list(pecb) =
+	    (DRV_PCI_DEVICE_ENTRY) ((S8 *) pecb + ECB_pcidev_list_offset(pecb));
+	dpden = ECB_pcidev_entry_list(pecb);
+
+	uncore_Reset_Counters(dev_idx);
+
+	for (dev_index = 0; dev_index < ECB_num_pci_devices(pecb); dev_index++) {
+		curr_pci_entry = &dpden[dev_index];
+		bar_name = DRV_PCI_DEVICE_ENTRY_bar_name(curr_pci_entry);
+		mmio_offset =
+		    DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio(curr_pci_entry);
+
+		if (counter_port_id == 0
+		    && DRV_PCI_DEVICE_ENTRY_prog_type(curr_pci_entry) ==
+		    UNC_COUNTER) {
+			counter_port_id =
+			    DRV_PCI_DEVICE_ENTRY_port_id(curr_pci_entry);
+		}
+		if (DRV_PCI_DEVICE_ENTRY_config_type(curr_pci_entry) ==
+		    UNC_PCICFG) {
+			if (bar_name == UNC_SOCPCI
+			    && (DRV_PCI_DEVICE_ENTRY_prog_type(curr_pci_entry)
+				== UNC_MUX
+				||
+				DRV_PCI_DEVICE_ENTRY_prog_type(curr_pci_entry)
+				== UNC_COUNTER)
+			    && DRV_PCI_DEVICE_ENTRY_operation(curr_pci_entry) ==
+			    UNC_OP_WRITE) {
+				SOCPERF_PRINT_DEBUG
+				    ("dev_index=%d OFFSET=%x VAL=%x\n",
+				     dev_index,
+				     DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio
+				     (curr_pci_entry),
+				     DRV_PCI_DEVICE_ENTRY_value
+				     (curr_pci_entry));
+				write_To_Register(DRV_PCI_DEVICE_ENTRY_bus_no
+						  (curr_pci_entry),
+						  DRV_PCI_DEVICE_ENTRY_dev_no
+						  (curr_pci_entry),
+						  DRV_PCI_DEVICE_ENTRY_func_no
+						  (curr_pci_entry),
+						  DRV_PCI_DEVICE_ENTRY_port_id
+						  (curr_pci_entry),
+						  DRV_PCI_DEVICE_ENTRY_op_code
+						  (curr_pci_entry),
+						  DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio
+						  (curr_pci_entry),
+						  (ULONG)
+						  DRV_PCI_DEVICE_ENTRY_value
+						  (curr_pci_entry));
+			}
+			continue;
+		}
+		/* UNC_MMIO programming*/
+		if (bar_list[bar_name] != -1) {
+			bar_index = bar_list[bar_name];
+			virtual_address =
+			    DRV_PCI_DEVICE_ENTRY_virtual_address(&dpden
+								 [bar_index]);
+			DRV_PCI_DEVICE_ENTRY_virtual_address(curr_pci_entry) =
+			    DRV_PCI_DEVICE_ENTRY_virtual_address(&dpden
+								 [bar_index]);
+			writel(DRV_PCI_DEVICE_ENTRY_value(curr_pci_entry),
+			       (U32 *) (((char *)(UIOP) virtual_address) +
+					mmio_offset));
+			continue;
+		}
+		pci_address =
+		    FORM_PCI_ADDR(DRV_PCI_DEVICE_ENTRY_bus_no(curr_pci_entry),
+				  DRV_PCI_DEVICE_ENTRY_dev_no(curr_pci_entry),
+				  DRV_PCI_DEVICE_ENTRY_func_no(curr_pci_entry),
+				  DRV_PCI_DEVICE_ENTRY_bar_offset
+				  (curr_pci_entry));
+		bar_lo = PCI_Read_Ulong(pci_address);
+		next_bar_offset =
+		    DRV_PCI_DEVICE_ENTRY_bar_offset(curr_pci_entry) +
+		    SOC_UNCORE_NEXT_ADDR_OFFSET;
+		pci_address =
+		    FORM_PCI_ADDR(DRV_PCI_DEVICE_ENTRY_bus_no(curr_pci_entry),
+				  DRV_PCI_DEVICE_ENTRY_dev_no(curr_pci_entry),
+				  DRV_PCI_DEVICE_ENTRY_func_no(curr_pci_entry),
+				  next_bar_offset);
+		bar_hi = PCI_Read_Ulong(pci_address);
+		final_bar = (bar_hi << SOC_UNCORE_BAR_ADDR_SHIFT) | bar_lo;
+		final_bar &= SOC_UNCORE_BAR_ADDR_MASK;
+		DRV_PCI_DEVICE_ENTRY_bar_address(curr_pci_entry) = final_bar;
+		physical_address =
+		    DRV_PCI_DEVICE_ENTRY_bar_address(curr_pci_entry);
+		if (physical_address) {
+			map_size = SOC_UNCORE_OTHER_BAR_MMIO_PAGE_SIZE;
+			map_base = (mmio_offset / map_size) * map_size;
+			if (mmio_offset > map_size) {
+				physical_address = physical_address + map_base;
+			}
+		}
+	}
+
+	return;
+}
+
+/*!
+ * @fn         static VOID uncore_Disable_PMU(PVOID)
+ *
+ * @brief      Unmap the virtual address when sampling/driver stops
+ *
+ * @param      param - device index
+ *
+ * @return     None
+ *
+ * <I>Special Notes:</I>
+ */
+static VOID uncore_Disable_PMU(PVOID param)
+{
+	U32 dev_idx = *((U32 *) param);
+
+	if (GLOBAL_STATE_current_phase(driver_state) == DRV_STATE_PREPARE_STOP) {
+		uncore_Reset_Counters(dev_idx);
+	}
+	uncore_current_data = CONTROL_Free_Memory(uncore_current_data);
+	uncore_to_read_data = CONTROL_Free_Memory(uncore_to_read_data);
+
+	return;
+}
+
+/*!
+ * @fn         static VOID uncore_Stop_Mem(VOID)
+ *
+ * @brief      Stop trace
+ *
+ * @param      param - None
+ *
+ * @return     None
+ *
+ * <I>Special Notes:</I>
+ */
+static VOID uncore_Stop_Mem(VOID)
+{
+	ECB pecb;
+	DRV_PCI_DEVICE_ENTRY dpden;
+	U32 bar_name = 0;
+	DRV_PCI_DEVICE_ENTRY curr_pci_entry = NULL;
+	U64 mmio_offset = 0;
+	U32 dev_index = 0;
+	U32 data_val = 0;
+
+	if (device_uncore == NULL) {
+		SOCPERF_PRINT_ERROR("ERROR: NULL device_uncore!\n");
+		return;
+	}
+	pecb = (ECB) LWPMU_DEVICE_PMU_register_data(device_uncore)[0];
+	if (pecb == NULL) {
+		SOCPERF_PRINT_ERROR("ERROR: null pecb!\n");
+		return;
+	}
+
+	ECB_pcidev_entry_list(pecb) =
+	    (DRV_PCI_DEVICE_ENTRY) ((S8 *) pecb + ECB_pcidev_list_offset(pecb));
+	dpden = ECB_pcidev_entry_list(pecb);
+
+	for (dev_index = 0; dev_index < ECB_num_pci_devices(pecb); dev_index++) {
+		curr_pci_entry = &dpden[dev_index];
+		bar_name = DRV_PCI_DEVICE_ENTRY_bar_name(curr_pci_entry);
+		mmio_offset =
+		    DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio(curr_pci_entry);
+
+		if (DRV_PCI_DEVICE_ENTRY_prog_type(curr_pci_entry) == UNC_STOP
+		    && DRV_PCI_DEVICE_ENTRY_config_type(curr_pci_entry) ==
+		    UNC_PCICFG && bar_name == UNC_SOCPCI
+		    && DRV_PCI_DEVICE_ENTRY_operation(curr_pci_entry) ==
+		    UNC_OP_READ) {
+			SOCPERF_PRINT_DEBUG("op=%d port=%d offset=%x val=%x\n",
+					    DRV_PCI_DEVICE_ENTRY_op_code
+					    (curr_pci_entry),
+					    DRV_PCI_DEVICE_ENTRY_port_id
+					    (curr_pci_entry),
+					    DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio
+					    (curr_pci_entry), data_val);
+			read_From_Register(DRV_PCI_DEVICE_ENTRY_bus_no
+					   (curr_pci_entry),
+					   DRV_PCI_DEVICE_ENTRY_dev_no
+					   (curr_pci_entry),
+					   DRV_PCI_DEVICE_ENTRY_func_no
+					   (curr_pci_entry),
+					   DRV_PCI_DEVICE_ENTRY_port_id
+					   (curr_pci_entry),
+					   SOC_COUNTER_READ_OP_CODE,
+					   DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio
+					   (curr_pci_entry), &data_val);
+			SOCPERF_PRINT_DEBUG("op=%d port=%d offset=%x val=%x\n",
+					    DRV_PCI_DEVICE_ENTRY_op_code
+					    (curr_pci_entry),
+					    DRV_PCI_DEVICE_ENTRY_port_id
+					    (curr_pci_entry),
+					    DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio
+					    (curr_pci_entry), data_val);
+			write_To_Register(DRV_PCI_DEVICE_ENTRY_bus_no
+					  (curr_pci_entry),
+					  DRV_PCI_DEVICE_ENTRY_dev_no
+					  (curr_pci_entry),
+					  DRV_PCI_DEVICE_ENTRY_func_no
+					  (curr_pci_entry),
+					  DRV_PCI_DEVICE_ENTRY_port_id
+					  (curr_pci_entry),
+					  SOC_COUNTER_WRITE_OP_CODE,
+					  DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio
+					  (curr_pci_entry),
+					  (ULONG) (data_val | 0x2000));
+		}
+	}
+
+	return;
+}
+
+/*!
+ * @fn         static VOID uncore_Initialize(PVOID)
+ *
+ * @brief      Initialize any registers or addresses
+ *
+ * @param      param
+ *
+ * @return     None
+ *
+ * <I>Special Notes:</I>
+ */
+static VOID uncore_Initialize(VOID * param)
+{
+	return;
+}
+
+/*!
+ * @fn         static VOID uncore_Clean_Up(PVOID)
+ *
+ * @brief      Reset any registers or addresses
+ *
+ * @param      param
+ *
+ * @return     None
+ *
+ * <I>Special Notes:</I>
+ */
+static VOID uncore_Clean_Up(VOID * param)
+{
+	if (trace_virtual_address) {
+		iounmap((void *)(UIOP) trace_virtual_address);
+		trace_virtual_address = 0;
+	}
+	return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn uncore_Read_Counts(param, id)
+ *
+ * @param    param    The read thread node to process
+ * @param    id       The id refers to the device index
+ *
+ * @return   None     No return needed
+ *
+ * @brief    Read the Uncore count data and store into the buffer param;
+ *
+ */
+static VOID uncore_Read_Counts(PVOID param, U32 id)
+{
+	U64 *data;
+	U32 data_index = 0;
+	U32 event_id = 0;
+	U32 data_reg = 0;
+
+	if (GLOBAL_STATE_current_phase(driver_state) == DRV_STATE_UNINITIALIZED
+	    || GLOBAL_STATE_current_phase(driver_state) == DRV_STATE_IDLE
+	    || GLOBAL_STATE_current_phase(driver_state) == DRV_STATE_RESERVED
+	    || GLOBAL_STATE_current_phase(driver_state) ==
+	    DRV_STATE_PREPARE_STOP
+	    || GLOBAL_STATE_current_phase(driver_state) == DRV_STATE_STOPPED) {
+		return;
+	}
+
+	if (param == NULL) {
+		return;
+	}
+
+	if (uncore_to_read_data == NULL) {
+		return;
+	}
+
+	data = param;
+	data[data_index] = uncore_to_read_data[data_index];
+	data_index++;
+
+	FOR_EACH_PCI_REG_RAW(pecb, i, dev_idx) {
+		event_id = ECB_entries_event_id_index_local(pecb, i);
+		if (ECB_entries_reg_type(pecb, i) == CCCR) {
+			data_reg = i + ECB_cccr_pop(pecb);
+			if (ECB_entries_reg_type(pecb, data_reg) == DATA) {
+				data[data_index + event_id] =
+				    uncore_to_read_data[data_index + event_id];
+			}
+		}
+
+	}
+	END_FOR_EACH_PCI_REG_RAW;
+
+	return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn uncore_Read_PMU_Data(param)
+ *
+ * @param    param    The device index
+ *
+ * @return   None     No return needed
+ *
+ * @brief    Read the Uncore count data and store into the buffer param;
+ *
+ */
+static VOID uncore_Read_PMU_Data(PVOID param)
+{
+	S32 j;
+	U64 *buffer = read_unc_ctr_info;
+	U32 start_index;
+	DRV_CONFIG pcfg_unc;
+	U32 data_val = 0;
+	U32 data_reg = 0;
+	U64 total_count = 0;
+	U32 this_cpu = CONTROL_THIS_CPU();
+	CPU_STATE pcpu = &pcb[this_cpu];
+	U32 event_index = 0;
+
+	if (!CPU_STATE_socket_master(pcpu)) {
+		return;
+	}
+
+	pcfg_unc = (DRV_CONFIG) LWPMU_DEVICE_pcfg(device_uncore);
+	start_index = DRV_CONFIG_emon_unc_offset(pcfg_unc, 0);
+
+	FOR_EACH_PCI_REG_RAW(pecb, i, dev_idx) {
+		if (ECB_entries_reg_type(pecb, i) == CCCR) {
+			write_To_Register(ECB_entries_bus_no(pecb, i),
+					  ECB_entries_dev_no(pecb, i),
+					  ECB_entries_func_no(pecb, i),
+					  counter_port_id,
+					  SOC_COUNTER_WRITE_OP_CODE,
+					  ECB_entries_pci_id_offset(pecb, i),
+					  (ULONG) SOC_UNCORE_SAMPLE_DATA);
+
+			data_reg = i + ECB_cccr_pop(pecb);
+			if (ECB_entries_reg_type(pecb, data_reg) == DATA) {
+				j = start_index + ECB_entries_group_index(pecb,
+									  data_reg)
+				    +
+				    ECB_entries_emon_event_id_index_local(pecb,
+									  data_reg);
+				read_From_Register(ECB_entries_bus_no
+						   (pecb, data_reg),
+						   ECB_entries_dev_no(pecb,
+								      data_reg),
+						   ECB_entries_func_no(pecb,
+								       data_reg),
+						   counter_port_id,
+						   SOC_COUNTER_READ_OP_CODE,
+						   ECB_entries_pci_id_offset
+						   (pecb, data_reg), &data_val);
+				if (data_val <
+				    pcb[0].last_uncore_count[event_index]) {
+					counter_overflow[event_index] =
+					    counter_overflow[event_index] + 1;
+				}
+				pcb[0].last_uncore_count[event_index] =
+				    data_val;
+				total_count =
+				    data_val +
+				    counter_overflow[event_index] *
+				    UNCORE_MAX_COUNT;
+				event_index++;
+				buffer[j] = total_count;
+			}
+		}
+
+	}
+	END_FOR_EACH_PCI_REG_RAW;
+
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn uncore_Trigger_Read()
+ *
+ * @param    None
+ *
+ * @return   None     No return needed
+ *
+ * @brief    Read the counters when timer is triggered
+ *
+ */
+static VOID uncore_Trigger_Read(VOID)
+{
+	U64 *temp;
+	U32 event_id = 0;
+	U64 *data;
+	int data_index;
+	U32 data_val = 0;
+	U32 data_reg = 0;
+	U64 total_count = 0;
+	U32 event_index = 0;
+
+	if (GLOBAL_STATE_current_phase(driver_state) == DRV_STATE_UNINITIALIZED
+	    || GLOBAL_STATE_current_phase(driver_state) == DRV_STATE_IDLE
+	    || GLOBAL_STATE_current_phase(driver_state) == DRV_STATE_RESERVED
+	    || GLOBAL_STATE_current_phase(driver_state) ==
+	    DRV_STATE_PREPARE_STOP
+	    || GLOBAL_STATE_current_phase(driver_state) == DRV_STATE_STOPPED) {
+		return;
+	}
+
+	if (uncore_current_data == NULL) {
+		return;
+	}
+
+	data = uncore_current_data;
+	data_index = 0;
+
+	preempt_disable();
+
+	/* Write GroupID*/
+	data[data_index] = 1;
+	/* Increment the data index as the event id starts from zero*/
+	data_index++;
+
+	FOR_EACH_PCI_REG_RAW(pecb, i, dev_idx) {
+		event_id = ECB_entries_event_id_index_local(pecb, i);
+
+		if (ECB_entries_reg_type(pecb, i) == CCCR) {
+			write_To_Register(ECB_entries_bus_no(pecb, i),
+					  ECB_entries_dev_no(pecb, i),
+					  ECB_entries_func_no(pecb, i),
+					  counter_port_id,
+					  SOC_COUNTER_WRITE_OP_CODE,
+					  ECB_entries_pci_id_offset(pecb, i),
+					  (ULONG) SOC_UNCORE_SAMPLE_DATA);
+
+			data_reg = i + ECB_cccr_pop(pecb);
+			if (ECB_entries_reg_type(pecb, data_reg) == DATA) {
+				read_From_Register(ECB_entries_bus_no
+						   (pecb, data_reg),
+						   ECB_entries_dev_no(pecb,
+								      data_reg),
+						   ECB_entries_func_no(pecb,
+								       data_reg),
+						   counter_port_id,
+						   SOC_COUNTER_READ_OP_CODE,
+						   ECB_entries_pci_id_offset
+						   (pecb, data_reg), &data_val);
+				if (data_val <
+				    pcb[0].last_uncore_count[event_index]) {
+					counter_overflow[event_index]++;
+				}
+				pcb[0].last_uncore_count[event_index] =
+				    data_val;
+				total_count =
+				    data_val +
+				    counter_overflow[event_index] *
+				    UNCORE_MAX_COUNT;
+				event_index++;
+				data[data_index + event_id] = total_count;
+			}
+		}
+
+	}
+	END_FOR_EACH_PCI_REG_RAW;
+
+	temp = uncore_to_read_data;
+	uncore_to_read_data = uncore_current_data;
+	uncore_current_data = temp;
+	preempt_enable();
+
+	return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn uncore_Read_Data()
+ *
+ * @param    None
+ *
+ * @return   None     No return needed
+ *
+ * @brief    Read the counters
+ *
+ */
+static VOID uncore_Read_Data(PVOID data_buffer)
+{
+	U32 event_id = 0;
+	U64 *data;
+	int data_index;
+	U32 data_val = 0;
+	U32 data_reg = 0;
+	U64 total_count = 0;
+	U32 event_index = 0;
+
+	if (GLOBAL_STATE_current_phase(driver_state) == DRV_STATE_UNINITIALIZED
+	    || GLOBAL_STATE_current_phase(driver_state) == DRV_STATE_IDLE
+	    || GLOBAL_STATE_current_phase(driver_state) == DRV_STATE_RESERVED
+	    || GLOBAL_STATE_current_phase(driver_state) ==
+	    DRV_STATE_PREPARE_STOP
+	    || GLOBAL_STATE_current_phase(driver_state) == DRV_STATE_STOPPED) {
+		SOCPERF_PRINT_ERROR("ERROR: RETURING EARLY from Read_Data\n");
+		return;
+	}
+
+	data = data_buffer;
+	data_index = 0;
+
+	preempt_disable();
+
+	/* Write GroupID*/
+	data[data_index] = 1;
+	/* Increment the data index as the event id starts from zero*/
+	data_index++;
+
+	FOR_EACH_PCI_REG_RAW(pecb, i, dev_idx) {
+		event_id = ECB_entries_event_id_index_local(pecb, i);
+
+		if (ECB_entries_reg_type(pecb, i) == CCCR) {
+			write_To_Register(ECB_entries_bus_no(pecb, i),
+					  ECB_entries_dev_no(pecb, i),
+					  ECB_entries_func_no(pecb, i),
+					  counter_port_id,
+					  SOC_COUNTER_WRITE_OP_CODE,
+					  ECB_entries_pci_id_offset(pecb, i),
+					  (ULONG) SOC_UNCORE_SAMPLE_DATA);
+
+			data_reg = i + ECB_cccr_pop(pecb);
+			if (ECB_entries_reg_type(pecb, data_reg) == DATA) {
+				read_From_Register(ECB_entries_bus_no
+						   (pecb, data_reg),
+						   ECB_entries_dev_no(pecb,
+								      data_reg),
+						   ECB_entries_func_no(pecb,
+								       data_reg),
+						   counter_port_id,
+						   SOC_COUNTER_READ_OP_CODE,
+						   ECB_entries_pci_id_offset
+						   (pecb, data_reg), &data_val);
+				if (data_val <
+				    pcb[0].last_uncore_count[event_index]) {
+					counter_overflow[event_index]++;
+				}
+				pcb[0].last_uncore_count[event_index] =
+				    data_val;
+				total_count =
+				    data_val +
+				    counter_overflow[event_index] *
+				    UNCORE_MAX_COUNT;
+				event_index++;
+				data[data_index + event_id] = total_count;
+			}
+		}
+
+	}
+	END_FOR_EACH_PCI_REG_RAW;
+
+	preempt_enable();
+
+	return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn uncore_Create_Mem()
+ *
+ * @param    None
+ *
+ * @return   None     No return needed
+ *
+ * @brief    Read the counters
+ *
+ */
+static VOID uncore_Create_Mem(U32 memory_size, U64 * trace_buffer)
+{
+	ECB pecb;
+	DRV_PCI_DEVICE_ENTRY dpden;
+	U32 bar_name = 0;
+	DRV_PCI_DEVICE_ENTRY curr_pci_entry = NULL;
+	U64 mmio_offset = 0;
+	U32 dev_index = 0;
+	U32 data_val = 0;
+	U32 reg_index = 0;
+	U64 physical_high = 0;
+	U64 odla_physical_address = 0;
+
+	if (device_uncore == NULL) {
+		SOCPERF_PRINT_ERROR("ERROR: NULL device_uncore!\n");
+		return;
+	}
+	pecb = (ECB) LWPMU_DEVICE_PMU_register_data(device_uncore)[0];
+	if (pecb == NULL) {
+		SOCPERF_PRINT_ERROR("ERROR: null pecb!\n");
+		return;
+	}
+
+	if (!trace_buffer) {
+		return;
+	}
+
+	ECB_pcidev_entry_list(pecb) =
+	    (DRV_PCI_DEVICE_ENTRY) ((S8 *) pecb + ECB_pcidev_list_offset(pecb));
+	dpden = ECB_pcidev_entry_list(pecb);
+
+	for (dev_index = 0; dev_index < ECB_num_pci_devices(pecb); dev_index++) {
+		curr_pci_entry = &dpden[dev_index];
+		bar_name = DRV_PCI_DEVICE_ENTRY_bar_name(curr_pci_entry);
+		mmio_offset =
+		    DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio(curr_pci_entry);
+
+		if (DRV_PCI_DEVICE_ENTRY_prog_type(curr_pci_entry) == UNC_MEMORY
+		    && DRV_PCI_DEVICE_ENTRY_config_type(curr_pci_entry) ==
+		    UNC_PCICFG && bar_name == UNC_SOCPCI
+		    && DRV_PCI_DEVICE_ENTRY_operation(curr_pci_entry) ==
+		    UNC_OP_WRITE) {
+			read_From_Register(DRV_PCI_DEVICE_ENTRY_bus_no
+					   (curr_pci_entry),
+					   DRV_PCI_DEVICE_ENTRY_dev_no
+					   (curr_pci_entry),
+					   DRV_PCI_DEVICE_ENTRY_func_no
+					   (curr_pci_entry),
+					   DRV_PCI_DEVICE_ENTRY_port_id
+					   (curr_pci_entry),
+					   SOC_COUNTER_READ_OP_CODE,
+					   DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio
+					   (curr_pci_entry), &data_val);
+			if (reg_index == 1) {
+				odla_physical_address = data_val;
+			} else if (reg_index == 2) {
+				physical_high = data_val;
+				odla_physical_address =
+				    odla_physical_address | (physical_high <<
+							     32);
+			}
+			SOCPERF_PRINT_DEBUG("op=%d port=%d offset=%x val=%x\n",
+					    DRV_PCI_DEVICE_ENTRY_op_code
+					    (curr_pci_entry),
+					    DRV_PCI_DEVICE_ENTRY_port_id
+					    (curr_pci_entry),
+					    DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio
+					    (curr_pci_entry), data_val);
+			reg_index++;
+		}
+		continue;
+	}
+	SOCPERF_PRINT_DEBUG("Physical Address=%llx\n", odla_physical_address);
+	if (odla_physical_address) {
+		trace_virtual_address =
+		    (U64) (UIOP) ioremap_nocache(odla_physical_address,
+						 1024 * sizeof(U64));
+		SOCPERF_PRINT_DEBUG("PHY=%llx ODLA VIRTUAL ADDRESS=%llx\n",
+				    odla_physical_address,
+				    trace_virtual_address);
+		if (trace_buffer) {
+			*trace_buffer = odla_physical_address;
+		}
+	}
+
+	return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn uncore_Check_Status()
+ *
+ * @param    None
+ *
+ * @return   None     No return needed
+ *
+ * @brief    Read the counters
+ *
+ */
+static VOID uncore_Check_Status(U64 * trace_buffer, U32 * num_entries)
+{
+	U32 dev_index = 0;
+	ECB pecb;
+	DRV_PCI_DEVICE_ENTRY dpden;
+	U32 bar_name = 0;
+	DRV_PCI_DEVICE_ENTRY curr_pci_entry = NULL;
+	U64 mmio_offset = 0;
+	U32 data_val = 0;
+	U32 data_index = 0;
+
+	if (device_uncore == NULL) {
+		SOCPERF_PRINT_ERROR("ERROR: NULL device_uncore!\n");
+		return;
+	}
+	pecb = (ECB) LWPMU_DEVICE_PMU_register_data(device_uncore)[0];
+	if (pecb == NULL) {
+		SOCPERF_PRINT_ERROR("ERROR: null pecb!\n");
+		return;
+	}
+	if (!trace_buffer) {
+		return;
+	}
+
+	ECB_pcidev_entry_list(pecb) =
+	    (DRV_PCI_DEVICE_ENTRY) ((S8 *) pecb + ECB_pcidev_list_offset(pecb));
+	dpden = ECB_pcidev_entry_list(pecb);
+
+	for (dev_index = 0; dev_index < ECB_num_pci_devices(pecb); dev_index++) {
+		curr_pci_entry = &dpden[dev_index];
+		bar_name = DRV_PCI_DEVICE_ENTRY_bar_name(curr_pci_entry);
+		mmio_offset =
+		    DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio(curr_pci_entry);
+
+		if (DRV_PCI_DEVICE_ENTRY_prog_type(curr_pci_entry) == UNC_STATUS
+		    && DRV_PCI_DEVICE_ENTRY_config_type(curr_pci_entry) ==
+		    UNC_PCICFG && bar_name == UNC_SOCPCI
+		    && DRV_PCI_DEVICE_ENTRY_operation(curr_pci_entry) ==
+		    UNC_OP_READ) {
+			read_From_Register(DRV_PCI_DEVICE_ENTRY_bus_no
+					   (curr_pci_entry),
+					   DRV_PCI_DEVICE_ENTRY_dev_no
+					   (curr_pci_entry),
+					   DRV_PCI_DEVICE_ENTRY_func_no
+					   (curr_pci_entry),
+					   DRV_PCI_DEVICE_ENTRY_port_id
+					   (curr_pci_entry),
+					   SOC_COUNTER_READ_OP_CODE,
+					   DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio
+					   (curr_pci_entry), &data_val);
+			SOCPERF_PRINT_DEBUG("TRACE STATUS=%x\n", data_val);
+			trace_buffer[data_index] = data_val;
+			data_index++;
+			continue;
+		}
+	}
+
+	if (num_entries) {
+		*num_entries = data_index;
+	}
+
+	return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn uncore_Read_Mem()
+ *
+ * @param    None
+ *
+ * @return   None     No return needed
+ *
+ * @brief    Read the counters
+ *
+ */
+static VOID
+uncore_Read_Mem(U64 start_address, U64 * trace_buffer, U32 num_entries)
+{
+	U32 data_index = 0;
+	U32 data_value = 0;
+
+	if (num_entries == 0 || !trace_buffer) {
+		return;
+	}
+	SOCPERF_PRINT_DEBUG
+	    ("Reading memory for num_entries=%d from address=%llx\n",
+	     num_entries, trace_virtual_address);
+	for (data_index = 0; data_index < num_entries; data_index++) {
+		if (trace_virtual_address) {
+			data_value =
+			    readl((U64 *) (UIOP) trace_virtual_address +
+				  data_index);
+
+			SOCPERF_PRINT_DEBUG("DATA VALUE=%llx\n", data_value);
+			*(trace_buffer + data_index) = data_value;
+		}
+	}
+
+	return;
+}
+
+/*
+ * Initialize the dispatch table
+ */
+DISPATCH_NODE soc_uncore_dispatch = {
+	uncore_Initialize,	/* initialize*/
+	NULL,			/* destroy*/
+	uncore_Write_PMU,	/* write*/
+	uncore_Disable_PMU,	/* freeze*/
+	NULL,			/* restart*/
+	uncore_Read_PMU_Data,	/* read*/
+	NULL,			/* check for overflow*/
+	NULL,
+	NULL,
+	uncore_Clean_Up,
+	NULL,
+	NULL,
+	NULL,
+	uncore_Read_Counts,	/* read counts*/
+	NULL,
+	NULL,
+	NULL,
+	uncore_Trigger_Read,
+	uncore_Read_Data,
+	uncore_Create_Mem,
+	uncore_Check_Status,
+	uncore_Read_Mem,
+	uncore_Stop_Mem
+};
diff --git a/drivers/external_drivers/drivers/socwatch/soc_perf_driver/soc_uncore.h b/drivers/external_drivers/drivers/socwatch/soc_perf_driver/soc_uncore.h
new file mode 100644
index 0000000..51e122c
--- /dev/null
+++ b/drivers/external_drivers/drivers/socwatch/soc_perf_driver/soc_uncore.h
@@ -0,0 +1,91 @@
+/* ***********************************************************************************************
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2013-2014 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  BSD LICENSE
+
+  Copyright(c) 2013-2014 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+  ***********************************************************************************************
+*/
+
+#ifndef _SOC_UNCORE_H_INC_
+#define _SOC_UNCORE_H_INC_
+
+/*
+ * Local to this architecture: SoC uncore unit
+ *
+ */
+#define SOC_UNCORE_DESKTOP_DID                 0x000C04
+#define SOC_UNCORE_NEXT_ADDR_OFFSET            4
+#define SOC_UNCORE_BAR_ADDR_SHIFT              32
+#define SOC_UNCORE_BAR_ADDR_MASK               0x000FFFC00000LL
+#define SOC_UNCORE_MAX_PCI_DEVICES             16
+#define SOC_UNCORE_MCR_REG_OFFSET              0xD0
+#define SOC_UNCORE_MDR_REG_OFFSET              0xD4
+#define SOC_UNCORE_MCRX_REG_OFFSET             0xD8
+#define SOC_UNCORE_BYTE_ENABLES                0xF
+#define SOC_UNCORE_OP_CODE_SHIFT               24
+#define SOC_UNCORE_PORT_ID_SHIFT               16
+#define SOC_UNCORE_OFFSET_HI_MASK              0xFFFFFF00
+#define SOC_UNCORE_OFFSET_LO_MASK              0xFF
+#define SOC_COUNTER_PORT_ID                    23
+#define SOC_COUNTER_WRITE_OP_CODE              1
+#define SOC_COUNTER_READ_OP_CODE               0
+#define UNCORE_MAX_COUNTERS                    8
+#define UNCORE_MAX_COUNT                       0x00000000FFFFFFFFLL
+
+#define SOC_UNCORE_OTHER_BAR_MMIO_PAGE_SIZE    4096
+#define SOC_UNCORE_SAMPLE_DATA                 0x00020000
+#define SOC_UNCORE_STOP                        0x00040000
+#define SOC_UNCORE_CTRL_REG_OFFSET             0x0
+
+extern DISPATCH_NODE soc_uncore_dispatch;
+
+#endif
diff --git a/drivers/external_drivers/drivers/socwatch/soc_perf_driver/socperfdrv.c b/drivers/external_drivers/drivers/socwatch/soc_perf_driver/socperfdrv.c
new file mode 100644
index 0000000..bd6cd72
--- /dev/null
+++ b/drivers/external_drivers/drivers/socwatch/soc_perf_driver/socperfdrv.c
@@ -0,0 +1,1382 @@
+/* ***********************************************************************************************
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2005-2014 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  BSD LICENSE
+
+  Copyright(c) 2005-2014 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+  ***********************************************************************************************
+*/
+
+#include "lwpmudrv_defines.h"
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <asm/page.h>
+#include <linux/cdev.h>
+#include <linux/proc_fs.h>
+#include <linux/fcntl.h>
+#include <linux/device.h>
+#include <asm/uaccess.h>
+#include <linux/sched.h>
+#include <linux/syscalls.h>
+#include <asm/unistd.h>
+#include <linux/compat.h>
+
+#include "lwpmudrv_types.h"
+#include "rise_errors.h"
+#include "lwpmudrv_ecb.h"
+#include "lwpmudrv_ioctl.h"
+#include "lwpmudrv_struct.h"
+#include "ecb_iterators.h"
+
+#include "socperfdrv.h"
+#include "control.h"
+#include "soc_uncore.h"
+
+MODULE_AUTHOR("Copyright(c) 2007-2014 Intel Corporation");
+MODULE_VERSION(SOCPERF_NAME "_" SOCPERF_VERSION_STR);
+MODULE_LICENSE("Dual BSD/GPL");
+
+typedef struct LWPMU_DEV_NODE_S LWPMU_DEV_NODE;
+typedef LWPMU_DEV_NODE *LWPMU_DEV;
+
+struct LWPMU_DEV_NODE_S {
+	long buffer;
+	struct semaphore sem;
+	struct cdev cdev;
+};
+
+#define LWPMU_DEV_buffer(dev)      (dev)->buffer
+#define LWPMU_DEV_sem(dev)         (dev)->sem
+#define LWPMU_DEV_cdev(dev)        (dev)->cdev
+
+/* Global variables of the driver */
+SOCPERF_VERSION_NODE drv_version;
+U64 *read_unc_ctr_info = NULL;
+DISPATCH dispatch_uncore = NULL;
+EVENT_CONFIG global_ec = NULL;
+volatile S32 abnormal_terminate = 0;
+LWPMU_DEV socperf_control = NULL;
+
+LWPMU_DEVICE device_uncore = NULL;
+CPU_STATE pcb = NULL;
+size_t pcb_size = 0;
+
+#if defined(DRV_USE_UNLOCKED_IOCTL)
+static struct mutex ioctl_lock;
+#endif
+
+#define  PMU_DEVICES            2	/* pmu, mod*/
+#define  OTHER_PMU_DEVICES      1	/* mod*/
+
+static dev_t lwpmu_DevNum;	/* the major and minor parts for SOCPERF base */
+
+#if defined (DRV_ANDROID) || defined (DRV_CHROMEOS)
+static struct class *pmu_class = NULL;
+#endif
+
+#if defined (DRV_ANDROID)
+#define DRV_DEVICE_DELIMITER "_"
+#elif defined (DRV_CHROMEOS)
+#define DRV_DEVICE_DELIMITER "/"
+#endif
+
+#if !defined(DRV_USE_UNLOCKED_IOCTL)
+#define MUTEX_INIT(lock)
+#define MUTEX_LOCK(lock)
+#define MUTEX_UNLOCK(lock)
+#else
+#define MUTEX_INIT(lock)     mutex_init(&(lock));
+#define MUTEX_LOCK(lock)     mutex_lock(&(lock))
+#define MUTEX_UNLOCK(lock)   mutex_unlock(&(lock))
+#endif
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn  static OS_STATUS lwpmudrv_Initialize_State(void)
+ *
+ * @param none
+ *
+ * @return OS_STATUS
+ *
+ * @brief  Allocates the memory needed at load time.  Initializes all the
+ * @brief  necessary state variables with the default values.
+ *
+ * <I>Special Notes</I>
+ */
+static OS_STATUS lwpmudrv_Initialize_State(VOID)
+{
+	/*
+	 *  Machine Initializations
+	 *  Abstract this information away into a separate entry point
+	 *
+	 *  Question:  Should we allow for the use of Hot-cpu
+	 *    add/subtract functionality while the driver is executing?
+	 */
+
+	GLOBAL_STATE_num_cpus(driver_state) = num_online_cpus();
+	GLOBAL_STATE_active_cpus(driver_state) = num_online_cpus();
+	GLOBAL_STATE_cpu_count(driver_state) = 0;
+	GLOBAL_STATE_dpc_count(driver_state) = 0;
+	GLOBAL_STATE_num_em_groups(driver_state) = 0;
+	GLOBAL_STATE_current_phase(driver_state) = DRV_STATE_UNINITIALIZED;
+
+	SOCPERF_PRINT_DEBUG
+	    ("lwpmudrv_Initialize_State: num_cpus=%d, active_cpus=%d\n",
+	     GLOBAL_STATE_num_cpus(driver_state),
+	     GLOBAL_STATE_active_cpus(driver_state));
+
+	return OS_SUCCESS;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn       VOID UTILITY_Configure_CPU
+ *
+ * @brief    Reads the CPU information from the hardware
+ *
+ * @param    param   dispatch_id -  The id of the dispatch table.
+ *
+ * @return   Pointer to the correct dispatch table for the CPU architecture
+ *
+ * <I>Special Notes:</I>
+ *              <NONE>
+ */
+extern DISPATCH UTILITY_Configure_CPU(U32 dispatch_id)
+{
+	DISPATCH dispatch = NULL;
+	switch (dispatch_id) {
+	case 700:
+		SOCPERF_PRINT_DEBUG("Set up the SOC Uncore dispatch table\n");
+		dispatch = &soc_uncore_dispatch;
+		break;
+
+	default:
+		dispatch = NULL;
+		SOCPERF_PRINT_ERROR
+		    ("Architecture not supported (dispatch_id=%d)\n",
+		     dispatch_id);
+		break;
+	}
+
+	return dispatch;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn       VOID SOCPERF_Read_Data
+ *
+ * @brief    Reads counter data
+ *
+ * @param    param   data_buffer - buffer for reading counter data.
+ *
+ * @return  None
+ *
+ * <I>Special Notes:</I>
+ *              <NONE>
+ */
+extern VOID SOCPERF_Read_Data(PVOID data_buffer)
+{
+	if (dispatch_uncore && dispatch_uncore->read_current_data) {
+		smp_call_function_single(0, dispatch_uncore->read_current_data,
+					 data_buffer, 1);
+	}
+	SOCPERF_PRINT_DEBUG("SOCPERF_Read_Data called\n");
+	return;
+}
+
+EXPORT_SYMBOL(SOCPERF_Read_Data);
+
+/*********************************************************************
+ *  Internal Driver functions
+ *     Should be called only from the lwpmudrv_DeviceControl routine
+ *********************************************************************/
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn  static OS_STATUS lwpmudrv_Version(IOCTL_ARGS arg)
+ *
+ * @param arg - pointer to the IOCTL_ARGS structure
+ *
+ * @return OS_STATUS
+ *
+ * @brief  Local function that handles the LWPMU_IOCTL_VERSION call.
+ * @brief  Returns the version number of the kernel mode sampling.
+ *
+ * <I>Special Notes</I>
+ */
+static OS_STATUS lwpmudrv_Version(IOCTL_ARGS arg)
+{
+	OS_STATUS status;
+
+	/* Check if enough space is provided for collecting the data*/
+	if ((arg->r_len != sizeof(U32)) || (arg->r_buf == NULL)) {
+		return OS_FAULT;
+	}
+
+	status =
+	    put_user(SOCPERF_VERSION_NODE_socperf_version(&drv_version),
+		     (U32 *) arg->r_buf);
+
+	return status;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn  static VOID lwpmudrv_Clean_Up(DRV_BOOL)
+ *
+ * @param  DRV_BOOL finish - Flag to call finish
+ *
+ * @return VOID
+ *
+ * @brief  Cleans up the memory allocation.
+ *
+ * <I>Special Notes</I>
+ */
+static VOID lwpmudrv_Clean_Up(DRV_BOOL finish)
+{
+	U32 i = 0;
+/*
+    if (PMU_register_data) {
+        for (i = 0; i < GLOBAL_STATE_num_em_groups(driver_state); i++) {
+            CONTROL_Free_Memory(PMU_register_data[i]);
+        }
+    }
+*/
+	if (dispatch_uncore && dispatch_uncore->clean_up) {
+		dispatch_uncore->clean_up((VOID *) & i);
+	}
+
+	if (device_uncore) {
+		EVENT_CONFIG ec;
+		if (LWPMU_DEVICE_PMU_register_data(device_uncore)) {
+			ec = LWPMU_DEVICE_ec(device_uncore);
+			for (i = 0; i < EVENT_CONFIG_num_groups_unc(ec); i++) {
+				CONTROL_Free_Memory
+				    (LWPMU_DEVICE_PMU_register_data
+				     (device_uncore)[i]);
+			}
+		}
+		LWPMU_DEVICE_pcfg(device_uncore) =
+		    CONTROL_Free_Memory(LWPMU_DEVICE_pcfg(device_uncore));
+		LWPMU_DEVICE_ec(device_uncore) =
+		    CONTROL_Free_Memory(LWPMU_DEVICE_ec(device_uncore));
+		device_uncore = CONTROL_Free_Memory(device_uncore);
+	}
+
+	pcb = CONTROL_Free_Memory(pcb);
+	pcb_size = 0;
+	GLOBAL_STATE_num_em_groups(driver_state) = 0;
+	GLOBAL_STATE_num_descriptors(driver_state) = 0;
+
+	return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn  static OS_STATUS lwpmudrv_Initialize_Uncore(PVOID in_buf, U32 in_buf_len)
+ *
+ * @param  in_buf       - pointer to the input buffer
+ * @param  in_buf_len   - size of the input buffer
+ *
+ * @return OS_STATUS
+ *
+ * @brief  Local function that handles the LWPMU_IOCTL_INIT call.
+ * @brief  Sets up the interrupt handler.
+ * @brief  Set up the output buffers/files needed to make the driver
+ * @brief  operational.
+ *
+ * <I>Special Notes</I>
+ */
+static OS_STATUS lwpmudrv_Initialize_Uncore(PVOID in_buf, U32 in_buf_len)
+{
+	DRV_CONFIG pcfg_unc;
+
+	SOCPERF_PRINT_DEBUG("Entered lwpmudrv_Initialize_UNC\n");
+	cmpxchg(&GLOBAL_STATE_current_phase(driver_state),
+		DRV_STATE_UNINITIALIZED, DRV_STATE_IDLE);
+
+	if (GLOBAL_STATE_current_phase(driver_state) != DRV_STATE_IDLE) {
+		SOCPERF_PRINT_ERROR("OS_IN_PROGRESS error!\n");
+		return OS_IN_PROGRESS;
+	}
+	/*
+	 *   Program State Initializations:
+	 *   Foreach device, copy over pcfg_unc and configure dispatch table
+	 */
+	if (in_buf == NULL) {
+		SOCPERF_PRINT_ERROR("in_buff ERROR!\n");
+		return OS_FAULT;
+	}
+	if (in_buf_len != sizeof(DRV_CONFIG_NODE)) {
+		SOCPERF_PRINT_ERROR("Got in_buf_len=%d, expecting size=%d\n",
+				    in_buf_len, (int)sizeof(DRV_CONFIG_NODE));
+		return OS_FAULT;
+	}
+
+	device_uncore = CONTROL_Allocate_Memory(sizeof(LWPMU_DEVICE_NODE));
+	if (!device_uncore) {
+		SOCPERF_PRINT_ERROR
+		    ("Memory allocation failure for device_uncore!\n");
+		return OS_NO_MEM;
+	}
+	pcb_size = GLOBAL_STATE_num_cpus(driver_state) * sizeof(CPU_STATE_NODE);
+	pcb = CONTROL_Allocate_Memory(pcb_size);
+	if (!pcb) {
+		SOCPERF_PRINT_ERROR("Memory allocation failure for pcb!\n");
+		return OS_NO_MEM;
+	}
+	/* allocate memory*/
+	LWPMU_DEVICE_pcfg(device_uncore) =
+	    CONTROL_Allocate_Memory(sizeof(DRV_CONFIG_NODE));
+	if (!LWPMU_DEVICE_pcfg(device_uncore)) {
+		SOCPERF_PRINT_ERROR
+		    ("Memory allocation failure for LWPMU_DEVICE_pcfg(device_uncore)!\n");
+		return OS_NO_MEM;
+	}
+	/* copy over pcfg_unc*/
+	if (copy_from_user
+	    (LWPMU_DEVICE_pcfg(device_uncore), in_buf, in_buf_len)) {
+		SOCPERF_PRINT_ERROR("Failed to copy from user");
+		return OS_FAULT;
+	}
+	/* configure dispatch from dispatch_id*/
+	pcfg_unc = (DRV_CONFIG) LWPMU_DEVICE_pcfg(device_uncore);
+
+	LWPMU_DEVICE_dispatch(device_uncore) =
+	    UTILITY_Configure_CPU(DRV_CONFIG_dispatch_id(pcfg_unc));
+	if (LWPMU_DEVICE_dispatch(device_uncore) == NULL) {
+		SOCPERF_PRINT_ERROR("Unable to configure CPU");
+		return OS_FAULT;
+	}
+
+	LWPMU_DEVICE_em_groups_count(device_uncore) = 0;
+	LWPMU_DEVICE_cur_group(device_uncore) = 0;
+	SOCPERF_PRINT("SocPerf Driver Config : uncore dispatch id   = %d\n",
+		      DRV_CONFIG_dispatch_id(pcfg_unc));
+
+	return OS_SUCCESS;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn  static OS_STATUS socperf_Terminate(void)
+ *
+ * @param  none
+ *
+ * @return OS_STATUS
+ *
+ * @brief  Local function that handles the LWPMUDRV_IOCTL_TERMINATE call.
+ * @brief  Cleans up the interrupt handler and resets the PMU state.
+ *
+ * <I>Special Notes</I>
+ */
+static OS_STATUS socperf_Terminate(VOID)
+{
+	U32 previous_state;
+
+	if (GLOBAL_STATE_current_phase(driver_state) == DRV_STATE_UNINITIALIZED) {
+		return OS_SUCCESS;
+	}
+
+	previous_state = cmpxchg(&GLOBAL_STATE_current_phase(driver_state),
+				 DRV_STATE_STOPPED, DRV_STATE_UNINITIALIZED);
+	if (previous_state != DRV_STATE_STOPPED) {
+		SOCPERF_PRINT_ERROR
+		    ("socperf_Terminate: Sampling is in progress, cannot terminate.\n");
+		return OS_IN_PROGRESS;
+	}
+
+	GLOBAL_STATE_current_phase(driver_state) = DRV_STATE_UNINITIALIZED;
+	lwpmudrv_Clean_Up(TRUE);
+
+	return OS_SUCCESS;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static OS_STATUS lwpmudrv_Trigger_Read(void)
+ *
+ * @param - none
+ *
+ * @return - OS_STATUS
+ *
+ * @brief Read the Counter Data.
+ *
+ * <I>Special Notes</I>
+ */
+static OS_STATUS lwpmudrv_Trigger_Read(VOID)
+{
+	dispatch_uncore = LWPMU_DEVICE_dispatch(device_uncore);
+	if (dispatch_uncore && dispatch_uncore->trigger_read) {
+		dispatch_uncore->trigger_read();
+	}
+
+	return OS_SUCCESS;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static OS_STATUS lwpmudrv_Init_PMU(void)
+ *
+ * @param - none
+ *
+ * @return - OS_STATUS
+ *
+ * @brief Initialize the PMU and the driver state in preparation for data collection.
+ *
+ * <I>Special Notes</I>
+ */
+static OS_STATUS lwpmudrv_Init_PMU(VOID)
+{
+	U32 i = 0;
+
+	if (GLOBAL_STATE_current_phase(driver_state) != DRV_STATE_IDLE) {
+		return OS_IN_PROGRESS;
+	}
+	dispatch_uncore = LWPMU_DEVICE_dispatch(device_uncore);
+	if (dispatch_uncore && dispatch_uncore->write) {
+		dispatch_uncore->write((VOID *) & i);
+	}
+	SOCPERF_PRINT_DEBUG
+	    ("lwpmudrv_Init_PMU: IOCTL_Init_PMU - finished initial Write\n");
+
+	return OS_SUCCESS;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static OS_STATUS lwpmudrv_Set_EM_Config_UNC(IOCTL_ARGS arg)
+ *
+ * @param arg - pointer to the IOCTL_ARGS structure
+ *
+ * @return OS_STATUS
+ *
+ * @brief  Set the number of em groups in the global state node.
+ * @brief  Also, copy the EVENT_CONFIG struct that has been passed in,
+ * @brief  into a global location for now.
+ *
+ * <I>Special Notes</I>
+ */
+static OS_STATUS lwpmudrv_Set_EM_Config_Uncore(IOCTL_ARGS arg)
+{
+	EVENT_CONFIG ec;
+	SOCPERF_PRINT_DEBUG("enter lwpmudrv_Set_EM_Config_UNC\n");
+	if (GLOBAL_STATE_current_phase(driver_state) != DRV_STATE_IDLE) {
+		return OS_IN_PROGRESS;
+	}
+
+	if (arg->w_buf == NULL || arg->w_len == 0) {
+		return OS_INVALID;
+	}
+	/* allocate memory*/
+	LWPMU_DEVICE_ec(device_uncore) =
+	    CONTROL_Allocate_Memory(sizeof(EVENT_CONFIG_NODE));
+	if (!LWPMU_DEVICE_ec(device_uncore)) {
+		SOCPERF_PRINT_ERROR
+		    ("Memory allocation failure for LWPMU_DEVICE_ec(device_uncore)!\n");
+		return OS_NO_MEM;
+	}
+	if (copy_from_user
+	    (LWPMU_DEVICE_ec(device_uncore), arg->w_buf, arg->w_len)) {
+		return OS_FAULT;
+	}
+	/* configure num_groups from ec of the specific device*/
+	ec = (EVENT_CONFIG) LWPMU_DEVICE_ec(device_uncore);
+	LWPMU_DEVICE_PMU_register_data(device_uncore) =
+	    CONTROL_Allocate_Memory(EVENT_CONFIG_num_groups_unc(ec) *
+				    sizeof(VOID *));
+	if (!LWPMU_DEVICE_PMU_register_data(device_uncore)) {
+		SOCPERF_PRINT_ERROR
+		    ("Memory allocation failure for LWPMU_DEVICE_PMU_register_data(device_uncore)!\n");
+		return OS_NO_MEM;
+	}
+	LWPMU_DEVICE_em_groups_count(device_uncore) = 0;
+
+	return OS_SUCCESS;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static OS_STATUS lwpmudrv_Configure_events_UNC(IOCTL_ARGS arg)
+ *
+ * @param arg - pointer to the IOCTL_ARGS structure
+ *
+ * @return OS_STATUS
+ *
+ * @brief  Make a copy of the uncore registers that need to be programmed
+ * @brief  for the next event set used for event multiplexing
+ *
+ * <I>Special Notes</I>
+ */
+static OS_STATUS socperf_Configure_Events_Uncore(IOCTL_ARGS arg)
+{
+	VOID **PMU_register_data_unc;
+	S32 em_groups_count_unc;
+	ECB ecb;
+	EVENT_CONFIG ec_unc;
+	DRV_CONFIG pcfg_unc;
+	U32 group_id = 0;
+	ECB in_ecb = NULL;
+
+	if (GLOBAL_STATE_current_phase(driver_state) != DRV_STATE_IDLE) {
+		return OS_IN_PROGRESS;
+	}
+
+	em_groups_count_unc = LWPMU_DEVICE_em_groups_count(device_uncore);
+	PMU_register_data_unc = LWPMU_DEVICE_PMU_register_data(device_uncore);
+	ec_unc = LWPMU_DEVICE_ec(device_uncore);
+	pcfg_unc = LWPMU_DEVICE_pcfg(device_uncore);
+
+	if (em_groups_count_unc >= (S32) EVENT_CONFIG_num_groups_unc(ec_unc)) {
+		SOCPERF_PRINT_DEBUG
+		    ("Number of Uncore EM groups exceeded the initial configuration.");
+		return OS_SUCCESS;
+	}
+	if (arg->w_buf == NULL || arg->w_len == 0) {
+		return OS_FAULT;
+	}
+	/*       size is in w_len, data is pointed to by w_buf*/
+/*	*/
+	in_ecb = (ECB) arg->w_buf;
+	group_id = ECB_group_id(in_ecb);
+	PMU_register_data_unc[group_id] = CONTROL_Allocate_Memory(arg->w_len);
+	if (!PMU_register_data_unc[group_id]) {
+		SOCPERF_PRINT_ERROR("ECB memory allocation failed\n");
+		return OS_NO_MEM;
+	}
+/*	*/
+	/* Make a copy of the data for global use.*/
+/*	*/
+	if (copy_from_user
+	    (PMU_register_data_unc[group_id], arg->w_buf, arg->w_len)) {
+		SOCPERF_PRINT_ERROR("ECB copy failed\n");
+		return OS_NO_MEM;
+	}
+	/* at this point, we know the number of uncore events for this device,*/
+	/* so allocate the results buffer per thread for uncore only for event based uncore counting*/
+	if (em_groups_count_unc == 0) {
+		ecb = PMU_register_data_unc[0];
+		LWPMU_DEVICE_num_events(device_uncore) = ECB_num_events(ecb);
+	}
+	LWPMU_DEVICE_em_groups_count(device_uncore) = group_id + 1;
+
+	return OS_SUCCESS;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static OS_STATUS socperf_Start(void)
+ *
+ * @param none
+ *
+ * @return OS_STATUS
+ *
+ * @brief  Local function that handles the LWPMU_IOCTL_START call.
+ * @brief  Set up the OS hooks for process/thread/load notifications.
+ * @brief  Write the initial set of MSRs.
+ *
+ * <I>Special Notes</I>
+ */
+static OS_STATUS socperf_Start(VOID)
+{
+	OS_STATUS status = OS_SUCCESS;
+	U32 previous_state;
+	U32 i = 0;
+
+	/*
+	 * To Do: Check for state == STATE_IDLE and only then enable sampling
+	 */
+	previous_state = cmpxchg(&GLOBAL_STATE_current_phase(driver_state),
+				 DRV_STATE_IDLE, DRV_STATE_RUNNING);
+	if (previous_state != DRV_STATE_IDLE) {
+		SOCPERF_PRINT_ERROR
+		    ("socperf_Start: Unable to start sampling - State is %d\n",
+		     GLOBAL_STATE_current_phase(driver_state));
+		return OS_IN_PROGRESS;
+	}
+
+	if (dispatch_uncore && dispatch_uncore->restart) {
+		dispatch_uncore->restart((VOID *) & i);
+	}
+
+	return status;
+}
+
+/*
+ * @fn lwpmudrv_Prepare_Stop();
+ *
+ * @param        NONE
+ * @return       OS_STATUS
+ *
+ * @brief  Local function that handles the LWPMUDRV_IOCTL_STOP call.
+ * @brief  Cleans up the interrupt handler.
+ */
+static OS_STATUS socperf_Prepare_Stop(VOID)
+{
+	U32 i = 0;
+	U32 current_state = GLOBAL_STATE_current_phase(driver_state);
+
+	SOCPERF_PRINT_DEBUG("socperf_Prepare_Stop: About to stop sampling\n");
+	GLOBAL_STATE_current_phase(driver_state) = DRV_STATE_PREPARE_STOP;
+
+	if (current_state == DRV_STATE_UNINITIALIZED) {
+		return OS_SUCCESS;
+	}
+
+	if (dispatch_uncore && dispatch_uncore->freeze) {
+		dispatch_uncore->freeze((VOID *) & i);
+	}
+
+	return OS_SUCCESS;
+}
+
+/*
+ * @fn socperf_Finish_Stop();
+ *
+ * @param  NONE
+ * @return OS_STATUS
+ *
+ * @brief  Local function that handles the LWPMUDRV_IOCTL_STOP call.
+ * @brief  Cleans up the interrupt handler.
+ */
+static OS_STATUS socperf_Finish_Stop(VOID)
+{
+	OS_STATUS status = OS_SUCCESS;
+
+	GLOBAL_STATE_current_phase(driver_state) = DRV_STATE_STOPPED;
+
+	return status;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static OS_STATUS lwpmudrv_Read_Uncore_Counts(void out_buf, U32 out_buf_len)
+ *
+ * @param - out_buf       - output buffer
+ *          out_buf_len   - output buffer length
+ *
+ * @return - OS_STATUS
+ *
+ * @brief    Read the Counter Data.
+ *
+ * <I>Special Notes</I>
+ */
+static OS_STATUS lwpmudrv_Read_Uncore_Counts(PVOID out_buf, U32 out_buf_len)
+{
+	if (out_buf == NULL) {
+		SOCPERF_PRINT_ERROR
+		    ("lwpmudrv_Read_Uncore_Counts: counter buffer is NULL\n");
+		return OS_FAULT;
+	}
+
+	if (dispatch_uncore && dispatch_uncore->read_current_data) {
+		dispatch_uncore->read_current_data(out_buf);
+	}
+
+	return OS_SUCCESS;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static OS_STATUS lwpmudrv_Create_Mem(IOCTL_ARGS arg)
+ *
+ * @param - none
+ *
+ * @return - OS_STATUS
+ *
+ * @brief Read the Counter Data.
+ *
+ * <I>Special Notes</I>
+ */
+static OS_STATUS lwpmudrv_Create_Mem(IOCTL_ARGS arg)
+{
+	U32 memory_size = 0;
+	U64 trace_phys_address = 0;
+
+	if (arg->w_buf == NULL || arg->w_len == 0) {
+		SOCPERF_PRINT_ERROR
+		    ("lwpmudrv_Create_Mem: Counter buffer is NULL\n");
+		return OS_FAULT;
+	}
+
+	if (copy_from_user(&memory_size, (U32 *) arg->w_buf, sizeof(U32))) {
+		return OS_FAULT;
+	}
+
+	if (arg->r_buf == NULL || arg->r_len == 0) {
+		SOCPERF_PRINT_ERROR
+		    ("lwpmudrv_Create_Mem: output buffer is NULL\n");
+		return OS_FAULT;
+	}
+	SOCPERF_PRINT_DEBUG("Read size=%llx\n", arg->r_len);
+	SOCPERF_PRINT_DEBUG("Write size=%llx\n", arg->w_len);
+	if (arg->r_len != sizeof(U64)) {
+		return OS_FAULT;
+	}
+
+	dispatch_uncore = LWPMU_DEVICE_dispatch(device_uncore);
+	if (dispatch_uncore && dispatch_uncore->create_mem) {
+		dispatch_uncore->create_mem(memory_size, &trace_phys_address);
+	} else {
+		SOCPERF_PRINT_ERROR("dispatch table could not be called\n");
+	}
+
+	if (copy_to_user(arg->r_buf, (void *)&trace_phys_address, sizeof(U64))) {
+		return OS_FAULT;
+	}
+
+	return OS_SUCCESS;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static OS_STATUS lwpmudrv_Check_Status( IOCTL_ARGS arg)
+ *
+ * @param - none
+ *
+ * @return - OS_STATUS
+ *
+ * @brief Read the Counter Data.
+ *
+ * <I>Special Notes</I>
+ */
+static OS_STATUS lwpmudrv_Check_Status(IOCTL_ARGS arg)
+{
+	U32 num_entries = 0;
+	U64 *status_data = 0;
+
+	if ((arg->r_len == 0) || (arg->r_buf == NULL)) {
+		return OS_FAULT;
+	}
+
+	status_data = CONTROL_Allocate_Memory(arg->r_len);
+	if (dispatch_uncore && dispatch_uncore->check_status) {
+		dispatch_uncore->check_status(status_data, &num_entries);
+	}
+
+	if (copy_to_user
+	    (arg->r_buf, (void *)status_data, num_entries * sizeof(U64))) {
+		CONTROL_Free_Memory(status_data);
+		return OS_FAULT;
+	}
+	CONTROL_Free_Memory(status_data);
+
+	return OS_SUCCESS;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static OS_STATUS lwpmudrv_Read_Mem( IOCTL_ARGS arg)
+ *
+ * @param - none
+ *
+ * @return - OS_STATUS
+ *
+ * @brief Read the Counter Data.
+ *
+ * <I>Special Notes</I>
+ */
+static OS_STATUS lwpmudrv_Read_Mem(IOCTL_ARGS arg)
+{
+	U64 start_address = 0;
+	U64 *mem_address = NULL;
+	U32 mem_size = 0;
+	U32 num_entries = 0;
+
+	if (arg->w_buf == NULL || arg->w_len == 0) {
+		SOCPERF_PRINT_ERROR
+		    ("lwpmudrv_Read_Mem: Counter buffer is NULL\n");
+		return OS_FAULT;
+	}
+
+	if (copy_from_user(&start_address, (U64 *) arg->w_buf, sizeof(U64))) {
+		return OS_FAULT;
+	}
+
+	if ((arg->r_len == 0) || (arg->r_buf == NULL)) {
+		return OS_FAULT;
+	}
+	mem_size = (U32) arg->r_len;
+	mem_address = CONTROL_Allocate_Memory(mem_size);
+	if (!mem_address) {
+		return OS_NO_MEM;
+	}
+
+	num_entries = (U32) (mem_size / sizeof(U64));
+	if (dispatch_uncore && dispatch_uncore->read_mem) {
+		dispatch_uncore->read_mem(start_address, mem_address,
+					  num_entries);
+	}
+	if (copy_to_user(arg->r_buf, (void *)mem_address, mem_size)) {
+		CONTROL_Free_Memory(mem_address);
+		return OS_FAULT;
+	}
+	CONTROL_Free_Memory(mem_address);
+
+	return OS_SUCCESS;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static VOID lwpmudrv_Stop_Mem(void)
+ *
+ * @param - none
+ *
+ * @return - none
+ *
+ * @brief Stop Mem
+ *
+ * <I>Special Notes</I>
+ */
+extern VOID lwpmudrv_Stop_Mem(VOID)
+{
+	SOCPERF_PRINT_DEBUG("Entered lwpmudrv_Stop_Mem\n");
+
+	if (dispatch_uncore && dispatch_uncore->stop_mem) {
+		dispatch_uncore->stop_mem();
+	}
+
+	SOCPERF_PRINT_DEBUG("Exited lwpmudrv_Stop_Mem\n");
+
+	return;
+}
+
+/*******************************************************************************
+ *  External Driver functions - Open
+ *      This function is common to all drivers
+ *******************************************************************************/
+
+static int socperf_Open(struct inode *inode, struct file *filp)
+{
+	SOCPERF_PRINT_DEBUG("lwpmu_Open called on maj:%d, min:%d\n",
+			    imajor(inode), iminor(inode));
+	filp->private_data = container_of(inode->i_cdev, LWPMU_DEV_NODE, cdev);
+
+	return 0;
+}
+
+/*******************************************************************************
+ *  External Driver functions
+ *      These functions are registered into the file operations table that
+ *      controls this device.
+ *      Open, Close, Read, Write, Release
+ *******************************************************************************/
+
+static ssize_t
+socperf_Read(struct file *filp, char *buf, size_t count, loff_t * f_pos)
+{
+	unsigned long retval;
+
+	/* Transfering data to user space */
+	SOCPERF_PRINT_DEBUG("lwpmu_Read dispatched with count=%d\n",
+			    (S32) count);
+	if (copy_to_user(buf, &LWPMU_DEV_buffer(socperf_control), 1)) {
+		retval = OS_FAULT;
+		return retval;
+	}
+	/* Changing reading position as best suits */
+	if (*f_pos == 0) {
+		*f_pos += 1;
+		return 1;
+	}
+
+	return 0;
+}
+
+static ssize_t
+socperf_Write(struct file *filp, const char *buf, size_t count, loff_t * f_pos)
+{
+	unsigned long retval;
+
+	SOCPERF_PRINT_DEBUG("lwpmu_Write dispatched with count=%d\n",
+			    (S32) count);
+	if (copy_from_user
+	    (&LWPMU_DEV_buffer(socperf_control), buf + count - 1, 1)) {
+		retval = OS_FAULT;
+		return retval;
+	}
+
+	return 1;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn  extern IOCTL_OP_TYPE lwpmu_Service_IOCTL(IOCTL_USE_NODE, filp, cmd, arg)
+ *
+ * @param   IOCTL_USE_INODE       - Used for pre 2.6.32 kernels
+ * @param   struct   file   *filp - file pointer
+ * @param   unsigned int     cmd  - IOCTL command
+ * @param   unsigned long    arg  - args to the IOCTL command
+ *
+ * @return OS_STATUS
+ *
+ * @brief  Worker function that handles IOCTL requests from the user mode.
+ *
+ * <I>Special Notes</I>
+ */
+extern IOCTL_OP_TYPE
+lwpmu_Service_IOCTL(IOCTL_USE_INODE
+		    struct file *filp,
+		    unsigned int cmd, IOCTL_ARGS_NODE local_args)
+{
+	int status = OS_SUCCESS;
+
+	switch (cmd) {
+
+		/*
+		 * Common IOCTL commands
+		 */
+	case DRV_OPERATION_VERSION:
+		SOCPERF_PRINT_DEBUG(" DRV_OPERATION_VERSION\n");
+		status = lwpmudrv_Version(&local_args);
+		break;
+
+	case DRV_OPERATION_RESERVE:
+		SOCPERF_PRINT_DEBUG(" DRV_OPERATION_RESERVE\n");
+		break;
+
+	case DRV_OPERATION_INIT_PMU:
+		SOCPERF_PRINT_DEBUG(" DRV_OPERATION_INIT_PMU\n");
+		status = lwpmudrv_Init_PMU();
+		break;
+
+	case DRV_OPERATION_START:
+		SOCPERF_PRINT_DEBUG(" DRV_OPERATION_START\n");
+		status = socperf_Start();
+		break;
+
+	case DRV_OPERATION_STOP:
+		SOCPERF_PRINT_DEBUG(" DRV_OPERATION_STOP\n");
+		status = socperf_Prepare_Stop();
+		break;
+
+	case DRV_OPERATION_PAUSE:
+		SOCPERF_PRINT_DEBUG(" DRV_OPERATION_PAUSE\n");
+		break;
+
+	case DRV_OPERATION_RESUME:
+		SOCPERF_PRINT_DEBUG(" DRV_OPERATION_RESUME\n");
+		break;
+
+	case DRV_OPERATION_TERMINATE:
+		SOCPERF_PRINT_DEBUG(" DRV_OPERATION_TERMINATE\n");
+		status = socperf_Terminate();
+		break;
+
+	case DRV_OPERATION_INIT_UNCORE:
+		SOCPERF_PRINT_DEBUG(" DRV_OPERATION_INIT_UNCORE\n");
+		status =
+		    lwpmudrv_Initialize_Uncore(local_args.w_buf,
+					       local_args.w_len);
+		break;
+	case DRV_OPERATION_EM_GROUPS_UNCORE:
+		SOCPERF_PRINT_DEBUG(" DRV_OPERATION_EM_GROUPS_UNC\n");
+		status = lwpmudrv_Set_EM_Config_Uncore(&local_args);
+		break;
+
+	case DRV_OPERATION_EM_CONFIG_NEXT_UNCORE:
+		SOCPERF_PRINT_DEBUG(" DRV_OPERATION_EM_CONFIG_NEXT_UNC\n");
+		status = socperf_Configure_Events_Uncore(&local_args);
+		break;
+
+	case DRV_OPERATION_TIMER_TRIGGER_READ:
+		lwpmudrv_Trigger_Read();
+		break;
+
+	case DRV_OPERATION_READ_UNCORE_DATA:
+		SOCPERF_PRINT_DEBUG(" DRV_OPERATION_READ_UNCORE_DATA\n");
+		status =
+		    lwpmudrv_Read_Uncore_Counts(local_args.r_buf,
+						local_args.r_len);
+		break;
+
+	case DRV_OPERATION_CREATE_MEM:
+		SOCPERF_PRINT_DEBUG(" DRV_OPERATION_CREATE_MEM\n");
+		lwpmudrv_Create_Mem(&local_args);
+		break;
+
+	case DRV_OPERATION_READ_MEM:
+		SOCPERF_PRINT_DEBUG(" DRV_OPERATION_READ_MEM\n");
+		lwpmudrv_Read_Mem(&local_args);
+		break;
+
+	case DRV_OPERATION_CHECK_STATUS:
+		SOCPERF_PRINT_DEBUG(" DRV_OPERATION_CHECK_STATUS\n");
+		lwpmudrv_Check_Status(&local_args);
+		break;
+
+	case DRV_OPERATION_STOP_MEM:
+		SOCPERF_PRINT_DEBUG(" DRV_OPERATION_STOP_MEM\n");
+		lwpmudrv_Stop_Mem();
+		break;
+
+		/*
+		 * if none of the above, treat as unknown/illegal IOCTL command
+		 */
+	default:
+		SOCPERF_PRINT_ERROR("Unknown IOCTL magic:%d number:%d\n",
+				    _IOC_TYPE(cmd), _IOC_NR(cmd));
+		status = OS_ILLEGAL_IOCTL;
+		break;
+	}
+
+	if (cmd == DRV_OPERATION_STOP &&
+	    GLOBAL_STATE_current_phase(driver_state) ==
+	    DRV_STATE_PREPARE_STOP) {
+		status = socperf_Finish_Stop();
+	}
+
+	return status;
+}
+
+extern long
+lwpmu_Device_Control(IOCTL_USE_INODE
+		     struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	int status = OS_SUCCESS;
+	IOCTL_ARGS_NODE local_args;
+
+#if !defined(DRV_USE_UNLOCKED_IOCTL)
+	SOCPERF_PRINT_DEBUG
+	    ("lwpmu_DeviceControl(0x%x) called on inode maj:%d, min:%d\n", cmd,
+	     imajor(inode), iminor(inode));
+#endif
+	SOCPERF_PRINT_DEBUG("type: %d, subcommand: %d\n", _IOC_TYPE(cmd),
+			    _IOC_NR(cmd));
+
+	if (_IOC_TYPE(cmd) != LWPMU_IOC_MAGIC) {
+		SOCPERF_PRINT_ERROR("Unknown IOCTL magic:%d\n", _IOC_TYPE(cmd));
+		return OS_ILLEGAL_IOCTL;
+	}
+
+	MUTEX_LOCK(ioctl_lock);
+	if (arg) {
+		status =
+		    copy_from_user(&local_args, (IOCTL_ARGS) arg,
+				   sizeof(IOCTL_ARGS_NODE));
+	}
+
+	status =
+	    lwpmu_Service_IOCTL(IOCTL_USE_INODE filp, _IOC_NR(cmd), local_args);
+	MUTEX_UNLOCK(ioctl_lock);
+
+	return status;
+}
+
+#if defined(HAVE_COMPAT_IOCTL) && defined(DRV_EM64T)
+extern long
+lwpmu_Device_Control_Compat(struct file *filp,
+			    unsigned int cmd, unsigned long arg)
+{
+	int status = OS_SUCCESS;
+	IOCTL_COMPAT_ARGS_NODE local_args_compat;
+	IOCTL_ARGS_NODE local_args;
+
+	memset(&local_args_compat, 0, sizeof(IOCTL_COMPAT_ARGS_NODE));
+	SOCPERF_PRINT_DEBUG("Compat: type: %d, subcommand: %d\n",
+			    _IOC_TYPE(cmd), _IOC_NR(cmd));
+
+	if (_IOC_TYPE(cmd) != LWPMU_IOC_MAGIC) {
+		SOCPERF_PRINT_ERROR("Unknown IOCTL magic:%d\n", _IOC_TYPE(cmd));
+		return OS_ILLEGAL_IOCTL;
+	}
+
+	MUTEX_LOCK(ioctl_lock);
+	if (arg) {
+		status =
+		    copy_from_user(&local_args_compat, (IOCTL_COMPAT_ARGS) arg,
+				   sizeof(IOCTL_COMPAT_ARGS_NODE));
+	}
+	local_args.r_len = local_args_compat.r_len;
+	local_args.w_len = local_args_compat.w_len;
+	local_args.r_buf = (char *)compat_ptr(local_args_compat.r_buf);
+	local_args.w_buf = (char *)compat_ptr(local_args_compat.w_buf);
+
+	status = lwpmu_Service_IOCTL(filp, _IOC_NR(cmd), local_args);
+	MUTEX_UNLOCK(ioctl_lock);
+
+	return status;
+}
+#endif
+
+/*
+ * @fn        LWPMUDRV_Abnormal_Terminate(void)
+ *
+ * @brief     This routine is called from linuxos_Exit_Task_Notify if the user process has
+ *            been killed by an uncatchable signal (example kill -9).  The state variable
+ *            abormal_terminate is set to 1 and the clean up routines are called.  In this
+ *            code path the OS notifier hooks should not be unloaded.
+ *
+ * @param     None
+ *
+ * @return    OS_STATUS
+ *
+ * <I>Special Notes:</I>
+ *     <none>
+ */
+extern int LWPMUDRV_Abnormal_Terminate(void
+    )
+{
+	int status = OS_SUCCESS;
+
+	abnormal_terminate = 1;
+	SOCPERF_PRINT_DEBUG
+	    ("Abnormal-Termination: Calling socperf_Prepare_Stop\n");
+	status = socperf_Prepare_Stop();
+	SOCPERF_PRINT_DEBUG
+	    ("Abnormal-Termination: Calling socperf_Finish_Stop\n");
+	status = socperf_Finish_Stop();
+	SOCPERF_PRINT_DEBUG
+	    ("Abnormal-Termination: Calling lwpmudrv_Terminate\n");
+	status = socperf_Terminate();
+
+	return status;
+}
+
+/*****************************************************************************************
+ *
+ *   Driver Entry / Exit functions that will be called on when the driver is loaded and
+ *   unloaded
+ *
+ ****************************************************************************************/
+
+/*
+ * Structure that declares the usual file access functions
+ * First one is for lwpmu_c, the control functions
+ */
+static struct file_operations socperf_Fops = {
+	.owner = THIS_MODULE,
+	IOCTL_OP = lwpmu_Device_Control,
+#if defined(HAVE_COMPAT_IOCTL) && defined(DRV_EM64T)
+	.compat_ioctl = lwpmu_Device_Control_Compat,
+#endif
+	.read = socperf_Read,
+	.write = socperf_Write,
+	.open = socperf_Open,
+	.release = NULL,
+	.llseek = NULL,
+};
+
+/*!
+ * @fn  static int lwpmudrv_setup_cdev(dev, fops, dev_number)
+ *
+ * @param LWPMU_DEV               dev  - pointer to the device object
+ * @param struct file_operations *fops - pointer to the file operations struct
+ * @param dev_t                   dev_number - major/monor device number
+ *
+ * @return OS_STATUS
+ *
+ * @brief  Set up the device object.
+ *
+ * <I>Special Notes</I>
+ */
+static int
+lwpmu_setup_cdev(LWPMU_DEV dev, struct file_operations *fops, dev_t dev_number)
+{
+	cdev_init(&LWPMU_DEV_cdev(dev), fops);
+	LWPMU_DEV_cdev(dev).owner = THIS_MODULE;
+	LWPMU_DEV_cdev(dev).ops = fops;
+
+	return cdev_add(&LWPMU_DEV_cdev(dev), dev_number, 1);
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn  static int socperf_Load(void)
+ *
+ * @param none
+ *
+ * @return STATUS
+ *
+ * @brief  Load the driver module into the kernel.  Set up the driver object.
+ * @brief  Set up the initial state of the driver and allocate the memory
+ * @brief  needed to keep basic state information.
+ */
+static int socperf_Load(VOID)
+{
+	int num_cpus;
+	OS_STATUS status = OS_SUCCESS;
+
+	CONTROL_Memory_Tracker_Init();
+
+	/* Get one major device number and two minor numbers. */
+	/*   The result is formatted as major+minor(0) */
+	/*   One minor number is for control (lwpmu_c), */
+	/*   the other (lwpmu_m) is for modules */
+	SOCPERF_PRINT("SocPerf Driver loading...\n");
+	SOCPERF_PRINT("SocPerf Driver about to register chrdev...\n");
+
+	lwpmu_DevNum = MKDEV(0, 0);
+	status =
+	    alloc_chrdev_region(&lwpmu_DevNum, 0, PMU_DEVICES,
+				SOCPERF_DRIVER_NAME);
+	SOCPERF_PRINT("SocPerf Driver: result of alloc_chrdev_region is %d\n",
+		      status);
+	if (status < 0) {
+		SOCPERF_PRINT_ERROR
+		    ("SocPerf driver failed to alloc chrdev_region!\n");
+		return status;
+	}
+	SOCPERF_PRINT("SocPerf Driver: major number is %d\n",
+		      MAJOR(lwpmu_DevNum));
+	status = lwpmudrv_Initialize_State();
+	if (status < 0) {
+		SOCPERF_PRINT_ERROR
+		    ("SocPerf driver failed to initialize state!\n");
+		return status;
+	}
+	num_cpus = GLOBAL_STATE_num_cpus(driver_state);
+	SOCPERF_PRINT("SocPerf Driver: detected %d CPUs in lwpmudrv_Load\n",
+		      num_cpus);
+
+	/* Allocate memory for the control structures */
+	socperf_control = CONTROL_Allocate_Memory(sizeof(LWPMU_DEV_NODE));
+
+	if (!socperf_control) {
+		CONTROL_Free_Memory(socperf_control);
+		return OS_NO_MEM;
+	}
+
+	/* Register the file operations with the OS */
+
+#if defined (DRV_ANDROID) || defined (DRV_CHROMEOS)
+	SOCPERF_PRINT("SocPerf Driver: ANDROID_DEVICE %s...\n",
+		      SOCPERF_DRIVER_NAME DRV_DEVICE_DELIMITER "c");
+	pmu_class = class_create(THIS_MODULE, SOCPERF_DRIVER_NAME);
+	if (IS_ERR(pmu_class)) {
+		SOCPERF_PRINT_ERROR
+		    ("Error registering SocPerf control class\n");
+	}
+	device_create(pmu_class, NULL, lwpmu_DevNum, NULL,
+		      SOCPERF_DRIVER_NAME DRV_DEVICE_DELIMITER "c");
+#endif
+
+	status = lwpmu_setup_cdev(socperf_control, &socperf_Fops, lwpmu_DevNum);
+	if (status) {
+		SOCPERF_PRINT_ERROR("Error %d adding lwpmu as char device\n",
+				    status);
+		return status;
+	}
+
+	MUTEX_INIT(ioctl_lock);
+
+	/*
+	 *  Initialize the SocPerf driver version (done once at driver load time)
+	 */
+	SOCPERF_VERSION_NODE_major(&drv_version) = SOCPERF_MAJOR_VERSION;
+	SOCPERF_VERSION_NODE_minor(&drv_version) = SOCPERF_MINOR_VERSION;
+	SOCPERF_VERSION_NODE_api(&drv_version) = SOCPERF_API_VERSION;
+/*	*/
+	/* Display driver version information*/
+/*	*/
+	SOCPERF_PRINT("SocPerf Driver v%d.%d.%d has been loaded.\n",
+		      SOCPERF_VERSION_NODE_major(&drv_version),
+		      SOCPERF_VERSION_NODE_minor(&drv_version),
+		      SOCPERF_VERSION_NODE_api(&drv_version));
+
+	return status;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn  static int lwpmu_Unload(void)
+ *
+ * @param none
+ *
+ * @return none
+ *
+ * @brief  Remove the driver module from the kernel.
+ */
+static VOID socperf_Unload(VOID)
+{
+	SOCPERF_PRINT("SocPerf Driver unloading...\n");
+
+	pcb = CONTROL_Free_Memory(pcb);
+	pcb_size = 0;
+
+#if defined (DRV_ANDROID) || defined (DRV_CHROMEOS)
+	unregister_chrdev(MAJOR(lwpmu_DevNum), SOCPERF_DRIVER_NAME);
+	device_destroy(pmu_class, lwpmu_DevNum);
+	device_destroy(pmu_class, lwpmu_DevNum + 1);
+#endif
+
+	cdev_del(&LWPMU_DEV_cdev(socperf_control));
+	unregister_chrdev_region(lwpmu_DevNum, PMU_DEVICES);
+
+#if defined (DRV_ANDROID) || defined (DRV_CHROMEOS)
+	class_destroy(pmu_class);
+#endif
+
+	socperf_control = CONTROL_Free_Memory(socperf_control);
+
+	CONTROL_Memory_Tracker_Free();
+
+/*	*/
+	/* Display driver version information*/
+/*	*/
+	SOCPERF_PRINT("SocPerf Driver v%d.%d.%d has been unloaded.\n",
+		      SOCPERF_VERSION_NODE_major(&drv_version),
+		      SOCPERF_VERSION_NODE_minor(&drv_version),
+		      SOCPERF_VERSION_NODE_api(&drv_version));
+
+	return;
+}
+
+/* Declaration of the init and exit functions */
+module_init(socperf_Load);
+module_exit(socperf_Unload);
diff --git a/drivers/external_drivers/drivers/socwatch/soc_perf_driver/socperfdrv.h b/drivers/external_drivers/drivers/socwatch/soc_perf_driver/socperfdrv.h
new file mode 100644
index 0000000..a306fb8
--- /dev/null
+++ b/drivers/external_drivers/drivers/socwatch/soc_perf_driver/socperfdrv.h
@@ -0,0 +1,180 @@
+/* ***********************************************************************************************
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2005-2014 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  BSD LICENSE
+
+  Copyright(c) 2005-2014 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+  ***********************************************************************************************
+*/
+
+#ifndef _SOCPERFDRV_H_
+#define _SOCPERFDRV_H_
+
+#include <linux/kernel.h>
+#include <linux/compat.h>
+#include "lwpmudrv_defines.h"
+#include "lwpmudrv_ecb.h"
+#include "lwpmudrv_types.h"
+#include "lwpmudrv_version.h"
+#include "lwpmudrv_struct.h"
+
+/*
+ * Print macros for driver messages
+ */
+
+#if defined(MYDEBUG)
+#define SOCPERF_PRINT_DEBUG(fmt,args...) { printk(KERN_INFO SOCPERF_MSG_PREFIX" [DEBUG] " fmt,##args); }
+#else
+#define SOCPERF_PRINT_DEBUG(fmt,args...) {;}
+#endif
+
+#define SOCPERF_PRINT(fmt,args...) { printk(KERN_INFO SOCPERF_MSG_PREFIX" " fmt,##args); }
+
+#define SOCPERF_PRINT_WARNING(fmt,args...) { printk(KERN_ALERT SOCPERF_MSG_PREFIX" [Warning] " fmt,##args); }
+
+#define SOCPERF_PRINT_ERROR(fmt,args...) { printk(KERN_CRIT SOCPERF_MSG_PREFIX" [ERROR] " fmt,##args); }
+
+/* Macro to return the thread group id*/
+#define GET_CURRENT_TGID() (current->tgid)
+
+#if defined(DRV_IA32) || defined(DRV_EM64T)
+#define OVERFLOW_ARGS  U64*, U64*
+#elif defined(DRV_IA64)
+#define OVERFLOW_ARGS  U64*, U64*, U64*, U64*, U64*, U64*
+#endif
+
+/*
+ *  Dispatch table for virtualized functions.
+ *  Used to enable common functionality for different
+ *  processor microarchitectures
+ */
+typedef struct DISPATCH_NODE_S DISPATCH_NODE;
+typedef DISPATCH_NODE *DISPATCH;
+
+struct DISPATCH_NODE_S {
+	VOID(*init) (PVOID);
+	VOID(*fini) (PVOID);
+	VOID(*write) (PVOID);
+	VOID(*freeze) (PVOID);
+	VOID(*restart) (PVOID);
+	VOID(*read_data) (PVOID);
+	VOID(*check_overflow) (DRV_MASKS);
+	VOID(*swap_group) (DRV_BOOL);
+	VOID(*read_lbrs) (PVOID);
+	VOID(*clean_up) (PVOID);
+	VOID(*hw_errata) (VOID);
+	VOID(*read_power) (PVOID);
+	U64(*check_overflow_errata) (ECB, U32, U64);
+	VOID(*read_counts) (PVOID, U32);
+	U64(*check_overflow_gp_errata) (ECB, U64 *);
+	VOID(*read_ro) (PVOID, U32, U32);
+	U64(*platform_info) (VOID);
+	VOID(*trigger_read) (VOID);	/* Counter reads triggered/initiated by User mode timer*/
+	VOID(*read_current_data) (PVOID);
+	VOID(*create_mem) (U32, U64 *);
+	VOID(*check_status) (U64 *, U32 *);
+	VOID(*read_mem) (U64, U64 *, U32);
+	VOID(*stop_mem) (VOID);
+};
+
+extern DISPATCH dispatch;
+
+extern VOID **PMU_register_data;
+extern VOID **desc_data;
+extern U64 *prev_counter_data;
+extern U64 *cur_counter_data;
+
+/*!
+ * @struct LWPMU_DEVICE_NODE_S
+ * @brief  Struct to hold fields per device
+ *           PMU_register_data_unc - MSR info
+ *           dispatch_unc          - dispatch table
+ *           em_groups_counts_unc  - # groups
+ *           pcfg_unc              - config struct
+ */
+typedef struct LWPMU_DEVICE_NODE_S LWPMU_DEVICE_NODE;
+typedef LWPMU_DEVICE_NODE *LWPMU_DEVICE;
+
+struct LWPMU_DEVICE_NODE_S {
+	VOID **PMU_register_data_unc;
+	DISPATCH dispatch_unc;
+	S32 em_groups_count_unc;
+	VOID *pcfg_unc;
+	U64 **acc_per_thread;
+	U64 **prev_val_per_thread;
+	U64 counter_mask;
+	U64 num_events;
+	U32 num_units;
+	VOID *ec;
+	S32 cur_group;
+};
+
+#define LWPMU_DEVICE_PMU_register_data(dev)   (dev)->PMU_register_data_unc
+#define LWPMU_DEVICE_dispatch(dev)            (dev)->dispatch_unc
+#define LWPMU_DEVICE_em_groups_count(dev)     (dev)->em_groups_count_unc
+#define LWPMU_DEVICE_pcfg(dev)                (dev)->pcfg_unc
+#define LWPMU_DEVICE_acc_per_thread(dev)      (dev)->acc_per_thread
+#define LWPMU_DEVICE_prev_val_per_thread(dev) (dev)->prev_val_per_thread
+#define LWPMU_DEVICE_counter_mask(dev)        (dev)->counter_mask
+#define LWPMU_DEVICE_num_events(dev)          (dev)->num_events
+#define LWPMU_DEVICE_num_units(dev)           (dev)->num_units
+#define LWPMU_DEVICE_ec(dev)                  (dev)->ec
+#define LWPMU_DEVICE_cur_group(dev)           (dev)->cur_group
+
+extern U32 num_devices;
+extern U32 cur_devices;
+extern LWPMU_DEVICE device_uncore;
+extern U64 *pmu_state;
+
+/* Handy macro*/
+#define TSC_SKEW(this_cpu)     (tsc_info[this_cpu] - tsc_info[0])
+
+#endif
diff --git a/drivers/external_drivers/drivers/socwatch/socwatch_driver/Kconfig b/drivers/external_drivers/drivers/socwatch/socwatch_driver/Kconfig
new file mode 100644
index 0000000..eccbcbc
--- /dev/null
+++ b/drivers/external_drivers/drivers/socwatch/socwatch_driver/Kconfig
@@ -0,0 +1,3 @@
+config INTEL_SOCWATCH_DRV
+	boolean "Intel SoCWatch driver support"
+	depends on X86_INTEL_MID=y
diff --git a/drivers/external_drivers/drivers/socwatch/socwatch_driver/Makefile b/drivers/external_drivers/drivers/socwatch/socwatch_driver/Makefile
new file mode 100644
index 0000000..c8befc4
--- /dev/null
+++ b/drivers/external_drivers/drivers/socwatch/socwatch_driver/Makefile
@@ -0,0 +1,7 @@
+# By default, build for Android
+ccflags-y += -DDO_ANDROID -DDO_WAKELOCK_SAMPLE=1
+
+obj-$(CONFIG_INTEL_SOCWATCH_DRV) +=	\
+		pw_matrix.o	\
+		pw_output_buffer.o	\
+		apwr_driver.o
diff --git a/drivers/external_drivers/drivers/socwatch/socwatch_driver/apwr_driver.c b/drivers/external_drivers/drivers/socwatch/socwatch_driver/apwr_driver.c
new file mode 100644
index 0000000..6099792
--- /dev/null
+++ b/drivers/external_drivers/drivers/socwatch/socwatch_driver/apwr_driver.c
@@ -0,0 +1,9002 @@
+/* ***********************************************************************************************
+
+   This file is provided under a dual BSD/GPLv2 license.  When using or
+   redistributing this file, you may do so under either license.
+
+   GPL LICENSE SUMMARY
+
+   Copyright(c) 2013 Intel Corporation. All rights reserved.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of version 2 of the GNU General Public License as
+   published by the Free Software Foundation.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+   The full GNU General Public License is included in this distribution
+   in the file called LICENSE.GPL.
+
+   Contact Information:
+   SOCWatch Developer Team <socwatchdevelopers@intel.com>
+
+   BSD LICENSE
+
+   Copyright(c) 2013 Intel Corporation. All rights reserved.
+   All rights reserved.
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions
+   are met:
+
+   * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions and the following disclaimer in
+   the documentation and/or other materials provided with the
+   distribution.
+   * Neither the name of Intel Corporation nor the names of its
+   contributors may be used to endorse or promote products derived
+   from this software without specific prior written permission.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+   ***********************************************************************************************
+   */
+
+/**
+ * apwr_driver.c: Prototype kernel module to trace the following
+ * events that are relevant to power:
+ *	- entry into a C-state
+ *	- change of processor frequency
+ *	- interrupts and timers
+ */
+
+#define MOD_AUTHOR "Gautam Upadhyaya <gautam.upadhyaya@intel.com>"
+#define MOD_DESC "Power driver for Piersol power tool. Adapted from Romain Cledat's codebase."
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/time.h>
+#include <linux/smp.h>		/* For smp_call_function*/
+
+#include <asm/local.h>
+#include <asm/cputime.h>	/* For ktime*/
+#include <asm/io.h>		/* For ioremap, read, and write*/
+
+#include <trace/events/timer.h>
+#include <trace/events/power.h>
+#include <trace/events/irq.h>
+#include <trace/events/sched.h>
+#include <trace/events/syscalls.h>
+struct pool_workqueue;		/* Get rid of warnings regarding trace_workqueue*/
+#include <trace/events/workqueue.h>
+
+#include <linux/hardirq.h>	/* for "in_interrupt"*/
+#include <linux/interrupt.h>	/* for "TIMER_SOFTIRQ, HRTIMER_SOFTIRQ"*/
+
+#include <linux/kallsyms.h>
+#include <linux/stacktrace.h>
+#include <linux/hash.h>
+#include <linux/poll.h>
+#include <linux/list.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/cpufreq.h>
+#include <linux/version.h>	/* for "LINUX_VERSION_CODE"*/
+#include <asm/unistd.h>		/* for "__NR_execve"*/
+#include <asm/delay.h>		/* for "udelay"*/
+#include <linux/suspend.h>	/* for "pm_notifier"*/
+#include <linux/pci.h>
+#include <linux/sfi.h>		/* To retrieve SCU F/W version*/
+
+#ifdef CONFIG_RPMSG_IPC
+#include <asm/intel_mid_rpmsg.h>
+#endif /* CONFIG_RPMSG_IPC*/
+/*
+#if DO_ANDROID
+    #include <asm/intel-mid.h>
+#endif
+*/
+#ifdef CONFIG_X86_INTEL_MID
+#include <asm/intel-mid.h>
+#endif /* CONFIG_X86_INTEL_MID*/
+
+#if DO_WAKELOCK_SAMPLE
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0)
+#include <trace/events/wakelock.h>	/* Works for the custom kernel enabling wakelock tracepoint event*/
+#endif
+#endif
+
+#ifndef __arm__
+#include <asm/timer.h>		/* for "CYC2NS_SCALE_FACTOR"*/
+#endif
+
+#include "pw_lock_defs.h"
+#include "pw_mem.h"		/* internally includes "pw_lock_defs.h"*/
+#include "pw_data_structs.h"
+#include "pw_output_buffer.h"
+#include "pw_defines.h"
+#include "pw_matrix.h"
+
+/**** CONFIGURATION ****/
+
+typedef enum {
+	NON_ATOM = 0,
+	MFD,
+	LEX,
+	CLV
+} atom_arch_type_t;
+
+typedef enum {
+	NON_SLM = 0,
+	SLM_VLV2,
+	SLM_TNG,
+	SLM_ANN,
+	SLM_CHV,
+	SLM_BXT,
+} slm_arch_type_t;
+
+#define APWR_VERSION_CODE LINUX_VERSION_CODE
+
+static __read_mostly atom_arch_type_t pw_is_atm = NON_ATOM;
+static __read_mostly slm_arch_type_t pw_is_slm = NON_SLM;
+static __read_mostly bool pw_is_hsw = false;
+static __read_mostly bool pw_is_bdw = false;
+static __read_mostly bool pw_is_any_thread_set = false;
+static __read_mostly bool pw_is_auto_demote_enabled = false;
+static __read_mostly u16 pw_msr_fsb_freq_value = 0x0;
+static __read_mostly u16 pw_max_non_turbo_ratio = 0x0;	/* Highest non-turbo ratio i.e. TSC frequency*/
+static __read_mostly u16 pw_max_turbo_ratio = 0x0;	/* Highest turbo ratio i.e. "HFM"*/
+static __read_mostly u16 pw_max_efficiency_ratio = 0x0;	/* Lowest non-turbo (and non-thermal-throttled) ratio i.e. "LFM"*/
+
+__read_mostly u16 pw_scu_fw_major_minor = 0x0;
+
+/* Controls the amount of printks that happen. Levels are:
+ *	- 0: no output save for errors and status at end
+ *	- 1: single line for each hit (tracepoint, idle notifier...)
+ *	- 2: more details
+ *	- 3: user stack and kernel stack info
+ */
+static unsigned int verbosity = 0;
+
+module_param(verbosity, uint, 0);
+MODULE_PARM_DESC(verbosity,
+		 "Verbosity of output. From 0 to 3 with 3 the most verbose [default=0]");
+
+static bool do_force_module_scope_for_cpu_frequencies = false;
+module_param(do_force_module_scope_for_cpu_frequencies, bool, S_IRUSR);
+MODULE_PARM_DESC(do_force_module_scope_for_cpu_frequencies,
+		 "Toggle module scope for cpu frequencies. Sets \"affected_cpus\" and \"related_cpus\" of cpufreq_policy.");
+
+/*
+ * Controls whether we should be probing on
+ * syscall enters and exits.
+ * Useful for:
+ * (1) Fork <-> Exec issues.
+ * (2) Userspace <-> Kernelspace timer discrimination.
+ */
+static unsigned int probe_on_syscalls = 0;
+module_param(probe_on_syscalls, uint, 0);
+MODULE_PARM_DESC(probe_on_syscalls,
+		 "Should we probe on syscall enters and exits? 1 ==> YES, 0 ==> NO (Default NO)");
+
+/*
+ * For measuring collection times.
+ */
+static unsigned long startJIFF, stopJIFF;
+
+#define SUCCESS 0
+#define ERROR 1
+
+/*
+ * Compile-time flags -- these affect
+ * which parts of the driver get
+ * compiled in.
+ */
+/*
+ * Do we allow blocking reads?
+ */
+#define ALLOW_BLOCKING_READ 1
+/*
+ * Control whether the 'OUTPUT' macro is enabled.
+ * Set to: "1" ==> 'OUTPUT' is enabled.
+ *         "0" ==> 'OUTPUT' is disabled.
+ */
+/* #define DO_DEBUG_OUTPUT 0*/
+/*
+ * Control whether to output driver ERROR messages.
+ * These are independent of the 'OUTPUT' macro
+ * (which controls debug messages).
+ * Set to '1' ==> Print driver error messages (to '/var/log/messages')
+ *        '0' ==> Do NOT print driver error messages
+ */
+/* #define DO_PRINT_DRIVER_ERROR_MESSAGES 1*/
+/*
+ * Do we read the TSC MSR directly to determine
+ * TSC (as opposed to using a kernel
+ * function call -- e.g. rdtscll)?
+ */
+#define READ_MSR_FOR_TSC 1
+/*
+ * Do we support stats collection
+ * for the 'PW_IOCTL_STATUS' ioctl?
+ */
+#define DO_IOCTL_STATS 0
+/*
+ * Do we check if the special 'B0' MFLD
+ * microcode patch has been installed?
+ * '1' ==> YES, perform the check.
+ * '0' ==> NO, do NOT perform the check.
+ */
+#define DO_CHECK_BO_MICROCODE_PATCH 1
+/*
+ * Do we conduct overhead measurements?
+ * '1' == > YES, conduct measurements.
+ * '0' ==> NO, do NOT conduct measurements.
+ */
+#define DO_OVERHEAD_MEASUREMENTS 1
+/*
+ * Should we print some stats at the end of a collection?
+ * '1' ==> YES, print stats
+ * '0' ==> NO, do NOT print stats
+ */
+#define DO_PRINT_COLLECTION_STATS 0
+/*
+ * Do we keep track of IRQ # <--> DEV name mappings?
+ * '1' ==> YES, cache mappings.
+ * '0' ==> NO, do NOT cache mappings.
+ */
+#define DO_CACHE_IRQ_DEV_NAME_MAPPINGS 1
+/*
+ * Do we allow multiple device (names) to
+ * map to the same IRQ number? Setting
+ * to true makes the driver slower, if
+ * more accurate.
+ * '1' ==> YES, allow multi-device IRQs
+ * '0' ==> NO, do NOT allow.
+ */
+#define DO_ALLOW_MULTI_DEV_IRQ 0
+/*
+ * Do we use a constant poll for wakelock names?
+ * '1' ==> YES, use a constant pool.
+ * '0' ==> NO, do NOT use a constant pool.
+ */
+#define DO_USE_CONSTANT_POOL_FOR_WAKELOCK_NAMES 1
+/*
+ * Do we use APERF, MPERF for
+ * dynamic freq calculations?
+ * '1' ==> YES, use APERF, MPERF
+ * '0' ==> NO, use IA32_FIXED_CTR{1,2}
+ */
+#define USE_APERF_MPERF_FOR_DYNAMIC_FREQUENCY 1
+/*
+ * MSR used to toggle C-state auto demotions.
+ */
+#define AUTO_DEMOTE_MSR 0xe2
+/*
+ * Bit positions to toggle auto-demotion on NHM, ATM
+ */
+#define NHM_C3_AUTO_DEMOTE (1UL << 25)
+#define NHM_C1_AUTO_DEMOTE (1UL << 26)
+#define ATM_C6_AUTO_DEMOTE (1UL << 25)
+#define AUTO_DEMOTE_FLAGS() ( pw_is_atm ? ATM_C6_AUTO_DEMOTE : (NHM_C3_AUTO_DEMOTE | NHM_C1_AUTO_DEMOTE) )
+#define IS_AUTO_DEMOTE_ENABLED(msr) ( pw_is_atm ? (msr) & ATM_C6_AUTO_DEMOTE : (msr) & (NHM_C3_AUTO_DEMOTE | NHM_C1_AUTO_DEMOTE) )
+/*
+ * PERF_STATUS MSR addr -- bits 12:8, multiplied by the
+ * bus clock freq, give the freq the H/W is currently
+ * executing at.
+ */
+#define IA32_PERF_STATUS_MSR_ADDR 0x198
+/*
+ * Do we use the cpufreq notifier
+ * for p-state transitions?
+ * Useful on MFLD, where the default
+ * TPF seems to be broken.
+ */
+#define DO_CPUFREQ_NOTIFIER 0
+/*
+ * Collect S state residency counters
+ */
+#define DO_S_RESIDENCY_SAMPLE 1
+/*
+ * Collect ACPI S3 state residency counters
+ */
+#define DO_ACPI_S3_SAMPLE 1
+/*
+ * Run the p-state sample generation in parallel for all CPUs
+ * at the beginning and the end to avoid any delay
+ * due to serial execution
+ */
+#define DO_GENERATE_CURRENT_FREQ_IN_PARALLEL 1
+/*
+ * Should we calculate TSC frequency using a sleep loop?
+ * Useful for debugging TSC frequency measurement issues.
+ * '1' ==> YES, calculate TSC frequency.
+ * '0' ==> NO, do NOT calculate TSC frequency.
+ */
+#define DO_DEBUG_TSC_FREQ_CALCULATION 0
+
+/*
+ * Compile-time constants and
+ * other macros.
+ */
+
+#define NUM_MAP_BUCKETS_BITS 9
+#define NUM_MAP_BUCKETS (1UL << NUM_MAP_BUCKETS_BITS)
+
+/* 32 locks for the hash table*/
+#define HASH_LOCK_BITS 5
+#define NUM_HASH_LOCKS (1UL << HASH_LOCK_BITS)
+#define HASH_LOCK_MASK (NUM_HASH_LOCKS - 1)
+
+#define HASH_LOCK(i) LOCK(hash_locks[(i) & HASH_LOCK_MASK])
+#define HASH_UNLOCK(i) UNLOCK(hash_locks[(i) & HASH_LOCK_MASK])
+
+#define NUM_TIMER_NODES_PER_BLOCK 20
+
+#define TIMER_HASH_FUNC(a) hash_ptr((void *)a, NUM_MAP_BUCKETS_BITS)
+
+/* Macro for printk based on verbosity */
+#if DO_DEBUG_OUTPUT
+#define OUTPUT(level, ...) do { if(unlikely(level <= verbosity)) printk(__VA_ARGS__); } while(0);
+#else
+#define OUTPUT(level, ...)
+#endif /* DO_DEBUG_OUTPUT*/
+/*
+ * Macro for driver error messages.
+ */
+#if DO_PRINT_DRIVER_ERROR_MESSAGES
+#define pw_pr_error(...) printk(KERN_ERR __VA_ARGS__)
+#else
+#define pw_pr_error(...)
+#endif
+
+#define CPU() (raw_smp_processor_id())
+#define RAW_CPU() (raw_smp_processor_id())
+#define TID() (current->pid)
+#define PID() (current->tgid)
+#define NAME() (current->comm)
+#define PKG(c) ( cpu_data(c).phys_proc_id )
+#define IT_REAL_INCR() (current->signal->it_real_incr.tv64)
+
+#define GET_BOOL_STRING(b) ( (b) ? "TRUE" : "FALSE" )
+
+#define BEGIN_IRQ_STATS_READ(p, c) do{		\
+    p = &per_cpu(irq_stat, (c));
+
+#define END_IRQ_STATS_READ(p, c)		\
+    }while(0)
+
+#define BEGIN_LOCAL_IRQ_STATS_READ(p) do{	\
+    p = &__get_cpu_var(irq_stat);
+
+#define END_LOCAL_IRQ_STATS_READ(p)		\
+    }while(0)
+
+/*
+ * For now, we limit kernel-space backtraces to 20 entries.
+ * This decision will be re-evaluated in the future.
+ */
+/* #define MAX_BACKTRACE_LENGTH 20*/
+#define MAX_BACKTRACE_LENGTH TRACE_LEN
+/*
+ * Is this a "root" timer?
+ */
+#if DO_PROBE_ON_SYSCALL_ENTER_EXIT
+#define IS_ROOT_TIMER(tid) ( (tid) == 0 || !is_tid_in_sys_list(tid) )
+#else
+#define IS_ROOT_TIMER(tid) ( (tid) == 0 )
+#endif /* DO_PROBE_ON_SYSCALL_ENTER_EXIT*/
+/*
+ * 64bit Compare-and-swap.
+ */
+#define CAS64(p, o, n) cmpxchg64((p), (o), (n)) == (o)
+/*
+ * Local compare-and-swap.
+ */
+#define LOCAL_CAS(l, o, n) local_cmpxchg((l), (o), (n)) == (o)
+/*
+ * Record a wakeup cause (but only if we're the first non-{TPS,TPE}
+ * event to occur after a wakeup.
+ * @tsc: the TSC when the event occurred
+ * @type: the wakeup type: one of c_break_type_t vals
+ * @value: a domain-specific value
+ * @cpu: the logical CPU on which the timer was initialized; specific ONLY to wakeups caused by timers!
+ * @pid: PID of the process that initialized the timer for a timer-wakeup (or -1 for other wakeup events).
+ * @tid: TID of the task that initialized the timer for a timer-wakeup (or -1 for other wakeup events).
+ */
+#define record_wakeup_cause(tsc, type, value, cpu, pid, tid) do { \
+    struct wakeup_event *wu_event = &get_cpu_var(wakeup_event_counter); \
+    bool is_first_wakeup_event = CAS64(&wu_event->event_tsc, 0, (tsc)); \
+    if (is_first_wakeup_event) { \
+        wu_event->event_val = (value); \
+        wu_event->init_cpu = (cpu); \
+        wu_event->event_type = (type); \
+        wu_event->event_tid = (tid); \
+        wu_event->event_pid = (pid); \
+    } \
+    put_cpu_var(wakeup_event_counter); \
+} while(0)
+
+/*
+ * For NHM etc.: Base operating frequency
+ * ratio is encoded in 'PLATFORM_INFO' MSR.
+ */
+#define PLATFORM_INFO_MSR_ADDR 0xCE
+/*
+ * For MFLD -- base operating frequency
+ * ratio is encoded in 'CLOCK_CR_GEYSIII_STAT'
+ * MSR (internal communication with Peggy Irelan)
+ */
+#define CLOCK_CR_GEYSIII_STAT_MSR_ADDR 0x198	/* '408 decimal'*/
+/*
+ * For SLM -- max turbo ratio is encoded in bits 4:0 of 'MSR_IA32_IACORE_TURBO_RATIOS'
+ */
+#define MSR_IA32_IACORE_TURBO_RATIOS 0x66c
+/*
+ * For "Core" -- max turbo ratio is encoded in bits
+ */
+#define MSR_TURBO_RATIO_LIMIT 0x1AD
+/*
+ * Standard Bus frequency. Valid for
+ * NHM/WMR.
+ * TODO: frequency for MFLD?
+ */
+#define BUS_CLOCK_FREQ_KHZ_NHM 133000	/* For NHM/WMR. SNB has 100000 */
+#define BUS_CLOCK_FREQ_KHZ_MFLD 100000	/* For MFLD. SNB has 100000 */
+/*
+ * For core and later, Bus freq is encoded in 'MSR_FSB_FREQ'
+ */
+#define MSR_FSB_FREQ_ADDR 0xCD
+/*
+ * Try and determine the bus frequency.
+ * Used ONLY if the user-program passed
+ * us an invalid clock frequency.
+ */
+#define DEFAULT_BUS_CLOCK_FREQ_KHZ() ({u32 __tmp = (pw_is_atm) ? BUS_CLOCK_FREQ_KHZ_MFLD : BUS_CLOCK_FREQ_KHZ_NHM; __tmp;})
+/*
+ * MSRs required to enable CPU_CLK_UNHALTED.REF
+ * counting.
+ */
+#define IA32_PERF_GLOBAL_CTRL_ADDR 0x38F
+#define IA32_FIXED_CTR_CTL_ADDR 0x38D
+/*
+ * Standard APERF/MPERF addresses.
+ * Required for dynamic freq
+ * measurement.
+ */
+#define MPERF_MSR_ADDR 0xe7
+#define APERF_MSR_ADDR 0xe8
+/*
+ * Fixed counter addresses.
+ * Required for dynamic freq
+ * measurement.
+ */
+#define IA32_FIXED_CTR1_ADDR 0x30A
+#define IA32_FIXED_CTR2_ADDR 0x30B
+/*
+ * Bit positions for 'AnyThread' bits for the two
+ * IA_32_FIXED_CTR{1,2} MSRs. Always '2 + 4*N'
+ * where N == 1 => CTR1, N == 2 => CTR2
+ */
+#define IA32_FIXED_CTR1_ANYTHREAD_POS (1UL << 6)
+#define IA32_FIXED_CTR2_ANYTHREAD_POS (1UL << 10)
+#define ENABLE_FIXED_CTR_ANY_THREAD_MASK (IA32_FIXED_CTR1_ANYTHREAD_POS | IA32_FIXED_CTR2_ANYTHREAD_POS)
+#define DISABLE_FIXED_CTR_ANY_THREAD_MASK ~ENABLE_FIXED_CTR_ANY_THREAD_MASK
+#define IS_ANY_THREAD_SET(msr) ( (msr) & ENABLE_FIXED_CTR_ANY_THREAD_MASK )
+/*
+ * Toggle between APERF,MPERF and
+ * IA32_FIXED_CTR{1,2} for Turbo.
+ */
+#if USE_APERF_MPERF_FOR_DYNAMIC_FREQUENCY
+#define CORE_CYCLES_MSR_ADDR APERF_MSR_ADDR
+#define REF_CYCLES_MSR_ADDR MPERF_MSR_ADDR
+#else /* !USE_APERF_MPERF_FOR_DYNAMIC_FREQUENCY*/
+#define CORE_CYCLES_MSR_ADDR IA32_FIXED_CTR1_ADDR
+#define REF_CYCLES_MSR_ADDR IA32_FIXED_CTR2_ADDR
+#endif
+
+/*
+ * Size of each 'bucket' for a 'cpu_bitmap'
+ */
+#define NUM_BITS_PER_BUCKET (sizeof(unsigned long) * 8)
+/*
+ * Num 'buckets' for each 'cpu_bitmap' in the
+ * 'irq_node' struct.
+ */
+#define NUM_BITMAP_BUCKETS ( (pw_max_num_cpus / NUM_BITS_PER_BUCKET) + 1 )
+/*
+ * 'cpu_bitmap' manipulation macros.
+ */
+#define IS_BIT_SET(bit,map) ( test_bit( (bit), (map) ) != 0 )
+#define SET_BIT(bit,map) ( test_and_set_bit( (bit), (map) ) )
+/*
+ * Timer stats accessor macros.
+ */
+#ifdef CONFIG_TIMER_STATS
+#define TIMER_START_PID(t) ( (t)->start_pid )
+#define TIMER_START_COMM(t) ( (t)->start_comm )
+#else
+#define TIMER_START_PID(t) (-1)
+#define TIMER_START_COMM(t) ( "UNKNOWN" )
+#endif
+/*
+ * Helper macro to return time in usecs.
+ */
+#define CURRENT_TIME_IN_USEC() ({struct timeval tv; \
+		do_gettimeofday(&tv);		\
+		(unsigned long long)tv.tv_sec*1000000ULL + (unsigned long long)tv.tv_usec;})
+
+#if DO_ACPI_S3_SAMPLE
+static u64 startTSC_acpi_s3;
+#endif
+/**/
+/* Required to calculate S0i0 residency counter from non-zero S state counters*/
+#if DO_S_RESIDENCY_SAMPLE || DO_ACPI_S3_SAMPLE
+    /* static u64 startJIFF_s_residency = 0;*/
+static u64 startTSC_s_residency = 0;
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+#define SMP_CALL_FUNCTION(func,ctx,retry,wait)    smp_call_function((func),(ctx),(wait))
+#else
+#define SMP_CALL_FUNCTION(func,ctx,retry,wait)    smp_call_function((func),(ctx),(retry),(wait))
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)
+#define PW_HLIST_FOR_EACH_ENTRY(tpos, pos, head, member) hlist_for_each_entry(tpos, pos, head, member)
+#define PW_HLIST_FOR_EACH_ENTRY_SAFE(tpos, pos, n, head, member) hlist_for_each_entry_safe(tpos, pos, n, head, member)
+#define PW_HLIST_FOR_EACH_ENTRY_RCU(tpos, pos, head, member) hlist_for_each_entry_rcu(tpos, pos, head, member)
+#else /* >= 3.9.0*/
+#define PW_HLIST_FOR_EACH_ENTRY(tpos, pos, head, member) pos = NULL; hlist_for_each_entry(tpos, head, member)
+#define PW_HLIST_FOR_EACH_ENTRY_SAFE(tpos, pos, n, head, member) pos = NULL; hlist_for_each_entry_safe(tpos, n, head, member)
+#define PW_HLIST_FOR_EACH_ENTRY_RCU(tpos, pos, head, member) pos = NULL; hlist_for_each_entry_rcu(tpos, head, member)
+#endif
+
+#define ALLOW_WUWATCH_MSR_READ_WRITE 1
+#if ALLOW_WUWATCH_MSR_READ_WRITE
+#define WUWATCH_RDMSR_ON_CPU(cpu, addr, low, high) ({int __tmp = rdmsr_on_cpu((cpu), (addr), (low), (high)); __tmp;})
+#define WUWATCH_RDMSR(addr, low, high) rdmsr((addr), (low), (high))
+#define WUWATCH_RDMSR_SAFE_ON_CPU(cpu, addr, low, high) ({int __tmp = rdmsr_safe_on_cpu((cpu), (addr), (low), (high)); __tmp;})
+#define WUWATCH_RDMSRL(addr, val) rdmsrl((addr), (val))
+#else
+#define WUWATCH_RDMSR_ON_CPU(cpu, addr, low, high) ({int __tmp = 0; *(low) = 0; *(high) = 0; __tmp;})
+#define WUWATCH_RDMSR(addr, low, high) ({int __tmp = 0; (low) = 0; (high) = 0; __tmp;})
+#define WUWATCH_RDMSR_SAFE_ON_CPU(cpu, addr, low, high) ({int __tmp = 0; *(low) = 0; *(high) = 0; __tmp;})
+#define WUWATCH_RDMSRL(addr, val) ( (val) = 0 )
+#endif /* ALLOW_WUWATCH_MSR_READ*/
+
+/*
+ * Data structure definitions.
+ */
+
+typedef struct tnode tnode_t;
+struct tnode {
+	struct hlist_node list;
+	unsigned long timer_addr;
+	pid_t tid, pid;
+	u64 tsc;
+	s32 init_cpu;
+	u16 is_root_timer:1;
+	u16 trace_sent:1;
+	u16 trace_len:14;
+	unsigned long *trace;
+};
+
+typedef struct hnode hnode_t;
+struct hnode {
+	struct hlist_head head;
+};
+
+typedef struct tblock tblock_t;
+struct tblock {
+	struct tnode *data;
+	tblock_t *next;
+};
+
+typedef struct per_cpu_mem per_cpu_mem_t;
+struct per_cpu_mem {
+	tblock_t *block_list;
+	hnode_t free_list_head;
+};
+
+#define GET_MEM_VARS(cpu) &per_cpu(per_cpu_mem_vars, (cpu))
+#define GET_MY_MEM_VARS(cpu) &__get_cpu_var(per_cpu_mem_vars)
+
+/*
+ * For IRQ # <--> DEV NAME mappings.
+ */
+#if DO_CACHE_IRQ_DEV_NAME_MAPPINGS
+
+typedef struct irq_node irq_node_t;
+struct irq_node {
+	struct hlist_node list;
+	struct rcu_head rcu;
+	int irq;
+	char *name;
+	/*
+	 * We send IRQ # <-> DEV name
+	 * mappings to Ring-3 ONCE PER
+	 * CPU. We need a bitmap to let
+	 * us know which cpus have
+	 * already had this info sent.
+	 *
+	 * FOR NOW, WE ASSUME A MAX OF 64 CPUS!
+	 * (This assumption is enforced in
+	 * 'init_data_structures()')
+	 */
+	unsigned long *cpu_bitmap;
+};
+#define PWR_CPU_BITMAP(node) ( (node)->cpu_bitmap )
+
+typedef struct irq_hash_node irq_hash_node_t;
+struct irq_hash_node {
+	struct hlist_head head;
+};
+#endif /* DO_CACHE_IRQ_DEV_NAME_MAPPINGS*/
+
+#define NUM_IRQ_MAP_BITS 6
+#define NUM_IRQ_MAP_BUCKETS (1UL << NUM_IRQ_MAP_BITS)
+#define IRQ_MAP_HASH_MASK (NUM_IRQ_MAP_BITS - 1)
+/* #define IRQ_MAP_HASH_FUNC(num) (num & IRQ_MAP_HASH_MASK)*/
+#define IRQ_MAP_HASH_FUNC(a) hash_long((u32)a, NUM_IRQ_MAP_BITS)
+
+#define IRQ_LOCK_MASK HASH_LOCK_MASK
+
+#define IRQ_LOCK(i) LOCK(irq_map_locks[(i) & IRQ_LOCK_MASK])
+#define IRQ_UNLOCK(i) UNLOCK(irq_map_locks[(i) & IRQ_LOCK_MASK])
+
+#if DO_USE_CONSTANT_POOL_FOR_WAKELOCK_NAMES
+
+typedef struct wlock_node wlock_node_t;
+struct wlock_node {
+	struct hlist_node list;
+	struct rcu_head rcu;
+	int constant_pool_index;
+	unsigned long hash_val;
+	size_t wakelock_name_len;
+	char *wakelock_name;
+};
+
+typedef struct wlock_hash_node wlock_hash_node_t;
+struct wlock_hash_node {
+	struct hlist_head head;
+};
+
+#define NUM_WLOCK_MAP_BITS 6
+#define NUM_WLOCK_MAP_BUCKETS (1UL << NUM_WLOCK_MAP_BITS)
+#define WLOCK_MAP_HASH_MASK (NUM_WLOCK_MAP_BUCKETS - 1)	/* Used for modulo: x % y == x & (y-1) iff y is pow-of-2 */
+#define WLOCK_MAP_HASH_FUNC(n) pw_hash_string(n)
+
+#define WLOCK_LOCK_MASK HASH_LOCK_MASK
+
+#define WLOCK_LOCK(i) LOCK(wlock_map_locks[(i) & WLOCK_LOCK_MASK])
+#define WLOCK_UNLOCK(i) UNLOCK(wlock_map_locks[(i) & WLOCK_LOCK_MASK])
+
+#endif /* DO_USE_CONSTANT_POOL_FOR_WAKELOCK_NAMES*/
+
+/*
+ * For syscall nodes
+ */
+typedef struct sys_node sys_node_t;
+struct sys_node {
+	struct hlist_node list;
+	pid_t tid, pid;
+	int ref_count, weight;
+};
+
+#define SYS_MAP_BUCKETS_BITS 9
+#define NUM_SYS_MAP_BUCKETS (1UL << SYS_MAP_BUCKETS_BITS)	/* MUST be pow-of-2*/
+#define SYS_MAP_LOCK_BITS 4
+#define NUM_SYS_MAP_LOCKS (1UL << SYS_MAP_LOCK_BITS)	/* MUST be pow-of-2*/
+
+#define SYS_MAP_NODES_HASH(t) hash_32(t, SYS_MAP_BUCKETS_BITS)
+#define SYS_MAP_LOCK_HASH(t) ( (t) & (SYS_MAP_LOCK_BITS - 1) )	/* pow-of-2 modulo*/
+
+#define SYS_MAP_LOCK(index) LOCK(apwr_sys_map_locks[index])
+#define SYS_MAP_UNLOCK(index) UNLOCK(apwr_sys_map_locks[index])
+
+#define GET_SYS_HLIST(index) (apwr_sys_map + index)
+
+/*
+ * Function declarations (incomplete).
+ */
+inline bool is_sleep_syscall_i(long id) __attribute__ ((always_inline));
+inline void sys_enter_helper_i(long id, pid_t tid, pid_t pid)
+    __attribute__ ((always_inline));
+inline void sys_exit_helper_i(long id, pid_t tid, pid_t pid)
+    __attribute__ ((always_inline));
+inline void sched_wakeup_helper_i(struct task_struct *task)
+    __attribute__ ((always_inline));
+static int pw_device_open(struct inode *inode, struct file *file);
+static int pw_device_release(struct inode *inode, struct file *file);
+static ssize_t pw_device_read(struct file *file, char __user * buffer,
+			      size_t length, loff_t * offset);
+static long pw_device_unlocked_ioctl(struct file *filp, unsigned int ioctl_num,
+				     unsigned long ioctl_param);
+#if defined(HAVE_COMPAT_IOCTL) && defined(CONFIG_X86_64)
+static long pw_device_compat_ioctl(struct file *file, unsigned int ioctl_num,
+				   unsigned long ioctl_param);
+#endif
+static long pw_unlocked_handle_ioctl_i(unsigned int ioctl_num,
+				       struct PWCollector_ioctl_arg
+				       *remote_args, unsigned long ioctl_param);
+static int pw_set_platform_res_config_i(struct PWCollector_platform_res_info
+					*remote_info, int size);
+static unsigned int pw_device_poll(struct file *filp, poll_table * wait);
+static int pw_device_mmap(struct file *filp, struct vm_area_struct *vma);
+static int pw_register_dev(void);
+static void pw_unregister_dev(void);
+/* static int pw_read_msr_set_i(struct msr_set *msr_set, int *which_cx, u64 *cx_val);*/
+static int pw_read_msr_info_set_i(struct pw_msr_info_set *msr_set);
+#if DO_WAKELOCK_SAMPLE
+static unsigned long pw_hash_string(const char *data);
+#endif /* DO_WAKELOCK_SAMPLE*/
+static int pw_init_data_structures(void);
+static void pw_destroy_data_structures(void);
+
+/*
+ * Variable declarations.
+ */
+
+/*
+ * Names for SOFTIRQs.
+ * These are taken from "include/linux/interrupt.h"
+ */
+static const char *pw_softirq_to_name[] =
+    { "HI_SOFTIRQ", "TIMER_SOFTIRQ", "NET_TX_SOFTIRQ", "NET_RX_SOFTIRQ",
+"BLOCK_SOFTIRQ", "BLOCK_IOPOLL_SOFTIRQ", "TASKLET_SOFTIRQ", "SCHED_SOFTIRQ", "HRTIMER_SOFTIRQ",
+"RCU_SOFTIRQ" };
+
+/*
+ * For microcode PATCH version.
+ * ONLY useful for MFLD!
+ */
+static u32 __read_mostly micro_patch_ver = 0x0;
+
+/*
+ * Is the device open right now? Used to prevent
+ * concurent access into the same device.
+ */
+#define DEV_IS_OPEN 0		/* see if device is in use*/
+static volatile unsigned long dev_status;
+
+static struct hnode timer_map[NUM_MAP_BUCKETS];
+
+#if DO_CACHE_IRQ_DEV_NAME_MAPPINGS
+static PWCollector_irq_mapping_t *irq_mappings_list = NULL;
+static irq_hash_node_t irq_map[NUM_IRQ_MAP_BUCKETS];
+static int total_num_irq_mappings = 0;
+#endif /*  DO_CACHE_IRQ_DEV_NAME_MAPPINGS*/
+
+#if DO_USE_CONSTANT_POOL_FOR_WAKELOCK_NAMES
+static wlock_hash_node_t wlock_map[NUM_WLOCK_MAP_BUCKETS];
+static int total_num_wlock_mappings = 0;
+#define GET_NEXT_CONSTANT_POOL_INDEX() total_num_wlock_mappings++
+#endif /* DO_USE_CONSTANT_POOL_FOR_WAKELOCK_NAMES*/
+
+DEFINE_PER_CPU(per_cpu_t, per_cpu_counts);
+
+DEFINE_PER_CPU(stats_t, per_cpu_stats);
+
+DEFINE_PER_CPU(CTRL_values_t, CTRL_data_values);
+
+#ifdef __arm__
+DEFINE_PER_CPU(u64, trace_power_prev_time) = 0;
+#endif
+
+static DEFINE_PER_CPU(per_cpu_mem_t, per_cpu_mem_vars);
+
+static DEFINE_PER_CPU(u64, num_local_apic_timer_inters) = 0;
+
+static DEFINE_PER_CPU(u32, pcpu_prev_req_freq) = 0;
+
+static DEFINE_PER_CPU(struct msr_set, pw_pcpu_msr_sets);
+
+static struct pw_msr_info_set *pw_pcpu_msr_info_sets
+    ____cacheline_aligned_in_smp = NULL;
+
+static DEFINE_PER_CPU(u32, pcpu_prev_perf_status_val) = 0;
+
+/*
+ * TPS helper -- required for overhead
+ * measurements.
+ */
+#if DO_IOCTL_STATS
+static DEFINE_PER_CPU(u64, num_inters) = 0;
+#endif
+
+/*
+ * Macro to add newly allocated timer
+ * nodes to individual free lists.
+ */
+#define LINK_FREE_TNODE_ENTRIES(nodes, size, free_head) do{		\
+	int i=0;							\
+	for(i=0; i<(size); ++i){					\
+	    tnode_t *__node = &((nodes)[i]);				\
+	    hlist_add_head(&__node->list, &((free_head)->head));	\
+	}								\
+    }while(0)
+
+/*
+ * Hash locks.
+ */
+static spinlock_t hash_locks[NUM_HASH_LOCKS];
+/*
+ * IRQ Map locks.
+ */
+#if DO_CACHE_IRQ_DEV_NAME_MAPPINGS
+static spinlock_t irq_map_locks[NUM_HASH_LOCKS];
+#endif
+/*
+ * Wakelock map locks
+ */
+#if DO_USE_CONSTANT_POOL_FOR_WAKELOCK_NAMES
+static spinlock_t wlock_map_locks[NUM_HASH_LOCKS];
+#endif
+
+/*
+ * Base operating frequency -- required if
+ * checking turbo frequencies.
+ */
+static __read_mostly u32 base_operating_freq_khz = 0x0;
+/*
+ * Character device file MAJOR
+ * number -- we're now obtaining
+ * this dynamically.
+ */
+static int apwr_dev_major_num = -1;
+/*
+ * Atomic counter used to synchronize TPS probes and
+ * sched wakeups on other cores.
+ */
+#if DO_TPS_EPOCH_COUNTER
+static atomic_t tps_epoch = ATOMIC_INIT(0);
+#endif /* DO_TPS_EPOCH_COUNTER*/
+
+/*
+ * Variables to create the character device file
+ */
+static dev_t apwr_dev;
+static struct cdev *apwr_cdev;
+static struct class *apwr_class = NULL;
+
+#if DO_OVERHEAD_MEASUREMENTS
+/*
+ * Counter to count # of entries
+ * in the timer hash map -- used
+ * for debugging.
+ */
+static atomic_t num_timer_entries = ATOMIC_INIT(0);
+#endif
+/*
+ * The sys map. Individual buckets are unordered.
+ */
+static struct hlist_head apwr_sys_map[NUM_SYS_MAP_BUCKETS];
+/*
+ * Spinlock to guard updates to sys map.
+ */
+static spinlock_t apwr_sys_map_locks[NUM_SYS_MAP_LOCKS];
+/*
+ * These are used for the 'hrtimer_start(...)'
+ * hack.
+ */
+static u32 tick_count = 0;
+static DEFINE_SPINLOCK(tick_count_lock);
+static bool should_probe_on_hrtimer_start = true;
+
+DEFINE_PER_CPU(local_t, sched_timer_found) = LOCAL_INIT(0);
+
+static DEFINE_PER_CPU(local_t, num_samples_produced) = LOCAL_INIT(0);
+static DEFINE_PER_CPU(local_t, num_samples_dropped) = LOCAL_INIT(0);
+/*
+ * Collection time, in seconds. Specified by the user via the 'PW_IOCTL_COLLECTION_TIME'
+ * ioctl. Used ONLY to decide if we should wake up the power collector after resuming
+ * from an S3 (suspend) state.
+ */
+unsigned long pw_collection_time_secs = 0;
+/*
+ * Collection time, in clock ticks. Specified by the user via the 'PW_IOCTL_COLLECTION_TIME'
+ * ioctl. Used ONLY to decide if we should wake up the power collector after resuming
+ * from an S3 (suspend) state.
+ */
+u64 pw_collection_time_ticks = 0;
+/*
+ * Snapshot of 'TSC' time on collection START.
+ */
+u64 pw_collection_start_tsc = 0;
+/*
+ * Suspend {START, STOP} TSC ticks.
+ */
+u64 pw_suspend_start_tsc = 0, pw_suspend_stop_tsc = 0;
+/*
+ * Suspend {START, STOP} S0i3 values.
+ */
+u64 pw_suspend_start_s0i3 = 0, pw_suspend_stop_s0i3 = 0;
+/*
+ * The power collector task. Used ONLY to decide whom to send a 'SIGINT' to.
+ */
+struct task_struct *pw_power_collector_task = NULL;
+/*
+ * Timer used to defer sending SIGINT.
+ * Used ONLY if the device entered ACPI S3 (aka "Suspend-To-Ram") during the
+ * collection.
+ */
+static struct hrtimer pw_acpi_s3_hrtimer;
+/*
+ * Used to record which wakeup event occured first.
+ * Reset on every TPS.
+ */
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct wakeup_event,
+				     wakeup_event_counter) = {
+0, 0, -1, PW_BREAK_TYPE_U, -1, -1};
+
+/*
+ * Did the user mmap our buffers?
+ */
+static bool pw_did_mmap = false;
+
+/*
+ * MACRO helpers to measure function call
+ * times.
+ */
+#if DO_OVERHEAD_MEASUREMENTS
+
+#include "pw_overhead_measurements.h"
+
+/*
+ * For each function that you want to profile,
+ * do the following (e.g. function 'foo'):
+ * **************************************************
+ * DECLARE_OVERHEAD_VARS(foo);
+ * **************************************************
+ * This will declare the two variables required
+ * to keep track of overheads incurred in
+ * calling/servicing 'foo'. Note that the name
+ * that you declare here *MUST* match the function name!
+ */
+
+DECLARE_OVERHEAD_VARS(timer_init);	/* for the "timer_init" family of probes*/
+DECLARE_OVERHEAD_VARS(timer_expire);	/* for the "timer_expire" family of probes*/
+DECLARE_OVERHEAD_VARS(tps);	/* for TPS*/
+DECLARE_OVERHEAD_VARS(tps_lite);	/* for TPS_lite*/
+DECLARE_OVERHEAD_VARS(tpf);	/* for TPF*/
+DECLARE_OVERHEAD_VARS(timer_insert);	/* for "timer_insert"*/
+DECLARE_OVERHEAD_VARS(timer_delete);	/* for "timer_delete"*/
+DECLARE_OVERHEAD_VARS(exit_helper);	/* for "exit_helper"*/
+DECLARE_OVERHEAD_VARS(map_find_unlocked_i);	/* for "map_find_i"*/
+DECLARE_OVERHEAD_VARS(get_next_free_node_i);	/* for "get_next_free_node_i"*/
+DECLARE_OVERHEAD_VARS(ti_helper);	/* for "ti_helper"*/
+DECLARE_OVERHEAD_VARS(inter_common);	/* for "inter_common"*/
+DECLARE_OVERHEAD_VARS(irq_insert);	/* for "irq_insert"*/
+DECLARE_OVERHEAD_VARS(find_irq_node_i);	/* for "find_irq_node_i"*/
+DECLARE_OVERHEAD_VARS(wlock_insert);	/* for "wlock_insert"*/
+DECLARE_OVERHEAD_VARS(find_wlock_node_i);	/* for "find_wlock_node_i"*/
+DECLARE_OVERHEAD_VARS(sys_enter_helper_i);
+DECLARE_OVERHEAD_VARS(sys_exit_helper_i);
+
+/*
+ * Macros to measure overheads
+ */
+#define DO_PER_CPU_OVERHEAD_FUNC(func, ...) do{		\
+	u64 *__v = &__get_cpu_var(func##_elapsed_time);	\
+	u64 tmp_1 = 0, tmp_2 = 0;			\
+	local_inc(&__get_cpu_var(func##_num_iters));	\
+	tscval(&tmp_1);					\
+	{						\
+	    func(__VA_ARGS__);				\
+	}						\
+	tscval(&tmp_2);					\
+	*(__v) += (tmp_2 - tmp_1);			\
+    }while(0)
+
+#define DO_PER_CPU_OVERHEAD_FUNC_RET(ret, func, ...) do{	\
+	u64 *__v = &__get_cpu_var(func##_elapsed_time);		\
+	u64 tmp_1 = 0, tmp_2 = 0;				\
+	local_inc(&__get_cpu_var(func##_num_iters));		\
+	tscval(&tmp_1);						\
+	{							\
+	    ret = func(__VA_ARGS__);				\
+	}							\
+	tscval(&tmp_2);						\
+	*(__v) += (tmp_2 - tmp_1);				\
+    }while(0)
+
+#else /* DO_OVERHEAD_MEASUREMENTS*/
+
+#define DO_PER_CPU_OVERHEAD(v, func, ...) func(__VA_ARGS__)
+#define DO_PER_CPU_OVERHEAD_FUNC(func, ...) func(__VA_ARGS__)
+#define DO_PER_CPU_OVERHEAD_FUNC_RET(ret, func, ...) ret = func(__VA_ARGS__)
+
+#endif /* DO_OVERHEAD_MEASUREMENTS*/
+
+/*
+ * File operations exported by the driver.
+ */
+struct file_operations Fops = {
+	.open = &pw_device_open,
+	.read = &pw_device_read,
+	.poll = &pw_device_poll,
+	/* .ioctl = device_ioctl,*/
+	.unlocked_ioctl = &pw_device_unlocked_ioctl,
+#if defined(HAVE_COMPAT_IOCTL) && defined(CONFIG_X86_64)
+	.compat_ioctl = &pw_device_compat_ioctl,
+#endif /* COMPAT && x64*/
+	.mmap = &pw_device_mmap,
+	.release = &pw_device_release,
+};
+
+/*
+ * Functions.
+ */
+
+/* Helper function to get TSC */
+static inline void tscval(u64 * v)
+{
+	if (!v) {
+		return;
+	}
+#ifndef __arm__
+#if READ_MSR_FOR_TSC && ALLOW_WUWATCH_MSR_READ_WRITE
+	{
+		u64 res;
+		WUWATCH_RDMSRL(0x10, res);
+		*v = res;
+		/* printk(KERN_INFO "TSC = %llu\n", res);*/
+	}
+#else
+	{
+		unsigned int aux;
+		rdtscpll(*v, aux);
+		/* printk(KERN_INFO "TSCPLL = %llu\n", *v);*/
+	}
+#endif /* READ_MSR_FOR_TSC*/
+#else
+	{
+		struct timespec ts;
+		ktime_get_ts(&ts);
+		*v = (u64) ts.tv_sec * 1000000000ULL + (u64) ts.tv_nsec;
+	}
+#endif /* not def __arm__*/
+};
+
+/*
+ * Initialization and termination routines.
+ */
+static void destroy_timer_map(void)
+{
+	/*
+	 * NOP: nothing to free here -- timer nodes
+	 * are freed when their corresponding
+	 * (per-cpu) blocks are freed.
+	 */
+};
+
+static int init_timer_map(void)
+{
+	int i = 0;
+
+	for (i = 0; i < NUM_MAP_BUCKETS; ++i) {
+		INIT_HLIST_HEAD(&timer_map[i].head);
+	}
+
+	for (i = 0; i < NUM_HASH_LOCKS; ++i) {
+		spin_lock_init(&hash_locks[i]);
+	}
+
+	return SUCCESS;
+};
+
+#if DO_USE_CONSTANT_POOL_FOR_WAKELOCK_NAMES
+
+static int init_wlock_map(void)
+{
+	int i = 0;
+
+	for (i = 0; i < NUM_WLOCK_MAP_BUCKETS; ++i) {
+		INIT_HLIST_HEAD(&wlock_map[i].head);
+	}
+
+	/*
+	 * Init locks
+	 */
+	for (i = 0; i < NUM_HASH_LOCKS; ++i) {
+		spin_lock_init(&wlock_map_locks[i]);
+	}
+
+	total_num_wlock_mappings = 0;
+
+	return SUCCESS;
+};
+
+static void wlock_destroy_node(struct wlock_node *node)
+{
+	if (node->wakelock_name) {
+		pw_kfree(node->wakelock_name);
+		node->wakelock_name = NULL;
+	}
+	pw_kfree(node);
+};
+
+static void wlock_destroy_callback(struct rcu_head *head)
+{
+	struct wlock_node *node = container_of(head, struct wlock_node, rcu);
+
+	if (node) {
+		wlock_destroy_node(node);
+	}
+};
+
+static void destroy_wlock_map(void)
+{
+	int i = 0;
+
+	for (i = 0; i < NUM_WLOCK_MAP_BUCKETS; ++i) {
+		struct hlist_head *head = &wlock_map[i].head;
+		while (!hlist_empty(head)) {
+			struct wlock_node *node =
+			    hlist_entry(head->first, struct wlock_node, list);
+			hlist_del(&node->list);
+			wlock_destroy_callback(&node->rcu);
+		}
+	}
+	total_num_wlock_mappings = 0;
+};
+
+#endif /* DO_USE_CONSTANT_POOL_FOR_WAKELOCK_NAMES*/
+
+#if DO_CACHE_IRQ_DEV_NAME_MAPPINGS
+
+static int init_irq_map(void)
+{
+	int i = 0;
+
+	for (i = 0; i < NUM_IRQ_MAP_BUCKETS; ++i) {
+		INIT_HLIST_HEAD(&irq_map[i].head);
+	}
+
+	/*
+	 * Init locks
+	 */
+	for (i = 0; i < NUM_HASH_LOCKS; ++i) {
+		spin_lock_init(&irq_map_locks[i]);
+	}
+
+	total_num_irq_mappings = 0;
+
+	return SUCCESS;
+};
+
+static void irq_destroy_callback(struct rcu_head *head)
+{
+	struct irq_node *node = container_of(head, struct irq_node, rcu);
+
+	if (!node) {
+		return;
+	}
+
+	if (node->name) {
+		pw_kfree(node->name);
+		node->name = NULL;
+	}
+	if (node->cpu_bitmap) {
+		pw_kfree(node->cpu_bitmap);
+		node->cpu_bitmap = NULL;
+	}
+	pw_kfree(node);
+};
+
+static void destroy_irq_map(void)
+{
+	int i = 0;
+
+	for (i = 0; i < NUM_IRQ_MAP_BUCKETS; ++i) {
+		struct hlist_head *head = &irq_map[i].head;
+		while (!hlist_empty(head)) {
+			struct irq_node *node =
+			    hlist_entry(head->first, struct irq_node, list);
+			if (!node) {
+				continue;
+			}
+			hlist_del(&node->list);
+			irq_destroy_callback(&node->rcu);
+		}
+	}
+
+	if (irq_mappings_list) {
+		pw_kfree(irq_mappings_list);
+		irq_mappings_list = NULL;
+	}
+};
+
+#endif /* DO_CACHE_IRQ_DEV_NAME_MAPPINGS*/
+
+static void free_timer_block(tblock_t * block)
+{
+	while (block) {
+		tblock_t *next = block->next;
+		if (block->data) {
+			int i = 0;
+			for (i = 0; i < NUM_TIMER_NODES_PER_BLOCK; ++i) {
+				/*
+				 * Check trace, just to be sure
+				 * (We shouldn't need this -- 'timer_destroy()'
+				 * explicitly checks and frees call trace
+				 * arrays).
+				 */
+				if (block->data[i].trace) {
+					pw_kfree(block->data[i].trace);
+				}
+			}
+			pw_kfree(block->data);
+		}
+		pw_kfree(block);
+		block = next;
+	}
+	return;
+};
+
+static tblock_t *allocate_new_timer_block(struct hnode *free_head)
+{
+	tblock_t *block = pw_kmalloc(sizeof(tblock_t), GFP_ATOMIC);
+	if (!block) {
+		return NULL;
+	}
+	block->data =
+	    pw_kmalloc(sizeof(tnode_t) * NUM_TIMER_NODES_PER_BLOCK, GFP_ATOMIC);
+	if (!block->data) {
+		pw_kfree(block);
+		return NULL;
+	}
+	memset(block->data, 0, sizeof(tnode_t) * NUM_TIMER_NODES_PER_BLOCK);
+	if (free_head) {
+		LINK_FREE_TNODE_ENTRIES(block->data, NUM_TIMER_NODES_PER_BLOCK,
+					free_head);
+	}
+	block->next = NULL;
+	return block;
+};
+
+static void destroy_per_cpu_timer_blocks(void)
+{
+	int cpu = -1;
+
+	for_each_online_cpu(cpu) {
+		per_cpu_mem_t *pcpu_mem = GET_MEM_VARS(cpu);
+		tblock_t *blocks = pcpu_mem->block_list;
+		free_timer_block(blocks);
+	}
+};
+
+static int init_per_cpu_timer_blocks(void)
+{
+	int cpu = -1;
+
+	for_each_online_cpu(cpu) {
+		per_cpu_mem_t *pcpu_mem = GET_MEM_VARS(cpu);
+		struct hnode *free_head = &pcpu_mem->free_list_head;
+		BUG_ON(!free_head);
+		INIT_HLIST_HEAD(&free_head->head);
+		if (!
+		    (pcpu_mem->block_list =
+		     allocate_new_timer_block(free_head))) {
+			return -ERROR;
+		}
+	}
+
+	return SUCCESS;
+};
+
+void free_sys_node_i(sys_node_t * node)
+{
+	if (!node) {
+		return;
+	}
+	pw_kfree(node);
+};
+
+sys_node_t *alloc_new_sys_node_i(pid_t tid, pid_t pid)
+{
+	sys_node_t *node = pw_kmalloc(sizeof(sys_node_t), GFP_ATOMIC);
+	if (!node) {
+		pw_pr_error("ERROR: could NOT allocate new sys node!\n");
+		return NULL;
+	}
+	node->tid = tid;
+	node->pid = pid;
+	node->ref_count = node->weight = 1;
+	INIT_HLIST_NODE(&node->list);
+	return node;
+};
+
+int destroy_sys_list(void)
+{
+	int size = 0, i = 0;
+
+	for (i = 0; i < NUM_SYS_MAP_BUCKETS; ++i) {
+		struct hlist_head *apwr_sys_list = GET_SYS_HLIST(i);
+		int tmp_size = 0;
+		while (!hlist_empty(apwr_sys_list)) {
+			sys_node_t *node =
+			    hlist_entry(apwr_sys_list->first, struct sys_node,
+					list);
+			hlist_del(&node->list);
+			++tmp_size;
+			free_sys_node_i(node);
+			++size;
+		}
+		if (tmp_size) {
+			OUTPUT(3, KERN_INFO "[%d] --> %d\n", i, tmp_size);
+		}
+	}
+
+#if DO_PRINT_COLLECTION_STATS
+	printk(KERN_INFO "SYS_LIST_SIZE = %d\n", size);
+#endif
+
+	return SUCCESS;
+};
+
+int init_sys_list(void)
+{
+	int i = 0;
+
+	for (i = 0; i < NUM_SYS_MAP_BUCKETS; ++i) {
+		INIT_HLIST_HEAD(GET_SYS_HLIST(i));
+	}
+
+	for (i = 0; i < NUM_SYS_MAP_LOCKS; ++i) {
+		spin_lock_init(apwr_sys_map_locks + i);
+	}
+
+	return SUCCESS;
+};
+
+/*
+ * MSR info set alloc/dealloc routines.
+ */
+static void pw_reset_msr_info_sets(void)
+{
+	int cpu = -1;
+	if (likely(pw_pcpu_msr_info_sets)) {
+		for_each_possible_cpu(cpu) {
+			struct pw_msr_info_set *info_set =
+			    pw_pcpu_msr_info_sets + cpu;
+			if (likely(info_set->prev_msr_vals)) {
+				pw_kfree(info_set->prev_msr_vals);
+			}
+			if (likely(info_set->curr_msr_count)) {
+				pw_kfree(info_set->curr_msr_count);
+			}
+			if (likely(info_set->c_multi_msg_mem)) {
+				pw_kfree(info_set->c_multi_msg_mem);
+			}
+			memset(info_set, 0, sizeof(*info_set));
+		}
+	}
+};
+
+static void pw_destroy_msr_info_sets(void)
+{
+	if (likely(pw_pcpu_msr_info_sets)) {
+		pw_reset_msr_info_sets();
+		pw_kfree(pw_pcpu_msr_info_sets);
+		pw_pcpu_msr_info_sets = NULL;
+	}
+};
+
+static int pw_init_msr_info_sets(void)
+{
+	BUG_ON(pw_max_num_cpus <= 0);
+	pw_pcpu_msr_info_sets =
+	    pw_kmalloc(sizeof(struct pw_msr_info_set) * pw_max_num_cpus,
+		       GFP_KERNEL);
+	if (!pw_pcpu_msr_info_sets) {
+		pw_pr_error("ERROR allocating space for info sets!\n");
+		return -ERROR;
+	}
+	memset(pw_pcpu_msr_info_sets, 0,
+	       sizeof(struct pw_msr_info_set) * pw_max_num_cpus);
+	return SUCCESS;
+};
+
+static void pw_destroy_data_structures(void)
+{
+	destroy_timer_map();
+
+	destroy_per_cpu_timer_blocks();
+
+#if DO_USE_CONSTANT_POOL_FOR_WAKELOCK_NAMES
+	destroy_wlock_map();
+#endif /* DO_USE_CONSTANT_POOL_FOR_WAKELOCK_NAMES*/
+
+#if DO_CACHE_IRQ_DEV_NAME_MAPPINGS
+	destroy_irq_map();
+#endif /* DO_CACHE_IRQ_DEV_NAME_MAPPINGS*/
+
+	destroy_sys_list();
+
+	pw_destroy_per_cpu_buffers();
+
+	pw_destroy_msr_info_sets();
+
+	{
+		/*
+		 * Print some stats about # samples produced and # dropped.
+		 */
+#if DO_PRINT_COLLECTION_STATS
+		printk(KERN_INFO
+		       "DEBUG: There were %llu / %llu dropped samples!\n",
+		       pw_num_samples_dropped, pw_num_samples_produced);
+#endif
+	}
+};
+
+static int pw_init_data_structures(void)
+{
+	/*
+	 * Find the # CPUs in this system.
+	 */
+	/* pw_max_num_cpus = num_online_cpus();*/
+	pw_max_num_cpus = num_possible_cpus();
+
+	/*
+	 * Init the (per-cpu) free lists
+	 * for timer mappings.
+	 */
+	if (init_per_cpu_timer_blocks()) {
+		pw_pr_error
+		    ("ERROR: could NOT initialize the per-cpu timer blocks!\n");
+		pw_destroy_data_structures();
+		return -ERROR;
+	}
+
+	if (init_timer_map()) {
+		pw_pr_error("ERROR: could NOT initialize timer map!\n");
+		pw_destroy_data_structures();
+		return -ERROR;
+	}
+#if DO_CACHE_IRQ_DEV_NAME_MAPPINGS
+	if (init_irq_map()) {
+		pw_pr_error("ERROR: could NOT initialize irq map!\n");
+		pw_destroy_data_structures();
+		return -ERROR;
+	}
+#endif /* DO_CACHE_IRQ_DEV_NAME_MAPPINGS*/
+
+#if DO_USE_CONSTANT_POOL_FOR_WAKELOCK_NAMES
+	if (init_wlock_map()) {
+		pw_pr_error("ERROR: could NOT initialize wlock map!\n");
+		pw_destroy_data_structures();
+		return -ERROR;
+	}
+#endif /* DO_USE_CONSTANT_POOL_FOR_WAKELOCK_NAMES*/
+
+	if (init_sys_list()) {
+		pw_pr_error("ERROR: could NOT initialize syscall map!\n");
+		pw_destroy_data_structures();
+		return -ERROR;
+	}
+
+	if (pw_init_per_cpu_buffers()) {
+		pw_pr_error("ERROR initializing per-cpu output buffers\n");
+		pw_destroy_data_structures();
+		return -ERROR;
+	}
+
+	if (pw_init_msr_info_sets()) {
+		pw_pr_error("ERROR initializing MSR info sets\n");
+		pw_destroy_data_structures();
+		return -ERROR;
+	}
+
+	return SUCCESS;
+};
+
+/*
+ * Free list manipulation routines.
+ */
+
+static int init_tnode_i(tnode_t * node, unsigned long timer_addr, pid_t tid,
+			pid_t pid, u64 tsc, s32 init_cpu, int trace_len,
+			unsigned long *trace)
+{
+
+	if (!node) {
+		return -ERROR;
+	}
+
+	if (node->trace) {
+		pw_kfree(node->trace);
+		node->trace = NULL;
+	}
+
+	node->timer_addr = timer_addr;
+	node->tsc = tsc;
+	node->tid = tid;
+	node->pid = pid;
+	node->init_cpu = init_cpu;
+	node->trace_sent = 0;
+	node->trace_len = trace_len;
+
+	if (trace_len > 0) {
+		/*
+		 * Root timer!
+		 */
+		node->is_root_timer = 1;
+		node->trace =
+		    pw_kmalloc(sizeof(unsigned long) * trace_len, GFP_ATOMIC);
+		if (!node->trace) {
+			pw_pr_error
+			    ("ERROR: could NOT allocate memory for backtrace!\n");
+			/* pw_kfree(node);*/
+			return -ERROR;
+		}
+		memcpy(node->trace, trace, sizeof(unsigned long) * trace_len);	/* dst, src*/
+	}
+
+	/*
+	 * Ensure everyone sees this...
+	 */
+	smp_mb();
+
+	return SUCCESS;
+};
+
+static tnode_t *get_next_free_tnode_i(unsigned long timer_addr, pid_t tid,
+				      pid_t pid, u64 tsc, s32 init_cpu,
+				      int trace_len, unsigned long *trace)
+{
+	per_cpu_mem_t *pcpu_mem = GET_MY_MEM_VARS();
+	struct hnode *free_head = &pcpu_mem->free_list_head;
+	struct hlist_head *head = &free_head->head;
+
+	if (hlist_empty(head)) {
+		tblock_t *block = allocate_new_timer_block(free_head);
+		if (block) {
+			block->next = pcpu_mem->block_list;
+			pcpu_mem->block_list = block;
+		}
+		OUTPUT(3, KERN_INFO "[%d]: ALLOCATED A NEW TIMER BLOCK!\n",
+		       CPU());
+	}
+
+	if (!hlist_empty(head)) {
+		struct tnode *node =
+		    hlist_entry(head->first, struct tnode, list);
+		hlist_del(&node->list);
+		/*
+		 * 'kmalloc' doesn't zero out memory -- set
+		 * 'trace' to NULL to avoid an invalid
+		 * 'free' in 'init_tnode_i(...)' just to
+		 * be sure (Shouldn't need to have to
+		 * do this -- 'destroy_timer()' *should*
+		 * have handled it for us).
+		 */
+		node->trace = NULL;
+
+		if (init_tnode_i
+		    (node, timer_addr, tid, pid, tsc, init_cpu, trace_len,
+		     trace)) {
+			/*
+			 * Backtrace couldn't be inited -- re-enqueue
+			 * onto the free-list.
+			 */
+			node->trace = NULL;
+			hlist_add_head(&node->list, head);
+			return NULL;
+		}
+		return node;
+	}
+	return NULL;
+};
+
+static void timer_destroy(struct tnode *node)
+{
+	per_cpu_mem_t *pcpu_mem = GET_MY_MEM_VARS();
+	struct hnode *free_head = &pcpu_mem->free_list_head;
+
+	if (!node) {
+		return;
+	}
+
+	OUTPUT(3, KERN_INFO "DESTROYING %p\n", node);
+
+	if (node->trace) {
+		pw_kfree(node->trace);
+		node->trace = NULL;
+	}
+
+	hlist_add_head(&node->list, &((free_head)->head));
+};
+
+/*
+ * Hash map routines.
+ */
+
+static tnode_t *timer_find(unsigned long timer_addr, pid_t tid)
+{
+	int idx = TIMER_HASH_FUNC(timer_addr);
+	tnode_t *node = NULL, *retVal = NULL;
+	struct hlist_node *curr = NULL;
+	struct hlist_head *head = NULL;
+
+	HASH_LOCK(idx);
+	{
+		head = &timer_map[idx].head;
+
+		PW_HLIST_FOR_EACH_ENTRY(node, curr, head, list) {
+			if (node->timer_addr == timer_addr
+			    && (node->tid == tid || tid < 0)) {
+				retVal = node;
+				break;
+			}
+		}
+	}
+	HASH_UNLOCK(idx);
+
+	return retVal;
+};
+
+static void timer_insert(unsigned long timer_addr, pid_t tid, pid_t pid,
+			 u64 tsc, s32 init_cpu, int trace_len,
+			 unsigned long *trace)
+{
+	int idx = TIMER_HASH_FUNC(timer_addr);
+	struct hlist_node *curr = NULL;
+	struct hlist_head *head = NULL;
+	struct tnode *node = NULL, *new_node = NULL;
+	bool found = false;
+
+	HASH_LOCK(idx);
+	{
+		head = &timer_map[idx].head;
+
+		PW_HLIST_FOR_EACH_ENTRY(node, curr, head, list) {
+			if (node->timer_addr == timer_addr) {
+				/*
+				 * Update-in-place.
+				 */
+				OUTPUT(3,
+				       KERN_INFO
+				       "Timer %p UPDATING IN PLACE! Node = %p, Trace = %p\n",
+				       (void *)timer_addr, node, node->trace);
+				init_tnode_i(node, timer_addr, tid, pid, tsc,
+					     init_cpu, trace_len, trace);
+				found = true;
+				break;
+			}
+		}
+
+		if (!found) {
+			/*
+			 * Insert a new entry here.
+			 */
+			new_node =
+			    get_next_free_tnode_i(timer_addr, tid, pid, tsc,
+						  init_cpu, trace_len, trace);
+			if (likely(new_node)) {
+				hlist_add_head(&new_node->list,
+					       &timer_map[idx].head);
+#if DO_OVERHEAD_MEASUREMENTS
+				{
+					smp_mb();
+					atomic_inc(&num_timer_entries);
+				}
+#endif
+			} else {	/* !new_node*/
+				pw_pr_error
+				    ("ERROR: could NOT allocate new timer node!\n");
+			}
+		}
+	}
+	HASH_UNLOCK(idx);
+
+	return;
+};
+
+static int timer_delete(unsigned long timer_addr, pid_t tid)
+{
+	int idx = TIMER_HASH_FUNC(timer_addr);
+	tnode_t *node = NULL, *found_node = NULL;
+	struct hlist_node *curr = NULL, *next = NULL;
+	struct hlist_head *head = NULL;
+	int retVal = -ERROR;
+
+	HASH_LOCK(idx);
+	{
+		head = &timer_map[idx].head;
+
+		PW_HLIST_FOR_EACH_ENTRY_SAFE(node, curr, next, head, list) {
+			/* if(node->timer_addr == timer_addr && node->tid == tid){*/
+			if (node->timer_addr == timer_addr) {
+				if (node->tid != tid) {
+					OUTPUT(0,
+					       KERN_INFO
+					       "WARNING: stale timer tid value? node tid = %d, task tid = %d\n",
+					       node->tid, tid);
+				}
+				hlist_del(&node->list);
+				found_node = node;
+				retVal = SUCCESS;
+				OUTPUT(3,
+				       KERN_INFO
+				       "[%d]: TIMER_DELETE FOUND HRT = %p\n",
+				       tid, (void *)timer_addr);
+				break;
+			}
+		}
+	}
+	HASH_UNLOCK(idx);
+
+	if (found_node) {
+		timer_destroy(found_node);
+	}
+
+	return retVal;
+};
+
+static void delete_all_non_kernel_timers(void)
+{
+	struct tnode *node = NULL;
+	struct hlist_node *curr = NULL, *next = NULL;
+	int i = 0, num_timers = 0;
+
+	for (i = 0; i < NUM_MAP_BUCKETS; ++i) {
+		HASH_LOCK(i);
+		{
+			PW_HLIST_FOR_EACH_ENTRY_SAFE(node, curr, next,
+						     &timer_map[i].head, list) {
+				if (node->is_root_timer == 0) {
+					++num_timers;
+					OUTPUT(3,
+					       KERN_INFO
+					       "[%d]: Timer %p (Node %p) has TRACE = %p\n",
+					       node->tid,
+					       (void *)node->timer_addr, node,
+					       node->trace);
+					hlist_del(&node->list);
+					timer_destroy(node);
+				}
+			}
+		}
+		HASH_UNLOCK(i);
+	}
+};
+
+static void delete_timers_for_tid(pid_t tid)
+{
+	struct tnode *node = NULL;
+	struct hlist_node *curr = NULL, *next = NULL;
+	int i = 0, num_timers = 0;
+
+	for (i = 0; i < NUM_MAP_BUCKETS; ++i) {
+		HASH_LOCK(i);
+		{
+			PW_HLIST_FOR_EACH_ENTRY_SAFE(node, curr, next,
+						     &timer_map[i].head, list) {
+				if (node->is_root_timer == 0
+				    && node->tid == tid) {
+					++num_timers;
+					OUTPUT(3,
+					       KERN_INFO
+					       "[%d]: Timer %p (Node %p) has TRACE = %p\n",
+					       tid, (void *)node->timer_addr,
+					       node, node->trace);
+					hlist_del(&node->list);
+					timer_destroy(node);
+				}
+			}
+		}
+		HASH_UNLOCK(i);
+	}
+
+	OUTPUT(3, KERN_INFO "[%d]: # timers = %d\n", tid, num_timers);
+};
+
+static int get_num_timers(void)
+{
+	tnode_t *node = NULL;
+	struct hlist_node *curr = NULL;
+	int i = 0, num = 0;
+
+	for (i = 0; i < NUM_MAP_BUCKETS; ++i) {
+		PW_HLIST_FOR_EACH_ENTRY(node, curr, &timer_map[i].head, list) {
+			++num;
+			OUTPUT(3, KERN_INFO "[%d]: %d --> %p\n", i, node->tid,
+			       (void *)node->timer_addr);
+		}
+	}
+
+	return num;
+};
+
+#if DO_USE_CONSTANT_POOL_FOR_WAKELOCK_NAMES
+
+#if DO_WAKELOCK_SAMPLE
+static unsigned long pw_hash_string(const char *data)
+{
+	unsigned long hash = 0;
+	unsigned char c;
+	char *str = (char *)data;
+
+	BUG_ON(!data);
+
+	while ((c = *str++)) {
+		hash = c + (hash << 6) + (hash << 16) - hash;
+	}
+	return hash;
+};
+
+static wlock_node_t *get_next_free_wlock_node_i(unsigned long hash,
+						size_t wlock_name_len,
+						const char *wlock_name)
+{
+	wlock_node_t *node = pw_kmalloc(sizeof(wlock_node_t), GFP_ATOMIC);
+
+	if (likely(node)) {
+		memset(node, 0, sizeof(wlock_node_t));
+		node->hash_val = hash;
+
+		INIT_HLIST_NODE(&node->list);
+
+		if (!(node->wakelock_name = pw_kstrdup(wlock_name, GFP_ATOMIC))) {
+			pw_pr_error
+			    ("ERROR: could NOT kstrdup wlock device name: %s\n",
+			     wlock_name);
+			pw_kfree(node);
+			node = NULL;
+		} else {
+			node->wakelock_name_len = wlock_name_len;
+		}
+	} else {
+		pw_pr_error("ERROR: could NOT allocate new wlock node!\n");
+	}
+
+	return node;
+};
+
+/*
+ * Check if the given wlock # <-> DEV Name mapping exists and, if
+ * it does, whether this mapping was sent for the given 'cpu'
+ * (We need to send each such mapping ONCE PER CPU to ensure it is
+ * received BEFORE a corresponding wlock C-state wakeup).
+ */
+static int find_wlock_node_i(unsigned long hash, size_t wlock_name_len,
+			     const char *wlock_name)
+{
+	wlock_node_t *node = NULL;
+	struct hlist_node *curr = NULL;
+	int idx = hash & WLOCK_MAP_HASH_MASK;
+	int cp_index = -1;
+
+	rcu_read_lock();
+	{
+		PW_HLIST_FOR_EACH_ENTRY_RCU(node, curr, &wlock_map[idx].head,
+					    list) {
+			/*printk(KERN_INFO "hash_val = %lu, name = %s, cp_index = %d\n", node->hash_val, node->wakelock_name, node->constant_pool_index);*/
+			if (node->hash_val == hash
+			    && node->wakelock_name_len == wlock_name_len
+			    && !strcmp(node->wakelock_name, wlock_name)) {
+				cp_index = node->constant_pool_index;
+				break;
+			}
+		}
+	}
+	rcu_read_unlock();
+
+	return cp_index;
+};
+
+static pw_mapping_type_t wlock_insert(size_t wlock_name_len,
+				      const char *wlock_name, int *cp_index)
+{
+	wlock_node_t *node = NULL;
+	unsigned long hash = WLOCK_MAP_HASH_FUNC(wlock_name);
+	pw_mapping_type_t retVal = PW_MAPPING_ERROR;
+
+	if (!wlock_name || !cp_index) {
+		pw_pr_error("ERROR: NULL name/index?!\n");
+		return PW_MAPPING_ERROR;
+	}
+
+	*cp_index = find_wlock_node_i(hash, wlock_name_len, wlock_name);
+
+	/*printk(KERN_INFO "wlock_insert: cp_index = %d, name = %s\n", *cp_index, wlock_name);*/
+
+	if (*cp_index >= 0) {
+		/*
+		 * Mapping FOUND!
+		 */
+		/*printk(KERN_INFO "OK: mapping already exists for %s (cp_index = %d)\n", wlock_name, *cp_index);*/
+		return PW_MAPPING_EXISTS;
+	}
+
+	node = get_next_free_wlock_node_i(hash, wlock_name_len, wlock_name);
+
+	if (unlikely(node == NULL)) {
+		pw_pr_error
+		    ("ERROR: could NOT allocate node for wlock insertion!\n");
+		return PW_MAPPING_ERROR;
+	}
+
+	WLOCK_LOCK(hash);
+	{
+		int idx = hash & WLOCK_MAP_HASH_MASK;
+		wlock_node_t *old_node = NULL;
+		struct hlist_node *curr = NULL;
+		/*
+		 * It is THEORETICALLY possible that the same wakelock name was passed to 'acquire' twice and that
+		 * a different process inserted an entry into the wakelock after our check and before we could insert
+		 * (i.e. a race condition). Check for that first.
+		 */
+		PW_HLIST_FOR_EACH_ENTRY(old_node, curr, &wlock_map[idx].head,
+					list) {
+			if (old_node->hash_val == hash
+			    && old_node->wakelock_name_len == wlock_name_len
+			    && !strcmp(old_node->wakelock_name, wlock_name)) {
+				*cp_index = old_node->constant_pool_index;
+				/*printk(KERN_INFO "wlock mapping EXISTS: cp_index = %d, name = %s\n", *cp_index, wlock_name);*/
+				break;
+			}
+		}
+		if (likely(*cp_index < 0)) {
+			/*
+			 * OK: insert a new node.
+			 */
+			*cp_index = node->constant_pool_index =
+			    GET_NEXT_CONSTANT_POOL_INDEX();
+			hlist_add_head_rcu(&node->list, &wlock_map[idx].head);
+			retVal = PW_NEW_MAPPING_CREATED;
+			/*printk(KERN_INFO "CREATED new wlock mapping: cp_index = %d, name = %s\n", *cp_index, wlock_name);*/
+		} else {
+			/*
+			 * Hmnnn ... a race condition. Warn because this is very unlikely!
+			 */
+			/*printk(KERN_INFO "WARNING: race condition detected for wlock insert for node %s\n", wlock_name);*/
+			wlock_destroy_node(node);
+			retVal = PW_MAPPING_EXISTS;
+		}
+	}
+	WLOCK_UNLOCK(hash);
+
+	return retVal;
+};
+#endif /* DO_WAKELOCK_SAMPLE*/
+
+/*
+ * INTERNAL HELPER: retrieve number of
+ * mappings in the wlock mappings list.
+ */
+#if 0
+#ifndef __arm__
+static int get_num_wlock_mappings(void)
+{
+	int retVal = 0;
+	int i = 0;
+	wlock_node_t *node = NULL;
+	struct hlist_node *curr = NULL;
+
+	for (i = 0; i < NUM_WLOCK_MAP_BUCKETS; ++i)
+		PW_HLIST_FOR_EACH_ENTRY(node, curr, &wlock_map[i].head, list) {
+		++retVal;
+		OUTPUT(0, KERN_INFO "[%d]: wlock Num=%d, Dev=%s\n", i,
+		       node->wlock, node->name);
+		}
+
+	return retVal;
+
+};
+#endif /* __arm__*/
+#endif /* 0*/
+#endif /* DO_USE_CONSTANT_POOL_FOR_WAKELOCK_NAMES*/
+
+/*
+ * IRQ list manipulation routines.
+ */
+#if DO_CACHE_IRQ_DEV_NAME_MAPPINGS
+
+static irq_node_t *get_next_free_irq_node_i(int cpu, int irq_num,
+					    const char *irq_name)
+{
+	irq_node_t *node = pw_kmalloc(sizeof(irq_node_t), GFP_ATOMIC);
+
+	if (likely(node)) {
+		memset(node, 0, sizeof(irq_node_t));
+		node->irq = irq_num;
+		/*
+		 * Set current CPU bitmap.
+		 */
+		node->cpu_bitmap =
+		    pw_kmalloc(sizeof(unsigned long) * NUM_BITMAP_BUCKETS,
+			       GFP_ATOMIC);
+		if (unlikely(!node->cpu_bitmap)) {
+			pw_pr_error
+			    ("ERROR: could NOT allocate a bitmap for the new irq_node!\n");
+			pw_kfree(node);
+			return NULL;
+		}
+		memset(node->cpu_bitmap, 0,
+		       sizeof(unsigned long) * NUM_BITMAP_BUCKETS);
+		SET_BIT(cpu, PWR_CPU_BITMAP(node));
+
+		INIT_HLIST_NODE(&node->list);
+
+		if (!(node->name = pw_kstrdup(irq_name, GFP_ATOMIC))) {
+			pw_pr_error
+			    ("ERROR: could NOT kstrdup irq device name: %s\n",
+			     irq_name);
+			pw_kfree(node->cpu_bitmap);
+			pw_kfree(node);
+			node = NULL;
+		}
+	} else {
+		pw_pr_error("ERROR: could NOT allocate new irq node!\n");
+	}
+
+	return node;
+
+};
+
+/*
+ * Check if the given IRQ # <-> DEV Name mapping exists and, if
+ * it does, whether this mapping was sent for the given 'cpu'
+ * (We need to send each such mapping ONCE PER CPU to ensure it is
+ * received BEFORE a corresponding IRQ C-state wakeup).
+ */
+static bool find_irq_node_i(int cpu, int irq_num, const char *irq_name,
+			    int *index, bool * was_mapping_sent)
+{
+	irq_node_t *node = NULL;
+	struct hlist_node *curr = NULL;
+	int idx = IRQ_MAP_HASH_FUNC(irq_num);
+
+	*index = idx;
+
+	rcu_read_lock();
+
+	PW_HLIST_FOR_EACH_ENTRY_RCU(node, curr, &irq_map[idx].head, list) {
+		if (node->irq == irq_num
+#if DO_ALLOW_MULTI_DEV_IRQ
+		    && !strcmp(node->name, irq_name)
+#endif /* DO_ALLOW_MULTI_DEV_IRQ*/
+		    ) {
+			/*
+			 * OK, so the maping exists. But each
+			 * such mapping must be sent ONCE PER
+			 * CPU to Ring-3 -- have we done so
+			 * for this cpu?
+			 */
+			/* *was_mapping_sent = (node->cpu_bitmap & (1 << cpu)) ? true : false;*/
+			*was_mapping_sent =
+			    (IS_BIT_SET(cpu, PWR_CPU_BITMAP(node))) ? true :
+			    false;
+			rcu_read_unlock();
+			return true;
+		}
+	}
+
+	rcu_read_unlock();
+	return false;
+};
+
+/*
+ * Check to see if a given IRQ # <-> DEV Name mapping exists
+ * in our list of such mappings and, if it does, whether this
+ * mapping has been sent to Ring-3. Take appropriate actions
+ * if any of these conditions is not met.
+ */
+static irq_mapping_types_t irq_insert(int cpu, int irq_num,
+				      const char *irq_name)
+{
+	irq_node_t *node = NULL;
+	int idx = -1;
+	bool found_mapping = false, mapping_sent = false;
+
+	if (!irq_name) {
+		pw_pr_error("ERROR: NULL IRQ name?!\n");
+		return ERROR_IRQ_MAPPING;
+	}
+	/*
+	 * Protocol:
+	 * (a) if mapping FOUND: return "OK_IRQ_MAPPING_EXISTS"
+	 * (b) if new mapping CREATED: return "OK_NEW_IRQ_MAPPING_CREATED"
+	 * (c) if ERROR: return "ERROR_IRQ_MAPPING"
+	 */
+
+	found_mapping =
+	    find_irq_node_i(cpu, irq_num, irq_name, &idx, &mapping_sent);
+	if (found_mapping && mapping_sent) {
+		/*
+		 * OK, mapping exists AND we've already
+		 * sent the mapping for this CPU -- nothing
+		 * more to do.
+		 */
+		return OK_IRQ_MAPPING_EXISTS;
+	}
+
+	/*
+	 * Either this mapping didn't exist at all,
+	 * or the mapping wasn't sent for this CPU.
+	 * In either case, because we're using RCU,
+	 * we'll have to allocate a new node.
+	 */
+
+	node = get_next_free_irq_node_i(cpu, irq_num, irq_name);
+
+	if (unlikely(node == NULL)) {
+		pw_pr_error
+		    ("ERROR: could NOT allocate node for irq insertion!\n");
+		return ERROR_IRQ_MAPPING;
+	}
+
+	IRQ_LOCK(idx);
+	{
+		/*
+		 * It is *THEORETICALLY* possible that
+		 * a different CPU added this IRQ entry
+		 * to the 'irq_map'. For now, disregard
+		 * the possiblility (at worst we'll have
+		 * multiple entries with the same mapping,
+		 * which is OK).
+		 */
+		bool found = false;
+		irq_node_t *old_node = NULL;
+		struct hlist_node *curr = NULL;
+		if (found_mapping) {
+			PW_HLIST_FOR_EACH_ENTRY(old_node, curr,
+						&irq_map[idx].head, list) {
+				if (old_node->irq == irq_num
+#if DO_ALLOW_MULTI_DEV_IRQ
+				    && !strcmp(old_node->name, irq_name)
+#endif /* DO_ALLOW_MULTI_DEV_IRQ*/
+				    ) {
+					/*
+					 * Found older entry -- copy the 'cpu_bitmap'
+					 * field over to the new entry (no need to set this
+					 * CPU's entry -- 'get_next_free_irq_node_i() has
+					 * already done that. Instead, do a BITWISE OR of
+					 * the old and new bitmaps)...
+					 */
+					OUTPUT(0,
+					       KERN_INFO
+					       "[%d]: IRQ = %d, OLD bitmap = %lu\n",
+					       cpu, irq_num,
+					       *(old_node->cpu_bitmap));
+					/* node->cpu_bitmap |= old_node->cpu_bitmap;*/
+					/*
+					 * UPDATE: new 'bitmap' scheme -- copy over the older
+					 * bitmap array...
+					 */
+					memcpy(node->cpu_bitmap, old_node->cpu_bitmap, sizeof(unsigned long) * NUM_BITMAP_BUCKETS);	/* dst, src*/
+					/*
+					 * ...then set the current CPU's pos in the 'bitmap'
+					 */
+					SET_BIT(cpu, node->cpu_bitmap);
+					/*
+					 * ...and then replace the old node with
+					 * the new one.
+					 */
+					hlist_replace_rcu(&old_node->list,
+							  &node->list);
+					call_rcu(&old_node->rcu,
+						 &irq_destroy_callback);
+					/*
+					 * OK -- everything done.
+					 */
+					found = true;
+					break;
+				}
+			}
+			if (!found) {
+				pw_pr_error
+				    ("ERROR: CPU = %d, IRQ = %d, mapping_found but not found!\n",
+				     cpu, irq_num);
+			}
+		} else {
+			hlist_add_head_rcu(&node->list, &irq_map[idx].head);
+			/*
+			 * We've added a new mapping.
+			 */
+			++total_num_irq_mappings;
+		}
+	}
+	IRQ_UNLOCK(idx);
+	/*
+	 * Tell caller that this mapping
+	 * should be sent to Ring-3.
+	 */
+	return OK_NEW_IRQ_MAPPING_CREATED;
+};
+
+/*
+ * INTERNAL HELPER: retrieve number of
+ * mappings in the IRQ mappings list.
+ */
+static int get_num_irq_mappings(void)
+{
+	int retVal = 0;
+	int i = 0;
+	irq_node_t *node = NULL;
+	struct hlist_node *curr = NULL;
+
+	for (i = 0; i < NUM_IRQ_MAP_BUCKETS; ++i) {
+		PW_HLIST_FOR_EACH_ENTRY(node, curr, &irq_map[i].head, list) {
+			++retVal;
+			OUTPUT(0, KERN_INFO "[%d]: IRQ Num=%d, Dev=%s\n", i,
+			       node->irq, node->name);
+		}
+	}
+
+	return retVal;
+
+};
+
+#endif /* DO_CACHE_IRQ_DEV_NAME_MAPPINGS*/
+
+/*
+ * SYS map manipulation routines.
+ */
+
+inline bool is_tid_in_sys_list(pid_t tid)
+{
+	sys_node_t *node = NULL;
+	struct hlist_node *curr = NULL;
+	bool found = false;
+
+	int hindex = SYS_MAP_NODES_HASH(tid);
+	int lindex = SYS_MAP_LOCK_HASH(tid);
+
+	SYS_MAP_LOCK(lindex);
+	{
+		struct hlist_head *apwr_sys_list = GET_SYS_HLIST(hindex);
+		PW_HLIST_FOR_EACH_ENTRY(node, curr, apwr_sys_list, list) {
+			if (node->tid == tid) {
+				found = true;
+				break;
+			}
+		}
+	}
+	SYS_MAP_UNLOCK(lindex);
+
+	return found;
+};
+
+inline int check_and_remove_proc_from_sys_list(pid_t tid, pid_t pid)
+{
+	sys_node_t *node = NULL;
+	struct hlist_node *curr = NULL;
+	bool found = false;
+	int hindex = SYS_MAP_NODES_HASH(tid);
+	int lindex = SYS_MAP_LOCK_HASH(tid);
+
+	SYS_MAP_LOCK(lindex);
+	{
+		struct hlist_head *apwr_sys_list = GET_SYS_HLIST(hindex);
+		PW_HLIST_FOR_EACH_ENTRY(node, curr, apwr_sys_list, list) {
+			if (node->tid == tid && node->ref_count > 0) {
+				found = true;
+				--node->ref_count;
+				break;
+			}
+		}
+	}
+	SYS_MAP_UNLOCK(lindex);
+
+	if (!found) {
+		return -ERROR;
+	}
+	return SUCCESS;
+};
+
+inline int check_and_delete_proc_from_sys_list(pid_t tid, pid_t pid)
+{
+	sys_node_t *node = NULL;
+	bool found = false;
+	struct hlist_node *curr = NULL;
+	int hindex = SYS_MAP_NODES_HASH(tid);
+	int lindex = SYS_MAP_LOCK_HASH(tid);
+
+	SYS_MAP_LOCK(lindex);
+	{
+		struct hlist_head *apwr_sys_list = GET_SYS_HLIST(hindex);
+		PW_HLIST_FOR_EACH_ENTRY(node, curr, apwr_sys_list, list) {
+			if (node->tid == tid) {
+				found = true;
+				hlist_del(&node->list);
+				OUTPUT(3,
+				       KERN_INFO
+				       "CHECK_AND_DELETE: successfully deleted node: tid = %d, ref_count = %d, weight = %d\n",
+				       tid, node->ref_count, node->weight);
+				free_sys_node_i(node);
+				break;
+			}
+		}
+	}
+	SYS_MAP_UNLOCK(lindex);
+
+	if (!found) {
+		return -ERROR;
+	}
+	return SUCCESS;
+};
+
+inline int check_and_add_proc_to_sys_list(pid_t tid, pid_t pid)
+{
+	sys_node_t *node = NULL;
+	bool found = false;
+	int retVal = SUCCESS;
+	struct hlist_node *curr = NULL;
+	int hindex = SYS_MAP_NODES_HASH(tid);
+	int lindex = SYS_MAP_LOCK_HASH(tid);
+
+	SYS_MAP_LOCK(lindex);
+	{
+		struct hlist_head *apwr_sys_list = GET_SYS_HLIST(hindex);
+		PW_HLIST_FOR_EACH_ENTRY(node, curr, apwr_sys_list, list) {
+			if (node->tid == tid) {
+				found = true;
+				++node->ref_count;
+				++node->weight;
+				break;
+			}
+		}
+		if (!found) {
+			node = alloc_new_sys_node_i(tid, pid);
+			if (!node) {
+				pw_pr_error
+				    ("ERROR: could NOT allocate new node!\n");
+				retVal = -ERROR;
+			} else {
+				hlist_add_head(&node->list, apwr_sys_list);
+			}
+		}
+	}
+	SYS_MAP_UNLOCK(lindex);
+	return retVal;
+};
+
+void print_sys_node_i(sys_node_t * node)
+{
+	printk(KERN_INFO "SYS_NODE: %d -> %d, %d\n", node->tid, node->ref_count,
+	       node->weight);
+};
+
+/*
+ * HELPER template function to illustrate
+ * how to 'produce' data into the
+ * (per-cpu) output buffers.
+ */
+static inline void producer_template(int cpu)
+{
+	/*
+	 * Template for any of the 'produce_XXX_sample(...)'
+	 * functions.
+	 */
+	struct PWCollector_msg msg;
+	bool should_wakeup = true;	/* set to FALSE if calling from scheduling context (e.g. from "sched_wakeup()")*/
+	msg.data_len = 0;
+
+	/* Populate 'sample' fields in a domain-specific*/
+	/* manner. e.g.: */
+	/* sample.foo = bar*/
+	/*
+	 * OK, computed 'sample' fields. Now
+	 * write sample into the output buffer.
+	 */
+	pw_produce_generic_msg(&msg, should_wakeup);
+};
+
+#if DO_ACPI_S3_SAMPLE
+/*
+ * Insert a ACPI S3 Residency counter sample into a (per-cpu) output buffer.
+ */
+static inline void produce_acpi_s3_sample(u64 tsc, u64 s3_res)
+{
+	int cpu = raw_smp_processor_id();
+
+	PWCollector_msg_t msg;
+	s_residency_sample_t sres;
+
+	/*
+	 * No residency counters available
+	 */
+	msg.data_type = ACPI_S3;
+	msg.cpuidx = cpu;
+	msg.tsc = tsc;
+	msg.data_len = sizeof(sres);
+
+	/*
+	   if (startTSC_acpi_s3 == 0) {
+	   startTSC_acpi_s3 = tsc;
+	   }
+
+	   if (s3flag) {
+	   sres.data[0] = 0;
+	   sres.data[1] = s3_res;
+	   } else {
+	   sres.data[0] = tsc - startTSC_acpi_s3;
+	   sres.data[1] = 0;
+	   }
+	 */
+	pw_pr_debug("GU: start tsc = %llu, tsc = %llu, s3_res = %llu\n",
+		    startTSC_acpi_s3, tsc, s3_res);
+
+	if (startTSC_acpi_s3 == 0 || s3_res > 0) {
+		startTSC_acpi_s3 = tsc;
+	}
+	sres.data[0] = tsc - startTSC_acpi_s3;
+	sres.data[1] = s3_res;
+
+	startTSC_acpi_s3 = tsc;
+
+	msg.p_data = (u64) ((unsigned long)(&sres));
+
+	/*
+	 * OK, everything computed. Now copy
+	 * this sample into an output buffer
+	 */
+	pw_produce_generic_msg(&msg, true);	/* "true" ==> allow wakeups*/
+};
+#endif /* DO_ACPI_S3_SAMPLE*/
+
+#if DO_S_RESIDENCY_SAMPLE
+
+#ifdef CONFIG_RPMSG_IPC
+#define PW_SCAN_MMAP_DO_IPC(cmd, sub_cmd) rpmsg_send_generic_simple_command(cmd, sub_cmd)
+#else
+#define PW_SCAN_MMAP_DO_IPC(cmd, sub_cmd) (-ENODEV)
+#endif /* CONFIG_RPMSG_IPC*/
+
+static inline void pw_start_s_residency_counter_i(void)
+{
+	/*
+	 * Send START IPC command.
+	 */
+	if (PW_SCAN_MMAP_DO_IPC
+	    (INTERNAL_STATE.ipc_start_command,
+	     INTERNAL_STATE.ipc_start_sub_command)) {
+		printk(KERN_INFO
+		       "WARNING: possible error starting S_RES counters!\n");
+	}
+	pw_pr_debug("GU: SENT START IPC command!\n");
+};
+
+static inline void pw_dump_s_residency_counter_i(void)
+{
+	/*
+	 * Send DUMP IPC command.
+	 */
+	if (PW_SCAN_MMAP_DO_IPC
+	    (INTERNAL_STATE.ipc_dump_command,
+	     INTERNAL_STATE.ipc_dump_sub_command)) {
+		printk(KERN_INFO
+		       "WARNING: possible error dumping S_RES counters!\n");
+	}
+	pw_pr_debug("GU: SENT DUMP IPC command!\n");
+};
+
+static inline void pw_stop_s_residency_counter_i(void)
+{
+	/*
+	 * Send STOP IPC command.
+	 */
+	if (PW_SCAN_MMAP_DO_IPC
+	    (INTERNAL_STATE.ipc_stop_command,
+	     INTERNAL_STATE.ipc_stop_sub_command)) {
+		printk(KERN_INFO
+		       "WARNING: possible error stopping S_RES counters!\n");
+	}
+	pw_pr_debug("GU: SENT STOP IPC command!\n");
+};
+
+static inline void pw_populate_s_residency_values_i(u64 * values,
+						    bool is_begin_boundary)
+{
+	u16 i = 0, j = 0;
+	u64 value = 0;
+	const int counter_size_in_bytes =
+	    (int)INTERNAL_STATE.counter_size_in_bytes;
+	if (INTERNAL_STATE.collection_type == PW_IO_IPC) {
+		pw_dump_s_residency_counter_i();	/* TODO: OK to call immediately after 'START'?*/
+	}
+#if 1
+	for (i = 0, j = 1; i < INTERNAL_STATE.num_addrs; ++i, ++j) {
+		values[j] = 0x0;
+		if (j == 4) {
+			/* pwr library EXPECTS the fifth element to be the ACPI S3 residency value!*/
+			++j;
+		}
+		switch (INTERNAL_STATE.collection_type) {
+		case PW_IO_IPC:
+		case PW_IO_MMIO:	/* fall-through*/
+			/* value = INTERNAL_STATE.platform_remapped_addrs[i];*/
+			/* value = *((u64 *)INTERNAL_STATE.platform_remapped_addrs[i]);*/
+			/* value = *((u32 *)INTERNAL_STATE.platform_remapped_addrs[i]);*/
+			memcpy(&value,
+			       (void *)(unsigned long)INTERNAL_STATE.
+			       platform_remapped_addrs[i],
+			       counter_size_in_bytes);
+			break;
+		default:
+			printk(KERN_INFO
+			       "ERROR: unsupported S0iX collection type: %u!\n",
+			       INTERNAL_STATE.collection_type);
+			break;
+		}
+		if (is_begin_boundary) {
+			INTERNAL_STATE.init_platform_res_values[i] = value;
+			values[j] = 0;
+		} else {
+			/* values[j] = INTERNAL_STATE.init_platform_res_values[i] - value;*/
+			values[j] =
+			    value - INTERNAL_STATE.init_platform_res_values[i];
+		}
+		/*
+		   if (is_begin_boundary) {
+		   INTERNAL_STATE.init_platform_res_values[i] = value;
+		   }
+		   values[j] = value - INTERNAL_STATE.init_platform_res_values[i];
+		 */
+		pw_pr_debug("\t[%u] ==> %llu (%llu <--> %llu)\n", j, values[j],
+			    INTERNAL_STATE.init_platform_res_values[i], value);
+	}
+#else /* if 1*/
+	{
+		char __tmp[1024];
+		u64 *__p_tmp = (u64 *) & __tmp[0];
+		/* memcpy((u32 *)&__tmp[0], INTERNAL_STATE.platform_remapped_addrs[0], sizeof(u32) * (INTERNAL_STATE.num_addrs * 2));*/
+		memcpy(__p_tmp, INTERNAL_STATE.platform_remapped_addrs[0],
+		       sizeof(u64) * INTERNAL_STATE.num_addrs);
+		for (i = 0; i < INTERNAL_STATE.num_addrs; ++i) {
+			u64 __value1 = 0, __value2 = 0;
+			memcpy(&__value1,
+			       INTERNAL_STATE.platform_remapped_addrs[i],
+			       sizeof(u64));
+			__value2 =
+			    *((u64 *) INTERNAL_STATE.
+			      platform_remapped_addrs[i]);
+			printk(KERN_INFO "[%d] ==> %llu, %llu, %llu, %llu\n", i,
+			       __p_tmp[i], __value1, __value2,
+			       INTERNAL_STATE.platform_remapped_addrs[i]);
+		}
+	}
+#endif /* if 0*/
+};
+
+static inline void produce_boundary_s_residency_msg_i(bool is_begin_boundary)
+{
+	u64 tsc;
+	int cpu = raw_smp_processor_id();
+	PWCollector_msg_t msg;
+	s_res_msg_t *smsg = INTERNAL_STATE.platform_residency_msg;
+	u64 *values = smsg->residencies;
+
+	/* printk(KERN_INFO "smsg = %p, smsg->residencies = %p\n", smsg, smsg->residencies);*/
+
+	tscval(&tsc);
+	msg.data_type = S_RESIDENCY;
+	msg.cpuidx = cpu;
+	msg.tsc = tsc;
+	/* msg.data_len = sizeof(smsg);*/
+	msg.data_len = sizeof(*smsg) + sizeof(u64) * (INTERNAL_STATE.num_addrs + 2);	/* "+2" for S0i3, S3*/
+
+	if (startTSC_s_residency == 0) {
+		startTSC_s_residency = tsc;
+	}
+	/*
+	 * Power library requires S0i0 entry to be delta TSC
+	 */
+	values[0] = tsc - startTSC_s_residency;
+
+	/* printk(KERN_INFO "\t[%u] ==> %llu\n", 0, values[0]);*/
+
+#if 0
+	if (INTERNAL_STATE.collection_type == PW_IO_IPC
+	    && is_begin_boundary == true) {
+		pw_stop_s_residency_counter_i();
+		pw_start_s_residency_counter_i();
+	}
+#endif /* if 0*/
+
+	pw_populate_s_residency_values_i(values, is_begin_boundary);
+
+#if 0
+	if (INTERNAL_STATE.collection_type == PW_IO_IPC
+	    && is_begin_boundary == false) {
+		pw_stop_s_residency_counter_i();
+	}
+#endif /* if 0*/
+
+	msg.p_data = (u64) ((unsigned long)(smsg));
+
+	/*
+	 * OK, everything computed. Now copy
+	 * this sample into an output buffer
+	 */
+	pw_produce_generic_msg(&msg, true);	/* "true" ==> allow wakeups*/
+
+	/*
+	 * Check if we need to produce ACPI S3 samples.
+	 */
+	if (pw_is_slm && IS_ACPI_S3_MODE()) {
+		if (is_begin_boundary == true) {
+			/*
+			 * Ensure we reset the ACPI S3 'start' TSC counter.
+			 */
+			startTSC_acpi_s3 = 0x0;
+		}
+		/* produce_acpi_s3_sample(0);*/
+		produce_acpi_s3_sample(tsc, 0);
+	}
+};
+
+#endif /* DO_S_RESIDENCY_SAMPLE*/
+
+#if DO_WAKELOCK_SAMPLE
+/*
+ * Insert a Wakelock sample into a (per-cpu) output buffer.
+ */
+static inline void produce_w_sample(int cpu, u64 tsc, w_sample_type_t type,
+				    pid_t tid, pid_t pid, const char *wlname,
+				    const char *pname, u64 timeout)
+{
+	PWCollector_msg_t sample;
+	w_wakelock_msg_t w_msg;
+	int cp_index = -1;
+	size_t len = strlen(wlname);
+	size_t msg_len = 0;
+	constant_pool_msg_t *cp_msg = NULL;
+
+	pw_mapping_type_t map_type = wlock_insert(len, wlname, &cp_index);
+
+	sample.cpuidx = cpu;
+
+	if (unlikely(map_type == PW_MAPPING_ERROR)) {
+		printk(KERN_INFO
+		       "ERROR: could NOT insert wlname = %s into constant pool!\n",
+		       wlname);
+		return;
+	}
+	/*
+	 * Preallocate any memory we might need, BEFORE disabling interrupts!
+	 */
+	if (unlikely(map_type == PW_NEW_MAPPING_CREATED)) {
+		msg_len = PW_CONSTANT_POOL_MSG_HEADER_SIZE + len + 1;
+		cp_msg = pw_kmalloc(msg_len, GFP_ATOMIC);
+		if (unlikely(cp_msg == NULL)) {
+			/*
+			 * Hmnnnnn ... we'll need to destroy the newly created node. For now, don't handle this!!!
+			 * TODO: handle this case!
+			 */
+			printk(KERN_INFO
+			       "ERROR: could NOT allocate a new node for a constant-pool mapping: WILL LEAK MEMORY and THIS MAPPING WILL BE MISSING FROM YOUR END RESULTS!");
+			return;
+		}
+	}
+	/* get_cpu();*/
+	{
+		if (unlikely(map_type == PW_NEW_MAPPING_CREATED)) {
+			/*
+			 * We've inserted a new entry into our kernel wakelock constant pool. Tell wuwatch
+			 * about it.
+			 */
+			cp_msg->entry_type = W_STATE;	/* This is a KERNEL walock constant pool mapping*/
+			cp_msg->entry_len = len;
+			cp_msg->entry_index = cp_index;
+			memcpy(cp_msg->entry, wlname, len + 1);
+
+			sample.tsc = tsc;
+			sample.data_type = CONSTANT_POOL_ENTRY;
+			sample.data_len = msg_len;
+			sample.p_data = (u64) ((unsigned long)cp_msg);
+
+			pw_produce_generic_msg(&sample, false);	/* "false" ==> do NOT wakeup any sleeping readers*/
+		}
+		/*
+		 * OK, now send the actual wakelock sample.
+		 */
+		w_msg.type = type;
+		w_msg.expires = timeout;
+		w_msg.tid = tid;
+		w_msg.pid = pid;
+		w_msg.constant_pool_index = cp_index;
+		/* memcpy(w_msg.proc_name, pname, PW_MAX_PROC_NAME_SIZE); */
+		strncpy(w_msg.proc_name, pname, PW_MAX_PROC_NAME_SIZE);	/* process name*/
+
+		sample.tsc = tsc;
+		sample.data_type = W_STATE;
+		sample.data_len = sizeof(w_msg);
+		sample.p_data = (u64) (unsigned long)&w_msg;
+		/*
+		 * OK, everything computed. Now copy
+		 * this sample into an output buffer
+		 */
+		pw_produce_generic_msg(&sample, false);	/* "false" ==> do NOT wakeup any sleeping readers*/
+	}
+	/* put_cpu();*/
+
+	if (unlikely(cp_msg)) {
+		/*printk(KERN_INFO "OK: sent wakelock mapping: cp_index = %d, name = %s\n", cp_index, wlname);*/
+		pw_kfree(cp_msg);
+	}
+	/*printk(KERN_INFO "OK: sent wakelock msg for wlname = %s\n", wlname);*/
+	return;
+};
+#endif
+
+/*
+ * Insert a P-state transition sample into a (per-cpu) output buffer.
+ */
+static inline void produce_p_sample(int cpu, unsigned long long tsc,
+				    u32 req_freq, u32 perf_status,
+				    u8 is_boundary_sample, u64 aperf, u64 mperf)
+{
+	struct PWCollector_msg sample;
+	p_msg_t p_msg;
+
+	sample.cpuidx = cpu;
+	sample.tsc = tsc;
+
+	p_msg.unhalted_core_value = aperf;
+	p_msg.unhalted_ref_value = mperf;
+
+	p_msg.prev_req_frequency = req_freq;
+	p_msg.perf_status_val = (u16) perf_status;
+	p_msg.is_boundary_sample = is_boundary_sample;
+
+	sample.data_type = P_STATE;
+	sample.data_len = sizeof(p_msg);
+	sample.p_data = (u64) ((unsigned long)&p_msg);
+
+	pw_pr_debug("DEBUG: TSC = %llu, req_freq = %u, perf-status = %u\n", tsc,
+		    req_freq, perf_status);
+
+	/*
+	 * OK, everything computed. Now copy
+	 * this sample into an output buffer
+	 */
+	pw_produce_generic_msg(&sample, true);	/* "true" ==> wakeup sleeping readers, if required*/
+};
+
+/*
+ * Insert a K_CALL_STACK sample into a (per-cpu) output buffer.
+ */
+static inline void produce_k_sample(int cpu, const tnode_t * tentry)
+{
+	struct PWCollector_msg sample;
+	k_sample_t k_sample;
+
+	sample.cpuidx = cpu;
+	sample.tsc = tentry->tsc;
+
+	k_sample.tid = tentry->tid;
+	k_sample.trace_len = tentry->trace_len;
+	/*
+	 * Generate the "entryTSC" and "exitTSC" values here.
+	 */
+	{
+		k_sample.entry_tsc = tentry->tsc - 1;
+		k_sample.exit_tsc = tentry->tsc + 1;
+	}
+	/*
+	 * Also populate the trace here!
+	 */
+	if (tentry->trace_len) {
+		int num = tentry->trace_len;
+		int i = 0;
+		u64 *trace = k_sample.trace;
+		if (tentry->trace_len >= PW_TRACE_LEN) {
+			OUTPUT(0,
+			       KERN_ERR
+			       "Warning: kernel trace len = %d > TRACE_LEN = %d! Will need CHAINING!\n",
+			       num, PW_TRACE_LEN);
+			num = PW_TRACE_LEN;
+		}
+		/*
+		 * Can't 'memcpy()' -- individual entries in
+		 * the 'k_sample_t->trace[]' array are ALWAYS
+		 * 64 bits wide, REGARDLESS OF THE UNDERLYING
+		 * ARCHITECTURE!
+		 */
+		for (i = 0; i < num; ++i) {
+			trace[i] = tentry->trace[i];
+		}
+	}
+	OUTPUT(3, KERN_INFO "KERNEL-SPACE mapping!\n");
+
+	sample.data_type = K_CALL_STACK;
+	sample.data_len = sizeof(k_sample);
+	sample.p_data = (u64) ((unsigned long)&k_sample);
+
+	/*
+	 * OK, everything computed. Now copy
+	 * this sample into an output buffer
+	 */
+	pw_produce_generic_msg(&sample, true);	/* "true" ==> wakeup sleeping readers, if required*/
+};
+
+/*
+ * Insert an IRQ_MAP sample into a (per-cpu) output buffer.
+ */
+static inline void produce_i_sample(int cpu, int num, u64 tsc, const char *name)
+{
+	struct PWCollector_msg sample;
+	i_sample_t i_sample;
+	/*
+	   u64 tsc;
+
+	   tscval(&tsc);
+	 */
+
+	sample.cpuidx = cpu;
+	sample.tsc = tsc;
+
+	i_sample.irq_num = num;
+	memcpy(i_sample.irq_name, name, PW_IRQ_DEV_NAME_LEN);	/* dst, src*/
+
+	sample.data_type = IRQ_MAP;
+	sample.data_len = sizeof(i_sample);
+	sample.p_data = (u64) ((unsigned long)&i_sample);
+
+	/*
+	 * OK, everything computed. Now copy
+	 * this sample into an output buffer
+	 */
+	pw_produce_generic_msg(&sample, true);	/* "true" ==> wakeup sleeping readers, if required*/
+};
+
+/*
+ * Insert a PROC_MAP sample into a (per-cpu) output buffer.
+ */
+static inline void produce_r_sample(int cpu, u64 tsc, r_sample_type_t type,
+				    pid_t tid, pid_t pid, const char *name)
+{
+	struct PWCollector_msg sample;
+	r_sample_t r_sample;
+
+	sample.cpuidx = cpu;
+	sample.tsc = tsc;
+
+	r_sample.type = type;
+	r_sample.tid = tid;
+	r_sample.pid = pid;
+	memcpy(r_sample.proc_name, name, PW_MAX_PROC_NAME_SIZE);	/* dst, src*/
+
+	sample.data_type = PROC_MAP;
+	sample.data_len = sizeof(r_sample);
+	sample.p_data = (u64) ((unsigned long)&r_sample);
+
+	/*
+	 * OK, everything computed. Now copy
+	 * this sample into an output buffer
+	 */
+	pw_produce_generic_msg(&sample, true);	/* "true" ==> wakeup sleeping readers, if required*/
+};
+
+/*
+ * Insert an M_MAP sample into a (per-cpu) output buffer.
+ */
+static inline void produce_m_sample(int cpu, const char *name,
+				    unsigned long long begin,
+				    unsigned long long sz)
+{
+	struct PWCollector_msg sample;
+	m_sample_t m_sample;
+	u64 tsc;
+
+	tscval(&tsc);
+
+	sample.cpuidx = cpu;
+	sample.tsc = tsc;
+
+	m_sample.start = begin;
+	m_sample.end = (begin + sz);
+	m_sample.offset = 0;
+	memcpy(m_sample.name, name, PW_MODULE_NAME_LEN);	/* dst, src*/
+
+	sample.data_type = M_MAP;
+	sample.data_len = sizeof(m_sample);
+	sample.p_data = (u64) ((unsigned long)&m_sample);
+
+	/*
+	 * OK, everything computed. Now copy
+	 * this sample into an output buffer
+	 */
+	pw_produce_generic_msg(&sample, true);	/* "true" ==> wakeup sleeping readers, if required*/
+
+};
+
+/*
+ * Probe functions (and helpers).
+ */
+
+/*
+ * Generic method to generate a kernel-space call stack.
+ * Utilizes the (provided) "save_stack_trace()" function.
+ */
+int __get_kernel_timerstack(unsigned long buffer[], int len)
+{
+	struct stack_trace strace;
+
+	strace.max_entries = len;	/* MAX_BACKTRACE_LENGTH;*/
+	strace.nr_entries = 0;
+	strace.entries = buffer;
+	strace.skip = 3;
+
+	save_stack_trace(&strace);
+
+	OUTPUT(0, KERN_INFO "[%d]: KERNEL TRACE: nr_entries = %d\n", TID(),
+	       strace.nr_entries);
+
+	return strace.nr_entries;
+};
+
+/*
+ * Generate a kernel-space call stack.
+ * Requires the kernel be compiler with frame pointers ON.
+ *
+ * Returns number of return addresses in the call stack
+ * or ZERO, to indicate no stack.
+ */
+int get_kernel_timerstack(unsigned long buffer[], int len)
+{
+	return __get_kernel_timerstack(buffer, len);
+};
+
+static void timer_init(void *timer_addr)
+{
+	pid_t tid = TID();
+	pid_t pid = PID();
+	u64 tsc = 0;
+	int trace_len = 0;
+	unsigned long trace[MAX_BACKTRACE_LENGTH];
+	bool is_root_timer = false;
+	s32 init_cpu = RAW_CPU();
+
+	tscval(&tsc);
+
+	/*
+	 * For accuracy, we ALWAYS collect
+	 * kernel call stacks.
+	 */
+	if ((is_root_timer = IS_ROOT_TIMER(tid))) {
+		/*
+		 * get kernel timerstack here.
+		 * Requires the kernel be compiled with
+		 * frame_pointers on.
+		 */
+		if (INTERNAL_STATE.have_kernel_frame_pointers) {
+			trace_len =
+			    get_kernel_timerstack(trace, MAX_BACKTRACE_LENGTH);
+		} else {
+			trace_len = 0;
+		}
+		OUTPUT(0,
+		       KERN_INFO
+		       "KERNEL-SPACE timer init! Timer_addr = %p, tid = %d, pid = %d\n",
+		       timer_addr, tid, pid);
+	} else {
+		trace_len = 0;
+	}
+	/*
+	 * Store the timer if:
+	 * (a) called for a ROOT process (tid == 0) OR
+	 * (b) we're actively COLLECTING.
+	 */
+	if (is_root_timer || IS_COLLECTING()) {
+		DO_PER_CPU_OVERHEAD_FUNC(timer_insert,
+					 (unsigned long)timer_addr, tid, pid,
+					 tsc, init_cpu, trace_len, trace);
+	}
+};
+
+/* #if (KERNEL_VER < 35)*/
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
+static void probe_hrtimer_init(struct hrtimer *timer, clockid_t clockid,
+			       enum hrtimer_mode mode)
+#else
+static void probe_hrtimer_init(void *ignore, struct hrtimer *timer,
+			       clockid_t clockid, enum hrtimer_mode mode)
+#endif
+{
+	DO_PER_CPU_OVERHEAD_FUNC(timer_init, timer);
+};
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
+static void probe_timer_init(struct timer_list *timer)
+#else
+static void probe_timer_init(void *ignore, struct timer_list *timer)
+#endif
+{
+	/*
+	 * Debugging ONLY!
+	 */
+	DO_PER_CPU_OVERHEAD_FUNC(timer_init, timer);
+};
+
+/*
+ * Interval timer state probe.
+ * Fired on interval timer initializations
+ * (from "setitimer(...)")
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
+static void probe_itimer_state(int which, const struct itimerval *const value,
+			       cputime_t expires)
+#else
+static void probe_itimer_state(void *ignore, int which,
+			       const struct itimerval *const value,
+			       cputime_t expires)
+#endif
+{
+	struct hrtimer *timer = &current->signal->real_timer;
+
+	OUTPUT(3, KERN_INFO "[%d]: ITIMER STATE: timer = %p\n", TID(), timer);
+	DO_PER_CPU_OVERHEAD_FUNC(timer_init, timer);
+};
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
+static void probe_hrtimer_start(struct hrtimer *hrt)
+#else
+static void probe_hrtimer_start(void *ignore, struct hrtimer *hrt)
+#endif
+{
+	int cpu = CPU();
+	pid_t tid = TID();
+	pid_t pid = PID();
+	u64 tsc = 0;
+	/* const char *name = TIMER_START_COMM(hrt); */
+	int i, trace_len;
+	char symname[KSYM_NAME_LEN];
+	unsigned long trace[MAX_BACKTRACE_LENGTH];
+	void *sched_timer_addr = NULL;
+	per_cpu_t *pcpu = NULL;
+	bool should_unregister = false;
+
+	BUG_ON(!hrt);
+
+	if (!should_probe_on_hrtimer_start) {
+		OUTPUT(3, KERN_INFO "HRTIMER_START: timer = %p\n", hrt);
+		return;
+	}
+
+	/*
+	 * Not sure if "save_stack_trace" or "sprint_symbol" can
+	 * sleep. To be safe, use the "__get_cpu_var" variants
+	 * here. Note that it's OK if they give us stale values -- we're
+	 * not looking for an exact match.
+	 */
+	if (tid || local_read(&__get_cpu_var(sched_timer_found)))
+		return;
+
+	/*
+	 * Basic algo: generate a backtrace for this hrtimer_start
+	 * tracepoint. Then generate symbolic information for each
+	 * entry in the backtrace array. Check these symbols.
+	 * If any one of these symbols is equal to "cpu_idle" then
+	 * we know that this timer is the "tick" timer for this
+	 * CPU -- store the address (and the backtrace) in
+	 * the trace map (and also note that we have, in fact, found
+	 * the tick timer so that we don't repeat this process again).
+	 */
+
+	if (INTERNAL_STATE.have_kernel_frame_pointers) {
+		trace_len = get_kernel_timerstack(trace, MAX_BACKTRACE_LENGTH);
+		OUTPUT(0,
+		       KERN_INFO
+		       "[%d]: %.20s TIMER_START for timer = %p. trace_len = %d\n",
+		       tid, TIMER_START_COMM(hrt), hrt, trace_len);
+		for (i = 0; i < trace_len; ++i) {
+			sprint_symbol(symname, trace[i]);
+			OUTPUT(3, KERN_INFO "SYM MAPPING: 0x%lx --> %s\n",
+			       trace[i], symname);
+			if (strstr(symname, "cpu_idle")) {
+				OUTPUT(0,
+				       KERN_INFO
+				       "FOUND CPU IDLE for cpu = %d . TICK SCHED TIMER = %p\n",
+				       cpu, hrt);
+				local_inc(&__get_cpu_var(sched_timer_found));
+				/* *timer_found = true;*/
+				sched_timer_addr = hrt;
+			}
+		}
+	} else {
+		OUTPUT(0, KERN_INFO "NO TIMER STACKS!\n");
+	}
+
+	if (sched_timer_addr) {
+		/*
+		 * OK, use the safer "get_cpu_var(...)" variants
+		 * here. These disable interrupts.
+		 */
+		pcpu = &get_cpu_var(per_cpu_counts);
+		{
+			cpu = CPU();
+			/*
+			 * Races should *NOT* happen. Still, check
+			 * to make sure.
+			 */
+			if (!pcpu->sched_timer_addr) {
+				pcpu->sched_timer_addr = sched_timer_addr;
+
+				tsc = 0x1 + cpu;
+
+				timer_insert((unsigned long)sched_timer_addr,
+					     tid, pid, tsc, cpu, trace_len,
+					     trace);
+				/*
+				 * Debugging
+				 */
+				if (!timer_find
+				    ((unsigned long)sched_timer_addr, tid)) {
+					pw_pr_error
+					    ("ERROR: could NOT find timer %p in hrtimer_start!\n",
+					     sched_timer_addr);
+				}
+			}
+		}
+		put_cpu_var(pcpu);
+
+		LOCK(tick_count_lock);
+		{
+			if ((should_unregister =
+			     (++tick_count == num_online_cpus()))) {
+				OUTPUT(0,
+				       KERN_INFO
+				       "[%d]: ALL TICK TIMERS accounted for -- removing hrtimer start probe!\n",
+				       cpu);
+				should_probe_on_hrtimer_start = false;
+			}
+		}
+		UNLOCK(tick_count_lock);
+	}
+};
+
+/*
+ * Common function to perform some bookkeeping on
+ * IRQ-related wakeups (including (HR)TIMER_SOFTIRQs).
+ * Records hits and (if necessary) sends i-sample
+ * messages to Ring 3.
+ */
+static void handle_irq_wakeup_i(int cpu, int irq_num, const char *irq_name,
+				bool was_hit)
+{
+	/*
+	 * Send a sample to Ring-3
+	 * (but only if collecting).
+	 */
+	u64 sample_tsc = 0;
+	if (IS_COLLECTING()) {
+		tscval(&sample_tsc);
+		record_wakeup_cause(sample_tsc, PW_BREAK_TYPE_I, irq_num, -1,
+				    -1, -1);
+	}
+	/*
+	 * Then send an i-sample instance
+	 * to Ring 3 (but only if so configured
+	 * and if this is first time this
+	 * particular IRQ was seen on the
+	 * current CPU).
+	 */
+#if DO_CACHE_IRQ_DEV_NAME_MAPPINGS
+	{
+		int __ret = -1;
+		/*
+		 * We only cache device names if they
+		 * actually caused a C-state
+		 * wakeup.
+		 */
+		if (was_hit) {
+			DO_PER_CPU_OVERHEAD_FUNC_RET(__ret, irq_insert, cpu,
+						     irq_num, irq_name);
+			/*
+			 * Protocol:
+			 * (a) if mapping FOUND (and already SENT for THIS CPU): irq_insert returns "OK_IRQ_MAPPING_EXISTS"
+			 * (b) if new mapping CREATED (or mapping exists, but NOT SENT for THIS CPU): irq_insert returns "OK_NEW_IRQ_MAPPING_CREATED"
+			 * (c) if ERROR: irq_insert returns "ERROR_IRQ_MAPPING"
+			 */
+			if (__ret == OK_NEW_IRQ_MAPPING_CREATED
+			    && IS_COLLECTING()) {
+				/*
+				 * Send mapping info to Ring-3.
+				 */
+				/* produce_i_sample(cpu, irq_num, irq_name);*/
+				produce_i_sample(cpu, irq_num, sample_tsc,
+						 irq_name);
+			} else if (__ret == ERROR_IRQ_MAPPING) {
+				pw_pr_error
+				    ("ERROR: could NOT insert [%d,%s] into irq list!\n",
+				     irq_num, irq_name);
+			}
+		}
+	}
+#endif /* DO_CACHE_IRQ_DEV_NAME_MAPPINGS*/
+};
+
+#define TRACK_TIMER_EXPIRES 1
+
+static void timer_expire(void *timer_addr, pid_t tid)
+{
+	int cpu = -1;
+	pid_t pid = -1;
+	tnode_t *entry = NULL;
+	u64 tsc = 0;
+	bool found = false;
+	bool was_hit = false;
+	bool is_root = false;
+	int irq_num = -1;
+	s32 init_cpu = -1;
+
+	/*
+	 * Reduce overhead -- do NOT run
+	 * if user specifies NO C-STATES.
+	 */
+	if (unlikely(!IS_C_STATE_MODE())) {
+		return;
+	}
+#if !TRACK_TIMER_EXPIRES
+	{
+		if (IS_COLLECTING()) {
+			u64 sample_tsc;
+			tscval(&sample_tsc);
+			record_wakeup_cause(sample_tsc, PW_BREAK_TYPE_T, 0, -1,
+					    PID(), TID());
+		}
+
+		return;
+	}
+#endif
+
+#if DO_IOCTL_STATS
+	stats_t *pstats = NULL;
+#endif
+	/*
+	 * Atomic context => use __get_cpu_var(...) instead of get_cpu_var(...)
+	 */
+	irq_num = (&__get_cpu_var(per_cpu_counts))->was_timer_hrtimer_softirq;
+
+	/* was_hit = local_read(&__get_cpu_var(is_first_event)) == 1;*/
+	was_hit = __get_cpu_var(wakeup_event_counter).event_tsc == 0;
+
+#if DO_IOCTL_STATS
+	pstats = &__get_cpu_var(per_cpu_stats);
+#endif /* DO_IOCTL_STATS*/
+
+	cpu = CPU();
+
+	if ((entry = (tnode_t *) timer_find((unsigned long)timer_addr, tid))) {
+		pid = entry->pid;
+		tsc = entry->tsc;
+		init_cpu = entry->init_cpu;
+		found = true;
+		is_root = entry->is_root_timer;
+	} else {
+		/*
+		 * Couldn't find timer entry -- PID defaults to TID.
+		 */
+		pid = tid;
+		tsc = 0x1;
+		OUTPUT(3,
+		       KERN_INFO "Warning: [%d]: timer %p NOT found in list!\n",
+		       pid, timer_addr);
+		is_root = pid == 0;
+	}
+
+	if (!found) {
+		/* tsc = pw_max_num_cpus + 1;*/
+		tsc = 0x0;
+		if (tid < 0) {
+			/*
+			 * Yes, this is possible, especially if
+			 * the timer was fired because of a TIMER_SOFTIRQ.
+			 * Special case that here.
+			 */
+			if (irq_num > 0) {
+				/*
+				 * Basically, fall back on the SOFTIRQ
+				 * option because we couldn't quite figure
+				 * out the process that is causing this
+				 * wakeup. This is a duplicate of the
+				 * equivalent code in "inter_common(...)".
+				 */
+				const char *irq_name =
+				    pw_softirq_to_name[irq_num];
+				OUTPUT(3,
+				       KERN_INFO
+				       "WARNING: could NOT find TID in timer_expire for Timer = %p: FALLING BACK TO TIMER_SOFTIRQ OPTION! was_hit = %s\n",
+				       timer_addr, GET_BOOL_STRING(was_hit));
+				handle_irq_wakeup_i(cpu, irq_num, irq_name,
+						    was_hit);
+				/*
+				 * No further action is required.
+				 */
+				return;
+			} else {
+				/*
+				 * tid < 0 but this was NOT caused
+				 * by a TIMER_SOFTIRQ.
+				 * UPDATE: this is also possible if
+				 * the kernel wasn't compiled with the
+				 * 'CONFIG_TIMER_STATS' option set.
+				 */
+				OUTPUT(0,
+				       KERN_INFO
+				       "WARNING: NEGATIVE tid in timer_expire!\n");
+			}
+		}
+	} else {
+		/*
+		 * OK, found the entry. But timers fired
+		 * because of 'TIMER_SOFTIRQ' will have
+		 * tid == -1. Guard against that
+		 * by checking the 'tid' value. If < 0
+		 * then replace with entry->tid
+		 */
+		if (tid < 0) {
+			tid = entry->tid;
+		}
+	}
+	/*
+	 * Now send a sample to Ring-3.
+	 * (But only if collecting).
+	 */
+	if (IS_COLLECTING()) {
+		u64 sample_tsc;
+
+		tscval(&sample_tsc);
+		record_wakeup_cause(sample_tsc, PW_BREAK_TYPE_T, tsc, init_cpu,
+				    pid, tid);
+	}
+
+	/*
+	 * OK, send the TIMER::TSC mapping & call stack to the user
+	 * (but only if this is for a kernel-space call stack AND the
+	 * user wants kernel call stack info).
+	 */
+	if (is_root && (IS_COLLECTING() || IS_SLEEPING()) && IS_KTIMER_MODE()
+	    && found && entry && !entry->trace_sent) {
+		produce_k_sample(cpu, entry);
+		entry->trace_sent = 1;
+	}
+};
+
+/*
+ * High resolution timer (hrtimer) expire entry probe.
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
+static void probe_hrtimer_expire_entry(struct hrtimer *hrt, ktime_t * now)
+#else
+static void probe_hrtimer_expire_entry(void *ignore, struct hrtimer *hrt,
+				       ktime_t * now)
+#endif
+{
+	DO_PER_CPU_OVERHEAD_FUNC(timer_expire, hrt, TIMER_START_PID(hrt));
+};
+
+/*
+ * Macro to determine if the given
+ * high resolution timer is periodic.
+ */
+#define IS_INTERVAL_TIMER(hrt) ({					\
+	    bool __tmp = false;						\
+	    pid_t pid = TIMER_START_PID(hrt);				\
+	    ktime_t rem_k = hrtimer_expires_remaining(hrt);		\
+	    s64 remaining = rem_k.tv64;					\
+	    /* We first account for timers that */			\
+	    /* are explicitly re-enqueued. For these */			\
+	    /* we check the amount of time 'remaining' */		\
+	    /* for the timer i.e.  how much time until */		\
+	    /* the timer expires. If this is POSITIVE ==> */		\
+	    /* the timer will be re-enqueued onto the */		\
+	    /* timer list and is therefore PERIODIC */			\
+	    if(remaining > 0){						\
+		__tmp = true;						\
+	    }else{							\
+		/* Next, check for 'itimers' -- these INTERVAL TIMERS are */ \
+		/* different in that they're only re-enqueued when their */ \
+		/* signal (i.e. SIGALRM) is DELIVERED. Accordingly, we */ \
+		/* CANNOT check the 'remaining' time for these timers. Instead, */ \
+		/* we compare them to an individual task's 'REAL_TIMER' address.*/ \
+		/* N.B.: Call to 'pid_task(...)' influenced by SEP driver code */ \
+		struct task_struct *tsk = pid_task(find_pid_ns(pid, &init_pid_ns), PIDTYPE_PID); \
+		__tmp = (tsk && ( (hrt) == &tsk->signal->real_timer));	\
+	    }								\
+	    __tmp; })
+
+/*
+ * High resolution timer (hrtimer) expire exit probe.
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
+static void probe_hrtimer_expire_exit(struct hrtimer *hrt)
+#else
+static void probe_hrtimer_expire_exit(void *ignore, struct hrtimer *hrt)
+#endif
+{
+	if (!IS_INTERVAL_TIMER(hrt)) {
+		/*
+		 * timers are run from hardirq context -- no need
+		 * for expensive 'get_cpu_var(...)' variants.
+		 */
+		per_cpu_t *pcpu = &__get_cpu_var(per_cpu_counts);
+		/*
+		 * REMOVE the timer from
+		 * our timer map here (but
+		 * only if this isn't a 'sched_tick'
+		 * timer!)
+		 */
+		if ((void *)hrt != pcpu->sched_timer_addr) {
+			int ret = -1;
+			DO_PER_CPU_OVERHEAD_FUNC_RET(ret, timer_delete,
+						     (unsigned long)hrt,
+						     TIMER_START_PID(hrt));
+			if (ret) {
+				OUTPUT(0,
+				       KERN_INFO
+				       "WARNING: could NOT delete timer mapping for HRT = %p, TID = %d, NAME = %.20s\n",
+				       hrt, TIMER_START_PID(hrt),
+				       TIMER_START_COMM(hrt));
+			} else {
+				OUTPUT(3,
+				       KERN_INFO
+				       "OK: DELETED timer mapping for HRT = %p, TID = %d, NAME = %.20s\n",
+				       hrt, TIMER_START_PID(hrt),
+				       TIMER_START_COMM(hrt));
+				/* debugging ONLY!*/
+				if (timer_find
+				    ((unsigned long)hrt,
+				     TIMER_START_PID(hrt))) {
+					OUTPUT(0,
+					       KERN_INFO
+					       "WARNING: TIMER_FIND reports TIMER %p STILL IN MAP!\n",
+					       hrt);
+				}
+			}
+		}
+	}
+};
+
+#define DEFERRABLE_FLAG (0x1)
+#define IS_TIMER_DEFERRABLE(t) ( (unsigned long)( (t)->base) & DEFERRABLE_FLAG )
+
+/*
+ * Timer expire entry probe.
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
+static void probe_timer_expire_entry(struct timer_list *t)
+#else
+static void probe_timer_expire_entry(void *ignore, struct timer_list *t)
+#endif
+{
+	DO_PER_CPU_OVERHEAD_FUNC(timer_expire, t, TIMER_START_PID(t));
+};
+
+/*
+ * Function common to all interrupt tracepoints.
+ */
+static void inter_common(int irq_num, const char *irq_name)
+{
+	per_cpu_t *pcpu = NULL;
+
+	bool was_hit = false;
+
+#if DO_IOCTL_STATS
+	stats_t *pstats = NULL;
+#endif
+
+	/*
+	 * Reduce overhead -- do NOT run
+	 * if user specifies NO C-STATES.
+	 */
+	if (unlikely(!IS_C_STATE_MODE())) {
+		return;
+	}
+
+	/*
+	 * Debugging: make sure we're in
+	 * interrupt context!
+	 */
+	if (!in_interrupt()) {
+		printk(KERN_ERR
+		       "BUG: inter_common() called from a NON-INTERRUPT context! Got irq: %lu and soft: %lu\n",
+		       in_irq(), in_softirq());
+		return;
+	}
+
+	/*
+	 * Interrupt context: no need for expensive "get_cpu_var(...)" version.
+	 */
+	pcpu = &__get_cpu_var(per_cpu_counts);
+
+	/*
+	 * If this is a TIMER or an HRTIMER SOFTIRQ then
+	 * DO NOTHING (let the 'timer_expire(...)'
+	 * function handle this for greater accuracy).
+	 */
+	if (false && (irq_num == TIMER_SOFTIRQ || irq_num == HRTIMER_SOFTIRQ)) {
+		pcpu->was_timer_hrtimer_softirq = irq_num;
+#if DO_IOCTL_STATS
+		/*
+		 * Increment counter for timer interrupts as well.
+		 */
+		local_inc(&pstats->num_timers);
+#endif /* DO_IOCTL_STATS*/
+		OUTPUT(3, KERN_INFO "(HR)TIMER_SOFTIRQ: # = %d\n", irq_num);
+		return;
+	}
+#if DO_IOCTL_STATS
+	pstats = &__get_cpu_var(per_cpu_stats);
+	local_inc(&pstats->num_inters);
+
+	/*
+	 * Increment counter for timer interrupts as well.
+	 */
+	if (in_softirq()
+	    && (irq_num == TIMER_SOFTIRQ || irq_num == HRTIMER_SOFTIRQ))
+		local_inc(&pstats->num_timers);
+#endif
+
+	/*
+	 * Check if this interrupt caused a C-state
+	 * wakeup (we'll use that info to decide
+	 * whether to cache this IRQ # <-> DEV name
+	 * mapping).
+	 */
+	/* was_hit = local_read(&__get_cpu_var(is_first_event)) == 1;*/
+	was_hit = __get_cpu_var(wakeup_event_counter).event_tsc == 0;
+
+	/*
+	 * OK, record a 'hit' (if applicable) and
+	 * send an i-sample message to Ring 3.
+	 */
+	handle_irq_wakeup_i(CPU(), irq_num, irq_name, was_hit);
+};
+
+/*
+ * IRQ tracepoint.
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
+static void probe_irq_handler_entry(int irq, struct irqaction *action)
+#else
+static void probe_irq_handler_entry(void *ignore, int irq,
+				    struct irqaction *action)
+#endif
+{
+	const char *name = action->name;
+	if (!name) {
+		return;
+	}
+	OUTPUT(3, KERN_INFO "NUM: %d\n", irq);
+	/* inter_common(irq);*/
+	DO_PER_CPU_OVERHEAD_FUNC(inter_common, irq, name);
+};
+
+/*
+ * soft IRQ tracepoint.
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
+static void probe_softirq_entry(struct softirq_action *h,
+				struct softirq_action *vec)
+#else
+static void probe_softirq_entry(void *ignore, struct softirq_action *h,
+				struct softirq_action *vec)
+#endif
+{
+	int irq = -1;
+	const char *name = NULL;
+	irq = (int)(h - vec);
+	name = pw_softirq_to_name[irq];
+
+	if (!name) {
+		return;
+	}
+
+	OUTPUT(3, KERN_INFO "NUM: %d\n", irq);
+
+	DO_PER_CPU_OVERHEAD_FUNC(inter_common, irq, name);
+};
+#else /* >= 2.6.38*/
+static void probe_softirq_entry(void *ignore, unsigned int vec_nr)
+{
+	int irq = (int)vec_nr;
+	const char *name = pw_softirq_to_name[irq];
+
+	DO_PER_CPU_OVERHEAD_FUNC(inter_common, irq, name);
+};
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
+static void probe_workqueue_execution(struct task_struct *wq_thread,
+				      struct work_struct *work)
+#else
+static void probe_workqueue_execution(void *ignore,
+				      struct task_struct *wq_thread,
+				      struct work_struct *work)
+#endif				/* < 2.6.35*/
+{
+	if (IS_COLLECTING()) {
+		u64 tsc;
+		tscval(&tsc);
+
+		record_wakeup_cause(tsc, PW_BREAK_TYPE_W, 0, -1, -1, -1);
+	}
+};
+#else /* >= 2.6.36*/
+static void probe_workqueue_execute_start(void *ignore,
+					  struct work_struct *work)
+{
+	if (IS_COLLECTING()) {
+		u64 tsc;
+		tscval(&tsc);
+
+		record_wakeup_cause(tsc, PW_BREAK_TYPE_W, 0, -1, -1, -1);
+	}
+};
+#endif /* < 2.6.36*/
+
+/*
+ * Basically the same as arch/x86/kernel/irq.c --> "arch_irq_stat_cpu(cpu)"
+ */
+
+static u64 my_local_arch_irq_stats_cpu(void)
+{
+	u64 sum = 0;
+	irq_cpustat_t *stats;
+#ifdef __arm__
+	int i = 0;
+#endif
+	BEGIN_LOCAL_IRQ_STATS_READ(stats);
+	{
+#ifndef __arm__
+/* #ifdef CONFIG_X86_LOCAL_APIC*/
+		sum += stats->apic_timer_irqs;
+/* #endif*/
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 34)
+		sum += stats->x86_platform_ipis;
+#endif /* 2,6,34*/
+		sum += stats->apic_perf_irqs;
+#ifdef CONFIG_SMP
+		sum += stats->irq_call_count;
+		sum += stats->irq_resched_count;
+		sum += stats->irq_tlb_count;
+#endif
+#ifdef CONFIG_X86_THERMAL_VECTOR
+		sum += stats->irq_thermal_count;
+#endif
+		sum += stats->irq_spurious_count;	/* should NEVER be non-zero!!!*/
+#else
+		sum += stats->__softirq_pending;
+#ifdef CONFIG_SMP
+		for (i = 0; i < NR_IPI; ++i) {
+			sum += stats->ipi_irqs[i];
+		}
+#endif
+#endif
+	}
+	END_LOCAL_IRQ_STATS_READ(stats);
+	return sum;
+};
+
+static DEFINE_PER_CPU(u64, prev_c6_val) = 0;
+
+/*
+ * TPS epoch manipulation functions.
+ */
+#if DO_TPS_EPOCH_COUNTER
+
+int inc_tps_epoch_i(void)
+{
+	int retVal = -1;
+	/*
+	 * From "Documentation/memory-barriers.txt": "atomic_inc_return()"
+	 * has IMPLICIT BARRIERS -- no need to add explicit barriers
+	 * here!
+	 */
+	retVal = atomic_inc_return(&tps_epoch);
+	return retVal;
+};
+
+int read_tps_epoch_i(void)
+{
+	/*
+	 * Make sure TPS updates have propagated
+	 */
+	smp_mb();
+	return atomic_read(&tps_epoch);
+};
+#endif /* DO_TPS_EPOCH_COUNTER*/
+
+static int pw_read_msr_info_set_i(struct pw_msr_info_set *info_set)
+{
+	int num_res = 0;
+#ifndef __arm__
+	int i = 0, curr_index = 0;
+	u64 val = 0;
+	s32 msr_addr = -1;
+	/* bool init_msr_set_sent = info_set->init_msr_set_sent == 1;*/
+	int num_msrs = INTERNAL_STATE.num_msrs;
+	struct pw_msr_addr *msr_addrs = INTERNAL_STATE.msr_addrs;
+
+	/*
+	 * Read values for EVERY C-state MSR (Thread/Core/Mod/Pkg)
+	 * TODO:
+	 * We will need to move away from a linked list to an array.
+	 */
+	for (i = 0; i < num_msrs; ++i) {
+		msr_addr = msr_addrs[i].addr;
+		if (unlikely(msr_addr <= 0)) {
+			continue;
+		}
+		WUWATCH_RDMSRL(msr_addr, val);
+		if (unlikely(info_set->prev_msr_vals[i].val == 0x0)) {
+			if (msr_addrs[i].id.depth == MPERF) {
+				info_set->curr_msr_count[curr_index].id =
+				    info_set->prev_msr_vals[i].id;
+				info_set->curr_msr_count[curr_index++].val =
+				    val;
+			}
+		} else {	/* val != 0x0*/
+			if (info_set->prev_msr_vals[i].val != val) {
+				if (msr_addrs[i].id.depth > MPERF) {
+					++num_res;
+				}
+				info_set->curr_msr_count[curr_index].id =
+				    info_set->prev_msr_vals[i].id;
+				info_set->curr_msr_count[curr_index++].val =
+				    val;
+			}
+		}
+		info_set->prev_msr_vals[i].val = val;
+	}
+#else
+	/* probe_power_end fills in these statistics when it is called*/
+	/* so we just grab what is set here.  On x86 we grab and set them above*/
+	*which_cx = msr_set->prev_req_cstate;
+	*cx_val = msr_set->curr_msr_count[msr_set->prev_req_cstate];
+	num_res = 1;
+#endif /* ifndef __arm__*/
+	return num_res;
+};
+
+static void tps_lite(bool is_boundary_sample)
+{
+	/*
+	 * (1) Read APERF, MPERF.
+	 * (2) Produce a "C_LITE_MSG" instance.
+	 */
+	u64 tsc = 0x0;
+	u64 mperf = 0x0;
+	int cpu = get_cpu();
+	{
+
+		tscval(&tsc);
+
+		WUWATCH_RDMSRL(REF_CYCLES_MSR_ADDR, mperf);
+	}
+	put_cpu();
+	/*
+	 * Data collected. Now enqueue it.
+	 */
+	{
+		c_multi_msg_t cm;
+		struct PWCollector_msg sample;
+
+		sample.cpuidx = cpu;
+		sample.tsc = tsc;
+
+#ifndef __arm__
+		cm.mperf = mperf;
+#else
+		/* TODO*/
+		cm.mperf = c0_time;
+#endif
+
+		cm.req_state = (u8) APERF;
+
+		cm.wakeup_tsc = 0x0;	/* don't care*/
+		cm.wakeup_data = 0x0;	/* don't care*/
+		cm.timer_init_cpu = 0x0;	/* don't care*/
+		cm.wakeup_pid = -1;	/* don't care*/
+		cm.wakeup_tid = -1;	/* don't care*/
+		cm.wakeup_type = 0x0;	/* don't care*/
+		/*
+		 * The only field of interest is the 'num_msrs' value.
+		 */
+		cm.num_msrs = 0x0;
+
+#if DO_TPS_EPOCH_COUNTER
+		/*
+		 * We're entering a new TPS "epoch".
+		 * Increment our counter.
+		 */
+		cm.tps_epoch = inc_tps_epoch_i();
+#endif /* DO_TPS_EPOCH_COUNTER*/
+
+		sample.data_type = C_STATE;
+		sample.data_len = C_MULTI_MSG_HEADER_SIZE();
+		sample.p_data = (u64) ((unsigned long)&cm);
+
+		if (IS_COLLECTING() || is_boundary_sample) {
+			pw_produce_generic_msg(&sample, true);
+		}
+	}
+};
+
+static void bdry_tps(void)
+{
+	u64 tsc = 0;
+	PWCollector_msg_t sample;
+	pw_msr_info_set_t *info_set = NULL;
+	u32 prev_req_cstate = 0;
+	u8 init_msr_set_sent = 1;
+	int num_cx = -1;
+	int num_msrs = 0;
+	pw_msr_val_t *msr_vals = NULL;
+	int cpu;
+
+#ifdef __arm__
+	u64 *prev_tsc = NULL;
+	u64 c0_time = 0;
+#endif
+
+	tscval(&tsc);
+	cpu = RAW_CPU();
+	/*
+	 * Read all C-state MSRs.
+	 */
+	{
+		info_set = pw_pcpu_msr_info_sets + cpu;
+#ifdef __arm__
+		++state;	/* on ARM (Nexus 7 at least) states start with LP3 and no C0*/
+		prev_tsc = &__get_cpu_var(trace_power_prev_time);
+		c0_time = tsc - *prev_tsc;
+		*prev_tsc = tsc;
+		set->prev_msr_vals[MPERF] += c0_time;
+		c0_time = set->prev_msr_vals[MPERF];
+#endif /* __arm__*/
+		{
+			num_msrs = info_set->num_msrs;
+			init_msr_set_sent = info_set->init_msr_set_sent;
+			prev_req_cstate = info_set->prev_req_cstate;
+			num_cx = pw_read_msr_info_set_i(info_set);
+			info_set->prev_req_cstate = (u32) MPERF;	/* must be after pw_read_msr_set_i for ARM changes*/
+			if (unlikely(init_msr_set_sent == 0)) {
+				/* memcpy(msr_set, info_set->prev_msr_vals, sizeof(u64) * info_set->num_msrs);*/
+				info_set->init_msr_set_sent = 1;
+			}
+			if (unlikely(num_cx > 1)) {
+				/* memcpy(msr_vals, info_set->curr_msr_count, sizeof(u64) * info_set->num_msrs);*/
+			}
+		}
+	}
+
+	if (num_cx > 1) {
+		OUTPUT(0, KERN_INFO "WARNING: [%d]: # cx = %d\n", cpu, num_cx);
+	}
+
+	/*
+	 * Get wakeup cause(s).
+	 * Only required if we're capturing C-state samples.
+	 */
+	if (IS_C_STATE_MODE()) {
+		if (unlikely(init_msr_set_sent == 0)) {
+			/*
+			 * OK, this is the first TPS for this thread during the current collection.
+			 * Send a "POSIX_TIME_SYNC" message to allow Ring-3 to correlate the TSC used by wuwatch with
+			 * the "clock_gettime()" used by TPSS.
+			 */
+			{
+				struct timespec ts;
+				tsc_posix_sync_msg_t tsc_msg;
+				u64 tmp_tsc = 0, tmp_nsecs = 0;
+				ktime_get_ts(&ts);
+				tscval(&tmp_tsc);
+				tmp_nsecs =
+				    (u64) ts.tv_sec * 1000000000ULL +
+				    (u64) ts.tv_nsec;
+				tsc_msg.tsc_val = tmp_tsc;
+				tsc_msg.posix_mono_val = tmp_nsecs;
+
+				sample.tsc = tsc;
+				sample.cpuidx = cpu;
+				sample.data_type = TSC_POSIX_MONO_SYNC;
+				sample.data_len = sizeof(tsc_msg);
+				sample.p_data = (u64) ((unsigned long)&tsc_msg);
+
+				pw_produce_generic_msg(&sample, true);
+				pw_pr_debug(KERN_INFO
+					    "[%d]: SENT POSIX_TIME_SYNC\n",
+					    cpu);
+				pw_pr_debug(KERN_INFO
+					    "[%d]: tsc = %llu posix mono = %llu\n",
+					    cpu, tmp_tsc, tmp_nsecs);
+			}
+			/*
+			 * 2. Second, the initial MSR 'set' for this (logical) CPU.
+			 */
+			{
+				sample.tsc = tsc;
+				sample.cpuidx = cpu;
+				sample.data_type = C_STATE_MSR_SET;
+				/* sample.data_len = sizeof(msr_set);*/
+				sample.data_len =
+				    num_msrs * sizeof(pw_msr_val_t);
+				/* sample.p_data = (u64)((unsigned long)msr_set);*/
+				sample.p_data =
+				    (u64) ((unsigned long)info_set->
+					   prev_msr_vals);
+
+				/* Why "true"? Document!*/
+				pw_produce_generic_msg(&sample, true);
+				pw_pr_debug(KERN_INFO
+					    "[%d]: SENT init msr set at TSC = %llu\n",
+					    cpu, tsc);
+			}
+		}
+
+		/*
+		 * Send the actual TPS message here.
+		 */
+		{
+			c_multi_msg_t *cm =
+			    (c_multi_msg_t *) info_set->c_multi_msg_mem;
+			BUG_ON(!cm);
+
+			sample.cpuidx = cpu;
+			sample.tsc = tsc;
+
+			msr_vals = (pw_msr_val_t *) cm->data;
+
+#ifndef __arm__
+			cm->mperf = info_set->curr_msr_count[0].val;
+#else
+			/* TODO*/
+			cm->mperf = c0_time;
+#endif
+
+			cm->req_state = (u8) MPERF;
+
+			cm->req_state = (u8) APERF;
+
+			cm->wakeup_tsc = 0x0;	/* don't care*/
+			cm->wakeup_data = 0x0;	/* don't care*/
+			cm->timer_init_cpu = 0x0;	/* don't care*/
+			cm->wakeup_pid = -1;	/* don't care*/
+			cm->wakeup_tid = -1;	/* don't care*/
+			cm->wakeup_type = 0x0;	/* don't care*/
+			cm->num_msrs = num_cx;
+
+			/*
+			 * 'curr_msr_count[0]' contains the MPERF value, which is encoded separately.
+			 * We therefore read from 'curr_msr_count[1]'
+			 */
+			memcpy(msr_vals, &info_set->curr_msr_count[1],
+			       sizeof(pw_msr_val_t) * num_cx);
+
+			sample.data_type = C_STATE;
+			sample.data_len =
+			    sizeof(pw_msr_val_t) * num_cx +
+			    C_MULTI_MSG_HEADER_SIZE();
+			sample.p_data = (u64) ((unsigned long)cm);
+
+			pw_produce_generic_msg(&sample, true);
+		}
+	}
+};
+
+static void tps(unsigned int type, unsigned int state)
+{
+	int cpu = CPU(), epoch = 0;
+	u64 tsc = 0;
+	PWCollector_msg_t sample;
+	pw_msr_info_set_t *info_set = NULL;
+	bool local_apic_fired = false;
+	u32 prev_req_cstate = 0;
+	u8 init_msr_set_sent = 1;
+	int num_cx = -1;
+	char *__buffer = NULL;
+	bool did_alloc = false;
+	int num_msrs = 0;
+	pw_msr_val_t *msr_vals = NULL;
+
+#ifdef __arm__
+	u64 *prev_tsc = NULL;
+	u64 c0_time = 0;
+#endif
+
+	tscval(&tsc);
+
+	/*
+	 * Read all C-state MSRs.
+	 */
+	get_cpu();
+	{
+		info_set = pw_pcpu_msr_info_sets + cpu;
+#ifdef __arm__
+		++state;	/* on ARM (Nexus 7 at least) states start with LP3 and no C0*/
+		prev_tsc = &__get_cpu_var(trace_power_prev_time);
+		c0_time = tsc - *prev_tsc;
+		*prev_tsc = tsc;
+		set->prev_msr_vals[MPERF] += c0_time;
+		c0_time = set->prev_msr_vals[MPERF];
+#endif /* __arm__*/
+		{
+			num_msrs = info_set->num_msrs;
+			init_msr_set_sent = info_set->init_msr_set_sent;
+			prev_req_cstate = info_set->prev_req_cstate;
+			num_cx = pw_read_msr_info_set_i(info_set);
+			info_set->prev_req_cstate = (u32) state;	/* must be after pw_read_msr_set_i for ARM changes*/
+			if (unlikely(init_msr_set_sent == 0)) {
+				/* memcpy(msr_set, info_set->prev_msr_vals, sizeof(u64) * info_set->num_msrs);*/
+				info_set->init_msr_set_sent = 1;
+			}
+			if (unlikely(num_cx > 1)) {
+				/* memcpy(msr_vals, info_set->curr_msr_count, sizeof(u64) * info_set->num_msrs);*/
+			}
+		}
+	}
+	put_cpu();
+
+	BUG_ON(init_msr_set_sent == 0);
+
+	if (num_cx > 1) {
+		OUTPUT(0, KERN_INFO "WARNING: [%d]: # cx = %d\n", cpu, num_cx);
+	}
+
+	/*
+	 * Get wakeup cause(s).
+	 * Only required if we're capturing C-state samples.
+	 */
+	if (IS_C_STATE_MODE()) {
+		u64 event_tsc = 0, event_val = 0;
+		pid_t event_tid = -1, event_pid = -1;
+		c_break_type_t event_type = PW_BREAK_TYPE_U;
+		s32 event_init_cpu = -1;
+		/*
+		 * See if we can get a wakeup cause, along
+		 * with associated data.
+		 * We use "__get_cpu_var()" instead of "get_cpu_var()" because
+		 * it is OK for us to be preempted out at any time. Also, not
+		 * disabling preemption saves us about 500 cycles per TPS.
+		 */
+		{
+			struct wakeup_event *wu_event =
+			    &get_cpu_var(wakeup_event_counter);
+			if (wu_event->event_tsc > 0) {
+				event_type = wu_event->event_type;
+				event_val = wu_event->event_val;
+				event_tsc = wu_event->event_tsc;
+				event_pid = wu_event->event_pid;
+				event_tid = wu_event->event_tid;
+				event_init_cpu = wu_event->init_cpu;
+				wu_event->event_tsc = 0;	/* reset for the next wakeup event.*/
+			}
+			put_cpu_var(wakeup_event_counter);
+		}
+		/*
+		 * Check if the local APIC timer raised interrupts.
+		 */
+		{
+			u64 curr_num_local_apic = my_local_arch_irq_stats_cpu();
+			u64 *old_num_local_apic =
+			    &__get_cpu_var(num_local_apic_timer_inters);
+			if (*old_num_local_apic
+			    && (*old_num_local_apic != curr_num_local_apic)) {
+				local_apic_fired = true;
+			}
+			*old_num_local_apic = curr_num_local_apic;
+		}
+
+		if (event_type == PW_BREAK_TYPE_U && local_apic_fired
+		    && IS_COLLECTING()) {
+			event_type = PW_BREAK_TYPE_IPI;
+			/*
+			 * We need a 'TSC' for this IPI sample but we don't know
+			 * WHEN the local APIC timer interrupt was raised. Fortunately, it doesn't
+			 * matter, because we only need to ensure this sample lies
+			 * BEFORE the corresponding 'C_STATE' sample in a sorted timeline.
+			 * We therefore simply subtract one from the C_STATE sample TSC to get
+			 * the IPI sample TSC.
+			 */
+			event_tsc = tsc - 1;
+			event_tid = event_pid = -1;
+			event_val = 0;
+		}
+
+		if (unlikely(init_msr_set_sent == 0)) {
+			/*
+			 * OK, this is the first TPS for this thread during the current collection.
+			 * Send a "POSIX_TIME_SYNC" message to allow Ring-3 to correlate the TSC used by wuwatch with
+			 * the "clock_gettime()" used by TPSS.
+			 */
+			{
+				struct timespec ts;
+				tsc_posix_sync_msg_t tsc_msg;
+				u64 tmp_tsc = 0, tmp_nsecs = 0;
+				ktime_get_ts(&ts);
+				tscval(&tmp_tsc);
+				tmp_nsecs =
+				    (u64) ts.tv_sec * 1000000000ULL +
+				    (u64) ts.tv_nsec;
+				tsc_msg.tsc_val = tmp_tsc;
+				tsc_msg.posix_mono_val = tmp_nsecs;
+
+				sample.tsc = tsc;
+				sample.cpuidx = cpu;
+				sample.data_type = TSC_POSIX_MONO_SYNC;
+				sample.data_len = sizeof(tsc_msg);
+				sample.p_data = (u64) ((unsigned long)&tsc_msg);
+
+				pw_produce_generic_msg(&sample, true);
+				pw_pr_debug(KERN_INFO
+					    "[%d]: SENT POSIX_TIME_SYNC\n",
+					    cpu);
+				pw_pr_debug(KERN_INFO
+					    "[%d]: tsc = %llu posix mono = %llu\n",
+					    cpu, tmp_tsc, tmp_nsecs);
+			}
+			/*
+			 * 2. Second, the initial MSR 'set' for this (logical) CPU.
+			 */
+			{
+				sample.tsc = tsc;
+				sample.cpuidx = cpu;
+				sample.data_type = C_STATE_MSR_SET;
+				/* sample.data_len = sizeof(msr_set);*/
+				sample.data_len =
+				    num_msrs * sizeof(pw_msr_val_t);
+				/* sample.p_data = (u64)((unsigned long)msr_set);*/
+				sample.p_data =
+				    (u64) ((unsigned long)info_set->
+					   prev_msr_vals);
+
+				/* Why "true"? Document!*/
+				pw_produce_generic_msg(&sample, true);
+				pw_pr_debug(KERN_INFO
+					    "[%d]: SENT init msr set at tsc = %llu\n",
+					    cpu, tsc);
+			}
+		}
+
+		/*
+		 * Send the actual TPS message here.
+		 */
+		{
+			c_multi_msg_t *cm =
+			    (c_multi_msg_t *) info_set->c_multi_msg_mem;
+			BUG_ON(!cm);
+
+			sample.cpuidx = cpu;
+			sample.tsc = tsc;
+
+			msr_vals = (pw_msr_val_t *) cm->data;
+
+#ifndef __arm__
+			cm->mperf = info_set->curr_msr_count[0].val;
+#else
+			/* TODO*/
+			cm->mperf = c0_time;
+#endif
+
+			cm->req_state = (u8) state;
+
+			cm->wakeup_tsc = event_tsc;
+			cm->wakeup_data = event_val;
+			cm->timer_init_cpu = event_init_cpu;
+			cm->wakeup_pid = event_pid;
+			cm->wakeup_tid = event_tid;
+			cm->wakeup_type = event_type;
+			cm->num_msrs = num_cx;
+
+			/*
+			 * 'curr_msr_count[0]' contains the MPERF value, which is encoded separately.
+			 * We therefore read from 'curr_msr_count[1]'
+			 */
+			memcpy(msr_vals, &info_set->curr_msr_count[1],
+			       sizeof(pw_msr_val_t) * num_cx);
+
+#if DO_TPS_EPOCH_COUNTER
+			/*
+			 * We're entering a new TPS "epoch".
+			 * Increment our counter.
+			 */
+			epoch = inc_tps_epoch_i();
+			/* epoch = 0x0;*/
+			cm->tps_epoch = epoch;
+#endif /* DO_TPS_EPOCH_COUNTER*/
+
+			sample.data_type = C_STATE;
+			sample.data_len =
+			    sizeof(pw_msr_val_t) * num_cx +
+			    C_MULTI_MSG_HEADER_SIZE();
+			sample.p_data = (u64) ((unsigned long)cm);
+
+			if (false && cpu == 0) {
+				printk(KERN_INFO
+				       "[%d]: TSC = %llu, #cx = %d, data_len = %u, break_type = %u\n",
+				       cpu, sample.tsc, num_cx, sample.data_len,
+				       cm->wakeup_type);
+			}
+
+			if (false && cpu == 0 && num_cx == 1) {
+				printk(KERN_INFO
+				       "[%d]: TSC = %llu, 1 Cx MSR counted: id = %u, val = %llu\n",
+				       cpu, sample.tsc,
+				       info_set->curr_msr_count[1].id.depth,
+				       info_set->curr_msr_count[1].val);
+			}
+
+			if (IS_COLLECTING()) {
+				pw_produce_generic_msg(&sample, true);
+			}
+		}
+
+		/*
+		 * Reset the "first-hit" variable.
+		 */
+		{
+			__get_cpu_var(wakeup_event_counter).event_tsc = 0;
+		}
+
+		if (unlikely(did_alloc)) {
+			did_alloc = false;
+			pw_kfree(__buffer);
+		}
+	}			/* IS_C_STATE_MODE()*/
+
+	/* Collect S and D state / residency counter samples on CPU0*/
+	if (cpu != 0 || !IS_COLLECTING()) {
+		return;
+	}
+};
+
+/*
+ * C-state break.
+ * Read MSR residencies.
+ * Also gather information on what caused C-state break.
+ * If so configured, write C-sample information to (per-cpu)
+ * output buffer.
+ */
+#ifdef __arm__
+/*
+ * TODO: we may want to change this to call receive_wakeup_cause()
+ * and put the logic for the ARM stuff in there.  The reason
+ * we don't do it now is we want to make sure this isn't called
+ * before the timer or interrupt trace calls or we will attribute
+ * the cause of the wakeup as this function instead of an interrupt or
+ * timer.  We can probably fix that by cleaning up the logic a bit
+ * but for the merge we ignore that for now
+ */
+static void probe_power_end(void *ignore)
+{
+	u64 tsc = 0;
+
+	msr_set_t *set = NULL;
+	u64 *prev_tsc = NULL;
+	u64 trace_time = 0;
+
+	tscval(&tsc);
+	prev_tsc = &get_cpu_var(trace_power_prev_time);
+	trace_time = tsc - *prev_tsc;
+
+	*prev_tsc = tsc;
+	put_cpu_var(trace_power_prev_time);
+
+	/*
+	 * Set all C-state MSRs.
+	 */
+	set = &get_cpu_var(pw_pcpu_msr_sets);
+	{
+		set->prev_msr_vals[set->prev_req_cstate] += trace_time;
+
+		memset(set->curr_msr_count, 0, sizeof(u64) * MAX_MSR_ADDRESSES);
+		set->curr_msr_count[set->prev_req_cstate] =
+		    set->prev_msr_vals[set->prev_req_cstate];
+	}
+	put_cpu_var(pw_pcpu_msr_sets);
+}
+#endif /* __arm__*/
+
+#ifdef APWR_RED_HAT
+/*
+ * Red Hat back ports SOME changes from 2.6.37 kernel
+ * into 2.6.32 kernel. Special case that here.
+ */
+static void probe_power_start(unsigned int type, unsigned int state,
+			      unsigned int cpu_id)
+{
+	/* tps_i(type, state);*/
+	DO_PER_CPU_OVERHEAD_FUNC(tps, type, state);
+};
+#else
+#if LINUX_VERSION_CODE  < KERNEL_VERSION(2,6,38)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
+static void probe_power_start(unsigned int type, unsigned int state)
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
+static void probe_power_start(void *ignore, unsigned int type,
+			      unsigned int state)
+#else /* 2.6.36 <= version < 2.6.38*/
+static void probe_power_start(void *ignore, unsigned int type,
+			      unsigned int state, unsigned int cpu_id)
+#endif
+{
+	if (likely(IS_C_STATE_MODE())) {
+		DO_PER_CPU_OVERHEAD_FUNC(tps, type, state);
+	} else {
+		DO_PER_CPU_OVERHEAD_FUNC(tps_lite, false /* boundary */ );
+	}
+};
+#else /* version >= 2.6.38*/
+static void probe_cpu_idle(void *ignore, unsigned int state,
+			   unsigned int cpu_id)
+{
+	if (state == PWR_EVENT_EXIT) {
+#ifdef __arm__
+		probe_power_end(NULL);
+#endif
+		return;
+	}
+
+	if (likely(IS_C_STATE_MODE())) {
+		DO_PER_CPU_OVERHEAD_FUNC(tps, 0 /*type */ , state);
+	} else {
+		DO_PER_CPU_OVERHEAD_FUNC(tps_lite, false /* boundary */ );
+	}
+};
+#endif /* version*/
+#endif /* APWR_RED_HAT*/
+#ifdef __arm__
+#if TRACE_CPU_HOTPLUG
+static void probe_cpu_hotplug(void *ignore, unsigned int state, int cpu_id)
+{
+	PWCollector_msg_t output_sample;
+	event_sample_t event_sample;
+	u64 sample_tsc;
+
+	tscval(&sample_tsc);
+	output_sample.cpuidx = cpu_id;
+	output_sample.tsc = sample_tsc;
+
+	event_sample.data[0] = cpu_id;
+	event_sample.data[1] = state;
+
+#if DO_TPS_EPOCH_COUNTER
+	event_sample.data[2] = read_tps_epoch_i();
+#endif /* DO_TPS_EPOCH_COUNTER*/
+
+	output_sample.data_type = CPUHOTPLUG_SAMPLE;
+	output_sample.data_len = sizeof(event_sample);
+	output_sample.p_data = (u64) ((unsigned long)&event_sample);
+
+	pw_produce_generic_msg(&output_sample, false);	/* "false" ==> don't wake any sleeping readers (required from scheduling context)*/
+}
+#endif /* TRACE_CPU_HOTPLUG*/
+#endif /* __arm__*/
+
+#ifndef __arm__
+static void tpf(int cpu, unsigned int type, u32 curr_req_freq,
+		u32 prev_req_freq)
+{
+	u64 tsc = 0, aperf = 0, mperf = 0;
+	/* u32 prev_req_freq = 0;*/
+	u32 perf_status = 0;
+	u32 prev_perf_status = 0;
+
+#if DO_IOCTL_STATS
+	stats_t *pstats = NULL;
+#endif
+
+	/*
+	 * We're not guaranteed that 'cpu' (which is the CPU on which the frequency transition is occuring) is
+	 * the same as the cpu on which the callback i.e. the 'TPF' probe is executing. This is why we use 'WUWATCH_RDMSR_SAFE_ON_CPU()'
+	 * to read the various MSRs.
+	 */
+	/*
+	 * Read TSC value
+	 */
+	u32 l = 0, h = 0;
+	WARN_ON(WUWATCH_RDMSR_SAFE_ON_CPU(cpu, 0x10, &l, &h));
+	tsc = (u64) h << 32 | (u64) l;
+	/*
+	 * Read CPU_CLK_UNHALTED.REF and CPU_CLK_UNHALTED.CORE. These required ONLY for AXE import
+	 * backward compatibility!
+	 */
+#if 1
+	{
+		WARN_ON(WUWATCH_RDMSR_SAFE_ON_CPU
+			(cpu, CORE_CYCLES_MSR_ADDR, &l, &h));
+		aperf = (u64) h << 32 | (u64) l;
+
+		WARN_ON(WUWATCH_RDMSR_SAFE_ON_CPU
+			(cpu, REF_CYCLES_MSR_ADDR, &l, &h));
+		mperf = (u64) h << 32 | (u64) l;
+	}
+#endif
+
+	/*
+	 * Read the IA32_PERF_STATUS MSR. Bits 12:8 (on Atom ) or 15:0 (on big-core) of this determines
+	 * the frequency the H/W is currently running at.
+	 * We delegate the actual frequency computation to Ring-3 because the PERF_STATUS encoding is
+	 * actually model-specific.
+	 */
+	WARN_ON(WUWATCH_RDMSR_SAFE_ON_CPU
+		(cpu, IA32_PERF_STATUS_MSR_ADDR, &l, &h));
+	/* We're only interested in the lower 16 bits!*/
+	/* perf_status = l */
+	/*
+	 * Update: 'TPF' is FORWARD facing -- make it BACKWARDS facing here.
+	 */
+	{
+		prev_perf_status = per_cpu(pcpu_prev_perf_status_val, cpu);
+		per_cpu(pcpu_prev_perf_status_val, cpu) = l;
+	}
+	perf_status = prev_perf_status;
+	/*
+	   if (false) {
+	   printk(KERN_INFO "[%d]: prev perf status = %u, curr perf status = %u\n", cpu, prev_perf_status, l);
+	   }
+	 */
+
+	/*
+	 * Retrieve the previous requested frequency, if any.
+	 */
+	if (unlikely(prev_req_freq == 0x0)) {
+		prev_req_freq = per_cpu(pcpu_prev_req_freq, cpu);
+	}
+	per_cpu(pcpu_prev_req_freq, cpu) = curr_req_freq;
+
+	produce_p_sample(cpu, tsc, prev_req_freq, perf_status, 0 /* boundary */ , aperf, mperf);	/* "0" ==> NOT a boundary sample*/
+
+	OUTPUT(0,
+	       KERN_INFO
+	       "[%d]: TSC = %llu, OLD_req_freq = %u, NEW_REQ_freq = %u, perf_status = %u\n",
+	       cpu, tsc, prev_req_freq, curr_req_freq, perf_status);
+
+#if DO_IOCTL_STATS
+	{
+		pstats = &get_cpu_var(per_cpu_stats);
+		local_inc(&pstats->p_trans);
+		put_cpu_var(pstats);
+	}
+#endif /* DO_IOCTL_STATS*/
+};
+#endif /* not def __arm__*/
+
+#if DO_CPUFREQ_NOTIFIER
+/*
+ * CPUFREQ notifier callback function.
+ * Used in cases where the default
+ * power frequency tracepoint mechanism
+ * is broken (e.g. MFLD).
+ */
+static int apwr_cpufreq_notifier(struct notifier_block *block,
+				 unsigned long val, void *data)
+{
+	struct cpufreq_freqs *freq = data;
+	u32 old_state = freq->old;	/* "state" is frequency CPU is ABOUT TO EXECUTE AT*/
+	u32 new_state = freq->new;	/* "state" is frequency CPU is ABOUT TO EXECUTE AT*/
+	int cpu = freq->cpu;
+
+	if (unlikely(!IS_FREQ_MODE())) {
+		return SUCCESS;
+	}
+
+	if (val == CPUFREQ_POSTCHANGE) {
+#ifndef __arm__
+		/* DO_PER_CPU_OVERHEAD_FUNC(tpf, cpu, 2, new_state, old_state);*/
+		tpf(cpu, 2, new_state, old_state);
+#else
+		u64 tsc;
+		u32 prev_req_freq = 0;
+		u32 perf_status = 0;
+
+		tscval(&tsc);
+
+		prev_req_freq = per_cpu(pcpu_prev_req_freq, cpu);
+		per_cpu(pcpu_prev_req_freq, cpu) = state;
+
+		perf_status = prev_req_freq / INTERNAL_STATE.bus_clock_freq_khz;
+		produce_p_sample(cpu, tsc, prev_req_freq, perf_status, 0 /* boundary */ , 0, 0);	/* "0" ==> NOT a boundary sample*/
+#endif /* not def __arm__*/
+	}
+	return SUCCESS;
+};
+
+static struct notifier_block apwr_cpufreq_notifier_block = {
+	.notifier_call = &apwr_cpufreq_notifier
+};
+
+#else /* DO_CPUFREQ_NOTIFIER*/
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)	/* Use 'trace_power_frequency()'*/
+/*
+ * P-state transition probe.
+ *
+ * "type" is ALWAYS "2" (i.e. "POWER_PSTATE", see "include/trace/power.h")
+ * "state" is the NEXT frequency range the CPU is going to enter (see "arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c")
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
+static void probe_power_frequency(unsigned int type, unsigned int state)
+#else
+static void probe_power_frequency(void *ignore, unsigned int type,
+				  unsigned int state)
+#endif
+{
+	if (unlikely(!IS_FREQ_MODE())) {
+		return;
+	}
+	DO_PER_CPU_OVERHEAD_FUNC(tpf, CPU(), type, state,
+				 0 /* prev freq, 0 ==> use pcpu var */ );
+};
+
+#else /* version >= 2.6.38 ==> Use 'trace_cpu_frequency()'*/
+static void probe_cpu_frequency(void *ignore, unsigned int new_freq,
+				unsigned int cpu)
+{
+	if (unlikely(!IS_FREQ_MODE())) {
+		return;
+	}
+	DO_PER_CPU_OVERHEAD_FUNC(tpf, cpu, 2 /* type, don't care */ , new_freq,
+				 0 /* prev freq, 0 ==> use pcpu var */ );
+};
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)*/
+#endif /* DO_CPUFREQ_NOTIFIER*/
+
+/*
+ * Helper function for "probe_sched_exit"
+ * Useful for overhead measurements.
+ */
+static void exit_helper(struct task_struct *task)
+{
+	pid_t tid = task->pid, pid = task->tgid;
+
+	OUTPUT(3, KERN_INFO "[%d]: SCHED_EXIT\n", tid);
+	/*
+	 * Delete all (non-Kernel) timer mappings created
+	 * for this thread.
+	 */
+	delete_timers_for_tid(tid);
+	/*
+	 * Delete any sys-node mappings created on behalf
+	 * of this thread.
+	 */
+	check_and_delete_proc_from_sys_list(tid, pid);
+
+};
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
+static void probe_sched_process_exit(struct task_struct *task)
+#else
+static void probe_sched_process_exit(void *ignore, struct task_struct *task)
+#endif
+{
+	pid_t tid = task->pid, pid = task->tgid;
+	const char *name = task->comm;
+	u64 tsc;
+
+	OUTPUT(3, KERN_INFO "[%d, %d]: %s exitting\n", tid, pid, name);
+
+	DO_PER_CPU_OVERHEAD_FUNC(exit_helper, task);
+
+	/*
+	 * Track task exits ONLY IF COLLECTION
+	 * ONGOING!
+	 * UPDATE: track if COLLECTION ONGOING OR
+	 * IF IN PAUSED STATE!
+	 */
+	if (!IS_COLLECTING() && !IS_SLEEPING()) {
+		return;
+	}
+
+	tscval(&tsc);
+
+	produce_r_sample(CPU(), tsc, PW_PROC_EXIT, tid, pid, name);
+};
+
+inline void
+    __attribute__ ((always_inline)) sched_wakeup_helper_i(struct task_struct
+							  *task)
+{
+	int target_cpu = task_cpu(task), source_cpu = CPU();
+	/*
+	 * "Self-sched" samples are "don't care".
+	 */
+	if (target_cpu != source_cpu) {
+
+		PWCollector_msg_t output_sample;
+		event_sample_t event_sample;
+		u64 sample_tsc;
+
+		tscval(&sample_tsc);
+		output_sample.cpuidx = source_cpu;
+		output_sample.tsc = sample_tsc;
+
+		event_sample.data[0] = source_cpu;
+		event_sample.data[1] = target_cpu;
+
+#if DO_TPS_EPOCH_COUNTER
+		event_sample.data[2] = read_tps_epoch_i();
+#endif /* DO_TPS_EPOCH_COUNTER*/
+
+		output_sample.data_type = SCHED_SAMPLE;
+		output_sample.data_len = sizeof(event_sample);
+		output_sample.p_data = (u64) ((unsigned long)&event_sample);
+
+		pw_produce_generic_msg(&output_sample, false);	/* "false" ==> don't wake any sleeping readers (required from scheduling context)*/
+	}
+};
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
+static void probe_sched_wakeup(struct rq *rq, struct task_struct *task,
+			       int success)
+#else
+static void probe_sched_wakeup(void *ignore, struct task_struct *task,
+			       int success)
+#endif
+{
+	if (likely(IS_COLLECTING())) {
+		sched_wakeup_helper_i(task);
+	}
+};
+
+inline bool __attribute__ ((always_inline)) is_sleep_syscall_i(long id)
+{
+	switch (id) {
+	case __NR_poll:	/* 7*/
+	case __NR_select:	/* 23*/
+	case __NR_nanosleep:	/* 35*/
+	case __NR_alarm:	/* 37*/
+	case __NR_setitimer:	/* 38*/
+	case __NR_rt_sigtimedwait:	/* 128*/
+	case __NR_futex:	/* 202*/
+	case __NR_timer_settime:	/* 223*/
+	case __NR_clock_nanosleep:	/* 230*/
+	case __NR_epoll_wait:	/* 232*/
+	case __NR_pselect6:	/* 270*/
+	case __NR_ppoll:	/* 271*/
+	case __NR_epoll_pwait:	/* 281*/
+	case __NR_timerfd_settime:	/* 286*/
+		return true;
+	default:
+		break;
+	}
+	return false;
+};
+
+inline void
+    __attribute__ ((always_inline)) sys_enter_helper_i(long id, pid_t tid,
+						       pid_t pid)
+{
+	if (check_and_add_proc_to_sys_list(tid, pid)) {
+		pw_pr_error("ERROR: could NOT add proc to sys list!\n");
+	}
+	return;
+};
+
+inline void
+    __attribute__ ((always_inline)) sys_exit_helper_i(long id, pid_t tid,
+						      pid_t pid)
+{
+	check_and_remove_proc_from_sys_list(tid, pid);
+};
+
+#if DO_PROBE_ON_SYSCALL_ENTER_EXIT
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
+static void probe_sys_enter(struct pt_regs *regs, long ret)
+#else
+static void probe_sys_enter(void *ignore, struct pt_regs *regs, long ret)
+#endif
+{
+	long id = syscall_get_nr(current, regs);
+	pid_t tid = TID(), pid = PID();
+
+	if (is_sleep_syscall_i(id)) {
+		DO_PER_CPU_OVERHEAD_FUNC(sys_enter_helper_i, id, tid, pid);
+	}
+};
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
+static void probe_sys_exit(struct pt_regs *regs, long ret)
+#else
+static void probe_sys_exit(void *ignore, struct pt_regs *regs, long ret)
+#endif
+{
+	long id = syscall_get_nr(current, regs);
+	pid_t tid = TID(), pid = PID();
+
+	DO_PER_CPU_OVERHEAD_FUNC(sys_exit_helper_i, id, tid, pid);
+
+	if (id == __NR_execve && IS_COLLECTING()) {
+		u64 tsc;
+
+		tscval(&tsc);
+		OUTPUT(3,
+		       KERN_INFO "[%d]: EXECVE ENTER! TID = %d, NAME = %.20s\n",
+		       CPU(), TID(), NAME());
+		produce_r_sample(CPU(), tsc, PW_PROC_EXEC, TID(), PID(),
+				 NAME());
+	}
+};
+#endif /* DO_PROBE_ON_SYSCALL_ENTER_EXIT*/
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
+static void probe_sched_process_fork(struct task_struct *parent,
+				     struct task_struct *child)
+#else
+static void probe_sched_process_fork(void *ignore, struct task_struct *parent,
+				     struct task_struct *child)
+#endif
+{
+	const char *cname = child->comm;
+	pid_t ctid = child->pid, cpid = child->tgid;
+	u64 tsc;
+
+	tscval(&tsc);
+
+	OUTPUT(3, KERN_INFO "DEBUG: PROCESS_FORK: %d (%.20s) --> %d (%.20s) \n",
+	       parent->pid, parent->comm, child->pid, cname);
+
+	if (IS_COLLECTING() || IS_SLEEPING()) {
+		produce_r_sample(CPU(), tsc, PW_PROC_FORK, ctid, cpid, cname);
+	}
+};
+
+#ifdef CONFIG_MODULES
+/*
+ * Notifier for module loads and frees.
+ * We register module load and free events -- extract memory bounds for
+ * the module (on load). Also track TID, NAME for tracking device driver timers.
+ */
+int apwr_mod_notifier(struct notifier_block *block, unsigned long val,
+		      void *data)
+{
+	struct module *mod = data;
+	int cpu = CPU();
+	const char *name = mod->name;
+	unsigned long module_core = (unsigned long)mod->module_core;
+	unsigned long core_size = mod->core_size;
+
+	if (IS_COLLECTING() || IS_SLEEPING()) {
+		if (val == MODULE_STATE_COMING) {
+			OUTPUT(0,
+			       KERN_INFO
+			       "COMING: tid = %d, pid = %d, name = %s, module_core = %lu\n",
+			       TID(), PID(), name, module_core);
+			produce_m_sample(cpu, name, module_core, core_size);
+		} else if (val == MODULE_STATE_GOING) {
+			OUTPUT(0,
+			       KERN_INFO
+			       "GOING: tid = %d, pid = %d, name = %s\n", TID(),
+			       PID(), name);
+		}
+	}
+	return SUCCESS;
+};
+
+static struct notifier_block apwr_mod_notifier_block = {
+	.notifier_call = &apwr_mod_notifier
+};
+#endif
+
+#if DO_WAKELOCK_SAMPLE
+/*
+ * Wakelock hooks
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
+static void probe_wake_lock(struct wake_lock *lock)
+#else
+static void probe_wake_lock(void *ignore, struct wake_lock *lock)
+#endif
+{
+	u64 tsc;
+	u64 timeout = 0;
+	w_sample_type_t wtype;
+
+	/*
+	 * Track task exits ONLY IF COLLECTION
+	 * ONGOING OR IF IN PAUSED STATE!
+	 */
+	if (!IS_COLLECTING() && !IS_SLEEPING()) {
+		return;
+	}
+
+	tscval(&tsc);
+	if (lock->flags & (1U << 10)) {	/* Check if WAKE_LOCK_AUTO_EXPIRE is flagged*/
+		wtype = PW_WAKE_LOCK_TIMEOUT;
+		timeout = jiffies_to_msecs(lock->expires - jiffies);
+	} else {
+		wtype = PW_WAKE_LOCK;
+	}
+
+	BUG_ON(!lock->name);
+	produce_w_sample(CPU(), tsc, wtype, TID(), PID(), lock->name, NAME(),
+			 timeout);
+
+	OUTPUT(0,
+	       "wake_lock: type=%d, name=%s, timeout=%llu (msec), CPU=%d, PID=%d, TSC=%llu\n",
+	       wtype, lock->name, timeout, CPU(), PID(), tsc);
+};
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
+static void probe_wake_unlock(struct wake_lock *lock)
+#else
+static void probe_wake_unlock(void *ignore, struct wake_lock *lock)
+#endif
+{
+	u64 tsc;
+
+	/*
+	 * Track task exits ONLY IF COLLECTION
+	 * ONGOING OR IF IN PAUSED STATE!
+	 */
+	if (!IS_COLLECTING() && !IS_SLEEPING()) {
+		return;
+	}
+
+	tscval(&tsc);
+
+	produce_w_sample(CPU(), tsc, PW_WAKE_UNLOCK, TID(), PID(), lock->name,
+			 NAME(), 0);
+
+	OUTPUT(0, "wake_unlock: name=%s, CPU=%d, PID=%d, TSC=%llu\n",
+	       lock->name, CPU(), PID(), tsc);
+};
+
+#else
+static void probe_wakeup_source_activate(void *ignore, const char *name,
+					 unsigned int state)
+{
+	u64 tsc;
+	u64 timeout = 0;
+	w_sample_type_t wtype;
+
+	if (name == NULL) {
+		printk(KERN_INFO "wake_lock: name=UNKNOWNs, state=%u\n", state);
+		return;
+	}
+
+	/*
+	 * Track task exits ONLY IF COLLECTION
+	 * ONGOING OR IF IN PAUSED STATE!
+	 */
+	if (!IS_COLLECTING() && !IS_SLEEPING()) {
+		return;
+	}
+
+	tscval(&tsc);
+	wtype = PW_WAKE_LOCK;
+
+	produce_w_sample(CPU(), tsc, wtype, TID(), PID(), name, NAME(),
+			 timeout);
+
+	OUTPUT(0,
+	       "wake_lock: type=%d, name=%s, timeout=%llu (msec), CPU=%d, PID=%d, TSC=%llu\n",
+	       wtype, name, timeout, CPU(), PID(), tsc);
+};
+
+static void probe_wakeup_source_deactivate(void *ignore, const char *name,
+					   unsigned int state)
+{
+	u64 tsc;
+
+	if (name == NULL) {
+		printk(KERN_INFO "wake_unlock: name=UNKNOWNs, state=%u\n",
+		       state);
+		return;
+	}
+
+	/*
+	 * Track task exits ONLY IF COLLECTION
+	 * ONGOING OR IF IN PAUSED STATE!
+	 */
+	if (!IS_COLLECTING() && !IS_SLEEPING()) {
+		return;
+	}
+
+	tscval(&tsc);
+
+	produce_w_sample(CPU(), tsc, PW_WAKE_UNLOCK, TID(), PID(), name, NAME(),
+			 0);
+
+	OUTPUT(0, "wake_unlock: name=%s, CPU=%d, PID=%d, TSC=%llu\n", name,
+	       CPU(), PID(), tsc);
+};
+#endif
+#endif
+
+static int register_timer_callstack_probes(void)
+{
+	int ret = 0;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
+	{
+		OUTPUT(0, KERN_INFO "\tTIMER_INIT_EVENTS");
+		ret = register_trace_hrtimer_init(probe_hrtimer_init);
+		WARN_ON(ret);
+		ret = register_trace_timer_init(probe_timer_init);
+		WARN_ON(ret);
+	}
+
+	{
+		OUTPUT(0, KERN_INFO "\tITIMER_STATE_EVENTS");
+		ret = register_trace_itimer_state(probe_itimer_state);
+		WARN_ON(ret);
+	}
+
+	{
+		OUTPUT(0, KERN_INFO "\tTIMER_START_EVENTS");
+		ret = register_trace_hrtimer_start(probe_hrtimer_start);
+		WARN_ON(ret);
+	}
+
+	{
+		OUTPUT(0, KERN_INFO "\tSCHED_EXIT_EVENTS");
+		ret =
+		    register_trace_sched_process_exit(probe_sched_process_exit);
+		WARN_ON(ret);
+	}
+
+#else
+
+	{
+		OUTPUT(0, KERN_INFO "\tTIMER_INIT_EVENTS");
+		ret = register_trace_hrtimer_init(probe_hrtimer_init, NULL);
+		WARN_ON(ret);
+		ret = register_trace_timer_init(probe_timer_init, NULL);
+		WARN_ON(ret);
+	}
+
+	{
+		OUTPUT(0, KERN_INFO "\tITIMER_STATE_EVENTS");
+		ret = register_trace_itimer_state(probe_itimer_state, NULL);
+		WARN_ON(ret);
+	}
+
+	{
+		OUTPUT(0, KERN_INFO "\tTIMER_START_EVENTS");
+		ret = register_trace_hrtimer_start(probe_hrtimer_start, NULL);
+		WARN_ON(ret);
+	}
+
+	{
+		OUTPUT(0, KERN_INFO "\tSCHED_EVENTS");
+		ret =
+		    register_trace_sched_process_exit(probe_sched_process_exit,
+						      NULL);
+		WARN_ON(ret);
+	}
+
+#endif /* KERNEL_VER*/
+	return SUCCESS;
+};
+
+static void unregister_timer_callstack_probes(void)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
+	{
+		unregister_trace_hrtimer_init(probe_hrtimer_init);
+		unregister_trace_timer_init(probe_timer_init);
+
+		tracepoint_synchronize_unregister();
+	}
+
+	{
+		unregister_trace_itimer_state(probe_itimer_state);
+
+		tracepoint_synchronize_unregister();
+	}
+
+	{
+		unregister_trace_hrtimer_start(probe_hrtimer_start);
+
+		tracepoint_synchronize_unregister();
+	}
+
+	{
+		unregister_trace_sched_process_exit(probe_sched_process_exit);
+
+		tracepoint_synchronize_unregister();
+	}
+
+#else
+
+	{
+		unregister_trace_hrtimer_init(probe_hrtimer_init, NULL);
+		unregister_trace_timer_init(probe_timer_init, NULL);
+
+		tracepoint_synchronize_unregister();
+	}
+
+	{
+		unregister_trace_itimer_state(probe_itimer_state, NULL);
+
+		tracepoint_synchronize_unregister();
+	}
+
+	{
+		unregister_trace_hrtimer_start(probe_hrtimer_start, NULL);
+
+		tracepoint_synchronize_unregister();
+	}
+
+	{
+		unregister_trace_sched_process_exit(probe_sched_process_exit,
+						    NULL);
+
+		tracepoint_synchronize_unregister();
+	}
+
+#endif /* KERNEL_VER*/
+};
+
+/*
+ * Register all probes which should be registered
+ * REGARDLESS OF COLLECTION STATUS.
+ */
+static int register_permanent_probes(void)
+{
+	if (probe_on_syscalls) {
+#if DO_PROBE_ON_SYSCALL_ENTER_EXIT
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
+		{
+			WARN_ON(register_trace_sys_enter(probe_sys_enter));
+			WARN_ON(register_trace_sys_exit(probe_sys_exit));
+		}
+#else /* LINUX_VERSION*/
+		{
+			WARN_ON(register_trace_sys_enter
+				(probe_sys_enter, NULL));
+			WARN_ON(register_trace_sys_exit(probe_sys_exit, NULL));
+		}
+#endif /* LINUX_VERSION*/
+#endif /* DO_PROBE_ON_SYSCALL_ENTER_EXIT*/
+	}
+	return register_timer_callstack_probes();
+};
+
+static void unregister_permanent_probes(void)
+{
+	if (probe_on_syscalls) {
+#if DO_PROBE_ON_SYSCALL_ENTER_EXIT
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
+		{
+			unregister_trace_sys_enter(probe_sys_enter);
+			unregister_trace_sys_exit(probe_sys_exit);
+
+			tracepoint_synchronize_unregister();
+		}
+#else /* LINUX_VERSION*/
+		{
+			unregister_trace_sys_enter(probe_sys_enter, NULL);
+			unregister_trace_sys_exit(probe_sys_exit, NULL);
+
+			tracepoint_synchronize_unregister();
+		}
+#endif /* LINUX_VERSION*/
+#endif /* DO_PROBE_ON_SYSCALL_ENTER_EXIT*/
+	}
+
+	unregister_timer_callstack_probes();
+};
+
+/*
+ * Register all probes which should be registered
+ * ONLY FOR AN ONGOING, NON-PAUSED COLLECTION.
+ */
+static int register_non_pausable_probes(void)
+{
+	/* timer expire*/
+	/* irq*/
+	/* tps*/
+	/* tpf*/
+	int ret = 0;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
+
+	/*
+	 * ONLY required for "SLEEP" mode i.e. C-STATES
+	 */
+	if (IS_SLEEP_MODE() || IS_C_STATE_MODE()) {
+		if (IS_C_STATE_MODE()) {
+			OUTPUT(0, KERN_INFO "C_STATE MODE REQUESTED\n");
+			{
+				OUTPUT(0, KERN_INFO "\tTRACE_BREAK_EVENTS");
+				ret =
+				    register_trace_timer_expire_entry
+				    (probe_timer_expire_entry);
+				WARN_ON(ret);
+				ret =
+				    register_trace_hrtimer_expire_entry
+				    (probe_hrtimer_expire_entry);
+				WARN_ON(ret);
+				ret =
+				    register_trace_hrtimer_expire_exit
+				    (probe_hrtimer_expire_exit);
+				WARN_ON(ret);
+				ret =
+				    register_trace_irq_handler_entry
+				    (probe_irq_handler_entry);
+				WARN_ON(ret);
+				ret =
+				    register_trace_softirq_entry
+				    (probe_softirq_entry);
+				WARN_ON(ret);
+				ret =
+				    register_trace_sched_wakeup
+				    (probe_sched_wakeup);
+				WARN_ON(ret);
+				ret =
+				    register_trace_workqueue_execution
+				    (probe_workqueue_execution);
+				if (ret) {
+					printk(KERN_INFO
+					       "WARNING: trace_workqueue_execute_start did NOT succeed!\n");
+				}
+				/* WARN_ON(ret);*/
+			}
+		}
+		{
+			OUTPUT(0, KERN_INFO "\tCSTATE_EVENTS");
+			ret = register_trace_power_start(probe_power_start);
+			WARN_ON(ret);
+		}
+#ifdef __arm__
+		{
+			OUTPUT(0, KERN_INFO "\tCSTATE_EVENTS");
+			ret = register_trace_power_end(probe_power_end);
+			WARN_ON(ret);
+		}
+#endif /* __arm__*/
+	}
+
+#else /* KERNEL_VER*/
+
+	/*
+	 * ONLY required for "SLEEP" mode i.e. C-STATES
+	 */
+	if (IS_SLEEP_MODE() || IS_C_STATE_MODE()) {
+		if (IS_C_STATE_MODE()) {
+			OUTPUT(0, KERN_INFO "C_STATE MODE REQUESTED\n");
+			{
+				OUTPUT(0, KERN_INFO "\tTRACE_BREAK_EVENTS");
+				ret =
+				    register_trace_timer_expire_entry
+				    (probe_timer_expire_entry, NULL);
+				WARN_ON(ret);
+				ret =
+				    register_trace_hrtimer_expire_entry
+				    (probe_hrtimer_expire_entry, NULL);
+				WARN_ON(ret);
+				ret =
+				    register_trace_hrtimer_expire_exit
+				    (probe_hrtimer_expire_exit, NULL);
+				WARN_ON(ret);
+				ret =
+				    register_trace_irq_handler_entry
+				    (probe_irq_handler_entry, NULL);
+				WARN_ON(ret);
+				ret =
+				    register_trace_softirq_entry
+				    (probe_softirq_entry, NULL);
+				WARN_ON(ret);
+				ret =
+				    register_trace_sched_wakeup
+				    (probe_sched_wakeup, NULL);
+				WARN_ON(ret);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
+				{
+					ret =
+					    register_trace_workqueue_execution
+					    (probe_workqueue_execution, NULL);
+					if (ret) {
+						printk(KERN_INFO
+						       "WARNING: trace_workqueue_execute_start did NOT succeed!\n");
+					}
+					/* WARN_ON(ret);*/
+				}
+#else /* 2.6.36 <= version < 2.6.38*/
+				{
+					ret =
+					    register_trace_workqueue_execute_start
+					    (probe_workqueue_execute_start,
+					     NULL);
+					if (ret) {
+						printk(KERN_INFO
+						       "WARNING: trace_workqueue_execute_start did NOT succeed!\n");
+					}
+					/* WARN_ON(ret);*/
+				}
+#endif /* version < 2.6.36*/
+			}
+		}
+		/*
+		 * ONLY required for "SLEEP" mode i.e. C-STATES
+		 */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)
+		{
+			OUTPUT(0, KERN_INFO "\tCSTATE_EVENTS");
+			ret =
+			    register_trace_power_start(probe_power_start, NULL);
+			WARN_ON(ret);
+		}
+#ifdef __arm__
+		{
+			OUTPUT(0, KERN_INFO "\tCSTATE_EVENTS");
+			ret = register_trace_power_end(probe_power_end, NULL);
+			WARN_ON(ret);
+		}
+#endif /* __arm__*/
+#else /* version >= 2.6.38*/
+		{
+			OUTPUT(0, KERN_INFO "\tCSTATE_EVENTS");
+			ret = register_trace_cpu_idle(probe_cpu_idle, NULL);
+			WARN_ON(ret);
+		}
+#ifdef __arm__
+#if TRACE_CPU_HOTPLUG
+		{
+			OUTPUT(0, KERN_INFO "\tCPU_ON_OFF_EVENTS");
+			ret =
+			    register_trace_cpu_hotplug(probe_cpu_hotplug, NULL);
+			WARN_ON(ret);
+		}
+#endif /* TRACE_CPU_HOTPLUG*/
+#endif /* __arm__*/
+		if (IS_C_STATE_MODE()) {
+			ret =
+			    register_trace_workqueue_execute_start
+			    (probe_workqueue_execute_start, NULL);
+			if (ret) {
+				printk(KERN_INFO
+				       "WARNING: trace_workqueue_execute_start did NOT succeed!\n");
+			}
+			/* WARN_ON(ret);*/
+		}
+#endif /* LINUX_VERSION_CODE < 2.6.38*/
+	}
+#endif /* KERNEL_VER*/
+	return SUCCESS;
+};
+
+static void unregister_non_pausable_probes(void)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
+	/* #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)*/
+
+	/*
+	 * ONLY required for "SLEEP" mode i.e. C-STATES
+	 */
+	if (IS_SLEEP_MODE() || IS_C_STATE_MODE()) {
+		if (IS_C_STATE_MODE()) {
+			OUTPUT(0, KERN_INFO "C_STATE MODE REQUESTED\n");
+			{
+				unregister_trace_timer_expire_entry
+				    (probe_timer_expire_entry);
+				unregister_trace_hrtimer_expire_entry
+				    (probe_hrtimer_expire_entry);
+				unregister_trace_hrtimer_expire_exit
+				    (probe_hrtimer_expire_exit);
+				unregister_trace_irq_handler_entry
+				    (probe_irq_handler_entry);
+				unregister_trace_softirq_entry
+				    (probe_softirq_entry);
+				unregister_trace_sched_wakeup
+				    (probe_sched_wakeup);
+				unregister_trace_workqueue_execution
+				    (probe_workqueue_execution);
+
+				tracepoint_synchronize_unregister();
+			}
+			/*
+			 * ONLY required for "SLEEP" mode i.e. C-STATES
+			 */
+			{
+				unregister_trace_power_start(probe_power_start);
+#ifdef __arm__
+				unregister_trace_power_end(probe_power_end);
+#endif /* __arm__*/
+				tracepoint_synchronize_unregister();
+			}
+		}
+	}
+
+#else /* KERNEL_VER*/
+
+	/*
+	 * ONLY required for "SLEEP" mode i.e. C-STATES
+	 */
+	if (IS_SLEEP_MODE() || IS_C_STATE_MODE()) {
+		if (IS_C_STATE_MODE()) {
+			OUTPUT(0, KERN_INFO "C_STATE MODE REQUESTED\n");
+			{
+				unregister_trace_timer_expire_entry
+				    (probe_timer_expire_entry, NULL);
+				unregister_trace_hrtimer_expire_entry
+				    (probe_hrtimer_expire_entry, NULL);
+				unregister_trace_hrtimer_expire_exit
+				    (probe_hrtimer_expire_exit, NULL);
+				unregister_trace_irq_handler_entry
+				    (probe_irq_handler_entry, NULL);
+				unregister_trace_softirq_entry
+				    (probe_softirq_entry, NULL);
+				unregister_trace_sched_wakeup
+				    (probe_sched_wakeup, NULL);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
+				{
+					unregister_trace_workqueue_execution
+					    (probe_workqueue_execution, NULL);
+
+					tracepoint_synchronize_unregister();
+				}
+#else /* 2.6.36 <= version < 2.6.38*/
+				{
+					unregister_trace_workqueue_execute_start
+					    (probe_workqueue_execute_start,
+					     NULL);
+
+					tracepoint_synchronize_unregister();
+				}
+#endif /* version < 2.6.36*/
+
+				tracepoint_synchronize_unregister();
+			}
+		}
+		/*
+		 * ONLY required for "SLEEP" mode i.e. C-STATES
+		 */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)
+		{
+			unregister_trace_power_start(probe_power_start, NULL);
+#ifdef __arm__
+			unregister_trace_power_end(probe_power_end, NULL);
+#endif /* __arm__*/
+			tracepoint_synchronize_unregister();
+		}
+#else /* version >= 2.6.38*/
+		{
+			unregister_trace_cpu_idle(probe_cpu_idle, NULL);
+
+			tracepoint_synchronize_unregister();
+		}
+#ifdef __arm__
+#if TRACE_CPU_HOTPLUG
+		{
+			unregister_trace_cpu_hotplug(probe_cpu_hotplug, NULL);
+
+			tracepoint_synchronize_unregister();
+		}
+#endif /* TRACE_CPU_HOTPLUG*/
+#endif /* __arm__*/
+		if (IS_C_STATE_MODE()) {
+			unregister_trace_workqueue_execute_start
+			    (probe_workqueue_execute_start, NULL);
+
+			tracepoint_synchronize_unregister();
+		}
+#endif /* LINUX_VERSION_CODE < 2.6.38*/
+	}
+#endif /* KERNEL_VER*/
+};
+
+/*
+ * Register all probes which must be registered
+ * ONLY FOR AN ONGOING (i.e. START/PAUSED) COLLECTION.
+ */
+static int register_pausable_probes(void)
+{
+	int ret = 0;
+	/* sys_exit*/
+	/* sched_fork*/
+	/* module_notifier*/
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
+	/*
+	 * ALWAYS required.
+	 */
+#ifdef CONFIG_MODULES
+	{
+		OUTPUT(0, KERN_INFO "\tMOD_NOTIFIER_EVENTS");
+		register_module_notifier(&apwr_mod_notifier_block);
+	}
+#endif
+	/*
+	 * ALWAYS required.
+	 */
+	{
+		OUTPUT(0, KERN_INFO "\tSCHED_FORK_EVENTS");
+		ret =
+		    register_trace_sched_process_fork(probe_sched_process_fork);
+		WARN_ON(ret);
+	}
+
+	/*
+	 * ALWAYS required.
+	 */
+#if DO_PROBE_ON_SYSCALL_ENTER_EXIT
+
+#endif /* DO_PROBE_ON_SYSCALL_ENTER_EXIT*/
+
+	/*
+	 * ONLY required for "FREQ" mode i.e. P-STATES
+	 */
+	if (IS_FREQ_MODE()) {
+		OUTPUT(0, KERN_INFO "FREQ MODE REQUESTED\n");
+#if DO_CPUFREQ_NOTIFIER
+		{
+			OUTPUT(0, KERN_INFO "\tPSTATE_EVENTS\n");
+			cpufreq_register_notifier(&apwr_cpufreq_notifier_block,
+						  CPUFREQ_TRANSITION_NOTIFIER);
+		}
+#else /* DO_CPUFREQ_NOTIFIER*/
+		{
+			OUTPUT(0, KERN_INFO "\tPSTATE_EVENTS\n");
+			ret =
+			    register_trace_power_frequency
+			    (probe_power_frequency);
+			WARN_ON(ret);
+		}
+#endif /* DO_CPUFREQ_NOTIFIER*/
+	}
+
+	if (IS_WAKELOCK_MODE()) {
+#if DO_WAKELOCK_SAMPLE
+		OUTPUT(0, KERN_INFO "\tWAKELOCK_EVENTS");
+		ret = register_trace_wake_lock(probe_wake_lock);
+		WARN_ON(ret);
+
+		OUTPUT(0, KERN_INFO "\tWAKEUNLOCK_EVENTS");
+		ret = register_trace_wake_unlock(probe_wake_unlock);
+		WARN_ON(ret);
+#endif
+	}
+#else /* KERNEL_VERSION >= 2.6.35*/
+
+	/*
+	 * ALWAYS required.
+	 */
+#ifdef CONFIG_MODULES
+	{
+		OUTPUT(0, KERN_INFO "\tMOD_NOTIFIER_EVENTS");
+		register_module_notifier(&apwr_mod_notifier_block);
+	}
+#endif
+
+	/*
+	 * ALWAYS required.
+	 */
+	{
+		OUTPUT(0, KERN_INFO "\tSCHED_FORK_EVENTS");
+		ret =
+		    register_trace_sched_process_fork(probe_sched_process_fork,
+						      NULL);
+		WARN_ON(ret);
+	}
+
+	/*
+	 * ALWAYS required.
+	 */
+#if DO_PROBE_ON_SYSCALL_ENTER_EXIT
+
+#endif /* DO_PROBE_ON_SYSCALL_ENTER_EXIT*/
+
+	/*
+	 * ONLY required for "FREQ" mode i.e. P-STATES
+	 */
+	if (IS_FREQ_MODE()) {
+		OUTPUT(0, KERN_INFO "FREQ MODE REQUESTED!\n");
+#if DO_CPUFREQ_NOTIFIER
+		{
+			OUTPUT(0, KERN_INFO "\tPSTATE_EVENTS\n");
+			cpufreq_register_notifier(&apwr_cpufreq_notifier_block,
+						  CPUFREQ_TRANSITION_NOTIFIER);
+		}
+#else /* DO_CPUFREQ_NOTIFIER*/
+		{
+			OUTPUT(0, KERN_INFO "\tPSTATE_EVENTS\n");
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)	/* Use 'trace_power_frequency()'*/
+			ret =
+			    register_trace_power_frequency
+			    (probe_power_frequency, NULL);
+#else /* Use 'trace_cpu_frequency()'*/
+			ret =
+			    register_trace_cpu_frequency(probe_cpu_frequency,
+							 NULL);
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)*/
+			WARN_ON(ret);
+		}
+#endif /* DO_CPUFREQ_NOTIFIER*/
+	}
+
+	if (IS_WAKELOCK_MODE()) {
+#if DO_WAKELOCK_SAMPLE
+		OUTPUT(0, KERN_INFO "\tWAKELOCK_EVENTS");
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0)
+		ret = register_trace_wake_lock(probe_wake_lock, NULL);
+#else
+		ret =
+		    register_trace_wakeup_source_activate
+		    (probe_wakeup_source_activate, NULL);
+#endif
+		WARN_ON(ret);
+
+		OUTPUT(0, KERN_INFO "\tWAKEUNLOCK_EVENTS");
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0)
+		ret = register_trace_wake_unlock(probe_wake_unlock, NULL);
+#else
+		ret =
+		    register_trace_wakeup_source_deactivate
+		    (probe_wakeup_source_deactivate, NULL);
+#endif
+		WARN_ON(ret);
+#endif
+	}
+#endif /* KERNEL_VER*/
+
+	return SUCCESS;
+};
+
+static void unregister_pausable_probes(void)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
+
+	/*
+	 * ALWAYS required.
+	 */
+#ifdef CONFIG_MODULES
+	{
+		unregister_module_notifier(&apwr_mod_notifier_block);
+	}
+#endif
+
+	/*
+	 * ALWAYS required.
+	 */
+	{
+		unregister_trace_sched_process_fork(probe_sched_process_fork);
+
+		tracepoint_synchronize_unregister();
+	}
+
+	/*
+	 * ALWAYS required.
+	 */
+	/*
+	 * ONLY required for "FREQ" mode i.e. P-STATES
+	 */
+	if (IS_FREQ_MODE()) {
+		OUTPUT(0, KERN_INFO "FREQ MODE REQUESTED!\n");
+#if DO_CPUFREQ_NOTIFIER
+		{
+			OUTPUT(0, KERN_INFO "\tPSTATE_EVENTS\n");
+			cpufreq_unregister_notifier
+			    (&apwr_cpufreq_notifier_block,
+			     CPUFREQ_TRANSITION_NOTIFIER);
+		}
+#else /* DO_CPUFREQ_NOTIFIER*/
+		{
+			unregister_trace_power_frequency(probe_power_frequency);
+
+			tracepoint_synchronize_unregister();
+		}
+#endif /* DO_CPUFREQ_NOTIFIER*/
+	}
+
+	if (IS_WAKELOCK_MODE()) {
+#if DO_WAKELOCK_SAMPLE
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0)
+		unregister_trace_wake_lock(probe_wake_lock);
+		unregister_trace_wake_unlock(probe_wake_unlock);
+#else
+		unregister_trace_wakeup_source_activate
+		    (probe_wakeup_source_activate);
+		unregister_trace_wakeup_source_deactivate
+		    (probe_wakeup_source_deactivate);
+#endif
+
+		tracepoint_synchronize_unregister();
+#endif
+	}
+#else /* Kernel version >= 2.6.35*/
+
+	/*
+	 * ALWAYS required.
+	 */
+#ifdef CONFIG_MODULES
+	{
+		unregister_module_notifier(&apwr_mod_notifier_block);
+	}
+#endif
+
+	/*
+	 * ALWAYS required.
+	 */
+	{
+		unregister_trace_sched_process_fork(probe_sched_process_fork,
+						    NULL);
+
+		tracepoint_synchronize_unregister();
+	}
+
+	/*
+	 * ALWAYS required.
+	 */
+
+	/*
+	 * ONLY required for "FREQ" mode i.e. P-STATES
+	 */
+	if (IS_FREQ_MODE()) {
+		OUTPUT(0, KERN_INFO "FREQ MODE REQUESTED\n");
+#if DO_CPUFREQ_NOTIFIER
+		{
+			OUTPUT(0, KERN_INFO "\tPSTATE_EVENTS\n");
+			cpufreq_unregister_notifier
+			    (&apwr_cpufreq_notifier_block,
+			     CPUFREQ_TRANSITION_NOTIFIER);
+		}
+#else /* DO_CPUFREQ_NOTIFIER*/
+		{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)	/* Use 'trace_power_frequency()'*/
+			unregister_trace_power_frequency(probe_power_frequency,
+							 NULL);
+#else /* Use 'trace_cpu_frequency()'*/
+			unregister_trace_cpu_frequency(probe_cpu_frequency,
+						       NULL);
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)*/
+
+			tracepoint_synchronize_unregister();
+		}
+#endif /* DO_CPUFREQ_NOTIFIER*/
+	}
+
+	if (IS_WAKELOCK_MODE()) {
+#if DO_WAKELOCK_SAMPLE
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0)
+		unregister_trace_wake_lock(probe_wake_lock, NULL);
+		unregister_trace_wake_unlock(probe_wake_unlock, NULL);
+#else
+		unregister_trace_wakeup_source_activate
+		    (probe_wakeup_source_activate, NULL);
+		unregister_trace_wakeup_source_deactivate
+		    (probe_wakeup_source_deactivate, NULL);
+#endif
+
+		tracepoint_synchronize_unregister();
+#endif
+	}
+#endif /* KERNEL_VER*/
+};
+
+/*
+ * Service a "read(...)" call from user-space.
+ *
+ * Returns sample information back to the user.
+ * When a user calls the "read" function, the device
+ * driver first checks if any (per-cpu) output buffers are full.
+ * If yes, then the entire contents of that buffer are
+ * copied to the user. If not, then the user blocks, until the
+ * buffer-full condition is met.
+ */
+static ssize_t pw_device_read(struct file *file, char __user * buffer,
+			      size_t length, loff_t * offset)
+{
+	u32 val = 0;
+	bool is_flush_mode = INTERNAL_STATE.drain_buffers;
+
+	if (!buffer) {
+		pw_pr_error("ERROR: \"read\" called with an empty buffer?!\n");
+		return -ERROR;
+	}
+#if 0
+	while (pw_any_seg_full(&val, is_flush_mode) == false) {
+		if (val == PW_ALL_WRITES_DONE_MASK) {
+			BUG_ON(IS_COLLECTING());
+			return 0;	/* "0" ==> EOF*/
+		}
+		val = PW_ALL_WRITES_DONE_MASK;
+		if (wait_event_interruptible
+		    (pw_reader_queue,
+		     ((!IS_COLLECTING() && !IS_SLEEPING())
+		      || pw_any_seg_full(&val, false /* is flush mode */ )))) {
+			pw_pr_error("wait_event_interruptible error\n");
+			return -ERESTARTSYS;
+		}
+		/*
+		 * OK, we were woken up. This can be because we have a full buffer or
+		 * because a 'STOP/CANCEL' cmd was issued. In the first case, we will have a valid
+		 * value for 'val' so check for that here.
+		 */
+		if (val != PW_ALL_WRITES_DONE_MASK) {
+			/* we have a full buffer to return*/
+			break;
+		}
+		/*
+		 * No full buffer exists; we may have been woken up because of a 'STOP' cmd. Loop
+		 * back and check.
+		 */
+		/* is_flush_mode = !IS_COLLECTING() && !IS_SLEEPING();*/
+		is_flush_mode = INTERNAL_STATE.drain_buffers;
+	}
+#else /* if 1*/
+	do {
+		val = PW_ALL_WRITES_DONE_MASK;
+		is_flush_mode = INTERNAL_STATE.drain_buffers;
+		pw_pr_debug(KERN_INFO "Waiting, flush = %s\n",
+			    GET_BOOL_STRING(is_flush_mode));
+		if (wait_event_interruptible
+		    (pw_reader_queue,
+		     (pw_any_seg_full(&val, &INTERNAL_STATE.drain_buffers)
+		      || (!IS_COLLECTING() && !IS_SLEEPING())))) {
+			pw_pr_error("wait_event_interruptible error\n");
+			return -ERESTARTSYS;
+		}
+		pw_pr_debug(KERN_INFO "After wait: val = %u\n", val);
+	} while (val == PW_NO_DATA_AVAIL_MASK);
+#endif /* if 0*/
+	/*
+	 * Are we done producing/consuming?
+	 */
+	if (val == PW_ALL_WRITES_DONE_MASK) {
+		return 0;	/* "0" ==> EOF*/
+	}
+	/*
+	 * 'mmap' unsupported, for now
+	 */
+	if (false && pw_did_mmap) {
+		if (put_user(val, (u32 *) buffer)) {
+			pw_pr_error("ERROR in put_user\n");
+			return -ERROR;
+		}
+		return sizeof(val);	/* 'read' returns # of bytes actually read*/
+	} else {
+		/*
+		 * Copy the buffer contents into userspace.
+		 */
+		size_t bytes_read = 0;
+		unsigned long bytes_not_copied = pw_consume_data(val, buffer, length, &bytes_read);	/* 'read' returns # of bytes actually read*/
+		pw_pr_debug(KERN_INFO "OK: returning %u\n",
+			    (unsigned)bytes_read);
+		if (unlikely(bytes_not_copied)) {
+			return -ERROR;
+		}
+		return bytes_read;
+	}
+};
+
+static unsigned int pw_device_poll(struct file *filp, poll_table * wait)
+{
+	unsigned int mask = 0;
+	u32 dummy = 0;
+
+	poll_wait(filp, &pw_reader_queue, wait);
+
+	if (!IS_COLLECTING() || pw_any_seg_full(&dummy, &INTERNAL_STATE.drain_buffers)) {	/* device is readable if: (a) NOT collecting or (b) any buffers is full*/
+		mask = (POLLIN | POLLRDNORM);
+	}
+
+	return mask;
+};
+
+/*
+ * 'mmap' unsupported, for now
+ */
+static int pw_device_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	long length = vma->vm_end - vma->vm_start;
+	unsigned long total_size = 0;
+
+	pw_pr_debug("MMAP received!\n");
+
+	if (true) {
+		return -ERROR;
+	}
+
+	/*
+	 * Check size restrictions.
+	 */
+	if (length != pw_buffer_alloc_size) {
+		pw_pr_error
+		    ("ERROR: requested mapping size %ld bytes, MUST be %lu\n",
+		     length, pw_buffer_alloc_size);
+		return -ERROR;
+	}
+
+	if (pw_map_per_cpu_buffers(vma, &total_size)) {
+		pw_pr_error("ERROR mapping per-cpu buffers to userspace!\n");
+		return -ERROR;
+	}
+
+	/*
+	 * Sanity!
+	 */
+	if (total_size != length) {
+		pw_pr_warn("WARNING: mmap: total size = %lu, length = %lu\n",
+			   total_size, length);
+	} else {
+		pw_pr_debug("OK: mmap total size = %lu, length = %lu\n",
+			    total_size, length);
+	}
+
+	pw_did_mmap = true;
+
+	return SUCCESS;
+};
+
+/* "copy_from_user" ==> dst, src*/
+#define EXTRACT_LOCAL_ARGS(l,u) copy_from_user((l), (u), sizeof(struct PWCollector_ioctl_arg))
+
+/*
+ * Check if command is valid, given current state.
+ */
+static inline bool is_cmd_valid(PWCollector_cmd_t cmd)
+{
+	bool is_collecting = IS_COLLECTING(), is_sleeping = IS_SLEEPING();
+
+	if (is_sleeping) {
+		/*
+		 * If currently PAUSEd, the ONLY command
+		 * that's NOT allowed is a subsequent PAUSE.
+		 */
+		if (cmd == PW_PAUSE)
+			return false;
+	} else if (is_collecting && (cmd == PW_START || cmd == PW_RESUME))
+		return false;
+	else if (!is_collecting
+		 && (cmd == PW_STOP || cmd == PW_PAUSE || cmd == PW_CANCEL))
+		return false;
+
+	return true;
+};
+
+/*
+ * Retrieve the base operating frequency
+ * for this CPU. The base frequency acts
+ * as a THRESHOLD indicator for TURBO -- frequencies
+ * ABOVE this are considered TURBO.
+ */
+static inline void get_base_operating_frequency(void)
+{
+#ifndef __arm__
+
+	if (!INTERNAL_STATE.bus_clock_freq_khz) {
+		pw_pr_error
+		    ("ERROR: cannot set base_operating_frequency until we have a bus clock frequency!\n");
+		return;
+	}
+
+	base_operating_freq_khz =
+	    pw_max_non_turbo_ratio * INTERNAL_STATE.bus_clock_freq_khz;
+	pw_pr_debug("RATIO = 0x%x, BUS_FREQ = %u, FREQ = %u\n",
+		    (u32) pw_max_non_turbo_ratio,
+		    (u32) INTERNAL_STATE.bus_clock_freq_khz,
+		    base_operating_freq_khz);
+#else
+	struct cpufreq_policy *policy;
+	if ((policy = cpufreq_cpu_get(0)) == NULL) {
+		base_operating_freq_khz = policy->max;
+	}
+#endif /* ifndef __arm__*/
+};
+
+/*
+ * Set initial config params.
+ * These include MSR addresses, and power
+ * collection switches.
+ */
+int set_config(struct PWCollector_config __user * remote_config, int size)
+{
+	int i = 0;
+	struct PWCollector_config local_config;
+
+	if (!remote_config) {
+		pw_pr_error("ERROR: NULL remote_config value?!\n");
+		return -ERROR;
+	}
+
+	if ((i = copy_from_user(&local_config, remote_config, sizeof(local_config) /*size */ )))	/* "copy_from_user" returns number of bytes that COULD NOT be copied*/
+		return i;
+	/*
+	 * Copy Core/Pkg MSR addresses
+	 */
+	memcpy(INTERNAL_STATE.coreResidencyMSRAddresses,
+	       local_config.info.coreResidencyMSRAddresses,
+	       sizeof(int) * MAX_MSR_ADDRESSES);
+	memcpy(INTERNAL_STATE.pkgResidencyMSRAddresses,
+	       local_config.info.pkgResidencyMSRAddresses,
+	       sizeof(int) * MAX_MSR_ADDRESSES);
+
+	if (true) {
+		OUTPUT(0, KERN_INFO "CORE addrs...\n");
+		for (i = 0; i < MAX_MSR_ADDRESSES; ++i) {
+			OUTPUT(0, KERN_INFO "C%d: %d\n", i,
+			       INTERNAL_STATE.coreResidencyMSRAddresses[i]);
+		}
+		OUTPUT(0, KERN_INFO "PKG addrs...\n");
+		for (i = 0; i < MAX_MSR_ADDRESSES; ++i) {
+			OUTPUT(0, KERN_INFO "C%d: %d\n", i,
+			       INTERNAL_STATE.pkgResidencyMSRAddresses[i]);
+		}
+	}
+	/*
+	 * Set C-state clock multiplier.
+	 */
+	INTERNAL_STATE.residency_count_multiplier =
+	    local_config.info.residency_count_multiplier;
+
+	/*
+	 * Make sure we've got a valid multiplier!
+	 */
+	if ((int)INTERNAL_STATE.residency_count_multiplier <= 0)
+		INTERNAL_STATE.residency_count_multiplier = 1;
+
+	/*
+	 * Set bus clock frequency -- required for
+	 * Turbo threshold determination / calculation.
+	 */
+	INTERNAL_STATE.bus_clock_freq_khz =
+	    local_config.info.bus_clock_freq_khz;
+	/*
+	 * Check if we've got a valid bus clock frequency -- default to
+	 * BUS_CLOCK_FREQ_KHZ if not.
+	 */
+	if ((int)INTERNAL_STATE.bus_clock_freq_khz <= 0)
+		/* INTERNAL_STATE.bus_clock_freq_khz = BUS_CLOCK_FREQ_KHZ;*/
+		INTERNAL_STATE.bus_clock_freq_khz =
+		    DEFAULT_BUS_CLOCK_FREQ_KHZ();
+
+	OUTPUT(0, KERN_INFO "DEBUG: Bus clock frequency = %u KHz\n",
+	       INTERNAL_STATE.bus_clock_freq_khz);
+
+	/*
+	 * The base operating frequency requires the
+	 * bus frequency -- set it here.
+	 */
+	get_base_operating_frequency();
+
+	/*
+	 * Set power switches.
+	 */
+	INTERNAL_STATE.collection_switches = local_config.data;
+	pw_pr_debug("\tCONFIG collection switches = %llu\n",
+		    INTERNAL_STATE.collection_switches);
+
+	INTERNAL_STATE.d_state_sample_interval =
+	    local_config.d_state_sample_interval;
+	OUTPUT(0,
+	       KERN_INFO "\tCONFIG D-state collection interval (msec) = %d\n",
+	       INTERNAL_STATE.d_state_sample_interval);
+
+	return SUCCESS;
+};
+
+/*
+ * Free up space allocated for MSR information.
+ */
+static void pw_deallocate_msr_info_i(struct pw_msr_addr **addrs)
+{
+	if (likely(*addrs)) {
+		pw_kfree(*addrs);
+		*addrs = NULL;
+	}
+};
+
+/*
+ * Set MSR addrs
+ */
+int pw_set_msr_addrs(struct pw_msr_info __user * remote_info, int size)
+{
+	int i = 0, retVal = SUCCESS;
+	struct pw_msr_info *local_info = NULL;
+	struct pw_msr_addr *msr_addrs = NULL;
+	int num_msrs = -1;
+	char *__buffer = NULL;
+
+	if (!remote_info) {
+		pw_pr_error("ERROR: NULL remote_info value?!\n");
+		return -ERROR;
+	}
+	/*
+	 * 'Size' includes space for the 'header' AND space for all of the individual 'msr_info' values.
+	 */
+	__buffer = pw_kmalloc(sizeof(char) * size, GFP_KERNEL);
+	if (!__buffer) {
+		pw_pr_error("ERROR allocating space for msr_addrs!\n");
+		return -ERROR;
+	}
+	memset(__buffer, 0, (sizeof(char) * size));
+
+	local_info = (pw_msr_info_t *) __buffer;
+
+	i = copy_from_user(local_info, remote_info, size);
+	if (i) {		/* "copy_from_user" returns number of bytes that COULD NOT be copied*/
+		pw_pr_error("ERROR copying msr_info data from userspace!\n");
+		pw_kfree(__buffer);
+		return i;
+	}
+	num_msrs = local_info->num_msr_addrs;
+	msr_addrs = (struct pw_msr_addr *)local_info->data;
+	pw_pr_debug("pw_set_msr_addrs: size = %d, # msrs = %d\n", size,
+		    num_msrs);
+	INTERNAL_STATE.num_msrs = num_msrs;
+	INTERNAL_STATE.msr_addrs =
+	    pw_kmalloc(sizeof(pw_msr_addr_t) * num_msrs, GFP_KERNEL);
+	if (unlikely(!INTERNAL_STATE.msr_addrs)) {
+		pw_pr_error("ERROR allocating msr_addr array");
+		retVal = -ERROR;
+		goto done;
+	}
+	memcpy(INTERNAL_STATE.msr_addrs, msr_addrs,
+	       sizeof(pw_msr_addr_t) * num_msrs);
+	for (i = 0; i < num_msrs; ++i) {
+		pw_pr_debug("MSR[%d] = 0x%x\n", i,
+			    INTERNAL_STATE.msr_addrs[i].addr);
+	}
+	/*
+	 * We also need to allocate space for the MSR sets populated by the "pw_read_msr_info_set_i()" function.
+	 */
+	{
+		int cpu = 0;
+		for_each_possible_cpu(cpu) {
+			pw_msr_info_set_t *info_set =
+			    pw_pcpu_msr_info_sets + cpu;
+			{
+				info_set->num_msrs = num_msrs;
+				info_set->prev_msr_vals =
+				    pw_kmalloc(sizeof(pw_msr_val_t) * num_msrs,
+					       GFP_KERNEL);
+				if (unlikely(!info_set->prev_msr_vals)) {
+					pw_pr_error
+					    ("ERROR allocating space for info_set->prev_msr_vals!\n");
+					pw_kfree(INTERNAL_STATE.msr_addrs);
+					retVal = -ERROR;
+					goto done;
+				}
+				info_set->curr_msr_count =
+				    pw_kmalloc(sizeof(pw_msr_val_t) * num_msrs,
+					       GFP_KERNEL);
+				if (unlikely(!info_set->curr_msr_count)) {
+					pw_pr_error
+					    ("ERROR allocating space for info_set->curr_msr_count!\n");
+					pw_kfree(INTERNAL_STATE.msr_addrs);
+					pw_kfree(info_set->prev_msr_vals);
+					info_set->prev_msr_vals = NULL;
+					retVal = -ERROR;
+					goto done;
+				}
+				info_set->c_multi_msg_mem =
+				    pw_kmalloc(sizeof(pw_msr_val_t) * num_msrs +
+					       C_MULTI_MSG_HEADER_SIZE(),
+					       GFP_KERNEL);
+				if (unlikely(!info_set->c_multi_msg_mem)) {
+					pw_pr_error
+					    ("ERROR allocating space for c_multi_msg scratch space!\n");
+					pw_kfree(INTERNAL_STATE.msr_addrs);
+					pw_kfree(info_set->prev_msr_vals);
+					pw_kfree(info_set->curr_msr_count);
+					info_set->prev_msr_vals = NULL;
+					info_set->curr_msr_count = NULL;
+					retVal = -ERROR;
+					goto done;
+				}
+				memset(info_set->prev_msr_vals, 0,
+				       sizeof(pw_msr_val_t) * num_msrs);
+				memset(info_set->curr_msr_count, 0,
+				       sizeof(pw_msr_val_t) * num_msrs);
+				memset(info_set->c_multi_msg_mem, 0,
+				       sizeof(pw_msr_val_t) * num_msrs +
+				       C_MULTI_MSG_HEADER_SIZE());
+				for (i = 0; i < num_msrs; ++i) {
+					info_set->prev_msr_vals[i].id =
+					    msr_addrs[i].id;
+				}
+				pw_pr_debug(KERN_INFO
+					    "[%d]: info_set = %p, prev_msr_vals = %p, curr_msr_count = %p\n",
+					    cpu, info_set,
+					    info_set->prev_msr_vals,
+					    info_set->curr_msr_count);
+			}
+		}
+	}
+done:
+	pw_kfree(__buffer);
+	return retVal;
+};
+
+/*
+ * Free up space required for S0iX addresses.
+ */
+static void pw_deallocate_platform_res_info_i(void)
+{
+	if (likely(INTERNAL_STATE.platform_res_addrs)) {
+		pw_kfree(INTERNAL_STATE.platform_res_addrs);
+		INTERNAL_STATE.platform_res_addrs = NULL;
+	}
+	/*
+	 * TODO
+	 * un-initialize as well?
+	 */
+	if (INTERNAL_STATE.platform_remapped_addrs) {
+		int i = 0;
+		for (i = 0; i < INTERNAL_STATE.num_addrs; ++i) {
+			if (INTERNAL_STATE.platform_remapped_addrs[i]) {
+				iounmap((volatile void *)(unsigned long)
+					INTERNAL_STATE.
+					platform_remapped_addrs[i]);
+				/* printk(KERN_INFO "OK: unmapped MMIO base addr: 0x%lx\n", INTERNAL_STATE.platform_remapped_addrs[i]);*/
+			}
+		}
+		pw_kfree(INTERNAL_STATE.platform_remapped_addrs);
+		INTERNAL_STATE.platform_remapped_addrs = NULL;
+	}
+	if (likely(INTERNAL_STATE.platform_residency_msg)) {
+		pw_kfree(INTERNAL_STATE.platform_residency_msg);
+		INTERNAL_STATE.platform_residency_msg = NULL;
+	}
+	if (likely(INTERNAL_STATE.init_platform_res_values)) {
+		pw_kfree(INTERNAL_STATE.init_platform_res_values);
+		INTERNAL_STATE.init_platform_res_values = NULL;
+	}
+	return;
+};
+
+/*
+ * Set S0iX method, addresses.
+ */
+#if 0
+int pw_set_platform_res_config_i(struct PWCollector_platform_res_info
+				 *remote_info, int size)
+{
+	struct PWCollector_platform_res_info *local_info;
+	char *__buffer = NULL;
+	int i = 0;
+	u64 *__addrs = NULL;
+
+	if (!remote_info) {
+		pw_pr_error("ERROR: NULL remote_info value?!\n");
+		return -ERROR;
+	}
+	printk(KERN_INFO "REMOTE_INFO = %p, size = %d\n", remote_info, size);
+	/*
+	 * 'Size' includes space for the 'header' AND space for all of the 64b IO addresses.
+	 */
+	__buffer = pw_kmalloc(sizeof(char) * size, GFP_KERNEL);
+	if (!__buffer) {
+		pw_pr_error
+		    ("ERROR allocating space for local platform_res_info!\n");
+		return -ERROR;
+	}
+	memset(__buffer, 0, (sizeof(char) * size));
+
+	local_info = (PWCollector_platform_res_info_t *) __buffer;
+	__addrs = (u64 *) local_info->addrs;
+
+	i = copy_from_user(local_info, remote_info, size);
+	if (i) {		/* "copy_from_user" returns number of bytes that COULD NOT be copied*/
+		pw_pr_error
+		    ("ERROR copying platform residency info data from userspace!\n");
+		pw_kfree(__buffer);
+		return i;
+	}
+	printk(KERN_INFO
+	       "OK: platform info collection type = %d, # addrs = %u\n",
+	       local_info->collection_type, local_info->num_addrs);
+	for (i = 0; i < local_info->num_addrs; ++i) {
+		printk(KERN_INFO "\t[%d] --> 0x%lx\n", i, __addrs[i]);
+	}
+	pw_kfree(__buffer);
+	return -ERROR;
+};
+#endif /* if 0*/
+int pw_set_platform_res_config_i(struct PWCollector_platform_res_info __user *
+				 remote_info, int size)
+{
+	struct PWCollector_platform_res_info local_info;
+	int i = 0;
+	u64 __user *__remote_addrs = NULL;
+	char *buffer = NULL;
+	/* const int counter_size_in_bytes = (int)INTERNAL_STATE.counter_size_in_bytes;*/
+
+	INTERNAL_STATE.init_platform_res_values =
+	    INTERNAL_STATE.platform_remapped_addrs =
+	    INTERNAL_STATE.platform_res_addrs = NULL;
+	INTERNAL_STATE.platform_residency_msg = NULL;
+
+	if (!remote_info) {
+		pw_pr_error("ERROR: NULL remote_info value?!\n");
+		return -ERROR;
+	}
+	__remote_addrs = (u64 *) remote_info->addrs;
+	pw_pr_debug("Remote address = %llx\n", __remote_addrs[0]);
+
+	i = copy_from_user(&local_info, remote_info, sizeof(local_info));
+	if (i) {		/* "copy_from_user" returns number of bytes that COULD NOT be copied*/
+		pw_pr_error
+		    ("ERROR copying platform residency info data from userspace!\n");
+		return i;
+	}
+	/* printk(KERN_INFO "OK: platform info collection type = %d, # addrs = %u\n", local_info.collection_type, local_info.num_addrs);*/
+
+	INTERNAL_STATE.ipc_start_command = local_info.ipc_start_command;
+	INTERNAL_STATE.ipc_start_sub_command = local_info.ipc_start_sub_command;
+	INTERNAL_STATE.ipc_stop_command = local_info.ipc_stop_command;
+	INTERNAL_STATE.ipc_stop_sub_command = local_info.ipc_stop_sub_command;
+	INTERNAL_STATE.ipc_dump_command = local_info.ipc_dump_command;
+	INTERNAL_STATE.ipc_dump_sub_command = local_info.ipc_dump_sub_command;
+
+	INTERNAL_STATE.num_addrs = local_info.num_addrs;
+	INTERNAL_STATE.collection_type = local_info.collection_type;
+	INTERNAL_STATE.counter_size_in_bytes = local_info.counter_size_in_bytes;
+
+	if (INTERNAL_STATE.num_addrs > 5) {
+		printk(KERN_INFO
+		       "ERROR: can only collect a max of 5 platform residency states, for now (%u requested)!\n",
+		       INTERNAL_STATE.num_addrs);
+		return -ERROR;
+	}
+	INTERNAL_STATE.platform_res_addrs =
+	    (u64 *) pw_kmalloc(sizeof(u64) * INTERNAL_STATE.num_addrs,
+			       GFP_KERNEL);
+	if (!INTERNAL_STATE.platform_res_addrs) {
+		printk(KERN_INFO "ERROR allocating space for local addrs!\n");
+		return -ERROR;
+	}
+	memset(INTERNAL_STATE.platform_res_addrs, 0,
+	       (sizeof(u64) * INTERNAL_STATE.num_addrs));
+
+	INTERNAL_STATE.platform_remapped_addrs =
+	    (u64 *) pw_kmalloc(sizeof(u64) * INTERNAL_STATE.num_addrs,
+			       GFP_KERNEL);
+	if (!INTERNAL_STATE.platform_remapped_addrs) {
+		printk(KERN_INFO "ERROR allocating space for local addrs!\n");
+		pw_kfree(INTERNAL_STATE.platform_res_addrs);
+		INTERNAL_STATE.platform_res_addrs = NULL;
+		return -ERROR;
+	}
+	memset(INTERNAL_STATE.platform_remapped_addrs, 0,
+	       (sizeof(u64) * INTERNAL_STATE.num_addrs));
+
+	INTERNAL_STATE.init_platform_res_values =
+	    (u64 *) pw_kmalloc(sizeof(u64) * INTERNAL_STATE.num_addrs,
+			       GFP_KERNEL);
+	if (!INTERNAL_STATE.init_platform_res_values) {
+		printk(KERN_INFO "ERROR allocating space for local addrs!\n");
+		pw_kfree(INTERNAL_STATE.platform_res_addrs);
+		pw_kfree(INTERNAL_STATE.platform_remapped_addrs);
+		INTERNAL_STATE.platform_res_addrs = NULL;
+		INTERNAL_STATE.platform_remapped_addrs = NULL;
+		return -ERROR;
+	}
+	memset(INTERNAL_STATE.init_platform_res_values, 0,
+	       (sizeof(u64) * INTERNAL_STATE.num_addrs));
+
+	buffer = (char *)pw_kmalloc(sizeof(s_res_msg_t) + (sizeof(u64) * (INTERNAL_STATE.num_addrs + 2)), GFP_KERNEL);	/* "+2" ==> S0i0, S3*/
+	if (!buffer) {
+		printk(KERN_INFO "ERROR allocating space for local addrs!\n");
+		pw_kfree(INTERNAL_STATE.platform_res_addrs);
+		pw_kfree(INTERNAL_STATE.platform_remapped_addrs);
+		pw_kfree(INTERNAL_STATE.init_platform_res_values);
+		INTERNAL_STATE.platform_res_addrs = NULL;
+		INTERNAL_STATE.platform_remapped_addrs = NULL;
+		INTERNAL_STATE.init_platform_res_values = NULL;
+		return -ERROR;
+	}
+	memset(buffer, 0,
+	       sizeof(char) * sizeof(s_res_msg_t) +
+	       (sizeof(u64) * (INTERNAL_STATE.num_addrs + 2)));
+	/* "+2" ==> S0i0, S3*/
+	/* INTERNAL_STATE.platform_residency_msg = (s_res_msg_t *)pw_kmalloc(sizeof(s_res_msg_t) + (sizeof(u64) * (INTERNAL_STATE.num_addrs+2)), GFP_KERNEL); */
+	INTERNAL_STATE.platform_residency_msg = (s_res_msg_t *) & buffer[0];
+	INTERNAL_STATE.platform_residency_msg->residencies =
+	    (u64 *) & buffer[sizeof(s_res_msg_t)];
+
+	i = copy_from_user(INTERNAL_STATE.platform_res_addrs, __remote_addrs,
+			   (sizeof(u64) * INTERNAL_STATE.num_addrs));
+	if (i) {		/* "copy_from_user" returns number of bytes that COULD NOT be copied*/
+		pw_pr_error
+		    ("ERROR copying platform residency info data from userspace!\n");
+		pw_deallocate_platform_res_info_i();
+		return i;
+	}
+#if 0
+	printk(KERN_INFO "%llx\n", INTERNAL_STATE.platform_res_addrs[0]);
+	for (i = 0; i < INTERNAL_STATE.num_addrs; ++i) {
+		printk(KERN_INFO "\t[%d] --> 0x%lx\n", i,
+		       INTERNAL_STATE.platform_res_addrs[i]);
+	}
+#endif /* if 0*/
+	switch (INTERNAL_STATE.collection_type) {
+	case PW_IO_IPC:
+	case PW_IO_MMIO:	/* fall-through*/
+#if 1
+		for (i = 0; i < INTERNAL_STATE.num_addrs; ++i) {
+			/* INTERNAL_STATE.platform_remapped_addrs[i] = ioremap_nocache(INTERNAL_STATE.platform_res_addrs[i], sizeof(u32) * 1);*/
+			/* INTERNAL_STATE.platform_remapped_addrs[i] = (u64)ioremap_nocache(INTERNAL_STATE.platform_res_addrs[i], sizeof(u64) * 1);*/
+			/* INTERNAL_STATE.platform_remapped_addrs[i] = (u64)ioremap_nocache(INTERNAL_STATE.platform_res_addrs[i], sizeof(u32) * 1);*/
+			INTERNAL_STATE.platform_remapped_addrs[i] =
+			    (u64) (unsigned long)ioremap_nocache((unsigned long)
+								 INTERNAL_STATE.
+								 platform_res_addrs
+								 [i],
+								 (unsigned
+								  long)
+								 (INTERNAL_STATE.
+								  counter_size_in_bytes
+								  * 1));
+			if ((void *)(unsigned long)INTERNAL_STATE.
+			    platform_remapped_addrs[i] == NULL) {
+				printk(KERN_INFO
+				       "ERROR remapping MMIO addresses %p\n",
+				       (void *)(unsigned long)INTERNAL_STATE.
+				       platform_res_addrs[i]);
+				pw_deallocate_platform_res_info_i();
+				return -ERROR;
+			}
+			pw_pr_debug("OK: mapped address %llu to %llu!\n",
+				    INTERNAL_STATE.platform_res_addrs[i],
+				    INTERNAL_STATE.platform_remapped_addrs[i]);
+		}
+#else /* if 1*/
+		INTERNAL_STATE.platform_remapped_addrs[0] =
+		    ioremap_nocache(INTERNAL_STATE.platform_res_addrs[0],
+				    sizeof(unsigned long) *
+				    (INTERNAL_STATE.num_addrs * 2));
+#endif /* if 0*/
+		break;
+	default:
+		printk(KERN_INFO
+		       "ERROR: unsupported platform residency collection type: %u!\n",
+		       INTERNAL_STATE.collection_type);
+		pw_deallocate_platform_res_info_i();
+		return -ERROR;
+	}
+	return SUCCESS;
+};
+
+int check_platform(struct PWCollector_check_platform *remote_check, int size)
+{
+	struct PWCollector_check_platform *local_check;
+	const char *unsupported = "UNSUPPORTED_T1, UNSUPPORTED_T2";	/* for debugging ONLY*/
+	int len = strlen(unsupported);
+	int max_size = sizeof(struct PWCollector_check_platform);
+	int retVal = SUCCESS;
+
+	if (!remote_check) {
+		pw_pr_error("ERROR: NULL remote_check value?!\n");
+		return -ERROR;
+	}
+
+	local_check = pw_kmalloc(max_size, GFP_KERNEL);
+
+	if (!local_check) {
+		pw_pr_error
+		    ("ERROR: could NOT allocate memory in check_platform!\n");
+		return -ERROR;
+	}
+
+	memset(local_check, 0, max_size);
+
+	/*
+	 * Populate "local_check.unsupported_tracepoints" with a (comma-separated)
+	 * list of unsupported tracepoints. For now, we just leave this
+	 * blank, reflecting the fact that, on our development systems,
+	 * every tracepoints is supported.
+	 *
+	 * Update: for debugging, write random data here.
+	 */
+	memcpy(local_check->unsupported_tracepoints, unsupported, len);
+	/*
+	 * UPDATE: we're borrowing one of the 'reserved' 64bit values
+	 * to document the following:
+	 * (1) Kernel call stacks supported?
+	 * (2) Kernel compiled with CONFIG_TIMER_STATS?
+	 * (3) Wakelocks supported?
+	 */
+#ifdef CONFIG_FRAME_POINTER
+	local_check->supported_kernel_features |=
+	    PW_KERNEL_SUPPORTS_CALL_STACKS;
+#endif
+#ifdef CONFIG_TIMER_STATS
+	local_check->supported_kernel_features |=
+	    PW_KERNEL_SUPPORTS_CONFIG_TIMER_STATS;
+#endif
+#if DO_WAKELOCK_SAMPLE
+	local_check->supported_kernel_features |=
+	    PW_KERNEL_SUPPORTS_WAKELOCK_PATCH;
+#endif
+	/*
+	 * Also update information on the underlying CPU:
+	 * (1) Was the 'ANY-THREAD' bit set?
+	 * (2) Was 'Auto-Demote' enabled?
+	 */
+	if (pw_is_any_thread_set) {
+		local_check->supported_arch_features |= PW_ARCH_ANY_THREAD_SET;
+	}
+	if (pw_is_auto_demote_enabled) {
+		local_check->supported_arch_features |=
+		    PW_ARCH_AUTO_DEMOTE_ENABLED;
+	}
+
+	/*
+	 * Copy everything back to user address space.
+	 */
+	if ((retVal = copy_to_user(remote_check, local_check, size)))	/* returns number of bytes that COULD NOT be copied*/
+		retVal = -ERROR;
+
+	pw_kfree(local_check);
+	return retVal;		/* all unsupported tracepoints documented*/
+};
+
+/*
+ * Return the TURBO frequency threshold
+ * for this CPU.
+ */
+int get_turbo_threshold(struct PWCollector_turbo_threshold *remote_thresh,
+			int size)
+{
+	struct PWCollector_turbo_threshold local_thresh;
+
+	if (!remote_thresh) {
+		pw_pr_error("ERROR: NULL remote_thresh value?!\n");
+		return -ERROR;
+	}
+
+	if (!base_operating_freq_khz) {
+		pw_pr_error
+		    ("ERROR: retrieving turbo threshold without specifying base operating freq?!\n");
+		return -ERROR;
+	}
+
+	local_thresh.threshold_frequency = base_operating_freq_khz;
+
+	if (copy_to_user(remote_thresh, &local_thresh, size))	/* returns number of bytes that could NOT be copied.*/
+		return -ERROR;
+
+	return SUCCESS;
+};
+
+/*
+ * Retrieve device driver version
+ */
+int get_version(struct PWCollector_version_info *remote_version, int size)
+{
+	struct PWCollector_version_info local_version;
+
+	if (!remote_version) {
+		pw_pr_error("ERROR: NULL remote_version value?!\n");
+		return -ERROR;
+	}
+
+	local_version.version = PW_DRV_VERSION_MAJOR;
+	local_version.inter = PW_DRV_VERSION_MINOR;
+	local_version.other = PW_DRV_VERSION_OTHER;
+
+	/*
+	 * Copy everything back to user address space.
+	 */
+	return copy_to_user(remote_version, &local_version, size);	/* returns number of bytes that could NOT be copiled*/
+};
+
+/*
+ * Retrieve microcode patch version.
+ * Only useful for MFLD
+ */
+int get_micro_patch_ver(int *remote_ver, int size)
+{
+	int local_ver = micro_patch_ver;
+
+	if (!remote_ver) {
+		pw_pr_error("ERROR: NULL remote_ver value?!\n");
+		return -ERROR;
+	}
+
+	/*
+	 * Copy everything back to user address space.
+	 */
+	return copy_to_user(remote_ver, &local_ver, size);	/* returns number of bytes that could NOT be copiled*/
+};
+
+int get_status(struct PWCollector_status *remote_status, int size)
+{
+	struct PWCollector_status local_status;
+	int cpu, retVal = SUCCESS;
+	stats_t *pstats = NULL;
+	unsigned long statusJIFF, elapsedJIFF = 0;
+
+	if (!remote_status) {
+		pw_pr_error("ERROR: NULL remote_status value?!\n");
+		return -ERROR;
+	}
+
+	memset(&local_status, 0, sizeof(local_status));
+
+	/*
+	 * Set # cpus.
+	 */
+	/* local_status.num_cpus = pw_max_num_cpus;*/
+	local_status.num_cpus = num_online_cpus();
+
+	/*
+	 * Set total collection time elapsed.
+	 */
+	{
+		statusJIFF = jiffies;
+		if (statusJIFF < INTERNAL_STATE.collectionStartJIFF) {
+			OUTPUT(0,
+			       KERN_INFO
+			       "WARNING: jiffies counter has WRAPPED AROUND!\n");
+			elapsedJIFF = 0;	/* avoid messy NAN when dividing*/
+		} else {
+			/* elapsedJIFF = statusJIFF - startJIFF;*/
+			elapsedJIFF =
+			    statusJIFF - INTERNAL_STATE.collectionStartJIFF;
+		}
+		OUTPUT(0, KERN_INFO "start = %lu, stop = %lu, elapsed = %lu\n",
+		       INTERNAL_STATE.collectionStartJIFF, statusJIFF,
+		       elapsedJIFF);
+	}
+	local_status.time = jiffies_to_msecs(elapsedJIFF);
+
+	/*
+	 * Set # c-breaks etc.
+	 * Note: aggregated over ALL cpus,
+	 * per spec document.
+	 */
+	for_each_online_cpu(cpu) {
+		pstats = &per_cpu(per_cpu_stats, cpu);
+		local_status.c_breaks += local_read(&pstats->c_breaks);
+		local_status.timer_c_breaks +=
+		    local_read(&pstats->timer_c_breaks);
+		local_status.inters_c_breaks +=
+		    local_read(&pstats->inters_c_breaks);
+		local_status.p_trans += local_read(&pstats->p_trans);
+		local_status.num_inters += local_read(&pstats->num_inters);
+		local_status.num_timers += local_read(&pstats->num_timers);
+	}
+
+	/*
+	 * Now copy everything to user-space.
+	 */
+	retVal = copy_to_user(remote_status, &local_status, sizeof(local_status));	/* returns number of bytes that COULD NOT be copied*/
+
+	return retVal;
+};
+
+/*
+ * Reset all statistics collected so far.
+ * Called from a non-running collection context.
+ */
+static inline void reset_statistics(void)
+{
+	int cpu;
+	stats_t *pstats = NULL;
+
+	/*
+	 * Note: no need to lock, since we're only
+	 * going to be called from a non-running
+	 * collection, and tracepoints are inserted
+	 * (just) before a collection starts, and removed
+	 * (just) after a collection ends.
+	 */
+	for_each_online_cpu(cpu) {
+		/*
+		 * Reset the per cpu stats
+		 */
+		{
+			pstats = &per_cpu(per_cpu_stats, cpu);
+			local_set(&pstats->c_breaks, 0);
+			local_set(&pstats->timer_c_breaks, 0);
+			local_set(&pstats->inters_c_breaks, 0);
+			local_set(&pstats->p_trans, 0);
+			local_set(&pstats->num_inters, 0);
+			local_set(&pstats->num_timers, 0);
+		}
+	}
+};
+
+/*
+ * Reset the (PER-CPU) structs containing
+ * MSR residency information (amongst
+ * other fields).
+ */
+void reset_per_cpu_msr_residencies(void)
+{
+	int cpu;
+
+	for_each_online_cpu(cpu) {
+		/*
+		 * Reset the per-cpu residencies
+		 */
+		*(&per_cpu(prev_c6_val, cpu)) = 0;
+		/*
+		 * Reset the "first-hit" variable.
+		 */
+		/* local_set(&per_cpu(is_first_event, cpu), 1);*/
+		per_cpu(wakeup_event_counter, cpu).event_tsc = 0;
+		/*
+		 * Reset the 'init_msr_sent' variable.
+		 */
+		memset((&per_cpu(pw_pcpu_msr_sets, cpu)), 0, sizeof(msr_set_t));
+		/*
+		 * Reset the MSR info sets.
+		 */
+		{
+			pw_msr_info_set_t *info_set =
+			    pw_pcpu_msr_info_sets + cpu;
+			if (likely(info_set->prev_msr_vals)) {
+				int i = 0;
+				for (i = 0; i < info_set->num_msrs; ++i) {
+					info_set->prev_msr_vals[i].val = 0x0;
+				}
+			}
+			if (likely(info_set->curr_msr_count)) {
+				memset(info_set->curr_msr_count, 0,
+				       sizeof(pw_msr_val_t) *
+				       info_set->num_msrs);
+			}
+			info_set->init_msr_set_sent = 0;
+		}
+		/*
+		 * Reset stats on # samples produced and # dropped.
+		 */
+		local_set(&per_cpu(num_samples_produced, cpu), 0);
+		local_set(&per_cpu(num_samples_dropped, cpu), 0);
+	}
+	/*
+	 * Reset the TPS atomic count value.
+	 */
+#if DO_TPS_EPOCH_COUNTER
+	atomic_set(&tps_epoch, 0);
+#endif
+	/*
+	 * Ensure updates are propagated.
+	 */
+	smp_mb();
+};
+
+static void reset_trace_sent_fields(void)
+{
+	struct tnode *node = NULL;
+	struct hlist_node *curr = NULL;
+	int i = 0;
+
+	for (i = 0; i < NUM_MAP_BUCKETS; ++i) {
+		PW_HLIST_FOR_EACH_ENTRY(node, curr, &timer_map[i].head, list) {
+			node->trace_sent = 0;
+		}
+	}
+};
+
+/*
+ * Run the generation of current p-state sample
+ * for all cpus in parallel to avoid the delay
+ * due to a serial execution.
+ */
+#if DO_GENERATE_CURRENT_FREQ_IN_PARALLEL
+static void generate_cpu_frequency_per_cpu(int cpu, bool is_start)
+{
+	u64 tsc = 0, aperf = 0, mperf = 0;
+	u32 perf_status = 0;
+	u8 is_boundary = (is_start) ? 1 : 2;	/* "0" ==> NOT boundary, "1" ==> START boundary, "2" ==> STOP boundary*/
+	u32 prev_req_freq = 0;
+
+#ifndef __arm__
+	u32 l = 0, h = 0;
+	{
+		tscval(&tsc);
+	}
+
+	/*
+	 * Read the IA32_PERF_STATUS MSR. We delegate the actual frequency computation to Ring-3 because
+	 * the PERF_STATUS encoding is actually model-specific.
+	 */
+	{
+		WUWATCH_RDMSR(IA32_PERF_STATUS_MSR_ADDR, l, h);
+		perf_status = l;	/* We're only interested in the lower 16 bits!*/
+		h = 0;
+	}
+
+	/*
+	 * Retrieve the previous requested frequency.
+	 */
+	if (is_start == false) {
+		prev_req_freq = per_cpu(pcpu_prev_req_freq, cpu);
+	} else {
+		/*
+		 * Collection START: make sure we reset the requested frequency!
+		 */
+		per_cpu(pcpu_prev_req_freq, cpu) = 0;
+	}
+	per_cpu(pcpu_prev_perf_status_val, cpu) = perf_status;
+	/*
+	 * Also read CPU_CLK_UNHALTED.REF and CPU_CLK_UNHALTED.CORE. These required ONLY for AXE import
+	 * backward compatibility!
+	 */
+	{
+		WUWATCH_RDMSRL(CORE_CYCLES_MSR_ADDR, aperf);
+
+		WUWATCH_RDMSRL(REF_CYCLES_MSR_ADDR, mperf);
+	}
+
+#else /* __arm__*/
+	msr_set_t *set = NULL;
+	set = &get_cpu_var(pw_pcpu_msr_sets);
+
+	tscval(&tsc);
+	aperf = set->prev_msr_vals[APERF];
+	mperf = set->prev_msr_vals[MPERF];
+	put_cpu_var(pw_pcpu_msr_sets);
+
+	perf_status =
+	    cpufreq_quick_get(cpu) / INTERNAL_STATE.bus_clock_freq_khz;
+
+	/*
+	 * Retrieve the previous requested frequency.
+	 */
+	/* TODO CAN WE MERGE THIS CODE WITH ifndef __arm__ code above and put it*/
+	/* after this code?*/
+	if (is_start == false) {
+		prev_req_freq = per_cpu(pcpu_prev_req_freq, cpu);
+	}
+#endif /* ifndef __arm__*/
+
+	produce_p_sample(cpu, tsc, prev_req_freq, perf_status, is_boundary,
+			 aperf, mperf);
+
+};
+#else /* DO_GENERATE_CURRENT_FREQ_IN_PARALLEL*/
+static void generate_cpu_frequency_per_cpu(int cpu, bool is_start)
+{
+	u64 tsc = 0, aperf = 0, mperf = 0;
+	u32 perf_status = 0;
+	u8 is_boundary = (is_start) ? 1 : 2;	/* "0" ==> NOT boundary, "1" ==> START boundary, "2" ==> STOP boundary*/
+	u32 prev_req_freq = 0;
+
+#ifndef __arm__
+	u32 l = 0, h = 0;
+	{
+		int ret = WUWATCH_RDMSR_SAFE_ON_CPU(cpu, 0x10, &l, &h);
+		if (ret) {
+			OUTPUT(0,
+			       KERN_INFO
+			       "WARNING: WUWATCH_RDMSR_SAFE_ON_CPU of TSC failed with code %d\n",
+			       ret);
+		}
+		tsc = h;
+		tsc <<= 32;
+		tsc += l;
+	}
+
+	/*
+	 * Read the IA32_PERF_STATUS MSR. We delegate the actual frequency computation to Ring-3 because
+	 * the PERF_STATUS encoding is actually model-specific.
+	 */
+	{
+		WARN_ON(WUWATCH_RDMSR_SAFE_ON_CPU
+			(cpu, IA32_PERF_STATUS_MSR_ADDR, &l, &h));
+		perf_status = l;	/* We're only interested in the lower 16 bits!*/
+	}
+
+	/*
+	 * Retrieve the previous requested frequency.
+	 */
+	if (is_start == false) {
+		prev_req_freq = per_cpu(pcpu_prev_req_freq, cpu);
+	} else {
+		/*
+		 * Collection START: make sure we reset the requested frequency!
+		 */
+		per_cpu(pcpu_prev_req_freq, cpu) = 0;
+	}
+	per_cpu(pcpu_prev_perf_status_val, cpu) = perf_status;
+	/*
+	 * Also read CPU_CLK_UNHALTED.REF and CPU_CLK_UNHALTED.CORE. These required ONLY for AXE import
+	 * backward compatibility!
+	 */
+#if 1
+	{
+		WARN_ON(WUWATCH_RDMSR_SAFE_ON_CPU
+			(cpu, CORE_CYCLES_MSR_ADDR, &l, &h));
+		aperf = (u64) h << 32 | (u64) l;
+
+		WARN_ON(WUWATCH_RDMSR_SAFE_ON_CPU
+			(cpu, REF_CYCLES_MSR_ADDR, &l, &h));
+		mperf = (u64) h << 32 | (u64) l;
+	}
+#endif
+
+#else
+	msr_set_t *set = NULL;
+	set = &get_cpu_var(pw_pcpu_msr_sets);
+
+	tscval(&tsc);
+	aperf = set->prev_msr_vals[APERF];
+	mperf = set->prev_msr_vals[MPERF];
+	put_cpu_var(pw_pcpu_msr_sets);
+
+	perf_status =
+	    cpufreq_quick_get(cpu) / INTERNAL_STATE.bus_clock_freq_khz;
+
+	/*
+	 * Retrieve the previous requested frequency.
+	 */
+	/* TODO CAN WE MERGE THIS CODE WITH ifndef __arm__ code above and put it*/
+	/* after this code?*/
+	if (is_start == false) {
+		prev_req_freq = per_cpu(pcpu_prev_req_freq, cpu);
+	}
+#endif /* ifndef __arm__*/
+
+	produce_p_sample(cpu, tsc, prev_req_freq, perf_status, is_boundary,
+			 aperf, mperf);
+
+};
+#endif /* DO_GENERATE_CURRENT_FREQ_IN_PARALLEL*/
+
+static void generate_cpu_frequency(void *start)
+{
+	int cpu = raw_smp_processor_id();
+	bool is_start = *((bool *) start);
+
+	generate_cpu_frequency_per_cpu(cpu, is_start);
+}
+
+/*
+ * Measure current CPU operating
+ * frequency, and push 'P-samples'
+ * onto the (per-cpu) O/P buffers.
+ * Also determine the various
+ * discrete frequencies the processor
+ * is allowed to execute at (basically
+ * the various frequencies present
+ * in the 'scaling_available_frequencies'
+ * sysfs file).
+ *
+ * REQUIRES CPUFREQ DRIVER!!!
+ */
+static void get_current_cpu_frequency(bool is_start)
+{
+
+#if DO_GENERATE_CURRENT_FREQ_IN_PARALLEL
+	SMP_CALL_FUNCTION(&generate_cpu_frequency, (void *)&is_start, 0, 1);
+	/* smp_call_function is executed for all other CPUs except itself.*/
+	/* So, run it for itself.*/
+	generate_cpu_frequency((void *)&is_start);
+#else
+	int cpu = 0;
+	for_each_online_cpu(cpu) {
+		generate_cpu_frequency_per_cpu(cpu, is_start);
+	}
+#endif
+};
+
+static void generate_end_tps_sample_per_cpu(void *tsc)
+{
+	if (IS_C_STATE_MODE()) {
+		/* tps(0 , MPERF , true );*/
+		bdry_tps();
+	} else {
+		tps_lite(true /* boundary */ );
+	}
+	return;
+};
+
+static void generate_end_tps_samples(void)
+{
+	u64 tsc = 0;
+
+	tscval(&tsc);
+
+	SMP_CALL_FUNCTION(&generate_end_tps_sample_per_cpu, (void *)&tsc, 0, 1);
+	generate_end_tps_sample_per_cpu((void *)&tsc);
+};
+
+/*
+ * START/RESUME a collection.
+ *
+ * (a) (For START ONLY): ZERO out all (per-cpu) O/P buffers.
+ * (b) Reset all statistics.
+ * (c) Register all tracepoints.
+ */
+int start_collection(PWCollector_cmd_t cmd)
+{
+	switch (cmd) {
+	case PW_START:
+		/*
+		 * Reset the O/P buffers.
+		 *
+		 * START ONLY
+		 */
+		pw_reset_per_cpu_buffers();
+		/*
+		 * Reset the 'trace_sent' fields
+		 * for all trace entries -- this
+		 * ensures we send backtraces
+		 * once per collection, as
+		 * opposed to once per 'insmod'.
+		 *
+		 * START ONLY
+		 */
+		{
+			reset_trace_sent_fields();
+		}
+
+#if DO_CACHE_IRQ_DEV_NAME_MAPPINGS
+		/*
+		 * Reset the list of vars required
+		 * to transfer IRQ # <-> Name info.
+		 * UPDATE: the 'irq_map' should contain
+		 * mappings for only those
+		 * devices that actually caused C-state
+		 * wakeups DURING THE CURRENT COLLECTION.
+		 * We therefore reset the map before
+		 * every collection (this also auto resets
+		 * the "irq_mappings_list" data structure).
+		 *
+		 * START ONLY
+		 */
+		{
+			destroy_irq_map();
+			if (init_irq_map()) {
+				/* ERROR*/
+				pw_pr_error
+				    ("ERROR: could NOT initialize irq map in start_collection!\n");
+				return -ERROR;
+			}
+		}
+#endif
+
+#if DO_USE_CONSTANT_POOL_FOR_WAKELOCK_NAMES
+		{
+			destroy_wlock_map();
+			if (init_wlock_map()) {
+				pw_pr_error
+				    ("ERROR: could NOT initialize wlock map in start_collection1\n");
+				return -ERROR;
+			}
+		}
+#endif
+		/*
+		 * Reset collection stats
+		 *
+		 * START ONLY
+		 */
+#if DO_IOCTL_STATS
+		{
+			reset_statistics();
+		}
+#endif
+
+	case PW_RESUME:	/* fall through*/
+		break;
+	default:		/* should *NEVER* happen!*/
+		printk(KERN_ERR "Error: invalid cmd=%d in start collection!\n",
+		       cmd);
+		return -ERROR;
+	}
+	/*
+	 * Reset the (per-cpu) "per_cpu_t" structs that hold MSR residencies
+	 *
+	 * START + RESUME
+	 */
+	{
+		reset_per_cpu_msr_residencies();
+	}
+
+	/*
+	 * Get START P-state samples.
+	 *
+	 * UPDATE: do this ONLY IF
+	 * USER SPECIFIES FREQ-mode!
+	 *
+	 * START + RESUME???
+	 */
+	if (likely(IS_FREQ_MODE())) {
+		get_current_cpu_frequency(true);	/* "true" ==> collection START*/
+	}
+	/*
+	 * Get START C-state samples.
+	 */
+	if (likely(IS_SLEEP_MODE() || IS_C_STATE_MODE())) {
+		generate_end_tps_samples();
+	}
+
+	/*
+	 * Take a snapshot of the TSC on collection start -- required for ACPI S3 support.
+	 */
+	tscval(&pw_collection_start_tsc);
+
+	INTERNAL_STATE.collectionStartJIFF = jiffies;
+	INTERNAL_STATE.write_to_buffers = true;
+
+	/*
+	 * OK, all setup completed. Now
+	 * register the tracepoints.
+	 */
+	switch (cmd) {
+	case PW_START:
+		register_pausable_probes();
+	case PW_RESUME:	/* fall through*/
+		register_non_pausable_probes();
+		break;
+	default:		/* should *NEVER* happen!*/
+		printk(KERN_ERR "Error: invalid cmd=%d in start collection!\n",
+		       cmd);
+		return -ERROR;
+	}
+
+#if DO_S_RESIDENCY_SAMPLE
+	/*struct timeval cur_time;*/
+	if (IS_S_RESIDENCY_MODE()) {
+		startTSC_s_residency = 0;
+		produce_boundary_s_residency_msg_i(true);	/* "true" ==> BEGIN boundary*/
+		/* startJIFF_s_residency = CURRENT_TIME_IN_USEC();*/
+	}
+#endif /* DO_S_RESIDENCY_SAMPLE*/
+
+#if DO_ACPI_S3_SAMPLE
+	/*struct timeval cur_time;*/
+#if 0
+	if (pw_is_slm && IS_ACPI_S3_MODE()) {
+		/*
+		 * Ensure we reset the ACPI S3 'start' TSC counter.
+		 */
+		startTSC_acpi_s3 = 0x0;
+		produce_acpi_s3_sample(false);
+	}
+#endif /* if 0*/
+#endif
+
+	return SUCCESS;
+};
+
+/*
+ * STOP/PAUSE/CANCEL a (running) collection.
+ *
+ * (a) Unregister all tracepoints.
+ * (b) Reset all stats.
+ * (c) Wake any process waiting for full buffers.
+ */
+int stop_collection(PWCollector_cmd_t cmd)
+{
+	/*
+	 * Reset the power collector task.
+	 */
+	pw_power_collector_task = NULL;
+	/*
+	 * Reset the collection start TSC.
+	 */
+	pw_collection_start_tsc = 0;
+	/*
+	 * Reset the collection time.
+	 */
+	pw_collection_time_ticks = 0;
+	/*
+	 * Was the ACPI S3 hrtimer active? If so, cancel it.
+	 */
+	if (hrtimer_active(&pw_acpi_s3_hrtimer)) {
+		printk(KERN_INFO
+		       "WARNING: active ACPI S3 timer -- trying to cancel!\n");
+		hrtimer_try_to_cancel(&pw_acpi_s3_hrtimer);
+	}
+#if DO_S_RESIDENCY_SAMPLE
+	if (IS_S_RESIDENCY_MODE()) {
+		produce_boundary_s_residency_msg_i(false);	/* "false" ==> NOT begin boundary*/
+		startTSC_s_residency = 0;	/* redundant!*/
+	}
+#endif /* DO_S_RESIDENCY_SAMPLE*/
+
+#if DO_ACPI_S3_SAMPLE
+#if 0
+	if (pw_is_slm && IS_ACPI_S3_MODE()) {
+		produce_acpi_s3_sample(false);
+	}
+#endif /* if 0*/
+#endif
+
+	INTERNAL_STATE.collectionStopJIFF = jiffies;
+	INTERNAL_STATE.write_to_buffers = false;
+	{
+		if (true && cmd == PW_PAUSE) {
+			u64 tmp_tsc = 0;
+			tscval(&tmp_tsc);
+			OUTPUT(0, KERN_INFO "RECEIVED PAUSE at tsc = %llu\n",
+			       tmp_tsc);
+		}
+	}
+
+	{
+		unregister_non_pausable_probes();
+	}
+
+	if (cmd == PW_STOP || cmd == PW_CANCEL) {
+		unregister_pausable_probes();
+		/*
+		 * Get STOP P-state samples
+		 */
+		if (likely(IS_FREQ_MODE())) {
+			get_current_cpu_frequency(false);	/* "false" ==> collection STOP*/
+		}
+		/*
+		 * Get STOP C-state samples.
+		 */
+		if (likely(IS_SLEEP_MODE() || IS_C_STATE_MODE())) {
+			generate_end_tps_samples();
+		}
+		/*
+		 * Gather some stats on # of samples produced and dropped.
+		 */
+		{
+			pw_count_samples_produced_dropped();
+		}
+	}
+
+	/* Reset the (per-cpu) "per_cpu_t" structs that hold MSR residencies*/
+	{
+		reset_per_cpu_msr_residencies();
+	}
+
+	/*
+	 * Reset IOCTL stats
+	 *
+	 * STOP/CANCEL ONLY
+	 */
+#if DO_IOCTL_STATS
+	if (cmd == PW_STOP || cmd == PW_CANCEL) {
+		reset_statistics();
+	}
+#endif
+
+	/*
+	 * Tell consumers to 'flush' all buffers. We need to
+	 * defer this as long as possible because it needs to be
+	 * close to the 'wake_up_interruptible', below.
+	 */
+	{
+		INTERNAL_STATE.drain_buffers = true;
+		smp_mb();
+	}
+
+	/*
+	 * There might be a reader thread blocked on a read: wake
+	 * it up to give it a chance to respond to changed
+	 * conditions.
+	 */
+	{
+		wake_up_interruptible(&pw_reader_queue);
+	}
+
+	/*
+	 * Delete all non-kernel timers.
+	 * Also delete the wakelock map.
+	 *
+	 * STOP/CANCEL ONLY
+	 */
+	if (cmd == PW_STOP || cmd == PW_CANCEL) {
+		delete_all_non_kernel_timers();
+#if DO_USE_CONSTANT_POOL_FOR_WAKELOCK_NAMES
+		destroy_wlock_map();
+#endif /* DO_USE_CONSTANT_POOL_FOR_WAKELOCK_NAMES*/
+		pw_pr_debug("Debug: deallocating on a stop/cancel!\n");
+		pw_deallocate_msr_info_i(&INTERNAL_STATE.msr_addrs);
+		/* pw_deallocate_platform_res_info_i(&INTERNAL_STATE.platform_res_addrs);*/
+		pw_deallocate_platform_res_info_i();
+		pw_reset_msr_info_sets();
+	}
+
+	OUTPUT(0, KERN_INFO "\tUNREGISTERED all probes!\n");
+	return SUCCESS;
+};
+
+long handle_cmd(PWCollector_cmd_t cmd)
+{
+	PWCollector_cmd_t prev_cmd;
+	/*
+	 * Sanity check cmd range.
+	 */
+	if (cmd < PW_START || cmd > PW_MARK) {
+		pw_pr_error("Error: UNSUPPORTED cmd=%d\n", cmd);
+		return -ERROR;
+	}
+	/*
+	 * Check to see if there are any invalid
+	 * command combinations (e.g. START -> START etc.)
+	 */
+	if (!is_cmd_valid(cmd)) {
+		pw_pr_error("Error: INVALID requested cmd=%d, CURRENT cmd=%d\n",
+			    cmd, INTERNAL_STATE.cmd);
+		return -ERROR;
+	}
+	/*
+	 * OK, we've gotten a valid command.
+	 * Store it.
+	 */
+	prev_cmd = INTERNAL_STATE.cmd;
+	INTERNAL_STATE.cmd = cmd;
+	/*
+	 * Actions based on specific commands here...
+	 */
+	switch (cmd) {
+	case PW_START:
+	case PW_RESUME:
+		INTERNAL_STATE.drain_buffers = false;
+		/* startJIFF = jiffies;*/
+		{
+			if (start_collection(cmd))
+				return -ERROR;
+		}
+		break;
+	case PW_STOP:
+		/* INTERNAL_STATE.drain_buffers = true;*/
+	case PW_PAUSE:
+	case PW_CANCEL:
+		/* stopJIFF = jiffies;*/
+		{
+			stop_collection(cmd);
+		}
+		break;
+	default:
+		pw_pr_error("Error: UNSUPPORTED cmd=%d\n", cmd);
+		/*
+		 * Reset "cmd" state to what it was before
+		 * this ioctl.
+		 */
+		INTERNAL_STATE.cmd = prev_cmd;
+		return -ERROR;
+	}
+	OUTPUT(3,
+	       KERN_INFO
+	       "Debug: Successfully switched mode from %d to %d: IS_COLLECTING = %d\n",
+	       prev_cmd, cmd, IS_COLLECTING());
+	return SUCCESS;
+};
+
+long do_cmd(PWCollector_cmd_t cmd, u64 __user * remote_output_args, int size)
+{
+	int retVal = SUCCESS;
+
+	if (!remote_output_args) {
+		pw_pr_error("ERROR: NULL remote_output_args value?!\n");
+		return -ERROR;
+	}
+	/*
+	 * Handle the command itself.
+	 */
+	if (handle_cmd(cmd)) {
+		return -ERROR;
+	}
+	/*
+	 * Then check if the user requested some collection stats.
+	 */
+#if DO_COUNT_DROPPED_SAMPLES
+	if (cmd == PW_STOP || cmd == PW_CANCEL) {
+		/* u64 local_args[2] = {total_num_samples_produced, total_num_samples_dropped};*/
+		u64 local_args[2] =
+		    { pw_num_samples_produced, pw_num_samples_dropped };
+		/* for debugging!*/
+		/* u64 local_args[2] = {100, 10}; */
+		if (copy_to_user(remote_output_args, local_args, size))	/* returns number of bytes that could NOT be copied*/
+			retVal = -ERROR;
+	}
+#endif /* DO_COUNT_DROPPED_SAMPLES*/
+
+	return retVal;
+};
+
+/*
+ * Callback from Power Manager SUSPEND/RESUME events. Useful if the device was suspended
+ * (i.e. entered ACPI 'S3') during the collection.
+ */
+int pw_alrm_suspend_notifier_callback_i(struct notifier_block *block,
+					unsigned long state, void *dummy)
+{
+	u64 tsc_suspend_time_ticks = 0;
+	u64 suspend_time_ticks = 0;
+	/* u64 usec = 0;*/
+	u64 suspend_time_usecs = 0;
+	u64 base_operating_freq_mhz = base_operating_freq_khz / 1000;
+
+	if (!pw_is_slm) {
+		return NOTIFY_DONE;
+	}
+	switch (state) {
+	case PM_SUSPEND_PREPARE:
+		/*
+		 * Entering SUSPEND.
+		 */
+		tscval(&pw_suspend_start_tsc);
+		printk(KERN_INFO "pw: SUSPEND PREPARE: tsc = %llu\n",
+		       pw_suspend_start_tsc);
+		if (likely(IS_COLLECTING())) {
+			if (IS_S_RESIDENCY_MODE()) {
+				/*
+				 * Generate an S_RESIDENCY sample.
+				 */
+				int cpu = RAW_CPU();
+				PWCollector_msg_t msg;
+				s_res_msg_t *smsg =
+				    INTERNAL_STATE.platform_residency_msg;
+				u64 *values = smsg->residencies;
+
+#if 0
+				switch (INTERNAL_STATE.collection_type) {
+				case PW_IO_IPC:
+				case PW_IO_MMIO:
+					/*
+					 * ASSUMPTION:
+					 * S0iX addresses are layed out as following:
+					 * S0i1, S0i2, S0i3, <others>
+					 * Where "others" is optional.
+					 */
+					pw_suspend_start_s0i3 =
+					    *((u64 *) INTERNAL_STATE.
+					      platform_remapped_addrs[2]);
+					break;
+				default:
+					printk(KERN_INFO
+					       "ERROR: unsupported S0iX collection type: %u!\n",
+					       INTERNAL_STATE.collection_type);
+					return NOTIFY_DONE;
+				}
+#endif /* if 0*/
+
+				/*
+				 * No residency counters available
+				 */
+				msg.data_type = S_RESIDENCY;
+				msg.cpuidx = cpu;
+				msg.tsc = pw_suspend_start_tsc;
+				msg.data_len = sizeof(*smsg) + sizeof(u64) * (INTERNAL_STATE.num_addrs + 2);	/* "+2" for S0i3, S3*/
+
+				values[0] =
+				    pw_suspend_start_tsc - startTSC_s_residency;
+
+				pw_populate_s_residency_values_i(values, false);	/* "false" ==> NOT begin boundary*/
+
+				pw_suspend_start_s0i3 = values[3];	/* values array has entries in order: S0i0, S0i1, S0i2, S0i3, ...*/
+
+				msg.p_data = (u64) ((unsigned long)(smsg));
+
+				/*
+				 * OK, everything computed. Now copy
+				 * this sample into an output buffer
+				 */
+				pw_produce_generic_msg(&msg, true);	/* "true" ==> allow wakeups*/
+			}
+			/*
+			 * Also need to send an ACPI S3 sample.
+			 */
+			if (IS_ACPI_S3_MODE()) {
+				/* produce_acpi_s3_sample(false);*/
+				/* produce_acpi_s3_sample(0);*/
+				produce_acpi_s3_sample(pw_suspend_start_tsc,
+						       0 /* s3 res */ );
+			}
+			/*
+			 * And finally, the special 'broadcast' wakelock sample.
+			 */
+			if (IS_WAKELOCK_MODE()) {
+				PWCollector_msg_t sample;
+				w_sample_t w_msg;
+
+				memset(&w_msg, 0, sizeof(w_msg));
+
+				sample.cpuidx = RAW_CPU();
+				sample.tsc = pw_suspend_start_tsc;
+
+				w_msg.type = PW_WAKE_UNLOCK_ALL;
+
+				sample.data_type = W_STATE;
+				sample.data_len = sizeof(w_msg);
+				sample.p_data = (u64) (unsigned long)&w_msg;
+				/*
+				 * OK, everything computed. Now copy
+				 * this sample into an output buffer
+				 */
+				pw_produce_generic_msg(&sample, true);	/* "true" ==> wakeup sleeping readers, if required*/
+			}
+
+			printk(KERN_INFO "SUSPEND PREPARE s0i3 = %llu\n",
+			       pw_suspend_start_s0i3);
+		}
+		break;
+	case PM_POST_SUSPEND:
+		/*
+		 * Exitted SUSPEND -- check to see if we've been in suspend
+		 * for longer than the collection time specified by the user.
+		 * If so, send the use a SIGINT -- that will force it to
+		 * stop collecting.
+		 */
+		tscval(&pw_suspend_stop_tsc);
+		printk(KERN_INFO "pw: POST SUSPEND: tsc = %llu\n",
+		       pw_suspend_stop_tsc);
+		BUG_ON(pw_suspend_start_tsc == 0);
+
+		if (likely(IS_COLLECTING())) {
+			if (IS_S_RESIDENCY_MODE()) {
+#if 0
+				switch (INTERNAL_STATE.collection_type) {
+				case PW_IO_IPC:
+				case PW_IO_MMIO:
+					/*
+					 * ASSUMPTION:
+					 * S0iX addresses are layed out as following:
+					 * S0i1, S0i2, S0i3, <others>
+					 * Where "others" is optional.
+					 */
+					pw_suspend_stop_s0i3 =
+					    *((u64 *) INTERNAL_STATE.
+					      platform_remapped_addrs[2]);
+					break;
+				default:
+					printk(KERN_INFO
+					       "ERROR: unsupported S0iX collection type: %u!\n",
+					       INTERNAL_STATE.collection_type);
+					return NOTIFY_DONE;
+				}
+#endif /* if 0*/
+				/*
+				 * We need to an 'S_RESIDENCY' sample detailing the actual supend
+				 * statistics (when did the device get suspended; for how long
+				 * was it suspended etc.).
+				 */
+				{
+					PWCollector_msg_t msg;
+					s_res_msg_t *smsg =
+					    INTERNAL_STATE.
+					    platform_residency_msg;
+					u64 *values = smsg->residencies;
+
+					msg.data_type = S_RESIDENCY;
+					msg.cpuidx = RAW_CPU();
+					msg.tsc = pw_suspend_stop_tsc;
+					msg.data_len = sizeof(*smsg) + sizeof(u64) * (INTERNAL_STATE.num_addrs + 2);	/* "+2" for S0i3, S3*/
+
+					values[0] =
+					    pw_suspend_stop_tsc -
+					    startTSC_s_residency;
+
+					pw_populate_s_residency_values_i(values, false);	/* "false" ==> NOT begin boundary*/
+
+					pw_suspend_stop_s0i3 = values[3];	/* values array has entries in order: S0i0, S0i1, S0i2, S0i3, ...*/
+
+					/*
+					 * UPDATE: TNG, VLV have S0iX counter incrementing at TSC frequency!!!
+					 */
+					if (pw_is_slm) {
+						suspend_time_ticks =
+						    (pw_suspend_stop_s0i3 -
+						     pw_suspend_start_s0i3);
+					} else {
+						suspend_time_usecs =
+						    (pw_suspend_stop_s0i3 -
+						     pw_suspend_start_s0i3);
+						suspend_time_ticks =
+						    suspend_time_usecs *
+						    base_operating_freq_mhz;
+					}
+					printk(KERN_INFO
+					       "BASE operating freq_mhz = %llu\n",
+					       base_operating_freq_mhz);
+					printk(KERN_INFO
+					       "POST SUSPEND s0i3 = %llu, S3 RESIDENCY = %llu (%llu ticks)\n",
+					       pw_suspend_stop_s0i3,
+					       suspend_time_usecs,
+					       suspend_time_ticks);
+
+					/*
+					 * PWR library EXPECTS 5th entry to be the ACPI S3 residency (in clock ticks)!
+					 */
+					values[4] = suspend_time_ticks;
+
+					msg.p_data =
+					    (u64) ((unsigned long)(smsg));
+
+					/*
+					 * OK, everything computed. Now copy
+					 * this sample into an output buffer
+					 */
+					pw_produce_generic_msg(&msg, true);	/* "true" ==> allow wakeups*/
+				}
+			}	/* IS_S_RESIDENCY_MODE()*/
+		} else {
+			tsc_suspend_time_ticks =
+			    (pw_suspend_stop_tsc - pw_suspend_start_tsc);
+			suspend_time_ticks = tsc_suspend_time_ticks;
+		}
+		printk(KERN_INFO "OK: suspend time ticks = %llu\n",
+		       suspend_time_ticks);
+		if (IS_ACPI_S3_MODE()) {
+			/* produce_acpi_s3_sample(suspend_time_ticks);*/
+			produce_acpi_s3_sample(pw_suspend_stop_tsc,
+					       suspend_time_ticks /* s3 res */
+					       );
+		}
+
+		break;
+	default:
+		pw_pr_error("pw: unknown = %lu\n", state);
+	}
+	return NOTIFY_DONE;
+};
+
+/*
+ * PM notifier.
+ */
+struct notifier_block pw_alrm_pm_suspend_notifier = {
+	.notifier_call = &pw_alrm_suspend_notifier_callback_i,
+};
+
+static inline int get_arg_lengths(unsigned long ioctl_param, int *in_len,
+				  int *out_len)
+{
+	ioctl_args_stub_t local_stub, *remote_stub;
+
+	if (!in_len || !out_len) {
+		pw_pr_error("ERROR: NULL in_len or out_len?!\n");
+		return -ERROR;
+	}
+
+	remote_stub = (ioctl_args_stub_t *) ioctl_param;
+	if (copy_from_user(&local_stub, remote_stub, sizeof(ioctl_args_stub_t))) {
+		pw_pr_error("ERROR: could NOT extract local stub!\n");
+		return -ERROR;
+	}
+	OUTPUT(0, KERN_INFO "OK: in_len = %d, out_len = %d\n",
+	       local_stub.in_len, local_stub.out_len);
+	*in_len = local_stub.in_len;
+	*out_len = local_stub.out_len;
+	return SUCCESS;
+};
+
+#if defined(HAVE_COMPAT_IOCTL) && defined(CONFIG_X86_64)
+#define MATCH_IOCTL(num, pred) ( (num) == (pred) || (num) == (pred##32) )
+#else
+#define MATCH_IOCTL(num, pred) ( (num) == (pred) )
+#endif
+
+/*
+ * Service IOCTL calls from user-space.
+ * Handles both 32b and 64b calls.
+ */
+long pw_unlocked_handle_ioctl_i(unsigned int ioctl_num,
+				struct PWCollector_ioctl_arg *remote_args,
+				unsigned long ioctl_param)
+{
+	int local_in_len, local_out_len;
+	PWCollector_cmd_t cmd;
+	int tmp = -1;
+	struct PWCollector_ioctl_arg local_args;
+
+	/* printk(KERN_INFO "HANDLING IOCTL: %u\n", ioctl_num);*/
+
+	if (!remote_args) {
+		pw_pr_error("ERROR: NULL remote_args value?!\n");
+		return -ERROR;
+	}
+
+	/*
+	 * (1) Sanity check:
+	 * Before doing anything, double check to
+	 * make sure this IOCTL was really intended
+	 * for us!
+	 */
+	if (_IOC_TYPE(ioctl_num) != APWR_IOCTL_MAGIC_NUM) {
+		pw_pr_error
+		    ("ERROR: requested IOCTL TYPE (%d) != APWR_IOCTL_MAGIC_NUM (%d)\n",
+		     _IOC_TYPE(ioctl_num), APWR_IOCTL_MAGIC_NUM);
+		return -ERROR;
+	}
+	/*
+	 * (2) Extract arg lengths.
+	 */
+	if (copy_from_user(&local_args, remote_args, sizeof(local_args))) {
+		pw_pr_error("ERROR copying in data from userspace\n");
+		return -ERROR;
+	}
+	local_in_len = local_args.in_len;
+	local_out_len = local_args.out_len;
+	OUTPUT(0, KERN_INFO "GU: local_in_len = %d, local_out_len = %d\n",
+	       local_in_len, local_out_len);
+	/*
+	 * (3) Service individual IOCTL requests.
+	 */
+	if (MATCH_IOCTL(ioctl_num, PW_IOCTL_CONFIG)) {
+		/* printk(KERN_INFO "PW_IOCTL_CONFIG\n");*/
+		/* return set_config((struct PWCollector_config *)remote_args->in_arg, local_in_len);*/
+		return set_config((struct PWCollector_config __user *)
+				  local_args.in_arg, local_in_len);
+	} else if (MATCH_IOCTL(ioctl_num, PW_IOCTL_CMD)) {
+		if (get_user
+		    (cmd, ((PWCollector_cmd_t __user *) local_args.in_arg))) {
+			pw_pr_error("ERROR: could NOT extract cmd value!\n");
+			return -ERROR;
+		}
+		/* return handle_cmd(cmd);*/
+		/* return do_cmd(cmd, (u64 *)remote_args->out_arg, local_out_len);*/
+		return do_cmd(cmd, (u64 __user *) local_args.out_arg,
+			      local_out_len);
+	} else if (MATCH_IOCTL(ioctl_num, PW_IOCTL_STATUS)) {
+		/* printk(KERN_INFO "PW_IOCTL_STATUS\n");*/
+		/*
+		 * For now, we assume STATUS information can only
+		 * be retrieved for an ACTIVE collection.
+		 */
+		if (!IS_COLLECTING()) {
+			pw_pr_error
+			    ("\tError: status information requested, but NO COLLECTION ONGOING!\n");
+			return -ERROR;
+		}
+#if DO_IOCTL_STATS
+		return get_status((struct PWCollector_status __user *)
+				  local_args.out_arg, local_out_len);
+#else
+		return -ERROR;
+#endif
+	} else if (MATCH_IOCTL(ioctl_num, PW_IOCTL_CHECK_PLATFORM)) {
+		/* printk(KERN_INFO "PW_IOCTL_CHECK_PLATFORM\n");*/
+		if ((tmp =
+		     check_platform((struct PWCollector_check_platform __user *)
+				    local_args.out_arg, local_out_len)))
+			if (tmp < 0)	/* ERROR*/
+				return 2;	/* for PW_IOCTL_CHECK_PLATFORM: >= 2 ==> Error; == 1 => SUCCESS, but not EOF; 0 ==> SUCCESS, EOF*/
+		return tmp;
+	} else if (MATCH_IOCTL(ioctl_num, PW_IOCTL_VERSION)) {
+		/* printk(KERN_INFO "PW_IOCTL_VERSION\n");*/
+		OUTPUT(3, KERN_INFO "OUT len = %d\n", local_out_len);
+		return get_version((struct PWCollector_version_info __user *)
+				   local_args.out_arg, local_out_len);
+	} else if (MATCH_IOCTL(ioctl_num, PW_IOCTL_MICRO_PATCH)) {
+		/* printk(KERN_INFO "PW_IOCTL_MICRO_PATCH\n");*/
+		return get_micro_patch_ver((int __user *)local_args.out_arg,
+					   local_out_len);
+	} else if (MATCH_IOCTL(ioctl_num, PW_IOCTL_TURBO_THRESHOLD)) {
+		/* printk(KERN_INFO "PW_IOCTL_TURBO_THRESHOLD\n");*/
+		return
+		    get_turbo_threshold((struct PWCollector_turbo_threshold
+					 __user *)local_args.out_arg,
+					local_out_len);
+	} else if (MATCH_IOCTL(ioctl_num, PW_IOCTL_COLLECTION_TIME)) {
+		/*
+		 * Only supported on Android/Moorestown!!!
+		 */
+		/* printk(KERN_INFO "PW_IOCTL_COLLECTION_TIME\n");*/
+/* #ifdef CONFIG_X86_MRST*/
+		{
+			unsigned int local_collection_time_secs = 0;
+			if (get_user
+			    (local_collection_time_secs,
+			     (unsigned long __user *)local_args.in_arg)) {
+				pw_pr_error
+				    ("ERROR extracting local collection time!\n");
+				return -ERROR;
+			}
+			/* printk(KERN_INFO "OK: received local collection time = %u seconds\n", local_collection_time_secs);*/
+			/*
+			 * Get (and set) collection START time...
+			 */
+			{
+				/* pw_rtc_time_start = pw_get_current_rtc_time_seconds();*/
+			}
+			/*
+			 * ...and the total collection time...
+			 */
+			{
+				pw_collection_time_secs =
+				    local_collection_time_secs;
+				pw_collection_time_ticks =
+				    (u64) local_collection_time_secs *(u64)
+				    base_operating_freq_khz *1000;
+			}
+			/*
+			 * ...and the client task.
+			 */
+			{
+				pw_power_collector_task = current;
+			}
+		}
+/* #endif */
+		return SUCCESS;
+	} else if (MATCH_IOCTL(ioctl_num, PW_IOCTL_MMAP_SIZE)) {
+		/* printk(KERN_INFO "MMAP_SIZE received!\n");*/
+		if (put_user
+		    (pw_buffer_alloc_size,
+		     (unsigned long __user *)local_args.out_arg)) {
+			pw_pr_error("ERROR transfering buffer size!\n");
+			return -ERROR;
+		}
+		return SUCCESS;
+	} else if (MATCH_IOCTL(ioctl_num, PW_IOCTL_BUFFER_SIZE)) {
+		unsigned long buff_size = pw_get_buffer_size();
+		pw_pr_debug("BUFFER_SIZE received!\n");
+		if (put_user
+		    (buff_size, (unsigned long __user *)local_args.out_arg)) {
+			pw_pr_error("ERROR transfering buffer size!\n");
+			return -ERROR;
+		}
+		return SUCCESS;
+	} else if (MATCH_IOCTL(ioctl_num, PW_IOCTL_DO_D_NC_READ)) {
+		return SUCCESS;
+	} else if (MATCH_IOCTL(ioctl_num, PW_IOCTL_FSB_FREQ)) {
+		/* printk(KERN_INFO "PW_IOCTL_FSB_FREQ  received!\n");*/
+		/*
+		 * UPDATE: return fsb-freq AND max non-turbo ratio here.
+		 * UPDATE: and also the LFM ratio (i.e. "max efficiency")
+		 * UPDATE: and also the max turbo ratio (i.e. "HFM")
+		 */
+		{
+			u64 __fsb_non_turbo =
+			    ((pw_u64_t) pw_max_turbo_ratio << 48 | (pw_u64_t)
+			     pw_max_non_turbo_ratio << 32 |
+			     pw_max_efficiency_ratio << 16 |
+			     pw_msr_fsb_freq_value);
+			pw_pr_debug("__fsb_non_turbo = %llu\n",
+				    __fsb_non_turbo);
+			if (put_user
+			    (__fsb_non_turbo,
+			     (u64 __user *) local_args.out_arg)) {
+				pw_pr_error
+				    ("ERROR transfering FSB_FREQ MSR value!\n");
+				return -ERROR;
+			}
+		}
+		return SUCCESS;
+	} else if (MATCH_IOCTL(ioctl_num, PW_IOCTL_MSR_ADDRS)) {
+		/* printk(KERN_INFO "PW_IOCTL_MSR_ADDRS\n");*/
+		return pw_set_msr_addrs((struct pw_msr_info __user *)local_args.
+					in_arg, local_in_len);
+	} else if (MATCH_IOCTL(ioctl_num, PW_IOCTL_PLATFORM_RES_CONFIG)) {
+		/* printk(KERN_INFO "PW_IOCTL_PLATFORM_RES_CONFIG encountered!\n");*/
+		return
+		    pw_set_platform_res_config_i((struct
+						  PWCollector_platform_res_info
+						  __user *)local_args.in_arg,
+						 local_in_len);
+		/* return -ERROR;*/
+	} else {
+		/* ERROR!*/
+		pw_pr_error("Invalid IOCTL command = %u\n", ioctl_num);
+		return -ERROR;
+	}
+	/*
+	 * Should NEVER reach here!
+	 */
+	return -ERROR;
+};
+
+/*
+ * (1) Handle 32b IOCTLs in 32b kernel-space.
+ * (2) Handle 64b IOCTLs in 64b kernel-space.
+ */
+long pw_device_unlocked_ioctl(struct file *filp, unsigned int ioctl_num,
+			      unsigned long ioctl_param)
+{
+	OUTPUT(3, KERN_INFO "64b transfering to handler!\n");
+	return pw_unlocked_handle_ioctl_i(ioctl_num,
+					  (struct PWCollector_ioctl_arg *)
+					  ioctl_param, ioctl_param);
+};
+
+#if defined(HAVE_COMPAT_IOCTL) && defined(CONFIG_X86_64)
+/*
+ * Handle 32b IOCTLs in 64b kernel-space.
+ */
+long pw_device_compat_ioctl(struct file *file, unsigned int ioctl_num,
+			    unsigned long ioctl_param)
+{
+	struct PWCollector_ioctl_arg32 __user *remote_args32 =
+	    compat_ptr(ioctl_param);
+	struct PWCollector_ioctl_arg __user *remote_args = NULL;
+	u32 data;
+	int tmp;
+
+	remote_args = compat_alloc_user_space(sizeof(*remote_args));
+	if (!remote_args) {
+		return -ERROR;
+	}
+	if (get_user(tmp, &remote_args32->in_len)
+	    || put_user(tmp, &remote_args->in_len)) {
+		return -ERROR;
+	}
+	if (get_user(tmp, &remote_args32->out_len)
+	    || put_user(tmp, &remote_args->out_len)) {
+		return -ERROR;
+	}
+	if (get_user(data, &remote_args32->in_arg)
+	    || put_user(compat_ptr(data), &remote_args->in_arg)) {
+		return -ERROR;
+	}
+	if (get_user(data, &remote_args32->out_arg)
+	    || put_user(compat_ptr(data), &remote_args->out_arg)) {
+		return -ERROR;
+	}
+	/* printk(KERN_INFO "OK, copied. Remote_args = %p\n", remote_args);*/
+
+	/* return -ERROR;*/
+	return pw_unlocked_handle_ioctl_i(ioctl_num, remote_args, ioctl_param);
+};
+#endif /* COMPAT && x64*/
+
+/*
+ * Service an "open(...)" call from user-space.
+ */
+static int pw_device_open(struct inode *inode, struct file *file)
+{
+	/*
+	 * We don't want to talk to two processes at the same time
+	 */
+	if (test_and_set_bit(DEV_IS_OPEN, &dev_status)) {
+		/* Device is busy*/
+		return -EBUSY;
+	}
+
+	try_module_get(THIS_MODULE);
+	return SUCCESS;
+};
+
+/*
+ * Service a "close(...)" call from user-space.
+ */
+static int pw_device_release(struct inode *inode, struct file *file)
+{
+	OUTPUT(3, KERN_INFO "Debug: Device Release!\n");
+	/*
+	 * Did the client just try to zombie us?
+	 */
+	if (IS_COLLECTING()) {
+		pw_pr_error
+		    ("ERROR: Detected ongoing collection on a device release!\n");
+		INTERNAL_STATE.cmd = PW_CANCEL;
+		stop_collection(PW_CANCEL);
+	}
+	module_put(THIS_MODULE);
+	/*
+	 * We're now ready for our next caller
+	 */
+	clear_bit(DEV_IS_OPEN, &dev_status);
+	return SUCCESS;
+};
+
+int pw_register_dev(void)
+{
+	int ret;
+
+	/*
+	 * Create the character device
+	 */
+	ret = alloc_chrdev_region(&apwr_dev, 0, 1, PW_DEVICE_NAME);
+	apwr_dev_major_num = MAJOR(apwr_dev);
+	apwr_class = class_create(THIS_MODULE, "apwr");
+	if (IS_ERR(apwr_class))
+		printk(KERN_ERR "Error registering apwr class\n");
+
+	device_create(apwr_class, NULL, apwr_dev, NULL, PW_DEVICE_NAME);
+	apwr_cdev = cdev_alloc();
+	if (apwr_cdev == NULL) {
+		printk("Error allocating character device\n");
+		return ret;
+	}
+	apwr_cdev->owner = THIS_MODULE;
+	apwr_cdev->ops = &Fops;
+	if (cdev_add(apwr_cdev, apwr_dev, 1) < 0) {
+		printk("Error registering device driver\n");
+		return ret;
+	}
+
+	return ret;
+};
+
+void pw_unregister_dev(void)
+{
+	/*
+	 * Remove the device
+	 */
+	unregister_chrdev(apwr_dev_major_num, PW_DEVICE_NAME);
+	device_destroy(apwr_class, apwr_dev);
+	class_destroy(apwr_class);
+	unregister_chrdev_region(apwr_dev, 1);
+	cdev_del(apwr_cdev);
+};
+
+#if 0
+#ifndef __arm__
+static void disable_auto_demote(void *dummy)
+{
+	unsigned long long auto_demote_disable_flags = AUTO_DEMOTE_FLAGS();
+	unsigned long long msr_addr = AUTO_DEMOTE_MSR;
+	unsigned long long msr_bits = 0, old_msr_bits = 0;
+
+	WUWATCH_RDMSRL(msr_addr, msr_bits);
+	old_msr_bits = msr_bits;
+	msr_bits &= ~auto_demote_disable_flags;
+	wrmsrl(msr_addr, msr_bits);
+
+	if (true) {
+		printk(KERN_INFO
+		       "[%d]: old_msr_bits = %llu, was auto enabled = %s, DISABLED auto-demote\n",
+		       RAW_CPU(), old_msr_bits,
+		       GET_BOOL_STRING(IS_AUTO_DEMOTE_ENABLED(old_msr_bits)));
+	}
+};
+
+static void enable_auto_demote(void *dummy)
+{
+	unsigned long long auto_demote_disable_flags = AUTO_DEMOTE_FLAGS();
+	unsigned long long msr_addr = AUTO_DEMOTE_MSR;
+	unsigned long long msr_bits = 0, old_msr_bits = 0;
+
+	WUWATCH_RDMSRL(msr_addr, msr_bits);
+	old_msr_bits = msr_bits;
+	msr_bits |= auto_demote_disable_flags;
+	wrmsrl(msr_addr, msr_bits);
+
+	if (true) {
+		printk(KERN_INFO
+		       "[%d]: OLD msr_bits = %llu, NEW msr_bits = %llu\n",
+		       raw_smp_processor_id(), old_msr_bits, msr_bits);
+	}
+};
+#endif /* ifndef __arm__*/
+#endif
+
+static bool check_auto_demote_flags(int cpu)
+{
+#ifndef __arm__
+	u32 l = 0, h = 0;
+	u64 msr_val = 0;
+	WARN_ON(WUWATCH_RDMSR_SAFE_ON_CPU(cpu, AUTO_DEMOTE_MSR, &l, &h));
+	msr_val = (u64) h << 32 | (u64) l;
+	return IS_AUTO_DEMOTE_ENABLED(msr_val);
+#else
+	return false;
+#endif /* ifndef __arm__*/
+};
+
+static bool check_any_thread_flags(int cpu)
+{
+#ifndef __arm__
+	u32 l = 0, h = 0;
+	u64 msr_val = 0;
+	WARN_ON(WUWATCH_RDMSR_SAFE_ON_CPU
+		(cpu, IA32_FIXED_CTR_CTL_ADDR, &l, &h));
+	msr_val = (u64) h << 32 | (u64) l;
+	return IS_ANY_THREAD_SET(msr_val);
+#else
+	return false;
+#endif /* ifndef __arm__*/
+};
+
+static void check_arch_flags(void)
+{
+	int cpu = 0;
+	/*
+	 * It is ASSUMED that auto-demote and any-thread will either be set on ALL Cpus or on
+	 * none!
+	 */
+	pw_is_any_thread_set = check_any_thread_flags(cpu);
+	pw_is_auto_demote_enabled = check_auto_demote_flags(cpu);
+
+	OUTPUT(0, KERN_INFO "any thread set = %s, auto demote enabled = %s\n",
+	       GET_BOOL_STRING(pw_is_any_thread_set),
+	       GET_BOOL_STRING(pw_is_auto_demote_enabled));
+	return;
+};
+
+#if 0
+#ifndef __arm__
+/*
+ * Enable CPU_CLK_UNHALTED.REF counting
+ * by setting bits 8,9 in MSR_PERF_FIXED_CTR_CTRL
+ * MSR (addr == 0x38d). Also store the previous
+ * value of the MSR.
+ */
+static void enable_ref(void)
+{
+	int cpu;
+	u64 res;
+	int ret;
+
+	u32 *data_copy;		/* [2];*/
+	u32 data[2];
+
+	for_each_online_cpu(cpu) {
+		/*
+		 * (1) Do for IA32_FIXED_CTR_CTL
+		 */
+		{
+			data_copy =
+			    (&per_cpu(CTRL_data_values, cpu))->fixed_data;
+			ret =
+			    WUWATCH_RDMSR_SAFE_ON_CPU(cpu,
+						      IA32_FIXED_CTR_CTL_ADDR,
+						      &data[0], &data[1]);
+			WARN(ret, KERN_WARNING "rdmsr failed with code %d\n",
+			     ret);
+			memcpy(data_copy, data, sizeof(u32) * 2);
+			/*
+			 * Turn on CPU_CLK_UNHALTED.REF counting.
+			 *
+			 * UPDATE: also turn on CPU_CLK_UNHALTED.CORE counting.
+			 */
+			/* data[0] |= 0x300;*/
+			data[0] |= 0x330;
+
+			ret =
+			    wrmsr_safe_on_cpu(cpu, IA32_FIXED_CTR_CTL_ADDR,
+					      data[0], data[1]);
+		}
+		/*
+		 * (2) Do for IA32_PERF_GLOBAL_CTRL_ADDR
+		 */
+		{
+			data_copy =
+			    (&per_cpu(CTRL_data_values, cpu))->perf_data;
+			ret =
+			    WUWATCH_RDMSR_SAFE_ON_CPU(cpu,
+						      IA32_PERF_GLOBAL_CTRL_ADDR,
+						      &data[0], &data[1]);
+			WARN(ret, KERN_WARNING "rdmsr failed with code %d\n",
+			     ret);
+			memcpy(data_copy, data, sizeof(u32) * 2);
+			res = data[1];
+			res <<= 32;
+			res += data[0];
+			OUTPUT(0, KERN_INFO "[%d]: READ res = 0x%llx\n", cpu,
+			       res);
+			/*
+			 * Turn on CPU_CLK_UNHALTED.REF counting.
+			 *
+			 * UPDATE: also turn on CPU_CLK_UNHALTED.CORE counting.
+			 * Set bits 33, 34
+			 */
+			/* data[0] |= 0x330;*/
+			data[1] |= 0x6;
+			/* data[0] = data[1] = 0x0;*/
+
+			ret =
+			    wrmsr_safe_on_cpu(cpu, IA32_PERF_GLOBAL_CTRL_ADDR,
+					      data[0], data[1]);
+		}
+	}
+};
+
+static void restore_ref(void)
+{
+	int cpu;
+	u64 res;
+	int ret;
+
+	u32 *data_copy;
+	u32 data[2];
+
+	memset(data, 0, sizeof(u32) * 2);
+
+	for_each_online_cpu(cpu) {
+		/*
+		 * (1) Do for IA32_FIXED_CTR_CTL
+		 */
+		{
+			data_copy =
+			    (&per_cpu(CTRL_data_values, cpu))->fixed_data;
+			memcpy(data, data_copy, sizeof(u32) * 2);
+
+			res = data[1];
+			res <<= 32;
+			res += data[0];
+
+			OUTPUT(3, KERN_INFO "[%d]: PREV res = 0x%llx\n", cpu,
+			       res);
+			if ((ret =
+			     wrmsr_safe_on_cpu(cpu, IA32_FIXED_CTR_CTL_ADDR,
+					       data[0], data[1]))) {
+				pw_pr_error
+				    ("ERROR writing PREVIOUS IA32_FIXED_CTR_CLT_ADDR values for CPU = %d!\n",
+				     cpu);
+			}
+		}
+		/*
+		 * (2) Do for IA32_PERF_GLOBAL_CTRL_ADDR
+		 */
+		{
+			data_copy =
+			    (&per_cpu(CTRL_data_values, cpu))->perf_data;
+			memcpy(data, data_copy, sizeof(u32) * 2);
+
+			res = data[1];
+			res <<= 32;
+			res += data[0];
+
+			OUTPUT(3, KERN_INFO "[%d]: PREV res = 0x%llx\n", cpu,
+			       res);
+			if ((ret =
+			     wrmsr_safe_on_cpu(cpu, IA32_PERF_GLOBAL_CTRL_ADDR,
+					       data[0], data[1]))) {
+				pw_pr_error
+				    ("ERROR writing PREVIOUS IA32_PERF_GLOBAL_CTRL_ADDR values for CPU = %d!\n",
+				     cpu);
+			}
+		}
+	}
+};
+#endif /* ifndef __arm__*/
+#endif
+
+static void get_fms(unsigned int *family, unsigned int *model,
+		    unsigned int *stepping)
+{
+	unsigned int ecx, edx;
+	unsigned int fms;
+
+	if (!family || !model || !stepping) {
+		pw_pr_error("ERROR: NULL family/model/stepping value?!\n");
+		return;
+	}
+
+asm("cpuid": "=a"(fms), "=c"(ecx), "=d"(edx): "a"(1):"ebx");
+
+	*family = (fms >> 8) & 0xf;
+	*model = (fms >> 4) & 0xf;
+	*stepping = fms & 0xf;
+
+	if (*family == 6 || *family == 0xf) {
+		*model += ((fms >> 16) & 0xf) << 4;
+	}
+	pw_pr_debug("FMS = 0x%x:%x:%x (%d:%d:%d)\n", *family, *model, *stepping,
+		    *family, *model, *stepping);
+};
+
+/*
+ * Check if we're running on ATM.
+ */
+static atom_arch_type_t is_atm(void)
+{
+#ifndef __arm__
+	unsigned int family, model, stepping;
+
+	get_fms(&family, &model, &stepping);
+	/*
+	 * This check below will need to
+	 * be updated for each new
+	 * architecture type!!!
+	 */
+	if (family == 0x6) {
+		switch (model) {
+		case 0x27:
+			switch (stepping) {
+			case 0x1:
+				return MFD;
+			case 0x2:
+				return LEX;
+			}
+			break;
+		case 0x35:
+			return CLV;
+		}
+	}
+#endif /* ifndef __arm__*/
+	return NON_ATOM;
+};
+
+static slm_arch_type_t is_slm(void)
+{
+#ifndef __arm__
+	unsigned int family, model, stepping;
+
+	get_fms(&family, &model, &stepping);
+	/*
+	 * This check below will need to
+	 * be updated for each new
+	 * architecture type!!!
+	 */
+	if (family == 0x6) {
+		switch (model) {
+		case 0x37:
+			return SLM_VLV2;
+		case 0x4a:
+			return SLM_TNG;
+		case 0x4c:
+			return SLM_CHV;
+		case 0x5a:
+			return SLM_ANN;
+		case 0x5c:
+			return SLM_BXT;
+		default:
+			break;
+		}
+	}
+#endif /* __arm__*/
+	return NON_SLM;
+};
+
+static bool is_hsw(void)
+{
+#ifndef __arm__
+	unsigned int family, model, stepping;
+
+	get_fms(&family, &model, &stepping);
+	/*
+	 * This check below will need to
+	 * be updated for each new
+	 * architecture type!!!
+	 */
+	if (family == 0x6) {
+		switch (model) {
+		case 0x3c:
+		case 0x45:
+			return true;
+		default:
+			break;
+		}
+	}
+#endif /* __arm__*/
+	return false;
+};
+
+static bool is_bdw(void)
+{
+#ifndef __arm__
+	unsigned int family, model, stepping;
+
+	get_fms(&family, &model, &stepping);
+	/*
+	 * This check below will need to
+	 * be updated for each new
+	 * architecture type!!!
+	 */
+	if (family == 0x6) {
+		switch (model) {
+		case 0x3d:
+		case 0x47:
+			return true;
+		default:
+			break;
+		}
+	}
+#endif /* __arm__*/
+	return false;
+};
+
+static void test_wlock_mappings(void)
+{
+#if DO_WAKELOCK_SAMPLE
+	produce_w_sample(0, 0x1, PW_WAKE_LOCK, 0, 0, "abcdef", "swapper", 0x0);
+	produce_w_sample(0, 0x1, PW_WAKE_LOCK, 0, 0, "PowerManagerService",
+			 "swapper", 0x0);
+	produce_w_sample(0, 0x1, PW_WAKE_LOCK, 0, 0, "abcdef", "swapper", 0x0);
+#endif
+};
+
+#define PW_GET_SCU_FW_MAJOR(num) ( ( (num) >> 8 ) & 0xff)
+#define PW_GET_SCU_FW_MINOR(num) ( (num) & 0xff )
+
+#ifdef CONFIG_X86_INTEL_MID
+static int pw_do_parse_sfi_oemb_table_i(struct sfi_table_header *header)
+{
+	struct sfi_table_oemb *oemb;	/* "struct sfi_table_oemb" defined in "intel-mid.h"*/
+
+	oemb = (struct sfi_table_oemb *)header;
+	if (!oemb) {
+		printk(KERN_INFO "ERROR: NULL sfi table header?!\n");
+		return -ERROR;
+	}
+	pw_scu_fw_major_minor =
+	    (oemb->scu_runtime_major_version << 8) | (oemb->
+						      scu_runtime_minor_version);
+	pw_pr_debug("Major = %u, Minor = %u\n", oemb->scu_runtime_major_version,
+		    oemb->scu_runtime_minor_version);
+
+	return SUCCESS;
+};
+
+static void pw_do_extract_scu_fw_version(void)
+{
+	if (sfi_table_parse
+	    (SFI_SIG_OEMB, NULL, NULL, &pw_do_parse_sfi_oemb_table_i)) {
+		printk(KERN_INFO
+		       "WARNING: no SFI information; resetting SCU F/W version!\n");
+		pw_scu_fw_major_minor = 0x0;
+	}
+};
+#else /* CONFIG_X86_INTEL_MID*/
+static void pw_do_extract_scu_fw_version(void)
+{
+	pw_scu_fw_major_minor = 0x0;
+};
+#endif /* CONFIG_X86_INTEL_MID*/
+
+#ifndef CONFIG_NR_CPUS_PER_MODULE
+#define CONFIG_NR_CPUS_PER_MODULE 2
+#endif /* CONFIG_NR_CPUS_PER_MODULE*/
+
+static void get_cpu_sibling_mask(int cpu, struct cpumask *sibling_mask)
+{
+	unsigned int base =
+	    (cpu / CONFIG_NR_CPUS_PER_MODULE) * CONFIG_NR_CPUS_PER_MODULE;
+	unsigned int i;
+
+	cpumask_clear(sibling_mask);
+	for (i = base; i < (base + CONFIG_NR_CPUS_PER_MODULE); ++i) {
+		cpumask_set_cpu(i, sibling_mask);
+	}
+};
+
+struct pw_cpufreq_node {
+	int cpu;
+	struct cpumask cpus, related_cpus;
+	unsigned int shared_type;
+	struct list_head list;
+};
+static struct list_head pw_cpufreq_policy_lists;
+
+#if 0
+static int apwr_cpufreq_policy_notifier(struct notifier_block *block,
+					unsigned long val, void *data)
+{
+	struct cpufreq_policy *policy = data;
+	int cpu = policy ? policy->cpu : -1;
+	printk(KERN_INFO "val = %lu, policy = %p, cpu = %d\n", val, policy,
+	       cpu);
+	return PW_SUCCESS;
+};
+
+static struct notifier_block apwr_cpufreq_policy_notifier_block = {
+	.notifier_call = &apwr_cpufreq_policy_notifier
+};
+#endif /* if 0*/
+
+static int set_module_scope_for_cpus(void)
+{
+	/*
+	 * Warning: no support for cpu hotplugging!
+	 */
+	int cpu = 0;
+	INIT_LIST_HEAD(&pw_cpufreq_policy_lists);
+	for_each_online_cpu(cpu) {
+		struct cpumask sibling_mask;
+		struct pw_cpufreq_node *node = NULL;
+		struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+
+		if (!policy) {
+			continue;
+		}
+		/*
+		 * Get siblings for this cpu.
+		 */
+		get_cpu_sibling_mask(cpu, &sibling_mask);
+		/*
+		 * Check if affected_cpus already contains sibling_mask
+		 */
+		if (cpumask_subset(&sibling_mask, policy->cpus)) {
+			/*
+			 * 'sibling_mask' is already a subset of affected_cpus -- nothing
+			 * to do on this CPU.
+			 */
+			cpufreq_cpu_put(policy);
+			continue;
+		}
+
+		node = pw_kmalloc(sizeof(*node), GFP_ATOMIC);
+		if (node) {
+			cpumask_clear(&node->cpus);
+			cpumask_clear(&node->related_cpus);
+
+			node->cpu = cpu;
+			cpumask_copy(&node->cpus, policy->cpus);
+			cpumask_copy(&node->related_cpus, policy->related_cpus);
+			node->shared_type = policy->shared_type;
+		}
+
+		policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
+		/*
+		 * Set siblings. Don't worry about online/offline, that's
+		 * handled below.
+		 */
+		cpumask_copy(policy->cpus, &sibling_mask);
+		/*
+		 * Ensure 'related_cpus' is a superset of 'cpus'
+		 */
+		cpumask_or(policy->related_cpus, policy->related_cpus,
+			   policy->cpus);
+		/*
+		 * Ensure 'cpus' only contains online cpus.
+		 */
+		cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
+
+		cpufreq_cpu_put(policy);
+
+		if (node) {
+			INIT_LIST_HEAD(&node->list);
+			list_add_tail(&node->list, &pw_cpufreq_policy_lists);
+		}
+	}
+	return PW_SUCCESS;
+};
+
+static int reset_module_scope_for_cpus(void)
+{
+	struct list_head *head = &pw_cpufreq_policy_lists;
+	while (!list_empty(head)) {
+		struct pw_cpufreq_node *node =
+		    list_first_entry(head, struct pw_cpufreq_node, list);
+		int cpu = node->cpu;
+		struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+		if (!policy) {
+			continue;
+		}
+		policy->shared_type = node->shared_type;
+		cpumask_copy(policy->related_cpus, &node->related_cpus);
+		cpumask_copy(policy->cpus, &node->cpus);
+
+		cpufreq_cpu_put(policy);
+
+		pw_pr_debug("OK, reset cpufreq_policy for cpu %d\n", cpu);
+		list_del(&node->list);
+		pw_kfree(node);
+	}
+	return PW_SUCCESS;
+};
+
+#if DO_DEBUG_TSC_FREQ_CALCULATION
+static void pw_do_debug_tsc_freq_calc(void)
+{
+	/* Debug code if you want to calculate frequency of the processor*/
+	u64 initial_tsc = 0, end_tsc = 0, elapsed_nsecs = 0, end_nsecs = 0;
+	struct timespec ts;
+	u64 tsc_delta = 0, elapsed_time = 0;
+	u64 freq = 0;
+
+	ktime_get_ts(&ts);
+	elapsed_nsecs = -((u64) ts.tv_sec * 1000000000ULL + (u64) ts.tv_nsec);
+	tscval(&initial_tsc);
+
+	msleep(1000);		/* 1 sec*/
+
+	ktime_get_ts(&ts);
+	elapsed_nsecs += (u64) ts.tv_sec * 1000000000ULL + (u64) ts.tv_nsec;
+	tscval(&end_tsc);
+
+	tsc_delta = end_tsc - initial_tsc;
+
+	freq = tsc_delta / (elapsed_nsecs / 1000);
+	printk(KERN_INFO "Frequency = %llu\n", (unsigned long long)freq);
+};
+#endif /* DO_DEBUG_TSC_FREQ_CALCULATION*/
+
+static int __init init_hooks(void)
+{
+	int ret = SUCCESS;
+
+#if DO_DEBUG_TSC_FREQ_CALCULATION
+	{
+		/*
+		 * Do TSC frequency calculation for debug.
+		 */
+		pw_do_debug_tsc_freq_calc();
+	}
+#endif /* DO_DEBUG_TSC_FREQ_CALCULATION*/
+
+	/*
+	 * We first check to see if
+	 * TRACEPOINTS are ENABLED in the kernel.
+	 * If not, EXIT IMMEDIATELY!
+	 */
+#ifdef CONFIG_TRACEPOINTS
+	OUTPUT(0, KERN_INFO "Tracepoints ON!\n");
+#else
+	pw_pr_error("ERROR: TRACEPOINTS NOT found on system!!!\n");
+	return -ERROR;
+#endif
+
+	/*
+	 * Check if we're running on ATM.
+	 */
+	pw_is_atm = is_atm();
+	/*
+	 * Check if we're running on SLM.
+	 */
+	pw_is_slm = is_slm();
+	/*
+	 * Check if we're running on HSW.
+	 */
+	pw_is_hsw = is_hsw();
+	/*
+	 * Check if we're running on BDW.
+	 */
+	pw_is_bdw = is_bdw();
+	/*
+	 * Sanity!
+	 */
+	BUG_ON(pw_is_atm && pw_is_slm && pw_is_hsw && pw_is_bdw);
+
+	/*
+	 * For MFLD, we also check
+	 * if the required microcode patches
+	 * have been installed. If
+	 * not then EXIT IMMEDIATELY!
+	 */
+#if DO_CHECK_BO_MICROCODE_PATCH
+	{
+		/*
+		 * Read MSR 0x8b -- if microcode patch
+		 * has been applied then the first 12 bits
+		 * of the higher order 32 bits should be
+		 * >= 0x102.
+		 *
+		 * THIS CHECK VALID FOR ATM ONLY!!!
+		 */
+		/*
+		 * Do check ONLY if we're ATM!
+		 */
+		if (pw_is_atm) {
+#ifndef __arm__
+			u64 res;
+			u32 patch_val;
+
+			WUWATCH_RDMSRL(0x8b, res);
+			patch_val = (res >> 32) & 0xfff;
+			if (patch_val < 0x102) {
+				pw_pr_error
+				    ("ERROR: B0 micro code path = 0x%x: REQUIRED >= 0x102!!!\n",
+				     patch_val);
+				return -ERROR;
+			}
+			micro_patch_ver = patch_val;
+			OUTPUT(3, KERN_INFO "patch ver = %u\n",
+			       micro_patch_ver);
+#endif /* ifndef __arm__*/
+		} else {
+			OUTPUT(0,
+			       KERN_INFO
+			       "DEBUG: SKIPPING MICROCODE PATCH check -- NON ATM DETECTED!\n");
+		}
+	}
+#endif
+
+	/*
+	 * Read the 'FSB_FREQ' MSR to determine bus clock freq multiplier.
+	 * Update: ONLY if saltwell or Silvermont!
+	 */
+	if (pw_is_atm || pw_is_slm) {
+		u64 res;
+
+		WUWATCH_RDMSRL(MSR_FSB_FREQ_ADDR, res);
+		/* memcpy(&pw_msr_fsb_freq_value, &res, sizeof(unsigned long));*/
+		memcpy(&pw_msr_fsb_freq_value, &res,
+		       sizeof(pw_msr_fsb_freq_value));
+		pw_pr_debug("MSR_FSB_FREQ value = %u\n", pw_msr_fsb_freq_value);
+	} else {
+		printk(KERN_INFO "NO FSB FREQ!\n");
+	}
+	/*
+	 * Read the Max non-turbo ratio.
+	 */
+	{
+		u64 res = 0;
+		u16 ratio = 0;
+		/*
+		 * Algo:
+		 * (1) If NHM/WMR/SNB -- read bits 15:8 of 'PLATFORM_INFO_MSR_ADDR'
+		 * (2) If MFLD/ATM -- read bits 44:40 of 'CLOCK_CR_GEYSIII_STAT' MSR
+		 * (3) If SLM -- read bits 21:16 of 'PUNIT_CR_IACORE_RATIOS' (MSR_IA32_IACORE_RATIOS) MSR
+		 * UPDATE: If SLM -- read bits 15:8 of 'PLATFORM_INFO_MSR_ADDR'
+		 * to extract the 'base operating ratio'.
+		 * To get actual TSC frequency, multiply this ratio
+		 * with the bus clock frequency.
+		 */
+		if (pw_is_atm) {
+			WUWATCH_RDMSRL(CLOCK_CR_GEYSIII_STAT_MSR_ADDR, res);
+			/*
+			 * Base operating Freq ratio is
+			 * bits 44:40
+			 */
+			ratio = (res >> 40) & 0x1f;
+		} else if (pw_is_slm) {
+			WUWATCH_RDMSRL(PLATFORM_INFO_MSR_ADDR, res);
+			ratio = (res >> 8) & 0xFF;	/* Bits 15:8*/
+			/*
+			 * End debug code.
+			 */
+		} else {
+			WUWATCH_RDMSRL(PLATFORM_INFO_MSR_ADDR, res);
+			/*
+			 * Base Operating Freq ratio is
+			 * bits 15:8
+			 */
+			ratio = (res >> 8) & 0xff;
+		}
+
+		pw_max_non_turbo_ratio = ratio;
+		pw_pr_debug("MAX non-turbo ratio = %u\n",
+			    (u32) pw_max_non_turbo_ratio);
+	}
+	/*
+	 * Read the max efficiency ratio
+	 * (AKA "LFM")
+	 */
+	{
+		u64 res = 0;
+		u16 ratio = 0;
+		/*
+		 * Algo:
+		 * (1) If "Core" -- read bits 47:40 of 'PLATFORM_INFO_MSR_ADDR'
+		 * (2) If Atom[STW] -- ???
+		 * (3) If Atom[SLM] -- read bits 13:8 of 'PUNIT_CR_IACORE_RATIOS' (MSR_IA32_IACORE_RATIOS) MSR
+		 * UPDATE: If SLM -- read bits 47:40 of 'PLATFORM_INFO_MSR_ADDR'
+		 * to extract the 'base operating ratio'.
+		 */
+		if (pw_is_atm) {
+			/*
+			 * TODO
+			 */
+			ratio = 0x0;
+		} else if (pw_is_slm) {
+			WUWATCH_RDMSRL(PLATFORM_INFO_MSR_ADDR, res);
+			ratio = (res >> 40) & 0xff;	/* Bits 47:40*/
+		} else {
+			WUWATCH_RDMSRL(PLATFORM_INFO_MSR_ADDR, res);
+			ratio = (res >> 40) & 0xff;
+		}
+		pw_max_efficiency_ratio = ratio;
+		pw_pr_debug("MAX EFFICIENCY RATIO = %u\n",
+			    (u32) pw_max_efficiency_ratio);
+	}
+	/*
+	 * Read the max turbo ratio.
+	 */
+	{
+		u64 res = 0;
+		u16 ratio = 0;
+		/*
+		 * Algo:
+		 * (1) If "Core" -- read bits 7:0 of 'MSR_TURBO_RATIO_LIMIT'
+		 * (2) If Atom[STW] -- ???
+		 * (3) If Atom[SLM] -- read bits 4:0 of MSR_IA32_IACORE_TURBO_RATIOS
+		 */
+		if (pw_is_atm) {
+			/*
+			 * TODO
+			 */
+			ratio = 0x0;
+		} else if (pw_is_slm) {
+			WUWATCH_RDMSRL(MSR_IA32_IACORE_TURBO_RATIOS, res);
+			ratio = res & 0x1F;	/* Bits 4:0*/
+		} else {
+			WUWATCH_RDMSRL(MSR_TURBO_RATIO_LIMIT, res);
+			ratio = res & 0xff;	/* Bits 7:0*/
+		}
+		pw_max_turbo_ratio = ratio;
+		pw_pr_debug("MAX TURBO RATIO = %u\n", (u32) pw_max_turbo_ratio);
+	}
+	/*
+	 * Extract SCU F/W version (if possible)
+	 */
+	{
+		pw_do_extract_scu_fw_version();
+		printk(KERN_INFO "SCU F/W version = %X.%X\n",
+		       PW_GET_SCU_FW_MAJOR(pw_scu_fw_major_minor),
+		       PW_GET_SCU_FW_MINOR(pw_scu_fw_major_minor));
+	}
+
+	OUTPUT(3, KERN_INFO "Sizeof node = %lu\n", sizeof(tnode_t));
+	OUTPUT(3, KERN_INFO "Sizeof per_cpu_t = %lu\n", sizeof(per_cpu_t));
+
+	startJIFF = jiffies;
+	/*
+	 * Check to see if the user wants us to force
+	 * software coordination of CPU frequencies.
+	 */
+	if (do_force_module_scope_for_cpu_frequencies) {
+		printk(KERN_INFO
+		       "DEBUG: FORCING MODULE SCOPE FOR CPU FREQUENCIES!\n");
+		if (set_module_scope_for_cpus()) {
+			printk(KERN_INFO "ERROR setting affected cpus\n");
+			return -PW_ERROR;
+		} else {
+			pw_pr_debug("OK, setting worked\n");
+		}
+	}
+#if 0
+	{
+		/*
+		 * Register a cpufreq policy notifier.
+		 */
+		cpufreq_register_notifier(&apwr_cpufreq_policy_notifier_block,
+					  CPUFREQ_POLICY_NOTIFIER);
+	}
+#endif /* if 0*/
+
+	if (pw_init_data_structures()) {
+		return -ERROR;
+	}
+
+	if (false) {
+		test_wlock_mappings();
+		pw_destroy_data_structures();
+		return -ERROR;
+	}
+
+	/*
+	   {
+	   disable_auto_demote(NULL);
+	   smp_call_function(disable_auto_demote, NULL, 1);
+	   printk(KERN_INFO "DISABLED AUTO-DEMOTE!\n");
+	   }
+	 */
+	/*
+	 * Check Arch flags (ANY_THREAD, AUTO_DEMOTE etc.)
+	 */
+	{
+		check_arch_flags();
+	}
+	{
+		/* enable_ref();*/
+	}
+
+	{
+		/*
+		 * Check if kernel-space call stack generation
+		 * is possible.
+		 */
+#ifdef CONFIG_FRAME_POINTER
+		OUTPUT(0, KERN_INFO "Frame pointer ON!\n");
+		INTERNAL_STATE.have_kernel_frame_pointers = true;
+#else
+		printk(KERN_INFO
+		       "**********************************************************************************************************\n");
+		printk(KERN_INFO
+		       "Error: kernel NOT compiled with frame pointers -- NO KERNEL-SPACE TIMER CALL TRACES WILL BE GENERATED!\n");
+		printk(KERN_INFO
+		       "**********************************************************************************************************\n");
+		INTERNAL_STATE.have_kernel_frame_pointers = false;
+#endif
+	}
+
+	/*
+	 * "Register" the device-specific special character file here.
+	 */
+	{
+		if ((ret = pw_register_dev()) < 0) {
+			goto err_ret_post_init;
+		}
+		/*
+		 * ...and then the matrix device file.
+		 */
+		if (mt_register_dev()) {
+			goto err_ret_post_pw_reg;
+		}
+	}
+
+	/*
+	 * Probes required to cache (kernel) timer
+	 * callstacks need to be inserted, regardless
+	 * of collection status.
+	 */
+	{
+		/* register_timer_callstack_probes();*/
+		register_permanent_probes();
+	}
+	/*
+	 * Register SUSPEND/RESUME notifier.
+	 */
+	{
+		register_pm_notifier(&pw_alrm_pm_suspend_notifier);
+	}
+
+#if 0
+	{
+		register_all_probes();
+	}
+#endif
+
+	printk(KERN_INFO
+	       "\n--------------------------------------------------------------------------------------------\n");
+	printk(KERN_INFO "START Initialized the SOCWatch driver\n");
+#ifdef CONFIG_X86_INTEL_MID
+	printk(KERN_INFO "SOC Identifier = %u, Stepping = %u\n",
+	       intel_mid_identify_cpu(), intel_mid_soc_stepping());
+#endif
+	printk(KERN_INFO
+	       "--------------------------------------------------------------------------------------------\n");
+
+	return SUCCESS;
+
+err_ret_post_pw_reg:
+	pw_unregister_dev();
+
+err_ret_post_init:
+	pw_destroy_data_structures();
+	/* restore_ref();*/
+	/*
+	   {
+	   enable_auto_demote(NULL);
+	   smp_call_function(enable_auto_demote, NULL, 1);
+	   printk(KERN_INFO "ENABLED AUTO-DEMOTE!\n");
+	   }
+	 */
+
+	return ret;
+};
+
+static void __exit cleanup_hooks(void)
+{
+	unsigned long elapsedJIFF = 0, collectJIFF = 0;
+	int num_timers = 0, num_irqs = 0;
+
+#if 0
+	{
+		/*
+		 * Unregister a cpufreq policy notifier.
+		 */
+		cpufreq_unregister_notifier(&apwr_cpufreq_policy_notifier_block,
+					    CPUFREQ_POLICY_NOTIFIER);
+	}
+#endif /* if 0*/
+
+	{
+		mt_unregister_dev();
+		pw_unregister_dev();
+	}
+
+	/*
+	 * Unregister the suspend notifier.
+	 */
+	{
+		unregister_pm_notifier(&pw_alrm_pm_suspend_notifier);
+	}
+
+	/*
+	 * Probes required to cache (kernel) timer
+	 * callstacks need to be removed, regardless
+	 * of collection status.
+	 */
+	{
+		/* unregister_timer_callstack_probes();*/
+		unregister_permanent_probes();
+	}
+
+#if 1
+	if (IS_COLLECTING()) {
+		/* unregister_all_probes();*/
+		unregister_non_pausable_probes();
+		unregister_pausable_probes();
+	} else if (IS_SLEEPING()) {
+		unregister_pausable_probes();
+	}
+#else
+	/*
+	 * Forcibly unregister -- used in debugging.
+	 */
+	{
+		unregister_all_probes();
+	}
+#endif
+
+	{
+		num_timers = get_num_timers();
+#if DO_CACHE_IRQ_DEV_NAME_MAPPINGS
+		num_irqs = get_num_irq_mappings();
+#endif
+	}
+
+	{
+		pw_destroy_data_structures();
+	}
+
+	if (do_force_module_scope_for_cpu_frequencies) {
+		if (reset_module_scope_for_cpus()) {
+			printk(KERN_INFO "ERROR resetting affected cpus\n");
+		} else {
+			pw_pr_debug("OK, resetting worked\n");
+		}
+	}
+
+	{
+		/* restore_ref();*/
+	}
+	/*
+	   {
+	   enable_auto_demote(NULL);
+	   smp_call_function(enable_auto_demote, NULL, 1);
+	   printk(KERN_INFO "ENABLED AUTO-DEMOTE!\n");
+	   }
+	 */
+
+	/*
+	 * Collect some statistics: total execution time.
+	 */
+	stopJIFF = jiffies;
+	if (stopJIFF < startJIFF) {
+		OUTPUT(0,
+		       KERN_INFO
+		       "WARNING: jiffies counter has WRAPPED AROUND!\n");
+		elapsedJIFF = 0;	/* avoid messy NAN when dividing*/
+	} else {
+		elapsedJIFF = stopJIFF - startJIFF;
+	}
+
+	/*
+	 * Collect some collection statistics: total collection time.
+	 */
+	if (INTERNAL_STATE.collectionStopJIFF <
+	    INTERNAL_STATE.collectionStartJIFF) {
+		OUTPUT(0,
+		       KERN_INFO
+		       "WARNING: jiffies counter has WRAPPED AROUND!\n");
+		collectJIFF = 0;
+	} else {
+		collectJIFF =
+		    INTERNAL_STATE.collectionStopJIFF -
+		    INTERNAL_STATE.collectionStartJIFF;
+	}
+
+	printk(KERN_INFO
+	       "\n--------------------------------------------------------------------------------------------\n");
+
+	printk(KERN_INFO "STOP Terminated the SOCWatch driver.\n");
+#if DO_PRINT_COLLECTION_STATS
+	printk(KERN_INFO
+	       "Total time elapsed = %u msecs, Total collection time = %u msecs\n",
+	       jiffies_to_msecs(elapsedJIFF), jiffies_to_msecs(collectJIFF));
+
+	printk(KERN_INFO "Total # timers = %d, Total # irq mappings = %d\n",
+	       num_timers, num_irqs);
+
+#if DO_OVERHEAD_MEASUREMENTS
+	{
+		timer_init_print_cumulative_overhead_params("TIMER_INIT");
+		timer_expire_print_cumulative_overhead_params("TIMER_EXPIRE");
+		timer_insert_print_cumulative_overhead_params("TIMER_INSERT");
+		tps_print_cumulative_overhead_params("TPS");
+		tps_lite_print_cumulative_overhead_params("TPS_LITE");
+		tpf_print_cumulative_overhead_params("TPF");
+		inter_common_print_cumulative_overhead_params("INTER_COMMON");
+		irq_insert_print_cumulative_overhead_params("IRQ_INSERT");
+		find_irq_node_i_print_cumulative_overhead_params
+		    ("FIND_IRQ_NODE_I");
+		exit_helper_print_cumulative_overhead_params("EXIT_HELPER");
+		timer_delete_print_cumulative_overhead_params("TIMER_DELETE");
+		sys_enter_helper_i_print_cumulative_overhead_params
+		    ("SYS_ENTER_HELPER_I");
+		sys_exit_helper_i_print_cumulative_overhead_params
+		    ("SYS_EXIT_HELPER_I");
+		/*
+		 * Also print stats on timer entries.
+		 */
+		printk(KERN_INFO "# TIMER ENTRIES = %d\n",
+		       atomic_read(&num_timer_entries));
+		/*
+		 * And some mem debugging stats.
+		 */
+		printk(KERN_INFO
+		       "TOTAL # BYTES ALLOCED = %llu, CURR # BYTES ALLOCED = %llu, MAX # BYTES ALLOCED = %llu\n",
+		       TOTAL_NUM_BYTES_ALLOCED(), CURR_NUM_BYTES_ALLOCED(),
+		       MAX_NUM_BYTES_ALLOCED());
+	}
+#endif /* DO_OVERHEAD_MEASUREMENTS*/
+#endif /* DO_PRINT_COLLECTION_STATS*/
+
+	printk(KERN_INFO
+	       "--------------------------------------------------------------------------------------------\n");
+};
+
+module_init(init_hooks);
+module_exit(cleanup_hooks);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR(MOD_AUTHOR);
+MODULE_DESCRIPTION(MOD_DESC);
diff --git a/drivers/external_drivers/drivers/socwatch/socwatch_driver/matrix.h b/drivers/external_drivers/drivers/socwatch/socwatch_driver/matrix.h
new file mode 100644
index 0000000..f998cb3
--- /dev/null
+++ b/drivers/external_drivers/drivers/socwatch/socwatch_driver/matrix.h
@@ -0,0 +1,528 @@
+/* ***********************************************************************************************
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2013 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  Contact Information:
+  SOCWatch Developer Team <socwatchdevelopers@intel.com>
+
+  BSD LICENSE
+
+  Copyright(c) 2013 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+  ***********************************************************************************************
+*/
+
+#ifndef _MATRIXIO_H_
+#define _MATRIXIO_H_
+
+#include "pw_version.h"
+#include "pw_defines.h"
+
+/* #define MATRIX_IO_FILE "/dev/matrix"*/
+#define SOCWATCH_DRIVER_NAME_ICS "socwatch"
+#define SOCWATCH_DRIVER_NAME SOCWATCH_DRIVER_NAME_ICS
+#define SOCWATCH_DRIVER_NAME_WITH_PATH_ICS "/dev/socwatch"
+#define SOCWATCH_DRIVER_NAME_WITH_PATH SOCWATCH_DRIVER_NAME_WITH_PATH_ICS
+/* #define MATRIX_IO_FILE "/dev/matrix_ICS"*/
+#define SOCWATCH_IO_FILE SOCWATCH_DRIVER_NAME_WITH_PATH_ICS
+
+/*enumerate operations to be done in an IOCTL scan(init, poll & term) */
+enum IOCtlType {
+	READ_OP = 0x00000001,
+	WRITE_OP = 0x00000002,
+	ENABLE_OP = 0x00000004,
+	SET_BITS_OP = 0x00000040,
+	RESET_BITS_OP = 0x00000080,
+};
+
+#define MAX_GMCH_CTRL_REGS 4
+#define MAX_GMCH_DATA_REGS 8
+#define DATA_ENABLE			0x00000001
+#define MTX_GMCH_PMON_GLOBAL_CTRL		0x0005F1F0
+#define MTX_GMCH_PMON_GLOBAL_CTRL_ENABLE	0x0001000F
+#define MTX_GMCH_PMON_GLOBAL_CTRL_DISABLE	0x00000000
+#define MTX_GMCH_PMON_FIXED_CTR0		0x0005E8F0
+#define MTX_GMCH_PMON_GP_CTR0_L			0x0005F8F0
+#define MTX_GMCH_PMON_GP_CTR0_H			0x0005FCF0
+#define MTX_GMCH_PMON_GP_CTR1_L			0x0005F9F0
+#define MTX_GMCH_PMON_GP_CTR1_H			0x0005FDF0
+#define MTX_GMCH_PMON_GP_CTR2_L			0x0005FAF0
+#define MTX_GMCH_PMON_GP_CTR2_H			0x0005FEF0
+#define MTX_GMCH_PMON_GP_CTR3_L			0x0005FBF0
+#define MTX_GMCH_PMON_GP_CTR3_H			0x0005FFF0
+#define MTX_GMCH_PMON_FIXED_CTR_CTRL	0x0005F4F0
+
+#define MTX_PCI_MSG_CTRL_REG  0x000000D0
+#define MTX_PCI_MSG_DATA_REG  0x000000D4
+
+#define PWR_MGMT_BASE_ADDR_MASK      0xFFFF
+#define PWR_STS_NORTH_CMPLX_LOWER    0x4
+#define PWR_STS_NORTH_CMPLX_UPPER    0x30
+
+struct mtx_msr {
+	unsigned long eax_LSB;
+	unsigned long edx_MSB;
+	unsigned long ecx_address;
+	unsigned long ebx_value;
+	unsigned long n_cpu;
+	unsigned long operation;
+};
+
+struct memory_map {
+	unsigned long ctrl_addr;
+	void *ctrl_remap_address;
+	unsigned long ctrl_data;
+	unsigned long data_addr;
+	void *data_remap_address;
+	char *ptr_data_usr;
+	unsigned long data_size;
+	unsigned long operation;
+};
+
+struct mtx_pci_ops {
+	unsigned long port;
+	unsigned long data;
+	unsigned long io_type;
+	unsigned long port_island;
+};
+
+struct mtx_soc_perf {
+	char *ptr_data_usr;
+	unsigned long data_size;
+	unsigned long operation;
+};
+
+/* PCI info for a real pci device */
+struct pci_config {
+	unsigned long bus;
+	unsigned long device;
+	unsigned long function;
+	unsigned long offset;
+	unsigned long data;	/* This is written to by the ioctl */
+};
+struct scu_config {
+	unsigned long *address;
+	unsigned char *usr_data;
+	unsigned char *drv_data;
+	unsigned long length;
+};
+
+struct lookup_table {
+	/*Init Data */
+	struct mtx_msr *msrs_init;
+	unsigned long msr_init_length;
+	unsigned long msr_init_wb;
+
+	struct memory_map *mmap_init;
+	unsigned long mem_init_length;
+	unsigned long mem_init_wb;
+
+	struct mtx_pci_ops *pci_ops_init;
+	unsigned long pci_ops_init_length;
+	unsigned long pci_ops_init_wb;
+
+	unsigned long *cfg_db_init;
+	unsigned long cfg_db_init_length;
+	unsigned long cfg_db_init_wb;
+
+	struct mtx_soc_perf *soc_perf_init;
+	unsigned long soc_perf_init_length;
+	unsigned long soc_perf_init_wb;
+
+	/*Poll Data */
+	struct mtx_msr *msrs_poll;
+	unsigned long msr_poll_length;
+	unsigned long msr_poll_wb;
+
+	struct memory_map *mmap_poll;
+	unsigned long mem_poll_length;
+	unsigned long mem_poll_wb;
+	unsigned long records;
+
+	struct mtx_pci_ops *pci_ops_poll;
+	unsigned long pci_ops_poll_length;
+	unsigned long pci_ops_poll_wb;
+	unsigned long pci_ops_records;
+
+	unsigned long *cfg_db_poll;
+	unsigned long cfg_db_poll_length;
+	unsigned long cfg_db_poll_wb;
+
+	struct scu_config scu_poll;
+	unsigned long scu_poll_length;
+
+	struct mtx_soc_perf *soc_perf_poll;
+	unsigned long soc_perf_poll_length;
+	unsigned long soc_perf_poll_wb;
+	unsigned long soc_perf_records;
+
+	/*Term Data */
+	struct mtx_msr *msrs_term;
+	unsigned long msr_term_length;
+	unsigned long msr_term_wb;
+
+	struct memory_map *mmap_term;
+	unsigned long mem_term_length;
+	unsigned long mem_term_wb;
+
+	struct mtx_pci_ops *pci_ops_term;
+	unsigned long pci_ops_term_length;
+	unsigned long pci_ops_term_wb;
+
+	unsigned long *cfg_db_term;
+	unsigned long cfg_db_term_length;
+	unsigned long cfg_db_term_wb;
+
+	struct mtx_soc_perf *soc_perf_term;
+	unsigned long soc_perf_term_length;
+	unsigned long soc_perf_term_wb;
+};
+
+/*
+ * 32b support in 64b kernel space
+ */
+
+#if defined (__linux__)
+
+#ifdef __KERNEL__
+
+#if defined(HAVE_COMPAT_IOCTL) && defined(CONFIG_X86_64)
+
+#include <linux/compat.h>
+/* #include <asm/compat.h>*/
+
+struct mtx_msr32 {
+	compat_ulong_t eax_LSB;
+	compat_ulong_t edx_MSB;
+	compat_ulong_t ecx_address;
+	compat_ulong_t ebx_value;
+	compat_ulong_t n_cpu;
+	compat_ulong_t operation;
+};
+
+struct memory_map32 {
+	compat_ulong_t ctrl_addr;
+	compat_caddr_t ctrl_remap_address;
+	compat_ulong_t ctrl_data;
+	compat_ulong_t data_addr;
+	compat_caddr_t data_remap_address;
+	compat_caddr_t ptr_data_usr;
+	compat_ulong_t data_size;
+	compat_ulong_t operation;
+};
+
+struct mtx_pci_ops32 {
+	compat_ulong_t port;
+	compat_ulong_t data;
+	compat_ulong_t io_type;
+	compat_ulong_t port_island;
+};
+
+struct mtx_soc_perf32 {
+	compat_caddr_t ptr_data_usr;
+	compat_ulong_t data_size;
+	compat_ulong_t operation;
+};
+
+struct pci_config32 {
+	compat_ulong_t bus;
+	compat_ulong_t device;
+	compat_ulong_t function;
+	compat_ulong_t offset;
+	compat_ulong_t data;	/* This is written to by the ioctl */
+};
+
+struct scu_config32 {
+	compat_caddr_t address;
+	compat_caddr_t usr_data;
+	compat_caddr_t drv_data;
+	compat_ulong_t length;
+};
+
+struct lookup_table32 {
+	/*Init Data */
+	compat_caddr_t msrs_init;
+	compat_ulong_t msr_init_length;
+	compat_ulong_t msr_init_wb;
+
+	compat_caddr_t mmap_init;
+	compat_ulong_t mem_init_length;
+	compat_ulong_t mem_init_wb;
+
+	compat_caddr_t pci_ops_init;
+	compat_ulong_t pci_ops_init_length;
+	compat_ulong_t pci_ops_init_wb;
+
+	compat_caddr_t cfg_db_init;
+	compat_ulong_t cfg_db_init_length;
+	compat_ulong_t cfg_db_init_wb;
+
+	compat_caddr_t soc_perf_init;
+	compat_ulong_t soc_perf_init_length;
+	compat_ulong_t soc_perf_init_wb;
+
+	/*Poll Data */
+	compat_caddr_t msrs_poll;
+	compat_ulong_t msr_poll_length;
+	compat_ulong_t msr_poll_wb;
+
+	compat_caddr_t mmap_poll;
+	compat_ulong_t mem_poll_length;
+	compat_ulong_t mem_poll_wb;
+	compat_ulong_t records;
+
+	compat_caddr_t pci_ops_poll;
+	compat_ulong_t pci_ops_poll_length;
+	compat_ulong_t pci_ops_poll_wb;
+	compat_ulong_t pci_ops_records;
+
+	compat_caddr_t cfg_db_poll;
+	compat_ulong_t cfg_db_poll_length;
+	compat_ulong_t cfg_db_poll_wb;
+
+	struct scu_config32 scu_poll;
+	compat_ulong_t scu_poll_length;
+
+	compat_caddr_t soc_perf_poll;
+	compat_ulong_t soc_perf_poll_length;
+	compat_ulong_t soc_perf_poll_wb;
+	compat_ulong_t soc_perf_records;
+
+	/*Term Data */
+	compat_caddr_t msrs_term;
+	compat_ulong_t msr_term_length;
+	compat_ulong_t msr_term_wb;
+
+	compat_caddr_t mmap_term;
+	compat_ulong_t mem_term_length;
+	compat_ulong_t mem_term_wb;
+
+	compat_caddr_t pci_ops_term;
+	compat_ulong_t pci_ops_term_length;
+	compat_ulong_t pci_ops_term_wb;
+
+	compat_caddr_t cfg_db_term;
+	compat_ulong_t cfg_db_term_length;
+	compat_ulong_t cfg_db_term_wb;
+
+	compat_caddr_t soc_perf_term;
+	compat_ulong_t soc_perf_term_length;
+	compat_ulong_t soc_perf_term_wb;
+};
+
+struct mtx_msr_container32 {
+	compat_caddr_t buffer;
+	compat_ulong_t length;
+	struct mtx_msr32 msrType1;
+};
+
+#endif /* HAVE_COMPAT_IOCTL && CONFIG_X86_64*/
+#endif /* __KERNEL__*/
+#endif /* __linux__*/
+
+struct msr_buffer {
+	unsigned long eax_LSB;
+	unsigned long edx_MSB;
+};
+
+struct mt_msr_buffer {
+	u32 eax_LSB;
+	u32 edx_MSB;
+};
+
+#define MAX_SOC_PERF_VALUES 10
+
+struct soc_perf_buffer {
+	unsigned long long values[MAX_SOC_PERF_VALUES];
+};
+
+struct mt_soc_perf_buffer {
+	u64 values[MAX_SOC_PERF_VALUES];
+};
+
+struct xchange_buffer {
+	struct msr_buffer *ptr_msr_buff;
+	unsigned long msr_length;
+	unsigned long *ptr_mem_buff;
+	unsigned long mem_length;
+	unsigned long *ptr_pci_ops_buff;
+	unsigned long pci_ops_length;
+	unsigned long *ptr_cfg_db_buff;
+	unsigned long cfg_db_length;
+	struct soc_perf_buffer *ptr_soc_perf_buff;
+	unsigned long soc_perf_length;
+};
+
+struct mt_xchange_buffer {
+	u64 ptr_msr_buff;
+	u64 ptr_mem_buff;
+	u64 ptr_pci_ops_buff;
+	u64 ptr_cfg_db_buff;
+	u64 ptr_soc_perf_buff;
+	u32 msr_length;
+	u32 mem_length;
+	u32 pci_ops_length;
+	u32 cfg_db_length;
+	u32 soc_perf_length;
+	u32 padding;		/* Required to keep sizeof(mt_xchange_buffer) the same on 32b and 64b systems*/
+	/* in the absence of #pragma pack(XXX) directives!*/
+};
+
+struct xchange_buffer_all {
+	unsigned long long init_time_stamp;
+	unsigned long long *poll_time_stamp;
+	unsigned long long term_time_stamp;
+	unsigned long long init_tsc;
+	unsigned long long term_tsc;
+	unsigned long long *poll_tsc;
+	struct xchange_buffer xhg_buf_init;
+	struct xchange_buffer xhg_buf_poll;
+	struct xchange_buffer xhg_buf_term;
+	unsigned long status;
+};
+
+struct mtx_msr_container {
+	unsigned long *buffer;
+	unsigned long length;
+	struct mtx_msr msrType1;
+};
+
+struct gmch_container {
+	unsigned long long time_stamp;
+	unsigned long read_mask;
+	unsigned long write_mask;
+	unsigned long mcr1[MAX_GMCH_CTRL_REGS];
+	unsigned long mcr2[MAX_GMCH_CTRL_REGS];
+	unsigned long mcr3[MAX_GMCH_CTRL_REGS];
+	unsigned long data[MAX_GMCH_DATA_REGS];
+	unsigned long event[MAX_GMCH_CTRL_REGS];
+	unsigned long core_clks;
+};
+
+struct mtx_size_info {
+	unsigned int init_msr_size;
+	unsigned int term_msr_size;
+	unsigned int poll_msr_size;
+	unsigned int init_mem_size;
+	unsigned int term_mem_size;
+	unsigned int poll_mem_size;
+	unsigned int init_pci_ops_size;
+	unsigned int term_pci_ops_size;
+	unsigned int poll_pci_ops_size;
+	unsigned int init_cfg_db_size;
+	unsigned int term_cfg_db_size;
+	unsigned int poll_cfg_db_size;
+	unsigned int poll_scu_drv_size;
+	unsigned int total_mem_bytes_req;
+	unsigned int init_soc_perf_size;
+	unsigned int term_soc_perf_size;
+	unsigned int poll_soc_perf_size;
+};
+
+#define IOCTL_INIT_SCAN _IOR(0xF8, 0x00000001, unsigned long)
+#define IOCTL_TERM_SCAN _IOR(0xF8, 0x00000002, unsigned long)
+#define IOCTL_POLL_SCAN _IOR(0xF8, 0x00000004, unsigned long)
+
+#define IOCTL_INIT_MEMORY _IOR(0xF8, 0x00000010, struct xchange_buffer_all *)
+#define IOCTL_FREE_MEMORY _IO(0xF8, 0x00000020)
+
+#define IOCTL_READ_PCI_CONFIG	_IOWR(0xF8, 0x00000001, struct pci_config *)
+
+#define IOCTL_VERSION_INFO _IOW(0xF8, 0x00000001, char *)
+#define IOCTL_COPY_TO_USER _IOW(0xF8, 0x00000002, struct xchange_buffer_all *)
+#define IOCTL_READ_CONFIG_DB _IOW(0xF8, 0x00000004, unsigned long *)
+#define IOCTL_WRITE_CONFIG_DB _IOW(0xF8, 0x00000010, unsigned long *)
+#define IOCTL_OPERATE_ON_MSR _IOW(0xF8, 0x00000020, struct mtx_msr *)
+
+#define IOCTL_MSR _IOW(0xF8, 0x00000040, struct mtx_msr_container *)
+#define IOCTL_SRAM _IOW(0xF8, 0x00000080, struct memory_map *)
+#define IOCTL_GMCH_RESET _IOW(0xF8, 0x00000003, struct gmch_container *)
+#define IOCTL_GMCH _IOW(0xF8, 0x00000005, struct gmch_container *)
+
+#define IOCTL_GET_SOC_STEPPING _IOR(0xF8, 0x00000100, unsigned long *)
+#define IOCTL_GET_SCU_FW_VERSION _IOR(0xF8, 0x00000200, unsigned long *)
+
+#define IOCTL_GET_DRIVER_VERSION _IOW(0xF8, 0x00000400, unsigned long *)
+
+#if defined (__linux__)
+
+#ifdef __KERNEL__
+
+#if defined(HAVE_COMPAT_IOCTL) && defined(CONFIG_X86_64)
+#define IOCTL_INIT_SCAN32 _IOR(0xF8, 0x00000001, compat_ulong_t)
+#define IOCTL_TERM_SCAN32 _IOR(0xF8, 0x00000002, compat_ulong_t)
+#define IOCTL_POLL_SCAN32 _IOR(0xF8, 0x00000004, compat_ulong_t)
+
+#define IOCTL_INIT_MEMORY32 _IOR(0xF8, 0x00000010, compat_uptr_t)
+#define IOCTL_FREE_MEMORY32 _IO(0xF8, 0x00000020)
+
+#define IOCTL_READ_PCI_CONFIG32	_IOWR(0xF8, 0x00000001, compat_uptr_t)
+
+#define IOCTL_VERSION_INFO32 _IOW(0xF8, 0x00000001, compat_caddr_t)
+#define IOCTL_COPY_TO_USER32 _IOW(0xF8, 0x00000002, compat_uptr_t)
+#define IOCTL_READ_CONFIG_DB32 _IOW(0xF8, 0x00000004, compat_uptr_t)
+#define IOCTL_WRITE_CONFIG_DB32 _IOW(0xF8, 0x00000010, compat_uptr_t)
+#define IOCTL_OPERATE_ON_MSR32 _IOW(0xF8, 0x00000020, compat_uptr_t)
+
+#define IOCTL_MSR32 _IOW(0xF8, 0x00000040, compat_uptr_t)
+#define IOCTL_SRAM32 _IOW(0xF8, 0x00000080, compat_uptr_t)
+#define IOCTL_GMCH_RESET32 _IOW(0xF8, 0x00000003, compat_uptr_t)
+#define IOCTL_GMCH32 _IOW(0xF8, 0x00000005, compat_uptr_t)
+
+#define IOCTL_GET_SOC_STEPPING32 _IOR(0xF8, 0x00000100, compat_uptr_t)
+#define IOCTL_GET_SCU_FW_VERSION32 _IOR(0xF8, 0x00000200, compat_uptr_t)
+
+#define IOCTL_GET_DRIVER_VERSION32 _IOW(0xF8, 0x00000400, compat_uptr_t)
+#endif /* HAVE_COMPAT_IOCTL && CONFIG_X86_64*/
+#endif /* __KERNEL__*/
+#endif /* __linux__*/
+
+#define platform_pci_read32	intel_mid_msgbus_read32_raw
+#define platform_pci_write32	intel_mid_msgbus_write32_raw
+
+#endif
diff --git a/drivers/external_drivers/drivers/socwatch/socwatch_driver/pw_data_structs.h b/drivers/external_drivers/drivers/socwatch/socwatch_driver/pw_data_structs.h
new file mode 100644
index 0000000..0dc9fc2
--- /dev/null
+++ b/drivers/external_drivers/drivers/socwatch/socwatch_driver/pw_data_structs.h
@@ -0,0 +1,364 @@
+/* ***********************************************************************************************
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2013 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  Contact Information:
+  SOCWatch Developer Team <socwatchdevelopers@intel.com>
+
+  BSD LICENSE
+
+  Copyright(c) 2013 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+  ***********************************************************************************************
+*/
+
+/*
+ * Description: file containing internal data structures used by the
+ * power driver.
+ */
+
+#ifndef _PW_DATA_STRUCTS_H_
+#define _PW_DATA_STRUCTS_H_ 1
+
+/*
+ * We're the PWR kernel device driver.
+ * Flag needs to be set BEFORE
+ * including 'pw_ioctl.h'
+ */
+#define PW_KERNEL_MODULE 1
+
+#include "pw_ioctl.h"		/* For IOCTL mechanism*/
+#include <linux/fs.h>
+#include <linux/bitops.h>	/* for "test_and_set_bit(...)" atomic functionality*/
+
+enum {
+	EMPTY = 0,
+	FULL
+};
+
+enum {
+	IRQ = 0,
+	TIMER,
+	WORKQUEUE,
+	SCHED,
+	BREAK_TYPE_END
+};
+
+/*
+ * Used to indicate whether
+ * a new IRQ mapping was
+ * created.
+ */
+typedef enum {
+	OK_IRQ_MAPPING_EXISTS,
+	OK_NEW_IRQ_MAPPING_CREATED,
+	ERROR_IRQ_MAPPING
+} irq_mapping_types_t;
+
+typedef enum {
+	PW_MAPPING_EXISTS,
+	PW_NEW_MAPPING_CREATED,
+	PW_MAPPING_ERROR
+} pw_mapping_type_t;
+
+/*
+ * Data structures to help store C-state MSR information.
+ */
+typedef struct pw_msr_node pw_msr_node_t;
+struct pw_msr_node {
+	struct list_head list;
+	pw_msr_addr_t msr_addr;
+};
+
+/*
+ * Structure to hold current CMD state
+ * of the device driver. Constantly evolving, but
+ * that's OK -- this is internal to the driver
+ * and is NOT exported.
+ */
+typedef struct {
+	PWCollector_cmd_t cmd;	/* indicates which command was specified last e.g. START, STOP etc.*/
+	/*
+	 * Should we write to our per-cpu output buffers?
+	 * YES if we're actively collecting.
+	 * NO if we're not.
+	 */
+	bool write_to_buffers;
+	/*
+	 * Should we "drain/flush" the per-cpu output buffers?
+	 * (See "device_read" for an explanation)
+	 */
+	bool drain_buffers;
+	/*
+	 * Current methodology for generating kernel-space call
+	 * stacks relies on following frame pointer: has
+	 * the kernel been compiled with frame pointers?
+	 */
+	bool have_kernel_frame_pointers;
+	/*
+	 * On some archs, C-state residency MSRS do NOT count at TSC frequency.
+	 * For these, we need to apply a "clock multiplier". Record that
+	 * here.
+	 */
+	unsigned int residency_count_multiplier;
+	/*
+	 * Store the bus clock frequency.
+	 */
+	unsigned int bus_clock_freq_khz;
+	/*
+	 * Core/Pkg MSR residency addresses
+	 */
+	unsigned int coreResidencyMSRAddresses[MAX_MSR_ADDRESSES];
+	unsigned int pkgResidencyMSRAddresses[MAX_MSR_ADDRESSES];
+	/*
+	 * What switches should the device driver collect?
+	 * Note: different from interface spec:
+	 * We're moving from bitwise OR to bitwise OR of (1 << switch) values.
+	 * Use the "POWER_XXX_MASK" masks to set/test switch residency.
+	 */
+	/* int collection_switches;*/
+	u64 collection_switches;
+	/*
+	 * Total time elapsed for
+	 * all collections.
+	 * Aggregated over all collections -- useful
+	 * in multiple PAUSE/RESUME scenarios
+	 */
+	unsigned long totalCollectionTime;
+	/*
+	 * Start and stop jiffy values for
+	 * the current collection.
+	 */
+	unsigned long collectionStartJIFF, collectionStopJIFF;
+	/*
+	 * This is the knob to control the frequency of D-state data sampling
+	 * to adjust their collection overhead. By default, they are sampled
+	 * in power_start traceevent after 100 msec is passed from the previous sample.
+	 */
+	u32 d_state_sample_interval;
+	/*
+	 * New MSR storage scheme.
+	 */
+	struct list_head msr_list;
+	int num_msrs;
+	pw_msr_addr_t *msr_addrs;
+	/*
+	 * Platform residency information.
+	 */
+	struct {
+		/*
+		 * IPC commands for platform residency
+		 * Valid ONLY if 'collection_type' == 'PW_IO_IPC'
+		 */
+		u32 ipc_start_command, ipc_start_sub_command;	/* START IPC command, sub-cmd*/
+		u32 ipc_stop_command, ipc_stop_sub_command;	/* STOP IPC command, sub-cmd*/
+		u32 ipc_dump_command, ipc_dump_sub_command;	/* DUMP IPC command, sub-cmd*/
+		u16 num_addrs;	/* The number of addresses encoded in the 'platform_res_addrs', 'platform_remapped_addrs' and 'init_platform_res_values' arrays, below*/
+		u8 collection_type;	/* One of 'pw_io_type_t'*/
+		u8 counter_size_in_bytes;	/* Usually either 4 (for 32b counters) or 8 (for 64b counters)*/
+		u64 *platform_res_addrs;	/* Addresses from which to read the various S0iX values; will be remapped (via 'ioremap_nocache()') into 'platform_remapped_addrs'*/
+		u64 *platform_remapped_addrs;	/* Required for MMIO-based access; remapped addresses -- use this to do the actual reads*/
+		u64 *init_platform_res_values;	/* Store the INITIAL values here*/
+		s_res_msg_t *platform_residency_msg;	/* Used to send messages back to Ring-3; 'platform_residency_msg->residencies' usually has 'num_addrs+2' entries (+1 for S0i0, +1 for S3)*/
+	};
+
+	/* Others...*/
+} internal_state_t;
+
+static internal_state_t INTERNAL_STATE;
+
+#define IS_COLLECTING() (INTERNAL_STATE.cmd == PW_START || INTERNAL_STATE.cmd == PW_RESUME)
+#define IS_SLEEPING() (INTERNAL_STATE.cmd == PW_PAUSE)
+#define IS_SLEEP_MODE() (INTERNAL_STATE.collection_switches & POWER_SLEEP_MASK)
+#define IS_FREQ_MODE() (INTERNAL_STATE.collection_switches & POWER_FREQ_MASK)
+#define IS_KTIMER_MODE() (INTERNAL_STATE.collection_switches & POWER_KTIMER_MASK)
+#define IS_NON_PRECISE_MODE() (INTERNAL_STATE.collection_switches & POWER_SYSTEM_MASK)
+#define IS_S_RESIDENCY_MODE() (INTERNAL_STATE.collection_switches & POWER_S_RESIDENCY_MASK)
+#define IS_S_STATE_MODE() (INTERNAL_STATE.collection_switches & POWER_S_STATE_MASK)
+#define IS_D_SC_RESIDENCY_MODE() (INTERNAL_STATE.collection_switches & POWER_D_SC_RESIDENCY_MASK)
+#define IS_D_SC_STATE_MODE() (INTERNAL_STATE.collection_switches & POWER_D_SC_STATE_MASK)
+#define IS_D_NC_STATE_MODE() (INTERNAL_STATE.collection_switches & POWER_D_NC_STATE_MASK)
+#define IS_WAKELOCK_MODE() (INTERNAL_STATE.collection_switches & POWER_WAKELOCK_MASK)
+#define IS_ACPI_S3_MODE() (INTERNAL_STATE.collection_switches & POWER_ACPI_S3_STATE_MASK)
+/*
+ * Special check to see if we should produce c-state samples.
+ * Required to support S/D-state use of TPS probe (which requires "SLEEP_MASK") without
+ * producing any C-state samples.
+ */
+#define IS_C_STATE_MODE() ( INTERNAL_STATE.collection_switches & POWER_C_STATE_MASK )
+
+/*
+ * Per-cpu structure holding MSR residency counts,
+ * timer-TSC values etc.
+ */
+typedef struct per_cpu_struct {
+	u32 was_timer_hrtimer_softirq;	/* 4 bytes*/
+	void *sched_timer_addr;	/* 4/8 bytes (arch dependent)*/
+} per_cpu_t;
+
+/*
+ * Per-cpu structure holding wakeup event causes, tscs
+ * etc. Set by the first non-{TPS, TPE} event to occur
+ * after a processor wakes up.
+ */
+struct wakeup_event {
+	u64 event_tsc;		/* TSC at which the event occurred*/
+	u64 event_val;		/* Event value -- domain-specific*/
+	s32 init_cpu;		/* CPU on which a timer was initialized; valid ONLY for wakeups caused by timers!*/
+	u32 event_type;		/* one of c_break_type_t enum values*/
+	pid_t event_tid, event_pid;
+};
+
+/*
+ * Convenience macros for accessing per-cpu residencies
+ */
+#define RESIDENCY(p,i) ( (p)->residencies[(i)] )
+#define PREV_MSR_VAL(p,i) ( (p)->prev_msr_vals[(i)] )
+
+/*
+ * Per-cpu structure holding stats information.
+ * Eventually, we may want to incorporate these fields within
+ * the "per_cpu_t" structure.
+ */
+typedef struct {
+	local_t c_breaks, timer_c_breaks, inters_c_breaks;
+	local_t p_trans;
+	local_t num_inters, num_timers;
+} stats_t;
+
+/*
+ * Per-cpu structure holding C-state MSR values.
+ */
+typedef struct msr_set msr_set_t;
+struct msr_set {
+	u64 prev_msr_vals[MAX_MSR_ADDRESSES];
+	/*
+	 * Which 'Cx' MSRs counted during the previous C-state quantum(s)?
+	 * (We could have more than one in an HT environment -- it is NOT
+	 * guaranteed that a core wakeup will cause both threads to wakeup.)
+	 */
+	u64 curr_msr_count[MAX_MSR_ADDRESSES];
+	/*
+	 * What was the last C-state the OS requested?
+	 */
+	u32 prev_req_cstate;
+	/*
+	 * Have we sent the boundary C-state sample?
+	 * Required for an initial MSR set snapshot.
+	 */
+	u8 init_msr_set_sent;
+};
+
+typedef struct pw_msr_info_set pw_msr_info_set_t;
+struct pw_msr_info_set {
+	/*
+	 * Previous values of the various MSRs. Required to enable residency
+	 * computation.
+	 */
+	pw_msr_val_t *prev_msr_vals;
+	/*
+	 * Which 'Cx' MSRs counted during the previous C-state quantum(s)?
+	 * (We could have more than one in an HT environment -- it is NOT
+	 * guaranteed that a core wakeup will cause both threads to wakeup.)
+	 */
+	pw_msr_val_t *curr_msr_count;
+	/*
+	 * Scratch memory required for "C_MULTI_MSG" sample support.
+	 */
+	u8 *c_multi_msg_mem;
+	/*
+	 * The number of MSRs we're currently tracking.
+	 */
+	u32 num_msrs;
+	/*
+	 * What was the last C-state the OS requested?
+	 */
+	u32 prev_req_cstate;
+	/*
+	 * Have we sent the boundary C-state sample?
+	 * Required for an initial MSR set snapshot.
+	 */
+	u8 init_msr_set_sent;
+};
+
+/*
+ * Struct to hold old IA32_FIXED_CTR_CTRL MSR
+ * values (to enable restoring
+ * after pw driver terminates). These are
+ * used to enable/restore/disable CPU_CLK_UNHALTED.REF
+ * counting.
+ *
+ * UPDATE: also store old IA32_PERF_GLOBAL_CTRL values..
+ */
+typedef struct {
+	u32 fixed_data[2], perf_data[2];
+} CTRL_values_t;
+
+/*
+ * Helper struct to extract (user-supplied)
+ * IOCTL input and output lengths.
+ */
+typedef struct {
+	int in_len, out_len;
+	char data[1];
+} ioctl_args_stub_t;
+
+#if defined(HAVE_COMPAT_IOCTL) && defined(CONFIG_X86_64)
+/*
+ * Helper struct for use in translating
+ * IOCTLs from 32b user programs in 64b
+ * kernels.
+ */
+struct PWCollector_ioctl_arg32 {
+	int in_len, out_len;
+	u32 in_arg, out_arg;
+};
+#endif /* COMPAT && x64*/
+
+#endif /* _PW_DATA_STRUCTS_H_*/
diff --git a/drivers/external_drivers/drivers/socwatch/socwatch_driver/pw_defines.h b/drivers/external_drivers/drivers/socwatch/socwatch_driver/pw_defines.h
new file mode 100644
index 0000000..dd9854f
--- /dev/null
+++ b/drivers/external_drivers/drivers/socwatch/socwatch_driver/pw_defines.h
@@ -0,0 +1,224 @@
+/* ***********************************************************************************************
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2013 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  Contact Information:
+  SOCWatch Developer Team <socwatchdevelopers@intel.com>
+
+  BSD LICENSE
+
+  Copyright(c) 2013 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+  ***********************************************************************************************
+*/
+
+#ifndef _PW_DEFINES_H_
+#define _PW_DEFINES_H_ 1
+
+#include "pw_version.h"
+
+/* ***************************************************
+ * Common to kernel and userspace.
+ * ***************************************************
+ */
+#define PW_SUCCESS 0
+#define PW_ERROR 1
+
+/*
+ * Helper macro to convert 'u64' to 'unsigned long long' to avoid gcc warnings.
+ */
+#define TO_ULL(x) (unsigned long long)(x)
+/*
+ * Convert an arg to 'unsigned long'
+ */
+#define TO_UL(x) (unsigned long)(x)
+/*
+ * Helper macro for string representation of a boolean value.
+ */
+#define GET_BOOL_STRING(b) ( (b) ? "TRUE" : "FALSE" )
+
+/*
+ * Circularly increment 'i' MODULO 'l'.
+ * ONLY WORKS IF 'l' is (power of 2 - 1) ie.
+ * l == (2 ^ x) - 1
+ */
+#define CIRCULAR_INC(index, mask) ( ( (index) + 1) & (mask) )
+#define CIRCULAR_ADD(index, val, mask) ( ( (index) + (val) ) & (mask) )
+/*
+ * Circularly decrement 'i'.
+ */
+#define CIRCULAR_DEC(i,m) ({int __tmp1 = (i); if(--__tmp1 < 0) __tmp1 = (m); __tmp1;})
+
+#ifdef __KERNEL__
+
+/* ***************************************************
+ * The following is only valid for kernel code.
+ * ***************************************************
+ */
+
+#define CPU() (raw_smp_processor_id())
+#define RAW_CPU() (raw_smp_processor_id())
+#define TID() (current->pid)
+#define PID() (current->tgid)
+#define NAME() (current->comm)
+#define PKG(c) ( cpu_data(c).phys_proc_id )
+#define IT_REAL_INCR() (current->signal->it_real_incr.tv64)
+
+#define ATOMIC_CAS(ptr, old_val, new_val) ( cmpxchg( (ptr), (old_val), (new_val) ) == (old_val) )
+
+/*
+ * Should we allow debug output.
+ * Set to: "1" ==> 'OUTPUT' is enabled.
+ *         "0" ==> 'OUTPUT' is disabled.
+ */
+#define DO_DEBUG_OUTPUT 0
+/*
+ * Control whether to output driver ERROR messages.
+ * These are independent of the 'OUTPUT' macro
+ * (which controls debug messages).
+ * Set to '1' ==> Print driver error messages (to '/var/log/messages')
+ *        '0' ==> Do NOT print driver error messages
+ */
+#define DO_PRINT_DRIVER_ERROR_MESSAGES 1
+/*
+ * Macros to control output printing.
+ */
+#if DO_DEBUG_OUTPUT
+#define pw_pr_debug(...) printk(KERN_INFO __VA_ARGS__)
+#define pw_pr_warn(...) printk(KERN_WARNING __VA_ARGS__)
+#else
+#define pw_pr_debug(...)
+#define pw_pr_warn(...)
+#endif
+/*
+ * Macro for driver error messages.
+ */
+#if (DO_PRINT_DRIVER_ERROR_MESSAGES || DO_DEBUG_OUTPUT)
+#define pw_pr_error(...) printk(KERN_ERR __VA_ARGS__)
+#else
+#define pw_pr_error(...)
+#endif
+
+#else /* __KERNEL__*/
+
+/* ***************************************************
+ * The following is valid only for userspace code.
+ * ***************************************************
+ */
+/*
+ * Default output file name -- the extensions depend on
+ * which program is executing: wuwatch output files have
+ * a ".sw1" extension, while wudump output files have a
+ * ".txt" extension. The extensions are added in by the
+ * respective programs i.e. wuwatch/wudump.
+ */
+#define DEFAULT_WUWATCH_OUTPUT_FILE_NAME "wuwatch_output"
+/*
+ * Default wuwatch config file name.
+ */
+#define DEFAULT_WUWATCH_CONFIG_FILE_NAME "wuwatch_config.txt"
+/*
+ * Macro to convert a {major.minor.other} version into a
+ * single 32-bit unsigned version number.
+ * This is useful when comparing versions, for example.
+ * Pretty much identical to the 'KERNEL_VERSION(...)' macro.
+ */
+/*#define WUWATCH_VERSION(major, minor, other) ( (2^16) * (major) + (2^8) * (minor) + (other) )*/
+#define COLLECTOR_VERSION(major, minor, other) ( (2^16) * (major) + (2^8) * (minor) + (other) )
+/* **************************************
+ * Debugging tools.
+ * **************************************
+ */
+extern bool g_do_debugging;
+#define db_fprintf(...) do { \
+    if (g_do_debugging) { \
+        fprintf(__VA_ARGS__); \
+    } \
+} while(0)
+#define db_assert(e, ...) do { \
+    if (g_do_debugging && !(e)) { \
+	    fprintf(stderr, __VA_ARGS__);	\
+	    assert(false);			\
+	}					\
+} while(0)
+#define db_abort(...) do { \
+    if (g_do_debugging) { \
+        fprintf(stderr, __VA_ARGS__); \
+        assert(false); \
+    } \
+} while(0)
+#define db_copy(...) do { \
+    if (g_do_debugging) { \
+        std::copy(__VA_ARGS__); \
+    } \
+} while(0)
+#define db_perror(...) do { \
+    if (g_do_debugging) { \
+        perror(__VA_ARGS__); \
+    } \
+} while(0)
+
+#define LOG_WUWATCH_FUNCTION_ENTER() db_fprintf(stderr, "ENTERING Function \"%s\"\n", __FUNCTION__);
+#define LOG_WUWATCH_FUNCTION_EXIT() db_fprintf(stderr, "EXITTING Function \"%s\"\n", __FUNCTION__);
+
+/*
+ * Macros corresponding to the kernel versions of 'likely()'
+ * and 'unlikely()' -- GCC SPECIFIC ONLY!
+ */
+#if defined (__linux__)
+#define likely(x) __builtin_expect(!!(x), 1)
+#define unlikely(x) __builtin_expect(!!(x), 0)
+#else /* windows*/
+#define likely(x) (!!(x))
+#define unlikely(x) (!!(x))
+#endif /* linux*/
+
+#endif /* __KERNEL__*/
+
+#endif /* _PW_DEFINES_H_*/
diff --git a/drivers/external_drivers/drivers/socwatch/socwatch_driver/pw_ioctl.h b/drivers/external_drivers/drivers/socwatch/socwatch_driver/pw_ioctl.h
new file mode 100644
index 0000000..cbf5499
--- /dev/null
+++ b/drivers/external_drivers/drivers/socwatch/socwatch_driver/pw_ioctl.h
@@ -0,0 +1,167 @@
+/* ***********************************************************************************************
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2013 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  Contact Information:
+  SOCWatch Developer Team <socwatchdevelopers@intel.com>
+
+  BSD LICENSE
+
+  Copyright(c) 2013 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+  ***********************************************************************************************
+*/
+
+/*
+ * Description: file containing IOCTL info.
+ */
+
+#ifndef _PW_IOCTL_H_
+#define _PW_IOCTL_H_ 1
+
+#if PW_KERNEL_MODULE
+#include <linux/ioctl.h>
+#if defined(HAVE_COMPAT_IOCTL) && defined(CONFIG_X86_64)
+#include <linux/compat.h>
+#endif /* COMPAT && x64*/
+#else
+#include <sys/ioctl.h>
+#endif
+
+/*
+ * The APWR-specific IOCTL magic
+ * number -- used to ensure IOCTLs
+ * are delivered to the correct
+ * driver.
+ */
+/* #define APWR_IOCTL_MAGIC_NUM 0xdead*/
+#define APWR_IOCTL_MAGIC_NUM 100
+
+/*
+ * The name of the device file
+ */
+/* #define DEVICE_FILE_NAME "/dev/pw_driver_char_dev"*/
+#define PW_DEVICE_FILE_NAME "/dev/apwr_driver_char_dev"
+#define PW_DEVICE_NAME "apwr_driver_char_dev"
+
+/*
+ * Data structs that the IOCTLs will need.
+ */
+#include "pw_msg.h"
+#include "pw_structs.h"
+/* #include "pw_defines.h"*/
+
+/*
+ * The actual IOCTL commands.
+ *
+ * From the kernel documentation:
+ * "_IOR" ==> Read IOCTL
+ * "_IOW" ==> Write IOCTL
+ * "_IOWR" ==> Read/Write IOCTL
+ *
+ * Where "Read" and "Write" are from the user's perspective
+ * (similar to the file "read" and "write" calls).
+ */
+#define PW_IOCTL_CONFIG _IOW(APWR_IOCTL_MAGIC_NUM, 1, struct PWCollector_ioctl_arg *)
+#if DO_COUNT_DROPPED_SAMPLES
+#define PW_IOCTL_CMD _IOWR(APWR_IOCTL_MAGIC_NUM, 2, struct PWCollector_ioctl_arg *)
+#else
+#define PW_IOCTL_CMD _IOW(APWR_IOCTL_MAGIC_NUM, 2, struct PWCollector_ioctl_arg *)
+#endif /* DO_COUNT_DROPPED_SAMPLES*/
+#define PW_IOCTL_STATUS _IOR(APWR_IOCTL_MAGIC_NUM, 3, struct PWCollector_ioctl_arg *)
+#define PW_IOCTL_SAMPLE _IOR(APWR_IOCTL_MAGIC_NUM, 4, struct PWCollector_ioctl_arg *)
+#define PW_IOCTL_CHECK_PLATFORM _IOR(APWR_IOCTL_MAGIC_NUM, 5, struct PWCollector_ioctl_arg *)
+#define PW_IOCTL_VERSION _IOR(APWR_IOCTL_MAGIC_NUM, 6, struct PWCollector_version_info *)
+#define PW_IOCTL_MICRO_PATCH _IOR(APWR_IOCTL_MAGIC_NUM, 7, struct PWCollector_micro_patch_info *)
+#define PW_IOCTL_IRQ_MAPPINGS _IOR(APWR_IOCTL_MAGIC_NUM, 8, struct PWCollector_irq_mapping_block *)
+#define PW_IOCTL_PROC_MAPPINGS _IOR(APWR_IOCTL_MAGIC_NUM, 9, struct PWCollector_PROC_mapping_block *)
+#define PW_IOCTL_TURBO_THRESHOLD _IOR(APWR_IOCTL_MAGIC_NUM, 10, struct PWCollector_turbo_threshold *)
+#define PW_IOCTL_AVAILABLE_FREQUENCIES _IOR(APWR_IOCTL_MAGIC_NUM, 11, struct PWCollector_available_frequencies *)
+#define PW_IOCTL_COLLECTION_TIME _IOW(APWR_IOCTL_MAGIC_NUM, 12, unsigned long *)
+#define PW_IOCTL_MMAP_SIZE _IOW(APWR_IOCTL_MAGIC_NUM, 13, unsigned long *)
+#define PW_IOCTL_BUFFER_SIZE _IOW(APWR_IOCTL_MAGIC_NUM, 14, unsigned long *)
+#define PW_IOCTL_DO_D_NC_READ _IOR(APWR_IOCTL_MAGIC_NUM, 15, unsigned long *)
+#define PW_IOCTL_FSB_FREQ _IOR(APWR_IOCTL_MAGIC_NUM, 16, unsigned long *)
+#define PW_IOCTL_MSR_ADDRS _IOW(APWR_IOCTL_MAGIC_NUM, 17, struct PWCollector_ioctl_arg *)
+#define PW_IOCTL_FREQ_RATIOS _IOR(APWR_IOCTL_MAGIC_NUM, 18, unsigned long *)
+#define PW_IOCTL_PLATFORM_RES_CONFIG _IOW(APWR_IOCTL_MAGIC_NUM, 19, struct PWCollector_ioctl_arg *)
+
+/*
+ * 32b-compatible version of the above
+ * IOCTL numbers. Required ONLY for
+ * 32b compatibility on 64b systems,
+ * and ONLY by the driver.
+ */
+#if defined(HAVE_COMPAT_IOCTL) && defined(CONFIG_X86_64)
+#define PW_IOCTL_CONFIG32 _IOW(APWR_IOCTL_MAGIC_NUM, 1, compat_uptr_t)
+#if DO_COUNT_DROPPED_SAMPLES
+#define PW_IOCTL_CMD32 _IOWR(APWR_IOCTL_MAGIC_NUM, 2, compat_uptr_t)
+#else
+#define PW_IOCTL_CMD32 _IOW(APWR_IOCTL_MAGIC_NUM, 2, compat_uptr_t)
+#endif /* DO_COUNT_DROPPED_SAMPLES*/
+#define PW_IOCTL_STATUS32 _IOR(APWR_IOCTL_MAGIC_NUM, 3, compat_uptr_t)
+#define PW_IOCTL_SAMPLE32 _IOR(APWR_IOCTL_MAGIC_NUM, 4, compat_uptr_t)
+#define PW_IOCTL_CHECK_PLATFORM32 _IOR(APWR_IOCTL_MAGIC_NUM, 5, compat_uptr_t)
+#define PW_IOCTL_VERSION32 _IOR(APWR_IOCTL_MAGIC_NUM, 6, compat_uptr_t)
+#define PW_IOCTL_MICRO_PATCH32 _IOR(APWR_IOCTL_MAGIC_NUM, 7, compat_uptr_t)
+#define PW_IOCTL_IRQ_MAPPINGS32 _IOR(APWR_IOCTL_MAGIC_NUM, 8, compat_uptr_t)
+#define PW_IOCTL_PROC_MAPPINGS32 _IOR(APWR_IOCTL_MAGIC_NUM, 9, compat_uptr_t)
+#define PW_IOCTL_TURBO_THRESHOLD32 _IOR(APWR_IOCTL_MAGIC_NUM, 10, compat_uptr_t)
+#define PW_IOCTL_AVAILABLE_FREQUENCIES32 _IOR(APWR_IOCTL_MAGIC_NUM, 11, compat_uptr_t)
+#define PW_IOCTL_COLLECTION_TIME32 _IOW(APWR_IOCTL_MAGIC_NUM, 12, compat_uptr_t)
+#define PW_IOCTL_MMAP_SIZE32 _IOW(APWR_IOCTL_MAGIC_NUM, 13, compat_uptr_t)
+#define PW_IOCTL_BUFFER_SIZE32 _IOW(APWR_IOCTL_MAGIC_NUM, 14, compat_uptr_t)
+#define PW_IOCTL_DO_D_NC_READ32 _IOR(APWR_IOCTL_MAGIC_NUM, 15, compat_uptr_t)
+#define PW_IOCTL_FSB_FREQ32 _IOR(APWR_IOCTL_MAGIC_NUM, 16, compat_uptr_t)
+#define PW_IOCTL_MSR_ADDRS32 _IOW(APWR_IOCTL_MAGIC_NUM, 17, compat_uptr_t)
+#define PW_IOCTL_FREQ_RATIOS32 _IOR(APWR_IOCTL_MAGIC_NUM, 18, compat_uptr_t)
+#define PW_IOCTL_PLATFORM_RES_CONFIG32 _IOW(APWR_IOCTL_MAGIC_NUM, 19, compat_uptr_t)
+#endif /* defined(HAVE_COMPAT_IOCTL) && defined(CONFIG_X86_64)*/
+
+#endif /* _PW_IOCTL_H_*/
diff --git a/drivers/external_drivers/drivers/socwatch/socwatch_driver/pw_lock_defs.h b/drivers/external_drivers/drivers/socwatch/socwatch_driver/pw_lock_defs.h
new file mode 100644
index 0000000..ba1d2c4
--- /dev/null
+++ b/drivers/external_drivers/drivers/socwatch/socwatch_driver/pw_lock_defs.h
@@ -0,0 +1,93 @@
+/* ***********************************************************************************************
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2013 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  Contact Information:
+  SOCWatch Developer Team <socwatchdevelopers@intel.com>
+
+  BSD LICENSE
+
+  Copyright(c) 2013 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+  ***********************************************************************************************
+*/
+
+/*
+ * Description: file containing locking routines
+ * used by the power driver.
+ */
+
+#ifndef _PW_LOCK_H_
+#define _PW_LOCK_H_ 1
+
+#define LOCK(l) {				\
+    unsigned long _tmp_l_flags;			\
+    spin_lock_irqsave(&(l), _tmp_l_flags);
+
+#define UNLOCK(l)				\
+    spin_unlock_irqrestore(&(l), _tmp_l_flags); \
+    }
+
+#define READ_LOCK(l) {				\
+    unsigned long _tmp_l_flags;			\
+    read_lock_irqsave(&(l), _tmp_l_flags);
+
+#define READ_UNLOCK(l)				\
+    read_unlock_irqrestore(&(l), _tmp_l_flags);	\
+    }
+
+#define WRITE_LOCK(l) {				\
+    unsigned long _tmp_l_flags;			\
+    write_lock_irqsave(&(l), _tmp_l_flags);
+
+#define WRITE_UNLOCK(l)					\
+    write_unlock_irqrestore(&(l), _tmp_l_flags);	\
+    }
+
+#endif /* _PW_LOCK_H_*/
diff --git a/drivers/external_drivers/drivers/socwatch/socwatch_driver/pw_matrix.c b/drivers/external_drivers/drivers/socwatch/socwatch_driver/pw_matrix.c
new file mode 100644
index 0000000..4662843
--- /dev/null
+++ b/drivers/external_drivers/drivers/socwatch/socwatch_driver/pw_matrix.c
@@ -0,0 +1,2886 @@
+/* ***********************************************************************************************
+
+   This file is provided under a dual BSD/GPLv2 license.  When using or
+   redistributing this file, you may do so under either license.
+
+   GPL LICENSE SUMMARY
+
+   Copyright(c) 2013 Intel Corporation. All rights reserved.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of version 2 of the GNU General Public License as
+   published by the Free Software Foundation.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+   The full GNU General Public License is included in this distribution
+   in the file called LICENSE.GPL.
+
+   Contact Information:
+   SOCWatch Developer Team <socwatchdevelopers@intel.com>
+
+   BSD LICENSE
+
+   Copyright(c) 2013 Intel Corporation. All rights reserved.
+   All rights reserved.
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions
+   are met:
+
+   * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions and the following disclaimer in
+   the documentation and/or other materials provided with the
+   distribution.
+   * Neither the name of Intel Corporation nor the names of its
+   contributors may be used to endorse or promote products derived
+   from this software without specific prior written permission.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+   ***********************************************************************************************
+*/
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/time.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/version.h>
+#ifdef CONFIG_X86_WANT_INTEL_MID
+#include <asm/intel-mid.h>
+#endif
+#ifdef CONFIG_RPMSG_IPC
+#include <asm/intel_mid_rpmsg.h>
+#endif /* CONFIG_RPMSG_IPC*/
+#ifdef CONFIG_INTEL_SCU_IPC
+#include <asm/intel_scu_pmic.h>	/* Needed for 3.4 kernel port */
+#endif /* CONFIG_INTEL_SCU_IPC*/
+#include <linux/kdev_t.h>
+#include <asm/paravirt.h>
+#include <linux/sfi.h>		/* To retrieve SCU F/W version*/
+
+#include "pw_msg.h"
+#include "pw_structs.h"
+#include "pw_output_buffer.h"
+#include "matrix.h"
+
+/* #define NAME "matrix"*/
+#define DRV_NAME "socwatch"
+/*#define DRIVER_VERSION "1.0"*/
+
+#define MT_SUCCESS 0
+#define MT_ERROR 1
+
+#define MCR_WRITE_OPCODE    0x11
+#define BIT_POS_OPCODE      24
+/*
+ * Should we be doing 'direct' PCI reads and writes?
+ * '1' ==> YES, call "pci_{read,write}_config_dword()" directly
+ * '0' ==> NO, Use the "intel_mid_msgbus_{read32,write32}_raw()" API (defined in 'intel_mid_pcihelpers.c')
+ */
+#define DO_DIRECT_PCI_READ_WRITE 0
+#ifndef CONFIG_X86_WANT_INTEL_MID
+    /*
+     * 'intel_mid_pcihelpers.h' is probably not present -- force
+     * direct PCI calls in this case.
+     */
+#undef DO_DIRECT_PCI_READ_WRITE
+#define DO_DIRECT_PCI_READ_WRITE  1
+#endif
+#if !DO_DIRECT_PCI_READ_WRITE
+#include <asm/intel_mid_pcihelpers.h>
+#endif
+
+#define PW_NUM_SOC_COUNTERS 9
+extern void SOCPERF_Read_Data(void *data_buffer);
+
+static int matrix_major_number;
+static bool instantiated;
+static bool mem_alloc_status;
+static u8 *ptr_lut_ops = NULL;
+static unsigned long io_pm_status_reg;
+static unsigned long io_pm_lower_status;
+static unsigned long io_pm_upper_status;
+static unsigned int io_base_pwr_address;
+static dev_t matrix_dev;
+static struct cdev *matrix_cdev;
+static struct class *matrix_class;
+static struct timeval matrix_time;
+static struct device *matrix_device;
+static struct lookup_table *ptr_lut;
+static struct mtx_size_info lut_info;
+
+extern u16 pw_scu_fw_major_minor;	/* defined in 'apwr_driver.c'*/
+
+static int mt_free_memory(void);
+
+#define PRINT_LUT_LENGTHS(which, what) do { \
+    printk(KERN_INFO "GU: ptr_lut has %s_%s_length = %lu\n", #which, #what, ptr_lut->which##_##what##_##length); \
+} while(0)
+
+#define PRINT_LUT_WBS(which, what) do { \
+    printk(KERN_INFO "GU: ptr_lut has %s_%s_wb = %lu\n", #which, #what, ptr_lut->which##_##what##_##wb); \
+} while(0)
+
+#define TOTAL_ONE_SHOT_LENGTH(type) ({unsigned long __length = 0; \
+        __length += ptr_lut->msr_##type##_length * sizeof(struct mtx_msr); \
+        __length += ptr_lut->mem_##type##_length * sizeof(struct memory_map); \
+        __length += ptr_lut->pci_ops_##type##_length * sizeof(struct mtx_pci_ops); \
+        __length += ptr_lut->cfg_db_##type##_length * sizeof(unsigned long); \
+        __length += ptr_lut->soc_perf_##type##_length * sizeof(struct mtx_soc_perf); \
+        __length;})
+
+#define TOTAL_ONE_SHOT_LEN(type) ({unsigned long __length = 0; \
+        __length += ptr_lut->msr_##type##_wb * sizeof(struct mt_msr_buffer); \
+        __length += ptr_lut->mem_##type##_wb * sizeof(u32); \
+        __length += ptr_lut->pci_ops_##type##_wb * sizeof(u32); \
+        __length += ptr_lut->cfg_db_##type##_wb * sizeof(u32); \
+        /*__length += ptr_lut->soc_perf_##type##_wb * sizeof(struct mt_soc_perf_buffer); */\
+        /* GU: Ring-3 now sets 'soc_perf_poll_wb' == MAX_soc_perf_VALUES */ \
+        __length += ptr_lut->soc_perf_##type##_wb * sizeof(u64); \
+        __length;})
+
+static pw_mt_msg_t *mt_msg_init_buff = NULL, *mt_msg_poll_buff =
+    NULL, *mt_msg_term_buff = NULL;
+
+static u32 mt_platform_pci_read32(u32 address);
+static void mt_platform_pci_write32(unsigned long address, unsigned long data);
+
+/**
+ * Matrix Driver works in such a way that only one thread
+ * and one instance of driver can occur at a time.
+ * At the time of opening the driver file, driver checks driver
+ * status whether its already instantiated or not.. if it is, it
+ * will not allow to open new instance..
+ */
+
+#define MATRIX_GET_TIME_STAMP(time_stamp) \
+    do { \
+        do_gettimeofday(&matrix_time); \
+        time_stamp = \
+        (((u64)matrix_time.tv_sec * 1000000) \
+         + (u64)matrix_time.tv_usec); \
+    } while (0)
+
+#define MATRIX_GET_TSC(tsc) rdtscll(tsc)
+
+#define MATRIX_INCREMENT_MEMORY(cu, cl, buffer, type, lut) \
+    do { \
+        if (lut) { \
+            buffer##_info.init_##type##_size = \
+            sizeof(cu cl) * ptr_lut->type##_##init_length; \
+            buffer##_info.term_##type##_size = \
+            sizeof(cu cl) * ptr_lut->type##_##term_length; \
+            buffer##_info.poll_##type##_size = \
+            sizeof(cu cl) * ptr_lut->type##_##poll_length; \
+            lut_info.total_mem_bytes_req += \
+            buffer##_info.init_##type##_size + \
+            buffer##_info.term_##type##_size + \
+            buffer##_info.poll_##type##_size; \
+        } \
+    } while (0)
+
+#define MATRIX_IO_REMAP_MEMORY(state) \
+    do { \
+        unsigned long count; \
+        for (count = 0; \
+                count < ptr_lut->mem_##state##_length; count++) { \
+            if (ptr_lut->mmap_##state[count].ctrl_addr) { \
+                ptr_lut->mmap_##state[count].ctrl_remap_address = \
+                ioremap_nocache(ptr_lut-> \
+                        mmap_##state[count].ctrl_addr, \
+                        sizeof(unsigned long)); \
+            } else { \
+                ptr_lut->mmap_##state[count].ctrl_remap_address = NULL; \
+            } \
+            if (ptr_lut->mmap_##state[count].data_addr) { \
+                ptr_lut->mmap_##state[count].data_remap_address = \
+                ioremap_nocache(ptr_lut-> \
+                        mmap_##state[count].data_addr, \
+                        (ptr_lut-> \
+                         mmap_##state[count].data_size) \
+                        * sizeof(unsigned long));\
+            }  else { \
+                ptr_lut->mmap_##state[count].data_remap_address = NULL; \
+            }  \
+        } \
+    } while (0)
+
+#define MATRIX_IOUNMAP_MEMORY(state) \
+    do { \
+        unsigned long count; \
+        for (count = 0; \
+                count < ptr_lut->mem_##state##_length; count++) { \
+            if (ptr_lut->mmap_##state[count].ctrl_remap_address) { \
+                iounmap(ptr_lut->mmap_##state[count]. \
+                        ctrl_remap_address); \
+                ptr_lut->mmap_##state[count]. \
+                ctrl_remap_address = NULL; \
+            } \
+            if (ptr_lut->mmap_##state[count].data_remap_address) { \
+                iounmap(ptr_lut->mmap_##state[count]. \
+                        data_remap_address); \
+                ptr_lut->mmap_##state[count]. \
+                data_remap_address = NULL; \
+            } \
+        } \
+    } while (0)
+
+#define MATRIX_BOOK_MARK_LUT(state, type, struct_init, structure, member, mem, label) \
+    do { \
+        if (lut_info.state##_##type##_size) { \
+            if (copy_from_user \
+                    (&ptr_lut_ops[offset], ptr_lut->member##_##state, \
+                     lut_info.state##_##type##_size) > 0) { \
+                dev_dbg(matrix_device, \
+                        "file : %s ,function : %s ,line %i\n", \
+                        __FILE__, __func__, __LINE__); \
+                goto label; \
+            } \
+            ptr_lut->member##_##state =  \
+            (struct_init structure*)&ptr_lut_ops[offset]; \
+            offset += lut_info.state##_##type##_size; \
+            if (mem) \
+            MATRIX_IO_REMAP_MEMORY(state); \
+        } else \
+        ptr_lut->member##_##state = NULL; \
+    } while (0)
+
+#define ALLOW_MATRIX_MSR_READ_WRITE 1
+#if ALLOW_MATRIX_MSR_READ_WRITE
+#define MATRIX_RDMSR_ON_CPU(cpu, addr, low, high) ({int __tmp = rdmsr_on_cpu((cpu), (addr), (low), (high)); __tmp;})
+#define MATRIX_WRMSR_ON_CPU(cpu, addr, low, high) ({int __tmp = wrmsr_on_cpu((cpu), (addr), (low), (high)); __tmp;})
+#else
+#define MATRIX_RDMSR_ON_CPU(cpu, addr, low, high) ({int __tmp = 0; *(low) = 0; *(high) = 0; __tmp;})
+#define MATRIX_WRMSR_ON_CPU(cpu, addr, low, high) ({int __tmp = 0; __tmp;})
+#endif /* ALLOW_MATRIX_MSR_READ*/
+
+/*
+ * Function declarations (incomplete).
+ */
+#if defined(HAVE_COMPAT_IOCTL) && defined(CONFIG_X86_64)
+static long mt_device_compat_ioctl(struct file *file, unsigned int ioctl_num,
+				   unsigned long ioctl_param);
+static long mt_device_compat_init_ioctl_i(struct file *file,
+					  unsigned int ioctl_num,
+					  unsigned long ioctl_param);
+static long mt_device_compat_msr_ioctl_i(struct file *file,
+					 unsigned int ioctl_num,
+					 unsigned long ioctl_param);
+static long mt_device_compat_pci_config_ioctl_i(struct file *file,
+						unsigned int ioctl_num,
+						unsigned long ioctl_param);
+static long mt_device_compat_config_db_ioctl_i(struct file *file,
+					       unsigned int ioctl_num,
+					       unsigned long ioctl_param);
+static int mt_ioctl_mtx_msr_compat_i(struct mtx_msr_container __user *
+				     remote_args,
+				     struct mtx_msr_container32 __user *
+				     remote_args32);
+static int mt_ioctl_pci_config_compat_i(struct pci_config __user * remote_args,
+					struct pci_config32 __user *
+					remote_args32);
+static long mt_get_scu_fw_version_compat_i(u16 __user * remote_args32);
+static int mt_copy_mtx_msr_info_i(struct mtx_msr *msr,
+				  const struct mtx_msr32 __user * msr32,
+				  u32 length);
+static int mt_copy_mmap_info_i(struct memory_map *mem,
+			       const struct memory_map32 __user * mem32,
+			       unsigned long length);
+static int mt_copy_pci_info_i(struct mtx_pci_ops *pci,
+			      const struct mtx_pci_ops32 __user * pci32,
+			      unsigned long length);
+static int mt_copy_cfg_db_info_i(unsigned long *cfg, const u32 __user * cfg32,
+				 unsigned long length);
+static int mt_copy_soc_perf_info_i(struct mtx_soc_perf *soc_perf,
+				   const struct mtx_soc_perf32 __user *
+				   soc_perf32, unsigned long length);
+#endif
+/*
+ * MT_MSG functions.
+ */
+static void mt_free_msg_memory(void)
+{
+	if (mt_msg_init_buff) {
+		vfree(mt_msg_init_buff);
+	}
+	if (mt_msg_poll_buff) {
+		vfree(mt_msg_poll_buff);
+	}
+	if (mt_msg_term_buff) {
+		vfree(mt_msg_term_buff);
+	}
+
+	mt_msg_init_buff = 0x0;
+	mt_msg_poll_buff = 0x0;
+	mt_msg_term_buff = 0x0;
+
+	/* printk(KERN_INFO "OK, freed matrix MSG temp buffers!\n");*/
+};
+
+static int mt_init_msg_memory(void)
+{
+	unsigned int init_len = TOTAL_ONE_SHOT_LEN(init);
+	unsigned int poll_len = TOTAL_ONE_SHOT_LEN(poll);
+	unsigned int term_len = TOTAL_ONE_SHOT_LEN(term);
+
+	/* printk(KERN_INFO "sizeof mt_xchange = %lu, sizeof mt_msr_buffer = %lu, header_size = %lu\n", sizeof(struct mt_xchange_buffer), sizeof(struct mt_msr_buffer), PW_MT_MSG_HEADER_SIZE());*/
+
+	mt_msg_init_buff =
+	    (pw_mt_msg_t *) vmalloc(PW_MT_MSG_HEADER_SIZE() +
+				    sizeof(struct mt_xchange_buffer) +
+				    init_len);
+	if (!mt_msg_init_buff) {
+		mt_free_msg_memory();
+		return -MT_ERROR;
+	}
+	memset(mt_msg_init_buff, 0,
+	       PW_MT_MSG_HEADER_SIZE() + sizeof(struct mt_xchange_buffer) +
+	       init_len);
+	mt_msg_init_buff->data_type = (u16) PW_MT_MSG_INIT;
+	mt_msg_init_buff->data_len =
+	    (u16) (sizeof(struct mt_xchange_buffer) + init_len);
+	{
+		int __dst_idx = 0;
+		char *__buff_ops = (char *)&mt_msg_init_buff->p_data;
+		struct mt_xchange_buffer *__buff =
+		    (struct mt_xchange_buffer *)&__buff_ops[__dst_idx];
+		__dst_idx += sizeof(*__buff);
+		__buff->msr_length = ptr_lut->msr_init_wb;	/* printk(KERN_INFO "GU: set INIT msr_length = %lu\n", __buff->msr_length);*/
+		__buff->mem_length = ptr_lut->mem_init_wb;
+		__buff->pci_ops_length = ptr_lut->pci_ops_init_wb;
+		__buff->cfg_db_length = ptr_lut->cfg_db_init_wb;
+		__buff->soc_perf_length = ptr_lut->soc_perf_init_wb;
+
+		if (__buff->msr_length) {
+			__buff->ptr_msr_buff =
+			    (u64) (unsigned long)&__buff_ops[__dst_idx];
+			__dst_idx +=
+			    sizeof(struct mt_msr_buffer) * __buff->msr_length;
+		}
+		if (__buff->mem_length) {
+			__buff->ptr_mem_buff =
+			    (u64) (unsigned long)&__buff_ops[__dst_idx];
+			__dst_idx += sizeof(u32) * __buff->mem_length;
+		}
+		if (__buff->pci_ops_length) {
+			__buff->ptr_pci_ops_buff =
+			    (u64) (unsigned long)&__buff_ops[__dst_idx];
+			__dst_idx += sizeof(u32) * __buff->pci_ops_length;
+		}
+		if (__buff->cfg_db_length) {
+			__buff->ptr_cfg_db_buff =
+			    (u64) (unsigned long)&__buff_ops[__dst_idx];
+			__dst_idx += sizeof(u32) * __buff->cfg_db_length;
+		}
+		if (__buff->soc_perf_length) {
+			__buff->ptr_soc_perf_buff =
+			    (u64) (unsigned long)&__buff_ops[__dst_idx];
+		}
+	}
+
+	mt_msg_poll_buff =
+	    (pw_mt_msg_t *) vmalloc(PW_MT_MSG_HEADER_SIZE() +
+				    sizeof(struct mt_xchange_buffer) +
+				    poll_len);
+	if (!mt_msg_poll_buff) {
+		mt_free_msg_memory();
+		return -MT_ERROR;
+	}
+	memset(mt_msg_poll_buff, 0,
+	       PW_MT_MSG_HEADER_SIZE() + sizeof(struct mt_xchange_buffer) +
+	       poll_len);
+	mt_msg_poll_buff->data_type = (u16) PW_MT_MSG_POLL;
+	mt_msg_poll_buff->data_len =
+	    (u16) (sizeof(struct mt_xchange_buffer) + poll_len);
+	{
+		int __dst_idx = 0;
+		char *__buff_ops = (char *)&mt_msg_poll_buff->p_data;
+		struct mt_xchange_buffer *__buff =
+		    (struct mt_xchange_buffer *)&__buff_ops[__dst_idx];
+		__dst_idx += sizeof(*__buff);
+		__buff->msr_length = ptr_lut->msr_poll_wb;	/* printk(KERN_INFO "GU: set poll msr_length = %lu\n", __buff->msr_length);*/
+		__buff->mem_length = ptr_lut->mem_poll_wb;
+		__buff->pci_ops_length = ptr_lut->pci_ops_poll_wb;
+		__buff->cfg_db_length = ptr_lut->cfg_db_poll_wb;
+		__buff->soc_perf_length = ptr_lut->soc_perf_poll_wb;
+
+		if (__buff->msr_length) {
+			__buff->ptr_msr_buff =
+			    (u64) (unsigned long)&__buff_ops[__dst_idx];
+			__dst_idx +=
+			    sizeof(struct mt_msr_buffer) * __buff->msr_length;
+		}
+		if (__buff->mem_length) {
+			__buff->ptr_mem_buff =
+			    (u64) (unsigned long)&__buff_ops[__dst_idx];
+			__dst_idx += sizeof(u32) * __buff->mem_length;
+		}
+		if (__buff->pci_ops_length) {
+			__buff->ptr_pci_ops_buff =
+			    (u64) (unsigned long)&__buff_ops[__dst_idx];
+			__dst_idx += sizeof(u32) * __buff->pci_ops_length;
+		}
+		if (__buff->cfg_db_length) {
+			__buff->ptr_cfg_db_buff =
+			    (u64) (unsigned long)&__buff_ops[__dst_idx];
+			__dst_idx += sizeof(u32) * __buff->cfg_db_length;
+		}
+		if (__buff->soc_perf_length) {
+			__buff->ptr_soc_perf_buff =
+			    (u64) (unsigned long)&__buff_ops[__dst_idx];
+		}
+	}
+
+	mt_msg_term_buff =
+	    (pw_mt_msg_t *) vmalloc(PW_MT_MSG_HEADER_SIZE() +
+				    sizeof(struct mt_xchange_buffer) +
+				    term_len);
+	if (!mt_msg_term_buff) {
+		mt_free_msg_memory();
+		return -MT_ERROR;
+	}
+	memset(mt_msg_term_buff, 0,
+	       PW_MT_MSG_HEADER_SIZE() + sizeof(struct mt_xchange_buffer) +
+	       term_len);
+	mt_msg_term_buff->data_type = (u16) PW_MT_MSG_TERM;
+	mt_msg_term_buff->data_len =
+	    (u16) (sizeof(struct mt_xchange_buffer) + term_len);
+	{
+		int __dst_idx = 0;
+		char *__buff_ops = (char *)&mt_msg_term_buff->p_data;
+		struct mt_xchange_buffer *__buff =
+		    (struct mt_xchange_buffer *)&__buff_ops[__dst_idx];
+		__dst_idx += sizeof(*__buff);
+		__buff->msr_length = ptr_lut->msr_term_wb;	/* printk(KERN_INFO "GU: set term msr_length = %lu\n", __buff->msr_length);*/
+		__buff->mem_length = ptr_lut->mem_term_wb;
+		__buff->pci_ops_length = ptr_lut->pci_ops_term_wb;
+		__buff->cfg_db_length = ptr_lut->cfg_db_term_wb;
+		__buff->soc_perf_length = ptr_lut->soc_perf_term_wb;
+
+		if (__buff->msr_length) {
+			__buff->ptr_msr_buff =
+			    (u64) (unsigned long)&__buff_ops[__dst_idx];
+			__dst_idx +=
+			    sizeof(struct mt_msr_buffer) * __buff->msr_length;
+		}
+		if (__buff->mem_length) {
+			__buff->ptr_mem_buff =
+			    (u64) (unsigned long)&__buff_ops[__dst_idx];
+			__dst_idx += sizeof(u32) * __buff->mem_length;
+		}
+		if (__buff->pci_ops_length) {
+			__buff->ptr_pci_ops_buff =
+			    (u64) (unsigned long)&__buff_ops[__dst_idx];
+			__dst_idx += sizeof(u32) * __buff->pci_ops_length;
+		}
+		if (__buff->cfg_db_length) {
+			__buff->ptr_cfg_db_buff =
+			    (u64) (unsigned long)&__buff_ops[__dst_idx];
+			__dst_idx += sizeof(u32) * __buff->cfg_db_length;
+		}
+		if (__buff->soc_perf_length) {
+			__buff->ptr_soc_perf_buff =
+			    (u64) (unsigned long)&__buff_ops[__dst_idx];
+		}
+	}
+	return MT_SUCCESS;
+};
+
+static int mt_msg_scan_msr(struct mt_xchange_buffer *xbuff,
+			   const struct mtx_msr *msrs,
+			   unsigned long max_msr_loop)
+{
+	unsigned long lut_loop = 0, msr_loop = 0;
+	for (lut_loop = 0; lut_loop < max_msr_loop; ++lut_loop) {
+		unsigned int cpu;
+		u32 *lo_rd, *high_rd, lo_wr, high_wr;
+		u32 msr_no;
+		struct mt_msr_buffer *msr_buff =
+		    (struct mt_msr_buffer *)(unsigned long)xbuff->ptr_msr_buff;
+
+		cpu = (unsigned int)msrs[lut_loop].n_cpu;
+		msr_no = msrs[lut_loop].ecx_address;
+		/*
+		   lo_rd = (u32 *)&xbuff->ptr_msr_buff[msr_loop].eax_LSB;
+		   high_rd = (u32 *)&xbuff->ptr_msr_buff[msr_loop].edx_MSB;
+		   lo_wr = xbuff->ptr_msr_buff[msr_loop].eax_LSB;
+		   high_wr = xbuff->ptr_msr_buff[msr_loop].edx_MSB;
+		 */
+		lo_rd = (u32 *) & msr_buff[msr_loop].eax_LSB;
+		high_rd = (u32 *) & msr_buff[msr_loop].edx_MSB;
+		lo_wr = msr_buff[msr_loop].eax_LSB;
+		high_wr = msr_buff[msr_loop].edx_MSB;
+
+		switch (msrs[lut_loop].operation) {
+		case READ_OP:
+			MATRIX_RDMSR_ON_CPU(cpu, msr_no, lo_rd, high_rd);
+			++msr_loop;
+			/* printk(KERN_INFO "GU: read MSR addr 0x%lx on cpu %d in INIT/TERM MT_MSG scan! Val = %llu\n", msr_no, cpu, ((u64)*high_rd << 32 | (u64)*lo_rd));*/
+			break;
+		case WRITE_OP:
+			MATRIX_WRMSR_ON_CPU(cpu, msr_no, lo_wr, high_wr);
+			break;
+		case SET_BITS_OP:
+			{
+				u32 eax_LSB, edx_MSB;
+				MATRIX_RDMSR_ON_CPU(cpu, msr_no, &eax_LSB,
+						    &edx_MSB);
+				MATRIX_WRMSR_ON_CPU(cpu, msr_no,
+						    (eax_LSB | lo_wr),
+						    (edx_MSB | high_wr));
+			}
+			break;
+		case RESET_BITS_OP:
+			{
+				u32 eax_LSB, edx_MSB;
+				MATRIX_RDMSR_ON_CPU(cpu, msr_no, &eax_LSB,
+						    &edx_MSB);
+				MATRIX_WRMSR_ON_CPU(cpu, msr_no,
+						    (eax_LSB & ~(lo_wr)),
+						    (edx_MSB & ~(high_wr)));
+			}
+			break;
+		default:
+			dev_dbg(matrix_device, "Error in MSR_OP value..\n");
+			return -MT_ERROR;
+		}
+	}
+	return MT_SUCCESS;
+};
+
+#if DO_ANDROID
+
+#ifdef CONFIG_RPMSG_IPC
+#define MATRIX_SCAN_MMAP_DO_IPC(cmd, sub_cmd) rpmsg_send_generic_simple_command(cmd, sub_cmd)
+#else
+#define MATRIX_SCAN_MMAP_DO_IPC(cmd, sub_cmd) (-ENODEV)
+#endif /* CONFIG_RPMSG_IPC*/
+
+static int mt_msg_scan_mmap(struct mt_xchange_buffer *xbuff,
+			    const struct memory_map *mmap,
+			    unsigned long max_mem_loop,
+			    unsigned long max_mem_lut_loop)
+{
+	unsigned long mem_loop = 0;
+	unsigned long scu_sub_cmd = 0;
+	unsigned long scu_cmd = 0;
+	unsigned long lut_loop = 0;
+
+	for (lut_loop = 0; lut_loop < max_mem_lut_loop; ++lut_loop) {
+		/* If ctrl_addr != NULL, we do scu IPC command
+		 * else it is a case of mmio read and data_remap
+		 * _address should point to the mmio address from which
+		 * we need to read
+		 */
+		/* printk(KERN_INFO "lut_loop = %lu, ctrl_addr = %lu, data_addr = %lu, data_size = %lu\n", lut_loop, mmap[lut_loop].ctrl_addr, mmap[lut_loop].data_addr, mmap[lut_loop].data_size);*/
+		if (mmap[lut_loop].ctrl_addr) {
+			scu_cmd = mmap[lut_loop].ctrl_data & 0xFF;
+			scu_sub_cmd = (mmap[lut_loop].ctrl_data >> 12) & 0xF;
+			if (MATRIX_SCAN_MMAP_DO_IPC(scu_cmd, scu_sub_cmd)) {
+				dev_dbg(matrix_device,
+					"Unable to get SCU data...\n");
+				return -MT_ERROR;
+			}
+		}
+		if (mmap[lut_loop].data_size != 0) {
+			memcpy(&((u32 *) (unsigned long)xbuff->ptr_mem_buff)
+			       [mem_loop], mmap[lut_loop].data_remap_address,
+			       mmap[lut_loop].data_size * sizeof(u32));
+			mem_loop += mmap[lut_loop].data_size;
+			if (mem_loop > max_mem_loop) {
+				dev_dbg(matrix_device,
+					"A(%04d) [0x%40lu]of [0x%40lu]\n",
+					__LINE__, mem_loop, max_mem_loop);
+				return -MT_ERROR;
+			}
+		}
+	}
+	return MT_SUCCESS;
+};
+#else /* DO_ANDROID*/
+static int mt_msg_scan_mmap(struct mt_xchange_buffer *xbuff,
+			    const struct memory_map *mmap,
+			    unsigned long max_mem_loop,
+			    unsigned long max_mem_lut_loop)
+{
+	return MT_SUCCESS;
+};
+#endif /* DO_ANDROID*/
+
+static int mt_msg_scan(struct mt_xchange_buffer *xbuff,
+		       const struct mtx_msr *msrs,
+		       const struct memory_map *mmap,
+		       const unsigned long *cfg_db, unsigned long max_msr_loop,
+		       unsigned long max_mem_loop,
+		       unsigned long max_mem_lut_loop,
+		       unsigned long max_cfg_db_loop)
+{
+	unsigned long lut_loop = 0;
+	/* printk(KERN_INFO "MT_MSG_SCAN: msrs = %p, mmap = %p, cfg_db = %p\n", msrs, mmap, cfg_db);*/
+	if (msrs && mt_msg_scan_msr(xbuff, msrs, max_msr_loop)) {
+		printk(KERN_INFO "ERROR reading MT_MSG MSRs!\n");
+		return -MT_ERROR;
+	}
+	if (mmap
+	    && mt_msg_scan_mmap(xbuff, mmap, max_mem_loop, max_mem_lut_loop)) {
+		printk(KERN_INFO "ERROR reading MT_MSG MMAPs!\n");
+		return -MT_ERROR;
+	}
+	for (lut_loop = 0; lut_loop < max_cfg_db_loop; ++lut_loop) {
+		((u32 *) (unsigned long)xbuff->ptr_cfg_db_buff)[lut_loop] =
+		    mt_platform_pci_read32(cfg_db[lut_loop]);
+	}
+	/* TODO pci_ops?*/
+	return MT_SUCCESS;
+};
+
+static int mt_produce_mt_msg(const pw_mt_msg_t * mt_msg, u64 tsc)
+{
+	PWCollector_msg_t msg;
+
+	if (!mt_msg) {
+		printk(KERN_INFO "ERROR: trying to produce a NULL MT_MSG?!\n");
+		return -MT_ERROR;
+	}
+
+	msg.cpuidx = 0;		/* TODO: set this to 'pw_max_num_cpus'???*/
+	msg.tsc = tsc;
+	msg.data_type = MATRIX_MSG;
+	msg.data_len = mt_msg->data_len + PW_MT_MSG_HEADER_SIZE();
+	msg.p_data = (u64) (unsigned long)mt_msg;
+
+	/* printk(KERN_INFO "PRODUCING mt_msg with TSC = %llu (len = %u, %u)\n", tsc, mt_msg->data_len, msg.data_len);*/
+
+	return pw_produce_generic_msg_on_cpu(pw_max_num_cpus, &msg,
+					     true
+					     /* wakeup sleeping readers, if required */
+					     );
+};
+
+static int mt_msg_init_scan(void)
+{
+	struct mt_xchange_buffer *xbuff =
+	    (struct mt_xchange_buffer *)&mt_msg_init_buff->p_data;
+	u64 tsc;
+
+	if (!xbuff) {
+		printk(KERN_INFO
+		       "ERROR: trying an INIT scan without allocating space?!\n");
+		return -MT_ERROR;
+	}
+
+	MATRIX_GET_TIME_STAMP(mt_msg_init_buff->timestamp);
+	rdtscll(tsc);
+
+	if (mt_msg_scan
+	    (xbuff, ptr_lut->msrs_init, ptr_lut->mmap_init,
+	     ptr_lut->cfg_db_init, ptr_lut->msr_init_length, xbuff->mem_length,
+	     ptr_lut->mem_init_length, ptr_lut->cfg_db_init_length)) {
+		printk(KERN_INFO "ERROR doing an MT_INIT scan!\n");
+		return -MT_ERROR;
+	}
+
+	if (mt_produce_mt_msg(mt_msg_init_buff, tsc)) {
+		printk(KERN_INFO "ERROR producing an INIT MT_MSG!\n");
+		return -MT_ERROR;
+	}
+
+	return MT_SUCCESS;
+};
+
+static int mt_msg_term_scan(void)
+{
+	struct mt_xchange_buffer *xbuff =
+	    (struct mt_xchange_buffer *)&mt_msg_term_buff->p_data;
+	u64 tsc;
+
+	if (!xbuff) {
+		printk(KERN_INFO
+		       "ERROR: trying an TERM scan without allocating space?!\n");
+		return -MT_ERROR;
+	}
+
+	MATRIX_GET_TIME_STAMP(mt_msg_term_buff->timestamp);
+	rdtscll(tsc);
+
+	if (mt_msg_scan
+	    (xbuff, ptr_lut->msrs_term, ptr_lut->mmap_term,
+	     ptr_lut->cfg_db_term, ptr_lut->msr_term_length, xbuff->mem_length,
+	     ptr_lut->mem_term_length, ptr_lut->cfg_db_term_length)) {
+		printk(KERN_INFO "ERROR doing an MT_TERM scan!\n");
+		return -MT_ERROR;
+	}
+
+	if (mt_produce_mt_msg(mt_msg_term_buff, tsc)) {
+		printk(KERN_INFO "ERROR producing a TERM MT_MSG!\n");
+		return -MT_ERROR;
+	}
+
+	return MT_SUCCESS;
+};
+
+/**
+ * poll_scan - function that is called at each iteration of the poll.
+ * at each poll observations are made and stored in kernel buffer.
+ * @poll_loop : specifies the current iteration of polling
+ */
+static int mt_msg_poll_scan(unsigned long poll_loop)
+{
+	unsigned long msr_loop = 0;
+	unsigned long mem_loop = 0;
+	unsigned long lut_loop;
+	unsigned long max_msr_loop;
+	unsigned long max_mem_loop;
+	unsigned long msr_base_addr;
+	unsigned long mem_base_addr;
+	unsigned long max_msr_read;
+	unsigned long max_cfg_db_loop;
+	unsigned long cfg_db_base_addr;
+	/* unsigned long delta_time;*/
+
+	u64 tsc;
+
+	struct mt_xchange_buffer *xbuff =
+	    (struct mt_xchange_buffer *)&mt_msg_poll_buff->p_data;
+
+	if (ptr_lut == NULL || xbuff == NULL) {
+		printk(KERN_INFO
+		       "ERROR: trying a POLL scan without allocating space?!\n");
+		goto MT_MSG_POLL_ERROR;
+	}
+
+	MATRIX_GET_TIME_STAMP(mt_msg_poll_buff->timestamp);
+	rdtscll(tsc);
+
+	max_msr_loop = ptr_lut->msr_poll_length;
+	max_msr_read = ptr_lut->msr_poll_wb;
+	max_mem_loop = xbuff->mem_length;
+	max_cfg_db_loop = ptr_lut->cfg_db_poll_length;
+	msr_base_addr = 0;	/* (poll_loop * max_msr_read);*/
+	mem_base_addr = 0;	/* (poll_loop * max_mem_loop);*/
+	cfg_db_base_addr = 0;	/* (poll_loop * max_cfg_db_loop);*/
+
+	if (ptr_lut->msrs_poll) {
+		for (lut_loop = 0; lut_loop < max_msr_loop; lut_loop++) {
+			if (ptr_lut->msrs_poll[lut_loop].operation == READ_OP) {
+				u32 *__lsb =
+				    &(((struct mt_msr_buffer *)(unsigned long)
+				       xbuff->ptr_msr_buff)[msr_base_addr +
+							    msr_loop].eax_LSB);
+				u32 *__msb =
+				    &(((struct mt_msr_buffer *)(unsigned long)
+				       xbuff->ptr_msr_buff)[msr_base_addr +
+							    msr_loop].edx_MSB);
+				MATRIX_RDMSR_ON_CPU(ptr_lut->
+						    msrs_poll[lut_loop].n_cpu,
+						    ptr_lut->
+						    msrs_poll[lut_loop].
+						    ecx_address, __lsb, __msb);
+				msr_loop++;
+			} else if (ptr_lut->msrs_poll[lut_loop].operation ==
+				   WRITE_OP) {
+				MATRIX_WRMSR_ON_CPU(ptr_lut->
+						    msrs_poll[lut_loop].n_cpu,
+						    ptr_lut->
+						    msrs_poll[lut_loop].
+						    ecx_address,
+						    ptr_lut->
+						    msrs_poll[lut_loop].eax_LSB,
+						    ptr_lut->
+						    msrs_poll[lut_loop].
+						    edx_MSB);
+			} else {
+				dev_dbg(matrix_device,
+					"Error in MSR_OP value..\n");
+				goto MT_MSG_POLL_ERROR;
+			}
+		}
+	}
+#if DO_ANDROID
+	if (ptr_lut->mmap_poll) {
+		for (lut_loop = 0; lut_loop < max_mem_loop; lut_loop++) {
+			/*
+			 * If ctrl_remap_adr = NULL, then the interface does
+			 * mmio read
+			 */
+			if (ptr_lut->mmap_poll[lut_loop].ctrl_remap_address)
+				writel(ptr_lut->mmap_poll[lut_loop].ctrl_data,
+				       ptr_lut->mmap_poll[lut_loop].
+				       ctrl_remap_address);
+			if (ptr_lut->mmap_poll[lut_loop].data_size != 0) {
+				memcpy(&
+				       ((u32 *) (unsigned long)xbuff->
+					ptr_mem_buff)[mem_base_addr + mem_loop],
+				       ptr_lut->mmap_poll[lut_loop].
+				       data_remap_address,
+				       ptr_lut->mmap_poll[lut_loop].data_size *
+				       sizeof(u32));
+				mem_loop +=
+				    ptr_lut->mmap_poll[lut_loop].data_size;
+				if (mem_loop > max_mem_loop) {
+					dev_dbg(matrix_device,
+						"A(%04d) [0x%40lu]of [0x%40lu]\n",
+						__LINE__, mem_loop,
+						max_mem_loop);
+					goto MT_MSG_POLL_ERROR;
+				}
+			}
+		}
+	}
+
+	/* Get the status of power islands in the North Complex */
+	io_pm_lower_status = inl(io_pm_status_reg + PWR_STS_NORTH_CMPLX_LOWER);
+	io_pm_upper_status =
+	    inl(io_base_pwr_address + PWR_STS_NORTH_CMPLX_UPPER);
+	memcpy(&((u32 *) (unsigned long)xbuff->ptr_pci_ops_buff)[0],
+	       &io_pm_lower_status, sizeof(u32));
+	memcpy(&((u32 *) (unsigned long)xbuff->ptr_pci_ops_buff)[1],
+	       &io_pm_upper_status, sizeof(u32));
+
+	/* SCU IO */
+#ifdef CONFIG_INTEL_SCU_IPC
+	if (0 != ptr_lut->scu_poll.length) {
+		int status;
+		unsigned long offset = 0;	/* (ptr_lut->scu_poll.length * poll_loop);*/
+		for (lut_loop = 0; lut_loop < ptr_lut->scu_poll.length;
+		     lut_loop++) {
+			status =
+			    intel_scu_ipc_ioread8(ptr_lut->scu_poll.
+						  address[lut_loop],
+						  &ptr_lut->scu_poll.
+						  drv_data[offset + lut_loop]);
+			if (status == 0) {
+				dev_dbg(matrix_device,
+					"IPC failed for reg: %lu addr: %c ..\n",
+					ptr_lut->scu_poll.address[lut_loop],
+					ptr_lut->scu_poll.drv_data[offset +
+								   lut_loop]);
+				goto MT_MSG_POLL_ERROR;
+			}
+		}
+	}
+#endif /* CONFIG_INTEL_SCU_IPC*/
+	cfg_db_base_addr = 0;	/* (poll_loop * max_cfg_db_loop);*/
+	for (lut_loop = 0; lut_loop < max_cfg_db_loop; lut_loop++) {
+		((u32 *) (unsigned long)xbuff->
+		 ptr_cfg_db_buff)[cfg_db_base_addr + lut_loop] =
+mt_platform_pci_read32(ptr_lut->cfg_db_poll[lut_loop]);
+		/* printk(KERN_INFO "DEBUG: cfg_db_poll val[%lu] (cfg_db address 0x%lx) = %u\n", cfg_db_base_addr+lut_loop, ptr_lut->cfg_db_poll[lut_loop], ((u32 *)(unsigned long)xbuff->ptr_cfg_db_buff)[cfg_db_base_addr + lut_loop]);*/
+	}
+#endif /* DO_ANDROID*/
+
+	/* Get the SOCPERF counter values */
+	if (NULL != ptr_lut->soc_perf_poll) {
+		if (ptr_lut->soc_perf_poll[0].operation == READ_OP) {
+			/*int i = 0;*/
+			u64 __soc_perf_buffer[PW_NUM_SOC_COUNTERS];
+
+			memset(__soc_perf_buffer, 0, sizeof(__soc_perf_buffer));
+
+			SOCPERF_Read_Data(__soc_perf_buffer);
+
+			memcpy(&
+			       ((struct soc_perf_buffer *)(unsigned long)xbuff->
+				ptr_soc_perf_buff)[0], __soc_perf_buffer,
+			       sizeof(__soc_perf_buffer));
+		} else {
+			dev_dbg(matrix_device,
+				"SOCPERF operation is NOT read!\n");
+		}
+	} else {
+		dev_dbg(matrix_device, "SOCPERF poll is NULL!\n");
+	}
+
+	if (mt_produce_mt_msg(mt_msg_poll_buff, tsc)) {
+		printk(KERN_INFO "ERROR producing a POLL MT_MSG!\n");
+		goto MT_MSG_POLL_ERROR;
+	}
+	/* printk(KERN_INFO "OK: POLL MT_MSG scan was SUCCESSFUL!\n");*/
+	return MT_SUCCESS;
+
+MT_MSG_POLL_ERROR:
+	printk(KERN_INFO "ERROR doing a POLL MT_MSG scan!\n");
+	mt_free_memory();
+	return -MT_ERROR;
+};
+
+#define MATRIX_VMALLOC(ptr, size, label) \
+    do { \
+        if (size > 0) { \
+            ptr = vmalloc(size); \
+            if (ptr == NULL) { \
+                dev_dbg(matrix_device, "file : %s line %i\n", \
+                        __FILE__, __LINE__); \
+                goto label; \
+            } \
+        } \
+    } while (0)
+
+static int matrix_open(struct inode *in, struct file *filp)
+{
+	if (!try_module_get(THIS_MODULE))
+		return -ENODEV;
+	if (instantiated) {
+		module_put(THIS_MODULE);
+		return -EBUSY;
+	} else {
+		instantiated = true;
+		return 0;
+	}
+}
+
+/**
+ * platform_pci_read32 - for reading PCI space through config registers
+ * of the platform.
+ * @address : an address in the pci space
+ */
+static u32 mt_platform_pci_read32(u32 address)
+{
+	u32 read_value = 0;
+#if DO_DIRECT_PCI_READ_WRITE
+	struct pci_dev *pci_root = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
+	if (!pci_root)
+		return 0;	/* Application will verify the data */
+	pci_write_config_dword(pci_root, MTX_PCI_MSG_CTRL_REG, address);
+	pci_read_config_dword(pci_root, MTX_PCI_MSG_DATA_REG, &read_value);
+#else /* !DO_DIRECT_PCI_READ_WRITE*/
+	read_value = intel_mid_msgbus_read32_raw(address);
+#endif /* if DO_DIRECT_PCI_READ_WRITE*/
+	return read_value;
+}
+
+/**
+ * platform_pci_write32 - for writing into PCI space through config registers
+ * of the platform.
+ * @address : an address in the pci space
+ * @data : data that has to wrriten
+ */
+static void mt_platform_pci_write32(unsigned long address, unsigned long data)
+{
+#if DO_DIRECT_PCI_READ_WRITE
+	struct pci_dev *pci_root = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
+	if (pci_root) {
+		pci_write_config_dword(pci_root, MTX_PCI_MSG_DATA_REG, data);
+		pci_write_config_dword(pci_root, MTX_PCI_MSG_CTRL_REG, address);
+	}
+#else /* !DO_DIRECT_PCI_READ_WRITE*/
+	intel_mid_msgbus_write32_raw(address, data);
+#endif /* if DO_DIRECT_PCI_READ_WRITE*/
+}
+
+/**
+ * calculate_memory_requirements - determine the amount of memory required based * on data passed in from the user space
+ */
+static void mt_calculate_memory_requirements(void)
+{
+	lut_info.total_mem_bytes_req = 0;
+
+	/* Find out memory required for Lookup table */
+	MATRIX_INCREMENT_MEMORY(struct, mtx_msr, lut, msr, 1);
+	MATRIX_INCREMENT_MEMORY(struct, memory_map, lut, mem, 1);
+	MATRIX_INCREMENT_MEMORY(struct, mtx_pci_ops, lut, pci_ops, 1);
+	MATRIX_INCREMENT_MEMORY(unsigned, long, lut, cfg_db, 1);
+	/* MATRIX_INCREMENT_MEMORY(unsigned, int, lut, cfg_db, 1);*/
+	MATRIX_INCREMENT_MEMORY(struct, mtx_soc_perf, lut, soc_perf, 1);
+	lut_info.poll_scu_drv_size =
+	    ptr_lut->scu_poll.length * ptr_lut->scu_poll_length;
+	lut_info.total_mem_bytes_req += lut_info.poll_scu_drv_size;
+
+	/* printk(KERN_INFO "GU: total LUT_INFO size = %u\n", lut_info.total_mem_bytes_req);*/
+}
+
+/**
+ * bookmark_lookup_table - bookmark memory locations of structures within the
+ * chunk of memory allocated earlier
+ */
+static int mt_bookmark_lookup_table(void)
+{
+	unsigned long offset = 0;
+
+	/* msr part of the lookup table */
+	MATRIX_BOOK_MARK_LUT(init, msr, struct, mtx_msr, msrs, 0, COPY_FAIL);
+	MATRIX_BOOK_MARK_LUT(poll, msr, struct, mtx_msr, msrs, 0, COPY_FAIL);
+	MATRIX_BOOK_MARK_LUT(term, msr, struct, mtx_msr, msrs, 0, COPY_FAIL);
+
+	/* mem part of the lookup table */
+	MATRIX_BOOK_MARK_LUT(init, mem, struct, memory_map, mmap, 1, COPY_FAIL);
+	MATRIX_BOOK_MARK_LUT(poll, mem, struct, memory_map, mmap, 1, COPY_FAIL);
+	MATRIX_BOOK_MARK_LUT(term, mem, struct, memory_map, mmap, 1, COPY_FAIL);
+
+	/* pci part of the lookup table */
+	MATRIX_BOOK_MARK_LUT(init, pci_ops, struct, mtx_pci_ops, pci_ops, 0,
+			     COPY_FAIL);
+	MATRIX_BOOK_MARK_LUT(poll, pci_ops, struct, mtx_pci_ops, pci_ops, 0,
+			     COPY_FAIL);
+	MATRIX_BOOK_MARK_LUT(term, pci_ops, struct, mtx_pci_ops, pci_ops, 0,
+			     COPY_FAIL);
+
+	/* config_db part of the lookup table */
+	MATRIX_BOOK_MARK_LUT(init, cfg_db, unsigned, long, cfg_db, 0,
+			     COPY_FAIL);
+	MATRIX_BOOK_MARK_LUT(poll, cfg_db, unsigned, long, cfg_db, 0,
+			     COPY_FAIL);
+	MATRIX_BOOK_MARK_LUT(term, cfg_db, unsigned, long, cfg_db, 0,
+			     COPY_FAIL);
+
+	/* scu part of the lookup table */
+	ptr_lut->scu_poll.drv_data = (unsigned char *)&ptr_lut_ops[offset];
+
+	/* soc_perf part of the lookup table */
+	MATRIX_BOOK_MARK_LUT(init, soc_perf, struct, mtx_soc_perf, soc_perf, 0,
+			     COPY_FAIL);
+	MATRIX_BOOK_MARK_LUT(poll, soc_perf, struct, mtx_soc_perf, soc_perf, 0,
+			     COPY_FAIL);
+	MATRIX_BOOK_MARK_LUT(term, soc_perf, struct, mtx_soc_perf, soc_perf, 0,
+			     COPY_FAIL);
+	return 0;
+COPY_FAIL:
+	return -EFAULT;
+}
+
+/**
+ * free_memory - frees up all of the memory obtained
+ */
+static int mt_free_memory(void)
+{
+	/* Freeing IOREMAP Memory */
+	if (ptr_lut) {
+		MATRIX_IOUNMAP_MEMORY(init);
+		MATRIX_IOUNMAP_MEMORY(term);
+		MATRIX_IOUNMAP_MEMORY(poll);
+		vfree(ptr_lut);
+		ptr_lut = NULL;
+	}
+	/*Freeing LUT Memory */
+	if (ptr_lut_ops) {
+		vfree(ptr_lut_ops);
+		ptr_lut_ops = NULL;
+	}
+
+	mem_alloc_status = false;
+
+	mt_free_msg_memory();
+
+	return 0;
+}
+
+/**
+ * initialize_memory - initializes all of the required memory as requested
+ * @ptr_data : gets the address of the lookup table that has all the info
+ */
+static int mt_initialize_memory(unsigned long ptr_data)
+{
+
+	if (mem_alloc_status) {
+		dev_dbg(matrix_device,
+			"Initialization of Memory is already done..\n");
+		/* printk(KERN_INFO "Initialization of Memory is already done..\n");*/
+		return -EPERM;
+	}
+
+	/* get information about lookup table from user space */
+	MATRIX_VMALLOC(ptr_lut, sizeof(struct lookup_table), ERROR);
+
+	if (copy_from_user(ptr_lut,
+			   (struct lookup_table *)ptr_data,
+			   sizeof(struct lookup_table)) > 0) {
+		dev_dbg(matrix_device, "file : %s ,function : %s ,line %i\n",
+			__FILE__, __func__, __LINE__);
+		/* printk(KERN_INFO "file : %s ,function : %s ,line %i\n", __FILE__, __func__, __LINE__);*/
+		goto ERROR;
+	}
+
+	{
+		/* unsigned long init_length = TOTAL_ONE_SHOT_LENGTH(init);*/
+		/* unsigned long poll_length = TOTAL_ONE_SHOT_LENGTH(poll);*/
+		/* unsigned long term_length = TOTAL_ONE_SHOT_LENGTH(term);*/
+
+		/* printk(KERN_INFO "GU: init_length = %lu, poll_length = %lu, term_length = %lu\n", init_length, poll_length, term_length);*/
+		/* printk(KERN_INFO "GU: sizeof(xchange) = %u, init_len = %lu, poll_len = %lu, term_len = %lu\n", sizeof(struct mt_xchange_buffer), TOTAL_ONE_SHOT_LEN(init), TOTAL_ONE_SHOT_LEN(poll), TOTAL_ONE_SHOT_LEN(term));*/
+		/* printk(KERN_INFO "GU: # records = %u\n", ptr_lut->records);*/
+
+		if (mt_init_msg_memory()) {
+			printk(KERN_INFO
+			       "ERROR allocating memory for matrix messages!\n");
+			goto ERROR;
+		}
+	}
+
+	mt_calculate_memory_requirements();
+
+	/* allocate once and for all memory required for lookup table */
+	MATRIX_VMALLOC(ptr_lut_ops, lut_info.total_mem_bytes_req, ERROR);
+
+	if (mt_bookmark_lookup_table() < 0) {
+		dev_dbg(matrix_device, "file : %s ,function : %s ,line %i\n",
+			__FILE__, __func__, __LINE__);
+		/* printk(KERN_INFO "file : %s ,function : %s ,line %i\n", __FILE__, __func__, __LINE__);*/
+		goto ERROR;
+	}
+
+	io_pm_status_reg =
+	    (mt_platform_pci_read32(ptr_lut->pci_ops_poll->port) &
+	     PWR_MGMT_BASE_ADDR_MASK);
+	io_base_pwr_address =
+	    (mt_platform_pci_read32(ptr_lut->pci_ops_poll->port_island) &
+	     PWR_MGMT_BASE_ADDR_MASK);
+	mem_alloc_status = true;
+
+	return 0;
+ERROR:
+	printk(KERN_INFO "Memory Initialization Error!\n");
+	mt_free_memory();
+	return -EFAULT;
+}
+
+#if defined(HAVE_COMPAT_IOCTL) && defined(CONFIG_X86_64)
+#define MT_MATCH_IOCTL(num, pred) ( (num) == (pred) || (num) == (pred##32) )
+#else
+#define MT_MATCH_IOCTL(num, pred) ( (num) == (pred) )
+#endif /* COMPAT && x64*/
+
+/**
+ * initial_scan - function that is run once before polling at regular intervals
+ * sets the msr's and other variables to their default values
+ */
+static int mt_msg_data_scan(unsigned long ioctl_request)
+{
+	/* printk(KERN_INFO "MT_MSG_DATA_SCAN\n");*/
+	if (ptr_lut == NULL) {
+		printk(KERN_INFO "FATAL: NULL lookup table?!\n");
+		goto ERROR;
+	}
+	if (MT_MATCH_IOCTL(ioctl_request, IOCTL_INIT_SCAN)) {
+		if (mt_msg_init_scan()) {
+			printk(KERN_INFO "ERROR doing an MT_MSG init scan!\n");
+			goto ERROR;
+		} else {
+			/* printk(KERN_INFO "OK: INIT MT_MSG scan was SUCCESSFUL!\n");*/
+		}
+	} else if (MT_MATCH_IOCTL(ioctl_request, IOCTL_TERM_SCAN)) {
+		if (mt_msg_term_scan()) {
+			printk(KERN_INFO "ERROR doing an MT_MSG term scan!\n");
+			goto ERROR;
+		} else {
+			/* printk(KERN_INFO "OK: TERM MT_MSG scan was SUCCESSFUL!\n");*/
+		}
+	} else {
+		goto ERROR;
+	}
+	return MT_SUCCESS;
+ERROR:
+	mt_free_memory();
+	return -MT_ERROR;
+};
+
+/**
+ * transfer_data - transfers all the recorded info to user space for profiling
+ * @ptr_data : gets the address of the user buffer that has to be populated
+ */
+static int mt_transfer_data(unsigned long ptr_data)
+{
+	return 0;		/* SUCCESS*/
+}
+
+/**
+ * ioctl_mtx_msr - mtx_msr_container refers to structure designed to hold data related
+ * to MSR( Model Specific Registers).
+ * @ptr_data : gets the address of the user buffer that has to be populated
+ */
+static int IOCTL_mtx_msr(unsigned long ptr_data, unsigned int request)
+{
+	struct mtx_msr_container mtx_msr_drv;
+	unsigned long *buffer = NULL;
+	int err = 0;
+
+	if ((struct mtx_msr_container *)ptr_data == NULL) {
+		dev_dbg(matrix_device, "file : %s ,function : %s ,line %i\n",
+			__FILE__, __func__, __LINE__);
+		return -EFAULT;
+	}
+	if (copy_from_user
+	    (&mtx_msr_drv, (struct mtx_msr_container *)ptr_data,
+	     sizeof(mtx_msr_drv)) > 0) {
+		dev_dbg(matrix_device, "file : %s ,function : %s ,line %i\n",
+			__FILE__, __func__, __LINE__);
+		return -EFAULT;
+	}
+	if (mtx_msr_drv.length > 0) {
+		MATRIX_VMALLOC(buffer,
+			       sizeof(unsigned long) * mtx_msr_drv.length,
+			       ERROR);
+		if (copy_from_user
+		    (buffer, mtx_msr_drv.buffer,
+		     (sizeof(unsigned long) * mtx_msr_drv.length)) > 0) {
+			dev_dbg(matrix_device,
+				"file : %s ,function : %s ,line %i\n", __FILE__,
+				__func__, __LINE__);
+			goto ERROR;
+		}
+	}
+	switch (mtx_msr_drv.msrType1.operation) {
+	case WRITE_OP:
+		err = MATRIX_WRMSR_ON_CPU(mtx_msr_drv.msrType1.n_cpu,
+					  mtx_msr_drv.msrType1.ecx_address,
+					  mtx_msr_drv.msrType1.eax_LSB,
+					  mtx_msr_drv.msrType1.edx_MSB);
+		break;
+	case READ_OP:
+		err = MATRIX_RDMSR_ON_CPU(mtx_msr_drv.msrType1.n_cpu,
+					  mtx_msr_drv.msrType1.ecx_address,
+					  (u32 *) & mtx_msr_drv.msrType1.
+					  eax_LSB,
+					  (u32 *) & mtx_msr_drv.msrType1.
+					  edx_MSB);
+		break;
+	case ENABLE_OP:
+		wrmsrl(mtx_msr_drv.msrType1.ecx_address,
+		       (unsigned long)&buffer[0]);
+		wrmsr(mtx_msr_drv.msrType1.ebx_value, 0x01, 0x00);
+		vfree(buffer);
+		return 0;
+	default:
+		dev_dbg(matrix_device,
+			"There is a problem in MSR Operation..\n");
+		goto ERROR;
+	}
+	if (err != 0)
+		goto ERROR;
+
+#if defined(HAVE_COMPAT_IOCTL) && defined(CONFIG_X86_64)
+	if (request == IOCTL_MSR32) {
+		struct mtx_msr_container32 __user *__msr32 =
+		    compat_ptr(ptr_data);
+		/* struct mtx_msr_container32 __user *__msr32 = (struct mtx_msr_container32 *)ptr_data;*/
+		u32 data;
+		/* printk(KERN_INFO "SIZE = %u (%u)\n", sizeof(*__msr32), sizeof(__msr32->msrType1));*/
+		data = (u32) mtx_msr_drv.length;
+		if (put_user(data, &__msr32->length)) {
+			goto ERROR;
+		}
+		data = (u32) mtx_msr_drv.msrType1.eax_LSB;
+		if (put_user(data, &__msr32->msrType1.eax_LSB)) {
+			goto ERROR;
+		}
+		/* printk(KERN_INFO "eax_LSB = %u\n", data);*/
+		data = (u32) mtx_msr_drv.msrType1.edx_MSB;
+		if (put_user(data, &__msr32->msrType1.edx_MSB)) {
+			goto ERROR;
+		}
+		/* printk(KERN_INFO "edx_MSB = %u\n", data);*/
+		data = (u32) mtx_msr_drv.msrType1.ecx_address;
+		if (put_user(data, &__msr32->msrType1.ecx_address)) {
+			goto ERROR;
+		}
+		data = (u32) mtx_msr_drv.msrType1.ebx_value;
+		if (put_user(data, &__msr32->msrType1.ebx_value)) {
+			goto ERROR;
+		}
+		data = (u32) mtx_msr_drv.msrType1.n_cpu;
+		if (put_user(data, &__msr32->msrType1.n_cpu)) {
+			goto ERROR;
+		}
+		data = (u32) mtx_msr_drv.msrType1.operation;
+		if (put_user(data, &__msr32->msrType1.operation)) {
+			goto ERROR;
+		}
+		/* printk(KERN_INFO "OK: copied MSR32!\n");*/
+		vfree(buffer);
+		return 0;
+	}
+#endif /* COMPAT && x64*/
+
+	if (copy_to_user
+	    ((struct mtx_msr_container *)ptr_data, &mtx_msr_drv,
+	     sizeof(mtx_msr_drv)) > 0) {
+		dev_dbg(matrix_device, "file : %s ,function : %s ,line %i\n",
+			__FILE__, __func__, __LINE__);
+		goto ERROR;
+	}
+	vfree(buffer);
+	return 0;
+ERROR:
+	vfree(buffer);
+	return -EFAULT;
+}
+
+/**
+ * ioctl_sram - memory map refers to a structure designed to hold data related
+ * to SRAM (Shared RAM).
+ * @ptr_data : gets the address of the user buffer that has to be populated
+ */
+static int IOCTL_sram(unsigned long ptr_data)
+{
+#if DO_ANDROID
+	struct memory_map mem_map_drv;
+	char *buffer = NULL;
+	if ((struct memory_map *)ptr_data == NULL) {
+		dev_dbg(matrix_device,
+			"Data Transfer can not be done as user buffer is NULL..\n");
+		return -EFAULT;
+	}
+	if (copy_from_user
+	    (&mem_map_drv,
+	     (struct memory_map *)ptr_data, sizeof(mem_map_drv)) > 0) {
+		dev_dbg(matrix_device, "Transferring data had issues..\n");
+		return -EFAULT;
+	}
+	if (mem_map_drv.ctrl_addr != 0) {
+		void *remap_addr = ioremap_nocache
+		    (mem_map_drv.ctrl_addr, sizeof(unsigned long));
+		if (remap_addr == NULL) {
+			dev_dbg(matrix_device, "IOREMAP has issue..\n");
+			return -ENOMEM;
+		}
+		writel(mem_map_drv.ctrl_data, remap_addr);
+		iounmap(remap_addr);
+	} else {
+		dev_dbg(matrix_device, "file : %s ,function : %s ,line %i\n",
+			__FILE__, __func__, __LINE__);
+		return -EFAULT;
+	}
+	MATRIX_VMALLOC(buffer, mem_map_drv.data_size, ERROR);
+	mem_map_drv.data_remap_address =
+	    ioremap_nocache(mem_map_drv.data_addr, mem_map_drv.data_size);
+	if (mem_map_drv.data_remap_address == NULL) {
+		dev_dbg(matrix_device, "IOREMAP has issue..\n");
+		goto ERROR;
+	}
+	memcpy(buffer, mem_map_drv.data_remap_address, mem_map_drv.data_size);
+	if (copy_to_user
+	    (mem_map_drv.ptr_data_usr, buffer, mem_map_drv.data_size) > 0) {
+		dev_dbg(matrix_device, "file : %s ,function : %s ,line %i\n",
+			__FILE__, __func__, __LINE__);
+		iounmap(mem_map_drv.data_remap_address);
+		goto ERROR;
+	}
+	iounmap(mem_map_drv.data_remap_address);
+	vfree(buffer);
+	return 0;
+ERROR:
+	vfree(buffer);
+	return -EFAULT;
+#endif /* DO_ANDROID*/
+	return 0;
+}
+
+/**
+ * read_config - procedure to read the config db registers (very generic)
+ * @ptr_data : gets the address of the user buffer that has to be populated
+ */
+static int mt_read_config(unsigned long *ptr_data)
+{
+#if DO_ANDROID
+	unsigned long buf, data;
+
+	if (copy_from_user(&buf, (u32 *) ptr_data, sizeof(unsigned long)) > 0) {
+		dev_err(matrix_device, "file : %s ,function : %s ,line %i\n",
+			__FILE__, __func__, __LINE__);
+		return -EFAULT;
+	}
+	data = mt_platform_pci_read32(buf);
+	/* Write back to the same user buffer */
+	if (copy_to_user
+	    ((unsigned long *)ptr_data, &data, sizeof(unsigned long)) > 0) {
+		dev_err(matrix_device, "file : %s ,function : %s ,line %i\n",
+			__FILE__, __func__, __LINE__);
+		return -EFAULT;
+	}
+#endif /* DO_ANDROID*/
+	return 0;
+}
+
+/**
+ * write_config - proceduer to write the config db registers
+ * @ptr_data : user buffer address that contains information like
+ * mcr (port) and mdr (data) used for writing config DB registers.
+ */
+static inline int mt_write_config(unsigned long *ptr_data)
+{
+#if DO_ANDROID
+	unsigned long addr, val;
+	struct mtx_pci_ops pci_data;
+	if (copy_from_user
+	    (&pci_data,
+	     (struct mtx_pci_ops *)ptr_data, sizeof(struct mtx_pci_ops)) > 0) {
+		dev_err(matrix_device, "file : %s ,function : %s ,line %i\n",
+			__FILE__, __func__, __LINE__);
+		return -EFAULT;
+	}
+	addr = pci_data.port | (MCR_WRITE_OPCODE << BIT_POS_OPCODE);
+	val = pci_data.data;
+	mt_platform_pci_write32(addr, val);
+#endif /* DO_ANDROID*/
+	return 0;
+}
+
+/**
+ * read_pci_config - procedure to read the pci configuration space
+ * @ptr_data : gets the pci configuration info like bus, device,
+ * function and the offset in the config space. Also, returns
+ * the read data in "data" field of the structure
+ */
+static inline int mt_read_pci_config(unsigned long *ptr_data)
+{
+	int ret = 0;
+#if DO_ANDROID
+	struct pci_config pci_config_data;
+	struct pci_dev *pdev = NULL;
+	if (copy_from_user
+	    (&pci_config_data,
+	     (struct pci_config *)ptr_data, sizeof(struct pci_config)) > 0) {
+		dev_err(matrix_device, "file : %s ,function : %s ,line %i\n",
+			__FILE__, __func__, __LINE__);
+		return -EFAULT;
+	}
+	pdev = pci_get_bus_and_slot(pci_config_data.bus,
+				    PCI_DEVFN(pci_config_data.device,
+					      pci_config_data.function));
+	if (!pdev) {
+		ret = -EINVAL;
+		goto exit;
+	}
+	ret = pci_read_config_dword(pdev, pci_config_data.offset,
+				    (u32 *) & pci_config_data.data);
+	/* Write back to the same user buffer */
+	if (copy_to_user
+	    ((unsigned long *)ptr_data, &pci_config_data,
+	     sizeof(struct pci_config)) > 0) {
+		dev_err(matrix_device, "file : %s ,function : %s ,line %i\n",
+			__FILE__, __func__, __LINE__);
+		return -EFAULT;
+	}
+#endif /* DO_ANDROID*/
+exit:
+	return ret;
+}
+
+/**
+ * read_gmch_gen_pur_regs -  use this function to retrieve the complete set of
+ * general purpose gmch registers
+ * @data : gets the address of the user buffer that has to be populated
+ * @read_mask : read_mask applies mask corresponding to the platform
+ */
+static void mt_read_gmch_gen_pur_regs(unsigned long *data, unsigned long *clks,
+				      unsigned long read_mask)
+{
+#if DO_ANDROID
+	if (data && clks) {
+		data[0] =
+		    mt_platform_pci_read32(MTX_GMCH_PMON_GP_CTR0_L | read_mask);
+		data[1] =
+		    mt_platform_pci_read32(MTX_GMCH_PMON_GP_CTR0_H | read_mask);
+		data[2] =
+		    mt_platform_pci_read32(MTX_GMCH_PMON_GP_CTR1_L | read_mask);
+		data[3] =
+		    mt_platform_pci_read32(MTX_GMCH_PMON_GP_CTR1_H | read_mask);
+		data[4] =
+		    mt_platform_pci_read32(MTX_GMCH_PMON_GP_CTR2_L | read_mask);
+		data[5] =
+		    mt_platform_pci_read32(MTX_GMCH_PMON_GP_CTR2_H | read_mask);
+		data[6] =
+		    mt_platform_pci_read32(MTX_GMCH_PMON_GP_CTR3_L | read_mask);
+		data[7] =
+		    mt_platform_pci_read32(MTX_GMCH_PMON_GP_CTR3_H | read_mask);
+		clks[0] =
+		    mt_platform_pci_read32(MTX_GMCH_PMON_FIXED_CTR0 |
+					   read_mask);
+	}
+#endif /* DO_ANDROID*/
+}
+
+/**
+ * gmch_gen_pur_regs_trigger_enable -  use this function to trigger global flag
+ * that enables/disables gmch counters.
+ * @enable : enable is boolean.
+ * @write_mask : write_mask applies mask corresponding to the platform
+ */
+static void mt_gmch_gen_pur_regs_trigger_enable(bool enable,
+						unsigned long write_mask)
+{
+#if DO_ANDROID
+	if (enable)
+		mt_platform_pci_write32((MTX_GMCH_PMON_GLOBAL_CTRL |
+					 write_mask),
+					MTX_GMCH_PMON_GLOBAL_CTRL_ENABLE);
+	else
+		mt_platform_pci_write32((MTX_GMCH_PMON_GLOBAL_CTRL |
+					 write_mask),
+					MTX_GMCH_PMON_GLOBAL_CTRL_DISABLE);
+#endif /* DO_ANDROID*/
+}
+
+/**
+ * write_mt_gmch_gen_pur_regs -  use this function to write to the complete set of
+ * general purpose gmch registers
+ * @write_mask : write_mask applies mask corresponding to the platform
+ */
+static void mt_write_gmch_gen_pur_regs(unsigned long data,
+				       unsigned long write_mask)
+{
+#if DO_ANDROID
+	mt_platform_pci_write32((MTX_GMCH_PMON_GP_CTR0_L | write_mask), data);
+	mt_platform_pci_write32((MTX_GMCH_PMON_GP_CTR0_H | write_mask), data);
+	mt_platform_pci_write32((MTX_GMCH_PMON_GP_CTR1_L | write_mask), data);
+	mt_platform_pci_write32((MTX_GMCH_PMON_GP_CTR1_H | write_mask), data);
+	mt_platform_pci_write32((MTX_GMCH_PMON_GP_CTR2_L | write_mask), data);
+	mt_platform_pci_write32((MTX_GMCH_PMON_GP_CTR2_H | write_mask), data);
+	mt_platform_pci_write32((MTX_GMCH_PMON_GP_CTR3_L | write_mask), data);
+	mt_platform_pci_write32((MTX_GMCH_PMON_GP_CTR3_H | write_mask), data);
+	mt_platform_pci_write32((MTX_GMCH_PMON_FIXED_CTR_CTRL | write_mask),
+				data);
+	mt_platform_pci_write32((MTX_GMCH_PMON_FIXED_CTR_CTRL | write_mask),
+				DATA_ENABLE);
+#endif /* DO_ANDROID*/
+}
+
+/**
+ * mt_reset_gmch_gen_pur_regs -  use this function to reset all of the gmch
+ * peformance counters
+ * @event : event points to the first of the event passed in from the
+ * application space.
+ * @mcr1 : config register 1 for perf event selection
+ * @mcr2 : config register 2 for perf event selection
+ * @mcr3 : config register 3 if ,for perf event selection depends on platform
+ */
+static void mt_reset_gmch_gen_pur_regs(unsigned long
+				       *event, unsigned long
+				       *mcr1, unsigned long
+				       *mcr2, unsigned long
+				       *mcr3, unsigned long write_mask)
+{
+	unsigned long count = 0;
+#if DO_ANDROID
+	if (event == NULL || mcr1 == NULL || mcr2 == NULL || mcr3 == NULL)
+		return;
+
+	/*disable  gmch general purpose counter */
+	mt_gmch_gen_pur_regs_trigger_enable(false, write_mask);
+
+	/*re-initialize gmch general purpose counter */
+	mt_write_gmch_gen_pur_regs(0x00000000, write_mask);
+
+	/*trigger performance counters */
+	for (count = 0; count < 4; count++) {
+		if (mcr1[count])
+			mt_platform_pci_write32(mcr1[count], event[count]);
+		if (mcr2[count])
+			mt_platform_pci_write32(mcr2[count], event[count]);
+		if (mcr3[count])
+			mt_platform_pci_write32(mcr3[count], event[count]);
+	}
+
+	/*enable gmch general purpose counter */
+	mt_gmch_gen_pur_regs_trigger_enable(true, write_mask);
+#endif /* DO_ANDROID*/
+}
+
+/**
+ * ioctl_gmch - gmch_container refers to a struct designed to hold data related
+ * to GMCH( The Greater Memeory Controller Hub, giving access to all Emons
+ * @ptr_data : gets the address of the user buffer that has to be populated
+ */
+static int IOCTL_gmch(unsigned long ioctl_request, unsigned long ptr_data)
+{
+#if DO_ANDROID
+	struct gmch_container gmch_drv;
+	if ((struct gmch_container *)ptr_data == NULL) {
+		dev_dbg(matrix_device,
+			"Data Transfer can not be done as user buffer is NULL..\n");
+		return -EFAULT;
+	}
+	if (copy_from_user
+	    (&gmch_drv,
+	     (struct gmch_container *)ptr_data,
+	     sizeof(struct gmch_container)) > 0) {
+		dev_dbg(matrix_device, "file : %s ,function : %s ,line %i\n",
+			__FILE__, __func__, __LINE__);
+		return -EFAULT;
+	}
+
+	/* read gmch counters */
+	mt_read_gmch_gen_pur_regs(gmch_drv.data, &gmch_drv.core_clks,
+				  gmch_drv.read_mask);
+	MATRIX_GET_TIME_STAMP(gmch_drv.time_stamp);
+
+	/* reset gmch counters */
+	if (ioctl_request == IOCTL_GMCH_RESET) {
+		mt_reset_gmch_gen_pur_regs(gmch_drv.event,
+					   gmch_drv.mcr1,
+					   gmch_drv.mcr2,
+					   gmch_drv.mcr3, gmch_drv.write_mask);
+	}
+	if (copy_to_user
+	    ((struct gmch_container *)ptr_data,
+	     &gmch_drv, sizeof(struct gmch_container)) > 0) {
+		dev_dbg(matrix_device, "file : %s ,function : %s ,line %i\n",
+			__FILE__, __func__, __LINE__);
+		return -EFAULT;
+	}
+#endif /* DO_ANDROID*/
+	return 0;
+}
+
+/*
+ * The following function reads/writes to an MSR with
+ * inputs given by the user. The two primary use cases of
+ * this  function are: a) Request to read IA32_PERF_STATUS MSR from ring3.
+ * b) Debugging from user space. There could be other users of this in the
+ * future.
+ */
+static int mt_operate_on_msr(unsigned long ptr_data)
+{
+	struct mtx_msr data_msr;
+	if (copy_from_user
+	    (&data_msr, (struct mtx_msr *)ptr_data,
+	     sizeof(struct mtx_msr)) > 0) {
+		dev_dbg(matrix_device, "file : %s ,function : %s ,line %i\n",
+			__FILE__, __func__, __LINE__);
+		return -EFAULT;
+	}
+	if (data_msr.operation == READ_OP)
+		rdmsr(data_msr.ecx_address, data_msr.eax_LSB, data_msr.edx_MSB);
+	else if (data_msr.operation == WRITE_OP)
+		wrmsr(data_msr.ecx_address, data_msr.eax_LSB, data_msr.edx_MSB);
+	else
+		return -EFAULT;
+	if (copy_to_user((struct mtx_msr *)ptr_data, &data_msr,
+			 sizeof(struct mtx_msr)) > 0) {
+		dev_dbg(matrix_device,
+			"file : %s ,function : %s ,line %i\n",
+			__FILE__, __func__, __LINE__);
+		return -EFAULT;
+	}
+	return 0;
+}
+
+static long mt_get_scu_fw_version(u16 __user * remote_data)
+{
+	u16 local_data = pw_scu_fw_major_minor;
+	if (put_user(local_data, remote_data)) {
+		printk(KERN_INFO
+		       "ERROR transfering SCU F/W version to userspace!\n");
+		return -EFAULT;
+	}
+	return 0;		/* SUCCESS*/
+};
+
+static long mt_get_soc_stepping(u32 __user * remote_data)
+{
+#ifdef CONFIG_X86_WANT_INTEL_MID
+	{
+		u32 local_data = intel_mid_soc_stepping();
+		if (put_user(local_data, remote_data)) {
+			printk(KERN_INFO
+			       "ERROR transfering soc stepping number to userspace!\n");
+			return -EFAULT;
+		}
+		return 0;	/* SUCCESS*/
+	}
+#endif /* CONFIG_X86_WANT_INTEL_MID*/
+	return -EFAULT;
+};
+
+static long mt_get_version(u32 __user * remote_data)
+{
+	/*
+	 * Protocol:
+	 * Driver version info is:
+	 * [Internal/External] << 24 | [Major Num] << 16 | [Minor Num] << 8 | [Other Num]
+	 * Internal/External is: 1 ==> INTERNAL, 0 ==> EXTERNAL
+	 */
+	u32 local_data =
+	    (0 /*External */  << 24) | ((u8) PW_DRV_VERSION_MAJOR) << 16 | ((u8)
+									    PW_DRV_VERSION_MINOR)
+	    << 8 | (u8) PW_DRV_VERSION_OTHER;
+	if (put_user(local_data, remote_data)) {
+		printk(KERN_INFO
+		       "ERROR transfering driver version information to userspace!\n");
+		return -EFAULT;
+	}
+	return 0;		/* SUCCESS*/
+};
+
+/*
+ * GU: Use this version of the function for now, for debugging
+ */
+static long matrix_ioctl(struct file
+			 *filp, unsigned int request, unsigned long ptr_data)
+{
+	/* printk(KERN_INFO "Received MATRIX IOCTL: %u\n", request);*/
+	switch (request) {
+	case IOCTL_VERSION_INFO:
+#if defined(HAVE_COMPAT_IOCTL) && defined(CONFIG_X86_64)
+	case IOCTL_VERSION_INFO32:
+#endif /* COMPAT && x64*/
+		/*
+		 * UNSUPPORTED
+		 * Use 'IOCTL_GET_DRIVER_VERSION/IOCTL_GET_DRIVER_VERSION32' instead.
+		 */
+		printk(KERN_INFO "ERROR: INVALID ioctl num %u received!\n",
+		       request);
+		return -PW_ERROR;
+		break;
+	case IOCTL_INIT_MEMORY:
+#if defined(HAVE_COMPAT_IOCTL) && defined(CONFIG_X86_64)
+	case IOCTL_INIT_MEMORY32:
+#endif /* COMPAT && x64*/
+		/* printk(KERN_INFO "IOCTL_INIT_MEMORY received!\n");*/
+		return mt_initialize_memory(ptr_data);
+	case IOCTL_FREE_MEMORY:
+		/* printk(KERN_INFO "IOCTL_FREE_MEMORY received!\n");*/
+		return mt_free_memory();
+	case IOCTL_OPERATE_ON_MSR:
+		/* printk(KERN_INFO "IOCTL_OPERATE_ON_MSR received!\n");*/
+		return mt_operate_on_msr(ptr_data);
+	case IOCTL_INIT_SCAN:
+	case IOCTL_TERM_SCAN:
+#if defined(HAVE_COMPAT_IOCTL) && defined(CONFIG_X86_64)
+	case IOCTL_INIT_SCAN32:
+	case IOCTL_TERM_SCAN32:
+#endif /* COMPAT && x64*/
+		/* printk(KERN_INFO "IOCTL_{INIT,TERM}_SCAN received!\n");*/
+		return mt_msg_data_scan(request);
+	case IOCTL_POLL_SCAN:
+#if defined(HAVE_COMPAT_IOCTL) && defined(CONFIG_X86_64)
+	case IOCTL_POLL_SCAN32:
+#endif /* COMPAT && x64*/
+		/* printk(KERN_INFO "IOCTL_POLL_SCAN received!\n");*/
+		/* return mt_poll_scan(ptr_data);*/
+		return mt_msg_poll_scan(ptr_data);
+	case IOCTL_COPY_TO_USER:
+		/* printk(KERN_INFO "IOCTL_COPY_TO_USER received!\n");*/
+		return mt_transfer_data(ptr_data);
+		/* MSR based ioctl's */
+	case IOCTL_MSR:
+		/* printk(KERN_INFO "IOCTL_MSR received!\n");*/
+		return IOCTL_mtx_msr(ptr_data, request);
+		/* SRAM based ioctl's */
+	case IOCTL_SRAM:
+		/* printk(KERN_INFO "IOCTL_SRAM received!\n");*/
+		return IOCTL_sram(ptr_data);
+		/* return -1;*/
+		/* GMCH based ioctl's */
+	case IOCTL_GMCH:
+		/* printk(KERN_INFO "IOCTL_GMCH received!\n");*/
+		return IOCTL_gmch(request, ptr_data);
+	case IOCTL_GMCH_RESET:
+		/* printk(KERN_INFO "IOCTL_GMCH_REQUEST received!\n");*/
+		return IOCTL_gmch(request, ptr_data);
+	case IOCTL_READ_CONFIG_DB:
+		/* printk(KERN_INFO "IOCTL_READ_CONFIG_DB received!\n");*/
+		return mt_read_config((unsigned long *)ptr_data);
+		/* return -1;*/
+	case IOCTL_WRITE_CONFIG_DB:
+		/* printk(KERN_INFO "IOCTL_WRITE_CONFIG_DB received!\n");*/
+		return mt_write_config((unsigned long *)ptr_data);
+		/* return -1;*/
+	case IOCTL_READ_PCI_CONFIG:
+		return mt_read_pci_config((unsigned long *)ptr_data);
+	case IOCTL_GET_SOC_STEPPING:
+#if defined(HAVE_COMPAT_IOCTL) && defined(CONFIG_X86_64)
+	case IOCTL_GET_SOC_STEPPING32:
+#endif /* compat && x64*/
+		if (!ptr_data) {
+			printk(KERN_INFO
+			       "NULL ptr_data in matrix_ioctl IOCTL_GET_SOC_STEPPING\n");
+			return -EFAULT;
+		}
+		return mt_get_soc_stepping((u32 *) ptr_data);
+	case IOCTL_GET_SCU_FW_VERSION:
+#if defined(HAVE_COMPAT_IOCTL) && defined(CONFIG_X86_64)
+	case IOCTL_GET_SCU_FW_VERSION32:
+#endif /* compat && x64*/
+		if (!ptr_data) {
+			printk(KERN_INFO
+			       "NULL ptr_data in matrix_ioctl IOCTL_GET_SCU_FW_VERSION\n");
+			return -EFAULT;
+		}
+		return mt_get_scu_fw_version((u16 __user *) ptr_data);
+	case IOCTL_GET_DRIVER_VERSION:
+#if defined(HAVE_COMPAT_IOCTL) && defined(CONFIG_X86_64)
+	case IOCTL_GET_DRIVER_VERSION32:
+#endif /* compat && x64*/
+		return mt_get_version((u32 *) ptr_data);
+	default:
+		printk(KERN_INFO "INVALID IOCTL = %u received!\n", request);
+		dev_dbg(matrix_device,
+			"file : %s ,function : %s ,line %i\n", __FILE__,
+			__func__, __LINE__);
+		return -EFAULT;
+	}
+	return 0;
+}
+
+static int matrix_release(struct inode *in, struct file *filp)
+{
+	if (instantiated) {
+		mt_free_memory();
+		instantiated = false;
+	}
+	module_put(THIS_MODULE);
+	return 0;
+}
+
+static const struct file_operations matrix_fops = {
+	.owner = THIS_MODULE,
+	.open = matrix_open,
+	.unlocked_ioctl = matrix_ioctl,
+#if defined(HAVE_COMPAT_IOCTL) && defined(CONFIG_X86_64)
+	.compat_ioctl = &mt_device_compat_ioctl,
+#endif /* COMPAT && x64*/
+	.release = matrix_release
+};
+
+/*
+ * Compat IOCTL support.
+ */
+#if defined(HAVE_COMPAT_IOCTL) && defined(CONFIG_X86_64)
+
+#if 1
+static int mt_copy_mtx_msr_info_i(struct mtx_msr *local_msr,
+				  const struct mtx_msr32 __user * remote_msr32,
+				  u32 length)
+{
+	int retVal = PW_SUCCESS;
+	unsigned long i = 0;
+	/* u32 data;*/
+
+	for (i = 0; i < length; ++i) {
+		if (get_user(local_msr[i].eax_LSB, &remote_msr32[i].eax_LSB)) {
+			return -EFAULT;
+		}
+		if (get_user(local_msr[i].edx_MSB, &remote_msr32[i].edx_MSB)) {
+			return -EFAULT;
+		}
+		if (get_user
+		    (local_msr[i].ecx_address, &remote_msr32[i].ecx_address)) {
+			return -EFAULT;
+		}
+		if (get_user
+		    (local_msr[i].ebx_value, &remote_msr32[i].ebx_value)) {
+			return -EFAULT;
+		}
+		if (get_user(local_msr[i].n_cpu, &remote_msr32[i].n_cpu)) {
+			return -EFAULT;
+		}
+		if (get_user
+		    (local_msr[i].operation, &remote_msr32[i].operation)) {
+			return -EFAULT;
+		}
+		/* printk(KERN_INFO "DEBUG: addr[%lu] = 0x%lx, cpu = %lu, operation = %lu\n", i, local_msr[i].ecx_address, local_msr[i].n_cpu, local_msr[i].operation);*/
+	}
+	return retVal;
+};
+#endif /* if 0*/
+
+#if 1
+static int mt_copy_mmap_info_i(struct memory_map *local_mem,
+			       const struct memory_map32 __user * remote_mem32,
+			       unsigned long length)
+{
+	unsigned long i = 0;
+	int retVal = PW_SUCCESS;
+	/* u32 data;*/
+
+	for (i = 0; i < length; ++i) {
+		if (get_user
+		    (local_mem[i].ctrl_addr, &remote_mem32[i].ctrl_addr)) {
+			return -EFAULT;
+		}
+		/*
+		 * 'ctrl_remap_address' is never passed in from user space.
+		 * Set to NULL explicitly.
+		 */
+		/*
+		   if (copy_from_user(&local_mem[i].ctrl_remap_address, &remote_remote_mem32[i].ctrl_remap_address, sizeof(remote_remote_mem32[i].ctrl_remap_address))) {
+		   return -EFAULT;
+		   }
+		 */
+		local_mem[i].ctrl_remap_address = NULL;
+		if (get_user
+		    (local_mem[i].ctrl_data, &remote_mem32[i].ctrl_data)) {
+			return -EFAULT;
+		}
+		if (get_user
+		    (local_mem[i].data_addr, &remote_mem32[i].data_addr)) {
+			return -EFAULT;
+		}
+		/*
+		 * 'data_remap_address' is never passed in from user space.
+		 * Set to NULL explicitly.
+		 */
+		local_mem[i].data_remap_address = NULL;
+		/*
+		   if (get_user(local_mem[i].data_remap_address, compat_ptr(remote_mem32[i].data_remap_address))) {
+		   return -EFAULT;
+		   }
+		 */
+		/*
+		 * 'ptr_data_usr' is only used in 'IOCTL_sram', which doesn't utilize
+		 * the lookup table.
+		 * Set to NULL explicitly.
+		 */
+		/*
+		   if (get_user(local_mem[i].ptr_data_usr, &remote_mem32[i].ptr_data_usr)) {
+		   return -EFAULT;
+		   }
+		 */
+		local_mem[i].ptr_data_usr = NULL;
+		if (get_user
+		    (local_mem[i].data_size, &remote_mem32[i].data_size)) {
+			return -EFAULT;
+		}
+		if (get_user
+		    (local_mem[i].operation, &remote_mem32[i].operation)) {
+			return -EFAULT;
+		}
+		/* printk(KERN_INFO "DEBUG: [%lu]: ctrl_addr = %lu, ctrl_data = %lu, data_addr = %lu, data_size = %lu, operation = %lu\n", i, local_mem[i].ctrl_addr, local_mem[i].ctrl_data, local_mem[i].data_addr, local_mem[i].data_size, local_mem[i].operation);*/
+	}
+	return retVal;
+};
+#endif /* if 0*/
+
+#if 1
+static int mt_copy_pci_info_i(struct mtx_pci_ops *local_pci,
+			      const struct mtx_pci_ops32 __user * remote_pci32,
+			      unsigned long length)
+{
+	unsigned long i = 0;
+
+	for (i = 0; i < length; ++i) {
+		if (get_user(local_pci[i].port, &remote_pci32[i].port)) {
+			return -EFAULT;
+		}
+		if (get_user(local_pci[i].data, &remote_pci32[i].data)) {
+			return -EFAULT;
+		}
+		if (get_user(local_pci[i].io_type, &remote_pci32[i].io_type)) {
+			return -EFAULT;
+		}
+		if (get_user
+		    (local_pci[i].port_island, &remote_pci32[i].port_island)) {
+			return -EFAULT;
+		}
+		/* printk(KERN_INFO "DEBUG: pci port = %lu, data = %lu, type = %lu, port_island = %lu\n", local_pci[i].port, local_pci[i].data, local_pci[i].io_type, local_pci[i].port_island);*/
+	}
+	return PW_SUCCESS;
+};
+#endif /* if 0*/
+
+#if 1
+static int mt_copy_cfg_db_info_i(unsigned long *local_cfg,
+				 const u32 __user * remote_cfg32,
+				 unsigned long length)
+{
+	unsigned long i = 0;
+
+	for (i = 0; i < length; ++i) {
+		if (get_user(local_cfg[i], &remote_cfg32[i])) {
+			return -EFAULT;
+		}
+		/* printk(KERN_INFO "DEBUG: local_cfg[%lu] = %lu\n", i, local_cfg[i]);*/
+	}
+	return PW_SUCCESS;
+};
+#endif /* if 0*/
+
+#if 1
+static int mt_copy_soc_perf_info_i(struct mtx_soc_perf *local_soc_perf,
+				   const struct mtx_soc_perf32 __user *
+				   remote_soc_perf32, unsigned long length)
+{
+	unsigned long i = 0;
+
+	for (i = 0; i < length; ++i) {
+		/*
+		 * 'ptr_data_usr' is never used.
+		 * Set to NULL explicitly.
+		 */
+		local_soc_perf[i].ptr_data_usr = NULL;
+		if (get_user
+		    (local_soc_perf[i].data_size,
+		     &remote_soc_perf32[i].data_size)) {
+			return -EFAULT;
+		}
+		if (get_user
+		    (local_soc_perf[i].operation,
+		     &remote_soc_perf32[i].operation)) {
+			return -EFAULT;
+		}
+	}
+	return PW_SUCCESS;
+};
+#endif /* if 0*/
+
+static long mt_device_compat_ioctl(struct file *file, unsigned int ioctl_num,
+				   unsigned long ioctl_param)
+{
+	switch (ioctl_num) {
+	case IOCTL_INIT_MEMORY32:
+		/* printk(KERN_INFO "IOCTL_INIT_MEMORY32\n");*/
+		return mt_device_compat_init_ioctl_i(file, ioctl_num,
+						     ioctl_param);
+	case IOCTL_MSR32:
+		/* printk(KERN_INFO "IOCTL_MSR32\n");*/
+		return mt_device_compat_msr_ioctl_i(file, ioctl_num,
+						    ioctl_param);
+	case IOCTL_READ_CONFIG_DB32:
+		/* printk(KERN_INFO "IOCTL_READ_CONFIG_DB32\n");*/
+		return mt_device_compat_config_db_ioctl_i(file, ioctl_num,
+							  ioctl_param);
+	case IOCTL_READ_PCI_CONFIG32:
+		/* printk(KERN_INFO "IOCTL_READ_PCI_CONFIG32\n");*/
+		return mt_device_compat_pci_config_ioctl_i(file, ioctl_num,
+							   ioctl_param);
+	case IOCTL_GET_SCU_FW_VERSION32:
+		/* printk(KERN_INFO "IOCTL_GET_SCU_FW_VERSION32\n");*/
+		return mt_get_scu_fw_version_compat_i(compat_ptr(ioctl_param));
+	default:
+		/* printk(KERN_INFO "OTHER\n");*/
+		break;
+	}
+	return matrix_ioctl(file, ioctl_num, ioctl_param);
+};
+
+#if 1
+static long mt_device_compat_init_ioctl_i(struct file *file,
+					  unsigned int ioctl_num,
+					  unsigned long ioctl_param)
+{
+	struct lookup_table32 __user *__tab32 = compat_ptr(ioctl_param);
+	/* struct lookup_table __user *__tab = NULL;*/
+	/* u8 *__buffer = NULL;*/
+	/* u32 data = 0;*/
+	struct lookup_table32 __tmp;
+	/* struct lookup_table *ptr_lut = NULL;*/
+	/* struct lookup_table __user *remote_tab = NULL;*/
+	size_t __size = 0;
+	long retVal = PW_SUCCESS;
+
+	/*
+	 * Basic algo:
+	 * 1. Calculate total memory requirement for the 64b structure (including space required for all arrays etc.)
+	 * 2. Allocate entire lookup table chunk in 'compat' space (via 'compat_alloc_user_space()').
+	 * 3. Patch up pointers (individual array pointers in the lookup table need to point into the chunk allocated in 2, above).
+	 * 4. Copy over the "header" (all of the non-pointer fields) from 32b --> 64b
+	 * 5. For each pointer
+	 * a. Manually copy all fields from 32b userspace struct to 64b userspace struct.
+	 */
+
+	if (copy_from_user(&__tmp, __tab32, sizeof(__tmp))) {
+		printk(KERN_INFO "ERROR in length user copy!\n");
+		return -EFAULT;
+	}
+	ptr_lut = vmalloc(sizeof(*ptr_lut));
+	if (!ptr_lut) {
+		printk(KERN_INFO "ERROR allocating ptr_lut\n");
+		return -EFAULT;
+	}
+	memset(ptr_lut, 0, sizeof(*ptr_lut));
+	/*
+	   if (true) {
+	   printk(KERN_INFO "DEBUG: setting pci_ops length to zero!\n");
+	   __tmp.pci_ops_init_length = __tmp.pci_ops_poll_length = __tmp.pci_ops_term_length = 0;
+	   }
+	 */
+	/*
+	 * Step 1.
+	 */
+	{
+		__size +=
+		    (size_t) __tmp.msr_init_length * sizeof(struct mtx_msr);
+		__size +=
+		    (size_t) __tmp.msr_poll_length * sizeof(struct mtx_msr);
+		__size +=
+		    (size_t) __tmp.msr_term_length * sizeof(struct mtx_msr);
+
+		/* printk(KERN_INFO "msr_init_length = %u\n", __tmp.msr_init_length);*/
+		/* printk(KERN_INFO "msr_poll_length = %u\n", __tmp.msr_poll_length);*/
+		/* printk(KERN_INFO "msr_term_length = %u\n", __tmp.msr_term_length);*/
+
+		__size +=
+		    (size_t) __tmp.mem_init_length * sizeof(struct memory_map);
+		__size +=
+		    (size_t) __tmp.mem_poll_length * sizeof(struct memory_map);
+		__size +=
+		    (size_t) __tmp.mem_term_length * sizeof(struct memory_map);
+
+		/* printk(KERN_INFO "mem_init_length = %u\n", __tmp.mem_init_length);*/
+		/* printk(KERN_INFO "mem_poll_length = %u\n", __tmp.mem_poll_length);*/
+		/* printk(KERN_INFO "mem_term_length = %u\n", __tmp.mem_term_length);*/
+
+		__size +=
+		    (size_t) __tmp.pci_ops_init_length *
+		    sizeof(struct mtx_pci_ops);
+		__size +=
+		    (size_t) __tmp.pci_ops_poll_length *
+		    sizeof(struct mtx_pci_ops);
+		__size +=
+		    (size_t) __tmp.pci_ops_term_length *
+		    sizeof(struct mtx_pci_ops);
+
+		/* printk(KERN_INFO "pci_ops_init_length = %u\n", __tmp.pci_ops_init_length);*/
+		/* printk(KERN_INFO "pci_ops_poll_length = %u\n", __tmp.pci_ops_poll_length);*/
+		/* printk(KERN_INFO "pci_ops_term_length = %u\n", __tmp.pci_ops_term_length);*/
+
+		__size +=
+		    (size_t) __tmp.cfg_db_init_length * sizeof(unsigned long);
+		__size +=
+		    (size_t) __tmp.cfg_db_poll_length * sizeof(unsigned long);
+		__size +=
+		    (size_t) __tmp.cfg_db_term_length * sizeof(unsigned long);
+
+		/* printk(KERN_INFO "cfg_db_init_length = %u\n", __tmp.cfg_db_init_length);*/
+		/* printk(KERN_INFO "cfg_db_poll_length = %u\n", __tmp.cfg_db_poll_length);*/
+		/* printk(KERN_INFO "cfg_db_term_length = %u\n", __tmp.cfg_db_term_length);*/
+
+		__size +=
+		    (size_t) __tmp.soc_perf_init_length *
+		    sizeof(struct mtx_soc_perf);
+		__size +=
+		    (size_t) __tmp.soc_perf_poll_length *
+		    sizeof(struct mtx_soc_perf);
+		__size +=
+		    (size_t) __tmp.soc_perf_term_length *
+		    sizeof(struct mtx_soc_perf);
+
+		/* printk(KERN_INFO "soc_perf_init_length = %u\n", __tmp.soc_perf_init_length);*/
+		/* printk(KERN_INFO "soc_perf_poll_length = %u\n", __tmp.soc_perf_poll_length);*/
+		/* printk(KERN_INFO "soc_perf_term_length = %u\n", __tmp.soc_perf_term_length);*/
+
+		/* __size += sizeof(struct lookup_table);*/
+	}
+
+	/* printk(KERN_INFO "SIZE = %lu\n", __size);*/
+	/*
+	 * Step 2.
+	 */
+	{
+		ptr_lut_ops = vmalloc(__size);
+		if (!ptr_lut_ops) {
+			printk(KERN_INFO
+			       "ERROR allocating space for lookup_table\n");
+			goto error;
+		}
+	}
+	/*
+	 * Step 3.
+	 */
+	{
+		int __dst_idx = 0;
+		/* ptr_lut = (struct lookup_table *)&__buffer[__dst_idx]; __dst_idx += sizeof(*ptr_lut);*/
+
+		ptr_lut->msrs_init = (struct mtx_msr *)&ptr_lut_ops[__dst_idx];
+		__dst_idx +=
+		    __tmp.msr_init_length * sizeof(*ptr_lut->msrs_init);
+		ptr_lut->mmap_init =
+		    (struct memory_map *)&ptr_lut_ops[__dst_idx];
+		__dst_idx +=
+		    __tmp.mem_init_length * sizeof(*ptr_lut->mmap_init);
+		ptr_lut->pci_ops_init =
+		    (struct mtx_pci_ops *)&ptr_lut_ops[__dst_idx];
+		__dst_idx +=
+		    __tmp.pci_ops_init_length * sizeof(*ptr_lut->pci_ops_init);
+		ptr_lut->cfg_db_init = (unsigned long *)&ptr_lut_ops[__dst_idx];
+		__dst_idx +=
+		    __tmp.cfg_db_init_length * sizeof(*ptr_lut->cfg_db_init);
+		ptr_lut->soc_perf_init =
+		    (struct mtx_soc_perf *)&ptr_lut_ops[__dst_idx];
+		__dst_idx +=
+		    __tmp.soc_perf_init_length *
+		    sizeof(*ptr_lut->soc_perf_init);
+
+		ptr_lut->msrs_poll = (struct mtx_msr *)&ptr_lut_ops[__dst_idx];
+		__dst_idx +=
+		    __tmp.msr_poll_length * sizeof(*ptr_lut->msrs_poll);
+		ptr_lut->mmap_poll =
+		    (struct memory_map *)&ptr_lut_ops[__dst_idx];
+		__dst_idx +=
+		    __tmp.mem_poll_length * sizeof(*ptr_lut->mmap_poll);
+		ptr_lut->pci_ops_poll =
+		    (struct mtx_pci_ops *)&ptr_lut_ops[__dst_idx];
+		__dst_idx +=
+		    __tmp.pci_ops_poll_length * sizeof(*ptr_lut->pci_ops_poll);
+		ptr_lut->cfg_db_poll = (unsigned long *)&ptr_lut_ops[__dst_idx];
+		__dst_idx +=
+		    __tmp.cfg_db_poll_length * sizeof(*ptr_lut->cfg_db_poll);
+		ptr_lut->soc_perf_poll =
+		    (struct mtx_soc_perf *)&ptr_lut_ops[__dst_idx];
+		__dst_idx +=
+		    __tmp.soc_perf_poll_length *
+		    sizeof(*ptr_lut->soc_perf_poll);
+
+		ptr_lut->msrs_term = (struct mtx_msr *)&ptr_lut_ops[__dst_idx];
+		__dst_idx +=
+		    __tmp.msr_term_length * sizeof(*ptr_lut->msrs_term);
+		ptr_lut->mmap_term =
+		    (struct memory_map *)&ptr_lut_ops[__dst_idx];
+		__dst_idx +=
+		    __tmp.mem_term_length * sizeof(*ptr_lut->mmap_term);
+		ptr_lut->pci_ops_term =
+		    (struct mtx_pci_ops *)&ptr_lut_ops[__dst_idx];
+		__dst_idx +=
+		    __tmp.pci_ops_term_length * sizeof(*ptr_lut->pci_ops_term);
+		ptr_lut->cfg_db_term = (unsigned long *)&ptr_lut_ops[__dst_idx];
+		__dst_idx +=
+		    __tmp.cfg_db_term_length * sizeof(*ptr_lut->cfg_db_term);
+		ptr_lut->soc_perf_term =
+		    (struct mtx_soc_perf *)&ptr_lut_ops[__dst_idx];
+		__dst_idx +=
+		    __tmp.soc_perf_term_length *
+		    sizeof(*ptr_lut->soc_perf_term);
+	}
+	/*
+	 * Step 4.
+	 */
+	{
+		/*
+		 * INIT
+		 */
+		{
+			ptr_lut->msr_init_length = __tmp.msr_init_length;
+			ptr_lut->msr_init_wb = __tmp.msr_init_wb;
+			ptr_lut->mem_init_length = __tmp.mem_init_length;
+			ptr_lut->mem_init_wb = __tmp.mem_init_wb;
+			ptr_lut->pci_ops_init_length =
+			    __tmp.pci_ops_init_length;
+			ptr_lut->pci_ops_init_wb = __tmp.pci_ops_init_wb;
+			ptr_lut->cfg_db_init_length = __tmp.cfg_db_init_length;
+			ptr_lut->cfg_db_init_wb = __tmp.cfg_db_init_wb;
+			ptr_lut->soc_perf_init_length =
+			    __tmp.soc_perf_init_length;
+			ptr_lut->soc_perf_init_wb = __tmp.soc_perf_init_wb;
+		}
+		/*
+		 * POLL
+		 */
+		{
+			ptr_lut->msr_poll_length = __tmp.msr_poll_length;
+			ptr_lut->msr_poll_wb = __tmp.msr_poll_wb;
+			ptr_lut->mem_poll_length = __tmp.mem_poll_length;
+			ptr_lut->mem_poll_wb = __tmp.mem_poll_wb;
+			ptr_lut->records = __tmp.records;
+			ptr_lut->pci_ops_poll_length =
+			    __tmp.pci_ops_poll_length;
+			ptr_lut->pci_ops_poll_wb = __tmp.pci_ops_poll_wb;
+			ptr_lut->pci_ops_records = __tmp.pci_ops_records;
+			ptr_lut->cfg_db_poll_length = __tmp.cfg_db_poll_length;
+			ptr_lut->cfg_db_poll_wb = __tmp.cfg_db_poll_wb;
+			/*
+			 * TODO
+			 * 'scu_poll'
+			 */
+			{
+				ptr_lut->scu_poll_length = 0;	/* __tmp.scu_poll_length;*/
+				ptr_lut->scu_poll.address = NULL;
+				ptr_lut->scu_poll.usr_data = NULL;
+				ptr_lut->scu_poll.drv_data = NULL;
+				ptr_lut->scu_poll.length = 0;
+			}
+			ptr_lut->soc_perf_poll_length =
+			    __tmp.soc_perf_poll_length;
+			ptr_lut->soc_perf_poll_wb = __tmp.soc_perf_poll_wb;
+			ptr_lut->soc_perf_records = __tmp.soc_perf_records;
+		}
+		/*
+		 * TERM
+		 */
+		{
+			ptr_lut->msr_term_length = __tmp.msr_term_length;
+			ptr_lut->msr_term_wb = __tmp.msr_term_wb;
+			ptr_lut->mem_term_length = __tmp.mem_term_length;
+			ptr_lut->mem_term_wb = __tmp.mem_term_wb;
+			ptr_lut->pci_ops_term_length =
+			    __tmp.pci_ops_term_length;
+			ptr_lut->pci_ops_term_wb = __tmp.pci_ops_term_wb;
+			ptr_lut->cfg_db_term_length = __tmp.cfg_db_term_length;
+			ptr_lut->cfg_db_term_wb = __tmp.cfg_db_term_wb;
+			ptr_lut->soc_perf_term_length =
+			    __tmp.soc_perf_term_length;
+			ptr_lut->soc_perf_term_wb = __tmp.soc_perf_term_wb;
+		}
+	}
+	/*
+	 * Step 5.
+	 */
+	{
+		/*
+		 * INIT
+		 */
+		{
+			if (mt_copy_mtx_msr_info_i
+			    (ptr_lut->msrs_init,
+			     (struct mtx_msr32 __user *)compat_ptr(__tmp.
+								   msrs_init),
+			     __tmp.msr_init_length)) {
+				printk(KERN_INFO "ERROR copying init msrs!\n");
+				retVal = -EFAULT;
+				goto error;
+			}
+			if (mt_copy_mmap_info_i
+			    (ptr_lut->mmap_init,
+			     (struct memory_map32 __user *)compat_ptr(__tmp.
+								      mmap_init),
+			     __tmp.mem_init_length)) {
+				printk(KERN_INFO "ERROR copying init mmap!\n");
+				retVal = -EFAULT;
+				goto error;
+			}
+			if (mt_copy_pci_info_i
+			    (ptr_lut->pci_ops_init,
+			     (struct mtx_pci_ops32 __user *)compat_ptr(__tmp.
+								       pci_ops_init),
+			     __tmp.pci_ops_init_length)) {
+				printk(KERN_INFO "ERROR copying init pci!\n");
+				retVal = -EFAULT;
+				goto error;
+			}
+			if (mt_copy_cfg_db_info_i
+			    (ptr_lut->cfg_db_init,
+			     (u32 __user *) compat_ptr(__tmp.cfg_db_init),
+			     __tmp.cfg_db_init_length)) {
+				printk(KERN_INFO
+				       "ERROR copying init cfg_db!\n");
+				retVal = -EFAULT;
+				goto error;
+			}
+			if (mt_copy_soc_perf_info_i
+			    (ptr_lut->soc_perf_init,
+			     (struct mtx_soc_perf32 __user *)compat_ptr(__tmp.
+									soc_perf_init),
+			     __tmp.soc_perf_init_length)) {
+				printk(KERN_INFO
+				       "ERROR copying term soc_perf!\n");
+				retVal = -EFAULT;
+				goto error;
+			}
+		}
+		/*
+		 * POLL
+		 */
+		{
+			if (mt_copy_mtx_msr_info_i
+			    (ptr_lut->msrs_poll,
+			     (struct mtx_msr32 __user *)compat_ptr(__tmp.
+								   msrs_poll),
+			     __tmp.msr_poll_length)) {
+				printk(KERN_INFO "ERROR copying poll msrs!\n");
+				retVal = -EFAULT;
+				goto error;
+			}
+			if (mt_copy_mmap_info_i
+			    (ptr_lut->mmap_poll,
+			     (struct memory_map32 __user *)compat_ptr(__tmp.
+								      mmap_poll),
+			     __tmp.mem_poll_length)) {
+				printk(KERN_INFO "ERROR copying poll mmap!\n");
+				retVal = -EFAULT;
+				goto error;
+			}
+			if (mt_copy_pci_info_i
+			    (ptr_lut->pci_ops_poll,
+			     (struct mtx_pci_ops32 __user *)compat_ptr(__tmp.
+								       pci_ops_poll),
+			     __tmp.pci_ops_poll_length)) {
+				printk(KERN_INFO "ERROR copying poll pci!\n");
+				retVal = -EFAULT;
+				goto error;
+			}
+			if (mt_copy_cfg_db_info_i
+			    (ptr_lut->cfg_db_poll,
+			     (u32 __user *) compat_ptr(__tmp.cfg_db_poll),
+			     __tmp.cfg_db_poll_length)) {
+				printk(KERN_INFO
+				       "ERROR copying poll cfg_db!\n");
+				retVal = -EFAULT;
+				goto error;
+			}
+			if (mt_copy_soc_perf_info_i
+			    (ptr_lut->soc_perf_poll,
+			     (struct mtx_soc_perf32 __user *)compat_ptr(__tmp.
+									soc_perf_poll),
+			     __tmp.soc_perf_poll_length)) {
+				printk(KERN_INFO
+				       "ERROR copying term soc_perf!\n");
+				retVal = -EFAULT;
+				goto error;
+			}
+		}
+		/*
+		 * TERM
+		 */
+		{
+			if (mt_copy_mtx_msr_info_i
+			    (ptr_lut->msrs_term,
+			     (struct mtx_msr32 __user *)compat_ptr(__tmp.
+								   msrs_term),
+			     __tmp.msr_term_length)) {
+				printk(KERN_INFO "ERROR copying term msrs!\n");
+				retVal = -EFAULT;
+				goto error;
+			}
+			if (mt_copy_mmap_info_i
+			    (ptr_lut->mmap_term,
+			     (struct memory_map32 __user *)compat_ptr(__tmp.
+								      mmap_term),
+			     __tmp.mem_term_length)) {
+				printk(KERN_INFO "ERROR copying term mmap!\n");
+				retVal = -EFAULT;
+				goto error;
+			}
+			if (mt_copy_pci_info_i
+			    (ptr_lut->pci_ops_term,
+			     (struct mtx_pci_ops32 __user *)compat_ptr(__tmp.
+								       pci_ops_term),
+			     __tmp.pci_ops_term_length)) {
+				printk(KERN_INFO "ERROR copying term pci!\n");
+				retVal = -EFAULT;
+				goto error;
+			}
+			if (mt_copy_cfg_db_info_i
+			    (ptr_lut->cfg_db_term,
+			     (u32 __user *) compat_ptr(__tmp.cfg_db_term),
+			     __tmp.cfg_db_term_length)) {
+				printk(KERN_INFO
+				       "ERROR copying term cfg_db!\n");
+				retVal = -EFAULT;
+				goto error;
+			}
+			if (mt_copy_soc_perf_info_i
+			    (ptr_lut->soc_perf_term,
+			     (struct mtx_soc_perf32 __user *)compat_ptr(__tmp.
+									soc_perf_term),
+			     __tmp.soc_perf_term_length)) {
+				printk(KERN_INFO
+				       "ERROR copying term soc_perf!\n");
+				retVal = -EFAULT;
+				goto error;
+			}
+		}
+	}
+
+	if (mt_init_msg_memory()) {
+		printk(KERN_INFO
+		       "ERROR allocating memory for matrix messages!\n");
+		goto error;
+	}
+	mt_calculate_memory_requirements();
+
+	io_pm_status_reg =
+	    (mt_platform_pci_read32(ptr_lut->pci_ops_poll->port) &
+	     PWR_MGMT_BASE_ADDR_MASK);
+	io_base_pwr_address =
+	    (mt_platform_pci_read32(ptr_lut->pci_ops_poll->port_island) &
+	     PWR_MGMT_BASE_ADDR_MASK);
+	mem_alloc_status = true;
+
+	/*
+	 * io_remap
+	 */
+	{
+		MATRIX_IO_REMAP_MEMORY(init);
+		MATRIX_IO_REMAP_MEMORY(poll);
+		MATRIX_IO_REMAP_MEMORY(term);
+	}
+
+	return retVal;
+
+error:
+	printk(KERN_INFO "Memory Initialization Error!\n");
+	mt_free_memory();
+	return -EFAULT;
+};
+#endif /* if 1*/
+
+#if 1
+static int mt_ioctl_mtx_msr_compat_i(struct mtx_msr_container __user *
+				     remote_args,
+				     struct mtx_msr_container32 __user *
+				     remote_args32)
+{
+	struct mtx_msr_container local_args;
+	struct mtx_msr_container32 local_args32;
+	unsigned long *buffer = NULL;
+	int err = 0;
+
+	if (copy_from_user(&local_args, remote_args, sizeof(local_args)) > 0) {
+		dev_dbg(matrix_device, "file : %s ,function : %s ,line %i\n",
+			__FILE__, __func__, __LINE__);
+		return -EFAULT;
+	}
+	/* printk(KERN_INFO "local.op = %lu, local.address = %lu\n", local_args.msrType1.operation, local_args.msrType1.ecx_address);*/
+
+	if (local_args.length > 0) {
+		MATRIX_VMALLOC(buffer,
+			       sizeof(unsigned long) * local_args.length,
+			       ERROR);
+		if (copy_from_user
+		    (buffer, local_args.buffer,
+		     (sizeof(unsigned long) * local_args.length)) > 0) {
+			dev_dbg(matrix_device,
+				"file : %s ,function : %s ,line %i\n", __FILE__,
+				__func__, __LINE__);
+			goto ERROR;
+		}
+	}
+	switch (local_args.msrType1.operation) {
+	case WRITE_OP:
+		err = MATRIX_WRMSR_ON_CPU(local_args.msrType1.n_cpu,
+					  local_args.msrType1.ecx_address,
+					  local_args.msrType1.eax_LSB,
+					  local_args.msrType1.edx_MSB);
+		break;
+	case READ_OP:
+		err = MATRIX_RDMSR_ON_CPU(local_args.msrType1.n_cpu,
+					  local_args.msrType1.ecx_address,
+					  (u32 *) & local_args.msrType1.eax_LSB,
+					  (u32 *) & local_args.msrType1.
+					  edx_MSB);
+		break;
+	case ENABLE_OP:
+		wrmsrl(local_args.msrType1.ecx_address,
+		       (unsigned long)&buffer[0]);
+		wrmsr(local_args.msrType1.ebx_value, 0x01, 0x00);
+		vfree(buffer);
+		return 0;
+	default:
+		dev_dbg(matrix_device,
+			"There is a problem in MSR Operation..\n");
+		goto ERROR;
+	}
+	if (err != 0)
+		goto ERROR;
+
+	local_args32.buffer = 0x0;	/* MAKE THIS EXPLICIT!*/
+	local_args32.length = local_args.length;
+	local_args32.msrType1.eax_LSB = local_args.msrType1.eax_LSB;
+	local_args32.msrType1.edx_MSB = local_args.msrType1.edx_MSB;
+	local_args32.msrType1.ecx_address = local_args.msrType1.ecx_address;
+	local_args32.msrType1.ebx_value = local_args.msrType1.ebx_value;
+	local_args32.msrType1.n_cpu = local_args.msrType1.n_cpu;
+	local_args32.msrType1.operation = local_args.msrType1.operation;
+
+	if (copy_to_user(remote_args32, &local_args32, sizeof(local_args32))) {
+		dev_dbg(matrix_device, "file : %s ,function : %s ,line %i\n",
+			__FILE__, __func__, __LINE__);
+		goto ERROR;
+	}
+	/* printk(KERN_INFO "OK: copied MSR32!\n");*/
+	vfree(buffer);
+	return 0;
+ERROR:
+	vfree(buffer);
+	return -EFAULT;
+};
+#endif /* if 0*/
+
+static int mt_ioctl_pci_config_compat_i(struct pci_config __user * remote_args,
+					struct pci_config32 __user *
+					remote_args32)
+{
+	struct pci_config local_args;
+	struct pci_config32 local_args32;
+	int ret = 0;
+
+#if DO_ANDROID
+	struct pci_dev *pdev = NULL;
+
+	if (copy_from_user(&local_args, remote_args, sizeof(local_args)) > 0) {
+		dev_dbg(matrix_device, "file : %s ,function : %s ,line %i\n",
+			__FILE__, __func__, __LINE__);
+		return -EFAULT;
+	}
+
+	pdev =
+	    pci_get_bus_and_slot(local_args.bus,
+				 PCI_DEVFN(local_args.device,
+					   local_args.function));
+	if (!pdev) {
+		ret = -EINVAL;
+		goto exit;
+	}
+	ret =
+	    pci_read_config_dword(pdev, local_args.offset,
+				  (u32 *) & local_args.data);
+
+	/* Write back to the same user buffer */
+
+	local_args32.bus = local_args.bus;
+	local_args32.device = local_args.device;
+	local_args32.function = local_args.function;
+	local_args32.offset = local_args.offset;
+	local_args32.data = local_args.data;
+
+	if (copy_to_user(remote_args32, &local_args32, sizeof(local_args32))) {
+		dev_dbg(matrix_device, "file : %s ,function : %s ,line %i\n",
+			__FILE__, __func__, __LINE__);
+		ret = -EFAULT;
+		goto exit;
+	}
+#endif /* DO_ANDROID*/
+exit:
+	return ret;
+
+};
+
+static long mt_get_scu_fw_version_compat_i(u16 __user * remote_data)
+{
+	u16 local_data = pw_scu_fw_major_minor;
+	if (put_user(local_data, remote_data)) {
+		printk(KERN_INFO
+		       "ERROR transfering SCU F/W version to userspace!\n");
+		return -EFAULT;
+	}
+	return 0;		/* SUCCESS*/
+};
+
+static long mt_device_compat_msr_ioctl_i(struct file *file,
+					 unsigned int ioctl_num,
+					 unsigned long ioctl_param)
+{
+	struct mtx_msr_container32 __user *__msr32 = compat_ptr(ioctl_param);
+	struct mtx_msr_container __user *__msr = NULL;
+	struct mtx_msr_container32 __tmp;
+	u32 data = 0;
+
+	if (copy_from_user(&__tmp, __msr32, sizeof(*__msr32))) {
+		printk(KERN_INFO
+		       "ERROR copying in mtx_msr32 struct from userspace\n");
+		return -PW_ERROR;
+	}
+	__msr = compat_alloc_user_space(sizeof(*__msr));
+	/* printk(KERN_INFO "length = %u, __msr = %p\n", __tmp.length, __msr);*/
+	if (__tmp.length == 0 || compat_ptr(__tmp.buffer) == NULL) {
+		if (put_user(__tmp.length, &__msr->length)) {
+			return -PW_ERROR;
+		}
+		if (put_user(compat_ptr(__tmp.buffer), &__msr->buffer)) {
+			return -PW_ERROR;
+		}
+		if (get_user(data, &__msr32->msrType1.eax_LSB)
+		    || put_user(data, &__msr->msrType1.eax_LSB)) {
+			return -PW_ERROR;
+		}
+		if (get_user(data, &__msr32->msrType1.edx_MSB)
+		    || put_user(data, &__msr->msrType1.edx_MSB)) {
+			return -PW_ERROR;
+		}
+		if (get_user(data, &__msr32->msrType1.ecx_address)
+		    || put_user(data, &__msr->msrType1.ecx_address)) {
+			return -PW_ERROR;
+		}
+		if (get_user(data, &__msr32->msrType1.ebx_value)
+		    || put_user(data, &__msr->msrType1.ebx_value)) {
+			return -PW_ERROR;
+		}
+		if (get_user(data, &__msr32->msrType1.n_cpu)
+		    || put_user(data, &__msr->msrType1.n_cpu)) {
+			return -PW_ERROR;
+		}
+		if (get_user(data, &__msr32->msrType1.operation)
+		    || put_user(data, &__msr->msrType1.operation)) {
+			return -PW_ERROR;
+		}
+		/*
+		 * OK, everything copied. Now perform the IOCTL action here.
+		 * We need to do this here instead of delegating to 'matrix_ioctl()'
+		 * because this IOCTL needs to return information to Ring3, which means
+		 * we need the *original* ptr to call 'put_user()' on.
+		 */
+		return mt_ioctl_mtx_msr_compat_i(__msr, __msr32);
+		/*
+		   printk(KERN_INFO "ERROR: compat_msr_ioctl not supported (yet)!\n");
+		   return -PW_ERROR;
+		 */
+	} else {
+		printk(KERN_INFO
+		       "ERROR: NO support for \"ENABLE_OP\" in compat space!\n");
+		return -PW_ERROR;
+	}
+};
+
+static long mt_device_compat_pci_config_ioctl_i(struct file *file,
+						unsigned int ioctl_num,
+						unsigned long ioctl_param)
+{
+	struct pci_config32 __user *__pci32 = compat_ptr(ioctl_param);
+	struct pci_config __user *__pci = NULL;
+	struct pci_config32 __tmp;
+	/* u32 data;*/
+	size_t __size = 0;
+	u32 __dst_idx = 0;
+	u8 __user *__buffer = NULL;
+	/* struct pci_config mtx_pci_drv;*/
+	/* unsigned long *buffer = NULL;*/
+	/* int err = 0;*/
+
+	/* printk(KERN_INFO "OK, received \"matrix\" compat ioctl!\n");*/
+
+	/* printk(KERN_INFO "sizeof __tmp = %u\n", sizeof(__tmp));*/
+
+	/*
+	 * Basic algo:
+	 * 1. Calculate total memory requirement for the 64b structure (including space required for all arrays etc.)
+	 * 2. Allocate entire lookup table chunk in 'compat' space (via 'compat_alloc_user_space()').
+	 * 3. Patch up pointers.
+	 * 4. Copy over the "header" (all of the non-pointer fields) from 32b --> 64b
+	 */
+
+	if (copy_from_user(&__tmp, __pci32, sizeof(__tmp))) {
+		printk(KERN_INFO "ERROR in length user copy!\n");
+		return -EFAULT;
+	}
+	/*
+	 * Step 1.
+	 */
+	{
+		__size = sizeof(struct pci_config);
+	}
+	/*
+	 * Step 2
+	 */
+	__buffer = compat_alloc_user_space(__size);
+	if (!__buffer) {
+		printk(KERN_INFO
+		       "ERROR allocating compat space for size = %u!\n",
+		       (unsigned)__size);
+		return -EFAULT;
+	}
+	/* printk(KERN_INFO "OK: ALLOCATED compat space of size = %u\n", __size);*/
+	/*
+	 * Step 3
+	 */
+	{
+		__dst_idx = 0;
+		__pci = (struct pci_config *)&__buffer[__dst_idx];
+		__dst_idx += sizeof(*__pci);
+	}
+	/*
+	 * Step 4
+	 */
+	{
+		if (put_user(__tmp.bus, &__pci->bus)) {
+			return -EFAULT;
+		}
+		if (put_user(__tmp.device, &__pci->device)) {
+			return -EFAULT;
+		}
+		if (put_user(__tmp.function, &__pci->function)) {
+			return -EFAULT;
+		}
+		if (put_user(__tmp.offset, &__pci->offset)) {
+			return -EFAULT;
+		}
+		if (put_user(__tmp.data, &__pci->data)) {
+			return -EFAULT;
+		}
+	}
+	/*
+	 * OK, everything copied. Now perform the IOCTL action here.
+	 * We need to do this here instead of delegating to 'matrix_ioctl()'
+	 * because this IOCTL needs to return information to Ring3, which means
+	 * we need the *original* ptr to call 'put_user()' on.
+	 */
+	return mt_ioctl_pci_config_compat_i(__pci, __pci32);
+};
+
+static long mt_device_compat_config_db_ioctl_i(struct file *file,
+					       unsigned int ioctl_num,
+					       unsigned long ioctl_param)
+{
+#if DO_ANDROID
+	u32 __user *__addr32 = compat_ptr(ioctl_param);
+	u32 buf, data;
+	if (get_user(buf, __addr32)) {
+		printk(KERN_INFO "ERROR getting value!\n");
+		return -EFAULT;
+	}
+	/*
+	 * It is OK to use this 32b value directly
+	 */
+	/* printk(KERN_INFO "ADDR = 0x%x\n", buf);*/
+
+	data = mt_platform_pci_read32(buf);
+	/* Write back to the same user buffer */
+	if (put_user(data, __addr32)) {
+		printk(KERN_INFO "ERROR putting value back to userspace!\n");
+		return -EFAULT;
+	}
+#endif /* DO_ANDROID*/
+	return 0;
+};
+
+#endif /* HAVE_COMPAT_IOCTL && CONFIG_X86_64*/
+
+int mt_register_dev(void)
+{
+	int error;
+	error = alloc_chrdev_region(&matrix_dev, 0, 1, DRV_NAME);
+	if (error < 0) {
+		pr_err("Matrix : Could not allocate char dev region");
+		/* return 1;*/
+		return error;
+	}
+	matrix_major_number = MAJOR(matrix_dev);
+	matrix_class = class_create(THIS_MODULE, DRV_NAME);
+	if (IS_ERR(matrix_class)) {
+		pr_err("Matrix :Error registering class\n");
+		/* return 1;*/
+		return -MT_ERROR;
+	}
+	device_create(matrix_class, NULL, matrix_dev, NULL, DRV_NAME);
+
+	/*Device Registration */
+	matrix_cdev = cdev_alloc();
+	if (!matrix_cdev) {
+		pr_err("Matrix :Could not create device");
+		return -ENOMEM;
+	}
+	matrix_cdev->owner = THIS_MODULE;
+	matrix_cdev->ops = &matrix_fops;
+	matrix_device = (struct device *)(unsigned long)matrix_cdev->dev;
+	if (cdev_add(matrix_cdev, matrix_dev, 1) < 0) {
+		pr_err("Error registering device driver\n");
+		/* return error;*/
+		return -MT_ERROR;
+	}
+	pr_info("Matrix Registered Successfully with major no.[%d]\n",
+		matrix_major_number);
+	return MT_SUCCESS;
+}
+
+void mt_unregister_dev(void)
+{
+	pr_info("Matrix De-Registered Successfully...\n");
+	unregister_chrdev(matrix_major_number, DRV_NAME);
+	device_destroy(matrix_class, matrix_dev);
+	class_destroy(matrix_class);
+	unregister_chrdev_region(matrix_dev, 1);
+	cdev_del(matrix_cdev);
+};
diff --git a/drivers/external_drivers/drivers/socwatch/socwatch_driver/pw_matrix.h b/drivers/external_drivers/drivers/socwatch/socwatch_driver/pw_matrix.h
new file mode 100644
index 0000000..ee2a4b37
--- /dev/null
+++ b/drivers/external_drivers/drivers/socwatch/socwatch_driver/pw_matrix.h
@@ -0,0 +1,67 @@
+/* ***********************************************************************************************
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2013 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  Contact Information:
+  SOCWatch Developer Team <socwatchdevelopers@intel.com>
+
+  BSD LICENSE
+
+  Copyright(c) 2013 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+  ***********************************************************************************************
+*/
+
+#ifndef _PW_MATRIX_H_
+#define _PW_MATRIX_H_ 1
+
+extern int mt_register_dev(void);
+extern void mt_unregister_dev(void);
+
+#endif /* _PW_MATRIX_H_*/
diff --git a/drivers/external_drivers/drivers/socwatch/socwatch_driver/pw_mem.h b/drivers/external_drivers/drivers/socwatch/socwatch_driver/pw_mem.h
new file mode 100644
index 0000000..44d5e10
--- /dev/null
+++ b/drivers/external_drivers/drivers/socwatch/socwatch_driver/pw_mem.h
@@ -0,0 +1,358 @@
+/* ***********************************************************************************************
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2013 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  Contact Information:
+  SOCWatch Developer Team <socwatchdevelopers@intel.com>
+
+  BSD LICENSE
+
+  Copyright(c) 2013 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+  ***********************************************************************************************
+*/
+
+/*
+ * Description: file containing memory management routines
+ * used by the power driver.
+ */
+
+#ifndef _PW_MEM_H_
+#define _PW_MEM_H_ 1
+
+#include <linux/slab.h>
+#include <linux/list.h>
+
+#include "pw_lock_defs.h"
+
+/*
+ * How do we behave if we ever
+ * get an allocation error?
+ * (a) Setting to '1' REFUSES ANY FURTHER
+ * allocation requests.
+ * (b) Setting to '0' treats each
+ * allocation request as separate, and
+ * handles them on an on-demand basis
+ */
+#define DO_MEM_PANIC_ON_ALLOC_ERROR 0
+
+#if DO_MEM_PANIC_ON_ALLOC_ERROR
+/*
+ * If we ever run into memory allocation errors then
+ * stop (and drop) everything.
+ */
+static atomic_t pw_mem_should_panic = ATOMIC_INIT(0);
+/*
+ * Macro to check if PANIC is on.
+ */
+#define MEM_PANIC() do{ atomic_set(&pw_mem_should_panic, 1); smp_mb(); }while(0)
+#define SHOULD_TRACE() ({bool __tmp = false; smp_mb(); __tmp = (atomic_read(&pw_mem_should_panic) == 0); __tmp;})
+
+#else /* if !DO_MEM_PANIC_ON_ALLOC_ERROR*/
+
+#define MEM_PANIC()
+#define SHOULD_TRACE() (true)
+
+#endif
+
+/*
+ * Toggle memory debugging.
+ * In memory debugging mode we track
+ * memory usage statistics.
+ */
+#define DO_MEM_DEBUGGING 0
+
+#if DO_MEM_DEBUGGING
+/*
+ * Variables to track memory usage.
+ */
+/*
+ * TOTAL num bytes allocated.
+ */
+static u64 total_num_bytes_alloced = 0;
+/*
+ * Num of allocated bytes that have
+ * not yet been freed.
+ */
+static u64 curr_num_bytes_alloced = 0;
+/*
+ * Max # of allocated bytes that
+ * have not been freed at any point
+ * in time.
+ */
+static u64 max_num_bytes_alloced = 0;
+/*
+ * Lock to guard access to memory
+ * debugging stats.
+ */
+static DEFINE_SPINLOCK(pw_kmalloc_lock);
+
+/*
+ * Helper macros to print out
+ * mem debugging stats.
+ */
+#define TOTAL_NUM_BYTES_ALLOCED() total_num_bytes_alloced
+#define CURR_NUM_BYTES_ALLOCED() curr_num_bytes_alloced
+#define MAX_NUM_BYTES_ALLOCED() max_num_bytes_alloced
+
+/*
+ * MAGIC number based memory tracker. Relies on
+ * storing (a) a MAGIC marker and (b) the requested
+ * size WITHIN the allocated block of memory. Standard
+ * malloc-tracking stuff, really.
+ *
+ * Overview:
+ * (1) ALLOCATION:
+ * When asked to allocate a block of 'X' bytes, allocate
+ * 'X' + 8 bytes. Then, in the FIRST 4 bytes, write the
+ * requested size. In the NEXT 4 bytes, write a special
+ * (i.e. MAGIC) number to let our deallocator know that
+ * this block of memory was allocated using this technique.
+ * Also, keep track of the number of bytes allocated.
+ *
+ * (2) DEALLOCATION:
+ * When given an object to deallocate, we first check
+ * the MAGIC number by decrementing the pointer by
+ * 4 bytes and reading the (integer) stored there.
+ * After ensuring the pointer was, in fact, allocated
+ * by us, we then read the size of the allocated
+ * block (again, by decrementing the pointer by 4
+ * bytes and reading the integer size). We
+ * use this size argument to decrement # of bytes
+ * allocated.
+ */
+#define PW_MEM_MAGIC 0xdeadbeef
+
+#define PW_ADD_MAGIC(x) ({char *__tmp1 = (char *)(x); *((int *)__tmp1) = PW_MEM_MAGIC; __tmp1 += sizeof(int); __tmp1;})
+#define PW_ADD_SIZE(x,s) ({char *__tmp1 = (char *)(x); *((int *)__tmp1) = (s); __tmp1 += sizeof(int); __tmp1;})
+#define PW_ADD_STAMP(x,s) PW_ADD_MAGIC(PW_ADD_SIZE((x), (s)))
+
+#define PW_IS_MAGIC(x) ({int *__tmp1 = (int *)((char *)(x) - sizeof(int)); *__tmp1 == PW_MEM_MAGIC;})
+#define PW_REMOVE_STAMP(x) ({char *__tmp1 = (char *)(x); __tmp1 -= sizeof(int) * 2; __tmp1;})
+#define PW_GET_SIZE(x) (*((int *)(x)))
+
+static __always_inline void *pw_kmalloc(size_t size, gfp_t flags)
+{
+	size_t act_size = 0;
+	void *retVal = NULL;
+	/*
+	 * No point in allocating if
+	 * we were unable to allocate
+	 * previously!
+	 */
+	{
+		if (!SHOULD_TRACE()) {
+			return NULL;
+		}
+	}
+	/*
+	 * (1) Allocate requested block.
+	 */
+	act_size = size + sizeof(int) * 2;
+	retVal = kmalloc(act_size, flags);
+	if (!retVal) {
+		/*
+		 * Panic if we couldn't allocate
+		 * requested memory.
+		 */
+		printk(KERN_INFO "ERROR: could NOT allocate memory!\n");
+		MEM_PANIC();
+		return NULL;
+	}
+	/*
+	 * (2) Update memory usage stats.
+	 */
+	LOCK(pw_kmalloc_lock);
+	{
+		total_num_bytes_alloced += size;
+		curr_num_bytes_alloced += size;
+		if (curr_num_bytes_alloced > max_num_bytes_alloced)
+			max_num_bytes_alloced = curr_num_bytes_alloced;
+	}
+	UNLOCK(pw_kmalloc_lock);
+	/*
+	 * Debugging ONLY.
+	 */
+	if (false && total_num_bytes_alloced > 3200000) {
+		MEM_PANIC();
+		smp_mb();
+		printk(KERN_INFO
+		       "DEBUG: Total # bytes = %llu, SET PANIC FLAG!\n",
+		       total_num_bytes_alloced);
+	}
+	/*
+	 * (3) And finally, add the 'size'
+	 * and 'magic' stamps.
+	 */
+	return PW_ADD_STAMP(retVal, size);
+};
+
+static __always_inline char *pw_kstrdup(const char *str, gfp_t flags)
+{
+	char *ret = NULL;
+	size_t ret_size = strlen(str), str_size = ret_size + 1;
+
+	/*
+	 * No point in allocating if
+	 * we were unable to allocate
+	 * previously!
+	 */
+	if (!SHOULD_TRACE() || !str) {
+		return NULL;
+	}
+
+	/*
+	 * (1) Use 'pw_kmalloc(...)' to allocate a
+	 * block of memory for our string.
+	 */
+	if ((ret = pw_kmalloc(str_size, flags)) == NULL) {
+		/*
+		 * No need to PANIC -- 'pw_kmalloc(...)'
+		 * would have done that already.
+		 */
+		return NULL;
+	}
+
+	/*
+	 * (2) Copy string contents into
+	 * newly allocated block.
+	 */
+	memcpy(ret, str, ret_size);
+	ret[ret_size] = '\0';
+
+	return ret;
+
+};
+
+static void pw_kfree(const void *obj)
+{
+	void *tmp = NULL;
+	size_t size = 0;
+
+	/*
+	 * (1) Check if this block was allocated
+	 * by us.
+	 */
+	if (!PW_IS_MAGIC(obj)) {
+		printk(KERN_INFO "ERROR: %p is NOT a PW_MAGIC ptr!\n", obj);
+		return;
+	}
+	/*
+	 * (2) Strip the magic num...
+	 */
+	tmp = PW_REMOVE_STAMP(obj);
+	/*
+	 * ...and retrieve size of block.
+	 */
+	size = PW_GET_SIZE(tmp);
+	/*
+	 * (3) Update memory usage stats.
+	 */
+	LOCK(pw_kmalloc_lock);
+	{
+		curr_num_bytes_alloced -= size;
+	}
+	UNLOCK(pw_kmalloc_lock);
+	/*
+	 * And finally, free the block.
+	 */
+	kfree(tmp);
+};
+
+#else /* !DO_MEM_DEBUGGING*/
+
+/*
+ * Helper macros to print out
+ * mem debugging stats.
+ */
+#define TOTAL_NUM_BYTES_ALLOCED() (u64)0
+#define CURR_NUM_BYTES_ALLOCED() (u64)0
+#define MAX_NUM_BYTES_ALLOCED() (u64)0
+
+static __always_inline void *pw_kmalloc(size_t size, int flags)
+{
+	void *ret = NULL;
+
+	if (SHOULD_TRACE()) {
+		if (!(ret = kmalloc(size, flags))) {
+			/*
+			 * Panic if we couldn't allocate
+			 * requested memory.
+			 */
+			MEM_PANIC();
+		}
+	}
+	return ret;
+};
+
+static __always_inline char *pw_kstrdup(const char *str, int flags)
+{
+	char *ret = NULL;
+
+	if (SHOULD_TRACE()) {
+		if (!(ret = kstrdup(str, flags))) {
+			/*
+			 * Panic if we couldn't allocate
+			 * requested memory.
+			 */
+			MEM_PANIC();
+		}
+	}
+
+	return ret;
+};
+
+static __always_inline void pw_kfree(void *mem)
+{
+	kfree(mem);
+};
+
+#endif /* DO_MEM_DEBUGGING*/
+
+#endif /* _PW_MEM_H_*/
diff --git a/drivers/external_drivers/drivers/socwatch/socwatch_driver/pw_msg.h b/drivers/external_drivers/drivers/socwatch/socwatch_driver/pw_msg.h
new file mode 100644
index 0000000..615bd3c
--- /dev/null
+++ b/drivers/external_drivers/drivers/socwatch/socwatch_driver/pw_msg.h
@@ -0,0 +1,100 @@
+/* ***********************************************************************************************
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2013 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  Contact Information:
+  SOCWatch Developer Team <socwatchdevelopers@intel.com>
+
+  BSD LICENSE
+
+  Copyright(c) 2013 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+  ***********************************************************************************************
+*/
+
+#ifndef _PW_MESSAGE_H_
+#define _PW_MESSAGE_H_ 1
+
+#include "pw_types.h"
+
+#pragma pack(push)		/* Store current alignment */
+#pragma pack(2)			/* Set new alignment -- 2 byte boundaries */
+
+/*
+ * The main PWCollector_sample structure.
+ * are encoded in these.
+ */
+/*
+ * "Final" message header. ALL Ring 0 --> Ring 3 (data) messages are encoded in these.
+ * -------------------------------------------------------------------------------------------
+ * MUST Set "cpuidx" to ZERO for payloads that don't require a cpu field (e.g. GFX C-states).
+ * (cpuidx is included for all messages because it makes sorting data easier)
+ * -------------------------------------------------------------------------------------------
+ * WARNING!: This struct is being used and locally defined in the VMM of the SoFIA OS src. Any
+ * changes in this struct should be reflected in the VMM/SoFIA copy as well.
+ * Contact Robert Knight/Jay Chheda if you have any questions.
+ */
+typedef struct PWCollector_msg PWCollector_msg_t;
+struct PWCollector_msg {
+	u64 tsc;		/* TSC of message.*/
+	/* GEH: Is this equal to wakeup TSC for c_msg_t samples?*/
+	u16 data_len;		/* length of payload message in bytes (not including this header) represented by p_data.*/
+	u16 cpuidx;		/* GEH: Need to define what this is for post-processed samples*/
+	u8 data_type;		/* The type of payload encoded by 'p_data': one of 'sample_type_t'*/
+	u8 padding;		/* The compiler would have inserted it anyway!*/
+
+	u64 p_data;		/* For SW1 file, this is the payload: one of *_msg_t corresponding to data_type (inline memory).*/
+	/* For internal data, this field is a pointer to the non-contiguous payload memory (not inline).*/
+	/* GU: changed from "u8[1]" to "u64" to get the driver to compile*/
+};
+#define PW_MSG_HEADER_SIZE ( sizeof(PWCollector_msg_t) - sizeof(u64) )
+
+#pragma pack(pop)		/* Restore previous alignment */
+
+#endif /* _PW_MESSAGE_H_*/
diff --git a/drivers/external_drivers/drivers/socwatch/socwatch_driver/pw_output_buffer.c b/drivers/external_drivers/drivers/socwatch/socwatch_driver/pw_output_buffer.c
new file mode 100644
index 0000000..827fe80
--- /dev/null
+++ b/drivers/external_drivers/drivers/socwatch/socwatch_driver/pw_output_buffer.c
@@ -0,0 +1,655 @@
+/* ***********************************************************************************************
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2013 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  Contact Information:
+  SOCWatch Developer Team <socwatchdevelopers@intel.com>
+
+  BSD LICENSE
+
+  Copyright(c) 2013 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+  ***********************************************************************************************
+*/
+
+#include <asm/local.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include <linux/mm.h>		/* for "remap_pfn_range"*/
+#include <asm/io.h>		/* for "virt_to_phys"*/
+#include <asm/uaccess.h>	/* for "copy_to_user"*/
+
+#include "pw_msg.h"
+#include "pw_structs.h"
+#include "pw_output_buffer.h"
+#include "pw_defines.h"
+#include "pw_mem.h"
+
+/*
+ * Global variable definitions.
+ */
+u64 pw_num_samples_produced = 0, pw_num_samples_dropped = 0;
+unsigned long pw_buffer_alloc_size = 0;
+int pw_max_num_cpus = -1;
+/*
+ * The size of the 'buffer' data array in each segment.
+ * This is 64kB - 2 * sizeof(u32)
+ */
+#define PW_SEG_DATA_SIZE 65528
+#define PW_SEG_SIZE_BYTES ( PW_SEG_DATA_SIZE + 2 * sizeof(u32) )	/* 64 kB */
+#define PW_DATA_BUFFER_SIZE (PW_SEG_SIZE_BYTES)
+#define PW_OUTPUT_BUFFER_SIZE (PW_DATA_BUFFER_SIZE * NUM_SEGS_PER_BUFFER)
+/*
+ * How much space is available in a given segment?
+ */
+#define SPACE_AVAIL(seg) ( (seg)->is_full ? 0 : (PW_SEG_DATA_SIZE - (seg)->bytes_written) )
+#define GET_OUTPUT_BUFFER(cpu) &per_cpu_output_buffers[(cpu)]
+/*
+ * Convenience macro: iterate over each segment in a per-cpu output buffer.
+ */
+#define for_each_segment(i) for (i=0; i<NUM_SEGS_PER_BUFFER; ++i)
+/*
+ * How many buffers are we using?
+ */
+/* #define GET_NUM_OUTPUT_BUFFERS() (pw_max_num_cpus)*/
+#define GET_NUM_OUTPUT_BUFFERS() (pw_max_num_cpus + 1)
+/*
+ * Convenience macro: iterate over each per-cpu output buffer.
+ */
+#define for_each_output_buffer(i) for (i=0; i<GET_NUM_OUTPUT_BUFFERS(); ++i)
+
+/*
+ * Typedefs and forward declarations.
+ */
+typedef struct pw_data_buffer pw_data_buffer_t;
+typedef struct pw_output_buffer pw_output_buffer_t;
+
+/*
+ * Output buffer data structures.
+ */
+struct pw_data_buffer {
+	u32 bytes_written;
+	u32 is_full;
+	char buffer[1];
+};
+
+struct pw_output_buffer {
+	pw_data_buffer_t *buffers[NUM_SEGS_PER_BUFFER];
+	int buff_index;
+	u32 produced_samples;
+	u32 dropped_samples;
+	int last_seg_read;
+	unsigned long free_pages;
+	unsigned long mem_alloc_size;
+} ____cacheline_aligned_in_smp;
+
+/*
+ * Local function declarations.
+ */
+pw_data_buffer_t *pw_get_next_available_segment_i(pw_output_buffer_t * buffer,
+						  int size);
+
+/*
+ * Local variable definitions.
+ */
+/*
+ * The alarm queue.
+ */
+wait_queue_head_t pw_reader_queue;
+/*
+ * Per-cpu output buffers.
+ */
+pw_output_buffer_t *per_cpu_output_buffers = NULL;
+/*
+ * Variables for book keeping.
+ */
+/*
+static DEFINE_PER_CPU(local_t, pw_num_tps) = LOCAL_INIT(0);
+static DEFINE_PER_CPU(local_t, pw_num_d_msg) = LOCAL_INIT(0);
+*/
+volatile unsigned long reader_map = 0;
+int pw_last_cpu_read = -1;
+s32 pw_last_mask = -1;
+
+/*
+ * Function definitions.
+ */
+pw_data_buffer_t inline *pw_get_next_available_segment_i(pw_output_buffer_t *
+							 buffer, int size)
+{
+	int i = 0;
+	int buff_index = buffer->buff_index;
+
+	for_each_segment(i) {
+		buff_index = CIRCULAR_INC(buff_index, NUM_SEGS_PER_BUFFER_MASK);
+		if (SPACE_AVAIL(buffer->buffers[buff_index]) >= size) {
+			buffer->buff_index = buff_index;
+			return buffer->buffers[buff_index];
+		}
+	}
+	return NULL;
+};
+
+static pw_data_buffer_t *get_producer_seg_i(size_t size, int *cpu,
+					    u32 * write_index,
+					    bool * should_wakeup,
+					    bool * did_drop_sample)
+{
+	pw_data_buffer_t *seg = NULL;
+	unsigned long flags = 0;
+
+	local_irq_save(flags);
+	/* get_cpu();*/
+	{
+		pw_output_buffer_t *buffer = GET_OUTPUT_BUFFER(*cpu = CPU());
+		int buff_index = buffer->buff_index;
+		if (buff_index < 0 || buff_index >= NUM_SEGS_PER_BUFFER) {
+			/* printk(KERN_INFO "ERROR: cpu = %d, buff_index = %d\n", cpu, buff_index);*/
+			seg = NULL;
+			goto prod_seg_done;
+		}
+		seg = buffer->buffers[buff_index];
+
+		if (unlikely(SPACE_AVAIL(seg) < size)) {
+			seg->is_full = 1;
+			*should_wakeup = true;
+			seg = pw_get_next_available_segment_i(buffer, size);
+			/* seg = NULL;*/
+			if (seg == NULL) {
+				/*
+				 * We couldn't find a non-full segment.
+				 */
+				buffer->dropped_samples++;
+				*did_drop_sample = true;
+				goto prod_seg_done;
+			}
+		}
+		*write_index = seg->bytes_written;
+		seg->bytes_written += size;
+
+		buffer->produced_samples++;
+	}
+prod_seg_done:
+	/* put_cpu();*/
+	local_irq_restore(flags);
+	return seg;
+};
+
+int pw_produce_generic_msg(struct PWCollector_msg *msg, bool allow_wakeup)
+{
+	int retval = PW_SUCCESS;
+	bool should_wakeup = false;
+	bool should_print_error = false;
+	bool did_drop_sample = false;
+	int cpu = -1;
+	bool did_switch_buffer = false;
+	int size = 0;
+	pw_data_buffer_t *seg = NULL;
+	char *dst = NULL;
+	u32 write_index = 0;
+
+	if (!msg) {
+		pw_pr_error("ERROR: CANNOT produce a NULL msg!\n");
+		return -PW_ERROR;
+	}
+
+	size = msg->data_len + PW_MSG_HEADER_SIZE;
+
+	pw_pr_debug("[%d]: size = %d\n", RAW_CPU(), size);
+
+	seg =
+	    get_producer_seg_i(size, &cpu, &write_index, &should_wakeup,
+			       &did_drop_sample);
+
+	if (likely(seg)) {
+		dst = &seg->buffer[write_index];
+		*((PWCollector_msg_t *) dst) = *msg;
+		dst += PW_MSG_HEADER_SIZE;
+		memcpy(dst, (void *)((unsigned long)msg->p_data),
+		       msg->data_len);
+	} else {
+		pw_pr_warn("WARNING: NULL seg! Msg type = %u\n",
+			   msg->data_type);
+	}
+
+	if (unlikely
+	    (should_wakeup && allow_wakeup
+	     && waitqueue_active(&pw_reader_queue))) {
+		set_bit(cpu, &reader_map);	/* we're guaranteed this won't get reordered!*/
+		smp_mb();	/* TODO: do we really need this?*/
+		/* printk(KERN_INFO "[%d]: has full seg!\n", cpu);*/
+		pw_pr_debug(KERN_INFO "[%d]: has full seg!\n", cpu);
+		wake_up_interruptible(&pw_reader_queue);
+	}
+
+	if (did_drop_sample) {
+		/* pw_pr_warn("Dropping sample\n");*/
+	}
+
+	if (should_print_error) {
+		/* pw_pr_error("ERROR in produce!\n");*/
+	}
+
+	if (did_switch_buffer) {
+		/* pw_pr_debug("[%d]: switched sub buffers!\n", cpu);*/
+	}
+
+	return retval;
+};
+
+int pw_produce_generic_msg_on_cpu(int cpu, struct PWCollector_msg *msg,
+				  bool allow_wakeup)
+{
+	/* unsigned long flags = 0;*/
+	const int retval = PW_SUCCESS;
+	bool should_wakeup = false;
+	bool should_print_error = false;
+	bool did_drop_sample = false;
+	bool did_switch_buffer = false;
+	int size = msg->data_len + PW_MSG_HEADER_SIZE;
+
+	pw_pr_debug("[%d]: cpu = %d, size = %d\n", RAW_CPU(), cpu, size);
+
+	/* local_irq_save(flags);*/
+	/* get_cpu();*/
+	{
+		pw_output_buffer_t *buffer = GET_OUTPUT_BUFFER(cpu);
+		int buff_index = buffer->buff_index;
+		pw_data_buffer_t *seg = buffer->buffers[buff_index];
+		char *dst = NULL;
+
+		if (unlikely(SPACE_AVAIL(seg) < size)) {
+			seg->is_full = 1;
+			should_wakeup = true;
+			seg = pw_get_next_available_segment_i(buffer, size);
+			if (seg == NULL) {
+				/*
+				 * We couldn't find a non-full segment.
+				 */
+				/* retval = -PW_ERROR;*/
+				should_wakeup = true;
+				buffer->dropped_samples++;
+				did_drop_sample = true;
+				goto done;
+			}
+		}
+
+		dst = &seg->buffer[seg->bytes_written];
+
+		*((PWCollector_msg_t *) dst) = *msg;
+		dst += PW_MSG_HEADER_SIZE;
+		/* pw_pr_debug("Diff = %d\n", (dst - &seg->buffer[seg->bytes_written]));*/
+		memcpy(dst, (void *)((unsigned long)msg->p_data),
+		       msg->data_len);
+
+		seg->bytes_written += size;
+
+		buffer->produced_samples++;
+
+		pw_pr_debug(KERN_INFO "OK: [%d] PRODUCED a generic msg!\n",
+			    cpu);
+	}
+done:
+	/* local_irq_restore(flags);*/
+	/* put_cpu();*/
+
+	if (should_wakeup && allow_wakeup && waitqueue_active(&pw_reader_queue)) {
+		set_bit(cpu, &reader_map);	/* we're guaranteed this won't get reordered!*/
+		smp_mb();	/* TODO: do we really need this?*/
+		/* printk(KERN_INFO "[%d]: has full seg!\n", cpu);*/
+		pw_pr_debug(KERN_INFO "[%d]: has full seg!\n", cpu);
+		wake_up_interruptible(&pw_reader_queue);
+	}
+
+	if (did_drop_sample) {
+		/* pw_pr_warn("Dropping sample\n");*/
+	}
+
+	if (should_print_error) {
+		/* pw_pr_error("ERROR in produce!\n");*/
+	}
+
+	if (did_switch_buffer) {
+		/* pw_pr_debug("[%d]: switched sub buffers!\n", cpu);*/
+	}
+
+	return retval;
+};
+
+int pw_init_per_cpu_buffers(void)
+{
+	int cpu = -1;
+	unsigned long per_cpu_mem_size = PW_OUTPUT_BUFFER_SIZE;
+
+	/* if (pw_max_num_cpus <= 0)*/
+	if (GET_NUM_OUTPUT_BUFFERS() <= 0) {
+		/* pw_pr_error("ERROR: max # cpus = %d\n", pw_max_num_cpus);*/
+		pw_pr_error("ERROR: max # output buffers= %d\n",
+			    GET_NUM_OUTPUT_BUFFERS());
+		return -PW_ERROR;
+	}
+
+	pw_pr_debug("DEBUG: pw_max_num_cpus = %d, num output buffers = %d\n",
+		    pw_max_num_cpus, GET_NUM_OUTPUT_BUFFERS());
+
+	per_cpu_output_buffers =
+	    (pw_output_buffer_t *) pw_kmalloc(sizeof(pw_output_buffer_t) *
+					      GET_NUM_OUTPUT_BUFFERS(),
+					      GFP_KERNEL | __GFP_ZERO);
+	if (per_cpu_output_buffers == NULL) {
+		pw_pr_error
+		    ("ERROR allocating space for per-cpu output buffers!\n");
+		pw_destroy_per_cpu_buffers();
+		return -PW_ERROR;
+	}
+	/* for (cpu=0; cpu<pw_max_num_cpus; ++cpu)*/
+	for_each_output_buffer(cpu) {
+		pw_output_buffer_t *buffer = &per_cpu_output_buffers[cpu];
+		char *buff = NULL;
+		int i = 0;
+		buffer->mem_alloc_size = per_cpu_mem_size;
+		buffer->free_pages =
+		    __get_free_pages(GFP_KERNEL | __GFP_ZERO,
+				     get_order(per_cpu_mem_size));
+		pw_buffer_alloc_size +=
+		    (1 << get_order(per_cpu_mem_size)) * PAGE_SIZE;
+		if (buffer->free_pages == 0) {
+			pw_pr_error("ERROR allocating pages for buffer [%d]!\n",
+				    cpu);
+			pw_destroy_per_cpu_buffers();
+			return -PW_ERROR;
+		}
+		buff = (char *)buffer->free_pages;
+		for_each_segment(i) {
+			buffer->buffers[i] = (pw_data_buffer_t *) buff;
+			buff += PW_DATA_BUFFER_SIZE;
+		}
+	}
+
+	{
+		init_waitqueue_head(&pw_reader_queue);
+	}
+	return PW_SUCCESS;
+};
+
+void pw_destroy_per_cpu_buffers(void)
+{
+	int cpu = -1;
+
+#if DO_DEBUG_OUTPUT
+	if (per_cpu_output_buffers != NULL) {
+		/*
+		 * Testing!
+		 */
+		int cpu = -1, i = 0;
+		/* for_each_possible_cpu(cpu) {*/
+		/* for (cpu=0; cpu<pw_max_num_cpus; ++cpu)*/
+		for_each_output_buffer(cpu) {
+			pw_pr_debug("CPU: %d: # dropped = %d\n", cpu,
+				    per_cpu_output_buffers[cpu].
+				    dropped_samples);
+			for_each_segment(i) {
+				pw_data_buffer_t *buffer =
+				    per_cpu_output_buffers[cpu].buffers[i];
+				pw_pr_debug
+				    ("\tBuff [%d]: bytes_written = %u, is_full = %s, buffer = %p\n",
+				     i, buffer->bytes_written,
+				     GET_BOOL_STRING(buffer->is_full),
+				     buffer->buffer);
+			}
+		}
+	}
+#endif /* DO_DEBUG_OUTPUT*/
+	if (per_cpu_output_buffers != NULL) {
+		/* for_each_possible_cpu(cpu) {*/
+		/* for (cpu=0; cpu<pw_max_num_cpus; ++cpu)*/
+		for_each_output_buffer(cpu) {
+			pw_output_buffer_t *buffer =
+			    &per_cpu_output_buffers[cpu];
+			if (buffer->free_pages != 0) {
+				free_pages(buffer->free_pages,
+					   get_order(buffer->mem_alloc_size));
+				buffer->free_pages = 0;
+			}
+		}
+		pw_kfree(per_cpu_output_buffers);
+		per_cpu_output_buffers = NULL;
+	}
+};
+
+void pw_reset_per_cpu_buffers(void)
+{
+	int cpu = 0, i = 0;
+	/* for_each_possible_cpu(cpu) {*/
+	/* for (cpu=0; cpu<pw_max_num_cpus; ++cpu)*/
+	for_each_output_buffer(cpu) {
+		pw_output_buffer_t *buffer = GET_OUTPUT_BUFFER(cpu);
+		buffer->buff_index = buffer->dropped_samples =
+		    buffer->produced_samples = 0;
+		buffer->last_seg_read = -1;
+
+		for_each_segment(i) {
+			memset(buffer->buffers[i], 0, PW_DATA_BUFFER_SIZE);
+		}
+	}
+	pw_last_cpu_read = -1;
+	pw_last_mask = -1;
+};
+
+int pw_map_per_cpu_buffers(struct vm_area_struct *vma,
+			   unsigned long *total_size)
+{
+	int cpu = -1;
+	unsigned long start = vma->vm_start;
+	/*
+	 * We have a number of output buffers. Each (per-cpu) output buffer is one contiguous memory
+	 * area, but the individual buffers are disjoint from each other. We therefore need to
+	 * loop over each buffer and map in its memory area.
+	 */
+	/* for_each_possible_cpu(cpu) {*/
+	/* for (cpu=0; cpu<pw_max_num_cpus; ++cpu)*/
+	for_each_output_buffer(cpu) {
+		pw_output_buffer_t *buffer = &per_cpu_output_buffers[cpu];
+		unsigned long buff_size = buffer->mem_alloc_size;
+		int ret =
+		    remap_pfn_range(vma, start,
+				    virt_to_phys((void *)buffer->
+						 free_pages) >> PAGE_SHIFT,
+				    buff_size, vma->vm_page_prot);
+		if (ret < 0) {
+			return ret;
+		}
+		*total_size += buff_size;
+		start += buff_size;
+	}
+
+	return PW_SUCCESS;
+};
+
+bool pw_any_seg_full(u32 * val, const bool * is_flush_mode)
+{
+	int num_visited = 0, i = 0;
+
+	if (!val || !is_flush_mode) {
+		pw_pr_error("ERROR: NULL ptrs in pw_any_seg_full!\n");
+		return false;
+	}
+
+	*val = PW_NO_DATA_AVAIL_MASK;
+	pw_pr_debug(KERN_INFO "Checking for full seg: val = %u, flush = %s\n",
+		    *val, GET_BOOL_STRING(*is_flush_mode));
+	/* for_each_online_cpu(num_visited)*/
+	for_each_output_buffer(num_visited) {
+		pw_output_buffer_t *buffer = NULL;
+		/*
+		   if (++pw_last_cpu_read >= num_online_cpus()) {
+		   pw_last_cpu_read = 0;
+		   }
+		 */
+		if (++pw_last_cpu_read >= GET_NUM_OUTPUT_BUFFERS()) {
+			pw_last_cpu_read = 0;
+		}
+		buffer = GET_OUTPUT_BUFFER(pw_last_cpu_read);
+		for_each_segment(i) {
+			if (++buffer->last_seg_read >= NUM_SEGS_PER_BUFFER) {
+				buffer->last_seg_read = 0;
+			}
+			if (pw_last_cpu_read == 0) {
+				pw_pr_debug(KERN_INFO
+					    "Any_seg_Full: cpu = %d, segment = %d, flush-mode = %s, non-empty = %s\n",
+					    pw_last_cpu_read,
+					    buffer->last_seg_read,
+					    GET_BOOL_STRING(*is_flush_mode),
+					    GET_BOOL_STRING(buffer->
+							    buffers[buffer->
+								    last_seg_read]->
+							    bytes_written > 0));
+			}
+			smp_mb();
+			if (buffer->buffers[buffer->last_seg_read]->is_full
+			    || (*is_flush_mode
+				&& buffer->buffers[buffer->last_seg_read]->
+				bytes_written > 0)) {
+				*val =
+				    (pw_last_cpu_read & 0xffff) << 16 |
+				    (buffer->last_seg_read & 0xffff);
+				/* pw_last_mask = *val;*/
+				return true;
+			}
+		}
+	}
+	/*
+	 * Reaches here only if there's no data to be read.
+	 */
+	if (*is_flush_mode) {
+		/*
+		 * We've drained all buffers and need to tell the userspace application there
+		 * isn't any data. Unfortunately, we can't just return a 'zero' value for the
+		 * mask (because that could also indicate that segment # 0 of cpu #0 has data).
+		 */
+		*val = PW_ALL_WRITES_DONE_MASK;
+		return true;
+	}
+	return false;
+};
+
+/*
+ * Has semantics of 'copy_to_user()' -- returns # of bytes that could NOT be copied
+ * (On success ==> returns 0).
+ */
+unsigned long pw_consume_data(u32 mask, char __user * buffer,
+			      size_t bytes_to_read, size_t * bytes_read)
+{
+	int which_cpu = -1, which_seg = -1;
+	unsigned long bytes_not_copied = 0;
+	pw_output_buffer_t *buff = NULL;
+	pw_data_buffer_t *seg = NULL;
+
+	if (!buffer || !bytes_read) {
+		pw_pr_error("ERROR: NULL ptrs in pw_consume_data!\n");
+		return -PW_ERROR;
+	}
+
+	if (bytes_to_read != PW_DATA_BUFFER_SIZE) {
+		pw_pr_error("Error: bytes_to_read = %u, required to be %lu\n",
+			    (unsigned)bytes_to_read,
+			    (unsigned long)PW_DATA_BUFFER_SIZE);
+		return bytes_to_read;
+	}
+	which_cpu = mask >> 16;
+	which_seg = mask & 0xffff;
+	pw_pr_debug(KERN_INFO "CONSUME: cpu = %d, seg = %d\n", which_cpu,
+		    which_seg);
+	if (which_seg >= NUM_SEGS_PER_BUFFER) {
+		pw_pr_error
+		    ("Error: which_seg (%d) >= NUM_SEGS_PER_BUFFER (%d)\n",
+		     which_seg, NUM_SEGS_PER_BUFFER);
+		return bytes_to_read;
+	}
+	/*
+	 * OK to access unlocked; either the segment is FULL, or no collection
+	 * is ongoing. In either case, we're GUARANTEED no producer is touching
+	 * this segment.
+	 */
+	buff = GET_OUTPUT_BUFFER(which_cpu);
+	seg = buff->buffers[which_seg];
+	bytes_not_copied = copy_to_user(buffer, seg->buffer, seg->bytes_written);	/* dst,src*/
+	if (likely(bytes_not_copied == 0)) {
+		*bytes_read = seg->bytes_written;
+	} else {
+		pw_pr_warn("Warning: couldn't copy %u bytes\n",
+			   bytes_not_copied);
+	}
+	seg->is_full = seg->bytes_written = 0;
+	return bytes_not_copied;
+};
+
+unsigned long pw_get_buffer_size(void)
+{
+	return PW_DATA_BUFFER_SIZE;
+};
+
+void pw_count_samples_produced_dropped(void)
+{
+	int cpu = 0;
+	pw_num_samples_produced = pw_num_samples_dropped = 0;
+	if (per_cpu_output_buffers == NULL) {
+		return;
+	}
+	/* for_each_possible_cpu(cpu) {*/
+	/* for (cpu=0; cpu<pw_max_num_cpus; ++cpu)*/
+	for_each_output_buffer(cpu) {
+		pw_output_buffer_t *buff = GET_OUTPUT_BUFFER(cpu);
+		pw_pr_debug(KERN_INFO "[%d]: # samples = %u\n", cpu,
+			    buff->produced_samples);
+		pw_num_samples_dropped += buff->dropped_samples;
+		pw_num_samples_produced += buff->produced_samples;
+	}
+};
diff --git a/drivers/external_drivers/drivers/socwatch/socwatch_driver/pw_output_buffer.h b/drivers/external_drivers/drivers/socwatch/socwatch_driver/pw_output_buffer.h
new file mode 100644
index 0000000..6c1a8a8
--- /dev/null
+++ b/drivers/external_drivers/drivers/socwatch/socwatch_driver/pw_output_buffer.h
@@ -0,0 +1,133 @@
+/* ***********************************************************************************************
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2013 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  Contact Information:
+  SOCWatch Developer Team <socwatchdevelopers@intel.com>
+
+  BSD LICENSE
+
+  Copyright(c) 2013 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+  ***********************************************************************************************
+*/
+
+#ifndef _PW_OUTPUT_BUFFER_H_
+#define _PW_OUTPUT_BUFFER_H_ 1
+/*
+ * Special mask for the case where all buffers have been flushed.
+ */
+/* #define PW_ALL_WRITES_DONE_MASK 0xffffffff*/
+#define PW_ALL_WRITES_DONE_MASK ((u32)-1)
+/*
+ * Special mask for the case where no data is available to be read.
+ */
+#define PW_NO_DATA_AVAIL_MASK ((u32)-2)
+
+/*
+ * Forward declarations.
+ */
+typedef struct PWC_tps_msg PWC_tps_msg_t;
+struct PWCollector_msg;
+struct c_msg;
+
+/*
+ * Common data structures.
+ */
+#pragma pack(push)
+#pragma pack(2)
+/*
+ * Everything EXCEPT the 'c_msg_t' field MUST match
+ * the fields in 'PWCollector_msg_t' EXACTLY!!!
+ */
+struct PWC_tps_msg {
+	u64 tsc;
+	u16 data_len;
+	u8 cpuidx;		/* should really be a u16!!!*/
+	u8 data_type;
+	struct c_msg data;
+};
+#pragma pack(pop)
+
+/*
+ * Variable declarations.
+ */
+extern u64 pw_num_samples_produced, pw_num_samples_dropped;
+extern unsigned long pw_buffer_alloc_size;
+extern wait_queue_head_t pw_reader_queue;
+extern int pw_max_num_cpus;
+
+/*
+ * Public API.
+ */
+int pw_init_per_cpu_buffers(void);
+void pw_destroy_per_cpu_buffers(void);
+void pw_reset_per_cpu_buffers(void);
+int pw_map_per_cpu_buffers(struct vm_area_struct *vma,
+			   unsigned long *total_size);
+
+void pw_count_samples_produced_dropped(void);
+
+int pw_produce_generic_msg(struct PWCollector_msg *, bool);
+int pw_produce_generic_msg_on_cpu(int cpu, struct PWCollector_msg *, bool);
+
+bool pw_any_seg_full(u32 * val, const bool * is_flush_mode);
+unsigned long pw_consume_data(u32 mask, char __user * buffer,
+			      size_t bytes_to_read, size_t * bytes_read);
+
+unsigned long pw_get_buffer_size(void);
+
+void pw_wait_once(void);
+void pw_wakeup(void);
+
+/*
+ * Debugging ONLY!!!
+ */
+void pw_dump_pages(const char *msg);
+#endif /* _PW_OUTPUT_BUFFER_H_*/
diff --git a/drivers/external_drivers/drivers/socwatch/socwatch_driver/pw_overhead_measurements.h b/drivers/external_drivers/drivers/socwatch/socwatch_driver/pw_overhead_measurements.h
new file mode 100644
index 0000000..b6c528d
--- /dev/null
+++ b/drivers/external_drivers/drivers/socwatch/socwatch_driver/pw_overhead_measurements.h
@@ -0,0 +1,122 @@
+/* ***********************************************************************************************
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2013 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  Contact Information:
+  SOCWatch Developer Team <socwatchdevelopers@intel.com>
+
+  BSD LICENSE
+
+  Copyright(c) 2013 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+  ***********************************************************************************************
+*/
+
+/*
+ * Description: file containing overhead measurement
+ * routines used by the power driver.
+ */
+
+#ifndef _PW_OVERHEAD_MEASUREMENTS_H_
+#define _PW_OVERHEAD_MEASUREMENTS_H_
+/*
+ * Helper macro to declare variables required
+ * for conducting overhead measurements.
+ */
+
+/*
+ * For each function that you want to profile,
+ * do the following (e.g. function 'foo'):
+ * **************************************************
+ * DECLARE_OVERHEAD_VARS(foo);
+ * **************************************************
+ * This will declare the two variables required
+ * to keep track of overheads incurred in
+ * calling/servicing 'foo'. Note that the name
+ * that you declare here *MUST* match the function name!
+ */
+
+#define DECLARE_OVERHEAD_VARS(name)					\
+    static DEFINE_PER_CPU(u64, name##_elapsed_time) = 0;				\
+    static DEFINE_PER_CPU(local_t, name##_num_iters) = LOCAL_INIT(0);		\
+									\
+    static inline u64 get_my_cumulative_elapsed_time_##name(void){		\
+	return *(&__get_cpu_var(name##_elapsed_time));			\
+    }									\
+    static inline int get_my_cumulative_num_iters_##name(void){		\
+	return local_read(&__get_cpu_var(name##_num_iters));		\
+    }									\
+									\
+    static inline u64 name##_get_cumulative_elapsed_time_for(int cpu){	\
+	return *(&per_cpu(name##_elapsed_time, cpu));			\
+    }									\
+									\
+    static inline int name##_get_cumulative_num_iters_for(int cpu){	\
+	return local_read(&per_cpu(name##_num_iters, cpu));		\
+    }									\
+									\
+    static inline void name##_get_cumulative_overhead_params(u64 *time,	\
+							     int *iters){ \
+	int cpu = 0;							\
+	*time = 0; *iters = 0;						\
+	for_each_online_cpu(cpu){					\
+	    *iters += name##_get_cumulative_num_iters_for(cpu);		\
+	    *time += name##_get_cumulative_elapsed_time_for(cpu);	\
+	}								\
+	return;								\
+    }									\
+	\
+static inline void name##_print_cumulative_overhead_params(const char *str){\
+	int num = 0; \
+	u64 time = 0; \
+	name##_get_cumulative_overhead_params(&time, &num); \
+	printk(KERN_INFO "%s: %d iters took %llu cycles!\n", str, num, time); \
+}
+
+#endif /* _PW_OVERHEAD_MEASUREMENTS_H_*/
diff --git a/drivers/external_drivers/drivers/socwatch/socwatch_driver/pw_structs.h b/drivers/external_drivers/drivers/socwatch/socwatch_driver/pw_structs.h
new file mode 100644
index 0000000..fcf5b66
--- /dev/null
+++ b/drivers/external_drivers/drivers/socwatch/socwatch_driver/pw_structs.h
@@ -0,0 +1,1990 @@
+/* ***********************************************************************************************
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2013 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  Contact Information:
+  SOCWatch Developer Team <socwatchdevelopers@intel.com>
+
+  BSD LICENSE
+
+  Copyright(c) 2013 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+  ***********************************************************************************************
+*/
+
+/*
+ * Description: file containing data structures used by the
+ * power driver.
+ */
+
+#ifndef _DATA_STRUCTURES_H_
+#define _DATA_STRUCTURES_H_ 1
+
+#include "pw_types.h"
+
+/*
+ * Should we probe on syscall enters and exits?
+ * We require this functionality to handle certain
+ * device-driver related timers.
+ * ********************************************************
+ * WARNING: SETTING TO 1 will INVOLVE HIGH OVERHEAD!!!
+ * ********************************************************
+ */
+#define DO_PROBE_ON_SYSCALL_ENTER_EXIT 0
+#define DO_PROBE_ON_EXEC_SYSCALL DO_PROBE_ON_SYSCALL_ENTER_EXIT
+/*
+ * Do we use an RCU-based mechanism
+ * to determine which output buffers
+ * to write to?
+ * Set to: "1" ==> YES
+ *         "0" ==> NO
+ * ************************************
+ * CAUTION: RCU-based output buffer
+ * selection is EXPERIMENTAL ONLY!!!
+ * ************************************
+ */
+#define DO_RCU_OUTPUT_BUFFERS 0
+/*
+ * Do we force the device driver to
+ * (periodically) flush its buffers?
+ * Set to: "1" ==> YES
+ *       : "0" ==> NO
+ * ***********************************
+ * UPDATE: This value is now tied to the
+ * 'DO_RCU_OUTPUT_BUFFERS' flag value
+ * because, for proper implementations
+ * of buffer flushing, we MUST have
+ * an RCU-synchronized output buffering
+ * mechanism!!!
+ * ***********************************
+ */
+#define DO_PERIODIC_BUFFER_FLUSH DO_RCU_OUTPUT_BUFFERS
+/*
+ * Do we use a TPS "epoch" counter to try and
+ * order SCHED_WAKEUP samples and TPS samples?
+ * (Required on many-core architectures that don't have
+ * a synchronized TSC).
+ */
+#define DO_TPS_EPOCH_COUNTER 1
+/*
+ * Should the driver count number of dropped samples?
+ */
+#define DO_COUNT_DROPPED_SAMPLES 1
+/*
+ * Should we allow the driver to terminate the wuwatch userspace
+ * application dynamically?
+ * Used ONLY by the 'suspend_notifier' in cases when the driver
+ * detects the application should exit because the device
+ * was in ACPI S3 for longer than the collection time.
+ * DISABLED, FOR NOW
+ */
+#define DO_ALLOW_DRIVER_TERMINATION_OF_WUWATCH 0
+
+#define NUM_SAMPLES_PER_SEG 512
+#define SAMPLES_PER_SEG_MASK 511	/* MUST be (NUM_SAMPLES_PER_SEG - 1) */
+#if 1
+#define NUM_SEGS_PER_BUFFER 2	/* MUST be POW-of-2 */
+#define NUM_SEGS_PER_BUFFER_MASK 1	/* MUST be (NUM_SEGS_PER_BUFFER - 1) */
+#else
+#define NUM_SEGS_PER_BUFFER 4	/* MUST be POW-of-2 */
+#define NUM_SEGS_PER_BUFFER_MASK 3	/* MUST be (NUM_SEGS_PER_BUFFER - 1) */
+#endif
+
+#define SEG_SIZE (NUM_SAMPLES_PER_SEG * sizeof(PWCollector_sample_t))
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+
+/*
+ * The MAX number of entries in the "trace" array of the "k_sample_t" structure.
+ * If the actual backtrace is longer, multiple
+ * k_sample structs need to be chained together (see "sample_len" in
+ * the "sample" struct).
+ */
+#define PW_TRACE_LEN 11
+#define TRACE_LEN PW_TRACE_LEN	/* required by PERFRUN*/
+/*
+ * Max size of a module name. Ideally we'd
+ * like to directly include "module.h" (which
+ * defines this value), but this code
+ * will be shared with Ring-3 code, which is
+ * why we redefine it here.
+ */
+#define PW_MODULE_NAME_LEN (64 - sizeof(unsigned long))
+/*
+ * MAX size of each irq name (bytes).
+ */
+#define PW_IRQ_DEV_NAME_LEN 100
+/*
+ * MAX size of each proc name.
+ */
+#define PW_MAX_PROC_NAME_SIZE 16
+
+/*
+ * We remove this to avoid any hard coded information regarding hardware
+ * Meta data samples will include this kind of information
+ */
+#if 1
+/*
+ * MAX number of logical subsystems in south complex.
+ * for Medfield platform
+ */
+#define MFD_MAX_LSS_NUM_IN_SC 31
+/*
+ * MAX number of logical subsystems in south complex.
+ * for Clovertrail platform.
+ */
+#define CLV_MAX_LSS_NUM_IN_SC 25
+/*
+ * MAX number of logical subsystems.
+ * Choose whichever is the maximum among available platforms
+ * defined above
+ */
+#define MAX_LSS_NUM_IN_SC 31
+
+/*
+ * MAX number of logical subsystems in north complex.
+ * for Medfield platform
+ */
+#define MFD_MAX_LSS_NUM_IN_NC 9
+/*
+ * MAX number of logical subsystems in north complex.
+ * for Clovertrail platform
+ */
+#define CLV_MAX_LSS_NUM_IN_NC 7
+/*
+ * MAX number of logical subsystems.
+ * Choose whichever is the maximum among available platforms
+ * defined above
+ */
+#define MAX_LSS_NUM_IN_NC 9
+#endif
+
+/*
+ * MAX size of each wakelock name.
+ */
+#define PW_MAX_WAKELOCK_NAME_SIZE 76
+/*
+ * Device {short, long} names.
+ * Used for MFLD.
+ */
+#define PW_MAX_DEV_SHORT_NAME_SIZE 10
+#define PW_MAX_DEV_LONG_NAME_SIZE 80
+/*
+ * Package names used for Android OS.
+ */
+#define PW_MAX_PKG_NAME_SIZE 80
+/*
+ * Max # of 'd_residency' counters present
+ * in a single 'd_residency_sample' instance.
+ */
+#define PW_MAX_DEVICES_PER_SAMPLE 2
+/*
+ * MAX number of mappings per block.
+ */
+#define PW_MAX_NUM_IRQ_MAPPINGS_PER_BLOCK 16
+/*
+ * MAX size of each irq name (bytes).
+ */
+#define PW_MAX_IRQ_NAME_SIZE 32
+/*
+ * Max # of available frequencies.
+ */
+#define PW_MAX_NUM_AVAILABLE_FREQUENCIES 16	/* should be enough!*/
+/*
+ * MAX number of mappings per block.
+ */
+#define PW_MAX_NUM_PROC_MAPPINGS_PER_BLOCK 32
+/*
+ * MAX length of metadata names;
+ */
+#define PW_MAX_METADATA_NAME 80
+/*
+ * MAX number of GPU frequencies coded per *meta-data* sample; used for fixed-length samples only!
+ */
+#define PW_MAX_FREQS_PER_META_SAMPLE 54
+/*
+ * MAX number of C-state MSRs per C multi-msg
+ */
+#define PW_MAX_C_STATE_MSRS_PER_MESSAGE 6
+/*
+ * MAX number of C-state MSRs per fixed-size C meta sample
+ */
+#define PW_MAX_MSRS_PER_META_SAMPLE 9
+/*
+ * MAX length of C-state MSR name
+ */
+#define PW_MAX_C_MSR_NAME 6
+
+#define PW_MAX_ELEMENTS_PER_BW_COMPONENT 12
+
+/*
+ * MSR counter stuff.
+ *
+ * Ultimately the list of MSRs to read (and the core MSR residency addresses)
+ * will be specified by the "runss" tool (via the "PW_IOCTL_CONFIG" ioctl).
+ *
+ * For now, hardcoded to values for NHM.
+ */
+typedef enum {
+	MPERF = 0,		/* C0*/
+	APERF = 1,		/* C1*/
+	C2 = 2,
+	C3 = 3,
+	C4 = 4,
+	C5 = 5,
+	C6 = 6,
+	C7 = 7,
+	C8 = 8,
+	C9 = 9,
+	C10 = 10,
+	/* C11 = 11, */
+	MAX_MSR_ADDRESSES
+} c_state_t;
+
+/*
+ * Enumeration of possible sample types.
+ */
+typedef enum {
+	FREE_SAMPLE = 0,	/* Used (internally) to indicate a FREE entry */
+	C_STATE = 1,		/* Used for c-state samples */
+	P_STATE = 2,		/* Used for p-state samples */
+	K_CALL_STACK = 3,	/* Used for kernel-space call trace entries */
+	M_MAP = 4,		/* Used for module map info samples */
+	IRQ_MAP = 5,		/* Used for IRQ # <-> DEV name mapping samples */
+	PROC_MAP = 6,		/* Used for PID <-> PROC name mapping samples */
+	S_RESIDENCY = 7,	/* Used for S residency counter samples */
+	S_STATE = 8,		/* Used for S state samples */
+	D_RESIDENCY = 9,	/* Used for D residency counter samples */
+	D_STATE = 10,		/* Used for D state samples in north or south complex */
+	TIMER_SAMPLE = 11,
+	IRQ_SAMPLE = 12,
+	WORKQUEUE_SAMPLE = 13,
+	SCHED_SAMPLE = 14,
+	IPI_SAMPLE = 15,
+	TPE_SAMPLE = 16,	/*  Used for 'trace_power_end' samples */
+	W_STATE = 17,		/* Used for kernel wakelock samples */
+	DEV_MAP = 18,		/* Used for NC and SC device # <-> DEV name mapping samples */
+	C_STATE_MSR_SET = 19,	/* Used to send an initial snapshot of the various C-state MSRs */
+	U_STATE = 20,		/* Used for user wakelock samples */
+	TSC_POSIX_MONO_SYNC = 21,	/* Used to sync TSC <-> posix CLOCK_MONOTONIC timers; REQUIRED for AXE support */
+	CONSTANT_POOL_ENTRY = 22,	/* Used to send constant pool information */
+	PKG_MAP = 23,		/* Used to send UID and package name mappings used for Android */
+	CPUHOTPLUG_SAMPLE = 24,	/* Used to note when a CPU goes online or offline in ARM systems */
+	C_MULTI_MSG = 25,	/* Used for c-state samples when multiple C-state MSRs have counted */
+	THERMAL = 26,		/* Needs Meta Data???*/
+	POWER_METER = 27,	/* Needs Meta Data*/
+	GRAPHICS = 28,		/* Needs Meta Data + Subtypes (GFX_C_STATE, GFX_P_STATE)*/
+	BANDWIDTH = 29,		/* Needs Meta Data + Subtypes(???)*/
+	MODEM = 30,		/* Needs Meta Data*/
+	U_CALL_STACK = 31,	/* Needs subtypes for Windows*/
+	TIMER_RESOLUTION = 32,	/* Needs input from Windows team*/
+	SYSTEM = 33,		/* ??? for meta data only*/
+	META_DATA = 34,		/* Needs subtypes*/
+	SUMMARY = 35,		/* For summary and timeline trace samples; will have sub types */
+	ACPI_S3 = 36,		/* Used for ACPI S3 residency counter samples */
+	GPUFREQ = 37,		/* Used for GPU P-state metadata */
+	THERMAL_COMP = 38,	/* HACK! (GEH) Used for thermal component metadata for fixed-length samples only */
+	BANDWIDTH_COMP = 39,	/* HACK! (GEH) Used for bandwidth component/pathway metadata for fixed-length samples only */
+	GPU_P_STATE = 40,	/* HACK! (GEH) Used for GPU P-state for fixed-length samples only (temporary) */
+	GPU_C_STATE = 41,	/* Used for GPU C-state samples */
+	FPS = 42,		/* Used for FPS samples */
+	DRAM_SELF_REFRESH = 43,	/* Used for DRAM Self Refresh residency */
+	DRAM_SELF_REFRESH_COMP = 44,	/* Used for DRAM Self Refresh residency metadata for fixed-length samples only */
+	S_RESIDENCY_STATES = 45,	/* Used for S residency metadata for fixed-length samples only */
+	MATRIX_MSG = 46,	/* Used for Matrix messages */
+	BANDWIDTH_ALL_APPROX = 47,	/* Used for T-unit B/W messages */
+	C_STATE_META = 48,	/* HACK! Used for CPU C-state metadata for fixed-length samples only! (temporary) */
+	GPU_C_STATE_META = 49,	/* HACK! Used for GPU C-state metadata for fixed-length samples only! (temporary) */
+	BANDWIDTH_MULTI = 50,
+	BANDWIDTH_MULTI_META = 51,	/* HACK! Used for elements of a BW compenent metadata for fixed-length samples only! (temporary) */
+	SAMPLE_TYPE_END
+} sample_type_t;
+#define FOR_EACH_SAMPLE_TYPE(idx) for ( idx = C_STATE; idx < SAMPLE_TYPE_END; ++idx )
+
+/*
+ * For HT-testing. These correspond to the various
+ * "sample_type_t" enum values. We have a long and
+ * a short version of each name.
+ */
+#ifndef __KERNEL__
+static const char *s_long_sample_names[] =
+    { "FREE_SAMPLE", "TPS", "TPF", "K_CALL", "M_MAP", "I_MAP", "P_MAP", "S_RES",
+"S_ST", "D_RES", "D_ST", "TIM", "IRQ", "WRQ", "SCD", "IPI", "TPE", "W_ST", "D_MAP", "MSR", "U_ST", "PSX",
+"CPE", "PKG_MAP", "CPUHOTPLUG", "MUL", "THERM", "POW_MET", "GFX", "BW", "MOD", "U_CALL", "TIM_RES", "SYS",
+"META", "TRACE_SUM", "S3", "GPUF", "T_COMP", "B_COMP", "GPUP", "GPUC", "FPS", "DRAMSRR", "DRAMSRR_COMP",
+"S_RES_STATES", "MATRIX", "BW_ALL_APPROX", "CPU_C_META", "GPU_C_META", "BW_MULTI", "BW_MULTI_META",
+"SOFIA_C_ST", "SOFIA_SLEEP_BLK", "SOFIA_S_ST", "SOFIA_P_ST_REQ", "SOFIA_P_ST", "SOFIA_VM_SWITCH",
+"SAMPLE_END" };
+#endif /* __KERNEL__*/
+
+/*
+ * Enumeration of possible C-state sample
+ * types.
+ */
+
+typedef enum {
+	PW_BREAK_TYPE_I = 0,	/* interrupt*/
+	PW_BREAK_TYPE_T = 1,	/* timer*/
+	PW_BREAK_TYPE_S = 2,	/* sched-switch*/
+	PW_BREAK_TYPE_IPI = 3,	/* (LOC, RES, CALL, TLB)*/
+	PW_BREAK_TYPE_W = 4,	/* workqueue*/
+	PW_BREAK_TYPE_B = 5,	/* boundary*/
+	PW_BREAK_TYPE_N = 6,	/* Not-a-break: used exclusively for CLTP support: DEBUGGING ONLY!*/
+	PW_BREAK_TYPE_A = 7,	/* Abort*/
+	PW_BREAK_TYPE_U = 8,	/* unknown*/
+	PW_BREAK_TYPE_END = 9	/* EOF*/
+} c_break_type_t;
+#define FOR_EACH_WAKEUP_TYPE(idx) for ( idx = PW_BREAK_TYPE_I; idx < PW_BREAK_TYPE_END; ++idx )
+/*
+ * 's_wake_type_names' and 's_wake_type_long_names' are used mainly in Ring-3 debugging.
+ */
+#ifndef __KERNEL__
+static const char *s_wake_type_names[] =
+    { "IRQ", "TIM", "SCHED", "IPI", "WRQ", "BDRY", "NONE", "ABRT", "UNK",
+"EOF" };
+static const char *s_wake_type_long_names[] =
+    { "IRQ", "TIMER", "SCHEDULER", "IPI", "WORK QUEUE", "BOUNDARY", "NONE",
+"ABORT", "UNKNOWN", "EOF" };
+#endif /* __KERNEL__*/
+
+#pragma pack(push)		/* Store current alignment */
+#pragma pack(2)			/* Set new alignment -- 2 byte boundaries */
+/*
+ * MSRs may be "Thread" MSRs, "Core" MSRs, "Module" MSRs, "Package" MSRs or "GPU" MSRs.
+ * (Examples: PC2 on Saltwell is a "Package" MSR, MC4 on SLM is a "Module" MSR)
+ */
+typedef enum pw_msr_type {
+	PW_MSR_THREAD = 0,
+	PW_MSR_CORE = 1,
+	PW_MSR_MODULE = 2,
+	PW_MSR_PACKAGE = 3,
+	PW_MSR_GPU = 4
+} pw_msr_type_t;
+/*
+ * Names corresponding to C-state MSR types.
+ * Ring-3 Debugging ONLY!
+ */
+#ifndef __KERNEL__
+static const char *s_pw_msr_type_names[] =
+    { "Thread", "Core", "Module", "Package", "GPU" };
+#endif /* __KERNEL__*/
+/*
+ * Specifier for GPU C-states.
+ */
+typedef enum pw_gpu_msr_subtype {
+	PW_MSR_GPU_RENDER = 0,
+	PW_MSR_GPU_MEDIA = 1
+} pw_gpu_msr_subtype_t;
+/*
+ * Names corresponding to GPU C-state subtypes.
+ * Ring-3 Debugging ONLY!
+ */
+#ifndef __KERNEL__
+static const char *s_pw_gpu_msr_subtype_names[] = { "RENDER", "MEDIA" };
+#endif /* __KERNEL__*/
+
+/*
+ * MSR specifiers
+ */
+typedef struct pw_msr_identifier {
+	u8 subtype:4;		/* For "PC6" v "PC6C" differentiation etc.*/
+	u8 type:4;		/* One of 'pw_msr_type_t'*/
+	u8 depth;		/* Actual MSR number e.g. "CC2" will have a "2" here*/
+} pw_msr_identifier_t;
+/*
+ * Struct to encode MSR addresses.
+ */
+typedef struct pw_msr_addr {
+	pw_msr_identifier_t id;	/* MSR identifier*/
+	u32 addr;		/* MSR address*/
+} pw_msr_addr_t;
+/*
+ * Struct to encode MSR values.
+ */
+typedef struct pw_msr_val {
+	pw_msr_identifier_t id;	/* MSR identifier*/
+	u64 val;		/* MSR value*/
+} pw_msr_val_t;
+/*
+ * Structure used by Ring-3 to tell the power driver which
+ * MSRs to read.
+ */
+typedef struct pw_msr_info {
+	u16 num_msr_addrs;	/* The number of MSR addresses pointed to by the 'data' field*/
+	char data[1];		/* The list of 'pw_msr_addr_t' instances to read.*/
+} pw_msr_info_t;
+
+#define PW_MSR_INFO_HEADER_SIZE() ( sizeof(pw_msr_info_t) - sizeof(char[1]) )
+
+/* Restore previous alignment */
+/* #pragma pack(pop) */
+
+/*
+ * A c-state msg.
+ */
+#if 0
+#pragma pack(push)		/* Store current alignment */
+#pragma pack(2)			/* Set new alignment -- 2 byte boundaries */
+#endif
+/*
+ * GU: moved "c_msg" to "c_multi_msg". Replaced with older version of "c_msg".
+ * This is temporary ONLY!
+ */
+#if 0
+typedef struct c_msg {
+	u64 mperf;
+	u64 wakeup_tsc;		/* The TSC when the wakeup event was handled.*/
+	u64 wakeup_data;	/* Domain-specific wakeup data. Corresponds to "c_data" under old scheme.*/
+	s32 wakeup_pid;
+	s32 wakeup_tid;
+	u32 tps_epoch;
+	/*
+	 * In cases of timer-related wakeups, encode the CPU on which the timer was
+	 * initialized (and whose init TSC is encoded in the 'wakeup_data' field).
+	 */
+	s16 timer_init_cpu;
+	u8 wakeup_type;		/* instance of 'c_break_type_t'*/
+	u8 req_state;		/* State requested by OS: "HINT" parameter passed to TPS probe*/
+	u8 num_msrs;		/* the number of 'pw_msr_val_t' instances encoded in the 'data' field below*/
+	u8 data[1];		/* array of 'num_msrs' 'pw_msr_val_t' instances*/
+} c_msg_t;
+#endif
+typedef struct c_msg {
+	pw_msr_val_t cx_msr_val;	/* The value read from the C-state MSR at the wakeup point.*/
+	u64 mperf;		/* The value read from the C0 MSR*/
+	u64 wakeup_tsc;		/* The TSC when the wakeup event was handled.*/
+	/*
+	 * Domain-specific wakeup data. Corresponds to "c_data" under c_message_t fixed-length scheme.
+	 * Meaning is as follows for the following wakeup_types:
+	 *    PW_BREAK_TYPE_I: IRQ number (used as index to get IRQ name using i_message_t)
+	 *    PW_BREAK_TYPE_T: System call CPU TSC
+	 *    All other types: field is ignored.
+	 */
+	u64 wakeup_data;
+	/*
+	 * 's32' for wakeup_{pid,tid} is overkill: '/proc/sys/kernel/pid_max' is almost always
+	 * 32768, which will fit in 's16'. However, it IS user-configurable, so we must
+	 * accomodate larger pids.
+	 */
+	s32 wakeup_pid;
+	s32 wakeup_tid;
+	u32 tps_epoch;		/* Only used before post-processing*/
+	/*
+	 * In cases of timer-related wakeups, encode the CPU on which the timer was
+	 * initialized (and whose init TSC is encoded in the 'wakeup_data' field).
+	 */
+	s16 timer_init_cpu;	/* Only used before post-processing*/
+	/* pw_msr_identifier_t act_state_id;*/
+	u8 wakeup_type;		/* instance of 'c_break_type_t'*/
+	u8 req_state;		/* State requested by OS: "HINT" parameter passed to TPS probe. Only used before post-processing*/
+	/* State granted by hardware: the MSR that counted, and whose residency is encoded in the 'cx_res' field*/
+	u8 act_state;
+} c_msg_t;
+
+typedef struct c_multi_msg {
+	u64 mperf;
+	u64 wakeup_tsc;		/* The TSC when the wakeup event was handled.*/
+	u64 wakeup_data;	/* Domain-specific wakeup data. Corresponds to "c_data" under old scheme.*/
+	s32 wakeup_pid;
+	s32 wakeup_tid;
+	u32 tps_epoch;
+	/*
+	 * In cases of timer-related wakeups, encode the CPU on which the timer was
+	 * initialized (and whose init TSC is encoded in the 'wakeup_data' field).
+	 */
+	s16 timer_init_cpu;
+	u8 wakeup_type;		/* instance of 'c_break_type_t'*/
+	u8 req_state;		/* State requested by OS: "HINT" parameter passed to TPS probe*/
+	u8 num_msrs;		/* the number of 'pw_msr_val_t' instances encoded in the 'data' field below*/
+	u8 data[1];		/* array of 'num_msrs' 'pw_msr_val_t' instances*/
+} c_multi_msg_t;
+
+#define C_MULTI_MSG_HEADER_SIZE() ( sizeof(c_multi_msg_t) - sizeof(char[1]) )
+
+/*
+ * A p-state sample: MUST be 54 bytes EXACTLY!!!
+ */
+typedef struct p_msg {
+	/*
+	 * The frequency the OS requested during the LAST TPF, in KHz.
+	 */
+	u32 prev_req_frequency;
+	/*
+	 * The value of the IA32_PERF_STATUS register: multiply bits 12:8 (Atom) or 15:0 (big-core)
+	 * with the BUS clock frequency to get actual frequency the HW was executing at.
+	 */
+	u16 perf_status_val;
+	/*
+	 * We encode the frequency at the start and end of a collection in 'boundary'
+	 * messages. This flag is set for such messages. Only used before post-processing..
+	 */
+	u16 is_boundary_sample;	/* Only lsb is used for the flag, which when true indicates a*/
+	/* begin or end boundary sample for the collection. These are generated*/
+	/* during post-processing.*/
+	union {
+		u64 unhalted_core_value;	/* The APERF value. Used only before post-processing.*/
+		u32 frequency;	/* The actual measured frequency in KHz.*/
+		/* Used only for post-processed samples.*/
+	};
+	union {
+		u64 unhalted_ref_value;	/* The MPERF value. Used only before post-processing..*/
+		u32 cx_time_rate;	/* The fraction of time since the previous p_msg sample spent in a non-C0 state.*/
+		/* This value ranges from 0 (0% Cx time) to 1e9 (100% Cx time).*/
+		/* Used only for post-processed samples.*/
+	};
+} p_msg_t;
+
+/*
+ * The 'type' of the associated
+ * 'u_sample'.
+ */
+typedef enum {
+	PW_WAKE_ACQUIRE = 0,	/* Wake lock*/
+	PW_WAKE_RELEASE = 1	/* Wake unlock*/
+} u_sample_type_t;
+
+/*
+ * The 'type' of the associated
+ * 'u_sample'.
+ */
+typedef enum {
+	PW_WAKE_PARTIAL = 0,	/* PARTIAL_WAKE_LOCK*/
+	PW_WAKE_FULL = 1,	/* FULL_WAKE_LOCK*/
+	PW_WAKE_SCREEN_DIM = 2,	/* SCREEN_DIM_WAKE_LOCK*/
+	PW_WAKE_SCREEN_BRIGHT = 3,	/* SCREEN_BRIGHT_WAKE_LOCK*/
+	PW_WAKE_PROXIMITY_SCREEN_OFF = 4	/* PROXIMITY_SCREEN_OFF_WAKE_LOCK*/
+} u_sample_flag_t;
+
+/*
+ * Wakelock sample
+ */
+typedef struct {
+	u_sample_type_t type;	/* Either WAKE_ACQUIRE or WAKE_RELEASE*/
+	u_sample_flag_t flag;	/* Wakelock flag*/
+	pid_t pid, uid;
+	u32 count;
+	char tag[PW_MAX_WAKELOCK_NAME_SIZE];	/* Wakelock tag*/
+} u_sample_t;
+
+/*
+ * Generic "event" sample.
+ */
+typedef struct event_sample {
+	u64 data[6];
+} event_sample_t;
+
+/*
+ * The 'type' of the associated
+ * 'd_residency_sample'.
+ */
+typedef enum {
+	PW_NORTH_COMPLEX = 0,	/* North complex*/
+	PW_SOUTH_COMPLEX = 1,	/* South complex*/
+	PW_NOT_APPLICABLE = 2	/* Not applicable*/
+} device_type_t;
+
+/*
+ * Convenience for a 'string' data type.
+ * Not strictly required.
+ */
+typedef struct pw_string_type pw_string_type_t;
+struct pw_string_type {
+	u16 len;
+	/* char data[1];*/
+	char *data;
+};
+/* TODO: ALL pointers need to be converted to "u64"!!!*/
+
+/*
+ * Meta data used to describe S-state residency.
+ */
+typedef struct s_res_meta_data s_res_meta_data_t;
+struct s_res_meta_data {
+	u8 num_states;		/* The number of states available including S3.*/
+	pw_string_type_t *names;	/* The list of state names e.g. S0i0, S0i1, S0i2, S0i3, S3 ...*/
+	/* The order must be same as the order of values stored in residencies*/
+};
+#define S_RES_META_MSG_HEADER_SIZE (sizeof(s_res_meta_data_t) - sizeof(pw_string_type_t *))
+
+/*
+ * Platform state (a.k.a. S-state) residency counter sample
+ */
+typedef struct s_res_msg {
+	u64 *residencies;	/* Residencies in time (in unit of TSC ticks) for each state*/
+	/* Array size is determined by num_states (defined in metadata)*/
+	/* MUST be last entry in struct!*/
+} s_res_msg_t;
+#define S_RES_MSG_HEADER_SIZE (sizeof(s_res_msg_t) - sizeof(u64 *))
+
+/*
+ * Meta data used to describe D-state sample or residency
+ * Device names are generated in dev_sample structure
+ */
+typedef struct d_state_meta_data d_state_meta_data_t;
+struct d_state_meta_data {
+	device_type_t dev_type;	/* one of "device_type_t". Different complex may have different device states available.*/
+	u8 num_states;		/* The total number of states available*/
+	pw_string_type_t *names;	/* The list of state names e.g. D0i0_AON, D0i0_ACG, D0i1, D0i3, D3_hot ...*/
+	/* The order must be same as the order of values stored in residencies*/
+};
+#define D_STATE_META_MSG_HEADER_SIZE (sizeof(d_state_meta_data_t) - sizeof(pw_string_type_t *))
+
+/*
+ * Structure to return Dev # <-> Dev Name mappings.
+ */
+typedef struct dev_map_msg dev_map_msg_t;
+struct dev_map_msg {
+	u16 dev_num;		/* Device ID*/
+	u16 dev_type;		/* one of "device_type_t"*/
+	/* The pair (dev_num, dev_type) is a unique ID for each device*/
+	pw_string_type_t dev_short_name;
+	pw_string_type_t dev_long_name;
+};
+
+typedef struct dev_map_meta_data dev_map_meta_data_t;
+struct dev_map_meta_data {
+	u16 num_devices;	/* The number of 'dev_map_msg_t' instances in the 'device_mappings' array, below*/
+	dev_map_msg_t *device_mappings;	/* A mapping of dev num <-> dev names; size is governed by 'num_devices'*/
+};
+#define DEV_MAP_MSG_META_MSG_HEADER_SIZE (sizeof(dev_map_meta_data_t) - sizeof(dev_map_msg_t *))
+
+/*
+ * 'D-state' information. Used for both residency and state samples.
+ */
+typedef struct d_state_msg d_state_msg_t;
+typedef struct d_state_msg d_res_msg_t;
+struct d_state_msg {
+	u16 num_devices;	/* Number of devices profiled (a subset of devices can be monitored)*/
+	device_type_t dev_type;	/* one of "device_type_t"*/
+	u16 *deviceIDs;		/* Array of Device IDs profiled. Array size is determined by num_devices*/
+	u64 *values;		/* Array of Device residencies or states depending on sample type (sample_type_t).*/
+	/* If the sample type is D_RESIDENCY,*/
+	/* Array size is determined by num_devices * num_states (defined in metadata)*/
+	/* Array have values in the following order in the unit of TSC ticks*/
+	/* {D0 in Dev0, D1 in Dev0, D2 in Dev0, D0 in Dev1, D1 in Dev1 ...}*/
+	/* Or the other way? Which one is better?*/
+	/* If the sample type is D_STATE,*/
+	/* Array size is determined by num_devices * log(num_states) / 64*/
+};
+#define D_STATE_MSG_HEADER_SIZE ( sizeof(d_state_msg_t) - sizeof(u16 *) - sizeof(u64 *) )
+#define D_RES_MSG_HEADER_SIZE ( sizeof(d_res_msg_t) - sizeof(u16 *) - sizeof(u64 *) )
+
+/*
+ * The 'unit' of thermal data.
+ */
+typedef enum {
+	PW_FAHRENHEIT = 0,
+	PW_CELCIUS = 1
+} thermal_unit_t;
+
+/*
+ * Meta data used to describe Thermal-states.
+ */
+typedef struct thermal_meta_data thermal_meta_data_t;
+struct thermal_meta_data {
+	u16 num_components;	/* The number of components.*/
+	thermal_unit_t thermal_unit;	/* 0 is Fahrenheit and 1 is Celcius.*/
+	pw_string_type_t *names;	/* Names of components like Core, Skin, MSIC Die, SOC, ...*/
+};
+#define THERMAL_META_MSG_HEADER_SIZE (sizeof(thermal_meta_data_t) - sizeof(pw_string_type_t *))
+
+/*
+ * Thermal state sample
+ */
+typedef struct thermal_msg {
+	u16 index;		/* Array index to components defined in thermal_meta_data. Index must be [0, num_components)*/
+	u16 temperatures;	/* Thermal value in the unit defined in thermal_unit*/
+} thermal_msg_t;
+
+/*
+ * Meta data used to describe GPU Frequency.
+ */
+typedef struct gpufreq_meta_data gpufreq_meta_data_t;
+struct gpufreq_meta_data {
+	u16 num_available_freqs;	/* Number of available gpu frequencies.*/
+	u16 *available_freqs;	/* List of all available frequncies. Max Length will be equal to the num_available_freq.*/
+	/* The unit of frequency here is Mhz.*/
+};
+#define GPUFREQ_META_MSG_HEADER_SIZE (sizeof(gpufreq_meta_data_t) - sizeof(u16 *))
+
+/*
+ * GPU Frequency state sample
+ */
+typedef struct gpufreq_msg {
+	u16 gpufrequency;	/* GPU frequency is stored here. Unit is MHz*/
+} gpufreq_msg_t;
+
+/*
+ * Meta data used to describe Power-states.
+ */
+typedef struct power_meta_data power_meta_data_t;
+struct power_meta_data {
+	u16 num_components;	/* The number of components.*/
+	pw_string_type_t *names;	/* Names of components like IA Pkg, Gfx, SOC, ...*/
+};
+
+/*
+ * Power state sample
+ */
+typedef struct power_msg {
+	u16 index;		/* Array index to components defined in power_meta_data. index must be [0, num_components)*/
+	u32 currnt;		/* Assume the unit is uA: GU: changed name from "current" to "currnt" to get the driver to compile*/
+	u32 voltage;		/* Assume the unit is uV*/
+	u32 power;		/* Assume the unit is uW*/
+} power_msg_t;
+
+/*
+ * Meta data used to describe bandwidths.
+ */
+typedef struct bw_meta_data {
+	u16 num_components;	/* The number of components.*/
+	pw_string_type_t *names;	/* Names of components like Core to DDR0, Core to DDR1, ISP, GFX, IO, DISPLAY ...*/
+} bw_meta_data_t;
+#define BANDWIDTH_META_MSG_HEADER_SIZE (sizeof(bw_meta_data_t) - sizeof(pw_string_type_t *))
+
+/*
+ * Bandwidth sample
+ */
+typedef struct bw_msg {
+	u16 index;		/* Array index to components defined in bw_meta_data.*/
+	/* Index must be [0, num_components)*/
+	u64 read32_bytes;	/* Total number of READ32 bytes for duration*/
+	u64 write32_bytes;	/* Total number of WRITE32 bytes for duration*/
+	u64 read64_bytes;	/* Total number of READ64 bytes for duration*/
+	u64 write64_bytes;	/* Total number of WRITE64 bytes for duration*/
+	u64 duration;		/* The unit should be TSC ticks.*/
+} bw_msg_t;
+
+typedef struct bw_multi_meta_data bw_multi_meta_data_t;
+struct bw_multi_meta_data {
+	u16 index;		/* Array index to components defined in bw_meta_data.*/
+	/* Index must be [0, num_components)*/
+	/* Currently, this is ALWAYS ZERO (because only one VISA metric can be collected at a time).*/
+	u16 num_names;		/* Size of 'names' array, below*/
+	pw_string_type_t *names;	/* Individual names for each element in 'bw_multi_msg->data' e.g. "Read32", "WritePartial" "DDR-0 Rank-0 Read64"*/
+};
+#define BW_MULTI_META_MSG_HEADER_SIZE (sizeof(bw_multi_meta_data_t) - sizeof(pw_string_type_t *))
+
+typedef struct bw_multi_msg bw_multi_msg_t;
+struct bw_multi_msg {
+	u16 index;		/* Array index to components defined in bw_meta_data.*/
+	/* Index must be [0, num_components)*/
+	/* Currently, this is ALWAYS ZERO (because only one VISA metric can be collected at a time).*/
+	u16 num_data_elems;	/* Size of 'data' array, below*/
+	u64 duration;		/* In TSC ticks*/
+	u64 p_data;		/* Size of array == 'bw_multi_msg->num_data_elems' == 'bw_multi_meta_data->num_names'.*/
+};
+#define BW_MULTI_MSG_HEADER_SIZE() (sizeof(bw_multi_msg_t) - sizeof(u64))
+
+typedef struct bw_multi_sample bw_multi_sample_t;
+struct bw_multi_sample {
+	u16 index;		/* Array index to components defined in bw_meta_data.*/
+	/* Index must be [0, num_components)*/
+	/* Currently, this is ALWAYS ZERO (because only one VISA metric can be collected at a time).*/
+	u16 num_data_elems;	/* Size of 'data' array, below*/
+	u64 duration;		/* In TSC ticks*/
+	u64 data[PW_MAX_ELEMENTS_PER_BW_COMPONENT];	/* Size of array == 'bw_multi_meta_data->num_names'.*/
+};
+
+/*
+ * Meta data used to describe FPS
+ */
+typedef struct fps_meta_data fps_meta_data_t;
+struct fps_meta_data {
+	u16 num_components;	/* The number of components including frames*/
+	pw_string_type_t *names;	/* Names of components like FPS*/
+};
+#define FPS_META_MSG_HEADER_SIZE (sizeof(fps_meta_data_t) - sizeof(pw_string_type_t *))
+
+/*
+ * FPS sample
+ */
+typedef struct fps_msg {
+	u32 frames;
+} fps_msg_t;
+
+typedef struct dram_srr_meta_data dram_srr_meta_data_t;
+struct dram_srr_meta_data {
+	u16 num_components;	/* The number of components.*/
+	pw_string_type_t *names;	/* Names of components like DUNIT0, DUNIT1...*/
+};
+#define DRAM_SRR_META_MSG_HEADER_SIZE (sizeof(dram_srr_meta_data_t) - sizeof(pw_string_type_t *))
+
+typedef struct dram_srr_msg {
+	u16 num_components;	/* The number of components.*/
+	u64 duration;		/* The unit should be TSC ticks.*/
+	u64 *residency_cpu_ticks;	/* Residency in terms of CPU clock ticks i.e. TSC*/
+	/* Number of elements in array must be equal to num_components in meta data*/
+	/* This field is for VTune visualization.*/
+	u64 *residency_soc_ticks;	/* Residency in terms of SOC clock ticks*/
+	/* Number of elements in array must be equal to num_components in meta data*/
+} dram_srr_msg_t;
+
+/*
+ * Kernel wakelock information.
+ */
+typedef struct constant_pool_msg {
+	u16 entry_type;		/* one of 'W_STATE' for kernel mapping or 'U_STATE' for userspace mapping*/
+	u16 entry_len;
+	/*
+	 * We need to differentiate between the two types of 'W_STATE' constant-pool entries:
+	 * 1. Entries generated in Ring-3 (as a result of parsing the "/proc/wakelocks" file). These are generated at
+	 *    the START of a collection and have a 'w_sample_type_t' value of 'PW_WAKE_LOCK_INITAL'.
+	 * 2. Entries generated in Ring-0 DURING the collection.
+	 * All examples of (1) will have the MSB set to '1'. Examples of (2) will not be bitmasked in any way.
+	 */
+	u32 entry_index;
+	char entry[1];		/* MUST be LAST entry in struct!*/
+} constant_pool_msg_t;
+#define PW_CONSTANT_POOL_MSG_HEADER_SIZE (sizeof(constant_pool_msg_t) - sizeof(char[1]))
+#define PW_CONSTANT_POOL_INIT_ENTRY_MASK (1U << 31)
+#define PW_SET_INITIAL_W_STATE_MAPPING_MASK(idx) ( (idx) | PW_CONSTANT_POOL_INIT_ENTRY_MASK )
+#define PW_HAS_INITIAL_W_STATE_MAPPING_MASK(idx) ( (idx) & PW_CONSTANT_POOL_INIT_ENTRY_MASK )	/* MSB will be SET if 'PW_WAKE_LOCK_INITIAL' mapping */
+#define PW_STRIP_INITIAL_W_STATE_MAPPING_MASK(idx) ( (idx) & ~PW_CONSTANT_POOL_INIT_ENTRY_MASK )
+
+typedef struct w_wakelock_msg {
+	u16 type;		/* one of 'w_sample_type_t'*/
+	pid_t tid, pid;
+	u32 constant_pool_index;
+	u64 expires;
+	char proc_name[PW_MAX_PROC_NAME_SIZE];
+} w_wakelock_msg_t;
+
+typedef struct u_wakelock_msg {
+	u16 type;		/* One of 'u_sample_type_t'*/
+	u16 flag;		/* One of 'u_sample_flag_t'*/
+	pid_t pid, uid;
+	u32 count;
+	u32 constant_pool_index;
+} u_wakelock_msg_t;
+
+typedef struct i_sample i_msg_t;
+
+typedef struct r_sample r_msg_t;
+
+/*
+ * TSC_POSIX_MONO_SYNC
+ * TSC <-> Posix clock_gettime() sync messages.
+ */
+typedef struct tsc_posix_sync_msg {
+	pw_u64_t tsc_val;
+	pw_u64_t posix_mono_val;
+} tsc_posix_sync_msg_t;
+
+/*
+ * Temp struct to hold C_STATE_MSR_SET samples.
+ * Required only until we move away from using PWCollector_sample
+ * instances to using PWCollector_msg instances in the power lib.
+ * *********************************************************************
+ * RESTRICTIONS: struct size MUST be LESS THAN or EQUAL to 112 bytes!!!
+ * *********************************************************************
+ */
+typedef struct tmp_c_state_msr_set_sample {
+	u16 num_msrs;
+	pw_msr_val_t msr_vals[11];	/* Each 'pw_msr_val_t' instance is 10 bytes wide.*/
+} tmp_c_state_msr_set_sample_t;
+
+/*
+ * Information on the specific TYPE of a matrix message.
+ */
+typedef enum pw_mt_msg_type {
+	PW_MG_MSG_NONE = 0,
+	PW_MT_MSG_INIT = 1,
+	PW_MT_MSG_POLL = 2,
+	PW_MT_MSG_TERM = 3,
+	PW_MT_MSG_END = 4
+} pw_mt_msg_type_t;
+/*
+ * Ring-3 Debugging: names for the above msg types.
+ */
+#ifndef __KERNEL__
+static const char *s_pw_mt_msg_type_names[] =
+    { "NONE", "INIT", "POLL", "TERM", "END" };
+#endif /* __KERNEL__*/
+/*
+ * Encode information returned by the matrix driver.
+ * Msg type == 'MATRIX_MSG'
+ */
+typedef struct pw_mt_msg pw_mt_msg_t;
+struct pw_mt_msg {
+	u16 data_type;		/* One of 'pw_mt_msg_type_t'*/
+	u16 data_len;
+	u64 timestamp;
+	u64 p_data;
+};
+#define PW_MT_MSG_HEADER_SIZE() ( sizeof(pw_mt_msg_t) - sizeof(u64) )
+
+/*
+ * Summary structs: structs used for summary and trace timeline information.
+ */
+typedef struct pw_c_state_wakeup_info pw_c_state_wakeup_info_t;
+struct pw_c_state_wakeup_info {
+	pw_u16_t wakeup_type;	/* One of 'c_break_type_t'*/
+	pw_s32_t wakeup_data;	/* Proc PID if wakeup_type == PW_BREAK_TYPE_T*/
+	/* IRQ # if wakeup_type == PW_BREAK_TYPE_I*/
+	/* Undefined otherwise*/
+	pw_u32_t wakeup_count;	/* Number of times this timer/irq/other has woken up the system from the specified C-state*/
+	pw_string_type_t wakeup_name;	/* Proc Name if wakeup_type == PW_BREAK_TYPE_T*/
+	/* Device # if wakeup_type == PW_BREAK_TYPE_I*/
+	/* Undefined otherwise*/
+};
+
+typedef struct c_state_summary_msg c_state_summary_msg_t;
+struct c_state_summary_msg {
+	float res_percent;
+	pw_u32_t abort_count;
+	pw_u32_t promotion_count;
+	pw_u32_t wakeup_count;	/* The TOTAL number of wakeups for the given node and this C-state*/
+	pw_msr_identifier_t id;
+	pw_u16_t num_wakeup_infos;	/* The number of elements in the 'wakeup_infos' array, below*/
+	pw_c_state_wakeup_info_t *wakeup_infos;
+};
+
+typedef struct c_node_summary_msg c_node_summary_msg_t;
+struct c_node_summary_msg {
+	pw_msr_type_t node_type;	/* Thread/Core/Module/Package*/
+	pw_u16_t node_id;
+	pw_u16_t num_c_states;	/* The number of elements in the 'c_states' array, below*/
+	c_state_summary_msg_t *c_states;
+};
+
+typedef struct c_summary_msg c_summary_msg_t;
+struct c_summary_msg {
+	pw_u16_t num_c_nodes;	/* The number of elements in the 'c_nodes' array, below*/
+	c_node_summary_msg_t *c_nodes;
+};
+
+typedef struct p_state_summary_msg p_state_summary_msg_t;
+struct p_state_summary_msg {
+	pw_u16_t freq_mhz;	/* The frequency, in MHz, whose residency rate is encoded in 'res_rate', below*/
+	pw_u16_t res_rate;	/* The residency rate, obtained by multiplying the residency fraction by 1e4 i.e. 100% == 10000, 99.99% == 9999 etc.*/
+};
+/*
+ * Macros to encode and decode residency rates. Used in
+ * P-state "summary" structures.
+ * Update: and also in S-residency and ACPI S3 "summary" structures.
+ */
+#define ENCODE_RES_RATE(r) (pw_u16_t)( (r) * 1e4 )
+#define DECODE_RES_RATE(r) ( (float)(r) / 1e4 )
+
+typedef struct p_node_summary_msg p_node_summary_msg_t;
+struct p_node_summary_msg {
+	pw_msr_type_t node_type;	/* Thread/Core/Module/Package*/
+	pw_u16_t node_id;
+	pw_u16_t num_p_states;	/* The number of elements in the 'p_states' array, below*/
+	p_state_summary_msg_t *p_states;
+};
+
+typedef struct p_summary_msg p_summary_msg_t;
+struct p_summary_msg {
+	pw_u16_t num_p_nodes;	/* The number of elements in the 'p_nodes' array, below.*/
+	p_node_summary_msg_t *p_nodes;
+};
+
+/*
+ * Information on a single wakelock.
+ */
+typedef struct wlock_info wlock_info_t;
+struct wlock_info {
+	double total_lock_time_msecs;	/* double is GUARANTEED to be 64bits/8bytes*/
+	pw_u32_t num_times_locked;
+	/* One of 'W_STATE' for KERNEL wakelocks or 'U_STATE' for USER wakelocks*/
+	/* pw_u16_t lock_type; */
+	pw_string_type_t name;	/* 'lock_type' == 'W_STATE' ==> Kernel wakelock name*/
+	/* 'lock_type' == 'U_STATE' ==> Wakelock tag*/
+};
+
+/*
+ * Information for all wakelocks.
+ */
+typedef struct wlock_summary_msg wlock_summary_msg_t;
+struct wlock_summary_msg {
+	pw_u32_t num_wlocks;	/* The number of elements in the 'wlocks' array, below*/
+	wlock_info_t *wlocks;	/* The list of kernel or user wakelocks*/
+};
+#define WLOCK_SUMMARY_MSG_HEADER_SIZE (sizeof(wlock_summary_msg_t) - sizeof(wlock_info_t *))
+
+#if 0
+/*
+ * Stub for kernel wakelock information.
+ */
+typedef struct kernel_wlock_map_summary_msg kernel_wlock_map_summary_msg_t;
+struct kernel_wlock_map_summary_msg {
+	/* TODO*/
+};
+/*
+ * Stub for user wakelock information.
+ */
+typedef struct user_wlock_map_summary_msg user_wlock_map_summary_msg_t;
+struct user_wlock_map_summary_msg {
+	/* TODO*/
+};
+
+typedef struct wlock_map_summary_msg wlock_map_summary_msg_t;
+struct wlock_map_summary_msg {
+	pw_u16_t lock_type;	/* One of 'W_STATE' (for Kernel) or 'U_STATE' (for User) wakelocks*/
+	pw_string_type_t lock_name;	/* Name of the wakelock*/
+	pw_string_type_t proc_name;	/* Name of process taking/releasing the wakelock*/
+	void *data;		/* If 'lock_type' == 'W_STATE' then ptr to 'kernel_wlock_map_summary_msg'*/
+	/* If 'lock_type' == 'U_STATE' then ptr to 'user_wlock_map_summary_msg'*/
+};
+#define WLOCK_MAP_SUMMARY_MSG_HEADER_SIZE (sizeof(wlock_map_summary_msg) - sizeof(void *))
+
+typedef struct wlock_summary_msg wlock_summary_msg_t;
+struct wlock_summary_msg {
+	pw_u64_t lock_time_tscs;	/* Total time (in TSC ticks) when ANY wakelock was taken.*/
+	pw_u16_t num_wlock_maps;	/* Number of instances in the 'maps' array, below*/
+	wlock_map_summary_msg_t *maps;	/* Mappings for each wakelock that was taken/released in this interval*/
+};
+#define WLOCK_SUMMARY_MSG_HEADER_SIZE (sizeof(wlock_summary_msg) - sizeof(wlock_map_summary_msg_t *))
+#endif
+
+typedef struct thermal_node_summary_msg thermal_node_summary_msg_t;
+struct thermal_node_summary_msg {
+	pw_u16_t unit;		/* An instance of thermal_unit_t*/
+	pw_u16_t index;		/* Array index to components defined in thermal_meta_data. Index must be [0, num_components)*/
+	pw_u16_t min_temp, max_temp;
+	float avg_temp;
+};
+
+typedef struct thermal_summary_msg thermal_summary_msg_t;
+struct thermal_summary_msg {
+	pw_u16_t num_thermal_nodes;
+	thermal_node_summary_msg_t *thermal_nodes;
+};
+
+typedef struct gpu_p_state_summary_msg gpu_p_state_summary_msg_t;
+struct gpu_p_state_summary_msg {
+	pw_u16_t freq_mhz;	/* The frequency, in MHz, whose residency rate is encoded in 'res_rate', below*/
+	pw_u16_t res_rate;	/* The residency rate, obtained by multiplying the residency fraction by 1e4 i.e. 100% == 10000, 99.99% == 9999 etc.*/
+};
+
+typedef struct gpu_p_summary_msg gpu_p_summary_msg_t;
+struct gpu_p_summary_msg {
+	pw_u16_t num_p_states;	/* The number of elements in the 'p_states' array, below*/
+	gpu_p_state_summary_msg_t *gpu_p_states;
+};
+
+/*
+ * Bandwidth summaries are EXACTLY the same as regular 'BW' messages.
+ */
+typedef bw_msg_t bw_summary_msg_t;
+
+typedef struct s_res_summary_msg s_res_summary_msg_t;
+struct s_res_summary_msg {
+	pw_u16_t num_states;	/* The number of elements in the 'res_rates' array below. MUST be same as 'num_states' in 's_res_meta_data'!*/
+	pw_u16_t *res_rates;	/* The residency rate, obtained by multiplying the residency fraction by 1e4 i.e. 100% == 10000, 99.99% == 9999 etc.*/
+};
+
+typedef struct summary_msg summary_msg_t;
+struct summary_msg {
+	pw_u64_t start_tsc, stop_tsc;	/* The start and stop of the interval being summarized.*/
+	pw_u16_t data_type;	/* The type of the payload; one of 'sample_type_t'*/
+	pw_u16_t data_len;	/* The size of the payload*/
+	void *p_data;		/* Pointer to the payload*/
+};
+
+/*
+ * Meta-data specifiers, data structures etc.
+ */
+
+/*
+ * Meta data used to describe a single C-state and its associated MSR.
+ */
+typedef struct pw_c_state_msr_meta_data pw_c_state_msr_meta_data_t;
+struct pw_c_state_msr_meta_data {
+	pw_msr_identifier_t id;	/* The MSR identifier for this C-state*/
+	/*
+	 * On Big-cores, the hint that gets passed to mwait is basically the ACPI C-state
+	 * and differs from the actual Intel C-state (e.g. on SNB, mwait hint for (Intel)C7
+	 * is '4' ==> the power_start tracepoint will receive a hint of '4', which must then
+	 * be converted in Ring-3 to C7).
+	 * Note:
+	 * 1. An mwait hint of "zero" indicates "don't care" (e.g. package C6 on SLM cannot have
+	 * an mwait 'hint').
+	 * 2. This is (usually) equal to the C-state number on Android (needs investigation for SLM!!!)
+	 */
+	u16 acpi_mwait_hint;	/* The mwait hint corresponding to this C-state; GU: changed to "u16" for alignment reasons*/
+	u16 target_residency;	/* Target residency for this C-state*/
+	/*
+	 * The "msr_name" field basically encodes the information present in "/sys/devices/system/cpu/cpu0/cpuidle/stateXXX/name"
+	 */
+	pw_string_type_t msr_name;	/* The actual C-state name (e.g. "ATM-C6")*/
+};
+#define C_MSR_META_MSG_HEADER_SIZE (sizeof(pw_c_state_msr_meta_data) - sizeof(pw_string_type_t))
+
+/*
+ * Meta data used to describe C-states.
+ */
+typedef struct c_meta_data c_meta_data_t;
+struct c_meta_data {
+	/*
+	 * GEH: Could we add an enum for processing unit (GPU, CPU, etc.) and add a field here to reference? Something like this:
+	 * proc_unit_t proc_unit;
+	 */
+	u16 num_c_states;	/* The number of 'pw_c_state_msr_meta_data' instances encoded in the 'data' field below.*/
+	/* GU: Changed from u8 --> u16 for alignment*/
+	pw_c_state_msr_meta_data_t *data;	/* An array of 'pw_c_state_msr_meta_data' instances, one per C-state*/
+	/* Length of the array is given by num_c_states.*/
+	/* For SW1 file, this array is contiguous in memory to the c_meta_data_t struct (inline):*/
+	/* e.g. pw_c_state_msr_meta_data_t data[num_c_state];*/
+};
+#define C_META_MSG_HEADER_SIZE (sizeof(c_meta_data_t) - sizeof(pw_c_state_msr_meta_data_t *))
+
+/*
+ * HACK! (JC)
+ * Meta data used to describe a single C-state and its associated MSR as a fixed-length sample.
+ */
+typedef struct pw_c_state_msr_meta_sample pw_c_state_msr_meta_sample_t;
+struct pw_c_state_msr_meta_sample {
+	pw_msr_identifier_t id;	/* The MSR identifier for this C-state*/
+	u16 acpi_mwait_hint;	/* The mwait hint corresponding to this C-state; GU: changed to "u16" for alignment reasons*/
+	u16 target_residency;	/* Target residency for this C-state*/
+	/*
+	 * The "msr_name" field basically encodes the information present in "/sys/devices/system/cpu/cpu0/cpuidle/stateXXX/name"
+	 */
+	char msr_name[PW_MAX_C_MSR_NAME];	/* The actual C-state name (e.g. "CC6, MC0, PC6")*/
+};
+
+/*
+ * HACK! (JC)
+ * Temporary fixed-length Structure used to describe a single C-state fixed-length sample and its associated MSR.
+ * Multiple fixed-length samples may be chained to increase the available frequencies beyond PW_MAX_MSRS_PER_META_SAMPLE
+ * Used for CPU & GPU meta samples
+ */
+typedef struct c_meta_sample c_meta_sample_t;
+struct c_meta_sample {
+	u16 num_c_states;	/* The number of 'pw_c_state_msr_meta_data' instances encoded in the 'data' field below.*/
+	pw_c_state_msr_meta_sample_t data[PW_MAX_MSRS_PER_META_SAMPLE];	/* An array of 'pw_c_state_msr_meta_sample' instances, one per C-state*/
+	/* Length of the array is given by num_c_states.*/
+	/* e.g. pw_c_state_msr_meta_sample_t data[num_c_states];*/
+};
+
+/*
+ * Meta data used to describe P-state samples.
+ */
+typedef struct p_meta_data p_meta_data_t;
+struct p_meta_data {
+	/*
+	 * GEH: Could we add an enum for processing unit (GPU, CPU, etc.) and add a field here to reference? Something like this:
+	 * proc_unit_t proc_unit;
+	 */
+	u16 num_available_freqs;	/* The # of frequencies in the 'data' field below; 256 freqs should be enough for anybody!*/
+	u16 *available_freqs;	/* A (variable-length) array of 16bit frequencies, in MHz.*/
+	/* Length of array is given by 'num_available_freqs'*/
+	/* For SW1 file, this array is contiguous in memory to the p_meta_data_t structure (inline):*/
+	/* e.g. u16 available_freqs[num_available_freqs];*/
+};
+#define P_META_MSG_HEADER_SIZE (sizeof(p_meta_data_t) - sizeof(u16 *))
+
+/*
+ * Meta data used to describe the target system (OS+H/W). Basically, most of
+ * the data that appears in the 'SYS_PARAMS' section of a current '.ww1' file
+ * (excluding some that is C-state specific and some that is P-state specific, see above).
+ * --------------------------------------------------------------------------------------------------------------------------------
+ *  WARNING: SOME COMMENTS BELOW EXPOSE INTEL-PRIVATE DATA: REMOVE BEFORE DISTRIBUTING EXTERNALLY!!!
+ * --------------------------------------------------------------------------------------------------------------------------------
+ */
+typedef struct system_meta_data system_meta_data_t;
+#if 0
+struct system_meta_data {
+	u8 driver_version_major, driver_version_minor, driver_version_other;	/* Driver version*/
+	pw_string_type_t collector_name;	/* Collector name e.g. SOCWatch for Android (NDK)*/
+	u8 collector_version_major, collector_version_minor, collector_version_other;	/* Collector version*/
+	u8 format_version_major, format_version_minor;	/* File format version*/
+	u8 bounded;		/* 1 for bounded and 0 for unbounded*/
+	u16 customer_id, vendor_id, manufacturer_id, platform_id, hardware_id;	/* Soft Platform IDs (SPID)*/
+	float collection_time_seconds;	/* Collection time, in seconds*/
+	u64 start_tsc, stop_tsc;
+	u64 start_timeval, stop_timeval;
+	u64 start_time, stop_time;
+	pw_string_type_t host_name;
+	pw_string_type_t os_name;
+	pw_string_type_t os_type;
+	pw_string_type_t os_version;
+	pw_string_type_t cpu_brand;
+	u16 cpu_family, cpu_model, cpu_stepping;	/* "u8" is probably enough for each of these!*/
+	/*
+	 * --------------------------------------------------------------------------------------------------------------------------------
+	 *  WARNING: REMOVE THIS COMMENT BEFORE DISTRIBUTING CODE EXTERNALLY!!!
+	 * --------------------------------------------------------------------------------------------------------------------------------
+	 * We currently encode the rate at which the Cx MSRs tick within the config file. However, on SLM, the rate at which the Cx MSRs
+	 * tick is specified by the 'GUAR_RATIO', which is obtained from bits 21:16 of the PUNIT_CR_IACORE_RATIOS MSR (0x66a).
+	 */
+	u16 cpu_c_states_clock_rate;	/* The rate at which the C-state MSRs tick.*/
+	u8 msr_fsb_freq_value;	/* Encoding for bus frequency; needs a "switch" statement to retrieve the ACTUAL bus freq*/
+	u8 perf_status_bits[2];	/* Need a low and high value*/
+	u32 turbo_threshold;
+	u16 num_cpus;
+	pw_string_type_t cpu_topology;
+	u16 tsc_frequency_mhz;
+	u8 was_any_thread_bit_set, was_auto_demote_enabled;
+	u32 collection_switches;
+	s32 profiled_app_pid;
+	pw_string_type_t profiled_app_name;
+	u64 number_of_samples_collected, number_of_samples_dropped;
+	/*
+	 * Do we even need a "descendent_pids_list" anymore???
+	 */
+	/* descendent_pids_list; */
+};
+#endif
+struct system_meta_data {
+	/*
+	 * 64bit vars go here...
+	 */
+	u64 start_tsc, stop_tsc;
+	u64 start_timeval, stop_timeval;
+	u64 start_time, stop_time;
+	u64 number_of_samples_collected, number_of_samples_dropped;
+	/*
+	 * 32bit vars go here...
+	 */
+	float collection_time_seconds;	/* Collection time, in seconds*/
+	float bus_freq_mhz;	/* The bus frequency, in MHz*/
+	u32 turbo_threshold;
+	u32 collection_switches;
+	s32 profiled_app_pid;
+	s32 micro_patch_ver;
+	/*
+	 * 16bit vars go here...
+	 */
+	u16 customer_id, vendor_id, manufacturer_id, platform_id, hardware_id;	/* Soft Platform IDs (SPID)*/
+	u16 cpu_family, cpu_model, cpu_stepping;	/* "u8" is probably enough for each of these!*/
+	u16 cpu_c_states_clock_rate;	/* The rate at which the C-state MSRs tick.*/
+	u16 num_cpus;
+	u16 tsc_frequency_mhz;
+	/*
+	 * 8bit vars go here...
+	 */
+	u8 driver_version_major, driver_version_minor, driver_version_other;	/* Driver version*/
+	u8 collector_version_major, collector_version_minor, collector_version_other;	/* Collector version*/
+	u8 format_version_major, format_version_minor;	/* File format version*/
+	/* u8 bound;                                                                  : 1 for bound and 0 for unbound*/
+	u8 userspace_pointer_size_bytes;	/* '4' for 32b userspace, '8' for 64b userspace*/
+	/*
+	 * --------------------------------------------------------------------------------------------------------------------------------
+	 *  WARNING: REMOVE THIS COMMENT BEFORE DISTRIBUTING CODE EXTERNALLY!!!
+	 * --------------------------------------------------------------------------------------------------------------------------------
+	 * We currently encode the rate at which the Cx MSRs tick within the config file. However, on SLM, the rate at which the Cx MSRs
+	 * tick is specified by the 'GUAR_RATIO', which is obtained from bits 21:16 of the PUNIT_CR_IACORE_RATIOS MSR (0x66a).
+	 */
+	/* GU: replaced with "float bus_freq_mhz" value*/
+	/* Encoding for bus frequency; needs a "switch" statement to retrieve the ACTUAL bus freq*/
+	/* u8 msr_fsb_freq_value; */
+	u8 perf_status_bits[2];	/* Need a low and high value*/
+	u8 was_any_thread_bit_set, was_auto_demote_enabled;
+	/*
+	 * Var-len vars go here...
+	 */
+	pw_string_type_t collector_name;	/* Collector name e.g. SOCWatch for Android (NDK)*/
+	pw_string_type_t host_name;
+	pw_string_type_t os_name;
+	pw_string_type_t os_type;
+	pw_string_type_t os_version;
+	pw_string_type_t cpu_brand;
+	pw_string_type_t cpu_topology;
+	pw_string_type_t profiled_app_name;
+};
+#define SYSTEM_META_MSG_HEADER_SIZE (sizeof(system_meta_data) -  (8 * sizeof(pw_string_type_t)))	/* 8 because we have 8 pw_string_type_t instances in this class */
+
+typedef struct meta_data_msg meta_data_msg_t;
+struct meta_data_msg {
+	u16 data_len;		/* Probably not required: this value can be derived from the "data_len" field of the PWCollector_msg struct!*/
+	u16 data_type;		/* The type of payload encoded by 'data': one of 'sample_type_t'*/
+	/* GU: Changed from u8 --> u16 for alignment*/
+
+	void *data;		/* For SW1 file, this is the payload:  one of *_meta_data_t corresponding to data_type (inline memory).*/
+	/* For internal data, this is a pointer to the payload memory (not inline).*/
+};
+#define PW_META_MSG_HEADER_SIZE sizeof(meta_data_msg_t) - sizeof(void *)
+
+#pragma pack(pop)		/* Restore previous alignment */
+
+/*
+ * Structure used to encode C-state sample information.
+ */
+typedef struct c_sample {
+	u16 break_type;		/* instance of 'c_break_type_t'*/
+	u16 prev_state;		/* "HINT" parameter passed to TPS probe*/
+	pid_t pid;		/* PID of process which caused the C-state break.*/
+	pid_t tid;		/* TID of process which caused the C-state break.*/
+	u32 tps_epoch;		/* Used to sync with SCHED_SAMPLE events*/
+	/*
+	 * "c_data" is one of the following:
+	 * (1) If "break_type" == 'I' ==> "c_data" is the IRQ of the interrupt
+	 * that caused the C-state break.
+	 * (2) If "break_type" == 'D' || 'N' => "c_data" is the TSC that maps to the
+	 * user-space call trace ID for the process which caused the C-state break.
+	 * (3) If "break_type" == 'U' ==> "c_data" is undefined.
+	 */
+	u64 c_data;
+	u64 c_state_res_counts[MAX_MSR_ADDRESSES];
+} c_sample_t;
+
+#define RES_COUNT(s,i) ( (s).c_state_res_counts[(i)] )
+
+/*
+ * Structure used to encode P-state transition information.
+ *
+ * UPDATE: For TURBO: for now, we only encode WHETHER the CPU is
+ * about to TURBO-up; we don't include information on WHICH Turbo
+ * frequency the CPU will execute at. See comments in struct below
+ * for an explanation on why the 'frequency' field values are
+ * unreliable in TURBO mode.
+ */
+typedef struct p_sample {
+	/*
+	 * Field to encode the frequency
+	 * the CPU was ACTUALLY executing
+	 * at DURING THE PREVIOUS
+	 * P-QUANTUM.
+	 */
+	u32 frequency;
+	/*
+	 * Field to encode the frequency
+	 * the OS requested DURING THE
+	 * PREVIOUS P-QUANTUM.
+	 */
+	u32 prev_req_frequency;
+	/*
+	 * We encode the frequency at the start
+	 * and end of a collection in 'boundary'
+	 * messages. This flag is set for such
+	 * messages.
+	 */
+	u32 is_boundary_sample;
+	u32 padding;
+	/*
+	 * The APERF and MPERF values.
+	 */
+	u64 unhalted_core_value, unhalted_ref_value;
+} p_sample_t;
+
+/*
+ * Structure used to encode kernel-space call trace information.
+ */
+typedef struct k_sample {
+	/*
+	 * "trace_len" indicates the number of entries in the "trace" array.
+	 * Note that the actual backtrace may be larger -- in which case the "sample_len"
+	 * field of the enclosing "struct PWCollector_sample" will be greater than 1.
+	 */
+	u32 trace_len;
+	/*
+	 * We can have root timers with non-zero tids.
+	 * Account for that possibility here.
+	 */
+	pid_t tid;
+	/*
+	 * The entry and exit TSC values for this kernel call stack.
+	 * MUST be equal to "[PWCollector_sample.tsc - 1, PWCollector_sample.tsc + 1]" respectively!
+	 */
+	u64 entry_tsc, exit_tsc;
+	/*
+	 * "trace" contains the kernel-space call trace.
+	 * Individual entries in the trace correspond to the various
+	 * return addresses in the call trace, shallowest address first.
+	 * For example: if trace is: "0x10 0x20 0x30 0x40" then
+	 * the current function has a return address of 0x10, its calling function
+	 * has a return address of 0x20 etc.
+	 */
+	u64 trace[TRACE_LEN];
+} k_sample_t;
+
+/*
+ * Structure used to encode kernel-module map information.
+ */
+typedef struct m_sample {
+	/*
+	 * Offset of current chunk, in case a kernel module is
+	 * mapped in chunks. DEFAULTS TO ZERO!
+	 */
+	u32 offset;
+	/*
+	 * Compiler would have auto-padded this for us, but
+	 * we make that padding explicit just in case.
+	 */
+	u32 padding_64b;
+	/*
+	 * The starting addr (in HEX) for this module.
+	 */
+	u64 start;
+	/*
+	 * The ending addr (in HEX) for this module.
+	 */
+	u64 end;
+	/*
+	 * Module NAME. Note that this is NOT the full
+	 * path name. There currently exists no way
+	 * of extracting path names from the module
+	 * structure.
+	 */
+	char name[PW_MODULE_NAME_LEN];
+} m_sample_t;
+
+/*
+ * Structure used to encode IRQ # <-> DEV name
+ * mapping information.
+ */
+typedef struct i_sample {
+	/*
+	 * The IRQ #
+	 */
+	int irq_num;
+	/*
+	 * Device name corresponding
+	 * to 'irq_num'
+	 */
+	char irq_name[PW_IRQ_DEV_NAME_LEN];
+} i_sample_t;
+
+/*
+ * The 'type' of the associated
+ * 'r_sample'.
+ */
+typedef enum r_sample_type {
+	PW_PROC_FORK = 0,	/* Sample encodes a process FORK */
+	PW_PROC_EXIT = 1,	/* Sample encodes a process EXIT */
+	PW_PROC_EXEC = 2	/* Sample encodes an EXECVE system call */
+} r_sample_type_t;
+
+typedef struct r_sample {
+	u32 type;
+	pid_t tid, pid;
+	char proc_name[PW_MAX_PROC_NAME_SIZE];
+} r_sample_t;
+
+/*
+ * Temporary fixed-length meta data structure used to describe S-state residency.
+ * (Plan to switch to variable length samples for everything later.)
+ */
+#define PW_MAX_PLATFORM_STATE_NAME_LEN 15
+typedef struct s_residency_meta_sample s_residency_meta_sample_t;
+struct s_residency_meta_sample {
+	u8 num_states;		/* The number of states available including S3.*/
+	char state_names[6][PW_MAX_PLATFORM_STATE_NAME_LEN];	/* The list of state names e.g. S0i0, S0i1, S0i2, S0i3, S3 ...*/
+	/* The order must be same as the order of values stored in residencies*/
+};
+
+/*
+ * Platform state (a.k.a. S state) residency counter sample
+ */
+typedef struct event_sample s_residency_sample_t;
+/*
+ * Platform state (a.k.a. S state) sample
+ */
+typedef struct s_state_sample {
+	u32 state;		/* S-state*/
+} s_state_sample_t;
+
+typedef struct event_sample d_residency_t;
+
+/*
+ * Device state (a.k.a. D state) residency counter sample
+ */
+typedef struct d_residency_sample {
+	u16 device_type;	/* Either NORTH_COMPLEX or SOUTH_COMPLEX*/
+	u16 num_sampled;
+	u16 mask[PW_MAX_DEVICES_PER_SAMPLE];	/* Each bit indicates whether LSS residency is counted or not.*/
+	/* 1 means "counted", 0 means "not counted"*/
+	/* The last byte indicates the number of LSSes sampled*/
+	d_residency_t d_residency_counters[PW_MAX_DEVICES_PER_SAMPLE];	/* we can fit at most '2' samples in every 'PWCollector_sample_t'*/
+} d_residency_sample_t;
+
+/*
+ * Device state (a.k.a. D state) sample from north or south complex
+ */
+typedef struct d_state_sample {
+	char device_type;	/* Either NORTH_COMPLEX or SOUTH_COMPLEX*/
+	u32 states[4];		/* Each device state is represented in 2 bits*/
+} d_state_sample_t;
+
+/*
+ * The 'type' of the associated
+ * 'w_sample'.
+ */
+typedef enum w_sample_type {
+	PW_WAKE_LOCK = 0,	/* Wake lock*/
+	PW_WAKE_UNLOCK = 1,	/* Wake unlock*/
+	PW_WAKE_LOCK_TIMEOUT = 2,	/* Wake lock with timeout*/
+	PW_WAKE_LOCK_INITIAL = 3,	/* Wake locks acquired before collection*/
+	PW_WAKE_UNLOCK_ALL = 4	/* All previously held wakelocks have been unlocked -- used in ACPI S3 notifications*/
+} w_sample_type_t;
+
+/*
+ * Wakelock sample
+ */
+typedef struct w_sample {
+	w_sample_type_t type;	/* Wakelock type*/
+	pid_t tid, pid;
+	char name[PW_MAX_WAKELOCK_NAME_SIZE];	/* Wakelock name*/
+	u64 expires;		/* wakelock timeout in tsc if type is equal to PW_WAKE_LOCK_TIMEOUT,*/
+	/* otherwise 0*/
+	char proc_name[PW_MAX_PROC_NAME_SIZE];	/* process name*/
+} w_sample_t;
+
+/*
+ * Structure to return Dev # <-> Dev Name mappings.
+ */
+typedef struct dev_sample {
+	u16 dev_num;		/* Device ID*/
+	device_type_t dev_type;	/* one of "device_type_t"*/
+	/* The pair (dev_num, dev_type) is a unique ID for each device*/
+	char dev_short_name[PW_MAX_DEV_SHORT_NAME_SIZE];
+	char dev_long_name[PW_MAX_DEV_LONG_NAME_SIZE];
+} dev_sample_t;
+
+/*
+ * Structure to return UID # <-> Package Name mappings.
+ */
+typedef struct pkg_sample {
+	u32 uid;
+	char pkg_name[PW_MAX_PKG_NAME_SIZE];
+} pkg_sample_t;
+
+/*
+ * HACK! (GEH)
+ * Temporary fixed-length structure used to describe a single GPU frequency (p-state) sample.
+ * (Plan to switch to variable length samples for everything later.)
+ */
+typedef gpufreq_msg_t gpu_p_sample_t;
+
+/*
+ * HACK! (GEH)
+ * Temporary fixed-length structure used to describe a single Thermal state sample.
+ * (Plan to switch to variable length samples for everything later.)
+ */
+typedef thermal_msg_t thermal_sample_t;
+
+/*
+ * HACK! (GEH)
+ * Temporary fixed-length structure used to describe a single Bandwidth sample.
+ * (Plan to switch to variable length samples for everything later.)
+ */
+typedef bw_msg_t bw_sample_t;
+/* Number of 8-byte data fields in a bandwidth sample:  (subtract out index and duration fields)*/
+#define PW_NUM_BANDWIDTH_COUNT_FIELDS  ( (sizeof(bw_sample_t) - sizeof(u16) - sizeof(u64) ) >> 3 )
+
+/*
+ * HACK! (GEH)
+ * Temporary fixed-length Structure used to describe a single Thermal component.
+ * (Plan to switch to variable length samples for everything later.)
+ * Multiple fixed-length samples may be chained to increase the available frequencies beyond PW_MAX_FREQS_PER_META_SAMPLE
+ * In this case, sample order determines frequency order for visualization.
+ */
+typedef struct gpu_freq_sample {
+	u16 num_available_freqs;	/* Number of available gpu frequencies given in this sample.*/
+	u16 available_freqs[PW_MAX_FREQS_PER_META_SAMPLE];	/* List of all available frequencies.*/
+	/* The unit of frequency here is Mhz.*/
+} gpu_freq_sample_t;
+
+/*
+ * HACK! (GEH)
+ * Temporary fixed-length Structure used to describe a single Thermal component.
+ * (Plan to switch to variable length samples for everything later.)
+ */
+typedef struct thermal_comp_sample {
+	u16 thermal_comp_num;	/* index used for matching thermal component index in thermal_sample*/
+	thermal_unit_t thermal_unit;	/* 0 is Fahrenheit and 1 is Celcius.*/
+	char thermal_comp_name[PW_MAX_METADATA_NAME];	/* Name of component like Core, Skin, MSIC Die, SOC, ...*/
+} thermal_comp_sample_t;
+
+/*
+ * HACK! (GEH)
+ * Temporary fixed-length Structure used to describe a single Bandwidth component/pathway.
+ * (Plan to switch to variable length samples for everything later.)
+ */
+typedef struct bw_comp_sample {
+	u16 bw_comp_num;	/* Index used for matching bandwidth component index in bw_sample*/
+	char bw_comp_name[PW_MAX_METADATA_NAME];	/* Names of component/pathway like Core to DDR0, Core to DDR1, ISP, GFX, IO, DISPLAY ...*/
+} bw_comp_sample_t;
+
+/*
+ * Temporary fixed-length Structure used to describe all elements of a single Bandwidth component/pathway.
+ * (Plan to switch to variable length samples for everything later.)
+ */
+typedef struct bw_multi_meta_sample {
+	u16 index;		/* Array index to components defined in bw_meta_data.*/
+	/* Index must be [0, num_components)*/
+	/* Currently, this is ALWAYS ZERO (because only one VISA metric can be collected at a time).*/
+	u16 bw_comp_element_index;	/* Index used for matching individual element of a bandwidth component*/
+	char name[PW_MAX_METADATA_NAME];	/* Individual names for each element in a component e.g. "Read32", "WritePartial" "DDR-0 Rank-0 Read64"*/
+} bw_multi_meta_sample_t;
+
+typedef struct dram_srr_comp_sample dram_srr_comp_sample_t;
+struct dram_srr_comp_sample {
+	u16 comp_idx;		/* Index used for matching bandwidth component index in bw_sample*/
+	char comp_name[PW_MAX_METADATA_NAME];	/* Names of components like DUNIT0, DUNIT1...*/
+};
+
+typedef struct dram_srr_sample {
+	u16 index;		/* The index of components matched with comp_idx in dram_srr_comp_sample.*/
+	u64 duration;		/* The unit should be TSC ticks.*/
+	u64 residency_cpu_ticks;	/* Residency in terms of CPU clock ticks i.e. TSC*/
+	/* This field is for VTune visualization.*/
+	u64 residency_soc_ticks;	/* Residency in terms of SOC clock ticks*/
+} dram_srr_sample_t;
+
+#ifndef __KERNEL__
+#include "pw_user_structs.h"
+#endif
+
+typedef enum PWCollector_cmd {
+	PW_START = 1,
+	PW_DETACH = 2,
+	PW_PAUSE = 3,
+	PW_RESUME = 4,
+	PW_STOP = 5,
+	PW_CANCEL = 6,
+	PW_SNAPSHOT = 7,
+	PW_STATUS = 8,
+	PW_MARK = 9
+} PWCollector_cmd_t;
+
+/*
+ * UPDATE: Whenever a new type is added here,
+ * the config parser (PWParser) needs to be updated accordingly.
+ */
+typedef enum power_data {
+	PW_SLEEP = 0,		/* DD should register all timer and sleep-related tracepoints */
+	PW_KTIMER = 1,		/* DD should collect kernel call stacks */
+	PW_FREQ = 2,		/* DD should collect P-state transition information */
+	PW_PLATFORM_RESIDENCY = 3,	/* DD should collect S-state residency information */
+	PW_PLATFORM_STATE = 4,	/* DD should collect S-state samples */
+	PW_DEVICE_SC_RESIDENCY = 5,	/* DD should collect South-Complex D-state residency information */
+	PW_DEVICE_NC_STATE = 6,	/* DD should collect North-Complex D-state samples */
+	PW_DEVICE_SC_STATE = 7,	/* DD should collect South-Complex D-state samples */
+	PW_WAKELOCK_STATE = 8,	/* DD should collect wakelock samples */
+	PW_POWER_C_STATE = 9,	/* DD should collect C-state samples */
+	PW_THERMAL_CORE = 10,	/* DD should collect Core temperature samples */
+	PW_THERMAL_SOC_DTS = 11,	/* DD should collect SOC_DTS readings samples */
+	PW_THERMAL_SKIN = 12,	/* DD should collect SKIN temperature samples */
+	PW_THERMAL_MSIC = 13,	/* DD should collect MSIC die temperature samples */
+	PW_GPU_FREQ = 14,	/* DD should collect GPU Frequency samples */
+	PW_BANDWIDTH_DRAM = 15,	/* DD should collect DDR bandwidth samples */
+	PW_BANDWIDTH_CORE = 16,	/* DD should collect Core to DDR bandwidth samples */
+	PW_BANDWIDTH_GPU = 17,	/* DD should collect GPU to DDR bandwidth samples */
+	PW_BANDWIDTH_DISP = 18,	/* DD should collect Display to DDR bandwidth samples */
+	PW_BANDWIDTH_ISP = 19,	/* DD should collect ISP to DDR bandwidth samples */
+	PW_BANDWIDTH_IO = 20,	/* DD should collect IO bandwidth samples */
+	PW_BANDWIDTH_SRR = 21,	/* DD should collect DRAM Self Refresh residency samples */
+	PW_GPU_C_STATE = 22,	/* DD should collect GPU C-state samples */
+	PW_FPS = 23,		/* DD should collect FPS information */
+	PW_ACPI_S3_STATE = 24,	/* DD should collect ACPI S-state samples */
+	PW_POWER_SNAPSHOT_C_STATE = 25,	/* DD should collect SNAPSHOT C-state data */
+	PW_BANDWIDTH_CORE_MODULE0 = 26,	/* DD should collect Core on Module 0 to DDR bandwidth samples */
+	PW_BANDWIDTH_CORE_MODULE1 = 27,	/* DD should collect Core on Module 1 to DDR bandwidth samples */
+	PW_BANDWIDTH_CORE_32BYTE = 28,	/* DD should collect Core to DDR 32bytes bandwidth samples */
+	PW_BANDWIDTH_CORE_64BYTE = 29,	/* DD should collect Core to DDR 64bytes bandwidth samples */
+	PW_BANDWIDTH_SRR_CH0 = 30,	/* DD should collect Channel 0 DRAM Self Refresh residency samples */
+	PW_BANDWIDTH_SRR_CH1 = 31,	/* DD should collect Channel 1 DRAM Self Refresh residency samples */
+	PW_BANDWIDTH_TUNIT = 32,	/* DD should collect T-Unit bandwidth samples */
+	PW_MAX_POWER_DATA_MASK	/* Marker used to indicate MAX valid 'power_data_t' enum value -- NOT used by DD */
+} power_data_t;
+
+#define POWER_SLEEP_MASK (1ULL << PW_SLEEP)
+#define POWER_KTIMER_MASK (1ULL << PW_KTIMER)
+#define POWER_FREQ_MASK (1ULL << PW_FREQ)
+#define POWER_S_RESIDENCY_MASK (1ULL << PW_PLATFORM_RESIDENCY)
+#define POWER_S_STATE_MASK (1ULL << PW_PLATFORM_STATE)
+#define POWER_D_SC_RESIDENCY_MASK (1ULL << PW_DEVICE_SC_RESIDENCY)
+#define POWER_D_SC_STATE_MASK (1ULL << PW_DEVICE_SC_STATE)
+#define POWER_D_NC_STATE_MASK (1ULL << PW_DEVICE_NC_STATE)
+#define POWER_WAKELOCK_MASK (1ULL << PW_WAKELOCK_STATE)
+#define POWER_C_STATE_MASK ( 1ULL << PW_POWER_C_STATE )
+#define POWER_THERMAL_CORE_MASK (1ULL << PW_THERMAL_CORE)
+#define POWER_THERMAL_SOC_DTS_MASK (1ULL << PW_THERMAL_SOC_DTS)
+#define POWER_THERMAL_SKIN_MASK (1ULL << PW_THERMAL_SKIN)
+#define POWER_THERMAL_MSIC_MASK (1ULL << PW_THERMAL_MSIC)
+#define POWER_GPU_FREQ_MASK (1ULL << PW_GPU_FREQ)
+#define POWER_BANDWIDTH_DRAM_MASK (1ULL << PW_BANDWIDTH_DRAM )
+#define POWER_BANDWIDTH_CORE_MASK (1ULL << PW_BANDWIDTH_CORE )
+#define POWER_BANDWIDTH_GPU_MASK (1ULL << PW_BANDWIDTH_GPU )
+#define POWER_BANDWIDTH_DISP_MASK (1ULL << PW_BANDWIDTH_DISP )
+#define POWER_BANDWIDTH_ISP_MASK (1ULL << PW_BANDWIDTH_ISP )
+#define POWER_BANDWIDTH_IO_MASK (1ULL << PW_BANDWIDTH_IO )
+#define POWER_BANDWIDTH_SRR_MASK (1ULL << PW_BANDWIDTH_SRR )
+#define POWER_GPU_C_STATE_MASK (1ULL << PW_GPU_C_STATE )
+#define POWER_FPS_MASK (1ULL << PW_FPS )
+#define POWER_ACPI_S3_STATE_MASK (1ULL << PW_ACPI_S3_STATE )
+#define POWER_SNAPSHOT_C_STATE_MASK (1ULL << PW_POWER_SNAPSHOT_C_STATE )
+#define POWER_BANDWIDTH_CORE_MODULE0_MASK (1ULL << PW_BANDWIDTH_CORE_MODULE0 )
+#define POWER_BANDWIDTH_CORE_MODULE1_MASK (1ULL << PW_BANDWIDTH_CORE_MODULE1)
+#define POWER_BANDWIDTH_CORE_32BYTE_MASK (1ULL << PW_BANDWIDTH_CORE_32BYTE )
+#define POWER_BANDWIDTH_CORE_64BYTE_MASK (1ULL << PW_BANDWIDTH_CORE_64BYTE )
+#define POWER_BANDWIDTH_SRR_CH0_MASK (1ULL << PW_BANDWIDTH_SRR_CH0 )
+#define POWER_BANDWIDTH_SRR_CH1_MASK (1ULL << PW_BANDWIDTH_SRR_CH1)
+#define POWER_BANDWIDTH_TUNIT_MASK (1ULL << PW_BANDWIDTH_TUNIT )
+
+#define SET_COLLECTION_SWITCH(m,s) ( (m) |= (1ULL << (s) ) )
+#define RESET_COLLECTION_SWITCH(m,s) ( (m) &= ~(1ULL << (s) ) )
+#define WAS_COLLECTION_SWITCH_SET(m, s) ( (m) & (1ULL << (s) ) )
+
+/*
+ * Platform-specific config struct.
+ */
+typedef struct platform_info {
+	int residency_count_multiplier;
+	int bus_clock_freq_khz;
+	int coreResidencyMSRAddresses[MAX_MSR_ADDRESSES];
+	int pkgResidencyMSRAddresses[MAX_MSR_ADDRESSES];
+	u64 reserved[3];
+} platform_info_t;
+
+/*
+ * Config Structure. Includes platform-specific
+ * stuff and power switches.
+ */
+struct PWCollector_config {
+	/* int data;*/
+	u64 data;		/* collection switches.*/
+	u32 d_state_sample_interval;	/* This is the knob to control the frequency of D-state data sampling*/
+	/* to adjust their collection overhead in the unit of msec.*/
+	platform_info_t info;
+};
+
+/*
+ * Some constants used to describe kernel features
+ * available to the power driver.
+ */
+#define PW_KERNEL_SUPPORTS_CALL_STACKS (1 << 0)
+#define PW_KERNEL_SUPPORTS_CONFIG_TIMER_STATS (1 << 1)
+#define PW_KERNEL_SUPPORTS_WAKELOCK_PATCH (1 << 2)
+/*
+ * Some constants used to describe arch features enabled
+ */
+#define PW_ARCH_ANY_THREAD_SET (1 << 0)
+#define PW_ARCH_AUTO_DEMOTE_ENABLED (1 << 1)
+
+/*
+ * Structure to encode unsupported tracepoints and
+ * kernel features that enable power collection.
+ */
+struct PWCollector_check_platform {
+	char unsupported_tracepoints[4096];
+	/*
+	 * Bitwise 'OR' of zero or more of the
+	 * 'PW_KERNEL_SUPPORTS_' constants described
+	 * above.
+	 */
+	u32 supported_kernel_features;
+	u32 supported_arch_features;
+	u64 reserved[3];
+};
+
+/*
+ * Structure to return status information.
+ */
+struct PWCollector_status {
+	u32 num_cpus;
+	u32 time;
+	u32 c_breaks;
+	u32 timer_c_breaks;
+	u32 inters_c_breaks;
+	u32 p_trans;
+	u32 num_inters;
+	u32 num_timers;
+};
+
+/*
+ * Structure to return version information.
+ */
+struct PWCollector_version_info {
+	int version;
+	int inter;
+	int other;
+};
+
+/*
+ * Structure to return specific microcode
+ * patch -- for MFLD development steppings.
+ */
+struct PWCollector_micro_patch_info {
+	u32 patch_version;
+};
+
+/*
+ * Helper struct for IRQ <-> DEV name mappings.
+ */
+typedef struct PWCollector_irq_mapping {
+	int irq_num;
+	char irq_name[PW_MAX_IRQ_NAME_SIZE];
+} PWCollector_irq_mapping_t;
+/*
+ * Structure to return IRQ <-> DEV name mappings.
+ */
+struct PWCollector_irq_mapping_block {
+	/*
+	 * INPUT param: if >= 0 ==> indicates
+	 * the client wants information for
+	 * a SPECIFIC IRQ (and does not want
+	 * ALL mappings).
+	 */
+	int requested_irq_num;
+	/*
+	 * OUTPUT param: records number of
+	 * valid entries in the 'mappings'
+	 * array.
+	 */
+	int size;
+	/*
+	 * INPUT/OUTPUT param: records from which IRQ
+	 * entry the client wants mapping info.
+	 * Required because the driver
+	 * may return LESS than the total number
+	 * of IRQ mappings, in which case the client
+	 * is expected to call this IOCTL
+	 * again, specifying the offset.
+	 */
+	int offset;
+	/*
+	 * The array of mappings.
+	 */
+	PWCollector_irq_mapping_t mappings[PW_MAX_NUM_IRQ_MAPPINGS_PER_BLOCK];
+};
+
+typedef struct PWCollector_proc_mapping {
+	pid_t pid, tid;
+	char name[PW_MAX_PROC_NAME_SIZE];
+} PWCollector_proc_mapping_t;
+/*
+ * Structure to return PID <-> PROC name mappings.
+ */
+struct PWCollector_proc_mapping_block {
+	/*
+	 * OUTPUT param: records number of
+	 * valid entries in the 'mappings'
+	 * array.
+	 */
+	int size;
+	/*
+	 * INPUT/OUTPUT param: records from which PROC
+	 * entry the client wants mapping info.
+	 * may return LESS than the total number
+	 * of PROC mappings, in which case the client
+	 * is expected to call this IOCTL
+	 * again, specifying the offset.
+	 */
+	int offset;
+	/*
+	 * The array of mappings.
+	 */
+	PWCollector_proc_mapping_t mappings[PW_MAX_NUM_PROC_MAPPINGS_PER_BLOCK];
+};
+
+/*
+ * Structure to return TURBO frequency
+ * threshold.
+ */
+struct PWCollector_turbo_threshold {
+	u32 threshold_frequency;
+};
+
+/*
+ * Structure to return 'available
+ * frequencies' i.e. the list of
+ * frequencies the processor
+ * may execute at.
+ */
+struct PWCollector_available_frequencies {
+	/*
+	 * Number of valid entries in the
+	 * 'frequencies' array -- supplied
+	 * by the DD.
+	 */
+	u32 num_freqs;
+	/*
+	 * List of available frequencies, in kHz.
+	 */
+	u32 frequencies[PW_MAX_NUM_AVAILABLE_FREQUENCIES];
+};
+
+/*
+ * Different IO mechanism types.
+ */
+typedef enum {
+	PW_IO_MSR = 0,
+	PW_IO_IPC = 1,
+	PW_IO_MMIO = 2,
+	PW_IO_PCI = 3,
+	PW_IO_MAX = 4
+} pw_io_type_t;
+
+typedef struct PWCollector_platform_res_info PWCollector_platform_res_info_t;
+struct PWCollector_platform_res_info {
+	/*
+	 * IPC commands for platform residency
+	 * Valid ONLY if 'collection_type' == 'PW_IO_IPC'
+	 * ('u32' is probably overkill for these, 'u16' should work just fine)
+	 */
+	u32 ipc_start_command, ipc_start_sub_command;	/* START IPC command, sub-cmd*/
+	u32 ipc_stop_command, ipc_stop_sub_command;	/* STOP IPC command, sub-cmd*/
+	u32 ipc_dump_command, ipc_dump_sub_command;	/* DUMP IPC command, sub-cmd*/
+	u16 num_addrs;		/* Number of 64b addresses encoded in the 'addrs' array, below*/
+	u8 collection_type;	/* One of 'pw_io_type_t'*/
+	u8 counter_size_in_bytes;	/* Usually either 4 (for 32b counters) or 8 (for 64b counters)*/
+	char addrs[1];		/* Array of 64bit addresses; size of array == 'num_addrs'*/
+};
+#define PW_PLATFORM_RES_INFO_HEADER_SIZE() (sizeof(PWCollector_platform_res_info_t) - sizeof(char[1]))
+
+/*
+ * Wrapper for ioctl arguments.
+ * EVERY ioctl MUST use this struct!
+ */
+struct PWCollector_ioctl_arg {
+	int in_len;
+	int out_len;
+	const char *in_arg;
+	char *out_arg;
+};
+
+#endif /* _DATA_STRUCTURES_H_*/
diff --git a/drivers/external_drivers/drivers/socwatch/socwatch_driver/pw_types.h b/drivers/external_drivers/drivers/socwatch/socwatch_driver/pw_types.h
new file mode 100644
index 0000000..21be9ec
--- /dev/null
+++ b/drivers/external_drivers/drivers/socwatch/socwatch_driver/pw_types.h
@@ -0,0 +1,130 @@
+/* ***********************************************************************************************
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2013 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  Contact Information:
+  SOCWatch Developer Team <socwatchdevelopers@intel.com>
+
+  BSD LICENSE
+
+  Copyright(c) 2013 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+  ***********************************************************************************************
+*/
+
+#ifndef _PW_TYPES_H_
+#define _PW_TYPES_H_
+
+#if defined (__linux__)
+
+#ifndef __KERNEL__
+/*
+ * Called from Ring-3.
+ */
+#include <stdint.h>		/* Grab 'uint64_t' etc.*/
+/*
+ * UNSIGNED types...
+ */
+typedef uint8_t u8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef uint64_t u64;
+/*
+ * SIGNED types...
+ */
+typedef int8_t s8;
+typedef int16_t s16;
+typedef int32_t s32;
+typedef int64_t s64;
+
+#endif /* __KERNEL__*/
+
+#elif defined (_WIN32)
+/*
+ * UNSIGNED types...
+ */
+typedef unsigned char u8;
+typedef unsigned short u16;
+typedef unsigned int u32;
+typedef unsigned long long u64;
+/*
+ * SIGNED types...
+ */
+typedef signed char s8;
+typedef signed short s16;
+typedef signed int s32;
+typedef signed long long s64;
+typedef s32 pid_t;
+typedef s32 ssize_t;
+
+#endif /* _WIN32*/
+
+/* ************************************
+ * Common to both operating systems.
+ * ************************************
+ */
+/*
+ * UNSIGNED types...
+ */
+typedef u8 pw_u8_t;
+typedef u16 pw_u16_t;
+typedef u32 pw_u32_t;
+typedef u64 pw_u64_t;
+
+/*
+ * SIGNED types...
+ */
+typedef s8 pw_s8_t;
+typedef s16 pw_s16_t;
+typedef s32 pw_s32_t;
+typedef s64 pw_s64_t;
+
+typedef pid_t pw_pid_t;
+
+#endif /* _PW_TYPES_H_*/
diff --git a/drivers/external_drivers/drivers/socwatch/socwatch_driver/pw_version.h b/drivers/external_drivers/drivers/socwatch/socwatch_driver/pw_version.h
new file mode 100644
index 0000000..f2c07c6
--- /dev/null
+++ b/drivers/external_drivers/drivers/socwatch/socwatch_driver/pw_version.h
@@ -0,0 +1,106 @@
+/* ***********************************************************************************************
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2013 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  Contact Information:
+  SOCWatch Developer Team <socwatchdevelopers@intel.com>
+
+  BSD LICENSE
+
+  Copyright(c) 2013 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+  ***********************************************************************************************
+*/
+
+#ifndef _PW_VERSION_H_
+#define _PW_VERSION_H_ 1
+
+/*
+ * SOCWatch driver version
+ * Current driver version is 1.0.0
+ * Current driver version is 1.1.0
+ */
+#define PW_DRV_VERSION_MAJOR 1
+#define PW_DRV_VERSION_MINOR 5
+#define PW_DRV_VERSION_OTHER 1
+#define PW_DRV_VERSION_STRING "1.5"	/* used by matrix*/
+#define PW_DRV_NAME "socwatch1_5"
+
+/*
+ * Every SOCWatch component shares the same version number.
+ */
+#define SOCWATCH_VERSION_MAJOR 1
+#define SOCWATCH_VERSION_MINOR 5
+#define SOCWATCH_VERSION_OTHER 1
+
+/*
+ * WUWatch driver version
+ */
+#define PW_DRV_VERSION 3
+#define PW_DRV_INTERFACE 1
+#define PW_DRV_OTHER 9
+
+/*
+ * Every wuwatch component shares the same version number.
+ * THIS WILL BE REMOVED WHEN NOT USED IN WUWATCH
+ */
+#define WUWATCH_VERSION_VERSION 3
+#define WUWATCH_VERSION_INTERFACE 1
+#define WUWATCH_VERSION_OTHER 9
+
+/*
+ * Power interface version
+ * Current interface version is 0.1.0
+ * Current interface version is 0.2.0
+ */
+#define PW_INT_VERSION_VERSION 0
+#define PW_INT_VERSION_INTERFACE 2
+#define PW_INT_VERSION_OTHER 0
+
+#endif /* _PW_VERSION_H_*/
diff --git a/drivers/external_drivers/intel_media/Kconfig b/drivers/external_drivers/intel_media/Kconfig
new file mode 100644
index 0000000..35f2bbb
--- /dev/null
+++ b/drivers/external_drivers/intel_media/Kconfig
@@ -0,0 +1,197 @@
+#
+# Drm device configuration
+#
+# This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+#
+
+config DRM_INTEL_MID
+	tristate "Intel Moorestown/Medfield (load along with IMG driver)"
+	depends on DRM && PCI
+	select FB_CFB_COPYAREA
+        select FB_CFB_FILLRECT
+        select FB_CFB_IMAGEBLIT
+        select DRM_KMS_HELPER
+        select GENERIC_ALLOCATOR
+        select DRM_TTM
+	help
+	  xxxxxxxxxxxxxxxxxxxxxxxxxx
+
+choice
+	prompt "Build IMG kernel service as "
+	depends on DRM_INTEL_MID
+	default DRM_MID_RELEASE
+
+config DRM_MID_RELEASE
+	bool "Release"
+	depends on DRM_INTEL_MID
+	help
+	  Build IMG kernel services as release
+
+config DRM_MID_DEBUG
+	bool "Debug"
+	depends on DRM_INTEL_MID
+	help
+	 Build IMG kernel services as debug
+
+endchoice
+
+config SUPPORT_HDMI
+        bool "SUPPORT_HDMI"
+        depends on DRM_MDFLD || DRM_CTP || DRM_I915 || DRM_MRFLD
+        default n
+        help
+          Choose this option to support HDMI.
+
+config INTEL_HDMI_AUDIO
+	tristate "INTEL_HDMI_AUDIO"
+	depends on SUPPORT_HDMI
+	default y
+	help
+	  Controls HDMI audio support.
+	  Specify option INTEL_HDMI_AUDIO as:
+	  y - built-in
+	  m - module
+	  n - not supported.
+
+config SUPPORT_MIPI
+	bool "SUPPORT_MIPI"
+	depends on DRM_MDFLD || DRM_CTP || DRM_I915 || DRM_MRFLD
+	default n
+	help
+	  Choose this option to support MIPI.
+
+config R63311_MIPI_VIDEO_MODE
+	bool "support jdi 1920x1080 MIPI panel"
+	default n
+
+config DRM_INTEL_HANDSET
+	tristate "Intel Display and IMG/RGX Graphics Driver Support"
+	depends on DRM && PCI
+	select FB_CFB_COPYAREA
+        select FB_CFB_FILLRECT
+        select FB_CFB_IMAGEBLIT
+        select DRM_KMS_HELPER
+        select DRM_TTM
+	help
+	  Specify DRM_INTEL_HANDSET to control presence of display and IMG
+	  RGX graphics driver as:
+	  y - built-in
+	  m - module
+	  n - not present
+
+choice
+	prompt "Build IMG kernel service as "
+	depends on DRM_INTEL_HANDSET
+	default DRM_HANDSET_RELEASE
+
+config DRM_HANDSET_RELEASE
+	bool "Release"
+	depends on DRM_INTEL_HANDSET
+	help
+	  Build IMG kernel services as release
+
+config DRM_HANDSET_DEBUG
+	bool "Debug"
+	depends on DRM_INTEL_HANDSET
+	help
+	  Build IMG kernel services as debug
+
+endchoice
+
+config GFX_RGX_BVNC
+	string
+	depends on DRM_INTEL_HANDSET
+	prompt "IMG graphics driver BNVC specification"
+	help
+	    A build configuration parameter which must have the same value
+	    for the IMG kernel mode driver and the IMG user mode driver.
+	    The BVNC value will differ depending upon target (e.g.,
+	    Merrifield vs. Moorefield) and desired build configuration.
+	    Normally a value recommended by IMG is used.
+	    Examples as of 2013-12-06:
+	    - Merrifield - 1.14.4.4 - DDK1.2
+	    - Merrifield - 1.76.4.6 - DDK1.3
+	    - Moorefield - 1.72.4.12 - either DDK
+
+config MRFL_DISPLAY
+	tristate "Intel Merrifield Generic Framebuffer Driver"
+	depends on PCI && X86_MRFLD && !DRM_MDFLD && FB
+	default n
+	select FB_CFB_COPYAREA
+        select FB_CFB_FILLRECT
+        select FB_CFB_IMAGEBLIT
+	help
+	   Basic support for Generic Framebuffer Driver for Merrifield.
+
+config DRM_MRFLD
+        tristate "Intel Merrifield Graphics Driver Support with IMG"
+        depends on DRM_INTEL_HANDSET && PCI
+        select FB_CFB_COPYAREA
+        select FB_CFB_FILLRECT
+        select FB_CFB_IMAGEBLIT
+        select DRM_KMS_HELPER
+        select GENERIC_ALLOCATOR
+        help
+          Choose this option if you have a Merrifield platform.
+          If M is selected the module will be called mid_gfx.
+
+config PDUMP
+        bool "Enable pdump in Merrifield graphics driver"
+        depends on DRM_MRFLD
+        default n
+        help
+          Choose this option to enable pdump on Merrifield.
+
+config MID_DSI_DSR
+	bool "Support DSI Fullscreen Display Self Refreshment "
+	depends on DRM_INTEL_HANDSET && !MID_DSI_DPU
+	default y
+	help
+	  Choose this option if you have a Type1 MIPI panel.
+
+config MID_DSI_DPU
+	bool "Support DSI Display Partial Update"
+	depends on DRM_INTEL_HANDSET
+	default n
+	help
+	  xxxxxx
+
+config GFX_RTPM
+	bool "Enable GFX runtime_pm"
+	depends on DRM_INTEL_HANDSET
+	default n
+	help
+          xxxxxx
+
+config SUPPORT_VSP
+        bool "SUPPORT_VIDEO_VSP"
+        depends on DRM_MRFLD
+        default n
+        help
+          xxxxxx
+
+config GFX_RGX_DEVFREQ
+	tristate "IMG Rogue Graphics DEVFREQ Driver"
+	default y
+	depends on PM_DEVFREQ && DRM_INTEL_HANDSET
+	select DEVFREQ_GOV_SIMPLE_ONDEMAND
+	select DEVFREQ_GOV_PERFORMANCE
+	select DEVFREQ_GOV_POWERSAVE
+	help
+	  This adds the DEVFREQ driver for Imagination Technologies (IMG)
+	  Rogue/Hood graphics (RGX).  This driver helps control GPU clock
+	  speed in order to provide high performance on demand and power
+	  savings when not busy or when thermal conditions suggest.
+
+config MOOREFIELD
+	bool "Build option for the Moorefield platform"
+	depends on DRM_INTEL_HANDSET
+	default n
+	help
+	  For the GFX block of TNG/ANN, its power islands' power on/off sequences are:
+	  1. Power on:  SLC_LDO -> SLC -> Sidekick -> Rascal/Dust;
+	  2. Power off: Rascal/Dust -> Sidekick -> SLC -> SLC_LDO;
+	  So we should follow the sequence, and VED/VEC/VSP power islands just depend
+	  on the SLC power island.
+	  And it would distinguish the Display Class implementation.
diff --git a/drivers/external_drivers/intel_media/Makefile b/drivers/external_drivers/intel_media/Makefile
new file mode 100644
index 0000000..740ffcf
--- /dev/null
+++ b/drivers/external_drivers/intel_media/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_DRM_INTEL_HANDSET)	+= bld/merrifield/tng/
+obj-$(CONFIG_INTEL_HDMI_AUDIO)  += bld/hdmi_audio/
+obj-$(CONFIG_GFX_RGX_DEVFREQ)   += graphics/dfrgx/
diff --git a/drivers/external_drivers/intel_media/bld/hdmi_audio/Makefile b/drivers/external_drivers/intel_media/bld/hdmi_audio/Makefile
new file mode 100644
index 0000000..8ee5c66
--- /dev/null
+++ b/drivers/external_drivers/intel_media/bld/hdmi_audio/Makefile
@@ -0,0 +1,35 @@
+DRIVER_NAME := hdmi_audio
+
+MEDIADIR=../..
+HDMI_AUDIO_SRC=$(MEDIADIR)/hdmi_audio
+
+MEDIAINC=drivers/external_drivers/intel_media
+PNWDISPLAYINC=$(MEDIAINC)/display/pnw/drv
+TNGDISPLAYINC=$(MEDIAINC)/display/tng/drv
+OTM_HDMI_INC=$(MEDIAINC)/otm_hdmi
+
+include_dirs := \
+	-I$(MEDIAINC)/common \
+	-I$(MEDIAINC)/interface \
+	-I$(OTM_HDMI_INC)/os/android/include \
+	-I$(OTM_HDMI_INC)/pil/include \
+	-I$(TNGDISPLAYINC)/ospm
+
+ccflags-y += $(include_dirs)
+
+ifdef CONFIG_DRM_MRFLD
+ccflags-y += -I$(TNGDISPLAYINC)
+else
+ifdef CONFIG_DRM_I915
+ccflags-y += -Idrivers/gpu/drm/i915
+else
+ccflags-y += -I$(PNWDISPLAYINC)
+endif
+endif
+
+$(DRIVER_NAME)-objs += \
+	$(HDMI_AUDIO_SRC)/intel_mid_hdmi_audio.o \
+	$(HDMI_AUDIO_SRC)/intel_mid_hdmi_audio_if.o \
+	$(HDMI_AUDIO_SRC)/intel_mid_hdmi_audio_debug.o
+
+obj-$(CONFIG_INTEL_HDMI_AUDIO) += $(DRIVER_NAME).o
diff --git a/drivers/external_drivers/intel_media/bld/merrifield/rgx/Makefile b/drivers/external_drivers/intel_media/bld/merrifield/rgx/Makefile
new file mode 100644
index 0000000..2d18475
--- /dev/null
+++ b/drivers/external_drivers/intel_media/bld/merrifield/rgx/Makefile
@@ -0,0 +1,340 @@
+# Makefile for the drm device driver. This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+# DRIVER_NAME := pvrrgx
+DRIVER_NAME := tngdisp
+
+obj-$(CONFIG_DRM_INTEL_HANDSET) += $(DRIVER_NAME).o
+
+KERNELDIR ?= $(ANDROID_BUILD_TOP)/$(KERNEL_SRC_DIR)
+
+MEDIADIR=../../..
+RGXDIR=$(MEDIADIR)/graphics/rgx
+DISPLAYDIR=$(MEDIADIR)/display/tng/drv
+
+MEDIAINC=drivers/external_drivers/intel_media
+RGXINC=$(MEDIAINC)/graphics/rgx
+DISPINC=$(MEDIAINC)/display/tng
+
+ccflags-y += \
+        -I$(MEDIAINC)/interface \
+	-I$(RGXINC)/include \
+	-I$(RGXINC)/generated/rgxcmp_bridge \
+	-I$(RGXINC)/generated/dmm_bridge \
+	-I$(RGXINC)/generated/pdumpmm_bridge \
+	-I$(RGXINC)/generated/dc_bridge \
+	-I$(RGXINC)/generated/cachegeneric_bridge \
+	-I$(RGXINC)/generated/dpvrtl_bridge \
+	-I$(RGXINC)/generated/dri_bridge \
+	-I$(RGXINC)/generated/hostportio_bridge \
+	-I$(RGXINC)/generated/rgxtq_bridge \
+	-I$(RGXINC)/generated/mm_bridge \
+	-I$(RGXINC)/generated/cmm_bridge \
+	-I$(RGXINC)/generated/rgxpdump_bridge \
+	-I$(RGXINC)/generated/smm_bridge \
+	-I$(RGXINC)/generated/pdump_bridge \
+	-I$(RGXINC)/generated/syncsexport_bridge \
+	-I$(RGXINC)/generated/sync_bridge \
+	-I$(RGXINC)/generated/debugmisc_bridge \
+	-I$(RGXINC)/generated/syncexport_bridge \
+	-I$(RGXINC)/generated/dsync_bridge \
+	-I$(RGXINC)/generated/rgxinit_bridge \
+	-I$(RGXINC)/generated/rgxta3d_bridge \
+	-I$(RGXINC)/generated/srvcore_bridge \
+	-I$(RGXINC)/generated/breakpoint_bridge \
+	-I$(RGXINC)/generated/pvrtl_bridge \
+	-I$(RGXINC)/generated/rgxhwperf_bridge \
+	-I$(RGXINC)/generated/regconfig_bridge \
+	-I$(RGXINC)/generated/ion_bridge \
+	-I$(RGXINC)/generated/ri_bridge \
+	-I$(RGXINC)/generated/timerquery_bridge \
+	-I$(RGXINC)/generated/devicememhistory_bridge \
+	-I$(RGXINC)/generated/pdumpctrl_bridge \
+	-I$(RGXINC)/tools/intern/debug/include \
+	-I$(RGXINC)/tools/intern/debug/dbgdriv/common \
+	-I$(RGXINC)/hwdefs \
+	-I$(RGXINC)/hwdefs/km \
+	-I$(RGXINC)/hwdefs/km/cores \
+	-I$(RGXINC)/services/include \
+	-I$(RGXINC)/services/include/shared \
+	-I$(RGXINC)/services/include/env/linux \
+	-I$(RGXINC)/services/3rdparty/intel_drm \
+	-I$(RGXINC)/services/3rdparty/intel_devfreq \
+	-I$(RGXINC)/services/system/rgx_intel \
+	-I$(RGXINC)/services/system/include \
+	-I$(RGXINC)/services/system/common/env/linux \
+	-I$(RGXINC)/services/shared/devices/rgx \
+	-I$(RGXINC)/services/shared/include \
+	-I$(RGXINC)/services/server/env/linux \
+	-I$(RGXINC)/services/server/include \
+	-I$(RGXINC)/services/server/devices/rgx \
+	-I$(RGXINC)/kernel/drivers/staging/imgtec \
+	-I$(DISPINC)/interface \
+	-I$(DISPINC)/drv/ospm \
+	-I$(DISPINC)/drv \
+	-Iinclude/linux \
+	-Iinclude/drm \
+	-Idrivers/staging/android \
+	-Idrivers/staging/android/ion \
+	-Iarch/arm/include
+
+ifeq ($(CONFIG_GFX_RGX_BVNC),)
+# If not defined, default to Tangier/Merrifield
+CONFIG_GFX_RGX_BVNC := 1.76.4.6
+endif
+
+cgr_bvnc := $(subst ., ,$(CONFIG_GFX_RGX_BVNC))
+cgr_b := $(word 1, $(cgr_bvnc))
+cgr_v := $(word 2, $(cgr_bvnc))
+cgr_n := $(word 3, $(cgr_bvnc))
+cgr_c := $(word 4, $(cgr_bvnc))
+
+# Example: ccflags-y += -DRGX_BVNC_CORE_KM_HEADER="\"cores/rgxcore_km_1.76.4.6.h\""
+# Example: ccflags-y += -DRGX_BNC_CONFIG_KM_HEADER="\"configs/rgxconfig_km_1.V.4.4.h\""
+
+ccflags-y += -DRGX_BVNC_CORE_KM_HEADER="\"cores/rgxcore_km_$(CONFIG_GFX_RGX_BVNC).h\""
+ccflags-y += -DRGX_BNC_CONFIG_KM_HEADER="\"configs/rgxconfig_km_$(cgr_b).V.$(cgr_n).$(cgr_c).h\""
+
+ccflags-y += \
+	-DANDROID \
+	-DLINUX \
+	-DPVRSRV_MODNAME="\"pvrsrvkm\"" \
+	-DPVR_BUILD_DIR="\"intel_android\"" \
+	-DPVR_LDM_DRIVER_REGISTRATION_NAME="\"pvrsrvkm\"" \
+	-DSUPPORT_RGX=1 \
+	-DSUPPORT_ION=1 \
+	-DSUPPORT_LINUX_X86_WRITECOMBINE \
+	-DSUPPORT_LINUX_X86_PAT \
+	-DSYS_USING_INTERRUPTS \
+	-DLDM_PCI \
+	-DSUPPORT_DRM \
+	-DSUPPORT_DRM_EXT \
+	-DBC_DISCONTIG_BUFFERS \
+	-DCACHEFLUSH_TYPE=CACHEFLUSH_X86 \
+	-DSUPPORT_SECURE_EXPORT \
+	-DSUPPORT_GPUTRACE_EVENTS \
+	-DPVR_LINUX_PHYSMEM_MAX_POOL_PAGES=5120 \
+	-DPVR_LINUX_ARM_PAGEALLOC_FLUSH_THRESHOLD=256 \
+	-DPVR_LINUX_PHYSMEM_MIN_NUM_PAGES=256 \
+	-DPVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER=2 \
+	-DSUPPORT_NATIVE_FENCE_SYNC \
+	-DPVRSRV_NEED_PVR_DPF \
+	-DSUPPORT_SYSTEM_INTERRUPT_HANDLING \
+	-DSUPPORT_SHARED_SLC \
+	-DPVR_MMAP_USE_VM_INSERT \
+	-DPVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD=16384\
+	-DPVR_ANDROID_ION_USE_SG_LENGTH \
+	-DPVRSRV_ENABLE_PROCESS_STATS \
+	-DPVR_ANDROID_SYNC_HEADER="\"../../drivers/staging/android/sync.h\"" \
+	-DGPUVIRT_VALIDATION_NUM_OS=8 \
+	-DPVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN=256 \
+	-DSUPPORT_PERCONTEXT_FREELIST \
+	-DSUPPORT_AUTH \
+	-DPVR_DRM_DRIVER_NAME="\"pvr\"" \
+	$(NULL)
+
+#	-DPVRSRV_ENABLE_FW_TRACE_DEBUGFS \
+
+ccflags-y += \
+	-DSUPPORT_DISPLAY_CLASS \
+	$(NULL)
+
+# Skip the following dependencies on Merrifield Virtual Platform
+ifneq ($(CONFIG_BOARD_MRFLD_VP),y)
+ccflags-y += \
+	-DPVR_LINUX_USING_WORKQUEUES \
+	-DPVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE \
+	-DPVR_LINUX_TIMERS_USING_WORKQUEUES
+endif
+
+ccflags-y += \
+	-DSUPPORT_POWMON_WO_GPIO_PIN 
+
+# for PDUMP
+ccflags-$(CONFIG_PDUMP) += -DPDUMP -DSUPPORT_DBGDRV_EVENT_OBJECTS
+
+# Either BUILD is debug or release. It can NOT be both.
+ccflags-$(CONFIG_DRM_HANDSET_RELEASE) += \
+	-DPVR_BUILD_TYPE="\"release\"" \
+	-DRELEASE
+
+ccflags-$(CONFIG_DRM_HANDSET_DEBUG) += \
+	-DPVR_BUILD_TYPE="\"debug\"" \
+	-DDEBUG \
+	-DDEBUG_LINUX_MEMORY_ALLOCATIONS \
+	-DDEBUG_HANDLEALLOC_KM \
+	-DRGXFW_ALIGNCHECKS \
+	-DPVR_RI_DEBUG
+
+# The following is an option, it can be enabled for more debug information
+# Add 'DEBUG_PVR_BRIDGE=y' to AndroidRGX.mk to enable it.
+ccflags-$(DEBUG_PVR_BRIDGE) += \
+	-DDEBUG_BRIDGE_KM \
+	-DDEBUG_TRACE_BRIDGE_KM \
+	-DDEBUG_BRIDGE_KM_DISPATCH_TABLE
+
+# pvrsrvkm
+$(DRIVER_NAME)-y += \
+	$(RGXDIR)/services/server/common/pmr.o \
+	$(RGXDIR)/services/server/common/devicemem_server.o \
+	$(RGXDIR)/services/server/common/devicemem_heapcfg.o \
+	$(RGXDIR)/services/server/common/devicemem_history_server.o \
+	$(RGXDIR)/services/server/common/dc_server.o \
+	$(RGXDIR)/services/server/common/connection_server.o \
+	$(RGXDIR)/services/server/common/pvrsrv.o \
+	$(RGXDIR)/services/server/common/sync_server.o \
+	$(RGXDIR)/services/server/common/physmem_lma.o \
+	$(RGXDIR)/services/server/common/power.o \
+	$(RGXDIR)/services/server/common/physheap.o \
+	$(RGXDIR)/services/server/common/cache_generic.o \
+	$(RGXDIR)/services/server/common/scp.o \
+	$(RGXDIR)/services/server/common/mmu_common.o \
+	$(RGXDIR)/services/server/common/srvcore.o \
+	$(RGXDIR)/services/server/common/lists.o \
+	$(RGXDIR)/services/server/common/handle.o \
+	$(RGXDIR)/services/server/common/physmem.o \
+	$(RGXDIR)/services/server/common/process_stats.o \
+	$(RGXDIR)/services/server/env/linux/allocmem.o \
+	$(RGXDIR)/services/server/env/linux/mm.o \
+	$(RGXDIR)/services/server/env/linux/physmem_osmem_linux.o \
+	$(RGXDIR)/services/server/env/linux/physmem_tdsecbuf_linux.o \
+	$(RGXDIR)/services/server/env/linux/osfunc_x86.o \
+	$(RGXDIR)/services/server/env/linux/osfunc.o \
+	$(RGXDIR)/services/server/env/linux/event.o \
+	$(RGXDIR)/services/server/env/linux/pvr_bridge_k.o \
+	$(RGXDIR)/services/server/env/linux/pvr_debug.o \
+	$(RGXDIR)/services/server/env/linux/osconnection_server.o \
+	$(RGXDIR)/services/server/env/linux/devicemem_mmap_stub.o \
+	$(RGXDIR)/services/server/env/linux/module.o \
+	$(RGXDIR)/services/server/env/linux/ossecure_export.o \
+	$(RGXDIR)/services/server/env/linux/pdump.o \
+	$(RGXDIR)/services/server/env/linux/pvr_debugfs.o \
+	$(RGXDIR)/services/server/env/linux/pvr_gputrace.o \
+	$(RGXDIR)/services/server/env/linux/trace_events.o \
+	$(RGXDIR)/services/server/env/linux/handle_idr.o \
+	$(RGXDIR)/services/shared/common/dllist.o \
+	$(RGXDIR)/services/shared/common/devicemem.o \
+	$(RGXDIR)/services/shared/common/hash.o \
+	$(RGXDIR)/services/shared/common/devicemem_utils.o \
+	$(RGXDIR)/services/shared/common/sync.o \
+	$(RGXDIR)/services/shared/common/ra.o \
+	$(RGXDIR)/services/shared/common/uniq_key_splay_tree.o \
+	$(RGXDIR)/services/shared/devices/rgx/rgx_compat_bvnc.o \
+	$(RGXDIR)/services/server/common/tlintern.o \
+	$(RGXDIR)/services/server/common/tlstream.o \
+	$(RGXDIR)/services/server/common/tlserver.o \
+        $(RGXDIR)/services/shared/common/tlclient.o \
+
+# SUPPORT_NATIVE_FENCE_SYNC
+$(DRIVER_NAME)-y += \
+	$(RGXDIR)/kernel/drivers/staging/imgtec/pvr_sync.o
+
+$(DRIVER_NAME)-$(CONFIG_DRM_HANDSET_DEBUG) += \
+	$(RGXDIR)/services/server/common/ri_server.o
+
+# for PDUMP
+$(DRIVER_NAME)-$(CONFIG_PDUMP) += \
+	$(RGXDIR)/services/server/common/pdump_common.o \
+	$(RGXDIR)/services/server/common/pdump_mmu.o \
+	$(RGXDIR)/services/server/common/pdump_physmem.o \
+	$(RGXDIR)/tools/intern/debug/dbgdriv/linux/main.o \
+	$(RGXDIR)/tools/intern/debug/dbgdriv/linux/hostfunc.o \
+	$(RGXDIR)/tools/intern/debug/dbgdriv/common/ioctl.o \
+	$(RGXDIR)/tools/intern/debug/dbgdriv/common/dbgdriv.o \
+	$(RGXDIR)/tools/intern/debug/dbgdriv/common/hotkey.o \
+	$(RGXDIR)/tools/intern/debug/dbgdriv/common/handle.o \
+	$(RGXDIR)/services/server/devices/rgx/rgxpdump.o \
+	$(RGXDIR)/services/shared/common/devicemem_pdump.o
+
+# For SUPPORT_RGX
+$(DRIVER_NAME)-y += \
+	$(RGXDIR)/services/server/devices/rgx/rgxinit.o \
+	$(RGXDIR)/services/server/devices/rgx/rgxmmuinit.o \
+	$(RGXDIR)/services/server/devices/rgx/rgxtransfer.o \
+	$(RGXDIR)/services/server/devices/rgx/rgxfwutils.o \
+	$(RGXDIR)/services/server/devices/rgx/rgxcompute.o \
+	$(RGXDIR)/services/server/devices/rgx/rgxutils.o \
+	$(RGXDIR)/services/server/devices/rgx/rgxpower.o \
+	$(RGXDIR)/services/server/devices/rgx/rgxregconfig.o \
+	$(RGXDIR)/services/server/devices/rgx/rgxta3d.o \
+	$(RGXDIR)/services/server/devices/rgx/debugmisc_server.o \
+	$(RGXDIR)/services/server/devices/rgx/rgxmem.o \
+	$(RGXDIR)/services/server/devices/rgx/rgxccb.o \
+	$(RGXDIR)/services/server/devices/rgx/rgxsync.o \
+	$(RGXDIR)/services/server/env/linux/mmap.o \
+	$(RGXDIR)/services/server/devices/rgx/rgxtimerquery.o \
+	$(RGXDIR)/services/server/devices/rgx/rgxdebug.o \
+	$(RGXDIR)/services/server/devices/rgx/rgxbreakpoint.o \
+	$(RGXDIR)/services/server/devices/rgx/rgxhwperf.o \
+	$(RGXDIR)/services/server/devices/rgx/rgxtimecorr.o
+
+# For devfreq
+$(DRIVER_NAME)-y += \
+	$(RGXDIR)/services/3rdparty/intel_devfreq/rgxdf.o \
+	$(RGXDIR)/services/3rdparty/intel_devfreq/dfrgx_utilstats.o \
+	$(RGXDIR)/services/3rdparty/intel_devfreq/dfrgx_interface.o
+
+# For Power Monitor
+#$(DRIVER_NAME)-y += \
+#	$(RGXDIR)/services/server/devices/rgx/rgxpowermon.o
+
+$(DRIVER_NAME)-y += \
+	$(RGXDIR)/generated/cmm_bridge/server_cmm_bridge.o \
+	$(RGXDIR)/generated/dc_bridge/server_dc_bridge.o \
+	$(RGXDIR)/generated/dpvrtl_bridge/client_pvrtl_bridge.o \
+	$(RGXDIR)/generated/pdump_bridge/server_pdump_bridge.o \
+	$(RGXDIR)/generated/srvcore_bridge/server_srvcore_bridge.o \
+	$(RGXDIR)/generated/dsync_bridge/client_sync_bridge.o \
+	$(RGXDIR)/generated/rgxinit_bridge/server_rgxinit_bridge.o \
+	$(RGXDIR)/generated/breakpoint_bridge/server_breakpoint_bridge.o \
+	$(RGXDIR)/generated/pdumpmm_bridge/server_pdumpmm_bridge.o \
+	$(RGXDIR)/generated/rgxcmp_bridge/server_rgxcmp_bridge.o \
+	$(RGXDIR)/generated/debugmisc_bridge/server_debugmisc_bridge.o \
+	$(RGXDIR)/generated/sync_bridge/server_sync_bridge.o \
+	$(RGXDIR)/generated/rgxtq_bridge/server_rgxtq_bridge.o \
+	$(RGXDIR)/generated/dmm_bridge/client_mm_bridge.o \
+	$(RGXDIR)/generated/dpdumpmm_bridge/client_pdumpmm_bridge.o \
+	$(RGXDIR)/generated/rgxta3d_bridge/server_rgxta3d_bridge.o \
+	$(RGXDIR)/generated/rgxpdump_bridge/server_rgxpdump_bridge.o \
+	$(RGXDIR)/generated/pvrtl_bridge/server_pvrtl_bridge.o \
+	$(RGXDIR)/generated/rgxhwperf_bridge/server_rgxhwperf_bridge.o \
+	$(RGXDIR)/generated/regconfig_bridge/server_regconfig_bridge.o \
+	$(RGXDIR)/generated/timerquery_bridge/server_timerquery_bridge.o \
+	$(RGXDIR)/generated/devicememhistory_bridge/server_devicememhistory_bridge.o \
+	$(RGXDIR)/generated/pdumpctrl_bridge/server_pdumpctrl_bridge.o
+
+$(DRIVER_NAME)-$(CONFIG_DRM_HANDSET_DEBUG) += \
+	$(RGXDIR)/generated/dri_bridge/client_ri_bridge.o \
+	$(RGXDIR)/generated/ri_bridge/server_ri_bridge.o \
+
+# for SUPPORT_SECURE_EXPORT
+$(DRIVER_NAME)-y += \
+	$(RGXDIR)/generated/mm_bridge/server_mm_bridge.o \
+	$(RGXDIR)/generated/smm_bridge/server_smm_bridge.o \
+	$(RGXDIR)/generated/syncsexport_bridge/server_syncsexport_bridge.o
+ccflags-y += \
+	-I$(RGXINC)/generated/smm_bridge
+
+ccflags-y += \
+       -DPVR_ANDROID_ION_HEADER="\"../../drivers/staging/android/ion/ion.h\"" \
+       -DPVR_ANDROID_ION_PRIV_HEADER="\"../../drivers/staging/android/ion/ion_priv.h\"" \
+       -I$(RGXINC)/generated/dmabuf_bridge
+
+$(DRIVER_NAME)-y += \
+       $(RGXDIR)/services/server/env/linux/physmem_dmabuf.o \
+       $(RGXDIR)/generated/dmabuf_bridge/server_dmabuf_bridge.o \
+       $(RGXDIR)/services/system/common/env/linux/ion_support_generic.o
+
+# buffer class video
+$(DRIVER_NAME)-y += \
+	$(RGXDIR)/services/system/rgx_intel/bufferclass_video_linux.o \
+	$(RGXDIR)/services/system/rgx_intel/bufferclass_video.o
+
+# display class
+$(DRIVER_NAME)-y += \
+	$(RGXDIR)/services/server/env/linux/pvr_drm_gem.o \
+	$(RGXDIR)/services/server/env/linux/pvr_drm_prime.o \
+	$(RGXDIR)/services/system/rgx_intel/sysconfig.o \
+	$(RGXDIR)/services/system/rgx_intel/pvr_drm_ext.o \
+	$(RGXDIR)/services/3rdparty/intel_drm/dc_mrfld.o \
+	$(RGXDIR)/services/system/common/env/linux/pci_support.o
+
diff --git a/drivers/external_drivers/intel_media/bld/merrifield/tng/Makefile b/drivers/external_drivers/intel_media/bld/merrifield/tng/Makefile
new file mode 100644
index 0000000..45dde8e
--- /dev/null
+++ b/drivers/external_drivers/intel_media/bld/merrifield/tng/Makefile
@@ -0,0 +1,196 @@
+DRIVER_NAME := tngdisp
+
+include drivers/external_drivers/intel_media/bld/merrifield/rgx/Makefile
+
+obj-$(CONFIG_DRM_INTEL_HANDSET) += $(DRIVER_NAME).o
+
+MEDIADIR=../../..
+DISPLAYDIR=$(MEDIADIR)/display/tng/drv
+OTM_HDMI_SRC = $(MEDIADIR)/otm_hdmi
+VIDEO_COMMON_DIR=$(MEDIADIR)/video/common
+DECODE_DIR=$(MEDIADIR)/video/decode
+ENCODE_DIR=$(MEDIADIR)/video/encode
+VSP_DIR=$(MEDIADIR)/video/vsp
+
+MEDIAINC=drivers/external_drivers/intel_media
+GFXINC=$(MEDIAINC)/graphics/rgx
+VIDEOINC=$(MEDIAINC)/video
+DISPLAYINC=$(MEDIAINC)/display/tng
+OTM_HDMI_INC=$(MEDIAINC)/otm_hdmi
+
+ccflags-y += \
+	-I$(MEDIAINC)/interface \
+	-I$(DISPLAYINC)/bc_video \
+	-I$(DISPLAYINC)/interface \
+	-I$(DISPLAYINC)/drv \
+	-I$(DISPLAYINC)/drv/ospm \
+	-I$(GFXINC)/include \
+	-I$(OTM_HDMI_INC)/os/android/include \
+	-I$(OTM_HDMI_INC)/pil/include \
+	-Iinclude/linux \
+	-Iinclude/drm
+
+ccflags-y += -DANDROID -DLINUX -D_linux_ -D__KERNEL__
+ccflags-y += -DMERRIFIELD -DCONFIG_PCI_MSI
+ccflags-y += -DCONFIG_SLICE_HEADER_PARSING
+ccflags-y += -DCONFIG_VIDEO_MRFLD_EC
+
+#
+# HDMI
+#
+
+ccflags-$(CONFIG_SUPPORT_HDMI) += -DCONFIG_SUPPORT_HDMI
+
+#
+# HDMI PRIMARY devices
+#
+ccflags-$(CONFIG_SUPPORT_MIPI) += -DCONFIG_SUPPORT_MIPI
+
+#only for internal testing
+ccflags-$(CONFIG_SUPPORT_HDMI) += -DOTM_HDMI_UNIT_TEST
+
+#enable HDCP
+ccflags-$(CONFIG_SUPPORT_HDMI) += -DOTM_HDMI_HDCP_ENABLE
+#ccflags-$(CONFIG_SUPPORT_HDMI) += -DOTM_HDMI_HDCP_ALWAYS_ENC
+
+ccflags-$(CONFIG_SUPPORT_HDMI) += \
+	-I$(OTM_HDMI_INC)/os/android/include \
+	-I$(OTM_HDMI_INC)/pil/include \
+	-I$(OTM_HDMI_INC)/pil/common \
+	-I$(OTM_HDMI_INC)/pil/specific/include \
+	-I$(OTM_HDMI_INC)/ipil/include \
+	-I$(OTM_HDMI_INC)/ipil/common \
+	-I$(OTM_HDMI_INC)/ipil/specific/include \
+	-I$(OTM_HDMI_INC)/pil/specific/mrfld \
+	-I$(OTM_HDMI_INC)/ipil/specific/mfld
+
+#
+# Video ccflags
+#
+ccflags-y += -DCONFIG_VIDEO_MRFLD -DSUPPORT_VSP
+ccflags-y += \
+ 	-I$(VIDEOINC)/common \
+	-I$(VIDEOINC)/decode \
+	-I$(VIDEOINC)/encode \
+	-I$(VIDEOINC)/vsp \
+	-I$(GFXINC)/services/server/env/linux \
+	-I$(GFXINC)/services/server/include \
+	-Iinclude/drm/ttm
+
+#
+# Display
+#
+$(DRIVER_NAME)-objs += \
+	$(DISPLAYDIR)/psb_bl.o \
+	$(DISPLAYDIR)/psb_drv.o \
+	$(DISPLAYDIR)/psb_fb.o \
+	$(DISPLAYDIR)/psb_gtt.o \
+	$(DISPLAYDIR)/psb_hotplug.o \
+	$(DISPLAYDIR)/psb_intel_display.o \
+	$(DISPLAYDIR)/mdfld_hdmi_audio.o \
+	$(DISPLAYDIR)/mdfld_msic.o \
+	$(DISPLAYDIR)/mdfld_debugfs.o \
+	$(DISPLAYDIR)/mdfld_csc.o \
+	$(DISPLAYDIR)/mdfld_intel_hdcp.o \
+	$(DISPLAYDIR)/psb_intel_hdmi.o \
+	$(DISPLAYDIR)/psb_socket.o \
+	$(DISPLAYDIR)/psb_umevents.o \
+	$(DISPLAYDIR)/mdfld_output.o \
+	$(DISPLAYDIR)/mrfld_clock.o \
+	$(DISPLAYDIR)/psb_irq.o \
+	$(DISPLAYDIR)/tng_wa.o \
+	$(DISPLAYDIR)/pmu_tng.o \
+	$(DISPLAYDIR)/ospm/pwr_mgmt.o \
+	$(DISPLAYDIR)/ospm/gfx_rtpm.o \
+	$(DISPLAYDIR)/ospm/dc_ospm.o \
+	$(DISPLAYDIR)/ospm/dc_maxfifo.o \
+	$(DISPLAYDIR)/ospm/video_ospm.o \
+	$(DISPLAYDIR)/ospm/early_suspend.o \
+	$(DISPLAYDIR)/ospm/early_suspend_sysfs.o
+
+ifeq ($(CONFIG_SUPPORT_MIPI),y)
+$(DRIVER_NAME)-objs += \
+	$(DISPLAYDIR)/psb_dpst.o \
+	$(DISPLAYDIR)/psb_dpst_func.o \
+	$(DISPLAYDIR)/mdfld_dsi_dbi.o \
+	$(DISPLAYDIR)/mdfld_dsi_dpi.o \
+	$(DISPLAYDIR)/mdfld_dsi_output.o \
+	$(DISPLAYDIR)/mdfld_dsi_esd.o \
+	$(DISPLAYDIR)/mdfld_dsi_dbi_dsr.o \
+	$(DISPLAYDIR)/dispmgrnl.o \
+	$(DISPLAYDIR)/mdfld_dsi_pkg_sender.o \
+	$(DISPLAYDIR)/jdi_vid.o \
+	$(DISPLAYDIR)/jdi_cmd.o \
+	$(DISPLAYDIR)/cmi_vid.o \
+	$(DISPLAYDIR)/cmi_cmd.o \
+	$(DISPLAYDIR)/sharp10x19_cmd.o \
+	$(DISPLAYDIR)/sharp25x16_vid.o \
+	$(DISPLAYDIR)/sharp25x16_cmd.o \
+	$(DISPLAYDIR)/sdc16x25_8_cmd.o \
+	$(DISPLAYDIR)/sdc25x16_cmd.o \
+	$(DISPLAYDIR)/jdi25x16_vid.o \
+	$(DISPLAYDIR)/jdi25x16_cmd.o
+endif
+
+ifeq ($(CONFIG_MOOREFIELD),y)
+$(DRIVER_NAME)-objs += \
+	$(DISPLAYDIR)/ospm/gfx_ospm_ann.o \
+	$(DISPLAYDIR)/mofd_dc_callbacks.o
+else
+$(DRIVER_NAME)-objs += \
+	$(DISPLAYDIR)/ospm/gfx_ospm.o \
+	$(DISPLAYDIR)/dc_callbacks.o
+endif
+
+ifeq ($(CONFIG_SUPPORT_HDMI),y)
+# Platform independent library
+$(DRIVER_NAME)-objs += \
+	$(OTM_HDMI_SRC)/pil/common/otm_hdmi.o \
+	$(OTM_HDMI_SRC)/pil/common/mode_info.o \
+	$(OTM_HDMI_SRC)/pil/common/hdcp.o \
+	$(OTM_HDMI_SRC)/pil/common/edid.o \
+	$(OTM_HDMI_SRC)/pil/common/edid_print.o \
+	$(OTM_HDMI_SRC)/pil/common/infoframes.o
+
+# IP independent library
+$(DRIVER_NAME)-objs += \
+	$(OTM_HDMI_SRC)/ipil/common/otm_ipil_main.o \
+	$(OTM_HDMI_SRC)/ipil/common/ipil_hdcp.o
+
+# OS specific library
+$(DRIVER_NAME)-objs += \
+	$(OTM_HDMI_SRC)/os/android/android_hdmi.o
+	
+$(DRIVER_NAME)-objs += \
+	$(OTM_HDMI_SRC)/ipil/specific/mfld/ips_hdmi.o \
+	$(OTM_HDMI_SRC)/ipil/specific/mrfld/ips_hdcp.o \
+	$(OTM_HDMI_SRC)/ipil/specific/mrfld/ips_hdmi_priv.o
+
+ifeq ($(CONFIG_MOOREFIELD),y)
+$(DRIVER_NAME)-objs += \
+	$(OTM_HDMI_SRC)/pil/specific/mofd/ps_hdmi.o
+else
+$(DRIVER_NAME)-objs += \
+	$(OTM_HDMI_SRC)/pil/specific/mrfld/ps_hdmi.o
+endif
+
+endif
+
+$(DRIVER_NAME)-objs += \
+	$(VIDEO_COMMON_DIR)/psb_ttm_glue.o \
+	$(VIDEO_COMMON_DIR)/psb_cmdbuf.o \
+	$(VIDEO_COMMON_DIR)/tng_securefw.o \
+	$(VIDEO_COMMON_DIR)/psb_buffer.o \
+	$(VIDEO_COMMON_DIR)/psb_fence.o \
+	$(VIDEO_COMMON_DIR)/psb_mmu.o \
+	$(VIDEO_COMMON_DIR)/psb_ttm_fence.o \
+	$(VIDEO_COMMON_DIR)/psb_ttm_fence_user.o \
+	$(VIDEO_COMMON_DIR)/psb_ttm_placement_user.o \
+	$(DECODE_DIR)/psb_msvdx.o \
+	$(DECODE_DIR)/psb_msvdx_ec.o \
+	$(DECODE_DIR)/psb_msvdxinit.o \
+	$(DECODE_DIR)/psb_msvdx_fw.o \
+	$(ENCODE_DIR)/tng_topaz.o \
+	$(ENCODE_DIR)/tng_topazinit.o \
+	$(VSP_DIR)/vsp.o \
+	$(VSP_DIR)/vsp_init.o
diff --git a/drivers/external_drivers/intel_media/common/mdfld_gl3.c b/drivers/external_drivers/intel_media/common/mdfld_gl3.c
new file mode 100644
index 0000000..ea74707
--- /dev/null
+++ b/drivers/external_drivers/intel_media/common/mdfld_gl3.c
@@ -0,0 +1,153 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *	Jim Bish <jim.bish@@intel.com>
+ */
+
+#ifdef CONFIG_MDFD_GL3
+
+#include "mdfld_gl3.h"
+
+void gl3_enable(void)
+{
+	struct drm_psb_private *dev_priv =
+		    (struct drm_psb_private *) gpDrmDevice->dev_private;
+
+	PSB_DEBUG_PM("gl3_enable called on platform %x\n",
+				dev_priv->platform_rev_id);
+	if (gl3_exist()) {
+		if (!ospm_power_using_hw_begin(OSPM_GL3_CACHE_ISLAND, true))
+			return;
+		MDFLD_GL3_WRITE(MDFLD_GL3_ENABLE_CACHE, MDFLD_GL3_CONTROL);
+		/* set gl3 attributes */
+		MDFLD_GL3_WRITE(GCL_CR_CTL2_ATTRIBUTES, MDFLD_GCL_CR_CTL2);
+		MDFLD_GL3_WRITE(MDFLD_GCL_CR_ECO_EVICT_INVAL, MDFLD_GCL_CR_ECO);
+		PSB_DEBUG_GENERAL("gl3 cache enabled with mask %x\n", MDFLD_GL3_ENABLE_CACHE);
+		ospm_power_using_hw_end(OSPM_GL3_CACHE_ISLAND);
+	}
+}
+
+void gl3_disable(void)
+{
+	struct drm_psb_private *dev_priv =
+		    (struct drm_psb_private *) gpDrmDevice->dev_private;
+
+	PSB_DEBUG_PM("gl3_disable called on platform %x\n",
+				dev_priv->platform_rev_id);
+	if (gl3_exist()) {
+		if (!ospm_power_using_hw_begin(OSPM_GL3_CACHE_ISLAND, true))
+			return;
+		MDFLD_GL3_WRITE(MDFLD_GL3_DISABLE_CACHE, MDFLD_GL3_CONTROL);
+		PSB_DEBUG_GENERAL("gl3 cache disabled with mask %x\n", MDFLD_GL3_DISABLE_CACHE);
+		ospm_power_using_hw_end(OSPM_GL3_CACHE_ISLAND);
+	}
+}
+
+void gl3_invalidate(void)
+{
+	struct drm_psb_private *dev_priv =
+		    (struct drm_psb_private *) gpDrmDevice->dev_private;
+
+	PSB_DEBUG_PM("gl3_invalidate called on platform %x\n",
+					dev_priv->platform_rev_id);
+	if (gl3_exist()) {
+		/* No need to call ospm_power_using_hw_begin
+			as this is being called from ospm_suspend_pci only.
+			Otherwise would lead to deadlock.
+		*/
+		/* Invalidate the cache */
+		#if 0
+			MDFLD_GL3_WRITE(MDFLD_GL3_INVALIDATE_CACHE, MDFLD_GL3_CONTROL);
+		#else
+			uint32_t gl3_ctl;
+			/* IS there a way to avoid multiple invalidation simultaneously? Maybe a ATOM value */
+			gl3_ctl = MDFLD_GL3_READ(MDFLD_GL3_CONTROL);
+			PSB_DEBUG_GENERAL("gl3_invalidation: GCL_CR_CTL2 is 0x%08x\n", gl3_ctl);
+			MDFLD_GL3_WRITE(gl3_ctl | MDFLD_GL3_INVALIDATE, MDFLD_GL3_CONTROL);
+		#endif
+		PSB_DEBUG_GENERAL("gl3 cache invalidated with mask %x\n", MDFLD_GL3_INVALIDATE_CACHE);
+#if 0
+		uint32_t poll_count = 0x1000, gl3_stat;
+		while (poll_count) {
+			gl3_stat = MDFLD_GL3_READ(MDFLD_GL3_STATUS);
+			if (gl3_stat & 0x1) {
+				/* Frome D.Will : write 1 to Inval_done bit to clear it */
+				MDFLD_GL3_WRITE(gl3_stat | 0x1, MDFLD_GL3_STATUS);
+				return;
+			}
+			cpu_relax();
+			poll_count--;
+		}
+		DRM_ERROR("Invalidation GL3 timeout\n");
+#endif
+	}
+}
+
+void gl3_flush(void)
+{
+	struct drm_psb_private *dev_priv =
+		    (struct drm_psb_private *) gpDrmDevice->dev_private;
+
+	PSB_DEBUG_PM("gl3_flush called on platform %x\n",
+				dev_priv->platform_rev_id);
+	if (gl3_exist()) {
+		if (!ospm_power_using_hw_begin(OSPM_GL3_CACHE_ISLAND, true))
+			return;
+		/* Flush the cache */
+		MDFLD_GL3_WRITE(MDFLD_GL3_FLUSH_CACHE, MDFLD_GL3_CONTROL);
+		PSB_DEBUG_GENERAL("gl3 cache flushed with mask %x\n", MDFLD_GL3_FLUSH_CACHE);
+		ospm_power_using_hw_end(OSPM_GL3_CACHE_ISLAND);\
+	}
+}
+
+void gl3_reset(void)
+{
+	struct drm_psb_private *dev_priv =
+		    (struct drm_psb_private *) gpDrmDevice->dev_private;
+
+	PSB_DEBUG_PM("gl3_reset called on platform %x\n",
+				dev_priv->platform_rev_id);
+	if (gl3_exist()) {
+		if (!ospm_power_using_hw_begin(OSPM_GL3_CACHE_ISLAND, true))
+			return;
+		/* Reset the cache */
+		MDFLD_GL3_WRITE(MDFLD_GL3_SOFT_RESET_ENABLE, MDFLD_GL3_G_CONTROL);
+		PSB_DEBUG_GENERAL("gl3 cache soft reset with mas %x\n", MDFLD_GL3_SOFT_RESET_ENABLE);
+		ospm_power_using_hw_end(OSPM_GL3_CACHE_ISLAND);\
+	}
+}
+
+bool gl3_exist(void)
+{
+	struct drm_psb_private *dev_priv =
+		    (struct drm_psb_private *) gpDrmDevice->dev_private;
+
+	if (IS_MDFLD(gpDrmDevice) && dev_priv->platform_rev_id != MDFLD_PNW_A0)
+		return true;
+	else
+		PSB_DEBUG_ENTRY("gl3 not supported in unknown platform");
+
+	return false;
+}
+
+#endif /* CONFIG_MDFD_GL3 */
diff --git a/drivers/external_drivers/intel_media/common/mdfld_gl3.h b/drivers/external_drivers/intel_media/common/mdfld_gl3.h
new file mode 100644
index 0000000..0216998
--- /dev/null
+++ b/drivers/external_drivers/intel_media/common/mdfld_gl3.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *	Jim Bish <jim.bish@intel.com>
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include "psb_drm.h"
+#include "psb_drv.h"
+#include "psb_fb.h"
+#include "psb_reg.h"
+#include "psb_intel_reg.h"
+#include "psb_msvdx.h"
+#include "pnw_topaz.h"
+#include <drm/drm_pciids.h>
+#include "pvr_drm_shared.h"
+#include "psb_powermgmt.h"
+#include <linux/cpu.h>
+#include <linux/notifier.h>
+#include <linux/spinlock.h>
+#include <linux/pm_runtime.h>
+
+#ifdef CONFIG_MDFD_GL3
+
+/*
+ * GL3 Control
+ */
+
+#define MDFLD_GCL_CR_CTL2		0xB0000
+/* address at location is the address that had the error */
+#define MDFLD_GCL_ERR_ADDR		0xB0004
+/*GL3 ECO Register*/
+#define MDFLD_GCL_CR_ECO		0xB0030
+#define MDFLD_GCL_ERR_STATUS		0xB0008
+
+/* unmask bit 21 to get GL3 interrupts */
+#define MDFLD_IMG_MASK			0x20A8
+/*
+ * GL3 registers and bits
+ */
+
+#define MDFLD_GL3_CONTROL				0x2100
+#define MDFLD_GL3_USE_WRT_INVAL			0x2104
+#define MDFLD_GL3_STATUS				0x2108
+#define MDFLD_GL3_G_CONTROL				0x20FC
+#define MDFLD_GL3_SOFT_RESET_ENABLE		(1<<4)
+
+#define MDFLD_GL3_DISABLE				(1<<31)
+#define MDFLD_GL3_BYP_PREQ2_USSE3 		(1<<29)
+#define MDLFD_GL3_BYP_PREQ2_USSE2 		(1<<28)
+#define MDFLD_GL3_BYP_PREQ2_PDS			(1<<27)
+#define MDFLD_GL3_BYP_PREQ2_USEC		(1<<26)
+#define MDFLD_GL3_FLUSH					(1<<25)
+#define MDFLD_GL3_FLUSH_CTL				(1<<24)
+#define MDFLD_GL3_BYP_CPU_COH			(1<<23)
+#define MDFLD_GL3_BYP_VED				(1<<21)
+#define MDFLD_GL3_BYP_VEC				(1<<20)
+#define MDFLD_GL3_BYP_GFX				(1<<19)
+#define MDFLD_GL3_BYP_PREQ1_USE1		(1<<18)
+#define MDFLD_GL3_BYP_PREQ1_USE0		(1<<17)
+#define MDFLD_GL3_BYP_PREQ1_ISPZ		(1<<16)
+#define MDFLD_GL3_BYP_PREQ1_ISPP 		(1<<15)
+#define MDFLD_GL3_BYP_PREQ1_TSPP		(1<<14)
+#define MDFLD_GL3_BYP_PREQ1_PBE			(1<<13)
+#define MDFLD_GL3_BYP_PREQ1_VDM			(1<<12)
+#define MDFLD_GL3_BYP_PREQ1_TA			(1<<11)
+#define MDFLD_GL3_BYP_PREQ1_MADD		(1<<10)
+#define MDFLD_GL3_BYP_PREQ1_MMU			(1<<9)
+#define MDFLD_GL3_USE_INVAL_REQ_USSE3	(1<<8)
+#define MDFLD_GL3_USE_INVAL_REQ_USSE2	(1<<7)
+#define MDFLD_GL3_USE_INVAL_REQ_USSE1	(1<<6)
+#define MDFLD_GL3_USE_INVAL_REQ_USSE0	(1<<5)
+#define MDFLD_GL3_BYP_WR				(1<<4)
+#define MDFLD_GL3_IGN_VED_HINT			(1<<3)
+#define MDFLD_GL3_IGN_VEC_HINT			(1<<2)
+#define MDFLD_GL3_INVALIDATE			(1<<1)
+#define MDFLD_GL3_PAUSE					(1)
+
+/*
+ * GL3 Masks
+ */
+
+#define MDFLD_GL3_ENABLE_CACHE (MDFLD_GL3_BYP_PREQ2_USSE3 | MDLFD_GL3_BYP_PREQ2_USSE2 | \
+	MDFLD_GL3_BYP_PREQ1_USE1 | MDFLD_GL3_BYP_PREQ1_USE0 | \
+	MDFLD_GL3_BYP_PREQ1_ISPZ | MDFLD_GL3_BYP_PREQ1_PBE | MDFLD_GL3_BYP_PREQ1_VDM | \
+	MDFLD_GL3_BYP_PREQ1_TA | MDFLD_GL3_BYP_PREQ1_MMU | MDFLD_GL3_USE_INVAL_REQ_USSE3 | \
+	MDFLD_GL3_USE_INVAL_REQ_USSE2 | MDFLD_GL3_USE_INVAL_REQ_USSE1 | \
+	MDFLD_GL3_USE_INVAL_REQ_USSE0)
+
+#define MDFLD_GL3_INVALIDATE_CACHE (MDFLD_GL3_ENABLE_CACHE | MDFLD_GL3_INVALIDATE)
+
+#define MDFLD_GL3_FLUSH_CACHE (MDFLD_GL3_ENABLE_CACHE | MDFLD_GL3_FLUSH)
+
+#define MDFLD_GL3_DISABLE_CACHE (MDFLD_GL3_ENABLE_CACHE | MDFLD_GL3_DISABLE)
+
+/*
+	GL3 attributes controlled via GCL_CR_CTL2
+*/
+#define GCL_CR_CTL2_WRTHRU_ENA				(1)
+#define GCL_CR_CTL2_TAG_ECC_CHECK_ENABLE		(1<<2)
+#define GCL_CR_CTL2_GL3_GL3Q_GATING_DISABLE		(1<<8)
+#define GCL_CR_CTL2_GL3_GCL_GATING_DISABLE		(1<<9)
+#define GCL_CR_CTL2_GL3_L3_GATING_DISABLE		(1<<10)
+#define GCL_CR_CTL2_DATA_ECC_CHECK_ENABLE		(1<<19)
+#define GCL_CR_CTL2_L2_SLEEP_TRANSISTOR_STRENGTH_ENABLE	(1<<20)
+
+#define GCL_CR_CTL2_ATTRIBUTES (GCL_CR_CTL2_WRTHRU_ENA | \
+				GCL_CR_CTL2_TAG_ECC_CHECK_ENABLE | \
+				GCL_CR_CTL2_GL3_GL3Q_GATING_DISABLE | \
+				GCL_CR_CTL2_GL3_GCL_GATING_DISABLE | \
+				GCL_CR_CTL2_GL3_L3_GATING_DISABLE | \
+				GCL_CR_CTL2_DATA_ECC_CHECK_ENABLE | \
+		GCL_CR_CTL2_L2_SLEEP_TRANSISTOR_STRENGTH_ENABLE)
+
+/*
+	GL3 attributes controlled via GCL_CR_ECO
+*/
+#define MDFLD_GCL_CR_ECO_EVICT_INVAL			(1)
+
+/* GL3 */
+#define MDFLD_GL3_WRITE(_val, _offs) \
+	iowrite32(_val, dev_priv->gl3_reg + (_offs))
+#define MDFLD_GL3_READ(_offs) \
+	ioread32(dev_priv->gl3_reg + (_offs))
+
+void gl3_enable(void);
+void gl3_invalidate(void);
+void gl3_flush(void);
+void gl3_reset(void);
+bool gl3_exist(void);
+void gl3_disable(void);
+
+#endif
diff --git a/drivers/external_drivers/intel_media/common/psb_drv.c b/drivers/external_drivers/intel_media/common/psb_drv.c
new file mode 100644
index 0000000..70d007b
--- /dev/null
+++ b/drivers/external_drivers/intel_media/common/psb_drv.c
@@ -0,0 +1,5033 @@
+/**************************************************************************
+ * Copyright (c) 2007, Intel Corporation.
+ * All Rights Reserved.
+ * Copyright (c) 2008, Tungsten Graphics, Inc. Cedar Park, TX., USA.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <linux/cpu.h>
+#include <linux/notifier.h>
+#include <linux/spinlock.h>
+#include <drm/drm_pciids.h>
+#include <linux/version.h>
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0))
+#include <asm/intel_scu_pmic.h>
+#else
+#include <asm/intel_scu_ipc.h>
+#endif
+#include <asm/intel-mid.h>
+#include "psb_drm.h"
+#include "psb_drv.h"
+#include "psb_fb.h"
+#include "psb_reg.h"
+#include "psb_intel_reg.h"
+#include "psb_msvdx.h"
+#include "pnw_topaz.h"
+#include "mdfld_dsi_dbi_dsr.h"
+#include "mdfld_csc.h"
+#include "mdfld_dsi_pkg_sender.h"
+#include "mdfld_dsi_dbi.h"
+#include "pvr_drm_shared.h"
+#include "psb_powermgmt.h"
+
+#ifdef CONFIG_MDFLD_DSI_DPU
+#include "mdfld_dsi_dbi_dpu.h"
+#endif
+
+#ifdef CONFIG_GFX_RTPM
+#include <linux/pm_runtime.h>
+#endif
+
+#ifdef CONFIG_MDFD_GL3
+#include "mdfld_gl3.h"
+#endif
+
+#include "otm_hdmi.h"
+#include "android_hdmi.h"
+
+/*IMG headers*/
+#include "pvr_drm_shared.h"
+#include "img_types.h"
+#include "pvr_bridge.h"
+#include "linkage.h"
+
+struct workqueue_struct *te_wq;
+struct workqueue_struct *vsync_wq;
+
+#define HDMI_MONITOR_NAME_LENGTH 20
+
+int drm_psb_debug = PSB_D_WARN;
+int drm_psb_enable_cabc = 1;
+int drm_psb_enable_gamma;
+int drm_psb_enable_color_conversion;
+static int drm_psb_trap_pagefaults;
+
+bool gbdispstatus = true;
+
+int drm_psb_disable_vsync = 1;
+int drm_psb_no_fb;
+int drm_psb_force_pipeb;
+int drm_msvdx_pmpolicy = PSB_PMPOLICY_POWERDOWN;
+int drm_psb_cpurelax;
+int drm_psb_udelaydivider = 1;
+int drm_topaz_pmpolicy = PSB_PMPOLICY_POWERDOWN;
+int drm_topaz_sbuswa;
+int drm_psb_ospm = 1;
+int drm_psb_gl3_enable = 1;
+int drm_psb_topaz_clockgating;
+int gfxrtdelay = 2 * 1000;
+int drm_psb_3D_vblank = 1;
+int drm_psb_smart_vsync = 1;
+int drm_psb_te_timer_delay = (DRM_HZ / 40);
+char HDMI_EDID[HDMI_MONITOR_NAME_LENGTH];
+int hdmi_state;
+u32 DISP_PLANEB_STATUS = ~DISPLAY_PLANE_ENABLE;
+int drm_psb_use_cases_control = PSB_ALL_UC_ENABLE;
+int drm_psb_dump_pm_history;
+int gamma_setting[129] = {0};
+int csc_setting[6] = {0};
+int gamma_number = 129;
+int csc_number = 6;
+#ifdef CONFIG_CTP_DPST
+int dpst_level = 3;
+#endif
+int drm_hdmi_hpd_auto;
+int default_hdmi_scaling_mode = DRM_MODE_SCALE_CENTER;
+
+int drm_psb_msvdx_tiling = 1;
+int drm_msvdx_bottom_half;
+struct drm_device *g_drm_dev;
+EXPORT_SYMBOL(g_drm_dev);
+
+#ifdef CONFIG_SUPPORT_MIPI_H8C7_CMD_DISPLAY
+extern struct platform_driver h8c7_lcd_driver;
+#endif
+
+#ifdef CONFIG_SUPPORT_VB_MIPI_DISPLAY
+extern struct platform_driver vb_lcd_driver;
+#endif
+
+#ifdef CONFIG_R63311_MIPI_VIDEO_MODE
+extern struct platform_driver jdi_r63311_lcd_driver;
+#endif
+
+#ifdef CONFIG_SUPPORT_TMD_MIPI_600X1024_DISPLAY
+extern struct platform_driver tmd_lcd_driver;
+#endif
+
+static int psb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+
+MODULE_PARM_DESC(debug, "Enable debug output");
+MODULE_PARM_DESC(no_fb, "Disable FBdev");
+MODULE_PARM_DESC(trap_pagefaults, "Error and reset on MMU pagefaults");
+MODULE_PARM_DESC(disable_vsync, "Disable vsync interrupts");
+MODULE_PARM_DESC(force_pipeb, "Forces PIPEB to become primary fb");
+MODULE_PARM_DESC(ta_mem_size, "TA memory size in kiB");
+MODULE_PARM_DESC(ospm, "switch for ospm support");
+MODULE_PARM_DESC(gl3_enabled, "Enable GL3 cache");
+MODULE_PARM_DESC(rtpm, "Specifies Runtime PM delay for GFX");
+MODULE_PARM_DESC(msvdx_pmpolicy, "msvdx power management policy btw frames");
+MODULE_PARM_DESC(topaz_pmpolicy, "topaz power managerment policy btw frames");
+MODULE_PARM_DESC(topaz_sbuswa, "WA for topaz sysbus write");
+MODULE_PARM_DESC(hdmi_edid, "EDID info for HDMI monitor");
+MODULE_PARM_DESC(hdmi_state, "Whether HDMI Monitor is connected or not");
+MODULE_PARM_DESC(vblank_sync, "whether sync to vblank interrupt when do 3D flip");
+MODULE_PARM_DESC(smart_vsync, "Enable Smart Vsync for Display");
+MODULE_PARM_DESC(te_delay, "swap delay after TE interrpt");
+MODULE_PARM_DESC(cpu_relax, "replace udelay with cpu_relax for video");
+MODULE_PARM_DESC(udelay_divider, "divide the usec value of video udelay");
+MODULE_PARM_DESC(enable_color_conversion, "Enable display side color conversion");
+MODULE_PARM_DESC(enable_gamma, "Enable display side gamma");
+MODULE_PARM_DESC(use_cases_control, "Use to enable and disable use cases");
+MODULE_PARM_DESC(pm_history, "whether to dump pm history when SGX HWR");
+#ifdef CONFIG_CTP_DPST
+MODULE_PARM_DESC(dpst_level, "dpst aggressive level: 0~5");
+#endif
+MODULE_PARM_DESC(hdmi_hpd_auto, "HDMI hot-plug auto test flag");
+MODULE_PARM_DESC(default_hdmi_scaling_mode, "Default HDMI scaling mode");
+
+module_param_named(debug, drm_psb_debug, int, 0600);
+module_param_named(psb_enable_cabc, drm_psb_enable_cabc, int, 0600);
+module_param_named(enable_color_conversion, drm_psb_enable_color_conversion, int, 0600);
+module_param_named(enable_gamma, drm_psb_enable_gamma, int, 0600);
+/* [SC1] change parameter name */
+module_param_named(no_fb, drm_psb_no_fb, int, 0600);
+module_param_named(trap_pagefaults, drm_psb_trap_pagefaults, int, 0600);
+module_param_named(force_pipeb, drm_psb_force_pipeb, int, 0600);
+module_param_named(msvdx_pmpolicy, drm_msvdx_pmpolicy, int, 0600);
+module_param_named(cpu_relax, drm_psb_cpurelax, int, 0600);
+module_param_named(udelay_divider, drm_psb_udelaydivider, int, 0600);
+module_param_named(topaz_pmpolicy, drm_topaz_pmpolicy, int, 0600);
+module_param_named(topaz_sbuswa, drm_topaz_sbuswa, int, 0600);
+module_param_named(ospm, drm_psb_ospm, int, 0600);
+module_param_named(gl3_enabled, drm_psb_gl3_enable, int, 0600);
+module_param_named(rtpm, gfxrtdelay, int, 0600);
+module_param_named(topaz_clockgating, drm_psb_topaz_clockgating, int, 0600);
+module_param_string(hdmi_edid, HDMI_EDID, 20, 0600);
+module_param_named(hdmi_state, hdmi_state, int, 0600);
+module_param_named(vblank_sync, drm_psb_3D_vblank, int, 0600);
+module_param_named(smart_vsync, drm_psb_smart_vsync, int, 0600);
+module_param_named(te_delay, drm_psb_te_timer_delay, int, 0600);
+module_param_named(msvdx_tiling_memory, drm_psb_msvdx_tiling, int, 0600);
+module_param_named(msvdx_bottom_half, drm_msvdx_bottom_half, int, 0600);
+module_param_named(psb_use_cases_control, drm_psb_use_cases_control, int, 0600);
+module_param_named(pm_history, drm_psb_dump_pm_history, int, 0600);
+module_param_array_named(gamma_adjust, gamma_setting, int, &gamma_number, 0600);
+module_param_array_named(csc_adjust, csc_setting, int, &csc_number, 0600);
+#ifdef CONFIG_CTP_DPST
+module_param_named(dpst_level, dpst_level, int, 0600);
+#endif
+module_param_named(hdmi_hpd_auto, drm_hdmi_hpd_auto, int, 0600);
+module_param_named(default_hdmi_scaling_mode, default_hdmi_scaling_mode,
+					int, 0600);
+
+#ifndef MODULE
+/* Make ospm configurable via cmdline firstly, and others can be enabled if needed. */
+static int __init config_ospm(char *arg)
+{
+	/* ospm turn on/off control can be passed in as a cmdline parameter */
+	/* to enable this feature add ospm=1 to cmdline */
+	/* to disable this feature add ospm=0 to cmdline */
+	if (!arg)
+		return -EINVAL;
+
+	if (!strcasecmp(arg, "0"))
+		drm_psb_ospm = 0;
+	else if (!strcasecmp(arg, "1"))
+		drm_psb_ospm = 1;
+
+	return 0;
+}
+static int __init config_gl3(char *arg)
+{
+	if (!arg)
+		return -EINVAL;
+
+	if (!strcasecmp(arg, "0"))
+		drm_psb_gl3_enable = 0;
+	else if (!strcasecmp(arg, "1"))
+		drm_psb_gl3_enable = 1;
+
+	return 0;
+}
+early_param("ospm", config_ospm);
+early_param("gl3_enabled", config_gl3);
+#endif
+
+static struct pci_device_id pciidlist[] = {
+#ifdef SGX535
+	{0x8086, 0x4100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100},
+	{0x8086, 0x4101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100},
+	{0x8086, 0x4102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100},
+	{0x8086, 0x4103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100},
+	{0x8086, 0x4104, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100},
+	{0x8086, 0x4105, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100},
+	{0x8086, 0x4106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100},
+	{0x8086, 0x4107, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100},
+#endif
+#if defined (MEDFIELD) || defined (CLOVERTRAIL_PHONE)
+	{0x8086, 0x0130, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MDFLD_0130},
+	{0x8086, 0x0131, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MDFLD_0130},
+	{0x8086, 0x0132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MDFLD_0130},
+	{0x8086, 0x0133, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MDFLD_0130},
+	{0x8086, 0x0134, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MDFLD_0130},
+	{0x8086, 0x0135, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MDFLD_0130},
+	{0x8086, 0x0136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MDFLD_0130},
+	{0x8086, 0x0137, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MDFLD_0130},
+	{0x8086, 0x08c0, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xFFFF00, CHIP_MDFLD_0130},
+	{0x8086, 0x08c7, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xFFFF00, CHIP_MDFLD_0130},
+	{0x8086, 0x08c8, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xFFFF00, CHIP_MDFLD_0130},
+#endif
+	{0, 0, 0}
+};
+
+MODULE_DEVICE_TABLE(pci, pciidlist);
+/*
+ * Standard IOCTLs.
+ */
+
+#define DRM_IOCTL_PSB_KMS_OFF	\
+		DRM_IO(DRM_PSB_KMS_OFF + DRM_COMMAND_BASE)
+#define DRM_IOCTL_PSB_KMS_ON	\
+		DRM_IO(DRM_PSB_KMS_ON + DRM_COMMAND_BASE)
+#define DRM_IOCTL_PSB_VT_LEAVE	\
+		DRM_IO(DRM_PSB_VT_LEAVE + DRM_COMMAND_BASE)
+#define DRM_IOCTL_PSB_VT_ENTER	\
+		DRM_IO(DRM_PSB_VT_ENTER + DRM_COMMAND_BASE)
+#define DRM_IOCTL_PSB_EXTENSION	\
+		DRM_IOWR(DRM_PSB_EXTENSION + DRM_COMMAND_BASE, \
+			 union drm_psb_extension_arg)
+#define DRM_IOCTL_PSB_SIZES	\
+		DRM_IOR(DRM_PSB_SIZES + DRM_COMMAND_BASE, \
+			struct drm_psb_sizes_arg)
+#define DRM_IOCTL_PSB_FUSE_REG	\
+		DRM_IOWR(DRM_PSB_FUSE_REG + DRM_COMMAND_BASE, uint32_t)
+#define DRM_IOCTL_PSB_VBT	\
+		DRM_IOWR(DRM_PSB_VBT + DRM_COMMAND_BASE, \
+			struct gct_ioctl_arg)
+#define DRM_IOCTL_PSB_DC_STATE	\
+		DRM_IOW(DRM_PSB_DC_STATE + DRM_COMMAND_BASE, \
+			struct drm_psb_dc_state_arg)
+#define DRM_IOCTL_PSB_ADB	\
+		DRM_IOWR(DRM_PSB_ADB + DRM_COMMAND_BASE, uint32_t)
+#define DRM_IOCTL_PSB_MODE_OPERATION	\
+		DRM_IOWR(DRM_PSB_MODE_OPERATION + DRM_COMMAND_BASE, \
+			 struct drm_psb_mode_operation_arg)
+#define DRM_IOCTL_PSB_STOLEN_MEMORY	\
+		DRM_IOWR(DRM_PSB_STOLEN_MEMORY + DRM_COMMAND_BASE, \
+			 struct drm_psb_stolen_memory_arg)
+#define DRM_IOCTL_PSB_REGISTER_RW	\
+		DRM_IOWR(DRM_PSB_REGISTER_RW + DRM_COMMAND_BASE, \
+			 struct drm_psb_register_rw_arg)
+#define DRM_IOCTL_PSB_GTT_MAP	\
+		DRM_IOWR(DRM_PSB_GTT_MAP + DRM_COMMAND_BASE, \
+			 struct psb_gtt_mapping_arg)
+#define DRM_IOCTL_PSB_GTT_UNMAP	\
+		DRM_IOW(DRM_PSB_GTT_UNMAP + DRM_COMMAND_BASE, \
+			struct psb_gtt_mapping_arg)
+#define DRM_IOCTL_PSB_GETPAGEADDRS	\
+		DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_GETPAGEADDRS,\
+			 struct drm_psb_getpageaddrs_arg)
+#define DRM_IOCTL_PSB_HIST_ENABLE	\
+		DRM_IOWR(DRM_PSB_HIST_ENABLE + DRM_COMMAND_BASE, \
+			 uint32_t)
+#define DRM_IOCTL_PSB_HIST_STATUS	\
+		DRM_IOWR(DRM_PSB_HIST_STATUS + DRM_COMMAND_BASE, \
+			 struct drm_psb_hist_status_arg)
+#define DRM_IOCTL_PSB_UPDATE_GUARD	\
+		DRM_IOWR(DRM_PSB_UPDATE_GUARD + DRM_COMMAND_BASE, \
+			 uint32_t)
+#define DRM_IOCTL_PSB_INIT_COMM	\
+		DRM_IOWR(DRM_PSB_INIT_COMM + DRM_COMMAND_BASE, \
+			 uint32_t)
+#define DRM_IOCTL_PSB_DPST	\
+		DRM_IOWR(DRM_PSB_DPST + DRM_COMMAND_BASE, \
+			 uint32_t)
+#define DRM_IOCTL_PSB_GAMMA	\
+		DRM_IOWR(DRM_PSB_GAMMA + DRM_COMMAND_BASE, \
+			 struct drm_psb_dpst_lut_arg)
+#define DRM_IOCTL_DPST_LEVEL	\
+	DRM_IOWR(DRM_PSB_DPST_LEVEL + DRM_COMMAND_BASE, uint32_t)
+#define DRM_IOCTL_PSB_DPST_BL	\
+		DRM_IOWR(DRM_PSB_DPST_BL + DRM_COMMAND_BASE, \
+			 uint32_t)
+#define DRM_IOCTL_PSB_GET_PIPE_FROM_CRTC_ID	\
+		DRM_IOWR(DRM_PSB_GET_PIPE_FROM_CRTC_ID + DRM_COMMAND_BASE, \
+			 struct drm_psb_get_pipe_from_crtc_id_arg)
+
+/*pvr ioctls*/
+#define PVR_DRM_SRVKM_IOCTL \
+	DRM_IOW(DRM_COMMAND_BASE + PVR_DRM_SRVKM_CMD, \
+		PVRSRV_BRIDGE_PACKAGE)
+#define PVR_DRM_DISP_IOCTL \
+	DRM_IO(DRM_COMMAND_BASE + PVR_DRM_DISP_CMD)
+#define PVR_DRM_IS_MASTER_IOCTL \
+	DRM_IO(DRM_COMMAND_BASE + PVR_DRM_IS_MASTER_CMD)
+#define PVR_DRM_UNPRIV_IOCTL \
+	DRM_IOWR(DRM_COMMAND_BASE + PVR_DRM_UNPRIV_CMD, \
+		IMG_UINT32)
+#if defined(PDUMP)
+#define PVR_DRM_DBGDRV_IOCTL \
+	DRM_IOW(DRM_COMMAND_BASE + PVR_DRM_DBGDRV_CMD, IOCTL_PACKAGE)
+#endif
+
+/*DPU/DSR stuff*/
+#define DRM_IOCRL_PSB_DPU_QUERY DRM_IOR(DRM_PSB_DPU_QUERY + DRM_COMMAND_BASE, IMG_UINT32)
+#define DRM_IOCRL_PSB_DPU_DSR_ON DRM_IOW(DRM_PSB_DPU_DSR_ON + DRM_COMMAND_BASE, IMG_UINT32)
+/* #define DRM_IOCRL_PSB_DPU_DSR_OFF DRM_IOW(DRM_PSB_DPU_DSR_OFF + DRM_COMMAND_BASE, IMG_UINT32) */
+#define DRM_IOCRL_PSB_DPU_DSR_OFF DRM_IOW(DRM_PSB_DPU_DSR_OFF + DRM_COMMAND_BASE, struct drm_psb_drv_dsr_off_arg)
+
+/*HDMI FB stuff*/
+#define DRM_IOCTL_PSB_HDMI_FB_CMD DRM_IOWR(DRM_PSB_HDMI_FB_CMD + DRM_COMMAND_BASE, struct drm_psb_disp_ctrl)
+
+/* HDCP IOCTLs */
+#define DRM_IOCTL_PSB_QUERY_HDCP \
+		DRM_IOR(DRM_PSB_QUERY_HDCP + DRM_COMMAND_BASE, uint32_t)
+#define DRM_IOCTL_PSB_VALIDATE_HDCP_KSV \
+		DRM_IOWR(DRM_PSB_VALIDATE_HDCP_KSV + DRM_COMMAND_BASE, sqword_t)
+#define DRM_IOCTL_PSB_GET_HDCP_STATUS \
+		DRM_IOR(DRM_PSB_GET_HDCP_STATUS + DRM_COMMAND_BASE, uint32_t)
+#define DRM_IOCTL_PSB_ENABLE_HDCP \
+		DRM_IO(DRM_PSB_ENABLE_HDCP + DRM_COMMAND_BASE)
+#define DRM_IOCTL_PSB_DISABLE_HDCP \
+		DRM_IO(DRM_PSB_DISABLE_HDCP + DRM_COMMAND_BASE)
+#define DRM_IOCTL_PSB_GET_HDCP_LINK_STATUS \
+		DRM_IOR(DRM_PSB_GET_HDCP_LINK_STATUS + DRM_COMMAND_BASE, uint32_t)
+#define DRM_IOCTL_PSB_HDCP_DISPLAY_IED_OFF \
+		DRM_IO(DRM_PSB_HDCP_DISPLAY_IED_OFF + DRM_COMMAND_BASE)
+#define DRM_IOCTL_PSB_HDCP_DISPLAY_IED_ON \
+		DRM_IO(DRM_PSB_HDCP_DISPLAY_IED_ON + DRM_COMMAND_BASE)
+#define DRM_IOCTL_PSB_QUERY_HDCP_DISPLAY_IED_CAPS \
+		DRM_IOR(DRM_PSB_QUERY_HDCP_DISPLAY_IED_CAPS \
+			+ DRM_COMMAND_BASE, uint32_t)
+/* CSC IOCTLS */
+#define DRM_IOCTL_PSB_SET_CSC \
+	DRM_IOW(DRM_PSB_SET_CSC + DRM_COMMAND_BASE, struct drm_psb_csc_matrix)
+
+/* VSYNC IOCTL */
+#define DRM_IOCTL_PSB_VSYNC_SET \
+	DRM_IOWR(DRM_PSB_VSYNC_SET + DRM_COMMAND_BASE,		\
+			struct drm_psb_vsync_set_arg)
+
+/* GET DC INFO IOCTL */
+#define DRM_IOCTL_PSB_GET_DC_INFO \
+	DRM_IOR(DRM_PSB_GET_DC_INFO + DRM_COMMAND_BASE,		\
+			struct drm_psb_dc_info)
+
+/*CSC GAMMA Setting*/
+#define DRM_IOCTL_PSB_CSC_GAMMA_SETTING \
+		DRM_IOWR(DRM_PSB_CSC_GAMMA_SETTING + DRM_COMMAND_BASE, struct drm_psb_csc_gamma_setting)
+
+#define DRM_IOCTL_PSB_ENABLE_IED_SESSION \
+		DRM_IO(DRM_PSB_ENABLE_IED_SESSION + DRM_COMMAND_BASE)
+#define DRM_IOCTL_PSB_DISABLE_IED_SESSION \
+		DRM_IO(DRM_PSB_DISABLE_IED_SESSION + DRM_COMMAND_BASE)
+
+/*
+ * TTM execbuf extension.
+ */
+
+#define DRM_IOCTL_PSB_CMDBUF	\
+		DRM_IOW(DRM_PSB_CMDBUF + DRM_COMMAND_BASE,	\
+			struct drm_psb_cmdbuf_arg)
+#define DRM_IOCTL_PSB_SCENE_UNREF	\
+		DRM_IOW(DRM_PSB_SCENE_UNREF + DRM_COMMAND_BASE, \
+			struct drm_psb_scene)
+#define DRM_IOCTL_PSB_KMS_OFF	  DRM_IO(DRM_PSB_KMS_OFF + DRM_COMMAND_BASE)
+#define DRM_IOCTL_PSB_KMS_ON	  DRM_IO(DRM_PSB_KMS_ON + DRM_COMMAND_BASE)
+#define DRM_IOCTL_PSB_EXTENSION	\
+		DRM_IOWR(DRM_PSB_EXTENSION + DRM_COMMAND_BASE, \
+			 union drm_psb_extension_arg)
+
+/*
+ * TTM placement user extension.
+ */
+
+#define DRM_PSB_TTM_PL_CREATE	 (TTM_PL_CREATE + DRM_PSB_PLACEMENT_OFFSET)
+#define DRM_PSB_TTM_PL_REFERENCE (TTM_PL_REFERENCE + DRM_PSB_PLACEMENT_OFFSET)
+#define DRM_PSB_TTM_PL_UNREF	 (TTM_PL_UNREF + DRM_PSB_PLACEMENT_OFFSET)
+#define DRM_PSB_TTM_PL_SYNCCPU	 (TTM_PL_SYNCCPU + DRM_PSB_PLACEMENT_OFFSET)
+#define DRM_PSB_TTM_PL_WAITIDLE  (TTM_PL_WAITIDLE + DRM_PSB_PLACEMENT_OFFSET)
+#define DRM_PSB_TTM_PL_SETSTATUS (TTM_PL_SETSTATUS + DRM_PSB_PLACEMENT_OFFSET)
+#define DRM_PSB_TTM_PL_CREATE_UB (TTM_PL_CREATE_UB + DRM_PSB_PLACEMENT_OFFSET)
+
+/*
+ * TTM fence extension.
+ */
+
+#define DRM_PSB_FENCE_OFFSET	   (DRM_PSB_TTM_PL_CREATE_UB + 1)
+#define DRM_PSB_TTM_FENCE_SIGNALED (TTM_FENCE_SIGNALED + DRM_PSB_FENCE_OFFSET)
+#define DRM_PSB_TTM_FENCE_FINISH   (TTM_FENCE_FINISH + DRM_PSB_FENCE_OFFSET)
+#define DRM_PSB_TTM_FENCE_UNREF    (TTM_FENCE_UNREF + DRM_PSB_FENCE_OFFSET)
+
+#define DRM_PSB_FLIP	   (DRM_PSB_TTM_FENCE_UNREF + 1)	/*20*/
+/* PSB video extension */
+#define DRM_PSB_VIDEO_GETPARAM		(DRM_PSB_FLIP + 1)
+
+#define DRM_IOCTL_PSB_TTM_PL_CREATE    \
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_CREATE,\
+		 union ttm_pl_create_arg)
+#define DRM_IOCTL_PSB_TTM_PL_REFERENCE \
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_REFERENCE,\
+		 union ttm_pl_reference_arg)
+#define DRM_IOCTL_PSB_TTM_PL_UNREF    \
+	DRM_IOW(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_UNREF,\
+		struct ttm_pl_reference_req)
+#define DRM_IOCTL_PSB_TTM_PL_SYNCCPU	\
+	DRM_IOW(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_SYNCCPU,\
+		struct ttm_pl_synccpu_arg)
+#define DRM_IOCTL_PSB_TTM_PL_WAITIDLE	 \
+	DRM_IOW(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_WAITIDLE,\
+		struct ttm_pl_waitidle_arg)
+#define DRM_IOCTL_PSB_TTM_PL_SETSTATUS \
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_SETSTATUS,\
+		 union ttm_pl_setstatus_arg)
+#define DRM_IOCTL_PSB_TTM_PL_CREATE_UB    \
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_CREATE_UB,\
+		 union ttm_pl_create_ub_arg)
+#define DRM_IOCTL_PSB_TTM_FENCE_SIGNALED \
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_FENCE_SIGNALED,	\
+		  union ttm_fence_signaled_arg)
+#define DRM_IOCTL_PSB_TTM_FENCE_FINISH \
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_FENCE_FINISH,	\
+		 union ttm_fence_finish_arg)
+#define DRM_IOCTL_PSB_TTM_FENCE_UNREF \
+	DRM_IOW(DRM_COMMAND_BASE + DRM_PSB_TTM_FENCE_UNREF,	\
+		 struct ttm_fence_unref_arg)
+#define DRM_IOCTL_PSB_FLIP \
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_FLIP, \
+		 struct drm_psb_pageflip_arg)
+#define DRM_IOCTL_PSB_VIDEO_GETPARAM \
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_VIDEO_GETPARAM, \
+		 struct drm_lnc_video_getparam_arg)
+
+static int psb_vt_leave_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv);
+static int psb_vt_enter_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv);
+static int psb_sizes_ioctl(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv);
+static int psb_fuse_reg_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv);
+static int psb_vbt_ioctl(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv);
+static int psb_dc_state_ioctl(struct drm_device *dev, void * data,
+			      struct drm_file *file_priv);
+static int psb_adb_ioctl(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv);
+static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
+				    struct drm_file *file_priv);
+static int psb_stolen_memory_ioctl(struct drm_device *dev, void *data,
+				   struct drm_file *file_priv);
+static int psb_vsync_set_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv);
+static int psb_get_dc_info_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv);
+static int psb_register_rw_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv);
+#ifdef CONFIG_CTP_DPST
+static int psb_hist_enable_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv);
+static int psb_hist_status_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv);
+static int psb_update_guard_ioctl(struct drm_device *dev, void *data,
+				  struct drm_file *file_priv);
+static int psb_init_comm_ioctl(struct drm_device *dev, void *data,
+			       struct drm_file *file_priv);
+static int psb_dpst_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv);
+static int psb_gamma_ioctl(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv);
+static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *file_priv);
+#endif
+static int psb_dpu_query_ioctl(struct drm_device *dev, void *data,
+			       struct drm_file *file_priv);
+static int psb_dpu_dsr_on_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file_priv);
+
+static int psb_dpu_dsr_off_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv);
+static int psb_disp_ioctl(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv);
+
+#ifdef CONFIG_SUPPORT_HDMI
+static int psb_query_hdcp_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv);
+static int psb_validate_hdcp_ksv_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv);
+static int psb_get_hdcp_status_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv);
+static int psb_enable_hdcp_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv);
+static int psb_disable_hdcp_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv);
+static int psb_get_hdcp_link_status_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv);
+static int psb_enable_display_ied_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv);
+static int psb_disable_display_ied_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv);
+static int psb_query_display_ied_caps_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv);
+#endif
+
+static int psb_set_csc_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv);
+static int psb_csc_gamma_setting_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv);
+
+static int psb_enable_ied_session_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv);
+static int psb_disable_ied_session_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv);
+#ifdef CONFIG_CTP_DPST
+extern int psb_dpst_get_level_ioctl(struct drm_device *dev, void *data,
+		struct drm_file *file_priv);
+#endif
+
+/* wrapper for PVR ioctl functions to avoid direct call */
+int PVRDRM_Dummy_ioctl2(struct drm_device *dev, void *arg,
+			struct drm_file *pFile)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) dev->dev_private;
+
+	BUG_ON(!dev_priv->pvr_ops);
+	return dev_priv->pvr_ops->PVRDRM_Dummy_ioctl(dev, arg, pFile);
+}
+int PVRSRV_BridgeDispatchKM2(struct drm_device unref__ * dev,
+		void *arg, struct drm_file *pFile)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) dev->dev_private;
+
+	BUG_ON(!dev_priv->pvr_ops);
+	return dev_priv->pvr_ops->PVRSRV_BridgeDispatchKM(dev, arg, pFile);
+}
+int PVRDRMIsMaster2(struct drm_device *dev, void *arg,
+		struct drm_file *pFile)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) dev->dev_private;
+
+	BUG_ON(!dev_priv->pvr_ops);
+	return dev_priv->pvr_ops->PVRDRMIsMaster(dev, arg, pFile);
+}
+int PVRDRMUnprivCmd2(struct drm_device *dev, void *arg,
+		struct drm_file *pFile)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) dev->dev_private;
+
+	BUG_ON(!dev_priv->pvr_ops);
+	return dev_priv->pvr_ops->PVRDRMUnprivCmd(dev, arg, pFile);
+}
+void PVRSRVDrmPostClose2(struct drm_device *dev,
+		struct drm_file *file)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) dev->dev_private;
+
+	BUG_ON(!dev_priv->pvr_ops);
+	return dev_priv->pvr_ops->PVRSRVDrmPostClose(dev, file);
+}
+#if defined(PDUMP)
+int SYSPVRDBGDrivIoctl2(struct drm_device *dev, IMG_VOID *arg,
+		struct drm_file *pFile)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) dev->dev_private;
+
+	BUG_ON(!dev_priv->pvr_ops);
+	return dev_priv->pvr_ops->SYSPVRDBGDrivIoctl(dev, arg, pFile);
+}
+#endif
+
+#define PSB_IOCTL_DEF(ioctl, func, flags) \
+	[DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = {ioctl, flags, func, ioctl}
+
+static struct drm_ioctl_desc psb_ioctls[] = {
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_KMS_OFF, psbfb_kms_off_ioctl,
+	DRM_ROOT_ONLY),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_KMS_ON,
+	psbfb_kms_on_ioctl,
+	DRM_ROOT_ONLY),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_VT_LEAVE, psb_vt_leave_ioctl,
+	DRM_ROOT_ONLY),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_VT_ENTER,
+	psb_vt_enter_ioctl,
+	DRM_ROOT_ONLY),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_EXTENSION, psb_extension_ioctl, DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_SIZES, psb_sizes_ioctl, DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_FUSE_REG, psb_fuse_reg_ioctl, DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_VBT, psb_vbt_ioctl, DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_DC_STATE, psb_dc_state_ioctl, DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_ADB, psb_adb_ioctl, DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_MODE_OPERATION, psb_mode_operation_ioctl,
+	DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_STOLEN_MEMORY, psb_stolen_memory_ioctl,
+	DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_REGISTER_RW, psb_register_rw_ioctl,
+	DRM_AUTH | DRM_UNLOCKED),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_GTT_MAP,
+	psb_gtt_map_meminfo_ioctl,
+	DRM_AUTH | DRM_UNLOCKED),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_GTT_UNMAP,
+	psb_gtt_unmap_meminfo_ioctl,
+	DRM_AUTH | DRM_UNLOCKED),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_GETPAGEADDRS,
+	psb_getpageaddrs_ioctl,
+	DRM_AUTH),
+	PSB_IOCTL_DEF(PVR_DRM_SRVKM_IOCTL, PVRSRV_BridgeDispatchKM2,
+		DRM_UNLOCKED),
+	PSB_IOCTL_DEF(PVR_DRM_DISP_IOCTL, PVRDRM_Dummy_ioctl2, 0),
+	PSB_IOCTL_DEF(PVR_DRM_IS_MASTER_IOCTL, PVRDRMIsMaster2, DRM_MASTER),
+	PSB_IOCTL_DEF(PVR_DRM_UNPRIV_IOCTL, PVRDRMUnprivCmd2, DRM_UNLOCKED),
+#ifdef CONFIG_CTP_DPST
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_HIST_ENABLE,
+	psb_hist_enable_ioctl,
+	DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_HIST_STATUS,
+	psb_hist_status_ioctl,
+	DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_UPDATE_GUARD, psb_update_guard_ioctl, DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_INIT_COMM, psb_init_comm_ioctl, DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_DPST, psb_dpst_ioctl, DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_GAMMA, psb_gamma_ioctl, DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_DPST_BL, psb_dpst_bl_ioctl, DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_DPST_LEVEL, psb_dpst_get_level_ioctl, DRM_AUTH),
+#endif
+#if defined(PDUMP)
+	PSB_IOCTL_DEF(PVR_DRM_DBGDRV_IOCTL, SYSPVRDBGDrivIoctl2, 0),
+#endif
+#ifdef CONFIG_MDFD_VIDEO_DECODE
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_CMDBUF, psb_cmdbuf_ioctl,
+		DRM_AUTH | DRM_UNLOCKED),
+	/*to be removed later*/
+	/*PSB_IOCTL_DEF(DRM_IOCTL_PSB_SCENE_UNREF, drm_psb_scene_unref_ioctl,
+		      DRM_AUTH),*/
+
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_CREATE, psb_pl_create_ioctl,
+	DRM_AUTH | DRM_UNLOCKED),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_REFERENCE, psb_pl_reference_ioctl,
+	DRM_AUTH | DRM_UNLOCKED),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_UNREF, psb_pl_unref_ioctl,
+	DRM_AUTH | DRM_UNLOCKED),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_SYNCCPU, psb_pl_synccpu_ioctl,
+	DRM_AUTH | DRM_UNLOCKED),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_WAITIDLE, psb_pl_waitidle_ioctl,
+	DRM_AUTH | DRM_UNLOCKED),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_SETSTATUS, psb_pl_setstatus_ioctl,
+	DRM_AUTH | DRM_UNLOCKED),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_CREATE_UB, psb_pl_ub_create_ioctl,
+	DRM_AUTH | DRM_UNLOCKED),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_FENCE_SIGNALED,
+	psb_fence_signaled_ioctl, DRM_AUTH | DRM_UNLOCKED),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_FENCE_FINISH, psb_fence_finish_ioctl,
+	DRM_AUTH | DRM_UNLOCKED),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_FENCE_UNREF, psb_fence_unref_ioctl,
+	DRM_AUTH | DRM_UNLOCKED),
+	/*to be removed later */
+	/*PSB_IOCTL_DEF(DRM_IOCTL_PSB_FLIP, psb_page_flip, DRM_AUTH),*/
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_VIDEO_GETPARAM,
+	psb_video_getparam, DRM_AUTH | DRM_UNLOCKED),
+#endif
+	PSB_IOCTL_DEF(DRM_IOCRL_PSB_DPU_QUERY, psb_dpu_query_ioctl,
+	DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCRL_PSB_DPU_DSR_ON, psb_dpu_dsr_on_ioctl,
+	DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCRL_PSB_DPU_DSR_OFF, psb_dpu_dsr_off_ioctl,
+	DRM_AUTH),
+#ifdef CONFIG_SUPPORT_HDMI
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_HDMI_FB_CMD, psb_disp_ioctl, 0),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_QUERY_HDCP, psb_query_hdcp_ioctl, DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_VALIDATE_HDCP_KSV, psb_validate_hdcp_ksv_ioctl, DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_GET_HDCP_STATUS, psb_get_hdcp_status_ioctl, DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_ENABLE_HDCP, psb_enable_hdcp_ioctl, DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_DISABLE_HDCP, psb_disable_hdcp_ioctl, DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_GET_HDCP_LINK_STATUS, psb_get_hdcp_link_status_ioctl, DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_HDCP_DISPLAY_IED_OFF,
+			psb_disable_display_ied_ioctl, DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_HDCP_DISPLAY_IED_ON,
+			psb_enable_display_ied_ioctl, DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_QUERY_HDCP_DISPLAY_IED_CAPS,
+			psb_query_display_ied_caps_ioctl, DRM_AUTH),
+#endif
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_CSC_GAMMA_SETTING, psb_csc_gamma_setting_ioctl, DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_SET_CSC, psb_set_csc_ioctl, DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_VSYNC_SET, psb_vsync_set_ioctl,
+	DRM_AUTH | DRM_UNLOCKED),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_GET_DC_INFO, psb_get_dc_info_ioctl,
+	DRM_AUTH | DRM_UNLOCKED),
+
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_ENABLE_IED_SESSION,
+	psb_enable_ied_session_ioctl, DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_DISABLE_IED_SESSION,
+	psb_disable_ied_session_ioctl, DRM_AUTH),
+};
+
+static void get_imr_info(struct drm_psb_private *dev_priv)
+{
+	u32 high, low, start, end;
+	int size = 0;
+
+	low = intel_mid_msgbus_read32(PNW_IMR_MSG_PORT,
+			PNW_IMR4L_MSG_REGADDR);
+	high = intel_mid_msgbus_read32(PNW_IMR_MSG_PORT,
+			PNW_IMR4H_MSG_REGADDR);
+
+	start = (low & PNW_IMR_ADDRESS_MASK) << PNW_IMR_ADDRESS_SHIFT;
+	end = (high & PNW_IMR_ADDRESS_MASK) << PNW_IMR_ADDRESS_SHIFT;
+	if (end > start)
+		size = end - start + 1;
+	if (size > 0) {
+		dev_priv->imr_region_start = start;
+		dev_priv->imr_region_size = size & PAGE_MASK;
+	} else {
+		dev_priv->imr_region_start = 0;
+		dev_priv->imr_region_size = 0;
+	}
+	DRM_INFO("IMR4 start=0x%08x, size=%dB (%d pages)\n",
+		dev_priv->imr_region_start,
+		dev_priv->imr_region_size,
+		dev_priv->imr_region_size >> PAGE_SHIFT);
+	return;
+}
+
+static void psb_set_uopt(struct drm_psb_uopt *uopt)
+{
+	return;
+}
+
+static void psb_lastclose(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) dev->dev_private;
+	struct msvdx_private *msvdx_priv = NULL;
+
+	if (!dev_priv)
+		return;
+
+	msvdx_priv = dev_priv->msvdx_private;
+	if (msvdx_priv) {
+		mutex_lock(&msvdx_priv->msvdx_mutex);
+		if (dev_priv->decode_context.buffers) {
+			vfree(dev_priv->decode_context.buffers);
+			dev_priv->decode_context.buffers = NULL;
+		}
+		mutex_unlock(&msvdx_priv->msvdx_mutex);
+	}
+
+	mutex_lock(&dev_priv->cmdbuf_mutex);
+	if (dev_priv->encode_context.buffers) {
+		vfree(dev_priv->encode_context.buffers);
+		dev_priv->encode_context.buffers = NULL;
+	}
+	mutex_unlock(&dev_priv->cmdbuf_mutex);
+}
+
+static void psb_do_takedown(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) dev->dev_private;
+	struct ttm_bo_device *bdev = &dev_priv->bdev;
+
+	if (dev_priv->have_mem_mmu) {
+		ttm_bo_clean_mm(bdev, DRM_PSB_MEM_MMU);
+		dev_priv->have_mem_mmu = 0;
+	}
+
+	if (dev_priv->have_tt) {
+		ttm_bo_clean_mm(bdev, TTM_PL_TT);
+		dev_priv->have_tt = 0;
+	}
+
+	if (dev_priv->have_imr) {
+		ttm_bo_clean_mm(bdev, TTM_PL_IMR);
+		dev_priv->have_imr = 0;
+	}
+
+#ifdef CONFIG_MDFD_VIDEO_DECODE
+	psb_msvdx_uninit(dev);
+
+	if (IS_MDFLD(dev))
+		pnw_topaz_uninit(dev);
+#endif
+}
+
+#define FB_REG06_MRST 0xD08106F0
+#define FB_REG06_MDFLD 0x108106F0
+#define FB_TOPAZ_DISABLE BIT0
+#define FB_MIPI_DISABLE  BIT11
+#define FB_REG09_MRST 0xD08109F0
+#define FB_REG09_MDFLD 0x108109F0
+#define FB_SKU_MASK  (BIT12|BIT13|BIT14)
+#define FB_SKU_SHIFT 12
+#define FB_SKU_100 0
+#define FB_SKU_100L 1
+#define FB_SKU_83 2
+#define FB_GFX_CLK_DIVIDE_MASK	(BIT20|BIT21|BIT22)
+#define FB_GFX_CLK_DIVIDE_SHIFT 20
+#define FB_VED_CLK_DIVIDE_MASK	(BIT23|BIT24)
+#define FB_VED_CLK_DIVIDE_SHIFT 23
+#define FB_VEC_CLK_DIVIDE_MASK	(BIT25|BIT26)
+#define FB_VEC_CLK_DIVIDE_SHIFT 25
+
+
+void mrst_get_fuse_settings(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	uint32_t fuse_value = 0;
+	uint32_t fuse_value_tmp = 0;
+
+	fuse_value = intel_mid_msgbus_read32_raw(IS_MDFLD(dev) ?
+			FB_REG06_MDFLD : FB_REG06_MRST);
+
+	dev_priv->iLVDS_enable = fuse_value & FB_MIPI_DISABLE;
+
+	if (IS_MDFLD(dev)) {
+		dev_priv->iLVDS_enable = 0;
+	}
+
+	PSB_DEBUG_ENTRY("internal display is %s\n",
+			dev_priv->iLVDS_enable ? "LVDS display" : "MIPI display");
+
+	/*prevent Runtime suspend at start*/
+	if (dev_priv->iLVDS_enable) {
+		dev_priv->is_lvds_on = true;
+		dev_priv->is_mipi_on = false;
+	} else {
+		dev_priv->is_mipi_on = true;
+		dev_priv->is_lvds_on = false;
+	}
+
+	if (dev_priv->dev->pci_device == PCI_ID_TOPAZ_DISABLED)
+		dev_priv->topaz_disabled = 1;
+	else
+		dev_priv->topaz_disabled = 0;
+
+	dev_priv->video_device_fuse = fuse_value;
+
+	PSB_DEBUG_ENTRY("topaz is %s\n",
+			dev_priv->topaz_disabled ? "disabled" : "enabled");
+
+	fuse_value = intel_mid_msgbus_read32_raw(IS_MDFLD(dev) ?
+			FB_REG09_MDFLD : FB_REG09_MRST);
+
+	PSB_DEBUG_ENTRY("SKU values is 0x%x.\n", fuse_value);
+	fuse_value_tmp = (fuse_value & FB_SKU_MASK) >> FB_SKU_SHIFT;
+
+	dev_priv->fuse_reg_value = fuse_value;
+
+	switch (fuse_value_tmp) {
+	case FB_SKU_100:
+		dev_priv->core_freq = 200;
+		break;
+	case FB_SKU_100L:
+		dev_priv->core_freq = 100;
+		break;
+	case FB_SKU_83:
+		dev_priv->core_freq = 166;
+		break;
+	default:
+		DRM_ERROR("Invalid SKU values, SKU value = 0x%08x\n", fuse_value_tmp);
+		dev_priv->core_freq = 0;
+	}
+	PSB_DEBUG_ENTRY("LNC core clk is %dMHz.\n", dev_priv->core_freq);
+
+#if 0 /* debug message */
+	fuse_value_tmp =
+		(fuse_value & FB_GFX_CLK_DIVIDE_MASK) >> FB_GFX_CLK_DIVIDE_SHIFT;
+
+	switch (fuse_value_tmp) {
+	case 0:
+		DRM_INFO("Gfx clk : core clk = 1:1. \n");
+		break;
+	case 1:
+		DRM_INFO("Gfx clk : core clk = 4:3. \n");
+		break;
+	case 2:
+		DRM_INFO("Gfx clk : core clk = 8:5. \n");
+		break;
+	case 3:
+		DRM_INFO("Gfx clk : core clk = 2:1. \n");
+		break;
+	case 4:
+		DRM_INFO("Gfx clk : core clk = 16:7. \n");
+		break;
+	case 5:
+		DRM_INFO("Gfx clk : core clk = 8:3. \n");
+		break;
+	case 6:
+		DRM_INFO("Gfx clk : core clk = 16:5. \n");
+		break;
+	case 7:
+		DRM_INFO("Gfx clk : core clk = 4:1. \n");
+		break;
+	default:
+		DRM_ERROR("Invalid GFX CLK DIVIDE values, value = 0x%08x\n",
+			  fuse_value_tmp);
+	}
+
+	fuse_value_tmp =
+		(fuse_value & FB_VED_CLK_DIVIDE_MASK) >> FB_VED_CLK_DIVIDE_SHIFT;
+
+	switch (fuse_value_tmp) {
+	case 0:
+		DRM_INFO("Ved clk : core clk = 1:1. \n");
+		break;
+	case 1:
+		DRM_INFO("Ved clk : core clk = 4:3. \n");
+		break;
+	case 2:
+		DRM_INFO("Ved clk : core clk = 8:5. \n");
+		break;
+	case 3:
+		DRM_INFO("Ved clk : core clk = 2:1. \n");
+		break;
+	default:
+		DRM_ERROR("Invalid VED CLK DIVIDE values, value = 0x%08x\n",
+			  fuse_value_tmp);
+	}
+
+	fuse_value_tmp =
+		(fuse_value & FB_VEC_CLK_DIVIDE_MASK) >> FB_VEC_CLK_DIVIDE_SHIFT;
+
+	switch (fuse_value_tmp) {
+	case 0:
+		DRM_INFO("Vec clk : core clk = 1:1. \n");
+		break;
+	case 1:
+		DRM_INFO("Vec clk : core clk = 4:3. \n");
+		break;
+	case 2:
+		DRM_INFO("Vec clk : core clk = 8:5. \n");
+		break;
+	case 3:
+		DRM_INFO("Vec clk : core clk = 2:1. \n");
+		break;
+	default:
+		DRM_ERROR("Invalid VEC CLK DIVIDE values, value = 0x%08x\n",
+			  fuse_value_tmp);
+	}
+#endif /* FIXME remove it after PO */
+
+	if (IS_MDFLD(dev)) {
+#if KSEL_BYPASS_83_100_ENABLE
+		dev_priv->ksel = KSEL_BYPASS_83_100;
+#endif /* KSEL_BYPASS_83_100_ENABLE */
+
+#if  KSEL_CRYSTAL_19_ENABLED
+		dev_priv->ksel = KSEL_CRYSTAL_19;
+#endif /*  KSEL_CRYSTAL_19_ENABLED */
+
+#if  KSEL_CRYSTAL_38_ENABLED
+		dev_priv->ksel = KSEL_CRYSTAL_38;
+#endif /*  KSEL_CRYSTAL_38_ENABLED */
+	}
+
+	return;
+}
+
+bool mid_get_pci_revID(struct drm_psb_private *dev_priv)
+{
+	uint32_t platform_rev_id = 0;
+	struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
+
+	if (!pci_gfx_root) {
+		DRM_ERROR("Invalid root\n");
+		return false;
+	}
+
+	/*get the revison ID, B0:D2:F0;0x08 */
+	pci_read_config_dword(pci_gfx_root, 0x08, &platform_rev_id);
+	dev_priv->platform_rev_id = (uint8_t) platform_rev_id;
+	pci_dev_put(pci_gfx_root);
+	PSB_DEBUG_ENTRY("platform_rev_id is %x\n",	dev_priv->platform_rev_id);
+
+	return true;
+}
+
+static bool intel_mid_get_vbt_data(struct drm_psb_private *dev_priv)
+{
+	u32 platform_config_address;
+	u8 *pVBT_virtual;
+	u8 primary_panel;
+	u8 number_desc = 0;
+	u8 panel_name[PANEL_NAME_MAX_LEN+1] = {0};
+	struct intel_mid_vbt *pVBT = &dev_priv->vbt_data;
+	void *panel_desc;
+	struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
+	mdfld_dsi_encoder_t mipi_mode;
+	int ret = 0, len = 0;
+	struct platform_device *pdev;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (!pci_gfx_root) {
+		DRM_ERROR("Invalid root\n");
+		return false;
+	}
+
+	/*get the address of the platform config vbt, B0:D2:F0;0xFC */
+	pci_read_config_dword(pci_gfx_root, 0xFC, &platform_config_address);
+	pci_dev_put(pci_gfx_root);
+
+	/**
+	 * if platform_config_address is 0,
+	 * that means FW doesn't support VBT
+	 */
+	if (platform_config_address == 0) {
+		pVBT->size = 0;
+		return false;
+	}
+
+	/*copy vbt data to local memory*/
+	pVBT_virtual = ioremap(platform_config_address, sizeof(*pVBT));
+	if (!pVBT_virtual) {
+		DRM_ERROR("fail to ioremap platform_config_address:0x%x\n",
+			  platform_config_address);
+		return false;
+	}
+	memcpy(pVBT, pVBT_virtual, sizeof(*pVBT));
+	iounmap(pVBT_virtual); /* Free virtual address space */
+
+	if (strncmp(pVBT->signature, "$GCT", 4)) {
+		DRM_ERROR("wrong GCT signature\n");
+		return false;
+	}
+
+	PSB_DEBUG_ENTRY("GCT Revision is %#x\n", pVBT->revision);
+
+	number_desc = pVBT->num_of_panel_desc;
+	primary_panel = pVBT->primary_panel_idx;
+	dev_priv->gct_data.bpi = primary_panel; /*save boot panel id*/
+
+	/**
+	 * current we just need parse revision 0x11 and 0x20
+	 */
+	switch (pVBT->revision) {
+	case 0x11:
+		/* number of descriptors defined in the GCT */
+		pVBT->panel_descs =
+			ioremap(platform_config_address + GCT_R11_HEADER_SIZE,
+				GCT_R11_DISPLAY_DESC_SIZE * number_desc);
+		panel_desc = (u8 *)pVBT->panel_descs +
+			(primary_panel * GCT_R11_DISPLAY_DESC_SIZE);
+
+		if (!panel_desc) {
+			DRM_ERROR("Invalid desc\n");
+			return false;
+		}
+
+		strncpy(panel_name, panel_desc, PANEL_NAME_MAX_LEN);
+
+		mipi_mode =
+		((struct gct_r11_panel_desc *)panel_desc)->display.mode ? \
+			MDFLD_DSI_ENCODER_DPI : MDFLD_DSI_ENCODER_DBI;
+
+		break;
+	case 0x20:
+		pVBT->panel_descs =
+			ioremap(platform_config_address + GCT_R20_HEADER_SIZE,
+				GCT_R20_DISPLAY_DESC_SIZE * number_desc);
+		panel_desc = (u8 *)pVBT->panel_descs +
+			(primary_panel * GCT_R20_DISPLAY_DESC_SIZE);
+
+		if (!panel_desc) {
+			DRM_ERROR("Invalid desc\n");
+			return false;
+		}
+
+		strncpy(panel_name, panel_desc, PANEL_NAME_MAX_LEN);
+
+		mipi_mode =
+		((struct gct_r20_panel_desc *)panel_desc)->panel_mode.mode ?\
+			MDFLD_DSI_ENCODER_DPI : MDFLD_DSI_ENCODER_DBI;
+		break;
+	default:
+		pr_err("unsupported GCT revision\n");
+		pVBT->size = 0;
+		return false;
+	}
+
+	len = strnlen(panel_name, PANEL_NAME_MAX_LEN);
+	if (len) {
+		strncpy(dev_priv->panel_info.name, panel_name, len);
+		dev_priv->panel_info.mode = mipi_mode;
+	} else {
+		DRM_ERROR("%s: detect panel info from gct error\n",
+				__func__);
+		return false;
+	}
+
+	pdev = platform_device_alloc(panel_name, -1);
+	if (!pdev) {
+		DRM_ERROR("%s: fail to alloc platform device\n", __func__);
+		return false;
+	}
+	ret = platform_device_add(pdev);
+	if (ret) {
+		DRM_ERROR("%s: fail to add platform device\n", __func__);
+		return false;
+	}
+
+	DRM_INFO("%s: panel name: %s, mipi_mode = %d\n", __func__,
+			panel_name, mipi_mode);
+
+	return true;
+}
+
+#ifdef CONFIG_SUPPORT_HDMI
+void hdmi_do_audio_wq(struct work_struct *work)
+{
+	struct drm_psb_private *dev_priv = container_of(work,
+		struct drm_psb_private,
+		hdmi_audio_wq);
+	bool hdmi_hpd_connected = false;
+
+	/* As in the hdmi_do_hotplug_wq() function above
+	* it seems we should not be running this section of
+	* code if we don't also have CONFIG_SUPPORT_HDMI set,
+	* some devices might not want/need support for HDMI
+	* early in the platform bring up and by having this
+	* available to run might produce unexpected results
+	* if HDMI connector is plugged in.
+	*/
+
+	DRM_INFO("hdmi_do_audio_wq: Checking for HDMI connection at boot\n");
+	hdmi_hpd_connected = android_hdmi_is_connected(dev_priv->dev);
+
+	if (hdmi_hpd_connected) {
+		DRM_INFO("hdmi_do_audio_wq: HDMI plugged in\n");
+		mid_hdmi_audio_signal_event(dev_priv->dev, HAD_EVENT_HOT_PLUG);
+	}
+}
+#endif
+
+static int psb_do_init(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) dev->dev_private;
+	struct ttm_bo_device *bdev = &dev_priv->bdev;
+	struct psb_gtt *pg = dev_priv->pg;
+
+	uint32_t tmp;
+	uint32_t stolen_gtt;
+	uint32_t tt_start;
+	uint32_t tt_pages;
+
+	int ret = -ENOMEM;
+
+	/*
+	 * Initialize sequence numbers for the different command
+	 * submission mechanisms.
+	 */
+	dev_priv->sequence[PSB_ENGINE_DECODE] = 1;
+	dev_priv->sequence[LNC_ENGINE_ENCODE] = 0;
+
+	if (pg->mmu_gatt_start & 0x0FFFFFFF) {
+		DRM_ERROR("Gatt must be 256M aligned. This is a bug.\n");
+		ret = -EINVAL;
+		goto out_err;
+	}
+
+	stolen_gtt = (pg->stolen_size >> PAGE_SHIFT) * 4;
+	stolen_gtt = (stolen_gtt + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	stolen_gtt =
+		(stolen_gtt < pg->gtt_pages) ? stolen_gtt : pg->gtt_pages;
+
+	dev_priv->gatt_free_offset = pg->mmu_gatt_start +
+				     (stolen_gtt << PAGE_SHIFT) * 1024;
+
+	if (1 || drm_debug) {
+		uint32_t core_id = PSB_RSGX32(PSB_CR_CORE_ID);
+		uint32_t core_rev = PSB_RSGX32(PSB_CR_CORE_REVISION);
+		DRM_INFO("SGX core id = 0x%08x\n", core_id);
+		DRM_INFO("SGX core rev major = 0x%02x, minor = 0x%02x\n",
+			 (core_rev & _PSB_CC_REVISION_MAJOR_MASK) >>
+			 _PSB_CC_REVISION_MAJOR_SHIFT,
+			 (core_rev & _PSB_CC_REVISION_MINOR_MASK) >>
+			 _PSB_CC_REVISION_MINOR_SHIFT);
+		DRM_INFO
+		("SGX core rev maintenance = 0x%02x, designer = 0x%02x\n",
+		 (core_rev & _PSB_CC_REVISION_MAINTENANCE_MASK) >>
+		 _PSB_CC_REVISION_MAINTENANCE_SHIFT,
+		 (core_rev & _PSB_CC_REVISION_DESIGNER_MASK) >>
+		 _PSB_CC_REVISION_DESIGNER_SHIFT);
+	}
+
+	spin_lock_init(&dev_priv->irqmask_lock);
+	spin_lock_init(&dev_priv->flip_lock);
+
+	tt_pages = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ?
+		   pg->gatt_pages : PSB_TT_PRIV0_PLIMIT;
+	tt_start = dev_priv->gatt_free_offset - pg->mmu_gatt_start;
+	tt_pages -= tt_start >> PAGE_SHIFT;
+	dev_priv->sizes.ta_mem_size = 0;
+
+	/* IMR region managed by TTM */
+	tmp = dev_priv->imr_region_size >> PAGE_SHIFT; /* IMR region size */
+	if ((dev_priv->imr_region_size != 0) &&
+	    !ttm_bo_init_mm(bdev, TTM_PL_IMR, tmp))
+		dev_priv->have_imr = 1;
+
+	/* TT region managed by TTM. */
+	tmp = pg->gatt_pages -
+		(pg->gtt_video_start >> PAGE_SHIFT) -
+		(dev_priv->ci_region_size >> PAGE_SHIFT); /* TT region size */
+	if (!ttm_bo_init_mm(bdev, TTM_PL_TT, tmp))
+		dev_priv->have_tt = 1;
+
+	/* MMU region managed by TTM */
+	tmp = PSB_MEM_IMR_START >> PAGE_SHIFT; /* MMU region size:MMU->IMR */
+	if (!ttm_bo_init_mm(bdev, DRM_PSB_MEM_MMU, tmp))
+		dev_priv->have_mem_mmu = 1;
+
+	if (IS_MSVDX_MEM_TILE(dev)) {
+		/* Create tiling MMU region managed by TTM */
+		tmp = (0x10000000) >> PAGE_SHIFT;
+		printk(KERN_INFO "init tiling heap, size is 0x%08x pages\n", tmp);
+		if (!ttm_bo_init_mm(bdev, DRM_PSB_MEM_MMU_TILING, tmp))
+			dev_priv->have_mem_mmu_tiling = 1;
+	}
+
+	PSB_DEBUG_INIT("Init MSVDX\n");
+
+	/*
+	 * Customer will boot droidboot, then boot the MOS kernel.
+	 * It is observed the video decode island is off during the
+	 * MOS kernel boot.
+	 * We need to power on it first, else will cause the fabric error.
+	 */
+	if (ospm_power_island_up(OSPM_VIDEO_DEC_ISLAND)) {
+		DRM_ERROR("ospm_video_dec_island_up failed.\n");
+		ret = -EINVAL;
+		goto out_err;
+	}
+	psb_msvdx_init(dev);
+
+	PSB_DEBUG_INIT("Init Topaz\n");
+
+	/*
+	 * Customer will boot droidboot, then boot the MOS kernel.
+	 * It is observed the video encode island is off during the
+	 * MOS kernel boot while the panel is not connected.
+	 * We need to power on it first, else will cause fabric error.
+	 */
+	if (ospm_power_island_up(OSPM_VIDEO_ENC_ISLAND)) {
+		DRM_ERROR("ospm_video_enc_island_up failed.\n");
+		ret = -EINVAL;
+		goto out_err;
+	}
+
+	if (IS_MDFLD(dev))
+		pnw_topaz_init(dev);
+
+	return 0;
+out_err:
+	psb_do_takedown(dev);
+	return ret;
+}
+
+static int psb_driver_unload(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) dev->dev_private;
+
+	/*Fristly, unload pvr driver*/
+	if (!dev_priv || !dev_priv->pvr_ops)
+		return -EINVAL;
+	dev_priv->pvr_ops->PVRSRVDrmUnload(dev);
+
+	/*TODO: destroy DSR/DPU infos here*/
+	psb_backlight_exit(); /*writes minimum value to backlight HW reg */
+
+	if (drm_psb_no_fb == 0)
+		psb_modeset_cleanup(dev);
+
+	destroy_workqueue(te_wq);
+	destroy_workqueue(vsync_wq);
+
+	if (dev_priv) {
+		/* psb_watchdog_takedown(dev_priv); */
+		psb_do_takedown(dev);
+
+#ifdef CONFIG_MDFD_VIDEO_DECODE
+		if (dev_priv->pf_pd) {
+			psb_mmu_free_pagedir(dev_priv->pf_pd);
+			dev_priv->pf_pd = NULL;
+		}
+#endif
+		if (dev_priv->mmu) {
+			struct psb_gtt *pg = dev_priv->pg;
+
+			down_read(&pg->sem);
+#ifdef CONFIG_MDFD_VIDEO_DECODE
+			psb_mmu_remove_pfn_sequence(
+				psb_mmu_get_default_pd
+				(dev_priv->mmu),
+				pg->mmu_gatt_start,
+				pg->vram_stolen_size >> PAGE_SHIFT);
+			if (pg->rar_stolen_size != 0)
+				psb_mmu_remove_pfn_sequence(
+					psb_mmu_get_default_pd
+					(dev_priv->mmu),
+					pg->rar_start,
+					pg->rar_stolen_size >> PAGE_SHIFT);
+#endif
+			up_read(&pg->sem);
+#ifdef CONFIG_MDFD_VIDEO_DECODE
+			psb_mmu_driver_takedown(dev_priv->mmu);
+#endif
+			dev_priv->mmu = NULL;
+		}
+		psb_gtt_takedown(dev_priv->pg, 1);
+		if (dev_priv->scratch_page) {
+			__free_page(dev_priv->scratch_page);
+			dev_priv->scratch_page = NULL;
+		}
+#ifdef CONFIG_MDFD_VIDEO_DECODE
+		if (dev_priv->has_bo_device) {
+			ttm_bo_device_release(&dev_priv->bdev);
+			dev_priv->has_bo_device = 0;
+		}
+		if (dev_priv->has_fence_device) {
+			ttm_fence_device_release(&dev_priv->fdev);
+			dev_priv->has_fence_device = 0;
+		}
+#endif
+		if (dev_priv->vdc_reg) {
+			iounmap(dev_priv->vdc_reg);
+			dev_priv->vdc_reg = NULL;
+		}
+		if (dev_priv->sgx_reg) {
+			iounmap(dev_priv->sgx_reg);
+			dev_priv->sgx_reg = NULL;
+		}
+#ifdef CONFIG_MDFD_GL3
+		if (IS_MDFLD(dev) && dev_priv->platform_rev_id != MDFLD_PNW_A0) {
+			iounmap(dev_priv->gl3_reg);
+			dev_priv->gl3_reg = NULL;
+		}
+#endif
+
+#ifdef CONFIG_MDFD_VIDEO_DECODE
+		if (dev_priv->msvdx_reg) {
+			iounmap(dev_priv->msvdx_reg);
+			dev_priv->msvdx_reg = NULL;
+		}
+
+		if (IS_TOPAZ(dev)) {
+			if (dev_priv->topaz_reg) {
+				iounmap(dev_priv->topaz_reg);
+				dev_priv->topaz_reg = NULL;
+			}
+		}
+
+		if (dev_priv->tdev)
+			ttm_object_device_release(&dev_priv->tdev);
+
+		if (dev_priv->has_global)
+			psb_ttm_global_release(dev_priv);
+#endif
+		kfree(dev_priv->vblank_count);
+		kfree(dev_priv);
+		dev->dev_private = NULL;
+	}
+
+	ospm_power_uninit();
+
+	return 0;
+}
+
+
+static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
+{
+	struct drm_psb_private *dev_priv;
+	struct ttm_bo_device *bdev;
+	unsigned long resource_start;
+	struct psb_gtt *pg;
+	unsigned long irqflags;
+	int ret = -ENOMEM;
+	uint32_t tt_pages;
+	int i = 0;
+
+	DRM_INFO("psb - %s\n", PSB_PACKAGE_VERSION);
+
+	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
+	if (dev_priv == NULL)
+		return -ENOMEM;
+	INIT_LIST_HEAD(&dev_priv->video_ctx);
+	spin_lock_init(&dev_priv->video_ctx_lock);
+	if (IS_CTP(dev)) {
+		dev_priv->num_pipe = 2;
+	} else if (IS_MDFLD(dev)) {
+		dev_priv->num_pipe = 1;
+#ifdef CONFIG_MDFD_DUAL_MIPI
+		dev_priv->num_pipe++;
+#endif
+#ifdef CONFIG_SUPPORT_HDMI
+		dev_priv->num_pipe++;
+#endif
+	} else if (IS_MRST(dev))
+		dev_priv->num_pipe = 1;
+	else
+		dev_priv->num_pipe = 2;
+
+	/*init DPST umcomm to NULL*/
+	dev_priv->psb_dpst_state = NULL;
+
+	dev_priv->um_start = false;
+	dev_priv->b_vblank_enable = false;
+
+	dev_priv->dev = dev;
+	bdev = &dev_priv->bdev;
+
+	hdmi_state = 0;
+	drm_hdmi_hpd_auto = 0;
+	dev_priv->ied_enabled = false;
+	dev_priv->ied_context = NULL;
+	dev_priv->bhdmiconnected = false;
+	dev_priv->bhdmi_enable = true;
+	dev_priv->dpms_on_off = false;
+	atomic_set(&dev_priv->mipi_flip_abnormal, 0);
+	dev_priv->brightness_adjusted = 0;
+	dev_priv->buf = kzalloc(PSB_REG_PRINT_SIZE * sizeof(char),
+				 GFP_KERNEL);
+	if (dev_priv->buf == NULL)
+		return -ENOMEM;
+
+#ifdef CONFIG_MDFD_VIDEO_DECODE
+	ret = psb_ttm_global_init(dev_priv);
+	if (unlikely(ret != 0))
+		goto out_err;
+	dev_priv->has_global = 1;
+
+	dev_priv->tdev = ttm_object_device_init
+			 (dev_priv->mem_global_ref.object, PSB_OBJECT_HASH_ORDER);
+	if (unlikely(dev_priv->tdev == NULL))
+		goto out_err;
+	mutex_init(&dev_priv->cmdbuf_mutex);
+	INIT_LIST_HEAD(&dev_priv->decode_context.validate_list);
+	INIT_LIST_HEAD(&dev_priv->encode_context.validate_list);
+	/*
+	INIT_LIST_HEAD(&dev_priv->decode_context.kern_validate_list);
+	INIT_LIST_HEAD(&dev_priv->encode_context.kern_validate_list);
+	*/
+#endif
+
+	mutex_init(&dev_priv->dpms_mutex);
+
+	mutex_init(&dev_priv->gamma_csc_lock);
+	mutex_init(&dev_priv->overlay_lock);
+	mutex_init(&dev_priv->vsync_lock);
+
+
+	spin_lock_init(&dev_priv->reloc_lock);
+
+	DRM_INIT_WAITQUEUE(&dev_priv->rel_mapped_queue);
+
+	dev->dev_private = (void *) dev_priv;
+	dev_priv->chipset = chipset;
+	psb_set_uopt(&dev_priv->uopt);
+
+	PSB_DEBUG_INIT("Mapping MMIO\n");
+	resource_start = pci_resource_start(dev->pdev, PSB_MMIO_RESOURCE);
+
+#ifdef CONFIG_MDFD_VIDEO_DECODE
+	dev_priv->msvdx_reg =
+		ioremap(resource_start + MRST_MSVDX_OFFSET,
+				PSB_MSVDX_SIZE);
+	if (!dev_priv->msvdx_reg)
+		goto out_err;
+
+	if (IS_TOPAZ(dev)) {
+		if (IS_MDFLD(dev)) {
+			dev_priv->topaz_reg =
+				ioremap(resource_start + PNW_TOPAZ_OFFSET,
+					PNW_TOPAZ_SIZE);
+		} else
+			dev_priv->topaz_reg =
+				ioremap(resource_start + LNC_TOPAZ_OFFSET,
+					LNC_TOPAZ_SIZE);
+		if (!dev_priv->topaz_reg)
+			goto out_err;
+	}
+#endif
+
+	dev_priv->vdc_reg =
+		ioremap(resource_start + PSB_VDC_OFFSET, PSB_VDC_SIZE);
+	if (!dev_priv->vdc_reg)
+		goto out_err;
+	if (IS_MID(dev))
+		dev_priv->sgx_reg =
+			ioremap(resource_start + MRST_SGX_OFFSET,
+				PSB_SGX_SIZE);
+	else
+		dev_priv->sgx_reg =
+			ioremap(resource_start + PSB_SGX_OFFSET, PSB_SGX_SIZE);
+
+	if (!dev_priv->sgx_reg)
+		goto out_err;
+
+	/*WA in PO: Program SGX544MP master clk gating earlier, or CPU stuck later
+	 *before loading SGX driver, root cause is still unkown
+	 */
+#if defined(SGX_FEATURE_MP)
+	if (IS_CTP_NEED_WA(dev)) {
+		if (SGX_FEATURE_MP_CORE_COUNT == 2) {
+			iowrite32(0x1, dev_priv->sgx_reg + 0x4000);
+			iowrite32(0x5, dev_priv->sgx_reg + 0x4004);
+			iowrite32(0xa, dev_priv->sgx_reg + 0x4004);
+		} else if  (SGX_FEATURE_MP_CORE_COUNT == 1) {
+			iowrite32(0x0, dev_priv->sgx_reg + 0x4000);
+			iowrite32(0x1, dev_priv->sgx_reg + 0x4004);
+			iowrite32(0x2, dev_priv->sgx_reg + 0x4004);
+		}
+		iowrite32(0x2aa, dev_priv->sgx_reg + 0x4020);
+	}
+#endif
+
+	/* setup hdmi driver */
+	android_hdmi_driver_setup(dev);
+
+	if (IS_MID(dev)) {
+		mrst_get_fuse_settings(dev);
+		intel_mid_get_vbt_data(dev_priv);
+		mid_get_pci_revID(dev_priv);
+	}
+
+#ifdef CONFIG_MDFD_GL3
+	/* GL3 */
+	if (IS_MDFLD(dev) && dev_priv->platform_rev_id != MDFLD_PNW_A0) {
+		dev_priv->gl3_reg =
+			ioremap(resource_start + MDFLD_GL3_OFFSET, MDFLD_GL3_SIZE);
+		if (!dev_priv->gl3_reg)
+			goto out_err;
+	}
+#endif
+
+	PSB_DEBUG_INIT("Init TTM fence and BO driver\n");
+
+	get_imr_info(dev_priv);
+
+	/* Init OSPM support */
+	ospm_power_init(dev);
+
+#ifdef CONFIG_MDFD_VIDEO_DECODE
+	ret = psb_ttm_fence_device_init(&dev_priv->fdev);
+	if (unlikely(ret != 0))
+		goto out_err;
+
+	/* For VXD385 DE2.x firmware support 16bit fence value */
+	dev_priv->fdev.fence_class[PSB_ENGINE_DECODE].wrap_diff = (1 << 14);
+	dev_priv->fdev.fence_class[PSB_ENGINE_DECODE].flush_diff = (1 << 13);
+	dev_priv->fdev.fence_class[PSB_ENGINE_DECODE].sequence_mask =
+								0x0000ffff;
+
+	dev_priv->has_fence_device = 1;
+
+	ret = ttm_bo_device_init(bdev,
+				 dev_priv->bo_global_ref.ref.object,
+				 &psb_ttm_bo_driver,
+				 DRM_PSB_FILE_PAGE_OFFSET, false);
+	if (unlikely(ret != 0))
+		goto out_err;
+	dev_priv->has_bo_device = 1;
+	ttm_lock_init(&dev_priv->ttm_lock);
+#endif
+	ret = -ENOMEM;
+
+	dev_priv->scratch_page = alloc_page(GFP_DMA32 | __GFP_ZERO);
+	if (!dev_priv->scratch_page)
+		goto out_err;
+
+	set_pages_uc(dev_priv->scratch_page, 1);
+
+	dev_priv->pg = psb_gtt_alloc(dev);
+	if (!dev_priv->pg)
+		goto out_err;
+
+	ret = psb_gtt_init(dev_priv->pg, 0);
+	if (ret)
+		goto out_err;
+
+	ret = psb_gtt_mm_init(dev_priv->pg);
+	if (ret)
+		goto out_err;
+
+#ifdef CONFIG_MDFD_VIDEO_DECODE
+	dev_priv->mmu = psb_mmu_driver_init((void *)0,
+					    drm_psb_trap_pagefaults, 0,
+					    dev_priv, IMG_MMU);
+	if (!dev_priv->mmu)
+		goto out_err;
+#endif
+	pg = dev_priv->pg;
+
+	tt_pages = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ?
+		   (pg->gatt_pages) : PSB_TT_PRIV0_PLIMIT;
+
+	/* CI/RAR use the lower half of TT. */
+	pg->gtt_video_start = (tt_pages / 2) << PAGE_SHIFT;
+	pg->rar_start = pg->gtt_video_start + pg->ci_stolen_size;
+
+
+#ifdef CONFIG_MDFD_VIDEO_DECODE
+	/*
+	 * Make MSVDX/TOPAZ MMU aware of the imr stolen memory area.
+	 */
+	if (dev_priv->pg->rar_stolen_size != 0) {
+		down_read(&pg->sem);
+		ret = psb_mmu_insert_pfn_sequence(
+			      psb_mmu_get_default_pd(dev_priv->mmu),
+			      dev_priv->imr_region_start >> PAGE_SHIFT,
+			      PSB_MEM_IMR_START,
+			      pg->rar_stolen_size >> PAGE_SHIFT, 0);
+		up_read(&pg->sem);
+		if (ret)
+			goto out_err;
+	}
+
+	dev_priv->pf_pd = psb_mmu_alloc_pd(dev_priv->mmu, 1, 0);
+	if (!dev_priv->pf_pd)
+		goto out_err;
+
+	psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
+	psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
+
+	spin_lock_init(&dev_priv->sequence_lock);
+
+	PSB_DEBUG_INIT("Begin to init MSVDX/Topaz\n");
+#endif
+
+	ret = psb_do_init(dev);
+	if (ret)
+		return ret;
+
+	if (pci_enable_msi(dev->pdev)) {
+		DRM_ERROR("Enable MSI failed!\n");
+	} else {
+		PSB_DEBUG_INIT("Enabled MSI IRQ (%d)\n",
+			       dev->pdev->irq);
+		/* pci_write_config_word(pdev, 0x04, 0x07); */
+	}
+
+	ret = drm_vblank_init(dev, dev_priv->num_pipe);
+	if (ret)
+		goto out_err;
+
+	/* disable vblank_disable_timer in drm_vblank_put
+	 * because the vblank disable should be controlled
+	 * by framework in Jellybean instead of in driver.
+	 */
+	drm_vblank_offdelay = 0;
+
+	DRM_INIT_WAITQUEUE(&dev_priv->vsync_queue);
+
+	dev_priv->vblank_count =
+		kmalloc(sizeof(atomic_t) * dev_priv->num_pipe, GFP_KERNEL);
+
+	if (!dev_priv->vblank_count)
+		goto out_err;
+
+	for (i = 0; i < dev_priv->num_pipe; i++)
+		atomic_set(&dev_priv->vblank_count[i], 0);
+
+	/*
+	 * Install interrupt handlers prior to powering off SGX or else we will
+	 * crash.
+	 */
+	dev_priv->vdc_irq_mask = 0;
+	dev_priv->pipestat[0] = 0;
+	dev_priv->pipestat[1] = 0;
+	dev_priv->pipestat[2] = 0;
+	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+	PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R);
+	PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R);
+	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		drm_irq_install(dev);
+		if (IS_CTP(dev)) {
+			if (irq_set_affinity(drm_dev_to_irq(dev),
+					cpumask_of(0)))
+				pr_err("psb_drv: set irq affinity failed\n");
+			else
+				pr_info("psb_drv: set irq affnity to CPU0\n");
+		}
+	}
+
+	dev->vblank_disable_allowed = 1;
+
+	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
+
+	dev->driver->get_vblank_counter = psb_get_vblank_counter;
+
+	if (IS_MDFLD(dev)) {
+#ifdef CONFIG_MDFLD_DSI_DPU
+		/*init dpu info*/
+		mdfld_dbi_dpu_init(dev);
+#else
+		mdfld_dbi_dsr_init(dev);
+#endif /*CONFIG_MDFLD_DSI_DPU*/
+		INIT_WORK(&dev_priv->te_work, mdfld_te_handler_work);
+
+		te_wq = alloc_workqueue("teworkq", WQ_UNBOUND, 1);
+		if (unlikely(!te_wq)) {
+			pr_err(": unable to create TE workqueue\n");
+			goto out_err;
+		}
+		INIT_WORK(&dev_priv->reset_panel_work,
+				mdfld_reset_panel_handler_work);
+
+		INIT_WORK(&dev_priv->vsync_event_work, mdfld_vsync_event_work);
+
+		vsync_wq = alloc_workqueue("vsyncworkq", WQ_UNBOUND, 1);
+		if (unlikely(!vsync_wq)) {
+			pr_err(": unable to create Vsync workqueue\n");
+			destroy_workqueue(te_wq);
+			goto out_err;
+		}
+	}
+
+	if (drm_psb_no_fb == 0)
+		psb_modeset_init(dev);
+
+	/* GL3 */
+#ifdef CONFIG_MDFD_GL3
+	if (drm_psb_gl3_enable)
+		gl3_enable();
+#endif
+
+#ifdef CONFIG_SUPPORT_HDMI
+	INIT_WORK(&dev_priv->hdmi_audio_wq, hdmi_do_audio_wq);
+#endif
+
+	/*Intel drm driver load is done, continue doing pvr load*/
+	DRM_DEBUG("Pvr driver load\n");
+
+	dev_priv->pvr_ops = NULL;
+	/* Delay PVRSRVDrmLoad to PVR module init */
+	g_drm_dev = dev;
+	return 0;
+
+out_err:
+	psb_driver_unload(dev);
+	return ret;
+}
+
+int psb_driver_device_is_agp(struct drm_device *dev)
+{
+	return 0;
+}
+
+int psb_extension_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	union drm_psb_extension_arg *arg = data;
+	struct drm_psb_extension_rep *rep = &arg->rep;
+
+	if (strcmp(arg->extension, "psb_ttm_placement_alphadrop") == 0) {
+		rep->exists = 1;
+		rep->driver_ioctl_offset = DRM_PSB_PLACEMENT_OFFSET;
+		rep->sarea_offset = 0;
+		rep->major = 1;
+		rep->minor = 0;
+		rep->pl = 0;
+		return 0;
+	}
+	if (strcmp(arg->extension, "psb_ttm_fence_alphadrop") == 0) {
+		rep->exists = 1;
+		rep->driver_ioctl_offset = DRM_PSB_FENCE_OFFSET;
+		rep->sarea_offset = 0;
+		rep->major = 1;
+		rep->minor = 0;
+		rep->pl = 0;
+		return 0;
+	}
+	if (strcmp(arg->extension, "psb_ttm_execbuf_alphadrop") == 0) {
+		rep->exists = 1;
+		rep->driver_ioctl_offset = DRM_PSB_CMDBUF;
+		rep->sarea_offset = 0;
+		rep->major = 1;
+		rep->minor = 0;
+		rep->pl = 0;
+		return 0;
+	}
+
+	/*return the page flipping ioctl offset*/
+	if (strcmp(arg->extension, "psb_page_flipping_alphadrop") == 0) {
+		rep->exists = 1;
+		rep->driver_ioctl_offset = DRM_PSB_FLIP;
+		rep->sarea_offset = 0;
+		rep->major = 1;
+		rep->minor = 0;
+		rep->pl = 0;
+		return 0;
+	}
+
+	if (strcmp(arg->extension, "lnc_video_getparam") == 0) {
+		rep->exists = 1;
+		rep->driver_ioctl_offset = DRM_PSB_VIDEO_GETPARAM;
+		rep->sarea_offset = 0;
+		rep->major = 1;
+		rep->minor = 0;
+		rep->pl = 0;
+		return 0;
+	}
+
+	rep->exists = 0;
+	return 0;
+}
+
+static int psb_vt_leave_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	struct ttm_bo_device *bdev = &dev_priv->bdev;
+	struct ttm_mem_type_manager *man;
+	int clean;
+	int ret;
+
+#ifdef CONFIG_MDFD_VIDEO_DECODE
+	ret = ttm_vt_lock(&dev_priv->ttm_lock, 1,
+			  psb_fpriv(file_priv)->tfile);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_TT);
+	if (unlikely(ret != 0))
+		goto out_unlock;
+
+	man = &bdev->man[TTM_PL_TT];
+	/*spin_lock(&bdev->lru_lock);*//* lru_lock is removed from upstream TTM */
+	clean = drm_mm_clean((struct drm_mm *)man->priv);
+	/*spin_unlock(&bdev->lru_lock);*/
+	if (unlikely(!clean))
+		DRM_INFO("Warning: GATT was not clean after VT switch.\n");
+
+	ttm_bo_swapout_all(&dev_priv->bdev);
+#endif
+	return 0;
+out_unlock:
+	(void) ttm_vt_unlock(&dev_priv->ttm_lock);
+	return ret;
+}
+
+static int psb_vt_enter_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	return ttm_vt_unlock(&dev_priv->ttm_lock);
+}
+
+static int psb_sizes_ioctl(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	struct drm_psb_sizes_arg *arg =
+		(struct drm_psb_sizes_arg *) data;
+
+	*arg = dev_priv->sizes;
+	return 0;
+}
+
+static int psb_fuse_reg_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	uint32_t *arg = data;
+
+	*arg = dev_priv->fuse_reg_value;
+	return 0;
+}
+static int psb_vbt_ioctl(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	struct gct_ioctl_arg *pGCT = data;
+
+	memcpy(pGCT, &dev_priv->gct_data, sizeof(*pGCT));
+
+	return 0;
+}
+
+static int psb_flip_hdmi(struct drm_device *dev, uint32_t pipe)
+{
+	int dspsurf;
+	unsigned long irqflags;
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) dev->dev_private;
+	if (dev_priv->head_fliped) {
+		printk(KERN_INFO "HDMI flipped already!");
+		return 0;
+	}
+	if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, false)) {
+		switch (pipe) {
+		case 1:
+			dspsurf = DSPBSURF;
+			break;
+		default:
+			goto exit;
+		}
+		spin_lock_irqsave(&dev_priv->flip_lock, irqflags);
+		REG_WRITE(dspsurf, dev_priv->addr_array[dev_priv->flip_head]);
+		/* DRM_INFO("fliping:%d \n", dev_priv->flip_head); */
+		dev_priv->head_fliped = 1;
+		spin_unlock_irqrestore(&dev_priv->flip_lock, irqflags);
+exit:
+		ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+	}
+	return 0;
+}
+
+static int psb_csc_gamma_setting_ioctl(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	struct drm_psb_csc_gamma_setting *csc_gamma_setting = NULL;
+	int ret = 0;
+	csc_gamma_setting = (struct drm_psb_csc_gamma_setting *)
+					data;
+	printk("seting gamma ioctl!\n");
+	if (!csc_gamma_setting)
+		return -EINVAL;
+
+	if (csc_gamma_setting->type == GAMMA)
+		ret = mdfld_intel_crtc_set_gamma(dev,
+			&csc_gamma_setting->data.gamma_data);
+	else if (csc_gamma_setting->type == CSC)
+		ret = mdfld_intel_crtc_set_color_conversion(dev,
+			&csc_gamma_setting->data.csc_data);
+	return ret;
+}
+static int psb_enable_ied_session_ioctl(struct drm_device *dev, void *data,
+						struct drm_file *file_priv)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	struct mdfld_dsi_config *dsi_config = dev_priv->dsi_configs[0];
+	data = data;
+	DRM_INFO("Enabling IED session...\n");
+	if (file_priv == NULL) {
+		DRM_ERROR("%s: file_priv is NULL.\n", __func__);
+		return -1;
+	}
+	if (dev_priv->ied_enabled) {
+		DRM_ERROR("%s: ied_enabled has been set.\n", __func__);
+		return 0;
+	}
+	dev_priv->ied_enabled = true;
+	dev_priv->ied_context = file_priv->filp;
+	mdfld_dsi_dsr_forbid(dsi_config);
+	if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
+		OSPM_UHB_FORCE_POWER_ON)) {
+		/* Set bit 31 to enable IED pipeline */
+		REG_WRITE(PSB_IED_DRM_CNTL_STATUS, 0x80000000);
+		ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+		return 0;
+	} else {
+		DRM_ERROR("%s: Failed to power on display island.\n", __func__);
+		return -1;
+	}
+}
+
+static int psb_disable_ied_session_ioctl(struct drm_device *dev, void *data,
+					struct drm_file *file_priv)
+{
+	int ret = 0;
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	struct mdfld_dsi_config *dsi_config = dev_priv->dsi_configs[0];
+	data = data;
+	DRM_INFO("Disabling IED session...\n");
+	if (file_priv == NULL) {
+		DRM_ERROR("%s: file_priv is NULL.\n", __func__);
+		return -1;
+	}
+	if (dev_priv->ied_enabled == false) {
+		DRM_ERROR("%s: ied_enabled is not set.\n", __func__);
+		return 0;
+	}
+	if (dev_priv->ied_context != file_priv->filp) {
+		DRM_ERROR("%s: Wrong context.\n", __func__);
+		return -1;
+	}
+	dev_priv->ied_enabled = false;
+	dev_priv->ied_context = NULL;
+	if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
+		OSPM_UHB_FORCE_POWER_ON)) {
+		REG_WRITE(PSB_IED_DRM_CNTL_STATUS, 0);
+		ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+		ret = 0;
+	} else {
+		DRM_ERROR("%s: Failed to power on display island.\n", __func__);
+		ret = -1;
+	}
+	mdfld_dsi_dsr_allow(dsi_config);
+	return ret;
+}
+
+void psb_cleanup_ied_session(struct drm_psb_private *dev_priv,
+			struct file *filp)
+{
+	struct mdfld_dsi_config *dsi_config = NULL;
+	if (dev_priv == NULL || filp == NULL)
+		return;
+	if (dev_priv->ied_enabled && dev_priv->ied_context == filp) {
+		DRM_ERROR("%s: ied_enabled is not cleared.\n", __func__);
+		dev_priv->ied_enabled = false;
+		dev_priv->ied_context = NULL;
+		dsi_config = dev_priv->dsi_configs[0];
+		mdfld_dsi_dsr_allow(dsi_config);
+	}
+}
+
+static int psb_disp_ioctl(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	struct drm_psb_disp_ctrl *dp_ctrl = data;
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	struct drm_psb_flip_chain_data *flip_data = NULL;
+	struct drm_mode_object *obj;
+	struct drm_framebuffer *fb;
+	struct psb_framebuffer *psbfb;
+	unsigned long irqflags;
+	int ret = 0;
+	unsigned int i = 0;
+	int dspcntr_reg = DSPBCNTR;
+	int dspbase_reg = MRST_DSPBBASE;
+	u32 temp;
+	struct mdfld_dsi_config *dsi_config;
+	/*DRM_COPY_FROM_USER(&dp_ctrl, data, sizeof(struct drm_psb_disp_ctrl));*/
+	/*DRM_INFO("disp cmd:%d\n",dp_ctrl->cmd);*/
+	if (dp_ctrl->cmd == DRM_PSB_DISP_INIT_HDMI_FLIP_CHAIN) {
+		flip_data = &dp_ctrl->u.flip_chain_data;
+		if (flip_data->size > DRM_PSB_HDMI_FLIP_ARRAY_SIZE) {
+			ret = -EINVAL;
+			goto exit;
+		}
+		for (i = 0; i < flip_data->size; i++) {
+			dev_priv->flip_array[i] = flip_data->h_buffer_array[i];
+			obj = drm_mode_object_find(dev, (uint32_t)dev_priv->flip_array[i],
+				DRM_MODE_OBJECT_FB);
+			fb = obj_to_fb(obj);
+			psbfb = to_psb_fb(fb);
+			if (!psbfb) {
+				ret = -EINVAL;
+				goto exit;
+			}
+			dev_priv->addr_array[i] = psbfb->offset;
+			/* DRM_INFO("adding id:%d to psb flip chain \n",
+				(uint32_t)dev_priv->flip_array[i]); */
+		}
+		dev_priv->flip_valid_size = flip_data->size;
+		dev_priv->flip_inited = 1;
+	} else if (dp_ctrl->cmd == DRM_PSB_DISP_QUEUE_BUFFER) {
+		spin_lock_irqsave(&dev_priv->flip_lock, irqflags);
+		dev_priv->flip_head = (unsigned int)dp_ctrl->u.buf_data.h_buffer;
+		dev_priv->head_fliped = 0;
+		spin_unlock_irqrestore(&dev_priv->flip_lock, irqflags);
+		psb_flip_hdmi(dev, 1);
+	} else if (dp_ctrl->cmd == DRM_PSB_DISP_DEQUEUE_BUFFER) {
+		if (!dev_priv->flip_inited) {
+			ret = -EINVAL;
+			goto exit;
+		}
+		i = (dev_priv->flip_tail + 1) % dev_priv->flip_valid_size;
+		if (i != dev_priv->flip_head)
+			dev_priv->flip_tail = i;
+		dp_ctrl->u.buf_data.h_buffer = (void *)dev_priv->flip_tail;
+	} else if (dp_ctrl->cmd == DRM_PSB_DISP_PLANEB_DISABLE) {
+		dev_priv->bhdmi_enable = false;
+		if (DISP_PLANEB_STATUS == DISPLAY_PLANE_DISABLE)
+			ret = -1;
+		else {
+			if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
+						OSPM_UHB_FORCE_POWER_ON)) {
+				/* If data=1, then force setting plane status. */
+				if (dp_ctrl->u.data == 1)
+					DISP_PLANEB_STATUS = DISPLAY_PLANE_DISABLE;
+
+				ret = -1;
+				goto exit;
+			}
+			/* Use Disable pipeB plane to turn off HDMI screen */
+			temp = REG_READ(dspcntr_reg);
+			if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+				REG_WRITE(dspcntr_reg,
+					temp & ~DISPLAY_PLANE_ENABLE);
+				/* Flush the plane changes */
+				REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+			}
+
+			ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+		}
+
+		/* If data=1, then force setting plane status. */
+		if (dp_ctrl->u.data == 1)
+			DISP_PLANEB_STATUS = DISPLAY_PLANE_DISABLE;
+	} else if (dp_ctrl->cmd == DRM_PSB_DISP_PLANEB_ENABLE) {
+		/* If data=1, then force setting plane status. */
+		if (dp_ctrl->u.data == 1)
+			DISP_PLANEB_STATUS = DISPLAY_PLANE_ENABLE;
+
+		if (DISP_PLANEB_STATUS == DISPLAY_PLANE_DISABLE)
+			ret = -1;
+		else {
+			if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
+						OSPM_UHB_FORCE_POWER_ON)) {
+				ret = -1;
+				goto exit;
+			}
+			/*Restore pipe B plane to turn on HDMI screen*/
+			temp = REG_READ(dspcntr_reg);
+			if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+				REG_WRITE(dspcntr_reg,
+					temp | DISPLAY_PLANE_ENABLE);
+				/* Flush the plane changes */
+				REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+			}
+
+			ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+		}
+		dev_priv->bhdmi_enable = true;
+	} else if (dp_ctrl->cmd == DRM_PSB_HDMI_OSPM_ISLAND_DOWN) {
+		/* before turning off HDMI power island, re-check the
+		*HDMI hotplus status in case that there are plug-in
+		*signals agains.
+		*/
+		if (!hdmi_state) {
+			/* We need forbid DSR here to let DSR has a new
+			** chance to determine the hdmi status
+			** Or when HDMI unplug, the DSR has sometimes
+			** remained at Level 0
+			*/
+			dsi_config = dev_priv->dsi_configs[0];
+			mdfld_dsi_dsr_forbid(dsi_config);
+			/*Set power island down when hdmi disconnected*/
+			acquire_ospm_lock();
+			/*HDMI is considered totally disconected
+			*before power off its island */
+			dev_priv->bhdmiconnected = false;
+			if (pmu_nc_set_power_state(OSPM_DISPLAY_B_ISLAND,
+					OSPM_ISLAND_DOWN, OSPM_REG_TYPE))
+				BUG();
+			dev_priv->panel_desc &= ~DISPLAY_B;
+			DISP_PLANEB_STATUS = ~DISPLAY_PLANE_ENABLE;
+			release_ospm_lock();
+			mdfld_dsi_dsr_allow(dsi_config);
+		}
+	} else if (dp_ctrl->cmd == DRM_PSB_HDMI_NOTIFY_HOTPLUG_TO_AUDIO) {
+		if (dp_ctrl->u.data == 0) {
+			/* notify audio with HDMI unplug event */
+			if (dev_priv->hdmi_priv->monitor_type == MONITOR_TYPE_HDMI) {
+				DRM_INFO("HDMI plug out to audio driver\n");
+				mid_hdmi_audio_signal_event(dev, HAD_EVENT_HOT_UNPLUG);
+			}
+		} else {
+			/* notify audio with HDMI plug event */
+			if (dev_priv->hdmi_priv->monitor_type == MONITOR_TYPE_HDMI) {
+				DRM_INFO("HDMI plug in to audio driver\n");
+				mid_hdmi_audio_signal_event(dev, HAD_EVENT_HOT_PLUG);
+			}
+		}
+	}
+
+exit:
+	return ret;
+}
+
+#ifdef CONFIG_SUPPORT_HDMI
+static int psb_query_hdcp_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv)
+{
+	uint32_t *arg = data;
+	uint8_t bksv[5];
+
+	/* Attempting to read the BKSV value from the HDMI device.
+	 * A successful read of this data indicates that HDCP is
+	 * supported.  A value of zero would indicate that it's
+	 * not.
+	 */
+	*arg = android_query_hdmi_hdcp_sink(dev, bksv);
+
+	return 0;
+}
+static int psb_validate_hdcp_ksv_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv)
+{
+	sqword_t *arg = data;
+	sqword_t hw_bksv;
+	if (android_query_hdmi_hdcp_sink(dev, (uint8_t *)&hw_bksv)) {
+		*arg = hw_bksv;
+		return 0;
+	}
+
+	return -1;
+}
+static int psb_get_hdcp_status_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv)
+{
+	uint32_t *arg = data;
+	if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
+				OSPM_UHB_FORCE_POWER_ON)) {
+		*arg = android_check_hdmi_hdcp_enc_status(dev);
+		ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+	}
+
+	return 0;
+}
+static int psb_enable_hdcp_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv)
+{
+	int ret = 0;
+	if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
+				OSPM_UHB_FORCE_POWER_ON)) {
+		ret = android_enable_hdmi_hdcp(dev);
+		ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+	}
+	if (ret)
+		return 0;
+	return -1;
+}
+static int psb_disable_hdcp_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv)
+{
+	int ret = 0;
+	if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
+				OSPM_UHB_FORCE_POWER_ON)) {
+		ret = android_disable_hdmi_hdcp(dev);
+		ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+	}
+	if (ret)
+		return 0;
+	return -1;
+}
+
+static int psb_enable_display_ied_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv)
+{
+	int ret = 0;
+	int temp = 0;
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	if (HAS_DISPLAY_IED_CNTRL(dev)) {
+		if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
+					OSPM_UHB_FORCE_POWER_ON)) {
+			temp = PSB_RVDC32(DSPCHICKENBIT);
+			temp &= ~(1 << 31);
+			PSB_WVDC32(temp, DSPCHICKENBIT);
+			temp = PSB_RVDC32(DSPCHICKENBIT);
+			ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+		} else
+			ret = -1;
+	}
+
+	return ret;
+}
+static int psb_disable_display_ied_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv)
+{
+	int ret = 0;
+	int temp = 0;
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	if (HAS_DISPLAY_IED_CNTRL(dev)) {
+		if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
+				OSPM_UHB_FORCE_POWER_ON)) {
+			temp = PSB_RVDC32(DSPCHICKENBIT);
+			temp |= (1 << 31);
+			PSB_WVDC32(temp, DSPCHICKENBIT);
+			temp = PSB_RVDC32(DSPCHICKENBIT);
+			ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+		} else
+			ret = -1;
+	}
+
+	return ret;
+}
+static int psb_query_display_ied_caps_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv)
+{
+	uint32_t *arg = data;
+
+	if (HAS_DISPLAY_IED_CNTRL(dev))
+		*arg = 1;
+	else
+		*arg = 0;
+
+	return 0;
+}
+static int psb_get_hdcp_link_status_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv)
+{
+	uint32_t *arg = data;
+	if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
+				OSPM_UHB_FORCE_POWER_ON)) {
+		*arg = android_check_hdmi_hdcp_link_status(dev);
+		ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+	}
+	return 0;
+}
+
+#endif
+
+static int psb_set_csc_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv)
+{
+    struct drm_psb_csc_matrix *csc_matrix = data;
+    csc_program_DC(dev, csc_matrix->matrix, csc_matrix->pipe);
+    return 0;
+}
+
+static int psb_dc_state_ioctl(struct drm_device *dev, void * data,
+			      struct drm_file *file_priv)
+{
+	uint32_t flags;
+	uint32_t obj_id;
+	struct drm_mode_object *obj;
+	struct drm_connector *connector;
+	struct drm_crtc *crtc;
+	struct drm_psb_dc_state_arg *arg =
+		(struct drm_psb_dc_state_arg *)data;
+
+	if (IS_MID(dev))
+		return 0;
+
+	flags = arg->flags;
+	obj_id = arg->obj_id;
+
+	if (flags & PSB_DC_CRTC_MASK) {
+		obj = drm_mode_object_find(dev, obj_id,
+					   DRM_MODE_OBJECT_CRTC);
+		if (!obj) {
+			DRM_DEBUG("Invalid CRTC object.\n");
+			return -EINVAL;
+		}
+
+		crtc = obj_to_crtc(obj);
+
+		mutex_lock(&dev->mode_config.mutex);
+		if (drm_helper_crtc_in_use(crtc)) {
+			if (flags & PSB_DC_CRTC_SAVE)
+				crtc->funcs->save(crtc);
+			else
+				crtc->funcs->restore(crtc);
+		}
+		mutex_unlock(&dev->mode_config.mutex);
+
+		return 0;
+	} else if (flags & PSB_DC_OUTPUT_MASK) {
+		obj = drm_mode_object_find(dev, obj_id,
+					   DRM_MODE_OBJECT_CONNECTOR);
+		if (!obj) {
+			DRM_DEBUG("Invalid connector id.\n");
+			return -EINVAL;
+		}
+
+		connector = obj_to_connector(obj);
+		if (flags & PSB_DC_OUTPUT_SAVE)
+			connector->funcs->save(connector);
+		else
+			connector->funcs->restore(connector);
+
+		return 0;
+	}
+
+	DRM_DEBUG("Bad flags 0x%x\n", flags);
+	return -EINVAL;
+}
+
+#ifdef CONFIG_CTP_DPST
+static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *file_priv)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	uint32_t *arg = data;
+	struct backlight_device bd;
+	dev_priv->blc_adj2 = *arg;
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+	bd.props.brightness = psb_get_brightness(&bd);
+	psb_set_brightness(&bd);
+#endif
+	return 0;
+}
+#endif
+
+static int psb_adb_ioctl(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	uint32_t *arg = data;
+	struct backlight_device bd;
+	dev_priv->blc_adj1 = *arg;
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+	bd.props.brightness = psb_get_brightness(&bd);
+	psb_set_brightness(&bd);
+#endif
+	return 0;
+}
+
+#ifdef CONFIG_CTP_DPST
+static int psb_hist_enable_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv)
+{
+	u32 irqCtrl = 0;
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	struct dpst_guardband guardband_reg;
+	struct dpst_ie_histogram_control ie_hist_cont_reg;
+	uint32_t *enable = data;
+	unsigned long irq_flags;
+
+	if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_ONLY_IF_ON)) {
+		return 0;
+	}
+
+	if (*enable == 1) {
+		ie_hist_cont_reg.data = PSB_RVDC32(HISTOGRAM_LOGIC_CONTROL);
+		ie_hist_cont_reg.ie_pipe_assignment = 0;
+		ie_hist_cont_reg.histogram_mode_select = DPST_YUV_LUMA_MODE;
+		ie_hist_cont_reg.ie_histogram_enable = 1;
+		PSB_WVDC32(ie_hist_cont_reg.data, HISTOGRAM_LOGIC_CONTROL);
+
+		guardband_reg.data = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
+		guardband_reg.interrupt_enable = 1;
+		guardband_reg.interrupt_status = 1;
+		PSB_WVDC32(guardband_reg.data, HISTOGRAM_INT_CONTROL);
+
+		spin_lock_irqsave(&dev_priv->irqmask_lock, irq_flags);
+		irqCtrl = PSB_RVDC32(PIPEASTAT);
+		PSB_WVDC32(irqCtrl | PIPE_DPST_EVENT_ENABLE, PIPEASTAT);
+		spin_unlock_irqrestore(&dev_priv->irqmask_lock, irq_flags);
+		/* Wait for two vblanks */
+	} else {
+		guardband_reg.data = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
+		guardband_reg.interrupt_enable = 0;
+		guardband_reg.interrupt_status = 1;
+		PSB_WVDC32(guardband_reg.data, HISTOGRAM_INT_CONTROL);
+
+		ie_hist_cont_reg.data = PSB_RVDC32(HISTOGRAM_LOGIC_CONTROL);
+		ie_hist_cont_reg.ie_histogram_enable = 0;
+		PSB_WVDC32(ie_hist_cont_reg.data, HISTOGRAM_LOGIC_CONTROL);
+
+		spin_lock_irqsave(&dev_priv->irqmask_lock, irq_flags);
+		irqCtrl = PSB_RVDC32(PIPEASTAT);
+		irqCtrl &= ~PIPE_DPST_EVENT_ENABLE;
+		PSB_WVDC32(irqCtrl, PIPEASTAT);
+		spin_unlock_irqrestore(&dev_priv->irqmask_lock, irq_flags);
+	}
+
+	ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+
+	return 0;
+}
+
+static int psb_hist_status_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv)
+{
+	struct drm_psb_private *dev_priv		= psb_priv(dev);
+	struct drm_psb_hist_status_arg *hist_status	= data;
+	uint32_t *arg					= hist_status->buf;
+	u32 iedbr_reg_data				= 0;
+	struct dpst_ie_histogram_control ie_hist_cont_reg;
+	u32 i;
+	int dpst3_bin_threshold_count	= 0;
+	uint32_t blm_hist_ctl		= HISTOGRAM_LOGIC_CONTROL;
+	uint32_t iebdr_reg		= HISTOGRAM_BIN_DATA;
+	uint32_t segvalue_max_22_bit	= 0x3fffff;
+	uint32_t iedbr_busy_bit		= 0x80000000;
+	int dpst3_bin_count		= 32;
+
+	if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_ONLY_IF_ON)) {
+		return 0;
+	}
+
+	ie_hist_cont_reg.data			= PSB_RVDC32(blm_hist_ctl);
+	ie_hist_cont_reg.bin_reg_func_select	= dpst3_bin_threshold_count;
+	ie_hist_cont_reg.bin_reg_index		= 0;
+
+	PSB_WVDC32(ie_hist_cont_reg.data, blm_hist_ctl);
+
+	for (i = 0; i < dpst3_bin_count; i++) {
+		iedbr_reg_data = PSB_RVDC32(iebdr_reg);
+
+		if (!(iedbr_reg_data & iedbr_busy_bit)) {
+			arg[i] = iedbr_reg_data & segvalue_max_22_bit;
+		} else {
+			i = 0;
+			ie_hist_cont_reg.data = PSB_RVDC32(blm_hist_ctl);
+			ie_hist_cont_reg.bin_reg_index = 0;
+			PSB_WVDC32(ie_hist_cont_reg.data, blm_hist_ctl);
+		}
+	}
+
+	ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+
+	return 0;
+}
+
+static int psb_init_comm_ioctl(struct drm_device *dev, void *data,
+			       struct drm_file *file_priv)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	struct pci_dev *pdev = NULL;
+	struct device *ddev = NULL;
+	struct kobject *kobj = NULL;
+	uint32_t *arg = data;
+
+	if (*arg == 1) {
+		/*find handle to drm kboject*/
+		pdev = dev->pdev;
+		ddev = &pdev->dev;
+		kobj = &ddev->kobj;
+
+		if (dev_priv->psb_dpst_state == NULL) {
+			/*init dpst kmum comms*/
+			dev_priv->psb_dpst_state = psb_dpst_init(kobj);
+		} else {
+			PSB_DEBUG_ENTRY("DPST already initialized\n");
+		}
+
+		psb_irq_enable_dpst(dev);
+		psb_dpst_notify_change_um(DPST_EVENT_INIT_COMPLETE,
+					  dev_priv->psb_dpst_state);
+	} else {
+		/*hotplug and dpst destroy examples*/
+		psb_irq_disable_dpst(dev);
+		psb_dpst_notify_change_um(DPST_EVENT_TERMINATE,
+					  dev_priv->psb_dpst_state);
+		psb_dpst_device_pool_destroy(dev_priv->psb_dpst_state);
+		dev_priv->psb_dpst_state = NULL;
+	}
+	return 0;
+}
+
+/* return the current mode to the dpst module */
+static int psb_dpst_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	uint32_t *arg = data;
+	uint32_t x;
+	uint32_t y;
+	uint32_t reg;
+
+	if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_ONLY_IF_ON)) {
+		return 0;
+	}
+
+	reg = PSB_RVDC32(PIPEASRC);
+
+	ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+
+	/* horizontal is the left 16 bits */
+	x = reg >> 16;
+	/* vertical is the right 16 bits */
+	y = reg & 0x0000ffff;
+
+	/* the values are the image size minus one */
+	x += 1;
+	y += 1;
+
+	*arg = (x << 16) | y;
+
+	return 0;
+}
+static int psb_gamma_ioctl(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv)
+{
+	struct drm_psb_dpst_lut_arg *lut_arg = data;
+	struct drm_mode_object *obj;
+	struct drm_crtc *crtc;
+	struct drm_connector *connector;
+	struct psb_intel_crtc *psb_intel_crtc;
+	int i = 0;
+	int32_t obj_id;
+
+	obj_id = lut_arg->output_id;
+	obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_CONNECTOR);
+	if (!obj) {
+		DRM_DEBUG("Invalid Connector object.\n");
+		return -EINVAL;
+	}
+
+	connector = obj_to_connector(obj);
+	crtc = connector->encoder->crtc;
+	psb_intel_crtc = to_psb_intel_crtc(crtc);
+
+	for (i = 0; i < 256; i++)
+		psb_intel_crtc->lut_adj[i] = lut_arg->lut[i];
+
+	psb_intel_crtc_load_lut(crtc);
+
+	return 0;
+}
+
+static int psb_update_guard_ioctl(struct drm_device *dev, void *data,
+				  struct drm_file *file_priv)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	struct dpst_guardband *input = (struct dpst_guardband *) data;
+	struct dpst_guardband reg_data;
+
+	if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_ONLY_IF_ON)) {
+		return 0;
+	}
+
+	reg_data.data = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
+	reg_data.guardband = input->guardband;
+	reg_data.guardband_interrupt_delay = input->guardband_interrupt_delay;
+	/* PSB_DEBUG_ENTRY("guardband = %u\ninterrupt delay = %u\n",
+		reg_data.guardband, reg_data.guardband_interrupt_delay); */
+	PSB_WVDC32(reg_data.data, HISTOGRAM_INT_CONTROL);
+
+	ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+
+	return 0;
+}
+#endif
+
+static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
+				    struct drm_file *file_priv)
+{
+	uint32_t obj_id;
+	uint16_t op;
+	struct drm_mode_modeinfo *umode;
+	struct drm_display_mode *mode = NULL;
+	struct drm_psb_mode_operation_arg *arg;
+	struct drm_mode_object *obj;
+	struct drm_connector *connector;
+	struct drm_framebuffer *drm_fb;
+	struct psb_framebuffer *psb_fb;
+	struct drm_connector_helper_funcs *connector_funcs;
+	int ret = 0;
+	int resp = MODE_OK;
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+
+	arg = (struct drm_psb_mode_operation_arg *)data;
+	obj_id = arg->obj_id;
+	op = arg->operation;
+
+	switch (op) {
+	case PSB_MODE_OPERATION_SET_DC_BASE:
+		obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_FB);
+		if (!obj) {
+			DRM_ERROR("Invalid FB id %d\n", obj_id);
+			return -EINVAL;
+		}
+
+		drm_fb = obj_to_fb(obj);
+		psb_fb = to_psb_fb(drm_fb);
+
+		if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
+					      OSPM_UHB_ONLY_IF_ON)) {
+			REG_WRITE(DSPASURF, psb_fb->offset);
+			REG_READ(DSPASURF);
+			ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+		} else {
+			dev_priv->saveDSPASURF = psb_fb->offset;
+		}
+
+		return 0;
+	case PSB_MODE_OPERATION_MODE_VALID:
+		umode = &arg->mode;
+
+		mutex_lock(&dev->mode_config.mutex);
+
+		obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_CONNECTOR);
+		if (!obj) {
+			ret = -EINVAL;
+			goto mode_op_out;
+		}
+
+		connector = obj_to_connector(obj);
+
+		mode = drm_mode_create(dev);
+		if (!mode) {
+			ret = -ENOMEM;
+			goto mode_op_out;
+		}
+
+		/* drm_crtc_convert_umode(mode, umode); */
+		{
+			mode->clock = umode->clock;
+			mode->hdisplay = umode->hdisplay;
+			mode->hsync_start = umode->hsync_start;
+			mode->hsync_end = umode->hsync_end;
+			mode->htotal = umode->htotal;
+			mode->hskew = umode->hskew;
+			mode->vdisplay = umode->vdisplay;
+			mode->vsync_start = umode->vsync_start;
+			mode->vsync_end = umode->vsync_end;
+			mode->vtotal = umode->vtotal;
+			mode->vscan = umode->vscan;
+			mode->vrefresh = umode->vrefresh;
+			mode->flags = umode->flags;
+			mode->type = umode->type;
+			strncpy(mode->name, umode->name, DRM_DISPLAY_MODE_LEN);
+			mode->name[DRM_DISPLAY_MODE_LEN-1] = 0;
+		}
+
+		connector_funcs = (struct drm_connector_helper_funcs *)
+				  connector->helper_private;
+
+		if (connector_funcs->mode_valid) {
+			resp = connector_funcs->mode_valid(connector, mode);
+			arg->data = (void *)resp;
+		}
+
+		/*do some clean up work*/
+		if (mode) {
+			drm_mode_destroy(dev, mode);
+		}
+mode_op_out:
+		mutex_unlock(&dev->mode_config.mutex);
+		return ret;
+
+	default:
+		DRM_DEBUG("Unsupported psb mode operation");
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+static int psb_stolen_memory_ioctl(struct drm_device *dev, void *data,
+				   struct drm_file *file_priv)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	struct drm_psb_stolen_memory_arg *arg = data;
+
+	arg->base = dev_priv->pg->stolen_base;
+	arg->size = dev_priv->pg->vram_stolen_size;
+
+	return 0;
+}
+
+static int psb_dpu_query_ioctl(struct drm_device *dev, void *arg,
+			       struct drm_file *file_priv)
+{
+	IMG_INT *data = (IMG_INT *)arg;
+	DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+
+	int panel_type;
+
+	/*reject requests from non-mdfld platforms*/
+	if (!IS_MDFLD(dev)) {
+		DRM_INFO("Not Medfield platform! return.");
+		return -EOPNOTSUPP;
+	}
+
+	DRM_INFO("dsr query.\n");
+
+	dev_priv->um_start = true;
+	panel_type = get_panel_mode(dev);
+
+	if (panel_type == MDFLD_DSI_ENCODER_DPI) {
+		DRM_INFO("DSI panel is working in video mode\n");
+		dev_priv->b_dsr_enable = false;
+		*data = 0;
+		return 0;
+	}
+
+#if defined(CONFIG_MDFLD_DSI_DSR)
+	dev_priv->b_dsr_enable = true;
+	*data = MDFLD_DSR_RR | MDFLD_DSR_FULLSCREEN;
+#elif defined(CONFIG_MDFLD_DSI_DPU)
+	dev_priv->b_dsr_enable = true;
+	*data = MDFLD_DSR_RR | MDFLD_DPU_ENABLE;
+#else /*DBI panel but DSR was not defined*/
+	DRM_INFO("DSR is disabled by kernel configuration.\n");
+
+	dev_priv->b_dsr_enable = false;
+	*data = 0;
+#endif /*CONFIG_MDFLD_DSI_DSR*/
+	return 0;
+}
+
+static int psb_dpu_dsr_on_ioctl(struct drm_device *dev, void *arg,
+				struct drm_file *file_priv)
+{
+	u32 * param = (u32 *)arg;
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *)dev->dev_private;
+	int panel_type;
+
+	/*reject requests from non-mdfld platforms*/
+	if (!IS_MDFLD(dev)) {
+		DRM_INFO("Not Medfield platform! return.");
+		return -EOPNOTSUPP;
+	}
+
+	panel_type = get_panel_mode(dev);
+
+	if (panel_type == MDFLD_DSI_ENCODER_DPI) {
+		DRM_INFO("DSI panel is working in video mode\n");
+		dev_priv->b_dsr_enable = false;
+		return 0;
+	}
+
+	if (!param) {
+		DRM_ERROR("Invalid parameter\n");
+		return -EINVAL;
+	}
+
+	PSB_DEBUG_ENTRY("dsr kick in. param 0x%08x\n", *param);
+
+	if (*param == DRM_PSB_DSR_DISABLE) {
+		PSB_DEBUG_ENTRY("DSR is turned off\n");
+		dev_priv->b_dsr_enable = false;
+#if defined(CONFIG_MDFLD_DSI_DPU)
+		mdfld_dbi_dpu_report_fullscreen_damage(dev);
+#elif defined(CONFIG_MDFLD_DSI_DSR)
+		mdfld_dsi_dbi_exit_dsr(dev, MDFLD_DSR_2D_3D, 0, 0);
+#endif
+		return 0;
+	} else if (*param == DRM_PSB_DSR_ENABLE) {
+		PSB_DEBUG_ENTRY("DSR is turned on\n");
+#if defined(CONFIG_MDFLD_DSI_DPU) || defined(CONFIG_MDFLD_DSI_DSR)
+		dev_priv->b_dsr_enable = true;
+#endif
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static int psb_dpu_dsr_off_ioctl(struct drm_device *dev, void *arg,
+				 struct drm_file *file_priv)
+{
+#if defined(CONFIG_MDFLD_DSI_DPU)
+	struct drm_psb_drv_dsr_off_arg *dsr_off_arg = (struct drm_psb_drv_dsr_off_arg *) arg;
+	struct psb_drm_dpu_rect rect = dsr_off_arg->damage_rect;
+
+	return mdfld_dsi_dbi_dsr_off(dev, &rect);
+#elif defined(CONFIG_MDFLD_DSI_DSR)
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *)dev->dev_private;
+
+	if ((dev_priv->dsr_fb_update & MDFLD_DSR_2D_3D) != MDFLD_DSR_2D_3D)
+		mdfld_dsi_dbi_exit_dsr(dev, MDFLD_DSR_2D_3D, 0, 0);
+#endif
+
+	return 0;
+}
+
+static void wait_for_pipeb_finish(struct drm_device *dev,
+				  int pipenum)
+{
+	static int prev_pipe;
+	int tmp, i;
+
+	if (prev_pipe == 1 && pipenum == 0) {
+		/* switch from Pipe B to Pipe A */
+		for (i = 0; i < 1000; i++) {
+			tmp = REG_READ(PIPEBCONF);
+			if ((tmp >> 30) != 0x01)
+				break;
+			/* Pipe is not fully disabled */
+			usleep_range(100, 200);
+		}
+		if (i == 1000)
+			DRM_ERROR("Fail to wait pipe B\n");
+	}
+	prev_pipe = pipenum;
+}
+
+/*wait for ovadd flip complete*/
+static void overlay_wait_flip(struct drm_device *dev)
+{
+	int retry = 60;
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	/**
+	 * make sure overlay command buffer
+	 * was copied before updating the system
+	 * overlay command buffer.
+	 * sleep long time first to avoid frequent wake up.
+	 */
+	if (BIT31 & PSB_RVDC32(OV_DOVASTA))
+		goto fliped;
+	usleep_range(6000, 12000);
+
+	while (--retry) {
+		if (BIT31 & PSB_RVDC32(OV_DOVASTA))
+			break;
+		usleep_range(500, 600);
+	}
+
+fliped:
+	if (!retry)
+		DRM_ERROR("OVADD flip timeout!\n");
+}
+
+/*wait for vblank*/
+static void overlay_wait_vblank(struct drm_device *dev,
+				struct drm_file *file_priv,
+				uint32_t ovadd)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	union drm_wait_vblank vblwait;
+	uint32_t ovadd_pipe;
+	int pipe = 0;
+
+	ovadd_pipe = ((ovadd >> 6) & 0x3);
+
+	vblwait.request.type = (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_NEXTONMISS);
+	vblwait.request.sequence = 1;
+	if (ovadd_pipe) {
+		pipe = 1;
+		vblwait.request.type |= _DRM_VBLANK_SECONDARY;
+	}
+
+	/*
+	 * FIXME: don't enable vblank in this way.
+	 * Current vblank usages didn't follow the DRM framework.
+	 * drm_vblank_get()/drm_vblank_put() should be used to enable/disabe
+	 * vblank interrupt. However, currently driver is using following way
+	 * to enable vblank which may lead to some problems.
+	 * Plus, drm_vblank_get() was called by PVR 3rd party display driver
+	 * when creating a new swapchain, and drm_vblank_put() won't be called
+	 * util destroy swapchain. this will make drm_vblank_get() in
+	 * drm_wait_vblank useless since the refcount is not 0.
+	 */
+
+	if (!psb_enable_vblank(dev, pipe)) {
+		dev_priv->b_is_in_idle = false;
+		dev_priv->dsr_idle_count = 0;
+		DRM_DEBUG("%s: start drm_wait_vblank()\n", __func__);
+		drm_wait_vblank(dev, (void *)&vblwait, file_priv);
+	} else {
+		DRM_DEBUG("%s: psb_enable_vblank() failed\n", __func__);
+	}
+}
+
+static int validate_overlay_register_buffer(struct drm_file *file_priv,
+				uint32_t *OVADD, uint32_t buffer_handle)
+{
+#ifdef CONFIG_MDFD_VIDEO_DECODE
+	struct ttm_buffer_object *reg_buffer = NULL;
+	struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
+	struct ttm_placement placement;
+	uint32_t flags = TTM_PL_FLAG_TT | TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;
+	int ret = -EINVAL;
+
+	reg_buffer = ttm_buffer_object_lookup(tfile, buffer_handle);
+
+	if (!reg_buffer)
+		goto out_err0;
+
+	placement.num_placement = 1;
+	placement.placement = &flags;
+	placement.num_busy_placement = 1;
+	placement.busy_placement = &flags;
+	placement.fpfn = 0;
+	placement.lpfn = 0;
+
+	ret = ttm_bo_reserve(reg_buffer, true, false, false, 0);
+
+	if (ret)
+		goto out_err1;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0)
+	ret = ttm_bo_validate(reg_buffer, &placement, 1, 0, 0);
+#else
+	ret = ttm_bo_validate(reg_buffer, &placement, 1, 0);
+#endif
+
+	if (ret)
+		goto out_err2;
+
+	if ((reg_buffer->offset & 0x0fff0000) != (*OVADD & 0xffff0000)) {
+		DRM_DEBUG("%s: old value 0x%08x, buffer gpu address 0x%08x\n",
+				__func__, *OVADD, (unsigned int)reg_buffer->offset);
+		*OVADD = (*OVADD & 0xffff) + (reg_buffer->offset & 0x0fffffff);
+		DRM_DEBUG("patch ovadd value, new value 0x%08x\n", *OVADD);
+	}
+
+out_err2:
+	ttm_bo_unreserve(reg_buffer);
+out_err1:
+	ttm_bo_unref(&reg_buffer);
+out_err0:
+	return ret;
+#else
+	return 0;
+#endif
+}
+
+/*
+* use to dump display registers. and print to standard output.
+*/
+static int psb_register_dump(struct drm_device *dev, int start, int end)
+{
+	int  len = 0;
+	int  Offset = 0;
+	int  ret = 0;
+
+	PSB_DEBUG_ENTRY("start:0x%08x\n", start);
+	PSB_DEBUG_ENTRY("end:  0x%08x\n", end);
+
+	if ((start % 0x4) != 0) {
+		PSB_DEBUG_ENTRY("The address should be 4 byte aligned.\n");
+		ret = -EINVAL;
+		return ret;
+	}
+
+	if ((end % 0x4) != 0) {
+		PSB_DEBUG_ENTRY("The address should be 4 byte aligned.\n");
+		ret = -EINVAL;
+		return ret;
+	}
+
+	len = end - start + 1;
+	if (len <= 0)
+		PSB_DEBUG_ENTRY("The end should be greater than start.\n");
+
+	if (end < 0xa000 || end >  0x720ff) {
+		PSB_DEBUG_ENTRY("The end address is out of range.\n");
+		ret = -EINVAL;
+		return ret;
+	}
+
+	if (start < 0xa000 || start >  0x720ff)	{
+		PSB_DEBUG_ENTRY("The start address is out of the range.\n");
+		ret = -EINVAL;
+		return ret;
+	}
+
+	for (Offset = start ; Offset < end; Offset = Offset + 0x10) {
+		printk(KERN_INFO
+			"[DISPLAY DUMP] 0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+					Offset,
+					REG_READ(Offset + 0x0),
+					REG_READ(Offset + 0x4),
+					REG_READ(Offset + 0x8),
+					REG_READ(Offset + 0xc));
+	}
+	return ret;
+}
+static int psb_display_reg_dump(struct drm_device *dev)
+{
+
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) dev->dev_private;
+	struct mdfld_dsi_config *dsi_config;
+
+	dsi_config = dev_priv->dsi_configs[0];
+	mdfld_dsi_dsr_forbid(dsi_config);
+
+	/* DSI PLL */
+	printk(KERN_INFO "[DISPLAY REG DUMP] DSI PLL REG\n\n");
+	psb_register_dump(dev, 0xf010, 0xf020);
+	printk(KERN_INFO "\n");
+
+	/* MIPI A REGISTER */
+	printk(KERN_INFO "[DISPLAY REG DUMP] MIPI A\n\n");
+	psb_register_dump(dev, 0xb000, 0xb100);
+	printk(KERN_INFO "\n");
+
+	/* PIPE A */
+	printk(KERN_INFO "[DISPLAY REG DUMP] PIPE A\n\n");
+	psb_register_dump(dev, 0x60000, 0x60100);
+	printk(KERN_INFO "\n");
+
+	/* Plane A */
+	printk(KERN_INFO "[DISPLAY REG DUMP] PLANE A\n\n");
+	psb_register_dump(dev, 0x70000, 0x700FC);
+	psb_register_dump(dev, 0x70180, 0x701F4);
+	psb_register_dump(dev, 0x70400, 0x7044C);
+	psb_register_dump(dev, 0x70500, 0x70504);
+	printk(KERN_INFO "\n");
+
+	if (dev_priv->bhdmiconnected) {
+		/* PIPE B */
+		printk(KERN_INFO "[DISPLAY REG DUMP] PIPE B\n\n");
+		psb_register_dump(dev, 0x61000, 0x61100);
+		printk(KERN_INFO "\n");
+
+		/* Plane B */
+		printk(KERN_INFO "[DISPLAY REG DUMP] PLANE B\n\n");
+		psb_register_dump(dev, 0x71000, 0x710FC);
+		psb_register_dump(dev, 0x71180, 0x711F4);
+		psb_register_dump(dev, 0x71400, 0x7144C);
+		printk(KERN_INFO "\n");
+	}
+
+	/* OVERLAY */
+	printk(KERN_INFO "[DISPLAY REG DUMP] OVERLAY A\n\n");
+	psb_register_dump(dev, 0x30000, 0x30060);
+	psb_register_dump(dev, 0x30100, 0x301A4);
+	psb_register_dump(dev, 0x32000, 0x3201C);
+	psb_register_dump(dev, 0x33000, 0x33024);
+	printk(KERN_INFO "\n");
+
+	mdfld_dsi_dsr_allow(dsi_config);
+	return 0;
+}
+
+void psb_flip_abnormal_debug_info(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = NULL;
+	struct mdfld_dsi_config *dsi_config = NULL;
+	int pipe = 0;
+	unsigned long long interval = 0;
+	unsigned long long second = 0;
+	unsigned long nanosec_rem;
+
+	if (!dev) {
+		DRM_INFO("%s dev is NUL\n", __func__);
+		return;
+	}
+	dev_priv =
+	(struct drm_psb_private *) dev->dev_private;
+
+	if (!dev_priv) {
+		DRM_INFO("%s dev_priv is NUL\n", __func__);
+		return;
+	}
+	dsi_config = dev_priv->dsi_configs[0];
+
+	if (!dsi_config) {
+		DRM_INFO("%s dsi_config is NUL\n", __func__);
+		return;
+	}
+
+	DRM_INFO("\n1.level1 interrupt status\n");
+	DRM_INFO("PSB_INT_MASK_R mask 0x%x\n", PSB_RVDC32(PSB_INT_MASK_R));
+	DRM_INFO("PSB_INT_ENABLE_R mask 0x%x\n", PSB_RVDC32(PSB_INT_ENABLE_R));
+	DRM_INFO("dev_priv->vdc_irq_mask = 0x%x\n\n", dev_priv->vdc_irq_mask);
+
+	DRM_INFO("2.level2 interrupt register\n");
+	DRM_INFO("pipe 0 config 0x%x status 0x%x\n",
+		REG_READ(0x70008), REG_READ(0x70024));
+	DRM_INFO("pipe 1 config 0x%x status 0x%x\n\n",
+		REG_READ(0x71008), REG_READ(0x71024));
+
+	DRM_INFO("3.check irq and workqueue relationship\n");
+	second = dev_priv->vsync_te_trouble_ts;
+	nanosec_rem = do_div(second, 1000000000);
+	DRM_INFO("vsync_te trouble: [%5lu.%06lu]\n",
+			(unsigned long) second,
+			nanosec_rem / 1000);
+	for (pipe = 0; pipe < PSB_NUM_PIPE; pipe++) {
+		if (pipe == 2)
+			continue;
+		second = dev_priv->vsync_te_irq_ts[pipe];
+		nanosec_rem = do_div(second, 1000000000);
+		DRM_INFO("pipe %d last vsync_te irq: [%5lu.%06lu]\n",
+				pipe, (unsigned long) second,
+				nanosec_rem / 1000);
+
+		second = dev_priv->vsync_te_worker_ts[pipe];
+		nanosec_rem = do_div(second, 1000000000);
+		DRM_INFO("pipe %d last vsync_te workqueue : [%5lu.%06lu]\n",
+				pipe, (unsigned long) second,
+				nanosec_rem / 1000);
+
+		if (dev_priv->vsync_te_irq_ts[pipe] <
+			dev_priv->vsync_te_worker_ts[pipe]) {
+			/*workqueue delay*/
+			interval = dev_priv->vsync_te_worker_ts[pipe] -
+					dev_priv->vsync_te_irq_ts[pipe];
+			nanosec_rem = do_div(interval, 1000000000);
+			DRM_INFO("pipe %d workqueue be delayed : [%5lu.%06lu]\n",
+					pipe, (unsigned long) interval,
+					nanosec_rem / 1000);
+		} else {
+			/*workqueue block*/
+			interval = cpu_clock(0) -
+				dev_priv->vsync_te_irq_ts[pipe];
+			nanosec_rem = do_div(interval, 1000000000);
+			DRM_INFO("pipe %d workqueue be blocked : [%5lu.%06lu]\n\n",
+					pipe, (unsigned long) interval,
+					nanosec_rem / 1000);
+		}
+		/*check whether real vsync te missing*/
+		interval = cpu_clock(0) -
+			dev_priv->vsync_te_irq_ts[pipe];
+		nanosec_rem = do_div(interval, 1000000000);
+		if (nanosec_rem > 200000000) {
+			DRM_INFO("pipe %d vsync te missing %dms !\n\n",
+				 pipe, nanosec_rem/1000000);
+			dev_priv->vsync_te_working[pipe] = false;
+			if (pipe == 0)
+				atomic_set(&dev_priv->mipi_flip_abnormal, 1);
+		}
+	}
+}
+
+static int psb_get_dc_info_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	struct drm_psb_dc_info *dc = data;
+
+	if (IS_MDFLD_OLD(dev)) {
+		dc->pipe_count = INTEL_MDFLD_DISPLAY_PIPE_NUM;
+
+		dc->primary_plane_count = INTEL_MDFLD_SPRITE_PLANE_NUM;
+		dc->sprite_plane_count = 0;
+		dc->overlay_plane_count = INTEL_MDFLD_OVERLAY_PLANE_NUM;
+		dc->cursor_plane_count = INTEL_MDFLD_CURSOR_PLANE_NUM;
+	} else if (IS_CTP(dev)) {
+		dc->pipe_count = INTEL_CTP_DISPLAY_PIPE_NUM;
+
+		dc->primary_plane_count = INTEL_CTP_SPRITE_PLANE_NUM;
+		dc->sprite_plane_count = 0;
+		dc->overlay_plane_count = INTEL_CTP_OVERLAY_PLANE_NUM;
+		dc->cursor_plane_count = INTEL_CTP_CURSOR_PLANE_NUM;
+	} else {
+		DRM_INFO("unsupported platform in mrst driver now!");
+		return -1;
+	}
+
+	return 0;
+}
+
+static int psb_vsync_set_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	struct drm_psb_vsync_set_arg *arg = data;
+	struct mdfld_dsi_config *dsi_config;
+	unsigned long irq_flags;
+	struct timespec now;
+	uint32_t vsync_enable = 0;
+	uint32_t pipe;
+	u32 vbl_count = 0;
+	s64 nsecs = 0;
+	int ret = 0;
+	struct psb_fpriv *psb_fp = NULL;
+
+	mutex_lock(&dev_priv->vsync_lock);
+	if (arg->vsync_operation_mask) {
+		pipe = arg->vsync.pipe;
+
+		if (arg->vsync_operation_mask & GET_VSYNC_COUNT) {
+			vbl_count = intel_vblank_count(dev, pipe);
+
+			getrawmonotonic(&now);
+			nsecs = timespec_to_ns(&now);
+
+			arg->vsync.timestamp = (uint64_t)nsecs;
+			arg->vsync.vsync_count = (uint64_t)vbl_count;
+		}
+
+		if (arg->vsync_operation_mask & VSYNC_WAIT) {
+			vbl_count = intel_vblank_count(dev, pipe);
+
+			spin_lock_irqsave(&dev_priv->irqmask_lock, irq_flags);
+			vsync_enable = dev_priv->pipestat[pipe] &
+				(PIPE_TE_ENABLE | PIPE_VBLANK_INTERRUPT_ENABLE);
+			spin_unlock_irqrestore(&dev_priv->irqmask_lock,
+					irq_flags);
+
+			mutex_unlock(&dev_priv->vsync_lock);
+
+			if (vsync_enable) {
+				ret = wait_event_interruptible_timeout(
+					    dev_priv->vsync_queue,
+					    (intel_vblank_count(dev, pipe) !=
+					     vbl_count),
+					    3 * DRM_HZ);
+
+				if (!ret)
+					DRM_ERROR("Pipe %d vsync time out\n",
+							pipe);
+			}
+
+			getrawmonotonic(&now);
+			nsecs = timespec_to_ns(&now);
+
+			arg->vsync.timestamp = (uint64_t)nsecs;
+
+			return 0;
+		}
+
+		dsi_config = dev_priv->dsi_configs[0];
+
+		if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, true)) {
+			mutex_unlock(&dev_priv->vsync_lock);
+			return -EINVAL;
+		}
+
+		if (arg->vsync_operation_mask & VSYNC_ENABLE) {
+			/*enable vblank/TE*/
+			/*drm_vblank_get(dev, pipe);*/
+			switch (pipe) {
+			case 0:
+			case 2:
+				psb_fp = psb_fpriv(file_priv);
+				mdfld_dsi_dsr_forbid(dsi_config);
+				psb_fp->dsr_blocked = true;
+
+				if (get_panel_mode(dev) ==
+						MDFLD_DSI_ENCODER_DPI)
+					psb_enable_vblank(dev, pipe);
+				break;
+			case 1:
+				psb_enable_vblank(dev, pipe);
+				break;
+			}
+		}
+
+		if (arg->vsync_operation_mask & VSYNC_DISABLE) {
+			/*drm_vblank_put(dev, pipe);*/
+			switch (pipe) {
+			case 0:
+			case 2:
+				psb_fp = psb_fpriv(file_priv);
+				if (get_panel_mode(dev) ==
+						MDFLD_DSI_ENCODER_DPI)
+					psb_disable_vblank(dev, pipe);
+
+				mdfld_dsi_dsr_allow(dsi_config);
+				psb_fp->dsr_blocked = false;
+				break;
+			case 1:
+				psb_disable_vblank(dev, pipe);
+				break;
+			}
+
+			atomic_inc(&dev_priv->vblank_count[pipe]);
+			wake_up_interruptible(&dev_priv->vsync_queue);
+		}
+
+		ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+	}
+
+	mutex_unlock(&dev_priv->vsync_lock);
+	return 0;
+}
+
+static int psb_register_rw_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	struct drm_psb_register_rw_arg *arg = data;
+	unsigned int iep_ble_status;
+	unsigned long iep_timeout;
+	UHBUsage usage =
+		arg->b_force_hw_on ? OSPM_UHB_FORCE_POWER_ON : OSPM_UHB_ONLY_IF_ON;
+	struct mdfld_dsi_config *dsi_config;
+	struct mdfld_dsi_hw_registers *regs;
+	struct mdfld_dsi_hw_context *ctx;
+	int ret = 0;
+
+	mutex_lock(&dev_priv->overlay_lock);
+	if (arg->display_write_mask != 0) {
+		if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, usage)) {
+			if (arg->display_write_mask & REGRWBITS_PFIT_CONTROLS)
+				PSB_WVDC32(arg->display.pfit_controls,
+					   PFIT_CONTROL);
+			if (arg->display_write_mask &
+			    REGRWBITS_PFIT_AUTOSCALE_RATIOS)
+				PSB_WVDC32(arg->display.pfit_autoscale_ratios,
+					   PFIT_AUTO_RATIOS);
+			if (arg->display_write_mask &
+			    REGRWBITS_PFIT_PROGRAMMED_SCALE_RATIOS)
+				PSB_WVDC32(
+					arg->display.pfit_programmed_scale_ratios,
+					PFIT_PGM_RATIOS);
+			if (arg->display_write_mask & REGRWBITS_PIPEASRC)
+				PSB_WVDC32(arg->display.pipeasrc,
+					   PIPEASRC);
+			if (arg->display_write_mask & REGRWBITS_PIPEBSRC)
+				PSB_WVDC32(arg->display.pipebsrc,
+					   PIPEBSRC);
+			if (arg->display_write_mask & REGRWBITS_VTOTAL_A)
+				PSB_WVDC32(arg->display.vtotal_a,
+					   VTOTAL_A);
+			if (arg->display_write_mask & REGRWBITS_VTOTAL_B)
+				PSB_WVDC32(arg->display.vtotal_b,
+					   VTOTAL_B);
+			if (arg->display_write_mask & REGRWBITS_DSPACNTR)
+				PSB_WVDC32(arg->display.dspcntr_a,
+					   DSPACNTR);
+			if (arg->display_write_mask & REGRWBITS_DSPBCNTR)
+				PSB_WVDC32(arg->display.dspcntr_b,
+					   DSPBCNTR);
+			ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+		} else {
+			if (arg->display_write_mask & REGRWBITS_PFIT_CONTROLS)
+				dev_priv->savePFIT_CONTROL =
+					arg->display.pfit_controls;
+			if (arg->display_write_mask &
+			    REGRWBITS_PFIT_AUTOSCALE_RATIOS)
+				dev_priv->savePFIT_AUTO_RATIOS =
+					arg->display.pfit_autoscale_ratios;
+			if (arg->display_write_mask &
+			    REGRWBITS_PFIT_PROGRAMMED_SCALE_RATIOS)
+				dev_priv->savePFIT_PGM_RATIOS =
+					arg->display.pfit_programmed_scale_ratios;
+			if (arg->display_write_mask & REGRWBITS_PIPEASRC)
+				dev_priv->savePIPEASRC = arg->display.pipeasrc;
+			if (arg->display_write_mask & REGRWBITS_PIPEBSRC)
+				dev_priv->savePIPEBSRC = arg->display.pipebsrc;
+			if (arg->display_write_mask & REGRWBITS_VTOTAL_A)
+				dev_priv->saveVTOTAL_A = arg->display.vtotal_a;
+			if (arg->display_write_mask & REGRWBITS_VTOTAL_B)
+				dev_priv->saveVTOTAL_B = arg->display.vtotal_b;
+		}
+	}
+
+	if (arg->display_read_mask != 0) {
+		if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, usage)) {
+			if (arg->display_read_mask &
+			    REGRWBITS_PFIT_CONTROLS)
+				arg->display.pfit_controls =
+					PSB_RVDC32(PFIT_CONTROL);
+			if (arg->display_read_mask &
+			    REGRWBITS_PFIT_AUTOSCALE_RATIOS)
+				arg->display.pfit_autoscale_ratios =
+					PSB_RVDC32(PFIT_AUTO_RATIOS);
+			if (arg->display_read_mask &
+			    REGRWBITS_PFIT_PROGRAMMED_SCALE_RATIOS)
+				arg->display.pfit_programmed_scale_ratios =
+					PSB_RVDC32(PFIT_PGM_RATIOS);
+			if (arg->display_read_mask & REGRWBITS_PIPEASRC)
+				arg->display.pipeasrc = PSB_RVDC32(PIPEASRC);
+			if (arg->display_read_mask & REGRWBITS_PIPEBSRC)
+				arg->display.pipebsrc = PSB_RVDC32(PIPEBSRC);
+			if (arg->display_read_mask & REGRWBITS_VTOTAL_A)
+				arg->display.vtotal_a = PSB_RVDC32(VTOTAL_A);
+			if (arg->display_read_mask & REGRWBITS_VTOTAL_B)
+				arg->display.vtotal_b = PSB_RVDC32(VTOTAL_B);
+			if (arg->display_read_mask & REGRWBITS_DSPACNTR)
+				arg->display.dspcntr_a = PSB_RVDC32(DSPACNTR);
+			if (arg->display_read_mask & REGRWBITS_DSPBCNTR)
+				arg->display.dspcntr_b = PSB_RVDC32(DSPBCNTR);
+			if (arg->display_read_mask & REGRWBITS_PIPEASTAT)
+				arg->display.pipestat_a = PSB_RVDC32(PIPEASTAT);
+			if (arg->display_read_mask & REGRWBITS_INT_MASK)
+				arg->display.int_mask =
+						PSB_RVDC32(PSB_INT_MASK_R);
+			if (arg->display_read_mask & REGRWBITS_INT_ENABLE)
+				arg->display.int_enable =
+						PSB_RVDC32(PSB_INT_ENABLE_R);
+			if (arg->display_read_mask & REGRWBITS_DISPLAY_ALL)
+				psb_display_reg_dump(dev);
+			ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+		} else {
+			if (arg->display_read_mask &
+			    REGRWBITS_PFIT_CONTROLS)
+				arg->display.pfit_controls =
+					dev_priv->savePFIT_CONTROL;
+			if (arg->display_read_mask &
+			    REGRWBITS_PFIT_AUTOSCALE_RATIOS)
+				arg->display.pfit_autoscale_ratios =
+					dev_priv->savePFIT_AUTO_RATIOS;
+			if (arg->display_read_mask &
+			    REGRWBITS_PFIT_PROGRAMMED_SCALE_RATIOS)
+				arg->display.pfit_programmed_scale_ratios =
+					dev_priv->savePFIT_PGM_RATIOS;
+			if (arg->display_read_mask & REGRWBITS_PIPEASRC)
+				arg->display.pipeasrc = dev_priv->savePIPEASRC;
+			if (arg->display_read_mask & REGRWBITS_PIPEBSRC)
+				arg->display.pipebsrc = dev_priv->savePIPEBSRC;
+			if (arg->display_read_mask & REGRWBITS_VTOTAL_A)
+				arg->display.vtotal_a = dev_priv->saveVTOTAL_A;
+			if (arg->display_read_mask & REGRWBITS_VTOTAL_B)
+				arg->display.vtotal_b = dev_priv->saveVTOTAL_B;
+			if (arg->display_read_mask & REGRWBITS_PIPEASTAT)
+				arg->display.pipestat_a = PSB_RVDC32(PIPEASTAT);
+			if (arg->display_read_mask & REGRWBITS_INT_MASK)
+				arg->display.int_mask =
+						PSB_RVDC32(PSB_INT_MASK_R);
+			if (arg->display_read_mask & REGRWBITS_INT_ENABLE)
+				arg->display.int_enable =
+						PSB_RVDC32(PSB_INT_ENABLE_R);
+			if (arg->display_read_mask & REGRWBITS_DISPLAY_ALL)
+				psb_display_reg_dump(dev);
+		}
+	}
+
+	if (arg->overlay_write_mask != 0) {
+		int pipenum;
+		uint32_t ovadd_pipe = (arg->overlay.OVADD >> 6) & 0x3;
+
+		if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, usage)) {
+			dsi_config = dev_priv->dsi_configs[0];
+			regs = &dsi_config->regs;
+			ctx = &dsi_config->dsi_hw_context;
+
+			/*forbid dsr which will restore regs*/
+			if (ovadd_pipe == 0)
+				mdfld_dsi_dsr_forbid(dsi_config);
+
+			if (arg->overlay_write_mask & OV_REGRWBITS_OGAM_ALL) {
+				PSB_WVDC32(arg->overlay.OGAMC5, OV_OGAMC5);
+				PSB_WVDC32(arg->overlay.OGAMC4, OV_OGAMC4);
+				PSB_WVDC32(arg->overlay.OGAMC3, OV_OGAMC3);
+				PSB_WVDC32(arg->overlay.OGAMC2, OV_OGAMC2);
+				PSB_WVDC32(arg->overlay.OGAMC1, OV_OGAMC1);
+				PSB_WVDC32(arg->overlay.OGAMC0, OV_OGAMC0);
+			}
+			if (arg->overlay_write_mask & OVC_REGRWBITS_OGAM_ALL) {
+				PSB_WVDC32(arg->overlay.OGAMC5, OVC_OGAMC5);
+				PSB_WVDC32(arg->overlay.OGAMC4, OVC_OGAMC4);
+				PSB_WVDC32(arg->overlay.OGAMC3, OVC_OGAMC3);
+				PSB_WVDC32(arg->overlay.OGAMC2, OVC_OGAMC2);
+				PSB_WVDC32(arg->overlay.OGAMC1, OVC_OGAMC1);
+				PSB_WVDC32(arg->overlay.OGAMC0, OVC_OGAMC0);
+			}
+
+			if (arg->overlay_write_mask & OV_REGRWBITS_WAIT_FLIP)
+				overlay_wait_flip(dev);
+
+			if (arg->overlay_write_mask & OV_REGRWBITS_OVADD) {
+				if (arg->overlay.buffer_handle) {
+					ret = validate_overlay_register_buffer(
+						file_priv,
+						&arg->overlay.OVADD,
+						arg->overlay.buffer_handle);
+
+					if (ret) {
+						printk(KERN_ERR
+							"Invalid parameter\n");
+						mutex_unlock(&dev_priv->overlay_lock);
+						return -EINVAL;
+					}
+				}
+
+				if (ovadd_pipe == 0) {
+					/*lock*/
+					mutex_lock(&dsi_config->context_lock);
+
+					if (dev_priv->exit_idle &&
+							(dsi_config->type ==
+							 MDFLD_DSI_ENCODER_DPI))
+						dev_priv->exit_idle(dev,
+								MDFLD_DSR_2D_3D,
+								NULL,
+								true);
+
+					/*flip overlay*/
+					PSB_WVDC32(arg->overlay.OVADD, OV_OVADD);
+
+					ctx->ovaadd = arg->overlay.OVADD;
+
+					/*update on-panel frame buffer*/
+					if (arg->overlay.b_wms)
+						mdfld_dsi_dsr_update_panel_fb(dsi_config);
+
+					mutex_unlock(&dsi_config->context_lock);
+				} else
+					PSB_WVDC32(arg->overlay.OVADD, OV_OVADD);
+
+				/* when switch back from HDMI to local
+				 * this ensures the Pipe B is fully disabled */
+				pipenum = ((arg->overlay.OVADD >> 6) & 0x3) ? 1 : 0;
+				wait_for_pipeb_finish(dev, pipenum);
+
+				if (arg->overlay.b_wait_vblank)
+					overlay_wait_vblank(dev,
+							file_priv,
+							arg->overlay.OVADD);
+
+				if (IS_MDFLD(dev)) {
+					if (arg->overlay.IEP_ENABLED) {
+						/* VBLANK period */
+						iep_timeout = jiffies + HZ / 10;
+						do {
+							iep_ble_status = PSB_RVDC32(0x31800);
+							if (time_after_eq(jiffies, iep_timeout)) {
+								DRM_ERROR("IEP Lite timeout\n");
+								break;
+							}
+							cpu_relax();
+						} while ((iep_ble_status >> 1) != 1);
+
+						arg->overlay.IEP_BLE_MINMAX    = PSB_RVDC32(0x31804);
+						arg->overlay.IEP_BSSCC_CONTROL = PSB_RVDC32(0x32000);
+					}
+				}
+			}
+			if (arg->overlay_write_mask & OVC_REGRWBITS_OVADD) {
+				if (arg->overlay.buffer_handle) {
+					ret = validate_overlay_register_buffer(
+						file_priv,
+						&arg->overlay.OVADD,
+						arg->overlay.buffer_handle);
+
+					if (ret) {
+						printk(KERN_ERR
+							"Invalid parameter\n");
+						mutex_unlock(&dev_priv->overlay_lock);
+						return -EINVAL;
+					}
+				}
+
+				PSB_WVDC32(arg->overlay.OVADD, OVC_OVADD);
+				if (arg->overlay.b_wait_vblank) {
+					/*Wait for 20ms.*/
+					unsigned long vblank_timeout = jiffies + HZ / 50;
+					uint32_t temp;
+					while (time_before_eq(jiffies, vblank_timeout)) {
+						temp = PSB_RVDC32(OVC_DOVCSTA);
+						if ((temp & (0x1 << 31)) != 0) {
+							break;
+						}
+						cpu_relax();
+					}
+				}
+			}
+			/*allow entering dsr*/
+			if (ovadd_pipe == 0)
+				mdfld_dsi_dsr_allow(dsi_config);
+
+			ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+		} else {
+			if (arg->overlay_write_mask & OV_REGRWBITS_OGAM_ALL) {
+				dev_priv->saveOV_OGAMC5 = arg->overlay.OGAMC5;
+				dev_priv->saveOV_OGAMC4 = arg->overlay.OGAMC4;
+				dev_priv->saveOV_OGAMC3 = arg->overlay.OGAMC3;
+				dev_priv->saveOV_OGAMC2 = arg->overlay.OGAMC2;
+				dev_priv->saveOV_OGAMC1 = arg->overlay.OGAMC1;
+				dev_priv->saveOV_OGAMC0 = arg->overlay.OGAMC0;
+			}
+			if (arg->overlay_write_mask & OVC_REGRWBITS_OGAM_ALL) {
+				dev_priv->saveOVC_OGAMC5 = arg->overlay.OGAMC5;
+				dev_priv->saveOVC_OGAMC4 = arg->overlay.OGAMC4;
+				dev_priv->saveOVC_OGAMC3 = arg->overlay.OGAMC3;
+				dev_priv->saveOVC_OGAMC2 = arg->overlay.OGAMC2;
+				dev_priv->saveOVC_OGAMC1 = arg->overlay.OGAMC1;
+				dev_priv->saveOVC_OGAMC0 = arg->overlay.OGAMC0;
+			}
+			if (arg->overlay_write_mask & OV_REGRWBITS_OVADD)
+				dev_priv->saveOV_OVADD = arg->overlay.OVADD;
+			if (arg->overlay_write_mask & OVC_REGRWBITS_OVADD)
+				dev_priv->saveOVC_OVADD = arg->overlay.OVADD;
+		}
+	}
+
+	if (arg->overlay_read_mask != 0) {
+		if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, usage)) {
+			if (arg->overlay_read_mask & OV_REGRWBITS_OGAM_ALL) {
+				arg->overlay.OGAMC5 = PSB_RVDC32(OV_OGAMC5);
+				arg->overlay.OGAMC4 = PSB_RVDC32(OV_OGAMC4);
+				arg->overlay.OGAMC3 = PSB_RVDC32(OV_OGAMC3);
+				arg->overlay.OGAMC2 = PSB_RVDC32(OV_OGAMC2);
+				arg->overlay.OGAMC1 = PSB_RVDC32(OV_OGAMC1);
+				arg->overlay.OGAMC0 = PSB_RVDC32(OV_OGAMC0);
+			}
+			if (arg->overlay_read_mask & OVC_REGRWBITS_OGAM_ALL) {
+				arg->overlay.OGAMC5 = PSB_RVDC32(OVC_OGAMC5);
+				arg->overlay.OGAMC4 = PSB_RVDC32(OVC_OGAMC4);
+				arg->overlay.OGAMC3 = PSB_RVDC32(OVC_OGAMC3);
+				arg->overlay.OGAMC2 = PSB_RVDC32(OVC_OGAMC2);
+				arg->overlay.OGAMC1 = PSB_RVDC32(OVC_OGAMC1);
+				arg->overlay.OGAMC0 = PSB_RVDC32(OVC_OGAMC0);
+			}
+			if (arg->overlay_read_mask & OV_REGRWBITS_OVADD)
+				arg->overlay.OVADD = PSB_RVDC32(OV_OVADD);
+			if (arg->overlay_read_mask & OVC_REGRWBITS_OVADD)
+				arg->overlay.OVADD = PSB_RVDC32(OVC_OVADD);
+			ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+		} else {
+			if (arg->overlay_read_mask & OV_REGRWBITS_OGAM_ALL) {
+				arg->overlay.OGAMC5 = dev_priv->saveOV_OGAMC5;
+				arg->overlay.OGAMC4 = dev_priv->saveOV_OGAMC4;
+				arg->overlay.OGAMC3 = dev_priv->saveOV_OGAMC3;
+				arg->overlay.OGAMC2 = dev_priv->saveOV_OGAMC2;
+				arg->overlay.OGAMC1 = dev_priv->saveOV_OGAMC1;
+				arg->overlay.OGAMC0 = dev_priv->saveOV_OGAMC0;
+			}
+			if (arg->overlay_read_mask & OVC_REGRWBITS_OGAM_ALL) {
+				arg->overlay.OGAMC5 = dev_priv->saveOVC_OGAMC5;
+				arg->overlay.OGAMC4 = dev_priv->saveOVC_OGAMC4;
+				arg->overlay.OGAMC3 = dev_priv->saveOVC_OGAMC3;
+				arg->overlay.OGAMC2 = dev_priv->saveOVC_OGAMC2;
+				arg->overlay.OGAMC1 = dev_priv->saveOVC_OGAMC1;
+				arg->overlay.OGAMC0 = dev_priv->saveOVC_OGAMC0;
+			}
+			if (arg->overlay_read_mask & OV_REGRWBITS_OVADD)
+				arg->overlay.OVADD = dev_priv->saveOV_OVADD;
+			if (arg->overlay_read_mask & OVC_REGRWBITS_OVADD)
+				arg->overlay.OVADD = dev_priv->saveOVC_OVADD;
+		}
+	}
+
+	if (arg->sprite_enable_mask != 0) {
+		if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, usage)) {
+			PSB_WVDC32(0x1F3E, DSPARB);
+			PSB_WVDC32(arg->sprite.dspa_control | PSB_RVDC32(DSPACNTR), DSPACNTR);
+			PSB_WVDC32(arg->sprite.dspa_key_value, DSPAKEYVAL);
+			PSB_WVDC32(arg->sprite.dspa_key_mask, DSPAKEYMASK);
+			PSB_WVDC32(PSB_RVDC32(DSPASURF), DSPASURF);
+			PSB_RVDC32(DSPASURF);
+			PSB_WVDC32(arg->sprite.dspc_control, DSPCCNTR);
+			PSB_WVDC32(arg->sprite.dspc_stride, DSPCSTRIDE);
+			PSB_WVDC32(arg->sprite.dspc_position, DSPCPOS);
+			PSB_WVDC32(arg->sprite.dspc_linear_offset, DSPCLINOFF);
+			PSB_WVDC32(arg->sprite.dspc_size, DSPCSIZE);
+			PSB_WVDC32(arg->sprite.dspc_surface, DSPCSURF);
+			PSB_RVDC32(DSPCSURF);
+			ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+		}
+	}
+
+	if (arg->sprite_disable_mask != 0) {
+		if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, usage)) {
+			PSB_WVDC32(0x3F3E, DSPARB);
+			PSB_WVDC32(0x0, DSPCCNTR);
+			PSB_WVDC32(arg->sprite.dspc_surface, DSPCSURF);
+			PSB_RVDC32(DSPCSURF);
+			ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+		}
+	}
+
+	if (arg->subpicture_enable_mask != 0) {
+		if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, usage)) {
+			uint32_t temp;
+			if (arg->subpicture_enable_mask & REGRWBITS_DSPACNTR) {
+				temp =  PSB_RVDC32(DSPACNTR);
+				temp &= ~DISPPLANE_PIXFORMAT_MASK;
+				temp &= ~DISPPLANE_BOTTOM;
+				temp |= DISPPLANE_32BPP;
+				PSB_WVDC32(temp, DSPACNTR);
+
+				temp =  PSB_RVDC32(DSPABASE);
+				PSB_WVDC32(temp, DSPABASE);
+				PSB_RVDC32(DSPABASE);
+				temp =  PSB_RVDC32(DSPASURF);
+				PSB_WVDC32(temp, DSPASURF);
+				PSB_RVDC32(DSPASURF);
+			}
+			if (arg->subpicture_enable_mask & REGRWBITS_DSPBCNTR) {
+				temp =  PSB_RVDC32(DSPBCNTR);
+				temp &= ~DISPPLANE_PIXFORMAT_MASK;
+				temp &= ~DISPPLANE_BOTTOM;
+				temp |= DISPPLANE_32BPP;
+				PSB_WVDC32(temp, DSPBCNTR);
+
+				temp =  PSB_RVDC32(DSPBBASE);
+				PSB_WVDC32(temp, DSPBBASE);
+				PSB_RVDC32(DSPBBASE);
+				temp =  PSB_RVDC32(DSPBSURF);
+				PSB_WVDC32(temp, DSPBSURF);
+				PSB_RVDC32(DSPBSURF);
+			}
+			if (arg->subpicture_enable_mask & REGRWBITS_DSPCCNTR) {
+				temp =  PSB_RVDC32(DSPCCNTR);
+				temp &= ~DISPPLANE_PIXFORMAT_MASK;
+				temp &= ~DISPPLANE_BOTTOM;
+				temp |= DISPPLANE_32BPP;
+				PSB_WVDC32(temp, DSPCCNTR);
+
+				temp =  PSB_RVDC32(DSPCBASE);
+				PSB_WVDC32(temp, DSPCBASE);
+				PSB_RVDC32(DSPCBASE);
+				temp =  PSB_RVDC32(DSPCSURF);
+				PSB_WVDC32(temp, DSPCSURF);
+				PSB_RVDC32(DSPCSURF);
+			}
+			ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+		}
+	}
+
+	if (arg->subpicture_disable_mask != 0) {
+		if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, usage)) {
+			uint32_t temp;
+			if (arg->subpicture_disable_mask & REGRWBITS_DSPACNTR) {
+				temp =  PSB_RVDC32(DSPACNTR);
+				temp &= ~DISPPLANE_PIXFORMAT_MASK;
+				temp |= DISPPLANE_32BPP_NO_ALPHA;
+				PSB_WVDC32(temp, DSPACNTR);
+
+				temp =  PSB_RVDC32(DSPABASE);
+				PSB_WVDC32(temp, DSPABASE);
+				PSB_RVDC32(DSPABASE);
+				temp =  PSB_RVDC32(DSPASURF);
+				PSB_WVDC32(temp, DSPASURF);
+				PSB_RVDC32(DSPASURF);
+			}
+			if (arg->subpicture_disable_mask & REGRWBITS_DSPBCNTR) {
+				temp =  PSB_RVDC32(DSPBCNTR);
+				temp &= ~DISPPLANE_PIXFORMAT_MASK;
+				temp |= DISPPLANE_32BPP_NO_ALPHA;
+				PSB_WVDC32(temp, DSPBCNTR);
+
+				temp =  PSB_RVDC32(DSPBBASE);
+				PSB_WVDC32(temp, DSPBBASE);
+				PSB_RVDC32(DSPBBASE);
+				temp =  PSB_RVDC32(DSPBSURF);
+				PSB_WVDC32(temp, DSPBSURF);
+				PSB_RVDC32(DSPBSURF);
+			}
+			if (arg->subpicture_disable_mask & REGRWBITS_DSPCCNTR) {
+				temp =  PSB_RVDC32(DSPCCNTR);
+				temp &= ~DISPPLANE_PIXFORMAT_MASK;
+				temp |= DISPPLANE_32BPP_NO_ALPHA;
+				PSB_WVDC32(temp, DSPCCNTR);
+
+				temp =  PSB_RVDC32(DSPCBASE);
+				PSB_WVDC32(temp, DSPCBASE);
+				PSB_RVDC32(DSPCBASE);
+				temp =  PSB_RVDC32(DSPCSURF);
+				PSB_WVDC32(temp, DSPCSURF);
+				PSB_RVDC32(DSPCSURF);
+			}
+			ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+		}
+	}
+	if (arg->cursor_enable_mask != 0) {
+		if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, usage)) {
+			uint32_t temp;
+			temp = PSB_RVDC32(PIPEACONF);
+			temp &= ~PIPECONF_CURSOR_OFF;
+			PSB_WVDC32(temp, PIPEACONF);
+			PSB_WVDC32((arg->cursor.CursorSize == 1) ? 0x22 : 0x27,
+					CURACNTR);
+			PSB_WVDC32(arg->cursor.CursorADDR, CURABASE);
+			if ((arg->cursor.xPos > 0) && (arg->cursor.yPos > 0))
+				PSB_WVDC32(((arg->cursor.yPos << 16)
+					|(arg->cursor.xPos)), CURAPOS);
+			ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+		}
+	}
+	if (arg->cursor_disable_mask != 0) {
+		if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, usage)) {
+			uint32_t temp;
+			temp = PSB_RVDC32(PIPEACONF);
+			temp |= PIPECONF_CURSOR_OFF;
+			PSB_WVDC32(temp, PIPEACONF);
+			PSB_WVDC32(0x0, CURACNTR);
+			PSB_WVDC32(0x0, CURABASE);
+			ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+		}
+	}
+
+	mutex_unlock(&dev_priv->overlay_lock);
+	return 0;
+}
+
+/* always available as we are SIGIO'd */
+static unsigned int psb_poll(struct file *filp,
+			     struct poll_table_struct *wait)
+{
+	return POLLIN | POLLRDNORM;
+}
+
+static int psb_driver_open(struct drm_device *dev, struct drm_file *priv)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) dev->dev_private;
+
+	DRM_DEBUG("\n");
+	BUG_ON(!dev_priv->pvr_ops);
+	return dev_priv->pvr_ops->PVRSRVOpen(dev, priv);
+}
+
+static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd,
+			       unsigned long arg)
+{
+	unsigned int nr = DRM_IOCTL_NR(cmd);
+	long ret;
+
+	DRM_DEBUG("cmd = %x, nr = %x\n", cmd, nr);
+
+	/*
+	 * The driver private ioctls and TTM ioctls should be
+	 * thread-safe.
+	 */
+
+	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
+	    && (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(psb_ioctls))) {
+		struct drm_ioctl_desc *ioctl =
+					&psb_ioctls[nr - DRM_COMMAND_BASE];
+
+		if (unlikely(ioctl->cmd != cmd)) {
+			DRM_ERROR(
+				"Invalid drm cmnd %d ioctl->cmd %x, cmd %x\n",
+				nr - DRM_COMMAND_BASE, ioctl->cmd, cmd);
+			return -EINVAL;
+		}
+	}
+	/*
+	 * Not all old drm ioctls are thread-safe.
+	 */
+
+	/* lock_kernel(); */
+	ret = drm_ioctl(filp, cmd, arg);
+	/* unlock_kernel(); */
+	return ret;
+}
+
+#ifdef DISPLAY_DRIVER_DEBUG_INTERFACE
+static int psb_blc_proc_show(struct seq_file *seq, void *v)
+{
+	struct drm_minor *minor = (struct drm_minor *) seq->private;
+	struct drm_device *dev = minor->dev;
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) dev->dev_private;
+	int user_brightness = 0;
+	int final_brightness = 0;
+
+	user_brightness = psb_get_brightness(NULL);
+	final_brightness = (user_brightness * dev_priv->blc_adj1) / 100;
+	final_brightness = (final_brightness * dev_priv->blc_adj2) / 100;
+
+	DRM_INFO("%i\n", final_brightness);
+	seq_printf(seq, "%i\n", final_brightness);
+
+	return 0;
+}
+
+static int psb_blc_proc_open(struct inode *inode, struct file *file)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+	return single_open(file, psb_blc_proc_show, PDE(inode));
+#else
+	return single_open(file, psb_blc_proc_show, PDE_DATA(inode));
+#endif
+}
+
+static const struct file_operations psb_blc_proc_fops = {
+	.owner		= THIS_MODULE,
+	.open		= psb_blc_proc_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int psb_rtpm_read(struct file *file, char __user *buf,
+				    size_t nbytes,loff_t *ppos)
+{
+	PSB_DEBUG_ENTRY("Current Runtime PM delay for GFX: %d (ms)\n", gfxrtdelay);
+
+	return 0;
+}
+
+static int psb_rtpm_write(struct file *file, const char *buffer,
+			  size_t count, loff_t *ppos)
+{
+	char buf[2];
+	int temp = 0;
+	if (count != sizeof(buf)) {
+		return -EINVAL;
+	} else {
+		if (copy_from_user(buf, buffer, count))
+			return -EINVAL;
+		if (buf[count-1] != '\n')
+			return -EINVAL;
+		temp = buf[0] - '0';
+		switch (temp) {
+		case 1:
+			gfxrtdelay = 10 * 1000;
+			break;
+
+		case 2:
+			gfxrtdelay = 20 * 1000;
+			break;
+		default:
+			gfxrtdelay = 30 * 1000;
+			break;
+		}
+		PSB_DEBUG_ENTRY("Runtime PM delay set for GFX: %d (ms)\n", gfxrtdelay);
+	}
+	return count;
+}
+
+static int psb_ospm_read(struct file *file, char __user *buf,
+				    size_t nbytes,loff_t *ppos)
+{
+	struct drm_minor *minor = (struct drm_minor *)PDE_DATA(file_inode(file));
+	struct drm_device *dev = minor->dev;
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) dev->dev_private;
+#ifdef OSPM_STAT
+	unsigned long on_time = 0;
+	unsigned long off_time = 0;
+#endif
+
+
+	/*#ifdef SUPPORT_ACTIVE_POWER_MANAGEMENT
+	    DRM_INFO("GFX D0i3: enabled	      ");
+	#else
+	    DRM_INFO("GFX D0i3: disabled	      ");
+	#endif*/
+	if (drm_psb_ospm)
+		DRM_INFO("GFX D0i3: enabled	      ");
+	else
+		DRM_INFO("GFX D0i3: disabled	      ");
+
+#ifdef OSPM_STAT
+	switch (dev_priv->graphics_state) {
+	case PSB_PWR_STATE_ON:
+		DRM_INFO("GFX state:%s\n", "on");
+		break;
+	case PSB_PWR_STATE_OFF:
+		DRM_INFO("GFX state:%s\n", "off");
+		break;
+	default:
+		DRM_INFO("GFX state:%s\n", "unknown");
+	}
+
+	on_time = dev_priv->gfx_on_time * 1000 / HZ;
+	off_time = dev_priv->gfx_off_time * 1000 / HZ;
+	switch (dev_priv->graphics_state) {
+	case PSB_PWR_STATE_ON:
+		on_time += (jiffies - dev_priv->gfx_last_mode_change) * \
+			   1000 / HZ;
+		break;
+	case PSB_PWR_STATE_OFF:
+		off_time += (jiffies - dev_priv->gfx_last_mode_change) * \
+			    1000 / HZ;
+		break;
+	}
+	DRM_INFO("GFX(count/ms):\n");
+	DRM_INFO("on:%lu/%lu, off:%lu/%lu \n",
+		 dev_priv->gfx_on_cnt, on_time, dev_priv->gfx_off_cnt, off_time);
+#endif
+	return nbytes;
+}
+
+
+static int psb_ospm_write(struct file *file, const char *buffer,
+			  size_t count, loff_t *ppos)
+{
+	char buf[2];
+	if (count != sizeof(buf)) {
+		return -EINVAL;
+	} else {
+		if (copy_from_user(buf, buffer, count))
+			return -EINVAL;
+		if (buf[count-1] != '\n')
+			return -EINVAL;
+		drm_psb_ospm = buf[0] - '0';
+		PSB_DEBUG_ENTRY(" SGX (D0i3) drm_psb_ospm: %d\n",
+		       drm_psb_ospm);
+	}
+	return count;
+}
+static int psb_panel_register_read(struct file *file, char __user *buf,
+				    size_t nbytes,loff_t *ppos)
+{
+	char msg[PSB_REG_PRINT_SIZE];
+	struct drm_minor *minor = (struct drm_minor *)PDE_DATA(file_inode(file));
+	struct drm_device *dev = minor->dev;
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) dev->dev_private;
+	/*do nothing*/
+
+	if (dev_priv->buf && dev_priv->count < PSB_REG_PRINT_SIZE)
+		memcpy(msg, dev_priv->buf, dev_priv->count);
+	return simple_read_from_buffer(buf, nbytes, ppos, msg, dev_priv->count);
+}
+/*
+* use to read and write panel side register. and print to standard output.
+*/
+#define GENERIC_READ_FIFO_SIZE_MAX 0x40
+static int psb_panel_register_write(struct file *file, const char *buffer,
+				      size_t count, loff_t *ppos)
+{
+	struct drm_minor *minor = (struct drm_minor *)PDE_DATA(file_inode(file));
+	struct drm_device *dev = minor->dev;
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) dev->dev_private;
+	struct mdfld_dsi_config *dsi_config = dev_priv->dsi_configs[0];
+	char buf[256];
+	char op = '0';
+	char type = '0';
+	int  cmd = 0;
+	char par[256];
+	int  pnum = 0;
+	int  par_offset = 0;
+	int  add_size = 0;
+	int  ret = 0;
+	u8 *pdata = NULL;
+	int  i = 0;
+
+	if (!dsi_config)
+		return -EINVAL;
+
+	dev_priv->count = 0;
+	memset(buf, '\0', sizeof(buf));
+
+	if (count > sizeof(buf)) {
+		PSB_DEBUG_ENTRY(
+			"The input is too bigger, kernel can not handle.\n");
+		return -EINVAL;
+	} else {
+		if (copy_from_user(buf, buffer, count))
+			return -EINVAL;
+		if (buf[count-1] != '\n')
+			return -EINVAL;
+		PSB_DEBUG_ENTRY("input = %s", buf);
+	}
+
+	sscanf(buf, "%c%c%x%x", &op, &type, &cmd, &pnum);
+	par_offset = (sizeof("xx xx xx ") - 2);
+	memcpy(par, buf + par_offset, 256 - par_offset);
+
+	if (op != 'g' && op != 's') {
+		PSB_DEBUG_ENTRY("The input format is not right!\n");
+		PSB_DEBUG_ENTRY(
+			"sg: send generic. sm: send mcs. gg: get state\n");
+		PSB_DEBUG_ENTRY(
+			"gg  cmd count (gg a 01 :get panel status.)\n");
+		PSB_DEBUG_ENTRY(
+			"sg  cmd count par (sg 2c 00:set write_mem_start.)\n");
+		PSB_DEBUG_ENTRY(
+			"sm  00  count cmd+par(sm 00 01 28:set display on)\n");
+		return -EINVAL;
+	}
+	PSB_DEBUG_ENTRY("op= %c type= %c cmd=%x pnum=%x\n",
+			op, type, cmd, pnum);
+	PSB_DEBUG_ENTRY("par =%s", par);
+
+	if (op == 'g' && pnum == 0) {
+		PSB_DEBUG_ENTRY("get status must has parameter count!");
+		sprintf(dev_priv->buf,
+			"get status must has parameter count!\n");
+		return -EINVAL;
+	}
+
+	if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
+				       OSPM_UHB_FORCE_POWER_ON)) {
+		PSB_DEBUG_ENTRY("Display controller can not power on.!\n");
+		return -EPERM;
+	}
+	/*forbid dsr which will restore regs*/
+	mdfld_dsi_dsr_forbid(dsi_config);
+
+	if (op == 'g' && pnum <= GENERIC_READ_FIFO_SIZE_MAX) {
+		pdata = kmalloc(sizeof(u8)*pnum, GFP_KERNEL);
+		if (!pdata) {
+			DRM_ERROR("No memory for long_pkg data\n");
+			ret = -ENOMEM;
+			goto fun_exit;
+		}
+		ret = mdfld_dsi_get_panel_status(dsi_config, cmd,
+				pdata , MDFLD_DSI_LP_TRANSMISSION, pnum);
+		if (ret == pnum && ret != 0) {
+			PSB_DEBUG_ENTRY("read panel status\n");
+			PSB_DEBUG_ENTRY("cmd : 0x%02x\n", cmd);
+			add_size = sizeof("cmd : 0xFF\n");
+			if (dev_priv->buf && (dev_priv->count + add_size)
+					 < PSB_REG_PRINT_SIZE)
+				dev_priv->count += sprintf(
+						dev_priv->buf + dev_priv->count,
+						"cmd : 0x%02x\n", cmd);
+			for (i = 0; i < pnum; i++) {
+				PSB_DEBUG_ENTRY("par%d= 0x%02x\n", i, pdata[i]);
+				add_size = sizeof("par1=0xFF 0xFF\n");
+			  if (dev_priv->buf && (dev_priv->count + add_size)
+						 < PSB_REG_PRINT_SIZE)
+					dev_priv->count += sprintf(
+					    dev_priv->buf + dev_priv->count,
+					   "par%d= 0x%02x\n", i, pdata[i]);
+			}
+		} else {
+			PSB_DEBUG_ENTRY("get panel status fail\n");
+			sprintf(dev_priv->buf, "get panel status fail\n");
+		}
+
+		kfree(pdata);
+	}
+	if (op == 's' && pnum <= GENERIC_READ_FIFO_SIZE_MAX) {
+		struct mdfld_dsi_pkg_sender *sender =
+				 mdfld_dsi_get_pkg_sender(dsi_config);
+		if (!sender) {
+			DRM_ERROR("Invalid sender\n");
+			return -EINVAL;
+		}
+		pdata = kmalloc(sizeof(u8)*pnum, GFP_KERNEL);
+		if (!pdata) {
+			DRM_ERROR("No memory for long_pkg data\n");
+			ret = -ENOMEM;
+			goto fun_exit;
+		}
+		for (i = 0; i < pnum; i++)
+			sscanf(par + i * 3, "%x", &pdata[i]);
+
+		if (cmd == 0 && pnum != 0) {
+			if (type == 'g')
+				ret = mdfld_dsi_send_gen_long_hs(
+						sender, pdata, pnum, 0);
+			else if (type == 'm')
+				ret = mdfld_dsi_send_mcs_long_hs(
+						sender, pdata, pnum, 0);
+		}
+		else {
+			if (cmd == 0x2c)
+				atomic64_inc(&sender->te_seq);
+			ret = mdfld_dsi_send_dcs(sender,
+					cmd,
+					pdata,
+					pnum,
+					CMD_DATA_SRC_SYSTEM_MEM,
+					MDFLD_DSI_SEND_PACKAGE);
+		}
+		if (ret) {
+			PSB_DEBUG_ENTRY("set panel status fail!\n");
+			sprintf(dev_priv->buf, "set panel status fail!\n");
+		} else {
+			PSB_DEBUG_ENTRY("set panel status ok!\n");
+			sprintf(dev_priv->buf, "set panel status ok\n");
+		}
+		kfree(pdata);
+	}
+fun_exit:
+	/*allow entering dsr*/
+	mdfld_dsi_dsr_allow(dsi_config);
+	ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+	return count;
+}
+
+static int psb_display_register_read(struct file *file, char __user *buf,
+				    size_t nbytes,loff_t *ppos)
+{
+	char msg[PSB_REG_PRINT_SIZE];
+	struct drm_minor *minor = (struct drm_minor *)PDE_DATA(file_inode(file));
+	struct drm_device *dev = minor->dev;
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) dev->dev_private;
+	/*do nothing*/
+
+	if (dev_priv->buf && dev_priv->count < PSB_REG_PRINT_SIZE)
+		memcpy(msg, dev_priv->buf, dev_priv->count);
+	return simple_read_from_buffer(buf, nbytes, ppos, msg, dev_priv->count);
+}
+/*
+* use to read and write display register. and print to standard output.
+*/
+static int psb_display_register_write(struct file *file, const char *buffer,
+				      size_t count, loff_t *ppos)
+{
+	struct drm_minor *minor = (struct drm_minor *)PDE_DATA(file_inode(file));
+	struct drm_device *dev = minor->dev;
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) dev->dev_private;
+	struct mdfld_dsi_config *dsi_config = dev_priv->dsi_configs[0];
+	int reg_val = 0;
+	char buf[256];
+	char op = '0';
+	int  reg = 0, start = 0, end = 0;
+	unsigned int  val = 0;
+	int  len = 0;
+	int  Offset = 0;
+	int  add_size = 0;
+	int  ret = 0;
+
+	if (!dsi_config)
+		return -EINVAL;
+
+	dev_priv->count = 0;
+	memset(buf, '\0', sizeof(buf));
+
+	if (count > sizeof(buf)) {
+		PSB_DEBUG_ENTRY("The input is too bigger, kernel can not handle.\n");
+		return -EINVAL;
+	} else {
+		if (copy_from_user(buf, buffer, count))
+			return -EINVAL;
+		if (buf[count-1] != '\n')
+			return -EINVAL;
+		PSB_DEBUG_ENTRY("input = %s", buf);
+	}
+
+	sscanf(buf, "%c%x%x", &op, &reg, &val);
+
+	if (op != 'r' && op != 'w' && op != 'a') {
+		PSB_DEBUG_ENTRY("The input format is not right!\n");
+		PSB_DEBUG_ENTRY("for exampe: r 70184		(read register 70184.)\n");
+		PSB_DEBUG_ENTRY("for exampe: w 70184 123	(write register 70184 with value 123.)\n");
+		PSB_DEBUG_ENTRY("for exmape: a 60000 60010(read all registers start at 60000 and end at 60010.\n)");
+		return -EINVAL;
+	}
+	if ((reg < 0xa000 || reg >  0x720ff) && (reg < 0x40 || reg >  0x64)) {
+		PSB_DEBUG_ENTRY("the register is out of display controller registers rang.\n");
+		return -EINVAL;
+	}
+
+	if ((reg % 0x4) != 0) {
+		PSB_DEBUG_ENTRY("the register address should aligned to 4 byte.please refrence display controller specification.\n");
+		return -EINVAL;
+	}
+
+	if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
+				       OSPM_UHB_FORCE_POWER_ON)) {
+		PSB_DEBUG_ENTRY("Display controller can not power on.!\n");
+		return -EPERM;
+	}
+	/*forbid dsr which will restore regs*/
+	mdfld_dsi_dsr_forbid(dsi_config);
+
+	if (op == 'r') {
+		if (reg >= 0xa000) {
+			reg_val = REG_READ(reg);
+			PSB_DEBUG_ENTRY("Read :reg=0x%08x , val=0x%08x.\n", reg, reg_val);
+		} else {
+			reg_val = SGX_REG_READ(reg);
+			PSB_DEBUG_ENTRY("SGX Read :reg=0x%08x , val=0x%08x.\n", reg, reg_val);
+		}
+		add_size = sizeof("0xFFFFFFFF 0xFFFFFFFF\n");
+		if (dev_priv->buf &&
+			(dev_priv->count + add_size) < PSB_REG_PRINT_SIZE)
+			dev_priv->count = sprintf(dev_priv->buf, "%08x %08x\n", reg, reg_val);
+	}
+	if (op == 'w') {
+		if (reg >= 0xa000) {
+			reg_val = REG_READ(reg);
+			PSB_DEBUG_ENTRY("Before change:reg=0x%08x , val=0x%08x.\n", reg, reg_val);
+			REG_WRITE(reg, val);
+			reg_val = REG_READ(reg);
+			PSB_DEBUG_ENTRY("After change:reg=0x%08x , val=0x%08x.\n", reg, reg_val);
+		} else {
+			reg_val = SGX_REG_READ(reg);
+			PSB_DEBUG_ENTRY("Before change: sgx reg=0x%08x , val=0x%08x.\n", reg, reg_val);
+			SGX_REG_WRITE(reg, val);
+			reg_val = SGX_REG_READ(reg);
+			PSB_DEBUG_ENTRY("After change:sgx reg=0x%08x , val=0x%08x.\n", reg, reg_val);
+		}
+	}
+
+	if (op == 'a') {
+		start = reg;
+		end = val;
+		PSB_DEBUG_ENTRY("start:0x%08x\n", start);
+		PSB_DEBUG_ENTRY("end:  0x%08x\n", end);
+		if ((start % 0x4) != 0) {
+			PSB_DEBUG_ENTRY("The start address should be 4 byte aligned. Please reference the display controller specification.\n");
+			ret = -EINVAL;
+			goto fun_exit;
+		}
+
+		if ((end % 0x4) != 0) {
+			PSB_DEBUG_ENTRY("The end address should be 4 byte aligned. Please reference the display controller specification.\n");
+			ret = -EINVAL;
+			goto fun_exit;
+		}
+
+		len = end - start + 1;
+		if (len <= 0)
+			PSB_DEBUG_ENTRY("The end address should be greater than the start address.\n");
+
+		if (end < 0xa000 || end >  0x720ff) {
+			PSB_DEBUG_ENTRY("The end address is out of the display controller register range.\n");
+			ret = -EINVAL;
+			goto fun_exit;
+		}
+
+		if (start < 0xa000 || start >  0x720ff)	{
+			PSB_DEBUG_ENTRY("The start address is out of the display controller register range.\n");
+			ret = -EINVAL;
+			goto fun_exit;
+		}
+		for (Offset = start ; Offset < end; Offset = Offset + 0x10) {
+			if (reg >= 0xa000) {
+				PSB_DEBUG_ENTRY("0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+					Offset,
+					REG_READ(Offset + 0x0),
+					REG_READ(Offset + 0x4),
+					REG_READ(Offset + 0x8),
+					REG_READ(Offset + 0xc));
+
+			add_size = 5 * sizeof("0xFFFFFFFF ");
+			if (dev_priv->buf &&
+				(dev_priv->count + add_size) < PSB_REG_PRINT_SIZE)
+				dev_priv->count += sprintf(dev_priv->buf + dev_priv->count,
+					"%08x %08x %08x %08x %08x\n",
+					Offset,
+					REG_READ(Offset + 0x0),
+					REG_READ(Offset + 0x4),
+					REG_READ(Offset + 0x8),
+					REG_READ(Offset + 0xc));
+			} else {
+				PSB_DEBUG_ENTRY("0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+					Offset,
+					SGX_REG_READ(Offset + 0x0),
+					SGX_REG_READ(Offset + 0x4),
+					SGX_REG_READ(Offset + 0x8),
+					SGX_REG_READ(Offset + 0xc));
+
+				add_size = 5 * sizeof("0xFFFFFFFF ");
+				if (dev_priv->buf &&
+					(dev_priv->count + add_size) < PSB_REG_PRINT_SIZE)
+					dev_priv->count += sprintf(dev_priv->buf + dev_priv->count,
+					"%08x %08x %08x %08x %08x\n",
+					Offset,
+					SGX_REG_READ(Offset + 0x0),
+					SGX_REG_READ(Offset + 0x4),
+					SGX_REG_READ(Offset + 0x8),
+					SGX_REG_READ(Offset + 0xc));
+			}
+
+		}
+	}
+fun_exit:
+	/*allow entering dsr*/
+	mdfld_dsi_dsr_allow(dsi_config);
+
+	ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+	return count;
+}
+
+static int csc_control_read(struct file *file, char __user *buf,
+				    size_t nbytes,loff_t *ppos)
+{
+	return 0;
+}
+
+static int csc_control_write(struct file *file, const char *buffer,
+				      size_t count, loff_t *ppos)
+{
+	char buf[2];
+	int  csc_control;
+	struct drm_minor *minor = (struct drm_minor *)PDE_DATA(file_inode(file));
+	struct drm_device *dev = minor->dev;
+	struct csc_setting csc;
+	struct gamma_setting gamma;
+
+	if (count != sizeof(buf)) {
+		return -EINVAL;
+	} else {
+		if (copy_from_user(buf, buffer, count))
+			return -EINVAL;
+		if (buf[count-1] != '\n')
+			return -EINVAL;
+		csc_control = buf[0] - '0';
+		PSB_DEBUG_ENTRY(" csc control: %d\n", csc_control);
+
+		switch (csc_control) {
+		case 0x0:
+			csc.pipe = 0;
+			csc.type = CSC_REG_SETTING;
+			csc.enable_state = true;
+			csc.data_len = CSC_REG_COUNT;
+			memcpy(csc.data.csc_reg_data, csc_setting, sizeof(csc.data.csc_reg_data));
+			mdfld_intel_crtc_set_color_conversion(dev, &csc);
+			break;
+		case 0x1:
+			gamma.pipe = 0;
+			gamma.type = GAMMA_REG_SETTING;
+			gamma.enable_state = true;
+			gamma.data_len = GAMMA_10_BIT_TABLE_COUNT;
+			memcpy(gamma.gamma_tableX100, gamma_setting, sizeof(gamma.gamma_tableX100));
+			mdfld_intel_crtc_set_gamma(dev, &gamma);
+			break;
+		default:
+			printk("invalied parameters\n");
+		}
+	}
+	return count;
+}
+
+#ifdef CONFIG_SUPPORT_HDMI
+int gpio_control_read(struct file *file, char __user *buf,
+				    size_t nbytes,loff_t *ppos)
+{
+	unsigned int value = 0;
+	unsigned int pin_num = otm_hdmi_get_hpd_pin();
+	if (pin_num)
+		value = gpio_get_value(pin_num);
+
+	printk(KERN_ALERT "read pin_num: %8d value:%8d\n", pin_num, value);
+	return 0;
+}
+
+int gpio_control_write(struct file *file, const char *buffer,
+				      size_t count, loff_t *ppos)
+{
+	char buf[2];
+	int  gpio_control;
+	int result = 0;
+	unsigned int pin_num = otm_hdmi_get_hpd_pin();
+	bool auto_state = drm_hdmi_hpd_auto;
+
+	if (!pin_num)
+		return -EINVAL;
+
+	if (count != sizeof(buf)) {
+		return -EINVAL;
+	} else {
+		if (copy_from_user(buf, buffer, count))
+			return -EINVAL;
+		if (buf[count-1] != '\n')
+			return -EINVAL;
+		gpio_control = buf[0] - '0';
+
+		printk(KERN_ALERT "GPIO set pin:%8d\n", pin_num);
+		printk(KERN_ALERT "value:%8d\n", gpio_control);
+
+		switch (gpio_control) {
+		case 0x0:
+			result = gpio_direction_output(pin_num, 0);
+			otm_hdmi_override_cable_status(false, auto_state);
+			if (result) {
+				printk(KERN_ALERT "Failed set GPIO as output\n");
+				return -EINVAL;
+			}
+			break;
+		case 0x1:
+			result = gpio_direction_output(pin_num, 0);
+			otm_hdmi_override_cable_status(true, auto_state);
+			if (result) {
+				printk(KERN_ALERT "Failed set GPIO as output\n");
+				return -EINVAL;
+			}
+			break;
+		default:
+			printk(KERN_ALERT "invalied parameters\n");
+		}
+
+		result = gpio_direction_input(pin_num);
+		if (result) {
+			printk(KERN_ALERT "Failed set GPIO as input\n");
+			return -EINVAL;
+		}
+
+	}
+	return count;
+}
+#endif
+
+static const struct file_operations psb_gpio_proc_fops = {
+       .owner = THIS_MODULE,
+       .read = gpio_control_read,
+       .write = gpio_control_write,
+};
+
+static const struct file_operations psb_ospm_proc_fops = {
+       .owner = THIS_MODULE,
+       .read = psb_ospm_read,
+       .write = psb_ospm_write,
+};
+
+static const struct file_operations psb_rtpm_proc_fops = {
+       .owner = THIS_MODULE,
+       .read = psb_rtpm_read,
+       .write = psb_rtpm_write,
+};
+
+static const struct file_operations psb_display_proc_fops = {
+       .owner = THIS_MODULE,
+       .read = psb_display_register_read,
+       .write = psb_display_register_write,
+};
+
+static const struct file_operations psb_panel_proc_fops = {
+       .owner = THIS_MODULE,
+       .read = psb_panel_register_read,
+       .write = psb_panel_register_write,
+};
+
+static const struct file_operations psb_csc_proc_fops = {
+       .owner = THIS_MODULE,
+       .read = csc_control_read,
+       .write = csc_control_write,
+ };
+
+#ifdef CONFIG_SUPPORT_HDMI
+static int psb_hdmi_proc_init(struct drm_minor *minor)
+{
+	struct proc_dir_entry *gpio_setting;
+        gpio_setting = proc_create_data(GPIO_PROC_ENTRY,
+                                0644, minor->proc_root, &psb_gpio_proc_fops, minor);
+
+	if (!gpio_setting)
+		return -1;
+
+	return 0;
+}
+#endif
+
+static int psb_proc_init(struct drm_minor *minor)
+{
+	struct proc_dir_entry *ent;
+	struct proc_dir_entry *ent1;
+	struct proc_dir_entry *rtpm;
+	struct proc_dir_entry *ent_display_status;
+	struct proc_dir_entry *ent_panel_status;
+	struct proc_dir_entry *csc_setting;
+
+        ent = proc_create_data(OSPM_PROC_ENTRY, 0644, minor->proc_root, &psb_ospm_proc_fops, minor);
+        rtpm = proc_create_data(RTPM_PROC_ENTRY, 0644, minor->proc_root, &psb_rtpm_proc_fops, minor);
+        ent_display_status = proc_create_data(DISPLAY_PROC_ENTRY, 0644, minor->proc_root,
+                 &psb_display_proc_fops, minor);
+        ent_panel_status = proc_create_data(PANEL_PROC_ENTRY, 0644, minor->proc_root,
+                &psb_panel_proc_fops, minor);
+        ent1 = proc_create_data(BLC_PROC_ENTRY, 0, minor->proc_root, &psb_blc_proc_fops, minor);
+        csc_setting = proc_create_data(CSC_PROC_ENTRY, 0644, minor->proc_root, &psb_csc_proc_fops, minor);
+
+	if (!ent || !ent1 || !rtpm || !ent_display_status || !ent_panel_status
+		|| !csc_setting)
+		return -1;
+
+#ifdef CONFIG_SUPPORT_HDMI
+	psb_hdmi_proc_init(minor);
+#endif
+
+	return 0;
+}
+
+static void psb_proc_cleanup(struct drm_minor *minor)
+{
+	remove_proc_entry(OSPM_PROC_ENTRY, minor->proc_root);
+	remove_proc_entry(RTPM_PROC_ENTRY, minor->proc_root);
+	remove_proc_entry(BLC_PROC_ENTRY, minor->proc_root);
+#ifdef CONFIG_SUPPORT_HDMI
+	remove_proc_entry(GPIO_PROC_ENTRY, minor->proc_root);
+#endif
+	return;
+}
+#endif /* DISPLAY_DRIVER_DEBUG_INTERFACE */
+
+/* When a client dies:
+ *    - Check for and clean up flipped page state
+ */
+void psb_driver_preclose(struct drm_device *dev, struct drm_file *priv)
+{
+}
+
+static void psb_remove(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	drm_put_dev(dev);
+}
+
+static const struct dev_pm_ops psb_pm_ops = {
+	.runtime_suspend = psb_runtime_suspend,
+	.runtime_resume = psb_runtime_resume,
+	.runtime_idle = psb_runtime_idle,
+	.suspend = psb_runtime_suspend,
+	.resume = psb_runtime_resume,
+};
+
+static struct vm_operations_struct psb_ttm_vm_ops;
+
+/**
+ * NOTE: driver_private of drm_file is now a PVRSRV_FILE_PRIVATE_DATA struct
+ * pPriv in PVRSRV_FILE_PRIVATE_DATA contains the original psb_fpriv;
+ */
+int psb_open(struct inode *inode, struct file *filp)
+{
+	struct drm_file *file_priv;
+	struct drm_psb_private *dev_priv;
+	struct psb_fpriv *psb_fp;
+	PVRSRV_FILE_PRIVATE_DATA *pvr_file_priv;
+	int ret;
+
+	DRM_DEBUG("\n");
+
+	ret = drm_open(inode, filp);
+	if (unlikely(ret))
+		return ret;
+
+	psb_fp = kzalloc(sizeof(*psb_fp), GFP_KERNEL);
+
+	if (unlikely(psb_fp == NULL))
+		goto out_err0;
+
+	file_priv = (struct drm_file *) filp->private_data;
+
+	/* In case that the local file priv has created a master,
+	 * which has been referenced, even if it's not authenticated
+	 * (non-root user). */
+	if ((file_priv->minor->master)
+		&& (file_priv->master == file_priv->minor->master)
+		&& (!file_priv->is_master))
+		file_priv->is_master = 1;
+
+	dev_priv = psb_priv(file_priv->minor->dev);
+
+	DRM_DEBUG("is_master %d\n", file_priv->is_master ? 1 : 0);
+
+	psb_fp->tfile = ttm_object_file_init(dev_priv->tdev,
+					     PSB_FILE_OBJECT_HASH_ORDER);
+	if (unlikely(psb_fp->tfile == NULL))
+		goto out_err1;
+
+	pvr_file_priv = (PVRSRV_FILE_PRIVATE_DATA *)file_priv->driver_priv;
+	if (!pvr_file_priv) {
+		DRM_ERROR("drm file private is NULL\n");
+		goto out_err1;
+	}
+
+	pvr_file_priv->pPriv = psb_fp;
+	if (unlikely(dev_priv->bdev.dev_mapping == NULL))
+		dev_priv->bdev.dev_mapping = dev_priv->dev->dev_mapping;
+
+	return 0;
+
+out_err1:
+	kfree(psb_fp);
+out_err0:
+	(void) drm_release(inode, filp);
+	return ret;
+}
+
+int psb_release(struct inode *inode, struct file *filp)
+{
+	struct psb_fpriv *psb_fp;
+	struct drm_psb_private *dev_priv;
+	struct msvdx_private *msvdx_priv;
+	int ret, island_is_on;
+	struct drm_file *file_priv = (struct drm_file *) filp->private_data;
+#ifdef CONFIG_VIDEO_MRFLD
+	struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
+	int i;
+	struct psb_msvdx_ec_ctx *ec_ctx;
+#endif
+	struct mdfld_dsi_config *dsi_config;
+	psb_fp = psb_fpriv(file_priv);
+	dev_priv = psb_priv(file_priv->minor->dev);
+
+#ifdef CONFIG_MDFD_VIDEO_DECODE
+
+	msvdx_priv = (struct msvdx_private *)dev_priv->msvdx_private;
+
+#ifdef CONFIG_VIDEO_MRFLD
+	/*cleanup for msvdx*/
+#if 0
+	if (msvdx_priv->tfile == psb_fpriv(file_priv)->tfile) {
+		msvdx_priv->decoding_err = 0;
+		msvdx_priv->host_be_opp_enabled = 0;
+		memset(&msvdx_priv->frame_info, 0, sizeof(struct drm_psb_msvdx_frame_info) * MAX_DECODE_BUFFERS);
+	}
+#endif
+
+	if (msvdx_priv->msvdx_ec_ctx[0] != NULL) {
+		for (i = 0; i < PSB_MAX_EC_INSTANCE; i++) {
+			if (msvdx_priv->msvdx_ec_ctx[i]->tfile == tfile)
+				break;
+		}
+
+		if (i < PSB_MAX_EC_INSTANCE) {
+			ec_ctx = msvdx_priv->msvdx_ec_ctx[i];
+			printk(KERN_DEBUG "remove ec ctx with tfile 0x%08x\n",
+			       ec_ctx->tfile);
+			ec_ctx->tfile = NULL;
+			ec_ctx->fence = PSB_MSVDX_INVALID_FENCE;
+		}
+	}
+#endif
+
+	ttm_object_file_release(&psb_fp->tfile);
+#endif
+
+	if (psb_fp->dsr_blocked) {
+		dsi_config = dev_priv->dsi_configs[0];
+		mdfld_dsi_dsr_allow(dsi_config);
+	}
+
+	kfree(psb_fp);
+
+#ifdef CONFIG_MDFD_VIDEO_DECODE
+	/* remove video context */
+	psb_remove_videoctx(dev_priv, filp);
+
+#ifdef PSB_DRAM_SELF_REFRESH
+	/* FIXME: workaround for MRST HSD3469585
+	 *        re-enable DRAM Self Refresh Mode
+	 *        by setting DUNIT.DPMC0
+	 */
+	int ui32_reg_value = intel_mid_msgbus_read32_raw((0xD0 << 24) |
+		(0x1 << 16) | (0x4 << 8) | 0xF0);
+	intel_mid_msgbus_write32_raw((0xE0 << 24) | (0x1 << 16) |
+		(0x4 << 8) | 0xF0, ui32_reg_value | (0x1 << 7));
+#endif
+	if (IS_MDFLD(dev_priv->dev)) {
+		struct pnw_topaz_private *topaz_priv =
+			(struct pnw_topaz_private *)dev_priv->topaz_private;
+		if (drm_topaz_pmpolicy == PSB_PMPOLICY_POWERDOWN)
+			schedule_delayed_work(&topaz_priv->topaz_suspend_wq,
+						msecs_to_jiffies(10));
+	}
+
+	island_is_on = ospm_power_is_hw_on(OSPM_VIDEO_DEC_ISLAND);
+
+	if ((drm_msvdx_pmpolicy == PSB_PMPOLICY_POWERDOWN) && island_is_on) {
+		PSB_DEBUG_PM("MSVDX: psb_release schedule msvdx suspend.\n");
+		schedule_delayed_work(&msvdx_priv->msvdx_suspend_wq,
+					msecs_to_jiffies(10));
+	}
+#endif
+	ret = drm_release(inode, filp);
+
+	return ret;
+}
+
+/**
+ * if vm_pgoff < DRM_PSB_FILE_PAGE_OFFSET call directly to PVRMMap
+ */
+int psb_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_file *file_priv;
+	struct drm_psb_private *dev_priv;
+	int ret;
+
+	file_priv = (struct drm_file *) filp->private_data;
+	dev_priv = psb_priv(file_priv->minor->dev);
+
+	if (vma->vm_pgoff < DRM_PSB_FILE_PAGE_OFFSET ||
+			vma->vm_pgoff > 2 * DRM_PSB_FILE_PAGE_OFFSET) {
+		BUG_ON(!dev_priv->pvr_ops);
+		return dev_priv->pvr_ops->PVRMMap(filp, vma);
+	}
+
+	ret = ttm_bo_mmap(filp, vma, &dev_priv->bdev);
+	if (unlikely(ret != 0))
+		return ret;
+
+	if (unlikely(dev_priv->ttm_vm_ops == NULL)) {
+		dev_priv->ttm_vm_ops = (struct vm_operations_struct *)vma->vm_ops;
+		psb_ttm_vm_ops = *vma->vm_ops;
+#ifdef CONFIG_MDFD_VIDEO_DECODE
+		psb_ttm_vm_ops.fault = &psb_ttm_fault;
+#endif
+	}
+
+	vma->vm_ops = &psb_ttm_vm_ops;
+
+	return 0;
+}
+
+static const struct file_operations driver_psb_fops = {
+	.owner = THIS_MODULE,
+	.open = psb_open,
+	.release = psb_release,
+	.unlocked_ioctl = psb_unlocked_ioctl,
+	.mmap = psb_mmap,
+	.poll = psb_poll,
+	.fasync = drm_fasync,
+	.read = drm_read,
+};
+
+static struct drm_driver driver = {
+	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | \
+	DRIVER_IRQ_VBL | DRIVER_MODESET,
+	.load = psb_driver_load,
+	.unload = psb_driver_unload,
+
+	.ioctls = psb_ioctls,
+	.num_ioctls = DRM_ARRAY_SIZE(psb_ioctls),
+	.device_is_agp = psb_driver_device_is_agp,
+	.irq_preinstall = psb_irq_preinstall,
+	.irq_postinstall = psb_irq_postinstall,
+	.irq_uninstall = psb_irq_uninstall,
+	.irq_handler = psb_irq_handler,
+	.enable_vblank = psb_enable_vblank,
+	.disable_vblank = psb_disable_vblank,
+	.get_vblank_counter = psb_get_vblank_counter,
+	.firstopen = NULL,
+	.lastclose = psb_lastclose,
+	.open = psb_driver_open,
+	.postclose = PVRSRVDrmPostClose2,
+#ifdef DISPLAY_DRIVER_DEBUG_INTERFACE
+	.debugfs_init = psb_proc_init,
+	.debugfs_cleanup = psb_proc_cleanup,
+#else
+	.debugfs_init = NULL,
+	.debugfs_cleanup = NULL,
+#endif
+	.preclose = psb_driver_preclose,
+	.fops = &driver_psb_fops,
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+	.date = PSB_DRM_DRIVER_DATE,
+	.major = PSB_DRM_DRIVER_MAJOR,
+	.minor = PSB_DRM_DRIVER_MINOR,
+	.patchlevel = PSB_DRM_DRIVER_PATCHLEVEL
+};
+
+static struct pci_driver psb_pci_driver = {
+	.name = DRIVER_NAME,
+	.id_table = pciidlist,
+	.probe = psb_probe,
+	.remove = psb_remove,
+#ifdef CONFIG_PM
+	.driver.pm = &psb_pm_ops,
+#endif
+};
+
+static int psb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	return drm_get_pci_dev(pdev, ent, &driver);
+}
+
+#ifndef MODULE
+static __init int parse_panelid(char *arg)
+{
+	/* panel ID can be passed in as a cmdline parameter */
+	/* to enable this feature add panelid=TMD to cmdline for TMD panel*/
+	if (!arg)
+		return -EINVAL;
+
+	return 0;
+}
+early_param("panelid", parse_panelid);
+#endif
+
+#ifndef MODULE
+static __init int parse_hdmi_edid(char *arg)
+{
+	/* HDMI EDID info can be passed in as a cmdline parameter,
+	 * and need to remove it after we can get EDID info via MSIC.*/
+	if ((!arg) || (strlen(arg) >= HDMI_MONITOR_NAME_LENGTH))
+		return -EINVAL;
+
+	strncpy(HDMI_EDID, arg, HDMI_MONITOR_NAME_LENGTH - 1);
+
+	return 0;
+}
+early_param("hdmi_edid", parse_hdmi_edid);
+#endif
+
+static int __init psb_init(void)
+{
+	int ret;
+
+#if defined(MODULE) && defined(CONFIG_NET)
+#ifdef CONFIG_SUPPORT_HDMI
+	psb_kobject_uevent_init();
+#endif
+#endif
+
+#if 0
+	/* delay this until PVRSRVDrmLoad is to be loaded */
+	ret = SYSPVRInit();
+	if (ret != 0) {
+		return ret;
+	}
+#endif
+
+	ret = drm_pci_init(&driver, &psb_pci_driver);
+	if (ret != 0) {
+		return ret;
+	}
+
+#ifdef CONFIG_SUPPORT_HDMI
+	if (gpDrmDevice) {
+		struct drm_psb_private *dev_priv = NULL;
+		dev_priv = (struct drm_psb_private *)gpDrmDevice->dev_private;
+		if (dev_priv)
+			otm_hdmi_hpd_init();
+	}
+#endif
+
+#ifdef CONFIG_SUPPORT_MIPI_H8C7_CMD_DISPLAY
+	ret = platform_driver_register(&h8c7_lcd_driver);
+	if (ret != 0) {
+		return ret;
+	}
+#endif
+
+#ifdef CONFIG_SUPPORT_VB_MIPI_DISPLAY
+	ret = platform_driver_register(&vb_lcd_driver);
+	if (ret != 0) {
+		return ret;
+	}
+#endif
+
+#ifdef CONFIG_R63311_MIPI_VIDEO_MODE
+	ret = platform_driver_register(&jdi_r63311_lcd_driver);
+	if (ret != 0) {
+		return ret;
+	}
+#endif
+
+#ifdef CONFIG_SUPPORT_TMD_MIPI_600X1024_DISPLAY
+	ret = platform_driver_register(&tmd_lcd_driver);
+#endif
+
+	return ret;
+}
+
+static void __exit psb_exit(void)
+{
+#ifdef CONFIG_SUPPORT_HDMI
+	if (gpDrmDevice) {
+		struct drm_psb_private *dev_priv = NULL;
+		dev_priv = (struct drm_psb_private *)gpDrmDevice->dev_private;
+		if (dev_priv)
+			otm_hdmi_hpd_deinit();
+	}
+#endif
+	drm_pci_exit(&driver, &psb_pci_driver);
+}
+
+module_init(psb_init);
+module_exit(psb_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/external_drivers/intel_media/common/psb_drv.h b/drivers/external_drivers/intel_media/common/psb_drv.h
new file mode 100644
index 0000000..bc42c9c
--- /dev/null
+++ b/drivers/external_drivers/intel_media/common/psb_drv.h
@@ -0,0 +1,1382 @@
+/**************************************************************************
+ * Copyright (c) 2007-2008, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#ifndef _PSB_DRV_H_
+#define _PSB_DRV_H_
+
+#include <linux/version.h>
+
+#include <drm/drmP.h>
+#include "sys_pvr_drm_export.h"
+#include "psb_drm.h"
+#include "psb_reg.h"
+#include "psb_dpst.h"
+#include "psb_gtt.h"
+#include "psb_powermgmt.h"
+#include "ttm/ttm_object.h"
+#include "psb_ttm_fence_driver.h"
+#include "psb_ttm_userobj_api.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_lock.h"
+#include "psb_intel_display.h"
+#include "psb_video_drv.h"
+
+/*IMG headers*/
+#include "private_data.h"
+#include "pvr_drm.h"
+
+#include "mdfld_hdmi_audio_if.h"
+
+/*Append new drm mode definition here, align with libdrm definition*/
+#define DRM_MODE_SCALE_NO_SCALE   4
+
+extern struct drm_device *gpDrmDevice;
+
+/* sys interface variables */
+extern bool gbdispstatus;
+extern int drm_psb_debug;
+extern int drm_psb_enable_cabc ;
+extern int gfxrtdelay;
+extern int drm_psb_te_timer_delay;
+extern int drm_psb_enable_gamma;
+extern int drm_psb_adjust_contrast;
+extern int drm_psb_adjust_brightness;
+extern int drm_psb_enable_color_conversion;
+extern u32 DISP_PLANEB_STATUS;
+extern int drm_psb_use_cases_control;
+extern int dpst_level;
+
+
+extern struct ttm_bo_driver psb_ttm_bo_driver;
+
+enum {
+	CHIP_PSB_8108 = 0,
+	CHIP_PSB_8109 = 1,
+	CHIP_MRST_4100 = 2,
+	CHIP_MDFLD_0130 = 3
+};
+
+#define PNW_GCT_NDX_OEM		0
+#define PNW_GCT_NDX_STD		1
+#define PNW_GCT_NDX_TMD		2
+#define PNW_GCT_NDX_TPO		3
+
+#define CLV_GCT_NDX_DEFAULT		0
+#define CLV_GCT_NDX_OEM		1
+#define CLV_GCT_NDX_STD		2
+
+#define PCI_ID_TOPAZ_DISABLED 0x4101
+
+/*
+ *Hardware bugfixes
+ */
+#define FIX_TG_16
+#define FIX_TG_2D_CLOCKGATE
+#define OSPM_STAT
+
+#define DRIVER_NAME "pvrsrvkm"
+#define DRIVER_DESC "drm driver for the Intel GMA500"
+#define DRIVER_AUTHOR "Intel Corporation"
+#define OSPM_PROC_ENTRY "ospm"
+#define RTPM_PROC_ENTRY "rtpm"
+#define BLC_PROC_ENTRY "mrst_blc"
+#define DISPLAY_PROC_ENTRY "display_status"
+#define PANEL_PROC_ENTRY "panel_status"
+#define CSC_PROC_ENTRY "csc_control"
+#define GPIO_PROC_ENTRY "hdmi_gpio_control"
+
+
+#define PSB_DRM_DRIVER_DATE "2009-03-10"
+#define PSB_DRM_DRIVER_MAJOR 8
+#define PSB_DRM_DRIVER_MINOR 1
+#define PSB_DRM_DRIVER_PATCHLEVEL 0
+
+#define PSB_VDC_OFFSET		 0x00000000
+#define PSB_VDC_SIZE		 0x000080000
+#define MRST_MMIO_SIZE		 0x0000C0000
+#define MDFLD_MMIO_SIZE          0x000100000
+#define PSB_SGX_SIZE		 0x8000
+#define PSB_SGX_OFFSET		 0x00040000
+#ifdef CONFIG_MDFD_GL3
+#define MDFLD_GL3_OFFSET	 0x00000000
+#define MDFLD_GL3_SIZE		 0x00040000
+#endif
+#define MRST_SGX_OFFSET		 0x00080000
+#define PSB_MMIO_RESOURCE	 0
+#define PSB_GATT_RESOURCE	 2
+#define PSB_GTT_RESOURCE	 3
+#define PSB_GMCH_CTRL		 0x52
+#define PSB_BSM			 0x5C
+#define _PSB_GMCH_ENABLED	 0x4
+#define PSB_PGETBL_CTL		 0x2020
+#define _PSB_PGETBL_ENABLED	 0x00000001
+#define PSB_SGX_2D_SLAVE_PORT	 0x4000
+
+/* MSVDX MMIO region is 0x50000 - 0x57fff ==> 32KB */
+#define MRST_MSVDX_OFFSET	0x90000	/*MSVDX Base offset */
+#define PSB_MSVDX_SIZE		0x10000
+
+#define PSB_IED_DRM_CNTL_STATUS		0x2208
+#define IED_DRM_VLD			(1<<0)
+
+#define LNC_TOPAZ_OFFSET	0xA0000
+#define PNW_TOPAZ_OFFSET	0xC0000
+#define PNW_GL3_OFFSET		0xB0000
+#define LNC_TOPAZ_SIZE		0x10000
+#define PNW_TOPAZ_SIZE		0x30000 /* PNW VXE285 has two cores */
+
+#define PSB_MMU_CACHED_MEMORY	  0x0001	/* Bind to MMU only */
+#define PSB_MMU_RO_MEMORY	  0x0002	/* MMU RO memory */
+#define PSB_MMU_WO_MEMORY	  0x0004	/* MMU WO memory */
+
+/*
+ *PTE's and PDE's
+ */
+
+#define PSB_PDE_MASK		  0x003FFFFF
+#define PSB_PDE_SHIFT		  22
+#define PSB_PTE_SHIFT		  12
+
+#define PSB_PTE_VALID		  0x0001	/* PTE / PDE valid */
+#define PSB_PTE_WO		  0x0002	/* Write only */
+#define PSB_PTE_RO		  0x0004	/* Read only */
+#define PSB_PTE_CACHED		  0x0008	/* CPU cache coherent */
+
+/*
+ *VDC registers and bits
+ */
+#define PSB_GFX_CLOCKGATING	  0x2060
+#define PSB_MSVDX_CLOCKGATING	  0x2064
+#define PSB_TOPAZ_CLOCKGATING	  0x2068
+#define PSB_HWSTAM		  0x2098
+#define PSB_INSTPM		  0x20C0
+#define PSB_INT_IDENTITY_R        0x20A4
+#define _MDFLD_PIPEC_EVENT_FLAG   (1<<2)
+#define _MDFLD_PIPEC_VBLANK_FLAG  (1<<3)
+#define _PSB_DPST_PIPEB_FLAG      (1<<4)
+#define _MDFLD_PIPEB_EVENT_FLAG   (1<<4)
+#define _PSB_VSYNC_PIPEB_FLAG	  (1<<5)
+#define _PSB_DPST_PIPEA_FLAG      (1<<6)
+#define _PSB_PIPEA_EVENT_FLAG     (1<<6)
+#define _PSB_VSYNC_PIPEA_FLAG	  (1<<7)
+#define _MDFLD_MIPIA_FLAG	  (1<<16)
+#define _MDFLD_MIPIC_FLAG	  (1<<17)
+#define _PSB_IRQ_SGX_FLAG	  (1<<18)
+#define _PSB_IRQ_MSVDX_FLAG	  (1<<19)
+#define _LNC_IRQ_TOPAZ_FLAG	  (1<<20)
+#ifdef CONFIG_MDFD_GL3
+#define _MDFLD_GL3_IRQ_FLAG	  (1<<21)
+#define _MDFLD_GL3_ECC_FLAG	  (1<<2)  /* unrecoverable ecc error.  We must flush and reset */
+#endif
+
+/* This flag includes all the display IRQ bits excepts the vblank irqs. */
+#define _MDFLD_DISP_ALL_IRQ_FLAG (_MDFLD_PIPEC_EVENT_FLAG | _MDFLD_PIPEB_EVENT_FLAG | \
+	_PSB_PIPEA_EVENT_FLAG | _PSB_VSYNC_PIPEA_FLAG | _MDFLD_MIPIA_FLAG | _MDFLD_MIPIC_FLAG)
+#define PSB_INT_IDENTITY_R	  0x20A4
+#define PSB_INT_MASK_R		  0x20A8
+#define PSB_INT_ENABLE_R	  0x20A0
+
+#define _PSB_MMU_ER_MASK      0x0001FF00
+#define _PSB_MMU_ER_HOST      (1 << 16)
+#define GPIOA			0x5010
+#define GPIOB			0x5014
+#define GPIOD			0x501c
+#define GPIOE			0x5020
+#define GPIOF			0x5024
+#define GPIOG			0x5028
+#define GPIOH			0x502c
+#define GPIO_CLOCK_DIR_MASK		(1 << 0)
+#define GPIO_CLOCK_DIR_IN		(0 << 1)
+#define GPIO_CLOCK_DIR_OUT		(1 << 1)
+#define GPIO_CLOCK_VAL_MASK		(1 << 2)
+#define GPIO_CLOCK_VAL_OUT		(1 << 3)
+#define GPIO_CLOCK_VAL_IN		(1 << 4)
+#define GPIO_CLOCK_PULLUP_DISABLE	(1 << 5)
+#define GPIO_DATA_DIR_MASK		(1 << 8)
+#define GPIO_DATA_DIR_IN		(0 << 9)
+#define GPIO_DATA_DIR_OUT		(1 << 9)
+#define GPIO_DATA_VAL_MASK		(1 << 10)
+#define GPIO_DATA_VAL_OUT		(1 << 11)
+#define GPIO_DATA_VAL_IN		(1 << 12)
+#define GPIO_DATA_PULLUP_DISABLE	(1 << 13)
+
+#define VCLK_DIVISOR_VGA0   0x6000
+#define VCLK_DIVISOR_VGA1   0x6004
+#define VCLK_POST_DIV	    0x6010
+
+#define PSB_COMM_2D (PSB_ENGINE_2D << 4)
+#define PSB_COMM_3D (PSB_ENGINE_3D << 4)
+#define PSB_COMM_TA (PSB_ENGINE_TA << 4)
+#define PSB_COMM_HP (PSB_ENGINE_HP << 4)
+#define PSB_COMM_USER_IRQ (1024 >> 2)
+#define PSB_COMM_USER_IRQ_LOST (PSB_COMM_USER_IRQ + 1)
+#define PSB_COMM_FW (2048 >> 2)
+
+#define PSB_UIRQ_VISTEST	       1
+#define PSB_UIRQ_OOM_REPLY	       2
+#define PSB_UIRQ_FIRE_TA_REPLY	       3
+#define PSB_UIRQ_FIRE_RASTER_REPLY     4
+
+#define PSB_2D_SIZE (256*1024*1024)
+
+#define PSB_LOW_REG_OFFS 0x0204
+#define PSB_HIGH_REG_OFFS 0x0600
+
+#define PSB_NUM_VBLANKS 2
+
+#define PSB_2D_SIZE (256*1024*1024)
+
+#define PSB_LOW_REG_OFFS 0x0204
+#define PSB_HIGH_REG_OFFS 0x0600
+
+#define PSB_NUM_VBLANKS 2
+#define PSB_LID_DELAY (DRM_HZ / 10)
+
+#define MDFLD_PNW_A0 0x00
+#define MDFLD_PNW_B0 0x04
+#define MDFLD_PNW_C0 0x08
+
+#define MDFLD_DSR_2D_3D_0 	BIT0
+#define MDFLD_DSR_2D_3D_2 	BIT1
+#define MDFLD_DSR_CURSOR_0 	BIT2
+#define MDFLD_DSR_CURSOR_2	BIT3
+#define MDFLD_DSR_OVERLAY_0 	BIT4
+#define MDFLD_DSR_OVERLAY_2 	BIT5
+#define MDFLD_DSR_MIPI_CONTROL	BIT6
+#define MDFLD_DSR_DAMAGE_MASK_0	(BIT0 | BIT2 | BIT4 | BIT6)
+#define MDFLD_DSR_DAMAGE_MASK_2	(BIT1 | BIT3 | BIT5 | BIT6)
+#define MDFLD_DSR_2D_3D 	(MDFLD_DSR_2D_3D_0 | MDFLD_DSR_2D_3D_2)
+
+#define MDFLD_DSR_RR 45
+#define MDFLD_DPU_ENABLE BIT31
+#define MDFLD_DSR_FULLSCREEN BIT30
+#define MDFLD_DSR_DELAY (DRM_HZ / MDFLD_DSR_RR)
+
+#define PSB_PWR_STATE_ON		1
+#define PSB_PWR_STATE_OFF		2
+
+#define PSB_PMPOLICY_NOPM		0
+#define PSB_PMPOLICY_CLOCKGATING	1
+#define PSB_PMPOLICY_POWERDOWN		2
+
+#define PSB_BOTTOM_HALF_WQ		1
+#define PSB_BOTTOM_HALF_TQ		2
+
+#define PSB_PMSTATE_POWERUP		0
+#define PSB_PMSTATE_CLOCKGATED		1
+#define PSB_PMSTATE_POWERDOWN		2
+#define PSB_PCIx_MSI_ADDR_LOC		0x94
+#define PSB_PCIx_MSI_DATA_LOC		0x98
+
+#define MDFLD_PLANE_MAX_WIDTH		2048
+#define MDFLD_PLANE_MAX_HEIGHT		2048
+#define PANEL_NAME_MAX_LEN	        16
+
+#define MAX_NUM 0xffffffff
+
+/*
+ *User options.
+ */
+
+struct drm_psb_uopt {
+	int pad; /*keep it here in case we use it in future*/
+};
+
+typedef int (*pfn_vsync_handler)(struct drm_device *dev, int pipe);
+typedef int(*pfn_screen_event_handler)(struct drm_device *psDrmDevice, int state);
+
+
+#define MODE_SETTING_IN_CRTC 	0x1
+#define MODE_SETTING_IN_ENCODER 0x2
+#define MODE_SETTING_ON_GOING 	0x3
+#define MODE_SETTING_IN_DSR 	0x4
+#define MODE_SETTING_ENCODER_DONE 0x8
+#define GCT_R10_HEADER_SIZE		16
+#define GCT_R10_DISPLAY_DESC_SIZE	28
+#define GCT_R11_HEADER_SIZE		16
+#define GCT_R11_DISPLAY_DESC_SIZE	44
+#define GCT_R20_HEADER_SIZE		16
+#define GCT_R20_DISPLAY_DESC_SIZE	48
+
+#define PSB_REG_PRINT_SIZE    40960
+
+struct psb_context;
+struct psb_validate_buffer;
+struct psb_video_ctx;
+
+/* PVR call back for display driver.
+ * Definition is copied from PVR code.
+ */
+struct gpu_pvr_ops {
+	IMG_BOOL (*PVRGetDisplayClassJTable)(PVRSRV_DC_DISP2SRV_KMJTABLE
+		*psJTable);
+#if defined(SUPPORT_DRI_DRM_EXT)
+	int (*SYSPVRServiceSGXInterrupt)(struct drm_device *dev);
+#endif
+	int (*PVRSRVDrmLoad)(struct drm_device *dev, unsigned long flags);
+	int (*SYSPVRInit)(void);
+	int (*PVRDRM_Dummy_ioctl)(struct drm_device *dev, void *arg,
+			struct drm_file *pFile);
+	int (*PVRMMap)(struct file *pFile, struct vm_area_struct *ps_vma);
+	void (*PVRSRVDrmPostClose)(struct drm_device *dev,
+			struct drm_file *file);
+	int (*PVRSRV_BridgeDispatchKM)(struct drm_device unref__ * dev,
+			void *arg, struct drm_file *pFile);
+	int (*PVRSRVOpen)(struct drm_device unref__ *dev,
+			struct drm_file *pFile);
+	int (*PVRDRMIsMaster)(struct drm_device *dev, void *arg,
+			struct drm_file *pFile);
+	int (*PVRDRMUnprivCmd)(struct drm_device *dev, void *arg,
+			struct drm_file *pFile);
+	int (*SYSPVRDBGDrivIoctl)(struct drm_device *dev, IMG_VOID *arg,
+			struct drm_file *pFile);
+	int (*PVRSRVDrmUnload)(struct drm_device *dev);
+	PVRSRV_PER_PROCESS_DATA *(*PVRSRVPerProcessData)(IMG_UINT32 ui32PID);
+#if defined (SUPPORT_SID_INTERFACE)
+	PVRSRV_ERROR (*PVRSRVLookupHandle)(PVRSRV_HANDLE_BASE *psBase,
+			IMG_PVOID *ppvData, IMG_SID hHandle,
+			PVRSRV_HANDLE_TYPE eType);
+#else
+	PVRSRV_ERROR (*PVRSRVLookupHandle)(PVRSRV_HANDLE_BASE *psBase,
+			IMG_PVOID *ppvData, IMG_HANDLE hHandle,
+			PVRSRV_HANDLE_TYPE eType);
+#endif
+	IMG_CPU_PHYADDR (*LinuxMemAreaToCpuPAddr)(LinuxMemArea *psLinuxMemArea,
+			IMG_UINT32 ui32ByteOffset);
+	PVRSRV_ERROR (*OSScheduleMISR2)(void);
+};
+
+struct platform_panel_info {
+	char name[PANEL_NAME_MAX_LEN+1];
+	int  mode;
+};
+
+struct drm_psb_private {
+	/*
+	 * DSI info.
+	 */
+	void *dbi_dsr_info;
+#ifdef CONFIG_MDFLD_DSI_DPU
+	void *dbi_dpu_info;
+#endif
+	struct mdfld_dsi_config *dsi_configs[2];
+
+	struct work_struct te_work;
+	struct work_struct reset_panel_work;
+
+	struct work_struct vsync_event_work;
+	int vsync_pipe;
+	wait_queue_head_t vsync_queue;
+	atomic_t *vblank_count;
+
+	/*
+	 *TTM Glue.
+	 */
+#ifdef CONFIG_MDFD_VIDEO_DECODE
+	struct drm_global_reference mem_global_ref;
+	struct ttm_bo_global_ref bo_global_ref;
+#endif
+	int has_global;
+
+	struct drm_device *dev;
+	struct ttm_object_device *tdev;
+	struct ttm_fence_device fdev;
+	struct ttm_bo_device bdev;
+	/* Todo: can remove due to no one call ttm_write_lock */
+	struct ttm_lock ttm_lock;
+	struct vm_operations_struct *ttm_vm_ops;
+	int has_fence_device;
+	int has_bo_device;
+
+	unsigned long chipset;
+
+	struct drm_psb_dev_info_arg dev_info;
+	struct drm_psb_uopt uopt;
+
+	struct psb_gtt *pg;
+
+	/*GTT Memory manager*/
+	struct psb_gtt_mm *gtt_mm;
+
+	struct page *scratch_page;
+	uint32_t sequence[PSB_NUM_ENGINES];
+	uint32_t last_sequence[PSB_NUM_ENGINES];
+	uint32_t last_submitted_seq[PSB_NUM_ENGINES];
+
+	struct psb_mmu_driver *mmu;
+	struct psb_mmu_pd *pf_pd;
+
+	uint8_t *sgx_reg;
+	uint8_t *vdc_reg;
+#ifdef CONFIG_MDFD_GL3
+	uint8_t *gl3_reg;
+#endif
+	uint32_t gatt_free_offset;
+
+	/* IMG video context */
+	struct list_head video_ctx;
+	spinlock_t video_ctx_lock;
+	/* Current video context */
+	struct psb_video_ctx *topaz_ctx;
+	/* previous vieo context */
+	struct psb_video_ctx *last_topaz_ctx;
+
+	/*
+	 *MSVDX
+	 */
+	uint8_t *msvdx_reg;
+	atomic_t msvdx_mmu_invaldc;
+	void *msvdx_private;
+
+	/*
+	 *TOPAZ
+	 */
+	uint8_t *topaz_reg;
+	void *topaz_private;
+	uint8_t topaz_disabled;
+	uint32_t video_device_fuse;
+	atomic_t topaz_mmu_invaldc;
+
+	/*
+	 *Fencing / irq.
+	 */
+
+	uint32_t vdc_irq_mask;
+	uint32_t pipestat[PSB_NUM_PIPE];
+	bool vblanksEnabledForFlips;
+
+	spinlock_t irqmask_lock;
+	spinlock_t sequence_lock;
+
+	/*
+	 *Modesetting
+	 */
+	struct psb_intel_mode_device mode_dev;
+
+	struct drm_crtc *plane_to_crtc_mapping[PSB_NUM_PIPE];
+	struct drm_crtc *pipe_to_crtc_mapping[PSB_NUM_PIPE];
+	uint32_t num_pipe;
+
+	/*
+	 * CI share buffer
+	 */
+	unsigned int ci_region_start;
+	unsigned int ci_region_size;
+
+	/*
+	 * IMR share buffer;
+	 */
+	unsigned int imr_region_start;
+	unsigned int imr_region_size;
+
+	/*
+	 *Memory managers
+	 */
+
+	int have_imr;
+	int have_tt;
+	int have_mem_mmu;
+	int have_mem_mmu_tiling;
+
+	/*
+	 *Relocation buffer mapping.
+	 */
+
+	spinlock_t reloc_lock;
+	unsigned int rel_mapped_pages;
+	wait_queue_head_t rel_mapped_queue;
+
+	/*
+	 *SAREA
+	 */
+	struct drm_psb_sarea *sarea_priv;
+
+	/*
+	*OSPM info
+	*/
+	spinlock_t ospm_lock;
+	uint8_t panel_desc;
+	bool early_suspended;
+
+	/*
+	 * Sizes info
+	 */
+
+	struct drm_psb_sizes_arg sizes;
+
+	uint32_t fuse_reg_value;
+
+	/* vbt (gct) header information*/
+	struct intel_mid_vbt vbt_data;
+	/* info that is stored from the gct */
+	struct gct_ioctl_arg gct_data;
+
+	/* pci revision id for B0:D2:F0 */
+	uint8_t platform_rev_id;
+
+	/*
+	 *LVDS info
+	 */
+	struct drm_display_mode *panel_fixed_mode;
+
+	/* Feature bits from the VBIOS*/
+	unsigned int int_tv_support:1;
+	unsigned int lvds_dither:1;
+	unsigned int lvds_vbt:1;
+	unsigned int int_crt_support:1;
+	unsigned int lvds_use_ssc:1;
+	int lvds_ssc_freq;
+	bool is_lvds_on;
+
+	/* MRST private date start */
+	unsigned int core_freq;
+	uint32_t iLVDS_enable;
+
+	/* pipe config register value */
+	uint32_t pipeconf;
+	uint32_t pipeconf1;
+	uint32_t pipeconf2;
+
+	/* plane control register value */
+	uint32_t dspcntr;
+	uint32_t dspcntr1;
+	uint32_t dspcntr2;
+
+	/* MRST_DSI private date start */
+	struct work_struct dsi_work;
+
+	/*
+	 *MRST DSI info
+	 */
+
+	/* The DPI panel power on */
+	bool dpi_panel_on;
+
+	/* The DPI display */
+	bool dpi;
+
+	/* Set if MIPI encoder wants to control plane/pipe */
+	bool dsi_plane_pipe_control;
+
+	/* status */
+	uint32_t videoModeFormat:2;
+	uint32_t laneCount:3;
+	uint32_t channelNumber:2;
+	uint32_t status_reserved:25;
+
+	/* dual display - DPI & DBI */
+	bool dual_display;
+
+	/* HS or LP transmission */
+	bool lp_transmission;
+
+	/* configuration phase */
+	bool config_phase;
+
+	/* first boot phase */
+	bool first_boot;
+
+	bool is_mipi_on;
+
+	/* DSI clock */
+	uint32_t RRate;
+	uint32_t DDR_Clock;
+	uint32_t DDR_Clock_Calculated;
+	uint32_t ClockBits;
+
+	/* DBI Buffer pointer */
+	u32 DBI_CB_phys;
+	u8 *p_DBI_commandBuffer;
+	uint32_t DBI_CB_pointer;
+	u8 *p_DBI_dataBuffer_orig;
+	u8 *p_DBI_dataBuffer;
+	uint32_t DBI_DB_pointer;
+
+	uint32_t bpp:5;
+
+	/* MDFLD_DSI private date start */
+	/* dual display - DPI & DBI */
+	bool dual_mipi;
+	uint32_t ksel;
+	uint32_t mipi_lane_config;
+	uint32_t mipi_ctrl_display;
+	/*
+	 *MRST DSI info
+	 */
+	/* The DPI panel power on */
+	bool dpi_panel_on2;
+
+	/* The DPI display */
+	bool dpi2;
+
+	/* status */
+	uint32_t videoModeFormat2:2;
+	uint32_t laneCount2:3;
+	uint32_t channelNumber2:2;
+	uint32_t status_reserved2:25;
+
+	/* HS or LP transmission */
+	bool lp_transmission2;
+
+	/* configuration phase */
+	bool config_phase2;
+
+	/* DSI clock */
+	uint32_t RRate2;
+	uint32_t DDR_Clock2;
+	uint32_t DDR_Clock_Calculated2;
+	uint32_t ClockBits2;
+
+	/* DBI Buffer pointer */
+	u32 DBI_CB_phys2;
+	u8 *p_DBI_commandBuffer2;
+	uint32_t DBI_CB_pointer2;
+	u8 *p_DBI_dataBuffer_orig2;
+	u8 *p_DBI_dataBuffer2;
+
+	/* DSI panel spec */
+	uint32_t pixelClock2;
+	uint32_t HsyncWidth2;
+	uint32_t HbackPorch2;
+	uint32_t HfrontPorch2;
+	uint32_t HactiveArea2;
+	uint32_t VsyncWidth2;
+	uint32_t VbackPorch2;
+	uint32_t VfrontPorch2;
+	uint32_t VactiveArea2;
+	uint32_t bpp2:5;
+	uint32_t Reserved2:27;
+	struct mdfld_dsi_dbi_output *dbi_output;
+	struct mdfld_dsi_dbi_output *dbi_output2;
+	struct mdfld_dsi_dpi_output *dpi_output;
+	struct mdfld_dsi_dpi_output *dpi_output2;
+	/* MDFLD_DSI private date end */
+
+	/*runtime PM state*/
+	int rpm_enabled;
+
+	/*
+	 *Register state
+	 */
+	uint32_t saveDSPACNTR;
+	uint32_t saveDSPBCNTR;
+	uint32_t savePIPEACONF;
+	uint32_t savePIPEBCONF;
+	uint32_t savePIPEASRC;
+	uint32_t savePIPEBSRC;
+	uint32_t saveFPA0;
+	uint32_t saveFPA1;
+	uint32_t saveDPLL_A;
+	uint32_t saveDPLL_A_MD;
+	uint32_t saveHTOTAL_A;
+	uint32_t saveHBLANK_A;
+	uint32_t saveHSYNC_A;
+	uint32_t saveVTOTAL_A;
+	uint32_t saveVBLANK_A;
+	uint32_t saveVSYNC_A;
+	uint32_t saveDSPASTRIDE;
+	uint32_t saveDSPASIZE;
+	uint32_t saveDSPAPOS;
+	uint32_t saveDSPABASE;
+	uint32_t saveDSPASURF;
+	uint32_t saveDSPASTATUS;
+	uint32_t saveFPB0;
+	uint32_t saveFPB1;
+	uint32_t saveDPLL_B;
+	uint32_t saveDATALANES_B;
+	uint32_t saveDPLL_B_MD;
+	uint32_t saveHTOTAL_B;
+	uint32_t saveHBLANK_B;
+	uint32_t saveHSYNC_B;
+	uint32_t saveVTOTAL_B;
+	uint32_t saveVBLANK_B;
+	uint32_t saveVSYNC_B;
+	uint32_t saveDSPBSTRIDE;
+	uint32_t saveDSPBSIZE;
+	uint32_t saveDSPBPOS;
+	uint32_t saveDSPBBASE;
+	uint32_t saveDSPBSURF;
+	uint32_t saveDSPBSTATUS;
+	uint32_t saveVCLK_DIVISOR_VGA0;
+	uint32_t saveVCLK_DIVISOR_VGA1;
+	uint32_t saveVCLK_POST_DIV;
+	uint32_t saveVGACNTRL;
+	uint32_t saveADPA;
+	uint32_t saveLVDS;
+	uint32_t saveDVOA;
+	uint32_t saveDVOB;
+	uint32_t saveDVOC;
+	uint32_t savePP_ON;
+	uint32_t savePP_OFF;
+	uint32_t savePP_CONTROL;
+	uint32_t savePP_CYCLE;
+	uint32_t savePFIT_CONTROL;
+	uint32_t savePaletteA[256];
+	uint32_t savePaletteB[256];
+	uint32_t saveBLC_PWM_CTL2;
+	uint32_t saveBLC_PWM_CTL;
+	uint32_t saveCLOCKGATING;
+	uint32_t saveDSPARB;
+	uint32_t saveDSPATILEOFF;
+	uint32_t saveDSPBTILEOFF;
+	uint32_t saveDSPAADDR;
+	uint32_t saveDSPBADDR;
+	uint32_t savePFIT_AUTO_RATIOS;
+	uint32_t savePFIT_PGM_RATIOS;
+	uint32_t savePP_ON_DELAYS;
+	uint32_t savePP_OFF_DELAYS;
+	uint32_t savePP_DIVISOR;
+	uint32_t saveBSM;
+	uint32_t saveVBT;
+	uint32_t saveBCLRPAT_A;
+	uint32_t saveBCLRPAT_B;
+	uint32_t saveDSPALINOFF;
+	uint32_t saveDSPBLINOFF;
+	uint32_t saveVED_CG_DIS;
+	uint32_t saveVEC_CG_DIS;
+	uint32_t savePERF_MODE;
+	uint32_t saveGL3_CTL;
+	uint32_t saveGL3_USE_WRT_INVAL;
+	uint32_t saveDSPFW1;
+	uint32_t saveDSPFW2;
+	uint32_t saveDSPFW3;
+	uint32_t saveDSPFW4;
+	uint32_t saveDSPFW5;
+	uint32_t saveDSPFW6;
+	uint32_t saveCHICKENBIT;
+	uint32_t saveDSPACURSOR_CTRL;
+	uint32_t saveDSPBCURSOR_CTRL;
+	uint32_t saveDSPACURSOR_BASE;
+	uint32_t saveDSPBCURSOR_BASE;
+	uint32_t saveDSPACURSOR_POS;
+	uint32_t saveDSPBCURSOR_POS;
+	uint32_t save_palette_a[256];
+	uint32_t save_palette_b[256];
+	uint32_t save_color_coef_a[6];
+	uint32_t save_color_coef_b[6];
+	uint32_t save_color_coef_c[6];
+	uint32_t saveOV_OVADD;
+	uint32_t saveOV_OGAMC0;
+	uint32_t saveOV_OGAMC1;
+	uint32_t saveOV_OGAMC2;
+	uint32_t saveOV_OGAMC3;
+	uint32_t saveOV_OGAMC4;
+	uint32_t saveOV_OGAMC5;
+	uint32_t saveOVC_OVADD;
+	uint32_t saveOVC_OGAMC0;
+	uint32_t saveOVC_OGAMC1;
+	uint32_t saveOVC_OGAMC2;
+	uint32_t saveOVC_OGAMC3;
+	uint32_t saveOVC_OGAMC4;
+	uint32_t saveOVC_OGAMC5;
+
+	/*
+	 * extra MDFLD Register state
+	 */
+	uint32_t saveHDMIPHYMISCCTL;
+	uint32_t saveHDMIB_CONTROL;
+	uint32_t saveDSPCCNTR;
+	uint32_t savePIPECCONF;
+	uint32_t savePIPECSRC;
+	uint32_t saveHTOTAL_C;
+	uint32_t saveHBLANK_C;
+	uint32_t saveHSYNC_C;
+	uint32_t saveVTOTAL_C;
+	uint32_t saveVBLANK_C;
+	uint32_t saveVSYNC_C;
+	uint32_t saveDSPCSTRIDE;
+	uint32_t saveDSPCSIZE;
+	uint32_t saveDSPCPOS;
+	uint32_t saveDSPCSURF;
+	uint32_t saveDSPCSTATUS;
+	uint32_t saveDSPCLINOFF;
+	uint32_t saveDSPCTILEOFF;
+	uint32_t saveDSPCCURSOR_CTRL;
+	uint32_t saveDSPCCURSOR_BASE;
+	uint32_t saveDSPCCURSOR_POS;
+	uint32_t save_palette_c[256];
+	uint32_t saveOV_OVADD_C;
+	uint32_t saveOV_OGAMC0_C;
+	uint32_t saveOV_OGAMC1_C;
+	uint32_t saveOV_OGAMC2_C;
+	uint32_t saveOV_OGAMC3_C;
+	uint32_t saveOV_OGAMC4_C;
+	uint32_t saveOV_OGAMC5_C;
+
+	/* DSI reg save */
+	uint32_t saveDEVICE_READY_REG;
+	uint32_t saveINTR_EN_REG;
+	uint32_t saveDSI_FUNC_PRG_REG;
+	uint32_t saveHS_TX_TIMEOUT_REG;
+	uint32_t saveLP_RX_TIMEOUT_REG;
+	uint32_t saveTURN_AROUND_TIMEOUT_REG;
+	uint32_t saveDEVICE_RESET_REG;
+	uint32_t saveDPI_RESOLUTION_REG;
+	uint32_t saveHORIZ_SYNC_PAD_COUNT_REG;
+	uint32_t saveHORIZ_BACK_PORCH_COUNT_REG;
+	uint32_t saveHORIZ_FRONT_PORCH_COUNT_REG;
+	uint32_t saveHORIZ_ACTIVE_AREA_COUNT_REG;
+	uint32_t saveVERT_SYNC_PAD_COUNT_REG;
+	uint32_t saveVERT_BACK_PORCH_COUNT_REG;
+	uint32_t saveVERT_FRONT_PORCH_COUNT_REG;
+	uint32_t saveHIGH_LOW_SWITCH_COUNT_REG;
+	uint32_t saveINIT_COUNT_REG;
+	uint32_t saveMAX_RET_PAK_REG;
+	uint32_t saveVIDEO_FMT_REG;
+	uint32_t saveEOT_DISABLE_REG;
+	uint32_t saveLP_BYTECLK_REG;
+	uint32_t saveHS_LS_DBI_ENABLE_REG;
+	uint32_t saveTXCLKESC_REG;
+	uint32_t saveDPHY_PARAM_REG;
+	uint32_t saveMIPI_CONTROL_REG;
+	uint32_t saveMIPI;
+	uint32_t saveMIPI_C;
+	void (*init_drvIC)(struct drm_device *dev);
+	void (*dsi_prePowerState)(struct drm_device *dev);
+	void (*dsi_postPowerState)(struct drm_device *dev);
+
+	/* DPST Register Save */
+	uint32_t saveHISTOGRAM_INT_CONTROL_REG;
+	uint32_t saveHISTOGRAM_LOGIC_CONTROL_REG;
+	uint32_t savePWM_CONTROL_LOGIC;
+
+	/* DPST: TODO - Assign Appropriate Connector here */
+	struct drm_connector *dpst_lvds_connector;
+
+	/* MSI reg save */
+
+	uint32_t msi_addr;
+	uint32_t msi_data;
+
+	/*
+	 *Scheduling.
+	 */
+
+	struct mutex cmdbuf_mutex;
+	/*uint32_t ta_mem_pages;
+	struct psb_ta_mem *ta_mem;
+	int force_ta_mem_load;
+	atomic_t val_seq;*/
+
+	/*
+	 *TODO: change this to be per drm-context.
+	 */
+
+	struct psb_context decode_context;
+	struct psb_context encode_context;
+
+	/*
+	 * LID-Switch
+	 */
+	spinlock_t lid_lock;
+	struct timer_list lid_timer;
+	u32 *lid_state;
+	u32 lid_last_state;
+
+	/*
+	 *Watchdog
+	 */
+
+	spinlock_t watchdog_lock;
+	struct timer_list watchdog_timer;
+	struct work_struct watchdog_wq;
+	struct work_struct msvdx_watchdog_wq;
+	struct work_struct topaz_watchdog_wq;
+	struct work_struct hdmi_audio_wq;
+	int timer_available;
+
+#ifdef OSPM_STAT
+	unsigned char graphics_state;
+	unsigned long gfx_on_time;
+	unsigned long gfx_off_time;
+	unsigned long gfx_last_mode_change;
+	unsigned long gfx_on_cnt;
+	unsigned long gfx_off_cnt;
+#endif
+
+	/*
+	 * Used for modifying backlight from
+	 * xrandr -- consider removing and using HAL instead
+	 */
+	struct drm_property *backlight_property;
+	uint32_t blc_adj1;
+	uint32_t blc_adj2;
+
+	/*
+	 * DPST and Hotplug state
+	 */
+	struct dpst_state *psb_dpst_state;
+	pfn_vsync_handler psb_vsync_handler;
+
+	bool b_dsr_enable_config;
+	bool b_dsr_enable;
+	bool b_dsr_enable_status;
+	bool b_async_flip_enable;
+	bool dsr_fb_update_done_0;
+	bool dsr_fb_update_done_2;
+	uint32_t dsr_fb_update;
+	uint32_t dsr_idle_count;
+	bool b_is_in_idle;
+	void (*exit_idle)(struct drm_device *dev, u32 update_src, void *p_surfaceAddr, bool check_hw_on_only);
+	bool b_vblank_enable;
+	int (*async_flip_update_fb)(struct drm_device *dev, int pipe);
+	int (*async_check_fifo_empty)(struct drm_device *dev);
+
+	bool dsi_device_ready;
+	bool um_start;
+
+	uint32_t tmds_clock_khz;
+	had_event_call_back mdfld_had_event_callbacks;
+	struct snd_intel_had_interface *had_interface;
+	void *had_pvt_data;
+
+	uint32_t hdmi_audio_interrupt_mask;
+
+	struct mdfld_dsi_encoder *encoder0;
+	struct mdfld_dsi_encoder *encoder2;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
+	/*psb fb dev*/
+	void *fbdev;
+#endif
+	/* read register value through sysfs. */
+	int count;
+	char *buf;
+
+	/*for HDMI flip chain*/
+#define DRM_PSB_HDMI_FLIP_ARRAY_SIZE 4
+	void *flip_array[DRM_PSB_HDMI_FLIP_ARRAY_SIZE];
+	unsigned int addr_array[DRM_PSB_HDMI_FLIP_ARRAY_SIZE];
+	unsigned int flip_valid_size;
+	unsigned int flip_head;
+	unsigned int flip_tail;
+	unsigned int flip_inited;
+	unsigned int head_fliped;
+	spinlock_t flip_lock;
+
+	/*hdmi connected status */
+	bool bhdmiconnected;
+	bool dpms_on_off;
+	bool bhdmi_enable;
+	struct workqueue_struct *hpd_detect;
+	pfn_screen_event_handler pvr_screen_event_handler;
+	struct mutex dpms_mutex;
+
+	/* fix Lock screen flip in resume issue */
+	unsigned long init_screen_start;
+	unsigned long init_screen_offset;
+	unsigned long init_screen_size;
+	unsigned long init_screen_stride;
+
+	/* gamma and csc setting lock*/
+	struct mutex gamma_csc_lock;
+	/* overlay setting lock*/
+	struct mutex overlay_lock;
+	struct mutex vsync_lock;
+
+	int brightness_adjusted;
+
+	/*
+	 * HDMI info
+	 */
+	struct android_hdmi_priv *hdmi_priv;
+
+	/* indicate whether IED session is active */
+	/* Maximum one active IED session at any given time */
+	bool ied_enabled;
+	/* indicate which source sets ied_enabled flag */
+	struct file *ied_context;
+	unsigned long long vsync_te_irq_ts[PSB_NUM_PIPE];
+	unsigned long long vsync_te_worker_ts[PSB_NUM_PIPE];
+	unsigned long long vsync_te_trouble_ts;
+	bool  vsync_te_working[PSB_NUM_PIPE];
+	atomic_t mipi_flip_abnormal;
+	struct gpu_pvr_ops * pvr_ops;
+
+	struct platform_panel_info panel_info;
+};
+
+struct psb_mmu_driver;
+
+extern int drm_crtc_probe_output_modes(struct drm_device *dev, int, int);
+extern int drm_pick_crtcs(struct drm_device *dev);
+extern int mdfld_intel_crtc_set_gamma(struct drm_device *dev,
+					struct gamma_setting *setting_data);
+extern int mdfld_intel_crtc_set_color_conversion(struct drm_device *dev,
+					struct csc_setting *setting_data);
+
+struct psb_fpriv {
+	struct ttm_object_file *tfile;
+	bool dsr_blocked;
+};
+
+static inline struct psb_fpriv *psb_fpriv(struct drm_file *file_priv)
+{
+	PVRSRV_FILE_PRIVATE_DATA *pvr_file_priv
+	= (PVRSRV_FILE_PRIVATE_DATA *)file_priv->driver_priv;
+	return (struct psb_fpriv *) pvr_file_priv->pPriv;
+}
+
+static inline struct drm_psb_private *psb_priv(struct drm_device *dev)
+{
+	return (struct drm_psb_private *) dev->dev_private;
+}
+
+/*
+ *psb_irq.c
+ */
+extern irqreturn_t psb_irq_handler(DRM_IRQ_ARGS);
+extern int psb_irq_enable_dpst(struct drm_device *dev);
+extern int psb_irq_disable_dpst(struct drm_device *dev);
+extern void psb_irq_preinstall(struct drm_device *dev);
+extern int psb_irq_postinstall(struct drm_device *dev);
+extern void psb_irq_uninstall(struct drm_device *dev);
+extern void psb_irq_preinstall_islands(struct drm_device *dev, int hw_islands);
+extern int psb_irq_postinstall_islands(struct drm_device *dev, int hw_islands);
+extern void psb_irq_turn_on_dpst(struct drm_device *dev);
+extern void psb_irq_turn_off_dpst(struct drm_device *dev);
+
+extern void psb_irq_uninstall_islands(struct drm_device *dev, int hw_islands);
+extern int psb_vblank_wait2(struct drm_device *dev, unsigned int *sequence);
+extern int psb_vblank_wait(struct drm_device *dev, unsigned int *sequence);
+extern int psb_enable_vblank(struct drm_device *dev, int crtc);
+extern void psb_disable_vblank(struct drm_device *dev, int crtc);
+void
+psb_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
+
+void
+psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
+
+void mid_enable_pipe_event(struct drm_psb_private *dev_priv, int pipe);
+
+extern u32 psb_get_vblank_counter(struct drm_device *dev, int crtc);
+extern int mdfld_enable_te(struct drm_device *dev, int pipe);
+extern void mdfld_disable_te(struct drm_device *dev, int pipe);
+extern int mid_irq_enable_hdmi_audio(struct drm_device *dev);
+extern int mid_irq_disable_hdmi_audio(struct drm_device *dev);
+extern void psb_te_timer_func(unsigned long data);
+extern void mdfld_te_handler_work(struct work_struct *te_work);
+extern void mdfld_vsync_event_work(struct work_struct *work);
+extern u32 intel_vblank_count(struct drm_device *dev, int pipe);
+
+/*
+ *psb_fb.c
+ */
+extern int psbfb_probed(struct drm_device *dev);
+extern int psbfb_remove(struct drm_device *dev,
+			struct drm_framebuffer *fb);
+extern int psbfb_kms_off_ioctl(struct drm_device *dev, void *data,
+			       struct drm_file *file_priv);
+extern int psbfb_kms_on_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv);
+extern void *psbfb_vdc_reg(struct drm_device* dev);
+
+extern void psb_print_pagefault(struct drm_psb_private *dev_priv);
+
+/* modesetting */
+extern void psb_modeset_init(struct drm_device *dev);
+extern void psb_modeset_cleanup(struct drm_device *dev);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
+/*fbdev*/
+extern int psb_fbdev_init(struct drm_device *dev);
+#endif
+/* ied session */
+extern void psb_cleanup_ied_session(struct drm_psb_private *dev_priv,
+			      struct file *filp);
+
+/* psb_bl.c */
+extern  int psb_brightness;
+int psb_backlight_init(struct drm_device *dev);
+void psb_backlight_exit(void);
+int psb_set_brightness(struct backlight_device *bd);
+int psb_get_brightness(struct backlight_device *bd);
+struct backlight_device *psb_get_backlight_device(void);
+
+/*
+ *Debug print bits setting
+ */
+#define PSB_D_GENERAL (1 << 0)
+#define PSB_D_INIT    (1 << 1)
+#define PSB_D_IRQ     (1 << 2)
+#define PSB_D_ENTRY   (1 << 3)
+/* debug the get H/V BP/FP count */
+#define PSB_D_HV      (1 << 4)
+#define PSB_D_DBI_BF  (1 << 5)
+#define PSB_D_PM      (1 << 6)
+#define PSB_D_RENDER  (1 << 7)
+#define PSB_D_REG     (1 << 8)
+#define PSB_D_MSVDX   (1 << 9)
+#define PSB_D_TOPAZ   (1 << 10)
+#define PSB_D_WARN    (1 << 11)
+#define PSB_D_MIPI    (1 << 12)
+
+#ifndef DRM_DEBUG_CODE
+/* To enable debug printout, set drm_psb_debug in psb_drv.c
+ * to any combination of above print flags.
+ */
+#define DRM_DEBUG_CODE 2
+#endif
+
+/*use case control*/
+#define PSB_SUSPEND_ENABLE     (1 << 0)
+#define PSB_BRIGHTNESS_ENABLE  (1 << 1)
+#define PSB_ESD_ENABLE         (1 << 2)
+#define PSB_DPMS_ENABLE        (1 << 3)
+#define PSB_DSR_ENABLE         (1 << 4)
+#define PSB_VSYNC_OFF_ENABLE   (1 << 5)
+#define PSB_ALL_UC_ENABLE      0x3F
+
+extern int drm_psb_no_fb;
+extern int drm_psb_disable_vsync;
+extern int drm_topaz_sbuswa;
+
+#define PSB_DEBUG_GENERAL(_fmt, _arg...) \
+	PSB_DEBUG(PSB_D_GENERAL, _fmt, ##_arg)
+#define PSB_DEBUG_INIT(_fmt, _arg...) \
+	PSB_DEBUG(PSB_D_INIT, _fmt, ##_arg)
+#define PSB_DEBUG_IRQ(_fmt, _arg...) \
+	PSB_DEBUG(PSB_D_IRQ, _fmt, ##_arg)
+#define PSB_DEBUG_ENTRY(_fmt, _arg...) \
+	PSB_DEBUG(PSB_D_ENTRY, _fmt, ##_arg)
+#define PSB_DEBUG_HV(_fmt, _arg...) \
+	PSB_DEBUG(PSB_D_HV, _fmt, ##_arg)
+#define PSB_DEBUG_DBI_BF(_fmt, _arg...) \
+	PSB_DEBUG(PSB_D_DBI_BF, _fmt, ##_arg)
+#define PSB_DEBUG_PM(_fmt, _arg...) \
+	PSB_DEBUG(PSB_D_PM, _fmt, ##_arg)
+#define PSB_DEBUG_RENDER(_fmt, _arg...) \
+	PSB_DEBUG(PSB_D_RENDER, _fmt, ##_arg)
+#define PSB_DEBUG_REG(_fmt, _arg...) \
+	PSB_DEBUG(PSB_D_REG, _fmt, ##_arg)
+#define PSB_DEBUG_MSVDX(_fmt, _arg...) \
+	PSB_DEBUG(PSB_D_MSVDX, _fmt, ##_arg)
+#define PSB_DEBUG_TOPAZ(_fmt, _arg...) \
+	PSB_DEBUG(PSB_D_TOPAZ, _fmt, ##_arg)
+/* force to print WARN msg */
+#define PSB_DEBUG_WARN(_fmt, _arg...) \
+	PSB_DEBUG(PSB_D_WARN, _fmt, ##_arg)
+#define PSB_DEBUG_MIPI(_fmt, _arg...) \
+	PSB_DEBUG(PSB_D_MIPI, _fmt, ##_arg)
+
+
+#if DRM_DEBUG_CODE
+#define PSB_DEBUG(_flag, _fmt, _arg...)					\
+	do {								\
+		if ((_flag & drm_psb_debug) ||	(_flag == PSB_D_WARN)) 	\
+			printk(KERN_DEBUG				\
+			       "[psb:0x%02x:%s] " _fmt , _flag,		\
+			       __func__ , ##_arg);			\
+	} while (0)
+#else
+#define PSB_DEBUG(_fmt, _arg...)     do { } while (0)
+#endif
+
+/*
+ *Utilities
+ */
+#define DRM_DRIVER_PRIVATE_T struct drm_psb_private
+
+static inline uint32_t REGISTER_READ(struct drm_device *dev, uint32_t reg)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	int reg_val = ioread32(dev_priv->vdc_reg + (reg));
+	PSB_DEBUG_REG("reg = 0x%x. reg_val = 0x%x. \n", reg, reg_val);
+	return reg_val;
+}
+
+#define REG_READ(reg)	       REGISTER_READ(dev, (reg))
+static inline void REGISTER_WRITE(struct drm_device *dev, uint32_t reg,
+				  uint32_t val)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	iowrite32((val), dev_priv->vdc_reg + (reg));
+}
+
+#define REG_WRITE(reg, val)	REGISTER_WRITE(dev, (reg), (val))
+
+static inline void REGISTER_WRITE16(struct drm_device *dev,
+				    uint32_t reg, uint32_t val)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	PSB_DEBUG_REG("reg = 0x%x, val = 0x%x. \n", reg, val);
+
+	iowrite16((val), dev_priv->vdc_reg + (reg));
+}
+
+#define REG_WRITE16(reg, val)	  REGISTER_WRITE16(dev, (reg), (val))
+
+static inline void REGISTER_WRITE8(struct drm_device *dev,
+				   uint32_t reg, uint32_t val)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	PSB_DEBUG_REG("reg = 0x%x, val = 0x%x. \n", reg, val);
+
+	iowrite8((val), dev_priv->vdc_reg + (reg));
+}
+
+#define REG_WRITE8(reg, val)	 REGISTER_WRITE8(dev, (reg), (val))
+
+#define PSB_ALIGN_TO(_val, _align) \
+  (((_val) + ((_align) - 1)) & ~((_align) - 1))
+#define PSB_WVDC32(_val, _offs) \
+  iowrite32(_val, dev_priv->vdc_reg + (_offs))
+#define PSB_RVDC32(_offs) \
+  ioread32(dev_priv->vdc_reg + (_offs))
+
+static inline uint32_t SGX_REGISTER_READ(struct drm_device *dev, uint32_t reg)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       int reg_val = ioread32(dev_priv->sgx_reg + (reg));
+       PSB_DEBUG_REG("sgx reg = 0x%x. reg_val = 0x%x. \n", reg, reg_val);
+       return reg_val;
+}
+
+#define SGX_REG_READ(reg)             SGX_REGISTER_READ(dev, (reg))
+static inline void SGX_REGISTER_WRITE(struct drm_device *dev, uint32_t reg,
+						uint32_t val)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       PSB_DEBUG_REG("sgx reg = 0x%x, val = 0x%x. \n", reg, val);
+
+       iowrite32((val), dev_priv->sgx_reg + (reg));
+}
+
+#define SGX_REG_WRITE(reg, val)        SGX_REGISTER_WRITE(dev, (reg), (val))
+
+/* #define TRAP_SGX_PM_FAULT 1 */
+#ifdef TRAP_SGX_PM_FAULT
+#define PSB_RSGX32(_offs)					\
+({								\
+    if (inl(dev_priv->apm_base + PSB_APM_STS) & 0x3) {		\
+	printk(KERN_ERR "access sgx when it's off!! (READ) %s, %d\n", \
+	       __FILE__, __LINE__);				\
+	mdelay(1000);						\
+    }								\
+    ioread32(dev_priv->sgx_reg + (_offs));			\
+})
+#else
+#define PSB_RSGX32(_offs)					\
+  ioread32(dev_priv->sgx_reg + (_offs))
+#endif
+
+#define OSPM_PUNIT_PORT		0x04
+
+#define OSPM_APMBA		0x7a
+/* APM_STS register:
+ * 1:0- GPS, 3:2 - VDPS, 5:4 -VEPS, 7:6 -GL3, 9:8 -ISP, 11:10 - IPH */
+#define APM_STS			0x04
+#define APM_STS_VDPS_SHIFT	2
+
+#define APM_STS_D0		0x0
+#define APM_STS_D1		0x1
+#define APM_STS_D2		0x2
+#define APM_STS_D3		0x3
+#define APM_STS_VDPS_MASK	0xC
+
+/* OSPM_PM_SSS register:
+ * 1:0 - GFX, 3:2 - DPA, 5:4 - VED, 7:6 - VEC, 9:8 - GL3,
+ * 11:10 - IUNIT, 13:12 - Iunit PHY Cache
+ * 15:14 - Display B, 17:16 - Display C, 19:18 - MIPI
+ */
+#define OSPM_OSPMBA		0x78
+#define OSPM_PM_SSS		0x30
+
+#define MFLD_MSVDX_FABRIC_DEBUG 0
+#define MSVDX_REG_DUMP 0
+
+static inline int psb_get_power_state(int islands)
+{
+	switch (islands) {
+	case OSPM_VIDEO_DEC_ISLAND:
+		if (pmu_nc_get_power_state(OSPM_VIDEO_DEC_ISLAND, APM_REG_TYPE)
+			== APM_STS_D3)
+			return 0;
+		else
+			return 1;
+		break;
+	case OSPM_GL3_CACHE_ISLAND:
+		if (pmu_nc_get_power_state(OSPM_GL3_CACHE_ISLAND, APM_REG_TYPE)
+			== APM_STS_D3)
+			return 0;
+		else
+			return 1;
+		break;
+	default:
+		PSB_DEBUG_WARN("WARN: unsupported island.\n");
+		return -EINVAL;
+	}
+}
+
+#define PSB_ALPL(_val, _base)			\
+  (((_val) >> (_base ## _ALIGNSHIFT)) << (_base ## _SHIFT))
+#define PSB_ALPLM(_val, _base)			\
+  ((((_val) >> (_base ## _ALIGNSHIFT)) << (_base ## _SHIFT)) & (_base ## _MASK))
+
+#define IS_POULSBO(dev) 0  /* (((dev)->pci_device == 0x8108) || \
+			       ((dev)->pci_device == 0x8109)) */
+
+#define IS_MRST(dev) (((dev)->pci_device & 0xfff8) == 0x4100)
+
+/* Will revisit it after CLOVER TRAIL PO. */
+/* pciid: CLV A0 = 0X8C7, CLV B0 = 0X8C8-0X8CB, CLV+ A0/B0 0X8CC-0X8CF.*/
+#define IS_MDFLD_OLD(dev) (((dev)->pci_device & 0xfff8) == 0x0130)
+#define IS_CTP(dev) (((dev->pci_device & 0xffff) == 0x08c0) ||	\
+		     ((dev->pci_device & 0xffff) == 0x08c7) ||  \
+		     ((dev->pci_device & 0xffff) == 0x08c8))
+
+#define IS_MRFL(dev) ((dev->pci_device & 0xFFFC) == 0x1180)
+#define IS_MRFLD(dev) (((dev)->pci_device & 0xfff8) == 0x1180)
+
+#define IS_CTP_NEED_WA(dev) ((dev->pci_device & 0xffff) == 0x08c8)
+#define HAS_DISPLAY_IED_CNTRL(dev) ((dev->pci_device & 0xffff) == 0x08c8)
+
+#define IS_MDFLD(dev) (IS_CTP(dev) || IS_MDFLD_OLD(dev))
+#define IS_MID(dev) (IS_MRST(dev) || IS_MDFLD(dev))
+
+#define IS_TOPAZ(dev) ((IS_MRST(dev) && (((dev)->pci_device & 0xfffc) != PCI_ID_TOPAZ_DISABLED)) || IS_MDFLD(dev))
+
+#define IS_MSVDX_MEM_TILE(dev) ((IS_MRFL(dev)) || (IS_CTP(dev)))
+
+extern int drm_psb_ospm;
+extern int drm_psb_cpurelax;
+extern int drm_psb_udelaydivider;
+extern int drm_psb_gl3_enable;
+extern int drm_psb_topaz_clockgating;
+
+extern char HDMI_EDID[20];
+extern int hdmi_state;
+extern void psb_flip_abnormal_debug_info(struct drm_device *dev);
+extern struct drm_device *g_drm_dev;
+/*
+ * set cpu_relax = 1 in sysfs to use cpu_relax instead of udelay bysy loop
+ * set udelay_divider to reduce the udelay values,e.g.= 10, reduce 10 times
+ */
+#define PSB_UDELAY(usec)                        \
+do {                                            \
+	if (drm_psb_cpurelax == 0)              \
+		DRM_UDELAY(usec / drm_psb_udelaydivider);   \
+	else                                    \
+		cpu_relax();                    \
+} while (0)
+
+#endif
diff --git a/drivers/external_drivers/intel_media/common/psb_gtt.c b/drivers/external_drivers/intel_media/common/psb_gtt.c
new file mode 100644
index 0000000..a6c3dd5
--- /dev/null
+++ b/drivers/external_drivers/intel_media/common/psb_gtt.c
@@ -0,0 +1,1331 @@
+/*
+ * Copyright (c) 2007, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
+ */
+
+#include <drm/drmP.h>
+#include "psb_drv.h"
+#include "psb_pvr_glue.h"
+
+static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
+{
+	uint32_t mask = PSB_PTE_VALID;
+
+	if (type & PSB_MMU_CACHED_MEMORY)
+		mask |= PSB_PTE_CACHED;
+	if (type & PSB_MMU_RO_MEMORY)
+		mask |= PSB_PTE_RO;
+	if (type & PSB_MMU_WO_MEMORY)
+		mask |= PSB_PTE_WO;
+
+	return (pfn << PAGE_SHIFT) | mask;
+}
+
+struct psb_gtt *psb_gtt_alloc(struct drm_device *dev)
+{
+	struct psb_gtt *tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+
+	if (!tmp)
+		return NULL;
+
+	init_rwsem(&tmp->sem);
+	tmp->dev = dev;
+
+	return tmp;
+}
+
+void psb_gtt_takedown(struct psb_gtt *pg, int free)
+{
+	struct drm_psb_private *dev_priv = pg->dev->dev_private;
+
+	if (!pg)
+		return;
+
+	if (pg->gtt_map) {
+		iounmap(pg->gtt_map);
+		pg->gtt_map = NULL;
+	}
+	if (pg->initialized) {
+		pci_write_config_word(pg->dev->pdev, PSB_GMCH_CTRL,
+				      pg->gmch_ctrl);
+		PSB_WVDC32(pg->pge_ctl, PSB_PGETBL_CTL);
+		(void) PSB_RVDC32(PSB_PGETBL_CTL);
+	}
+	if (free)
+		kfree(pg);
+}
+
+int psb_gtt_init(struct psb_gtt *pg, int resume)
+{
+	struct drm_device *dev = pg->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	unsigned gtt_pages;
+	unsigned long stolen_size, vram_stolen_size, ci_stolen_size;
+	unsigned long rar_stolen_size;
+	unsigned i, num_pages;
+	unsigned pfn_base;
+	uint32_t ci_pages, vram_pages;
+	uint32_t tt_pages;
+	uint32_t *ttm_gtt_map;
+	uint32_t dvmt_mode = 0;
+
+	int ret = 0;
+	uint32_t pte;
+
+	pci_read_config_word(dev->pdev, PSB_GMCH_CTRL, &pg->gmch_ctrl);
+	pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
+			      pg->gmch_ctrl | _PSB_GMCH_ENABLED);
+
+	pg->pge_ctl = PSB_RVDC32(PSB_PGETBL_CTL);
+	PSB_WVDC32(pg->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
+	(void) PSB_RVDC32(PSB_PGETBL_CTL);
+
+	pg->initialized = 1;
+
+	pg->gtt_phys_start = pg->pge_ctl & PAGE_MASK;
+
+	pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE);
+	/* fix me: video mmu has hw bug to access 0x0D0000000,
+	 * then make gatt start at 0x0e000,0000 */
+	pg->mmu_gatt_start = PSB_MEM_TT_START;
+	pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE);
+	gtt_pages =
+		pci_resource_len(dev->pdev, PSB_GTT_RESOURCE) >> PAGE_SHIFT;
+	pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE)
+			 >> PAGE_SHIFT;
+
+	pci_read_config_dword(dev->pdev, PSB_BSM, &pg->stolen_base);
+	vram_stolen_size = pg->gtt_phys_start - pg->stolen_base - PAGE_SIZE;
+
+	/* CI is not included in the stolen size since the TOPAZ MMU bug */
+	ci_stolen_size = dev_priv->ci_region_size;
+	/* Don't add CI & RAR share buffer space
+	 * managed by TTM to stolen_size */
+	stolen_size = vram_stolen_size;
+
+	rar_stolen_size = dev_priv->imr_region_size;
+
+	printk(KERN_INFO"GMMADR(region 0) start: 0x%08x (%dM).\n",
+	       pg->gatt_start, pg->gatt_pages / 256);
+	printk(KERN_INFO"GTTADR(region 3) start: 0x%08x (can map %dM RAM), and actual RAM base 0x%08x.\n",
+	       pg->gtt_start, gtt_pages * 4, pg->gtt_phys_start);
+	printk(KERN_INFO"Stole memory information \n");
+	printk(KERN_INFO"      base in RAM: 0x%x \n", pg->stolen_base);
+	printk(KERN_INFO"      size: %luK, calculated by (GTT RAM base) - (Stolen base), seems wrong\n",
+	       vram_stolen_size / 1024);
+	dvmt_mode = (pg->gmch_ctrl >> 4) & 0x7;
+	printk(KERN_INFO"      the correct size should be: %dM(dvmt mode=%d) \n",
+	       (dvmt_mode == 1) ? 1 : (2 << (dvmt_mode - 1)), dvmt_mode);
+
+	if (ci_stolen_size > 0)
+		printk(KERN_INFO"CI Stole memory: RAM base = 0x%08x, size = %lu M \n",
+		       dev_priv->ci_region_start,
+		       ci_stolen_size / 1024 / 1024);
+	if (rar_stolen_size > 0)
+		printk(KERN_INFO"RAR Stole memory: RAM base = 0x%08x, size = %lu M \n",
+		       dev_priv->imr_region_start,
+		       rar_stolen_size / 1024 / 1024);
+
+	if (resume && (gtt_pages != pg->gtt_pages) &&
+	    (stolen_size != pg->stolen_size)) {
+		DRM_ERROR("GTT resume error.\n");
+		ret = -EINVAL;
+		goto out_err;
+	}
+
+	pg->gtt_pages = gtt_pages;
+	pg->stolen_size = stolen_size;
+	pg->vram_stolen_size = vram_stolen_size;
+	pg->ci_stolen_size = ci_stolen_size;
+	pg->rar_stolen_size = rar_stolen_size;
+	pg->gtt_map =
+		ioremap_nocache(pg->gtt_phys_start, gtt_pages << PAGE_SHIFT);
+	if (!pg->gtt_map) {
+		DRM_ERROR("Failure to map gtt.\n");
+		ret = -ENOMEM;
+		goto out_err;
+	}
+
+	pg->vram_addr = ioremap_wc(pg->stolen_base, stolen_size);
+	if (!pg->vram_addr) {
+		DRM_ERROR("Failure to map stolen base.\n");
+		ret = -ENOMEM;
+		goto out_err;
+	}
+
+	tt_pages = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ?
+		   (pg->gatt_pages) : PSB_TT_PRIV0_PLIMIT;
+
+	ttm_gtt_map = pg->gtt_map + tt_pages / 2;
+
+	/*
+	 * insert vram stolen pages.
+	 */
+
+	pfn_base = pg->stolen_base >> PAGE_SHIFT;
+	vram_pages = num_pages = vram_stolen_size >> PAGE_SHIFT;
+	printk(KERN_INFO"Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
+	       num_pages, pfn_base, 0);
+	for (i = 0; i < num_pages; ++i) {
+		pte = psb_gtt_mask_pte(pfn_base + i, 0);
+		iowrite32(pte, pg->gtt_map + i);
+	}
+
+	/*
+	 * Init rest of gtt managed by IMG.
+	 */
+	pfn_base = page_to_pfn(dev_priv->scratch_page);
+	pte = psb_gtt_mask_pte(pfn_base, 0);
+	for (; i < tt_pages / 2; ++i)
+		iowrite32(pte, pg->gtt_map + i);
+
+	/*
+	 * insert CI stolen pages
+	 */
+
+	pfn_base = dev_priv->ci_region_start >> PAGE_SHIFT;
+	ci_pages = num_pages = ci_stolen_size >> PAGE_SHIFT;
+	printk(KERN_INFO"Set up %d CI stolen pages starting at 0x%08x, GTT offset %dK\n",
+	       num_pages, pfn_base, (ttm_gtt_map - pg->gtt_map) * 4);
+	for (i = 0; i < num_pages; ++i) {
+		pte = psb_gtt_mask_pte(pfn_base + i, 0);
+		iowrite32(pte, ttm_gtt_map + i);
+	}
+	/*
+	 * Init rest of gtt managed by TTM.
+	 */
+
+	pfn_base = page_to_pfn(dev_priv->scratch_page);
+	pte = psb_gtt_mask_pte(pfn_base, 0);
+	PSB_DEBUG_INIT("Initializing the rest of a total "
+		       "of %d gtt pages.\n", pg->gatt_pages);
+
+	for (; i < pg->gatt_pages - tt_pages / 2; ++i)
+		iowrite32(pte, ttm_gtt_map + i);
+	(void) ioread32(pg->gtt_map + i - 1);
+
+	return 0;
+
+out_err:
+	psb_gtt_takedown(pg, 0);
+	return ret;
+}
+
+int psb_gtt_insert_pages(struct psb_gtt *pg, struct page **pages,
+			 unsigned offset_pages, unsigned num_pages,
+			 unsigned desired_tile_stride,
+			 unsigned hw_tile_stride, int type)
+{
+	unsigned rows = 1;
+	unsigned add;
+	unsigned row_add;
+	unsigned i;
+	unsigned j;
+	uint32_t *cur_page = NULL;
+	uint32_t pte;
+
+	if (hw_tile_stride)
+		rows = num_pages / desired_tile_stride;
+	else
+		desired_tile_stride = num_pages;
+
+	add = desired_tile_stride;
+	row_add = hw_tile_stride;
+
+	down_read(&pg->sem);
+	for (i = 0; i < rows; ++i) {
+		cur_page = pg->gtt_map + offset_pages;
+		for (j = 0; j < desired_tile_stride; ++j) {
+			pte =
+				psb_gtt_mask_pte(page_to_pfn(*pages++), type);
+			iowrite32(pte, cur_page++);
+		}
+		offset_pages += add;
+	}
+	(void) ioread32(cur_page - 1);
+	up_read(&pg->sem);
+
+	return 0;
+}
+
+static int psb_gtt_insert_pfn_list(struct psb_gtt *pg, u32 *pfn_list,
+	unsigned offset_pages, unsigned num_pages,
+	unsigned desired_tile_stride,
+	unsigned hw_tile_stride, int type)
+{
+	unsigned rows = 1;
+	unsigned add;
+	unsigned row_add;
+	unsigned i;
+	unsigned j;
+	uint32_t *cur_page = NULL;
+	uint32_t pte;
+
+	if (!pg || !pfn_list)
+		return -EINVAL;
+
+	if (hw_tile_stride)
+		rows = num_pages / desired_tile_stride;
+	else
+		desired_tile_stride = num_pages;
+
+	add = desired_tile_stride;
+	row_add = hw_tile_stride;
+
+	down_read(&pg->sem);
+	for (i = 0; i < rows; ++i) {
+		cur_page = pg->gtt_map + offset_pages;
+		for (j = 0; j < desired_tile_stride; ++j) {
+			pte = psb_gtt_mask_pte(*pfn_list++, type);
+			iowrite32(pte, cur_page++);
+		}
+		offset_pages += add;
+	}
+	(void) ioread32(cur_page - 1);
+	up_read(&pg->sem);
+
+	return 0;
+}
+
+int psb_gtt_insert_phys_addresses(struct psb_gtt *pg, IMG_CPU_PHYADDR *pPhysFrames,
+				  unsigned offset_pages, unsigned num_pages, int type)
+{
+	unsigned j;
+	uint32_t *cur_page = NULL;
+	uint32_t pte;
+
+	/* printk("Allocatng IMG GTT mem at %x (pages %d)\n",offset_pages,num_pages); */
+	down_read(&pg->sem);
+
+	cur_page = pg->gtt_map + offset_pages;
+	for (j = 0; j < num_pages; ++j) {
+		pte =  psb_gtt_mask_pte((pPhysFrames++)->uiAddr >> PAGE_SHIFT, type);
+		iowrite32(pte, cur_page++);
+		/* printk("PTE %d: %x/%x\n",j,(pPhysFrames-1)->uiAddr,pte); */
+	}
+	(void) ioread32(cur_page - 1);
+
+	up_read(&pg->sem);
+
+	return 0;
+}
+
+int psb_gtt_remove_pages(struct psb_gtt *pg, unsigned offset_pages,
+			 unsigned num_pages, unsigned desired_tile_stride,
+			 unsigned hw_tile_stride, int rc_prot)
+{
+	struct drm_psb_private *dev_priv = pg->dev->dev_private;
+	unsigned rows = 1;
+	unsigned add;
+	unsigned row_add;
+	unsigned i;
+	unsigned j;
+	uint32_t *cur_page = NULL;
+	unsigned pfn_base = page_to_pfn(dev_priv->scratch_page);
+	uint32_t pte = psb_gtt_mask_pte(pfn_base, 0);
+
+	if (hw_tile_stride)
+		rows = num_pages / desired_tile_stride;
+	else
+		desired_tile_stride = num_pages;
+
+	add = desired_tile_stride;
+	row_add = hw_tile_stride;
+
+	if (rc_prot)
+		down_read(&pg->sem);
+	for (i = 0; i < rows; ++i) {
+		cur_page = pg->gtt_map + offset_pages;
+		for (j = 0; j < desired_tile_stride; ++j)
+			iowrite32(pte, cur_page++);
+
+		offset_pages += add;
+	}
+	(void) ioread32(cur_page - 1);
+	if (rc_prot)
+		up_read(&pg->sem);
+
+	return 0;
+}
+
+int psb_gtt_mm_init(struct psb_gtt *pg)
+{
+	struct psb_gtt_mm *gtt_mm;
+	struct drm_psb_private *dev_priv = pg->dev->dev_private;
+	struct drm_open_hash *ht;
+	struct drm_mm *mm;
+	int ret;
+	uint32_t tt_start;
+	uint32_t tt_size;
+
+	if (!pg || !pg->initialized) {
+		DRM_DEBUG("Invalid gtt struct\n");
+		return -EINVAL;
+	}
+
+	gtt_mm =  kzalloc(sizeof(struct psb_gtt_mm), GFP_KERNEL);
+	if (!gtt_mm)
+		return -ENOMEM;
+
+	spin_lock_init(&gtt_mm->lock);
+
+	ht = &gtt_mm->hash;
+	ret = drm_ht_create(ht, 20);
+	if (ret) {
+		DRM_DEBUG("Create hash table failed(%d)\n", ret);
+		goto err_free;
+	}
+
+	tt_start = (pg->stolen_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	tt_start = (tt_start < pg->gatt_pages) ? tt_start : pg->gatt_pages;
+	tt_size = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ?
+		  (pg->gatt_pages) : PSB_TT_PRIV0_PLIMIT;
+
+	mm = &gtt_mm->base;
+
+	/*will use tt_start ~ 128M for IMG TT buffers*/
+	drm_mm_init(mm, tt_start, ((tt_size / 2) - tt_start));
+
+	gtt_mm->count = 0;
+
+	dev_priv->gtt_mm = gtt_mm;
+
+	DRM_INFO("PSB GTT mem manager ready, tt_start %ld, tt_size %ld pages\n",
+		 (unsigned long)tt_start,
+		 (unsigned long)((tt_size / 2) - tt_start));
+	return 0;
+err_mm_init:
+	drm_ht_remove(ht);
+
+err_free:
+	kfree(gtt_mm);
+	return ret;
+}
+
+/**
+ * Delete all hash entries;
+ */
+void psb_gtt_mm_takedown(void)
+{
+	return;
+}
+
+static int psb_gtt_mm_get_ht_by_pid_locked(struct psb_gtt_mm *mm,
+		u32 tgid,
+		struct psb_gtt_hash_entry **hentry)
+{
+	struct drm_hash_item *entry;
+	struct psb_gtt_hash_entry *psb_entry;
+	int ret;
+
+	ret = drm_ht_find_item(&mm->hash, tgid, &entry);
+	if (ret) {
+		DRM_DEBUG("Cannot find entry pid=%d\n", tgid);
+		return ret;
+	}
+
+	psb_entry = container_of(entry, struct psb_gtt_hash_entry, item);
+	if (!psb_entry) {
+		DRM_DEBUG("Invalid entry");
+		return -EINVAL;
+	}
+
+	*hentry = psb_entry;
+	return 0;
+}
+
+
+static int psb_gtt_mm_insert_ht_locked(struct psb_gtt_mm *mm,
+				       u32 tgid,
+				       struct psb_gtt_hash_entry *hentry)
+{
+	struct drm_hash_item *item;
+	int ret;
+
+	if (!hentry) {
+		DRM_DEBUG("Invalid parameters\n");
+		return -EINVAL;
+	}
+
+	item = &hentry->item;
+	item->key = tgid;
+
+	/**
+	 * NOTE: drm_ht_insert_item will perform such a check
+	ret = psb_gtt_mm_get_ht_by_pid(mm, tgid, &tmp);
+	if (!ret) {
+		DRM_DEBUG("Entry already exists for pid %ld\n", tgid);
+		return -EAGAIN;
+	}
+	*/
+
+	/*Insert the given entry*/
+	ret = drm_ht_insert_item(&mm->hash, item);
+	if (ret) {
+		DRM_DEBUG("Insert failure\n");
+		return ret;
+	}
+
+	mm->count++;
+
+	return 0;
+}
+
+static int psb_gtt_mm_alloc_insert_ht(struct psb_gtt_mm *mm,
+				      u32 tgid,
+				      struct psb_gtt_hash_entry **entry)
+{
+	struct psb_gtt_hash_entry *hentry;
+	int ret;
+
+	/*if the hentry for this tgid exists, just get it and return*/
+	spin_lock(&mm->lock);
+	ret = psb_gtt_mm_get_ht_by_pid_locked(mm, tgid, &hentry);
+	if (!ret) {
+		DRM_DEBUG("Entry for tgid %d exists, hentry %p\n",
+			  tgid, hentry);
+		*entry = hentry;
+		spin_unlock(&mm->lock);
+		return 0;
+	}
+	spin_unlock(&mm->lock);
+
+	DRM_DEBUG("Entry for tgid %d doesn't exist, will create it\n", tgid);
+
+	hentry = kzalloc(sizeof(struct psb_gtt_hash_entry), GFP_KERNEL);
+	if (!hentry) {
+		DRM_DEBUG("Kmalloc failed\n");
+		return -ENOMEM;
+	}
+
+	ret = drm_ht_create(&hentry->ht, 20);
+	if (ret) {
+		DRM_DEBUG("Create hash table failed\n");
+		goto failed_drm_ht_create;
+	}
+
+	spin_lock(&mm->lock);
+	ret = psb_gtt_mm_insert_ht_locked(mm, tgid, hentry);
+	spin_unlock(&mm->lock);
+
+	if (ret) {
+		DRM_DEBUG("Insert hash table entry failed\n");
+		goto failed_psb_gtt_mm_insert_ht_locked;
+	}
+
+	*entry = hentry;
+
+	return ret;
+
+failed_psb_gtt_mm_insert_ht_locked:
+	drm_ht_remove(&hentry->ht);
+
+failed_drm_ht_create:
+	kfree(hentry);
+
+	return ret;
+}
+
+static struct psb_gtt_hash_entry *
+psb_gtt_mm_remove_ht_locked(struct psb_gtt_mm *mm, u32 tgid) {
+	struct psb_gtt_hash_entry *tmp;
+	int ret;
+
+	ret = psb_gtt_mm_get_ht_by_pid_locked(mm, tgid, &tmp);
+	if (ret) {
+		DRM_DEBUG("Cannot find entry pid %d\n", tgid);
+		return NULL;
+	}
+
+	/*remove it from ht*/
+	drm_ht_remove_item(&mm->hash, &tmp->item);
+
+	mm->count--;
+
+	return tmp;
+}
+
+static int psb_gtt_mm_remove_free_ht_locked(struct psb_gtt_mm *mm, u32 tgid)
+{
+	struct psb_gtt_hash_entry *entry;
+
+	entry = psb_gtt_mm_remove_ht_locked(mm, tgid);
+
+	if (!entry) {
+		DRM_DEBUG("Invalid entry");
+		return -EINVAL;
+	}
+
+	/*delete ht*/
+	drm_ht_remove(&entry->ht);
+
+	/*free this entry*/
+	kfree(entry);
+	return 0;
+}
+
+static int
+psb_gtt_mm_get_mem_mapping_locked(struct drm_open_hash *ht,
+				  u32 key,
+				  struct psb_gtt_mem_mapping **hentry)
+{
+	struct drm_hash_item *entry;
+	struct psb_gtt_mem_mapping *mapping;
+	int ret;
+
+	ret = drm_ht_find_item(ht, key, &entry);
+	if (ret) {
+		DRM_DEBUG("Cannot find key %d\n", key);
+		return ret;
+	}
+
+	mapping =  container_of(entry, struct psb_gtt_mem_mapping, item);
+	if (!mapping) {
+		DRM_DEBUG("Invalid entry\n");
+		return -EINVAL;
+	}
+
+	*hentry = mapping;
+	return 0;
+}
+
+static int
+psb_gtt_mm_get_mem_mapping_anyused_locked(struct drm_open_hash *ht,
+				  struct psb_gtt_mem_mapping **hentry)
+{
+	struct drm_hash_item *entry;
+	struct psb_gtt_mem_mapping *mapping;
+	int ret;
+
+	ret = drm_ht_find_item_anyused(ht, &entry);
+	if (ret) {
+		DRM_DEBUG("Cannot find\n");
+		return ret;
+	}
+
+	mapping =  container_of(entry, struct psb_gtt_mem_mapping, item);
+	if (!mapping) {
+		DRM_DEBUG("Invalid entry\n");
+		return -EINVAL;
+	}
+
+	*hentry = mapping;
+	return 0;
+}
+
+static int
+psb_gtt_mm_insert_mem_mapping_locked(struct drm_open_hash *ht,
+				     u32 key,
+				     struct psb_gtt_mem_mapping *hentry)
+{
+	struct drm_hash_item *item;
+	struct psb_gtt_hash_entry *entry;
+	int ret;
+
+	if (!ht || !hentry) {
+		DRM_DEBUG("hentry is NULL\n");
+		return -EINVAL;
+	}
+
+	item = &hentry->item;
+	item->key = key;
+
+	ret = drm_ht_insert_item(ht, item);
+	if (ret) {
+		DRM_DEBUG("insert_item failed\n");
+		return ret;
+	}
+
+	entry = container_of(ht, struct psb_gtt_hash_entry, ht);
+	if (entry)
+		entry->count++;
+
+	return 0;
+}
+
+static int
+psb_gtt_mm_alloc_insert_mem_mapping(struct psb_gtt_mm *mm,
+				    struct drm_open_hash *ht,
+				    u32 key,
+				    struct drm_mm_node *node,
+				    struct psb_gtt_mem_mapping **entry)
+{
+	struct psb_gtt_mem_mapping *mapping;
+	int ret;
+
+	if (!node || !ht) {
+		DRM_DEBUG("parameter error\n");
+		return -EINVAL;
+	}
+
+	/*try to get this mem_map */
+	spin_lock(&mm->lock);
+	ret = psb_gtt_mm_get_mem_mapping_locked(ht, key, &mapping);
+	if (!ret) {
+		DRM_DEBUG("mapping entry for key %d exists, entry %p\n",
+			  key, mapping);
+		*entry = mapping;
+		spin_unlock(&mm->lock);
+		return 0;
+	}
+	spin_unlock(&mm->lock);
+
+	DRM_DEBUG("Mapping entry for key %d doesn't exist, will create it\n",
+		  key);
+
+	mapping = kzalloc(sizeof(struct psb_gtt_mem_mapping), GFP_KERNEL);
+	if (!mapping) {
+		DRM_DEBUG("kmalloc failed\n");
+		return -ENOMEM;
+	}
+
+	mapping->node = node;
+
+	spin_lock(&mm->lock);
+	ret = psb_gtt_mm_insert_mem_mapping_locked(ht, key, mapping);
+	spin_unlock(&mm->lock);
+
+	if (!ret)
+		*entry = mapping;
+
+	return ret;
+}
+
+static struct psb_gtt_mem_mapping *
+psb_gtt_mm_remove_mem_mapping_locked(struct drm_open_hash *ht, u32 key) {
+	struct psb_gtt_mem_mapping *tmp;
+	struct psb_gtt_hash_entry *entry;
+	int ret;
+
+	if (!ht) {
+		DRM_DEBUG("hash table is NULL\n");
+		return NULL;
+	}
+
+	ret = psb_gtt_mm_get_mem_mapping_locked(ht, key, &tmp);
+	if (ret) {
+		DRM_DEBUG("Cannot find key %d\n", key);
+		return NULL;
+	}
+
+	drm_ht_remove_item(ht, &tmp->item);
+
+	entry = container_of(ht, struct psb_gtt_hash_entry, ht);
+	if (entry)
+		entry->count--;
+
+	return tmp;
+}
+
+static struct psb_gtt_mem_mapping *
+psb_gtt_mm_remove_mem_mapping_anyused_locked(struct drm_open_hash *ht) {
+	struct psb_gtt_mem_mapping *tmp;
+	struct psb_gtt_hash_entry *entry;
+	int ret;
+
+	if (!ht) {
+		DRM_DEBUG("hash table is NULL\n");
+		return NULL;
+	}
+
+	ret = psb_gtt_mm_get_mem_mapping_anyused_locked(ht, &tmp);
+	if (ret) {
+		DRM_DEBUG("Cannot find any used\n");
+		return NULL;
+	}
+
+	drm_ht_remove_item(ht, &tmp->item);
+
+	entry = container_of(ht, struct psb_gtt_hash_entry, ht);
+	if (entry)
+		entry->count--;
+
+	return tmp;
+}
+
+static int psb_gtt_mm_remove_free_mem_mapping_locked(struct drm_open_hash *ht,
+		u32 key,
+		struct drm_mm_node **node)
+{
+	struct psb_gtt_mem_mapping *entry;
+
+	entry = psb_gtt_mm_remove_mem_mapping_locked(ht, key);
+	if (!entry) {
+		DRM_DEBUG("entry is NULL\n");
+		return -EINVAL;
+	}
+
+	*node = entry->node;
+
+	kfree(entry);
+	return 0;
+}
+
+static int psb_gtt_mm_remove_free_mem_mapping_anyused_locked
+	(struct drm_open_hash *ht,
+	struct drm_mm_node **node)
+{
+	struct psb_gtt_mem_mapping *entry;
+
+	entry = psb_gtt_mm_remove_mem_mapping_anyused_locked(ht);
+	if (!entry) {
+		DRM_DEBUG("entry is NULL\n");
+		return -EINVAL;
+	}
+
+	*node = entry->node;
+
+	kfree(entry);
+	return 0;
+}
+
+static int psb_gtt_add_node(struct psb_gtt_mm *mm,
+			    u32 tgid,
+			    u32 key,
+			    struct drm_mm_node *node,
+			    struct psb_gtt_mem_mapping **entry)
+{
+	struct psb_gtt_hash_entry *hentry;
+	struct psb_gtt_mem_mapping *mapping;
+	int ret;
+
+	ret = psb_gtt_mm_alloc_insert_ht(mm, tgid, &hentry);
+	if (ret) {
+		DRM_DEBUG("alloc_insert failed\n");
+		return ret;
+	}
+
+	ret = psb_gtt_mm_alloc_insert_mem_mapping(mm,
+			&hentry->ht,
+			key,
+			node,
+			&mapping);
+	if (ret) {
+		DRM_DEBUG("mapping alloc_insert failed\n");
+		return ret;
+	}
+
+	*entry = mapping;
+
+	return 0;
+}
+
+static int psb_gtt_remove_node(struct psb_gtt_mm *mm,
+			       u32 tgid,
+			       u32 key,
+			       struct drm_mm_node **node)
+{
+	struct psb_gtt_hash_entry *hentry;
+	struct drm_mm_node *tmp;
+	int ret;
+
+	spin_lock(&mm->lock);
+	ret = psb_gtt_mm_get_ht_by_pid_locked(mm, tgid, &hentry);
+	if (ret) {
+		DRM_DEBUG("Cannot find entry for pid %d\n", tgid);
+		spin_unlock(&mm->lock);
+		return ret;
+	}
+	spin_unlock(&mm->lock);
+
+	/*remove mapping entry*/
+	spin_lock(&mm->lock);
+	ret = psb_gtt_mm_remove_free_mem_mapping_locked(&hentry->ht,
+			key,
+			&tmp);
+	if (ret) {
+		DRM_DEBUG("remove_free failed\n");
+		spin_unlock(&mm->lock);
+		return ret;
+	}
+
+	*node = tmp;
+
+	/*check the count of mapping entry*/
+	if (!hentry->count) {
+		DRM_DEBUG("count of mapping entry is zero, tgid=%d\n", tgid);
+		psb_gtt_mm_remove_free_ht_locked(mm, tgid);
+	}
+
+	spin_unlock(&mm->lock);
+
+	return 0;
+}
+
+static int psb_gtt_remove_node_anyused(struct psb_gtt_mm *mm,
+			       u32 tgid,
+			       struct drm_mm_node **node)
+{
+	struct psb_gtt_hash_entry *hentry;
+	struct drm_mm_node *tmp;
+	int ret;
+
+	spin_lock(&mm->lock);
+	ret = psb_gtt_mm_get_ht_by_pid_locked(mm, tgid, &hentry);
+	if (ret) {
+		spin_unlock(&mm->lock);
+		return ret;
+	}
+	spin_unlock(&mm->lock);
+
+	/*remove mapping entry*/
+	spin_lock(&mm->lock);
+	ret = psb_gtt_mm_remove_free_mem_mapping_anyused_locked(&hentry->ht,
+			&tmp);
+	if (ret) {
+		DRM_DEBUG("remove_free failed\n");
+		spin_unlock(&mm->lock);
+		return ret;
+	}
+
+	*node = tmp;
+
+	/*check the count of mapping entry*/
+	if (!hentry->count) {
+		DRM_DEBUG("count of mapping entry is zero, tgid=%d\n", tgid);
+		psb_gtt_mm_remove_free_ht_locked(mm, tgid);
+	}
+
+	spin_unlock(&mm->lock);
+
+	return 0;
+}
+
+static int psb_gtt_mm_alloc_mem(struct psb_gtt_mm *mm,
+				uint32_t pages,
+				uint32_t align,
+				struct drm_mm_node **node)
+{
+	struct drm_mm_node *tmp_node;
+	int ret;
+
+	do {
+		ret = drm_mm_pre_get(&mm->base);
+		if (unlikely(ret)) {
+			DRM_DEBUG("drm_mm_pre_get error\n");
+			return ret;
+		}
+
+		spin_lock(&mm->lock);
+		tmp_node = drm_mm_search_free(&mm->base, pages, align, 1);
+		if (unlikely(!tmp_node)) {
+			DRM_DEBUG("No free node found\n");
+			spin_unlock(&mm->lock);
+			break;
+		}
+
+		tmp_node = drm_mm_get_block_atomic(tmp_node, pages, align);
+		spin_unlock(&mm->lock);
+	} while (!tmp_node);
+
+	if (!tmp_node) {
+		DRM_DEBUG("Node allocation failed\n");
+		return -ENOMEM;
+	}
+
+	*node = tmp_node;
+	return 0;
+}
+
+static void psb_gtt_mm_free_mem(struct psb_gtt_mm *mm, struct drm_mm_node *node)
+{
+	spin_lock(&mm->lock);
+	drm_mm_put_block(node);
+	spin_unlock(&mm->lock);
+}
+
+int psb_gtt_map_meminfo(struct drm_device *dev,
+			IMG_HANDLE hKernelMemInfo,
+			uint32_t page_align,
+			uint32_t *offset)
+{
+	struct drm_psb_private *dev_priv
+	= (struct drm_psb_private *)dev->dev_private;
+	PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
+	struct psb_gtt_mm *mm = dev_priv->gtt_mm;
+	struct psb_gtt *pg = dev_priv->pg;
+	uint32_t size, pages, offset_pages;
+	void *kmem;
+	struct drm_mm_node *node;
+	u32 *pfn_list = 0;
+	struct psb_gtt_mem_mapping *mapping = NULL;
+	int ret;
+
+	ret = psb_get_meminfo_by_handle(dev_priv, hKernelMemInfo,
+			&psKernelMemInfo);
+	if (ret) {
+		DRM_DEBUG("Cannot find kernelMemInfo handle %d\n",
+			  (int)hKernelMemInfo);
+		return -EINVAL;
+	}
+
+	DRM_DEBUG("Got psKernelMemInfo %p for handle %x\n",
+		  psKernelMemInfo, (u32)hKernelMemInfo);
+
+	size = psKernelMemInfo->uAllocSize;
+	kmem = psKernelMemInfo->pvLinAddrKM;
+	pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+	DRM_DEBUG("KerMemInfo size %d, cpuVadr %p, pages %d, osMemHdl %p\n",
+		size, kmem, pages,
+		psKernelMemInfo->sMemBlk.hOSMemHandle);
+
+	if (!kmem)
+		DRM_DEBUG("kmem is NULL");
+
+	/*get pages*/
+	ret = psb_get_pages_by_mem_handle(dev_priv,
+			psKernelMemInfo->sMemBlk.hOSMemHandle,
+			&pfn_list, pages);
+	if (ret) {
+		DRM_DEBUG("get pages error\n");
+		return ret;
+	}
+
+	DRM_DEBUG("get %d pages\n", pages);
+
+	/*alloc memory in TT apeture*/
+	ret = psb_gtt_mm_alloc_mem(mm, pages, page_align, &node);
+	if (ret) {
+		DRM_DEBUG("alloc TT memory error\n");
+		goto failed_pages_alloc;
+	}
+
+	/*update psb_gtt_mm*/
+	ret = psb_gtt_add_node(mm,
+			       (u32)psb_get_tgid(),
+			       (u32)hKernelMemInfo,
+			       node,
+			       &mapping);
+	if (ret) {
+		DRM_DEBUG("add_node failed");
+		goto failed_add_node;
+	}
+
+	node = mapping->node;
+	offset_pages = node->start;
+
+	DRM_DEBUG("get free node for %d pages, offset %d pages",
+		  pages, offset_pages);
+
+	/*update gtt*/
+	psb_gtt_insert_pfn_list(pg, pfn_list,
+			     (unsigned)offset_pages,
+			     (unsigned)pages,
+			     0,
+			     0,
+			     0);
+
+	/*free pfn_list if allocated*/
+	kfree(pfn_list);
+
+	*offset = offset_pages;
+	return 0;
+
+failed_add_node:
+	psb_gtt_mm_free_mem(mm, node);
+failed_pages_alloc:
+	kfree(pfn_list);
+	return ret;
+}
+
+static int psb_gtt_unmap_common(struct drm_device *dev,
+			unsigned int ui32TaskId,
+			unsigned int hHandle)
+{
+	struct drm_psb_private *dev_priv
+	= (struct drm_psb_private *)dev->dev_private;
+	struct psb_gtt_mm *mm = dev_priv->gtt_mm;
+	struct psb_gtt *pg = dev_priv->pg;
+	uint32_t pages, offset_pages;
+	struct drm_mm_node *node;
+	int ret;
+
+	ret = psb_gtt_remove_node(mm,
+				  (u32)ui32TaskId,
+				  (u32)hHandle,
+				  &node);
+	if (ret) {
+		DRM_DEBUG("remove node failed\n");
+		return ret;
+	}
+
+	/*remove gtt entries*/
+	offset_pages = node->start;
+	pages = node->size;
+
+	psb_gtt_remove_pages(pg, offset_pages, pages, 0, 0, 1);
+
+
+	/*free tt node*/
+
+	psb_gtt_mm_free_mem(mm, node);
+	return 0;
+
+}
+
+static int psb_gtt_unmap_anyused(struct drm_device *dev,
+			unsigned int ui32TaskId)
+{
+	struct drm_psb_private *dev_priv
+	= (struct drm_psb_private *)dev->dev_private;
+	struct psb_gtt_mm *mm = dev_priv->gtt_mm;
+	struct psb_gtt *pg = dev_priv->pg;
+	uint32_t pages, offset_pages;
+	struct drm_mm_node *node;
+	int ret;
+
+	ret = psb_gtt_remove_node_anyused(mm,
+				  (u32)ui32TaskId,
+				  &node);
+	if (ret) {
+		DRM_DEBUG("remove node failed\n");
+		return ret;
+	}
+
+	/*remove gtt entries*/
+	offset_pages = node->start;
+	pages = node->size;
+
+	psb_gtt_remove_pages(pg, offset_pages, pages, 0, 0, 1);
+
+
+	/*free tt node*/
+
+	psb_gtt_mm_free_mem(mm, node);
+	return 0;
+
+}
+
+
+int psb_gtt_unmap_meminfo(struct drm_device *dev, IMG_HANDLE hKernelMemInfo)
+{
+	return psb_gtt_unmap_common(dev,
+				psb_get_tgid(),
+				(unsigned int)hKernelMemInfo);
+}
+
+int psb_gtt_map_vaddr(struct drm_device *dev,
+			uint32_t vaddr,
+			uint32_t size,
+			uint32_t page_align,
+			uint32_t *offset)
+{
+	struct drm_psb_private *dev_priv
+		= (struct drm_psb_private *)dev->dev_private;
+	struct psb_gtt_mm *mm = dev_priv->gtt_mm;
+	struct psb_gtt *pg = dev_priv->pg;
+	uint32_t pages, offset_pages;
+	struct drm_mm_node *node;
+	u32 *pfn_list = 0;
+	struct psb_gtt_mem_mapping *mapping = NULL;
+	int ret;
+
+	/*get pages*/
+	ret = psb_get_vaddr_pages(vaddr, size, &pfn_list, &pages);
+	if (ret) {
+		DRM_DEBUG("get pages error\n");
+		return ret;
+	}
+
+	DRM_DEBUG("get %d pages\n", pages);
+
+	/*alloc memory in TT apeture*/
+	ret = psb_gtt_mm_alloc_mem(mm, pages, page_align, &node);
+	if (ret) {
+		DRM_DEBUG("alloc TT memory error\n");
+		goto failed_pages_alloc;
+	}
+
+	/*update psb_gtt_mm*/
+	ret = psb_gtt_add_node(mm,
+			       (u32)psb_get_tgid(),
+			       vaddr,
+			       node,
+			       &mapping);
+	if (ret) {
+		DRM_DEBUG("add_node failed");
+		goto failed_add_node;
+	}
+
+	node = mapping->node;
+	offset_pages = node->start;
+
+	DRM_DEBUG("get free node for %d pages, offset %d pages",
+		  pages, offset_pages);
+
+	/*update gtt*/
+	psb_gtt_insert_pfn_list(pg, pfn_list,
+			     (unsigned)offset_pages,
+			     (unsigned)pages,
+			     0,
+			     0,
+			     0);
+
+	/*free pfn_list if allocated*/
+	kfree(pfn_list);
+
+	*offset = offset_pages;
+	return 0;
+
+failed_add_node:
+	psb_gtt_mm_free_mem(mm, node);
+failed_pages_alloc:
+	kfree(pfn_list);
+	return ret;
+}
+
+static int psb_gtt_unmap_vaddr(struct drm_device *dev,
+			uint32_t vaddr,
+			uint32_t size)
+{
+	return psb_gtt_unmap_common(dev, psb_get_tgid(), vaddr);
+
+}
+
+int psb_gtt_map_meminfo_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv)
+{
+	struct psb_gtt_mapping_arg *arg
+	= (struct psb_gtt_mapping_arg *)data;
+	uint32_t *offset_pages = &arg->offset_pages;
+	uint32_t page_align = arg->page_align;
+	uint32_t device_id = arg->bcd_device_id;
+	uint32_t buffer_id = arg->bcd_buffer_id;
+	uint32_t *buffer_count = &arg->bcd_buffer_count;
+	uint32_t *buffer_stride = &arg->bcd_buffer_stride;
+	uint32_t vaddr = arg->vaddr;
+	uint32_t size = arg->size;
+	uint32_t type = arg->type;
+
+	DRM_DEBUG("\n");
+
+	switch (type) {
+	case PSB_GTT_MAP_TYPE_MEMINFO:
+		return psb_gtt_map_meminfo(dev,
+				arg->hKernelMemInfo,
+				page_align,
+				offset_pages);
+	case PSB_GTT_MAP_TYPE_VIRTUAL:
+		return psb_gtt_map_vaddr(dev,
+					vaddr,
+					size,
+					page_align,
+					offset_pages);
+	default:
+		DRM_ERROR("unsupported buffer type %d\n", type);
+		return -EINVAL;
+	}
+}
+
+int psb_gtt_unmap_meminfo_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file_priv)
+{
+
+	struct psb_gtt_mapping_arg *arg
+		= (struct psb_gtt_mapping_arg *)data;
+	uint32_t device_id = arg->bcd_device_id;
+	uint32_t buffer_id = arg->bcd_buffer_id;
+	uint32_t vaddr = arg->vaddr;
+	uint32_t size = arg->size;
+	uint32_t type = arg->type;
+
+	DRM_DEBUG("\n");
+
+	switch (type) {
+	case PSB_GTT_MAP_TYPE_MEMINFO:
+		return psb_gtt_unmap_meminfo(dev, arg->hKernelMemInfo);
+	case PSB_GTT_MAP_TYPE_VIRTUAL:
+		return psb_gtt_unmap_vaddr(dev, vaddr, size);
+	default:
+		DRM_ERROR("unsupported buffer type %d\n", type);
+		return -EINVAL;
+	}
+}
+
+int psb_gtt_map_pvr_memory(struct drm_device *dev,
+			   unsigned int hHandle,
+			   unsigned int ui32TaskId,
+			   IMG_CPU_PHYADDR *pPages,
+			   unsigned int ui32PagesNum,
+			   unsigned int *ui32Offset)
+{
+	struct drm_psb_private *dev_priv = (struct drm_psb_private *)dev->dev_private;
+	struct psb_gtt_mm *mm = dev_priv->gtt_mm;
+	struct psb_gtt *pg = dev_priv->pg;
+
+	uint32_t size, pages, offset_pages;
+	struct drm_mm_node *node = NULL;
+	struct psb_gtt_mem_mapping *mapping = NULL;
+	int ret;
+
+	size = ui32PagesNum * PAGE_SIZE;
+	pages = 0;
+
+	/*alloc memory in TT apeture*/
+	ret = psb_gtt_mm_alloc_mem(mm, ui32PagesNum, 0, &node);
+	if (ret) {
+		DRM_DEBUG("alloc TT memory error\n");
+		goto failed_pages_alloc;
+	}
+
+	/*update psb_gtt_mm*/
+	ret = psb_gtt_add_node(mm,
+			       (u32)ui32TaskId,
+			       (u32)hHandle,
+			       node,
+			       &mapping);
+	if (ret) {
+		DRM_DEBUG("add_node failed");
+		goto failed_add_node;
+	}
+
+	node = mapping->node;
+	offset_pages = node->start;
+
+	DRM_DEBUG("get free node for %d pages, offset %d pages",
+		pages, offset_pages);
+
+	/*update gtt*/
+	psb_gtt_insert_phys_addresses(pg, pPages, (unsigned)offset_pages, (unsigned)ui32PagesNum, 0);
+
+	*ui32Offset = offset_pages;
+	return 0;
+
+failed_add_node:
+	psb_gtt_mm_free_mem(mm, node);
+failed_pages_alloc:
+	return ret;
+}
+
+
+int psb_gtt_unmap_pvr_memory(struct drm_device *dev,
+	unsigned int hHandle,
+	unsigned int ui32TaskId)
+{
+	return psb_gtt_unmap_common(dev, ui32TaskId, hHandle);
+}
+
+int psb_gtt_free_ht_for_tgid(struct drm_device *dev,
+	unsigned int ui32TaskId)
+{
+	while (!psb_gtt_unmap_anyused(dev, ui32TaskId))
+		;
+
+	return 0;
+}
diff --git a/drivers/external_drivers/intel_media/common/psb_gtt.h b/drivers/external_drivers/intel_media/common/psb_gtt.h
new file mode 100644
index 0000000..e9584b8
--- /dev/null
+++ b/drivers/external_drivers/intel_media/common/psb_gtt.h
@@ -0,0 +1,116 @@
+/**************************************************************************
+ * Copyright (c) 2007-2008, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#ifndef _PSB_GTT_H_
+#define _PSB_GTT_H_
+
+#include <drm/drmP.h>
+
+#include "img_types.h"
+
+struct psb_gtt {
+	struct drm_device *dev;
+	int initialized;
+	uint32_t gatt_start;
+	uint32_t mmu_gatt_start;
+	uint32_t gtt_video_start;
+	uint32_t rar_start;
+	uint32_t gtt_start;
+	uint32_t gtt_phys_start;
+	uint32_t reserved_gtt_start;
+	unsigned gtt_pages;
+	unsigned gatt_pages;
+	uint32_t stolen_base;
+	void *vram_addr;
+	uint32_t pge_ctl;
+	u16 gmch_ctrl;
+	unsigned long stolen_size;
+	unsigned long vram_stolen_size;
+	unsigned long ci_stolen_size;
+	unsigned long rar_stolen_size;
+	uint32_t *gtt_map;
+	struct rw_semaphore sem;
+};
+
+struct psb_gtt_mm {
+	struct drm_mm base;
+	struct drm_open_hash hash;
+	uint32_t count;
+	spinlock_t lock;
+};
+
+struct psb_gtt_hash_entry {
+	struct drm_open_hash ht;
+	uint32_t count;
+	struct drm_hash_item item;
+};
+
+struct psb_gtt_mem_mapping {
+	struct drm_mm_node *node;
+	struct drm_hash_item item;
+};
+
+#if 0
+/*Ioctl args*/
+struct psb_gtt_mapping_arg {
+	IMG_HANDLE hKernelMemInfo;
+};
+#endif
+
+/*Exported functions*/
+extern int psb_gtt_init(struct psb_gtt *pg, int resume);
+extern int psb_gtt_insert_pages(struct psb_gtt *pg, struct page **pages,
+				unsigned offset_pages, unsigned num_pages,
+				unsigned desired_tile_stride,
+				unsigned hw_tile_stride, int type);
+extern int psb_gtt_remove_pages(struct psb_gtt *pg, unsigned offset_pages,
+				unsigned num_pages,
+				unsigned desired_tile_stride,
+				unsigned hw_tile_stride,
+				int rc_prot);
+
+extern struct psb_gtt *psb_gtt_alloc(struct drm_device *dev);
+extern void psb_gtt_takedown(struct psb_gtt *pg, int free);
+extern int psb_gtt_map_meminfo(struct drm_device *dev,
+			       IMG_HANDLE hKernelMemInfo,
+			       uint32_t page_align,
+			       uint32_t *offset);
+extern int psb_gtt_unmap_meminfo(struct drm_device *dev,
+				 IMG_HANDLE hKernelMemInfo);
+extern int psb_gtt_map_meminfo_ioctl(struct drm_device *dev, void *data,
+				     struct drm_file *file_priv);
+extern int psb_gtt_unmap_meminfo_ioctl(struct drm_device *dev, void *data,
+				       struct drm_file *file_priv);
+extern int psb_gtt_mm_init(struct psb_gtt *pg);
+extern void psb_gtt_mm_takedown(void);
+
+extern int psb_gtt_map_pvr_memory(struct drm_device *dev,
+				  unsigned int hHandle,
+				  unsigned int ui32TaskId,
+				  IMG_CPU_PHYADDR *pPages,
+				  unsigned int ui32PagesNum,
+				  unsigned int *ui32Offset);
+
+extern int psb_gtt_unmap_pvr_memory(struct drm_device *dev,
+				    unsigned int hHandle,
+				    unsigned int ui32TaskId);
+extern int psb_gtt_free_ht_for_tgid(struct drm_device *dev,
+	unsigned int ui32TaskId);
+
+#endif
diff --git a/drivers/external_drivers/intel_media/common/psb_irq.c b/drivers/external_drivers/intel_media/common/psb_irq.c
new file mode 100644
index 0000000..44dd4b6
--- /dev/null
+++ b/drivers/external_drivers/intel_media/common/psb_irq.c
@@ -0,0 +1,1417 @@
+/**************************************************************************
+ * Copyright (c) 2007, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ * develop this driver.
+ *
+ **************************************************************************/
+/*
+ */
+
+#include <drm/drmP.h>
+#include "psb_drv.h"
+#include "psb_reg.h"
+#include "psb_msvdx.h"
+#include "pnw_topaz.h"
+#include "psb_intel_reg.h"
+#include "psb_powermgmt.h"
+
+#include "mdfld_dsi_dbi_dpu.h"
+#include "mdfld_dsi_pkg_sender.h"
+#include "mdfld_dsi_dbi_dsr.h"
+
+#ifdef CONFIG_MDFD_GL3
+#include "mdfld_gl3.h"
+#endif
+
+#include "psb_irq.h"
+
+#include <linux/time.h>
+#include <linux/history_record.h>
+
+extern int drm_psb_smart_vsync;
+extern atomic_t g_videoenc_access_count;
+extern atomic_t g_videodec_access_count;
+extern struct workqueue_struct *te_wq;
+extern struct workqueue_struct *vsync_wq;
+
+/* inline functions */
+static inline u32
+psb_pipestat(int pipe)
+{
+	if (pipe == 0)
+		return PIPEASTAT;
+	if (pipe == 1)
+		return PIPEBSTAT;
+	if (pipe == 2)
+		return PIPECSTAT;
+	BUG();
+	return 0;
+}
+
+static inline u32
+mid_pipe_event(int pipe)
+{
+	if (pipe == 0)
+		return _PSB_PIPEA_EVENT_FLAG;
+	if (pipe == 1)
+		return _MDFLD_PIPEB_EVENT_FLAG;
+	if (pipe == 2)
+		return _MDFLD_PIPEC_EVENT_FLAG;
+	BUG();
+	return 0;
+}
+
+static inline u32
+mid_pipe_vsync(int pipe)
+{
+	if (pipe == 0)
+		return _PSB_VSYNC_PIPEA_FLAG;
+	if (pipe == 1)
+		return _PSB_VSYNC_PIPEB_FLAG;
+	if (pipe == 2)
+		return _MDFLD_PIPEC_VBLANK_FLAG;
+	BUG();
+	return 0;
+}
+
+static inline u32
+mid_pipeconf(int pipe)
+{
+	if (pipe == 0)
+		return PIPEACONF;
+	if (pipe == 1)
+		return PIPEBCONF;
+	if (pipe == 2)
+		return PIPECCONF;
+	BUG();
+	return 0;
+}
+
+void
+psb_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
+{
+	if ((dev_priv->pipestat[pipe] & mask) != mask) {
+		u32 reg = psb_pipestat(pipe);
+		dev_priv->pipestat[pipe] |= mask;
+		/* Enable the interrupt, clear any pending status */
+		if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_ONLY_IF_ON)) {
+			u32 writeVal = PSB_RVDC32(reg);
+			/* Don't clear other interrupts */
+			writeVal &= (PIPE_EVENT_MASK | PIPE_VBLANK_MASK);
+			writeVal |= (mask | (mask >> 16));
+			PSB_WVDC32(writeVal, reg);
+			(void) PSB_RVDC32(reg);
+			ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+		}
+	}
+}
+
+void
+psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
+{
+	if ((dev_priv->pipestat[pipe] & mask) != 0) {
+		u32 reg = psb_pipestat(pipe);
+		dev_priv->pipestat[pipe] &= ~mask;
+		if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_ONLY_IF_ON)) {
+			if ((mask == PIPE_VBLANK_INTERRUPT_ENABLE) ||
+					(mask == PIPE_TE_ENABLE))
+				wake_up_interruptible(&dev_priv->vsync_queue);
+
+			u32 writeVal = PSB_RVDC32(reg);
+			/* Don't clear other interrupts */
+			writeVal &= (PIPE_EVENT_MASK | PIPE_VBLANK_MASK);
+			writeVal &= ~mask;
+			PSB_WVDC32(writeVal, reg);
+			(void) PSB_RVDC32(reg);
+			ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+		}
+	}
+}
+
+void
+mid_enable_pipe_event(struct drm_psb_private *dev_priv, int pipe)
+{
+	if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_ONLY_IF_ON)) {
+		u32 pipe_event = mid_pipe_event(pipe);
+		dev_priv->vdc_irq_mask |= pipe_event;
+		PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
+		PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+		ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+	}
+}
+
+void
+mid_disable_pipe_event(struct drm_psb_private *dev_priv, int pipe)
+{
+	if (dev_priv->pipestat[pipe] == 0) {
+		if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_ONLY_IF_ON)) {
+			u32 pipe_event = mid_pipe_event(pipe);
+			dev_priv->vdc_irq_mask &= ~pipe_event;
+			PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
+			PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+			ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+		}
+	}
+}
+/*
+ * to sync the mipi and hdmi irq to ensure they flush the same buffer
+ *
+ */
+static int mipi_hdmi_vsync_check(struct drm_device *dev, uint32_t pipe)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) dev->dev_private;
+	struct mdfld_dsi_config *dsi_config = dev_priv->dsi_configs[0];
+	static int pipe_surf[2];
+	int pipea_stat = 0;
+	int pipeb_stat = 0;
+	int pipeb_ctl = 0;
+	int pipeb_cntr = 0;
+	unsigned long irqflags;
+
+	/*check whether need to sync*/
+	if (dev_priv->vsync_te_working[0] == false ||
+		dev_priv->vsync_te_working[1] == false)
+		return 1;
+
+	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+	if (dev_priv->bhdmiconnected && dsi_config->dsi_hw_context.panel_on) {
+		if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_ONLY_IF_ON)) {
+			pipea_stat = REG_READ(psb_pipestat(0));
+			pipeb_stat = REG_READ(psb_pipestat(1));
+			pipeb_ctl = REG_READ(HDMIB_CONTROL);
+			pipeb_cntr = REG_READ(DSPBCNTR);
+			pipe_surf[pipe] = REG_READ(DSPASURF);
+			ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+		} else {
+			spin_unlock_irqrestore(&dev_priv->irqmask_lock,
+							irqflags);
+			return 1;
+		}
+
+		/* PSB_DEBUG_ENTRY("[vsync irq] pipe : 0x%x, regsurf: 0x%x !\n", pipe, pipe_surf[pipe]); */
+
+		if ((pipea_stat & PIPE_VBLANK_INTERRUPT_ENABLE)
+		    && (pipeb_ctl & HDMIB_PORT_EN)
+		    && (pipeb_cntr & DISPLAY_PLANE_ENABLE)
+		    && (pipeb_stat & PIPE_VBLANK_INTERRUPT_ENABLE)) {
+			if (pipe_surf[0] == pipe_surf[1]) {
+				pipe_surf[0] = MAX_NUM;
+				pipe_surf[1] = MAX_NUM;
+			} else {
+				spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+		/*PSB_DEBUG_ENTRY("Probable Display Buffer LOCK!\n");*/
+				return 0;
+			}
+		}
+	}
+	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+
+	return 1;
+}
+
+/* Function to check if HDMI and MIPI are presenting the same buffer.
+ * If MIPI is faster than HDMI (different refresh rates), then throttle
+ * the MIPI buffers by simply dropping them.
+ * This function should be called from the local display's Vblank/TE
+ * interrupt handler. This version of function is required to handle
+ * the sync check on CTP, on which local display works on the TE interrupt
+ * handler.
+ */
+static int mipi_te_hdmi_vsync_check(struct drm_device *dev, uint32_t pipe)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) dev->dev_private;
+	struct mdfld_dsi_config *dsi_config = dev_priv->dsi_configs[0];
+	int pipea_stat, pipeb_stat, pipeb_ctl, pipeb_cntr, pipeb_config;
+	static int pipe_surf[2];
+	unsigned long irqflags;
+
+	/*check whether need to sync*/
+	if (dev_priv->vsync_te_working[0] == false ||
+		dev_priv->vsync_te_working[1] == false)
+		return 1;
+
+	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+	if (dev_priv->bhdmiconnected && dsi_config->dsi_hw_context.panel_on) {
+		if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
+					      OSPM_UHB_ONLY_IF_ON)) {
+			pipea_stat = REG_READ(psb_pipestat(0));
+			pipeb_stat = REG_READ(psb_pipestat(1));
+			pipeb_ctl = REG_READ(HDMIB_CONTROL);
+			pipeb_cntr = REG_READ(DSPBCNTR);
+			pipeb_config = REG_READ(PIPEBCONF);
+			pipe_surf[pipe] = REG_READ(DSPASURF);
+			ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+		} else {
+			spin_unlock_irqrestore(&dev_priv->irqmask_lock,
+					irqflags);
+			return 1;
+		}
+
+		if ((pipea_stat & PIPE_TE_ENABLE)
+			&& (pipeb_config & PIPEBCONF_ENABLE)
+			&& (pipeb_ctl & HDMIB_PORT_EN)
+			&& (pipeb_cntr & DISPLAY_PLANE_ENABLE)
+			&& (pipeb_stat & PIPE_VBLANK_INTERRUPT_ENABLE)) {
+			if (pipe_surf[0] == pipe_surf[1]) {
+				pipe_surf[0] = MAX_NUM;
+				pipe_surf[1] = MAX_NUM;
+			} else {
+				spin_unlock_irqrestore(&dev_priv->irqmask_lock,
+						       irqflags);
+				return 0;
+			}
+		}
+	}
+	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+
+	return 1;
+}
+
+/**
+ * Check if we can disable vblank for video MIPI display
+ *
+ */
+static void mid_check_vblank(struct drm_device *dev, uint32_t pipe)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) dev->dev_private;
+	unsigned long irqflags;
+	static unsigned long cnt;
+
+	if (drm_psb_smart_vsync == 0) {
+		if ((cnt++) % 600 == 0)
+			PSB_DEBUG_ENTRY("[vsync irq] 600 times pipe : 0x%x!\n", pipe);
+		return ;
+	}
+
+	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+	if (dev_priv->dsr_idle_count > 50) {
+		dev_priv->b_is_in_idle = true;
+	} else
+		dev_priv->dsr_idle_count++;
+	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+}
+
+u32 intel_vblank_count(struct drm_device *dev, int pipe)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *)dev->dev_private;
+
+	return atomic_read(&dev_priv->vblank_count[pipe]);
+}
+
+/**
+ * Display controller interrupt handler for vsync/vblank.
+ *
+ */
+static void mid_vblank_handler(struct drm_device *dev, uint32_t pipe)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) dev->dev_private;
+#if 0
+	/*doesn't use it for now, leave code here for reference*/
+	if (pipe == 1)
+		psb_flip_hdmi(dev, pipe);
+#endif
+	drm_handle_vblank(dev, pipe);
+
+	if (is_cmd_mode_panel(dev)) {
+		if (!mipi_te_hdmi_vsync_check(dev, pipe))
+			return;
+	} else {
+		if (!mipi_hdmi_vsync_check(dev, pipe))
+			return;
+	}
+
+	if (dev_priv->psb_vsync_handler)
+		(*dev_priv->psb_vsync_handler)(dev, pipe);
+}
+
+void psb_te_timer_func(unsigned long data)
+{
+	/*
+		struct drm_psb_private * dev_priv = (struct drm_psb_private *)data;
+		struct drm_device *dev = (struct drm_device *)dev_priv->dev;
+		uint32_t pipe = dev_priv->cur_pipe;
+		drm_handle_vblank(dev, pipe);
+		if( dev_priv->psb_vsync_handler != NULL)
+			(*dev_priv->psb_vsync_handler)(dev,pipe);
+	*/
+}
+
+static void mdfld_vsync_event(struct drm_device *dev, uint32_t pipe)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *)dev->dev_private;
+
+	if (dev_priv)
+		wake_up_interruptible(&dev_priv->vsync_queue);
+}
+
+void mdfld_vsync_event_work(struct work_struct *work)
+{
+	struct drm_psb_private *dev_priv =
+		container_of(work, struct drm_psb_private, vsync_event_work);
+	int pipe = dev_priv->vsync_pipe;
+	struct drm_device *dev = dev_priv->dev;
+
+	dev_priv->vsync_te_worker_ts[pipe] = cpu_clock(0);
+
+	mid_vblank_handler(dev, pipe);
+
+	/*report vsync event*/
+	mdfld_vsync_event(dev, pipe);
+}
+
+void mdfld_te_handler_work(struct work_struct *work)
+{
+	struct drm_psb_private *dev_priv =
+		container_of(work, struct drm_psb_private, te_work);
+	int pipe = 0;
+	struct drm_device *dev = dev_priv->dev;
+
+	dev_priv->vsync_te_worker_ts[pipe] = cpu_clock(0);
+
+	/*report vsync event*/
+	mdfld_vsync_event(dev, pipe);
+
+	drm_handle_vblank(dev, pipe);
+
+	if (dev_priv->b_async_flip_enable) {
+		if (mipi_te_hdmi_vsync_check(dev, pipe)) {
+			if (dev_priv->psb_vsync_handler != NULL)
+				(*dev_priv->psb_vsync_handler)(dev, pipe);
+		}
+
+		mdfld_dsi_dsr_report_te(dev_priv->dsi_configs[0]);
+	} else {
+#ifdef CONFIG_MDFD_DSI_DPU
+		mdfld_dpu_update_panel(dev);
+#else
+		mdfld_dbi_update_panel(dev, pipe);
+#endif
+		if (dev_priv->psb_vsync_handler != NULL)
+			(*dev_priv->psb_vsync_handler)(dev, pipe);
+	}
+}
+
+static void update_te_counter(struct drm_device *dev, uint32_t pipe)
+{
+	struct mdfld_dsi_pkg_sender *sender;
+	struct mdfld_dsi_dbi_output **dbi_outputs;
+	struct mdfld_dsi_dbi_output *dbi_output;
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *)dev->dev_private;
+	struct mdfld_dbi_dsr_info *dsr_info = dev_priv->dbi_dsr_info;
+
+	if (!dsr_info)
+		return;
+
+	dbi_outputs = dsr_info->dbi_outputs;
+	dbi_output = pipe ? dbi_outputs[1] : dbi_outputs[0];
+	sender = mdfld_dsi_encoder_get_pkg_sender(&dbi_output->base);
+	mdfld_dsi_report_te(sender);
+}
+static void get_use_cases_control_info()
+{
+	static int last_use_cases_control = -1;
+	if (drm_psb_use_cases_control != last_use_cases_control) {
+		last_use_cases_control = drm_psb_use_cases_control;
+		DRM_INFO("new update of use cases\n");
+		if (!(drm_psb_use_cases_control & PSB_SUSPEND_ENABLE))
+			DRM_INFO("BIT0 suspend/resume  disabled\n");
+		if (!(drm_psb_use_cases_control & PSB_BRIGHTNESS_ENABLE))
+			DRM_INFO("BIT1 brighness setting disabled\n");
+		if (!(drm_psb_use_cases_control & PSB_ESD_ENABLE))
+			DRM_INFO("BIT2 ESD  disabled\n");
+		if (!(drm_psb_use_cases_control & PSB_DPMS_ENABLE))
+			DRM_INFO("BIT3 DPMS  disabled\n");
+		if (!(drm_psb_use_cases_control & PSB_DSR_ENABLE))
+			DRM_INFO("BIT4 DSR disabled\n");
+		if (!(drm_psb_use_cases_control & PSB_VSYNC_OFF_ENABLE))
+			DRM_INFO("BIT5 VSYNC off  disabled\n");
+	}
+}
+
+/**
+ * Display controller interrupt handler for pipe event.
+ *
+ */
+#define WAIT_STATUS_CLEAR_LOOP_COUNT 0xffff
+static void mid_pipe_event_handler(struct drm_device *dev, uint32_t pipe)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) dev->dev_private;
+
+	uint32_t pipe_stat_val;
+	uint32_t pipe_stat_val_raw;
+	uint32_t pipe_stat_reg = psb_pipestat(pipe);
+	uint32_t pipe_enable;
+	uint32_t pipe_status;
+	uint32_t i = 0;
+	unsigned long irq_flags;
+	struct mdfld_dsi_pkg_sender *sender;
+	struct mdfld_dsi_dbi_output **dbi_outputs;
+	struct mdfld_dsi_dbi_output *dbi_output;
+	struct mdfld_dbi_dsr_info *dsr_info = dev_priv->dbi_dsr_info;
+
+	spin_lock_irqsave(&dev_priv->irqmask_lock, irq_flags);
+
+	pipe_enable = dev_priv->pipestat[pipe];
+	pipe_status = dev_priv->pipestat[pipe] >> 16;
+
+	pipe_stat_val = PSB_RVDC32(pipe_stat_reg);
+	pipe_stat_val_raw = pipe_stat_val;
+	pipe_stat_val &= pipe_enable | pipe_status;
+	pipe_stat_val &= pipe_stat_val >> 16;
+
+	/* clear the 2nd level interrupt status bits.
+	 * We must keep spinlock to protect disable / enable status
+	 * safe in the same register (assuming that other do that also)
+	 * Clear only bits that we are going to serve this time.
+	 */
+	/**
+	* FIXME: shouldn't use while loop here. However, the interrupt
+	* status 'sticky' bits cannot be cleared by setting '1' to that
+	* bit once...
+	*/
+
+	for (i = 0; i < WAIT_STATUS_CLEAR_LOOP_COUNT; i++) {
+		PSB_WVDC32(pipe_stat_val_raw, pipe_stat_reg);
+		(void) PSB_RVDC32(pipe_stat_reg);
+
+		if ((PSB_RVDC32(pipe_stat_reg) & pipe_stat_val) == 0)
+			break;
+	}
+	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irq_flags);
+
+	if (i == WAIT_STATUS_CLEAR_LOOP_COUNT)
+		DRM_ERROR("%s, can't clear the status bits in pipe_stat_reg, its value = 0x%x. \n",
+			  __FUNCTION__, PSB_RVDC32(pipe_stat_reg));
+
+	/* if pipe is HDMI, but hdmi is plugged out, should not handle
+	* HDMI reg any more here */
+	if (pipe == 1 && !dev_priv->bhdmiconnected)
+		return;
+
+#ifdef CONFIG_CTP_DPST
+	if ((pipe_stat_val & (PIPE_DPST_EVENT_STATUS)) &&
+	    (dev_priv->psb_dpst_state != NULL)) {
+		uint32_t pwm_reg = 0;
+		uint32_t hist_reg = 0;
+		u32 irqCtrl = 0;
+		struct dpst_guardband guardband_reg;
+		struct dpst_ie_histogram_control ie_hist_cont_reg;
+
+		hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
+
+		/* Determine if this is histogram or pwm interrupt */
+		if ((hist_reg & HISTOGRAM_INT_CTRL_CLEAR) &&
+				(hist_reg & HISTOGRAM_INTERRUPT_ENABLE)) {
+			/* Notify UM of histogram interrupt */
+			psb_dpst_notify_change_um(DPST_EVENT_HIST_INTERRUPT,
+						  dev_priv->psb_dpst_state);
+
+			/* disable dpst interrupts */
+			guardband_reg.data = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
+			guardband_reg.interrupt_enable = 0;
+			guardband_reg.interrupt_status = 1;
+			PSB_WVDC32(guardband_reg.data, HISTOGRAM_INT_CONTROL);
+
+			ie_hist_cont_reg.data = PSB_RVDC32(HISTOGRAM_LOGIC_CONTROL);
+			ie_hist_cont_reg.ie_histogram_enable = 0;
+			PSB_WVDC32(ie_hist_cont_reg.data, HISTOGRAM_LOGIC_CONTROL);
+		}
+		pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
+		if ((pwm_reg & PWM_PHASEIN_INT_ENABLE) &&
+		    !(pwm_reg & PWM_PHASEIN_ENABLE)) {
+			/* Notify UM of the phase complete */
+			psb_dpst_notify_change_um(DPST_EVENT_PHASE_COMPLETE,
+						  dev_priv->psb_dpst_state);
+
+			/* Temporarily get phase mngr ready to generate
+			 * another interrupt until this can be moved to
+			 * user mode */
+			/* PSB_WVDC32(pwm_reg | 0x80010100 | PWM_PHASEIN_ENABLE,
+				   PWM_CONTROL_LOGIC); */
+		}
+	}
+#endif
+
+	if (pipe_stat_val & PIPE_VBLANK_STATUS) {
+		dev_priv->vsync_te_irq_ts[pipe] = cpu_clock(0);
+		dev_priv->vsync_te_working[pipe] = true;
+		dev_priv->vsync_pipe = pipe;
+		atomic_inc(&dev_priv->vblank_count[pipe]);
+		queue_work(vsync_wq, &dev_priv->vsync_event_work);
+	}
+
+	if (pipe_stat_val & PIPE_TE_STATUS) {
+		/*update te sequence on this pipe*/
+		dev_priv->vsync_te_irq_ts[pipe] = cpu_clock(0);
+		dev_priv->vsync_te_working[pipe] = true;
+		update_te_counter(dev, pipe);
+		atomic_inc(&dev_priv->vblank_count[pipe]);
+		queue_work(te_wq, &dev_priv->te_work);
+
+	}
+
+	if (pipe_stat_val & PIPE_HDMI_AUDIO_UNDERRUN_STATUS)
+		mid_hdmi_audio_signal_event(dev, HAD_EVENT_AUDIO_BUFFER_UNDERRUN);
+
+	if (pipe_stat_val & PIPE_HDMI_AUDIO_BUFFER_DONE_STATUS)
+		mid_hdmi_audio_signal_event(dev, HAD_EVENT_AUDIO_BUFFER_DONE);
+
+	get_use_cases_control_info();
+}
+
+/**
+ * Display controller interrupt handler.
+ */
+static void psb_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat)
+{
+
+	if (vdc_stat & _PSB_PIPEA_EVENT_FLAG) {
+		mid_pipe_event_handler(dev, 0);
+	}
+
+	if (vdc_stat & _MDFLD_PIPEB_EVENT_FLAG) {
+		mid_pipe_event_handler(dev, 1);
+	}
+
+	if (vdc_stat & _MDFLD_PIPEC_EVENT_FLAG) {
+		mid_pipe_event_handler(dev, 2);
+	}
+
+	if (vdc_stat & _MDFLD_MIPIA_FLAG) {
+		/* mid_mipi_event_handler(dev, 0); */
+	}
+
+	if (vdc_stat & _MDFLD_MIPIC_FLAG) {
+		/* mid_mipi_event_handler(dev, 2); */
+	}
+}
+
+/**
+ * Medfield Gl3 Cache interrupt handler.
+ */
+#ifdef CONFIG_MDFD_GL3
+static void mdfld_gl3_interrupt(struct drm_device *dev, uint32_t vdc_stat)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) dev->dev_private;
+	uint32_t gl3_err_status = 0;
+	unsigned long irq_flags;
+
+	gl3_err_status = MDFLD_GL3_READ(MDFLD_GCL_ERR_STATUS);
+	if (gl3_err_status & _MDFLD_GL3_ECC_FLAG) {
+		gl3_flush();
+		gl3_reset();
+	}
+	spin_lock_irqsave(&dev_priv->irqmask_lock, irq_flags);
+	dev_priv->vdc_irq_mask &= ~_MDFLD_GL3_IRQ_FLAG;
+	PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
+	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irq_flags);
+}
+#endif
+
+irqreturn_t psb_irq_handler(DRM_IRQ_ARGS)
+{
+	struct drm_device *dev = (struct drm_device *) arg;
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) dev->dev_private;
+
+	uint32_t vdc_stat, dsp_int = 0, sgx_int = 0, msvdx_int = 0, topaz_int = 0;
+	int handled = 0;
+	unsigned long irq_flags;
+	struct saved_history_record *precord = NULL;
+
+#ifdef CONFIG_MDFD_GL3
+	uint32_t gl3_int = 0;
+#endif
+
+	/*	PSB_DEBUG_ENTRY("\n"); */
+
+	spin_lock_irqsave(&dev_priv->irqmask_lock, irq_flags);
+	vdc_stat = PSB_RVDC32(PSB_INT_IDENTITY_R);
+
+	precord = get_new_history_record();
+	if (precord) {
+		precord->type = 5;
+		precord->record_value.vdc_stat = vdc_stat;
+	}
+
+	if (vdc_stat & _MDFLD_DISP_ALL_IRQ_FLAG) {
+		PSB_DEBUG_IRQ("Got DISP interrupt\n");
+		dsp_int = 1;
+	}
+
+	if (vdc_stat & _PSB_IRQ_SGX_FLAG) {
+		PSB_DEBUG_IRQ("Got SGX interrupt\n");
+		sgx_int = 1;
+	}
+	if (vdc_stat & _PSB_IRQ_MSVDX_FLAG) {
+		PSB_DEBUG_IRQ("Got MSVDX interrupt\n");
+		msvdx_int = 1;
+	}
+
+	if (vdc_stat & _LNC_IRQ_TOPAZ_FLAG) {
+		PSB_DEBUG_IRQ("Got TOPAX interrupt\n");
+		topaz_int = 1;
+	}
+
+#ifdef CONFIG_MDFD_GL3
+	if (vdc_stat & _MDFLD_GL3_IRQ_FLAG) {
+		PSB_DEBUG_IRQ("Got GL3 interrupt\n");
+		gl3_int = 1;
+	}
+#endif
+
+	vdc_stat &= dev_priv->vdc_irq_mask;
+	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irq_flags);
+
+	/*
+		Ignore interrupt if sub-system is already
+		powered gated; nothing needs to be done,
+		when HW is already power-gated
+		- saftey check to avoid illegal HW access.
+	*/
+	if (dsp_int) {
+		if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
+			OSPM_UHB_ONLY_IF_ON)) {
+			psb_vdc_interrupt(dev, vdc_stat);
+			handled = 1;
+			ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+		} else
+			DRM_INFO("get dsp int while it's off\n");
+	}
+
+#ifdef CONFIG_MDFD_VIDEO_DECODE
+	if (msvdx_int) {
+		if (ospm_power_is_hw_on(OSPM_VIDEO_DEC_ISLAND)) {
+			atomic_inc(&g_videodec_access_count);
+			psb_msvdx_interrupt(dev);
+			handled = 1;
+			atomic_dec(&g_videodec_access_count);
+		} else
+			DRM_INFO("get msvdx int while it's off\n");
+	}
+	if ((IS_MDFLD(dev) && topaz_int)) {
+		if (ospm_power_is_hw_on(OSPM_VIDEO_ENC_ISLAND)) {
+			atomic_inc(&g_videoenc_access_count);
+			pnw_topaz_interrupt(dev);
+			handled = 1;
+			atomic_dec(&g_videoenc_access_count);
+		} else
+			DRM_INFO("get mdfld topaz int while it's off\n");
+	}
+#endif
+	if (sgx_int) {
+		BUG_ON(!dev_priv->pvr_ops);
+		if (dev_priv->pvr_ops->SYSPVRServiceSGXInterrupt(dev) != 0)
+				handled = 1;
+	}
+
+
+#ifdef CONFIG_MDFD_GL3
+	if (gl3_int && ospm_power_is_hw_on(OSPM_GL3_CACHE_ISLAND)) {
+		mdfld_gl3_interrupt(dev, vdc_stat);
+		handled = 1;
+	}
+#endif
+
+	PSB_WVDC32(vdc_stat, PSB_INT_IDENTITY_R);
+	(void) PSB_RVDC32(PSB_INT_IDENTITY_R);
+	DRM_READMEMORYBARRIER();
+
+	if (!handled)
+		return IRQ_NONE;
+
+	return IRQ_HANDLED;
+}
+
+void psb_irq_preinstall(struct drm_device *dev)
+{
+	psb_irq_preinstall_islands(dev, OSPM_ALL_ISLANDS);
+}
+
+void psb_irq_preinstall_graphics_islands(struct drm_device *dev, int hw_islands)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) dev->dev_private;
+	unsigned long irqflags;
+
+
+	if (hw_islands & OSPM_GRAPHICS_ISLAND)
+		dev_priv->vdc_irq_mask |= _PSB_IRQ_SGX_FLAG;
+	/*This register is safe even if display island is off*/
+	PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
+
+}
+
+/**
+ * FIXME: should I remove display irq enable here??
+ */
+void psb_irq_preinstall_islands(struct drm_device *dev, int hw_islands)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) dev->dev_private;
+	unsigned long irqflags;
+
+	PSB_DEBUG_IRQ("\n");
+
+	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+	if (hw_islands & OSPM_DISPLAY_ISLAND) {
+		if (ospm_power_is_hw_on(OSPM_DISPLAY_ISLAND)) {
+			if (IS_POULSBO(dev))
+				PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
+			if (dev->vblank_enabled[0])
+				dev_priv->vdc_irq_mask |= _PSB_PIPEA_EVENT_FLAG;
+			if (dev->vblank_enabled[1])
+				dev_priv->vdc_irq_mask |= _MDFLD_PIPEB_EVENT_FLAG;
+			if (dev->vblank_enabled[2])
+				dev_priv->vdc_irq_mask |= _MDFLD_PIPEC_EVENT_FLAG;
+		}
+	}
+	if (hw_islands & OSPM_GRAPHICS_ISLAND) {
+		dev_priv->vdc_irq_mask |= _PSB_IRQ_SGX_FLAG;
+	}
+
+#ifdef CONFIG_MDFD_GL3
+	if (hw_islands & OSPM_GL3_CACHE_ISLAND)
+		dev_priv->vdc_irq_mask |= _MDFLD_GL3_IRQ_FLAG;
+#endif
+	if (hw_islands & OSPM_VIDEO_DEC_ISLAND)
+		if (ospm_power_is_hw_on(OSPM_VIDEO_DEC_ISLAND))
+			dev_priv->vdc_irq_mask |= _PSB_IRQ_MSVDX_FLAG;
+
+	if (hw_islands & OSPM_VIDEO_ENC_ISLAND)
+		if (ospm_power_is_hw_on(OSPM_VIDEO_ENC_ISLAND))
+			dev_priv->vdc_irq_mask |= _LNC_IRQ_TOPAZ_FLAG;
+
+	/*This register is safe even if display island is off*/
+	PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
+
+	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+}
+
+int psb_irq_postinstall(struct drm_device *dev)
+{
+	return psb_irq_postinstall_islands(dev, OSPM_ALL_ISLANDS);
+}
+
+int psb_irq_postinstall_graphics_islands(struct drm_device *dev, int hw_islands)
+{
+
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) dev->dev_private;
+	unsigned long irqflags;
+
+	PSB_DEBUG_IRQ("\n");
+
+
+	/*This register is safe even if display island is off*/
+	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+
+
+	return 0;
+}
+
+int psb_irq_postinstall_islands(struct drm_device *dev, int hw_islands)
+{
+
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) dev->dev_private;
+	unsigned long irqflags;
+
+	PSB_DEBUG_IRQ("\n");
+
+	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+	/*This register is safe even if display island is off*/
+	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+
+	if (hw_islands & OSPM_DISPLAY_ISLAND) {
+		if (true/*powermgmt_is_hw_on(dev->pdev, PSB_DISPLAY_ISLAND)*/) {
+			if (IS_POULSBO(dev))
+				PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
+
+			if (dev->vblank_enabled[0]) {
+				if (dev_priv->platform_rev_id != MDFLD_PNW_A0 &&
+				    is_cmd_mode_panel(dev)) {
+#if 0 /* FIXME need to revisit it */
+					psb_enable_pipestat(dev_priv, 0,
+							    PIPE_TE_ENABLE);
+#endif
+				} else
+					psb_enable_pipestat(dev_priv, 0,
+							    PIPE_VBLANK_INTERRUPT_ENABLE);
+			} else {
+				if (dev_priv->platform_rev_id != MDFLD_PNW_A0 &&
+				    is_cmd_mode_panel(dev)) {
+#if 0 /* FIXME need to revisit it */
+					psb_disable_pipestat(dev_priv, 0,
+							     PIPE_TE_ENABLE);
+#endif
+				} else
+					psb_disable_pipestat(dev_priv, 0,
+							     PIPE_VBLANK_INTERRUPT_ENABLE);
+			}
+
+			if (dev->vblank_enabled[1])
+				psb_enable_pipestat(dev_priv, 1,
+						    PIPE_VBLANK_INTERRUPT_ENABLE);
+			else
+				psb_disable_pipestat(dev_priv, 1,
+						     PIPE_VBLANK_INTERRUPT_ENABLE);
+
+			if (dev->vblank_enabled[2]) {
+				if (dev_priv->platform_rev_id != MDFLD_PNW_A0 &&
+				    is_cmd_mode_panel(dev)) {
+#if 0 /* FIXME need to revisit it */
+					psb_enable_pipestat(dev_priv, 2,
+							    PIPE_TE_ENABLE);
+#endif
+				} else
+					psb_enable_pipestat(dev_priv, 2,
+							    PIPE_VBLANK_INTERRUPT_ENABLE);
+			} else {
+				if (dev_priv->platform_rev_id != MDFLD_PNW_A0 &&
+				    is_cmd_mode_panel(dev)) {
+#if 0 /* FIXME need to revisit it */
+					psb_disable_pipestat(dev_priv, 2,
+							     PIPE_TE_ENABLE);
+#endif
+				} else
+					psb_disable_pipestat(dev_priv, 2,
+							     PIPE_VBLANK_INTERRUPT_ENABLE);
+			}
+		}
+	}
+
+#ifdef CONFIG_MDFD_VIDEO_DECODE
+	if (!dev_priv->topaz_disabled)
+		if (hw_islands & OSPM_VIDEO_ENC_ISLAND)
+			if (ospm_power_is_hw_on(OSPM_VIDEO_ENC_ISLAND)) {
+				if (IS_MDFLD(dev))
+					pnw_topaz_enableirq(dev);
+			}
+
+	if (hw_islands & OSPM_VIDEO_DEC_ISLAND)
+		if (true/*powermgmt_is_hw_on(dev->pdev, PSB_VIDEO_DEC_ISLAND)*/)
+			psb_msvdx_enableirq(dev);
+#endif
+	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+
+	return 0;
+}
+
+void psb_irq_uninstall(struct drm_device *dev)
+{
+	psb_irq_uninstall_islands(dev, OSPM_ALL_ISLANDS);
+}
+
+void psb_irq_uninstall_graphics_islands(struct drm_device *dev, int hw_islands)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) dev->dev_private;
+	unsigned long irqflags;
+
+	if (hw_islands & OSPM_GRAPHICS_ISLAND)
+		dev_priv->vdc_irq_mask &= ~_PSB_IRQ_SGX_FLAG;
+
+	/*These two registers are safe even if display island is off*/
+	PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
+	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+
+	wmb();
+
+	/*This register is safe even if display island is off*/
+	PSB_WVDC32(PSB_RVDC32(PSB_INT_IDENTITY_R), PSB_INT_IDENTITY_R);
+
+}
+
+void psb_irq_uninstall_islands(struct drm_device *dev, int hw_islands)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) dev->dev_private;
+	unsigned long irqflags;
+
+	PSB_DEBUG_IRQ("\n");
+
+	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+	if (hw_islands & OSPM_DISPLAY_ISLAND) {
+		if (true/*powermgmt_is_hw_on(dev->pdev, PSB_DISPLAY_ISLAND)*/) {
+			if (IS_POULSBO(dev))
+				PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
+
+			if (dev->vblank_enabled[0]) {
+				if (dev_priv->platform_rev_id != MDFLD_PNW_A0 &&
+				    is_cmd_mode_panel(dev)) {
+#if 0 /* FIXME need to revisit it */
+					psb_disable_pipestat(dev_priv, 0,
+							     PIPE_TE_ENABLE);
+#endif
+				} else
+					psb_disable_pipestat(dev_priv, 0,
+							     PIPE_VBLANK_INTERRUPT_ENABLE);
+			}
+
+			if (dev->vblank_enabled[1])
+				psb_disable_pipestat(dev_priv, 1,
+						     PIPE_VBLANK_INTERRUPT_ENABLE);
+
+			if (dev->vblank_enabled[2]) {
+				if (dev_priv->platform_rev_id != MDFLD_PNW_A0 &&
+				    is_cmd_mode_panel(dev)) {
+#if 0 /* FIXME need to revisit it */
+					psb_disable_pipestat(dev_priv, 2,
+							     PIPE_TE_ENABLE);
+#endif
+				} else
+					psb_disable_pipestat(dev_priv, 2,
+							     PIPE_VBLANK_INTERRUPT_ENABLE);
+			}
+		}
+		dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG |
+					  _PSB_IRQ_MSVDX_FLAG |
+					  _LNC_IRQ_TOPAZ_FLAG;
+#ifdef CONFIG_MDFD_GL3
+		/* Duplicate code can be removed.
+		dev_priv->vdc_irq_mask &= _MDFLD_GL3_IRQ_FLAG;
+		*/
+#endif
+	}
+	/*TODO: remove follwoing code*/
+	if (hw_islands & OSPM_GRAPHICS_ISLAND) {
+		dev_priv->vdc_irq_mask &= ~_PSB_IRQ_SGX_FLAG;
+	}
+	if (hw_islands & OSPM_GL3_CACHE_ISLAND) {
+#ifdef CONFIG_MDFD_GL3
+		dev_priv->vdc_irq_mask &= ~_MDFLD_GL3_IRQ_FLAG;
+#endif
+	}
+
+	if ((hw_islands & OSPM_VIDEO_DEC_ISLAND))
+		dev_priv->vdc_irq_mask &= ~_PSB_IRQ_MSVDX_FLAG;
+
+	if ((hw_islands & OSPM_VIDEO_ENC_ISLAND))
+		dev_priv->vdc_irq_mask &= ~_LNC_IRQ_TOPAZ_FLAG;
+
+	/*These two registers are safe even if display island is off*/
+	PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
+	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+
+	wmb();
+
+	/*This register is safe even if display island is off*/
+	PSB_WVDC32(PSB_RVDC32(PSB_INT_IDENTITY_R), PSB_INT_IDENTITY_R);
+
+#ifdef CONFIG_MDFD_VIDEO_DECODE
+	if (!dev_priv->topaz_disabled)
+		if (hw_islands & OSPM_VIDEO_ENC_ISLAND)
+			if (ospm_power_is_hw_on(OSPM_VIDEO_ENC_ISLAND)) {
+				if (IS_MDFLD(dev))
+					pnw_topaz_disableirq(dev);
+			}
+	if (hw_islands & OSPM_VIDEO_DEC_ISLAND)
+		if (ospm_power_is_hw_on(OSPM_VIDEO_DEC_ISLAND))
+			psb_msvdx_disableirq(dev);
+#endif
+	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+}
+
+#ifdef CONFIG_CTP_DPST
+void psb_irq_turn_on_dpst(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) dev->dev_private;
+	struct mdfld_dsi_config *dsi_config = NULL;
+	struct mdfld_dsi_hw_context *ctx = NULL;
+	u32 hist_reg;
+	u32 pwm_reg;
+
+	if (!dev_priv)
+		return;
+
+	dsi_config = dev_priv->dsi_configs[0];
+	if (!dsi_config)
+		return;
+	ctx = &dsi_config->dsi_hw_context;
+
+	mdfld_dsi_dsr_forbid(dsi_config);
+
+	if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_ONLY_IF_ON)) {
+		PSB_WVDC32(BIT31, HISTOGRAM_LOGIC_CONTROL);
+		hist_reg = PSB_RVDC32(HISTOGRAM_LOGIC_CONTROL);
+		ctx->histogram_logic_ctrl = hist_reg;
+		PSB_WVDC32(BIT31, HISTOGRAM_INT_CONTROL);
+		hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
+		ctx->histogram_intr_ctrl = hist_reg;
+
+		PSB_WVDC32(0x80010100, PWM_CONTROL_LOGIC);
+		pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
+		PSB_WVDC32(pwm_reg | PWM_PHASEIN_ENABLE | PWM_PHASEIN_INT_ENABLE,
+			   PWM_CONTROL_LOGIC);
+		pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
+
+		psb_enable_pipestat(dev_priv, 0, PIPE_DPST_EVENT_ENABLE);
+
+		hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
+		PSB_WVDC32(hist_reg | HISTOGRAM_INT_CTRL_CLEAR, HISTOGRAM_INT_CONTROL);
+		pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
+		PSB_WVDC32(pwm_reg | 0x80010100 | PWM_PHASEIN_ENABLE, PWM_CONTROL_LOGIC);
+
+		PSB_WVDC32(0x0, LVDS_PORT_CTRL);
+		ctx->lvds_port_ctrl = 0x0;
+
+		ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+	}
+	mdfld_dsi_dsr_allow(dsi_config);
+}
+
+int psb_irq_enable_dpst(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) dev->dev_private;
+	unsigned long irqflags;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	psb_irq_turn_on_dpst(dev);
+
+	return 0;
+}
+
+void psb_irq_turn_off_dpst(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) dev->dev_private;
+	struct mdfld_dsi_config *dsi_config = NULL;
+	u32 hist_reg;
+	u32 pwm_reg;
+
+	if (!dev_priv)
+		return;
+	dsi_config = dev_priv->dsi_configs[0];
+	if (!dsi_config)
+		return;
+
+	mdfld_dsi_dsr_forbid(dsi_config);
+
+	if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
+				OSPM_UHB_ONLY_IF_ON)) {
+		PSB_WVDC32(0x00000000, HISTOGRAM_INT_CONTROL);
+		hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
+
+		psb_disable_pipestat(dev_priv, 0, PIPE_DPST_EVENT_ENABLE);
+
+		pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
+		PSB_WVDC32(pwm_reg & !(PWM_PHASEIN_INT_ENABLE), PWM_CONTROL_LOGIC);
+		pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
+
+		ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+	}
+
+	mdfld_dsi_dsr_allow(dsi_config);
+}
+
+int psb_irq_disable_dpst(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) dev->dev_private;
+	unsigned long irqflags;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	psb_irq_turn_off_dpst(dev);
+
+	return 0;
+}
+#endif
+
+#ifdef PSB_FIXME
+static int psb_vblank_do_wait(struct drm_device *dev,
+			      unsigned int *sequence, atomic_t *counter)
+{
+	unsigned int cur_vblank;
+	int ret = 0;
+	DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
+		    (((cur_vblank = atomic_read(counter))
+		      - *sequence) <= (1 << 23)));
+	*sequence = cur_vblank;
+
+	return ret;
+}
+#endif
+
+/*
+ * It is used to enable VBLANK interrupt
+ */
+int psb_enable_vblank(struct drm_device *dev, int pipe)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) dev->dev_private;
+	unsigned long irqflags;
+	uint32_t reg_val = 0;
+	uint32_t pipeconf_reg = mid_pipeconf(pipe);
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (IS_MDFLD(dev) && (dev_priv->platform_rev_id != MDFLD_PNW_A0) &&
+	    is_cmd_mode_panel(dev) && (pipe != 1))
+		return mdfld_enable_te(dev, pipe);
+
+	if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_ONLY_IF_ON)) {
+		reg_val = REG_READ(pipeconf_reg);
+		ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+	}
+
+	if (!(reg_val & PIPEACONF_ENABLE)) {
+		DRM_ERROR("%s: Pipe config hasn't been enabled for pipe %d\n",
+				__func__, pipe);
+		return 0;
+	}
+
+	dev_priv->b_vblank_enable = true;
+
+	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+	drm_psb_disable_vsync = 0;
+	mid_enable_pipe_event(dev_priv, pipe);
+	psb_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE);
+
+	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+
+	return 0;
+}
+
+/*
+ * It is used to disable VBLANK interrupt
+ */
+void psb_disable_vblank(struct drm_device *dev, int pipe)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) dev->dev_private;
+	unsigned long irqflags;
+
+	if (!(drm_psb_use_cases_control & PSB_VSYNC_OFF_ENABLE))
+		return;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (IS_MDFLD(dev) && (dev_priv->platform_rev_id != MDFLD_PNW_A0) &&
+	    is_cmd_mode_panel(dev) && (pipe != 1))
+		mdfld_disable_te(dev, pipe);
+
+	dev_priv->b_vblank_enable = false;
+
+	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+	drm_psb_disable_vsync = 1;
+	dev_priv->vsync_te_working[pipe] = false;
+	mid_disable_pipe_event(dev_priv, pipe);
+	psb_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE);
+	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+}
+
+/* Called from drm generic code, passed a 'crtc', which
+ * we use as a pipe index
+ */
+u32 psb_get_vblank_counter(struct drm_device *dev, int pipe)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) dev->dev_private;
+	uint32_t high_frame = PIPEAFRAMEHIGH;
+	uint32_t low_frame = PIPEAFRAMEPIXEL;
+	uint32_t pipeconf_reg = PIPEACONF;
+	uint32_t reg_val = 0;
+	uint32_t high1 = 0, high2 = 0, low = 0, count = 0;
+
+	switch (pipe) {
+	case 0:
+		break;
+	case 1:
+		high_frame = PIPEBFRAMEHIGH;
+		low_frame = PIPEBFRAMEPIXEL;
+		pipeconf_reg = PIPEBCONF;
+		break;
+	case 2:
+		high_frame = PIPECFRAMEHIGH;
+		low_frame = PIPECFRAMEPIXEL;
+		pipeconf_reg = PIPECCONF;
+		break;
+	default:
+		DRM_ERROR("%s, invalded pipe.\n", __func__);
+		return 0;
+	}
+
+	if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, false))
+		return 0;
+
+	if (pipe == 1 && dev_priv->bhdmiconnected == false) {
+		DRM_ERROR("trying to get vblank count for power off pipe %d\n",
+									pipe);
+		goto psb_get_vblank_counter_exit;
+	}
+
+	reg_val = REG_READ(pipeconf_reg);
+
+	if (!(reg_val & PIPEACONF_ENABLE)) {
+		DRM_ERROR("trying to get vblank count for disabled pipe %d\n", pipe);
+		goto psb_get_vblank_counter_exit;
+	}
+
+	/*
+	 * High & low register fields aren't synchronized, so make sure
+	 * we get a low value that's stable across two reads of the high
+	 * register.
+	 */
+	do {
+		high1 = ((REG_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
+			 PIPE_FRAME_HIGH_SHIFT);
+		low = ((REG_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
+		       PIPE_FRAME_LOW_SHIFT);
+		high2 = ((REG_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
+			 PIPE_FRAME_HIGH_SHIFT);
+	} while (high1 != high2);
+
+	count = (high1 << 8) | low;
+
+psb_get_vblank_counter_exit:
+
+	ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+
+	return count;
+}
+
+/*
+ * It is used to enable TE interrupt
+ */
+int mdfld_enable_te(struct drm_device *dev, int pipe)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) dev->dev_private;
+	unsigned long irqflags;
+	uint32_t reg_val = 0;
+	uint32_t pipeconf_reg = mid_pipeconf(pipe);
+
+	PSB_DEBUG_ENTRY("pipe = %d, \n", pipe);
+
+	if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_ONLY_IF_ON)) {
+		reg_val = REG_READ(pipeconf_reg);
+		ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+	}
+
+	if (!(reg_val & PIPEACONF_ENABLE)) {
+		DRM_ERROR("%s: Pipe config hasn't been enabled for pipe %d\n",
+				__func__, pipe);
+		return 0;
+	}
+
+	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+	mid_enable_pipe_event(dev_priv, pipe);
+	psb_enable_pipestat(dev_priv, pipe, PIPE_TE_ENABLE);
+	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+
+	return 0;
+}
+
+/*
+ * It is used to disable TE interrupt
+ */
+void mdfld_disable_te(struct drm_device *dev, int pipe)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) dev->dev_private;
+	unsigned long irqflags;
+	struct mdfld_dsi_pkg_sender *sender;
+	struct mdfld_dsi_config *dsi_config = dev_priv->dsi_configs[0];
+
+	PSB_DEBUG_ENTRY("\n");
+
+	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+	mid_disable_pipe_event(dev_priv, pipe);
+	psb_disable_pipestat(dev_priv, pipe,
+			(PIPE_TE_ENABLE | PIPE_DPST_EVENT_ENABLE));
+
+	if (dsi_config) {
+		/*
+		 * reset te_seq, which make sure te_seq is really
+		 * increased by next te enable.
+		 * reset te_seq to 1 instead of 0 will make sure
+		 * that last_screen_update and te_seq are alwasys
+		 * unequal when exiting from DSR.
+		 */
+		sender = mdfld_dsi_get_pkg_sender(dsi_config);
+		atomic64_set(&sender->last_screen_update, 0);
+		atomic64_set(&sender->te_seq, 1);
+		dev_priv->vsync_te_working[pipe] = false;
+		atomic_set(&dev_priv->mipi_flip_abnormal, 0);
+	}
+	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+}
+
+int mid_irq_enable_hdmi_audio(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) dev->dev_private;
+	unsigned long irqflags;
+	u32 reg_val = 0, mask = 0;
+
+	if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_ONLY_IF_ON)) {
+		reg_val = REG_READ(PIPEBCONF);
+		ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+	}
+
+	if (!(reg_val & PIPEACONF_ENABLE))
+		return -EINVAL;
+
+	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+	/* enable HDMI audio interrupt*/
+	mid_enable_pipe_event(dev_priv, 1);
+	dev_priv->pipestat[1] &= ~PIPE_HDMI_AUDIO_INT_MASK;
+	mask = dev_priv->hdmi_audio_interrupt_mask;
+	psb_enable_pipestat(dev_priv, 1, mask);
+
+	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+	return 0;
+}
+
+int mid_irq_disable_hdmi_audio(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) dev->dev_private;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+	mid_disable_pipe_event(dev_priv, 1);
+	psb_disable_pipestat(dev_priv, 1, PIPE_HDMI_AUDIO_INT_MASK);
+
+	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+	return 0;
+}
diff --git a/drivers/external_drivers/intel_media/common/psb_irq.h b/drivers/external_drivers/intel_media/common/psb_irq.h
new file mode 100644
index 0000000..8e5912e
--- /dev/null
+++ b/drivers/external_drivers/intel_media/common/psb_irq.h
@@ -0,0 +1,57 @@
+/**************************************************************************
+ * Copyright (c) 2009, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *    Benjamin Defnet <benjamin.r.defnet@intel.com>
+ *    Rajesh Poornachandran <rajesh.poornachandran@intel.com>
+ *
+ **************************************************************************/
+
+#ifndef _SYSIRQ_H_
+#define _SYSIRQ_H_
+
+#include <drm/drmP.h>
+
+bool sysirq_init(struct drm_device *dev);
+void sysirq_uninit(struct drm_device *dev);
+
+void psb_irq_preinstall(struct drm_device *dev);
+int  psb_irq_postinstall(struct drm_device *dev);
+void psb_irq_uninstall(struct drm_device *dev);
+irqreturn_t psb_irq_handler(DRM_IRQ_ARGS);
+
+void psb_irq_preinstall_islands(struct drm_device *dev, int hw_islands);
+int  psb_irq_postinstall_islands(struct drm_device *dev, int hw_islands);
+void psb_irq_uninstall_islands(struct drm_device *dev, int hw_islands);
+
+void psb_irq_preinstall_graphics_islands(struct drm_device *dev,
+							int hw_islands);
+int  psb_irq_postinstall_graphics_islands(struct drm_device *dev,
+							int hw_islands);
+void psb_irq_uninstall_graphics_islands(struct drm_device *dev,
+							int hw_islands);
+
+
+int psb_irq_enable_dpst(struct drm_device *dev);
+int psb_irq_disable_dpst(struct drm_device *dev);
+void psb_irq_turn_on_dpst(struct drm_device *dev);
+void psb_irq_turn_off_dpst(struct drm_device *dev);
+int  psb_enable_vblank(struct drm_device *dev, int pipe);
+void psb_disable_vblank(struct drm_device *dev, int pipe);
+u32  psb_get_vblank_counter(struct drm_device *dev, int pipe);
+
+#endif /* _SYSIRQ_H_ */
diff --git a/drivers/external_drivers/intel_media/common/psb_powermgmt.c b/drivers/external_drivers/intel_media/common/psb_powermgmt.c
new file mode 100644
index 0000000..14c4ca9
--- /dev/null
+++ b/drivers/external_drivers/intel_media/common/psb_powermgmt.c
@@ -0,0 +1,2230 @@
+/**************************************************************************
+ * Copyright (c) 2009, Intel Corporation.
+ * All Rights Reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Benjamin Defnet <benjamin.r.defnet@intel.com>
+ *    Rajesh Poornachandran <rajesh.poornachandran@intel.com>
+ *    Yun(Mark) Tu <yun.tu@intel.com>
+ *    Zhouzhou(Scott) Fang <zhouzhou.fang@intel.com>
+ */
+#include "psb_powermgmt.h"
+#include "psb_drv.h"
+#include "psb_intel_reg.h"
+#include "psb_msvdx.h"
+#include "pnw_topaz.h"
+#include "mdfld_gl3.h"
+#include "mdfld_dsi_dbi.h"
+#include "mdfld_dsi_dbi_dpu.h"
+#include "mdfld_dsi_dpi.h"
+#include "android_hdmi.h"
+#include "psb_intel_display.h"
+#include "psb_irq.h"
+#ifdef CONFIG_GFX_RTPM
+#include <linux/pm_runtime.h>
+#endif
+#include <linux/atomic.h>
+
+#include <linux/version.h>
+#define SUPPORT_EARLY_SUSPEND 1
+#include <asm/intel_scu_pmic.h>
+
+#if SUPPORT_EARLY_SUSPEND
+#include <linux/earlysuspend.h>
+#endif /* if SUPPORT_EARLY_SUSPEND */
+
+#include <linux/mutex.h>
+#include <linux/gpio.h>
+#include <linux/early_suspend_sysfs.h>
+#include "mdfld_dsi_dbi_dsr.h"
+
+#define SCU_CMD_VPROG2  0xe3
+
+struct drm_device *gpDrmDevice;
+EXPORT_SYMBOL(gpDrmDevice);
+struct mutex g_ospm_mutex;
+
+/* Lock strategy */
+/*
+ * we use both mutex lock and spin lock, for it
+ * need synchronization between atomic context and process context
+*/
+static bool gbSuspendInProgress; /* default set as false */
+static bool gbResumeInProgress; /* default set as false */
+static bool pcihostSuspendInProgress;
+bool gbSuspended; /* Indicate the host PCI suspened or not */
+static int g_hw_power_status_mask;
+static atomic_t g_display_access_count;
+static atomic_t g_graphics_access_count;
+atomic_t g_videoenc_access_count;
+atomic_t g_videodec_access_count;
+
+
+extern u32 DISP_PLANEB_STATUS;
+
+void acquire_ospm_lock(void)
+{
+	mutex_lock(&g_ospm_mutex);
+}
+
+void release_ospm_lock(void)
+{
+	mutex_unlock(&g_ospm_mutex);
+}
+
+static void ospm_early_suspend();
+static void ospm_late_resume();
+
+#if SUPPORT_EARLY_SUSPEND
+/*
+ * gfx_early_suspend
+ *
+ */
+static void gfx_early_suspend(struct early_suspend *h);
+static void gfx_late_resume(struct early_suspend *h);
+
+static struct early_suspend gfx_early_suspend_desc = {
+	.level = EARLY_SUSPEND_LEVEL_DISABLE_FB,
+	.suspend = gfx_early_suspend,
+	.resume = gfx_late_resume,
+};
+#endif /* if SUPPORT_EARLY_SUSPEND */
+
+static int ospm_runtime_pm_msvdx_suspend(struct drm_device *dev)
+{
+	int ret = 0;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
+	struct psb_video_ctx *pos, *n;
+	int decode_ctx = 0, decode_running = 0;
+	unsigned long irq_flags;
+
+	PSB_DEBUG_PM("MSVDX: %s: enter in runtime pm.\n", __func__);
+
+	if (!ospm_power_is_hw_on(OSPM_VIDEO_DEC_ISLAND))
+		goto out;
+
+	if (psb_get_power_state(OSPM_VIDEO_DEC_ISLAND) == 0) {
+		PSB_DEBUG_PM("MSVDX: island already in power off state.\n");
+		goto out;
+	}
+
+	if (atomic_read(&g_videodec_access_count)) {
+		ret = -1;
+		goto out;
+	}
+
+	if (psb_check_msvdx_idle(dev)) {
+		ret = -2;
+		goto out;
+	}
+
+	spin_lock_irqsave(&dev_priv->video_ctx_lock, irq_flags);
+	list_for_each_entry_safe(pos, n, &dev_priv->video_ctx, head) {
+		int entrypoint = pos->ctx_type & 0xff;
+		if (entrypoint == VAEntrypointVLD ||
+			entrypoint == VAEntrypointIZZ ||
+			entrypoint == VAEntrypointIDCT ||
+			entrypoint == VAEntrypointMoComp ||
+			entrypoint == VAEntrypointDeblocking) {
+			decode_ctx = 1;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&dev_priv->video_ctx_lock, irq_flags);
+
+	/* have decode context, but not started, or is just closed */
+	if (decode_ctx && msvdx_priv->msvdx_ctx)
+		decode_running = 1;
+
+#ifdef CONFIG_MDFD_VIDEO_DECODE
+	psb_irq_uninstall_islands(gpDrmDevice, OSPM_VIDEO_DEC_ISLAND);
+
+	if (decode_running)
+		psb_msvdx_save_context(gpDrmDevice);
+	MSVDX_NEW_PMSTATE(dev, msvdx_priv, PSB_PMSTATE_POWERDOWN);
+#endif
+	ospm_power_island_down(OSPM_VIDEO_DEC_ISLAND);
+
+#ifdef CONFIG_MDFD_GL3
+	/* Power off GL3 */
+	if (IS_MDFLD(dev))
+		ospm_power_island_down(OSPM_GL3_CACHE_ISLAND);
+#endif
+
+out:
+	return ret;
+}
+
+static int ospm_runtime_pm_topaz_suspend(struct drm_device *dev)
+{
+	int ret = 0;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct pnw_topaz_private *pnw_topaz_priv = dev_priv->topaz_private;
+	struct psb_video_ctx *pos, *n;
+	int encode_ctx = 0, encode_running = 0;
+	unsigned long irq_flags;
+
+	if (!ospm_power_is_hw_on(OSPM_VIDEO_ENC_ISLAND))
+		goto out;
+
+	if (atomic_read(&g_videoenc_access_count)) {
+		ret = -1;
+		goto out;
+	}
+
+	if (IS_MDFLD(dev)) {
+		if (pnw_check_topaz_idle(dev)) {
+			ret = -2;
+			goto out;
+		}
+	}
+
+	spin_lock_irqsave(&dev_priv->video_ctx_lock, irq_flags);
+	list_for_each_entry_safe(pos, n, &dev_priv->video_ctx, head) {
+		int entrypoint = pos->ctx_type & 0xff;
+		if (entrypoint == VAEntrypointEncSlice ||
+		    entrypoint == VAEntrypointEncPicture) {
+			encode_ctx = 1;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&dev_priv->video_ctx_lock, irq_flags);
+
+	/* have encode context, but not started, or is just closed */
+	if (encode_ctx && dev_priv->topaz_ctx)
+		encode_running = 1;
+
+#ifdef CONFIG_MDFD_VIDEO_DECODE
+	psb_irq_uninstall_islands(gpDrmDevice, OSPM_VIDEO_ENC_ISLAND);
+
+	if (IS_MDFLD(dev)) {
+		if (encode_running)
+			pnw_topaz_save_mtx_state(gpDrmDevice);
+		PNW_TOPAZ_NEW_PMSTATE(dev, pnw_topaz_priv,
+				PSB_PMSTATE_POWERDOWN);
+	}
+#endif
+	ospm_power_island_down(OSPM_VIDEO_ENC_ISLAND);
+
+#ifdef CONFIG_MDFD_GL3
+	/* Power off GL3 */
+	if (IS_MDFLD(dev))
+		ospm_power_island_down(OSPM_GL3_CACHE_ISLAND);
+#endif
+
+out:
+	return ret;
+}
+
+#ifdef CONFIG_GFX_RTPM
+void psb_ospm_post_power_down()
+{
+	int ret;
+
+	if (likely(!gpDrmDevice->pdev->dev.power.runtime_auto))
+		return;
+
+	if (ospm_power_is_hw_on(OSPM_VIDEO_ENC_ISLAND |
+				OSPM_VIDEO_DEC_ISLAND |
+				OSPM_GRAPHICS_ISLAND))
+		return;
+
+	PSB_DEBUG_PM("request runtime idle\n");
+
+	ret = pm_request_idle(&gpDrmDevice->pdev->dev);
+
+	if (ret) {
+		PSB_DEBUG_PM("pm_request_idle fail, ret %d\n", ret);
+		ret = pm_runtime_barrier(&gpDrmDevice->pdev->dev);
+		if (!ret) {
+			ret = pm_request_idle(&gpDrmDevice->pdev->dev);
+			PSB_DEBUG_PM("pm_request_idle again, ret %d\n", ret);
+		}
+	}
+}
+#endif
+
+
+
+static int ospm_runtime_pm_msvdx_resume(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
+
+	/*printk(KERN_ALERT "ospm_runtime_pm_msvdx_resume\n");*/
+
+#ifdef CONFIG_MDFD_VIDEO_DECODE
+	MSVDX_NEW_PMSTATE(dev, msvdx_priv, PSB_PMSTATE_POWERUP);
+
+	psb_msvdx_restore_context(dev);
+#endif
+
+	return 0;
+}
+
+static int ospm_runtime_pm_topaz_resume(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct pnw_topaz_private *pnw_topaz_priv = dev_priv->topaz_private;
+	struct psb_video_ctx *pos, *n;
+	int encode_ctx = 0, encode_running = 0;
+	unsigned long irq_flags;
+
+	/*printk(KERN_ALERT "ospm_runtime_pm_topaz_resume\n");*/
+	spin_lock_irqsave(&dev_priv->video_ctx_lock, irq_flags);
+	list_for_each_entry_safe(pos, n, &dev_priv->video_ctx, head) {
+		int entrypoint = pos->ctx_type & 0xff;
+		if (entrypoint == VAEntrypointEncSlice ||
+		    entrypoint == VAEntrypointEncPicture) {
+			encode_ctx = 1;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&dev_priv->video_ctx_lock, irq_flags);
+
+	/* have encode context, but not started, or is just closed */
+	if (encode_ctx && dev_priv->topaz_ctx)
+		encode_running = 1;
+
+	if (encode_ctx)
+		PSB_DEBUG_PM("Topaz: has encode context, running=%d\n",
+			     encode_running);
+	else
+		PSB_DEBUG_PM("Topaz: no encode running\n");
+
+#ifdef CONFIG_MDFD_VIDEO_DECODE
+	if (IS_MDFLD(dev)) {
+		if (encode_running) { /* has encode session running */
+			psb_irq_uninstall_islands(gpDrmDevice, OSPM_VIDEO_ENC_ISLAND);
+			pnw_topaz_restore_mtx_state(gpDrmDevice);
+		}
+		PNW_TOPAZ_NEW_PMSTATE(dev, pnw_topaz_priv, PSB_PMSTATE_POWERUP);
+	}
+#endif
+	return 0;
+}
+
+void ospm_apm_power_down_msvdx(struct drm_device *dev, int force_off)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
+
+	mutex_lock(&g_ospm_mutex);
+	if (force_off)
+		goto power_off;
+	if (!ospm_power_is_hw_on(OSPM_VIDEO_DEC_ISLAND)) {
+		PSB_DEBUG_PM("g_hw_power_status_mask: msvdx in power off.\n");
+		goto out;
+	}
+
+	if (psb_get_power_state(OSPM_VIDEO_DEC_ISLAND) == 0) {
+		PSB_DEBUG_PM("pmu_nc_get_power_state: msvdx in power off.\n");
+		goto out;
+	}
+
+	if (atomic_read(&g_videodec_access_count)) {
+		PSB_DEBUG_PM("g_videodec_access_count has been set.\n");
+		goto out;
+	}
+
+#ifdef CONFIG_MDFD_VIDEO_DECODE
+	if (psb_check_msvdx_idle(dev))
+		goto out;
+
+	psb_msvdx_save_context(dev);
+
+#endif
+
+power_off:
+	ospm_power_island_down(OSPM_VIDEO_DEC_ISLAND);
+#ifdef CONFIG_MDFD_GL3
+	/* Power off GL3 */
+	ospm_power_island_down(OSPM_GL3_CACHE_ISLAND);
+#endif
+	MSVDX_NEW_PMSTATE(dev, msvdx_priv, PSB_PMSTATE_POWERDOWN);
+
+out:
+	mutex_unlock(&g_ospm_mutex);
+	return;
+}
+
+void ospm_apm_power_down_topaz(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct pnw_topaz_private *pnw_topaz_priv = dev_priv->topaz_private;
+
+	mutex_lock(&g_ospm_mutex);
+
+	if (!ospm_power_is_hw_on(OSPM_VIDEO_ENC_ISLAND))
+		goto out;
+
+
+	if (atomic_read(&g_videoenc_access_count))
+		goto out;
+
+#ifdef CONFIG_MDFD_VIDEO_DECODE
+	if (IS_MDFLD(dev))
+		if (pnw_check_topaz_idle(dev))
+			goto out;
+	if (IS_MDFLD(dev)) {
+		psb_irq_uninstall_islands(dev, OSPM_VIDEO_ENC_ISLAND);
+		pnw_topaz_save_mtx_state(gpDrmDevice);
+		PNW_TOPAZ_NEW_PMSTATE(dev, pnw_topaz_priv, PSB_PMSTATE_POWERDOWN);
+	}
+	ospm_power_island_down(OSPM_VIDEO_ENC_ISLAND);
+#endif
+
+#ifdef CONFIG_MDFD_GL3
+	/* Power off GL3 */
+	if (IS_MDFLD(dev))
+		ospm_power_island_down(OSPM_GL3_CACHE_ISLAND);
+#endif
+
+out:
+	mutex_unlock(&g_ospm_mutex);
+	return;
+}
+
+static ssize_t early_suspend_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	if (!strncmp(buf, EARLY_SUSPEND_ON, EARLY_SUSPEND_STATUS_LEN))
+		ospm_early_suspend();
+	else if (!strncmp(buf, EARLY_SUSPEND_OFF, EARLY_SUSPEND_STATUS_LEN))
+		ospm_late_resume();
+
+	return count;
+}
+static DEVICE_EARLY_SUSPEND_ATTR(early_suspend_store);
+
+/*
+ * ospm_power_init
+ *
+ * Description: Initialize this ospm power management module
+ */
+void ospm_power_init(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = (struct drm_psb_private *)dev->dev_private;
+	unsigned long flags;
+
+	gpDrmDevice = dev;
+
+	mutex_init(&g_ospm_mutex);
+	spin_lock_init(&dev_priv->ospm_lock);
+
+	spin_lock_irqsave(&dev_priv->ospm_lock, flags);
+	g_hw_power_status_mask = OSPM_ALL_ISLANDS;
+	spin_unlock_irqrestore(&dev_priv->ospm_lock, flags);
+
+	atomic_set(&g_display_access_count, 0);
+	atomic_set(&g_graphics_access_count, 0);
+	atomic_set(&g_videoenc_access_count, 0);
+	atomic_set(&g_videodec_access_count, 0);
+
+	device_create_file(&dev->pdev->dev, &dev_attr_early_suspend);
+
+	register_early_suspend_device(&gpDrmDevice->pdev->dev);
+
+#if SUPPORT_EARLY_SUSPEND
+	register_early_suspend(&gfx_early_suspend_desc);
+#endif /* if SUPPORT_EARLY_SUSPEND */
+
+#ifdef OSPM_STAT
+	dev_priv->graphics_state = PSB_PWR_STATE_ON;
+	dev_priv->gfx_last_mode_change = jiffies;
+	dev_priv->gfx_on_time = 0;
+	dev_priv->gfx_off_time = 0;
+#endif
+}
+
+/*
+ * ospm_power_uninit
+ *
+ * Description: Uninitialize this ospm power management module
+ */
+void ospm_power_uninit(void)
+{
+	device_remove_file(&gpDrmDevice->pdev->dev, &dev_attr_early_suspend);
+	unregister_early_suspend_device(&gpDrmDevice->pdev->dev);
+
+#if SUPPORT_EARLY_SUSPEND
+    unregister_early_suspend(&gfx_early_suspend_desc);
+#endif /* if SUPPORT_EARLY_SUSPEND */
+    mutex_destroy(&g_ospm_mutex);
+#ifdef CONFIG_GFX_RTPM
+	pm_runtime_get_noresume(&gpDrmDevice->pdev->dev);
+#endif
+}
+
+/*
+ *  mdfld_adjust_display_fifo
+ *
+ * Update display fifo setting to avoid hdmi flicker
+ */
+static void mdfld_adjust_display_fifo(struct drm_device *dev)
+{
+	u32 temp;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct mdfld_dsi_config *dsi_config = dev_priv->dsi_configs[0];
+	struct drm_display_mode *mode = dsi_config->fixed_mode;
+
+	if (IS_CTP(dev)) {
+		/* Set proper high priority configuration to avoid overlay
+		 * block memory self-refresh entry */
+		temp = REG_READ(G_HP_CONTROL);
+		REG_WRITE(G_HP_CONTROL,
+			HP_REQUESTORS_STATUS_OVERRIDE_MODE | temp);
+		if (mode &&
+		    ((mode->hdisplay >= 1920 && mode->vdisplay >= 1080) ||
+		     (mode->hdisplay >= 1080 && mode->vdisplay >= 1920))) {
+			if ((mode->hdisplay == 1920 &&
+			     mode->vdisplay == 1080) ||
+			    (mode->hdisplay == 1080 &&
+			     mode->vdisplay == 1920)) {
+				/* setting for 1080p panel */
+				REG_WRITE(DSPARB, 0x0005F8C0);
+				REG_WRITE(DSPFW1, 0x0F0F1010);
+				REG_WRITE(DSPFW2, 0x5F2F0F0F);
+				REG_WRITE(DSPFW4, 0x07071010);
+			} else {
+				/* setting for panel bigger than 1080p */
+				REG_WRITE(DSPARB, 0x0005F8D4);
+				REG_WRITE(DSPFW1, 0x0F0F1010);
+				REG_WRITE(DSPFW2, 0x5F2F0F0F);
+				REG_WRITE(DSPFW4, 0x07071010);
+			}
+		} else {
+			/* setting for panel smaller than 1080p, f.e 720p */
+			REG_WRITE(DSPARB, 0x0005E480);
+			REG_WRITE(DSPFW1, 0x0F0F103F);
+			REG_WRITE(DSPFW4, 0x0707101F);
+		}
+
+		REG_WRITE(MI_ARB, 0x0);
+	}
+
+	temp = REG_READ(DSPARB);
+	PSB_DEBUG_ENTRY("gfx_hdmi_setting: DSPARB = 0x%x", temp);
+
+	temp = REG_READ(DSPFW1);
+	PSB_DEBUG_ENTRY("gfx_hdmi_setting: DSPFW1 = 0x%x", temp);
+
+	temp = REG_READ(DSPFW4);
+	PSB_DEBUG_ENTRY("gfx_hdmi_setting: DSPFW4 = 0x%x", temp);
+
+	temp = REG_READ(MI_ARB);
+	PSB_DEBUG_ENTRY("gfx_hdmi_setting: MI_ARB = 0x%x", temp);
+
+}
+
+/*
+* ospm_post_init
+*
+* Description: Power gate unused GFX & Display islands.
+*/
+void ospm_post_init(struct drm_device *dev)
+{
+	u32 dc_islands  = 0;
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *)dev->dev_private;
+	unsigned long flags;
+	u32 all_display_islands;
+
+	mutex_lock(&g_ospm_mutex);
+	/* need disable power for msvdx and topaz in init stage */
+	ospm_power_island_down(OSPM_VIDEO_DEC_ISLAND | OSPM_VIDEO_ENC_ISLAND);
+
+#ifndef CONFIG_MDFD_GL3
+	ospm_power_island_down(OSPM_GL3_CACHE_ISLAND);
+#endif
+	/*Save & Power gate un-used display islands.*/
+	mdfld_save_display(dev);
+
+	if (!(dev_priv->panel_desc & DISPLAY_A))
+		dc_islands |= OSPM_DISPLAY_A_ISLAND;
+
+	if (!(dev_priv->panel_desc & DISPLAY_B))
+		dc_islands |= OSPM_DISPLAY_B_ISLAND;
+
+	if (!(dev_priv->panel_desc & DISPLAY_C))
+		dc_islands |= OSPM_DISPLAY_C_ISLAND;
+
+	if (!(dev_priv->panel_desc))
+		dc_islands |= OSPM_MIPI_ISLAND;
+
+	DRM_INFO("%s dc_islands: %x to be powered OFF\n", __func__, dc_islands);
+
+	spin_lock_irqsave(&dev_priv->ospm_lock, flags);
+	/*
+	If pmu_nc_set_power_state fails then accessing HW
+	reg would result in a crash - IERR/Fabric error.
+	*/
+	if (pmu_nc_set_power_state(dc_islands,
+		OSPM_ISLAND_DOWN, OSPM_REG_TYPE))
+		BUG();
+
+	all_display_islands = (OSPM_DISPLAY_A_ISLAND |
+	OSPM_DISPLAY_B_ISLAND |
+	OSPM_DISPLAY_C_ISLAND |
+	OSPM_MIPI_ISLAND);
+	if ((dc_islands & all_display_islands) == all_display_islands)
+		g_hw_power_status_mask &= ~OSPM_DISPLAY_ISLAND;
+
+	spin_unlock_irqrestore(&dev_priv->ospm_lock, flags);
+
+/* if HDMI is disabled in the kernel .config, then we want to
+disable these MSIC power rails permanently.  */
+#ifndef CONFIG_SUPPORT_HDMI
+	if (IS_MDFLD_OLD(dev)) {
+		/* turn off HDMI power rails */
+		intel_scu_ipc_iowrite8(MSIC_VHDMICNT, VHDMI_OFF);
+		intel_scu_ipc_iowrite8(MSIC_VCC330CNT, VCC330_OFF);
+	}
+	if (IS_CTP(dev)) {
+		/* turn off HDMI power rails */
+		intel_scu_ipc_iowrite8(MSIC_VCC330CNT, VCC330_OFF);
+	}
+#endif
+	mdfld_adjust_display_fifo(dev);
+
+	mutex_unlock(&g_ospm_mutex);
+
+}
+
+/*
+ * mdfld_save_display_registers
+ *
+ * Description: We are going to suspend so save current display
+ * register state.
+ *
+ */
+static int mdfld_save_display_registers (struct drm_device *dev, int pipe)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	int i;
+
+	/* regester */
+	u32 dpll_reg = MDFLD_DPLL_B;
+	u32 fp_reg = MDFLD_DPLL_DIV0;
+	u32 pipeconf_reg = PIPEBCONF;
+	u32 htot_reg = HTOTAL_B;
+	u32 hblank_reg = HBLANK_B;
+	u32 hsync_reg = HSYNC_B;
+	u32 vtot_reg = VTOTAL_B;
+	u32 vblank_reg = VBLANK_B;
+	u32 vsync_reg = VSYNC_B;
+	u32 pipesrc_reg = PIPEBSRC;
+	u32 dspstride_reg = DSPBSTRIDE;
+	u32 dsplinoff_reg = DSPBLINOFF;
+	u32 dsptileoff_reg = DSPBTILEOFF;
+	u32 dspsize_reg = DSPBSIZE;
+	u32 dsppos_reg = DSPBPOS;
+	u32 dspsurf_reg = DSPBSURF;
+	u32 dspcntr_reg = DSPBCNTR;
+	u32 dspstatus_reg = PIPEBSTAT;
+	u32 palette_reg = PALETTE_B;
+	u32 color_coef_reg = PIPEB_COLOR_COEF0;
+
+
+	/* values */
+	u32 *dpll_val = &dev_priv->saveDPLL_B;
+	u32 *fp_val = &dev_priv->saveFPB0;
+	u32 *pipeconf_val = &dev_priv->savePIPEBCONF;
+	u32 *htot_val = &dev_priv->saveHTOTAL_B;
+	u32 *hblank_val = &dev_priv->saveHBLANK_B;
+	u32 *hsync_val = &dev_priv->saveHSYNC_B;
+	u32 *vtot_val = &dev_priv->saveVTOTAL_B;
+	u32 *vblank_val = &dev_priv->saveVBLANK_B;
+	u32 *vsync_val = &dev_priv->saveVSYNC_B;
+	u32 *pipesrc_val = &dev_priv->savePIPEBSRC;
+	u32 *dspstride_val = &dev_priv->saveDSPBSTRIDE;
+	u32 *dsplinoff_val = &dev_priv->saveDSPBLINOFF;
+	u32 *dsptileoff_val = &dev_priv->saveDSPBTILEOFF;
+	u32 *dspsize_val = &dev_priv->saveDSPBSIZE;
+	u32 *dsppos_val = &dev_priv->saveDSPBPOS;
+	u32 *dspsurf_val = &dev_priv->saveDSPBSURF;
+	u32 *dspcntr_val = &dev_priv->saveDSPBCNTR;
+	u32 *dspstatus_val = &dev_priv->saveDSPBSTATUS;
+	u32 *palette_val = dev_priv->save_palette_b;
+	u32 *color_coef = dev_priv->save_color_coef_b;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (pipe != 1)
+		return 0;
+
+	/* Pipe & plane A info */
+	*dpll_val = REG_READ(dpll_reg);
+	*fp_val = REG_READ(fp_reg);
+	*pipeconf_val = REG_READ(pipeconf_reg);
+	*htot_val = REG_READ(htot_reg);
+	*hblank_val = REG_READ(hblank_reg);
+	*hsync_val = REG_READ(hsync_reg);
+	*vtot_val = REG_READ(vtot_reg);
+	*vblank_val = REG_READ(vblank_reg);
+	*vsync_val = REG_READ(vsync_reg);
+	*pipesrc_val = REG_READ(pipesrc_reg);
+	*dspstride_val = REG_READ(dspstride_reg);
+	*dsplinoff_val = REG_READ(dsplinoff_reg);
+	*dsptileoff_val = REG_READ(dsptileoff_reg);
+	*dspsize_val = REG_READ(dspsize_reg);
+	*dsppos_val = REG_READ(dsppos_reg);
+	*dspsurf_val = REG_READ(dspsurf_reg);
+	*dspcntr_val = REG_READ(dspcntr_reg);
+	*dspstatus_val = REG_READ(dspstatus_reg);
+
+	/*save palette (gamma) */
+	for (i = 0; i < 256; i++)
+		palette_val[i] = REG_READ(palette_reg + (i<<2));
+
+	/*save color_coef (chrome) */
+	for (i = 0; i < 6; i++)
+		color_coef[i] = REG_READ(color_coef_reg + (i<<2));
+
+
+	dev_priv->savePFIT_CONTROL = REG_READ(PFIT_CONTROL);
+	dev_priv->savePFIT_PGM_RATIOS = REG_READ(PFIT_PGM_RATIOS);
+
+	dev_priv->saveHDMIPHYMISCCTL = REG_READ(HDMIPHYMISCCTL);
+	dev_priv->saveHDMIB_CONTROL = REG_READ(HDMIB_CONTROL);
+	dev_priv->saveDATALANES_B = REG_READ(HDMIB_LANES02);
+	return 0;
+}
+
+/*
+ * mdfld_save_cursor_overlay_registers
+ *
+ * Description: We are going to suspend so save current cursor and overlay display
+ * register state.
+ */
+static int mdfld_save_cursor_overlay_registers(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	/*save cursor regs*/
+	dev_priv->saveDSPACURSOR_CTRL = PSB_RVDC32(CURACNTR);
+	dev_priv->saveDSPACURSOR_BASE = PSB_RVDC32(CURABASE);
+	dev_priv->saveDSPACURSOR_POS = PSB_RVDC32(CURAPOS);
+
+	dev_priv->saveDSPBCURSOR_CTRL = PSB_RVDC32(CURBCNTR);
+	dev_priv->saveDSPBCURSOR_BASE = PSB_RVDC32(CURBBASE);
+	dev_priv->saveDSPBCURSOR_POS = PSB_RVDC32(CURBPOS);
+
+	dev_priv->saveDSPCCURSOR_CTRL = PSB_RVDC32(CURCCNTR);
+	dev_priv->saveDSPCCURSOR_BASE = PSB_RVDC32(CURCBASE);
+	dev_priv->saveDSPCCURSOR_POS = PSB_RVDC32(CURCPOS);
+
+	/* HW overlay */
+	dev_priv->saveOV_OVADD = PSB_RVDC32(OV_OVADD);
+	dev_priv->saveOV_OGAMC0 = PSB_RVDC32(OV_OGAMC0);
+	dev_priv->saveOV_OGAMC1 = PSB_RVDC32(OV_OGAMC1);
+	dev_priv->saveOV_OGAMC2 = PSB_RVDC32(OV_OGAMC2);
+	dev_priv->saveOV_OGAMC3 = PSB_RVDC32(OV_OGAMC3);
+	dev_priv->saveOV_OGAMC4 = PSB_RVDC32(OV_OGAMC4);
+	dev_priv->saveOV_OGAMC5 = PSB_RVDC32(OV_OGAMC5);
+
+	dev_priv->saveOV_OVADD_C = PSB_RVDC32(OV_OVADD + OV_C_OFFSET);
+	dev_priv->saveOV_OGAMC0_C = PSB_RVDC32(OV_OGAMC0 + OV_C_OFFSET);
+	dev_priv->saveOV_OGAMC1_C = PSB_RVDC32(OV_OGAMC1 + OV_C_OFFSET);
+	dev_priv->saveOV_OGAMC2_C = PSB_RVDC32(OV_OGAMC2 + OV_C_OFFSET);
+	dev_priv->saveOV_OGAMC3_C = PSB_RVDC32(OV_OGAMC3 + OV_C_OFFSET);
+	dev_priv->saveOV_OGAMC4_C = PSB_RVDC32(OV_OGAMC4 + OV_C_OFFSET);
+	dev_priv->saveOV_OGAMC5_C = PSB_RVDC32(OV_OGAMC5 + OV_C_OFFSET);
+
+	return 0;
+}
+
+/**
+ * @dev: DRM device
+ * @pipe: DC pipe
+ *
+ * restore registers of display controller. It's just for HDMI, as for MIPI
+ * pipe, use early suspend to save/restore dc registers.
+ */
+static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	int i = 0;
+	u32 dpll = 0;
+
+	/* regester */
+	u32 dpll_reg = MDFLD_DPLL_B;
+	u32 fp_reg = MDFLD_DPLL_DIV0;
+	u32 pipeconf_reg = PIPEBCONF;
+	u32 htot_reg = HTOTAL_B;
+	u32 hblank_reg = HBLANK_B;
+	u32 hsync_reg = HSYNC_B;
+	u32 vtot_reg = VTOTAL_B;
+	u32 vblank_reg = VBLANK_B;
+	u32 vsync_reg = VSYNC_B;
+	u32 pipesrc_reg = PIPEBSRC;
+	u32 dspstride_reg = DSPBSTRIDE;
+	u32 dsplinoff_reg = DSPBLINOFF;
+	u32 dsptileoff_reg = DSPBTILEOFF;
+	u32 dspsize_reg = DSPBSIZE;
+	u32 dsppos_reg = DSPBPOS;
+	u32 dspsurf_reg = DSPBSURF;
+	u32 dspcntr_reg = DSPBCNTR;
+	u32 palette_reg = PALETTE_B;
+	u32 dspstatus_reg = PIPEBSTAT;
+	u32 color_coef_reg = PIPEB_COLOR_COEF0;
+
+	/* values */
+	u32 dpll_val = dev_priv->saveDPLL_B & ~DPLL_VCO_ENABLE;
+	u32 fp_val = dev_priv->saveFPB0;
+	u32 pipeconf_val = dev_priv->savePIPEBCONF;
+	u32 htot_val = dev_priv->saveHTOTAL_B;
+	u32 hblank_val = dev_priv->saveHBLANK_B;
+	u32 hsync_val = dev_priv->saveHSYNC_B;
+	u32 vtot_val = dev_priv->saveVTOTAL_B;
+	u32 vblank_val = dev_priv->saveVBLANK_B;
+	u32 vsync_val = dev_priv->saveVSYNC_B;
+	u32 pipesrc_val = dev_priv->savePIPEBSRC;
+	u32 dspstride_val = dev_priv->saveDSPBSTRIDE;
+	u32 dsplinoff_val = dev_priv->saveDSPBLINOFF;
+	u32 dsptileoff_val = dev_priv->saveDSPBTILEOFF;
+	u32 dspsize_val = dev_priv->saveDSPBSIZE;
+	u32 dsppos_val = dev_priv->saveDSPBPOS;
+	u32 dspsurf_val = dev_priv->saveDSPBSURF;
+	u32 dspcntr_val = dev_priv->saveDSPBCNTR & ~DISPLAY_PLANE_ENABLE;
+	u32 dspstatus_val = dev_priv->saveDSPBSTATUS;
+	u32 *palette_val = dev_priv->save_palette_b;
+	u32 *color_coef = dev_priv->save_color_coef_b;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (pipe != 1)
+		return 0;
+
+	/*make sure VGA plane is off. it initializes to on after reset!*/
+	REG_WRITE(VGACNTRL, 0x80000000);
+
+	dpll = REG_READ(dpll_reg);
+
+	if (!(dpll & DPLL_VCO_ENABLE)) {
+		/**
+		 * When ungating power of DPLL, needs to wait 0.5us
+		 * before enable the VCO
+		 */
+		if (dpll & MDFLD_PWR_GATE_EN) {
+			dpll &= ~MDFLD_PWR_GATE_EN;
+			REG_WRITE(dpll_reg, dpll);
+			ndelay(500);
+		}
+
+		REG_WRITE(fp_reg, fp_val);
+		REG_WRITE(dpll_reg, dpll_val);
+		ndelay(500);
+
+		dpll_val |= DPLL_VCO_ENABLE;
+		REG_WRITE(dpll_reg, dpll_val);
+		REG_READ(dpll_reg);
+	}
+
+	/* Restore mode */
+	REG_WRITE(htot_reg, htot_val);
+	REG_WRITE(hblank_reg, hblank_val);
+	REG_WRITE(hsync_reg, hsync_val);
+	REG_WRITE(vtot_reg, vtot_val);
+	REG_WRITE(vblank_reg, vblank_val);
+	REG_WRITE(vsync_reg, vsync_val);
+	REG_WRITE(pipesrc_reg, pipesrc_val);
+	REG_WRITE(dspstatus_reg, dspstatus_val);
+
+	/*set up the plane*/
+	REG_WRITE(dspstride_reg, dspstride_val);
+	REG_WRITE(dsplinoff_reg, dsplinoff_val);
+	REG_WRITE(dsptileoff_reg, dsptileoff_val);
+	REG_WRITE(dspsize_reg, dspsize_val);
+	REG_WRITE(dsppos_reg, dsppos_val);
+	REG_WRITE(dspsurf_reg, dspsurf_val);
+
+	REG_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
+	REG_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
+	REG_WRITE(HDMIPHYMISCCTL, dev_priv->saveHDMIPHYMISCCTL);
+	REG_WRITE(HDMIB_CONTROL, dev_priv->saveHDMIB_CONTROL);
+	REG_WRITE(HDMIB_LANES02, dev_priv->saveDATALANES_B);
+	REG_WRITE(HDMIB_LANES3, dev_priv->saveDATALANES_B);
+
+	/*save color_coef (chrome) */
+	for (i = 0; i < 6; i++)
+		REG_WRITE(color_coef_reg + (i<<2), color_coef[i]);
+
+	/* restore palette (gamma) */
+	for (i = 0; i < 256; i++)
+		REG_WRITE(palette_reg + (i<<2), palette_val[i]);
+
+	/*enable the plane*/
+	REG_WRITE(dspcntr_reg, dspcntr_val);
+
+	/*enable the pipe*/
+	REG_WRITE(pipeconf_reg, pipeconf_val);
+	if (pipeconf_val & PIPEBCONF_ENABLE)
+		intel_wait_for_pipe_enable_disable(dev, pipe, true);
+
+	return 0;
+}
+
+/*
+ * mdfld_restore_cursor_overlay_registers
+ *
+ * Description: We are going to resume so restore cursor and overlay register state.
+ */
+static int mdfld_restore_cursor_overlay_registers(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	/*Enable Cursor A*/
+	REG_WRITE(CURACNTR, dev_priv->saveDSPACURSOR_CTRL);
+	REG_WRITE(CURAPOS, dev_priv->saveDSPACURSOR_POS);
+	REG_WRITE(CURABASE, dev_priv->saveDSPACURSOR_BASE);
+
+	REG_WRITE(CURBCNTR, dev_priv->saveDSPBCURSOR_CTRL);
+	REG_WRITE(CURBPOS, dev_priv->saveDSPBCURSOR_POS);
+	REG_WRITE(CURBBASE, dev_priv->saveDSPBCURSOR_BASE);
+
+	REG_WRITE(CURCCNTR, dev_priv->saveDSPCCURSOR_CTRL);
+	REG_WRITE(CURCPOS, dev_priv->saveDSPCCURSOR_POS);
+	REG_WRITE(CURCBASE, dev_priv->saveDSPCCURSOR_BASE);
+
+	/* restore HW overlay */
+	REG_WRITE(OV_OVADD, dev_priv->saveOV_OVADD);
+	REG_WRITE(OV_OGAMC0, dev_priv->saveOV_OGAMC0);
+	REG_WRITE(OV_OGAMC1, dev_priv->saveOV_OGAMC1);
+	REG_WRITE(OV_OGAMC2, dev_priv->saveOV_OGAMC2);
+	REG_WRITE(OV_OGAMC3, dev_priv->saveOV_OGAMC3);
+	REG_WRITE(OV_OGAMC4, dev_priv->saveOV_OGAMC4);
+	REG_WRITE(OV_OGAMC5, dev_priv->saveOV_OGAMC5);
+
+	REG_WRITE(OV_OVADD + OV_C_OFFSET, dev_priv->saveOV_OVADD_C);
+	REG_WRITE(OV_OGAMC0 + OV_C_OFFSET, dev_priv->saveOV_OGAMC0_C);
+	REG_WRITE(OV_OGAMC1 + OV_C_OFFSET, dev_priv->saveOV_OGAMC1_C);
+	REG_WRITE(OV_OGAMC2 + OV_C_OFFSET, dev_priv->saveOV_OGAMC2_C);
+	REG_WRITE(OV_OGAMC3 + OV_C_OFFSET, dev_priv->saveOV_OGAMC3_C);
+	REG_WRITE(OV_OGAMC4 + OV_C_OFFSET, dev_priv->saveOV_OGAMC4_C);
+	REG_WRITE(OV_OGAMC5 + OV_C_OFFSET, dev_priv->saveOV_OGAMC5_C);
+
+	return 0;
+}
+
+/*
+ *  mdfld_save_display
+ *
+ * Description: Save display status before DPMS OFF for RuntimePM
+ */
+void mdfld_save_display(struct drm_device *dev)
+{
+	PSB_DEBUG_ENTRY("\n");
+
+	mdfld_save_cursor_overlay_registers(dev);
+}
+
+/*
+ * powermgmt_suspend_display
+ *
+ * Description: Suspend the display hardware saving state and disabling
+ * as necessary.
+ */
+void ospm_suspend_display(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (!ospm_power_is_hw_on(OSPM_DISPLAY_ISLAND)) {
+		DRM_INFO("%s: Exit because island is down\n", __func__);
+		return;
+	}
+
+	mdfld_save_cursor_overlay_registers(dev);
+
+	if (dev_priv->panel_desc & DISPLAY_A) {
+		mdfld_save_display_registers(dev, 0);
+		mdfld_disable_crtc(dev, 0);
+	}
+	if (dev_priv->panel_desc & DISPLAY_B) {
+		android_hdmi_suspend_display(dev);
+	}
+	if (dev_priv->panel_desc & DISPLAY_C) {
+		mdfld_save_display_registers(dev, 2);
+		mdfld_disable_crtc(dev, 2);
+	}
+
+	/*save performance state*/
+	dev_priv->savePERF_MODE = REG_READ(MRST_PERF_MODE);
+	dev_priv->saveCLOCKGATING = REG_READ(PSB_GFX_CLOCKGATING);
+	dev_priv->saveVED_CG_DIS = REG_READ(PSB_MSVDX_CLOCKGATING);
+	dev_priv->saveVEC_CG_DIS = REG_READ(PSB_TOPAZ_CLOCKGATING);
+
+#ifdef CONFIG_MDFD_GL3
+	dev_priv->saveGL3_CTL = REG_READ(MDFLD_GL3_CONTROL);
+	dev_priv->saveGL3_USE_WRT_INVAL = REG_READ(MDFLD_GL3_USE_WRT_INVAL);
+#endif
+
+	ospm_power_island_down(OSPM_DISPLAY_ISLAND);
+}
+
+/*
+ * ospm_resume_display
+ *
+ * Description: Resume the display hardware restoring state and enabling
+ * as necessary.
+ */
+void ospm_resume_display(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_gtt *pg = dev_priv->pg;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (ospm_power_is_hw_on(OSPM_DISPLAY_ISLAND)) {
+		DRM_INFO("%s: Exit because hw on\n", __func__);
+		return;
+	}
+
+	/*restore performance mode*/
+	REG_WRITE(MRST_PERF_MODE, dev_priv->savePERF_MODE);
+	REG_WRITE(PSB_GFX_CLOCKGATING, dev_priv->saveCLOCKGATING);
+	REG_WRITE(PSB_MSVDX_CLOCKGATING, dev_priv->saveVED_CG_DIS);
+	REG_WRITE(PSB_TOPAZ_CLOCKGATING, dev_priv->saveVEC_CG_DIS);
+#ifdef CONFIG_MDFD_GL3
+	REG_WRITE(MDFLD_GL3_CONTROL, dev_priv->saveGL3_CTL);
+	REG_WRITE(MDFLD_GL3_USE_WRT_INVAL, dev_priv->saveGL3_USE_WRT_INVAL);
+#endif
+
+	/* turn on the display power island */
+	ospm_power_island_up(OSPM_DISPLAY_ISLAND);
+
+	REG_WRITE(PSB_PGETBL_CTL, pg->pge_ctl | _PSB_PGETBL_ENABLED);
+	pci_write_config_word(pdev, PSB_GMCH_CTRL,
+			pg->gmch_ctrl | _PSB_GMCH_ENABLED);
+
+	if (dev_priv->panel_desc & DISPLAY_A)
+		mdfld_restore_display_registers(dev, 0);
+	if (dev_priv->panel_desc & DISPLAY_C)
+		mdfld_restore_display_registers(dev, 2);
+
+	/*
+	 * Don't restore Display B registers during resuming, if HDMI
+	 * isn't turned on before suspending.
+	 */
+	if (dev_priv->panel_desc & DISPLAY_B) {
+		android_hdmi_resume_display(dev);
+		/*devices connect status will be changed
+		  when system suspend,re-detect once here*/
+		if (android_hdmi_is_connected(dev))
+			mid_hdmi_audio_resume(dev);
+	}
+	mdfld_restore_cursor_overlay_registers(dev);
+
+	mdfld_adjust_display_fifo(dev);
+}
+
+/*
+ * ospm_suspend_pci
+ *
+ * Description: Suspend the pci device saving state and disabling
+ * as necessary.
+ */
+void ospm_suspend_pci(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	int bsm, vbt;
+
+	if (gbSuspended)
+		return;
+
+	PSB_DEBUG_PM("ospm_suspend_pci\n");
+
+	pci_save_state(pdev);
+	pci_read_config_dword(pdev, 0x5C, &bsm);
+	dev_priv->saveBSM = bsm;
+	pci_read_config_dword(pdev, 0xFC, &vbt);
+	dev_priv->saveVBT = vbt;
+	pci_read_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, &dev_priv->msi_addr);
+	pci_read_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, &dev_priv->msi_data);
+
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, PCI_D3hot);
+
+	gbSuspended = true;
+}
+
+/*
+ * ospm_resume_pci
+ *
+ * Description: Resume the pci device restoring state and enabling
+ * as necessary.
+ */
+static bool ospm_resume_pci(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	int ret = 0;
+
+	if (!gbSuspended)
+		return true;
+
+	PSB_DEBUG_PM("ospm_resume_pci.\n");
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	pci_write_config_dword(pdev, 0x5c, dev_priv->saveBSM);
+	pci_write_config_dword(pdev, 0xFC, dev_priv->saveVBT);
+	/* retoring MSI address and data in PCIx space */
+	pci_write_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, dev_priv->msi_addr);
+	pci_write_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, dev_priv->msi_data);
+	ret = pci_enable_device(pdev);
+
+	if (ret != 0)
+		printk(KERN_ALERT "ospm_resume_pci: pci_enable_device failed: %d\n", ret);
+	else {
+		if (IS_MDFLD(dev)) {
+			/*restore performance mode*/
+			PSB_WVDC32(dev_priv->savePERF_MODE, MRST_PERF_MODE);
+			PSB_WVDC32(dev_priv->saveCLOCKGATING,
+				PSB_GFX_CLOCKGATING);
+			PSB_WVDC32(dev_priv->saveVED_CG_DIS,
+					PSB_MSVDX_CLOCKGATING);
+			PSB_WVDC32(dev_priv->saveVEC_CG_DIS,
+					PSB_TOPAZ_CLOCKGATING);
+#ifdef CONFIG_MDFD_GL3
+			PSB_WVDC32(dev_priv->saveGL3_CTL, MDFLD_GL3_CONTROL);
+			PSB_WVDC32(dev_priv->saveGL3_USE_WRT_INVAL,
+					MDFLD_GL3_USE_WRT_INVAL);
+#endif
+		}
+		gbSuspended = false;
+	}
+
+	return !gbSuspended;
+}
+
+static void ospm_early_suspend()
+{
+	struct drm_psb_private *dev_priv = gpDrmDevice->dev_private;
+	struct drm_device *dev = dev_priv->dev;
+	struct drm_encoder *encoder;
+	struct drm_encoder_helper_funcs *enc_funcs;
+
+	if (!(drm_psb_use_cases_control & PSB_SUSPEND_ENABLE))
+		return ;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	dev_priv->b_dsr_enable_status = dev_priv->b_dsr_enable;
+	if (dev_priv->b_dsr_enable) {
+		dev_priv->exit_idle(dev,
+				MDFLD_DSR_2D_3D,
+				NULL,
+				0);
+		dev_priv->b_dsr_enable = false;
+	}
+
+	/* protect early_suspend with dpms and mode config */
+	mutex_lock(&dev->mode_config.mutex);
+
+	list_for_each_entry(encoder,
+			&dev->mode_config.encoder_list,
+			head) {
+		enc_funcs = encoder->helper_private;
+		if (!drm_helper_encoder_in_use(encoder))
+			continue;
+		if (enc_funcs && enc_funcs->save)
+			enc_funcs->save(encoder);
+	}
+
+
+	gbdispstatus = false;
+	dev_priv->early_suspended = true;
+
+	mutex_unlock(&dev->mode_config.mutex);
+
+#ifdef CONFIG_GFX_RTPM
+	pm_runtime_allow(&gpDrmDevice->pdev->dev);
+#endif
+}
+
+static void ospm_late_resume()
+{
+	struct drm_psb_private *dev_priv = gpDrmDevice->dev_private;
+	struct drm_device *dev = dev_priv->dev;
+	struct drm_encoder *encoder;
+	struct drm_encoder_helper_funcs *enc_funcs;
+
+	if (!(drm_psb_use_cases_control & PSB_SUSPEND_ENABLE))
+		return ;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	/* protect early_suspend with dpms and mode config */
+	mutex_lock(&dev->mode_config.mutex);
+
+	dev_priv->early_suspended = false;
+
+#ifdef CONFIG_GFX_RTPM
+	pm_runtime_forbid(&gpDrmDevice->pdev->dev);
+	mutex_lock(&g_ospm_mutex);
+	ospm_resume_pci(gpDrmDevice->pdev);
+	ospm_resume_display(gpDrmDevice->pdev);
+	psb_irq_preinstall_islands(gpDrmDevice, OSPM_DISPLAY_ISLAND);
+	psb_irq_postinstall_islands(gpDrmDevice, OSPM_DISPLAY_ISLAND);
+	mutex_unlock(&g_ospm_mutex);
+#endif
+
+	list_for_each_entry(encoder,
+			&dev->mode_config.encoder_list,
+			head) {
+		enc_funcs = encoder->helper_private;
+		if (!drm_helper_encoder_in_use(encoder))
+			continue;
+		if (enc_funcs && enc_funcs->restore)
+			enc_funcs->restore(encoder);
+	}
+
+	gbdispstatus = true;
+	dev_priv->b_dsr_enable = dev_priv->b_dsr_enable_status;
+
+	if (lastFailedBrightness > 0)
+		psb_set_brightness(NULL);
+
+	mutex_unlock(&dev->mode_config.mutex);
+}
+
+#if SUPPORT_EARLY_SUSPEND
+static void gfx_early_suspend(struct early_suspend *h)
+{
+	ospm_early_suspend();
+}
+#endif /* if SUPPORT_EARLY_SUSPEND */
+
+#if SUPPORT_EARLY_SUSPEND
+static void gfx_late_resume(struct early_suspend *h)
+{
+	ospm_late_resume();
+}
+#endif /* if SUPPORT_EARLY_SUSPEND */
+
+/*
+ * ospm_power_suspend
+ *
+ * Description: OSPM is telling our driver to suspend so save state
+ * and power down all hardware.
+ */
+int ospm_power_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	int ret = 0;
+	int graphics_access_count;
+	int videoenc_access_count;
+	int videodec_access_count;
+	int display_access_count;
+	struct drm_psb_private *dev_priv = gpDrmDevice->dev_private;
+	unsigned long flags;
+	bool hdmi_audio_suspend = true;
+
+	if (gbSuspendInProgress || gbResumeInProgress) {
+		PSB_DEBUG_PM(KERN_ALERT "%s: system BUSY\n", __func__);
+		return  -EBUSY;
+	}
+
+	PSB_DEBUG_PM("enter ospm_power_suspend\n");
+
+	mutex_lock(&g_ospm_mutex);
+	if (!gbSuspended) {
+		hdmi_audio_suspend = mid_hdmi_audio_suspend(dev_priv->dev);
+		/* Turn on suspending first before check the access count */
+		spin_lock_irqsave(&dev_priv->ospm_lock, flags);
+		gbSuspendInProgress = true;
+		spin_unlock_irqrestore(&dev_priv->ospm_lock, flags);
+
+		graphics_access_count = atomic_read(&g_graphics_access_count);
+		videoenc_access_count = atomic_read(&g_videoenc_access_count);
+		videodec_access_count = atomic_read(&g_videodec_access_count);
+		display_access_count = atomic_read(&g_display_access_count);
+
+		if (graphics_access_count
+			|| videoenc_access_count
+			|| videodec_access_count
+			|| display_access_count
+			|| (hdmi_audio_suspend == false))
+			ret = -EBUSY;
+		if (!ret) {
+			if (ospm_runtime_pm_msvdx_suspend(gpDrmDevice) != 0)
+				ret = -EBUSY;
+
+			if (ospm_runtime_pm_topaz_suspend(gpDrmDevice) != 0)
+				ret = -EBUSY;
+
+			if (!ret) {
+				ospm_suspend_display(gpDrmDevice);
+
+				/* When suspend, the gfx island may increase
+				** its access count, hence the PCI host
+				** shouldn't be power off
+				*/
+				spin_lock_irqsave(&dev_priv->ospm_lock, flags);
+				graphics_access_count =
+					atomic_read(&g_graphics_access_count);
+				if (!graphics_access_count) {
+					pcihostSuspendInProgress = true;
+					spin_unlock_irqrestore(
+						&dev_priv->ospm_lock, flags);
+					ospm_suspend_pci(pdev);
+					pcihostSuspendInProgress = false;
+				} else {
+					spin_unlock_irqrestore(
+						&dev_priv->ospm_lock, flags);
+					ret = -EBUSY;
+				}
+			}
+		} else {
+			PSB_DEBUG_PM("ospm_power_suspend: device busy:");
+			PSB_DEBUG_PM("SGX %d Enc %d Dec %d Display %d\n",
+				graphics_access_count, videoenc_access_count,
+				videodec_access_count, display_access_count);
+		}
+		spin_lock_irqsave(&dev_priv->ospm_lock, flags);
+		gbSuspendInProgress = false;
+		spin_unlock_irqrestore(&dev_priv->ospm_lock, flags);
+	}
+
+
+	mutex_unlock(&g_ospm_mutex);
+	return ret;
+}
+
+void ospm_power_graphics_island_up(int hw_islands)
+{
+	unsigned long flags;
+	unsigned long irqflags;
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) gpDrmDevice->dev_private;
+
+
+	if (hw_islands) {
+		spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+		/*
+		If pmu_nc_set_power_state fails then accessing HW
+		reg would result in a crash - IERR/Fabric error.
+		*/
+		spin_lock_irqsave(&dev_priv->ospm_lock, flags);
+		PSB_DEBUG_PM("power on gfx_islands: 0x%x\n", hw_islands);
+		if (pmu_nc_set_power_state(hw_islands,
+					   OSPM_ISLAND_UP, APM_REG_TYPE))
+			BUG();
+		if (hw_islands & OSPM_GRAPHICS_ISLAND)
+			atomic_inc(&g_graphics_access_count);
+		g_hw_power_status_mask |= hw_islands;
+		psb_irq_preinstall_graphics_islands(gpDrmDevice,
+							OSPM_GRAPHICS_ISLAND);
+		psb_irq_postinstall_graphics_islands(gpDrmDevice,
+							OSPM_GRAPHICS_ISLAND);
+		spin_unlock_irqrestore(&dev_priv->ospm_lock, flags);
+		spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+	}
+}
+
+/*
+ * ospm_power_island_up
+ *
+ * Description: Restore power to the specified island(s) (powergating)
+ */
+int ospm_power_island_up(int hw_islands)
+{
+	u32 dc_islands  = 0;
+	u32 gfx_islands = hw_islands;
+	unsigned long flags;
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) gpDrmDevice->dev_private;
+	int ret = 0;
+
+	if (hw_islands & OSPM_DISPLAY_ISLAND) {
+		/*Power-up required islands only*/
+		if (dev_priv->panel_desc & DISPLAY_A)
+			dc_islands |= OSPM_DISPLAY_A_ISLAND;
+
+		if (dev_priv->panel_desc & DISPLAY_B)
+			dc_islands |= OSPM_DISPLAY_B_ISLAND;
+
+		if (dev_priv->panel_desc & DISPLAY_C)
+			dc_islands |= OSPM_DISPLAY_C_ISLAND;
+
+		if (dev_priv->panel_desc)
+			dc_islands |= OSPM_MIPI_ISLAND;
+
+		/*
+		If pmu_nc_set_power_state fails then accessing HW
+		reg would result in a crash - IERR/Fabric error.
+		*/
+		spin_lock_irqsave(&dev_priv->ospm_lock, flags);
+
+		PSB_DEBUG_PM("power up display islands 0x%x.\n", dc_islands);
+		if (pmu_nc_set_power_state(dc_islands,
+			OSPM_ISLAND_UP, OSPM_REG_TYPE))
+			BUG();
+		g_hw_power_status_mask |= OSPM_DISPLAY_ISLAND;
+		spin_unlock_irqrestore(&dev_priv->ospm_lock, flags);
+
+		/* handle other islands */
+		gfx_islands = hw_islands & ~OSPM_DISPLAY_ISLAND;
+	}
+
+	if (gfx_islands) {
+		/*
+		If pmu_nc_set_power_state fails then accessing HW
+		reg would result in a crash - IERR/Fabric error.
+		*/
+		spin_lock_irqsave(&dev_priv->ospm_lock, flags);
+		PSB_DEBUG_PM("power on gfx_islands: 0x%x\n", gfx_islands);
+		ret = pmu_nc_set_power_state(gfx_islands,
+					   OSPM_ISLAND_UP, APM_REG_TYPE);
+		if (ret) {
+			PSB_DEBUG_PM("pmu_nc_set_power_state fails, ret is %d\n", ret);
+			spin_unlock_irqrestore(&dev_priv->ospm_lock, flags);
+			return ret;
+		}
+		if (gfx_islands & OSPM_GRAPHICS_ISLAND)
+			atomic_inc(&g_graphics_access_count);
+		g_hw_power_status_mask |= gfx_islands;
+		spin_unlock_irqrestore(&dev_priv->ospm_lock, flags);
+	}
+
+	return 0;
+}
+
+/*
+ * ospm_power_resume
+ */
+int ospm_power_resume(struct pci_dev *pdev)
+{
+	if (gbSuspendInProgress || gbResumeInProgress) {
+		DRM_INFO("%s: suspend/resume in progress\n", __func__);
+		return 0;
+	}
+
+	PSB_DEBUG_ENTRY("\n");
+
+	mutex_lock(&g_ospm_mutex);
+
+	gbResumeInProgress = true;
+
+	ospm_resume_pci(pdev);
+
+	ospm_resume_display(gpDrmDevice->pdev);
+	psb_irq_preinstall_islands(gpDrmDevice, OSPM_DISPLAY_ISLAND);
+	psb_irq_postinstall_islands(gpDrmDevice, OSPM_DISPLAY_ISLAND);
+
+	gbResumeInProgress = false;
+
+	mutex_unlock(&g_ospm_mutex);
+
+	return 0;
+}
+
+/*
+ * ospm_power_island_down_video
+ *
+ * Description: Cut power to the specified video island(s) (powergating)
+ * If pmu_nc_set_power_state fails then accessing HW
+ * reg would result in a crash - IERR/Fabric error.
+ */
+static void ospm_power_island_down_video(int video_islands)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) gpDrmDevice->dev_private;
+	unsigned long flags;
+	PSB_DEBUG_PM("MSVDX: power off video island %d.\n", video_islands);
+	spin_lock_irqsave(&dev_priv->ospm_lock, flags);
+	if (video_islands & OSPM_VIDEO_DEC_ISLAND) {
+		if (pmu_nc_set_power_state(OSPM_VIDEO_DEC_ISLAND,
+				OSPM_ISLAND_DOWN, APM_REG_TYPE))
+			BUG();
+		g_hw_power_status_mask &= ~OSPM_VIDEO_DEC_ISLAND;
+	}
+
+	if (video_islands & OSPM_VIDEO_ENC_ISLAND) {
+		if (pmu_nc_set_power_state(OSPM_VIDEO_ENC_ISLAND,
+				OSPM_ISLAND_DOWN, APM_REG_TYPE))
+			BUG();
+		g_hw_power_status_mask &= ~OSPM_VIDEO_ENC_ISLAND;
+	}
+	spin_unlock_irqrestore(&dev_priv->ospm_lock, flags);
+}
+
+void ospm_power_graphics_island_down(int hw_islands)
+{
+	u32 dc_islands = 0;
+	u32 gfx_islands = hw_islands;
+
+	unsigned long flags;
+	unsigned long irqflags;
+	struct mdfld_dsi_config *dsi_config;
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) gpDrmDevice->dev_private;
+
+	if (gfx_islands) {
+		spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+		spin_lock_irqsave(&dev_priv->ospm_lock, flags);
+		/* both graphics and GL3 based on graphics_access count */
+		if (gfx_islands & OSPM_GL3_CACHE_ISLAND) {
+#ifdef CONFIG_MDFD_GL3
+			/*
+			Make sure both GFX & Video aren't
+			using GL3
+			*/
+			if (atomic_read(&g_graphics_access_count) ||
+					(g_hw_power_status_mask &
+					(OSPM_VIDEO_DEC_ISLAND |
+					OSPM_VIDEO_ENC_ISLAND |
+					OSPM_GRAPHICS_ISLAND)) ||
+					(drm_psb_gl3_enable == 0)) {
+				gfx_islands &=  ~OSPM_GL3_CACHE_ISLAND;
+				if (!gfx_islands) {
+					spin_unlock_irqrestore(
+					&dev_priv->ospm_lock, flags);
+					spin_unlock_irqrestore(
+					&dev_priv->irqmask_lock, irqflags);
+					return ;
+				}
+			}
+#endif
+		}
+		if (gfx_islands & OSPM_GRAPHICS_ISLAND) {
+			if (atomic_read(&g_graphics_access_count))
+				gfx_islands &=  ~OSPM_GRAPHICS_ISLAND;
+			else
+				psb_irq_uninstall_graphics_islands(gpDrmDevice,
+							OSPM_GRAPHICS_ISLAND);
+		}
+
+		/*
+		If pmu_nc_set_power_state fails then accessing HW
+		reg would result in a crash - IERR/Fabric error.
+		*/
+		PSB_DEBUG_PM("power off gfx/gl3 island 0x%x.\n", gfx_islands);
+		g_hw_power_status_mask &= ~gfx_islands;
+		if (pmu_nc_set_power_state(gfx_islands,
+			OSPM_ISLAND_DOWN, APM_REG_TYPE))
+			BUG();
+
+		spin_unlock_irqrestore(&dev_priv->ospm_lock, flags);
+		spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+
+		/* From the test, after enter DSR level 1, only GFX island
+		** has chance to power on and leave PCI host power ungated
+		** Because after SGX complete a buffer, it will trigger
+		** PROCESS_QUEUES command to SGX even if there are no more
+		** 3D thing to do, hence power on SGX and PCI. Because there are
+		** nothing remain to flip, exit_dsr doesn't be called,
+		** so PCI host remain power ungated.
+		** here just give another chance to enter DSR
+		** Note:
+		*/
+#if 0
+		if (gfx_islands & OSPM_GRAPHICS_ISLAND) {
+			dsi_config = dev_priv->dsi_configs[0];
+			mdfld_dsi_dsr_forbid(dsi_config);
+			mdfld_dsi_dsr_allow(dsi_config);
+		}
+#endif
+	}
+}
+EXPORT_SYMBOL(ospm_power_graphics_island_down);
+
+/*
+ * ospm_power_island_down
+ *
+ * Description: Cut power to the specified island(s) (powergating)
+ */
+void ospm_power_island_down(int hw_islands)
+{
+	u32 dc_islands = 0;
+	u32 gfx_islands = hw_islands;
+	int video_islands = hw_islands &
+		(OSPM_VIDEO_DEC_ISLAND | OSPM_VIDEO_ENC_ISLAND);
+	unsigned long flags;
+	struct mdfld_dsi_config *dsi_config;
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) gpDrmDevice->dev_private;
+
+	PSB_DEBUG_PM("power down hw_islands: %x\n", hw_islands);
+
+	if (video_islands) {
+		ospm_power_island_down_video(video_islands);
+		hw_islands = hw_islands &
+			~(OSPM_VIDEO_DEC_ISLAND | OSPM_VIDEO_ENC_ISLAND);
+	}
+
+	if (hw_islands & OSPM_DISPLAY_ISLAND) {
+		/*Power gate all display islands.*/
+		dc_islands |= (OSPM_DISPLAY_A_ISLAND |
+				OSPM_DISPLAY_B_ISLAND |
+				OSPM_DISPLAY_C_ISLAND |
+				OSPM_MIPI_ISLAND);
+
+		/*
+		If pmu_nc_set_power_state fails then accessing HW
+		reg would result in a crash - IERR/Fabric error.
+		*/
+		spin_lock_irqsave(&dev_priv->ospm_lock, flags);
+		/*last chance of canceling the power off*/
+		/*
+		 * if (atomic_read(&g_display_access_count))
+		 *	goto unlock;
+		 */
+
+		PSB_DEBUG_PM("power off display island\n");
+		g_hw_power_status_mask &= ~OSPM_DISPLAY_ISLAND;
+		if (pmu_nc_set_power_state(dc_islands,
+					   OSPM_ISLAND_DOWN, OSPM_REG_TYPE))
+			BUG();
+		spin_unlock_irqrestore(&dev_priv->ospm_lock, flags);
+
+		/* handle other islands */
+		gfx_islands = hw_islands & ~OSPM_DISPLAY_ISLAND;
+	}
+
+	if (gfx_islands) {
+		spin_lock_irqsave(&dev_priv->ospm_lock, flags);
+		/* both graphics and GL3 based on graphics_access count */
+		if (gfx_islands & OSPM_GL3_CACHE_ISLAND) {
+#ifdef CONFIG_MDFD_GL3
+			/*
+			Make sure both GFX & Video aren't
+			using GL3
+			*/
+			if (atomic_read(&g_graphics_access_count) ||
+					(g_hw_power_status_mask &
+					(OSPM_VIDEO_DEC_ISLAND |
+					OSPM_VIDEO_ENC_ISLAND |
+					OSPM_GRAPHICS_ISLAND)) ||
+					(drm_psb_gl3_enable == 0)) {
+				gfx_islands &=  ~OSPM_GL3_CACHE_ISLAND;
+				if (!gfx_islands) {
+					spin_unlock_irqrestore(
+						&dev_priv->ospm_lock, flags);
+					return ;
+				}
+			}
+#endif
+		}
+		if (gfx_islands & OSPM_GRAPHICS_ISLAND) {
+			if (atomic_read(&g_graphics_access_count))
+				gfx_islands &=  ~OSPM_GRAPHICS_ISLAND;
+		}
+
+		/*
+		If pmu_nc_set_power_state fails then accessing HW
+		reg would result in a crash - IERR/Fabric error.
+		*/
+		PSB_DEBUG_PM("power off gfx/gl3 island 0x%x.\n", gfx_islands);
+		g_hw_power_status_mask &= ~gfx_islands;
+		if (pmu_nc_set_power_state(gfx_islands,
+			OSPM_ISLAND_DOWN, APM_REG_TYPE))
+			BUG();
+
+		spin_unlock_irqrestore(&dev_priv->ospm_lock, flags);
+
+		/* From the test, after enter DSR level 1, only GFX island
+		** has chance to power on and leave PCI host power ungated
+		** Because after SGX complete a buffer, it will trigger
+		** PROCESS_QUEUES command to SGX even if there are no more
+		** 3D thing to do, hence power on SGX and PCI. Because there are
+		** nothing remain to flip, exit_dsr doesn't be called,
+		** so PCI host remain power ungated.
+		** here just give another chance to enter DSR
+		** Note:
+		*/
+#if 0
+		if (gfx_islands & OSPM_GRAPHICS_ISLAND) {
+			dsi_config = dev_priv->dsi_configs[0];
+			mdfld_dsi_dsr_forbid(dsi_config);
+			mdfld_dsi_dsr_allow(dsi_config);
+		}
+#endif
+	}
+}
+EXPORT_SYMBOL(ospm_power_island_down);
+
+/*
+ * ospm_power_is_hw_on
+ *
+ * Description: do an instantaneous check for if the specified islands
+ * are on.  Only use this in cases where you know the g_state_change_mutex
+ * is already held such as in irq install/uninstall.  Otherwise, use
+ * ospm_power_using_hw_begin().
+ */
+bool ospm_power_is_hw_on(int hw_islands)
+{
+	unsigned long flags;
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) gpDrmDevice->dev_private;
+	bool ret = false;
+	spin_lock_irqsave(&dev_priv->ospm_lock, flags);
+	ret = ((g_hw_power_status_mask & hw_islands)
+			== hw_islands) ? true : false;
+	spin_unlock_irqrestore(&dev_priv->ospm_lock, flags);
+	return ret;
+}
+EXPORT_SYMBOL(ospm_power_is_hw_on);
+
+/* For video case, we only force enable hw in process context.
+ * Protected by g_ospm_mutex */
+bool ospm_power_using_video_begin(int video_island)
+{
+	bool ret = true;
+	bool island_is_on = true;
+	struct pci_dev *pdev = gpDrmDevice->pdev;
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *)gpDrmDevice->dev_private;
+	struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
+	bool  already_increase = false;
+
+	PSB_DEBUG_PM("MSVDX: need power on island 0x%x.\n", video_island);
+
+	if (!(video_island & (OSPM_VIDEO_DEC_ISLAND | OSPM_VIDEO_ENC_ISLAND)))
+		return false;
+#ifdef CONFIG_GFX_RTPM
+	/* if system suspend is in progress, do NOT allow system resume. if
+	 * runtime_status is RPM_SUSPENDING, and here call pm_runtime_get will
+	 * call rpm_resume indirectly, it causes defferred_resume be set to
+	 * ture, so at the end of rpm_suspend(), rpm_resume() will be called.
+	 * it will block system from entering s0ix */
+	if (gbSuspendInProgress) {
+		DRM_INFO("%s: suspend in progress,"
+			"call pm_runtime_get_noresume\n", __func__);
+		pm_runtime_get_noresume(&pdev->dev);
+	} else {
+		pm_runtime_get(&pdev->dev);
+	}
+
+
+	/* Taking this lock is very important to keep consistent with
+	*runtime framework */
+	spin_lock_irq(&pdev->dev.power.lock);
+recheck:
+	if (pdev->dev.power.runtime_status == RPM_SUSPENDING) {
+		DEFINE_WAIT(wait);
+		/* Wait for the other suspend running to finish */
+		for (;;) {
+			prepare_to_wait(&pdev->dev.power.wait_queue, &wait,
+				TASK_UNINTERRUPTIBLE);
+			if (pdev->dev.power.runtime_status != RPM_SUSPENDING)
+				break;
+			spin_unlock_irq(&pdev->dev.power.lock);
+			schedule();
+			spin_lock_irq(&pdev->dev.power.lock);
+		}
+		finish_wait(&pdev->dev.power.wait_queue, &wait);
+		goto recheck;
+	}
+	/* Because !force_on has been done above, so here is force_on case
+	**it must be process context in current code base, so it will power on
+	** island defintely, so increase access_count here to prevent another
+	** suspending thread run async
+	*/
+	switch (video_island) {
+	case OSPM_VIDEO_ENC_ISLAND:
+		atomic_inc(&g_videoenc_access_count);
+		break;
+	case OSPM_VIDEO_DEC_ISLAND:
+		atomic_inc(&g_videodec_access_count);
+		break;
+	}
+	already_increase = true;
+	spin_unlock_irq(&pdev->dev.power.lock);
+#endif
+
+	/* It must be process context, will not be called in irq */
+	mutex_lock(&g_ospm_mutex);
+
+	island_is_on = ospm_power_is_hw_on(video_island);
+	if (island_is_on)
+		goto out;
+
+	gbResumeInProgress = true;
+
+	/* Because gfx island may resume pci silently,
+	** so need sync with gfx
+	*/
+	if (ospm_resume_pci(pdev) == false) {
+		ret = false;
+		goto out;
+	}
+
+	switch (video_island) {
+	case OSPM_VIDEO_DEC_ISLAND:
+		if (!ospm_power_is_hw_on(OSPM_DISPLAY_ISLAND)) {
+			/* printk(KERN_ALERT "%s power on display
+			** for video decode use\n", __func__);
+			*/
+			ospm_resume_display(pdev);
+			psb_irq_preinstall_islands(gpDrmDevice,
+					OSPM_DISPLAY_ISLAND);
+			psb_irq_postinstall_islands(gpDrmDevice,
+					OSPM_DISPLAY_ISLAND);
+		} else {
+			/* printk(KERN_ALERT
+			** "%s display is already on
+			** for video decode use\n", __func__);
+			*/
+		}
+
+		if (!ospm_power_is_hw_on(OSPM_VIDEO_DEC_ISLAND)) {
+			/* printk(KERN_ALERT "%s power on video decode\n",
+			** __func__);
+			*/
+			/*
+			 * GL3 power island needs to be on for MSVDX working.
+			 * We found this during enabling new MSVDX firmware
+			 * uploading mechanism(by PUNIT) for Penwell D0.
+			 */
+#ifdef CONFIG_MDFD_GL3
+			if (ospm_power_island_up(OSPM_GL3_CACHE_ISLAND | OSPM_VIDEO_DEC_ISLAND)) {
+#else
+			if (ospm_power_island_up(OSPM_VIDEO_DEC_ISLAND)) {
+#endif
+				ret = false;
+				goto out;
+			}
+			if (msvdx_priv->fw_loaded_by_punit) {
+				int reg_ret;
+				reg_ret = psb_wait_for_register(dev_priv,
+					MSVDX_COMMS_SIGNATURE,
+					MSVDX_COMMS_SIGNATURE_VALUE,
+					0xffffffff, 2000000, 5);
+				if (reg_ret)
+					PSB_DEBUG_WARN("WARN: load fw fail,\n"
+					"MSVDX_COMMS_SIGNATURE reg is 0x%x,"
+					"MSVDX_COMMS_FW_STATUS reg is 0x%x,"
+					"MTX_ENABLE reg is 0x%x.\n",
+					PSB_RMSVDX32(MSVDX_COMMS_SIGNATURE),
+					PSB_RMSVDX32(MSVDX_COMMS_FW_STATUS),
+					PSB_RMSVDX32(MTX_ENABLE_OFFSET));
+			}
+			ospm_runtime_pm_msvdx_resume(gpDrmDevice);
+			psb_irq_preinstall_islands(gpDrmDevice,
+				OSPM_VIDEO_DEC_ISLAND);
+			psb_irq_postinstall_islands(gpDrmDevice,
+				OSPM_VIDEO_DEC_ISLAND);
+		} else {
+#ifdef CONFIG_MDFD_GL3
+			if (ospm_power_island_up(OSPM_GL3_CACHE_ISLAND)) {
+				ret = false;
+				goto out;
+			}
+#endif
+		}
+
+		break;
+	case OSPM_VIDEO_ENC_ISLAND:
+		if (IS_MRST(gpDrmDevice) &&
+				(!ospm_power_is_hw_on(
+					OSPM_DISPLAY_ISLAND))) {
+			ospm_resume_display(pdev);
+			psb_irq_preinstall_islands(gpDrmDevice,
+				OSPM_DISPLAY_ISLAND);
+			psb_irq_postinstall_islands(gpDrmDevice,
+				OSPM_DISPLAY_ISLAND);
+		}
+
+		if (!ospm_power_is_hw_on(OSPM_VIDEO_ENC_ISLAND)) {
+			/* printk(KERN_ALERT "%s power on video
+			** encode\n", __func__);
+			*/
+#ifdef CONFIG_MDFD_GL3
+			if (ospm_power_island_up(OSPM_VIDEO_ENC_ISLAND | OSPM_GL3_CACHE_ISLAND)) {
+#else
+			if (ospm_power_island_up(OSPM_VIDEO_ENC_ISLAND)) {
+#endif
+				ret = false;
+				goto out;
+			}
+			ospm_runtime_pm_topaz_resume(gpDrmDevice);
+			psb_irq_preinstall_islands(gpDrmDevice,
+				OSPM_VIDEO_ENC_ISLAND);
+			psb_irq_postinstall_islands(gpDrmDevice,
+				OSPM_VIDEO_ENC_ISLAND);
+		} else {
+#ifdef CONFIG_MDFD_GL3
+			if (ospm_power_island_up(OSPM_GL3_CACHE_ISLAND)) {
+				ret = false;
+				goto out;
+			}
+#endif
+		}
+		break;
+	default:
+		printk(KERN_ALERT "%s unknown island !!!!\n",
+				__func__);
+		break;
+	}
+
+out:
+	if (!ret)
+		printk(KERN_ALERT "%s: %d failed\n",
+				__func__, video_island);
+
+	gbResumeInProgress = false;
+
+	if (ret) {
+		if (!already_increase) {
+			switch (video_island) {
+			case OSPM_VIDEO_ENC_ISLAND:
+				atomic_inc(&g_videoenc_access_count);
+				break;
+			case OSPM_VIDEO_DEC_ISLAND:
+				atomic_inc(&g_videodec_access_count);
+				break;
+			}
+		}
+	}
+#ifdef CONFIG_GFX_RTPM
+	else {
+		pm_runtime_put(&pdev->dev);
+	}
+#endif
+	mutex_unlock(&g_ospm_mutex);
+	return ret;
+}
+
+/*
+ * ospm_power_using_hw_begin
+ *
+ * Description: Notify PowerMgmt module that you will be accessing the
+ * specified island's hw so don't power it off.  If force_on is true,
+ * this will power on the specified island if it is off.
+ * Otherwise, this will return false and the caller is expected to not
+ * access the hw.
+ *
+ * NOTE:The function doesn't support force_on in atomic context,
+ * as there may sleep when resuming these islands. If u have to
+ * resume in atomic context for these islands, u need revise ur
+ * logic and move the resume to a process context. return true if
+ * the island is on(no matter it's forced or already on) otherwise
+ * false is returned.
+ */
+bool ospm_power_using_hw_begin(int hw_island, UHBUsage usage)
+{
+	bool ret = true;
+	bool island_is_on = true;
+	struct pci_dev *pdev = gpDrmDevice->pdev;
+	IMG_UINT32 deviceID = 0;
+	bool force_on = usage ? true : false;
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) gpDrmDevice->dev_private;
+	unsigned long flags;
+
+	if (!(hw_island & (OSPM_GRAPHICS_ISLAND | OSPM_DISPLAY_ISLAND |
+		OSPM_GL3_CACHE_ISLAND)))
+		return false;
+
+#ifdef CONFIG_GFX_RTPM
+	if (force_on)
+		pm_runtime_get_sync(&pdev->dev);
+	else
+		pm_runtime_get_noresume(&pdev->dev);
+#endif
+
+
+	/* Only process context is allowed when in
+	** force_on == true case. In force_on == false cases,
+	** it may be in atomic or process context, so use spin_lock_irq
+	*/
+	if (!force_on) {
+		spin_lock_irqsave(&dev_priv->ospm_lock, flags);
+		island_is_on = ((g_hw_power_status_mask & hw_island)
+			== hw_island) ? true : false;
+
+		/* if island is off or another thread has been in
+		** suspend progress, we should return false to
+		** keep consistent, the caller should have dealt
+		** with return value properly
+		** Note: when in interrupt context, we should
+		** always return true, for it has triggerred interrupt.
+		*/
+		if ((!island_is_on) ||
+			((((hw_island == OSPM_DISPLAY_ISLAND) &&
+			gbSuspendInProgress) ||
+			((hw_island == OSPM_GRAPHICS_ISLAND) &&
+			pcihostSuspendInProgress)) && (!in_interrupt()))) {
+			spin_unlock_irqrestore(&dev_priv->ospm_lock,
+				flags);
+#ifdef CONFIG_GFX_RTPM
+			pm_runtime_put(&pdev->dev);
+#endif
+			return false;
+		}
+
+		/* After sanity check can increase the access_count */
+		if (hw_island == OSPM_DISPLAY_ISLAND)
+			atomic_inc(&g_display_access_count);
+		else if (hw_island == OSPM_GRAPHICS_ISLAND)
+			atomic_inc(&g_graphics_access_count);
+
+		spin_unlock_irqrestore(&dev_priv->ospm_lock, flags);
+		return true;
+	}
+
+	/* Actually we can remove the codes below for following
+	** mutex lock can keep race condition safe. These codes
+	** exist only for facilitating gfx misuse force_on display
+	** while taking spin lock. These codes can safely be removed
+	** once gfx doesn't force on engines when taking spinlock
+	*/
+	spin_lock_irqsave(&dev_priv->ospm_lock, flags);
+	island_is_on = ((g_hw_power_status_mask & hw_island)
+			== hw_island) ? true : false;
+	if (island_is_on && force_on &&
+		((hw_island == OSPM_DISPLAY_ISLAND) &&
+			!gbSuspendInProgress)) {
+		atomic_inc(&g_display_access_count);
+		spin_unlock_irqrestore(&dev_priv->ospm_lock, flags);
+		return true;
+	}
+	spin_unlock_irqrestore(&dev_priv->ospm_lock, flags);
+
+	BUG_ON(in_interrupt());
+	mutex_lock(&g_ospm_mutex);
+
+	spin_lock_irqsave(&dev_priv->ospm_lock, flags);
+	island_is_on = ((g_hw_power_status_mask & hw_island)
+			== hw_island) ? true : false;
+
+	if (island_is_on && (hw_island == OSPM_GRAPHICS_ISLAND))
+		atomic_inc(&g_graphics_access_count);
+
+	spin_unlock_irqrestore(&dev_priv->ospm_lock, flags);
+
+	if (island_is_on)
+		goto increase_count;
+
+	gbResumeInProgress = true;
+
+	/* Because gfx island may resume pci silently,
+	** so need sync with gfx
+	*/
+	ret = ospm_resume_pci(pdev);
+
+	if (ret) {
+		if (hw_island == OSPM_DISPLAY_ISLAND) {
+			deviceID = gui32MRSTDisplayDeviceID;
+			ospm_resume_display(pdev);
+			psb_irq_preinstall_islands(gpDrmDevice,
+				OSPM_DISPLAY_ISLAND);
+			psb_irq_postinstall_islands(gpDrmDevice,
+				OSPM_DISPLAY_ISLAND);
+		} else if (hw_island == OSPM_GRAPHICS_ISLAND) {
+				deviceID = gui32SGXDeviceID;
+#ifdef CONFIG_MDFD_GL3
+				ospm_power_graphics_island_up(
+						OSPM_GRAPHICS_ISLAND |
+						OSPM_GL3_CACHE_ISLAND);
+#else
+				ospm_power_graphics_island_up(
+						OSPM_GRAPHICS_ISLAND);
+#endif
+		}
+	}
+
+	if (!ret)
+		DRM_INFO("%s: %d failed\n", __func__, hw_island);
+
+	gbResumeInProgress = false;
+
+increase_count:
+	if (ret) {
+		if (hw_island == OSPM_DISPLAY_ISLAND)
+			atomic_inc(&g_display_access_count);
+	}
+#ifdef CONFIG_GFX_RTPM
+	else
+		pm_runtime_put(&pdev->dev);
+#endif
+	mutex_unlock(&g_ospm_mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL(ospm_power_using_hw_begin);
+
+/*
+ * ospm_power_using_video_end
+ *
+ * Description: Notify PowerMgmt module that you are done accessing the
+ * specified video island's hw so feel free to power it off.  Note that this
+ * function doesn't actually power off the islands.
+ */
+void ospm_power_using_video_end(int video_island)
+{
+	PSB_DEBUG_PM("MSVDX: using video 0x%x end.\n", video_island);
+
+	if (!(video_island & (OSPM_VIDEO_ENC_ISLAND | OSPM_VIDEO_DEC_ISLAND)))
+		return;
+
+	switch (video_island) {
+	case OSPM_VIDEO_ENC_ISLAND:
+		if (atomic_read(&g_videoenc_access_count) <= 0)
+			DRM_ERROR("g_videoenc_access_count <=0.\n");
+		else
+			atomic_dec(&g_videoenc_access_count);
+		break;
+	case OSPM_VIDEO_DEC_ISLAND:
+		if (atomic_read(&g_videodec_access_count) <= 0)
+			DRM_ERROR("g_videodec_access_count <=0.\n");
+		else
+			atomic_dec(&g_videodec_access_count);
+		break;
+	}
+
+#ifdef CONFIG_GFX_RTPM
+	/* decrement runtime pm ref count */
+	pm_runtime_put(&gpDrmDevice->pdev->dev);
+#endif
+}
+
+/*
+ * ospm_power_using_hw_end
+ *
+ * Description: Notify PowerMgmt module that you are done accessing the
+ * specified island's hw so feel free to power it off.  Note that this
+ * function doesn't actually power off the islands.
+ */
+void ospm_power_using_hw_end(int hw_island)
+{
+	if (!(hw_island & (OSPM_GRAPHICS_ISLAND | OSPM_DISPLAY_ISLAND |
+		OSPM_GL3_CACHE_ISLAND)))
+		return;
+
+	switch (hw_island) {
+	case OSPM_GRAPHICS_ISLAND:
+		atomic_dec(&g_graphics_access_count);
+		break;
+	case OSPM_DISPLAY_ISLAND:
+		atomic_dec(&g_display_access_count);
+		break;
+	}
+
+#ifdef CONFIG_GFX_RTPM
+	/* decrement runtime pm ref count */
+	pm_runtime_put(&gpDrmDevice->pdev->dev);
+#endif
+
+	WARN_ON(atomic_read(&g_graphics_access_count) < 0);
+	WARN_ON(atomic_read(&g_display_access_count) < 0);
+}
+EXPORT_SYMBOL(ospm_power_using_hw_end);
+
+int ospm_runtime_pm_allow(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv;
+	struct mdfld_dsi_config **dsi_configs;
+	bool panel_on = false, panel_on2 = false;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	dev_priv = (struct drm_psb_private *)dev->dev_private;
+	dsi_configs = dev_priv->dsi_configs;
+
+	if (dev_priv->rpm_enabled)
+		return 0;
+
+	if (dsi_configs[0])
+		panel_on = dsi_configs[0]->dsi_hw_context.panel_on;
+	if (dsi_configs[1])
+		panel_on2 = dsi_configs[1]->dsi_hw_context.panel_on;
+
+#ifdef CONFIG_GFX_RTPM
+	if (!panel_on && !panel_on2) {
+		pm_runtime_allow(&dev->pdev->dev);
+		dev_priv->rpm_enabled = 1;
+		DRM_INFO("Runtime PM enabled\n");
+	}
+#endif
+
+	return 0;
+}
+
+void ospm_runtime_pm_forbid(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	PSB_DEBUG_ENTRY("\n");
+
+#ifdef CONFIG_GFX_RTPM
+	pm_runtime_forbid(&dev->pdev->dev);
+#endif
+	dev_priv->rpm_enabled = 0;
+}
+
+int psb_runtime_suspend(struct device *dev)
+{
+	pm_message_t state;
+	int ret = 0;
+
+	state.event = 0;
+
+	PSB_DEBUG_PM("psb_runtime_suspend is called.\n");
+
+	if (atomic_read(&g_graphics_access_count) ||
+		atomic_read(&g_videoenc_access_count) ||
+		(gbdispstatus == true) ||
+		atomic_read(&g_videodec_access_count) ||
+		atomic_read(&g_display_access_count))
+		return -EBUSY;
+	else
+		ret = ospm_power_suspend(gpDrmDevice->pdev, state);
+
+	return ret;
+}
+
+int psb_runtime_resume(struct device *dev)
+{
+	PSB_DEBUG_ENTRY("\n");
+
+	/* Nop for GFX */
+	return 0;
+}
+
+int psb_runtime_idle(struct device *dev)
+{
+	struct drm_psb_private *dev_priv = gpDrmDevice->dev_private;
+	bool hdmi_audio_busy = false;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	hdmi_audio_busy = mid_hdmi_audio_is_busy(dev_priv->dev);
+
+	if (atomic_read(&g_graphics_access_count) ||
+		atomic_read(&g_videoenc_access_count) ||
+		atomic_read(&g_videodec_access_count) ||
+		atomic_read(&g_display_access_count) ||
+		(gbdispstatus == true) ||
+		(hdmi_audio_busy == true))
+		return -EBUSY;
+	else
+		return 0;
+}
+
+
diff --git a/drivers/external_drivers/intel_media/common/psb_powermgmt.h b/drivers/external_drivers/intel_media/common/psb_powermgmt.h
new file mode 100644
index 0000000..c4ba489
--- /dev/null
+++ b/drivers/external_drivers/intel_media/common/psb_powermgmt.h
@@ -0,0 +1,149 @@
+/**************************************************************************
+ * Copyright (c) 2009, Intel Corporation.
+ * All Rights Reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Benjamin Defnet <benjamin.r.defnet@intel.com>
+ *    Rajesh Poornachandran <rajesh.poornachandran@intel.com>
+ *
+ */
+#ifndef _PSB_POWERMGMT_H_
+#define _PSB_POWERMGMT_H_
+
+#include <linux/pci.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <linux/intel_mid_pm.h>
+
+#define OSPM_GRAPHICS_ISLAND	APM_GRAPHICS_ISLAND
+#ifndef CONFIG_DRM_VXD_BYT
+#define OSPM_VIDEO_DEC_ISLAND	APM_VIDEO_DEC_ISLAND
+#endif
+#define OSPM_VIDEO_ENC_ISLAND	APM_VIDEO_ENC_ISLAND
+#define OSPM_GL3_CACHE_ISLAND	APM_GL3_CACHE_ISLAND
+#define OSPM_DISPLAY_ISLAND	0x40
+
+#ifdef CONFIG_MDFD_GL3
+#define OSPM_ALL_ISLANDS	((OSPM_GRAPHICS_ISLAND) |\
+				(OSPM_VIDEO_ENC_ISLAND) |\
+				(OSPM_VIDEO_DEC_ISLAND) |\
+				(OSPM_GL3_CACHE_ISLAND) |\
+				(OSPM_DISPLAY_ISLAND))
+#else
+#define OSPM_ALL_ISLANDS	((OSPM_GRAPHICS_ISLAND) |\
+				(OSPM_VIDEO_ENC_ISLAND) |\
+				(OSPM_VIDEO_DEC_ISLAND) |\
+				(OSPM_DISPLAY_ISLAND))
+#endif
+/* IPC message and command defines used to enable/disable mipi panel voltages */
+#define IPC_MSG_PANEL_ON_OFF    0xE9
+#define IPC_CMD_PANEL_ON        1
+#define IPC_CMD_PANEL_OFF       0
+
+/* Panel presence */
+#define DISPLAY_A 0x1
+#define DISPLAY_B 0x2
+#define DISPLAY_C 0x4
+
+extern bool gbSuspended;
+extern int lastFailedBrightness;
+extern struct drm_device *gpDrmDevice;
+
+typedef enum _UHBUsage {
+    OSPM_UHB_ONLY_IF_ON = 0,
+    OSPM_UHB_FORCE_POWER_ON,
+} UHBUsage;
+
+struct mdfld_dsi_config;
+void mdfld_save_display(struct drm_device *dev);
+void mdfld_dsi_dpi_set_power(struct drm_encoder *encoder, bool on);
+void mdfld_dsi_dbi_set_power(struct drm_encoder *encoder, bool on);
+
+#ifndef CONFIG_DRM_VXD_BYT
+/* extern int psb_check_msvdx_idle(struct drm_device *dev); */
+/* Use these functions to power down video HW for D0i3 purpose  */
+void ospm_apm_power_down_msvdx(struct drm_device *dev, int force_on);
+void ospm_apm_power_down_topaz(struct drm_device *dev);
+#endif
+
+void ospm_power_init(struct drm_device *dev);
+void ospm_post_init(struct drm_device *dev);
+void ospm_power_uninit(void);
+void ospm_subsystem_no_gating(struct drm_device *dev, int subsystem);
+void ospm_subsystem_power_gate(struct drm_device *dev, int subsystem);
+
+/*
+ * OSPM will call these functions
+ */
+int ospm_power_suspend(struct pci_dev *pdev, pm_message_t state);
+int ospm_power_resume(struct pci_dev *pdev);
+
+/*
+ * These are the functions the driver should use to wrap all hw access
+ * (i.e. register reads and writes)
+ */
+bool ospm_power_using_hw_begin(int hw_island, UHBUsage usage);
+void ospm_power_using_hw_end(int hw_island);
+
+#ifndef CONFIG_DRM_VXD_BYT
+bool ospm_power_using_video_begin(int hw_island);
+void ospm_power_using_video_end(int hw_island);
+#endif
+
+/*
+ * Use this function to do an instantaneous check for if the hw is on.
+ * Only use this in cases where you know the g_state_change_mutex
+ * is already held such as in irq install/uninstall and you need to
+ * prevent a deadlock situation.  Otherwise use ospm_power_using_hw_begin().
+ */
+bool ospm_power_is_hw_on(int hw_islands);
+
+/*
+ * Power up/down different hw component rails/islands
+ */
+void mdfld_save_display(struct drm_device *dev);
+void ospm_power_island_down(int hw_islands);
+int ospm_power_island_up(int hw_islands);
+void ospm_suspend_graphics(void);
+void ospm_power_graphics_island_down(int hw_islands);
+void ospm_power_graphics_island_up(int hw_islands);
+
+/*
+ * GFX-Runtime PM callbacks
+ */
+int psb_runtime_suspend(struct device *dev);
+int psb_runtime_resume(struct device *dev);
+int psb_runtime_idle(struct device *dev);
+int ospm_runtime_pm_allow(struct drm_device *dev);
+void ospm_runtime_pm_forbid(struct drm_device *dev);
+void acquire_ospm_lock(void);
+void release_ospm_lock(void);
+
+
+/*
+ * If vec/ved/gfx are idle, submit a request to execute the subsystem-level
+ * idle callback for the device.
+ */
+#ifdef CONFIG_GFX_RTPM
+extern void psb_ospm_post_power_down(void);
+#endif
+#endif /*_PSB_POWERMGMT_H_*/
diff --git a/drivers/external_drivers/intel_media/display/tng/bc_video/bufferclass_video_linux.h b/drivers/external_drivers/intel_media/display/tng/bc_video/bufferclass_video_linux.h
new file mode 100644
index 0000000..36be669
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/bc_video/bufferclass_video_linux.h
@@ -0,0 +1,71 @@
+/*****************************************************************************
+ *
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ ******************************************************************************/
+
+#ifndef __BC_VIDEO_LINUX_H__
+#define __BC_VIDEO_LINUX_H__
+
+#include <linux/ioctl.h>
+
+#define BC_FOURCC(a, b, c, d) \
+	((unsigned long) ((a) | (b)<<8 | (c)<<16 | (d)<<24))
+
+#define BC_PIX_FMT_NV12     BC_FOURCC('N', 'V', '1', '2')	/*YUV 4:2:0 */
+#define BC_PIX_FMT_UYVY     BC_FOURCC('U', 'Y', 'V', 'Y')	/*YUV 4:2:2 */
+#define BC_PIX_FMT_YUYV     BC_FOURCC('Y', 'U', 'Y', 'V')	/*YUV 4:2:2 */
+#define BC_PIX_FMT_RGB565   BC_FOURCC('R', 'G', 'B', 'P')	/*RGB 5:6:5 */
+
+int FillBuffer(unsigned int uiBufferIndex);
+
+typedef struct BC_Video_ioctl_package_TAG {
+	int ioctl_cmd;
+	int device_id;
+	int inputparam;
+	int outputparam;
+} BC_Video_ioctl_package;
+
+typedef struct bc_buf_ptr {
+	unsigned int index;
+	int size;
+	unsigned long pa;
+	unsigned long handle;
+} bc_buf_ptr_t;
+
+#define BC_Video_ioctl_fill_buffer              0
+#define BC_Video_ioctl_get_buffer_count         1
+/*get physical address by index */
+#define BC_Video_ioctl_get_buffer_phyaddr       2
+/*get index by physical address */
+#define BC_Video_ioctl_get_buffer_index         3
+#define BC_Video_ioctl_request_buffers          4
+#define BC_Video_ioctl_set_buffer_phyaddr       5
+#define BC_Video_ioctl_release_buffer_device    6
+
+#define BC_Video_ioctl_alloc_buffer             7
+#define BC_Video_ioctl_free_buffer              8
+#define BC_Video_ioctl_get_buffer_handle        9
+
+int BC_Camera_Bridge(BC_Video_ioctl_package *psBridge, unsigned long pAddr);
+
+#endif
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/cmi_cmd.c b/drivers/external_drivers/intel_media/display/tng/drv/cmi_cmd.c
new file mode 100644
index 0000000..8880ae7
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/cmi_cmd.c
@@ -0,0 +1,760 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Faxing Lu<faxing.lu@intel.com>
+ */
+
+#include "mdfld_dsi_dbi.h"
+#include "mdfld_dsi_pkg_sender.h"
+#include "mdfld_dsi_esd.h"
+#include <linux/gpio.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/intel_pmic.h>
+#include <linux/regulator/machine.h>
+#include <asm/intel_scu_pmic.h>
+#include <asm/intel_mid_rpmsg.h>
+#include <asm/intel_mid_remoteproc.h>
+
+/* The register to control secure I2C FLIS pin */
+#define SECURE_I2C_FLIS_REG	0xFF0C1D30
+
+static int mipi_reset_gpio;
+
+static u8 cmi_exit_sleep_mode[]     = {0x11};
+static u8 cmi_set_tear_on[]         = {0x35, 0x00};
+static u8 cmi_set_brightness[]      = {0x51, 0x00};
+static u8 cmi_turn_on_backlight[]   = {0x53, 0x24};
+static u8 cmi_turn_off_backlight[]  = {0x53, 0x00};
+static u8 cmi_set_mipi_ctrl[]       = {
+	0xba, 0x12, 0x83, 0x00,
+	0xd6, 0xc5, 0x00, 0x09,
+	0xff, 0x0f, 0x27, 0x03,
+	0x21, 0x27, 0x25, 0x20,
+	0x00, 0x10};
+static u8 cmi_command_mode[]        = {0xc2, 0x08};
+static u8 cmi_set_panel[]           = {0xcc, 0x08};
+static u8 cmi_set_eq_func_ltps[]    = {0xd4, 0x0c};
+static u8 cmi_set_address_mode[]    = {0x36, 0x00};
+static u8 cmi_set_te_scanline[]     = {0x44, 0x00, 0x00, 0x00};
+static u8 cmi_set_pixel_format[]    = {0x3a, 0x77};
+static u8 cmi_mcs_protect_off[]     = {0xb9, 0xff, 0x83, 0x92};
+static u8 cmi_mcs_protect_on[]      = {0xb9, 0x00, 0x00, 0x00};
+static u8 cmi_set_blanking_opt_2[]  = {0xc7, 0x00, 0x40};
+static u8 cmi_mcs_clumn_addr[]      = {0x2a, 0x00, 0x00, 0x02, 0xcf};
+static u8 cmi_mcs_page_addr[]       = {0x2b, 0x00, 0x00, 0x04, 0xff};
+static u8 cmi_ic_bias_current[] = {
+	0xbf, 0x05, 0xe0, 0x02,
+	0x00};
+static u8 cmi_set_power[] = {
+	0xb1, 0x7c, 0x00, 0x44,
+	0x94, 0x00, 0x0d, 0x0d,
+	0x12, 0x1f, 0x3f, 0x3f,
+	0x42, 0x72};
+static u8 cmi_set_power_dstb[] = {
+	0xb1, 0x01, 0x01, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00};
+static u8 cmi_set_disp_reg[] = {
+	0xb2, 0x0f, 0xc8, 0x01,
+	0x01, 0x06, 0x84, 0x00,
+	0xff, 0x01, 0x01, 0x06,
+	0x20};
+static u8 cmi_set_command_cyc[] = {
+	0xb4, 0x00, 0x00, 0x05,
+	0x00, 0xa0, 0x05, 0x16,
+	0x9d, 0x30, 0x03, 0x16,
+	0x00, 0x03, 0x03, 0x00,
+	0x1b, 0x04, 0x07, 0x07,
+	0x01, 0x00, 0x1a, 0x77};
+static u8 cmi_set_ltps_ctrl_output[] = {
+	0xd5, 0x00, 0x08, 0x08,
+	0x00, 0x44, 0x55, 0x66,
+	0x77, 0xcc, 0xcc, 0xcc,
+	0xcc, 0x00, 0x77, 0x66,
+	0x55, 0x44, 0xcc, 0xcc,
+	0xcc, 0xcc};
+static u8 cmi_set_video_cyc[] = {
+	0xd8, 0x00, 0x00, 0x05,
+	0x00, 0xa0, 0x05, 0x16,
+	0x9d, 0x30, 0x03, 0x16,
+	0x00, 0x03, 0x03, 0x00,
+	0x1b, 0x04, 0x07, 0x07,
+	0x01, 0x00, 0x1a, 0x77};
+static u8 cmi_gamma_r[] = {
+	0xe0, 0x00, 0x1f, 0x23,
+	0x3f, 0x3f, 0x3f, 0x33,
+	0x55, 0x06, 0x0e, 0x0e,
+	0x11, 0x14, 0x12, 0x14,
+	0x1d, 0x1f, 0x00, 0x1f,
+	0x23, 0x3f, 0x3f, 0x3f,
+	0x33, 0x55, 0x06, 0x0e,
+	0x0e, 0x11, 0x14, 0x12,
+	0x14, 0x1d, 0x1f};
+static u8 cmi_gamma_g[] = {
+	0xe1, 0x00, 0x1f, 0x23,
+	0x3f, 0x3f, 0x3f, 0x33,
+	0x55, 0x06, 0x0e, 0x0e,
+	0x11, 0x14, 0x12, 0x14,
+	0x1d, 0x1f, 0x00, 0x1f,
+	0x23, 0x3f, 0x3f, 0x3f,
+	0x33, 0x55, 0x06, 0x0e,
+	0x0e, 0x11, 0x14, 0x12,
+	0x14, 0x1d, 0x1f};
+static u8 cmi_gamma_b[] = {
+	0xe2, 0x00, 0x1f, 0x23,
+	0x3f, 0x3f, 0x3f, 0x33,
+	0x55, 0x06, 0x0e, 0x0e,
+	0x11, 0x14, 0x12, 0x14,
+	0x1d, 0x1f, 0x00, 0x1f,
+	0x23, 0x3f, 0x3f, 0x3f,
+	0x33, 0x55, 0x06, 0x0e,
+	0x0e, 0x11, 0x14, 0x12,
+	0x14, 0x1d, 0x1f};
+static u8 cmi_enter_set_cabc[] = {
+	0xc9, 0x1f, 0x00, 0x1e,
+	0x1e, 0x00, 0x20, 0x00,
+	0x01, 0xe3};
+static u8 cmi_set_stba[] = {
+	0xc0, 0x01, 0x94};
+
+static
+int mdfld_cmi_drv_ic_init(struct mdfld_dsi_config *dsi_config)
+{
+	struct drm_device *dev = dsi_config->dev;
+	struct mdfld_dsi_pkg_sender *sender
+			= mdfld_dsi_get_pkg_sender(dsi_config);
+	struct mdfld_dsi_hw_registers *regs;
+
+	if (!sender)
+		return -EINVAL;
+
+	PSB_DEBUG_ENTRY("\n");
+	sender->status = MDFLD_DSI_PKG_SENDER_FREE;
+
+	regs = &dsi_config->regs;
+
+	/* swtich to 2 data lanes */
+	REG_WRITE(regs->device_ready_reg, 0x0);
+	udelay(1);
+	REG_WRITE(regs->dsi_func_prg_reg, 0xA002);
+	udelay(1);
+	REG_WRITE(regs->device_ready_reg, 0x1);
+	udelay(1);
+
+	mdfld_dsi_send_mcs_short_hs(sender,
+		cmi_exit_sleep_mode[0], 0, 0, 0);
+	if (sender->status == MDFLD_DSI_CONTROL_ABNORMAL)
+		return -EIO;
+
+	mdelay(150);
+
+	mdfld_dsi_send_mcs_long_hs(sender, cmi_mcs_protect_off, 4, 0);
+	if (sender->status == MDFLD_DSI_CONTROL_ABNORMAL)
+		return -EIO;
+
+	mdfld_dsi_send_mcs_long_hs(sender, cmi_ic_bias_current, 5, 0);
+	if (sender->status == MDFLD_DSI_CONTROL_ABNORMAL)
+		return -EIO;
+
+	mdfld_dsi_send_mcs_long_hs(sender, cmi_set_power, 14, 0);
+	if (sender->status == MDFLD_DSI_CONTROL_ABNORMAL)
+		return -EIO;
+
+	mdfld_dsi_send_mcs_long_hs(sender, cmi_set_disp_reg, 13, 0);
+	if (sender->status == MDFLD_DSI_CONTROL_ABNORMAL)
+		return -EIO;
+
+	mdfld_dsi_send_mcs_long_hs(sender, cmi_set_command_cyc, 24, 0);
+	if (sender->status == MDFLD_DSI_CONTROL_ABNORMAL)
+		return -EIO;
+
+	mdfld_dsi_send_mcs_long_hs(sender, cmi_set_mipi_ctrl, 3, 0);
+
+	if (sender->status == MDFLD_DSI_CONTROL_ABNORMAL)
+		return -EIO;
+
+	/* switch back to 3 data lanes */
+	mdfld_dsi_wait_for_fifos_empty(sender);
+	REG_WRITE(regs->device_ready_reg, 0x0);
+	udelay(1);
+	REG_WRITE(regs->dsi_func_prg_reg, 0xA003);
+	udelay(1);
+	REG_WRITE(regs->device_ready_reg, 0x1);
+
+	mdfld_dsi_send_mcs_long_hs(sender, cmi_set_stba, 3, 0);
+	if (sender->status == MDFLD_DSI_CONTROL_ABNORMAL)
+		return -EIO;
+
+	mdfld_dsi_send_mcs_short_hs(sender, cmi_command_mode[0],
+			cmi_command_mode[1], 1, 0);
+	if (sender->status == MDFLD_DSI_CONTROL_ABNORMAL)
+		return -EIO;
+
+	mdfld_dsi_send_mcs_long_hs(sender, cmi_set_blanking_opt_2,
+			sizeof(cmi_set_blanking_opt_2), 0);
+	if (sender->status == MDFLD_DSI_CONTROL_ABNORMAL)
+		return -EIO;
+
+	mdfld_dsi_send_mcs_short_hs(sender, cmi_set_panel[0],
+			cmi_set_panel[1], 1, 0);
+	if (sender->status == MDFLD_DSI_CONTROL_ABNORMAL)
+		return -EIO;
+
+	mdfld_dsi_send_mcs_short_hs(sender, cmi_set_eq_func_ltps[0],
+			cmi_set_eq_func_ltps[1], 1, 0);
+	if (sender->status == MDFLD_DSI_CONTROL_ABNORMAL)
+		return -EIO;
+
+	mdfld_dsi_send_mcs_long_hs(sender, cmi_set_ltps_ctrl_output, 22, 0);
+	if (sender->status == MDFLD_DSI_CONTROL_ABNORMAL)
+		return -EIO;
+
+	mdfld_dsi_send_mcs_long_hs(sender, cmi_set_video_cyc, 24, 0);
+	if (sender->status == MDFLD_DSI_CONTROL_ABNORMAL)
+		return -EIO;
+
+	mdfld_dsi_send_mcs_long_hs(sender, cmi_gamma_r, 35, 0);
+	if (sender->status == MDFLD_DSI_CONTROL_ABNORMAL)
+		return -EIO;
+
+	mdfld_dsi_send_mcs_long_hs(sender, cmi_gamma_g, 35, 0);
+	if (sender->status == MDFLD_DSI_CONTROL_ABNORMAL)
+		return -EIO;
+
+	mdfld_dsi_send_mcs_long_hs(sender, cmi_gamma_b, 35, 0);
+	if (sender->status == MDFLD_DSI_CONTROL_ABNORMAL)
+		return -EIO;
+
+	mdfld_dsi_send_mcs_short_hs(sender, cmi_set_pixel_format[0],
+			cmi_set_pixel_format[1], 1, 0);
+	if (sender->status == MDFLD_DSI_CONTROL_ABNORMAL)
+		return -EIO;
+
+	mdfld_dsi_send_mcs_long_hs(sender, cmi_mcs_clumn_addr, 5, 0);
+	if (sender->status == MDFLD_DSI_CONTROL_ABNORMAL)
+		return -EIO;
+
+	mdfld_dsi_send_mcs_long_hs(sender, cmi_mcs_page_addr, 5, 0);
+	if (sender->status == MDFLD_DSI_CONTROL_ABNORMAL)
+		return -EIO;
+
+	mdfld_dsi_send_mcs_short_hs(sender, cmi_set_address_mode[0],
+	cmi_set_address_mode[1], 1, 0);
+	if (sender->status == MDFLD_DSI_CONTROL_ABNORMAL)
+		return -EIO;
+
+	mdfld_dsi_send_mcs_long_hs(sender, cmi_set_te_scanline, 4, 0);
+	if (sender->status == MDFLD_DSI_CONTROL_ABNORMAL)
+		return -EIO;
+
+	mdfld_dsi_send_mcs_short_hs(sender, cmi_set_tear_on[0],
+			cmi_set_tear_on[1], 1, 0);
+	if (sender->status == MDFLD_DSI_CONTROL_ABNORMAL)
+		return -EIO;
+
+	mdfld_dsi_send_mcs_long_hs(sender, cmi_enter_set_cabc, 10, 0);
+	if (sender->status == MDFLD_DSI_CONTROL_ABNORMAL)
+
+		return -EIO;
+	/* set backlight on*/
+	mdfld_dsi_send_mcs_short_hs(sender,
+		cmi_turn_on_backlight[0],
+		cmi_turn_on_backlight[1], 1 , 0);
+	if (sender->status == MDFLD_DSI_CONTROL_ABNORMAL)
+		return -EIO;
+
+	/* turn CABC on*/
+	mdfld_dsi_send_mcs_short_hs(sender,
+			write_ctrl_cabc, STILL_IMAGE, 1,
+			MDFLD_DSI_SEND_PACKAGE);
+
+	mdfld_dsi_send_mcs_long_hs(sender, cmi_mcs_protect_on, 4, 0);
+	if (sender->status == MDFLD_DSI_CONTROL_ABNORMAL)
+		return -EIO;
+	mdelay(5);
+	return 0;
+}
+
+static
+void mdfld_cmi_dsi_controller_init(struct mdfld_dsi_config *dsi_config)
+{
+
+	struct mdfld_dsi_hw_context *hw_ctx = &dsi_config->dsi_hw_context;
+#ifdef ENABLE_CSC_GAMMA /*FIXME*/
+	struct drm_device *dev = dsi_config->dev;
+
+	struct csc_setting csc = {
+		.pipe = 0,
+		.type = CSC_REG_SETTING,
+		.enable_state = true,
+		.data_len = CSC_REG_COUNT,
+		.data.csc_reg_data = {
+			0xFFB0424, 0xFDF, 0x4320FF1, 0xFDC, 0xFF50FF5, 0x415}
+	};
+	struct gamma_setting gamma = {
+		.pipe = 0,
+		.type = GAMMA_REG_SETTING,
+		.enable_state = true,
+		.data_len = GAMMA_10_BIT_TABLE_COUNT,
+		.gamma_tableX100 = {
+			0x000000, 0x030303, 0x050505, 0x070707,
+			0x090909, 0x0C0C0C, 0x0E0E0E, 0x101010,
+			0x121212, 0x141414, 0x171717, 0x191919,
+			0x1B1B1B, 0x1D1D1D, 0x1F1F1F, 0x212121,
+			0x232323, 0x252525, 0x282828, 0x2A2A2A,
+			0x2C2C2C, 0x2E2E2E, 0x303030, 0x323232,
+			0x343434, 0x363636, 0x383838, 0x3A3A3A,
+			0x3C3C3C, 0x3E3E3E, 0x404040, 0x424242,
+			0x444444, 0x464646, 0x484848, 0x4A4A4A,
+			0x4C4C4C, 0x4E4E4E, 0x505050, 0x525252,
+			0x545454, 0x565656, 0x585858, 0x5A5A5A,
+			0x5C5C5C, 0x5E5E5E, 0x606060, 0x626262,
+			0x646464, 0x666666, 0x686868, 0x6A6A6A,
+			0x6C6C6C, 0x6E6E6E, 0x707070, 0x727272,
+			0x747474, 0x767676, 0x787878, 0x7A7A7A,
+			0x7C7C7C, 0x7E7E7E, 0x808080, 0x828282,
+			0x848484, 0x868686, 0x888888, 0x8A8A8A,
+			0x8C8C8C, 0x8E8E8E, 0x909090, 0x929292,
+			0x949494, 0x969696, 0x989898, 0x999999,
+			0x9B9B9B, 0x9D9D9D, 0x9F9F9F, 0xA1A1A1,
+			0xA3A3A3, 0xA5A5A5, 0xA7A7A7, 0xA9A9A9,
+			0xABABAB, 0xADADAD, 0xAFAFAF, 0xB1B1B1,
+			0xB3B3B3, 0xB5B5B5, 0xB6B6B6, 0xB8B8B8,
+			0xBABABA, 0xBCBCBC, 0xBEBEBE, 0xC0C0C0,
+			0xC2C2C2, 0xC4C4C4, 0xC6C6C6, 0xC8C8C8,
+			0xCACACA, 0xCCCCCC, 0xCECECE, 0xCFCFCF,
+			0xD1D1D1, 0xD3D3D3, 0xD5D5D5, 0xD7D7D7,
+			0xD9D9D9, 0xDBDBDB, 0xDDDDDD, 0xDFDFDF,
+			0xE1E1E1, 0xE3E3E3, 0xE4E4E4, 0xE6E6E6,
+			0xE8E8E8, 0xEAEAEA, 0xECECEC, 0xEEEEEE,
+			0xF0F0F0, 0xF2F2F2, 0xF4F4F4, 0xF6F6F6,
+			0xF7F7F7, 0xF9F9F9, 0xFBFBFB, 0xFDFDFD}
+	};
+#endif
+
+	PSB_DEBUG_ENTRY("\n");
+
+	/*reconfig lane configuration*/
+	dsi_config->lane_count = 3;
+	dsi_config->lane_config = MDFLD_DSI_DATA_LANE_3_1;
+	dsi_config->enable_gamma_csc = ENABLE_GAMMA | ENABLE_CSC;
+	/* This is for 400 mhz.  Set it to 0 for 800mhz */
+	hw_ctx->cck_div = 1;
+	hw_ctx->pll_bypass_mode = 0;
+
+	hw_ctx->mipi_control = 0x0;
+	hw_ctx->intr_en = 0xFFFFFFFF;
+	hw_ctx->hs_tx_timeout = 0xFFFFFF;
+	hw_ctx->lp_rx_timeout = 0xFFFFFF;
+	hw_ctx->turn_around_timeout = 0x1f;
+	hw_ctx->device_reset_timer = 0xffff;
+	hw_ctx->high_low_switch_count = 0x20;
+	hw_ctx->clk_lane_switch_time_cnt = 0x20000E;
+	hw_ctx->eot_disable = 0x3;
+	hw_ctx->init_count = 0xf0;
+	hw_ctx->lp_byteclk = 0x4;
+	hw_ctx->dphy_param = 0x1B104315;
+	hw_ctx->dbi_bw_ctrl = 1390;
+	hw_ctx->hs_ls_dbi_enable = 0x0;
+	hw_ctx->dsi_func_prg = ((DBI_DATA_WIDTH_OPT2 << 13) |
+				dsi_config->lane_count);
+	hw_ctx->mipi = SEL_FLOPPED_HSTX |
+			PASS_FROM_SPHY_TO_AFE |
+			BANDGAP_CHICKEN_BIT |
+			TE_TRIGGER_GPIO_PIN;
+	hw_ctx->video_mode_format = 0xf;
+
+#ifdef ENABLE_CSC_GAMMA /*FIXME*/
+	if (dsi_config->enable_gamma_csc & ENABLE_CSC) {
+		/* setting the tuned csc setting */
+		drm_psb_enable_color_conversion = 1;
+		mdfld_intel_crtc_set_color_conversion(dev, &csc);
+	}
+
+	if (dsi_config->enable_gamma_csc & ENABLE_GAMMA) {
+		/* setting the tuned gamma setting */
+		drm_psb_enable_gamma = 1;
+		mdfld_intel_crtc_set_gamma(dev, &gamma);
+	}
+#endif
+}
+
+static
+struct drm_display_mode *cmi_cmd_get_config_mode(void)
+{
+	struct drm_display_mode *mode;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+	if (!mode)
+		return NULL;
+
+	mode->htotal = 920;
+	mode->hdisplay = 720;
+	mode->hsync_start = 816;
+	mode->hsync_end = 824;
+	mode->vtotal = 1300;
+	mode->vdisplay = 1280;
+	mode->vsync_start = 1294;
+	mode->vsync_end = 1296;
+	mode->vrefresh = 60;
+	mode->clock =  mode->vrefresh * mode->vtotal * mode->htotal / 1000;
+	mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+	drm_mode_set_name(mode);
+	drm_mode_set_crtcinfo(mode, 0);
+
+	return mode;
+}
+
+static
+int mdfld_dsi_cmi_cmd_power_on(struct mdfld_dsi_config *dsi_config)
+{
+
+	struct mdfld_dsi_pkg_sender *sender =
+		mdfld_dsi_get_pkg_sender(dsi_config);
+	int err = 0;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (!sender) {
+		DRM_ERROR("Failed to get DSI packet sender\n");
+		return -EINVAL;
+	}
+
+	/*exit sleep */
+	err = mdfld_dsi_send_dcs(sender,
+		 exit_sleep_mode,
+		 NULL,
+		 0,
+		 CMD_DATA_SRC_SYSTEM_MEM,
+		 MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("faild to exit_sleep mode\n");
+		goto power_err;
+	}
+
+	msleep(120);
+
+	/*set tear on*/
+	err = mdfld_dsi_send_dcs(sender,
+		 set_tear_on,
+		 NULL,
+		 0,
+		 CMD_DATA_SRC_SYSTEM_MEM,
+		 MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("faild to set_tear_on mode\n");
+		goto power_err;
+	}
+
+	/*turn on display*/
+	err = mdfld_dsi_send_dcs(sender,
+		 set_display_on,
+		 NULL,
+		 0,
+		 CMD_DATA_SRC_SYSTEM_MEM,
+		 MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("faild to set_display_on mode\n");
+		goto power_err;
+	}
+power_err:
+	return err;
+}
+
+static int mdfld_dsi_cmi_cmd_power_off(struct mdfld_dsi_config *dsi_config)
+{
+	struct mdfld_dsi_pkg_sender *sender =
+		mdfld_dsi_get_pkg_sender(dsi_config);
+	int err = 0;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (!sender) {
+		DRM_ERROR("Failed to get DSI packet sender\n");
+		return -EINVAL;
+	}
+
+	/* turn off cabc */
+	err = mdfld_dsi_send_mcs_short_hs(sender,
+		write_ctrl_cabc, 0, 1,
+		MDFLD_DSI_SEND_PACKAGE);
+
+	/*turn off backlight*/
+	err = mdfld_dsi_send_mcs_long_hs(sender, cmi_turn_off_backlight,
+					 sizeof(cmi_turn_off_backlight), 0);
+	if (err) {
+		DRM_ERROR("%s: failed to turn off backlight\n", __func__);
+		goto out;
+	}
+	mdelay(1);
+
+
+	/*turn off display */
+	err = mdfld_dsi_send_dcs(sender,
+		 set_display_off,
+		 NULL,
+		 0,
+		 CMD_DATA_SRC_SYSTEM_MEM,
+		 MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("sent set_display_off faild\n");
+		goto out;
+	}
+
+	/*set tear off */
+	err = mdfld_dsi_send_dcs(sender,
+		 set_tear_off,
+		 NULL,
+		 0,
+		 CMD_DATA_SRC_SYSTEM_MEM,
+		 MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("sent set_tear_off faild\n");
+		goto out;
+	}
+
+	/*Enter sleep mode */
+	err = mdfld_dsi_send_dcs(sender,
+			enter_sleep_mode,
+			NULL,
+			0,
+			CMD_DATA_SRC_SYSTEM_MEM,
+			MDFLD_DSI_SEND_PACKAGE);
+
+	if (err) {
+		DRM_ERROR("DCS 0x%x sent failed\n", enter_sleep_mode);
+		goto out;
+	}
+
+	/**
+	 * MIPI spec shows it must wait 5ms
+	 * before sneding next command
+	 */
+	mdelay(5);
+
+	/*enter deep standby mode*/
+	err = mdfld_dsi_send_mcs_long_hs(sender, cmi_mcs_protect_off, 4, 0);
+	if (err) {
+		DRM_ERROR("Failed to turn off protection\n");
+		goto out;
+	}
+
+	err = mdfld_dsi_send_mcs_long_hs(sender, cmi_set_power_dstb, 14, 0);
+	if (err)
+		DRM_ERROR("Failed to enter DSTB\n");
+	mdelay(5);
+	mdfld_dsi_send_mcs_long_hs(sender, cmi_mcs_protect_on, 4, 0);
+
+out:
+	return err;
+}
+
+static
+void cmi_cmd_get_panel_info(int pipe, struct panel_info *pi)
+{
+	PSB_DEBUG_ENTRY("\n");
+
+	if (pipe == 0) {
+		pi->width_mm = PANEL_4DOT3_WIDTH;
+		pi->height_mm = PANEL_4DOT3_HEIGHT;
+	}
+}
+
+static
+int mdfld_dsi_cmi_cmd_detect(struct mdfld_dsi_config *dsi_config)
+{
+	int status;
+	struct drm_device *dev = dsi_config->dev;
+	struct mdfld_dsi_hw_registers *regs = &dsi_config->regs;
+	u32 dpll_val, device_ready_val;
+	int pipe = dsi_config->pipe;
+	struct mdfld_dsi_pkg_sender *sender =
+		mdfld_dsi_get_pkg_sender(dsi_config);
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (pipe == 0) {
+		/*
+		 * FIXME: WA to detect the panel connection status, and need to
+		 * implement detection feature with get_power_mode DSI command.
+		 */
+		if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
+					OSPM_UHB_FORCE_POWER_ON)) {
+			DRM_ERROR("hw begin failed\n");
+			return -EAGAIN;
+		}
+
+		dpll_val = REG_READ(regs->dpll_reg);
+		device_ready_val = REG_READ(regs->device_ready_reg);
+		if ((device_ready_val & DSI_DEVICE_READY) &&
+		    (dpll_val & DPLL_VCO_ENABLE)) {
+			dsi_config->dsi_hw_context.panel_on = true;
+			mdfld_dsi_send_gen_long_hs(sender,
+					cmi_mcs_protect_off, 4, 0);
+			mdfld_dsi_send_gen_long_hs(sender,
+					cmi_set_disp_reg, 13, 0);
+			mdfld_dsi_send_gen_long_hs(sender,
+					cmi_mcs_protect_on, 4, 0);
+
+		} else {
+			dsi_config->dsi_hw_context.panel_on = false;
+			DRM_INFO("%s: panel is not initialized!\n", __func__);
+		}
+
+		status = MDFLD_DSI_PANEL_CONNECTED;
+
+		ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+	} else {
+		DRM_INFO("%s: do NOT support dual panel\n", __func__);
+		status = MDFLD_DSI_PANEL_DISCONNECTED;
+	}
+
+	return status;
+}
+
+static
+int mdfld_dsi_cmi_cmd_set_brightness(struct mdfld_dsi_config *dsi_config,
+		int level)
+{
+	struct mdfld_dsi_pkg_sender *sender =
+		mdfld_dsi_get_pkg_sender(dsi_config);
+	int duty_val = 0;
+
+	if (!sender) {
+		DRM_ERROR("Failed to get DSI packet sender\n");
+		return -EINVAL;
+	}
+	duty_val = (255 * level) / 255;
+	cmi_set_brightness[1] = duty_val;
+
+	mdfld_dsi_send_mcs_short_hs(sender,
+		cmi_set_brightness[0], cmi_set_brightness[1], 1, 0);
+
+	return 0;
+}
+
+static void __vpro2_power_ctrl(bool on)
+{
+	u8 addr, value;
+	addr = 0xad;
+	if (intel_scu_ipc_ioread8(addr, &value))
+		DRM_ERROR("%s: %d: failed to read vPro2\n", __func__, __LINE__);
+
+	/* Control vPROG2 power rail with 2.85v. */
+	if (on)
+		value |= 0x1;
+	else
+		value &= ~0x1;
+
+	if (intel_scu_ipc_iowrite8(addr, value))
+		DRM_ERROR("%s: %d: failed to write vPro2\n",
+				__func__, __LINE__);
+}
+static
+void _get_panel_reset_gpio(void)
+{
+	int ret = 0;
+	if (mipi_reset_gpio == 0) {
+		ret = get_gpio_by_name("mipi-reset");
+		if (ret < 0) {
+			DRM_ERROR("Faild to get panel reset gpio, " \
+				  "use default reset pin\n");
+			return;
+		}
+		mipi_reset_gpio = ret;
+		ret = gpio_request(mipi_reset_gpio, "mipi_display");
+		if (ret) {
+			DRM_ERROR("Faild to request panel reset gpio\n");
+			return;
+		}
+		gpio_direction_output(mipi_reset_gpio, 0);
+	}
+}
+
+static
+int mdfld_dsi_cmi_cmd_panel_reset(struct mdfld_dsi_config *dsi_config)
+{
+	u8 *vaddr = NULL, *vaddr1 = NULL;
+	int reg_value_scl = 0;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	/* Because when reset touchscreen panel, touchscreen will pull i2c bus
+	 * to low, sometime this operation will cause i2c bus enter into wrong
+	 * status, so before reset, switch i2c scl pin */
+	vaddr1 = ioremap(SECURE_I2C_FLIS_REG, 4);
+	reg_value_scl = ioread32(vaddr1);
+	reg_value_scl &= ~0x1000;
+	rpmsg_send_generic_raw_command(RP_INDIRECT_WRITE, 0,
+					(u8 *)&reg_value_scl, 4,
+					NULL, 0,
+					SECURE_I2C_FLIS_REG, 0);
+
+	__vpro2_power_ctrl(true);
+	usleep_range(2000, 2500);
+
+	_get_panel_reset_gpio();
+	gpio_direction_output(mipi_reset_gpio, 0);
+	gpio_set_value_cansleep(mipi_reset_gpio, 0);
+	usleep_range(2000, 2500);
+	gpio_set_value_cansleep(mipi_reset_gpio, 1);
+	usleep_range(3000, 3500);
+	vaddr = ioremap(0xff0c2d00, 0x60);
+	iowrite32(0x3221, vaddr + 0x1c);
+	usleep_range(2000, 2500);
+	iounmap(vaddr);
+	/* switch i2c scl pin back */
+	reg_value_scl |= 0x1000;
+	rpmsg_send_generic_raw_command(RP_INDIRECT_WRITE, 0,
+					(u8 *)&reg_value_scl, 4,
+					NULL, 0,
+					SECURE_I2C_FLIS_REG, 0);
+	iounmap(vaddr1);
+	return 0;
+}
+
+void cmi_cmd_init(struct drm_device *dev, struct panel_funcs *p_funcs)
+{
+	if (!dev || !p_funcs) {
+		DRM_ERROR("Invalid parameters\n");
+		return;
+	}
+
+	PSB_DEBUG_ENTRY("\n");
+
+	p_funcs->get_config_mode = cmi_cmd_get_config_mode;
+	p_funcs->get_panel_info = cmi_cmd_get_panel_info;
+	p_funcs->reset = mdfld_dsi_cmi_cmd_panel_reset;
+	p_funcs->exit_deep_standby =
+			mdfld_dsi_cmi_cmd_panel_reset;
+	p_funcs->drv_ic_init = mdfld_cmi_drv_ic_init;
+	p_funcs->dsi_controller_init = mdfld_cmi_dsi_controller_init;
+	p_funcs->detect = mdfld_dsi_cmi_cmd_detect;
+	p_funcs->power_on = mdfld_dsi_cmi_cmd_power_on;
+	p_funcs->power_off = mdfld_dsi_cmi_cmd_power_off;
+	p_funcs->set_brightness = mdfld_dsi_cmi_cmd_set_brightness;
+}
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/cmi_vid.c b/drivers/external_drivers/intel_media/display/tng/drv/cmi_vid.c
new file mode 100644
index 0000000..785c05a
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/cmi_vid.c
@@ -0,0 +1,605 @@
+/*
+ * Copyright (C) 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Faxing Lu
+ */
+
+#include "displays/cmi_vid.h"
+
+static u8 cmi_set_extension[] = {0xb9, 0xff, 0x83, 0x92};
+static u8 cmi_ic_bias_current[] = {
+	0xbf, 0x05, 0x60, 0x82,
+	0x00, 0x00, 0x00, 0x00};
+static u8 cmi_set_power[] = {
+	0xb1, 0x7c, 0x00, 0x44,
+	0x24, 0x00, 0x0d, 0x0d,
+	0x12, 0x1a, 0x3f, 0x3f,
+	0x42, 0x72, 0x00, 0x00};
+static u8 cmi_set_disp_reg[] = {
+	0xb2, 0x0f, 0xc8, 0x05,
+	0x0f, 0x08, 0x84, 0x00,
+	0xff, 0x05, 0x0f, 0x04,
+	0x20, 0x00, 0x00, 0x00};
+static u8 cmi_set_command_cyc[] = {
+	0xb4, 0x00, 0x00, 0x05,
+	0x00, 0xa0, 0x05, 0x16,
+	0x9d, 0x30, 0x03, 0x16,
+	0x00, 0x03, 0x03, 0x00,
+	0x1b, 0x06, 0x07, 0x07,
+	0x00, 0x00, 0x00, 0x00};
+static u8 cmi_set_mipi_ctrl[] = {0xba, 0x12, 0x83, 0x00};
+static u8 cmi_set_blanking_opt_2[]  = {0xc7, 0x00, 0x40, 0x00};
+static u8 cmi_set_ltps_ctrl_output[] = {
+	0xd5, 0x00, 0x08, 0x08,
+	0x00, 0x44, 0x55, 0x66,
+	0x77, 0xcc, 0xcc, 0xcc,
+	0xcc, 0x00, 0x77, 0x66,
+	0x55, 0x44, 0xcc, 0xcc,
+	0xcc, 0xcc, 0x00, 0x00};
+static u8 cmi_set_video_cyc[] = {
+	0xd8, 0x00, 0x00, 0x04,
+	0x00, 0xa0, 0x04, 0x16,
+	0x9d, 0x30, 0x03, 0x16,
+	0x00, 0x03, 0x03, 0x00,
+	0x1b, 0x06, 0x07, 0x07,
+	0x00, 0x00, 0x00, 0x00};
+static u8 cmi_gamma_r[] = {
+	0xe0, 0x3a, 0x3e, 0x3c,
+	0x2f, 0x31, 0x32, 0x33,
+	0x46, 0x04, 0x08, 0x0c,
+	0x0d, 0x10, 0x0f, 0x11,
+	0x10, 0x17, 0x3a, 0x3e,
+	0x3c, 0x2f, 0x31, 0x32,
+	0x33, 0x46, 0x04, 0x08,
+	0x0c, 0x0d, 0x10, 0x0f,
+	0x11, 0x10, 0x17, 0x00};
+static u8 cmi_gamma_g[] = {
+	0xe1, 0x3b, 0x3e, 0x3d,
+	0x31, 0x31, 0x32, 0x33,
+	0x46, 0x03, 0x07, 0x0b,
+	0x0d, 0x10, 0x0e, 0x11,
+	0x10, 0x17, 0x3b, 0x3e,
+	0x3d, 0x31, 0x31, 0x32,
+	0x33, 0x46, 0x03, 0x07,
+	0x0b, 0x0d, 0x10, 0x0e,
+	0x11, 0x10, 0x17, 0x00};
+static u8 cmi_gamma_b[] = {
+	0xe2, 0x01, 0x06, 0x07,
+	0x2d, 0x2a, 0x32, 0x1f,
+	0x40, 0x05, 0x0c, 0x0e,
+	0x11, 0x14, 0x12, 0x13,
+	0x0f, 0x18, 0x01, 0x06,
+	0x07, 0x2d, 0x2a, 0x32,
+	0x1f, 0x40, 0x05, 0x0c,
+	0x0e, 0x11, 0x14, 0x12,
+	0x13, 0x0f, 0x18, 0x00};
+static u8 cmi_enter_set_cabc[] = {
+	0xc9, 0x1f, 0x00, 0x1e,
+	0x1e, 0x00, 0x00, 0x00,
+	0x01, 0xe3, 0x00, 0x00};
+static u8 cmi_mcs_protect_on[]      = {0xb9, 0x00, 0x00, 0x00};
+static u8 cmi_set_address_mode[]    = {0x36, 0x00, 0x00, 0x00};
+static u8 cmi_set_pixel_format[] = {0x3a, 0x70, 0x00, 0x00};
+static int mdfld_dsi_cmi_ic_init(struct mdfld_dsi_config *dsi_config)
+{
+	struct mdfld_dsi_pkg_sender *sender
+			= mdfld_dsi_get_pkg_sender(dsi_config);
+	struct drm_device *dev = dsi_config->dev;
+	struct mdfld_dsi_hw_registers *regs = &dsi_config->regs;
+	unsigned long wait_timeout;
+	if (!sender) {
+		DRM_ERROR("Cannot get sender\n");
+		return -EINVAL;
+	}
+
+	PSB_DEBUG_ENTRY("\n");
+	sender->status = MDFLD_DSI_PKG_SENDER_FREE;
+
+	/* sleep out and wait for 150ms. */
+	mdfld_dsi_send_mcs_short_hs(sender,
+			exit_sleep_mode, 0, 0, 0);
+	wait_timeout = jiffies + (3 * HZ / 20);
+	while (time_before_eq(jiffies, wait_timeout))
+		cpu_relax();
+	if (sender->status == MDFLD_DSI_CONTROL_ABNORMAL)
+		return -EIO;
+
+	mdfld_dsi_send_gen_long_hs(sender,
+			cmi_set_extension, 4, 0);
+	wait_timeout = jiffies + (HZ / 100);
+	while (time_before_eq(jiffies, wait_timeout))
+		cpu_relax();
+	if (sender->status == MDFLD_DSI_CONTROL_ABNORMAL)
+		return -EIO;
+
+	/* set TE on and wait for 10ms. */
+	mdfld_dsi_send_mcs_short_hs(sender,
+			set_tear_on, 0, 1, 0);
+	wait_timeout = jiffies + (HZ / 100);
+	while (time_before_eq(jiffies, wait_timeout))
+		cpu_relax();
+	if (sender->status == MDFLD_DSI_CONTROL_ABNORMAL)
+		return -EIO;
+
+	mdfld_dsi_send_mcs_short_hs(sender,
+		write_display_brightness, 0xff, 1, 0);
+	wait_timeout = jiffies + (HZ / 100);
+	while (time_before_eq(jiffies, wait_timeout))
+		cpu_relax();
+	if (sender->status == MDFLD_DSI_CONTROL_ABNORMAL)
+		return -EIO;
+
+	mdfld_dsi_send_mcs_short_hs(sender,
+			write_ctrl_display, 0x24, 1, 0);
+	wait_timeout = jiffies + (HZ / 100);
+	while (time_before_eq(jiffies, wait_timeout))
+		cpu_relax();
+	if (sender->status == MDFLD_DSI_CONTROL_ABNORMAL)
+		return -EIO;
+
+	mdfld_dsi_send_mcs_short_hs(sender,
+			write_ctrl_cabc, 0x2, STILL_IMAGE, 0);
+	wait_timeout = jiffies + (HZ / 100);
+	while (time_before_eq(jiffies, wait_timeout))
+		cpu_relax();
+	if (sender->status == MDFLD_DSI_CONTROL_ABNORMAL)
+		return -EIO;
+
+	mdfld_dsi_send_gen_long_hs(sender,
+			cmi_ic_bias_current, 5, 0);
+	wait_timeout = jiffies + (HZ / 100);
+	while (time_before_eq(jiffies, wait_timeout))
+		cpu_relax();
+	if (sender->status == MDFLD_DSI_CONTROL_ABNORMAL)
+		return -EIO;
+
+	mdfld_dsi_send_gen_long_hs(sender,
+			cmi_set_power, 0xe, 0);
+	wait_timeout = jiffies + (HZ / 100);
+	while (time_before_eq(jiffies, wait_timeout))
+		cpu_relax();
+	if (sender->status == MDFLD_DSI_CONTROL_ABNORMAL)
+		return -EIO;
+
+	mdfld_dsi_send_gen_long_hs(sender,
+			cmi_set_disp_reg, 0xd, 0);
+	wait_timeout = jiffies + (HZ / 100);
+	while (time_before_eq(jiffies, wait_timeout))
+		cpu_relax();
+	if (sender->status == MDFLD_DSI_CONTROL_ABNORMAL)
+		return -EIO;
+
+	mdfld_dsi_send_gen_long_hs(sender,
+			cmi_set_command_cyc, 24, 0);
+	wait_timeout = jiffies + (HZ / 100);
+	while (time_before_eq(jiffies, wait_timeout))
+		cpu_relax();
+	if (sender->status == MDFLD_DSI_CONTROL_ABNORMAL)
+		return -EIO;
+
+	mdfld_dsi_send_gen_long_hs(sender,
+			cmi_set_mipi_ctrl, 4, 0);
+	wait_timeout = jiffies + (HZ / 100);
+	while (time_before_eq(jiffies, wait_timeout))
+		cpu_relax();
+	if (sender->status == MDFLD_DSI_CONTROL_ABNORMAL)
+		return -EIO;
+
+	REG_WRITE(regs->device_ready_reg, 0);
+	REG_WRITE(regs->hs_tx_timeout_reg, 0x00ffffff);
+	REG_WRITE(regs->lp_rx_timeout_reg, 0x00ffffff);
+	REG_WRITE(regs->turn_around_timeout_reg, 0x0000ffff);
+	REG_WRITE(regs->device_reset_timer_reg, 0x000000ff);
+	REG_WRITE(regs->high_low_switch_count_reg, 0x00000020);
+	REG_WRITE(regs->clk_lane_switch_time_cnt_reg, 0x0020000e);
+	REG_WRITE(regs->eot_disable_reg, 0x00000000);
+	REG_WRITE(regs->init_count_reg, 0x0000007D0);
+	REG_WRITE(regs->lp_byteclk_reg, 0x0000000e);
+	REG_WRITE(regs->dphy_param_reg, 0x1b104315);
+
+	REG_WRITE(regs->mipi_reg, 0x80030100);
+	REG_WRITE(regs->mipi_control_reg, 0x18);
+	REG_WRITE(regs->dsi_func_prg_reg, 0x203);
+	REG_WRITE(regs->video_mode_format_reg, 0x17);
+
+	REG_WRITE(regs->intr_en_reg, 0xffffffff);
+	REG_WRITE(regs->dpi_resolution_reg, 0x50002d0);
+	REG_WRITE(regs->hsync_count_reg, 0x4);
+	REG_WRITE(regs->hbp_count_reg, 0x33);
+	REG_WRITE(regs->hfp_count_reg, 0x30);
+	REG_WRITE(regs->hactive_count_reg, 0x5a0);
+	REG_WRITE(regs->vsync_count_reg, 0x8);
+	REG_WRITE(regs->vbp_count_reg, 0x8);
+	REG_WRITE(regs->vfp_count_reg, 0x8);
+	REG_WRITE(regs->video_mode_format_reg, 0x1f);
+	REG_WRITE(regs->device_ready_reg, 1);
+
+	mdfld_dsi_send_mcs_short_hs(sender,
+			set_video_mode, 0x3, 1, 0);
+	wait_timeout = jiffies + (HZ / 100);
+	while (time_before_eq(jiffies, wait_timeout))
+		cpu_relax();
+	if (sender->status == MDFLD_DSI_CONTROL_ABNORMAL)
+		return -EIO;
+
+	mdfld_dsi_send_gen_long_hs(sender,
+			cmi_set_blanking_opt_2, 4, 0);
+	wait_timeout = jiffies + (HZ / 100);
+	while (time_before_eq(jiffies, wait_timeout))
+		cpu_relax();
+	if (sender->status == MDFLD_DSI_CONTROL_ABNORMAL)
+		return -EIO;
+
+	mdfld_dsi_send_mcs_short_hs(sender,
+			set_panel, 0x8, 1, 0);
+	wait_timeout = jiffies + (HZ / 100);
+	while (time_before_eq(jiffies, wait_timeout))
+		cpu_relax();
+	if (sender->status == MDFLD_DSI_CONTROL_ABNORMAL)
+		return -EIO;
+
+	mdfld_dsi_send_mcs_short_hs(sender,
+			set_eq_func_ltps, 0xc, 1, 0);
+	wait_timeout = jiffies + (HZ / 100);
+	while (time_before_eq(jiffies, wait_timeout))
+		cpu_relax();
+	if (sender->status == MDFLD_DSI_CONTROL_ABNORMAL)
+		return -EIO;
+
+	mdfld_dsi_send_gen_long_hs(sender, cmi_set_ltps_ctrl_output, 24, 0);
+	wait_timeout = jiffies + (HZ / 100);
+	while (time_before_eq(jiffies, wait_timeout))
+		cpu_relax();
+	if (sender->status == MDFLD_DSI_CONTROL_ABNORMAL)
+		return -EIO;
+
+	mdfld_dsi_send_gen_long_hs(sender, cmi_set_video_cyc, 24, 0);
+	wait_timeout = jiffies + (HZ / 100);
+	while (time_before_eq(jiffies, wait_timeout))
+		cpu_relax();
+	if (sender->status == MDFLD_DSI_CONTROL_ABNORMAL)
+		return -EIO;
+
+	mdfld_dsi_send_gen_long_hs(sender, cmi_gamma_r, 36, 0);
+	wait_timeout = jiffies + (HZ / 100);
+	while (time_before_eq(jiffies, wait_timeout))
+		cpu_relax();
+	if (sender->status == MDFLD_DSI_CONTROL_ABNORMAL)
+		return -EIO;
+
+	mdfld_dsi_send_gen_long_hs(sender, cmi_gamma_g, 36, 0);
+	wait_timeout = jiffies + (HZ / 100);
+	while (time_before_eq(jiffies, wait_timeout))
+		cpu_relax();
+	if (sender->status == MDFLD_DSI_CONTROL_ABNORMAL)
+		return -EIO;
+
+	mdfld_dsi_send_gen_long_hs(sender, cmi_gamma_b, 36, 0);
+	wait_timeout = jiffies + (HZ / 100);
+	while (time_before_eq(jiffies, wait_timeout))
+		cpu_relax();
+	if (sender->status == MDFLD_DSI_CONTROL_ABNORMAL)
+		return -EIO;
+
+	mdfld_dsi_send_gen_long_hs(sender, cmi_enter_set_cabc, 10, 0);
+	wait_timeout = jiffies + (HZ / 100);
+	while (time_before_eq(jiffies, wait_timeout))
+		cpu_relax();
+	if (sender->status == MDFLD_DSI_CONTROL_ABNORMAL)
+		return -EIO;
+
+	mdfld_dsi_send_gen_long_hs(sender, cmi_mcs_protect_on, 4, 0);
+	wait_timeout = jiffies + (HZ / 100);
+	while (time_before_eq(jiffies, wait_timeout))
+		cpu_relax();
+	if (sender->status == MDFLD_DSI_CONTROL_ABNORMAL)
+		return -EIO;
+
+	mdfld_dsi_send_gen_long_hs(sender, cmi_set_address_mode, 4, 0);
+	wait_timeout = jiffies + (HZ / 100);
+	while (time_before_eq(jiffies, wait_timeout))
+		cpu_relax();
+	if (sender->status == MDFLD_DSI_CONTROL_ABNORMAL)
+		return -EIO;
+
+	mdfld_dsi_send_gen_long_hs(sender, cmi_set_pixel_format, 4, 0);
+	wait_timeout = jiffies + (HZ / 100);
+	while (time_before_eq(jiffies, wait_timeout))
+		cpu_relax();
+	if (sender->status == MDFLD_DSI_CONTROL_ABNORMAL)
+		return -EIO;
+
+	return 0;
+}
+
+static
+void mdfld_dsi_cmi_dsi_controller_init(struct mdfld_dsi_config *dsi_config)
+{
+	struct mdfld_dsi_hw_context *hw_ctx =
+		&dsi_config->dsi_hw_context;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	/*reconfig lane configuration*/
+	dsi_config->lane_count = 2;
+	dsi_config->lane_config = MDFLD_DSI_DATA_LANE_4_0;
+	hw_ctx->pll_bypass_mode = 0;
+	/* This is for 400 mhz.  Set it to 0 for 800mhz */
+	hw_ctx->cck_div = 1;
+
+	hw_ctx->mipi_control = 0x18;
+	hw_ctx->intr_en = 0xffffffff;
+	hw_ctx->hs_tx_timeout = 0xffffff;
+	hw_ctx->lp_rx_timeout = 0xffffff;
+	hw_ctx->turn_around_timeout = 0xffff;
+	hw_ctx->device_reset_timer = 0xff;
+	hw_ctx->high_low_switch_count = 0x1C;
+	hw_ctx->init_count = 0x7d0;
+	hw_ctx->eot_disable = 0x0;
+	hw_ctx->lp_byteclk = 0x4;
+	hw_ctx->clk_lane_switch_time_cnt = 0x1E000E;
+	hw_ctx->dphy_param = 0x1B104315;
+
+	/*setup video mode format*/
+	hw_ctx->video_mode_format = 0x17 ;
+
+	/*set up func_prg*/
+	hw_ctx->dsi_func_prg = (0x200 | dsi_config->lane_count);
+	/*setup mipi port configuration*/
+	hw_ctx->mipi = MIPI_PORT_EN | PASS_FROM_SPHY_TO_AFE |
+		BANDGAP_CHICKEN_BIT | dsi_config->lane_config | BIT17;
+}
+
+static
+int mdfld_dsi_cmi_detect(struct mdfld_dsi_config *dsi_config)
+{
+	int status;
+	int pipe = dsi_config->pipe;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (pipe == 0) {
+		status = MDFLD_DSI_PANEL_CONNECTED;
+	} else {
+		DRM_INFO("%s: do NOT support dual panel\n", __func__);
+		status = MDFLD_DSI_PANEL_DISCONNECTED;
+	}
+
+	return status;
+}
+
+static int mdfld_dsi_cmi_power_on(struct mdfld_dsi_config *dsi_config)
+{
+	struct mdfld_dsi_pkg_sender *sender =
+		mdfld_dsi_get_pkg_sender(dsi_config);
+	int err;
+	PSB_DEBUG_ENTRY("\n");
+
+	if (!sender) {
+		DRM_ERROR("Failed to get DSI packet sender\n");
+		return -EINVAL;
+	}
+
+	/* Sleep Out */
+	err = mdfld_dsi_send_mcs_short_hs(sender, exit_sleep_mode, 0, 0,
+			MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Exit Sleep Mode\n", __func__, __LINE__);
+		goto power_on_err;
+	}
+	/* Wait for 6 frames after exit_sleep_mode. */
+	msleep(100);
+
+	/* Set Display on */
+	err = mdfld_dsi_send_mcs_short_hs(sender, set_display_on, 0, 0,
+			MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Set Display On\n", __func__, __LINE__);
+		goto power_on_err;
+	}
+	/* Wait for 1 frame after set_display_on. */
+
+	err = mdfld_dsi_send_dpi_spk_pkg_hs(sender, MDFLD_DSI_DPI_SPK_TURN_ON);
+	if (err) {
+		DRM_ERROR("Failed to send turn on packet\n");
+		goto power_on_err;
+	}
+	return 0;
+
+power_on_err:
+	err = -EIO;
+	return err;
+}
+
+static void __vpro2_power_ctrl(bool on)
+{
+	u8 addr, value;
+	addr = 0xad;
+	if (intel_scu_ipc_ioread8(addr, &value))
+		DRM_ERROR("%s: %d: failed to read vPro2\n", __func__, __LINE__);
+
+	/* Control vPROG2 power rail with 2.85v. */
+	if (on)
+		value |= 0x1;
+	else
+		value &= ~0x1;
+
+	if (intel_scu_ipc_iowrite8(addr, value))
+		DRM_ERROR("%s: %d: failed to write vPro2\n",
+				__func__, __LINE__);
+}
+
+static int mdfld_dsi_cmi_power_off(struct mdfld_dsi_config *dsi_config)
+{
+	struct mdfld_dsi_pkg_sender *sender =
+		mdfld_dsi_get_pkg_sender(dsi_config);
+	int err;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (!sender) {
+		DRM_ERROR("Failed to get DSI packet sender\n");
+		return -EINVAL;
+	}
+
+	/*send SHUT_DOWN packet */
+	err = mdfld_dsi_send_dpi_spk_pkg_hs(sender,
+			MDFLD_DSI_DPI_SPK_SHUT_DOWN);
+	if (err) {
+		DRM_ERROR("Failed to send turn off packet\n");
+		goto power_off_err;
+	}
+	/* According HW DSI spec, need to wait for 100ms. */
+	msleep(100);
+
+	/* Set Display off */
+	err = mdfld_dsi_send_mcs_short_hs(sender, set_display_off, 0, 0,
+			MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Set Display On\n", __func__, __LINE__);
+		goto power_off_err;
+	}
+	/* Wait for 1 frame after set_display_on. */
+	msleep(20);
+
+	/* Sleep In */
+	err = mdfld_dsi_send_mcs_short_hs(sender, enter_sleep_mode, 0, 0,
+			MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Exit Sleep Mode\n", __func__, __LINE__);
+		goto power_off_err;
+	}
+	/* Wait for 3 frames after enter_sleep_mode. */
+	msleep(51);
+
+	__vpro2_power_ctrl(false);
+
+	return 0;
+
+power_off_err:
+	err = -EIO;
+	return err;
+}
+
+static int mdfld_dsi_cmi_set_brightness(struct mdfld_dsi_config *dsi_config,
+		int level)
+{
+	struct mdfld_dsi_pkg_sender *sender =
+		mdfld_dsi_get_pkg_sender(dsi_config);
+	u8 duty_val = 0;
+	unsigned long wait_timeout;
+
+	if (!sender) {
+		DRM_ERROR("Failed to get DSI packet sender\n");
+		return -EINVAL;
+	}
+
+	duty_val = (255 * level) / 255;
+
+	mdfld_dsi_send_mcs_short_hs(sender, 0x51, duty_val, 1, 0);
+	wait_timeout = jiffies + (HZ / 100);
+	while (time_before_eq(jiffies, wait_timeout))
+		cpu_relax();
+
+	return 0;
+}
+
+static int mdfld_dsi_cmi_panel_reset(struct mdfld_dsi_config *dsi_config)
+{
+	static int mipi_reset_gpio;
+
+	PSB_DEBUG_ENTRY("\n");
+	__vpro2_power_ctrl(true);
+	mdelay(1100);
+	mipi_reset_gpio = 190;
+	gpio_direction_output(mipi_reset_gpio, 0);
+
+	gpio_set_value_cansleep(mipi_reset_gpio, 0);
+	mdelay(100);
+
+	gpio_set_value_cansleep(mipi_reset_gpio, 1);
+	mdelay(400);
+
+	return 0;
+}
+
+static struct drm_display_mode *cmi_vid_get_config_mode(void)
+{
+	struct drm_display_mode *mode;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+	if (!mode)
+		return NULL;
+
+	mode->hdisplay = 720;
+	mode->vdisplay = 1280;
+	mode->hsync_start = 816;
+	mode->hsync_end = 824;
+	mode->htotal = 920;
+	mode->vsync_start = 1284;
+	mode->vsync_end = 1286;
+	mode->vtotal = 1300;
+
+	mode->vrefresh = 60;
+	mode->clock =  mode->vrefresh * mode->vtotal *
+		mode->htotal / 1000;
+
+	drm_mode_set_name(mode);
+	drm_mode_set_crtcinfo(mode, 0);
+
+	mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+	return mode;
+}
+
+static void  cmi_vid_get_panel_info(int pipe, struct panel_info *pi)
+{
+	if (!pi)
+		return;
+
+	if (pipe == 0) {
+		pi->width_mm = PANEL_4DOT3_WIDTH;
+		pi->height_mm = PANEL_4DOT3_HEIGHT;
+	}
+
+	return;
+}
+
+void  cmi_vid_init(struct drm_device *dev, struct panel_funcs *p_funcs)
+{
+	PSB_DEBUG_ENTRY("\n");
+
+	p_funcs->get_config_mode =  cmi_vid_get_config_mode;
+	p_funcs->get_panel_info =  cmi_vid_get_panel_info;
+	p_funcs->reset = mdfld_dsi_cmi_panel_reset;
+	p_funcs->drv_ic_init = mdfld_dsi_cmi_ic_init;
+	p_funcs->dsi_controller_init =
+		mdfld_dsi_cmi_dsi_controller_init;
+	p_funcs->detect = mdfld_dsi_cmi_detect;
+	p_funcs->power_on = mdfld_dsi_cmi_power_on;
+	p_funcs->power_off = mdfld_dsi_cmi_power_off;
+	p_funcs->set_brightness =
+		mdfld_dsi_cmi_set_brightness;
+}
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/dc_callbacks.c b/drivers/external_drivers/intel_media/display/tng/drv/dc_callbacks.c
new file mode 100644
index 0000000..8ed94b3
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/dc_callbacks.c
@@ -0,0 +1,813 @@
+/*****************************************************************************
+ *
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ ******************************************************************************/
+#include <linux/console.h>
+
+#include "psb_drv.h"
+#include "pmu_tng.h"
+#include "psb_fb.h"
+#include "psb_intel_reg.h"
+#include "displayclass_interface.h"
+#include "mdfld_dsi_output.h"
+#include "pwr_mgmt.h"
+#include "mdfld_dsi_dbi_dsr.h"
+
+#define KEEP_UNUSED_CODE 0
+
+#if KEEP_UNUSED_CODE
+static int FindCurPipe(struct drm_device *dev)
+{
+	struct drm_crtc *crtc;
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		if (drm_helper_crtc_in_use(crtc)) {
+			struct psb_intel_crtc *psb_intel_crtc =
+			    to_psb_intel_crtc(crtc);
+			return psb_intel_crtc->pipe;
+		}
+	}
+
+	return 0;
+}
+#endif /* if KEEP_UNUSED_CODE */
+
+static void user_mode_start(struct drm_psb_private *dev_priv)
+{
+	if (!dev_priv->um_start) {
+		dev_priv->um_start = true;
+		dev_priv->b_async_flip_enable = true;
+		if (dev_priv->b_dsr_enable_config)
+			dev_priv->b_dsr_enable = true;
+	}
+}
+
+static void DCWriteReg(struct drm_device *dev, unsigned long ulOffset,
+		       unsigned long ulValue)
+{
+	struct drm_psb_private *dev_priv;
+	void *pvRegAddr;
+
+	dev_priv = (struct drm_psb_private *)dev->dev_private;
+	pvRegAddr = (void *)(dev_priv->vdc_reg + ulOffset);
+	mb();
+	iowrite32(ulValue, pvRegAddr);
+}
+
+void DCCBGetFramebuffer(struct drm_device *dev, struct psb_framebuffer **ppsb)
+{
+	struct drm_psb_private *dev_priv;
+	struct psb_fbdev *fbdev;
+
+	dev_priv = (struct drm_psb_private *)dev->dev_private;
+	fbdev = dev_priv->fbdev;
+	if (fbdev != NULL)
+		*ppsb = fbdev->pfb;
+}
+
+int DCChangeFrameBuffer(struct drm_device *dev,
+			struct psb_framebuffer *psbfb)
+{
+	return 0;
+}
+
+int DCCBEnableVSyncInterrupt(struct drm_device *dev, int pipe)
+{
+	struct drm_psb_private *dev_priv;
+	int ret = 0;
+
+	dev_priv = (struct drm_psb_private *)dev->dev_private;
+	if (drm_vblank_get(dev, pipe)) {
+		DRM_DEBUG("Couldn't enable vsync interrupt\n");
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+void DCCBDisableVSyncInterrupt(struct drm_device *dev, int pipe)
+{
+	struct drm_psb_private *dev_priv;
+
+	dev_priv = (struct drm_psb_private *)dev->dev_private;
+	drm_vblank_put(dev, pipe);
+}
+
+void DCCBInstallVSyncISR(struct drm_device *dev,
+			 pfn_vsync_handler pVsyncHandler)
+{
+	struct drm_psb_private *dev_priv;
+
+	dev_priv = (struct drm_psb_private *)dev->dev_private;
+	dev_priv->psb_vsync_handler = pVsyncHandler;
+}
+
+void DCCBUninstallVSyncISR(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *)dev->dev_private;
+
+	dev_priv->psb_vsync_handler = NULL;
+}
+
+void DCCBFlipToSurface(struct drm_device *dev, unsigned long uiAddr,
+				unsigned long uiFormat, unsigned long uiStride,
+		       unsigned int pipeflag)
+{
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *)dev->dev_private;
+	u32 dspsurf = (dev_priv->cur_pipe == 0 ? DSPASURF : DSPBSURF);
+	u32 dspcntr;
+	u32 dspstride;
+	u32 reg_offset;
+	u32 val = 0;
+	u32 power_island = 0;
+	struct mdfld_dsi_config *dsi_config = NULL;
+	struct mdfld_dsi_hw_context *dsi_ctx;
+
+	DRM_DEBUG("%s %s %d, uiAddr = 0x%lx\n", __FILE__, __func__,
+			  __LINE__, uiAddr);
+
+	user_mode_start(dev_priv);
+
+	if (pipeflag == 0) {
+		dsi_config = dev_priv->dsi_configs[0];
+		reg_offset = 0;
+	} else if (pipeflag == 2) {
+		dsi_config = dev_priv->dsi_configs[1];
+		reg_offset = 0x2000;
+	} else if (pipeflag == 1) {
+		dsi_config = NULL;
+		reg_offset = 0x1000;
+	} else {
+		DRM_ERROR("%s: invalid pipe %u\n", __func__, pipeflag);
+		return;
+	}
+
+	/*update format*/
+	val = (0x80000000 | uiFormat);
+
+	if (dsi_config) {
+		dsi_ctx = &dsi_config->dsi_hw_context;
+		dsi_ctx->dspstride = uiStride;
+		dsi_ctx->dspcntr = val;
+		dsi_ctx->dspsurf = uiAddr;
+	}
+
+	dspsurf = DSPASURF + reg_offset;
+	dspcntr = DSPACNTR + reg_offset;
+	dspstride = DSPASTRIDE + reg_offset;
+
+	DCWriteReg(dev, dspcntr, val);
+	/*update stride*/
+	DCWriteReg(dev, dspstride, uiStride);
+	/*update surface address*/
+	DCWriteReg(dev, dspsurf, uiAddr);
+}
+
+void DCCBFlipOverlay(struct drm_device *dev,
+			struct intel_dc_overlay_ctx *ctx)
+{
+	struct drm_psb_private *dev_priv;
+	struct mdfld_dsi_config *dsi_config = NULL;
+	struct mdfld_dsi_hw_context *dsi_ctx;
+	u32 ovadd_reg = OV_OVADD;
+
+	if (!dev || !ctx)
+		return;
+
+	dev_priv = (struct drm_psb_private *)dev->dev_private;
+
+	user_mode_start(dev_priv);
+
+	if (ctx->index == 1)
+		ovadd_reg = OVC_OVADD;
+
+	ctx->ovadd |= 1;
+
+	if (ctx->pipe == 0)
+		dsi_config = dev_priv->dsi_configs[0];
+	else if (ctx->pipe == 2)
+		dsi_config = dev_priv->dsi_configs[1];
+
+	if (dsi_config) {
+		dsi_ctx = &dsi_config->dsi_hw_context;
+		if (ctx->index == 0)
+			dsi_ctx->ovaadd = ctx->ovadd;
+		else if (ctx->index == 1)
+			dsi_ctx->ovcadd = ctx->ovadd;
+	}
+
+	PSB_WVDC32(ctx->ovadd, ovadd_reg);
+}
+
+void DCCBFlipSprite(struct drm_device *dev,
+			struct intel_dc_sprite_ctx *ctx)
+{
+	struct drm_psb_private *dev_priv;
+	struct mdfld_dsi_config *dsi_config = NULL;
+	struct mdfld_dsi_hw_context *dsi_ctx;
+	u32 reg_offset = 0x3000;
+
+	if (!dev || !ctx)
+		return;
+
+	dev_priv = (struct drm_psb_private *)dev->dev_private;
+
+	user_mode_start(dev_priv);
+
+	if (ctx->index != 0) {
+		DRM_ERROR("%s: invalid index %d\n", __func__, ctx->index);
+		return;
+	}
+
+	/* asign sprite to pipe */
+	ctx->cntr &= ~DISPPLANE_SEL_PIPE_MASK;
+
+	if (ctx->pipe == 1)
+		ctx->cntr |= DISPPLANE_SEL_PIPE_B;
+	else if (ctx->pipe == 0) {
+		ctx->cntr |= DISPPLANE_SEL_PIPE_A;
+		dsi_config = dev_priv->dsi_configs[0];
+	} else if (ctx->pipe == 2) {
+		ctx->cntr |= DISPPLANE_SEL_PIPE_C;
+		dsi_config = dev_priv->dsi_configs[1];
+	}
+
+	if ((ctx->update_mask & SPRITE_UPDATE_POSITION))
+		PSB_WVDC32(ctx->pos, DSPAPOS + reg_offset);
+
+	if ((ctx->update_mask & SPRITE_UPDATE_SIZE)) {
+		PSB_WVDC32(ctx->size, DSPASIZE + reg_offset);
+		PSB_WVDC32(ctx->stride, DSPASTRIDE + reg_offset);
+	}
+
+	if ((ctx->update_mask & SPRITE_UPDATE_CONSTALPHA))
+		PSB_WVDC32(ctx->contalpa, DSPACONSTALPHA + reg_offset);
+
+	if ((ctx->update_mask & SPRITE_UPDATE_CONTROL)){
+                if(drm_psb_set_gamma_success)
+		        PSB_WVDC32(ctx->cntr | DISPPLANE_GAMMA_ENABLE, DSPACNTR + reg_offset);
+                else
+                        PSB_WVDC32(ctx->cntr, DSPACNTR + reg_offset);
+        }
+
+	if ((ctx->update_mask & SPRITE_UPDATE_SURFACE)) {
+		PSB_WVDC32(ctx->linoff, DSPALINOFF + reg_offset);
+		PSB_WVDC32(ctx->surf, DSPASURF + reg_offset);
+	}
+
+	if (dsi_config) {
+		dsi_ctx = &dsi_config->dsi_hw_context;
+		dsi_ctx->sprite_dsppos = ctx->pos;
+		dsi_ctx->sprite_dspsize = ctx->size;
+		dsi_ctx->sprite_dspstride = ctx->stride;
+		dsi_ctx->sprite_dspcntr = ctx->cntr | ((PSB_RVDC32(DSPACNTR + reg_offset) & DISPPLANE_GAMMA_ENABLE));
+		dsi_ctx->sprite_dsplinoff = ctx->linoff;
+		dsi_ctx->sprite_dspsurf = ctx->surf;
+	}
+}
+
+void DCCBFlipPrimary(struct drm_device *dev,
+			struct intel_dc_primary_ctx *ctx)
+{
+	struct drm_psb_private *dev_priv;
+	struct mdfld_dsi_config *dsi_config = NULL;
+	struct mdfld_dsi_hw_context *dsi_ctx;
+	u32 reg_offset;
+	int pipe;
+
+	if (!dev || !ctx)
+		return;
+
+	dev_priv = (struct drm_psb_private *)dev->dev_private;
+
+	user_mode_start(dev_priv);
+
+	if (ctx->index == 0) {
+		reg_offset = 0;
+		dsi_config = dev_priv->dsi_configs[0];
+		pipe = 0;
+	} else if (ctx->index == 1) {
+		reg_offset = 0x1000;
+		pipe = 1;
+	} else if (ctx->index == 2) {
+		reg_offset = 0x2000;
+		dsi_config = dev_priv->dsi_configs[1];
+		pipe = 2;
+	} else
+		return;
+
+	if ((ctx->update_mask & SPRITE_UPDATE_POSITION))
+		PSB_WVDC32(ctx->pos, DSPAPOS + reg_offset);
+
+	if ((ctx->update_mask & SPRITE_UPDATE_SIZE)) {
+		PSB_WVDC32(ctx->size, DSPASIZE + reg_offset);
+		PSB_WVDC32(ctx->stride, DSPASTRIDE + reg_offset);
+	}
+
+	if ((ctx->update_mask & SPRITE_UPDATE_CONSTALPHA))
+		PSB_WVDC32(ctx->contalpa, DSPACONSTALPHA + reg_offset);
+
+	if ((ctx->update_mask & SPRITE_UPDATE_CONTROL)){
+                if(drm_psb_set_gamma_success)
+                        PSB_WVDC32(ctx->cntr | DISPPLANE_GAMMA_ENABLE, DSPACNTR + reg_offset);
+                else
+                        PSB_WVDC32(ctx->cntr, DSPACNTR + reg_offset);
+        }
+
+	if ((ctx->update_mask & SPRITE_UPDATE_SURFACE)) {
+		PSB_WVDC32(ctx->linoff, DSPALINOFF + reg_offset);
+		PSB_WVDC32(ctx->surf, DSPASURF + reg_offset);
+	}
+
+	if (dsi_config) {
+		dsi_ctx = &dsi_config->dsi_hw_context;
+		dsi_ctx->dsppos = ctx->pos;
+		dsi_ctx->dspsize = ctx->size;
+		dsi_ctx->dspstride = ctx->stride;
+		dsi_ctx->dspcntr = ctx->cntr | ((PSB_RVDC32(DSPACNTR + reg_offset) & DISPPLANE_GAMMA_ENABLE));
+		dsi_ctx->dsplinoff = ctx->linoff;
+		dsi_ctx->dspsurf = ctx->surf;
+	}
+}
+
+void DCCBSetPipeToOvadd(u32 *ovadd, int pipe)
+{
+	switch (pipe) {
+	case 0:
+		*ovadd |= OV_PIPE_A << OV_PIPE_SELECT_POS;
+		break;
+	case 1:
+		*ovadd |= OV_PIPE_B << OV_PIPE_SELECT_POS;
+		break;
+	case 2:
+		*ovadd |= OV_PIPE_C << OV_PIPE_SELECT_POS;
+		break;
+	}
+
+	return;
+}
+
+void DCCBSetupZorder(struct drm_device *dev,
+			struct intel_dc_plane_zorder *zorder,
+			int pipe)
+{
+	struct drm_psb_private *dev_priv;
+	u32 dspcntr_reg;
+	u32 dspsurf_reg;
+	u32 sprite_reg = DSPACNTR + 0x3000;
+	u32 sprite_surf_reg = DSPASURF + 0x3000;
+
+	if (!dev || pipe < 0 || pipe > 2)
+		return;
+
+	if (pipe == 0) {
+		dspcntr_reg = DSPACNTR;
+		dspsurf_reg = DSPASURF;
+	} else if (pipe == 1) {
+		dspcntr_reg = DSPACNTR + 0x1000;
+		dspsurf_reg = DSPASURF + 0x1000;
+	} else if (pipe == 2) {
+		dspcntr_reg = DSPACNTR + 0x2000;
+		dspsurf_reg = DSPASURF + 0x2000;
+	} else
+		return;
+
+	dev_priv = (struct drm_psb_private *)dev->dev_private;
+
+	PSB_WVDC32((PSB_RVDC32(dspcntr_reg) & ~0x00000004), dspcntr_reg);
+	PSB_WVDC32((PSB_RVDC32(sprite_reg) & ~0x00000002), sprite_reg);
+
+	if (zorder->forceBottom[pipe])
+		PSB_WVDC32((PSB_RVDC32(dspcntr_reg) | 0x00000004), dspcntr_reg);
+
+	if (zorder->abovePrimary)
+		PSB_WVDC32((PSB_RVDC32(sprite_reg) | 0x00000002), sprite_reg);
+
+	PSB_WVDC32(PSB_RVDC32(dspsurf_reg), dspsurf_reg);
+	PSB_WVDC32(PSB_RVDC32(sprite_surf_reg), sprite_surf_reg);
+}
+
+static void _OverlayWaitFlip(
+	struct drm_device *dev, u32 ovstat_reg, int index, int pipe)
+{
+	int retry;
+	int ret = -EBUSY;
+
+	if (DCCBEnableVSyncInterrupt(dev, pipe) != 0) {
+		DRM_ERROR("%s: failed to enable vblank on pipe %d\n",
+			__func__, pipe);
+		return;
+	}
+
+	/* HDMI pipe can run as low as 24Hz */
+	retry = 600;
+	if (pipe != 1) {
+		retry = 200;  /* 60HZ for MIPI */
+		DCCBDsrForbid(dev, pipe);
+	}
+	/**
+	 * make sure overlay command buffer
+	 * was copied before updating the system
+	 * overlay command buffer.
+	 */
+	while (--retry) {
+		if (pipe != 1 && ret == -EBUSY) {
+			ret = DCCBUpdateDbiPanel(dev, pipe);
+		}
+		if (BIT31 & PSB_RVDC32(ovstat_reg))
+			break;
+		udelay(100);
+	}
+
+	DCCBDisableVSyncInterrupt(dev, pipe);
+	if (pipe != 1)
+		DCCBDsrAllow(dev, pipe);
+
+	if (!retry)
+		DRM_ERROR("OVADD %d flip timeout on pipe %d!\n", index, pipe);
+}
+
+static int _GetPipeFromOvadd(u32 ovadd)
+{
+	int ov_pipe_sel = (ovadd & OV_PIPE_SELECT) >> OV_PIPE_SELECT_POS;
+	int pipe = 0;
+	switch (ov_pipe_sel) {
+	case OV_PIPE_A:
+		pipe = 0;
+		break;
+	case OV_PIPE_B:
+		pipe = 1;
+		break;
+	case OV_PIPE_C:
+		pipe = 2;
+		break;
+	}
+
+	return pipe;
+}
+
+int DCCBOverlayDisableAndWait(struct drm_device *dev, u32 ctx,
+			int index)
+{
+	u32 ovadd_reg = OV_OVADD;
+	u32 ovstat_reg = OV_DOVASTA;
+	u32 power_islands = OSPM_DISPLAY_A;
+	int pipe;
+
+	if (index != 0 && index != 1) {
+		DRM_ERROR("Invalid overlay index %d\n", index);
+		return -EINVAL;
+	}
+
+	if (index) {
+		ovadd_reg = OVC_OVADD;
+		ovstat_reg = OVC_DOVCSTA;
+		power_islands |= OSPM_DISPLAY_C;
+	}
+
+	pipe = _GetPipeFromOvadd(ctx);
+
+	if (power_island_get(power_islands)) {
+		PSB_WVDC32(ctx, ovadd_reg);
+
+		/*wait for overlay flipped*/
+		_OverlayWaitFlip(dev, ovstat_reg, index, pipe);
+
+		power_island_put(power_islands);
+	}
+
+	return 0;
+}
+
+int DCCBOverlayEnable(struct drm_device *dev, u32 ctx,
+			int index, int enabled)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct mdfld_dsi_config *dsi_config = NULL;
+	struct mdfld_dsi_hw_context *dsi_ctx;
+	u32 ovadd_reg = OV_OVADD;
+	u32 ovstat_reg = OV_DOVASTA;
+	u32 power_islands = OSPM_DISPLAY_A;
+	int pipe;
+
+	if (index != 0 && index != 1) {
+		DRM_ERROR("Invalid overlay index %d\n", index);
+		return -EINVAL;
+	}
+
+	if (index) {
+		ovadd_reg = OVC_OVADD;
+		ovstat_reg = OVC_DOVCSTA;
+		power_islands |= OSPM_DISPLAY_C;
+	}
+
+	pipe = _GetPipeFromOvadd(ctx);
+
+	if (!enabled) {
+		if (pipe == 0)
+			dsi_config = dev_priv->dsi_configs[0];
+		else if (pipe == 2)
+			dsi_config = dev_priv->dsi_configs[1];
+
+		if (dsi_config) {
+			dsi_ctx = &dsi_config->dsi_hw_context;
+			if (index == 0)
+				dsi_ctx->ovaadd = 0;
+			else if (index == 1)
+				dsi_ctx->ovcadd = 0;
+		}
+	}
+
+	if (power_island_get(power_islands)) {
+		/*make sure previous flip was done*/
+		//_OverlayWaitFlip(dev, ovstat_reg, index, pipe);
+		//_OverlayWaitVblank(dev, pipe);
+
+		PSB_WVDC32(ctx, ovadd_reg);
+
+		power_island_put(power_islands);
+	}
+
+	return 0;
+}
+
+int DCCBSpriteEnable(struct drm_device *dev, u32 ctx,
+			int index, int enabled)
+{
+	u32 power_islands = (OSPM_DISPLAY_A | OSPM_DISPLAY_C);
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct mdfld_dsi_config *dsi_config = NULL;
+	struct mdfld_dsi_hw_context *dsi_ctx = NULL;
+
+	if (index != 0) {
+		DRM_ERROR("Invalid overlay index %d\n", index);
+		return -EINVAL;
+	}
+
+	/* FIXME: need to check pipe info here. */
+	dsi_config = dev_priv->dsi_configs[0];
+
+	if (dsi_config)
+		dsi_ctx = &dsi_config->dsi_hw_context;
+
+	if (power_island_get(power_islands)) {
+		if (dsi_ctx)
+			dsi_ctx->sprite_dspcntr &= ~DISPLAY_PLANE_ENABLE;
+		PSB_WVDC32((PSB_RVDC32(DSPDCNTR) & ~DISPLAY_PLANE_ENABLE),
+				DSPDCNTR);
+		PSB_WVDC32((PSB_RVDC32(DSPDSURF)), DSPDSURF);
+		power_island_put(power_islands);
+	}
+
+	return 0;
+}
+
+int DCCBPrimaryEnable(struct drm_device *dev, u32 ctx,
+			int index, int enabled)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct mdfld_dsi_config *dsi_config = NULL;
+	struct mdfld_dsi_hw_context *dsi_ctx = NULL;
+	u32 sprite_reg = DSPACNTR + 0x3000;
+	u32 reg_offset;
+
+	if (index < 0 || index > 2) {
+		DRM_ERROR("Invalid primary index %d\n", index);
+		return -EINVAL;
+	}
+
+	if (index == 0) {
+		dsi_config = dev_priv->dsi_configs[0];
+		reg_offset = 0;
+	} else if (index == 1) {
+		reg_offset = 0x1000;
+	} else if (index == 2) {
+		dsi_config = dev_priv->dsi_configs[1];
+		reg_offset = 0x2000;
+	}
+
+	if (dsi_config) {
+		dsi_ctx = &dsi_config->dsi_hw_context;
+		dsi_ctx->dsppos = 0;
+		dsi_ctx->dspsize = (63 << 16) | 63;
+		dsi_ctx->dspstride = (64 << 2);
+		dsi_ctx->dspcntr = DISPPLANE_32BPP_NO_ALPHA;
+		dsi_ctx->dspcntr |= (BIT31 & PSB_RVDC32(DSPACNTR + reg_offset));
+		dsi_ctx->dsplinoff = 0;
+		dsi_ctx->dspsurf = 0;
+	}
+
+	PSB_WVDC32(0, DSPAPOS + reg_offset);
+	PSB_WVDC32((63 << 16) | 63, DSPASIZE + reg_offset);
+	PSB_WVDC32((64 << 2), DSPASTRIDE + reg_offset);
+	PSB_WVDC32(0x18000000 | (BIT31 & PSB_RVDC32(DSPACNTR + reg_offset)),
+		DSPACNTR + reg_offset);
+	if (enabled == 0) {
+		PSB_WVDC32((PSB_RVDC32(DSPACNTR + reg_offset) | 0x00000004),
+			DSPACNTR + reg_offset);
+		PSB_WVDC32((PSB_RVDC32(sprite_reg) | 0x00000002), sprite_reg);
+	}
+
+	PSB_WVDC32(0, DSPALINOFF + reg_offset);
+	PSB_WVDC32(0, DSPATILEOFF + reg_offset);
+	PSB_WVDC32(0, DSPASURF + reg_offset);
+
+	return 0;
+}
+
+int DCCBUpdateDbiPanel(struct drm_device *dev, int pipe)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *)dev->dev_private;
+	struct mdfld_dsi_config *dsi_config = NULL;
+
+	if ((pipe != 0) && (pipe != 2))
+		return -EINVAL;
+
+	if (dev_priv && dev_priv->dsi_configs)
+		dsi_config = (pipe == 0) ?
+			dev_priv->dsi_configs[0] : dev_priv->dsi_configs[1];
+
+	return mdfld_dsi_dsr_update_panel_fb(dsi_config);
+}
+
+void DCCBWaitForDbiFifoEmpty(struct drm_device *dev, int pipe)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct mdfld_dsi_config *dsi_config;
+	int retry;
+
+	if ((pipe != 0) && (pipe != 2))
+		return;
+
+	dsi_config = (pipe == 0) ? dev_priv->dsi_configs[0] :
+				   dev_priv->dsi_configs[1];
+
+	if (!dsi_config || dsi_config->type != MDFLD_DSI_ENCODER_DBI)
+		return;
+
+	/* shall we use FLIP_DONE on ANN? */
+	if (IS_TNG_B0(dev)) {
+		retry = wait_event_interruptible_timeout(dev_priv->eof_wait,
+				(REG_READ(MIPIA_GEN_FIFO_STAT_REG) & BIT27),
+				msecs_to_jiffies(1000));
+	} else {
+		retry = 1000;
+		while (retry-- && !(REG_READ(MIPIA_GEN_FIFO_STAT_REG)))
+			udelay(500);
+	}
+
+	if (retry == 0)
+		DRM_ERROR("DBI FIFO not empty\n");
+}
+
+void DCCBUnblankDisplay(struct drm_device *dev)
+{
+	int res;
+	struct psb_framebuffer *psb_fb = NULL;
+
+	DCCBGetFramebuffer(dev, &psb_fb);
+
+	if (!psb_fb)
+		return;
+
+	console_lock();
+	res = fb_blank(psb_fb->fbdev, 0);
+	console_unlock();
+	if (res != 0) {
+		DRM_ERROR("fb_blank failed (%d)", res);
+	}
+}
+
+void DCCBFlipDSRCb(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *)dev->dev_private;
+
+	if (!dev_priv->um_start) {
+		dev_priv->um_start = true;
+
+		if (dev_priv->b_dsr_enable_config)
+			dev_priv->b_dsr_enable = true;
+	}
+
+	if (dev_priv->b_dsr_enable && dev_priv->b_is_in_idle) {
+		dev_priv->exit_idle(dev, MDFLD_DSR_2D_3D, NULL, true);
+	}
+}
+
+u32 DCCBGetPipeCount(void)
+{
+	/* FIXME */
+	return 3;
+}
+
+bool DCCBIsSuspended(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *)dev->dev_private;
+	bool ret = false;
+
+	if (!dev_priv)
+		return false;
+
+	mutex_lock(&dev->mode_config.mutex);
+	ret = dev_priv->early_suspended;
+	mutex_unlock(&dev->mode_config.mutex);
+
+	return ret;
+}
+
+int DCCBIsPipeActive(struct drm_device *dev, int pipe)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *)dev->dev_private;
+	struct mdfld_dsi_config *dsi_config = NULL;
+	u32 pipeconf_reg;
+	int active = 0;
+
+	if (pipe == 0)
+		pipeconf_reg = PIPEACONF;
+	else if (pipe == 1)
+		pipeconf_reg = PIPEBCONF;
+	else {
+		DRM_ERROR("%s: unsupported pipe %d\n", __func__, pipe);
+		return 0;
+	}
+
+	/* FIXME: need to remove the suspended state checking. */
+	if (dev_priv->early_suspended)
+		return 0;
+
+	/* get display a for register reading */
+	if (power_island_get(OSPM_DISPLAY_A)) {
+		if ((pipe != 1) && dev_priv->dsi_configs) {
+			dsi_config = (pipe == 0) ? dev_priv->dsi_configs[0] :
+				dev_priv->dsi_configs[1];
+		}
+
+		mdfld_dsi_dsr_forbid(dsi_config);
+
+		active = (PSB_RVDC32(pipeconf_reg) & BIT31) ? 1 : 0 ;
+
+		mdfld_dsi_dsr_allow(dsi_config);
+
+		power_island_put(OSPM_DISPLAY_A);
+	}
+
+	return active;
+}
+
+void DCCBDsrForbid(struct drm_device *dev, int pipe)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *)dev->dev_private;
+	struct mdfld_dsi_config *dsi_config = NULL;
+
+	if ((pipe != 0) && (pipe != 2))
+		return;
+
+	if (dev_priv && dev_priv->dsi_configs)
+		dsi_config = (pipe == 0) ?
+			dev_priv->dsi_configs[0] : dev_priv->dsi_configs[1];
+
+	mdfld_dsi_dsr_forbid(dsi_config);
+}
+
+void DCCBDsrAllow(struct drm_device *dev, int pipe)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *)dev->dev_private;
+	struct mdfld_dsi_config *dsi_config = NULL;
+
+	if ((pipe != 0) && (pipe != 2))
+		return;
+
+	if (dev_priv && dev_priv->dsi_configs)
+		dsi_config = (pipe == 0) ?
+			dev_priv->dsi_configs[0] : dev_priv->dsi_configs[1];
+
+	mdfld_dsi_dsr_allow(dsi_config);
+}
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/displays/cmi_cmd.h b/drivers/external_drivers/intel_media/display/tng/drv/displays/cmi_cmd.h
new file mode 100644
index 0000000..7cb7f1c
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/displays/cmi_cmd.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c)  2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicensen
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Faxing Lu
+*/
+
+
+#ifndef CMI_CMD_H
+#define CMI_CMD_H
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+
+#include "mdfld_output.h"
+
+void cmi_cmd_init(struct drm_device *dev, struct panel_funcs *p_funcs);
+#endif
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/displays/cmi_vid.h b/drivers/external_drivers/intel_media/display/tng/drv/displays/cmi_vid.h
new file mode 100644
index 0000000..aaea498
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/displays/cmi_vid.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicensen
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Faxing Lu
+*/
+
+
+#ifndef CMI_VID_H
+#define CMI_VID_H
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+#include <asm/intel_scu_pmic.h>
+#include "mdfld_output.h"
+#include "mdfld_dsi_dpi.h"
+#include "mdfld_dsi_pkg_sender.h"
+
+void cmi_vid_init(struct drm_device *dev, struct panel_funcs *p_funcs);
+
+#endif
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/displays/hdmi.h b/drivers/external_drivers/intel_media/display/tng/drv/displays/hdmi.h
new file mode 100644
index 0000000..ada582c
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/displays/hdmi.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c)  2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Thomas Eaton <thomas.g.eaton@intel.com>
+ * Scott Rowe <scott.m.rowe@intel.com>
+*/
+
+#ifndef HDMI_H
+#define HDMI_H
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+
+#include "mdfld_output.h"
+
+void hdmi_init(struct drm_device *dev);
+
+#endif
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/displays/jdi25x16_cmd.h b/drivers/external_drivers/intel_media/display/tng/drv/displays/jdi25x16_cmd.h
new file mode 100644
index 0000000..801ac4b
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/displays/jdi25x16_cmd.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c)  2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicensen
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Faxing Lu<faxing.lu@intel.com>
+*/
+
+
+#ifndef JDI25x16_CMD_H
+#define JDI25x16_CMD_H
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+#include <asm/intel_scu_ipc.h>
+#include "mdfld_output.h"
+#include "mdfld_dsi_dpi.h"
+#include "mdfld_dsi_pkg_sender.h"
+
+void jdi25x16_cmd_init(struct drm_device *dev, struct panel_funcs *p_funcs);
+#endif
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/displays/jdi25x16_vid.h b/drivers/external_drivers/intel_media/display/tng/drv/displays/jdi25x16_vid.h
new file mode 100644
index 0000000..c99c758
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/displays/jdi25x16_vid.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c)  2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicensen
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Faxing Lu<faxing.lu@intel.com>
+*/
+
+
+#ifndef JDI25x16_VID_H
+#define JDI25x16_VID_H
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+#include <asm/intel_scu_ipc.h>
+#include "mdfld_output.h"
+#include "mdfld_dsi_dpi.h"
+#include "mdfld_dsi_pkg_sender.h"
+
+void jdi25x16_vid_init(struct drm_device *dev, struct panel_funcs *p_funcs);
+#endif
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/displays/jdi_cmd.h b/drivers/external_drivers/intel_media/display/tng/drv/displays/jdi_cmd.h
new file mode 100644
index 0000000..d7fffa5
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/displays/jdi_cmd.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c)  2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicensen
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Faxing Lu
+*/
+
+
+#ifndef JDI_CMD_H
+#define JDI_CMD_H
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+#include <asm/intel_scu_ipc.h>
+#include "mdfld_output.h"
+#include "mdfld_dsi_dpi.h"
+#include "mdfld_dsi_pkg_sender.h"
+
+void jdi_cmd_init(struct drm_device *dev, struct panel_funcs *p_funcs);
+#endif
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/displays/jdi_vid.h b/drivers/external_drivers/intel_media/display/tng/drv/displays/jdi_vid.h
new file mode 100644
index 0000000..106e102
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/displays/jdi_vid.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicensen
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Austin Hu <austin.hu@intel.com>
+*/
+
+
+#ifndef JDI_VID_H
+#define JDI_VID_H
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+#include <asm/intel_scu_ipc.h>
+#include "mdfld_output.h"
+#include "mdfld_dsi_dpi.h"
+#include "mdfld_dsi_pkg_sender.h"
+
+/* FIXME: To get the JDI panel width/height inches. */
+
+void jdi_vid_init(struct drm_device *dev, struct panel_funcs *p_funcs);
+
+#endif
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/displays/sdc16x25_8_cmd.h b/drivers/external_drivers/intel_media/display/tng/drv/displays/sdc16x25_8_cmd.h
new file mode 100644
index 0000000..a9cff14
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/displays/sdc16x25_8_cmd.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c)  2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicensen
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eckhart Koeppen <eckhart.koeppen@intel.com>
+*/
+
+
+#ifndef SDC16x25_8_CMD_H
+#define SDC16x25_8_CMD_H
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+
+void sdc16x25_8_cmd_init(struct drm_device *dev, struct panel_funcs *p_funcs);
+
+#endif
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/displays/sdc25x16_cmd.h b/drivers/external_drivers/intel_media/display/tng/drv/displays/sdc25x16_cmd.h
new file mode 100644
index 0000000..ac4d4ae
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/displays/sdc25x16_cmd.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c)  2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicensen
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Faxing Lu <faxing.lu@intel.com>
+*/
+
+
+#ifndef SDC25x16_CMD_H
+#define SDC25x16_CMD_H
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+
+void sdc25x16_cmd_init(struct drm_device *dev, struct panel_funcs *p_funcs);
+
+#endif
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/displays/sharp10x19_cmd.h b/drivers/external_drivers/intel_media/display/tng/drv/displays/sharp10x19_cmd.h
new file mode 100644
index 0000000..a48c8c6
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/displays/sharp10x19_cmd.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c)  2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicensen
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Faxing Lu<faxing.lu@intel.com>
+*/
+
+
+#ifndef SHARP10x19_CMD_H
+#define SHARP10x19_CMD_H
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+#include <asm/intel_scu_ipc.h>
+#include "mdfld_output.h"
+#include "mdfld_dsi_dpi.h"
+#include "mdfld_dsi_pkg_sender.h"
+
+void sharp10x19_cmd_init(struct drm_device *dev, struct panel_funcs *p_funcs);
+#endif
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/displays/sharp25x16_cmd.h b/drivers/external_drivers/intel_media/display/tng/drv/displays/sharp25x16_cmd.h
new file mode 100644
index 0000000..bf43cc5
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/displays/sharp25x16_cmd.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c)  2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicensen
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Faxing Lu<faxing.lu@intel.com>
+*/
+
+
+#ifndef SHARP25x16_CMD_H
+#define SHARP25x16_CMD_H
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+#include <asm/intel_scu_ipc.h>
+#include "mdfld_output.h"
+#include "mdfld_dsi_dpi.h"
+#include "mdfld_dsi_pkg_sender.h"
+
+void sharp25x16_cmd_init(struct drm_device *dev, struct panel_funcs *p_funcs);
+#endif
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/displays/sharp25x16_vid.h b/drivers/external_drivers/intel_media/display/tng/drv/displays/sharp25x16_vid.h
new file mode 100644
index 0000000..06063cc
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/displays/sharp25x16_vid.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicensen
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Faxing Lu <faxing.lu@intel.com>
+*/
+
+
+#ifndef SHARP25x16_VID_H
+#define SHARP25x16_VID_H
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+#include <asm/intel_scu_ipc.h>
+#include "mdfld_output.h"
+#include "mdfld_dsi_dpi.h"
+#include "mdfld_dsi_pkg_sender.h"
+
+void sharp25x16_vid_init(struct drm_device *dev, struct panel_funcs *p_funcs);
+
+#endif
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/dispmgrnl.c b/drivers/external_drivers/intel_media/display/tng/drv/dispmgrnl.c
new file mode 100644
index 0000000..a0fbd26
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/dispmgrnl.c
@@ -0,0 +1,274 @@
+/******************************************************************************
+ *
+ * Copyright (c) 2011, Intel Corporation.
+ * Portions (c), Imagination Technology, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and Use.  Redistribution and use in binary form, without
+ * modification, of the software code provided with this license ("Software"),
+ * are permitted provided that the following conditions are met:
+ *
+ *  1. Redistributions must reproduce the above copyright notice and this
+ *     license in the documentation and/or other materials provided with the
+ *     Software.
+ *  2. Neither the name of Intel Corporation nor the name of Imagination
+ *     Technology, Ltd may be used to endorse or promote products derived from
+ *     the Software without specific prior written permission.
+ *  3. The Software can only be used in connection with the Intel hardware
+ *     designed to use the Software as outlined in the documentation. No other
+ *     use is authorized.
+ *  4. No reverse engineering, decompilation, or disassembly of the Software
+ *     is permitted.
+ *  5. The Software may not be distributed under terms different than this
+ *     license.
+ *
+ * Limited Patent License.  Intel Corporation grants a world-wide, royalty-free
+ * , non-exclusive license under patents it now or hereafter owns or controls
+ * to make, have made, use, import, offer to sell and sell ("Utilize") the
+ * Software, but solely to the extent that any such patent is necessary to
+ * Utilize the Software alone.  The patent license shall not apply to any
+ * combinations which include the Software.  No hardware per se is licensed
+ * hereunder.
+ *
+ * Ownership of Software and Copyrights. Title to all copies of the Software
+ * remains with the copyright holders. The Software is copyrighted and
+ * protected by the laws of the United States and other countries, and
+ * international treaty provisions.
+ *
+ * DISCLAIMER.  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
+ * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+******************************************************************************/
+
+#include <linux/version.h>
+#include <linux/fs.h>
+#include <net/genetlink.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include "dispmgrnl.h"
+#include "psb_dpst_func.h"
+#include "psb_powermgmt.h"
+
+#define NETLINK_DISPMGR		20
+
+static unsigned int g_pid = 0;
+
+static struct drm_device *g_dev = NULL;
+struct sock *nl_sk = NULL;
+
+static void execute_recv_command(struct dispmgr_command_hdr *cmd_hdr)
+{
+	switch (cmd_hdr->module) {
+	case DISPMGR_MOD_NETLINK:
+		{
+			switch (cmd_hdr->cmd) {
+			case DISPMGR_TEST:
+				{
+					struct dispmgr_command_hdr send_cmd_hdr;
+					unsigned int data = 0xdeadbeef;
+
+					if (cmd_hdr->data_size) {
+						unsigned int value =
+						    *((unsigned int *)
+						      cmd_hdr->data);
+						printk
+						    ("kdispmgr: received DISPMGR_TEST cmd data = 0x%x.\n",
+						     value);
+					} else
+						printk
+						    ("kdispmgr: received DISPMGR_TEST cmd NO data.\n");
+
+					send_cmd_hdr.data_size = sizeof(data);
+					send_cmd_hdr.data = (uintptr_t) &data;
+					send_cmd_hdr.module =
+					    DISPMGR_MOD_NETLINK;
+					send_cmd_hdr.cmd = DISPMGR_TEST;
+					dispmgr_nl_send_msg(&send_cmd_hdr);
+				}
+				break;
+			case DISPMGR_TEST_TEXT:
+				{
+					struct dispmgr_command_hdr send_cmd_hdr;
+					char *data = "can you hear me?";
+
+					if (cmd_hdr->data_size) {
+						printk
+						    ("kdispmgr: received DISPMGR_TEST_TEXT cmd text = 0x%s.\n",
+						     (char *)cmd_hdr->data);
+					} else
+						printk
+						    ("kdispmgr: received DISPMGR_TEST_TEXT cmd NO text.\n");
+
+					send_cmd_hdr.module =
+					    DISPMGR_MOD_NETLINK;
+					send_cmd_hdr.cmd = DISPMGR_TEST_TEXT;
+					send_cmd_hdr.data_size =
+					    strlen(data) + 1;
+					send_cmd_hdr.data =
+						(uintptr_t) (void *)data;
+					dispmgr_nl_send_msg(&send_cmd_hdr);
+				}
+				break;
+			default:
+				{
+					printk
+					    ("kdispmgr: received unknown command = %d.\n",
+					     cmd_hdr->cmd);
+				};
+			};	/* switch */
+		}
+		break;
+	case DISPMGR_MOD_DPST:
+		{
+			dpst_execute_recv_command(cmd_hdr);
+		}
+		break;
+	default:
+		{
+			printk("kdispmgr: received unknown module = %d.\n",
+			       cmd_hdr->module);
+		};
+	}			/* switch */
+}
+
+/* Send Message to user mode */
+void dispmgr_nl_send_msg(struct dispmgr_command_hdr *cmd_hdr)
+{
+	struct nlmsghdr *nlh;
+	struct sk_buff *skb_out;
+	unsigned int msg_size = 0;
+	unsigned int data_size = 0;
+	unsigned int hdr_size = 0;
+	int ret = 0;
+
+	/* if no user mode process active */
+	if (!g_pid)
+		return;
+
+	hdr_size = sizeof(struct dispmgr_command_hdr);
+	data_size = hdr_size + cmd_hdr->data_size;
+	msg_size = data_size + sizeof(struct nlmsghdr);
+
+	skb_out = nlmsg_new(msg_size, 0);
+	if (!skb_out) {
+		printk("kdispmgr: Failed to allocated skb\n");
+		return;
+	}
+
+	nlh = nlmsg_put(skb_out, 0, 0, NLMSG_DONE, msg_size, 0);
+	NETLINK_CB(skb_out).dst_group = 0;	/* not in mcast group */
+
+	memcpy(nlmsg_data(nlh), cmd_hdr, hdr_size);
+	if (cmd_hdr->data_size) {
+		memcpy(nlmsg_data(nlh) + hdr_size, (void *) cmd_hdr->data,
+		       cmd_hdr->data_size);
+	}
+	ret = netlink_unicast(nl_sk, skb_out, g_pid, MSG_DONTWAIT);
+}
+
+/* Receive Message from Kernel */
+static void nl_recv_msg(struct sk_buff *skb)
+{
+	struct nlmsghdr *nlh = NULL;
+	struct dispmgr_command_hdr cmd_hdr;
+	unsigned int hdr_size = sizeof(struct dispmgr_command_hdr);
+
+	if (skb == NULL) {
+		printk("kdispmgr: received null command.\n");
+		return;
+	}
+
+	nlh = (struct nlmsghdr *)skb->data;
+	g_pid = nlh->nlmsg_pid;
+
+	memcpy((void *)(&cmd_hdr), NLMSG_DATA(nlh), hdr_size);
+	if (cmd_hdr.data_size) {
+		cmd_hdr.data = (uintptr_t) (NLMSG_DATA(nlh) + hdr_size);
+	}
+
+	execute_recv_command(&cmd_hdr);
+}
+
+#define KEEP_UNUSED_CODE 0
+
+#if KEEP_UNUSED_CODE
+static void dispmgr_nl_exit(void)
+{
+	printk(KERN_INFO "kdispmgr: exiting hello module\n");
+	netlink_kernel_release(nl_sk);
+	g_pid = 0;
+}
+#endif /* if KEEP_UNUSED_CODE */
+
+static int dispmgr_nl_init(void)
+{
+	int ret = 0;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0))
+	nl_sk = netlink_kernel_create(&init_net,
+				      NETLINK_DISPMGR,
+				      0, nl_recv_msg, NULL, THIS_MODULE);
+#else
+	struct netlink_kernel_cfg cfg = {
+		.groups = 0,
+		.input = nl_recv_msg,
+		.cb_mutex = NULL,
+		.flags = (uintptr_t) (THIS_MODULE),
+	};
+
+	nl_sk = netlink_kernel_create(&init_net, NETLINK_DISPMGR, &cfg);
+#endif
+
+	if (!nl_sk) {
+		printk(KERN_ALERT "kdispmgr: error creating netlink socket.\n");
+		ret = -10;
+	} else {
+		printk(KERN_ALERT
+		       "kdispmgr: netlink socket created successfully.\n");
+		ret = 0;
+	}
+
+	return ret;
+}
+
+void dispmgr_start(struct drm_device *dev)
+{
+	g_dev = dev;
+	printk("kdispmgr: display manager start.\n");
+	dispmgr_nl_init();
+	return;
+}
+
+/* this function is only called by dpms on or late resume function */
+void dpstmgr_reg_restore_locked(struct drm_device *dev, struct mdfld_dsi_config *dsi_config)
+{
+	struct mdfld_dsi_hw_context *ctx = NULL;
+	struct drm_psb_private *dev_priv = NULL;
+	struct mdfld_dsi_hw_registers *regs = NULL;
+
+	if (!dsi_config || !dsi_config->dev)
+		return;
+
+	ctx = &dsi_config->dsi_hw_context;
+	regs = &dsi_config->regs;
+	dev_priv = dsi_config->dev->dev_private;
+
+	if (power_island_get(OSPM_DISPLAY_A))
+	{
+
+		PSB_WVDC32(ctx->histogram_intr_ctrl, regs->histogram_intr_ctrl_reg);
+		PSB_WVDC32(ctx->histogram_logic_ctrl, regs->histogram_logic_ctrl_reg);
+		PSB_WVDC32(ctx->aimg_enhance_bin, regs->aimg_enhance_bin_reg);
+		PSB_WVDC32(ctx->lvds_port_ctrl, regs->lvds_port_ctrl_reg);
+
+		power_island_put(OSPM_DISPLAY_A);
+	}
+}
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/dispmgrnl.h b/drivers/external_drivers/intel_media/display/tng/drv/dispmgrnl.h
new file mode 100644
index 0000000..39ea59e
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/dispmgrnl.h
@@ -0,0 +1,97 @@
+/******************************************************************************
+ *
+ * Copyright (c) 2011, Intel Corporation.
+ * Portions (c), Imagination Technology, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and Use.  Redistribution and use in binary form, without
+ * modification, of the software code provided with this license ("Software"),
+ * are permitted provided that the following conditions are met:
+ *
+ *  1. Redistributions must reproduce the above copyright notice and this
+ *     license in the documentation and/or other materials provided with the
+ *     Software.
+ *  2. Neither the name of Intel Corporation nor the name of Imagination
+ *     Technology, Ltd may be used to endorse or promote products derived from
+ *     the Software without specific prior written permission.
+ *  3. The Software can only be used in connection with the Intel hardware
+ *     designed to use the Software as outlined in the documentation. No other
+ *     use is authorized.
+ *  4. No reverse engineering, decompilation, or disassembly of the Software
+ *     is permitted.
+ *  5. The Software may not be distributed under terms different than this
+ *     license.
+ *
+ * Limited Patent License.  Intel Corporation grants a world-wide, royalty-free
+ * , non-exclusive license under patents it now or hereafter owns or controls
+ * to make, have made, use, import, offer to sell and sell ("Utilize") the
+ * Software, but solely to the extent that any such patent is necessary to
+ * Utilize the Software alone.  The patent license shall not apply to any
+ * combinations which include the Software.  No hardware per se is licensed
+ * hereunder.
+ *
+ * Ownership of Software and Copyrights. Title to all copies of the Software
+ * remains with the copyright holders. The Software is copyrighted and
+ * protected by the laws of the United States and other countries, and
+ * international treaty provisions.
+ *
+ * DISCLAIMER.  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
+ * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+******************************************************************************/
+
+#ifndef _DISPLAY_MANAGER_NETLINK_H_
+#define _DISPLAY_MANAGER_NETLINK_H_
+
+#include <drm/drmP.h>
+#include "mdfld_dsi_output.h"
+
+#define SUCCESS		1
+#define FAILED		0
+
+enum dispmgr_module_enum {
+	DISPMGR_MOD_UNKNOWN,
+	DISPMGR_MOD_NETLINK,
+	DISPMGR_MOD_DPST,
+};
+
+enum dispmgr_event_enum {
+	DISPMGR_UNKNOWN,
+	DISPMGR_TEST,
+	DISPMGR_TEST_TEXT,
+};
+
+enum dispmgr_dpst_event_enum {
+	DISPMGR_DPST_UNKNOWN,
+	DISPMGR_DPST_INIT_COMM,
+	DISPMGR_DPST_UPDATE_GUARD,
+	DISPMGR_DPST_HIST_ENABLE,
+	DISPMGR_DPST_HIST_DATA,
+	DISPMGR_DPST_BL_CMD,
+	DISPMGR_DPST_GAMMA_SET_CMD,
+	DISPMGR_DPST_DIET_ENABLE,
+	DISPMGR_DPST_DIET_DISABLE,
+	DISPMGR_DPST_GET_MODE,
+};
+
+/* Display Manager Command Header */
+struct dispmgr_command_hdr {
+	unsigned int module;	/* module to receive the command */
+	unsigned int cmd;	/* command from Userspace */
+	unsigned int data_size;	/* data size of command_data in number of bytes */
+	uint64_t  data  __attribute__ ((__packed__));             /* command data */
+};
+
+void dispmgr_start(struct drm_device *dev);
+void dispmgr_nl_send_msg(struct dispmgr_command_hdr *cmd_hdr);
+void dpstmgr_reg_restore_locked(struct drm_device *dev, struct mdfld_dsi_config *dsi_config);
+
+#endif				/* _DISPLAY_MANAGER_NETLINK_H_ */
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/jdi25x16_cmd.c b/drivers/external_drivers/intel_media/display/tng/drv/jdi25x16_cmd.c
new file mode 100644
index 0000000..ed89635
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/jdi25x16_cmd.c
@@ -0,0 +1,610 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Faxing Lu<faxing.lu@intel.com>
+ */
+
+#include "mdfld_dsi_dbi.h"
+#include "mdfld_dsi_esd.h"
+#include <asm/intel_scu_pmic.h>
+
+#include "displays/jdi25x16_cmd.h"
+static int mipi_reset_gpio;
+static int bias_en_gpio;
+
+static u8 jdi25x16_pixel_format[] = {0x3a, 0x77};
+static u8 jdi25x16_clumn_addr[] = {
+            0x2a, 0x00, 0x00, 0x04, 0xff};
+static u8 jdi25x16_page_addr[] = {
+            0x2b, 0x00, 0x00, 0x06, 0x3f};
+static u8 jdi25x16_set_tear_on[] = {0x35, 0x00};
+static u8 jdi25x16_tear_scanline[] = {
+            0x44, 0x00, 0x00};
+static u8 jdi25x16_set_brightness[] = {0x51, 0x0};
+static u8 jdi25x16_turn_on_backlight[] = {0x53, 0x24};
+static u8 jdi25x16_set_vid_mode[] = {0xb3, 0x14};
+static u8 jdi25x16_set_cmd_mode[] = {0xb3, 0x0c};
+static u8 jdi25x16_set_normal_mode[] = {0xb3, 0x1c};
+
+static
+int jdi25x16_cmd_drv_ic_init(struct mdfld_dsi_config *dsi_config)
+{
+	struct mdfld_dsi_pkg_sender *sender
+		= mdfld_dsi_get_pkg_sender(dsi_config);
+	int err = 0;
+	int i = 0;
+
+	PSB_DEBUG_ENTRY("\n");
+	if (!sender) {
+		DRM_ERROR("Cannot get sender\n");
+		return -EINVAL;
+	}
+	for(i = 0; i < 2; i++) {
+		if (i == 0)
+			sender->work_for_slave_panel = false;
+		else
+			sender->work_for_slave_panel = true;
+		err = mdfld_dsi_send_mcs_short_hs(sender, soft_reset, 0, 0,
+		        MDFLD_DSI_SEND_PACKAGE);
+		if (err) {
+		    DRM_ERROR("%s: %d: Panel software reset\n", __func__, __LINE__);
+		    goto ic_init_err;
+		}
+		mdelay(25);
+		err = mdfld_dsi_send_mcs_short_hs(sender, jdi25x16_pixel_format[0],
+				jdi25x16_pixel_format[1], 1, 0);
+		if (err) {
+		    DRM_ERROR("%s: %d: Set pixel format\n", __func__, __LINE__);
+		    goto ic_init_err;
+		}
+
+		err = mdfld_dsi_send_mcs_long_hs(sender,
+		        jdi25x16_clumn_addr,
+				5, MDFLD_DSI_SEND_PACKAGE);
+		if (err) {
+			DRM_ERROR("%s: %d: Set Clumn Address\n",
+			__func__, __LINE__);
+			goto ic_init_err;
+		}
+
+		err = mdfld_dsi_send_mcs_long_hs(sender,
+		        jdi25x16_page_addr,
+				5, MDFLD_DSI_SEND_PACKAGE);
+		if (err) {
+			DRM_ERROR("%s: %d: Set Page Address\n",
+			__func__, __LINE__);
+			goto ic_init_err;
+		}
+		err = mdfld_dsi_send_mcs_short_hs(sender, jdi25x16_set_tear_on[0],
+				jdi25x16_set_tear_on[1], 1, 0);
+		if (err) {
+		    DRM_ERROR("%s: %d: Set tear on\n", __func__, __LINE__);
+		    goto ic_init_err;
+		}
+		err = mdfld_dsi_send_mcs_long_hs(sender,
+		        jdi25x16_tear_scanline,
+				3, MDFLD_DSI_SEND_PACKAGE);
+		if (err) {
+			DRM_ERROR("%s: %d: Set tear scanline\n",
+			__func__, __LINE__);
+			goto ic_init_err;
+		}
+		err = mdfld_dsi_send_mcs_short_hs(sender, jdi25x16_set_brightness[0],
+				jdi25x16_set_brightness[1], 1, 0);
+		if (err) {
+		    DRM_ERROR("%s: %d: Set brightness\n", __func__, __LINE__);
+		    goto ic_init_err;
+		}
+		err = mdfld_dsi_send_mcs_short_hs(sender, jdi25x16_turn_on_backlight[0],
+				jdi25x16_turn_on_backlight[1], 1, 0);
+		if (err) {
+		    DRM_ERROR("%s: %d: Turn on backlight\n", __func__, __LINE__);
+		    goto ic_init_err;
+		}
+	}
+	sender->work_for_slave_panel = false;
+	return 0;
+
+ic_init_err:
+	sender->work_for_slave_panel = false;
+	err = -EIO;
+	return err;
+}
+
+int jdi25x16_cmd_set_mode(struct mdfld_dsi_config *dsi_config)
+{
+
+	struct mdfld_dsi_pkg_sender *sender =
+		mdfld_dsi_get_pkg_sender(dsi_config);
+	int err;
+	int i = 0;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (!sender) {
+		DRM_ERROR("Failed to get DSI packet sender\n");
+		return -EINVAL;
+	}
+	for (i = 0; i < 2; i++) {
+		if (i == 0)
+			sender->work_for_slave_panel = false;
+		else
+			sender->work_for_slave_panel = true;
+		mdelay(20);
+		err = mdfld_dsi_send_gen_short_hs(sender,
+			access_protect, 0, 2,
+			MDFLD_DSI_SEND_PACKAGE);
+		if (err) {
+			DRM_ERROR("%s: %d: Set MCAP\n",
+			__func__, __LINE__);
+			goto set_mode_err;
+		}
+
+		err = mdfld_dsi_send_gen_long_hs(sender, jdi25x16_set_vid_mode,
+				2,
+				MDFLD_DSI_SEND_PACKAGE);
+		if (err) {
+			DRM_ERROR("%s: %d: Set Mode\n", __func__, __LINE__);
+			goto set_mode_err;
+		}
+		err = mdfld_dsi_send_gen_short_hs(sender,
+			access_protect, 3, 2,
+			MDFLD_DSI_SEND_PACKAGE);
+		if (err) {
+			DRM_ERROR("%s: %d: Set MCAP\n",
+			__func__, __LINE__);
+			goto set_mode_err;
+		}
+		/* Set Display on 0x29 */
+		err = mdfld_dsi_send_mcs_short_hs(sender, set_display_on, 0, 0,
+				MDFLD_DSI_SEND_PACKAGE);
+		if (err) {
+			DRM_ERROR("%s: %d: Set Display On\n", __func__, __LINE__);
+			goto set_mode_err;
+		}
+	}
+
+	for (i = 0; i < 2; i++) {
+		if (i == 0)
+			sender->work_for_slave_panel = false;
+		else
+			sender->work_for_slave_panel = true;
+		mdelay(20);
+		err = mdfld_dsi_send_gen_short_hs(sender,
+			access_protect, 0, 2,
+			MDFLD_DSI_SEND_PACKAGE);
+		if (err) {
+			DRM_ERROR("%s: %d: Set MCAP\n",
+			__func__, __LINE__);
+			goto set_mode_err;
+		}
+		err = mdfld_dsi_send_gen_long_hs(sender, jdi25x16_set_cmd_mode,
+				2,
+				MDFLD_DSI_SEND_PACKAGE);
+		if (err) {
+			DRM_ERROR("%s: %d: Set Mode\n", __func__, __LINE__);
+			goto set_mode_err;
+		}
+		err = mdfld_dsi_send_gen_short_hs(sender,
+			access_protect, 3, 2,
+			MDFLD_DSI_SEND_PACKAGE);
+		if (err) {
+			DRM_ERROR("%s: %d: Set MCAP\n",
+			__func__, __LINE__);
+			goto set_mode_err;
+		}
+	}
+	sender->work_for_slave_panel = false;
+	return 0;
+
+set_mode_err:
+	sender->work_for_slave_panel = false;
+	err = -EIO;
+	return err;
+}
+
+static
+void jdi25x16_cmd_controller_init(
+		struct mdfld_dsi_config *dsi_config)
+{
+	struct mdfld_dsi_hw_context *hw_ctx =
+				&dsi_config->dsi_hw_context;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	/*reconfig lane configuration*/
+	dsi_config->lane_count = 4;
+	dsi_config->lane_config = MDFLD_DSI_DATA_LANE_4_0;
+	hw_ctx->cck_div = 1;
+	hw_ctx->pll_bypass_mode = 0;
+
+	hw_ctx->mipi_control = 0x0;
+	hw_ctx->intr_en = 0xFFFFFFFF;
+	hw_ctx->hs_tx_timeout = 0xFFFFFF;
+	hw_ctx->lp_rx_timeout = 0xFFFFFF;
+	hw_ctx->device_reset_timer = 0xffff;
+	hw_ctx->turn_around_timeout = 0x14;
+	hw_ctx->high_low_switch_count = 0x2b;
+	hw_ctx->clk_lane_switch_time_cnt = 0x2b0014;
+	hw_ctx->lp_byteclk = 0x6;
+	hw_ctx->dphy_param = 0x2a18681f;
+	hw_ctx->eot_disable = 0x0;
+	hw_ctx->init_count = 0xf0;
+	hw_ctx->dbi_bw_ctrl = 1024;
+	hw_ctx->hs_ls_dbi_enable = 0x0;
+	hw_ctx->dsi_func_prg = ((DBI_DATA_WIDTH_OPT2 << 13) |
+				dsi_config->lane_count);
+	hw_ctx->mipi = SEL_FLOPPED_HSTX | PASS_FROM_SPHY_TO_AFE |
+		DUAL_LINK_ENABLE | DUAL_LINK_CAPABLE;
+	hw_ctx->video_mode_format = 0xf;
+}
+static
+int jdi25x16_cmd_panel_connection_detect(
+	struct mdfld_dsi_config *dsi_config)
+{
+	int status;
+	int pipe = dsi_config->pipe;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (pipe == 0) {
+		status = MDFLD_DSI_PANEL_CONNECTED;
+	} else {
+		DRM_INFO("%s: do NOT support dual panel\n",
+		__func__);
+		status = MDFLD_DSI_PANEL_DISCONNECTED;
+	}
+
+	return status;
+}
+
+static
+int jdi25x16_cmd_power_on(
+	struct mdfld_dsi_config *dsi_config)
+{
+
+	struct mdfld_dsi_pkg_sender *sender =
+		mdfld_dsi_get_pkg_sender(dsi_config);
+	int err;
+	int i;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (!sender) {
+		DRM_ERROR("Failed to get DSI packet sender\n");
+		return -EINVAL;
+	}
+	for (i = 0; i < 2; i++) {
+		if (i == 0)
+			sender->work_for_slave_panel = false;
+		else
+			sender->work_for_slave_panel = true;
+
+		/* Sleep Out */
+		err = mdfld_dsi_send_mcs_short_hs(sender, exit_sleep_mode, 0, 0,
+				MDFLD_DSI_SEND_PACKAGE);
+		if (err) {
+			DRM_ERROR("%s: %d: Exit Sleep Mode\n", __func__, __LINE__);
+			goto power_on_err;
+		}
+		msleep(120);
+	}
+	sender->work_for_slave_panel = false;
+	return 0;
+
+power_on_err:
+	sender->work_for_slave_panel = false;
+	err = -EIO;
+	return err;
+}
+
+static void __vpro2_power_ctrl(bool on)
+{
+	u8 addr, value;
+	addr = 0xad;
+	if (intel_scu_ipc_ioread8(addr, &value))
+		DRM_ERROR("%s: %d: failed to read vPro2\n",
+		__func__, __LINE__);
+
+	/* Control vPROG2 power rail with 2.85v. */
+	if (on)
+		value |= 0x1;
+	else
+		value &= ~0x1;
+
+	if (intel_scu_ipc_iowrite8(addr, value))
+		DRM_ERROR("%s: %d: failed to write vPro2\n",
+				__func__, __LINE__);
+}
+
+static int jdi25x16_cmd_power_off(
+		struct mdfld_dsi_config *dsi_config)
+{
+	struct mdfld_dsi_pkg_sender *sender =
+		mdfld_dsi_get_pkg_sender(dsi_config);
+	int err;
+	int i;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (!sender) {
+		DRM_ERROR("Failed to get DSI packet sender\n");
+		return -EINVAL;
+	}
+	for (i = 0; i < 2; i++) {
+		if (i == 0)
+			sender->work_for_slave_panel = false;
+		else
+			sender->work_for_slave_panel = true;
+		mdelay(20);
+		err = mdfld_dsi_send_gen_short_hs(sender,
+			access_protect, 0, 2,
+			MDFLD_DSI_SEND_PACKAGE);
+		if (err) {
+			DRM_ERROR("%s: %d: Set MCAP\n",
+			__func__, __LINE__);
+			goto power_off_err;
+		}
+		err = mdfld_dsi_send_gen_long_hs(sender, jdi25x16_set_normal_mode,
+				2,
+				MDFLD_DSI_SEND_PACKAGE);
+		if (err) {
+			DRM_ERROR("%s: %d: Set Mode\n", __func__, __LINE__);
+			goto power_off_err;
+		}
+		err = mdfld_dsi_send_gen_short_hs(sender,
+			access_protect, 3, 2,
+			MDFLD_DSI_SEND_PACKAGE);
+		if (err) {
+			DRM_ERROR("%s: %d: Set MCAP\n",
+			__func__, __LINE__);
+			goto power_off_err;
+		}
+	}
+	for (i = 0; i < 2; i++) {
+		if (i == 0)
+			sender->work_for_slave_panel = false;
+		else
+			sender->work_for_slave_panel = true;
+		/* Set Display off */
+		err = mdfld_dsi_send_mcs_short_hs(sender, set_display_off, 0, 0,
+				MDFLD_DSI_SEND_PACKAGE);
+		if (err) {
+			DRM_ERROR("%s: %d: Set Display On\n", __func__, __LINE__);
+			goto power_off_err;
+		}
+		msleep(20);
+		/* Sleep In */
+		err = mdfld_dsi_send_mcs_short_hs(sender, enter_sleep_mode, 0, 0,
+				MDFLD_DSI_SEND_PACKAGE);
+		if (err) {
+			DRM_ERROR("%s: %d: Exit Sleep Mode\n", __func__, __LINE__);
+			goto power_off_err;
+		}
+		msleep(80);
+	}
+	gpio_set_value_cansleep(bias_en_gpio, 0);
+	msleep(10);
+
+	for (i = 0; i < 2; i++) {
+		if (i == 0)
+			sender->work_for_slave_panel = false;
+		else
+			sender->work_for_slave_panel = true;
+		err = mdfld_dsi_send_gen_short_hs(sender,
+				access_protect, 0, 2,
+				MDFLD_DSI_SEND_PACKAGE);
+			if (err) {
+				DRM_ERROR("%s: %d: Set MCAP\n",
+				__func__, __LINE__);
+				goto power_off_err;
+			}
+		err = mdfld_dsi_send_gen_short_hs(sender,
+				low_power_mode, 1, 2,
+				MDFLD_DSI_SEND_PACKAGE);
+			if (err) {
+				DRM_ERROR("%s: %d: Set MCAP\n",
+				__func__, __LINE__);
+				goto power_off_err;
+			}
+	}
+	sender->work_for_slave_panel = false;
+	return 0;
+power_off_err:
+	sender->work_for_slave_panel = false;
+	err = -EIO;
+	return err;
+}
+
+static
+int jdi25x16_cmd_set_brightness(
+		struct mdfld_dsi_config *dsi_config,
+		int level)
+{
+	struct mdfld_dsi_pkg_sender *sender =
+		mdfld_dsi_get_pkg_sender(dsi_config);
+	u8 duty_val = 0;
+	int i;
+
+	PSB_DEBUG_ENTRY("level = %d\n", level);
+
+	if (!sender) {
+		DRM_ERROR("Failed to get DSI packet sender\n");
+		return -EINVAL;
+	}
+
+	duty_val = (0xFF * level) / 255;
+	for (i = 0; i < 2; i++) {
+		if (i == 0)
+			sender->work_for_slave_panel = false;
+		else
+			sender->work_for_slave_panel = true;
+		/*
+		*	Set maximum brightness here. AOB needs to be modified
+		*	to get real brightness setting
+		*/
+		mdfld_dsi_send_mcs_short_hs(sender,
+				write_display_brightness, 0x90, 1,
+				MDFLD_DSI_SEND_PACKAGE);
+	}
+	sender->work_for_slave_panel = false;
+	return 0;
+}
+
+static
+int jdi25x16_cmd_panel_reset(
+		struct mdfld_dsi_config *dsi_config)
+{
+	int ret;
+
+	msleep(10);
+	__vpro2_power_ctrl(true);
+	usleep_range(2000, 2500);
+
+	if (bias_en_gpio == 0) {
+		bias_en_gpio = 189;
+		ret = gpio_request(bias_en_gpio, "bias_enable");
+		if (ret) {
+			DRM_ERROR("Faild to request bias_enable gpio\n");
+			return -EINVAL;
+		}
+		gpio_direction_output(bias_en_gpio, 0);
+	}
+	if (mipi_reset_gpio == 0) {
+		ret = get_gpio_by_name("disp0_rst");
+		if (ret < 0) {
+			DRM_ERROR("Faild to get panel reset gpio, " \
+				  "use default reset pin\n");
+			return 0;
+		}
+		mipi_reset_gpio = ret;
+		ret = gpio_request(mipi_reset_gpio, "mipi_display");
+		if (ret) {
+			DRM_ERROR("Faild to request panel reset gpio\n");
+			return 0;
+		}
+		gpio_direction_output(mipi_reset_gpio, 0);
+	}
+	gpio_direction_output(bias_en_gpio, 0);
+	gpio_direction_output(mipi_reset_gpio, 0);
+	gpio_set_value_cansleep(bias_en_gpio, 0);
+	gpio_set_value_cansleep(mipi_reset_gpio, 0);
+	usleep_range(2000, 2500);
+	gpio_set_value_cansleep(bias_en_gpio, 1);
+	usleep_range(2000, 2500);
+	gpio_set_value_cansleep(mipi_reset_gpio, 1);
+
+	return 0;
+
+}
+
+static
+int jdi25x16_cmd_exit_deep_standby(
+		struct mdfld_dsi_config *dsi_config)
+{
+	static bool skip_once = true;
+	if (skip_once) {
+		skip_once = false;
+		return 0;
+	}
+	gpio_set_value_cansleep(mipi_reset_gpio, 0);
+	mdelay(10);
+	gpio_set_value_cansleep(bias_en_gpio, 1);
+	mdelay(20);
+	gpio_set_value_cansleep(mipi_reset_gpio, 1);
+	mdelay(15);
+
+	return 0;
+}
+
+static
+struct drm_display_mode *jdi25x16_cmd_get_config_mode(void)
+{
+	struct drm_display_mode *mode;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+	if (!mode)
+		return NULL;
+
+	mode->hdisplay = 2560;
+
+	mode->hsync_start = mode->hdisplay + 160;
+	mode->hsync_end = mode->hsync_start + 24;
+	mode->htotal = mode->hsync_end + 56;
+
+	mode->vdisplay = 1600;
+	mode->vsync_start = mode->vdisplay + 12;
+	mode->vsync_end = mode->vsync_start + 4;
+	mode->vtotal = mode->vsync_end + 4;
+
+	mode->vrefresh = 60;
+	mode->clock =  mode->vrefresh * mode->vtotal *
+		mode->htotal / 1000;
+
+	drm_mode_set_name(mode);
+	drm_mode_set_crtcinfo(mode, 0);
+
+	mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+	return mode;
+}
+
+static
+void jdi25x16_cmd_get_panel_info(int pipe,
+		struct panel_info *pi)
+{
+	PSB_DEBUG_ENTRY("\n");
+
+	if (pipe == 0) {
+		pi->width_mm = 192;
+		pi->height_mm = 120;
+	}
+}
+
+void jdi25x16_cmd_init(struct drm_device *dev,
+		struct panel_funcs *p_funcs)
+{
+	if (!dev || !p_funcs) {
+		DRM_ERROR("Invalid parameters\n");
+		return;
+	}
+	PSB_DEBUG_ENTRY("\n");
+	p_funcs->reset = jdi25x16_cmd_panel_reset;
+	p_funcs->power_on = jdi25x16_cmd_power_on;
+	p_funcs->power_off = jdi25x16_cmd_power_off;
+	p_funcs->drv_ic_init = jdi25x16_cmd_drv_ic_init;
+	p_funcs->get_config_mode = jdi25x16_cmd_get_config_mode;
+	p_funcs->get_panel_info = jdi25x16_cmd_get_panel_info;
+	p_funcs->dsi_controller_init =
+			jdi25x16_cmd_controller_init;
+	p_funcs->detect =
+			jdi25x16_cmd_panel_connection_detect;
+	p_funcs->set_brightness =
+			jdi25x16_cmd_set_brightness;
+	p_funcs->exit_deep_standby =
+				jdi25x16_cmd_exit_deep_standby;
+	p_funcs->drv_set_panel_mode = jdi25x16_cmd_set_mode;
+
+}
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/jdi25x16_vid.c b/drivers/external_drivers/intel_media/display/tng/drv/jdi25x16_vid.c
new file mode 100644
index 0000000..6ff47b5
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/jdi25x16_vid.c
@@ -0,0 +1,476 @@
+/*
+ * Copyright (C) 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Faxing Lu <faxing.lu@intel.com>
+ */
+
+#include <asm/intel_scu_pmic.h>
+
+#include "displays/jdi25x16_vid.h"
+
+static int mipi_reset_gpio;
+static int bias_en_gpio;
+
+static u8 jdi25x16_pixel_format[] = {0x3a, 0x77};
+static u8 jdi25x16_clumn_addr[] = {
+            0x2a, 0x00, 0x00, 0x04, 0xff};
+static u8 jdi25x16_page_addr[] = {
+            0x2b, 0x00, 0x00, 0x06, 0x3f};
+static u8 jdi25x16_set_tear_on[] = {0x35, 0x00};
+static u8 jdi25x16_tear_scanline[] = {
+            0x44, 0x00, 0x00};
+static u8 jdi25x16_set_brightness[] = {0x51, 0xFF};
+static u8 jdi25x16_turn_on_backlight[] = {0x53, 0x24};
+static u8 jdi25x16_set_mode[] = {0xb3, 0x14};
+
+int mdfld_dsi_jdi25x16_ic_init(struct mdfld_dsi_config *dsi_config)
+{
+	struct mdfld_dsi_pkg_sender *sender
+		= mdfld_dsi_get_pkg_sender(dsi_config);
+	int err = 0;
+	int i = 0;
+
+	PSB_DEBUG_ENTRY("\n");
+	if (!sender) {
+		DRM_ERROR("Cannot get sender\n");
+		return -EINVAL;
+	}
+	for(i = 0; i < 2; i++) {
+		if (i == 0)
+			sender->work_for_slave_panel = false;
+		else
+			sender->work_for_slave_panel = true;
+		err = mdfld_dsi_send_mcs_short_hs(sender, soft_reset, 0, 0,
+		        MDFLD_DSI_SEND_PACKAGE);
+		if (err) {
+		    DRM_ERROR("%s: %d: Panel software reset\n", __func__, __LINE__);
+		    goto ic_init_err;
+		}
+		mdelay(20);
+		err = mdfld_dsi_send_mcs_short_hs(sender, jdi25x16_pixel_format[0],
+				jdi25x16_pixel_format[1], 1, 0);
+		if (err) {
+		    DRM_ERROR("%s: %d: Set pixel format\n", __func__, __LINE__);
+		    goto ic_init_err;
+		}
+
+		err = mdfld_dsi_send_mcs_long_hs(sender,
+		        jdi25x16_clumn_addr,
+				5, MDFLD_DSI_SEND_PACKAGE);
+		if (err) {
+			DRM_ERROR("%s: %d: Set Clumn Address\n",
+			__func__, __LINE__);
+			goto ic_init_err;
+		}
+
+		err = mdfld_dsi_send_mcs_long_hs(sender,
+		        jdi25x16_page_addr,
+				5, MDFLD_DSI_SEND_PACKAGE);
+		if (err) {
+			DRM_ERROR("%s: %d: Set Page Address\n",
+			__func__, __LINE__);
+			goto ic_init_err;
+		}
+		err = mdfld_dsi_send_mcs_short_hs(sender, jdi25x16_set_tear_on[0],
+				jdi25x16_set_tear_on[1], 1, 0);
+		if (err) {
+		    DRM_ERROR("%s: %d: Set tear on\n", __func__, __LINE__);
+		    goto ic_init_err;
+		}
+		err = mdfld_dsi_send_mcs_long_hs(sender,
+		        jdi25x16_tear_scanline,
+				3, MDFLD_DSI_SEND_PACKAGE);
+		if (err) {
+			DRM_ERROR("%s: %d: Set tear scanline\n",
+			__func__, __LINE__);
+			goto ic_init_err;
+		}
+		err = mdfld_dsi_send_mcs_short_hs(sender, jdi25x16_set_brightness[0],
+				jdi25x16_set_brightness[1], 1, 0);
+		if (err) {
+		    DRM_ERROR("%s: %d: Set brightness\n", __func__, __LINE__);
+		    goto ic_init_err;
+		}
+	}
+	sender->work_for_slave_panel = false;
+	return 0;
+
+ic_init_err:
+	sender->work_for_slave_panel = false;
+	err = -EIO;
+	return err;
+}
+
+int mdfld_dsi_jdi25x16_set_mode(struct mdfld_dsi_config *dsi_config)
+{
+
+	struct mdfld_dsi_pkg_sender *sender =
+		mdfld_dsi_get_pkg_sender(dsi_config);
+	int err;
+	int i = 0;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (!sender) {
+		DRM_ERROR("Failed to get DSI packet sender\n");
+		return -EINVAL;
+	}
+	for (i = 0; i < 2; i++) {
+		if (i == 0)
+			sender->work_for_slave_panel = false;
+		else
+			sender->work_for_slave_panel = true;
+		mdelay(20);
+		err = mdfld_dsi_send_gen_short_hs(sender,
+			access_protect, 0, 2,
+			MDFLD_DSI_SEND_PACKAGE);
+		if (err) {
+			DRM_ERROR("%s: %d: Set MCAP\n",
+			__func__, __LINE__);
+			goto set_mode_err;
+		}
+
+		err = mdfld_dsi_send_gen_long_hs(sender, jdi25x16_set_mode,
+				2,
+				MDFLD_DSI_SEND_PACKAGE);
+		if (err) {
+			DRM_ERROR("%s: %d: Set Mode\n", __func__, __LINE__);
+			goto set_mode_err;
+		}
+		err = mdfld_dsi_send_gen_short_hs(sender,
+			access_protect, 3, 2,
+			MDFLD_DSI_SEND_PACKAGE);
+		if (err) {
+			DRM_ERROR("%s: %d: Set MCAP\n",
+			__func__, __LINE__);
+			goto set_mode_err;
+		}
+		/* Set Display on 0x29 */
+		err = mdfld_dsi_send_mcs_short_hs(sender, set_display_on, 0, 0,
+				MDFLD_DSI_SEND_PACKAGE);
+		if (err) {
+			DRM_ERROR("%s: %d: Set Display On\n", __func__, __LINE__);
+			goto set_mode_err;
+		}
+	}
+
+        msleep(20);
+	err = mdfld_dsi_send_mcs_short_hs(sender, jdi25x16_turn_on_backlight[0],
+			jdi25x16_turn_on_backlight[1], 1, 0);
+	if (err) {
+		DRM_ERROR("%s: %d: Turn on backlight\n", __func__, __LINE__);
+		goto set_mode_err;
+	}
+
+	sender->work_for_slave_panel = false;
+	return 0;
+
+set_mode_err:
+	sender->work_for_slave_panel = false;
+	err = -EIO;
+	return err;
+}
+static
+void mdfld_dsi_jdi25x16_dsi_controller_init(struct mdfld_dsi_config *dsi_config)
+{
+	struct mdfld_dsi_hw_context *hw_ctx =
+		&dsi_config->dsi_hw_context;
+	/* Virtual channel number */
+	int mipi_vc = 0;
+	int mipi_pixel_format = 0x4;
+	/* BURST_MODE */
+
+	PSB_DEBUG_ENTRY("\n");
+
+	/*reconfig lane configuration*/
+	dsi_config->lane_count = 4;
+	dsi_config->lane_config = MDFLD_DSI_DATA_LANE_4_0;
+	hw_ctx->pll_bypass_mode = 0;
+	/* This is for 400 mhz.  Set it to 0 for 800mhz */
+	hw_ctx->cck_div = 1;
+
+	hw_ctx->mipi_control = 0;
+	hw_ctx->intr_en = 0xFFFFFFFF;
+	hw_ctx->hs_tx_timeout = 0xFFFFFF;
+	hw_ctx->lp_rx_timeout = 0xFFFFFF;
+	hw_ctx->device_reset_timer = 0xffff;
+	hw_ctx->turn_around_timeout = 0x3f;
+	hw_ctx->high_low_switch_count = 0x40;
+	hw_ctx->clk_lane_switch_time_cnt =  0x16002d;
+	hw_ctx->lp_byteclk = 0x5;
+	hw_ctx->dphy_param = 0x3c1fc51f;
+	hw_ctx->eot_disable = 0x2;
+	hw_ctx->init_count = 0xfa0;
+	hw_ctx->dbi_bw_ctrl = 0x820;
+
+	/*setup video mode format*/
+	hw_ctx->video_mode_format = 0xf;
+
+	/*set up func_prg*/
+	hw_ctx->dsi_func_prg = ((mipi_pixel_format << 7) | (mipi_vc << 3) |
+			dsi_config->lane_count);
+
+	/*setup mipi port configuration*/
+	hw_ctx->mipi = MIPI_PORT_EN | PASS_FROM_SPHY_TO_AFE |
+		dsi_config->lane_config |
+		DUAL_LINK_ENABLE | DUAL_LINK_CAPABLE;
+
+}
+
+static int mdfld_dsi_jdi25x16_detect(struct mdfld_dsi_config *dsi_config)
+{
+	int status;
+	int pipe = dsi_config->pipe;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (pipe == 0) {
+		status = MDFLD_DSI_PANEL_CONNECTED;
+	} else {
+		DRM_INFO("%s: do NOT support dual panel\n", __func__);
+		status = MDFLD_DSI_PANEL_DISCONNECTED;
+	}
+
+	return status;
+}
+
+static int mdfld_dsi_jdi25x16_power_on(struct mdfld_dsi_config *dsi_config)
+{
+	struct mdfld_dsi_pkg_sender *sender =
+		mdfld_dsi_get_pkg_sender(dsi_config);
+	int err;
+	int i;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (!sender) {
+		DRM_ERROR("Failed to get DSI packet sender\n");
+		return -EINVAL;
+	}
+	for (i = 0; i < 2; i++) {
+		if (i == 0)
+			sender->work_for_slave_panel = false;
+		else
+			sender->work_for_slave_panel = true;
+		/* Sleep Out */
+		err = mdfld_dsi_send_mcs_short_hs(sender, exit_sleep_mode, 0, 0,
+				MDFLD_DSI_SEND_PACKAGE);
+		if (err) {
+			DRM_ERROR("%s: %d: Exit Sleep Mode\n", __func__, __LINE__);
+			goto power_on_err;
+		}
+		msleep(100);
+		/* Send TURN_ON packet */
+		err = mdfld_dsi_send_dpi_spk_pkg_hs(sender, MDFLD_DSI_DPI_SPK_TURN_ON);
+		if (err) {
+			DRM_ERROR("Failed to send turn on packet\n");
+			goto power_on_err;
+		}
+	}
+	sender->work_for_slave_panel = false;
+	return 0;
+
+power_on_err:
+	sender->work_for_slave_panel = false;
+	err = -EIO;
+	return err;
+}
+
+static void __vpro2_power_ctrl(bool on)
+{
+	u8 addr, value;
+	addr = 0xad;
+	if (intel_scu_ipc_ioread8(addr, &value))
+		DRM_ERROR("%s: %d: failed to read vPro2\n", __func__, __LINE__);
+
+	/* Control vPROG2 power rail with 2.85v. */
+	if (on)
+		value |= 0x1;
+	else
+		value &= ~0x1;
+
+	if (intel_scu_ipc_iowrite8(addr, value))
+		DRM_ERROR("%s: %d: failed to write vPro2\n",
+				__func__, __LINE__);
+}
+
+static int mdfld_dsi_jdi25x16_power_off(struct mdfld_dsi_config *dsi_config)
+{
+	struct mdfld_dsi_pkg_sender *sender =
+		mdfld_dsi_get_pkg_sender(dsi_config);
+	int err;
+	int i;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (!sender) {
+		DRM_ERROR("Failed to get DSI packet sender\n");
+		return -EINVAL;
+	}
+	for (i = 0; i < 2; i++) {
+		if (i == 0)
+			sender->work_for_slave_panel = false;
+		else
+			sender->work_for_slave_panel = true;
+		/*send SHUT_DOWN packet */
+		err = mdfld_dsi_send_dpi_spk_pkg_hs(sender,
+				MDFLD_DSI_DPI_SPK_SHUT_DOWN);
+		if (err) {
+			DRM_ERROR("Failed to send turn off packet\n");
+			goto power_off_err;
+		}
+		/* Set Display off */
+		err = mdfld_dsi_send_mcs_short_hs(sender, set_display_off, 0, 0,
+				MDFLD_DSI_SEND_PACKAGE);
+		if (err) {
+			DRM_ERROR("%s: %d: Set Display On\n", __func__, __LINE__);
+			goto power_off_err;
+		}
+		msleep(20);
+		/* Sleep In */
+		err = mdfld_dsi_send_mcs_short_hs(sender, enter_sleep_mode, 0, 0,
+				MDFLD_DSI_SEND_PACKAGE);
+		if (err) {
+			DRM_ERROR("%s: %d: Exit Sleep Mode\n", __func__, __LINE__);
+			goto power_off_err;
+		}
+	}
+	sender->work_for_slave_panel = false;
+	msleep(80);
+	return 0;
+
+power_off_err:
+	sender->work_for_slave_panel = false;
+	err = -EIO;
+	return err;
+}
+
+static int mdfld_dsi_jdi25x16_panel_reset(struct mdfld_dsi_config *dsi_config)
+{
+	int ret;
+	__vpro2_power_ctrl(true);
+	usleep_range(2000, 2500);
+
+	if (bias_en_gpio == 0) {
+		bias_en_gpio = 189;
+		ret = gpio_request(bias_en_gpio, "bias_enable");
+		if (ret) {
+			DRM_ERROR("Faild to request bias_enable gpio\n");
+			return -EINVAL;
+		}
+		gpio_direction_output(bias_en_gpio, 0);
+	}
+	if (mipi_reset_gpio == 0) {
+		ret = get_gpio_by_name("disp0_rst");
+		if (ret < 0) {
+			DRM_ERROR("Faild to get panel reset gpio, " \
+				  "use default reset pin\n");
+			return 0;
+		}
+		mipi_reset_gpio = ret;
+		ret = gpio_request(mipi_reset_gpio, "mipi_display");
+		if (ret) {
+			DRM_ERROR("Faild to request panel reset gpio\n");
+			return 0;
+		}
+		gpio_direction_output(mipi_reset_gpio, 0);
+	}
+	gpio_direction_output(bias_en_gpio, 0);
+	gpio_direction_output(mipi_reset_gpio, 0);
+	gpio_set_value_cansleep(bias_en_gpio, 0);
+	gpio_set_value_cansleep(mipi_reset_gpio, 0);
+	usleep_range(2000, 2500);
+	gpio_set_value_cansleep(bias_en_gpio, 1);
+	usleep_range(2000, 2500);
+	gpio_set_value_cansleep(mipi_reset_gpio, 1);
+	usleep_range(2000, 2500);
+	return 0;
+}
+
+static struct drm_display_mode *jdi25x16_vid_get_config_mode(void)
+{
+	struct drm_display_mode *mode;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+	if (!mode)
+		return NULL;
+
+	mode->hdisplay = 2560;
+
+	mode->hsync_start = mode->hdisplay + 8;
+	mode->hsync_end = mode->hsync_start + 20;
+	mode->htotal = mode->hsync_end + 32;
+
+	mode->vdisplay = 1600;
+	mode->vsync_start = mode->vdisplay + 12;
+	mode->vsync_end = mode->vsync_start + 4;
+	mode->vtotal = mode->vsync_end + 4;
+
+	mode->vrefresh = 60;
+	mode->clock =  mode->vrefresh * mode->vtotal *
+		mode->htotal / 1000;
+
+	drm_mode_set_name(mode);
+	drm_mode_set_crtcinfo(mode, 0);
+
+	mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+	return mode;
+}
+
+static int mdfld_dsi_jdi25x16_set_brightness(struct mdfld_dsi_config *dsi_config,
+		int level)
+{
+	return 0;
+}
+
+static void jdi25x16_vid_get_panel_info(int pipe, struct panel_info *pi)
+{
+	if (!pi)
+		return;
+
+	if (pipe == 0) {
+		pi->width_mm = 192;
+		pi->height_mm = 120;
+	}
+
+	return;
+}
+
+void jdi25x16_vid_init(struct drm_device *dev, struct panel_funcs *p_funcs)
+{
+	PSB_DEBUG_ENTRY("\n");
+
+	p_funcs->get_config_mode = jdi25x16_vid_get_config_mode;
+	p_funcs->get_panel_info = jdi25x16_vid_get_panel_info;
+	p_funcs->reset = mdfld_dsi_jdi25x16_panel_reset;
+	p_funcs->drv_ic_init = mdfld_dsi_jdi25x16_ic_init;
+	p_funcs->dsi_controller_init = mdfld_dsi_jdi25x16_dsi_controller_init;
+	p_funcs->detect = mdfld_dsi_jdi25x16_detect;
+	p_funcs->power_on = mdfld_dsi_jdi25x16_power_on;
+	p_funcs->power_off = mdfld_dsi_jdi25x16_power_off;
+	p_funcs->set_brightness = mdfld_dsi_jdi25x16_set_brightness;
+	p_funcs->drv_set_panel_mode = mdfld_dsi_jdi25x16_set_mode;
+}
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/jdi_cmd.c b/drivers/external_drivers/intel_media/display/tng/drv/jdi_cmd.c
new file mode 100644
index 0000000..17af8cb
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/jdi_cmd.c
@@ -0,0 +1,643 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Faxing Lu <faxing.lu@intel.com>
+ */
+
+#include "mdfld_dsi_dbi.h"
+#include "mdfld_dsi_esd.h"
+#include <asm/intel_scu_pmic.h>
+#include <asm/intel_mid_rpmsg.h>
+#include <asm/intel_mid_remoteproc.h>
+
+#include "displays/jdi_cmd.h"
+
+/* The register to control secure I2C FLIS pin */
+#define SECURE_I2C_FLIS_REG	0xFF0C1D30
+
+static int mipi_reset_gpio;
+static int bias_en_gpio;
+
+static u8 jdi_mcs_clumn_addr[] = {
+			0x2a, 0x00, 0x00, 0x02, 0xcf};
+static u8 jdi_mcs_page_addr[] = {
+			0x2b, 0x00, 0x00, 0x04, 0xff};
+static u8 jdi_timing_control[] = {
+			0xc6, 0x6d, 0x05, 0x60, 0x05,
+			0x60, 0x01, 0x01, 0x01, 0x02,
+			0x01, 0x02, 0x01, 0x01, 0x01,
+			0x01, 0x01, 0x01, 0x05, 0x15,
+			0x09
+			};
+static
+int jdi_cmd_drv_ic_init(struct mdfld_dsi_config *dsi_config)
+{
+	struct mdfld_dsi_pkg_sender *sender
+		= mdfld_dsi_get_pkg_sender(dsi_config);
+	int err = 0;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (!sender) {
+		DRM_ERROR("Cannot get sender\n");
+		return -EINVAL;
+	}
+	err = mdfld_dsi_send_mcs_short_hs(sender,
+			exit_sleep_mode, 0, 0,
+			MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Exit Sleep Mode\n",
+		__func__, __LINE__);
+		goto ic_init_err;
+	}
+
+	msleep(120);
+	err = mdfld_dsi_send_mcs_short_hs(sender,
+			write_display_brightness, 0x0, 1,
+			MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Set Brightness\n",
+		__func__, __LINE__);
+		goto ic_init_err;
+	}
+
+	err = mdfld_dsi_send_mcs_short_hs(sender,
+			write_ctrl_display, 0x24, 1,
+			MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Write Control Display\n",
+		__func__, __LINE__);
+		goto ic_init_err;
+	}
+
+	err = mdfld_dsi_send_mcs_short_hs(sender,
+			write_ctrl_cabc, STILL_IMAGE, 1,
+			MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Write Control CABC\n",
+		__func__, __LINE__);
+		goto ic_init_err;
+	}
+
+	if (!IS_ANN(dev)) {
+		err = mdfld_dsi_send_mcs_short_hs(sender,
+				write_cabc_min_bright, 51, 1,
+				MDFLD_DSI_SEND_PACKAGE);
+		if (err) {
+			DRM_ERROR("%s: %d: Write CABC minimum brightness\n",
+					__func__, __LINE__);
+			goto ic_init_err;
+		}
+	}
+
+	err = mdfld_dsi_send_gen_short_hs(sender,
+			access_protect, 4, 2,
+			MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Manufacture command protect on\n",
+				__func__, __LINE__);
+		goto ic_init_err;
+	}
+
+	err = mdfld_dsi_send_gen_long_lp(sender,
+			jdi_timing_control,
+			21, MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Set panel timing\n",
+				__func__, __LINE__);
+		goto ic_init_err;
+	}
+	msleep(20);
+
+	err = mdfld_dsi_send_mcs_short_hs(sender,
+			set_tear_on, 0x00, 1,
+			MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Set Tear On\n",
+		__func__, __LINE__);
+		goto ic_init_err;
+	}
+
+	err = mdfld_dsi_send_mcs_long_hs(sender,
+			jdi_mcs_clumn_addr,
+			5, MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Set Clumn Address\n",
+		__func__, __LINE__);
+		goto ic_init_err;
+	}
+
+	err = mdfld_dsi_send_mcs_long_hs(sender,
+			jdi_mcs_page_addr,
+			5, MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Set Page Address\n",
+		__func__, __LINE__);
+		goto ic_init_err;
+	}
+
+	return 0;
+
+ic_init_err:
+	err = -EIO;
+	return err;
+}
+
+static
+void jdi_cmd_controller_init(
+		struct mdfld_dsi_config *dsi_config)
+{
+
+	struct mdfld_dsi_hw_context *hw_ctx =
+				&dsi_config->dsi_hw_context;
+#ifdef ENABLE_CSC_GAMMA /*FIXME*/
+	struct drm_device *dev = dsi_config->dev;
+
+	struct csc_setting csc = {
+			.pipe = 0,
+			.type = CSC_REG_SETTING,
+			.enable_state = true,
+			.data_len = CSC_REG_COUNT,
+			.data.csc_reg_data = {
+			0xFFB0424, 0xFDF, 0x4320FF1,
+			0xFDC, 0xFF50FF5, 0x415}
+		 };
+	struct gamma_setting gamma = {
+		.pipe = 0,
+		.type = GAMMA_REG_SETTING,
+		.enable_state = true,
+		.data_len = GAMMA_10_BIT_TABLE_COUNT,
+		.gamma_tableX100 = {
+			0x000000, 0x030303, 0x050505, 0x070707,
+			0x090909, 0x0C0C0C, 0x0E0E0E, 0x101010,
+			0x121212, 0x141414, 0x171717, 0x191919,
+			0x1B1B1B, 0x1D1D1D, 0x1F1F1F, 0x212121,
+			0x232323, 0x252525, 0x282828, 0x2A2A2A,
+			0x2C2C2C, 0x2E2E2E, 0x303030, 0x323232,
+			0x343434, 0x363636, 0x383838, 0x3A3A3A,
+			0x3C3C3C, 0x3E3E3E, 0x404040, 0x424242,
+			0x444444, 0x464646, 0x484848, 0x4A4A4A,
+			0x4C4C4C, 0x4E4E4E, 0x505050, 0x525252,
+			0x545454, 0x565656, 0x585858, 0x5A5A5A,
+			0x5C5C5C, 0x5E5E5E, 0x606060, 0x626262,
+			0x646464, 0x666666, 0x686868, 0x6A6A6A,
+			0x6C6C6C, 0x6E6E6E, 0x707070, 0x727272,
+			0x747474, 0x767676, 0x787878, 0x7A7A7A,
+			0x7C7C7C, 0x7E7E7E, 0x808080, 0x828282,
+			0x848484, 0x868686, 0x888888, 0x8A8A8A,
+			0x8C8C8C, 0x8E8E8E, 0x909090, 0x929292,
+			0x949494, 0x969696, 0x989898, 0x999999,
+			0x9B9B9B, 0x9D9D9D, 0x9F9F9F, 0xA1A1A1,
+			0xA3A3A3, 0xA5A5A5, 0xA7A7A7, 0xA9A9A9,
+			0xABABAB, 0xADADAD, 0xAFAFAF, 0xB1B1B1,
+			0xB3B3B3, 0xB5B5B5, 0xB6B6B6, 0xB8B8B8,
+			0xBABABA, 0xBCBCBC, 0xBEBEBE, 0xC0C0C0,
+			0xC2C2C2, 0xC4C4C4, 0xC6C6C6, 0xC8C8C8,
+			0xCACACA, 0xCCCCCC, 0xCECECE, 0xCFCFCF,
+			0xD1D1D1, 0xD3D3D3, 0xD5D5D5, 0xD7D7D7,
+			0xD9D9D9, 0xDBDBDB, 0xDDDDDD, 0xDFDFDF,
+			0xE1E1E1, 0xE3E3E3, 0xE4E4E4, 0xE6E6E6,
+			0xE8E8E8, 0xEAEAEA, 0xECECEC, 0xEEEEEE,
+			0xF0F0F0, 0xF2F2F2, 0xF4F4F4, 0xF6F6F6,
+			0xF7F7F7, 0xF9F9F9, 0xFBFBFB, 0xFDFDFD}
+	 };
+#endif
+
+	PSB_DEBUG_ENTRY("\n");
+
+	/*reconfig lane configuration*/
+	dsi_config->lane_count = 3;
+	dsi_config->lane_config = MDFLD_DSI_DATA_LANE_4_0;
+	/* FIXME: enable CSC and GAMMA */
+	/*dsi_config->enable_gamma_csc = ENABLE_GAMMA | ENABLE_CSC;*/
+	/* This is for 400 mhz.  Set it to 0 for 800mhz */
+	hw_ctx->cck_div = 1;
+	hw_ctx->pll_bypass_mode = 0;
+
+	if (IS_ANN(dev)) {
+		hw_ctx->mipi_control = 0x18;
+		hw_ctx->intr_en = 0xFFFFFFFF;
+		hw_ctx->hs_tx_timeout = 0xFFFFFF;
+		hw_ctx->lp_rx_timeout = 0xFFFFFF;
+		hw_ctx->device_reset_timer = 0xff;
+		hw_ctx->turn_around_timeout = 0xffff;
+		hw_ctx->high_low_switch_count = 0x20;
+		hw_ctx->clk_lane_switch_time_cnt = 0x21000e;
+		hw_ctx->lp_byteclk = 0x4;
+		hw_ctx->dphy_param = 0x1b104315;
+		hw_ctx->eot_disable = 0x1;
+		hw_ctx->init_count = 0x7d0;
+		hw_ctx->dbi_bw_ctrl = 1390;
+		hw_ctx->hs_ls_dbi_enable = 0x0;
+		hw_ctx->dsi_func_prg = ((DBI_DATA_WIDTH_OPT2 << 13) |
+				dsi_config->lane_count);
+		hw_ctx->mipi = SEL_FLOPPED_HSTX	| PASS_FROM_SPHY_TO_AFE |
+			BANDGAP_CHICKEN_BIT | TE_TRIGGER_GPIO_PIN;
+	} else {
+		hw_ctx->mipi_control = 0x0;
+		hw_ctx->intr_en = 0xFFFFFFFF;
+		hw_ctx->hs_tx_timeout = 0xFFFFFF;
+		hw_ctx->lp_rx_timeout = 0xFFFFFF;
+		hw_ctx->device_reset_timer = 0xffff;
+		hw_ctx->turn_around_timeout = 0x1a;
+		hw_ctx->high_low_switch_count = 0x21;
+		hw_ctx->clk_lane_switch_time_cnt = 0x21000f;
+		hw_ctx->lp_byteclk = 0x5;
+		hw_ctx->dphy_param = 0x25155b1e;
+		hw_ctx->eot_disable = 0x3;
+		hw_ctx->init_count = 0xf0;
+		hw_ctx->dbi_bw_ctrl = 1390;
+		hw_ctx->hs_ls_dbi_enable = 0x0;
+		hw_ctx->dsi_func_prg = ((DBI_DATA_WIDTH_OPT2 << 13) |
+				dsi_config->lane_count);
+		hw_ctx->mipi = PASS_FROM_SPHY_TO_AFE |
+			BANDGAP_CHICKEN_BIT |
+			TE_TRIGGER_GPIO_PIN;
+	}
+	hw_ctx->video_mode_format = 0xf;
+
+#ifdef ENABLE_CSC_GAMMA /*FIXME*/
+	if (dsi_config->enable_gamma_csc & ENABLE_CSC) {
+		/* setting the tuned csc setting */
+		drm_psb_enable_color_conversion = 1;
+		mdfld_intel_crtc_set_color_conversion(dev, &csc);
+	}
+
+	if (dsi_config->enable_gamma_csc & ENABLE_GAMMA) {
+		/* setting the tuned gamma setting */
+		drm_psb_enable_gamma = 1;
+		mdfld_intel_crtc_set_gamma(dev, &gamma);
+	}
+#endif
+
+}
+static
+int jdi_cmd_panel_connection_detect(
+	struct mdfld_dsi_config *dsi_config)
+{
+	int status;
+	int pipe = dsi_config->pipe;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (pipe == 0) {
+		status = MDFLD_DSI_PANEL_CONNECTED;
+	} else {
+		DRM_INFO("%s: do NOT support dual panel\n",
+		__func__);
+		status = MDFLD_DSI_PANEL_DISCONNECTED;
+	}
+
+	return status;
+}
+
+static
+int jdi_cmd_power_on(
+	struct mdfld_dsi_config *dsi_config)
+{
+
+	struct mdfld_dsi_pkg_sender *sender =
+		mdfld_dsi_get_pkg_sender(dsi_config);
+	int err = 0;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (!sender) {
+		DRM_ERROR("Failed to get DSI packet sender\n");
+		return -EINVAL;
+	}
+
+	err = mdfld_dsi_send_mcs_short_hs(sender,
+			set_address_mode, 0x0, 1,
+			MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Set Address Mode\n",
+		__func__, __LINE__);
+		goto power_err;
+	}
+	usleep_range(20000, 20100);
+
+	err = mdfld_dsi_send_mcs_short_hs(sender,
+			set_pixel_format, 0x77, 1,
+			MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Set Pixel format\n",
+		__func__, __LINE__);
+		goto power_err;
+	}
+
+	/*turn on display*/
+	err = mdfld_dsi_send_dcs(sender,
+		 set_display_on,
+		 NULL,
+		 0,
+		 CMD_DATA_SRC_SYSTEM_MEM,
+		 MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("faild to set_display_on mode\n");
+		goto power_err;
+	}
+	usleep_range(20000, 20100);
+
+power_err:
+	return err;
+}
+
+static void __vpro2_power_ctrl(bool on)
+{
+	u8 addr, value;
+	addr = 0xad;
+	if (intel_scu_ipc_ioread8(addr, &value))
+		DRM_ERROR("%s: %d: failed to read vPro2\n",
+		__func__, __LINE__);
+
+	/* Control vPROG2 power rail with 2.85v. */
+	if (on)
+		value |= 0x1;
+	else
+		value &= ~0x1;
+
+	if (intel_scu_ipc_iowrite8(addr, value))
+		DRM_ERROR("%s: %d: failed to write vPro2\n",
+				__func__, __LINE__);
+}
+
+static int jdi_cmd_power_off(
+		struct mdfld_dsi_config *dsi_config)
+{
+	struct mdfld_dsi_pkg_sender *sender =
+		mdfld_dsi_get_pkg_sender(dsi_config);
+	int err;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (!sender) {
+		DRM_ERROR("Failed to get DSI packet sender\n");
+		return -EINVAL;
+	}
+
+	err = mdfld_dsi_send_mcs_short_hs(sender,
+			set_display_off, 0, 0,
+			MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Set Display Off\n",
+		__func__, __LINE__);
+		goto power_off_err;
+	}
+	usleep_range(20000, 20100);
+
+	err = mdfld_dsi_send_mcs_short_hs(sender,
+			set_tear_off, 0, 0,
+			MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Set Tear Off\n",
+		__func__, __LINE__);
+		goto power_off_err;
+	}
+
+	err = mdfld_dsi_send_mcs_short_hs(sender,
+			enter_sleep_mode, 0, 0,
+			MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Enter Sleep Mode\n",
+		__func__, __LINE__);
+		goto power_off_err;
+	}
+
+	msleep(60);
+
+	err = mdfld_dsi_send_gen_short_hs(sender,
+		access_protect, 4, 2,
+		MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Set Access Protect\n",
+		__func__, __LINE__);
+		goto power_off_err;
+	}
+
+	err = mdfld_dsi_send_gen_short_hs(sender,
+		low_power_mode, 1, 2,
+		MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Set Low Power Mode\n",
+		__func__, __LINE__);
+		goto power_off_err;
+	}
+	if (bias_en_gpio)
+		gpio_set_value_cansleep(bias_en_gpio, 0);
+	usleep_range(1000, 1500);
+	return 0;
+power_off_err:
+	err = -EIO;
+	return err;
+}
+
+static
+int jdi_cmd_set_brightness(
+		struct mdfld_dsi_config *dsi_config,
+		int level)
+{
+	struct mdfld_dsi_pkg_sender *sender =
+		mdfld_dsi_get_pkg_sender(dsi_config);
+	u8 duty_val = 0;
+
+	PSB_DEBUG_ENTRY("level = %d\n", level);
+
+	if (!sender) {
+		DRM_ERROR("Failed to get DSI packet sender\n");
+		return -EINVAL;
+	}
+
+	duty_val = (0xFF * level) / 255;
+	mdfld_dsi_send_mcs_short_hs(sender,
+			write_display_brightness, duty_val, 1,
+			MDFLD_DSI_SEND_PACKAGE);
+	return 0;
+}
+
+static
+void _get_panel_reset_gpio(void)
+{
+	int ret = 0;
+	if (mipi_reset_gpio == 0) {
+		ret = get_gpio_by_name("disp0_rst");
+		if (ret < 0) {
+			DRM_ERROR("Faild to get panel reset gpio, " \
+				  "use default reset pin\n");
+			return;
+		}
+		mipi_reset_gpio = ret;
+		ret = gpio_request(mipi_reset_gpio, "mipi_display");
+		if (ret) {
+			DRM_ERROR("Faild to request panel reset gpio\n");
+			return;
+		}
+		gpio_direction_output(mipi_reset_gpio, 0);
+	}
+}
+static
+int jdi_cmd_panel_reset(
+		struct mdfld_dsi_config *dsi_config)
+{
+	int ret = 0;
+	u8 *vaddr = NULL, *vaddr1 = NULL;
+	int reg_value_scl = 0;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	/* Because when reset touchscreen panel, touchscreen will pull i2c bus
+	 * to low, sometime this operation will cause i2c bus enter into wrong
+	 * status, so before reset, switch i2c scl pin */
+	vaddr1 = ioremap(SECURE_I2C_FLIS_REG, 4);
+	reg_value_scl = ioread32(vaddr1);
+	reg_value_scl &= ~0x1000;
+	rpmsg_send_generic_raw_command(RP_INDIRECT_WRITE, 0,
+					(u8 *)&reg_value_scl, 4,
+					NULL, 0,
+					SECURE_I2C_FLIS_REG, 0);
+
+	__vpro2_power_ctrl(true);
+	usleep_range(2000, 2500);
+
+	if (bias_en_gpio == 0) {
+		bias_en_gpio = 189;
+		ret = gpio_request(bias_en_gpio, "bias_enable");
+		if (ret) {
+			DRM_ERROR("Faild to request bias_enable gpio\n");
+			return -EINVAL;
+		}
+		gpio_direction_output(bias_en_gpio, 0);
+	}
+
+	_get_panel_reset_gpio();
+
+	gpio_direction_output(bias_en_gpio, 0);
+	gpio_direction_output(mipi_reset_gpio, 0);
+	gpio_set_value_cansleep(bias_en_gpio, 0);
+	gpio_set_value_cansleep(mipi_reset_gpio, 0);
+	usleep_range(2000, 2500);
+	gpio_set_value_cansleep(bias_en_gpio, 1);
+	usleep_range(2000, 2500);
+	gpio_set_value_cansleep(mipi_reset_gpio, 1);
+	usleep_range(3000, 3500);
+	vaddr = ioremap(0xff0c2d00, 0x60);
+	iowrite32(0x3221, vaddr + 0x1c);
+	usleep_range(2000, 2500);
+	iounmap(vaddr);
+
+	/* switch i2c scl pin back */
+	reg_value_scl |= 0x1000;
+	rpmsg_send_generic_raw_command(RP_INDIRECT_WRITE, 0,
+					(u8 *)&reg_value_scl, 4,
+					NULL, 0,
+					SECURE_I2C_FLIS_REG, 0);
+	iounmap(vaddr1);
+	return 0;
+}
+
+static
+int jdi_cmd_exit_deep_standby(
+		struct mdfld_dsi_config *dsi_config)
+{
+	PSB_DEBUG_ENTRY("\n");
+
+	if (bias_en_gpio)
+		gpio_set_value_cansleep(bias_en_gpio, 1);
+	_get_panel_reset_gpio();
+	gpio_direction_output(mipi_reset_gpio, 0);
+
+	gpio_set_value_cansleep(mipi_reset_gpio, 0);
+	usleep_range(1000, 1500);
+	gpio_set_value_cansleep(mipi_reset_gpio, 1);
+	usleep_range(3000, 3500);
+	return 0;
+}
+
+static
+struct drm_display_mode *jdi_cmd_get_config_mode(void)
+{
+	struct drm_display_mode *mode;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+	if (!mode)
+		return NULL;
+
+	mode->hdisplay = 720;
+	mode->hsync_start = 816;
+	mode->hsync_end = 818;
+	mode->htotal = 920;
+
+	mode->vdisplay = 1280;
+	mode->vsync_start = 1288;
+	mode->vsync_end = 1296;
+	mode->vtotal = 1304;
+
+	mode->vrefresh = 60;
+	mode->clock =  mode->vrefresh * mode->vtotal * mode->htotal / 1000;
+	mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+	drm_mode_set_name(mode);
+	drm_mode_set_crtcinfo(mode, 0);
+
+	return mode;
+}
+
+static
+void jdi_cmd_get_panel_info(int pipe,
+		struct panel_info *pi)
+{
+	PSB_DEBUG_ENTRY("\n");
+
+	if (pipe == 0) {
+		pi->width_mm = 56;
+		pi->height_mm = 99;
+	}
+}
+
+void jdi_cmd_init(struct drm_device *dev,
+		struct panel_funcs *p_funcs)
+{
+	if (!dev || !p_funcs) {
+		DRM_ERROR("Invalid parameters\n");
+		return;
+	}
+
+	PSB_DEBUG_ENTRY("\n");
+	p_funcs->reset = jdi_cmd_panel_reset;
+	p_funcs->power_on = jdi_cmd_power_on;
+	p_funcs->power_off = jdi_cmd_power_off;
+	p_funcs->drv_ic_init = jdi_cmd_drv_ic_init;
+	p_funcs->get_config_mode = jdi_cmd_get_config_mode;
+	p_funcs->get_panel_info = jdi_cmd_get_panel_info;
+	p_funcs->dsi_controller_init =
+			jdi_cmd_controller_init;
+	p_funcs->detect =
+			jdi_cmd_panel_connection_detect;
+	p_funcs->set_brightness =
+			jdi_cmd_set_brightness;
+	p_funcs->exit_deep_standby =
+			jdi_cmd_exit_deep_standby;
+
+}
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/jdi_vid.c b/drivers/external_drivers/intel_media/display/tng/drv/jdi_vid.c
new file mode 100644
index 0000000..4a804e4
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/jdi_vid.c
@@ -0,0 +1,500 @@
+/*
+ * Copyright (C) 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Austin Hu <austin.hu@intel.com>
+ */
+
+#include <asm/intel_scu_pmic.h>
+#include <asm/intel_mid_rpmsg.h>
+#include <asm/intel_mid_remoteproc.h>
+
+#include "displays/jdi_vid.h"
+
+/* The register to control secure I2C FLIS pin */
+#define SECURE_I2C_FLIS_REG	0xFF0C1D30
+
+static int mipi_reset_gpio;
+static int bias_en_gpio;
+
+static u8 jdi_set_address_mode[] = {0x36, 0x00, 0x00, 0x00};
+static u8 jdi_write_display_brightness[] = {0x51, 0x0f, 0xff, 0x00};
+
+static u8 jdi_mcs_clumn_addr[] = {0x2a, 0x00, 0x00, 0x02, 0xcf};
+static u8 jdi_mcs_page_addr[] = {0x2b, 0x00, 0x00, 0x04, 0xff};
+static u8 jdi_set_mode[] = {0xb3, 0x35};
+int mdfld_dsi_jdi_ic_init(struct mdfld_dsi_config *dsi_config)
+{
+	struct mdfld_dsi_pkg_sender *sender
+		= mdfld_dsi_get_pkg_sender(dsi_config);
+	int err = 0;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (!sender) {
+		DRM_ERROR("Cannot get sender\n");
+		return -EINVAL;
+	}
+
+	/* Set Address Mode */
+	err = mdfld_dsi_send_mcs_long_hs(sender, jdi_set_address_mode,
+			4,
+			MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Set Address Mode\n", __func__, __LINE__);
+		goto ic_init_err;
+	}
+
+	/* Set Pixel format */
+	err = mdfld_dsi_send_mcs_short_hs(sender, set_pixel_format, 0x70, 1,
+			MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Set Pixel format\n", __func__, __LINE__);
+		goto ic_init_err;
+	}
+
+	/* change "ff0f" according to the brightness desired. */
+	err = mdfld_dsi_send_mcs_long_hs(sender, jdi_write_display_brightness,
+			4, MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Set Brightness\n", __func__, __LINE__);
+		goto ic_init_err;
+	}
+
+    /* Write control CABC */
+	err = mdfld_dsi_send_mcs_short_hs(sender, write_ctrl_cabc, STILL_IMAGE,
+			1, MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Write Control CABC\n", __func__, __LINE__);
+		goto ic_init_err;
+	}
+	err = mdfld_dsi_send_mcs_long_hs(sender,jdi_mcs_clumn_addr,
+			5, MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Set Clumn Address\n",__func__, __LINE__);
+		goto ic_init_err;
+	}
+
+	err = mdfld_dsi_send_mcs_long_hs(sender,jdi_mcs_page_addr,
+			5, MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Set Page Address\n",__func__, __LINE__);
+		goto ic_init_err;
+	}
+	return 0;
+
+ic_init_err:
+	err = -EIO;
+	return err;
+}
+
+static
+void mdfld_dsi_jdi_dsi_controller_init(struct mdfld_dsi_config *dsi_config)
+{
+	struct mdfld_dsi_hw_context *hw_ctx =
+		&dsi_config->dsi_hw_context;
+	/* Virtual channel number */
+	int mipi_vc = 0;
+	int mipi_pixel_format = 0x4;
+	/* BURST_MODE */
+	int mipi_mode = 0x3;
+	/* IP_TG_CONFIG */
+	int ip_tg_config = 0x4;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	/*reconfig lane configuration*/
+	dsi_config->lane_count = 3;
+	dsi_config->lane_config = MDFLD_DSI_DATA_LANE_4_0;
+	hw_ctx->pll_bypass_mode = 0;
+	/* This is for 400 mhz.  Set it to 0 for 800mhz */
+	hw_ctx->cck_div = 1;
+
+	hw_ctx->mipi_control = 0x18;
+	hw_ctx->intr_en = 0xFFFFFFFF;
+	hw_ctx->hs_tx_timeout = 0xFFFFFF;
+	hw_ctx->lp_rx_timeout = 0xFFFFFF;
+	hw_ctx->turn_around_timeout = 0xFFFF;
+	hw_ctx->device_reset_timer = 0xFF;
+	hw_ctx->high_low_switch_count = 0x20;
+	hw_ctx->clk_lane_switch_time_cnt = 0x0020000E;
+	hw_ctx->dbi_bw_ctrl = 0x0;
+	hw_ctx->eot_disable = 0x0;
+	hw_ctx->init_count = 0x7D0;
+	hw_ctx->lp_byteclk = 0x4;
+	hw_ctx->dphy_param = 0x1B0F4115;
+
+	/*setup video mode format*/
+	hw_ctx->video_mode_format = mipi_mode | ip_tg_config;
+
+	/*set up func_prg*/
+	hw_ctx->dsi_func_prg = ((mipi_pixel_format << 7) | (mipi_vc << 3) |
+			dsi_config->lane_count);
+
+	/*setup mipi port configuration*/
+	hw_ctx->mipi = MIPI_PORT_EN | PASS_FROM_SPHY_TO_AFE |
+		BANDGAP_CHICKEN_BIT | dsi_config->lane_config;
+}
+
+static int mdfld_dsi_jdi_detect(struct mdfld_dsi_config *dsi_config)
+{
+	int status;
+	struct drm_device *dev = dsi_config->dev;
+	struct mdfld_dsi_hw_registers *regs = &dsi_config->regs;
+	u32 dpll_val, device_ready_val;
+	int pipe = dsi_config->pipe;
+	u32 power_island = 0;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (pipe == 0) {
+		/*
+		 * FIXME: WA to detect the panel connection status, and need to
+		 * implement detection feature with get_power_mode DSI command.
+		 */
+		power_island = pipe_to_island(pipe);
+
+		if (!power_island_get(power_island)) {
+			DRM_ERROR("Failed to turn on power island\n");
+			return -EAGAIN;
+		}
+
+		dpll_val = REG_READ(regs->dpll_reg);
+		device_ready_val = REG_READ(regs->device_ready_reg);
+		if ((device_ready_val & DSI_DEVICE_READY) &&
+				(dpll_val & DPLL_VCO_ENABLE)) {
+			dsi_config->dsi_hw_context.panel_on = true;
+		} else {
+			dsi_config->dsi_hw_context.panel_on = false;
+			DRM_INFO("%s: panel is not detected!\n", __func__);
+		}
+
+		status = MDFLD_DSI_PANEL_CONNECTED;
+
+		power_island_put(power_island);
+	} else {
+		DRM_INFO("%s: do NOT support dual panel\n", __func__);
+		status = MDFLD_DSI_PANEL_DISCONNECTED;
+	}
+
+	return status;
+}
+
+static int mdfld_dsi_jdi_power_on(struct mdfld_dsi_config *dsi_config)
+{
+	struct mdfld_dsi_pkg_sender *sender =
+		mdfld_dsi_get_pkg_sender(dsi_config);
+	int err;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (!sender) {
+		DRM_ERROR("Failed to get DSI packet sender\n");
+		return -EINVAL;
+	}
+
+	/* Sleep Out */
+	err = mdfld_dsi_send_mcs_short_hs(sender, exit_sleep_mode, 0, 0,
+			MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Exit Sleep Mode\n", __func__, __LINE__);
+		goto power_on_err;
+	}
+	/* Wait for 6 frames after exit_sleep_mode. */
+	msleep(100);
+
+	err = mdfld_dsi_send_gen_short_hs(sender,access_protect, 0x4, 2,
+                        MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Set MCAP\n",__func__, __LINE__);
+		goto power_on_err;
+	}
+
+	err = mdfld_dsi_send_gen_long_hs(sender, jdi_set_mode,2,
+			MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Set Mode\n", __func__, __LINE__);
+		goto power_on_err;
+	}
+
+	err = mdfld_dsi_send_gen_short_hs(sender,access_protect, 0x3, 2,
+                        MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Set MCAP\n",__func__, __LINE__);
+		goto power_on_err;
+	}
+
+	/* Set Display on */
+	err = mdfld_dsi_send_mcs_short_hs(sender, set_display_on, 0, 0,
+			MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Set Display On\n", __func__, __LINE__);
+		goto power_on_err;
+	}
+	/* Wait for 1 frame after set_display_on. */
+	msleep(20);
+
+	/* Send TURN_ON packet */
+	err = mdfld_dsi_send_dpi_spk_pkg_hs(sender, MDFLD_DSI_DPI_SPK_TURN_ON);
+	if (err) {
+		DRM_ERROR("Failed to send turn on packet\n");
+		goto power_on_err;
+	}
+
+	/* Write control display */
+	err = mdfld_dsi_send_mcs_short_hs(sender, write_ctrl_display, 0x24, 1,
+			MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Write Control Display\n", __func__,
+				__LINE__);
+		goto power_on_err;
+	}
+
+	return 0;
+
+power_on_err:
+	err = -EIO;
+	return err;
+}
+
+static void __vpro2_power_ctrl(bool on)
+{
+	u8 addr, value;
+	addr = 0xad;
+	if (intel_scu_ipc_ioread8(addr, &value))
+		DRM_ERROR("%s: %d: failed to read vPro2\n", __func__, __LINE__);
+
+	/* Control vPROG2 power rail with 2.85v. */
+	if (on)
+		value |= 0x1;
+	else
+		value &= ~0x1;
+
+	if (intel_scu_ipc_iowrite8(addr, value))
+		DRM_ERROR("%s: %d: failed to write vPro2\n",
+				__func__, __LINE__);
+}
+
+static int mdfld_dsi_jdi_power_off(struct mdfld_dsi_config *dsi_config)
+{
+	struct mdfld_dsi_pkg_sender *sender =
+		mdfld_dsi_get_pkg_sender(dsi_config);
+	int err;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (!sender) {
+		DRM_ERROR("Failed to get DSI packet sender\n");
+		return -EINVAL;
+	}
+
+	/*send SHUT_DOWN packet */
+	err = mdfld_dsi_send_dpi_spk_pkg_hs(sender,
+			MDFLD_DSI_DPI_SPK_SHUT_DOWN);
+	if (err) {
+		DRM_ERROR("Failed to send turn off packet\n");
+		goto power_off_err;
+	}
+	/* According HW DSI spec, need to wait for 100ms. */
+	msleep(100);
+
+	/* Set Display off */
+	err = mdfld_dsi_send_mcs_short_hs(sender, set_display_off, 0, 0,
+			MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Set Display On\n", __func__, __LINE__);
+		goto power_off_err;
+	}
+	/* Wait for 1 frame after set_display_on. */
+	msleep(20);
+
+	/* Sleep In */
+	err = mdfld_dsi_send_mcs_short_hs(sender, enter_sleep_mode, 0, 0,
+			MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Exit Sleep Mode\n", __func__, __LINE__);
+		goto power_off_err;
+	}
+	/* Wait for 3 frames after enter_sleep_mode. */
+	msleep(51);
+
+	/* Can not poweroff VPROG2, because many other module related to
+	 * this power supply, such as PSH sensor. */
+	/*__vpro2_power_ctrl(false);*/
+	if (bias_en_gpio)
+		gpio_set_value_cansleep(bias_en_gpio, 0);
+
+	return 0;
+
+power_off_err:
+	err = -EIO;
+	return err;
+}
+
+static int mdfld_dsi_jdi_set_brightness(struct mdfld_dsi_config *dsi_config,
+		int level)
+{
+    struct mdfld_dsi_pkg_sender *sender =
+		mdfld_dsi_get_pkg_sender(dsi_config);
+	u8 duty_val = 0;
+
+	PSB_DEBUG_ENTRY("level = %d\n", level);
+
+	if (!sender) {
+		DRM_ERROR("Failed to get DSI packet sender\n");
+		return -EINVAL;
+	}
+
+	duty_val = (0xFF * level) / 255;
+	mdfld_dsi_send_mcs_short_hs(sender,
+			0x51, duty_val, 1,
+			MDFLD_DSI_SEND_PACKAGE);
+	return 0;
+}
+
+static int mdfld_dsi_jdi_panel_reset(struct mdfld_dsi_config *dsi_config)
+{
+	u8 *vaddr1 = NULL;
+	int reg_value_scl = 0;
+	int ret = 0;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	/* Because when reset touchscreen panel, touchscreen will pull i2c bus
+	 * to low, sometime this operation will cause i2c bus enter into wrong
+	 * status, so before reset, switch i2c scl pin */
+	vaddr1 = ioremap(SECURE_I2C_FLIS_REG, 4);
+	reg_value_scl = ioread32(vaddr1);
+	reg_value_scl &= ~0x1000;
+	rpmsg_send_generic_raw_command(RP_INDIRECT_WRITE, 0,
+					(u8 *)&reg_value_scl, 4,
+					NULL, 0,
+					SECURE_I2C_FLIS_REG, 0);
+
+	__vpro2_power_ctrl(true);
+
+	/* For meeting tRW1 panel spec */
+	usleep_range(2000, 2500);
+
+	if (bias_en_gpio == 0) {
+		bias_en_gpio = 189;
+		ret = gpio_request(bias_en_gpio, "bias_enable");
+		if (ret) {
+			DRM_ERROR("Faild to request bias_enable gpio\n");
+			return -EINVAL;
+		}
+		gpio_direction_output(bias_en_gpio, 0);
+	}
+	if (mipi_reset_gpio == 0) {
+		ret = get_gpio_by_name("disp0_rst");
+		if (ret < 0) {
+			DRM_ERROR("Faild to get panel reset gpio, " \
+				  "use default reset pin\n");
+			return ret;
+		}
+		mipi_reset_gpio = ret;
+		ret = gpio_request(mipi_reset_gpio, "mipi_display");
+		if (ret) {
+			DRM_ERROR("Faild to request panel reset gpio\n");
+			return ret;
+		}
+		gpio_direction_output(mipi_reset_gpio, 0);
+	}
+	gpio_direction_output(bias_en_gpio, 0);
+	gpio_direction_output(mipi_reset_gpio, 0);
+	gpio_set_value_cansleep(bias_en_gpio, 0);
+	gpio_set_value_cansleep(mipi_reset_gpio, 0);
+	usleep_range(2000, 2500);
+	gpio_set_value_cansleep(bias_en_gpio, 1);
+	usleep_range(2000, 2500);
+	gpio_set_value_cansleep(mipi_reset_gpio, 1);
+	usleep_range(2000, 2500);
+	/* switch i2c scl pin back */
+	reg_value_scl |= 0x1000;
+	rpmsg_send_generic_raw_command(RP_INDIRECT_WRITE, 0,
+					(u8 *)&reg_value_scl, 4,
+					NULL, 0,
+					SECURE_I2C_FLIS_REG, 0);
+	iounmap(vaddr1);
+
+	return 0;
+}
+
+static struct drm_display_mode *jdi_vid_get_config_mode(void)
+{
+	struct drm_display_mode *mode;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+	if (!mode)
+		return NULL;
+
+	mode->hdisplay = 720;
+	mode->hsync_start = 816;
+	mode->hsync_end = 818;
+	mode->htotal = 920;
+
+	mode->vdisplay = 1280;
+	mode->vsync_start = 1288;
+	mode->vsync_end = 1296;
+	mode->vtotal = 1304;
+
+	mode->vrefresh = 60;
+	mode->clock =  mode->vrefresh * mode->vtotal *
+		mode->htotal / 1000;
+
+	drm_mode_set_name(mode);
+	drm_mode_set_crtcinfo(mode, 0);
+
+	mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+	return mode;
+}
+
+static void jdi_vid_get_panel_info(int pipe, struct panel_info *pi)
+{
+	if (!pi)
+		return;
+
+	if (pipe == 0) {
+		pi->width_mm = 56;
+		pi->height_mm = 99;
+	}
+
+	return;
+}
+
+void jdi_vid_init(struct drm_device *dev, struct panel_funcs *p_funcs)
+{
+	PSB_DEBUG_ENTRY("\n");
+
+	p_funcs->get_config_mode = jdi_vid_get_config_mode;
+	p_funcs->get_panel_info = jdi_vid_get_panel_info;
+	p_funcs->reset = mdfld_dsi_jdi_panel_reset;
+	p_funcs->drv_ic_init = mdfld_dsi_jdi_ic_init;
+	p_funcs->dsi_controller_init = mdfld_dsi_jdi_dsi_controller_init;
+	p_funcs->detect = mdfld_dsi_jdi_detect;
+	p_funcs->power_on = mdfld_dsi_jdi_power_on;
+	p_funcs->power_off = mdfld_dsi_jdi_power_off;
+	p_funcs->set_brightness = mdfld_dsi_jdi_set_brightness;
+}
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/mdfld_csc.c b/drivers/external_drivers/intel_media/display/tng/drv/mdfld_csc.c
new file mode 100644
index 0000000..adf2853
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/mdfld_csc.c
@@ -0,0 +1,574 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *	Jim Liu <jim.liu@intel.com>
+ */
+
+#include <drm/drmP.h>
+#include "psb_drv.h"
+#include "psb_intel_reg.h"
+#include <linux/math64.h>
+
+#define MULTIPLIER_CHROM 10000
+#define MULTIPLIER_MATRIX 10000
+#define MULTIPLIER_MATRIX1 100000
+#define MULTIPLIER_MATRIX2 100
+#define csc_sign(x) ((x < 0) ? false : true)
+
+/**
+ *  csc_matrix_mult_func
+ *
+ *
+ */
+static int64_t csc_matrix_mult_func(int64_t * M1, int64_t * M2)
+{
+	int64_t result = 0;
+	int i = 0;
+
+	for (i = 0; i < 3; i++)
+		result += M1[i] * M2[i];
+
+	return result;
+}
+
+/**
+ *  csc_matrix_mult
+ *
+ *  3x3 matrix multiply 3x3 matrix
+ *
+ */
+static void csc_matrix_mult(int64_t * M1, int64_t * M2, int64_t * M3)
+{
+	int64_t temp1[3], temp2[3];
+	int i = 0;
+
+	for (i = 0; i < 3; i++) {
+		temp1[i] = M1[i];
+		temp2[i] = M2[i * 3];
+	}
+
+	M3[0] = csc_matrix_mult_func(temp1, temp2);
+
+	for (i = 0; i < 3; i++) {
+		temp1[i] = M1[i];
+		temp2[i] = M2[(i * 3) + 1];
+	}
+
+	M3[1] = csc_matrix_mult_func(temp1, temp2);
+
+	for (i = 0; i < 3; i++) {
+		temp1[i] = M1[i];
+		temp2[i] = M2[(i * 3) + 2];
+	}
+
+	M3[2] = csc_matrix_mult_func(temp1, temp2);
+
+	for (i = 0; i < 3; i++) {
+		temp1[i] = M1[i + 3];
+		temp2[i] = M2[i * 3];
+	}
+
+	M3[3] = csc_matrix_mult_func(temp1, temp2);
+
+	for (i = 0; i < 3; i++) {
+		temp1[i] = M1[i + 3];
+		temp2[i] = M2[(i * 3) + 1];
+	}
+
+	M3[4] = csc_matrix_mult_func(temp1, temp2);
+
+	for (i = 0; i < 3; i++) {
+		temp1[i] = M1[i + 3];
+		temp2[i] = M2[(i * 3) + 2];
+	}
+
+	M3[5] = csc_matrix_mult_func(temp1, temp2);
+
+	for (i = 0; i < 3; i++) {
+		temp1[i] = M1[i + 6];
+		temp2[i] = M2[i * 3];
+	}
+
+	M3[6] = csc_matrix_mult_func(temp1, temp2);
+
+	for (i = 0; i < 3; i++) {
+		temp1[i] = M1[i + 6];
+		temp2[i] = M2[(i * 3) + 1];
+	}
+
+	M3[7] = csc_matrix_mult_func(temp1, temp2);
+
+	for (i = 0; i < 3; i++) {
+		temp1[i] = M1[i + 6];
+		temp2[i] = M2[(i * 3) + 2];
+	}
+
+	M3[8] = csc_matrix_mult_func(temp1, temp2);
+}
+
+/**
+ *  csc_div64
+ *
+ *  division with 64bit dividend and 64bit divisor.
+ *
+ */
+static int64_t csc_div64(int64_t divident, int64_t divisor)
+{
+	bool sign = !(csc_sign(divident) ^ csc_sign(divisor));
+	uint64_t temp_N = (uint64_t) - 1;
+	uint64_t temp_divid = 0, temp_divis = 0, temp_quot = 0;
+
+	if (divident < 0) {
+		temp_divid = temp_N - ((uint64_t) divident) + 1;
+	} else {
+		temp_divid = (uint64_t) divident;
+	}
+
+	if (divisor < 0) {
+		temp_divis = temp_N - ((uint64_t) divisor) + 1;
+	} else {
+		temp_divis = (uint64_t) divisor;
+	}
+
+	temp_quot = div64_u64(temp_divid, temp_divis);
+
+	if (!sign)
+		temp_quot = temp_N - ((uint64_t) temp_quot) + 1;
+
+	return (int64_t) temp_quot;
+}
+
+/**
+ *  csc_det_2x2_matrix
+ *
+ *  get the determinant of 2x2 matrix
+ *
+ *  note: the 2x2 matrix will be represented in a vector with 4 elements.
+ *  M00 = V0, M01 = V1, M10 = V2, M11 = V3.
+ *  det M = V0 * V3 - V1 * V2
+ */
+int64_t csc_det_2x2_matric(int64_t * M2)
+{
+	return ((M2[0] * M2[3]) - (M2[1] * M2[2]));
+}
+
+/**
+ *  csc_det_3x3_matrix
+ *
+ *  get the determinant of 3x3 matrix
+ *
+ *  note: the 3x3 matrix will be represented in a vector with 9 elements.
+ *  M00 = V0, M01 = V1, M02 = V2, M10 = V3, M11 = V4, M12 = V5, M20 = V6, M21
+ *  = V7, M22 = V8.
+ *  det M = V0*(V8*V4 - V7*V5) - V3*(V8*V1 - V7*V2) + V6*(V5*V1 - V4*V2)
+ */
+int64_t csc_det_3x3_matric(int64_t * M3)
+{
+	int64_t M2_0[4], M2_1[4], M2_2[4];
+	int64_t det0 = 0;
+	int64_t det1 = 0;
+	int64_t det2 = 0;
+
+	M2_0[0] = M3[4];
+	M2_0[1] = M3[5];
+	M2_0[2] = M3[7];
+	M2_0[3] = M3[8];
+	det0 = csc_det_2x2_matric(M2_0);
+
+	M2_1[0] = M3[1];
+	M2_1[1] = M3[2];
+	M2_1[2] = M3[7];
+	M2_1[3] = M3[8];
+	det1 = csc_det_2x2_matric(M2_1);
+
+	M2_2[0] = M3[1];
+	M2_2[1] = M3[2];
+	M2_2[2] = M3[4];
+	M2_2[3] = M3[5];
+	det2 = csc_det_2x2_matric(M2_2);
+
+	return ((M3[0] * det0) - (M3[3] * det1) + (M3[6] * det2));
+}
+
+/**
+ *  csc_inverse_3x3_matrix
+ *
+ *  get the inverse of 3x3 matrix
+ *
+ *  note: the 3x3 matrix will be represented in a vector with 9 elements.
+ *  M00 = V0, M01 = V1, M02 = V2, M10 = V3, M11 = V4, M12 = V5, M20 = V6, M21
+ *  = V7, M22 = V8.
+ */
+int64_t csc_inverse_3x3_matrix(int64_t * M3, int64_t * M3_out)
+{
+	int64_t M2[4];
+	int64_t det_M3 = 0;
+	int64_t det[9];
+	int i = 0;
+
+	M2[0] = M3[4];
+	M2[1] = M3[5];
+	M2[2] = M3[7];
+	M2[3] = M3[8];
+	det[0] = csc_det_2x2_matric(M2);
+
+	M2[0] = M3[2];
+	M2[1] = M3[1];
+	M2[2] = M3[8];
+	M2[3] = M3[7];
+	det[1] = csc_det_2x2_matric(M2);
+
+	M2[0] = M3[1];
+	M2[1] = M3[2];
+	M2[2] = M3[4];
+	M2[3] = M3[5];
+	det[2] = csc_det_2x2_matric(M2);
+
+	M2[0] = M3[5];
+	M2[1] = M3[3];
+	M2[2] = M3[8];
+	M2[3] = M3[6];
+	det[3] = csc_det_2x2_matric(M2);
+
+	M2[0] = M3[0];
+	M2[1] = M3[2];
+	M2[2] = M3[6];
+	M2[3] = M3[8];
+	det[4] = csc_det_2x2_matric(M2);
+
+	M2[0] = M3[2];
+	M2[1] = M3[0];
+	M2[2] = M3[5];
+	M2[3] = M3[3];
+	det[5] = csc_det_2x2_matric(M2);
+
+	M2[0] = M3[3];
+	M2[1] = M3[4];
+	M2[2] = M3[6];
+	M2[3] = M3[7];
+	det[6] = csc_det_2x2_matric(M2);
+
+	M2[0] = M3[1];
+	M2[1] = M3[0];
+	M2[2] = M3[7];
+	M2[3] = M3[6];
+	det[7] = csc_det_2x2_matric(M2);
+
+	M2[0] = M3[0];
+	M2[1] = M3[1];
+	M2[2] = M3[3];
+	M2[3] = M3[4];
+	det[8] = csc_det_2x2_matric(M2);
+
+	for (i = 0; i < 9; i++)
+		M3_out[i] = det[i];
+
+	det_M3 = csc_det_3x3_matric(M3);
+
+	return det_M3;
+}
+
+/**
+ *  csc_func1
+ *
+ *  csc interim function #1
+ *
+ */
+static int64_t csc_func1(int64_t chrom1, int64_t chrom2)
+{
+	return csc_div64((chrom1 * MULTIPLIER_MATRIX), chrom2);
+}
+
+/**
+ *  csc_func2
+ *
+ *  csc interim function #2
+ *
+ */
+static int64_t csc_func2(int64_t chrom1, int64_t chrom2)
+{
+	return csc_div64((MULTIPLIER_CHROM - chrom1 - chrom2) *
+			 MULTIPLIER_MATRIX, chrom2);
+}
+
+/**
+ *  csc_func3
+ *
+ *  csc interim function #3
+ *
+ */
+static int64_t csc_func3(int64_t M3_1, int64_t M3_2, int64_t M3_3,
+			 int64_t chrom1, int64_t chrom2)
+{
+	return csc_div64(M3_1 * chrom1,
+			 chrom2) + M3_2 + csc_div64(M3_3 * (MULTIPLIER_CHROM -
+							    chrom1 - chrom2),
+						    chrom2);
+}
+
+/**
+ *  csc_func4
+ *
+ *  csc interim function #4
+ *
+ */
+static int64_t csc_func4(int64_t Y1, int64_t chrom1, int64_t chrom2,
+			 int64_t det1)
+{
+	return csc_div64(MULTIPLIER_MATRIX1 * Y1 * chrom1, chrom2 * det1);
+}
+
+/**
+ *  csc_func5
+ *
+ *  csc interim function #5
+ *
+ */
+static int64_t csc_func5(int64_t Y1, int64_t det1)
+{
+	return csc_div64(MULTIPLIER_MATRIX1 * Y1, det1);
+}
+
+/**
+ *  csc_func6
+ *
+ *  csc interim function #6
+ *
+ */
+static int64_t csc_func6(int64_t Y1, int64_t chrom1, int64_t chrom2,
+			 int64_t det1)
+{
+	return csc_div64(MULTIPLIER_MATRIX1 * Y1 * (MULTIPLIER_CHROM - chrom1 -
+						    chrom2), chrom2 * det1);
+}
+
+/**
+ *  csc_XYZ
+ *
+ *  Get the transformation matrix from the input color space to CIE XYZ color
+ *  space.
+ *
+ *  note: the input parameters are the chromaticity values. They are 10000
+ *  times the actuall values.
+ *  xr = chrom[0], yr = chrom[1], xg = chrom[2], yg = chrom[3], xb =
+ *  chrom[4], yb = chrom[5], xw = chrom[6], yw = chrom[7].
+ *
+ *  See display SAS for the detailed algorithem.
+ *
+ */
+static void csc_XYZ(int *chrom, int64_t * M_csc)
+{
+	int64_t M3_in[9];
+	int64_t M3_out[9];
+	int64_t det_M3 = 0;
+	int64_t Y[3];
+	int i = 0;
+
+	/*
+	 * Get the matrix to convert from RGB space to XYZ space.
+	 *
+	 */
+
+	for (i = 0; i < 3; i++) {
+		M3_in[i] = csc_func1(chrom[2 * i], chrom[(2 * i) + 1]);
+		M3_in[i + 3] = MULTIPLIER_MATRIX;
+		M3_in[i + 6] = csc_func2(chrom[2 * i], chrom[(2 * i) + 1]);
+	}
+
+	det_M3 = csc_inverse_3x3_matrix(M3_in, M3_out);
+	det_M3 = csc_div64(det_M3, MULTIPLIER_MATRIX);
+
+	for (i = 0; i < 3; i++)
+		Y[i] = csc_func3(M3_out[i * 3], M3_out[(i * 3) + 1],
+				 M3_out[(i * 3) + 2], chrom[6], chrom[7]);
+
+	for (i = 0; i < 3; i++) {
+		M_csc[i] =
+		    csc_func4(Y[i], chrom[i * 2], chrom[(i * 2) + 1], det_M3);
+		M_csc[i + 3] = csc_func5(Y[i], det_M3);
+		M_csc[i + 6] =
+		    csc_func6(Y[i], chrom[i * 2], chrom[(i * 2) + 1], det_M3);
+	}
+
+	for (i = 0; i < 9; i++) {
+		if (M_csc[i] > 0)
+			M_csc[i] = csc_div64(M_csc[i] + 5, 10);
+		else
+			M_csc[i] = csc_div64(M_csc[i] - 5, 10);
+	}
+}
+
+/**
+ *  csc_to_12bit_register_value
+ *
+ *  Convert the 64bit integer to a 12bit 2-complement fixed point value in
+ *  format {12, 10,1}
+ *
+ */
+static void csc_to_12bit_register_value(int64_t csc, u16 * reg_val)
+{
+	uint64_t temp_N = (uint64_t) - 1;
+	uint64_t temp64 = 0;
+	u32 temp_32_1;
+	u32 temp_32_2;
+	bool sign = true;	/* true: positive, false: negative. */
+	u16 remain = 0;
+	u8 integer = 0;
+
+	/*
+	 * Convert the signed number to absolute value.
+	 *
+	 */
+	if (csc < 0) {
+		sign = false;
+		temp64 = temp_N - ((uint64_t) csc) + 1;
+		temp_32_2 = (u32) temp64;
+		temp_32_1 = (u32) (temp64 >> 32);
+	} else {
+		temp64 = (uint64_t) csc;
+		temp_32_2 = (u32) temp64;
+		temp_32_1 = (u32) (temp64 >> 32);
+	}
+
+	/*
+	 * Convert the absolute value to register value.
+	 *
+	 */
+	integer = temp_32_2 / 1024;
+	remain = temp_32_2 % 1024;
+
+        *reg_val = 0;
+        remain = (remain * 1024) / 1024;
+        *reg_val |= remain;
+
+        if (integer)
+                *reg_val |= BIT10;
+
+        if (!sign) {
+                (*reg_val) = ~(*reg_val);
+                (*reg_val)++;
+                (*reg_val) &= 0xFFF;
+        }
+
+        if (integer > 1)
+                DRM_ERROR("Invalid parameters\n");
+}
+
+/**
+ *  csc_program_DC
+ *
+ *  Program DC color matrix coefficients register
+ *
+ *  note: the 3x3 matrix will be represented in a vector with 9 elements.
+ *  M00 = V0, M01 = V1, M02 = V2, M10 = V3, M11 = V4, M12 = V5, M20 = V6, M21
+ *  = V7, M22 = V8.
+ *
+ */
+void csc_program_DC(struct drm_device *dev, int64_t * csc, int pipe)
+{
+	u16 reg_val1 = 0, reg_val2 = 0;
+	u32 reg_val = 0;
+	u32 color_coef_reg = PIPEA_COLOR_COEF0;
+	int i = 0;
+
+	if (pipe == PIPEB)
+		color_coef_reg += PIPEB_OFFSET;
+	else if (pipe == PIPEC)
+		color_coef_reg += PIPEC_OFFSET;
+
+	/*
+	 *  Convert the 64bit integer to a 12bit 2-complement fixed point value in
+	 *  format {12, 10,1}
+	 *
+	 */
+	for (i = 0; i < 9; i += 3) {
+		csc_to_12bit_register_value(csc[i], &reg_val1);
+		csc_to_12bit_register_value(csc[i + 1], &reg_val2);
+		reg_val = reg_val1 | (reg_val2 << CC_1_POS);
+		REG_WRITE(color_coef_reg, reg_val);
+		color_coef_reg += 4;
+		csc_to_12bit_register_value(csc[i + 2], &reg_val1);
+		reg_val = reg_val1;
+		REG_WRITE(color_coef_reg, reg_val);
+		color_coef_reg += 4;
+	}
+}
+
+/**
+ *  csc_operation
+ *
+ *  Program DC register to perform csc.
+ *
+ *  note: the input parameters are the chromaticity values. They are 10000
+ *  times the actuall values.
+ *  xr = chrom[0], yr = chrom[1], xg = chrom[2], yg = chrom[3], xb =
+ *  chrom[4], yb = chrom[5], xw = chrom[6], yw = chrom[7].
+ *
+ *  chrom1 represents the input color space; chrom2 represents the output
+ *  color space.
+ *
+ *  See display SAS for the detailed algorithem.
+ *
+ */
+void csc(struct drm_device *dev, int *chrom1, int *chrom2, int pipe)
+{
+	int64_t M3_out[9];
+	int64_t det_M3 = 0;
+	int64_t csc1[9];
+	int64_t csc2[9];
+	int64_t csc2_inv[9];
+	int64_t csc[9];
+	int i = 0;
+
+	/*
+	 * Get the matrix to convert from RGB space to XYZ space.
+	 *
+	 */
+
+	csc_XYZ(chrom1, csc1);
+	csc_XYZ(chrom2, csc2);
+
+	det_M3 = csc_inverse_3x3_matrix(csc2, M3_out);
+	det_M3 = csc_div64(det_M3, MULTIPLIER_MATRIX2);
+
+	for (i = 0; i < 9; i++) {
+		csc2_inv[i] =
+		    csc_div64(M3_out[i] * MULTIPLIER_MATRIX1 * 1000, det_M3);
+
+		if (csc2_inv[i] > 0)
+			csc2_inv[i] = csc_div64(csc2_inv[i] + 50, 100);
+		else
+			csc2_inv[i] = csc_div64(csc2_inv[i] - 50, 100);
+	}
+
+	csc_matrix_mult(csc1, csc2_inv, csc);
+
+	for (i = 0; i < 9; i++) {
+		if (csc[i] > 0)
+			csc[i] = csc_div64(csc[i] + 50000, 100000);
+		else
+			csc[i] = csc_div64(csc[i] - 50000, 100000);
+	}
+
+	csc_program_DC(dev, csc, pipe);
+}
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/mdfld_csc.h b/drivers/external_drivers/intel_media/display/tng/drv/mdfld_csc.h
new file mode 100644
index 0000000..08b8932
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/mdfld_csc.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *	Jim Liu <jim.liu@intel.com>
+ */
+
+#ifndef _MDFLD_CSC_H_
+#define _MDFLD_CSC_H_
+
+/* chromaticity value for sRGB color space */
+/* int chrom1[8] = {6400, 3300, 3000, 6000, 1500, 600, 3127, 3290}; */
+/* chromaticity value for Adobe Wide Gamut RGB color space */
+/* int chrom2[8] = {7347, 2653, 1152, 8264, 1566, 177, 3457, 3585}; */
+
+void csc(struct drm_device *dev, int *chrom1, int *chrom2, int pipe);
+void csc_program_DC(struct drm_device *dev, int64_t * csc, int pipe);
+
+#endif
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/mdfld_debugfs.c b/drivers/external_drivers/intel_media/display/tng/drv/mdfld_debugfs.c
new file mode 100644
index 0000000..060ef47
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/mdfld_debugfs.c
@@ -0,0 +1,284 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Jackie Li<yaodong.li@intel.com>
+ */
+
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+
+#include <drm/drmP.h>
+#include "psb_drv.h"
+
+#if defined(CONFIG_DEBUG_FS)
+
+static int mdfld_dc_dpll_regs_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *)m->private;
+	struct drm_device *dev = node->minor->dev;
+	int i;
+
+	seq_printf(m, "DISPLAY DPLL\n");
+
+	seq_printf(m, "\tDPLL:\n");
+	for (i=0xf000; i<0xffff; i+=4)
+		seq_printf(m, "\t\t reg(0x%x) = 0x%x\n", i, REG_READ(i));
+
+	return 0;
+}
+
+static int mdfld_dc_pipeline_a_regs_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *)m->private;
+	struct drm_device *dev = node->minor->dev;
+	int i;
+
+	seq_printf(m, "DISPLAY PIPELINE A\n");
+
+	seq_printf(m, "\tPALETTE A/B/C:\n");
+	seq_printf(m, "\t\t reg(0xa000) = 0x%x\n", REG_READ(0xa000));
+
+	seq_printf(m, "\tMIPI A:\n");
+	for (i=0xb000; i<0xb0ff; i+=4)
+		seq_printf(m, "\t\t reg(0x%x) = 0x%x\n", i, REG_READ(i));
+
+	seq_printf(m, "\tDSI ADAPTER:\n");
+	for (i=0xb104; i<=0xb138; i+=4)
+		seq_printf(m, "\t\t reg(0x%x) = 0x%x\n", i, REG_READ(i));
+
+	seq_printf(m, "\tDPLL:\n");
+	for (i=0xf000; i<0xffff; i+=4)
+		seq_printf(m, "\t\t reg(0x%x) = 0x%x\n", i, REG_READ(i));
+
+	seq_printf(m, "\tPIPELINE A:\n");
+	for (i=0x60000; i<0x600ff; i+=4)
+		seq_printf(m, "\t\t reg(0x%x) = 0x%x\n", i, REG_READ(i));
+
+	seq_printf(m, "\tPORT CONTROL:\n");
+	for (i=0x61190; i<0x61194; i+=4)
+		seq_printf(m, "\t\t reg(0x%x) = 0x%x\n", i, REG_READ(i));
+
+	seq_printf(m, "\tPIPELINE A CONTROL:\n");
+	for (i=0x70000; i<0x700ff; i+=4)
+		seq_printf(m, "\t\t reg(0x%x) = 0x%x\n", i, REG_READ(i));
+
+	return 0;
+}
+
+static int mdfld_dc_pipeline_b_regs_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *)m->private;
+	struct drm_device *dev = node->minor->dev;
+	int i;
+
+	seq_printf(m, "DISPLAY PIPELINE B\n");
+
+	seq_printf(m, "\tPALETTE B:\n");
+	seq_printf(m, "\t\t reg(0xa800) = 0x%x\n", REG_READ(0xa800));
+
+	seq_printf(m, "\tPIPELINE B:\n");
+	for (i=0x61000; i<0x610ff; i+=4)
+		seq_printf(m, "\t\t reg(0x%x) = 0x%x\n", i, REG_READ(i));
+
+	seq_printf(m, "\tHDMI PORT CONTROL/HDCP/TV:\n");
+	for (i=0x61110; i<=0x61178; i+=4)
+		seq_printf(m, "\t\t reg(0x%x) = 0x%x\n", i, REG_READ(i));
+
+	seq_printf(m, "\tPANEL FITTING:\n");
+	for (i=0x61200; i<0x612ff; i+=4)
+		seq_printf(m, "\t\t reg(0x%x) = 0x%x\n", i, REG_READ(i));
+
+	seq_printf(m, "\tPIPELINE B CONTROL:\n");
+	for (i=0x71000; i<0x710ff; i+=4)
+		seq_printf(m, "\t\t reg(0x%x) = 0x%x\n", i, REG_READ(i));
+
+	return 0;
+}
+
+static int mdfld_dc_pipeline_c_regs_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *)m->private;
+	struct drm_device *dev = node->minor->dev;
+	int i;
+
+	seq_printf(m, "DISPLAY PIPELINE C\n");
+	seq_printf(m, "\tPALETTE C:\n");
+	seq_printf(m, "\t\t reg(0xac00) = 0x%x\n", REG_READ(0xac00));
+
+	seq_printf(m, "\tMIPI C:\n");
+	for (i=0xb800; i<0xb8ff; i+=4)
+		seq_printf(m, "\t\t reg(0x%x) = 0x%x\n", i, REG_READ(i));
+
+	seq_printf(m, "\tDSI ADAPTER:\n");
+	for (i=0xb904; i<=0xb938; i+=4)
+		seq_printf(m, "\t\t reg(0x%x) = 0x%x\n", i, REG_READ(i));
+
+	seq_printf(m, "\tPIPELINE C:\n");
+	for (i=0x62000; i<0x620ff; i+=4)
+		seq_printf(m, "\t\t reg(0x%x) = 0x%x\n", i, REG_READ(i));
+
+	seq_printf(m, "\tPORT CONTROL:\n");
+	for (i=0x62190; i<0x62194; i+=4)
+		seq_printf(m, "\t\t reg(0x%x) = 0x%x\n", i, REG_READ(i));
+
+	seq_printf(m, "\tPIPELINE C CONTROL:\n");
+	for (i=0x72000; i<0x720ff; i+=4)
+		seq_printf(m, "\t\t reg(0x%x) = 0x%x\n", i, REG_READ(i));
+
+	return 0;
+}
+
+static int mdfld_dc_overlay_a_regs_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *)m->private;
+	struct drm_device *dev = node->minor->dev;
+	int i;
+
+	seq_printf(m, "DISPLAY OVERLAY A\n");
+	seq_printf(m, "\tOVERLAY A:\n");
+	for (i=0x30000; i<0x34023; i+=4)
+		seq_printf(m, "\t\t reg(0x%x) = 0x%x\n", i, REG_READ(i));
+
+	return 0;
+}
+
+static int mdfld_dc_overlay_c_regs_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *)m->private;
+	struct drm_device *dev = node->minor->dev;
+	int i;
+
+	seq_printf(m, "DISPLAY OVERLAY C\n");
+	seq_printf(m, "\tOVERLAY C:\n");
+	for (i=0x38000; i<0x3c023; i+=4)
+		seq_printf(m, "\t\t reg(0x%x) = 0x%x\n", i, REG_READ(i));
+
+	return 0;
+}
+
+static int dc_sprite_regs_info(struct seq_file *m, void *data, int index)
+{
+	struct drm_info_node *node = (struct drm_info_node *)m->private;
+	struct drm_device *dev = node->minor->dev;
+	char c;
+	u32 reg_offset;
+	int i;
+
+	switch (index) {
+	case 0:
+		c = 'A';
+		reg_offset = 0;
+		break;
+	case 1:
+		c = 'B';
+		reg_offset = 0x1000;
+		break;
+	case 2:
+		c = 'C';
+		reg_offset = 0x2000;
+		break;
+	case 3:
+		c = 'D';
+		reg_offset = 0x3000;
+		break;
+	case 4:
+		c = 'E';
+		reg_offset = 0x4000;
+		break;
+	case 5:
+		c = 'F';
+		reg_offset = 0x5000;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	seq_printf(m, "DISPLAY SPRITE %c\n", c);
+	for (i=0x70180 + reg_offset; i<0x701d4+reg_offset; i+=4)
+		seq_printf(m, "\t\t reg(0x%x) = 0x%x\n", i, REG_READ(i));
+
+	return 0;
+}
+
+static int mdfld_dc_sprite_a_regs_info(struct seq_file *m, void *data)
+{
+	return dc_sprite_regs_info(m, data, 0);
+}
+
+static int mdfld_dc_sprite_b_regs_info(struct seq_file *m, void *data)
+{
+	return dc_sprite_regs_info(m, data, 1);
+}
+
+static int mdfld_dc_sprite_c_regs_info(struct seq_file *m, void *data)
+{
+	return dc_sprite_regs_info(m, data, 2);
+}
+
+static int mdfld_dc_sprite_d_regs_info(struct seq_file *m, void *data)
+{
+	return dc_sprite_regs_info(m, data, 3);
+}
+
+static int mdfld_dc_sprite_e_regs_info(struct seq_file *m, void *data)
+{
+	return dc_sprite_regs_info(m, data, 4);
+}
+
+static int mdfld_dc_sprite_f_regs_info(struct seq_file *m, void *data)
+{
+	return dc_sprite_regs_info(m, data, 5);
+}
+
+static struct drm_info_list mdfld_debugfs_list[] = {
+	{"mdfld_dc_dpll_regs", mdfld_dc_dpll_regs_info, 0},
+	{"mdfld_dc_pipeline_a_regs", mdfld_dc_pipeline_a_regs_info, 0},
+	{"mdfld_dc_pipeline_b_regs", mdfld_dc_pipeline_b_regs_info, 0},
+	{"mdfld_dc_pipeline_c_regs", mdfld_dc_pipeline_c_regs_info, 0},
+	{"mdfld_dc_overlay_a_regs", mdfld_dc_overlay_a_regs_info, 0},
+	{"mdfld_dc_overlay_c_regs", mdfld_dc_overlay_c_regs_info, 0},
+	{"dc_sprite_a_regs", mdfld_dc_sprite_a_regs_info, 0},
+	{"dc_sprite_b_regs", mdfld_dc_sprite_b_regs_info, 0},
+	{"dc_sprite_c_regs", mdfld_dc_sprite_c_regs_info, 0},
+	{"dc_sprite_d_regs", mdfld_dc_sprite_d_regs_info, 0},
+	{"dc_sprite_e_regs", mdfld_dc_sprite_e_regs_info, 0},
+	{"dc_sprite_f_regs", mdfld_dc_sprite_f_regs_info, 0},
+};
+#define MDFLD_DEBUGFS_ENTRIES ARRAY_SIZE(mdfld_debugfs_list)
+
+int mdfld_debugfs_init(struct drm_minor *minor)
+{
+	return drm_debugfs_create_files(mdfld_debugfs_list,
+				MDFLD_DEBUGFS_ENTRIES,
+				minor->debugfs_root, minor);
+
+}
+
+void mdfld_debugfs_cleanup(struct drm_minor *minor)
+{
+	drm_debugfs_remove_files(mdfld_debugfs_list,
+			MDFLD_DEBUGFS_ENTRIES,
+			minor);
+}
+
+#endif /*CONFIG_DEBUG_FS*/
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/mdfld_debugfs.h b/drivers/external_drivers/intel_media/display/tng/drv/mdfld_debugfs.h
new file mode 100644
index 0000000..535a6d4
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/mdfld_debugfs.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Jackie Li<yaodong.li@intel.com>
+ */
+
+#ifndef MDFLD_DEBUGFS_H_
+#define MDFLD_DEBUGFS_H_
+
+extern int mdfld_debugfs_init(struct drm_minor *minor);
+extern void mdfld_debugfs_cleanup(struct drm_minor *minor);
+
+#endif /* MDFLD_DEBUGFS_H_ */
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/mdfld_dsi_dbi.c b/drivers/external_drivers/intel_media/display/tng/drv/mdfld_dsi_dbi.c
new file mode 100644
index 0000000..7c9dd5c
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/mdfld_dsi_dbi.c
@@ -0,0 +1,1411 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *  jim liu <jim.liu@intel.com>
+ *  Jackie Li<yaodong.li@intel.com>
+ */
+
+#include "mdfld_dsi_dbi.h"
+#include "mdfld_dsi_dbi_dpu.h"
+#include "mdfld_dsi_pkg_sender.h"
+#include "mdfld_dsi_esd.h"
+#include "pwr_mgmt.h"
+#include "mdfld_dsi_dbi_dsr.h"
+#include "mrfld_clock.h"
+#include "psb_drv.h"
+#include "dispmgrnl.h"
+
+/**
+ * Enter DSR
+ */
+void mdfld_dsi_dbi_enter_dsr(struct mdfld_dsi_dbi_output *dbi_output,
+		int pipe)
+{
+	return;
+}
+
+#ifndef CONFIG_MDFLD_DSI_DPU
+
+int mdfld_dsi_dbi_async_check_fifo_empty(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct mdfld_dbi_dsr_info *dsr_info = dev_priv->dbi_dsr_info;
+	struct mdfld_dsi_dbi_output **dbi_outputs = NULL;
+	struct mdfld_dsi_dbi_output *dbi_output = NULL;
+	struct mdfld_dsi_pkg_sender *sender = NULL;
+	int err = 0;
+
+	dbi_outputs = dsr_info->dbi_outputs;
+	dbi_output = 0 ? dbi_outputs[1] : dbi_outputs[0];
+	if (!dbi_output)
+		return 0;
+
+	sender = mdfld_dsi_encoder_get_pkg_sender(&dbi_output->base);
+	if (!sender) {
+		DRM_ERROR("pkg sender is NULL\n");
+		return -EINVAL;
+	}
+	err = mdfld_dsi_check_fifo_empty(sender);
+	return err;
+}
+
+/*
+ * use hw te to update fb
+ */
+int mdfld_dsi_dbi_async_flip_fb_update(struct drm_device *dev, int pipe)
+{
+	return 0;
+}
+
+/**
+ * Exit from DSR
+ */
+void mdfld_dsi_dbi_exit_dsr(struct drm_device *dev,
+		u32 update_src,
+		void *p_surfaceAddr,
+		bool check_hw_on_only)
+{
+}
+
+static
+void intel_dsi_dbi_update_fb(struct mdfld_dsi_dbi_output *dbi_output)
+{
+	struct mdfld_dsi_pkg_sender *sender;
+	struct drm_device *dev = dbi_output->dev;
+	struct drm_crtc *crtc = dbi_output->base.base.crtc;
+	struct psb_intel_crtc *psb_crtc =
+		(crtc) ? to_psb_intel_crtc(crtc) : NULL;
+	int pipe = dbi_output->channel_num ? 2 : 0;
+	u32 dpll_reg = MRST_DPLL_A;
+	u32 dspcntr_reg = DSPACNTR;
+	u32 pipeconf_reg = PIPEACONF;
+	u32 dsplinoff_reg = DSPALINOFF;
+	u32 dspsurf_reg = DSPASURF;
+
+	sender = mdfld_dsi_encoder_get_pkg_sender(&dbi_output->base);
+	if (!sender) {
+		DRM_ERROR("pkg sender is NULL\n");
+		return;
+	}
+
+	/* if mode setting on-going, back off */
+
+	if (!IS_ANN(dev)) {
+		if ((dbi_output->mode_flags & MODE_SETTING_ON_GOING) ||
+				(psb_crtc && (psb_crtc->mode_flags & MODE_SETTING_ON_GOING)) ||
+				!(dbi_output->mode_flags & MODE_SETTING_ENCODER_DONE))
+			return;
+	}
+
+	if (pipe == 2) {
+		dspcntr_reg = DSPCCNTR;
+		pipeconf_reg = PIPECCONF;
+		dsplinoff_reg = DSPCLINOFF;
+		dspsurf_reg = DSPCSURF;
+	}
+
+	/* check DBI FIFO status */
+	if (is_panel_vid_or_cmd(dev) == MDFLD_DSI_ENCODER_DBI) {
+		if (!(REG_READ(dspcntr_reg) & DISPLAY_PLANE_ENABLE) ||
+		   !(REG_READ(pipeconf_reg) & DISPLAY_PLANE_ENABLE))
+			return;
+	} else if (!(REG_READ(dpll_reg) & DPLL_VCO_ENABLE) ||
+	   !(REG_READ(dspcntr_reg) & DISPLAY_PLANE_ENABLE) ||
+	   !(REG_READ(pipeconf_reg) & DISPLAY_PLANE_ENABLE))
+		return;
+
+	if (!IS_ANN(dev)) {
+		/* refresh plane changes */
+
+		REG_WRITE(dsplinoff_reg, REG_READ(dsplinoff_reg));
+		REG_WRITE(dspsurf_reg, REG_READ(dspsurf_reg));
+		REG_READ(dspsurf_reg);
+	}
+
+	mdfld_dsi_send_dcs(sender,
+			   write_mem_start,
+			   NULL,
+			   0,
+			   CMD_DATA_SRC_PIPE,
+			   MDFLD_DSI_SEND_PACKAGE);
+	dbi_output->dsr_fb_update_done = true;
+	mdfld_dsi_cmds_kick_out(sender);
+}
+
+/* Perodically update dbi panel */
+void mdfld_dbi_update_panel(struct drm_device *dev, int pipe)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct mdfld_dbi_dsr_info *dsr_info = dev_priv->dbi_dsr_info;
+	struct mdfld_dsi_dbi_output **dbi_outputs;
+	struct mdfld_dsi_dbi_output *dbi_output;
+	struct mdfld_dsi_config *dsi_config;
+	struct mdfld_dsi_hw_context *ctx;
+
+	if (!dsr_info)
+		return;
+
+	dbi_outputs = dsr_info->dbi_outputs;
+	dbi_output = pipe ? dbi_outputs[1] : dbi_outputs[0];
+	dsi_config = pipe ? dev_priv->dsi_configs[1] : dev_priv->dsi_configs[0];
+
+	if (!dbi_output || !dsi_config || (pipe == 1) ||
+		(is_panel_vid_or_cmd(dev) != MDFLD_DSI_ENCODER_DBI))
+		return;
+
+	ctx = &dsi_config->dsi_hw_context;
+
+	/*lock dsi config*/
+	mutex_lock(&dsi_config->context_lock);
+
+	/*if FB is damaged and panel is on update on-panel FB*/
+	if (!ctx->panel_on)
+		goto update_out;
+
+	intel_dsi_dbi_update_fb(dbi_output);
+
+update_out:
+	mutex_unlock(&dsi_config->context_lock);
+}
+
+int mdfld_dbi_dsr_init(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct mdfld_dbi_dsr_info *dsr_info = dev_priv->dbi_dsr_info;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (!dsr_info || IS_ERR(dsr_info)) {
+		dsr_info = kzalloc(sizeof(struct mdfld_dbi_dsr_info),
+				   GFP_KERNEL);
+		if (!dsr_info) {
+			DRM_ERROR("No memory\n");
+			return -ENOMEM;
+		}
+
+		dev_priv->dbi_dsr_info = dsr_info;
+	}
+
+	return 0;
+}
+#endif
+
+static int __dbi_enter_ulps_locked(struct mdfld_dsi_config *dsi_config,
+				int offset)
+{
+	struct mdfld_dsi_hw_registers *regs = &dsi_config->regs;
+	struct mdfld_dsi_hw_context *ctx = &dsi_config->dsi_hw_context;
+	struct drm_device *dev = dsi_config->dev;
+	struct mdfld_dsi_pkg_sender *sender
+			= mdfld_dsi_get_pkg_sender(dsi_config);
+	if (!sender) {
+		DRM_ERROR("pkg sender is NULL\n");
+		return -EINVAL;
+	}
+
+	ctx->device_ready = REG_READ(regs->device_ready_reg + offset);
+
+	if ((offset == 0) && (ctx->device_ready & DSI_POWER_STATE_ULPS_MASK)) {
+		DRM_ERROR("Broken ULPS states\n");
+		return -EINVAL;
+	}
+
+	if (offset != 0)
+		sender->work_for_slave_panel = true;
+	/*wait for all FIFOs empty*/
+	mdfld_dsi_wait_for_fifos_empty(sender);
+	sender->work_for_slave_panel = false;
+
+	/*inform DSI host is to be put on ULPS*/
+	ctx->device_ready |= (DSI_POWER_STATE_ULPS_ENTER |
+				 DSI_DEVICE_READY);
+	REG_WRITE(regs->device_ready_reg + offset, ctx->device_ready);
+	mdelay(1);
+
+	/* set AFE hold value*/
+	REG_WRITE(regs->mipi_reg + offset,
+		 REG_READ(regs->mipi_reg + offset) & (~PASS_FROM_SPHY_TO_AFE));
+
+	PSB_DEBUG_ENTRY("%s: entered ULPS state\n", __func__);
+	return 0;
+}
+
+static int __dbi_exit_ulps_locked(struct mdfld_dsi_config *dsi_config,
+			int offset)
+{
+	int tem = 0;
+	struct mdfld_dsi_hw_registers *regs = &dsi_config->regs;
+	struct mdfld_dsi_hw_context *ctx = &dsi_config->dsi_hw_context;
+	struct drm_device *dev = dsi_config->dev;
+
+	ctx->device_ready = REG_READ(regs->device_ready_reg + offset);
+
+	/*inform DSI host is to be put on ULPS*/
+	ctx->device_ready |= (DSI_POWER_STATE_ULPS_ENTER |
+				 DSI_DEVICE_READY);
+	REG_WRITE(regs->device_ready_reg + offset, ctx->device_ready);
+
+	mdelay(1);
+	/* clear AFE hold value*/
+	if (offset != 0)
+		tem = 0x1000;
+	REG_WRITE(regs->mipi_reg + tem,
+		REG_READ(regs->mipi_reg + tem) | PASS_FROM_SPHY_TO_AFE);
+
+	/*enter ULPS EXIT state*/
+	ctx->device_ready &= ~DSI_POWER_STATE_ULPS_MASK;
+	ctx->device_ready |= (DSI_POWER_STATE_ULPS_EXIT |
+			DSI_DEVICE_READY);
+	REG_WRITE(regs->device_ready_reg + offset, ctx->device_ready);
+
+	/*wait for 1ms as spec suggests*/
+	mdelay(1);
+
+	/*clear ULPS state*/
+	ctx->device_ready &= ~DSI_POWER_STATE_ULPS_MASK;
+	ctx->device_ready |= DSI_DEVICE_READY;
+	REG_WRITE(regs->device_ready_reg + offset, ctx->device_ready);
+
+	mdelay(1);
+
+	PSB_DEBUG_ENTRY("%s: exited ULPS state\n", __func__);
+	return 0;
+}
+
+static void __dbi_set_properties(struct mdfld_dsi_config *dsi_config,
+			enum enum_ports port)
+{
+	struct mdfld_dsi_hw_registers *regs;
+	struct mdfld_dsi_hw_context *ctx;
+	struct drm_device *dev;
+	int offset = 0;
+
+	regs = &dsi_config->regs;
+	ctx = &dsi_config->dsi_hw_context;
+	dev = dsi_config->dev;
+
+	if (port == PORT_C)
+		offset = 0x800;
+	/*D-PHY parameter*/
+	REG_WRITE(regs->dphy_param_reg + offset, ctx->dphy_param);
+
+	/*Configure DSI controller*/
+	REG_WRITE(regs->mipi_control_reg + offset, ctx->mipi_control);
+	REG_WRITE(regs->intr_en_reg + offset, ctx->intr_en);
+	REG_WRITE(regs->hs_tx_timeout_reg + offset, ctx->hs_tx_timeout);
+	REG_WRITE(regs->lp_rx_timeout_reg + offset, ctx->lp_rx_timeout);
+	REG_WRITE(regs->turn_around_timeout_reg + offset,
+		ctx->turn_around_timeout);
+	REG_WRITE(regs->device_reset_timer_reg + offset,
+		ctx->device_reset_timer);
+	REG_WRITE(regs->high_low_switch_count_reg + offset,
+		ctx->high_low_switch_count);
+	REG_WRITE(regs->init_count_reg + offset, ctx->init_count);
+	REG_WRITE(regs->eot_disable_reg + offset, (REG_READ(regs->eot_disable_reg) & ~DSI_EOT_DISABLE_MASK) | (ctx->eot_disable & DSI_EOT_DISABLE_MASK));
+	REG_WRITE(regs->lp_byteclk_reg + offset, ctx->lp_byteclk);
+	REG_WRITE(regs->clk_lane_switch_time_cnt_reg + offset,
+		ctx->clk_lane_switch_time_cnt);
+	REG_WRITE(regs->dsi_func_prg_reg + offset, ctx->dsi_func_prg);
+
+	/*DBI bw ctrl*/
+	REG_WRITE(regs->dbi_bw_ctrl_reg + offset, ctx->dbi_bw_ctrl);
+
+}
+
+/* dbi interface power on*/
+int __dbi_power_on(struct mdfld_dsi_config *dsi_config)
+{
+	u32 val = 0;
+	struct mdfld_dsi_hw_registers *regs;
+	struct mdfld_dsi_hw_context *ctx;
+	struct drm_psb_private *dev_priv;
+	struct drm_device *dev;
+	int retry;
+	int err = 0;
+	u32 power_island = 0;
+	u32 sprite_reg_offset = 0;
+	int i = 0;
+	int offset = 0;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (!dsi_config)
+		return -EINVAL;
+
+	regs = &dsi_config->regs;
+	ctx = &dsi_config->dsi_hw_context;
+	dev = dsi_config->dev;
+	dev_priv = dev->dev_private;
+
+	power_island = pipe_to_island(dsi_config->pipe);
+
+	if (power_island & (OSPM_DISPLAY_A | OSPM_DISPLAY_C))
+		power_island |= OSPM_DISPLAY_MIO;
+
+	if (is_dual_dsi(dev))
+	power_island |= OSPM_DISPLAY_C;
+
+	if (!power_island_get(power_island))
+		return -EAGAIN;
+
+	/*
+	 * Wait for DSI PLL locked on pipe, and only need to poll status of pipe
+	 * A as both MIPI pipes share the same DSI PLL.
+	 */
+	if (dsi_config->pipe == 0) {
+		retry = 20000;
+		while (!(REG_READ(regs->pipeconf_reg) & PIPECONF_DSIPLL_LOCK) &&
+				--retry)
+			udelay(150);
+		if (!retry) {
+			DRM_ERROR("PLL failed to lock on pipe\n");
+			err = -EAGAIN;
+			goto power_on_err;
+		}
+	}
+
+	if (IS_ANN(dev)) {
+		/* FIXME: reset the DC registers for ANN A0 */
+		power_island_get(OSPM_DISPLAY_B | OSPM_DISPLAY_C);
+
+		REG_WRITE(DSPCLK_GATE_D, 0x0);
+		REG_WRITE(RAMCLK_GATE_D, 0xc0000 | (1 << 11)); // FIXME: delay 1us for RDB done signal
+		REG_WRITE(PFIT_CONTROL, 0x20000000);
+		REG_WRITE(DSPIEDCFGSHDW, 0x0);
+		REG_WRITE(DSPARB2, 0x000A0200);
+		REG_WRITE(DSPARB, 0x18040080);
+		REG_WRITE(DSPFW1, 0x0F0F3F3F);
+		REG_WRITE(DSPFW2, 0x5F2F0F3F);
+		REG_WRITE(DSPFW3, 0x0);
+		REG_WRITE(DSPFW4, 0x07071F1F);
+		REG_WRITE(DSPFW5, 0x2F17071F);
+		REG_WRITE(DSPFW6, 0x00001F3F);
+		REG_WRITE(DSPFW7, 0x1F3F1F3F);
+		REG_WRITE(DSPSRCTRL, 0x00080100);
+		REG_WRITE(DSPCHICKENBIT, 0x20);
+		REG_WRITE(FBDC_CHICKEN, 0x0C0C0C0C);
+		REG_WRITE(CURACNTR, 0x0);
+		REG_WRITE(CURBCNTR, 0x0);
+		REG_WRITE(CURCCNTR, 0x0);
+		REG_WRITE(IEP_OVA_CTRL, 0x0);
+		REG_WRITE(IEP_OVA_CTRL, 0x0);
+		REG_WRITE(DSPACNTR, 0x0);
+		REG_WRITE(DSPBCNTR, 0x0);
+		REG_WRITE(DSPCCNTR, 0x0);
+		REG_WRITE(DSPDCNTR, 0x0);
+		REG_WRITE(DSPECNTR, 0x0);
+		REG_WRITE(DSPFCNTR, 0x0);
+
+		power_island_put(OSPM_DISPLAY_B | OSPM_DISPLAY_C);
+	}
+
+	/*exit ULPS*/
+	if (__dbi_exit_ulps_locked(dsi_config, 0)) {
+		DRM_ERROR("Failed to exit ULPS\n");
+		goto power_on_err;
+	}
+	/*update MIPI port config*/
+	REG_WRITE(regs->mipi_reg, ctx->mipi |
+			 REG_READ(regs->mipi_reg));
+
+	/*unready dsi adapter for re-programming*/
+	REG_WRITE(regs->device_ready_reg,
+		REG_READ(regs->device_ready_reg) & ~(DSI_DEVICE_READY));
+
+	if (is_dual_dsi(dev)) {
+		if (__dbi_exit_ulps_locked(dsi_config, 0x800)) {
+			DRM_ERROR("Failed to exit ULPS\n");
+			goto power_on_err;
+		}
+		offset = 0x1000;
+		REG_WRITE(regs->mipi_reg + offset, ctx->mipi |
+				 REG_READ(regs->mipi_reg + offset));
+		/*unready dsi adapter for re-programming*/
+		offset = 0x800;
+		REG_WRITE(regs->device_ready_reg + offset,
+			REG_READ(regs->device_ready_reg + offset) & ~(DSI_DEVICE_READY));
+	}
+
+	/*
+	 * According to MIPI D-PHY spec, if clock stop feature is enabled (EOT
+	 * Disable), un-ready MIPI adapter needs to wait for 20 cycles from HS
+	 * to LP mode. Per calculation 1us is enough.
+	 */
+	if (ctx->eot_disable & CLOCK_STOP)
+		udelay(1);
+
+	__dbi_set_properties(dsi_config, PORT_A);
+
+	/* update 0x650c[0] = 1 to fixed arbitration pattern
+	 * it is found display TLB request be blocked by display plane
+	 * memory requests, never goes out. This causes display controller
+	 * uses stale TLB data to do memory translation, getting wrong
+	 * memory address for data, and causing the flickering issue.
+	 */
+	REG_WRITE(GCI_CTRL, REG_READ(GCI_CTRL) | 1);
+
+	/*Setup pipe timing*/
+	REG_WRITE(regs->htotal_reg, ctx->htotal);
+	REG_WRITE(regs->hblank_reg, ctx->hblank);
+	REG_WRITE(regs->hsync_reg, ctx->hsync);
+	REG_WRITE(regs->vtotal_reg, ctx->vtotal);
+	REG_WRITE(regs->vblank_reg, ctx->vblank);
+	REG_WRITE(regs->vsync_reg, ctx->vsync);
+	REG_WRITE(regs->pipesrc_reg, ctx->pipesrc);
+	REG_WRITE(regs->dsppos_reg, ctx->dsppos);
+	REG_WRITE(regs->dspstride_reg, ctx->dspstride);
+
+	/*restore color_coef (chrome) */
+	for (i = 0; i < 6; i++)
+		REG_WRITE(regs->color_coef_reg + (i<<2), csc_setting_save[i]);
+
+	/* restore palette (gamma) */
+	for (i = 0; i < 256; i++)
+		REG_WRITE(regs->palette_reg + (i<<2), gamma_setting_save[i]);
+
+	/* restore dpst setting */
+	if (dev_priv->psb_dpst_state) {
+		dpstmgr_reg_restore_locked(dev, dsi_config);
+		psb_enable_pipestat(dev_priv, 0, PIPE_DPST_EVENT_ENABLE);
+	}
+
+	/*Setup plane*/
+	REG_WRITE(regs->dspsize_reg, ctx->dspsize);
+	REG_WRITE(regs->dspsurf_reg, ctx->dspsurf);
+	REG_WRITE(regs->dsplinoff_reg, ctx->dsplinoff);
+	REG_WRITE(regs->vgacntr_reg, ctx->vgacntr);
+
+	if (is_dual_dsi(dev))
+		__dbi_set_properties(dsi_config, PORT_C);
+
+	/*enable plane*/
+	val = ctx->dspcntr | BIT31;
+	REG_WRITE(regs->dspcntr_reg, val);
+
+	if (ctx->sprite_dspcntr & BIT31) {
+		if (dsi_config->pipe == 0)
+			sprite_reg_offset = 0x3000;
+		else if (dsi_config->pipe == 2)
+			sprite_reg_offset = 0x1000;
+
+		/* Set up Sprite Plane */
+		REG_WRITE(regs->dspsize_reg + sprite_reg_offset,
+				ctx->sprite_dspsize);
+		REG_WRITE(regs->dspsurf_reg + sprite_reg_offset,
+				ctx->sprite_dspsurf);
+		REG_WRITE(regs->dsplinoff_reg + sprite_reg_offset,
+				ctx->sprite_dsplinoff);
+		REG_WRITE(regs->dsppos_reg + sprite_reg_offset,
+				ctx->sprite_dsppos);
+		REG_WRITE(regs->dspstride_reg + sprite_reg_offset,
+				ctx->sprite_dspstride);
+
+		/* enable plane */
+		REG_WRITE(regs->dspcntr_reg + sprite_reg_offset,
+				ctx->sprite_dspcntr);
+	}
+
+	/* Set up Overlay Plane */
+	if (ctx->ovaadd)
+		PSB_WVDC32(ctx->ovaadd, OV_OVADD);
+
+	if (ctx->ovcadd)
+		PSB_WVDC32(ctx->ovcadd, OVC_OVADD);
+
+	/*ready dsi adapter*/
+	REG_WRITE(regs->device_ready_reg,
+		REG_READ(regs->device_ready_reg) | DSI_DEVICE_READY);
+	mdelay(1);
+	if (is_dual_dsi(dev)) {
+		REG_WRITE(regs->device_ready_reg + offset,
+			REG_READ(regs->device_ready_reg + offset) | DSI_DEVICE_READY);
+	}
+
+	if (IS_ANN(dev)) {
+		REG_WRITE(regs->ddl1_reg, ctx->ddl1);
+		REG_WRITE(regs->ddl2_reg, ctx->ddl2);
+		REG_WRITE(regs->ddl3_reg, ctx->ddl3);
+		REG_WRITE(regs->ddl4_reg, ctx->ddl4);
+
+		REG_WRITE(DSPARB2, ctx->dsparb2);
+		REG_WRITE(DSPARB, ctx->dsparb);
+	}
+
+	/*Enable pipe*/
+	val = ctx->pipeconf;
+	val &= ~0x000c0000;
+	val |= BIT31 | PIPEACONF_DSR;
+	REG_WRITE(regs->pipeconf_reg, val);
+
+	/*Wait for pipe enabling,when timing generator is working */
+	retry = 10000;
+	while (--retry && !(REG_READ(regs->pipeconf_reg) & BIT30))
+		udelay(3);
+
+	if (!retry) {
+		DRM_ERROR("Failed to enable pipe\n");
+		err = -EAGAIN;
+		goto power_on_err;
+	}
+
+	/*
+	 * Enable TE to trigger "write_mem_start" issuing
+	 * in non-normal boot modes.
+	 */
+	mdfld_enable_te(dev, dsi_config->pipe);
+	return err;
+
+power_on_err:
+	power_island_put(power_island);
+	return err;
+}
+
+/**
+ * Power on sequence for command mode MIPI panel.
+ * NOTE: do NOT modify this function
+ */
+static int __dbi_panel_power_on(struct mdfld_dsi_config *dsi_config,
+			struct panel_funcs *p_funcs)
+{
+	struct mdfld_dsi_hw_registers *regs;
+	struct mdfld_dsi_hw_context *ctx;
+	struct drm_psb_private *dev_priv;
+	struct drm_device *dev;
+	int reset_count = 10;
+	int err = 0;
+	struct mdfld_dsi_pkg_sender *sender
+			= mdfld_dsi_get_pkg_sender(dsi_config);
+	struct mdfld_dbi_dsr_info *dsr_info;
+	struct mdfld_dsi_dbi_output **dbi_outputs;
+	struct mdfld_dsi_dbi_output *dbi_output;
+
+	if (!sender) {
+		DRM_ERROR("pkg sender is NULL\n");
+		return -EINVAL;
+	}
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (!dsi_config)
+		return -EINVAL;
+
+	regs = &dsi_config->regs;
+	ctx = &dsi_config->dsi_hw_context;
+	dev = dsi_config->dev;
+	dev_priv = dev->dev_private;
+
+	mdfld_dsi_dsr_forbid_locked(dsi_config);
+reset_recovery:
+	--reset_count;
+	err = 0;
+	/*after entering dstb mode, need reset*/
+	if (p_funcs && p_funcs->exit_deep_standby)
+		p_funcs->exit_deep_standby(dsi_config);
+
+	if (__dbi_power_on(dsi_config)) {
+		DRM_ERROR("Failed to init display controller!\n");
+		err = -EAGAIN;
+		goto power_on_err;
+	}
+
+	/**
+	 * Different panel may have different ways to have
+	 * drvIC initialized. Support it!
+	 */
+	if (p_funcs && p_funcs->drv_ic_init) {
+		if (p_funcs->drv_ic_init(dsi_config)) {
+			DRM_ERROR("Failed to init dsi controller!\n");
+			err = -EAGAIN;
+			goto power_on_err;
+		}
+	}
+	/* Issue "write_mem_start" DSI command during power on. */
+	dsr_info = dev_priv->dbi_dsr_info;
+	dbi_outputs = dsr_info->dbi_outputs;
+	dbi_output = dsi_config->pipe ? dbi_outputs[1] : dbi_outputs[0];
+
+	if (!IS_ANN(dev))
+		intel_dsi_dbi_update_fb(dbi_output);
+
+	/**
+	 * Different panel may have different ways to have
+	 * panel turned on. Support it!
+	 */
+	if (p_funcs && p_funcs->power_on)
+		if (p_funcs->power_on(dsi_config)) {
+			DRM_ERROR("Failed to power on panel\n");
+			err = -EAGAIN;
+			goto power_on_err;
+		}
+	if (p_funcs && p_funcs->set_brightness)
+		if (p_funcs->set_brightness(dsi_config,
+					ctx->lastbrightnesslevel))
+			DRM_ERROR("Failed to set panel brightness\n");
+
+	if (p_funcs && p_funcs->drv_set_panel_mode)
+		p_funcs->drv_set_panel_mode(dsi_config);
+
+	/*wait for all FIFOs empty*/
+	mdfld_dsi_wait_for_fifos_empty(sender);
+	if (is_dual_dsi(dev)) {
+		sender->work_for_slave_panel = true;
+		mdfld_dsi_wait_for_fifos_empty(sender);
+		sender->work_for_slave_panel = false;
+	}
+
+	if (IS_ANN(dev))
+		intel_dsi_dbi_update_fb(dbi_output);
+
+power_on_err:
+	if (err && reset_count) {
+		DRM_ERROR("Failed to init panel, try  reset it again!\n");
+		goto reset_recovery;
+	}
+	mdfld_dsi_dsr_allow_locked(dsi_config);
+	return err;
+}
+/**
+ * Power off sequence for DBI interface
+*/
+int __dbi_power_off(struct mdfld_dsi_config *dsi_config)
+{
+	struct mdfld_dsi_hw_registers *regs;
+	struct mdfld_dsi_hw_context *ctx;
+	struct drm_device *dev;
+	struct drm_psb_private *dev_priv;
+	int err = 0;
+	u32 power_island = 0;
+	int retry,i;
+	int offset = 0;
+	u32 val;
+
+	if (!dsi_config)
+		return -EINVAL;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	regs = &dsi_config->regs;
+	ctx = &dsi_config->dsi_hw_context;
+	dev = dsi_config->dev;
+	dev_priv = dev->dev_private;
+
+	ctx->dspcntr    = REG_READ(regs->dspcntr_reg);
+	ctx->pipeconf   = REG_READ(regs->pipeconf_reg);
+
+	ctx->dsparb = REG_READ(DSPARB);
+	ctx->dsparb2 = REG_READ(DSPARB2);
+
+	/*save color_coef (chrome) */
+	for (i = 0; i < 6; i++)
+		ctx->color_coef[i] = REG_READ(regs->color_coef_reg + (i<<2));
+
+	/* save palette (gamma) */
+	for (i = 0; i < 256; i++)
+		ctx->palette[i] = REG_READ(regs->palette_reg + (i<<2));
+
+	/*Disable plane*/
+	val = ctx->dspcntr;
+	REG_WRITE(regs->dspcntr_reg, (val & ~BIT31));
+
+	/*Disable pipe*/
+	/* Don't disable DSR mode. */
+	REG_WRITE(regs->pipeconf_reg, (REG_READ(regs->pipeconf_reg) & ~BIT31));
+	/*wait for pipe disabling,
+	  pipe synchronization plus , only avaiable when
+	  timer generator is working*/
+	if (REG_READ(regs->mipi_reg) & BIT31) {
+		retry = 100000;
+		while (--retry && (REG_READ(regs->pipeconf_reg) & BIT30))
+			udelay(5);
+
+		if (!retry) {
+			DRM_ERROR("Failed to disable pipe\n");
+			if (IS_MOFD(dev)) {
+				/*
+				 * FIXME: turn off the power island directly
+				 * although failed to disable pipe.
+				 */
+				err = 0;
+			} else
+				err = -EAGAIN;
+			goto power_off_err;
+		}
+	}
+	if (!is_dual_dsi(dev)) {
+		/*enter ULPS*/
+		__dbi_enter_ulps_locked(dsi_config, offset);
+	} else {
+		/*Disable MIPI port*/
+		REG_WRITE(regs->mipi_reg, (REG_READ(regs->mipi_reg) & ~BIT31));
+		/*clear Low power output hold*/
+		REG_WRITE(regs->mipi_reg, (REG_READ(regs->mipi_reg) & ~BIT16));
+		/*Disable DSI controller*/
+		REG_WRITE(regs->device_ready_reg, (ctx->device_ready & ~BIT0));
+		/*enter ULPS*/
+		__dbi_enter_ulps_locked(dsi_config, offset);
+
+		offset = 0x1000;
+		/*Disable MIPI port*/
+		REG_WRITE(regs->mipi_reg +offset,
+		(REG_READ(regs->mipi_reg + offset) & ~BIT31));
+		/*clear Low power output hold*/
+		REG_WRITE(regs->mipi_reg + offset,
+		(REG_READ(regs->mipi_reg + offset) & ~BIT16));
+
+		offset = 0x800;
+		/*Disable DSI controller*/
+		REG_WRITE(regs->device_ready_reg + offset, (ctx->device_ready & ~BIT0));
+		/*enter ULPS*/
+		__dbi_enter_ulps_locked(dsi_config, offset);
+		offset = 0x0;
+
+	}
+power_off_err:
+
+	power_island = pipe_to_island(dsi_config->pipe);
+
+	if (power_island & (OSPM_DISPLAY_A | OSPM_DISPLAY_C))
+		power_island |= OSPM_DISPLAY_MIO;
+	if (is_dual_dsi(dev))
+		power_island |= OSPM_DISPLAY_C;
+
+	if (!power_island_put(power_island))
+		return -EINVAL;
+
+	return err;
+}
+
+/**
+ * Power off sequence for command mode MIPI panel.
+ * NOTE: do NOT modify this function
+ */
+static int __dbi_panel_power_off(struct mdfld_dsi_config *dsi_config,
+			struct panel_funcs *p_funcs)
+{
+	struct mdfld_dsi_hw_registers *regs;
+	struct mdfld_dsi_hw_context *ctx;
+	struct drm_device *dev;
+	struct drm_psb_private *dev_priv;
+	int err = 0;
+
+	if (!dsi_config)
+		return -EINVAL;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	regs = &dsi_config->regs;
+	ctx = &dsi_config->dsi_hw_context;
+	dev = dsi_config->dev;
+	dev_priv = dev->dev_private;
+
+	mdfld_dsi_dsr_forbid_locked(dsi_config);
+	ctx->lastbrightnesslevel = psb_brightness;
+	if (p_funcs && p_funcs->set_brightness)
+		if (p_funcs->set_brightness(dsi_config, 0))
+			DRM_ERROR("Failed to set panel brightness\n");
+
+	/*wait for two TE, let pending PVR flip complete*/
+	msleep(32);
+
+	/**
+	 * Different panel may have different ways to have
+	 * panel turned off. Support it!
+	 */
+	if (p_funcs && p_funcs->power_off) {
+		if (p_funcs->power_off(dsi_config)) {
+			DRM_ERROR("Failed to power off panel\n");
+			err = -EAGAIN;
+			goto power_off_err;
+		}
+	}
+
+	/*power off dbi interface*/
+	__dbi_power_off(dsi_config);
+
+power_off_err:
+	mdfld_dsi_dsr_allow_locked(dsi_config);
+	return err;
+}
+
+/* generic dbi function */
+static
+int mdfld_generic_dsi_dbi_set_power(struct drm_encoder *encoder, bool on)
+{
+	struct mdfld_dsi_encoder *dsi_encoder;
+	struct mdfld_dsi_dbi_output *dbi_output;
+	struct mdfld_dsi_connector *dsi_connector;
+	struct mdfld_dsi_config *dsi_config;
+	struct panel_funcs *p_funcs;
+	struct drm_device *dev;
+	struct drm_psb_private *dev_priv;
+	int pipe = 0;
+
+	if (!encoder) {
+		DRM_ERROR("Invalid encoder\n");
+		return -EINVAL;
+	}
+
+	PSB_DEBUG_ENTRY("%s\n", (on ? "on" : "off"));
+
+	dsi_encoder = MDFLD_DSI_ENCODER(encoder);
+	dbi_output = MDFLD_DSI_DBI_OUTPUT(dsi_encoder);
+	dsi_config = mdfld_dsi_encoder_get_config(dsi_encoder);
+	dsi_connector = mdfld_dsi_encoder_get_connector(dsi_encoder);
+	if (!dsi_connector) {
+		DRM_ERROR("dsi_connector is NULL\n");
+		return -EINVAL;
+	}
+
+	p_funcs = dbi_output->p_funcs;
+	dev = encoder->dev;
+	dev_priv = dev->dev_private;
+	pipe = dsi_config->pipe;
+
+	mutex_lock(&dsi_config->context_lock);
+
+	if (dsi_connector->status != connector_status_connected)
+		goto set_power_err;
+
+	if (dbi_output->first_boot && on) {
+		if (dsi_config->dsi_hw_context.panel_on) {
+			/* When using smooth transition,
+			 * wake up ESD detection thread.
+			 */
+			mdfld_dsi_error_detector_wakeup(dsi_connector);
+
+			DRM_INFO("skip panle power setting for first boot!");
+			goto fun_exit;
+		}
+
+		/* power down islands turned on by firmware */
+		power_island_put(OSPM_DISPLAY_A |
+				 OSPM_DISPLAY_C | OSPM_DISPLAY_MIO);
+	}
+
+	switch (on) {
+	case true:
+		/* panel is already on */
+		if (dsi_config->dsi_hw_context.panel_on)
+			goto fun_exit;
+
+		if (__dbi_panel_power_on(dsi_config, p_funcs)) {
+			DRM_ERROR("Faild to turn on panel\n");
+			goto set_power_err;
+		}
+
+		dsi_config->dsi_hw_context.panel_on = 1;
+		dbi_output->dbi_panel_on = 1;
+		mdfld_dsi_error_detector_wakeup(dsi_connector);
+
+		break;
+	case false:
+		if (!dsi_config->dsi_hw_context.panel_on &&
+			!dbi_output->first_boot)
+			goto fun_exit;
+		if (__dbi_panel_power_off(dsi_config, p_funcs)) {
+			DRM_ERROR("Faild to turn off panel\n");
+			goto set_power_err;
+		}
+
+		dsi_config->dsi_hw_context.panel_on = 0;
+		dbi_output->dbi_panel_on = 0;
+		break;
+	default:
+		break;
+	}
+
+fun_exit:
+	mutex_unlock(&dsi_config->context_lock);
+	PSB_DEBUG_ENTRY("successfully\n");
+	return 0;
+
+set_power_err:
+	mutex_unlock(&dsi_config->context_lock);
+	PSB_DEBUG_ENTRY("unsuccessfully!\n");
+	return -EAGAIN;
+}
+
+static
+void mdfld_generic_dsi_dbi_mode_set(struct drm_encoder *encoder,
+		struct drm_display_mode *mode,
+		struct drm_display_mode *adjusted_mode)
+{
+	mdfld_dsi_set_drain_latency(encoder, adjusted_mode);
+	return;
+}
+
+static
+void mdfld_generic_dsi_dbi_prepare(struct drm_encoder *encoder)
+{
+	struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
+	struct mdfld_dsi_dbi_output *dbi_output =
+		MDFLD_DSI_DBI_OUTPUT(dsi_encoder);
+
+	PSB_DEBUG_ENTRY("\n");
+
+	dbi_output->mode_flags |= MODE_SETTING_IN_ENCODER;
+	dbi_output->mode_flags &= ~MODE_SETTING_ENCODER_DONE;
+}
+
+static
+void mdfld_generic_dsi_dbi_commit(struct drm_encoder *encoder)
+{
+	struct mdfld_dsi_encoder *dsi_encoder =
+		MDFLD_DSI_ENCODER(encoder);
+	struct mdfld_dsi_dbi_output *dbi_output =
+		MDFLD_DSI_DBI_OUTPUT(dsi_encoder);
+	struct drm_device *dev = dbi_output->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	mdfld_generic_dsi_dbi_set_power(encoder, true);
+
+	dbi_output->mode_flags &= ~MODE_SETTING_IN_ENCODER;
+	if (dbi_output->channel_num == 1)
+		dev_priv->dsr_fb_update |= MDFLD_DSR_2D_3D_2;
+	else
+		dev_priv->dsr_fb_update |= MDFLD_DSR_2D_3D_0;
+	dbi_output->mode_flags |= MODE_SETTING_ENCODER_DONE;
+
+	dbi_output->first_boot = false;
+}
+
+static
+void mdfld_generic_dsi_dbi_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct mdfld_dsi_encoder *dsi_encoder;
+	struct mdfld_dsi_dbi_output *dbi_output;
+	struct drm_device *dev;
+	struct mdfld_dsi_config *dsi_config;
+	struct drm_psb_private *dev_priv;
+	struct panel_funcs *p_funcs;
+
+	dsi_encoder = MDFLD_DSI_ENCODER(encoder);
+	dsi_config = mdfld_dsi_encoder_get_config(dsi_encoder);
+	if (!dsi_config) {
+		DRM_ERROR("dsi_config is NULL\n");
+		return;
+	}
+	dbi_output = MDFLD_DSI_DBI_OUTPUT(dsi_encoder);
+	dev = dsi_config->dev;
+	dev_priv = dev->dev_private;
+
+	PSB_DEBUG_ENTRY("%s\n", (mode == DRM_MODE_DPMS_ON ? "on" :
+		DRM_MODE_DPMS_STANDBY == mode ? "standby" : "off"));
+
+	mutex_lock(&dev_priv->dpms_mutex);
+	DCLockMutex();
+
+	p_funcs = dbi_output->p_funcs;
+	if (mode == DRM_MODE_DPMS_ON) {
+		mdfld_generic_dsi_dbi_set_power(encoder, true);
+		DCAttachPipe(dsi_config->pipe);
+		DC_MRFLD_onPowerOn(dsi_config->pipe);
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+		{
+			struct mdfld_dsi_hw_context *ctx =
+				&dsi_config->dsi_hw_context;
+			struct backlight_device bd;
+			bd.props.brightness = ctx->lastbrightnesslevel;
+			psb_set_brightness(&bd);
+		}
+#endif
+	} else if (mode == DRM_MODE_DPMS_STANDBY) {
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+		struct mdfld_dsi_hw_context *ctx = &dsi_config->dsi_hw_context;
+		struct backlight_device bd;
+		ctx->lastbrightnesslevel = psb_get_brightness(&bd);
+		bd.props.brightness = 0;
+		psb_set_brightness(&bd);
+#endif
+
+		/* Make the pending flip request as completed. */
+		DCUnAttachPipe(dsi_config->pipe);
+		DC_MRFLD_onPowerOff(dsi_config->pipe);
+	} else {
+		mdfld_generic_dsi_dbi_set_power(encoder, false);
+
+		drm_handle_vblank(dev, dsi_config->pipe);
+
+		/* Turn off TE interrupt. */
+		drm_vblank_off(dev, dsi_config->pipe);
+
+		/* Make the pending flip request as completed. */
+		DCUnAttachPipe(dsi_config->pipe);
+		DC_MRFLD_onPowerOff(dsi_config->pipe);
+	}
+
+	DCUnLockMutex();
+	mutex_unlock(&dev_priv->dpms_mutex);
+}
+
+static
+void mdfld_generic_dsi_dbi_save(struct drm_encoder *encoder)
+{
+	struct mdfld_dsi_encoder *dsi_encoder;
+	struct mdfld_dsi_config *dsi_config;
+	struct drm_device *dev;
+	int pipe;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (!encoder)
+		return;
+
+	dsi_encoder = MDFLD_DSI_ENCODER(encoder);
+	dsi_config = mdfld_dsi_encoder_get_config(dsi_encoder);
+	dev = dsi_config->dev;
+	pipe = mdfld_dsi_encoder_get_pipe(dsi_encoder);
+
+	DCLockMutex();
+	mdfld_generic_dsi_dbi_set_power(encoder, false);
+
+	drm_handle_vblank(dev, pipe);
+
+	/* Turn off vsync (TE) interrupt. */
+	drm_vblank_off(dev, pipe);
+
+	/* Make the pending flip request as completed. */
+	DCUnAttachPipe(pipe);
+	DC_MRFLD_onPowerOff(pipe);
+	DCUnLockMutex();
+}
+
+static
+void mdfld_generic_dsi_dbi_restore(struct drm_encoder *encoder)
+{
+	struct mdfld_dsi_encoder *dsi_encoder;
+	struct mdfld_dsi_config *dsi_config;
+	struct drm_device *dev;
+	int pipe;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (!encoder)
+		return;
+
+	dsi_encoder = MDFLD_DSI_ENCODER(encoder);
+	dsi_config = mdfld_dsi_encoder_get_config(dsi_encoder);
+	dev = dsi_config->dev;
+	pipe = mdfld_dsi_encoder_get_pipe(dsi_encoder);
+
+	DCLockMutex();
+	mdfld_generic_dsi_dbi_set_power(encoder, true);
+
+	DCAttachPipe(pipe);
+	DC_MRFLD_onPowerOn(pipe);
+	DCUnLockMutex();
+}
+
+static
+bool mdfld_generic_dsi_dbi_mode_fixup(struct drm_encoder *encoder,
+		const struct drm_display_mode *mode,
+		struct drm_display_mode *adjusted_mode)
+{
+	struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
+	struct mdfld_dsi_dbi_output *dbi_output =
+		MDFLD_DSI_DBI_OUTPUT(dsi_encoder);
+	struct drm_display_mode *fixed_mode = dbi_output->panel_fixed_mode;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (fixed_mode) {
+		adjusted_mode->hdisplay = fixed_mode->hdisplay;
+		adjusted_mode->hsync_start = fixed_mode->hsync_start;
+		adjusted_mode->hsync_end = fixed_mode->hsync_end;
+		adjusted_mode->htotal = fixed_mode->htotal;
+		adjusted_mode->vdisplay = fixed_mode->vdisplay;
+		adjusted_mode->vsync_start = fixed_mode->vsync_start;
+		adjusted_mode->vsync_end = fixed_mode->vsync_end;
+		adjusted_mode->vtotal = fixed_mode->vtotal;
+		adjusted_mode->clock = fixed_mode->clock;
+		drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
+	}
+
+	return true;
+}
+
+static
+struct drm_encoder_funcs dsi_dbi_generic_encoder_funcs = {
+	.destroy = drm_encoder_cleanup,
+};
+
+static
+struct drm_encoder_helper_funcs dsi_dbi_generic_encoder_helper_funcs = {
+	.save = mdfld_generic_dsi_dbi_save,
+	.restore = mdfld_generic_dsi_dbi_restore,
+	.dpms = mdfld_generic_dsi_dbi_dpms,
+	.mode_fixup = mdfld_generic_dsi_dbi_mode_fixup,
+	.prepare = mdfld_generic_dsi_dbi_prepare,
+	.mode_set = mdfld_generic_dsi_dbi_mode_set,
+	.commit = mdfld_generic_dsi_dbi_commit,
+};
+
+/*
+ * Init DSI DBI encoder.
+ * Allocate an mdfld_dsi_encoder and attach it to given @dsi_connector
+ * return pointer of newly allocated DBI encoder, NULL on error
+ */
+struct mdfld_dsi_encoder *mdfld_dsi_dbi_init(struct drm_device *dev,
+		struct mdfld_dsi_connector *dsi_connector,
+		struct panel_funcs *p_funcs)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *)dev->dev_private;
+	struct mdfld_dsi_dbi_output *dbi_output = NULL;
+	struct mdfld_dsi_config *dsi_config;
+	struct drm_connector *connector = NULL;
+	struct drm_encoder *encoder = NULL;
+	struct drm_display_mode *fixed_mode = NULL;
+	struct psb_gtt *pg = dev_priv ? (dev_priv->pg) : NULL;
+
+#ifdef CONFIG_MDFLD_DSI_DPU
+	struct mdfld_dbi_dpu_info *dpu_info =
+		dev_priv ? (dev_priv->dbi_dpu_info) : NULL;
+#else
+	struct mdfld_dbi_dsr_info *dsr_info =
+		dev_priv ? (dev_priv->dbi_dsr_info) : NULL;
+#endif
+	int pipe;
+	int ret;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (!pg || !dsi_connector || !p_funcs) {
+		DRM_ERROR("Invalid parameters\n");
+		return NULL;
+	}
+
+	dsi_config = mdfld_dsi_get_config(dsi_connector);
+	pipe = dsi_connector->pipe;
+
+	if (p_funcs && p_funcs->reset)
+			p_funcs->reset(dsi_config);
+
+	/*detect panel connection stauts*/
+	if (p_funcs->detect) {
+		ret = p_funcs->detect(dsi_config);
+		if (ret) {
+			DRM_INFO("Fail to detect Panel on pipe %d\n", pipe);
+			dsi_connector->status =
+				connector_status_disconnected;
+		} else {
+			DRM_INFO("Panel on pipe %d is connected\n", pipe);
+			dsi_connector->status =
+				connector_status_connected;
+		}
+	} else {
+		/*use the default config*/
+		if (pipe == 0)
+			dsi_connector->status =
+				connector_status_connected;
+		else
+			dsi_connector->status =
+				connector_status_disconnected;
+	}
+
+	/*init DSI controller*/
+	if (p_funcs->dsi_controller_init)
+		p_funcs->dsi_controller_init(dsi_config);
+
+	if (dsi_connector->status == connector_status_connected) {
+		if (pipe == 0)
+			dev_priv->panel_desc |= DISPLAY_A;
+		if (pipe == 2)
+			dev_priv->panel_desc |= DISPLAY_C;
+	}
+
+	/* TODO: get panel info from DDB */
+	dbi_output = kzalloc(sizeof(struct mdfld_dsi_dbi_output), GFP_KERNEL);
+	if (!dbi_output) {
+		DRM_ERROR("No memory\n");
+		return NULL;
+	}
+
+	if (dsi_connector->pipe == 0) {
+		dbi_output->channel_num = 0;
+		dev_priv->dbi_output = dbi_output;
+	} else if (dsi_connector->pipe == 2) {
+		dbi_output->channel_num = 1;
+		dev_priv->dbi_output2 = dbi_output;
+	} else {
+		DRM_ERROR("only support 2 DSI outputs\n");
+		goto out_err1;
+	}
+
+	dbi_output->dev = dev;
+	dbi_output->p_funcs = p_funcs;
+
+	/*get fixed mode*/
+	fixed_mode = dsi_config->fixed_mode;
+
+	dbi_output->panel_fixed_mode = fixed_mode;
+
+	/*create drm encoder object*/
+	connector = &dsi_connector->base.base;
+	encoder = &dbi_output->base.base;
+	drm_encoder_init(dev,
+			encoder,
+			&dsi_dbi_generic_encoder_funcs,
+			DRM_MODE_ENCODER_DSI);
+	drm_encoder_helper_add(encoder,
+			&dsi_dbi_generic_encoder_helper_funcs);
+
+	/*attach to given connector*/
+	drm_mode_connector_attach_encoder(connector, encoder);
+	connector->encoder = encoder;
+
+	/*set possible crtcs and clones*/
+	if (dsi_connector->pipe) {
+		encoder->possible_crtcs = (1 << 2);
+		encoder->possible_clones = (1 << 1);
+	} else {
+		encoder->possible_crtcs = (1 << 0);
+		encoder->possible_clones = (1 << 0);
+	}
+
+	dev_priv->dsr_fb_update = 0;
+	dev_priv->b_dsr_enable = false;
+	dev_priv->exit_idle = mdfld_dsi_dbi_exit_dsr;
+	dev_priv->b_async_flip_enable = false;
+
+#if defined(CONFIG_MDFLD_DSI_DPU) || defined(CONFIG_MDFLD_DSI_DSR)
+	dev_priv->b_dsr_enable_config = true;
+#endif /*CONFIG_MDFLD_DSI_DSR*/
+
+	dbi_output->first_boot = true;
+	dbi_output->mode_flags = MODE_SETTING_IN_ENCODER;
+
+#ifdef CONFIG_MDFLD_DSI_DPU
+	/*add this output to dpu_info*/
+
+	if (dsi_connector->status == connector_status_connected) {
+		if (dsi_connector->pipe == 0)
+			dpu_info->dbi_outputs[0] = dbi_output;
+		else
+			dpu_info->dbi_outputs[1] = dbi_output;
+
+		dpu_info->dbi_output_num++;
+	}
+
+#else /*CONFIG_MDFLD_DSI_DPU*/
+	if (dsi_connector->status == connector_status_connected) {
+		/*add this output to dsr_info*/
+		if (dsi_connector->pipe == 0)
+			dsr_info->dbi_outputs[0] = dbi_output;
+		else
+			dsr_info->dbi_outputs[1] = dbi_output;
+
+		dsr_info->dbi_output_num++;
+	}
+#endif
+
+	PSB_DEBUG_ENTRY("successfully\n");
+
+	return &dbi_output->base;
+
+out_err1:
+	kfree(dbi_output);
+
+	return NULL;
+}
+
+
+void mdfld_reset_panel_handler_work(struct work_struct *work)
+{
+	struct drm_psb_private *dev_priv =
+		container_of(work, struct drm_psb_private, reset_panel_work);
+	struct mdfld_dsi_config *dsi_config = NULL;
+	struct mdfld_dsi_dbi_output *dbi_output = NULL;
+	struct panel_funcs *p_funcs  = NULL;
+	struct drm_device *dev;
+
+	dbi_output = dev_priv->dbi_output;
+	dsi_config = dev_priv->dsi_configs[0];
+
+	if (!dsi_config || !dbi_output)
+		return;
+	dev = dsi_config->dev;
+
+	/*disable ESD when HDMI connected*/
+	if (hdmi_state)
+		return;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	p_funcs = dbi_output->p_funcs;
+	if (p_funcs) {
+		mutex_lock(&dsi_config->context_lock);
+
+		if (!dsi_config->dsi_hw_context.panel_on) {
+			DRM_INFO("don't reset panel when panel is off\n");
+			mutex_unlock(&dsi_config->context_lock);
+			return;
+		}
+
+		DRM_INFO("Starts ESD panel reset\n");
+		/*
+		 * since panel is in abnormal state,
+		 * we do a power off/on first
+		 */
+		power_island_put(OSPM_DISPLAY_A |
+				 OSPM_DISPLAY_C |
+				 OSPM_DISPLAY_MIO);
+		power_island_get(OSPM_DISPLAY_A |
+				 OSPM_DISPLAY_C |
+				 OSPM_DISPLAY_MIO);
+
+		if (__dbi_panel_power_off(dsi_config, p_funcs))
+			DRM_INFO("failed to power off dbi panel\n");
+
+		if (get_panel_type(dev, 0) == JDI_7x12_CMD)
+			if (p_funcs && p_funcs->reset)
+				p_funcs->reset(dsi_config);
+
+		if (__dbi_panel_power_on(dsi_config, p_funcs)) {
+			DRM_ERROR("failed to power on dbi panel\n");
+			mutex_unlock(&dsi_config->context_lock);
+			return;
+		}
+
+		mutex_unlock(&dsi_config->context_lock);
+
+		/*recover pipestat in case island once been poweroff and pipestat was reset to default*/
+		mdfld_recover_te(dev, dsi_config->pipe);
+
+		DRM_INFO("%s: End panel reset\n", __func__);
+	} else {
+		DRM_INFO("%s invalid panel init\n", __func__);
+	}
+}
+
+/*
+ * SV's suggestion, dbi_bw_ctrl is calculated by following formular:
+ * (16*252+8)/lane_count in spec, and sv uses (16*256)/lane_count
+ */
+uint32_t calculate_dbi_bw_ctrl(const uint32_t lane_count)
+{
+	return (16*256)/lane_count;
+}
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/mdfld_dsi_dbi.h b/drivers/external_drivers/intel_media/display/tng/drv/mdfld_dsi_dbi.h
new file mode 100644
index 0000000..e4074f0
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/mdfld_dsi_dbi.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * jim liu <jim.liu@intel.com>
+ * Jackie Li<yaodong.li@intel.com>
+ */
+
+#ifndef __MDFLD_DSI_DBI_H__
+#define __MDFLD_DSI_DBI_H__
+
+#include "mdfld_output.h"
+#include "mdfld_dsi_output.h"
+
+/*
+ * DBI encoder which inherits from mdfld_dsi_encoder 
+ */
+struct mdfld_dsi_dbi_output {
+	struct mdfld_dsi_encoder base;
+	struct drm_display_mode *panel_fixed_mode;
+
+	u8 last_cmd;
+	u8 lane_count;
+	u8 channel_num;
+
+	struct drm_device *dev;
+
+	/*DSR*/
+	u32 dsr_idle_count;
+	bool dsr_fb_update_done;
+
+	/*mode setting flags*/
+	u32 mode_flags;
+
+	/*panel status*/
+	bool dbi_panel_on;
+	bool first_boot;
+	struct panel_funcs *p_funcs;
+};
+
+struct mdfld_dbi_dsr_info {
+	int dbi_output_num;
+	struct mdfld_dsi_dbi_output *dbi_outputs[2];
+
+	spinlock_t dsr_timer_lock;
+	struct timer_list dsr_timer;
+	u32 dsr_idle_count;
+};
+
+#define MDFLD_DSI_DBI_OUTPUT(dsi_encoder) \
+	container_of(dsi_encoder, struct mdfld_dsi_dbi_output, base)
+
+#define DBI_CB_TIMEOUT_COUNT	0xffff
+
+/*DCS commands*/
+#define enter_sleep_mode	0x10
+#define exit_sleep_mode		0x11
+#define set_display_off		0x28
+#define	set_dispaly_on		0x29
+#define set_column_address	0x2a
+#define set_page_addr		0x2b
+#define write_mem_start		0x2c
+
+/*offsets*/
+#define CMD_MEM_ADDR_OFFSET	0
+
+#define CMD_DATA_SRC_SYSTEM_MEM	0
+#define CMD_DATA_SRC_PIPE	1
+
+/*export functions*/
+extern void mdfld_dsi_dbi_exit_dsr(struct drm_device *dev,
+		u32 update_src,
+		void *p_surfaceAddr,
+		bool check_hw_on_only);
+extern void mdfld_dsi_dbi_enter_dsr(struct mdfld_dsi_dbi_output *dbi_output,
+		int pipe);
+extern int mdfld_dbi_dsr_init(struct drm_device *dev);
+extern struct mdfld_dsi_encoder *mdfld_dsi_dbi_init(struct drm_device *dev,
+		struct mdfld_dsi_connector *dsi_connector,
+		struct panel_funcs *p_funcs);
+extern void mdfld_reset_panel_handler_work(struct work_struct *work);
+extern void mdfld_dbi_update_panel(struct drm_device *dev, int pipe);
+extern int __dbi_power_on(struct mdfld_dsi_config *dsi_config);
+extern int __dbi_power_off(struct mdfld_dsi_config *dsi_config);
+uint32_t calculate_dbi_bw_ctrl(const uint32_t lane_count);
+#endif /*__MDFLD_DSI_DBI_H__*/
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/mdfld_dsi_dbi_dpu.c b/drivers/external_drivers/intel_media/display/tng/drv/mdfld_dsi_dbi_dpu.c
new file mode 100644
index 0000000..a4bf9a1
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/mdfld_dsi_dbi_dpu.c
@@ -0,0 +1,808 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * jim liu <jim.liu@intel.com>
+ * Jackie Li<yaodong.li@intel.com>
+ */
+
+#include "mdfld_dsi_dbi_dpu.h"
+#include "mdfld_dsi_dbi.h"
+#include "psb_intel_display.h"
+
+/**
+ * NOTE: all mdlfd_x_damage funcs should be called by holding dpu_update_lock
+ */
+static int mdfld_cursor_damage(struct mdfld_dbi_dpu_info *dpu_info,
+		mdfld_plane_t plane,
+		struct psb_drm_dpu_rect *damaged_rect)
+{
+	int x, y;
+	int new_x, new_y;
+	struct psb_drm_dpu_rect *rect;
+	struct psb_drm_dpu_rect *pipe_rect;
+	int cursor_size;
+	struct mdfld_cursor_info *cursor;
+	mdfld_plane_t fb_plane;
+
+	if (plane == MDFLD_CURSORA) {
+		cursor = &dpu_info->cursors[0];
+		x = dpu_info->cursors[0].x;
+		y = dpu_info->cursors[0].y;
+		cursor_size = dpu_info->cursors[0].size;
+		pipe_rect = &dpu_info->damage_pipea;
+		fb_plane = MDFLD_PLANEA;
+	} else {
+		cursor = &dpu_info->cursors[1];
+		x = dpu_info->cursors[1].x;
+		y = dpu_info->cursors[1].y;
+		cursor_size = dpu_info->cursors[1].size;
+		pipe_rect = &dpu_info->damage_pipec;
+		fb_plane = MDFLD_PLANEC;
+	}
+	new_x = damaged_rect->x;
+	new_y = damaged_rect->y;
+
+	if ((x == new_x) && (y == new_y))
+		return 0;
+
+	rect = &dpu_info->damaged_rects[plane];
+
+	/*move to right*/
+	if (new_x >= x) {
+		if (new_y > y) {
+			rect->x = x;
+			rect->y = y;
+			rect->width = (new_x + cursor_size) - x;
+			rect->height = (new_y + cursor_size) - y;
+			goto cursor_out;
+		} else {
+			rect->x = x;
+			rect->y = new_y;
+			rect->width = (new_x + cursor_size) - x;
+			rect->height = (y - new_y);
+			goto cursor_out;
+		}
+	} else {
+		if (new_y > y) {
+			rect->x = new_x;
+			rect->y = y;
+			rect->width = (x + cursor_size) - new_x;
+			rect->height = new_y - y;
+			goto cursor_out;
+		} else {
+			rect->x = new_x;
+			rect->y = new_y;
+			rect->width = (x + cursor_size) - new_x;
+			rect->height = (y + cursor_size) - new_y;
+		}
+	}
+
+cursor_out:
+	if (new_x < 0)
+		cursor->x = 0;
+	else if (new_x > 864)
+		cursor->x = 864;
+	else
+		cursor->x = new_x;
+
+	if (new_y < 0)
+		cursor->y = 0;
+	else if (new_y > 480)
+		cursor->y = 480;
+	else
+		cursor->y = new_y;
+
+	/**
+	 * FIXME: this is a workaround for cursor plane update, remove it later!
+	 */
+	rect->x = 0;
+	rect->y = 0;
+	rect->width = 864;
+	rect->height = 480;
+
+	mdfld_check_boundary(dpu_info, rect);
+
+	mdfld_dpu_region_extent(pipe_rect, rect);
+
+	/*update pending status of dpu_info*/
+	dpu_info->pending |= (1 << plane);
+
+	/*update fb panel as well*/
+	dpu_info->pending |= (1 << fb_plane);
+
+	return 0;
+}
+
+static int mdfld_fb_damage(struct mdfld_dbi_dpu_info *dpu_info,
+		mdfld_plane_t plane,
+		struct psb_drm_dpu_rect *damaged_rect)
+{
+	struct psb_drm_dpu_rect *rect;
+
+	if (plane == MDFLD_PLANEA)
+		rect = &dpu_info->damage_pipea;
+	else
+		rect = &dpu_info->damage_pipec;
+
+	mdfld_check_boundary(dpu_info, damaged_rect);
+
+	/*add fb damage area to this pipe*/
+	mdfld_dpu_region_extent(rect, damaged_rect);
+
+	/*update pending status of dpu_info*/
+	dpu_info->pending |= (1 << plane);
+	return 0;
+}
+
+/*do nothing here, right now*/
+static int mdfld_overlay_damage(struct mdfld_dbi_dpu_info *dpu_info,
+		mdfld_plane_t plane,
+		struct psb_drm_dpu_rect *damaged_rect)
+{
+	PSB_DEBUG_ENTRY("\n");
+
+	return 0;
+}
+
+int mdfld_dbi_dpu_report_damage(struct drm_device *dev,
+		mdfld_plane_t plane,
+		struct psb_drm_dpu_rect *rect)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct mdfld_dbi_dpu_info *dpu_info = dev_priv->dbi_dpu_info;
+	int ret = 0;
+
+	/*request lock*/
+	spin_lock(&dpu_info->dpu_update_lock);
+
+	switch (plane) {
+	case MDFLD_PLANEA:
+	case MDFLD_PLANEC:
+		mdfld_fb_damage(dpu_info, plane, rect);
+		break;
+	case MDFLD_CURSORA:
+	case MDFLD_CURSORC:
+		mdfld_cursor_damage(dpu_info, plane, rect);
+		break;
+	case MDFLD_OVERLAYA:
+	case MDFLD_OVERLAYC:
+		mdfld_overlay_damage(dpu_info, plane, rect);
+		break;
+	default:
+		DRM_ERROR("Invalid plane type %d\n", plane);
+		ret = -EINVAL;
+	}
+
+	spin_unlock(&dpu_info->dpu_update_lock);
+	return ret;
+}
+
+int mdfld_dbi_dpu_report_fullscreen_damage(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv;
+	struct mdfld_dbi_dpu_info *dpu_info;
+	struct mdfld_dsi_config *dsi_config;
+	struct psb_drm_dpu_rect rect;
+	int i;
+
+	if (!dev) {
+		DRM_ERROR("Invalid parameter\n");
+		return -EINVAL;
+	}
+
+	dev_priv = dev->dev_private;
+	dpu_info = dev_priv ? dev_priv->dbi_dpu_info : NULL;
+
+	if (!dpu_info) {
+		DRM_ERROR("No dpu info found\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < dpu_info->dbi_output_num; i++) {
+		dsi_config = dev_priv->dsi_configs[i];
+		if (dsi_config) {
+			rect.x = rect.y = 0;
+			rect.width = dsi_config->fixed_mode->hdisplay;
+			rect.height = dsi_config->fixed_mode->vdisplay;
+			mdfld_dbi_dpu_report_damage(dev,
+					i ? (MDFLD_PLANEC) : (MDFLD_PLANEA),
+					&rect);
+
+		}
+	}
+
+	/*exit DSR state*/
+	mdfld_dpu_exit_dsr(dev);
+
+	return 0;
+}
+
+int mdfld_dsi_dbi_dsr_off(struct drm_device *dev, struct psb_drm_dpu_rect *rect)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct mdfld_dbi_dpu_info *dpu_info = dev_priv->dbi_dpu_info;
+
+	mdfld_dbi_dpu_report_damage(dev, MDFLD_PLANEA, rect);
+
+	/*if dual display mode*/
+	if (dpu_info->dbi_output_num == 2)
+		mdfld_dbi_dpu_report_damage(dev, MDFLD_PLANEC, rect);
+
+	/*force dsi to exit DSR mode*/
+	mdfld_dpu_exit_dsr(dev);
+
+	return 0;
+}
+
+static void mdfld_dpu_cursor_plane_flush(struct mdfld_dbi_dpu_info *dpu_info,
+		mdfld_plane_t plane)
+{
+	struct drm_device *dev = dpu_info->dev;
+	u32 curpos_reg = CURAPOS;
+	u32 curbase_reg = CURABASE;
+	u32 curcntr_reg = CURACNTR;
+	struct mdfld_cursor_info *cursor = &dpu_info->cursors[0];
+
+	if (plane == MDFLD_CURSORC) {
+		curpos_reg = CURCPOS;
+		curbase_reg = CURCBASE;
+		curcntr_reg = CURCCNTR;
+		cursor = &dpu_info->cursors[1];
+	}
+
+	REG_WRITE(curcntr_reg, REG_READ(curcntr_reg));
+	REG_WRITE(curpos_reg,
+			(((cursor->x & CURSOR_POS_MASK) << CURSOR_X_SHIFT) |
+			 ((cursor->y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT)));
+	REG_WRITE(curbase_reg, REG_READ(curbase_reg));
+}
+
+static void mdfld_dpu_fb_plane_flush(struct mdfld_dbi_dpu_info *dpu_info,
+		mdfld_plane_t plane)
+{
+	u32 pipesrc_reg = PIPEASRC;
+	u32 dspsize_reg = DSPASIZE;
+	u32 dspoff_reg = DSPALINOFF;
+	u32 dspsurf_reg = DSPASURF;
+	u32 dspstride_reg = DSPASTRIDE;
+	u32 stride;
+	struct psb_drm_dpu_rect *rect = &dpu_info->damage_pipea;
+	struct drm_device *dev = dpu_info->dev;
+
+	if (plane == MDFLD_PLANEC) {
+		pipesrc_reg = PIPECSRC;
+		dspsize_reg = DSPCSIZE;
+		dspoff_reg = DSPCLINOFF;
+		dspsurf_reg = DSPCSURF;
+		dspstride_reg = DSPCSTRIDE;
+
+		rect = &dpu_info->damage_pipec;
+	}
+
+	stride = REG_READ(dspstride_reg);
+	/*FIXME: should I do the pipe src update here?*/
+	REG_WRITE(pipesrc_reg, ((rect->width - 1) << 16) | (rect->height - 1));
+	/*flush plane*/
+	REG_WRITE(dspsize_reg, ((rect->height - 1) << 16) | (rect->width - 1));
+	REG_WRITE(dspoff_reg, ((rect->x * 4) + (rect->y * stride)));
+	REG_WRITE(dspsurf_reg, REG_READ(dspsurf_reg));
+
+	/**
+	 * TODO: wait for flip finished and restore the pipesrc reg, 
+	 * or cursor will be show at a wrong position
+	 */
+}
+
+static void mdfld_dpu_overlay_plane_flush(struct mdfld_dbi_dpu_info *dpu_info,
+		mdfld_plane_t plane)
+{
+	PSB_DEBUG_ENTRY("\n");
+}
+
+/**
+ * TODO: we are still in dbi normal mode now, will try to use partial mode
+ * later.
+ */
+static int mdfld_dbi_prepare_cb(struct mdfld_dsi_dbi_output *dbi_output,
+		struct mdfld_dbi_dpu_info *dpu_info, int pipe)
+{
+	u8 *cb_addr = (u8 *)dbi_output->dbi_cb_addr;
+	u32 *index;
+	struct psb_drm_dpu_rect *rect =
+		pipe ? (&dpu_info->damage_pipec) : (&dpu_info->damage_pipea);
+
+	/*
+	 * FIXME: lock command buffer, this may lead to a dead lock, we've
+	 * already hold the dpu_update_lock
+	 */
+	if (!spin_trylock(&dbi_output->cb_lock)) {
+		DRM_ERROR("lock command buffer failed, try again\n");
+		return -EAGAIN;
+	}
+
+	index = &dbi_output->cb_write;
+
+	if (*index) {
+		DRM_ERROR("DBI command buffer unclean\n");
+		return -EAGAIN;
+	}
+
+	/*column address*/
+	*(cb_addr + ((*index)++)) = set_column_address;
+	*(cb_addr + ((*index)++)) = rect->x >> 8;
+	*(cb_addr + ((*index)++)) = rect->x;
+	*(cb_addr + ((*index)++)) = (rect->x + rect->width - 1) >> 8;
+	*(cb_addr + ((*index)++)) = (rect->x + rect->width - 1);
+
+	*index = 8;
+
+	/*page address*/
+	*(cb_addr + ((*index)++)) = set_page_addr;
+	*(cb_addr + ((*index)++)) = rect->y >> 8;
+	*(cb_addr + ((*index)++)) = rect->y;
+	*(cb_addr + ((*index)++)) = (rect->y + rect->height - 1) >> 8;
+	*(cb_addr + ((*index)++)) = (rect->y + rect->height - 1);
+
+	*index = 16;
+
+	/*write memory*/
+	*(cb_addr + ((*index)++)) = write_mem_start;
+
+	return 0;
+}
+
+static int mdfld_dbi_flush_cb(struct mdfld_dsi_dbi_output *dbi_output, int pipe)
+{
+	u32 cmd_phy = dbi_output->dbi_cb_phy;
+	u32 *index = &dbi_output->cb_write;
+	int reg_offset = pipe ? MIPIC_REG_OFFSET : 0;
+	struct drm_device *dev = dbi_output->dev;
+
+	if (*index == 0 || !dbi_output)
+		return 0;
+
+	REG_WRITE((MIPIA_CMD_LEN_REG + reg_offset), 0x010505);
+	REG_WRITE((MIPIA_CMD_ADD_REG + reg_offset), cmd_phy | BIT0 | BIT1);
+
+	*index = 0;
+
+	/*FIXME: unlock command buffer*/
+	spin_unlock(&dbi_output->cb_lock);
+
+	return 0;
+}
+
+static int mdfld_dpu_update_pipe(struct mdfld_dsi_dbi_output *dbi_output,
+		struct mdfld_dbi_dpu_info *dpu_info, int pipe)
+{
+	struct drm_device *dev = dbi_output->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	mdfld_plane_t cursor_plane = MDFLD_CURSORA;
+	mdfld_plane_t fb_plane = MDFLD_PLANEA;
+	mdfld_plane_t overlay_plane = MDFLD_OVERLAYA;
+	int ret = 0;
+	u32 plane_mask = MDFLD_PIPEA_PLANE_MASK;
+
+	/*damaged rects on this pipe*/
+	if (pipe) {
+		cursor_plane = MDFLD_CURSORC;
+		fb_plane = MDFLD_PLANEC;
+		overlay_plane = MDFLD_OVERLAYC;
+		plane_mask = MDFLD_PIPEC_PLANE_MASK;
+	}
+
+	/*update cursor which assigned to @pipe*/
+	if (dpu_info->pending & (1 << cursor_plane))
+		mdfld_dpu_cursor_plane_flush(dpu_info, cursor_plane);
+
+	/*update fb which assigned to @pipe*/
+	if (dpu_info->pending & (1 << fb_plane))
+		mdfld_dpu_fb_plane_flush(dpu_info, fb_plane);
+
+	/*TODO: update overlay*/
+	if (dpu_info->pending & (1 << overlay_plane))
+		mdfld_dpu_overlay_plane_flush(dpu_info, overlay_plane);
+
+	/*flush damage area to panel fb*/
+	if (dpu_info->pending & plane_mask) {
+		ret = mdfld_dbi_prepare_cb(dbi_output, dpu_info, pipe);
+
+		/**
+		 * TODO: remove b_dsr_enable later, 
+		 * added it so that text console could boot smoothly
+		 */
+		/*clean pending flags on this pipe*/
+		if (!ret && dev_priv->b_dsr_enable) {
+			dpu_info->pending &= ~plane_mask;
+
+			/*reset overlay pipe damage rect*/
+			mdfld_dpu_init_damage(dpu_info, pipe);
+		}
+	}
+
+	return ret;
+}
+
+static int mdfld_dpu_update_fb(struct drm_device *dev)
+{
+	struct drm_crtc *crtc;
+	struct psb_intel_crtc *psb_crtc;
+	struct mdfld_dsi_dbi_output **dbi_output;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct mdfld_dbi_dpu_info *dpu_info = dev_priv->dbi_dpu_info;
+	bool pipe_updated[2];
+	unsigned long irq_flags;
+	u32 dpll_reg = MRST_DPLL_A;
+	u32 dspcntr_reg = DSPACNTR;
+	u32 pipeconf_reg = PIPEACONF;
+	u32 dsplinoff_reg = DSPALINOFF;
+	u32 dspsurf_reg = DSPASURF;
+	u32 mipi_state_reg = MIPIA_INTR_STAT_REG;
+	u32 reg_offset = 0;
+	int pipe;
+	int i;
+	int ret;
+
+	dbi_output = dpu_info->dbi_outputs;
+	pipe_updated[0] = pipe_updated[1] = false;
+
+	if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, true))
+		return -EAGAIN;
+
+	/*try to prevent any new damage reports*/
+	if (!spin_trylock_irqsave(&dpu_info->dpu_update_lock, irq_flags))
+		return -EAGAIN;
+
+	for (i = 0; i < dpu_info->dbi_output_num; i++) {
+		crtc = dbi_output[i]->base.base.crtc;
+		psb_crtc = (crtc) ? to_psb_intel_crtc(crtc) : NULL;
+
+		pipe = dbi_output[i]->channel_num ? 2 : 0;
+
+		if (pipe == 2) {
+			dspcntr_reg = DSPCCNTR;
+			pipeconf_reg = PIPECCONF;
+			dsplinoff_reg = DSPCLINOFF;
+			dspsurf_reg = DSPCSURF;
+
+			reg_offset = MIPIC_REG_OFFSET;
+		}
+
+		if (!(REG_READ((MIPIA_GEN_FIFO_STAT_REG + reg_offset)) &
+					BIT27) ||
+				!(REG_READ(dpll_reg) & DPLL_VCO_ENABLE) ||
+				!(REG_READ(dspcntr_reg) &
+					DISPLAY_PLANE_ENABLE) ||
+				!(REG_READ(pipeconf_reg) &
+					DISPLAY_PLANE_ENABLE)) {
+			PSB_DEBUG_ENTRY("DBI FIFO is busy, DSI %d state %x\n",
+					pipe,
+					REG_READ(mipi_state_reg + reg_offset));
+			continue;
+		}
+
+		/*
+		 * If dbi output is in a exclusive state, pipe change won't be
+		 * updated.
+		 */
+		if (dbi_output[i]->dbi_panel_on &&
+				!(dbi_output[i]->mode_flags &
+					MODE_SETTING_ON_GOING) &&
+				!(psb_crtc && psb_crtc->mode_flags &
+					MODE_SETTING_ON_GOING) &&
+				!(dbi_output[i]->mode_flags &
+					MODE_SETTING_IN_DSR)) {
+			ret = mdfld_dpu_update_pipe(dbi_output[i], dpu_info,
+					dbi_output[i]->channel_num ? 2 : 0);
+			if (!ret)
+				pipe_updated[i] = true;
+		}
+	}
+
+	for (i = 0; i < dpu_info->dbi_output_num; i++) {
+		if (pipe_updated[i])
+			mdfld_dbi_flush_cb(dbi_output[i],
+					dbi_output[i]->channel_num ? 2 : 0);
+	}
+
+	spin_unlock_irqrestore(&dpu_info->dpu_update_lock, irq_flags);
+
+	ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+
+	return 0;
+}
+
+static int __mdfld_dbi_exit_dsr(struct mdfld_dsi_dbi_output *dbi_output,
+		int pipe)
+{
+	struct drm_device *dev = dbi_output->dev;
+	struct drm_crtc *crtc = dbi_output->base.base.crtc;
+	struct psb_intel_crtc *psb_crtc =
+		(crtc) ? to_psb_intel_crtc(crtc) : NULL;
+	u32 reg_val;
+	u32 dpll_reg = MRST_DPLL_A;
+	u32 pipeconf_reg = PIPEACONF;
+	u32 dspcntr_reg = DSPACNTR;
+	u32 dspbase_reg = DSPABASE;
+	u32 dspsurf_reg = DSPASURF;
+	u32 reg_offset = 0;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (!dbi_output)
+		return 0;
+
+	/*if mode setting on-going, back off*/
+	if ((dbi_output->mode_flags & MODE_SETTING_ON_GOING) ||
+			(psb_crtc &&
+			 psb_crtc->mode_flags & MODE_SETTING_ON_GOING))
+		return -EAGAIN;
+
+	if (pipe == 2) {
+		dpll_reg = MRST_DPLL_A;
+		pipeconf_reg = PIPECCONF;
+		dspcntr_reg = DSPCCNTR;
+		dspbase_reg = MDFLD_DSPCBASE;
+		dspsurf_reg = DSPCSURF;
+
+		reg_offset = MIPIC_REG_OFFSET;
+	}
+
+	if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, true))
+		return -EAGAIN;
+
+	/*enable DPLL*/
+	reg_val = REG_READ(dpll_reg);
+	if (!(reg_val & DPLL_VCO_ENABLE)) {
+
+		if (reg_val & MDFLD_PWR_GATE_EN) {
+			reg_val &= ~MDFLD_PWR_GATE_EN;
+			REG_WRITE(dpll_reg, reg_val);
+			REG_READ(dpll_reg);
+			udelay(500);
+		}
+
+		reg_val |= DPLL_VCO_ENABLE;
+		REG_WRITE(dpll_reg, reg_val);
+		REG_READ(dpll_reg);
+		udelay(500);
+
+		/*FIXME: add timeout*/
+		while (!(REG_READ(pipeconf_reg) & PIPECONF_DSIPLL_LOCK))
+			;
+	}
+
+	/*enable pipe*/
+	reg_val = REG_READ(pipeconf_reg);
+	if (!(reg_val & PIPEACONF_ENABLE)) {
+		reg_val |= PIPEACONF_ENABLE;
+		REG_WRITE(pipeconf_reg, reg_val);
+		REG_READ(pipeconf_reg);
+		udelay(500);
+		intel_wait_for_pipe_enable_disable(dev, pipe, true);
+	}
+
+	/*enable plane*/
+	reg_val = REG_READ(dspcntr_reg);
+	if (!(reg_val & DISPLAY_PLANE_ENABLE)) {
+		reg_val |= DISPLAY_PLANE_ENABLE;
+		REG_WRITE(dspcntr_reg, reg_val);
+		REG_READ(dspcntr_reg);
+		udelay(500);
+	}
+
+	ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+
+	/*clean IN_DSR flag*/
+	dbi_output->mode_flags &= ~MODE_SETTING_IN_DSR;
+
+	return 0;
+}
+
+int mdfld_dpu_exit_dsr(struct drm_device *dev)
+{
+	struct mdfld_dsi_dbi_output **dbi_output;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct mdfld_dbi_dpu_info *dpu_info = dev_priv->dbi_dpu_info;
+	int i;
+	int pipe;
+
+	dbi_output = dpu_info->dbi_outputs;
+
+	for (i = 0; i < dpu_info->dbi_output_num; i++) {
+		/*if this output is not in DSR mode, don't call exit dsr*/
+		if (dbi_output[i]->mode_flags & MODE_SETTING_IN_DSR)
+			__mdfld_dbi_exit_dsr(dbi_output[i],
+					dbi_output[i]->channel_num ? 2 : 0);
+	}
+
+	/*start dpu timer*/
+	if (dev_priv->platform_rev_id == MDFLD_PNW_A0)
+		mdfld_dbi_dpu_timer_start(dpu_info);
+
+	return 0;
+}
+
+static int mdfld_dpu_enter_dsr(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct mdfld_dbi_dpu_info *dpu_info = dev_priv->dbi_dpu_info;
+	struct mdfld_dsi_dbi_output **dbi_output;
+	int i;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	dbi_output = dpu_info->dbi_outputs;
+
+	for (i = 0; i < dpu_info->dbi_output_num; i++) {
+		/*if output is off or already in DSR state, don't enter again*/
+		if (dbi_output[i]->dbi_panel_on &&
+				!(dbi_output[i]->mode_flags &
+					MODE_SETTING_IN_DSR))
+			mdfld_dsi_dbi_enter_dsr(dbi_output[i],
+					dbi_output[i]->channel_num ? 2 : 0);
+	}
+
+	return 0;
+}
+
+static void mdfld_dbi_dpu_timer_func(unsigned long data)
+{
+	struct drm_device *dev = (struct drm_device *)data;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct mdfld_dbi_dpu_info *dpu_info = dev_priv->dbi_dpu_info;
+	struct timer_list *dpu_timer = &dpu_info->dpu_timer;
+	unsigned long flags;
+
+	if (dpu_info->pending) {
+		dpu_info->idle_count = 0;
+
+		/*update panel fb with damaged area*/
+		mdfld_dpu_update_fb(dev);
+	} else
+		dpu_info->idle_count++;
+
+	if (dpu_info->idle_count >= MDFLD_MAX_IDLE_COUNT) {
+		/*enter dsr*/
+		mdfld_dpu_enter_dsr(dev);
+
+		/*stop timer by return*/
+		return;
+	}
+
+	spin_lock_irqsave(&dpu_info->dpu_timer_lock, flags);
+	if (!timer_pending(dpu_timer)) {
+		dpu_timer->expires = jiffies + MDFLD_DSR_DELAY;
+		add_timer(dpu_timer);
+	}
+	spin_unlock_irqrestore(&dpu_info->dpu_timer_lock, flags);
+}
+
+void mdfld_dpu_update_panel(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct mdfld_dbi_dpu_info *dpu_info = dev_priv->dbi_dpu_info;
+
+	if (dpu_info->pending) {
+		dpu_info->idle_count = 0;
+
+		/*update panel fb with damaged area*/
+		mdfld_dpu_update_fb(dev);
+	} else
+		dpu_info->idle_count++;
+
+	if (dpu_info->idle_count >= MDFLD_MAX_IDLE_COUNT) {
+		/*enter dsr*/
+		mdfld_dpu_enter_dsr(dev);
+	}
+}
+
+static int mdfld_dbi_dpu_timer_init(struct drm_device *dev,
+		struct mdfld_dbi_dpu_info *dpu_info)
+{
+	struct timer_list *dpu_timer = &dpu_info->dpu_timer;
+	unsigned long flags;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	spin_lock_init(&dpu_info->dpu_timer_lock);
+	spin_lock_irqsave(&dpu_info->dpu_timer_lock, flags);
+
+	init_timer(dpu_timer);
+
+	dpu_timer->data = (unsigned long)dev;
+	dpu_timer->function = mdfld_dbi_dpu_timer_func;
+	dpu_timer->expires = jiffies + MDFLD_DSR_DELAY;
+
+	spin_unlock_irqrestore(&dpu_info->dpu_timer_lock, flags);
+
+	PSB_DEBUG_ENTRY("successfully\n");
+
+	return 0;
+}
+
+void mdfld_dbi_dpu_timer_start(struct mdfld_dbi_dpu_info *dpu_info)
+{
+	struct timer_list *dpu_timer = &dpu_info->dpu_timer;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dpu_info->dpu_timer_lock, flags);
+	if (!timer_pending(dpu_timer)) {
+		dpu_timer->expires = jiffies + MDFLD_DSR_DELAY;
+		add_timer(dpu_timer);
+	}
+	spin_unlock_irqrestore(&dpu_info->dpu_timer_lock, flags);
+}
+
+int mdfld_dbi_dpu_init(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct mdfld_dbi_dpu_info *dpu_info = dev_priv->dbi_dpu_info;
+
+	if (!dpu_info || IS_ERR(dpu_info)) {
+		dpu_info = kzalloc(sizeof(struct mdfld_dbi_dpu_info),
+				GFP_KERNEL);
+		if (!dpu_info) {
+			DRM_ERROR("No memory\n");
+			return -ENOMEM;
+		}
+
+		dev_priv->dbi_dpu_info = dpu_info;
+	}
+
+	dpu_info->dev = dev;
+
+	dpu_info->cursors[0].size = MDFLD_CURSOR_SIZE;
+	dpu_info->cursors[1].size = MDFLD_CURSOR_SIZE;
+
+	/*init dpu_update_lock*/
+	spin_lock_init(&dpu_info->dpu_update_lock);
+
+	/*init dpu refresh timer*/
+	mdfld_dbi_dpu_timer_init(dev, dpu_info);
+
+	/*init pipe damage area*/
+	mdfld_dpu_init_damage(dpu_info, 0);
+	mdfld_dpu_init_damage(dpu_info, 2);
+
+	PSB_DEBUG_ENTRY("successfully\n");
+
+	return 0;
+}
+
+void mdfld_dbi_dpu_exit(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct mdfld_dbi_dpu_info *dpu_info = dev_priv->dbi_dpu_info;
+
+	if (!dpu_info)
+		return;
+
+	/*delete dpu timer*/
+	del_timer_sync(&dpu_info->dpu_timer);
+
+	/*free dpu info*/
+	kfree(dpu_info);
+
+	dev_priv->dbi_dpu_info = NULL;
+}
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/mdfld_dsi_dbi_dpu.h b/drivers/external_drivers/intel_media/display/tng/drv/mdfld_dsi_dbi_dpu.h
new file mode 100644
index 0000000..91f0119
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/mdfld_dsi_dbi_dpu.h
@@ -0,0 +1,162 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * jim liu <jim.liu@intel.com>
+ * Jackie Li<yaodong.li@intel.com>
+ */
+
+#ifndef __MDFLD_DSI_DBI_DPU_H__
+#define __MDFLD_DSI_DBI_DPU_H__
+
+#include "mdfld_dsi_dbi.h"
+
+typedef enum {
+	MDFLD_PLANEA,
+	MDFLD_PLANEC,
+	MDFLD_CURSORA,
+	MDFLD_CURSORC,
+	MDFLD_OVERLAYA,
+	MDFLD_OVERLAYC,
+	MDFLD_PLANE_NUM,
+} mdfld_plane_t;
+
+#define MDFLD_PIPEA_PLANE_MASK	0x15
+#define MDFLD_PIPEC_PLANE_MASK	0x2A
+
+struct mdfld_cursor_info {
+	int x, y;
+	int size;
+};
+
+#define MDFLD_CURSOR_SIZE	64
+
+/**
+ * enter DSR mode if screen has no update for 2 frames.
+ * TODO: export this as a configuration variable. 
+ * (or what's the PRD for this?)
+ */
+#define MDFLD_MAX_IDLE_COUNT	2
+
+struct mdfld_dbi_dpu_info {
+	struct drm_device *dev;
+	/*lock*/
+	spinlock_t dpu_update_lock;
+
+	/*cursor postion*/
+	struct mdfld_cursor_info cursors[2];
+
+	/*damaged area for each plane*/
+	struct psb_drm_dpu_rect damaged_rects[MDFLD_PLANE_NUM];
+
+	/*final damaged area*/
+	struct psb_drm_dpu_rect damage_pipea;
+	struct psb_drm_dpu_rect damage_pipec;
+
+	/*pending*/
+	u32 pending;
+
+	/*dpu timer*/
+	struct timer_list dpu_timer;
+	spinlock_t dpu_timer_lock;
+
+	/*dpu idle count*/
+	u32 idle_count;
+
+	/*dsi outputs*/
+	struct mdfld_dsi_dbi_output *dbi_outputs[2];
+	int dbi_output_num;
+};
+
+static inline int mdfld_dpu_region_extent(struct psb_drm_dpu_rect *origin,
+		struct psb_drm_dpu_rect *rect)
+{
+	int x1, y1, x2, y2;
+
+	/*
+	 * PSB_DEBUG_ENTRY("rect (%d, %d, %d, %d)\n", rect->x, rect->y,
+	 * rect->width, rect->height);
+	 */
+
+	x1 = origin->x + origin->width;
+	y1 = origin->y + origin->height;
+
+	x2 = rect->x + rect->width;
+	y2 = rect->y + rect->height;
+
+	origin->x = min(origin->x, rect->x);
+	origin->y = min(origin->y, rect->y);
+	origin->width = max(x1, x2) - origin->x;
+	origin->height = max(y1, y2) - origin->y;
+
+	return 0;
+}
+
+static inline void mdfld_check_boundary(struct mdfld_dbi_dpu_info *dpu_info,
+		struct psb_drm_dpu_rect *rect)
+{
+	if (rect->x < 0)
+		rect->x = 0;
+	if (rect->y < 0)
+		rect->y = 0;
+
+	if ((rect->x + rect->width) > 864)
+		rect->width = 864 - rect->x;
+
+	if ((rect->y + rect->height) > 480)
+		rect->height = 480 - rect->height;
+
+	if (!rect->width)
+		rect->width = 1;
+	if (!rect->height)
+		rect->height = 1;
+}
+
+static inline void mdfld_dpu_init_damage(struct mdfld_dbi_dpu_info *dpu_info,
+		int pipe)
+{
+	struct psb_drm_dpu_rect *rect;
+
+	if (pipe == 0)
+		rect = &dpu_info->damage_pipea;
+	else
+		rect = &dpu_info->damage_pipec;
+
+	rect->x = 864;
+	rect->y = 480;
+	rect->width = -864;
+	rect->height = -480;
+}
+
+extern int mdfld_dsi_dbi_dsr_off(struct drm_device *dev,
+		struct psb_drm_dpu_rect *rect);
+extern int mdfld_dbi_dpu_report_damage(struct drm_device *dev,
+		mdfld_plane_t plane,
+		struct psb_drm_dpu_rect *rect);
+extern int mdfld_dbi_dpu_report_fullscreen_damage(struct drm_device *dev);
+extern int mdfld_dpu_exit_dsr(struct drm_device *dev);
+extern void mdfld_dbi_dpu_timer_start(struct mdfld_dbi_dpu_info *dpu_info);
+extern int mdfld_dbi_dpu_init(struct drm_device *dev);
+extern void mdfld_dbi_dpu_exit(struct drm_device *dev);
+extern void mdfld_dpu_update_panel(struct drm_device *dev);
+
+#endif /*__MDFLD_DSI_DBI_DPU_H__*/
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/mdfld_dsi_dbi_dsr.c b/drivers/external_drivers/intel_media/display/tng/drv/mdfld_dsi_dbi_dsr.c
new file mode 100644
index 0000000..a46b6f7
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/mdfld_dsi_dbi_dsr.c
@@ -0,0 +1,512 @@
+/*
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Jackie Li<yaodong.li@intel.com>
+ */
+#include "mdfld_dsi_dbi.h"
+#include "mdfld_dsi_dbi_dsr.h"
+#include "mdfld_dsi_pkg_sender.h"
+
+#define DSR_COUNT 15
+
+static int exit_dsr_locked(struct mdfld_dsi_config *dsi_config)
+{
+	int err = 0;
+	struct drm_device *dev;
+	if (!dsi_config)
+		return -EINVAL;
+
+	dev = dsi_config->dev;
+	err =  __dbi_power_on(dsi_config);
+
+	DC_MRFLD_onPowerOn(dsi_config->pipe);
+
+	return err;
+}
+
+static int enter_dsr_locked(struct mdfld_dsi_config *dsi_config, int level)
+{
+	struct mdfld_dsi_hw_registers *regs;
+	struct mdfld_dsi_hw_context *ctx;
+	struct drm_psb_private *dev_priv;
+	struct drm_device *dev;
+	struct mdfld_dsi_pkg_sender *sender;
+	int err;
+	pm_message_t state;
+
+	PSB_DEBUG_ENTRY("mdfld_dsi_dsr: enter dsr\n");
+
+	if (!dsi_config)
+		return -EINVAL;
+
+	regs = &dsi_config->regs;
+	ctx = &dsi_config->dsi_hw_context;
+	dev = dsi_config->dev;
+	dev_priv = dev->dev_private;
+
+	sender = mdfld_dsi_get_pkg_sender(dsi_config);
+	if (!sender) {
+		DRM_ERROR("Failed to get dsi sender\n");
+		return -EINVAL;
+	}
+
+	if (level < DSR_EXITED) {
+		DRM_ERROR("Why to do this?");
+		return -EINVAL;
+	}
+
+	if (level > DSR_ENTERED_LEVEL0) {
+		/**
+		 * TODO: require OSPM interfaces to tell OSPM module that
+		 * display controller is ready to be power gated.
+		 * OSPM module needs to response this request ASAP.
+		 * NOTE: it makes no sense to have display controller islands
+		 * & pci power gated here directly. OSPM module is the only one
+		 * who can power gate/ungate power islands.
+		 * FIXME: since there's no ospm interfaces for acquiring
+		 * suspending DSI related power islands, we have to call OSPM
+		 * interfaces to power gate display islands and pci right now,
+		 * which should NOT happen in this way!!!
+		 */
+		if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
+			OSPM_UHB_FORCE_POWER_ON)) {
+			DRM_ERROR("Failed power on display island\n");
+			return -EINVAL;
+		}
+
+		PSB_DEBUG_ENTRY("mdfld_dsi_dsr: entering DSR level 1\n");
+
+		err = mdfld_dsi_wait_for_fifos_empty(sender);
+		if (err) {
+			DRM_ERROR("mdfld_dsi_dsr: FIFO not empty\n");
+			ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+			return err;
+		}
+		ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+
+		/*suspend whole PCI host and related islands
+		** if failed at this try, revive te for another chance
+		*/
+		state.event = 0;
+		if (ospm_power_suspend()) {
+			/* Only display island is powered off then
+			 ** need revive the whole TE
+			 */
+			if (!ospm_power_is_hw_on(OSPM_DISPLAY_ISLAND))
+				exit_dsr_locked(dsi_config);
+
+			return -EINVAL;
+		}
+		/*
+		 *suspend pci
+		 *FIXME: should I do it here?
+		 *how about decoder/encoder is working??
+		 *OSPM should check the refcout of each islands before
+		 *actually power off PCI!!!
+		 *need invoke this in the same context, we need deal with
+		 *DSR lock later for suspend PCI may go to sleep!!!
+		 */
+		/*ospm_suspend_pci(dev->pdev);*/
+
+		PSB_DEBUG_ENTRY("mdfld_dsi_dsr: entered\n");
+		return 0;
+	}
+
+	PSB_DEBUG_ENTRY("mdfld_dsi_dsr: entering DSR level 0\n");
+
+	err = mdfld_dsi_wait_for_fifos_empty(sender);
+	if (err) {
+		DRM_ERROR("mdfld_dsi_dsr: FIFO not empty\n");
+		return err;
+	}
+
+	/*
+	 * To set the vblank_enabled to false with drm_vblank_off(), as
+	 * vblank_disable_and_save() would be scheduled late (<= 5s), and it
+	 * would cause drm_vblank_get() fail to turn on vsync interrupt
+	 * immediately.
+	 */
+	drm_vblank_off(dev, dsi_config->pipe);
+
+	DC_MRFLD_onPowerOff(dsi_config->pipe);
+
+	/*turn off dbi interface put in ulps*/
+	__dbi_power_off(dsi_config);
+
+	PSB_DEBUG_ENTRY("entered\n");
+	return 0;
+}
+
+static void dsr_power_off_work(struct work_struct *work)
+{
+	DRM_INFO("mdfld_dsi_dsr: power off work\n");
+}
+
+static void dsr_power_on_work(struct work_struct *work)
+{
+	DRM_INFO("mdfld_dsi_dsr: power on work\n");
+}
+
+int mdfld_dsi_dsr_update_panel_fb(struct mdfld_dsi_config *dsi_config)
+{
+	struct mdfld_dsi_dsr *dsr;
+	struct mdfld_dsi_pkg_sender *sender;
+	int err = 0;
+
+	if (!dsi_config) {
+		DRM_ERROR("Invalid parameter\n");
+		return -EINVAL;
+	}
+
+	dsr = dsi_config->dsr;
+
+	if (!IS_ANN(dev)) {
+		/*if no dsr attached, return 0*/
+		if (!dsr)
+			return 0;
+	}
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (dsi_config->type == MDFLD_DSI_ENCODER_DPI)
+		return 0;
+	mutex_lock(&dsi_config->context_lock);
+
+	if (!dsi_config->dsi_hw_context.panel_on) {
+		PSB_DEBUG_ENTRY(
+		"if screen off, update fb is not allowed\n");
+		err = -EINVAL;
+		goto update_fb_out;
+	}
+
+	/*no pending fb updates, go ahead to send out write_mem_start*/
+	PSB_DEBUG_ENTRY("send out write_mem_start\n");
+	sender = mdfld_dsi_get_pkg_sender(dsi_config);
+	if (!sender) {
+		DRM_ERROR("No sender\n");
+		err = -EINVAL;
+		goto update_fb_out;
+	}
+
+	err = mdfld_dsi_send_dcs(sender, write_mem_start,
+				NULL, 0, CMD_DATA_SRC_PIPE,
+				MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("Failed to send write_mem_start");
+		err = -EINVAL;
+		goto update_fb_out;
+	}
+
+	/*clear free count*/
+	dsr->free_count = 0;
+
+update_fb_out:
+	mutex_unlock(&dsi_config->context_lock);
+	return err;
+}
+
+int mdfld_dsi_dsr_report_te(struct mdfld_dsi_config *dsi_config)
+{
+	struct mdfld_dsi_dsr *dsr;
+	struct drm_psb_private *dev_priv;
+	struct drm_device *dev;
+	int err = 0;
+	int dsr_level;
+
+	if (!dsi_config) {
+		DRM_ERROR("Invalid parameter\n");
+		return -EINVAL;
+	}
+
+	dsr = dsi_config->dsr;
+
+	/*if no dsr attached, return 0*/
+	if (!dsr)
+		return 0;
+
+	/*
+	 * TODO: check HDMI & WIDI connection state here, then setup
+	 * dsr_level accordingly.
+	 */
+	dev = dsi_config->dev;
+	dev_priv = dev->dev_private;
+
+	/*
+	 * FIXME: when hdmi connected with no audio output, we still can
+	 * power gate DSI related islands, how to check whether HDMI audio
+	 * is active or not.
+	 * Currently, we simply enter DSR LEVEL0 when HDMI is connected
+	 */
+	dsr_level = DSR_ENTERED_LEVEL0;
+
+	mutex_lock(&dsi_config->context_lock);
+
+	if (!dsr->dsr_enabled)
+		goto report_te_out;
+
+	/*if panel is off, then forget it*/
+	if (!dsi_config->dsi_hw_context.panel_on)
+		goto report_te_out;
+
+	if (dsr_level <= dsr->dsr_state)
+		goto report_te_out;
+	else if (++dsr->free_count > DSR_COUNT && !dsr->ref_count) {
+		/*reset free count*/
+		dsr->free_count = 0;
+		/*enter dsr*/
+		err = enter_dsr_locked(dsi_config, dsr_level);
+		if (err) {
+			PSB_DEBUG_ENTRY("Failed to enter DSR\n");
+			goto report_te_out;
+		}
+		dsr->dsr_state = dsr_level;
+	}
+report_te_out:
+	mutex_unlock(&dsi_config->context_lock);
+	return err;
+}
+
+int mdfld_dsi_dsr_forbid_locked(struct mdfld_dsi_config *dsi_config)
+{
+	struct mdfld_dsi_dsr *dsr;
+	int err = 0;
+
+	if (!dsi_config) {
+		DRM_ERROR("Invalid parameter\n");
+		return -EINVAL;
+	}
+
+	dsr = dsi_config->dsr;
+
+	/*if no dsr attached, return 0*/
+	if (!dsr) {
+		return 0;
+	}
+	/*exit dsr if necessary*/
+	if (!dsr->dsr_enabled)
+		goto forbid_out;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	/*if reference count is not 0, it means dsr was forbidden*/
+	if (dsr->ref_count) {
+		dsr->ref_count++;
+		goto forbid_out;
+	}
+
+	/*exited dsr if current dsr state is DSR_ENTERED*/
+	if (dsr->dsr_state > DSR_EXITED) {
+		err = exit_dsr_locked(dsi_config);
+		if (err) {
+			DRM_ERROR("Failed to exit DSR\n");
+			goto forbid_out;
+		}
+		dsr->dsr_state = DSR_EXITED;
+	}
+	dsr->ref_count++;
+forbid_out:
+	return err;
+}
+
+int mdfld_dsi_dsr_forbid(struct mdfld_dsi_config *dsi_config)
+{
+	int err = 0;
+
+	if (!dsi_config)
+		return -EINVAL;
+
+	mutex_lock(&dsi_config->context_lock);
+
+	err = mdfld_dsi_dsr_forbid_locked(dsi_config);
+
+	mutex_unlock(&dsi_config->context_lock);
+
+	return err;
+}
+
+int mdfld_dsi_dsr_allow_locked(struct mdfld_dsi_config *dsi_config)
+{
+	struct mdfld_dsi_dsr *dsr;
+
+	if (!dsi_config) {
+		DRM_ERROR("Invalid parameter\n");
+		return -EINVAL;
+	}
+
+	dsr = dsi_config->dsr;
+
+	/*if no dsr attached, return 0*/
+	if (!dsr) {
+		return 0;
+	}
+
+	if (!dsr->dsr_enabled)
+		goto allow_out;
+
+	if (!dsr->ref_count) {
+		DRM_ERROR("Reference count is 0\n");
+		goto allow_out;
+	}
+
+	PSB_DEBUG_ENTRY("\n");
+
+	dsr->ref_count--;
+allow_out:
+	return 0;
+}
+
+int mdfld_dsi_dsr_allow(struct mdfld_dsi_config *dsi_config)
+{
+	int err = 0;
+
+	if (!dsi_config)
+		return -EINVAL;
+
+	mutex_lock(&dsi_config->context_lock);
+
+	err = mdfld_dsi_dsr_allow_locked(dsi_config);
+
+	mutex_unlock(&dsi_config->context_lock);
+
+	return err;
+}
+
+void mdfld_dsi_dsr_enable(struct mdfld_dsi_config *dsi_config)
+{
+	struct mdfld_dsi_dsr *dsr;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (!dsi_config) {
+		DRM_ERROR("Invalid parameter\n");
+		return;
+	}
+
+	dsr = dsi_config->dsr;
+
+	/*if no dsr attached, return 0*/
+	if (!dsr)
+		return;
+
+	/*lock dsr*/
+	mutex_lock(&dsi_config->context_lock);
+
+	dsr->dsr_enabled = 1;
+	dsr->dsr_state = DSR_EXITED;
+
+	mutex_unlock(&dsi_config->context_lock);
+}
+
+int mdfld_dsi_dsr_in_dsr_locked(struct mdfld_dsi_config *dsi_config)
+{
+	struct mdfld_dsi_dsr *dsr;
+	int in_dsr = 0;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (!dsi_config) {
+		DRM_ERROR("Invalid parameter\n");
+		goto get_state_out;
+	}
+
+	dsr = dsi_config->dsr;
+
+	/*if no dsr attached, return 0*/
+	if (!dsr)
+		goto get_state_out;
+
+	if (dsr->dsr_state > DSR_EXITED)
+		in_dsr = 1;
+
+get_state_out:
+	return in_dsr;
+}
+
+/**
+ * init dsr structure
+ */
+int mdfld_dsi_dsr_init(struct mdfld_dsi_config *dsi_config)
+{
+	struct mdfld_dsi_dsr *dsr;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (!dsi_config) {
+		DRM_ERROR("Invalid parameter\n");
+		return -EINVAL;
+	}
+
+	/*check panel type*/
+	if (dsi_config->type == MDFLD_DSI_ENCODER_DPI) {
+		DRM_INFO("%s: Video mode panel, disabling DSR\n", __func__);
+		return 0;
+	}
+
+	dsr = kzalloc(sizeof(struct mdfld_dsi_dsr), GFP_KERNEL);
+	if (!dsr) {
+		DRM_ERROR("No memory\n");
+		return -ENOMEM;
+	}
+
+	/*init reference count*/
+	dsr->ref_count = 0;
+
+	/*init free count*/
+	dsr->free_count = 0;
+
+	/*init dsr enabled*/
+	dsr->dsr_enabled = 0;
+
+	/*set dsr state*/
+	dsr->dsr_state = DSR_INIT;
+
+	/*init power on/off works*/
+	INIT_WORK(&dsr->power_off_work, dsr_power_off_work);
+	INIT_WORK(&dsr->power_on_work, dsr_power_on_work);
+
+	/*init dsi config*/
+	dsr->dsi_config = dsi_config;
+
+	dsi_config->dsr = dsr;
+
+	PSB_DEBUG_ENTRY("successfully\n");
+
+	return 0;
+}
+
+/**
+ * destroy dsr structure
+ */
+void mdfld_dsi_dsr_destroy(struct mdfld_dsi_config *dsi_config)
+{
+	struct mdfld_dsi_dsr *dsr;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	dsr = dsi_config->dsr;
+
+	if (!dsr)
+		kfree(dsr);
+
+	dsi_config->dsr = 0;
+}
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/mdfld_dsi_dbi_dsr.h b/drivers/external_drivers/intel_media/display/tng/drv/mdfld_dsi_dbi_dsr.h
new file mode 100644
index 0000000..a8922d3
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/mdfld_dsi_dbi_dsr.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Jackie Li<yaodong.li@intel.com>
+ */
+
+#ifndef __MDFLD_DSI_DBI_DSR_H__
+#define __MDFLD_DSI_DBI_DSR_H__
+
+#include "mdfld_dsi_output.h"
+
+enum {
+	DSR_INIT = 0,
+	DSR_EXITED,
+	DSR_ENTERED_LEVEL0,
+	DSR_ENTERED_LEVEL1,
+};
+
+/*protected by context_lock in dsi config*/
+struct mdfld_dsi_dsr {
+	/*dsr reference count*/
+	int ref_count;
+
+	int free_count;
+
+	/*dsr enabled*/
+	int dsr_enabled;
+	/*dsr state*/
+	int dsr_state;
+	/*power off work*/
+	struct work_struct power_off_work;
+	/*power on work*/
+	struct work_struct power_on_work;
+
+	/*dsi config*/
+	void *dsi_config;
+};
+
+int mdfld_dsi_dsr_update_panel_fb(struct mdfld_dsi_config *dsi_config);
+int mdfld_dsi_dsr_report_te(struct mdfld_dsi_config *dsi_config);
+
+int mdfld_dsi_dsr_forbid(struct mdfld_dsi_config *dsi_config);
+int mdfld_dsi_dsr_allow(struct mdfld_dsi_config *dsi_config);
+int mdfld_dsi_dsr_forbid_locked(struct mdfld_dsi_config *dsi_config);
+int mdfld_dsi_dsr_allow_locked(struct mdfld_dsi_config *dsi_config);
+
+int mdfld_dsi_dsr_init(struct mdfld_dsi_config *dsi_config);
+void mdfld_dsi_dsr_destroy(struct mdfld_dsi_config *dsi_config);
+
+void mdfld_dsi_dsr_enable(struct mdfld_dsi_config *dsi_config);
+
+int mdfld_dsi_dsr_in_dsr_locked(struct mdfld_dsi_config *dsi_config);
+
+/*FIXME: remove it later*/
+extern void ospm_suspend_display(struct drm_device *dev);
+extern void ospm_suspend_pci(struct pci_dev *pdev);
+
+#endif /*__MDFLD_DSI_DBI_DSR_H__*/
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/mdfld_dsi_dpi.c b/drivers/external_drivers/intel_media/display/tng/drv/mdfld_dsi_dpi.c
new file mode 100644
index 0000000..9ae380f
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/mdfld_dsi_dpi.c
@@ -0,0 +1,1282 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * jim liu <jim.liu@intel.com>
+ * Jackie Li<yaodong.li@intel.com>
+ */
+
+#include "mdfld_dsi_dpi.h"
+#include "mdfld_output.h"
+#include "mdfld_dsi_pkg_sender.h"
+#include "psb_drv.h"
+#include "mdfld_csc.h"
+#include "psb_irq.h"
+#include "dispmgrnl.h"
+#include "mrfld_clock.h"
+#include "android_hdmi.h"
+#include "otm_hdmi.h"
+
+#define KEEP_UNUSED_CODE 0
+
+static
+u16 mdfld_dsi_dpi_to_byte_clock_count(int pixel_clock_count,
+		int num_lane, int bpp)
+{
+	return (u16)((pixel_clock_count * bpp) / (num_lane * 8));
+}
+
+/*
+ * Calculate the dpi time basing on a given drm mode @mode
+ * return 0 on success.
+ * FIXME: I was using proposed mode value for calculation, may need to
+ * use crtc mode values later
+ */
+int mdfld_dsi_dpi_timing_calculation(struct drm_device *dev,
+		struct drm_display_mode *mode,
+		struct mdfld_dsi_dpi_timing *dpi_timing,
+		int num_lane, int bpp)
+{
+	int pclk_hsync, pclk_hfp, pclk_hbp, pclk_hactive;
+	int pclk_vsync, pclk_vfp, pclk_vbp, pclk_vactive;
+	if (!mode || !dpi_timing) {
+		DRM_ERROR("Invalid parameter\n");
+		return -EINVAL;
+	}
+
+	PSB_DEBUG_ENTRY("pclk %d, hdisplay %d, hsync_start %d, hsync_end %d," \
+			"htotal %d\n",
+			mode->clock, mode->hdisplay, mode->hsync_start,
+			mode->hsync_end, mode->htotal);
+	PSB_DEBUG_ENTRY("vdisplay %d, vsync_start %d, vsync_end %d," \
+			"vtotal %d\n",
+			mode->vdisplay, mode->vsync_start,
+			mode->vsync_end, mode->vtotal);
+
+	pclk_hactive = mode->hdisplay;
+	pclk_hfp = mode->hsync_start - mode->hdisplay;
+	pclk_hsync = mode->hsync_end - mode->hsync_start;
+	pclk_hbp = mode->htotal - mode->hsync_end;
+
+	pclk_vactive = mode->vdisplay;
+	pclk_vfp = mode->vsync_start - mode->vdisplay;
+	pclk_vsync = mode->vsync_end - mode->vsync_start;
+	pclk_vbp = mode->vtotal - mode->vsync_end;
+	/*
+	 * byte clock counts were calculated by following formula
+	 * bclock_count = pclk_count * bpp / num_lane / 8
+	 */
+	if (is_dual_dsi(dev)) {
+		dpi_timing->hsync_count = pclk_hsync;
+		dpi_timing->hbp_count = pclk_hbp;
+		dpi_timing->hfp_count = pclk_hfp;
+		dpi_timing->hactive_count = pclk_hactive / 2;
+		dpi_timing->vsync_count = pclk_vsync;
+		dpi_timing->vbp_count = pclk_vbp;
+		dpi_timing->vfp_count = pclk_vfp;
+	} else {
+		dpi_timing->hsync_count =
+			mdfld_dsi_dpi_to_byte_clock_count(pclk_hsync, num_lane, bpp);
+		dpi_timing->hbp_count =
+			mdfld_dsi_dpi_to_byte_clock_count(pclk_hbp, num_lane, bpp);
+		dpi_timing->hfp_count =
+			mdfld_dsi_dpi_to_byte_clock_count(pclk_hfp, num_lane, bpp);
+		dpi_timing->hactive_count =
+			mdfld_dsi_dpi_to_byte_clock_count(pclk_hactive, num_lane, bpp);
+
+		dpi_timing->vsync_count =
+			mdfld_dsi_dpi_to_byte_clock_count(pclk_vsync, num_lane, bpp);
+		dpi_timing->vbp_count =
+			mdfld_dsi_dpi_to_byte_clock_count(pclk_vbp, num_lane, bpp);
+		dpi_timing->vfp_count =
+			mdfld_dsi_dpi_to_byte_clock_count(pclk_vfp, num_lane, bpp);
+	}
+	PSB_DEBUG_ENTRY("DPI timings: %d, %d, %d, %d, %d, %d, %d\n",
+			dpi_timing->hsync_count, dpi_timing->hbp_count,
+			dpi_timing->hfp_count, dpi_timing->hactive_count,
+			dpi_timing->vsync_count, dpi_timing->vbp_count,
+			dpi_timing->vfp_count);
+
+	return 0;
+}
+
+void mdfld_dsi_dpi_set_color_mode(struct mdfld_dsi_config *dsi_config , bool on)
+{
+	struct mdfld_dsi_pkg_sender *sender =
+		mdfld_dsi_get_pkg_sender(dsi_config);
+	int err;
+	u32  spk_pkg =  (on == true) ? MDFLD_DSI_DPI_SPK_COLOR_MODE_ON :
+		MDFLD_DSI_DPI_SPK_COLOR_MODE_OFF;
+
+
+	PSB_DEBUG_ENTRY("Turn  color mode %s  pkg value= %d...\n",
+			(on ? "on" : "off"), spk_pkg);
+
+	if (!sender) {
+		DRM_ERROR("Failed to get DSI packet sender\n");
+		return ;
+	}
+
+	/*send turn on/off color mode packet*/
+	err = mdfld_dsi_send_dpi_spk_pkg_hs(sender,
+			spk_pkg);
+	if (err) {
+		DRM_ERROR("Failed to send turn on packet\n");
+		return ;
+	}
+	PSB_DEBUG_ENTRY("Turn  color mode %s successful.\n",
+			(on ? "on" : "off"));
+	return;
+}
+
+static int __dpi_enter_ulps_locked(struct mdfld_dsi_config *dsi_config, int offset)
+{
+	struct mdfld_dsi_hw_registers *regs = &dsi_config->regs;
+	struct mdfld_dsi_hw_context *ctx = &dsi_config->dsi_hw_context;
+	struct drm_device *dev = dsi_config->dev;
+	struct mdfld_dsi_pkg_sender *sender
+		= mdfld_dsi_get_pkg_sender(dsi_config);
+	if (!sender) {
+		DRM_ERROR("pkg sender is NULL\n");
+		return -EINVAL;
+	}
+
+	ctx->device_ready = REG_READ(regs->device_ready_reg + offset);
+
+	if (ctx->device_ready & DSI_POWER_STATE_ULPS_MASK) {
+		DRM_ERROR("Broken ULPS states\n");
+		return -EINVAL;
+	}
+
+	if (offset != 0)
+		sender->work_for_slave_panel = true;
+	/*wait for all FIFOs empty*/
+	mdfld_dsi_wait_for_fifos_empty(sender);
+	sender->work_for_slave_panel = false;
+
+	/*inform DSI host is to be put on ULPS*/
+	ctx->device_ready |= DSI_POWER_STATE_ULPS_ENTER;
+	REG_WRITE(regs->device_ready_reg + offset, ctx->device_ready);
+
+	PSB_DEBUG_ENTRY("entered ULPS state\n");
+	return 0;
+}
+
+static int __dpi_exit_ulps_locked(struct mdfld_dsi_config *dsi_config, int offset)
+{
+	struct mdfld_dsi_hw_registers *regs = &dsi_config->regs;
+	struct mdfld_dsi_hw_context *ctx = &dsi_config->dsi_hw_context;
+	struct drm_device *dev = dsi_config->dev;
+
+	ctx->device_ready = REG_READ(regs->device_ready_reg + offset);
+
+	/*enter ULPS EXIT state*/
+	ctx->device_ready &= ~DSI_POWER_STATE_ULPS_MASK;
+	ctx->device_ready |= DSI_POWER_STATE_ULPS_EXIT;
+	REG_WRITE(regs->device_ready_reg + offset, ctx->device_ready);
+
+	/*wait for 1ms as spec suggests*/
+	mdelay(1);
+
+	/*clear ULPS state*/
+	ctx->device_ready &= ~DSI_POWER_STATE_ULPS_MASK;
+	REG_WRITE(regs->device_ready_reg + offset, ctx->device_ready);
+
+	PSB_DEBUG_ENTRY("exited ULPS state\n");
+	return 0;
+}
+
+static void __dpi_set_properties(struct mdfld_dsi_config *dsi_config,
+			enum enum_ports port)
+{
+	struct mdfld_dsi_hw_registers *regs;
+	struct mdfld_dsi_hw_context *ctx;
+	struct drm_device *dev;
+	int offset = 0;
+
+	regs = &dsi_config->regs;
+	ctx = &dsi_config->dsi_hw_context;
+	dev = dsi_config->dev;
+
+	if (port == PORT_C)
+		offset = 0x800;
+	/*D-PHY parameter*/
+	REG_WRITE(regs->dphy_param_reg + offset, ctx->dphy_param);
+
+	/*Configure DSI controller*/
+	REG_WRITE(regs->mipi_control_reg + offset, ctx->mipi_control);
+	REG_WRITE(regs->intr_en_reg + offset, ctx->intr_en);
+	REG_WRITE(regs->hs_tx_timeout_reg + offset, ctx->hs_tx_timeout);
+	REG_WRITE(regs->lp_rx_timeout_reg + offset, ctx->lp_rx_timeout);
+	REG_WRITE(regs->turn_around_timeout_reg + offset,
+			ctx->turn_around_timeout);
+	REG_WRITE(regs->device_reset_timer_reg + offset,
+			ctx->device_reset_timer);
+	REG_WRITE(regs->high_low_switch_count_reg + offset,
+			ctx->high_low_switch_count);
+	REG_WRITE(regs->init_count_reg + offset, ctx->init_count);
+	REG_WRITE(regs->eot_disable_reg + offset, (REG_READ(regs->eot_disable_reg) & ~DSI_EOT_DISABLE_MASK) | (ctx->eot_disable & DSI_EOT_DISABLE_MASK));
+	REG_WRITE(regs->lp_byteclk_reg + offset, ctx->lp_byteclk);
+	REG_WRITE(regs->clk_lane_switch_time_cnt_reg + offset,
+			ctx->clk_lane_switch_time_cnt);
+	REG_WRITE(regs->video_mode_format_reg + offset, ctx->video_mode_format);
+	REG_WRITE(regs->dsi_func_prg_reg + offset, ctx->dsi_func_prg);
+
+	/*DSI timing*/
+	REG_WRITE(regs->dpi_resolution_reg + offset, ctx->dpi_resolution);
+	REG_WRITE(regs->hsync_count_reg + offset, ctx->hsync_count);
+	REG_WRITE(regs->hbp_count_reg + offset, ctx->hbp_count);
+	REG_WRITE(regs->hfp_count_reg + offset, ctx->hfp_count);
+	REG_WRITE(regs->hactive_count_reg + offset, ctx->hactive_count);
+	REG_WRITE(regs->vsync_count_reg + offset, ctx->vsync_count);
+	REG_WRITE(regs->vbp_count_reg + offset, ctx->vbp_count);
+	REG_WRITE(regs->vfp_count_reg + offset, ctx->vfp_count);
+
+}
+
+static int __dpi_config_port(struct mdfld_dsi_config *dsi_config,
+			struct panel_funcs *p_funcs, enum enum_ports port)
+{
+	struct mdfld_dsi_hw_registers *regs;
+	struct mdfld_dsi_hw_context *ctx;
+	struct drm_device *dev;
+	int offset = 0;
+
+	if (!dsi_config)
+		return -EINVAL;
+
+	regs = &dsi_config->regs;
+	ctx = &dsi_config->dsi_hw_context;
+	dev = dsi_config->dev;
+
+	if (port == PORT_C)
+		offset = 0x800;
+
+	/*exit ULPS state*/
+	__dpi_exit_ulps_locked(dsi_config, offset);
+
+	/*Enable DSI Controller*/
+	REG_WRITE(regs->device_ready_reg + offset, ctx->device_ready | BIT0);
+
+	/*set low power output hold*/
+	if (port == PORT_C)
+		offset = 0x1000;
+	REG_WRITE(regs->mipi_reg + offset, (ctx->mipi | BIT16));
+
+	return 0;
+}
+
+static void ann_dc_setup(struct mdfld_dsi_config *dsi_config)
+{
+	struct drm_device *dev = dsi_config->dev;
+	struct mdfld_dsi_hw_registers *regs = &dsi_config->regs;
+	struct mdfld_dsi_hw_context *ctx = &dsi_config->dsi_hw_context;
+
+
+	DRM_INFO("restore some registers to default value\n");
+
+	power_island_get(OSPM_DISPLAY_B | OSPM_DISPLAY_C);
+
+	REG_WRITE(DSPCLK_GATE_D, 0x0);
+	REG_WRITE(RAMCLK_GATE_D, 0xc0000 | (1 << 11)); // FIXME: delay 1us for RDB done signal
+	REG_WRITE(PFIT_CONTROL, 0x20000000);
+	REG_WRITE(DSPIEDCFGSHDW, 0x0);
+	REG_WRITE(DSPARB2, 0x000A0200);
+	REG_WRITE(DSPARB, 0x18040080);
+	REG_WRITE(DSPFW1, 0x0F0F3F3F);
+	REG_WRITE(DSPFW2, 0x5F2F0F3F);
+	REG_WRITE(DSPFW3, 0x0);
+	REG_WRITE(DSPFW4, 0x07071F1F);
+	REG_WRITE(DSPFW5, 0x2F17071F);
+	REG_WRITE(DSPFW6, 0x00001F3F);
+	REG_WRITE(DSPFW7, 0x1F3F1F3F);
+	REG_WRITE(DSPSRCTRL, 0x00080100);
+	REG_WRITE(DSPCHICKENBIT, 0x20);
+	REG_WRITE(FBDC_CHICKEN, 0x0C0C0C0C);
+	REG_WRITE(CURACNTR, 0x0);
+	REG_WRITE(CURBCNTR, 0x0);
+	REG_WRITE(CURCCNTR, 0x0);
+	REG_WRITE(IEP_OVA_CTRL, 0x0);
+	REG_WRITE(IEP_OVA_CTRL, 0x0);
+
+	REG_WRITE(DSPBCNTR, 0x0);
+	REG_WRITE(DSPCCNTR, 0x0);
+	REG_WRITE(DSPDCNTR, 0x0);
+	REG_WRITE(DSPECNTR, 0x0);
+	REG_WRITE(DSPFCNTR, 0x0);
+	REG_WRITE(GCI_CTRL, REG_READ(GCI_CTRL) | 1);
+
+	power_island_put(OSPM_DISPLAY_B | OSPM_DISPLAY_C);
+
+	DRM_INFO("setup drain latency\n");
+
+	REG_WRITE(regs->ddl1_reg, ctx->ddl1);
+	REG_WRITE(regs->ddl2_reg, ctx->ddl2);
+	REG_WRITE(regs->ddl3_reg, ctx->ddl3);
+	REG_WRITE(regs->ddl4_reg, ctx->ddl4);
+}
+
+/**
+ * Power on sequence for video mode MIPI panel.
+ * NOTE: do NOT modify this function
+ */
+static int __dpi_panel_power_on(struct mdfld_dsi_config *dsi_config,
+		struct panel_funcs *p_funcs, bool first_boot)
+{
+	u32 val = 0;
+	struct mdfld_dsi_hw_registers *regs;
+	struct mdfld_dsi_hw_context *ctx;
+	struct drm_psb_private *dev_priv;
+	struct drm_device *dev;
+	int retry, reset_count = 10;
+	int i;
+	int err = 0;
+	u32 power_island = 0;
+	int offset = 0;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (!dsi_config)
+		return -EINVAL;
+
+	regs = &dsi_config->regs;
+	ctx = &dsi_config->dsi_hw_context;
+	dev = dsi_config->dev;
+	dev_priv = dev->dev_private;
+	power_island = pipe_to_island(dsi_config->pipe);
+	if (power_island & (OSPM_DISPLAY_A | OSPM_DISPLAY_C))
+		power_island |= OSPM_DISPLAY_MIO;
+	if (is_dual_dsi(dev))
+		power_island |= OSPM_DISPLAY_C;
+
+	if (!power_island_get(power_island))
+		return -EAGAIN;
+	if (android_hdmi_is_connected(dev) && first_boot)
+			otm_hdmi_power_islands_on();
+
+reset_recovery:
+	--reset_count;
+	/*HW-Reset*/
+	if (p_funcs && p_funcs->reset)
+		p_funcs->reset(dsi_config);
+
+	/*
+	 * Wait for DSI PLL locked on pipe, and only need to poll status of pipe
+	 * A as both MIPI pipes share the same DSI PLL.
+	 */
+	if (dsi_config->pipe == 0) {
+		retry = 20000;
+		while (!(REG_READ(regs->pipeconf_reg) & PIPECONF_DSIPLL_LOCK) &&
+				--retry)
+			udelay(150);
+		if (!retry) {
+			DRM_ERROR("PLL failed to lock on pipe\n");
+			err = -EAGAIN;
+			goto power_on_err;
+		}
+	}
+
+	if (IS_ANN(dev)) {
+		/* FIXME: reset the DC registers for ANN A0 */
+		ann_dc_setup(dsi_config);
+	}
+
+	__dpi_set_properties(dsi_config, PORT_A);
+
+	/* update 0x650c[0] = 1 to fixed arbitration pattern
+	 * it is found display TLB request be blocked by display plane
+	 * memory requests, never goes out. This causes display controller
+	 * uses stale TLB data to do memory translation, getting wrong
+	 * memory address for data, and causing the flickering issue.
+	 */
+	REG_WRITE(GCI_CTRL, REG_READ(GCI_CTRL) | 1);
+
+	/*Setup pipe timing*/
+	REG_WRITE(regs->htotal_reg, ctx->htotal);
+	REG_WRITE(regs->hblank_reg, ctx->hblank);
+	REG_WRITE(regs->hsync_reg, ctx->hsync);
+	REG_WRITE(regs->vtotal_reg, ctx->vtotal);
+	REG_WRITE(regs->vblank_reg, ctx->vblank);
+	REG_WRITE(regs->vsync_reg, ctx->vsync);
+	REG_WRITE(regs->pipesrc_reg, ctx->pipesrc);
+
+	REG_WRITE(regs->dsppos_reg, ctx->dsppos);
+	REG_WRITE(regs->dspstride_reg, ctx->dspstride);
+
+	/*Setup plane*/
+	REG_WRITE(regs->dspsize_reg, ctx->dspsize);
+	REG_WRITE(regs->dspsurf_reg, ctx->dspsurf);
+	REG_WRITE(regs->dsplinoff_reg, ctx->dsplinoff);
+	REG_WRITE(regs->vgacntr_reg, ctx->vgacntr);
+
+	/*restore color_coef (chrome) */
+	for (i = 0; i < 6; i++)
+		REG_WRITE(regs->color_coef_reg + (i<<2), csc_setting_save[i]);
+
+	/* restore palette (gamma) */
+	for (i = 0; i < 256; i++)
+		REG_WRITE(regs->palette_reg + (i<<2), gamma_setting_save[i]);
+
+	/* restore dpst setting */
+	if (dev_priv->psb_dpst_state) {
+		dpstmgr_reg_restore_locked(dev, dsi_config);
+		psb_enable_pipestat(dev_priv, 0, PIPE_DPST_EVENT_ENABLE);
+	}
+
+	if (__dpi_config_port(dsi_config, p_funcs, PORT_A) != 0) {
+		if (!reset_count) {
+				err = -EAGAIN;
+				goto power_on_err;
+			}
+			DRM_ERROR("Failed to init dsi controller, reset it!\n");
+			goto reset_recovery;
+	}
+	if (is_dual_dsi(dev)) {
+		__dpi_set_properties(dsi_config, PORT_C);
+		__dpi_config_port(dsi_config, p_funcs, PORT_C);
+	}
+
+	/**
+	 * Different panel may have different ways to have
+	 * drvIC initialized. Support it!
+	 */
+	if (p_funcs && p_funcs->drv_ic_init) {
+		if (p_funcs->drv_ic_init(dsi_config)) {
+			if (!reset_count) {
+				err = -EAGAIN;
+				goto power_on_err;
+			}
+
+			DRM_ERROR("Failed to init dsi controller, reset it!\n");
+			goto reset_recovery;
+		}
+	}
+
+	/*Enable MIPI Port A*/
+	offset = 0x0;
+	REG_WRITE(regs->mipi_reg + offset, (ctx->mipi | BIT31));
+	REG_WRITE(regs->dpi_control_reg + offset, BIT1);
+	if (is_dual_dsi(dev)) {
+		/*Enable MIPI Port C*/
+		offset = 0x1000;
+		REG_WRITE(regs->mipi_reg + offset, (ctx->mipi | BIT31));
+		offset = 0x800;
+		REG_WRITE(regs->dpi_control_reg + offset, BIT1);
+	}
+	/**
+	 * Different panel may have different ways to have
+	 * panel turned on. Support it!
+	 */
+	if (p_funcs && p_funcs->power_on)
+		if (p_funcs->power_on(dsi_config)) {
+			DRM_ERROR("Failed to power on panel\n");
+			err = -EAGAIN;
+			goto power_on_err;
+		}
+
+	if (IS_ANN(dev)) {
+		REG_WRITE(regs->ddl1_reg, ctx->ddl1);
+		REG_WRITE(regs->ddl2_reg, ctx->ddl2);
+		REG_WRITE(regs->ddl3_reg, ctx->ddl3);
+		REG_WRITE(regs->ddl4_reg, ctx->ddl4);
+
+		REG_WRITE(DSPARB2, ctx->dsparb2);
+		REG_WRITE(DSPARB, ctx->dsparb);
+	}
+
+	/*Enable pipe*/
+	val = ctx->pipeconf;
+	val &= ~0x000c0000;
+	/**
+	 * Frame Start occurs on third HBLANK
+	 * after the start of VBLANK
+	 */
+	val |= BIT31 | BIT28;
+	REG_WRITE(regs->pipeconf_reg, val);
+	/*Wait for pipe enabling,when timing generator
+	  is wroking */
+	if (REG_READ(regs->mipi_reg) & BIT31) {
+		retry = 10000;
+		while (--retry && !(REG_READ(regs->pipeconf_reg) & BIT30))
+			udelay(3);
+
+		if (!retry) {
+			DRM_ERROR("Failed to enable pipe\n");
+			err = -EAGAIN;
+			goto power_on_err;
+		}
+	}
+	/*enable plane*/
+	val = ctx->dspcntr | BIT31;
+	REG_WRITE(regs->dspcntr_reg, val);
+
+	if (p_funcs && p_funcs->set_brightness) {
+		if (p_funcs->set_brightness(dsi_config,
+				ctx->lastbrightnesslevel))
+			DRM_ERROR("Failed to set panel brightness\n");
+	} else {
+		DRM_ERROR("Failed to set panel brightness\n");
+	}
+	if (p_funcs && p_funcs->drv_set_panel_mode)
+		p_funcs->drv_set_panel_mode(dsi_config);
+
+	psb_enable_vblank(dev, dsi_config->pipe);
+	return err;
+
+power_on_err:
+	power_island_put(power_island);
+	return err;
+}
+
+/**
+ * Power off sequence for video mode MIPI panel.
+ * NOTE: do NOT modify this function
+ */
+static int __dpi_panel_power_off(struct mdfld_dsi_config *dsi_config,
+		struct panel_funcs *p_funcs)
+{
+	u32 val = 0;
+	u32 tmp = 0;
+	struct mdfld_dsi_hw_registers *regs;
+	struct mdfld_dsi_hw_context *ctx;
+	struct drm_device *dev;
+	struct drm_psb_private *dev_priv;
+	int retry;
+	int i;
+	int err = 0;
+	u32 power_island = 0;
+	int offset = 0;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (!dsi_config)
+		return -EINVAL;
+
+	regs = &dsi_config->regs;
+	ctx = &dsi_config->dsi_hw_context;
+	dev = dsi_config->dev;
+	dev_priv = dev->dev_private;
+
+	ctx->dsparb = REG_READ(DSPARB);
+	ctx->dsparb2 = REG_READ(DSPARB2);
+
+	/* Don't reset brightness to 0.*/
+	ctx->lastbrightnesslevel = psb_brightness;
+
+	tmp = REG_READ(regs->pipeconf_reg);
+        ctx->dspcntr = REG_READ(regs->dspcntr_reg);
+
+	/*save color_coef (chrome) */
+	for (i = 0; i < 6; i++)
+		ctx->color_coef[i] = REG_READ(regs->color_coef_reg + (i<<2));
+
+	/* save palette (gamma) */
+	for (i = 0; i < 256; i++)
+		ctx->palette[i] = REG_READ(regs->palette_reg + (i<<2));
+
+	/*
+	 * Couldn't disable the pipe until DRM_WAIT_ON signaled by last
+	 * vblank event when playing video, otherwise the last vblank event
+	 * will lost when pipe disabled before vblank interrupt coming
+	 * sometimes.
+	 */
+
+	/*Disable panel*/
+	val = ctx->dspcntr;
+	REG_WRITE(regs->dspcntr_reg, (val & ~BIT31));
+	/*Disable overlay & cursor panel assigned to this pipe*/
+	REG_WRITE(regs->pipeconf_reg, (tmp | (0x000c0000)));
+
+	/*Disable pipe*/
+	val = REG_READ(regs->pipeconf_reg);
+	ctx->pipeconf = val;
+	REG_WRITE(regs->pipeconf_reg, (val & ~BIT31));
+
+	/*wait for pipe disabling,
+	  pipe synchronization plus , only avaiable when
+	  timer generator is working*/
+	if (REG_READ(regs->mipi_reg) & BIT31) {
+		retry = 100000;
+		while (--retry && (REG_READ(regs->pipeconf_reg) & BIT30))
+			udelay(5);
+
+		if (!retry) {
+			DRM_ERROR("Failed to disable pipe\n");
+			if (IS_MOFD(dev)) {
+				/*
+				 * FIXME: turn off the power island directly
+				 * although failed to disable pipe.
+				 */
+				err = 0;
+			} else
+				err = -EAGAIN;
+			goto power_off_err;
+		}
+	}
+
+	/**
+	 * Different panel may have different ways to have
+	 * panel turned off. Support it!
+	 */
+	if (p_funcs && p_funcs->power_off) {
+		if (p_funcs->power_off(dsi_config)) {
+			DRM_ERROR("Failed to power off panel\n");
+			err = -EAGAIN;
+			goto power_off_err;
+		}
+	}
+
+	/*Disable MIPI port*/
+	REG_WRITE(regs->mipi_reg, (REG_READ(regs->mipi_reg) & ~BIT31));
+
+	/*clear Low power output hold*/
+	REG_WRITE(regs->mipi_reg, (REG_READ(regs->mipi_reg) & ~BIT16));
+
+	/*Disable DSI controller*/
+	REG_WRITE(regs->device_ready_reg, (ctx->device_ready & ~BIT0));
+
+	/*enter ULPS*/
+	__dpi_enter_ulps_locked(dsi_config, offset);
+
+	if (is_dual_dsi(dev)) {
+		offset = 0x1000;
+		/*Disable MIPI port*/
+		REG_WRITE(regs->mipi_reg, (REG_READ(regs->mipi_reg) & ~BIT31));
+
+		/*clear Low power output hold*/
+		REG_WRITE(regs->mipi_reg, (REG_READ(regs->mipi_reg) & ~BIT16));
+		offset = 0x800;
+		/*Disable DSI controller*/
+		REG_WRITE(regs->device_ready_reg, (ctx->device_ready & ~BIT0));
+
+		/*enter ULPS*/
+		__dpi_enter_ulps_locked(dsi_config, offset);
+		offset = 0x0;
+	}
+
+power_off_err:
+	power_island = pipe_to_island(dsi_config->pipe);
+
+	if (power_island & (OSPM_DISPLAY_A | OSPM_DISPLAY_C))
+		power_island |= OSPM_DISPLAY_MIO;
+
+	if (is_dual_dsi(dev))
+		power_island |= OSPM_DISPLAY_C;
+
+	if (!power_island_put(power_island))
+		return -EINVAL;
+
+	return err;
+}
+
+#if KEEP_UNUSED_CODE
+/**
+ * Send TURN_ON package to dpi panel to turn it on
+ */
+static int mdfld_dsi_dpi_panel_turn_on(struct mdfld_dsi_config *dsi_config,
+		struct panel_funcs *p_funcs)
+{
+	struct mdfld_dsi_pkg_sender *sender =
+		mdfld_dsi_get_pkg_sender(dsi_config);
+	struct mdfld_dsi_hw_context *ctx;
+	int err = 0;
+
+	err = mdfld_dsi_send_dpi_spk_pkg_hs(sender,
+			MDFLD_DSI_DPI_SPK_TURN_ON);
+	/*According HW DSI spec, here need wait for 100ms*/
+	/*To optimize dpms flow, move sleep out of mutex*/
+	/* msleep(100); */
+
+	ctx = &dsi_config->dsi_hw_context;
+	if (p_funcs->set_brightness(dsi_config, ctx->lastbrightnesslevel))
+		DRM_ERROR("Failed to set panel brightness\n");
+
+	return err;
+}
+#endif /* if KEEP_UNUSED_CODE */
+
+#if KEEP_UNUSED_CODE
+/**
+ * Send SHUT_DOWN package to dpi panel to turn if off
+ */
+static int mdfld_dsi_dpi_panel_shut_down(struct mdfld_dsi_config *dsi_config,
+		struct panel_funcs *p_funcs)
+{
+	struct mdfld_dsi_pkg_sender *sender =
+		mdfld_dsi_get_pkg_sender(dsi_config);
+	struct mdfld_dsi_hw_context *ctx;
+	int err = 0;
+
+	ctx = &dsi_config->dsi_hw_context;
+	ctx->lastbrightnesslevel = psb_brightness;
+	if (p_funcs->set_brightness(dsi_config, 0))
+		DRM_ERROR("Failed to set panel brightness\n");
+
+	err = mdfld_dsi_send_dpi_spk_pkg_hs(sender,
+			MDFLD_DSI_DPI_SPK_SHUT_DOWN);
+	/*According HW DSI spec, here need wait for 100ms*/
+	/*To optimize dpms flow, move sleep out of mutex*/
+	/* msleep(100); */
+
+	return err;
+}
+#endif /* if KEEP_UNUSED_CODE */
+
+/**
+ * Setup Display Controller to turn on/off a video mode panel.
+ * Most of the video mode MIPI panel should follow the power on/off
+ * sequence in this function.
+ * NOTE: do NOT modify this function for new panel Enabling. Register
+ * new panel function callbacks to make this function available for a
+ * new video mode panel
+ */
+static int __mdfld_dsi_dpi_set_power(struct drm_encoder *encoder, bool on)
+{
+	struct mdfld_dsi_encoder *dsi_encoder;
+	struct mdfld_dsi_connector *dsi_connector;
+	struct mdfld_dsi_dpi_output *dpi_output;
+	struct mdfld_dsi_config *dsi_config;
+	struct panel_funcs *p_funcs;
+	int pipe;
+	struct drm_device *dev;
+	struct drm_psb_private *dev_priv;
+
+	if (!encoder) {
+		DRM_ERROR("Invalid encoder\n");
+		return -EINVAL;
+	}
+
+	dsi_encoder = MDFLD_DSI_ENCODER(encoder);
+	dpi_output = MDFLD_DSI_DPI_OUTPUT(dsi_encoder);
+	dsi_config = mdfld_dsi_encoder_get_config(dsi_encoder);
+	p_funcs = dpi_output->p_funcs;
+	pipe = mdfld_dsi_encoder_get_pipe(dsi_encoder);
+	dsi_connector = mdfld_dsi_encoder_get_connector(dsi_encoder);
+	if (!dsi_connector) {
+		DRM_ERROR("dsi_connector is NULL\n");
+		return -EINVAL;
+	}
+
+	dev = dsi_config->dev;
+	dev_priv = dev->dev_private;
+
+	if (dsi_connector->status != connector_status_connected)
+		return 0;
+
+	mutex_lock(&dsi_config->context_lock);
+
+	if (dpi_output->first_boot && on) {
+		if (dsi_config->dsi_hw_context.panel_on) {
+			if (IS_ANN(dev))
+				ann_dc_setup(dsi_config);
+
+			psb_enable_vblank(dev, dsi_config->pipe);
+
+			/* don't need ISLAND c for non dual-dsi panel */
+			if (!is_dual_dsi(dev))
+				power_island_put(OSPM_DISPLAY_C);
+
+			DRM_INFO("skip panle power setting for first boot! "
+				 "panel is already powered on\n");
+			goto fun_exit;
+		}
+		if (android_hdmi_is_connected(dev))
+			otm_hdmi_power_islands_off();
+		/* power down islands turned on by firmware */
+		power_island_put(OSPM_DISPLAY_A | OSPM_DISPLAY_C |
+				 OSPM_DISPLAY_MIO);
+	}
+
+	switch (on) {
+	case true:
+		/* panel is already on */
+		if (dsi_config->dsi_hw_context.panel_on)
+			goto fun_exit;
+		if (__dpi_panel_power_on(dsi_config, p_funcs, dpi_output->first_boot)) {
+			DRM_ERROR("Faild to turn on panel\n");
+			goto set_power_err;
+		}
+		dsi_config->dsi_hw_context.panel_on = 1;
+
+		/* for every dpi panel power on, clear the dpi underrun count */
+		dev_priv->pipea_dpi_underrun_count = 0;
+		dev_priv->pipec_dpi_underrun_count = 0;
+
+		break;
+	case false:
+		if (!dsi_config->dsi_hw_context.panel_on &&
+			!dpi_output->first_boot)
+			goto fun_exit;
+		if (__dpi_panel_power_off(dsi_config, p_funcs)) {
+			DRM_ERROR("Faild to turn off panel\n");
+			goto set_power_err;
+		}
+		dsi_config->dsi_hw_context.panel_on = 0;
+		break;
+	default:
+		break;
+	}
+
+fun_exit:
+	mutex_unlock(&dsi_config->context_lock);
+	PSB_DEBUG_ENTRY("successfully\n");
+	return 0;
+set_power_err:
+	mutex_unlock(&dsi_config->context_lock);
+	PSB_DEBUG_ENTRY("unsuccessfully!!!!\n");
+	return -EAGAIN;
+}
+
+void mdfld_dsi_dpi_set_power(struct drm_encoder *encoder, bool on)
+{
+	struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
+	struct mdfld_dsi_config *dsi_config =
+		mdfld_dsi_encoder_get_config(dsi_encoder);
+	struct drm_device *dev;
+	struct drm_psb_private *dev_priv;
+	struct mdfld_dsi_dpi_output *dpi_output = NULL;
+	u32 mipi_reg = MIPI;
+	u32 pipeconf_reg = PIPEACONF;
+	int pipe;
+
+	if (!dsi_config) {
+		DRM_ERROR("dsi_config is NULL\n");
+		return;
+	}
+
+	pipe = mdfld_dsi_encoder_get_pipe(dsi_encoder);
+	dev = dsi_config->dev;
+	dev_priv = dev->dev_private;
+
+	PSB_DEBUG_ENTRY("set power %s on pipe %d\n", on ? "On" : "Off", pipe);
+
+	dpi_output = MDFLD_DSI_DPI_OUTPUT(dsi_encoder);
+
+	if (pipe)
+		if (!(dev_priv->panel_desc & DISPLAY_B) ||
+				!(dev_priv->panel_desc & DISPLAY_C))
+			return;
+
+	if (pipe) {
+		mipi_reg = MIPI_C;
+		pipeconf_reg = PIPECCONF;
+	}
+
+	/**
+	 * if TMD panel call new power on/off sequences instead.
+	 * NOTE: refine TOSHIBA panel code later
+	 */
+	__mdfld_dsi_dpi_set_power(encoder, on);
+}
+
+static
+void mdfld_dsi_dpi_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct mdfld_dsi_encoder *dsi_encoder;
+	struct mdfld_dsi_config *dsi_config;
+	struct drm_device *dev;
+	struct drm_psb_private *dev_priv;
+	struct mdfld_dsi_dpi_output *dpi_output;
+	struct panel_funcs *p_funcs;
+
+	dsi_encoder = MDFLD_DSI_ENCODER(encoder);
+	dsi_config = mdfld_dsi_encoder_get_config(dsi_encoder);
+	if (!dsi_config) {
+		DRM_ERROR("dsi_config is NULL\n");
+		return;
+	}
+	dev = dsi_config->dev;
+	dev_priv = dev->dev_private;
+
+	dpi_output = MDFLD_DSI_DPI_OUTPUT(dsi_encoder);
+	p_funcs = dpi_output->p_funcs;
+
+	PSB_DEBUG_ENTRY("%s\n", (mode == DRM_MODE_DPMS_ON ? "on" :
+		DRM_MODE_DPMS_STANDBY == mode ? "standby" : "off"));
+
+	mutex_lock(&dev_priv->dpms_mutex);
+	DCLockMutex();
+
+	if (mode == DRM_MODE_DPMS_ON) {
+		mdfld_dsi_dpi_set_power(encoder, true);
+		DCAttachPipe(dsi_config->pipe);
+		DC_MRFLD_onPowerOn(dsi_config->pipe);
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+		{
+			struct mdfld_dsi_hw_context *ctx =
+				&dsi_config->dsi_hw_context;
+			struct backlight_device bd;
+			bd.props.brightness = ctx->lastbrightnesslevel;
+			psb_set_brightness(&bd);
+		}
+#endif
+	} else if (mode == DRM_MODE_DPMS_STANDBY) {
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+		struct mdfld_dsi_hw_context *ctx = &dsi_config->dsi_hw_context;
+		struct backlight_device bd;
+		ctx->lastbrightnesslevel = psb_get_brightness(&bd);
+		bd.props.brightness = 0;
+		psb_set_brightness(&bd);
+#endif
+		/* Make the pending flip request as completed. */
+		DCUnAttachPipe(dsi_config->pipe);
+		msleep(50);
+		DC_MRFLD_onPowerOff(dsi_config->pipe);
+		msleep(50);
+	} else {
+		mdfld_dsi_dpi_set_power(encoder, false);
+
+		drm_handle_vblank(dev, dsi_config->pipe);
+
+		/* Turn off TE interrupt. */
+		drm_vblank_off(dev, dsi_config->pipe);
+
+		/* Make the pending flip request as completed. */
+		DCUnAttachPipe(dsi_config->pipe);
+		DC_MRFLD_onPowerOff(dsi_config->pipe);
+	}
+
+	DCUnLockMutex();
+	mutex_unlock(&dev_priv->dpms_mutex);
+}
+
+static
+bool mdfld_dsi_dpi_mode_fixup(struct drm_encoder *encoder,
+		const struct drm_display_mode *mode,
+		struct drm_display_mode *adjusted_mode)
+{
+	struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
+	struct mdfld_dsi_config *dsi_config =
+		mdfld_dsi_encoder_get_config(dsi_encoder);
+	struct drm_display_mode *fixed_mode;
+
+	if (!dsi_config) {
+		DRM_ERROR("dsi_config is NULL\n");
+		return false;
+	}
+
+	fixed_mode = dsi_config->fixed_mode;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (fixed_mode) {
+		adjusted_mode->hdisplay = fixed_mode->hdisplay;
+		adjusted_mode->hsync_start = fixed_mode->hsync_start;
+		adjusted_mode->hsync_end = fixed_mode->hsync_end;
+		adjusted_mode->htotal = fixed_mode->htotal;
+		adjusted_mode->vdisplay = fixed_mode->vdisplay;
+		adjusted_mode->vsync_start = fixed_mode->vsync_start;
+		adjusted_mode->vsync_end = fixed_mode->vsync_end;
+		adjusted_mode->vtotal = fixed_mode->vtotal;
+		adjusted_mode->clock = fixed_mode->clock;
+		drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
+	}
+
+	return true;
+}
+
+static
+void mdfld_dsi_dpi_prepare(struct drm_encoder *encoder)
+{
+	PSB_DEBUG_ENTRY("\n");
+}
+
+static
+void mdfld_dsi_dpi_commit(struct drm_encoder *encoder)
+{
+	struct mdfld_dsi_encoder *dsi_encoder;
+	struct mdfld_dsi_dpi_output *dpi_output;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	dsi_encoder = MDFLD_DSI_ENCODER(encoder);
+	dpi_output = MDFLD_DSI_DPI_OUTPUT(dsi_encoder);
+
+	/*Everything is ready, commit DSI hw context to HW*/
+	__mdfld_dsi_dpi_set_power(encoder, true);
+	dpi_output->first_boot = 0;
+}
+
+/**
+ * Setup DPI timing for video mode MIPI panel.
+ * NOTE: do NOT modify this function
+ */
+static void __mdfld_dsi_dpi_set_timing(struct mdfld_dsi_config *config,
+		struct drm_display_mode *mode,
+		struct drm_display_mode *adjusted_mode)
+{
+	struct mdfld_dsi_dpi_timing dpi_timing;
+	struct mdfld_dsi_hw_context *ctx;
+	struct drm_device *dev = config->dev;
+
+	if (!config) {
+		DRM_ERROR("Invalid parameters\n");
+		return;
+	}
+
+	mode = adjusted_mode;
+	ctx = &config->dsi_hw_context;
+
+	mutex_lock(&config->context_lock);
+
+	/*dpi resolution*/
+	if (is_dual_dsi(dev))
+		ctx->dpi_resolution = (mode->vdisplay << 16 | (mode->hdisplay / 2));
+	else
+		ctx->dpi_resolution = (mode->vdisplay << 16 | mode->hdisplay);
+
+	/*Calculate DPI timing*/
+	mdfld_dsi_dpi_timing_calculation(dev, mode, &dpi_timing,
+			config->lane_count,
+			config->bpp);
+
+	/*update HW context with new DPI timings*/
+	ctx->hsync_count = dpi_timing.hsync_count;
+	ctx->hbp_count = dpi_timing.hbp_count;
+	ctx->hfp_count = dpi_timing.hfp_count;
+	ctx->hactive_count = dpi_timing.hactive_count;
+	ctx->vsync_count = dpi_timing.vsync_count;
+	ctx->vbp_count = dpi_timing.vbp_count;
+	ctx->vfp_count = dpi_timing.vfp_count;
+
+	mutex_unlock(&config->context_lock);
+}
+
+static
+void mdfld_dsi_dpi_mode_set(struct drm_encoder *encoder,
+		struct drm_display_mode *mode,
+		struct drm_display_mode *adjusted_mode)
+{
+	struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
+	struct mdfld_dsi_config *dsi_config =
+		mdfld_dsi_encoder_get_config(dsi_encoder);
+	int pipe = mdfld_dsi_encoder_get_pipe(dsi_encoder);
+
+
+	PSB_DEBUG_ENTRY("set mode %dx%d on pipe %d",
+			mode->hdisplay, mode->vdisplay, pipe);
+
+	/**
+	 * if TMD panel call new power on/off sequences instead.
+	 * NOTE: refine TOSHIBA panel code later
+	 */
+	if (!dsi_config) {
+		DRM_ERROR("Invalid dsi config\n");
+		return;
+	}
+
+	__mdfld_dsi_dpi_set_timing(dsi_config, mode, adjusted_mode);
+	mdfld_dsi_set_drain_latency(encoder, adjusted_mode);
+}
+
+static
+void mdfld_dsi_dpi_save(struct drm_encoder *encoder)
+{
+	struct mdfld_dsi_encoder *dsi_encoder;
+	struct mdfld_dsi_config *dsi_config;
+	struct drm_device *dev;
+	int pipe;
+
+	if (!encoder)
+		return;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	dsi_encoder = MDFLD_DSI_ENCODER(encoder);
+	dsi_config = mdfld_dsi_encoder_get_config(dsi_encoder);
+	dev = dsi_config->dev;
+	pipe = mdfld_dsi_encoder_get_pipe(dsi_encoder);
+
+	DCLockMutex();
+
+	/* give time to the last flip to take effective,
+	 * if we disable hardware too quickly, overlay hardware may crash,
+	 * causing pipe hang next time when we try to use overlay
+	 */
+	msleep(50);
+	DC_MRFLD_onPowerOff(pipe);
+	msleep(50);
+	__mdfld_dsi_dpi_set_power(encoder, false);
+
+	drm_handle_vblank(dev, pipe);
+
+	/* Turn off vsync interrupt. */
+	drm_vblank_off(dev, pipe);
+
+	/* Make the pending flip request as completed. */
+	DCUnAttachPipe(pipe);
+	DCUnLockMutex();
+}
+
+static
+void mdfld_dsi_dpi_restore(struct drm_encoder *encoder)
+{
+	struct mdfld_dsi_encoder *dsi_encoder;
+	struct mdfld_dsi_config *dsi_config;
+	struct drm_device *dev;
+	int pipe;
+
+	if (!encoder)
+		return;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	dsi_encoder = MDFLD_DSI_ENCODER(encoder);
+	dsi_config = mdfld_dsi_encoder_get_config(dsi_encoder);
+	dev = dsi_config->dev;
+	pipe = mdfld_dsi_encoder_get_pipe(dsi_encoder);
+
+	DCLockMutex();
+	__mdfld_dsi_dpi_set_power(encoder, true);
+
+	DCAttachPipe(pipe);
+	DC_MRFLD_onPowerOn(pipe);
+	DCUnLockMutex();
+}
+
+static const
+struct drm_encoder_helper_funcs dsi_dpi_generic_encoder_helper_funcs = {
+	.save = mdfld_dsi_dpi_save,
+	.restore = mdfld_dsi_dpi_restore,
+	.dpms = mdfld_dsi_dpi_dpms,
+	.mode_fixup = mdfld_dsi_dpi_mode_fixup,
+	.prepare = mdfld_dsi_dpi_prepare,
+	.mode_set = mdfld_dsi_dpi_mode_set,
+	.commit = mdfld_dsi_dpi_commit,
+};
+
+static const
+struct drm_encoder_funcs dsi_dpi_generic_encoder_funcs = {
+	.destroy = drm_encoder_cleanup,
+};
+
+/*
+ * Init DSI DPI encoder. 
+ * Allocate an mdfld_dsi_encoder and attach it to given @dsi_connector
+ * return pointer of newly allocated DPI encoder, NULL on error
+ */
+struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev,
+		struct mdfld_dsi_connector *dsi_connector,
+		struct panel_funcs *p_funcs)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct mdfld_dsi_dpi_output *dpi_output = NULL;
+	struct mdfld_dsi_config *dsi_config;
+	struct drm_connector *connector = NULL;
+	struct drm_encoder *encoder = NULL;
+	struct drm_display_mode *fixed_mode = NULL;
+	int pipe;
+	int ret;
+
+	PSB_DEBUG_ENTRY("[DISPLAY] %s\n", __func__);
+
+	if (!dsi_connector || !p_funcs) {
+		DRM_ERROR("Invalid parameters\n");
+		return NULL;
+	}
+	dsi_config = mdfld_dsi_get_config(dsi_connector);
+	pipe = dsi_connector->pipe;
+
+	/*detect panel connection stauts*/
+	if (p_funcs->detect) {
+		ret = p_funcs->detect(dsi_config);
+		if (ret) {
+			DRM_INFO("Detecting Panel %d, Not connected\n", pipe);
+			dsi_connector->status = connector_status_disconnected;
+		} else {
+			PSB_DEBUG_ENTRY("Panel %d is connected\n", pipe);
+			dsi_connector->status = connector_status_connected;
+		}
+
+		if (dsi_connector->status == connector_status_disconnected &&
+				pipe == 0) {
+			DRM_ERROR("Primary panel disconnected\n");
+			return NULL;
+		}
+	} else {
+		/*use the default config*/
+		if (pipe == 0)
+			dsi_connector->status = connector_status_connected;
+		else
+			dsi_connector->status = connector_status_disconnected;
+	}
+	/*init DSI controller*/
+	if (p_funcs->dsi_controller_init)
+		p_funcs->dsi_controller_init(dsi_config);
+	/**
+	 * TODO: can we keep these code out of display driver as
+	 * it will make display driver hard to be maintained
+	 */
+	if (dsi_connector->status == connector_status_connected) {
+		if (pipe == 0)
+			dev_priv->panel_desc |= DISPLAY_A;
+		if (pipe == 2)
+			dev_priv->panel_desc |= DISPLAY_C;
+	}
+
+	dpi_output = kzalloc(sizeof(struct mdfld_dsi_dpi_output), GFP_KERNEL);
+	if (!dpi_output) {
+		DRM_ERROR("No memory\n");
+		return NULL;
+	}
+
+	dpi_output->dev = dev;
+	dpi_output->p_funcs = p_funcs;
+	dpi_output->first_boot = 1;
+	/*get fixed mode*/
+	fixed_mode = dsi_config->fixed_mode;
+
+	/*create drm encoder object*/
+	connector = &dsi_connector->base.base;
+	encoder = &dpi_output->base.base;
+	drm_encoder_init(dev,
+			encoder,
+			&dsi_dpi_generic_encoder_funcs,
+			DRM_MODE_ENCODER_DSI);
+	drm_encoder_helper_add(encoder,
+			&dsi_dpi_generic_encoder_helper_funcs);
+
+	/*attach to given connector*/
+	drm_mode_connector_attach_encoder(connector, encoder);
+	connector->encoder = encoder;
+
+	/*set possible crtcs and clones*/
+	if (dsi_connector->pipe) {
+		encoder->possible_crtcs = (1 << 2);
+		encoder->possible_clones = (1 << 1);
+	} else {
+		encoder->possible_crtcs = (1 << 0);
+		encoder->possible_clones = (1 << 0);
+	}
+
+	dev_priv->dsr_fb_update = 0;
+	dev_priv->b_dsr_enable = false;
+	dev_priv->exit_idle = NULL;
+#if defined(CONFIG_MDFLD_DSI_DPU) || defined(CONFIG_MDFLD_DSI_DSR)
+	dev_priv->b_dsr_enable_config = true;
+#endif /*CONFIG_MDFLD_DSI_DSR*/
+
+	PSB_DEBUG_ENTRY("successfully\n");
+
+	return &dpi_output->base;
+}
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/mdfld_dsi_dpi.h b/drivers/external_drivers/intel_media/display/tng/drv/mdfld_dsi_dpi.h
new file mode 100644
index 0000000..aec2a26
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/mdfld_dsi_dpi.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * jim liu <jim.liu@intel.com>
+ * Jackie Li<yaodong.li@intel.com>
+ */
+
+#ifndef __MDFLD_DSI_DPI_H__
+#define __MDFLD_DSI_DPI_H__
+
+#include "mdfld_dsi_output.h"
+#include "mdfld_output.h"
+
+struct mdfld_dsi_dpi_timing {
+	u16 hsync_count;
+	u16 hbp_count;
+	u16 hfp_count;
+	u16 hactive_count;
+	u16 vsync_count;
+	u16 vbp_count;
+	u16 vfp_count;
+};
+
+struct mdfld_dsi_dpi_output {
+	struct mdfld_dsi_encoder base;
+	struct drm_device *dev;
+
+	int panel_on;
+	int first_boot;
+
+	struct panel_funcs *p_funcs;
+};
+
+#define MDFLD_DSI_DPI_OUTPUT(dsi_encoder) \
+	container_of(dsi_encoder, struct mdfld_dsi_dpi_output, base)
+
+/*export functions*/
+extern struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev,
+				struct mdfld_dsi_connector *dsi_connector,
+				struct panel_funcs *p_funcs);
+
+#endif /*__MDFLD_DSI_DPI_H__*/
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/mdfld_dsi_esd.c b/drivers/external_drivers/intel_media/display/tng/drv/mdfld_dsi_esd.c
new file mode 100644
index 0000000..ca4e2df
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/mdfld_dsi_esd.c
@@ -0,0 +1,207 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Jackie Li<yaodong.li@intel.com>
+ */
+
+
+#include <linux/version.h>
+#include <linux/gfp.h>
+#include <linux/sched.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
+#include <linux/sched/rt.h>
+#endif
+#include "mdfld_dsi_esd.h"
+#include "mdfld_dsi_dbi.h"
+#include "mdfld_dsi_pkg_sender.h"
+#include "mdfld_dsi_dbi_dsr.h"
+
+#define MDFLD_ESD_SLEEP_MSECS	8000
+
+/**
+ * esd detection
+ */
+static bool intel_dsi_dbi_esd_detection(struct mdfld_dsi_config *dsi_config)
+{
+	int ret;
+	u32 data = 0;
+
+	PSB_DEBUG_ENTRY("esd\n");
+
+	ret = mdfld_dsi_get_power_mode(dsi_config,
+			(u8 *) &data,
+			MDFLD_DSI_HS_TRANSMISSION);
+	/**
+	 * if FIFO is not empty, need do ESD, ret equals -EIO means
+	 * FIFO is abnormal.
+	 */
+	if ((ret == -EIO) || ((ret == 1) && ((data & 0x14) != 0x14)))
+		return true;
+
+	return false;
+}
+
+static int __esd_thread(void *data)
+{
+	struct mdfld_dsi_dbi_output *dbi_output = NULL;
+	struct panel_funcs *p_funcs  = NULL;
+	struct mdfld_dsi_config *dsi_config;
+	struct mdfld_dsi_error_detector *err_detector =
+		(struct mdfld_dsi_error_detector *)data;
+	struct drm_device *dev = err_detector->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	dsi_config = dev_priv->dsi_configs[0];
+	if (!dsi_config)
+		return -EINVAL;
+
+	set_freezable();
+
+	while (!kthread_should_stop()) {
+		wait_event_freezable(err_detector->esd_thread_wq,
+			(dsi_config->dsi_hw_context.panel_on ||
+			 kthread_should_stop()));
+
+		dbi_output = dev_priv->dbi_output;
+
+		if (!dbi_output)
+			goto esd_exit;
+
+		mutex_lock(&dsi_config->context_lock);
+
+		p_funcs = dbi_output->p_funcs;
+		if (dsi_config->dsi_hw_context.panel_on &&
+			!mdfld_dsi_dsr_in_dsr_locked(dsi_config)) {
+			/*forbid DSR during detection & resume*/
+			mdfld_dsi_dsr_forbid_locked(dsi_config);
+
+			if (intel_dsi_dbi_esd_detection(dsi_config)) {
+				DRM_INFO("%s: error detected\n", __func__);
+				schedule_work(&dev_priv->reset_panel_work);
+			}
+
+			mdfld_dsi_dsr_allow_locked(dsi_config);
+		}
+		mutex_unlock(&dsi_config->context_lock);
+esd_exit:
+		schedule_timeout_interruptible(
+			msecs_to_jiffies(MDFLD_ESD_SLEEP_MSECS));
+	}
+
+	DRM_INFO("%s: ESD exited\n", __func__);
+	return 0;
+}
+
+/**
+ * Wake up the error detector
+ */
+void mdfld_dsi_error_detector_wakeup(struct mdfld_dsi_connector *dsi_connector)
+{
+	struct mdfld_dsi_error_detector *err_detector;
+
+	if (!dsi_connector || !dsi_connector->err_detector)
+		return;
+
+	err_detector = dsi_connector->err_detector;
+	wake_up_interruptible(&err_detector->esd_thread_wq);
+}
+
+/**
+ * @dev: DRM device
+ * @dsi_connector:
+ *
+ * Initialize DSI error detector
+ */
+int mdfld_dsi_error_detector_init(struct drm_device *dev,
+		struct mdfld_dsi_connector *dsi_connector)
+{
+	struct mdfld_dsi_error_detector *err_detector;
+	const char *fmt = "%s";
+	struct task_struct *p;
+	struct sched_param param = {.sched_priority = MAX_RT_PRIO - 1};
+
+	if (!dsi_connector || !dev) {
+		DRM_ERROR("Invalid parameter\n");
+		return -EINVAL;
+	}
+
+	if (dsi_connector->err_detector)
+		return 0;
+
+	/*create a new error detector*/
+	err_detector = kzalloc(sizeof(struct mdfld_dsi_error_detector),
+				GFP_KERNEL);
+	if (!err_detector) {
+		DRM_ERROR("Failed to allocate ESD\n");
+		return -ENOMEM;
+	}
+
+	/*init detector thread wait queue*/
+	init_waitqueue_head(&err_detector->esd_thread_wq);
+
+	/*init detector thread*/
+	p = kthread_create(__esd_thread, err_detector, fmt, "dsi_esd", 0);
+	if (IS_ERR(p)) {
+		DRM_ERROR("Failed to create ESD thread\n");
+		goto esd_thread_err;
+	}
+	/*use FIFO scheduler*/
+	sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
+
+	err_detector->esd_thread = p;
+	err_detector->dev = dev;
+
+	/*attach it to connector*/
+	dsi_connector->err_detector = err_detector;
+
+	/*time to start detection*/
+	wake_up_process(p);
+
+	DRM_INFO("%s: started\n", __func__);
+
+	return 0;
+esd_thread_err:
+	kfree(err_detector);
+	return -EAGAIN;
+}
+
+void mdfld_dsi_error_detector_exit(struct mdfld_dsi_connector *dsi_connector)
+{
+	struct mdfld_dsi_error_detector *err_detector;
+
+	if (!dsi_connector || !dsi_connector->err_detector)
+		return;
+
+	err_detector = dsi_connector->err_detector;
+
+	/*stop & destroy detector thread*/
+	if (err_detector->esd_thread) {
+		kthread_stop(err_detector->esd_thread);
+		err_detector->esd_thread = NULL;
+	}
+
+	/*delete it*/
+	kfree(err_detector);
+
+	dsi_connector->err_detector = NULL;
+}
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/mdfld_dsi_esd.h b/drivers/external_drivers/intel_media/display/tng/drv/mdfld_dsi_esd.h
new file mode 100644
index 0000000..8baec9c
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/mdfld_dsi_esd.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Jackie Li<yaodong.li@intel.com>
+ */
+#include <linux/freezer.h>
+#include <linux/kthread.h>
+#include <linux/wait.h>
+
+#include "psb_drv.h"
+#include "mdfld_dsi_output.h"
+
+struct mdfld_dsi_error_detector {
+	struct drm_device *dev;
+
+	struct task_struct *esd_thread;
+	wait_queue_head_t esd_thread_wq;
+};
+
+int mdfld_dsi_error_detector_init(struct drm_device *dev,
+	struct mdfld_dsi_connector *dsi_connector);
+void mdfld_dsi_error_detector_exit(struct mdfld_dsi_connector *dsi_connector);
+void mdfld_dsi_error_detector_wakeup(struct mdfld_dsi_connector *dsi_connector);
+
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/mdfld_dsi_output.c b/drivers/external_drivers/intel_media/display/tng/drv/mdfld_dsi_output.c
new file mode 100644
index 0000000..a553366
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/mdfld_dsi_output.c
@@ -0,0 +1,1123 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * jim liu <jim.liu@intel.com>
+ * Jackie Li<yaodong.li@intel.com>
+ */
+
+#include "mdfld_dsi_output.h"
+#include "mdfld_dsi_dbi.h"
+#include "mdfld_dsi_dpi.h"
+#include "mdfld_output.h"
+#include <asm/intel_scu_ipc.h>
+#include "mdfld_dsi_pkg_sender.h"
+#include <linux/freezer.h>
+#include "psb_drv.h"
+#include "mdfld_dsi_esd.h"
+#include "mdfld_dsi_dbi_dsr.h"
+
+#define ACTUAL_DRAIN_RATE_7x12 75
+#define ACTUAL_DRAIN_RATE_10x19 150
+#define ACTUAL_DRAIN_RATE_25x16 300
+#define HDMI_SPRITE_DEADLINE 0x8D
+#define HDMI_OVERLAY_DEADLINE 0xA4
+
+#define KEEP_UNUSED_CODE 0
+
+#define MDFLD_DSI_BRIGHTNESS_MAX_LEVEL 100
+
+/* get the CABC LABC from command line. */
+static int CABC_control = 1;
+static int LABC_control = 1;
+
+#ifdef MODULE
+module_param(CABC_control, int, 0644);
+module_param(LABC_control, int, 0644);
+#else
+static int __init parse_CABC_control(char *arg)
+{
+	/* CABC control can be passed in as a cmdline parameter */
+	/* to enable this feature add CABC=1 to cmdline */
+	/* to disable this feature add CABC=0 to cmdline */
+	if (!arg)
+		return -EINVAL;
+
+	if (!strcasecmp(arg, "0"))
+		CABC_control = 0;
+	else if (!strcasecmp(arg, "1"))
+		CABC_control = 1;
+
+	return 0;
+}
+early_param("CABC", parse_CABC_control);
+
+static int __init parse_LABC_control(char *arg)
+{
+	/* LABC control can be passed in as a cmdline parameter */
+	/* to enable this feature add LABC=1 to cmdline */
+	/* to disable this feature add LABC=0 to cmdline */
+	if (!arg)
+		return -EINVAL;
+
+	if (!strcasecmp(arg, "0"))
+		LABC_control = 0;
+	else if (!strcasecmp(arg, "1"))
+		LABC_control = 1;
+
+	return 0;
+}
+early_param("LABC", parse_LABC_control);
+#endif
+
+#if KEEP_UNUSED_CODE
+/**
+ * make these MCS command global 
+ * we don't need 'movl' everytime we send them.
+ * FIXME: these datas were provided by OEM, we should get them from GCT.
+ **/
+static u32 mdfld_dbi_mcs_hysteresis[] = {
+	0x42000f57, 0x8c006400, 0xff00bf00, 0xffffffff,
+	0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+	0x38000aff, 0x82005000, 0xff00ab00, 0xffffffff,
+	0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+	0x000000ff,
+};
+#endif /* if KEEP_UNUSED_CODE */
+
+#if KEEP_UNUSED_CODE
+static u32 mdfld_dbi_mcs_display_profile[] = {
+	0x50281450, 0x0000c882, 0x00000000, 0x00000000,
+	0x00000000,
+};
+#endif /* if KEEP_UNUSED_CODE */
+
+#if KEEP_UNUSED_CODE
+static u32 mdfld_dbi_mcs_kbbc_profile[] = {
+	0x00ffcc60, 0x00000000, 0x00000000, 0x00000000,
+};
+#endif /* if KEEP_UNUSED_CODE */
+
+#if KEEP_UNUSED_CODE
+static u32 mdfld_dbi_mcs_gamma_profile[] = {
+	0x81111158, 0x88888888, 0x88888888,
+};
+#endif /* if KEEP_UNUSED_CODE */
+
+#if KEEP_UNUSED_CODE
+/**
+ * write hysteresis values.
+ */
+static void mdfld_dsi_write_hysteresis(struct mdfld_dsi_config *dsi_config,
+		int pipe)
+{
+	struct mdfld_dsi_pkg_sender *sender =
+		mdfld_dsi_get_pkg_sender(dsi_config);
+
+	if (!sender) {
+		DRM_ERROR("No sender found\n");
+		return;
+	}
+
+	mdfld_dsi_send_mcs_long_hs(sender,
+			(u8 *) mdfld_dbi_mcs_hysteresis,
+			68,
+			MDFLD_DSI_SEND_PACKAGE);
+}
+#endif /* if KEEP_UNUSED_CODE */
+
+#if KEEP_UNUSED_CODE
+/**
+ * write display profile values.
+ */
+static void mdfld_dsi_write_display_profile(struct mdfld_dsi_config *dsi_config,
+		int pipe)
+{
+	struct mdfld_dsi_pkg_sender *sender =
+		mdfld_dsi_get_pkg_sender(dsi_config);
+
+	if (!sender) {
+		DRM_ERROR("No sender found\n");
+		return;
+	}
+
+	mdfld_dsi_send_mcs_long_hs(sender,
+			(u8 *) mdfld_dbi_mcs_display_profile,
+			20,
+			MDFLD_DSI_SEND_PACKAGE);
+}
+#endif /* if KEEP_UNUSED_CODE */
+
+#if KEEP_UNUSED_CODE
+/**
+ * write KBBC profile values.
+ */
+static void mdfld_dsi_write_kbbc_profile(struct mdfld_dsi_config *dsi_config,
+		int pipe)
+{
+	struct mdfld_dsi_pkg_sender *sender =
+		mdfld_dsi_get_pkg_sender(dsi_config);
+
+	if (!sender) {
+		DRM_ERROR("No sender found\n");
+		return;
+	}
+
+	mdfld_dsi_send_mcs_long_hs(sender,
+			(u8 *) mdfld_dbi_mcs_kbbc_profile,
+			20,
+			MDFLD_DSI_SEND_PACKAGE);
+}
+#endif /* if KEEP_UNUSED_CODE */
+
+#if KEEP_UNUSED_CODE
+/**
+ * write gamma setting.
+ */
+static void mdfld_dsi_write_gamma_setting(struct mdfld_dsi_config *dsi_config,
+		int pipe)
+{
+	struct mdfld_dsi_pkg_sender *sender =
+		mdfld_dsi_get_pkg_sender(dsi_config);
+
+	if (!sender) {
+		DRM_ERROR("No sender found\n");
+		return;
+	}
+
+	mdfld_dsi_send_mcs_long_hs(sender,
+			(u8 *) mdfld_dbi_mcs_gamma_profile,
+			3,
+			MDFLD_DSI_SEND_PACKAGE);
+}
+#endif /* if KEEP_UNUSED_CODE */
+
+/**
+ * Check and see if the generic control or data buffer is empty and ready.
+ */
+void mdfld_dsi_gen_fifo_ready(struct drm_device *dev, u32 gen_fifo_stat_reg,
+		u32 fifo_stat)
+{
+	u32 GEN_BF_time_out_count = 0;
+
+	/* Check MIPI Adatper command registers */
+	for (GEN_BF_time_out_count = 0; GEN_BF_time_out_count < GEN_FB_TIME_OUT;
+			GEN_BF_time_out_count++) {
+		if ((REG_READ(gen_fifo_stat_reg) & fifo_stat) == fifo_stat)
+			break;
+		udelay(100);
+	}
+
+	if (GEN_BF_time_out_count == GEN_FB_TIME_OUT)
+		DRM_ERROR("%s: Timeout. gen_fifo_stat_reg = 0x%x.\n", __func__,
+				gen_fifo_stat_reg);
+}
+
+/**
+ * Manage the mipi display brightness.
+ * TODO: refine this interface later
+ */
+void mdfld_dsi_brightness_control(struct drm_device *dev, int pipe, int level)
+{
+	struct drm_psb_private *dev_priv;
+	struct mdfld_dsi_config *dsi_config;
+	struct mdfld_dsi_dpi_output *dpi_output;
+	struct mdfld_dsi_dbi_output *dbi_output;
+	struct mdfld_dsi_encoder *encoder;
+	struct panel_funcs *p_funcs;
+	u32 power_island = 0;
+
+	if (!dev || (pipe != 0 && pipe != 2)) {
+		DRM_ERROR("Invalid parameter\n");
+		return;
+	}
+
+	dev_priv = dev->dev_private;
+
+	if (pipe)
+		dsi_config = dev_priv->dsi_configs[1];
+	else
+		dsi_config = dev_priv->dsi_configs[0];
+
+	if (!dsi_config) {
+		PSB_DEBUG_ENTRY("No dsi config found on pipe %d\n", pipe);
+		return;
+	}
+
+	encoder = dsi_config->encoders[dsi_config->type];
+
+	if (dsi_config->type == MDFLD_DSI_ENCODER_DBI) {
+		dbi_output = MDFLD_DSI_DBI_OUTPUT(encoder);
+		p_funcs = dbi_output ? dbi_output->p_funcs : NULL;
+	} else if (dsi_config->type == MDFLD_DSI_ENCODER_DPI) {
+		dpi_output = MDFLD_DSI_DPI_OUTPUT(encoder);
+		p_funcs = dpi_output ? dpi_output->p_funcs : NULL;
+	} else {
+		DRM_ERROR("Invalid parameter\n");
+		return;
+	}
+
+	if (!p_funcs || !p_funcs->set_brightness) {
+		DRM_INFO("Cannot set panel brightness\n");
+		return;
+	}
+
+	power_island = pipe_to_island(dsi_config->pipe);
+
+	if (power_island & (OSPM_DISPLAY_A | OSPM_DISPLAY_C))
+		power_island |= OSPM_DISPLAY_MIO;
+
+	if (!power_island_get(power_island))
+		return;
+
+	mutex_lock(&dsi_config->context_lock);
+
+	mdfld_dsi_dsr_forbid_locked(dsi_config);
+
+	if (!dsi_config->dsi_hw_context.panel_on)
+		goto set_brightness_out;
+
+	if (p_funcs->set_brightness(dsi_config, level))
+		DRM_ERROR("Failed to set panel brightness\n");
+
+set_brightness_out:
+	mdfld_dsi_dsr_allow_locked(dsi_config);
+	mutex_unlock(&dsi_config->context_lock);
+	power_island_put(power_island);
+}
+
+int mdfld_dsi_get_panel_status(struct mdfld_dsi_config *dsi_config,
+		u8 dcs,
+		u8 *data,
+		u8 transmission,
+		u32 len)
+{
+	struct mdfld_dsi_pkg_sender *sender =
+		mdfld_dsi_get_pkg_sender(dsi_config);
+	int ret = 0;
+
+	if (!sender || !data) {
+		DRM_ERROR("Invalid parameter\n");
+		return -EINVAL;
+	}
+
+	if (transmission == MDFLD_DSI_HS_TRANSMISSION) {
+		ret = mdfld_dsi_read_mcs_hs(sender, dcs, data, len);
+		if (sender->status == MDFLD_DSI_CONTROL_ABNORMAL)
+			ret = -EIO;
+		return ret;
+	} else if (transmission == MDFLD_DSI_LP_TRANSMISSION) {
+		ret = mdfld_dsi_read_mcs_lp(sender, dcs, data, len);
+		if (sender->status == MDFLD_DSI_CONTROL_ABNORMAL)
+			ret = -EIO;
+		return ret;
+	} else
+		return -EINVAL;
+}
+
+int mdfld_dsi_get_power_mode(struct mdfld_dsi_config *dsi_config,
+		u8 *mode,
+		u8 transmission)
+{
+	if (!dsi_config || !mode) {
+		DRM_ERROR("Invalid parameter\n");
+		return -EINVAL;
+	}
+
+	return mdfld_dsi_get_panel_status(dsi_config, 0x0A, mode,
+			transmission, 1);
+}
+
+static void mdfld_dsi_connector_save(struct drm_connector *connector)
+{
+	PSB_DEBUG_ENTRY("\n");
+}
+
+static void mdfld_dsi_connector_restore(struct drm_connector *connector)
+{
+	PSB_DEBUG_ENTRY("\n");
+}
+
+static enum drm_connector_status mdfld_dsi_connector_detect(
+	struct drm_connector *connector, bool force)
+{
+	struct psb_intel_output *psb_output =
+		to_psb_intel_output(connector);
+	struct mdfld_dsi_connector *dsi_connector =
+		MDFLD_DSI_CONNECTOR(psb_output);
+	(void) force;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	return dsi_connector->status;
+}
+
+static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
+		struct drm_property *property,
+		uint64_t value)
+{
+	struct drm_encoder *encoder = connector->encoder;
+	struct backlight_device *psb_bd;
+	struct drm_encoder_helper_funcs *pEncHFuncs = NULL;
+	struct psb_intel_crtc *psb_crtc = NULL;
+	bool bTransitionFromToCentered;
+	uint64_t curValue;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (!strcmp(property->name, "scaling mode") && encoder) {
+		psb_crtc = to_psb_intel_crtc(encoder->crtc);
+		if (!psb_crtc)
+			goto set_prop_error;
+
+		switch (value) {
+		case DRM_MODE_SCALE_FULLSCREEN:
+			break;
+		case DRM_MODE_SCALE_CENTER:
+			break;
+		case DRM_MODE_SCALE_NO_SCALE:
+			break;
+		case DRM_MODE_SCALE_ASPECT:
+			break;
+		default:
+			goto set_prop_error;
+		}
+
+		if (drm_object_property_get_value(&connector->base, property,
+					&curValue))
+			goto set_prop_error;
+
+		if (curValue == value)
+			goto set_prop_done;
+
+		if (drm_object_property_set_value(&connector->base, property,
+					value))
+			goto set_prop_error;
+
+		bTransitionFromToCentered =
+			(curValue == DRM_MODE_SCALE_NO_SCALE) ||
+			(value == DRM_MODE_SCALE_NO_SCALE);
+
+		if (psb_crtc->saved_mode.hdisplay != 0 &&
+				psb_crtc->saved_mode.vdisplay != 0) {
+			if (bTransitionFromToCentered) {
+				if (!drm_crtc_helper_set_mode(encoder->crtc,
+							&psb_crtc->saved_mode,
+							encoder->crtc->x,
+							encoder->crtc->y,
+							encoder->crtc->fb))
+					goto set_prop_error;
+			} else {
+				pEncHFuncs = encoder->helper_private;
+				pEncHFuncs->mode_set(encoder,
+						&psb_crtc->saved_mode,
+						&psb_crtc->saved_adjusted_mode);
+			}
+		}
+	} else if (!strcmp(property->name, "backlight") && encoder) {
+		PSB_DEBUG_ENTRY("backlight level = %d\n", (int)value);
+		if (drm_object_property_set_value(&connector->base, property,
+					value))
+			goto set_prop_error;
+		else {
+			PSB_DEBUG_ENTRY("set brightness to %d", (int)value);
+			psb_bd = psb_get_backlight_device();
+			if (psb_bd) {
+				psb_bd->props.brightness = value;
+				psb_set_brightness(psb_bd);
+			}
+		}
+	}
+set_prop_done:
+	return 0;
+set_prop_error:
+	return -1;
+}
+
+static void mdfld_dsi_connector_destroy(struct drm_connector *connector)
+{
+	struct psb_intel_output *psb_output = to_psb_intel_output(connector);
+	struct mdfld_dsi_connector *dsi_connector =
+		MDFLD_DSI_CONNECTOR(psb_output);
+	struct mdfld_dsi_config *dsi_config =
+		mdfld_dsi_get_config(dsi_connector);
+	struct mdfld_dsi_pkg_sender *sender;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (!dsi_connector)
+		return;
+
+	drm_sysfs_connector_remove(connector);
+	drm_connector_cleanup(connector);
+
+	mdfld_dsi_dsr_destroy(dsi_config);
+
+	sender = dsi_connector->pkg_sender;
+
+	mdfld_dsi_error_detector_exit(dsi_connector);
+
+	mdfld_dsi_pkg_sender_destroy(sender);
+
+	kfree(dsi_connector);
+}
+
+static int mdfld_dsi_connector_get_modes(struct drm_connector *connector)
+{
+	struct psb_intel_output *psb_output = to_psb_intel_output(connector);
+	struct mdfld_dsi_connector *dsi_connector =
+		MDFLD_DSI_CONNECTOR(psb_output);
+	struct mdfld_dsi_config *dsi_config =
+		mdfld_dsi_get_config(dsi_connector);
+	struct drm_display_mode *fixed_mode;
+	struct drm_display_mode *dup_mode = NULL;
+	struct drm_device *dev;
+
+	if (!dsi_config) {
+		DRM_ERROR("dsi_config is NULL\n");
+		return MODE_ERROR;
+	}
+	fixed_mode = dsi_config->fixed_mode;
+	dev = connector->dev;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	connector->display_info.min_vfreq = 0;
+	connector->display_info.max_vfreq = 200;
+	connector->display_info.min_hfreq = 0;
+	connector->display_info.max_hfreq = 200;
+
+	if (fixed_mode) {
+		PSB_DEBUG_ENTRY("fixed_mode %dx%d\n",
+				fixed_mode->hdisplay, fixed_mode->vdisplay);
+
+		dup_mode = drm_mode_duplicate(dev, fixed_mode);
+		drm_mode_probed_add(connector, dup_mode);
+		return 1;
+	}
+
+	DRM_ERROR("Didn't get any modes!\n");
+
+	return 0;
+}
+
+static int mdfld_dsi_connector_mode_valid(struct drm_connector *connector,
+		struct drm_display_mode *mode)
+{
+	struct psb_intel_output *psb_output = to_psb_intel_output(connector);
+	struct mdfld_dsi_connector *dsi_connector =
+		MDFLD_DSI_CONNECTOR(psb_output);
+	struct mdfld_dsi_config *dsi_config =
+		mdfld_dsi_get_config(dsi_connector);
+	struct drm_display_mode *fixed_mode;
+
+	if (!dsi_config) {
+		DRM_ERROR("dsi_config is NULL\n");
+		return MODE_ERROR;
+	}
+	fixed_mode = dsi_config->fixed_mode;
+
+	PSB_DEBUG_ENTRY("mode %p, fixed mode %p\n", mode, fixed_mode);
+
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return MODE_NO_DBLESCAN;
+
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		return MODE_NO_INTERLACE;
+
+	/** FIXME: current DC has no fitting unit, reject any mode setting
+	 * request will figure out a way to do up-scaling(pannel fitting) later
+	 **/
+	if (fixed_mode) {
+		if (mode->hdisplay != fixed_mode->hdisplay)
+			return MODE_PANEL;
+
+		if (mode->vdisplay != fixed_mode->vdisplay)
+			return MODE_PANEL;
+	}
+
+	PSB_DEBUG_ENTRY("ok\n");
+
+	return MODE_OK;
+}
+
+static void mdfld_dsi_connector_dpms(struct drm_connector *connector, int mode)
+{
+	/*first, execute dpms*/
+	drm_helper_connector_dpms(connector, mode);
+}
+
+static struct drm_encoder *
+mdfld_dsi_connector_best_encoder(struct drm_connector *connector)
+{
+	struct psb_intel_output *psb_output = to_psb_intel_output(connector);
+	struct mdfld_dsi_connector *dsi_connector =
+		MDFLD_DSI_CONNECTOR(psb_output);
+	struct mdfld_dsi_config *dsi_config =
+		mdfld_dsi_get_config(dsi_connector);
+	struct mdfld_dsi_encoder *encoder = NULL;
+
+	if (!dsi_config) {
+		DRM_ERROR("dsi_config is NULL\n");
+		return NULL;
+	}
+
+	PSB_DEBUG_ENTRY("config type %d\n", dsi_config->type);
+
+	if (dsi_config->type == MDFLD_DSI_ENCODER_DBI)
+		encoder = dsi_config->encoders[MDFLD_DSI_ENCODER_DBI];
+	else if (dsi_config->type == MDFLD_DSI_ENCODER_DPI)
+		encoder = dsi_config->encoders[MDFLD_DSI_ENCODER_DPI];
+
+	PSB_DEBUG_ENTRY("get encoder %p\n", encoder);
+
+	if (!encoder) {
+		DRM_ERROR("Invalid encoder for type %d\n", dsi_config->type);
+		return NULL;
+	}
+
+	dsi_config->encoder = encoder;
+
+	return &encoder->base;
+}
+
+/*DSI connector funcs*/
+static const struct drm_connector_funcs mdfld_dsi_connector_funcs = {
+	.dpms = mdfld_dsi_connector_dpms,
+	.save = mdfld_dsi_connector_save,
+	.restore = mdfld_dsi_connector_restore,
+	.detect = mdfld_dsi_connector_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.set_property = mdfld_dsi_connector_set_property,
+	.destroy = mdfld_dsi_connector_destroy,
+};
+
+/*DSI connector helper funcs*/
+static const
+struct drm_connector_helper_funcs mdfld_dsi_connector_helper_funcs = {
+	.get_modes = mdfld_dsi_connector_get_modes,
+	.mode_valid = mdfld_dsi_connector_mode_valid,
+	.best_encoder = mdfld_dsi_connector_best_encoder,
+};
+
+static int mdfld_dsi_get_default_config(struct drm_device *dev,
+		struct mdfld_dsi_config *config, int pipe)
+{
+	if (!dev || !config) {
+		DRM_ERROR("Invalid parameters");
+		return -EINVAL;
+	}
+
+	config->bpp = 24;
+	config->type = is_panel_vid_or_cmd(dev);
+	config->lane_count = 2;
+	config->lane_config = MDFLD_DSI_DATA_LANE_2_2;
+
+	config->channel_num = 0;
+	config->video_mode = MDFLD_DSI_VIDEO_BURST_MODE;
+
+	return 0;
+}
+
+static int mdfld_dsi_regs_init(struct mdfld_dsi_config *dsi_config,
+		int pipe)
+{
+	struct mdfld_dsi_hw_registers *regs;
+	u32 reg_offset;
+
+	if (!dsi_config) {
+		DRM_ERROR("dsi_config is null\n");
+		return -EINVAL;
+	}
+
+	regs = &dsi_config->regs;
+
+	regs->vgacntr_reg = VGACNTRL;
+	regs->dpll_reg = MRST_DPLL_A;
+	regs->fp_reg = MRST_FPA0;
+
+	regs->ovaadd_reg = OV_OVADD;
+	regs->ovcadd_reg = OVC_OVADD;
+	regs->ddl1_reg = DDL1;
+	regs->ddl2_reg = DDL2;
+	regs->ddl3_reg = DDL3;
+	regs->ddl4_reg = DDL4;
+
+	if (pipe == 0) {
+		regs->dspcntr_reg = DSPACNTR;
+		regs->dspsize_reg = DSPASIZE;
+		regs->dspsurf_reg = DSPASURF;
+		regs->dsplinoff_reg = DSPALINOFF;
+		regs->dsppos_reg = DSPAPOS;
+		regs->dspstride_reg = DSPASTRIDE;
+		regs->color_coef_reg = PIPEA_COLOR_COEF0;
+		regs->htotal_reg = HTOTAL_A;
+		regs->hblank_reg = HBLANK_A;
+		regs->hsync_reg = HSYNC_A;
+		regs->vtotal_reg = VTOTAL_A;
+		regs->vblank_reg = VBLANK_A;
+		regs->vsync_reg = VSYNC_A;
+		regs->pipesrc_reg = PIPEASRC;
+		regs->pipeconf_reg = PIPEACONF;
+		regs->pipestat_reg = PIPEASTAT;
+		regs->mipi_reg = MIPI;
+		regs->palette_reg = PALETTE_A;
+		regs->gamma_red_max_reg = GAMMA_RED_MAX_A;
+		regs->gamma_green_max_reg = GAMMA_GREEN_MAX_A;
+		regs->gamma_blue_max_reg = GAMMA_BLUE_MAX_A;
+		reg_offset = 0;
+	} else if (pipe == 2) {
+		regs->dspcntr_reg = DSPCCNTR;
+		regs->dspsize_reg = DSPCSIZE;
+		regs->dspsurf_reg = DSPCSURF;
+		regs->dsplinoff_reg = DSPCLINOFF;
+		regs->dsppos_reg = DSPCPOS;
+		regs->dspstride_reg = DSPCSTRIDE;
+		regs->color_coef_reg = PIPEC_COLOR_COEF0;
+		regs->htotal_reg = HTOTAL_C;
+		regs->hblank_reg = HBLANK_C;
+		regs->hsync_reg = HSYNC_C;
+		regs->vtotal_reg = VTOTAL_C;
+		regs->vblank_reg = VBLANK_C;
+		regs->vsync_reg = VSYNC_C;
+		regs->pipesrc_reg = PIPECSRC;
+		regs->pipeconf_reg = PIPECCONF;
+		regs->pipestat_reg = PIPECSTAT;
+		regs->mipi_reg = MIPI_C;
+		regs->palette_reg = PALETTE_C;
+		regs->gamma_red_max_reg = GAMMA_RED_MAX_C;
+		regs->gamma_green_max_reg = GAMMA_GREEN_MAX_C;
+		regs->gamma_blue_max_reg = GAMMA_BLUE_MAX_C;
+
+		reg_offset = MIPIC_REG_OFFSET;
+	} else {
+		DRM_ERROR("Wrong pipe\n");
+		return -EINVAL;
+	}
+
+	regs->device_ready_reg = MIPIA_DEVICE_READY_REG + reg_offset;
+	regs->intr_stat_reg = MIPIA_INTR_STAT_REG + reg_offset;
+	regs->intr_en_reg = MIPIA_INTR_EN_REG + reg_offset;
+	regs->dsi_func_prg_reg = MIPIA_DSI_FUNC_PRG_REG + reg_offset;
+	regs->hs_tx_timeout_reg = MIPIA_HS_TX_TIMEOUT_REG + reg_offset;
+	regs->lp_rx_timeout_reg = MIPIA_LP_RX_TIMEOUT_REG + reg_offset;
+	regs->turn_around_timeout_reg =
+		MIPIA_TURN_AROUND_TIMEOUT_REG + reg_offset;
+	regs->device_reset_timer_reg =
+		MIPIA_DEVICE_RESET_TIMER_REG + reg_offset;
+	regs->dpi_resolution_reg = MIPIA_DPI_RESOLUTION_REG + reg_offset;
+	regs->dbi_fifo_throttle_reg =
+		MIPIA_DBI_FIFO_THROTTLE_REG + reg_offset;
+	regs->hsync_count_reg = MIPIA_HSYNC_COUNT_REG + reg_offset;
+	regs->hbp_count_reg = MIPIA_HBP_COUNT_REG + reg_offset;
+	regs->hfp_count_reg = MIPIA_HFP_COUNT_REG + reg_offset;
+	regs->hactive_count_reg = MIPIA_HACTIVE_COUNT_REG + reg_offset;
+	regs->vsync_count_reg = MIPIA_VSYNC_COUNT_REG + reg_offset;
+	regs->vbp_count_reg = MIPIA_VBP_COUNT_REG + reg_offset;
+	regs->vfp_count_reg = MIPIA_VFP_COUNT_REG + reg_offset;
+	regs->high_low_switch_count_reg =
+		MIPIA_HIGH_LOW_SWITCH_COUNT_REG + reg_offset;
+	regs->dpi_control_reg = MIPIA_DPI_CONTROL_REG + reg_offset;
+	regs->dpi_data_reg = MIPIA_DPI_DATA_REG + reg_offset;
+	regs->init_count_reg = MIPIA_INIT_COUNT_REG + reg_offset;
+	regs->max_return_pack_size_reg =
+		MIPIA_MAX_RETURN_PACK_SIZE_REG + reg_offset;
+	regs->video_mode_format_reg =
+		MIPIA_VIDEO_MODE_FORMAT_REG + reg_offset;
+	regs->eot_disable_reg = MIPIA_EOT_DISABLE_REG + reg_offset;
+	regs->lp_byteclk_reg = MIPIA_LP_BYTECLK_REG + reg_offset;
+	regs->lp_gen_data_reg = MIPIA_LP_GEN_DATA_REG + reg_offset;
+	regs->hs_gen_data_reg = MIPIA_HS_GEN_DATA_REG + reg_offset;
+	regs->lp_gen_ctrl_reg = MIPIA_LP_GEN_CTRL_REG + reg_offset;
+	regs->hs_gen_ctrl_reg = MIPIA_HS_GEN_CTRL_REG + reg_offset;
+	regs->gen_fifo_stat_reg = MIPIA_GEN_FIFO_STAT_REG + reg_offset;
+	regs->hs_ls_dbi_enable_reg =
+		MIPIA_HS_LS_DBI_ENABLE_REG + reg_offset;
+	regs->dphy_param_reg = MIPIA_DPHY_PARAM_REG + reg_offset;
+	regs->dbi_bw_ctrl_reg = MIPIA_DBI_BW_CTRL_REG + reg_offset;
+	regs->clk_lane_switch_time_cnt_reg =
+		MIPIA_CLK_LANE_SWITCH_TIME_CNT_REG + reg_offset;
+
+	regs->mipi_control_reg = MIPIA_CONTROL_REG + reg_offset;
+	regs->mipi_data_addr_reg = MIPIA_DATA_ADD_REG + reg_offset;
+	regs->mipi_data_len_reg = MIPIA_DATA_LEN_REG + reg_offset;
+	regs->mipi_cmd_addr_reg = MIPIA_CMD_ADD_REG + reg_offset;
+	regs->mipi_cmd_len_reg = MIPIA_CMD_LEN_REG + reg_offset;
+	regs->histogram_intr_ctrl_reg = HISTOGRAM_INT_CONTROL;
+	regs->histogram_logic_ctrl_reg = HISTOGRAM_LOGIC_CONTROL;
+	regs->aimg_enhance_bin_reg = HISTOGRAM_BIN_DATA;
+	//regs->lvds_port_ctrl_reg = LVDS_PORT_CTRL;
+
+	return 0;
+}
+
+/*
+ * Returns the panel fixed mode from configuration. 
+ */
+struct drm_display_mode *
+mdfld_dsi_get_configuration_mode(struct mdfld_dsi_config *dsi_config, int pipe)
+{
+	struct drm_device *dev = dsi_config->dev;
+	struct drm_display_mode *mode;
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) dev->dev_private;
+	struct mrst_timing_info *ti = &dev_priv->gct_data.DTD;
+	bool use_gct = false;
+	if (IS_CTP(dev))
+		use_gct = true;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+	if (!mode)
+		return NULL;
+
+	if (use_gct) {
+		PSB_DEBUG_ENTRY("gct find MIPI panel. \n");
+
+		mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo;
+		mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo;
+		mode->hsync_start = mode->hdisplay +
+				    ((ti->hsync_offset_hi << 8) |
+				     ti->hsync_offset_lo);
+		mode->hsync_end = mode->hsync_start +
+				  ((ti->hsync_pulse_width_hi << 8) |
+				   ti->hsync_pulse_width_lo);
+		mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) |
+				ti->hblank_lo);
+		mode->vsync_start = mode->vdisplay +
+			((ti->vsync_offset_hi << 8) |
+			 ti->vsync_offset_lo);
+		mode->vsync_end = mode->vsync_start +
+			((ti->vsync_pulse_width_hi << 8) |
+			 ti->vsync_pulse_width_lo);
+		mode->vtotal = mode->vdisplay +
+			       ((ti->vblank_hi << 8) | ti->vblank_lo);
+		mode->clock = ti->pixel_clock * 10;
+
+		PSB_DEBUG_ENTRY("hdisplay is %d\n", mode->hdisplay);
+		PSB_DEBUG_ENTRY("vdisplay is %d\n", mode->vdisplay);
+		PSB_DEBUG_ENTRY("HSS is %d\n", mode->hsync_start);
+		PSB_DEBUG_ENTRY("HSE is %d\n", mode->hsync_end);
+		PSB_DEBUG_ENTRY("htotal is %d\n", mode->htotal);
+		PSB_DEBUG_ENTRY("VSS is %d\n", mode->vsync_start);
+		PSB_DEBUG_ENTRY("VSE is %d\n", mode->vsync_end);
+		PSB_DEBUG_ENTRY("vtotal is %d\n", mode->vtotal);
+		PSB_DEBUG_ENTRY("clock is %d\n", mode->clock);
+	} else {
+		if (dsi_config->type == MDFLD_DSI_ENCODER_DPI) {
+			mode->hdisplay = 864;
+			mode->vdisplay = 480;
+			mode->hsync_start = 873;
+			mode->hsync_end = 876;
+			mode->htotal = 887;
+			mode->vsync_start = 487;
+			mode->vsync_end = 490;
+			mode->vtotal = 499;
+			mode->clock = 33264;
+		} else if (dsi_config->type == MDFLD_DSI_ENCODER_DBI) {
+			mode->hdisplay = 864;
+			mode->vdisplay = 480;
+			mode->hsync_start = 872;
+			mode->hsync_end = 876;
+			mode->htotal = 884;
+			mode->vsync_start = 482;
+			mode->vsync_end = 494;
+			mode->vtotal = 486;
+			mode->clock = 25777;
+		}
+	}
+
+	drm_mode_set_name(mode);
+	drm_mode_set_crtcinfo(mode, 0);
+
+	mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+	return mode;
+}
+
+/*
+ * MIPI output init
+ * @dev drm device
+ * @pipe pipe number. 0 or 2
+ * @config 
+ * 
+ * Do the initialization of a MIPI output, including create DRM mode objects
+ * initialization of DSI output on @pipe 
+ */
+int mdfld_dsi_output_init(struct drm_device *dev,
+		int pipe,
+		struct mdfld_dsi_config *config,
+		struct panel_funcs *p_funcs)
+{
+	struct mdfld_dsi_config *dsi_config;
+	struct mdfld_dsi_connector *dsi_connector;
+	struct psb_intel_output *psb_output;
+	struct drm_connector *connector;
+	struct mdfld_dsi_encoder *encoder;
+	struct drm_psb_private *dev_priv;
+	struct panel_info dsi_panel_info;
+	u32 width_mm, height_mm;
+
+	PSB_DEBUG_ENTRY("init DSI output on pipe %d\n", pipe);
+
+	if (!dev || ((pipe != 0) && (pipe != 2))) {
+		DRM_ERROR("Invalid parameter\n");
+		return -EIO;
+	}
+
+	dev_priv = dev->dev_private;
+
+	/*create a new connetor*/
+	dsi_connector = kzalloc(sizeof(struct mdfld_dsi_connector), GFP_KERNEL);
+	if (!dsi_connector) {
+		DRM_ERROR("No memory");
+		return -ENOMEM;
+	}
+
+	dsi_connector->pipe =  pipe;
+
+	/*set DSI config*/
+	if (config)
+		dsi_config = config;
+	else {
+		dsi_config = kzalloc(sizeof(struct mdfld_dsi_config),
+				GFP_KERNEL);
+		if (!dsi_config) {
+			DRM_ERROR("cannot allocate memory for DSI config\n");
+			goto dsi_init_err0;
+		}
+
+		mdfld_dsi_get_default_config(dev, dsi_config, pipe);
+	}
+
+	/*init DSI regs*/
+	mdfld_dsi_regs_init(dsi_config, pipe);
+
+	/*init DSI HW context lock*/
+	mutex_init(&dsi_config->context_lock);
+
+	dsi_connector->private = dsi_config;
+
+	dsi_config->pipe = pipe;
+	dsi_config->changed = 1;
+	dsi_config->dev = dev;
+
+	/*init fixed mode basing on DSI config type*/
+	dsi_panel_info.panel_180_rotation = false;
+	if (dsi_config->type == MDFLD_DSI_ENCODER_DBI) {
+		dsi_config->fixed_mode = p_funcs->get_config_mode();
+		p_funcs->get_panel_info(pipe, &dsi_panel_info);
+	} else if (dsi_config->type == MDFLD_DSI_ENCODER_DPI) {
+		dsi_config->fixed_mode = p_funcs->get_config_mode();
+		p_funcs->get_panel_info(pipe, &dsi_panel_info);
+	}
+
+	width_mm = dsi_panel_info.width_mm;
+	height_mm = dsi_panel_info.height_mm;
+	dev_priv->panel_180_rotation = dsi_panel_info.panel_180_rotation;
+
+	dsi_config->mode = dsi_config->fixed_mode;
+	dsi_config->connector = dsi_connector;
+
+	if (!dsi_config->fixed_mode) {
+		DRM_ERROR("No pannel fixed mode was found\n");
+		goto dsi_init_err0;
+	}
+
+	if (pipe && dev_priv->dsi_configs[0])
+		dev_priv->dsi_configs[1] = dsi_config;
+	else if (pipe == 0)
+		dev_priv->dsi_configs[0] = dsi_config;
+	else {
+		DRM_ERROR("Trying to init MIPI1 before MIPI0\n");
+		goto dsi_init_err0;
+	}
+
+	/*init drm connector object*/
+	psb_output = &dsi_connector->base;
+
+	psb_output->type = (pipe == 0) ? INTEL_OUTPUT_MIPI : INTEL_OUTPUT_MIPI2;
+
+	connector = &psb_output->base;
+	drm_connector_init(dev, connector,
+			&mdfld_dsi_connector_funcs,
+			DRM_MODE_CONNECTOR_DSI);
+
+	drm_connector_helper_add(connector, &mdfld_dsi_connector_helper_funcs);
+
+	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+	connector->display_info.width_mm = width_mm;
+	connector->display_info.height_mm = height_mm;
+	connector->interlace_allowed = false;
+	connector->doublescan_allowed = false;
+
+	/*attach properties*/
+	drm_object_attach_property(&connector->base,
+			dev->mode_config.scaling_mode_property,
+			DRM_MODE_SCALE_FULLSCREEN);
+
+	drm_object_attach_property(&connector->base,
+			dev_priv->backlight_property,
+			MDFLD_DSI_BRIGHTNESS_MAX_LEVEL);
+
+	/*init DSI package sender on this output*/
+	if (mdfld_dsi_pkg_sender_init(dsi_connector, pipe)) {
+		DRM_ERROR("Package Sender initialization failed on pipe %d\n",
+				pipe);
+		goto dsi_init_err0;
+	}
+
+#if 0
+	/*init panel error detector*/
+	if (mdfld_dsi_error_detector_init(dev, dsi_connector)) {
+		DRM_ERROR("Failed to init dsi_error detector");
+		goto dsi_init_err1;
+	}
+#endif
+
+	/*create DBI & DPI encoders*/
+	if (dsi_config->type == MDFLD_DSI_ENCODER_DBI) {
+		encoder = mdfld_dsi_dbi_init(dev, dsi_connector, p_funcs);
+		if (!encoder) {
+			DRM_ERROR("Create DBI encoder failed\n");
+			goto dsi_init_err2;
+		}
+		encoder->private = dsi_config;
+		dsi_config->encoders[MDFLD_DSI_ENCODER_DBI] = encoder;
+
+		if (pipe == 2)
+			dev_priv->encoder2 = encoder;
+
+		if (pipe == 0)
+			dev_priv->encoder0 = encoder;
+	} else if (dsi_config->type == MDFLD_DSI_ENCODER_DPI) {
+		encoder = mdfld_dsi_dpi_init(dev, dsi_connector, p_funcs);
+		if (!encoder) {
+			DRM_ERROR("Create DPI encoder failed\n");
+			goto dsi_init_err2;
+		}
+		encoder->private = dsi_config;
+		dsi_config->encoders[MDFLD_DSI_ENCODER_DPI] = encoder;
+
+		if (pipe == 2)
+			dev_priv->encoder2 = encoder;
+
+		if (pipe == 0)
+			dev_priv->encoder0 = encoder;
+	}
+
+	drm_sysfs_connector_add(connector);
+
+	/* DPST: TODO - get appropriate connector */
+	if (dev_priv->dpst_connector == 0)
+		dev_priv->dpst_connector = connector;
+
+	/*init dsr*/
+	if (mdfld_dsi_dsr_init(dsi_config))
+		DRM_INFO("%s: Failed to initialize DSR\n", __func__);
+
+	PSB_DEBUG_ENTRY("successfully\n");
+	return 0;
+
+	/*TODO: add code to destroy outputs on error*/
+dsi_init_err2:
+	mdfld_dsi_error_detector_exit(dsi_connector);
+
+#if 0
+dsi_init_err1:
+#endif
+	/*destroy sender*/
+	mdfld_dsi_pkg_sender_destroy(dsi_connector->pkg_sender);
+
+	drm_connector_cleanup(connector);
+
+	if (dsi_config->fixed_mode)
+		kfree(dsi_config->fixed_mode);
+
+	if (dsi_config) {
+		kfree(dsi_config);
+		if (pipe)
+			dev_priv->dsi_configs[1] = NULL;
+		else
+			dev_priv->dsi_configs[0] = NULL;
+	}
+
+dsi_init_err0:
+	if (dsi_connector)
+		kfree(dsi_connector);
+
+	return -EIO;
+}
+
+void mdfld_dsi_set_drain_latency(struct drm_encoder *encoder,
+		struct drm_display_mode *mode)
+{
+	struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
+	struct mdfld_dsi_config *dsi_config =
+		mdfld_dsi_encoder_get_config(dsi_encoder);
+
+	struct mdfld_dsi_hw_context *ctx;
+
+	if (!dsi_config) {
+                DRM_ERROR("Invalid parameters\n");
+                return;
+        }
+
+	ctx = &dsi_config->dsi_hw_context;
+	if (dsi_config->pipe == 0) {
+		mutex_lock(&dsi_config->context_lock);
+#if 0
+		if ((mode->hdisplay == 720) && (mode->vdisplay == 1280))
+			drain_rate = ACTUAL_DRAIN_RATE_7x12;
+		else if ((mode->hdisplay == 1080) && (mode->vdisplay == 1920))
+			drain_rate = ACTUAL_DRAIN_RATE_10x19;
+		else if ((mode->hdisplay == 2560) && (mode->vdisplay == 1600))
+			drain_rate = ACTUAL_DRAIN_RATE_25x16;
+		if (drain_rate != 0) {
+			value = ((64 * 32 / drain_rate) & 0xFF) | 0x80;
+			ctx->ddl1 = value | (HDMI_SPRITE_DEADLINE << 8) |
+					(value << 24);
+			ctx->ddl2 = value | (HDMI_OVERLAY_DEADLINE << 8);
+			ctx->ddl3 = 0;
+			ctx->ddl4 = value | (value << 8);
+			ctx->ddl1 = 0x83838383;
+			ctx->ddl2 = 0x83838383;
+			ctx->ddl3 = 0x83;
+			ctx->ddl4 = 0x8383;
+
+		}
+#endif
+		ctx->ddl1 = 0x86868686;
+		ctx->ddl2 = 0x86868686;
+		ctx->ddl3 = 0x86;
+		ctx->ddl4 = 0x8686;
+
+		/* init for 1st boot, 12KB for plane A D E F */
+		ctx->dsparb = 0xc0300c0;
+		ctx->dsparb2 = 0x90180;
+
+		mutex_unlock(&dsi_config->context_lock);
+	}
+
+	return;
+}
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/mdfld_dsi_output.h b/drivers/external_drivers/intel_media/display/tng/drv/mdfld_dsi_output.h
new file mode 100644
index 0000000..8269e34
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/mdfld_dsi_output.h
@@ -0,0 +1,582 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * jim liu <jim.liu@intel.com>
+ * Jackie Li<yaodong.li@intel.com>
+ */
+
+#ifndef __MDFLD_DSI_OUTPUT_H__
+#define __MDFLD_DSI_OUTPUT_H__
+
+#include <linux/backlight.h>
+#include <linux/version.h>
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+
+#include "psb_drv.h"
+#include "psb_intel_display.h"
+#include "psb_intel_reg.h"
+#include "psb_powermgmt.h"
+
+#include <asm/intel-mid.h>
+#include "mdfld_output.h"
+
+/*mdfld DSI controller registers*/
+#define MIPIA_DEVICE_READY_REG				0xb000
+#define MIPIA_INTR_STAT_REG				0xb004
+#define MIPIA_INTR_EN_REG				0xb008
+#define MIPIA_DSI_FUNC_PRG_REG				0xb00c
+#define MIPIA_HS_TX_TIMEOUT_REG				0xb010
+#define MIPIA_LP_RX_TIMEOUT_REG				0xb014
+#define MIPIA_TURN_AROUND_TIMEOUT_REG			0xb018
+#define MIPIA_DEVICE_RESET_TIMER_REG			0xb01c
+#define MIPIA_DPI_RESOLUTION_REG			0xb020
+#define MIPIA_DBI_FIFO_THROTTLE_REG			0xb024
+#define MIPIA_HSYNC_COUNT_REG				0xb028
+#define MIPIA_HBP_COUNT_REG				0xb02c
+#define MIPIA_HFP_COUNT_REG				0xb030
+#define MIPIA_HACTIVE_COUNT_REG				0xb034
+#define MIPIA_VSYNC_COUNT_REG				0xb038
+#define MIPIA_VBP_COUNT_REG				0xb03c
+#define MIPIA_VFP_COUNT_REG				0xb040
+#define MIPIA_HIGH_LOW_SWITCH_COUNT_REG			0xb044
+#define MIPIA_DPI_CONTROL_REG				0xb048
+#define MIPIA_DPI_DATA_REG				0xb04c
+#define MIPIA_INIT_COUNT_REG				0xb050
+#define MIPIA_MAX_RETURN_PACK_SIZE_REG			0xb054
+#define MIPIA_VIDEO_MODE_FORMAT_REG			0xb058
+#define MIPIA_EOT_DISABLE_REG				0xb05c
+#define CLOCK_STOP					(0x1 << 1)
+#define DSI_EOT_DISABLE_MASK                            (0xff)
+
+#define MIPIA_LP_BYTECLK_REG				0xb060
+#define MIPIA_LP_GEN_DATA_REG				0xb064
+#define MIPIA_HS_GEN_DATA_REG				0xb068
+#define MIPIA_LP_GEN_CTRL_REG				0xb06c
+#define MIPIA_HS_GEN_CTRL_REG				0xb070
+#define MIPIA_GEN_FIFO_STAT_REG				0xb074
+#define MIPIA_HS_LS_DBI_ENABLE_REG			0xb078
+#define MIPIA_DPHY_PARAM_REG				0xb080
+#define MIPIA_DBI_BW_CTRL_REG				0xb084
+#define MIPIA_CLK_LANE_SWITCH_TIME_CNT_REG		0xb088
+
+#define DSI_DEVICE_READY				(0x1)
+#define DSI_POWER_STATE_ULPS_ENTER			(0x2 << 1)
+#define DSI_POWER_STATE_ULPS_EXIT			(0x1 << 1)
+#define DSI_POWER_STATE_ULPS_MASK			(0x3 << 1)
+
+
+#define DSI_ONE_DATA_LANE				(0x1)
+#define DSI_TWO_DATA_LANE				(0x2)
+#define DSI_THREE_DATA_LANE				(0X3)
+#define DSI_FOUR_DATA_LANE				(0x4)
+#define DSI_DPI_VIRT_CHANNEL_OFFSET			(0x3)
+#define DSI_DBI_VIRT_CHANNEL_OFFSET			(0x5)
+#define DSI_DPI_COLOR_FORMAT_RGB565			(0x01 << 7)
+#define DSI_DPI_COLOR_FORMAT_RGB666			(0x02 << 7)
+#define DSI_DPI_COLOR_FORMAT_RGB666_UNPACK		(0x03 << 7)
+#define DSI_DPI_COLOR_FORMAT_RGB888			(0x04 << 7)
+#define DSI_DBI_COLOR_FORMAT_OPTION2			(0x05 << 13)
+
+#define DSI_INTR_STATE_RXSOTERROR			BIT0
+
+#define DSI_INTR_STATE_SPL_PKG_SENT			BIT30
+#define DSI_INTR_STATE_TE				BIT31
+
+#define DSI_HS_TX_TIMEOUT_MASK				(0xffffff)
+
+#define DSI_LP_RX_TIMEOUT_MASK				(0xffffff)
+
+#define DSI_TURN_AROUND_TIMEOUT_MASK			(0x3f)
+
+#define DSI_RESET_TIMER_MASK				(0xffff)
+
+#define DSI_DBI_FIFO_WM_HALF				(0x0)
+#define DSI_DBI_FIFO_WM_QUARTER				(0x1)
+#define DSI_DBI_FIFO_WM_LOW				(0x2)
+
+#define DSI_DPI_TIMING_MASK				(0xffff)
+
+#define DSI_INIT_TIMER_MASK				(0xffff)
+
+#define DSI_DBI_RETURN_PACK_SIZE_MASK			(0x3ff)
+
+#define DSI_LP_BYTECLK_MASK				(0x0ffff)
+
+#define DSI_HS_CTRL_GEN_SHORT_W0			(0x03)
+#define DSI_HS_CTRL_GEN_SHORT_W1			(0x13)
+#define DSI_HS_CTRL_GEN_SHORT_W2			(0x23)
+#define DSI_HS_CTRL_GEN_R0				(0x04)
+#define DSI_HS_CTRL_GEN_R1				(0x14)
+#define DSI_HS_CTRL_GEN_R2				(0x24)
+#define DSI_HS_CTRL_GEN_LONG_W				(0x29)
+#define DSI_HS_CTRL_MCS_SHORT_W0			(0x05)
+#define DSI_HS_CTRL_MCS_SHORT_W1			(0x15)
+#define DSI_HS_CTRL_MCS_R0				(0x06)
+#define DSI_HS_CTRL_MCS_LONG_W				(0x39)
+#define DSI_HS_CTRL_VC_OFFSET				(0x06)
+#define DSI_HS_CTRL_WC_OFFSET				(0x08)
+
+#define	DSI_FIFO_GEN_HS_DATA_FULL			BIT0
+#define DSI_FIFO_GEN_HS_DATA_HALF_EMPTY			BIT1
+#define DSI_FIFO_GEN_HS_DATA_EMPTY			BIT2
+#define DSI_FIFO_GEN_LP_DATA_FULL			BIT8
+#define DSI_FIFO_GEN_LP_DATA_HALF_EMPTY			BIT9
+#define DSI_FIFO_GEN_LP_DATA_EMPTY			BIT10
+#define DSI_FIFO_GEN_HS_CTRL_FULL			BIT16
+#define DSI_FIFO_GEN_HS_CTRL_HALF_EMPTY			BIT17
+#define DSI_FIFO_GEN_HS_CTRL_EMPTY			BIT18
+#define DSI_FIFO_GEN_LP_CTRL_FULL			BIT24
+#define DSI_FIFO_GEN_LP_CTRL_HALF_EMPTY			BIT25
+#define DSI_FIFO_GEN_LP_CTRL_EMPTY			BIT26
+#define DSI_FIFO_DBI_EMPTY				BIT27
+#define DSI_FIFO_DPI_EMPTY				BIT28
+
+#define DSI_DBI_HS_LP_SWITCH_MASK			(0x1)
+
+#define DSI_HS_LP_SWITCH_COUNTER_OFFSET			(0x0)
+#define DSI_LP_HS_SWITCH_COUNTER_OFFSET			(0x16)
+
+#define DSI_DPI_CTRL_HS_SHUTDOWN			(0x00000001)
+#define DSI_DPI_CTRL_HS_TURN_ON				(0x00000002)
+
+/*mdfld DSI adapter registers*/
+#define MIPIA_CONTROL_REG				0xb104
+#define MIPIA_DATA_ADD_REG				0xb108
+#define MIPIA_DATA_LEN_REG				0xb10c
+#define MIPIA_CMD_ADD_REG				0xb110
+#define MIPIA_CMD_LEN_REG				0xb114
+
+/*DSI data lane configuration*/
+enum {
+	MDFLD_DSI_DATA_LANE_4_0 = 0,
+	MDFLD_DSI_DATA_LANE_3_1 = 1,
+	MDFLD_DSI_DATA_LANE_2_2 = 2,
+};
+
+enum {
+	RESET_FROM_BOOT_UP = 0,
+	RESET_FROM_OSPM_RESUME,
+} ;
+
+enum {
+	MDFLD_DSI_VIDEO_NON_BURST_MODE_SYNC_PULSE = 1,
+	MDFLD_DSI_VIDEO_NON_BURST_MODE_SYNC_EVENTS = 2,
+	MDFLD_DSI_VIDEO_BURST_MODE = 3,
+};
+
+#define DSI_DPI_COMPLETE_LAST_LINE			BIT2
+#define DSI_DPI_DISABLE_BTA					BIT3
+
+struct mdfld_dsi_connector_state {
+	u32 mipi_ctrl_reg;
+};
+
+struct mdfld_dsi_encoder_state {
+
+};
+
+struct mdfld_dsi_connector {
+	/*
+	 * This is ugly, but I have to use connector in it! :-(
+	 * FIXME: use drm_connector instead.
+	 */
+	struct psb_intel_output base;
+
+	int pipe;
+	void *private;
+	void *pkg_sender;
+	void *err_detector;
+
+	/*connection status*/
+	enum drm_connector_status status;
+};
+
+struct mdfld_dsi_encoder {
+	struct drm_encoder base;
+	void *private;
+};
+
+/*display controller hardware context on a pipe*/
+struct mdfld_dsi_hw_context {
+	u32 vgacntr;
+
+	/*plane*/
+	u32 dspcntr;
+	u32 dspsize;
+	u32 dspsurf;
+	u32 dsppos;
+	u32 dspstride;
+	u32 dsplinoff;
+
+	/*plane*/
+	u32 sprite_dspcntr;
+	u32 sprite_dspsize;
+	u32 sprite_dspsurf;
+	u32 sprite_dsppos;
+	u32 sprite_dspstride;
+	u32 sprite_dsplinoff;
+
+	/*Drain Latency*/
+	u32 ddl1;
+	u32 ddl2;
+	u32 ddl3;
+	u32 ddl4;
+
+	u32 dsparb;
+	u32 dsparb2;
+
+	/*overlay*/
+	u32 ovaadd;
+	u32 ovcadd;
+
+	/* gamma and csc */
+	u32 palette[256];
+	u32 color_coef[6];
+
+	/*pipe regs*/
+	u32 htotal;
+	u32 hblank;
+	u32 hsync;
+	u32 vtotal;
+	u32 vblank;
+	u32 vsync;
+	u32 pipestat;
+
+	u32 pipesrc;
+
+	u32 dpll;
+	u32 fp;
+	u32 pipeconf;
+
+	/*mipi port*/
+	u32 mipi;
+
+	/*DSI controller regs*/
+	u32 device_ready;
+	u32 intr_stat;
+	u32 intr_en;
+	u32 dsi_func_prg;
+	u32 hs_tx_timeout;
+	u32 lp_rx_timeout;
+	u32 turn_around_timeout;
+	u32 device_reset_timer;
+	u32 dpi_resolution;
+	u32 dbi_fifo_throttle;
+	u32 hsync_count;
+	u32 hbp_count;
+	u32 hfp_count;
+	u32 hactive_count;
+	u32 vsync_count;
+	u32 vbp_count;
+	u32 vfp_count;
+	u32 high_low_switch_count;
+	u32 dpi_control;
+	u32 dpi_data;
+	u32 init_count;
+	u32 max_return_pack_size;
+	u32 video_mode_format;
+	u32 eot_disable;
+	u32 lp_byteclk;
+	u32 lp_gen_data;
+	u32 hs_gen_data;
+	u32 lp_gen_ctrl;
+	u32 hs_gen_ctrl;
+	u32 gen_fifo_stat;
+	u32 hs_ls_dbi_enable;
+	u32 dphy_param;
+	u32 dbi_bw_ctrl;
+	u32 clk_lane_switch_time_cnt;
+
+	/*MIPI adapter regs*/
+	u32 mipi_control;
+	u32 mipi_data_addr;
+	u32 mipi_data_len;
+	u32 mipi_cmd_addr;
+	u32 mipi_cmd_len;
+
+	/*panel status*/
+	int panel_on;
+	int backlight_level;
+
+	u32 pll_bypass_mode;
+	u32 cck_div;
+	/*brightness*/
+	int lastbrightnesslevel;
+
+	/*dpst register values*/
+	u32 histogram_intr_ctrl;
+	u32 histogram_logic_ctrl;
+	u32 aimg_enhance_bin;
+	u32 lvds_port_ctrl;
+
+};
+
+struct mdfld_dsi_hw_registers {
+	u32 vgacntr_reg;
+
+	/*plane*/
+	u32 dspcntr_reg;
+	u32 dspsize_reg;
+	u32 dspsurf_reg;
+	u32 dsplinoff_reg;
+	u32 dsppos_reg;
+	u32 dspstride_reg;
+
+	/*Drain Latency*/
+	u32 ddl1_reg;
+	u32 ddl2_reg;
+	u32 ddl3_reg;
+	u32 ddl4_reg;
+
+	/*overlay*/
+	u32 ovaadd_reg;
+	u32 ovcadd_reg;
+
+	/* csc */
+	u32 color_coef_reg;
+
+	/*pipe regs*/
+	u32 htotal_reg;
+	u32 hblank_reg;
+	u32 hsync_reg;
+	u32 vtotal_reg;
+	u32 vblank_reg;
+	u32 vsync_reg;
+	u32 pipestat_reg;
+
+	u32 pipesrc_reg;
+
+	u32 dpll_reg;
+	u32 fp_reg;
+	u32 pipeconf_reg;
+	u32 palette_reg;
+	u32 gamma_red_max_reg;
+	u32 gamma_green_max_reg;
+	u32 gamma_blue_max_reg;
+
+	/*mipi port*/
+	u32 mipi_reg;
+
+	/*DSI controller regs*/
+	u32 device_ready_reg;
+	u32 intr_stat_reg;
+	u32 intr_en_reg;
+	u32 dsi_func_prg_reg;
+	u32 hs_tx_timeout_reg;
+	u32 lp_rx_timeout_reg;
+	u32 turn_around_timeout_reg;
+	u32 device_reset_timer_reg;
+	u32 dpi_resolution_reg;
+	u32 dbi_fifo_throttle_reg;
+	u32 hsync_count_reg;
+	u32 hbp_count_reg;
+	u32 hfp_count_reg;
+	u32 hactive_count_reg;
+	u32 vsync_count_reg;
+	u32 vbp_count_reg;
+	u32 vfp_count_reg;
+	u32 high_low_switch_count_reg;
+	u32 dpi_control_reg;
+	u32 dpi_data_reg;
+	u32 init_count_reg;
+	u32 max_return_pack_size_reg;
+	u32 video_mode_format_reg;
+	u32 eot_disable_reg;
+	u32 lp_byteclk_reg;
+	u32 lp_gen_data_reg;
+	u32 hs_gen_data_reg;
+	u32 lp_gen_ctrl_reg;
+	u32 hs_gen_ctrl_reg;
+	u32 gen_fifo_stat_reg;
+	u32 hs_ls_dbi_enable_reg;
+	u32 dphy_param_reg;
+	u32 dbi_bw_ctrl_reg;
+	u32 clk_lane_switch_time_cnt_reg;
+
+	/*MIPI adapter regs*/
+	u32 mipi_control_reg;
+	u32 mipi_data_addr_reg;
+	u32 mipi_data_len_reg;
+	u32 mipi_cmd_addr_reg;
+	u32 mipi_cmd_len_reg;
+
+	/*dpst registers*/
+	u32 histogram_intr_ctrl_reg;
+	u32 histogram_logic_ctrl_reg;
+	u32 aimg_enhance_bin_reg;
+	u32 lvds_port_ctrl_reg;
+};
+
+#define NO_GAMMA_CSC			0x0
+#define ENABLE_GAMMA			(0x1 << 0)
+#define ENABLE_CSC			(0x1 << 1)
+#define ENABLE_GAMMA_CSC		(ENABLE_GAMMA | ENABLE_CSC)
+/*
+ * DSI config, consists of one DSI connector, two DSI encoders.
+ * DRM will pick up on DSI encoder basing on differents configs.
+ */
+struct mdfld_dsi_config {
+	struct drm_device *dev;
+	struct drm_display_mode *fixed_mode;
+	struct drm_display_mode *mode;
+
+	struct mdfld_dsi_connector *connector;
+	struct mdfld_dsi_encoder *encoders[DRM_CONNECTOR_MAX_ENCODER];
+	struct mdfld_dsi_encoder *encoder;
+
+	struct mdfld_dsi_hw_registers regs;
+
+	/*DSI hw context*/
+	struct mutex context_lock;
+	struct mdfld_dsi_hw_context dsi_hw_context;
+
+	int pipe;
+	int changed;
+
+	int drv_ic_inited;
+
+	int bpp;
+	mdfld_dsi_encoder_t type;
+	int lane_count;
+	/*mipi data lane config*/
+	int lane_config;
+	/*Virtual channel number for this encoder*/
+	int channel_num;
+	/*video mode configure*/
+	int video_mode;
+	int enable_gamma_csc;
+	uint32_t s3d_format;
+
+	/*dsr*/
+	void *dsr;
+};
+
+#define MDFLD_DSI_CONNECTOR(psb_output) \
+	(container_of(psb_output, struct mdfld_dsi_connector, base))
+
+#define MDFLD_DSI_ENCODER(encoder) \
+	(container_of(encoder, struct mdfld_dsi_encoder, base))
+
+#define MDFLD_DSI_ENCODER_WITH_DRM_ENABLE(encoder) \
+		(container_of((struct drm_encoder *) encoder, \
+		struct mdfld_dsi_encoder, base))
+
+static inline struct mdfld_dsi_config *
+mdfld_dsi_get_config(struct mdfld_dsi_connector *connector)
+{
+	if (!connector)
+		return NULL;
+
+	return (struct mdfld_dsi_config *)connector->private;
+}
+
+static inline void *mdfld_dsi_get_pkg_sender(struct mdfld_dsi_config *config)
+{
+	struct mdfld_dsi_connector *dsi_connector;
+
+	if (!config)
+		return NULL;
+
+	dsi_connector = config->connector;
+
+	if (!dsi_connector)
+		return NULL;
+
+	return dsi_connector->pkg_sender;
+}
+
+static inline struct mdfld_dsi_config *
+mdfld_dsi_encoder_get_config(struct mdfld_dsi_encoder *encoder)
+{
+	if (!encoder)
+		return NULL;
+	return (struct mdfld_dsi_config *)encoder->private;
+}
+
+static inline struct mdfld_dsi_connector *
+mdfld_dsi_encoder_get_connector(struct mdfld_dsi_encoder *encoder)
+{
+	struct mdfld_dsi_config *config;
+
+	if (!encoder)
+		return NULL;
+
+	config = mdfld_dsi_encoder_get_config(encoder);
+	if (!config)
+		return NULL;
+
+	return config->connector;
+}
+
+static inline void *
+mdfld_dsi_encoder_get_pkg_sender(struct mdfld_dsi_encoder *encoder)
+{
+	struct mdfld_dsi_config *dsi_config;
+
+	dsi_config = mdfld_dsi_encoder_get_config(encoder);
+	if (!dsi_config)
+		return NULL;
+
+	return mdfld_dsi_get_pkg_sender(dsi_config);
+}
+
+static inline int mdfld_dsi_encoder_get_pipe(struct mdfld_dsi_encoder *encoder)
+{
+	struct mdfld_dsi_connector *connector;
+
+	if (!encoder)
+		return -1;
+
+	connector = mdfld_dsi_encoder_get_connector(encoder);
+	if (!connector)
+		return -1;
+
+	return connector->pipe;
+}
+
+/*Export functions*/
+extern void mdfld_dsi_gen_fifo_ready(struct drm_device *dev,
+		u32 gen_fifo_stat_reg, u32 fifo_stat);
+extern void mdfld_dsi_brightness_init(struct mdfld_dsi_config *dsi_config,
+		int pipe);
+extern void mdfld_dsi_brightness_control(struct drm_device *dev, int pipe,
+		int level);
+extern int mdfld_dsi_output_init(struct drm_device *dev,
+		int pipe,
+		struct mdfld_dsi_config *config,
+		struct panel_funcs *p_funcs);
+extern int mdfld_dsi_get_panel_status(struct mdfld_dsi_config *dsi_config,
+		u8 dcs,
+		u8 *data,
+		u8 transmission,
+		u32 len);
+extern int mdfld_dsi_get_power_mode(struct mdfld_dsi_config *dsi_config,
+		u8 *mode,
+		u8 transmission);
+extern void mdfld_dsi_set_drain_latency(struct drm_encoder *encoder,
+		struct drm_display_mode *mode);
+
+#endif /*__MDFLD_DSI_OUTPUT_H__*/
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/mdfld_dsi_pkg_sender.c b/drivers/external_drivers/intel_media/display/tng/drv/mdfld_dsi_pkg_sender.c
new file mode 100644
index 0000000..24302e5
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/mdfld_dsi_pkg_sender.c
@@ -0,0 +1,1861 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Jackie Li<yaodong.li@intel.com>
+ */
+
+#include <linux/freezer.h>
+
+#include "mdfld_dsi_output.h"
+#include "mdfld_dsi_pkg_sender.h"
+#include "mdfld_dsi_dbi_dsr.h"
+#include "mdfld_dsi_dbi.h"
+#include "mdfld_dsi_dpi.h"
+
+#define MDFLD_DSI_DBI_FIFO_TIMEOUT		1000
+#define MDFLD_DSI_MAX_RETURN_PACKET_SIZE	512
+#define MDFLD_DSI_READ_MAX_COUNT		10000
+
+const char *dsi_errors[] = {
+	"[ 0:RX SOT Error]",
+	"[ 1:RX SOT Sync Error]",
+	"[ 2:RX EOT Sync Error]",
+	"[ 3:RX Escape Mode Entry Error]",
+	"[ 4:RX LP TX Sync Error",
+	"[ 5:RX HS Receive Timeout Error]",
+	"[ 6:RX False Control Error]",
+	"[ 7:RX ECC Single Bit Error]",
+	"[ 8:RX ECC Multibit Error]",
+	"[ 9:RX Checksum Error]",
+	"[10:RX DSI Data Type Not Recognised]",
+	"[11:RX DSI VC ID Invalid]",
+	"[12:TX False Control Error]",
+	"[13:TX ECC Single Bit Error]",
+	"[14:TX ECC Multibit Error]",
+	"[15:TX Checksum Error]",
+	"[16:TX DSI Data Type Not Recognised]",
+	"[17:TX DSI VC ID invalid]",
+	"[18:High Contention]",
+	"[19:Low contention]",
+	"[20:DPI FIFO Under run]",
+	"[21:HS TX Timeout]",
+	"[22:LP RX Timeout]",
+	"[23:Turn Around ACK Timeout]",
+	"[24:ACK With No Error]",
+	"[25:RX Invalid TX Length]",
+	"[26:RX Prot Violation]",
+	"[27:HS Generic Write FIFO Full]",
+	"[28:LP Generic Write FIFO Full]",
+	"[29:Generic Read Data Avail]",
+	"[30:Special Packet Sent]",
+	"[31:Tearing Effect]",
+};
+
+static void debug_dbi_hang(struct mdfld_dsi_pkg_sender *sender) {
+  struct mdfld_dsi_connector *dsi_connector = sender->dsi_connector;
+  struct mdfld_dsi_config *dsi_config = (struct mdfld_dsi_config *)dsi_connector->private;
+  struct drm_device *dev = sender->dev;
+  struct drm_psb_private *dev_priv = (struct drm_psb_private *)dev->dev_private;
+  bool pmon = ospm_power_is_hw_on(OSPM_DISPLAY_ISLAND);
+
+  DRM_ERROR("sender->pipe: 0x%08x\n", sender->pipe);
+  DRM_ERROR("dev_priv->um_start: 0x%08x\n", dev_priv->um_start);
+  DRM_ERROR("ospm_power_is_hw_on(OSPM_DISPLAY_ISLAND): 0x%08x\n", pmon);
+  DRM_ERROR("dsi_config->dsi_hw_context.panel_on: 0x%08x\n", dsi_config->dsi_hw_context.panel_on);
+	if (dsi_config->dsr) {
+	  DRM_ERROR("dsi_config->dsr->dsr_enabled: 0x%08x\n", ((struct mdfld_dsi_dsr *)dsi_config->dsr)->dsr_enabled);
+	  DRM_ERROR("dsi_config->dsr->dsr_state: 0x%08x\n", ((struct mdfld_dsi_dsr *)dsi_config->dsr)->dsr_state);
+	}
+	if (!pmon) {
+	  /* Not safe to dump registers when the power is off */
+	  return;
+	}
+  DRM_ERROR("dsi_config->regs.dspcntr_reg: 0x%08x\n", REG_READ(dsi_config->regs.dspcntr_reg));
+
+  DRM_ERROR("MIPIA_DEVICE_READY_REG: 0x%08x\n", REG_READ(MIPIA_DEVICE_READY_REG));
+  DRM_ERROR("MIPIA_DEVICE_READY_REG + MIPIC_REG_OFFSET: 0x%08x\n", REG_READ(MIPIA_DEVICE_READY_REG + MIPIC_REG_OFFSET));
+  DRM_ERROR("sender->dpll_reg: 0x%08x\n", REG_READ(sender->dpll_reg));
+  DRM_ERROR("sender->dspcntr_reg: 0x%08x\n", REG_READ(sender->dspcntr_reg));
+  DRM_ERROR("sender->pipeconf_reg: 0x%08x\n", REG_READ(sender->pipeconf_reg));
+  DRM_ERROR("sender->pipestat_reg: 0x%08x\n", REG_READ(sender->pipestat_reg));
+  DRM_ERROR("sender->dsplinoff_reg: 0x%08x\n", REG_READ(sender->dsplinoff_reg));
+  DRM_ERROR("sender->dspsurf_reg: 0x%08x\n", REG_READ(sender->dspsurf_reg));
+
+  DRM_ERROR("sender->mipi_intr_stat_reg: 0x%08x\n", REG_READ(sender->mipi_intr_stat_reg));
+  DRM_ERROR("sender->mipi_lp_gen_data_reg: 0x%08x\n", REG_READ(sender->mipi_lp_gen_data_reg));
+  DRM_ERROR("sender->mipi_hs_gen_data_reg: 0x%08x\n", REG_READ(sender->mipi_hs_gen_data_reg));
+  DRM_ERROR("sender->mipi_lp_gen_ctrl_reg: 0x%08x\n", REG_READ(sender->mipi_lp_gen_ctrl_reg));
+  DRM_ERROR("sender->mipi_hs_gen_ctrl_reg: 0x%08x\n", REG_READ(sender->mipi_hs_gen_ctrl_reg));
+  DRM_ERROR("sender->mipi_gen_fifo_stat_reg: 0x%08x\n", REG_READ(sender->mipi_gen_fifo_stat_reg));
+  DRM_ERROR("sender->mipi_data_addr_reg: 0x%08x\n", REG_READ(sender->mipi_data_addr_reg));
+  DRM_ERROR("sender->mipi_data_len_reg: 0x%08x\n", REG_READ(sender->mipi_data_len_reg));
+  DRM_ERROR("sender->mipi_cmd_addr_reg: 0x%08x\n", REG_READ(sender->mipi_cmd_addr_reg));
+  DRM_ERROR("sender->mipi_cmd_len_reg: 0x%08x\n", REG_READ(sender->mipi_cmd_len_reg));
+  DRM_ERROR("sender->mipi_dpi_control_reg: 0x%08x\n", REG_READ(sender->mipi_dpi_control_reg));
+}
+
+static inline int wait_for_gen_fifo_empty(struct mdfld_dsi_pkg_sender *sender,
+						u32 mask)
+{
+	struct drm_device *dev = sender->dev;
+	u32 gen_fifo_stat_reg = sender->mipi_gen_fifo_stat_reg;
+	int retry = 10000;
+
+	if (sender->work_for_slave_panel)
+		gen_fifo_stat_reg += MIPIC_REG_OFFSET;
+	while (retry--) {
+		if ((mask & REG_READ(gen_fifo_stat_reg)) == mask)
+			return 0;
+		udelay(3);
+	}
+
+	DRM_ERROR("fifo is NOT empty 0x%08x\n", REG_READ(gen_fifo_stat_reg));
+	if (!IS_ANN(dev))
+		debug_dbi_hang(sender);
+
+	sender->status = MDFLD_DSI_CONTROL_ABNORMAL;
+	return -EIO;
+}
+
+static int wait_for_all_fifos_empty(struct mdfld_dsi_pkg_sender *sender)
+{
+	return wait_for_gen_fifo_empty(sender,
+		(BIT2 | BIT10 | BIT18 | BIT26 | BIT27 | BIT28));
+}
+
+static int wait_for_lp_fifos_empty(struct mdfld_dsi_pkg_sender *sender)
+{
+	return wait_for_gen_fifo_empty(sender, (BIT10 | BIT26));
+}
+
+static int wait_for_hs_fifos_empty(struct mdfld_dsi_pkg_sender *sender)
+{
+	return wait_for_gen_fifo_empty(sender, (BIT2 | BIT18));
+}
+
+static int wait_for_dbi_fifo_empty(struct mdfld_dsi_pkg_sender *sender)
+{
+	return wait_for_gen_fifo_empty(sender, (BIT27));
+}
+
+static int wait_for_dpi_fifo_empty(struct mdfld_dsi_pkg_sender *sender)
+{
+	return wait_for_gen_fifo_empty(sender, (BIT28));
+}
+
+static int dsi_error_handler(struct mdfld_dsi_pkg_sender *sender)
+{
+	struct drm_device *dev = sender->dev;
+	u32 intr_stat_reg = sender->mipi_intr_stat_reg;
+
+	int i;
+	u32 mask;
+	int err = 0;
+	int count = 0;
+	u32 intr_stat;
+
+	intr_stat = REG_READ(intr_stat_reg);
+	if (!intr_stat)
+		return 0;
+
+	for (i = 0; i < 32; i++) {
+		mask = (0x00000001UL) << i;
+		if (!(intr_stat & mask))
+			continue;
+
+		switch (mask) {
+		case BIT0:
+		case BIT1:
+		case BIT2:
+		case BIT3:
+		case BIT4:
+		case BIT5:
+		case BIT6:
+		case BIT7:
+			/*No Action required.*/
+			DRM_INFO("dsi status %s\n", dsi_errors[i]);
+			REG_WRITE(intr_stat_reg, mask);
+			break;
+		case BIT8:
+			/*No Action required.*/
+			REG_WRITE(intr_stat_reg, mask);
+			break;
+		case BIT9:
+		case BIT10:
+		case BIT11:
+		case BIT12:
+		case BIT13:
+			/*No Action required.*/
+			DRM_INFO("dsi status %s\n", dsi_errors[i]);
+			REG_WRITE(intr_stat_reg, mask);
+			break;
+		case BIT14:
+			DRM_INFO("dsi status %s\n", dsi_errors[i]);
+			break;
+		case BIT15:
+			/*No Action required.*/
+			DRM_INFO("dsi status %s\n", dsi_errors[i]);
+			REG_WRITE(intr_stat_reg, mask);
+			break;
+		case BIT16:
+			DRM_INFO("dsi status %s\n", dsi_errors[i]);
+			REG_WRITE(intr_stat_reg, mask);
+			break;
+		case BIT17:
+			/*No Action required.*/
+			DRM_INFO("dsi status %s\n", dsi_errors[i]);
+			REG_WRITE(intr_stat_reg, mask);
+			break;
+		case BIT18:
+			REG_WRITE(MIPIA_EOT_DISABLE_REG,
+				REG_READ(MIPIA_EOT_DISABLE_REG)|0x30);
+			while ((REG_READ(intr_stat_reg) & BIT18)) {
+				count++;
+				/*
+				* Per silicon feedback,
+				* if this bit cannot be
+				* cleared by 3 times,
+				* it should be a real
+				* High Contention error.
+				*/
+				if (count == 4) {
+					DRM_INFO("dsi status %s\n",
+						dsi_errors[i]);
+					break;
+				}
+				REG_WRITE(intr_stat_reg, mask);
+			}
+			break;
+		case BIT19:
+			DRM_INFO("dsi status %s\n", dsi_errors[i]);
+			break;
+		case BIT20:
+			/*No Action required.*/
+			DRM_DEBUG("dsi status %s\n", dsi_errors[i]);
+			REG_WRITE(intr_stat_reg, mask);
+			break;
+		case BIT21:
+			DRM_INFO("dsi status %s\n", dsi_errors[i]);
+			REG_WRITE(intr_stat_reg, mask);
+			err = wait_for_all_fifos_empty(sender);
+			break;
+		case BIT22:
+			DRM_INFO("dsi status %s\n", dsi_errors[i]);
+			REG_WRITE(intr_stat_reg, mask);
+			err = wait_for_all_fifos_empty(sender);
+			break;
+		case BIT23:
+			/*No Action required.*/
+			DRM_INFO("dsi status %s\n", dsi_errors[i]);
+			REG_WRITE(intr_stat_reg, mask);
+			break;
+		case BIT24:
+			/*No Action required.*/
+			DRM_DEBUG("dsi status %s\n", dsi_errors[i]);
+			REG_WRITE(intr_stat_reg, mask);
+			break;
+		case BIT25:
+		case BIT26:
+			/*No Action required.*/
+			DRM_INFO("dsi status %s\n", dsi_errors[i]);
+			REG_WRITE(intr_stat_reg, mask);
+			break;
+		case BIT27:
+			DRM_INFO("dsi status %s\n", dsi_errors[i]);
+			REG_WRITE(intr_stat_reg, mask);
+			err = wait_for_hs_fifos_empty(sender);
+			break;
+		case BIT28:
+			DRM_INFO("dsi status %s\n", dsi_errors[i]);
+			REG_WRITE(intr_stat_reg, mask);
+			err = wait_for_lp_fifos_empty(sender);
+			break;
+		case BIT29:
+			/*No Action required.*/
+			DRM_INFO("dsi status %s\n", dsi_errors[i]);
+			break;
+		case BIT30:
+			break;
+		case BIT31:
+			/*No Action required.*/
+			DRM_INFO("dsi status %s\n", dsi_errors[i]);
+			break;
+		}
+	}
+
+	return err;
+}
+
+static inline int dbi_cmd_sent(struct mdfld_dsi_pkg_sender *sender)
+{
+	struct drm_device *dev = sender->dev;
+	u32 retry = 0xffff;
+	u32 dbi_cmd_addr_reg = sender->mipi_cmd_addr_reg;
+	int ret = 0;
+
+	/*query the command execution status*/
+	while (retry--) {
+		if (!(REG_READ(dbi_cmd_addr_reg) & BIT0))
+			break;
+	}
+
+	if (!retry) {
+		DRM_ERROR("Timeout waiting for DBI Command status\n");
+		ret = -EAGAIN;
+	}
+
+	return ret;
+}
+
+/**
+ * NOTE: this interface is abandoned expect for write_mem_start DCS
+ * other DCS are sent via generic pkg interfaces
+ */
+static int send_dcs_pkg(struct mdfld_dsi_pkg_sender *sender,
+			struct mdfld_dsi_pkg *pkg)
+{
+	struct drm_device *dev = sender->dev;
+	struct mdfld_dsi_dcs_pkg *dcs_pkg = &pkg->pkg.dcs_pkg;
+	u32 dbi_cmd_len_reg = sender->mipi_cmd_len_reg;
+	u32 dbi_cmd_addr_reg = sender->mipi_cmd_addr_reg;
+	u32 cb_phy = sender->dbi_cb_phy;
+	u32 index = 0;
+	u8 *cb = (u8 *)sender->dbi_cb_addr;
+	int i;
+	int ret;
+
+	if (!sender->dbi_pkg_support) {
+		DRM_ERROR("Trying to send DCS on a non DBI output, abort!\n");
+		return -ENOTSUPP;
+	}
+
+	PSB_DEBUG_MIPI("Sending DCS pkg 0x%x...\n", dcs_pkg->cmd);
+
+	/*wait for DBI fifo empty*/
+	wait_for_dbi_fifo_empty(sender);
+
+	*(cb + (index++)) = dcs_pkg->cmd;
+	if (dcs_pkg->param_num) {
+		for (i = 0; i < dcs_pkg->param_num; i++) {
+			*(cb + (index++)) = *(dcs_pkg->param + i);
+		}
+	}
+
+	REG_WRITE(dbi_cmd_len_reg, (1 + dcs_pkg->param_num));
+	REG_WRITE(dbi_cmd_addr_reg,
+		(cb_phy << CMD_MEM_ADDR_OFFSET)
+		| BIT0
+		| ((dcs_pkg->data_src == CMD_DATA_SRC_PIPE) ? BIT1 : 0));
+
+	ret = dbi_cmd_sent(sender);
+	if (ret) {
+		DRM_ERROR("command 0x%x not complete\n", dcs_pkg->cmd);
+		return -EAGAIN;
+	}
+
+	PSB_DEBUG_MIPI("sent DCS pkg 0x%x...\n", dcs_pkg->cmd);
+
+	return 0;
+}
+
+static int __send_short_pkg(struct mdfld_dsi_pkg_sender *sender,
+				struct mdfld_dsi_pkg *pkg)
+{
+	struct drm_device *dev = sender->dev;
+	u32 hs_gen_ctrl_reg = sender->mipi_hs_gen_ctrl_reg;
+	u32 lp_gen_ctrl_reg = sender->mipi_lp_gen_ctrl_reg;
+	u32 gen_ctrl_val = 0;
+	struct mdfld_dsi_gen_short_pkg *short_pkg = &pkg->pkg.short_pkg;
+
+	if (sender->work_for_slave_panel) {
+		hs_gen_ctrl_reg += MIPIC_REG_OFFSET;
+		lp_gen_ctrl_reg += MIPIC_REG_OFFSET;
+	}
+	gen_ctrl_val |= short_pkg->cmd << MCS_COMMANDS_POS;
+	gen_ctrl_val |= 0 << DCS_CHANNEL_NUMBER_POS;
+	gen_ctrl_val |= pkg->pkg_type;
+	gen_ctrl_val |= short_pkg->param << MCS_PARAMETER_POS;
+
+	if (pkg->transmission_type == MDFLD_DSI_HS_TRANSMISSION) {
+		/*wait for hs fifo empty*/
+		wait_for_dbi_fifo_empty(sender);
+		wait_for_hs_fifos_empty(sender);
+
+		/*send pkg*/
+		REG_WRITE(hs_gen_ctrl_reg, gen_ctrl_val);
+	} else if (pkg->transmission_type == MDFLD_DSI_LP_TRANSMISSION) {
+		wait_for_dbi_fifo_empty(sender);
+		wait_for_lp_fifos_empty(sender);
+
+		/*send pkg*/
+		REG_WRITE(lp_gen_ctrl_reg, gen_ctrl_val);
+	} else {
+		DRM_ERROR("Unknown transmission type %d\n",
+				pkg->transmission_type);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int __send_long_pkg(struct mdfld_dsi_pkg_sender *sender,
+				struct mdfld_dsi_pkg *pkg)
+{
+	struct drm_device *dev = sender->dev;
+	u32 hs_gen_ctrl_reg = sender->mipi_hs_gen_ctrl_reg;
+	u32 hs_gen_data_reg = sender->mipi_hs_gen_data_reg;
+	u32 lp_gen_ctrl_reg = sender->mipi_lp_gen_ctrl_reg;
+	u32 lp_gen_data_reg = sender->mipi_lp_gen_data_reg;
+	u32 gen_ctrl_val = 0;
+	u8 *dp = NULL;
+	u32 reg_val = 0;
+	int i;
+	int dword_count = 0, remain_byte_count = 0;
+	struct mdfld_dsi_gen_long_pkg *long_pkg = &pkg->pkg.long_pkg;
+
+	dp = long_pkg->data;
+	if (sender->work_for_slave_panel) {
+		hs_gen_ctrl_reg += MIPIC_REG_OFFSET;
+		hs_gen_data_reg += MIPIC_REG_OFFSET;
+		lp_gen_ctrl_reg += MIPIC_REG_OFFSET;
+		lp_gen_data_reg += MIPIC_REG_OFFSET;
+	}
+
+	/**
+	 * Set up word count for long pkg
+	 * FIXME: double check word count field.
+	 * currently, using the byte counts of the payload as the word count.
+	 * ------------------------------------------------------------
+	 * | DI |   WC   | ECC|         PAYLOAD              |CHECKSUM|
+	 * ------------------------------------------------------------
+	 */
+	gen_ctrl_val |= (long_pkg->len) << WORD_COUNTS_POS;
+	gen_ctrl_val |= 0 << DCS_CHANNEL_NUMBER_POS;
+	gen_ctrl_val |= pkg->pkg_type;
+
+	if (pkg->transmission_type == MDFLD_DSI_HS_TRANSMISSION) {
+		/*wait for hs ctrl and data fifos to be empty*/
+		wait_for_dbi_fifo_empty(sender);
+		wait_for_hs_fifos_empty(sender);
+
+		dword_count = long_pkg->len / 4;
+		remain_byte_count = long_pkg->len % 4;
+		for (i = 0; i < dword_count * 4; i = i + 4) {
+			reg_val = 0;
+			reg_val = *(dp + i);
+			reg_val |= *(dp + i + 1) << 8;
+			reg_val |= *(dp + i + 2) << 16;
+			reg_val |= *(dp + i + 3) << 24;
+			PSB_DEBUG_MIPI("HS Sending data 0x%08x\n", reg_val);
+			REG_WRITE(hs_gen_data_reg, reg_val);
+		}
+
+		if (remain_byte_count) {
+			reg_val = 0;
+			for (i = 0; i < remain_byte_count; i++)
+				reg_val |=
+					*(dp + dword_count * 4 + i) << (8 * i);
+			PSB_DEBUG_MIPI("HS Sending data 0x%08x\n", reg_val);
+			REG_WRITE(hs_gen_data_reg, reg_val);
+		}
+
+		REG_WRITE(hs_gen_ctrl_reg, gen_ctrl_val);
+	} else if (pkg->transmission_type == MDFLD_DSI_LP_TRANSMISSION) {
+		wait_for_dbi_fifo_empty(sender);
+		wait_for_lp_fifos_empty(sender);
+
+		dword_count = long_pkg->len / 4;
+		remain_byte_count = long_pkg->len % 4;
+		for (i = 0; i < dword_count * 4; i = i + 4) {
+			reg_val = 0;
+			reg_val = *(dp + i);
+			reg_val |= *(dp + i + 1) << 8;
+			reg_val |= *(dp + i + 2) << 16;
+			reg_val |= *(dp + i + 3) << 24;
+			PSB_DEBUG_MIPI("LP Sending data 0x%08x\n", reg_val);
+			REG_WRITE(lp_gen_data_reg, reg_val);
+		}
+
+		if (remain_byte_count) {
+			reg_val = 0;
+			for (i = 0; i < remain_byte_count; i++) {
+				reg_val |=
+					*(dp + dword_count * 4 + i) << (8 * i);
+			}
+			PSB_DEBUG_MIPI("LP Sending data 0x%08x\n", reg_val);
+			REG_WRITE(lp_gen_data_reg, reg_val);
+		}
+
+		REG_WRITE(lp_gen_ctrl_reg, gen_ctrl_val);
+	} else {
+		DRM_ERROR("Unknown transmission type %d\n",
+				pkg->transmission_type);
+		return -EINVAL;
+	}
+
+	return 0;
+
+}
+
+static int send_mcs_short_pkg(struct mdfld_dsi_pkg_sender *sender,
+				struct mdfld_dsi_pkg *pkg)
+{
+	PSB_DEBUG_MIPI("Sending MCS short pkg...\n");
+
+	return __send_short_pkg(sender, pkg);
+}
+
+static int send_mcs_long_pkg(struct mdfld_dsi_pkg_sender *sender,
+				struct mdfld_dsi_pkg *pkg)
+{
+	PSB_DEBUG_MIPI("Sending MCS long pkg...\n");
+
+	return __send_long_pkg(sender, pkg);
+}
+
+static int send_gen_short_pkg(struct mdfld_dsi_pkg_sender *sender,
+				struct mdfld_dsi_pkg *pkg)
+{
+	PSB_DEBUG_MIPI("Sending GEN short pkg...\n");
+
+	return __send_short_pkg(sender, pkg);
+}
+
+static int send_gen_long_pkg(struct mdfld_dsi_pkg_sender *sender,
+				struct mdfld_dsi_pkg *pkg)
+{
+	PSB_DEBUG_MIPI("Sending GEN long pkg...\n");
+
+	return __send_long_pkg(sender, pkg);
+}
+
+static int send_dpi_spk_pkg(struct mdfld_dsi_pkg_sender *sender,
+				struct mdfld_dsi_pkg *pkg)
+{
+	struct drm_device *dev = sender->dev;
+	u32 dpi_control_reg = sender->mipi_dpi_control_reg;
+	u32 intr_stat_reg = sender->mipi_intr_stat_reg;
+	u32 dpi_control_val = 0;
+	u32 dpi_control_current_setting = 0;
+	struct mdfld_dsi_dpi_spk_pkg *spk_pkg = &pkg->pkg.spk_pkg;
+	int retry = 10000;
+
+	dpi_control_val = spk_pkg->cmd;
+
+	if (pkg->transmission_type == MDFLD_DSI_LP_TRANSMISSION)
+		dpi_control_val |= BIT6;
+
+	/*Wait for DPI fifo empty*/
+	wait_for_dpi_fifo_empty(sender);
+
+	/*clean spk packet sent interrupt*/
+	REG_WRITE(intr_stat_reg, BIT30);
+	dpi_control_current_setting =
+		REG_READ(dpi_control_reg);
+
+	/*send out spk packet*/
+	if (dpi_control_current_setting != dpi_control_val) {
+		REG_WRITE(dpi_control_reg, dpi_control_val);
+
+		/*wait for spk packet sent interrupt*/
+		while (--retry && !(REG_READ(intr_stat_reg) & BIT30))
+			udelay(3);
+
+		if (!retry) {
+			DRM_ERROR("Fail to send SPK Packet 0x%x\n",
+				 spk_pkg->cmd);
+			return -EINVAL;
+		}
+	} else
+		/*For SHUT_DOWN and TURN_ON, it is better called by
+		symmetrical. so skip duplicate called*/
+		printk(KERN_WARNING "skip duplicate setting of DPI control\n");
+	return 0;
+}
+
+static int send_pkg_prepare(struct mdfld_dsi_pkg_sender *sender,
+				struct mdfld_dsi_pkg *pkg)
+{
+	u8 cmd;
+	u8 *data;
+
+	PSB_DEBUG_MIPI("Prepare to Send type 0x%x pkg\n", pkg->pkg_type);
+
+	switch (pkg->pkg_type) {
+	case MDFLD_DSI_PKG_DCS:
+		cmd = pkg->pkg.dcs_pkg.cmd;
+		break;
+	case MDFLD_DSI_PKG_MCS_SHORT_WRITE_0:
+	case MDFLD_DSI_PKG_MCS_SHORT_WRITE_1:
+		cmd = pkg->pkg.short_pkg.cmd;
+		break;
+	case MDFLD_DSI_PKG_MCS_LONG_WRITE:
+		data = (u8 *)pkg->pkg.long_pkg.data;
+		cmd = *data;
+		break;
+	default:
+		return 0;
+	}
+
+	/*this prevents other package sending while doing msleep*/
+	sender->status = MDFLD_DSI_PKG_SENDER_BUSY;
+
+	return 0;
+}
+
+static int send_pkg_done(struct mdfld_dsi_pkg_sender *sender,
+		struct mdfld_dsi_pkg *pkg)
+{
+	u8 cmd;
+	u8 *data = NULL;
+
+	PSB_DEBUG_MIPI("Sent type 0x%x pkg\n", pkg->pkg_type);
+
+	switch (pkg->pkg_type) {
+	case MDFLD_DSI_PKG_DCS:
+		cmd = pkg->pkg.dcs_pkg.cmd;
+		break;
+	case MDFLD_DSI_PKG_MCS_SHORT_WRITE_0:
+	case MDFLD_DSI_PKG_MCS_SHORT_WRITE_1:
+		cmd = pkg->pkg.short_pkg.cmd;
+		break;
+	case MDFLD_DSI_PKG_MCS_LONG_WRITE:
+	case MDFLD_DSI_PKG_GEN_LONG_WRITE:
+		data = (u8 *)pkg->pkg.long_pkg.data;
+		cmd = *data;
+		break;
+	default:
+		return 0;
+	}
+
+	/*update panel status*/
+	if (unlikely(cmd == enter_sleep_mode))
+		sender->panel_mode |= MDFLD_DSI_PANEL_MODE_SLEEP;
+	else if (unlikely(cmd == exit_sleep_mode))
+		sender->panel_mode &= ~MDFLD_DSI_PANEL_MODE_SLEEP;
+
+	if (sender->status != MDFLD_DSI_CONTROL_ABNORMAL)
+		sender->status = MDFLD_DSI_PKG_SENDER_FREE;
+
+	/*after sending pkg done, free the data buffer for mcs long pkg*/
+	if (pkg->pkg_type == MDFLD_DSI_PKG_MCS_LONG_WRITE ||
+		pkg->pkg_type == MDFLD_DSI_PKG_GEN_LONG_WRITE) {
+		if (data != NULL)
+			kfree(data);
+	}
+
+	return 0;
+}
+
+static int do_send_pkg(struct mdfld_dsi_pkg_sender *sender,
+			struct mdfld_dsi_pkg *pkg)
+{
+	int ret = 0;
+
+	PSB_DEBUG_MIPI("Sending type 0x%x pkg\n", pkg->pkg_type);
+
+	if (sender->status == MDFLD_DSI_PKG_SENDER_BUSY) {
+		DRM_ERROR("sender is busy\n");
+		return -EAGAIN;
+	}
+
+	ret = send_pkg_prepare(sender, pkg);
+	if (ret) {
+		DRM_ERROR("send_pkg_prepare error\n");
+		return ret;
+	}
+
+	switch (pkg->pkg_type) {
+	case MDFLD_DSI_PKG_DCS:
+		ret = send_dcs_pkg(sender, pkg);
+		break;
+	case MDFLD_DSI_PKG_GEN_SHORT_WRITE_0:
+	case MDFLD_DSI_PKG_GEN_SHORT_WRITE_1:
+	case MDFLD_DSI_PKG_GEN_SHORT_WRITE_2:
+	case MDFLD_DSI_PKG_GEN_READ_0:
+	case MDFLD_DSI_PKG_GEN_READ_1:
+	case MDFLD_DSI_PKG_GEN_READ_2:
+		ret = send_gen_short_pkg(sender, pkg);
+		break;
+	case MDFLD_DSI_PKG_GEN_LONG_WRITE:
+		ret = send_gen_long_pkg(sender, pkg);
+		break;
+	case MDFLD_DSI_PKG_MCS_SHORT_WRITE_0:
+	case MDFLD_DSI_PKG_MCS_SHORT_WRITE_1:
+	case MDFLD_DSI_PKG_MCS_READ:
+		ret = send_mcs_short_pkg(sender, pkg);
+		break;
+	case MDFLD_DSI_PKG_MCS_LONG_WRITE:
+		ret = send_mcs_long_pkg(sender, pkg);
+		break;
+	case MDFLD_DSI_DPI_SPK:
+		ret = send_dpi_spk_pkg(sender, pkg);
+		break;
+	default:
+		DRM_ERROR("Invalid pkg type 0x%x\n", pkg->pkg_type);
+		ret = -EINVAL;
+	}
+
+	send_pkg_done(sender, pkg);
+
+	return ret;
+}
+
+static int send_pkg(struct mdfld_dsi_pkg_sender *sender,
+			struct mdfld_dsi_pkg *pkg)
+{
+	int err = 0;
+
+	/*handle DSI error*/
+	err = dsi_error_handler(sender);
+	if (err) {
+		DRM_ERROR("Error handling failed\n");
+		err = -EAGAIN;
+		goto send_pkg_err;
+	}
+
+	/*send pkg*/
+	err = do_send_pkg(sender, pkg);
+	if (err) {
+		DRM_ERROR("sent pkg failed\n");
+		dsi_error_handler(sender);
+		err = -EAGAIN;
+		goto send_pkg_err;
+	}
+
+	/*FIXME: should I query complete and fifo empty here?*/
+send_pkg_err:
+	return err;
+}
+
+static struct mdfld_dsi_pkg *
+pkg_sender_get_pkg_locked(struct mdfld_dsi_pkg_sender *sender)
+{
+	struct mdfld_dsi_pkg *pkg;
+
+	if (list_empty(&sender->free_list)) {
+		DRM_ERROR("No free pkg left\n");
+		return NULL;
+	}
+
+	pkg = list_first_entry(&sender->free_list, struct mdfld_dsi_pkg, entry);
+
+	/*detach from free list*/
+	list_del_init(&pkg->entry);
+
+	return pkg;
+}
+
+static void pkg_sender_put_pkg_locked(struct mdfld_dsi_pkg_sender *sender,
+		struct mdfld_dsi_pkg *pkg)
+{
+	memset(pkg, 0, sizeof(struct mdfld_dsi_pkg));
+
+	INIT_LIST_HEAD(&pkg->entry);
+
+	list_add_tail(&pkg->entry, &sender->free_list);
+}
+
+static int mdfld_dbi_cb_init(struct mdfld_dsi_pkg_sender *sender,
+		struct psb_gtt *pg, int pipe)
+{
+	uint32_t phy;
+	void *virt_addr = NULL;
+
+	switch (pipe) {
+	case 0:
+		phy = pg->gtt_phys_start - 0x1000;
+		break;
+	case 2:
+		phy = pg->gtt_phys_start - 0x800;
+		break;
+	default:
+		DRM_ERROR("Unsupported channel\n");
+		return -EINVAL;
+	}
+
+	/*mapping*/
+	virt_addr = ioremap_nocache(phy, 0x800);
+	if (!virt_addr) {
+		DRM_ERROR("Map DBI command buffer error\n");
+		return -ENOMEM;
+	}
+
+	if (IS_ANN(dev))
+		memset(virt_addr, 0x0, 0x800);
+
+	sender->dbi_cb_phy = phy;
+	sender->dbi_cb_addr = virt_addr;
+
+	PSB_DEBUG_ENTRY("DBI command buffer initailized. phy %x, addr %p\n",
+			phy, virt_addr);
+
+	return 0;
+}
+
+static void mdfld_dbi_cb_destroy(struct mdfld_dsi_pkg_sender *sender)
+{
+	PSB_DEBUG_ENTRY("\n");
+
+	if (sender && sender->dbi_cb_addr)
+		iounmap(sender->dbi_cb_addr);
+}
+
+static inline void pkg_sender_queue_pkg(struct mdfld_dsi_pkg_sender *sender,
+					struct mdfld_dsi_pkg *pkg,
+					int delay)
+{
+	mutex_lock(&sender->lock);
+
+	if (!delay) {
+		send_pkg(sender, pkg);
+
+		pkg_sender_put_pkg_locked(sender, pkg);
+	} else {
+		/*queue it*/
+		list_add_tail(&pkg->entry, &sender->pkg_list);
+	}
+
+	mutex_unlock(&sender->lock);
+}
+
+static inline int process_pkg_list(struct mdfld_dsi_pkg_sender *sender)
+{
+	struct mdfld_dsi_pkg *pkg;
+	int ret = 0;
+
+	mutex_lock(&sender->lock);
+
+	while (!list_empty(&sender->pkg_list)) {
+		pkg = list_first_entry(&sender->pkg_list,
+				struct mdfld_dsi_pkg, entry);
+		ret = send_pkg(sender, pkg);
+
+		if (ret) {
+			DRM_INFO("Returning eror from process_pkg_lisgt");
+			goto errorunlock;
+		}
+
+		list_del_init(&pkg->entry);
+
+		pkg_sender_put_pkg_locked(sender, pkg);
+	}
+
+	mutex_unlock(&sender->lock);
+	return 0;
+
+errorunlock:
+	mutex_unlock(&sender->lock);
+	return ret;
+}
+
+static int mdfld_dsi_send_mcs_long(struct mdfld_dsi_pkg_sender *sender,
+				   u8 *data,
+				   u32 len,
+				   u8 transmission,
+				   int delay)
+{
+	struct mdfld_dsi_pkg *pkg;
+	u8 *pdata = NULL;
+
+	mutex_lock(&sender->lock);
+
+	pkg = pkg_sender_get_pkg_locked(sender);
+
+	mutex_unlock(&sender->lock);
+
+	if (!pkg) {
+		DRM_ERROR("No memory\n");
+		return -ENOMEM;
+	}
+
+	/* alloc a data buffer to save the long pkg data,
+	 * free the buffer when send_pkg_done.
+	 * */
+	pdata = kmalloc(sizeof(u8) * len, GFP_KERNEL);
+	if (!pdata) {
+		DRM_ERROR("No memory for long_pkg data\n");
+		return -ENOMEM;
+	}
+
+	memcpy(pdata, data, len * sizeof(u8));
+
+	pkg->pkg_type = MDFLD_DSI_PKG_MCS_LONG_WRITE;
+	pkg->transmission_type = transmission;
+	pkg->pkg.long_pkg.data = pdata;
+	pkg->pkg.long_pkg.len = len;
+
+	INIT_LIST_HEAD(&pkg->entry);
+
+	pkg_sender_queue_pkg(sender, pkg, delay);
+
+	return 0;
+}
+
+static int mdfld_dsi_send_mcs_short(struct mdfld_dsi_pkg_sender *sender,
+					u8 cmd, u8 param, u8 param_num,
+					u8 transmission,
+					int delay)
+{
+	struct mdfld_dsi_pkg *pkg;
+
+	mutex_lock(&sender->lock);
+
+	pkg = pkg_sender_get_pkg_locked(sender);
+
+	mutex_unlock(&sender->lock);
+
+	if (!pkg) {
+		DRM_ERROR("No memory\n");
+		return -ENOMEM;
+	}
+
+	if (param_num) {
+		pkg->pkg_type = MDFLD_DSI_PKG_MCS_SHORT_WRITE_1;
+		pkg->pkg.short_pkg.param = param;
+	} else {
+		pkg->pkg_type = MDFLD_DSI_PKG_MCS_SHORT_WRITE_0;
+		pkg->pkg.short_pkg.param = 0;
+	}
+	pkg->transmission_type = transmission;
+	pkg->pkg.short_pkg.cmd = cmd;
+
+	INIT_LIST_HEAD(&pkg->entry);
+
+	pkg_sender_queue_pkg(sender, pkg, delay);
+
+	return 0;
+}
+
+static int mdfld_dsi_send_gen_short(struct mdfld_dsi_pkg_sender *sender,
+					u8 param0, u8 param1, u8 param_num,
+					u8 transmission,
+					int delay)
+{
+	struct mdfld_dsi_pkg *pkg;
+
+	mutex_lock(&sender->lock);
+
+	pkg = pkg_sender_get_pkg_locked(sender);
+
+	mutex_unlock(&sender->lock);
+
+	if (!pkg) {
+		DRM_ERROR("No memory\n");
+		return -ENOMEM;
+	}
+
+	switch (param_num) {
+	case 0:
+		pkg->pkg_type = MDFLD_DSI_PKG_GEN_SHORT_WRITE_0;
+		pkg->pkg.short_pkg.cmd = 0;
+		pkg->pkg.short_pkg.param = 0;
+		break;
+	case 1:
+		pkg->pkg_type = MDFLD_DSI_PKG_GEN_SHORT_WRITE_1;
+		pkg->pkg.short_pkg.cmd = param0;
+		pkg->pkg.short_pkg.param = 0;
+		break;
+	case 2:
+		pkg->pkg_type = MDFLD_DSI_PKG_GEN_SHORT_WRITE_2;
+		pkg->pkg.short_pkg.cmd = param0;
+		pkg->pkg.short_pkg.param = param1;
+		break;
+	}
+
+	pkg->transmission_type = transmission;
+
+	INIT_LIST_HEAD(&pkg->entry);
+
+	pkg_sender_queue_pkg(sender, pkg, delay);
+
+	return 0;
+}
+
+static int mdfld_dsi_send_gen_long(struct mdfld_dsi_pkg_sender *sender,
+				   u8 *data,
+				   u32 len,
+				   u8 transmission,
+				   int delay)
+{
+	struct mdfld_dsi_pkg *pkg;
+	u8 *pdata = NULL;
+
+	mutex_lock(&sender->lock);
+
+	pkg = pkg_sender_get_pkg_locked(sender);
+
+	mutex_unlock(&sender->lock);
+
+	if (!pkg) {
+		DRM_ERROR("No memory\n");
+		return -ENOMEM;
+	}
+
+	/* alloc a data buffer to save the long pkg data,
+	 * free the buffer when send_pkg_done.
+	 * */
+	pdata = kmalloc(sizeof(u8)*len, GFP_KERNEL);
+	if (!pdata) {
+		DRM_ERROR("No memory for long_pkg data\n");
+		return -ENOMEM;
+	}
+
+	memcpy(pdata, data, len*sizeof(u8));
+
+	pkg->pkg_type = MDFLD_DSI_PKG_GEN_LONG_WRITE;
+	pkg->transmission_type = transmission;
+	pkg->pkg.long_pkg.data = pdata;
+	pkg->pkg.long_pkg.len = len;
+
+	INIT_LIST_HEAD(&pkg->entry);
+
+	pkg_sender_queue_pkg(sender, pkg, delay);
+
+	return 0;
+}
+
+static int __read_panel_data(struct mdfld_dsi_pkg_sender *sender,
+				struct mdfld_dsi_pkg *pkg,
+				u8 *data,
+				u32 len)
+{
+	struct drm_device *dev = sender->dev;
+	int i;
+	u32 gen_data_reg;
+	u32 gen_data_value;
+	int retry = MDFLD_DSI_READ_MAX_COUNT;
+	u8 transmission = pkg->transmission_type;
+	int dword_count = 0, remain_byte_count = 0;
+
+	/*Check the len. Max value is 0x40
+	based on the generic read FIFO size*/
+	if (len * sizeof(*data) > 0x40) {
+		len = 0x40 / sizeof(*data);
+		DRM_ERROR("Bigger than Max.Set the len to Max 0x40 bytes\n");
+	}
+
+	/**
+	 * do reading.
+	 * 0) set the max return pack size
+	 * 1) send out generic read request
+	 * 2) polling read data avail interrupt
+	 * 3) read data
+	 */
+	mutex_lock(&sender->lock);
+
+	/*Set the Max return pack size*/
+	wait_for_all_fifos_empty(sender);
+	REG_WRITE(MIPIA_MAX_RETURN_PACK_SIZE_REG, (len*sizeof(*data)) & 0x3FF);
+	wait_for_all_fifos_empty(sender);
+
+	REG_WRITE(sender->mipi_intr_stat_reg, BIT29);
+
+	if ((REG_READ(sender->mipi_intr_stat_reg) & BIT29))
+		DRM_ERROR("Can NOT clean read data valid interrupt\n");
+
+	/*send out read request*/
+	send_pkg(sender, pkg);
+
+	pkg_sender_put_pkg_locked(sender, pkg);
+
+	/*polling read data avail interrupt*/
+	while (--retry && !(REG_READ(sender->mipi_intr_stat_reg) & BIT29))
+		udelay(3);
+
+	if (!retry) {
+		mutex_unlock(&sender->lock);
+		return -ETIMEDOUT;
+	}
+
+	REG_WRITE(sender->mipi_intr_stat_reg, BIT29);
+
+	/*read data*/
+	if (transmission == MDFLD_DSI_HS_TRANSMISSION)
+		gen_data_reg = sender->mipi_hs_gen_data_reg;
+	else if (transmission == MDFLD_DSI_LP_TRANSMISSION)
+		gen_data_reg = sender->mipi_lp_gen_data_reg;
+	else {
+		DRM_ERROR("Unknown transmission");
+		mutex_unlock(&sender->lock);
+		return -EINVAL;
+	}
+
+	dword_count = len / 4;
+	remain_byte_count = len % 4;
+	for (i = 0; i < dword_count * 4; i = i + 4) {
+		gen_data_value = REG_READ(gen_data_reg);
+		*(data + i)     = gen_data_value & 0x000000FF;
+		*(data + i + 1) = (gen_data_value >> 8)  & 0x000000FF;
+		*(data + i + 2) = (gen_data_value >> 16) & 0x000000FF;
+		*(data + i + 3) = (gen_data_value >> 24) & 0x000000FF;
+	}
+	if (remain_byte_count) {
+		gen_data_value = REG_READ(gen_data_reg);
+		for (i = 0; i < remain_byte_count; i++) {
+			*(data + dword_count * 4 + i)  =
+				(gen_data_value >> (8 * i)) & 0x000000FF;
+		}
+	}
+
+	mutex_unlock(&sender->lock);
+
+	return len;
+}
+
+static int mdfld_dsi_read_gen(struct mdfld_dsi_pkg_sender *sender,
+				u8 param0,
+				u8 param1,
+				u8 param_num,
+				u8 *data,
+				u32 len,
+				u8 transmission)
+{
+	struct mdfld_dsi_pkg *pkg;
+
+	mutex_lock(&sender->lock);
+
+	pkg = pkg_sender_get_pkg_locked(sender);
+
+	mutex_unlock(&sender->lock);
+
+	if (!pkg) {
+		DRM_ERROR("No memory\n");
+		return -ENOMEM;
+	}
+
+	switch (param_num) {
+	case 0:
+		pkg->pkg_type = MDFLD_DSI_PKG_GEN_READ_0;
+		pkg->pkg.short_pkg.cmd = 0;
+		pkg->pkg.short_pkg.param = 0;
+		break;
+	case 1:
+		pkg->pkg_type = MDFLD_DSI_PKG_GEN_READ_1;
+		pkg->pkg.short_pkg.cmd = param0;
+		pkg->pkg.short_pkg.param = 0;
+		break;
+	case 2:
+		pkg->pkg_type = MDFLD_DSI_PKG_GEN_READ_2;
+		pkg->pkg.short_pkg.cmd = param0;
+		pkg->pkg.short_pkg.param = param1;
+		break;
+	}
+
+	pkg->transmission_type = transmission;
+
+	INIT_LIST_HEAD(&pkg->entry);
+
+	return __read_panel_data(sender, pkg, data, len);
+}
+
+static int mdfld_dsi_read_mcs(struct mdfld_dsi_pkg_sender *sender,
+				u8 cmd,
+				u8 *data,
+				u32 len,
+				u8 transmission)
+{
+	struct mdfld_dsi_pkg *pkg;
+
+	mutex_lock(&sender->lock);
+
+	pkg = pkg_sender_get_pkg_locked(sender);
+
+	mutex_unlock(&sender->lock);
+
+	if (!pkg) {
+		DRM_ERROR("No memory\n");
+		return -ENOMEM;
+	}
+
+	pkg->pkg_type = MDFLD_DSI_PKG_MCS_READ;
+	pkg->pkg.short_pkg.cmd = cmd;
+	pkg->pkg.short_pkg.param = 0;
+
+	pkg->transmission_type = transmission;
+
+	INIT_LIST_HEAD(&pkg->entry);
+
+	return __read_panel_data(sender, pkg, data, len);
+}
+
+static int mdfld_dsi_send_dpi_spk_pkg(struct mdfld_dsi_pkg_sender *sender,
+				u32 spk_pkg,
+				u8 transmission)
+{
+	struct mdfld_dsi_pkg *pkg;
+
+	mutex_lock(&sender->lock);
+
+	pkg = pkg_sender_get_pkg_locked(sender);
+
+	mutex_unlock(&sender->lock);
+
+	if (!pkg) {
+		DRM_ERROR("No memory\n");
+		return -ENOMEM;
+	}
+
+	pkg->pkg_type = MDFLD_DSI_DPI_SPK;
+	pkg->transmission_type = transmission;
+	pkg->pkg.spk_pkg.cmd = spk_pkg;
+
+	INIT_LIST_HEAD(&pkg->entry);
+
+	pkg_sender_queue_pkg(sender, pkg, 0);
+
+	return 0;
+}
+
+void dsi_controller_dbi_init(struct mdfld_dsi_config *dsi_config, int pipe)
+{
+	struct drm_device *dev = dsi_config->dev;
+	u32 reg_offset = pipe ? MIPIC_REG_OFFSET : 0;
+	int lane_count = dsi_config->lane_count;
+	u32 val = 0;
+
+	PSB_DEBUG_ENTRY("Init DBI interface on pipe %d...\n", pipe);
+
+	/*un-ready device*/
+	REG_WRITE((MIPIA_DEVICE_READY_REG + reg_offset), 0x00000000);
+
+	/*init dsi adapter before kicking off*/
+	REG_WRITE((MIPIA_CONTROL_REG + reg_offset), 0x00000018);
+
+	/*TODO: figure out how to setup these registers*/
+	REG_WRITE((MIPIA_DPHY_PARAM_REG + reg_offset), 0x150c3408);
+	REG_WRITE((MIPIA_CLK_LANE_SWITCH_TIME_CNT_REG + reg_offset),
+			0x000a0014);
+	REG_WRITE((MIPIA_DBI_BW_CTRL_REG + reg_offset), 0x00000400);
+	REG_WRITE((MIPIA_DBI_FIFO_THROTTLE_REG + reg_offset), 0x00000001);
+	REG_WRITE((MIPIA_HS_LS_DBI_ENABLE_REG + reg_offset), 0x00000000);
+
+	/*enable all interrupts*/
+	REG_WRITE((MIPIA_INTR_EN_REG + reg_offset), 0xffffffff);
+	/*max value: 20 clock cycles of txclkesc*/
+	REG_WRITE((MIPIA_TURN_AROUND_TIMEOUT_REG + reg_offset), 0x0000001f);
+	/*min 21 txclkesc, max: ffffh*/
+	REG_WRITE((MIPIA_DEVICE_RESET_TIMER_REG + reg_offset), 0x0000ffff);
+	/*min: 7d0 max: 4e20*/
+	REG_WRITE((MIPIA_INIT_COUNT_REG + reg_offset), 0x00000fa0);
+
+	/*set up max return packet size*/
+	REG_WRITE((MIPIA_MAX_RETURN_PACK_SIZE_REG + reg_offset),
+			MDFLD_DSI_MAX_RETURN_PACKET_SIZE);
+
+	/*set up func_prg*/
+	val |= lane_count;
+	val |= (dsi_config->channel_num << DSI_DBI_VIRT_CHANNEL_OFFSET);
+	val |= DSI_DBI_COLOR_FORMAT_OPTION2;
+	REG_WRITE((MIPIA_DSI_FUNC_PRG_REG + reg_offset), val);
+
+	REG_WRITE((MIPIA_HS_TX_TIMEOUT_REG + reg_offset), 0x3fffff);
+	REG_WRITE((MIPIA_LP_RX_TIMEOUT_REG + reg_offset), 0xffff);
+
+	REG_WRITE((MIPIA_HIGH_LOW_SWITCH_COUNT_REG + reg_offset), 0x46);
+	REG_WRITE((MIPIA_EOT_DISABLE_REG + reg_offset), 0x00000000);
+	REG_WRITE((MIPIA_LP_BYTECLK_REG + reg_offset), 0x00000004);
+	REG_WRITE((MIPIA_DEVICE_READY_REG + reg_offset), 0x00000001);
+}
+
+int mdfld_dsi_cmds_kick_out(struct mdfld_dsi_pkg_sender *sender)
+{
+	return process_pkg_list(sender);
+}
+
+int mdfld_dsi_status_check(struct mdfld_dsi_pkg_sender *sender)
+{
+	return dsi_error_handler(sender);
+}
+
+int mdfld_dsi_check_fifo_empty(struct mdfld_dsi_pkg_sender *sender)
+{
+	struct drm_device *dev;
+
+	if (!sender) {
+		DRM_ERROR("Invalid parameter\n");
+		return -EINVAL;
+	}
+
+	dev = sender->dev;
+
+	if (!sender->dbi_pkg_support) {
+		DRM_ERROR("No DBI pkg sending on this sender\n");
+		return -ENOTSUPP;
+	}
+
+	return REG_READ(sender->mipi_gen_fifo_stat_reg) & BIT27;
+}
+
+int mdfld_dsi_send_dcs(struct mdfld_dsi_pkg_sender *sender,
+			u8 dcs, u8 *param, u32 param_num, u8 data_src,
+			int delay)
+{
+	u32 cb_phy;
+	struct drm_device *dev;
+	struct drm_psb_private *dev_priv;
+	u32 index = 0;
+	u8 *cb;
+	int retry = 1;
+	u8 *dst = NULL;
+	u8 *pSendparam = NULL;
+	int err = 0;
+	int i;
+	int loop_num = 1;
+	int offset = 0;
+
+	if (!sender) {
+		DRM_ERROR("Invalid parameter\n");
+		return -EINVAL;
+	}
+
+	cb_phy = sender->dbi_cb_phy;
+	dev = sender->dev;
+	cb = (u8 *)sender->dbi_cb_addr;
+	dev_priv = dev->dev_private;
+
+	if (!sender->dbi_pkg_support) {
+		DRM_ERROR("No DBI pkg sending on this sender\n");
+		return -ENOTSUPP;
+	}
+
+	/*
+	 * If dcs is write_mem_start, send it directly using
+	 * DSI adapter interface
+	 */
+	if (dcs == write_mem_start) {
+
+		/**
+		 * query whether DBI FIFO is empty,
+		 * if not sleep the drv and wait for it to become empty.
+		 * The MIPI frame done interrupt will wake up the drv.
+		 */
+		if (is_dual_dsi(dev))
+			loop_num = 2;
+		mutex_lock(&sender->lock);
+
+		/*handle DSI error*/
+		if (dsi_error_handler(sender)) {
+			mutex_unlock(&sender->lock);
+			DRM_ERROR("Error handling failed\n");
+			return  -EAGAIN;
+		}
+		/**
+		 * check the whether is there any write_mem_start already being
+		 * sent in this between current te_seq and next te.
+		 * if yes, simply reject the rest of write_mem_start because it
+		 * is unnecessary. otherwise, go ahead and kick of a
+		 * write_mem_start.
+		 */
+		if (atomic64_read(&sender->last_screen_update) ==
+			atomic64_read(&sender->te_seq)) {
+			mutex_unlock(&sender->lock);
+			if (dev_priv->b_async_flip_enable)
+				DRM_INFO("reject WMS LSU[%lld], te_seq[%lld]\n",
+					 (long long) atomic64_read(&sender->
+						       last_screen_update),
+					 (long long) atomic64_read(&sender->te_seq));
+			return -EAGAIN;
+		}
+
+		for(i = 0; i < loop_num; i++) {
+			if (i != 0)
+				offset = MIPIC_REG_OFFSET;
+
+			if (!IS_TNG_A0(dev)) {
+				retry = wait_event_interruptible_timeout(dev_priv->eof_wait,
+				  (REG_READ(sender->mipi_gen_fifo_stat_reg) & BIT27),
+				    msecs_to_jiffies(MDFLD_DSI_DBI_FIFO_TIMEOUT));
+			} else {
+				retry = MDFLD_DSI_DBI_FIFO_TIMEOUT;
+				while (retry && !(REG_READ(sender->mipi_gen_fifo_stat_reg + offset) & BIT27)) {
+					udelay(500);
+					retry--;
+				}
+			}
+
+			/*if DBI FIFO timeout, drop this frame*/
+			if (!retry) {
+				DRM_ERROR("DBI FIFO timeout, drop frame\n");
+				mutex_unlock(&sender->lock);
+				if (!IS_ANN(dev)) {
+					debug_dbi_hang(sender);
+					panic("DBI FIFO timeout, drop frame\n");
+				}
+				return 0;
+			}
+
+			if (i != 0)
+				sender->work_for_slave_panel = true;
+
+			/*wait for generic fifo*/
+			if (REG_READ(HS_LS_DBI_ENABLE_REG + offset) & BIT0)
+				wait_for_lp_fifos_empty(sender);
+			else
+				wait_for_hs_fifos_empty(sender);
+			sender->work_for_slave_panel = false;
+		}
+
+		/*record the last screen update timestamp*/
+		atomic64_set(&sender->last_screen_update,
+			atomic64_read(&sender->te_seq));
+		*(cb + (index++)) = write_mem_start;
+
+		/* Set write_mem_start to mipi C first */
+		if (is_dual_dsi(dev))
+			REG_WRITE(sender->mipi_cmd_len_reg + MIPIC_REG_OFFSET, 1);
+		REG_WRITE(sender->mipi_cmd_len_reg, 1);
+		if (is_dual_dsi(dev))
+			REG_WRITE(sender->mipi_cmd_addr_reg + MIPIC_REG_OFFSET, cb_phy | BIT0 | BIT1);
+		REG_WRITE(sender->mipi_cmd_addr_reg, cb_phy | BIT0 | BIT1);
+
+		if (is_dual_dsi(dev)) {
+			retry = MDFLD_DSI_DBI_FIFO_TIMEOUT;
+			while (retry && (REG_READ(sender->mipi_cmd_addr_reg + MIPIC_REG_OFFSET) & BIT0)) {
+				udelay(1);
+				retry--;
+			}
+		}
+
+		retry = MDFLD_DSI_DBI_FIFO_TIMEOUT;
+		while (retry && (REG_READ(sender->mipi_cmd_addr_reg) & BIT0)) {
+			usleep_range(990, 1010);
+			retry--;
+		}
+		mutex_unlock(&sender->lock);
+		return 0;
+	}
+
+	if (param_num == 0)
+		err =  mdfld_dsi_send_mcs_short_hs(sender, dcs, 0, 0, delay);
+	else if (param_num == 1)
+		err =  mdfld_dsi_send_mcs_short_hs(sender, dcs, param[0], 1,
+				delay);
+	else if (param_num > 1) {
+		/*transfer to dcs package*/
+		pSendparam = kmalloc(sizeof(u8) * (param_num + 1), GFP_KERNEL);
+		if (!pSendparam) {
+			DRM_ERROR("No memory\n");
+			return -ENOMEM;
+		}
+
+		(*pSendparam) = dcs;
+
+		dst = pSendparam + 1;
+		memcpy(dst, param, param_num);
+
+		err = mdfld_dsi_send_mcs_long_hs(sender, pSendparam,
+				param_num + 1, delay);
+
+		/*free pkg*/
+		kfree(pSendparam);
+	}
+
+	return err;
+}
+
+int mdfld_dsi_send_mcs_short_hs(struct mdfld_dsi_pkg_sender *sender,
+				u8 cmd, u8 param, u8 param_num, int delay)
+{
+	if (!sender) {
+		DRM_ERROR("Invalid parameter\n");
+		return -EINVAL;
+	}
+
+	return mdfld_dsi_send_mcs_short(sender, cmd, param, param_num,
+			MDFLD_DSI_HS_TRANSMISSION, delay);
+}
+
+int mdfld_dsi_send_mcs_short_lp(struct mdfld_dsi_pkg_sender *sender,
+				u8 cmd, u8 param, u8 param_num, int delay)
+{
+	if (!sender) {
+		DRM_ERROR("Invalid parameter\n");
+		return -EINVAL;
+	}
+
+	return mdfld_dsi_send_mcs_short(sender, cmd, param, param_num,
+			MDFLD_DSI_LP_TRANSMISSION, delay);
+}
+
+int mdfld_dsi_send_mcs_long_hs(struct mdfld_dsi_pkg_sender *sender,
+				u8 *data,
+				u32 len,
+				int delay)
+{
+	if (!sender || !data || !len) {
+		DRM_ERROR("Invalid parameters\n");
+		return -EINVAL;
+	}
+
+	return mdfld_dsi_send_mcs_long(sender, data, len,
+			MDFLD_DSI_HS_TRANSMISSION, delay);
+}
+
+int mdfld_dsi_send_mcs_long_lp(struct mdfld_dsi_pkg_sender *sender,
+				u8 *data,
+				u32 len,
+				int delay)
+{
+	if (!sender || !data || !len) {
+		DRM_ERROR("Invalid parameters\n");
+		return -EINVAL;
+	}
+
+	return mdfld_dsi_send_mcs_long(sender, data, len,
+			MDFLD_DSI_LP_TRANSMISSION, delay);
+}
+
+int mdfld_dsi_send_gen_short_hs(struct mdfld_dsi_pkg_sender *sender,
+				u8 param0, u8 param1, u8 param_num, int delay)
+{
+	if (!sender) {
+		DRM_ERROR("Invalid parameter\n");
+		return -EINVAL;
+	}
+
+	return mdfld_dsi_send_gen_short(sender, param0, param1, param_num,
+			MDFLD_DSI_HS_TRANSMISSION, delay);
+}
+
+int mdfld_dsi_send_gen_short_lp(struct mdfld_dsi_pkg_sender *sender,
+				u8 param0, u8 param1, u8 param_num, int delay)
+{
+	if (!sender || param_num < 0 || param_num > 2) {
+		DRM_ERROR("Invalid parameter\n");
+		return -EINVAL;
+	}
+
+	return mdfld_dsi_send_gen_short(sender, param0, param1, param_num,
+			MDFLD_DSI_LP_TRANSMISSION, delay);
+}
+
+int mdfld_dsi_send_gen_long_hs(struct mdfld_dsi_pkg_sender *sender,
+				u8 *data,
+				u32 len,
+				int delay)
+{
+	if (!sender || !data || !len) {
+		DRM_ERROR("Invalid parameters\n");
+		return -EINVAL;
+	}
+
+	return mdfld_dsi_send_gen_long(sender, data, len,
+			MDFLD_DSI_HS_TRANSMISSION, delay);
+}
+
+int mdfld_dsi_send_gen_long_lp(struct mdfld_dsi_pkg_sender *sender,
+				u8 *data,
+				u32 len,
+				int delay)
+{
+	if (!sender || !data || !len) {
+		DRM_ERROR("Invalid parameters\n");
+		return -EINVAL;
+	}
+
+	return mdfld_dsi_send_gen_long(sender, data, len,
+			MDFLD_DSI_LP_TRANSMISSION, delay);
+}
+
+int mdfld_dsi_read_gen_hs(struct mdfld_dsi_pkg_sender *sender,
+			u8 param0,
+			u8 param1,
+			u8 param_num,
+			u8 *data,
+			u32 len)
+{
+	if (!sender || !data || param_num < 0 || param_num > 2
+		|| !data || !len) {
+		DRM_ERROR("Invalid parameters\n");
+		return -EINVAL;
+	}
+
+	return mdfld_dsi_read_gen(sender, param0, param1, param_num,
+				data, len, MDFLD_DSI_HS_TRANSMISSION);
+
+}
+
+int mdfld_dsi_read_gen_lp(struct mdfld_dsi_pkg_sender *sender,
+			u8 param0,
+			u8 param1,
+			u8 param_num,
+			u8 *data,
+			u32 len)
+{
+	if (!sender || !data || param_num < 0 || param_num > 2
+		|| !data || !len) {
+		DRM_ERROR("Invalid parameters\n");
+		return -EINVAL;
+	}
+
+	return mdfld_dsi_read_gen(sender, param0, param1, param_num,
+				data, len, MDFLD_DSI_LP_TRANSMISSION);
+}
+
+int mdfld_dsi_read_mcs_hs(struct mdfld_dsi_pkg_sender *sender,
+			u8 cmd,
+			u8 *data,
+			u32 len)
+{
+	if (!sender || !data || !len) {
+		DRM_ERROR("Invalid parameters\n");
+		return -EINVAL;
+	}
+
+	return mdfld_dsi_read_mcs(sender, cmd, data, len,
+				MDFLD_DSI_HS_TRANSMISSION);
+}
+EXPORT_SYMBOL(mdfld_dsi_read_mcs_hs);
+
+int mdfld_dsi_read_mcs_lp(struct mdfld_dsi_pkg_sender *sender,
+			u8 cmd,
+			u8 *data,
+			u32 len)
+{
+	if (!sender || !data || !len) {
+		DRM_ERROR("Invalid parameters\n");
+		return -EINVAL;
+	}
+
+	return mdfld_dsi_read_mcs(sender, cmd, data, len,
+				MDFLD_DSI_LP_TRANSMISSION);
+}
+
+int mdfld_dsi_send_dpi_spk_pkg_hs(struct mdfld_dsi_pkg_sender *sender,
+				u32 spk_pkg)
+{
+	if (!sender) {
+		DRM_ERROR("Invalid parameters\n");
+		return -EINVAL;
+	}
+
+	return mdfld_dsi_send_dpi_spk_pkg(sender, spk_pkg,
+				MDFLD_DSI_HS_TRANSMISSION);
+}
+
+int mdfld_dsi_send_dpi_spk_pkg_lp(struct mdfld_dsi_pkg_sender *sender,
+				u32 spk_pkg)
+{
+	if (!sender) {
+		DRM_ERROR("Invalid parameters\n");
+		return -EINVAL;
+	}
+
+	return mdfld_dsi_send_dpi_spk_pkg(sender, spk_pkg,
+				MDFLD_DSI_LP_TRANSMISSION);
+}
+
+int mdfld_dsi_wait_for_fifos_empty(struct mdfld_dsi_pkg_sender *sender)
+{
+	return wait_for_all_fifos_empty(sender);
+}
+
+void mdfld_dsi_report_te(struct mdfld_dsi_pkg_sender *sender)
+{
+	if (sender)
+		atomic64_inc(&sender->te_seq);
+}
+
+int mdfld_dsi_pkg_sender_init(struct mdfld_dsi_connector *dsi_connector,
+		int pipe)
+{
+	int ret;
+	struct mdfld_dsi_pkg_sender *pkg_sender;
+	struct mdfld_dsi_config *dsi_config =
+		mdfld_dsi_get_config(dsi_connector);
+	struct drm_device *dev;
+	struct drm_psb_private *dev_priv;
+	struct psb_gtt *pg;
+	struct mdfld_dsi_pkg *pkg, *tmp;
+	int i;
+
+	if (!dsi_config) {
+		DRM_ERROR("dsi_config is NULL\n");
+		return -EINVAL;
+	}
+
+	dev = dsi_config->dev;
+	dev_priv = dev->dev_private;
+	pg = dev_priv->pg;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (!dsi_connector) {
+		DRM_ERROR("Invalid parameter\n");
+		return -EINVAL;
+	}
+
+	pkg_sender = dsi_connector->pkg_sender;
+
+	if (!pkg_sender || IS_ERR(pkg_sender)) {
+		pkg_sender = kzalloc(sizeof(struct mdfld_dsi_pkg_sender),
+				GFP_KERNEL);
+		if (!pkg_sender) {
+			DRM_ERROR("Create DSI pkg sender failed\n");
+			return -ENOMEM;
+		}
+
+		dsi_connector->pkg_sender = (void *)pkg_sender;
+	}
+
+	pkg_sender->dev = dev;
+	pkg_sender->dsi_connector = dsi_connector;
+	pkg_sender->pipe = pipe;
+	pkg_sender->pkg_num = 0;
+	pkg_sender->panel_mode = 0;
+	pkg_sender->status = MDFLD_DSI_PKG_SENDER_FREE;
+
+	/*int dbi command buffer*/
+	if (dsi_config->type == MDFLD_DSI_ENCODER_DBI) {
+		pkg_sender->dbi_pkg_support = 1;
+		ret = mdfld_dbi_cb_init(pkg_sender, pg, pipe);
+		if (ret) {
+			DRM_ERROR("DBI command buffer map failed\n");
+			goto mapping_err;
+		}
+	}
+
+	/*init regs*/
+	if (pipe == 0) {
+		pkg_sender->dpll_reg = MRST_DPLL_A;
+		pkg_sender->dspcntr_reg = DSPACNTR;
+		pkg_sender->pipeconf_reg = PIPEACONF;
+		pkg_sender->dsplinoff_reg = DSPALINOFF;
+		pkg_sender->dspsurf_reg = DSPASURF;
+		pkg_sender->pipestat_reg = PIPEASTAT;
+
+		pkg_sender->mipi_intr_stat_reg = MIPIA_INTR_STAT_REG;
+		pkg_sender->mipi_lp_gen_data_reg = MIPIA_LP_GEN_DATA_REG;
+		pkg_sender->mipi_hs_gen_data_reg = MIPIA_HS_GEN_DATA_REG;
+		pkg_sender->mipi_lp_gen_ctrl_reg = MIPIA_LP_GEN_CTRL_REG;
+		pkg_sender->mipi_hs_gen_ctrl_reg = MIPIA_HS_GEN_CTRL_REG;
+		pkg_sender->mipi_gen_fifo_stat_reg = MIPIA_GEN_FIFO_STAT_REG;
+		pkg_sender->mipi_data_addr_reg = MIPIA_DATA_ADD_REG;
+		pkg_sender->mipi_data_len_reg = MIPIA_DATA_LEN_REG;
+		pkg_sender->mipi_cmd_addr_reg = MIPIA_CMD_ADD_REG;
+		pkg_sender->mipi_cmd_len_reg = MIPIA_CMD_LEN_REG;
+		pkg_sender->mipi_dpi_control_reg = MIPIA_DPI_CONTROL_REG;
+	} else if (pipe == 2) {
+		pkg_sender->dpll_reg = MRST_DPLL_A;
+		pkg_sender->dspcntr_reg = DSPCCNTR;
+		pkg_sender->pipeconf_reg = PIPECCONF;
+		pkg_sender->dsplinoff_reg = DSPCLINOFF;
+		pkg_sender->dspsurf_reg = DSPCSURF;
+		pkg_sender->pipestat_reg = 72024;
+
+		pkg_sender->mipi_intr_stat_reg =
+			MIPIA_INTR_STAT_REG + MIPIC_REG_OFFSET;
+		pkg_sender->mipi_lp_gen_data_reg =
+			MIPIA_LP_GEN_DATA_REG + MIPIC_REG_OFFSET;
+		pkg_sender->mipi_hs_gen_data_reg =
+			MIPIA_HS_GEN_DATA_REG + MIPIC_REG_OFFSET;
+		pkg_sender->mipi_lp_gen_ctrl_reg =
+			MIPIA_LP_GEN_CTRL_REG + MIPIC_REG_OFFSET;
+		pkg_sender->mipi_hs_gen_ctrl_reg =
+			MIPIA_HS_GEN_CTRL_REG + MIPIC_REG_OFFSET;
+		pkg_sender->mipi_gen_fifo_stat_reg =
+			MIPIA_GEN_FIFO_STAT_REG + MIPIC_REG_OFFSET;
+		pkg_sender->mipi_data_addr_reg =
+			MIPIA_DATA_ADD_REG + MIPIC_REG_OFFSET;
+		pkg_sender->mipi_data_len_reg =
+			MIPIA_DATA_LEN_REG + MIPIC_REG_OFFSET;
+		pkg_sender->mipi_cmd_addr_reg =
+			MIPIA_CMD_ADD_REG + MIPIC_REG_OFFSET;
+		pkg_sender->mipi_cmd_len_reg =
+			MIPIA_CMD_LEN_REG + MIPIC_REG_OFFSET;
+		pkg_sender->mipi_dpi_control_reg =
+			MIPIA_DPI_CONTROL_REG + MIPIC_REG_OFFSET;
+	}
+
+	/*init pkg list*/
+	INIT_LIST_HEAD(&pkg_sender->pkg_list);
+	INIT_LIST_HEAD(&pkg_sender->free_list);
+
+	/*init lock*/
+	mutex_init(&pkg_sender->lock);
+
+	/*allocate free pkg pool*/
+	for (i = 0; i < MDFLD_MAX_PKG_NUM; i++) {
+		pkg = kzalloc(sizeof(struct mdfld_dsi_pkg), GFP_KERNEL);
+		if (!pkg) {
+			ret = -ENOMEM;
+			goto pkg_alloc_err;
+		}
+
+		INIT_LIST_HEAD(&pkg->entry);
+
+		/*append to free list*/
+		list_add_tail(&pkg->entry, &pkg_sender->free_list);
+	}
+
+	/*init te & screen update seqs*/
+	atomic64_set(&pkg_sender->te_seq, 0);
+	atomic64_set(&pkg_sender->last_screen_update, 0);
+
+	PSB_DEBUG_ENTRY("initialized\n");
+
+	return 0;
+
+pkg_alloc_err:
+	list_for_each_entry_safe(pkg, tmp, &pkg_sender->free_list, entry) {
+		list_del(&pkg->entry);
+		kfree(pkg);
+	}
+
+	/*free mapped command buffer*/
+	mdfld_dbi_cb_destroy(pkg_sender);
+mapping_err:
+	kfree(pkg_sender);
+	dsi_connector->pkg_sender = NULL;
+
+	return ret;
+}
+
+void mdfld_dsi_pkg_sender_destroy(struct mdfld_dsi_pkg_sender *sender)
+{
+	struct mdfld_dsi_pkg *pkg, *tmp;
+
+	if (!sender || IS_ERR(sender))
+		return;
+
+	/*free pkg pool*/
+	list_for_each_entry_safe(pkg, tmp, &sender->free_list, entry) {
+		list_del(&pkg->entry);
+		kfree(pkg);
+	}
+
+	/*free pkg list*/
+	list_for_each_entry_safe(pkg, tmp, &sender->pkg_list, entry) {
+		list_del(&pkg->entry);
+		kfree(pkg);
+	}
+
+	/*free mapped command buffer*/
+	mdfld_dbi_cb_destroy(sender);
+
+	/*free*/
+	kfree(sender);
+
+	PSB_DEBUG_ENTRY("destroyed\n");
+}
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/mdfld_dsi_pkg_sender.h b/drivers/external_drivers/intel_media/display/tng/drv/mdfld_dsi_pkg_sender.h
new file mode 100644
index 0000000..c3f31dc
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/mdfld_dsi_pkg_sender.h
@@ -0,0 +1,224 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Jackie Li<yaodong.li@intel.com>
+ */
+#ifndef __MDFLD_DSI_PKG_SENDER_H__
+#define __MDFLD_DSI_PKG_SENDER_H__
+
+#include <linux/kthread.h>
+
+#define MDFLD_MAX_DCS_PARAM	8
+#define MDFLD_MAX_PKG_NUM	2048
+
+enum {
+	MDFLD_DSI_PKG_DCS,
+	MDFLD_DSI_DPI_SPK,
+	MDFLD_DSI_PKG_GEN_SHORT_WRITE_0 = 0x03,
+	MDFLD_DSI_PKG_GEN_SHORT_WRITE_1 = 0x13,
+	MDFLD_DSI_PKG_GEN_SHORT_WRITE_2 = 0x23,
+	MDFLD_DSI_PKG_GEN_READ_0 = 0x04,
+	MDFLD_DSI_PKG_GEN_READ_1 = 0x14,
+	MDFLD_DSI_PKG_GEN_READ_2 = 0x24,
+	MDFLD_DSI_PKG_GEN_LONG_WRITE = 0x29,
+	MDFLD_DSI_PKG_MCS_SHORT_WRITE_0 = 0x05,
+	MDFLD_DSI_PKG_MCS_SHORT_WRITE_1 = 0x15,
+	MDFLD_DSI_PKG_MCS_READ = 0x06,
+	MDFLD_DSI_PKG_MCS_LONG_WRITE = 0x39,
+};
+
+enum {
+	MDFLD_DSI_DPI_SPK_SHUT_DOWN = BIT0,
+	MDFLD_DSI_DPI_SPK_TURN_ON = BIT1,
+	MDFLD_DSI_DPI_SPK_COLOR_MODE_ON = BIT2,
+	MDFLD_DSI_DPI_SPK_COLOR_MODE_OFF = BIT3,
+	MDFLD_DSI_DPI_SPK_BACKLIGHT_ON = BIT4,
+	MDFLD_DSI_DPI_SPK_BACKLIGHT_OFF = BIT5,
+	MDFLD_DSI_DPI_SPK_RESET_TRIGGER = BIT6,
+};
+
+enum {
+	MDFLD_DSI_LP_TRANSMISSION,
+	MDFLD_DSI_HS_TRANSMISSION,
+	MDFLD_DSI_DCS,
+};
+
+enum {
+	MDFLD_DSI_PANEL_MODE_SLEEP = 0x1,
+};
+
+enum {
+	MDFLD_DSI_PKG_SENDER_FREE  = 0x0,
+	MDFLD_DSI_PKG_SENDER_BUSY  = 0x1,
+	MDFLD_DSI_CONTROL_ABNORMAL = 0x2,
+};
+
+enum {
+	MDFLD_DSI_SEND_PACKAGE,
+	MDFLD_DSI_QUEUE_PACKAGE,
+};
+
+struct mdfld_dsi_gen_short_pkg {
+	u8 cmd;
+	u8 param;
+};
+
+struct mdfld_dsi_gen_long_pkg {
+	u8 *data;
+	u32 len;
+};
+
+struct mdfld_dsi_dcs_pkg {
+	u8 cmd;
+	u8 *param;
+	u32 param_num;
+	u8 data_src;
+};
+
+struct mdfld_dsi_dpi_spk_pkg { u32 cmd; };
+
+struct mdfld_dsi_pkg {
+	u8 pkg_type;
+	u8 transmission_type;
+
+	union {
+		struct mdfld_dsi_gen_short_pkg short_pkg;
+		struct mdfld_dsi_gen_long_pkg long_pkg;
+		struct mdfld_dsi_dcs_pkg dcs_pkg;
+		struct mdfld_dsi_dpi_spk_pkg spk_pkg;
+	} pkg;
+
+	struct list_head entry;
+};
+
+struct mdfld_dsi_pkg_sender {
+	struct drm_device *dev;
+	struct mdfld_dsi_connector *dsi_connector;
+	u32 status;
+
+	u32 panel_mode;
+
+	int pipe;
+	bool work_for_slave_panel;
+
+	struct mutex lock;
+	struct list_head pkg_list;
+	struct list_head free_list;
+
+	u32 pkg_num;
+
+	int dbi_pkg_support;
+
+	u32 dbi_cb_phy;
+	void *dbi_cb_addr;
+
+	atomic64_t te_seq;
+	atomic64_t last_screen_update;
+
+	/*registers*/
+	u32 dpll_reg;
+	u32 dspcntr_reg;
+	u32 pipeconf_reg;
+	u32 pipestat_reg;
+	u32 dsplinoff_reg;
+	u32 dspsurf_reg;
+
+	u32 mipi_intr_stat_reg;
+	u32 mipi_lp_gen_data_reg;
+	u32 mipi_hs_gen_data_reg;
+	u32 mipi_lp_gen_ctrl_reg;
+	u32 mipi_hs_gen_ctrl_reg;
+	u32 mipi_gen_fifo_stat_reg;
+	u32 mipi_data_addr_reg;
+	u32 mipi_data_len_reg;
+	u32 mipi_cmd_addr_reg;
+	u32 mipi_cmd_len_reg;
+	u32 mipi_dpi_control_reg;
+};
+
+extern int mdfld_dsi_pkg_sender_init(struct mdfld_dsi_connector *dsi_connector,
+		int pipe);
+extern void mdfld_dsi_pkg_sender_destroy(struct mdfld_dsi_pkg_sender *sender);
+extern int mdfld_dsi_check_fifo_empty(struct mdfld_dsi_pkg_sender *sender);
+extern int mdfld_dsi_send_dcs(struct mdfld_dsi_pkg_sender *sender,
+			u8 dcs, u8 *param, u32 param_num, u8 data_src,
+			int delay);
+extern int mdfld_dsi_send_mcs_short_hs(struct mdfld_dsi_pkg_sender *sender,
+					u8 cmd, u8 param, u8 param_num,
+					int delay);
+extern int mdfld_dsi_send_mcs_short_lp(struct mdfld_dsi_pkg_sender *sender,
+					u8 cmd, u8 param, u8 param_num,
+					int delay);
+extern int mdfld_dsi_send_mcs_long_hs(struct mdfld_dsi_pkg_sender *sender,
+					u8 *data,
+					u32 len,
+					int delay);
+extern int mdfld_dsi_send_mcs_long_lp(struct mdfld_dsi_pkg_sender *sender,
+					u8 *data,
+					u32 len,
+					int delay);
+extern int mdfld_dsi_send_gen_short_hs(struct mdfld_dsi_pkg_sender *sender,
+					u8 param0, u8 param1, u8 param_num,
+					int delay);
+extern int mdfld_dsi_send_gen_short_lp(struct mdfld_dsi_pkg_sender *sender,
+					u8 param0, u8 param1, u8 param_num,
+					int delay);
+extern int mdfld_dsi_send_gen_long_hs(struct mdfld_dsi_pkg_sender *sender,
+				u8 *data,
+				u32 len,
+				int delay);
+extern int mdfld_dsi_send_gen_long_lp(struct mdfld_dsi_pkg_sender *sender,
+				u8 *data,
+				u32 len,
+				int delay);
+extern int mdfld_dsi_send_dpi_spk_pkg_hs(struct mdfld_dsi_pkg_sender *sender,
+				u32 spk_pkg);
+extern int mdfld_dsi_send_dpi_spk_pkg_lp(struct mdfld_dsi_pkg_sender *sender,
+				u32 spk_pkg);
+extern int mdfld_dsi_cmds_kick_out(struct mdfld_dsi_pkg_sender *sender);
+extern void mdfld_dsi_report_te(struct mdfld_dsi_pkg_sender *sender);
+extern int mdfld_dsi_status_check(struct mdfld_dsi_pkg_sender *sender);
+
+/*read interfaces*/
+extern int mdfld_dsi_read_gen_hs(struct mdfld_dsi_pkg_sender *sender,
+			u8 param0,
+			u8 param1,
+			u8 param_num,
+			u8 *data,
+			u32 len);
+extern int mdfld_dsi_read_gen_lp(struct mdfld_dsi_pkg_sender *sender,
+			u8 param0,
+			u8 param1,
+			u8 param_num,
+			u8 *data,
+			u32 len);
+extern int mdfld_dsi_read_mcs_hs(struct mdfld_dsi_pkg_sender *sender,
+			u8 cmd,
+			u8 *data,
+			u32 len);
+extern int mdfld_dsi_read_mcs_lp(struct mdfld_dsi_pkg_sender *sender,
+			u8 cmd,
+			u8 *data,
+			u32 len);
+extern int mdfld_dsi_wait_for_fifos_empty(struct mdfld_dsi_pkg_sender *sender);
+#endif
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/mdfld_hdcp.h b/drivers/external_drivers/intel_media/display/tng/drv/mdfld_hdcp.h
new file mode 100644
index 0000000..f8ac371
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/mdfld_hdcp.h
@@ -0,0 +1,194 @@
+/*
+ * Copyright (c) 2010, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *	jim liu <jim.liu@intel.com>
+ */
+
+#ifndef MDFLD_HDCP_H
+#define MDFLD_HDCP_H
+
+#include "mdfld_hdcp_if.h"
+
+#define MAX_HDCP_DEVICES 127
+#define KSV_SIZE           5
+#define V_SIZE            20
+
+#define HDCP_MAX_RETRY_STATUS 1500
+
+#define HDCP_100MS_DELAY 100	//
+
+#define HDCP_BKSV_BYTES 5
+
+typedef struct _hdcp_priv_data {
+	int enabled;
+	int drmFD;
+	int output_id;
+} hdcp_priv_data_t;
+typedef struct _sqwordt {
+	union {
+		unsigned long long quad_part;	//ULONGLONG QuadPart;
+		struct {
+			unsigned long low_part;
+			unsigned long high_part;
+		} u;
+		struct {
+			uint8_t byte[8];
+		};
+	};
+} sqword_tt; //need_check_later
+// A quadword size member 
+//
+typedef struct _hqword {
+	union {
+		struct {
+			uint64_t major_part:40;	// lower 40 bits
+			uint64_t unused1:24;
+		};
+		struct {
+			unsigned major_part_low:32;	// lower 32 bits
+			unsigned major_part_high:8;	// lower  bits
+			unsigned unused2:24;
+		};
+		struct {
+			uint8_t byte[8];
+		};
+	};
+} hqword_t;
+
+// HDCP related definitions are kept here for common usability between 
+// Integrated and External SDVO based HDCP operations
+//I2C Address for HDCP cummnication with Receiver
+#define RX_ADDRESS      0x74000000	// HDCP Port I2C Address (Single Link)
+				    //  shifted for call back function
+
+//I2C Subaddress Defines - As per the HDCP Spec
+// Downstream spec does not specify which is MSB and LSB?
+#define RX_BKSV_0       0x00	// BKSV[7:0]
+#define RX_BKSV_1       0x01	// BKSV[15:8]
+#define RX_BKSV_2       0x02	// BKSV[23:16]
+#define RX_BKSV_3       0x03	// BKSV[31:24]
+#define RX_BKSV_4       0x04	// BKSV[39:32]
+#define RX_RI_HIGH      0x08	// Ri'[7:0]
+#define RX_RI_LOW       0x09	// Ri'[15:8]
+#define RX_AKSV_0       0x10	// AKSV[7:0]
+#define RX_AKSV_1       0x11	// AKSV[15:8]
+#define RX_AKSV_2       0x12	// AKSV[23:16]
+#define RX_AKSV_3       0x13	// AKSV[31:24]
+#define RX_AKSV_4       0x14	// AKSV[39:32]... write this byte last
+#define RX_AINFO        0x15	// Receiver register to inform it to enable 1.1 features
+#define RX_AN_0         0x18	// An[7:0]
+#define RX_AN_1         0x19	// An[15:8]
+#define RX_AN_2         0x1A	// An[23:16]
+#define RX_AN_3         0x1B	// An[31:24]
+#define RX_AN_4         0x1C	// An[39:32]
+#define RX_AN_5         0x1D	// An[47:40]
+#define RX_AN_6         0x1E	// An[55:48]
+#define RX_AN_7         0x1F	// An[63:56]
+#define RX_VPRIME_H0_0  0x20	// V'[7:0]
+#define RX_VPRIME_H0_1  0x21	// V'[15:8]
+#define RX_VPRIME_H0_2  0x22	// V'[23:16]
+#define RX_VPRIME_H0_3  0x23	// V'[31:24]
+#define RX_VPRIME_H1_0  0x24	// V'[39:32]
+#define RX_VPRIME_H1_1  0x25	// V'[47:40]
+#define RX_VPRIME_H1_2  0x26	// V'[55:48]
+#define RX_VPRIME_H1_3  0x27	// V'[63:56]
+#define RX_VPRIME_H2_0  0x28	// V'[71:64]
+#define RX_VPRIME_H2_1  0x29	// V'[79:72]
+#define RX_VPRIME_H2_2  0x2A	// V'[87:80]
+#define RX_VPRIME_H2_3  0x2B	// V'[95:88]
+#define RX_VPRIME_H3_0  0x2C	// V'[103:96]
+#define RX_VPRIME_H3_1  0x2D	// V'[111:104]
+#define RX_VPRIME_H3_2  0x2E	// V'[119:112]
+#define RX_VPRIME_H3_3  0x2F	// V'[127:120]
+#define RX_VPRIME_H4_0  0x30	// V'[135:128]
+#define RX_VPRIME_H4_1  0x31	// V'[143:136]
+#define RX_VPRIME_H4_2  0x32	// V'[151:144]
+#define RX_VPRIME_H4_3  0x33	// V'[159:152]
+#define RX_BCAPS        0x40	// [7] RSVD, [6] Repeater, [5] Ready, [4] Fast, [3:2] RSVD, [1] Features, [0] Fast_reauthentication
+#define RX_BSTATUS_0    0x41	// [7] MAX_DEVS_EXCEEDED, [6:0] DEVICE_COUNT
+#define RX_BSTATUS_1    0x42	// [15:14] RSVD, [13] HDMI_RSVD, [12] HDMI_MODE, [11] MAX_CASCADE_EXCEEDED, [10:8] DEPTH
+#define RX_KSV_FIFO     0x43
+
+typedef enum _mdfld_hdcp_rx_data_type_enum {
+	RX_TYPE_BKSV_DATA = 0,
+	RX_TYPE_BCAPS = 1,
+	RX_TYPE_BSTATUS = 2,
+	RX_TYPE_REPEATER_KSV_LIST = 3,
+	RX_TYPE_REPEATER_PRIME_V = 4,
+	RX_TYPE_RI_DATA = 5,
+	RX_TYPE_BINFO = 6
+} mdfld_hdcp_rx_data_type_en;
+
+typedef struct _hdcp_bstatus {
+	unsigned device_count:7;	// [6:0] Total Number of Receiver Devices (excluding repeaters) attached
+	unsigned max_devices_exceeded:1;	// [7] Topology Error. Greater than 127 devices attached
+	unsigned repeater_depth:3;	// [10:8] Repeater depth 
+	unsigned max_cascade_exceeded:1;	// [11] Topology Error. Greater than 7 levels of Repeater attached
+	unsigned reserved:20;	// [31:12] Reserved for future expansion
+} hdcp_bstatus_t;
+
+//
+// BCAPS
+//
+typedef union _hdcp_rx_bcaps {
+	uint8_t value;
+	struct {
+		uint8_t fast_reauthantication:1;	// bit 0
+		uint8_t b1_1features_supported:1;	// bit 1
+		uint8_t reserved:2;	// bi 3:2
+		uint8_t fast_transfer:1;	// bit 4 ( TRUE  = transfer speed at 400 kHz FALSE = transfer speed at 100 Khz)
+		uint8_t ksv_fifo_ready:1;	// bit 5
+		uint8_t is_reapeater:1;	// bit 6
+		uint8_t reserved1:1;	// bit 7
+	};
+} hdcp_rx_bcaps_t;
+
+//
+// BSTATUS
+//
+typedef union _hdcp_rx_bstatus {
+	uint16_t value;
+	struct {
+		uint16_t device_count:7;	// bit 6:0
+		uint16_t max_devs_exceeded:1;	// bit 7
+		uint16_t depth:3;	// bit 10:8
+		uint16_t max_cascade_exceeded:1;	// bit 11
+		uint16_t rx_in_hdmi_mode:1;	// bit 12
+		uint16_t rserved:3;	// bit 15:13
+	};
+} hdcp_rx_bstatus_t;
+
+// HDCP authentication step
+typedef enum _hdcp_authentication_step {
+	HDCP_AUTHENTICATION_STEP_NONE = 0,
+	HDCP_AUTHENTICATION_STEP_1 = 1,
+	HDCP_AUTHENTICATION_STEP_2 = 2,
+} hdcp_authentication_step_t;
+
+// KSV_GET
+typedef struct _aksv_get {
+	uint8_t uc_aksv[CP_HDCP_KEY_SELECTION_VECTOR_SIZE];
+} aksv_get_t;
+
+int hdcp_init(void **data, int drmFD, int output_id);
+int hdcp_uninit(void *data);
+int hdcp_query(void);
+int hdcp_is_valid_bksv(uint8_t * buffer, uint32_t size);
+int hdcp_is_enabled(void);
+int hdcp_enable(int enable);
+int read_hdcp_port(uint32_t read_request_type, uint8_t * buffer, int size);
+#endif				/* MDFLD_HDCP_H */
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/mdfld_hdcp_if.h b/drivers/external_drivers/intel_media/display/tng/drv/mdfld_hdcp_if.h
new file mode 100644
index 0000000..f585e94
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/mdfld_hdcp_if.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2010, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *	jim liu <jim.liu@intel.com>
+ */
+
+#ifndef MDFLD_HDCP_IF_H
+#define MDFLD_HDCP_IF_H
+
+// Constants
+#define CP_HDCP_KEY_SELECTION_VECTOR_SIZE       5
+
+// Protection level (HDCP)
+typedef enum _cp_protection_level_hdcp {
+	CP_PROTECTION_LEVEL_HDCP_OFF = 0,
+	CP_PROTECTION_LEVEL_HDCP_ON = 1,
+} cp_protection_level_hdcp_t;
+
+// Protection type
+typedef enum _cp_protection_type {
+	CP_PROTECTION_TYPE_UNKNOWN = 0x80000000,
+	CP_PROTECTION_TYPE_NONE = 0x00000000,
+	CP_PROTECTION_TYPE_HDCP = 0x00000001,
+	CP_PROTECTION_TYPE_MASK = 0x80000001,
+} cp_protection_type_t;
+
+typedef enum _cp_status {
+	STATUS_UNSUCCESSFUL = 0x80000000,
+	STATUS_SUCCESS = 0x00000000,
+	STATUS_NOT_SUPPORTED = 0x00000001,
+	STATUS_INVALID_DEVICE_REQUEST = 0x00000002,
+	STATUS_REVOKED_HDCP_DEVICE_ATTACHED = 0x00000003,
+	STATUS_DATA_ERROR = 0x00000004,
+	STATUS_PENDING = 0x00000005,
+	STATUS_INVALID_PARAMETER = 0x00000006,
+} cp_status_t;
+
+// KSV
+typedef struct _ksv_t {
+	uint8_t ab_ksv[CP_HDCP_KEY_SELECTION_VECTOR_SIZE];
+} ksv_t;
+
+// HDCP
+typedef struct _hdcp_data {
+	uint32_t ksv_list_length;	// Length of the revoked KSV list (set)
+	//ksv_t                       aksv;               // KSV of attached device
+	//ksv_t                       bksv;               // KSV of attached device
+	ksv_t *ksv_list;	// List of revoked KSVs (set)
+	int perform_second_step;	// True when the second authentication step is requested (get)
+	int is_repeater;	// True when a repeater is attached to the connector (get and set)
+} hdcp_data_t;
+
+// CP Parameters
+typedef struct _cp_parameters {
+	uint32_t protect_type_mask;	// Protection type mask (get and set)
+	uint32_t level;		// Protection level (get and set)
+	hdcp_data_t hdcp;	// HDCP specific data (get and set)
+} cp_parameters_t;
+
+extern uint32_t hdcp_set_cp_data(cp_parameters_t * cp);
+extern uint32_t hdcp_get_cp_data(cp_parameters_t * cp);
+#endif				/* MDFLD_HDCP_IF_H */
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/mdfld_hdcp_reg.h b/drivers/external_drivers/intel_media/display/tng/drv/mdfld_hdcp_reg.h
new file mode 100644
index 0000000..9098646
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/mdfld_hdcp_reg.h
@@ -0,0 +1,227 @@
+/*
+ * Copyright (c) 2010, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *	jim liu <jim.liu@intel.com>
+ */
+
+#ifndef MDFLD_HDCP_REG_H
+#define MDFLD_HDCP_REG_H
+
+/* Integrated HDMI specific registers */
+
+#define RESERVED2(x,y)  x##y
+#define RESERVED1(x,y)  RESERVED2(x,y)
+#define RANDOMNUMBER	__LINE__	// __COUNTER__
+#define UNIQUENAME(ValueName) RESERVED1(ValueName, RANDOMNUMBER)
+
+/* TBD: This may change when tested on actual system */
+#define HDCP_MAX_RI_QUERY_COUNT 4
+#define HDCP_MAX_NUM_DWORDS 4	//128 bits
+#define HDCP_MAX_RANDOMNUM_LENGTH 2	//In DWORD => 64 bits
+#define HDCP_MAX_RETRY_DISABLE 2
+/*All sizes are defined in bytes */
+#define HDCP_SIZEOF_AKSV    8
+#define HDCP_SIZEOF_BKSV    8
+#define HDCP_SIZEOF_AN        5
+#define HDCP_SIZEOF_RI        2
+#define HDCP_ENCRYPTED_KEY_SIZE 12	//Akeys, IV and MAC
+#define HDCP_NUM_AKEYS 40
+#define HDCP_NEXT_RI_FRAME 126
+#define HDCP_MAX_RANDOM_NUM_SIZE 4	//in dwords
+
+#define HDCP_CONVERT_BIG_ENDIAN(x) (((x&0x000000ff)<<24)|\
+                                    ((x&0x0000ff00)<<8)|\
+                                    ((x&0x00ff0000)>>8)|\
+                                    ((x&0xff000000)>>24))
+
+#define HDCP_MAX_AN_RETRY 100
+
+#define HDCP_AN_LO_INDEX 0
+#define HDCP_AN_HI_INDEX 1
+
+uint32_t hdcp_invalid_an_list[6][2] = {
+	{0x881cf9e4, 0x38155bf4}
+	,
+	{0xb0e81640, 0xb5cac2ec}
+	,
+	{0x514fa3e7, 0x5bbb3806}
+	,
+	{0xd1b4923a, 0x6172afbb}
+	,
+	{0x0c16fd1c, 0x1b28baf5}
+	,
+	{0x00000000, 0x00000000}
+};
+
+/* HDMI HDCP Regs */
+/* HDCP config */
+
+typedef enum _mdfld_hdcp_config_enum {
+	HDCP_Off = 0,
+	HDCP_CAPTURE_AN = 1,
+	HDCP_DECRYPT_KEYS = 2,
+	HDCP_AUTHENTICATE_AND_ENCRYPT = 3,
+	HDCP_UNIQUE_MCH_ID = 5,
+	HDCP_ENCRYPT_KEYS = 6,
+	HDCP_CYPHER_CHECK_MODE = 7
+} mdfld_hdcp_config_en;
+
+#define MDFLD_HDCP_CONFIG_REG 0x61400
+#define MDFLD_HDCP_CONFIG_PRESERVED_BITS    BITRANGE(3,31)
+typedef union _mdfld_hdcp_config {
+	uint32_t value;
+
+	struct {
+		uint32_t hdcp_config:3;	//bit 2:0; uses HDCP_CONFIGURATION_EN
+		uint32_t UNIQUENAME(Reserved):29;	//bit 3:31
+	};
+} mdfld_hdcp_config_t;
+
+/* HDCP_STATUS */
+
+#define MDFLD_HDCP_STATUS_REG 0x61448
+#define MDFLD_HDCP_STATUS_PRESERVED_BITS    BITRANGE(24,31)
+typedef union _mdfld_hdcp_status {
+	uint32_t value;
+
+	struct {
+		uint32_t ainfo:8;	//Bit 7:0
+		uint32_t frame_count:8;	//Bit 15:8
+		uint32_t cipher_hdcp_status:1;	//Bit 16
+		uint32_t cipher_an_status:1;	//Bit 17
+		uint32_t cipher_ri_ready_status:1;	//Bit 18
+		uint32_t cipher_ri_match_status:1;	//Bit 19
+		uint32_t cipher_encrypting_status:1;	//Bit 20
+		uint32_t cipher_ready_for_encryption:1;	//Bit 21
+		uint32_t cipher_mch_id_ready:1;	//Bit 22
+		uint32_t cipher_mac_status:1;	//Bit 23
+		uint32_t UNIQUENAME(Reserved):8;	//Bit 31:24
+	};
+} mdfld_hdcp_status_t;
+
+/* HDCP_RI */
+#define MDFLD_HDCP_RECEIVER_RI_REG 0x61418
+#define MDFLD_HDCP_RECEIVER_RI_PRESERVED_BITS    BITRANGE(16,31)
+typedef union _mdfld_hdcp_receiver_ri {
+	uint32_t value;
+
+	struct {
+		uint32_t ri:16;	//bit 15:0
+		uint32_t UNIQUENAME(Reserved):16;	//bit 31:16
+	};
+} mdfld_hdcp_receiver_ri_t;
+
+/* HDCP_BKSV_HI */
+#define MDFLD_HDCP_BKSV_HI_REG 0x6140C
+#define MDFLD_HDCP_BKSV_HI_PRESERVED_BITS BITRANGE(8,31)
+typedef union _mdfld_hdcp_bksv_hi {
+	uint32_t value;
+
+	struct {
+		uint32_t bksv_hi:8;	//bit 7:0
+		uint32_t UNIQUENAME(Reserved):24;	//bit 31:8
+	};
+} mdfld_hdcp_bksv_hi_t;
+
+/* HDCP_AKEY_HI */
+#define MDFLD_HDCP_AKEY_HI_REG 0x61424
+#define MDFLD_HDCP_AKEY_HI_PRESERVED_BITS BITRANGE(20,31)
+typedef union _mdfld_hdcp_akey_hi {
+	uint32_t value;
+
+	struct {
+		uint32_t akey_hi:20;	//bit 7:0
+		uint32_t UNIQUENAME(Reserved):12;	//bit 31:8
+	};
+} mdfld_hdcp_akey_hi_t;
+
+/* HDCP_REP: Repeator specific register definitions */
+
+/* Repeater Control register */
+typedef enum _mdfld_hdcp_repeater_status_enum {
+	HDCP_REPEATER_STATUS_IDLE = 0,
+	HDCP_REPEATER_STATUS_BUSY = 1,
+	HDCP_REPEATER_STATUS_RDY_NEXT_DATA = 2,
+	HDCP_REPEATER_STATUS_COMPLETE_NO_MATCH = 4,
+	HDCP_REPEATER_STATUS_COMPLETE_MATCH = 12
+} mdfld_hdcp_repeater_status_en;
+
+typedef enum _mdfld_hdcp_repeater_ctrl_enum {
+	HDCP_REPEATER_CTRL_IDLE = 0,
+	HDCP_REPEATER_32BIT_TEXT_IP = 1,
+	HDCP_REPEATER_COMPLETE_SHA1 = 2,
+	HDCP_REPEATER_24BIT_TEXT_8BIT_MO_IP = 4,
+	HDCP_REPEATER_16BIT_TEXT_16BIT_MO_IP = 5,
+	HDCP_REPEATER_8BIT_TEXT_24BIT_MO_IP = 6,
+	HDCP_REPEATER_32BIT_MO_IP = 7
+} mdfld_hdcp_repeater_ctrl_en;
+
+#define MDFLD_HDCP_REP_REG 0x61444
+#define MDFLD_HDCP_REP_PRESERVED_BITS BITRANGE(8,31)
+typedef union _mdfld_hdcp_rep {
+	uint32_t value;
+
+	struct {
+		uint32_t repeater_present:1;	//bit 0
+		uint32_t repeater_control:3;	//bit 3:1
+		uint32_t UNIQUENAME(Reserved):12;	//bit 15:4 BUN#: 07ww44#1
+		const uint32_t repeater_status:4;	//bit 19:16
+		uint32_t UNIQUENAME(Reserved):12;	//bit 31:20
+	};
+} mdfld_hdcp_rep_t;
+
+/* HDCP_BKSV_HI */
+#define MDFLD_HDCP_AKSV_HI_REG 0x61450
+#define MDFLD_HDCP_AKSV_HI_PRESERVED_BITS BITRANGE(8,31)
+typedef union _mdfld_hdcp_aksv_hi {
+	uint32_t value;
+
+	struct {
+		uint32_t aksv_hi:8;	//bit 7:0
+		uint32_t UNIQUENAME(Reserved):24;	//bit 31:8
+	};
+} mdfld_hdcp_aksv_hi_t;
+
+typedef union _mdfld_hdcp_aksv {
+	uint8_t byte[8];
+
+	struct {
+		uint32_t low;
+		mdfld_hdcp_aksv_hi_t hi;
+	} aksv;
+} mdfld_hdcp_aksv_t;
+
+/* These holds part of the hash result from the receiver used for repeaters */
+#define MDFLD_HDCP_VPRIME_H0 0x6142C
+#define MDFLD_HDCP_VPRIME_H1 0x61430
+#define MDFLD_HDCP_VPRIME_H2 0x61434
+#define MDFLD_HDCP_VPRIME_H3 0x61438
+#define MDFLD_HDCP_VPRIME_H4 0x6143C
+
+#define MDFLD_HDCP_SHA1_IN    0x61440
+
+/* Define of registers that don't need register definitions */
+#define MDFLD_HDCP_INIT_REG 0x61404
+#define MDFLD_HDCP_AN_LOW_REG 0x61410
+#define MDFLD_HDCP_AN_HI_REG 0x61414
+#define MDFLD_HDCP_BKSV_LOW_REG 0x61408
+#define MDFLD_HDCP_AKSV_LOW_REG 0x61454
+/* Akey registers */
+#define MDFLD_HDCP_AKEY_LO_REG 0x6141C
+#define MDFLD_HDCP_AKEY_MED_REG 0x61420
+
+#endif				/* MDFLD_HDCP_REG_H */
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/mdfld_hdmi_audio.c b/drivers/external_drivers/intel_media/display/tng/drv/mdfld_hdmi_audio.c
new file mode 100644
index 0000000..4b077c3
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/mdfld_hdmi_audio.c
@@ -0,0 +1,415 @@
+/*
+ * Copyright (c) 2010, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *	jim liu <jim.liu@intel.com>
+ */
+
+#include <drm/drmP.h>
+#include <linux/kernel.h>
+#include "psb_drv.h"
+#include "psb_intel_reg.h"
+#include "mdfld_hdmi_audio_if.h"
+#include "android_hdmi.h"
+
+#ifdef CONFIG_SUPPORT_HDMI
+
+/*
+ * Audio register range 0x69000 to 0x69117
+ */
+
+#define IS_HDMI_AUDIO_REG(reg) ((reg >= 0x69000) && (reg < 0x69118))
+
+/*
+ *
+ */
+static struct android_hdmi_priv *hdmi_priv;
+
+void mid_hdmi_audio_init(struct android_hdmi_priv *p_hdmi_priv)
+{
+	hdmi_priv = p_hdmi_priv;
+}
+
+/*
+ * return whether HDMI audio device is busy.
+*/
+bool mid_hdmi_audio_is_busy(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	int hdmi_audio_busy = 0;
+	hdmi_audio_event_t hdmi_audio_event;
+
+	if (hdmi_state == 0) {
+		/* HDMI is not connected, assuming audio device is idle. */
+		return false;
+	}
+
+	if (dev_priv->had_interface) {
+		hdmi_audio_event.type = HAD_EVENT_QUERY_IS_AUDIO_BUSY;
+		hdmi_audio_busy = dev_priv->had_interface->query(
+			dev_priv->had_pvt_data,
+			hdmi_audio_event);
+		return hdmi_audio_busy != 0;
+	}
+	return false;
+}
+
+/*
+ * return whether HDMI audio device is suspended.
+*/
+bool mid_hdmi_audio_suspend(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	hdmi_audio_event_t hdmi_audio_event;
+	int ret = 0;
+
+	PSB_DEBUG_ENTRY("%s: hdmi_state %d", __func__, hdmi_state);
+	if (hdmi_state == 0) {
+		/* HDMI is not connected,
+		*assuming audio device is suspended already.
+		*/
+		return true;
+	}
+
+	if (dev_priv->had_interface) {
+		hdmi_audio_event.type = 0;
+		ret = dev_priv->had_interface->suspend(
+						dev_priv->had_pvt_data,
+						hdmi_audio_event);
+		return (ret == 0) ? true : false;
+	}
+	return true;
+}
+
+void mid_hdmi_audio_resume(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	PSB_DEBUG_ENTRY("%s: hdmi_state %d", __func__, hdmi_state);
+	if (hdmi_state == 0) {
+		/* HDMI is not connected,
+		*  there is no need to resume audio device.
+		*/
+		return;
+	}
+
+	if (dev_priv->had_interface)
+		dev_priv->had_interface->resume(dev_priv->had_pvt_data);
+}
+
+void mid_hdmi_audio_signal_event(
+						struct drm_device *dev,
+						enum had_event_type event)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	if (dev_priv->mdfld_had_event_callbacks)
+		(*dev_priv->mdfld_had_event_callbacks)
+				(event, dev_priv->had_pvt_data);
+}
+
+
+/**
+ * mid_hdmi_audio_write:
+ * used to write into display controller HDMI audio registers.
+ *
+ */
+static int mid_hdmi_audio_write(uint32_t reg, uint32_t val)
+{
+	struct drm_device *dev = hdmi_priv->dev;
+	int ret = 0;
+
+	if (hdmi_priv->monitor_type == MONITOR_TYPE_DVI)
+		return 0;
+
+	if (!is_island_on(OSPM_DISPLAY_B) || !is_island_on(OSPM_DISPLAY_HDMI))
+		return 0;
+
+	if (IS_HDMI_AUDIO_REG(reg))
+		REG_WRITE(reg, val);
+	else
+		ret = -EINVAL;
+
+	return ret;
+}
+
+/**
+ * mid_hdmi_audio_read:
+ * used to get the register value read
+ * from display controller HDMI audio registers.
+ *
+ */
+static int mid_hdmi_audio_read(uint32_t reg, uint32_t *val)
+{
+	struct drm_device *dev = hdmi_priv->dev;
+	int ret = 0;
+
+	if (hdmi_priv->monitor_type == MONITOR_TYPE_DVI)
+		return 0;
+
+	if (!is_island_on(OSPM_DISPLAY_B) || !is_island_on(OSPM_DISPLAY_HDMI))
+		return 0;
+
+	if (IS_HDMI_AUDIO_REG(reg))
+		*val = REG_READ(reg);
+	else
+		ret = -EINVAL;
+
+	return ret;
+}
+
+/**
+ * mid_hdmi_audio_rmw:
+ * used to update the masked bits in display
+ * controller HDMI audio registers .
+ *
+ */
+static int mid_hdmi_audio_rmw(uint32_t reg,
+				uint32_t val, uint32_t mask)
+{
+	struct drm_device *dev = hdmi_priv->dev;
+	int ret = 0;
+	uint32_t val_tmp = 0;
+
+	if (!is_island_on(OSPM_DISPLAY_B) || !is_island_on(OSPM_DISPLAY_HDMI))
+		return 0;
+
+	if (IS_HDMI_AUDIO_REG(reg)) {
+		val_tmp = (val & mask) | (REG_READ(reg) & ~mask);
+		REG_WRITE(reg, val_tmp);
+	} else {
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+/**
+ * mid_hdmi_audio_get_caps:
+ * used to return the HDMI audio capabilities.
+ * e.g. resolution, frame rate.
+ */
+static int mid_hdmi_audio_get_caps(
+						enum had_caps_list get_element,
+						void *capabilities)
+{
+	struct drm_device *dev = hdmi_priv->dev;
+	int ret = 0;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	switch (get_element) {
+	case HAD_GET_ELD:
+		ret = android_hdmi_get_eld(dev, capabilities);
+		break;
+	case HAD_GET_SAMPLING_FREQ:
+	{
+		uint32_t val;
+		val = android_hdmi_get_dpll_clock(dev);
+		memcpy(capabilities, &val, sizeof(uint32_t));
+		break;
+	}
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+/**
+ * mid_hdmi_audio_set_caps:
+ * used to set the HDMI audio capabilities.
+ * e.g. Audio INT.
+ */
+static int mid_hdmi_audio_set_caps(
+				enum had_caps_list set_element,
+				void *capabilties)
+{
+	struct drm_device *dev = hdmi_priv->dev;
+	struct drm_psb_private *dev_priv =
+				(struct drm_psb_private *) dev->dev_private;
+	int ret = 0;
+	u32 hdmib;
+	u32 int_masks = 0;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (!is_island_on(OSPM_DISPLAY_B) || !is_island_on(OSPM_DISPLAY_HDMI))
+		return -EINVAL;
+
+	switch (set_element) {
+	case HAD_SET_ENABLE_AUDIO:
+		if (hdmi_priv->hdmi_audio_enabled) {
+			pr_err("OSPM: %s: hdmi audio has been enabled\n", __func__);
+			return 0;
+		}
+
+		hdmib = REG_READ(hdmi_priv->hdmib_reg);
+
+		if (hdmib & HDMIB_PORT_EN)
+			hdmib |= HDMIB_AUDIO_ENABLE;
+
+		REG_WRITE(hdmi_priv->hdmib_reg, hdmib);
+		REG_READ(hdmi_priv->hdmib_reg);
+		hdmi_priv->hdmi_audio_enabled = true;
+		break;
+	case HAD_SET_DISABLE_AUDIO:
+		if (!hdmi_priv->hdmi_audio_enabled) {
+			pr_err("OSPM: %s: hdmi audio has been disabled\n", __func__);
+			return 0;
+		}
+		hdmib = REG_READ(hdmi_priv->hdmib_reg) & ~HDMIB_AUDIO_ENABLE;
+		REG_WRITE(hdmi_priv->hdmib_reg, hdmib);
+		REG_READ(hdmi_priv->hdmib_reg);
+
+		hdmi_priv->hdmi_audio_enabled = false;
+		break;
+	case HAD_SET_ENABLE_AUDIO_INT:
+		if (*((u32 *)capabilties) & HDMI_AUDIO_UNDERRUN)
+			int_masks |= PIPE_HDMI_AUDIO_UNDERRUN;
+
+		if (*((u32 *)capabilties) & HDMI_AUDIO_BUFFER_DONE)
+			int_masks |= PIPE_HDMI_AUDIO_BUFFER_DONE;
+
+		dev_priv->hdmi_audio_interrupt_mask |= int_masks;
+		mid_irq_enable_hdmi_audio(dev);
+		break;
+	case HAD_SET_DISABLE_AUDIO_INT:
+		if (*((u32 *)capabilties) & HDMI_AUDIO_UNDERRUN)
+			int_masks |= PIPE_HDMI_AUDIO_UNDERRUN;
+
+		if (*((u32 *)capabilties) & HDMI_AUDIO_BUFFER_DONE)
+			int_masks |= PIPE_HDMI_AUDIO_BUFFER_DONE;
+
+		dev_priv->hdmi_audio_interrupt_mask &= ~int_masks;
+
+		if (dev_priv->hdmi_audio_interrupt_mask)
+			mid_irq_enable_hdmi_audio(dev);
+		else
+			mid_irq_disable_hdmi_audio(dev);
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+static struct  hdmi_audio_registers_ops mid_hdmi_audio_reg_ops = {
+	.hdmi_audio_read_register = mid_hdmi_audio_read,
+	.hdmi_audio_write_register = mid_hdmi_audio_write,
+	.hdmi_audio_read_modify = mid_hdmi_audio_rmw,
+};
+
+static struct hdmi_audio_query_set_ops mid_hdmi_audio_get_set_ops = {
+	.hdmi_audio_get_caps = mid_hdmi_audio_get_caps,
+	.hdmi_audio_set_caps = mid_hdmi_audio_set_caps,
+};
+
+int mid_hdmi_audio_setup(
+	had_event_call_back audio_callbacks,
+	struct hdmi_audio_registers_ops *reg_ops,
+	struct hdmi_audio_query_set_ops *query_ops)
+{
+	struct drm_device *dev = hdmi_priv->dev;
+	struct drm_psb_private *dev_priv =
+				(struct drm_psb_private *) dev->dev_private;
+	int ret = 0;
+
+	reg_ops->hdmi_audio_read_register =
+			(mid_hdmi_audio_reg_ops.hdmi_audio_read_register);
+	reg_ops->hdmi_audio_write_register =
+			(mid_hdmi_audio_reg_ops.hdmi_audio_write_register);
+	reg_ops->hdmi_audio_read_modify =
+			(mid_hdmi_audio_reg_ops.hdmi_audio_read_modify);
+	query_ops->hdmi_audio_get_caps =
+			mid_hdmi_audio_get_set_ops.hdmi_audio_get_caps;
+	query_ops->hdmi_audio_set_caps =
+			mid_hdmi_audio_get_set_ops.hdmi_audio_set_caps;
+
+	dev_priv->mdfld_had_event_callbacks = audio_callbacks;
+
+	return ret;
+}
+EXPORT_SYMBOL(mid_hdmi_audio_setup);
+
+int mid_hdmi_audio_register(struct snd_intel_had_interface *driver,
+							void *had_data)
+{
+	struct drm_device *dev = hdmi_priv->dev;
+	struct drm_psb_private *dev_priv =
+				(struct drm_psb_private *) dev->dev_private;
+	dev_priv->had_pvt_data = had_data;
+	dev_priv->had_interface = driver;
+
+	if (hdmi_priv->monitor_type == MONITOR_TYPE_DVI)
+		return 0;
+
+	/* The Audio driver is loading now and we need to notify
+	 * it if there is an HDMI device attached */
+	DRM_INFO("%s: Scheduling HDMI audio work queue\n", __func__);
+	schedule_work(&dev_priv->hdmi_audio_wq);
+
+	return 0;
+}
+EXPORT_SYMBOL(mid_hdmi_audio_register);
+
+
+#else /* CONFIG_SUPPORT_HDMI - HDMI is not supported. */
+
+bool mid_hdmi_audio_is_busy(struct drm_device *dev)
+{
+	/* always in idle state */
+	return false;
+}
+
+bool mid_hdmi_audio_suspend(struct drm_device *dev)
+{
+	/* always in suspend state */
+	return true;
+}
+
+void mid_hdmi_audio_resume(struct drm_device *dev)
+{
+}
+
+void mid_hdmi_audio_signal_event(struct drm_device *dev,
+					enum had_event_type event)
+{
+}
+
+void mid_hdmi_audio_init(struct android_hdmi_priv *hdmi_priv)
+{
+	DRM_INFO("%s: HDMI is not supported.\n", __func__);
+}
+
+int mid_hdmi_audio_setup(
+	had_event_call_back audio_callbacks,
+	struct hdmi_audio_registers_ops *reg_ops,
+	struct hdmi_audio_query_set_ops *query_ops)
+{
+	DRM_ERROR("%s: HDMI is not supported.\n", __func__);
+	return -ENODEV;
+}
+EXPORT_SYMBOL(mid_hdmi_audio_setup);
+
+int mid_hdmi_audio_register(struct snd_intel_had_interface *driver,
+				void *had_data)
+{
+	DRM_ERROR("%s: HDMI is not supported.\n", __func__);
+	return -ENODEV;
+}
+EXPORT_SYMBOL(mid_hdmi_audio_register);
+
+#endif
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/mdfld_hdmi_audio_if.h b/drivers/external_drivers/intel_media/display/tng/drv/mdfld_hdmi_audio_if.h
new file mode 100644
index 0000000..c0ccb72
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/mdfld_hdmi_audio_if.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2010, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *	jim liu <jim.liu@intel.com>
+ */
+
+
+#ifndef MDFLD_HDMI_AUDIO_IF_H
+#define MDFLD_HDMI_AUDIO_IF_H
+
+/* HDMI AUDIO INTERRUPT TYPE */
+#define HDMI_AUDIO_UNDERRUN	(1UL<<0)
+#define HDMI_AUDIO_BUFFER_DONE	(1UL<<1)
+
+enum had_caps_list {
+	HAD_GET_ELD = 1,
+	HAD_GET_SAMPLING_FREQ,
+	HAD_GET_DISPLAY_RATE,
+	HAD_GET_HDCP_STATUS,
+	HAD_GET_AUDIO_STATUS,
+	HAD_SET_ENABLE_AUDIO,
+	HAD_SET_DISABLE_AUDIO,
+	HAD_SET_ENABLE_AUDIO_INT,
+	HAD_SET_DISABLE_AUDIO_INT,
+	OTHERS_TBD,
+};
+
+enum had_event_type {
+	HAD_EVENT_HOT_PLUG = 1,
+	HAD_EVENT_HOT_UNPLUG,
+	HAD_EVENT_MODE_CHANGING,
+	HAD_EVENT_PM_CHANGING,
+	HAD_EVENT_AUDIO_BUFFER_DONE,
+	HAD_EVENT_AUDIO_BUFFER_UNDERRUN,
+	HAD_EVENT_QUERY_IS_AUDIO_BUSY,
+	HAD_EVENT_QUERY_IS_AUDIO_SUSPENDED,
+};
+
+/**
+ * HDMI Display Controller Audio Interface 
+ * 
+ */
+typedef int (*had_event_call_back)(enum had_event_type event_type,
+			void * ctxt_info);
+
+struct  hdmi_audio_registers_ops {
+	int (*hdmi_audio_read_register)(uint32_t reg_addr, uint32_t *data);
+	int (*hdmi_audio_write_register) (uint32_t reg_addr, uint32_t data);
+	int (*hdmi_audio_read_modify)(uint32_t reg_addr,
+			uint32_t data, uint32_t mask);
+};
+
+struct hdmi_audio_query_set_ops {
+	int (*hdmi_audio_get_caps)(enum had_caps_list query_element,
+					void *capabilties);
+	int (*hdmi_audio_set_caps)(enum had_caps_list set_element,
+					void *capabilties);
+};
+
+typedef struct hdmi_audio_event {
+	int type;
+} hdmi_audio_event_t;
+
+struct snd_intel_had_interface {
+	const char *name;
+	int (*query) (void *had_data, hdmi_audio_event_t event);
+	int (*suspend) (void *had_data, hdmi_audio_event_t event);
+	int (*resume) (void *had_data);
+};
+
+extern int mid_hdmi_audio_setup(
+	had_event_call_back audio_callbacks,
+	struct hdmi_audio_registers_ops *reg_ops,
+	struct hdmi_audio_query_set_ops *query_ops);
+extern int mid_hdmi_audio_register(
+			struct snd_intel_had_interface *driver, void *had_data);
+extern bool mid_hdmi_audio_is_busy(struct drm_device *dev);
+extern bool mid_hdmi_audio_suspend(struct drm_device *dev);
+extern void mid_hdmi_audio_resume(struct drm_device *dev);
+extern void mid_hdmi_audio_signal_event(struct drm_device *dev,
+					enum had_event_type event);
+#endif /* MDFLD_HDMI_AUDIO_IF_H */
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/mdfld_intel_hdcp.c b/drivers/external_drivers/intel_media/display/tng/drv/mdfld_intel_hdcp.c
new file mode 100644
index 0000000..390d36a
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/mdfld_intel_hdcp.c
@@ -0,0 +1,1293 @@
+/*
+ * Copyright (c) 2010, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *	jim liu <jim.liu@intel.com>
+ */
+
+#include <drm/drmP.h>
+#include <random.h>
+#include "psb_drv.h"
+#include "psb_intel_reg.h"
+#include "psb_intel_hdmi_reg.h"
+#include "psb_intel_hdmi.h"
+#include "mdfld_hdcp_if.h"
+#include "mdfld_hdcp_reg.h"
+#include "mdfld_hdcp.h"
+
+/*
+ *
+ */
+static struct mid_intel_hdmi_priv *hdmi_priv;
+
+void mdfld_hdcp_init(struct mid_intel_hdmi_priv *p_hdmi_priv)
+{
+	hdmi_priv = p_hdmi_priv;
+}
+
+/* 
+ * IsValidBKSV:
+ * Checks if the BKSV is valid or not.
+ * A valid BKSV is one that contains 20 0's and 20 1's
+ *
+ */
+int hdcp_is_valid_bksv(uint8_t * buffer, uint32_t size)
+{
+	uint8_t count = 0;
+	int i = 0;
+	uint8_t bksv = 0;
+	uint8_t bit = 0;
+	int ret = 0;
+
+	if (buffer == NULL || size != CP_HDCP_KEY_SELECTION_VECTOR_SIZE)
+		return ret;
+
+	while (i < CP_HDCP_KEY_SELECTION_VECTOR_SIZE) {
+		bksv = buffer[i];
+		while (bksv != 0) {
+			bit = (bksv) & 0x01;
+			if (bit)
+				count++;
+			bksv = bksv >> 1;
+		}
+		i++;
+	}
+
+	if (count == 20)
+		ret = 1;
+
+	return ret;
+}
+
+/*
+ * Checks if HDCP is supported
+ *
+ */
+int hdcp_query()
+{
+	return hdmi_priv->is_hdcp_supported;
+}
+
+/* 
+ * Gets the current status of HDCP 
+ * 
+ */
+int hdcp_is_enabled(void)
+{
+	struct drm_device *dev = hdmi_priv->dev;
+	mdfld_hdcp_status_t hdcp_status = { 0 };
+	int ret = 0;
+
+	if (hdmi_priv->is_hdcp_supported) {
+		hdcp_status.value = REG_READ(MDFLD_HDCP_STATUS_REG);
+		ret = hdcp_status.cipher_hdcp_status;
+	}
+
+	return ret;
+}
+
+#define HDCP_PRIMARY_I2C_ADDR 0x74
+/*
+ *
+ * Read HDCP device data from i2c link 
+ *
+ */
+static int read_hdcp_port_data(uint8_t offset, uint8_t * buffer, int size)
+{
+	struct i2c_msg msgs[] = {
+		{
+		 .addr = HDCP_PRIMARY_I2C_ADDR,
+		 .flags = 0,
+		 .len = 1,
+		 .buf = &offset,
+		 }, {
+		     .addr = HDCP_PRIMARY_I2C_ADDR,
+		     .flags = I2C_M_RD,
+		     .len = size,
+		     .buf = buffer,
+		     }
+	};
+
+	if (i2c_transfer(hdmi_priv->hdmi_i2c_adapter, msgs, 2) == 2)
+		return 1;
+
+	return 0;
+}
+
+/* Read device status from i2c link */
+int read_hdcp_port(uint32_t read_request_type, uint8_t * buffer, int size)
+{
+	int more_blocks_to_read = 0;
+	uint32_t block_offset = 0;
+	int ret = 1;
+	uint8_t offset = 0;
+
+	while (1) {
+		switch (read_request_type) {
+		case RX_TYPE_BSTATUS:
+			offset = RX_BSTATUS_0;
+			break;
+		case RX_TYPE_RI_DATA:
+			offset = RX_RI_HIGH;
+			break;
+		case RX_TYPE_BCAPS:
+			offset = RX_BCAPS;
+			break;
+		case RX_TYPE_REPEATER_KSV_LIST:
+			offset = RX_KSV_FIFO;
+			break;
+		case RX_TYPE_BKSV_DATA:
+			offset = RX_BKSV_0;
+			break;
+		case RX_TYPE_REPEATER_PRIME_V:
+			{
+				offset = block_offset + RX_VPRIME_H0_0;
+				buffer += block_offset;
+				size = 4;
+				if (offset < RX_VPRIME_H4_0) {
+					more_blocks_to_read = 1;
+					block_offset += 4;
+				}
+			}
+			break;
+		default:
+			ret = 0;
+			break;
+		}
+
+		if (ret) {
+			if (!read_hdcp_port_data(offset, buffer, size)) {
+				//I2C access failed
+				ret = 0;
+				break;
+			}
+			//Check whether more blocks are to be read
+			if (!more_blocks_to_read) {
+				break;
+			} else {
+				more_blocks_to_read = 0;
+			}
+		} else {
+			break;
+		}
+	}
+
+	return ret;
+}
+
+/* write to HDCP device through i2c link */
+static int write_hdcp_port(uint8_t offset, uint8_t * buffer, int size)
+{
+	struct i2c_msg msgs[] = {
+		{
+		 .addr = HDCP_PRIMARY_I2C_ADDR,
+		 .flags = 0,
+		 .len = 1,
+		 .buf = &offset,
+		 }, {
+		     .addr = HDCP_PRIMARY_I2C_ADDR,
+		     .flags = 0,
+		     .len = size,
+		     .buf = buffer,
+		     }
+	};
+
+	if (i2c_transfer(hdmi_priv->hdmi_i2c_adapter, msgs, 2) == 2)
+		return 1;
+
+	return 0;
+}
+
+/*
+ *
+ * UpdateRepeaterState : Enables/Disables Repeater
+ *
+ */
+static int hdcp_update_repeater_state(int enable)
+{
+	struct drm_device *dev = hdmi_priv->dev;
+	mdfld_hdcp_rep_t hdcp_rep_ctrl_reg;
+	hdcp_rep_ctrl_reg.value = REG_READ(MDFLD_HDCP_REP_REG);
+	hdcp_rep_ctrl_reg.repeater_present = enable;
+
+	REG_WRITE(MDFLD_HDCP_REP_REG, hdcp_rep_ctrl_reg.value);
+	return 1;
+
+}
+
+/* 
+ * EnableHDCP : Enables/Disables HDCP
+ *
+ */
+int hdcp_enable(int enable)
+{
+	struct drm_device *dev = hdmi_priv->dev;
+	mdfld_hdcp_config_t config;
+	mdfld_hdcp_receiver_ri_t receivers_ri;
+	mdfld_hdcp_status_t status;
+	mdfld_hdcp_rep_t hdcp_repeater;
+	uint32_t max_retry = 0;
+	sqword_tt hw_an;
+	sqword_tt hw_aksv;
+	sqword_tt hw_bksv;
+	uint8_t bcaps = 0;
+	uint32_t rx_ri = 0;
+	int ret = 0;
+	uint32_t hdcp_init_vec;
+
+	if (enable == 0) {
+		config.value = REG_READ(MDFLD_HDCP_CONFIG_REG);
+		config.hdcp_config = HDCP_Off;
+		REG_WRITE(MDFLD_HDCP_CONFIG_REG, config.value);
+
+		//Check the status of cipher till it get's turned off
+		// Bug #2808007-Delay required is one frame period.
+		// waiting for 2 VBlanks provides this amount of delay
+		max_retry = 0;
+		status.value = REG_READ(MDFLD_HDCP_STATUS_REG);
+		while ((status.cipher_hdcp_status
+			|| status.cipher_encrypting_status)
+		       && max_retry < HDCP_MAX_RETRY_DISABLE) {
+			psb_intel_wait_for_vblank(dev);
+			status.value = REG_READ(MDFLD_HDCP_STATUS_REG);
+			max_retry++;
+		}
+
+		// Check for cipher time out
+		if (status.cipher_hdcp_status
+		    || status.cipher_encrypting_status) {
+			ret = 0;
+			return ret;
+		}
+		// clear the repeater specfic bits and set the repeater to idle
+		hdcp_repeater.value = REG_READ(MDFLD_HDCP_REP_REG);
+		hdcp_repeater.repeater_present = 0;
+		hdcp_repeater.repeater_control = HDCP_REPEATER_CTRL_IDLE;
+		REG_WRITE(MDFLD_HDCP_REP_REG, hdcp_repeater.value);
+
+		max_retry = HDCP_MAX_RETRY_STATUS;	//tbd: not yet finalized
+		while (max_retry--) {
+			hdcp_repeater.value = REG_READ(MDFLD_HDCP_REP_REG);
+
+			if (hdcp_repeater.repeater_status ==
+			    HDCP_REPEATER_STATUS_IDLE) {
+				ret = 1;
+				break;
+			}
+		}
+
+		// Check for cipher time out
+		if (max_retry == 0) {
+			ret = 0;
+			return 0;
+		}
+		// Clear the Ri' register
+		// This is a HW issue because of which the Ri' status bit in HDCP_STATUS
+		// register doesn't get cleared. 
+		// refer ro https://vthsd.fm.intel.com/hsd/cantiga/sighting/default.aspx?sighting_id=304464
+		// for details
+		REG_WRITE(MDFLD_HDCP_RECEIVER_RI_REG, 0);
+
+		//Disable the port on which HDCP is enabled
+		REG_WRITE(hdmi_priv->hdmib_reg,
+			  REG_READ(hdmi_priv->hdmib_reg) & ~HDMIB_HDCP_PORT);
+	} else {
+		//Generate An
+		config.value = REG_READ(MDFLD_HDCP_CONFIG_REG);
+
+		if (config.hdcp_config != HDCP_Off) {
+			config.hdcp_config = HDCP_Off;
+			REG_WRITE(MDFLD_HDCP_CONFIG_REG, config.value);
+		}
+
+		/*
+		 * When generating An, hardware will use the two most recent values as a
+		 * 64-bit source of entropy.
+		 */
+		get_random_bytes(&hdcp_init_vec, sizeof(hdcp_init_vec));
+		REG_WRITE(MDFLD_HDCP_INIT_REG, hdcp_init_vec);
+		get_random_bytes(&hdcp_init_vec, sizeof(hdcp_init_vec));
+		REG_WRITE(MDFLD_HDCP_INIT_REG, hdcp_init_vec);
+		udelay(10);
+
+		config.hdcp_config = HDCP_CAPTURE_AN;
+		REG_WRITE(MDFLD_HDCP_CONFIG_REG, config.value);
+
+		//check the status of cipher before reading an
+		max_retry = HDCP_MAX_RETRY_STATUS;	//tbd: not yet finalized
+		while (max_retry--) {
+			status.value = REG_READ(MDFLD_HDCP_STATUS_REG);
+			if (status.cipher_an_status) {
+				ret = 1;
+				break;
+			}
+		}
+
+		config.hdcp_config = HDCP_Off;
+		REG_WRITE(MDFLD_HDCP_CONFIG_REG, config.value);
+
+		if (max_retry == 0)
+			return 0;	//Cipher timeout, was not able to generate An :(
+
+		//Read An
+		hw_an.u.low_part = REG_READ(MDFLD_HDCP_AN_LOW_REG);
+		hw_an.u.high_part = REG_READ(MDFLD_HDCP_AN_HI_REG);
+
+		hw_aksv.u.low_part = REG_READ(MDFLD_HDCP_AKSV_LOW_REG);
+		hw_aksv.u.high_part = REG_READ(MDFLD_HDCP_AKSV_HI_REG);
+		//stHdcpParams.hwAksv.MajorPart_Low = 0x0361f714;//test data
+		//stHdcpParams.hwAksv.MajorPart_High = 0xb7;
+
+		//write An
+		ret = write_hdcp_port(RX_AN_0, hw_an.byte, 8);
+		if (!ret)
+			return 0;
+
+		//write Aksv
+		ret = write_hdcp_port(RX_AKSV_0, hw_aksv.byte, 5);
+		if (!ret)
+			return 0;
+
+		//Read the Bksv from receiver
+		ret = read_hdcp_port(RX_TYPE_BKSV_DATA, &hw_bksv.byte[0], 5);
+		if (ret) {
+			// Validate BKSV
+			ret = hdcp_is_valid_bksv(&hw_bksv.byte[0], 5);
+		}
+
+		if (!ret)
+			return 0;
+
+		//read the BCaps 
+		ret = read_hdcp_port(RX_TYPE_BCAPS, &bcaps, 1);
+		if (!ret)
+			return 0;
+
+		// set repeater bit if receiver connected is a repeater
+		if (bcaps & BIT6) {
+			hdcp_update_repeater_state(1);
+		}
+		//Write the BKsv into the encoder
+		REG_WRITE(MDFLD_HDCP_BKSV_LOW_REG, hw_bksv.u.low_part);
+		REG_WRITE(MDFLD_HDCP_BKSV_HI_REG, hw_bksv.u.high_part);
+
+		//enable HDCP on this port
+		REG_WRITE(hdmi_priv->hdmib_reg,
+			  REG_READ(hdmi_priv->hdmib_reg) | HDMIB_HDCP_PORT);
+
+		//TBD :Check the bStatus, for repeater and set HDCP_REP[1]
+		//Set HDCP_CONFIG to 011 = Authenticate and encrypt
+		config.hdcp_config = HDCP_AUTHENTICATE_AND_ENCRYPT;
+		REG_WRITE(MDFLD_HDCP_CONFIG_REG, config.value);
+
+		//At this point of time the Km is created
+
+		//Wait for Ri ready
+		max_retry = HDCP_MAX_RETRY_STATUS;	//TBD: Not yet finalized
+		while (max_retry--) {
+			status.value = REG_READ(MDFLD_HDCP_STATUS_REG);
+			if (status.cipher_ri_ready_status)
+				break;
+		}
+
+		if (max_retry == 0)
+			return 0;	//Cipher timeout, was not able to generate An :(
+
+		//Compare the R0 and Ri
+		//Read the Ri' of reciever
+		ret = read_hdcp_port(RX_TYPE_RI_DATA, (uint8_t *) & rx_ri, 2);
+		if (!ret)
+			return 0;
+
+		//TBD:Have some delay before reading the Ri'
+		//Right now using 100 ms, as per the HDCP spec(Refer HDCP SAS for details)
+		mdelay(HDCP_100MS_DELAY);
+
+		//update the HDCP_Ri' register and read the status reg for confrmation
+		receivers_ri.value = REG_READ(MDFLD_HDCP_RECEIVER_RI_REG);
+		receivers_ri.ri = rx_ri;
+		REG_WRITE(MDFLD_HDCP_RECEIVER_RI_REG, receivers_ri.value);
+
+		status.value = REG_READ(MDFLD_HDCP_STATUS_REG);
+
+		//SoftbiosDebugMessage(DBG_CRITICAL,"R Prime       = %x\n",dwRxRi);
+		//SoftbiosDebugMessage(DBG_CRITICAL,"HDCP_STATUS = %x\n",stStatus.value);
+		ret = status.cipher_ri_match_status;
+		/*if(GEN4INTHDCPCONTROLLER_HasInterruptOccured(pThis,ePort) == TRUE)
+		   {
+		   bRet = 0;
+		   } */
+	}
+
+	if (!ret) {
+		//TODO: SoftbiosDebugMessage(DBG_CRITICAL," EnableHDCP failed \n");
+	}
+
+	return ret;
+}
+
+/*
+To obtain receiver specific data. The request type
+is from mdfld_hdcp_rx_data_type_en 
+*/
+static int hdcp_get_receiver_data(uint8_t * buffer, uint32_t size,
+				  uint32_t rx_data_type)
+{
+	int ret = 0;
+
+	if (buffer) {
+		memset(buffer, 0, size);
+		//Get the Data from reciever
+		ret = read_hdcp_port(rx_data_type, buffer, size);
+	}
+	// Validate BKSV and Check if its Valid.
+	if (RX_TYPE_BKSV_DATA == rx_data_type) {
+		ret = hdcp_is_valid_bksv(buffer, size);
+	}
+
+	return ret;
+}
+
+/*
+ *
+ * WaitForNextDataReady : Function Waits for enryption ready
+ *
+ */
+static int hdcp_wait_for_next_data_ready(void)
+{
+	struct drm_device *dev = hdmi_priv->dev;
+	mdfld_hdcp_rep_t hdcp_rep_reg;
+	uint32_t i = 0;
+	int ret = 0;
+
+	for (i = 0; i < HDCP_MAX_RETRY_STATUS; i++) {
+		hdcp_rep_reg.value = REG_READ(MDFLD_HDCP_REP_REG);
+
+		if (HDCP_REPEATER_STATUS_RDY_NEXT_DATA ==
+		    hdcp_rep_reg.repeater_status) {
+			ret = 1;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+/*
+ *
+ * CompareVPrime : This routine compares the vprime
+ * obtained from receiver with the one generated in
+ * transmitter.
+ *
+ */
+static int hdcp_compare_v_prime(uint32_t * buffer_repeater_v_prime,
+				uint8_t size_in_dword)
+{
+	struct drm_device *dev = hdmi_priv->dev;
+	uint32_t value = 0;
+	uint8_t *buffer = (uint8_t *) buffer_repeater_v_prime;
+	int ret = 0;
+	mdfld_hdcp_rep_t hdcp_rep_ctrl_reg;
+	uint32_t i = 0;
+
+	//TBD: To be implemented as part of repeater implementation
+	//Set the repeater's vprime in GMCH
+
+	if (size_in_dword == KSV_SIZE) {
+		memcpy(&value, buffer, 4);
+		REG_WRITE(MDFLD_HDCP_VPRIME_H0, value);
+
+		buffer += 4;
+		memcpy(&value, buffer, 4);
+		REG_WRITE(MDFLD_HDCP_VPRIME_H1, value);
+
+		buffer += 4;
+		memcpy(&value, buffer, 4);
+		REG_WRITE(MDFLD_HDCP_VPRIME_H2, value);
+
+		buffer += 4;
+		memcpy(&value, buffer, 4);
+		REG_WRITE(MDFLD_HDCP_VPRIME_H3, value);
+
+		buffer += 4;
+		memcpy(&value, buffer, 4);
+		REG_WRITE(MDFLD_HDCP_VPRIME_H4, value);
+
+		if (!hdcp_wait_for_next_data_ready())
+			return 0;
+
+		// Set HDCP_REP to do the comparison
+		// Start transmitter's V calculation
+		hdcp_rep_ctrl_reg.value = REG_READ(MDFLD_HDCP_REP_REG);
+		hdcp_rep_ctrl_reg.repeater_control =
+		    HDCP_REPEATER_COMPLETE_SHA1;
+		REG_WRITE(MDFLD_HDCP_REP_REG, hdcp_rep_ctrl_reg.value);
+
+		for (i = 0; i < HDCP_MAX_RETRY_STATUS; i++) {
+			hdcp_rep_ctrl_reg.value = REG_READ(MDFLD_HDCP_REP_REG);
+
+			switch (hdcp_rep_ctrl_reg.repeater_status) {
+			case HDCP_REPEATER_STATUS_COMPLETE_MATCH:
+				ret = 1;
+				break;
+			case HDCP_REPEATER_STATUS_COMPLETE_NO_MATCH:
+				ret = 0;
+				break;
+			case HDCP_REPEATER_STATUS_IDLE:
+				//Should not happen
+				ret = 0;
+				break;
+				//default: Not needed
+			}
+
+			if (hdcp_rep_ctrl_reg.repeater_status !=
+			    HDCP_REPEATER_STATUS_BUSY) {
+				break;
+			}
+		}
+
+	} else {
+		ret = 0;
+	}
+
+	return ret;
+}
+
+/* 
+ *
+ * ComputeTransmitterV : This routine computes transmitter's V prime.
+ * As per HDCP spec 1.3 for HDMI/DVI the BStatus register contains data specific to repeater 
+ * sink topology in case sink is a repeater. The same interface is used by HDCP over display port
+ * in which case BInfo contains the relevant data. The variable wBTopologyData represents Bstatus
+ * for HDMI/DVI and BInfo for Display Port
+ *
+ */
+static int hdcp_compute_transmitter_v(ksv_t * ksv_list,
+				      uint32_t ksv_list_entries,
+				      uint16_t b_topology_data)
+{
+	struct drm_device *dev = hdmi_priv->dev;
+	uint32_t num_devices = ksv_list_entries;
+//    uint32_t lower_num_bytes_for_sha = 0, upper_num_bytes_for_sha = 0; // This has to be in mutiple of 512 bits
+	uint32_t lower_num_bytes_for_sha = 0;
+	uint32_t num_pad_bytes = 0;
+	uint8_t *buffer = NULL;
+	uint8_t *temp_buffer = NULL;
+	mdfld_hdcp_rep_t hdcp_rep_ctrl_reg;
+	uint32_t value = 0;
+	int ret = 1;
+	uint32_t i = 0;
+	uint32_t rem_text_data = 0, num_mo_bytes_left = 8;
+	uint8_t *temp_data_ptr = NULL;
+	sqword_tt buffer_len;
+	uint32_t temp_data = 0;
+
+	//Clear SHA hash generator for new V calculation and set the repeater to idle state
+	REG_WRITE(MDFLD_HDCP_SHA1_IN, 0);
+	hdcp_rep_ctrl_reg.value = REG_READ(MDFLD_HDCP_REP_REG);
+	hdcp_rep_ctrl_reg.repeater_control = HDCP_REPEATER_CTRL_IDLE;
+	REG_WRITE(MDFLD_HDCP_REP_REG, hdcp_rep_ctrl_reg.value);
+	for (i = 0; i < HDCP_MAX_RETRY_STATUS; i++) {
+		hdcp_rep_ctrl_reg.value = REG_READ(MDFLD_HDCP_REP_REG);
+		if (HDCP_REPEATER_CTRL_IDLE ==
+		    hdcp_rep_ctrl_reg.repeater_status) {
+			ret = 1;
+			break;
+		}
+	}
+	if (i == HDCP_MAX_RETRY_STATUS) {
+		return 0;
+	}
+	// Start the SHA buffer creation
+	//To find the number of pad bytes
+	num_pad_bytes = (64 - (ksv_list_entries * KSV_SIZE + 18) % 64);
+
+	// Get the number of bytes for SHA
+	lower_num_bytes_for_sha = KSV_SIZE * num_devices + 18 + num_pad_bytes;	//multiple of 64 bytes
+
+	buffer = (uint8_t *) kzalloc(lower_num_bytes_for_sha, GFP_KERNEL);
+
+	if (buffer) {
+		//1. Copy the KSV buffer
+		//Note:The data is in little endian format
+		temp_buffer = buffer;
+		memcpy((void *)temp_buffer, (void *)ksv_list,
+		       num_devices * KSV_SIZE);
+		temp_buffer += num_devices * KSV_SIZE;
+
+		//2. Copy the b_topology_data
+		memcpy((void *)temp_buffer, (void *)&b_topology_data, 2);
+		//The bstatus is copied in little endian format
+		temp_buffer += 2;
+
+		//3. Offset the pointer buffer by 8 bytes
+		// These 8 bytes are zeroed and are place holdes for Mo
+		temp_buffer += 8;
+
+		//4. Pad the buffer with extra bytes
+		// No need to pad the begining of padding bytes by adding
+		// 0x80. HW automatically appends the same while creating 
+		// the buffer.
+		//*temp_buffer = (BYTE)0x80;
+		//temp_buffer++;
+		for (i = 0; i < num_pad_bytes; i++) {
+			*temp_buffer = (uint8_t) 0x00;
+			temp_buffer++;
+		}
+
+		//5. Construct the length byte
+		buffer_len.quad_part =
+		    (unsigned long long)(ksv_list_entries * KSV_SIZE + 2 +
+					 8) * 8;
+		temp_data_ptr = (uint8_t *) & buffer_len.quad_part;
+		// Store it in big endian form
+		for (i = 1; i <= 8; i++) {
+			*temp_buffer = *(temp_data_ptr + 8 - i);
+			temp_buffer++;
+		}
+
+		//5.Write a random 64 bit value to the buffer
+		//memcpy(temp_buffer,&upper_num_bytes_for_sha,4);
+		//temp_buffer += 4;
+		//memcpy(temp_buffer,&lower_num_bytes_for_sha,4);
+
+		//Now write the data into the SHA
+		temp_buffer = buffer;
+		for (i = 0; i < (KSV_SIZE * num_devices + 2) / 4; i++) {
+			hdcp_rep_ctrl_reg.value = REG_READ(MDFLD_HDCP_REP_REG);
+			hdcp_rep_ctrl_reg.repeater_control =
+			    HDCP_REPEATER_32BIT_TEXT_IP;
+			REG_WRITE(MDFLD_HDCP_REP_REG, hdcp_rep_ctrl_reg.value);
+
+			//As per HDCP spec sample SHA is in little endian format. But the
+			//data fed to the cipher needs to be in big endian format for it
+			//to compute it correctly
+			memcpy(&value, temp_buffer, 4);
+			value = HDCP_CONVERT_BIG_ENDIAN(value);
+			REG_WRITE(MDFLD_HDCP_SHA1_IN, value);
+			temp_buffer += 4;
+
+			if (!hdcp_wait_for_next_data_ready())
+				return 0;
+		}
+
+		//Write the remaining text data with M0 
+		//BUN#: 07ww44#1: text input must be aligned to LSB of the SHA1 
+		//in register when inputting partial text and partial M0
+		rem_text_data = (KSV_SIZE * num_devices + 2) % 4;
+		if (rem_text_data) {
+			// Update the no of Mo bytes
+			num_mo_bytes_left =
+			    num_mo_bytes_left - (4 - rem_text_data);
+
+			if (!hdcp_wait_for_next_data_ready())
+				return 0;
+
+			hdcp_rep_ctrl_reg.value = REG_READ(MDFLD_HDCP_REP_REG);
+			switch (rem_text_data) {
+			case 1:
+				hdcp_rep_ctrl_reg.repeater_control =
+				    HDCP_REPEATER_8BIT_TEXT_24BIT_MO_IP;
+				break;
+			case 2:
+				hdcp_rep_ctrl_reg.repeater_control =
+				    HDCP_REPEATER_16BIT_TEXT_16BIT_MO_IP;
+				break;
+			case 3:
+				hdcp_rep_ctrl_reg.repeater_control =
+				    HDCP_REPEATER_24BIT_TEXT_8BIT_MO_IP;
+				break;
+			default:
+				ret = 0;
+			}
+
+			if (!ret)
+				return ret;
+
+			REG_WRITE(MDFLD_HDCP_REP_REG, hdcp_rep_ctrl_reg.value);
+			memcpy(&value, temp_buffer, 4);
+
+			// swap the text data in big endian format leaving the Mo data as it is.
+			// As per the bun the LSB should contain the data in big endian format.
+			// since the M0 specfic data is all zeros while it's fed to the cipher.
+			// Those bit don't need to be modified
+			temp_data = 0;
+			for (i = 0; i < rem_text_data; i++) {
+				temp_data |=
+				    ((value & 0xff << (i * 8)) >> (i * 8)) <<
+				    ((rem_text_data - i - 1) * 8);
+			}
+			REG_WRITE(MDFLD_HDCP_SHA1_IN, temp_data);
+			temp_buffer += 4;
+		}
+		//Write 4 bytes of Mo
+		if (!hdcp_wait_for_next_data_ready())
+			return 0;
+
+		hdcp_rep_ctrl_reg.value = REG_READ(MDFLD_HDCP_REP_REG);
+		hdcp_rep_ctrl_reg.repeater_control = HDCP_REPEATER_32BIT_MO_IP;
+		REG_WRITE(MDFLD_HDCP_REP_REG, hdcp_rep_ctrl_reg.value);
+		memcpy(&value, temp_buffer, 4);
+		REG_WRITE(MDFLD_HDCP_SHA1_IN, value);
+		temp_buffer += 4;
+		num_mo_bytes_left -= 4;
+
+		if (num_mo_bytes_left) {
+			// The remaining Mo + padding bytes need to be added 
+			num_pad_bytes = num_pad_bytes - (4 - num_mo_bytes_left);
+
+			//Write 4 bytes of Mo
+			if (!hdcp_wait_for_next_data_ready())
+				return 0;
+
+			hdcp_rep_ctrl_reg.value = REG_READ(MDFLD_HDCP_REP_REG);
+			switch (num_mo_bytes_left) {
+			case 1:
+				hdcp_rep_ctrl_reg.repeater_control =
+				    HDCP_REPEATER_24BIT_TEXT_8BIT_MO_IP;
+				break;
+			case 2:
+				hdcp_rep_ctrl_reg.repeater_control =
+				    HDCP_REPEATER_16BIT_TEXT_16BIT_MO_IP;
+				break;
+			case 3:
+				hdcp_rep_ctrl_reg.repeater_control =
+				    HDCP_REPEATER_8BIT_TEXT_24BIT_MO_IP;
+				break;
+			default:
+				// should never happen
+				ret = 0;
+			}
+
+			if (!ret)
+				return ret;
+
+			REG_WRITE(MDFLD_HDCP_REP_REG, hdcp_rep_ctrl_reg.value);
+			memcpy(&value, temp_buffer, 4);
+			//BUN#:07ww44#1
+			temp_data = 0;
+			for (i = 0; i < rem_text_data; i++) {
+				temp_data |=
+				    ((value & 0xff << (i * 8)) >> (i * 8)) <<
+				    ((rem_text_data - i - 1) * 8);
+			}
+			REG_WRITE(MDFLD_HDCP_SHA1_IN, value);
+			temp_buffer += 4;
+			num_mo_bytes_left = 0;
+		}
+		//Write the remaining no of bytes
+		// Remaining data = remaining padding data + 64 bits of length data
+		rem_text_data = num_pad_bytes + 8;
+
+		if (rem_text_data % 4) {
+			//Should not happen
+			return 0;
+		}
+
+		for (i = 0; i < rem_text_data / 4; i++) {
+			REG_WRITE(MDFLD_HDCP_SHA1_IN, temp_data);
+			if (!hdcp_wait_for_next_data_ready())
+				return 0;
+
+			hdcp_rep_ctrl_reg.value = REG_READ(MDFLD_HDCP_REP_REG);
+			hdcp_rep_ctrl_reg.repeater_control =
+			    HDCP_REPEATER_32BIT_TEXT_IP;
+			REG_WRITE(MDFLD_HDCP_REP_REG, hdcp_rep_ctrl_reg.value);
+			memcpy(&value, temp_buffer, 4);
+			// do the Big endian conversion
+			value = HDCP_CONVERT_BIG_ENDIAN(value);
+			REG_WRITE(MDFLD_HDCP_SHA1_IN, value);
+			temp_buffer += 4;
+		}
+		kfree(buffer);
+
+		ret = 1;
+	} else {
+		return 0;
+	}
+
+	return ret;
+}
+
+/*
+ *
+ * SetHDCPEncryptionLevel:
+ *
+ */
+static uint32_t hdcp_set_encryption_level(cp_parameters_t * cp)
+{
+	uint32_t ret = STATUS_UNSUCCESSFUL;
+	int hdcp_enabled = 0;
+	uint32_t ksv_length = 0;
+	ksv_t bksv;
+
+	//Get the hdcp configuration of the port
+	if (hdmi_priv->is_hdcp_supported) {
+		hdcp_enabled = hdcp_is_enabled();
+		if (((cp->level == CP_PROTECTION_LEVEL_HDCP_OFF)
+		     && (!hdcp_enabled))
+		    || ((cp->level == CP_PROTECTION_LEVEL_HDCP_ON)
+			&& hdcp_enabled)) {
+			ret = STATUS_SUCCESS;
+		}
+
+		if (ret == STATUS_UNSUCCESSFUL) {
+			//Turn off HDCP
+			if (hdcp_enable(0))
+				ret = STATUS_SUCCESS;
+
+			if ((cp->level != CP_PROTECTION_LEVEL_HDCP_OFF)
+			    && (ret == STATUS_SUCCESS)) {
+				// Check if a Revoked device is attached
+				if (cp->hdcp.ksv_list_length) {
+					//Get the current set of BKSV's from the 
+					if (hdcp_get_receiver_data
+					    ((uint8_t *) bksv.ab_ksv,
+					     CP_HDCP_KEY_SELECTION_VECTOR_SIZE,
+					     RX_TYPE_BKSV_DATA)) {
+						for (ksv_length = 0;
+						     ksv_length <
+						     cp->hdcp.ksv_list_length;
+						     ksv_length++) {
+							if (!memcmp
+							    (&bksv,
+							     &cp->hdcp.ksv_list
+							     [ksv_length],
+							     CP_HDCP_KEY_SELECTION_VECTOR_SIZE))
+							{
+								ret =
+								    STATUS_REVOKED_HDCP_DEVICE_ATTACHED;
+								break;
+
+							}
+						}
+					}
+				}
+
+				if (ret == STATUS_SUCCESS) {
+					//Activate the link layer
+					ret = hdcp_enable(1);
+				}
+
+			}
+		}
+	}
+	return ret;
+}
+
+/*
+ *
+ * GetMaxSupportedAttachedDevices: Returns the
+ * max no attached devices supported on repeater 
+ *
+ */
+static uint16_t hdcp_get_max_supported_attached_devices(void)
+{
+	//currently return 128 as specified by the HDCP spec
+	return MAX_HDCP_DEVICES;
+}
+
+/*
+ *
+ * ActivateRepeater: Activates reciver mode
+ *
+ */
+static uint32_t hdcp_activate_repeater(cp_parameters_t * cp)
+{
+	uint32_t ret = STATUS_UNSUCCESSFUL;
+	uint16_t device_count = 0;
+	uint16_t get_max_device_supported = 0;
+	uint8_t *ksv_list = NULL;	//[MAX_HDCP_DEVICES] = {0};// * KSV_SIZE] = { 0 };
+	uint16_t i = 0, j = 0, k = 0;
+	uint32_t repeater_prime_v[5];
+	hdcp_rx_bcaps_t b_caps;
+	hdcp_rx_bstatus_t b_status;
+	//TBD: TO be enabled for OPM - Vista
+
+	// Init bcaps
+	b_caps.value = 0;
+	b_status.value = 0;
+
+	for (i = 0; i < 5; i++)
+		repeater_prime_v[i] = 0;
+
+	for (i = 0; i < 1; i++) {
+		ksv_list =
+		    (uint8_t *) kzalloc(MAX_HDCP_DEVICES * KSV_SIZE,
+					GFP_KERNEL);
+
+		if (!ksv_list) {
+			ret = STATUS_UNSUCCESSFUL;
+			break;
+		}
+		//get the receiver bcaps 
+		hdcp_get_receiver_data(&b_caps.value, 1, RX_TYPE_BCAPS);
+
+		// Check for repeater caps
+		if (!(b_caps.is_reapeater)) {
+			ret = STATUS_INVALID_PARAMETER;
+			break;
+		}
+		// Check if the KSV FIFO is ready
+		if (!(b_caps.ksv_fifo_ready)) {
+			// The HDCP repeater is not yet ready to return a KSV list.
+			// Per HDCP spec, the repeater has 5 seconds from when KSVs are exchanged
+			// in the first part of the authentication protocol (HDCPActivateLink)
+			// to be ready to report out downstream KSVs.
+			ret = STATUS_PENDING;
+			break;
+		}
+		//Read repeater's Bstatus
+		hdcp_get_receiver_data((uint8_t *) & b_status.value, 2,
+				       RX_TYPE_BSTATUS);
+
+		// check if max dev limit is exceeded
+		if (b_status.max_devs_exceeded) {
+			ret = STATUS_INVALID_PARAMETER;
+			break;
+		}
+		// Check for topology error. This happens when
+		// more then seven levels of video repeater have been cascaded.
+		if (b_status.max_cascade_exceeded) {
+			ret = STATUS_INVALID_PARAMETER;
+			break;
+		}
+
+		device_count = b_status.device_count;
+		if (device_count == 0) {
+			ret = STATUS_SUCCESS;
+			break;
+		}
+
+		get_max_device_supported =
+		    hdcp_get_max_supported_attached_devices();
+
+		if (device_count > get_max_device_supported) {
+			ret = STATUS_INVALID_PARAMETER;
+			break;
+		}
+		// Update the cipher saying sink supports repeater capabilities
+		if (!hdcp_update_repeater_state(1)) {
+			ret = STATUS_UNSUCCESSFUL;
+			break;
+		}
+		// Read the KSV list from the repeater
+		if (!hdcp_get_receiver_data
+		    (ksv_list, device_count * KSV_SIZE,
+		     RX_TYPE_REPEATER_KSV_LIST)) {
+			ret = STATUS_UNSUCCESSFUL;
+			break;
+		}
+
+		for (j = 0; j < device_count; j++) {
+			for (k = 0; k < cp->hdcp.ksv_list_length; k++) {
+				if (0 ==
+				    memcmp(&ksv_list[j * KSV_SIZE],
+					   &cp->hdcp.ksv_list[k],
+					   CP_HDCP_KEY_SELECTION_VECTOR_SIZE)) {
+					ret =
+					    STATUS_REVOKED_HDCP_DEVICE_ATTACHED;
+					break;
+				}
+			}
+		}
+
+		if (!hdcp_compute_transmitter_v
+		    ((ksv_t *) ksv_list, device_count, b_status.value)) {
+			ret = STATUS_UNSUCCESSFUL;
+			break;
+		}
+		//Get the HDCP receiver's V' value (20 bytes in size)
+		if (!hdcp_get_receiver_data
+		    ((uint8_t *) repeater_prime_v, KSV_SIZE * 4,
+		     RX_TYPE_REPEATER_PRIME_V)) {
+			ret = STATUS_UNSUCCESSFUL;
+			break;
+		}
+
+		if (!hdcp_compare_v_prime(repeater_prime_v, KSV_SIZE)) {
+			//set hdcp encryption level to 0
+			hdcp_update_repeater_state(0);
+			hdcp_enable(0);
+			ret = STATUS_UNSUCCESSFUL;
+		} else {
+			ret = STATUS_SUCCESS;
+		}
+	}
+
+	if (ksv_list) {
+		kfree(ksv_list);
+		ksv_list = NULL;
+	}
+
+	return ret;
+}
+
+/*
+ * IsHDCPRepeater : Reads the caps register and informs
+ * whether the received is a repeater
+ *
+ */
+static int hdcp_is_repeater(int *is_repeater)
+{
+	int ret = 0;
+	hdcp_rx_bcaps_t b_caps;
+
+	//Init
+	b_caps.value = 0;
+
+	ret = hdcp_get_receiver_data(&b_caps.value, 1, RX_TYPE_BCAPS);
+	if (ret) {
+		*is_repeater = b_caps.is_reapeater;
+	}
+
+	return ret;
+}
+
+/* Get's the current link status */
+static int hdcp_get_link_status(void)
+{
+	struct drm_device *dev = hdmi_priv->dev;
+	int ret = 0;
+	uint32_t rx_ri = 0;
+	mdfld_hdcp_receiver_ri_t receivers_ri;
+	mdfld_hdcp_status_t status;
+	uint32_t max_count = 0;
+
+	max_count = HDCP_MAX_RI_QUERY_COUNT;
+	while (max_count) {
+		max_count--;
+
+		//Read the Ri' of reciever
+		ret = read_hdcp_port(RX_TYPE_RI_DATA, (uint8_t *) & rx_ri, 2);
+		if (!ret)
+			break;	// I2C access failed
+
+		//update the HDCP_Ri' register and read the status reg for cofrmation
+		receivers_ri.value = REG_READ(MDFLD_HDCP_RECEIVER_RI_REG);
+		receivers_ri.ri = rx_ri;
+		REG_WRITE(MDFLD_HDCP_RECEIVER_RI_REG, receivers_ri.value);
+
+		status.value = REG_READ(MDFLD_HDCP_STATUS_REG);
+
+		ret = status.cipher_ri_match_status;
+		if (ret) {
+			//Ri and Ri' matches, hence reciver is a authentic one :)
+			break;
+		} else {
+			//The Ri changes every 128th frame.Hence if the Ri check fails
+			//that means the sink has updated the Ri value and that can happen
+			//every 128th frame. In that case we wait for the next frame count.
+			//Wait should be around 16 ms.
+			while ((status.frame_count & HDCP_NEXT_RI_FRAME) ==
+			       HDCP_NEXT_RI_FRAME) {
+				status.value = REG_READ(MDFLD_HDCP_STATUS_REG);
+			}
+		}
+	}
+
+	return ret;
+}
+
+/*
+ *
+ * GetHDCPEncryptionLevel: 
+ *
+ */
+static void hdcp_get_encryption_level(cp_parameters_t * cp)
+{
+
+	if (hdcp_is_enabled()) {
+		cp->level = CP_PROTECTION_LEVEL_HDCP_ON;
+	} else {
+		cp->level = CP_PROTECTION_LEVEL_HDCP_OFF;
+	}
+
+	return;
+}
+
+/*
+ *
+ * GetCPData: Get Content protection Data
+ * based upon the request from CP
+ *
+ */
+uint32_t hdcp_get_cp_data(cp_parameters_t * cp)
+{
+	uint32_t ret = STATUS_SUCCESS;
+	int is_repeater = 0;
+
+	if ((cp->protect_type_mask & CP_PROTECTION_TYPE_HDCP)) {
+		//Check whether HDCP is on
+		hdcp_get_encryption_level(cp);
+
+		if (cp->level != CP_PROTECTION_LEVEL_HDCP_OFF) {
+			// see if the link is valid, do it by authenticating
+			if (!hdcp_get_link_status()) {
+				// Encryption setting failed; swtich off the encryption 
+				cp->level = CP_PROTECTION_LEVEL_HDCP_OFF;
+				hdcp_set_encryption_level(cp);
+				ret = STATUS_UNSUCCESSFUL;
+			}
+		}
+		//else
+		//{
+		//HDCP is off
+		//}
+#if 0				//Don't need this for client
+		//Get the BKSv and repeater status. This has to be returned irrespective of
+		//HDCP is ON or Not
+		if (!hdcp_get_receiver_data
+		    ((uint8_t *) (cp->hdcp.bksv.ab_ksv),
+		     CP_HDCP_KEY_SELECTION_VECTOR_SIZE, RX_TYPE_BKSV_DATA)) {
+			cp->hdcp.is_repeater = 0;
+			memset(&(cp->hdcp.bksv), 0,
+			       CP_HDCP_KEY_SELECTION_VECTOR_SIZE);
+			ret = STATUS_DATA_ERROR;
+		} else {
+			// This is via opregion. This will return all zeros in production mode
+			// Get the AKSV
+			if (hdcp_get_aksv(&aksv)) {
+				memcpy(cp->hdcp.aksv.ab_ksv, aksv.uc_aksv,
+				       CP_HDCP_KEY_SELECTION_VECTOR_SIZE);
+			} else	// if failed return all zeros
+			{
+				memset(&cp->hdcp.aksv, 0,
+				       CP_HDCP_KEY_SELECTION_VECTOR_SIZE);
+			}
+		}
+#endif
+		if (ret != STATUS_DATA_ERROR) {
+			if (hdcp_is_repeater(&is_repeater)) {
+				cp->hdcp.is_repeater = is_repeater;
+			} else {
+				cp->hdcp.is_repeater = 0;
+				ret = STATUS_DATA_ERROR;
+			}
+		}
+	}
+#if 0				/* support repeater later */
+	else if (cp->protect_type_mask == CP_PROTECTION_TYPE_NONE)	// report repeater capability+BKSV for this mask
+	{
+		if (!hdcp_get_receiver_data
+		    ((uint8_t *) (cp->hdcp.bksv.ab_ksv),
+		     CP_HDCP_KEY_SELECTION_VECTOR_SIZE, RX_TYPE_BKSV_DATA)) {
+			cp->hdcp.is_repeater = 0;
+			memset(&(cp->hdcp.bksv), 0,
+			       CP_HDCP_KEY_SELECTION_VECTOR_SIZE);
+		} else if (hdcp_is_repeater(&is_repeater)) {
+			cp->hdcp.is_repeater = is_repeater;
+		} else {
+			cp->hdcp.is_repeater = 0;
+		}
+
+		// Get the AKSV
+		if (hdcp_get_aksv(&aksv)) {
+			memcpy(cp->hdcp.aksv.ab_ksv, aksv.uc_aksv,
+			       CP_HDCP_KEY_SELECTION_VECTOR_SIZE);
+		} else		// if failed return all zeros
+		{
+			memset(&cp->hdcp.aksv, 0,
+			       CP_HDCP_KEY_SELECTION_VECTOR_SIZE);
+		}
+	}
+#endif
+	else			//Invalid mask
+	{
+		//assert(0);
+
+		cp->hdcp.is_repeater = 0;
+		//memset(&(cp->hdcp.bksv), 0, CP_HDCP_KEY_SELECTION_VECTOR_SIZE);
+	}
+
+	//Note this data needs to be sent irrespective of any unsupported mask
+	if (ret == STATUS_SUCCESS) {
+		cp->protect_type_mask |= CP_PROTECTION_TYPE_HDCP;
+	}
+
+	return ret;
+}
+
+/*
+ *
+ * SetCPData: Enables/Disables Content protection
+ * based upon the request from CP
+ *
+ */
+uint32_t hdcp_set_cp_data(cp_parameters_t * cp)
+{
+	uint32_t ret = STATUS_UNSUCCESSFUL;
+	int is_repeater = 0;
+
+	if (cp->protect_type_mask & CP_PROTECTION_TYPE_HDCP) {
+		// Get Receiver's repeater status
+		// Note:- Reporting back Repeater status in SetCP Data.
+		// This is because the analyzer for CTS, acts as repeater only 
+		// when the test is running, so notifying this back to opm in 
+		// SetProtectionLevel.
+		if (hdcp_is_repeater(&is_repeater)) {
+			cp->hdcp.is_repeater = is_repeater;
+		}
+		// Second step flag is if Repeater support needs to be enabled
+		if (cp->hdcp.perform_second_step) {
+			ret = hdcp_activate_repeater(cp);
+			if ((ret != STATUS_SUCCESS) && (ret != STATUS_PENDING)) {
+				// Encryption setting failed; switch off the encryption 
+				cp->level = CP_PROTECTION_LEVEL_HDCP_OFF;
+				hdcp_set_encryption_level(cp);
+			}
+		} else {
+			ret = hdcp_set_encryption_level(cp);
+
+			if (ret != STATUS_SUCCESS) {
+				// Encryption setting failed; swtich off the encryption 
+				cp->level = CP_PROTECTION_LEVEL_HDCP_OFF;
+				hdcp_set_encryption_level(cp);
+			}
+		}
+
+		if (ret == STATUS_SUCCESS) {
+#if 0				//Do need this for client
+			// read the bksv
+			if (!hdcp_get_receiver_data
+			    ((uint8_t *) (cp->hdcp.bksv.ab_ksv),
+			     CP_HDCP_KEY_SELECTION_VECTOR_SIZE,
+			     RX_TYPE_BKSV_DATA)) {
+				cp->hdcp.is_repeater = 0;
+				memset(&(cp->hdcp.bksv), 0,
+				       CP_HDCP_KEY_SELECTION_VECTOR_SIZE);
+			} else {
+				// read aksv
+				if (hdcp_get_aksv(&aksv)) {
+					memcpy(cp->hdcp.aksv.ab_ksv,
+					       aksv.uc_aksv,
+					       CP_HDCP_KEY_SELECTION_VECTOR_SIZE);
+				} else	// if failed return all zeros
+				{
+					memset(&cp->hdcp.aksv, 0,
+					       CP_HDCP_KEY_SELECTION_VECTOR_SIZE);
+				}
+			}
+#endif
+			cp->protect_type_mask |= CP_PROTECTION_TYPE_HDCP;
+		}
+	} else {
+		// No other calls are handled
+		return STATUS_SUCCESS;
+	}
+
+	return ret;
+}
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/mdfld_msic.c b/drivers/external_drivers/intel_media/display/tng/drv/mdfld_msic.c
new file mode 100644
index 0000000..bae4d92
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/mdfld_msic.c
@@ -0,0 +1,210 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *	Jim Liu <jim.liu@intel.com>
+ */
+
+#include "mdfld_msic.h"
+#include <drm/drmP.h>
+#include "psb_drv.h"
+#include "psb_intel_hdmi.h"
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34))
+#include <asm/intel_scu_ipc.h>
+#endif
+
+#define SRAM_MSIC_VRINT_ADDR 0xFFFF7FCB
+static u8 *sram_vreg_addr = 0;
+/*
+ *
+ */
+static struct mid_intel_hdmi_priv *hdmi_priv;
+
+void mdfld_msic_init(struct mid_intel_hdmi_priv *p_hdmi_priv)
+{
+	hdmi_priv = p_hdmi_priv;
+}
+
+/**
+ *  hpd_notify_um
+ */
+void hpd_notify_um(void)
+{
+	struct drm_device *dev = hdmi_priv ? hdmi_priv->dev : NULL;
+	struct drm_psb_private *dev_priv = NULL;
+	struct pci_dev *pdev = NULL;
+	struct device *ddev = NULL;
+	struct kobject *kobj = NULL;
+
+	if (dev) {
+		PSB_DEBUG_ENTRY("\n");
+		dev_priv = psb_priv(dev);
+		dev_priv->hdmi_done_reading_edid = false;
+	}
+
+	/*find handle to drm kboject */
+	if (dev == NULL) {
+		DRM_ERROR("%s: dev == NULL.\n", __func__);
+		return;
+	}
+	pdev = dev->pdev;
+
+	if (pdev == NULL) {
+		DRM_ERROR("%s: pdev == NULL.\n", __func__);
+		return;
+	}
+	ddev = &pdev->dev;
+
+	if (ddev == NULL) {
+		DRM_ERROR("%s: ddev == NULL.\n", __func__);
+		return;
+	}
+	kobj = &ddev->kobj;
+
+	if (kobj == NULL) {
+		DRM_ERROR("%s: kobj == NULL.\n", __func__);
+		return;
+	}
+
+	if (dev_priv->psb_hotplug_state) {
+		DRM_INFO("%s: HPD interrupt.\n", __func__);
+		psb_hotplug_notify_change_um("hpd_hdmi",
+					     dev_priv->psb_hotplug_state);
+	} else {
+		DRM_INFO("%s: Hotplug comm layer isn't initialized!\n",
+			 __func__);
+	}
+
+	/* send drm uevent message */
+	schedule_work(&dev_priv->hdmi_hotplug_wq);
+
+	return;
+}
+
+/**
+ *  msic_vreg_handler
+ */
+irqreturn_t msic_vreg_handler(int irq, void *dev_id)
+{
+	struct drm_device *dev = hdmi_priv ? hdmi_priv->dev : NULL;
+	struct drm_psb_private *dev_priv = NULL;
+	u8 data = 0;
+
+	/* Need to add lock later. */
+
+	/* Read VREG interrupt status register */
+	if (sram_vreg_addr)
+		data = readb(sram_vreg_addr);
+	else
+		DRM_ERROR("%s: sram_vreg_addr = %p.\n",
+			  __func__, sram_vreg_addr);
+
+	if (dev) {
+		PSB_DEBUG_ENTRY("data = 0x%x.\n", data);
+		dev_priv = psb_priv(dev);
+
+		/* handle HDMI HPD interrupts. */
+		if (data & HDMI_HPD_STATUS) {
+			DRM_INFO("%s: HPD interrupt. data = 0x%x.\n", __func__,
+				 data);
+
+			if (dev_priv->um_start)
+				hpd_notify_um();
+		}
+	}
+	/* handle other msic vreq interrupts when necessary. */
+
+	return IRQ_HANDLED;
+}
+
+/**
+ *  msic_probe
+ */
+static int msic_probe(struct pci_dev *pdev,
+				const struct pci_device_id *ent)
+{
+	struct drm_device *dev = hdmi_priv ? hdmi_priv->dev : 0;
+	int ret = 0;
+
+	if (dev) {
+		PSB_DEBUG_ENTRY("\n");
+	}
+	/* enable msic hdmi device */
+	ret = pci_enable_device(pdev);
+
+	if (!ret) {
+
+		if (pdev->device == MSIC_PCI_DEVICE_ID) {
+			sram_vreg_addr =
+			    ioremap_nocache(SRAM_MSIC_VRINT_ADDR, 0x2);
+			ret =
+			    request_irq(pdev->irq, msic_vreg_handler,
+					IRQF_SHARED, "msic_hdmi_driver",
+					(void *)&hdmi_priv);
+		} else
+			DRM_ERROR("%s: pciid = 0x%x is not msic_hdmi pciid.\n",
+				  __func__, pdev->device);
+
+		if (ret) {
+			pci_dev_put(pdev);
+			DRM_ERROR("%s: request_irq failed. ret = 0x%x.\n",
+				  __func__, ret);
+		}
+	}
+
+	return ret;
+}
+
+static struct pci_device_id msic_pci_id_list[] = {
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MSIC_PCI_DEVICE_ID)},
+	{0}
+};
+
+/*MODULE_DEVICE_TABLE(pci, msic_pci_id_list);*/
+
+/* field for registering driver to PCI device */
+static struct pci_driver msic_pci_driver = {
+	.name = "msic_hdmi_driver",
+	.id_table = msic_pci_id_list,
+	.probe = msic_probe
+};
+
+/**
+ *  msic_regsiter_driver - register the msic hdmi device to PCI system.
+ */
+int msic_regsiter_driver(void)
+{
+	return pci_register_driver(&msic_pci_driver);
+}
+
+/**
+ *  msic_unregsiter_driver - unregister the msic hdmi device from PCI system.
+ */
+int msic_unregister_driver(void)
+{
+	if (!sram_vreg_addr) {
+		iounmap(sram_vreg_addr);
+		sram_vreg_addr = 0;
+	}
+	pci_unregister_driver(&msic_pci_driver);
+	return 0;
+}
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/mdfld_msic.h b/drivers/external_drivers/intel_media/display/tng/drv/mdfld_msic.h
new file mode 100644
index 0000000..89f562d
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/mdfld_msic.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *	Jim Liu <jim.liu@intel.com>
+ */
+
+#ifndef __MDFLD_MSIC_H__
+#define __MDFLD_MSIC_H__
+
+#define MSIC_PCI_DEVICE_ID 0x831
+
+int msic_regsiter_driver(void);
+int msic_unregister_driver(void);
+extern void hpd_notify_um(void);
+extern int msic_regsiter_driver(void);
+extern int msic_unregister_driver(void);
+
+#endif
\ No newline at end of file
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/mdfld_output.c b/drivers/external_drivers/intel_media/display/tng/drv/mdfld_output.c
new file mode 100644
index 0000000..68430fe
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/mdfld_output.c
@@ -0,0 +1,196 @@
+/*
+ * Copyright (c)  2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicensen
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Thomas Eaton <thomas.g.eaton@intel.com>
+ * Scott Rowe <scott.m.rowe@intel.com>
+*/
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include "displays/hdmi.h"
+
+#include "psb_drv.h"
+#include "android_hdmi.h"
+
+#ifdef CONFIG_SUPPORT_MIPI
+#include "mdfld_dsi_dbi.h"
+#include "mdfld_dsi_dpi.h"
+#include "mdfld_output.h"
+#include "mdfld_dsi_output.h"
+#include "displays/jdi_vid.h"
+#include "displays/jdi_cmd.h"
+#include "displays/cmi_vid.h"
+#include "displays/cmi_cmd.h"
+#include "displays/sharp10x19_cmd.h"
+#include "displays/sharp25x16_vid.h"
+#include "displays/sharp25x16_cmd.h"
+#include "displays/sdc16x25_8_cmd.h"
+#include "displays/sdc25x16_cmd.h"
+#include "displays/jdi25x16_vid.h"
+#include "displays/jdi25x16_cmd.h"
+
+static struct intel_mid_panel_list panel_list[] = {
+	{JDI_7x12_VID, MDFLD_DSI_ENCODER_DPI, jdi_vid_init},
+	{JDI_7x12_CMD, MDFLD_DSI_ENCODER_DBI, jdi_cmd_init},
+	{CMI_7x12_VID, MDFLD_DSI_ENCODER_DPI, cmi_vid_init},
+	{CMI_7x12_CMD, MDFLD_DSI_ENCODER_DBI, cmi_cmd_init},
+	{SHARP_10x19_CMD, MDFLD_DSI_ENCODER_DBI, sharp10x19_cmd_init},
+	{SHARP_10x19_DUAL_CMD, MDFLD_DSI_ENCODER_DBI, sharp10x19_cmd_init},
+	{SHARP_25x16_VID, MDFLD_DSI_ENCODER_DPI, sharp25x16_vid_init},
+	{SHARP_25x16_CMD, MDFLD_DSI_ENCODER_DBI, sharp25x16_cmd_init},
+	{JDI_25x16_VID, MDFLD_DSI_ENCODER_DPI, jdi25x16_vid_init},
+	{JDI_25x16_CMD, MDFLD_DSI_ENCODER_DBI, jdi25x16_cmd_init},
+	{SDC_16x25_CMD, MDFLD_DSI_ENCODER_DBI, sdc16x25_8_cmd_init},
+	{SDC_25x16_CMD, MDFLD_DSI_ENCODER_DBI, sdc25x16_cmd_init},
+};
+
+enum panel_type get_panel_type(struct drm_device *dev, int pipe)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) dev->dev_private;
+
+	return dev_priv->panel_id;
+}
+
+bool is_dual_dsi(struct drm_device *dev)
+{
+	if ((get_panel_type(dev, 0) == SHARP_25x16_VID) ||
+		(get_panel_type(dev, 0) == SHARP_25x16_CMD) ||
+		(get_panel_type(dev, 0) == SHARP_10x19_DUAL_CMD) ||
+		(get_panel_type(dev, 0) == SDC_16x25_CMD) ||
+		(get_panel_type(dev, 0) == SDC_25x16_CMD) ||
+		(get_panel_type(dev, 0) == JDI_25x16_CMD) ||
+		(get_panel_type(dev, 0) == JDI_25x16_VID))
+		return true;
+	else return false;
+}
+
+bool is_dual_panel(struct drm_device *dev)
+{
+	if (get_panel_type(dev, 0) == SHARP_10x19_DUAL_CMD)
+		return true;
+	else return false;
+}
+
+/**
+ * is_panel_vid_or_cmd(struct drm_device *dev)
+ * Function return value: panel encoder type
+ */
+mdfld_dsi_encoder_t is_panel_vid_or_cmd(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) dev->dev_private;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(panel_list); i++) {
+		if (panel_list[i].p_type == dev_priv->panel_id)
+			return panel_list[i].encoder_type;
+	}
+	DRM_INFO("%s : Could not find panel: pabel_id = %d, ARRAY_SIZE = %zd",
+			__func__, dev_priv->panel_id, ARRAY_SIZE(panel_list));
+	DRM_INFO("%s : Crashing...", __func__);
+	BUG();
+
+	/* This should be unreachable */
+	return 0;
+}
+#endif
+
+void init_panel(struct drm_device *dev, int mipi_pipe, enum panel_type p_type)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) dev->dev_private;
+#ifdef CONFIG_SUPPORT_MIPI
+	struct panel_funcs *p_funcs = NULL;
+	int i = 0, ret = 0;
+#endif
+	struct drm_connector *connector;
+
+#ifdef CONFIG_SUPPORT_HDMI
+	if (p_type == HDMI) {
+		PSB_DEBUG_ENTRY("GFX: Initializing HDMI");
+		android_hdmi_driver_init(dev, &dev_priv->mode_dev);
+		if (!IS_MRFLD(dev))
+			return;
+
+		mutex_lock(&dev->mode_config.mutex);
+		list_for_each_entry(connector,
+				&dev->mode_config.connector_list, head) {
+			if ((connector->connector_type !=
+						DRM_MODE_CONNECTOR_DSI) &&
+					(connector->connector_type !=
+					 DRM_MODE_CONNECTOR_LVDS))
+				connector->polled = DRM_CONNECTOR_POLL_HPD;
+		}
+		mutex_unlock(&dev->mode_config.mutex);
+
+		return;
+	}
+#endif
+
+#ifdef CONFIG_SUPPORT_MIPI
+	dev_priv->cur_pipe = mipi_pipe;
+	p_funcs = kzalloc(sizeof(struct panel_funcs), GFP_KERNEL);
+
+	for (i = 0; i < ARRAY_SIZE(panel_list); i++) {
+		if (panel_list[i].p_type == dev_priv->panel_id) {
+			panel_list[i].panel_init(
+					dev,
+					p_funcs);
+			ret = mdfld_dsi_output_init(dev,
+					mipi_pipe,
+					NULL,
+					p_funcs);
+			if (ret)
+				kfree(p_funcs);
+			break;
+		}
+	}
+#endif
+}
+
+void mdfld_output_init(struct drm_device *dev)
+{
+#ifdef CONFIG_SUPPORT_MIPI
+	enum panel_type p_type1;
+
+	/* MIPI panel 1 */
+	p_type1 = get_panel_type(dev, 0);
+	init_panel(dev, 0, p_type1);
+
+#ifdef CONFIG_MDFD_DUAL_MIPI
+	{
+		/* MIPI panel 2 */
+		enum panel_type p_type2;
+		p_type2 = get_panel_type(dev, 2);
+		init_panel(dev, 2, p_type2);
+	}
+#endif
+#endif
+
+#ifdef CONFIG_SUPPORT_HDMI
+	/* HDMI panel */
+	init_panel(dev, 0, HDMI);
+#endif
+}
+
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/mdfld_output.h b/drivers/external_drivers/intel_media/display/tng/drv/mdfld_output.h
new file mode 100644
index 0000000..a9994f9
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/mdfld_output.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c)  2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicensen
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Thomas Eaton <thomas.g.eaton@intel.com>
+ * Scott Rowe <scott.m.rowe@intel.com>
+*/
+
+
+
+#ifndef MDFLD_OUTPUT_H
+#define MDFLD_OUTPUT_H
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+
+#include "psb_drv.h"
+
+#ifdef CONFIG_SUPPORT_MIPI
+
+#define TPO_PANEL_WIDTH		84
+#define TPO_PANEL_HEIGHT	46
+#define TMD_PANEL_WIDTH		53 /* PR3 */
+#define TMD_PANEL_HEIGHT	89 /* PR3 */
+#define PYR_PANEL_WIDTH		53
+#define PYR_PANEL_HEIGHT	95
+#define PANEL_4DOT3_WIDTH	53
+#define PANEL_4DOT3_HEIGHT	95
+#define AUO_PANEL_WIDTH		54
+#define AUO_PANEL_HEIGHT	96
+#define PANEL_3DOT47_WIDTH	49
+#define PANEL_3DOT47_HEIGHT	73
+
+struct mdfld_dsi_config;
+
+/*DSI panel connection status*/
+enum {
+	MDFLD_DSI_PANEL_CONNECTED = 0,
+	MDFLD_DSI_PANEL_DISCONNECTED,
+};
+
+enum {
+	MDFLD_DSI_PANEL_POWER_ON = 0,
+	MDFLD_DSI_PANEL_POWER_OFF,
+};
+
+struct panel_info {
+	u32 width_mm;
+	u32 height_mm;
+
+	bool panel_180_rotation;
+	/*other infos*/
+};
+
+/**
+ *Panel specific callbacks.
+ *
+ *@get_config_mode: return the fixed display mode of panel.
+ *@update_fb: command mode panel only. update on-panel framebuffer.
+ *@get_panel_info: return panel information. such as physical size.
+ *@reset: panel hard reset.
+ *@drv_ic_init: initialize panel driver IC and additional HW initialization.
+ *@detect: return the panel physical connection status.
+ *@dsi_controller_init: Initialize MIPI IP for this panel.
+ *@power_on: turn on panel. e.g. send a TURN_ON special packet.
+ *@power_off: turn off panel. e.g. send a SHUT_DOWN special packet.
+ *
+ *When adding a new panel, the driver code should implement these callbacks
+ *according to corresponding panel specs. DPI and DBI implementation will
+ *call these callbacks to take the specific actions for the new panel.
+ */
+struct panel_funcs {
+	struct drm_display_mode *(*get_config_mode)(void);
+	void (*dsi_controller_init)(struct mdfld_dsi_config *dsi_config);
+	void (*get_panel_info)(int, struct panel_info *);
+	int (*reset)(struct mdfld_dsi_config *dsi_config);
+	int (*exit_deep_standby)(struct mdfld_dsi_config *dsi_config);
+	int (*detect)(struct mdfld_dsi_config *dsi_config);
+	int (*power_on)(struct mdfld_dsi_config *dsi_config);
+	int (*power_off)(struct mdfld_dsi_config *dsi_config);
+	int (*set_brightness)(struct mdfld_dsi_config *dsi_config, int level);
+	int (*drv_ic_init)(struct mdfld_dsi_config *dsi_config);
+	int (*drv_set_panel_mode)(struct mdfld_dsi_config *dsi_config);
+};
+
+struct intel_mid_panel_list {
+	enum panel_type p_type;
+	int encoder_type;
+	void (*panel_init)(struct drm_device *, struct panel_funcs *);
+};
+
+extern enum panel_type get_panel_type(struct drm_device *dev, int pipe);
+extern bool is_dual_dsi(struct drm_device *dev);
+extern bool is_dual_panel(struct drm_device *dev);
+extern mdfld_dsi_encoder_t is_panel_vid_or_cmd(struct drm_device *dev);
+#endif
+
+extern void mdfld_output_init(struct drm_device *dev);
+
+#endif
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/mofd_dc_callbacks.c b/drivers/external_drivers/intel_media/display/tng/drv/mofd_dc_callbacks.c
new file mode 100755
index 0000000..cdbd0b8
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/mofd_dc_callbacks.c
@@ -0,0 +1,1111 @@
+/*****************************************************************************
+ *
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ ******************************************************************************/
+#include <linux/console.h>
+
+#include "psb_drv.h"
+#include "pmu_tng.h"
+#include "psb_fb.h"
+#include "psb_intel_reg.h"
+#include "displayclass_interface.h"
+#include "pwr_mgmt.h"
+
+#ifdef CONFIG_SUPPORT_MIPI
+#include "mdfld_dsi_output.h"
+#include "mdfld_dsi_dbi_dsr.h"
+#endif
+
+#include <linux/kernel.h>
+#include <string.h>
+#include "android_hdmi.h"
+
+#define KEEP_UNUSED_CODE 0
+
+#if KEEP_UNUSED_CODE
+static int FindCurPipe(struct drm_device *dev)
+{
+	struct drm_crtc *crtc;
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		if (drm_helper_crtc_in_use(crtc)) {
+			struct psb_intel_crtc *psb_intel_crtc =
+			    to_psb_intel_crtc(crtc);
+			return psb_intel_crtc->pipe;
+		}
+	}
+
+	return 0;
+}
+#endif /* if KEEP_UNUSED_CODE */
+
+static void user_mode_start(struct drm_psb_private *dev_priv)
+{
+	if (!dev_priv->um_start) {
+		dev_priv->um_start = true;
+		dev_priv->b_async_flip_enable = true;
+		if (dev_priv->b_dsr_enable_config)
+			dev_priv->b_dsr_enable = true;
+	}
+}
+
+static void DCWriteReg(struct drm_device *dev, unsigned long ulOffset,
+		       unsigned long ulValue)
+{
+	struct drm_psb_private *dev_priv;
+	void *pvRegAddr;
+
+	dev_priv = (struct drm_psb_private *)dev->dev_private;
+	pvRegAddr = (void *)(dev_priv->vdc_reg + ulOffset);
+	mb();
+	iowrite32(ulValue, pvRegAddr);
+}
+
+void DCCBGetFramebuffer(struct drm_device *dev, struct psb_framebuffer **ppsb)
+{
+	struct drm_psb_private *dev_priv;
+	struct psb_fbdev *fbdev;
+
+	dev_priv = (struct drm_psb_private *)dev->dev_private;
+	fbdev = dev_priv->fbdev;
+	if (fbdev != NULL)
+		*ppsb = fbdev->pfb;
+}
+
+int DCChangeFrameBuffer(struct drm_device *dev,
+			struct psb_framebuffer *psbfb)
+{
+	return 0;
+}
+
+int DCCBEnableVSyncInterrupt(struct drm_device *dev, int pipe)
+{
+	struct drm_psb_private *dev_priv;
+	int ret = 0;
+
+	dev_priv = (struct drm_psb_private *)dev->dev_private;
+	if (drm_vblank_get(dev, pipe)) {
+		DRM_DEBUG("Couldn't enable vsync interrupt\n");
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+void DCCBDisableVSyncInterrupt(struct drm_device *dev, int pipe)
+{
+	struct drm_psb_private *dev_priv;
+
+	dev_priv = (struct drm_psb_private *)dev->dev_private;
+	drm_vblank_put(dev, pipe);
+}
+
+void DCCBInstallVSyncISR(struct drm_device *dev,
+			 pfn_vsync_handler pVsyncHandler)
+{
+	struct drm_psb_private *dev_priv;
+
+	dev_priv = (struct drm_psb_private *)dev->dev_private;
+	dev_priv->psb_vsync_handler = pVsyncHandler;
+}
+
+void DCCBUninstallVSyncISR(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *)dev->dev_private;
+
+	dev_priv->psb_vsync_handler = NULL;
+}
+
+void DCCBFlipToSurface(struct drm_device *dev, unsigned long uiAddr,
+				unsigned long uiFormat, unsigned long uiStride,
+		       unsigned int pipeflag)
+{
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *)dev->dev_private;
+	u32 dspsurf = (dev_priv->cur_pipe == 0 ? DSPASURF : DSPBSURF);
+	u32 dspcntr;
+	u32 dspstride;
+	u32 reg_offset;
+	u32 val = 0;
+#ifdef CONFIG_SUPPORT_MIPI
+	struct mdfld_dsi_config *dsi_config = NULL;
+	struct mdfld_dsi_hw_context *dsi_ctx;
+#endif
+
+	DRM_DEBUG("%s %s %d, uiAddr = 0x%lx\n", __FILE__, __func__,
+			  __LINE__, uiAddr);
+
+	user_mode_start(dev_priv);
+
+#ifdef CONFIG_SUPPORT_MIPI
+	if (pipeflag == 0) {
+		dsi_config = dev_priv->dsi_configs[0];
+		reg_offset = 0;
+	} else if (pipeflag == 2) {
+		dsi_config = dev_priv->dsi_configs[1];
+		reg_offset = 0x2000;
+	} else if (pipeflag == 1) {
+		dsi_config = NULL;
+		reg_offset = 0x1000;
+	} else {
+		DRM_ERROR("%s: invalid pipe %u\n", __func__, pipeflag);
+		return;
+	}
+#else
+	if (pipeflag == 1)
+		reg_offset = 0x1000;
+	else
+		return;
+#endif
+	/*update format*/
+	val = (0x80000000 | uiFormat);
+
+#ifdef CONFIG_SUPPORT_MIPI
+	if (dsi_config) {
+		dsi_ctx = &dsi_config->dsi_hw_context;
+		dsi_ctx->dspstride = uiStride;
+		dsi_ctx->dspcntr = val;
+		dsi_ctx->dspsurf = uiAddr;
+	}
+#endif
+	dspsurf = DSPASURF + reg_offset;
+	dspcntr = DSPACNTR + reg_offset;
+	dspstride = DSPASTRIDE + reg_offset;
+
+	DCWriteReg(dev, dspcntr, val);
+	/*update stride*/
+	DCWriteReg(dev, dspstride, uiStride);
+	/*update surface address*/
+	DCWriteReg(dev, dspsurf, uiAddr);
+}
+
+static bool is_vblank_period(struct drm_device *dev, int pipe)
+{
+	struct drm_psb_private *dev_priv = NULL;
+#ifdef CONFIG_SUPPORT_MIPI
+	struct mdfld_dsi_config *dsi_config = NULL;
+#endif
+	struct android_hdmi_priv *hdmi_priv = NULL;
+	u32 reg_offset = 0;
+	int vdisplay = 0, vrefresh = 0;
+	int delay_us = 5, dsl_threshold = 0, plane_flip_time = 200;
+	int retry = 0;
+	int dsl = 0;
+
+	if (!dev || !dev->dev_private)
+		return false;
+
+	dev_priv = (struct drm_psb_private *)dev->dev_private;
+
+	switch (pipe) {
+#ifdef CONFIG_SUPPORT_MIPI
+	case PIPEA:
+		reg_offset = 0;
+		dsi_config = dev_priv->dsi_configs[0];
+		if (dsi_config && dsi_config->mode) {
+			vdisplay = dsi_config->mode->vdisplay;
+			vrefresh = dsi_config->mode->vrefresh;
+		}
+		break;
+#endif
+	case PIPEB:
+		reg_offset = 0x1000;
+		hdmi_priv = dev_priv->hdmi_priv;
+		if (hdmi_priv && hdmi_priv->current_mode) {
+			vdisplay = hdmi_priv->current_mode->vdisplay;
+			vrefresh = hdmi_priv->current_mode->vrefresh;
+		}
+		break;
+#ifdef CONFIG_SUPPORT_MIPI
+	case PIPEC:
+		reg_offset = 0x2000;
+		dsi_config = dev_priv->dsi_configs[1];
+		if (dsi_config && dsi_config->mode) {
+			vdisplay = dsi_config->mode->vdisplay;
+			vrefresh = dsi_config->mode->vrefresh;
+		}
+		break;
+#endif
+	default:
+		DRM_ERROR("Invalid pipe %d\n", pipe);
+		return false;
+	}
+
+	if (vdisplay <= 0) {
+		DRM_ERROR("Invalid vertical active region for pipe %d.\n", pipe);
+		return false;
+	}
+
+	retry = (int)(1000000 / (vrefresh * delay_us));
+	dsl_threshold = vdisplay - (int)(1000000 / (vrefresh * plane_flip_time));
+	while (--retry && ((REG_READ(PIPEADSL + reg_offset) & PIPE_LINE_CNT_MASK) >= dsl_threshold))
+		udelay(delay_us);
+
+	if (!retry) {
+		DRM_ERROR("Pipe %d DSL is sticky.\n", pipe);
+		return false;
+	}
+
+	dsl = REG_READ(PIPEADSL + reg_offset) & PIPE_LINE_CNT_MASK;
+	if (dsl >= dsl_threshold)
+		DRM_INFO("DSL is at line %u for pipe %d.\n", dsl, pipe);
+
+	return true;
+}
+
+void DCCBFlipOverlay(struct drm_device *dev,
+			struct intel_dc_overlay_ctx *ctx)
+{
+	struct drm_psb_private *dev_priv;
+#ifdef CONFIG_SUPPORT_MIPI
+	struct mdfld_dsi_config *dsi_config = NULL;
+	struct mdfld_dsi_hw_context *dsi_ctx;
+#endif
+	u32 ovadd_reg = OV_OVADD;
+
+	if (!dev || !ctx)
+		return;
+
+	dev_priv = (struct drm_psb_private *)dev->dev_private;
+
+	user_mode_start(dev_priv);
+
+	if (ctx->index == 1)
+		ovadd_reg = OVC_OVADD;
+
+	ctx->ovadd |= 1;
+
+#ifdef CONFIG_SUPPORT_MIPI
+	if (ctx->pipe == 0)
+		dsi_config = dev_priv->dsi_configs[0];
+	else if (ctx->pipe == 2)
+		dsi_config = dev_priv->dsi_configs[1];
+
+	if (dsi_config) {
+		dsi_ctx = &dsi_config->dsi_hw_context;
+		if (ctx->index == 0)
+			dsi_ctx->ovaadd = ctx->ovadd;
+		else if (ctx->index == 1)
+			dsi_ctx->ovcadd = ctx->ovadd;
+	}
+#endif
+
+	/*Flip surf*/
+	PSB_WVDC32(ctx->ovadd, ovadd_reg);
+}
+
+void DCCBFlipSprite(struct drm_device *dev,
+			struct intel_dc_sprite_ctx *ctx)
+{
+	struct drm_psb_private *dev_priv;
+#ifdef CONFIG_SUPPORT_MIPI
+	struct mdfld_dsi_config *dsi_config = NULL;
+	struct mdfld_dsi_hw_context *dsi_ctx;
+#endif
+	u32 reg_offset = 0x3000;
+
+	if (!dev || !ctx)
+		return;
+
+	dev_priv = (struct drm_psb_private *)dev->dev_private;
+
+	user_mode_start(dev_priv);
+
+#ifdef CONFIG_SUPPORT_MIPI
+	if (ctx->index == 1) {
+		reg_offset = 0x4000;
+	} else if (ctx->index == 2) {
+		reg_offset = 0x5000;
+	} else
+#endif
+	if (ctx->index == 0) {
+		reg_offset = 0x3000;
+	} else {
+		DRM_ERROR("%s: invalid index\n", __func__);
+	}
+
+	/* asign sprite to pipe */
+	ctx->cntr &= ~DISPPLANE_SEL_PIPE_MASK;
+
+	if (ctx->pipe == 1)
+		ctx->cntr |= DISPPLANE_SEL_PIPE_B;
+#ifdef CONFIG_SUPPORT_MIPI
+	else if (ctx->pipe == 0) {
+		ctx->cntr |= DISPPLANE_SEL_PIPE_A;
+		dsi_config = dev_priv->dsi_configs[0];
+	} else if (ctx->pipe == 2) {
+		ctx->cntr |= DISPPLANE_SEL_PIPE_C;
+		dsi_config = dev_priv->dsi_configs[1];
+	}
+#endif
+	if ((ctx->update_mask & SPRITE_UPDATE_POSITION))
+		PSB_WVDC32(ctx->pos, DSPAPOS + reg_offset);
+
+	if ((ctx->update_mask & SPRITE_UPDATE_SIZE)) {
+		PSB_WVDC32(ctx->size, DSPASIZE + reg_offset);
+		PSB_WVDC32(ctx->stride, DSPASTRIDE + reg_offset);
+	}
+
+	if ((ctx->update_mask & SPRITE_UPDATE_CONSTALPHA)) {
+		PSB_WVDC32(ctx->contalpa, DSPACONSTALPHA + reg_offset);
+	}
+
+	if ((ctx->update_mask & SPRITE_UPDATE_CONTROL)){
+                if(drm_psb_set_gamma_success)
+			PSB_WVDC32(ctx->cntr | DISPPLANE_GAMMA_ENABLE, DSPACNTR + reg_offset);
+                else
+                        PSB_WVDC32(ctx->cntr, DSPACNTR + reg_offset);
+        }
+
+	if ((ctx->update_mask & SPRITE_UPDATE_SURFACE)) {
+		PSB_WVDC32(ctx->linoff, DSPALINOFF + reg_offset);
+		PSB_WVDC32(ctx->tileoff, DSPATILEOFF + reg_offset);
+		PSB_WVDC32(ctx->surf, DSPASURF + reg_offset);
+	}
+
+#ifdef CONFIG_SUPPORT_MIPI
+	if (dsi_config) {
+		dsi_ctx = &dsi_config->dsi_hw_context;
+		dsi_ctx->sprite_dsppos = ctx->pos;
+		dsi_ctx->sprite_dspsize = ctx->size;
+		dsi_ctx->sprite_dspstride = ctx->stride;
+		dsi_ctx->sprite_dspcntr = ctx->cntr | ((PSB_RVDC32(DSPACNTR + reg_offset) & DISPPLANE_GAMMA_ENABLE));
+		dsi_ctx->sprite_dsplinoff = ctx->linoff;
+		dsi_ctx->sprite_dspsurf = ctx->surf;
+	}
+#endif
+}
+
+void DCCBFlipPrimary(struct drm_device *dev,
+			struct intel_dc_primary_ctx *ctx)
+{
+	struct drm_psb_private *dev_priv;
+#ifdef CONFIG_SUPPORT_MIPI
+	struct mdfld_dsi_config *dsi_config = NULL;
+	struct mdfld_dsi_hw_context *dsi_ctx;
+#endif
+	u32 reg_offset;
+	int pipe;
+
+	if (!dev || !ctx)
+		return;
+
+	dev_priv = (struct drm_psb_private *)dev->dev_private;
+
+	user_mode_start(dev_priv);
+
+#ifdef CONFIG_SUPPORT_MIPI
+	if (ctx->index == 0) {
+		reg_offset = 0;
+		dsi_config = dev_priv->dsi_configs[0];
+		pipe = 0;
+	} else
+#endif
+	if (ctx->index == 1) {
+		reg_offset = 0x1000;
+		pipe = 1;
+	}
+#ifdef CONFIG_SUPPORT_MIPI
+	else if (ctx->index == 2) {
+		reg_offset = 0x2000;
+		dsi_config = dev_priv->dsi_configs[1];
+		pipe = 2;
+	}
+#endif
+	else
+		return;
+
+	if ((ctx->update_mask & SPRITE_UPDATE_POSITION))
+		PSB_WVDC32(ctx->pos, DSPAPOS + reg_offset);
+
+	if ((ctx->update_mask & SPRITE_UPDATE_SIZE)) {
+		PSB_WVDC32(ctx->size, DSPASIZE + reg_offset);
+		PSB_WVDC32(ctx->stride, DSPASTRIDE + reg_offset);
+	}
+
+	if ((ctx->update_mask & SPRITE_UPDATE_CONSTALPHA)) {
+		PSB_WVDC32(ctx->contalpa, DSPACONSTALPHA + reg_offset);
+	}
+
+	if ((ctx->update_mask & SPRITE_UPDATE_CONTROL)){
+                if(drm_psb_set_gamma_success)
+                        PSB_WVDC32(ctx->cntr | DISPPLANE_GAMMA_ENABLE, DSPACNTR + reg_offset);
+                else
+                        PSB_WVDC32(ctx->cntr, DSPACNTR + reg_offset);
+        }
+
+	if ((ctx->update_mask & SPRITE_UPDATE_SURFACE)) {
+		PSB_WVDC32(ctx->linoff, DSPALINOFF + reg_offset);
+		PSB_WVDC32(ctx->tileoff, DSPATILEOFF + reg_offset);
+		PSB_WVDC32(ctx->surf, DSPASURF + reg_offset);
+	}
+
+#ifdef CONFIG_SUPPORT_MIPI
+	if (dsi_config) {
+		dsi_ctx = &dsi_config->dsi_hw_context;
+		dsi_ctx->dsppos = ctx->pos;
+		dsi_ctx->dspsize = ctx->size;
+		dsi_ctx->dspstride = ctx->stride;
+		dsi_ctx->dspcntr = ctx->cntr | ((PSB_RVDC32(DSPACNTR + reg_offset) & DISPPLANE_GAMMA_ENABLE));
+		dsi_ctx->dsplinoff = ctx->linoff;
+		dsi_ctx->dspsurf = ctx->surf;
+	}
+#endif
+}
+
+void DCCBFlipCursor(struct drm_device *dev,
+			struct intel_dc_cursor_ctx *ctx)
+{
+	struct drm_psb_private *dev_priv;
+	u32 reg_offset = 0;
+
+	if (!dev || !ctx)
+		return;
+
+	dev_priv = (struct drm_psb_private *)dev->dev_private;
+
+	user_mode_start(dev_priv);
+
+	switch (ctx->pipe) {
+	case 0:
+		reg_offset = 0;
+		break;
+	case 1:
+		reg_offset = 0x40;
+		break;
+	case 2:
+		reg_offset = 0x60;
+		break;
+	}
+
+	PSB_WVDC32(ctx->cntr, CURACNTR + reg_offset);
+	PSB_WVDC32(ctx->pos, CURAPOS + reg_offset);
+	PSB_WVDC32(ctx->surf, CURABASE + reg_offset);
+}
+
+void DCCBSetupZorder(struct drm_device *dev,
+			struct intel_dc_plane_zorder *zorder,
+			int pipe)
+{
+
+}
+
+void DCCBSetPipeToOvadd(u32 *ovadd, int pipe)
+{
+	switch (pipe) {
+	case 0:
+		*ovadd |= OV_PIPE_A << OV_PIPE_SELECT_POS;
+		break;
+	case 1:
+		*ovadd |= OV_PIPE_B << OV_PIPE_SELECT_POS;
+		break;
+	case 2:
+		*ovadd |= OV_PIPE_C << OV_PIPE_SELECT_POS;
+		break;
+	}
+
+	return;
+}
+
+static int _GetPipeFromOvadd(u32 ovadd)
+{
+	int ov_pipe_sel = (ovadd & OV_PIPE_SELECT) >> OV_PIPE_SELECT_POS;
+	int pipe = 0;
+	switch (ov_pipe_sel) {
+	case OV_PIPE_A:
+		pipe = 0;
+		break;
+	case OV_PIPE_B:
+		pipe = 1;
+		break;
+	case OV_PIPE_C:
+		pipe = 2;
+		break;
+	}
+
+	return pipe;
+}
+
+#if 0
+static void _OverlayWaitVblank(struct drm_device *dev, int pipe)
+{
+	union drm_wait_vblank vblwait;
+	int ret;
+
+	vblwait.request.type =
+		(_DRM_VBLANK_RELATIVE |
+		 _DRM_VBLANK_NEXTONMISS);
+	vblwait.request.sequence = 1;
+
+	if (pipe == 1)
+		vblwait.request.type |=
+			_DRM_VBLANK_SECONDARY;
+
+	ret = drm_wait_vblank(dev, (void *)&vblwait, 0);
+	if (ret) {
+		DRM_ERROR("%s: fail to wait vsync of pipe %d\n", __func__, pipe);
+	}
+}
+#endif
+
+static void _OverlayWaitFlip(struct drm_device *dev, u32 ovstat_reg,
+			int index, int pipe)
+{
+	int retry = 600;
+#ifdef CONFIG_SUPPORT_MIPI
+	int ret = -EBUSY;
+
+	/* HDMI pipe can run as low as 24Hz */
+	if (pipe != 1) {
+		retry = 200;  /* 60HZ for MIPI */
+		DCCBDsrForbid(dev, pipe);
+	}
+#else
+	if (pipe != 1)
+		return;
+#endif
+	/**
+	 * make sure overlay command buffer
+	 * was copied before updating the system
+	 * overlay command buffer.
+	 */
+	while (--retry) {
+#ifdef CONFIG_SUPPORT_MIPI
+		if (pipe != 1 && ret == -EBUSY) {
+			ret = DCCBUpdateDbiPanel(dev, pipe);
+		}
+#endif
+		if (BIT31 & PSB_RVDC32(ovstat_reg))
+			break;
+		udelay(100);
+	}
+
+#ifdef CONFIG_SUPPORT_MIPI
+	if (pipe != 1)
+		DCCBDsrAllow(dev, pipe);
+#endif
+	if (!retry)
+		DRM_ERROR("OVADD %d flip timeout on pipe %d!\n", index, pipe);
+}
+
+int DCCBOverlayDisableAndWait(struct drm_device *dev, u32 ctx,
+			int index)
+{
+	u32 ovadd_reg = OV_OVADD;
+	u32 ovstat_reg = OV_DOVASTA;
+	u32 power_islands = OSPM_DISPLAY_A;
+	int pipe;
+
+	if (index != 0 && index != 1) {
+		DRM_ERROR("Invalid overlay index %d\n", index);
+		return -EINVAL;
+	}
+
+	if (index) {
+		ovadd_reg = OVC_OVADD;
+		ovstat_reg = OVC_DOVCSTA;
+		power_islands |= OSPM_DISPLAY_C;
+	}
+
+	pipe = _GetPipeFromOvadd(ctx);
+
+	if (power_island_get(power_islands)) {
+		PSB_WVDC32(ctx, ovadd_reg);
+
+		/*wait for overlay flipped*/
+		_OverlayWaitFlip(dev, ovstat_reg, index, pipe);
+
+		power_island_put(power_islands);
+	}
+	return 0;
+}
+
+int DCCBOverlayEnable(struct drm_device *dev, u32 ctx,
+			int index, int enabled)
+{
+#ifdef CONFIG_SUPPORT_MIPI
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct mdfld_dsi_config *dsi_config = NULL;
+	struct mdfld_dsi_hw_context *dsi_ctx;
+#endif
+	int pipe;
+	u32 ovadd_reg = OV_OVADD;
+	u32 ovstat_reg = OV_DOVASTA;
+	u32 power_islands = OSPM_DISPLAY_A;
+
+	if (index != 0 && index != 1) {
+		DRM_ERROR("Invalid overlay index %d\n", index);
+		return -EINVAL;
+	}
+
+	if (index) {
+		ovadd_reg = OVC_OVADD;
+		ovstat_reg = OVC_DOVCSTA;
+		power_islands |= OSPM_DISPLAY_C;
+	}
+
+	pipe = _GetPipeFromOvadd(ctx);
+#ifdef CONFIG_SUPPORT_MIPI
+	if (!enabled) {
+		if (pipe == 0)
+			dsi_config = dev_priv->dsi_configs[0];
+		else if (pipe == 2)
+			dsi_config = dev_priv->dsi_configs[1];
+
+		if (dsi_config) {
+			dsi_ctx = &dsi_config->dsi_hw_context;
+			if (index == 0)
+				dsi_ctx->ovaadd = 0;
+			else if (index == 1)
+				dsi_ctx->ovcadd = 0;
+		}
+	}
+#endif
+
+	if (power_island_get(power_islands)) {
+		/*make sure previous flip was done*/
+		_OverlayWaitFlip(dev, ovstat_reg, index, pipe);
+#if 0
+		_OverlayWaitVblank(dev, pipe);
+#endif
+
+		PSB_WVDC32(ctx, ovadd_reg);
+
+		power_island_put(power_islands);
+	}
+	return 0;
+}
+
+int DCCBSpriteEnable(struct drm_device *dev, u32 ctx,
+			int index, int enabled)
+{
+	u32 power_islands = (OSPM_DISPLAY_A | OSPM_DISPLAY_C);
+#ifdef CONFIG_SUPPORT_MIPI
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct mdfld_dsi_config *dsi_config = NULL;
+	struct mdfld_dsi_hw_context *dsi_ctx = NULL;
+#endif
+	u32 reg_offset;
+	u32 dspcntr_reg = DSPACNTR;
+	u32 dspsurf_reg = DSPASURF;
+
+	switch (index) {
+	case 0:
+		reg_offset = 0x3000;
+		break;
+#ifdef CONFIG_SUPPORT_MIPI
+	case 1:
+		reg_offset = 0x4000;
+		break;
+	case 2:
+		reg_offset = 0x5000;
+		break;
+#endif
+	default:
+		DRM_ERROR("Invalid sprite index %d\n", index);
+		return -EINVAL;
+	}
+#ifdef CONFIG_SUPPORT_MIPI
+	/* FIXME: need to check pipe info here. */
+	dsi_config = dev_priv->dsi_configs[0];
+
+	if (dsi_config)
+		dsi_ctx = &dsi_config->dsi_hw_context;
+#endif
+	dspcntr_reg += reg_offset;
+	dspsurf_reg += reg_offset;
+
+	if (power_island_get(power_islands)) {
+#ifdef CONFIG_SUPPORT_MIPI
+		if (dsi_ctx)
+			dsi_ctx->sprite_dspcntr &= ~DISPLAY_PLANE_ENABLE;
+#endif
+		PSB_WVDC32((PSB_RVDC32(dspcntr_reg) & ~DISPLAY_PLANE_ENABLE),
+				dspcntr_reg);
+		PSB_WVDC32((PSB_RVDC32(dspsurf_reg)), dspsurf_reg);
+		power_island_put(power_islands);
+	}
+
+	return 0;
+}
+
+int DCCBPrimaryEnable(struct drm_device *dev, u32 ctx,
+			int index, int enabled)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_gtt *pg = dev_priv->pg;
+#ifdef CONFIG_SUPPORT_MIPI
+	struct mdfld_dsi_config *dsi_config = NULL;
+	struct mdfld_dsi_hw_context *dsi_ctx = NULL;
+#endif
+	u32 reg_offset;
+
+	if (index < 0 || index > 2) {
+		DRM_ERROR("Invalid primary index %d\n", index);
+		return -EINVAL;
+	}
+
+#ifdef CONFIG_SUPPORT_MIPI
+	if (index == 0) {
+		dsi_config = dev_priv->dsi_configs[0];
+		reg_offset = 0;
+	} else if (index == 1) {
+		reg_offset = 0x1000;
+	} else if (index == 2) {
+		dsi_config = dev_priv->dsi_configs[1];
+		reg_offset = 0x2000;
+	}
+
+	if (dsi_config) {
+		dsi_ctx = &dsi_config->dsi_hw_context;
+		dsi_ctx->dsppos = 0;
+		dsi_ctx->dspsize = (63 << 16) | 63;
+		dsi_ctx->dspstride = (64 << 2);
+		dsi_ctx->dspcntr = DISPPLANE_32BPP;
+		dsi_ctx->dspcntr |= DISPPLANE_PREMULT_DISABLE;
+		dsi_ctx->dspcntr |= (BIT31 & PSB_RVDC32(DSPACNTR + reg_offset));
+		dsi_ctx->dsplinoff = 0;
+		dsi_ctx->dspsurf = pg->reserved_gtt_start;
+	}
+#else
+	if (index == 1)
+		reg_offset = 0x1000;
+	else
+		return -EINVAL;
+#endif
+	PSB_WVDC32(0, DSPAPOS + reg_offset);
+	PSB_WVDC32((63 << 16) | 63, DSPASIZE + reg_offset);
+	PSB_WVDC32((64 << 2), DSPASTRIDE + reg_offset);
+	PSB_WVDC32(0x1c800000 | (BIT31 & PSB_RVDC32(DSPACNTR + reg_offset)),
+		DSPACNTR + reg_offset);
+	PSB_WVDC32(0, DSPALINOFF + reg_offset);
+	PSB_WVDC32(0, DSPATILEOFF + reg_offset);
+	PSB_WVDC32(pg->reserved_gtt_start, DSPASURF + reg_offset);
+
+	return 0;
+}
+
+int DCCBCursorDisable(struct drm_device *dev, int index)
+{
+	u32 reg_offset;
+
+	if (index < 0 || index > 2) {
+		DRM_ERROR("Invalid cursor index %d\n", index);
+		return -EINVAL;
+	}
+
+	switch (index) {
+	case 0:
+		reg_offset = 0;
+		break;
+	case 1:
+		reg_offset = 0x40;
+		break;
+	case 2:
+		reg_offset = 0x60;
+		break;
+	}
+
+	PSB_WVDC32(0, CURACNTR + reg_offset);
+	PSB_WVDC32(0, CURAPOS + reg_offset);
+	PSB_WVDC32(0, CURABASE + reg_offset);
+
+	return 0;
+}
+
+int DCCBUpdateDbiPanel(struct drm_device *dev, int pipe)
+{
+#ifdef CONFIG_SUPPORT_MIPI
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *)dev->dev_private;
+	struct mdfld_dsi_config *dsi_config = NULL;
+
+	if ((pipe != 0) && (pipe != 2))
+		return -EINVAL;
+
+	if (dev_priv && dev_priv->dsi_configs)
+		dsi_config = (pipe == 0) ?
+			dev_priv->dsi_configs[0] : dev_priv->dsi_configs[1];
+
+	return mdfld_dsi_dsr_update_panel_fb(dsi_config);
+#else
+	return 0;
+#endif
+}
+
+void DCCBWaitForDbiFifoEmpty(struct drm_device *dev, int pipe)
+{
+#ifdef CONFIG_SUPPORT_MIPI
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct mdfld_dsi_config *dsi_config;
+	int retry;
+
+	if ((pipe != 0) && (pipe != 2))
+		return;
+
+	dsi_config = (pipe == 0) ? dev_priv->dsi_configs[0] :
+				   dev_priv->dsi_configs[1];
+
+	if (!dsi_config || dsi_config->type != MDFLD_DSI_ENCODER_DBI)
+		return;
+
+	/* shall we use FLIP_DONE on ANN? */
+	if (IS_TNG_B0(dev)) {
+		retry = wait_event_interruptible_timeout(dev_priv->eof_wait,
+				(REG_READ(MIPIA_GEN_FIFO_STAT_REG) & BIT27),
+				msecs_to_jiffies(1000));
+	} else {
+		retry = 1000;
+		while (retry-- && !(REG_READ(MIPIA_GEN_FIFO_STAT_REG)))
+			udelay(500);
+	}
+
+	if (retry == 0)
+		DRM_ERROR("DBI FIFO not empty\n");
+#else
+	return;
+#endif
+}
+
+void DCCBAvoidFlipInVblankInterval(struct drm_device *dev, int pipe)
+{
+
+#ifdef CONFIG_SUPPORT_MIPI
+	if ((pipe == PIPEB) ||
+		(is_panel_vid_or_cmd(dev) == MDFLD_DSI_ENCODER_DPI))
+#else
+	if (pipe == PIPEB)
+#endif
+		is_vblank_period(dev, pipe);
+}
+
+void DCCBUnblankDisplay(struct drm_device *dev)
+{
+	int res;
+	struct psb_framebuffer *psb_fb = NULL;
+
+	DCCBGetFramebuffer(dev, &psb_fb);
+
+	if (!psb_fb)
+		return;
+
+	console_lock();
+	res = fb_blank(psb_fb->fbdev, 0);
+	console_unlock();
+	if (res != 0) {
+		DRM_ERROR("fb_blank failed (%d)", res);
+	}
+}
+
+void DCCBFlipDSRCb(struct drm_device *dev)
+{
+#ifdef CONFIG_SUPPORT_MIPI
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *)dev->dev_private;
+
+	if (!dev_priv->um_start) {
+		dev_priv->um_start = true;
+
+		if (dev_priv->b_dsr_enable_config)
+			dev_priv->b_dsr_enable = true;
+	}
+
+	if (dev_priv->b_dsr_enable && dev_priv->b_is_in_idle) {
+		dev_priv->exit_idle(dev, MDFLD_DSR_2D_3D, NULL, true);
+	}
+#endif
+}
+
+u32 DCCBGetPipeCount(void)
+{
+	/* FIXME */
+	return 3;
+}
+
+bool DCCBIsSuspended(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *)dev->dev_private;
+	bool ret = false;
+
+	if (!dev_priv)
+		return false;
+
+	mutex_lock(&dev->mode_config.mutex);
+	ret = dev_priv->early_suspended;
+	mutex_unlock(&dev->mode_config.mutex);
+
+	return ret;
+}
+
+int DCCBIsPipeActive(struct drm_device *dev, int pipe)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *)dev->dev_private;
+#ifdef CONFIG_SUPPORT_MIPI
+	struct mdfld_dsi_config *dsi_config = NULL;
+#endif
+	u32 pipeconf_reg;
+	int active = 0;
+
+	if (pipe == 0)
+		pipeconf_reg = PIPEACONF;
+	else if (pipe == 1)
+		pipeconf_reg = PIPEBCONF;
+	else {
+		DRM_ERROR("%s: unsupported pipe %d\n", __func__, pipe);
+		return 0;
+	}
+
+	/* FIXME: need to remove the suspended state checking. */
+	if (dev_priv->early_suspended)
+		return 0;
+
+	/* get display a for register reading */
+	if (power_island_get(OSPM_DISPLAY_A)) {
+#ifdef CONFIG_SUPPORT_MIPI
+		if ((pipe != 1) && dev_priv->dsi_configs) {
+			dsi_config = (pipe == 0) ? dev_priv->dsi_configs[0] :
+				dev_priv->dsi_configs[1];
+		}
+
+		mdfld_dsi_dsr_forbid(dsi_config);
+#endif
+		active = (PSB_RVDC32(pipeconf_reg) & BIT31) ? 1 : 0 ;
+#ifdef CONFIG_SUPPORT_MIPI
+		mdfld_dsi_dsr_allow(dsi_config);
+#endif
+		power_island_put(OSPM_DISPLAY_A);
+	}
+
+	return active;
+}
+
+void DCCBDsrForbid(struct drm_device *dev, int pipe)
+{
+#ifdef CONFIG_SUPPORT_MIPI
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *)dev->dev_private;
+	struct mdfld_dsi_config *dsi_config = NULL;
+
+	if ((pipe != 0) && (pipe != 2))
+		return;
+
+	if (dev_priv && dev_priv->dsi_configs)
+		dsi_config = (pipe == 0) ?
+			dev_priv->dsi_configs[0] : dev_priv->dsi_configs[1];
+
+	mdfld_dsi_dsr_forbid(dsi_config);
+#endif
+}
+
+void DCCBDsrAllow(struct drm_device *dev, int pipe)
+{
+#ifdef CONFIG_SUPPORT_MIPI
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *)dev->dev_private;
+	struct mdfld_dsi_config *dsi_config = NULL;
+
+	if ((pipe != 0) && (pipe != 2))
+		return;
+
+	if (dev_priv && dev_priv->dsi_configs)
+		dsi_config = (pipe == 0) ?
+			dev_priv->dsi_configs[0] : dev_priv->dsi_configs[1];
+
+	mdfld_dsi_dsr_allow(dsi_config);
+#endif
+}
+
+int DCCBUpdateCursorPos(struct drm_device *dev, int pipe, uint32_t pos)
+{
+	u32 power_island = 0;
+	u32 reg_offset = 0;
+
+	switch (pipe) {
+	case 0:
+		power_island = OSPM_DISPLAY_A;
+		reg_offset = 0;
+		break;
+	case 1:
+		power_island = OSPM_DISPLAY_B;
+		reg_offset = 0x40;
+		break;
+	case 2:
+		power_island = OSPM_DISPLAY_C;
+		reg_offset = 0x60;
+		break;
+	default:
+		DRM_ERROR("%s: invalid pipe %d\n", __func__, pipe);
+		return -1;
+	}
+
+	if (!power_island_get(power_island)) {
+		DRM_ERROR("%s: failed to get power island for pipe %d\n", __func__, pipe);
+		return -1;
+	}
+
+	PSB_WVDC32(pos, CURAPOS + reg_offset);
+	power_island_put(power_island);
+	return 0;
+}
+
+int DCCBDumpPipeStatus(struct drm_device *dev, int pipe)
+{
+	u32 power_island = 0;
+	u32 reg_offset = 0;
+
+	switch (pipe) {
+	case 0:
+		power_island = OSPM_DISPLAY_A;
+		reg_offset = 0;
+		break;
+	case 1:
+		power_island = OSPM_DISPLAY_B;
+		reg_offset = 0x1000;
+		break;
+	case 2:
+		power_island = OSPM_DISPLAY_C;
+		reg_offset = 0x2000;
+		break;
+	default:
+		DRM_ERROR("%s: invalid pipe %d\n", __func__, pipe);
+		return -1;
+	}
+
+	if (!power_island_get(power_island)) {
+		DRM_ERROR("%s: failed to get power island for pipe %d\n", __func__, pipe);
+		return -1;
+	}
+
+	DRM_INFO("========= status on pipe%d ========\n", pipe);
+	DRM_INFO("vblank_refcount = %u\n", atomic_read(&dev->vblank_refcount[pipe]));
+	DRM_INFO("vblank_count = %u\n", drm_vblank_count(dev, pipe));
+	DRM_INFO("PIPECONF = 0x%08x\n", REG_READ(PIPEACONF+reg_offset));
+	DRM_INFO("PIPESTAT = 0x%08x\n\n", REG_READ(PIPEASTAT+reg_offset));
+	DRM_INFO("===================================\n");
+
+	power_island_put(power_island);
+	return 0;
+}
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/mrfl_display_po.c b/drivers/external_drivers/intel_media/display/tng/drv/mrfl_display_po.c
new file mode 100644
index 0000000..6da87df
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/mrfl_display_po.c
@@ -0,0 +1,508 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ */
+
+/*
+ * NOTE: this file is only for merrifield HDMI & JDI panel power on
+ * TODO: remove me later.
+ */
+#include "mdfld_dsi_output.h"
+#include "mdfld_dsi_dbi.h"
+#include "mdfld_dsi_dpi.h"
+#include "mdfld_output.h"
+#include <asm/intel_scu_ipc.h>
+#include "mdfld_dsi_pkg_sender.h"
+#include "psb_drv.h"
+
+static void __iomem *io_base;
+void setiobase(uint8_t *value)
+{
+	io_base = value;
+}
+/*common functions*/
+/*----------------------------------------------------------------------------*/
+static void gunit_sb_write(u32 arg0, u32 arg1, u32 arg2, u32 arg3)
+{
+	u32 ret;
+	int retry=0;
+	u32 sb_pkt = (arg1 << 16) | (arg0 << 8) | 0xf0;
+
+	/* write the register to side band register address */
+	iowrite32(arg2, io_base + 0x2108);
+	iowrite32(arg3, io_base + 0x2104);
+	iowrite32(sb_pkt, io_base + 0x2100);
+
+	ret = ioread32(io_base + 0x210c);
+	while ((retry++ < 0x1000) && (ret != 0x2)) {
+		msleep(1);
+		ret = ioread32(io_base + 0x210c);
+	}
+
+	if (ret != 2)
+		DRM_ERROR("%s:Failed to received SB interrupt\n", __func__);
+}
+
+static u32 gunit_sb_read(u32 arg0, u32 arg1, u32 arg2)
+{
+	u32 ret;
+	int retry=0;
+	u32 sb_pkt = arg1 << 16 | arg0 << 8 | 0xf0;
+
+	/* write the register to side band register address */
+	iowrite32(arg2, io_base + 0x2108);
+	iowrite32(sb_pkt, io_base + 0x2100);
+
+	ret = ioread32(io_base + 0x210c);
+	while ((retry < 0x1000) && (ret != 2)) {
+		msleep(1);
+		ret = ioread32(io_base + 0x210c);
+	}
+
+	if (ret != 2)
+		DRM_ERROR("%s: Failed to received SB interrupt\n", __func__);
+	else
+		ret = ioread32(io_base + 0x2104);
+
+	return ret;
+}
+
+void power_off_pipe(u32 msg_port, u32 msg_reg, u32 val, u32 val_comp)
+{
+	u32 ret;
+	int retry=0;
+
+	ret = intel_mid_msgbus_read32(msg_port, msg_reg);
+
+	intel_mid_msgbus_write32(msg_port, msg_reg, ret | val);
+
+	ret = intel_mid_msgbus_read32(msg_port, msg_reg);
+	if (((ret & val_comp) != val_comp) && (retry < 1000)) {
+		retry++;
+		ret = intel_mid_msgbus_read32(msg_port, msg_reg);
+	}
+}
+
+void power_on_pipe(u32 msg_port, u32 msg_reg,
+							u32 val_comp, u32 val_write)
+{
+	u32 ret;
+	int retry=0;
+
+	ret = intel_mid_msgbus_read32(msg_port, msg_reg);
+
+	if ((ret & val_comp) == 0) {
+		DRM_ERROR("%s: pipe is already powered on\n", __func__);
+		return;
+	} else {
+		intel_mid_msgbus_write32(msg_port, msg_reg, ret & val_write);
+		ret = intel_mid_msgbus_read32(msg_port, msg_reg);
+		while ((retry < 1000) && ((ret & val_comp) != 0)) {
+			msleep(1);
+			ret = intel_mid_msgbus_read32(msg_port, msg_reg);
+			retry++;
+		}
+		if ((ret & val_comp) != 0)
+			DRM_ERROR("%s: powering on pipe failed\n", __func__);
+		if (msg_port == 0x4 && msg_reg == 0x3b) {
+			DRM_ERROR("%s: skip powering up MIO AFE\n", __func__);
+		}
+	}
+}
+
+static void pipe_timing(int pipe, u32 arg0, u32 arg1, u32 arg2,
+		u32 arg3, u32 arg4, u32 arg5, u32 arg6, u32 arg7)
+{
+	/* Pipe A Horizontal Total Register */
+	iowrite32(arg0, io_base + 0x60000 + pipe);
+
+	/* Pipe A Horizontal Blank Register */
+	iowrite32(arg1, io_base + 0x60004 + pipe);
+
+	/* Pipe A Horizontal Sync Register */
+	iowrite32(arg2, io_base + 0x60008 + pipe);
+
+	/* Pipe A Vertical Total Register */
+	iowrite32(arg3, io_base + 0x6000c + pipe);
+
+	/* Pipe A Vertical Blank Register */
+	iowrite32(arg4, io_base + 0x60010 + pipe);
+
+	/* Pipe A Vertical Sync Register */
+	iowrite32(arg5, io_base + 0x60014 + pipe);
+
+	/* Pipe A Source image size Register */
+	iowrite32(arg6, io_base + 0x6001c + pipe);
+
+	/* Pipe A Vertical Shift Sync Register */
+	iowrite32(arg7, io_base + 0x60028 + pipe);
+}
+
+/*HDMI power on sequence*/
+/*----------------------------------------------------------------------------*/
+
+#define VIDEO_DIP_CTL   0x61170
+#define VIDEO_DIP_DATA  0x61178
+#define VIDEO_DIP_FREQ_EVERY_FRAME 0x00010000
+
+/* HDMI resolutions */
+#define HD_640x480              1
+#define HD_720x480              2
+#define HD_1920x1080            3
+#define HD_1920x1200            4
+
+#define RESO6x4             (0)
+#define RRESO13x7_mipi      (0x1)
+#define RRESO10x7           (0x2)
+#define RRESO10x7_mipi      (0x3)
+#define RRESO19x10          (0x4)
+#define RRESO19x10_mipi     (0x5)
+#define RRESO19x12_mipi     (0x6)
+#define RRESO16x12          (0x7)
+#define RRESO16x12_mipi     (0x8)
+#define RRESO8x4_mipi       (0x9)
+#define RRESO8x6            (0xa)
+#define RRESO8x6_new        (0xb)
+#define RRESO8x6_2          (0xc)
+#define RRESO12x10          (0xd)
+#define RRESO12x7_mipi      (0xe)
+#define RRESO12x10_75       (0xf)
+#define RRESO25x16_mipi     (0x10)
+#define RRESO7x12_mipi      (0x11)
+#define RRESO7x4            (0x12)
+#define RRESO8x12_mipi      (0x13)
+
+static void power_off_all_pipes(void)
+{
+	power_off_pipe(0x4, 0x3C, 0x3, 0x3000000);
+	power_off_pipe(0x4, 0x36, 0xC, 0xC000000);
+	power_off_pipe(0x4, 0x36, 0x30, 0x3000000);
+	//power_off_pipe(0x4, 0x3b, 0x3, 0x3000000);
+	//power_off_pipe(0x4, 0x36, 0x3, 0x3000000);
+}
+
+static void power_on_all_pipes(void)
+{
+	//power_on_pipe(0x4, 0x36, 0x3000000, 0xFFFFFFFC); /* pipe A */
+	//power_on_pipe(0x4, 0x3b, 0x3000000, 0xFFFFFFFC); /* MIO */
+	power_on_pipe(0x4, 0x36, 0xc000000, 0xfffffff3); /* pipe B */
+	power_on_pipe(0x4, 0x3c, 0x3000000, 0xfffffffc); /* HDMI */
+}
+
+void hdmi_dll_program(void)
+{
+	u32 ret, status;
+	u32 arg3 = (0x11 << 24) | (0x1 << 11) | (2 << 8) |
+		(116) | (3 << 21) | (2 << 16) | (1 << 12);
+	int retry=0;
+
+	/* Common reset */
+        iowrite32(0x70006800, io_base + 0xF018);
+
+	gunit_sb_write(0x13,0x1,0x800c, arg3);
+	gunit_sb_write(0x13, 0x1,0x8048, 0x009F0051);
+	gunit_sb_write(0x13, 0x1,0x8014, 0x0D714300);
+
+	/* enable pll */
+	iowrite32(0xf0006800, io_base + 0xf018);
+	ret = ioread32(io_base + 0xf018);
+	ret &= 0x8000;
+	while ((retry++ < 1000) && (ret != 0x8000)) {
+		msleep(1);
+		ret = ioread32(io_base + 0xf018);
+		ret &= 0x8000;
+	}
+
+	if (ret != 0x8000) {
+		DRM_ERROR("%s: DPLL failed to lock, exit...\n", __func__);
+		return;
+	}
+
+	/* Enabling firewall for modphy */
+    gunit_sb_write(0x13,0x1,0x801c,0x01000000);
+    status = gunit_sb_read(0x13,0x0,0x801c);
+
+	/* Disabling global Rcomp */
+	gunit_sb_write(0x13,0x1,0x80E0,0x8000);
+
+	/* Stagger Programming */
+	gunit_sb_write(0x13,0x1,0x0230,0x401F00);
+	gunit_sb_write(0x13,0x1,0x0430,0x541F00);
+}
+
+static void hdmi_pipe_set_reso(int pipe, int reso)
+{
+	int i;
+
+	if (reso == RRESO19x10) {
+		pipe_timing(pipe, 0x0A0F077F, 0x0A0F077F, 0x08C707F7, 0x045D0437,
+				0x045D0437, 0x043B0438, 0x077F0437, 0x0 );
+	} else if (reso == RESO6x4) {
+		pipe_timing(pipe, 0x31f027f, 0x31f027f, 0x2ef028f, 0x20c01df,
+				0x20c01df, 0x1eb01e9, 0x27f01df, 0x0 );
+	} else {
+		DRM_ERROR("%s: only support 1920x1080 or 640x480, exit...\n", __func__);
+	}
+
+	i = 0;
+	while (i <= 0x1c) {
+		i += 4;
+	}
+}
+
+static void hdmi_sprite_enable(struct pci_dev *pdev, int sprite, int size_h,
+		int size_v, int pos_x, int pos_y, u32 base_addr)
+{
+	u32 bgsm, gtt_base_addr, surface_base_addr;
+	u32 controlval, positionval, resolutionval;
+	void __iomem *bgsm_virt;
+	int pipe_src_w, pipe_src_h;
+	int stride = size_h * 4;
+	//int stride = size_h * 2; /* 16 bit */
+
+	if (sprite != 0x1000) {
+		DRM_ERROR("%s, unsupported sprite: 0x%x\n", __func__, sprite);
+		return;
+	}
+
+	/*set center*/
+	pos_x = 600;
+	pos_y = 0;
+	pipe_src_w = 1920;
+	pipe_src_h = 1080;
+
+	size_h = (size_h > 1920) ? 1920 : size_h;
+	size_v = (size_v > 1080) ? 1080 : size_v;
+
+	controlval = 0x98000000; /* BGRX888 */
+	//controlval = 0x94000000; /* BGRX5650 */
+
+	pci_read_config_dword(pdev, 0x70, &bgsm);
+
+	bgsm_virt = ioremap_nocache(bgsm, 16);
+
+	gtt_base_addr = ioread32(bgsm_virt);
+
+	surface_base_addr = base_addr - (gtt_base_addr & 0xFFFFF000);
+
+	positionval = (pos_y << 16 ) | pos_x;
+        resolutionval = ((size_v -1)<< 16 )| (size_h-1);
+
+	/* Disable VGA */
+        iowrite32(0x80000000, io_base + 0x71400);
+
+        /* Disable PND deadline calc and enable HDMI lanes ready as workaround */
+        iowrite32(0x80800000, io_base + 0x70400);
+
+        /* Disable clock gating */
+	/* Disabling clock gating is not needed. Not sure why it
+	 * was put here. If this code executes, it causes display
+	 * controller not doing clock gating and preventing S0i1-Display
+	 * from working properly
+	 */
+        //iowrite32(0xffffffff, io_base + 0x70500);
+        //iowrite32(0xffffffff, io_base + 0x70504);
+
+	/* Sprite Control Register (Sprite B) */
+
+	/*    Sprite Format = BGRX */
+	iowrite32(controlval, io_base + 0x70180 + sprite);
+
+	/* Sprite Linear Offet (panning) */
+	iowrite32(0x0, io_base + 0x70184 + sprite);
+
+	/* Sprite Stride */
+	iowrite32(stride, io_base + 0x70188 + sprite);
+
+	/* Sprite Position */
+	iowrite32(positionval, io_base + 0x7018c + sprite);
+
+	/* Sprite HxW */
+	iowrite32(resolutionval, io_base + 0x70190 + sprite);
+
+	/*override pipe src*/
+	iowrite32(((pipe_src_w - 1) << 16) | (pipe_src_h - 1),
+				io_base + 0x6001c + sprite);
+
+	/* Sprite Base Address Register */
+	iowrite32(0, io_base + 0x7019c + sprite);
+
+       printk(KERN_ERR "SPBCNTR[0x%p] = 0x%x \n", io_base +  0x70180 + sprite,
+               ioread32(io_base +  0x70180 + sprite));
+       printk(KERN_ERR "SPBSTRIDE[0x%p] = 0x%x \n", io_base + 0x70184 + sprite,
+               ioread32(io_base + 0x70184 + sprite));
+       printk("SPBLINOFF [0x%p] = 0x%x \n", io_base  + 0x70188 + sprite,
+               ioread32(io_base  + 0x70188 + sprite));
+        printk("SPBPOS [0x%p] = 0x%x \n", io_base + 0x7018c + sprite,
+               ioread32(io_base +  0x7018c + sprite));
+        printk("SPBSIZE [0x%p] = 0x%x \n", io_base  + 0x70190 + sprite,
+               ioread32(io_base + 0x70190 + sprite));
+        printk("SPBSURF [0x%p] = 0x%x \n", io_base  + 0x7019c + sprite,
+               ioread32(io_base  + 0x7019c + sprite));
+
+}
+
+static void hdmi_configure(int reso)
+{
+	u32 temp_val;
+
+	/* HDMI PHY SET */
+	iowrite32(0xaa1b8700, io_base + 0x61134); /* HDMIPHYMISCCTL */
+
+	/* set port */
+	temp_val = 0xc0000818;
+	temp_val |= 0x00000200; /* MODE_HDMI */
+
+	iowrite32(temp_val, io_base + 0X61140);
+
+	/* set video dip data */
+	//	hdmi_set_avi_dip();
+
+	/* DIP control reg */
+	temp_val = ioread32(io_base + VIDEO_DIP_CTL);
+	temp_val |= 0xa0000000; /* enable DIP */
+
+	/* AVI DIP */
+	temp_val = temp_val & 0xFFC7FFFF;
+	temp_val = temp_val | 0x00000000;
+	iowrite32(temp_val, io_base + VIDEO_DIP_CTL);
+
+	/* Pb0,Length, Version, Type code */
+	iowrite32(0x000d0282, io_base + VIDEO_DIP_DATA);
+	/*    #PB4, PB3, PB2, PB1 */
+
+	/* temp_val = hdmi_get_h_v_aspect_ratio(); */
+	/* 0x100000 works for all resolutions */
+	temp_val = 0x100000;
+
+	iowrite32(temp_val, io_base + VIDEO_DIP_DATA);
+
+	/* temp_val = hdmi_get_video_index(); */
+	/* 640x480 */
+	if (reso == HD_640x480)
+		temp_val = 0;
+	/* 720x480 */
+	else if (reso == HD_720x480)
+		temp_val = 1;
+	else
+		temp_val = 1;
+
+
+	iowrite32(temp_val, io_base + VIDEO_DIP_DATA);
+	iowrite32(0, io_base + VIDEO_DIP_DATA);
+
+	iowrite32(0, io_base + VIDEO_DIP_DATA);
+	iowrite32(0, io_base + VIDEO_DIP_DATA);
+	iowrite32(0, io_base + VIDEO_DIP_DATA);
+	iowrite32(0, io_base + VIDEO_DIP_DATA);
+
+	iowrite32(0, io_base + VIDEO_DIP_DATA);
+	iowrite32(0, io_base + VIDEO_DIP_DATA);
+	iowrite32(0, io_base + VIDEO_DIP_DATA);
+	iowrite32(0, io_base + VIDEO_DIP_DATA);
+
+	iowrite32(0, io_base + VIDEO_DIP_DATA);
+	iowrite32(0, io_base + VIDEO_DIP_DATA);
+	iowrite32(0, io_base + VIDEO_DIP_DATA);
+	iowrite32(0, io_base + VIDEO_DIP_DATA);
+
+
+ 	/* enable the bit 21 */
+	temp_val = temp_val|0x00200000;
+	/* reset the frequencey */
+	temp_val = temp_val & 0xFFFCFFFF;
+	temp_val = temp_val | VIDEO_DIP_FREQ_EVERY_FRAME;
+    iowrite32(temp_val, io_base + VIDEO_DIP_CTL);
+}
+
+static void hdmi_wait_for_vblank(int pipe)
+{
+	int count = 1000;
+	u32 dwcrcerr = ioread32(io_base + 0x70024 + pipe);
+	dwcrcerr |= 0xb3060000;
+	iowrite32(dwcrcerr, io_base + 0x70024 + pipe);
+	dwcrcerr = ioread32(io_base + 0x70024 + pipe);
+	dwcrcerr &= 0x4;
+	while (count-- && dwcrcerr != 0x4) {
+		dwcrcerr = ioread32(io_base + 0x70024 + pipe);
+		dwcrcerr &= 0x4;
+		msleep(1);
+	}
+	if (dwcrcerr != 0x4) {
+		DRM_ERROR("Vblank interrupt failed to generate.Test exiting\n");
+		return;
+	}
+
+	/* Clearing Vblank interrupt */
+	dwcrcerr = ioread32(io_base + 0x70024 + pipe);
+	dwcrcerr &= 0x4;
+	iowrite32(dwcrcerr, io_base + 0x70024 + pipe);
+}
+
+void hdmi_power_on(struct drm_device *dev)
+{
+
+	struct pci_dev *pdev = dev->pdev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	u32 ret;
+
+	DRM_INFO("=================== hdmi begin ==========================");
+
+	io_base = dev_priv->vdc_reg;
+
+	power_off_all_pipes();
+
+	power_on_all_pipes();
+
+	pci_read_config_dword(pdev, 0x4, &ret);
+
+	pci_read_config_dword(pdev, 0x70, &ret);
+
+	pci_read_config_dword(pdev, 0x10, &ret);
+
+	hdmi_dll_program();
+
+	/* PIPE_B, RESO19x10 */
+	hdmi_pipe_set_reso(0x1000, RRESO19x10);
+
+	 /* SPRITE_B:1920x1080 */
+	hdmi_sprite_enable(pdev, 0x1000,720,1280,0,0,0x2900000);
+
+	hdmi_configure(HD_1920x1200);
+
+	/* pipe_enable(0x1000, 0); PIPE_B, VIDEO_MODE */
+	iowrite32(0x80000000, io_base + 0x70008 + 0x1000);
+
+	/* wait for vblank */
+	hdmi_wait_for_vblank(0x1000);
+	DRM_INFO("=================== hdmi end ==========================");
+}
+
+void mrfl_power_on_displays(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	io_base = dev_priv->vdc_reg;
+
+	/*power on hdmi*/
+	//hdmi_power_on(dev);
+}
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/mrfld_clock.c b/drivers/external_drivers/intel_media/display/tng/drv/mrfld_clock.c
new file mode 100644
index 0000000..0cff424
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/mrfld_clock.c
@@ -0,0 +1,638 @@
+/*
+ * Copyright © 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *	Jim Liu <jim.liu@intel.com>	
+ */
+
+#include <drm/drmP.h>
+#include "psb_fb.h"
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "psb_intel_display.h"
+#include "mrfld_clock.h"
+#ifdef CONFIG_SUPPORT_MIPI
+#include "mdfld_dsi_output.h"
+#endif
+#include <asm/intel-mid.h>
+
+#define MRFLD_LIMT_DPLL_19	    0
+#define MRFLD_LIMT_DPLL_25	    1
+#define MRFLD_LIMT_DPLL_83	    2
+#define MRFLD_LIMT_DPLL_100	    3
+#define MRFLD_LIMT_DSIPLL_19	    4
+#define MRFLD_LIMT_DSIPLL_25	    5
+#define MRFLD_LIMT_DSIPLL_83	    6
+#define MRFLD_LIMT_DSIPLL_100	    7
+
+#define MRFLD_DOT_MIN		  19750
+#define MRFLD_DOT_MAX		  120000
+#define MRFLD_DPLL_M_MIN_19	    113
+#define MRFLD_DPLL_M_MAX_19	    155
+#define MRFLD_DPLL_P1_MIN_19	    2
+#define MRFLD_DPLL_P1_MAX_19	    10
+#define MRFLD_DPLL_M_MIN_25	    101
+#define MRFLD_DPLL_M_MAX_25	    130
+#define MRFLD_DPLL_P1_MIN_25	    2
+#define MRFLD_DPLL_P1_MAX_25	    10
+#define MRFLD_DPLL_M_MIN_83	    64
+#define MRFLD_DPLL_M_MAX_83	    64
+#define MRFLD_DPLL_P1_MIN_83	    2
+#define MRFLD_DPLL_P1_MAX_83	    2
+#define MRFLD_DPLL_M_MIN_100	    64
+#define MRFLD_DPLL_M_MAX_100	    64
+#define MRFLD_DPLL_P1_MIN_100	    2
+#define MRFLD_DPLL_P1_MAX_100	    2
+#define MRFLD_DSIPLL_M_MIN_19	    131
+#define MRFLD_DSIPLL_M_MAX_19	    175
+#define MRFLD_DSIPLL_P1_MIN_19	    3
+#define MRFLD_DSIPLL_P1_MAX_19	    8
+#define MRFLD_DSIPLL_M_MIN_25	    97
+#define MRFLD_DSIPLL_M_MAX_25	    140
+#define MRFLD_DSIPLL_P1_MIN_25	    3
+#define MRFLD_DSIPLL_P1_MAX_25	    9
+#define MRFLD_DSIPLL_M_MIN_83	    33
+#define MRFLD_DSIPLL_M_MAX_83	    92
+#define MRFLD_DSIPLL_P1_MIN_83	    2
+#define MRFLD_DSIPLL_P1_MAX_83	    3
+#define MRFLD_DSIPLL_M_MIN_100	    97
+#define MRFLD_DSIPLL_M_MAX_100	    140
+#define MRFLD_DSIPLL_P1_MIN_100	    3
+#define MRFLD_DSIPLL_P1_MAX_100	    9
+
+static const struct mrst_limit_t mrfld_limits[] = {
+	{			/* MRFLD_LIMT_DPLL_19 */
+	 .dot = {.min = MRFLD_DOT_MIN,.max = MRFLD_DOT_MAX},
+	 .m = {.min = MRFLD_DPLL_M_MIN_19,.max = MRFLD_DPLL_M_MAX_19},
+	 .p1 = {.min = MRFLD_DPLL_P1_MIN_19,.max = MRFLD_DPLL_P1_MAX_19},
+	 },
+	{			/* MRFLD_LIMT_DPLL_25 */
+	 .dot = {.min = MRFLD_DOT_MIN,.max = MRFLD_DOT_MAX},
+	 .m = {.min = MRFLD_DPLL_M_MIN_25,.max = MRFLD_DPLL_M_MAX_25},
+	 .p1 = {.min = MRFLD_DPLL_P1_MIN_25,.max = MRFLD_DPLL_P1_MAX_25},
+	 },
+	{			/* MRFLD_LIMT_DPLL_83 */
+	 .dot = {.min = MRFLD_DOT_MIN,.max = MRFLD_DOT_MAX},
+	 .m = {.min = MRFLD_DPLL_M_MIN_83,.max = MRFLD_DPLL_M_MAX_83},
+	 .p1 = {.min = MRFLD_DPLL_P1_MIN_83,.max = MRFLD_DPLL_P1_MAX_83},
+	 },
+	{			/* MRFLD_LIMT_DPLL_100 */
+	 .dot = {.min = MRFLD_DOT_MIN,.max = MRFLD_DOT_MAX},
+	 .m = {.min = MRFLD_DPLL_M_MIN_100,.max = MRFLD_DPLL_M_MAX_100},
+	 .p1 = {.min = MRFLD_DPLL_P1_MIN_100,.max = MRFLD_DPLL_P1_MAX_100},
+	 },
+	{			/* MRFLD_LIMT_DSIPLL_19 */
+	 .dot = {.min = MRFLD_DOT_MIN,.max = MRFLD_DOT_MAX},
+	 .m = {.min = MRFLD_DSIPLL_M_MIN_19,.max = MRFLD_DSIPLL_M_MAX_19},
+	 .p1 = {.min = MRFLD_DSIPLL_P1_MIN_19,.max = MRFLD_DSIPLL_P1_MAX_19},
+	 },
+	{			/* MRFLD_LIMT_DSIPLL_25 */
+	 .dot = {.min = MRFLD_DOT_MIN,.max = MRFLD_DOT_MAX},
+	 .m = {.min = MRFLD_DSIPLL_M_MIN_25,.max = MRFLD_DSIPLL_M_MAX_25},
+	 .p1 = {.min = MRFLD_DSIPLL_P1_MIN_25,.max = MRFLD_DSIPLL_P1_MAX_25},
+	 },
+	{			/* MRFLD_LIMT_DSIPLL_83 */
+	 .dot = {.min = MRFLD_DOT_MIN,.max = MRFLD_DOT_MAX},
+	 .m = {.min = MRFLD_DSIPLL_M_MIN_83,.max = MRFLD_DSIPLL_M_MAX_83},
+	 .p1 = {.min = MRFLD_DSIPLL_P1_MIN_83,.max = MRFLD_DSIPLL_P1_MAX_83},
+	 },
+	{			/* MRFLD_LIMT_DSIPLL_100 */
+	 .dot = {.min = MRFLD_DOT_MIN,.max = MRFLD_DOT_MAX},
+	 .m = {.min = MRFLD_DSIPLL_M_MIN_100,.max = MRFLD_DSIPLL_M_MAX_100},
+	 .p1 = {.min = MRFLD_DSIPLL_P1_MIN_100,.max = MRFLD_DSIPLL_P1_MAX_100},
+	 },
+};
+
+#define MRFLD_M_MIN	    21
+#define MRFLD_M_MAX	    180
+static const u32 mrfld_m_converts[] = {
+/* M configuration table from 9-bit LFSR table */
+	224, 368, 440, 220, 366, 439, 219, 365, 182, 347,	/* 21 - 30 */
+	173, 342, 171, 85, 298, 149, 74, 37, 18, 265,	/* 31 - 40 */
+	388, 194, 353, 432, 216, 108, 310, 155, 333, 166,	/* 41 - 50 */
+	83, 41, 276, 138, 325, 162, 337, 168, 340, 170,	/* 51 - 60 */
+	341, 426, 469, 234, 373, 442, 221, 110, 311, 411,	/* 61 - 70 */
+	461, 486, 243, 377, 188, 350, 175, 343, 427, 213,	/* 71 - 80 */
+	106, 53, 282, 397, 354, 227, 113, 56, 284, 142,	/* 81 - 90 */
+	71, 35, 273, 136, 324, 418, 465, 488, 500, 506,	/* 91 - 100 */
+	253, 126, 63, 287, 399, 455, 483, 241, 376, 444,	/* 101 - 110 */
+	478, 495, 503, 251, 381, 446, 479, 239, 375, 443,	/* 111 - 120 */
+	477, 238, 119, 315, 157, 78, 295, 147, 329, 420,	/* 121 - 130 */
+	210, 105, 308, 154, 77, 38, 275, 137, 68, 290,	/* 131 - 140 */
+	145, 328, 164, 82, 297, 404, 458, 485, 498, 249,	/* 141 - 150 */
+	380, 190, 351, 431, 471, 235, 117, 314, 413, 206,	/* 151 - 160 */
+	103, 51, 25, 12, 262, 387, 193, 96, 48, 280,	/* 161 - 170 */
+	396, 198, 99, 305, 152, 76, 294, 403, 457, 228,	/* 171 - 180 */
+};
+static const struct mrst_limit_t *mrfld_limit(struct drm_device *dev, int pipe)
+{
+	const struct mrst_limit_t *limit = NULL;
+	DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+
+	if ((pipe == 0) || (pipe == 2)) {
+		if ((dev_priv->ksel == KSEL_CRYSTAL_19)
+		    || (dev_priv->ksel == KSEL_BYPASS_19))
+			limit = &mrfld_limits[MRFLD_LIMT_DSIPLL_19];
+		else if (dev_priv->ksel == KSEL_BYPASS_25)
+			limit = &mrfld_limits[MRFLD_LIMT_DSIPLL_25];
+		else if ((dev_priv->ksel == KSEL_BYPASS_83_100)
+			 && (dev_priv->core_freq == 166))
+			limit = &mrfld_limits[MRFLD_LIMT_DSIPLL_83];
+		else if ((dev_priv->ksel == KSEL_BYPASS_83_100) &&
+			 (dev_priv->core_freq == 100
+			  || dev_priv->core_freq == 200))
+			limit = &mrfld_limits[MRFLD_LIMT_DSIPLL_100];
+	} else if (pipe == 1) {
+		if ((dev_priv->ksel == KSEL_CRYSTAL_19)
+		    || (dev_priv->ksel == KSEL_BYPASS_19))
+			limit = &mrfld_limits[MRFLD_LIMT_DPLL_19];
+		else if (dev_priv->ksel == KSEL_BYPASS_25)
+			limit = &mrfld_limits[MRFLD_LIMT_DPLL_25];
+		else if ((dev_priv->ksel == KSEL_BYPASS_83_100)
+			 && (dev_priv->core_freq == 166))
+			limit = &mrfld_limits[MRFLD_LIMT_DPLL_83];
+		else if ((dev_priv->ksel == KSEL_BYPASS_83_100) &&
+			 (dev_priv->core_freq == 100
+			  || dev_priv->core_freq == 200))
+			limit = &mrfld_limits[MRFLD_LIMT_DPLL_100];
+	} else {
+		limit = NULL;
+		PSB_DEBUG_ENTRY("mrfld_limit Wrong display type. \n");
+	}
+
+	return limit;
+}
+
+/** Derive the pixel clock for the given refclk and divisors. */
+static void mrfld_clock(int refclk, struct mrst_clock_t *clock)
+{
+	clock->dot = (refclk * clock->m) / clock->p1;
+}
+
+/**
+ * Returns a set of divisors for the desired target clock with the given refclk,
+ * or FALSE.  
+ */
+static bool
+mrfld_find_best_PLL(struct drm_device *dev, int pipe, int target, int refclk,
+		    struct mrst_clock_t *best_clock)
+{
+	struct mrst_clock_t clock;
+	const struct mrst_limit_t *limit = mrfld_limit(dev, pipe);
+	int err = target;
+
+	if (!limit) {
+		DRM_ERROR("limit is NULL\n");
+		return false;
+	}
+
+	memset(best_clock, 0, sizeof(*best_clock));
+
+	PSB_DEBUG_ENTRY
+	    ("target = %d, m_min = %d, m_max = %d, p_min = %d, p_max = %d. \n",
+	     target, limit->m.min, limit->m.max, limit->p1.min, limit->p1.max);
+
+	for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
+		for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max;
+		     clock.p1++) {
+			int this_err;
+
+			mrfld_clock(refclk, &clock);
+
+			this_err = abs(clock.dot - target);
+			if (this_err < err) {
+				*best_clock = clock;
+				err = this_err;
+			}
+		}
+	}
+	PSB_DEBUG_ENTRY("mdfldFindBestPLL target = %d,"
+			"m = %d, p = %d. \n", target, best_clock->m,
+			best_clock->p1);
+	PSB_DEBUG_ENTRY("mdfldFindBestPLL err = %d.\n", err);
+
+	return err != target;
+}
+
+/**
+ * Set up the display clock 
+ *
+ */
+void mrfld_setup_pll(struct drm_device *dev, int pipe, int clk)
+{
+	DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+	int refclk = 0;
+	int clk_n = 0, clk_p2 = 0, clk_byte = 1, m_conv = 0, clk_tmp = 0;
+	struct mrst_clock_t clock;
+	bool ok;
+	u32 pll = 0, fp = 0;
+	bool is_mipi = false, is_mipi2 = false, is_hdmi = false;
+
+#ifdef CONFIG_SUPPORT_MIPI
+	struct mdfld_dsi_config *dsi_config = NULL;
+	struct mdfld_dsi_hw_context *ctx = NULL;
+
+	PSB_DEBUG_ENTRY("pipe = 0x%x\n", pipe);
+
+	if (pipe == 0)
+		dsi_config = dev_priv->dsi_configs[0];
+	else if (pipe == 2)
+		dsi_config = dev_priv->dsi_configs[1];
+
+	if ((pipe != 1) && !dsi_config) {
+		DRM_ERROR("Invalid DSI config\n");
+		return;
+	}
+
+	if (pipe != 1) {
+		ctx = &dsi_config->dsi_hw_context;
+
+		mutex_lock(&dsi_config->context_lock);
+	}
+
+	switch (pipe) {
+	case 0:
+		is_mipi = true;
+		break;
+	case 1:
+		is_hdmi = true;
+		break;
+	case 2:
+		is_mipi2 = true;
+		break;
+	}
+#else
+	if (pipe == 1)
+		is_hdmi = true;
+	else
+		return;
+#endif
+
+	if ((dev_priv->ksel == KSEL_CRYSTAL_19)
+	    || (dev_priv->ksel == KSEL_BYPASS_19)) {
+		refclk = 19200;
+
+		if (is_mipi || is_mipi2) {
+			clk_n = 1, clk_p2 = 8;
+		} else if (is_hdmi) {
+			clk_n = 1, clk_p2 = 10;
+		}
+	} else if (dev_priv->ksel == KSEL_BYPASS_25) {
+		refclk = 25000;
+
+		if (is_mipi || is_mipi2) {
+			clk_n = 1, clk_p2 = 8;
+		} else if (is_hdmi) {
+			clk_n = 1, clk_p2 = 10;
+		}
+	} else if ((dev_priv->ksel == KSEL_BYPASS_83_100)
+		   && (dev_priv->core_freq == 166)) {
+		refclk = 83000;
+
+		if (is_mipi || is_mipi2) {
+			clk_n = 4, clk_p2 = 8;
+		} else if (is_hdmi) {
+			clk_n = 4, clk_p2 = 10;
+		}
+	} else if ((dev_priv->ksel == KSEL_BYPASS_83_100) &&
+		   (dev_priv->core_freq == 100 || dev_priv->core_freq == 200)) {
+		refclk = 100000;
+		if (is_mipi || is_mipi2) {
+			clk_n = 4, clk_p2 = 8;
+		} else if (is_hdmi) {
+			clk_n = 4, clk_p2 = 10;
+		}
+	}
+
+	if (is_mipi || is_mipi2)
+		clk_byte = 3;
+
+	clk_tmp = clk * clk_n * clk_p2 * clk_byte;
+
+	PSB_DEBUG_ENTRY("clk = %d, clk_n = %d, clk_p2 = %d. \n", clk, clk_n,
+			clk_p2);
+	PSB_DEBUG_ENTRY("clk = %d, clk_tmp = %d, clk_byte = %d. \n", clk,
+			clk_tmp, clk_byte);
+
+	ok = mrfld_find_best_PLL(dev, pipe, clk_tmp, refclk, &clock);
+	dev_priv->tmds_clock_khz = clock.dot / (clk_n * clk_p2 * clk_byte);
+
+#ifdef CONFIG_SUPPORT_MIPI
+	/*
+	 * FIXME: Hard code the divisors' value for JDI panel, and need to
+	 * calculate them according to the DSI PLL HAS spec.
+	 */
+	if (pipe != 1) {
+		switch(get_panel_type(dev, pipe)) {
+		case SDC_16x25_CMD:
+				clock.p1 = 3;
+				clock.m = 126;
+				break;
+		case SHARP_10x19_CMD:
+				clock.p1 = 3;
+				clock.m = 137;
+				break;
+		case SHARP_10x19_DUAL_CMD:
+				clock.p1 = 3;
+				clock.m = 125;
+				break;
+		case CMI_7x12_CMD:
+				clock.p1 = 4;
+				clock.m = 120;
+				break;
+		case SDC_25x16_CMD:
+		case JDI_25x16_CMD:
+		case SHARP_25x16_CMD:
+				clock.p1 = 3;
+				clock.m = 138;
+				break;
+		case SHARP_25x16_VID:
+		case JDI_25x16_VID:
+				clock.p1 = 3;
+				clock.m = 162;
+				break;
+		case JDI_7x12_VID:
+				clock.p1 = 5;
+				clk_n = 1;
+				clock.m = 144;
+				break;
+		default:
+			/* for JDI_7x12_CMD */
+				clock.p1 = 4;
+				clock.m = 142;
+				break;
+		}
+		clk_n = 1;
+	}
+#endif
+
+	if (!ok) {
+		DRM_ERROR("mdfldFindBestPLL fail in mrfld_crtc_mode_set.\n");
+	} else {
+		m_conv = mrfld_m_converts[(clock.m - MRFLD_M_MIN)];
+
+		PSB_DEBUG_ENTRY("dot clock = %d,"
+				"m = %d, p1 = %d, m_conv = %d. \n", clock.dot,
+				clock.m, clock.p1, m_conv);
+	}
+
+	/* Write the N1 & M1 parameters into DSI_PLL_DIV_REG */
+	fp = (clk_n / 2) << 16;
+	fp |= m_conv;
+
+#ifdef CONFIG_SUPPORT_MIPI
+	if (is_mipi) {
+		/* Enable DSI PLL clocks for DSI0 rather than CCK. */
+		pll |= _CLK_EN_PLL_DSI0;
+		pll &= ~_CLK_EN_CCK_DSI0;
+		/* Select DSI PLL as the source of the mux input clocks. */
+		pll &= ~_DSI_MUX_SEL_CCK_DSI0;
+	}
+
+	if (is_mipi2 || is_dual_dsi(dev)) {
+		/* Enable DSI PLL clocks for DSI1 rather than CCK. */
+		pll |= _CLK_EN_PLL_DSI1;
+		pll &= ~_CLK_EN_CCK_DSI1;
+		/* Select DSI PLL as the source of the mux input clocks. */
+		pll &= ~_DSI_MUX_SEL_CCK_DSI1;
+	}
+#endif
+
+	if (is_hdmi)
+		pll |= MDFLD_VCO_SEL;
+
+	/* compute bitmask from p1 value */
+	pll |= (1 << (clock.p1 - 2)) << 17;
+
+#ifdef CONFIG_SUPPORT_MIPI
+	if (pipe != 1) {
+		ctx->dpll = pll;
+		ctx->fp = fp;
+		mutex_unlock(&dsi_config->context_lock);
+	}
+#endif
+}
+
+/**
+ * Set up the HDMI display clock 
+ *
+ */
+void mrfld_setup_dpll(struct drm_device *dev, int clk)
+{
+	int clk_n = 0, clk_m1 = 0, clk_m2 = 0, clk_p1 = 0, clk_p2 = 0;
+	int dpll_div = 0;
+	int pllctl = 0, tldrv = 0, pllin = 0, pllmisc = 0;
+	u32 dpll_tmp = 0;
+	u32 pll = 0;
+
+	pll = MRFLD_CRI_ICK_PLL | MRFLD_INPUT_REF_SSC;
+	REG_WRITE(MDFLD_DPLL_B, pll);
+	udelay(500);		/* revisit it for exact delay. */
+
+	pll |= MRFLD_EXT_CLK_BUF_EN | MRFLD_REF_CLK_EN | MRFLD_CMNRST;
+	REG_WRITE(MDFLD_DPLL_B, pll);
+
+	/* Main PLL Configuration. */
+	clk_n = 1;
+	clk_m1 = 2;
+	clk_m2 = 145;
+	clk_p1 = 3;
+	clk_p2 = 5;
+	dpll_div = clk_m2 | (clk_m1 << 8) | (clk_n << 12) | (clk_p2 << 16) |
+	    (clk_p1 << 21);
+	intel_mid_msgbus_write32(HDMIPHY_PORT, DPLL_DIV_REG, dpll_div);
+
+	/* Set up LCPLL in Digital Mode. */
+	pllctl = 0;		/* idthsen reset to 0 for display operation. */
+	tldrv = 0xCC;
+	pllin = 0x73;		/* pllrefsel selects alt core ref clock(19.2MHz). */
+	pllmisc = 0x0D;		/* Digital mode for LCPLL, pllrefselorden set. */
+	dpll_tmp = pllctl | (tldrv << 8) | (pllin << 16) | (pllmisc << 24);
+	intel_mid_msgbus_write32(HDMIPHY_PORT, PLL_CTL_IN_MISC_TLDRV_REG,
+		dpll_tmp);
+
+	/* Program Co-efficients for LCPLL in Digital Mode. */
+	dpll_tmp = 0x001f0077;
+	intel_mid_msgbus_write32(HDMIPHY_PORT, LPF_COEFF_REG, dpll_tmp);
+
+	/* Enable DPLL VCO. */
+	pll |= DPLL_VCO_ENABLE;
+	REG_WRITE(MDFLD_DPLL_B, pll);
+
+	/* Enable DCLP to core. */
+	dpll_tmp = 0x00030101;	/* FIXME need to read_mask_write. */
+	intel_mid_msgbus_write32(HDMIPHY_PORT, PLL_AFC_MISC_REG, dpll_tmp);
+
+	/* Disable global rcomp. */
+	dpll_tmp = 0x07010101;	/* FIXME need to read_mask_write. */
+	intel_mid_msgbus_write32(HDMIPHY_PORT, GLOBAL_RCOMP_REG, dpll_tmp);
+
+	/* Stagger Programming */
+	dpll_tmp = 0x00401f00;
+	intel_mid_msgbus_write32(HDMIPHY_PORT, DPLL_STAGER_CTL_REG1, dpll_tmp);
+
+	dpll_tmp = 0x00541f00;
+	intel_mid_msgbus_write32(HDMIPHY_PORT, DPLL_STAGER_CTL_REG2, dpll_tmp);
+}
+
+void enable_HFPLL(struct drm_device *dev)
+{
+	uint32_t pll_select = 0, ctrl_reg5 = 0;
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *)dev->dev_private;
+
+	/* Enable HFPLL for command mode panel */
+	if (dev_priv->bUseHFPLL) {
+			pll_select = intel_mid_msgbus_read32(CCK_PORT,
+						DSI_PLL_CTRL_REG);
+			ctrl_reg5 = intel_mid_msgbus_read32(CCK_PORT,
+						FUSE_OVERRIDE_FREQ_CNTRL_REG5);
+
+			pll_select &= ~(_DSI_MUX_SEL_CCK_DSI1 |
+					_DSI_MUX_SEL_CCK_DSI0);
+
+			intel_mid_msgbus_write32(CCK_PORT, DSI_PLL_CTRL_REG,
+					pll_select | _DSI_CCK_PLL_SELECT);
+			ctrl_reg5 |= (1 << 7) | 0xF;
+
+#ifdef CONFIG_SUPPORT_MIPI
+			if (get_panel_type(dev, 0) == SHARP_10x19_CMD)
+				ctrl_reg5 = 0x1f87;
+#endif
+			intel_mid_msgbus_write32(CCK_PORT,
+					FUSE_OVERRIDE_FREQ_CNTRL_REG5,
+					ctrl_reg5);
+	}
+}
+
+#ifdef CONFIG_SUPPORT_MIPI
+bool enable_DSIPLL(struct drm_device *dev)
+{
+	DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+	struct mdfld_dsi_config *dsi_config = NULL;
+	struct mdfld_dsi_hw_context *ctx = NULL;
+	u32 guit_val = 0x0;
+	u32 retry;
+
+	if (!dev_priv)
+		goto err_out;
+	dsi_config = dev_priv->dsi_configs[0];
+	if (!dsi_config)
+		goto err_out;
+	ctx = &dsi_config->dsi_hw_context;
+
+	if (IS_ANN(dev)) {
+		int dspfreq;
+
+		if ((get_panel_type(dev, 0) == JDI_7x12_CMD) ||
+			(get_panel_type(dev, 0) == JDI_7x12_VID))
+			dspfreq = DISPLAY_FREQ_FOR_200;
+		else
+			dspfreq = DISPLAY_FREQ_FOR_333;
+
+		intel_mid_msgbus_write32(CCK_PORT,
+			FUSE_OVERRIDE_FREQ_CNTRL_REG5,
+			CKESC_GATE_EN | CKDP1X_GATE_EN | DISPLAY_FRE_EN
+			| dspfreq);
+	}
+
+	/* Prepare DSI  PLL register before enabling */
+	intel_mid_msgbus_write32(CCK_PORT, DSI_PLL_DIV_REG, 0);
+	guit_val = intel_mid_msgbus_read32(CCK_PORT, DSI_PLL_CTRL_REG);
+	guit_val &= ~(DPLL_VCO_ENABLE | _DSI_LDO_EN
+			|_CLK_EN_MASK | _DSI_MUX_SEL_CCK_DSI0 | _DSI_MUX_SEL_CCK_DSI1);
+	intel_mid_msgbus_write32(CCK_PORT,
+					DSI_PLL_CTRL_REG, guit_val);
+	udelay(1);
+	/* Program PLL */
+
+	/*first set up the dpll and fp variables
+	 * dpll - will contain the following information
+	 *      - the clock source - DSI vs HFH vs LFH PLL
+	 * 	- what clocks should be running DSI0, DSI1
+	 *      - and the divisor.
+	 *
+	 */
+
+	intel_mid_msgbus_write32(CCK_PORT, DSI_PLL_DIV_REG, ctx->fp);
+	guit_val &= ~_P1_POST_DIV_MASK;	/*clear the divisor bit*/
+	/* the ctx->dpll contains the divisor that we need to use as well as which clocks
+	 * need to start up */
+	guit_val |= ctx->dpll;
+	guit_val &= ~_DSI_LDO_EN;	/* We want to clear the LDO enable when programming*/
+	guit_val |=  DPLL_VCO_ENABLE;	/* Enable the DSI PLL */
+
+	/* For the CD clock (clock used by Display controller), we need to set
+	 * the DSI_CCK_PLL_SELECT bit (bit 11). This should already be set. But
+	 * setting it just in case
+	 */
+	if (dev_priv->bUseHFPLL)
+		guit_val |= _DSI_CCK_PLL_SELECT;
+
+	intel_mid_msgbus_write32(CCK_PORT, DSI_PLL_CTRL_REG, guit_val);
+
+	/* Wait for DSI PLL lock */
+	retry = 10000;
+	guit_val = intel_mid_msgbus_read32(CCK_PORT, DSI_PLL_CTRL_REG);
+	while (((guit_val & _DSI_PLL_LOCK) != _DSI_PLL_LOCK) && (--retry)) {
+		udelay(3);
+		guit_val = intel_mid_msgbus_read32(CCK_PORT, DSI_PLL_CTRL_REG);
+		if (!retry%1000)
+			DRM_ERROR("DSI PLL taking too long to lock"
+				"- retry count=%d\n", 10000-retry);
+	}
+	if (retry == 0) {
+		DRM_ERROR("DSI PLL fails to lock\n");
+		return false;
+	}
+
+	return true;
+err_out:
+	return false;
+
+}
+
+bool disable_DSIPLL(struct drm_device * dev)
+{
+	u32 val, guit_val;
+
+	/* Disable PLL*/
+	intel_mid_msgbus_write32(CCK_PORT, DSI_PLL_DIV_REG, 0);
+
+	val = intel_mid_msgbus_read32(CCK_PORT, DSI_PLL_CTRL_REG);
+	val &= ~_CLK_EN_MASK;
+	intel_mid_msgbus_write32(CCK_PORT, DSI_PLL_CTRL_REG, val);
+	udelay(1);
+	val &= ~DPLL_VCO_ENABLE;
+	val |= _DSI_LDO_EN;
+	intel_mid_msgbus_write32(CCK_PORT, DSI_PLL_CTRL_REG, val);
+	udelay(1);
+
+	guit_val = intel_mid_msgbus_read32(CCK_PORT, DSI_PLL_CTRL_REG);
+	if ((guit_val & _DSI_PLL_LOCK) == _DSI_PLL_LOCK ) {
+		DRM_ERROR("DSI PLL fails to Unlock\n");
+		return false;
+	}
+	return true;
+}
+#endif
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/mrfld_clock.h b/drivers/external_drivers/intel_media/display/tng/drv/mrfld_clock.h
new file mode 100644
index 0000000..de2c049
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/mrfld_clock.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright © 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *	Jim Liu <jim.liu@intel.com>
+ */
+
+#ifndef _MRFLD_CLOCK_H_
+#define _MRFLD_CLOCK_H_
+
+#define HDMIPHY_PORT			0x13
+#define CCK_PORT			0x14
+#define DSI_PLL_CTRL_REG		0x48
+#define _DSI_LDO_EN			(1 << 30)
+#define _P1_POST_DIV_MASK		(0x1ff << 17)
+#define _DSI_CCK_PLL_SELECT		(1 << 11)
+#define _DSI_MUX_SEL_CCK_DSI0		(1 << 10)
+#define _DSI_MUX_SEL_CCK_DSI1		(1 << 9)
+#define _CLK_EN_PLL_DSI0		(1 << 8)
+#define _CLK_EN_PLL_DSI1		(1 << 7)
+#define _CLK_EN_CCK_DSI0		(1 << 6)
+#define _CLK_EN_CCK_DSI1		(1 << 5)
+#define _CLK_EN_MASK			(0xf << 5)
+#define _DSI_PLL_LOCK			(1 << 0)
+#define DSI_PLL_DIV_REG			0x4C
+#define FUSE_OVERRIDE_FREQ_CNTRL_REG3	0x54
+#define FUSE_OVERRIDE_FREQ_CNTRL_REG5	0x68
+#define CKDP_DIV2_ENABLE	(1 << 12)
+#define CKDP2X_ENABLE		(1 << 11)
+#define CKESC_GATE_EN		(1 << 10)
+#define CKDP1X_GATE_EN		(1 << 9)
+#define CKDP2X_GATE_EN		(1 << 8)
+#define DISPLAY_FRE_EN		(1 << 7)
+#define DISPLAY_FREQ_FOR_200	4
+#define DISPLAY_FREQ_FOR_333	2
+#define DPLL_STAGER_CTL_REG1		0x0230
+#define DPLL_STAGER_CTL_REG2		0x0430
+#define DPLL_DIV_REG			0x800C
+#define PLL_CTL_IN_MISC_TLDRV_REG	0x8014
+#define PLL_AFC_MISC_REG		0x801C
+#define LPF_COEFF_REG			0x8048
+#define GLOBAL_RCOMP_REG		0x80E0
+
+struct psb_intel_range_t {
+	int min, max;
+};
+
+struct mrst_limit_t {
+	struct psb_intel_range_t dot, m, p1;
+};
+
+struct mrst_clock_t {
+	/* derived values */
+	int dot;
+	int m;
+	int p1;
+};
+
+void enable_HFPLL(struct drm_device *dev);
+bool enable_DSIPLL(struct drm_device *dev);
+bool disable_DSIPLL(struct drm_device *dev);
+
+#endif
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/mrfld_display.c b/drivers/external_drivers/intel_media/display/tng/drv/mrfld_display.c
new file mode 100644
index 0000000..c578f99
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/mrfld_display.c
@@ -0,0 +1,3536 @@
+/*
+ * Copyright © 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *	Jim Liu <jim.liu@intel.com>
+ */
+
+#include "android_hdmi.h"
+#include "displayclass_interface.h"
+#include "pwr_mgmt.h"
+
+#define KEEP_UNUSED_CODE_S3D 0
+
+/* Functions will be deleted after simulated on MDFLD_PLATFORM */
+
+/* MRFLD_PLATFORM start */
+
+#if KEEP_UNUSED_CODE_S3D
+/**
+ * Set up the HDMI Vendor Specific InfoFrame Packet and send it to HDMI display
+ *
+ */
+static int mrfld_set_up_s3d_InfoFrame(struct drm_device *dev, enum
+				      s3d_structure s3d_format)
+{
+	u8 vsif[12] = { 0x81, 0x01, 0x06, 0x00, 0x03, 0x0c,
+		0x00, 0x40, 0x00, 0x00, 0x00, 0x00
+	};
+	u8 checksum = 0;
+	u32 *p_vsif = (u32 *) vsif;
+	u32 viddipctl_val = 0;
+	u32 buf_size = 0;
+	int i = 0;
+
+	PSB_DEBUG_ENTRY("s3d_format = %d\n", s3d_format);
+
+	/* Fill the 3d format in the HDMI Vendor Specific InfoFrame. */
+	vsif[8] = s3d_format << 4;
+
+	/* Get the buffer size in bytes */
+	buf_size = vsif[2] + 3;
+
+	/* Get the checksum byte. */
+	for (i = 0; i < buf_size; i++)
+		checksum += vsif[i];
+
+	checksum = 0xff - checksum + 1;
+	vsif[3] = checksum;
+
+	/* Wait for 2 VSyncs. */
+	mdelay(20);		/* msleep(20); */
+	mdelay(20);		/* msleep(20); */
+	/* psb_intel_wait_for_vblank(dev); */
+
+	/* Wait for 3 HSync. */
+
+	/* Disable the VS DIP type */
+	viddipctl_val = REG_READ(VIDEO_DIP_CTL);
+	viddipctl_val &= ~DIP_TYPE_VS;
+	REG_WRITE(VIDEO_DIP_CTL, viddipctl_val);
+
+	/* set the DIP buffer index to vendor specific. */
+	viddipctl_val &= ~(DIP_BUFF_INDX_MASK |
+			   DIP_RAM_ADDR_MASK | DIP_TX_FREQ_MASK);
+	viddipctl_val |= DIP_BUFF_INDX_VS | EN_DIP;
+	REG_WRITE(VIDEO_DIP_CTL, viddipctl_val);
+
+	/* Get the buffer size in DWORD. */
+	buf_size = (buf_size + 3) / 4;
+
+	/* Write HDMI Vendor Specific InfoFrame. */
+	for (i = 0; i < buf_size; i++)
+		REG_WRITE(VIDEO_DIP_DATA, *(p_vsif++));
+
+	/* Enable the DIP type and transmission frequency. */
+	viddipctl_val |= DIP_TYPE_VS | DIP_TX_FREQ_2VSNC;
+	REG_WRITE(VIDEO_DIP_CTL, viddipctl_val);
+
+	return 0;
+}
+#endif /* if KEEP_UNUSED_CODE_S3D */
+
+#if KEEP_UNUSED_CODE_S3D
+/**
+ * Disable sending the HDMI Vendor Specific InfoFrame Packet.
+ *
+ */
+static int mrfld_disable_s3d_InfoFrame(struct drm_device *dev)
+{
+	u32 viddipctl_val = 0;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	/* Wait for 2 VSyncs. */
+	mdelay(20);		/* msleep(20); */
+	mdelay(20);		/* msleep(20); */
+	/* psb_intel_wait_for_vblank(dev); */
+
+	/* Wait for 3 HSync. */
+
+	/* Disable the VS DIP type */
+	viddipctl_val = REG_READ(VIDEO_DIP_CTL);
+	viddipctl_val &= ~DIP_TYPE_VS;
+	REG_WRITE(VIDEO_DIP_CTL, viddipctl_val);
+
+	return 0;
+}
+#endif /* if KEEP_UNUSED_CODE_S3D */
+
+/**
+ * Disable the pipe, plane and pll.
+ *
+ * Note: FIXME need to modify PLL
+ */
+void mrfld_disable_crtc(struct drm_device *dev, int pipe, bool plane_d)
+{
+	int dpll_reg = MRST_DPLL_A;
+	int dspcntr_reg = DSPACNTR;
+	int dspcntr_reg_d = DSPDCNTR;
+	int dspsurf_reg = DSPASURF;
+	int dspsurf_reg_d = DSPDSURF;
+	int pipeconf_reg = PIPEACONF;
+	u32 gen_fifo_stat_reg = GEN_FIFO_STAT_REG;
+	u32 temp;
+
+	PSB_DEBUG_ENTRY("pipe = %d\n", pipe);
+
+#ifdef CONFIG_SUPPORT_MIPI
+	if (pipe != 1 && ((get_panel_type(dev, pipe) == JDI_7x12_VID) ||
+			(get_panel_type(dev, pipe) == CMI_7x12_VID)))
+#else
+	if (pipe != 1)
+#endif
+		return;
+
+	switch (pipe) {
+	case 0:
+		break;
+	case 1:
+		dpll_reg = MDFLD_DPLL_B;
+		dspcntr_reg = DSPBCNTR;
+		dspsurf_reg = DSPBSURF;
+		pipeconf_reg = PIPEBCONF;
+		break;
+	case 2:
+		dpll_reg = MRST_DPLL_A;
+		dspcntr_reg = DSPCCNTR;
+		dspsurf_reg = DSPCSURF;
+		pipeconf_reg = PIPECCONF;
+		gen_fifo_stat_reg = GEN_FIFO_STAT_REG + MIPIC_REG_OFFSET;
+		break;
+	default:
+		DRM_ERROR("Illegal Pipe Number. \n");
+		return;
+	}
+
+	if (pipe != 1)
+		mdfld_dsi_gen_fifo_ready(dev, gen_fifo_stat_reg,
+					 HS_CTRL_FIFO_EMPTY |
+					 HS_DATA_FIFO_EMPTY);
+
+	/* Disable display plane */
+	temp = REG_READ(dspcntr_reg);
+	if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+		REG_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE);
+		/* Flush the plane changes */
+		REG_WRITE(dspsurf_reg, REG_READ(dspsurf_reg));
+		REG_READ(dspsurf_reg);
+	}
+
+	/* Disable display plane D. */
+	if (plane_d) {
+		temp = REG_READ(dspcntr_reg_d);
+		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+			REG_WRITE(dspcntr_reg_d, temp & ~DISPLAY_PLANE_ENABLE);
+			/* Flush the plane changes */
+			REG_WRITE(dspsurf_reg_d, REG_READ(dspsurf_reg_d));
+			REG_READ(dspsurf_reg_d);
+		}
+	}
+
+	/* Next, disable display pipes */
+	temp = REG_READ(pipeconf_reg);
+	if ((temp & PIPEACONF_ENABLE) != 0) {
+		temp &= ~PIPEACONF_ENABLE;
+		temp |= PIPECONF_PLANE_OFF | PIPECONF_CURSOR_OFF;
+		REG_WRITE(pipeconf_reg, temp);
+		REG_READ(pipeconf_reg);
+
+		/* Wait for for the pipe disable to take effect. */
+		mdfldWaitForPipeDisable(dev, pipe);
+	}
+
+	/* Disable PLLs. */
+}
+
+void mofd_update_fifo_size(struct drm_device *dev, bool hdmi_on)
+{
+#ifdef CONFIG_SUPPORT_MIPI
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct mdfld_dsi_config *dsi_config = dev_priv->dsi_configs[0];
+	struct mdfld_dsi_hw_context *ctx = NULL;
+
+	DRM_INFO("setting fifo size, hdmi_suspend: %d\n", hdmi_on);
+	if (!hdmi_on) {
+		/* no hdmi, 12KB for plane A D E F */
+		REG_WRITE(DSPARB2, 0x90180);
+		REG_WRITE(DSPARB, 0xc0300c0);
+	} else {
+		/* with hdmi, 10KB for plane A D E F; 8KB for plane B */
+		REG_WRITE(DSPARB2, 0x981c0);
+		REG_WRITE(DSPARB, 0x120480a0);
+	}
+
+	if (dsi_config) {
+		ctx = &dsi_config->dsi_hw_context;
+		ctx->dsparb = REG_READ(DSPARB);
+		ctx->dsparb2 = REG_READ(DSPARB2);
+	}
+#else
+	if (hdmi_on) {
+		REG_WRITE(DDL1, 0x86868686);
+		REG_WRITE(DDL2, 0x86868686);
+		REG_WRITE(DDL3, 0x86);
+		REG_WRITE(DDL4, 0x8686);
+
+		/* FIXME: tune for HDMI only device */
+		/* with hdmi, 16KB for plane A B D */
+		REG_WRITE(DSPARB2, 0xc0300);
+		REG_WRITE(DSPARB, 0x20080100);
+		DRM_INFO("setting fifo size, arb2:0x%x, arb: 0x%x\n",
+			REG_READ(DSPARB2), REG_READ(DSPARB));
+	}
+#endif
+}
+
+/**
+ * Sets the power management mode of crtc including the pll pipe and plane.
+ *
+ */
+static void mrfld_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+	struct drm_device *dev = crtc->dev;
+	DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+	int pipe = psb_intel_crtc->pipe;
+	int dpll_reg = MRST_DPLL_A;
+	int dspcntr_reg = DSPACNTR;
+	int dspbase_reg = MRST_DSPABASE;
+	int pipeconf_reg = PIPEACONF;
+	u32 gen_fifo_stat_reg = GEN_FIFO_STAT_REG;
+	u32 pipeconf = dev_priv->pipeconf;
+	u32 dspcntr = dev_priv->dspcntr;
+#ifdef CONFIG_SUPPORT_MIPI
+	u32 pipestat_reg = PIPEASTAT;
+	u32 mipi_enable_reg = MIPIA_DEVICE_READY_REG;
+#endif
+	u32 temp;
+	bool enabled;
+	u32 power_island = 0;
+	unsigned long irqflags;
+	struct android_hdmi_priv *hdmi_priv = dev_priv->hdmi_priv;
+
+	PSB_DEBUG_ENTRY("mode = %d, pipe = %d\n", mode, pipe);
+
+#ifdef CONFIG_SUPPORT_MIPI
+#ifndef CONFIG_SUPPORT_TOSHIBA_MIPI_DISPLAY
+	/**
+	 * MIPI dpms
+	 * NOTE: this path only works for TMD panel now. update it to
+	 * support all MIPI panels later.
+	 */
+	if (pipe != 1 && (IS_MOFD(dev) ||
+				(get_panel_type(dev, pipe) == TMD_VID) ||
+				(get_panel_type(dev, pipe) == TMD_6X10_VID) ||
+				(get_panel_type(dev, pipe) == CMI_7x12_VID) ||
+				(get_panel_type(dev, pipe) == CMI_7x12_CMD) ||
+				(get_panel_type(dev, pipe) == SHARP_10x19_CMD) ||
+				(get_panel_type(dev, pipe) == SHARP_10x19_DUAL_CMD) ||
+				(get_panel_type(dev, pipe) == SHARP_25x16_CMD) ||
+				(get_panel_type(dev, pipe) == SDC_16x25_CMD) ||
+				(get_panel_type(dev, pipe) == SDC_25x16_CMD) ||
+				(get_panel_type(dev, pipe) == JDI_7x12_CMD) ||
+				(get_panel_type(dev, pipe) == JDI_7x12_VID) ||
+				(get_panel_type(dev, pipe) == SHARP_25x16_VID) ||
+				(get_panel_type(dev, pipe) == JDI_25x16_CMD) ||
+				(get_panel_type(dev, pipe) == JDI_25x16_VID))) {
+		return;
+	}
+#endif
+#else
+	if (pipe != 1)
+		return;
+#endif
+
+	power_island = pipe_to_island(pipe);
+
+	if (!power_island_get(power_island))
+		return;
+
+	switch (pipe) {
+#ifdef CONFIG_SUPPORT_MIPI
+	case 0:
+		break;
+#endif
+	case 1:
+		dpll_reg = DPLL_B;
+		dspcntr_reg = DSPBCNTR;
+		dspbase_reg = MRST_DSPBBASE;
+		pipeconf_reg = PIPEBCONF;
+		pipeconf = dev_priv->pipeconf1;
+		dspcntr = dev_priv->dspcntr1;
+		if (IS_MDFLD(dev))
+			dpll_reg = MDFLD_DPLL_B;
+		break;
+#ifdef CONFIG_SUPPORT_MIPI
+	case 2:
+		dpll_reg = MRST_DPLL_A;
+		dspcntr_reg = DSPCCNTR;
+		dspbase_reg = MDFLD_DSPCBASE;
+		pipeconf_reg = PIPECCONF;
+		pipestat_reg = PIPECSTAT;
+		pipeconf = dev_priv->pipeconf2;
+		dspcntr = dev_priv->dspcntr2;
+		gen_fifo_stat_reg = GEN_FIFO_STAT_REG + MIPIC_REG_OFFSET;
+		mipi_enable_reg = MIPIA_DEVICE_READY_REG + MIPIC_REG_OFFSET;
+		break;
+#endif
+	default:
+		DRM_ERROR("Illegal Pipe Number.\n");
+		goto crtc_dpms_err;
+	}
+
+	/* XXX: When our outputs are all unaware of DPMS modes other than off
+	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
+	 */
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+		DCLockMutex();
+
+		/* Enable the DPLL */
+		temp = REG_READ(dpll_reg);
+
+		if ((temp & DPLL_VCO_ENABLE) == 0) {
+			/* When ungating power of DPLL, needs to wait 0.5us before enable the VCO */
+			if (temp & MDFLD_PWR_GATE_EN) {
+				temp &= ~MDFLD_PWR_GATE_EN;
+				REG_WRITE(dpll_reg, temp);
+				/* FIXME_MDFLD PO - change 500 to 1 after PO */
+				udelay(500);
+			}
+
+			REG_WRITE(dpll_reg, temp);
+			REG_READ(dpll_reg);
+			/* FIXME_MDFLD PO - change 500 to 1 after PO */
+			udelay(500);
+
+			REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
+			REG_READ(dpll_reg);
+
+#if 0				/* FIXME MRFLD */
+			/**
+			 * wait for DSI PLL to lock
+			 * NOTE: only need to poll status of pipe 0 and pipe 1,
+			 * since both MIPI pipes share the same PLL.
+			 */
+			while ((pipe != 2) && (timeout < 20000)
+			       && !(REG_READ(pipeconf_reg) &
+				    PIPECONF_DSIPLL_LOCK)) {
+				udelay(150);
+				timeout++;
+			}
+#endif				/* FIXME MRFLD */
+		}
+
+		/* Enable the plane */
+		temp = REG_READ(dspcntr_reg);
+		if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+			REG_WRITE(dspcntr_reg, temp | DISPLAY_PLANE_ENABLE);
+			/* Flush the plane changes */
+			REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+		}
+
+		/* Enable the pipe */
+		temp = REG_READ(pipeconf_reg);
+		if ((temp & PIPEACONF_ENABLE) == 0) {
+			REG_WRITE(pipeconf_reg, pipeconf);
+
+			/* Wait for for the pipe enable to take effect. */
+			mdfldWaitForPipeEnable(dev, pipe);
+		}
+
+#ifdef CONFIG_SUPPORT_MIPI
+		/*workaround for sighting 3741701 Random X blank display */
+		/*perform w/a in video mode only on pipe A or C */
+		if ((pipe == 0 || pipe == 2) &&
+		    (is_panel_vid_or_cmd(dev) == MDFLD_DSI_ENCODER_DPI)) {
+			REG_WRITE(pipestat_reg, REG_READ(pipestat_reg));
+			msleep(100);
+			if (PIPE_VBLANK_STATUS & REG_READ(pipestat_reg)) {
+				PSB_DEBUG_ENTRY("OK");
+			} else {
+				PSB_DEBUG_ENTRY("STUCK!!!!");
+				/*shutdown controller */
+				temp = REG_READ(dspcntr_reg);
+				REG_WRITE(dspcntr_reg,
+					  temp & ~DISPLAY_PLANE_ENABLE);
+				REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+				/*mdfld_dsi_dpi_shut_down(dev, pipe); */
+				REG_WRITE(0xb048, 1);
+				msleep(100);
+				temp = REG_READ(pipeconf_reg);
+				temp &= ~PIPEACONF_ENABLE;
+				REG_WRITE(pipeconf_reg, temp);
+				msleep(100);	/*wait for pipe disable */
+				/*printk(KERN_ALERT "70008 is %x\n", REG_READ(0x70008));
+				   printk(KERN_ALERT "b074 is %x\n", REG_READ(0xb074)); */
+				REG_WRITE(mipi_enable_reg, 0);
+				msleep(100);
+				PSB_DEBUG_ENTRY("70008 is %x\n",
+						REG_READ(0x70008));
+				PSB_DEBUG_ENTRY("b074 is %x\n",
+						REG_READ(0xb074));
+				REG_WRITE(0xb004, REG_READ(0xb004));
+				/* try to bring the controller back up again */
+				REG_WRITE(mipi_enable_reg, 1);
+				temp = REG_READ(dspcntr_reg);
+				REG_WRITE(dspcntr_reg,
+					  temp | DISPLAY_PLANE_ENABLE);
+				REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+				/*mdfld_dsi_dpi_turn_on(dev, pipe); */
+				REG_WRITE(0xb048, 2);
+				msleep(100);
+				temp = REG_READ(pipeconf_reg);
+				temp |= PIPEACONF_ENABLE;
+				REG_WRITE(pipeconf_reg, temp);
+			}
+		}
+
+		psb_intel_crtc_load_lut(crtc);
+#endif
+
+		if ((pipe == 1) && hdmi_priv)
+			hdmi_priv->hdmi_suspended = false;
+
+		psb_enable_vblank(dev, pipe);
+		spin_lock_irqsave(&dev->vbl_lock, irqflags);
+		dev->vblank_enabled[pipe] = 1;
+		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+
+		DCAttachPipe(pipe);
+		DC_MRFLD_onPowerOn(pipe);
+		DCUnLockMutex();
+
+		/* Give the overlay scaler a chance to enable
+		   if it's on this pipe */
+		/* psb_intel_crtc_dpms_video(crtc, true); TODO */
+
+		break;
+	case DRM_MODE_DPMS_OFF:
+		DCLockMutex();
+
+		/* Give the overlay scaler a chance to disable
+		 * if it's on this pipe */
+		/* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
+		if (pipe != 1)
+			mdfld_dsi_gen_fifo_ready(dev, gen_fifo_stat_reg,
+						 HS_CTRL_FIFO_EMPTY |
+						 HS_DATA_FIFO_EMPTY);
+
+		/* Disable the VGA plane that we never use */
+		REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+
+		if (!(pipe == 1 && dev_priv->hdmi_first_boot)) {
+			/* Disable display plane */
+			temp = REG_READ(dspcntr_reg);
+			if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+				REG_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE);
+				/* Flush the plane changes */
+				REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+				REG_READ(dspbase_reg);
+			}
+
+			/* Next, disable display pipes */
+			temp = REG_READ(pipeconf_reg);
+			if ((temp & PIPEACONF_ENABLE) != 0) {
+				temp &= ~PIPEACONF_ENABLE;
+				temp |= PIPECONF_PLANE_OFF | PIPECONF_CURSOR_OFF;
+				REG_WRITE(pipeconf_reg, temp);
+				REG_READ(pipeconf_reg);
+
+				/* Wait for for the pipe disable to take effect. */
+				mdfldWaitForPipeDisable(dev, pipe);
+			}
+
+			temp = REG_READ(dpll_reg);
+			if (temp & DPLL_VCO_ENABLE) {
+				if (((pipe != 1)
+				&& !((REG_READ(PIPEACONF) | REG_READ(PIPECCONF)) &
+								PIPEACONF_ENABLE))
+						|| (pipe == 1)) {
+					temp &= ~(DPLL_VCO_ENABLE);
+					REG_WRITE(dpll_reg, temp);
+					REG_READ(dpll_reg);
+					/* Wait for the clocks to turn off. */
+					/* FIXME_MDFLD PO may need more delay */
+					udelay(500);
+				}
+			}
+
+			drm_handle_vblank(dev, pipe);
+
+			/* Turn off vsync interrupt. */
+			drm_vblank_off(dev, pipe);
+		}
+
+		if ((pipe == 1) && hdmi_priv)
+			hdmi_priv->hdmi_suspended = true;
+
+		if (IS_ANN(dev))
+			mofd_update_fifo_size(dev, false);
+
+		/* Make the pending flip request as completed. */
+		DCUnAttachPipe(pipe);
+		DC_MRFLD_onPowerOff(pipe);
+		DCUnLockMutex();
+		break;
+	}
+
+	enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
+
+crtc_dpms_err:
+	power_island_put(power_island);
+}
+
+static int mrfld_crtc_mode_set(struct drm_crtc *crtc,
+			       struct drm_display_mode *mode,
+			       struct drm_display_mode *adjusted_mode,
+			       int x, int y, struct drm_framebuffer *old_fb)
+{
+	struct drm_device *dev = crtc->dev;
+	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *)dev->dev_private;
+#ifdef CONFIG_SUPPORT_MIPI
+	struct mdfld_dsi_config *dsi_config = NULL;
+#endif
+	int pipe = psb_intel_crtc->pipe;
+
+	PSB_DEBUG_ENTRY("pipe = 0x%x\n", pipe);
+
+#ifdef CONFIG_SUPPORT_MIPI
+	switch (pipe) {
+	case 0:
+		dsi_config = dev_priv->dsi_configs[0];
+		break;
+	case 1:
+		break;
+	case 2:
+		dsi_config = dev_priv->dsi_configs[1];
+		break;
+	default:
+		DRM_ERROR("Illegal Pipe Number. \n");
+		return 0;
+	}
+
+	if (pipe != 1) {
+		int clk;
+
+		if (dsi_config->lane_count)
+			clk = adjusted_mode->clock / dsi_config->lane_count;
+		else
+			clk = adjusted_mode->clock;
+
+		mrfld_setup_pll(dev, pipe, clk);
+
+		return mdfld_crtc_dsi_mode_set(crtc, dsi_config, mode,
+				adjusted_mode, x, y, old_fb);
+	} else {
+		if (IS_ANN(dev))
+			mofd_update_fifo_size(dev, true);
+		android_hdmi_crtc_mode_set(crtc, mode, adjusted_mode,
+				x, y, old_fb);
+
+		return 0;
+	}
+#else
+	if (pipe == 1) {
+		struct psb_fbdev *fbdev = NULL;
+		struct psb_framebuffer *psbfb = NULL;
+		struct fb_info *info = NULL;
+
+		if (dev_priv)
+			fbdev = dev_priv->fbdev;
+
+		if (fbdev)
+			psbfb = fbdev->pfb;
+
+		if (psbfb)
+			info = psbfb->fbdev;
+
+		if (IS_ANN(dev))
+			mofd_update_fifo_size(dev, true);
+
+		android_hdmi_crtc_mode_set(crtc, mode, adjusted_mode,
+				x, y, old_fb);
+
+		// clear init fb once driver set its own mode
+		if (info && info->screen_base &&
+			!dev_priv->hdmi_first_boot && dev_priv->um_start)
+			memset(info->screen_base, 0, info->screen_size);
+	}
+	return 0;
+#endif
+}
+
+
+#if KEEP_UNUSED_CODE_S3D
+
+/**
+ * Perform display 3D mode set to half line interleaving 3D display with two buffers.
+ *
+ * FIXME modify the following function with option to disable PLL or not.
+ *
+ * Function return value: 0 if error, 1 if success.
+ */
+int mrfld_s3d_flip_surf_addr(struct drm_device *dev, int pipe, struct
+			     mrfld_s3d_flip *ps3d_flip)
+{
+	/* register */
+	u32 dspsurf_reg = DSPASURF;
+	u32 dspsurf_reg_d = DSPDSURF;
+
+	PSB_DEBUG_ENTRY("pipe = 0x%x \n", pipe);
+
+	switch (pipe) {
+	case 0:
+		break;
+	case 1:
+		dspsurf_reg = DSPBSURF;
+		break;
+	case 2:
+		dspsurf_reg = DSPCSURF;
+		break;
+	default:
+		DRM_ERROR("Illegal Pipe Number. \n");
+		return 0;
+	}
+
+	if (ps3d_flip->s3d_state & S3D_STATE_ENALBLED) {
+		REG_WRITE(dspsurf_reg, ps3d_flip->uiAddr_l);
+		REG_WRITE(dspsurf_reg_d, ps3d_flip->uiAddr_r);
+	} else {
+		REG_WRITE(dspsurf_reg, ps3d_flip->uiAddr_l);
+	}
+
+	return 1;
+}
+
+/*
+extern void mdfld_dsi_tmd_drv_ic_init(struct mdfld_dsi_config *dsi_config,
+				      int pipe);
+*/
+
+/**
+ * Perform display 3D mode set to half line interleaving 3D display with two buffers.
+ *
+ * FIXME modify the following function with option to disable PLL or not.
+ */
+int mrfld_s3d_to_line_interleave_half(struct drm_device *dev, int pipe, struct
+				      mrfld_s3d_flip *ps3d_flip)
+{
+	DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+	struct mdfld_dsi_config *dsi_config = dev_priv->dsi_configs[0];
+
+	/* register */
+	u32 pipeconf_reg = PIPEACONF;
+	u32 dspcntr_reg = DSPACNTR;
+	u32 dspstride_reg = DSPASTRIDE;
+	u32 dsplinoff_reg = DSPALINOFF;
+	u32 dspsize_reg = DSPASIZE;
+	u32 dspsurf_reg = DSPASURF;
+	u32 mipi_reg = MIPI;
+
+	u32 dspcntr_reg_d = DSPDCNTR;
+	u32 dspstride_reg_d = DSPDSTRIDE;
+	u32 dsplinoff_reg_d = DSPDLINOFF;
+	u32 dspsize_reg_d = DSPDSIZE;
+	u32 dspsurf_reg_d = DSPDSURF;
+
+	/* values */
+	u32 pipeconf_val = 0;
+	u32 dspcntr_val = 0;
+	u32 dspsize_val = 0;
+	u32 mipi_val = 0;
+
+	PSB_DEBUG_ENTRY("pipe = 0x%x \n", pipe);
+
+	switch (pipe) {
+	case 0:
+		break;
+	case 1:
+		DRM_ERROR("HDMI doesn't support line interleave half. \n");
+		return 0;
+	case 2:
+		dsi_config = dev_priv->dsi_configs[1];
+		pipeconf_reg = PIPECCONF;
+		dspcntr_reg = DSPCCNTR;
+		dspstride_reg = DSPCSTRIDE;
+		dsplinoff_reg = DSPCLINOFF;
+		dspsize_reg = DSPCSIZE;
+		dspsurf_reg = DSPCSURF;
+		mipi_reg = MIPI_C;
+		break;
+	default:
+		DRM_ERROR("Illegal Pipe Number. \n");
+		return 0;
+	}
+
+	/* Save the related DC registers. */
+	pipeconf_val = REG_READ(pipeconf_reg);
+	dspcntr_val = REG_READ(dspcntr_reg);
+	dspsize_val = REG_READ(dspsize_reg);
+	mipi_val = REG_READ(mipi_reg);
+
+	/* Disable pipe and port, don't disable the PLL. */
+	mrfld_disable_crtc(dev, pipe, true);
+
+	/* Set up line interleaving display 3D format. */
+	dspsize_val = (((dspsize_val & 0xFFFF0000) + 0x00010000) / 2 -
+		       0x00010000) | (dspsize_val & 0x0000FFFF);
+	REG_WRITE(dspsize_reg, dspsize_val);
+	REG_WRITE(dspsize_reg_d, dspsize_val);
+
+	/* set up the frame buffer stride, offset and start. */
+	REG_WRITE(dspstride_reg, ps3d_flip->pitch_l);
+	REG_WRITE(dspstride_reg_d, ps3d_flip->pitch_r);
+	REG_WRITE(dsplinoff_reg, 0);
+	REG_WRITE(dsplinoff_reg_d, 0);
+	REG_WRITE(dspsurf_reg, ps3d_flip->uiAddr_l);
+	REG_WRITE(dspsurf_reg_d, ps3d_flip->uiAddr_r);
+
+	/* Try to attach/de-attach Plane B to an existing swap chain,
+	 * especially with another frame buffer inserted into GTT. */
+	/* eError = MRSTLFBChangeSwapChainProperty(&Start, pipe); */
+
+	if ((pipe == 0) || (pipe == 2)) {
+		/*set up mipi port related registers */
+		REG_WRITE(mipi_reg, mipi_val);
+
+		/*setup MIPI adapter + MIPI IP registers */
+		/* mdfld_dsi_controller_init(dsi_config, pipe); */
+		mdelay(20);	/* msleep(20); */
+
+		/* re-init the panel */
+		dsi_config->drv_ic_inited = 0;
+		/* mdfld_dsi_tmd_drv_ic_init(dsi_config, pipe); */
+	}
+
+	/*enable the plane */
+	dspcntr_val &= ~(S3D_SPRITE_ORDER_BITS | S3D_SPRITE_INTERLEAVING_BITS);
+	REG_WRITE(dspcntr_reg_d, dspcntr_val);
+	dspcntr_val |= S3D_SPRITE_ORDER_A_FIRST | S3D_SPRITE_INTERLEAVING_LINE;
+	REG_WRITE(dspcntr_reg, dspcntr_val);
+	mdelay(20);		/* msleep(20); */
+	/* psb_intel_wait_for_vblank(dev); */
+
+	/*enable the pipe */
+	REG_WRITE(pipeconf_reg, pipeconf_val);
+
+	return 0;
+}
+
+/**
+ * Perform 2d mode set back from half line interleaving 3D display.
+ *
+ */
+int mrfld_s3d_from_line_interleave_half(struct drm_device *dev, int pipe, struct
+					mrfld_s3d_flip *ps3d_flip)
+{
+	DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+	struct mdfld_dsi_config *dsi_config = dev_priv->dsi_configs[0];
+
+	/* register */
+	u32 pipeconf_reg = PIPEACONF;
+	u32 dspcntr_reg = DSPACNTR;
+	u32 dspstride_reg = DSPASTRIDE;
+	u32 dsplinoff_reg = DSPALINOFF;
+	u32 dspsize_reg = DSPASIZE;
+	u32 dspsurf_reg = DSPASURF;
+	u32 mipi_reg = MIPI;
+
+	/* values */
+	u32 pipeconf_val = 0;
+	u32 dspcntr_val = 0;
+	u32 dspsize_val = 0;
+	u32 mipi_val = 0;
+
+	PSB_DEBUG_ENTRY("pipe = 0x%x \n", pipe);
+
+	switch (pipe) {
+	case 0:
+		break;
+	case 1:
+		DRM_ERROR("HDMI doesn't support line interleave half. \n");
+		return 0;
+	case 2:
+		dsi_config = dev_priv->dsi_configs[1];
+		pipeconf_reg = PIPECCONF;
+		dspcntr_reg = DSPCCNTR;
+		dspstride_reg = DSPCSTRIDE;
+		dsplinoff_reg = DSPCLINOFF;
+		dspsize_reg = DSPCSIZE;
+		dspsurf_reg = DSPCSURF;
+		mipi_reg = MIPI_C;
+		DRM_ERROR("HDMI doesn't support line interleave half. \n");
+		return 0;
+	default:
+		DRM_ERROR("Illegal Pipe Number. \n");
+		return 0;
+	}
+
+	/* Save the related DC registers. */
+	pipeconf_val = REG_READ(pipeconf_reg);
+	dspcntr_val = REG_READ(dspcntr_reg);
+	dspsize_val = REG_READ(dspsize_reg);
+	mipi_val = REG_READ(mipi_reg);
+
+	/* Disable pipe and port, don't disable the PLL. */
+	mrfld_disable_crtc(dev, pipe, true);
+
+	/* restore to 2D display size. */
+	REG_WRITE(dspsize_reg, (((dspsize_val & 0xFFFF0000) + 0x00010000) * 2 -
+				0x00010000) | (dspsize_val & 0x0000FFFF));
+
+	/* set up the frame buffer stride, offset and start. */
+	REG_WRITE(dspstride_reg, ps3d_flip->pitch_l);
+	REG_WRITE(dsplinoff_reg, 0);
+	REG_WRITE(dspsurf_reg, ps3d_flip->uiAddr_l);
+
+	/* Try to attach/de-attach Plane B to an existing swap chain,
+	 * especially with another frame buffer inserted into GTT. */
+	/* eError = MRSTLFBChangeSwapChainProperty(&Start, pipe); */
+
+	if ((pipe == 0) || (pipe == 2)) {
+		/*set up mipi port related registers */
+		REG_WRITE(mipi_reg, mipi_val);
+
+		/*setup MIPI adapter + MIPI IP registers */
+		/* mdfld_dsi_controller_init(dsi_config, pipe); */
+		mdelay(20);	/* msleep(20); */
+
+		/* re-init the panel */
+		dsi_config->drv_ic_inited = 0;
+		/* mdfld_dsi_tmd_drv_ic_init(dsi_config, pipe); */
+	}
+
+	/*enable the plane */
+	dspcntr_val &= ~(S3D_SPRITE_ORDER_BITS | S3D_SPRITE_INTERLEAVING_BITS);
+	REG_WRITE(dspcntr_reg, dspcntr_val);
+	mdelay(20);		/* msleep(20); */
+	/* psb_intel_wait_for_vblank(dev); */
+
+	/*enable the pipe */
+	REG_WRITE(pipeconf_reg, pipeconf_val);
+
+	return 0;
+}
+
+/**
+ * Perform display 3D mode set to line interleaving 3D display with two buffers.
+ *
+ * FIXME: Assume the 3D buffer is the same as display resolution. Will re-visit
+ * it for panel fitting mode.
+ * Set up the pll, two times 2D clock.
+ */
+int mrfld_s3d_to_line_interleave(struct drm_device *dev, int pipe, struct
+				 mrfld_s3d_flip *ps3d_flip)
+{
+	DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+	struct mdfld_dsi_config *dsi_config = dev_priv->dsi_configs[0];
+
+	/* register */
+	u32 pipeconf_reg = PIPEACONF;
+	u32 pipesrc_reg = PIPEASRC;
+	u32 vtot_reg = VTOTAL_A;
+	u32 vblank_reg = VBLANK_A;
+	u32 vsync_reg = VSYNC_A;
+
+	u32 dspcntr_reg = DSPACNTR;
+	u32 dspstride_reg = DSPASTRIDE;
+	u32 dsplinoff_reg = DSPALINOFF;
+	u32 dspsize_reg = DSPASIZE;
+	u32 dspsurf_reg = DSPASURF;
+	u32 mipi_reg = MIPI;
+
+	u32 dspcntr_reg_d = DSPDCNTR;
+	u32 dspstride_reg_d = DSPDSTRIDE;
+	u32 dsplinoff_reg_d = DSPDLINOFF;
+	u32 dspsize_reg_d = DSPDSIZE;
+	u32 dspsurf_reg_d = DSPDSURF;
+
+	/* values */
+	u32 pipeconf_val = 0;
+	u32 pipesrc_val = 0;
+	u32 vtot_val = 0;
+	u32 vblank_val = 0;
+	u32 vsync_val = 0;
+
+	u32 dspcntr_val = 0;
+	u32 dspsize_val = 0;
+	u32 mipi_val = 0;
+	u32 temp = 0;
+	u32 temp1 = 0;
+
+	PSB_DEBUG_ENTRY("pipe = 0x%x \n", pipe);
+
+	switch (pipe) {
+	case 0:
+		break;
+	case 1:
+		pipeconf_reg = PIPEBCONF;
+		pipesrc_reg = PIPEBSRC;
+		vtot_reg = VTOTAL_B;
+		vblank_reg = VBLANK_B;
+		vsync_reg = VSYNC_B;
+		dspcntr_reg = DSPBCNTR;
+		dspstride_reg = DSPBSTRIDE;
+		dsplinoff_reg = DSPBLINOFF;
+		dspsize_reg = DSPBSIZE;
+		dspsurf_reg = DSPBSURF;
+		break;
+	case 2:
+		dsi_config = dev_priv->dsi_configs[1];
+		pipeconf_reg = PIPECCONF;
+		pipesrc_reg = PIPECSRC;
+		vtot_reg = VTOTAL_C;
+		vblank_reg = VBLANK_C;
+		vsync_reg = VSYNC_C;
+		dspcntr_reg = DSPCCNTR;
+		dspstride_reg = DSPCSTRIDE;
+		dsplinoff_reg = DSPCLINOFF;
+		dspsize_reg = DSPCSIZE;
+		dspsurf_reg = DSPCSURF;
+		mipi_reg = MIPI_C;
+		break;
+	default:
+		DRM_ERROR("Illegal Pipe Number. \n");
+		return 0;
+	}
+
+	/* Save the related DC registers. */
+	pipeconf_val = REG_READ(pipeconf_reg);
+	pipesrc_val = REG_READ(pipesrc_reg);
+	vtot_val = REG_READ(vtot_reg);
+	vblank_val = REG_READ(vblank_reg);
+	vsync_val = REG_READ(vsync_reg);
+
+	dspcntr_val = REG_READ(dspcntr_reg);
+	dspsize_val = REG_READ(dspsize_reg);
+	mipi_val = REG_READ(mipi_reg);
+
+	/* Disable pipe and port. */
+	mrfld_disable_crtc(dev, pipe, true);
+
+	/* Set up line interleaving display 3D format. */
+	/* Get the 3D pipe source. */
+	REG_WRITE(pipesrc_reg, (((pipesrc_val & 0x0000ffff) + 1) * 2 - 1) |
+		  (pipesrc_val & 0xFFFF0000));
+
+	/* Get the 3D Vactive and Vtotal. */
+	temp = ((vtot_val & 0x0000FFFF) + 1) * 2 - 1;
+	temp1 = ((vtot_val & 0xFFFF0000) + 0x00010000) * 2 - 0x00010000;
+	REG_WRITE(vtot_reg, temp1 | temp);
+
+	/* Get the 3D Vblank. */
+	temp = ((vblank_val & 0x0000FFFF) + 1) * 2 - 1;
+	temp1 = ((vblank_val & 0xFFFF0000) + 0x00010000) * 2 - 0x00010000;
+	REG_WRITE(vblank_reg, temp1 | temp);
+
+	/* Get the 3D Vsync */
+	temp = ((vsync_val & 0x0000FFFF) + 1) * 2 - 1;
+	temp1 = ((vsync_val & 0xFFFF0000) + 0x00010000) * 2 - 0x00010000;
+	REG_WRITE(vsync_reg, temp1 | temp);
+
+	/* set up plane related registers */
+	REG_WRITE(dspsize_reg_d, dspsize_val);
+
+	/* set up the frame buffer stride, offset and start. */
+	REG_WRITE(dspstride_reg, ps3d_flip->pitch_l);
+	REG_WRITE(dspstride_reg_d, ps3d_flip->pitch_r);
+	REG_WRITE(dsplinoff_reg, 0);
+	REG_WRITE(dsplinoff_reg_d, 0);
+	REG_WRITE(dspsurf_reg, ps3d_flip->uiAddr_l);
+	REG_WRITE(dspsurf_reg_d, ps3d_flip->uiAddr_r);
+
+	/* Try to attach/de-attach Plane B to an existing swap chain,
+	 * especially with another frame buffer inserted into GTT. */
+	/* eError = MRSTLFBChangeSwapChainProperty(&Start, pipe); */
+
+	if ((pipe == 0) || (pipe == 2)) {
+		/*set up mipi port related registers */
+		REG_WRITE(mipi_reg, mipi_val);
+
+		/*setup MIPI adapter + MIPI IP registers */
+		/* mdfld_dsi_controller_init(dsi_config, pipe); */
+		mdelay(20);	/* msleep(20); */
+
+		/* re-init the panel */
+		dsi_config->drv_ic_inited = 0;
+		/* mdfld_dsi_tmd_drv_ic_init(dsi_config, pipe); */
+	}
+
+	/*enable the plane */
+	dspcntr_val &= ~(S3D_SPRITE_ORDER_BITS | S3D_SPRITE_INTERLEAVING_BITS);
+	REG_WRITE(dspcntr_reg_d, dspcntr_val);
+	dspcntr_val |= S3D_SPRITE_ORDER_A_FIRST | S3D_SPRITE_INTERLEAVING_LINE;
+	REG_WRITE(dspcntr_reg, dspcntr_val);
+	mdelay(20);		/* msleep(20); */
+	/* psb_intel_wait_for_vblank(dev); */
+
+	/*enable the pipe */
+	REG_WRITE(pipeconf_reg, pipeconf_val);
+
+	/* set up Vendor Specific InfoFrame for 3D format. */
+	if (pipe == 1)
+		mrfld_set_up_s3d_InfoFrame(dev, S3D_LINE_ALTERNATIVE);
+
+	return 0;
+}
+
+/**
+ * Perform 2d mode set back from line interleaving 3D display.
+ *
+ */
+int mrfld_s3d_from_line_interleave(struct drm_device *dev, int pipe, struct
+				   mrfld_s3d_flip *ps3d_flip)
+{
+	DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+	struct mdfld_dsi_config *dsi_config = dev_priv->dsi_configs[0];
+
+	/* register */
+	u32 pipeconf_reg = PIPEACONF;
+	u32 pipesrc_reg = PIPEASRC;
+	u32 vtot_reg = VTOTAL_A;
+	u32 vblank_reg = VBLANK_A;
+	u32 vsync_reg = VSYNC_A;
+
+	u32 dspcntr_reg = DSPACNTR;
+	u32 dspstride_reg = DSPASTRIDE;
+	u32 dsplinoff_reg = DSPALINOFF;
+	u32 dspsize_reg = DSPASIZE;
+	u32 dspsurf_reg = DSPASURF;
+	u32 mipi_reg = MIPI;
+
+	/* values */
+	u32 pipeconf_val = 0;
+	u32 pipesrc_val = 0;
+	u32 vtot_val = 0;
+	u32 vblank_val = 0;
+	u32 vsync_val = 0;
+
+	u32 dspcntr_val = 0;
+	u32 dspsize_val = 0;
+	u32 mipi_val = 0;
+	u32 temp = 0;
+	u32 temp1 = 0;
+
+	PSB_DEBUG_ENTRY("pipe = 0x%x \n", pipe);
+
+	switch (pipe) {
+	case 0:
+		break;
+	case 1:
+		pipeconf_reg = PIPEBCONF;
+		pipesrc_reg = PIPEBSRC;
+		vtot_reg = VTOTAL_B;
+		vblank_reg = VBLANK_B;
+		vsync_reg = VSYNC_B;
+		dspcntr_reg = DSPBCNTR;
+		dspstride_reg = DSPBSTRIDE;
+		dsplinoff_reg = DSPBLINOFF;
+		dspsize_reg = DSPBSIZE;
+		dspsurf_reg = DSPBSURF;
+		break;
+	case 2:
+		dsi_config = dev_priv->dsi_configs[1];
+		pipeconf_reg = PIPECCONF;
+		pipesrc_reg = PIPECSRC;
+		vtot_reg = VTOTAL_C;
+		vblank_reg = VBLANK_C;
+		vsync_reg = VSYNC_C;
+		pipeconf_reg = PIPECCONF;
+		dspcntr_reg = DSPCCNTR;
+		dspstride_reg = DSPCSTRIDE;
+		dsplinoff_reg = DSPCLINOFF;
+		dspsize_reg = DSPCSIZE;
+		dspsurf_reg = DSPCSURF;
+		mipi_reg = MIPI_C;
+		break;
+	default:
+		DRM_ERROR("Illegal Pipe Number. \n");
+		return 0;
+	}
+
+	/* Save the related DC registers. */
+	pipeconf_val = REG_READ(pipeconf_reg);
+	pipesrc_val = REG_READ(pipesrc_reg);
+	vtot_val = REG_READ(vtot_reg);
+	vblank_val = REG_READ(vblank_reg);
+	vsync_val = REG_READ(vsync_reg);
+
+	dspcntr_val = REG_READ(dspcntr_reg);
+	dspsize_val = REG_READ(dspsize_reg);
+	mipi_val = REG_READ(mipi_reg);
+
+	/* Disable pipe and port. */
+	mrfld_disable_crtc(dev, pipe, true);
+
+	/* Set up the pll, half of 3D clock. */
+	/* mrfld_setup_pll (dev, pipe, adjusted_mode->clock); */
+
+	/* set up pipe related registers */
+	/* Get the 2D pipe source. */
+	REG_WRITE(pipesrc_reg, (((pipesrc_val & 0x0000ffff) + 1) / 2 - 1) |
+		  (pipesrc_val & 0xFFFF0000));
+
+	/* Get the 2D Vactive and Vtotal. */
+	temp = ((vtot_val & 0x0000FFFF) + 1) / 2 - 1;
+	temp1 = ((vtot_val & 0xFFFF0000) + 0x00010000) / 2 - 0x00010000;
+	REG_WRITE(vtot_reg, temp1 | temp);
+
+	/* Get the 2D Vblank. */
+	temp = ((vblank_val & 0x0000FFFF) + 1) / 2 - 1;
+	temp1 = ((vblank_val & 0xFFFF0000) + 0x00010000) / 2 - 0x00010000;
+	REG_WRITE(vblank_reg, temp1 | temp);
+
+	/* Get the 2D Vsync */
+	temp = ((vsync_val & 0x0000FFFF) + 1) / 2 - 1;
+	temp1 = ((vsync_val & 0xFFFF0000) + 0x00010000) / 2 - 0x00010000;
+	REG_WRITE(vsync_reg, temp1 | temp);
+
+	/* set up the frame buffer stride, offset and start. */
+	REG_WRITE(dspstride_reg, ps3d_flip->pitch_l);
+	REG_WRITE(dsplinoff_reg, 0);
+	REG_WRITE(dspsurf_reg, ps3d_flip->uiAddr_l);
+
+	/* Try to attach/de-attach Plane B to an existing swap chain,
+	 * especially with another frame buffer inserted into GTT. */
+	/* eError = MRSTLFBChangeSwapChainProperty(&Start, pipe); */
+
+	if ((pipe == 0) || (pipe == 2)) {
+		/*set up mipi port related registers */
+		REG_WRITE(mipi_reg, mipi_val);
+
+		/*setup MIPI adapter + MIPI IP registers */
+		/* mdfld_dsi_controller_init(dsi_config, pipe); */
+		mdelay(20);	/* msleep(20); */
+
+		/* re-init the panel */
+		dsi_config->drv_ic_inited = 0;
+		/* mdfld_dsi_tmd_drv_ic_init(dsi_config, pipe); */
+	}
+
+	/*enable the plane */
+	dspcntr_val &= ~(S3D_SPRITE_ORDER_BITS | S3D_SPRITE_INTERLEAVING_BITS);
+	REG_WRITE(dspcntr_reg, dspcntr_val);
+	mdelay(20);		/* msleep(20); */
+	/* psb_intel_wait_for_vblank(dev); */
+
+	/*enable the pipe */
+	REG_WRITE(pipeconf_reg, pipeconf_val);
+
+	/* set up Vendor Specific InfoFrame for 3D format. */
+	if (pipe == 1)
+		mrfld_disable_s3d_InfoFrame(dev);
+
+	return 0;
+}
+
+/**
+ * Perform display 3D mode set to frame packing 3D display.
+ *
+ * FIXME: Assume the 3D buffer is the same as display resolution. Will re-visit
+ * it for panel fitting mode.
+ * Set up the pll, two times 2D clock.
+ */
+int mrfld_s3d_to_frame_packing(struct drm_device *dev, int pipe, struct
+			       mrfld_s3d_flip *ps3d_flip)
+{
+	/* register */
+	u32 pipeconf_reg = PIPEBCONF;
+	u32 pipesrc_reg = PIPEBSRC;
+	u32 vtot_reg = VTOTAL_B;
+	u32 vblank_reg = VBLANK_B;
+	u32 vsync_reg = VSYNC_B;
+
+	u32 dspcntr_reg = DSPBCNTR;
+	u32 dspstride_reg = DSPBSTRIDE;
+	u32 dsplinoff_reg = DSPBLINOFF;
+	u32 dspsize_reg = DSPBSIZE;
+	u32 dspsurf_reg = DSPBSURF;
+
+	u32 dspcntr_reg_d = DSPDCNTR;
+	u32 dspstride_reg_d = DSPDSTRIDE;
+	u32 dsplinoff_reg_d = DSPDLINOFF;
+	u32 dspsize_reg_d = DSPDSIZE;
+	u32 dspsurf_reg_d = DSPDSURF;
+	u32 dsppos_reg_d = DSPDPOS;
+
+	/* values */
+	u32 pipeconf_val = 0;
+	u32 pipesrc_val = 0;
+	u32 vtot_val = 0;
+	u32 vblank_val = 0;
+	u32 vsync_val = 0;
+
+	u32 dspcntr_val = 0;
+	u32 dspsize_val = 0;
+	u32 dsppos_val = 0;
+	u32 temp = 0;
+	u32 temp1 = 0;
+	u32 temp2 = 0;
+	u32 temp3 = 0;
+
+	PSB_DEBUG_ENTRY("pipe = 0x%x \n", pipe);
+
+	if (pipe != 1) {
+		DRM_ERROR("Illegal Pipe Number. \n");
+		return 0;
+	}
+
+	/* Save the related DC registers. */
+	pipeconf_val = REG_READ(pipeconf_reg);
+	pipesrc_val = REG_READ(pipesrc_reg);
+	vtot_val = REG_READ(vtot_reg);
+	vblank_val = REG_READ(vblank_reg);
+	vsync_val = REG_READ(vsync_reg);
+
+	dspcntr_val = REG_READ(dspcntr_reg);
+	dspsize_val = REG_READ(dspsize_reg);
+
+	/* Disable pipe and port. */
+	mrfld_disable_crtc(dev, pipe, true);
+
+	/* Set up the pll, two times 2D clock. */
+	/* mrfld_setup_pll (dev, pipe, adjusted_mode->clock); */
+
+	/* Set up frame packing display 3D format. */
+
+	/* set up pipe related registers */
+	/* Get the Vblank and Vborder period. */
+	temp = ((vtot_val & 0xFFFF0000) >> 16) - (vtot_val & 0x0000FFFF);
+
+	/* Get the 3D pipe source. */
+	REG_WRITE(pipesrc_reg,
+		  (((pipesrc_val & 0x0000ffff) + 1) * 2 + temp -
+		   1) | (pipesrc_val & 0xFFFF0000));
+	dsppos_val = temp + (pipesrc_val & 0x0000ffff) + 1;
+	dsppos_val <<= 16;
+
+	/* Get the 3D Vactive. */
+	temp += ((vtot_val & 0x0000FFFF) + 1) * 2 - 1;
+
+	/* Get the 3D Vtotal. */
+	temp1 = ((vtot_val & 0xFFFF0000) + 0x00010000) * 2 - 0x00010000;
+
+	REG_WRITE(vtot_reg, temp1 | temp);
+
+	/* Get the 3D Vblank. */
+	temp2 = (vblank_val & 0x0000FFFF) - (vtot_val & 0x0000FFFF);
+	temp3 = (vtot_val & 0xFFFF0000) - (vblank_val & 0xFFFF0000);
+	REG_WRITE(vblank_reg, (temp1 - temp3) | (temp + temp2));
+
+	/* Get the 3D Vsync */
+	temp2 = (vsync_val & 0x0000FFFF) - (vtot_val & 0x0000FFFF);
+	temp3 = (vtot_val & 0xFFFF0000) - (vsync_val & 0xFFFF0000);
+	REG_WRITE(vsync_reg, (temp1 - temp3) | (temp + temp2));
+
+	/* set up plane related registers */
+	REG_WRITE(dspsize_reg_d, dspsize_val);
+	REG_WRITE(dsppos_reg_d, dsppos_val);
+
+	/* set up the frame buffer stride, offset and start. */
+	REG_WRITE(dspstride_reg, ps3d_flip->pitch_l);
+	REG_WRITE(dspstride_reg_d, ps3d_flip->pitch_r);
+	REG_WRITE(dsplinoff_reg, 0);
+	REG_WRITE(dsplinoff_reg_d, 0);
+	REG_WRITE(dspsurf_reg, ps3d_flip->uiAddr_l);
+	REG_WRITE(dspsurf_reg_d, ps3d_flip->uiAddr_r);
+
+	/* Try to attach/de-attach Plane B to an existing swap chain,
+	 * especially with another frame buffer inserted into GTT. */
+	/* eError = MRSTLFBChangeSwapChainProperty(&Start, pipe); */
+
+	/*enable the plane */
+	dspcntr_val &= ~(S3D_SPRITE_ORDER_BITS | S3D_SPRITE_INTERLEAVING_BITS);
+	REG_WRITE(dspcntr_reg, dspcntr_val);
+	REG_WRITE(dspcntr_reg_d, dspcntr_val);
+	mdelay(20);		/* msleep(20); */
+	/* psb_intel_wait_for_vblank(dev); */
+
+	/*enable the pipe */
+	REG_WRITE(pipeconf_reg, pipeconf_val);
+
+	/* set up Vendor Specific InfoFrame for 3D format. */
+	if (pipe == 1)
+		mrfld_set_up_s3d_InfoFrame(dev, S3D_FRAME_PACKING);
+
+	return 0;
+}
+
+/**
+ * Perform 2d mode set back from frame packing 3D display.
+ *
+ */
+int mrfld_s3d_from_frame_packing(struct drm_device *dev, int pipe, struct
+				 mrfld_s3d_flip *ps3d_flip)
+{
+	/* register */
+	u32 pipeconf_reg = PIPEBCONF;
+	u32 pipesrc_reg = PIPEBSRC;
+	u32 vtot_reg = VTOTAL_B;
+	u32 vblank_reg = VBLANK_B;
+	u32 vsync_reg = VSYNC_B;
+
+	u32 dspcntr_reg = DSPBCNTR;
+	u32 dspstride_reg = DSPBSTRIDE;
+	u32 dsplinoff_reg = DSPBLINOFF;
+	u32 dspsize_reg = DSPBSIZE;
+	u32 dspsurf_reg = DSPBSURF;
+
+	/* values */
+	u32 pipeconf_val = 0;
+	u32 pipesrc_val = 0;
+	u32 vtot_val = 0;
+	u32 vblank_val = 0;
+	u32 vsync_val = 0;
+
+	u32 dspcntr_val = 0;
+	u32 dspsize_val = 0;
+	u32 temp = 0;
+	u32 temp1 = 0;
+	u32 temp2 = 0;
+	u32 temp3 = 0;
+
+	PSB_DEBUG_ENTRY("pipe = 0x%x \n", pipe);
+
+	if (pipe != 1) {
+		DRM_ERROR("Illegal Pipe Number. \n");
+		return 0;
+	}
+
+	/* Save the related DC registers. */
+	pipeconf_val = REG_READ(pipeconf_reg);
+	pipesrc_val = REG_READ(pipesrc_reg);
+	vtot_val = REG_READ(vtot_reg);
+	vblank_val = REG_READ(vblank_reg);
+	vsync_val = REG_READ(vsync_reg);
+
+	dspcntr_val = REG_READ(dspcntr_reg);
+	dspsize_val = REG_READ(dspsize_reg);
+
+	/* Disable pipe and port. */
+	mrfld_disable_crtc(dev, pipe, true);
+
+	/* Set up the pll, half of 3D clock. */
+	/* mrfld_setup_pll (dev, pipe, adjusted_mode->clock); */
+
+	/* set up pipe related registers */
+	/* Get the Vblank and Vborder period. */
+	temp = ((vtot_val & 0xFFFF0000) >> 16) - (vtot_val & 0x0000FFFF);
+
+	/* Get the 2D pipe source. */
+	temp1 = ((pipesrc_val & 0x0000ffff) + 1 - temp) / 2 - 1;
+	REG_WRITE(pipesrc_reg, temp1 | (pipesrc_val & 0xFFFF0000));
+
+	/* Get the 2D Vactive. */
+	temp = ((vtot_val & 0x0000FFFF) + 1 - temp) / 2 - 1;
+
+	/* Get the 2D Vtotal. */
+	temp1 = ((vtot_val & 0xFFFF0000) + 0x00010000) / 2 - 0x00010000;
+
+	REG_WRITE(vtot_reg, temp1 | temp);
+
+	/* Get the 2D Vblank. */
+	temp2 = (vblank_val & 0x0000FFFF) - (vtot_val & 0x0000FFFF);
+	temp3 = (vtot_val & 0xFFFF0000) - (vblank_val & 0xFFFF0000);
+	REG_WRITE(vblank_reg, (temp1 - temp3) | (temp + temp2));
+
+	/* Get the 3D Vsync */
+	temp2 = (vsync_val & 0x0000FFFF) - (vtot_val & 0x0000FFFF);
+	temp3 = (vtot_val & 0xFFFF0000) - (vsync_val & 0xFFFF0000);
+	REG_WRITE(vblank_reg, (temp1 - temp3) | (temp + temp2));
+
+	/* set up plane related registers */
+	/* set up the frame buffer stride, offset and start. */
+	REG_WRITE(dspstride_reg, ps3d_flip->pitch_l);
+	REG_WRITE(dsplinoff_reg, 0);
+	REG_WRITE(dspsurf_reg, ps3d_flip->uiAddr_l);
+
+	/* Try to attach/de-attach Plane B to an existing swap chain,
+	 * especially with another frame buffer inserted into GTT. */
+	/* eError = MRSTLFBChangeSwapChainProperty(&Start, pipe); */
+
+	/*enable the plane */
+	dspcntr_val &= ~(S3D_SPRITE_ORDER_BITS | S3D_SPRITE_INTERLEAVING_BITS);
+	REG_WRITE(dspcntr_reg, dspcntr_val);
+	mdelay(20);		/* msleep(20); */
+	/* psb_intel_wait_for_vblank(dev); */
+
+	/*enable the pipe */
+	REG_WRITE(pipeconf_reg, pipeconf_val);
+
+	/* set up Vendor Specific InfoFrame for 3D format. */
+	if (pipe == 1)
+		mrfld_disable_s3d_InfoFrame(dev);
+
+	return 0;
+}
+
+/**
+ * Perform display 3D mode set to top_and_bottom 3D display.
+ *
+ * FIXME: Assume the 3D buffer is the same as display resolution. Will re-visit
+ * it for panel fitting mode.
+ */
+int mrfld_s3d_to_top_and_bottom(struct drm_device *dev, int pipe, struct
+				mrfld_s3d_flip *ps3d_flip)
+{
+	/* register */
+	u32 pipeconf_reg = PIPEBCONF;
+
+	u32 dspcntr_reg = DSPBCNTR;
+	u32 dspstride_reg = DSPBSTRIDE;
+	u32 dsplinoff_reg = DSPBLINOFF;
+	u32 dspsize_reg = DSPBSIZE;
+	u32 dspsurf_reg = DSPBSURF;
+
+	u32 dspcntr_reg_d = DSPDCNTR;
+	u32 dspstride_reg_d = DSPDSTRIDE;
+	u32 dsplinoff_reg_d = DSPDLINOFF;
+	u32 dspsize_reg_d = DSPDSIZE;
+	u32 dspsurf_reg_d = DSPDSURF;
+	u32 dsppos_reg_d = DSPDPOS;
+
+	/* values */
+	u32 pipeconf_val = 0;
+
+	u32 dspcntr_val = 0;
+	u32 dspsize_val = 0;
+	u32 dsppos_val = 0;
+
+	PSB_DEBUG_ENTRY("pipe = 0x%x \n", pipe);
+
+	if (pipe != 1) {
+		DRM_ERROR("Illegal Pipe Number. \n");
+		return 0;
+	}
+
+	/* Save the related DC registers. */
+	pipeconf_val = REG_READ(pipeconf_reg);
+	dspcntr_val = REG_READ(dspcntr_reg);
+	dspsize_val = REG_READ(dspsize_reg);
+
+	/* Disable pipe and port, don't disable the PLL. */
+	mrfld_disable_crtc(dev, pipe, true);
+
+	/* set up plane related registers */
+	dsppos_val = ((dspsize_val & 0xFFFF0000) + 0x00010000) / 2;
+	dspsize_val = (((dspsize_val & 0xFFFF0000) + 0x00010000) / 2 -
+		       0x00010000) | (dspsize_val & 0x0000FFFF);
+	REG_WRITE(dspsize_reg, dspsize_val);
+	REG_WRITE(dspsize_reg_d, dspsize_val);
+	REG_WRITE(dsppos_reg_d, dsppos_val);
+
+	/* set up the frame buffer stride, offset and start. */
+	REG_WRITE(dspstride_reg, ps3d_flip->pitch_l);
+	REG_WRITE(dspstride_reg_d, ps3d_flip->pitch_r);
+	REG_WRITE(dsplinoff_reg, 0);
+	REG_WRITE(dsplinoff_reg_d, 0);
+	REG_WRITE(dspsurf_reg, ps3d_flip->uiAddr_l);
+	REG_WRITE(dspsurf_reg_d, ps3d_flip->uiAddr_r);
+
+	/* Try to attach/de-attach Plane B to an existing swap chain,
+	 * especially with another frame buffer inserted into GTT. */
+	/* eError = MRSTLFBChangeSwapChainProperty(&Start, pipe); */
+
+	/*enable the plane */
+	dspcntr_val &= ~(S3D_SPRITE_ORDER_BITS | S3D_SPRITE_INTERLEAVING_BITS);
+	REG_WRITE(dspcntr_reg, dspcntr_val);
+	REG_WRITE(dspcntr_reg_d, dspcntr_val);
+	mdelay(20);		/* msleep(20); */
+	/* psb_intel_wait_for_vblank(dev); */
+
+	/*enable the pipe */
+	REG_WRITE(pipeconf_reg, pipeconf_val);
+
+	/* set up Vendor Specific InfoFrame for 3D format. */
+	if (pipe == 1)
+		mrfld_set_up_s3d_InfoFrame(dev, S3D_TOP_AND_BOTTOM);
+
+	return 0;
+}
+
+/**
+ * Perform 2d mode set back from top_and_bottom 3D display.
+ *
+ */
+int mrfld_s3d_from_top_and_bottom(struct drm_device *dev, int pipe, struct
+				  mrfld_s3d_flip *ps3d_flip)
+{
+	/* register */
+	u32 pipeconf_reg = PIPEBCONF;
+
+	u32 dspcntr_reg = DSPBCNTR;
+	u32 dspstride_reg = DSPBSTRIDE;
+	u32 dsplinoff_reg = DSPBLINOFF;
+	u32 dspsize_reg = DSPBSIZE;
+	u32 dspsurf_reg = DSPBSURF;
+
+	/* values */
+	u32 pipeconf_val = 0;
+	u32 dspcntr_val = 0;
+	u32 dspsize_val = 0;
+
+	PSB_DEBUG_ENTRY("pipe = 0x%x \n", pipe);
+
+	if (pipe != 1) {
+		DRM_ERROR("Illegal Pipe Number. \n");
+		return 0;
+	}
+
+	/* Save the related DC registers. */
+	pipeconf_val = REG_READ(pipeconf_reg);
+	dspcntr_val = REG_READ(dspcntr_reg);
+	dspsize_val = REG_READ(dspsize_reg);
+
+	/* Disable pipe and port, don't disable the PLL. */
+	mrfld_disable_crtc(dev, pipe, true);
+
+	/* set up plane related registers */
+	dspsize_val = (((dspsize_val & 0xFFFF0000) + 0x00010000) * 2 -
+		       0x00010000) | (dspsize_val & 0x0000FFFF);
+	REG_WRITE(dspsize_reg, dspsize_val);
+
+	/* set up plane related registers */
+	/* set up the frame buffer stride, offset and start. */
+	REG_WRITE(dspstride_reg, ps3d_flip->pitch_l);
+	REG_WRITE(dsplinoff_reg, 0);
+	REG_WRITE(dspsurf_reg, ps3d_flip->uiAddr_l);
+
+	/* Try to attach/de-attach Plane B to an existing swap chain,
+	 * especially with another frame buffer inserted into GTT. */
+	/* eError = MRSTLFBChangeSwapChainProperty(&Start, pipe); */
+
+	/*enable the plane */
+	dspcntr_val &= ~(S3D_SPRITE_ORDER_BITS | S3D_SPRITE_INTERLEAVING_BITS);
+	REG_WRITE(dspcntr_reg, dspcntr_val);
+	mdelay(20);		/* msleep(20); */
+	/* psb_intel_wait_for_vblank(dev); */
+
+	/*enable the pipe */
+	REG_WRITE(pipeconf_reg, pipeconf_val);
+
+	/* set up Vendor Specific InfoFrame for 3D format. */
+	if (pipe == 1)
+		mrfld_disable_s3d_InfoFrame(dev);
+
+	return 0;
+}
+
+/**
+ * Perform display 3D mode set to full side_by_side 3D display.
+ *
+ * FIXME: Assume the 3D buffer is the same as display resolution. Will re-visit
+ * it for panel fitting mode.
+ * Set up the pll, two times 2D clock.
+ */
+int mrfld_s3d_to_full_side_by_side(struct drm_device *dev, int pipe, struct
+				   mrfld_s3d_flip *ps3d_flip)
+{
+	/* register */
+	u32 pipeconf_reg = PIPEBCONF;
+	u32 pipesrc_reg = PIPEBSRC;
+	u32 htot_reg = HTOTAL_B;
+	u32 hblank_reg = HBLANK_B;
+	u32 hsync_reg = HSYNC_B;
+
+	u32 dspcntr_reg = DSPBCNTR;
+	u32 dspstride_reg = DSPBSTRIDE;
+	u32 dsplinoff_reg = DSPBLINOFF;
+	u32 dspsize_reg = DSPBSIZE;
+	u32 dspsurf_reg = DSPBSURF;
+
+	u32 dspcntr_reg_d = DSPDCNTR;
+	u32 dspstride_reg_d = DSPDSTRIDE;
+	u32 dsplinoff_reg_d = DSPDLINOFF;
+	u32 dspsize_reg_d = DSPDSIZE;
+	u32 dspsurf_reg_d = DSPDSURF;
+	u32 dsppos_reg_d = DSPDPOS;
+
+	/* values */
+	u32 pipeconf_val = 0;
+	u32 pipesrc_val = 0;
+	u32 htot_val = 0;
+	u32 hblank_val = 0;
+	u32 hsync_val = 0;
+
+	u32 dspcntr_val = 0;
+	u32 dspsize_val = 0;
+	u32 dsppos_val = 0;
+	u32 temp = 0;
+	u32 temp1 = 0;
+
+	PSB_DEBUG_ENTRY("pipe = 0x%x \n", pipe);
+
+	if (pipe != 1) {
+		DRM_ERROR("Illegal Pipe Number. \n");
+		return 0;
+	}
+
+	/* Save the related DC registers. */
+	pipeconf_val = REG_READ(pipeconf_reg);
+	pipesrc_val = REG_READ(pipesrc_reg);
+	htot_val = REG_READ(htot_reg);
+	hblank_val = REG_READ(hblank_reg);
+	hsync_val = REG_READ(hsync_reg);
+
+	dspcntr_val = REG_READ(dspcntr_reg);
+	dspsize_val = REG_READ(dspsize_reg);
+
+	/* Disable pipe and port. */
+	mrfld_disable_crtc(dev, pipe, true);
+
+	/* Set up the pll, two times 2D clock. */
+	/* mrfld_setup_pll (dev, pipe, adjusted_mode->clock); */
+
+	/* Set up full side-by-side display 3D format. */
+
+	/* set up pipe related registers */
+	/* Get the 3D pipe source. */
+	REG_WRITE(pipesrc_reg, (pipesrc_val & 0x0000ffff) |
+		  (((pipesrc_val & 0xFFFF0000) + 0x00010000) * 2 - 0x00010000));
+
+	/* Get the 3D Hactive and Htotal. */
+	temp = ((htot_val & 0x0000FFFF) + 1) * 2 - 1;
+	temp1 = ((htot_val & 0xFFFF0000) + 0x00010000) * 2 - 0x00010000;
+	REG_WRITE(htot_reg, temp1 | temp);
+
+	/* Get the 3D Hblank. */
+	temp = ((hblank_val & 0x0000FFFF) + 1) * 2 - 1;
+	temp1 = ((hblank_val & 0xFFFF0000) + 0x00010000) * 2 - 0x00010000;
+	REG_WRITE(hblank_reg, temp1 | temp);
+
+	/* Get the 3D Hsync */
+	temp = ((hsync_val & 0x0000FFFF) + 1) * 2 - 1;
+	temp1 = ((hsync_val & 0xFFFF0000) + 0x00010000) * 2 - 0x00010000;
+	REG_WRITE(hsync_reg, temp1 | temp);
+
+	/* set up plane related registers */
+	REG_WRITE(dspsize_reg_d, dspsize_val);
+	dsppos_val = (dspsize_val & 0x0000ffff) + 1;
+	REG_WRITE(dsppos_reg_d, dsppos_val);
+
+	/* set up the frame buffer stride, offset and start. */
+	REG_WRITE(dspstride_reg, ps3d_flip->pitch_l);
+	REG_WRITE(dspstride_reg_d, ps3d_flip->pitch_r);
+	REG_WRITE(dsplinoff_reg, 0);
+	REG_WRITE(dsplinoff_reg_d, 0);
+	REG_WRITE(dspsurf_reg, ps3d_flip->uiAddr_l);
+	REG_WRITE(dspsurf_reg_d, ps3d_flip->uiAddr_r);
+
+	/* Try to attach/de-attach Plane B to an existing swap chain,
+	 * especially with another frame buffer inserted into GTT. */
+	/* eError = MRSTLFBChangeSwapChainProperty(&Start, pipe); */
+
+	/*enable the plane */
+	dspcntr_val &= ~(S3D_SPRITE_ORDER_BITS | S3D_SPRITE_INTERLEAVING_BITS);
+	REG_WRITE(dspcntr_reg, dspcntr_val);
+	REG_WRITE(dspcntr_reg_d, dspcntr_val);
+	mdelay(20);		/* msleep(20); */
+	/* psb_intel_wait_for_vblank(dev); */
+
+	/*enable the pipe */
+	REG_WRITE(pipeconf_reg, pipeconf_val);
+
+	/* set up Vendor Specific InfoFrame for 3D format. */
+	if (pipe == 1)
+		mrfld_set_up_s3d_InfoFrame(dev, S3D_SIDE_BY_SIDE_FULL);
+
+	return 0;
+}
+
+/**
+ * Perform 2d mode set back from full side_by_side 3D display.
+ *
+ */
+int mrfld_s3d_from_full_side_by_side(struct drm_device *dev, int pipe, struct
+				     mrfld_s3d_flip *ps3d_flip)
+{
+	/* register */
+	u32 pipeconf_reg = PIPEBCONF;
+	u32 pipesrc_reg = PIPEBSRC;
+	u32 htot_reg = HTOTAL_B;
+	u32 hblank_reg = HBLANK_B;
+	u32 hsync_reg = HSYNC_B;
+
+	u32 dspcntr_reg = DSPBCNTR;
+	u32 dspstride_reg = DSPBSTRIDE;
+	u32 dsplinoff_reg = DSPBLINOFF;
+	u32 dspsize_reg = DSPBSIZE;
+	u32 dspsurf_reg = DSPBSURF;
+
+	/* values */
+	u32 pipeconf_val = 0;
+	u32 pipesrc_val = 0;
+	u32 htot_val = 0;
+	u32 hblank_val = 0;
+	u32 hsync_val = 0;
+
+	u32 dspcntr_val = 0;
+	u32 dspsize_val = 0;
+	u32 temp = 0;
+	u32 temp1 = 0;
+
+	PSB_DEBUG_ENTRY("pipe = 0x%x \n", pipe);
+
+	if (pipe != 1) {
+		DRM_ERROR("Illegal Pipe Number. \n");
+		return 0;
+	}
+
+	/* Save the related DC registers. */
+	pipeconf_val = REG_READ(pipeconf_reg);
+	pipesrc_val = REG_READ(pipesrc_reg);
+	htot_val = REG_READ(htot_reg);
+	hblank_val = REG_READ(hblank_reg);
+	hsync_val = REG_READ(hsync_reg);
+
+	dspcntr_val = REG_READ(dspcntr_reg);
+	dspsize_val = REG_READ(dspsize_reg);
+
+	/* Disable pipe and port. */
+	mrfld_disable_crtc(dev, pipe, true);
+
+	/* Set up the pll, half of 3D clock. */
+	/* mrfld_setup_pll (dev, pipe, adjusted_mode->clock); */
+
+	/* set up pipe related registers */
+	/* Get the 2D pipe source. */
+	REG_WRITE(pipesrc_reg, (pipesrc_val & 0x0000ffff) |
+		  (((pipesrc_val & 0xFFFF0000) + 0x00010000) / 2 - 0x00010000));
+
+	/* Get the 2D Hactive and Htotal. */
+	temp = ((htot_val & 0x0000FFFF) + 1) / 2 - 1;
+	temp1 = ((htot_val & 0xFFFF0000) + 0x00010000) / 2 - 0x00010000;
+	REG_WRITE(htot_reg, temp1 | temp);
+
+	/* Get the 2D Hblank. */
+	temp = ((hblank_val & 0x0000FFFF) + 1) / 2 - 1;
+	temp1 = ((hblank_val & 0xFFFF0000) + 0x00010000) / 2 - 0x00010000;
+	REG_WRITE(hblank_reg, temp1 | temp);
+
+	/* Get the 2D Hsync */
+	temp = ((hsync_val & 0x0000FFFF) + 1) / 2 - 1;
+	temp1 = ((hsync_val & 0xFFFF0000) + 0x00010000) / 2 - 0x00010000;
+	REG_WRITE(hsync_reg, temp1 | temp);
+
+	/* set up plane related registers */
+	/* set up the frame buffer stride, offset and start. */
+	REG_WRITE(dspstride_reg, ps3d_flip->pitch_l);
+	REG_WRITE(dsplinoff_reg, 0);
+	REG_WRITE(dspsurf_reg, ps3d_flip->uiAddr_l);
+
+	/* Try to attach/de-attach Plane B to an existing swap chain,
+	 * especially with another frame buffer inserted into GTT. */
+	/* eError = MRSTLFBChangeSwapChainProperty(&Start, pipe); */
+
+	/*enable the plane */
+	dspcntr_val &= ~(S3D_SPRITE_ORDER_BITS | S3D_SPRITE_INTERLEAVING_BITS);
+	REG_WRITE(dspcntr_reg, dspcntr_val);
+	mdelay(20);		/* msleep(20); */
+	/* psb_intel_wait_for_vblank(dev); */
+
+	/*enable the pipe */
+	REG_WRITE(pipeconf_reg, pipeconf_val);
+
+	/* set up Vendor Specific InfoFrame for 3D format. */
+	if (pipe == 1)
+		mrfld_disable_s3d_InfoFrame(dev);
+
+	return 0;
+}
+
+/**
+ * Perform display 3D mode set to half side_by_side 3D display.
+ *
+ * FIXME: Assume the 3D buffer is the same as display resolution. Will re-visit
+ * it for panel fitting mode.
+ */
+int mrfld_s3d_to_half_side_by_side(struct drm_device *dev, int pipe, struct
+				   mrfld_s3d_flip *ps3d_flip)
+{
+	/* register */
+	u32 pipeconf_reg = PIPEBCONF;
+	u32 dspcntr_reg = DSPBCNTR;
+	u32 dspstride_reg = DSPBSTRIDE;
+	u32 dsplinoff_reg = DSPBLINOFF;
+	u32 dspsize_reg = DSPBSIZE;
+	u32 dspsurf_reg = DSPBSURF;
+
+	u32 dspcntr_reg_d = DSPDCNTR;
+	u32 dspstride_reg_d = DSPDSTRIDE;
+	u32 dsplinoff_reg_d = DSPDLINOFF;
+	u32 dspsize_reg_d = DSPDSIZE;
+	u32 dspsurf_reg_d = DSPDSURF;
+	u32 dsppos_reg_d = DSPDPOS;
+
+	/* values */
+	u32 pipeconf_val = 0;
+	u32 dspcntr_val = 0;
+	u32 dspsize_val = 0;
+	u32 dsppos_val = 0;
+
+	PSB_DEBUG_ENTRY("pipe = 0x%x \n", pipe);
+
+	if (pipe != 1) {
+		DRM_ERROR("Illegal Pipe Number. \n");
+		return 0;
+	}
+
+	/* Save the related DC registers. */
+	pipeconf_val = REG_READ(pipeconf_reg);
+	dspcntr_val = REG_READ(dspcntr_reg);
+	dspsize_val = REG_READ(dspsize_reg);
+
+	/* Disable pipe and port, don't disable the PLL. */
+	mrfld_disable_crtc(dev, pipe, true);
+
+	/* Set up half side-by-side display 3D format. */
+
+	/* set up plane related registers */
+	dsppos_val = ((dspsize_val & 0x0000ffff) + 1) / 2;
+	REG_WRITE(dsppos_reg_d, dsppos_val);
+
+	dspsize_val = (dspsize_val & 0xffff0000) | (((dspsize_val & 0x0000ffff)
+						     + 1) / 2 - 1);
+	REG_WRITE(dspsize_reg, dspsize_val);
+	REG_WRITE(dspsize_reg_d, dspsize_val);
+
+	/* set up the frame buffer stride, offset and start. */
+	REG_WRITE(dspstride_reg, ps3d_flip->pitch_l);
+	REG_WRITE(dspstride_reg_d, ps3d_flip->pitch_r);
+	REG_WRITE(dsplinoff_reg, 0);
+	REG_WRITE(dsplinoff_reg_d, 0);
+	REG_WRITE(dspsurf_reg, ps3d_flip->uiAddr_l);
+	REG_WRITE(dspsurf_reg_d, ps3d_flip->uiAddr_r);
+
+	/* Try to attach/de-attach Plane B to an existing swap chain,
+	 * especially with another frame buffer inserted into GTT. */
+	/* eError = MRSTLFBChangeSwapChainProperty(&Start, pipe); */
+
+	/*enable the plane */
+	dspcntr_val &= ~(S3D_SPRITE_ORDER_BITS | S3D_SPRITE_INTERLEAVING_BITS);
+	REG_WRITE(dspcntr_reg, dspcntr_val);
+	REG_WRITE(dspcntr_reg_d, dspcntr_val);
+	mdelay(20);		/* msleep(20); */
+	/* psb_intel_wait_for_vblank(dev); */
+
+	/*enable the pipe */
+	REG_WRITE(pipeconf_reg, pipeconf_val);
+
+	/* set up Vendor Specific InfoFrame for 3D format. */
+	if (pipe == 1)
+		mrfld_set_up_s3d_InfoFrame(dev, S3D_SIDE_BY_SIDE_HALF);
+
+	return 0;
+}
+
+/**
+ * Perform 2d mode set back from half side_by_side 3D display.
+ *
+ */
+int mrfld_s3d_from_half_side_by_side(struct drm_device *dev, int pipe, struct
+				     mrfld_s3d_flip *ps3d_flip)
+{
+	/* register */
+	u32 pipeconf_reg = PIPEBCONF;
+	u32 dspcntr_reg = DSPBCNTR;
+	u32 dspstride_reg = DSPBSTRIDE;
+	u32 dsplinoff_reg = DSPBLINOFF;
+	u32 dspsize_reg = DSPBSIZE;
+	u32 dspsurf_reg = DSPBSURF;
+
+	/* values */
+	u32 pipeconf_val = 0;
+	u32 dspcntr_val = 0;
+	u32 dspsize_val = 0;
+
+	PSB_DEBUG_ENTRY("pipe = 0x%x \n", pipe);
+
+	if (pipe != 1) {
+		DRM_ERROR("Illegal Pipe Number. \n");
+		return 0;
+	}
+
+	/* Save the related DC registers. */
+	pipeconf_val = REG_READ(pipeconf_reg);
+	dspcntr_val = REG_READ(dspcntr_reg);
+	dspsize_val = REG_READ(dspsize_reg);
+
+	/* Disable pipe and port, don't disable the PLL. */
+	mrfld_disable_crtc(dev, pipe, true);
+
+	/* set up plane related registers */
+	dspsize_val = (dspsize_val & 0xffff0000) | (((dspsize_val & 0x0000ffff)
+						     + 1) * 2 - 1);
+	REG_WRITE(dspsize_reg, dspsize_val);
+
+	/* set up the frame buffer stride, offset and start. */
+	REG_WRITE(dspstride_reg, ps3d_flip->pitch_l);
+	REG_WRITE(dsplinoff_reg, 0);
+	REG_WRITE(dspsurf_reg, ps3d_flip->uiAddr_l);
+
+	/* Try to attach/de-attach Plane B to an existing swap chain,
+	 * especially with another frame buffer inserted into GTT. */
+	/* eError = MRSTLFBChangeSwapChainProperty(&Start, pipe); */
+
+	/*enable the plane */
+	dspcntr_val &= ~(S3D_SPRITE_ORDER_BITS | S3D_SPRITE_INTERLEAVING_BITS);
+	REG_WRITE(dspcntr_reg, dspcntr_val);
+	mdelay(20);		/* msleep(20); */
+	/* psb_intel_wait_for_vblank(dev); */
+
+	/*enable the pipe */
+	REG_WRITE(pipeconf_reg, pipeconf_val);
+
+	/* set up Vendor Specific InfoFrame for 3D format. */
+	if (pipe == 1)
+		mrfld_disable_s3d_InfoFrame(dev);
+
+	return 0;
+}
+
+/**
+ * Perform display 3D mode set to pixel alternative 3D display.
+ *
+ * FIXME: Assume the 3D buffer is the same as display resolution. Will re-visit
+ * it for panel fitting mode.
+ * Set up the pll, two times 2D clock.
+ */
+int mrfld_s3d_to_pixel_interleaving_full(struct drm_device *dev, int pipe, struct
+					 mrfld_s3d_flip *ps3d_flip)
+{
+	DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+	struct mdfld_dsi_config *dsi_config = dev_priv->dsi_configs[0];
+
+	/* register */
+	u32 pipeconf_reg = PIPEACONF;
+	u32 pipesrc_reg = PIPEASRC;
+	u32 htot_reg = HTOTAL_A;
+	u32 hblank_reg = HBLANK_A;
+	u32 hsync_reg = HSYNC_A;
+
+	u32 dspcntr_reg = DSPACNTR;
+	u32 dspstride_reg = DSPASTRIDE;
+	u32 dsplinoff_reg = DSPALINOFF;
+	u32 dspsize_reg = DSPASIZE;
+	u32 dspsurf_reg = DSPASURF;
+	u32 mipi_reg = MIPI;
+
+	u32 dspcntr_reg_d = DSPDCNTR;
+	u32 dspstride_reg_d = DSPDSTRIDE;
+	u32 dsplinoff_reg_d = DSPDLINOFF;
+	u32 dspsize_reg_d = DSPDSIZE;
+	u32 dspsurf_reg_d = DSPDSURF;
+
+	/* values */
+	u32 pipeconf_val = 0;
+	u32 pipesrc_val = 0;
+	u32 htot_val = 0;
+	u32 hblank_val = 0;
+	u32 hsync_val = 0;
+
+	u32 dspcntr_val = 0;
+	u32 dspsize_val = 0;
+	u32 mipi_val = 0;
+	u32 temp = 0;
+	u32 temp1 = 0;
+
+	PSB_DEBUG_ENTRY("pipe = 0x%x \n", pipe);
+
+	switch (pipe) {
+	case 0:
+		break;
+	case 1:
+		DRM_ERROR("HDMI doesn't support pixel interleave full. \n");
+		return 0;
+	case 2:
+		dsi_config = dev_priv->dsi_configs[1];
+		pipeconf_reg = PIPECCONF;
+		pipesrc_reg = PIPECSRC;
+		htot_reg = HTOTAL_C;
+		hblank_reg = HBLANK_C;
+		hsync_reg = HSYNC_C;
+		dspcntr_reg = DSPCCNTR;
+		dspstride_reg = DSPCSTRIDE;
+		dsplinoff_reg = DSPCLINOFF;
+		dspsize_reg = DSPCSIZE;
+		dspsurf_reg = DSPCSURF;
+		mipi_reg = MIPI_C;
+		break;
+	default:
+		DRM_ERROR("Illegal Pipe Number. \n");
+		return 0;
+	}
+
+	/* Save the related DC registers. */
+	pipeconf_val = REG_READ(pipeconf_reg);
+	pipesrc_val = REG_READ(pipesrc_reg);
+	htot_val = REG_READ(htot_reg);
+	hblank_val = REG_READ(hblank_reg);
+	hsync_val = REG_READ(hsync_reg);
+
+	dspcntr_val = REG_READ(dspcntr_reg);
+	dspsize_val = REG_READ(dspsize_reg);
+	mipi_val = REG_READ(mipi_reg);
+
+	/* Disable pipe and port. */
+	mrfld_disable_crtc(dev, pipe, true);
+
+	/* Set up the pll, two times 2D clock. */
+	/* mrfld_setup_pll (dev, pipe, adjusted_mode->clock); */
+
+	/* Set up full pixel interleaving display 3D format. */
+
+	/* set up pipe related registers */
+	/* Get the 3D pipe source. */
+	REG_WRITE(pipesrc_reg, (pipesrc_val & 0x0000ffff) |
+		  (((pipesrc_val & 0xFFFF0000) + 0x00010000) * 2 - 0x00010000));
+
+	/* Get the 3D Hactive and Htotal. */
+	temp = ((htot_val & 0x0000FFFF) + 1) * 2 - 1;
+	temp1 = ((htot_val & 0xFFFF0000) + 0x00010000) * 2 - 0x00010000;
+	REG_WRITE(htot_reg, temp1 | temp);
+
+	/* Get the 3D Hblank. */
+	temp = ((hblank_val & 0x0000FFFF) + 1) * 2 - 1;
+	temp1 = ((hblank_val & 0xFFFF0000) + 0x00010000) * 2 - 0x00010000;
+	REG_WRITE(hblank_reg, temp1 | temp);
+
+	/* Get the 3D Hsync */
+	temp = ((hsync_val & 0x0000FFFF) + 1) * 2 - 1;
+	temp1 = ((hsync_val & 0xFFFF0000) + 0x00010000) * 2 - 0x00010000;
+	REG_WRITE(hsync_reg, temp1 | temp);
+
+	/* set up plane related registers */
+	REG_WRITE(dspsize_reg_d, dspsize_val);
+
+	/* set up the frame buffer stride, offset and start. */
+	REG_WRITE(dspstride_reg, ps3d_flip->pitch_l);
+	REG_WRITE(dspstride_reg_d, ps3d_flip->pitch_r);
+	REG_WRITE(dsplinoff_reg, 0);
+	REG_WRITE(dsplinoff_reg_d, 0);
+	REG_WRITE(dspsurf_reg, ps3d_flip->uiAddr_l);
+	REG_WRITE(dspsurf_reg_d, ps3d_flip->uiAddr_r);
+
+	/* Try to attach/de-attach Plane B to an existing swap chain,
+	 * especially with another frame buffer inserted into GTT. */
+	/* eError = MRSTLFBChangeSwapChainProperty(&Start, pipe); */
+
+	if ((pipe == 0) || (pipe == 2)) {
+		/*set up mipi port related registers */
+		REG_WRITE(mipi_reg, mipi_val);
+
+		/*setup MIPI adapter + MIPI IP registers */
+		/* mdfld_dsi_controller_init(dsi_config, pipe); */
+		mdelay(20);	/* msleep(20); */
+
+		/* re-init the panel */
+		dsi_config->drv_ic_inited = 0;
+		/* mdfld_dsi_tmd_drv_ic_init(dsi_config, pipe); */
+	}
+
+	/*enable the plane */
+	dspcntr_val &= ~(S3D_SPRITE_ORDER_BITS | S3D_SPRITE_INTERLEAVING_BITS);
+	REG_WRITE(dspcntr_reg_d, dspcntr_val);
+	dspcntr_val |= S3D_SPRITE_ORDER_A_FIRST | S3D_SPRITE_INTERLEAVING_PIXEL;
+	REG_WRITE(dspcntr_reg, dspcntr_val);
+	mdelay(20);		/* msleep(20); */
+	/* psb_intel_wait_for_vblank(dev); */
+
+	/*enable the pipe */
+	REG_WRITE(pipeconf_reg, pipeconf_val);
+
+	return 0;
+}
+
+/**
+ * Perform display 2D mode set from pixel alternative 3D display.
+ *
+ */
+int mrfld_s3d_from_pixel_interleaving_full(struct drm_device *dev, int pipe, struct
+					   mrfld_s3d_flip *ps3d_flip)
+{
+	DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+	struct mdfld_dsi_config *dsi_config = dev_priv->dsi_configs[0];
+
+	/* register */
+	u32 pipeconf_reg = PIPEACONF;
+	u32 pipesrc_reg = PIPEASRC;
+	u32 htot_reg = HTOTAL_A;
+	u32 hblank_reg = HBLANK_A;
+	u32 hsync_reg = HSYNC_A;
+
+	u32 dspcntr_reg = DSPACNTR;
+	u32 dspstride_reg = DSPASTRIDE;
+	u32 dsplinoff_reg = DSPALINOFF;
+	u32 dspsize_reg = DSPASIZE;
+	u32 dspsurf_reg = DSPASURF;
+	u32 mipi_reg = MIPI;
+
+	/* values */
+	u32 pipeconf_val = 0;
+	u32 pipesrc_val = 0;
+	u32 htot_val = 0;
+	u32 hblank_val = 0;
+	u32 hsync_val = 0;
+
+	u32 dspcntr_val = 0;
+	u32 dspsize_val = 0;
+	u32 mipi_val = 0;
+	u32 temp = 0;
+	u32 temp1 = 0;
+
+	PSB_DEBUG_ENTRY("pipe = 0x%x \n", pipe);
+
+	switch (pipe) {
+	case 0:
+		break;
+	case 1:
+		DRM_ERROR("HDMI doesn't support pixel interleave full. \n");
+		return 0;
+	case 2:
+		dsi_config = dev_priv->dsi_configs[1];
+		pipeconf_reg = PIPECCONF;
+		pipesrc_reg = PIPECSRC;
+		htot_reg = HTOTAL_C;
+		hblank_reg = HBLANK_C;
+		hsync_reg = HSYNC_C;
+		dspcntr_reg = DSPCCNTR;
+		dspstride_reg = DSPCSTRIDE;
+		dsplinoff_reg = DSPCLINOFF;
+		dspsize_reg = DSPCSIZE;
+		dspsurf_reg = DSPCSURF;
+		mipi_reg = MIPI_C;
+		break;
+	default:
+		DRM_ERROR("Illegal Pipe Number. \n");
+		return 0;
+	}
+
+	/* Save the related DC registers. */
+	pipeconf_val = REG_READ(pipeconf_reg);
+	pipesrc_val = REG_READ(pipesrc_reg);
+	htot_val = REG_READ(htot_reg);
+	hblank_val = REG_READ(hblank_reg);
+	hsync_val = REG_READ(hsync_reg);
+
+	dspcntr_val = REG_READ(dspcntr_reg);
+	dspsize_val = REG_READ(dspsize_reg);
+	mipi_val = REG_READ(mipi_reg);
+
+	/* Disable pipe and port. */
+	mrfld_disable_crtc(dev, pipe, true);
+
+	/* Set up the pll, half of 3D clock. */
+	/* mrfld_setup_pll (dev, pipe, adjusted_mode->clock); */
+
+	/* set up pipe related registers */
+	/* Get the 2D pipe source. */
+	REG_WRITE(pipesrc_reg, (pipesrc_val & 0x0000ffff) |
+		  (((pipesrc_val & 0xFFFF0000) + 0x00010000) / 2 - 0x00010000));
+
+	/* Get the 2D Hactive and Htotal. */
+	temp = ((htot_val & 0x0000FFFF) + 1) / 2 - 1;
+	temp1 = ((htot_val & 0xFFFF0000) + 0x00010000) / 2 - 0x00010000;
+	REG_WRITE(htot_reg, temp1 | temp);
+
+	/* Get the 2D Hblank. */
+	temp = ((hblank_val & 0x0000FFFF) + 1) / 2 - 1;
+	temp1 = ((hblank_val & 0xFFFF0000) + 0x00010000) / 2 - 0x00010000;
+	REG_WRITE(hblank_reg, temp1 | temp);
+
+	/* Get the 2D Hsync */
+	temp = ((hsync_val & 0x0000FFFF) + 1) / 2 - 1;
+	temp1 = ((hsync_val & 0xFFFF0000) + 0x00010000) / 2 - 0x00010000;
+	REG_WRITE(hsync_reg, temp1 | temp);
+
+	/* set up plane related registers */
+	/* set up the frame buffer stride, offset and start. */
+	REG_WRITE(dspstride_reg, ps3d_flip->pitch_l);
+	REG_WRITE(dsplinoff_reg, 0);
+	REG_WRITE(dspsurf_reg, ps3d_flip->uiAddr_l);
+
+	/* Try to attach/de-attach Plane B to an existing swap chain,
+	 * especially with another frame buffer inserted into GTT. */
+	/* eError = MRSTLFBChangeSwapChainProperty(&Start, pipe); */
+
+	if ((pipe == 0) || (pipe == 2)) {
+		/*set up mipi port related registers */
+		REG_WRITE(mipi_reg, mipi_val);
+
+		/*setup MIPI adapter + MIPI IP registers */
+		/* mdfld_dsi_controller_init(dsi_config, pipe); */
+		mdelay(20);	/* msleep(20); */
+
+		/* re-init the panel */
+		dsi_config->drv_ic_inited = 0;
+		/* mdfld_dsi_tmd_drv_ic_init(dsi_config, pipe); */
+	}
+
+	/*enable the plane */
+	dspcntr_val &= ~(S3D_SPRITE_ORDER_BITS | S3D_SPRITE_INTERLEAVING_BITS);
+	REG_WRITE(dspcntr_reg, dspcntr_val);
+	mdelay(20);		/* msleep(20); */
+	/* psb_intel_wait_for_vblank(dev); */
+
+	/*enable the pipe */
+	REG_WRITE(pipeconf_reg, pipeconf_val);
+
+	return 0;
+}
+
+/**
+ * Perform display 3D mode set to pixel alternative 3D display with two
+ * half-width L & R frame buffers.
+ *
+ */
+int mrfld_s3d_to_pixel_interleaving_half(struct drm_device *dev, int pipe, struct
+					 mrfld_s3d_flip *ps3d_flip)
+{
+	DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+	struct mdfld_dsi_config *dsi_config = dev_priv->dsi_configs[0];
+
+	/* register */
+	u32 pipeconf_reg = PIPEACONF;
+	u32 dspcntr_reg = DSPACNTR;
+	u32 dspstride_reg = DSPASTRIDE;
+	u32 dsplinoff_reg = DSPALINOFF;
+	u32 dspsize_reg = DSPASIZE;
+	u32 dspsurf_reg = DSPASURF;
+	u32 mipi_reg = MIPI;
+
+	u32 dspcntr_reg_d = DSPDCNTR;
+	u32 dspstride_reg_d = DSPDSTRIDE;
+	u32 dsplinoff_reg_d = DSPDLINOFF;
+	u32 dspsize_reg_d = DSPDSIZE;
+	u32 dspsurf_reg_d = DSPDSURF;
+
+	/* values */
+	u32 pipeconf_val = 0;
+	u32 dspcntr_val = 0;
+	u32 dspsize_val = 0;
+	u32 mipi_val = 0;
+
+	PSB_DEBUG_ENTRY("pipe = 0x%x \n", pipe);
+
+	switch (pipe) {
+	case 0:
+		break;
+	case 1:
+		DRM_ERROR("HDMI doesn't support pixel interleave half. \n");
+		return 0;
+	case 2:
+		dsi_config = dev_priv->dsi_configs[1];
+		pipeconf_reg = PIPECCONF;
+		dspcntr_reg = DSPCCNTR;
+		dspstride_reg = DSPCSTRIDE;
+		dsplinoff_reg = DSPCLINOFF;
+		dspsize_reg = DSPCSIZE;
+		dspsurf_reg = DSPCSURF;
+		mipi_reg = MIPI_C;
+		break;
+	default:
+		DRM_ERROR("Illegal Pipe Number. \n");
+		return 0;
+	}
+
+	/* Save the related DC registers. */
+	pipeconf_val = REG_READ(pipeconf_reg);
+	dspcntr_val = REG_READ(dspcntr_reg);
+	dspsize_val = REG_READ(dspsize_reg);
+	mipi_val = REG_READ(mipi_reg);
+
+	/* Disable pipe and port, don't disable the PLL. */
+	mrfld_disable_crtc(dev, pipe, true);
+
+	/* Set up half pixel interleaving display 3D format. */
+
+	/* set up plane related registers */
+	dspsize_val = (dspsize_val & 0xFFFF0000) | (((dspsize_val & 0x0000FFFF)
+						     + 1) / 2 - 1);
+	REG_WRITE(dspsize_reg, dspsize_val);
+	REG_WRITE(dspsize_reg_d, dspsize_val);
+
+	/* set up the frame buffer stride, offset and start. */
+	REG_WRITE(dspstride_reg, ps3d_flip->pitch_l);
+	REG_WRITE(dspstride_reg_d, ps3d_flip->pitch_r);
+	REG_WRITE(dsplinoff_reg, 0);
+	REG_WRITE(dsplinoff_reg_d, 0);
+	REG_WRITE(dspsurf_reg, ps3d_flip->uiAddr_l);
+	REG_WRITE(dspsurf_reg_d, ps3d_flip->uiAddr_r);
+
+	/* Try to attach/de-attach Plane B to an existing swap chain,
+	 * especially with another frame buffer inserted into GTT. */
+	/* eError = MRSTLFBChangeSwapChainProperty(&Start, pipe); */
+
+	if ((pipe == 0) || (pipe == 2)) {
+		/*set up mipi port related registers */
+		REG_WRITE(mipi_reg, mipi_val);
+
+		/*setup MIPI adapter + MIPI IP registers */
+		/* mdfld_dsi_controller_init(dsi_config, pipe); */
+		mdelay(20);	/* msleep(20); */
+
+		/* re-init the panel */
+		dsi_config->drv_ic_inited = 0;
+		/* mdfld_dsi_tmd_drv_ic_init(dsi_config, pipe); */
+	}
+
+	/*enable the plane */
+	dspcntr_val &= ~(S3D_SPRITE_ORDER_BITS | S3D_SPRITE_INTERLEAVING_BITS);
+	REG_WRITE(dspcntr_reg_d, dspcntr_val);
+	dspcntr_val |= S3D_SPRITE_ORDER_A_FIRST | S3D_SPRITE_INTERLEAVING_PIXEL;
+	REG_WRITE(dspcntr_reg, dspcntr_val);
+	mdelay(20);		/* msleep(20); */
+	/* psb_intel_wait_for_vblank(dev); */
+
+	/*enable the pipe */
+	REG_WRITE(pipeconf_reg, pipeconf_val);
+
+	return 0;
+}
+
+/**
+ * Perform display 2D mode set from pixel alternative 3D display with two
+ * half-width L & R frame buffers.
+ *
+ */
+int mrfld_s3d_from_pixel_interleaving_half(struct drm_device *dev, int pipe, struct
+					   mrfld_s3d_flip *ps3d_flip)
+{
+	DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+	struct mdfld_dsi_config *dsi_config = dev_priv->dsi_configs[0];
+
+	/* register */
+	u32 pipeconf_reg = PIPEACONF;
+	u32 dspcntr_reg = DSPACNTR;
+	u32 dspstride_reg = DSPASTRIDE;
+	u32 dsplinoff_reg = DSPALINOFF;
+	u32 dspsize_reg = DSPASIZE;
+	u32 dspsurf_reg = DSPASURF;
+	u32 mipi_reg = MIPI;
+
+	/* values */
+	u32 pipeconf_val = 0;
+	u32 dspcntr_val = 0;
+	u32 dspsize_val = 0;
+	u32 mipi_val = 0;
+
+	PSB_DEBUG_ENTRY("pipe = 0x%x \n", pipe);
+
+	switch (pipe) {
+	case 0:
+		break;
+	case 1:
+		DRM_ERROR("HDMI doesn't support pixel interleave half. \n");
+		return 0;
+	case 2:
+		dsi_config = dev_priv->dsi_configs[1];
+		pipeconf_reg = PIPECCONF;
+		dspcntr_reg = DSPCCNTR;
+		dspstride_reg = DSPCSTRIDE;
+		dsplinoff_reg = DSPCLINOFF;
+		dspsize_reg = DSPCSIZE;
+		dspsurf_reg = DSPCSURF;
+		mipi_reg = MIPI_C;
+		break;
+	default:
+		DRM_ERROR("Illegal Pipe Number. \n");
+		return 0;
+	}
+
+	/* Save the related DC registers. */
+	pipeconf_val = REG_READ(pipeconf_reg);
+	dspcntr_val = REG_READ(dspcntr_reg);
+	dspsize_val = REG_READ(dspsize_reg);
+	mipi_val = REG_READ(mipi_reg);
+
+	/* Disable pipe and port, don't disable the PLL. */
+	mrfld_disable_crtc(dev, pipe, true);
+
+	/* set up plane related registers */
+	dspsize_val = (dspsize_val & 0xFFFF0000) | (((dspsize_val & 0x0000FFFF)
+						     + 1) * 2 - 1);
+	REG_WRITE(dspsize_reg, dspsize_val);
+	/* set up the frame buffer stride, offset and start. */
+	REG_WRITE(dspstride_reg, ps3d_flip->pitch_l);
+	REG_WRITE(dsplinoff_reg, 0);
+	REG_WRITE(dspsurf_reg, ps3d_flip->uiAddr_l);
+
+	/* Try to attach/de-attach Plane B to an existing swap chain,
+	 * especially with another frame buffer inserted into GTT. */
+	/* eError = MRSTLFBChangeSwapChainProperty(&Start, pipe); */
+
+	if ((pipe == 0) || (pipe == 2)) {
+		/*set up mipi port related registers */
+		REG_WRITE(mipi_reg, mipi_val);
+
+		/*setup MIPI adapter + MIPI IP registers */
+		/* mdfld_dsi_controller_init(dsi_config, pipe); */
+		mdelay(20);	/* msleep(20); */
+
+		/* re-init the panel */
+		dsi_config->drv_ic_inited = 0;
+		/* mdfld_dsi_tmd_drv_ic_init(dsi_config, pipe); */
+	}
+
+	/*enable the plane */
+	dspcntr_val &= ~(S3D_SPRITE_ORDER_BITS | S3D_SPRITE_INTERLEAVING_BITS);
+	REG_WRITE(dspcntr_reg, dspcntr_val);
+	mdelay(20);		/* msleep(20); */
+	/* psb_intel_wait_for_vblank(dev); */
+
+	/*enable the pipe */
+	REG_WRITE(pipeconf_reg, pipeconf_val);
+
+	return 0;
+}
+
+/**
+ * Check if the DSI display supports S3D. If so, report supported S3D formats
+ *
+ */
+int mrfld_dsi_s3d_query(struct drm_device *dev, struct drm_psb_s3d_query
+			*s3d_query)
+{
+	DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+	struct mdfld_dsi_config *dsi_config = dev_priv->dsi_configs[0];
+	uint32_t s3d_display_type = s3d_query->s3d_display_type;
+
+	switch (s3d_display_type) {
+	case S3D_MIPIA_DISPLAY:
+		break;
+	case S3D_MIPIC_DISPLAY:
+		dsi_config = dev_priv->dsi_configs[1];
+		break;
+	default:
+		DRM_ERROR("invalid parameters. \n");
+		return -EINVAL;
+	}
+
+	if (dsi_config->s3d_format) {
+		s3d_query->is_s3d_supported = 1;
+		s3d_query->s3d_format = dsi_config->s3d_format;
+	}
+
+	return 0;
+}
+
+/**
+ * Check if the display supports S3D. If so, report supported S3D formats.
+ *
+ */
+int mrfld_s3d_query(struct drm_device *dev, struct drm_psb_s3d_query
+		    *s3d_query)
+{
+	uint32_t s3d_display_type = s3d_query->s3d_display_type;
+
+	switch (s3d_display_type) {
+	case S3D_MIPIA_DISPLAY:
+	case S3D_MIPIC_DISPLAY:
+		return mrfld_dsi_s3d_query(dev, s3d_query);
+	case S3D_HDMI_DISPLAY:
+		return mrfld_hdmi_s3d_query(dev, s3d_query);
+	default:
+		DRM_ERROR("invalid parameters. \n");
+		return -EINVAL;
+	}
+}
+
+#endif /* if KEEP_UNUSED_CODE_S3D */
+
+#if 0
+
+/**
+ * Perform display 3D mode set.
+ *
+ */
+static int mrfld_s3d_crtc_mode_set(struct drm_crtc *crtc,
+				   struct drm_display_mode *mode,
+				   struct drm_display_mode *adjusted_mode,
+				   int x, int y, struct drm_framebuffer *old_fb)
+{
+	struct drm_device *dev = crtc->dev;
+	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+	DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+	int pipe = psb_intel_crtc->pipe;
+	u32 dspcntr_reg = DSPACNTR;
+	u32 pipeconf_reg = PIPEACONF;
+	u32 htot_reg = HTOTAL_A;
+	u32 hblank_reg = HBLANK_A;
+	u32 hsync_reg = HSYNC_A;
+	u32 vtot_reg = VTOTAL_A;
+	u32 vblank_reg = VBLANK_A;
+	u32 vsync_reg = VSYNC_A;
+	u32 dspsize_reg = DSPASIZE;
+	u32 dsppos_reg = DSPAPOS;
+	u32 pipesrc_reg = PIPEASRC;
+	u32 *pipeconf = &dev_priv->pipeconf;
+	u32 *dspcntr = &dev_priv->dspcntr;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct psb_intel_output *psb_intel_output = NULL;
+	uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
+	struct drm_encoder *encoder;
+	struct drm_connector *connector;
+
+	PSB_DEBUG_ENTRY("pipe = 0x%x \n", pipe);
+
+	switch (pipe) {
+	case 0:
+		break;
+	case 1:
+		dspcntr_reg = DSPBCNTR;
+		pipeconf_reg = PIPEBCONF;
+		htot_reg = HTOTAL_B;
+		hblank_reg = HBLANK_B;
+		hsync_reg = HSYNC_B;
+		vtot_reg = VTOTAL_B;
+		vblank_reg = VBLANK_B;
+		vsync_reg = VSYNC_B;
+		dspsize_reg = DSPBSIZE;
+		dsppos_reg = DSPBPOS;
+		pipesrc_reg = PIPEBSRC;
+		pipeconf = &dev_priv->pipeconf1;
+		dspcntr = &dev_priv->dspcntr1;
+		break;
+	case 2:
+		dspcntr_reg = DSPCCNTR;
+		pipeconf_reg = PIPECCONF;
+		htot_reg = HTOTAL_C;
+		hblank_reg = HBLANK_C;
+		hsync_reg = HSYNC_C;
+		vtot_reg = VTOTAL_C;
+		vblank_reg = VBLANK_C;
+		vsync_reg = VSYNC_C;
+		dspsize_reg = DSPCSIZE;
+		dsppos_reg = DSPCPOS;
+		pipesrc_reg = PIPECSRC;
+		pipeconf = &dev_priv->pipeconf2;
+		dspcntr = &dev_priv->dspcntr2;
+		break;
+	default:
+		DRM_ERROR("Illegal Pipe Number. \n");
+		return 0;
+	}
+
+	memcpy(&psb_intel_crtc->saved_mode, mode,
+	       sizeof(struct drm_display_mode));
+	memcpy(&psb_intel_crtc->saved_adjusted_mode, adjusted_mode,
+	       sizeof(struct drm_display_mode));
+
+	list_for_each_entry(connector, &mode_config->connector_list, head) {
+		if (!connector)
+			continue;
+
+		encoder = connector->encoder;
+
+		if (!encoder)
+			continue;
+
+		if (encoder->crtc != crtc)
+			continue;
+
+		psb_intel_output = to_psb_intel_output(connector);
+
+		PSB_DEBUG_ENTRY("output->type = 0x%x \n",
+				psb_intel_output->type);
+
+	}
+
+	/* Disable the panel fitter if it was on our pipe */
+	if (psb_intel_panel_fitter_pipe(dev) == pipe)
+		REG_WRITE(PFIT_CONTROL, 0);
+
+	/* pipesrc and dspsize control the size that is scaled from,
+	 * which should always be the user's requested size.
+	 */
+	if (pipe == 1) {
+		/* FIXME: To make HDMI display with 864x480 (TPO), 480x864 (PYR) or 480x854 (TMD), set the sprite
+		 * width/height and souce image size registers with the adjusted mode for pipe B. */
+
+		/* The defined sprite rectangle must always be completely contained within the displayable
+		 * area of the screen image (frame buffer). */
+		REG_WRITE(dspsize_reg,
+			  ((MIN
+			    (mode->crtc_vdisplay,
+			     adjusted_mode->crtc_vdisplay) - 1) << 16)
+			  |
+			  (MIN
+			   (mode->crtc_hdisplay,
+			    adjusted_mode->crtc_hdisplay) - 1));
+		/* Set the CRTC with encoder mode. */
+		REG_WRITE(pipesrc_reg, ((mode->crtc_hdisplay - 1) << 16)
+			  | (mode->crtc_vdisplay - 1));
+	} else {
+		REG_WRITE(dspsize_reg,
+			  ((mode->crtc_vdisplay -
+			    1) << 16) | (mode->crtc_hdisplay - 1));
+		REG_WRITE(pipesrc_reg,
+			  ((mode->crtc_hdisplay -
+			    1) << 16) | (mode->crtc_vdisplay - 1));
+	}
+
+	REG_WRITE(dsppos_reg, 0);
+
+	if (psb_intel_output)
+		drm_object_property_get_value(&psb_intel_output->base->base,
+						 dev->
+						 mode_config.scaling_mode_property,
+						 &scalingType);
+
+	if (scalingType == DRM_MODE_SCALE_NO_SCALE) {
+		/*Moorestown doesn't have register support for centering so we need to
+		   mess with the h/vblank and h/vsync start and ends to get centering */
+		int offsetX = 0, offsetY = 0;
+
+		offsetX =
+		    (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2;
+		offsetY =
+		    (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2;
+
+		REG_WRITE(htot_reg, (mode->crtc_hdisplay - 1) |
+			  ((adjusted_mode->crtc_htotal - 1) << 16));
+		REG_WRITE(vtot_reg, (mode->crtc_vdisplay - 1) |
+			  ((adjusted_mode->crtc_vtotal - 1) << 16));
+		REG_WRITE(hblank_reg,
+			  (adjusted_mode->crtc_hblank_start - offsetX -
+			   1) | ((adjusted_mode->crtc_hblank_end - offsetX -
+				  1) << 16));
+		REG_WRITE(hsync_reg,
+			  (adjusted_mode->crtc_hsync_start - offsetX -
+			   1) | ((adjusted_mode->crtc_hsync_end - offsetX -
+				  1) << 16));
+		REG_WRITE(vblank_reg,
+			  (adjusted_mode->crtc_vblank_start - offsetY -
+			   1) | ((adjusted_mode->crtc_vblank_end - offsetY -
+				  1) << 16));
+		REG_WRITE(vsync_reg,
+			  (adjusted_mode->crtc_vsync_start - offsetY -
+			   1) | ((adjusted_mode->crtc_vsync_end - offsetY -
+				  1) << 16));
+	} else {
+		REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
+			  ((adjusted_mode->crtc_htotal - 1) << 16));
+		REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
+			  ((adjusted_mode->crtc_vtotal - 1) << 16));
+		REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
+			  ((adjusted_mode->crtc_hblank_end - 1) << 16));
+		REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
+			  ((adjusted_mode->crtc_hsync_end - 1) << 16));
+		REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
+			  ((adjusted_mode->crtc_vblank_end - 1) << 16));
+		REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
+			  ((adjusted_mode->crtc_vsync_end - 1) << 16));
+	}
+
+	/* Flush the plane changes */
+	{
+		struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+		crtc_funcs->mode_set_base(crtc, x, y, old_fb);
+	}
+
+	/* setup pipeconf */
+	*pipeconf = PIPEACONF_ENABLE;
+
+	/* Set up the display plane register */
+	*dspcntr = REG_READ(dspcntr_reg);
+	*dspcntr |= pipe << DISPPLANE_SEL_PIPE_POS;
+	*dspcntr |= DISPLAY_PLANE_ENABLE;
+	mrfld_setup_pll(dev, pipe, adjusted_mode->clock);
+
+	if (pipe != 1)
+		goto mrst_crtc_mode_set_exit;
+
+	REG_WRITE(pipeconf_reg, *pipeconf);
+	REG_READ(pipeconf_reg);
+
+	REG_WRITE(dspcntr_reg, *dspcntr);
+	psb_intel_wait_for_vblank(dev);
+
+ mrst_crtc_mode_set_exit:
+
+	return 0;
+}
+
+/**
+ * Perform display 3D mode set from half Top-and-Bottom to half Top-and-Bottom 3D display.
+ *
+ */
+static int mrfld_s3d_half_top_and_bottom(struct drm_device *dev, int pipe)
+{
+	DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+
+	/* regester */
+	u32 dspsurf_reg = DSPASURF;
+
+	/* values */
+	u32 dspsurf_val = 0;
+
+	PSB_DEBUG_ENTRY("pipe = 0x%x\n", pipe);
+
+	dspsurf_val = REG_READ(dspsurf_reg);
+
+	/* need to figure out the start. */
+	REG_WRITE(dspsurf_reg, dspsurf_val);
+
+	/* Try to attach/de-attach Plane B to an existing swap chain,
+	 * especially with another frame buffer inserted into GTT. */
+	/* eError = MRSTLFBChangeSwapChainProperty(&Start, pipe); */
+
+	/* set up Vendor Specific InfoFrame for 3D format. */
+	if (pipe == 1)
+		mrfld_set_up_s3d_InfoFrame(dev, S3D_TOP_AND_BOTTOM);
+
+	return 0;
+}
+
+/**
+ * Perform display 3D mode set from two full source inputs to line
+ * interleaving 3D display.
+ *
+ */
+static int mrfld_s3d_line_interleaving(struct drm_device *dev, int pipe)
+{
+	DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+
+	/* regester */
+	u32 pipeconf_reg = PIPEBCONF;
+	u32 pipesrc_reg = PIPEBSRC;
+	u32 vtot_reg = VTOTAL_B;
+	u32 vblank_reg = VBLANK_B;
+	u32 vsync_reg = VSYNC_B;
+
+	u32 dspcntr_reg = DSPBCNTR;
+	u32 dspstride_reg = DSPBSTRIDE;
+	u32 dsplinoff_reg = DSPBLINOFF;
+	u32 dspsize_reg = DSPBSIZE;
+	u32 dspsurf_reg = DSPBSURF;
+
+	u32 dspcntr_reg_d = DSPDCNTR;
+	u32 dspstride_reg_d = DSPDSTRIDE;
+	u32 dsplinoff_reg_d = DSPDLINOFF;
+	u32 dspsize_reg_d = DSPDSIZE;
+	u32 dspsurf_reg_d = DSPDSURF;
+
+	/* values */
+	u32 pipeconf_val = 0;
+	u32 pipesrc_val = 0;
+	u32 vtot_val = 0;
+	u32 vblank_val = 0;
+	u32 vsync_val = 0;
+
+	u32 dspcntr_val = 0;
+	u32 dspstride_val = 0;
+	u32 dsplinoff_val = 0;
+	u32 dspsize_val = 0;
+	u32 dspsurf_val = 0;
+
+	PSB_DEBUG_ENTRY("pipe = 0x%x\n", pipe);
+
+	/* Save the related DC registers. */
+	pipeconf_val = REG_READ(pipeconf_reg);
+	pipesrc_val = REG_READ(pipesrc_reg);
+	vtot_val = REG_READ(vtot_reg);
+	vblank_val = REG_READ(vblank_reg);
+	vsync_val = REG_READ(vsync_reg);
+
+	dspcntr_val = REG_READ(dspcntr_reg);
+	dspstride_val = REG_READ(dspstride_reg);
+	dsplinoff_val = REG_READ(dsplinoff_reg);
+	dspsize_val = REG_READ(dspsize_reg);
+	dspsurf_val = REG_READ(dspsurf_reg);
+
+	/* Disable pipe and port. */
+	mrfld_disable_crtc(dev, pipe, true);
+
+	/* Set up the pll, two times 2D clock. */
+	/* mrfld_setup_pll (dev, pipe, adjusted_mode->clock); */
+
+	/* Set up from either full Top-and-Bottom or side-by-side to
+	 * full side-by-side display 3D format.
+	 */
+
+	/* set up pipe related registers */
+	REG_WRITE(pipesrc_reg, (((pipesrc_val & 0x0000FFFF) + 1) * 2 - 1) |
+		  (pipesrc_val & 0xFFFF0000));
+	REG_WRITE(vtot_reg, (vtot_val + 0x00010001) * 2 - 0x00010001);
+	REG_WRITE(vblank_reg, (vblank_val + 0x00010001) * 2 - 0x00010001);
+	REG_WRITE(vsync_reg, (vsync_val + 0x00010001) * 2 - 0x00010001);
+
+	/* set up plane related registers */
+	REG_WRITE(dspsize_reg_d, dspsize_val);
+
+	/* need to figure out the stride, offset and start. */
+	REG_WRITE(dspstride_reg, dspstride_val * 2);
+	REG_WRITE(dspstride_reg_d, dspstride_val * 2);
+	REG_WRITE(dsplinoff_reg, dsplinoff_val);
+	REG_WRITE(dsplinoff_reg_d, dsplinoff_val + dspstride_val);
+	REG_WRITE(dspsurf_reg, dspsurf_val);
+	REG_WRITE(dspsurf_reg_d, dspsurf_val);
+
+	/* Try to attach/de-attach Plane B to an existing swap chain,
+	 * especially with another frame buffer inserted into GTT. */
+	/* eError = MRSTLFBChangeSwapChainProperty(&Start, pipe); */
+
+	/* send 3D info frame. */
+
+	/*enable the plane */
+	REG_WRITE(dspcntr_reg_d, dspcntr_val);
+	dspcntr_val &= ~(S3D_SPRITE_ORDER_BITS | S3D_SPRITE_INTERLEAVING_BITS);
+	dspcntr_val |= S3D_SPRITE_ORDER_A_FIRST | S3D_SPRITE_INTERLEAVING_LINE;
+	REG_WRITE(dspcntr_reg, dspcntr_val);
+	mdelay(20);		/* msleep(20); */
+	/* psb_intel_wait_for_vblank(dev); */
+
+	/*enable the pipe */
+	REG_WRITE(pipeconf_reg, pipeconf_val);
+
+	/* set up Vendor Specific InfoFrame for 3D format. */
+	if (pipe == 1)
+		mrfld_set_up_s3d_InfoFrame(dev, S3D_LINE_ALTERNATIVE);
+
+	return 0;
+}
+
+/**
+ * Perform display 3D mode set from full side-by-side to side-by-side 3D display.
+ *
+ */
+static int mrfld_s3d_side_by_side(struct drm_device *dev, int pipe)
+{
+	DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+
+	/* regester */
+	u32 pipeconf_reg = PIPEBCONF;
+	u32 pipesrc_reg = PIPEBSRC;
+	u32 htot_reg = HTOTAL_B;
+	u32 hblank_reg = HBLANK_B;
+	u32 hsync_reg = HSYNC_B;
+
+	u32 dspcntr_reg = DSPBCNTR;
+	u32 dspstride_reg = DSPBSTRIDE;
+	u32 dsplinoff_reg = DSPBLINOFF;
+	u32 dspsize_reg = DSPBSIZE;
+	u32 dspsurf_reg = DSPBSURF;
+
+	u32 dspcntr_reg_d = DSPDCNTR;
+	u32 dspstride_reg_d = DSPDSTRIDE;
+	u32 dsplinoff_reg_d = DSPDLINOFF;
+	u32 dspsize_reg_d = DSPDSIZE;
+	u32 dspsurf_reg_d = DSPDSURF;
+
+	/* values */
+	u32 pipeconf_val = 0;
+	u32 pipesrc_val = 0;
+	u32 htot_val = 0;
+	u32 hblank_val = 0;
+	u32 hsync_val = 0;
+
+	u32 dspcntr_val = 0;
+	u32 dspstride_val = 0;
+	u32 dsplinoff_val = 0;
+	u32 dspsize_val = 0;
+	u32 dspsurf_val = 0;
+
+	PSB_DEBUG_ENTRY("pipe = 0x%x\n", pipe);
+
+	/* Save the related DC registers. */
+	pipeconf_val = REG_READ(pipeconf_reg);
+	pipesrc_val = REG_READ(pipesrc_reg);
+	htot_val = REG_READ(htot_reg);
+	hblank_val = REG_READ(hblank_reg);
+	hsync_val = REG_READ(hsync_reg);
+
+	dspcntr_val = REG_READ(dspcntr_reg);
+	dspstride_val = REG_READ(dspstride_reg);
+	dsplinoff_val = REG_READ(dsplinoff_reg);
+	dspsize_val = REG_READ(dspsize_reg);
+	dspsurf_val = REG_READ(dspsurf_reg);
+
+	/* Disable pipe and port. */
+	mrfld_disable_crtc(dev, pipe, true);
+
+	/* Set up the pll, two times 2D clock. */
+	/* mrfld_setup_pll (dev, pipe, adjusted_mode->clock); */
+
+	/* Set up from full side-by-side to full side-by-side display 3D format. */
+
+	/* set up pipe related registers */
+	REG_WRITE(pipesrc_reg, (pipesrc_val & 0x0000FFFF) |
+		  (((pipesrc_val & 0xFFFF0000) + 0x00010000) * 2 - 0x00010000));
+	REG_WRITE(htot_reg, (htot_val + 0x00010001) * 2 - 0x00010001);
+	REG_WRITE(hblank_reg, (hblank_val + 0x00010001) * 2 - 0x00010001);
+	REG_WRITE(hsync_reg, (hsync_val + 0x00010001) * 2 - 0x00010001);
+
+	/* set up plane related registers */
+	REG_WRITE(dspsize_reg_d, dspsize_val);
+
+	/* need to figure out the stride, offset and start. */
+	REG_WRITE(dspstride_reg, dspstride_val * 2);
+	REG_WRITE(dspstride_reg_d, dspstride_val * 2);
+	REG_WRITE(dsplinoff_reg, dsplinoff_val);
+	REG_WRITE(dsplinoff_reg_d, dsplinoff_val + dspstride_val);
+	REG_WRITE(dspsurf_reg, dspsurf_val);
+	REG_WRITE(dspsurf_reg_d, dspsurf_val);
+
+	/* Try to attach/de-attach Plane B to an existing swap chain,
+	 * especially with another frame buffer inserted into GTT. */
+	/* eError = MRSTLFBChangeSwapChainProperty(&Start, pipe); */
+
+	/* send 3D info frame. */
+
+	/*enable the plane */
+	REG_WRITE(dspcntr_reg_d, dspcntr_val);
+	dspcntr_val &= ~(S3D_SPRITE_ORDER_BITS | S3D_SPRITE_INTERLEAVING_BITS);
+	dspcntr_val |= S3D_SPRITE_ORDER_A_FIRST | S3D_SPRITE_INTERLEAVING_LINE;
+	REG_WRITE(dspcntr_reg, dspcntr_val);
+	mdelay(20);		/* msleep(20); */
+	/* psb_intel_wait_for_vblank(dev); */
+
+	/*enable the pipe */
+	REG_WRITE(pipeconf_reg, pipeconf_val);
+
+	/* set up Vendor Specific InfoFrame for 3D format. */
+	if (pipe == 1)
+		mrfld_set_up_s3d_InfoFrame(dev, S3D_SIDE_BY_SIDE_FULL);
+
+	return 0;
+}
+
+/**
+ * Perform display 3D mode set from half side-by-side to half side-by-side 3D display.
+ *
+ */
+static int mrfld_s3d_half_side_by_side(struct drm_device *dev, int pipe)
+{
+	DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+
+	/* regester */
+	u32 dspsurf_reg = DSPASURF;
+
+	/* values */
+	u32 dspsurf_val = 0;
+
+	PSB_DEBUG_ENTRY("pipe = 0x%x\n", pipe);
+
+	dspsurf_val = REG_READ(dspsurf_reg);
+
+	/* need to figure out the start. */
+	REG_WRITE(dspsurf_reg, dspsurf_val);
+
+	/* Try to attach/de-attach Plane B to an existing swap chain,
+	 * especially with another frame buffer inserted into GTT. */
+	/* eError = MRSTLFBChangeSwapChainProperty(&Start, pipe); */
+
+	/* set up Vendor Specific InfoFrame for 3D format. */
+	if (pipe == 1)
+		mrfld_set_up_s3d_InfoFrame(dev, S3D_SIDE_BY_SIDE_HALF);
+
+	return 0;
+}
+
+/**
+ * Perform display 3D mode set from full Top_Bottom to frame packing 3D display.
+ *
+ */
+static int mrfld_s3d_frame_packing(struct drm_device *dev, int pipe)
+{
+	DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+
+	/* regester */
+	u32 pipeconf_reg = PIPEBCONF;
+	u32 pipesrc_reg = PIPEBSRC;
+	u32 vtot_reg = VTOTAL_B;
+	u32 vblank_reg = VBLANK_B;
+	u32 vsync_reg = VSYNC_B;
+
+	u32 dspcntr_reg = DSPBCNTR;
+	u32 dspstride_reg = DSPBSTRIDE;
+	u32 dsplinoff_reg = DSPBLINOFF;
+	u32 dspsize_reg = DSPBSIZE;
+	u32 dspsurf_reg = DSPBSURF;
+
+	/* values */
+	u32 pipeconf_val = 0;
+	u32 pipesrc_val = 0;
+	u32 vtot_val = 0;
+	u32 vblank_val = 0;
+	u32 vsync_val = 0;
+
+	u32 dspcntr_val = 0;
+	u32 dspstride_val = 0;
+	u32 dsplinoff_val = 0;
+	u32 dspsize_val = 0;
+	u32 dspsurf_val = 0;
+	u32 temp = 0;
+	u32 temp1 = 0;
+	u32 temp2 = 0;
+	u32 temp3 = 0;
+
+	PSB_DEBUG_ENTRY("pipe = 0x%x \n", pipe);
+
+	if (pipe != 1) {
+		DRM_ERROR("Illegal Pipe Number. \n");
+		return 0;
+	}
+
+	/* Save the related DC registers. */
+	pipeconf_val = REG_READ(pipeconf_reg);
+	pipesrc_val = REG_READ(pipesrc_reg);
+	vtot_val = REG_READ(vtot_reg);
+	vblank_val = REG_READ(vblank_reg);
+	vsync_val = REG_READ(vsync_reg);
+
+	dspcntr_val = REG_READ(dspcntr_reg);
+	dspstride_val = REG_READ(dspstride_reg);
+	dsplinoff_val = REG_READ(dsplinoff_reg);
+	dspsize_val = REG_READ(dspsize_reg);
+	dspsurf_val = REG_READ(dspsurf_reg);
+
+	/* Disable pipe and port. */
+	mrfld_disable_crtc(dev, pipe, true);
+
+	/* Set up the pll, two times 2D clock. */
+	/* mrfld_setup_pll (dev, pipe, adjusted_mode->clock); */
+
+	/* Set up frame packing display 3D format. */
+
+	/* set up pipe related registers */
+	REG_WRITE(pipesrc_reg, (((pipesrc_val & 0x0000FFFF) + 1) * 2 - 1) |
+		  (pipesrc_val & 0xFFFF0000));
+
+	/* Get the 3D Vactive. */
+	temp = ((vtot_val & 0xFFFF0000) >> 16) - (vtot_val & 0x0000FFFF);
+	temp += ((vtot_val & 0x0000FFFF) + 1) * 2 - 1;
+
+	/* Get the 3D Vtotal. */
+	temp1 = ((vtot_val & 0xFFFF0000) + 0x00010000) * 2 - 0x00010000;
+
+	REG_WRITE(vtot_reg, temp1 | temp);
+
+	/* Get the 3D Vblank. */
+	temp2 = (vblank_val & 0x0000FFFF) - (vtot_val & 0x0000FFFF);
+	temp3 = (vtot_val & 0xFFFF0000) - (vblank_val & 0xFFFF0000);
+	REG_WRITE(vblank_reg, (temp1 - temp3) | (temp + temp2));
+
+	/* Get the 3D Vsync */
+	temp2 = (vsync_val & 0x0000FFFF) - (vtot_val & 0x0000FFFF);
+	temp3 = (vtot_val & 0xFFFF0000) - (vsync_val & 0xFFFF0000);
+	REG_WRITE(vblank_reg, (temp1 - temp3) | (temp + temp2));
+
+	/* set up plane related registers */
+	REG_WRITE(dspsize_reg, (((dspsize_val & 0xFFFF0000) + 0x00010000) * 2 -
+				0x00010000) | (dspsize_val & 0x0000FFFF));
+
+	/* need to figure out the offset and start. */
+	REG_WRITE(dsplinoff_reg, dsplinoff_val);
+	REG_WRITE(dspsurf_reg, dspsurf_val);
+
+	/* Try to attach/de-attach Plane B to an existing swap chain,
+	 * especially with another frame buffer inserted into GTT. */
+	/* eError = MRSTLFBChangeSwapChainProperty(&Start, pipe); */
+
+	/* send 3D info frame. */
+
+	/*enable the plane */
+	REG_WRITE(dspcntr_reg, dspcntr_val);
+	mdelay(20);		/* msleep(20); */
+	/* psb_intel_wait_for_vblank(dev); */
+
+	/*enable the pipe */
+	REG_WRITE(pipeconf_reg, pipeconf_val);
+
+	/* set up Vendor Specific InfoFrame for 3D format. */
+	if (pipe == 1)
+		mrfld_set_up_s3d_InfoFrame(dev, S3D_FRAME_PACKING);
+
+	return 0;
+}
+
+/**
+ * Perform display 3D mode set from half Top_Bottom to line interleaving 3D display.
+ *
+ */
+static int mrfld_s3d_half_top_bottom_to_line_interleave(struct drm_device *dev,
+							int pipe)
+{
+	DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+	struct mdfld_dsi_config *dsi_config = dev_priv->dsi_configs[0];
+
+	/* regester */
+	u32 pipeconf_reg = PIPEACONF;
+
+	u32 dspcntr_reg = DSPACNTR;
+	u32 dspstride_reg = DSPASTRIDE;
+	u32 dsplinoff_reg = DSPALINOFF;
+	u32 dspsize_reg = DSPASIZE;
+	u32 dspsurf_reg = DSPASURF;
+	u32 mipi_reg = MIPI;
+
+	u32 dspcntr_reg_d = DSPDCNTR;
+	u32 dspstride_reg_d = DSPDSTRIDE;
+	u32 dsplinoff_reg_d = DSPDLINOFF;
+	u32 dspsize_reg_d = DSPDSIZE;
+	u32 dspsurf_reg_d = DSPDSURF;
+
+	/* values */
+	u32 pipeconf_val = 0;
+
+	u32 dspcntr_val = 0;
+	u32 dspstride_val = 0;
+	u32 dsplinoff_val = 0;
+	u32 dspsize_val = 0;
+	u32 dspsurf_val = 0;
+	u32 mipi_val = 0;
+
+	PSB_DEBUG_ENTRY("pipe = 0x%x \n", pipe);
+
+	switch (pipe) {
+	case 0:
+		break;
+	case 2:
+		dsi_config = dev_priv->dsi_configs[1];
+		pipeconf_reg = PIPECCONF;
+		dspcntr_reg = DSPCCNTR;
+		dspstride_reg = DSPCSTRIDE;
+		dsplinoff_reg = DSPCLINOFF;
+		dspsize_reg = DSPCSIZE;
+		dspsurf_reg = DSPCSURF;
+		mipi_reg = MIPI_C;
+		break;
+	default:
+		DRM_ERROR("Illegal Pipe Number. \n");
+		return 0;
+	}
+
+	/* Save the related DC registers. */
+	pipeconf_val = REG_READ(pipeconf_reg);
+
+	dspcntr_val = REG_READ(dspcntr_reg);
+	dspstride_val = REG_READ(dspstride_reg);
+	dsplinoff_val = REG_READ(dsplinoff_reg);
+	dspsize_val = REG_READ(dspsize_reg);
+	dspsurf_val = REG_READ(dspsurf_reg);
+	mipi_val = REG_READ(mipi_reg);
+
+	/* Disable pipe and port, don't disable the PLL. */
+	/* FIXME modify the following function with option to disable PLL or
+	 *  not.
+	 *  */
+	mrfld_disable_crtc(dev, pipe, true);
+
+	/* Set up line interleaving display 3D format. */
+	REG_WRITE(dspsize_reg, (((dspsize_val & 0xFFFF0000) + 0x00010000) / 2 -
+				0x00010000) | (dspsize_val & 0x0000FFFF));
+	REG_WRITE(dspsize_reg_d,
+		  (((dspsize_val & 0xFFFF0000) + 0x00010000) / 2 -
+		   0x00010000) | (dspsize_val & 0x0000FFFF));
+	REG_WRITE(dspstride_reg_d, dspstride_val);
+
+	/* need to figure out the offset and start. */
+	REG_WRITE(dsplinoff_reg, dsplinoff_val);
+	REG_WRITE(dsplinoff_reg_d, dsplinoff_val
+		  + (((dspsize_val >> 16) + 1) / 2) * dspstride_val);
+	REG_WRITE(dspsurf_reg, dspsurf_val);
+	REG_WRITE(dspsurf_reg_d, dspsurf_val);
+
+	/* Try to attach/de-attach Plane B to an existing swap chain,
+	 * especially with another frame buffer inserted into GTT. */
+	/* eError = MRSTLFBChangeSwapChainProperty(&Start, pipe); */
+
+	/*set up mipi port related registers */
+	REG_WRITE(mipi_reg, mipi_val);
+
+	/*setup MIPI adapter + MIPI IP registers */
+	/* mdfld_dsi_controller_init(dsi_config, pipe); */
+	mdelay(20);		/* msleep(20); */
+
+	/* re-init the panel */
+	dsi_config->drv_ic_inited = 0;
+	/* mdfld_dsi_tmd_drv_ic_init(dsi_config, pipe); */
+
+	/*enable the plane */
+	REG_WRITE(dspcntr_reg_d, dspcntr_val);
+	dspcntr_val &= ~(S3D_SPRITE_ORDER_BITS | S3D_SPRITE_INTERLEAVING_BITS);
+	dspcntr_val |= S3D_SPRITE_ORDER_A_FIRST | S3D_SPRITE_INTERLEAVING_LINE;
+	REG_WRITE(dspcntr_reg, dspcntr_val);
+	mdelay(20);		/* msleep(20); */
+	/* psb_intel_wait_for_vblank(dev); */
+
+	/*enable the pipe */
+	REG_WRITE(pipeconf_reg, pipeconf_val);
+
+	/* set up Vendor Specific InfoFrame for 3D format. */
+	if (pipe == 1)
+		mrfld_set_up_s3d_InfoFrame(dev, S3D_LINE_ALTERNATIVE);
+
+	return 0;
+}
+
+/* MDFLD_PLATFORM start */
+void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe)
+{
+	int count, temp;
+	u32 pipeconf_reg = PIPEACONF;
+
+	switch (pipe) {
+	case 0:
+		break;
+	case 1:
+		pipeconf_reg = PIPEBCONF;
+		break;
+	case 2:
+		pipeconf_reg = PIPECCONF;
+		break;
+	default:
+		DRM_ERROR("Illegal Pipe Number. \n");
+		return;
+	}
+
+	/* Wait for for the pipe disable to take effect. */
+	for (count = 0; count < COUNT_MAX; count++) {
+		temp = REG_READ(pipeconf_reg);
+		if (!(temp & PIPEACONF_PIPE_STATE))
+			break;
+
+		udelay(20);
+	}
+
+	PSB_DEBUG_ENTRY("cout = %d. \n", count);
+}
+
+void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe)
+{
+	int count, temp;
+	u32 pipeconf_reg = PIPEACONF;
+
+	switch (pipe) {
+	case 0:
+		break;
+	case 1:
+		pipeconf_reg = PIPEBCONF;
+		break;
+	case 2:
+		pipeconf_reg = PIPECCONF;
+		break;
+	default:
+		DRM_ERROR("Illegal Pipe Number. \n");
+		return;
+	}
+
+	/* Wait for for the pipe enable to take effect. */
+	for (count = 0; count < COUNT_MAX; count++) {
+		temp = REG_READ(pipeconf_reg);
+		if ((temp & PIPEACONF_PIPE_STATE))
+			break;
+
+		udelay(20);
+	}
+
+	PSB_DEBUG_ENTRY("cout = %d. \n", count);
+}
+
+static int mdfld_intel_crtc_cursor_set(struct drm_crtc *crtc,
+				       struct drm_file *file_priv,
+				       uint32_t handle,
+				       uint32_t width, uint32_t height)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *)dev->dev_private;
+	struct psb_gtt *pg = dev_priv->pg;
+	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+	struct psb_intel_mode_device *mode_dev = psb_intel_crtc->mode_dev;
+	int pipe = psb_intel_crtc->pipe;
+	uint32_t control = CURACNTR;
+	uint32_t base = CURABASE;
+	uint32_t temp;
+	size_t addr = 0;
+	uint32_t page_offset;
+	size_t size;
+	void *bo;
+	int ret;
+
+	DRM_DEBUG("\n");
+
+#ifdef CONFIG_SUPPORT_MIPI
+	switch (pipe) {
+	case 0:
+		break;
+	case 1:
+		control = CURBCNTR;
+		base = CURBBASE;
+		break;
+	case 2:
+		control = CURCCNTR;
+		base = CURCBASE;
+		break;
+	default:
+		DRM_ERROR("Illegal Pipe Number. \n");
+		return -EINVAL;
+	}
+
+	/* Can't enalbe HW cursor on plane B/C. */
+	if (pipe != 0)
+		return 0;
+
+	/* if we want to turn of the cursor ignore width and height */
+	if (!handle) {
+		DRM_DEBUG("cursor off\n");
+		/* turn off the cursor */
+		temp = 0;
+		temp |= CURSOR_MODE_DISABLE;
+
+		REG_WRITE(control, temp);
+		REG_WRITE(base, 0);
+
+		/* unpin the old bo */
+		if (psb_intel_crtc->cursor_bo) {
+			mode_dev->bo_unpin_for_scanout(dev,
+						       psb_intel_crtc->
+						       cursor_bo);
+			psb_intel_crtc->cursor_bo = NULL;
+		}
+		return 0;
+	}
+
+	/* Currently we only support 64x64 cursors */
+	if (width != 64 || height != 64) {
+		DRM_ERROR("we currently only support 64x64 cursors\n");
+		return -EINVAL;
+	}
+
+	bo = mode_dev->bo_from_handle(dev, file_priv, handle);
+	if (!bo)
+		return -ENOENT;
+
+	ret = mode_dev->bo_pin_for_scanout(dev, bo);
+	if (ret)
+		return ret;
+	size = mode_dev->bo_size(dev, bo);
+	if (size < width * height * 4) {
+		DRM_ERROR("buffer is to small\n");
+		return -ENOMEM;
+	}
+
+	/*insert this bo into gtt */
+//        DRM_INFO("%s: map meminfo for hw cursor. handle %x, pipe = %d \n", __FUNCTION__, handle, pipe);
+
+	ret = psb_gtt_map_meminfo(dev, (IMG_HANDLE) handle, 0, &page_offset);
+	if (ret) {
+		DRM_ERROR("Can not map meminfo to GTT. handle 0x%x\n", handle);
+		return ret;
+	}
+
+	addr = page_offset << PAGE_SHIFT;
+
+	psb_intel_crtc->cursor_addr = addr;
+
+	temp = 0;
+	/* set the pipe for the cursor */
+	temp |= (pipe << 28);
+	temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
+
+	REG_WRITE(control, temp);
+	REG_WRITE(base, addr);
+
+	/* unpin the old bo */
+	if (psb_intel_crtc->cursor_bo && psb_intel_crtc->cursor_bo != bo) {
+		mode_dev->bo_unpin_for_scanout(dev, psb_intel_crtc->cursor_bo);
+		psb_intel_crtc->cursor_bo = bo;
+	}
+#endif
+
+	return 0;
+}
+
+static struct drm_device globle_dev;
+
+void mdfld__intel_plane_set_alpha(int enable)
+{
+	struct drm_device *dev = &globle_dev;
+	int dspcntr_reg = DSPACNTR;
+	u32 dspcntr;
+
+	dspcntr = REG_READ(dspcntr_reg);
+
+	if (enable) {
+		dspcntr &= ~DISPPLANE_32BPP_NO_ALPHA;
+		dspcntr |= DISPPLANE_32BPP;
+	} else {
+		dspcntr &= ~DISPPLANE_32BPP;
+		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+	}
+
+	REG_WRITE(dspcntr_reg, dspcntr);
+}
+
+/**
+ * Disable the pipe, plane and pll.
+ *
+ */
+void mdfld_disable_crtc(struct drm_device *dev, int pipe)
+{
+	int dpll_reg = MRST_DPLL_A;
+	int dspcntr_reg = DSPACNTR;
+	int dspbase_reg = MRST_DSPABASE;
+	int pipeconf_reg = PIPEACONF;
+	u32 gen_fifo_stat_reg = GEN_FIFO_STAT_REG;
+	u32 temp;
+
+	PSB_DEBUG_ENTRY("pipe = %d\n", pipe);
+
+	switch (pipe) {
+#ifdef CONFIG_SUPPORT_MIPI
+	case 0:
+		break;
+#endif
+	case 1:
+		dpll_reg = MDFLD_DPLL_B;
+		dspcntr_reg = DSPBCNTR;
+		dspbase_reg = DSPBSURF;
+		pipeconf_reg = PIPEBCONF;
+		break;
+#ifdef CONFIG_SUPPORT_MIPI
+	case 2:
+		dpll_reg = MRST_DPLL_A;
+		dspcntr_reg = DSPCCNTR;
+		dspbase_reg = MDFLD_DSPCBASE;
+		pipeconf_reg = PIPECCONF;
+		gen_fifo_stat_reg = GEN_FIFO_STAT_REG + MIPIC_REG_OFFSET;
+		break;
+#endif
+	default:
+		DRM_ERROR("Illegal Pipe Number.\n");
+		return;
+	}
+
+#ifdef CONFIG_SUPPORT_MIPI
+	if (pipe != 1)
+		mdfld_dsi_gen_fifo_ready(dev, gen_fifo_stat_reg,
+					 HS_CTRL_FIFO_EMPTY |
+					 HS_DATA_FIFO_EMPTY);
+#endif
+
+	/* Disable display plane */
+	temp = REG_READ(dspcntr_reg);
+	if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+		REG_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE);
+		/* Flush the plane changes */
+		REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+		REG_READ(dspbase_reg);
+	}
+
+	/* Next, disable display pipes */
+	temp = REG_READ(pipeconf_reg);
+	if ((temp & PIPEACONF_ENABLE) != 0) {
+		temp &= ~PIPEACONF_ENABLE;
+		temp |= PIPECONF_PLANE_OFF | PIPECONF_CURSOR_OFF;
+		REG_WRITE(pipeconf_reg, temp);
+		REG_READ(pipeconf_reg);
+
+		/* Wait for for the pipe disable to take effect. */
+		mdfldWaitForPipeDisable(dev, pipe);
+	}
+
+	temp = REG_READ(dpll_reg);
+	if (temp & DPLL_VCO_ENABLE) {
+		if (((pipe != 1)
+		     && !((REG_READ(PIPEACONF) | REG_READ(PIPECCONF)) &
+			  PIPEACONF_ENABLE))
+		    || (pipe == 1)) {
+			temp &= ~(DPLL_VCO_ENABLE);
+			REG_WRITE(dpll_reg, temp);
+			REG_READ(dpll_reg);
+			/* Wait for the clocks to turn off. */
+			/* FIXME_MDFLD PO may need more delay */
+			udelay(500);
+
+			if (!(temp & MDFLD_PWR_GATE_EN)) {
+				/* gating power of DPLL */
+				REG_WRITE(dpll_reg, temp | MDFLD_PWR_GATE_EN);
+				/* FIXME_MDFLD PO - change 500 to 1 after PO */
+				udelay(5000);
+			}
+		}
+	}
+
+}
+
+/* MDFLD_PLATFORM end */
+#endif
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/mrfld_display.h b/drivers/external_drivers/intel_media/display/tng/drv/mrfld_display.h
new file mode 100644
index 0000000..1a01498
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/mrfld_display.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright © 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *	Jim Liu <jim.liu@intel.com>
+ */
+
+#ifndef _MRFLD_DISPLAY_H_
+#define _MRFLD_DISPLAY_H_
+
+/* Functions will be deleted after simulated on MDFLD_PLATFORM */
+
+#endif
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/mrfld_s3d.h b/drivers/external_drivers/intel_media/display/tng/drv/mrfld_s3d.h
new file mode 100644
index 0000000..ee92d21
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/mrfld_s3d.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2011, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *	jim liu <jim.liu@intel.com>
+ */
+
+#ifndef MRFLD_S3D_H
+#define MRFLD_S3D_H
+
+int mrfld_s3d_query(struct drm_device *dev, struct drm_psb_s3d_query
+		    *s3d_query);
+int mrfld_hdmi_s3d_query(struct drm_device *dev, struct drm_psb_s3d_query
+			 *s3d_query);
+#endif				/* MRFLD_S3D_H */
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/ospm/dc_maxfifo.c b/drivers/external_drivers/intel_media/display/tng/drv/ospm/dc_maxfifo.c
new file mode 100644
index 0000000..4da98ff
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/ospm/dc_maxfifo.c
@@ -0,0 +1,483 @@
+/**************************************************************************
+ * Copyright (c) 2012, Intel Corporation.
+ * All Rights Reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *   Vinil Cheeramvelill <vinil.cheeramvelil@intel.com>
+ */
+
+#include "displayclass_interface.h"
+#include "dc_maxfifo.h"
+#include "psb_drv.h"
+#include "psb_intel_reg.h"
+
+#include <linux/device.h>
+#include <linux/intel_mid_pm.h>
+
+#define MAXFIFO_IDLE_FRAME_COUNT	10
+
+/* When the Display controller buffer is above the high watermark
+ * the display controller has enough data and does not need to
+ * fetch any more data from memory till the low watermark point
+ * is reached
+ * Register layout
+ *	Bits  9:0	- Low watermark
+ *	Bits 19:10	- High watermark
+ * The Watermark values represent 64 bytes of space. So a high
+ * watermark value of 0x200 in bits 19:10 is
+ * 0x200 X 64 = 512 X 64 = 32768 bytes
+ */
+#define MAXFIFO_TNG_SYSFS_GROUP_NAME	"dc_maxfifo"
+
+#define MAXFIFO_HIGH_WATERMARK		(0x200<<10)
+#define MAXFIFO_LOW_WATEMARK		(0x100<<0)
+
+#define TNG_DSPSRCTRL_DEFAULT	(MAXFIFO_LOW_WATEMARK |\
+				MAXFIFO_HIGH_WATERMARK |\
+				DSPSRCTRL_MAXFIFO_MODE_ALWAYS_MAXFIFO |\
+				DSPSRCTRL_MAXFIFO_ENABLE)
+
+#define DC_MAXFIFO_REGSTOSET_DSPSRCTRL_ENABLE	0x1
+#define DC_MAXFIO_REGSTOSET_DSPSSM_S0i1_DISP	0x2
+#define DC_MAXFIFO_REGSTOSET_DSPSRCTRL_MAXFIFO	0x4
+
+#define TNG_MAXFIFO_REGS_TO_SET_DEFAULT  (DC_MAXFIFO_REGSTOSET_DSPSRCTRL_ENABLE |\
+		DC_MAXFIFO_REGSTOSET_DSPSRCTRL_MAXFIFO |\
+		DC_MAXFIO_REGSTOSET_DSPSSM_S0i1_DISP )
+
+typedef enum {
+	S0i1_DISP_STATE_NOT_READY = 0,
+	S0i1_DISP_STATE_READY,
+	S0i1_DISP_STATE_ENTERED
+} S0i1_DISP_STATE;
+
+struct dc_maxfifo {
+	struct mutex maxfifo_mtx;
+
+	struct drm_device	*dev_drm;
+	bool repeat_frame_interrupt_on;
+	int  regs_to_set;
+	S0i1_DISP_STATE s0i1_disp_state;
+	struct work_struct repeat_frame_interrupt_work;
+};
+
+/* Sysfs Entries for Maxfifo mode
+ */
+static ssize_t _show_sysfs_enable (struct device *kdev,
+					struct device_attribute *attr,
+					char *buf);
+static ssize_t _store_sysfs_enable (struct device *kdev,
+					struct device_attribute *attr,
+					const char *buf, size_t count);
+static ssize_t _show_sysfs_state (struct device *kdev,
+					struct device_attribute *attr, char *buf);
+static inline bool _maxfifo_create_sysfs_entries(struct drm_device * dev);
+
+#ifndef ENABLE_HW_REPEAT_FRAME
+int maxfifo_entry_delay = 150;
+EXPORT_SYMBOL(maxfifo_entry_delay);
+module_param_named(maxfifo_delay, maxfifo_entry_delay, int, 0600);
+
+static void maxfifo_timer_func(unsigned long data)
+{
+	maxfifo_report_repeat_frame_interrupt((struct drm_device *)data);
+}
+
+static int maxfifo_timer_init(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct timer_list *maxfifo_timer = &dev_priv->maxfifo_timer;
+
+	init_timer(maxfifo_timer);
+
+	maxfifo_timer->data = (unsigned long)dev;
+	maxfifo_timer->function = maxfifo_timer_func;
+	maxfifo_timer->expires = jiffies + maxfifo_entry_delay*HZ/1000;
+
+	return 0;
+}
+
+void maxfifo_timer_start(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct timer_list *maxfifo_timer = &dev_priv->maxfifo_timer;
+	struct dc_maxfifo * maxfifo_info =
+		(struct dc_maxfifo *)
+		((struct drm_psb_private *)dev->dev_private)->dc_maxfifo_info;
+
+	if(maxfifo_info->repeat_frame_interrupt_on)
+		mod_timer(maxfifo_timer, jiffies + maxfifo_entry_delay*HZ/1000);
+}
+
+void maxfifo_timer_stop(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	del_timer(&dev_priv->maxfifo_timer);
+}
+#endif
+
+
+void enable_repeat_frame_intr(struct drm_device *dev)
+{
+#ifndef ENABLE_HW_REPEAT_FRAME
+	struct dc_maxfifo * maxfifo_info =
+		(struct dc_maxfifo *)
+		((struct drm_psb_private *)dev->dev_private)->dc_maxfifo_info;
+
+	maxfifo_info->repeat_frame_interrupt_on = true;
+#else
+	if (power_island_get(OSPM_DISPLAY_A)) {
+		mrfl_enable_repeat_frame_intr(dev, MAXFIFO_IDLE_FRAME_COUNT);
+		power_island_put(OSPM_DISPLAY_A);
+	}
+#endif
+}
+
+void disable_repeat_frame_intr(struct drm_device *dev)
+{
+#ifndef ENABLE_HW_REPEAT_FRAME
+	struct dc_maxfifo * maxfifo_info =
+		(struct dc_maxfifo *)
+		((struct drm_psb_private *)dev->dev_private)->dc_maxfifo_info;
+
+	maxfifo_info->repeat_frame_interrupt_on = false;
+#else
+	if (power_island_get(OSPM_DISPLAY_A)) {
+		mrfl_disable_repeat_frame_intr(dev);
+		power_island_put(OSPM_DISPLAY_A);
+	}
+#endif
+}
+
+static void _maxfifo_send_hwc_uevent(struct drm_device * dev)
+{
+	char *event_string = "REPEATED_FRAME";
+	char *envp[] = { event_string, NULL };
+	PSB_DEBUG_PM("Sending uevent to HWC\n");
+
+	kobject_uevent_env(&dev->primary->kdev.kobj,
+				KOBJ_CHANGE, envp);
+}
+
+static void _maxfifo_send_hwc_event_work(struct work_struct *work)
+{
+	struct dc_maxfifo * maxfifo_info =
+		container_of(work, struct dc_maxfifo, repeat_frame_interrupt_work);
+	 _maxfifo_send_hwc_uevent(maxfifo_info->dev_drm);
+
+}
+
+void maxfifo_report_repeat_frame_interrupt(struct drm_device * dev)
+{
+	struct dc_maxfifo * maxfifo_info =
+		(struct dc_maxfifo *)
+		((struct drm_psb_private *)dev->dev_private)->dc_maxfifo_info;
+#ifdef ENABLE_HW_REPEAT_FRAME
+	mrfl_disable_repeat_frame_intr(dev);
+#else
+	if (maxfifo_info)
+		maxfifo_info->repeat_frame_interrupt_on = false;
+#endif
+	if (maxfifo_info)
+		schedule_work(&maxfifo_info->repeat_frame_interrupt_work);
+}
+
+bool enter_s0i1_display_mode(struct drm_device *dev)
+{
+	struct drm_psb_private * dev_priv = dev->dev_private;
+	struct dc_maxfifo * maxfifo_info =
+		(struct dc_maxfifo *) dev_priv->dc_maxfifo_info;
+
+
+	if (maxfifo_info &&
+		(maxfifo_info->s0i1_disp_state == S0i1_DISP_STATE_READY)){
+
+		pmu_set_s0i1_disp_vote(true);
+/*
+		u32 dsp_ss_pm_val;
+
+		dsp_ss_pm_val = intel_mid_msgbus_read32(PUNIT_PORT, DSP_SS_PM);
+		dsp_ss_pm_val |= PUNIT_DSPSSPM_ENABLE_S0i1_DISPLAY;
+		intel_mid_msgbus_write32(PUNIT_PORT, DSP_SS_PM, dsp_ss_pm_val);
+*/
+		maxfifo_info->s0i1_disp_state = S0i1_DISP_STATE_ENTERED;
+		PSB_DEBUG_PM("Enabled S0i1-Display Punit DSPSSMP Register\n");
+	}
+
+	return true;
+}
+
+bool exit_s0i1_display_mode(struct drm_device *dev)
+{
+	struct drm_psb_private * dev_priv = dev->dev_private;
+	struct dc_maxfifo * maxfifo_info =
+		(struct dc_maxfifo *) dev_priv->dc_maxfifo_info;
+	if (maxfifo_info &&
+		(maxfifo_info->s0i1_disp_state == S0i1_DISP_STATE_ENTERED)){
+
+		pmu_set_s0i1_disp_vote(false);
+/*
+		u32 dsp_ss_pm_val;
+
+		dsp_ss_pm_val = intel_mid_msgbus_read32(PUNIT_PORT, DSP_SS_PM);
+		dsp_ss_pm_val &= ~PUNIT_DSPSSPM_ENABLE_S0i1_DISPLAY;
+		intel_mid_msgbus_write32(PUNIT_PORT, DSP_SS_PM, dsp_ss_pm_val);
+*/
+		maxfifo_info->s0i1_disp_state = S0i1_DISP_STATE_NOT_READY;
+		PSB_DEBUG_PM(" Disabled S0i1-Display in Punit DSPSSMP Register\n");
+	}
+
+	return true;
+}
+
+bool enter_maxfifo_mode(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	struct dc_maxfifo * maxfifo_info = dev_priv->dc_maxfifo_info;
+	u32 dspsrctrl_val = MAXFIFO_LOW_WATEMARK | MAXFIFO_HIGH_WATERMARK;
+	u32 regs_to_set;
+
+	if (!maxfifo_info)
+		return false;
+
+#ifndef ENABLE_HW_REPEAT_FRAME
+	maxfifo_timer_stop(dev);
+#endif
+	mutex_lock(&maxfifo_info->maxfifo_mtx);
+	regs_to_set = maxfifo_info->regs_to_set;
+
+	if (dev_priv->psb_dpst_state)
+		psb_irq_disable_dpst(dev);
+
+	if (power_island_get(OSPM_DISPLAY_A)) {
+		if (regs_to_set & DC_MAXFIFO_REGSTOSET_DSPSRCTRL_ENABLE) {
+			dspsrctrl_val |= DSPSRCTRL_MAXFIFO_ENABLE;
+			if (regs_to_set & DC_MAXFIFO_REGSTOSET_DSPSRCTRL_MAXFIFO)
+				dspsrctrl_val |= DSPSRCTRL_MAXFIFO_MODE_ALWAYS_MAXFIFO;
+			PSB_WVDC32(dspsrctrl_val, DSPSRCTRL_REG);
+		}
+
+		if (regs_to_set & DC_MAXFIO_REGSTOSET_DSPSSM_S0i1_DISP) {
+			unsigned long irqflags;
+			spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+			maxfifo_info->s0i1_disp_state = S0i1_DISP_STATE_READY;
+			spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+
+		}
+		PSB_DEBUG_PM("S0i1-Display-ENABLE : Reg DSPSRCTRL = %08x, "
+				"DSP_SS_PM = %08x\n", PSB_RVDC32(DSPSRCTRL_REG),
+				intel_mid_msgbus_read32(PUNIT_PORT, DSP_SS_PM));
+		power_island_put(OSPM_DISPLAY_A);
+	}
+	mutex_unlock(&maxfifo_info->maxfifo_mtx);
+	return true;
+}
+
+bool exit_maxfifo_mode(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	struct dc_maxfifo *maxfifo_info = dev_priv->dc_maxfifo_info;
+	u32 dspsrctrl_val = MAXFIFO_LOW_WATEMARK | MAXFIFO_HIGH_WATERMARK;
+
+	if (!maxfifo_info)
+		return false;
+
+	if (dev_priv->psb_dpst_state)
+		psb_irq_enable_dpst(dev);
+
+	mutex_lock(&maxfifo_info->maxfifo_mtx);
+	if (power_island_get(OSPM_DISPLAY_A)) {
+		unsigned long irqflags;
+		PSB_WVDC32(dspsrctrl_val, DSPSRCTRL_REG);
+
+		spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+		exit_s0i1_display_mode(dev);
+		// ? maxfifo_info->s0i1_disp_state = S0i1_DISP_STATE_NOT_READY;
+		spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+		PSB_DEBUG_PM("S0i1-Display-DISABLE : Reg DSPSRCTRL = %08x, "
+				"DSP_SS_PM = %08x\n", PSB_RVDC32(DSPSRCTRL_REG),
+				intel_mid_msgbus_read32(PUNIT_PORT, DSP_SS_PM));
+		power_island_put(OSPM_DISPLAY_A);
+	}
+	mutex_unlock(&maxfifo_info->maxfifo_mtx);
+	return true;
+}
+
+int dc_maxfifo_init(struct drm_device *dev)
+{
+	struct dc_maxfifo * maxfifo_info = NULL;
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *)dev->dev_private;
+
+	if (dev_priv->dc_maxfifo_info)
+		return true;
+
+	dev_priv->dc_maxfifo_info =
+		kzalloc(sizeof(struct dc_maxfifo), GFP_KERNEL);
+
+	if (!dev_priv->dc_maxfifo_info) {
+		DRM_ERROR("No memory\n");
+		return -ENOMEM;
+	}
+	maxfifo_info = dev_priv->dc_maxfifo_info;
+
+	mutex_init(&maxfifo_info->maxfifo_mtx);
+
+	mutex_lock(&maxfifo_info->maxfifo_mtx);
+
+	maxfifo_info->dev_drm = dev;
+	maxfifo_info->regs_to_set = TNG_MAXFIFO_REGS_TO_SET_DEFAULT;
+	maxfifo_info->s0i1_disp_state = S0i1_DISP_STATE_NOT_READY;
+
+	INIT_WORK(&maxfifo_info->repeat_frame_interrupt_work,
+			_maxfifo_send_hwc_event_work);
+
+#ifndef ENABLE_HW_REPEAT_FRAME
+	maxfifo_info->repeat_frame_interrupt_on = false;
+	maxfifo_timer_init(dev);
+#endif
+	_maxfifo_create_sysfs_entries(dev);
+	/*Initialize the sysfs entries*/
+
+	mutex_unlock(&maxfifo_info->maxfifo_mtx);
+	return 0;
+
+
+}
+
+
+static ssize_t _show_sysfs_enable (struct device *kdev,
+					struct device_attribute *attr, char *buf)
+{
+	int enabled = 0;
+
+	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_device *dev = minor->dev;
+	struct drm_psb_private * dev_priv = dev->dev_private;
+	struct dc_maxfifo * maxfifo_info = dev_priv->dc_maxfifo_info;
+
+	if (maxfifo_info){
+		mutex_lock(&maxfifo_info->maxfifo_mtx);
+		enabled = maxfifo_info->regs_to_set;
+		mutex_unlock(&maxfifo_info->maxfifo_mtx);
+	}
+	return sprintf(buf, "%d\n", enabled);
+
+}
+
+static ssize_t _store_sysfs_enable (struct device *kdev,
+					struct device_attribute *attr,
+					const char *buf, size_t count)
+{
+	int ret = -EINVAL;
+	int enable = 0;
+
+	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_device *dev = minor->dev;
+	struct drm_psb_private * dev_priv = dev->dev_private;
+	struct dc_maxfifo * maxfifo_info = dev_priv->dc_maxfifo_info;
+
+	if (maxfifo_info) {
+		ret = sscanf(buf, "%d", &enable);
+		mutex_lock(&maxfifo_info->maxfifo_mtx);
+		maxfifo_info->regs_to_set = enable;
+		mutex_unlock(&maxfifo_info->maxfifo_mtx);
+		if (enable & 0x8)
+			enter_maxfifo_mode(dev);
+		if (enable & 0x10)
+			exit_maxfifo_mode(dev);
+		if (enable & 0x20)
+			maxfifo_report_repeat_frame_interrupt(dev);
+		if (enable & 0x40)
+			enable_repeat_frame_intr(dev);
+		if (enable & 0x80)
+			_maxfifo_send_hwc_uevent(dev);
+
+	}
+
+	return count;
+}
+
+static ssize_t _show_sysfs_state (struct device *kdev,
+					struct device_attribute *attr, char *buf)
+{
+	int ret = 0;
+
+	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_device *dev = minor->dev;
+	struct drm_psb_private * dev_priv = dev->dev_private;
+	struct dc_maxfifo * maxfifo_info = dev_priv->dc_maxfifo_info;
+
+	if (maxfifo_info){
+		mutex_lock(&maxfifo_info->maxfifo_mtx);
+		ret = sprintf(buf, "S0i1-Display-Status : Reg DSPSRCTRL = %08x, "
+				"DSP_SS_PM = %08x\n", PSB_RVDC32(DSPSRCTRL_REG),
+				intel_mid_msgbus_read32(PUNIT_PORT, DSP_SS_PM));
+		PSB_DEBUG_PM("S0i1-Display-Status : Reg DSPSRCTRL = %08x, "
+				"DSP_SS_PM = %08x\n", PSB_RVDC32(DSPSRCTRL_REG),
+				intel_mid_msgbus_read32(PUNIT_PORT, DSP_SS_PM));
+		mutex_unlock(&maxfifo_info->maxfifo_mtx);
+	}
+	return ret;
+
+}
+
+static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, _show_sysfs_enable, _store_sysfs_enable);
+static DEVICE_ATTR(state, S_IRUGO, _show_sysfs_state, NULL);
+
+static struct attribute *tng_maxfifo_attrs[] = {
+	&dev_attr_enable.attr,
+	&dev_attr_state.attr,
+	NULL
+};
+
+static struct attribute_group tng_maxfifo_attr_group = {
+	.name = MAXFIFO_TNG_SYSFS_GROUP_NAME,
+	.attrs = tng_maxfifo_attrs
+};
+
+
+static inline bool _maxfifo_create_sysfs_entries(struct drm_device * dev) {
+	int ret;
+
+	ret = sysfs_create_group(&dev->primary->kdev.kobj,
+				&tng_maxfifo_attr_group);
+	if (ret)
+		DRM_ERROR("Maxfifo sysfs setup failed\n");
+	return ret;
+}
+
+int dc_maxfifo_uninit(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *)dev->dev_private;
+
+	if (!dev_priv->dc_maxfifo_info)
+		return true;
+
+	mutex_destroy(&((struct dc_maxfifo *)
+		(dev_priv->dc_maxfifo_info))->maxfifo_mtx);
+
+	kfree(dev_priv->dc_maxfifo_info);
+	return 0;
+}
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/ospm/dc_maxfifo.h b/drivers/external_drivers/intel_media/display/tng/drv/ospm/dc_maxfifo.h
new file mode 100644
index 0000000..9f53668
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/ospm/dc_maxfifo.h
@@ -0,0 +1,49 @@
+/**************************************************************************
+ * Copyright (c) 2012, Intel Corporation.
+ * All Rights Reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *   Vinil Cheeramvelill <vinil.cheeramvelil@intel.com>
+ */
+
+#ifndef __DC_MAXFIFO_H__
+#define __DC_MAXFIFO_H__
+
+#include "psb_drv.h"
+
+
+int dc_maxfifo_init(struct drm_device *dev);
+void maxfifo_report_repeat_frame_interrupt(struct drm_device * dev);
+bool enter_maxfifo_mode(struct drm_device *dev);
+bool exit_maxfifo_mode(struct drm_device *dev);
+bool enter_s0i1_display_mode(struct drm_device *dev);
+bool exit_s0i1_display_mode(struct drm_device *dev);
+void enable_repeat_frame_intr(struct drm_device *dev);
+void disable_repeat_frame_intr(struct drm_device *dev);
+#ifndef ENABLE_HW_REPEAT_FRAME
+void maxfifo_timer_stop(struct drm_device *dev);
+void maxfifo_timer_start(struct drm_device *dev);
+#endif
+
+int dc_maxfifo_uninit(struct drm_device *dev);
+
+#endif
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/ospm/dc_ospm.c b/drivers/external_drivers/intel_media/display/tng/drv/ospm/dc_ospm.c
new file mode 100644
index 0000000..5f6839a
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/ospm/dc_ospm.c
@@ -0,0 +1,398 @@
+/**************************************************************************
+ * Copyright (c) 2012, Intel Corporation.
+ * All Rights Reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Hitesh K. Patel <hitesh.k.patel@intel.com>
+ */
+
+#include "psb_drv.h"
+#include "dc_ospm.h"
+#include "pmu_tng.h"
+#include "tng_wa.h"
+#include "mrfld_clock.h"
+#include <asm/intel-mid.h>
+
+/***********************************************************
++ * Sideband implementation
++ ***********************************************************/
+#define	SB_PCKT		0x2100
+#define	SB_DATA		0x2104
+#define	SB_ADDR		0x2108
+#define	SB_STATUS	0x210C
+
+#define MIO_SB_ADDR	0x3b
+#define	MIO_ON		0x00
+#define	MIO_OFF		0x03
+
+extern struct drm_device *gpDrmDevice;
+
+void sb_write_packet(bool pwr_on)
+{
+	struct drm_device *dev = gpDrmDevice;
+	u32 ulData = MIO_ON;
+
+	if (!pwr_on)
+		ulData = MIO_OFF;
+
+	REG_WRITE(SB_ADDR, MIO_SB_ADDR);
+	REG_WRITE(SB_DATA, ulData);
+	REG_WRITE(SB_PCKT, 0x00070410);
+
+}
+
+
+/***********************************************************
+ * display A Island implementation
+ ***********************************************************/
+
+/**
+ * disp_a_power_up
+ *
+ * Power up island : return true if success
+ */
+static bool disp_a_power_up(struct drm_device *dev,
+			struct ospm_power_island *p_island)
+{
+	bool ret;
+
+	enable_HFPLL(dev);
+
+#ifndef USE_GFX_INTERNAL_PM_FUNC
+	ret = pmu_nc_set_power_state(PMU_DISP_A, OSPM_ISLAND_UP, DSP_SS_PM);
+#else
+	ret = pmu_set_power_state_tng(DSP_SS_PM, DPA_SSC, TNG_COMPOSITE_I0);
+#endif
+
+	/*
+	 * This workarounds are only needed for TNG A0/A1 silicon.
+	 * Any TNG SoC which is newer than A0/A1 won't need this.
+	 */
+	if (IS_TNG_A0(dev))
+	{
+		if (!ret)
+			apply_TNG_A0_workarounds(OSPM_DISPLAY_ISLAND, 0);
+	}
+
+	PSB_DEBUG_PM("Power on island %x, returned %d\n", p_island->island, ret);
+
+#ifdef CONFIG_SUPPORT_MIPI
+	/* FIXME: Can we move dpst out of ospm code? */
+	psb_dpst_diet_restore(dev);
+#endif
+	return !ret;
+}
+
+/**
+ * disp_a_power_down
+ *
+ * Power down island : return true if success
+ */
+static bool disp_a_power_down(struct drm_device *dev,
+			struct ospm_power_island *p_island)
+{
+	bool ret;
+
+#ifdef CONFIG_SUPPORT_MIPI
+	psb_dpst_diet_save(dev);
+#endif
+
+#ifndef USE_GFX_INTERNAL_PM_FUNC
+	ret = pmu_nc_set_power_state(PMU_DISP_A, OSPM_ISLAND_DOWN, DSP_SS_PM);
+#else
+	ret = pmu_set_power_state_tng(DSP_SS_PM, DPA_SSC, TNG_COMPOSITE_D3);
+#endif
+
+	PSB_DEBUG_PM("Power off island %x, returned %d\n", p_island->island, ret);
+
+	return !ret;
+}
+
+/**
+ * ospm_disp_a_init
+ *
+ * initilize
+ */
+void ospm_disp_a_init(struct drm_device *dev,
+			struct ospm_power_island *p_island)
+{
+	p_island->p_funcs->power_up = disp_a_power_up;
+	p_island->p_funcs->power_down = disp_a_power_down;
+	p_island->p_dependency = NULL;
+}
+
+/***********************************************************
+ * display B Island implementation
+ ***********************************************************/
+/**
+ * disp_b_power_up
+ *
+ * Power up island : return true if success
+ */
+static bool disp_b_power_up(struct drm_device *dev,
+			struct ospm_power_island *p_island)
+{
+	bool ret;
+
+#ifndef USE_GFX_INTERNAL_PM_FUNC
+	ret = pmu_nc_set_power_state(PMU_DISP_B, OSPM_ISLAND_UP, DSP_SS_PM);
+#else
+	ret = pmu_set_power_state_tng(DSP_SS_PM, DPB_SSC, TNG_COMPOSITE_I0);
+#endif
+	PSB_DEBUG_PM("Power on island %x, returned %d\n", p_island->island, ret);
+
+	return !ret;
+}
+
+/**
+ * disp_b_power_down
+ *
+ * Power down island : return true if success
+ */
+static bool disp_b_power_down(struct drm_device *dev,
+			struct ospm_power_island *p_island)
+{
+	bool ret;
+
+#ifndef USE_GFX_INTERNAL_PM_FUNC
+	ret = pmu_nc_set_power_state(PMU_DISP_B, OSPM_ISLAND_DOWN, DSP_SS_PM);
+#else
+	ret = pmu_set_power_state_tng(DSP_SS_PM, DPB_SSC, TNG_COMPOSITE_D3);
+#endif
+	PSB_DEBUG_PM("Power off island %x, returned %d\n", p_island->island, ret);
+
+	return !ret;
+}
+
+/**
+ * ospm_disp_b_init
+ *
+ * initilize
+ */
+void ospm_disp_b_init(struct drm_device *dev,
+			struct ospm_power_island *p_island)
+{
+	p_island->p_funcs->power_up = disp_b_power_up;
+	p_island->p_funcs->power_down = disp_b_power_down;
+	p_island->p_dependency = get_island_ptr(OSPM_DISPLAY_A);
+}
+
+/***********************************************************
+ * display C Island implementation
+ ***********************************************************/
+/**
+ * disp_c_power_up
+ *
+ * Power up island : return true if success
+ */
+static bool disp_c_power_up(struct drm_device *dev,
+			struct ospm_power_island *p_island)
+{
+	bool ret;
+
+#ifndef USE_GFX_INTERNAL_PM_FUNC
+	ret = pmu_nc_set_power_state(PMU_DISP_C, OSPM_ISLAND_UP, DSP_SS_PM);
+#else
+	ret = pmu_set_power_state_tng(DSP_SS_PM, DPC_SSC, TNG_COMPOSITE_I0);
+#endif
+
+	PSB_DEBUG_PM("Power on island %x, returned %d\n", p_island->island, ret);
+
+	return !ret;
+}
+
+/**
+ * disp_c_power_down
+ *
+ * Power down island : return true if success
+ */
+static bool disp_c_power_down(struct drm_device *dev,
+			struct ospm_power_island *p_island)
+{
+	bool ret;
+
+#ifndef USE_GFX_INTERNAL_PM_FUNC
+	ret = pmu_nc_set_power_state(PMU_DISP_C, OSPM_ISLAND_DOWN, DSP_SS_PM);
+#else
+	ret = pmu_set_power_state_tng(DSP_SS_PM, DPC_SSC, TNG_COMPOSITE_D3);
+#endif
+
+	PSB_DEBUG_PM("Power off island %x, returned %d\n", p_island->island, ret);
+
+	return !ret;
+}
+
+/**
+ * ospm_disp_c_init
+ *
+ * initilize
+ */
+void ospm_disp_c_init(struct drm_device *dev,
+			struct ospm_power_island *p_island)
+{
+	p_island->p_funcs->power_up = disp_c_power_up;
+	p_island->p_funcs->power_down = disp_c_power_down;
+	p_island->p_dependency = get_island_ptr(OSPM_DISPLAY_A);
+}
+
+/***********************************************************
+ * display MIO Island implementation
+ ***********************************************************/
+/**
+ * mio_power_up
+ *
+ * Power up island : return true if success
+ */
+static bool mio_power_up(struct drm_device *dev,
+			struct ospm_power_island *p_island)
+{
+	bool ret = false;
+
+#ifdef CONFIG_SUPPORT_MIPI
+	if (!enable_DSIPLL(dev)){
+		DRM_ERROR("Not Powering up MIO since DSI PLL could not be locked");
+		return ret;
+	}
+#endif
+
+	if (IS_TNG_A0(dev))
+	{
+		sb_write_packet(true);
+		udelay(50);
+		sb_write_packet(false);
+		udelay(50);
+		sb_write_packet(true);
+		udelay(50);
+		PSB_DEBUG_PM("%s:using sideband to powerup MIO\n", __func__);
+	} else {
+#ifndef USE_GFX_INTERNAL_PM_FUNC
+	ret = pmu_nc_set_power_state(PMU_MIO, OSPM_ISLAND_UP, MIO_SS_PM);
+	ret = pmu_nc_set_power_state(PMU_MIO, OSPM_ISLAND_DOWN, MIO_SS_PM);
+	ret = pmu_nc_set_power_state(PMU_MIO, OSPM_ISLAND_UP, MIO_SS_PM);
+#else
+	ret = pmu_set_power_state_tng(MIO_SS_PM, MIO_SSC, TNG_COMPOSITE_I0);
+	ret = pmu_set_power_state_tng(MIO_SS_PM, MIO_SSC, TNG_COMPOSITE_D3);
+	ret = pmu_set_power_state_tng(MIO_SS_PM, MIO_SSC, TNG_COMPOSITE_I0);
+#endif
+	}
+
+	PSB_DEBUG_PM("Power on island %x, returned %d\n", p_island->island, ret);
+
+	return !ret;
+}
+
+/**
+ * mio_power_down
+ *
+ * Power down island : return true if success
+ */
+static bool mio_power_down(struct drm_device *dev,
+			struct ospm_power_island *p_island)
+{
+	bool ret;
+#ifdef CONFIG_SUPPORT_MIPI
+	if (!disable_DSIPLL(dev)){
+		DRM_ERROR("Skipping MIO power down ad DSI PLL could not be unlocked\n");
+		return false;
+	}
+#endif
+
+#ifndef USE_GFX_INTERNAL_PM_FUNC
+	ret = pmu_nc_set_power_state(PMU_MIO, OSPM_ISLAND_DOWN, MIO_SS_PM);
+#else
+	ret = pmu_set_power_state_tng(MIO_SS_PM, MIO_SSC, TNG_COMPOSITE_D3);
+#endif
+
+	PSB_DEBUG_PM("Power off island %x, returned %d\n", p_island->island, ret);
+
+	return !ret;
+}
+
+/**
+ * ospm_mio_init
+ *
+ * initilize
+ */
+void ospm_mio_init(struct drm_device *dev,
+			struct ospm_power_island *p_island)
+{
+	p_island->p_funcs->power_up = mio_power_up;
+	p_island->p_funcs->power_down = mio_power_down;
+	p_island->p_dependency = get_island_ptr(OSPM_DISPLAY_A);
+}
+
+/***********************************************************
+ * display HDMI Island implementation
+ ***********************************************************/
+/**
+ * hdmi_power_up
+ *
+ * Power up island : return true if success
+ */
+static bool hdmi_power_up(struct drm_device *dev,
+			struct ospm_power_island *p_island)
+{
+	bool ret;
+
+#ifndef USE_GFX_INTERNAL_PM_FUNC
+	ret = pmu_nc_set_power_state(PMU_HDMI, OSPM_ISLAND_UP, HDMIO_SS_PM);
+#else
+	ret = pmu_set_power_state_tng(HDMIO_SS_PM, HDMIO_SSC, TNG_COMPOSITE_I0);
+#endif
+
+	PSB_DEBUG_PM("Power on island %x, returned %d\n", p_island->island, ret);
+
+	return !ret;
+}
+
+/**
+ * hdmi_power_down
+ *
+ * Power down island : return true if success
+ */
+static bool hdmi_power_down(struct drm_device *dev,
+			struct ospm_power_island *p_island)
+{
+	bool ret;
+
+#ifndef USE_GFX_INTERNAL_PM_FUNC
+	ret = pmu_nc_set_power_state(PMU_HDMI, OSPM_ISLAND_DOWN, HDMIO_SS_PM);
+#else
+	ret = pmu_set_power_state_tng(HDMIO_SS_PM, HDMIO_SSC, TNG_COMPOSITE_D3);
+#endif
+	PSB_DEBUG_PM("Power off island %x, returned %d\n", p_island->island, ret);
+
+	return !ret;
+}
+
+/**
+ * ospm_hdmi_init
+ *
+ * initilize
+ */
+void ospm_hdmi_init(struct drm_device *dev,
+			struct ospm_power_island *p_island)
+{
+	p_island->p_funcs->power_up = hdmi_power_up;
+	p_island->p_funcs->power_down = hdmi_power_down;
+	p_island->p_dependency = get_island_ptr(OSPM_DISPLAY_A);
+}
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/ospm/dc_ospm.h b/drivers/external_drivers/intel_media/display/tng/drv/ospm/dc_ospm.h
new file mode 100644
index 0000000..76a4cd2
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/ospm/dc_ospm.h
@@ -0,0 +1,56 @@
+/**************************************************************************
+ * Copyright (c) 2012, Intel Corporation.
+ * All Rights Reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Hitesh K. Patel <hitesh.k.patel@intel.com>
+ */
+
+#ifndef _TNG_DISPLAY_OSPM_H_
+#define _TNG_DISPLAY_OSPM_H_
+
+#include "pwr_mgmt.h"
+
+
+#define PMU_DISP_A		0x1
+#define PMU_DISP_B		0x2
+#define PMU_DISP_C		0x4
+#define PMU_MIO			0x1
+#define PMU_HDMI		0x1
+
+void ospm_disp_a_init(struct drm_device *dev,
+			struct ospm_power_island *p_island);
+
+void ospm_disp_b_init(struct drm_device *dev,
+			struct ospm_power_island *p_island);
+
+void ospm_disp_c_init(struct drm_device *dev,
+			struct ospm_power_island *p_island);
+
+void ospm_mio_init(struct drm_device *dev,
+			struct ospm_power_island *p_island);
+
+void ospm_hdmi_init(struct drm_device *dev,
+			struct ospm_power_island *p_island);
+
+
+#endif	/* _TNG_DISPLAY_OSPM_H_ */
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/ospm/early_suspend.c b/drivers/external_drivers/intel_media/display/tng/drv/ospm/early_suspend.c
new file mode 100755
index 0000000..fa2e331
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/ospm/early_suspend.c
@@ -0,0 +1,154 @@
+/**************************************************************************
+ * Copyright (c) 2012, Intel Corporation.
+ * All Rights Reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Hitesh K. Patel <hitesh.k.patel@intel.com>
+ */
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+
+#include <linux/earlysuspend.h>
+#include <linux/mutex.h>
+#include "psb_drv.h"
+#include "early_suspend.h"
+#include "android_hdmi.h"
+#include "gfx_rtpm.h"
+
+static struct drm_device *g_dev;
+
+static void gfx_early_suspend(struct early_suspend *h)
+{
+	struct drm_psb_private *dev_priv = g_dev->dev_private;
+	struct drm_device *dev = dev_priv->dev;
+	struct drm_encoder *encoder;
+	struct drm_encoder_helper_funcs *enc_funcs;
+
+	PSB_DEBUG_PM("%s\n", __func__);
+
+	flush_workqueue(dev_priv->power_wq);
+
+	/* protect early_suspend with dpms and mode config */
+	mutex_lock(&dev->mode_config.mutex);
+
+	list_for_each_entry(encoder,
+			&dev->mode_config.encoder_list,
+			head) {
+		enc_funcs = encoder->helper_private;
+		if (!drm_helper_encoder_in_use(encoder))
+			continue;
+		if (enc_funcs && enc_funcs->save)
+			enc_funcs->save(encoder);
+
+		if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS) {
+			DCLockMutex();
+
+			DC_MRFLD_onPowerOff(1);
+			/* give time to the last flip to take effective, if we
+			 * disable hardware too quickly, overlay hardware may
+			 * crash, causing pipe hang next time when we try to
+			 * use overlay
+			 */
+			msleep(50);
+
+			drm_handle_vblank(dev, 1);
+			/* Turn off vsync interrupt. */
+			drm_vblank_off(dev, 1);
+
+			/* Make the pending flip request as completed. */
+			DCUnAttachPipe(1);
+			DCUnLockMutex();
+		}
+	}
+
+	/* Suspend hdmi
+	 * Note: hotplug detection is disabled if audio is not playing
+	 */
+	android_hdmi_suspend_display(dev);
+
+	ospm_power_suspend();
+	dev_priv->early_suspended = true;
+
+	mutex_unlock(&dev->mode_config.mutex);
+}
+
+static void gfx_late_resume(struct early_suspend *h)
+{
+	struct drm_psb_private *dev_priv = g_dev->dev_private;
+	struct drm_device *dev = dev_priv->dev;
+	struct drm_encoder *encoder;
+	struct drm_encoder_helper_funcs *enc_funcs;
+
+	PSB_DEBUG_PM("%s\n", __func__);
+
+	/* protect early_suspend with dpms and mode config */
+	mutex_lock(&dev->mode_config.mutex);
+
+	dev_priv->early_suspended = false;
+	ospm_power_resume();
+
+	list_for_each_entry(encoder,
+			&dev->mode_config.encoder_list,
+			head) {
+		enc_funcs = encoder->helper_private;
+		if (!drm_helper_encoder_in_use(encoder))
+			continue;
+		if (enc_funcs && enc_funcs->restore)
+			enc_funcs->restore(encoder);
+	}
+
+	/* Resume HDMI */
+	android_hdmi_resume_display(dev);
+
+	/*
+	 * Devices connect status will be changed
+	 * when system suspend,re-detect once here.
+	 */
+	if (android_hdmi_is_connected(dev)) {
+		DCLockMutex();
+		DCAttachPipe(1);
+		DC_MRFLD_onPowerOn(1);
+		mid_hdmi_audio_resume(dev);
+		DCUnLockMutex();
+	}
+
+	mutex_unlock(&dev->mode_config.mutex);
+}
+
+static struct early_suspend intel_media_early_suspend = {
+	.level = EARLY_SUSPEND_LEVEL_DISABLE_FB,
+	.suspend = gfx_early_suspend,
+	.resume = gfx_late_resume,
+};
+
+void intel_media_early_suspend_init(struct drm_device *dev)
+{
+	g_dev = dev;
+	register_early_suspend(&intel_media_early_suspend);
+}
+
+void intel_media_early_suspend_uninit(void)
+{
+	unregister_early_suspend(&intel_media_early_suspend);
+}
+
+#endif
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/ospm/early_suspend.h b/drivers/external_drivers/intel_media/display/tng/drv/ospm/early_suspend.h
new file mode 100644
index 0000000..26e6814
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/ospm/early_suspend.h
@@ -0,0 +1,39 @@
+/**************************************************************************
+ * Copyright (c) 2012, Intel Corporation.
+ * All Rights Reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Hitesh K. Patel <hitesh.k.patel@intel.com>
+ */
+
+#ifndef _INTEL_MEDIA_EARLY_SUSPEND_H_
+#define _INTEL_MEDIA_EARLY_SUSPEND_H_
+
+#include <linux/types.h>
+#include <drm/drmP.h>
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+void intel_media_early_suspend_init(struct drm_device *dev);
+void intel_media_early_suspend_uninit(void);
+#endif
+
+#endif /* _INTEL_MEDIA_EARLY_SUSPEND_H_ */
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/ospm/early_suspend_sysfs.c b/drivers/external_drivers/intel_media/display/tng/drv/ospm/early_suspend_sysfs.c
new file mode 100755
index 0000000..fe2b6c3
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/ospm/early_suspend_sysfs.c
@@ -0,0 +1,159 @@
+/**************************************************************************
+ * Copyright (c) 2014, Intel Corporation.
+ * All Rights Reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Hitesh K. Patel <hitesh.k.patel@intel.com>
+ *    Sathya Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com>
+ */
+
+#include <linux/mutex.h>
+#include <linux/early_suspend_sysfs.h>
+#include "psb_drv.h"
+#include "early_suspend.h"
+#include "android_hdmi.h"
+#include "gfx_rtpm.h"
+
+static struct drm_device *g_dev;
+
+static void gfx_early_suspend(void)
+{
+	struct drm_psb_private *dev_priv = g_dev->dev_private;
+	struct drm_device *dev = dev_priv->dev;
+	struct drm_encoder *encoder;
+	struct drm_encoder_helper_funcs *enc_funcs;
+
+	PSB_DEBUG_PM("%s\n", __func__);
+
+	/* protect early_suspend with dpms and mode config */
+	mutex_lock(&dev->mode_config.mutex);
+
+	list_for_each_entry(encoder,
+			&dev->mode_config.encoder_list,
+			head) {
+		enc_funcs = encoder->helper_private;
+		if (!drm_helper_encoder_in_use(encoder))
+			continue;
+		if (enc_funcs && enc_funcs->save)
+			enc_funcs->save(encoder);
+
+		if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS) {
+			DCLockMutex();
+
+			DC_MRFLD_onPowerOff(1);
+			/* give time to the last flip to take effective, if we
+			 * disable hardware too quickly, overlay hardware may
+			 * crash, causing pipe hang next time when we try to
+			 * use overlay
+			 */
+			msleep(50);
+
+			drm_handle_vblank(dev, 1);
+
+			/* Turn off vsync interrupt. */
+			drm_vblank_off(dev, 1);
+
+			/* Make the pending flip request as completed. */
+			DCUnAttachPipe(1);
+			DCUnLockMutex();
+		}
+	}
+
+	/* Suspend hdmi
+	 * Note: hotplug detection is disabled if audio is not playing
+	 */
+	android_hdmi_suspend_display(dev);
+
+	ospm_power_suspend();
+	dev_priv->early_suspended = true;
+
+	mutex_unlock(&dev->mode_config.mutex);
+}
+
+static void gfx_late_resume(void)
+{
+	struct drm_psb_private *dev_priv = g_dev->dev_private;
+	struct drm_device *dev = dev_priv->dev;
+	struct drm_encoder *encoder;
+	struct drm_encoder_helper_funcs *enc_funcs;
+
+	PSB_DEBUG_PM("%s\n", __func__);
+
+	/* protect early_suspend with dpms and mode config */
+	mutex_lock(&dev->mode_config.mutex);
+
+	dev_priv->early_suspended = false;
+	ospm_power_resume();
+
+	list_for_each_entry(encoder,
+			&dev->mode_config.encoder_list,
+			head) {
+		enc_funcs = encoder->helper_private;
+		if (!drm_helper_encoder_in_use(encoder))
+			continue;
+		if (enc_funcs && enc_funcs->restore)
+			enc_funcs->restore(encoder);
+	}
+
+	/* Resume HDMI */
+	android_hdmi_resume_display(dev);
+
+	/*
+	 * Devices connect status will be changed
+	 * when system suspend,re-detect once here.
+	 */
+	if (android_hdmi_is_connected(dev)) {
+		DCLockMutex();
+		DCAttachPipe(1);
+		DC_MRFLD_onPowerOn(1);
+		mid_hdmi_audio_resume(dev);
+		DCUnLockMutex();
+	}
+
+	mutex_unlock(&dev->mode_config.mutex);
+}
+
+static ssize_t early_suspend_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	if (!strncmp(buf, EARLY_SUSPEND_ON, EARLY_SUSPEND_STATUS_LEN))
+		gfx_early_suspend();
+	else if (!strncmp(buf, EARLY_SUSPEND_OFF, EARLY_SUSPEND_STATUS_LEN))
+		gfx_late_resume();
+
+	return count;
+}
+
+static DEVICE_EARLY_SUSPEND_ATTR(early_suspend_store);
+
+void intel_media_early_suspend_sysfs_init(struct drm_device *dev)
+{
+	g_dev = dev;
+	device_create_file(&dev->pdev->dev, &dev_attr_early_suspend);
+	register_early_suspend_device(&dev->pdev->dev);
+}
+
+void intel_media_early_suspend_sysfs_uninit(struct drm_device *dev)
+{
+	device_remove_file(&dev->pdev->dev, &dev_attr_early_suspend);
+	unregister_early_suspend_device(&dev->pdev->dev);
+}
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/ospm/early_suspend_sysfs.h b/drivers/external_drivers/intel_media/display/tng/drv/ospm/early_suspend_sysfs.h
new file mode 100644
index 0000000..c88c01c
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/ospm/early_suspend_sysfs.h
@@ -0,0 +1,38 @@
+/**************************************************************************
+ * Copyright (c) 2014, Intel Corporation.
+ * All Rights Reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Hitesh K. Patel <hitesh.k.patel@intel.com>
+ *    Sathya Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com>
+ */
+
+#ifndef _INTEL_MEDIA_EARLY_SUSPEND_SYSFS_H_
+#define _INTEL_MEDIA_EARLY_SUSPEND_SYSFS_H_
+
+#include <linux/types.h>
+#include <drm/drmP.h>
+
+void intel_media_early_suspend_sysfs_init(struct drm_device *dev);
+void intel_media_early_suspend_sysfs_uninit(struct drm_device *dev);
+
+#endif /* _INTEL_MEDIA_EARLY_SUSPEND_SYSFS_H_ */
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/ospm/gfx_freq.h b/drivers/external_drivers/intel_media/display/tng/drv/ospm/gfx_freq.h
new file mode 100644
index 0000000..4ec0d5a
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/ospm/gfx_freq.h
@@ -0,0 +1,39 @@
+/**************************************************************************
+ * Copyright (c) 2012, Intel Corporation.
+ * All Rights Reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Hitesh K. Patel <hitesh.k.patel@intel.com>
+ */
+
+#ifndef _TNG_GRAPHICS_OSPM_H_
+#define _TNG_GRAPHICS_OSPM_H_
+
+int gpu_freq_mhz_to_code(int freq_mhz, int *p_freq_out);
+
+int gpu_freq_set_from_code(int freq_code);
+
+void gpu_freq_set_suspend_func(int (*suspend_func)(void));
+
+void gpu_freq_set_resume_func(int (*resume_func)(void));
+
+#endif	/* _TNG_GRAPHICS_OSPM_H_*/
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/ospm/gfx_ospm.c b/drivers/external_drivers/intel_media/display/tng/drv/ospm/gfx_ospm.c
new file mode 100644
index 0000000..d66600f
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/ospm/gfx_ospm.c
@@ -0,0 +1,713 @@
+/**************************************************************************
+ * Copyright (c) 2012, Intel Corporation.
+ * All Rights Reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Hitesh K. Patel <hitesh.k.patel@intel.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/printk.h>
+#include <linux/delay.h>
+#include <asm/intel-mid.h>
+
+#include "psb_drv.h"
+#include "gfx_ospm.h"
+#include "gfx_freq.h"
+#include "pmu_tng.h"
+#include "tng_wa.h"
+
+
+
+
+#define	USE_GFX_PM_FUNC			0
+
+/* WRAPPER Offset 0x160024 */
+#define GFX_STATUS_OFFSET		0x24
+
+#define GFX_POWER_UP(x) \
+	pmu_nc_set_power_state(x, OSPM_ISLAND_UP, GFX_SS_PM0)
+
+#define GFX_POWER_DOWN(x) \
+	pmu_nc_set_power_state(x, OSPM_ISLAND_DOWN, GFX_SS_PM0)
+
+extern IMG_BOOL gbSystemActivePMEnabled;
+extern IMG_BOOL gbSystemActivePMInit;
+
+enum GFX_ISLAND_STATUS {
+	POWER_ON = 0,		/* No gating (clk or power) */
+	CLOCK_GATED,		/* Clock Gating */
+	SOFT_RESET,		/* Soft Reset */
+	POWER_OFF,		/* Powered off or Power gated.*/
+};
+
+int is_tng_a0 = 0;
+EXPORT_SYMBOL(is_tng_a0);
+
+/**
+  * gpu_freq_code_to_mhz() - Given frequency as a code (as defined for *_PM1
+  * register), return frequency in mhz.
+  * @freq_code_in - Input: A frequency code as specified for *_PM1 registers.
+  * Function return value: corresponding frequency in MHz or < 0 if error.
+  */
+static int gpu_freq_code_to_mhz(int freq_code_in)
+{
+	int freq_mhz_out;
+
+	switch (freq_code_in) {
+	case IP_FREQ_100_00:
+		freq_mhz_out = 100;
+		break;
+	case IP_FREQ_106_67:
+		freq_mhz_out = 106;
+		break;
+	case IP_FREQ_133_30:
+		freq_mhz_out = 133;
+		break;
+	case IP_FREQ_160_00:
+		freq_mhz_out = 160;
+		break;
+	case IP_FREQ_177_78:
+		freq_mhz_out = 177;
+		break;
+	case IP_FREQ_200_00:
+		freq_mhz_out = 200;
+		break;
+	case IP_FREQ_213_33:
+		freq_mhz_out = 213;
+		break;
+	case IP_FREQ_266_67:
+		freq_mhz_out = 266;
+		break;
+	case IP_FREQ_320_00:
+		freq_mhz_out = 320;
+		break;
+	case IP_FREQ_355_56:
+		freq_mhz_out = 355;
+		break;
+	case IP_FREQ_400_00:
+		freq_mhz_out = 400;
+		break;
+	case IP_FREQ_457_14:
+		freq_mhz_out = 457;
+		break;
+	case IP_FREQ_533_33:
+		freq_mhz_out = 533;
+		break;
+	case IP_FREQ_640_00:
+		freq_mhz_out = 640;
+		break;
+	case IP_FREQ_800_00:
+		freq_mhz_out = 800;
+		break;
+	default:
+		printk(KERN_ALERT "%s: Invalid freq code: %#x\n", __func__,
+			freq_code_in);
+		return -EINVAL;
+	}
+
+	return freq_mhz_out;
+}
+
+/**
+ * mrfl_pwr_cmd_gfx - Change graphics power state.
+ * Change island power state in the require sequence.
+ *
+ * @gfx_mask: Mask of islands to be changed.
+ * @new_state: 0 for power-off, 1 for power-on.
+ */
+#ifdef USE_GFX_INTERNAL_PM_FUNC
+static int mrfl_pwr_cmd_gfx(u32 gfx_mask, int new_state)
+{
+	/*
+	 * pwrtab - gfx pwr sub-islands in required power-up order and
+	 * in reverse of required power-down order.
+	 */
+	static const u32 pwrtab[] = {
+		GFX_SLC_LDO_SHIFT,
+		GFX_SLC_SHIFT,
+		GFX_SDKCK_SHIFT,
+		GFX_RSCD_SHIFT,
+	};
+	const int pwrtablen = ARRAY_SIZE(pwrtab);
+	int i;
+	int j;
+	int ret;
+	u32 ns_mask;
+	u32 done_mask;
+	u32 this_mask;
+	u32 pwr_state_prev;
+
+	pwr_state_prev = intel_mid_msgbus_read32(PUNIT_PORT, GFX_SS_PM0);
+
+	if (new_state == OSPM_ISLAND_UP)
+		ns_mask = TNG_COMPOSITE_I0;
+	else
+		ns_mask = TNG_COMPOSITE_D3;
+
+	/*  Call underlying function separately for each step in the
+	    power sequence. */
+	done_mask = 0;
+	for (i = 0; i < pwrtablen ; i++) {
+		if (new_state == OSPM_ISLAND_UP)
+			j = i;
+		else
+			j = pwrtablen - i - 1;
+
+		done_mask |= TNG_SSC_MASK << pwrtab[j];
+		this_mask = gfx_mask & done_mask;
+		if (this_mask) {
+		/*  FIXME - if (new_state == 0), check for required
+			    conditions per the SAS. */
+			ret = pmu_set_power_state_tng(GFX_SS_PM0,
+					this_mask, ns_mask);
+			if (ret)
+			return ret;
+		}
+
+		/**
+		 * If turning some power on, and the power to be on includes SLC,
+		 * and SLC was not previously on, then setup some registers.
+		 */
+		if ((new_state == OSPM_ISLAND_UP)
+			&& (pwrtab[j] == GFX_SLC_SHIFT)
+			&& ((pwr_state_prev >> GFX_SLC_SHIFT) != TNG_SSC_I0))
+		{
+			/* TNG A0 workarounds */
+			if (IS_TNG_A0(dev))
+				apply_TNG_A0_workarounds(OSPM_GRAPHICS_ISLAND, 1);
+
+			/* ANN A0 workarounds */
+			if (IS_ANN(dev))
+				apply_ANN_A0_workarounds(OSPM_GRAPHICS_ISLAND, 1);
+		}
+
+		if ((gfx_mask & ~done_mask) == 0)
+			break;
+	}
+
+	return 0;
+}
+#endif
+
+/**
+ * pm_cmd_freq_wait() - Wait for frequency valid via specified register.
+ * Optionally, return realized frequency to caller.
+ * @reg_freq: The frequency control register.  One of *_PM1.
+ * @freq_code_rlzd - If non-NULL, pointer to receive the realized Tangier
+ * frequency code.
+ */
+static int pm_cmd_freq_wait(u32 reg_freq, u32 *freq_code_rlzd)
+{
+	int tcount;
+	u32 freq_val;
+
+	for (tcount = 0; ; tcount++) {
+		freq_val = intel_mid_msgbus_read32(PUNIT_PORT, reg_freq);
+		if ((freq_val & IP_FREQ_VALID) == 0)
+			break;
+		if (tcount > 500) {
+			WARN(1, "%s: P-Unit freq request wait timeout",
+				__func__);
+			return -EBUSY;
+		}
+		udelay(1);
+	}
+
+	if (freq_code_rlzd) {
+		*freq_code_rlzd = ((freq_val >> IP_FREQ_STAT_POS) &
+			IP_FREQ_MASK);
+	}
+
+	return 0;
+}
+
+
+/**
+ * pm_cmd_freq_set() - Set operating frequency via specified register.
+ * Optionally, return realized frequency to caller.
+ * @reg_freq: The frequency control register.  One of *_PM1.
+ * @freq_code: Tangier frequency code.
+ * @p_freq_code_rlzd - If non-NULL, pointer to receive the realized Tangier
+ * frequency code.
+ */
+static int pm_cmd_freq_set(u32 reg_freq, u32 freq_code, u32 *p_freq_code_rlzd)
+{
+	u32 freq_val;
+	u32 freq_code_realized;
+	int rva;
+
+	rva = pm_cmd_freq_wait(reg_freq, NULL);
+	if (rva < 0) {
+		printk(KERN_ALERT "%s: pm_cmd_freq_wait 1 failed\n", __func__);
+		return rva;
+	}
+
+	freq_val = IP_FREQ_VALID | freq_code;
+	intel_mid_msgbus_write32(PUNIT_PORT, reg_freq, freq_val);
+
+	rva = pm_cmd_freq_wait(reg_freq, &freq_code_realized);
+	if (rva < 0) {
+		printk(KERN_ALERT "%s: pm_cmd_freq_wait 2 failed\n", __func__);
+		return rva;
+	}
+
+	if (p_freq_code_rlzd)
+		*p_freq_code_rlzd = freq_code_realized;
+
+	return rva;
+}
+
+
+/**
+ * pm_cmd_freq_from_code() - Set operating frequency via specified register.
+ * Optionally, return realized frequency to caller.
+ * @reg_freq: The frequency control register.  One of *_PM1.
+ * @freq_code: Tangier frequency code.
+ * @function return value: - <0 if error, or frequency in MHz.
+ */
+int gpu_freq_set_from_code(int freq_code)
+{
+	u32 freq_realized_code;
+	int rva;
+
+	rva = pm_cmd_freq_set(GFX_SS_PM1, freq_code, &freq_realized_code);
+	if (rva < 0)
+		return rva;
+
+	return gpu_freq_code_to_mhz(freq_realized_code);
+}
+EXPORT_SYMBOL(gpu_freq_set_from_code);
+
+
+/**
+  * gpu_freq_mhz_to_code() - Given frequency in MHz, return frequency code
+  * used for frequency control.
+  * Always pick the code less than equal to the integer MHz value.
+  * @freq_mhz_in - Input: A MHz frequency specification.
+  * @*p_freq_out - Out: The quantized MHz frequency specification.
+  * Function return value: frequency code as in register definition.
+  */
+int gpu_freq_mhz_to_code(int freq_mhz_in, int *p_freq_out)
+{
+	int freq_code;
+	int freq_out;
+
+	if (freq_mhz_in >= 800) {
+		freq_code = IP_FREQ_800_00;	/* 800.00 */
+		freq_out = 800;
+	} else if (freq_mhz_in >= 640) {
+		freq_code = IP_FREQ_640_00;	/* 640.00 */
+		freq_out = 640;
+	} else if (freq_mhz_in >= 533) {
+		freq_code = IP_FREQ_533_33;	/* 533.33 */
+		freq_out = 533;
+	} else if (freq_mhz_in >= 457) {
+		freq_code = IP_FREQ_457_14;	/* 457.14 */
+		freq_out = 457;
+	} else if (freq_mhz_in >= 400) {
+		freq_code = IP_FREQ_400_00;	/* 400.00 */
+		freq_out = 400;
+	} else if (freq_mhz_in >= 355) {
+		freq_code = IP_FREQ_355_56;	/* 355.56 */
+		freq_out = 355;
+	} else if (freq_mhz_in >= 320) {
+		freq_code = IP_FREQ_320_00;	/* 320.00 */
+		freq_out = 320;
+	} else if (freq_mhz_in >= 266) {
+		freq_code = IP_FREQ_266_67;	/* 266.67 */
+		freq_out = 266;
+	} else if (freq_mhz_in >= 213) {
+		freq_code = IP_FREQ_213_33;	/* 213.33 */
+		freq_out = 213;
+	} else if (freq_mhz_in >= 200) {
+		freq_code = IP_FREQ_200_00;	/* 200.00 */
+		freq_out = 200;
+	} else if (freq_mhz_in >= 177) {
+		freq_code = IP_FREQ_177_78;	/* 177.78 */
+		freq_out = 177;
+	} else if (freq_mhz_in >= 160) {
+		freq_code = IP_FREQ_160_00;	/* 160.00 */
+		freq_out = 160;
+	} else if (freq_mhz_in >= 133) {
+		freq_code = IP_FREQ_133_30;	/* 133.30 */
+		freq_out = 133;
+	} else if (freq_mhz_in >= 106) {
+		freq_code = IP_FREQ_106_67;	/* 106.67 */
+		freq_out = 106;
+	} else {
+		freq_code = IP_FREQ_100_00;	/* 100.00 */
+		freq_out = 100;
+	}
+
+	*p_freq_out = freq_out;
+
+	return freq_code;
+}
+EXPORT_SYMBOL(gpu_freq_mhz_to_code);
+
+/***********************************************************
+ * All Graphics Island
+ ***********************************************************/
+
+/**
+ * ospm_gfx_power_up
+ *
+ * Power up graphics islands
+ * Sequence & flow from SAS
+ */
+static bool ospm_gfx_power_up(struct drm_device *dev,
+			struct ospm_power_island *p_island)
+{
+	bool ret;
+
+	PSB_DEBUG_PM("Pre-power-up status = 0x%08x\n",
+		intel_mid_msgbus_read32(PUNIT_PORT, NC_PM_SSS));
+
+	PSB_DEBUG_PM("Power down LDO, then RSCD");
+	ret = GFX_POWER_UP(PMU_LDO);
+
+	ret = GFX_POWER_UP(PMU_RSCD);
+
+	PSB_DEBUG_PM("Post-power-up status = 0x%08x\n",
+		intel_mid_msgbus_read32(PUNIT_PORT, NC_PM_SSS));
+
+	/* If APM is enabled, then we need to make sure that the IRQs
+	 * are installed. It is possible the the GUnit has been turned
+	 * off and the IER and IMR registers have lost their state.
+	 * So we need to enable interrupts after powering on.
+	 * If the IRQs are not turned on, the interrupt sent from RGX
+	 * to indicate that it is done with processing is lost. RGX
+	 * island would then remain ON.
+	 */
+	psb_irq_preinstall_islands(dev,OSPM_GRAPHICS_ISLAND);
+	psb_irq_postinstall_islands(dev,OSPM_GRAPHICS_ISLAND);
+
+	return !ret;
+}
+
+/**
+ * ospm_gfx_power_down
+ *
+ * Power down Graphics islands
+ * Sequence & flow from SAS
+ */
+static bool ospm_gfx_power_down(struct drm_device *dev,
+			struct ospm_power_island *p_island)
+{
+	bool ret;
+	uint32_t reg, data;
+
+	PSB_DEBUG_PM("OSPM: ospm_gfx_power_down \n");
+
+	PSB_DEBUG_PM("Pre-power-off Status = 0x%08x\n",
+		intel_mid_msgbus_read32(PUNIT_PORT, NC_PM_SSS));
+
+	/* If APM is enabled, we can turn off the RGX interrupts. This is
+	 * kind of a no-op but still better coding to turn of IRQs for
+	 * devices/ components that are turned off
+	 */
+	psb_irq_uninstall_islands(dev,OSPM_GRAPHICS_ISLAND);
+	synchronize_irq(dev->pdev->irq);
+
+	PSB_DEBUG_PM("Flush SLC, then power down SLC LDO");
+	/* write 1 to RGX_CR_SLC_CTRL_FLUSH_INVAL */
+	reg = 0x103818 - RGX_OFFSET;
+	data = 1;
+	RGX_REG_WRITE(reg, data);
+
+	ret = GFX_POWER_DOWN(PMU_LDO);
+
+	/* power down every thing */
+	ret = GFX_POWER_DOWN(PMU_RSCD);
+
+	PSB_DEBUG_PM("Post-power-off Status = 0x%08x\n",
+		intel_mid_msgbus_read32(PUNIT_PORT, NC_PM_SSS));
+
+	return !ret;
+}
+
+static void ospm_check_registers(struct drm_device *dev)
+{
+	uint32_t reg, data;
+
+	PSB_DEBUG_PM("start\n", data);
+
+	reg = 0x103800 - RGX_OFFSET;
+	data = RGX_REG_READ(reg);
+	PSB_DEBUG_PM("0x%08x SLC_CTRL_MISC(0x103800)\n", data);
+
+	reg = 0x103808 - RGX_OFFSET;
+	data = RGX_REG_READ(reg);
+	PSB_DEBUG_PM("0x%08x SLC_CTRL_INVAL(0x103808)\n", data);
+
+	reg = 0x103818 - RGX_OFFSET;
+	data = RGX_REG_READ(reg);
+	PSB_DEBUG_PM("0x%08x SLC_CTRL_FLUSH_INVAL(0x103818)\n", data);
+
+	reg = 0x103820 - RGX_OFFSET;
+	data = RGX_REG_READ(reg);
+	PSB_DEBUG_PM("0x%08x SLC_STATUS0(0x103820)\n", data);
+
+	reg = 0x103828 - RGX_OFFSET;
+	data = RGX_REG_READ(reg);
+	PSB_DEBUG_PM("0x%08x SLC_CTRL_BYPASS(0x103828)\n", data);
+
+	reg = 0x160008 - GFX_WRAPPER_OFFSET;
+	data = WRAPPER_REG_READ(reg);
+	PSB_DEBUG_PM("0x%08x GFX_CONTROL(0x160008)\n", data);
+	reg = 0x16000c - GFX_WRAPPER_OFFSET;
+	data = WRAPPER_REG_READ(reg);
+	PSB_DEBUG_PM("0x%08x GFX_THROT(0x16000c)\n", data);
+	reg = 0x160020 - GFX_WRAPPER_OFFSET;
+	data = WRAPPER_REG_READ(reg);
+	PSB_DEBUG_PM("0x%08x GCILP_CONTROL(0x160020)\n", data);
+	reg = 0x160028 - GFX_WRAPPER_OFFSET;
+	data = WRAPPER_REG_READ(reg);
+	PSB_DEBUG_PM("0x%08x GCILP_ARB_CONTROL(0x160028)\n", data);
+
+	return ;
+}
+
+
+static void ospm_pnp_settings(struct drm_device *dev)
+{
+	uint32_t reg, data, count = 0;
+
+	/*set BYP_CC to 1 on  SLC_CTRL_BYPASS */
+	reg = 0x103828 - RGX_OFFSET;
+	data = RGX_REG_READ(reg);
+	data |= 1 << 20;
+	RGX_REG_WRITE(reg, data);
+
+	reg = 0x160008 - GFX_WRAPPER_OFFSET;
+	data = 0x0;
+	WRAPPER_REG_WRITE(reg, data);
+
+	/* soc.gfx_wrapper.gclip_control.aes_bypass_disable = 1*/
+	reg = 0x160020 - GFX_WRAPPER_OFFSET;
+	data = WRAPPER_REG_READ(reg);
+	data |= 0x080;
+	WRAPPER_REG_WRITE(reg, data);
+
+	/* set [20:16] to 0x12 */
+	if (IS_ANN(dev))
+		data |= 0x019<<16;
+	else
+		data |= 0x012<<16;
+	/* set CONCURRENCY_PERF_MODE to 0x01 */
+	data |= 0x01<<8;
+	/* set PFI_CREDIT_INIT to 1 */
+	data |= 0x01 << 23;
+	WRAPPER_REG_WRITE(reg, data);
+
+	count = 0;
+	do {
+		usleep_range(450, 550);
+		data = RGX_REG_READ(reg);
+	} while ((data & (0x01<<23)) && (count++ < 10000));
+
+	if (unlikely(count > 10000))
+		PSB_DEBUG_PM("PFI_CREDIT_INIT: flush and invalide timeout\n" );
+
+	reg = 0x160028 - GFX_WRAPPER_OFFSET;
+	data = WRAPPER_REG_READ(reg);
+	/*
+	GCILP_ARB_CONTROL[3:0] = SLCRD_WEIGHT = 3
+	GCILP_ARB_CONTROL[7:4] = SLCWR_WEIGHT = 3
+	GCILP_ARB_CONTROL[11:8] = VED_WEIGHT = 3
+	GCILP_ARB_CONTROL[15:12] = VEC_WEIGHT = 3
+	GCILP_ARB_CONTROL[19:16] = VSP_WEIGHT = 3
+	GCILP_ARB_CONTROL[23:20] = FIRST_ARB_WEIGHT = 3
+	GCILP_ARB_CONTROL[31] = ARB_MODE = 0
+	*/
+	data |= 0x333333;
+	WRAPPER_REG_WRITE(reg, data);
+
+	return ;
+}
+/**
+ * ospm_slc_power_up
+ *
+ * Power up slc islands
+ * Sequence & flow from SAS
+ */
+static bool ospm_slc_power_up(struct drm_device *dev,
+			struct ospm_power_island *p_island)
+{
+	bool ret;
+	uint32_t reg, data, count;
+
+	PSB_DEBUG_PM("%s: Pre-power-off Status = 0x%08x\n",
+		__func__,
+		intel_mid_msgbus_read32(PUNIT_PORT, NC_PM_SSS));
+
+	/* bind LDO power up with GFX */
+	/*
+	 * ret = GFX_POWER_UP(PMU_LDO);
+	 */
+	ret = GFX_POWER_UP(PMU_SLC);
+
+	/*
+	 * This workarounds are only needed for TNG A0/A1 silicon.
+	 * Any TNG SoC which is newer than A0/A1 won't need this.
+	 */
+	if (!ret && IS_TNG_A0(dev))
+	{
+		/**
+		* If turning some power on, and the power to be on includes SLC,
+		* and SLC was not previously on, then setup some registers.
+		*/
+		apply_TNG_A0_workarounds(OSPM_GRAPHICS_ISLAND, 1);
+	}
+	if (!ret && IS_ANN(dev))
+		apply_ANN_A0_workarounds(OSPM_GRAPHICS_ISLAND, 1);
+
+	if (!ret)
+		ret = GFX_POWER_UP(PMU_SDKCK);
+
+	PSB_DEBUG_PM("Post-power-up status = 0x%08x\n",
+		intel_mid_msgbus_read32(PUNIT_PORT, NC_PM_SSS));
+
+	/* SLC flush and invalidate */
+	if (!ret)
+	{
+		reg = 0x100100 - RGX_OFFSET;
+		data = RGX_REG_READ(reg);
+		RGX_REG_WRITE(reg, data | (1 << 27));
+		RGX_REG_WRITE(reg, data);
+
+		/* write 1 to RGX_CR_SLC_CTRL_FLUSH_INVAL */
+		reg = 0x103818 - RGX_OFFSET;
+		data = 1;
+		RGX_REG_WRITE(reg, data);
+
+		count = 0;
+		/* Poll RGX_CR_SLC_STATUS0 */
+		reg = 0x103820 - RGX_OFFSET;
+		do {
+			udelay(500);
+			data = RGX_REG_READ(reg);
+		} while ((data & 0x2) && (count++ < 10000));
+
+		if (unlikely(count >= 10000))
+			PSB_DEBUG_PM("SLC: flush and invalide timeout\n" );
+	}
+
+	if (!ret) {
+		/* soc.gfx_wrapper.gbypassenable_sw = 1 */
+		reg = 0x160854 - GFX_WRAPPER_OFFSET;
+		data = WRAPPER_REG_READ(reg);
+		if (IS_TNG_B0(dev))
+			data |= 0x101; /*Disable Bypass SLC for VED on Merfld PR2 B0*/
+		else
+			data |= 0x100; /*Bypass SLC for VEC*/
+		WRAPPER_REG_WRITE(reg, data);
+	}
+
+	if (!ret && IS_TNG_B0(dev)) {
+		/* soc.gfx_wrapper.gclip_control.aes_bypass_disable = 1*/
+		reg = 0x160020 - GFX_WRAPPER_OFFSET;
+		data = WRAPPER_REG_READ(reg);
+
+		data |= 0x80;
+		WRAPPER_REG_WRITE(reg, data);
+	}
+
+	/* SLC hash set */
+	if (!ret)
+	{
+		reg = 0x103800 - RGX_OFFSET;
+		data = 0x200001;
+		RGX_REG_WRITE(reg, data);
+	}
+
+	ospm_pnp_settings(dev);
+
+	return !ret;
+}
+
+/**
+ * ospm_slc_power_down
+ *
+ * Power down SLC islands
+ * Sequence & flow from SAS
+ */
+static bool ospm_slc_power_down(struct drm_device *dev,
+			struct ospm_power_island *p_island)
+{
+	bool ret;
+
+	PSB_DEBUG_PM("%s: Pre-power-off Status = 0x%08x\n",
+		__func__,
+		intel_mid_msgbus_read32(PUNIT_PORT, NC_PM_SSS));
+
+	/* power down SLC islands */
+	ret = GFX_POWER_DOWN(PMU_SDKCK);
+
+	if (!ret)
+		ret = GFX_POWER_DOWN(PMU_SLC);
+	/* bind LDO power down with GFX */
+	/*
+	 *if (!ret)
+	 *	ret = GFX_POWER_DOWN(PMU_LDO);
+	 */
+	PSB_DEBUG_PM("Post-power-off Status = 0x%08x\n",
+		intel_mid_msgbus_read32(PUNIT_PORT, NC_PM_SSS));
+
+	return !ret;
+}
+
+/**
+ * ospm_gfx_init
+ *
+ * Graphics power island init
+ */
+void ospm_gfx_init(struct drm_device *dev,
+			struct ospm_power_island *p_island)
+{
+	if (IS_TNG_A0(dev))
+		is_tng_a0 = 1;
+
+	PSB_DEBUG_PM("%s\n", __func__);
+	p_island->p_funcs->power_up = ospm_gfx_power_up;
+	p_island->p_funcs->power_down = ospm_gfx_power_down;
+	p_island->p_dependency = get_island_ptr(NC_PM_SSS_GFX_SLC);
+}
+
+/**
+ * ospm_slc_init
+ *
+ * SLC power island init
+ */
+void ospm_slc_init(struct drm_device *dev,
+			struct ospm_power_island *p_island)
+{
+	if (IS_TNG_A0(dev))
+		is_tng_a0 = 1;
+
+	PSB_DEBUG_PM("%s\n", __func__);
+	p_island->p_funcs->power_up = ospm_slc_power_up;
+	p_island->p_funcs->power_down = ospm_slc_power_down;
+	p_island->p_dependency = NULL;
+}
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/ospm/gfx_ospm.h b/drivers/external_drivers/intel_media/display/tng/drv/ospm/gfx_ospm.h
new file mode 100644
index 0000000..26ca877
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/ospm/gfx_ospm.h
@@ -0,0 +1,43 @@
+/**************************************************************************
+ * Copyright (c) 2012, Intel Corporation.
+ * All Rights Reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Hitesh K. Patel <hitesh.k.patel@intel.com>
+ */
+
+#ifndef _TNG_GRAPHICS_OSPM_H_
+#define _TNG_GRAPHICS_OSPM_H_
+
+#include "pwr_mgmt.h"
+
+#define PMU_SLC			0x1
+#define PMU_SDKCK		0x2
+#define PMU_RSCD		0x4
+#define PMU_LDO			0x8
+
+void ospm_gfx_init(struct drm_device *dev,
+			struct ospm_power_island *p_island);
+void ospm_slc_init(struct drm_device *dev,
+			struct ospm_power_island *p_island);
+
+#endif	/* _TNG_GRAPHICS_OSPM_H_*/
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/ospm/gfx_ospm_ann.c b/drivers/external_drivers/intel_media/display/tng/drv/ospm/gfx_ospm_ann.c
new file mode 100644
index 0000000..c1f6a32
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/ospm/gfx_ospm_ann.c
@@ -0,0 +1,765 @@
+/**************************************************************************
+ * Copyright (c) 2012, Intel Corporation.
+ * All Rights Reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Hitesh K. Patel <hitesh.k.patel@intel.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/printk.h>
+#include <linux/delay.h>
+#include <asm/intel-mid.h>
+
+#include "psb_drv.h"
+#include "gfx_ospm_ann.h"
+#include "gfx_freq.h"
+#include "pmu_tng.h"
+#include "tng_wa.h"
+
+
+
+
+#define	USE_GFX_PM_FUNC			0
+
+/* WRAPPER Offset 0x160024 */
+#define GFX_STATUS_OFFSET		0x24
+
+#define GFX_POWER_UP(x) \
+	pmu_nc_set_power_state(x, OSPM_ISLAND_UP, GFX_SS_PM0)
+
+#define GFX_POWER_DOWN(x) \
+	pmu_nc_set_power_state(x, OSPM_ISLAND_DOWN, GFX_SS_PM0)
+
+extern IMG_BOOL gbSystemActivePMEnabled;
+extern IMG_BOOL gbSystemActivePMInit;
+
+enum GFX_ISLAND_STATUS {
+	POWER_ON = 0,		/* No gating (clk or power) */
+	CLOCK_GATED,		/* Clock Gating */
+	SOFT_RESET,		/* Soft Reset */
+	POWER_OFF,		/* Powered off or Power gated.*/
+};
+
+static int (*pSuspend_func)(void) = NULL;
+static int (*pResume_func)(void) = NULL;
+
+int is_tng_a0 = 0;
+EXPORT_SYMBOL(is_tng_a0);
+
+/**
+  * gpu_freq_code_to_mhz() - Given frequency as a code (as defined for *_PM1
+  * register), return frequency in mhz.
+  * @freq_code_in - Input: A frequency code as specified for *_PM1 registers.
+  * Function return value: corresponding frequency in MHz or < 0 if error.
+  */
+static int gpu_freq_code_to_mhz(int freq_code_in)
+{
+	int freq_mhz_out;
+
+	switch (freq_code_in) {
+	case IP_FREQ_100_00:
+		freq_mhz_out = 100;
+		break;
+	case IP_FREQ_106_67:
+		freq_mhz_out = 106;
+		break;
+	case IP_FREQ_133_30:
+		freq_mhz_out = 133;
+		break;
+	case IP_FREQ_160_00:
+		freq_mhz_out = 160;
+		break;
+	case IP_FREQ_177_78:
+		freq_mhz_out = 177;
+		break;
+	case IP_FREQ_200_00:
+		freq_mhz_out = 200;
+		break;
+	case IP_FREQ_213_33:
+		freq_mhz_out = 213;
+		break;
+	case IP_FREQ_266_67:
+		freq_mhz_out = 266;
+		break;
+	case IP_FREQ_320_00:
+		freq_mhz_out = 320;
+		break;
+	case IP_FREQ_355_56:
+		freq_mhz_out = 355;
+		break;
+	case IP_FREQ_400_00:
+		freq_mhz_out = 400;
+		break;
+	case IP_FREQ_457_14:
+		freq_mhz_out = 457;
+		break;
+	case IP_FREQ_533_33:
+		freq_mhz_out = 533;
+		break;
+	case IP_FREQ_640_00:
+		freq_mhz_out = 640;
+		break;
+	case IP_FREQ_800_00:
+		freq_mhz_out = 800;
+		break;
+	default:
+		printk(KERN_ALERT "%s: Invalid freq code: %#x\n", __func__,
+			freq_code_in);
+		return -EINVAL;
+	}
+
+	return freq_mhz_out;
+}
+
+/**
+ * mrfl_pwr_cmd_gfx - Change graphics power state.
+ * Change island power state in the require sequence.
+ *
+ * @gfx_mask: Mask of islands to be changed.
+ * @new_state: 0 for power-off, 1 for power-on.
+ */
+#ifdef USE_GFX_INTERNAL_PM_FUNC
+static int mrfl_pwr_cmd_gfx(u32 gfx_mask, int new_state)
+{
+	/*
+	 * pwrtab - gfx pwr sub-islands in required power-up order and
+	 * in reverse of required power-down order.
+	 */
+	static const u32 pwrtab[] = {
+		GFX_SLC_LDO_SHIFT,
+		GFX_SLC_SHIFT,
+		GFX_SDKCK_SHIFT,
+		GFX_RSCD_SHIFT,
+	};
+	const int pwrtablen = ARRAY_SIZE(pwrtab);
+	int i;
+	int j;
+	int ret;
+	u32 ns_mask;
+	u32 done_mask;
+	u32 this_mask;
+	u32 pwr_state_prev;
+
+	pwr_state_prev = intel_mid_msgbus_read32(PUNIT_PORT, GFX_SS_PM0);
+
+	if (new_state == OSPM_ISLAND_UP)
+		ns_mask = TNG_COMPOSITE_I0;
+	else
+		ns_mask = TNG_COMPOSITE_D3;
+
+	/*  Call underlying function separately for each step in the
+	    power sequence. */
+	done_mask = 0;
+	for (i = 0; i < pwrtablen ; i++) {
+		if (new_state == OSPM_ISLAND_UP)
+			j = i;
+		else
+			j = pwrtablen - i - 1;
+
+		done_mask |= TNG_SSC_MASK << pwrtab[j];
+		this_mask = gfx_mask & done_mask;
+		if (this_mask) {
+		/*  FIXME - if (new_state == 0), check for required
+			    conditions per the SAS. */
+			ret = pmu_set_power_state_tng(GFX_SS_PM0,
+					this_mask, ns_mask);
+			if (ret)
+			return ret;
+		}
+
+		/**
+		 * If turning some power on, and the power to be on includes SLC,
+		 * and SLC was not previously on, then setup some registers.
+		 */
+		if ((new_state == OSPM_ISLAND_UP)
+			&& (pwrtab[j] == GFX_SLC_SHIFT)
+			&& ((pwr_state_prev >> GFX_SLC_SHIFT) != TNG_SSC_I0))
+		{
+			/* TNG A0 workarounds */
+			if (IS_TNG_A0(dev))
+				apply_TNG_A0_workarounds(OSPM_GRAPHICS_ISLAND, 1);
+
+			/* ANN A0 workarounds */
+			if (IS_ANN(dev))
+				apply_ANN_A0_workarounds(OSPM_GRAPHICS_ISLAND, 1);
+		}
+
+		if ((gfx_mask & ~done_mask) == 0)
+			break;
+	}
+
+	return 0;
+}
+#endif
+
+/**
+ * pm_cmd_freq_wait() - Wait for frequency valid via specified register.
+ * Optionally, return realized frequency to caller.
+ * @reg_freq: The frequency control register.  One of *_PM1.
+ * @freq_code_rlzd - If non-NULL, pointer to receive the realized Tangier
+ * frequency code.
+ */
+static int pm_cmd_freq_wait(u32 reg_freq, u32 *freq_code_rlzd)
+{
+	int tcount;
+	u32 freq_val;
+
+	for (tcount = 0; ; tcount++) {
+		freq_val = intel_mid_msgbus_read32(PUNIT_PORT, reg_freq);
+		if ((freq_val & IP_FREQ_VALID) == 0)
+			break;
+		if (tcount > 1500) {
+			WARN(1, "%s: P-Unit freq request wait timeout",
+				__func__);
+			return -EBUSY;
+		}
+		udelay(1);
+	}
+
+	if (freq_code_rlzd) {
+		*freq_code_rlzd = ((freq_val >> IP_FREQ_STAT_POS) &
+			IP_FREQ_MASK);
+	}
+
+	return 0;
+}
+
+
+/**
+ * pm_cmd_freq_set() - Set operating frequency via specified register.
+ * Optionally, return realized frequency to caller.
+ * @reg_freq: The frequency control register.  One of *_PM1.
+ * @freq_code: Tangier frequency code.
+ * @p_freq_code_rlzd - If non-NULL, pointer to receive the realized Tangier
+ * frequency code.
+ */
+static int pm_cmd_freq_set(u32 reg_freq, u32 freq_code, u32 *p_freq_code_rlzd)
+{
+	u32 freq_val;
+	u32 freq_code_realized;
+	int rva;
+
+	rva = pm_cmd_freq_wait(reg_freq, NULL);
+	if (rva < 0) {
+		printk(KERN_ALERT "%s: pm_cmd_freq_wait 1 failed\n", __func__);
+		return rva;
+	}
+
+	freq_val = IP_FREQ_VALID | freq_code;
+	intel_mid_msgbus_write32(PUNIT_PORT, reg_freq, freq_val);
+
+	rva = pm_cmd_freq_wait(reg_freq, &freq_code_realized);
+	if (rva < 0) {
+		printk(KERN_ALERT "%s: pm_cmd_freq_wait 2 failed\n", __func__);
+		return rva;
+	}
+
+	if (p_freq_code_rlzd)
+		*p_freq_code_rlzd = freq_code_realized;
+
+	return rva;
+}
+
+
+/**
+ * pm_cmd_freq_from_code() - Set operating frequency via specified register.
+ * Optionally, return realized frequency to caller.
+ * @reg_freq: The frequency control register.  One of *_PM1.
+ * @freq_code: Tangier frequency code.
+ * @function return value: - <0 if error, or frequency in MHz.
+ */
+int gpu_freq_set_from_code(int freq_code)
+{
+	u32 freq_realized_code;
+	int rva;
+
+	rva = pm_cmd_freq_set(GFX_SS_PM1, freq_code, &freq_realized_code);
+	if (rva < 0)
+		return rva;
+
+	return gpu_freq_code_to_mhz(freq_realized_code);
+}
+EXPORT_SYMBOL(gpu_freq_set_from_code);
+
+
+/**
+  * gpu_freq_mhz_to_code() - Given frequency in MHz, return frequency code
+  * used for frequency control.
+  * Always pick the code less than equal to the integer MHz value.
+  * @freq_mhz_in - Input: A MHz frequency specification.
+  * @*p_freq_out - Out: The quantized MHz frequency specification.
+  * Function return value: frequency code as in register definition.
+  */
+int gpu_freq_mhz_to_code(int freq_mhz_in, int *p_freq_out)
+{
+	int freq_code;
+	int freq_out;
+
+	if (freq_mhz_in >= 800) {
+		freq_code = IP_FREQ_800_00;	/* 800.00 */
+		freq_out = 800;
+	} else if (freq_mhz_in >= 640) {
+		freq_code = IP_FREQ_640_00;	/* 640.00 */
+		freq_out = 640;
+	} else if (freq_mhz_in >= 533) {
+		freq_code = IP_FREQ_533_33;	/* 533.33 */
+		freq_out = 533;
+	} else if (freq_mhz_in >= 457) {
+		freq_code = IP_FREQ_457_14;	/* 457.14 */
+		freq_out = 457;
+	} else if (freq_mhz_in >= 400) {
+		freq_code = IP_FREQ_400_00;	/* 400.00 */
+		freq_out = 400;
+	} else if (freq_mhz_in >= 355) {
+		freq_code = IP_FREQ_355_56;	/* 355.56 */
+		freq_out = 355;
+	} else if (freq_mhz_in >= 320) {
+		freq_code = IP_FREQ_320_00;	/* 320.00 */
+		freq_out = 320;
+	} else if (freq_mhz_in >= 266) {
+		freq_code = IP_FREQ_266_67;	/* 266.67 */
+		freq_out = 266;
+	} else if (freq_mhz_in >= 213) {
+		freq_code = IP_FREQ_213_33;	/* 213.33 */
+		freq_out = 213;
+	} else if (freq_mhz_in >= 200) {
+		freq_code = IP_FREQ_200_00;	/* 200.00 */
+		freq_out = 200;
+	} else if (freq_mhz_in >= 177) {
+		freq_code = IP_FREQ_177_78;	/* 177.78 */
+		freq_out = 177;
+	} else if (freq_mhz_in >= 160) {
+		freq_code = IP_FREQ_160_00;	/* 160.00 */
+		freq_out = 160;
+	} else if (freq_mhz_in >= 133) {
+		freq_code = IP_FREQ_133_30;	/* 133.30 */
+		freq_out = 133;
+	} else if (freq_mhz_in >= 106) {
+		freq_code = IP_FREQ_106_67;	/* 106.67 */
+		freq_out = 106;
+	} else {
+		freq_code = IP_FREQ_100_00;	/* 100.00 */
+		freq_out = 100;
+	}
+
+	*p_freq_out = freq_out;
+
+	return freq_code;
+}
+EXPORT_SYMBOL(gpu_freq_mhz_to_code);
+
+void gpu_freq_set_suspend_func(int (*suspend_func)(void))
+{
+	pSuspend_func = suspend_func;
+	PSB_DEBUG_PM("OSPM: suspend \n");
+}
+EXPORT_SYMBOL(gpu_freq_set_suspend_func);
+
+void gpu_freq_set_resume_func(int (*resume_func)(void))
+{
+	pResume_func = resume_func;
+	PSB_DEBUG_PM("OSPM: Resume \n");
+}
+EXPORT_SYMBOL(gpu_freq_set_resume_func);
+
+/***********************************************************
+ * All Graphics Island
+ ***********************************************************/
+
+/**
+ * ospm_rscd_power_up
+ *
+ * Power up Rascal/Dust islands
+ * Sequence & flow from SAS
+ */
+static bool ospm_rscd_power_up(struct drm_device *dev,
+			struct ospm_power_island *p_island)
+{
+	bool ret;
+	int error = 0;
+
+	if (pResume_func) {
+		error = (*pResume_func)();
+		if (error) {
+			PSB_DEBUG_PM("OSPM: Could not resume DFRGX");
+			return false;
+		}
+	}
+
+	PSB_DEBUG_PM("Pre-power-up status = 0x%08x\n",
+		intel_mid_msgbus_read32(PUNIT_PORT, NC_PM_SSS));
+
+	ret = GFX_POWER_UP(PMU_RSCD);
+
+	PSB_DEBUG_PM("Post-power-up status = 0x%08x\n",
+		intel_mid_msgbus_read32(PUNIT_PORT, NC_PM_SSS));
+
+	/* If APM is enabled, then we need to make sure that the IRQs
+	 * are installed. It is possible the the GUnit has been turned
+	 * off and the IER and IMR registers have lost their state.
+	 * So we need to enable interrupts after powering on.
+	 * If the IRQs are not turned on, the interrupt sent from RGX
+	 * to indicate that it is done with processing is lost. RGX
+	 * island would then remain ON.
+	 */
+	psb_irq_preinstall_islands(dev, OSPM_GRAPHICS_ISLAND);
+	psb_irq_postinstall_islands(dev, OSPM_GRAPHICS_ISLAND);
+
+	return !ret;
+}
+
+/**
+ * ospm_rscd_power_down
+ *
+ * Power down Rascal/Dust islands
+ * Sequence & flow from SAS
+ */
+static bool ospm_rscd_power_down(struct drm_device *dev,
+			struct ospm_power_island *p_island)
+{
+	bool ret;
+	int error = 0;
+
+	PSB_DEBUG_PM("OSPM: ospm_gfx_power_down \n");
+
+	if (pSuspend_func) {
+	error = (*pSuspend_func)();
+		if (error) {
+			PSB_DEBUG_PM("OSPM :Could not suspend DFRGX");
+			return false;
+		}
+	}
+
+	PSB_DEBUG_PM("Pre-power-off Status = 0x%08x\n",
+		intel_mid_msgbus_read32(PUNIT_PORT, NC_PM_SSS));
+
+	/* If APM is enabled, we can turn off the RGX interrupts. This is
+	 * kind of a no-op but still better coding to turn of IRQs for
+	 * devices/ components that are turned off
+	 */
+	psb_irq_uninstall_islands(dev, OSPM_GRAPHICS_ISLAND);
+	synchronize_irq(dev->pdev->irq);
+
+	/* power down every thing */
+	ret = GFX_POWER_DOWN(PMU_RSCD);
+
+	PSB_DEBUG_PM("Post-power-off Status = 0x%08x\n",
+		intel_mid_msgbus_read32(PUNIT_PORT, NC_PM_SSS));
+
+	return !ret;
+}
+
+/**
+ * ospm_rscd_init
+ *
+ * Rascal/Dut power island init
+ */
+void ospm_rscd_init(struct drm_device *dev,
+		struct ospm_power_island *p_island)
+{
+	if (IS_TNG_A0(dev))
+		is_tng_a0 = 1;
+
+	PSB_DEBUG_PM("%s\n", __func__);
+	p_island->p_funcs->power_up = ospm_rscd_power_up;
+	p_island->p_funcs->power_down = ospm_rscd_power_down;
+	p_island->p_dependency = get_island_ptr(NC_PM_SSS_GFX_SDKCK);
+}
+
+/**
+ * ospm_sidekick_power_up
+ *
+ * Power up Sidekick island
+ * Sequence & flow from SAS
+ */
+static bool ospm_sidekick_power_up(struct drm_device *dev,
+		struct ospm_power_island *p_island)
+{
+	bool ret;
+
+	PSB_DEBUG_PM("Pre-power-up status = 0x%08x\n",
+			intel_mid_msgbus_read32(PUNIT_PORT, NC_PM_SSS));
+
+	ret = GFX_POWER_UP(PMU_SDKCK);
+
+	PSB_DEBUG_PM("Post-power-up status = 0x%08x\n",
+			intel_mid_msgbus_read32(PUNIT_PORT, NC_PM_SSS));
+
+#if 0
+	/* SLC flush and invalidate */
+	if (!ret)
+	{
+		uint32_t reg, data, count;
+
+		reg = 0x100100 - RGX_OFFSET;
+		data = RGX_REG_READ(reg);
+		RGX_REG_WRITE(reg, data | (1 << 27));
+		RGX_REG_WRITE(reg, data);
+
+		/* write 1 to RGX_CR_SLC_CTRL_FLUSH_INVAL */
+		reg = 0x103818 - RGX_OFFSET;
+		data = 1;
+		RGX_REG_WRITE(reg, data);
+
+		count = 0;
+		/* Poll RGX_CR_SLC_STATUS0 */
+		reg = 0x103820 - RGX_OFFSET;
+		do {
+			udelay(500);
+			data = RGX_REG_READ(reg);
+		} while ((data & 0x2) && (count++ < 10000));
+
+		if (unlikely(count >= 10000))
+			PSB_DEBUG_PM("SLC: flush and invalide timeout\n" );
+	}
+#endif
+
+	if (!ret && IS_TNG_B0(dev)) {
+		uint32_t reg, data;
+		/* soc.gfx_wrapper.gclip_control.aes_bypass_disable = 1*/
+		reg = 0x160020 - GFX_WRAPPER_OFFSET;
+		data = WRAPPER_REG_READ(reg);
+
+		data |= 0x80;
+		WRAPPER_REG_WRITE(reg, data);
+	}
+
+#if 0
+	/* SLC hash set */
+	if (!ret)
+	{
+		uint32_t reg, data;
+		reg = 0x103800 - RGX_OFFSET;
+		data = 0x200001;
+		RGX_REG_WRITE(reg, data);
+	}
+#endif
+
+	return !ret;
+}
+
+/**
+ * ospm_sidekick_power_down
+ *
+ * Power down Sidekick island
+ * Sequence & flow from SAS
+ */
+static bool ospm_sidekick_power_down(struct drm_device *dev,
+		struct ospm_power_island *p_island)
+{
+	bool ret;
+
+	PSB_DEBUG_PM("Pre-power-off Status = 0x%08x\n",
+			intel_mid_msgbus_read32(PUNIT_PORT, NC_PM_SSS));
+
+	/* power down every thing */
+	ret = GFX_POWER_DOWN(PMU_SDKCK);
+
+	PSB_DEBUG_PM("Post-power-off Status = 0x%08x\n",
+			intel_mid_msgbus_read32(PUNIT_PORT, NC_PM_SSS));
+
+	return !ret;
+}
+
+/**
+ * ospm_sidekick_init
+ *
+ * Sidekick power island init
+ */
+void ospm_sidekick_init(struct drm_device *dev,
+		struct ospm_power_island *p_island)
+{
+	PSB_DEBUG_PM("%s\n", __func__);
+	p_island->p_funcs->power_up = ospm_sidekick_power_up;
+	p_island->p_funcs->power_down = ospm_sidekick_power_down;
+	p_island->p_dependency = get_island_ptr(NC_PM_SSS_GFX_SLC);
+}
+
+static void ospm_pnp_settings(struct drm_device *dev)
+{
+	uint32_t reg, data;
+
+	reg = 0x160008 - GFX_WRAPPER_OFFSET;
+	data = 0x0;
+	WRAPPER_REG_WRITE(reg, data);
+
+	reg = 0x160028 - GFX_WRAPPER_OFFSET;
+	data = WRAPPER_REG_READ(reg);
+	/*
+	GCILP_ARB_CONTROL[3:0] = SLCRD_WEIGHT = 3
+	GCILP_ARB_CONTROL[7:4] = SLCWR_WEIGHT = 3
+	GCILP_ARB_CONTROL[11:8] = VED_WEIGHT = 3
+	GCILP_ARB_CONTROL[15:12] = VEC_WEIGHT = 3
+	GCILP_ARB_CONTROL[19:16] = VSP_WEIGHT = 3
+	GCILP_ARB_CONTROL[23:20] = FIRST_ARB_WEIGHT = 3
+	GCILP_ARB_CONTROL[31] = ARB_MODE = 0
+	*/
+	data |= 0x333333;
+	WRAPPER_REG_WRITE(reg, data);
+
+	return ;
+}
+
+/**
+ * ospm_slc_power_up
+ *
+ * Power up slc islands
+ * Sequence & flow from SAS
+ */
+static bool ospm_slc_power_up(struct drm_device *dev,
+			struct ospm_power_island *p_island)
+{
+	bool ret;
+
+	PSB_DEBUG_PM("%s: Pre-power-off Status = 0x%08x\n",
+		__func__,
+		intel_mid_msgbus_read32(PUNIT_PORT, NC_PM_SSS));
+
+	ret = GFX_POWER_UP(PMU_SLC);
+
+	if (!ret && IS_ANN(dev))
+		apply_ANN_A0_workarounds(OSPM_GRAPHICS_ISLAND, 1);
+
+	ospm_pnp_settings(dev);
+
+	PSB_DEBUG_PM("Post-power-up status = 0x%08x\n",
+		intel_mid_msgbus_read32(PUNIT_PORT, NC_PM_SSS));
+
+	return !ret;
+
+	if (!ret) {
+		uint32_t reg, data;
+
+		/* soc.gfx_wrapper.gbypassenable_sw = 1 */
+		reg = 0x160854 - GFX_WRAPPER_OFFSET;
+		data = WRAPPER_REG_READ(reg);
+		data |= 0x100; /*Bypass SLC for VEC*/
+		WRAPPER_REG_WRITE(reg, data);
+	}
+
+	return !ret;
+}
+
+/**
+ * ospm_slc_power_down
+ *
+ * Power down SLC islands
+ * Sequence & flow from SAS
+ */
+static bool ospm_slc_power_down(struct drm_device *dev,
+			struct ospm_power_island *p_island)
+{
+	bool ret;
+
+	PSB_DEBUG_PM("%s: Pre-power-off Status = 0x%08x\n",
+		__func__,
+		intel_mid_msgbus_read32(PUNIT_PORT, NC_PM_SSS));
+
+	ret = GFX_POWER_DOWN(PMU_SLC);
+
+	PSB_DEBUG_PM("Post-power-off Status = 0x%08x\n",
+		intel_mid_msgbus_read32(PUNIT_PORT, NC_PM_SSS));
+
+	return !ret;
+}
+
+/**
+ * ospm_slc_init
+ *
+ * SLC power island init
+ */
+void ospm_slc_init(struct drm_device *dev,
+			struct ospm_power_island *p_island)
+{
+	if (IS_TNG_A0(dev))
+		is_tng_a0 = 1;
+
+	PSB_DEBUG_PM("%s\n", __func__);
+	p_island->p_funcs->power_up = ospm_slc_power_up;
+	p_island->p_funcs->power_down = ospm_slc_power_down;
+	p_island->p_dependency = get_island_ptr(NC_PM_SSS_GFX_SLC_LDO);
+}
+
+/**
+ * ospm_slc_ldo_power_up
+ *
+ * Power up SLC LDO island
+ * Sequence & flow from SAS
+ */
+static bool ospm_slc_ldo_power_up(struct drm_device *dev,
+			struct ospm_power_island *p_island)
+{
+	bool ret;
+
+	PSB_DEBUG_PM("%s: Pre-power-off Status = 0x%08x\n",
+		__func__,
+		intel_mid_msgbus_read32(PUNIT_PORT, NC_PM_SSS));
+
+	ret = GFX_POWER_UP(PMU_LDO);
+
+	PSB_DEBUG_PM("Post-power-up status = 0x%08x\n",
+		intel_mid_msgbus_read32(PUNIT_PORT, NC_PM_SSS));
+
+	return !ret;
+}
+
+/**
+ * ospm_slc_ldo_power_down
+ *
+ * Power down SLC LDO island
+ * Sequence & flow from SAS
+ */
+static bool ospm_slc_ldo_power_down(struct drm_device *dev,
+			struct ospm_power_island *p_island)
+{
+	bool ret;
+
+	PSB_DEBUG_PM("%s: Pre-power-off Status = 0x%08x\n",
+		__func__,
+		intel_mid_msgbus_read32(PUNIT_PORT, NC_PM_SSS));
+
+	ret = GFX_POWER_DOWN(PMU_LDO);
+
+	PSB_DEBUG_PM("Post-power-off Status = 0x%08x\n",
+		intel_mid_msgbus_read32(PUNIT_PORT, NC_PM_SSS));
+
+	return !ret;
+}
+
+/**
+ * ospm_slc_ldo_init
+ *
+ * SLC LDO power island init
+ */
+void ospm_slc_ldo_init(struct drm_device *dev,
+			struct ospm_power_island *p_island)
+{
+	if (IS_TNG_A0(dev))
+		is_tng_a0 = 1;
+
+	PSB_DEBUG_PM("%s\n", __func__);
+	p_island->p_funcs->power_up = ospm_slc_ldo_power_up;
+	p_island->p_funcs->power_down = ospm_slc_ldo_power_down;
+	p_island->p_dependency = NULL;
+}
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/ospm/gfx_ospm_ann.h b/drivers/external_drivers/intel_media/display/tng/drv/ospm/gfx_ospm_ann.h
new file mode 100644
index 0000000..0133ebd
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/ospm/gfx_ospm_ann.h
@@ -0,0 +1,47 @@
+/**************************************************************************
+ * Copyright (c) 2012, Intel Corporation.
+ * All Rights Reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Hitesh K. Patel <hitesh.k.patel@intel.com>
+ */
+
+#ifndef _TNG_GRAPHICS_OSPM_H_
+#define _TNG_GRAPHICS_OSPM_H_
+
+#include "pwr_mgmt.h"
+
+#define PMU_SLC			0x1
+#define PMU_SDKCK		0x2
+#define PMU_RSCD		0x4
+#define PMU_LDO			0x8
+
+void ospm_rscd_init(struct drm_device *dev,
+		struct ospm_power_island *p_island);
+void ospm_sidekick_init(struct drm_device *dev,
+		struct ospm_power_island *p_island);
+void ospm_slc_init(struct drm_device *dev,
+		struct ospm_power_island *p_island);
+void ospm_slc_ldo_init(struct drm_device *dev,
+		struct ospm_power_island *p_island);
+
+#endif	/* _TNG_GRAPHICS_OSPM_H_*/
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/ospm/gfx_rtpm.c b/drivers/external_drivers/intel_media/display/tng/drv/ospm/gfx_rtpm.c
new file mode 100755
index 0000000..f576026
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/ospm/gfx_rtpm.c
@@ -0,0 +1,102 @@
+/**************************************************************************
+ * Copyright (c) 2012, Intel Corporation.
+ * All Rights Reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Hitesh K. Patel <hitesh.k.patel@intel.com>
+ */
+
+#include <linux/mutex.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_scu_ipc.h>
+
+#include <linux/pm_runtime.h>
+#include "psb_drv.h"
+#include "pwr_mgmt.h"
+
+extern struct ospm_power_island island_list[9];
+extern struct drm_device *gpDrmDevice;
+
+int rtpm_suspend(struct device *dev)
+{
+	struct drm_psb_private *dev_priv = gpDrmDevice->dev_private;
+	PSB_DEBUG_PM("%s\n", __func__);
+
+	rtpm_suspend_pci();
+	if (pm_qos_request_active(&dev_priv->s0ix_qos))
+		pm_qos_remove_request(&dev_priv->s0ix_qos);
+
+	return 0;
+}
+
+int rtpm_resume(struct device *dev)
+{
+	struct drm_psb_private *dev_priv = gpDrmDevice->dev_private;
+	PSB_DEBUG_PM("%s\n", __func__);
+	pm_qos_add_request(&dev_priv->s0ix_qos,
+			PM_QOS_CPU_DMA_LATENCY, CSTATE_EXIT_LATENCY_S0i1 - 1);
+	/* No OPs of GFX/VED/VEC/VSP/DISP */
+	rtpm_resume_pci();
+
+	return 0;
+}
+
+int rtpm_idle(struct device *dev)
+{
+	int ref_count = 0;
+	int i;
+
+	PSB_DEBUG_PM("%s\n", __func__);
+
+	for (i = 0; i < ARRAY_SIZE(island_list); i++)
+		ref_count += atomic_read(&island_list[i].ref_count);
+
+	if (ref_count) {
+		PSB_DEBUG_PM("%s return busy\n", __func__);
+		return -EBUSY;
+	} else
+		return 0;
+}
+
+int rtpm_allow(struct drm_device *dev)
+{
+	PSB_DEBUG_PM("%s\n", __func__);
+	pm_runtime_allow(&dev->pdev->dev);
+	return 0;
+}
+
+void rtpm_forbid(struct drm_device *dev)
+{
+	PSB_DEBUG_PM("%s\n", __func__);
+	pm_runtime_forbid(&dev->pdev->dev);
+	return;
+}
+
+void rtpm_init(struct drm_device *dev)
+{
+	rtpm_allow(dev);
+}
+
+void rtpm_uninit(struct drm_device *dev)
+{
+	pm_runtime_get_noresume(&dev->pdev->dev);
+}
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/ospm/gfx_rtpm.h b/drivers/external_drivers/intel_media/display/tng/drv/ospm/gfx_rtpm.h
new file mode 100644
index 0000000..9c9388b
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/ospm/gfx_rtpm.h
@@ -0,0 +1,47 @@
+/**************************************************************************
+ * Copyright (c) 2012, Intel Corporation.
+ * All Rights Reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Hitesh K. Patel <hitesh.k.patel@intel.com>
+ */
+
+#ifndef _INTEL_MEDIA_RUNTIME_PM_H_
+#define _INTEL_MEDIA_RUNTIME_PM_H_
+
+#include <linux/types.h>
+#include <drm/drmP.h>
+
+void rtpm_init(struct drm_device *dev);
+void rtpm_uninit(struct drm_device *dev);
+
+/*
+* GFX-Runtime PM callbacks
+*/
+int rtpm_suspend(struct device *dev);
+int rtpm_resume(struct device *dev);
+int rtpm_idle(struct device *dev);
+int rtpm_allow(struct drm_device *dev);
+void rtpm_forbid(struct drm_device *dev);
+void rtpm_suspend_pci(void);
+void rtpm_resume_pci(void);
+#endif /* _INTEL_MEDIA_RUNTIME_PM_H_ */
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/ospm/pwr_mgmt.c b/drivers/external_drivers/intel_media/display/tng/drv/ospm/pwr_mgmt.c
new file mode 100755
index 0000000..2b124cb
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/ospm/pwr_mgmt.c
@@ -0,0 +1,881 @@
+/**************************************************************************
+ * Copyright (c) 2012, Intel Corporation.
+ * All Rights Reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Hitesh K. Patel <hitesh.k.patel@intel.com>
+ */
+
+
+#include <linux/spinlock.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_scu_ipc.h>
+
+#include <linux/pm_runtime.h>
+
+#include "psb_drv.h"
+#include "pmu_tng.h"
+#include "tng_wa.h"
+#include "pwr_mgmt.h"
+#include "gfx_rtpm.h"
+#ifdef CONFIG_MOOREFIELD
+#include "gfx_ospm_ann.h"
+#else
+#include "gfx_ospm.h"
+#endif
+#include "dc_ospm.h"
+#include "dc_maxfifo.h"
+#include "video_ospm.h"
+#include "early_suspend.h"
+#include "early_suspend_sysfs.h"
+
+
+struct _ospm_data_ *g_ospm_data;
+struct drm_device *gpDrmDevice;
+
+/* island, state, ref_count, init_func, power_func */
+#ifdef CONFIG_MOOREFIELD
+struct ospm_power_island island_list[] = {
+	{OSPM_DISPLAY_A, OSPM_POWER_OFF, {0}, ospm_disp_a_init, NULL},
+	{OSPM_DISPLAY_B, OSPM_POWER_OFF, {0}, ospm_disp_b_init, NULL},
+	{OSPM_DISPLAY_C, OSPM_POWER_OFF, {0}, ospm_disp_c_init, NULL},
+	{OSPM_DISPLAY_MIO, OSPM_POWER_OFF, {0}, ospm_mio_init, NULL},
+	{OSPM_DISPLAY_HDMI, OSPM_POWER_OFF, {0}, ospm_hdmi_init, NULL},
+	{OSPM_GRAPHICS_ISLAND, OSPM_POWER_OFF, {0}, ospm_rscd_init, NULL},
+	{OSPM_SIDEKICK_ISLAND, OSPM_POWER_OFF, {0}, ospm_sidekick_init, NULL},
+	{OSPM_SLC_ISLAND, OSPM_POWER_OFF, {0}, ospm_slc_init, NULL},
+	{OSPM_SLC_LDO_ISLAND, OSPM_POWER_OFF, {0}, ospm_slc_ldo_init, NULL},
+	{OSPM_VIDEO_VPP_ISLAND, OSPM_POWER_OFF, {0}, ospm_vsp_init, NULL},
+	{OSPM_VIDEO_DEC_ISLAND, OSPM_POWER_OFF, {0}, ospm_ved_init, NULL},
+	{OSPM_VIDEO_ENC_ISLAND, OSPM_POWER_OFF, {0}, ospm_vec_init, NULL},
+};
+#else
+struct ospm_power_island island_list[] = {
+	{OSPM_DISPLAY_A, OSPM_POWER_OFF, {0}, ospm_disp_a_init, NULL},
+	{OSPM_DISPLAY_B, OSPM_POWER_OFF, {0}, ospm_disp_b_init, NULL},
+	{OSPM_DISPLAY_C, OSPM_POWER_OFF, {0}, ospm_disp_c_init, NULL},
+	{OSPM_DISPLAY_MIO, OSPM_POWER_OFF, {0}, ospm_mio_init, NULL},
+	{OSPM_DISPLAY_HDMI, OSPM_POWER_OFF, {0}, ospm_hdmi_init, NULL},
+	{OSPM_GRAPHICS_ISLAND, OSPM_POWER_OFF, {0}, ospm_gfx_init, NULL},
+	{OSPM_SLC_ISLAND, OSPM_POWER_OFF, {0}, ospm_slc_init, NULL},
+	{OSPM_VIDEO_VPP_ISLAND, OSPM_POWER_OFF, {0}, ospm_vsp_init, NULL},
+	{OSPM_VIDEO_DEC_ISLAND, OSPM_POWER_OFF, {0}, ospm_ved_init, NULL},
+	{OSPM_VIDEO_ENC_ISLAND, OSPM_POWER_OFF, {0}, ospm_vec_init, NULL},
+};
+#endif
+
+/**
+ * in_atomic_or_interrupt() - Return non-zero if in atomic context.
+ * Problems with this code:
+ * - Function in_atomic is not guaranteed to detect the atomic state entered
+ *   by acquisition of a spinlock (and indeed does so only if CONFIG_PREEMPT).
+ *   For a discussion on the use of in_atomic and why is it considered (in
+ *   general) problematic, see: http://lwn.net/Articles/274695/
+ * - Therefore, scripts/checkpatch.pl will complain about use of function
+ *   in_atomic in non-core kernel.  For this reason, the several uses of
+ *   in_atomic in this file were centralized here (so only one warning).
+ *
+ * Note: The test herein was originally:
+ *   in_atomic() || in_interrupt()
+ * but the test for in_interrupt() is redundant with the in_atomic test.
+ */
+#if !defined CONFIG_PREEMPT
+#error Function in_atomic (in general) requires CONFIG_PREEMPT
+#endif
+
+#undef OSPM_DEBUG_INFO
+#ifdef OSPM_DEBUG_INFO
+const char *get_island_name(u32 hw_island)
+{
+	const char *pstr;
+
+	switch (hw_island) {
+	case OSPM_DISPLAY_A:
+		pstr = "DISP A ";
+		break;
+	case OSPM_DISPLAY_B:
+		pstr = "DISP B ";
+		break;
+	case OSPM_DISPLAY_C:
+		pstr = "DISP C ";
+		break;
+	case OSPM_DISPLAY_MIO:
+		pstr = "MIO    ";
+		break;
+	case OSPM_DISPLAY_HDMI:
+		pstr = "HDMI   ";
+		break;
+	case OSPM_VIDEO_VPP_ISLAND:
+		pstr = "VSP    ";
+		break;
+	case OSPM_VIDEO_DEC_ISLAND:
+		pstr = "VED    ";
+		break;
+	case OSPM_VIDEO_ENC_ISLAND:
+		pstr = "VEC    ";
+		break;
+	case OSPM_GRAPHICS_ISLAND:
+		pstr = "GFX    ";
+		break;
+	default:
+		pstr = "(unknown hw_island)";
+		break;
+	}
+
+	return pstr;
+}
+
+static void dump_ref_count(u32 hw_island)
+{
+	int i = 0;
+	int ref_value = 0;
+	struct ospm_power_island *p_island = NULL;
+
+	PSB_DEBUG_PM("*** power island refrence count. ***\n");
+
+	for (i = 0; i < ARRAY_SIZE(island_list); i++) {
+		if (hw_island & island_list[i].island) {
+			p_island = &island_list[i];
+			ref_value = atomic_read(&p_island->ref_count);
+			printk(KERN_ALERT
+				"*** %s: %d\n",
+				get_island_name(island_list[i].island),
+				ref_value);
+		}
+	}
+
+	OSPM_DPF("%s: ************************************\n");
+}
+#endif	/* OSPM_DEBUG_INFO */
+
+/**
+ * ospm_suspend_pci
+ *
+ * Description: Suspend the pci device saving state and disabling
+ * as necessary.
+ */
+static void ospm_suspend_pci(struct drm_device *dev)
+{
+	struct pci_dev *pdev = dev->pdev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	int bsm, vbt, bgsm;
+
+	PSB_DEBUG_PM("%s\n", __func__);
+
+	pci_read_config_dword(pdev, 0x5C, &bsm);
+	dev_priv->saveBSM = bsm;
+	pci_read_config_dword(pdev, 0xFC, &vbt);
+	dev_priv->saveVBT = vbt;
+	pci_read_config_dword(pdev, 0x70, &bgsm);
+	dev_priv->saveBGSM = bgsm;
+	pci_read_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, &dev_priv->msi_addr);
+	pci_read_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, &dev_priv->msi_data);
+	pci_save_state(pdev);
+	pci_set_power_state(pdev, PCI_D3hot);
+}
+
+/**
+ * ospm_resume_pci
+ *
+ * Description: Resume the pci device restoring state and enabling
+ * as necessary.
+ */
+static void ospm_resume_pci(struct drm_device *dev)
+{
+	struct pci_dev *pdev = dev->pdev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	int mmadr, ret = 0;
+
+	PSB_DEBUG_PM("%s\n", __func__);
+
+	if (dev_priv->saveBGSM != 0)
+		pci_write_config_dword(pdev, 0x70, dev_priv->saveBGSM);
+
+	if (dev_priv->saveBSM != 0)
+		pci_write_config_dword(pdev, 0x5c, dev_priv->saveBSM);
+
+	if (dev_priv->saveVBT != 0)
+		pci_write_config_dword(pdev, 0xFC, dev_priv->saveVBT);
+
+	/* retoring MSI address and data in PCIx space */
+	if (dev_priv->msi_addr != 0)
+		pci_write_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC,
+				dev_priv->msi_addr);
+
+	if (dev_priv->msi_data != 0)
+		pci_write_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC,
+				dev_priv->msi_data);
+
+	ret = pci_enable_device(pdev);
+
+	/* FixMe, Change should be removed Once bz 115181 is fixed */
+	pci_read_config_dword(pdev, 0x10, &mmadr);
+	if (mmadr == 0) {
+		pr_err("GFX OSPM : Bad PCI config\n");
+		BUG();
+	}
+
+	if (ret != 0)
+		PSB_DEBUG_PM("pci_enable_device failed: %d\n", ret);
+}
+
+void rtpm_suspend_pci(void)
+{
+	ospm_suspend_pci(g_ospm_data->dev);
+}
+
+void rtpm_resume_pci(void)
+{
+	ospm_resume_pci(g_ospm_data->dev);
+}
+
+
+/**
+ * get_island_ptr
+ *
+ * get pointer to the island
+ * use it to get array item for setting dependency
+ *
+ * Although island values are defined as bit mask values,
+ * this function only supports having a single bit set
+ * in this parameter.
+ */
+struct ospm_power_island *get_island_ptr(u32 hw_island)
+{
+	struct ospm_power_island *p_island = NULL;
+	int i = 0;
+
+	/* got through islands array to find the island */
+	while ((i < ARRAY_SIZE(island_list) && (!p_island))) {
+		/* do we have the island? */
+		if (hw_island & island_list[i].island) {
+			/* Found it */
+			p_island = &island_list[i];
+			break;
+		}
+
+		i++;
+	}
+
+	if (i == ARRAY_SIZE(island_list))
+		PSB_DEBUG_PM("island %x not found\n", hw_island);
+
+	return p_island;
+}
+
+static bool power_down_island(struct ospm_power_island *p_island);
+
+/**
+ * power_up_island
+ *
+ * Description: Power up the island and all of it's dependent islands
+ */
+static bool power_up_island(struct ospm_power_island *p_island)
+{
+	bool ret = true;
+
+	/* handle the dependency first */
+	if (p_island->p_dependency) {
+		/* Power up dependent island */
+		ret = power_up_island(p_island->p_dependency);
+		if (!ret)
+			return ret;
+	}
+
+	/* if successfully handled dependency */
+	if (!atomic_read(&p_island->ref_count)) {
+		/* power on the island */
+		PSB_DEBUG_PM("Power up island %x\n", p_island->island);
+		ret = p_island->p_funcs->power_up(g_ospm_data->dev, p_island);
+		if (ret)
+			p_island->island_state = OSPM_POWER_ON;
+		else {
+			PSB_DEBUG_PM("Power up island %x failed!\n", p_island->island);
+			if (p_island->p_dependency)
+				power_down_island(p_island->p_dependency);
+			return ret;
+		}
+	}
+
+	/* increment the ref count */
+	atomic_inc(&p_island->ref_count);
+
+	return ret;
+}
+
+/**
+ * power_down_island
+ *
+ * Description: Power down the island and all of it's dependent islands
+ */
+static bool power_down_island(struct ospm_power_island *p_island)
+{
+	bool ret = true;
+
+	if (atomic_dec_return(&p_island->ref_count) < 0) {
+		DRM_ERROR("Island %x, UnExpect RefCount %d\n",
+				p_island->island,
+				atomic_read(&p_island->ref_count));
+		dump_stack();
+		goto power_down_err;
+	}
+
+	/* check to see if island is turned off */
+	if (!atomic_read(&p_island->ref_count)) {
+		/* power on the island */
+		PSB_DEBUG_PM("Power down island %x\n", p_island->island);
+		ret = p_island->p_funcs->power_down(
+				g_ospm_data->dev,
+				p_island);
+
+		/* set the island state */
+		if (ret)
+			p_island->island_state = OSPM_POWER_OFF;
+		else
+			goto power_down_err;
+	}
+
+	/* handle the dependency later */
+	if (p_island->p_dependency) {
+		/* Power down dependent island */
+		ret = power_down_island(p_island->p_dependency);
+		if (!ret)
+			goto power_down_err;
+	}
+
+	return ret;
+
+power_down_err:
+	atomic_inc(&p_island->ref_count);
+	ret = false;
+	return ret;
+}
+
+static bool any_island_on(void)
+{
+	struct ospm_power_island *p_island = NULL;
+	u32 i = 0;
+	bool ret = false;
+
+	for (i = 0; i < ARRAY_SIZE(island_list); i++) {
+		p_island = &island_list[i];
+
+		if (atomic_read(&p_island->ref_count) > 0) {
+			ret = true;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+/**
+ * power_island_get
+ *
+ * Description: Notify PowerMgmt module that you will be accessing the
+ * specified island's hw so don't power it off.  If the island is not
+ * powered up, it will power it on.
+ *
+ */
+bool power_island_get(u32 hw_island)
+{
+	u32 i = 0;
+	bool ret = true, first_island = false;
+	int pm_ret;
+	struct ospm_power_island *p_island;
+	struct drm_psb_private *dev_priv = g_ospm_data->dev->dev_private;
+
+	mutex_lock(&g_ospm_data->ospm_lock);
+
+	if (!any_island_on()) {
+		PSB_DEBUG_PM("Resuming PCI\n");
+		/* Here, we use runtime pm framework to suit
+		 * S3 PCI suspend/resume
+		 */
+		wake_lock(&dev_priv->ospm_wake_lock);
+		pm_ret = pm_runtime_get_sync(&g_ospm_data->dev->pdev->dev);
+		if (pm_ret < 0) {
+			ret = false;
+			PSB_DEBUG_PM("pm_runtime_get_sync failed 0x%p.\n",
+				&g_ospm_data->dev->pdev->dev);
+			goto out_err;
+		}
+		first_island = true;
+
+	}
+
+	for (i = 0; i < ARRAY_SIZE(island_list); i++) {
+		if (hw_island & island_list[i].island) {
+			p_island = &island_list[i];
+			ret = power_up_island(p_island);
+			if (!ret) {
+				PSB_DEBUG_PM("power up failed %x\n",
+					island_list[i].island);
+				goto out_err;
+			}
+		}
+	}
+
+out_err:
+	if (ret && first_island)
+		pm_qos_remove_request(&dev_priv->s0ix_qos);
+	mutex_unlock(&g_ospm_data->ospm_lock);
+
+	return ret;
+}
+
+/**
+ * power_island_put
+ *
+ * Description: Notify PowerMgmt module that you are done accessing the
+ * specified island's hw so feel free to power it off.  Note that this
+ * function doesn't actually power off the islands.
+ */
+bool power_island_put(u32 hw_island)
+{
+	bool ret = true;
+	u32 i = 0;
+	struct drm_psb_private *dev_priv = g_ospm_data->dev->dev_private;
+	struct ospm_power_island *p_island;
+
+	mutex_lock(&g_ospm_data->ospm_lock);
+
+	for (i = 0; i < ARRAY_SIZE(island_list); i++) {
+		if (hw_island & island_list[i].island) {
+			/* Power down the island if needed */
+			p_island = &island_list[i];
+			ret = power_down_island(p_island);
+			if (!ret) {
+				PSB_DEBUG_PM("power down failed %x\n",
+					island_list[i].island);
+			}
+		}
+	}
+
+/* out_err: */
+	/* Check to see if we need to suspend PCI */
+	if (!any_island_on()) {
+		PSB_DEBUG_PM("Suspending PCI\n");
+		/* Here, we use runtime pm framework to suit
+		 * S3 PCI suspend/resume
+		 */
+		pm_qos_add_request(&dev_priv->s0ix_qos,
+				PM_QOS_CPU_DMA_LATENCY, CSTATE_EXIT_LATENCY_S0i1 - 1);
+		pm_runtime_put_sync_suspend(&g_ospm_data->dev->pdev->dev);
+		wake_unlock(&dev_priv->ospm_wake_lock);
+	}
+	mutex_unlock(&g_ospm_data->ospm_lock);
+
+	return ret;
+}
+
+/**
+ * is_island_on
+ *
+ * Description: checks to see if the island is up
+ * returns true if hw_island is ON
+ * returns false if hw_island is OFF
+ */
+bool is_island_on(u32 hw_island)
+{
+	/* get the power island */
+	struct ospm_power_island *p_island = get_island_ptr(hw_island);
+	bool island_on = false;
+
+	if (!p_island) {
+		DRM_ERROR("p_island is NULL\n");
+		return false;
+	}
+
+	/* TODO: add lock here. */
+	island_on = (p_island->island_state == OSPM_POWER_ON) ? true : false;
+
+	return island_on;
+}
+
+u32 pipe_to_island(u32 pipe)
+{
+	u32 power_island = 0;
+
+	switch (pipe) {
+	case 0:
+		power_island = OSPM_DISPLAY_A;
+		break;
+	case 1:
+		power_island = OSPM_DISPLAY_B;
+		break;
+	case 2:
+		power_island = OSPM_DISPLAY_C;
+		break;
+	default:
+		DRM_ERROR("%s: invalid pipe %u\n", __func__, pipe);
+		return 0;
+	}
+
+	return power_island;
+}
+
+/**
+ * ospm_power_init
+ *
+ * Description: Initialize this ospm power management module
+ */
+void ospm_power_init(struct drm_device *dev)
+{
+	u32 i = 0;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	/* allocate ospm data */
+	g_ospm_data = kmalloc(sizeof(struct _ospm_data_), GFP_KERNEL);
+	if (!g_ospm_data)
+		goto out_err;
+
+	mutex_init(&g_ospm_data->ospm_lock);
+	g_ospm_data->dev = dev;
+	gpDrmDevice = dev;
+
+	wake_lock_init(&dev_priv->ospm_wake_lock, WAKE_LOCK_SUSPEND,
+			"ospm_wake_lock");
+	/* initilize individual islands */
+	for (i = 0; i < ARRAY_SIZE(island_list); i++) {
+		island_list[i].p_funcs = kmalloc(sizeof(struct power_ops),
+						GFP_KERNEL);
+		if ((island_list[i].p_funcs) && (island_list[i].init_func)) {
+			island_list[i].init_func(dev, &island_list[i]);
+			atomic_set(&island_list[i].ref_count, 0);
+
+			switch (island_list[i].island){
+			case OSPM_DISPLAY_A:
+			case OSPM_DISPLAY_C:
+			case OSPM_DISPLAY_MIO:
+				atomic_set(&island_list[i].ref_count, 1);
+				island_list[i].island_state = OSPM_POWER_ON;
+				if (island_list[i].p_dependency) {
+					atomic_inc(&island_list[i].p_dependency->ref_count);
+					island_list[i].island_state = OSPM_POWER_ON;
+				}
+
+				break;
+			default:
+				break;
+			}
+		}
+	}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	/* register early_suspend runtime pm */
+	intel_media_early_suspend_init(dev);
+#endif
+	intel_media_early_suspend_sysfs_init(dev);
+	dc_maxfifo_init(dev);
+	rtpm_init(dev);
+out_err:
+	return;
+}
+
+/**
+ * ospm_power_uninit
+ *
+ * Description: Uninitialize this ospm power management module
+ */
+void ospm_power_uninit(void)
+{
+	int i;
+	PSB_DEBUG_PM("%s\n", __func__);
+
+	rtpm_uninit(gpDrmDevice);
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	/* un-init early suspend */
+	intel_media_early_suspend_uninit();
+#endif
+	intel_media_early_suspend_sysfs_uninit(gpDrmDevice);
+
+	/* Do we need to turn off all islands? */
+	power_island_put(OSPM_ALL_ISLANDS);
+
+	for (i = 0; i < ARRAY_SIZE(island_list); i++)
+		kfree(island_list[i].p_funcs);
+
+	kfree(g_ospm_data);
+}
+
+/**
+ * ospm_power_suspend
+ *
+ * Description: suspend all islands
+ */
+bool ospm_power_suspend(void)
+{
+	PSB_DEBUG_PM("%s\n", __func__);
+
+	return true;
+}
+
+/**
+ * ospm_power_resume
+ *
+ * Description: resume previously suspended islands.
+ */
+void ospm_power_resume(void)
+{
+	PSB_DEBUG_PM("%s\n", __func__);
+}
+
+/* FIXME: hkpatel */
+/*** LEGACY SUPPORT ****/
+/*** REMOVE ONCE CONVERTED ALL FUNCTIONS TO NEW ARCH */
+
+/* Legacy Function for support */
+bool ospm_power_using_hw_begin(int hw_island, u32 usage)
+{
+	bool ret = true;
+
+	/*
+	 * FIXME: make ospm_power_using_hw_begin used for Display islands only
+	 * take effect for DSPB/HDMIO islands, becaused it's called by the OTM
+	 * HDMI codes and not to impact CTP/MDFLD. But eventually need to
+	 * replace hw_begin() with power_island_get() in OTM HDMI.
+	 */
+	if (hw_island == OSPM_DISPLAY_ISLAND)
+		hw_island = OSPM_DISPLAY_B | OSPM_DISPLAY_HDMI;
+
+	ret = power_island_get(hw_island);
+
+	return ret;
+}
+EXPORT_SYMBOL(ospm_power_using_hw_begin);
+
+bool ospm_power_is_hw_on(u32 hw_island)
+{
+	return is_island_on(hw_island);
+}
+EXPORT_SYMBOL(ospm_power_is_hw_on);
+
+void ospm_power_using_hw_end(int hw_island)
+{
+	/*
+	 * FIXME: make ospm_power_using_hw_end used for Display islands only
+	 * take effect for DSPB/HDMIO islands, becaused it's called by the OTM
+	 * HDMI codes and not to impact CTP/MDFLD. But eventually need to
+	 * replace hw_end() with power_island_put() in OTM HDMI.
+	 */
+	if (hw_island == OSPM_DISPLAY_ISLAND)
+		hw_island = OSPM_DISPLAY_B | OSPM_DISPLAY_HDMI;
+
+	power_island_put(hw_island);
+}
+EXPORT_SYMBOL(ospm_power_using_hw_end);
+
+void ospm_apm_power_down_msvdx(struct drm_device *dev, int force_off)
+{
+	unsigned long irq_flags;
+	int ret, frame_finished = 0;
+	int shp_ctx_count = 0;
+	struct ospm_power_island *p_island;
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	struct psb_video_ctx *pos, *n;
+
+
+	p_island = get_island_ptr(OSPM_VIDEO_DEC_ISLAND);
+
+	if (!p_island) {
+		DRM_ERROR("p_island is NULL\n");
+		return;
+	}
+
+	if (force_off) {
+		if (!ospm_power_is_hw_on(OSPM_VIDEO_DEC_ISLAND)) {
+			PSB_DEBUG_PM("msvdx in power off.\n");
+			return;
+		}
+
+		mutex_lock(&g_ospm_data->ospm_lock);
+		ret = p_island->p_funcs->power_down(
+			g_ospm_data->dev,
+			p_island);
+
+		/* set the island state */
+		if (ret)
+			p_island->island_state = OSPM_POWER_OFF;
+
+		mutex_unlock(&g_ospm_data->ospm_lock);
+
+		/* MSVDX_NEW_PMSTATE(dev, msvdx_priv, PSB_PMSTATE_POWERDOWN); */
+
+		mutex_lock(&g_ospm_data->ospm_lock);
+		ret = p_island->p_funcs->power_up(
+			g_ospm_data->dev,
+			p_island);
+
+		/* set the island state */
+		if (ret)
+			p_island->island_state = OSPM_POWER_ON;
+
+		mutex_unlock(&g_ospm_data->ospm_lock);
+
+		power_island_put(OSPM_VIDEO_DEC_ISLAND);
+		return;
+	}
+
+	if (!ospm_power_is_hw_on(OSPM_VIDEO_DEC_ISLAND))
+		PSB_DEBUG_PM("msvdx in power off.\n");
+
+
+#ifdef CONFIG_SLICE_HEADER_PARSING
+	spin_lock_irqsave(&dev_priv->video_ctx_lock, irq_flags);
+	list_for_each_entry_safe(pos, n, &dev_priv->video_ctx, head){
+		if (pos->slice_extract_flag){
+			shp_ctx_count++;
+		}
+		if (pos->frame_end) {
+			frame_finished = 1;
+			pos->frame_end = 0;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&dev_priv->video_ctx_lock, irq_flags);
+#endif
+
+	psb_msvdx_dequeue_send(dev);
+
+#ifdef CONFIG_SLICE_HEADER_PARSING
+	if (shp_ctx_count == 0 || frame_finished)
+		power_island_put(OSPM_VIDEO_DEC_ISLAND);
+#else
+	power_island_put(OSPM_VIDEO_DEC_ISLAND);
+#endif
+
+	return;
+}
+
+void ospm_apm_power_down_topaz(struct drm_device *dev)
+{
+	int ret;
+	struct ospm_power_island *p_island;
+
+	PSB_DEBUG_PM("Power down VEC...\n");
+	p_island = get_island_ptr(OSPM_VIDEO_ENC_ISLAND);
+
+	if (!p_island) {
+		DRM_ERROR("p_island is NULL\n");
+		return;
+	}
+
+	mutex_lock(&g_ospm_data->ospm_lock);
+
+	if (!ospm_power_is_hw_on(OSPM_VIDEO_ENC_ISLAND))
+		goto out;
+
+	if (atomic_read(&p_island->ref_count)) {
+		PSB_DEBUG_PM("vec ref_count has been set(%d), bypass\n",
+			     atomic_read(&p_island->ref_count));
+		goto out;
+	}
+
+	ret = p_island->p_funcs->power_down(
+			g_ospm_data->dev,
+			p_island);
+
+	/* set the island state */
+	if (ret)
+		p_island->island_state = OSPM_POWER_OFF;
+
+	PSB_DEBUG_PM("Power down VEC done\n");
+out:
+	mutex_unlock(&g_ospm_data->ospm_lock);
+	return;
+}
+
+void ospm_apm_power_down_vsp(struct drm_device *dev)
+{
+	int ret;
+	struct ospm_power_island *p_island;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct vsp_private *vsp_priv = dev_priv->vsp_private;
+	int island_ref;
+
+	PSB_DEBUG_PM("Power down VPP...\n");
+	p_island = get_island_ptr(OSPM_VIDEO_VPP_ISLAND);
+
+	if (!p_island) {
+		DRM_ERROR("p_island is NULL\n");
+		return;
+	}
+
+	mutex_lock(&g_ospm_data->ospm_lock);
+
+	if (!ospm_power_is_hw_on(OSPM_VIDEO_VPP_ISLAND))
+		goto out;
+
+	island_ref = atomic_read(&p_island->ref_count);
+	if (island_ref)
+		PSB_DEBUG_PM("VPP ref_count has been set(%d), bypass\n",
+			     island_ref);
+
+	if (vsp_priv->vsp_cmd_num > 0) {
+		VSP_DEBUG("command in VSP, by pass\n");
+		goto out;
+	}
+
+	ret = p_island->p_funcs->power_down(
+		g_ospm_data->dev,
+		p_island);
+
+	/* set the island state */
+	if (ret) {
+		p_island->island_state = OSPM_POWER_OFF;
+		atomic_set(&p_island->ref_count, 0);
+	}
+
+	/* handle the dependency */
+	if (p_island->p_dependency) {
+		/* Power down dependent island */
+		do {
+			power_down_island(p_island->p_dependency);
+			island_ref--;
+		} while (island_ref);
+	}
+
+	if (!any_island_on()) {
+		PSB_DEBUG_PM("Suspending PCI\n");
+		pm_qos_add_request(&dev_priv->s0ix_qos,
+				   PM_QOS_CPU_DMA_LATENCY, CSTATE_EXIT_LATENCY_S0i1 - 1);
+		pm_runtime_put(&g_ospm_data->dev->pdev->dev);
+		wake_unlock(&dev_priv->ospm_wake_lock);
+	}
+
+	PSB_DEBUG_PM("Power down VPP done\n");
+out:
+	mutex_unlock(&g_ospm_data->ospm_lock);
+	return;
+}
+
+int ospm_runtime_pm_allow(struct drm_device *dev)
+{
+	pm_runtime_allow(&dev->pdev->dev);
+	return 0;
+}
+
+void ospm_runtime_pm_forbid(struct drm_device *dev)
+{
+	pm_runtime_forbid(&dev->pdev->dev);
+}
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/ospm/pwr_mgmt.h b/drivers/external_drivers/intel_media/display/tng/drv/ospm/pwr_mgmt.h
new file mode 100644
index 0000000..7b1744b
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/ospm/pwr_mgmt.h
@@ -0,0 +1,164 @@
+/**************************************************************************
+ * Copyright (c) 2012, Intel Corporation.
+ * All Rights Reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Hitesh K. Patel <hitesh.k.patel@intel.com>
+ */
+
+#ifndef __INTEL_MEDIA_ISLAND_MANAGEMENT_H__
+#define __INTEL_MEDIA_ISLAND_MANAGEMENT_H__
+
+#include <linux/types.h>
+#include <drm/drmP.h>
+#include <linux/intel_mid_pm.h>
+#include "gfx_rtpm.h"
+#include "pmu_tng.h"
+
+/* TNG Power Islands defination
+ * Subsystem status bits for NC_PM_SSS.  Status of all North Cluster IPs.
+ * These correspond to the bits below.
+ */
+#define	OSPM_DISPLAY_A		NC_PM_SSS_DPA
+#define	OSPM_DISPLAY_B		NC_PM_SSS_DPB
+#define	OSPM_DISPLAY_C		NC_PM_SSS_DPC
+#define	OSPM_DISPLAY_MIO	NC_PM_SSS_MIO
+#define	OSPM_DISPLAY_HDMI	NC_PM_SSS_HDMIO
+#define	OSPM_VIDEO_VPP_ISLAND	NC_PM_SSS_VSP
+#define	OSPM_VIDEO_DEC_ISLAND	NC_PM_SSS_VED
+#define	OSPM_VIDEO_ENC_ISLAND	NC_PM_SSS_VEC
+
+enum POWER_ISLAND_STATE {
+	OSPM_POWER_OFF = 0,	/* power island/device OFF */
+	OSPM_POWER_ON,		/* power island/device ON */
+};
+
+/* All Graphics Islands */
+#define	OSPM_GRAPHICS_ISLAND	NC_PM_SSS_GFX_RSCD
+
+#ifdef CONFIG_MOOREFIELD
+#define	OSPM_SIDEKICK_ISLAND	NC_PM_SSS_GFX_SDKCK
+#define	OSPM_SLC_ISLAND		NC_PM_SSS_GFX_SLC
+#define	OSPM_SLC_LDO_ISLAND	NC_PM_SSS_GFX_SLC_LDO
+#else
+/* All SLC Islands */
+#define	OSPM_SLC_ISLAND	(NC_PM_SSS_GFX_SLC | \
+				NC_PM_SSS_GFX_SDKCK | \
+				NC_PM_SSS_GFX_SLC_LDO)
+#endif
+
+/* All Display Islands */
+#define OSPM_DISPLAY_ISLAND	(OSPM_DISPLAY_A |\
+				OSPM_DISPLAY_B |\
+				OSPM_DISPLAY_C |\
+				OSPM_DISPLAY_MIO |\
+				OSPM_DISPLAY_HDMI)
+
+/* All Video Islands */
+#define OSPM_VIDEO_ISLAND	(OSPM_VIDEO_VPP_ISLAND |\
+				OSPM_VIDEO_DEC_ISLAND |\
+				OSPM_VIDEO_ENC_ISLAND)
+
+/* All Island for Intel Media */
+#define OSPM_ALL_ISLANDS	(OSPM_GRAPHICS_ISLAND |\
+				OSPM_VIDEO_ISLAND |\
+				OSPM_DISPLAY_ISLAND)
+
+struct power_ops;
+
+/* Generic defination of a power island */
+struct ospm_power_island {
+	/* bit for identifying power island */
+	u32 island;
+	/* power state of the island state */
+	enum POWER_ISLAND_STATE island_state;
+	/* Ref count for the Power island */
+	atomic_t ref_count;
+	void (*init_func)(struct drm_device *, struct ospm_power_island *);
+	/* power island up/down functions */
+	/* Function pointer must be initilized in init_func above */
+	struct power_ops *p_funcs;
+
+	/* we MUST power/on off the dependent island */
+	/* before modifying power state of "this" island */
+	/* dependency (if any) must be set in the init function */
+	struct ospm_power_island *p_dependency;
+};
+
+/* power island up/down functions */
+struct power_ops {
+	bool (*power_up)(struct drm_device *,
+			struct ospm_power_island *);
+	bool (*power_down)(struct drm_device *,
+			struct ospm_power_island *);
+};
+
+struct _ospm_data_ {
+	struct mutex ospm_lock;
+
+	/* drm device */
+	struct drm_device	*dev;
+};
+
+/* get pointer to the island */
+/* use it to get array item for setting dependency */
+struct ospm_power_island *get_island_ptr(u32 hw_island);
+bool ospm_power_suspend(void);
+void ospm_power_resume(void);
+void ospm_power_init(struct drm_device *dev);
+void ospm_power_uninit(void);
+
+/* Power up */
+bool power_island_get(u32 hw_island);
+/* Power down */
+bool power_island_put(u32 hw_island);
+
+/* Check the state of the island */
+bool is_island_on(u32 hw_island);
+
+/* Get Display island from the pipe */
+u32 pipe_to_island(u32 pipe);
+
+/* FIXME: hkpatel */
+/*** LEGACY SUPPORT ****/
+/*** REMOVE ONCE CONVERTED ALL FUNCTIONS TO NEW ARCH */
+
+/* Panel presence */
+#define DISPLAY_A	0x1
+#define DISPLAY_B	0x2
+#define DISPLAY_C	0x4
+
+#define OSPM_UHB_ONLY_IF_ON        0
+#define OSPM_UHB_FORCE_POWER_ON    1
+
+/* Legacy Function for support */
+bool ospm_power_using_hw_begin(int hw_island, u32 usage);
+void ospm_power_using_hw_end(int hw_island);
+
+void ospm_apm_power_down_msvdx(struct drm_device *dev, int on);
+bool ospm_power_is_hw_on(u32 hw_island);
+void ospm_apm_power_down_topaz(struct drm_device *dev);
+void ospm_apm_power_down_vsp(struct drm_device *dev);
+int ospm_runtime_pm_allow(struct drm_device *dev);
+void ospm_runtime_pm_forbid(struct drm_device *dev);
+
+#endif		/* __INTEL_MEDIA_ISLAND_MANAGEMENT_H__ */
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/ospm/video_ospm.c b/drivers/external_drivers/intel_media/display/tng/drv/ospm/video_ospm.c
new file mode 100644
index 0000000..9c1aaae
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/ospm/video_ospm.c
@@ -0,0 +1,588 @@
+/**************************************************************************
+ * Copyright (c) 2012, Intel Corporation.
+ * All Rights Reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Hitesh K. Patel <hitesh.k.patel@intel.com>
+ */
+#include "psb_drv.h"
+#include "pmu_tng.h"
+#include "video_ospm.h"
+#include "vsp.h"
+#include "psb_msvdx.h"
+#include "tng_topaz.h"
+#include "tng_wa.h"
+#include <asm/intel-mid.h>
+#include "pmu_tng.h"
+
+static bool need_set_ved_freq = true;
+
+static int pm_cmd_freq_get(u32 reg_freq);
+static int pm_cmd_freq_set(u32 reg_freq, u32 freq_code, u32 *p_freq_code_rlzd);
+static int pm_cmd_freq_wait(u32 reg_freq, u32 *freq_code_rlzd);
+static void pm_cmd_power_set(int pm_reg, int pm_mask) __attribute__((unused));
+
+static void vsp_set_max_frequency(struct drm_device *dev);
+static void vsp_set_default_frequency(struct drm_device *dev);
+
+extern struct drm_device *gpDrmDevice;
+/***********************************************************
+ * vsp islands
+ ***********************************************************/
+/**
+ * vsp_power_up
+ *
+ * Power up island
+ */
+static bool vsp_power_up(struct drm_device *dev,
+			struct ospm_power_island *p_island)
+{
+	bool ret = true;
+	int pm_ret = 0;
+
+	if (p_island->island_state == OSPM_POWER_ON)
+		return true;
+	/*
+	 * This workarounds are only needed for TNG A0/A1 silicon.
+	 * Any TNG SoC which is newer than A0/A1 won't need this.
+	 */
+	if (IS_TNG_A0(dev))
+	{
+		apply_TNG_A0_workarounds(OSPM_VIDEO_VPP_ISLAND, 1);
+	}
+
+#ifndef USE_GFX_INTERNAL_PM_FUNC
+	pm_ret = pmu_nc_set_power_state(PMU_VPP, OSPM_ISLAND_UP, VSP_SS_PM0);
+#else
+	pm_ret = pmu_set_power_state_tng(VSP_SS_PM0, VSP_SSC, TNG_COMPOSITE_I0);
+#endif
+
+	if (pm_ret) {
+		PSB_DEBUG_PM("VSP: pmu_nc_set_power_state ON failed!\n");
+		return false;
+	}
+
+	if (drm_vsp_burst)
+		vsp_set_max_frequency(dev);
+
+	psb_irq_preinstall_islands(dev, OSPM_VIDEO_VPP_ISLAND);
+	psb_irq_postinstall_islands(dev, OSPM_VIDEO_VPP_ISLAND);
+
+	PSB_DEBUG_PM("Power ON VSP!\n");
+	return ret;
+}
+
+/**
+ * vsp_power_down
+ *
+ * Power down island
+ */
+static bool vsp_power_down(struct drm_device *dev,
+			struct ospm_power_island *p_island)
+{
+	bool ret = true;
+	int pm_ret = 0;
+
+	/* whether the VSP is idle */
+	if (psb_check_vsp_idle(dev)) {
+		PSB_DEBUG_PM("The VSP isn't in idle!\n");
+		return false;
+	}
+
+	psb_irq_uninstall_islands(dev, OSPM_VIDEO_VPP_ISLAND);
+
+	/* save VSP registers */
+	psb_vsp_save_context(dev);
+
+	if (drm_vsp_burst)
+		vsp_set_default_frequency(dev);
+
+#ifndef USE_GFX_INTERNAL_PM_FUNC
+	pm_ret = pmu_nc_set_power_state(PMU_VPP, OSPM_ISLAND_DOWN, VSP_SS_PM0);
+#else
+	pm_ret = pmu_set_power_state_tng(VSP_SS_PM0, VSP_SSC, TNG_COMPOSITE_D3);
+#endif
+
+	if (pm_ret) {
+		PSB_DEBUG_PM("VSP: pmu_nc_set_power_state OFF failed!\n");
+		return false;
+	}
+
+	PSB_DEBUG_PM("Power OFF VSP!\n");
+	return ret;
+}
+
+/**
+ * ospm_vsp_init
+ *
+ * initilize
+ */
+void ospm_vsp_init(struct drm_device *dev,
+			struct ospm_power_island *p_island)
+{
+	PSB_DEBUG_ENTRY("\n");
+
+	p_island->p_funcs->power_up = vsp_power_up;
+	p_island->p_funcs->power_down = vsp_power_down;
+	p_island->p_dependency = get_island_ptr(NC_PM_SSS_GFX_SLC);
+}
+
+/***********************************************************
+ * slc workaround for ved
+ ***********************************************************/
+/**
+ * apply_ved_slc_workaround
+ *
+ * bypass SLC for ved if there is ctx that needs the workaround
+ */
+#define GFX_WRAPPER_GBYPASSENABLE_SW 0x160854
+static void apply_ved_slc_workaround(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
+
+	if (atomic_read(&msvdx_priv->vc1_workaround_ctx)) {
+		uint32_t reg, data;
+
+		/* soc.gfx_wrapper.gbypassenable_sw = 1 */
+		reg = GFX_WRAPPER_GBYPASSENABLE_SW - GFX_WRAPPER_OFFSET;
+		data = WRAPPER_REG_READ(reg);
+		data |= 0x1; /* Disable Bypass SLC for VED on MOFD */
+		WRAPPER_REG_WRITE(reg, data);
+	}
+}
+
+/***********************************************************
+ * ved islands
+ ***********************************************************/
+/**
+ * ved_power_up
+ *
+ * Power up island
+ */
+static bool ved_power_up(struct drm_device *dev,
+			struct ospm_power_island *p_island)
+{
+	bool ret = true;
+	int pm_ret = 0;
+	unsigned int pci_device = dev->pci_device & 0xffff;
+	/* struct drm_psb_private *dev_priv = dev->dev_private; */
+
+	PSB_DEBUG_PM("powering up ved\n");
+	apply_ved_slc_workaround(dev);
+
+#ifndef USE_GFX_INTERNAL_PM_FUNC
+	pm_ret = pmu_nc_set_power_state(PMU_DEC, OSPM_ISLAND_UP, VED_SS_PM0);
+#else
+	pm_ret = pmu_set_power_state_tng(VED_SS_PM0, VED_SSC, TNG_COMPOSITE_I0);
+#endif
+
+	if (pm_ret) {
+		PSB_DEBUG_PM("power up ved failed\n");
+		return false;
+	}
+
+	/* iowrite32(0xffffffff, dev_priv->ved_wrapper_reg + 0); */
+
+	if (need_set_ved_freq && (pci_device != 0x1182)) {
+		if (!psb_msvdx_set_ved_freq(IP_FREQ_320_00))
+			PSB_DEBUG_PM("MSVDX: Set VED frequency to " \
+				"320MHZ after power up\n");
+	}
+
+	return ret;
+}
+
+/**
+ * ved_power_down
+ *
+ * Power down island
+ */
+static bool ved_power_down(struct drm_device *dev,
+			struct ospm_power_island *p_island)
+{
+	bool ret = true;
+	int pm_ret = 0;
+	unsigned int pci_device = dev->pci_device & 0xffff;
+
+	/* Need to implement force_off */
+	PSB_DEBUG_PM("powering down ved\n");
+
+	/*
+	if (psb_check_msvdx_idle(dev))
+		return false;
+	*/
+
+	psb_msvdx_save_context(dev);
+
+
+	if (need_set_ved_freq && (pci_device != 0x1182)) {
+		if (!psb_msvdx_set_ved_freq(IP_FREQ_200_00))
+			PSB_DEBUG_PM("MSVDX: Set VED frequency to " \
+				"200MHZ before power down\n");
+	}
+
+
+#ifndef USE_GFX_INTERNAL_PM_FUNC
+	pm_ret = pmu_nc_set_power_state(PMU_DEC, OSPM_ISLAND_DOWN, VED_SS_PM0);
+#else
+	pm_ret = pmu_set_power_state_tng(VED_SS_PM0, VED_SSC, TNG_COMPOSITE_D3);
+#endif
+
+	if (pm_ret) {
+		PSB_DEBUG_PM("power down ved failed\n");
+		return false;
+	}
+
+	return ret;
+}
+
+/**
+ * ospm_ved_init
+ *
+ * initilize
+ */
+void ospm_ved_init(struct drm_device *dev,
+			struct ospm_power_island *p_island)
+{
+	PSB_DEBUG_ENTRY("\n");
+
+	p_island->p_funcs->power_up = ved_power_up;
+	p_island->p_funcs->power_down = ved_power_down;
+	p_island->p_dependency = get_island_ptr(NC_PM_SSS_GFX_SLC);
+}
+
+/***********************************************************
+ * vec islands
+ ***********************************************************/
+static u32 vec_get_max_freq(struct drm_device *dev)
+{
+	unsigned int pci_device = dev->pci_device & 0xffff;
+	u32 max_freq = IP_FREQ_320_00;
+
+	if ((pci_device == 0x1180) ||
+		(pci_device == 0x1181)) {
+		max_freq = IP_FREQ_400_00;
+		PSB_DEBUG_PM("vec 1180 1181 maximum freq is 400\n");
+	} else if (pci_device == 0x1182) {
+		max_freq = IP_FREQ_266_67;
+		PSB_DEBUG_PM("vec 1182 maximum freq is 400\n");
+	} else if (pci_device == 0x1480) {
+		max_freq = IP_FREQ_400_00;
+		PSB_DEBUG_PM("vec 1480 maximum freq is 400\n");
+	} else {
+		DRM_ERROR("invalid pci device id %x\n", pci_device);
+	}
+	return max_freq;
+}
+
+/**
+ * vec_power_up
+ *
+ * Power up island
+ */
+static bool vec_power_up(struct drm_device *dev,
+			struct ospm_power_island *p_island)
+{
+	int pm_ret = 0;
+	u32 freq_code = 0;
+	u32 freq_max = 0;
+
+	PSB_DEBUG_PM("powering up vec\n");
+#ifndef USE_GFX_INTERNAL_PM_FUNC
+	pm_ret = pmu_nc_set_power_state(PMU_ENC, OSPM_ISLAND_UP, VEC_SS_PM0);
+#else
+	pm_ret = pmu_set_power_state_tng(VEC_SS_PM0, VEC_SSC, TNG_COMPOSITE_I0);
+#endif
+
+	if (pm_ret) {
+		PSB_DEBUG_PM("power up vec failed\n");
+		return false;
+	}
+
+	freq_max = vec_get_max_freq(dev);
+
+	if (drm_vec_force_up_freq < 0) {
+		drm_vec_force_up_freq = 0;
+		freq_code = freq_max;
+	} else {
+		if (freq_max < drm_vec_force_up_freq)
+			freq_code = drm_vec_force_up_freq;
+		else
+			freq_code = freq_max;
+	}
+
+	if(!tng_topaz_set_vec_freq(freq_code))
+		PSB_DEBUG_PM("TOPAZ: Set VEC freq by code %d\n", freq_code);
+	else {
+		PSB_DEBUG_PM("TOPAZ: Fail to set VEC freq by code %d!\n",
+			freq_code);
+		/*return false;*/
+	}
+
+	if (drm_topaz_cgpolicy != PSB_CGPOLICY_ON)
+		tng_topaz_CG_disable(dev);
+
+	PSB_DEBUG_PM("powering up vec done\n");
+
+	return true;
+}
+
+/**
+ * vec_power_down
+ *
+ * Power down island
+ */
+static bool vec_power_down(struct drm_device *dev,
+			struct ospm_power_island *p_island)
+{
+	int pm_ret = 0;
+	int freq_code = 0;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct tng_topaz_private *topaz_priv = dev_priv->topaz_private;
+
+	/* Avoid handle the previous context's power down request */
+	topaz_priv->power_down_by_release = 0;
+
+	PSB_DEBUG_PM("TOPAZ: powering down vec\n");
+
+	tng_topaz_save_mtx_state(dev);
+
+	if (drm_vec_force_down_freq < 0)
+		drm_vec_force_down_freq = 0;
+
+	if (!drm_vec_force_down_freq)
+		freq_code = IP_FREQ_200_00;
+	else
+		freq_code = drm_vec_force_down_freq;
+
+	if(!tng_topaz_set_vec_freq(freq_code))
+		PSB_DEBUG_PM("TOPAZ: Set VEC freq by code %d\n", freq_code);
+	else {
+		PSB_DEBUG_PM("TOPAZ: Fail to set VEC freq by code %d!\n",
+			freq_code);
+		/*return false;*/
+	}
+
+#ifndef USE_GFX_INTERNAL_PM_FUNC
+	pm_ret = pmu_nc_set_power_state(PMU_ENC, \
+		OSPM_ISLAND_DOWN, VEC_SS_PM0);
+#else
+	pm_ret = pmu_set_power_state_tng(VEC_SS_PM0, VEC_SSC, TNG_COMPOSITE_D3);
+#endif
+
+	if (pm_ret) {
+		DRM_ERROR("Power down ved failed\n");
+		return false;
+	}
+
+	PSB_DEBUG_PM("TOPAZ: powering down vec done\n");
+
+	return true;
+}
+
+/**
+ * ospm_vec_init
+ *
+ * initilize
+ */
+void ospm_vec_init(struct drm_device *dev,
+			struct ospm_power_island *p_island)
+{
+	PSB_DEBUG_ENTRY("\n");
+
+	p_island->p_funcs->power_up = vec_power_up;
+	p_island->p_funcs->power_down = vec_power_down;
+	p_island->p_dependency = get_island_ptr(NC_PM_SSS_GFX_SLC);
+}
+
+static int pm_cmd_freq_wait(u32 reg_freq, u32 *freq_code_rlzd)
+{
+	int tcount;
+	u32 freq_val;
+
+	for (tcount = 0; ; tcount++) {
+		freq_val = intel_mid_msgbus_read32(PUNIT_PORT, reg_freq);
+		if ((freq_val & IP_FREQ_VALID) == 0)
+			break;
+		if (tcount > 1500) {
+			DRM_ERROR("P-Unit freq request wait timeout %x",
+				freq_val);
+			return -EBUSY;
+		}
+		udelay(1);
+	}
+
+	if (freq_code_rlzd) {
+		*freq_code_rlzd = ((freq_val >> IP_FREQ_STAT_POS) &
+			IP_FREQ_MASK);
+	}
+
+	return 0;
+}
+
+static int pm_cmd_freq_get(u32 reg_freq)
+{
+	u32 freq_val;
+	int freq_code=0;
+
+	pm_cmd_freq_wait(reg_freq, NULL);
+
+	freq_val = intel_mid_msgbus_read32(PUNIT_PORT, reg_freq);
+	freq_code =(int)((freq_val>>IP_FREQ_STAT_POS) & ~IP_FREQ_VALID);
+	return freq_code;
+}
+
+static int pm_cmd_freq_set(u32 reg_freq, u32 freq_code, u32 *p_freq_code_rlzd)
+{
+	u32 freq_val;
+	u32 freq_code_realized;
+	int rva;
+
+	rva = pm_cmd_freq_wait(reg_freq, NULL);
+	if (rva < 0) {
+		printk(KERN_ALERT "%s: pm_cmd_freq_wait 1 failed\n", __func__);
+		return rva;
+	}
+
+	freq_val = IP_FREQ_VALID | freq_code;
+	intel_mid_msgbus_write32(PUNIT_PORT, reg_freq, freq_val);
+
+	rva = pm_cmd_freq_wait(reg_freq, &freq_code_realized);
+	if (rva < 0) {
+		printk(KERN_ALERT "%s: pm_cmd_freq_wait 2 failed\n", __func__);
+		return rva;
+	}
+
+	if (p_freq_code_rlzd)
+		*p_freq_code_rlzd = freq_code_realized;
+
+	return rva;
+}
+
+static void vsp_set_max_frequency(struct drm_device *dev)
+{
+	unsigned int pci_device = dev->pci_device & 0xffff;
+	u32 freq_code_rlzd;
+	u32 freq_code, max_freq_code;
+	u32 freq, max_freq;
+	int ret;
+
+	freq_code = 0;
+	max_freq_code = 0;
+	if (pci_device == 0x1180) {
+		max_freq_code = IP_FREQ_457_14;
+		PSB_DEBUG_PM("vsp maximum freq is 457\n");
+	} else if (pci_device == 0x1181) {
+		max_freq_code = IP_FREQ_400_00;
+		PSB_DEBUG_PM("vsp maximum freq is 400\n");
+	} else if (pci_device == 0x1480) {
+		max_freq_code = IP_FREQ_400_00;
+		PSB_DEBUG_PM("vsp maximum freq for ANN A0 is 400\n");
+	} else if (pci_device == 0x1182) {
+		PSB_DEBUG_PM("Max freq is the default freq 200MHZ for SKU3 \n");
+		max_freq_code = IP_FREQ_200_00;
+	} else {
+		DRM_ERROR("invalid pci device id %x\n", pci_device);
+		return;
+	}
+
+	// according to the latest scheme, set VSP max frequency to 400MHZ
+	freq_code = IP_FREQ_400_00;
+
+	if (drm_vsp_force_up_freq)
+		freq_code = drm_vsp_force_up_freq;
+
+	freq = 1600 * 2 / (freq_code + 1);
+	max_freq = 1600 * 2 / (max_freq_code + 1);
+	VSP_DEBUG("try to set %dMHZ, max freq is %dMHZ\n", freq, max_freq);
+	if (freq > max_freq)
+		freq_code = max_freq_code;
+
+	ret = pm_cmd_freq_set(VSP_SS_PM1, freq_code, &freq_code_rlzd);
+	if (ret < 0) {
+		DRM_ERROR("failed to set freqency, current is %x\n",
+			  freq_code_rlzd);
+	}
+
+	PSB_DEBUG_PM("set maximum frequency\n");
+	return;
+}
+
+static void vsp_set_default_frequency(struct drm_device *dev)
+{
+	u32 freq_code_rlzd;
+	int ret;
+	u32 freq_code;
+
+	freq_code = IP_FREQ_200_00;
+
+	if (drm_vsp_force_down_freq)
+		freq_code = drm_vsp_force_down_freq;
+
+	ret = pm_cmd_freq_set(VSP_SS_PM1, freq_code, &freq_code_rlzd);
+	if (ret < 0) {
+		DRM_ERROR("failed to set freqency, current is %x\n",
+			  freq_code_rlzd);
+	}
+
+	PSB_DEBUG_PM("set default frequency\n");
+	return;
+}
+
+int psb_msvdx_set_ved_freq(u32 freq_code)
+{
+       u32 freq_code_rlzd;
+       int ret;
+
+       ret = pm_cmd_freq_set(VED_SS_PM1, freq_code, &freq_code_rlzd);
+       if (ret < 0) {
+               DRM_ERROR("failed to set freqency, current is %x\n",
+                               freq_code_rlzd);
+       }
+
+       return ret;
+}
+
+int psb_msvdx_get_ved_freq(u32 reg_freq)
+{
+	return  pm_cmd_freq_get(reg_freq);
+}
+
+void psb_set_freq_control_switch(bool config_value)
+{
+	need_set_ved_freq = config_value;
+}
+
+static void pm_cmd_power_set(int pm_reg, int pm_mask)
+{
+	intel_mid_msgbus_write32(0x04, pm_reg, pm_mask);
+	udelay(500);
+
+	if (pm_reg == VEC_SS_PM0 && !(pm_mask & 0x3)) {
+		PSB_DEBUG_PM("Power up VEC, delay another 1500 us\n");
+		udelay(1500);
+	}
+
+	pm_mask = intel_mid_msgbus_read32(0x04, pm_reg);
+	PSB_DEBUG_PM("pwr_mask read: reg=0x%x pwr_mask=0x%x \n", pm_reg, pm_mask);
+}
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/ospm/video_ospm.h b/drivers/external_drivers/intel_media/display/tng/drv/ospm/video_ospm.h
new file mode 100644
index 0000000..856611d
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/ospm/video_ospm.h
@@ -0,0 +1,54 @@
+/**************************************************************************
+ * Copyright (c) 2012, Intel Corporation.
+ * All Rights Reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Hitesh K. Patel <hitesh.k.patel@intel.com>
+ */
+
+#ifndef _INTEL_MEDIA_VIDEO_OSPM_H_
+#define _INTEL_MEDIA_VIDEO_OSPM_H_
+
+#include "pwr_mgmt.h"
+#include "psb_msvdx.h"
+#include "vsp.h"
+
+#define PMU_VPP			0x1
+#define PMU_ENC			0x1
+#define PMU_DEC			0x1
+
+void ospm_vsp_init(struct drm_device *dev,
+		   struct ospm_power_island *p_island);
+
+void ospm_ved_init(struct drm_device *dev,
+		   struct ospm_power_island *p_island);
+
+void ospm_vec_init(struct drm_device *dev,
+		   struct ospm_power_island *p_island);
+
+int psb_msvdx_get_ved_freq(u32 reg_freq);
+
+int psb_msvdx_set_ved_freq(u32 freq_code);
+
+void psb_set_freq_control_switch(bool config_value);
+
+#endif	/* _INTEL_MEDIA_VIDEO_OSPM_H_*/
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/pmu_tng.c b/drivers/external_drivers/intel_media/display/tng/drv/pmu_tng.c
new file mode 100644
index 0000000..4089ca0
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/pmu_tng.c
@@ -0,0 +1,206 @@
+/**************************************************************************
+ * Copyright (c) 2012, Intel Corporation.
+ * All Rights Reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Dale B. Stimson <dale.b.stimson@intel.com>
+ *
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/printk.h>
+#include <linux/delay.h>
+
+#include <asm/intel-mid.h>
+
+#include "pmu_tng.h"
+
+
+#if (defined DEBUG_PM_CMD) && DEBUG_PM_CMD
+const char *pm_cmd_reg_name(u32 reg_addr)
+{
+	const char *pstr;
+
+	switch (reg_addr) {
+	case GFX_SS_PM0:
+		pstr = "GFX_SS_PM0";
+		break;
+	case GFX_SS_PM1:
+		pstr = "GFX_SS_PM1";
+		break;
+	case VED_SS_PM0:
+		pstr = "VED_SS_PM0";
+		break;
+	case VED_SS_PM1:
+		pstr = "VED_SS_PM1";
+		break;
+	case VEC_SS_PM0:
+		pstr = "VEC_SS_PM0";
+		break;
+	case VEC_SS_PM1:
+		pstr = "VEC_SS_PM1";
+		break;
+	case DSP_SS_PM:
+		pstr = "DSP_SS_PM";
+		break;
+	case VSP_SS_PM0:
+		pstr = "VSP_SS_PM0";
+		break;
+	case VSP_SS_PM1:
+		pstr = "VSP_SS_PM1";
+		break;
+	case MIO_SS_PM:
+		pstr = "MIO_SS_PM";
+		break;
+	case HDMIO_SS_PM:
+		pstr = "HDMIO_SS_PM";
+		break;
+	case NC_PM_SSS:
+		pstr = "NC_PM_SSS";
+		break;
+	default:
+		pstr = "(unknown_pm_reg)";
+		break;
+	}
+
+	return pstr;
+}
+#endif /* if (defined DEBUG_PM_CMD) && DEBUG_PM_CMD */
+
+
+/**
+ * pmu_set_power_state_tng() - Send power management cmd to punit and
+ * wait for completion.
+ *
+ * This function implements Tangier/Merrifield punit-based power control.
+ *
+ * @reg_pm0 - Address of PM control register.  Example: GFX_SS_PM0
+ *
+ * @si_mask: Control bits.  "si" stands for "sub-islands".
+ * Bit mask specifying of one or more of the power islands to be affected.
+ * Each power island is a two bit field.  These bits are set for every bit
+ * in each power island to be affected by this command.
+ * For each island, either 0 or all 2 of its bits may be specified, but it
+ * is an error to specify only 1 of its bits.
+ *
+ * @ns_mask: "ns" stands for "new state".
+ * New state for bits specified by si_mask.
+ * Bits in ns_mask that are not set in si_mask are ignored.
+ * Mask of new power state for the power islands specified by si_mask.
+ * These bits are 0b00 for full power off and 0b11 for full power on.
+ * Note that other values may be specified (0b01 and 0b10).
+ *
+ * Bit field values:
+ *   TNG_SSC_I0    0b00      - i0 - power on, no clock or p[ower gating
+ *   TNG_SSC_I1    0b01      - i1 - clock gated
+ *   TNG_SSC_I2    0b01      - i2 - soft reset
+ *   TNG_SSC_D3    0b11      - d3 - power off, hw state not retained
+ *
+ * NOTE: Bit mask ns_mask is inverted from the *actual* hardware register
+ * values being used for power control.  This convention was adopted so that
+ * the API accepts 0b11 for full power-on and 0b00 for full power-off.
+ *
+ * Function return value: 0 if success, or -error_value.
+ *
+ * Example calls (ignoring return status):
+ * Turn on all gfx islands:
+ *   si_mask = GFX_SLC_LDO_SSC | GFX_SLC_SSC | GFX_SDKCK_SSC | GFX_RSCD_SSC;
+ *   ns_mask = GFX_SLC_LDO_SSC | GFX_SLC_SSC | GFX_SDKCK_SSC | GFX_RSCD_SSC;
+ *   pmu_set_power_state_tng(GFX_SS_PM0, this_mask, new_state);
+ * Turn on all gfx islands:  (Another way):
+ *   si_mask = GFX_SLC_LDO_SSC | GFX_SLC_SSC | GFX_SDKCK_SSC | GFX_RSCD_SSC;
+ *   ns_mask = 0xFFFFFFFF;
+ *   pmu_set_power_state_tng(GFX_SS_PM0, this_mask, new_state);
+ * Turn off all gfx islands:
+ *   si_mask = GFX_SLC_LDO_SSC | GFX_SLC_SSC | GFX_SDKCK_SSC | GFX_RSCD_SSC;
+ *   ns_mask = 0;
+ *   pmu_set_power_state_tng(GFX_SS_PM0, this_mask, new_state);
+ *
+ * Replaces (for Tangier):
+ *    int pmu_nc_set_power_state(int islands, int state_type, int reg_type);
+ */
+int pmu_set_power_state_tng(u32 reg_pm0, u32 si_mask, u32 ns_mask)
+{
+	u32 pwr_cur;
+	u32 pwr_val;
+	int tcount;
+
+#if (defined DEBUG_PM_CMD) && DEBUG_PM_CMD
+	u32 pwr_prev;
+	int pwr_stored;
+#endif
+
+	ns_mask &= si_mask;
+
+#if (defined DEBUG_PM_CMD) && DEBUG_PM_CMD
+	printk(KERN_ALERT "%s(\"%s\"=%#x, %#x, %#x);\n", __func__,
+		pm_cmd_reg_name(reg_pm0), reg_pm0, si_mask, ns_mask);
+#endif
+
+	pwr_cur = intel_mid_msgbus_read32(PUNIT_PORT, reg_pm0);
+
+#if (defined DEBUG_PM_CMD) && DEBUG_PM_CMD
+	printk(KERN_ALERT "%s: before: %s: read: %#x\n",
+		__func__, pm_cmd_reg_name(reg_pm0), pwr_cur);
+#endif
+	/*  Return if already in desired state. */
+	if ((((pwr_cur >> SSC_TO_SSS_SHIFT) ^ ns_mask) & si_mask) == 0)
+		return 0;
+
+	pwr_val = (pwr_cur & ~si_mask) | ns_mask;
+	intel_mid_msgbus_write32(PUNIT_PORT, reg_pm0, pwr_val);
+
+#if (defined DEBUG_PM_CMD) && DEBUG_PM_CMD
+	printk(KERN_ALERT "%s: %s: write: %#x\n",
+		__func__, pm_cmd_reg_name(reg_pm0), pwr_val);
+	pwr_prev = 0;
+	pwr_stored = 0;
+#endif
+
+	for (tcount = 0; ; tcount++) {
+		if (tcount > 50) {
+			WARN(1, "%s: P-Unit PM action request timeout",
+				__func__);
+			return -EBUSY;
+		}
+		pwr_cur = intel_mid_msgbus_read32(PUNIT_PORT, reg_pm0);
+
+#if (defined DEBUG_PM_CMD) && DEBUG_PM_CMD
+		if (!pwr_stored || (pwr_prev != pwr_cur)) {
+			printk(KERN_ALERT
+				"%s: tries=%d: %s: read: %#x\n",
+				__func__, tcount,
+				pm_cmd_reg_name(reg_pm0),
+				pwr_cur);
+			pwr_stored = 1;
+			pwr_prev = pwr_cur;
+		}
+#endif
+
+		if ((((pwr_cur >> SSC_TO_SSS_SHIFT) ^ ns_mask) & si_mask) == 0)
+			break;
+		udelay(10);
+	}
+
+	return 0;
+}
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/pmu_tng.h b/drivers/external_drivers/intel_media/display/tng/drv/pmu_tng.h
new file mode 100644
index 0000000..ef91013
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/pmu_tng.h
@@ -0,0 +1,180 @@
+/**************************************************************************
+ * Copyright (c) 2012, Intel Corporation.
+ * All Rights Reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Dale B. Stimson <dale.b.stimson@intel.com>
+ *    Austin Hu <austin.hu@intel.com>
+ */
+
+#ifndef _PMU_TNG_H_
+#define _PMU_TNG_H_
+
+#include <linux/types.h>
+
+/* Per TNG Punit HAS */
+
+#define PUNIT_PORT              0x04
+
+/*
+ * Registers on msgbus port 4 (p-unit) for power/freq control.
+ * Bits 7:0 of the PM0 (or just PM) registers are power control bits, whereas
+ * bits 31:24 are the corresponding status bits.
+*/
+
+/*  Subsystem status of all North Cluster IPs (bits NC_PM_SSS_*) */
+#define NC_PM_SSS               0x3f
+
+/*
+ * Bit masks for power islands, as present in PM0 or PM registers.
+ * These reside as control bits in bits 7:0 of each register and
+ * as status bits in bits 31:24 of each register.
+ * Each power island has a 2-bit field which contains a value of TNG_SSC_*.
+ */
+#define SSC_TO_SSS_SHIFT        24
+
+/* GFX_SS_PM0 island */
+#define GFX_SS_PM0              0x30
+#define GFX_SS_PM1              0x31
+
+#define GFX_SLC_SSC             0x03
+#define GFX_SDKCK_SSC           0x0c
+#define GFX_RSCD_SSC            0x30
+#define GFX_SLC_LDO_SSC         0xc0
+
+#define GFX_SLC_SHIFT           0
+#define GFX_SDKCK_SHIFT         2
+#define GFX_RSCD_SHIFT          4
+#define GFX_SLC_LDO_SHIFT       6
+
+/* VED_SS_PMx power island */
+#define VED_SS_PM0              0x32
+#define VED_SS_PM1              0x33
+
+#define VED_SSC                 0x03
+
+/* VEC_SS_PMx power island */
+#define VEC_SS_PM0              0x34
+#define VEC_SS_PM1              0x35
+
+#define VEC_SSC                 0x03
+
+/* DSP_SS_PM power islands */
+#define DSP_SS_PM               0x36
+
+#define DPA_SSC                 0x03
+#define DPB_SSC                 0x0c
+#define DPC_SSC                 0x30
+
+#define DPA_SHIFT               0
+#define DPB_SHIFT               2
+#define DPC_SHIFT               4
+
+/* VSP_SS_PMx power islands */
+#define VSP_SS_PM0              0x37
+#define VSP_SS_PM1              0x38
+
+#define VSP_SSC                 0x03
+
+/* ISP_SS_PMx power islands */
+#define ISP_SS_PM0              0x39
+#define ISP_SS_PM1              0x3a
+
+#define ISP_SSC                 0x03
+
+/* MIO_SS_PM power island */
+#define MIO_SS_PM               0x3b
+
+#define MIO_SSC                 0x03
+
+/* HDMIO_SS_PM power island */
+#define HDMIO_SS_PM             0x3c
+
+#define HDMIO_SSC               0x03
+
+/*
+ * Subsystem status bits for NC_PM_SSS.  Status of all North Cluster IPs.
+ * These correspond to the above bits.
+ */
+#define NC_PM_SSS_GFX_SLC       0x00000003
+#define NC_PM_SSS_GFX_SDKCK     0x0000000c
+#define NC_PM_SSS_GFX_RSCD      0x00000030
+#define NC_PM_SSS_VED           0x000000c0
+#define NC_PM_SSS_VEC           0x00000300
+#define NC_PM_SSS_DPA           0x00000c00
+#define NC_PM_SSS_DPB           0x00003000
+#define NC_PM_SSS_DPC           0x0000c000
+#define NC_PM_SSS_VSP           0x00030000
+#define NC_PM_SSS_ISP           0x000c0000
+#define NC_PM_SSS_MIO           0x00300000
+#define NC_PM_SSS_HDMIO         0x00c00000
+#define NC_PM_SSS_GFX_SLC_LDO   0x03000000
+
+/*
+ * Frequency bits for *_PM1 registers above.
+ */
+#define IP_FREQ_VALID     0x80     /* Freq is valid bit */
+
+#define IP_FREQ_SIZE         5     /* number of bits in freq fields */
+#define IP_FREQ_MASK      0x1f     /* Bit mask for freq field */
+
+/*  Positions of various frequency fields */
+#define IP_FREQ_POS          0     /* Freq control [4:0] */
+#define IP_FREQ_GUAR_POS     8     /* Freq guar   [12:8] */
+#define IP_FREQ_STAT_POS    24     /* Freq status [28:24] */
+
+#define IP_FREQ_100_00 0x1f        /* 0b11111 100.00 */
+#define IP_FREQ_106_67 0x1d        /* 0b11101 106.67 */
+#define IP_FREQ_133_30 0x17        /* 0b10111 133.30 */
+#define IP_FREQ_160_00 0x13        /* 0b10011 160.00 */
+#define IP_FREQ_177_78 0x11        /* 0b10001 177.78 */
+#define IP_FREQ_200_00 0x0f        /* 0b01111 200.00 */
+#define IP_FREQ_213_33 0x0e        /* 0b01110 213.33 */
+#define IP_FREQ_266_67 0x0b        /* 0b01011 266.67 */
+#define IP_FREQ_320_00 0x09        /* 0b01001 320.00 */
+#define IP_FREQ_355_56 0x08        /* 0b01000 355.56 */
+#define IP_FREQ_400_00 0x07        /* 0b00111 400.00 */
+#define IP_FREQ_457_14 0x06        /* 0b00110 457.14 */
+#define IP_FREQ_533_33 0x05        /* 0b00101 533.33 */
+#define IP_FREQ_640_00 0x04        /* 0b00100 640.00 */
+#define IP_FREQ_800_00 0x03        /* 0b00011 800.00 */
+#define IP_FREQ_RESUME_SET 0x64
+
+/*  Tangier power states for each island */
+#define TNG_SSC_I0    (0b00)    /* i0 - power on, no clock or p[ower gating */
+#define TNG_SSC_I1    (0b01)    /* i1 - clock gated */
+#define TNG_SSC_I2    (0b01)    /* i2 - soft reset */
+#define TNG_SSC_D3    (0b11)    /* d3 - power off, hw state not retained */
+
+#define TNG_SSC_MASK  (0b11)    /* bit mask of all involved bits. */
+
+/*  Masks for the completely on and off states for 4 islands */
+#define TNG_COMPOSITE_I0    (0b00000000)
+#define TNG_COMPOSITE_D3    (0b11111111)
+
+#define DEBUG_PM_CMD 0
+#if !defined DEBUG_PM_CMD
+#define DEBUG_PM_CMD 1
+#endif
+
+int pmu_set_power_state_tng(u32 reg_pm0, u32 si_mask, u32 ns_mask);
+#endif /* ifndef _PMU_TNG_H_ */
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/psb_bl.c b/drivers/external_drivers/intel_media/display/tng/drv/psb_bl.c
new file mode 100644
index 0000000..2662ecb
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/psb_bl.c
@@ -0,0 +1,188 @@
+/*
+ *  psb backlight using HAL
+ *
+ * Copyright (c) 2009, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors: Eric Knopp
+ *
+ */
+
+#include <linux/backlight.h>
+#include <linux/version.h>
+#include "psb_drv.h"
+#include "psb_intel_reg.h"
+#include "psb_intel_drv.h"
+#include "pwr_mgmt.h"
+
+#ifdef CONFIG_SUPPORT_MIPI
+#include "mdfld_dsi_dbi.h"
+#endif
+
+#define MRST_BLC_MAX_PWM_REG_FREQ	    0xFFFF
+#define BLC_PWM_PRECISION_FACTOR 100	/* 10000000 */
+#define BLC_PWM_FREQ_CALC_CONSTANT 32
+#define MHz 1000000
+#define BRIGHTNESS_MIN_LEVEL 0
+#define BRIGHTNESS_INIT_LEVEL 50
+#define BRIGHTNESS_MAX_LEVEL 100
+#define BRIGHTNESS_MASK	0xFF
+#define BLC_POLARITY_NORMAL 0
+#define BLC_POLARITY_INVERSE 1
+#define BLC_ADJUSTMENT_MAX 255
+
+#define PSB_BLC_PWM_PRECISION_FACTOR    10
+#define PSB_BLC_MAX_PWM_REG_FREQ        0xFFFE
+#define PSB_BLC_MIN_PWM_REG_FREQ        0x2
+
+#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
+#define PSB_BACKLIGHT_PWM_CTL_SHIFT	(16)
+
+int psb_brightness;
+static struct backlight_device *psb_backlight_device;
+u8 blc_pol;
+u8 blc_type;
+
+int lastFailedBrightness = -1;
+
+int psb_set_brightness(struct backlight_device *bd)
+{
+	struct drm_device *dev =
+	    (struct drm_device *)bl_get_data(psb_backlight_device);
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *)dev->dev_private;
+	int level;
+
+	if (bd != NULL)
+		level = bd->props.brightness;
+	else
+		level = lastFailedBrightness;
+
+	DRM_DEBUG_DRIVER("backlight level set to %d\n", level);
+	PSB_DEBUG_ENTRY("[DISPLAY] %s: level is %d\n", __func__, level);	//DIV5-MM-DISPLAY-NC-LCM_INIT-00
+
+	/* Perform value bounds checking */
+	if (level < BRIGHTNESS_MIN_LEVEL)
+		level = BRIGHTNESS_MIN_LEVEL;
+
+	lastFailedBrightness = -1;
+
+	if (IS_FLDS(dev)) {
+		u32 adjusted_level = 0;
+
+		/* Adjust the backlight level with the percent in
+		 * dev_priv->blc_adj2;
+		 * But we do not divid blc_adj2 with its full range so that
+		 * we can map 100 to 255 scale.
+		 */
+		adjusted_level = level * dev_priv->blc_adj2;
+		adjusted_level = adjusted_level / BLC_ADJUSTMENT_MAX / BRIGHTNESS_MAX_LEVEL;
+		dev_priv->brightness_adjusted = adjusted_level;
+
+#ifdef CONFIG_SUPPORT_MIPI
+#ifndef CONFIG_MID_DSI_DPU
+		if (!(dev_priv->dsr_fb_update & MDFLD_DSR_MIPI_CONTROL)
+				&& (dev_priv->dbi_panel_on
+					|| dev_priv->dbi_panel_on2)) {
+			mdfld_dsi_dbi_exit_dsr(dev,
+					MDFLD_DSR_MIPI_CONTROL,
+					0, 0);
+			PSB_DEBUG_ENTRY
+				("Out of DSR before set brightness to %d.\n",
+				 adjusted_level);
+		}
+#endif
+
+		PSB_DEBUG_BL("Adjusted Backlight value: %d\n", adjusted_level);
+		mdfld_dsi_brightness_control(dev, 0, adjusted_level);
+		mdfld_dsi_brightness_control(dev, 2, adjusted_level);
+#endif
+	}
+
+	/* cache the brightness for later use */
+	psb_brightness = level;
+	return 0;
+}
+
+int psb_get_brightness(struct backlight_device *bd)
+{
+	DRM_DEBUG_DRIVER("brightness = 0x%x \n", psb_brightness);
+
+	/* return locally cached var instead of HW read (due to DPST etc.) */
+	return psb_brightness;
+}
+
+const struct backlight_ops psb_ops = {
+	.get_brightness = psb_get_brightness,
+	.update_status = psb_set_brightness,
+};
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+static int device_backlight_init(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *)dev->dev_private;
+
+	dev_priv->blc_adj1 = BLC_ADJUSTMENT_MAX;
+	dev_priv->blc_adj2 = BLC_ADJUSTMENT_MAX * BLC_ADJUSTMENT_MAX;
+
+	return 0;
+}
+#endif
+
+int psb_backlight_init(struct drm_device *dev)
+{
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+	int ret = 0;
+	struct backlight_properties props;
+
+	memset(&props, 0, sizeof(struct backlight_properties));
+	props.type = BACKLIGHT_PLATFORM;
+	props.max_brightness = BRIGHTNESS_MAX_LEVEL;
+	props.type = BACKLIGHT_RAW;
+
+	psb_backlight_device =
+	    backlight_device_register("psb-bl", NULL, (void *)dev, &psb_ops,
+				      &props);
+	if (IS_ERR(psb_backlight_device))
+		return PTR_ERR(psb_backlight_device);
+
+	if ((ret = device_backlight_init(dev)) != 0)
+		return ret;
+
+	psb_backlight_device->props.brightness = BRIGHTNESS_INIT_LEVEL;
+	psb_backlight_device->props.max_brightness = BRIGHTNESS_MAX_LEVEL;
+	backlight_update_status(psb_backlight_device);
+#endif
+	return 0;
+}
+
+void psb_backlight_exit(void)
+{
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+	psb_backlight_device->props.brightness = 0;
+	backlight_update_status(psb_backlight_device);
+	backlight_device_unregister(psb_backlight_device);
+#endif
+	return;
+}
+
+struct backlight_device *psb_get_backlight_device(void)
+{
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+	return psb_backlight_device;
+#endif
+	return NULL;
+}
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/psb_dpst.c b/drivers/external_drivers/intel_media/display/tng/drv/psb_dpst.c
new file mode 100644
index 0000000..7b12283
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/psb_dpst.c
@@ -0,0 +1,271 @@
+/*
+ * Copyright © 2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *    James C. Gualario <james.c.gualario@intel.com>
+ *
+ */
+
+#include "psb_umevents.h"
+#include "psb_dpst.h"
+#include "psb_dpst_func.h"
+
+/**
+ * inform the kernel of the work to be performed and related function.
+ *
+ */
+DECLARE_WORK(dpst_dev_change_work, &psb_dpst_dev_change_wq);
+/**
+ * psb_dpst_notify_change_um - notify user mode of hotplug changes
+ *
+ * @name: name of event to notify user mode of change to
+ * @state: dpst state struct to get workqueue from
+ *
+ */
+int psb_dpst_notify_change_um(enum dpst_event_enum event,
+			      struct dpst_state *state)
+{
+	if (state == NULL)
+		return IRQ_HANDLED;
+
+	state->dpst_change_wq_data.dev_name_arry_rw_status
+	    [state->dpst_change_wq_data.dev_name_write] =
+	    DRM_DPST_READY_TO_READ;
+	state->dpst_change_wq_data.dpst_events
+	    [state->dpst_change_wq_data.dev_name_write] = event;
+	if (state->dpst_change_wq_data.dev_name_read_write_wrap_ack == 1)
+		state->dpst_change_wq_data.dev_name_read_write_wrap_ack = 0;
+	state->dpst_change_wq_data.dev_name_write++;
+	if (state->dpst_change_wq_data.dev_name_write ==
+	    state->dpst_change_wq_data.dev_name_read) {
+		state->dpst_change_wq_data.dev_name_write--;
+		return IRQ_NONE;
+	}
+	if (state->dpst_change_wq_data.dev_name_write > DRM_DPST_RING_DEPTH_MAX) {
+		state->dpst_change_wq_data.dev_name_write = 0;
+		state->dpst_change_wq_data.dev_name_write_wrap = 1;
+	}
+	state->dpst_change_wq_data.hotplug_dev_list = state->list;
+	queue_work(state->dpst_wq, &(state->dpst_change_wq_data.work));
+	return IRQ_HANDLED;
+}
+
+EXPORT_SYMBOL(psb_dpst_notify_change_um);
+/**
+ *
+ * psb_dpst_create_and_notify_um - create and notify user mode of new dev
+ *
+ * @name: name to give for new event / device
+ * @state: dpst state instaces to associate event with
+ *
+ */
+struct umevent_obj *psb_dpst_create_and_notify_um(const char *name,
+						  struct dpst_state *state)
+{
+	return psb_create_umevent_obj(name, state->list);
+
+}
+
+EXPORT_SYMBOL(psb_dpst_create_and_notify_um);
+/**
+ * psb_dpst_device_pool_create_and_init - make new hotplug device pool
+ *
+ * @parent_kobj - parent kobject to associate dpst kset with
+ * @state - dpst state instance to associate list with
+ *
+ */
+struct umevent_list *psb_dpst_device_pool_create_and_init(struct kobject
+							  *parent_kobj, struct dpst_state
+							  *state)
+{
+	struct umevent_list *new_hotplug_dev_list = NULL;
+	new_hotplug_dev_list = psb_umevent_create_list();
+	if (new_hotplug_dev_list)
+		psb_umevent_init(parent_kobj, new_hotplug_dev_list, "psb_dpst");
+
+	state->dpst_wq = create_singlethread_workqueue("dpst-wq");
+
+	if (!state->dpst_wq)
+		return NULL;
+
+	INIT_WORK(&state->dpst_change_wq_data.work, psb_dpst_dev_change_wq);
+
+	state->dpst_change_wq_data.dev_name_read = 0;
+	state->dpst_change_wq_data.dev_name_write = 0;
+	state->dpst_change_wq_data.dev_name_write_wrap = 0;
+	state->dpst_change_wq_data.dev_name_read_write_wrap_ack = 0;
+
+	memset(&(state->dpst_change_wq_data.dev_name_arry_rw_status[0]),
+	       0, sizeof(int) * DRM_DPST_RING_DEPTH);
+
+	return new_hotplug_dev_list;
+}
+
+EXPORT_SYMBOL(psb_dpst_device_pool_create_and_init);
+/**
+ * psb_dpst_init - init dpst subsystem
+ * @parent_kobj - parent kobject to associate dpst state with
+ *
+ */
+struct dpst_state *psb_dpst_init(struct kobject *parent_kobj)
+{
+	struct dpst_state *state;
+	struct umevent_obj *working_umevent;
+
+	state = kzalloc(sizeof(struct dpst_state), GFP_KERNEL);
+	if (!state)
+		return NULL;
+
+	state->list = NULL;
+	state->list = psb_dpst_device_pool_create_and_init(parent_kobj, state);
+	working_umevent = psb_dpst_create_and_notify_um("init", state);
+	state->dpst_change_wq_data.dev_umevent_arry
+	    [DPST_EVENT_INIT_COMPLETE] = &(working_umevent->head);
+	working_umevent = psb_dpst_create_and_notify_um("hist_int", state);
+	state->dpst_change_wq_data.dev_umevent_arry
+	    [DPST_EVENT_HIST_INTERRUPT] = &(working_umevent->head);
+	working_umevent = psb_dpst_create_and_notify_um("term", state);
+	state->dpst_change_wq_data.dev_umevent_arry
+	    [DPST_EVENT_TERMINATE] = &(working_umevent->head);
+	working_umevent = psb_dpst_create_and_notify_um("phase_done", state);
+	state->dpst_change_wq_data.dev_umevent_arry
+	    [DPST_EVENT_PHASE_COMPLETE] = &(working_umevent->head);
+
+	return state;
+}
+
+EXPORT_SYMBOL(psb_dpst_init);
+/**
+ * psb_dpst_device_pool_destroy - destroy all dpst related resources
+ *
+ * @state: dpst state instance to destroy
+ *
+ */
+void psb_dpst_device_pool_destroy(struct dpst_state *state)
+{
+	int i;
+	struct umevent_list *list;
+	struct umevent_obj *umevent_test;
+
+	if(state == NULL)
+	{
+		DRM_INFO("DPST state already NULL in psb_dpst_device_pool_destroy\n");
+		return;
+	}
+
+	list = state->list;
+	flush_workqueue(state->dpst_wq);
+	destroy_workqueue(state->dpst_wq);
+	for (i = 0; i < DRM_DPST_MAX_NUM_EVENTS; i++) {
+		umevent_test =
+		    list_entry((state->dpst_change_wq_data.dev_umevent_arry[i]),
+			       struct umevent_obj, head);
+		state->dpst_change_wq_data.dev_umevent_arry[i] = NULL;
+	}
+	psb_umevent_cleanup(list);
+	kfree(state);
+}
+
+EXPORT_SYMBOL(psb_dpst_device_pool_destroy);
+/**
+ * psb_dpst_dev_change_wq - change workqueue implementation
+ *
+ * @work: work struct to use for kernel scheduling
+ *
+ */
+void psb_dpst_dev_change_wq(struct work_struct *work)
+{
+	struct dpst_disp_workqueue_data *wq_data;
+	int curr_event_index;
+	wq_data = to_dpst_disp_workqueue_data(work);
+	if (wq_data->dev_name_write_wrap == 1) {
+		wq_data->dev_name_read_write_wrap_ack = 1;
+		wq_data->dev_name_write_wrap = 0;
+		while (wq_data->dev_name_read != DRM_DPST_RING_DEPTH) {
+			if (wq_data->dev_name_arry_rw_status
+			    [wq_data->dev_name_read] ==
+			    DRM_DPST_READY_TO_READ) {
+				wq_data->dev_name_arry_rw_status
+				    [wq_data->dev_name_read] =
+				    DRM_DPST_READ_COMPLETE;
+				curr_event_index = wq_data->dpst_events
+				    [wq_data->dev_name_read];
+				// SH DPST psb_umevent_notify_change_gfxsock
+				if(curr_event_index < DRM_DPST_MAX_NUM_EVENTS) {
+					dpst_process_event
+						(list_entry((wq_data->dev_umevent_arry
+							[curr_event_index]),
+							struct umevent_obj, head),
+							DRM_DPST_SOCKET_GROUP_ID);
+				} else {
+					DRM_ERROR("Work queue event index out of bounds.\n");
+				}
+			}
+			wq_data->dev_name_read++;
+		}
+		wq_data->dev_name_read = 0;
+		while (wq_data->dev_name_read < wq_data->dev_name_write - 1) {
+			if (wq_data->dev_name_arry_rw_status
+			    [wq_data->dev_name_read] ==
+			    DRM_DPST_READY_TO_READ) {
+				wq_data->dev_name_arry_rw_status
+				    [wq_data->dev_name_read] =
+				    DRM_DPST_READ_COMPLETE;
+				curr_event_index = wq_data->dpst_events
+				    [wq_data->dev_name_read];
+				// SH DPST psb_umevent_notify_change_gfxsock
+				if (curr_event_index < DRM_DPST_MAX_NUM_EVENTS) {
+					dpst_process_event
+						(list_entry((wq_data->dev_umevent_arry
+							[curr_event_index]),
+							struct umevent_obj, head),
+							DRM_DPST_SOCKET_GROUP_ID);
+				} else {
+					DRM_ERROR("Work queue event index out of bounds.\n");
+				}
+			}
+			wq_data->dev_name_read++;
+		}
+	} else {
+		while (wq_data->dev_name_read < wq_data->dev_name_write) {
+			if (wq_data->dev_name_arry_rw_status
+			    [wq_data->dev_name_read] ==
+			    DRM_DPST_READY_TO_READ) {
+				wq_data->dev_name_arry_rw_status
+				    [wq_data->dev_name_read] =
+				    DRM_DPST_READ_COMPLETE;
+				curr_event_index = wq_data->dpst_events
+				    [wq_data->dev_name_read];
+				// SH DPST psb_umevent_notify_change_gfxsock
+				if (curr_event_index < DRM_DPST_MAX_NUM_EVENTS) {
+					dpst_process_event
+						(list_entry((wq_data->dev_umevent_arry
+							[curr_event_index]),
+							struct umevent_obj, head),
+							DRM_DPST_SOCKET_GROUP_ID);
+				} else {
+					DRM_ERROR("Work queue event index out of bounds.\n");
+				}
+
+			}
+			wq_data->dev_name_read++;
+		}
+	}
+	if (wq_data->dev_name_read > DRM_DPST_RING_DEPTH_MAX)
+		wq_data->dev_name_read = 0;
+}
+
+EXPORT_SYMBOL(psb_dpst_dev_change_wq);
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/psb_dpst.h b/drivers/external_drivers/intel_media/display/tng/drv/psb_dpst.h
new file mode 100644
index 0000000..90436fe
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/psb_dpst.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright © 2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *    James C. Gualario <james.c.gualario@intel.com>
+ *
+ */
+
+#ifndef _PSB_DPST_H_
+#define _PSB_DPST_H_
+/**
+ * required includes
+ *
+ */
+#include "psb_umevents.h"
+/**
+ * dpst event enumeration
+ *
+ */
+enum dpst_event_enum {
+	DPST_EVENT_INIT_COMPLETE,
+	DPST_EVENT_HIST_INTERRUPT,
+	DPST_EVENT_TERMINATE,
+	DPST_EVENT_PHASE_COMPLETE,
+	DPST_MAX_EVENT
+};
+/**
+ * dpst specific defines
+ *
+ */
+#define DRM_DPST_RING_DEPTH 256
+#define DRM_DPST_RING_DEPTH_MAX (DRM_DPST_RING_DEPTH-1)
+#define DRM_DPST_READY_TO_READ 1
+#define DRM_DPST_READ_COMPLETE 2
+#define DRM_DPST_MAX_NUM_EVENTS (DPST_MAX_EVENT)
+/**
+ * dpst workqueue data struct.
+ */
+struct dpst_disp_workqueue_data {
+	struct work_struct work;
+	const char *dev_name;
+	int dev_name_write;
+	int dev_name_read;
+	int dev_name_write_wrap;
+	int dev_name_read_write_wrap_ack;
+	enum dpst_event_enum dpst_events[DRM_DPST_RING_DEPTH];
+	int dev_name_arry_rw_status[DRM_DPST_RING_DEPTH];
+	struct umevent_list *hotplug_dev_list;
+	struct list_head *dev_umevent_arry[DRM_DPST_MAX_NUM_EVENTS];
+};
+/**
+ * dpst state structure
+ *
+ */
+struct dpst_state {
+	struct workqueue_struct *dpst_wq;
+	struct dpst_disp_workqueue_data dpst_change_wq_data;
+	struct umevent_list *list;
+};
+/**
+ * main interface function prototytpes for dpst support.
+ *
+ */
+extern struct dpst_state *psb_dpst_init(struct kobject *parent_kobj);
+extern int psb_dpst_notify_change_um(enum dpst_event_enum event,
+				     struct dpst_state *state);
+extern struct umevent_obj *psb_dpst_create_and_notify_um(const char *name, struct dpst_state
+							 *state);
+extern struct umevent_list *psb_dpst_device_pool_create_and_init(struct kobject
+								 *parent_kobj, struct
+								 dpst_state
+								 *state);
+extern void psb_dpst_device_pool_destroy(struct dpst_state *state);
+/**
+ * to go back and forth between work struct and workqueue data
+ *
+ */
+#define to_dpst_disp_workqueue_data(x) \
+	container_of(x, struct dpst_disp_workqueue_data, work)
+
+/**
+ * function prototypes for workqueue implementation
+ *
+ */
+extern void psb_dpst_dev_change_wq(struct work_struct *work);
+#endif
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/psb_dpst_func.c b/drivers/external_drivers/intel_media/display/tng/drv/psb_dpst_func.c
new file mode 100644
index 0000000..adde0c4
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/psb_dpst_func.c
@@ -0,0 +1,849 @@
+/******************************************************************************
+ *
+ * Copyright (c) 2014, Intel Corporation.
+ * Portions (c), Imagination Technology, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and Use.  Redistribution and use in binary form, without
+ * modification, of the software code provided with this license ("Software"),
+ * are permitted provided that the following conditions are met:
+ *
+ *  1. Redistributions must reproduce the above copyright notice and this
+ *     license in the documentation and/or other materials provided with the
+ *     Software.
+ *  2. Neither the name of Intel Corporation nor the name of Imagination
+ *     Technology, Ltd may be used to endorse or promote products derived from
+ *     the Software without specific prior written permission.
+ *  3. The Software can only be used in connection with the Intel hardware
+ *     designed to use the Software as outlined in the documentation. No other
+ *     use is authorized.
+ *  4. No reverse engineering, decompilation, or disassembly of the Software
+ *     is permitted.
+ *  5. The Software may not be distributed under terms different than this
+ *     license.
+ *
+ * Limited Patent License.  Intel Corporation grants a world-wide, royalty-free
+ * , non-exclusive license under patents it now or hereafter owns or controls
+ * to make, have made, use, import, offer to sell and sell ("Utilize") the
+ * Software, but solely to the extent that any such patent is necessary to
+ * Utilize the Software alone.  The patent license shall not apply to any
+ * combinations which include the Software.  No hardware per se is licensed
+ * hereunder.
+ *
+ * Ownership of Software and Copyrights. Title to all copies of the Software
+ * remains with the copyright holders. The Software is copyrighted and
+ * protected by the laws of the United States and other countries, and
+ * international treaty provisions.
+ *
+ * DISCLAIMER.  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
+ * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+******************************************************************************/
+
+#include <linux/version.h>
+
+#include "psb_drv.h"
+#include "psb_intel_reg.h"
+#include "psb_dpst.h"
+#include "dispmgrnl.h"
+#include "psb_dpst_func.h"
+#include "mdfld_dsi_dbi_dsr.h"
+
+#define HSV
+#define dpst_print PSB_DEBUG_DPST
+#define DPST_START_PERCENTAGE  10000
+
+static struct mutex dpst_mutex;
+static int blc_adj2;
+#ifdef GAMMA_SETTINGS
+static u32 lut_adj[256];
+#endif
+
+static struct drm_device *g_dev = NULL;	// hack for the queue
+static uint32_t diet_saved[33];
+
+void dpst_disable_post_process(struct drm_device *dev);
+
+int send_hist(void)
+{
+	struct dispmgr_command_hdr dispmgr_cmd;
+	struct drm_psb_hist_status_arg mydata;
+
+	/* before we send get the status for run_algorithm */
+	dpst_histogram_get_status(g_dev, &mydata);
+	dispmgr_cmd.module = DISPMGR_MOD_DPST;
+	dispmgr_cmd.cmd = DISPMGR_DPST_HIST_DATA;
+	dispmgr_cmd.data_size = sizeof(struct drm_psb_hist_status_arg);
+	dispmgr_cmd.data = (uintptr_t) &mydata;
+	dispmgr_nl_send_msg(&dispmgr_cmd);
+	return 0;
+}
+
+
+/* IOCTL - moved to standard calls for Kernel Integration */
+int psb_hist_enable(struct drm_device *dev, void *data)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	struct dpst_guardband guardband_reg;
+	struct dpst_ie_histogram_control ie_hist_cont_reg;
+	struct mdfld_dsi_hw_context *ctx = NULL;
+	uint32_t * enable = data;
+	struct mdfld_dsi_config *dsi_config = NULL;
+
+	if (!dev_priv)
+		return 0;
+
+	dsi_config = dev_priv->dsi_configs[0];
+	if(!dsi_config)
+		return 0;
+	ctx = &dsi_config->dsi_hw_context;
+
+	mutex_lock(&dpst_mutex);
+	/*
+	* FIXME: We need to force the Display to
+	* turn on but on TNG OSPM how we can force PIPEA to do it?
+	*/
+	if (power_island_get(OSPM_DISPLAY_A)) {
+
+		mdfld_dsi_dsr_forbid(dsi_config);
+
+		if (*enable == 1) {
+			ie_hist_cont_reg.data = PSB_RVDC32(HISTOGRAM_LOGIC_CONTROL);
+			ie_hist_cont_reg.ie_pipe_assignment = 0;
+			ie_hist_cont_reg.bin_reg_func_select = 1;
+			ie_hist_cont_reg.bin_reg_index = 0;
+			ie_hist_cont_reg.ie_histogram_enable = 1;
+			ie_hist_cont_reg.ie_mode_table_enabled = 1;
+#ifdef HSV
+			ie_hist_cont_reg.histogram_mode_select = 1;//HSV
+			ie_hist_cont_reg.alt_enhancement_mode = 2;//dpst_hsv_multiplier;
+#else
+			ie_hist_cont_reg.histogram_mode_select = 0;//YUV
+			ie_hist_cont_reg.alt_enhancement_mode = 1; //additive
+#endif
+			PSB_WVDC32(ie_hist_cont_reg.data, HISTOGRAM_LOGIC_CONTROL);
+			ctx->histogram_logic_ctrl = ie_hist_cont_reg.data;
+			dpst_print("hist_ctl 0x%x\n", ie_hist_cont_reg.data);
+
+			guardband_reg.data = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
+			guardband_reg.interrupt_enable = 1;
+			guardband_reg.interrupt_status = 1;
+
+			PSB_WVDC32(guardband_reg.data, HISTOGRAM_INT_CONTROL);
+			ctx->histogram_intr_ctrl = guardband_reg.data;
+			dpst_print("guardband 0x%x\n", guardband_reg.data);
+
+			/* Wait for two vblanks */
+		} else {
+			guardband_reg.data = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
+			guardband_reg.interrupt_enable = 0;
+			guardband_reg.interrupt_status = 1;
+			PSB_WVDC32(guardband_reg.data, HISTOGRAM_INT_CONTROL);
+			ctx->histogram_intr_ctrl = guardband_reg.data;
+			dpst_print("guardband 0x%x\n", guardband_reg.data);
+
+			ie_hist_cont_reg.data = PSB_RVDC32(HISTOGRAM_LOGIC_CONTROL);
+			ie_hist_cont_reg.ie_histogram_enable = 0;
+			PSB_WVDC32(ie_hist_cont_reg.data, HISTOGRAM_LOGIC_CONTROL);
+			ctx->histogram_logic_ctrl = ie_hist_cont_reg.data;
+			dpst_print("disabled: hist_ctl 0x%x\n", ie_hist_cont_reg.data);
+
+			dpst_disable_post_process(g_dev);
+		}
+
+		mdfld_dsi_dsr_allow(dsi_config);
+		power_island_put(OSPM_DISPLAY_A);
+	}
+
+	mutex_unlock(&dpst_mutex);
+
+	return 0;
+}
+
+ static int psb_hist_status(struct drm_device *dev, void *data)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	struct drm_psb_hist_status_arg *hist_status = data;
+	uint32_t * arg = hist_status->buf;
+	struct mdfld_dsi_config *dsi_config = NULL;
+	u32 iedbr_reg_data = 0;
+	struct dpst_ie_histogram_control ie_hist_cont_reg;
+	u32 i;
+	uint32_t blm_hist_ctl = HISTOGRAM_LOGIC_CONTROL;
+	uint32_t iebdr_reg = HISTOGRAM_BIN_DATA;
+	uint32_t segvalue_max_22_bit = 0x3fffff;
+	uint32_t iedbr_busy_bit = 0x80000000;
+	int dpst3_bin_count = 32; //Different DPST Algs has different Values?
+
+	if (!dev_priv)
+		return 0;
+
+	dsi_config = dev_priv->dsi_configs[0];
+	if (!dsi_config)
+		return 0;
+
+	mutex_lock(&dpst_mutex);
+	/*
+	 * FIXME: We need to force the Display to
+	 * turn on but on TNG OSPM how we can force PIPEA to do it?
+	 */
+	if (power_island_get(OSPM_DISPLAY_A))
+	{
+		mdfld_dsi_dsr_forbid(dsi_config);
+
+		ie_hist_cont_reg.data = PSB_RVDC32(blm_hist_ctl);
+		dpst_print("hist_ctl: 0x%x\n", ie_hist_cont_reg.data);
+		ie_hist_cont_reg.bin_reg_func_select = 0;
+		ie_hist_cont_reg.bin_reg_index = 0;
+		ie_hist_cont_reg.ie_histogram_enable = 1;
+		ie_hist_cont_reg.ie_mode_table_enabled = 1;
+		ie_hist_cont_reg.ie_pipe_assignment = 0;
+#ifdef HSV
+		ie_hist_cont_reg.histogram_mode_select = 1;//HSV
+		ie_hist_cont_reg.alt_enhancement_mode = 2;//dpst_hsv_multiplier;
+#else
+		ie_hist_cont_reg.histogram_mode_select = 0;//YUV
+		ie_hist_cont_reg.alt_enhancement_mode = 1; //additive
+#endif
+
+		PSB_WVDC32(ie_hist_cont_reg.data, blm_hist_ctl);
+		dpst_print("hist static: \n");
+		for (i = 0; i < dpst3_bin_count; i++) {
+			iedbr_reg_data = PSB_RVDC32(iebdr_reg);
+			if (!(iedbr_reg_data & iedbr_busy_bit)) {
+				arg[i] = iedbr_reg_data & segvalue_max_22_bit;
+				dpst_print("hist_ctl 0x%d 0x%x\n", 0x3ff & PSB_RVDC32(blm_hist_ctl), arg[i]);
+			} else {
+				i = -1;
+				ie_hist_cont_reg.data = PSB_RVDC32(blm_hist_ctl);
+				ie_hist_cont_reg.bin_reg_index = 0;
+				PSB_WVDC32(ie_hist_cont_reg.data, blm_hist_ctl);
+			}
+		}
+		mdfld_dsi_dsr_allow(dsi_config);
+		power_island_put(OSPM_DISPLAY_A);
+	}
+
+	mutex_unlock(&dpst_mutex);
+
+	return 0;
+}
+
+
+// SH START DIET
+int psb_diet_enable(struct drm_device *dev, void *data)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	struct mdfld_dsi_config *dsi_config = NULL;
+	struct mdfld_dsi_hw_context *ctx = NULL;
+	uint32_t * arg = data;
+	struct dpst_ie_histogram_control ie_hist_cont_reg;
+	u32 i;
+	uint32_t blm_hist_ctl = HISTOGRAM_LOGIC_CONTROL;
+	uint32_t iebdr_reg = HISTOGRAM_BIN_DATA;
+	int dpst3_bin_count = 32;
+	 u32 temp =0;
+
+	if (!dev_priv)
+		return 0;
+
+	dsi_config = dev_priv->dsi_configs[0];
+	if (!dsi_config)
+		return 0;
+	ctx = &dsi_config->dsi_hw_context;
+
+	mutex_lock(&dpst_mutex);
+
+	if (power_island_get(OSPM_DISPLAY_A))
+	{
+		mdfld_dsi_dsr_forbid(dsi_config);
+		if (data) {
+			ie_hist_cont_reg.data = PSB_RVDC32(blm_hist_ctl);
+			dpst_print("hist_ctl: 0x%x\n", ie_hist_cont_reg.data);
+			ie_hist_cont_reg.bin_reg_func_select = 1;
+			ie_hist_cont_reg.bin_reg_index = 0;
+			ie_hist_cont_reg.ie_histogram_enable = 1;
+			ie_hist_cont_reg.ie_mode_table_enabled = 1;
+			ie_hist_cont_reg.ie_pipe_assignment = 0;
+#ifdef HSV
+			ie_hist_cont_reg.histogram_mode_select = 1;//HSV
+			ie_hist_cont_reg.alt_enhancement_mode = 2;//dpst_hsv_multiplier;
+#else
+			ie_hist_cont_reg.histogram_mode_select = 0;//YUV
+			ie_hist_cont_reg.alt_enhancement_mode = 1; //additive
+#endif
+			PSB_WVDC32(ie_hist_cont_reg.data, blm_hist_ctl);
+			if (drm_psb_debug & PSB_D_DPST) {
+				printk("previous corr: ");
+				for (i = 0; i <= dpst3_bin_count; i++)
+				{
+					printk(" 0x%x ", PSB_RVDC32(iebdr_reg));
+				}
+				printk("\n");
+			}
+			PSB_WVDC32(0x200, iebdr_reg);
+			for (i = 1; i <= dpst3_bin_count; i++)
+			{
+				PSB_WVDC32(arg[i], iebdr_reg);
+			}
+			ctx->aimg_enhance_bin = PSB_RVDC32(iebdr_reg);
+
+			ie_hist_cont_reg.data = PSB_RVDC32(blm_hist_ctl);
+
+			temp = ie_hist_cont_reg.data;
+			temp|=(1<<27);
+			temp&=~0x7f;
+			ie_hist_cont_reg.data = temp;
+			PSB_WVDC32(ie_hist_cont_reg.data, blm_hist_ctl);
+
+			ctx->histogram_logic_ctrl = ie_hist_cont_reg.data;
+		} else {
+			ie_hist_cont_reg.data = PSB_RVDC32(blm_hist_ctl);
+			ie_hist_cont_reg.ie_mode_table_enabled = 0;
+			PSB_WVDC32(ie_hist_cont_reg.data, blm_hist_ctl);
+			ctx->histogram_logic_ctrl = ie_hist_cont_reg.data;
+		}
+
+		mdfld_dsi_dsr_allow(dsi_config);
+		power_island_put(OSPM_DISPLAY_A);
+	}
+
+	mutex_unlock(&dpst_mutex);
+
+	return 0;
+}
+
+/* dsr lock and power island lock should be hold before calling this function */
+int psb_dpst_diet_save(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = NULL;
+	struct dpst_ie_histogram_control ie_hist_cont_reg;
+	uint32_t blm_hist_ctl = HISTOGRAM_LOGIC_CONTROL;
+	uint32_t iebdr_reg = HISTOGRAM_BIN_DATA;
+	int dpst3_bin_count = 32;
+	u32 i;
+
+	if (!dev)
+		return 0;
+
+	dev_priv = psb_priv(dev);
+	if (!dev_priv || !dev_priv->psb_dpst_state)
+		return 0;
+
+	ie_hist_cont_reg.data = PSB_RVDC32(blm_hist_ctl);
+	ie_hist_cont_reg.bin_reg_func_select = 1;
+	ie_hist_cont_reg.bin_reg_index = 0;
+	PSB_WVDC32(ie_hist_cont_reg.data, blm_hist_ctl);
+	for (i = 0; i <= dpst3_bin_count; i++)
+		diet_saved[i] = PSB_RVDC32(iebdr_reg);
+	dpst_print("diet saved\n");
+		diet_saved[0] = 0x200;
+
+	return 0;
+}
+
+/* dsr lock and power island lock should be hold before calling this function */
+int psb_dpst_diet_restore(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = NULL;
+	struct mdfld_dsi_config *dsi_config = NULL;
+	struct mdfld_dsi_hw_context *ctx = NULL;
+	struct dpst_ie_histogram_control ie_hist_cont_reg;
+	u32 i;
+	uint32_t blm_hist_ctl = HISTOGRAM_LOGIC_CONTROL;
+	uint32_t iebdr_reg = HISTOGRAM_BIN_DATA;
+	int dpst3_bin_count = 32;
+	u32 temp =0;
+
+	if (!dev)
+		return 0;
+
+	dev_priv = psb_priv(dev);
+	if (!dev_priv || !dev_priv->psb_dpst_state)
+		return 0;
+
+	dsi_config = dev_priv->dsi_configs[0];
+	if (!dsi_config)
+		return 0;
+	ctx = &dsi_config->dsi_hw_context;
+
+	PSB_WVDC32(ctx->histogram_intr_ctrl, HISTOGRAM_INT_CONTROL);
+	ie_hist_cont_reg.data = ctx->histogram_logic_ctrl;
+	dpst_print("hist_ctl: 0x%x\n", ie_hist_cont_reg.data);
+	ie_hist_cont_reg.bin_reg_func_select = 1;
+	ie_hist_cont_reg.bin_reg_index = 0;
+	ie_hist_cont_reg.ie_histogram_enable = 1;
+	ie_hist_cont_reg.ie_mode_table_enabled = 1;
+	ie_hist_cont_reg.ie_pipe_assignment = 0;
+#ifdef HSV
+	ie_hist_cont_reg.histogram_mode_select = 1;//HSV
+	ie_hist_cont_reg.alt_enhancement_mode = 2;//dpst_hsv_multiplier;
+#else
+	ie_hist_cont_reg.histogram_mode_select = 0;//YUV
+	ie_hist_cont_reg.alt_enhancement_mode = 1; //additive
+#endif
+	PSB_WVDC32(ie_hist_cont_reg.data, blm_hist_ctl);
+	if (drm_psb_debug & PSB_D_DPST) {
+		printk("before restore: ");
+		for (i = 0; i <= dpst3_bin_count; i++)
+		{
+			printk(" 0x%x ", PSB_RVDC32(iebdr_reg));
+		}
+		printk("\n");
+	}
+	for (i = 0; i <= dpst3_bin_count; i++)
+	{
+		PSB_WVDC32(diet_saved[i], iebdr_reg);
+	}
+	ctx->aimg_enhance_bin = PSB_RVDC32(iebdr_reg);
+
+	ie_hist_cont_reg.data = PSB_RVDC32(blm_hist_ctl);
+
+	temp = ie_hist_cont_reg.data;
+	temp|=(1<<27);
+	temp&=~0x7f;
+	ie_hist_cont_reg.data = temp;
+	PSB_WVDC32(ie_hist_cont_reg.data, blm_hist_ctl);
+
+	ctx->histogram_logic_ctrl = ie_hist_cont_reg.data;
+
+	return 0;
+}
+
+// SH END
+int psb_init_comm(struct drm_device *dev, void *data)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	struct pci_dev *pdev = NULL;
+	struct device *ddev = NULL;
+	struct kobject *kobj = NULL;
+	uint32_t * arg = data;
+
+	if(!dev_priv)
+		return 0;
+
+	if (*arg == 1) {
+
+		    /*find handle to drm kboject */
+		    pdev = dev->pdev;
+		ddev = &pdev->dev;
+		kobj = &ddev->kobj;
+		if (dev_priv->psb_dpst_state == NULL)
+		{
+			/*init dpst kmum comms */
+			dev_priv->psb_dpst_state = psb_dpst_init(kobj);
+		}
+		psb_irq_enable_dpst(dev);
+		psb_dpst_notify_change_um(DPST_EVENT_INIT_COMPLETE,
+					   dev_priv->psb_dpst_state);
+	} else {
+
+		/*hotplug and dpst destroy examples */
+		psb_irq_disable_dpst(dev);
+		psb_dpst_notify_change_um(DPST_EVENT_TERMINATE,
+					   dev_priv->psb_dpst_state);
+		psb_dpst_device_pool_destroy(dev_priv->psb_dpst_state);
+		dev_priv->psb_dpst_state = NULL;
+	}
+	return 0;
+}
+
+ int psb_dpst_mode(struct drm_device *dev, void *data)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	struct mdfld_dsi_config *dsi_config = NULL;
+	uint32_t * arg = data;
+	uint32_t x = 0;
+	uint32_t y = 0;
+	uint32_t reg;
+
+	if (!dev_priv)
+		return 0;
+
+	dsi_config = dev_priv->dsi_configs[0];
+	if (!dsi_config)
+		return 0;
+
+	if (!power_island_get(OSPM_DISPLAY_A))
+		return 0;
+
+	reg = PSB_RVDC32(PIPEASRC);
+
+	power_island_put(OSPM_DISPLAY_A);
+
+	/* horizontal is the left 16 bits */
+	x = reg >> 16;
+
+	/* vertical is the right 16 bits */
+	y = reg & 0x0000ffff;
+
+	/* the values are the image size minus one */
+	x += 1;
+	y += 1;
+	*arg = (x << 16) | y;
+
+	return 0;
+}
+
+ int psb_update_guard(struct drm_device *dev, void *data)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	struct dpst_guardband *input = (struct dpst_guardband *)data;
+	struct mdfld_dsi_config *dsi_config = NULL;
+	struct mdfld_dsi_hw_context *ctx = NULL;
+	struct dpst_guardband reg_data;
+
+	if (!dev_priv)
+		return 0;
+
+	dsi_config = dev_priv->dsi_configs[0];
+	if (!dsi_config)
+		return 0;
+	ctx = &dsi_config->dsi_hw_context;
+
+	/*
+        * FIXME: We need to force the Display to
+        * turn on but on TNG OSPM how we can force PIPEA to do it?
+        */
+        if (!power_island_get(OSPM_DISPLAY_A))
+        {
+		return 0;
+	}
+
+	reg_data.data = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
+	reg_data.guardband = input->guardband;
+	reg_data.guardband_interrupt_delay = input->guardband_interrupt_delay;
+
+	/* printk(KERN_ALERT "guardband = %u\ninterrupt delay = %u\n",
+		reg_data.guardband, reg_data.guardband_interrupt_delay); */
+	PSB_WVDC32(reg_data.data, HISTOGRAM_INT_CONTROL);
+	ctx->histogram_intr_ctrl = reg_data.data;
+	dpst_print("guardband 0x%x\n", reg_data.data);
+
+	power_island_put(OSPM_DISPLAY_A);
+
+	return 0;
+}
+
+int dpst_disable(struct drm_device *dev)
+{
+	uint32_t enable = 0;
+	int ret = 0;
+	 ret = psb_init_comm(dev, &enable);
+	 return ret;
+}
+
+ void dpst_process_event(struct umevent_obj *notify_disp_obj,
+			   int dst_group_id)
+{
+	int messageType;
+	int do_not_quit = 1;
+
+	    /* Call into UMComm layer to receive histogram interrupts */
+	    //eventval = Xpsb_kmcomm_get_kmevent((void *)tid);
+	    /* fprintf(stderr, "Got message %d for DPST\n", eventval); */
+	    messageType = notify_disp_obj->kobj.name[0];	/* need to debug to figure out which field this is */
+	 switch (messageType)
+		 {
+	case 'i':		//DPST_EVENT_INIT_COMPLETE:
+	case 'h':		//DPST_EVENT_HIST_INTERRUPT:
+		/* DPST histogram */
+		    send_hist();
+		break;
+	case 'p':		//DPST_EVENT_PHASE_COMPLETE:
+		break;
+	case 't':		//DPST_EVENT_TERMINATE:
+		break;
+	default:
+
+		    /* disable DPST */
+		    do_not_quit = 0;
+		break;
+		}
+}
+
+ int dpst_histogram_enable(struct drm_device *dev, int enable)
+{
+	int ret = 0;
+
+	    /* enable histogram interrupts */
+	    ret = psb_hist_enable(dev, &enable);
+	return ret;
+}
+
+ int dpst_histogram_get_status(struct drm_device *dev,
+				 struct drm_psb_hist_status_arg *hist_data)
+{
+	int ret = 0;
+	ret = psb_hist_status(dev, hist_data);
+	if (ret) {
+		printk(KERN_ERR
+			"Error: histogram get status ioctl returned error: %d\n",
+			ret);
+		return 1;
+	}
+	return 0;
+}
+
+  int psb_dpst_bl(struct drm_device *dev, void *data)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	uint32_t * arg = data;
+	struct backlight_device bd;
+
+	if(!dev_priv)
+		return 0;
+
+	dpst_print("adjust percentage: %d.%d\n", *arg / 100, *arg % 100);
+	dev_priv->blc_adj2 = (*arg * 255 / 100) * 255 / 100;
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+	bd.props.brightness = psb_get_brightness(&bd);
+	psb_set_brightness(&bd);
+#endif				/*  */
+	    return 0;
+}
+
+ int psb_gamma_set(struct drm_device *dev, void *data)
+{
+	uint16_t * lut_arg = data;
+
+//      struct drm_mode_object *obj;
+	struct drm_crtc *crtc;
+	struct drm_connector *connector;
+	struct psb_intel_crtc *psb_intel_crtc;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	int i = 0;
+
+	DRM_ERROR("GAMMA tuning is not expected in DPST on Moorefield.\n");
+//      int32_t obj_id;
+
+//      obj_id = lut_arg->output_id;
+//      obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_CONNECTOR);
+//      if (!obj) {
+//              printk(KERN_ERR "Invalid Connector object, id = %d\n", obj_id);
+//              DRM_DEBUG("Invalid Connector object.\n");
+//              return -EINVAL;
+//      }
+	connector = dev_priv->dpst_connector;	//= obj_to_connector(obj);
+	crtc = connector->encoder->crtc;
+
+	psb_intel_crtc = to_psb_intel_crtc(crtc);
+	 for (i = 0; i < 256; i++) {
+		psb_intel_crtc->lut_adj[i] = lut_arg[i];
+	}
+	 psb_intel_crtc_load_lut(crtc);
+
+	return 0;
+}
+
+static void dpst_save_bl_adj_factor(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	if (!dev_priv)
+		return;
+
+	blc_adj2 = DPST_START_PERCENTAGE;
+}
+
+static void dpst_restore_bl_adj_factor(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	int i = 0;
+
+	if (!dev_priv)
+		return;
+
+	/*compute the original adj in function psb_dpst_bl*/
+	dev_priv->blc_adj2 = ((dev_priv->blc_adj2 * 100) / 255) *100 /255;
+
+	if (blc_adj2 != dev_priv->blc_adj2 && blc_adj2 != 0)
+	{
+		for(i = dev_priv->blc_adj2; i <= blc_adj2; i = i+30)
+		{
+			psb_dpst_bl(dev, &i);
+			msleep(100);
+		}
+		i = blc_adj2;
+		psb_dpst_bl(dev, &i);
+	}
+}
+
+#ifdef GAMMA_SETTINGS
+static void dpst_save_gamma_settings(struct drm_device *dev)
+{
+    struct drm_psb_private *dev_priv = dev->dev_private;
+    struct drm_connector *connector;
+    struct mdfld_dsi_config *dsi_config;
+    struct drm_crtc *crtc;
+    struct psb_intel_crtc *psb_intel_crtc;
+    int i = 0;
+
+    if (!dev_priv)
+        return;
+
+    connector = dev_priv->dpst_connector;
+    dsi_config = dev_priv->dsi_configs[0];
+
+    crtc = connector->encoder->crtc;
+    psb_intel_crtc = to_psb_intel_crtc(crtc);
+
+    /*
+    * FIXME: We need to force the Display to
+    * turn on but on TNG OSPM how we can force PIPEA to do it?
+    */
+    if (!power_island_get(OSPM_DISPLAY_A))
+    {
+	return;
+    }
+
+    for (i = 0; i < 256; i++)
+        lut_adj[i] = REG_READ((PALETTE_A + 4 * i));
+
+    power_island_put(OSPM_DISPLAY_A);
+}
+
+static void dpst_restore_gamma_settings(struct drm_device *dev)
+{
+    struct drm_psb_private *dev_priv = dev->dev_private;
+    struct mdfld_dsi_config *dsi_config;
+    struct mdfld_dsi_hw_context *ctx;
+    struct drm_connector *connector;
+    struct drm_crtc *crtc;
+    struct psb_intel_crtc *psb_intel_crtc;
+    int i = 0;
+
+    if (!dev_priv)
+        return;
+
+    connector = dev_priv->dpst_connector;
+    dsi_config = dev_priv->dsi_configs[0];
+    ctx = &dsi_config->dsi_hw_context;
+
+    crtc = connector->encoder->crtc;
+    psb_intel_crtc = to_psb_intel_crtc(crtc);
+
+    /*
+    * FIXME: We need to force the Display to
+    * turn on but on TNG OSPM how we can force PIPEA to do it?
+    */
+    if (!power_island_get(OSPM_DISPLAY_A))
+    {
+        return;
+    }
+
+    for (i = 0; i < 256; i++) {
+        ctx->palette[i] = lut_adj[i];
+        REG_WRITE((PALETTE_A + 4 * i), lut_adj[i]);
+    }
+
+    power_island_put(OSPM_DISPLAY_A);
+}
+#endif
+
+void dpst_disable_post_process(struct drm_device *dev)
+{
+	dpst_restore_bl_adj_factor(dev);
+	//dpst_restore_gamma_settings(dev);
+}
+
+ void dpst_execute_recv_command(struct dispmgr_command_hdr *cmd_hdr)
+{
+	switch (cmd_hdr->cmd) {
+	case DISPMGR_DPST_GET_MODE:
+		 {
+			uint32_t xy = 0;
+			struct dispmgr_command_hdr send_cmd_hdr;
+			psb_dpst_mode(g_dev, &xy);
+			send_cmd_hdr.data_size = sizeof(xy);
+			send_cmd_hdr.data = (uintptr_t) &xy;
+			send_cmd_hdr.module = DISPMGR_MOD_DPST;
+			send_cmd_hdr.cmd = DISPMGR_DPST_GET_MODE;
+			dispmgr_nl_send_msg(&send_cmd_hdr);
+		}
+		break;
+	case DISPMGR_DPST_INIT_COMM:
+		 {
+			if (cmd_hdr->data_size) {
+				unsigned long value =
+				    *((unsigned long *)cmd_hdr->data);
+				uint32_t enable = value;
+				psb_init_comm(g_dev, &enable);
+			}
+		} break;
+	case DISPMGR_DPST_UPDATE_GUARD:
+		 {
+			if (cmd_hdr->data_size) {
+				unsigned long value =
+				    *((unsigned long *)cmd_hdr->data);
+				uint32_t gb_arg = value;
+				psb_update_guard(g_dev, &gb_arg);
+			}
+		} break;
+	case DISPMGR_DPST_BL_CMD:
+		 {
+			if (cmd_hdr->data_size) {
+				unsigned long value =
+				    *((unsigned long *)cmd_hdr->data);
+				uint32_t data = value;
+				psb_dpst_bl(g_dev, (void *)&data);
+			}
+		} break;
+	case DISPMGR_DPST_HIST_ENABLE:
+		 {
+			if (cmd_hdr->data_size) {
+				unsigned long value =
+				    *((unsigned long *)cmd_hdr->data);
+				uint32_t enable = value;
+				psb_hist_enable(g_dev, &enable);
+			}
+		} break;
+	case DISPMGR_DPST_GAMMA_SET_CMD:
+		 {
+			if (cmd_hdr->data_size) {
+				uint16_t * arg = (uint16_t *) cmd_hdr->data;
+				psb_gamma_set(g_dev, (void *)arg);
+			}
+		} break;
+	case DISPMGR_DPST_DIET_ENABLE:
+		 {
+			if (cmd_hdr->data_size) {
+				uint32_t * arg = (uint32_t *) cmd_hdr->data;
+				psb_diet_enable(g_dev, (void *)arg);
+			}
+		} break;
+	case DISPMGR_DPST_DIET_DISABLE:
+		 {
+			psb_diet_enable(g_dev, 0);
+		}
+		break;
+	default:
+		 {
+			printk
+			    ("kdispmgr: received unknown dpst command = %d.\n",
+			     cmd_hdr->cmd);
+		};
+	};			/* switch */
+}
+
+/* Initialize the dpst data */
+int dpst_init(struct drm_device *dev, int level, int output_id)
+{
+	g_dev = dev;            /* hack for now - the work queue does not have the device */
+
+	mutex_init(&dpst_mutex);
+
+	dpst_save_bl_adj_factor(dev);
+	//dpst_save_gamma_settings(dev);
+
+	return 0;
+}
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/psb_dpst_func.h b/drivers/external_drivers/intel_media/display/tng/drv/psb_dpst_func.h
new file mode 100644
index 0000000..cb504a4
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/psb_dpst_func.h
@@ -0,0 +1,76 @@
+/******************************************************************************
+ *
+ * Copyright (c) 2011, Intel Corporation.
+ * Portions (c), Imagination Technology, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and Use.  Redistribution and use in binary form, without
+ * modification, of the software code provided with this license ("Software"),
+ * are permitted provided that the following conditions are met:
+ *
+ *  1. Redistributions must reproduce the above copyright notice and this
+ *     license in the documentation and/or other materials provided with the
+ *     Software.
+ *  2. Neither the name of Intel Corporation nor the name of Imagination
+ *     Technology, Ltd may be used to endorse or promote products derived from
+ *     the Software without specific prior written permission.
+ *  3. The Software can only be used in connection with the Intel hardware
+ *     designed to use the Software as outlined in the documentation. No other
+ *     use is authorized.
+ *  4. No reverse engineering, decompilation, or disassembly of the Software
+ *     is permitted.
+ *  5. The Software may not be distributed under terms different than this
+ *     license.
+ *
+ * Limited Patent License.  Intel Corporation grants a world-wide, royalty-free
+ * , non-exclusive license under patents it now or hereafter owns or controls
+ * to make, have made, use, import, offer to sell and sell ("Utilize") the
+ * Software, but solely to the extent that any such patent is necessary to
+ * Utilize the Software alone.  The patent license shall not apply to any
+ * combinations which include the Software.  No hardware per se is licensed
+ * hereunder.
+ *
+ * Ownership of Software and Copyrights. Title to all copies of the Software
+ * remains with the copyright holders. The Software is copyrighted and
+ * protected by the laws of the United States and other countries, and
+ * international treaty provisions.
+ *
+ * DISCLAIMER.  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
+ * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+******************************************************************************/
+
+#ifndef _PSB_DPST_FUNC_H_
+#define _PSB_DPST_FUNC_H_
+
+#include "psb_drm.h"
+#include <dispmgrnl.h>
+#include <psb_umevents.h>
+
+int dpst_init(struct drm_device *dev, int level, int output_id);
+int dpst_histogram_get_status(struct drm_device *dev,
+			       struct drm_psb_hist_status_arg *hist_data);
+void dpst_process_event(struct umevent_obj *notify_disp_obj, int dst_group_id);
+void dpst_execute_recv_command(struct dispmgr_command_hdr *cmd_hdr);
+ int psb_dpst_mode(struct drm_device *, void *);
+int psb_init_comm(struct drm_device *, void *);
+int psb_update_guard(struct drm_device *, void *);
+int psb_hist_enable(struct drm_device *, void *);
+int psb_gamma_set(struct drm_device *, void *);
+int psb_dpst_bl(struct drm_device *, void *);
+
+// SH START DPST This doesn't work and is not used.
+int psb_diet_enable(struct drm_device *, void *);
+
+// SH END DPST
+
+#endif	/* _PSB_DPST_FUNC_H_ */
+
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/psb_drv.c b/drivers/external_drivers/intel_media/display/tng/drv/psb_drv.c
new file mode 100755
index 0000000..16b19c2
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/psb_drv.c
@@ -0,0 +1,4973 @@
+/**************************************************************************
+ * Copyright (c) 2007, Intel Corporation.
+ * All Rights Reserved.
+ * Copyright (c) 2008, Tungsten Graphics, Inc. Cedar Park, TX., USA.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#include <linux/compat.h>
+#include <linux/ioctl.h>
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include "psb_drm.h"
+#include "drm_shared.h"
+#include "psb_drv.h"
+#include "psb_fb.h"
+#include "psb_reg.h"
+#include "psb_intel_reg.h"
+#include "psb_msvdx.h"
+#include "psb_video_drv.h"
+
+#ifdef SUPPORT_VSP
+#include "vsp.h"
+#endif
+
+#include "tng_topaz.h"
+
+#include <drm/drm_pciids.h>
+#include "pwr_mgmt.h"
+#include "psb_intel_display.h"
+#include "mdfld_output.h"
+
+#include <linux/cpu.h>
+#include <linux/notifier.h>
+#include <linux/spinlock.h>
+
+#include <asm/intel_scu_ipc.h>
+#include <asm/intel-mid.h>
+
+#ifdef CONFIG_SUPPORT_MIPI
+#include "dispmgrnl.h"
+#include "mdfld_dsi_dbi.h"
+#ifdef CONFIG_MID_DSI_DPU
+#include "mdfld_dsi_dbi_dpu.h"
+#endif
+#include "mdfld_dsi_dbi_dsr.h"
+/* SH DPST */
+#include "psb_dpst_func.h"
+#endif
+
+#ifdef CONFIG_MID_HDMI
+#include "mdfld_msic.h"
+#endif
+#include "psb_intel_hdmi.h"
+
+#include "otm_hdmi.h"
+#include "android_hdmi.h"
+#include "bufferclass_interface.h"
+
+#include "mdfld_hdcp.h"
+#include "mdfld_csc.h"
+#include "mrfld_s3d.h"
+
+#include "pvr_bridge.h"
+
+#include "mrfld_clock.h"
+#include "mdfld_debugfs.h"
+
+/* MaxFifo/ S0i1-Display */
+#include "dc_maxfifo.h"
+#define VBLANK_OFF_DELAY_DEFAULT	300
+
+#define KEEP_UNUSED_CODE 0
+#define KEEP_UNUSED_CODE_S3D 0
+
+#define KEEP_UNUSED_CODE_DRIVER_DISPATCH 0
+
+#define HDMI_MONITOR_NAME_LENGTH 20
+
+/* Hack to Turn GFX islands up - BEGIN */
+static void power_up(int pm_reg, u32 pm_mask);
+
+static void power_up(int pm_reg, u32 pm_mask) {
+	u32 pwr_mask = 0;
+
+	pwr_mask = intel_mid_msgbus_read32(0x04, pm_reg);
+	// pwr_mask &= ~pm_mask;
+	pwr_mask =0;
+
+	intel_mid_msgbus_write32(0x04, pm_reg, pwr_mask);
+	udelay(10);
+}
+/* Hack to Turn GFX islands up - END */
+
+/*
+	IED clean-up handling:
+	With Monkey stress test, user space gets killed
+	multiple time. Google PlayMovies App tries to
+	restart playback once MediaServer is back. For every
+	new instance, a new file descriptor is used.
+	g_ied_context can track upto MAX_IED_SESSION file
+	descriptors for clean-up. In future, we should make
+	insert/delete to be smart.
+*/
+#define MAX_IED_SESSION 100
+struct file *g_ied_context[MAX_IED_SESSION];
+/* index for g_ied_context */
+uint32_t g_ied_context_index;
+/* DC driver ied_ref count */
+uint32_t g_ied_ref;
+/* Flag used during Overlay Disabling to clear IED*/
+uint32_t g_ied_force_clean;
+/* Mutex to access ied globals */
+struct mutex g_ied_mutex;
+
+int drm_psb_debug;
+int drm_decode_flag = 0x0;
+int drm_psb_enable_pr2_cabc = 1;
+int drm_psb_enable_gamma;
+int drm_psb_enable_color_conversion;
+int drm_psb_set_gamma_success = 0;
+int drm_psb_set_gamma_pending = 0;
+int drm_psb_set_gamma_pipe = MDFLD_PIPE_MAX;
+int gamma_setting_save[256] = {0};
+int csc_setting_save[6] = {0};
+/*EXPORT_SYMBOL(drm_psb_debug);*/
+static int drm_psb_trap_pagefaults;
+
+int drm_psb_no_fb;
+int drm_psb_force_pipeb;
+int drm_idle_check_interval = 5;
+int drm_msvdx_pmpolicy = PSB_PMPOLICY_POWERDOWN;
+int drm_psb_cpurelax;
+int drm_psb_udelaydivider = 1;
+int drm_topaz_pmpolicy = PSB_PMPOLICY_POWERDOWN;
+int drm_vsp_pmpolicy = PSB_PMPOLICY_SUSPEND_HWIDLE;
+int drm_topaz_cgpolicy = PSB_CGPOLICY_ON;
+int drm_topaz_cmdpolicy = PSB_CMDPOLICY_PARALLEL;
+int drm_topaz_sbuswa;
+int drm_psb_ospm = 1;
+int drm_psb_dsr;
+int drm_psb_gfx_pm;
+int drm_psb_vsp_pm;
+int drm_psb_ved_pm;
+int drm_psb_vec_pm;
+int drm_psb_topaz_clockgating;
+int gfxrtdelay = 2 * 1000;
+int drm_psb_3D_vblank;
+int drm_psb_smart_vsync = 1;
+int drm_psb_te_timer_delay = (DRM_HZ / 40);
+char HDMI_EDID[HDMI_MONITOR_NAME_LENGTH];
+int hdmi_state;
+u32 DISP_PLANEB_STATUS = ~DISPLAY_PLANE_ENABLE;
+int drm_psb_msvdx_tiling = 1;
+int drm_msvdx_bottom_half;
+int drm_hdmi_hpd_auto;
+#ifdef CONFIG_SUPPORT_MIPI
+int default_hdmi_scaling_mode = DRM_MODE_SCALE_CENTER;
+#else
+int default_hdmi_scaling_mode = DRM_MODE_SCALE_ASPECT;
+#endif
+#ifdef CONFIG_ITE_HDMI_CEC
+int hdmi_edid_src_phy_addr = 0;
+#endif
+int drm_vsp_burst = 1;
+int drm_vsp_force_up_freq = 0;
+int drm_vsp_force_down_freq = 0;
+int drm_vsp_single_int = 0;
+int drm_vec_force_up_freq = 0;
+int drm_vec_force_down_freq = 0;
+int drm_vsp_vpp_batch_cmd = 1;
+int drm_video_sepkey = -1;
+int gamma_setting[129] = {0};
+int csc_setting[6] = {0};
+int gamma_number = 129;
+int csc_number = 6;
+
+static int psb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+
+#ifdef CONFIG_COMPAT
+static long psb_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+#endif
+
+MODULE_PARM_DESC(debug, "Enable debug output");
+MODULE_PARM_DESC(no_fb, "Disable FBdev");
+MODULE_PARM_DESC(trap_pagefaults, "Error and reset on MMU pagefaults");
+MODULE_PARM_DESC(disable_vsync, "Disable vsync interrupts");
+MODULE_PARM_DESC(force_pipeb, "Forces PIPEB to become primary fb");
+MODULE_PARM_DESC(ta_mem_size, "TA memory size in kiB");
+MODULE_PARM_DESC(ospm, "switch for ospm support");
+MODULE_PARM_DESC(rtpm, "Specifies Runtime PM delay for GFX");
+MODULE_PARM_DESC(msvdx_pmpolicy, "msvdx power management policy btw frames");
+MODULE_PARM_DESC(topaz_pmpolicy, "topaz power managerment policy btw frames");
+MODULE_PARM_DESC(vsp_pmpolicy, "vsp power managerment policy btw frames");
+MODULE_PARM_DESC(topaz_cgpolicy, "disable VEC and GFX clock gating");
+MODULE_PARM_DESC(topaz_cmdpolicy, "execute cmd in parallel mode");
+MODULE_PARM_DESC(topaz_sbuswa, "WA for topaz sysbus write");
+MODULE_PARM_DESC(PanelID, "Panel info for querying");
+MODULE_PARM_DESC(hdmi_edid, "EDID info for HDMI monitor");
+MODULE_PARM_DESC(hdmi_state, "Whether HDMI Monitor is connected or not");
+MODULE_PARM_DESC(vblank_sync,
+		 "whether sync to vblank interrupt when do 3D flip");
+MODULE_PARM_DESC(smart_vsync, "Enable Smart Vsync for Display");
+MODULE_PARM_DESC(te_delay, "swap delay after TE interrpt");
+MODULE_PARM_DESC(cpu_relax, "replace udelay with cpu_relax for video");
+MODULE_PARM_DESC(udelay_divider, "divide the usec value of video udelay");
+MODULE_PARM_DESC(vsp_pm, "Power on/off the VSP");
+MODULE_PARM_DESC(ved_pm, "Power on/off the Msvdx");
+MODULE_PARM_DESC(vec_pm, "Power on/off the Topaz");
+MODULE_PARM_DESC(hdmi_hpd_auto, "HDMI hot-plug auto test flag");
+MODULE_PARM_DESC(default_hdmi_scaling_mode, "Default HDMI scaling mode");
+#ifdef CONFIG_ITE_HDMI_CEC
+MODULE_PARM_DESC(hdmi_edid_src_phy_addr, "HDMI edid for HDMI CEC HAL");
+#endif
+MODULE_PARM_DESC(vsp_burst, "VSP burst mode enable");
+MODULE_PARM_DESC(vsp_force_up_freq, "force VSP running at certain freq");
+MODULE_PARM_DESC(vsp_force_down_freq, "force VSP power down at certain freq");
+MODULE_PARM_DESC(vsp_single_int, "force VSP VPP generate one irq per command group");
+MODULE_PARM_DESC(vec_force_up_freq, "force VEC running at certain freq");
+MODULE_PARM_DESC(vec_force_down_freq, "force VEC power down at certain freq");
+MODULE_PARM_DESC(vsp_vpp_batch_cmd, "set vsp vpp for batch cmd submit");
+MODULE_PARM_DESC(video_sepkey, "Force sepapp to use specified key index to verify ved/vec/vsp firmware");
+
+module_param_named(enable_color_conversion, drm_psb_enable_color_conversion,
+					int, 0600);
+module_param_named(enable_gamma, drm_psb_enable_gamma, int, 0600);
+module_param_named(debug, drm_psb_debug, int, 0600);
+module_param_named(psb_enable_pr2_cabc, drm_psb_enable_pr2_cabc, int, 0600);
+module_param_named(no_fb, drm_psb_no_fb, int, 0600);
+module_param_named(trap_pagefaults, drm_psb_trap_pagefaults, int, 0600);
+module_param_named(force_pipeb, drm_psb_force_pipeb, int, 0600);
+module_param_named(msvdx_pmpolicy, drm_msvdx_pmpolicy, int, 0600);
+module_param_named(cpu_relax, drm_psb_cpurelax, int, 0600);
+module_param_named(udelay_divider, drm_psb_udelaydivider, int, 0600);
+module_param_named(topaz_pmpolicy, drm_topaz_pmpolicy, int, 0600);
+module_param_named(vsp_pmpolicy, drm_vsp_pmpolicy, int, 0600);
+module_param_named(topaz_cgpolicy, drm_topaz_cgpolicy, int, 0600);
+module_param_named(topaz_cmdpolicy, drm_topaz_cmdpolicy, int, 0600);
+module_param_named(msvdx_bottom_half, drm_msvdx_bottom_half, int, 0600);
+module_param_named(topaz_sbuswa, drm_topaz_sbuswa, int, 0600);
+module_param_named(ospm, drm_psb_ospm, int, 0600);
+module_param_named(dsr, drm_psb_dsr, int, 0600);
+module_param_named(gfx_pm, drm_psb_gfx_pm, int, 0600);
+module_param_named(vsp_pm, drm_psb_vsp_pm, int, 0600);
+module_param_named(ved_pm, drm_psb_ved_pm, int, 0600);
+module_param_named(vec_pm, drm_psb_vec_pm, int, 0600);
+module_param_named(msvdx_tiling_memory, drm_psb_msvdx_tiling, int, 0600);
+module_param_named(rtpm, gfxrtdelay, int, 0600);
+module_param_named(topaz_clockgating, drm_psb_topaz_clockgating, int, 0600);
+module_param_named(PanelID, PanelID, int, 0600);
+module_param_string(hdmi_edid, HDMI_EDID, 20, 0600);
+module_param_named(hdmi_state, hdmi_state, int, 0600);
+module_param_named(vblank_sync, drm_psb_3D_vblank, int, 0600);
+module_param_named(smart_vsync, drm_psb_smart_vsync, int, 0600);
+module_param_named(te_delay, drm_psb_te_timer_delay, int, 0600);
+#ifdef CONFIG_SLICE_HEADER_PARSING
+module_param_named(decode_flag, drm_decode_flag, int, 0600);
+#endif
+module_param_named(hdmi_hpd_auto, drm_hdmi_hpd_auto, int, 0600);
+module_param_named(default_hdmi_scaling_mode, default_hdmi_scaling_mode,
+					int, 0600);
+#ifdef CONFIG_ITE_HDMI_CEC
+module_param_named(hdmi_edid_src_phy_addr, hdmi_edid_src_phy_addr, int, 0600);
+#endif
+module_param_named(vsp_burst, drm_vsp_burst, int, 0600);
+module_param_named(vsp_force_up_freq, drm_vsp_force_up_freq, int, 0600);
+module_param_named(vsp_force_down_freq, drm_vsp_force_down_freq, int, 0600);
+module_param_named(vsp_single_int, drm_vsp_single_int, int, 0600);
+module_param_named(vec_force_up_freq, drm_vec_force_up_freq, int, 0600);
+module_param_named(vec_force_down_freq, drm_vec_force_down_freq, int, 0600);
+module_param_named(vsp_vpp_batch_cmd, drm_vsp_vpp_batch_cmd, int, 0600);
+module_param_named(video_sepkey, drm_video_sepkey, int, 0600);
+module_param_array_named(gamma_adjust, gamma_setting, int, &gamma_number, 0600);
+module_param_array_named(csc_adjust, csc_setting, int, &csc_number, 0600);
+
+#ifndef MODULE
+/* Make ospm configurable via cmdline firstly,
+* and others can be enabled if needed.
+*/
+static int __init config_ospm(char *arg)
+{
+	/* ospm turn on/off control can be passed in as a cmdline parameter */
+	/* to enable this feature add ospm=1 to cmdline */
+	/* to disable this feature add ospm=0 to cmdline */
+	if (!arg)
+		return -EINVAL;
+
+	if (!strcasecmp(arg, "0"))
+		drm_psb_ospm = 0;
+	else if (!strcasecmp(arg, "1"))
+		drm_psb_ospm = 1;
+
+	return 0;
+}
+
+static int __init config_dsr(char *arg)
+{
+	if (!arg)
+		return -EINVAL;
+
+	if (!strcasecmp(arg, "0"))
+		drm_psb_dsr = 0;
+	else if (!strcasecmp(arg, "1"))
+		drm_psb_dsr = 1;
+
+	return 0;
+}
+
+early_param("ospm", config_ospm);
+early_param("dsr", config_dsr);
+#endif
+
+static struct pci_device_id pciidlist[] = {
+	{0x8086, 0x1180, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRFLD_1180},
+	{0x8086, 0x1181, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRFLD_1180},
+	{0x8086, 0x1182, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRFLD_1180},
+	{0x8086, 0x1183, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRFLD_1180},
+	{0x8086, 0x1184, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRFLD_1180},
+	{0x8086, 0x1185, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRFLD_1180},
+	{0x8086, 0x1186, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRFLD_1180},
+	{0x8086, 0x1187, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRFLD_1180},
+	{0x8086, 0x1480, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRFLD_1480},
+	{0, 0, 0}
+};
+
+MODULE_DEVICE_TABLE(pci, pciidlist);
+/*
+ * Standard IOCTLs.
+ */
+
+#define DRM_IOCTL_PSB_KMS_OFF	\
+		DRM_IO(DRM_PSB_KMS_OFF + DRM_COMMAND_BASE)
+#define DRM_IOCTL_PSB_KMS_ON	\
+		DRM_IO(DRM_PSB_KMS_ON + DRM_COMMAND_BASE)
+#define DRM_IOCTL_PSB_VT_LEAVE	\
+		DRM_IO(DRM_PSB_VT_LEAVE + DRM_COMMAND_BASE)
+#define DRM_IOCTL_PSB_VT_ENTER	\
+		DRM_IO(DRM_PSB_VT_ENTER + DRM_COMMAND_BASE)
+#define DRM_IOCTL_PSB_EXTENSION	\
+		DRM_IOWR(DRM_PSB_EXTENSION + DRM_COMMAND_BASE, \
+			 union drm_psb_extension_arg)
+#define DRM_IOCTL_PSB_SIZES	\
+		DRM_IOR(DRM_PSB_SIZES + DRM_COMMAND_BASE, \
+			struct drm_psb_sizes_arg)
+#define DRM_IOCTL_PSB_FUSE_REG	\
+		DRM_IOWR(DRM_PSB_FUSE_REG + DRM_COMMAND_BASE, uint32_t)
+#define DRM_IOCTL_PSB_VBT	\
+		DRM_IOWR(DRM_PSB_VBT + DRM_COMMAND_BASE, \
+			struct gct_ioctl_arg)
+#define DRM_IOCTL_PSB_DC_STATE	\
+		DRM_IOW(DRM_PSB_DC_STATE + DRM_COMMAND_BASE, \
+			struct drm_psb_dc_state_arg)
+#define DRM_IOCTL_PSB_ADB	\
+		DRM_IOWR(DRM_PSB_ADB + DRM_COMMAND_BASE, uint32_t)
+#define DRM_IOCTL_PSB_MODE_OPERATION	\
+		DRM_IOWR(DRM_PSB_MODE_OPERATION + DRM_COMMAND_BASE, \
+			 struct drm_psb_mode_operation_arg)
+#define DRM_IOCTL_PSB_STOLEN_MEMORY	\
+		DRM_IOWR(DRM_PSB_STOLEN_MEMORY + DRM_COMMAND_BASE, \
+			 struct drm_psb_stolen_memory_arg)
+#define DRM_IOCTL_PSB_REGISTER_RW	\
+		DRM_IOWR(DRM_PSB_REGISTER_RW + DRM_COMMAND_BASE, \
+			 struct drm_psb_register_rw_arg)
+#define DRM_IOCTL_PSB_GTT_MAP	\
+		DRM_IOWR(DRM_PSB_GTT_MAP + DRM_COMMAND_BASE, \
+			 struct psb_gtt_mapping_arg)
+#define DRM_IOCTL_PSB_GTT_UNMAP	\
+		DRM_IOW(DRM_PSB_GTT_UNMAP + DRM_COMMAND_BASE, \
+			struct psb_gtt_mapping_arg)
+#define DRM_IOCTL_PSB_GETPAGEADDRS	\
+		DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_GETPAGEADDRS,\
+			 struct drm_psb_getpageaddrs_arg)
+#define DRM_IOCTL_PSB_HIST_ENABLE	\
+		DRM_IOWR(DRM_PSB_HIST_ENABLE + DRM_COMMAND_BASE, \
+			 uint32_t)
+#define DRM_IOCTL_PSB_HIST_STATUS	\
+		DRM_IOWR(DRM_PSB_HIST_STATUS + DRM_COMMAND_BASE, \
+			 struct drm_psb_hist_status_arg)
+#define DRM_IOCTL_PSB_UPDATE_GUARD	\
+		DRM_IOWR(DRM_PSB_UPDATE_GUARD + DRM_COMMAND_BASE, \
+			 uint32_t)
+#define DRM_IOCTL_PSB_INIT_COMM	\
+		DRM_IOWR(DRM_PSB_INIT_COMM + DRM_COMMAND_BASE, \
+			 uint32_t)
+#define DRM_IOCTL_PSB_DPST	\
+		DRM_IOWR(DRM_PSB_DPST + DRM_COMMAND_BASE, \
+			 uint32_t)
+#define DRM_IOCTL_PSB_GAMMA	\
+		DRM_IOWR(DRM_PSB_GAMMA + DRM_COMMAND_BASE, \
+			 struct drm_psb_dpst_lut_arg)
+#define DRM_IOCTL_PSB_DPST_BL	\
+		DRM_IOWR(DRM_PSB_DPST_BL + DRM_COMMAND_BASE, \
+			 uint32_t)
+#define DRM_IOCTL_PSB_GET_PIPE_FROM_CRTC_ID	\
+		DRM_IOWR(DRM_PSB_GET_PIPE_FROM_CRTC_ID + DRM_COMMAND_BASE, \
+			 struct drm_psb_get_pipe_from_crtc_id_arg)
+
+/*DPU/DSR stuff*/
+#define DRM_IOCRL_PSB_DPU_QUERY \
+	DRM_IOR(DRM_PSB_DPU_QUERY + DRM_COMMAND_BASE, unsigned int)
+#define DRM_IOCRL_PSB_DPU_DSR_ON \
+	DRM_IOW(DRM_PSB_DPU_DSR_ON + DRM_COMMAND_BASE, unsigned int)
+/* #define DRM_IOCRL_PSB_DPU_DSR_OFF \
+*	DRM_IOW(DRM_PSB_DPU_DSR_OFF + DRM_COMMAND_BASE, unsigned int)
+*/
+#define DRM_IOCRL_PSB_DPU_DSR_OFF \
+	DRM_IOW(DRM_PSB_DPU_DSR_OFF + DRM_COMMAND_BASE, \
+	struct drm_psb_drv_dsr_off_arg)
+
+/*HDMI FB stuff*/
+#define DRM_IOCTL_PSB_HDMI_FB_CMD \
+	DRM_IOWR(DRM_PSB_HDMI_FB_CMD + DRM_COMMAND_BASE, \
+	struct drm_psb_disp_ctrl)
+
+/* HDCP IOCTLs */
+#define DRM_IOCTL_PSB_QUERY_HDCP \
+		DRM_IOR(DRM_PSB_QUERY_HDCP + DRM_COMMAND_BASE, uint32_t)
+#define DRM_IOCTL_PSB_VALIDATE_HDCP_KSV \
+	DRM_IOWR(DRM_PSB_VALIDATE_HDCP_KSV + DRM_COMMAND_BASE, sqword_tt)
+#define DRM_IOCTL_PSB_GET_HDCP_STATUS \
+		DRM_IOR(DRM_PSB_GET_HDCP_STATUS + DRM_COMMAND_BASE, uint32_t)
+#define DRM_IOCTL_PSB_ENABLE_HDCP \
+		DRM_IO(DRM_PSB_ENABLE_HDCP + DRM_COMMAND_BASE)
+#define DRM_IOCTL_PSB_DISABLE_HDCP \
+		DRM_IO(DRM_PSB_DISABLE_HDCP + DRM_COMMAND_BASE)
+#define DRM_IOCTL_PSB_GET_HDCP_LINK_STATUS \
+		DRM_IOR(DRM_PSB_GET_HDCP_LINK_STATUS + \
+		DRM_COMMAND_BASE, uint32_t)
+#define DRM_IOCTL_PSB_HDCP_DISPLAY_IED_OFF \
+		DRM_IO(DRM_PSB_HDCP_DISPLAY_IED_OFF + DRM_COMMAND_BASE)
+#define DRM_IOCTL_PSB_HDCP_DISPLAY_IED_ON \
+		DRM_IO(DRM_PSB_HDCP_DISPLAY_IED_ON + DRM_COMMAND_BASE)
+#define DRM_IOCTL_PSB_QUERY_HDCP_DISPLAY_IED_CAPS \
+		DRM_IOR(DRM_PSB_QUERY_HDCP_DISPLAY_IED_CAPS \
+			+ DRM_COMMAND_BASE, uint32_t)
+
+/* S3D IOCTLs */
+#define DRM_IOCTL_PSB_S3D_QUERY \
+	DRM_IOWR(DRM_PSB_S3D_QUERY + DRM_COMMAND_BASE, \
+		struct drm_psb_s3d_query)
+#define DRM_IOCTL_PSB_S3D_PREMODESET \
+	DRM_IOW(DRM_PSB_S3D_PREMODESET + DRM_COMMAND_BASE, \
+		struct drm_psb_s3d_premodeset)
+#define DRM_IOCTL_PSB_S3D_ENABLE \
+		DRM_IOW(DRM_PSB_S3D_ENABLE + DRM_COMMAND_BASE, \
+		uint32_t)
+
+/* CSC IOCTLS */
+#define DRM_IOCTL_PSB_SET_CSC \
+	DRM_IOW(DRM_PSB_SET_CSC + DRM_COMMAND_BASE, struct drm_psb_csc_matrix)
+
+/*CSC GAMMA Setting*/
+#define DRM_IOCTL_PSB_CSC_GAMMA_SETTING \
+                DRM_IOWR(DRM_PSB_CSC_GAMMA_SETTING + DRM_COMMAND_BASE, struct drm_psb_csc_gamma_setting)
+
+#define DRM_IOCTL_PSB_ENABLE_IED_SESSION \
+		DRM_IO(DRM_PSB_ENABLE_IED_SESSION + DRM_COMMAND_BASE)
+#define DRM_IOCTL_PSB_DISABLE_IED_SESSION \
+		DRM_IO(DRM_PSB_DISABLE_IED_SESSION + DRM_COMMAND_BASE)
+
+/* Panel type query */
+#define DRM_IOCTL_PSB_PANEL_QUERY \
+		DRM_IOR(DRM_PSB_PANEL_QUERY + DRM_COMMAND_BASE, \
+			uint32_t)
+
+/* Idle control */
+#define DRM_IOCTL_PSB_IDLE_CTRL \
+		DRM_IOW(DRM_PSB_IDLE_CTRL + DRM_COMMAND_BASE, \
+			struct drm_psb_idle_ctrl)
+
+/*
+ * TTM execbuf extension.
+ */
+
+#define DRM_PSB_SCENE_UNREF	  (DRM_PSB_CMDBUF + 1)
+#define DRM_IOCTL_PSB_CMDBUF	\
+		DRM_IOW(DRM_PSB_CMDBUF + DRM_COMMAND_BASE,	\
+			struct drm_psb_cmdbuf_arg)
+#define DRM_IOCTL_PSB_SCENE_UNREF	\
+		DRM_IOW(DRM_PSB_SCENE_UNREF + DRM_COMMAND_BASE, \
+			struct drm_psb_scene)
+#define DRM_IOCTL_PSB_KMS_OFF	  DRM_IO(DRM_PSB_KMS_OFF + DRM_COMMAND_BASE)
+#define DRM_IOCTL_PSB_KMS_ON	  DRM_IO(DRM_PSB_KMS_ON + DRM_COMMAND_BASE)
+#define DRM_IOCTL_PSB_EXTENSION	\
+		DRM_IOWR(DRM_PSB_EXTENSION + DRM_COMMAND_BASE, \
+			 union drm_psb_extension_arg)
+/*
+ * TTM placement user extension.
+ */
+
+#define DRM_PSB_PLACEMENT_OFFSET   (DRM_PSB_SCENE_UNREF + 1)
+
+#define DRM_PSB_TTM_PL_CREATE	 (TTM_PL_CREATE + DRM_PSB_PLACEMENT_OFFSET)
+#define DRM_PSB_TTM_PL_REFERENCE (TTM_PL_REFERENCE + DRM_PSB_PLACEMENT_OFFSET)
+#define DRM_PSB_TTM_PL_UNREF	 (TTM_PL_UNREF + DRM_PSB_PLACEMENT_OFFSET)
+#define DRM_PSB_TTM_PL_SYNCCPU	 (TTM_PL_SYNCCPU + DRM_PSB_PLACEMENT_OFFSET)
+#define DRM_PSB_TTM_PL_WAITIDLE  (TTM_PL_WAITIDLE + DRM_PSB_PLACEMENT_OFFSET)
+#define DRM_PSB_TTM_PL_SETSTATUS (TTM_PL_SETSTATUS + DRM_PSB_PLACEMENT_OFFSET)
+#define DRM_PSB_TTM_PL_CREATE_UB (TTM_PL_CREATE_UB + DRM_PSB_PLACEMENT_OFFSET)
+
+/*
+ * TTM fence extension.
+ */
+
+#define DRM_PSB_FENCE_OFFSET	   (DRM_PSB_TTM_PL_CREATE_UB + 1)
+#define DRM_PSB_TTM_FENCE_SIGNALED (TTM_FENCE_SIGNALED + DRM_PSB_FENCE_OFFSET)
+#define DRM_PSB_TTM_FENCE_FINISH   (TTM_FENCE_FINISH + DRM_PSB_FENCE_OFFSET)
+#define DRM_PSB_TTM_FENCE_UNREF    (TTM_FENCE_UNREF + DRM_PSB_FENCE_OFFSET)
+
+#define DRM_PSB_FLIP	   (DRM_PSB_TTM_FENCE_UNREF + 1)
+		/*20 */
+/* PSB video extension */
+#define DRM_PSB_VIDEO_GETPARAM		(DRM_PSB_FLIP + 1)
+
+/*BC_VIDEO ioctl*/
+#define DRM_BUFFER_CLASS_VIDEO      (DRM_PSB_VIDEO_GETPARAM + 1)
+	/*0x32 */
+
+#define DRM_IOCTL_PSB_TTM_PL_CREATE    \
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_CREATE,\
+		 union ttm_pl_create_arg)
+#define DRM_IOCTL_PSB_TTM_PL_REFERENCE \
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_REFERENCE,\
+		 union ttm_pl_reference_arg)
+#define DRM_IOCTL_PSB_TTM_PL_UNREF    \
+	DRM_IOW(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_UNREF,\
+		struct ttm_pl_reference_req)
+#define DRM_IOCTL_PSB_TTM_PL_SYNCCPU	\
+	DRM_IOW(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_SYNCCPU,\
+		struct ttm_pl_synccpu_arg)
+#define DRM_IOCTL_PSB_TTM_PL_WAITIDLE	 \
+	DRM_IOW(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_WAITIDLE,\
+		struct ttm_pl_waitidle_arg)
+#define DRM_IOCTL_PSB_TTM_PL_SETSTATUS \
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_SETSTATUS,\
+		 union ttm_pl_setstatus_arg)
+#define DRM_IOCTL_PSB_TTM_PL_CREATE_UB    \
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_CREATE_UB,\
+		 union ttm_pl_create_ub_arg)
+#define DRM_IOCTL_PSB_TTM_FENCE_SIGNALED \
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_FENCE_SIGNALED,	\
+		  union ttm_fence_signaled_arg)
+#define DRM_IOCTL_PSB_TTM_FENCE_FINISH \
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_FENCE_FINISH,	\
+		 union ttm_fence_finish_arg)
+#define DRM_IOCTL_PSB_TTM_FENCE_UNREF \
+	DRM_IOW(DRM_COMMAND_BASE + DRM_PSB_TTM_FENCE_UNREF,	\
+		 struct ttm_fence_unref_arg)
+#define DRM_IOCTL_PSB_FLIP \
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_FLIP, \
+		 struct drm_psb_pageflip_arg)
+#define DRM_IOCTL_PSB_VIDEO_GETPARAM \
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_VIDEO_GETPARAM, \
+		 struct drm_lnc_video_getparam_arg)
+
+    /*****************************
+     *  HDMI TEST IOCTLs
+     */
+#define DRM_IOCTL_PSB_HDMITEST    \
+	DRM_IOWR(DRM_PSB_HDMITEST + DRM_COMMAND_BASE, drm_psb_hdmireg_t)
+
+/* VSYNC IOCTL */
+#define DRM_IOCTL_PSB_VSYNC_SET \
+	DRM_IOWR(DRM_PSB_VSYNC_SET + DRM_COMMAND_BASE,		\
+			struct drm_psb_vsync_set_arg)
+
+#define DRM_IOCTL_PSB_PANEL_ORIENTATION \
+	DRM_IOR(DRM_PSB_PANEL_ORIENTATION + DRM_COMMAND_BASE,          \
+			int)
+
+#define DRM_IOCTL_PSB_UPDATE_CURSOR_POS \
+	DRM_IOW(DRM_PSB_UPDATE_CURSOR_POS + DRM_COMMAND_BASE,\
+			struct intel_dc_cursor_ctx)
+
+
+struct user_printk_arg {
+	char string[512];
+};
+#define DRM_IOCTL_USER_PRINTK \
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_PVR_RESERVED2, \
+		 struct user_printk_arg)
+
+static int psb_vt_leave_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv);
+static int psb_vt_enter_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv);
+static int psb_sizes_ioctl(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv);
+static int psb_fuse_reg_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv);
+static int psb_vbt_ioctl(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv);
+static int psb_dc_state_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv);
+static int psb_adb_ioctl(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv);
+static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
+				    struct drm_file *file_priv);
+static int psb_stolen_memory_ioctl(struct drm_device *dev, void *data,
+				   struct drm_file *file_priv);
+static int psb_vsync_set_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv);
+static int psb_register_rw_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv);
+#ifdef CONFIG_SUPPORT_MIPI
+static int psb_hist_enable_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv);
+static int psb_hist_status_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv);
+static int psb_update_guard_ioctl(struct drm_device *dev, void *data,
+				  struct drm_file *file_priv);
+static int psb_init_comm_ioctl(struct drm_device *dev, void *data,
+			       struct drm_file *file_priv);
+static int psb_dpst_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv);
+static int psb_gamma_ioctl(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv);
+static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *file_priv);
+static int psb_dpu_query_ioctl(struct drm_device *dev, void *data,
+			       struct drm_file *file_priv);
+static int psb_dpu_dsr_on_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file_priv);
+static int psb_dpu_dsr_off_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv);
+#endif
+static int psb_get_panel_orientation_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv);
+#ifdef CONFIG_SUPPORT_HDMI
+static int psb_disp_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv);
+
+static int psb_query_hdcp_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv);
+static int psb_validate_hdcp_ksv_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv);
+static int psb_get_hdcp_status_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv);
+static int psb_enable_hdcp_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv);
+static int psb_disable_hdcp_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv);
+static int psb_get_hdcp_link_status_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv);
+static int psb_enable_display_ied_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv);
+static int psb_disable_display_ied_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv);
+static int psb_query_display_ied_caps_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv);
+
+#if KEEP_UNUSED_CODE_S3D
+static int psb_s3d_query_ioctl(struct drm_device *dev, void *data,
+			       struct drm_file *file_priv);
+static int psb_s3d_premodeset_ioctl(struct drm_device *dev, void *data,
+				    struct drm_file *file_priv);
+static int psb_s3d_enable_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file_priv);
+#endif /* if KEEP_UNUSED_CODE_S3D */
+#endif
+static int psb_set_csc_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *file_priv);
+static int psb_csc_gamma_setting_ioctl(struct drm_device *dev, void *data,
+                                 struct drm_file *file_priv);
+static int psb_enable_ied_session_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv);
+static int psb_disable_ied_session_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv);
+static int psb_panel_query_ioctl(struct drm_device *dev, void *data,
+					struct drm_file *file_priv);
+static int psb_idle_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file_priv);
+static int psb_update_cursor_pos_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file_priv);
+
+static int user_printk_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *file_priv)
+{
+	struct user_printk_arg *user_string = data;
+	printk(KERN_ERR "%s", user_string->string);
+	return 0;
+}
+
+    /****************************
+     *  HDMI TEST IOCTLS
+     */
+static int psb_drm_hdmi_test_ioctl(struct drm_device *,
+				   void *, struct drm_file *);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0))
+#define PSB_IOCTL_DEF(ioctl, func, flags) \
+	 [DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = \
+	 {ioctl, flags, func}
+#else
+#define PSB_IOCTL_DEF(ioctl, func, flags) \
+	 [DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = \
+	 {ioctl, flags, func, ioctl}
+#endif
+
+static struct drm_ioctl_desc psb_ioctls[] = {
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_KMS_OFF, psbfb_kms_off_ioctl,
+		      DRM_ROOT_ONLY),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_KMS_ON,
+		      psbfb_kms_on_ioctl,
+		      DRM_ROOT_ONLY),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_VT_LEAVE, psb_vt_leave_ioctl,
+		      DRM_ROOT_ONLY),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_VT_ENTER,
+		      psb_vt_enter_ioctl,
+		      DRM_ROOT_ONLY),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_EXTENSION, psb_extension_ioctl, DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_SIZES, psb_sizes_ioctl, DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_FUSE_REG, psb_fuse_reg_ioctl, DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_VBT, psb_vbt_ioctl, DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_DC_STATE, psb_dc_state_ioctl, DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_ADB, psb_adb_ioctl, DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_MODE_OPERATION, psb_mode_operation_ioctl,
+		      DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_STOLEN_MEMORY, psb_stolen_memory_ioctl,
+		      DRM_AUTH),
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0))
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_REGISTER_RW, psb_register_rw_ioctl,
+		      DRM_AUTH),
+#else
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_REGISTER_RW, psb_register_rw_ioctl,
+		      DRM_AUTH | DRM_UNLOCKED),
+#endif
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_GTT_MAP,
+		      psb_gtt_map_meminfo_ioctl,
+		      DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_GTT_UNMAP,
+		      psb_gtt_unmap_meminfo_ioctl,
+		      DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_GETPAGEADDRS,
+		      psb_getpageaddrs_ioctl,
+		      DRM_AUTH),
+#ifdef CONFIG_SUPPORT_MIPI
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_HIST_ENABLE,
+		      psb_hist_enable_ioctl,
+		      DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_HIST_STATUS,
+		      psb_hist_status_ioctl,
+		      DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_UPDATE_GUARD, psb_update_guard_ioctl,
+		      DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_INIT_COMM, psb_init_comm_ioctl, DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_DPST, psb_dpst_ioctl, DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_GAMMA, psb_gamma_ioctl, DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_DPST_BL, psb_dpst_bl_ioctl, DRM_AUTH),
+#endif
+#if 0
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_GET_PIPE_FROM_CRTC_ID,
+		      psb_intel_get_pipe_from_crtc_id, 0),
+#endif
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_CMDBUF,
+		      psb_cmdbuf_ioctl,
+		      DRM_AUTH | DRM_UNLOCKED),
+	/*to be removed later */
+	/*PSB_IOCTL_DEF(DRM_IOCTL_PSB_SCENE_UNREF, drm_psb_scene_unref_ioctl,
+	   DRM_AUTH), */
+
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_CREATE, psb_pl_create_ioctl,
+		      DRM_AUTH | DRM_UNLOCKED),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_REFERENCE, psb_pl_reference_ioctl,
+		      DRM_AUTH | DRM_UNLOCKED),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_UNREF, psb_pl_unref_ioctl,
+		      DRM_AUTH | DRM_UNLOCKED),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_SYNCCPU, psb_pl_synccpu_ioctl,
+		      DRM_AUTH | DRM_UNLOCKED),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_WAITIDLE, psb_pl_waitidle_ioctl,
+		      DRM_AUTH | DRM_UNLOCKED),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_SETSTATUS, psb_pl_setstatus_ioctl,
+		      DRM_AUTH | DRM_UNLOCKED),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_CREATE_UB, psb_pl_ub_create_ioctl,
+		      DRM_AUTH | DRM_UNLOCKED),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_FENCE_SIGNALED,
+		      psb_fence_signaled_ioctl, DRM_AUTH | DRM_UNLOCKED),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_FENCE_FINISH, psb_fence_finish_ioctl,
+		      DRM_AUTH | DRM_UNLOCKED),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_FENCE_UNREF, psb_fence_unref_ioctl,
+		      DRM_AUTH | DRM_UNLOCKED),
+	/*to be removed later */
+	/*PSB_IOCTL_DEF(DRM_IOCTL_PSB_FLIP, psb_page_flip, DRM_AUTH), */
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_VIDEO_GETPARAM,
+		psb_video_getparam, DRM_AUTH | DRM_UNLOCKED),
+#ifdef CONFIG_SUPPORT_MIPI
+	PSB_IOCTL_DEF(DRM_IOCRL_PSB_DPU_QUERY, psb_dpu_query_ioctl,
+		      DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCRL_PSB_DPU_DSR_ON, psb_dpu_dsr_on_ioctl,
+		      DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCRL_PSB_DPU_DSR_OFF, psb_dpu_dsr_off_ioctl,
+		      DRM_AUTH),
+#endif
+#ifdef CONFIG_SUPPORT_HDMI
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_HDMI_FB_CMD, psb_disp_ioctl, 0),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_QUERY_HDCP, psb_query_hdcp_ioctl, DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_VALIDATE_HDCP_KSV,
+		      psb_validate_hdcp_ksv_ioctl, DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_GET_HDCP_STATUS, psb_get_hdcp_status_ioctl,
+		      DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_ENABLE_HDCP, psb_enable_hdcp_ioctl,
+		      DRM_AUTH | DRM_UNLOCKED),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_DISABLE_HDCP, psb_disable_hdcp_ioctl,
+		      DRM_AUTH | DRM_UNLOCKED),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_GET_HDCP_LINK_STATUS,
+			psb_get_hdcp_link_status_ioctl, DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_HDCP_DISPLAY_IED_OFF,
+			psb_disable_display_ied_ioctl, DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_HDCP_DISPLAY_IED_ON,
+			psb_enable_display_ied_ioctl, DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_QUERY_HDCP_DISPLAY_IED_CAPS,
+			psb_query_display_ied_caps_ioctl, DRM_AUTH),
+#endif
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_SET_CSC, psb_set_csc_ioctl, DRM_AUTH),
+        PSB_IOCTL_DEF(DRM_IOCTL_PSB_CSC_GAMMA_SETTING, psb_csc_gamma_setting_ioctl, DRM_AUTH),
+/*
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_S3D_QUERY, psb_s3d_query_ioctl, DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_S3D_PREMODESET, psb_s3d_premodeset_ioctl,
+		      DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_S3D_ENABLE, psb_s3d_enable_ioctl, DRM_AUTH),
+*/
+    /*****************************
+     *  HDMI TEST IOCTLs
+     */
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_HDMITEST, psb_drm_hdmi_test_ioctl,
+		      DRM_AUTH),
+
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_VSYNC_SET, psb_vsync_set_ioctl,
+			DRM_AUTH | DRM_UNLOCKED),
+
+	PSB_IOCTL_DEF(DRM_IOCTL_USER_PRINTK, user_printk_ioctl, DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_ENABLE_IED_SESSION,
+		psb_enable_ied_session_ioctl, DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_DISABLE_IED_SESSION,
+		psb_disable_ied_session_ioctl, DRM_AUTH),
+
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_PANEL_QUERY,
+		psb_panel_query_ioctl, DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_IDLE_CTRL,
+		psb_idle_ioctl, DRM_AUTH),
+
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_PANEL_ORIENTATION,
+		psb_get_panel_orientation_ioctl, DRM_AUTH),
+	PSB_IOCTL_DEF(DRM_IOCTL_PSB_UPDATE_CURSOR_POS,
+		psb_update_cursor_pos_ioctl, DRM_AUTH),
+};
+
+static void psb_set_uopt(struct drm_psb_uopt *uopt)
+{
+	return;
+}
+
+static void psb_lastclose(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *)dev->dev_private;
+	struct msvdx_private *msvdx_priv;
+#ifdef SUPPORT_VSP
+	struct vsp_private *vsp_priv;
+#endif
+
+	if (!dev_priv)
+		return;
+
+	msvdx_priv = dev_priv->msvdx_private;
+#ifdef SUPPORT_VSP
+	vsp_priv = dev_priv->vsp_private;
+#endif
+	mutex_lock(&dev_priv->cmdbuf_mutex);
+	if (dev_priv->encode_context.buffers) {
+		vfree(dev_priv->encode_context.buffers);
+		dev_priv->encode_context.buffers = NULL;
+	}
+	mutex_unlock(&dev_priv->cmdbuf_mutex);
+
+	if (msvdx_priv) {
+		mutex_lock(&msvdx_priv->msvdx_mutex);
+		if (dev_priv->decode_context.buffers) {
+			vfree(dev_priv->decode_context.buffers);
+			dev_priv->decode_context.buffers = NULL;
+		}
+		mutex_unlock(&msvdx_priv->msvdx_mutex);
+	}
+
+#ifdef SUPPORT_VSP
+	mutex_lock(&vsp_priv->vsp_mutex);
+	if (dev_priv->vsp_context.buffers) {
+		vfree(dev_priv->vsp_context.buffers);
+		dev_priv->vsp_context.buffers = NULL;
+	}
+	mutex_unlock(&vsp_priv->vsp_mutex);
+#endif
+}
+
+static void psb_do_takedown(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *)dev->dev_private;
+	struct ttm_bo_device *bdev = &dev_priv->bdev;
+
+	if (dev_priv->have_mem_mmu) {
+		ttm_bo_clean_mm(bdev, DRM_PSB_MEM_MMU);
+		dev_priv->have_mem_mmu = 0;
+	}
+
+	if (dev_priv->have_tt) {
+		ttm_bo_clean_mm(bdev, TTM_PL_TT);
+		dev_priv->have_tt = 0;
+	}
+
+	psb_msvdx_uninit(dev);
+
+#ifdef SUPPORT_VSP
+	vsp_deinit(dev);
+#endif
+	tng_topaz_uninit(dev);
+}
+
+#if KEEP_UNUSED_CODE
+static void psb_get_core_freq(struct drm_device *dev)
+{
+	uint32_t clock;
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *)dev->dev_private;
+
+	/*pci_write_config_dword(pci_root, 0xD4, 0x00C32004); */
+	/*pci_write_config_dword(pci_root, 0xD0, 0xE0033000); */
+
+	clock = intel_mid_msgbus_read32_raw(0xD00503F0);
+
+	switch (clock & 0x07) {
+	case 0:
+		dev_priv->core_freq = 100;
+		break;
+	case 1:
+		dev_priv->core_freq = 133;
+		break;
+	case 2:
+		dev_priv->core_freq = 150;
+		break;
+	case 3:
+		dev_priv->core_freq = 178;
+		break;
+	case 4:
+		dev_priv->core_freq = 200;
+		break;
+	case 5:
+	case 6:
+	case 7:
+		dev_priv->core_freq = 266;
+	default:
+		dev_priv->core_freq = 0;
+	}
+}
+#endif /* if KEEP_UNUSED_CODE */
+
+#define FB_REG06_MRST 0xD08106F0
+#define FB_REG06_MDFLD 0x108106F0
+#define FB_TOPAZ_DISABLE BIT0
+#define FB_MIPI_DISABLE  BIT11
+#define FB_REG09_MRST 0xD08109F0
+#define FB_REG09_MDFLD 0x108109F0
+#define FB_SKU_MASK  (BIT12|BIT13|BIT14)
+#define FB_SKU_SHIFT 12
+#define FB_SKU_100 0
+#define FB_SKU_100L 1
+#define FB_SKU_83 2
+#define FB_GFX_CLK_DIVIDE_MASK	(BIT20|BIT21|BIT22)
+#define FB_GFX_CLK_DIVIDE_SHIFT 20
+#define FB_VED_CLK_DIVIDE_MASK	(BIT23|BIT24)
+#define FB_VED_CLK_DIVIDE_SHIFT 23
+#define FB_VEC_CLK_DIVIDE_MASK	(BIT25|BIT26)
+#define FB_VEC_CLK_DIVIDE_SHIFT 25
+
+void mrst_get_fuse_settings(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	uint32_t fuse_value = 0;
+	uint32_t fuse_value_tmp = 0;
+
+	fuse_value = intel_mid_msgbus_read32_raw(IS_MDFLD(dev) ?
+			FB_REG06_MDFLD : FB_REG06_MRST);
+
+	dev_priv->iLVDS_enable = fuse_value & FB_MIPI_DISABLE;
+
+	if (IS_FLDS(dev))
+		dev_priv->iLVDS_enable = 0;
+
+	PSB_DEBUG_ENTRY("internal display is %s\n",
+			dev_priv->iLVDS_enable ? "LVDS display" :
+			"MIPI display");
+
+	/*prevent Runtime suspend at start */
+	if (dev_priv->iLVDS_enable) {
+		dev_priv->is_lvds_on = true;
+		dev_priv->is_mipi_on = false;
+	} else {
+		dev_priv->is_mipi_on = true;
+		dev_priv->is_lvds_on = false;
+	}
+
+	if (dev_priv->dev->pci_device == PCI_ID_TOPAZ_DISABLED)
+		dev_priv->topaz_disabled = 1;
+	else
+		dev_priv->topaz_disabled = 0;
+
+	dev_priv->video_device_fuse = fuse_value;
+
+	PSB_DEBUG_ENTRY("topaz is %s\n",
+			dev_priv->topaz_disabled ? "disabled" : "enabled");
+
+	fuse_value = intel_mid_msgbus_read32_raw(IS_MDFLD(dev) ?
+			FB_REG09_MDFLD : FB_REG09_MRST);
+
+	PSB_DEBUG_ENTRY("SKU values is 0x%x.\n", fuse_value);
+	fuse_value_tmp = (fuse_value & FB_SKU_MASK) >> FB_SKU_SHIFT;
+
+	dev_priv->fuse_reg_value = fuse_value;
+
+	switch (fuse_value_tmp) {
+	case FB_SKU_100:
+		dev_priv->core_freq = 200;
+		break;
+	case FB_SKU_100L:
+		dev_priv->core_freq = 100;
+		break;
+	case FB_SKU_83:
+		dev_priv->core_freq = 166;
+		break;
+	default:
+		DRM_ERROR("Invalid SKU values, SKU value = 0x%08x\n",
+			  fuse_value_tmp);
+		dev_priv->core_freq = 0;
+	}
+	PSB_DEBUG_ENTRY("LNC core clk is %dMHz.\n", dev_priv->core_freq);
+
+#if 0				/* debug message */
+	fuse_value_tmp =
+	    (fuse_value & FB_GFX_CLK_DIVIDE_MASK) >> FB_GFX_CLK_DIVIDE_SHIFT;
+
+	switch (fuse_value_tmp) {
+	case 0:
+		DRM_INFO("Gfx clk : core clk = 1:1.\n");
+		break;
+	case 1:
+		DRM_INFO("Gfx clk : core clk = 4:3.\n");
+		break;
+	case 2:
+		DRM_INFO("Gfx clk : core clk = 8:5.\n");
+		break;
+	case 3:
+		DRM_INFO("Gfx clk : core clk = 2:1.\n");
+		break;
+	case 4:
+		DRM_INFO("Gfx clk : core clk = 16:7.\n");
+		break;
+	case 5:
+		DRM_INFO("Gfx clk : core clk = 8:3.\n");
+		break;
+	case 6:
+		DRM_INFO("Gfx clk : core clk = 16:5.\n");
+		break;
+	case 7:
+		DRM_INFO("Gfx clk : core clk = 4:1.\n");
+		break;
+	default:
+		DRM_ERROR("Invalid GFX CLK DIVIDE values, value = 0x%08x\n",
+			  fuse_value_tmp);
+	}
+
+	fuse_value_tmp =
+	    (fuse_value & FB_VED_CLK_DIVIDE_MASK) >> FB_VED_CLK_DIVIDE_SHIFT;
+
+	switch (fuse_value_tmp) {
+	case 0:
+		DRM_INFO("Ved clk : core clk = 1:1.\n");
+		break;
+	case 1:
+		DRM_INFO("Ved clk : core clk = 4:3.\n");
+		break;
+	case 2:
+		DRM_INFO("Ved clk : core clk = 8:5.\n");
+		break;
+	case 3:
+		DRM_INFO("Ved clk : core clk = 2:1.\n");
+		break;
+	default:
+		DRM_ERROR("Invalid VED CLK DIVIDE values, value = 0x%08x\n",
+			  fuse_value_tmp);
+	}
+
+	fuse_value_tmp =
+	    (fuse_value & FB_VEC_CLK_DIVIDE_MASK) >> FB_VEC_CLK_DIVIDE_SHIFT;
+
+	switch (fuse_value_tmp) {
+	case 0:
+		DRM_INFO("Vec clk : core clk = 1:1.\n");
+		break;
+	case 1:
+		DRM_INFO("Vec clk : core clk = 4:3.\n");
+		break;
+	case 2:
+		DRM_INFO("Vec clk : core clk = 8:5.\n");
+		break;
+	case 3:
+		DRM_INFO("Vec clk : core clk = 2:1.\n");
+		break;
+	default:
+		DRM_ERROR("Invalid VEC CLK DIVIDE values, value = 0x%08x\n",
+			  fuse_value_tmp);
+	}
+#endif				/* FIXME remove it after PO */
+
+	if (IS_FLDS(dev)) {
+#if KSEL_BYPASS_83_100_ENABLE
+		dev_priv->ksel = KSEL_BYPASS_83_100;
+#endif				/* KSEL_BYPASS_83_100_ENABLE */
+
+#if  KSEL_CRYSTAL_19_ENABLED
+		dev_priv->ksel = KSEL_CRYSTAL_19;
+#endif				/*  KSEL_CRYSTAL_19_ENABLED */
+	}
+
+	return;
+}
+
+bool mid_get_pci_revID(struct drm_psb_private *dev_priv)
+{
+	uint32_t platform_rev_id = 0;
+	struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
+	if (!pci_gfx_root) {
+		DRM_ERROR("pci_gfx_root is NULL\n");
+		return false;
+	}
+
+	/*get the revison ID, B0:D2:F0;0x08 */
+	pci_read_config_dword(pci_gfx_root, 0x08, &platform_rev_id);
+	dev_priv->platform_rev_id = (uint8_t) platform_rev_id;
+	pci_dev_put(pci_gfx_root);
+	PSB_DEBUG_ENTRY("platform_rev_id is %x\n", dev_priv->platform_rev_id);
+
+	return true;
+}
+
+#ifdef CONFIG_SUPPORT_MIPI
+bool mrst_get_vbt_data(struct drm_psb_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+
+	dev_priv->panel_id = PanelID;
+	dev_priv->mipi_encoder_type = is_panel_vid_or_cmd(dev_priv->dev);
+
+	if (is_dual_dsi(dev) && IS_ANN(dev)) {
+		dev_priv->bUseHFPLL = false;
+		dev_priv->bRereadZero = false;
+	} else if (IS_TNG_B0(dev) || IS_ANN_A0(dev)) {
+		if (dev_priv->mipi_encoder_type == MDFLD_DSI_ENCODER_DBI) {
+			if (IS_ANN(dev))
+				dev_priv->bUseHFPLL = false;
+			else {
+				dev_priv->bUseHFPLL = true;
+				enable_HFPLL(dev_priv->dev);
+			}
+			dev_priv->bRereadZero = false;
+		} else {
+			dev_priv->bUseHFPLL = false;
+			dev_priv->bRereadZero = true;
+		}
+	} else {
+		dev_priv->bUseHFPLL = false;
+		dev_priv->bRereadZero = false;
+	}
+	return true;
+}
+#endif
+
+void hdmi_do_hotplug_wq(struct work_struct *work)
+{
+	struct drm_psb_private *dev_priv = container_of(work,
+							struct drm_psb_private,
+							hdmi_hotplug_wq);
+	atomic_inc(&dev_priv->hotplug_wq_done);
+	/* notify user space of hotplug event via a uevent message */
+
+#ifdef CONFIG_X86_MRST
+	{
+		u8 data = 0;
+		intel_scu_ipc_iowrite8(MSIC_VCC330CNT, VCC330_ON);
+
+		intel_scu_ipc_ioread8(MSIC_HDMI_STATUS, &data);
+
+		if (data & HPD_SIGNAL_STATUS) {
+			PSB_DEBUG_ENTRY(
+				"hdmi_do_hotplug_wq: HDMI plugged in\n");
+			hdmi_state = 1;
+			if (dev_priv->had_interface)
+				dev_priv->had_interface->probe(
+					dev_priv->had_pvt_data, 0);
+
+			drm_sysfs_hotplug_event(dev_priv->dev);
+		} else {
+			PSB_DEBUG_ENTRY("hdmi_do_hotplug_wq: HDMI unplugged\n");
+			hdmi_state = 0;
+			drm_sysfs_hotplug_event(dev_priv->dev);
+
+			if (dev_priv->had_interface)
+				dev_priv->had_interface->
+				    disconnect(dev_priv->had_pvt_data);
+		}
+	}
+#endif
+
+	atomic_dec(&dev_priv->hotplug_wq_done);
+}
+
+#ifdef CONFIG_SUPPORT_HDMI
+void hdmi_do_audio_wq(struct work_struct *work)
+{
+	struct drm_psb_private *dev_priv = container_of(work,
+		struct drm_psb_private,
+		hdmi_audio_wq);
+	bool hdmi_hpd_connected = false;
+
+	/* As in the hdmi_do_hotplug_wq() function above
+	* it seems we should not be running this section of
+	* code if we don't also have CONFIG_SUPPORT_HDMI set,
+	* some devices might not want/need support for HDMI
+	* early in the platform bring up and by having this
+	* available to run might produce unexpected results
+	* if HDMI connector is plugged in.
+	*/
+
+	DRM_INFO("hdmi_do_audio_wq: Checking for HDMI connection at boot\n");
+	hdmi_hpd_connected = android_hdmi_is_connected(dev_priv->dev);
+	if (hdmi_hpd_connected) {
+		DRM_INFO("hdmi_do_audio_wq: HDMI plugged in\n");
+		mid_hdmi_audio_signal_event(dev_priv->dev, HAD_EVENT_HOT_PLUG);
+	}
+}
+#endif
+
+#ifdef CONFIG_SUPPORT_HDMI
+#define HDMI_HOTPLUG_DELAY (2*HZ)
+static void hdmi_hotplug_timer_func(unsigned long data)
+{
+/* FIXME: TODO: hkpatel - Enable once runtime pm is enabled */
+#if 0
+	struct drm_device *dev = (struct drm_device *)data;
+
+	PSB_DEBUG_ENTRY("\n");
+	ospm_runtime_pm_allow(dev);
+#endif
+}
+
+static int hdmi_hotplug_timer_init(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct timer_list *hdmi_timer = &dev_priv->hdmi_timer;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	init_timer(hdmi_timer);
+
+	hdmi_timer->data = (unsigned long)dev;
+	hdmi_timer->function = hdmi_hotplug_timer_func;
+	hdmi_timer->expires = jiffies + HDMI_HOTPLUG_DELAY;
+
+	PSB_DEBUG_ENTRY("successfully\n");
+
+	return 0;
+}
+
+void hdmi_hotplug_timer_start(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct timer_list *hdmi_timer = &dev_priv->hdmi_timer;
+
+	PSB_DEBUG_ENTRY("\n");
+	if (!timer_pending(hdmi_timer)) {
+		hdmi_timer->expires = jiffies + HDMI_HOTPLUG_DELAY;
+		add_timer(hdmi_timer);
+	}
+}
+#endif
+
+static int psb_do_init(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *)dev->dev_private;
+	struct ttm_bo_device *bdev = &dev_priv->bdev;
+	struct psb_gtt *pg = dev_priv->pg;
+
+	uint32_t tmp;
+
+	int ret = -ENOMEM;
+
+	/*
+	 * Initialize sequence numbers for the different command
+	 * submission mechanisms.
+	 */
+
+	dev_priv->sequence[PSB_ENGINE_2D] = 0;
+	dev_priv->sequence[PSB_ENGINE_VIDEO] = 1;
+	dev_priv->sequence[LNC_ENGINE_ENCODE] = 0;
+	dev_priv->sequence[VSP_ENGINE_VPP] = 1;
+
+	if (pg->mmu_gatt_start & 0x0FFFFFFF) {
+		DRM_ERROR("Gatt must be 256M aligned. This is a bug.\n");
+		ret = -EINVAL;
+		goto out_err;
+	}
+
+	dev_priv->sizes.ta_mem_size = 0;
+
+	/* TT region managed by TTM. */
+	if (!ttm_bo_init_mm(bdev, TTM_PL_TT,
+			    pg->gatt_pages -
+			    (pg->gtt_video_start >> PAGE_SHIFT)
+			    )) {
+
+		dev_priv->have_tt = 1;
+		dev_priv->sizes.tt_size = (pg->gatt_pages -
+			(pg->gtt_video_start >> PAGE_SHIFT)) / 256;
+		printk("[TTM] TT heap size is %d\n", pg->gtt_video_start);
+	}
+	if (!ttm_bo_init_mm(bdev,
+			    DRM_PSB_MEM_MMU,
+			    PSB_MEM_TT_START >> PAGE_SHIFT)) {
+		dev_priv->have_mem_mmu = 1;
+		dev_priv->sizes.mmu_size =
+			PSB_MEM_TT_START / (1024 * 1024);
+		printk("[TTM] MMU heap size is %d\n", PSB_MEM_TT_START);
+	}
+
+	if (IS_MSVDX_MEM_TILE(dev)) {
+		/* Create tiling MMU region managed by TTM */
+		tmp = (0x10000000) >> PAGE_SHIFT;
+		if (!ttm_bo_init_mm(bdev, DRM_PSB_MEM_MMU_TILING, tmp))
+			dev_priv->have_mem_mmu_tiling = 1;
+		printk("[TTM] MMU tiling heap size is %d\n", tmp);
+	}
+
+	PSB_DEBUG_INIT("Init MSVDX\n");
+
+	/* on TNG B0, VED not needed to be on here since firmware is not loaded in psb_msvdx_init */
+	if (IS_TNG_A0(dev))
+		power_island_get(OSPM_VIDEO_DEC_ISLAND);
+
+	psb_msvdx_init(dev);
+
+	if (IS_TNG_A0(dev))
+		power_island_put(OSPM_VIDEO_DEC_ISLAND);
+
+#ifdef SUPPORT_VSP
+	VSP_DEBUG("Init VSP\n");
+	vsp_init(dev);
+#endif
+
+	PSB_DEBUG_INIT("Init Topaz\n");
+	tng_topaz_init(dev);
+	return 0;
+ out_err:
+	psb_do_takedown(dev);
+	return ret;
+}
+
+static int psb_driver_unload(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *)dev->dev_private;
+
+	/*Fristly, unload pvr driver */
+	PVRSRVDrmUnload(dev);
+
+	/*TODO: destroy DSR/DPU infos here */
+	psb_backlight_exit();	/*writes minimum value to backlight HW reg */
+
+	if (drm_psb_no_fb == 0)
+		psb_modeset_cleanup(dev);
+
+	if (dev_priv) {
+		destroy_workqueue(dev_priv->vsync_wq);
+		destroy_workqueue(dev_priv->power_wq);
+
+		/* psb_watchdog_takedown(dev_priv); */
+		psb_do_takedown(dev);
+
+		if (dev_priv->pf_pd) {
+			psb_mmu_free_pagedir(dev_priv->pf_pd);
+			dev_priv->pf_pd = NULL;
+		}
+		if (dev_priv->mmu) {
+			struct psb_gtt *pg = dev_priv->pg;
+
+			down_read(&pg->sem);
+			psb_mmu_remove_pfn_sequence(psb_mmu_get_default_pd
+						    (dev_priv->mmu),
+						    pg->mmu_gatt_start,
+						    pg->vram_stolen_size >>
+						    PAGE_SHIFT);
+			up_read(&pg->sem);
+			psb_mmu_driver_takedown(dev_priv->mmu);
+			dev_priv->mmu = NULL;
+		}
+
+#ifdef SUPPORT_VSP
+		if (dev_priv->vsp_mmu) {
+			struct psb_gtt *pg = dev_priv->pg;
+
+			down_read(&pg->sem);
+			psb_mmu_remove_pfn_sequence(
+				psb_mmu_get_default_pd
+				(dev_priv->vsp_mmu),
+				pg->mmu_gatt_start,
+				pg->vram_stolen_size >> PAGE_SHIFT);
+			up_read(&pg->sem);
+			psb_mmu_driver_takedown(dev_priv->vsp_mmu);
+			dev_priv->vsp_mmu = NULL;
+		}
+#endif
+		if (IS_MRFLD(dev))
+			mrfld_gtt_takedown(dev_priv->pg, 1);
+		else
+			psb_gtt_takedown(dev_priv->pg, 1);
+
+		if (dev_priv->scratch_page) {
+			__free_page(dev_priv->scratch_page);
+			dev_priv->scratch_page = NULL;
+		}
+		if (dev_priv->has_bo_device) {
+			ttm_bo_device_release(&dev_priv->bdev);
+			dev_priv->has_bo_device = 0;
+		}
+		if (dev_priv->has_fence_device) {
+			ttm_fence_device_release(&dev_priv->fdev);
+			dev_priv->has_fence_device = 0;
+		}
+		if (dev_priv->vdc_reg) {
+			iounmap(dev_priv->vdc_reg);
+			dev_priv->vdc_reg = NULL;
+		}
+		if (dev_priv->rgx_reg) {
+			iounmap(dev_priv->rgx_reg);
+			dev_priv->rgx_reg = NULL;
+		}
+		if (dev_priv->wrapper_reg) {
+			iounmap(dev_priv->wrapper_reg);
+			dev_priv->wrapper_reg = NULL;
+		}
+		if (dev_priv->ved_wrapper_reg) {
+			iounmap(dev_priv->ved_wrapper_reg);
+			dev_priv->ved_wrapper_reg = NULL;
+		}
+		if (dev_priv->vec_wrapper_reg) {
+			iounmap(dev_priv->vec_wrapper_reg);
+			dev_priv->vec_wrapper_reg = NULL;
+		}
+		if (dev_priv->msvdx_reg) {
+			iounmap(dev_priv->msvdx_reg);
+			dev_priv->msvdx_reg = NULL;
+		}
+#ifdef SUPPORT_VSP
+		if (dev_priv->vsp_reg) {
+			iounmap(dev_priv->vsp_reg);
+			dev_priv->vsp_reg = NULL;
+		}
+#endif
+
+		if (IS_TOPAZ(dev)) {
+			if (dev_priv->topaz_reg) {
+				iounmap(dev_priv->topaz_reg);
+				dev_priv->topaz_reg = NULL;
+			}
+		}
+
+		if (dev_priv->tdev)
+			ttm_object_device_release(&dev_priv->tdev);
+
+		if (dev_priv->has_global)
+			psb_ttm_global_release(dev_priv);
+
+		tasklet_kill(&dev_priv->hdmi_audio_bufferdone_tasklet);
+
+
+		kfree(dev_priv);
+		dev->dev_private = NULL;
+	}
+
+	ospm_power_uninit();
+
+	return 0;
+}
+
+static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
+{
+	struct drm_psb_private *dev_priv;
+	struct ttm_bo_device *bdev;
+	unsigned long resource_start;
+	struct psb_gtt *pg;
+	unsigned long irqflags;
+	int ret = -ENOMEM;
+	uint32_t tt_pages;
+	u32 pm_mask = 0x0;
+	int pm_reg = 0x0;
+
+	if (IS_MOFD(dev)) {
+		pm_reg = 0x3f;
+		pm_mask = intel_mid_msgbus_read32(0x04, pm_reg);
+
+		pm_mask = 0x0;
+		pm_reg = 0x30; //GFXSS
+		power_up(pm_reg,pm_mask);
+
+		pm_mask = 0x0;
+		pm_reg = 0x36; //DSPSS
+		power_up(pm_reg,pm_mask);
+
+		pm_reg = 0x3c; //HDMISS
+		power_up(pm_reg,pm_mask);
+
+		pm_reg = 0x3f;
+		pm_mask = intel_mid_msgbus_read32(0x04, pm_reg);
+	}
+
+	DRM_INFO("psb - %s\n", PSB_PACKAGE_VERSION);
+
+	DRM_INFO("Run drivers on Merrifield platform!\n");
+
+	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
+	if (dev_priv == NULL)
+		return -ENOMEM;
+	INIT_LIST_HEAD(&dev_priv->video_ctx);
+	spin_lock_init(&dev_priv->video_ctx_lock);
+	if (IS_FLDS(dev))
+		dev_priv->num_pipe = 3;
+	else
+		dev_priv->num_pipe = 2;
+	atomic_set(&dev_priv->hotplug_wq_done, 1);
+
+	/*init DPST umcomm to NULL */
+	dev_priv->psb_dpst_state = NULL;
+	dev_priv->psb_hotplug_state = NULL;
+	dev_priv->hdmi_done_reading_edid = false;
+	dev_priv->um_start = false;
+
+	dev_priv->dev = dev;
+	bdev = &dev_priv->bdev;
+
+	hdmi_state = 0;
+
+	memset(g_ied_context, 0x0, sizeof(g_ied_context));
+	g_ied_ref = 0;
+	g_ied_force_clean = 0;
+	g_ied_context_index = 0;
+
+	drm_hdmi_hpd_auto = 0;
+
+	ret = psb_ttm_global_init(dev_priv);
+	if (unlikely(ret != 0))
+		goto out_err;
+	dev_priv->has_global = 1;
+
+	dev_priv->tdev = ttm_object_device_init
+	    (dev_priv->mem_global_ref.object, PSB_OBJECT_HASH_ORDER);
+	if (unlikely(dev_priv->tdev == NULL))
+		goto out_err;
+
+	mutex_init(&dev_priv->temp_mem);
+	mutex_init(&dev_priv->cmdbuf_mutex);
+	mutex_init(&g_ied_mutex);
+	mutex_init(&dev_priv->reset_mutex);
+	INIT_LIST_HEAD(&dev_priv->decode_context.validate_list);
+	INIT_LIST_HEAD(&dev_priv->encode_context.validate_list);
+#ifdef SUPPORT_VSP
+	INIT_LIST_HEAD(&dev_priv->vsp_context.validate_list);
+#endif
+
+	mutex_init(&dev_priv->dpms_mutex);
+        mutex_init(&dev_priv->gamma_csc_lock);
+	mutex_init(&dev_priv->dsr_mutex);
+	mutex_init(&dev_priv->vsync_lock);
+
+	spin_lock_init(&dev_priv->reloc_lock);
+	spin_lock_init(&dev_priv->irqmask_lock);
+
+	DRM_INIT_WAITQUEUE(&dev_priv->rel_mapped_queue);
+	init_waitqueue_head(&dev_priv->eof_wait);
+
+	dev->dev_private = (void *)dev_priv;
+	dev_priv->chipset = chipset;
+	psb_set_uopt(&dev_priv->uopt);
+
+	PSB_DEBUG_INIT("Mapping MMIO\n");
+	resource_start = pci_resource_start(dev->pdev, PSB_MMIO_RESOURCE);
+
+	if (IS_MSVDX(dev))	/* Work around for medfield by Li */
+		dev_priv->msvdx_reg =
+		    ioremap(resource_start + MRST_MSVDX_OFFSET, PSB_MSVDX_SIZE);
+	else
+		dev_priv->msvdx_reg =
+		    ioremap(resource_start + PSB_MSVDX_OFFSET, PSB_MSVDX_SIZE);
+
+	if (!dev_priv->msvdx_reg)
+		goto out_err;
+
+#ifdef SUPPORT_VSP
+	dev_priv->vsp_reg =
+		ioremap(resource_start + TNG_VSP_OFFSET, TNG_VSP_SIZE);
+	if (!dev_priv->vsp_reg)
+		goto out_err;
+#endif
+	if (IS_TOPAZ(dev)) {
+		dev_priv->topaz_reg =
+		    ioremap(resource_start + TNG_TOPAZ_OFFSET,
+			    TNG_TOPAZ_SIZE);
+
+		if (!dev_priv->topaz_reg)
+			goto out_err;
+	}
+
+	dev_priv->vdc_reg =
+	    ioremap(resource_start + PSB_VDC_OFFSET, PSB_VDC_SIZE);
+	if (!dev_priv->vdc_reg)
+		goto out_err;
+
+	if (IS_MRFLD(dev)) {
+		dev_priv->rgx_reg =
+			ioremap(resource_start + RGX_OFFSET,
+				RGX_SIZE);
+
+		if (!dev_priv->rgx_reg)
+			goto out_err;
+
+		dev_priv->wrapper_reg =
+			ioremap(resource_start + GFX_WRAPPER_OFFSET,
+				GFX_WRAPPER_SIZE);
+
+		if (!dev_priv->wrapper_reg)
+			goto out_err;
+
+		dev_priv->ved_wrapper_reg =
+			ioremap(resource_start + VED_WRAPPER_OFFSET,
+				VED_WRAPPER_SIZE);
+
+		if (!dev_priv->ved_wrapper_reg)
+			goto out_err;
+
+		dev_priv->vec_wrapper_reg =
+			ioremap(resource_start + VEC_WRAPPER_OFFSET,
+				VEC_WRAPPER_SIZE);
+
+		if (!dev_priv->vec_wrapper_reg)
+			goto out_err;
+
+	}
+
+	dev_priv->pci_root = pci_get_bus_and_slot(0, 0);
+#ifdef CONFIG_SUPPORT_HDMI
+	/* setup hdmi driver */
+	android_hdmi_driver_setup(dev);
+#endif
+	if (IS_MID(dev)) {
+		mrst_get_fuse_settings(dev);
+#ifdef CONFIG_SUPPORT_MIPI
+		mrst_get_vbt_data(dev_priv);
+#endif
+		mid_get_pci_revID(dev_priv);
+	}
+
+	PSB_DEBUG_INIT("Init TTM fence and BO driver\n");
+
+	/* Init OSPM support */
+	ospm_power_init(dev);
+
+	ret = psb_ttm_fence_device_init(&dev_priv->fdev);
+	if (unlikely(ret != 0))
+		goto out_err;
+
+	/* For VXD385 DE2.x firmware support 16bit fence value */
+	if (IS_FLDS(dev)) {
+		dev_priv->fdev.fence_class[PSB_ENGINE_VIDEO].wrap_diff =
+		    (1 << 14);
+		dev_priv->fdev.fence_class[PSB_ENGINE_VIDEO].flush_diff =
+		    (1 << 13);
+		dev_priv->fdev.fence_class[PSB_ENGINE_VIDEO].sequence_mask =
+		    0x0000ffff;
+	}
+
+	dev_priv->has_fence_device = 1;
+	ret = ttm_bo_device_init(bdev,
+				 dev_priv->bo_global_ref.ref.object,
+				 &psb_ttm_bo_driver,
+				 DRM_PSB_FILE_PAGE_OFFSET, false);
+	if (unlikely(ret != 0))
+		goto out_err;
+	dev_priv->has_bo_device = 1;
+	ttm_lock_init(&dev_priv->ttm_lock);
+
+	ret = -ENOMEM;
+
+	dev_priv->scratch_page = alloc_page(GFP_DMA32 | __GFP_ZERO);
+	if (!dev_priv->scratch_page)
+		goto out_err;
+
+	set_pages_uc(dev_priv->scratch_page, 1);
+
+	dev_priv->pg = psb_gtt_alloc(dev);
+	if (!dev_priv->pg)
+		goto out_err;
+
+	if (IS_MRFLD(dev))
+		ret = mrfld_gtt_init(dev_priv->pg, 0);
+	else
+		ret = psb_gtt_init(dev_priv->pg, 0);
+
+	if (ret)
+		goto out_err;
+
+	ret = psb_gtt_mm_init(dev_priv->pg);
+	if (ret)
+		goto out_err;
+
+	dev_priv->mmu = psb_mmu_driver_init((void *)0,
+					    drm_psb_trap_pagefaults, 0,
+					    dev_priv, IMG_MMU);
+	if (!dev_priv->mmu)
+		goto out_err;
+
+#ifdef SUPPORT_VSP
+	dev_priv->vsp_mmu = psb_mmu_driver_init((void *)0,
+					    drm_psb_trap_pagefaults, 0,
+					    dev_priv, VSP_MMU);
+	if (!dev_priv->vsp_mmu)
+		goto out_err;
+#endif
+
+	pg = dev_priv->pg;
+
+	tt_pages = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ?
+	    (pg->gatt_pages) : PSB_TT_PRIV0_PLIMIT;
+
+	/* CI/RAR use the lower half of TT. */
+	pg->gtt_video_start = (tt_pages / 2) << PAGE_SHIFT;
+
+	dev_priv->pf_pd = psb_mmu_alloc_pd(dev_priv->mmu, 1, 0);
+	if (!dev_priv->pf_pd)
+		goto out_err;
+
+	psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
+	psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
+#ifdef SUPPORT_VSP
+	/* for vsp mmu */
+	psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->vsp_mmu), 0);
+#endif
+	spin_lock_init(&dev_priv->sequence_lock);
+
+	PSB_DEBUG_INIT("Begin to init MSVDX/Topaz\n");
+
+	/*initialize the MSI for MRST */
+	if (IS_MID(dev)) {
+		if (pci_enable_msi(dev->pdev)) {
+			DRM_ERROR("Enable MSI failed!\n");
+		} else {
+			PSB_DEBUG_INIT("Enabled MSI IRQ (%d)\n",
+				       dev->pdev->irq);
+			/* pci_write_config_word(pdev, 0x04, 0x07); */
+		}
+	}
+
+	ret = drm_vblank_init(dev, dev_priv->num_pipe);
+	if (ret)
+		goto out_err;
+
+	/*
+	 * Install interrupt handlers prior to powering off SGX or else we will
+	 * crash.
+	 */
+	dev_priv->vdc_irq_mask = 0;
+	dev_priv->pipestat[0] = 0;
+	dev_priv->pipestat[1] = 0;
+	dev_priv->pipestat[2] = 0;
+	spin_lock_init(&dev_priv->irqmask_lock);
+	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+	PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R);
+	PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R);
+	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		drm_irq_install(dev);
+
+	dev->vblank_disable_allowed = 1;
+	dev->max_vblank_count = 0xffffff;
+	/* only 24 bits of frame count */
+
+	/* For Video mode panels, set the drm_vblank_offdelay so that we turn
+	 * off faster than the default of 5 seconds. This is done to have
+	 * better S0i1-Display residency for idle use cases
+	 */
+	drm_vblank_offdelay = VBLANK_OFF_DELAY_DEFAULT;
+
+	dev->driver->get_vblank_counter = psb_get_vblank_counter;
+
+#ifdef CONFIG_SUPPORT_MIPI
+	if (IS_FLDS(dev) &&
+			(is_panel_vid_or_cmd(dev) == MDFLD_DSI_ENCODER_DBI)) {
+#ifdef CONFIG_MID_DSI_DPU
+		/*init dpu info */
+		mdfld_dbi_dpu_init(dev);
+#else
+		mdfld_dbi_dsr_init(dev);
+#endif				/*CONFIG_MID_DSI_DPU */
+		INIT_WORK(&dev_priv->te_work, mdfld_te_handler_work);
+		INIT_WORK(&dev_priv->reset_panel_work,
+				mdfld_reset_panel_handler_work);
+	}
+#endif
+	INIT_WORK(&dev_priv->vsync_event_work, mdfld_vsync_event_work);
+
+	dev_priv->vsync_wq = alloc_workqueue("vsync_wq", WQ_UNBOUND, 2);
+	if (!dev_priv->vsync_wq) {
+		DRM_ERROR("failed to create vsync workqueue\n");
+		ret = -ENOMEM;
+		goto out_err;
+	}
+
+	dev_priv->power_wq = alloc_ordered_workqueue("power_wq", WQ_HIGHPRI);
+	if (!dev_priv->power_wq) {
+		DRM_ERROR("failed to create vsync workqueue\n");
+		ret = -ENOMEM;
+		goto out_err;
+	}
+
+	dev_priv->pipea_dpi_underrun_count = 0;
+	dev_priv->pipec_dpi_underrun_count = 0;
+
+	dev_priv->hdmi_first_boot = true;
+
+	if (drm_psb_no_fb == 0) {
+		psb_modeset_init(dev);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
+		psb_fbdev_init(dev);
+
+		if (IS_MRFLD(dev))
+			intel_drm_kms_helper_poll_init(dev);
+		else
+			drm_kms_helper_poll_init(dev);
+#else
+		drm_helper_initial_config(dev);
+#endif
+	}
+
+	/*must be after mrst_get_fuse_settings() */
+	ret = psb_backlight_init(dev);
+	if (ret)
+		return ret;
+
+	ret = psb_do_init(dev);
+	if (ret)
+		return ret;
+
+	/* initialize HDMI Hotplug interrupt forwarding
+	 * notifications for user mode
+	 */
+	if (IS_FLDS(dev)) {
+		struct pci_dev *pdev = NULL;
+		struct device *ddev = NULL;
+		struct kobject *kobj = NULL;
+
+		/*find handle to drm kboject */
+		pdev = dev->pdev;
+		ddev = &pdev->dev;
+		kobj = &ddev->kobj;
+
+		dev_priv->psb_hotplug_state = psb_hotplug_init(kobj);
+	}
+
+#ifdef CONFIG_SUPPORT_HDMI
+	hdmi_hotplug_timer_init(dev);
+	atomic_set(&dev_priv->hotplug_wq_done, 0);
+	INIT_WORK(&dev_priv->hdmi_hotplug_wq, hdmi_do_hotplug_wq);
+	INIT_WORK(&dev_priv->hdmi_audio_wq, hdmi_do_audio_wq);
+	INIT_WORK(&dev_priv->hdmi_audio_underrun_wq, hdmi_do_audio_underrun_wq);
+	INIT_WORK(&dev_priv->hdmi_audio_bufferdone_wq, hdmi_do_audio_bufferdone_wq);
+	tasklet_init(&dev_priv->hdmi_audio_bufferdone_tasklet,
+		     hdmi_audio_bufferdone_tasklet_func,
+		     (unsigned long)dev_priv);
+#endif
+	dev_priv->hdmi_first_boot = false;
+
+	/*Intel drm driver load is done, continue doing pvr load */
+	DRM_DEBUG("Pvr driver load\n");
+
+#ifdef CONFIG_SUPPORT_MIPI
+	/* init display manager */
+	dispmgr_start(dev);
+
+	/* SH START DPST
+	* SH This hooks dpst with the device.
+	*/
+	dpst_init(dev, 5, 1);
+
+	mdfld_dsi_dsr_enable(dev_priv->dsi_configs[0]);
+#else
+	/* power down islands turned on by firmware */
+	power_island_put(OSPM_DISPLAY_A |
+			OSPM_DISPLAY_C | OSPM_DISPLAY_MIO);
+#endif
+
+	return PVRSRVDrmLoad(dev, chipset);
+ out_err:
+	psb_driver_unload(dev);
+	return ret;
+}
+
+int psb_driver_device_is_agp(struct drm_device *dev)
+{
+	return 0;
+}
+
+int psb_extension_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	union drm_psb_extension_arg *arg = data;
+	struct drm_psb_extension_rep *rep = &arg->rep;
+
+	if (strcmp(arg->extension, "psb_ttm_placement_alphadrop") == 0) {
+		rep->exists = 1;
+		rep->driver_ioctl_offset = DRM_PSB_PLACEMENT_OFFSET;
+		rep->sarea_offset = 0;
+		rep->major = 1;
+		rep->minor = 0;
+		rep->pl = 0;
+		return 0;
+	}
+	if (strcmp(arg->extension, "psb_ttm_fence_alphadrop") == 0) {
+		rep->exists = 1;
+		rep->driver_ioctl_offset = DRM_PSB_FENCE_OFFSET;
+		rep->sarea_offset = 0;
+		rep->major = 1;
+		rep->minor = 0;
+		rep->pl = 0;
+		return 0;
+	}
+	if (strcmp(arg->extension, "psb_ttm_execbuf_alphadrop") == 0) {
+		rep->exists = 1;
+		rep->driver_ioctl_offset = DRM_PSB_CMDBUF;
+		rep->sarea_offset = 0;
+		rep->major = 1;
+		rep->minor = 0;
+		rep->pl = 0;
+		return 0;
+	}
+
+	/*return the page flipping ioctl offset */
+	if (strcmp(arg->extension, "psb_page_flipping_alphadrop") == 0) {
+		rep->exists = 1;
+		rep->driver_ioctl_offset = DRM_PSB_FLIP;
+		rep->sarea_offset = 0;
+		rep->major = 1;
+		rep->minor = 0;
+		rep->pl = 0;
+		return 0;
+	}
+
+	/* return the video rar offset */
+	if (strcmp(arg->extension, "lnc_video_getparam") == 0) {
+		rep->exists = 1;
+		rep->driver_ioctl_offset = DRM_PSB_VIDEO_GETPARAM;
+		rep->sarea_offset = 0;
+		rep->major = 1;
+		rep->minor = 0;
+		rep->pl = 0;
+		return 0;
+	}
+
+	rep->exists = 0;
+	return 0;
+}
+
+static int psb_vt_leave_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	struct ttm_bo_device *bdev = &dev_priv->bdev;
+	struct ttm_mem_type_manager *man;
+	int clean;
+	int ret;
+
+	ret = ttm_vt_lock(&dev_priv->ttm_lock, 1,
+			  BCVideoGetPriv(file_priv)->tfile);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_TT);
+	if (unlikely(ret != 0))
+		goto out_unlock;
+
+	man = &bdev->man[TTM_PL_TT];
+	/*spin_lock(&bdev->lru_lock); */
+	clean = drm_mm_clean((struct drm_mm *)man->priv);
+	/*spin_unlock(&bdev->lru_lock); */
+	if (unlikely(!clean))
+		DRM_INFO("Warning: GATT was not clean after VT switch.\n");
+
+	ttm_bo_swapout_all(&dev_priv->bdev);
+
+	return 0;
+ out_unlock:
+	(void)ttm_vt_unlock(&dev_priv->ttm_lock);
+	return ret;
+}
+
+static int psb_vt_enter_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	return ttm_vt_unlock(&dev_priv->ttm_lock);
+}
+
+static int psb_sizes_ioctl(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	struct drm_psb_sizes_arg *arg = (struct drm_psb_sizes_arg *)data;
+
+	*arg = dev_priv->sizes;
+	return 0;
+}
+
+static int psb_fuse_reg_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	uint32_t *arg = data;
+
+	*arg = dev_priv->fuse_reg_value;
+	return 0;
+}
+
+    /*****************************
+     *  HDMI TEST IOCTLS
+     */
+static int psb_drm_hdmi_test_ioctl(struct drm_device *dev,
+				   void *data, struct drm_file *file_priv)
+{
+	drm_psb_hdmireg_p reg = data;
+
+	if (!power_island_get(OSPM_DISPLAY_B | OSPM_DISPLAY_HDMI))
+		return -EAGAIN;
+
+	if (reg->mode & HT_WRITE)
+		PSB_WVDC32(reg->data, reg->reg);
+
+	if (reg->mode & HT_READ)
+		reg->data = PSB_RVDC32(reg->reg);
+
+	power_island_put(OSPM_DISPLAY_B | OSPM_DISPLAY_HDMI);
+	return 0;
+}				/* psb_drm_hdmi_test_ioctl */
+
+    /*
+     *  END HDMI TEST IOCTLS
+     ****************************/
+
+static int psb_vbt_ioctl(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	struct gct_ioctl_arg *pGCT = data;
+
+	memcpy(pGCT, &dev_priv->gct_data, sizeof(*pGCT));
+
+	return 0;
+}
+
+static int psb_csc_gamma_setting_ioctl(struct drm_device *dev, void *data,
+                         struct drm_file *file_priv)
+{
+        struct drm_psb_csc_gamma_setting *csc_gamma_setting = NULL;
+        int ret = 0;
+        csc_gamma_setting = (struct drm_psb_csc_gamma_setting *)
+                                        data;
+        printk("seting gamma ioctl!\n");
+        if (!csc_gamma_setting)
+                return -EINVAL;
+
+        if (csc_gamma_setting->type == GAMMA){
+                drm_psb_enable_gamma = 1;
+                ret = mdfld_intel_crtc_set_gamma(dev,
+                        &csc_gamma_setting->data.gamma_data);
+	}
+        else if (csc_gamma_setting->type == CSC){
+                drm_psb_enable_color_conversion = 1;
+                ret = mdfld_intel_crtc_set_color_conversion(dev,
+                        &csc_gamma_setting->data.csc_data);
+	}
+        return ret;
+}
+
+static int psb_enable_ied_session_ioctl(struct drm_device *dev, void *data,
+						struct drm_file *file_priv)
+{
+#ifdef CONFIG_SUPPORT_MIPI
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	struct mdfld_dsi_config *dsi_config = dev_priv->dsi_configs[0];
+#endif
+
+	DRM_DEBUG("Enabling IED session...\n");
+
+	if (file_priv == NULL) {
+		DRM_ERROR("%s: file_priv is NULL.\n", __func__);
+		return -1;
+	}
+
+	mutex_lock(&g_ied_mutex);
+	g_ied_context[g_ied_context_index++] = file_priv->filp;
+	g_ied_ref++;
+	if (g_ied_context_index == MAX_IED_SESSION) {
+		DRM_ERROR("ied_ctx_index == MAX_IED_SESSION!!!");
+		g_ied_context_index = 0;
+	}
+	DRM_INFO("Enable IED: ied_ref: %d ied_ctx_index: %d\n",
+			g_ied_ref, g_ied_context_index);
+	DRM_INFO("ied_ctx[ied_ref]: %p\n", g_ied_context[g_ied_ref]);
+	mutex_unlock(&g_ied_mutex);
+
+	if (power_island_get(OSPM_DISPLAY_A)) {
+#ifdef CONFIG_SUPPORT_MIPI
+		mdfld_dsi_dsr_forbid(dsi_config);
+#endif
+
+		/* Set bit 31 to enable IED pipeline */
+		REG_WRITE(PSB_IED_DRM_CNTL_STATUS, 0x80000000);
+		power_island_put(OSPM_DISPLAY_A);
+		return 0;
+	} else {
+		DRM_ERROR("%s: Failed to power on display island.\n", __func__);
+		return -1;
+	}
+
+	return 0;
+}
+
+static int psb_disable_ied_session_ioctl(struct drm_device *dev, void *data,
+					struct drm_file *file_priv)
+{
+	int ret = 0;
+	int i = 0;
+	bool ied_context_found = false;
+#ifdef CONFIG_SUPPORT_MIPI
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	struct mdfld_dsi_config *dsi_config = dev_priv->dsi_configs[0];
+#endif
+
+	DRM_DEBUG("Disabling IED session...\n");
+
+	if (file_priv == NULL) {
+		DRM_ERROR("%s: file_priv is NULL.\n", __func__);
+		return -1;
+	}
+
+
+	if (power_island_get(OSPM_DISPLAY_A)) {
+		REG_WRITE(PSB_IED_DRM_CNTL_STATUS, 0);
+#ifdef CONFIG_SUPPORT_MIPI
+		mdfld_dsi_dsr_allow(dsi_config);
+#endif
+
+		power_island_put(OSPM_DISPLAY_A);
+
+		mutex_lock(&g_ied_mutex);
+		if (!g_ied_ref) {
+			DRM_ERROR("%s: ied_ref: %d\n", __func__, g_ied_ref);
+			mutex_unlock(&g_ied_mutex);
+			return 0;
+		}
+		for (i = 0; i < MAX_IED_SESSION; i++) {
+			if (g_ied_context[i] == file_priv->filp) {
+				g_ied_context[i] = NULL;
+				if (g_ied_ref)
+					g_ied_ref--;
+			DRM_INFO("Disable IED: ied_ref:%d\
+				g_ied_context:%p\n",
+				g_ied_ref, g_ied_context[i]);
+			ied_context_found = true;
+			break;
+			}
+		}
+		if (!ied_context_found)
+			DRM_ERROR("ied_ref:%d ied_context not found!\n",
+				g_ied_ref);
+		mutex_unlock(&g_ied_mutex);
+		ret = 0;
+	} else {
+		DRM_ERROR("%s: Failed to power on display island.\n", __func__);
+		ret = -1;
+	}
+
+	return 0;
+}
+
+static int psb_panel_query_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file_priv) {
+#ifdef CONFIG_SUPPORT_MIPI
+	uint32_t *arg = data;
+
+	*arg = (is_panel_vid_or_cmd(dev) == MDFLD_DSI_ENCODER_DPI);
+#endif
+	return 0;
+}
+
+static int psb_idle_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file_priv) {
+
+	struct drm_psb_idle_ctrl *ctrl = (struct drm_psb_idle_ctrl*) data;
+
+	switch (ctrl->cmd) {
+	case IDLE_CTRL_ENABLE:
+		PSB_DEBUG_PM("IDLE_CTRL_ENABLE\n");
+		enable_repeat_frame_intr(dev);
+		break;
+	case IDLE_CTRL_DISABLE:
+		PSB_DEBUG_PM("IDLE_CTRL_DISABLE\n");
+		exit_maxfifo_mode(dev);
+		disable_repeat_frame_intr(dev);
+		break;
+	case IDLE_CTRL_ENTER:
+		PSB_DEBUG_PM("IDLE_CTRL_ENTER\n");
+		enter_maxfifo_mode(dev);
+		break;
+	case IDLE_CTRL_EXIT:
+		PSB_DEBUG_PM("IDLE_CTRL_EXIT\n");
+		exit_maxfifo_mode(dev);
+		enable_repeat_frame_intr(dev);
+		break;
+	default:
+		DRM_ERROR("%s: invalid command.\n", __func__);
+		break;
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_SUPPORT_HDMI
+static int psb_disp_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv)
+{
+	static uint32_t hdmi_export_handle;
+	struct drm_psb_disp_ctrl *dp_ctrl = data;
+	/*DRM_COPY_FROM_USER(&dp_ctrl, data,
+		sizeof(struct drm_psb_disp_ctrl)); */
+	DRM_INFO("disp cmd:%d\n", dp_ctrl->cmd);
+	if (dp_ctrl->cmd == DRM_PSB_DISP_SAVE_HDMI_FB_HANDLE) {
+		hdmi_export_handle = dp_ctrl->u.data;
+		DRM_INFO("save hdmi export handle:%d\n",
+						hdmi_export_handle);
+	} else if (dp_ctrl->cmd == DRM_PSB_DISP_GET_HDMI_FB_HANDLE) {
+		dp_ctrl->u.data = hdmi_export_handle;
+		DRM_INFO("retrive hdmi export handle:%d\n", hdmi_export_handle);
+	}
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_SUPPORT_HDMI
+static int psb_query_hdcp_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv)
+{
+	uint32_t *arg = data;
+	uint8_t bksv[5];
+
+	/* Attempting to read the BKSV value from the HDMI device.
+	 * A successful read of this data indicates that HDCP is
+	 * supported.  A value of zero would indicate that it's
+	 * not.
+	 */
+	*arg = android_query_hdmi_hdcp_sink(dev, bksv);
+
+	return 0;
+}
+static int psb_validate_hdcp_ksv_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv)
+{
+	sqword_tt *arg = data;
+	sqword_tt hw_bksv;
+	if (android_query_hdmi_hdcp_sink(dev, (uint8_t *)&hw_bksv)) {
+		*arg = hw_bksv;
+		return 0;
+	}
+
+	return -1;
+}
+static int psb_get_hdcp_status_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv)
+{
+	uint32_t *arg = data;
+	*arg = android_check_hdmi_hdcp_enc_status(dev);
+
+	return 0;
+}
+static int psb_enable_hdcp_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv)
+{
+	int ret;
+	ret = android_enable_hdmi_hdcp(dev);
+	if (ret)
+		return 0;
+	else
+		return -1;
+}
+static int psb_disable_hdcp_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv)
+{
+	int ret;
+	ret = android_disable_hdmi_hdcp(dev);
+	if (ret)
+		return 0;
+	else
+		return -1;
+}
+static int psb_enable_display_ied_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv)
+{
+	int ret = 0;
+	int temp = 0;
+
+	if (power_island_get(OSPM_DISPLAY_ISLAND)) {
+		temp = PSB_RVDC32(DSPCHICKENBIT);
+		temp &= ~(1 << 31);
+		PSB_WVDC32(temp, DSPCHICKENBIT);
+		temp = PSB_RVDC32(DSPCHICKENBIT);
+		power_island_put(OSPM_DISPLAY_ISLAND);
+	} else
+		ret = -1;
+
+	return ret;
+}
+static int psb_disable_display_ied_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv)
+{
+	int ret = 0;
+	int temp = 0;
+
+	if (power_island_get(OSPM_DISPLAY_ISLAND)) {
+		temp = PSB_RVDC32(DSPCHICKENBIT);
+		temp |= (1 << 31);
+		PSB_WVDC32(temp, DSPCHICKENBIT);
+		temp = PSB_RVDC32(DSPCHICKENBIT);
+		power_island_put(OSPM_DISPLAY_ISLAND);
+	} else
+		ret = -1;
+
+	return ret;
+}
+static int psb_query_display_ied_caps_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv)
+{
+	uint32_t *arg = data;
+
+	/* IED control is always enabled on merrifield platform */
+	*arg = 1;
+
+	return 0;
+}
+
+static int psb_get_hdcp_link_status_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv)
+{
+	uint32_t *arg = data;
+	*arg = android_check_hdmi_hdcp_link_status(dev);
+
+	return 0;
+}
+
+
+#endif /* ifdef CONFIG_SUPPORT_HDMI */
+
+static int psb_set_csc_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *file_priv)
+{
+	struct drm_psb_csc_matrix *csc_matrix = data;
+        struct csc_setting csc;
+
+	csc.pipe = csc_matrix->pipe;
+	csc.type = CSC_SETTING;
+	csc.enable_state = true;
+	csc.data_len = CSC_COUNT;
+	memcpy(csc.data.csc_data, csc_matrix->matrix, sizeof(csc.data.csc_data));
+	drm_psb_enable_color_conversion = 1;
+	mdfld_intel_crtc_set_color_conversion(dev, &csc);
+
+	return 0;
+}
+
+static int psb_dc_state_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv)
+{
+	uint32_t flags;
+	uint32_t obj_id;
+	struct drm_mode_object *obj;
+	struct drm_connector *connector;
+	struct drm_crtc *crtc;
+	struct drm_psb_dc_state_arg *arg = (struct drm_psb_dc_state_arg *)data;
+
+	if (IS_MID(dev))
+		return 0;
+
+	flags = arg->flags;
+	obj_id = arg->obj_id;
+
+	if (flags & PSB_DC_CRTC_MASK) {
+		obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_CRTC);
+		if (!obj) {
+			DRM_DEBUG("Invalid CRTC object.\n");
+			return -EINVAL;
+		}
+
+		crtc = obj_to_crtc(obj);
+
+		mutex_lock(&dev->mode_config.mutex);
+		if (drm_helper_crtc_in_use(crtc)) {
+			if (flags & PSB_DC_CRTC_SAVE)
+				crtc->funcs->save(crtc);
+			else
+				crtc->funcs->restore(crtc);
+		}
+		mutex_unlock(&dev->mode_config.mutex);
+
+		return 0;
+	} else if (flags & PSB_DC_OUTPUT_MASK) {
+		obj = drm_mode_object_find(dev, obj_id,
+					   DRM_MODE_OBJECT_CONNECTOR);
+		if (!obj) {
+			DRM_DEBUG("Invalid connector id.\n");
+			return -EINVAL;
+		}
+
+		connector = obj_to_connector(obj);
+		if (flags & PSB_DC_OUTPUT_SAVE)
+			connector->funcs->save(connector);
+		else
+			connector->funcs->restore(connector);
+
+		return 0;
+	}
+
+	DRM_DEBUG("Bad flags 0x%x\n", flags);
+	return -EINVAL;
+}
+
+#ifdef CONFIG_SUPPORT_MIPI
+static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *file_priv)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	uint32_t *arg = data;
+	dev_priv->blc_adj2 = *arg;
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+	{
+		struct backlight_device bd;
+		bd.props.brightness = psb_get_brightness(&bd);
+		psb_set_brightness(&bd);
+	}
+#endif
+	return 0;
+}
+#endif
+
+static int psb_adb_ioctl(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	uint32_t *arg = data;
+	dev_priv->blc_adj1 = *arg;
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+	{
+		struct backlight_device bd;
+		bd.props.brightness = psb_get_brightness(&bd);
+		psb_set_brightness(&bd);
+	}
+#endif
+	return 0;
+}
+
+#ifdef CONFIG_SUPPORT_MIPI
+static int psb_hist_enable_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv)
+{
+	u32 irqCtrl = 0;
+	struct dpst_guardband guardband_reg;
+	struct dpst_ie_histogram_control ie_hist_cont_reg;
+	uint32_t *enable = data;
+
+	/* FIXME: revisit the power island when touching the DPST feature. */
+	if (!power_island_get(OSPM_DISPLAY_A))
+		return 0;
+
+	if (*enable == 1) {
+		ie_hist_cont_reg.data = PSB_RVDC32(HISTOGRAM_LOGIC_CONTROL);
+		ie_hist_cont_reg.ie_pipe_assignment = 0;
+		ie_hist_cont_reg.histogram_mode_select = DPST_YUV_LUMA_MODE;
+		ie_hist_cont_reg.ie_histogram_enable = 1;
+		PSB_WVDC32(ie_hist_cont_reg.data, HISTOGRAM_LOGIC_CONTROL);
+
+		guardband_reg.data = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
+		guardband_reg.interrupt_enable = 1;
+		guardband_reg.interrupt_status = 1;
+		PSB_WVDC32(guardband_reg.data, HISTOGRAM_INT_CONTROL);
+
+		irqCtrl = PSB_RVDC32(PIPEASTAT);
+		PSB_WVDC32(irqCtrl | PIPE_DPST_EVENT_ENABLE, PIPEASTAT);
+		/* Wait for two vblanks */
+	} else {
+		guardband_reg.data = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
+		guardband_reg.interrupt_enable = 0;
+		guardband_reg.interrupt_status = 1;
+		PSB_WVDC32(guardband_reg.data, HISTOGRAM_INT_CONTROL);
+
+		ie_hist_cont_reg.data = PSB_RVDC32(HISTOGRAM_LOGIC_CONTROL);
+		ie_hist_cont_reg.ie_histogram_enable = 0;
+		PSB_WVDC32(ie_hist_cont_reg.data, HISTOGRAM_LOGIC_CONTROL);
+
+		irqCtrl = PSB_RVDC32(PIPEASTAT);
+		irqCtrl &= ~PIPE_DPST_EVENT_ENABLE;
+		PSB_WVDC32(irqCtrl, PIPEASTAT);
+	}
+
+	power_island_put(OSPM_DISPLAY_A);
+
+	return 0;
+}
+
+static int psb_hist_status_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv)
+{
+	struct drm_psb_hist_status_arg *hist_status = data;
+	uint32_t *arg = hist_status->buf;
+	u32 iedbr_reg_data = 0;
+	struct dpst_ie_histogram_control ie_hist_cont_reg;
+	u32 i;
+	int dpst3_bin_threshold_count = 0;
+	uint32_t blm_hist_ctl = HISTOGRAM_LOGIC_CONTROL;
+	uint32_t iebdr_reg = HISTOGRAM_BIN_DATA;
+	uint32_t segvalue_max_22_bit = 0x3fffff;
+	uint32_t iedbr_busy_bit = 0x80000000;
+	int dpst3_bin_count = 32;
+
+	/* FIXME: revisit the power island when touching the DPST feature. */
+	if (!power_island_get(OSPM_DISPLAY_A))
+		return 0;
+
+	ie_hist_cont_reg.data = PSB_RVDC32(blm_hist_ctl);
+	ie_hist_cont_reg.bin_reg_func_select = dpst3_bin_threshold_count;
+	ie_hist_cont_reg.bin_reg_index = 0;
+
+	PSB_WVDC32(ie_hist_cont_reg.data, blm_hist_ctl);
+
+	for (i = 0; i < dpst3_bin_count; i++) {
+		iedbr_reg_data = PSB_RVDC32(iebdr_reg);
+
+		if (!(iedbr_reg_data & iedbr_busy_bit)) {
+			arg[i] = iedbr_reg_data & segvalue_max_22_bit;
+		} else {
+			i = 0;
+			ie_hist_cont_reg.data = PSB_RVDC32(blm_hist_ctl);
+			ie_hist_cont_reg.bin_reg_index = 0;
+			PSB_WVDC32(ie_hist_cont_reg.data, blm_hist_ctl);
+		}
+	}
+
+	power_island_put(OSPM_DISPLAY_A);
+
+	return 0;
+}
+
+static int psb_init_comm_ioctl(struct drm_device *dev, void *data,
+			       struct drm_file *file_priv)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	struct pci_dev *pdev = NULL;
+	struct device *ddev = NULL;
+	struct kobject *kobj = NULL;
+	uint32_t *arg = data;
+
+	if (*arg == 1) {
+		/*find handle to drm kboject */
+		pdev = dev->pdev;
+		ddev = &pdev->dev;
+		kobj = &ddev->kobj;
+
+		if (dev_priv->psb_dpst_state == NULL) {
+			/*init dpst kmum comms */
+			dev_priv->psb_dpst_state = psb_dpst_init(kobj);
+		} else {
+			PSB_DEBUG_ENTRY("DPST already initialized\n");
+		}
+
+		psb_irq_enable_dpst(dev);
+		psb_dpst_notify_change_um(DPST_EVENT_INIT_COMPLETE,
+					  dev_priv->psb_dpst_state);
+	} else {
+		/*hotplug and dpst destroy examples */
+		psb_irq_disable_dpst(dev);
+		psb_dpst_notify_change_um(DPST_EVENT_TERMINATE,
+					  dev_priv->psb_dpst_state);
+		psb_dpst_device_pool_destroy(dev_priv->psb_dpst_state);
+		dev_priv->psb_dpst_state = NULL;
+	}
+	return 0;
+}
+
+/* return the current mode to the dpst module */
+static int psb_dpst_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv)
+{
+	uint32_t *arg = data;
+	uint32_t x;
+	uint32_t y;
+	uint32_t reg;
+
+	if (!power_island_get(OSPM_DISPLAY_A))
+		return 0;
+
+	reg = PSB_RVDC32(PIPEASRC);
+
+	power_island_put(OSPM_DISPLAY_A);
+
+	/* horizontal is the left 16 bits */
+	x = reg >> 16;
+	/* vertical is the right 16 bits */
+	y = reg & 0x0000ffff;
+
+	/* the values are the image size minus one */
+	x += 1;
+	y += 1;
+
+	*arg = (x << 16) | y;
+
+	return 0;
+}
+
+static int psb_gamma_ioctl(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv)
+{
+	struct drm_psb_dpst_lut_arg *lut_arg = data;
+	struct drm_mode_object *obj;
+	struct drm_crtc *crtc;
+	struct drm_connector *connector;
+	struct psb_intel_crtc *psb_intel_crtc;
+	int i = 0;
+	int32_t obj_id;
+
+	obj_id = lut_arg->output_id;
+	obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_CONNECTOR);
+	if (!obj) {
+		DRM_DEBUG("Invalid Connector object.\n");
+		return -EINVAL;
+	}
+
+	connector = obj_to_connector(obj);
+	crtc = connector->encoder->crtc;
+	psb_intel_crtc = to_psb_intel_crtc(crtc);
+
+	for (i = 0; i < 256; i++)
+		psb_intel_crtc->lut_adj[i] = lut_arg->lut[i];
+
+	psb_intel_crtc_load_lut(crtc);
+
+	return 0;
+}
+
+static int psb_update_guard_ioctl(struct drm_device *dev, void *data,
+				  struct drm_file *file_priv)
+{
+	struct dpst_guardband *input = (struct dpst_guardband *)data;
+	struct dpst_guardband reg_data;
+
+	/* FIXME: revisit the power island when touching the DPST feature. */
+	if (!power_island_get(OSPM_DISPLAY_A))
+		return 0;
+
+	reg_data.data = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
+	reg_data.guardband = input->guardband;
+	reg_data.guardband_interrupt_delay = input->guardband_interrupt_delay;
+	/* PSB_DEBUG_ENTRY( "guardband = %u\ninterrupt delay = %u\n",
+	   reg_data.guardband, reg_data.guardband_interrupt_delay); */
+	PSB_WVDC32(reg_data.data, HISTOGRAM_INT_CONTROL);
+
+	power_island_put(OSPM_DISPLAY_A);
+
+	return 0;
+}
+#endif
+
+static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
+				    struct drm_file *file_priv)
+{
+	uint32_t obj_id;
+	uint16_t op;
+	struct drm_mode_modeinfo *umode;
+	struct drm_display_mode *mode = NULL;
+	struct drm_psb_mode_operation_arg *arg;
+	struct drm_mode_object *obj;
+	struct drm_connector *connector;
+	struct drm_framebuffer *drm_fb;
+	struct psb_framebuffer *psb_fb;
+	struct drm_connector_helper_funcs *connector_funcs;
+	int ret = 0;
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+
+	arg = (struct drm_psb_mode_operation_arg *)data;
+	obj_id = arg->obj_id;
+	op = arg->operation;
+
+	switch (op) {
+	case PSB_MODE_OPERATION_SET_DC_BASE:
+		obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_FB);
+		if (!obj) {
+			DRM_ERROR("Invalid FB id %d\n", obj_id);
+			return -EINVAL;
+		}
+
+		drm_fb = obj_to_fb(obj);
+		psb_fb = to_psb_fb(drm_fb);
+
+		if (power_island_get(OSPM_DISPLAY_A)) {
+			REG_WRITE(DSPASURF, psb_fb->offset);
+			REG_READ(DSPASURF);
+			power_island_put(OSPM_DISPLAY_A);
+		} else {
+			dev_priv->saveDSPASURF = psb_fb->offset;
+		}
+
+		return 0;
+	case PSB_MODE_OPERATION_MODE_VALID:
+		umode = &arg->mode;
+
+		mutex_lock(&dev->mode_config.mutex);
+
+		obj =
+		    drm_mode_object_find(dev, obj_id,
+					 DRM_MODE_OBJECT_CONNECTOR);
+		if (!obj) {
+			ret = -EINVAL;
+			goto mode_op_out;
+		}
+
+		connector = obj_to_connector(obj);
+
+		mode = drm_mode_create(dev);
+		if (!mode) {
+			ret = -ENOMEM;
+			goto mode_op_out;
+		}
+
+		/* drm_crtc_convert_umode(mode, umode); */
+		{
+			mode->clock = umode->clock;
+			mode->hdisplay = umode->hdisplay;
+			mode->hsync_start = umode->hsync_start;
+			mode->hsync_end = umode->hsync_end;
+			mode->htotal = umode->htotal;
+			mode->hskew = umode->hskew;
+			mode->vdisplay = umode->vdisplay;
+			mode->vsync_start = umode->vsync_start;
+			mode->vsync_end = umode->vsync_end;
+			mode->vtotal = umode->vtotal;
+			mode->vscan = umode->vscan;
+			mode->vrefresh = umode->vrefresh;
+			mode->flags = umode->flags;
+			mode->type = umode->type;
+			strncpy(mode->name, umode->name, DRM_DISPLAY_MODE_LEN);
+			mode->name[DRM_DISPLAY_MODE_LEN - 1] = 0;
+		}
+
+		connector_funcs = (struct drm_connector_helper_funcs *)
+			connector->helper_private;
+
+		if (connector_funcs->mode_valid) {
+			arg->data = (void *)
+				(uintptr_t) connector_funcs->
+				mode_valid(connector, mode);
+		}
+
+		/*do some clean up work */
+		if (mode)
+			drm_mode_destroy(dev, mode);
+ mode_op_out:
+		mutex_unlock(&dev->mode_config.mutex);
+		return ret;
+
+	default:
+		DRM_DEBUG("Unsupported psb mode operation");
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+static int psb_stolen_memory_ioctl(struct drm_device *dev, void *data,
+				   struct drm_file *file_priv)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	struct drm_psb_stolen_memory_arg *arg = data;
+
+	arg->base = dev_priv->pg->stolen_base;
+	arg->size = dev_priv->pg->vram_stolen_size;
+
+	return 0;
+}
+
+#ifdef CONFIG_SUPPORT_MIPI
+static int psb_dpu_query_ioctl(struct drm_device *dev, void *arg,
+			       struct drm_file *file_priv)
+{
+	int *data = (int *)arg;
+	DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+	mdfld_dsi_encoder_t encoder_type;
+
+	/*reject requests from non-flds platforms */
+	if (!IS_FLDS(dev)) {
+		DRM_INFO("Not Medfield or Merrifield platform! return.");
+		return -EOPNOTSUPP;
+	}
+	DRM_INFO("dsr query.\n");
+
+	dev_priv->um_start = true;
+	encoder_type = is_panel_vid_or_cmd(dev);
+
+	if (encoder_type == MDFLD_DSI_ENCODER_DPI) {
+		DRM_INFO("DSI panel is working in video mode\n");
+		dev_priv->b_dsr_enable = false;
+		*data = 0;
+		return 0;
+	}
+#if defined(CONFIG_MID_DSI_DSR)
+	dev_priv->b_dsr_enable = true;
+	*data = MDFLD_DSR_RR | MDFLD_DSR_FULLSCREEN;
+#elif defined(CONFIG_MID_DSI_DPU)
+	dev_priv->b_dsr_enable = true;
+	*data = MDFLD_DSR_RR | MDFLD_DPU_ENABLE;
+#else				/*DBI panel but DSR was not defined */
+	DRM_INFO("DSR is disabled by kernel configuration.\n");
+
+	dev_priv->b_dsr_enable = false;
+	*data = 0;
+#endif				/*CONFIG_MID_DSI_DSR */
+	return 0;
+}
+
+static int psb_dpu_dsr_on_ioctl(struct drm_device *dev, void *arg,
+				struct drm_file *file_priv)
+{
+	u32 *param = (u32 *) arg;
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *)dev->dev_private;
+	mdfld_dsi_encoder_t encoder_type;
+
+	/*reject requests from non-fld platforms */
+	if (!IS_FLDS(dev)) {
+		DRM_INFO("Not Medfield or Merrifield platform! return.");
+		return -EOPNOTSUPP;
+	}
+
+	encoder_type = is_panel_vid_or_cmd(dev);
+
+	if (encoder_type == MDFLD_DSI_ENCODER_DPI) {
+		DRM_INFO("DSI panel is working in video mode\n");
+		dev_priv->b_dsr_enable = false;
+		return 0;
+	}
+
+	if (!param) {
+		DRM_ERROR("Invalid parameter\n");
+		return -EINVAL;
+	}
+
+	PSB_DEBUG_ENTRY("dsr kick in. param 0x%08x\n", *param);
+
+	if (*param == DRM_PSB_DSR_DISABLE) {
+		PSB_DEBUG_ENTRY("DSR is turned off\n");
+		dev_priv->b_dsr_enable = false;
+#if defined(CONFIG_MID_DSI_DPU)
+		mdfld_dbi_dpu_report_fullscreen_damage(dev);
+#elif defined(CONFIG_MID_DSI_DSR)
+		mdfld_dsi_dbi_exit_dsr(dev, MDFLD_DSR_2D_3D, 0, 0);
+#endif
+		return 0;
+	} else if (*param == DRM_PSB_DSR_ENABLE) {
+		PSB_DEBUG_ENTRY("DSR is turned on\n");
+#if defined(CONFIG_MID_DSI_DPU) || defined(CONFIG_MID_DSI_DSR)
+		dev_priv->b_dsr_enable = true;
+#endif
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static int psb_dpu_dsr_off_ioctl(struct drm_device *dev, void *arg,
+				 struct drm_file *file_priv)
+{
+#if defined(CONFIG_MID_DSI_DPU)
+	struct drm_psb_drv_dsr_off_arg *dsr_off_arg =
+	    (struct drm_psb_drv_dsr_off_arg *)arg;
+	struct psb_drm_dpu_rect rect = dsr_off_arg->damage_rect;
+
+	return mdfld_dsi_dbi_dsr_off(dev, &rect);
+#elif defined(CONFIG_MID_DSI_DSR)
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *)dev->dev_private;
+	if ((dev_priv->dsr_fb_update & MDFLD_DSR_2D_3D) !=
+		MDFLD_DSR_2D_3D)
+			mdfld_dsi_dbi_exit_dsr(dev, MDFLD_DSR_2D_3D, 0, 0);
+#if 0
+	{
+		static int pipe;
+
+		if (pipe > 0) {
+			pipe = 0;
+			if (gdbi_output && gbdispstatus == false) {
+				dev_priv->b_dsr_enable = true;
+				mdfld_dsi_dbi_enter_dsr(gdbi_output, 1);
+				mdfld_dsi_dbi_enter_dsr(gdbi_output, 2);
+			}
+		}
+	}
+#endif
+
+#endif
+	return 0;
+}
+#endif
+
+#if KEEP_UNUSED_CODE
+/*wait for vblank*/
+static void overlay_wait_vblank(struct drm_device *dev, uint32_t ovadd)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	uint32_t ovadd_pipe;
+	uint32_t pipestat_reg;
+	int retry;
+
+	ovadd_pipe = ((ovadd >> 6) & 0x3);
+	switch (ovadd_pipe) {
+	case 0:
+		pipestat_reg = PIPEASTAT;
+		break;
+	case 1:
+		pipestat_reg = PIPECSTAT;
+		break;
+	case 2:
+		pipestat_reg = PIPEBSTAT;
+		break;
+	default:
+		DRM_ERROR("wrong OVADD pipe seletion\n");
+		return;
+	}
+
+	/**
+	 * wait for vblank upto 30ms,the period of vblank is 22ms.
+	 */
+	retry = 3000;
+	while (--retry) {
+		if ((PSB_RVDC32(pipestat_reg) & PIPE_VBLANK_STATUS))
+			break;
+		udelay(10);
+	}
+
+	if (!retry)
+		DRM_ERROR("wait vblank timeout!\n");
+}
+#endif /* if KEEP_UNUSED_CODE */
+
+static void vsync_state_dump(struct drm_device *dev, int pipe)
+{
+	if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_FORCE_POWER_ON))
+		return;
+
+	DRM_INFO("vblank_refcount = %u\n", atomic_read(&dev->vblank_refcount[pipe]));
+	DRM_INFO("vblank_enabled = %d\n", dev->vblank_enabled[pipe]);
+	DRM_INFO("vblank_count = %u\n", drm_vblank_count(dev, pipe));
+	DRM_INFO("PIPECONF = 0x%08x\n", pipe ? REG_READ(PIPEBCONF) : REG_READ(PIPEACONF));
+	DRM_INFO("PIPESTAT = 0x%08x\n\n", pipe ? REG_READ(PIPEBSTAT) : REG_READ(PIPEASTAT));
+	ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+}
+
+static int psb_vsync_set_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	struct drm_psb_vsync_set_arg *arg = data;
+#ifdef CONFIG_SUPPORT_MIPI
+	struct mdfld_dsi_config *dsi_config = NULL;
+#endif
+	struct timespec now;
+	uint32_t pipe;
+	union drm_wait_vblank vblwait;
+	u32 vbl_count = 0;
+	s64 nsecs = 0;
+	int ret = -EINVAL;
+
+	if (arg->vsync_operation_mask) {
+		pipe = arg->vsync.pipe;
+		if (pipe < 0 || pipe > 2) {
+			DRM_ERROR("%s: invalid pipe %d", __func__, pipe);
+			return -EINVAL;
+		}
+
+		if (arg->vsync_operation_mask & GET_VSYNC_COUNT) {
+			vbl_count = drm_vblank_count(dev, pipe);
+
+			getrawmonotonic(&now);
+			nsecs = timespec_to_ns(&now);
+
+			arg->vsync.timestamp = (uint64_t)nsecs;
+			arg->vsync.vsync_count = (uint64_t)vbl_count;
+		}
+
+#ifdef CONFIG_SUPPORT_MIPI
+		if (!pipe)
+			dsi_config = dev_priv->dsi_configs[0];
+		else if (pipe == 2)
+			dsi_config = dev_priv->dsi_configs[1];
+#endif
+
+		if (arg->vsync_operation_mask & VSYNC_WAIT) {
+
+#ifdef CONFIG_SUPPORT_MIPI
+			if (dev_priv->vsync_enabled[pipe] && ((pipe == 1) ||
+						(dsi_config &&
+						 dsi_config->dsi_hw_context.panel_on))) {
+#else
+			if (dev_priv->vsync_enabled[pipe] && (pipe == 1)) {
+#endif
+				vblwait.request.type =
+					(_DRM_VBLANK_RELATIVE |
+					 _DRM_VBLANK_NEXTONMISS);
+				vblwait.request.sequence = 1;
+
+				if (pipe == 1)
+					vblwait.request.type |=
+						_DRM_VBLANK_SECONDARY;
+
+				ret = drm_wait_vblank(dev, (void *)&vblwait,
+						file_priv);
+				if (ret && (ret != -EINTR)) {
+					DRM_ERROR("fail to get vsync on pipe %d, ret %d\n", pipe, ret);
+					vsync_state_dump(dev, pipe);
+
+#ifdef CONFIG_SUPPORT_MIPI
+					if (!IS_ANN(dev)) {
+						if (pipe != 1 &&
+							is_panel_vid_or_cmd(dev) == MDFLD_DSI_ENCODER_DBI &&
+							dsi_config &&
+							dsi_config->dsi_hw_context.panel_on) {
+							schedule_work(&dev_priv->reset_panel_work);
+						}
+					}
+#endif
+				} else if (ret == -EINTR)
+					ret = 0;
+			}else{
+				DRM_INFO("request VSYNC on pipe(%d) when vsync_enabled=%d.\n",
+						 pipe, dev_priv->vsync_enabled[pipe]);
+			}
+
+			getrawmonotonic(&now);
+			nsecs = timespec_to_ns(&now);
+			arg->vsync.timestamp = (uint64_t)nsecs;
+			return ret;
+		}
+
+		if (arg->vsync_operation_mask & VSYNC_ENABLE) {
+			if (dev_priv->vsync_enabled[pipe]) {
+				DRM_ERROR("%s: vsync has been enabled on pipe %d",
+						__func__, pipe);
+				return 0;
+			}
+#ifdef CONFIG_SUPPORT_MIPI
+			mdfld_dsi_dsr_forbid(dsi_config);
+#if 0
+			ret = drm_vblank_get(dev, pipe);
+			if (ret != 0) {
+				DRM_ERROR("%s: fail to enable vsync on pipe %d\n",
+						__func__, pipe);
+				mdfld_dsi_dsr_allow(dsi_config);
+			} else
+#endif
+#endif
+			dev_priv->vsync_enabled[pipe] = true;
+			ret = 0;
+		}
+
+		if (arg->vsync_operation_mask & VSYNC_DISABLE) {
+			if (!dev_priv->vsync_enabled[pipe]) {
+				DRM_ERROR("%s: vsync has been disabled on pipe %d",
+						__func__, pipe);
+				return 0;
+			}
+			dev_priv->vsync_enabled[pipe] = false;
+#ifdef CONFIG_SUPPORT_MIPI
+#if 0
+			drm_vblank_put(dev, pipe);
+#endif
+			mdfld_dsi_dsr_allow(dsi_config);
+#endif
+			ret = 0;
+		}
+	}
+
+	return ret;
+}
+
+static int psb_register_rw_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv)
+{
+	struct drm_psb_register_rw_arg *arg = data;
+#ifdef CONFIG_SUPPORT_MIPI
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	struct mdfld_dsi_config *dsi_config = NULL;
+#endif
+	u32 power_island = 0;
+
+	if (arg->overlay_write_mask != 0) {
+		if (arg->overlay_write_mask & OV_REGRWBITS_OGAM_ALL)
+			power_island |= OSPM_DISPLAY_A;
+
+		if (arg->overlay_write_mask & OVC_REGRWBITS_OGAM_ALL)
+			power_island |= OSPM_DISPLAY_C;
+
+		if (power_island_get(power_island)) {
+			u32 index = arg->overlay.index;
+			u32 ov_ogamc5_reg;
+			u32 ov_ogamc4_reg;
+			u32 ov_ogamc3_reg;
+			u32 ov_ogamc2_reg;
+			u32 ov_ogamc1_reg;
+			u32 ov_ogamc0_reg;
+
+			switch (index) {
+			case 0:  /* overlay A */
+				ov_ogamc5_reg = OV_OGAMC5;
+				ov_ogamc4_reg = OV_OGAMC4;
+				ov_ogamc3_reg = OV_OGAMC3;
+				ov_ogamc2_reg = OV_OGAMC2;
+				ov_ogamc1_reg = OV_OGAMC1;
+				ov_ogamc0_reg = OV_OGAMC0;
+				break;
+			case 1:  /* overlay C */
+				ov_ogamc5_reg = OVC_OGAMC5;
+				ov_ogamc4_reg = OVC_OGAMC4;
+				ov_ogamc3_reg = OVC_OGAMC3;
+				ov_ogamc2_reg = OVC_OGAMC2;
+				ov_ogamc1_reg = OVC_OGAMC1;
+				ov_ogamc0_reg = OVC_OGAMC0;
+				break;
+			default:
+				DRM_ERROR("Invalid overlay index %d\n", index);
+				return -EINVAL;
+			}
+
+#ifdef CONFIG_SUPPORT_MIPI
+			/*forbid dsr which will restore regs*/
+			dsi_config = dev_priv->dsi_configs[0];
+			mdfld_dsi_dsr_forbid(dsi_config);
+#endif
+
+			if (arg->overlay_write_mask & OV_REGRWBITS_OGAM_ALL) {
+				PSB_WVDC32(arg->overlay.OGAMC5, ov_ogamc5_reg);
+				PSB_WVDC32(arg->overlay.OGAMC4, ov_ogamc4_reg);
+				PSB_WVDC32(arg->overlay.OGAMC3, ov_ogamc3_reg);
+				PSB_WVDC32(arg->overlay.OGAMC2, ov_ogamc2_reg);
+				PSB_WVDC32(arg->overlay.OGAMC1, ov_ogamc1_reg);
+				PSB_WVDC32(arg->overlay.OGAMC0, ov_ogamc0_reg);
+			}
+
+#ifdef CONFIG_SUPPORT_MIPI
+			if (arg->overlay_write_mask & OV_REGRWBITS_OVADD) {
+				PSB_WVDC32(arg->overlay.OVADD, OV_OVADD);
+				if (arg->overlay.b_wms){
+					mdfld_dsi_dsr_update_panel_fb(dsi_config);
+				}
+			}
+			/*allow entering dsr*/
+			mdfld_dsi_dsr_allow(dsi_config);
+#endif
+
+			power_island_put(power_island);
+		}
+	}
+#if 0
+	if (arg->plane_enable_mask != 0)
+		DC_MRFLD_Enable_Plane(arg->plane.type,
+				arg->plane.index, arg->plane.ctx);
+
+	if (arg->plane_disable_mask != 0)
+		DC_MRFLD_Disable_Plane(arg->plane.type,
+				arg->plane.index, arg->plane.ctx);
+#endif
+	if (arg->get_plane_state_mask != 0) {
+		u32 pipe = arg->plane.ctx;
+		bool bDisabled = DC_MRFLD_Is_Plane_Disabled(arg->plane.type,
+						arg->plane.index, pipe);
+		if (bDisabled)
+			arg->plane.ctx = PSB_DC_PLANE_DISABLED;
+		else
+			arg->plane.ctx = PSB_DC_PLANE_ENABLED;
+	}
+
+	if (arg->overlay_read_mask & OVSTATUS_REGRBIT_OVR_UPDT) {
+		u32 ovstat_reg = OV_DOVASTA;
+		u32 pipe = arg->plane.ctx;
+		u32 pipeconf_reg;
+		power_island |= OSPM_DISPLAY_A;
+		if (arg->plane.index) {
+			power_island |= OSPM_DISPLAY_C;
+			ovstat_reg = OVC_DOVCSTA;
+		}
+		/* By default overlay is not updated since last vblank event*/
+		arg->plane.ctx = 1;
+		if (pipe == PIPEA)
+			pipeconf_reg = PIPEACONF;
+		else if (pipe == PIPEB)
+			pipeconf_reg = PIPEBCONF;
+		else {
+			DRM_ERROR("Invalid pipe:%d!\n", pipe);
+			return -EINVAL;
+		}
+
+		if (REG_READ(pipeconf_reg) & BIT31) {
+			if (power_island_get(power_island)) {
+				arg->plane.ctx =
+					(PSB_RVDC32(ovstat_reg) & BIT31) == 0 ? 0 : 1;
+				power_island_put(power_island);
+			}
+		} else
+			DRM_INFO("%s: pipe %d is disabled!\n", __func__, pipe);
+	}
+	return 0;
+}
+
+static int psb_get_panel_orientation_ioctl(struct drm_device *dev, void *data,
+						 struct drm_file *file_priv)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	int *arg = data;
+
+	if (dev_priv->panel_180_rotation)
+            *arg = 1; // panel orientation 180
+        else
+            *arg = 0;
+	return 0;
+}
+
+static int psb_update_cursor_pos_ioctl(struct drm_device *dev, void *data,
+						 struct drm_file *file_priv)
+{
+	struct intel_dc_cursor_ctx *ctx = (struct intel_dc_cursor_ctx*) data;
+
+	if (ctx == NULL) {
+		DRM_ERROR("%s: invalid cursor context\n", __func__);
+		return -1;
+	}
+
+	return DCUpdateCursorPos(ctx->pipe, ctx->pos);
+}
+
+
+/* always available as we are SIGIO'd */
+static unsigned int psb_poll(struct file *filp, struct poll_table_struct *wait)
+{
+	return POLLIN | POLLRDNORM;
+}
+
+static int psb_driver_open(struct drm_device *dev, struct drm_file *priv)
+{
+	DRM_DEBUG("\n");
+	return PVRSRVOpen(dev, priv);
+}
+
+static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd,
+			       unsigned long arg)
+{
+	struct drm_file *file_priv = filp->private_data;
+	struct drm_device *dev = file_priv->minor->dev;
+	unsigned int nr = DRM_IOCTL_NR(cmd);
+	long ret;
+
+	DRM_DEBUG("cmd = %x, nr = %x\n", cmd, nr);
+	/*
+	 * The driver private ioctls and TTM ioctls should be
+	 * thread-safe.
+	 */
+
+	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
+	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
+		struct drm_ioctl_desc *ioctl =
+		    &psb_ioctls[nr - DRM_COMMAND_BASE];
+
+		if (unlikely(ioctl->cmd != cmd)) {
+			DRM_ERROR("Invalid drm cmnd %d ioctl->cmd %x, cmd %x\n",
+				  nr - DRM_COMMAND_BASE, ioctl->cmd, cmd);
+			return -EINVAL;
+		}
+	}
+	/*
+	 * Not all old drm ioctls are thread-safe.
+	 */
+
+	ret = drm_ioctl(filp, cmd, arg);
+
+	return ret;
+}
+
+static int psb_blc_proc_show(struct seq_file *seq, void *v)
+{
+	struct drm_minor *minor = (struct drm_minor *)seq->private;
+	struct drm_device *dev = minor->dev;
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *)dev->dev_private;
+	int user_brightness = 0;
+	int final_brightness = 0;
+
+	user_brightness = psb_get_brightness(NULL);
+	final_brightness = (user_brightness * dev_priv->blc_adj1) / 255;
+	final_brightness = (final_brightness * dev_priv->blc_adj2) / 255;
+
+	DRM_INFO("%i\n", final_brightness);
+	seq_printf(seq, "%i\n", final_brightness);
+
+	return 0;
+}
+
+static int psb_blc_proc_open(struct inode *inode, struct file *file)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)
+	return single_open(file, psb_blc_proc_show, PDE(inode)->data);
+#else
+	return single_open(file, psb_blc_proc_show, PDE_DATA(inode));
+#endif
+}
+
+static const struct file_operations psb_blc_proc_fops = {
+	.owner = THIS_MODULE,
+	.open = psb_blc_proc_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+#if KEEP_UNUSED_CODE_DRIVER_DISPATCH
+static int psb_rtpm_read(char *buf, char **start, off_t offset, int request,
+			 int *eof, void *data)
+{
+	PSB_DEBUG_ENTRY("Current Runtime PM delay for GFX: %d (ms)\n",
+			gfxrtdelay);
+
+	return 0;
+}
+#endif /* if KEEP_UNUSED_CODE_DRIVER_DISPATCH */
+
+#if KEEP_UNUSED_CODE_DRIVER_DISPATCH
+static int psb_rtpm_write(struct file *file, const char *buffer,
+			  unsigned long count, void *data)
+{
+	char buf[2];
+	int temp = 0;
+	if (count != sizeof(buf)) {
+		return -EINVAL;
+	} else {
+		if (copy_from_user(buf, buffer, count))
+			return -EINVAL;
+		if (buf[count - 1] != '\n')
+			return -EINVAL;
+		temp = buf[0] - '0';
+		switch (temp) {
+		case 1:
+			gfxrtdelay = 10 * 1000;
+			break;
+
+		case 2:
+			gfxrtdelay = 20 * 1000;
+			break;
+		default:
+			gfxrtdelay = 30 * 1000;
+			break;
+		}
+		PSB_DEBUG_ENTRY("Runtime PM delay set for GFX: %d (ms)\n",
+				gfxrtdelay);
+	}
+	return count;
+}
+#endif /* if KEEP_UNUSED_CODE_DRIVER_DISPATCH */
+
+#if KEEP_UNUSED_CODE_DRIVER_DISPATCH
+static int psb_gfx_pm_read(char *buf, char **start, off_t offset, int request,
+			   int *eof, void *data)
+{
+	printk(KERN_ALERT "drm_psb_gfx_pm: %d\n", drm_psb_gfx_pm);
+
+	return 0;
+}
+#endif /* if KEEP_UNUSED_CODE_DRIVER_DISPATCH */
+
+#if KEEP_UNUSED_CODE_DRIVER_DISPATCH
+static int psb_gfx_pm_write(struct file *file, const char *buffer,
+			    unsigned long count, void *data)
+{
+	char buf[2];
+	if (count != sizeof(buf)) {
+		return -EINVAL;
+	} else {
+		if (copy_from_user(buf, buffer, count))
+			return -EINVAL;
+		if (buf[count - 1] != '\n')
+			return -EINVAL;
+		drm_psb_gfx_pm = buf[0] - '0';
+	}
+
+	printk(KERN_ALERT "\n drm_psb_gfx_pm: %x\n", drm_psb_gfx_pm);
+
+	if (drm_psb_gfx_pm) {
+		printk(KERN_ALERT "\n Starting Test Sequence: %x\n",
+		       drm_psb_gfx_pm);
+#if 0	/* IF MRFLD */
+		{
+			int i = 0;
+			/*  Invalid - MRFLD_GFX_ALL_ISLANDS is a bit mask.*/
+			for (i = 0; i < MRFLD_GFX_ALL_ISLANDS; i++) {
+				mrfld_set_power_state(OSPM_DISPLAY_ISLAND,
+							i,
+							POWER_ISLAND_DOWN);
+				mdelay(1);
+				mrfld_set_power_state(OSPM_DISPLAY_ISLAND,
+							i,
+							POWER_ISLAND_UP);
+			}
+		}
+#else
+		mrfld_set_power_state(OSPM_DISPLAY_ISLAND,
+					MRFLD_GFX_ALL_ISLANDS,
+					POWER_ISLAND_DOWN);
+		mdelay(1);
+		mrfld_set_power_state(OSPM_DISPLAY_ISLAND,
+					MRFLD_GFX_ALL_ISLANDS,
+					POWER_ISLAND_UP);
+#endif
+	}
+	return 0;
+}
+#endif /* if KEEP_UNUSED_CODE_DRIVER_DISPATCH */
+
+#if KEEP_UNUSED_CODE_DRIVER_DISPATCH
+static int psb_vsp_pm_read(char *buf, char **start, off_t offset, int request,
+			 int *eof, void *data)
+{
+	printk(KERN_ALERT "psb_vsp_pm_read: %d\n", drm_psb_vsp_pm);
+
+	return 0;
+}
+#endif /* if KEEP_UNUSED_CODE_DRIVER_DISPATCH */
+
+#if KEEP_UNUSED_CODE_DRIVER_DISPATCH
+static int psb_vsp_pm_write(struct file *file, const char *buffer,
+			  unsigned long count, void *data)
+{
+	char buf[2];
+	if (count != sizeof(buf)) {
+		return -EINVAL;
+	} else {
+		if (copy_from_user(buf, buffer, count))
+			return -EINVAL;
+		if (buf[count-1] != '\n')
+			return -EINVAL;
+		drm_psb_vsp_pm = buf[0] - '0';
+	}
+
+	printk(KERN_ALERT  "\n drm_psb_vsp_pm=%x\n", drm_psb_vsp_pm);
+
+	if (drm_psb_gfx_pm == 0) {
+		printk(KERN_ALERT "\n Starting power off the VSP...\n");
+
+		mrfld_set_power_state(OSPM_VIDEO_VPP_ISLAND,
+					0,
+					POWER_ISLAND_DOWN);
+
+	} else if (drm_psb_gfx_pm == 1) {
+		printk(KERN_ALERT "\n Starting power on the VSP...\n");
+		mrfld_set_power_state(OSPM_VIDEO_VPP_ISLAND,
+					0,
+					POWER_ISLAND_UP);
+	} else {
+		printk(KERN_ALERT "\n Don't support this operation! %x\n",
+			drm_psb_vsp_pm);
+	}
+
+	return 0;
+}
+#endif /* if KEEP_UNUSED_CODE_DRIVER_DISPATCH */
+
+#if KEEP_UNUSED_CODE_DRIVER_DISPATCH
+static int psb_ved_pm_read(char *buf, char **start, off_t offset, int request,
+			 int *eof, void *data)
+{
+	printk(KERN_ALERT "drm_psb_ved_pm: %d\n", drm_psb_ved_pm);
+
+	return 0;
+}
+#endif /* if KEEP_UNUSED_CODE_DRIVER_DISPATCH */
+
+#if KEEP_UNUSED_CODE_DRIVER_DISPATCH
+static int psb_ved_pm_write(struct file *file, const char *buffer,
+			  unsigned long count, void *data)
+{
+	char buf[2];
+	if (count != sizeof(buf)) {
+		return -EINVAL;
+	} else {
+		if (copy_from_user(buf, buffer, count))
+			return -EINVAL;
+		if (buf[count-1] != '\n')
+			return -EINVAL;
+		drm_psb_ved_pm = buf[0] - '0';
+	}
+
+	printk(KERN_ALERT  "\n drm_psb_ved_pm=%x\n", drm_psb_ved_pm);
+
+	if (drm_psb_ved_pm == 0) {
+		printk(KERN_ALERT "\n Starting power off the VED...\n");
+
+		mrfld_set_power_state(OSPM_VIDEO_DEC_ISLAND,
+					0,
+					POWER_ISLAND_DOWN);
+
+	} else if (drm_psb_ved_pm == 1) {
+		printk(KERN_ALERT "\n Starting power on the VED...\n");
+		mrfld_set_power_state(OSPM_VIDEO_DEC_ISLAND,
+					0,
+					POWER_ISLAND_UP);
+	} else {
+		printk(KERN_ALERT "\n Don't support this operation! %x\n",
+			drm_psb_ved_pm);
+	}
+
+	return 0;
+}
+#endif /* if KEEP_UNUSED_CODE_DRIVER_DISPATCH */
+
+#if KEEP_UNUSED_CODE_DRIVER_DISPATCH
+static int psb_vec_pm_read(char *buf, char **start, off_t offset, int request,
+			 int *eof, void *data)
+{
+	printk(KERN_ALERT "drm_psb_vec_pm: %d\n", drm_psb_vec_pm);
+
+	return 0;
+}
+#endif /* if KEEP_UNUSED_CODE_DRIVER_DISPATCH */
+
+#if KEEP_UNUSED_CODE_DRIVER_DISPATCH
+static int psb_vec_pm_write(struct file *file, const char *buffer,
+			  unsigned long count, void *data)
+{
+	char buf[2];
+	if (count != sizeof(buf)) {
+		return -EINVAL;
+	} else {
+		if (copy_from_user(buf, buffer, count))
+			return -EINVAL;
+		if (buf[count-1] != '\n')
+			return -EINVAL;
+		drm_psb_vec_pm = buf[0] - '0';
+	}
+
+	printk(KERN_ALERT  "\n drm_psb_vec_pm=%x\n", drm_psb_vec_pm);
+
+	if (drm_psb_vec_pm == 0) {
+		printk(KERN_ALERT "\n Starting power off the VEC...\n");
+
+		mrfld_set_power_state(OSPM_VIDEO_ENC_ISLAND,
+					0,
+					POWER_ISLAND_DOWN);
+
+	} else if (drm_psb_vec_pm == 1) {
+		printk(KERN_ALERT "\n Starting power on the VEC...\n");
+		mrfld_set_power_state(OSPM_VIDEO_ENC_ISLAND,
+					0,
+					POWER_ISLAND_UP);
+	} else {
+		printk(KERN_ALERT "\n Don't support this operation! %x\n",
+			drm_psb_vec_pm);
+	}
+
+	return 0;
+}
+#endif /* if KEEP_UNUSED_CODE_DRIVER_DISPATCH */
+
+#if KEEP_UNUSED_CODE_DRIVER_DISPATCH
+static int psb_dsr_read(char *buf, char **start, off_t offset, int request,
+			int *eof, void *data)
+{
+	if (drm_psb_dsr)
+		DRM_INFO("GFX DSR: enabled	      ");
+	else
+		DRM_INFO("GFX DSR: disabled	      ");
+
+	return 0;
+}
+#endif /* if KEEP_UNUSED_CODE_DRIVER_DISPATCH */
+
+#if KEEP_UNUSED_CODE_DRIVER_DISPATCH
+static int psb_dsr_write(struct file *file, const char *buffer,
+			 unsigned long count, void *data)
+{
+	char buf[2];
+	if (count != sizeof(buf)) {
+		return -EINVAL;
+	} else {
+		if (copy_from_user(buf, buffer, count))
+			return -EINVAL;
+		if (buf[count - 1] != '\n')
+			return -EINVAL;
+		drm_psb_dsr = buf[0] - '0';
+	}
+}
+#endif /* if KEEP_UNUSED_CODE_DRIVER_DISPATCH */
+
+#if KEEP_UNUSED_CODE_DRIVER_DISPATCH
+static int psb_ospm_read(char *buf, char **start, off_t offset, int request,
+			 int *eof, void *data)
+{
+	struct drm_minor *minor = (struct drm_minor *)data;
+	struct drm_device *dev = minor->dev;
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *)dev->dev_private;
+	int len = 0;
+#ifdef OSPM_STAT
+	unsigned long on_time = 0;
+	unsigned long off_time = 0;
+#endif
+
+	*start = &buf[offset];
+	*eof = 0;
+
+	/*#ifdef SUPPORT_ACTIVE_POWER_MANAGEMENT
+	   DRM_INFO("GFX D0i3: enabled        ");
+	   #else
+	   DRM_INFO("GFX D0i3: disabled       ");
+	   #endif */
+	if (drm_psb_ospm)
+		DRM_INFO("GFX D0i3: enabled	      ");
+	else
+		DRM_INFO("GFX D0i3: disabled	      ");
+
+#ifdef OSPM_STAT
+	switch (dev_priv->graphics_state) {
+	case PSB_PWR_STATE_ON:
+		DRM_INFO("GFX state:%s\n", "on");
+		break;
+	case PSB_PWR_STATE_OFF:
+		DRM_INFO("GFX state:%s\n", "off");
+		break;
+	default:
+		DRM_INFO("GFX state:%s\n", "unknown");
+	}
+
+	on_time = dev_priv->gfx_on_time * 1000 / HZ;
+	off_time = dev_priv->gfx_off_time * 1000 / HZ;
+	switch (dev_priv->graphics_state) {
+	case PSB_PWR_STATE_ON:
+		on_time += (jiffies - dev_priv->gfx_last_mode_change) *
+		    1000 / HZ;
+		break;
+	case PSB_PWR_STATE_OFF:
+		off_time += (jiffies - dev_priv->gfx_last_mode_change) *
+		    1000 / HZ;
+		break;
+	}
+	DRM_INFO("GFX(count/ms):\n");
+	DRM_INFO("on:%lu/%lu, off:%lu/%lu\n",
+		 dev_priv->gfx_on_cnt, on_time, dev_priv->gfx_off_cnt,
+		 off_time);
+#endif
+	if (len > request + offset)
+		return request;
+	*eof = 1;
+	return len - offset;
+}
+#endif /* if KEEP_UNUSED_CODE_DRIVER_DISPATCH */
+
+#if KEEP_UNUSED_CODE_DRIVER_DISPATCH
+static int psb_ospm_write(struct file *file, const char *buffer,
+			  unsigned long count, void *data)
+{
+	char buf[2];
+	if (count != sizeof(buf)) {
+		return -EINVAL;
+	} else {
+		if (copy_from_user(buf, buffer, count))
+			return -EINVAL;
+		if (buf[count - 1] != '\n')
+			return -EINVAL;
+		drm_psb_ospm = buf[0] - '0';
+		PSB_DEBUG_ENTRY(" SGX (D0i3) drm_psb_ospm: %d\n",
+				drm_psb_ospm);
+		/*Work around for video encode, it needs sgx always on */
+		if (!drm_psb_ospm) {
+			/* So weird */
+			power_island_get(OSPM_GRAPHICS_ISLAND);
+			power_island_put(OSPM_GRAPHICS_ISLAND);
+		}
+	}
+	return count;
+}
+#endif /* if KEEP_UNUSED_CODE_DRIVER_DISPATCH */
+
+#if KEEP_UNUSED_CODE_DRIVER_DISPATCH
+static int psb_display_register_read(char *buf, char **start, off_t offset,
+				     int request, int *eof, void *data)
+{
+	struct drm_minor *minor = (struct drm_minor *)data;
+	struct drm_device *dev = minor->dev;
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *)dev->dev_private;
+	/*do nothing */
+	int len = dev_priv->count;
+	*eof = 1;
+	memcpy(buf, dev_priv->buf, dev_priv->count);
+	return len - offset;
+}
+#endif /* if KEEP_UNUSED_CODE_DRIVER_DISPATCH */
+
+#if KEEP_UNUSED_CODE_DRIVER_DISPATCH
+/*
+* use to read and write display register. and print to standard output.
+*/
+static int psb_display_register_write(struct file *file, const char *buffer,
+				      unsigned long count, void *data)
+{
+	struct drm_minor *minor = (struct drm_minor *)data;
+	struct drm_device *dev = minor->dev;
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *)dev->dev_private;
+	struct mdfld_dsi_config *dsi_config = dev_priv->dsi_configs[0];
+	int reg_val = 0;
+	char buf[256];
+	char op = '0';
+	int reg = 0, start = 0, end = 0;
+	unsigned int val = 0;
+	int len = 0;
+	int Offset = 0;
+	int ret = 0;
+
+	dev_priv->count = 0;
+	memset(buf, '\0', sizeof(buf));
+
+	if (count > sizeof(buf)) {
+		PSB_DEBUG_ENTRY
+		    ("The input is too bigger, kernel can not handle.\n");
+		return -EINVAL;
+	} else {
+		if (copy_from_user(buf, buffer, count))
+			return -EINVAL;
+		if (buf[count - 1] != '\n')
+			return -EINVAL;
+		PSB_DEBUG_ENTRY("input = %s", buf);
+	}
+
+	sscanf(buf, "%c%x%x", &op, &reg, &val);
+
+	if (op != 'r' && op != 'w' && op != 'a') {
+		PSB_DEBUG_ENTRY("The input format is not right!\n");
+		PSB_DEBUG_ENTRY("for exampe: r 70184(read register 70184.)\n");
+		PSB_DEBUG_ENTRY("for exampe: w 70184 123(write register 70184 with value 123.)\n");
+		PSB_DEBUG_ENTRY("for exmape: a 60000 60010(read all registers start at 60000 and end at 60010.\n)");
+		return -EINVAL;
+	}
+	if ((reg < 0xa000 || reg > 0x720ff) &&
+				(reg < 0x40 || reg > 0x64)) {
+		PSB_DEBUG_ENTRY("the register is out of display controller registers rang.\n");
+		return -EINVAL;
+	}
+
+	if (val < 0) {
+		PSB_DEBUG_ENTRY("the register value is should be greater than zero.\n");
+		return -EINVAL;
+	}
+
+	if ((reg % 0x4) != 0) {
+		PSB_DEBUG_ENTRY("the register address should aligned to 4 byte. please refrence display controller specification.\n");
+		return -EINVAL;
+	}
+
+	if (!power_island_get(OSPM_DISPLAY_ISLAND)) {
+		PSB_DEBUG_ENTRY("Display controller can not power on.!\n");
+		return -EPERM;
+	}
+	if (IS_FLDS(dev)) {
+#ifndef CONFIG_MID_DSI_DPU
+		if (!(dev_priv->dsr_fb_update & MDFLD_DSR_MIPI_CONTROL) &&
+		    (dev_priv->dbi_panel_on || dev_priv->dbi_panel_on2))
+			mdfld_dsi_dbi_exit_dsr(dev,
+			MDFLD_DSR_MIPI_CONTROL, 0, 0);
+#endif
+	}
+
+#ifdef CONFIG_SUPPORT_MIPI
+	/*forbid dsr which will restore regs*/
+	mdfld_dsi_dsr_forbid(dsi_config);
+#endif
+
+	if (op == 'r') {
+		if (reg >= 0xa000) {
+			reg_val = REG_READ(reg);
+			PSB_DEBUG_ENTRY("Read :reg=0x%08x , val=0x%08x.\n", reg,
+					reg_val);
+		}
+		dev_priv->count = sprintf(dev_priv->buf, "%08x %08x\n", reg,
+					  reg_val);
+	}
+	if (op == 'w') {
+		if (reg >= 0xa000) {
+			reg_val = REG_READ(reg);
+			PSB_DEBUG_ENTRY
+			    ("Before change:reg=0x%08x , val=0x%08x.\n", reg,
+			     reg_val);
+			REG_WRITE(reg, val);
+			reg_val = REG_READ(reg);
+			PSB_DEBUG_ENTRY
+			    ("After change:reg=0x%08x , val=0x%08x.\n", reg,
+			     reg_val);
+		}
+	}
+
+	if (op == 'a') {
+		start = reg;
+		end = val;
+		PSB_DEBUG_ENTRY("start:0x%08x\n", start);
+		PSB_DEBUG_ENTRY("end:  0x%08x\n", end);
+		if ((start % 0x4) != 0) {
+			PSB_DEBUG_ENTRY
+			("The start address should be 4 byte aligned. Please reference the display controller specification.\n");
+			ret = -EINVAL;
+			goto fun_exit;
+		}
+
+		if ((end % 0x4) != 0) {
+			PSB_DEBUG_ENTRY
+			("The end address should be 4 byte aligned. Please reference the display controller specification.\n");
+			ret = -EINVAL;
+			goto fun_exit;
+		}
+
+		len = end - start + 1;
+		if (len <= 0)
+			PSB_DEBUG_ENTRY("The end address should be greater than the start address.\n");
+
+		if (end < 0xa000 || end > 0x720ff) {
+			PSB_DEBUG_ENTRY
+			("The end address is out of the display controller register range.\n");
+			ret = -EINVAL;
+			goto fun_exit;
+		}
+
+		if (start < 0xa000 || start > 0x720ff) {
+			PSB_DEBUG_ENTRY
+			    ("The start address is out of the display controller register range.\n");
+			ret = -EINVAL;
+			goto fun_exit;
+		}
+		for (Offset = start; Offset < end;
+					Offset = Offset + 0x10) {
+			if (reg >= 0xa000) {
+				PSB_DEBUG_ENTRY
+				    ("0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+				     Offset, REG_READ(Offset + 0x0),
+				     REG_READ(Offset + 0x4),
+				     REG_READ(Offset + 0x8),
+				     REG_READ(Offset + 0xc));
+
+				dev_priv->count +=
+				    sprintf(dev_priv->buf + dev_priv->count,
+						"%08x %08x %08x %08x %08x\n",
+					    Offset, REG_READ(Offset + 0x0),
+					    REG_READ(Offset + 0x4),
+					    REG_READ(Offset + 0x8),
+					    REG_READ(Offset + 0xc));
+			}
+		}
+	}
+fun_exit:
+#ifdef CONFIG_SUPPORT_MIPI
+	/*allow entering dsr*/
+	mdfld_dsi_dsr_allow(dsi_config);
+#endif
+
+	power_island_put(OSPM_DISPLAY_ISLAND);
+	return count;
+}
+#endif /* if KEEP_UNUSED_CODE_DRIVER_DISPATCH */
+
+#ifdef CONFIG_SUPPORT_HDMI
+ssize_t gpio_control_read(struct file *file, char *buffer,
+				      size_t count, loff_t *offset)
+{
+	unsigned int value = 0;
+	unsigned int pin_num = otm_hdmi_get_hpd_pin();
+	if (pin_num)
+		value = gpio_get_value(pin_num);
+
+	printk(KERN_ALERT "read pin_num: %8d value:%8d\n", pin_num, value);
+	return 0;
+}
+
+ssize_t gpio_control_write(struct file *file, const char *buffer,
+				      size_t count, loff_t *offset)
+{
+	char buf[2];
+	int  gpio_control;
+	bool auto_state = drm_hdmi_hpd_auto;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+	struct drm_minor *minor =
+		(struct drm_minor *) PDE_DATA(file_inode(file));
+#else
+	struct drm_minor *minor =
+		(struct drm_minor *) PDE(file->f_path.dentry->d_inode)->data;
+#endif
+	struct drm_device *dev = minor->dev;
+
+	if (count != sizeof(buf)) {
+		return -EINVAL;
+	} else {
+		if (copy_from_user(buf, buffer, count))
+			return -EINVAL;
+		if (buf[count-1] != '\n')
+			return -EINVAL;
+		gpio_control = buf[0] - '0';
+
+		switch (gpio_control) {
+		case 0x0:
+			otm_hdmi_override_cable_status(false, auto_state);
+			android_hdmi_irq_test(dev);
+			break;
+		case 0x1:
+			otm_hdmi_override_cable_status(true, auto_state);
+			android_hdmi_irq_test(dev);
+			break;
+		default:
+			printk(KERN_ALERT "invalid parameters\n");
+		}
+	}
+	return count;
+}
+
+static ssize_t csc_control_read(struct file *file, char __user *buf,
+				    size_t nbytes,loff_t *ppos)
+{
+	return 0;
+}
+
+static ssize_t csc_control_write(struct file *file, const char *buffer,
+				      size_t count, loff_t *ppos)
+{
+	char buf[2];
+	int  csc_control;
+	struct drm_minor *minor = (struct drm_minor *)PDE_DATA(file_inode(file));
+	struct drm_device *dev = minor->dev;
+	struct csc_setting csc;
+	struct gamma_setting gamma;
+
+	if (count != sizeof(buf)) {
+		return -EINVAL;
+	} else {
+		if (copy_from_user(buf, buffer, count))
+			return -EINVAL;
+		if (buf[count-1] != '\n')
+			return -EINVAL;
+		csc_control = buf[0] - '0';
+		PSB_DEBUG_ENTRY(" csc control: %d\n", csc_control);
+
+		switch (csc_control) {
+		case 0x0:
+			csc.pipe = 0;
+			csc.type = CSC_REG_SETTING;
+			csc.enable_state = true;
+			csc.data_len = CSC_REG_COUNT;
+			memcpy(csc.data.csc_reg_data, csc_setting, sizeof(csc.data.csc_reg_data));
+                        drm_psb_enable_color_conversion = 1;
+			mdfld_intel_crtc_set_color_conversion(dev, &csc);
+			break;
+		case 0x1:
+			gamma.pipe = 0;
+			gamma.type = GAMMA_REG_SETTING;
+			gamma.enable_state = true;
+			gamma.data_len = GAMMA_10_BIT_TABLE_COUNT;
+			memcpy(gamma.gamma_tableX100, gamma_setting, sizeof(gamma.gamma_tableX100));
+                        drm_psb_enable_gamma = 1;
+			mdfld_intel_crtc_set_gamma(dev, &gamma);
+			break;
+		default:
+			printk("invalied parameters\n");
+		}
+	}
+	return count;
+}
+
+static const struct file_operations psb_csc_proc_fops = {
+       .owner = THIS_MODULE,
+       .read = csc_control_read,
+       .write = csc_control_write,
+};
+
+static int psb_hdmi_proc_open(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
+static int psb_hdmi_proc_close(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
+static struct file_operations psb_hdmi_proc_fops = {
+	.owner	= THIS_MODULE,
+	.open	= psb_hdmi_proc_open,
+	.read	= gpio_control_read,
+	.write	= gpio_control_write,
+	.release= psb_hdmi_proc_close,
+};
+
+static int psb_hdmi_proc_init(struct drm_minor *minor)
+{
+	struct proc_dir_entry *gpio_setting;
+
+	gpio_setting = proc_create_data(GPIO_PROC_ENTRY,
+				0644, minor->proc_root,
+				&psb_hdmi_proc_fops, minor);
+
+	if (!gpio_setting)
+		return -1;
+
+	return 0;
+}
+
+#endif /* CONFIG_SUPPORT_HDMI */
+
+/* When a client dies:
+ *    - Check for and clean up flipped page state
+ */
+void psb_driver_preclose(struct drm_device *dev, struct drm_file *priv)
+{
+}
+
+static void psb_remove(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	drm_put_dev(dev);
+}
+
+static void psb_shutdown(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct drm_encoder *encoder;
+	struct drm_encoder_helper_funcs *enc_funcs;
+
+	if (dev_priv->early_suspended)
+		return;
+
+	mutex_lock(&dev->mode_config.mutex);
+
+	/*
+	 * We borrow the early_suspended to avoid entering flip path after
+	 * shutdown is called
+	 */
+	dev_priv->early_suspended = true;
+
+	/* wait for the previous flip to be finished */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		enc_funcs = encoder->helper_private;
+		if (!drm_helper_encoder_in_use(encoder))
+			continue;
+		if (enc_funcs && enc_funcs->save)
+			enc_funcs->save(encoder);
+	}
+	mutex_unlock(&dev->mode_config.mutex);
+}
+
+static int psb_proc_init(struct drm_minor *minor)
+{
+        struct proc_dir_entry *csc_setting;
+#ifdef CONFIG_SUPPORT_HDMI
+	psb_hdmi_proc_init(minor);
+#endif
+        csc_setting = proc_create_data(CSC_PROC_ENTRY, 0644, minor->proc_root, &psb_csc_proc_fops, minor);
+
+	return 0;
+}
+
+static void psb_proc_cleanup(struct drm_minor *minor)
+{
+#ifdef CONFIG_SUPPORT_HDMI
+	remove_proc_entry(GPIO_PROC_ENTRY, minor->proc_root);
+#endif
+	return;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+/*debugfs init entry*/
+static int psb_debugfs_init(struct drm_minor *minor)
+{
+	/*call original psb_proc_init*/
+	/*NOTE: why we are doing proc init here????*/
+	if (psb_proc_init(minor))
+		DRM_ERROR("psb_proc_init failed\n");
+
+	/*call debugfs init*/
+	return mdfld_debugfs_init(minor);
+}
+
+/*debugfs cleanup entry*/
+static void psb_debugfs_cleanup(struct drm_minor *minor)
+{
+	/*call original psb_proc_cleanup*/
+	/*NOTE: why we are doing proc cleanup here????*/
+	psb_proc_cleanup(minor);
+
+	/*call debugfs cleanup*/
+	mdfld_debugfs_cleanup(minor);
+}
+#endif
+static const struct dev_pm_ops psb_pm_ops = {
+	.runtime_suspend = rtpm_suspend,
+	.runtime_resume = rtpm_resume,
+	.runtime_idle = rtpm_idle,
+	.suspend_noirq = rtpm_suspend,
+	.resume_noirq = rtpm_resume,
+};
+
+static struct vm_operations_struct psb_ttm_vm_ops;
+
+/**
+ * NOTE: driver_private of drm_file is now a PVRSRV_FILE_PRIVATE_DATA struct
+ * pPriv in PVRSRV_FILE_PRIVATE_DATA contains the original psb_fpriv;
+ */
+int psb_open(struct inode *inode, struct file *filp)
+{
+	struct drm_file *file_priv;
+	struct drm_psb_private *dev_priv;
+	struct psb_fpriv *psb_fp;
+
+	int ret;
+
+	DRM_DEBUG("\n");
+
+	ret = drm_open(inode, filp);
+	if (unlikely(ret))
+		return ret;
+
+	psb_fp = kzalloc(sizeof(*psb_fp), GFP_KERNEL);
+
+	if (unlikely(psb_fp == NULL))
+		goto out_err0;
+
+	file_priv = (struct drm_file *) filp->private_data;
+
+	/* In case that the local file priv has created a master,
+	 * which has been referenced, even if it's not authenticated
+	 * (non-root user). */
+	if ((file_priv->minor->master)
+		&& (file_priv->master == file_priv->minor->master)
+		&& (!file_priv->is_master))
+		file_priv->is_master = 1;
+
+	dev_priv = psb_priv(file_priv->minor->dev);
+
+	DRM_DEBUG("is_master %d\n", file_priv->is_master ? 1 : 0);
+
+	psb_fp->tfile = ttm_object_file_init(dev_priv->tdev,
+					     PSB_FILE_OBJECT_HASH_ORDER);
+	psb_fp->bcd_index = -1;
+	if (unlikely(psb_fp->tfile == NULL))
+		goto out_err1;
+
+	if (!file_priv->driver_priv) {
+		DRM_ERROR("drm file private is NULL\n");
+		goto out_err1;
+	}
+
+	BCVideoSetPriv(file_priv, psb_fp);
+
+	if (unlikely(dev_priv->bdev.dev_mapping == NULL))
+		dev_priv->bdev.dev_mapping = dev_priv->dev->dev_mapping;
+
+	return 0;
+
+out_err1:
+	kfree(psb_fp);
+out_err0:
+	(void) drm_release(inode, filp);
+	return ret;
+}
+
+int psb_release(struct inode *inode, struct file *filp)
+{
+	struct drm_file *file_priv;
+	struct psb_fpriv *psb_fp;
+	struct drm_psb_private *dev_priv;
+	struct msvdx_private *msvdx_priv;
+	struct ttm_object_file *tfile;
+	struct psb_msvdx_ec_ctx *ec_ctx;
+	int ret, i;
+
+	file_priv = (struct drm_file *)filp->private_data;
+	psb_fp = BCVideoGetPriv(file_priv);
+	dev_priv = psb_priv(file_priv->minor->dev);
+	tfile = psb_fpriv(file_priv)->tfile;
+	msvdx_priv = (struct msvdx_private *)dev_priv->msvdx_private;
+
+	/* Set flag to clean-up platform IED state as
+		user space component might have died*/
+	mutex_lock(&g_ied_mutex);
+	for (i = 0; i < MAX_IED_SESSION; i++) {
+		if (g_ied_context[i] == file_priv->filp) {
+			g_ied_force_clean = true;
+			DRM_INFO("ied_ref: %d g_ied_context: %p\
+				ied_force_clean = TRUE\n",
+				g_ied_ref, g_ied_context[i]);
+			break;
+		}
+	}
+	mutex_unlock(&g_ied_mutex);
+
+#if 0
+	/*cleanup for msvdx */
+	if (msvdx_priv->tfile == BCVideoGetPriv(file_priv)->tfile) {
+		msvdx_priv->fw_status = 0;
+		msvdx_priv->host_be_opp_enabled = 0;
+		memset(&msvdx_priv->frame_info, 0,
+		       sizeof(struct drm_psb_msvdx_frame_info) *
+		       MAX_DECODE_BUFFERS);
+	}
+#endif
+
+	tng_topaz_handle_sigint(file_priv->minor->dev, filp);
+
+	BCVideoDestroyBuffers(psb_fp->bcd_index);
+
+	if (msvdx_priv->msvdx_ec_ctx[0] != NULL) {
+		for (i = 0; i < PSB_MAX_EC_INSTANCE; i++) {
+			if (msvdx_priv->msvdx_ec_ctx[i]->tfile == tfile)
+				break;
+		}
+
+		if (i < PSB_MAX_EC_INSTANCE) {
+			ec_ctx = msvdx_priv->msvdx_ec_ctx[i];
+			PSB_DEBUG_ENTRY("remove ec ctx with tfile %p\n",
+					ec_ctx->tfile);
+			ec_ctx->tfile = NULL;
+			ec_ctx->fence = PSB_MSVDX_INVALID_FENCE;
+		}
+	}
+
+	ttm_object_file_release(&psb_fp->tfile);
+	kfree(psb_fp);
+
+	/* remove video context */
+	/* psb_remove_videoctx(dev_priv, filp); */
+
+	ret = drm_release(inode, filp);
+
+	return ret;
+}
+
+/**
+ * if vm_pgoff < DRM_PSB_FILE_PAGE_OFFSET call directly to PVRMMap
+ */
+int psb_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_file *file_priv;
+	struct drm_psb_private *dev_priv;
+	int ret;
+
+	if (vma->vm_pgoff < DRM_PSB_FILE_PAGE_OFFSET ||
+	    vma->vm_pgoff > 2 * DRM_PSB_FILE_PAGE_OFFSET)
+		return PVRSRVMMap(filp, vma);
+
+	file_priv = (struct drm_file *) filp->private_data;
+	dev_priv = psb_priv(file_priv->minor->dev);
+
+	ret = ttm_bo_mmap(filp, vma, &dev_priv->bdev);
+	if (unlikely(ret != 0))
+		return ret;
+
+	if (unlikely(dev_priv->ttm_vm_ops == NULL)) {
+		dev_priv->ttm_vm_ops = (struct vm_operations_struct *)vma->vm_ops;
+		psb_ttm_vm_ops = *vma->vm_ops;
+		psb_ttm_vm_ops.fault = &psb_ttm_fault;
+	}
+
+	vma->vm_ops = &psb_ttm_vm_ops;
+
+	return 0;
+}
+
+static const struct file_operations driver_psb_fops = {
+	.owner = THIS_MODULE,
+	.open = psb_open,
+	.release = psb_release,
+	.unlocked_ioctl = psb_unlocked_ioctl,
+	.mmap = psb_mmap,
+	.poll = psb_poll,
+	.fasync = drm_fasync,
+	.read = drm_read,
+#ifdef CONFIG_COMPAT
+        .compat_ioctl = psb_compat_ioctl,
+#endif
+};
+
+static struct drm_driver driver = {
+	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
+	    DRIVER_IRQ_VBL | DRIVER_MODESET,
+	.load = psb_driver_load,
+	.unload = psb_driver_unload,
+
+	.ioctls = psb_ioctls,
+	.num_ioctls = DRM_ARRAY_SIZE(psb_ioctls),
+	.device_is_agp = psb_driver_device_is_agp,
+	.irq_preinstall = psb_irq_preinstall,
+	.irq_postinstall = psb_irq_postinstall,
+	.irq_uninstall = psb_irq_uninstall,
+	.irq_handler = psb_irq_handler,
+	.enable_vblank = psb_enable_vblank,
+	.disable_vblank = psb_disable_vblank,
+	.get_vblank_counter = psb_get_vblank_counter,
+	.get_vblank_timestamp = intel_get_vblank_timestamp,
+	.get_scanout_position = intel_get_crtc_scanoutpos,
+	.firstopen = NULL,
+	.lastclose = psb_lastclose,
+	.open = psb_driver_open,
+	.postclose = PVRSRVDrmPostClose,
+#if defined(CONFIG_DEBUG_FS)
+	.debugfs_init = psb_debugfs_init,
+	.debugfs_cleanup = psb_debugfs_cleanup,
+#endif
+
+#if KEEP_UNUSED_CODE_DRIVER_DISPATCH
+/*
+*	.get_map_ofs = drm_core_get_map_ofs,
+*	.get_reg_ofs = drm_core_get_reg_ofs,
+*/
+#endif /* if KEEP_UNUSED_CODE_DRIVER_DISPATCH */
+	.preclose = psb_driver_preclose,
+	.fops = &driver_psb_fops,
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+	.date = PSB_DRM_DRIVER_DATE,
+	.major = PSB_DRM_DRIVER_MAJOR,
+	.minor = PSB_DRM_DRIVER_MINOR,
+	.patchlevel = PSB_DRM_DRIVER_PATCHLEVEL
+};
+
+static struct pci_driver psb_pci_driver = {
+	.name = DRIVER_NAME,
+	.id_table = pciidlist,
+	.probe = psb_probe,
+	.remove = psb_remove,
+#ifdef CONFIG_PM
+	.driver.pm = &psb_pm_ops,
+#endif
+	.shutdown = psb_shutdown
+};
+
+static int psb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	pdev->d3_delay = 0;
+	return drm_get_pci_dev(pdev, ent, &driver);
+}
+
+#ifndef MODULE
+static __init int parse_panelid(char *arg)
+{
+	/* panel ID can be passed in as a cmdline parameter */
+	/* to enable this feature add panelid=TMD to cmdline for TMD panel */
+	if (!arg)
+		return -EINVAL;
+
+	if (!strcasecmp(arg, "TMD_CMD"))
+		PanelID = TMD_CMD;
+	else if (!strcasecmp(arg, "TPO_CMD"))
+		PanelID = TPO_CMD;
+	else if (!strcasecmp(arg, "PYR_CMD"))
+		PanelID = PYR_CMD;
+	else if (!strcasecmp(arg, "TMD_VID"))
+		PanelID = TMD_VID;
+	else if (!strcasecmp(arg, "TPO_VID"))
+		PanelID = TPO_VID;
+	else if (!strcasecmp(arg, "PYR_VID"))
+		PanelID = PYR_VID;
+	else
+		PanelID = GCT_DETECT;
+
+	return 0;
+}
+
+early_param("panelid", parse_panelid);
+#endif
+
+#ifndef MODULE
+static __init int parse_hdmi_edid(char *arg)
+{
+	/* HDMI EDID info can be passed in as a cmdline parameter,
+	 * and need to remove it after we can get EDID info via MSIC.*/
+	if ((!arg) || (strlen(arg) >= 20))
+		return -EINVAL;
+
+	strncpy(HDMI_EDID, arg, sizeof(HDMI_EDID));
+
+	return 0;
+}
+
+early_param("hdmi_edid", parse_hdmi_edid);
+#endif
+
+static int __init psb_init(void)
+{
+	int ret;
+
+#if defined(MODULE) && defined(CONFIG_NET)
+	psb_kobject_uevent_init();
+#endif
+
+	PVRSRVQueryIoctls(psb_ioctls);
+
+	BCVideoQueryIoctls(psb_ioctls);
+
+	ret = drm_pci_init(&driver, &psb_pci_driver);
+	if (ret != 0) {
+		DRM_ERROR("drm_init fail!\n");
+		return ret;
+	}
+#ifdef WANT_GFX
+	/*init for bc_video */
+	ret = BCVideoModInit();
+	if (ret != 0)
+		return ret;
+#endif
+
+#ifdef CONFIG_SUPPORT_HDMI
+	if (otm_hdmi_hpd_init() == OTM_HDMI_SUCCESS)
+		DRM_INFO("OTM_HDMI_INIT_SUCCESS\n");
+	else
+		DRM_INFO("OTM_HDMI_INIT_FAILE\n");
+#endif
+
+	return ret;
+}
+
+static void __exit psb_exit(void)
+{
+	int ret;
+	/*cleanup for bc_video */
+	ret = BCVideoModCleanup();
+	if (ret != 0)
+		return;
+	drm_pci_exit(&driver, &psb_pci_driver);
+}
+
+#ifdef CONFIG_COMPAT
+
+#define PVR_DRM_SRVKM_CMD       DRM_PVR_RESERVED1
+#define PVR_DRM_IS_MASTER_CMD   DRM_PVR_RESERVED4
+#define PVR_DRM_DBGDRV_CMD      DRM_PVR_RESERVED6
+
+#define PVR_DRM_SRVKM_IOCTL \
+	DRM_IOW(DRM_COMMAND_BASE + PVR_DRM_SRVKM_CMD, PVRSRV_BRIDGE_PACKAGE)
+
+#define PVR_DRM_IS_MASTER_IOCTL \
+	DRM_IO(DRM_COMMAND_BASE + PVR_DRM_IS_MASTER_CMD)
+
+typedef struct drm_psb_mem_alloc32 {
+	int region;
+	int alignment;
+	int size;
+	u32 region_offset;	/* offset from start of fb or agp */
+} drm_psb_mem_alloc32_t;
+
+typedef struct drm_psb_mem_alloc {
+	int region;
+	int alignment;
+	int size;
+	int __user *region_offset;	/* offset from start of fb or agp */
+} drm_psb_mem_alloc_t;
+
+typedef struct pvrsrv_bridge_package_32
+{
+	u32	ui32BridgeID;		/*!< ioctl/drvesc index */
+	u32	ui32FunctionID;			/*! bridge function ID */
+	u32	ui32Size;			/*!< size of structure */
+	u32	pvParamIn;			/*!< input data buffer */
+	u32	ui32InBufferSize;		/*!< size of input data buf */
+	u32	pvParamOut;			/*!< output data buffer */
+	u32	ui32OutBufferSize;		/*!< size of output data buf */
+} pvrsrv_bridge_package_32_t;
+
+int compat_PVRSRV_BridgeDispatchKM2(struct file *filp, unsigned int cmd,
+					unsigned long arg)
+{
+	int retval;
+	pvrsrv_bridge_package_32_t req32;
+	PVRSRV_BRIDGE_PACKAGE __user *request;
+
+	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32))) {
+		printk(KERN_ERR "%s: copy_from_user failed\n", __func__);
+		return -EFAULT;
+	}
+	request = compat_alloc_user_space(sizeof(*request));
+	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+		|| __put_user(req32.ui32BridgeID, &request->ui32BridgeID)
+		|| __put_user(req32.ui32FunctionID, &request->ui32FunctionID)
+		|| __put_user(req32.ui32Size, &request->ui32Size)
+		|| __put_user((void __user *)(unsigned long)req32.pvParamIn, &request->pvParamIn)
+		|| __put_user(req32.ui32InBufferSize, &request->ui32InBufferSize)
+		|| __put_user((void __user *)(unsigned long)req32.pvParamOut, &request->pvParamOut)
+		|| __put_user(req32.ui32OutBufferSize, &request->ui32OutBufferSize)) {
+		printk(KERN_ERR "%s: __put_user failed\n", __func__);
+		return -EFAULT;
+	}
+
+	/* Correct cmd with the proper size */
+	cmd &= ~(_IOC_SIZEMASK << _IOC_SIZESHIFT);
+	cmd |= (sizeof(*request) << _IOC_SIZESHIFT);
+
+	retval = drm_ioctl(filp, cmd, (unsigned long)request);
+	return retval;
+}
+
+struct drm_psb_register_rw_arg_32 {
+	uint32_t b_force_hw_on;
+	uint32_t display_read_mask;
+	uint32_t display_write_mask;
+	struct {
+		uint32_t pfit_controls;
+		uint32_t pfit_autoscale_ratios;
+		uint32_t pfit_programmed_scale_ratios;
+		uint32_t pipeasrc;
+		uint32_t pipebsrc;
+		uint32_t vtotal_a;
+		uint32_t vtotal_b;
+		uint32_t dspcntr_a;
+		uint32_t dspcntr_b;
+		uint32_t pipestat_a;
+		uint32_t int_mask;
+		uint32_t int_enable;
+	} display;
+	uint32_t overlay_read_mask;
+	uint32_t overlay_write_mask;
+	struct {
+		uint32_t OVADD;
+		uint32_t OGAMC0;
+		uint32_t OGAMC1;
+		uint32_t OGAMC2;
+		uint32_t OGAMC3;
+		uint32_t OGAMC4;
+		uint32_t OGAMC5;
+		uint32_t IEP_ENABLED;
+		uint32_t IEP_BLE_MINMAX;
+		uint32_t IEP_BSSCC_CONTROL;
+		uint32_t index;
+		uint32_t b_wait_vblank;
+		uint32_t b_wms;
+		uint32_t buffer_handle;
+	} overlay;
+	uint32_t vsync_operation_mask;
+	struct {
+		uint32_t pipe;
+		int vsync_pipe;
+		int vsync_count;
+		uint64_t timestamp  __attribute__ ((__packed__));
+	} vsync;
+	uint32_t sprite_enable_mask;
+	uint32_t sprite_disable_mask;
+	struct {
+		uint32_t dspa_control;
+		uint32_t dspa_key_value;
+		uint32_t dspa_key_mask;
+		uint32_t dspc_control;
+		uint32_t dspc_stride;
+		uint32_t dspc_position;
+		uint32_t dspc_linear_offset;
+		uint32_t dspc_size;
+		uint32_t dspc_surface;
+	} sprite;
+	uint32_t subpicture_enable_mask;
+	uint32_t subpicture_disable_mask;
+	struct {
+		uint32_t CursorADDR;
+		uint32_t xPos;
+		uint32_t yPos;
+		uint32_t CursorSize;
+	} cursor;
+	uint32_t cursor_enable_mask;
+	uint32_t cursor_disable_mask;
+	uint32_t plane_enable_mask;
+	uint32_t plane_disable_mask;
+	uint32_t get_plane_state_mask;
+	struct {
+		uint32_t type;
+		uint32_t index;
+		uint32_t ctx;
+	} plane;
+}__attribute__ ((__packed__));
+
+int compat_PVRSRV_BridgeDispatchKM3(struct file *filp, unsigned int cmd,
+					unsigned long arg)
+{
+	int retval;
+	struct drm_psb_register_rw_arg_32 req32;
+	struct drm_psb_register_rw_arg __user *request;
+	struct drm_psb_register_rw_arg returnBuffer;
+	struct drm_psb_register_rw_arg_32 __user *p_buf =
+		(struct drm_psb_register_rw_arg_32 __user *)((void __user *)arg);
+
+	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32))) {
+		printk(KERN_ERR "%s: copy_from_user failed\n", __func__);
+		return -EFAULT;
+	}
+
+	request = compat_alloc_user_space(sizeof(*request));
+	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+		|| __put_user(req32.b_force_hw_on, &request->b_force_hw_on)
+		|| __put_user(req32.display_read_mask, &request->display_read_mask)
+		|| __put_user(req32.display_write_mask, &request->display_write_mask)
+		|| __put_user(req32.display.pfit_controls, &request->display.pfit_controls)
+		|| __put_user(req32.display.pfit_autoscale_ratios, &request->display.pfit_autoscale_ratios)
+		|| __put_user(req32.display.pfit_programmed_scale_ratios, &request->display.pfit_programmed_scale_ratios)
+		|| __put_user(req32.display.pipeasrc, &request->display.pipeasrc)
+		|| __put_user(req32.display.pipebsrc, &request->display.pipebsrc)
+		|| __put_user(req32.display.vtotal_a, &request->display.vtotal_a)
+		|| __put_user(req32.display.vtotal_b, &request->display.vtotal_b)
+		|| __put_user(req32.display.dspcntr_a, &request->display.dspcntr_a)
+		|| __put_user(req32.display.dspcntr_b, &request->display.dspcntr_b)
+		|| __put_user(req32.display.pipestat_a, &request->display.pipestat_a)
+		|| __put_user(req32.display.int_mask, &request->display.int_mask)
+		|| __put_user(req32.display.int_enable, &request->display.int_enable)
+		|| __put_user(req32.overlay_read_mask, &request->overlay_read_mask)
+		|| __put_user(req32.overlay_write_mask, &request->overlay_write_mask)
+		|| __put_user(req32.overlay.OVADD, &request->overlay.OVADD)
+		|| __put_user(req32.overlay.OGAMC0, &request->overlay.OGAMC0)
+		|| __put_user(req32.overlay.OGAMC1, &request->overlay.OGAMC1)
+		|| __put_user(req32.overlay.OGAMC2, &request->overlay.OGAMC2)
+		|| __put_user(req32.overlay.OGAMC3, &request->overlay.OGAMC3)
+		|| __put_user(req32.overlay.OGAMC4, &request->overlay.OGAMC4)
+		|| __put_user(req32.overlay.OGAMC5, &request->overlay.OGAMC5)
+		|| __put_user(req32.overlay.IEP_ENABLED, &request->overlay.IEP_ENABLED)
+		|| __put_user(req32.overlay.IEP_BLE_MINMAX, &request->overlay.IEP_BLE_MINMAX)
+		|| __put_user(req32.overlay.IEP_BSSCC_CONTROL, &request->overlay.IEP_BSSCC_CONTROL)
+		|| __put_user(req32.overlay.index, &request->overlay.index)
+		|| __put_user(req32.overlay.b_wait_vblank, &request->overlay.b_wait_vblank)
+		|| __put_user(req32.overlay.b_wms, &request->overlay.b_wms)
+		|| __put_user(req32.overlay.buffer_handle, &request->overlay.buffer_handle)
+		|| __put_user(req32.vsync_operation_mask, &request->vsync_operation_mask)
+		|| __put_user(req32.vsync.pipe, &request->vsync.pipe)
+		|| __put_user(req32.vsync.vsync_pipe, &request->vsync.vsync_pipe)
+		|| __put_user(req32.vsync.vsync_count, &request->vsync.vsync_count)
+		|| __put_user(req32.vsync.timestamp, &request->vsync.timestamp)
+		|| __put_user(req32.sprite_enable_mask, &request->sprite_enable_mask)
+		|| __put_user(req32.sprite_disable_mask, &request->sprite_disable_mask)
+		|| __put_user(req32.sprite.dspa_control, &request->sprite.dspa_control)
+		|| __put_user(req32.sprite.dspa_key_value, &request->sprite.dspa_key_value)
+		|| __put_user(req32.sprite.dspa_key_mask, &request->sprite.dspa_key_mask)
+		|| __put_user(req32.sprite.dspc_control, &request->sprite.dspc_control)
+		|| __put_user(req32.sprite.dspc_stride, &request->sprite.dspc_stride)
+		|| __put_user(req32.sprite.dspc_position, &request->sprite.dspc_position)
+		|| __put_user(req32.sprite.dspc_linear_offset, &request->sprite.dspc_linear_offset)
+		|| __put_user(req32.sprite.dspc_size, &request->sprite.dspc_size)
+		|| __put_user(req32.sprite.dspc_surface, &request->sprite.dspc_surface)
+		|| __put_user(req32.subpicture_enable_mask, &request->subpicture_enable_mask)
+		|| __put_user(req32.subpicture_disable_mask, &request->subpicture_disable_mask)
+		|| __put_user(req32.cursor.CursorADDR, &request->cursor.CursorADDR)
+		|| __put_user(req32.cursor.xPos, &request->cursor.xPos)
+		|| __put_user(req32.cursor.yPos, &request->cursor.yPos)
+		|| __put_user(req32.cursor.CursorSize, &request->cursor.CursorSize)
+		|| __put_user(req32.cursor_enable_mask, &request->cursor_enable_mask)
+		|| __put_user(req32.cursor_disable_mask, &request->cursor_disable_mask)
+		|| __put_user(req32.plane_enable_mask, &request->plane_enable_mask)
+		|| __put_user(req32.plane_disable_mask, &request->plane_disable_mask)
+		|| __put_user(req32.get_plane_state_mask, &request->get_plane_state_mask)
+		|| __put_user(req32.plane.type, &request->plane.type)
+		|| __put_user(req32.plane.index, &request->plane.index)
+		|| __put_user(req32.plane.ctx, &request->plane.ctx)) {
+		printk(KERN_ERR "%s: __put_user failed\n", __func__);
+		return -EFAULT;
+	}
+
+	/* Correct cmd with the proper size */
+	cmd &= ~(_IOC_SIZEMASK << _IOC_SIZESHIFT);
+	cmd |= (sizeof(*request) << _IOC_SIZESHIFT);
+
+	retval = drm_ioctl(filp, cmd, (unsigned long)request);
+	if (copy_from_user(&returnBuffer, (void __user *)request, sizeof(returnBuffer))) {
+		printk(KERN_ERR "%s: copy_from_user failed\n", __func__);
+		return -EFAULT;
+	}
+
+	if (!access_ok(VERIFY_WRITE, request, sizeof(*p_buf))
+		|| __put_user(returnBuffer.b_force_hw_on, &p_buf->b_force_hw_on)
+		|| __put_user(returnBuffer.display_read_mask, &p_buf->display_read_mask)
+		|| __put_user(returnBuffer.display_write_mask, &p_buf->display_write_mask)
+		|| __put_user(returnBuffer.display.pfit_controls, &p_buf->display.pfit_controls)
+		|| __put_user(returnBuffer.display.pfit_autoscale_ratios, &p_buf->display.pfit_autoscale_ratios)
+		|| __put_user(returnBuffer.display.pfit_programmed_scale_ratios, &p_buf->display.pfit_programmed_scale_ratios)
+		|| __put_user(returnBuffer.display.pipeasrc, &p_buf->display.pipeasrc)
+		|| __put_user(returnBuffer.display.pipebsrc, &p_buf->display.pipebsrc)
+		|| __put_user(returnBuffer.display.vtotal_a, &p_buf->display.vtotal_a)
+		|| __put_user(returnBuffer.display.vtotal_b, &p_buf->display.vtotal_b)
+		|| __put_user(returnBuffer.display.dspcntr_a, &p_buf->display.dspcntr_a)
+		|| __put_user(returnBuffer.display.dspcntr_b, &p_buf->display.dspcntr_b)
+		|| __put_user(returnBuffer.display.pipestat_a, &p_buf->display.pipestat_a)
+		|| __put_user(returnBuffer.display.int_mask, &p_buf->display.int_mask)
+		|| __put_user(returnBuffer.display.int_enable, &p_buf->display.int_enable)
+		|| __put_user(returnBuffer.overlay_read_mask, &p_buf-> overlay_read_mask)
+		|| __put_user(returnBuffer.overlay_write_mask, &p_buf->overlay_write_mask)
+		|| __put_user(returnBuffer.overlay.OVADD, &p_buf->overlay.OVADD)
+		|| __put_user(returnBuffer.overlay.OGAMC0, &p_buf->overlay.OGAMC0)
+		|| __put_user(returnBuffer.overlay.OGAMC1, &p_buf->overlay.OGAMC1)
+		|| __put_user(returnBuffer.overlay.OGAMC2, &p_buf->overlay.OGAMC2)
+		|| __put_user(returnBuffer.overlay.OGAMC3, &p_buf->overlay.OGAMC3)
+		|| __put_user(returnBuffer.overlay.OGAMC4, &p_buf->overlay.OGAMC4)
+		|| __put_user(returnBuffer.overlay.OGAMC5, &p_buf->overlay.OGAMC5)
+		|| __put_user(returnBuffer.overlay.IEP_ENABLED, &p_buf->overlay.IEP_ENABLED)
+		|| __put_user(returnBuffer.overlay.IEP_BLE_MINMAX, &p_buf->overlay.IEP_BLE_MINMAX)
+		|| __put_user(returnBuffer.overlay.IEP_BSSCC_CONTROL, &p_buf->overlay.IEP_BSSCC_CONTROL)
+		|| __put_user(returnBuffer.overlay.index, &p_buf->overlay.index)
+		|| __put_user(returnBuffer.overlay.b_wait_vblank, &p_buf->overlay.b_wait_vblank)
+		|| __put_user(returnBuffer.overlay.b_wms, &p_buf-> overlay.b_wms)
+		|| __put_user(returnBuffer.overlay.buffer_handle, &p_buf->overlay.buffer_handle)
+		|| __put_user(returnBuffer.vsync_operation_mask, &p_buf->vsync_operation_mask)
+		|| __put_user(returnBuffer.vsync.pipe, &p_buf->vsync.pipe)
+		|| __put_user(returnBuffer.vsync.vsync_pipe, &p_buf->vsync.vsync_pipe)
+		|| __put_user(returnBuffer.vsync.vsync_count, &p_buf->vsync.vsync_count)
+		|| __put_user(returnBuffer.vsync.timestamp, &p_buf->vsync.timestamp)
+		|| __put_user(returnBuffer.sprite_enable_mask, &p_buf->sprite_enable_mask)
+		|| __put_user(returnBuffer.sprite_disable_mask, &p_buf->sprite_disable_mask)
+		|| __put_user(returnBuffer.sprite.dspa_control, &p_buf->sprite.dspa_control)
+		|| __put_user(returnBuffer.sprite.dspa_key_value, &p_buf->sprite.dspa_key_value)
+		|| __put_user(returnBuffer.sprite.dspa_key_mask, &p_buf->sprite.dspa_key_mask)
+		|| __put_user(returnBuffer.sprite.dspc_control, &p_buf->sprite.dspc_control)
+		|| __put_user(returnBuffer.sprite.dspc_stride, &p_buf->sprite.dspc_stride)
+		|| __put_user(returnBuffer.sprite.dspc_position, &p_buf->sprite.dspc_position)
+		|| __put_user(returnBuffer.sprite.dspc_linear_offset, &p_buf->sprite.dspc_linear_offset)
+		|| __put_user(returnBuffer.sprite.dspc_size, &p_buf->sprite.dspc_size)
+		|| __put_user(returnBuffer.sprite.dspc_surface, &p_buf->sprite.dspc_surface)
+		|| __put_user(returnBuffer.subpicture_enable_mask, &p_buf->subpicture_enable_mask)
+		|| __put_user(returnBuffer.subpicture_disable_mask, &p_buf->subpicture_disable_mask)
+		|| __put_user(returnBuffer.cursor.CursorADDR, &p_buf->cursor.CursorADDR)
+		|| __put_user(returnBuffer.cursor.xPos, &p_buf->cursor.xPos)
+		|| __put_user(returnBuffer.cursor.yPos, &p_buf->cursor.yPos)
+		|| __put_user(returnBuffer.cursor.CursorSize, &p_buf->cursor.CursorSize)
+		|| __put_user(returnBuffer.cursor_enable_mask, &p_buf->cursor_enable_mask)
+		|| __put_user(returnBuffer.cursor_disable_mask, &p_buf->cursor_disable_mask)
+		|| __put_user(returnBuffer.plane_enable_mask, &p_buf->plane_enable_mask)
+		|| __put_user(returnBuffer.plane_disable_mask, &p_buf->plane_disable_mask)
+		|| __put_user(returnBuffer.plane.type, &p_buf->plane.type)
+		|| __put_user(returnBuffer.plane.index, &p_buf->plane.index)
+		|| __put_user(returnBuffer.plane.ctx, &p_buf->plane.ctx)) {
+		printk(KERN_ERR "%s: __put_user failed\n", __func__);
+		return -EFAULT;
+	}
+
+	return retval;
+}
+
+struct drm_psb_vsync_set_arg_32 {
+	uint32_t vsync_operation_mask;
+	struct {
+		uint32_t pipe;
+		int vsync_pipe;
+		int vsync_count;
+		uint64_t timestamp  __attribute__ ((__packed__));
+	} vsync;
+} __attribute__ ((__packed__));
+
+int compat_PVRSRV_BridgeDispatchKM4(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	int retval;
+	struct drm_psb_vsync_set_arg_32 req32;
+	struct drm_psb_vsync_set_arg __user *request;
+	struct drm_psb_vsync_set_arg returnBuffer;
+	struct drm_psb_vsync_set_arg_32 __user *p_buf =
+		(struct drm_psb_vsync_set_arg_32 __user *)((void __user *)arg);
+
+	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32))) {
+		printk(KERN_ERR "%s: copy_from_user failed\n", __func__);
+		return -EFAULT;
+	}
+
+	request = compat_alloc_user_space(sizeof(*request));
+	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+		|| __put_user(req32.vsync_operation_mask, &request->vsync_operation_mask)
+		|| __put_user(req32.vsync.pipe, &request->vsync.pipe)
+		|| __put_user(req32.vsync.vsync_pipe, &request->vsync.vsync_pipe)
+		|| __put_user(req32.vsync.vsync_count, &request->vsync.vsync_count)
+		|| __put_user(req32.vsync.timestamp, &request->vsync.timestamp)) {
+		printk(KERN_ERR "%s __put_user failed\n", __func__);
+		return -EFAULT;
+	}
+
+	/* Correct cmd with the proper size */
+	cmd &= ~(_IOC_SIZEMASK << _IOC_SIZESHIFT);
+	cmd |= (sizeof(*request) << _IOC_SIZESHIFT);
+
+	retval = drm_ioctl(filp, cmd, (unsigned long)request);
+	if (copy_from_user(&returnBuffer, (void __user *)request, sizeof(returnBuffer))) {
+		printk(KERN_ERR "%s: copy_from_user failed\n", __func__);
+		return -EFAULT;
+	}
+
+	if (!access_ok(VERIFY_WRITE, request, sizeof(*p_buf))
+		|| __put_user(returnBuffer.vsync_operation_mask, &p_buf->vsync_operation_mask)
+		|| __put_user(returnBuffer.vsync.pipe, &p_buf->vsync.pipe)
+		|| __put_user(returnBuffer.vsync.vsync_pipe, &p_buf->vsync.vsync_pipe)
+		|| __put_user(returnBuffer.vsync.vsync_count, &p_buf->vsync.vsync_count)
+		|| __put_user(returnBuffer.vsync.timestamp, &p_buf->vsync.timestamp)){
+		printk(KERN_ERR "%s: __put_user failed\n", __func__);
+		return -EFAULT;
+	}
+
+	return retval;
+}
+
+struct psb_gtt_mapping_arg_32 {
+	uint32_t type;
+	uint32_t hKernelMemInfo; /* void *hKernelMemInfo; */
+	uint32_t offset_pages;
+	uint32_t page_align;
+	uint32_t bcd_device_id;
+	uint32_t bcd_buffer_id;
+	uint32_t bcd_buffer_count;
+	uint32_t bcd_buffer_stride;
+	uint32_t vaddr;
+	uint32_t size;
+};
+
+int compat_PVRSRV_BridgeDispatchKM5(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	int retval;
+	struct psb_gtt_mapping_arg_32 req32;
+	struct psb_gtt_mapping_arg __user *request;
+	struct psb_gtt_mapping_arg returnBuffer;
+	struct psb_gtt_mapping_arg_32 __user *p_buf = (struct psb_gtt_mapping_arg_32 __user *)((void __user *)arg);
+
+	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32))) {
+		printk(KERN_ERR "%s: copy_from_user failed\n", __func__);
+		return -EFAULT;
+	}
+
+	request = compat_alloc_user_space(sizeof(*request));
+	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+		|| __put_user(req32.type, &request->type)
+		|| __put_user((void *)(unsigned long)req32.hKernelMemInfo, &request->hKernelMemInfo)
+		|| __put_user(req32.offset_pages, &request->offset_pages)
+		|| __put_user(req32.page_align, &request->page_align)
+		|| __put_user(req32.bcd_device_id, &request->bcd_device_id)
+		|| __put_user(req32.bcd_buffer_id, &request->bcd_buffer_id)
+		|| __put_user(req32.bcd_buffer_count, &request->bcd_buffer_count)
+		|| __put_user(req32.bcd_buffer_stride, &request->bcd_buffer_stride)
+		|| __put_user(req32.vaddr, &request->vaddr)
+		|| __put_user(req32.size, &request->size)) {
+		printk(KERN_ERR "%s: __put_user failed\n", __func__);
+		return -EFAULT;
+	}
+
+	/* Correct cmd with the proper size */
+	cmd &= ~(_IOC_SIZEMASK << _IOC_SIZESHIFT);
+	cmd |= (sizeof(*request) << _IOC_SIZESHIFT);
+
+	retval = drm_ioctl(filp, cmd, (unsigned long)request);
+	if (copy_from_user(&returnBuffer, (void __user *)request, sizeof(returnBuffer))) {
+		printk(KERN_ERR "%s: copy_from_user failed\n", __func__);
+		return -EFAULT;
+        }
+
+	if (!access_ok(VERIFY_WRITE, request, sizeof(*p_buf))
+		|| __put_user(returnBuffer.type, &p_buf->type)
+		|| __put_user((uint32_t)(unsigned long)returnBuffer.hKernelMemInfo, &p_buf->hKernelMemInfo)
+		|| __put_user(returnBuffer.offset_pages, &p_buf->offset_pages)
+		|| __put_user(returnBuffer.page_align, &p_buf->page_align)
+		|| __put_user(returnBuffer.bcd_device_id, &p_buf->bcd_device_id)
+		|| __put_user(returnBuffer.bcd_buffer_id, &p_buf->bcd_buffer_id)
+		|| __put_user(returnBuffer.bcd_buffer_count, &p_buf->bcd_buffer_count)
+		|| __put_user(returnBuffer.bcd_buffer_stride, &p_buf->bcd_buffer_stride)
+		|| __put_user(returnBuffer.vaddr, &p_buf->vaddr)
+		|| __put_user(returnBuffer.size, &p_buf->size)) {
+		printk(KERN_ERR "%s: __put_user failed\n", __func__);
+		return -EFAULT;
+	}
+
+	return retval;
+}
+
+static drm_ioctl_compat_t *psb_compat_ioctls[] = {
+	[PVR_DRM_SRVKM_CMD] = compat_PVRSRV_BridgeDispatchKM2,
+	[DRM_PSB_REGISTER_RW] = compat_PVRSRV_BridgeDispatchKM3,
+	[DRM_PSB_VSYNC_SET] = compat_PVRSRV_BridgeDispatchKM4,
+	[DRM_PSB_GTT_MAP] = compat_PVRSRV_BridgeDispatchKM5,
+	[DRM_PSB_GTT_UNMAP] = compat_PVRSRV_BridgeDispatchKM5,
+};
+
+/**
+ * Called whenever a 32-bit process running under a 64-bit kernel
+ * performs an ioctl on /dev/dri/card<n>.
+ *
+ * \param filp file pointer.
+ * \param cmd command.
+ * \param arg user argument.
+ * \return zero on success or negative number on failure.
+ */
+static long psb_compat_ioctl(struct file *filp, unsigned int cmd,
+			       unsigned long arg)
+{
+	unsigned int nr = DRM_IOCTL_NR(cmd);
+	drm_ioctl_compat_t *fn = NULL;
+	long ret;
+
+	/*
+	 * The driver private ioctls and TTM ioctls should be
+	 * thread-safe.
+	 */
+
+	if (nr < DRM_COMMAND_BASE) {
+		ret = drm_compat_ioctl(filp, cmd, arg);
+		goto out;
+	}
+
+	if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(psb_compat_ioctls))
+		fn = psb_compat_ioctls[nr - DRM_COMMAND_BASE];
+
+	if (fn != NULL) {
+		ret = (*fn) (filp, cmd, arg);
+	} else {
+		ret = drm_ioctl(filp, cmd, arg);
+	}
+out:
+	return ret;
+}
+#endif /* CONFIG_COMPAT */
+
+#ifdef CONFIG_SUPPORT_TMD_MIPI_600X1024_DISPLAY
+module_init(psb_init);
+#else
+late_initcall(psb_init);
+#endif
+module_exit(psb_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/psb_drv.h b/drivers/external_drivers/intel_media/display/tng/drv/psb_drv.h
new file mode 100644
index 0000000..8517f21
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/psb_drv.h
@@ -0,0 +1,1455 @@
+/**************************************************************************
+ * Copyright (c) 2007-2008, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#ifndef _PSB_DRV_H_
+#define _PSB_DRV_H_
+
+#include <linux/version.h>
+#include <linux/panel_psb_drv.h>
+
+#include <drm/drmP.h>
+#include "psb_drm.h"
+#include "psb_reg.h"
+
+#include "psb_intel_drv.h"
+#include "psb_hotplug.h"
+#ifdef CONFIG_SUPPORT_MIPI
+#include "psb_dpst.h"
+#endif
+#include "psb_gtt.h"
+#include "ospm/pwr_mgmt.h"
+#include "ttm/ttm_object.h"
+#include "psb_ttm_fence_driver.h"
+#include "psb_ttm_userobj_api.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_lock.h"
+#include "psb_video_drv.h"
+#include "mdfld_hdmi_audio_if.h"
+
+#include "bufferclass_interface.h"
+#include "pvrsrv_interface.h"
+#include "displayclass_interface.h"
+#include "display_callbacks.h"
+#include <linux/wakelock.h>
+#include <linux/pm_qos.h>
+
+/*Append new drm mode definition here, align with libdrm definition*/
+#define DRM_MODE_SCALE_NO_SCALE   4
+
+extern int drm_psb_debug;
+extern int drm_psb_dsr;
+extern int drm_psb_enable_pr2_cabc;
+extern int gfxrtdelay;
+extern int drm_psb_te_timer_delay;
+extern int drm_psb_enable_gamma;
+extern int drm_psb_enable_color_conversion;
+extern int drm_psb_set_gamma_success;
+extern int drm_psb_set_gamma_pending;
+extern int drm_psb_set_gamma_pipe;
+extern int gamma_setting_save[256];
+extern int csc_setting_save[6];
+extern u32 DISP_PLANEB_STATUS;
+
+extern struct ttm_bo_driver psb_ttm_bo_driver;
+
+enum {
+	CHIP_PSB_8108 = 0,
+	CHIP_PSB_8109 = 1,
+	CHIP_MRST_4100 = 2,
+	CHIP_MDFLD_0130 = 3,
+	CHIP_MRFLD_1180 = 4,
+	CHIP_MRFLD_1480 = 5
+};
+
+enum enum_ports {
+	PORT_A = 0,
+	PORT_C = 1
+} ;
+
+#define PCI_ID_TOPAZ_DISABLED 0x4101
+
+/*
+ *Hardware bugfixes
+ */
+
+#define FIX_TG_16
+#define FIX_TG_2D_CLOCKGATE
+#define OSPM_STAT
+
+#define DRIVER_NAME "pvrsrvkm"
+#define DRIVER_DESC "drm driver for the Intel GMA500"
+#define DRIVER_AUTHOR "Intel Corporation"
+#define OSPM_PROC_ENTRY "ospm"
+#define RTPM_PROC_ENTRY "rtpm"
+#define DSR_PROC_ENTRY "dsr"
+#define GFXPM_PROC_ENTRY "mrfld_gfx_pm"
+#define VSPPM_PROC_ENTRY "mrfld_vsp_pm"
+#define VEDPM_PROC_ENTRY "mrfld_ved_pm"
+#define VECPM_PROC_ENTRY "mrfld_vec_pm"
+#define BLC_PROC_ENTRY "mrst_blc"
+#define DISPLAY_PROC_ENTRY "display_status"
+#define PANEL_PROC_ENTRY "panel_status"
+#define HDMI_PROC_ENTRY "hdmi_power"
+#define GPIO_PROC_ENTRY "hdmi_gpio_control"
+#define CSC_PROC_ENTRY "csc_control"
+
+
+#define PSB_DRM_DRIVER_DATE "2009-03-10"
+#define PSB_DRM_DRIVER_MAJOR 8
+#define PSB_DRM_DRIVER_MINOR 1
+#define PSB_DRM_DRIVER_PATCHLEVEL 0
+
+#define PSB_VDC_OFFSET		 0x00000000
+#define PSB_VDC_SIZE		 0x00080000
+#define PSB_MMIO_RESOURCE	 0
+#define PSB_GATT_RESOURCE	 2
+#define PSB_GTT_RESOURCE	 3
+#define PSB_GMCH_CTRL		 0x52
+#define PSB_BSM			 0x5C
+
+#define RGX_OFFSET		 0x00100000	/* GFX-IP IOSF-SB */
+#define RGX_SIZE		 0x00010000
+
+#define GFX_WRAPPER_OFFSET	 0x00160000	/* GFX Wrapper */
+#define GFX_WRAPPER_SIZE	 0x00001000
+
+#define VED_WRAPPER_OFFSET	 0x00161000	/* VED Wrapper */
+#define VED_WRAPPER_SIZE	 0x00001000
+
+#define VEC_WRAPPER_OFFSET	 0x00162000	/* VEC Wrapper */
+#define VEC_WRAPPER_SIZE	 0x00001000
+
+#define MRFLD_MSAC		 0x60
+#define _APERTURE_SIZE_POS	 28
+#define _APERTURE_SIZE_MASK	 ((1<<28) | (1<<29))
+#define _1G_APERTURE_SIZE	 (1<<30)
+#define _512M_APERTURE_SIZE      (1<<29)
+#define _256M_APERTURE_SIZE      (1<<28)
+#define _1G_APERTURE 		 0x0
+#define _512M_APERTURE 		 0x1
+#define _256M_APERTURE 		 0x2
+#define MRFLD_BGSM		 0x70
+#define _PSB_GMCH_ENABLED	 0x4
+#define PSB_PGETBL_CTL		 0x2020
+#define _PSB_PGETBL_ENABLED	 0x00000001
+#define PSB_SGX_2D_SLAVE_PORT	 0x4000
+
+/*
+ *Flags for external memory type field.
+ */
+#define CONFIG_VIDEO_MRFLD  1
+#define CONFIG_VIDEO_MRFLD_VP  1
+
+#ifdef CONFIG_VIDEO_MRFLD
+#define MRST_MSVDX_OFFSET	0x120000	/*MSVDX Base offset */
+#else
+#define MRST_MSVDX_OFFSET	0x90000	/*MSVDX Base offset */
+#endif
+#define PSB_MSVDX_OFFSET	0x50000	/*MSVDX Base offset */
+/* MSVDX MMIO region is 0x50000 - 0x57fff ==> 32KB */
+#define PSB_MSVDX_SIZE		0x10000
+
+#define PSB_IED_DRM_CNTL_STATUS		0x2208
+
+#define TNG_VSP_OFFSET		0x800000
+#define TNG_VSP_SIZE		0x400000
+
+#define LNC_TOPAZ_OFFSET	0xA0000
+#define PNW_TOPAZ_OFFSET	0xC0000
+#define TNG_TOPAZ_OFFSET	0x140000
+#define LNC_TOPAZ_SIZE		0x10000
+#define PNW_TOPAZ_SIZE		0x30000	/* PNW VXE285 has two cores */
+#define TNG_TOPAZ_SIZE		0x20000
+#define PSB_MMU_CACHED_MEMORY	  0x0001	/* Bind to MMU only */
+#define PSB_MMU_RO_MEMORY	  0x0002	/* MMU RO memory */
+#define PSB_MMU_WO_MEMORY	  0x0004	/* MMU WO memory */
+
+/*
+ *PTE's and PDE's
+ */
+
+#define PSB_PDE_MASK		  0x003FFFFF
+#define PSB_PDE_SHIFT		  22
+#define PSB_PTE_SHIFT		  12
+
+#define PSB_PTE_VALID		  0x0001	/* PTE / PDE valid */
+#define PSB_PTE_WO		  0x0002	/* Write only */
+#define PSB_PTE_RO		  0x0004	/* Read only */
+#define PSB_PTE_CACHED		  0x0008	/* CPU cache coherent */
+
+/*
+ * VSP PTE's and PDE's
+ */
+
+#define VSP_PDE_MASK		  0x003FFFFF
+#define VSP_PDE_SHIFT		  24
+#define VSP_PTE_SHIFT		  8
+
+/* PTE / PDE valid */
+#define VSP_PTE_VALID		  (0x1 << VSP_PDE_SHIFT)
+
+/*
+ *VDC registers and bits
+ */
+#define PSB_MSVDX_CLOCKGATING	  0x2064
+#define PSB_TOPAZ_CLOCKGATING	  0x2068
+#define PSB_HWSTAM		  0x2098
+#define PSB_INSTPM		  0x20C0
+#define PSB_INT_IDENTITY_R        0x20A4
+#define _MDFLD_PIPEC_EVENT_FLAG   (1<<2)
+#define _MDFLD_PIPEC_VBLANK_FLAG  (1<<3)
+#define _PSB_DPST_PIPEB_FLAG      (1<<4)
+#define _MDFLD_PIPEB_EVENT_FLAG   (1<<4)
+#define _PSB_VSYNC_PIPEB_FLAG	  (1<<5)
+#define _PSB_DPST_PIPEA_FLAG      (1<<6)
+#define _PSB_PIPEA_EVENT_FLAG     (1<<6)
+#define _PSB_VSYNC_PIPEA_FLAG	  (1<<7)
+#define _MDFLD_MIPIA_FLAG	  (1<<16)
+#define _MDFLD_MIPIC_FLAG	  (1<<17)
+#define _PSB_IRQ_SGX_FLAG	  (1<<18)
+#define _PSB_IRQ_MSVDX_FLAG	  (1<<19)
+#define _LNC_IRQ_TOPAZ_FLAG	  (1<<20)
+#define _TNG_IRQ_VSP_FLAG	  (1<<21)
+
+/* This flag includes all the display IRQ bits excepts the vblank irqs. */
+#define _MDFLD_DISP_ALL_IRQ_FLAG (_MDFLD_PIPEC_EVENT_FLAG | _MDFLD_PIPEB_EVENT_FLAG | \
+        _PSB_PIPEA_EVENT_FLAG | _PSB_VSYNC_PIPEA_FLAG | _MDFLD_MIPIA_FLAG | _MDFLD_MIPIC_FLAG)
+#define PSB_INT_IDENTITY_R	  0x20A4
+#define PSB_INT_MASK_R		  0x20A8
+#define PSB_INT_ENABLE_R	  0x20A0
+
+#define _PSB_MMU_ER_MASK      0x0001FF00
+#define _PSB_MMU_ER_HOST      (1 << 16)
+#define GPIOA			0x5010
+#define GPIOB			0x5014
+#define GPIOC			0x5018
+#define GPIOD			0x501c
+#define GPIOE			0x5020
+#define GPIOF			0x5024
+#define GPIOG			0x5028
+#define GPIOH			0x502c
+#define GPIO_CLOCK_DIR_MASK		(1 << 0)
+#define GPIO_CLOCK_DIR_IN		(0 << 1)
+#define GPIO_CLOCK_DIR_OUT		(1 << 1)
+#define GPIO_CLOCK_VAL_MASK		(1 << 2)
+#define GPIO_CLOCK_VAL_OUT		(1 << 3)
+#define GPIO_CLOCK_VAL_IN		(1 << 4)
+#define GPIO_CLOCK_PULLUP_DISABLE	(1 << 5)
+#define GPIO_DATA_DIR_MASK		(1 << 8)
+#define GPIO_DATA_DIR_IN		(0 << 9)
+#define GPIO_DATA_DIR_OUT		(1 << 9)
+#define GPIO_DATA_VAL_MASK		(1 << 10)
+#define GPIO_DATA_VAL_OUT		(1 << 11)
+#define GPIO_DATA_VAL_IN		(1 << 12)
+#define GPIO_DATA_PULLUP_DISABLE	(1 << 13)
+
+#define VCLK_DIVISOR_VGA0   0x6000
+#define VCLK_DIVISOR_VGA1   0x6004
+#define VCLK_POST_DIV	    0x6010
+
+#define PSB_COMM_2D (PSB_ENGINE_2D << 4)
+#define PSB_COMM_3D (PSB_ENGINE_3D << 4)
+#define PSB_COMM_TA (PSB_ENGINE_TA << 4)
+#define PSB_COMM_HP (PSB_ENGINE_HP << 4)
+#define PSB_COMM_USER_IRQ (1024 >> 2)
+#define PSB_COMM_USER_IRQ_LOST (PSB_COMM_USER_IRQ + 1)
+#define PSB_COMM_FW (2048 >> 2)
+
+#define PSB_UIRQ_VISTEST	       1
+#define PSB_UIRQ_OOM_REPLY	       2
+#define PSB_UIRQ_FIRE_TA_REPLY	       3
+#define PSB_UIRQ_FIRE_RASTER_REPLY     4
+
+#define PSB_2D_SIZE (256*1024*1024)
+#define PSB_MAX_RELOC_PAGES 1024
+
+#define PSB_LOW_REG_OFFS 0x0204
+#define PSB_HIGH_REG_OFFS 0x0600
+
+#define PSB_NUM_VBLANKS 2
+
+#define PSB_2D_SIZE (256*1024*1024)
+#define PSB_MAX_RELOC_PAGES 1024
+
+#define PSB_LOW_REG_OFFS 0x0204
+#define PSB_HIGH_REG_OFFS 0x0600
+
+#define PSB_NUM_VBLANKS 2
+#define PSB_WATCHDOG_DELAY (DRM_HZ * 2)
+#define PSB_LID_DELAY (DRM_HZ / 10)
+
+#define MDFLD_PNW_A0 0x00
+#define MDFLD_PNW_B0 0x04
+#define MDFLD_PNW_C0 0x08
+
+#define MDFLD_DSR_2D_3D_0 	BIT0
+#define MDFLD_DSR_2D_3D_2 	BIT1
+#define MDFLD_DSR_CURSOR_0 	BIT2
+#define MDFLD_DSR_CURSOR_2	BIT3
+#define MDFLD_DSR_OVERLAY_0 	BIT4
+#define MDFLD_DSR_OVERLAY_2 	BIT5
+#define MDFLD_DSR_MIPI_CONTROL	BIT6
+#define MDFLD_DSR_DAMAGE_MASK_0	(BIT0 | BIT2 | BIT4)
+#define MDFLD_DSR_DAMAGE_MASK_2	(BIT1 | BIT3 | BIT5)
+#define MDFLD_DSR_2D_3D 	(MDFLD_DSR_2D_3D_0 | MDFLD_DSR_2D_3D_2)
+
+#define MDFLD_DSR_RR 45
+#define MDFLD_DPU_ENABLE BIT31
+#define MDFLD_DSR_FULLSCREEN BIT30
+#define MDFLD_DSR_DELAY (DRM_HZ / MDFLD_DSR_RR)
+
+#define PSB_PWR_STATE_ON		1
+#define PSB_PWR_STATE_OFF		2
+
+#define PSB_PMPOLICY_NOPM		0
+#define PSB_PMPOLICY_CLOCKGATING	1
+#define PSB_PMPOLICY_POWERDOWN		2
+#define PSB_PMPOLICY_HWIDLE		3
+#define PSB_PMPOLICY_SUSPEND_HWIDLE	4
+#define PSB_PMPOLICY_FORCE_PM		5
+
+
+#define PSB_CGPOLICY_ON		0
+#define PSB_CGPOLICY_GFXCG_DIS		1
+#define PSB_CGPOLICY_VECCG_DIS		2
+#define PSB_CGPOLICY_VEDCG_DIS		3
+
+#define PSB_CMDPOLICY_PARALLEL		1
+
+#define PSB_BOTTOM_HALF_WQ              1
+#define PSB_BOTTOM_HALF_TQ              2
+
+#define PSB_PMSTATE_POWERUP		0
+#define PSB_PMSTATE_CLOCKGATED		1
+#define PSB_PMSTATE_POWERDOWN		2
+#define PSB_PCIx_MSI_ADDR_LOC		0x94
+#define PSB_PCIx_MSI_DATA_LOC		0x98
+
+#define MDFLD_PLANE_MAX_WIDTH		2048
+#define MDFLD_PLANE_MAX_HEIGHT		2048
+
+#define IS_POULSBO(dev) 0
+
+#define IS_MDFLD(dev) (((dev)->pci_device & 0xfff8) == 0x0130)
+#define IS_CTP(dev) (((dev->pci_device & 0xffff) == 0x08c0) ||	\
+		((dev->pci_device & 0xffff) == 0x08c7) ||  \
+		((dev->pci_device & 0xffff) == 0x08c8))
+#define IS_MRFLD(dev) (((dev)->pci_device & 0xfff8) == 0x1180 || ((dev)->pci_device & 0xfff8) == 0x1480)
+
+#define IS_TNG_A0(dev) ((intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER) && (intel_mid_soc_stepping() == 0))
+
+#if defined(CONFIG_DRM_CTP)
+#define IS_TNG_B0(dev)		0
+#elif defined(CONFIG_DRM_VXD_BYT)
+#define IS_TNG_B0(dev)		0
+#else
+#define IS_TNG_B0(dev) ((intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER) && (intel_mid_soc_stepping() == 1))
+#endif
+
+#define IS_ANN(dev) (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_ANNIEDALE)
+
+#define IS_ANN_A0(dev) ((intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_ANNIEDALE) && (intel_mid_soc_stepping() == 0))
+
+#define IS_ANN_B0(dev) ((intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_ANNIEDALE) && (intel_mid_soc_stepping() == 1))
+
+#define IS_MOFD(dev) (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_ANNIEDALE)
+
+#define IS_MID(dev) (IS_MDFLD(dev) || IS_MRFLD(dev))
+#define IS_FLDS(dev) (IS_MDFLD(dev) || IS_MRFLD(dev))
+#define IS_MSVDX(dev) (IS_MDFLD(dev) || IS_MRFLD(dev))
+#define IS_TOPAZ(dev) (IS_MDFLD(dev) || IS_MRFLD(dev))
+
+#define IS_MSVDX_MEM_TILE(dev) ((IS_MRFLD(dev)) || (IS_CTP(dev)))
+
+
+/*
+ *User options.
+ */
+
+struct drm_psb_uopt {
+	int pad;		/*keep it here in case we use it in future */
+};
+
+#define MODE_SETTING_IN_CRTC 	0x1
+#define MODE_SETTING_IN_ENCODER 0x2
+#define MODE_SETTING_ON_GOING 	0x3
+#define MODE_SETTING_IN_DSR 	0x4
+#define MODE_SETTING_ENCODER_DONE 0x8
+#define GCT_R10_HEADER_SIZE	16
+#define GCT_R10_DISPLAY_DESC_SIZE	28
+
+struct psb_context;
+struct psb_validate_buffer;
+struct psb_video_ctx;
+
+/*  enum mdfld_dsi_encoder_t is required by mdfld_output.h */
+
+typedef enum {
+	MDFLD_DSI_ENCODER_DBI = 0,
+	MDFLD_DSI_ENCODER_DPI,
+} mdfld_dsi_encoder_t;
+
+typedef enum {
+	MDFLD_PIPE_A = 0,
+	MDFLD_PIPE_B,
+	MDFLD_PIPE_C,
+	MDFLD_PIPE_MAX,
+} mdfld_pipe_num;
+
+struct drm_psb_private {
+	/*
+	 * DSI info.
+	 */
+	void *dbi_dsr_info;
+#ifdef CONFIG_MID_DSI_DPU
+	void *dbi_dpu_info;
+#endif
+	/* QOS */
+	struct pm_qos_request s0ix_qos;
+
+	struct mdfld_dsi_config *dsi_configs[2];
+
+	struct workqueue_struct *vsync_wq;
+	struct workqueue_struct *power_wq;
+
+	struct work_struct te_work;
+	int te_pipe;
+	struct work_struct reset_panel_work;
+
+	struct work_struct vsync_event_work;
+	int vsync_pipe;
+
+	struct mutex vsync_lock;
+	atomic_t *vblank_count;
+	bool vsync_enabled[3];
+
+	bool pipea_dpi_underrun_count;
+	bool pipec_dpi_underrun_count;
+
+	/*
+	 *TTM Glue.
+	 */
+
+	struct drm_global_reference mem_global_ref;
+	struct ttm_bo_global_ref bo_global_ref;
+	int has_global;
+
+	struct drm_device *dev;
+	struct ttm_object_device *tdev;
+	struct ttm_fence_device fdev;
+	struct ttm_bo_device bdev;
+	struct ttm_lock ttm_lock;
+	struct vm_operations_struct *ttm_vm_ops;
+	int has_fence_device;
+	int has_bo_device;
+
+	unsigned long chipset;
+
+	struct drm_psb_dev_info_arg dev_info;
+	struct drm_psb_uopt uopt;
+
+	struct psb_gtt *pg;
+
+	/*GTT Memory manager */
+	struct psb_gtt_mm *gtt_mm;
+
+	struct page *scratch_page;
+	uint32_t sequence[PSB_NUM_ENGINES];
+	uint32_t last_sequence[PSB_NUM_ENGINES];
+	uint32_t last_submitted_seq[PSB_NUM_ENGINES];
+
+	struct psb_mmu_driver *mmu;
+	struct psb_mmu_pd *pf_pd;
+
+	/* VSP MMU */
+	struct psb_mmu_driver *vsp_mmu;
+	struct psb_mmu_pd *vsp_pf_pd;
+
+	uint8_t *rgx_reg;
+	uint8_t *wrapper_reg;
+	uint8_t *ved_wrapper_reg;
+	uint8_t *vec_wrapper_reg;
+	uint8_t *vdc_reg;
+
+	/* IMG video context */
+	struct list_head video_ctx;
+	spinlock_t video_ctx_lock;
+	/* Current video context */
+	struct psb_video_ctx *topaz_ctx;
+	/* previous vieo context */
+	struct psb_video_ctx *last_topaz_ctx;
+
+	/*
+	 *MSVDX
+	 */
+	uint8_t *msvdx_reg;
+	atomic_t msvdx_mmu_invaldc;
+	void *msvdx_private;
+
+	/*
+	 *TOPAZ
+	 */
+	uint8_t *topaz_reg;
+	void *topaz_private;
+	uint8_t topaz_disabled;
+	uint32_t video_device_fuse;
+	atomic_t topaz_mmu_invaldc;
+
+	/*
+	 * VSP
+	 */
+	uint8_t *vsp_reg;
+	atomic_t vsp_mmu_invaldc;
+	void *vsp_private;
+
+	/*
+	 *Fencing / irq.
+	 */
+
+	uint32_t vdc_irq_mask;
+	uint32_t pipestat[PSB_NUM_PIPE];
+	bool vblanksEnabledForFlips;
+
+	spinlock_t irqmask_lock;
+	spinlock_t sequence_lock;
+
+	/*
+	 *Modesetting
+	 */
+	struct psb_intel_mode_device mode_dev;
+
+	struct drm_crtc *plane_to_crtc_mapping[PSB_NUM_PIPE];
+	struct drm_crtc *pipe_to_crtc_mapping[PSB_NUM_PIPE];
+	uint32_t num_pipe;
+
+	/*
+	 *Memory managers
+	 */
+	int have_tt;
+	int have_mem_mmu;
+	int have_mem_mmu_tiling;
+	struct mutex temp_mem;
+
+	/*
+	 *Relocation buffer mapping.
+	 */
+
+	spinlock_t reloc_lock;
+	unsigned int rel_mapped_pages;
+	wait_queue_head_t rel_mapped_queue;
+
+	/*
+	 *SAREA
+	 */
+	struct drm_psb_sarea *sarea_priv;
+
+	/*
+	 *OSPM info
+	 */
+	uint8_t panel_desc;
+	bool early_suspended;
+	struct wake_lock ospm_wake_lock;
+
+	/*
+	 *MAXFIFO/ S0i1-Display info
+	 */
+	void * dc_maxfifo_info;
+
+
+	/*
+	 * Sizes info
+	 */
+
+	struct drm_psb_sizes_arg sizes;
+
+	uint32_t fuse_reg_value;
+
+	/* vbt (gct) header information */
+	struct mrst_vbt vbt_data;
+	/* info that is stored from the gct */
+	struct gct_ioctl_arg gct_data;
+	enum panel_type panel_id;
+
+	/* pci revision id for B0:D2:F0 */
+	uint8_t platform_rev_id;
+
+	/* Feature bits from the VBIOS */
+	unsigned int int_tv_support:1;
+	unsigned int lvds_dither:1;
+	unsigned int lvds_vbt:1;
+	unsigned int int_crt_support:1;
+	unsigned int lvds_use_ssc:1;
+	int lvds_ssc_freq;
+	bool is_lvds_on;
+
+	/* MRST private date start */
+	unsigned int core_freq;
+	uint32_t iLVDS_enable;
+
+	/* pipe config register value */
+	uint32_t pipeconf;
+	uint32_t pipeconf1;
+	uint32_t pipeconf2;
+
+	/* plane control register value */
+	uint32_t dspcntr;
+	uint32_t dspcntr1;
+	uint32_t dspcntr2;
+
+	/* MRST_DSI private date start */
+	struct work_struct dsi_work;
+
+	/*
+	 *MRST DSI info
+	 */
+
+	/* The DPI panel power on */
+	bool dpi_panel_on;
+
+	/* The DBI panel power on */
+	bool dbi_panel_on;
+
+	/* The DPI display */
+	bool dpi;
+
+	enum mipi_panel_type panel_make;
+
+	/* Set if MIPI encoder wants to control plane/pipe */
+	bool dsi_plane_pipe_control;
+
+	/* status */
+	uint32_t videoModeFormat:2;
+	uint32_t laneCount:3;
+	uint32_t channelNumber:2;
+	uint32_t status_reserved:25;
+
+	/* dual display - DPI & DBI */
+	bool dual_display;
+
+	/* HS or LP transmission */
+	bool lp_transmission;
+
+	/* configuration phase */
+	bool config_phase;
+
+	/* first boot phase */
+	bool first_boot;
+
+	bool hdmi_first_boot;
+
+	bool is_mipi_on;
+
+	/* DSI clock */
+	uint32_t RRate;
+	uint32_t DDR_Clock;
+	uint32_t DDR_Clock_Calculated;
+	uint32_t ClockBits;
+
+	/* DBI Buffer pointer */
+	u32 DBI_CB_phys;
+	u8 *p_DBI_commandBuffer;
+	uint32_t DBI_CB_pointer;
+	u8 *p_DBI_dataBuffer_orig;
+	u8 *p_DBI_dataBuffer;
+	uint32_t DBI_DB_pointer;
+
+	/* DSI panel spec */
+	uint32_t pixelClock;
+	uint32_t HsyncWidth;
+	uint32_t HbackPorch;
+	uint32_t HfrontPorch;
+	uint32_t HactiveArea;
+	uint32_t VsyncWidth;
+	uint32_t VbackPorch;
+	uint32_t VfrontPorch;
+	uint32_t VactiveArea;
+	uint32_t bpp:5;
+	uint32_t Reserved:27;
+	/* MRST_DSI private date end */
+
+	/* MDFLD_DSI private date start */
+	/* dual display - DPI & DBI */
+	bool dual_mipi;
+	uint32_t ksel;
+	uint32_t mipi_lane_config;
+	uint32_t mipi_ctrl_display;
+	/*
+	 *MRST DSI info
+	 */
+	/* The DPI panel power on */
+	bool dpi_panel_on2;
+
+	/* The DBI panel power on */
+	bool dbi_panel_on2;
+
+	/* The DPI display */
+	bool dpi2;
+
+	/* status */
+	uint32_t videoModeFormat2:2;
+	uint32_t laneCount2:3;
+	uint32_t channelNumber2:2;
+	uint32_t status_reserved2:25;
+
+	/* HS or LP transmission */
+	bool lp_transmission2;
+
+	/* configuration phase */
+	bool config_phase2;
+
+	/* DSI clock */
+	uint32_t RRate2;
+	uint32_t DDR_Clock2;
+	uint32_t DDR_Clock_Calculated2;
+	uint32_t ClockBits2;
+
+	/* DBI Buffer pointer */
+	u32 DBI_CB_phys2;
+	u8 *p_DBI_commandBuffer2;
+	uint32_t DBI_CB_pointer2;
+	u8 *p_DBI_dataBuffer_orig2;
+	u8 *p_DBI_dataBuffer2;
+
+	/* DSI panel spec */
+	uint32_t pixelClock2;
+	uint32_t HsyncWidth2;
+	uint32_t HbackPorch2;
+	uint32_t HfrontPorch2;
+	uint32_t HactiveArea2;
+	uint32_t VsyncWidth2;
+	uint32_t VbackPorch2;
+	uint32_t VfrontPorch2;
+	uint32_t VactiveArea2;
+	uint32_t bpp2:5;
+	uint32_t Reserved2:27;
+	struct mdfld_dsi_dbi_output *dbi_output;
+	struct mdfld_dsi_dbi_output *dbi_output2;
+	/* MDFLD_DSI private date end */
+
+	/* wait queue for write_mem_status complete (EOF interrupt) */
+	wait_queue_head_t eof_wait;
+
+	/*
+	 *Register state
+	 */
+	uint32_t saveDSPACNTR;
+	uint32_t saveDSPBCNTR;
+	uint32_t savePIPEACONF;
+	uint32_t savePIPEBCONF;
+	uint32_t savePIPEASRC;
+	uint32_t savePIPEBSRC;
+	uint32_t saveFPA0;
+	uint32_t saveFPA1;
+	uint32_t saveDPLL_A;
+	uint32_t saveDPLL_A_MD;
+	uint32_t saveHTOTAL_A;
+	uint32_t saveHBLANK_A;
+	uint32_t saveHSYNC_A;
+	uint32_t saveVTOTAL_A;
+	uint32_t saveVBLANK_A;
+	uint32_t saveVSYNC_A;
+	uint32_t saveDSPASTRIDE;
+	uint32_t saveDSPASIZE;
+	uint32_t saveDSPAPOS;
+	uint32_t saveDSPABASE;
+	uint32_t saveDSPASURF;
+	uint32_t saveDSPASTATUS;
+	uint32_t saveFPB0;
+	uint32_t saveFPB1;
+	uint32_t saveDPLL_B;
+	uint32_t saveDPLL_B_MD;
+	uint32_t saveHTOTAL_B;
+	uint32_t saveHBLANK_B;
+	uint32_t saveHSYNC_B;
+	uint32_t saveVTOTAL_B;
+	uint32_t saveVBLANK_B;
+	uint32_t saveVSYNC_B;
+	uint32_t saveDSPBSTRIDE;
+	uint32_t saveDSPBSIZE;
+	uint32_t saveDSPBPOS;
+	uint32_t saveDSPBBASE;
+	uint32_t saveDSPBSURF;
+	uint32_t saveDSPBSTATUS;
+	uint32_t saveVCLK_DIVISOR_VGA0;
+	uint32_t saveVCLK_DIVISOR_VGA1;
+	uint32_t saveVCLK_POST_DIV;
+	uint32_t saveVGACNTRL;
+	uint32_t saveADPA;
+	uint32_t saveLVDS;
+	uint32_t saveDVOA;
+	uint32_t saveDVOB;
+	uint32_t saveDVOC;
+	uint32_t savePP_ON;
+	uint32_t savePP_OFF;
+	uint32_t savePP_CONTROL;
+	uint32_t savePP_CYCLE;
+	uint32_t savePFIT_CONTROL;
+	uint32_t savePaletteA[256];
+	uint32_t savePaletteB[256];
+	uint32_t saveBLC_PWM_CTL2;
+	uint32_t saveBLC_PWM_CTL;
+	uint32_t saveCLOCKGATING;
+	uint32_t saveDSPARB;
+	uint32_t saveDSPATILEOFF;
+	uint32_t saveDSPBTILEOFF;
+	uint32_t saveDSPAADDR;
+	uint32_t saveDSPBADDR;
+	uint32_t savePFIT_AUTO_RATIOS;
+	uint32_t savePFIT_PGM_RATIOS;
+	uint32_t savePP_ON_DELAYS;
+	uint32_t savePP_OFF_DELAYS;
+	uint32_t savePP_DIVISOR;
+	uint32_t saveBSM;
+	uint32_t saveBGSM;
+	uint32_t saveVBT;
+	uint32_t saveBCLRPAT_A;
+	uint32_t saveBCLRPAT_B;
+	uint32_t saveDSPALINOFF;
+	uint32_t saveDSPBLINOFF;
+	uint32_t savePERF_MODE;
+	uint32_t saveDSPFW1;
+	uint32_t saveDSPFW2;
+	uint32_t saveDSPFW3;
+	uint32_t saveDSPFW4;
+	uint32_t saveDSPFW5;
+	uint32_t saveDSPFW6;
+	uint32_t saveCHICKENBIT;
+	uint32_t saveDSPACURSOR_CTRL;
+	uint32_t saveDSPBCURSOR_CTRL;
+	uint32_t saveDSPACURSOR_BASE;
+	uint32_t saveDSPBCURSOR_BASE;
+	uint32_t saveDSPACURSOR_POS;
+	uint32_t saveDSPBCURSOR_POS;
+	uint32_t save_palette_a[256];
+	uint32_t save_palette_b[256];
+	uint32_t saveOV_OVADD;
+	uint32_t saveOV_OGAMC0;
+	uint32_t saveOV_OGAMC1;
+	uint32_t saveOV_OGAMC2;
+	uint32_t saveOV_OGAMC3;
+	uint32_t saveOV_OGAMC4;
+	uint32_t saveOV_OGAMC5;
+	uint32_t saveOVC_OVADD;
+	uint32_t saveOVC_OGAMC0;
+	uint32_t saveOVC_OGAMC1;
+	uint32_t saveOVC_OGAMC2;
+	uint32_t saveOVC_OGAMC3;
+	uint32_t saveOVC_OGAMC4;
+	uint32_t saveOVC_OGAMC5;
+
+	/*
+	 * extra MDFLD Register state
+	 */
+	uint32_t saveHDMIPHYMISCCTL;
+	uint32_t saveHDMIB_CONTROL;
+	uint32_t saveDSPCCNTR;
+	uint32_t savePIPECCONF;
+	uint32_t savePIPECSRC;
+	uint32_t saveHTOTAL_C;
+	uint32_t saveHBLANK_C;
+	uint32_t saveHSYNC_C;
+	uint32_t saveVTOTAL_C;
+	uint32_t saveVBLANK_C;
+	uint32_t saveVSYNC_C;
+	uint32_t saveDSPCSTRIDE;
+	uint32_t saveDSPCSIZE;
+	uint32_t saveDSPCPOS;
+	uint32_t saveDSPCSURF;
+	uint32_t saveDSPCSTATUS;
+	uint32_t saveDSPCLINOFF;
+	uint32_t saveDSPCTILEOFF;
+	uint32_t saveDSPCCURSOR_CTRL;
+	uint32_t saveDSPCCURSOR_BASE;
+	uint32_t saveDSPCCURSOR_POS;
+	uint32_t save_palette_c[256];
+	uint32_t saveOV_OVADD_C;
+	uint32_t saveOV_OGAMC0_C;
+	uint32_t saveOV_OGAMC1_C;
+	uint32_t saveOV_OGAMC2_C;
+	uint32_t saveOV_OGAMC3_C;
+	uint32_t saveOV_OGAMC4_C;
+	uint32_t saveOV_OGAMC5_C;
+
+	/* DSI reg save */
+	uint32_t saveDEVICE_READY_REG;
+	uint32_t saveINTR_EN_REG;
+	uint32_t saveDSI_FUNC_PRG_REG;
+	uint32_t saveHS_TX_TIMEOUT_REG;
+	uint32_t saveLP_RX_TIMEOUT_REG;
+	uint32_t saveTURN_AROUND_TIMEOUT_REG;
+	uint32_t saveDEVICE_RESET_REG;
+	uint32_t saveDPI_RESOLUTION_REG;
+	uint32_t saveHORIZ_SYNC_PAD_COUNT_REG;
+	uint32_t saveHORIZ_BACK_PORCH_COUNT_REG;
+	uint32_t saveHORIZ_FRONT_PORCH_COUNT_REG;
+	uint32_t saveHORIZ_ACTIVE_AREA_COUNT_REG;
+	uint32_t saveVERT_SYNC_PAD_COUNT_REG;
+	uint32_t saveVERT_BACK_PORCH_COUNT_REG;
+	uint32_t saveVERT_FRONT_PORCH_COUNT_REG;
+	uint32_t saveHIGH_LOW_SWITCH_COUNT_REG;
+	uint32_t saveINIT_COUNT_REG;
+	uint32_t saveMAX_RET_PAK_REG;
+	uint32_t saveVIDEO_FMT_REG;
+	uint32_t saveEOT_DISABLE_REG;
+	uint32_t saveLP_BYTECLK_REG;
+	uint32_t saveHS_LS_DBI_ENABLE_REG;
+	uint32_t saveTXCLKESC_REG;
+	uint32_t saveDPHY_PARAM_REG;
+	uint32_t saveMIPI_CONTROL_REG;
+	uint32_t saveMIPI;
+	uint32_t saveMIPI_C;
+	void (*init_drvIC) (struct drm_device * dev);
+	void (*dsi_prePowerState) (struct drm_device * dev);
+	void (*dsi_postPowerState) (struct drm_device * dev);
+
+	/* RGX IRQ handler function and data */
+	int (*pfn_rgxIrqHandler) (void * prgx_irqData);
+	void * prgx_irqData;
+
+	/* DPST Register Save */
+	uint32_t saveHISTOGRAM_INT_CONTROL_REG;
+	uint32_t saveHISTOGRAM_LOGIC_CONTROL_REG;
+	// SH START DPST
+	struct drm_connector *dpst_connector;
+	// SH END DPST
+	uint32_t savePWM_CONTROL_LOGIC;
+
+	/* MSI reg save */
+
+	uint32_t msi_addr;
+	uint32_t msi_data;
+
+	/*
+	 *Scheduling.
+	 */
+
+	struct mutex reset_mutex;
+	struct mutex cmdbuf_mutex;
+	/*uint32_t ta_mem_pages;
+	   struct psb_ta_mem *ta_mem;
+	   int force_ta_mem_load; */
+	atomic_t val_seq;
+
+	/*
+	 *TODO: change this to be per drm-context.
+	 */
+
+	struct psb_context decode_context;
+	struct psb_context encode_context;
+#ifdef SUPPORT_VSP
+	struct psb_context vsp_context;
+#endif
+	/*
+	 * LID-Switch
+	 */
+	spinlock_t lid_lock;
+	struct timer_list lid_timer;
+	u32 *lid_state;
+	u32 lid_last_state;
+
+	/*
+	 *Watchdog
+	 */
+
+	spinlock_t watchdog_lock;
+	struct timer_list watchdog_timer;
+	struct work_struct watchdog_wq;
+	struct work_struct msvdx_watchdog_wq;
+	struct work_struct topaz_watchdog_wq;
+	struct work_struct hdmi_hotplug_wq;
+	struct work_struct hdmi_audio_wq;
+	struct work_struct hdmi_audio_underrun_wq;
+	struct work_struct hdmi_audio_bufferdone_wq;
+	struct tasklet_struct hdmi_audio_bufferdone_tasklet;
+	atomic_t hotplug_wq_done;
+	int timer_available;
+
+#ifdef OSPM_STAT
+	unsigned char graphics_state;
+	unsigned long gfx_on_time;
+	unsigned long gfx_off_time;
+	unsigned long gfx_last_mode_change;
+	unsigned long gfx_on_cnt;
+	unsigned long gfx_off_cnt;
+#endif
+
+	/*to be removed later */
+	/*int dri_page_flipping;
+	   int current_page;
+	   int pipe_active[3];
+	   int saved_start[2];
+	   int saved_offset[2];
+	   int saved_stride[2];
+
+	   int flip_start[2];
+	   int flip_offset[2];
+	   int flip_stride[2]; */
+
+	/*
+	 * Used for modifying backlight from
+	 * xrandr -- consider removing and using HAL instead
+	 */
+	struct drm_property *backlight_property;
+	uint32_t blc_adj1;
+	uint32_t blc_adj2;
+
+	/*
+	 * DPST and Hotplug state
+	 */
+
+	struct dpst_state *psb_dpst_state;
+	struct hotplug_state *psb_hotplug_state;
+	pfn_vsync_handler psb_vsync_handler;
+
+	struct mutex dsr_mutex;
+	bool b_dsr_enable_config;
+	bool b_dsr_enable;
+	bool dsr_fb_update_done_0;
+	bool dsr_fb_update_done_2;
+	uint32_t dsr_fb_update;
+	uint32_t dsr_idle_count;
+	bool b_is_in_idle;
+	void (*exit_idle) (struct drm_device * dev, u32 update_src,
+			   void *p_surfaceAddr, bool check_hw_on_only);
+
+	bool b_async_flip_enable;
+	/*
+	 * DSR TIMER
+	 */
+	spinlock_t dsr_lock;
+	struct timer_list dsr_timer;
+
+	bool dsi_device_ready;
+	bool hdmi_done_reading_edid;
+	bool um_start;
+
+	uint32_t tmds_clock_khz;
+	had_event_call_back mdfld_had_event_callbacks;
+	struct snd_intel_had_interface *had_interface;
+	void *had_pvt_data;
+
+	uint32_t hdmi_audio_interrupt_mask;
+
+	struct mdfld_dsi_encoder *encoder0;
+	struct mdfld_dsi_encoder *encoder2;
+	mdfld_dsi_encoder_t mipi_encoder_type;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
+	/*psb fb dev */
+	void *fbdev;
+#endif
+	uint32_t cur_pipe;
+
+	struct mutex dpms_mutex;
+
+	/* read register value through sysfs. */
+	int count;
+	char buf[256];
+
+	/*
+	 * HDMI config data
+	*/
+	void *hdmi_priv;
+
+#define DRM_PSB_HDMI_FLIP_ARRAY_SIZE 4
+	void *flip_array[DRM_PSB_HDMI_FLIP_ARRAY_SIZE];
+	unsigned int addr_array[DRM_PSB_HDMI_FLIP_ARRAY_SIZE];
+	unsigned int flip_valid_size;
+	unsigned int flip_head;
+	unsigned int flip_tail;
+	unsigned int flip_inited;
+	unsigned int head_fliped;
+	spinlock_t flip_lock;
+
+	/*hdmi connected status */
+	bool bhdmiconnected;
+	bool dpms_on_off;
+	struct workqueue_struct *hpd_detect;
+/*	check it later
+*	pfn_screen_event_handler pvr_screen_event_handler;
+*/
+	struct timer_list hdmi_timer;
+	struct timer_list maxfifo_timer;
+	/* fix Lock screen flip in resume issue */
+	unsigned long init_screen_start;
+	unsigned long init_screen_offset;
+	unsigned long init_screen_size;
+	unsigned long init_screen_stride;
+
+	/* gamma and csc setting lock*/
+	struct mutex gamma_csc_lock;
+	/* overlay setting lock*/
+	struct mutex overlay_lock;
+	uint32_t overlay_wait;
+	uint32_t overlay_fliped;
+	int brightness_adjusted;
+
+	/*
+	 * S3D state 
+	 */
+	uint32_t cur_s3d_state;
+	/* whether is DVI port */
+	bool bDVIport;
+
+	struct pci_dev *pci_root;
+	bool bUseHFPLL;
+	bool bRereadZero;
+	bool panel_180_rotation;
+};
+
+struct psb_mmu_driver;
+
+extern int drm_crtc_probe_output_modes(struct drm_device *dev, int, int);
+extern int drm_pick_crtcs(struct drm_device *dev);
+extern int mdfld_intel_crtc_set_gamma(struct drm_device *dev,
+					struct gamma_setting *setting_data);
+
+extern int mdfld_intel_crtc_set_color_conversion(struct drm_device *dev,
+                                        struct csc_setting *setting_data);
+
+static inline struct drm_psb_private *psb_priv(struct drm_device *dev)
+{
+	return (struct drm_psb_private *)dev->dev_private;
+}
+
+/*
+ *psb_irq.c
+ */
+
+extern irqreturn_t psb_irq_handler(DRM_IRQ_ARGS);
+extern int psb_irq_enable_dpst(struct drm_device *dev);
+extern int psb_irq_disable_dpst(struct drm_device *dev);
+extern int psb_dpst_diet_save(struct drm_device *dev);
+extern int psb_dpst_diet_restore(struct drm_device *dev);
+extern void psb_irq_preinstall(struct drm_device *dev);
+extern int psb_irq_postinstall(struct drm_device *dev);
+extern void psb_irq_uninstall(struct drm_device *dev);
+extern void psb_irq_preinstall_islands(struct drm_device *dev, int hw_islands);
+extern int psb_irq_postinstall_islands(struct drm_device *dev, int hw_islands);
+extern void psb_irq_turn_on_dpst(struct drm_device *dev);
+extern void psb_irq_turn_off_dpst(struct drm_device *dev);
+
+extern void psb_irq_uninstall_islands(struct drm_device *dev, int hw_islands);
+extern int psb_vblank_wait2(struct drm_device *dev, unsigned int *sequence);
+extern int psb_vblank_wait(struct drm_device *dev, unsigned int *sequence);
+extern int psb_enable_vblank(struct drm_device *dev, int crtc);
+extern void psb_disable_vblank(struct drm_device *dev, int crtc);
+void psb_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
+
+void psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
+
+void mid_enable_pipe_event(struct drm_psb_private *dev_priv, int pipe);
+
+extern u32 psb_get_vblank_counter(struct drm_device *dev, int crtc);
+extern int intel_get_vblank_timestamp(struct drm_device *dev, int pipe,
+		int *max_error, struct timeval *vblank_time, unsigned flags);
+extern int intel_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
+		int *vpos, int *hpos);
+extern int mdfld_enable_te(struct drm_device *dev, int pipe);
+extern int mdfld_recover_te(struct drm_device *dev, int pipe);
+extern void mdfld_disable_te(struct drm_device *dev, int pipe);
+extern int mrfl_enable_repeat_frame_intr(struct drm_device *dev,
+					int idle_frame_count);
+extern void mrfl_disable_repeat_frame_intr(struct drm_device *dev);
+extern int mdfld_irq_enable_hdmi_audio(struct drm_device *dev);
+extern int mdfld_irq_disable_hdmi_audio(struct drm_device *dev);
+extern void psb_te_timer_func(unsigned long data);
+extern void mdfld_te_handler_work(struct work_struct *te_work);
+extern void mdfld_vsync_event_work(struct work_struct *work);
+
+#ifdef CONFIG_SUPPORT_HDMI
+void hdmi_do_audio_underrun_wq(struct work_struct *work);
+void hdmi_do_audio_bufferdone_wq(struct work_struct *work);
+void hdmi_audio_bufferdone_tasklet_func(unsigned long data);
+#endif
+extern u32 intel_vblank_count(struct drm_device *dev, int pipe);
+
+/*
+ *psb_fb.c
+ */
+extern int psbfb_probed(struct drm_device *dev);
+extern int psbfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
+extern int psbfb_kms_off_ioctl(struct drm_device *dev, void *data,
+			       struct drm_file *file_priv);
+extern int psbfb_kms_on_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv);
+extern void *psbfb_vdc_reg(struct drm_device *dev);
+
+extern void psb_print_pagefault(struct drm_psb_private *dev_priv);
+extern void mdfld_dsr_timer_init(struct drm_psb_private *dev_priv);
+extern void mdfld_dsr_timer_takedown(struct drm_psb_private *dev_priv);
+
+extern int mid_irq_enable_hdmi_audio(struct drm_device *dev);
+extern int mid_irq_disable_hdmi_audio(struct drm_device *dev);
+
+/* modesetting */
+extern void psb_modeset_init(struct drm_device *dev);
+extern void psb_modeset_cleanup(struct drm_device *dev);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
+/*fbdev*/
+extern int psb_fbdev_init(struct drm_device *dev);
+#endif
+
+/* psb_bl.c */
+extern int psb_brightness;
+int psb_backlight_init(struct drm_device *dev);
+void psb_backlight_exit(void);
+int psb_set_brightness(struct backlight_device *bd);
+int psb_get_brightness(struct backlight_device *bd);
+struct backlight_device *psb_get_backlight_device(void);
+
+/*
+ *Debug print bits setting
+ */
+#define PSB_D_GENERAL (1 << 0)
+#define PSB_D_INIT    (1 << 1)
+#define PSB_D_IRQ     (1 << 2)
+#define PSB_D_ENTRY   (1 << 3)
+/* debug the get H/V BP/FP count */
+#define PSB_D_HV      (1 << 4)
+#define PSB_D_DBI_BF  (1 << 5)
+#define PSB_D_PM      (1 << 6)
+#define PSB_D_RENDER  (1 << 7)
+#define PSB_D_REG     (1 << 8)
+#define PSB_D_MSVDX   (1 << 9)
+#define PSB_D_TOPAZ   (1 << 10)
+#define VSP_D_LOG   (1 << 11)
+#define VSP_D_PERF   (1 << 12)
+#define PSB_D_WARN    (1 << 13)
+#define PSB_D_MIPI    (1 << 14)
+#define PSB_D_BL    (1 << 15)
+#define PSB_D_DPST    (1 << 16)
+
+#ifndef DRM_DEBUG_CODE
+/* To enable debug printout, set drm_psb_debug in psb_drv.c
+ * to any combination of above print flags.
+ */
+#define DRM_DEBUG_CODE 2
+#endif
+
+extern int drm_psb_debug;
+extern int drm_psb_no_fb;
+extern int drm_idle_check_interval;
+extern int drm_topaz_sbuswa;
+
+#define PSB_DEBUG_GENERAL(_fmt, _arg...) \
+	PSB_DEBUG(PSB_D_GENERAL, _fmt, ##_arg)
+#define PSB_DEBUG_INIT(_fmt, _arg...) \
+	PSB_DEBUG(PSB_D_INIT, _fmt, ##_arg)
+#define PSB_DEBUG_IRQ(_fmt, _arg...) \
+	PSB_DEBUG(PSB_D_IRQ, _fmt, ##_arg)
+#define PSB_DEBUG_ENTRY(_fmt, _arg...) \
+	PSB_DEBUG(PSB_D_ENTRY, _fmt, ##_arg)
+#define PSB_DEBUG_HV(_fmt, _arg...) \
+	PSB_DEBUG(PSB_D_HV, _fmt, ##_arg)
+#define PSB_DEBUG_DBI_BF(_fmt, _arg...) \
+	PSB_DEBUG(PSB_D_DBI_BF, _fmt, ##_arg)
+#define PSB_DEBUG_PM(_fmt, _arg...) \
+	PSB_DEBUG(PSB_D_PM, _fmt, ##_arg)
+#define PSB_DEBUG_RENDER(_fmt, _arg...) \
+	PSB_DEBUG(PSB_D_RENDER, _fmt, ##_arg)
+#define PSB_DEBUG_REG(_fmt, _arg...) \
+	PSB_DEBUG(PSB_D_REG, _fmt, ##_arg)
+#define PSB_DEBUG_MSVDX(_fmt, _arg...) \
+	PSB_DEBUG(PSB_D_MSVDX, _fmt, ##_arg)
+#define PSB_DEBUG_TOPAZ(_fmt, _arg...) \
+	PSB_DEBUG(PSB_D_TOPAZ, _fmt, ##_arg)
+#define VSP_DEBUG(_fmt, _arg...) \
+	PSB_DEBUG(VSP_D_LOG, "VSP: "_fmt, ##_arg)
+#define VSP_PERF(_fmt, _arg...) \
+	PSB_DEBUG(VSP_D_PERF, "VSP PERFORMANCE: "_fmt, ##_arg)
+/* force to print WARN msg */
+#define PSB_DEBUG_WARN(_fmt, _arg...) \
+	PSB_DEBUG(PSB_D_WARN, _fmt, ##_arg)
+#define PSB_DEBUG_MIPI(_fmt, _arg...) \
+	PSB_DEBUG(PSB_D_MIPI, _fmt, ##_arg)
+#define PSB_DEBUG_BL(_fmt, _arg...) \
+        PSB_DEBUG(PSB_D_BL, _fmt, ##_arg)
+#define PSB_DEBUG_DPST(_fmt, _arg...) \
+        PSB_DEBUG(PSB_D_DPST, _fmt, ##_arg)
+
+#if DRM_DEBUG_CODE
+#define PSB_DEBUG(_flag, _fmt, _arg...)					\
+	do {								\
+		if (unlikely((_flag) & drm_psb_debug))			\
+			printk(KERN_INFO				\
+			       "[psb:0x%02x:%s] " _fmt , _flag,		\
+			       __func__ , ##_arg);			\
+	} while (0)
+#else
+#define PSB_DEBUG(_fmt, _arg...)     do { } while (0)
+#endif
+
+/*
+ *Utilities
+ */
+#define DRM_DRIVER_PRIVATE_T struct drm_psb_private
+
+#define MAX_READ_COUNT		0x3
+static inline uint32_t REGISTER_READ(struct drm_device *dev, uint32_t reg)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	int i = 0;
+	int reg_val = ioread32(dev_priv->vdc_reg + (reg));
+
+	/* we might need to re-read registers if video mode panel */
+	if (dev_priv->bRereadZero) {
+		if (!reg_val) {
+			for (i = 0; i < MAX_READ_COUNT; i++) {
+				reg_val = ioread32(dev_priv->vdc_reg + (reg));
+				if (reg_val)
+					break;
+			}
+		}
+
+		if (i == MAX_READ_COUNT) {
+			PSB_DEBUG_REG("Register (reg = 0x%x) read failure.\n",
+					reg);
+		}
+	}
+
+	PSB_DEBUG_REG("reg = 0x%x. reg_val = 0x%x. \n", reg, reg_val);
+	return reg_val;
+}
+
+#define REG_READ(reg)	       REGISTER_READ(dev, (reg))
+
+
+static inline void REGISTER_WRITE(struct drm_device *dev, uint32_t reg,
+				  uint32_t val)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	if ((reg < 0x70084 || reg > 0x70088) && (reg < 0xa000 || reg > 0xa3ff))
+		PSB_DEBUG_REG("reg = 0x%x, val = 0x%x.\n", reg, val);
+	iowrite32((val), dev_priv->vdc_reg + (reg));
+}
+
+#define REG_WRITE(reg, val)	REGISTER_WRITE(dev, (reg), (val))
+
+static inline void REGISTER_WRITE16(struct drm_device *dev,
+				    uint32_t reg, uint32_t val)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	PSB_DEBUG_REG("reg = 0x%x, val = 0x%x. \n", reg, val);
+
+	iowrite16((val), dev_priv->vdc_reg + (reg));
+}
+
+#define REG_WRITE16(reg, val)	  REGISTER_WRITE16(dev, (reg), (val))
+
+static inline void REGISTER_WRITE8(struct drm_device *dev,
+				   uint32_t reg, uint32_t val)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	PSB_DEBUG_REG("reg = 0x%x, val = 0x%x. \n", reg, val);
+
+	iowrite8((val), dev_priv->vdc_reg + (reg));
+}
+
+#define REG_WRITE8(reg, val)	 REGISTER_WRITE8(dev, (reg), (val))
+
+#define PSB_ALIGN_TO(_val, _align) \
+  (((_val) + ((_align) - 1)) & ~((_align) - 1))
+
+#define PSB_WVDC32(_val, _offs)		REG_WRITE(_offs, _val)
+#define PSB_RVDC32(_offs)		REG_READ(_offs)
+
+static inline uint32_t RGX_REGISTER_READ(struct drm_device *dev, uint32_t reg)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       int reg_val = ioread32(dev_priv->rgx_reg + (reg));
+       PSB_DEBUG_REG("rgx reg = 0x%x. reg_val = 0x%x. \n", reg, reg_val);
+       return reg_val;
+}
+
+#define RGX_REG_READ(reg)             RGX_REGISTER_READ(dev, (reg))
+static inline void RGX_REGISTER_WRITE(struct drm_device *dev, uint32_t reg,
+						uint32_t val)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       PSB_DEBUG_REG("rgx reg = 0x%x, val = 0x%x. \n", reg, val);
+
+       iowrite32((val), dev_priv->rgx_reg + (reg));
+}
+
+#define RGX_REG_WRITE(reg, val)        RGX_REGISTER_WRITE(dev, (reg), (val))
+
+static inline uint32_t WRAPPER_REGISTER_READ(struct drm_device *dev, uint32_t reg)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       int reg_val = ioread32(dev_priv->wrapper_reg + (reg));
+       PSB_DEBUG_REG("wrapper reg = 0x%x. reg_val = 0x%x. \n", reg, reg_val);
+       return reg_val;
+}
+
+#define WRAPPER_REG_READ(reg)             WRAPPER_REGISTER_READ(dev, (reg))
+static inline void WRAPPER_REGISTER_WRITE(struct drm_device *dev, uint32_t reg,
+						uint32_t val)
+{
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       PSB_DEBUG_REG("wrapper reg = 0x%x, val = 0x%x. \n", reg, val);
+
+       iowrite32((val), dev_priv->wrapper_reg + (reg));
+}
+
+#define WRAPPER_REG_WRITE(reg, val)        WRAPPER_REGISTER_WRITE(dev, (reg), (val))
+
+#define PSB_ALPL(_val, _base)			\
+  (((_val) >> (_base ## _ALIGNSHIFT)) << (_base ## _SHIFT))
+#define PSB_ALPLM(_val, _base)			\
+  ((((_val) >> (_base ## _ALIGNSHIFT)) << (_base ## _SHIFT)) & (_base ## _MASK))
+
+extern int drm_psb_ospm;
+extern int drm_psb_cpurelax;
+extern int drm_psb_udelaydivider;
+extern int drm_psb_gl3_enable;
+extern int drm_psb_topaz_clockgating;
+extern int drm_vsp_burst;
+extern int drm_vsp_force_up_freq;
+extern int drm_vsp_force_down_freq;
+extern int drm_vsp_single_int;
+extern int drm_vec_force_up_freq;
+extern int drm_vec_force_down_freq;
+
+extern int drm_decode_flag;
+
+#define PSB_DEFAULT_HDMI_FB_WIDTH  1920
+#define PSB_DEFAULT_HDMI_FB_HEIGHT 1080
+
+extern char HDMI_EDID[20];
+extern int hdmi_state;
+extern int drm_vsp_vpp_batch_cmd;
+extern int drm_video_sepkey;
+
+/*
+ * set cpu_relax = 1 in sysfs to use cpu_relax instead of udelay bysy loop
+ * set udelay_divider to reduce the udelay values,e.g.= 10, reduce 10 times
+ */
+#define PSB_UDELAY(usec)                        \
+do {                                            \
+	if (drm_psb_cpurelax == 0)              \
+		DRM_UDELAY(usec / drm_psb_udelaydivider);   \
+	else                                    \
+		cpu_relax();                    \
+} while (0)
+
+#endif
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/psb_fb.c b/drivers/external_drivers/intel_media/display/tng/drv/psb_fb.c
new file mode 100644
index 0000000..faeaa51
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/psb_fb.c
@@ -0,0 +1,1002 @@
+/**************************************************************************
+ * Copyright (c) 2007, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/console.h>
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_crtc.h>
+
+#include "psb_drv.h"
+#include "psb_intel_reg.h"
+#include "psb_intel_drv.h"
+#include "psb_ttm_userobj_api.h"
+#include "psb_fb.h"
+#include "pvrsrv_interface.h"
+
+#ifdef CONFIG_SUPPORT_MIPI
+#include "mdfld_dsi_dbi.h"
+#include "mdfld_dsi_output.h"
+#endif
+#include "mdfld_output.h"
+
+#include <linux/compat.h>
+
+#include "android_hdmi.h"
+
+static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb);
+static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
+					struct drm_file *file_priv,
+					unsigned int *handle);
+
+static int psbfb_set_recovery_mode_hdmi(struct fb_info *info,
+					struct psb_fbdev *fbdev);
+
+static const struct drm_framebuffer_funcs psb_fb_funcs = {
+	.destroy = psb_user_framebuffer_destroy,
+	.create_handle = psb_user_framebuffer_create_handle,
+};
+
+#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)
+
+void *psbfb_vdc_reg(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv;
+	dev_priv = (struct drm_psb_private *)dev->dev_private;
+	return dev_priv->vdc_reg;
+}
+
+/*EXPORT_SYMBOL(psbfb_vdc_reg); */
+
+static int psbfb_setcolreg(unsigned regno, unsigned red, unsigned green,
+			   unsigned blue, unsigned transp, struct fb_info *info)
+{
+	struct psb_fbdev *fbdev = info->par;
+	struct drm_framebuffer *fb = fbdev->psb_fb_helper.fb;
+	uint32_t v;
+
+	if (!fb)
+		return -ENOMEM;
+
+	if (regno > 255)
+		return 1;
+
+	red = CMAP_TOHW(red, info->var.red.length);
+	blue = CMAP_TOHW(blue, info->var.blue.length);
+	green = CMAP_TOHW(green, info->var.green.length);
+	transp = CMAP_TOHW(transp, info->var.transp.length);
+
+	v = (red << info->var.red.offset) |
+	    (green << info->var.green.offset) |
+	    (blue << info->var.blue.offset) |
+	    (transp << info->var.transp.offset);
+
+	if (regno < 16) {
+		switch (fb->bits_per_pixel) {
+		case 16:
+			((uint32_t *) info->pseudo_palette)[regno] = v;
+			break;
+		case 24:
+		case 32:
+			((uint32_t *) info->pseudo_palette)[regno] = v;
+			break;
+		}
+	}
+
+	return 0;
+}
+
+static int psbfb_kms_off(struct drm_device *dev, int suspend)
+{
+	struct drm_framebuffer *fb = 0;
+	struct psb_framebuffer *psbfb = to_psb_fb(fb);
+
+	DRM_DEBUG("psbfb_kms_off_ioctl\n");
+
+	if (!psbfb) {
+		DRM_ERROR("psbfb is NULL\n");
+		return 0;
+	}
+
+	mutex_lock(&dev->mode_config.mutex);
+	list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
+		struct fb_info *info = psbfb->fbdev;
+
+		if (suspend) {
+			fb_set_suspend(info, 1);
+			drm_fb_helper_blank(FB_BLANK_POWERDOWN, info);
+		}
+	}
+	mutex_unlock(&dev->mode_config.mutex);
+	return 0;
+}
+
+int psbfb_kms_off_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	int ret;
+
+	if (drm_psb_no_fb)
+		return 0;
+	console_trylock();
+	ret = psbfb_kms_off(dev, 0);
+	console_unlock();
+
+	return ret;
+}
+
+static int psbfb_kms_on(struct drm_device *dev, int resume)
+{
+	struct drm_framebuffer *fb = 0;
+	struct psb_framebuffer *psbfb = to_psb_fb(fb);
+
+	DRM_DEBUG("psbfb_kms_on_ioctl\n");
+
+	if (!psbfb) {
+		DRM_ERROR("psbfb is NULL\n");
+		return 0;
+	}
+
+	mutex_lock(&dev->mode_config.mutex);
+	list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
+		struct fb_info *info = psbfb->fbdev;
+
+		if (resume) {
+			fb_set_suspend(info, 0);
+			drm_fb_helper_blank(FB_BLANK_UNBLANK, info);
+		}
+	}
+	mutex_unlock(&dev->mode_config.mutex);
+
+	return 0;
+}
+
+int psbfb_kms_on_ioctl(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
+{
+	int ret;
+
+	if (drm_psb_no_fb)
+		return 0;
+	console_trylock();
+	ret = psbfb_kms_on(dev, 0);
+	console_unlock();
+	drm_helper_disable_unused_functions(dev);
+	return ret;
+}
+
+void psbfb_suspend(struct drm_device *dev)
+{
+	console_trylock();
+	psbfb_kms_off(dev, 1);
+	console_unlock();
+}
+
+void psbfb_resume(struct drm_device *dev)
+{
+	console_trylock();
+	psbfb_kms_on(dev, 1);
+	console_unlock();
+	drm_helper_disable_unused_functions(dev);
+}
+
+static int psbfb_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	int page_num = 0;
+	int i;
+	unsigned long address = 0;
+	int ret;
+	unsigned long pfn;
+	struct psb_framebuffer *psbfb =
+	    (struct psb_framebuffer *)vma->vm_private_data;
+	struct drm_device *dev = psbfb->base.dev;
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *)dev->dev_private;
+	struct psb_gtt *pg = dev_priv->pg;
+	unsigned long phys_addr = (unsigned long)pg->stolen_base;
+
+	page_num = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+
+	address = (unsigned long)vmf->virtual_address;
+
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	for (i = 0; i < page_num; i++) {
+		pfn = (phys_addr >> PAGE_SHIFT);
+		/*phys_to_pfn(phys_addr);*/
+
+		ret = vm_insert_mixed(vma, address, pfn);
+		if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
+			break;
+		else if (unlikely(ret != 0)) {
+			ret = (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
+			return ret;
+		}
+
+		address += PAGE_SIZE;
+		phys_addr += PAGE_SIZE;
+	}
+
+	return VM_FAULT_NOPAGE;
+}
+
+static void psbfb_vm_open(struct vm_area_struct *vma)
+{
+	DRM_DEBUG("vm_open\n");
+}
+
+static void psbfb_vm_close(struct vm_area_struct *vma)
+{
+	DRM_DEBUG("vm_close\n");
+}
+
+static struct vm_operations_struct psbfb_vm_ops = {
+	.fault = psbfb_vm_fault,
+	.open = psbfb_vm_open,
+	.close = psbfb_vm_close
+};
+
+static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+	struct psb_fbdev *fbdev = info->par;
+	struct psb_framebuffer *psbfb = fbdev->pfb;
+	char *fb_screen_base = NULL;
+	struct drm_device *dev = psbfb->base.dev;
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *)dev->dev_private;
+	struct psb_gtt *pg = dev_priv->pg;
+
+	if (vma->vm_pgoff != 0)
+		return -EINVAL;
+	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
+		return -EINVAL;
+
+	if (!psbfb->addr_space)
+		psbfb->addr_space = vma->vm_file->f_mapping;
+
+	fb_screen_base = (char *)info->screen_base;
+
+	DRM_DEBUG("vm_pgoff 0x%lx, screen base %p vram_addr %p\n",
+		  vma->vm_pgoff, fb_screen_base, pg->vram_addr);
+
+	/*if using stolen memory, */
+	if (fb_screen_base == pg->vram_addr) {
+		vma->vm_ops = &psbfb_vm_ops;
+		vma->vm_private_data = (void *)psbfb;
+		vma->vm_flags |=
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)
+			VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
+#else
+			VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
+#endif
+	} else {
+		/*using IMG meminfo, can I use pvrmmap to map it? */
+
+	}
+
+	return 0;
+}
+
+/* Disable the obsolete fb blank callback function*/
+static int fb_blank_void(int blank_mode, struct fb_info *info)
+{
+	return 0;
+}
+
+#define FB_MIN_WIDTH            1280
+#define FBIO_PSB_SET_RGBX	_IOWR('F', 0x42, struct fb_var_screeninfo)
+#define FBIO_PSB_SET_RMODE      _IOWR('F', 0x43, struct fb_var_screeninfo)
+
+static int psb_fb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
+{
+	struct psb_fbdev *fbdev = info->par;
+	struct psb_framebuffer *psbfb = fbdev->pfb;
+	struct drm_device *dev = psbfb->base.dev;
+	int ret = 0;
+
+	switch(cmd) {
+		case FBIO_PSB_SET_RGBX:
+			REG_WRITE(DSPBCNTR, REG_READ(DSPBCNTR)|0xb8000000);
+			REG_WRITE(DSPBSURF, REG_READ(DSPBSURF));
+			break;
+		case FBIO_PSB_SET_RMODE:
+			/* set recovery mode if below 1280 */
+			ret = psbfb_set_recovery_mode_hdmi(info, fbdev);
+			break;
+		default:
+			return -ENOTTY;
+	}
+
+	return ret;
+}
+
+static struct fb_ops psbfb_ops = {
+	.owner = THIS_MODULE,
+	.fb_check_var = drm_fb_helper_check_var,
+	.fb_set_par = drm_fb_helper_set_par,
+	.fb_blank = fb_blank_void,
+	.fb_setcolreg = psbfb_setcolreg,
+	.fb_fillrect = cfb_fillrect,
+	.fb_copyarea = cfb_copyarea,
+	.fb_imageblit = cfb_imageblit,
+	.fb_mmap = psbfb_mmap,
+	.fb_ioctl = psb_fb_ioctl,
+#ifdef CONFIG_COMPAT
+	.fb_compat_ioctl = psb_fb_ioctl,
+#endif
+};
+
+static struct drm_framebuffer *psb_framebuffer_create(struct drm_device *dev,
+	struct drm_mode_fb_cmd2 *r)
+{
+	struct psb_framebuffer *fb;
+	int ret;
+
+	fb = kzalloc(sizeof(*fb), GFP_KERNEL);
+	if (!fb)
+		return NULL;
+
+	ret = drm_framebuffer_init(dev, &fb->base, &psb_fb_funcs);
+
+	if (ret)
+		goto err;
+
+	drm_helper_mode_fill_fb_struct(&fb->base, r);
+
+	return &fb->base;
+
+ err:
+	kfree(fb);
+	return NULL;
+}
+
+static struct drm_framebuffer *psb_user_framebuffer_create(
+	struct drm_device *dev, struct drm_file *filp,
+	struct drm_mode_fb_cmd2 *r)
+{
+	struct psb_framebuffer *psbfb;
+	struct drm_framebuffer *fb;
+	struct fb_info *info;
+	struct drm_psb_private *dev_priv
+	    = (struct drm_psb_private *)dev->dev_private;
+	struct psb_fbdev *fbdev = dev_priv->fbdev;
+	struct psb_gtt *pg = dev_priv->pg;
+	uint64_t size;
+	uint32_t page_offset;
+	uint32_t user_virtual_addr = (uint32_t) r->handles[0];
+	int ret;
+
+	size = r->height * r->pitches[0];
+	if (size < r->height * r->pitches[0])
+		return ERR_PTR(-ENOMEM);
+
+	/* JB: TODO not drop, refcount buffer */
+	/* return psb_framebuffer_create(dev, r, bo); */
+
+	fb = psb_framebuffer_create(dev, r);
+	if (!fb) {
+		DRM_ERROR("failed to allocate fb.\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	psbfb = to_psb_fb(fb);
+	psbfb->size = size;
+	psbfb->hKernelMemInfo = 0;
+	psbfb->user_virtual_addr = user_virtual_addr;
+	psbfb->stolen_base = pg->stolen_base;
+	psbfb->vram_addr = pg->vram_addr;
+	psbfb->tt_pages =
+	    (pg->gatt_pages <
+	     PSB_TT_PRIV0_PLIMIT) ? pg->gatt_pages : PSB_TT_PRIV0_PLIMIT;
+
+	/* map GTT */
+	ret = psb_gtt_map_vaddr(dev, user_virtual_addr, size, 0, &page_offset);
+	if (ret) {
+		DRM_ERROR("Can not map cpu address (%x) to GTT handle\n",
+			  user_virtual_addr);
+		psbfb->offset = 0;
+	} else
+		psbfb->offset =  page_offset << PAGE_SHIFT;
+
+	info = framebuffer_alloc(0, &dev->pdev->dev);
+	if (!info)
+		return ERR_PTR(-ENOMEM);
+
+	info->par = fbdev;
+	info->screen_base = pg->vram_addr;
+
+	strcpy(info->fix.id, "psbfb");
+
+	info->flags = FBINFO_DEFAULT;
+	info->fbops = &psbfb_ops;
+
+	info->fix.smem_start = dev->mode_config.fb_base;
+	info->fix.smem_len = size;
+
+	info->screen_size = size;
+
+	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+	drm_fb_helper_fill_var(info, &fbdev->psb_fb_helper, fb->width,
+			       fb->height);
+
+	info->fix.mmio_start = pci_resource_start(dev->pdev, 0);
+	info->fix.mmio_len = pci_resource_len(dev->pdev, 0);
+
+	info->pixmap.size = 64 * 1024;
+	info->pixmap.buf_align = 8;
+	info->pixmap.access_align = 32;
+	info->pixmap.flags = FB_PIXMAP_SYSTEM;
+	info->pixmap.scan_align = 1;
+
+	psbfb->fbdev = info;
+	fbdev->pfb = psbfb;
+
+	fbdev->psb_fb_helper.fb = fb;
+	fbdev->psb_fb_helper.fbdev = info;
+
+	DCChangeFrameBuffer(dev, psbfb);
+
+	return fb;
+}
+
+static int psbfb_set_recovery_mode_hdmi(struct fb_info *info, struct psb_fbdev *fbdev)
+{
+	struct drm_device *dev = fbdev->psb_fb_helper.dev;
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *)dev->dev_private;
+	int ret = 0;
+	int i;
+	struct drm_fb_helper *psb_fb_helper = (struct drm_fb_helper *)fbdev;
+	struct drm_fb_helper_crtc *fb_crtc;
+	struct drm_display_mode *desired_mode = NULL;
+	struct drm_crtc *crtc;
+	struct fb_var_screeninfo var;
+
+	crtc = dev_priv->pipe_to_crtc_mapping[1];
+	for (i = 0; i < psb_fb_helper->crtc_count; i++) {
+		fb_crtc = &psb_fb_helper->crtc_info[i];
+		if (fb_crtc != NULL ) {
+			desired_mode = psb_fb_helper->crtc_info[i].desired_mode;
+			if (desired_mode) {
+				DRM_ERROR("crtc %d, width %d, heigth %d\n",
+					i, desired_mode->hdisplay, desired_mode->vdisplay);
+				ret = 0; /* prev loop iteration could have changed */
+				break;
+			} else {
+				DRM_ERROR("crtc %d, no desired mode\n", i);
+				ret = -ENODEV;
+			}
+		} else {
+			DRM_ERROR("crtc - no modes\n");
+			ret = -ENODEV;
+		}
+	}
+
+	if (ret != 0 )
+		return ret;
+
+	if (desired_mode && desired_mode->hdisplay >= FB_MIN_WIDTH)
+		return ret;
+
+	if (desired_mode ) {
+		memcpy(&var, &info->var, sizeof(struct fb_var_screeninfo));
+		var.xres = desired_mode->hdisplay;
+		var.yres = desired_mode->vdisplay;
+		fb_set_var(info, &var);
+		info->fix.line_length = ALIGN(var.xres * (var.bits_per_pixel / 8), 64);
+		info->screen_size = ALIGN(var.yres * info->fix.line_length, PAGE_SIZE);
+		info->fix.smem_len = info->screen_size;
+		REG_WRITE(DSPBSTRIDE, info->fix.line_length);
+		ret = 0;
+	}
+	else
+		ret = -ENOTTY;
+
+	return ret;
+}
+
+
+static int psbfb_create(struct psb_fbdev *fbdev,
+			struct drm_fb_helper_surface_size *sizes)
+{
+	struct drm_device *dev = fbdev->psb_fb_helper.dev;
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *)dev->dev_private;
+	struct psb_gtt *pg = dev_priv->pg;
+	struct fb_info *info;
+	struct drm_framebuffer *fb;
+	struct psb_framebuffer *psbfb;
+	struct drm_mode_fb_cmd2 mode_cmd;
+	struct device *device = &dev->pdev->dev;
+	int size;
+	int ret;
+#ifdef CONFIG_SUPPORT_MIPI
+	struct mdfld_dsi_encoder *dsi_encoder =
+		MDFLD_DSI_ENCODER_WITH_DRM_ENABLE(dev_priv->encoder0);
+	struct mdfld_dsi_config *dsi_config =
+		mdfld_dsi_encoder_get_config(dsi_encoder);
+	struct drm_display_mode *fixed_mode;
+#endif
+
+#ifdef CONFIG_SUPPORT_MIPI
+	if (!dsi_config) {
+		DRM_ERROR("Failed to get encoder config\n");
+		return -EINVAL;
+	}
+
+	fixed_mode = dsi_config->fixed_mode;
+
+	/* PR2 panel must have 200 pixel dummy clocks,
+	 * So the display timing should be 800x1024, and surface
+	 * is 608x1024(64 bits align), or the information between android
+	 * and Linux frame buffer is not consistent.
+	 */
+
+	if (get_panel_type(dev, 0) == TMD_6X10_VID)
+		mode_cmd.width = fixed_mode->hdisplay - 200;
+	else
+		mode_cmd.width = fixed_mode->hdisplay;
+	mode_cmd.height = fixed_mode->vdisplay;
+#else
+	mode_cmd.width = PSB_DEFAULT_HDMI_FB_WIDTH;
+	mode_cmd.height = PSB_DEFAULT_HDMI_FB_HEIGHT;
+	DRM_INFO("sizes fb [%d, %d], surface [%d %d]\n",
+				sizes->fb_width, sizes->fb_height,
+				sizes->surface_width, sizes->surface_height);
+#endif
+
+	mode_cmd.pitches[0] = mode_cmd.width * (sizes->surface_bpp >> 3);
+
+	/* HW requires pitch to be 64 byte aligned. */
+	mode_cmd.pitches[0] = ALIGN(mode_cmd.pitches[0], 64);
+
+	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+		sizes->surface_depth);
+
+	size = mode_cmd.pitches[0] * mode_cmd.height;
+	size = ALIGN(size, PAGE_SIZE);
+
+	mutex_lock(&dev->struct_mutex);
+
+	fb = psb_framebuffer_create(dev, &mode_cmd);
+	if (!fb) {
+		DRM_ERROR("failed to allocate fb.\n");
+		ret = -ENOMEM;
+		goto out_err1;
+	}
+
+	psbfb = to_psb_fb(fb);
+	psbfb->size = size;
+	psbfb->depth = fb->depth;
+	psbfb->hKernelMemInfo = NULL;
+	psbfb->stolen_base = pg->stolen_base;
+	psbfb->vram_addr = pg->vram_addr;
+	psbfb->tt_pages =
+	    (pg->gatt_pages <
+	     PSB_TT_PRIV0_PLIMIT) ? pg->gatt_pages : PSB_TT_PRIV0_PLIMIT;
+
+	info = framebuffer_alloc(sizeof(struct psb_fbdev), device);
+	if (!info) {
+		ret = -ENOMEM;
+		goto out_err0;
+	}
+
+	info->par = fbdev;
+
+	psbfb->fbdev = info;
+
+	fbdev->psb_fb_helper.fb = fb;
+	fbdev->psb_fb_helper.fbdev = info;
+	fbdev->pfb = psbfb;
+
+	strcpy(info->fix.id, "psbfb");
+
+	info->flags = FBINFO_DEFAULT;
+	info->fbops = &psbfb_ops;
+	info->fix.smem_start = dev->mode_config.fb_base;
+	info->fix.smem_len = size;
+	info->screen_base = (char *)pg->vram_addr;
+	info->screen_size = size;
+	//memset(info->screen_base, 0xf0, size);
+
+	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+
+#ifdef CONFIG_SUPPORT_MIPI
+	if (get_panel_type(dev, 0) == TMD_6X10_VID)
+		drm_fb_helper_fill_var(info, &fbdev->psb_fb_helper,
+				       fixed_mode->hdisplay - 200, fixed_mode->vdisplay);
+	else
+		drm_fb_helper_fill_var(info, &fbdev->psb_fb_helper,
+				       fixed_mode->hdisplay, fixed_mode->vdisplay);
+#else
+		drm_fb_helper_fill_var(info, &fbdev->psb_fb_helper,
+				       mode_cmd.width, mode_cmd.height);
+#endif
+
+	info->fix.mmio_start = pci_resource_start(dev->pdev, 0);
+	info->fix.mmio_len = pci_resource_len(dev->pdev, 0);
+#if 1
+	info->pixmap.size = 64 * 1024;
+	info->pixmap.buf_align = 8;
+	info->pixmap.access_align = 32;
+	info->pixmap.flags = FB_PIXMAP_SYSTEM;
+	info->pixmap.scan_align = 1;
+#else
+	info->pixmap.size = 4096;
+	info->pixmap.buf_align = 4;
+	info->pixmap.scan_align = 1;
+	info->pixmap.access_align = 32;
+	info->pixmap.flags = FB_PIXMAP_SYSTEM;
+#endif
+
+	DRM_DEBUG("fb depth is %d\n", fb->depth);
+	DRM_DEBUG("   pitch is %d\n", fb->pitches[0]);
+
+	printk(KERN_INFO "allocated %dx%d fb\n", psbfb->base.width,
+	       psbfb->base.height);
+#if 0
+	/*power on hdmi*/
+	hdmi_power_on(dev);
+#endif
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+ out_err0:
+	fb->funcs->destroy(fb);
+ out_err1:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+static void psbfb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, u16 blue,
+			    int regno)
+{
+	DRM_DEBUG("%s\n", __func__);
+}
+
+static void psbfb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+			    u16 *blue, int regno)
+{
+	DRM_DEBUG("%s\n", __func__);
+}
+
+static int psbfb_probe(struct drm_fb_helper *helper,
+		       struct drm_fb_helper_surface_size *sizes)
+{
+	struct psb_fbdev *psb_fbdev = (struct psb_fbdev *)helper;
+	int new_fb = 0;
+	int ret;
+
+	PSB_DEBUG_ENTRY("\n");
+#if 1				/* FIXME MRFLD */
+	if (!helper) {
+		DRM_INFO(" helper == NULL.\n");
+		return 1;
+	}
+#endif				/* FIXME MRFLD */
+	if (!helper->fb) {
+		ret = psbfb_create(psb_fbdev, sizes);
+		if (ret)
+			return ret;
+
+		new_fb = 1;
+	}
+
+	return new_fb;
+}
+
+struct drm_fb_helper_funcs psb_fb_helper_funcs = {
+	.gamma_set = psbfb_gamma_set,
+	.gamma_get = psbfb_gamma_get,
+	.fb_probe = psbfb_probe,
+};
+
+int psb_fbdev_destroy(struct drm_device *dev, struct psb_fbdev *fbdev)
+{
+	struct fb_info *info;
+	struct psb_framebuffer *psbfb = fbdev->pfb;
+
+	if (fbdev->psb_fb_helper.fbdev) {
+		info = fbdev->psb_fb_helper.fbdev;
+		unregister_framebuffer(info);
+		memset(info->screen_base, 0x0, info->screen_size);
+		iounmap(info->screen_base);
+		framebuffer_release(info);
+	}
+
+	drm_fb_helper_fini(&fbdev->psb_fb_helper);
+
+	drm_framebuffer_cleanup(&psbfb->base);
+
+	return 0;
+}
+
+int psb_fbdev_init(struct drm_device *dev)
+{
+	struct psb_fbdev *fbdev;
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *)dev->dev_private;
+	int num_crtc;
+
+	fbdev = kzalloc(sizeof(struct psb_fbdev), GFP_KERNEL);
+	if (!fbdev) {
+		DRM_ERROR("no memory\n");
+		return -ENOMEM;
+	}
+
+	dev_priv->fbdev = fbdev;
+	fbdev->psb_fb_helper.funcs = &psb_fb_helper_funcs;
+
+	num_crtc = dev_priv->num_pipe;
+
+	drm_fb_helper_init(dev, &fbdev->psb_fb_helper, num_crtc,
+			   INTELFB_CONN_LIMIT);
+
+	drm_fb_helper_single_add_all_connectors(&fbdev->psb_fb_helper);
+	drm_fb_helper_initial_config(&fbdev->psb_fb_helper, 32);
+	drm_fb_helper_set_par(fbdev->psb_fb_helper.fbdev);
+	return 0;
+}
+
+void psb_fbdev_fini(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *)dev->dev_private;
+
+	if (!dev_priv->fbdev)
+		return;
+
+	psb_fbdev_destroy(dev, dev_priv->fbdev);
+	kfree(dev_priv->fbdev);
+	dev_priv->fbdev = NULL;
+}
+
+static void psbfb_output_poll_changed(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *)dev->dev_private;
+	struct psb_fbdev *fbdev = (struct psb_fbdev *)dev_priv->fbdev;
+	if (fbdev)
+		drm_fb_helper_hotplug_event(&fbdev->psb_fb_helper);
+}
+
+int psbfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
+{
+	struct fb_info *info;
+	struct psb_framebuffer *psbfb = to_psb_fb(fb);
+
+	if (drm_psb_no_fb)
+		return 0;
+
+	info = psbfb->fbdev;
+
+	DCChangeFrameBuffer(dev, psbfb);
+
+	if (info)
+		framebuffer_release(info);
+
+	return 0;
+}
+
+/*EXPORT_SYMBOL(psbfb_remove); */
+
+static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
+					      struct drm_file *file_priv,
+					      unsigned int *handle)
+{
+	/* JB: TODO currently we can't go from a bo to a handle with ttm */
+	(void)file_priv;
+	*handle = 0;
+	return 0;
+}
+
+static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+	struct drm_device *dev = fb->dev;
+	struct psb_framebuffer *psbfb = to_psb_fb(fb);
+
+	/*ummap gtt pages */
+	psb_gtt_unmap_vaddr(dev, psbfb->user_virtual_addr, psbfb->size);
+	if (psbfb->fbdev)
+		psbfb_remove(dev, fb);
+
+	/* JB: TODO not drop, refcount buffer */
+	drm_framebuffer_cleanup(fb);
+
+	kfree(fb);
+}
+
+static const struct drm_mode_config_funcs psb_mode_funcs = {
+	.fb_create = psb_user_framebuffer_create,
+	.output_poll_changed = psbfb_output_poll_changed,
+};
+
+static int psb_create_backlight_property(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv
+	    = (struct drm_psb_private *)dev->dev_private;
+	struct drm_property *backlight;
+
+	if (dev_priv->backlight_property)
+		return 0;
+
+	backlight = drm_property_create(dev,
+					DRM_MODE_PROP_RANGE, "backlight", 2);
+	if (!backlight) {
+		DRM_ERROR("backlight is NULL\n");
+		return 0;
+	}
+	backlight->values[0] = 0;
+	backlight->values[1] = 255;
+
+	dev_priv->backlight_property = backlight;
+
+	return 0;
+}
+
+static void psb_setup_outputs(struct drm_device *dev)
+{
+	struct drm_connector *connector;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	drm_mode_create_scaling_mode_property(dev);
+
+	psb_create_backlight_property(dev);
+
+	mdfld_output_init(dev);
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct psb_intel_output *psb_intel_output =
+		    to_psb_intel_output(connector);
+		struct drm_encoder *encoder = &psb_intel_output->enc;
+		int crtc_mask = 0, clone_mask = 0;
+
+		/* valid crtcs */
+		switch (psb_intel_output->type) {
+		case INTEL_OUTPUT_SDVO:
+			crtc_mask = ((1 << 0) | (1 << 1));
+			clone_mask = (1 << INTEL_OUTPUT_SDVO);
+			break;
+		case INTEL_OUTPUT_LVDS:
+			PSB_DEBUG_ENTRY("LVDS.\n");
+			crtc_mask = (1 << 1);
+
+			clone_mask = (1 << INTEL_OUTPUT_LVDS);
+			break;
+		case INTEL_OUTPUT_MIPI:
+			PSB_DEBUG_ENTRY("MIPI.\n");
+			crtc_mask = (1 << 0);
+			clone_mask = (1 << INTEL_OUTPUT_MIPI);
+			break;
+		case INTEL_OUTPUT_MIPI2:
+			PSB_DEBUG_ENTRY("MIPI2.\n");
+			crtc_mask = (1 << 2);
+			clone_mask = (1 << INTEL_OUTPUT_MIPI2);
+			break;
+		case INTEL_OUTPUT_HDMI:
+			PSB_DEBUG_ENTRY("HDMI.\n");
+			crtc_mask = (1 << 1);
+			clone_mask = (1 << INTEL_OUTPUT_HDMI);
+			break;
+		}
+
+		encoder->possible_crtcs = crtc_mask;
+		encoder->possible_clones =
+		    psb_intel_connector_clones(dev, clone_mask);
+
+	}
+}
+
+static void *psb_bo_from_handle(struct drm_device *dev,
+				struct drm_file *file_priv, unsigned int handle)
+{
+	struct drm_psb_private *dev_priv
+	    = (struct drm_psb_private *)dev->dev_private;
+	struct psb_fbdev *fbdev = dev_priv->fbdev;
+
+	return (void *)fbdev->pfb;
+}
+
+static size_t psb_bo_size(struct drm_device *dev, void *bof)
+{
+	struct psb_framebuffer *psbfb = (struct psb_framebuffer *)bof;
+
+	return (size_t) psbfb->size;
+}
+
+static size_t psb_bo_offset(struct drm_device *dev, void *bof)
+{
+	struct psb_framebuffer *psbfb = (struct psb_framebuffer *)bof;
+
+	return (size_t) psbfb->offset;
+}
+
+static int psb_bo_pin_for_scanout(struct drm_device *dev, void *bo)
+{
+	return 0;
+}
+
+static int psb_bo_unpin_for_scanout(struct drm_device *dev, void *bo)
+{
+	return 0;
+}
+
+void psb_modeset_init(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *)dev->dev_private;
+	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+	int i;
+
+	PSB_DEBUG_ENTRY("\n");
+	/* Init mm functions */
+	mode_dev->bo_from_handle = psb_bo_from_handle;
+	mode_dev->bo_size = psb_bo_size;
+	mode_dev->bo_offset = psb_bo_offset;
+	mode_dev->bo_pin_for_scanout = psb_bo_pin_for_scanout;
+	mode_dev->bo_unpin_for_scanout = psb_bo_unpin_for_scanout;
+
+	drm_mode_config_init(dev);
+
+	dev->mode_config.min_width = 0;
+	dev->mode_config.min_height = 0;
+
+	dev->mode_config.funcs = (void *)&psb_mode_funcs;
+
+	/* set memory base */
+	/* MRST and PSB should use BAR 2 */
+	pci_read_config_dword(dev->pdev, PSB_BSM,
+			      (uint32_t *) & (dev->mode_config.fb_base));
+
+	for (i = 0; i < dev_priv->num_pipe; i++)
+		psb_intel_crtc_init(dev, i, mode_dev);
+
+	dev->mode_config.max_width =
+	    dev->mode_config.num_crtc * MDFLD_PLANE_MAX_WIDTH;
+	dev->mode_config.max_height =
+	    dev->mode_config.num_crtc * MDFLD_PLANE_MAX_HEIGHT;
+
+	psb_setup_outputs(dev);
+
+	/* setup fbs */
+	/* drm_initial_config(dev); */
+}
+
+void psb_modeset_cleanup(struct drm_device *dev)
+{
+	mutex_lock(&dev->struct_mutex);
+
+	drm_kms_helper_poll_fini(dev);
+	psb_fbdev_fini(dev);
+
+	drm_mode_config_cleanup(dev);
+
+	mutex_unlock(&dev->struct_mutex);
+}
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/psb_fb.h b/drivers/external_drivers/intel_media/display/tng/drv/psb_fb.h
new file mode 100644
index 0000000..145094e
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/psb_fb.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2008, Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *      Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#ifndef _PSB_FB_H_
+#define _PSB_FB_H_
+
+#include <linux/version.h>
+#include <drm/drmP.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
+#include <drm/drm_fb_helper.h>
+#endif
+
+#include "psb_drv.h"
+
+#include "displayclass_interface.h"
+
+extern struct psb_framebuffer *psbfb;
+int MRSTLFBHandleChangeFB(struct drm_device *dev,
+			  struct psb_framebuffer *psbfb);
+
+struct psb_fbdev {
+	struct drm_fb_helper psb_fb_helper;
+	struct psb_framebuffer *pfb;
+};
+
+#define to_psb_fb(x) container_of(x, struct psb_framebuffer, base)
+
+extern int psb_intel_connector_clones(struct drm_device *dev, int type_mask);
+
+#endif
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/psb_gtt.c b/drivers/external_drivers/intel_media/display/tng/drv/psb_gtt.c
new file mode 100644
index 0000000..9e8c2d5
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/psb_gtt.c
@@ -0,0 +1,1588 @@
+/*
+ * Copyright (c) 2007, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
+ */
+
+#include <drm/drmP.h>
+#include "psb_drv.h"
+#include "pvrsrv_interface.h"
+#include "display_callbacks.h"
+
+static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
+{
+	uint32_t mask = PSB_PTE_VALID;
+
+	if (type & PSB_MMU_CACHED_MEMORY)
+		mask |= PSB_PTE_CACHED;
+	if (type & PSB_MMU_RO_MEMORY)
+		mask |= PSB_PTE_RO;
+	if (type & PSB_MMU_WO_MEMORY)
+		mask |= PSB_PTE_WO;
+
+	return (pfn << PAGE_SHIFT) | mask;
+}
+
+struct psb_gtt *psb_gtt_alloc(struct drm_device *dev)
+{
+	struct psb_gtt *tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+
+	if (!tmp)
+		return NULL;
+
+	init_rwsem(&tmp->sem);
+	tmp->dev = dev;
+
+	return tmp;
+}
+
+void mrfld_gtt_takedown(struct psb_gtt *pg, int free)
+{
+	if (!pg)
+		return;
+
+	if (pg->gtt_map) {
+		iounmap(pg->gtt_map);
+		pg->gtt_map = NULL;
+	}
+	if (free)
+		kfree(pg);
+}
+
+int mrfld_gtt_init(struct psb_gtt *pg, int resume)
+{
+	struct drm_device *dev = pg->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	unsigned gtt_pages;
+	unsigned long stolen_size, vram_stolen_size;
+	unsigned i, num_pages;
+	unsigned pfn_base;
+	uint32_t vram_pages;
+	uint32_t tt_pages;
+	uint32_t *ttm_gtt_map;
+
+	int ret = 0;
+	uint32_t pte;
+
+	pg->initialized = 1;
+
+	pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE);
+	/* fix me: video mmu has hw bug to access 0x0D0000000,
+	 * then make gatt start at 0x0e000,0000 */
+	pg->mmu_gatt_start = PSB_MEM_TT_START;
+	pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE)
+	    >> PAGE_SHIFT;
+
+	pci_read_config_dword(dev->pdev, MRFLD_BGSM, &pg->pge_ctl);
+	pg->gtt_phys_start = pg->pge_ctl & PAGE_MASK;
+
+	pci_read_config_dword(dev->pdev, MRFLD_MSAC, &gtt_pages);
+	printk(KERN_INFO "01 gtt_pages = 0x%x \n", gtt_pages);
+	gtt_pages &= _APERTURE_SIZE_MASK;
+	gtt_pages >>= _APERTURE_SIZE_POS;
+
+	printk(KERN_INFO "02 gtt_pages = 0x%x \n", gtt_pages);
+	switch (gtt_pages) {
+	case _1G_APERTURE:
+		gtt_pages = _1G_APERTURE_SIZE >> PAGE_SHIFT;
+		break;
+	case _512M_APERTURE:
+		gtt_pages = _512M_APERTURE_SIZE >> PAGE_SHIFT;
+		break;
+	case _256M_APERTURE:
+		gtt_pages = _256M_APERTURE_SIZE >> PAGE_SHIFT;
+		break;
+	default:
+		DRM_ERROR("%s, invalded aperture size.\n", __func__);
+		gtt_pages = _1G_APERTURE_SIZE >> PAGE_SHIFT;
+	}
+
+	gtt_pages >>= PAGE_SHIFT;
+	gtt_pages *= 4;
+
+	printk(KERN_INFO "03 gtt_pages = 0x%x \n", gtt_pages);
+	/* HW removed the PSB_BSM, SW/FW needs it. */
+	pci_read_config_dword(dev->pdev, PSB_BSM, &pg->stolen_base);
+	vram_stolen_size = pg->gtt_phys_start - pg->stolen_base - PAGE_SIZE;
+
+	/* Don't add CI & RAR share buffer space
+	 * managed by TTM to stolen_size */
+	stolen_size = vram_stolen_size;
+
+	printk(KERN_INFO "GMMADR(region 0) start: 0x%08x (%dM).\n",
+	       pg->gatt_start, pg->gatt_pages / 256);
+	printk(KERN_INFO "GTT (can map %dM RAM), and actual RAM base 0x%08x.\n",
+	       gtt_pages * 4, pg->gtt_phys_start);
+	printk(KERN_INFO "Stole memory information \n");
+	printk(KERN_INFO "      base in RAM: 0x%x \n", pg->stolen_base);
+	printk(KERN_INFO
+	       "      size: %luK, calculated by (GTT RAM base) - (Stolen base).\n",
+	       vram_stolen_size / 1024);
+
+	if (resume && (gtt_pages != pg->gtt_pages) &&
+	    (stolen_size != pg->stolen_size)) {
+		DRM_ERROR("GTT resume error.\n");
+		ret = -EINVAL;
+		goto out_err;
+	}
+
+	pg->gtt_pages = gtt_pages;
+	pg->stolen_size = stolen_size;
+	pg->vram_stolen_size = vram_stolen_size;
+	pg->gtt_map =
+	    ioremap_nocache(pg->gtt_phys_start, gtt_pages << PAGE_SHIFT);
+	if (!pg->gtt_map) {
+		DRM_ERROR("Failure to map gtt.\n");
+		ret = -ENOMEM;
+		goto out_err;
+	}
+
+	pg->vram_addr = ioremap_wc(pg->stolen_base, stolen_size);
+	if (!pg->vram_addr) {
+		DRM_ERROR("Failure to map stolen base.\n");
+		ret = -ENOMEM;
+		goto out_err;
+	}
+
+	DRM_INFO("%s: vram kernel virtual address %p\n", __FUNCTION__,
+		 pg->vram_addr);
+
+	tt_pages = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ?
+	    (pg->gatt_pages) : PSB_TT_PRIV0_PLIMIT;
+
+	ttm_gtt_map = pg->gtt_map + tt_pages / 2;
+
+	/*
+	 * insert vram stolen pages.
+	 */
+
+	pfn_base = pg->stolen_base >> PAGE_SHIFT;
+	vram_pages = num_pages = vram_stolen_size >> PAGE_SHIFT;
+	printk(KERN_INFO
+	       "Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
+	       num_pages, pfn_base, 0);
+	for (i = 0; i < num_pages; ++i) {
+		pte = psb_gtt_mask_pte(pfn_base + i, 0);
+		iowrite32(pte, pg->gtt_map + i);
+	}
+
+	/*
+	 * Init rest of gtt managed by IMG.
+	 */
+	pfn_base = page_to_pfn(dev_priv->scratch_page);
+	pte = psb_gtt_mask_pte(pfn_base, 0);
+	for (; i < tt_pages / 2 - 1; ++i)
+		iowrite32(pte, pg->gtt_map + i);
+
+	/*
+	 * Init rest of gtt managed by TTM.
+	 */
+
+	pfn_base = page_to_pfn(dev_priv->scratch_page);
+	pte = psb_gtt_mask_pte(pfn_base, 0);
+	printk(KERN_INFO
+	       "Initializing the rest of a total "
+	       "of %d gtt pages.\n", pg->gatt_pages);
+
+	for (i = 0; i < pg->gatt_pages - tt_pages / 2; ++i)
+		iowrite32(pte, ttm_gtt_map + i);
+	(void)ioread32(pg->gtt_map + i - 1);
+
+	return 0;
+
+ out_err:
+	mrfld_gtt_takedown(pg, 0);
+	return ret;
+}
+
+void psb_gtt_takedown(struct psb_gtt *pg, int free)
+{
+	struct drm_device *dev;
+	struct drm_psb_private *dev_priv;
+
+	if (!pg)
+		return;
+
+	dev = pg->dev;
+	dev_priv = dev->dev_private;
+
+	if (pg->gtt_map) {
+		iounmap(pg->gtt_map);
+		pg->gtt_map = NULL;
+	}
+	if (pg->initialized) {
+		pci_write_config_word(pg->dev->pdev, PSB_GMCH_CTRL,
+				      pg->gmch_ctrl);
+		PSB_WVDC32(pg->pge_ctl, PSB_PGETBL_CTL);
+		(void)PSB_RVDC32(PSB_PGETBL_CTL);
+	}
+	if (free)
+		kfree(pg);
+}
+
+int psb_gtt_init(struct psb_gtt *pg, int resume)
+{
+	struct drm_device *dev = pg->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	unsigned gtt_pages;
+	unsigned long stolen_size, vram_stolen_size;
+	unsigned i, num_pages;
+	unsigned pfn_base;
+	uint32_t vram_pages;
+	uint32_t tt_pages;
+	uint32_t *ttm_gtt_map;
+	uint32_t dvmt_mode = 0;
+
+	int ret = 0;
+	uint32_t pte;
+
+	pci_read_config_word(dev->pdev, PSB_GMCH_CTRL, &pg->gmch_ctrl);
+	pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
+			      pg->gmch_ctrl | _PSB_GMCH_ENABLED);
+
+	pg->pge_ctl = PSB_RVDC32(PSB_PGETBL_CTL);
+	PSB_WVDC32(pg->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
+	(void)PSB_RVDC32(PSB_PGETBL_CTL);
+
+	pg->initialized = 1;
+
+	pg->gtt_phys_start = pg->pge_ctl & PAGE_MASK;
+
+	pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE);
+	/* fix me: video mmu has hw bug to access 0x0D0000000,
+	 * then make gatt start at 0x0e000,0000 */
+	pg->mmu_gatt_start = PSB_MEM_TT_START;
+	pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE);
+	gtt_pages = pci_resource_len(dev->pdev, PSB_GTT_RESOURCE) >> PAGE_SHIFT;
+	pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE)
+	    >> PAGE_SHIFT;
+
+	pci_read_config_dword(dev->pdev, PSB_BSM, &pg->stolen_base);
+	vram_stolen_size = pg->gtt_phys_start - pg->stolen_base - PAGE_SIZE;
+
+	/* Don't add CI & RAR share buffer space
+	 * managed by TTM to stolen_size */
+	stolen_size = vram_stolen_size;
+
+
+	printk(KERN_INFO "GMMADR(region 0) start: 0x%08x (%dM).\n",
+	       pg->gatt_start, pg->gatt_pages / 256);
+	printk(KERN_INFO
+	       "GTTADR(region 3) start: 0x%08x (can map %dM RAM), and actual RAM base 0x%08x.\n",
+	       pg->gtt_start, gtt_pages * 4, pg->gtt_phys_start);
+	printk(KERN_INFO "Stole memory information \n");
+	printk(KERN_INFO "      base in RAM: 0x%x \n", pg->stolen_base);
+	printk(KERN_INFO
+	       "      size: %luK, calculated by (GTT RAM base) - (Stolen base), seems wrong\n",
+	       vram_stolen_size / 1024);
+	dvmt_mode = (pg->gmch_ctrl >> 4) & 0x7;
+	printk(KERN_INFO
+	       "      the correct size should be: %dM(dvmt mode=%d) \n",
+	       (dvmt_mode == 1) ? 1 : (2 << (dvmt_mode - 1)), dvmt_mode);
+
+	if (resume && (gtt_pages != pg->gtt_pages) &&
+	    (stolen_size != pg->stolen_size)) {
+		DRM_ERROR("GTT resume error.\n");
+		ret = -EINVAL;
+		goto out_err;
+	}
+
+	pg->gtt_pages = gtt_pages;
+	pg->stolen_size = stolen_size;
+	pg->vram_stolen_size = vram_stolen_size;
+	pg->gtt_map =
+	    ioremap_nocache(pg->gtt_phys_start, gtt_pages << PAGE_SHIFT);
+	if (!pg->gtt_map) {
+		DRM_ERROR("Failure to map gtt.\n");
+		ret = -ENOMEM;
+		goto out_err;
+	}
+
+	pg->vram_addr = ioremap_wc(pg->stolen_base, stolen_size);
+	if (!pg->vram_addr) {
+		DRM_ERROR("Failure to map stolen base.\n");
+		ret = -ENOMEM;
+		goto out_err;
+	}
+
+	DRM_INFO("%s: vram kernel virtual address %p\n", __FUNCTION__,
+		 pg->vram_addr);
+
+	tt_pages = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ?
+	    (pg->gatt_pages) : PSB_TT_PRIV0_PLIMIT;
+
+	ttm_gtt_map = pg->gtt_map + tt_pages / 2;
+
+	/*
+	 * insert vram stolen pages.
+	 */
+
+	pfn_base = pg->stolen_base >> PAGE_SHIFT;
+	vram_pages = num_pages = vram_stolen_size >> PAGE_SHIFT;
+	printk(KERN_INFO
+	       "Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
+	       num_pages, pfn_base, 0);
+	for (i = 0; i < num_pages; ++i) {
+		pte = psb_gtt_mask_pte(pfn_base + i, 0);
+		iowrite32(pte, pg->gtt_map + i);
+	}
+
+	/*
+	 * Init rest of gtt managed by IMG.
+	 */
+	pfn_base = page_to_pfn(dev_priv->scratch_page);
+	pte = psb_gtt_mask_pte(pfn_base, 0);
+	for (; i < tt_pages / 2 - 1; ++i)
+		iowrite32(pte, pg->gtt_map + i);
+
+	/*
+	 * Init rest of gtt managed by TTM.
+	 */
+
+	pfn_base = page_to_pfn(dev_priv->scratch_page);
+	pte = psb_gtt_mask_pte(pfn_base, 0);
+	PSB_DEBUG_INIT("Initializing the rest of a total "
+		       "of %d gtt pages.\n", pg->gatt_pages);
+
+	for (i = 0; i < pg->gatt_pages - tt_pages / 2; ++i)
+		iowrite32(pte, ttm_gtt_map + i);
+	(void)ioread32(pg->gtt_map + i - 1);
+
+	return 0;
+
+ out_err:
+	psb_gtt_takedown(pg, 0);
+	return ret;
+}
+
+/*********************************
+ *  Added  (imported from 'mrst')  because memory allocated by PVRSRV
+ *      won't succeed using 'get_user_pages' in PVRSRVGetMeminfoPages
+ *      So, we will follow pfn's obtained by PVRSRVGetMeminfoPfn.
+ *
+ *  williamx.f.schmidt@intel.com
+ */
+static int psb_gtt_insert_pfn_list(struct psb_gtt *pg, unsigned long * pfn_list,
+				   unsigned offset_pages, unsigned num_pages,
+				   unsigned desired_tile_stride,
+				   unsigned hw_tile_stride, int type)
+{
+	unsigned rows = 1;
+	unsigned add;
+	unsigned row_add;
+	unsigned i;
+	unsigned j;
+	uint32_t *cur_page = NULL;
+	uint32_t pte;
+
+	if (!pg || !pfn_list)
+		return -EINVAL;
+
+	if (hw_tile_stride)
+		rows = num_pages / desired_tile_stride;
+	else
+		desired_tile_stride = num_pages;
+
+	add = desired_tile_stride;
+	row_add = hw_tile_stride;
+
+	down_read(&pg->sem);
+	for (i = 0; i < rows; ++i) {
+		cur_page = pg->gtt_map + offset_pages;
+		for (j = 0; j < desired_tile_stride; ++j) {
+			pte = psb_gtt_mask_pte(*pfn_list++, type);
+			iowrite32(pte, cur_page++);
+		}
+		offset_pages += add;
+	}
+	(void)ioread32(cur_page - 1);
+	up_read(&pg->sem);
+
+	return 0;
+}
+
+int psb_gtt_insert_pages(struct psb_gtt *pg, struct page **pages,
+			 unsigned offset_pages, unsigned num_pages,
+			 unsigned desired_tile_stride,
+			 unsigned hw_tile_stride, int type)
+{
+	unsigned rows = 1;
+	unsigned add;
+	unsigned row_add;
+	unsigned i;
+	unsigned j;
+	uint32_t *cur_page = NULL;
+	uint32_t pte;
+
+	if (hw_tile_stride)
+		rows = num_pages / desired_tile_stride;
+	else
+		desired_tile_stride = num_pages;
+
+	add = desired_tile_stride;
+	row_add = hw_tile_stride;
+
+	down_read(&pg->sem);
+	for (i = 0; i < rows; ++i) {
+		cur_page = pg->gtt_map + offset_pages;
+		for (j = 0; j < desired_tile_stride; ++j) {
+			pte = psb_gtt_mask_pte(page_to_pfn(*pages++), type);
+			iowrite32(pte, cur_page++);
+		}
+		offset_pages += add;
+	}
+	(void)ioread32(cur_page - 1);
+	up_read(&pg->sem);
+
+	return 0;
+}
+
+int psb_gtt_insert_phys_addresses(struct psb_gtt *pg,
+		IMG_SYS_PHYADDR *pPhysFrames,
+		unsigned offset_pages, unsigned num_pages,
+		int type)
+{
+	unsigned j;
+	uint32_t *cur_page = NULL;
+	uint32_t pte;
+
+	//printk("Allocatng IMG GTT mem at %x (pages %d)\n",offset_pages,num_pages);
+	down_read(&pg->sem);
+
+	cur_page = pg->gtt_map + offset_pages;
+	for (j = 0; j < num_pages; ++j) {
+		pte = psb_gtt_mask_pte((u32)pPhysFrames[j].uiAddr >> PAGE_SHIFT,
+				type);
+		iowrite32(pte, cur_page++);
+		//printk("PTE %d: %x/%x\n",j,(pPhysFrames-1)->uiAddr,pte);
+	}
+	(void)ioread32(cur_page - 1);
+
+	up_read(&pg->sem);
+
+	return 0;
+}
+
+int psb_gtt_remove_pages(struct psb_gtt *pg, unsigned offset_pages,
+			 unsigned num_pages, unsigned desired_tile_stride,
+			 unsigned hw_tile_stride, int rc_prot)
+{
+	struct drm_psb_private *dev_priv = pg->dev->dev_private;
+	unsigned rows = 1;
+	unsigned add;
+	unsigned row_add;
+	unsigned i;
+	unsigned j;
+	uint32_t *cur_page = NULL;
+	unsigned pfn_base = page_to_pfn(dev_priv->scratch_page);
+	uint32_t pte = psb_gtt_mask_pte(pfn_base, 0);
+
+	if (hw_tile_stride)
+		rows = num_pages / desired_tile_stride;
+	else
+		desired_tile_stride = num_pages;
+
+	add = desired_tile_stride;
+	row_add = hw_tile_stride;
+
+	if (rc_prot)
+		down_read(&pg->sem);
+	for (i = 0; i < rows; ++i) {
+		cur_page = pg->gtt_map + offset_pages;
+		for (j = 0; j < desired_tile_stride; ++j)
+			iowrite32(pte, cur_page++);
+
+		offset_pages += add;
+	}
+	(void)ioread32(cur_page - 1);
+	if (rc_prot)
+		up_read(&pg->sem);
+
+	return 0;
+}
+
+int psb_gtt_mm_init(struct psb_gtt *pg)
+{
+	struct psb_gtt_mm *gtt_mm;
+	struct drm_psb_private *dev_priv = pg->dev->dev_private;
+	struct drm_open_hash *ht;
+	struct drm_mm *mm;
+	int ret;
+	uint32_t tt_start;
+	uint32_t tt_size;
+
+	if (!pg || !pg->initialized) {
+		DRM_DEBUG("Invalid gtt struct\n");
+		return -EINVAL;
+	}
+
+	gtt_mm = kzalloc(sizeof(struct psb_gtt_mm), GFP_KERNEL);
+	if (!gtt_mm)
+		return -ENOMEM;
+
+	spin_lock_init(&gtt_mm->lock);
+
+	ht = &gtt_mm->hash;
+	ret = drm_ht_create(ht, 20);
+	if (ret) {
+		DRM_DEBUG("Create hash table failed(%d)\n", ret);
+		goto err_free;
+	}
+
+	tt_start = (pg->stolen_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	tt_start = (tt_start < pg->gatt_pages) ? tt_start : pg->gatt_pages;
+	tt_size = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ?
+	    (pg->gatt_pages) : PSB_TT_PRIV0_PLIMIT;
+
+	mm = &gtt_mm->base;
+
+	if (IS_MOFD(pg->dev)) {
+		pg->reserved_gtt_start = tt_start << PAGE_SHIFT;
+		/*reserve 1M for HW W/A*/ /* reserved region doesn't include TTM heap */
+		tt_start += MOFD_HW_WA_GTT_PAGES;
+	}
+
+	tt_size /= 2;
+
+	drm_mm_init(mm, tt_start, (tt_size - tt_start));
+
+	gtt_mm->count = 0;
+
+	dev_priv->gtt_mm = gtt_mm;
+
+	DRM_INFO("PSB GTT mem manager ready, tt_start %ld, tt_size %ld pages\n",
+		 (unsigned long)tt_start,
+		 (unsigned long)(tt_size - tt_start));
+	return 0;
+
+ err_free:
+	kfree(gtt_mm);
+	return ret;
+}
+
+/**
+ * Delete all hash entries;
+ */
+void psb_gtt_mm_takedown(void)
+{
+	return;
+}
+
+static int psb_gtt_mm_get_ht_by_pid_locked(struct psb_gtt_mm *mm,
+					   u32 tgid,
+					   struct psb_gtt_hash_entry **hentry)
+{
+	struct drm_hash_item *entry;
+	struct psb_gtt_hash_entry *psb_entry;
+	int ret;
+
+	ret = drm_ht_find_item(&mm->hash, tgid, &entry);
+	if (ret) {
+		DRM_DEBUG("Cannot find entry pid=%d\n", tgid);
+		return ret;
+	}
+
+	psb_entry = container_of(entry, struct psb_gtt_hash_entry, item);
+	if (!psb_entry) {
+		DRM_DEBUG("Invalid entry");
+		return -EINVAL;
+	}
+
+	*hentry = psb_entry;
+	return 0;
+}
+
+static int psb_gtt_mm_insert_ht_locked(struct psb_gtt_mm *mm,
+				       u32 tgid,
+				       struct psb_gtt_hash_entry *hentry)
+{
+	struct drm_hash_item *item;
+	int ret;
+
+	if (!hentry) {
+		DRM_DEBUG("Invalid parameters\n");
+		return -EINVAL;
+	}
+
+	item = &hentry->item;
+	item->key = tgid;
+
+	/**
+	 * NOTE: drm_ht_insert_item will perform such a check
+	ret = psb_gtt_mm_get_ht_by_pid(mm, tgid, &tmp);
+	if (!ret) {
+		DRM_DEBUG("Entry already exists for pid %ld\n", tgid);
+		return -EAGAIN;
+	}
+	*/
+
+	/*Insert the given entry */
+	ret = drm_ht_insert_item(&mm->hash, item);
+	if (ret) {
+		DRM_DEBUG("Insert failure\n");
+		return ret;
+	}
+
+	mm->count++;
+
+	return 0;
+}
+
+static int psb_gtt_mm_alloc_insert_ht(struct psb_gtt_mm *mm,
+				      u32 tgid,
+				      struct psb_gtt_hash_entry **entry)
+{
+	struct psb_gtt_hash_entry *hentry;
+	int ret;
+
+	/*if the hentry for this tgid exists, just get it and return */
+	spin_lock(&mm->lock);
+	ret = psb_gtt_mm_get_ht_by_pid_locked(mm, tgid, &hentry);
+	if (!ret) {
+		DRM_DEBUG("Entry for tgid %d exist, hentry %p\n",
+			  tgid, hentry);
+		*entry = hentry;
+		spin_unlock(&mm->lock);
+		return 0;
+	}
+	spin_unlock(&mm->lock);
+
+	DRM_DEBUG("Entry for tgid %d doesn't exist, will create it\n", tgid);
+
+	hentry = kzalloc(sizeof(struct psb_gtt_hash_entry), GFP_KERNEL);
+	if (!hentry) {
+		DRM_DEBUG("Kmalloc failled\n");
+		return -ENOMEM;
+	}
+
+	ret = drm_ht_create(&hentry->ht, 20);
+	if (ret) {
+		DRM_DEBUG("Create hash table failed\n");
+		return ret;
+	}
+
+	spin_lock(&mm->lock);
+	ret = psb_gtt_mm_insert_ht_locked(mm, tgid, hentry);
+	spin_unlock(&mm->lock);
+
+	if (!ret)
+		*entry = hentry;
+
+	return ret;
+}
+
+static struct psb_gtt_hash_entry *psb_gtt_mm_remove_ht_locked(struct psb_gtt_mm
+							      *mm, u32 tgid)
+{
+	struct psb_gtt_hash_entry *tmp;
+	int ret;
+
+	ret = psb_gtt_mm_get_ht_by_pid_locked(mm, tgid, &tmp);
+	if (ret) {
+		DRM_DEBUG("Cannot find entry pid %d\n", tgid);
+		return NULL;
+	}
+
+	/*remove it from ht */
+	drm_ht_remove_item(&mm->hash, &tmp->item);
+
+	mm->count--;
+
+	return tmp;
+}
+
+static int psb_gtt_mm_remove_free_ht_locked(struct psb_gtt_mm *mm, u32 tgid)
+{
+	struct psb_gtt_hash_entry *entry;
+
+	entry = psb_gtt_mm_remove_ht_locked(mm, tgid);
+
+	if (!entry) {
+		DRM_DEBUG("Invalid entry");
+		return -EINVAL;
+	}
+
+	/*delete ht */
+	drm_ht_remove(&entry->ht);
+
+	/*free this entry */
+	kfree(entry);
+	return 0;
+}
+
+static int
+psb_gtt_mm_get_mem_mapping_locked(struct drm_open_hash *ht,
+				  u32 key, struct psb_gtt_mem_mapping **hentry)
+{
+	struct drm_hash_item *entry;
+	struct psb_gtt_mem_mapping *mapping;
+	int ret;
+
+	ret = drm_ht_find_item(ht, key, &entry);
+	if (ret) {
+		DRM_DEBUG("Cannot find key %d\n", key);
+		return ret;
+	}
+
+	mapping = container_of(entry, struct psb_gtt_mem_mapping, item);
+	if (!mapping) {
+		DRM_DEBUG("Invalid entry\n");
+		return -EINVAL;
+	}
+
+	*hentry = mapping;
+	return 0;
+}
+
+static int
+psb_gtt_mm_insert_mem_mapping_locked(struct drm_open_hash *ht,
+				     u32 key,
+				     struct psb_gtt_mem_mapping *hentry)
+{
+	struct drm_hash_item *item;
+	struct psb_gtt_hash_entry *entry;
+	int ret;
+
+	if (!hentry || !ht) {
+		DRM_DEBUG("parameter error\n");
+		return -EINVAL;
+	}
+
+	item = &hentry->item;
+	item->key = key;
+
+	ret = drm_ht_insert_item(ht, item);
+	if (ret) {
+		DRM_DEBUG("insert_item failed\n");
+		return ret;
+	}
+
+	entry = container_of(ht, struct psb_gtt_hash_entry, ht);
+	if (entry)
+		entry->count++;
+
+	return 0;
+}
+
+static int
+psb_gtt_mm_alloc_insert_mem_mapping(struct psb_gtt_mm *mm,
+				    struct drm_open_hash *ht,
+				    u32 key,
+				    struct drm_mm_node *node,
+				    struct psb_gtt_mem_mapping **entry)
+{
+	struct psb_gtt_mem_mapping *mapping;
+	int ret;
+
+	if (!node || !ht) {
+		DRM_DEBUG("parameter error\n");
+		return -EINVAL;
+	}
+
+	/*try to get this mem_map */
+	spin_lock(&mm->lock);
+	ret = psb_gtt_mm_get_mem_mapping_locked(ht, key, &mapping);
+	if (!ret) {
+		DRM_DEBUG("mapping entry for key %d exists, entry %p\n",
+			  key, mapping);
+		*entry = mapping;
+		spin_unlock(&mm->lock);
+		return 0;
+	}
+	spin_unlock(&mm->lock);
+
+	DRM_DEBUG("Mapping entry for key %d doesn't exist, will create it\n",
+		  key);
+
+	mapping = kzalloc(sizeof(struct psb_gtt_mem_mapping), GFP_KERNEL);
+	if (!mapping) {
+		DRM_DEBUG("kmalloc failed\n");
+		return -ENOMEM;
+	}
+
+	mapping->node = node;
+
+	spin_lock(&mm->lock);
+	ret = psb_gtt_mm_insert_mem_mapping_locked(ht, key, mapping);
+	spin_unlock(&mm->lock);
+
+	if (!ret)
+		*entry = mapping;
+
+	return ret;
+}
+
+static struct psb_gtt_mem_mapping *psb_gtt_mm_remove_mem_mapping_locked(struct
+									drm_open_hash
+									*ht,
+									u32 key)
+{
+	struct psb_gtt_mem_mapping *tmp;
+	struct psb_gtt_hash_entry *entry;
+	int ret;
+
+	if (!ht) {
+		DRM_DEBUG("ht is NULL\n");
+		return NULL;
+	}
+
+	ret = psb_gtt_mm_get_mem_mapping_locked(ht, key, &tmp);
+	if (ret) {
+		DRM_DEBUG("Cannot find key %d\n", key);
+		return NULL;
+	}
+
+	drm_ht_remove_item(ht, &tmp->item);
+
+	entry = container_of(ht, struct psb_gtt_hash_entry, ht);
+	if (entry)
+		entry->count--;
+
+	return tmp;
+}
+
+static int psb_gtt_mm_remove_free_mem_mapping_locked(struct drm_open_hash *ht,
+						     u32 key,
+						     struct drm_mm_node **node)
+{
+	struct psb_gtt_mem_mapping *entry;
+
+	entry = psb_gtt_mm_remove_mem_mapping_locked(ht, key);
+	if (!entry) {
+		DRM_DEBUG("entry is NULL\n");
+		return -EINVAL;
+	}
+
+	*node = entry->node;
+
+	kfree(entry);
+	return 0;
+}
+
+static int psb_gtt_add_node(struct psb_gtt_mm *mm,
+			    u32 tgid,
+			    u32 key,
+			    struct drm_mm_node *node,
+			    struct psb_gtt_mem_mapping **entry)
+{
+	struct psb_gtt_hash_entry *hentry;
+	struct psb_gtt_mem_mapping *mapping;
+	int ret;
+
+	ret = psb_gtt_mm_alloc_insert_ht(mm, tgid, &hentry);
+	if (ret) {
+		DRM_DEBUG("alloc_insert failed\n");
+		return ret;
+	}
+
+	ret = psb_gtt_mm_alloc_insert_mem_mapping(mm,
+						  &hentry->ht,
+						  key, node, &mapping);
+	if (ret) {
+		DRM_DEBUG("mapping alloc_insert failed\n");
+		return ret;
+	}
+
+	*entry = mapping;
+
+	return 0;
+}
+
+static int psb_gtt_remove_node(struct psb_gtt_mm *mm,
+			       u32 tgid, u32 key, struct drm_mm_node **node)
+{
+	struct psb_gtt_hash_entry *hentry;
+	struct drm_mm_node *tmp;
+	int ret;
+
+	spin_lock(&mm->lock);
+	ret = psb_gtt_mm_get_ht_by_pid_locked(mm, tgid, &hentry);
+	if (ret) {
+		DRM_DEBUG("Cannot find entry for pid %d\n", tgid);
+		spin_unlock(&mm->lock);
+		return ret;
+	}
+	spin_unlock(&mm->lock);
+
+	/*remove mapping entry */
+	spin_lock(&mm->lock);
+	ret = psb_gtt_mm_remove_free_mem_mapping_locked(&hentry->ht, key, &tmp);
+	if (ret) {
+		DRM_DEBUG("remove_free failed\n");
+		spin_unlock(&mm->lock);
+		return ret;
+	}
+
+	*node = tmp;
+
+	/*check the count of mapping entry */
+	if (!hentry->count) {
+		DRM_DEBUG("count of mapping entry is zero, tgid=%d\n", tgid);
+		psb_gtt_mm_remove_free_ht_locked(mm, tgid);
+	}
+
+	spin_unlock(&mm->lock);
+
+	return 0;
+}
+
+static int psb_gtt_mm_alloc_mem(struct psb_gtt_mm *mm,
+				uint32_t pages,
+				uint32_t align, struct drm_mm_node **node)
+{
+	struct drm_mm_node *tmp_node;
+	int ret;
+
+	if (IS_ANN(dev)) {
+		if (align < 32)
+			align = 32;
+	}
+
+	do {
+		ret = drm_mm_pre_get(&mm->base);
+		if (unlikely(ret)) {
+			DRM_DEBUG("drm_mm_pre_get error\n");
+			return ret;
+		}
+
+		spin_lock(&mm->lock);
+		tmp_node = drm_mm_search_free(&mm->base, pages, align, 1);
+		if (unlikely(!tmp_node)) {
+			DRM_DEBUG("No free node found\n");
+			spin_unlock(&mm->lock);
+			break;
+		}
+
+		tmp_node = drm_mm_get_block_atomic(tmp_node, pages, align);
+		spin_unlock(&mm->lock);
+	} while (!tmp_node);
+
+	if (!tmp_node) {
+		DRM_DEBUG("Node allocation failed\n");
+		return -ENOMEM;
+	}
+
+	*node = tmp_node;
+	return 0;
+}
+
+static void psb_gtt_mm_free_mem(struct psb_gtt_mm *mm, struct drm_mm_node *node)
+{
+	spin_lock(&mm->lock);
+	drm_mm_put_block(node);
+	spin_unlock(&mm->lock);
+}
+
+static u32 gtt_get_tgid(void)
+{
+	if (in_interrupt()) {
+		return -1;
+	}
+
+	return task_tgid_nr(current);
+}
+
+static int
+psb_gtt_mm_get_mem_mapping_anyused_locked(struct drm_open_hash *ht,
+				  struct psb_gtt_mem_mapping **hentry)
+{
+	struct drm_hash_item *entry;
+	struct psb_gtt_mem_mapping *mapping;
+	int ret;
+
+	ret = drm_ht_find_item_anyused(ht, &entry);
+	if (ret) {
+		DRM_DEBUG("Cannot find\n");
+		return ret;
+	}
+
+	mapping =  container_of(entry, struct psb_gtt_mem_mapping, item);
+	if (!mapping) {
+		DRM_DEBUG("Invalid entry\n");
+		return -EINVAL;
+	}
+
+	*hentry = mapping;
+	return 0;
+}
+
+static struct psb_gtt_mem_mapping *
+psb_gtt_mm_remove_mem_mapping_anyused_locked(struct drm_open_hash *ht) {
+	struct psb_gtt_mem_mapping *tmp;
+	struct psb_gtt_hash_entry *entry;
+	int ret;
+
+	if (!ht) {
+		DRM_DEBUG("hash table is NULL\n");
+		return NULL;
+	}
+
+	ret = psb_gtt_mm_get_mem_mapping_anyused_locked(ht, &tmp);
+	if (ret) {
+		DRM_DEBUG("Cannot find any used\n");
+		return NULL;
+	}
+
+	drm_ht_remove_item(ht, &tmp->item);
+
+	entry = container_of(ht, struct psb_gtt_hash_entry, ht);
+	if (entry)
+		entry->count--;
+
+	return tmp;
+}
+
+static int psb_gtt_mm_remove_free_mem_mapping_anyused_locked
+	(struct drm_open_hash *ht,
+	struct drm_mm_node **node)
+{
+	struct psb_gtt_mem_mapping *entry;
+
+	entry = psb_gtt_mm_remove_mem_mapping_anyused_locked(ht);
+	if (!entry) {
+		DRM_DEBUG("entry is NULL\n");
+		return -EINVAL;
+	}
+
+	*node = entry->node;
+
+	kfree(entry);
+	return 0;
+}
+
+static int psb_gtt_remove_node_anyused(struct psb_gtt_mm *mm,
+			       u32 tgid,
+			       struct drm_mm_node **node)
+{
+	struct psb_gtt_hash_entry *hentry;
+	struct drm_mm_node *tmp;
+	int ret;
+
+	spin_lock(&mm->lock);
+	ret = psb_gtt_mm_get_ht_by_pid_locked(mm, tgid, &hentry);
+	if (ret) {
+		spin_unlock(&mm->lock);
+		return ret;
+	}
+	spin_unlock(&mm->lock);
+
+	/*remove mapping entry*/
+	spin_lock(&mm->lock);
+	ret = psb_gtt_mm_remove_free_mem_mapping_anyused_locked(&hentry->ht,
+			&tmp);
+	if (ret) {
+		DRM_DEBUG("remove_free failed\n");
+		spin_unlock(&mm->lock);
+		return ret;
+	}
+
+	*node = tmp;
+
+	/*check the count of mapping entry*/
+	if (!hentry->count) {
+		DRM_DEBUG("count of mapping entry is zero, tgid=%d\n", tgid);
+		psb_gtt_mm_remove_free_ht_locked(mm, tgid);
+	}
+
+	spin_unlock(&mm->lock);
+
+	return 0;
+}
+
+static int psb_gtt_unmap_anyused(struct drm_device *dev,
+			unsigned int ui32TaskId)
+{
+	struct drm_psb_private *dev_priv
+	= (struct drm_psb_private *)dev->dev_private;
+	struct psb_gtt_mm *mm = dev_priv->gtt_mm;
+	struct psb_gtt *pg = dev_priv->pg;
+	uint32_t pages, offset_pages;
+	struct drm_mm_node *node;
+	int ret;
+
+	ret = psb_gtt_remove_node_anyused(mm,
+				  (u32)ui32TaskId,
+				  &node);
+	if (ret) {
+		DRM_DEBUG("remove node failed\n");
+		return ret;
+	}
+
+	/*remove gtt entries*/
+	offset_pages = node->start;
+	pages = node->size;
+
+	psb_gtt_remove_pages(pg, offset_pages, pages, 0, 0, 1);
+
+
+	/*free tt node*/
+
+	psb_gtt_mm_free_mem(mm, node);
+	return 0;
+
+}
+
+int psb_gtt_map_meminfo(struct drm_device *dev,
+			void *hKernelMemInfo,
+			uint32_t page_align, uint32_t * offset)
+{
+	struct drm_psb_private *dev_priv;
+	struct psb_gtt_mm *mm;
+	struct psb_gtt *pg;
+	uint32_t size, pages, offset_pages;
+	void *kmem;
+	struct drm_mm_node *node;
+	struct page **page_list;
+	unsigned long *pfn_list;
+	struct psb_gtt_mem_mapping *mapping;
+	int ret;
+
+	/*
+	 *  Initialize locals then get  the allocation size and calculate
+	 *  the number of pages that this requires. To do this we use the
+	 *  interface routines provided in pvr_drm.c, a module.
+	 */
+	dev_priv = (struct drm_psb_private *)dev->dev_private;
+	mm = dev_priv->gtt_mm;
+	pg = dev_priv->pg;
+	mapping = NULL;
+
+	size = PVRSRVGetMeminfoSize(hKernelMemInfo);
+	kmem = PVRSRVGetMeminfoCPUAddr(hKernelMemInfo);
+	pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+	/*
+	 *  Alloc memory in TT apeture and update the GTT mm...
+	 */
+	if ((ret = psb_gtt_mm_alloc_mem(mm, pages, page_align, &node)) != 0) {
+		DRM_DEBUG("alloc TT memory error\n");
+
+		return ret;
+	}
+
+	if ((ret = psb_gtt_add_node(mm,
+				    (u32) gtt_get_tgid(),
+				    (u32) (uintptr_t) hKernelMemInfo,
+				    node, &mapping)) != 0) {
+		DRM_DEBUG("add_node failed");
+
+		psb_gtt_mm_free_mem(mm, node);
+		return ret;
+	}
+
+	node = mapping->node;
+	offset_pages = node->start;
+
+	/*
+	 *  Memory represented by  'hKernelMemInfo'  should either map by
+	 *  page records, or by pfn.  It will map user allocated memory -
+	 *  i.e., with malloc - by pages. PVRSRV allocations from general
+	 *  heap map via their pfn's.  Insert items that map into the gtt
+	 *  page table.
+	 */
+	if (PVRSRVGetMeminfoPages(hKernelMemInfo, pages, &page_list) == 0) {	/* Works with user space 'malloc' */
+		psb_gtt_insert_pages(pg, page_list,
+				     (unsigned)offset_pages,
+				     (unsigned)pages, 0, 0, 0);
+		kfree(page_list);
+	} else if ((ret = PVRSRVGetMeminfoPfn(hKernelMemInfo, pages, &pfn_list)) == 0) {	/* Works with 'PVRSRVAllocDeviceMemMIW' */
+		psb_gtt_insert_pfn_list(pg, pfn_list,
+					(unsigned)offset_pages,
+					(unsigned)pages, 0, 0, 0);
+		kfree(pfn_list);
+	} else {
+		psb_gtt_remove_node(mm,
+				    (u32) gtt_get_tgid(),
+				    (u32) (uintptr_t) hKernelMemInfo, &node);
+		psb_gtt_mm_free_mem(mm, node);
+
+		return ret;
+	}
+
+	*offset = offset_pages;
+	return 0;
+}				/* psb_gtt_map_meminfo */
+
+int psb_gtt_unmap_meminfo(struct drm_device *dev, void *hKernelMemInfo)
+{
+	struct drm_psb_private *dev_priv
+	    = (struct drm_psb_private *)dev->dev_private;
+	struct psb_gtt_mm *mm = dev_priv->gtt_mm;
+	struct psb_gtt *pg = dev_priv->pg;
+	uint32_t pages, offset_pages;
+	struct drm_mm_node *node;
+	int ret;
+
+	ret = psb_gtt_remove_node(mm,
+				  (u32) gtt_get_tgid(),
+				  (u32) (uintptr_t) hKernelMemInfo, &node);
+	if (ret) {
+		DRM_DEBUG("remove node failed\n");
+		return ret;
+	}
+
+	/*remove gtt entries */
+	offset_pages = node->start;
+	pages = node->size;
+
+	psb_gtt_remove_pages(pg, offset_pages, pages, 0, 0, 1);
+
+	/*free tt node */
+
+	psb_gtt_mm_free_mem(mm, node);
+	return 0;
+}
+
+static int psb_gtt_unmap_common(struct drm_device *dev,
+			unsigned int ui32TaskId,
+			unsigned int hHandle)
+{
+	struct drm_psb_private *dev_priv
+	= (struct drm_psb_private *)dev->dev_private;
+	struct psb_gtt_mm *mm = dev_priv->gtt_mm;
+	struct psb_gtt *pg = dev_priv->pg;
+	uint32_t pages, offset_pages;
+	struct drm_mm_node *node;
+	int ret;
+
+	ret = psb_gtt_remove_node(mm,
+				  (u32)ui32TaskId,
+				  (u32)hHandle,
+				  &node);
+	if (ret) {
+		DRM_DEBUG("remove node failed\n");
+		return ret;
+	}
+
+	/*remove gtt entries*/
+	offset_pages = node->start;
+	pages = node->size;
+
+	psb_gtt_remove_pages(pg, offset_pages, pages, 0, 0, 1);
+
+	/*free tt node*/
+
+	psb_gtt_mm_free_mem(mm, node);
+	return 0;
+
+}
+
+static int psb_get_vaddr_pages(u32 vaddr, u32 size,
+				unsigned long **pfn_list, int *page_count)
+{
+	u32 num_pages;
+	struct page **pages = 0;
+	struct task_struct *task = current;
+	struct mm_struct *mm = task->mm;
+	struct vm_area_struct *vma;
+	unsigned long *pfns = 0;
+	int ret;
+	int i;
+
+	if (unlikely(!pfn_list || !page_count || !vaddr || !size))
+		return -EINVAL;
+
+	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+	pages = kzalloc(num_pages * sizeof(struct page *), GFP_KERNEL);
+	if (unlikely(!pages)) {
+		DRM_ERROR("Failed to allocate page list\n");
+		return -ENOMEM;
+	}
+
+	down_read(&mm->mmap_sem);
+	ret = get_user_pages(task, mm, vaddr, num_pages, 0, 0, pages, NULL);
+	up_read(&mm->mmap_sem);
+
+	if (ret <= 0) {
+		DRM_DEBUG("failed to get user pages\n");
+		kfree(pages);
+		pages = 0;
+	} else {
+		DRM_ERROR("num_pages %d, ret %d\n", num_pages, ret);
+		num_pages = ret;
+	}
+
+	/*allocate page list*/
+	pfns = kzalloc(num_pages * sizeof(unsigned long), GFP_KERNEL);
+	if (!pfns) {
+		DRM_ERROR("No memory\n");
+		goto get_page_err;
+	}
+
+	if (!pages) {
+		DRM_DEBUG("No pages found, trying to follow pfn\n");
+		for (i = 0; i < num_pages; i++) {
+			vma = find_vma(mm, vaddr + i * PAGE_SIZE);
+			if (!vma) {
+				DRM_ERROR("failed to find vma\n");
+				goto find_vma_err;
+			}
+
+			ret = follow_pfn(vma,
+				(unsigned long)(vaddr + i * PAGE_SIZE),
+				&pfns[i]);
+			if (ret) {
+				DRM_ERROR("failed to follow pfn\n");
+				goto follow_pfn_err;
+			}
+		}
+	} else {
+		DRM_DEBUG("Found pages\n");
+		for (i = 0; i < num_pages; i++)
+			pfns[i] = page_to_pfn(pages[i]);
+	}
+
+	*pfn_list = pfns;
+	*page_count = num_pages;
+
+	kfree(pages);
+
+	return 0;
+find_vma_err:
+follow_pfn_err:
+	kfree(pfns);
+get_page_err:
+	if (pages) {
+		for (i = 0; i < num_pages; i++)
+			put_page(pages[i]);
+		kfree(pages);
+	}
+	return -EINVAL;
+}
+
+int psb_gtt_map_vaddr(struct drm_device *dev,
+			uint32_t vaddr,
+			uint32_t size,
+			uint32_t page_align,
+			uint32_t *offset)
+{
+	struct drm_psb_private *dev_priv
+		= (struct drm_psb_private *)dev->dev_private;
+	struct psb_gtt_mm *mm = dev_priv->gtt_mm;
+	struct psb_gtt *pg = dev_priv->pg;
+	uint32_t pages, offset_pages;
+	struct drm_mm_node *node;
+	unsigned long *pfn_list = 0;
+	struct psb_gtt_mem_mapping *mapping = NULL;
+	int ret;
+
+	/*get pages*/
+	ret = psb_get_vaddr_pages(vaddr, size, &pfn_list, &pages);
+	if (ret) {
+		DRM_DEBUG("get pages error\n");
+		return ret;
+	}
+
+	DRM_DEBUG("get %d pages\n", pages);
+
+	/*alloc memory in TT apeture*/
+	ret = psb_gtt_mm_alloc_mem(mm, pages, page_align, &node);
+	if (ret) {
+		DRM_DEBUG("alloc TT memory error\n");
+		goto failed_pages_alloc;
+	}
+
+	/*update psb_gtt_mm*/
+	ret = psb_gtt_add_node(mm,
+			       (u32)gtt_get_tgid(),
+			       vaddr,
+			       node,
+			       &mapping);
+	if (ret) {
+		DRM_DEBUG("add_node failed");
+		goto failed_add_node;
+	}
+
+	node = mapping->node;
+	offset_pages = node->start;
+
+	DRM_DEBUG("get free node for %d pages, offset %d pages",
+		  pages, offset_pages);
+
+	/*update gtt*/
+	psb_gtt_insert_pfn_list(pg, pfn_list,
+			     (unsigned)offset_pages,
+			     (unsigned)pages,
+			     0,
+			     0,
+			     0);
+
+	/*free pfn_list if allocated*/
+	kfree(pfn_list);
+
+	*offset = offset_pages;
+	return 0;
+
+failed_add_node:
+	psb_gtt_mm_free_mem(mm, node);
+failed_pages_alloc:
+	kfree(pfn_list);
+	return ret;
+}
+
+int psb_gtt_unmap_vaddr(struct drm_device *dev,
+			uint32_t vaddr,
+			uint32_t size)
+{
+	return psb_gtt_unmap_common(dev, gtt_get_tgid(), vaddr);
+
+}
+
+int psb_gtt_map_meminfo_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv)
+{
+	struct psb_gtt_mapping_arg *arg
+	= (struct psb_gtt_mapping_arg *)data;
+	uint32_t *offset_pages = &arg->offset_pages;
+	uint32_t page_align = arg->page_align;
+	uint32_t vaddr = arg->vaddr;
+	uint32_t size = arg->size;
+	uint32_t type = arg->type;
+
+	DRM_DEBUG("\n");
+
+	switch (type) {
+	case PSB_GTT_MAP_TYPE_MEMINFO:
+		return psb_gtt_map_meminfo(dev,
+				arg->hKernelMemInfo,
+				page_align,
+				offset_pages);
+	case PSB_GTT_MAP_TYPE_VIRTUAL:
+		return psb_gtt_map_vaddr(dev,
+					vaddr,
+					size,
+					page_align,
+					offset_pages);
+	default:
+		DRM_ERROR("unsupported buffer type %d\n", type);
+		return -EINVAL;
+	}
+}
+
+int psb_gtt_unmap_meminfo_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file_priv)
+{
+
+	struct psb_gtt_mapping_arg *arg
+		= (struct psb_gtt_mapping_arg *)data;
+	uint32_t vaddr = arg->vaddr;
+	uint32_t size = arg->size;
+	uint32_t type = arg->type;
+
+	DRM_DEBUG("\n");
+
+	switch (type) {
+	case PSB_GTT_MAP_TYPE_MEMINFO:
+		return psb_gtt_unmap_meminfo(dev, arg->hKernelMemInfo);
+	case PSB_GTT_MAP_TYPE_VIRTUAL:
+		return psb_gtt_unmap_vaddr(dev, vaddr, size);
+	default:
+		DRM_ERROR("unsupported buffer type %d\n", type);
+		return -EINVAL;
+	}
+}
+
+int DCCBgttMapMemory(struct drm_device *dev,
+		     unsigned int hHandle,
+		     unsigned int ui32TaskId,
+		     IMG_SYS_PHYADDR *pPages,
+		     unsigned int ui32PagesNum, unsigned int *ui32Offset)
+{
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *)dev->dev_private;
+	struct psb_gtt_mm *mm = dev_priv->gtt_mm;
+	struct psb_gtt *pg = dev_priv->pg;
+
+	uint32_t size, pages, offset_pages;
+	struct drm_mm_node *node = NULL;
+	struct psb_gtt_mem_mapping *mapping = NULL;
+	int ret;
+
+	size = ui32PagesNum * PAGE_SIZE;
+	pages = 0;
+
+	/*alloc memory in TT apeture */
+	ret = psb_gtt_mm_alloc_mem(mm, ui32PagesNum, 0, &node);
+	if (ret) {
+		DRM_DEBUG("alloc TT memory error\n");
+		goto failed_pages_alloc;
+	}
+
+	/*update psb_gtt_mm */
+	ret = psb_gtt_add_node(mm,
+			       (u32) ui32TaskId, (u32) hHandle, node, &mapping);
+	if (ret) {
+		DRM_DEBUG("add_node failed");
+		goto failed_add_node;
+	}
+
+	node = mapping->node;
+	offset_pages = node->start;
+
+	DRM_DEBUG("get free node for %d pages, offset %d pages", pages,
+		  offset_pages);
+
+	/*update gtt */
+	psb_gtt_insert_phys_addresses(pg, pPages, (unsigned)offset_pages,
+				      (unsigned)ui32PagesNum, 0);
+
+	*ui32Offset = offset_pages;
+	return 0;
+
+ failed_add_node:
+	psb_gtt_mm_free_mem(mm, node);
+ failed_pages_alloc:
+	return ret;
+}
+
+int DCCBgttUnmapMemory(struct drm_device *dev, unsigned int hHandle,
+		       unsigned int ui32TaskId)
+{
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *)dev->dev_private;
+	struct psb_gtt_mm *mm = dev_priv->gtt_mm;
+	struct psb_gtt *pg = dev_priv->pg;
+	uint32_t pages, offset_pages;
+	struct drm_mm_node *node;
+	int ret;
+
+	ret = psb_gtt_remove_node(mm, (u32) ui32TaskId, (u32) hHandle, &node);
+	if (ret) {
+		printk(KERN_ERR "remove node failed\n");
+		return ret;
+	}
+
+	/*remove gtt entries */
+	offset_pages = node->start;
+	pages = node->size;
+
+	psb_gtt_remove_pages(pg, offset_pages, pages, 0, 0, 1);
+
+	/*free tt node */
+	psb_gtt_mm_free_mem(mm, node);
+	return 0;
+}
+
+int DCCBgttCleanupMemoryOnTask(struct drm_device *dev, unsigned int ui32TaskId)
+{
+	/* unmap all gtt for tgid */
+	while (!psb_gtt_unmap_anyused(dev, ui32TaskId))
+		;
+
+	return 0;
+}
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/psb_gtt.h b/drivers/external_drivers/intel_media/display/tng/drv/psb_gtt.h
new file mode 100644
index 0000000..8113c1b
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/psb_gtt.h
@@ -0,0 +1,104 @@
+/**************************************************************************
+ * Copyright (c) 2007-2008, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#ifndef _PSB_GTT_H_
+#define _PSB_GTT_H_
+
+#include <drm/drmP.h>
+
+/*Reserve 1M GTT for MOFD HW WA*/
+#define MOFD_RESERVED_GTT_PAGES		256
+#define MOFD_HW_WA_GTT_PAGES		8
+#define MOFD_TTM_TT_PAGES	(MOFD_RESERVED_GTT_PAGES - MOFD_HW_WA_GTT_PAGES)
+
+struct psb_gtt {
+	struct drm_device *dev;
+	int initialized;
+	uint32_t gatt_start;
+	uint32_t mmu_gatt_start;
+	uint32_t gtt_video_start;
+	uint32_t gtt_start;
+	uint32_t reserved_gtt_start;
+	uint32_t gtt_phys_start;
+	unsigned gtt_pages;
+	unsigned gatt_pages;
+	uint32_t stolen_base;
+	void *vram_addr;
+	uint32_t pge_ctl;
+	u16 gmch_ctrl;
+	unsigned long stolen_size;
+	unsigned long vram_stolen_size;
+	uint32_t *gtt_map;
+	struct rw_semaphore sem;
+};
+
+struct psb_gtt_mm {
+	struct drm_mm base;
+	struct drm_open_hash hash;
+	uint32_t count;
+	spinlock_t lock;
+};
+
+struct psb_gtt_hash_entry {
+	struct drm_open_hash ht;
+	uint32_t count;
+	struct drm_hash_item item;
+};
+
+struct psb_gtt_mem_mapping {
+	struct drm_mm_node *node;
+	struct drm_hash_item item;
+};
+
+#if 0
+/*Ioctl args*/
+struct psb_gtt_mapping_arg {
+	void *hKernelMemInfo;
+};
+#endif
+
+/*Exported functions*/
+extern int psb_gtt_init(struct psb_gtt *pg, int resume);
+extern int mrfld_gtt_init(struct psb_gtt *pg, int resume);
+extern int psb_gtt_insert_pages(struct psb_gtt *pg, struct page **pages,
+				unsigned offset_pages, unsigned num_pages,
+				unsigned desired_tile_stride,
+				unsigned hw_tile_stride, int type);
+extern int psb_gtt_remove_pages(struct psb_gtt *pg, unsigned offset_pages,
+				unsigned num_pages,
+				unsigned desired_tile_stride,
+				unsigned hw_tile_stride, int rc_prot);
+
+extern struct psb_gtt *psb_gtt_alloc(struct drm_device *dev);
+extern void psb_gtt_takedown(struct psb_gtt *pg, int free);
+extern void mrfld_gtt_takedown(struct psb_gtt *pg, int free);
+extern int psb_gtt_map_meminfo(struct drm_device *dev,
+			       void *hKernelMemInfo,
+			       uint32_t page_align, uint32_t *offset);
+extern int psb_gtt_unmap_meminfo(struct drm_device *dev, void *hKernelMemInfo);
+extern int psb_gtt_map_meminfo_ioctl(struct drm_device *dev, void *data,
+				     struct drm_file *file_priv);
+extern int psb_gtt_unmap_meminfo_ioctl(struct drm_device *dev, void *data,
+				       struct drm_file *file_priv);
+extern int psb_gtt_mm_init(struct psb_gtt *pg);
+extern void psb_gtt_mm_takedown(void);
+extern int psb_gtt_map_vaddr(struct drm_device *dev, uint32_t vaddr, uint32_t size,
+			uint32_t page_align, uint32_t *offset);
+extern int psb_gtt_unmap_vaddr(struct drm_device *dev, uint32_t vaddr, uint32_t size);
+#endif
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/psb_hotplug.c b/drivers/external_drivers/intel_media/display/tng/drv/psb_hotplug.c
new file mode 100644
index 0000000..0bcd87f
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/psb_hotplug.c
@@ -0,0 +1,449 @@
+/*
+ * Copyright © 2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *    James C. Gualario <james.c.gualario@intel.com>
+ *
+ */
+
+#include "psb_umevents.h"
+#include "psb_hotplug.h"
+/**
+ * inform the kernel of the work to be performed and related function.
+ *
+ */
+DECLARE_WORK(hotplug_dev_create_work, &psb_hotplug_dev_create_wq);
+DECLARE_WORK(hotplug_dev_remove_work, &psb_hotplug_dev_remove_wq);
+DECLARE_WORK(hotplug_dev_change_work, &psb_hotplug_dev_change_wq);
+/**
+ * psb_hotplug_notify_change_um - notify user mode of hotplug changes
+ *
+ * @name: name of event to notify user mode of change to
+ * @state: hotplug state to search for event object in
+ *
+ */
+int psb_hotplug_notify_change_um(const char *name, struct hotplug_state *state)
+{
+	strcpy(&(state->hotplug_change_wq_data.dev_name_arry
+		 [state->hotplug_change_wq_data.dev_name_write][0]), name);
+	state->hotplug_change_wq_data.dev_name_arry_rw_status
+	    [state->hotplug_change_wq_data.dev_name_write] =
+	    DRM_HOTPLUG_READY_TO_READ;
+	if (state->hotplug_change_wq_data.dev_name_read_write_wrap_ack == 1)
+		state->hotplug_change_wq_data.dev_name_read_write_wrap_ack = 0;
+	state->hotplug_change_wq_data.dev_name_write++;
+	if (state->hotplug_change_wq_data.dev_name_write ==
+	    state->hotplug_change_wq_data.dev_name_read) {
+		state->hotplug_change_wq_data.dev_name_write--;
+		return IRQ_NONE;
+	}
+	if (state->hotplug_change_wq_data.dev_name_write >
+	    DRM_HOTPLUG_RING_DEPTH_MAX) {
+		state->hotplug_change_wq_data.dev_name_write = 0;
+		state->hotplug_change_wq_data.dev_name_write_wrap = 1;
+	}
+	state->hotplug_change_wq_data.hotplug_dev_list = state->list;
+	queue_work(state->hotplug_wq, &(state->hotplug_change_wq_data.work));
+	return IRQ_HANDLED;
+}
+
+/**
+ *
+ * psb_hotplug_create_and_notify_um - create and notify user mode of new dev
+ *
+ * @name: name to give for new event / device
+ * @state: hotplug state to track new event /device in
+ *
+ */
+int psb_hotplug_create_and_notify_um(const char *name,
+				     struct hotplug_state *state)
+{
+	strcpy(&(state->hotplug_create_wq_data.dev_name_arry
+		 [state->hotplug_create_wq_data.dev_name_write][0]), name);
+	state->hotplug_create_wq_data.dev_name_arry_rw_status
+	    [state->hotplug_create_wq_data.dev_name_write] =
+	    DRM_HOTPLUG_READY_TO_READ;
+	if (state->hotplug_create_wq_data.dev_name_read_write_wrap_ack == 1)
+		state->hotplug_create_wq_data.dev_name_read_write_wrap_ack = 0;
+	state->hotplug_create_wq_data.dev_name_write++;
+	if (state->hotplug_create_wq_data.dev_name_write ==
+	    state->hotplug_create_wq_data.dev_name_read) {
+		state->hotplug_create_wq_data.dev_name_write--;
+		return IRQ_NONE;
+	}
+	if (state->hotplug_create_wq_data.dev_name_write >
+	    DRM_HOTPLUG_RING_DEPTH_MAX) {
+		state->hotplug_create_wq_data.dev_name_write = 0;
+		state->hotplug_create_wq_data.dev_name_write_wrap = 1;
+	}
+	state->hotplug_create_wq_data.hotplug_dev_list = state->list;
+	queue_work(state->hotplug_wq, &(state->hotplug_create_wq_data.work));
+	return IRQ_HANDLED;
+}
+
+/*EXPORT_SYMBOL(psb_hotplug_create_and_notify_um); */
+/**
+ * psb_hotplug_remove_and_notify_um - remove device and notify user mode
+ *
+ * @name: name of event / device to remove
+ * @state: hotplug state to remove event / device from
+ *
+ */
+int psb_hotplug_remove_and_notify_um(const char *name,
+				     struct hotplug_state *state)
+{
+	strcpy(&(state->hotplug_remove_wq_data.dev_name_arry
+		 [state->hotplug_remove_wq_data.dev_name_write][0]), name);
+	state->hotplug_remove_wq_data.dev_name_arry_rw_status
+	    [state->hotplug_remove_wq_data.dev_name_write] =
+	    DRM_HOTPLUG_READY_TO_READ;
+	if (state->hotplug_remove_wq_data.dev_name_read_write_wrap_ack == 1)
+		state->hotplug_remove_wq_data.dev_name_read_write_wrap_ack = 0;
+	state->hotplug_remove_wq_data.dev_name_write++;
+	if (state->hotplug_remove_wq_data.dev_name_write ==
+	    state->hotplug_remove_wq_data.dev_name_read) {
+		state->hotplug_remove_wq_data.dev_name_write--;
+		return IRQ_NONE;
+	}
+	if (state->hotplug_remove_wq_data.dev_name_write >
+	    DRM_HOTPLUG_RING_DEPTH_MAX) {
+		state->hotplug_remove_wq_data.dev_name_write = 0;
+		state->hotplug_remove_wq_data.dev_name_write_wrap = 1;
+	}
+	state->hotplug_remove_wq_data.hotplug_dev_list = state->list;
+	queue_work(state->hotplug_wq, &(state->hotplug_remove_wq_data.work));
+	return IRQ_HANDLED;
+}
+
+/*EXPORT_SYMBOL(psb_hotplug_remove_and_notify_um); */
+/**
+ * psb_hotplug_device_pool_create_and_init - make new hotplug device pool
+ *
+ * @parent_kobj: parent kobject to associate hotplug kset with
+ * @state: hotplug state to assocaite workqueues with
+ *
+ */
+struct umevent_list *psb_hotplug_device_pool_create_and_init(struct kobject
+							     *parent_kobj, struct
+							     hotplug_state
+							     *state)
+{
+	struct umevent_list *new_hotplug_dev_list = NULL;
+
+	new_hotplug_dev_list = psb_umevent_create_list();
+	if (new_hotplug_dev_list)
+		psb_umevent_init(parent_kobj, new_hotplug_dev_list,
+				 "psb_hotplug");
+
+	state->hotplug_wq = create_singlethread_workqueue("hotplug-wq");
+	if (!state->hotplug_wq)
+		return NULL;
+
+	INIT_WORK(&state->hotplug_create_wq_data.work,
+		  psb_hotplug_dev_create_wq);
+	INIT_WORK(&state->hotplug_remove_wq_data.work,
+		  psb_hotplug_dev_remove_wq);
+	INIT_WORK(&state->hotplug_change_wq_data.work,
+		  psb_hotplug_dev_change_wq);
+
+	state->hotplug_create_wq_data.dev_name_read = 0;
+	state->hotplug_create_wq_data.dev_name_write = 0;
+	state->hotplug_create_wq_data.dev_name_write_wrap = 0;
+	state->hotplug_create_wq_data.dev_name_read_write_wrap_ack = 0;
+	memset(&(state->hotplug_create_wq_data.dev_name_arry_rw_status[0]),
+	       0, sizeof(int) * DRM_HOTPLUG_RING_DEPTH);
+
+	state->hotplug_remove_wq_data.dev_name_read = 0;
+	state->hotplug_remove_wq_data.dev_name_write = 0;
+	state->hotplug_remove_wq_data.dev_name_write_wrap = 0;
+	state->hotplug_remove_wq_data.dev_name_read_write_wrap_ack = 0;
+	memset(&(state->hotplug_remove_wq_data.dev_name_arry_rw_status[0]),
+	       0, sizeof(int) * DRM_HOTPLUG_RING_DEPTH);
+
+	state->hotplug_change_wq_data.dev_name_read = 0;
+	state->hotplug_change_wq_data.dev_name_write = 0;
+	state->hotplug_change_wq_data.dev_name_write_wrap = 0;
+	state->hotplug_change_wq_data.dev_name_read_write_wrap_ack = 0;
+	memset(&(state->hotplug_change_wq_data.dev_name_arry_rw_status[0]),
+	       0, sizeof(int) * DRM_HOTPLUG_RING_DEPTH);
+
+	return new_hotplug_dev_list;
+}
+
+/*EXPORT_SYMBOL(psb_hotplug_device_pool_create_and_init); */
+/**
+ *
+ * psb_hotplug_init - init hotplug subsystem
+ *
+ * @parent_kobj: parent kobject to associate hotplug state with
+ *
+ */
+struct hotplug_state *psb_hotplug_init(struct kobject *parent_kobj)
+{
+	struct hotplug_state *state;
+	state = kzalloc(sizeof(struct hotplug_state), GFP_KERNEL);
+
+	if (!state)
+		return state;
+
+	state->list = NULL;
+	state->list = psb_hotplug_device_pool_create_and_init(parent_kobj,
+							      state);
+
+	psb_hotplug_create_and_notify_um("hpd_hdmi", state);
+
+	return state;
+}
+
+/**
+ * psb_hotplug_device_pool_destroy - destroy all hotplug related resources
+ *
+ * @state: hotplug state to destroy
+ *
+ */
+void psb_hotplug_device_pool_destroy(struct hotplug_state *state)
+{
+	flush_workqueue(state->hotplug_wq);
+	destroy_workqueue(state->hotplug_wq);
+	psb_umevent_cleanup(state->list);
+	kfree(state);
+}
+
+/*EXPORT_SYMBOL(psb_hotplug_device_pool_destroy); */
+/**
+ * psb_hotplug_dev_create_wq - create workqueue implementation
+ *
+ * @work: work struct to use for kernel scheduling
+ *
+ */
+void psb_hotplug_dev_create_wq(struct work_struct *work)
+{
+	struct hotplug_disp_workqueue_data *wq_data;
+	struct umevent_obj *wq_working_hotplug_disp_obj;
+	wq_data = to_hotplug_disp_workqueue_data(work);
+	if (wq_data->dev_name_write_wrap == 1) {
+		wq_data->dev_name_read_write_wrap_ack = 1;
+		wq_data->dev_name_write_wrap = 0;
+		while (wq_data->dev_name_read != DRM_HOTPLUG_RING_DEPTH_MAX) {
+			if (wq_data->dev_name_arry_rw_status
+			    [wq_data->dev_name_read] ==
+			    DRM_HOTPLUG_READY_TO_READ) {
+				wq_working_hotplug_disp_obj =
+				    psb_create_umevent_obj
+				    (&wq_data->dev_name_arry
+				     [wq_data->dev_name_read][0],
+				     wq_data->hotplug_dev_list);
+				wq_data->
+				    dev_name_arry_rw_status
+				    [wq_data->dev_name_read]
+				    = DRM_HOTPLUG_READ_COMPLETE;
+				/* psb_umevent_notify
+				   (wq_working_hotplug_disp_obj); */
+			}
+			wq_data->dev_name_read++;
+		}
+		wq_data->dev_name_read = 0;
+		while (wq_data->dev_name_read < wq_data->dev_name_write - 1) {
+			if (wq_data->dev_name_arry_rw_status
+			    [wq_data->dev_name_read] ==
+			    DRM_HOTPLUG_READY_TO_READ) {
+				wq_working_hotplug_disp_obj =
+				    psb_create_umevent_obj
+				    (&wq_data->dev_name_arry
+				     [wq_data->dev_name_read][0],
+				     wq_data->hotplug_dev_list);
+				wq_data->
+				    dev_name_arry_rw_status
+				    [wq_data->dev_name_read]
+				    = DRM_HOTPLUG_READ_COMPLETE;
+				/*psb_umevent_notify
+				   (wq_working_hotplug_disp_obj); */
+			}
+			wq_data->dev_name_read++;
+		}
+	} else {
+		while (wq_data->dev_name_read < wq_data->dev_name_write) {
+			if (wq_data->dev_name_arry_rw_status
+			    [wq_data->dev_name_read] ==
+			    DRM_HOTPLUG_READY_TO_READ) {
+				wq_working_hotplug_disp_obj =
+				    psb_create_umevent_obj
+				    (&wq_data->dev_name_arry
+				     [wq_data->dev_name_read][0],
+				     wq_data->hotplug_dev_list);
+				wq_data->
+				    dev_name_arry_rw_status
+				    [wq_data->dev_name_read]
+				    = DRM_HOTPLUG_READ_COMPLETE;
+				/*psb_umevent_notify
+				   (wq_working_hotplug_disp_obj); */
+			}
+			wq_data->dev_name_read++;
+		}
+	}
+	if (wq_data->dev_name_read > DRM_HOTPLUG_RING_DEPTH_MAX)
+		wq_data->dev_name_read = 0;
+}
+
+/*EXPORT_SYMBOL(psb_hotplug_dev_create_wq); */
+/**
+ * psb_hotplug_dev_remove_wq - remove workqueue implementation
+ *
+ * @work: work struct to use for kernel scheduling
+ *
+ */
+void psb_hotplug_dev_remove_wq(struct work_struct *work)
+{
+	struct hotplug_disp_workqueue_data *wq_data;
+	wq_data = to_hotplug_disp_workqueue_data(work);
+	if (wq_data->dev_name_write_wrap == 1) {
+		wq_data->dev_name_read_write_wrap_ack = 1;
+		wq_data->dev_name_write_wrap = 0;
+		while (wq_data->dev_name_read != DRM_HOTPLUG_RING_DEPTH_MAX) {
+			if (wq_data->dev_name_arry_rw_status
+			    [wq_data->dev_name_read] ==
+			    DRM_HOTPLUG_READY_TO_READ) {
+				psb_umevent_remove_from_list
+				    (wq_data->hotplug_dev_list,
+				     &wq_data->dev_name_arry
+				     [wq_data->dev_name_read]
+				     [0]);
+				wq_data->
+				    dev_name_arry_rw_status
+				    [wq_data->dev_name_read]
+				    = DRM_HOTPLUG_READ_COMPLETE;
+			}
+			wq_data->dev_name_read++;
+		}
+		wq_data->dev_name_read = 0;
+		while (wq_data->dev_name_read < wq_data->dev_name_write - 1) {
+			if (wq_data->dev_name_arry_rw_status
+			    [wq_data->dev_name_read] ==
+			    DRM_HOTPLUG_READY_TO_READ) {
+				psb_umevent_remove_from_list
+				    (wq_data->hotplug_dev_list,
+				     &wq_data->dev_name_arry
+				     [wq_data->dev_name_read]
+				     [0]);
+				wq_data->
+				    dev_name_arry_rw_status
+				    [wq_data->dev_name_read]
+				    = DRM_HOTPLUG_READ_COMPLETE;
+			}
+			wq_data->dev_name_read++;
+		}
+	} else {
+		while (wq_data->dev_name_read < wq_data->dev_name_write) {
+			if (wq_data->dev_name_arry_rw_status
+			    [wq_data->dev_name_read] ==
+			    DRM_HOTPLUG_READY_TO_READ) {
+				psb_umevent_remove_from_list
+				    (wq_data->hotplug_dev_list,
+				     &wq_data->dev_name_arry
+				     [wq_data->dev_name_read]
+				     [0]);
+				wq_data->
+				    dev_name_arry_rw_status
+				    [wq_data->dev_name_read]
+				    = DRM_HOTPLUG_READ_COMPLETE;
+			}
+			wq_data->dev_name_read++;
+		}
+	}
+	if (wq_data->dev_name_read > DRM_HOTPLUG_RING_DEPTH_MAX)
+		wq_data->dev_name_read = 0;
+}
+
+/*EXPORT_SYMBOL(psb_hotplug_dev_remove_wq); */
+/**
+ * psb_hotplug_dev_change_wq - change workqueue implementation
+ *
+ * @work: work struct to use for kernel scheduling
+ *
+ */
+void psb_hotplug_dev_change_wq(struct work_struct *work)
+{
+	struct hotplug_disp_workqueue_data *wq_data;
+	struct umevent_obj *wq_working_hotplug_disp_obj;
+	wq_data = to_hotplug_disp_workqueue_data(work);
+	if (wq_data->dev_name_write_wrap == 1) {
+		wq_data->dev_name_read_write_wrap_ack = 1;
+		wq_data->dev_name_write_wrap = 0;
+		while (wq_data->dev_name_read != DRM_HOTPLUG_RING_DEPTH_MAX) {
+			if (wq_data->dev_name_arry_rw_status
+			    [wq_data->dev_name_read] ==
+			    DRM_HOTPLUG_READY_TO_READ) {
+				wq_data->dev_name_arry_rw_status
+				    [wq_data->dev_name_read] =
+				    DRM_HOTPLUG_READ_COMPLETE;
+
+				wq_working_hotplug_disp_obj =
+				    psb_umevent_find_obj(&wq_data->dev_name_arry
+							 [wq_data->dev_name_read]
+							 [0],
+							 wq_data->hotplug_dev_list);
+				psb_umevent_notify_change_gfxsock
+				    (wq_working_hotplug_disp_obj,
+				     DRM_HOTPLUG_SOCKET_GROUP_ID);
+			}
+			wq_data->dev_name_read++;
+		}
+		wq_data->dev_name_read = 0;
+		while (wq_data->dev_name_read < wq_data->dev_name_write - 1) {
+			if (wq_data->dev_name_arry_rw_status
+			    [wq_data->dev_name_read] ==
+			    DRM_HOTPLUG_READY_TO_READ) {
+				wq_data->dev_name_arry_rw_status
+				    [wq_data->dev_name_read] =
+				    DRM_HOTPLUG_READ_COMPLETE;
+
+				wq_working_hotplug_disp_obj =
+				    psb_umevent_find_obj(&wq_data->dev_name_arry
+							 [wq_data->dev_name_read]
+							 [0],
+							 wq_data->hotplug_dev_list);
+				psb_umevent_notify_change_gfxsock
+				    (wq_working_hotplug_disp_obj,
+				     DRM_HOTPLUG_SOCKET_GROUP_ID);
+			}
+			wq_data->dev_name_read++;
+		}
+	} else {
+		while (wq_data->dev_name_read < wq_data->dev_name_write) {
+			if (wq_data->dev_name_arry_rw_status
+			    [wq_data->dev_name_read] ==
+			    DRM_HOTPLUG_READY_TO_READ) {
+				wq_data->dev_name_arry_rw_status
+				    [wq_data->dev_name_read] =
+				    DRM_HOTPLUG_READ_COMPLETE;
+
+				wq_working_hotplug_disp_obj =
+				    psb_umevent_find_obj(&wq_data->dev_name_arry
+							 [wq_data->dev_name_read]
+							 [0],
+							 wq_data->hotplug_dev_list);
+				psb_umevent_notify_change_gfxsock
+				    (wq_working_hotplug_disp_obj,
+				     DRM_HOTPLUG_SOCKET_GROUP_ID);
+			}
+			wq_data->dev_name_read++;
+		}
+	}
+	if (wq_data->dev_name_read > DRM_HOTPLUG_RING_DEPTH_MAX)
+		wq_data->dev_name_read = 0;
+}
+
+/*EXPORT_SYMBOL(psb_hotplug_dev_change_wq); */
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/psb_hotplug.h b/drivers/external_drivers/intel_media/display/tng/drv/psb_hotplug.h
new file mode 100644
index 0000000..7e56994
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/psb_hotplug.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright © 2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *    James C. Gualario <james.c.gualario@intel.com>
+ *
+ */
+#ifndef _PSB_HOTPLUG_H_
+#define _PSB_HOTPLUG_H_
+/**
+ * required includes
+ *
+ */
+#include "psb_umevents.h"
+/**
+ * hotplug specific defines
+ *
+ */
+#define DRM_HOTPLUG_RING_DEPTH 256
+#define DRM_HOTPLUG_RING_DEPTH_MAX (DRM_HOTPLUG_RING_DEPTH-1)
+#define DRM_HOTPLUG_READY_TO_READ 1
+#define DRM_HOTPLUG_READ_COMPLETE 2
+/**
+ * hotplug workqueue data struct.
+ */
+struct hotplug_disp_workqueue_data {
+	struct work_struct work;
+	const char *dev_name;
+	int dev_name_write;
+	int dev_name_read;
+	int dev_name_write_wrap;
+	int dev_name_read_write_wrap_ack;
+	char dev_name_arry[DRM_HOTPLUG_RING_DEPTH][24];
+	int dev_name_arry_rw_status[DRM_HOTPLUG_RING_DEPTH];
+	struct umevent_list *hotplug_dev_list;
+};
+/**
+ * hotplug state structure
+ *
+ */
+struct hotplug_state {
+	struct workqueue_struct *hotplug_wq;
+	struct hotplug_disp_workqueue_data hotplug_remove_wq_data;
+	struct hotplug_disp_workqueue_data hotplug_create_wq_data;
+	struct hotplug_disp_workqueue_data hotplug_change_wq_data;
+	struct umevent_list *list;
+};
+/**
+ * main interface function prototytpes for hotplug support.
+ *
+ */
+struct hotplug_state *psb_hotplug_init(struct kobject *parent_kobj);
+extern int psb_hotplug_notify_change_um(const char *name,
+					struct hotplug_state *state);
+extern int psb_hotplug_create_and_notify_um(const char *name,
+					    struct hotplug_state *state);
+extern int psb_hotplug_remove_and_notify_um(const char *name,
+					    struct hotplug_state *state);
+extern struct umevent_list *psb_hotplug_device_pool_create_and_init(struct
+								    kobject
+								    *parent_kobj, struct
+								    hotplug_state
+								    *state);
+extern void psb_hotplug_device_pool_destroy(struct hotplug_state *state);
+/**
+ * to go back and forth between work strauct and workqueue data
+ *
+ */
+#define to_hotplug_disp_workqueue_data(x) \
+	container_of(x, struct hotplug_disp_workqueue_data, work)
+
+/**
+ * function prototypes for workqueue implementation
+ *
+ */
+extern void psb_hotplug_dev_create_wq(struct work_struct *work);
+extern void psb_hotplug_dev_remove_wq(struct work_struct *work);
+extern void psb_hotplug_dev_change_wq(struct work_struct *work);
+#endif
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/psb_intel_display.c b/drivers/external_drivers/intel_media/display/tng/drv/psb_intel_display.c
new file mode 100644
index 0000000..265e62d
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/psb_intel_display.c
@@ -0,0 +1,1232 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *	Eric Anholt <eric@anholt.net>
+ */
+
+#include <linux/i2c.h>
+
+#include <drm/drmP.h>
+#include "psb_fb.h"
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "psb_intel_display.h"
+#include "pwr_mgmt.h"
+#include "mrfld_clock.h"
+#include "mrfld_s3d.h"
+
+#ifdef CONFIG_SUPPORT_MIPI
+#include "mdfld_dsi_output.h"
+#include "mdfld_dsi_dbi_dsr.h"
+#endif
+/* FIXME may delete after MRFLD PO */
+#include "mrfld_display.h"
+#include "mdfld_csc.h"
+
+#define DRM_OUTPUT_POLL_PERIOD (10 * HZ)
+#define MAX_GAMMA                       0x10000
+/*MRFLD defines */
+static int mrfld_crtc_mode_set(struct drm_crtc *crtc,
+			       struct drm_display_mode *mode,
+			       struct drm_display_mode *adjusted_mode,
+			       int x, int y, struct drm_framebuffer *old_fb);
+static void mrfld_crtc_dpms(struct drm_crtc *crtc, int mode);
+/*MRFLD defines end */
+
+struct psb_intel_clock_t {
+	/* given values */
+	int n;
+	int m1, m2;
+	int p1, p2;
+	/* derived values */
+	int dot;
+	int vco;
+	int m;
+	int p;
+};
+
+struct psb_intel_p2_t {
+	int dot_limit;
+	int p2_slow, p2_fast;
+};
+
+#define INTEL_P2_NUM		      2
+
+struct psb_intel_limit_t {
+	struct psb_intel_range_t dot, vco, n, m, m1, m2, p, p1;
+	struct psb_intel_p2_t p2;
+};
+
+/**
+ * Returns whether any output on the specified pipe is of the specified type
+ */
+bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct drm_connector *l_entry;
+
+	list_for_each_entry(l_entry, &mode_config->connector_list, head) {
+		if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
+			struct psb_intel_output *psb_intel_output =
+			    to_psb_intel_output(l_entry);
+			if (psb_intel_output->type == type)
+				return true;
+		}
+	}
+	return false;
+}
+
+#define INTELPllInvalid(s)   { /* ErrorF (s) */; return false; }
+/**
+ * Returns whether the given set of divisors are valid for a given refclk with
+ * the given connectors.
+ */
+
+void psb_intel_wait_for_vblank(struct drm_device *dev)
+{
+	/* Wait for 20ms, i.e. one cycle at 50hz. */
+
+	/*
+	 * Between kernel 3.0 and 3.3, udelay was made to complain at compile
+	 * time for argument == 20000 or more.
+	 * Therefore, reduce it from 20000 to 19999.
+	 */
+	udelay(19999);
+}
+
+int psb_intel_pipe_set_base(struct drm_crtc *crtc,
+			    int x, int y, struct drm_framebuffer *old_fb)
+{
+	struct drm_device *dev = crtc->dev;
+	/* struct drm_i915_master_private *master_priv; */
+	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+	struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
+	struct psb_intel_mode_device *mode_dev = psb_intel_crtc->mode_dev;
+	int pipe = psb_intel_crtc->pipe;
+	unsigned long Start, Offset;
+	int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE);
+	int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
+	int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+	u32 dspcntr;
+	u32 power_island = 0;
+	int ret = 0;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	/* no fb bound */
+	if (!crtc->fb) {
+		DRM_DEBUG("No FB bound\n");
+		return 0;
+	}
+
+	power_island = pipe_to_island(pipe);
+
+	if (!power_island_get(power_island))
+		return 0;
+
+	Start = mode_dev->bo_offset(dev, psbfb);
+	Offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
+
+	REG_WRITE(dspstride, crtc->fb->pitches[0]);
+
+	dspcntr = REG_READ(dspcntr_reg);
+	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
+
+	switch (crtc->fb->bits_per_pixel) {
+	case 8:
+		dspcntr |= DISPPLANE_8BPP;
+		break;
+	case 16:
+		if (crtc->fb->depth == 15)
+			dspcntr |= DISPPLANE_15_16BPP;
+		else
+			dspcntr |= DISPPLANE_16BPP;
+		break;
+	case 24:
+	case 32:
+		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+		break;
+	default:
+		DRM_ERROR("Unknown color depth\n");
+		ret = -EINVAL;
+		goto psb_intel_pipe_set_base_exit;
+	}
+	REG_WRITE(dspcntr_reg, dspcntr);
+
+	DRM_DEBUG("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y);
+	REG_WRITE(dspbase, Start + Offset);
+	REG_READ(dspbase);
+
+ psb_intel_pipe_set_base_exit:
+
+	power_island_put(power_island);
+
+	return ret;
+}
+
+static void psb_intel_crtc_prepare(struct drm_crtc *crtc)
+{
+	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void psb_intel_crtc_commit(struct drm_crtc *crtc)
+{
+	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+void psb_intel_encoder_prepare(struct drm_encoder *encoder)
+{
+	struct drm_encoder_helper_funcs *encoder_funcs =
+	    encoder->helper_private;
+	/* lvds has its own version of prepare see psb_intel_lvds_prepare */
+	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+void psb_intel_encoder_commit(struct drm_encoder *encoder)
+{
+	struct drm_encoder_helper_funcs *encoder_funcs =
+	    encoder->helper_private;
+	/* lvds has its own version of commit see psb_intel_lvds_commit */
+	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+static bool psb_intel_crtc_mode_fixup(struct drm_crtc *crtc,
+				      const struct drm_display_mode *mode,
+				      struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+/**
+ * Return the pipe currently connected to the panel fitter,
+ * or -1 if the panel fitter is not present or not in use
+ */
+int psb_intel_panel_fitter_pipe(struct drm_device *dev)
+{
+	u32 pfit_control;
+
+	pfit_control = REG_READ(PFIT_CONTROL);
+
+	/* See if the panel fitter is in use */
+	if ((pfit_control & PFIT_ENABLE) == 0)
+		return -1;
+
+	/* 965 can place panel fitter on either pipe */
+	if (IS_MID(dev))
+		return (pfit_control >> 29) & 0x3;
+
+	/* older chips can only use pipe 1 */
+	return 1;
+}
+
+/** Loads the palette/gamma unit for the CRTC with the prepared values */
+void psb_intel_crtc_load_lut(struct drm_crtc *crtc)
+{
+#ifdef CONFIG_SUPPORT_MIPI
+	struct drm_device *dev = crtc->dev;
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *)dev->dev_private;
+	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+	struct mdfld_dsi_config *dsi_config = NULL;
+	struct mdfld_dsi_hw_context *ctx = NULL;
+	int palreg = PALETTE_A;
+	u32 power_island = 0;
+	int i;
+
+	/* The clocks have to be on to load the palette. */
+	if (!crtc->enabled || !dev_priv)
+		return;
+
+	dsi_config = dev_priv->dsi_configs[0];
+	if (!dsi_config)
+		return;
+
+	ctx = &dsi_config->dsi_hw_context;
+
+	switch (psb_intel_crtc->pipe) {
+	case 0:
+		break;
+	case 1:
+		palreg = PALETTE_B;
+		break;
+	case 2:
+		palreg = PALETTE_C;
+		break;
+	default:
+		DRM_ERROR("Illegal Pipe Number. \n");
+		return;
+	}
+
+	power_island = pipe_to_island(psb_intel_crtc->pipe);
+
+	if (power_island_get(power_island)) {
+
+		for (i = 0; i < 256; i++) {
+			ctx->palette[i]=
+				((psb_intel_crtc->lut_r[i] +
+				psb_intel_crtc->lut_adj[i]) << 16) |
+				((psb_intel_crtc->lut_g[i] +
+				psb_intel_crtc->lut_adj[i]) << 8) |
+				(psb_intel_crtc->lut_b[i] +
+				psb_intel_crtc->lut_adj[i]);
+			REG_WRITE((palreg + 4 * i), ctx->palette[i]);
+		}
+
+		power_island_put(power_island);
+	} else {
+		for (i = 0; i < 256; i++) {
+			dev_priv->save_palette_a[i] =
+			    ((psb_intel_crtc->lut_r[i] +
+			      psb_intel_crtc->lut_adj[i]) << 16) |
+			    ((psb_intel_crtc->lut_g[i] +
+			      psb_intel_crtc->lut_adj[i]) << 8) |
+			    (psb_intel_crtc->lut_b[i] +
+			     psb_intel_crtc->lut_adj[i]);
+		}
+
+	}
+#endif
+}
+
+#ifndef CONFIG_X86_MRST
+/**
+ * Save HW states of giving crtc
+ */
+static void psb_intel_crtc_save(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	/* struct drm_psb_private *dev_priv =
+	   (struct drm_psb_private *)dev->dev_private; */
+	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+	struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
+	int pipeA = (psb_intel_crtc->pipe == 0);
+	uint32_t paletteReg;
+	int i;
+
+	DRM_DEBUG("\n");
+
+	if (!crtc_state) {
+		DRM_DEBUG("No CRTC state found\n");
+		return;
+	}
+
+	crtc_state->saveDSPCNTR = REG_READ(pipeA ? DSPACNTR : DSPBCNTR);
+	crtc_state->savePIPECONF = REG_READ(pipeA ? PIPEACONF : PIPEBCONF);
+	crtc_state->savePIPESRC = REG_READ(pipeA ? PIPEASRC : PIPEBSRC);
+	crtc_state->saveFP0 = REG_READ(pipeA ? FPA0 : FPB0);
+	crtc_state->saveFP1 = REG_READ(pipeA ? FPA1 : FPB1);
+	crtc_state->saveDPLL = REG_READ(pipeA ? DPLL_A : DPLL_B);
+	crtc_state->saveHTOTAL = REG_READ(pipeA ? HTOTAL_A : HTOTAL_B);
+	crtc_state->saveHBLANK = REG_READ(pipeA ? HBLANK_A : HBLANK_B);
+	crtc_state->saveHSYNC = REG_READ(pipeA ? HSYNC_A : HSYNC_B);
+	crtc_state->saveVTOTAL = REG_READ(pipeA ? VTOTAL_A : VTOTAL_B);
+	crtc_state->saveVBLANK = REG_READ(pipeA ? VBLANK_A : VBLANK_B);
+	crtc_state->saveVSYNC = REG_READ(pipeA ? VSYNC_A : VSYNC_B);
+	crtc_state->saveDSPSTRIDE = REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE);
+
+	/*NOTE: DSPSIZE DSPPOS only for psb */
+	crtc_state->saveDSPSIZE = REG_READ(pipeA ? DSPASIZE : DSPBSIZE);
+	crtc_state->saveDSPPOS = REG_READ(pipeA ? DSPAPOS : DSPBPOS);
+
+	crtc_state->saveDSPBASE = REG_READ(pipeA ? DSPABASE : DSPBBASE);
+
+	DRM_DEBUG("(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
+		  crtc_state->saveDSPCNTR,
+		  crtc_state->savePIPECONF,
+		  crtc_state->savePIPESRC,
+		  crtc_state->saveFP0,
+		  crtc_state->saveFP1,
+		  crtc_state->saveDPLL,
+		  crtc_state->saveHTOTAL,
+		  crtc_state->saveHBLANK,
+		  crtc_state->saveHSYNC,
+		  crtc_state->saveVTOTAL,
+		  crtc_state->saveVBLANK,
+		  crtc_state->saveVSYNC,
+		  crtc_state->saveDSPSTRIDE,
+		  crtc_state->saveDSPSIZE,
+		  crtc_state->saveDSPPOS, crtc_state->saveDSPBASE);
+
+	paletteReg = pipeA ? PALETTE_A : PALETTE_B;
+	for (i = 0; i < 256; ++i)
+		crtc_state->savePalette[i] = REG_READ(paletteReg + (i << 2));
+}
+
+/**
+ * Restore HW states of giving crtc
+ */
+static void psb_intel_crtc_restore(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	/* struct drm_psb_private * dev_priv =
+	   (struct drm_psb_private *)dev->dev_private; */
+	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+	struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
+	/* struct drm_crtc_helper_funcs * crtc_funcs = crtc->helper_private; */
+	int pipeA = (psb_intel_crtc->pipe == 0);
+	uint32_t paletteReg;
+	int i;
+
+	DRM_DEBUG("\n");
+
+	if (!crtc_state) {
+		DRM_DEBUG("No crtc state\n");
+		return;
+	}
+
+	DRM_DEBUG("current:(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
+		  REG_READ(pipeA ? DSPACNTR : DSPBCNTR),
+		  REG_READ(pipeA ? PIPEACONF : PIPEBCONF),
+		  REG_READ(pipeA ? PIPEASRC : PIPEBSRC),
+		  REG_READ(pipeA ? FPA0 : FPB0),
+		  REG_READ(pipeA ? FPA1 : FPB1),
+		  REG_READ(pipeA ? DPLL_A : DPLL_B),
+		  REG_READ(pipeA ? HTOTAL_A : HTOTAL_B),
+		  REG_READ(pipeA ? HBLANK_A : HBLANK_B),
+		  REG_READ(pipeA ? HSYNC_A : HSYNC_B),
+		  REG_READ(pipeA ? VTOTAL_A : VTOTAL_B),
+		  REG_READ(pipeA ? VBLANK_A : VBLANK_B),
+		  REG_READ(pipeA ? VSYNC_A : VSYNC_B),
+		  REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE),
+		  REG_READ(pipeA ? DSPASIZE : DSPBSIZE),
+		  REG_READ(pipeA ? DSPAPOS : DSPBPOS),
+		  REG_READ(pipeA ? DSPABASE : DSPBBASE)
+	    );
+
+	DRM_DEBUG("saved: (%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
+		  crtc_state->saveDSPCNTR,
+		  crtc_state->savePIPECONF,
+		  crtc_state->savePIPESRC,
+		  crtc_state->saveFP0,
+		  crtc_state->saveFP1,
+		  crtc_state->saveDPLL,
+		  crtc_state->saveHTOTAL,
+		  crtc_state->saveHBLANK,
+		  crtc_state->saveHSYNC,
+		  crtc_state->saveVTOTAL,
+		  crtc_state->saveVBLANK,
+		  crtc_state->saveVSYNC,
+		  crtc_state->saveDSPSTRIDE,
+		  crtc_state->saveDSPSIZE,
+		  crtc_state->saveDSPPOS, crtc_state->saveDSPBASE);
+
+	if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
+		REG_WRITE(pipeA ? DPLL_A : DPLL_B,
+			  crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
+		REG_READ(pipeA ? DPLL_A : DPLL_B);
+		DRM_DEBUG("write dpll: %x\n",
+			  REG_READ(pipeA ? DPLL_A : DPLL_B));
+		udelay(150);
+	}
+
+	REG_WRITE(pipeA ? FPA0 : FPB0, crtc_state->saveFP0);
+	REG_READ(pipeA ? FPA0 : FPB0);
+
+	REG_WRITE(pipeA ? FPA1 : FPB1, crtc_state->saveFP1);
+	REG_READ(pipeA ? FPA1 : FPB1);
+
+	REG_WRITE(pipeA ? DPLL_A : DPLL_B, crtc_state->saveDPLL);
+	REG_READ(pipeA ? DPLL_A : DPLL_B);
+	udelay(150);
+
+	REG_WRITE(pipeA ? HTOTAL_A : HTOTAL_B, crtc_state->saveHTOTAL);
+	REG_WRITE(pipeA ? HBLANK_A : HBLANK_B, crtc_state->saveHBLANK);
+	REG_WRITE(pipeA ? HSYNC_A : HSYNC_B, crtc_state->saveHSYNC);
+	REG_WRITE(pipeA ? VTOTAL_A : VTOTAL_B, crtc_state->saveVTOTAL);
+	REG_WRITE(pipeA ? VBLANK_A : VBLANK_B, crtc_state->saveVBLANK);
+	REG_WRITE(pipeA ? VSYNC_A : VSYNC_B, crtc_state->saveVSYNC);
+	REG_WRITE(pipeA ? DSPASTRIDE : DSPBSTRIDE, crtc_state->saveDSPSTRIDE);
+
+	REG_WRITE(pipeA ? DSPASIZE : DSPBSIZE, crtc_state->saveDSPSIZE);
+	REG_WRITE(pipeA ? DSPAPOS : DSPBPOS, crtc_state->saveDSPPOS);
+
+	REG_WRITE(pipeA ? PIPEASRC : PIPEBSRC, crtc_state->savePIPESRC);
+	REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
+	REG_WRITE(pipeA ? PIPEACONF : PIPEBCONF, crtc_state->savePIPECONF);
+
+	psb_intel_wait_for_vblank(dev);
+
+	REG_WRITE(pipeA ? DSPACNTR : DSPBCNTR, crtc_state->saveDSPCNTR);
+	REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
+
+	psb_intel_wait_for_vblank(dev);
+
+	paletteReg = pipeA ? PALETTE_A : PALETTE_B;
+	for (i = 0; i < 256; ++i)
+		REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]);
+}
+#endif
+
+static void psb_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 * red,
+					u16 * green, u16 * blue,
+					uint32_t start, uint32_t size)
+{
+	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+	int i;
+	int brk = (start + size > 256) ? 256 : start + size;
+
+	for (i = start; i < brk; i++) {
+		psb_intel_crtc->lut_r[i] = red[i] >> 8;
+		psb_intel_crtc->lut_g[i] = green[i] >> 8;
+		psb_intel_crtc->lut_b[i] = blue[i] >> 8;
+	}
+
+	psb_intel_crtc_load_lut(crtc);
+}
+
+static void psb_intel_crtc_destroy(struct drm_crtc *crtc)
+{
+	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+
+#ifndef CONFIG_X86_MRST
+	kfree(psb_intel_crtc->crtc_state);
+#endif
+	drm_crtc_cleanup(crtc);
+	kfree(psb_intel_crtc);
+}
+
+/*
+ * set display controller side palette, it will influence
+ * brightness , saturation , contrast.
+ * KAI1
+*/
+int mdfld_intel_crtc_set_gamma(struct drm_device *dev,
+				struct gamma_setting *setting_data)
+{
+#ifdef CONFIG_SUPPORT_MIPI
+	struct drm_psb_private *dev_priv = NULL;
+	struct mdfld_dsi_hw_context *ctx = NULL;
+	struct mdfld_dsi_hw_registers *regs;
+	struct mdfld_dsi_config *dsi_config = NULL;
+	int ret = 0;
+	int pipe = 0;
+	u32 val = 0;
+	int i;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (!dev || !setting_data) {
+		ret = -EINVAL;
+		return ret;
+	}
+
+	if (!(setting_data->type &
+		(GAMMA_SETTING|GAMMA_INITIA|GAMMA_REG_SETTING))) {
+		ret = -EINVAL;
+		return ret;
+	}
+	if ((setting_data->type == GAMMA_SETTING &&
+		setting_data->data_len != GAMMA_10_BIT_TABLE_COUNT) ||
+		(setting_data->type == GAMMA_REG_SETTING &&
+		setting_data->data_len != GAMMA_10_BIT_TABLE_COUNT)) {
+		ret = -EINVAL;
+		return ret;
+	}
+
+	dev_priv = dev->dev_private;
+	pipe = setting_data->pipe;
+
+	drm_psb_set_gamma_pipe = setting_data->pipe;
+
+	if (pipe == 0)
+		dsi_config = dev_priv->dsi_configs[0];
+	else if (pipe == 2)
+		dsi_config = dev_priv->dsi_configs[1];
+	else if (pipe == 1) {
+		PSB_DEBUG_ENTRY("/KAI1 palette no implement for HDMI\n"
+				"do it later\n");
+		return -EINVAL;
+	} else
+		return -EINVAL;
+
+	mutex_lock(&dev_priv->gamma_csc_lock);
+
+	ctx = &dsi_config->dsi_hw_context;
+	regs = &dsi_config->regs;
+
+	if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
+					OSPM_UHB_FORCE_POWER_ON)) {
+		ret = -EAGAIN;
+		goto _fun_exit;
+	}
+
+	/*forbid dsr which will restore regs*/
+	mdfld_dsi_dsr_forbid(dsi_config);
+
+	/*enable gamma*/
+	if (drm_psb_enable_gamma && setting_data->enable_state) {
+		int i = 0, temp = 0;
+		u32 integer_part = 0, fraction_part = 0, even_part = 0,
+		    odd_part = 0;
+		u32 int_red_9_2 = 0, int_green_9_2 = 0, int_blue_9_2 = 0;
+		u32 int_red_1_0 = 0, int_green_1_0 = 0, int_blue_1_0 = 0;
+		u32 fra_red = 0, fra_green = 0, fra_blue = 0;
+		int j = 0;
+		/*here set r/g/b the same curve*/
+		for (i = 0; i <= 1024; i = i + 8) {
+			if (setting_data->type == GAMMA_INITIA) {
+				switch (setting_data->initia_mode) {
+				case GAMMA_05:
+					/* gamma 0.5 */
+					temp = 32 * int_sqrt(i * 10000);
+					printk(KERN_ALERT "gamma 0.5\n");
+					break;
+				case GAMMA_20:
+					/* gamma 2 */
+					temp = (i * i * 100) / 1024;
+					printk(KERN_ALERT "gamma 2\n");
+					break;
+				case GAMMA_05_20:
+					/*
+					 * 0 ~ 511 gamma 0.5
+					 * 512 ~1024 gamma 2
+					 */
+					if (i < 512)
+						temp = int_sqrt(i * 512 *
+								10000);
+					else
+						temp = (i - 512) * (i - 512) *
+							100 / 512  + 512 * 100;
+					printk(KERN_ALERT "gamma 0.5 + gamma 2\n");
+					break;
+				case GAMMA_20_05:
+					/*
+					 * 0 ~ 511 gamma 2
+					 * 512 ~1024 gamma 0.5
+					 */
+					if (i < 512)
+						temp = i * i * 100 / 512;
+					else
+						temp = int_sqrt((i - 512) *
+								512 * 10000)
+							+ 512 * 100;
+					printk(KERN_ALERT "gamma 2 + gamma 0.5\n");
+					break;
+				case GAMMA_10:
+					/* gamma 1 */
+					temp = i * 100;
+					printk(KERN_ALERT "gamma 1\n");
+					break;
+				default:
+					/* gamma 0.5 */
+					temp = 32 * int_sqrt(i *  10000);
+					printk(KERN_ALERT "gamma 0.5\n");
+					break;
+				}
+			} else {
+				temp = setting_data->gamma_tableX100[i / 8];
+			}
+
+			if (setting_data->type == GAMMA_REG_SETTING) {
+				if (i != 1024) {
+					ctx->palette[(i / 8) * 2] = 0;
+					ctx->palette[(i / 8) * 2 + 1] = temp;
+				} else {
+					REG_WRITE(regs->gamma_red_max_reg, MAX_GAMMA);
+					REG_WRITE(regs->gamma_green_max_reg, MAX_GAMMA);
+					REG_WRITE(regs->gamma_blue_max_reg, MAX_GAMMA);
+				}
+			} else {
+				if (temp < 0)
+					temp = 0;
+				if (temp > 1024 * 100)
+					temp = 1024 * 100;
+
+				integer_part = temp / 100;
+				fraction_part = (temp - integer_part * 100);
+				/*get r/g/b each channel*/
+				int_blue_9_2 = integer_part >> 2;
+				int_green_9_2 = int_blue_9_2 << 8;
+				int_red_9_2 = int_blue_9_2 << 16;
+				int_blue_1_0 = (integer_part & 0x3) << 6;
+				int_green_1_0 = int_blue_1_0 << 8;
+				int_red_1_0 = int_blue_1_0 << 16;
+				fra_blue = fraction_part*64/100;
+				fra_green = fra_blue << 8;
+				fra_red = fra_blue << 16;
+				/*get even and odd part*/
+				odd_part = int_red_9_2 | int_green_9_2 | int_blue_9_2;
+				even_part = int_red_1_0 | fra_red | int_green_1_0 |
+					fra_green | int_blue_1_0 | fra_blue;
+				if (i != 1024) {
+					ctx->palette[(i / 8) * 2] = even_part;
+					ctx->palette[(i / 8) * 2 + 1] = odd_part;
+				} else {
+					REG_WRITE(regs->gamma_red_max_reg,
+							(integer_part << 6) |
+							(fraction_part));
+					REG_WRITE(regs->gamma_green_max_reg,
+							(integer_part << 6) |
+							(fraction_part));
+					REG_WRITE(regs->gamma_blue_max_reg,
+							(integer_part << 6) |
+							(fraction_part));
+					printk(KERN_ALERT
+							"max (red %x, green 0x%x, blue 0x%x)\n",
+						REG_READ(regs->gamma_red_max_reg),
+						REG_READ(regs->gamma_green_max_reg),
+						REG_READ(regs->gamma_blue_max_reg));
+				}
+			}
+
+			j = j + 8;
+		}
+		/* save palette (gamma) */
+                for (i = 0; i < 256; i++)
+                        gamma_setting_save[i] = ctx->palette[i];
+
+		drm_psb_set_gamma_success = 1;
+		drm_psb_set_gamma_pending = 1;
+	} else {
+		drm_psb_enable_gamma = 0;
+		drm_psb_set_gamma_success = 0;
+		drm_psb_set_gamma_pending = 0;
+		drm_psb_set_gamma_pipe = MDFLD_PIPE_MAX;
+
+                /*reset gamma setting*/
+                for (i = 0; i < 256; i++)
+                        gamma_setting_save[i] = 0;
+
+		/*disable */
+		val = REG_READ(regs->pipeconf_reg);
+		val &= ~(PIPEACONF_GAMMA);
+		REG_WRITE(regs->pipeconf_reg, val);
+		ctx->pipeconf = val;
+		REG_WRITE(regs->dspcntr_reg,
+				REG_READ(regs->dspcntr_reg) &
+				~(DISPPLANE_GAMMA_ENABLE));
+		ctx->dspcntr = REG_READ(regs->dspcntr_reg) & (~DISPPLANE_GAMMA_ENABLE);
+		REG_READ(regs->dspcntr_reg);
+	}
+
+	mdfld_dsi_dsr_update_panel_fb(dsi_config);
+	/*allow entering dsr*/
+	mdfld_dsi_dsr_allow(dsi_config);
+
+	ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+
+_fun_exit:
+	mutex_unlock(&dev_priv->gamma_csc_lock);
+	return ret;
+#else
+	return 0;
+#endif
+}
+
+/*
+ * set display controller side color conversion
+ * KAI1
+*/
+int mdfld_intel_crtc_set_color_conversion(struct drm_device *dev,
+					struct csc_setting *setting_data)
+{
+#ifdef CONFIG_SUPPORT_MIPI
+	struct drm_psb_private *dev_priv = NULL;
+	struct mdfld_dsi_hw_context *ctx = NULL;
+	struct mdfld_dsi_hw_registers *regs;
+	struct mdfld_dsi_config *dsi_config = NULL;
+	int ret = 0;
+	int i = 0;
+	int pipe = 0;
+	u32 val = 0;
+	/*Rx, Ry, Gx, Gy, Bx, By, Wx, Wy*/
+	/*sRGB color space*/
+	uint32_t chrom_input[8] = {	6400, 3300,
+		3000, 6000,
+		1500, 600,
+		3127, 3290 };
+	/* PR3 color space*/
+	uint32_t chrom_output[8] = { 6382, 3361,
+		2979, 6193,
+		1448, 478,
+		3000, 3236 };
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (!dev) {
+		ret = -EINVAL;
+		return ret;
+	}
+
+	if (!(setting_data->type &
+		(CSC_CHROME_SETTING | CSC_INITIA | CSC_SETTING | CSC_REG_SETTING))) {
+		ret = -EINVAL;
+		return ret;
+	}
+	if ((setting_data->type == CSC_SETTING &&
+		setting_data->data_len != CSC_COUNT) ||
+		(setting_data->type == CSC_CHROME_SETTING &&
+		setting_data->data_len != CHROME_COUNT) ||
+		(setting_data->type == CSC_REG_SETTING &&
+		setting_data->data_len != CSC_REG_COUNT)) {
+		ret = -EINVAL;
+		return ret;
+	}
+
+	dev_priv = dev->dev_private;
+	pipe = setting_data->pipe;
+
+	if (pipe == 0)
+		dsi_config = dev_priv->dsi_configs[0];
+	else if (pipe == 2)
+		dsi_config = dev_priv->dsi_configs[1];
+	else if (pipe == 1) {
+		PSB_DEBUG_ENTRY("/KAI1 color conversion no implement for HDMI\n"
+				"do it later\n");
+		return -EINVAL;
+	} else
+		return -EINVAL;
+
+	mutex_lock(&dev_priv->gamma_csc_lock);
+
+	ctx = &dsi_config->dsi_hw_context;
+	regs = &dsi_config->regs;
+
+	if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
+					OSPM_UHB_FORCE_POWER_ON)) {
+		ret = -EAGAIN;
+		goto _fun_exit;
+	}
+
+	/*forbid dsr which will restore regs*/
+	mdfld_dsi_dsr_forbid(dsi_config);
+
+	if (drm_psb_enable_color_conversion && setting_data->enable_state) {
+		if (setting_data->type == CSC_INITIA) {
+			/*initialize*/
+			csc(dev, &chrom_input[0], &chrom_output[0], pipe);
+		} else if (setting_data->type == CSC_CHROME_SETTING) {
+			/*use chrome to caculate csc*/
+			memcpy(chrom_input, setting_data->data.chrome_data,
+					8 * sizeof(int));
+			memcpy(chrom_output, setting_data->data.chrome_data + 8,
+					8 * sizeof(int));
+			csc(dev, &chrom_input[0], &chrom_output[0], pipe);
+		} else if (setting_data->type == CSC_SETTING) {
+			/*use user space csc*/
+			csc_program_DC(dev, &setting_data->data.csc_data[0],
+					pipe);
+		} else if (setting_data->type == CSC_REG_SETTING) {
+			/*use user space csc regiseter setting*/
+			for (i = 0; i < 6; i++) {
+				REG_WRITE(regs->color_coef_reg + (i<<2), setting_data->data.csc_reg_data[i]);
+				ctx->color_coef[i] = setting_data->data.csc_reg_data[i];
+			}
+		}
+
+                /*save color_coef (chrome) */
+                for (i = 0; i < 6; i++)
+                        csc_setting_save[i] = REG_READ(regs->color_coef_reg + (i<<2));
+
+		/*enable*/
+		val = REG_READ(regs->pipeconf_reg);
+		val |= (PIPEACONF_COLOR_MATRIX_ENABLE);
+		REG_WRITE(regs->pipeconf_reg, val);
+		ctx->pipeconf = val;
+		val = REG_READ(regs->dspcntr_reg);
+		REG_WRITE(regs->dspcntr_reg, val);
+	} else {
+		drm_psb_enable_color_conversion = 0;
+
+                /*reset color_conversion color setting*/
+                for (i = 0; i < 6; i++)
+                        csc_setting_save[i] = 0;
+
+		/*disable*/
+		val = REG_READ(regs->pipeconf_reg);
+		val &= ~(PIPEACONF_COLOR_MATRIX_ENABLE);
+		REG_WRITE(regs->pipeconf_reg, val);
+		ctx->pipeconf = val;
+		val = REG_READ(regs->dspcntr_reg);
+		REG_WRITE(regs->dspcntr_reg, val);
+	}
+
+	mdfld_dsi_dsr_update_panel_fb(dsi_config);
+	/*allow entering dsr*/
+	mdfld_dsi_dsr_allow(dsi_config);
+
+	ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+
+_fun_exit:
+	mutex_unlock(&dev_priv->gamma_csc_lock);
+	return ret;
+
+#else
+	return 0;
+#endif
+}
+static const struct drm_crtc_helper_funcs mrfld_helper_funcs;
+const struct drm_crtc_funcs mdfld_intel_crtc_funcs;
+
+void psb_intel_crtc_init(struct drm_device *dev, int pipe,
+			 struct psb_intel_mode_device *mode_dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_intel_crtc *psb_intel_crtc;
+	int i;
+	uint16_t *r_base, *g_base, *b_base;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	/* We allocate a extra array of drm_connector pointers
+	 * for fbdev after the crtc */
+	psb_intel_crtc =
+	    kzalloc(sizeof(struct psb_intel_crtc) +
+		    (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)),
+		    GFP_KERNEL);
+	if (psb_intel_crtc == NULL)
+		return;
+
+#ifndef CONFIG_X86_MRST
+	psb_intel_crtc->crtc_state =
+	    kzalloc(sizeof(struct psb_intel_crtc_state), GFP_KERNEL);
+	if (!psb_intel_crtc->crtc_state) {
+		DRM_INFO("Crtc state error: No memory\n");
+		kfree(psb_intel_crtc);
+		return;
+	}
+#endif
+
+	drm_crtc_init(dev, &psb_intel_crtc->base, &mdfld_intel_crtc_funcs);
+
+	drm_mode_crtc_set_gamma_size(&psb_intel_crtc->base, 256);
+	psb_intel_crtc->pipe = pipe;
+	psb_intel_crtc->plane = pipe;
+
+	r_base = psb_intel_crtc->base.gamma_store;
+	g_base = r_base + 256;
+	b_base = g_base + 256;
+	for (i = 0; i < 256; i++) {
+		psb_intel_crtc->lut_r[i] = i;
+		psb_intel_crtc->lut_g[i] = i;
+		psb_intel_crtc->lut_b[i] = i;
+		r_base[i] = i << 8;
+		g_base[i] = i << 8;
+		b_base[i] = i << 8;
+
+		psb_intel_crtc->lut_adj[i] = 0;
+	}
+
+	psb_intel_crtc->mode_dev = mode_dev;
+	psb_intel_crtc->cursor_addr = 0;
+
+	drm_crtc_helper_add(&psb_intel_crtc->base, &mrfld_helper_funcs);
+
+	/* Setup the array of drm_connector pointer array */
+	psb_intel_crtc->mode_set.crtc = &psb_intel_crtc->base;
+	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
+	       dev_priv->plane_to_crtc_mapping[psb_intel_crtc->plane] != NULL);
+	dev_priv->plane_to_crtc_mapping[psb_intel_crtc->plane] =
+	    &psb_intel_crtc->base;
+	dev_priv->pipe_to_crtc_mapping[psb_intel_crtc->pipe] =
+	    &psb_intel_crtc->base;
+	psb_intel_crtc->mode_set.connectors =
+	    (struct drm_connector **)(psb_intel_crtc + 1);
+	psb_intel_crtc->mode_set.num_connectors = 0;
+}
+
+int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
+				    struct drm_file *file_priv)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct drm_psb_get_pipe_from_crtc_id_arg *pipe_from_crtc_id = data;
+	struct drm_mode_object *drmmode_obj;
+	struct psb_intel_crtc *crtc;
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
+					   DRM_MODE_OBJECT_CRTC);
+
+	if (!drmmode_obj) {
+		DRM_ERROR("no such CRTC id\n");
+		return -EINVAL;
+	}
+
+	crtc = to_psb_intel_crtc(obj_to_crtc(drmmode_obj));
+	pipe_from_crtc_id->pipe = crtc->pipe;
+
+	return 0;
+}
+
+struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
+{
+	struct drm_crtc *crtc = NULL;
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+		if (psb_intel_crtc->pipe == pipe)
+			break;
+	}
+	return crtc;
+}
+
+int psb_intel_connector_clones(struct drm_device *dev, int type_mask)
+{
+	int index_mask = 0;
+	struct drm_connector *connector;
+	int entry = 0;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct psb_intel_output *psb_intel_output =
+		    to_psb_intel_output(connector);
+		if (type_mask & (1 << psb_intel_output->type))
+			index_mask |= (1 << entry);
+		entry++;
+	}
+	return index_mask;
+}
+
+#if 0				/* JB: Rework framebuffer code into something none device specific */
+static void psb_intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+	struct psb_intel_framebuffer *psb_intel_fb =
+	    to_psb_intel_framebuffer(fb);
+	struct drm_device *dev = fb->dev;
+
+	if (fb->fbdev)
+		intelfb_remove(dev, fb);
+
+	drm_framebuffer_cleanup(fb);
+	drm_gem_object_unreference(fb->mm_private);
+
+	kfree(psb_intel_fb);
+}
+
+static int psb_intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
+						    struct drm_file *file_priv,
+						    unsigned int *handle)
+{
+	struct drm_gem_object *object = fb->mm_private;
+
+	return drm_gem_handle_create(file_priv, object, handle);
+}
+
+static const struct drm_framebuffer_funcs psb_intel_fb_funcs = {
+	.destroy = psb_intel_user_framebuffer_destroy,
+	.create_handle = psb_intel_user_framebuffer_create_handle,
+};
+
+struct drm_framebuffer *psb_intel_framebuffer_create(struct drm_device *dev, struct drm_mode_fb_cmd2
+						     *mode_cmd,
+						     void *mm_private)
+{
+	struct psb_intel_framebuffer *psb_intel_fb;
+
+	psb_intel_fb = kzalloc(sizeof(*psb_intel_fb), GFP_KERNEL);
+	if (!psb_intel_fb)
+		return NULL;
+
+	if (!drm_framebuffer_init(dev,
+				  &psb_intel_fb->base, &psb_intel_fb_funcs))
+		return NULL;
+
+	drm_helper_mode_fill_fb_struct(&psb_intel_fb->base, mode_cmd);
+
+	return &psb_intel_fb->base;
+}
+
+static struct drm_framebuffer *psb_intel_user_framebuffer_create(struct
+								 drm_device
+								 *dev, struct
+								 drm_file
+								 *filp, struct
+								 drm_mode_fb_cmd2
+								 *mode_cmd)
+{
+	struct drm_gem_object *obj;
+
+	obj = drm_gem_object_lookup(dev, filp, mode_cmd->handle);
+	if (!obj)
+		return NULL;
+
+	return psb_intel_framebuffer_create(dev, mode_cmd, obj);
+}
+
+static int psb_intel_insert_new_fb(struct drm_device *dev,
+				   struct drm_file *file_priv,
+				   struct drm_framebuffer *fb,
+				   struct drm_mode_fb_cmd2 *mode_cmd)
+{
+	struct psb_intel_framebuffer *psb_intel_fb;
+	struct drm_gem_object *obj;
+	struct drm_crtc *crtc;
+
+	psb_intel_fb = to_psb_intel_framebuffer(fb);
+
+	mutex_lock(&dev->struct_mutex);
+	obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
+
+	if (!obj) {
+		mutex_unlock(&dev->struct_mutex);
+		return -EINVAL;
+	}
+	drm_gem_object_unreference(psb_intel_fb->base.mm_private);
+	drm_helper_mode_fill_fb_struct(fb, mode_cmd, obj);
+	mutex_unlock(&dev->struct_mutex);
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		if (crtc->fb == fb) {
+			struct drm_crtc_helper_funcs *crtc_funcs =
+			    crtc->helper_private;
+			crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y);
+		}
+	}
+	return 0;
+}
+
+static const struct drm_mode_config_funcs psb_intel_mode_funcs = {
+	.resize_fb = psb_intel_insert_new_fb,
+	.fb_create = psb_intel_user_framebuffer_create,
+	.fb_changed = intelfb_probe,
+};
+#endif
+
+#if 0				/* Should be per device */
+void psb_intel_modeset_init(struct drm_device *dev)
+{
+	int num_pipe;
+	int i;
+
+	drm_mode_config_init(dev);
+
+	dev->mode_config.min_width = 0;
+	dev->mode_config.min_height = 0;
+
+	dev->mode_config.funcs = (void *)&psb_intel_mode_funcs;
+
+	if (IS_I965G(dev)) {
+		dev->mode_config.max_width = 8192;
+		dev->mode_config.max_height = 8192;
+	} else {
+		dev->mode_config.max_width = 2048;
+		dev->mode_config.max_height = 2048;
+	}
+
+	/* set memory base */
+	/* MRST and PSB should use BAR 2 */
+	dev->mode_config.fb_base = pci_resource_start(dev->pdev, 2);
+
+	if (IS_MOBILE(dev) || IS_I9XX(dev))
+		num_pipe = 2;
+	else
+		num_pipe = 1;
+	DRM_DEBUG("%d display pipe%s available.\n",
+		  num_pipe, num_pipe > 1 ? "s" : "");
+
+	for (i = 0; i < num_pipe; i++)
+		psb_intel_crtc_init(dev, i);
+
+	psb_intel_setup_outputs(dev);
+
+	/* setup fbs */
+	/* drm_initial_config(dev); */
+}
+#endif
+
+void psb_intel_modeset_cleanup(struct drm_device *dev)
+{
+	drm_mode_config_cleanup(dev);
+}
+
+/* current intel driver doesn't take advantage of encoders
+   always give back the encoder for the connector
+*/
+struct drm_encoder *psb_intel_best_encoder(struct drm_connector *connector)
+{
+	struct psb_intel_output *psb_intel_output =
+	    to_psb_intel_output(connector);
+
+	return &psb_intel_output->enc;
+}
+
+#define COUNT_MAX 1000
+
+static const struct drm_crtc_helper_funcs mrfld_helper_funcs = {
+	.dpms = mrfld_crtc_dpms,
+	.mode_fixup = psb_intel_crtc_mode_fixup,
+	.mode_set = mrfld_crtc_mode_set,
+	.mode_set_base = mdfld__intel_pipe_set_base,
+	.prepare = psb_intel_crtc_prepare,
+	.commit = psb_intel_crtc_commit,
+};
+
+static void intel_output_poll_execute(struct work_struct *work)
+{
+	struct delayed_work *delayed_work = to_delayed_work(work);
+	struct drm_device *dev = container_of(delayed_work, struct drm_device,
+			mode_config.output_poll_work);
+	struct drm_connector *connector;
+	bool repoll = false, changed = false;
+
+	mutex_lock(&dev->mode_config.mutex);
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+
+		/* if this is HPD or polled don't check it -
+		   TV out for instance */
+		if (!connector->polled)
+			continue;
+
+		else if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT |
+					DRM_CONNECTOR_POLL_DISCONNECT))
+			repoll = true;
+
+		connector->status = connector->funcs->detect(connector, false);
+
+		if ((connector->status == connector_status_disconnected &&
+					connector->encoder) ||
+				(!connector->encoder &&
+				 connector->status ==
+				 connector_status_connected))
+			changed = true;
+	}
+
+	mutex_unlock(&dev->mode_config.mutex);
+
+	if (changed) {
+		/* call fbdev then send a uevent */
+		if (dev->mode_config.funcs->output_poll_changed)
+			dev->mode_config.funcs->output_poll_changed(dev);
+
+		drm_sysfs_hotplug_event(dev);
+	}
+
+	if (repoll)
+		queue_delayed_work(system_nrt_wq, delayed_work,
+				DRM_OUTPUT_POLL_PERIOD);
+}
+
+void intel_drm_kms_helper_poll_init(struct drm_device *dev)
+{
+	INIT_DELAYED_WORK(&dev->mode_config.output_poll_work,
+			intel_output_poll_execute);
+	dev->mode_config.poll_enabled = true;
+
+	drm_kms_helper_poll_enable(dev);
+}
+
+/* MRST_PLATFORM end */
+
+#include "psb_intel_display2.c"
+#include "mrfld_display.c"
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/psb_intel_display.h b/drivers/external_drivers/intel_media/display/tng/drv/psb_intel_display.h
new file mode 100644
index 0000000..2ffb652
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/psb_intel_display.h
@@ -0,0 +1,28 @@
+/* copyright (c) 2008, Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+
+#ifndef _INTEL_DISPLAY_H_
+#define _INTEL_DISPLAY_H_
+
+int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
+			       struct drm_framebuffer *old_fb);
+bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type);
+void intel_drm_kms_helper_poll_init(struct drm_device *dev);
+
+#endif
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/psb_intel_display2.c b/drivers/external_drivers/intel_media/display/tng/drv/psb_intel_display2.c
new file mode 100644
index 0000000..646538e
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/psb_intel_display2.c
@@ -0,0 +1,1771 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *	Eric Anholt <eric@anholt.net>
+ */
+
+#ifdef CONFIG_SUPPORT_MIPI
+#include "mdfld_dsi_dbi.h"
+#include "mdfld_dsi_dpi.h"
+//#include "mdfld_dsi_output.h"
+#ifdef CONFIG_MID_DSI_DPU
+#include "mdfld_dsi_dbi_dpu.h"
+#endif
+#endif
+
+#include "psb_intel_display.h"
+#include "displayclass_interface.h"
+
+#define KEEP_UNUSED_CODE 0
+
+#ifdef MIN
+#undef MIN
+#endif
+
+#define MIN(x, y) (((x) < (y)) ? (x) : (y))
+
+/* MDFLD_PLATFORM start */
+void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe)
+{
+	int count, temp;
+	u32 pipeconf_reg = PIPEACONF;
+
+	switch (pipe) {
+	case 0:
+		break;
+	case 1:
+		pipeconf_reg = PIPEBCONF;
+		break;
+	case 2:
+		pipeconf_reg = PIPECCONF;
+		break;
+	default:
+		DRM_ERROR("Illegal Pipe Number. \n");
+		return;
+	}
+
+	/* Wait for for the pipe disable to take effect. */
+	for (count = 0; count < COUNT_MAX; count++) {
+		temp = REG_READ(pipeconf_reg);
+		if (!(temp & PIPEACONF_PIPE_STATE))
+			break;
+
+		udelay(20);
+	}
+
+	PSB_DEBUG_ENTRY("cout = %d. \n", count);
+}
+
+void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe)
+{
+	int count, temp;
+	u32 pipeconf_reg = PIPEACONF;
+
+	switch (pipe) {
+	case 0:
+		break;
+	case 1:
+		pipeconf_reg = PIPEBCONF;
+		break;
+	case 2:
+		pipeconf_reg = PIPECCONF;
+		break;
+	default:
+		DRM_ERROR("Illegal Pipe Number. \n");
+		return;
+	}
+
+	/* Wait for for the pipe enable to take effect. */
+	for (count = 0; count < COUNT_MAX; count++) {
+		temp = REG_READ(pipeconf_reg);
+		if ((temp & PIPEACONF_PIPE_STATE))
+			break;
+
+		udelay(20);
+	}
+
+	PSB_DEBUG_ENTRY("cout = %d. \n", count);
+}
+
+static int mdfld_intel_crtc_cursor_set(struct drm_crtc *crtc,
+				       struct drm_file *file_priv,
+				       uint32_t handle,
+				       uint32_t width, uint32_t height)
+{
+	struct drm_device *dev = crtc->dev;
+	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+	struct psb_intel_mode_device *mode_dev = psb_intel_crtc->mode_dev;
+	int pipe = psb_intel_crtc->pipe;
+	uint32_t control = CURACNTR;
+	uint32_t base = CURABASE;
+	uint32_t temp;
+	size_t addr = 0;
+	uint32_t page_offset;
+	size_t size;
+	void *bo;
+	u32 power_island = 0;
+	int ret;
+
+	DRM_DEBUG("\n");
+
+	switch (pipe) {
+	case 0:
+		break;
+	case 1:
+		control = CURBCNTR;
+		base = CURBBASE;
+		break;
+	case 2:
+		control = CURCCNTR;
+		base = CURCBASE;
+		break;
+	default:
+		DRM_ERROR("Illegal Pipe Number. \n");
+		return -EINVAL;
+	}
+
+	/* Can't enalbe HW cursor on plane B/C. */
+	if (pipe != 0)
+		return 0;
+
+	power_island = pipe_to_island(pipe);
+
+	/* if we want to turn of the cursor ignore width and height */
+	if (!handle) {
+		DRM_DEBUG("cursor off\n");
+		/* turn off the cursor */
+		temp = 0;
+		temp |= CURSOR_MODE_DISABLE;
+
+		if (power_island_get(power_island)) {
+			REG_WRITE(control, temp);
+			REG_WRITE(base, 0);
+			power_island_put(power_island);
+		}
+
+		/* unpin the old bo */
+		if (psb_intel_crtc->cursor_bo) {
+			mode_dev->bo_unpin_for_scanout(dev,
+						       psb_intel_crtc->
+						       cursor_bo);
+			psb_intel_crtc->cursor_bo = NULL;
+		}
+		return 0;
+	}
+
+	/* Currently we only support 64x64 cursors */
+	if (width != 64 || height != 64) {
+		DRM_ERROR("we currently only support 64x64 cursors\n");
+		return -EINVAL;
+	}
+
+	bo = mode_dev->bo_from_handle(dev, file_priv, handle);
+	if (!bo)
+		return -ENOENT;
+
+	ret = mode_dev->bo_pin_for_scanout(dev, bo);
+	if (ret)
+		return ret;
+	size = mode_dev->bo_size(dev, bo);
+	if (size < width * height * 4) {
+		DRM_ERROR("buffer is to small\n");
+		return -ENOMEM;
+	}
+
+	/* insert this bo into gtt */
+	/* DRM_INFO("%s: map meminfo for hw cursor. handle %x, pipe = %d\n",
+	   __FUNCTION__, handle, pipe); */
+
+	ret = psb_gtt_map_meminfo(dev, (void *)(uintptr_t)handle, 0,
+				  &page_offset);
+	if (ret) {
+		DRM_ERROR("Can not map meminfo to GTT. handle 0x%x\n", handle);
+		return ret;
+	}
+
+	addr = page_offset << PAGE_SHIFT;
+
+	psb_intel_crtc->cursor_addr = addr;
+
+	temp = 0;
+	/* set the pipe for the cursor */
+	temp |= (pipe << 28);
+	temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
+
+	if (power_island_get(power_island)) {
+		REG_WRITE(control, temp);
+		REG_WRITE(base, addr);
+		power_island_put(power_island);
+	}
+
+	/* unpin the old bo */
+	if (psb_intel_crtc->cursor_bo && psb_intel_crtc->cursor_bo != bo) {
+		mode_dev->bo_unpin_for_scanout(dev, psb_intel_crtc->cursor_bo);
+		psb_intel_crtc->cursor_bo = bo;
+	}
+
+	return 0;
+}
+
+static int mdfld_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+#ifdef CONFIG_SUPPORT_MIPI
+	struct drm_device *dev = crtc->dev;
+#ifndef CONFIG_MID_DSI_DPU
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *)dev->dev_private;
+#else
+	struct psb_drm_dpu_rect rect;
+#endif
+	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+	int pipe = psb_intel_crtc->pipe;
+	uint32_t pos = CURAPOS;
+	uint32_t base = CURABASE;
+	uint32_t temp = 0;
+	uint32_t addr;
+	u32 power_island = 0;
+
+	switch (pipe) {
+	case 0:
+#ifndef CONFIG_MID_DSI_DPU
+		if (!(dev_priv->dsr_fb_update & MDFLD_DSR_CURSOR_0))
+			mdfld_dsi_dbi_exit_dsr(dev, MDFLD_DSR_CURSOR_0, 0, 0);
+#else				/*CONFIG_MID_DSI_DPU */
+		rect.x = x;
+		rect.y = y;
+
+		mdfld_dbi_dpu_report_damage(dev, MDFLD_CURSORA, &rect);
+		mdfld_dpu_exit_dsr(dev);
+#endif
+		break;
+	case 1:
+		pos = CURBPOS;
+		base = CURBBASE;
+		break;
+	case 2:
+#ifndef CONFIG_MID_DSI_DPU
+		if (!(dev_priv->dsr_fb_update & MDFLD_DSR_CURSOR_2))
+			mdfld_dsi_dbi_exit_dsr(dev, MDFLD_DSR_CURSOR_2, 0, 0);
+#else				/*CONFIG_MID_DSI_DPU */
+		mdfld_dbi_dpu_report_damage(dev, MDFLD_CURSORC, &rect);
+		mdfld_dpu_exit_dsr(dev);
+#endif
+		pos = CURCPOS;
+		base = CURCBASE;
+		break;
+	default:
+		DRM_ERROR("Illegal Pipe Number. \n");
+		return -EINVAL;
+	}
+
+	/* Can't enalbe HW cursor on plane B/C. */
+	if (pipe != 0)
+		return 0;
+
+	if (x < 0) {
+		temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
+		x = -x;
+	}
+	if (y < 0) {
+		temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
+		y = -y;
+	}
+
+	temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
+	temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
+
+	addr = psb_intel_crtc->cursor_addr;
+
+	power_island = pipe_to_island(pipe);
+
+	if (power_island_get(power_island)) {
+		REG_WRITE(pos, temp);
+		REG_WRITE(base, addr);
+		power_island_put(power_island);
+	}
+#endif
+	return 0;
+}
+
+const struct drm_crtc_funcs mdfld_intel_crtc_funcs = {
+#ifndef CONFIG_X86_MRST
+	.save = psb_intel_crtc_save,
+	.restore = psb_intel_crtc_restore,
+#endif
+	.cursor_set = mdfld_intel_crtc_cursor_set,
+	.cursor_move = mdfld_intel_crtc_cursor_move,
+	.gamma_set = psb_intel_crtc_gamma_set,
+	.set_config = drm_crtc_helper_set_config,
+	.destroy = psb_intel_crtc_destroy,
+};
+
+static struct drm_device globle_dev;
+
+void mdfld__intel_plane_set_alpha(int enable)
+{
+	struct drm_device *dev = &globle_dev;
+	int dspcntr_reg = DSPACNTR;
+	u32 dspcntr;
+
+	dspcntr = REG_READ(dspcntr_reg);
+
+	if (enable) {
+		dspcntr &= ~DISPPLANE_32BPP_NO_ALPHA;
+		dspcntr |= DISPPLANE_32BPP;
+	} else {
+		dspcntr &= ~DISPPLANE_32BPP;
+		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+	}
+
+	REG_WRITE(dspcntr_reg, dspcntr);
+}
+
+int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
+			       struct drm_framebuffer *old_fb)
+{
+	struct drm_device *dev = crtc->dev;
+	/* struct drm_i915_master_private *master_priv; */
+	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+	struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
+	struct psb_intel_mode_device *mode_dev = psb_intel_crtc->mode_dev;
+	int pipe = psb_intel_crtc->pipe;
+	struct drm_display_mode *adjusted_mode = NULL;
+	int swapchain_plane = PVRSRV_SWAPCHAIN_ATTACHED_PLANE_NONE;
+	unsigned long Start, Offset;
+	int dsplinoff = DSPALINOFF;
+	int dspsurf = DSPASURF;
+	int dspstride = DSPASTRIDE;
+	int dspcntr_reg = DSPACNTR;
+	u32 dspcntr;
+	int fb_width, fb_height, bpp;
+	u32 stride = 0;
+	u32 power_island = 0;
+	int ret = 0;
+
+	memcpy(&globle_dev, dev, sizeof(struct drm_device));
+
+	PSB_DEBUG_ENTRY("pipe = 0x%x. \n", pipe);
+
+	/* no fb bound */
+	if (!crtc->fb) {
+		PSB_DEBUG_ENTRY("No FB bound\n");
+		return 0;
+	}
+
+	switch (pipe) {
+#ifdef CONFIG_SUPPORT_MIPI
+	case 0:
+		if (IS_MID(dev))
+			dsplinoff = DSPALINOFF;
+		swapchain_plane = PVRSRV_SWAPCHAIN_ATTACHED_PLANE_A;
+		break;
+#endif
+	case 1:
+		dsplinoff = DSPBLINOFF;
+		dspsurf = DSPBSURF;
+		dspstride = DSPBSTRIDE;
+		dspcntr_reg = DSPBCNTR;
+		swapchain_plane = PVRSRV_SWAPCHAIN_ATTACHED_PLANE_B;
+		break;
+#ifdef CONFIG_SUPPORT_MIPI
+	case 2:
+		dsplinoff = DSPCLINOFF;
+		dspsurf = DSPCSURF;
+		dspstride = DSPCSTRIDE;
+		dspcntr_reg = DSPCCNTR;
+		swapchain_plane = PVRSRV_SWAPCHAIN_ATTACHED_PLANE_C;
+		break;
+#endif
+	default:
+		DRM_ERROR("Illegal Pipe Number.\n");
+		return -EINVAL;
+	}
+
+	power_island = pipe_to_island(pipe);
+
+	if (!power_island_get(power_island))
+		return 0;
+
+	Start = mode_dev->bo_offset(dev, psbfb);
+	Offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
+
+	/* Try to attach/de-attach Plane B to an existing swap chain,
+	 * especially with another frame buffer inserted into GTT. */
+	/* TODO: remove it since there's no swap chain anymore*/
+#if 0
+	if (!DCChangeSwapChainProperty(&Start, swapchain_plane)) {
+		DRM_ERROR("Failed to attach/de-attach swapchain_plane %d to a"
+			  "swap chain.\n", pipe);
+#if 0				/* FIXME MRFLD */
+		ret = -EINVAL;
+		goto psb_intel_pipe_set_base_exit;
+#endif				/* FIXME MRFLD */
+	}
+#endif
+
+#ifdef CONFIG_SUPPORT_MIPI
+	REG_WRITE(dspstride, crtc->fb->pitches[0]);
+#else
+	stride = crtc->fb->pitches[0];
+	adjusted_mode = &psb_intel_crtc->saved_adjusted_mode;
+
+	if (adjusted_mode) {
+		fb_width = crtc->fb->width;
+		fb_height = crtc->fb->height;
+		bpp = crtc->fb->bits_per_pixel;
+
+		/* panel fitter does not support scaling greater
+		 * than 1.5, using orignal stride of the image */
+		if (fb_width / (float)adjusted_mode->crtc_hdisplay > 1.5 ||
+		    fb_height / (float)adjusted_mode->crtc_vdisplay > 1.5) {
+			stride = ALIGN(adjusted_mode->crtc_hdisplay*(bpp>>3), 64);
+		}
+
+		DRM_INFO("dsp stride=%d bpp=%d\n", stride, bpp);
+	}
+	REG_WRITE(dspstride, stride);
+#endif
+	dspcntr = REG_READ(dspcntr_reg);
+	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
+
+	switch (crtc->fb->bits_per_pixel) {
+	case 8:
+		dspcntr |= DISPPLANE_8BPP;
+		break;
+	case 16:
+		if (crtc->fb->depth == 15)
+			dspcntr |= DISPPLANE_15_16BPP;
+		else
+			dspcntr |= DISPPLANE_16BPP;
+		break;
+	case 24:
+	case 32:
+		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+		break;
+	default:
+		DRM_ERROR("Unknown color depth\n");
+		ret = -EINVAL;
+		goto psb_intel_pipe_set_base_exit;
+	}
+	REG_WRITE(dspcntr_reg, dspcntr);
+
+	PSB_DEBUG_ENTRY("Writing base %08lX %08lX %d %d\n", Start, Offset, x,
+			y);
+
+	REG_WRITE(dsplinoff, Offset);
+	REG_READ(dsplinoff);
+	REG_WRITE(dspsurf, Start);
+	REG_READ(dspsurf);
+
+ psb_intel_pipe_set_base_exit:
+
+	power_island_put(power_island);
+
+	return ret;
+}
+
+/**
+ * Disable the pipe, plane and pll.
+ *
+ */
+void mdfld_disable_crtc(struct drm_device *dev, int pipe)
+{
+	int dpll_reg = MRST_DPLL_A;
+	int dspcntr_reg = DSPACNTR;
+	int dspbase_reg = MRST_DSPABASE;
+	int pipeconf_reg = PIPEACONF;
+#ifdef CONFIG_SUPPORT_MIPI
+	u32 gen_fifo_stat_reg = GEN_FIFO_STAT_REG;
+#endif
+	u32 temp;
+
+	PSB_DEBUG_ENTRY("pipe = %d\n", pipe);
+
+#ifdef CONFIG_SUPPORT_MIPI
+#ifndef CONFIG_SUPPORT_TOSHIBA_MIPI_DISPLAY
+	/**
+	 * NOTE: this path only works for TMD panel now. update it to
+	 * support all MIPI panels later.
+	 */
+	if (pipe != 1 && ((get_panel_type(dev, pipe) == TMD_VID) ||
+			  (get_panel_type(dev, pipe) == TMD_6X10_VID)))
+		return;
+#endif
+#else
+	if (pipe != 1)
+		return;
+#endif
+
+	switch (pipe) {
+#ifdef CONFIG_SUPPORT_MIPI
+	case 0:
+		break;
+#endif
+	case 1:
+		dpll_reg = MDFLD_DPLL_B;
+		dspcntr_reg = DSPBCNTR;
+		dspbase_reg = DSPBSURF;
+		pipeconf_reg = PIPEBCONF;
+		break;
+#ifdef CONFIG_SUPPORT_MIPI
+	case 2:
+		dpll_reg = MRST_DPLL_A;
+		dspcntr_reg = DSPCCNTR;
+		dspbase_reg = MDFLD_DSPCBASE;
+		pipeconf_reg = PIPECCONF;
+		gen_fifo_stat_reg = GEN_FIFO_STAT_REG + MIPIC_REG_OFFSET;
+		break;
+#endif
+	default:
+		DRM_ERROR("Illegal Pipe Number.\n");
+		return;
+	}
+
+#ifdef CONFIG_SUPPORT_MIPI
+	if (pipe != 1)
+		mdfld_dsi_gen_fifo_ready(dev, gen_fifo_stat_reg,
+					 HS_CTRL_FIFO_EMPTY |
+					 HS_DATA_FIFO_EMPTY);
+#endif
+
+	/* Disable display plane */
+	temp = REG_READ(dspcntr_reg);
+	if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+		REG_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE);
+		/* Flush the plane changes */
+		REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+		REG_READ(dspbase_reg);
+	}
+
+	/* Next, disable display pipes */
+	temp = REG_READ(pipeconf_reg);
+	if ((temp & PIPEACONF_ENABLE) != 0) {
+		temp &= ~PIPEACONF_ENABLE;
+		temp |= PIPECONF_PLANE_OFF | PIPECONF_CURSOR_OFF;
+		REG_WRITE(pipeconf_reg, temp);
+		REG_READ(pipeconf_reg);
+
+		/* Wait for for the pipe disable to take effect. */
+		mdfldWaitForPipeDisable(dev, pipe);
+	}
+
+	temp = REG_READ(dpll_reg);
+	if (temp & DPLL_VCO_ENABLE) {
+		if (((pipe != 1)
+		     && !((REG_READ(PIPEACONF) | REG_READ(PIPECCONF)) &
+			  PIPEACONF_ENABLE))
+		    || (pipe == 1)) {
+			temp &= ~(DPLL_VCO_ENABLE);
+			REG_WRITE(dpll_reg, temp);
+			REG_READ(dpll_reg);
+			/* Wait for the clocks to turn off. */
+			/* FIXME_MDFLD PO may need more delay */
+			udelay(500);
+
+			if (!(temp & MDFLD_PWR_GATE_EN)) {
+				/* gating power of DPLL */
+				REG_WRITE(dpll_reg, temp | MDFLD_PWR_GATE_EN);
+				/* FIXME_MDFLD PO - change 500 to 1 after PO */
+				udelay(5000);
+			}
+		}
+	}
+
+}
+
+#if KEEP_UNUSED_CODE
+/**
+ * Sets the power management mode of the pipe and plane.
+ *
+ * This code should probably grow support for turning the cursor off and back
+ * on appropriately at the same time as we're turning the pipe off/on.
+ */
+static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+	struct drm_device *dev = crtc->dev;
+	DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+	struct mdfld_dsi_config *dsi_config;
+	struct mdfld_dsi_hw_context *ctx;
+	struct mdfld_dsi_hw_registers *regs;
+	int pipe = psb_intel_crtc->pipe;
+	int dpll_reg = MRST_DPLL_A;
+	int dspcntr_reg = DSPACNTR;
+	int dspbase_reg = MRST_DSPABASE;
+	int pipeconf_reg = PIPEACONF;
+	u32 pipestat_reg = PIPEASTAT;
+	u32 gen_fifo_stat_reg = GEN_FIFO_STAT_REG;
+	u32 pipeconf = dev_priv->pipeconf;
+	u32 dspcntr = dev_priv->dspcntr;
+	u32 mipi_enable_reg = MIPIA_DEVICE_READY_REG;
+	u32 temp;
+	u32 power_island = 0;
+	bool enabled;
+	int timeout = 0;
+
+	PSB_DEBUG_ENTRY("mode = %d, pipe = %d\n", mode, pipe);
+
+#ifndef CONFIG_SUPPORT_TOSHIBA_MIPI_DISPLAY
+	/**
+	 * MIPI dpms
+	 * NOTE: this path only works for TMD panel now. update it to
+	 * support all MIPI panels later.
+	 */
+	if (pipe != 1 && ((get_panel_type(dev, pipe) == TMD_VID) ||
+			  (get_panel_type(dev, pipe) == TMD_6X10_VID))) {
+		return;
+	}
+#endif
+
+	power_island = pipe_to_island(pipe);
+
+	if (!power_island_get(power_island))
+		return;
+
+	switch (pipe) {
+	case 0:
+		break;
+	case 1:
+		dpll_reg = DPLL_B;
+		dspcntr_reg = DSPBCNTR;
+		dspbase_reg = MRST_DSPBBASE;
+		pipeconf_reg = PIPEBCONF;
+		pipeconf = dev_priv->pipeconf1;
+		dspcntr = dev_priv->dspcntr1;
+		if (IS_MDFLD(dev))
+			dpll_reg = MDFLD_DPLL_B;
+		break;
+	case 2:
+		dpll_reg = MRST_DPLL_A;
+		dspcntr_reg = DSPCCNTR;
+		dspbase_reg = MDFLD_DSPCBASE;
+		pipeconf_reg = PIPECCONF;
+		pipestat_reg = PIPECSTAT;
+		pipeconf = dev_priv->pipeconf2;
+		dspcntr = dev_priv->dspcntr2;
+		gen_fifo_stat_reg = GEN_FIFO_STAT_REG + MIPIC_REG_OFFSET;
+		mipi_enable_reg = MIPIA_DEVICE_READY_REG + MIPIC_REG_OFFSET;
+		break;
+	default:
+		DRM_ERROR("Illegal Pipe Number.\n");
+
+		power_island_put(power_island);
+		return;
+	}
+
+	/* XXX: When our outputs are all unaware of DPMS modes other than off
+	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
+	 */
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+		/* Enable the DPLL */
+		temp = REG_READ(dpll_reg);
+
+		if ((temp & DPLL_VCO_ENABLE) == 0) {
+			/* When ungating power of DPLL, needs to wait 0.5us before enable the VCO */
+			if (temp & MDFLD_PWR_GATE_EN) {
+				temp &= ~MDFLD_PWR_GATE_EN;
+				REG_WRITE(dpll_reg, temp);
+				/* FIXME_MDFLD PO - change 500 to 1 after PO */
+				udelay(500);
+			}
+
+			REG_WRITE(dpll_reg, temp);
+			REG_READ(dpll_reg);
+			/* FIXME_MDFLD PO - change 500 to 1 after PO */
+			udelay(500);
+
+			REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
+			REG_READ(dpll_reg);
+
+			/**
+			 * wait for DSI PLL to lock
+			 * NOTE: only need to poll status of pipe 0 and pipe 1,
+			 * since both MIPI pipes share the same PLL.
+			 */
+			while ((pipe != 2) && (timeout < 20000)
+			       && !(REG_READ(pipeconf_reg) &
+				    PIPECONF_DSIPLL_LOCK)) {
+				udelay(150);
+				timeout++;
+			}
+		}
+
+		/* Enable the plane */
+		temp = REG_READ(dspcntr_reg);
+		if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+			REG_WRITE(dspcntr_reg, temp | DISPLAY_PLANE_ENABLE);
+			/* Flush the plane changes */
+			REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+		}
+
+		/* Enable the pipe */
+		temp = REG_READ(pipeconf_reg);
+		if ((temp & PIPEACONF_ENABLE) == 0) {
+			REG_WRITE(pipeconf_reg, pipeconf);
+
+			/* Wait for for the pipe enable to take effect. */
+			mdfldWaitForPipeEnable(dev, pipe);
+		}
+
+		/*workaround for sighting 3741701 Random X blank display */
+		/*perform w/a in video mode only on pipe A or C */
+		if ((pipe == 0 || pipe == 2) &&
+		    (is_panel_vid_or_cmd(dev) == MDFLD_DSI_ENCODER_DPI)) {
+			REG_WRITE(pipestat_reg, REG_READ(pipestat_reg));
+			msleep(100);
+			if (PIPE_VBLANK_STATUS & REG_READ(pipestat_reg)) {
+				PSB_DEBUG_ENTRY("OK");
+			} else {
+				PSB_DEBUG_ENTRY("STUCK!!!!");
+				/*shutdown controller */
+				temp = REG_READ(dspcntr_reg);
+				REG_WRITE(dspcntr_reg,
+					  temp & ~DISPLAY_PLANE_ENABLE);
+				REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+				/*mdfld_dsi_dpi_shut_down(dev, pipe); */
+				REG_WRITE(0xb048, 1);
+				msleep(100);
+				temp = REG_READ(pipeconf_reg);
+				temp &= ~PIPEACONF_ENABLE;
+				REG_WRITE(pipeconf_reg, temp);
+				msleep(100);	/*wait for pipe disable */
+				/*printk(KERN_ALERT "70008 is %x\n", REG_READ(0x70008));
+				   printk(KERN_ALERT "b074 is %x\n", REG_READ(0xb074)); */
+				REG_WRITE(mipi_enable_reg, 0);
+				msleep(100);
+				PSB_DEBUG_ENTRY("70008 is %x\n",
+						REG_READ(0x70008));
+				PSB_DEBUG_ENTRY("b074 is %x\n",
+						REG_READ(0xb074));
+				REG_WRITE(0xb004, REG_READ(0xb004));
+				/* try to bring the controller back up again */
+				REG_WRITE(mipi_enable_reg, 1);
+				temp = REG_READ(dspcntr_reg);
+				REG_WRITE(dspcntr_reg,
+					  temp | DISPLAY_PLANE_ENABLE);
+				REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+				/*mdfld_dsi_dpi_turn_on(dev, pipe); */
+				REG_WRITE(0xb048, 2);
+				msleep(100);
+				temp = REG_READ(pipeconf_reg);
+				temp |= PIPEACONF_ENABLE;
+				REG_WRITE(pipeconf_reg, temp);
+			}
+		}
+
+		psb_intel_crtc_load_lut(crtc);
+
+		/* Give the overlay scaler a chance to enable
+		   if it's on this pipe */
+		/* psb_intel_crtc_dpms_video(crtc, true); TODO */
+
+		break;
+	case DRM_MODE_DPMS_OFF:
+		/* Give the overlay scaler a chance to disable
+		 * if it's on this pipe */
+		/* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
+		if (pipe != 1)
+			mdfld_dsi_gen_fifo_ready(dev, gen_fifo_stat_reg,
+						 HS_CTRL_FIFO_EMPTY |
+						 HS_DATA_FIFO_EMPTY);
+
+		/* Disable the VGA plane that we never use */
+		REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+
+		/* Disable display plane */
+		temp = REG_READ(dspcntr_reg);
+		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+			REG_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE);
+			/* Flush the plane changes */
+			REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+			REG_READ(dspbase_reg);
+		}
+
+		/* Next, disable display pipes */
+		temp = REG_READ(pipeconf_reg);
+		if ((temp & PIPEACONF_ENABLE) != 0) {
+			temp &= ~PIPEACONF_ENABLE;
+			temp |= PIPECONF_PLANE_OFF | PIPECONF_CURSOR_OFF;
+			REG_WRITE(pipeconf_reg, temp);
+			REG_READ(pipeconf_reg);
+
+			/* Wait for for the pipe disable to take effect. */
+			mdfldWaitForPipeDisable(dev, pipe);
+		}
+
+		temp = REG_READ(dpll_reg);
+		if (temp & DPLL_VCO_ENABLE) {
+			if (((pipe != 1)
+			     && !((REG_READ(PIPEACONF) | REG_READ(PIPECCONF)) &
+				  PIPEACONF_ENABLE))
+			    || (pipe == 1)) {
+				temp &= ~(DPLL_VCO_ENABLE);
+				REG_WRITE(dpll_reg, temp);
+				REG_READ(dpll_reg);
+				/* Wait for the clocks to turn off. */
+				/* FIXME_MDFLD PO may need more delay */
+				udelay(500);
+#if 0				/* FIXME_MDFLD Check if we need to power gate the PLL */
+				if (!(temp & MDFLD_PWR_GATE_EN)) {
+					/* gating power of DPLL */
+					REG_WRITE(dpll_reg,
+						  temp | MDFLD_PWR_GATE_EN);
+					/* FIXME_MDFLD PO - change 500 to 1 after PO */
+					udelay(5000);
+				}
+#endif
+			}
+		}
+		break;
+	}
+
+	enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
+
+#if 0				/* JB: Add vblank support later */
+	if (enabled)
+		dev_priv->vblank_pipe |= (1 << pipe);
+	else
+		dev_priv->vblank_pipe &= ~(1 << pipe);
+#endif
+
+#if 0				/* JB: Add sarea support later */
+	if (!dev->primary->master)
+		return;
+
+	master_priv = dev->primary->master->driver_priv;
+	if (!master_priv->sarea_priv)
+		return;
+
+	switch (pipe) {
+	case 0:
+		master_priv->sarea_priv->planeA_w =
+		    enabled ? crtc->mode.hdisplay : 0;
+		master_priv->sarea_priv->planeA_h =
+		    enabled ? crtc->mode.vdisplay : 0;
+		break;
+	case 1:
+		master_priv->sarea_priv->planeB_w =
+		    enabled ? crtc->mode.hdisplay : 0;
+		master_priv->sarea_priv->planeB_h =
+		    enabled ? crtc->mode.vdisplay : 0;
+		break;
+	default:
+		DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
+		break;
+	}
+#endif
+
+	power_island_put(power_island);
+}
+#endif /* if KEEP_UNUSED_CODE */
+
+#define MDFLD_LIMT_DPLL_19	    0
+#define MDFLD_LIMT_DPLL_25	    1
+#define MDFLD_LIMT_DPLL_83	    2
+#define MDFLD_LIMT_DPLL_100	    3
+#define MDFLD_LIMT_DSIPLL_19	    4
+#define MDFLD_LIMT_DSIPLL_25	    5
+#define MDFLD_LIMT_DSIPLL_83	    6
+#define MDFLD_LIMT_DSIPLL_100	    7
+
+#define MDFLD_DOT_MIN		  19750
+#define MDFLD_DOT_MAX		  120000
+#define MDFLD_DPLL_M_MIN_19	    113
+#define MDFLD_DPLL_M_MAX_19	    155
+#define MDFLD_DPLL_P1_MIN_19	    2
+#define MDFLD_DPLL_P1_MAX_19	    10
+#define MDFLD_DPLL_M_MIN_25	    101
+#define MDFLD_DPLL_M_MAX_25	    130
+#define MDFLD_DPLL_P1_MIN_25	    2
+#define MDFLD_DPLL_P1_MAX_25	    10
+#define MDFLD_DPLL_M_MIN_83	    64
+#define MDFLD_DPLL_M_MAX_83	    64
+#define MDFLD_DPLL_P1_MIN_83	    2
+#define MDFLD_DPLL_P1_MAX_83	    2
+#define MDFLD_DPLL_M_MIN_100	    64
+#define MDFLD_DPLL_M_MAX_100	    64
+#define MDFLD_DPLL_P1_MIN_100	    2
+#define MDFLD_DPLL_P1_MAX_100	    2
+#define MDFLD_DSIPLL_M_MIN_19	    64
+#define MDFLD_DSIPLL_M_MAX_19	    175
+#define MDFLD_DSIPLL_P1_MIN_19	    3
+#define MDFLD_DSIPLL_P1_MAX_19	    8
+#define MDFLD_DSIPLL_M_MIN_25	    97
+#define MDFLD_DSIPLL_M_MAX_25	    140
+#define MDFLD_DSIPLL_P1_MIN_25	    3
+#define MDFLD_DSIPLL_P1_MAX_25	    9
+#define MDFLD_DSIPLL_M_MIN_83	    33
+#define MDFLD_DSIPLL_M_MAX_83	    92
+#define MDFLD_DSIPLL_P1_MIN_83	    2
+#define MDFLD_DSIPLL_P1_MAX_83	    3
+#define MDFLD_DSIPLL_M_MIN_100	    97
+#define MDFLD_DSIPLL_M_MAX_100	    140
+#define MDFLD_DSIPLL_P1_MIN_100	    3
+#define MDFLD_DSIPLL_P1_MAX_100	    9
+
+static const struct mrst_limit_t mdfld_limits[] = {
+	{			/* MDFLD_LIMT_DPLL_19 */
+	 .dot = {.min = MDFLD_DOT_MIN,.max = MDFLD_DOT_MAX},
+	 .m = {.min = MDFLD_DPLL_M_MIN_19,.max = MDFLD_DPLL_M_MAX_19},
+	 .p1 = {.min = MDFLD_DPLL_P1_MIN_19,.max = MDFLD_DPLL_P1_MAX_19},
+	 },
+	{			/* MDFLD_LIMT_DPLL_25 */
+	 .dot = {.min = MDFLD_DOT_MIN,.max = MDFLD_DOT_MAX},
+	 .m = {.min = MDFLD_DPLL_M_MIN_25,.max = MDFLD_DPLL_M_MAX_25},
+	 .p1 = {.min = MDFLD_DPLL_P1_MIN_25,.max = MDFLD_DPLL_P1_MAX_25},
+	 },
+	{			/* MDFLD_LIMT_DPLL_83 */
+	 .dot = {.min = MDFLD_DOT_MIN,.max = MDFLD_DOT_MAX},
+	 .m = {.min = MDFLD_DPLL_M_MIN_83,.max = MDFLD_DPLL_M_MAX_83},
+	 .p1 = {.min = MDFLD_DPLL_P1_MIN_83,.max = MDFLD_DPLL_P1_MAX_83},
+	 },
+	{			/* MDFLD_LIMT_DPLL_100 */
+	 .dot = {.min = MDFLD_DOT_MIN,.max = MDFLD_DOT_MAX},
+	 .m = {.min = MDFLD_DPLL_M_MIN_100,.max = MDFLD_DPLL_M_MAX_100},
+	 .p1 = {.min = MDFLD_DPLL_P1_MIN_100,.max = MDFLD_DPLL_P1_MAX_100},
+	 },
+	{			/* MDFLD_LIMT_DSIPLL_19 */
+	 .dot = {.min = MDFLD_DOT_MIN,.max = MDFLD_DOT_MAX},
+	 .m = {.min = MDFLD_DSIPLL_M_MIN_19,.max = MDFLD_DSIPLL_M_MAX_19},
+	 .p1 = {.min = MDFLD_DSIPLL_P1_MIN_19,.max = MDFLD_DSIPLL_P1_MAX_19},
+	 },
+	{			/* MDFLD_LIMT_DSIPLL_25 */
+	 .dot = {.min = MDFLD_DOT_MIN,.max = MDFLD_DOT_MAX},
+	 .m = {.min = MDFLD_DSIPLL_M_MIN_25,.max = MDFLD_DSIPLL_M_MAX_25},
+	 .p1 = {.min = MDFLD_DSIPLL_P1_MIN_25,.max = MDFLD_DSIPLL_P1_MAX_25},
+	 },
+	{			/* MDFLD_LIMT_DSIPLL_83 */
+	 .dot = {.min = MDFLD_DOT_MIN,.max = MDFLD_DOT_MAX},
+	 .m = {.min = MDFLD_DSIPLL_M_MIN_83,.max = MDFLD_DSIPLL_M_MAX_83},
+	 .p1 = {.min = MDFLD_DSIPLL_P1_MIN_83,.max = MDFLD_DSIPLL_P1_MAX_83},
+	 },
+	{			/* MDFLD_LIMT_DSIPLL_100 */
+	 .dot = {.min = MDFLD_DOT_MIN,.max = MDFLD_DOT_MAX},
+	 .m = {.min = MDFLD_DSIPLL_M_MIN_100,.max = MDFLD_DSIPLL_M_MAX_100},
+	 .p1 = {.min = MDFLD_DSIPLL_P1_MIN_100,.max = MDFLD_DSIPLL_P1_MAX_100},
+	 },
+};
+
+#define MDFLD_M_MIN	    21
+#define MDFLD_M_MAX	    180
+static const u32 mdfld_m_converts[] = {
+/* M configuration table from 9-bit LFSR table */
+	224, 368, 440, 220, 366, 439, 219, 365, 182, 347,	/* 21 - 30 */
+	173, 342, 171, 85, 298, 149, 74, 37, 18, 265,	/* 31 - 40 */
+	388, 194, 353, 432, 216, 108, 310, 155, 333, 166,	/* 41 - 50 */
+	83, 41, 276, 138, 325, 162, 337, 168, 340, 170,	/* 51 - 60 */
+	341, 426, 469, 234, 373, 442, 221, 110, 311, 411,	/* 61 - 70 */
+	461, 486, 243, 377, 188, 350, 175, 343, 427, 213,	/* 71 - 80 */
+	106, 53, 282, 397, 354, 227, 113, 56, 284, 142,	/* 81 - 90 */
+	71, 35, 273, 136, 324, 418, 465, 488, 500, 506,	/* 91 - 100 */
+	253, 126, 63, 287, 399, 455, 483, 241, 376, 444,	/* 101 - 110 */
+	478, 495, 503, 251, 381, 446, 479, 239, 375, 443,	/* 111 - 120 */
+	477, 238, 119, 315, 157, 78, 295, 147, 329, 420,	/* 121 - 130 */
+	210, 105, 308, 154, 77, 38, 275, 137, 68, 290,	/* 131 - 140 */
+	145, 328, 164, 82, 297, 404, 458, 485, 498, 249,	/* 141 - 150 */
+	380, 190, 351, 431, 471, 235, 117, 314, 413, 206,	/* 151 - 160 */
+	103, 51, 25, 12, 262, 387, 193, 96, 48, 280,	/* 161 - 170 */
+	396, 198, 99, 305, 152, 76, 294, 403, 457, 228,	/* 171 - 180 */
+};
+
+#if KEEP_UNUSED_CODE
+static const struct mrst_limit_t *mdfld_limit(struct drm_crtc *crtc)
+{
+	const struct mrst_limit_t *limit = NULL;
+	struct drm_device *dev = crtc->dev;
+	DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+
+	if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)
+	    || psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI2)) {
+		if ((dev_priv->ksel == KSEL_CRYSTAL_19)
+		    || (dev_priv->ksel == KSEL_BYPASS_19))
+			limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_19];
+		else if (dev_priv->ksel == KSEL_BYPASS_25)
+			limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_25];
+		else if ((dev_priv->ksel == KSEL_BYPASS_83_100)
+			 && (dev_priv->core_freq == 166))
+			limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_83];
+		else if ((dev_priv->ksel == KSEL_BYPASS_83_100) &&
+			 (dev_priv->core_freq == 100
+			  || dev_priv->core_freq == 200))
+			limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_100];
+	} else if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
+		if ((dev_priv->ksel == KSEL_CRYSTAL_19)
+		    || (dev_priv->ksel == KSEL_BYPASS_19))
+			limit = &mdfld_limits[MDFLD_LIMT_DPLL_19];
+		else if (dev_priv->ksel == KSEL_BYPASS_25)
+			limit = &mdfld_limits[MDFLD_LIMT_DPLL_25];
+		else if ((dev_priv->ksel == KSEL_BYPASS_83_100)
+			 && (dev_priv->core_freq == 166))
+			limit = &mdfld_limits[MDFLD_LIMT_DPLL_83];
+		else if ((dev_priv->ksel == KSEL_BYPASS_83_100) &&
+			 (dev_priv->core_freq == 100
+			  || dev_priv->core_freq == 200))
+			limit = &mdfld_limits[MDFLD_LIMT_DPLL_100];
+	} else {
+		limit = NULL;
+		PSB_DEBUG_ENTRY("mdfld_limit Wrong display type. \n");
+	}
+
+	return limit;
+}
+#endif /* if KEEP_UNUSED_CODE */
+
+#if KEEP_UNUSED_CODE
+/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
+static void mdfld_clock(int refclk, struct mrst_clock_t *clock)
+{
+	clock->dot = (refclk * clock->m) / clock->p1;
+}
+#endif /* if KEEP_UNUSED_CODE */
+
+#if KEEP_UNUSED_CODE
+/**
+ * Returns a set of divisors for the desired target clock with the given refclk,
+ * or FALSE.  Divisor values are the actual divisors for
+ */
+static bool
+mdfldFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
+		 struct mrst_clock_t *best_clock)
+{
+	struct mrst_clock_t clock;
+	const struct mrst_limit_t *limit = mdfld_limit(crtc);
+	int err = target;
+
+	memset(best_clock, 0, sizeof(*best_clock));
+
+	PSB_DEBUG_ENTRY("mdfldFindBestPLL target = %d,"
+			"m_min = %d, m_max = %d, p_min = %d, p_max = %d. \n",
+			target, limit->m.min, limit->m.max, limit->p1.min,
+			limit->p1.max);
+
+	for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
+		for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max;
+		     clock.p1++) {
+			int this_err;
+
+			mdfld_clock(refclk, &clock);
+
+			this_err = abs(clock.dot - target);
+			if (this_err < err) {
+				*best_clock = clock;
+				err = this_err;
+			}
+		}
+	}
+	PSB_DEBUG_ENTRY("mdfldFindBestPLL target = %d,"
+			"m = %d, p = %d. \n", target, best_clock->m,
+			best_clock->p1);
+	PSB_DEBUG_ENTRY("mdfldFindBestPLL err = %d.\n", err);
+
+	return err != target;
+}
+#endif /* if KEEP_UNUSED_CODE */
+
+#if KEEP_UNUSED_CODE
+static int mdfld_crtc_dsi_pll_calc(struct drm_crtc *crtc,
+				   struct drm_device *dev,
+				   struct mdfld_dsi_hw_context *ctx,
+				   struct drm_display_mode *adjusted_mode)
+{
+	DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+	struct mrst_clock_t clock;
+	u32 dpll = 0, fp = 0;
+	int refclk = 0;
+	int clk_n = 0, clk_p2 = 0, clk_byte = 1, clk = 0, m_conv = 0, clk_tmp =
+	    0;
+	bool ok;
+
+	if ((dev_priv->ksel == KSEL_CRYSTAL_19)
+	    || (dev_priv->ksel == KSEL_BYPASS_19)) {
+		refclk = 19200;
+		clk_n = 1, clk_p2 = 8;
+	} else if (dev_priv->ksel == KSEL_BYPASS_25) {
+		refclk = 25000;
+		clk_n = 1, clk_p2 = 8;
+	} else if ((dev_priv->ksel == KSEL_BYPASS_83_100)
+		   && (dev_priv->core_freq == 166)) {
+		refclk = 83000;
+		clk_n = 4, clk_p2 = 8;
+	} else if ((dev_priv->ksel == KSEL_BYPASS_83_100) &&
+		   (dev_priv->core_freq == 100 || dev_priv->core_freq == 200)) {
+		refclk = 100000;
+		clk_n = 4, clk_p2 = 8;
+	} else {
+		refclk = 19200;
+		clk_n = 1, clk_p2 = 8;
+	}
+
+	dev_priv->bpp = 24;
+	clk_byte = dev_priv->bpp / 8;
+	clk = adjusted_mode->clock;
+
+	clk_tmp = clk * clk_n * clk_p2 * clk_byte;
+
+	PSB_DEBUG_ENTRY("ref_clk: %d, clk = %d, clk_n = %d, clk_p2 = %d. \n",
+			refclk, clk, clk_n, clk_p2);
+	PSB_DEBUG_ENTRY("adjusted_mode->clock = %d, clk_tmp = %d. \n",
+			adjusted_mode->clock, clk_tmp);
+
+	ok = mdfldFindBestPLL(crtc, clk_tmp, refclk, &clock);
+	dev_priv->tmds_clock_khz = clock.dot / (clk_n * clk_p2 * clk_byte);
+
+	if (!ok) {
+		DRM_ERROR("mdfldFindBestPLL fail in mdfld_crtc_mode_set. \n");
+	} else {
+		m_conv = mdfld_m_converts[(clock.m - MDFLD_M_MIN)];
+		PSB_DEBUG_ENTRY("dot clock = %d,"
+				"m = %d, p1 = %d, m_conv = %d. \n", clock.dot,
+				clock.m, clock.p1, m_conv);
+	}
+
+	dpll = 0x00000000;
+	fp = (clk_n / 2) << 16;
+	fp |= m_conv;
+
+	/* compute bitmask from p1 value */
+	dpll |= (1 << (clock.p1 - 2)) << 17;
+
+	ctx->dpll = dpll;
+	ctx->fp = fp;
+
+	return 0;
+}
+#endif /* if KEEP_UNUSED_CODE */
+
+#ifdef CONFIG_SUPPORT_MIPI
+static int mdfld_crtc_dsi_mode_set(struct drm_crtc *crtc,
+				   struct mdfld_dsi_config *dsi_config,
+				   struct drm_display_mode *mode,
+				   struct drm_display_mode *adjusted_mode,
+				   int x, int y, struct drm_framebuffer *old_fb)
+{
+	struct drm_device *dev;
+	struct psb_intel_crtc *mdfld_dsi_crtc;
+	struct psb_framebuffer *mdfld_fb;
+	struct psb_intel_mode_device *mode_dev;
+	struct mdfld_dsi_hw_context *ctx;
+	struct drm_psb_private *dev_priv;
+	int fb_bpp;
+	int fb_pitch;
+	int fb_depth;
+	int hdelay;
+
+	if (!crtc || !crtc->fb) {
+		DRM_ERROR("Invalid CRTC\n");
+		return -EINVAL;
+	}
+
+	if (!dsi_config) {
+		DRM_ERROR("Invalid DSI config\n");
+		return -EINVAL;
+	}
+
+	mdfld_dsi_crtc = to_psb_intel_crtc(crtc);
+	mdfld_fb = to_psb_fb(crtc->fb);
+	mode_dev = mdfld_dsi_crtc->mode_dev;
+	mode = adjusted_mode;
+	ctx = &dsi_config->dsi_hw_context;
+	fb_bpp = crtc->fb->bits_per_pixel;
+	fb_pitch = crtc->fb->pitches[0];
+	fb_depth = crtc->fb->depth;
+	dev = crtc->dev;
+	dev_priv = dev->dev_private;
+
+	mutex_lock(&dsi_config->context_lock);
+
+	ctx->vgacntr = 0x80000000;
+
+	/*set up pipe timings */
+
+	ctx->htotal = (mode->crtc_hdisplay - 1) |
+	    ((mode->crtc_htotal - 1) << 16);
+	ctx->hblank = (mode->crtc_hblank_start - 1) |
+	    ((mode->crtc_hblank_end - 1) << 16);
+	ctx->hsync = (mode->crtc_hsync_start - 1) |
+	    ((mode->crtc_hsync_end - 1) << 16);
+	ctx->vtotal = (mode->crtc_vdisplay - 1) |
+	    ((mode->crtc_vtotal - 1) << 16);
+	ctx->vblank = (mode->crtc_vblank_start - 1) |
+	    ((mode->crtc_vblank_end - 1) << 16);
+	ctx->vsync = (mode->crtc_vsync_start - 1) |
+	    ((mode->crtc_vsync_end - 1) << 16);
+
+	/*pipe source */
+	ctx->pipesrc = ((mode->crtc_hdisplay - 1) << 16) |
+	    (mode->crtc_vdisplay - 1);
+
+	/*setup dsp plane */
+	ctx->dsppos = 0;
+	/* PR2 panel has 200 pixel dummy clocks,
+	 * So the display timing should be 800x1024, and surface
+	 * is 608x1024(64 bits align), then the information between android
+	 * and Linux frame buffer is not consistent.
+	 */
+	if (get_panel_type(dev, 0) == TMD_6X10_VID)
+		ctx->dspsize = ((mode->crtc_vdisplay - 1) << 16) |
+		    (mode->crtc_hdisplay - 200 - 1);
+	else
+		ctx->dspsize = ((mode->crtc_vdisplay - 1) << 16) |
+		    (mode->crtc_hdisplay - 1);
+
+	ctx->dspstride = fb_pitch;
+
+	ctx->dspsurf = mode_dev->bo_offset(dev, mdfld_fb);
+	if (dev_priv->panel_180_rotation && dsi_config->pipe == 0)
+		ctx->dsplinoff = y * fb_pitch + x * (fb_bpp / 8) + (mode->crtc_vdisplay - 1) * fb_pitch + (mode->crtc_hdisplay - 1) * (fb_bpp / 8);
+	else
+		ctx->dsplinoff = y * fb_pitch + x * (fb_bpp / 8);
+
+	switch (fb_bpp) {
+	case 8:
+		ctx->dspcntr = DISPPLANE_8BPP;
+		break;
+	case 16:
+		if (fb_depth == 15)
+			ctx->dspcntr = DISPPLANE_15_16BPP;
+		else
+			ctx->dspcntr = DISPPLANE_16BPP;
+		break;
+	case 24:
+	case 32:
+		ctx->dspcntr = DISPPLANE_32BPP_NO_ALPHA;
+		break;
+	default:
+		DRM_ERROR("Unknown color depth\n");
+		mutex_unlock(&dsi_config->context_lock);
+		return -EINVAL;
+	}
+
+	if (dsi_config->pipe == 2)
+		ctx->dspcntr |= (0x2 << 24);
+
+	if (dev_priv->panel_180_rotation && dsi_config->pipe == 0)
+		ctx->dspcntr |= (0x1 << 15);
+
+	/*
+	 * Setup pipe configuration for different panels
+	 * The formula recommended from hw team is as below:
+	 * (htotal * 5ns * hdelay) >= 8000ns
+	 * hdelay is the count of delayed HBLANK scan lines
+	 * And the max hdelay is 4
+	 * by programming of PIPE(A/C) CONF bit 28:27:
+	 * 00 = 1 scan line, 01 = 2 scan line,
+	 * 02 = 3 scan line, 03 = 4 scan line
+	 */
+	ctx->pipeconf &= ~(BIT27 | BIT28);
+
+	hdelay = 8000 / mode->crtc_htotal / 5;
+	if (8000 % (mode->crtc_htotal * 5) > 0)
+		hdelay += 1;
+
+	if (hdelay > 4) {
+		DRM_ERROR("Do not support such panel setting yet\n");
+		hdelay = 4;	/* Use the max hdelay instead */
+	}
+
+	ctx->pipeconf |= ((hdelay - 1) << 27);
+
+	mutex_unlock(&dsi_config->context_lock);
+	return 0;
+}
+#endif
+
+#if KEEP_UNUSED_CODE
+static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
+			       struct drm_display_mode *mode,
+			       struct drm_display_mode *adjusted_mode,
+			       int x, int y, struct drm_framebuffer *old_fb)
+{
+	struct drm_device *dev = crtc->dev;
+	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+	struct drm_framebuffer *fb = crtc->fb;
+	DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+	int pipe = psb_intel_crtc->pipe;
+	int fp_reg = MRST_FPA0;
+	int dpll_reg = MRST_DPLL_A;
+	int dspcntr_reg = DSPACNTR;
+	int pipeconf_reg = PIPEACONF;
+	int htot_reg = HTOTAL_A;
+	int hblank_reg = HBLANK_A;
+	int hsync_reg = HSYNC_A;
+	int vtot_reg = VTOTAL_A;
+	int vblank_reg = VBLANK_A;
+	int vsync_reg = VSYNC_A;
+	int dspsize_reg = DSPASIZE;
+	int dsppos_reg = DSPAPOS;
+	int pipesrc_reg = PIPEASRC;
+	u32 *pipeconf = &dev_priv->pipeconf;
+	u32 *dspcntr = &dev_priv->dspcntr;
+	int refclk = 0;
+	int clk_n = 0, clk_p2 = 0, clk_byte = 1, clk = 0, m_conv = 0, clk_tmp =
+	    0;
+	struct mrst_clock_t clock;
+	bool ok;
+	u32 dpll = 0, fp = 0;
+	bool is_crt = false, is_lvds = false, is_tv = false;
+	bool is_mipi = false, is_mipi2 = false, is_hdmi = false;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct psb_intel_output *psb_intel_output = NULL;
+	struct mdfld_dsi_config *dsi_config;
+	uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
+	struct drm_encoder *encoder;
+	struct drm_connector *connector;
+	int timeout = 0;
+	u32 power_island = 0;
+
+	PSB_DEBUG_ENTRY("pipe = 0x%x\n", pipe);
+
+#ifndef CONFIG_SUPPORT_TOSHIBA_MIPI_DISPLAY
+	/**
+	 * MIPI panel mode setting
+	 * NOTE: this path only works for TMD panel now. update it to
+	 * support all MIPI panels later.
+	 */
+	if (pipe != 1 && ((get_panel_type(dev, pipe) == TMD_VID) ||
+			  (get_panel_type(dev, pipe) == TMD_6X10_VID))) {
+		if (pipe == 0)
+			dsi_config = dev_priv->dsi_configs[0];
+		else if (pipe == 2)
+			dsi_config = dev_priv->dsi_configs[1];
+		else
+			return -EINVAL;
+		return mdfld_crtc_dsi_mode_set(crtc, dsi_config, mode,
+					       adjusted_mode, x, y, old_fb);
+	}
+#endif
+
+	switch (pipe) {
+	case 0:
+		break;
+	case 1:
+		fp_reg = FPB0;
+		dpll_reg = DPLL_B;
+		dspcntr_reg = DSPBCNTR;
+		pipeconf_reg = PIPEBCONF;
+		htot_reg = HTOTAL_B;
+		hblank_reg = HBLANK_B;
+		hsync_reg = HSYNC_B;
+		vtot_reg = VTOTAL_B;
+		vblank_reg = VBLANK_B;
+		vsync_reg = VSYNC_B;
+		dspsize_reg = DSPBSIZE;
+		dsppos_reg = DSPBPOS;
+		pipesrc_reg = PIPEBSRC;
+		pipeconf = &dev_priv->pipeconf1;
+		dspcntr = &dev_priv->dspcntr1;
+		if (IS_MDFLD(dev)) {
+			fp_reg = MDFLD_DPLL_DIV0;
+			dpll_reg = MDFLD_DPLL_B;
+		}
+		break;
+	case 2:
+		dpll_reg = MRST_DPLL_A;
+		dspcntr_reg = DSPCCNTR;
+		pipeconf_reg = PIPECCONF;
+		htot_reg = HTOTAL_C;
+		hblank_reg = HBLANK_C;
+		hsync_reg = HSYNC_C;
+		vtot_reg = VTOTAL_C;
+		vblank_reg = VBLANK_C;
+		vsync_reg = VSYNC_C;
+		dspsize_reg = DSPCSIZE;
+		dsppos_reg = DSPCPOS;
+		pipesrc_reg = PIPECSRC;
+		pipeconf = &dev_priv->pipeconf2;
+		dspcntr = &dev_priv->dspcntr2;
+		break;
+	default:
+		DRM_ERROR("Illegal Pipe Number. \n");
+		return 0;
+	}
+
+	PSB_DEBUG_ENTRY("adjusted_hdisplay = %d\n", adjusted_mode->hdisplay);
+	PSB_DEBUG_ENTRY("adjusted_vdisplay = %d\n", adjusted_mode->vdisplay);
+	PSB_DEBUG_ENTRY("adjusted_hsync_start = %d\n",
+			adjusted_mode->hsync_start);
+	PSB_DEBUG_ENTRY("adjusted_hsync_end = %d\n", adjusted_mode->hsync_end);
+	PSB_DEBUG_ENTRY("adjusted_htotal = %d\n", adjusted_mode->htotal);
+	PSB_DEBUG_ENTRY("adjusted_vsync_start = %d\n",
+			adjusted_mode->vsync_start);
+	PSB_DEBUG_ENTRY("adjusted_vsync_end = %d\n", adjusted_mode->vsync_end);
+	PSB_DEBUG_ENTRY("adjusted_vtotal = %d\n", adjusted_mode->vtotal);
+	PSB_DEBUG_ENTRY("adjusted_clock = %d\n", adjusted_mode->clock);
+	PSB_DEBUG_ENTRY("hdisplay = %d\n", mode->hdisplay);
+	PSB_DEBUG_ENTRY("vdisplay = %d\n", mode->vdisplay);
+
+	power_island = pipe_to_island(pipe);
+
+	if (!power_island_get(power_island))
+		return 0;
+
+	memcpy(&psb_intel_crtc->saved_mode, mode,
+	       sizeof(struct drm_display_mode));
+	memcpy(&psb_intel_crtc->saved_adjusted_mode, adjusted_mode,
+	       sizeof(struct drm_display_mode));
+
+	list_for_each_entry(connector, &mode_config->connector_list, head) {
+		if (!connector)
+			continue;
+
+		encoder = connector->encoder;
+
+		if (!encoder)
+			continue;
+
+		if (encoder->crtc != crtc)
+			continue;
+
+		psb_intel_output = to_psb_intel_output(connector);
+
+		PSB_DEBUG_ENTRY("output->type = 0x%x \n",
+				psb_intel_output->type);
+
+		switch (psb_intel_output->type) {
+		case INTEL_OUTPUT_LVDS:
+			is_lvds = true;
+			break;
+		case INTEL_OUTPUT_TVOUT:
+			is_tv = true;
+			break;
+		case INTEL_OUTPUT_ANALOG:
+			is_crt = true;
+			break;
+		case INTEL_OUTPUT_MIPI:
+			is_mipi = true;
+			break;
+		case INTEL_OUTPUT_MIPI2:
+			is_mipi2 = true;
+			break;
+		case INTEL_OUTPUT_HDMI:
+			is_hdmi = true;
+			break;
+		}
+	}
+
+	/* Disable the VGA plane that we never use */
+	REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+
+	/* Disable the panel fitter if it was on our pipe */
+	if (psb_intel_panel_fitter_pipe(dev) == pipe)
+		REG_WRITE(PFIT_CONTROL, 0);
+
+	if (psb_intel_output)
+		drm_object_property_get_value(&psb_intel_output->base->base,
+						 dev->
+						 mode_config.scaling_mode_property,
+						 &scalingType);
+
+	PSB_DEBUG_ENTRY("scalingType 0x%x\n", scalingType);
+	/* pipesrc and dspsize control the size that is scaled from,
+	 * which should always be the user's requested size.
+	 */
+	if (pipe == 1) {
+		if (scalingType == DRM_MODE_SCALE_CENTER) {
+			/* This mode is used to support centering the screen by setting reg
+			 * in DISPLAY controller */
+			int startX = (1920 - fb->width) * fb->height / 1080 / 2;
+			int startY = 0x00;
+
+			PSB_DEBUG_ENTRY("fb height = %d; fb width = %d\n",
+					fb->height, fb->width);
+			PSB_DEBUG_ENTRY("startX = %d; startY = %d \n", startX,
+					startY);
+
+			REG_WRITE(dspsize_reg,
+				  ((fb->height - 1) << 16) | (fb->width - 1));
+			REG_WRITE(pipesrc_reg,
+				  (((1920 * fb->height /
+				     1080)) << 16) | (fb->height - 1));
+
+			REG_WRITE(dsppos_reg, (startY << 16) | startX);
+
+			if ((adjusted_mode->hdisplay != fb->width)
+			    || (adjusted_mode->vdisplay != fb->height))
+				REG_WRITE(PFIT_CONTROL,
+					  PFIT_ENABLE | PFIT_PIPE_SELECT_B);
+		} else {
+			/* Android will not change mode, however ,we have tools to change HDMI timing
+			   so there is some cases frame buffer no change ,but timing changed mode setting,
+			   in this case. mode information for source size is not right,
+			   so here use fb information for source/sprite size */
+
+			/* The defined sprite rectangle must always be completely contained within the displayable
+			 * area of the screen image (frame buffer). */
+			REG_WRITE(dspsize_reg,
+				  ((fb->height - 1) << 16) | (fb->width - 1));
+			/* Set the CRTC with encoder mode. */
+			REG_WRITE(pipesrc_reg,
+				  ((fb->width - 1) << 16) | (fb->height - 1));
+			if ((adjusted_mode->hdisplay != fb->width)
+			    || (adjusted_mode->vdisplay != fb->height))
+				REG_WRITE(PFIT_CONTROL,
+					  PFIT_ENABLE | PFIT_PIPE_SELECT_B);
+
+			REG_WRITE(dsppos_reg, 0);
+		}
+	} else {
+		REG_WRITE(dspsize_reg,
+			  ((mode->crtc_vdisplay -
+			    1) << 16) | (mode->crtc_hdisplay - 1));
+		REG_WRITE(pipesrc_reg,
+			  ((mode->crtc_hdisplay -
+			    1) << 16) | (mode->crtc_vdisplay - 1));
+		REG_WRITE(dsppos_reg, 0);
+	}
+
+	if (scalingType == DRM_MODE_SCALE_NO_SCALE) {
+		/*Moorestown doesn't have register support for centering so we need to
+		   mess with the h/vblank and h/vsync start and ends to get centering */
+		int offsetX = 0, offsetY = 0;
+
+		offsetX =
+		    (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2;
+		offsetY =
+		    (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2;
+
+		REG_WRITE(htot_reg, (mode->crtc_hdisplay - 1) |
+			  ((adjusted_mode->crtc_htotal - 1) << 16));
+		REG_WRITE(vtot_reg, (mode->crtc_vdisplay - 1) |
+			  ((adjusted_mode->crtc_vtotal - 1) << 16));
+		REG_WRITE(hblank_reg,
+			  (adjusted_mode->crtc_hblank_start - offsetX -
+			   1) | ((adjusted_mode->crtc_hblank_end - offsetX -
+				  1) << 16));
+		REG_WRITE(hsync_reg,
+			  (adjusted_mode->crtc_hsync_start - offsetX -
+			   1) | ((adjusted_mode->crtc_hsync_end - offsetX -
+				  1) << 16));
+		REG_WRITE(vblank_reg,
+			  (adjusted_mode->crtc_vblank_start - offsetY -
+			   1) | ((adjusted_mode->crtc_vblank_end - offsetY -
+				  1) << 16));
+		REG_WRITE(vsync_reg,
+			  (adjusted_mode->crtc_vsync_start - offsetY -
+			   1) | ((adjusted_mode->crtc_vsync_end - offsetY -
+				  1) << 16));
+	} else {
+		REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
+			  ((adjusted_mode->crtc_htotal - 1) << 16));
+		REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
+			  ((adjusted_mode->crtc_vtotal - 1) << 16));
+		REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
+			  ((adjusted_mode->crtc_hblank_end - 1) << 16));
+		REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
+			  ((adjusted_mode->crtc_hsync_end - 1) << 16));
+		REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
+			  ((adjusted_mode->crtc_vblank_end - 1) << 16));
+		REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
+			  ((adjusted_mode->crtc_vsync_end - 1) << 16));
+	}
+
+	/* Flush the plane changes */
+	{
+		struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+		crtc_funcs->mode_set_base(crtc, x, y, old_fb);
+	}
+
+	/* setup pipeconf */
+	*pipeconf = PIPEACONF_ENABLE;
+
+	/* Set up the display plane register */
+	*dspcntr = REG_READ(dspcntr_reg);
+	*dspcntr |= pipe << DISPPLANE_SEL_PIPE_POS;
+	*dspcntr |= DISPLAY_PLANE_ENABLE;
+
+	if (is_mipi2) {
+		goto mrst_crtc_mode_set_exit;
+	}
+
+	clk = adjusted_mode->clock;
+
+	if (is_hdmi) {
+		if ((dev_priv->ksel == KSEL_CRYSTAL_19)
+		    || (dev_priv->ksel == KSEL_BYPASS_19)) {
+			refclk = 19200;
+
+			if (is_mipi || is_mipi2) {
+				clk_n = 1, clk_p2 = 8;
+			} else if (is_hdmi) {
+				clk_n = 1, clk_p2 = 10;
+			}
+		} else if (dev_priv->ksel == KSEL_BYPASS_25) {
+			refclk = 25000;
+
+			if (is_mipi || is_mipi2) {
+				clk_n = 1, clk_p2 = 8;
+			} else if (is_hdmi) {
+				clk_n = 1, clk_p2 = 10;
+			}
+		} else if ((dev_priv->ksel == KSEL_BYPASS_83_100)
+			   && (dev_priv->core_freq == 166)) {
+			refclk = 83000;
+
+			if (is_mipi || is_mipi2) {
+				clk_n = 4, clk_p2 = 8;
+			} else if (is_hdmi) {
+				clk_n = 4, clk_p2 = 10;
+			}
+		} else if ((dev_priv->ksel == KSEL_BYPASS_83_100) &&
+			   (dev_priv->core_freq == 100
+			    || dev_priv->core_freq == 200)) {
+			refclk = 100000;
+			if (is_mipi || is_mipi2) {
+				clk_n = 4, clk_p2 = 8;
+			} else if (is_hdmi) {
+				clk_n = 4, clk_p2 = 10;
+			}
+		}
+
+		if (is_mipi)
+			clk_byte = dev_priv->bpp / 8;
+		else if (is_mipi2)
+			clk_byte = dev_priv->bpp2 / 8;
+
+		clk_tmp = clk * clk_n * clk_p2 * clk_byte;
+
+		PSB_DEBUG_ENTRY("clk = %d, clk_n = %d, clk_p2 = %d. \n", clk,
+				clk_n, clk_p2);
+		PSB_DEBUG_ENTRY("adjusted_mode->clock = %d, clk_tmp = %d. \n",
+				adjusted_mode->clock, clk_tmp);
+
+		ok = mdfldFindBestPLL(crtc, clk_tmp, refclk, &clock);
+		dev_priv->tmds_clock_khz =
+		    clock.dot / (clk_n * clk_p2 * clk_byte);
+
+		if (!ok) {
+			DRM_ERROR
+			    ("mdfldFindBestPLL fail in mdfld_crtc_mode_set. \n");
+		} else {
+			m_conv = mdfld_m_converts[(clock.m - MDFLD_M_MIN)];
+
+			PSB_DEBUG_ENTRY("dot clock = %d,"
+					"m = %d, p1 = %d, m_conv = %d. \n",
+					clock.dot, clock.m, clock.p1, m_conv);
+		}
+
+		dpll = REG_READ(dpll_reg);
+
+		if (dpll & DPLL_VCO_ENABLE) {
+			dpll &= ~DPLL_VCO_ENABLE;
+			REG_WRITE(dpll_reg, dpll);
+			REG_READ(dpll_reg);
+
+			/* FIXME check the DPLL lock bit PIPEACONF[29] */
+			/* FIXME_MDFLD PO - change 500 to 1 after PO */
+			udelay(500);
+
+			/* reset M1, N1 & P1 */
+			REG_WRITE(fp_reg, 0);
+			dpll &= ~MDFLD_P1_MASK;
+			REG_WRITE(dpll_reg, dpll);
+			/* FIXME_MDFLD PO - change 500 to 1 after PO */
+			udelay(500);
+		}
+
+		/* When ungating power of DPLL, needs to wait 0.5us before enable the VCO */
+		if (dpll & MDFLD_PWR_GATE_EN) {
+			dpll &= ~MDFLD_PWR_GATE_EN;
+			REG_WRITE(dpll_reg, dpll);
+			/* FIXME_MDFLD PO - change 500 to 1 after PO */
+			udelay(500);
+		}
+
+		dpll = 0;
+
+#if 0				/* FIXME revisit later */
+		if ((dev_priv->ksel == KSEL_CRYSTAL_19)
+		    || (dev_priv->ksel == KSEL_BYPASS_19)
+		    || (dev_priv->ksel == KSEL_BYPASS_25)) {
+			dpll &= ~MDFLD_INPUT_REF_SEL;
+		} else if (dev_priv->ksel == KSEL_BYPASS_83_100) {
+			dpll |= MDFLD_INPUT_REF_SEL;
+		}
+#endif				/* FIXME revisit later */
+
+		if (is_hdmi)
+			dpll |= MDFLD_VCO_SEL;
+
+		fp = (clk_n / 2) << 16;
+		fp |= m_conv;
+
+		/* compute bitmask from p1 value */
+		dpll |= (1 << (clock.p1 - 2)) << 17;
+
+#if 0				/* 1080p30 & 720p */
+		dpll = 0x00050000;
+		fp = 0x000001be;
+#endif
+#if 0				/* 480p */
+		dpll = 0x02010000;
+		fp = 0x000000d2;
+#endif
+	} else {
+#if 0				/*DBI_TPO_480x864 */
+		dpll = 0x00020000;
+		fp = 0x00000156;
+#endif	/* DBI_TPO_480x864 */			/* get from spec. */
+
+		dpll = 0x00800000;
+		fp = 0x000000c1;
+	}
+
+	REG_WRITE(fp_reg, fp);
+	REG_WRITE(dpll_reg, dpll);
+	/* FIXME_MDFLD PO - change 500 to 1 after PO */
+	udelay(500);
+
+	dpll |= DPLL_VCO_ENABLE;
+	REG_WRITE(dpll_reg, dpll);
+	REG_READ(dpll_reg);
+
+	/* wait for DSI PLL to lock */
+	while ((timeout < 20000)
+	       && !(REG_READ(pipeconf_reg) & PIPECONF_DSIPLL_LOCK)) {
+		udelay(150);
+		timeout++;
+	}
+
+	if (is_mipi)
+		goto mrst_crtc_mode_set_exit;
+
+	PSB_DEBUG_ENTRY("is_mipi = 0x%x \n", is_mipi);
+
+	REG_WRITE(pipeconf_reg, *pipeconf);
+	REG_READ(pipeconf_reg);
+
+	REG_WRITE(dspcntr_reg, *dspcntr);
+	psb_intel_wait_for_vblank(dev);
+
+ mrst_crtc_mode_set_exit:
+
+	power_island_put(power_island);
+
+	return 0;
+}
+#endif /* if KEEP_UNUSED_CODE */
+
+/* MDFLD_PLATFORM end */
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/psb_intel_drv.h b/drivers/external_drivers/intel_media/display/tng/drv/psb_intel_drv.h
new file mode 100644
index 0000000..173d2f6
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/psb_intel_drv.h
@@ -0,0 +1,336 @@
+/*
+ * Copyright (c) 2009, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifndef __INTEL_DRV_H__
+#define __INTEL_DRV_H__
+
+#include <linux/i2c.h>
+//#include <linux/i2c-id.h>
+#include <linux/i2c-algo-bit.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <linux/gpio.h>
+
+#define KEEP_UNUSED_CODE_S3D 0
+
+/* Switch - don't change before PO */
+#define MDFLD_GET_SYNC_BURST 0	/* Consider BURST_MODE when calcaulation H/V sync counts */
+/* MDFLD FEATURE SWITCHES*/
+/* MDFLD MIPI panels only one of them can be set to 1 */
+
+/* MDFLD KSEL only one of them can be set to 1 */
+#define KSEL_CRYSTAL_19_ENABLED 1
+#define KSEL_BYPASS_19_ENABLED 0
+#define KSEL_BYPASS_25_ENABLED 0
+#define KSEL_BYPASS_83_100_ENABLE 0
+
+#define KSEL_CRYSTAL_19 1
+#define KSEL_BYPASS_19 5
+#define KSEL_BYPASS_25 6
+#define KSEL_BYPASS_83_100 7
+/*
+ * MOORESTOWN defines
+ */
+#define DELAY_TIME1 2000	/* 1000 = 1ms */
+
+/*
+ * Display related stuff
+ */
+
+/* store information about an Ixxx DVO */
+/* The i830->i865 use multiple DVOs with multiple i2cs */
+/* the i915, i945 have a single sDVO i2c bus - which is different */
+#define MAX_OUTPUTS 6
+/* maximum connectors per crtcs in the mode set */
+#define INTELFB_CONN_LIMIT 4
+
+#define INTEL_I2C_BUS_DVO 1
+#define INTEL_I2C_BUS_SDVO 2
+
+/* these are outputs from the chip - integrated only
+ * external chips are via DVO or SDVO output */
+#define INTEL_OUTPUT_UNUSED 0
+#define INTEL_OUTPUT_ANALOG 1
+#define INTEL_OUTPUT_DVO 2
+#define INTEL_OUTPUT_SDVO 3
+#define INTEL_OUTPUT_LVDS 4
+#define INTEL_OUTPUT_TVOUT 5
+#define INTEL_OUTPUT_HDMI 6
+#define INTEL_OUTPUT_MIPI 7
+#define INTEL_OUTPUT_MIPI2 8
+
+#define INTEL_DVO_CHIP_NONE 0
+#define INTEL_DVO_CHIP_LVDS 1
+#define INTEL_DVO_CHIP_TMDS 2
+#define INTEL_DVO_CHIP_TVOUT 4
+
+enum s3d_structure {
+	S3D_FRAME_PACKING = 0x0,
+	S3D_FIELD_ALTERNATIVE = 0x1,
+	S3D_LINE_ALTERNATIVE = 0x2,
+	S3D_SIDE_BY_SIDE_FULL = 0x3,
+	S3D_L_DEPTH = 0x4,
+	S3D_L_DEPTH_GRAPHICS = 0x5,
+	S3D_TOP_AND_BOTTOM = 0x6,
+	S3D_SIDE_BY_SIDE_HALF = 0x8,
+	S3D_SIDE_BY_SIDE_HALF_QUINCUNX = 0xF,
+	S3D_LINE_ALTERNATIVE_HALF = 0x10,
+	S3D_PIXEL_INTERLEAVING_HALF = 0x11,
+	S3D_PIXEL_INTERLEAVING = 0x12,
+	S3D_DISABLED = 0xff
+};
+
+/**
+ * These define the S3D format in bit 
+*/
+#define S3D_FRAME_PACKING_BIT (1 << S3D_FRAME_PACKING)
+#define S3D_FIELD_ALTERNATIVE_BIT (1 << S3D_FIELD_ALTERNATIVE)
+#define S3D_LINE_ALTERNATIVE_BIT (1 << S3D_LINE_ALTERNATIVE)
+#define S3D_SIDE_BY_SIDE_FULL_BIT (1 << S3D_SIDE_BY_SIDE_FULL)
+#define S3D_L_DEPTH_BIT (1 << S3D_L_DEPTH)
+#define S3D_L_DEPTH_GRAPHICS_BIT (1 << S3D_L_DEPTH_GRAPHICS)
+#define S3D_TOP_AND_BOTTOM_BIT (1 << S3D_TOP_AND_BOTTOM)
+#define S3D_SIDE_BY_SIDE_HALF_BIT (1 << S3D_SIDE_BY_SIDE_HALF)
+#define S3D_SIDE_BY_SIDE_HALF_QUINCUNX_BIT (1 << S3D_SIDE_BY_SIDE_HALF_QUINCUNX)
+#define S3D_PIXEL_INTERLEAVING_BIT (1 << S3D_PIXEL_INTERLEAVING)
+
+#define S3D_STATE_MASK		(1 << 0)
+#define S3D_STATE_ENALBLED	(1 << 0)
+#define S3D_L_FRAME_ONLY	(1 << 1)
+
+#if KEEP_UNUSED_CODE_S3D
+struct mrfld_s3d_flip {
+	unsigned long uiAddr_l;
+	unsigned long uiAddr_r;
+	unsigned long s3d_format;
+	unsigned long pitch_l;
+	unsigned long pitch_r;
+	unsigned long s3d_state;
+};
+#endif /* if KEEP_UNUSED_CODE_S3D */
+
+enum mipi_panel_type {
+	NSC_800X480 = 1,
+	LGE_480X1024 = 2,
+	TPO_864X480 = 3
+};
+
+/**
+ * Hold information useally put on the device driver privates here,
+ * since it needs to be shared across multiple of devices drivers privates.
+*/
+struct psb_intel_mode_device {
+
+	/*
+	 * Abstracted memory manager operations
+	 */
+	void *(*bo_from_handle) (struct drm_device * dev,
+				 struct drm_file * file_priv,
+				 unsigned int handle);
+	 size_t(*bo_size) (struct drm_device * dev, void *bo);
+	 size_t(*bo_offset) (struct drm_device * dev, void *bo);
+	int (*bo_pin_for_scanout) (struct drm_device * dev, void *bo);
+	int (*bo_unpin_for_scanout) (struct drm_device * dev, void *bo);
+
+	/*
+	 * Cursor
+	 */
+	int cursor_needs_physical;
+
+	/*
+	 * LVDS info
+	 */
+	int backlight_duty_cycle;	/* restore backlight to this value */
+	bool panel_wants_dither;
+	struct drm_display_mode *panel_fixed_mode;
+	struct drm_display_mode *panel_fixed_mode2;
+	struct drm_display_mode *vbt_mode;	/* if any */
+
+	uint32_t saveBLC_PWM_CTL;
+};
+
+struct psb_intel_i2c_chan {
+	/* for getting at dev. private (mmio etc.) */
+	struct drm_device *drm_dev;
+	u32 reg;		/* GPIO reg */
+	struct i2c_adapter adapter;
+	struct i2c_algo_bit_data algo;
+	u8 slave_addr;
+};
+
+struct psb_intel_output {
+	struct drm_connector base;
+
+	struct drm_encoder enc;
+	int type;
+	struct i2c_adapter *hdmi_i2c_adapter;	/* for control functions */
+	struct psb_intel_i2c_chan *i2c_bus;	/* for control functions */
+	struct psb_intel_i2c_chan *ddc_bus;	/* for DDC only stuff */
+	bool load_detect_temp;
+	void *dev_priv;
+
+	struct psb_intel_mode_device *mode_dev;
+
+};
+
+struct psb_intel_crtc_state {
+	uint32_t saveDSPCNTR;
+	uint32_t savePIPECONF;
+	uint32_t savePIPESRC;
+	uint32_t saveDPLL;
+	uint32_t saveFP0;
+	uint32_t saveFP1;
+	uint32_t saveHTOTAL;
+	uint32_t saveHBLANK;
+	uint32_t saveHSYNC;
+	uint32_t saveVTOTAL;
+	uint32_t saveVBLANK;
+	uint32_t saveVSYNC;
+	uint32_t saveDSPSTRIDE;
+	uint32_t saveDSPSIZE;
+	uint32_t saveDSPPOS;
+	uint32_t saveDSPBASE;
+	uint32_t savePalette[256];
+};
+
+struct psb_intel_crtc {
+	struct drm_crtc base;
+	int pipe;
+	int plane;
+	uint32_t cursor_addr;
+	u8 lut_r[256], lut_g[256], lut_b[256];
+	u8 lut_adj[256];
+	struct psb_intel_framebuffer *fbdev_fb;
+	/* a mode_set for fbdev users on this crtc */
+	struct drm_mode_set mode_set;
+
+	/* current bo we scanout from */
+	void *scanout_bo;
+
+	/* current bo we cursor from */
+	void *cursor_bo;
+
+	struct drm_display_mode saved_mode;
+	struct drm_display_mode saved_adjusted_mode;
+
+	struct psb_intel_mode_device *mode_dev;
+
+	/*crtc mode setting flags */
+	u32 mode_flags;
+	u32 scaling_type;
+/*FIXME: Workaround to avoid MRST block.*/
+#ifndef CONFIG_X86_MRST
+	/* Saved Crtc HW states */
+	struct psb_intel_crtc_state *crtc_state;
+#endif
+};
+
+#define to_psb_intel_crtc(x)	\
+		container_of(x, struct psb_intel_crtc, base)
+#define to_psb_intel_output(x)	\
+		container_of(x, struct psb_intel_output, base)
+#define enc_to_psb_intel_output(x)	\
+		container_of(x, struct psb_intel_output, enc)
+#define to_psb_intel_framebuffer(x)	\
+		container_of(x, struct psb_intel_framebuffer, base)
+
+void mrst_init_TPO_MIPI(struct drm_device *dev);
+void aava_koski_dsi_init(struct drm_device *dev,
+			 struct psb_intel_mode_device *mode_dev);
+
+extern void psb_intel_crtc_init(struct drm_device *dev, int pipe,
+				struct psb_intel_mode_device *mode_dev);
+extern void psb_intel_crt_init(struct drm_device *dev);
+extern void mrst_wait_for_INTR_PKT_SENT(struct drm_device *dev);
+
+extern void psb_intel_crtc_load_lut(struct drm_crtc *crtc);
+extern void psb_intel_encoder_prepare(struct drm_encoder *encoder);
+extern void psb_intel_encoder_commit(struct drm_encoder *encoder);
+
+extern struct drm_encoder *psb_intel_best_encoder(struct drm_connector
+						  *connector);
+
+extern void psb_intel_wait_for_vblank(struct drm_device *dev);
+extern int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
+					   struct drm_file *file_priv);
+extern struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev,
+						     int pipe);
+extern struct drm_framebuffer *psb_intel_framebuffer_create(struct drm_device
+							    *dev, struct
+							    drm_mode_fb_cmd2
+							    *mode_cmd,
+							    void *mm_private);
+extern void mdfld_disable_crtc(struct drm_device *dev, int pipe);
+
+extern void mdfld_dbi_update_panel(struct drm_device *dev, int pipe);
+extern void mdfld_dsi_brightness_control(struct drm_device *dev, int pipe,
+					 int level);
+extern void mdfld_dsi_gen_fifo_ready(struct drm_device *dev,
+				     u32 gen_fifo_stat_reg, u32 fifo_stat);
+extern void mdfld_dsi_dbi_CB_ready(struct drm_device *dev,
+				   u32 mipi_command_address_reg,
+				   u32 gen_fifo_stat_reg);
+extern void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe);
+extern void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe);
+extern uint8_t blc_pol;
+extern uint8_t blc_freq;
+
+extern void mrfld_setup_pll(struct drm_device *dev, int pipe, int clk);
+
+#if KEEP_UNUSED_CODE_S3D
+extern int mrfld_s3d_flip_surf_addr(struct drm_device *dev, int pipe, struct
+				    mrfld_s3d_flip *ps3d_flip);
+extern int mrfld_s3d_to_line_interleave_half(struct drm_device *dev, int pipe, struct
+					     mrfld_s3d_flip *ps3d_flip);
+extern int mrfld_s3d_from_line_interleave_half(struct drm_device *dev, int pipe, struct
+					       mrfld_s3d_flip *ps3d_flip);
+extern int mrfld_s3d_to_line_interleave(struct drm_device *dev, int pipe, struct
+					mrfld_s3d_flip *ps3d_flip);
+extern int mrfld_s3d_from_line_interleave(struct drm_device *dev, int pipe, struct
+					  mrfld_s3d_flip *ps3d_flip);
+extern int mrfld_s3d_to_frame_packing(struct drm_device *dev, int pipe, struct
+				      mrfld_s3d_flip *ps3d_flip);
+extern int mrfld_s3d_from_frame_packing(struct drm_device *dev, int pipe, struct
+					mrfld_s3d_flip *ps3d_flip);
+extern int mrfld_s3d_to_top_and_bottom(struct drm_device *dev, int pipe, struct
+				       mrfld_s3d_flip *ps3d_flip);
+extern int mrfld_s3d_from_top_and_bottom(struct drm_device *dev, int pipe, struct
+					 mrfld_s3d_flip *ps3d_flip);
+extern int mrfld_s3d_to_full_side_by_side(struct drm_device *dev, int pipe, struct
+					  mrfld_s3d_flip *ps3d_flip);
+extern int mrfld_s3d_from_full_side_by_side(struct drm_device *dev, int pipe, struct
+					    mrfld_s3d_flip *ps3d_flip);
+extern int mrfld_s3d_to_half_side_by_side(struct drm_device *dev, int pipe, struct
+					  mrfld_s3d_flip *ps3d_flip);
+extern int mrfld_s3d_from_half_side_by_side(struct drm_device *dev, int pipe, struct
+					    mrfld_s3d_flip *ps3d_flip);
+extern int mrfld_s3d_to_pixel_interleaving_full(struct drm_device *dev,
+						int pipe, struct
+						mrfld_s3d_flip *ps3d_flip);
+extern int mrfld_s3d_from_pixel_interleaving_full(struct drm_device *dev,
+						  int pipe, struct
+						  mrfld_s3d_flip *ps3d_flip);
+extern int mrfld_s3d_to_pixel_interleaving_half(struct drm_device *dev,
+						int pipe, struct
+						mrfld_s3d_flip *ps3d_flip);
+extern int mrfld_s3d_from_pixel_interleaving_half(struct drm_device *dev,
+						  int pipe, struct
+						  mrfld_s3d_flip *ps3d_flip);
+#endif /* if KEEP_UNUSED_CODE_S3D */
+
+#endif				/* __INTEL_DRV_H__ */
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/psb_intel_hdmi.c b/drivers/external_drivers/intel_media/display/tng/drv/psb_intel_hdmi.c
new file mode 100644
index 0000000..f7bb988
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/psb_intel_hdmi.c
@@ -0,0 +1,72 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *	jim liu <jim.liu@intel.com>
+ */
+
+#include "psb_drv.h"
+#include "psb_intel_hdmi.h"
+
+/* start of S3D functions */
+
+/**
+ * Check if the HDMI display supports S3D. If so, report supported S3D formats
+ *
+ */
+int mrfld_hdmi_s3d_query(struct drm_device *dev, struct drm_psb_s3d_query
+			 *s3d_query)
+{
+	DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+	struct mid_intel_hdmi_priv *hdmi_priv = dev_priv->hdmi_priv;
+
+	if (!hdmi_priv) {
+		DRM_ERROR("%s, HDMI is not initialized. \n", __FUNCTION__);
+		return -EINVAL;
+	}
+
+	if (hdmi_priv->s3d_info.s3d_mandatory) {
+		if ((s3d_query->mode_resolution_x == 1920) &&
+		    (s3d_query->mode_resolution_y == 1080) &&
+		    (s3d_query->mode_refresh_rate == 24)) {
+			s3d_query->is_s3d_supported = 1;
+			s3d_query->s3d_format = HDMI_3D_MANDATORY_1080P24;
+		} else if ((s3d_query->mode_resolution_x == 1280) &&
+			   (s3d_query->mode_resolution_y == 720)) {
+			if ((s3d_query->mode_refresh_rate == 50) ||
+			    (s3d_query->mode_refresh_rate == 60)) {
+				s3d_query->is_s3d_supported = 1;
+				s3d_query->s3d_format = HDMI_3D_MANDATORY_720P;
+			}
+		} else if ((s3d_query->mode_resolution_x == 1920) &&
+			   (s3d_query->mode_resolution_y == 1080) &&
+			   s3d_query->is_interleaving) {
+			if ((s3d_query->mode_refresh_rate == 50) ||
+			    (s3d_query->mode_refresh_rate == 60)) {
+				s3d_query->is_s3d_supported = 1;
+				s3d_query->s3d_format = HDMI_3D_MANDATORY_1080I;
+			}
+		}
+	}
+
+	return 0;
+}
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/psb_intel_hdmi.h b/drivers/external_drivers/intel_media/display/tng/drv/psb_intel_hdmi.h
new file mode 100644
index 0000000..d5b92c6
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/psb_intel_hdmi.h
@@ -0,0 +1,934 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *	Chunfeng Zhao <chunfeng.zhao@intel.com>
+ *	Jim Liu <jim.liu@intel.com>
+ */
+
+#ifndef __PSB_INTEL_HDMI_H__
+#define __PSB_INTEL_HDMI_H__
+
+/*
+ * HDMI Parameters
+ */
+
+#define HDMI_DEVICE_NAME "ABC_VEND"
+#define HDMI_DEVICE_DESC "XZ05 PC VIDEO"
+
+#define HDMI_MAX_PIXEL_REPETITION 0x04	// On Cantiga only upto 4X pixel repetition is supported
+#define HDMI_HBR_AUDIO_SAMPLE_RATE 192000	// 192kHz is the sample rate corresponding to the HBR audio formats
+#define HDMI_AUDIO_CLOCK_PACKET_RATE 1500	// Audio clock packet rate of 1.5kHz has to be considered while calculating audio BW
+
+#define HDMI_BAR_INFO_LENGTH 8	// 8 bytes of barinfo
+
+#define HDMI_MONITOR_NAME_LENGTH 20
+
+// BaseLineDataLength.
+// Total size is in multiple of 4 bytes. i.e, 80/4 = 20
+#define HDMI_EELD_BASELINE_DATA_LENGTH 0x14
+
+// Header = 4, Baseline Data = 80 and Vendor (INTEL) specific = 2 as per EELD spec
+// 4 + 80 + = 84
+#define HDMI_EELD_SIZE 84
+
+struct mid_intel_hdmi_priv;
+extern void mdfld_hdcp_init(struct mid_intel_hdmi_priv *p_hdmi_priv);
+extern void mdfld_msic_init(struct mid_intel_hdmi_priv *p_hdmi_priv);
+//
+// HDMI command types
+//
+typedef enum {
+	HDMI_COMMAND_GET,
+	HDMI_COMMAND_SET
+} hdmi_command_t;
+
+#define HDMI_AVI_FLAG_ITCONTENT 0x00800000
+#define HDMI_AVI_FLAG_RGB_QUANT_RANGE 0x00040000
+#define HDMI_AVI_FLAG_SCAN_INFO 0x00000001
+#define HDMI_AVI_FLAG_BAR_INFO 0x00000010
+//
+// CEA-861b definitions
+//
+#define HDMI_CEA_VERSION             0x00
+#define HDMI_ELD_VERSION             0x01
+#define HDMI_EELD_VERSION            0x02
+#define HDMI_BASE_ELD_SIZE           0x0E
+#define HDMI_CEA_EDID_HEADER_SIZE    0x04
+#define HDMI_EELD_CEA_EDID_VERSION   0x03
+#define HDMI_CEA_EDID_BLOCK_SIZE     128
+
+//
+//Basic Audio support definitions
+//
+
+#define HDMI_BASIC_AUDIO_SUPPORTED                0x40
+#define HDMI_CEA_EXTENSION_BLOCK_BYTE_3            3
+#define HDMI_FL_AND_FR_SPEAKERS_CONNECTED        0x1
+
+//
+// HDMI buffer/information types
+//
+typedef enum {
+	// Non-standard or non-HDMI type
+	HDMI_ELD_TYPE = 0x00,	// ELD buffer type
+	HDMI_EELD_TYPE = 0x01,	// EELD buffer type
+
+	// Per HDMI Spec, refer Table 2-1 in HDMI EDS 
+	// or Table 5-8 in HDMI spec
+	HDMI_VS_TYPE = 0x81,	// Vendor-Specific InfoFrame type
+	HDMI_AVI_TYPE = 0x82,	// AVI InfoFrame type
+	HDMI_SPD_TYPE = 0x83,	// SPD InfoFrame type
+	HDMI_AUDIO_TYPE = 0x84,	// Audio InfoFrame type
+	HDMI_MS_TYPE = 0x85,	// MPEG Source InfoFrame type
+
+	// Non-standard or non-HDMI types
+	HDMI_PR_PE_TYPE = 0x86,	// Pixel Replication & Pixel Encoding(colorimetry) type
+	HDMI_AUDIO_CAPS_TYPE = 0x87,	// Encoder Audio Capabilities type
+	HDMI_AUDIO_ENABLE_FLAGS_TYPE = 0x88	// Flags for enabling / disabling audio
+} hdmi_info_type_t;
+
+//
+// InfoFrame Version Information
+//
+typedef enum {
+	HDMI_VS_VERSION = 1,	// Vendor-Specific InfoFrame Version 1
+	HDMI_AVI_VERSION = 1,	// AVI InfoFrame Version 1
+	HDMI_AVI_VERSION2 = 2,	// AVI InfoFrame Version 2
+	HDMI_SPD_VERSION = 1,	// SPD InfoFrame Version 1
+	HDMI_AUDIO_VERSION = 1,	// Audio InfoFrame Version 1
+	HDMI_MS_VERSION = 1	// MPEG Source InfoFrame Version 1
+} infoframe_version_t;
+
+//
+// InfoFrame Payload Length in bytes
+//
+typedef enum {
+	HDMI_VS_MAX_LENGTH = 27,	// Vendor-Specific InfoFrame Payload Length, including IEEE reg ID
+	HDMI_AVI_LENGTH = 13,	// AVI InfoFrame Payload Length
+	HDMI_SPD_LENGTH = 25,	// SPD InfoFrame Payload Length
+	HDMI_AUDIO_LENGTH = 10,	// Audio InfoFrame Payload Length
+	HDMI_MS_LENGTH = 10,	// MPEG Source InfoFrame Payload Length
+	HDMI_PR_PE_LENGTH = 4,	// Length of PR_PE_TYPE
+	HDMI_AUDIO_CAPS_LENGTH = 4	// Length of AUDIO_CAPS_TYPE
+} infoframe_length_t;
+
+//
+// InfoFrame TOTAL Length in bytes (includes header + payload)
+//
+typedef enum {
+	HDMI_VS_MAX_TOTAL_LENGTH = HDMI_VS_MAX_LENGTH + 4,	// Max Total size of Vendor-Specific InfoFrame
+	HDMI_AVI_TOTAL_LENGTH = HDMI_AVI_LENGTH + 5,	// Total size of AVI InfoFrame
+	HDMI_SPD_TOTAL_LENGTH = HDMI_SPD_LENGTH + 4,	// Total size of SPD InfoFrame
+	HDMI_AUDIO_TOTAL_LENGTH = HDMI_AUDIO_LENGTH + 4,	// Total size of Audio InfoFrame
+	HDMI_MS_TOTAL_LENGTH = HDMI_MS_LENGTH + 4,	// Total size of MPEG Source InfoFrame
+} infoframe_total_length_t;
+
+//
+// Pixel Replication multipliers
+//
+typedef enum {
+	HDMI_PR_ONE = 0,	// No repetition (ie., pixel sent once)
+	HDMI_PR_TWO,		// Pixel sent 2 times (ie.,repeated once)
+	HDMI_PR_THREE,		// Pixel sent 3 times
+	HDMI_PR_FOUR,		// Pixel sent 4 times
+	HDMI_PR_FIVE,		// Pixel sent 5 times
+	HDMI_PR_SIX,		// Pixel sent 6 times
+	HDMI_PR_SEVEN,		// Pixel sent 7 times
+	HDMI_PR_EIGHT,		// Pixel sent 8 times
+	HDMI_PR_NINE,		// Pixel sent 9 times
+	HDMI_PR_TEN		// Pixel sent 10 times
+} hdmi_pixel_replication_t;
+
+//
+// Pixel encoding modes
+//
+//typedef typedef enum  {
+  //  HDMI_RGB256 = 0x01,
+  //  HDMI_RGB220 = 0x02,
+  //  HDMI_YCrCb422 = 0x04,
+  //  HDMI_YCrCb444 = 0x08
+//}HDMI_COLORIMETRY;
+
+//
+// Pixel encoding modes
+//
+typedef enum {
+	HDMI_COLORIMETRY_RGB256 = 0x01,
+	HDMI_COLORIMETRY_RGB220 = 0x02,
+	HDMI_COLORIMETRY_YCrCb422 = 0x04,
+	HDMI_COLORIMETRY_YCrCb444 = 0x08
+} hdmi_colorimetry_t;
+
+//
+// AVI InfoFrame definitions - start
+//
+// Scan Info
+typedef enum {
+	HDMI_AVI_SCAN_NODATA = 0,	// No data
+	HDMI_AVI_SCAN_OVERSCAN = 1,	// Overscanned (TV)
+	HDMI_AVI_SCAN_UNDERSCAN = 2,	// Underscanned (Computer)
+	HDMI_AVI_SCAN_FUTURE = 3	// Future
+} avi_scan_info_t;
+
+// Bar Info
+typedef enum {
+	HDMI_AVI_BAR_INVALID = 0,	// Bar data not valid
+	HDMI_AVI_BAR_VALID_VERTICAL = 1,	// Vertical Bar data valid
+	HDMI_AVI_BAR_VALID_HORIZONTAL = 2,	// Horizontal Bar data valid
+	HDMI_AVI_BAR_VALID_BOTH = 3	// Vertical & Horizontal Bar data valid
+} avi_bar_info_t;
+
+// Active Format Information
+typedef enum {
+	HDMI_AVI_AFI_INVALID = 0,	// No data
+	HDMI_AVI_AFI_VALID = 1	// Active Format Information valid
+} avi_fi_info_t;
+
+// AVI Pixel Encoding modes
+typedef enum {
+	HDMI_AVI_RGB_MODE = 0,	// RGB pixel encoding mode
+	HDMI_AVI_YCRCB422_MODE = 1,	// YCrCb 4:2:2 mode
+	HDMI_AVI_YCRCB444_MODE = 2,	// YCrCb 4:4:4 mode
+	HDMI_AVI_FUTURE_MODE = 3	// Future mode
+} avi_encoding_mode_t;
+
+// AVI Active Format Aspect Ratio
+typedef enum {
+	HDMI_AVI_AFAR_SAME = 8,	// same as picture aspect ratio
+	HDMI_AVI_AFAR_4_3 = 9,	// 4:3 center
+	HDMI_AVI_AFAR_16_9 = 10,	// 16:9 center
+	HDMI_AVI_AFAR_14_9 = 11	// 14:9 center
+} avi_afar_info_t;
+
+// AVI Picture Aspect Ratio
+typedef enum {
+	HDMI_AVI_PAR_NODATA = 0,	// No Data
+	HDMI_AVI_PAR_4_3 = 1,	// 4:3
+	HDMI_AVI_PAR_16_9 = 2,	// 16:9
+	HDMI_AVI_PAR_FUTURE = 3	// Future
+} avi_par_info_t;
+
+// AVI Colorimetry Information
+typedef enum {
+	HDMI_AVI_COLOR_NODATA = 0,	// No data
+	HDMI_AVI_COLOR_ITU601 = 1,	// SMPTE 170M, ITU601
+	HDMI_AVI_COLOR_ITU709 = 2,	// ITU709
+	HDMI_AVI_COLOR_FUTURE = 3	// Future
+} avi_color_info_t;
+
+// AVI Non-uniform Picture Scaling Info
+typedef enum {
+	HDMI_AVI_SCALING_NODATA = 0,	// No scaling
+	HDMI_AVI_SCALING_HORIZONTAL = 1,	// horizontal scaling
+	HDMI_AVI_SCALING_VERTICAL = 2,	// vertical scaling
+	HDMI_AVI_SCALING_BOTH = 3	// horizontal & vertical scaling
+} avi_scaling_infp_t;
+
+// AVI RGB Quantization Range
+typedef enum {
+	HDMI_AVI_RGBQUANT_DEFAULT = 0,	// Default value
+	HDMI_AVI_RGBQUANT_LIMITED = 1,	// Limited Range
+	HDMI_AVI_RGBQUANT_FULL = 2,	// Full Range
+	HDMI_AVI_RGBQUANT_FUTURE = 3	// Future use
+} avi_rgbquant_range_t;
+
+// AVI IT Content
+typedef enum {
+	HDMI_AVI_ITC_NODATA = 0,	// No Data
+	HDMI_AVI_ITC_ITCONTENT = 1	//IT Content
+} avi_it_content_t;
+
+//
+// AVI InfoFrame definitions - end
+//
+
+//
+// SPD InfoFrame definitions - start
+//
+// SPD InfoFrame Data Byte 25, refer Table-17 in CEA-861b
+typedef enum {
+	HDMI_SPD_SRC_UNKNOWN = 0x00,	// unknown
+	HDMI_SPD_SRC_DIGITAL_STB = 0x01,	// Digital STB
+	HDMI_SPD_SRC_DVD = 0x02,	// DVD
+	HDMI_SPD_SRC_DVHS = 0x03,	// D-VHS
+	HDMI_SPD_SRC_HDD_VIDEO = 0x04,	// HDD Video
+	HDMI_SPD_SRC_DVC = 0x05,	// DVC
+	HDMI_SPD_SRC_DSC = 0x06,	// DSC
+	HDMI_SPD_SRC_VCD = 0x07,	// Video CD
+	HDMI_SPD_SRC_GAME = 0x08,	// Game
+	HDMI_SPD_SRC_PC = 0x09	// PC General
+} spd_src_type_t;
+
+// SPD InfoFrame Vendor Name & Descriptor Length in bytes
+typedef enum {
+	HDMI_SPD_VNAME_LENGTH = 8,	// SPD Vendor Name Length in bytes
+	HDMI_SPD_VDESC_LENGTH = 16,	// SPD Vendor Descriptor Length in bytes
+} spd_namedesc_length_info_t;
+
+//
+// SPD InfoFrame definitions - end
+//
+
+//
+// InfoFrame Packet Header - generic
+//
+typedef struct _if_header {
+	uint8_t type;		// InfoFrame Type
+	uint8_t version;	// InfoFrame Version
+	uint8_t length;		// InfoFrame Length
+	uint8_t ecc;		// ECC Parity
+} if_header_t;
+
+//
+// AVI InfoFrame structure
+//
+typedef union _avi_if {
+	uint8_t avi_buf[HDMI_AVI_TOTAL_LENGTH];
+#pragma pack(1)
+	struct {
+		if_header_t avi_if_header;	// AVI header data
+		uint8_t chksum;	//checksum
+		union {
+			uint8_t byte1;
+			struct {
+				uint8_t scan_info:2;	// scan information
+				uint8_t bar_info:2;	// bar information
+				uint8_t format:1;	// active format information
+				uint8_t enc_mode:2;	// pixel encoding (RGB or YCrCb)
+				uint8_t b1rsvd:1;	// reserved
+			} byte1_bits;
+		};
+		union {
+			uint8_t byte2;
+			struct {
+				uint8_t afar:4;	// Active Format Aspect Ratio
+				uint8_t par:2;	// Picture Aspect Ratio
+				uint8_t colorimetry:2;	// colorimetry
+			} byte2_bits;
+		};
+		union {
+			uint8_t byte3;
+			struct {
+				uint8_t scaling_info:2;	// Scaling information
+				uint8_t rgbquant_range:2;	// RGB Quantization Range
+				uint8_t ext_colorimetry:3;	//Extended Colorimetry
+				uint8_t it_content:1;	//IT Content
+			} byte3_bits;
+		};
+		union {
+			uint8_t byte4;
+			struct {
+				uint8_t vic:7;	// Video Identification code (refer Table 13 in CEA-861b)
+				uint8_t b4rsvd:1;	// reserved
+			} byte4_bits;
+		};
+		union {
+			uint8_t byte5;
+			struct {
+				uint8_t pr:4;	// pixel repetition (refer Table 15 in CEA-861b)
+				uint8_t b5rsvd:4;	// reserved
+			} byte5_bits;
+		};
+		uint8_t byte6;	// end of top bar(lower), set to "00"
+		uint8_t byte7;	// end of top bar(upper), set to "00"
+		uint8_t byte8;	// start of bottom bar(lower), set to "00"
+		uint8_t byte9;	// start of bottom bar(upper), set to "00"
+		uint8_t byte10;	// end of left bar(lower), set to "00"
+		uint8_t byte11;	// end of left bar(upper), set to "00"
+		uint8_t byte12;	// start of right bar(lower), set to "00"
+		uint8_t byte13;	// start of right bar(upper), set to "00"
+	} avi_info;
+#pragma pack()
+} avi_if_t;
+
+//
+// SPD InfoFrame structure
+//
+typedef union _spd_if {
+	uint8_t spd_buf[HDMI_SPD_TOTAL_LENGTH];
+#pragma pack(1)
+	struct {
+		if_header_t spd_if_header;	// SPD header data
+		uint8_t name[8];	// Vendor Name, 8 characters
+		uint8_t desc[16];	// Product Description, 16 characters
+		uint8_t sdi;	// Source Device Information
+	};
+#pragma pack()
+} spd_if_t;
+
+//
+// Vendor Specific InfoFrame structure
+//
+typedef union _vs_if {
+	uint8_t vs_buf[HDMI_VS_MAX_TOTAL_LENGTH];
+#pragma pack(1)
+	struct {
+		if_header_t vs_if_header;	// VS header data
+		uint8_t ieee_reg_id[3];	// 3-byte IEEE registration ID
+		uint8_t pay_load[24];	// Payload bytes
+	};
+#pragma pack()
+} vs_if_t;
+
+//
+// AVI Infoframe structure for customization
+//
+
+typedef struct _avi_infoframe_custom {
+	//GUID        guid;                   // GUID
+	int32_t command;	// Command
+	int32_t flags;		// Flags
+	uint32_t type_code;	// Type code of AVI Infoframe
+	uint32_t version;	// Version of AVI Infoframe
+	uint32_t length;	// Length of AVI Info Frame 
+	uint8_t r3r0_valid;	// Reserved
+	uint8_t it_content;	// IT Content
+	uint8_t bar_info[8];	// Reserved
+	int32_t active_format_aspect_ratio;	// Reserved 
+	int32_t non_uniform_scaling;	// Reserved 
+	int32_t rgb_ycc_indicator;	// Reserved 
+	int32_t ext_colorimetry;	// Reserved 
+	int32_t pixel_factor;	// Reserved 
+	int32_t bar_info_valid;	// Reserved 
+	int32_t colorimetry;	// Reserved 
+	int32_t aspect_ratio;	// Reserved 
+	int32_t quant_range;	// Quantization Range
+	int32_t video_code;	// Reserved 
+	int32_t scan_info;	// Scan Information
+} avi_infoframe_custom_t;
+
+//
+// LinearPCM Consolidated Audio Data(CAD) structure
+//
+typedef union _lpcm_cad {
+	uint8_t value;
+	struct {
+		uint8_t maxch_cp_on:3;	// Max channels-1 supported with CP turned ON
+		uint8_t maxch_cp_off:3;	// Max channels-1 supported with CP turned OFF
+		uint8_t sp_20bit:1;	// 20-bit sample support
+		uint8_t sp_24bit:1;	// 24-bit sample support
+	};
+} lpcm_cad_t;
+
+//
+// CEA Short Audio Descriptor
+// 
+typedef struct _cea_861b_adb {
+#pragma pack(1)
+	union {
+		uint8_t byte1;
+		struct {
+			uint8_t max_channels:3;	// Bits[0-2]
+			uint8_t audio_format_code:4;	// Bits[3-6], see AUDIO_FORMAT_CODES
+			uint8_t b1reserved:1;	// Bit[7] - reserved
+		};
+	};
+	union {
+		uint8_t byte2;
+		struct {
+			uint8_t sp_rate_32kHz:1;	// Bit[0] sample rate = 32kHz
+			uint8_t sp_rate_44kHz:1;	// Bit[1] sample rate = 44kHz
+			uint8_t sp_rate_48kHz:1;	// Bit[2] sample rate = 48kHz
+			uint8_t sp_rate_88kHz:1;	// Bit[3] sample rate = 88kHz
+			uint8_t sp_rate_96kHz:1;	// Bit[4] sample rate = 96kHz
+			uint8_t sp_rate_176kHz:1;	// Bit[5] sample rate = 176kHz
+			uint8_t sp_rate_192kHz:1;	// Bit[6] sample rate = 192kHz
+			uint8_t sp_rate_b2reserved:1;	// Bit[7] - reserved
+		};
+	};
+	union {
+		uint8_t byte3;	// maximum bit rate divided by 8kHz
+		// following is the format of 3rd byte for uncompressed(LPCM) audio
+		struct {
+			uint8_t bit_rate_16bit:1;	// Bit[0]
+			uint8_t bit_rate_20bit:1;	// Bit[1]
+			uint8_t bit_rate_24bit:1;	// Bit[2]
+			uint8_t bit_rate_b3reserved:5;	// Bits[3-7]
+		};
+	};
+#pragma pack()
+} cea_861b_adb_t;
+
+//
+// Enhanced EDID Like Data aka EELD structure
+//
+typedef union _hdmi_eeld {
+	uint8_t eeld[HDMI_EELD_SIZE];
+#pragma pack(1)
+	struct {
+		// Byte[0] = ELD Version Number
+		union {
+			uint8_t byte0;
+			struct {
+				uint8_t reserved:3;	// Reserf
+				uint8_t eld_ver:5;	// ELD Version Number
+				//  00000b - reserved
+				//  00001b - first rev
+				//  00010b:11111b - reserved for future
+			};
+		};
+
+		// Byte[1] = Vendor Version Field
+		union {
+			uint8_t vendor_version;
+			struct {
+				uint8_t reserved1:3;
+				uint8_t veld_ver:5;	// Version number of the ELD extension.
+				// This value is provisioned and unique to each vendor.
+			};
+		};
+
+		// Byte[2] = Baseline Lenght field
+		uint8_t baseline_eld_length;	// Length of the Baseline structure divided by Four.
+
+		// Byte [3] = Reserved for future use
+		uint8_t byte3;
+
+		// Starting of the BaseLine EELD structure
+		// Byte[4] = Monitor Name Length 
+		union {
+			uint8_t byte4;
+			struct {
+				uint8_t mnl:5;
+				uint8_t cea_edid_rev_id:3;
+			};
+		};
+
+		// Byte[5] = Capabilities
+		union {
+			uint8_t capabilities;
+			struct {
+				uint8_t hdcp:1;	// Indicates HDCP support
+				uint8_t ai_support:1;	// Inidcates AI support
+				uint8_t connection_type:2;	// Indicates Connection type 
+				// 00 - HDMI
+				// 01 - DP
+				// 10 -11  Reserved for future connection types
+				uint8_t sadc:4;	// Indicates number of 3 bytes Short Audio Descriptors. 
+			};
+		};
+
+		// Byte[6] = Audio Synch Delay
+		uint8_t audio_synch_delay;	// Amount of time reported by the sink that the video trails audio in milliseconds.
+
+		// Byte[7] = Speaker Allocation Block
+		union {
+			uint8_t speaker_allocation_block;
+			struct {
+				uint8_t flr:1;	// Front Left and Right channels
+				uint8_t lfe:1;	// Low Frequency Effect channel
+				uint8_t fc:1;	// Center transmission channel
+				uint8_t rlr:1;	// Rear Left and Right channels
+				uint8_t rc:1;	// Rear Center channel
+				uint8_t flrc:1;	// Front left and Right of Center transmission channels
+				uint8_t rlrc:1;	// Rear left and Right of Center transmission channels
+				uint8_t reserved3:1;	// Reserved
+			};
+		};
+
+		// Byte[8 - 15] - 8 Byte port identification value
+		uint8_t port_id_value[8];
+
+		// Byte[16 - 17] - 2 Byte Manufacturer ID
+		uint8_t manufacturer_id[2];
+
+		// Byte[18 - 19] - 2 Byte Product ID
+		uint8_t product_id[2];
+
+		// Byte [20-83] - 64 Bytes of BaseLine Data
+		uint8_t mn_sand_sads[64];	// This will include
+		// - ASCII string of Monitor name
+		// - List of 3 byte SADs
+		// - Zero padding
+
+		// Vendor ELD Block should continue here!
+		// No Vendor ELD block defined as of now. 
+	};
+#pragma pack()
+} hdmi_eeld_t;
+
+//
+// Data structure for misc HDMI data
+//
+typedef struct _misc_hdmi_data {
+	int32_t colorimetry:4;	// 
+	int32_t pr:4;		// pixel repetition value
+	int32_t reserved:24;	// reserved bits
+} misc_hdmi_data_t;
+
+//
+// Audio capability structure
+//
+typedef struct _device_audio_caps {
+	int32_t npl_design:8;	// max number of audio packets device can
+	// deliver per line
+	int32_t k0:8;		// The overhead(in pixels) per line requied
+	// by device for setting up audio packets when
+	// CP is disabled
+	int32_t k1:8;		// The overhead(in pixels) per line requied
+	// by device for setting up audio packets when
+	// CP is enabled
+	// Misc data
+	int32_t pr:4;		// Pixel Replication value
+	int32_t is_hdcp:1;	// Driver, Device and Receiver support HDCP
+	int32_t is_rptr:1;	// Receiver is HDCP repeater
+	int32_t reserved:2;	// reserved bits
+} device_audio_caps_t;
+
+typedef struct _audio_enable_flags {
+	int32_t is_hdmi_display:1;	//1 if HDMI display, 0 if not HDMI display
+	int32_t is_eld_valid:1;	//1 if ELD valid, 0 if ELD not valid
+	int32_t reserved1:30;
+} audio_enable_flags_t;
+
+//
+// Data structure to exchange HDMI data through GetSetParameters interface
+//
+typedef struct _hdmi_parameters {
+	//GUID              Guid;
+	hdmi_command_t command;
+	uint8_t type;
+	uint8_t size;
+	union {
+		hdmi_eeld_t eeld_buffer;
+		avi_if_t avi_infoframe;
+		spd_if_t spd_infoframe;
+		vs_if_t vs_infoframe;
+		union {
+			int32_t gen_data;
+			device_audio_caps_t audio_caps;
+			misc_hdmi_data_t misc_data;
+			audio_enable_flags_t fl_audio_enable_flags;
+		};
+	};
+} hdmi_parameters_t;
+
+//
+// Audio format codes
+//
+typedef enum {
+	AUDIO_LPCM = 0x0001,	// Linear PCM (eg. IEC60958)
+	AUDIO_AC3 = 0x0002,	// AC-3
+	AUDIO_MPEG1 = 0x0003,	// MPEG1 (Layers 1 & 2)
+	AUDIO_MP3 = 0x0004,	// MP3   (MPEG1 Layer 3)
+	AUDIO_MPEG2 = 0x0005,	// MPEG2 (multichannel)
+	AUDIO_AAC = 0x0006,	// AAC
+	AUDIO_DTS = 0x0007,	// DTS
+	AUDIO_ATRAC = 0x0008,	// ATRAC
+	AUDIO_OBA = 0x0009,	// One Bit Audio
+	AUDIO_DOLBY_DIGITAL = 0x000A,	// Dolby Digital
+	AUDIO_DTS_HD = 0x000B,	// DTS-HD
+	AUDIO_MAT = 0x000C,	// MAT (MLP)
+	AUDIO_DST = 0x000D,	// DST
+	AUDIO_WMA_PRO = 0x000E	// WMA Pro
+} audio_format_codes_t;
+
+//
+// Data structure for byte #6 to 8 which has fixed definition
+//
+typedef struct _vsdb_char6_to_char8 {
+#pragma pack(1)
+
+	union {
+		uint8_t byte1;
+		struct {
+			uint8_t dvi_dual:1;	// Bit[0]
+			uint8_t b1reserved:2;	// Bits[1-2]
+			uint8_t dcy444:1;	// Bit[3] YCBCR 4:4:4 in Deep Color modes.
+			uint8_t dc30bit:1;	//Bit[4]
+			uint8_t dc36bit:1;	//Bit[5]
+			uint8_t dc48bit:1;	//Bit[6]
+			uint8_t supports_ai:1;	// Bit[7]
+		};
+	};
+
+	uint8_t max_tmds_clock;
+
+	union {
+		uint8_t byte3;
+		struct {
+			uint8_t b3reserved:6;	// Bit[0-5] reserved
+			uint8_t i_latency_field_present:1;	// Bit[6]
+			uint8_t latency_field_present:1;	// Bits[7]
+		};
+	};
+
+#pragma pack()
+} vsdb_byte6_to_byte8_t;
+
+//
+// Gamut metadata structure
+//
+// Note : The data is written in big endian format
+
+#define HDMI_GBD_PKT_TYPE 0x0A
+#define HDMI_GBD_P0_DATA_SIZE 27
+#define HDMI_MAX_VERTICES_DATA 25
+#define HDMI_MAX_FACET_DATA 25
+
+typedef enum {
+	VERTICES_AND_FACETS = 0,
+	RGB_MIN_MAX_RANGE = 1
+} gbd_format_flag_t;
+
+typedef enum {
+	GBD_8BIT_PRECISION = 0,
+	GBD_10BIT_PRECISION = 1,
+	GBD_12BIT_PRECISION = 2
+} gbd_color_precision_t;
+
+typedef enum {
+	RGB_BT709 = 0,
+	XVY_CC601 = 1,
+	XVY_CC709 = 2,
+	RESERVED_COLORSPACE
+} gbd_color_space_t;
+
+typedef enum {
+	MIN_RED_INDEX = 0,
+	MAX_RED_INDEX = 1,
+	MIN_GREEN_INDEX = 2,
+	MAX_GREEN_INDEX = 3,
+	MIN_BLUE_INDEX = 4,
+	MAX_BLUE_INDEX = 5,
+	MAX_RANGE_DATA_INDEX_LIMIT = 6
+} gbd_rgb_range_data_index_t;
+
+//
+// App needs to feel the data in this structure
+//
+typedef struct _gbd_p0_hdmi_1_3 {
+	uint8_t enable;		// Enable/Disable GBD profile sending
+	gbd_format_flag_t format_flag;	// uses GBD_FORMAT_FLAG_EN, this defines the gamut data format
+	gbd_color_precision_t color_precision;	// uses GBD_COLOR_PRECISION, this is the bit precision of GBD vertex and range data
+	gbd_color_space_t color_space;	// uses GBD_COLOR_SPACE_EN, this defines the color space being represented
+
+	union {
+		// If bFormatFlag is 0
+		struct {
+			uint8_t facet_mode;	// spec supports 0 alone right now
+			uint16_t num_vertices;	// Number of vertices 
+			uint16_t num_facets;	// Number of faces
+
+			// For 4 vertices of 12bits size is 18
+			// Max possible with 0 facets and 28 bytes of GBD is 28-5=23 bytes
+			uint16_t vertices_data[HDMI_MAX_VERTICES_DATA];	// Vertices data representation
+			uint16_t facets_data[HDMI_MAX_FACET_DATA];	// kept it as input data but to be defined based on future spec
+		} vertices_facets_data;
+
+		// If eFormatFlag is 1
+		struct {
+			uint16_t rgb_primary_data[MAX_RANGE_DATA_INDEX_LIMIT];
+		} rgb_range_data;
+	};
+
+} gbd_p0_hdmi_1_3_t;
+
+#define HDMI_GBD_MAX_SEQ_NUM_INDEX 16
+
+// various GBD profiles
+typedef enum {
+	P0_PROFILE = 0,
+	P1_PROFILE = 1,
+	P2_PROFILE = 2,
+	P3_PROFILE = 3,
+	INVALID_PROFILE
+} gbd_profile_type_t;
+
+// various packet transmission options
+typedef enum {
+	INTERMEDIATE_PKT_IN_SEQ = 0,
+	FIRST_PKT_IN_SEQ = 1,
+	LAST_PKT_IN_SEQ = 2,
+	ONLY_PKT_IN_SEQ = 3
+} gbd_pkt_seq_t;
+
+//
+// Packet header defn as per HDMI spec
+//
+typedef struct _gamut_pkt_header {
+	uint8_t pkt_type;	// Defines the pkt type
+	union {
+		uint8_t field_byte;
+		struct {
+			uint8_t affected_gamut_info:4;	// BIT 3:0
+			uint8_t gbd_profile:3;	// BIT 6:4 ; uses GBD_PROFILE_TYPE_EN
+			uint8_t next_field:1;	// BIT7
+		};
+	};
+
+	union {
+		uint8_t gbd_seq_info;
+		struct {
+			uint8_t current_gamut_info:4;	// BIT 3:0
+			uint8_t packet_seq:2;	// BIT 5:4 ; use GBD_PKT_SEQ_EN
+			uint8_t reserved2:1;	// BIT 6
+			uint8_t no_current_gbd:1;	// BIT 7
+		};
+	};
+} gamut_pkt_header_t;
+
+//
+// Gamut structure contains data in following format
+// 
+typedef struct _gamut_metadata_struct {
+#pragma pack(1)
+	gamut_pkt_header_t pkt_hdr;	// Gamut Metadata header data
+	union {
+		uint8_t byte1;
+		struct {
+			uint8_t gbd_color_space:3;
+			// Note: GBD buffer is formatted based upon the color precision
+			// 8 bit precision : 1 sign bit, 2 bits of integer, 5 bits of fraction
+			// 10 bit precision : 1 sign bit, 2 bits of integer, 7 bits of fraction
+			// 12 bit precision : 1 sign bit, 2 bits of integer, 9 bits of fraction
+			uint8_t gbd_color_precision:2;
+			uint8_t reserved3:1;
+			uint8_t facet_mode:1;	// 0 - No facet info in GBD; 1 - Facet info in GBD
+			uint8_t format_flag:1;	// uses GBD_FORMAT_FLAG_EN
+		};
+	};
+
+	// For P0 profile below is the syntax in which data will be filled
+	// If Format is YUV
+	// char 2 : Higher 8 bits of number of vertices
+	// char 3 : Lower 8 bits of number of vertices
+	// char 4 to VSIZE+2 : Vertex data of size VSIZE, 
+	// where VSIZE = 3*number of vertices*GBD color precision/8 + 0.99999
+	// char VSIZE+3: Higher 8 bits of number of facets
+	// char VSIZE+4: Lower 8 bits of number of facets
+	// char VSIZE+5 to VSIZE+FSIZE+4 : Facet data
+	// where VSIZE = number of facet data
+	uint8_t gbd_data[HDMI_GBD_P0_DATA_SIZE];	// data will be filled
+
+#pragma pack()
+} gamut_metadata_st_t;
+
+struct hdmi_edid_info {
+	char monitor_name[HDMI_MONITOR_NAME_LENGTH];
+	char *edid_info;
+};
+
+#define HDMI_EDID_INFO(nm, info) \
+	.monitor_name = nm, .edid_info = info
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34))
+#define MSIC_IRQLVL1_STATUS 0x02
+#define VREG_STATUS (1 << 5)
+#define MSIC_VRINT_STATUS 0x0d
+#define HDMI_HPD_STATUS (1 << 3)
+#define HDMI_OCP_STATUS (1 << 2)
+#define VR_OCP_STATUS (1 << 1)
+#define VR_OVP_STATUS (1 << 0)
+#define MSIC_VRINT_MASK 0x1c
+#define HDMI_HPD_MASK (1 << 3)
+#define HDMI_OCP_MASK (1 << 2)
+#define VR_OCP_MASK (1 << 1)
+#define VR_OVP_MASK (1 << 0)
+#define MSIC_IRQLVL1_MASK 0x21
+#define VREG_MASK (1 << 5)
+#define MSIC_VCC330CNT 0xd3
+#define VCC330_OFF 0x24
+#define VCC330_ON 0x37
+#define MSIC_VHDMICNT 0xde
+#define VHDMI_OFF 0x24
+#define VHDMI_ON 0xa4
+#define VHDMI_DB_30MS 0x60
+#define MSIC_HDMI_STATUS 0x281
+#define HPD_SIGNAL_STATUS (1 << 0)
+#endif
+
+/* HDMI Video Timing */
+struct hdmi_video_format_timing {
+	u32 video_code;
+	u32 hdisplay;
+	u32 vdisplay;
+	u32 refresh;
+	bool bInterlace;
+	u32 hpolarity;
+	u32 vpolarity;
+	avi_afar_info_t afar;
+};
+
+/* start of S3D header */
+#define HDMI_S3D_CEA_EDID_VERSION	0x03
+#define HDMI_S3D_INFO_VERSION		0x01
+#define CEA_EDID_EXTENSION_TAG		0x02
+#define CEA_VDB_VIC_MASK		0x7f
+#define HDMI_VSDB_MASK			0x00ffffff
+#define HDMI_VSDB			0x00000c03
+#define HDMI_VIDEO_PRESENT_OFFSET	0x8
+#define HDMI_VIDEO_PRESENT_BIT		(1 << 5)
+#define I_LATENCY_FIELDS_PRESENT_BIT	(1 << 6)
+#define LATENCY_FIELDS_PRESENT_BIT	(1 << 7)
+#define HDMI_3D_PRESENT_BIT		(1 << 7)
+#define HDMI_3D_MULTI_PRESENT_BITS	(0x3 << 5)
+#define HDMI_3D_MULTI_PRESENT_BITS_POS	0x5
+#define HDMI_3D_LEN_BITS		0x1f
+#define HDMI_VIC_LEN_BITS		(0x7 << 5)
+#define HDMI_VIC_LEN_BITS_POS		0x5
+#define HDMI_3D_STRUCTURE_BITS		0xf
+#define HDMI_2D_VIC_ORDER_BITS		(0xf << 4)
+#define HDMI_2D_VIC_ORDER_BITS_POS	0x4
+#define RESERVED_BITS			0xf
+#define HDMI_3D_DETAILS_BITS		(0xf << 4)
+#define HDMI_3D_DETAILS_BITS_POS	0x4
+#define HDMI_3D_STRUCTURE_EXTRA_BIT	0x8
+
+#define HDMI_3D_MANDATORY_1080P24	(S3D_FRAME_PACKING_BIT | S3D_TOP_AND_BOTTOM_BIT)
+#define HDMI_3D_MANDATORY_720P		(S3D_FRAME_PACKING_BIT | S3D_TOP_AND_BOTTOM_BIT)
+#define HDMI_3D_MANDATORY_1080I		S3D_SIDE_BY_SIDE_HALF_BIT
+
+/* Display 3D format data structure. */
+typedef struct _hdmi_3d_info {
+	uint8_t cea_edid_rev_id;
+	uint8_t s3d_info_ver;
+	uint16_t s3d_structure_all[16];
+	/* Mandatory 3D display is supported by the HDMI display. */
+	bool s3d_mandatory;
+	bool s3d_vic;
+} hdmi_3d_info_t;
+
+/* end of S3D header */
+
+struct mid_intel_hdmi_priv {
+	u32 hdmib_reg;
+	u32 save_HDMIB;
+	bool has_hdmi_sink;
+	/* Should set this when detect hotplug */
+	bool hdmi_device_connected;
+	struct mdfld_hdmi_i2c *i2c_bus;
+	/* EELD packet holder */
+	hdmi_eeld_t eeld;
+	u32 hdmi_eeld_size;
+	cea_861b_adb_t lpcm_sad;
+	bool is_hdcp_supported;
+	struct i2c_adapter *hdmi_i2c_adapter;	/* for control functions */
+	hdmi_3d_info_t s3d_info;
+	uint8_t vdb_vic[16];
+	struct drm_device *dev;
+	struct drm_display_mode *mimic_mode;
+	struct drm_display_mode *edid_preferred_mode;
+};
+
+#endif				//__IHDMI_H__
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/psb_intel_hdmi_edid.h b/drivers/external_drivers/intel_media/display/tng/drv/psb_intel_hdmi_edid.h
new file mode 100644
index 0000000..dacc909
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/psb_intel_hdmi_edid.h
@@ -0,0 +1,1021 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *	
+ */
+/* chunfeng.zhao@intel.com
+  */
+#ifndef PSB_INTEL_HDMI_EDID_H
+#define PSB_INTEL_HDMI_EDID_H
+
+//#include "..\\..\\Common\\Platform.h"
+
+////////////////////////////////////////////
+//
+// Max number of EDID extensions possible
+//
+////////////////////////////////////////////
+#define MAX_EDID_EXTENSIONS				254	//Max EDID blocks minus Block 0
+#define NUM_BASEEDID_STANDARD_TIMING	8
+#define MAX_BASEEDID_DTD_BLOCKS			4
+
+#define MAX_VIC_DEFINED					128
+
+// New Macros for supporting EDID 1.4
+
+// Macros for EDID Revision and Version
+#define EDID_VERSION_1 0x01
+#define EDID_REVISION_4 0x04
+
+// Macros for CVT and GTF related support in Monitor descriptor
+#define EDID14_CVT_TIMING_SUPPORTED 0x04
+#define EDID14_DEFAULT_GTF_SUPPORTED 0x00
+#define EDID14_SECONDARY_GTF_SUPPORTED 0x02
+
+// Macros for display device data block in CEA.
+#define EDID14_DISPLAY_DEVICE_DATA_TAG 0xFF
+#define EDID14_DISPLAY_DEVICE_DATA_CHILD_TAG 0x02
+#define EDID14_DISPLAY_DEVICE_DATA_LENGTH 0x20
+#define EDID14_DISPLAY_PORT_INTERFACE 0x09
+
+// Macros indicating digital interfaces supported by the display. 
+#define EDID14_DVI_SUPPORTED 0x01
+#define EDID14_DISPLAY_PORT_SUPPORTED 0x05
+#define EDID14_HDMI_A_SUPPORTED 0x02
+#define EDID14_HDMI_B_SUPPORTED 0x03
+
+#define EDID14_MAX_MONITOR_DESCRIPTORS 0x03
+
+// Macros related to EDID 1.4 Color Bit Depth support
+#define EDID14_COLOR_BIT_DEPTH_UNDEFINED         0x00
+#define EDID14_SIX_BITS_PER_PRIMARY_COLOR        0x06
+#define EDID14_EIGHT_BITS_PER_PRIMARY_COLOR      0x08
+#define EDID14_TEN_BITS_PER_PRIMARY_COLOR        0x0A
+#define EDID14_TWELVE_BITS_PER_PRIMARY_COLOR     0x0C
+#define EDID14_FOURTEEN_BITS_PER_PRIMARY_COLOR   0x0E
+#define EDID14_SIXTEEN_BITS_PER_PRIMARY_COLOR    0x10
+#define EDID14_INVALID_COLOR_BIT_DEPTH           0x07
+
+// Macro for showing Color Bit Depth support for existing displays
+#define EDID_EIGHT_BITS_PER_PRIMARY_COLOR        0x08
+
+// Macro for Established Timings III Block descriptor 
+#define EST_TIMINGS_III_BLOCK_TAG                0xF7
+#define EST_TIMINGS_III_BLOCK_DATA_LENGTH        0x06
+
+// Macro for indicating byte length
+#define BYTE_LENGTH                              0x08
+
+////////////////////////////////////////////
+//
+// Max number of EDID Blocks
+//
+////////////////////////////////////////////
+#define MAX_EDID_BLOCKS					255	//According to E-EDID Standard doc.
+#define EDID_BLOCK_SIZE					128
+
+// Macros for EDID Revision and Version for EDID 1.3
+#define EDID_VERSION_1_3 0x01
+#define EDID_REVISION_1_3 0x03
+
+////////////////////////////////////////////
+// Base EDID header
+////////////////////////////////////////////
+static const unsigned char BASEEDID_Header[8] =
+    { 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00 };
+
+// Display Range Limits Offset Flags.
+// Applicable only from EDID 1.4 onwards
+typedef union _edid_range_limits_flags {
+	uint8_t ucRangeLimitOffsetFlags;	// Range Limits Offset Flags
+	struct {
+		uint8_t ucVerticalRateOffset:2;	// Vertical Rate Offset
+		uint8_t ucHorizontalRateOffset:2;	// Horizontal Rate Offset
+		uint8_t ucReserved:4;	// Reserved.
+	};
+} edid_range_limits_flags_t;
+
+////////////////////////////////////////////
+//
+//      18-byte DTD block
+//  Refer Table 3.16, 3.17 & 3.18 of 
+//  EDID spec
+//
+////////////////////////////////////////////
+typedef struct _edid_dtd_timing {
+#pragma pack(1)
+
+	int16_t wPixelClock;	// Pixel clock / 10000
+
+	uint8_t ucHA_low;	// Lower 8 bits of H. active pixels
+	uint8_t ucHBL_low;	// Lower 8 bits of H. blanking
+	union {
+		uint8_t ucHAHBL_high;
+		struct {
+			uint8_t ucHBL_high:4;	// Upper 4 bits of H. blanking
+			uint8_t ucHA_high:4;	// Upper 4 bits of H. active pixels
+		};
+	};
+
+	uint8_t ucVA_low;	// Lower 8 bits of V. active lines
+	uint8_t ucVBL_low;	// Lower 8 bits of V. blanking
+	union {
+		uint8_t ucVAVBL_high;
+		struct {
+			uint8_t ucVBL_high:4;	// Upper 4 bits of V. blanking
+			uint8_t ucVA_high:4;	// Upper 4 bits of V. active pixels
+		};
+	};
+
+	uint8_t ucHSO_low;	// Lower 8 bits of H. sync offset
+	uint8_t ucHSPW_low;	// Lower 8 bits of H. sync pulse width
+	union {
+		uint8_t ucVSOVSPW_low;
+		struct {
+			uint8_t ucVSPW_low:4;	// Lower 4 bits of V. sync pulse width
+			uint8_t ucVSO_low:4;	// Lower 4 bits of V. sync offset
+		};
+	};
+	union {
+		uint8_t ucHSVS_high;
+		struct {
+			uint8_t ucVSPW_high:2;	// Upper 2 bits of V. sync pulse width
+			uint8_t ucVSO_high:2;	// Upper 2 bits of V. sync offset
+			uint8_t ucHSPW_high:2;	// Upper 2 bits of H. sync pulse width
+			uint8_t ucHSO_high:2;	// Upper 2 bits of H. sync offset
+		};
+	};
+
+	uint8_t ucHIS_low;	// Lower 8 bits of H. image size in mm
+	uint8_t ucVIS_low;	// Lower 8 bits of V. image size in mm
+	union {
+		uint8_t ucHISVIS_high;
+		struct {
+			uint8_t ucVIS_high:4;	// Upper 4 bits of V. image size
+			uint8_t ucHIS_high:4;	// Upper 4 bits of H. image size
+		};
+	};
+
+	uint8_t ucHBorder;	// H. border in pixels
+	uint8_t ucVBorder;	// V. border in pixels
+
+	union {
+		uint8_t ucFlags;	// Hsync & Vsync polarity, etc. flags
+		struct {
+			uint8_t ucStereo1:1;	// Stereo definition with bit[6:5]
+			uint8_t ucHSync_Pol:1;	// Hsync polarity (0: Neg, 1: Pos)
+			uint8_t ucVSync_Pol:1;	// Vsync polarity (0: Neg, 1: Pos)
+			uint8_t ucSync_Conf:2;	// Sync configuration
+			// 00 : Analog composite
+			// 01 : Bipolar analog composite
+			// 00 : Digital composite
+			// 00 : Digital separate
+			uint8_t ucStereo2:2;	// Stereo definition
+			// 00 : Normal display, no stereo 
+			// xx : Stereo definition with bit0
+			uint8_t ucInterlaced:1;	// Interlaced / Non-interlaced
+			// 0 : Non-interlaced
+			// 1 : Interlaced
+		};
+	};
+
+#pragma pack()
+} edid_dtd_timing_t;
+
+////////////////////////////////////////////
+//
+//      Standard timing identification
+//  Refer Table 3.15 of EDID spec
+//
+////////////////////////////////////////////
+typedef union _edid_std_timing {
+	uint16_t usStdTiming;
+
+	struct {
+#pragma pack(1)
+		uint8_t ucHActive;	// (HActive/8) - 31;
+		struct {
+			uint8_t ucRefreshRate:6;	// Refresh Rate - 60
+			uint8_t ucAspectRatio:2;	// Aspect ratio (HActive/VActive)
+			// 00:  1:1 Aspect ratio
+			// 01:  4:3 Aspect ratio
+			// 10:  5:4 Aspect ratio
+			// 11: 16:9 Aspect ratio
+		};
+	};
+#pragma pack()
+
+} edid_std_timing_t;
+////////////////////////////////////////////////////////
+// Aspect Ratio def's as per Edid 1.3 Standard Timings
+////////////////////////////////////////////////////////
+#define EDID_STD_ASPECT_RATIO_16_10   0x0
+#define EDID_STD_ASPECT_RATIO_4_3     0x1
+#define EDID_STD_ASPECT_RATIO_5_4     0x2
+#define EDID_STD_ASPECT_RATIO_16_9    0x3
+
+////////////////////////////////////////////
+//
+//      Monitor range limits
+//
+////////////////////////////////////////////
+typedef struct _monitor_range_limits {
+#pragma pack(1)
+
+	uint8_t ucMin_vert_rate;	//Min Vertical Rate,in Hz
+	uint8_t ucMax_vert_rate;	//Max Vertical Rate, in Hz
+	uint8_t ucMin_horz_rate;	//Min Horizontal Rate, in Hz
+	uint8_t ucMax_horz_rate;	//Max Horizontal Rate, in Hz
+	uint8_t ucMax_pixel_clock;	//Max Pixel Clock,Value/10 Mhz
+	uint8_t ucTiming_formula_support;	//00 - No Secondary Timing Formula Supported
+	//02 - Secondary GTF Curve Supported
+	//In EDID 1.4, this may indicate CVT support as well
+	//If timing_formula_support is 02
+	uint8_t ucReserved;	//00h
+	uint8_t ucStart_freq;	//Horizontal Freq, Value/2, KHz
+	uint8_t ucByte_C;	//C*2
+	uint8_t ucLSB_M;	//LSB of M Value
+	uint8_t ucMSB_M;	//MSB of M Value
+	uint8_t ucByte_K;	//K Value
+	uint8_t ucByte_J;	//J*2
+
+#pragma pack()
+} monitor_range_limits_t;
+
+////////////////////////////////////////////
+//
+// Color point
+//
+////////////////////////////////////////////
+typedef struct _color_point {
+#pragma pack(1)
+
+	uint8_t ucWhite_point_index_number_1;
+	uint8_t ucWhite_low_bits_1;
+	uint8_t ucWhite_x_1;
+	uint8_t ucWhite_y_1;
+	uint8_t ucWhite_gamma_1;
+	uint8_t ucWhite_point_index_number_2;
+	uint8_t ucWhite_low_bits_2;
+	uint8_t ucWhite_x_2;
+	uint8_t ucWhite_y_2;
+	uint8_t ucWhite_gamma_2;
+	uint8_t ucByte_15;
+	uint8_t ucByte_16_17[2];
+
+#pragma pack()
+} color_point_t;
+
+////////////////////////////////////////////
+//
+//      Monitor description descriptor
+//  Refer Table 3.19 & 3.20 of EDID spec
+//
+////////////////////////////////////////////
+#define BASEEDID_MONITORSN_MDDATATYPE			0xFF
+#define BASEEDID_ASCIISTRING_MDDATATYPE			0xFE
+#define BASEEDID_MONITORRANGELIMIT_MDDATATYPE	0xFD
+#define BASEEDID_MONITORNAME_MDDATATYPE			0xFC
+#define BASEEDID_COLORPOINT_MDDATATYPE			0xFB
+#define BASEEDID_STDTIMINGS_MDDATATYPE			0xFA
+
+// Structure definition for Established Timings III monitor block
+typedef struct _est_timings_iii_block {
+#pragma pack(1)
+	// The first byte will show the VESA DMTS Standard Version. 
+	// The following six bytes will have the Timings Bit Mask.
+	// Right now only 6 bytes are used for this!!!
+	// Rest is reserved.
+	uint8_t ucVesaDMTVersion;	//Byte 0 indicating the VESA DMT Version.
+	uint8_t ucTimingBitMask[6];	// Next 6 bytes indicating the Timing Bit Mask Bytes used in Est Timing III.
+	uint8_t bReserved[6];	//Next 6 bytes are reserved
+#pragma pack()
+} est_timings_iii_block_t;
+
+typedef struct _monitor_descriptor {
+#pragma pack(1)
+
+	int16_t wFlag;		// = 0000 when block is used as descriptor
+	uint8_t ucFlag0;	// Reserved
+
+	uint8_t ucDataTypeTag;
+
+	uint8_t ucFlag1;	// 00 for descriptor
+
+	union {
+
+		// Monitor S/N (ucDataTypeTag = FF)
+		uint8_t ucMonitorSerialNumber[13];
+
+		// ASCII string (ucDataTypeTag = FE)
+		uint8_t ucASCIIString[13];
+
+		// Monitor range limit (ucDataTypeTag = FD)
+		monitor_range_limits_t MonitorRangeLimits;
+
+		// Monitor name (ucDataTypeTag = FC)
+		uint8_t ucMonitorName[13];
+
+		// Color point (ucDataTypeTag = FB)
+		color_point_t ColorPoint;
+
+		// ESTABLISHED TIMINGS III BLOCK = F7 (Added for EDID 1.4)
+		est_timings_iii_block_t stEstTimingsIIIBlock;
+
+		// Standard timings (ucDataTypeTag = FA)
+		struct {
+			edid_std_timing_t ExtraStdTiming[6];
+			uint8_t ucFixedValueOfA0;	// Should be 0xA0
+		};
+
+		// Manufacturer specific value (ucDataTypeTag = 0F-00)
+		uint8_t ucMfgSpecificData[13];
+	};
+
+#pragma pack()
+} monitor_descriptor_t;
+
+////////////////////////////////////////////
+//
+//      EDID PnP ID fields
+//
+////////////////////////////////////////////
+typedef union _baseedid_pnpid {
+	uint8_t VendorProductID[10];	// Vendor / Product identification
+
+	struct {
+		uint8_t ManufacturerID[2];	// Bytes 8, 9: Manufacturer ID 
+		uint8_t ProductID[2];	// Bytes 10, 11: Product ID
+		uint8_t SerialNumber[4];	// Bytes 12-15: Serial numbers
+		uint8_t WeekOfManufacture;	// Byte 16: Week of manufacture
+		uint8_t YearOfManufacture;	// Byte 17: Year of manufacture
+	};
+} baseedid_pnpid_t;
+
+//
+// Chromaticity structure
+// Table 3.12 of Base Block for details
+//
+typedef struct _baseedid_chromaticity_block {
+	union {
+		uint8_t RedGreenLowBits;	// Byte 1
+		struct {
+			uint8_t ucGreenYLowBits:2;	// bit 1:0
+			uint8_t ucGreenXLowBits:2;	// bit 3:2
+			uint8_t ucRedYLowBits:2;	// bit 5:4
+			uint8_t ucRedXLowBits:2;	// bit 7:6
+		};
+	};
+
+	union {
+		uint8_t ucBlueWhiteLowBits;	// Byte 2
+		struct {
+			uint8_t ucWhiteYLowBits:2;	// bit 1:0
+			uint8_t ucWhiteXLowBits:2;	// bit 3:2
+			uint8_t ucBlueYLowBits:2;	// bit 5:4
+			uint8_t ucBlueXLowBits:2;	// bit 7:6
+		};
+	};
+
+	uint8_t ucRedXUpperBits;	// bit 9:2          Byte 3
+	uint8_t ucRedYUpperBits;	// bit 9:2          Byte 4
+
+	uint8_t ucGreenXUpperBits;	// bit 9:2        Byte 5
+	uint8_t ucGreenYUpperBits;	// bit 9:2        Byte 6
+
+	uint8_t ucBlueXUpperBits;	// bit 9:2         Byte 7
+	uint8_t ucBlueYUpperBits;	// bit 9:2         Byte 8
+
+	uint8_t ucWhiteXUpperBits;	// bit 9:2        Byte 9
+	uint8_t ucWhiteYUpperBits;	// bit 9:2        Byte 10
+} baseedid_chromaticity_block_t;
+
+////////////////////////////////////////////
+//
+//      128-byte EDID 1.x block0 structure
+//
+////////////////////////////////////////////
+typedef struct _baseedid_1_x {
+#pragma pack(1)
+
+	//
+	// Header: 8 bytes (Table 3.3 of EDID spec)
+	char Header[8];		// EDID1.x header "0 FFh FFh FFh FFh FFh FFh 0"
+
+	//
+	// Vendor/Product ID: 10 bytes (Table 3.4, 3.5 & 3.6 of EDID spec)
+	//baseedid_pnpid_t;
+	union {
+		uint8_t VendorProductID[10];	// Vendor / Product identification
+		struct {
+			uint8_t ManufacturerID[2];	// Bytes 8, 9: Manufacturer ID 
+			uint8_t ProductID[2];	// Bytes 10, 11: Product ID
+			uint8_t SerialNumber[4];	// Bytes 12-15: Serial numbers
+			uint8_t WeekOfManufacture;	// Byte 16: Week of manufacture
+			uint8_t YearOfManufacture;	// Byte 17: Year of manufacture
+		};
+	};
+
+	//
+	// EDID structure Version/Revision: 2 bytes (Table 3.7 of EDID spec)
+	uint8_t ucVersion;	// EDID version no.
+	uint8_t ucRevision;	// EDID revision no.
+
+	//
+	// Basic display parameters & features: 5 bytes (Table 3.8 of EDID spec)
+	union {
+		uint8_t ucVideoInput;	// Video input definition (Refer Table 3.9 of EDID spec)
+
+		struct {
+			uint8_t ucSyncInput:4;	// Sync input supported (iff ucDigitInput = 0)
+			uint8_t ucSetup:1;	// Display setup (iff ucDigitInput = 0)
+			uint8_t ucSigLevStd:2;	// Signal level Standard (iff ucDigitInput = 0)
+
+			uint8_t ucDigitInput:1;	// 1: Digital input; 0: Analog input
+		};
+	};
+
+	// Image size (Table 3.10 of EDID spec)
+	uint8_t ucMaxHIS;	// Maximum H. image size in cm
+	uint8_t ucMaxVIS;	// Maximum V. image size in cm
+
+	// Gamma (display transfer characteristic)
+	uint8_t ucGamma;	// Display gamma value  [= (gamma*100)-100]
+
+	// Feature support (Table 3.11 of EDID spec)
+	union {
+		uint8_t ucDMPSFeature;	// DPMS feature support
+
+		struct {
+			uint8_t ucGTFSupport:1;	// GTF timing support (1: Yes)
+			uint8_t ucPTM:1;	// Preferred timing is 1st DTD (1: Yes) [Must if EDID >= 1.3]
+			uint8_t ucColorSpace:1;	// Use STD color space (1:Yes) [If set ColorChars should match sRGB values in EDID spec Appendix A]
+			uint8_t ucDispType:2;	// Display type
+			// 00: Monochrome
+			// 01: R/G/B color display
+			// 10: Non R/G/B multicolor display
+			// 11: Undefined
+			uint8_t ucActiveOff:1;	// Active off (Display consumes less power/blanks out when it receives an out of range timing)
+			uint8_t ucSuspend:1;	// Suspend      (Refer VESA DPMS spec)
+			uint8_t ucStandBy:1;	// Stand-by     (Refer VESA DPMS spec)
+		};
+	};
+
+	//
+	// Phosphor or Filter Chromaticity: 10 bytes
+	uint8_t ColorChars[10];	// Color characteristics        (Refer Table 3.12 of EDID spec)
+
+	//
+	// Established timings: 3 bytes (Table 3.14 of EDID spec)
+	union {
+		uint8_t EstTiming1;
+		struct {
+			uint8_t bSupports800x600_60:1;
+			uint8_t bSupports800x600_56:1;
+			uint8_t bSupports640x480_75:1;
+			uint8_t bSupports640x480_72:1;
+			uint8_t bSupports640x480_67:1;
+			uint8_t bSupports640x480_60:1;
+			uint8_t bSupports720x400_88:1;
+			uint8_t bSupports720x400_70:1;
+		};
+	};
+	union {
+		uint8_t EstTiming2;
+		struct {
+			uint8_t bSupports1280x1024_75:1;
+			uint8_t bSupports1024x768_75:1;
+			uint8_t bSupports1024x768_70:1;
+			uint8_t bSupports1024x768_60:1;
+			uint8_t bSupports1024x768_87i:1;
+			uint8_t bSupports832x624_75:1;
+			uint8_t bSupports800x600_75:1;
+			uint8_t bSupports800x600_72:1;
+		};
+	};
+	union {
+		uint8_t MfgTimings;
+		struct {
+			uint8_t bMfgReservedTimings:7;
+			uint8_t bSupports1152x870_75:1;
+		};
+	};
+
+	//
+	// Standard timings: 8 bytes (Table 3.15 of EDID spec)
+	edid_std_timing_t StdTiming[NUM_BASEEDID_STANDARD_TIMING];	// 8 Standard timing support
+
+	//
+	// Detailed timing section - 72 bytes (4*18 bytes)
+	union {
+		edid_dtd_timing_t DTD[MAX_BASEEDID_DTD_BLOCKS];	// Four DTD data blocks
+
+		monitor_descriptor_t MonitorInfo[MAX_BASEEDID_DTD_BLOCKS];
+	};
+
+	uint8_t ucNumExtBlocks;	// Number of extension EDID blocks
+	uint8_t ucChecksum;	// Checksum of the EDID block
+
+#pragma pack()
+} baseedid_1_x_t;
+
+////////////////////////////////////////////
+//
+//      128-byte EDID 1.4 block0 structure
+//  EDID 1.4 block0 structure is different from 1.3 block0
+//  Thats why this new structure has been added 
+//  Changes are commented in the structure itself
+//
+////////////////////////////////////////////
+typedef struct _baseedid_1_4 {
+#pragma pack(1)
+
+	//
+	// Header: 8 bytes (Table 3.3 of EDID spec)
+	char Header[8];		// EDID1.x header "0 FFh FFh FFh FFh FFh FFh 0"
+
+	//
+	// Vendor/Product ID: 10 bytes (Table 3.4, 3.5 & 3.6 of EDID spec)
+	union {
+		uint8_t VendorProductID[10];	// Vendor / Product identification
+		struct {
+			uint8_t ManufacturerID[2];	// Bytes 8, 9: Manufacturer ID 
+			uint8_t ProductID[2];	// Bytes 10, 11: Product ID
+			uint8_t SerialNumber[4];	// Bytes 12-15: Serial numbers
+			uint8_t WeekOfManufacture;	// Byte 16: Week of manufacture
+			uint8_t YearOfManufacture;	// Byte 17: Year of manufacture
+		};
+	};
+
+	//
+	// EDID structure Version/Revision: 2 bytes (Table 3.7 of EDID spec)
+	uint8_t ucVersion;	// EDID version no.
+	uint8_t ucRevision;	// EDID revision no.
+
+	//
+	// Basic display parameters & features: 5 bytes (Table 3.8 of EDID spec)
+	union {
+		uint8_t ucVideoInput;	// Video input definition (Refer Table 3.9 of EDID spec)
+
+		struct {
+			uint8_t ucSyncInput:4;	// Sync input supported (iff ucDigitInput = 0)
+			uint8_t ucSetup:1;	// Display setup (iff ucDigitInput = 0)
+			uint8_t ucSigLevStd:2;	// Signal level Standard (iff ucDigitInput = 0)
+
+			uint8_t ucDigitInput:1;	// 1: Digital input; 0: Analog input
+		};
+		// This structure has been introduced to reflect the changes in EDID 1.4 spec
+		// This sturcture shows new meaning of VIDEO INPUT DEFINITION when input is digital 
+		struct {
+			uint8_t ucDigitalVideoInterface:4;	// Digital Video Interface Standard Supported.
+			uint8_t ucColorBitDepth:3;	// Color Bit Depth. 
+			// 0 0 0 -- Color Bit Depth is undefined
+			// 0 0 1 -- 6 Bits per Primary Color
+			// 0 1 0 -- 8 Bits per Primary Color
+			// 0 1 1 -- 10 Bits per Primary Color
+			// 1 0 0 -- 12 Bits per Primary Color
+			// 1 0 1 -- 14 Bits per Primary Color
+			// 1 1 0 -- 16 Bits per Primary Color
+			// 1 1 1 -- Reserved (Do Not Use)
+			uint8_t bIsDigitalVideoSignalInterface:1;	// Bit 7
+		};
+	};
+
+	// As per the EDID spec 1.4, the following two fields can be aspect ratios as well. 
+	union {
+		uint8_t ucMaxHIS;	// Maximum H. image size in cm 
+		uint8_t ucARLandscape;	// Landscape Aspect raio as per EDID 1.4 spec
+	};
+	union {
+		uint8_t ucMaxVIS;	// Maximum V. image size in cm 
+		uint8_t ucARPortrait;	// Portrait Aspect raio as per EDID 1.4 spec
+	};
+
+	// Gamma (display transfer characteristic)
+	uint8_t ucGamma;	// Display gamma value  [= (gamma*100)-100]
+
+	// Feature support (Table 3.11 of EDID spec)
+	union {
+		uint8_t ucDMPSFeature;	// DPMS feature support
+
+		struct {
+			uint8_t ucContinuousDisplay:1;	// Display is continuous or non-continuous (1: Yes)
+			uint8_t ucPTM:1;	// Preferred timing mode indicates native pixel format and native RR. 
+			uint8_t ucColorSpace:1;	// Use STD color space (1:Yes) [If set ColorChars should match sRGB values in EDID spec Appendix A]
+			uint8_t ucDispType:2;	// Display type
+			// 00: Monochrome
+			// 01: R/G/B color display
+			// 10: Non R/G/B multicolor display
+			// 11: Undefined
+			uint8_t ucActiveOff:1;	// Active off (Display consumes less power/blanks out when it receives an out of range timing)
+			uint8_t ucSuspend:1;	// Suspend      (Refer VESA DPMS spec)
+			uint8_t ucStandBy:1;	// Stand-by     (Refer VESA DPMS spec)
+		};
+
+		struct {
+			uint8_t bReserved0:1;
+			uint8_t bReserved1:1;
+			uint8_t bReserved2:1;
+			uint8_t ucColorEncodingFormat:2;	// Supported Color Encoding Format if Video Input is digital
+			// 00: RGB 4:4:4
+			// 01: RGB 4:4:4 & YCrCb 4:4:4
+			// 10: RGB 4:4:4 & YCrCb 4:2:2
+			// 11: RGB 4:4:4 & YCrCb 4:4:4 & YCrCb 4:2:2
+			uint8_t bReserved3:1;
+			uint8_t bReserved4:1;
+			uint8_t bReserved5:1;
+		};
+	};
+
+	//
+	// Phosphor or Filter Chromaticity: 10 bytes
+	uint8_t ColorChars[10];	// Color characteristics        (Refer Table 3.12 of EDID spec)
+
+	//
+	// Established timings: 3 bytes (Table 3.14 of EDID spec)
+	union {
+		uint8_t EstTiming1;
+		struct {
+			uint8_t bSupports800x600_60:1;
+			uint8_t bSupports800x600_56:1;
+			uint8_t bSupports640x480_75:1;
+			uint8_t bSupports640x480_72:1;
+			uint8_t bSupports640x480_67:1;
+			uint8_t bSupports640x480_60:1;
+			uint8_t bSupports720x400_88:1;
+			uint8_t bSupports720x400_70:1;
+		};
+	};
+	union {
+		uint8_t EstTiming2;
+		struct {
+			uint8_t bSupports1280x1024_75:1;
+			uint8_t bSupports1024x768_75:1;
+			uint8_t bSupports1024x768_70:1;
+			uint8_t bSupports1024x768_60:1;
+			uint8_t bSupports1024x768_87i:1;
+			uint8_t bSupports832x624_75:1;
+			uint8_t bSupports800x600_75:1;
+			uint8_t bSupports800x600_72:1;
+		};
+	};
+	union {
+		uint8_t MfgTimings;
+		struct {
+			uint8_t bMfgReservedTimings:7;
+			uint8_t bSupports1152x870_75:1;
+		};
+	};
+
+	//
+	// Standard timings: 8 bytes (Table 3.15 of EDID spec)
+	edid_std_timing_t StdTiming[NUM_BASEEDID_STANDARD_TIMING];	// 8 Standard timing support
+
+	// Detailed timing section - 72 bytes (4*18 bytes)
+	// As per the new spec 1.4, the first Detailed Timing Section should contain the PREFERED TIMING BLOCK
+	edid_dtd_timing_t PreferedTimingMode;
+	// The rest 54 bytes of the Detailed Timing Section. 
+	union {
+		edid_dtd_timing_t DTD[MAX_BASEEDID_DTD_BLOCKS - 1];	// Three DTD data blocks
+
+		monitor_descriptor_t MonitorInfo[MAX_BASEEDID_DTD_BLOCKS - 1];	// Three Monitor Descriptor blocks
+	};
+
+	uint8_t ucNumExtBlocks;	// Number of extension EDID blocks
+	uint8_t ucChecksum;	// Checksum of the EDID block
+
+#pragma pack()
+} baseedid_1_4_t;
+
+//*****************************************************
+//*****************************************************
+//
+// DATA STRUCTURES AND DEFINITIONS FOR CE-EXTENSION
+//
+//*****************************************************
+//*****************************************************
+
+/////////////////////////////////
+//
+//CE - Extension Block Structure
+//
+/////////////////////////////////
+typedef struct _ce_edid {
+	uint8_t ucTag;
+	uint8_t ucRevision;
+	uint8_t ucDTDOffset;
+	uint8_t ucCapabilty;
+	uint8_t data[123];
+	uint8_t ucCheckSum;
+} ce_edid_t;
+
+////////////////////////////////////////////
+//
+//CE - Video Capability Data block structure
+//
+////////////////////////////////////////////
+typedef union _video_cap_data_block {
+	uint8_t ucValue;
+	struct {
+		uint8_t ucCEScanBehavior:2;	// Indicates scan behavior of CE mode
+		uint8_t ucITScanBehavior:2;	// Indicates scan behavior of IT mode
+		uint8_t ucPTScanBehavior:2;	// Indicates scan behavior of Preferred mode
+		uint8_t ucQuantRangeSelectable:1;	// Indicates if RGB Quantization Range can be overridden
+		uint8_t ucReserved:1;
+	};
+} video_cap_data_block_t;
+
+////////////////////////////////////////////
+//
+//CEA Extn Block Byte3 structure
+//
+////////////////////////////////////////////
+typedef union _cea_ext_capability {
+	uint8_t ucValue;
+	struct {
+		uint8_t ucTotalNativeDTDs:4;	// Total number of DTDs in extension block
+		uint8_t ucSupportsYCBCR422:1;	// Indicates support for YCBCR 4:2:2
+		uint8_t ucSupportsYCBCR444:1;	// Indicates support for YCBCR 4:4:4
+		uint8_t ucSupportsBasicAudio:1;	// Indicates support for Basic audio
+		uint8_t ucUnderscansITFormats:1;	// Indicates underscan behavior of IT formats
+	};
+} cea_ext_capability_t;
+
+////////////////////////////////////////////
+//
+//CE - Video Capability Data block structure
+//
+////////////////////////////////////////////
+typedef enum {
+	FORMAT_NOT_SUPPORTED = 0,	// Format is not supported
+	ALWAYS_OVERSCANNED = 1,	// Format is always overscanned
+	ALWAYS_UNDERSCANNED = 2,	// Format is always underscanned
+	SUPPORTS_OVER_AND_UNDERSCAN = 3	// Sink supports both overscan and underscan
+} cea_scan_behavior_t;
+
+/////////////////////////////////
+//
+// #defines required for CE Etxn
+//
+/////////////////////////////////
+#define CEA_EXT_TAG 0x02
+#define CEA_EXT_SUPPORTED_VERSION 0x03
+#define CEA_EXT_861_REVISION 0x01
+
+#define CEA_USE_EXTENDED_TAG   0x7
+
+#define CEA_AUDIO_DATABLOCK         0x1
+#define CEA_VIDEO_DATABLOCK         0x2
+#define CEA_VENDOR_DATABLOCK        0x3
+#define CEA_SPEAKER_DATABLOCK       0x4
+#define CEA_VIDEO_CAP_DATABLOCK     0x0
+
+#define CEA_DATABLOCK_TAG_MASK                  0xE0
+#define CEA_DATABLOCK_LENGTH_MASK               0x1F
+#define CEA_SHORT_VIDEO_DESCRIPTOR_CODE_MASK    0x7F
+#define CEA_NATIVE_FORMAT_BIT_MASK              0x80
+
+#define CEA_HDMI_IEEE_REG_ID    0x00000C03
+#define CEA_EDID_HEADER_SIZE    0x04
+
+// Extended Data block type
+// This bit definitions are as per CE 861-D spec
+#define CEA_COLORIMETRY_DATABLOCK   0x5
+#define CE_COLORIMETRY_MD0_MASK BIT0
+#define CE_COLORIMETRY_MD1_MASK BIT1
+#define CE_COLORIMETRY_MD2_MASK BIT3
+#if 0				/* for future reference */
+//==================================================================================
+//==================================================================================
+//      DATA Structure definitions for VTB parsing.....
+//  Reference VESA Documents are VTB Extension(Release A) & CVT standard version 1.1
+//===================================================================================
+//      #defines for VTB-EXT
+//===================================================================================
+
+#define VTB_EXT_TAG	0x10
+#define	VTB_EXT_SUPPORTED_VERSION 0x03
+
+#define	VTB_MAX_DTD_TIMINGS			 6
+#define	VTB_MAX_CVT_TIMINGS			40
+#define	VTB_MAX_STANDARD_TIMINGS	61
+
+#define VTB_DTD_OFFSET		5
+#define VTB_DTD_SIZE		18
+#define	VTB_CVT_SIZE		3
+#define VTB_ST_SIZE			2
+
+// This struct is for VTB Extension block.
+typedef struct _VTB_EXT {
+	uint8_t ucTag;
+	uint8_t ucVersion;
+	uint8_t ulNumDTD;
+	uint8_t ulNumCVT;
+	uint8_t ulNumST;
+	uint8_t DATA[122];
+	uint8_t ucChecksum;
+} VTB_EXT, *PVTB_EXT;
+
+// Following struct is for CVT descriptor (Version 1.1)
+typedef struct _VTB_CVT_TIMING {
+#pragma pack(1)
+
+	uint8_t ucVA_low;	// Lower 8 bits of Vertical size. This Vsize = (vertical active lines/2)-1. 
+	//      Range for VA lines is 2 to 8192. CVT supprts only an even no. of active lines per frame.
+
+	union {
+		uint8_t ucVA_high_AR;
+		struct {
+
+			uint8_t ucReserved00:2;	//Bits 1-0 are reserved and set to 00h
+			uint8_t ucAspectRatio:2;	//      Aspect Ratio specifier bits.
+			// 00:   4:3 Aspect ratio
+			// 01:  16:9 Aspect ratio
+			// 10:  16:10 Aspect ratio
+			// 11: Undefined (Reserved)
+
+			uint8_t ucVA_high:4;	//      Upper 4 bits of Vertical Size.
+		};
+	};
+
+	union {
+		uint8_t ucRefresh_Rate_Bits;
+		struct {
+
+			uint8_t ucRR_60Hz_RB:1;	// When set, indicates 60Hz support with Reduced Blanking.
+			uint8_t ucRR_85Hz:1;	//                              ||         85Hz                         ||                                                                                              .
+			uint8_t ucRR_75Hz:1;	//                              ||         75Hz                         ||                                                                                              .
+			uint8_t ucRR_60Hz:1;	//                              ||         60Hz                         ||                                                                                              .
+			uint8_t ucRR_50Hz:1;	// When set, indicates 50Hz Refrash Rate with CRT Blanking supports specified pixel format.
+			uint8_t ucPreferredRefresh_Rate:2;	// Preferred Refresh Rate specifier bits.
+			// 00:  50 Hz
+			// 01:  60 Hz (this means either CRT blanking or Reduced Blanking whichever is supported. 
+			//                              If both are supported, then RB is preferred.)
+			// 10:  75 Hz
+			// 11:  85 Hz   
+
+			uint8_t ucReserved0:1;	// This bit is reserved and set to '0'.
+
+		};
+	};
+#pragma	pack()
+} VTB_CVT_TIMING, *PVTB_CVT_TIMING;
+
+// This struct is for storing extracted Info from CVT descriptor....
+// This is defined by author.....not based on CVT specs.
+typedef struct _CVT_INFO {
+	ULONG ulYRes;
+	ULONG ulXRes;
+	ULONG ulRRate[5];	//As max 5 Refresh Rates can be supported.
+	BOOLEAN bRed_Blank_Req[5];
+	BOOLEAN bPreferred_RR[5];	//To set flag for Preffered RR
+	ULONG ulNumRates;	//Number of Refresh rates Supported. (Max. 5)
+} CVT_INFO, *PCVT_INFO;
+#endif
+// This structure is for stroing the Display device Data retreived from CEA block
+// This is defined as per the Display Device Data Block standard.
+typedef struct _display_device_data {
+#pragma pack (1)
+	union {
+		uint8_t ucTagAndLength;	// Data Block Tag and Block Length. should be 0xFF
+		struct {
+			uint8_t ucLength:5;
+			uint8_t ucTag:3;
+		};
+	};
+	uint8_t ucChildTag;	// Child tag required as per CEA spec  should be 0x02
+	union {
+		uint8_t ucInterfaceType;
+		struct {
+			uint8_t ucNumOfChannels:4;	// Number of channels supported
+			uint8_t ucInterfaceCode:4;	// Interface code 
+		};
+	};
+	union {
+		uint8_t ucVerAndRel;
+		struct {
+			uint8_t ucRelease:4;	// Release 
+			uint8_t ucVersion:4;	// Version.
+		};
+	};
+	uint8_t ucContentProtectionSuppFlag;	// Flag indicating support for content protection. 
+	union {
+		uint16_t usClockFrequency;	// Clock Frequency
+		struct {
+			uint16_t usMinClockFrequency:6;	// First 6 bits indicates Min frequency
+			uint16_t usMaxClockFrequency:10;	// Next 10 bits indicates Max frequency
+		};
+	};
+	union {
+		uint8_t ucNativePixelFormat[4];	// Pixel Format
+		struct {
+			uint8_t ucHorizontalPixelCntLower;	// Lower byte value of the Horizontal pixel count
+			uint8_t ucHorizontalPixelCntUpper;	// Upper byte value of the Horizontal pixel count
+			uint8_t ucVerticalPixelCntLower;	//  Lower byte value of the vertical pixel count
+			uint8_t ucVerticalPixelCntUpper;	// Upper byte value of the vertical pixel count
+		};
+	};
+	uint8_t ucAspectRatio;	// Byte indicating Aspect ratio. 
+	union {
+		uint8_t ucOrientationAndRotation;
+		struct {
+			uint8_t ucScanDirection:2;	// Scan direction.
+			uint8_t ucZeroPixelLocation:2;	// Zero Pixel Location.
+			uint8_t ucRotationCapability:2;	// Indicates rotation capability
+			uint8_t ucDefaultOrientation:2;	// Default Orientation.
+		};
+	};
+	uint8_t ucSubPixelInfo;	// Sub-Pixle Information.
+	uint8_t ucHorizontalPitch;	// Horizontal Pitch
+	uint8_t ucVerticalPitch;	// Vertical Pitch
+	union {
+		uint8_t ucMiscDisplayCapabilities;
+		struct {
+			uint8_t bReserved:3;
+			uint8_t ucDeinterlacing:1;	// indicates deinterlacing support
+			uint8_t ucOverdriverNotRecommended:1;
+			uint8_t ucDirectDrive:1;	// indicates DirectDrive support
+			uint8_t ucDithering:2;	// indicates Dithering support.
+		};
+	};
+	union {
+		uint8_t ucAudioFlags;	// Flags indicating Audio details
+		struct {
+			uint8_t bReserved1:4;
+			uint8_t ucAudioInputOverride:1;	// Indicates Audio Input Override
+			uint8_t ucSeparateAudioInputs:1;	// Indicates Separate Audio Inputs
+			uint8_t ucAudioInputOnVideoInterface:1;	// Shows whether Audio input is through the video interface.
+		};
+	};
+	union {
+		uint8_t ucAudioDelayFlags;	// Audio Delay Flags
+		struct {
+			uint8_t ucAudioDelay:7;	// Absolute offset between the audio and video signals.
+			uint8_t ucAudioSign:1;	// Indicates positive or negative delay.
+		};
+	};
+	union {
+		uint8_t ucFrameRateAndModeConversion;
+		struct {
+			uint8_t ucFrameRateRange:6;	//Device Frame rate Range
+			uint8_t ucFrameRateConversion:2;	//00 � No dedicated rate conversion hardware is provided;
+			//01 � The display provides a single frame buffer
+			//10 � The display provides double-buffering
+			//11- The display provides frame-rate conversion involving interframe interpolation
+		};
+	};
+	uint8_t ucDeviceNativeRate;	// Device Native Frame rate
+	union {
+		uint8_t ucColorBitDepth;	// Color bit depth
+		struct {
+			uint8_t ucDisplayDeviceColBitDepth:4;	// Color bit depth of the display device
+			uint8_t ucInterfaceColBitDepth:4;	// color bit depth supported by the interface.h
+		};
+	};
+	uint8_t ucAddPrimaryChromaticities[8];	// Additional Primary Chromaticities.
+	union {
+		uint8_t ucResponseTimeFlags;
+		struct {
+			uint8_t ucResponseTime:7;	// Time for transition.
+			uint8_t ucBlackToWhite:1;	// if 1, then transition from black to white
+			// if 0, then transition from white to black
+		};
+	};
+	union {
+		uint8_t ucOverscanInformation;
+		struct {
+			uint8_t ucVerticalPercentage:4;	// Percentage of Overscan in vertical direction.
+			uint8_t ucHorizontalPercentage:4;	// Percentage of Overscan in horizontal direction.
+		};
+	};
+#pragma pack()
+} display_device_data_t;
+
+//=========================================================================
+//=========================================================================
+// #defines for Block Map Ext.
+//=========================================================================
+//=========================================================================
+#define BLOCK_MAP_EXT_TAG 0xF0
+
+#endif				// EDIDSTRUCTS_H
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/psb_intel_hdmi_reg.h b/drivers/external_drivers/intel_media/display/tng/drv/psb_intel_hdmi_reg.h
new file mode 100644
index 0000000..1cf5738
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/psb_intel_hdmi_reg.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *	
+ */
+/* chunfeng.zhao@intel.com
+  */
+
+#ifndef PSB_INTEL_HDMI_REG_H
+#define PSB_INTEL_HDMI_REG_H
+
+//////////////////////////////////////////
+//
+// Integrated HDMI specific registers
+//
+/////////////////////////////////////////
+
+#define RESERVED2(x,y)  x##y
+#define RESERVED1(x,y)  RESERVED2(x,y)
+#define RANDOMNUMBER	__LINE__	// __COUNTER__
+#define UNIQUENAME(ValueName) RESERVED1(ValueName, RANDOMNUMBER)
+
+/** Requird for HDMI operation */
+#define   HDMI_NULL_PACKETS_DURING_VSYNC (1 << 9)
+#define   HDMI_BORDER_ENABLE		(1 << 7)
+#define   HDMI_AUDIO_ENABLE		(1 << 6)
+/** New with 965, default is to be set */
+#define   HDMI_VSYNC_ACTIVE_HIGH	(1 << 4)
+/** New with 965, default is to be set */
+#define   HDMI_HSYNC_ACTIVE_HIGH	(1 << 3)
+#define   HDMIB_PCIE_CONCURRENCY	(1 << 3)
+#define   HDMI_DETECTED			(1 << 2)
+//
+//AUDIO configuration register
+//
+#define MDFLD_AUD_CONFIG_REG 0x69000
+#define MDFLD_AUD_CONFIG_REG_RESERVED_BITS	BITRANGE(31,25)
+typedef union _mdfld_aud_config {
+	uint32_t value;
+
+	struct {
+		const uint32_t disable_ncts:1;	//Bit 0
+		uint32_t lay_out:1;	//Bit 1 (0 - layout 0 1 - layout 1)
+		uint32_t format:2;	/*Bit [3:2] 
+					 * 00 - l-PCM or IEC 61937 
+					 * 01 - High bit rate IEC 61937 stream packet)
+					 * 10 - Not supported
+					 * 11 - Not supported
+					 */
+		uint32_t num_audio_ch:2;	/*Bit [5:4]
+						 * 00 - 2 channels(stereo)
+						 * 01 - 3 or 4 channels
+						 * 10 - 5 or 6 channels
+						 * 11 - 7 or 8 channels
+						 */
+		uint32_t UNIQUENAME(Reserved):1;	//Bit 6
+		uint32_t b_bit_enabled:1;	/* Bit 7 (0 - B bit set only for sub-packet 0
+						 *        1 - B bit set for all valid sub packet)
+						 */
+		uint32_t sample_flat_bit:1;	//Bit 8
+		uint32_t validity_bit:1;	//Bit 9 (1 - set V bit in sub-frame 0 - clear V bit(debugging, testing))
+		uint32_t user_bit:1;	//Bit 10 (1 - set U bit in sub frame 0 - clear U bit(default)
+		uint32_t underrun_packet_bit:1;	//Bit 11 (1 - send underrun packet 0 - send null packet)
+		uint32_t UNIQUENAME(Reserved):20;	//Bit [31:12]
+	};
+} mdfld_aud_config_t;
+
+//
+// Audio control state register
+//
+#define MDFLD_AUD_CNTL_ST_REG 0x69060
+#define MDFLD_AUD_CNTL_ST_RESERVED_BITS	(BITRANGE(14,4) | BITRANGE(31,25))
+// Note => DIP : Data Island Packet
+typedef union _mdfld_aud_cntl {
+	uint32_t value;
+
+	struct {
+		uint32_t dip_ram_access_address:4;	// bit 3:0
+		uint32_t UNIQUENAME(Reserved):11;	// bit 14:4
+		uint32_t cp_ready:1;	// bit 15 
+		uint32_t video_dip_trans_freq:2;	// bit 17:16
+		uint32_t dip_buffer_index:3;	// bit 20:18
+		uint32_t enable_dip_type:4;	// bit 24:21
+		uint32_t UNIQUENAME(Reserved):7;	// bit 31:25
+	};
+
+} mdfld_aud_cntl_t;
+
+// HDMI Audio Data Island Packet Data
+//
+#define MDFLD_HDMI_AUDPAC_DATA_REG 0x69114
+
+typedef union _mdfld_hdmi_audpac_data {
+	uint32_t value;
+
+	struct {
+		uint32_t audio_dip_data:32;	// bit 31:0
+	};
+} mdfld_hdmi_audpac_data_t;
+
+#endif				// PSB_INTEL_HDMI_REG_H
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/psb_intel_reg.h b/drivers/external_drivers/intel_media/display/tng/drv/psb_intel_reg.h
new file mode 100644
index 0000000..5040c57
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/psb_intel_reg.h
@@ -0,0 +1,1378 @@
+/*
+ * Copyright (c) 2009, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#ifndef __PSB_INTEL_REG_H__
+#define __PSB_INTEL_REG_H__
+
+#define BLC_PWM_CTL		0x61254
+#define BLC_PWM_CTL2		0x61250
+#define BLC_PWM_CTL_C		0x62254
+#define BLC_PWM_CTL2_C		0x62250
+#define BACKLIGHT_MODULATION_FREQ_SHIFT		(17)
+/**
+ * This is the most significant 15 bits of the number of backlight cycles in a
+ * complete cycle of the modulated backlight control.
+ *
+ * The actual value is this field multiplied by two.
+ */
+#define BACKLIGHT_MODULATION_FREQ_MASK		(0x7fff << 17)
+#define BLM_LEGACY_MODE				(1 << 16)
+/**
+ * This is the number of cycles out of the backlight modulation cycle for which
+ * the backlight is on.
+ *
+ * This field must be no greater than the number of cycles in the complete
+ * backlight modulation cycle.
+ */
+#define BACKLIGHT_DUTY_CYCLE_SHIFT		(0)
+#define BACKLIGHT_DUTY_CYCLE_MASK		(0xffff)
+
+#define I915_GCFGC			0xf0
+#define I915_LOW_FREQUENCY_ENABLE		(1 << 7)
+#define I915_DISPLAY_CLOCK_190_200_MHZ		(0 << 4)
+#define I915_DISPLAY_CLOCK_333_MHZ		(4 << 4)
+#define I915_DISPLAY_CLOCK_MASK			(7 << 4)
+
+#define I855_HPLLCC			0xc0
+#define I855_CLOCK_CONTROL_MASK			(3 << 0)
+#define I855_CLOCK_133_200			(0 << 0)
+#define I855_CLOCK_100_200			(1 << 0)
+#define I855_CLOCK_100_133			(2 << 0)
+#define I855_CLOCK_166_250			(3 << 0)
+
+/* I830 CRTC registers */
+#define HTOTAL_A	0x60000
+#define HBLANK_A	0x60004
+#define HSYNC_A 	0x60008
+#define VTOTAL_A	0x6000c
+#define VBLANK_A	0x60010
+#define VSYNC_A 	0x60014
+#define PIPEASRC	0x6001c
+#define BCLRPAT_A	0x60020
+#define VSYNCSHIFT_A	0x60028
+
+#define HTOTAL_B	0x61000
+#define HBLANK_B	0x61004
+#define HSYNC_B 	0x61008
+#define VTOTAL_B	0x6100c
+#define VBLANK_B	0x61010
+#define VSYNC_B 	0x61014
+#define PIPEBSRC	0x6101c
+#define BCLRPAT_B	0x61020
+#define VSYNCSHIFT_B	0x61028
+
+#define HTOTAL_C	0x62000
+#define HBLANK_C	0x62004
+#define HSYNC_C 	0x62008
+#define VTOTAL_C	0x6200c
+#define VBLANK_C	0x62010
+#define VSYNC_C 	0x62014
+#define PIPECSRC	0x6201c
+#define BCLRPAT_C	0x62020
+#define VSYNCSHIFT_C	0x62028
+
+#define PP_STATUS	0x61200
+#define PP_ON					(1 << 31)
+/**
+ * Indicates that all dependencies of the panel are on:
+ *
+ * - PLL enabled
+ * - pipe enabled
+ * - LVDS/DVOB/DVOC on
+ */
+#define PP_READY				(1 << 30)
+#define PP_SEQUENCE_NONE			(0 << 28)
+#define PP_SEQUENCE_ON				(1 << 28)
+#define PP_SEQUENCE_OFF			(2 << 28)
+#define PP_SEQUENCE_MASK			0x30000000
+#define PP_CONTROL	0x61204
+#define POWER_TARGET_ON			(1 << 0)
+
+#define LVDSPP_ON       0x61208
+#define LVDSPP_OFF      0x6120c
+#define PP_CYCLE        0x61210
+
+#define PFIT_CONTROL	0x61230
+
+#define PFIT_ENABLE                            (1 << 31)
+#define PFIT_PIPE_MASK                         (3 << 29)
+#define PFIT_PIPE_SHIFT                        29
+#define PFIT_PIPE_SELECT_A                     (0 << PFIT_PIPE_SHIFT)
+#define PFIT_PIPE_SELECT_B                     (1 << PFIT_PIPE_SHIFT)
+#define PFIT_PIPE_SELECT_C                     (2 << PFIT_PIPE_SHIFT)
+#define PFIT_PIPE_SELECT_D                     (3 << PFIT_PIPE_SHIFT)
+#define PFIT_SCALING_MODE_PILLARBOX            (1 << 27)
+#define PFIT_SCALING_MODE_LETTERBOX            (3 << 26)
+#define VERT_INTERP_DISABLE                    (0 << 10)
+#define VERT_INTERP_BILINEAR                   (1 << 10)
+#define VERT_INTERP_MASK                       (3 << 10)
+#define VERT_AUTO_SCALE                        (1 << 9)
+#define HORIZ_INTERP_DISABLE                   (0 << 6)
+#define HORIZ_INTERP_BILINEAR                  (1 << 6)
+#define HORIZ_INTERP_MASK                      (3 << 6)
+#define HORIZ_AUTO_SCALE                       (1 << 5)
+
+#if 0
+# define PFIT_ENABLE				(1 << 31)
+# define PFIT_PIPE_MASK				(3 << 29)
+# define PFIT_PIPE_SHIFT			29
+# define PFIT_PIPE_SELECT_A			(0 << PFIT_PIPE_SHIFT)
+# define PFIT_PIPE_SELECT_B			(1 << PFIT_PIPE_SHIFT)
+# define PFIT_PIPE_SELECT_C			(2 << PFIT_PIPE_SHIFT)
+# define PFIT_PIPE_SELECT_D			(3 << PFIT_PIPE_SHIFT)
+# define PFIT_SCALING_MODE_SHIFT		26
+# define PFIT_SCALING_MODE_AUTO			(0 << PFIT_SCALING_MODE_SHIFT)
+# define PFIT_SCALING_MODE_PROGRAM		(1 << PFIT_SCALING_MODE_SHIFT)
+# define PFIT_SCALING_MODE_PILLARBOX		(2 << PFIT_SCALING_MODE_SHIFT)
+# define PFIT_SCALING_MODE_LETTERBOX		(3 << PFIT_SCALING_MODE_SHIFT)
+# define VERT_INTERP_DISABLE			(0 << 10)
+# define VERT_INTERP_BILINEAR			(1 << 10)
+# define VERT_INTERP_MASK			(3 << 10)
+# define VERT_AUTO_SCALE			(1 << 9)
+# define HORIZ_INTERP_DISABLE			(0 << 6)
+# define HORIZ_INTERP_BILINEAR			(1 << 6)
+# define HORIZ_INTERP_MASK			(3 << 6)
+# define HORIZ_AUTO_SCALE			(1 << 5)
+#endif
+
+# define PANEL_8TO6_DITHER_ENABLE		(1 << 3)
+#define PANEL_8TO6_DITHER_ENABLE		(1 << 3)
+
+#define PFIT_PGM_RATIOS	0x61234
+#define PFIT_VERT_SCALE_MASK			0xfff00000
+#define PFIT_HORIZ_SCALE_MASK			0x0000fff0
+
+#define PFIT_AUTO_RATIOS	0x61238
+
+#define PFIT_FRACTIONAL_VALUE                   (1<<12)
+#define PFIT_VERT_MSB_SHIFT                     28
+#define PFIT_HORIZ_MSB_SHIFT                    12
+#define PFIT_VERT_SCALE_SHIFT                   16
+#define PFIT_HORIZ_SCALE_SHIFT                  0
+
+
+#define DPLL_A		0x06014
+#define DPLL_B		0x06018
+#define DPLL_VCO_ENABLE			(1 << 31)
+#define DPLL_DVO_HIGH_SPEED			(1 << 30)
+#define DPLL_SYNCLOCK_ENABLE			(1 << 29)
+#define DPLL_VGA_MODE_DIS			(1 << 28)
+#define DPLLB_MODE_DAC_SERIAL			(1 << 26)	/* i915 */
+#define DPLLB_MODE_LVDS			(2 << 26)	/* i915 */
+#define DPLL_MODE_MASK				(3 << 26)
+#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10	(0 << 24)	/* i915 */
+#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5		(1 << 24)	/* i915 */
+#define DPLLB_LVDS_P2_CLOCK_DIV_14		(0 << 24)	/* i915 */
+#define DPLLB_LVDS_P2_CLOCK_DIV_7		(1 << 24)	/* i915 */
+#define DPLL_P2_CLOCK_DIV_MASK			0x03000000	/* i915 */
+#define DPLL_FPA01_P1_POST_DIV_MASK		0x00ff0000	/* i915 */
+/**
+ *  The i830 generation, in DAC/serial mode, defines p1 as two plus this
+ * bitfield, or just 2 if PLL_P1_DIVIDE_BY_TWO is set.
+ */
+#define DPLL_FPA01_P1_POST_DIV_MASK_I830	0x001f0000
+/**
+ * The i830 generation, in LVDS mode, defines P1 as the bit number set within
+ * this field (only one bit may be set).
+ */
+#define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS	0x003f0000
+#define DPLL_FPA01_P1_POST_DIV_SHIFT		16
+#define PLL_P2_DIVIDE_BY_4		(1 << 23)	/* i830, required
+							 * in DVO non-gang */
+#define PLL_P1_DIVIDE_BY_TWO			(1 << 21)	/* i830 */
+#define PLL_REF_INPUT_DREFCLK			(0 << 13)
+#define PLL_REF_INPUT_TVCLKINA			(1 << 13)	/* i830 */
+#define PLL_REF_INPUT_TVCLKINBC		(2 << 13)	/* SDVO
+							 * TVCLKIN */
+#define PLLB_REF_INPUT_SPREADSPECTRUMIN	(3 << 13)
+#define PLL_REF_INPUT_MASK			(3 << 13)
+#define PLL_LOAD_PULSE_PHASE_SHIFT		9
+/*
+ * Parallel to Serial Load Pulse phase selection.
+ * Selects the phase for the 10X DPLL clock for the PCIe
+ * digital display port. The range is 4 to 13; 10 or more
+ * is just a flip delay. The default is 6
+ */
+#define PLL_LOAD_PULSE_PHASE_MASK	(0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
+#define DISPLAY_RATE_SELECT_FPA1	(1 << 8)
+
+/**
+ * SDVO multiplier for 945G/GM. Not used on 965.
+ *
+ * \sa DPLL_MD_UDI_MULTIPLIER_MASK
+ */
+#define SDVO_MULTIPLIER_MASK			0x000000ff
+#define SDVO_MULTIPLIER_SHIFT_HIRES		4
+#define SDVO_MULTIPLIER_SHIFT_VGA		0
+
+/** @defgroup DPLL_MD
+ * @{
+ */
+/** Pipe A SDVO/UDI clock multiplier/divider register for G965. */
+#define DPLL_A_MD		0x0601c
+/** Pipe B SDVO/UDI clock multiplier/divider register for G965. */
+#define DPLL_B_MD		0x06020
+/**
+ * UDI pixel divider, controlling how many pixels are stuffed into a packet.
+ *
+ * Value is pixels minus 1.  Must be set to 1 pixel for SDVO.
+ */
+#define DPLL_MD_UDI_DIVIDER_MASK		0x3f000000
+#define DPLL_MD_UDI_DIVIDER_SHIFT		24
+/** UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */
+#define DPLL_MD_VGA_UDI_DIVIDER_MASK		0x003f0000
+#define DPLL_MD_VGA_UDI_DIVIDER_SHIFT		16
+/**
+ * SDVO/UDI pixel multiplier.
+ *
+ * SDVO requires that the bus clock rate be between 1 and 2 Ghz, and the bus
+ * clock rate is 10 times the DPLL clock.  At low resolution/refresh rate
+ * modes, the bus rate would be below the limits, so SDVO allows for stuffing
+ * dummy bytes in the datastream at an increased clock rate, with both sides of
+ * the link knowing how many bytes are fill.
+ *
+ * So, for a mode with a dotclock of 65Mhz, we would want to double the clock
+ * rate to 130Mhz to get a bus rate of 1.30Ghz.  The DPLL clock rate would be
+ * set to 130Mhz, and the SDVO multiplier set to 2x in this register and
+ * through an SDVO command.
+ *
+ * This register field has values of multiplication factor minus 1, with
+ * a maximum multiplier of 5 for SDVO.
+ */
+#define DPLL_MD_UDI_MULTIPLIER_MASK		0x00003f00
+#define DPLL_MD_UDI_MULTIPLIER_SHIFT		8
+/** SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
+ * This best be set to the default value (3) or the CRT won't work. No,
+ * I don't entirely understand what this does...
+ */
+#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK	0x0000003f
+#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT	0
+/** @} */
+
+#define DPLL_TEST		0x606c
+#define DPLLB_TEST_SDVO_DIV_1			(0 << 22)
+#define DPLLB_TEST_SDVO_DIV_2			(1 << 22)
+#define DPLLB_TEST_SDVO_DIV_4			(2 << 22)
+#define DPLLB_TEST_SDVO_DIV_MASK		(3 << 22)
+#define DPLLB_TEST_N_BYPASS			(1 << 19)
+#define DPLLB_TEST_M_BYPASS			(1 << 18)
+#define DPLLB_INPUT_BUFFER_ENABLE		(1 << 16)
+#define DPLLA_TEST_N_BYPASS			(1 << 3)
+#define DPLLA_TEST_M_BYPASS			(1 << 2)
+#define DPLLA_INPUT_BUFFER_ENABLE		(1 << 0)
+
+#define ADPA			0x61100
+#define ADPA_DAC_ENABLE 	(1<<31)
+#define ADPA_DAC_DISABLE	0
+#define ADPA_PIPE_SELECT_MASK	(1<<30)
+#define ADPA_PIPE_A_SELECT	0
+#define ADPA_PIPE_B_SELECT	(1<<30)
+#define ADPA_USE_VGA_HVPOLARITY (1<<15)
+#define ADPA_SETS_HVPOLARITY	0
+#define ADPA_VSYNC_CNTL_DISABLE (1<<11)
+#define ADPA_VSYNC_CNTL_ENABLE	0
+#define ADPA_HSYNC_CNTL_DISABLE (1<<10)
+#define ADPA_HSYNC_CNTL_ENABLE	0
+#define ADPA_VSYNC_ACTIVE_HIGH	(1<<4)
+#define ADPA_VSYNC_ACTIVE_LOW	0
+#define ADPA_HSYNC_ACTIVE_HIGH	(1<<3)
+#define ADPA_HSYNC_ACTIVE_LOW	0
+
+#define FPA0		0x06040
+#define FPA1		0x06044
+#define FPB0		0x06048
+#define FPB1		0x0604c
+#define FP_N_DIV_MASK				0x003f0000
+#define FP_N_DIV_SHIFT				16
+#define FP_M1_DIV_MASK				0x00003f00
+#define FP_M1_DIV_SHIFT			8
+#define FP_M2_DIV_MASK				0x0000003f
+#define FP_M2_DIV_SHIFT			0
+
+#define PORT_HOTPLUG_EN		0x61110
+#define SDVOB_HOTPLUG_INT_EN			(1 << 26)
+#define SDVOC_HOTPLUG_INT_EN			(1 << 25)
+#define TV_HOTPLUG_INT_EN			(1 << 18)
+#define CRT_HOTPLUG_INT_EN			(1 << 9)
+#define CRT_HOTPLUG_FORCE_DETECT		(1 << 3)
+
+#define PORT_HOTPLUG_STAT	0x61114
+#define CRT_HOTPLUG_INT_STATUS			(1 << 11)
+#define TV_HOTPLUG_INT_STATUS			(1 << 10)
+#define CRT_HOTPLUG_MONITOR_MASK		(3 << 8)
+#define CRT_HOTPLUG_MONITOR_COLOR		(3 << 8)
+#define CRT_HOTPLUG_MONITOR_MONO		(2 << 8)
+#define CRT_HOTPLUG_MONITOR_NONE		(0 << 8)
+#define SDVOC_HOTPLUG_INT_STATUS		(1 << 7)
+#define SDVOB_HOTPLUG_INT_STATUS		(1 << 6)
+
+#define SDVOB			0x61140
+#define SDVOC			0x61160
+#define SDVO_ENABLE				(1 << 31)
+#define SDVO_PIPE_B_SELECT			(1 << 30)
+#define SDVO_STALL_SELECT			(1 << 29)
+#define SDVO_INTERRUPT_ENABLE			(1 << 26)
+/**
+ * 915G/GM SDVO pixel multiplier.
+ *
+ * Programmed value is multiplier - 1, up to 5x.
+ *
+ * \sa DPLL_MD_UDI_MULTIPLIER_MASK
+ */
+#define SDVO_PORT_MULTIPLY_MASK			(7 << 23)
+#define SDVO_PORT_MULTIPLY_SHIFT		23
+#define SDVO_PHASE_SELECT_MASK			(15 << 19)
+#define SDVO_PHASE_SELECT_DEFAULT		(6 << 19)
+#define SDVO_CLOCK_OUTPUT_INVERT		(1 << 18)
+#define SDVOC_GANG_MODE				(1 << 16)
+#define SDVO_BORDER_ENABLE			(1 << 7)
+#define SDVOB_PCIE_CONCURRENCY			(1 << 3)
+#define SDVO_DETECTED				(1 << 2)
+/* Bits to be preserved when writing */
+#define SDVOB_PRESERVE_MASK		((1 << 17) | (1 << 16) | (1 << 14))
+#define SDVOC_PRESERVE_MASK			(1 << 17)
+
+/** @defgroup LVDS
+ * @{
+ */
+/**
+ * This register controls the LVDS output enable, pipe selection, and data
+ * format selection.
+ *
+ * All of the clock/data pairs are force powered down by power sequencing.
+ */
+#define LVDS			0x61180
+/**
+ * Enables the LVDS port.  This bit must be set before DPLLs are enabled, as
+ * the DPLL semantics change when the LVDS is assigned to that pipe.
+ */
+#define LVDS_PORT_EN			(1 << 31)
+/** Selects pipe B for LVDS data.  Must be set on pre-965. */
+#define LVDS_PIPEB_SELECT		(1 << 30)
+
+/** Turns on border drawing to allow centered display. */
+#define LVDS_BORDER_EN                 (1 << 15)
+
+/**
+ * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
+ * pixel.
+ */
+#define LVDS_A0A2_CLKA_POWER_MASK	(3 << 8)
+#define LVDS_A0A2_CLKA_POWER_DOWN	(0 << 8)
+#define LVDS_A0A2_CLKA_POWER_UP	(3 << 8)
+/**
+ * Controls the A3 data pair, which contains the additional LSBs for 24 bit
+ * mode.  Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be
+ * on.
+ */
+#define LVDS_A3_POWER_MASK		(3 << 6)
+#define LVDS_A3_POWER_DOWN		(0 << 6)
+#define LVDS_A3_POWER_UP		(3 << 6)
+/**
+ * Controls the CLKB pair.  This should only be set when LVDS_B0B3_POWER_UP
+ * is set.
+ */
+#define LVDS_CLKB_POWER_MASK		(3 << 4)
+#define LVDS_CLKB_POWER_DOWN		(0 << 4)
+#define LVDS_CLKB_POWER_UP		(3 << 4)
+
+/**
+ * Controls the B0-B3 data pairs.  This must be set to match the DPLL p2
+ * setting for whether we are in dual-channel mode.  The B3 pair will
+ * additionally only be powered up when LVDS_A3_POWER_UP is set.
+ */
+#define LVDS_B0B3_POWER_MASK		(3 << 2)
+#define LVDS_B0B3_POWER_DOWN		(0 << 2)
+#define LVDS_B0B3_POWER_UP		(3 << 2)
+
+#define PIPEADSL 0x70000
+#define PIPE_LINE_CNT_MASK	0x1fff
+#define PIPEACONF 0x70008
+#define PIPEACONF_ENABLE	(1<<31)
+#define PIPEACONF_DISABLE	0
+#define PIPEACONF_DOUBLE_WIDE	(1<<30)
+#define PIPECONF_ACTIVE		(1<<30)
+#define I965_PIPECONF_ACTIVE	(1<<30)
+#define PIPECONF_DSIPLL_LOCK	(1<<29)
+#define PIPEACONF_SINGLE_WIDE	0
+#define PIPEACONF_PIPE_UNLOCKED 0
+#define PIPEACONF_DSR		(1<<26)
+#define PIPEACONF_PIPE_LOCKED	(1<<25)
+#define PIPEACONF_PALETTE	0
+#define PIPECONF_FORCE_BORDER	(1<<25)
+#define PIPEACONF_GAMMA 	(1<<24)
+#define PIPECONF_PROGRESSIVE	(0 << 21)
+#define PIPECONF_INTERLACE_W_FIELD_INDICATION	(6 << 21)
+#define PIPECONF_INTERLACE_FIELD_0_ONLY		(7 << 21)
+#define PIPEACONF_COLOR_MATRIX_ENABLE (1 << 20)
+#define PIPECONF_PLANE_OFF 	(1<<19)
+#define PIPECONF_CURSOR_OFF 	(1<<18)
+
+#define PIPEBDSL 0x71000
+#define PIPEBCONF 0x71008
+#define PIPEBCONF_ENABLE	(1<<31)
+#define PIPEBCONF_DISABLE	0
+#define PIPEBCONF_DOUBLE_WIDE	(1<<30)
+#define PIPEBCONF_DISABLE	0
+#define PIPEBCONF_GAMMA 	(1<<24)
+#define PIPEBCONF_PALETTE	0
+
+#define PIPECDSL 0x72000
+#define PIPECCONF 0x72008
+
+#define PIPEBGCMAXRED		0x71010
+#define PIPEBGCMAXGREEN		0x71014
+#define PIPEBGCMAXBLUE		0x71018
+
+#define PIPEASTAT               0x70024
+#define PIPEBSTAT		0x71024
+#define PIPECSTAT		0x72024
+#define PIPE_VBLANK_INTERRUPT_STATUS         (1UL<<1)
+#define PIPE_START_VBLANK_INTERRUPT_STATUS   (1UL<<2)
+#define PIPE_VBLANK_CLEAR                    (1 << 1)
+#define PIPE_VBLANK_STATUS                   (1 << 1)
+#define PIPE_TE_STATUS		             (1UL<<6)
+#define PIPE_DPST_EVENT_STATUS		     (1UL<<7)
+#define PIPE_VSYNC_CLEAR                     (1UL<<9)
+#define PIPE_VSYNC_STATUS                    (1UL<<9)
+#define PIPE_REPEATED_FRAME_STATUS	     (1UL<<11)
+#define PIPE_HDMI_AUDIO_UNDERRUN_STATUS      (1UL<<10)
+#define PIPE_FRAME_DONE_STATUS		     (1UL<<10)
+#define PIPE_HDMI_AUDIO_BUFFER_DONE_STATUS   (1UL<<11)
+#define PIPE_CMD_DONE_STATUS		     (1UL<<14)
+#define PIPE_VBLANK_INTERRUPT_ENABLE         (1UL<<17)
+#define PIPE_START_VBLANK_INTERRUPT_ENABLE   (1UL<<18)
+#define PIPE_TE_ENABLE		             (1UL<<22)
+#define PIPE_DPST_EVENT_ENABLE               (1UL<<23)
+#define PIPE_VSYNC_ENABL                     (1UL<<25)
+#define PIPE_HDMI_AUDIO_UNDERRUN             (1UL<<26)
+#define PIPE_FRAME_DONE_ENABLE		     (1UL<<26)
+#define PIPE_HDMI_AUDIO_BUFFER_DONE          (1UL<<27)
+#define PIPE_REPEATED_FRAME_ENABLE           (1UL<<27)
+#define PIPE_CMD_DONE_ENABLE		     (1UL<<30)
+#define PIPE_HDMI_AUDIO_INT_MASK (PIPE_HDMI_AUDIO_UNDERRUN | PIPE_HDMI_AUDIO_BUFFER_DONE)
+#define PIPE_EVENT_MASK (BIT29|BIT28|BIT27|BIT26|BIT25|BIT24|BIT23|BIT22|BIT21 \
+		|BIT20|BIT18|BIT17|BIT16)
+#define PIPE_VBLANK_MASK (BIT25|BIT24|BIT18|BIT17)
+#define HISTOGRAM_INT_CONTROL		0x61268
+#define HISTOGRAM_BIN_DATA		0X61264
+#define HISTOGRAM_LOGIC_CONTROL		0x61260
+#define PWM_CONTROL_LOGIC		0x61250
+#define PIPE_HOTPLUG_INTERRUPT_STATUS	(1UL<<10)
+#define HISTOGRAM_INTERRUPT_ENABLE	(1UL<<31)
+#define HISTOGRAM_LOGIC_ENABLE		(1UL<<31)
+#define PWM_LOGIC_ENABLE		(1UL<<31)
+#define PWM_PHASEIN_ENABLE		(1UL<<25)
+#define PWM_PHASEIN_INT_ENABLE		(1UL<<24)
+#define PWM_PHASEIN_VB_COUNT		0x00001f00
+#define PWM_PHASEIN_INC			0x0000001f
+#define HISTOGRAM_INT_CTRL_CLEAR	(1UL<<30)
+#define DPST_YUV_LUMA_MODE		0
+
+struct dpst_ie_histogram_control {
+	union {
+		uint32_t data;
+		struct {
+			uint32_t bin_reg_index:7;
+			uint32_t reserved:4;
+			uint32_t bin_reg_func_select:1;
+			uint32_t sync_to_phase_in:1;
+			uint32_t alt_enhancement_mode:2;
+			uint32_t reserved1:1;
+			uint32_t sync_to_phase_in_count:8;
+			uint32_t histogram_mode_select:1;
+			uint32_t reserved2:4;
+			uint32_t ie_pipe_assignment:1;
+			uint32_t ie_mode_table_enabled:1;
+			uint32_t ie_histogram_enable:1;
+		};
+	};
+};
+
+struct dpst_guardband {
+	union {
+		uint32_t data;
+		struct {
+			uint32_t guardband:22;
+			uint32_t guardband_interrupt_delay:8;
+			uint32_t interrupt_status:1;
+			uint32_t interrupt_enable:1;
+		};
+	};
+};
+
+#define PIPEAFRAMEHIGH		0x70040
+#define PIPEAFRAMEPIXEL		0x70044
+#define PIPEBFRAMEHIGH		0x71040
+#define PIPEBFRAMEPIXEL		0x71044
+#define PIPECFRAMEHIGH		0x72040
+#define PIPECFRAMEPIXEL		0x72044
+#define PIPE_FRAME_HIGH_MASK    0x0000ffff
+#define PIPE_FRAME_HIGH_SHIFT   0
+#define PIPE_FRAME_LOW_MASK     0xff000000
+#define PIPE_FRAME_LOW_SHIFT    24
+#define PIPE_PIXEL_MASK         0x00ffffff
+#define PIPE_PIXEL_SHIFT        0
+
+#define GCI_CTRL		0x650c
+#define DSPARB			0x70030
+#define DSPARB2			0x7002C
+#define DSPFW1			0x70034
+#define DSPFW2			0x70038
+#define DSPFW3			0x7003c
+#define DSPFW4			0x70050
+#define DSPFW5			0x70054
+#define DSPFW6			0x70058
+#define DSPFW7			0x70070
+#define DDL1			0x70060
+#define DDL2			0x70064
+#define DDL3			0x70068
+#define DDL4			0x7006C
+#define DSPCHICKENBIT		0x70400
+#define DSPACNTR		0x70180
+#define DSPBCNTR		0x71180
+#define DSPCCNTR		0x72180
+#define DSPDCNTR		0x73180
+#define DSPECNTR		0x74180
+#define DSPFCNTR		0x75180
+#define DISPLAY_PLANE_ENABLE 			(1<<31)
+#define DISPLAY_PLANE_DISABLE			0
+#define DISPPLANE_GAMMA_ENABLE			(1<<30)
+#define DISPPLANE_GAMMA_DISABLE			0
+#define DISPPLANE_PIXFORMAT_MASK		(0xf<<26)
+#define DISPPLANE_8BPP				(0x2<<26)
+#define DISPPLANE_15_16BPP			(0x4<<26)
+#define DISPPLANE_16BPP				(0x5<<26)
+#define DISPPLANE_32BPP_NO_ALPHA 		(0x6<<26)
+#define DISPPLANE_32BPP				(0x7<<26)
+#define DISPPLANE_PREMULT_DISABLE		(0x1<<23)
+#define DISPPLANE_STEREO_ENABLE			(1<<25)
+#define DISPPLANE_STEREO_DISABLE		0
+#define DISPPLANE_SEL_PIPE_MASK			(3<<24)
+#define DISPPLANE_SEL_PIPE_POS			24
+#define DISPPLANE_SEL_PIPE_A			(0<<24)
+#define DISPPLANE_SEL_PIPE_B			(1<<24)
+#define DISPPLANE_SEL_PIPE_C			(2<<24)
+#define DISPPLANE_SRC_KEY_ENABLE		(1<<22)
+#define DISPPLANE_SRC_KEY_DISABLE		0
+#define DISPPLANE_LINE_DOUBLE			(1<<20)
+#define DISPPLANE_NO_LINE_DOUBLE		0
+#define DISPPLANE_STEREO_POLARITY_FIRST		0
+#define DISPPLANE_STEREO_POLARITY_SECOND	(1<<18)
+/* plane B only */
+#define DISPPLANE_ALPHA_TRANS_ENABLE		(1<<15)
+#define DISPPLANE_ALPHA_TRANS_DISABLE		0
+#define S3D_SPRITE_ORDER_BITS			(1<<14)
+#define S3D_SPRITE_ORDER_A_FIRST		(1<<14)
+#define S3D_SPRITE_INTERLEAVING_BITS		(0x3<<12)
+#define S3D_SPRITE_INTERLEAVING_LINE		(0x1<<12)
+#define S3D_SPRITE_INTERLEAVING_PIXEL		(0x2<<12)
+#define DISPPLANE_SPRITE_ABOVE_DISPLAYA		0
+#define DISPPLANE_SPRITE_ABOVE_OVERLAY		(1)
+#define DISPPLANE_BOTTOM			(4)
+
+#define DSPABASE		0x70184
+#define DSPALINOFF		0x70184
+#define DSPASTRIDE		0x70188
+
+#define DSPBBASE		0x71184
+#define DSPBLINOFF		0X71184
+#define DSPBADDR		DSPBBASE
+#define DSPBSTRIDE		0x71188
+
+#define DSPCBASE		0x72184
+#define DSPCLINOFF		0x72184
+#define DSPCSTRIDE		0x72188
+
+#define DSPDLINOFF		0x73184
+#define DSPDSTRIDE		0x73188
+
+#define DSPAKEYVAL		0x70194
+#define DSPAKEYMASK		0x70198
+
+#define DSPAPOS			0x7018C	/* reserved */
+#define DSPASIZE		0x70190
+#define DSPBPOS			0x7118C
+#define DSPBSIZE		0x71190
+#define DSPCPOS			0x7218C
+#define DSPCSIZE		0x72190
+#define DSPDPOS			0x7318C
+#define DSPDSIZE		0x73190
+
+#define DSPASURF		0x7019C
+#define DSPATILEOFF		0x701A4
+#define DSPACONSTALPHA		0x701A8
+
+#define DSPBSURF		0x7119C
+#define DSPBTILEOFF		0x711A4
+#define DSPBCONSTALPHA		0x711A8
+
+#define DSPCSURF		0x7219C
+#define DSPCTILEOFF		0x721A4
+#define DSPCKEYMAXVAL 		0x721A0
+#define DSPCCONSTALPHA		0x721A8
+#define DSPCKEYMINVAL 		0x72194
+#define DSPCKEYMSK 		0x72198
+
+#define DSPDSURF		0x7319C
+#define DSPDTILEOFF		0x731A4
+#define DSPDCONSTALPHA		0x731A8
+
+#define VGACNTRL		0x71400
+#define VGA_DISP_DISABLE			(1 << 31)
+#define VGA_2X_MODE				(1 << 30)
+#define VGA_PIPE_B_SELECT			(1 << 29)
+
+/*
+ * Overlay registers
+ */
+#define OV_C_OFFSET		0x08000
+#define OV_OVADD		0x30000
+#define OV_DOVASTA              0x30008
+#define OV_PIPE_SELECT				(BIT6|BIT7)
+#define OV_PIPE_SELECT_POS			6
+#define OV_PIPE_A				0
+#define OV_PIPE_B				2
+#define OV_PIPE_C				1
+#define OV_OGAMC5		0x30010
+#define OV_OGAMC4		0x30014
+#define OV_OGAMC3		0x30018
+#define OV_OGAMC2		0x3001C
+#define OV_OGAMC1		0x30020
+#define OV_OGAMC0		0x30024
+#define OVC_OVADD		0x38000
+#define OVC_DOVCSTA             0x38008
+#define OVC_OGAMC5		0x38010
+#define OVC_OGAMC4		0x38014
+#define OVC_OGAMC3		0x38018
+#define OVC_OGAMC2		0x3801C
+#define OVC_OGAMC1		0x38020
+#define OVC_OGAMC0		0x38024
+
+/*
+ * Some BIOS scratch area registers.  The 845 (and 830?) store the amount
+ * of video memory available to the BIOS in SWF1.
+ */
+#define SWF0			0x71410
+#define SWF1			0x71414
+#define SWF2			0x71418
+#define SWF3			0x7141c
+#define SWF4			0x71420
+#define SWF5			0x71424
+#define SWF6			0x71428
+
+/*
+ * 855 scratch registers.
+ */
+#define SWF00			0x70410
+#define SWF01			0x70414
+#define SWF02			0x70418
+#define SWF03			0x7041c
+#define SWF04			0x70420
+#define SWF05			0x70424
+#define SWF06			0x70428
+
+#define SWF10			SWF0
+#define SWF11			SWF1
+#define SWF12			SWF2
+#define SWF13			SWF3
+#define SWF14			SWF4
+#define SWF15			SWF5
+#define SWF16			SWF6
+
+#define SWF30			0x72414
+#define SWF31			0x72418
+#define SWF32			0x7241c
+
+/*
+ * Palette registers
+ */
+#define PALETTE_A		0x0a000
+#define PALETTE_B		0x0a800
+#define PALETTE_C		0x0ac00
+
+/*Gamma max register*/
+#define GAMMA_RED_MAX_A         0x70010
+#define GAMMA_GREEN_MAX_A       0x70014
+#define GAMMA_BLUE_MAX_A        0x70018
+
+#define GAMMA_RED_MAX_C         0x72010
+#define GAMMA_GREEN_MAX_C       0x72014
+#define GAMMA_BLUE_MAX_C        0x72018
+
+/*Gamma max register*/
+#define GAMMA_RED_MAX_A         0x70010
+#define GAMMA_GREEN_MAX_A       0x70014
+#define GAMMA_BLUE_MAX_A        0x70018
+
+#define GAMMA_RED_MAX_C         0x72010
+#define GAMMA_GREEN_MAX_C       0x72014
+#define GAMMA_BLUE_MAX_C        0x72018
+
+#define IS_I830(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82830_CGC)
+#define IS_845G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82845G_IG)
+#define IS_I85X(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82855GM_IG)
+#define IS_I855(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82855GM_IG)
+#define IS_I865G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82865_IG)
+
+
+/* || dev->pci_device == PCI_DEVICE_ID_INTELPCI_CHIP_E7221_G) */
+#define IS_I915G(dev) (dev->pci_device == PCI_DEVICE_ID_INTEL_82915G_IG)
+#define IS_I915GM(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82915GM_IG)
+#define IS_I945G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82945G_IG)
+#define IS_I945GM(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82945GM_IG)
+
+#define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \
+		       (dev)->pci_device == 0x2982 || \
+		       (dev)->pci_device == 0x2992 || \
+		       (dev)->pci_device == 0x29A2 || \
+		       (dev)->pci_device == 0x2A02 || \
+		       (dev)->pci_device == 0x2A12)
+
+#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02)
+
+#define IS_G33(dev)    ((dev)->pci_device == 0x29C2 ||	\
+			(dev)->pci_device == 0x29B2 ||	\
+			(dev)->pci_device == 0x29D2)
+
+#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
+		      IS_I945GM(dev) || IS_I965G(dev) || IS_POULSBO(dev) || \
+		      IS_MRST(dev))
+
+#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
+			IS_I945GM(dev) || IS_I965GM(dev) || \
+			IS_POULSBO(dev) || IS_MRST(dev))
+
+/* Cursor A & B regs */
+#define CURACNTR		0x70080
+#define   CURSOR_MODE_DISABLE   0x00
+#define   CURSOR_MODE_64_32B_AX 0x07
+#define   CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX)
+#define   MCURSOR_GAMMA_ENABLE  (1 << 26)
+#define CURABASE		0x70084
+#define CURAPOS			0x70088
+#define   CURSOR_POS_MASK       0x007FF
+#define   CURSOR_POS_SIGN       0x8000
+#define   CURSOR_X_SHIFT        0
+#define   CURSOR_Y_SHIFT        16
+#define CURBCNTR		0x700c0
+#define CURBBASE		0x700c4
+#define CURBPOS			0x700c8
+#define CURCCNTR		0x700e0
+#define CURCBASE		0x700e4
+#define CURCPOS			0x700e8
+
+/*
+ * Interrupt Registers
+ */
+#define IER 0x020a0
+#define IIR 0x020a4
+#define IMR 0x020a8
+#define ISR 0x020ac
+
+/*
+ * MOORESTOWN delta registers
+ */
+#define MRST_DPLL_A		0x0f014
+#define MDFLD_DPLL_B		0x0f018
+#define MRFLD_CMNRST		(1 << 11)
+#define MRFLD_INPUT_REF_SSC	(3 << 13)
+#define MDFLD_INPUT_REF_SEL	(1 << 14)
+#define MDFLD_VCO_SEL		(1 << 16)
+#define DPLLA_MODE_LVDS		(2 << 26)	/* mrst */
+#define MDFLD_PLL_LATCHEN	(1 << 28)
+#define MRFLD_CRI_ICK_PLL	(1 << 28)
+#define MRFLD_REF_CLK_EN	(1 << 29)
+#define MDFLD_PWR_GATE_EN	(1 << 30)
+#define MRFLD_EXT_CLK_BUF_EN	(1 << 30)
+#define MDFLD_P1_MASK		(0x1FF << 17)
+#define MRST_FPA0		0x0f040
+#define MRST_FPA1		0x0f044
+#define MDFLD_DPLL_DIV0		0x0f048
+#define MDFLD_DPLL_DIV1		0x0f04c
+#define MRST_PERF_MODE		0x020f4
+
+/* MEDFIELD HDMI registers */
+#define HDMIPHYMISCCTL   	0x61134
+#define HDMI_PHY_POWER_DOWN	0x7f
+#define HDMIB_CONTROL   	0x61140
+#define HDMIB_PORT_EN			(1 << 31)
+#define HDMIB_PIPE_B_SELECT		(1 << 30)
+#define HDMIB_NULL_PACKET		(1 << 9)
+# define HDMIB_AUDIO_ENABLE     (1 << 6)
+#define HDMIB_HDCP_PORT 		(1 << 5)
+#define VIDEO_DIP_CTL		0x61170
+#define EN_DIP				(1 << 31)
+#define PORT_B_SELECT			(1 << 29)
+#define DIP_TYPE_MASK			(0xf << 21)
+#define DIP_TYPE_AVI			(1 << 21)
+#define DIP_TYPE_VS			(2 << 21)
+#define DIP_TYPE_SPD			(8 << 21)
+#define DIP_BUFF_INDX_MASK		(3 << 19)
+#define DIP_BUFF_INDX_AVI		(0 << 19)
+#define DIP_BUFF_INDX_VS		(1 << 19)
+#define DIP_BUFF_INDX_SPD		(3 << 19)
+#define DIP_TX_FREQ_MASK		(3 << 16)
+#define DIP_TX_FREQ_ONCE		(0 << 16)
+#define DIP_TX_FREQ_1VSNC		(1 << 16)
+#define DIP_TX_FREQ_2VSNC		(2 << 16)
+#define DIP_RAM_ADDR_MASK		0xf
+#define VIDEO_DIP_DATA		0x61178
+
+#define AUDIO_DIP_CTL   0x69060
+
+/* MEDFIELD HDMI audio unit registers */
+#define AUD_CONFIG		0x69000
+#define AUD_CONF_AUDIO_ENABLE	(1 << 0)
+
+/* #define LVDS			0x61180 */
+#define MRST_PANEL_8TO6_DITHER_ENABLE		(1 << 25)
+#define MRST_PANEL_24_DOT_1_FORMAT		(1 << 24)
+#define LVDS_A3_POWER_UP_0_OUTPUT		(1 << 6)
+
+#define MIPI			0x61190
+#define MIPI_C			0x62190
+#define MIPI_PORT_EN			(1 << 31)
+#define DUAL_LINK_MODE_PIXEL_ALTER	(1 << 26)
+/** Turns on border drawing to allow centered display. */
+#define SEL_FLOPPED_HSTX		(1 << 23)
+#define PASS_FROM_SPHY_TO_AFE 		(1 << 16)
+#define MIPI_BORDER_EN			(1 << 15)
+#define BANDGAP_CHICKEN_BIT		(1 << 8)
+#define MIPIA_3LANE_MIPIC_1LANE	0x1
+#define MIPIA_2LANE_MIPIC_2LANE	0x2
+#define TE_TRIGGER_DSI_PROTOCOL	(1 << 2)
+#define TE_TRIGGER_GPIO_PIN		(1 << 3)
+#define DUAL_LINK_ENABLE		(1 << 1)
+#define DUAL_LINK_CAPABLE		(1)
+#define MIPI_TE_COUNT			0x61194
+
+/* #define PP_CONTROL	0x61204 */
+#define POWER_DOWN_ON_RESET		(1 << 1)
+
+/* #define PFIT_CONTROL	0x61230 */
+#define PFIT_PIPE_SELECT				(3 << 29)
+#define PFIT_PIPE_SELECT_SHIFT			(29)
+
+/* #define BLC_PWM_CTL		0x61254 */
+#define MRST_BACKLIGHT_MODULATION_FREQ_SHIFT		(16)
+#define MRST_BACKLIGHT_MODULATION_FREQ_MASK		(0xffff << 16)
+
+/* #define PIPEACONF 0x70008 */
+#define PIPEACONF_PIPE_STATE	(1<<30)
+
+/* Registers for S0i1 Display Enabling */
+#define PIPEA_REPEAT_FRM_CNT_TRESHOLD_REG	0x60090
+#define PIPEA_REPEAT_FRM_CNT_TRESHOLD_ENABLE	(1<<31)
+#define PIPEA_REPEAT_FRM_CNT_TRESHOLD_DISABLE	(0)
+#define PIPEA_CALCULATE_CRC_REG			0x60050
+#define PIPEA_CALCULATE_CRC_ENABLE		(1<<31)
+#define PIPEA_CALCULATE_CRC_DISABLE		(0)
+#define DSPSRCTRL_REG				0x7005C
+#define DSPSRCTRL_MAXFIFO_ENABLE		(1<<20)
+#define DSPSRCTRL_MAXFIFO_MODE_ALWAYS_MAXFIFO	(1<<23)
+#define PUNIT_DSPSSPM_ENABLE_S0i1_DISPLAY	(1<<8)
+
+#define MRST_DSPABASE		0x7019c
+#define MRST_DSPBBASE		0x7119c
+#define MDFLD_DSPCBASE		0x7219c
+
+/*
+ * Moorestown registers.
+ */
+/*===========================================================================
+; General Constants
+;--------------------------------------------------------------------------*/
+#define BIT0  0x00000001
+#define BIT1  0x00000002
+#define BIT2  0x00000004
+#define BIT3  0x00000008
+#define BIT4  0x00000010
+#define BIT5  0x00000020
+#define BIT6  0x00000040
+#define BIT7  0x00000080
+#define BIT8  0x00000100
+#define BIT9  0x00000200
+#define BIT10 0x00000400
+#define BIT11 0x00000800
+#define BIT12 0x00001000
+#define BIT13 0x00002000
+#define BIT14 0x00004000
+#define BIT15 0x00008000
+#define BIT16 0x00010000
+#define BIT17 0x00020000
+#define BIT18 0x00040000
+#define BIT19 0x00080000
+#define BIT20 0x00100000
+#define BIT21 0x00200000
+#define BIT22 0x00400000
+#define BIT23 0x00800000
+#define BIT24 0x01000000
+#define BIT25 0x02000000
+#define BIT26 0x04000000
+#define BIT27 0x08000000
+#define BIT28 0x10000000
+#define BIT29 0x20000000
+#define BIT30 0x40000000
+#define BIT31 0x80000000
+/*===========================================================================
+; MIPI IP registers
+;--------------------------------------------------------------------------*/
+#define MIPIC_REG_OFFSET             0x800
+#define DEVICE_READY_REG             0xb000
+#define LP_OUTPUT_HOLD               BIT16
+#define EXIT_ULPS_DEV_READY          0x3
+#define LP_OUTPUT_HOLD_RELEASE       0x810000
+#define ENTERING_ULPS		(2 << 1)
+#define EXITING_ULPS		(1 << 1)
+#define ULPS_MASK		(3 << 1)
+#define BUS_POSSESSION		(1 << 3)
+#define INTR_STAT_REG                0xb004
+#define RX_SOT_ERROR BIT0
+#define RX_SOT_SYNC_ERROR BIT1
+#define RX_ESCAPE_MODE_ENTRY_ERROR BIT3
+#define RX_LP_TX_SYNC_ERROR BIT4
+#define RX_HS_RECEIVE_TIMEOUT_ERROR BIT5
+#define RX_FALSE_CONTROL_ERROR BIT6
+#define RX_ECC_SINGLE_BIT_ERROR BIT7
+#define RX_ECC_MULTI_BIT_ERROR BIT8
+#define RX_CHECKSUM_ERROR BIT9
+#define RX_DSI_DATA_TYPE_NOT_RECOGNIZED BIT10
+#define RX_DSI_VC_ID_INVALID BIT11
+#define TX_FALSE_CONTROL_ERROR BIT12
+#define TX_ECC_SINGLE_BIT_ERROR BIT13
+#define TX_ECC_MULTI_BIT_ERROR BIT14
+#define TX_CHECKSUM_ERROR BIT15
+#define TX_DSI_DATA_TYPE_NOT_RECOGNIZED BIT16
+#define TX_DSI_VC_ID_INVALID BIT17
+#define HIGH_CONTENTION BIT18
+#define LOW_CONTENTION BIT19
+#define DPI_FIFO_UNDER_RUN BIT20
+#define HS_TX_TIMEOUT BIT21
+#define LP_RX_TIMEOUT BIT22
+#define TURN_AROUND_ACK_TIMEOUT BIT23
+#define ACK_WITH_NO_ERROR BIT24
+#define HS_GENERIC_WR_FIFO_FULL BIT27
+#define LP_GENERIC_WR_FIFO_FULL BIT28
+#define SPL_PKT_SENT			BIT30
+#define INTR_EN_REG                  0xb008
+#define DSI_FUNC_PRG_REG             0xb00c
+#define DPI_CHANNEL_NUMBER_POS   0x03
+#define DBI_CHANNEL_NUMBER_POS   0x05
+#define FMT_DPI_POS              0x07
+#define FMT_DBI_POS              0x0A
+#define DBI_DATA_WIDTH_POS       0x0D
+/* DPI PIXEL FORMATS */
+#define RGB_565_FMT		     0x01	/* RGB 565 FORMAT */
+#define RGB_666_FMT		     0x02	/* RGB 666 FORMAT */
+#define LRGB_666_FMT		     0x03	/* RGB LOOSELY PACKED
+						 * 666 FORMAT
+						 */
+#define RGB_888_FMT		     0x04	/* RGB 888 FORMAT */
+#define VIRTUAL_CHANNEL_NUMBER_0	0x00	/* Virtual channel 0 */
+#define VIRTUAL_CHANNEL_NUMBER_1	0x01	/* Virtual channel 1 */
+#define VIRTUAL_CHANNEL_NUMBER_2	0x02	/* Virtual channel 2 */
+#define VIRTUAL_CHANNEL_NUMBER_3	0x03	/* Virtual channel 3 */
+#define DBI_NOT_SUPPORTED		0x00	/* command mode
+						 * is not supported
+						 */
+#define DBI_DATA_WIDTH_16BIT		0x01	/* 16 bit data */
+#define DBI_DATA_WIDTH_9BIT			0x02	/* 9 bit data */
+#define DBI_DATA_WIDTH_8BIT			0x03	/* 8 bit data */
+#define DBI_DATA_WIDTH_OPT1		0x04	/* option 1 */
+#define DBI_DATA_WIDTH_OPT2		0x05	/* option 2 */
+#define HS_TX_TIMEOUT_REG            0xb010
+#define LP_RX_TIMEOUT_REG            0xb014
+#define TURN_AROUND_TIMEOUT_REG      0xb018
+#define DEVICE_RESET_REG             0xb01C
+#define DPI_RESOLUTION_REG           0xb020
+#define RES_V_POS                0x10
+#define DBI_RESOLUTION_REG           0xb024	/* Reserved for MDFLD */
+#define HORIZ_SYNC_PAD_COUNT_REG     0xb028
+#define HORIZ_BACK_PORCH_COUNT_REG   0xb02C
+#define HORIZ_FRONT_PORCH_COUNT_REG  0xb030
+#define HORIZ_ACTIVE_AREA_COUNT_REG  0xb034
+#define VERT_SYNC_PAD_COUNT_REG      0xb038
+#define VERT_BACK_PORCH_COUNT_REG    0xb03c
+#define VERT_FRONT_PORCH_COUNT_REG   0xb040
+#define HIGH_LOW_SWITCH_COUNT_REG    0xb044
+#define DPI_CONTROL_REG              0xb048
+#define DPI_SHUT_DOWN            BIT0
+#define DPI_TURN_ON              BIT1
+#define DPI_COLOR_MODE_ON        BIT2
+#define DPI_COLOR_MODE_OFF       BIT3
+#define DPI_BACK_LIGHT_ON        BIT4
+#define DPI_BACK_LIGHT_OFF       BIT5
+#define DPI_LP                   BIT6
+#define DPI_DATA_REG                 0xb04c
+#define DPI_BACK_LIGHT_ON_DATA   0x07
+#define DPI_BACK_LIGHT_OFF_DATA  0x17
+#define INIT_COUNT_REG               0xb050
+#define MAX_RET_PAK_REG              0xb054
+#define VIDEO_FMT_REG                0xb058
+#define COMPLETE_LAST_PCKT       BIT2
+#define EOT_DISABLE_REG              0xb05c
+#define ENABLE_CLOCK_STOPPING    BIT1
+#define LP_BYTECLK_REG               0xb060
+#define LP_GEN_DATA_REG              0xb064
+#define HS_GEN_DATA_REG              0xb068
+#define LP_GEN_CTRL_REG              0xb06C
+#define HS_GEN_CTRL_REG              0xb070
+#define DCS_CHANNEL_NUMBER_POS   0x06
+#define MCS_COMMANDS_POS	0x8
+#define WORD_COUNTS_POS		0x8
+#define MCS_PARAMETER_POS	0x10
+#define GEN_FIFO_STAT_REG            0xb074
+#define HS_DATA_FIFO_FULL        BIT0
+#define HS_DATA_FIFO_HALF_EMPTY  BIT1
+#define HS_DATA_FIFO_EMPTY       BIT2
+#define LP_DATA_FIFO_FULL        BIT8
+#define LP_DATA_FIFO_HALF_EMPTY  BIT9
+#define LP_DATA_FIFO_EMPTY       BIT10
+#define HS_CTRL_FIFO_FULL        BIT16
+#define HS_CTRL_FIFO_HALF_EMPTY  BIT17
+#define HS_CTRL_FIFO_EMPTY       BIT18
+#define LP_CTRL_FIFO_FULL        BIT24
+#define LP_CTRL_FIFO_HALF_EMPTY  BIT25
+#define LP_CTRL_FIFO_EMPTY       BIT26
+#define DBI_FIFO_EMPTY           BIT27
+#define DPI_FIFO_EMPTY           BIT28
+#define HS_LS_DBI_ENABLE_REG         0xb078
+#define TXCLKESC_REG		     0xb07c
+#define DPHY_PARAM_REG               0xb080
+#define DBI_BW_CTRL_REG              0xb084
+#define CLK_LANE_SWT_REG             0xb088
+/*===========================================================================
+; MIPI Adapter registers
+;--------------------------------------------------------------------------*/
+#define MIPI_CONTROL_REG             0xb104
+#define MIPI_2X_CLOCK_BITS       (BIT0 | BIT1)
+#define MIPI_DATA_ADDRESS_REG        0xb108
+#define MIPI_DATA_LENGTH_REG         0xb10C
+#define MIPI_COMMAND_ADDRESS_REG     0xb110
+#define MIPI_COMMAND_LENGTH_REG      0xb114
+#define MIPI_READ_DATA_RETURN_REG0   0xb118
+#define MIPI_READ_DATA_RETURN_REG1   0xb11C
+#define MIPI_READ_DATA_RETURN_REG2   0xb120
+#define MIPI_READ_DATA_RETURN_REG3   0xb124
+#define MIPI_READ_DATA_RETURN_REG4   0xb128
+#define MIPI_READ_DATA_RETURN_REG5   0xb12C
+#define MIPI_READ_DATA_RETURN_REG6   0xb130
+#define MIPI_READ_DATA_RETURN_REG7   0xb134
+#define MIPI_READ_DATA_VALID_REG     0xb138
+/* DBI COMMANDS */
+#define soft_reset                   0x01
+/* ************************************************************************* *\
+The display module performs a software reset.
+Registers are written with their SW Reset default values.
+\* ************************************************************************* */
+#define get_power_mode               0x0a
+/* ************************************************************************* *\
+The display module returns the current power mode
+\* ************************************************************************* */
+#define get_address_mode             0x0b
+/* ************************************************************************* *\
+The display module returns the current status.
+\* ************************************************************************* */
+#define get_pixel_format             0x0c
+/* ************************************************************************* *\
+This command gets the pixel format for the RGB image data
+used by the interface.
+\* ************************************************************************* */
+#define get_display_mode             0x0d
+/* ************************************************************************* *\
+The display module returns the Display Image Mode status.
+\* ************************************************************************* */
+#define get_signal_mode              0x0e
+/* ************************************************************************* *\
+The display module returns the Display Signal Mode.
+\* ************************************************************************* */
+#define get_diagnostic_result        0x0f
+/* ************************************************************************* *\
+The display module returns the self-diagnostic results following
+a Sleep Out command.
+\* ************************************************************************* */
+#define enter_sleep_mode             0x10
+/* ************************************************************************* *\
+This command causes the display module to enter the Sleep mode.
+In this mode, all unnecessary blocks inside the display module are disabled
+except interface communication. This is the lowest power mode
+the display module supports.
+\* ************************************************************************* */
+#define exit_sleep_mode              0x11
+/* ************************************************************************* *\
+This command causes the display module to exit Sleep mode.
+All blocks inside the display module are enabled.
+\* ************************************************************************* */
+#define enter_partial_mode           0x12
+/* ************************************************************************* *\
+This command causes the display module to enter the Partial Display Mode.
+The Partial Display Mode window is described by the set_partial_area command.
+\* ************************************************************************* */
+#define enter_normal_mode            0x13
+/* ************************************************************************* *\
+This command causes the display module to enter the Normal mode.
+Normal Mode is defined as Partial Display mode and Scroll mode are off
+\* ************************************************************************* */
+#define exit_invert_mode             0x20
+/* ************************************************************************* *\
+This command causes the display module to stop inverting the image data on
+the display device. The frame memory contents remain unchanged.
+No status bits are changed.
+\* ************************************************************************* */
+#define enter_invert_mode            0x21
+/* ************************************************************************* *\
+This command causes the display module to invert the image data only on
+the display device. The frame memory contents remain unchanged.
+No status bits are changed.
+\* ************************************************************************* */
+#define set_gamma_curve              0x26
+/* ************************************************************************* *\
+This command selects the desired gamma curve for the display device.
+Four fixed gamma curves are defined in section DCS spec.
+\* ************************************************************************* */
+#define set_display_off              0x28
+/* ************************************************************************* *\
+This command causes the display module to stop displaying the image data
+on the display device. The frame memory contents remain unchanged.
+No status bits are changed.
+\* ************************************************************************* */
+#define set_display_on               0x29
+/* ************************************************************************* *\
+This command causes the display module to start displaying the image data
+on the display device. The frame memory contents remain unchanged.
+No status bits are changed.
+\* ************************************************************************* */
+#define set_column_address           0x2a
+/* ************************************************************************* *\
+This command defines the column extent of the frame memory accessed by the
+hostprocessor with the read_memory_continue and write_memory_continue commands.
+No status bits are changed.
+\* ************************************************************************* */
+#define set_page_addr             0x2b
+/* ************************************************************************* *\
+This command defines the page extent of the frame memory accessed by the host
+processor with the write_memory_continue and read_memory_continue command.
+No status bits are changed.
+\* ************************************************************************* */
+#define write_mem_start              0x2c
+/* ************************************************************************* *\
+This command transfers image data from the host processor to the display
+module s frame memory starting at the pixel location specified by
+preceding set_column_address and set_page_address commands.
+\* ************************************************************************* */
+#define set_partial_area             0x30
+/* ************************************************************************* *\
+This command defines the Partial Display mode s display area.
+There are two parameters associated with
+this command, the first defines the Start Row (SR) and the second the End Row
+(ER). SR and ER refer to the Frame Memory Line Pointer.
+\* ************************************************************************* */
+#define set_scroll_area              0x33
+/* ************************************************************************* *\
+This command defines the display modules Vertical Scrolling Area.
+\* ************************************************************************* */
+#define set_tear_off                 0x34
+/* ************************************************************************* *\
+This command turns off the display modules Tearing Effect output signal on
+the TE signal line.
+\* ************************************************************************* */
+#define set_tear_on                  0x35
+/* ************************************************************************* *\
+This command turns on the display modules Tearing Effect output signal
+on the TE signal line.
+\* ************************************************************************* */
+#define set_address_mode             0x36
+/* ************************************************************************* *\
+This command sets the data order for transfers from the host processor to
+display modules frame memory,bits B[7:5] and B3, and from the display
+modules frame memory to the display device, bits B[2:0] and B4.
+\* ************************************************************************* */
+#define set_scroll_start             0x37
+/* ************************************************************************* *\
+This command sets the start of the vertical scrolling area in the frame memory.
+The vertical scrolling area is fully defined when this command is used with
+the set_scroll_area command The set_scroll_start command has one parameter,
+the Vertical Scroll Pointer. The VSP defines the line in the frame memory
+that is written to the display device as the first line of the vertical
+scroll area.
+\* ************************************************************************* */
+#define exit_idle_mode               0x38
+/* ************************************************************************* *\
+This command causes the display module to exit Idle mode.
+\* ************************************************************************* */
+#define enter_idle_mode              0x39
+/* ************************************************************************* *\
+This command causes the display module to enter Idle Mode.
+In Idle Mode, color expression is reduced. Colors are shown on the display
+device using the MSB of each of the R, G and B color components in the frame
+memory
+\* ************************************************************************* */
+#define set_pixel_format             0x3a
+/* ************************************************************************* *\
+This command sets the pixel format for the RGB image data used by the interface.
+Bits D[6:4]  DPI Pixel Format Definition
+Bits D[2:0]  DBI Pixel Format Definition
+Bits D7 and D3 are not used.
+\* ************************************************************************* */
+#define DCS_PIXEL_FORMAT_3bbp	 	0x1
+#define DCS_PIXEL_FORMAT_8bbp 	0x2
+#define DCS_PIXEL_FORMAT_12bbp 	0x3
+#define DCS_PIXEL_FORMAT_16bbp	0x5
+#define DCS_PIXEL_FORMAT_18bbp	0x6
+#define DCS_PIXEL_FORMAT_24bbp 	0x7
+#define write_mem_cont               0x3c
+/* ************************************************************************* *\
+This command transfers image data from the host processor to the display
+module's frame memory continuing from the pixel location following the
+previous write_memory_continue or write_memory_start command.
+\* ************************************************************************* */
+#define set_tear_scanline            0x44
+/* ************************************************************************* *\
+This command turns on the display modules Tearing Effect output signal on the
+TE signal line when the display module reaches line N.
+\* ************************************************************************* */
+#define get_scanline                 0x45
+/* ************************************************************************* *\
+The display module returns the current scanline, N, used to update the
+display device. The total number of scanlines on a display device is
+defined as VSYNC + VBP + VACT + VFP.The first scanline is defined as
+the first line of V Sync and is denoted as Line 0.
+When in Sleep Mode, the value returned by get_scanline is undefined.
+\* ************************************************************************* */
+
+/* MCS or Generic COMMANDS */
+/* MCS/generic data type */
+#define GEN_SHORT_WRITE_0	0x03	/* generic short write, no parameters */
+#define GEN_SHORT_WRITE_1	0x13	/* generic short write, 1 parameters */
+#define GEN_SHORT_WRITE_2	0x23	/* generic short write, 2 parameters */
+#define GEN_READ_0		0x04	/* generic read, no parameters */
+#define GEN_READ_1		0x14	/* generic read, 1 parameters */
+#define GEN_READ_2		0x24	/* generic read, 2 parameters */
+#define GEN_LONG_WRITE		0x29	/* generic long write */
+#define MCS_SHORT_WRITE_0	0x05	/* MCS short write, no parameters */
+#define MCS_SHORT_WRITE_1	0x15	/* MCS short write, 1 parameters */
+#define MCS_READ		0x06	/* MCS read, no parameters */
+#define MCS_LONG_WRITE		0x39	/* MCS long write */
+/* MCS/generic commands */
+/****TPO MCS ****/
+#define write_display_profile		0x50
+#define write_display_brightness	0x51
+#define write_ctrl_display		0x53
+#define write_ctrl_cabc			0x55
+#define UI_IMAGE		0x01
+#define STILL_IMAGE		0x02
+#define MOVING_IMAGE		0x03
+#define write_hysteresis		0x57
+#define write_gamma_setting		0x58
+#define write_cabc_min_bright		0x5e
+#define write_kbbc_profile		0x60
+/**** TMD MCS ****/
+#define tmd_write_display_brightness	0x8c
+#define set_video_mode 0xc2
+#define set_panel 0xcc
+#define set_eq_func_ltps 0xd4
+
+#define access_protect 0xb0
+#define low_power_mode 0xb1
+/* ************************************************************************* *\
+This command is used to control ambient light, panel backlight brightness and
+gamma settings.
+\* ************************************************************************* */
+#define BRIGHT_CNTL_BLOCK_ON	BIT5
+#define AMBIENT_LIGHT_SENSE_ON	BIT4
+#define DISPLAY_DIMMING_ON	BIT3
+#define BACKLIGHT_ON		BIT2
+#define DISPLAY_BRIGHTNESS_AUTO	BIT1
+#define GAMMA_AUTO		BIT0
+
+/* DCS Interface Pixel Formats */
+#define DCS_PIXEL_FORMAT_3BPP         0x1
+#define DCS_PIXEL_FORMAT_8BPP         0x2
+#define DCS_PIXEL_FORMAT_12BPP        0x3
+#define DCS_PIXEL_FORMAT_16BPP        0x5
+#define DCS_PIXEL_FORMAT_18BPP        0x6
+#define DCS_PIXEL_FORMAT_24BPP        0x7
+/* ONE PARAMETER READ DATA */
+#define addr_mode_data           0xfc
+#define diag_res_data            0x00
+#define disp_mode_data           0x23
+#define pxl_fmt_data             0x77
+#define pwr_mode_data            0x74
+#define sig_mode_data            0x00
+/* TWO PARAMETERS READ DATA */
+#define scanline_data1           0xff
+#define scanline_data2           0xff
+#define NON_BURST_MODE_SYNC_PULSE	0x01	/* Non Burst Mode
+						 * with Sync Pulse
+						 */
+#define NON_BURST_MODE_SYNC_EVENTS	0x02	/* Non Burst Mode
+						 * with Sync events
+						 */
+#define BURST_MODE			0x03	/* Burst Mode */
+#define DBI_COMMAND_BUFFER_SIZE		0x240  /* 0x32 */    /* 0x120 */	/* Allocate at least
+										 * 0x100 Byte with 32
+										 * byte alignment
+										 */
+#define DBI_DATA_BUFFER_SIZE		0x120	/* Allocate at least
+						 * 0x100 Byte with 32
+						 * byte alignment
+						 */
+#define DBI_CB_TIME_OUT 0xFFFF
+#define GEN_FB_TIME_OUT 2000
+#define ALIGNMENT_32BYTE_MASK		(~(BIT0|BIT1|BIT2|BIT3|BIT4))
+#define SKU_83 						0x01
+#define SKU_100 					0x02
+#define SKU_100L 					0x04
+#define SKU_BYPASS 					0x08
+
+/* MDFLD delta registers */
+#define PIPEA			0x0
+#define PIPEB			0x1
+#define PIPEC			0x2
+#define PIPEB_OFFSET		0x1000
+#define PIPEC_OFFSET		0x2000
+#define PIPEA_COLOR_COEF0 	0x60070
+#define CC_1_POS		16
+#define CC_0_POS		0
+#define PIPEA_COLOR_COEF2 	0x60074
+#define PIPEA_COLOR_COEF11 	0x60078
+#define PIPEA_COLOR_COEF12 	0x6007c
+#define PIPEA_COLOR_COEF21 	0x60080
+#define PIPEA_COLOR_COEF22 	0x60084
+#define PIPEB_COLOR_COEF0 	0x61070
+#define PIPEB_COLOR_COEF2 	0x61074
+#define PIPEB_COLOR_COEF11 	0x61078
+#define PIPEB_COLOR_COEF12 	0x6107c
+#define PIPEB_COLOR_COEF21 	0x61080
+#define PIPEB_COLOR_COEF22 	0x61084
+#define PIPEC_COLOR_COEF0 	0x62070
+#define PIPEC_COLOR_COEF2 	0x62074
+#define PIPEC_COLOR_COEF11 	0x62078
+#define PIPEC_COLOR_COEF12 	0x6207c
+#define PIPEC_COLOR_COEF21 	0x62080
+#define PIPEC_COLOR_COEF22 	0x62084
+
+/* Necessary reset registers for ANN A0*/
+#define DSPCLK_GATE_D		0x70500
+#define RAMCLK_GATE_D		0x70504
+#define DSPIEDCFGSHDW		0x6414
+#define DSPSRCTRL		0x7005c
+#define FBDC_CHICKEN		0x70508
+#define IEP_OVA_CTRL 		0x32000
+#define IEP_OVC_CTRL 		0x3A000
+
+#endif
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/psb_irq.c b/drivers/external_drivers/intel_media/display/tng/drv/psb_irq.c
new file mode 100644
index 0000000..6fe8600
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/psb_irq.c
@@ -0,0 +1,1368 @@
+/**************************************************************************
+ * Copyright (c) 2007, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ * develop this driver.
+ *
+ **************************************************************************/
+/*
+ */
+
+#include <drm/drmP.h>
+#include "psb_drv.h"
+#include "psb_reg.h"
+#include "psb_msvdx.h"
+#include "tng_topaz.h"
+
+#ifdef SUPPORT_VSP
+#include "vsp.h"
+#endif
+
+#include "psb_intel_reg.h"
+#include "pwr_mgmt.h"
+#include "dc_maxfifo.h"
+
+#include "psb_irq.h"
+#include "psb_intel_hdmi.h"
+
+#ifdef CONFIG_SUPPORT_MIPI
+#include "mdfld_dsi_dbi_dsr.h"
+#include "mdfld_dsi_dbi_dpu.h"
+#include "mdfld_dsi_pkg_sender.h"
+#endif
+
+#define KEEP_UNUSED_CODE 0
+
+extern struct drm_device *gpDrmDevice;
+extern int drm_psb_smart_vsync;
+/*
+ * inline functions
+ */
+
+static inline void update_te_counter(struct drm_device *dev, uint32_t pipe)
+{
+#ifdef CONFIG_SUPPORT_MIPI
+	struct mdfld_dsi_pkg_sender *sender;
+	struct mdfld_dsi_dbi_output **dbi_outputs;
+	struct mdfld_dsi_dbi_output *dbi_output;
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *)dev->dev_private;
+	struct mdfld_dbi_dsr_info *dsr_info = dev_priv->dbi_dsr_info;
+
+	if (!dsr_info)
+		return;
+
+	dbi_outputs = dsr_info->dbi_outputs;
+	dbi_output = pipe ? dbi_outputs[1] : dbi_outputs[0];
+	sender = mdfld_dsi_encoder_get_pkg_sender(&dbi_output->base);
+	mdfld_dsi_report_te(sender);
+#else
+	return;
+#endif
+}
+
+static inline u32 psb_pipestat(int pipe)
+{
+	if (pipe == 0)
+		return PIPEASTAT;
+	if (pipe == 1)
+		return PIPEBSTAT;
+	if (pipe == 2)
+		return PIPECSTAT;
+	BUG();
+	/* This should be unreachable */
+	return 0;
+}
+
+static inline u32 mid_pipe_event(int pipe)
+{
+	if (pipe == 0)
+		return _PSB_PIPEA_EVENT_FLAG;
+	if (pipe == 1)
+		return _MDFLD_PIPEB_EVENT_FLAG;
+	if (pipe == 2)
+		return _MDFLD_PIPEC_EVENT_FLAG;
+	BUG();
+	/* This should be unreachable */
+	return 0;
+}
+
+static inline u32 mid_pipe_vsync(int pipe)
+{
+	if (pipe == 0)
+		return _PSB_VSYNC_PIPEA_FLAG;
+	if (pipe == 1)
+		return _PSB_VSYNC_PIPEB_FLAG;
+	if (pipe == 2)
+		return _MDFLD_PIPEC_VBLANK_FLAG;
+	BUG();
+	/* This should be unreachable */
+	return 0;
+}
+
+static inline u32 mid_pipeconf(int pipe)
+{
+	if (pipe == 0)
+		return PIPEACONF;
+	if (pipe == 1)
+		return PIPEBCONF;
+	if (pipe == 2)
+		return PIPECCONF;
+	BUG();
+	/* This should be unreachable */
+	return 0;
+}
+
+void psb_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
+{
+	struct drm_device *dev = dev_priv->dev;
+	u32 write_val;
+
+	u32 reg = psb_pipestat(pipe);
+	dev_priv->pipestat[pipe] |= mask;
+	/* Enable the interrupt, clear any pending status */
+	write_val = PSB_RVDC32(reg);
+
+	write_val |= (mask | (mask >> 16));
+	PSB_WVDC32(write_val, reg);
+	(void)PSB_RVDC32(reg);
+}
+
+void psb_recover_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
+{
+	struct drm_device *dev = dev_priv->dev;
+	if ((dev_priv->pipestat[pipe] & mask) == mask) {
+		u32 reg = psb_pipestat(pipe);
+		u32 write_val = PSB_RVDC32(reg);
+		write_val |= (mask | (mask >> 16));
+		PSB_WVDC32(write_val, reg);
+		(void)PSB_RVDC32(reg);
+	}
+}
+
+void psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
+{
+	struct drm_device *dev = dev_priv->dev;
+	u32 reg = psb_pipestat(pipe);
+	u32 write_val;
+
+	dev_priv->pipestat[pipe] &= ~mask;
+	write_val = PSB_RVDC32(reg);
+	write_val &= ~mask;
+	PSB_WVDC32(write_val, reg);
+	(void)PSB_RVDC32(reg);
+}
+
+void mid_enable_pipe_event(struct drm_psb_private *dev_priv, int pipe)
+{
+	struct drm_device *dev = dev_priv->dev;
+	u32 pipe_event = mid_pipe_event(pipe);
+	/* S0i1-Display registers can only be programmed after the pipe events
+	 * are disabled. Otherwise MSI from the display controller can reach
+	 * the Punit without SCU knowing about it. We have to prevent this
+	 * scenario. So, if we about to enable the pipe_event, check if the
+	 * we have already entered the S0i1-Display mode. If so clear it
+	 * and set the state back to ready.
+	 */
+	exit_s0i1_display_mode(dev);
+	dev_priv->vdc_irq_mask |= pipe_event;
+	PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
+	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+}
+
+void mid_disable_pipe_event(struct drm_psb_private *dev_priv, int pipe)
+{
+	struct drm_device *dev = dev_priv->dev;
+	if (dev_priv->pipestat[pipe] == 0) {
+		u32 pipe_event = mid_pipe_event(pipe);
+		dev_priv->vdc_irq_mask &= ~pipe_event;
+		PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
+		PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+		/* S0i1-Display registers can only be programmed after the pipe events
+		 * are disabled. Otherwise MSI from the display controller can reach
+		 * the Punit without SCU knowing about it. We have to prevent this
+		 * scenario. So, if we are disabling the pipe_event, check if are
+		 * ready to enter S0i1-Display mode. If so clear it
+		 * and set the state back to entered.
+		 */
+		enter_s0i1_display_mode(dev);
+	}
+}
+
+#if KEEP_UNUSED_CODE
+/**
+ * Check if we can disable vblank for video MIPI display
+ *
+ */
+static void mid_check_vblank(struct drm_device *dev, uint32_t pipe)
+{
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *)dev->dev_private;
+	unsigned long irqflags;
+	static unsigned long cnt = 0;
+
+	if (drm_psb_smart_vsync == 0) {
+		if ((cnt++) % 600 == 0) {
+			PSB_DEBUG_ENTRY("[vsync irq] 600 times !\n");
+		}
+		return;
+	}
+
+	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+	if (dev_priv->dsr_idle_count > 50)
+		dev_priv->b_is_in_idle = true;
+	else
+		dev_priv->dsr_idle_count++;
+	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+}
+#endif /* if KEEP_UNUSED_CODE */
+
+u32 intel_vblank_count(struct drm_device *dev, int pipe)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *)dev->dev_private;
+
+	return atomic_read(&dev_priv->vblank_count[pipe]);
+}
+
+/**
+ *  Display controller interrupt handler for vsync/vblank.
+ *
+ *  Modified to handle the midi to hdmi clone 7/13/2012
+ *      williamx.f.schmidt@intel.com
+ */
+static void mid_vblank_handler(struct drm_device *dev, uint32_t pipe)
+{
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *)dev->dev_private;
+
+	if (dev_priv->psb_vsync_handler)
+		(*dev_priv->psb_vsync_handler)(dev, pipe);
+}
+
+#ifdef CONFIG_SUPPORT_HDMI
+/**
+ * Display controller interrupt handler for pipe hdmi audio underrun.
+ *
+ */
+void hdmi_do_audio_underrun_wq(struct work_struct *work)
+{
+	struct drm_psb_private *dev_priv = container_of(work,
+			struct drm_psb_private, hdmi_audio_underrun_wq);
+	void *had_pvt_data = dev_priv->had_pvt_data;
+	enum had_event_type event_type = HAD_EVENT_AUDIO_BUFFER_UNDERRUN;
+
+	if (dev_priv->mdfld_had_event_callbacks)
+		(*dev_priv->mdfld_had_event_callbacks)(event_type,
+							had_pvt_data);
+}
+
+/**
+ * Display controller interrupt handler for pipe hdmi audio buffer done.
+ *
+ */
+void hdmi_do_audio_bufferdone_wq(struct work_struct *work)
+{
+	struct drm_psb_private *dev_priv = container_of(work,
+			struct drm_psb_private, hdmi_audio_bufferdone_wq);
+
+	if (dev_priv->mdfld_had_event_callbacks)
+		(*dev_priv->mdfld_had_event_callbacks)
+		    (HAD_EVENT_AUDIO_BUFFER_DONE, dev_priv->had_pvt_data);
+}
+
+/**
+ * Display controller tasklet entry function for pipe hdmi audio buffer done.
+ *
+ */
+
+void hdmi_audio_bufferdone_tasklet_func(unsigned long data)
+{
+	struct drm_psb_private *dev_priv = (struct drm_psb_private *)data;
+
+	if (dev_priv->mdfld_had_event_callbacks)
+		(*dev_priv->mdfld_had_event_callbacks)
+		    (HAD_EVENT_AUDIO_BUFFER_DONE, dev_priv->had_pvt_data);
+}
+
+#endif
+
+void psb_te_timer_func(unsigned long data)
+{
+	/*
+	   struct drm_psb_private * dev_priv = (struct drm_psb_private *)data;
+	   struct drm_device *dev = (struct drm_device *)dev_priv->dev;
+	   uint32_t pipe = dev_priv->cur_pipe;
+	   drm_handle_vblank(dev, pipe);
+	   if( dev_priv->psb_vsync_handler != NULL)
+	   (*dev_priv->psb_vsync_handler)(dev,pipe);
+	 */
+}
+
+void mdfld_vsync_event_work(struct work_struct *work)
+{
+	struct drm_psb_private *dev_priv =
+		container_of(work, struct drm_psb_private, vsync_event_work);
+	int pipe = dev_priv->vsync_pipe;
+	struct drm_device *dev = dev_priv->dev;
+
+	mid_vblank_handler(dev, pipe);
+
+	/* TODO: to report vsync event to HWC. */
+	/*report vsync event*/
+	/* mdfld_vsync_event(dev, pipe); */
+}
+
+#ifdef CONFIG_SUPPORT_MIPI
+void mdfld_te_handler_work(struct work_struct *work)
+{
+	struct drm_psb_private *dev_priv =
+		container_of(work, struct drm_psb_private, te_work);
+	int pipe = dev_priv->te_pipe;
+	struct drm_device *dev = dev_priv->dev;
+	struct mdfld_dsi_config *dsi_config = NULL;
+
+	if (dev_priv->b_async_flip_enable) {
+		if (dev_priv->psb_vsync_handler != NULL)
+			(*dev_priv->psb_vsync_handler)(dev, pipe);
+
+		dsi_config = (pipe == 0) ? dev_priv->dsi_configs[0] :
+			dev_priv->dsi_configs[1];
+		mdfld_dsi_dsr_report_te(dsi_config);
+	} else {
+#ifdef CONFIG_MID_DSI_DPU
+		mdfld_dpu_update_panel(dev);
+#else
+		mdfld_dbi_update_panel(dev, pipe);
+#endif
+
+		if (dev_priv->psb_vsync_handler != NULL)
+			(*dev_priv->psb_vsync_handler)(dev, pipe);
+	}
+}
+#endif
+/**
+ * Display controller interrupt handler for pipe event.
+ *
+ */
+static void mid_pipe_event_handler(struct drm_device *dev, uint32_t pipe)
+{
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *)dev->dev_private;
+
+	uint32_t pipe_stat_val = 0;
+	uint32_t pipe_stat_reg = psb_pipestat(pipe);
+	uint32_t pipe_enable = dev_priv->pipestat[pipe];
+	uint32_t pipe_status = dev_priv->pipestat[pipe] >> 16;
+	unsigned long irq_flags;
+	uint32_t read_count = 0;
+
+#ifdef CONFIG_SUPPORT_MIPI
+	uint32_t i = 0;
+	uint32_t mipi_intr_stat_val = 0;
+	uint32_t mipi_intr_stat_reg = 0;
+	struct mdfld_dsi_config *dsi_config = NULL;
+	struct mdfld_dsi_hw_registers *regs = NULL;
+	struct mdfld_dsi_hw_context *ctx = NULL;
+	u32 val = 0;
+#endif
+
+	spin_lock_irqsave(&dev_priv->irqmask_lock, irq_flags);
+
+#ifdef CONFIG_SUPPORT_MIPI
+	if (pipe == MDFLD_PIPE_A)
+		mipi_intr_stat_reg = MIPIA_INTR_STAT_REG;
+
+	if ((mipi_intr_stat_reg) && (is_panel_vid_or_cmd(dev) == MDFLD_DSI_ENCODER_DPI)) {
+		mipi_intr_stat_val = PSB_RVDC32(mipi_intr_stat_reg);
+		PSB_WVDC32(mipi_intr_stat_val, mipi_intr_stat_reg);
+		if (mipi_intr_stat_val & DPI_FIFO_UNDER_RUN) {
+			dev_priv->pipea_dpi_underrun_count++;
+			/* ignore the first dpi underrun after dpi panel power on */
+			if (dev_priv->pipea_dpi_underrun_count > 1)
+				DRM_INFO("Display pipe A received a DPI_FIFO_UNDER_RUN event\n");
+		}
+
+		mipi_intr_stat_reg = MIPIA_INTR_STAT_REG + MIPIC_REG_OFFSET;
+
+		mipi_intr_stat_val = PSB_RVDC32(mipi_intr_stat_reg);
+		PSB_WVDC32(mipi_intr_stat_val, mipi_intr_stat_reg);
+		if (mipi_intr_stat_val & DPI_FIFO_UNDER_RUN) {
+			dev_priv->pipec_dpi_underrun_count++;
+			/* ignore the first dpi underrun after dpi panel power on */
+			if (dev_priv->pipec_dpi_underrun_count > 1)
+				DRM_INFO("Display pipe C received a DPI_FIFO_UNDER_RUN event\n");
+		}
+	}
+#else
+	if (pipe != 1) {
+		spin_unlock_irqrestore(&dev_priv->irqmask_lock, irq_flags);
+		DRM_ERROR("called with other than HDMI PIPE %d\n", pipe);
+		return;
+	}
+#endif
+	pipe_stat_val = PSB_RVDC32(pipe_stat_reg);
+	/* Sometimes We read 0 from HW - keep reading until we get non-zero */
+	while ((!pipe_stat_val) && (read_count < 1000)) {
+		pipe_stat_val = REG_READ(pipe_stat_reg);
+		read_count ++;
+	}
+
+#ifdef CONFIG_SUPPORT_MIPI
+#ifdef ENABLE_HW_REPEAT_FRAME
+	/* In the case of the repeated frame count interrupt, we need to disable
+	   the repeat_frame_count_threshold register as otherwise we keep getting
+	   an interrupt
+	*/
+	if ((pipe == MDFLD_PIPE_A) &&
+               (pipe_stat_val & PIPE_REPEATED_FRAME_STATUS)) {
+		PSB_DEBUG_PM("Frame count interrupt Before Clearing "
+				"PipeAStat Reg=0x%8x Val=0x%8x\n",
+			pipe_stat_reg, pipe_stat_val);
+		PSB_WVDC32(PIPEA_REPEAT_FRM_CNT_TRESHOLD_DISABLE,
+				PIPEA_REPEAT_FRM_CNT_TRESHOLD_REG);
+	}
+#endif
+#endif
+
+	/* clear the 2nd level interrupt status bits */
+	PSB_WVDC32(pipe_stat_val, pipe_stat_reg);
+	pipe_stat_val &= pipe_enable | pipe_status;
+	pipe_stat_val &= pipe_stat_val >> 16;
+
+	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irq_flags);
+
+#ifdef CONFIG_SUPPORT_MIPI
+	if ((pipe_stat_val & PIPE_DPST_EVENT_STATUS) &&
+	    (dev_priv->psb_dpst_state != NULL)) {
+		uint32_t pwm_reg = 0;
+		uint32_t hist_reg = 0;
+		struct dpst_guardband guardband_reg;
+		struct dpst_ie_histogram_control ie_hist_cont_reg;
+
+		hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
+
+		/* Determine if this is histogram or pwm interrupt */
+		if ((hist_reg & HISTOGRAM_INT_CTRL_CLEAR) &&
+				(hist_reg & HISTOGRAM_INTERRUPT_ENABLE)) {
+			/* Notify UM of histogram interrupt */
+			psb_dpst_notify_change_um(DPST_EVENT_HIST_INTERRUPT,
+						  dev_priv->psb_dpst_state);
+
+			/* disable dpst interrupts */
+			guardband_reg.data = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
+			guardband_reg.interrupt_enable = 0;
+			guardband_reg.interrupt_status = 1;
+			PSB_WVDC32(guardband_reg.data, HISTOGRAM_INT_CONTROL);
+
+			ie_hist_cont_reg.data = PSB_RVDC32(HISTOGRAM_LOGIC_CONTROL);
+			ie_hist_cont_reg.ie_histogram_enable = 0;
+			PSB_WVDC32(ie_hist_cont_reg.data, HISTOGRAM_LOGIC_CONTROL);
+		}
+		pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
+		if ((pwm_reg & PWM_PHASEIN_INT_ENABLE) &&
+		    !(pwm_reg & PWM_PHASEIN_ENABLE)) {
+			/* Notify UM of the phase complete */
+			psb_dpst_notify_change_um(DPST_EVENT_PHASE_COMPLETE,
+						  dev_priv->psb_dpst_state);
+
+			/* Temporarily get phase mngr ready to generate
+			 * another interrupt until this can be moved to
+			 * user mode */
+			/* PSB_WVDC32(pwm_reg | 0x80010100 | PWM_PHASEIN_ENABLE,
+			   PWM_CONTROL_LOGIC); */
+		}
+	}
+#endif
+
+	if (pipe_stat_val & PIPE_VBLANK_STATUS) {
+		dev_priv->vsync_pipe = pipe;
+		drm_handle_vblank(dev, pipe);
+		queue_work(dev_priv->vsync_wq, &dev_priv->vsync_event_work);
+	}
+
+#ifdef CONFIG_SUPPORT_MIPI
+	if (pipe_stat_val & PIPE_TE_STATUS) {
+		dev_priv->te_pipe = pipe;
+		update_te_counter(dev, pipe);
+		drm_handle_vblank(dev, pipe);
+		queue_work(dev_priv->vsync_wq, &dev_priv->te_work);
+	}
+
+	if (pipe == drm_psb_set_gamma_pipe && drm_psb_set_gamma_pending) {
+		dsi_config = dev_priv->dsi_configs[pipe];
+		regs = &dsi_config->regs;
+		ctx = &dsi_config->dsi_hw_context;
+
+		for (i = 0; i < 256; i++)
+			REG_WRITE(regs->palette_reg + i*4, gamma_setting_save[i] );
+
+		val = REG_READ(regs->pipeconf_reg);
+		val |= (PIPEACONF_GAMMA);
+		REG_WRITE(regs->pipeconf_reg, val);
+		ctx->pipeconf = val;
+		REG_WRITE(regs->dspcntr_reg, REG_READ(regs->dspcntr_reg) |
+				DISPPLANE_GAMMA_ENABLE);
+		ctx->dspcntr = REG_READ(regs->dspcntr_reg) | DISPPLANE_GAMMA_ENABLE;
+		REG_READ(regs->dspcntr_reg);
+		drm_psb_set_gamma_pending = 0 ;
+		drm_psb_set_gamma_pipe = MDFLD_PIPE_MAX;
+	}
+
+	if (pipe == 0) { /* only for pipe A */
+		if (pipe_stat_val & PIPE_FRAME_DONE_STATUS)
+			wake_up_interruptible(&dev_priv->eof_wait);
+	}
+
+#ifdef ENABLE_HW_REPEAT_FRAME
+	if ((pipe == MDFLD_PIPE_A) &&
+               (pipe_stat_val & PIPE_REPEATED_FRAME_STATUS)) {
+		maxfifo_report_repeat_frame_interrupt(dev);
+	}
+#endif
+#endif
+
+#ifdef CONFIG_SUPPORT_HDMI
+	if (pipe == 1) { /* HDMI is only on PIPE B */
+		if (pipe_stat_val & PIPE_HDMI_AUDIO_UNDERRUN_STATUS)
+			schedule_work(&dev_priv->hdmi_audio_underrun_wq);
+
+		if (pipe_stat_val & PIPE_HDMI_AUDIO_BUFFER_DONE_STATUS)
+			tasklet_schedule(&dev_priv->hdmi_audio_bufferdone_tasklet);
+	}
+#endif
+}
+
+/**
+ * Display controller interrupt handler.
+ */
+static void psb_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat)
+{
+
+	if (vdc_stat & _PSB_PIPEA_EVENT_FLAG) {
+		mid_pipe_event_handler(dev, 0);
+	}
+
+	if (vdc_stat & _MDFLD_PIPEB_EVENT_FLAG) {
+		mid_pipe_event_handler(dev, 1);
+	}
+
+	if (vdc_stat & _MDFLD_PIPEC_EVENT_FLAG) {
+		mid_pipe_event_handler(dev, 2);
+	}
+}
+
+/**
+ * Registration function for RGX irq handler
+ * When we get a RGX irq, we call the handler function
+ * We do not want RGX to register their own irq_handler
+ * since they would be getting interrupted for all
+ * gunit interrupts (display controller, video encoder,
+ * video decoder etc)
+ * This is because we just have a single PCI device for
+ * all of "graphics".
+ */
+
+void register_rgx_irq_handler(int (*pfn_rgxIrqHandler) (void *), void * pData)
+{
+	if (gpDrmDevice){
+		struct drm_psb_private *dev_priv =
+		    (struct drm_psb_private *)gpDrmDevice->dev_private;
+		dev_priv->pfn_rgxIrqHandler = pfn_rgxIrqHandler;
+		dev_priv->prgx_irqData = pData;
+	}
+}
+
+irqreturn_t psb_irq_handler(DRM_IRQ_ARGS)
+{
+	struct drm_device *dev = (struct drm_device *)arg;
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *)dev->dev_private;
+
+	uint32_t vdc_stat, dsp_int = 0, sgx_int = 0, msvdx_int = 0;
+	uint32_t topaz_int = 0, vsp_int = 0;
+	int handled = 0;
+	unsigned long irq_flags;
+
+	/*      PSB_DEBUG_ENTRY("\n"); */
+
+	spin_lock_irqsave(&dev_priv->irqmask_lock, irq_flags);
+
+	vdc_stat = PSB_RVDC32(PSB_INT_IDENTITY_R);
+
+	if (vdc_stat & _MDFLD_DISP_ALL_IRQ_FLAG) {
+		PSB_DEBUG_IRQ("Got DISP interrupt\n");
+		dsp_int = 1;
+	}
+
+	if (vdc_stat & _PSB_IRQ_SGX_FLAG) {
+		PSB_DEBUG_IRQ("Got SGX interrupt\n");
+		sgx_int = 1;
+	}
+	if (vdc_stat & _PSB_IRQ_MSVDX_FLAG) {
+		PSB_DEBUG_IRQ("Got MSVDX interrupt\n");
+		msvdx_int = 1;
+	}
+
+	if (vdc_stat & _LNC_IRQ_TOPAZ_FLAG) {
+		PSB_DEBUG_IRQ("Got TOPAX interrupt\n");
+		topaz_int = 1;
+	}
+
+	if (vdc_stat & _TNG_IRQ_VSP_FLAG) {
+		PSB_DEBUG_IRQ("Got VSP interrupt\n");
+		vsp_int = 1;
+	}
+
+	vdc_stat &= dev_priv->vdc_irq_mask;
+	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irq_flags);
+
+	if (dsp_int && ospm_power_is_hw_on(OSPM_DISPLAY_ISLAND)) {
+		psb_vdc_interrupt(dev, vdc_stat);
+		handled = 1;
+	}
+
+	if (msvdx_int && (IS_FLDS(dev)
+			  || ospm_power_is_hw_on(OSPM_VIDEO_DEC_ISLAND))) {
+		psb_msvdx_interrupt(dev);
+		handled = 1;
+	}
+	if ((topaz_int)) {
+		tng_topaz_interrupt(dev);
+		handled = 1;
+	}
+#ifdef SUPPORT_VSP
+	if (vsp_int) {
+		vsp_interrupt(dev);
+		handled = 1;
+		vdc_stat &= ~_TNG_IRQ_VSP_FLAG;
+	}
+#endif
+
+	if (sgx_int) {
+		if (dev_priv->pfn_rgxIrqHandler){
+			handled = dev_priv->pfn_rgxIrqHandler(
+					dev_priv->prgx_irqData) ? 1 : 0;
+		}
+	}
+
+	PSB_WVDC32(vdc_stat, PSB_INT_IDENTITY_R);
+	(void)PSB_RVDC32(PSB_INT_IDENTITY_R);
+	DRM_READMEMORYBARRIER();
+
+	if (!handled)
+		return IRQ_NONE;
+
+	return IRQ_HANDLED;
+}
+
+void psb_irq_preinstall(struct drm_device *dev)
+{
+	psb_irq_preinstall_islands(dev, OSPM_ALL_ISLANDS);
+}
+
+/**
+ * FIXME: should I remove display irq enable here??
+ */
+void psb_irq_preinstall_islands(struct drm_device *dev, int hw_islands)
+{
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *)dev->dev_private;
+	unsigned long irqflags;
+
+	PSB_DEBUG_IRQ("\n");
+
+	if (dev_priv->b_dsr_enable)
+		dev_priv->exit_idle(dev, MDFLD_DSR_2D_3D, 0, true);
+
+	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+	if (hw_islands & OSPM_DISPLAY_ISLAND) {
+		if (ospm_power_is_hw_on(OSPM_DISPLAY_ISLAND)) {
+			if (dev->vblank_enabled[0])
+				dev_priv->vdc_irq_mask |= _PSB_PIPEA_EVENT_FLAG;
+			if (dev->vblank_enabled[1])
+				dev_priv->vdc_irq_mask |=
+				    _MDFLD_PIPEB_EVENT_FLAG;
+			if (dev->vblank_enabled[2])
+				dev_priv->vdc_irq_mask |=
+				    _MDFLD_PIPEC_EVENT_FLAG;
+		}
+	}
+	if (hw_islands & OSPM_GRAPHICS_ISLAND) {
+		dev_priv->vdc_irq_mask |= _PSB_IRQ_SGX_FLAG;
+	}
+
+	if (hw_islands & OSPM_VIDEO_DEC_ISLAND)
+		if (IS_MID(dev) && ospm_power_is_hw_on(OSPM_VIDEO_DEC_ISLAND))
+			dev_priv->vdc_irq_mask |= _PSB_IRQ_MSVDX_FLAG;
+
+	if (hw_islands & OSPM_VIDEO_ENC_ISLAND)
+		if (IS_MID(dev) && ospm_power_is_hw_on(OSPM_VIDEO_ENC_ISLAND))
+			dev_priv->vdc_irq_mask |= _LNC_IRQ_TOPAZ_FLAG;
+
+	if (hw_islands & OSPM_VIDEO_VPP_ISLAND)
+		if (IS_MID(dev) && ospm_power_is_hw_on(OSPM_VIDEO_VPP_ISLAND))
+			dev_priv->vdc_irq_mask |= _TNG_IRQ_VSP_FLAG;
+
+	/*This register is safe even if display island is off*/
+	PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
+
+	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+}
+
+int psb_irq_postinstall(struct drm_device *dev)
+{
+	return psb_irq_postinstall_islands(dev, OSPM_ALL_ISLANDS);
+}
+
+int psb_irq_postinstall_islands(struct drm_device *dev, int hw_islands)
+{
+
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *)dev->dev_private;
+	unsigned long irqflags;
+
+	PSB_DEBUG_IRQ("\n");
+
+	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+	/*This register is safe even if display island is off */
+	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+
+	if (IS_MID(dev) && !dev_priv->topaz_disabled)
+		if (hw_islands & OSPM_VIDEO_ENC_ISLAND)
+			if (ospm_power_is_hw_on(OSPM_VIDEO_ENC_ISLAND)) {
+				tng_topaz_enableirq(dev);
+
+			}
+
+	if (hw_islands & OSPM_VIDEO_DEC_ISLAND)
+		psb_msvdx_enableirq(dev);
+
+#ifdef SUPPORT_VSP
+	if (hw_islands & OSPM_VIDEO_VPP_ISLAND)
+		vsp_enableirq(dev);
+#endif
+
+	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+
+	return 0;
+}
+
+void psb_irq_uninstall(struct drm_device *dev)
+{
+	psb_irq_uninstall_islands(dev, OSPM_ALL_ISLANDS);
+}
+
+void psb_irq_uninstall_islands(struct drm_device *dev, int hw_islands)
+{
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *)dev->dev_private;
+	unsigned long irqflags;
+
+	PSB_DEBUG_IRQ("\n");
+
+	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+	if (hw_islands & OSPM_DISPLAY_ISLAND)
+		dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG |
+					  _PSB_IRQ_MSVDX_FLAG |
+					  _LNC_IRQ_TOPAZ_FLAG |
+					  _TNG_IRQ_VSP_FLAG;
+
+	/*TODO: remove follwoing code */
+	if (hw_islands & OSPM_GRAPHICS_ISLAND) {
+		dev_priv->vdc_irq_mask &= ~_PSB_IRQ_SGX_FLAG;
+	}
+
+	if ((hw_islands & OSPM_VIDEO_DEC_ISLAND) && IS_MID(dev))
+		dev_priv->vdc_irq_mask &= ~_PSB_IRQ_MSVDX_FLAG;
+
+	if ((hw_islands & OSPM_VIDEO_ENC_ISLAND) && IS_MID(dev))
+		dev_priv->vdc_irq_mask &= ~_LNC_IRQ_TOPAZ_FLAG;
+
+	if ((hw_islands & OSPM_VIDEO_VPP_ISLAND) && IS_MID(dev))
+		dev_priv->vdc_irq_mask &= ~_TNG_IRQ_VSP_FLAG;
+
+	/*These two registers are safe even if display island is off*/
+	PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
+	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+
+	wmb();
+
+	/*This register is safe even if display island is off */
+	PSB_WVDC32(PSB_RVDC32(PSB_INT_IDENTITY_R), PSB_INT_IDENTITY_R);
+
+	if (IS_MID(dev) && !dev_priv->topaz_disabled)
+		if (hw_islands & OSPM_VIDEO_ENC_ISLAND)
+			if (ospm_power_is_hw_on(OSPM_VIDEO_ENC_ISLAND)) {
+				tng_topaz_disableirq(dev);
+			}
+
+	if (hw_islands & OSPM_VIDEO_DEC_ISLAND)
+		if (ospm_power_is_hw_on(OSPM_VIDEO_DEC_ISLAND))
+			psb_msvdx_disableirq(dev);
+
+#ifdef SUPPORT_VSP
+	if (hw_islands & OSPM_VIDEO_VPP_ISLAND)
+		if (ospm_power_is_hw_on(OSPM_VIDEO_VPP_ISLAND))
+			vsp_disableirq(dev);
+#endif
+	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+}
+
+void psb_irq_turn_on_dpst(struct drm_device *dev)
+{
+#ifdef CONFIG_SUPPORT_MIPI
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *)dev->dev_private;
+	struct mdfld_dsi_config *dsi_config = NULL;
+	struct mdfld_dsi_hw_context *ctx = NULL;
+	unsigned long irqflags;
+
+	if(!dev_priv)
+		return;
+
+	dsi_config = dev_priv->dsi_configs[0];
+	if(!dsi_config)
+		return;
+
+	ctx = &dsi_config->dsi_hw_context;
+
+	/* TODO: use DPST spinlock */
+	/* FIXME: revisit the power island when touching the DPST feature. */
+	if (power_island_get(OSPM_DISPLAY_A)) {
+
+		PSB_WVDC32(ctx->histogram_logic_ctrl, HISTOGRAM_LOGIC_CONTROL);
+		PSB_WVDC32(ctx->histogram_intr_ctrl, HISTOGRAM_INT_CONTROL);
+
+		spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+		psb_enable_pipestat(dev_priv, 0, PIPE_DPST_EVENT_ENABLE);
+		spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+/*
+		PSB_WVDC32(BIT31, HISTOGRAM_LOGIC_CONTROL);
+		hist_reg = PSB_RVDC32(HISTOGRAM_LOGIC_CONTROL);
+		ctx->histogram_logic_ctrl = hist_reg;
+		PSB_WVDC32(BIT31, HISTOGRAM_INT_CONTROL);
+		hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
+		ctx->histogram_intr_ctrl = hist_reg;
+
+		PSB_WVDC32(0x80010100, PWM_CONTROL_LOGIC);
+		pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
+		PSB_WVDC32(pwm_reg | PWM_PHASEIN_ENABLE |
+			   PWM_PHASEIN_INT_ENABLE, PWM_CONTROL_LOGIC);
+		pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
+
+		hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
+		PSB_WVDC32(hist_reg | HISTOGRAM_INT_CTRL_CLEAR,
+			   HISTOGRAM_INT_CONTROL);
+		pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
+		PSB_WVDC32(pwm_reg | 0x80010100 | PWM_PHASEIN_ENABLE,
+			   PWM_CONTROL_LOGIC);
+*/
+
+		power_island_put(OSPM_DISPLAY_A);
+	}
+#else
+	return;
+#endif
+}
+
+int psb_irq_enable_dpst(struct drm_device *dev)
+{
+	/* enable DPST */
+	//mid_enable_pipe_event(dev_priv, 0);
+	psb_irq_turn_on_dpst(dev);
+
+	return 0;
+}
+
+void psb_irq_turn_off_dpst(struct drm_device *dev)
+{
+#ifdef CONFIG_SUPPORT_MIPI
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *)dev->dev_private;
+	struct mdfld_dsi_config *dsi_config = NULL;
+	unsigned long irqflags;
+
+	if (!dev_priv)
+		return;
+	dsi_config = dev_priv->dsi_configs[0];
+	if(!dsi_config)
+		return;
+
+	/* TODO: use DPST spinlock */
+	/* FIXME: revisit the power island when touching the DPST feature. */
+	if (power_island_get(OSPM_DISPLAY_A)) {
+
+               PSB_WVDC32(PSB_RVDC32(HISTOGRAM_INT_CONTROL) & 0x7fffffff,
+			HISTOGRAM_INT_CONTROL);
+
+		spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+		psb_disable_pipestat(dev_priv, 0, PIPE_DPST_EVENT_ENABLE);
+		spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+
+/*
+		pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
+		PSB_WVDC32(pwm_reg & !(PWM_PHASEIN_INT_ENABLE),
+			   PWM_CONTROL_LOGIC);
+		pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
+*/
+
+		power_island_put(OSPM_DISPLAY_A);
+	}
+#else
+	return;
+#endif
+}
+
+int psb_irq_disable_dpst(struct drm_device *dev)
+{
+	//mid_disable_pipe_event(dev_priv, 0);
+	psb_irq_turn_off_dpst(dev);
+
+	return 0;
+}
+
+#ifdef PSB_FIXME
+static int psb_vblank_do_wait(struct drm_device *dev,
+			      unsigned int *sequence, atomic_t * counter)
+{
+	unsigned int cur_vblank;
+	int ret = 0;
+	DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
+		    (((cur_vblank = atomic_read(counter))
+		      - *sequence) <= (1 << 23)));
+	*sequence = cur_vblank;
+
+	return ret;
+}
+#endif
+
+/*
+ * It is used to enable VBLANK interrupt
+ */
+int psb_enable_vblank(struct drm_device *dev, int pipe)
+{
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *)dev->dev_private;
+	unsigned long irqflags;
+	uint32_t reg_val = 0;
+	uint32_t pipeconf_reg = mid_pipeconf(pipe);
+#ifdef CONFIG_SUPPORT_MIPI
+	mdfld_dsi_encoder_t encoder_type;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	encoder_type = is_panel_vid_or_cmd(dev);
+	if (IS_MRFLD(dev) && (encoder_type == MDFLD_DSI_ENCODER_DBI) &&
+			(pipe != 1))
+		return mdfld_enable_te(dev, pipe);
+#else
+	if (pipe != 1)
+		return -EINVAL;
+#endif
+	reg_val = REG_READ(pipeconf_reg);
+
+	if (!(reg_val & PIPEACONF_ENABLE)) {
+		DRM_ERROR("%s: pipe %d is disabled %#x\n",
+			  __func__, pipe, reg_val);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+	mid_enable_pipe_event(dev_priv, pipe);
+	psb_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE);
+
+	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+	PSB_DEBUG_ENTRY("%s: Enabled VBlank for pipe %d\n", __func__, pipe);
+
+	return 0;
+}
+
+/*
+ * It is used to disable VBLANK interrupt
+ */
+void psb_disable_vblank(struct drm_device *dev, int pipe)
+{
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *)dev->dev_private;
+	unsigned long irqflags;
+#ifdef CONFIG_SUPPORT_MIPI
+	mdfld_dsi_encoder_t encoder_type;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	encoder_type = is_panel_vid_or_cmd(dev);
+	if (IS_MRFLD(dev) && (encoder_type == MDFLD_DSI_ENCODER_DBI) &&
+			(pipe != 1)) {
+		mdfld_disable_te(dev, pipe);
+		return;
+	}
+#else
+	if (pipe != 1)
+		return;
+#endif
+	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+	psb_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE);
+	mid_disable_pipe_event(dev_priv, pipe);
+
+	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+	PSB_DEBUG_ENTRY("%s: Disabled VBlank for pipe %d\n", __func__, pipe);
+}
+
+/* Called from drm generic code, passed a 'crtc', which
+ * we use as a pipe index
+ */
+u32 psb_get_vblank_counter(struct drm_device *dev, int pipe)
+{
+	uint32_t high_frame = PIPEAFRAMEHIGH;
+	uint32_t low_frame = PIPEAFRAMEPIXEL;
+	uint32_t pipeconf_reg = PIPEACONF;
+	uint32_t reg_val = 0;
+	uint32_t high1 = 0, high2 = 0, low = 0, count = 0;
+
+	switch (pipe) {
+	case 0:
+		break;
+	case 1:
+		high_frame = PIPEBFRAMEHIGH;
+		low_frame = PIPEBFRAMEPIXEL;
+		pipeconf_reg = PIPEBCONF;
+		break;
+	case 2:
+		high_frame = PIPECFRAMEHIGH;
+		low_frame = PIPECFRAMEPIXEL;
+		pipeconf_reg = PIPECCONF;
+		break;
+	default:
+		DRM_ERROR("%s, invalded pipe.\n", __func__);
+		return 0;
+	}
+
+	reg_val = REG_READ(pipeconf_reg);
+
+	if (!(reg_val & PIPEACONF_ENABLE)) {
+		DRM_DEBUG("trying to get vblank count for disabled pipe %d\n",
+			  pipe);
+		goto psb_get_vblank_counter_exit;
+	}
+
+	/* we always get 0 reading these two registers on MOFD
+	 * and reading these registers can causes UI freeze
+	 * when connected with HDMI using 640x480p / 720x480p
+	 */
+	if (IS_MOFD(dev))
+		return 0;
+
+	/*
+	 * High & low register fields aren't synchronized, so make sure
+	 * we get a low value that's stable across two reads of the high
+	 * register.
+	 */
+	do {
+		high1 = ((REG_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
+			 PIPE_FRAME_HIGH_SHIFT);
+		low = ((REG_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
+		       PIPE_FRAME_LOW_SHIFT);
+		high2 = ((REG_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
+			 PIPE_FRAME_HIGH_SHIFT);
+	} while (high1 != high2);
+
+	count = (high1 << 8) | low;
+
+ psb_get_vblank_counter_exit:
+
+	return count;
+}
+
+int intel_get_vblank_timestamp(struct drm_device *dev, int pipe,
+		int *max_error,
+		struct timeval *vblank_time,
+		unsigned flags)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *)dev->dev_private;
+	struct drm_crtc *crtc;
+
+	if (pipe < 0 || pipe >= dev_priv->num_pipe) {
+		DRM_ERROR("Invalid crtc %d\n", pipe);
+		return -EINVAL;
+	}
+
+	/* Get drm_crtc to timestamp: */
+	crtc = psb_intel_get_crtc_from_pipe(dev, pipe);
+	if (crtc == NULL) {
+		DRM_ERROR("Invalid crtc %d\n", pipe);
+		return -EINVAL;
+	}
+
+	if (!crtc->enabled) {
+		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
+		return -EBUSY;
+	}
+
+	/* Helper routine in DRM core does all the work: */
+	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
+			vblank_time, flags,
+			crtc);
+}
+
+int intel_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
+		int *vpos, int *hpos)
+{
+	u32 vbl = 0, position = 0;
+	int vbl_start, vbl_end, vtotal;
+	bool in_vbl = true;
+	int pipeconf_reg = PIPEACONF;
+	int vtot_reg = VTOTAL_A;
+	int dsl_reg = PIPEADSL;
+	int vblank_reg = VBLANK_A;
+	int ret = 0;
+
+	switch (pipe) {
+	case 0:
+		break;
+	case 1:
+		pipeconf_reg = PIPEBCONF;
+		vtot_reg = VTOTAL_B;
+		dsl_reg = PIPEBDSL;
+		vblank_reg = VBLANK_B;
+		break;
+	case 2:
+		pipeconf_reg = PIPECCONF;
+		vtot_reg = VTOTAL_C;
+		dsl_reg = PIPECDSL;
+		vblank_reg = VBLANK_C;
+		break;
+	default:
+		DRM_ERROR("Illegal Pipe Number.\n");
+		return 0;
+	}
+
+	if (!REG_READ(pipeconf_reg)) {
+		DRM_DEBUG_DRIVER("Failed to get scanoutpos in pipe %d\n", pipe);
+		return 0;
+	}
+
+	/* Get vtotal. */
+	vtotal = 1 + ((REG_READ(vtot_reg) >> 16) & 0x1fff);
+
+	position = REG_READ(dsl_reg);
+
+	/*
+	 * Decode into vertical scanout position. Don't have
+	 * horizontal scanout position.
+	 */
+	*vpos = position & 0x1fff;
+	*hpos = 0;
+
+	/* Query vblank area. */
+	vbl = REG_READ(vblank_reg);
+
+	/* Test position against vblank region. */
+	vbl_start = vbl & 0x1fff;
+	vbl_end = (vbl >> 16) & 0x1fff;
+
+	if ((*vpos < vbl_start) || (*vpos > vbl_end))
+		in_vbl = false;
+
+	/* Inside "upper part" of vblank area? Apply corrective offset: */
+	if (in_vbl && (*vpos >= vbl_start))
+		*vpos = *vpos - vtotal;
+
+	/* Readouts valid? */
+	if (vbl > 0)
+		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
+
+	/* In vblank? */
+	if (in_vbl)
+		ret |= DRM_SCANOUTPOS_INVBL;
+
+	return ret;
+}
+
+/*
+ * It is used to enable TE interrupt
+ */
+int mdfld_enable_te(struct drm_device *dev, int pipe)
+{
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *)dev->dev_private;
+	unsigned long irqflags;
+	uint32_t pipeconf_reg = mid_pipeconf(pipe);
+	uint32_t retry = 0;
+
+	while ((REG_READ(pipeconf_reg) & PIPEACONF_ENABLE) == 0) {
+		retry++;
+		if (retry > 10) {
+			DRM_ERROR("%s: pipe %d is disabled\n", __func__, pipe);
+			return -EINVAL;
+		}
+		udelay(3);
+	}
+	if (retry != 0)
+		DRM_INFO("Take %d retries to get pipe %d config register\n", retry, pipe);
+
+	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+	mid_enable_pipe_event(dev_priv, pipe);
+	psb_enable_pipestat(dev_priv, pipe, PIPE_TE_ENABLE);
+	psb_enable_pipestat(dev_priv, pipe, PIPE_FRAME_DONE_ENABLE);
+
+	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+	PSB_DEBUG_ENTRY("%s: Enabled TE for pipe %d\n", __func__, pipe);
+
+	return 0;
+}
+
+/*
+ * It is used to recover TE interrupt in case pysical stat mismatch with logical stat
+ */
+int mdfld_recover_te(struct drm_device *dev, int pipe)
+{
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *)dev->dev_private;
+	unsigned long irqflags;
+	uint32_t reg_val = 0;
+	uint32_t pipeconf_reg = mid_pipeconf(pipe);
+
+	reg_val = REG_READ(pipeconf_reg);
+
+	if (!(reg_val & PIPEACONF_ENABLE))
+		return -EINVAL;
+
+	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+	psb_recover_pipestat(dev_priv, pipe, PIPE_TE_ENABLE);
+
+	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+
+	return 0;
+}
+
+/*
+ * It is used to disable TE interrupt
+ */
+void mdfld_disable_te(struct drm_device *dev, int pipe)
+{
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *)dev->dev_private;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+	mid_disable_pipe_event(dev_priv, pipe);
+	psb_disable_pipestat(dev_priv, pipe, PIPE_FRAME_DONE_ENABLE);
+	psb_disable_pipestat(dev_priv, pipe, 
+		(PIPE_TE_ENABLE | PIPE_DPST_EVENT_ENABLE));
+
+	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+	PSB_DEBUG_ENTRY("%s: Disabled TE for pipe %d\n", __func__, pipe);
+}
+
+#ifdef ENABLE_HW_REPEAT_FRAME
+int mrfl_enable_repeat_frame_intr(struct drm_device *dev, int idle_frame_count)
+{
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *)dev->dev_private;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+	/* Disable first to restart the count. This is for the
+	 * scenario that we are changing the treshold count and the
+	 * new count is lower than the new count - we don't want
+	 * interrupt immediately
+	 */
+	PSB_WVDC32(PIPEA_CALCULATE_CRC_DISABLE, PIPEA_CALCULATE_CRC_REG);
+	PSB_WVDC32(PIPEA_REPEAT_FRM_CNT_TRESHOLD_DISABLE,
+			PIPEA_REPEAT_FRM_CNT_TRESHOLD_REG);
+	/*Enable the CRC calculation*/
+	PSB_WVDC32(PIPEA_CALCULATE_CRC_ENABLE, PIPEA_CALCULATE_CRC_REG);
+
+	mid_enable_pipe_event(dev_priv, MDFLD_PIPE_A);
+	/*Enable receiving the interrupt through pipestat register*/
+	psb_enable_pipestat(dev_priv, MDFLD_PIPE_A, PIPE_REPEATED_FRAME_ENABLE);
+
+	PSB_WVDC32(PIPEA_REPEAT_FRM_CNT_TRESHOLD_ENABLE | idle_frame_count,
+		PIPEA_REPEAT_FRM_CNT_TRESHOLD_REG);
+
+	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+	PSB_DEBUG_PM("Enabled Repeat Frame Interrupt\n");
+	return 0;
+}
+
+void mrfl_disable_repeat_frame_intr(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *)dev->dev_private;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+	PSB_WVDC32(PIPEA_CALCULATE_CRC_DISABLE, PIPEA_CALCULATE_CRC_REG);
+	PSB_WVDC32(PIPEA_REPEAT_FRM_CNT_TRESHOLD_DISABLE,
+			PIPEA_REPEAT_FRM_CNT_TRESHOLD_REG);
+	psb_disable_pipestat(dev_priv, MDFLD_PIPE_A, PIPE_REPEATED_FRAME_ENABLE);
+	PSB_WVDC32(PIPEA_CALCULATE_CRC_DISABLE, PIPEA_CALCULATE_CRC_REG);
+
+
+	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+	PSB_DEBUG_PM("Disabled Repeat Frame Interrupt\n");
+}
+#endif
+
+int mid_irq_enable_hdmi_audio(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *)dev->dev_private;
+	unsigned long irqflags;
+	u32 reg_val = 0, mask = 0;
+
+	reg_val = REG_READ(PIPEBCONF);
+	if (!(reg_val & PIPEACONF_ENABLE))
+		return -EINVAL;
+
+	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+	/* enable HDMI audio interrupt */
+	mid_enable_pipe_event(dev_priv, 1);
+	dev_priv->pipestat[1] &= ~PIPE_HDMI_AUDIO_INT_MASK;
+	mask = dev_priv->hdmi_audio_interrupt_mask;
+	psb_enable_pipestat(dev_priv, 1, mask);
+
+	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+	return 0;
+}
+
+int mid_irq_disable_hdmi_audio(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *)dev->dev_private;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+	mid_disable_pipe_event(dev_priv, 1);
+	psb_disable_pipestat(dev_priv, 1, PIPE_HDMI_AUDIO_INT_MASK);
+
+	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+	return 0;
+}
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/psb_irq.h b/drivers/external_drivers/intel_media/display/tng/drv/psb_irq.h
new file mode 100644
index 0000000..d84e718
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/psb_irq.h
@@ -0,0 +1,54 @@
+/**************************************************************************
+ * Copyright (c) 2009, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *    Benjamin Defnet <benjamin.r.defnet@intel.com>
+ *    Rajesh Poornachandran <rajesh.poornachandran@intel.com>
+ *
+ **************************************************************************/
+
+#ifndef _SYSIRQ_H_
+#define _SYSIRQ_H_
+
+#include <drm/drmP.h>
+
+bool sysirq_init(struct drm_device *dev);
+void sysirq_uninit(struct drm_device *dev);
+
+void psb_irq_preinstall(struct drm_device *dev);
+int psb_irq_postinstall(struct drm_device *dev);
+void psb_irq_uninstall(struct drm_device *dev);
+irqreturn_t psb_irq_handler(DRM_IRQ_ARGS);
+
+void psb_irq_preinstall_islands(struct drm_device *dev, int hw_islands);
+int psb_irq_postinstall_islands(struct drm_device *dev, int hw_islands);
+void psb_irq_uninstall_islands(struct drm_device *dev, int hw_islands);
+
+int psb_irq_enable_dpst(struct drm_device *dev);
+int psb_irq_disable_dpst(struct drm_device *dev);
+void psb_irq_turn_on_dpst(struct drm_device *dev);
+void psb_irq_turn_off_dpst(struct drm_device *dev);
+int psb_enable_vblank(struct drm_device *dev, int pipe);
+void psb_disable_vblank(struct drm_device *dev, int pipe);
+u32 psb_get_vblank_counter(struct drm_device *dev, int pipe);
+extern int intel_get_vblank_timestamp(struct drm_device *dev, int pipe,
+		int *max_error, struct timeval *vblank_time, unsigned flags);
+extern int intel_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
+		int *vpos, int *hpos);
+
+void register_rgx_irq_handler(int (*pfn_rgxIrqHandler) (void *), void * pData);
+#endif				//_SYSIRQ_H_
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/psb_powermgmt.h b/drivers/external_drivers/intel_media/display/tng/drv/psb_powermgmt.h
new file mode 100644
index 0000000..fa53d18
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/psb_powermgmt.h
@@ -0,0 +1,30 @@
+/**************************************************************************
+ * Copyright (c) 2009, Intel Corporation.
+ * All Rights Reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+#ifndef _PSB_POWERMGMT_H_
+#define _PSB_POWERMGMT_H_
+
+#include "ospm/pwr_mgmt.h"
+
+#endif /*_PSB_POWERMGMT_H_*/
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/psb_reg.h b/drivers/external_drivers/intel_media/display/tng/drv/psb_reg.h
new file mode 100644
index 0000000..e62b168
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/psb_reg.h
@@ -0,0 +1,576 @@
+/**************************************************************************
+ *
+ * Copyright (c) (2005-2007) Imagination Technologies Limited.
+ * Copyright (c) 2007, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA..
+ *
+ **************************************************************************/
+
+#ifndef _PSB_REG_H_
+#define _PSB_REG_H_
+
+#define PSB_CR_CLKGATECTL                0x0000
+#define _PSB_C_CLKGATECTL_AUTO_MAN_REG   (1 << 24)
+#define _PSB_C_CLKGATECTL_USE_CLKG_SHIFT (20)
+#define _PSB_C_CLKGATECTL_USE_CLKG_MASK  (0x3 << 20)
+#define _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT (16)
+#define _PSB_C_CLKGATECTL_DPM_CLKG_MASK  (0x3 << 16)
+#define _PSB_C_CLKGATECTL_TA_CLKG_SHIFT  (12)
+#define _PSB_C_CLKGATECTL_TA_CLKG_MASK   (0x3 << 12)
+#define _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT (8)
+#define _PSB_C_CLKGATECTL_TSP_CLKG_MASK  (0x3 << 8)
+#define _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT (4)
+#define _PSB_C_CLKGATECTL_ISP_CLKG_MASK  (0x3 << 4)
+#define _PSB_C_CLKGATECTL_2D_CLKG_SHIFT  (0)
+#define _PSB_C_CLKGATECTL_2D_CLKG_MASK   (0x3 << 0)
+#define _PSB_C_CLKGATECTL_CLKG_ENABLED   (0)
+#define _PSB_C_CLKGATECTL_CLKG_DISABLED  (1)
+#define _PSB_C_CLKGATECTL_CLKG_AUTO      (2)
+
+#define PSB_CR_CORE_ID                   0x0010
+#define _PSB_CC_ID_ID_SHIFT              (16)
+#define _PSB_CC_ID_ID_MASK               (0xFFFF << 16)
+#define _PSB_CC_ID_CONFIG_SHIFT          (0)
+#define _PSB_CC_ID_CONFIG_MASK           (0xFFFF << 0)
+
+#define PSB_CR_CORE_REVISION               0x0014
+#define _PSB_CC_REVISION_DESIGNER_SHIFT    (24)
+#define _PSB_CC_REVISION_DESIGNER_MASK     (0xFF << 24)
+#define _PSB_CC_REVISION_MAJOR_SHIFT       (16)
+#define _PSB_CC_REVISION_MAJOR_MASK        (0xFF << 16)
+#define _PSB_CC_REVISION_MINOR_SHIFT       (8)
+#define _PSB_CC_REVISION_MINOR_MASK        (0xFF << 8)
+#define _PSB_CC_REVISION_MAINTENANCE_SHIFT (0)
+#define _PSB_CC_REVISION_MAINTENANCE_MASK  (0xFF << 0)
+
+#define PSB_CR_DESIGNER_REV_FIELD1       0x0018
+
+#define PSB_CR_SOFT_RESET                0x0080
+#define _PSB_CS_RESET_TSP_RESET          (1 << 6)
+#define _PSB_CS_RESET_ISP_RESET          (1 << 5)
+#define _PSB_CS_RESET_USE_RESET          (1 << 4)
+#define _PSB_CS_RESET_TA_RESET           (1 << 3)
+#define _PSB_CS_RESET_DPM_RESET          (1 << 2)
+#define _PSB_CS_RESET_TWOD_RESET         (1 << 1)
+#define _PSB_CS_RESET_BIF_RESET          (1 << 0)
+
+#define PSB_CR_DESIGNER_REV_FIELD2       0x001C
+
+#define PSB_CR_EVENT_HOST_ENABLE2        0x0110
+
+#define PSB_CR_EVENT_STATUS2             0x0118
+
+#define PSB_CR_EVENT_HOST_CLEAR2         0x0114
+#define _PSB_CE2_BIF_REQUESTER_FAULT     (1 << 4)
+
+#define PSB_CR_EVENT_STATUS              0x012C
+
+#define PSB_CR_EVENT_HOST_ENABLE         0x0130
+
+#define PSB_CR_EVENT_HOST_CLEAR          0x0134
+#define _PSB_CE_MASTER_INTERRUPT         (1 << 31)
+#define _PSB_CE_TA_DPM_FAULT             (1 << 28)
+#define _PSB_CE_TWOD_COMPLETE            (1 << 27)
+#define _PSB_CE_DPM_OUT_OF_MEMORY_ZLS    (1 << 25)
+#define _PSB_CE_DPM_TA_MEM_FREE          (1 << 24)
+#define _PSB_CE_PIXELBE_END_RENDER       (1 << 18)
+#define _PSB_CE_SW_EVENT                 (1 << 14)
+#define _PSB_CE_TA_FINISHED              (1 << 13)
+#define _PSB_CE_TA_TERMINATE             (1 << 12)
+#define _PSB_CE_DPM_REACHED_MEM_THRESH   (1 << 3)
+#define _PSB_CE_DPM_OUT_OF_MEMORY_GBL    (1 << 2)
+#define _PSB_CE_DPM_OUT_OF_MEMORY_MT     (1 << 1)
+#define _PSB_CE_DPM_3D_MEM_FREE          (1 << 0)
+
+#define PSB_USE_OFFSET_MASK              0x0007FFFF
+#define PSB_USE_OFFSET_SIZE              (PSB_USE_OFFSET_MASK + 1)
+#define PSB_CR_USE_CODE_BASE0            0x0A0C
+#define PSB_CR_USE_CODE_BASE1            0x0A10
+#define PSB_CR_USE_CODE_BASE2            0x0A14
+#define PSB_CR_USE_CODE_BASE3            0x0A18
+#define PSB_CR_USE_CODE_BASE4            0x0A1C
+#define PSB_CR_USE_CODE_BASE5            0x0A20
+#define PSB_CR_USE_CODE_BASE6            0x0A24
+#define PSB_CR_USE_CODE_BASE7            0x0A28
+#define PSB_CR_USE_CODE_BASE8            0x0A2C
+#define PSB_CR_USE_CODE_BASE9            0x0A30
+#define PSB_CR_USE_CODE_BASE10           0x0A34
+#define PSB_CR_USE_CODE_BASE11           0x0A38
+#define PSB_CR_USE_CODE_BASE12           0x0A3C
+#define PSB_CR_USE_CODE_BASE13           0x0A40
+#define PSB_CR_USE_CODE_BASE14           0x0A44
+#define PSB_CR_USE_CODE_BASE15           0x0A48
+#define PSB_CR_USE_CODE_BASE(_i) (0x0A0C + ((_i) << 2))
+#define _PSB_CUC_BASE_DM_SHIFT           (25)
+#define _PSB_CUC_BASE_DM_MASK            (0x3 << 25)
+#define _PSB_CUC_BASE_ADDR_SHIFT         (0)	/* 1024-bit aligned address? */
+#define _PSB_CUC_BASE_ADDR_ALIGNSHIFT    (7)
+#define _PSB_CUC_BASE_ADDR_MASK          (0x1FFFFFF << 0)
+#define _PSB_CUC_DM_VERTEX		 (0)
+#define _PSB_CUC_DM_PIXEL		 (1)
+#define _PSB_CUC_DM_RESERVED		 (2)
+#define _PSB_CUC_DM_EDM	                 (3)
+
+#define PSB_CR_PDS_EXEC_BASE             0x0AB8
+#define _PSB_CR_PDS_EXEC_BASE_ADDR_SHIFT (20)	/* 1MB aligned address */
+#define _PSB_CR_PDS_EXEC_BASE_ADDR_ALIGNSHIFT (20)
+
+#define PSB_CR_EVENT_KICKER              0x0AC4
+#define _PSB_CE_KICKER_ADDRESS_SHIFT     (4)	/* 128-bit aligned address */
+
+#define PSB_CR_EVENT_KICK                0x0AC8
+#define _PSB_CE_KICK_NOW                 (1 << 0)
+
+#define PSB_CR_BIF_DIR_LIST_BASE1        0x0C38
+
+#define PSB_CR_BIF_CTRL                  0x0C00
+#define _PSB_CB_CTRL_CLEAR_FAULT         (1 << 4)
+#define _PSB_CB_CTRL_INVALDC             (1 << 3)
+#define _PSB_CB_CTRL_FLUSH               (1 << 2)
+
+#define PSB_CR_BIF_INT_STAT              0x0C04
+
+#define PSB_CR_BIF_FAULT                 0x0C08
+#define _PSB_CBI_STAT_PF_N_RW            (1 << 14)
+#define _PSB_CBI_STAT_FAULT_SHIFT        (0)
+#define _PSB_CBI_STAT_FAULT_MASK         (0x3FFF << 0)
+#define _PSB_CBI_STAT_FAULT_CACHE        (1 << 1)
+#define _PSB_CBI_STAT_FAULT_TA           (1 << 2)
+#define _PSB_CBI_STAT_FAULT_VDM          (1 << 3)
+#define _PSB_CBI_STAT_FAULT_2D           (1 << 4)
+#define _PSB_CBI_STAT_FAULT_PBE          (1 << 5)
+#define _PSB_CBI_STAT_FAULT_TSP          (1 << 6)
+#define _PSB_CBI_STAT_FAULT_ISP          (1 << 7)
+#define _PSB_CBI_STAT_FAULT_USSEPDS      (1 << 8)
+#define _PSB_CBI_STAT_FAULT_HOST         (1 << 9)
+
+#define PSB_CR_BIF_BANK0                 0x0C78
+
+#define PSB_CR_BIF_BANK1                 0x0C7C
+
+#define PSB_CR_BIF_DIR_LIST_BASE0        0x0C84
+
+#define PSB_CR_BIF_TWOD_REQ_BASE         0x0C88
+#define PSB_CR_BIF_3D_REQ_BASE           0x0CAC
+
+#define PSB_CR_2D_SOCIF                  0x0E18
+#define _PSB_C2_SOCIF_FREESPACE_SHIFT    (0)
+#define _PSB_C2_SOCIF_FREESPACE_MASK     (0xFF << 0)
+#define _PSB_C2_SOCIF_EMPTY              (0x80 << 0)
+
+#define PSB_CR_2D_BLIT_STATUS            0x0E04
+#define _PSB_C2B_STATUS_BUSY             (1 << 24)
+#define _PSB_C2B_STATUS_COMPLETE_SHIFT   (0)
+#define _PSB_C2B_STATUS_COMPLETE_MASK    (0xFFFFFF << 0)
+
+/*
+ * 2D defs.
+ */
+
+/*
+ * 2D Slave Port Data : Block Header's Object Type
+ */
+
+#define	PSB_2D_CLIP_BH                   (0x00000000)
+#define	PSB_2D_PAT_BH                    (0x10000000)
+#define	PSB_2D_CTRL_BH                   (0x20000000)
+#define	PSB_2D_SRC_OFF_BH                (0x30000000)
+#define	PSB_2D_MASK_OFF_BH               (0x40000000)
+#define	PSB_2D_RESERVED1_BH              (0x50000000)
+#define	PSB_2D_RESERVED2_BH              (0x60000000)
+#define	PSB_2D_FENCE_BH                  (0x70000000)
+#define	PSB_2D_BLIT_BH                   (0x80000000)
+#define	PSB_2D_SRC_SURF_BH               (0x90000000)
+#define	PSB_2D_DST_SURF_BH               (0xA0000000)
+#define	PSB_2D_PAT_SURF_BH               (0xB0000000)
+#define	PSB_2D_SRC_PAL_BH                (0xC0000000)
+#define	PSB_2D_PAT_PAL_BH                (0xD0000000)
+#define	PSB_2D_MASK_SURF_BH              (0xE0000000)
+#define	PSB_2D_FLUSH_BH                  (0xF0000000)
+
+/*
+ * Clip Definition block (PSB_2D_CLIP_BH)
+ */
+#define PSB_2D_CLIPCOUNT_MAX             (1)
+#define PSB_2D_CLIPCOUNT_MASK            (0x00000000)
+#define PSB_2D_CLIPCOUNT_CLRMASK         (0xFFFFFFFF)
+#define PSB_2D_CLIPCOUNT_SHIFT           (0)
+/* clip rectangle min & max */
+#define PSB_2D_CLIP_XMAX_MASK            (0x00FFF000)
+#define PSB_2D_CLIP_XMAX_CLRMASK         (0xFF000FFF)
+#define PSB_2D_CLIP_XMAX_SHIFT           (12)
+#define PSB_2D_CLIP_XMIN_MASK            (0x00000FFF)
+#define PSB_2D_CLIP_XMIN_CLRMASK         (0x00FFF000)
+#define PSB_2D_CLIP_XMIN_SHIFT           (0)
+/* clip rectangle offset */
+#define PSB_2D_CLIP_YMAX_MASK            (0x00FFF000)
+#define PSB_2D_CLIP_YMAX_CLRMASK         (0xFF000FFF)
+#define PSB_2D_CLIP_YMAX_SHIFT           (12)
+#define PSB_2D_CLIP_YMIN_MASK            (0x00000FFF)
+#define PSB_2D_CLIP_YMIN_CLRMASK         (0x00FFF000)
+#define PSB_2D_CLIP_YMIN_SHIFT           (0)
+
+/*
+ * Pattern Control (PSB_2D_PAT_BH)
+ */
+#define PSB_2D_PAT_HEIGHT_MASK           (0x0000001F)
+#define PSB_2D_PAT_HEIGHT_SHIFT          (0)
+#define PSB_2D_PAT_WIDTH_MASK            (0x000003E0)
+#define PSB_2D_PAT_WIDTH_SHIFT           (5)
+#define PSB_2D_PAT_YSTART_MASK           (0x00007C00)
+#define PSB_2D_PAT_YSTART_SHIFT          (10)
+#define PSB_2D_PAT_XSTART_MASK           (0x000F8000)
+#define PSB_2D_PAT_XSTART_SHIFT          (15)
+
+/*
+ * 2D Control block (PSB_2D_CTRL_BH)
+ */
+/* Present Flags */
+#define PSB_2D_SRCCK_CTRL                (0x00000001)
+#define PSB_2D_DSTCK_CTRL                (0x00000002)
+#define PSB_2D_ALPHA_CTRL                (0x00000004)
+/* Colour Key Colour (SRC/DST)*/
+#define PSB_2D_CK_COL_MASK               (0xFFFFFFFF)
+#define PSB_2D_CK_COL_CLRMASK            (0x00000000)
+#define PSB_2D_CK_COL_SHIFT              (0)
+/* Colour Key Mask (SRC/DST)*/
+#define PSB_2D_CK_MASK_MASK              (0xFFFFFFFF)
+#define PSB_2D_CK_MASK_CLRMASK           (0x00000000)
+#define PSB_2D_CK_MASK_SHIFT             (0)
+/* Alpha Control (Alpha/RGB)*/
+#define PSB_2D_GBLALPHA_MASK             (0x000FF000)
+#define PSB_2D_GBLALPHA_CLRMASK          (0xFFF00FFF)
+#define PSB_2D_GBLALPHA_SHIFT            (12)
+#define PSB_2D_SRCALPHA_OP_MASK          (0x00700000)
+#define PSB_2D_SRCALPHA_OP_CLRMASK       (0xFF8FFFFF)
+#define PSB_2D_SRCALPHA_OP_SHIFT         (20)
+#define PSB_2D_SRCALPHA_OP_ONE           (0x00000000)
+#define PSB_2D_SRCALPHA_OP_SRC           (0x00100000)
+#define PSB_2D_SRCALPHA_OP_DST           (0x00200000)
+#define PSB_2D_SRCALPHA_OP_SG            (0x00300000)
+#define PSB_2D_SRCALPHA_OP_DG            (0x00400000)
+#define PSB_2D_SRCALPHA_OP_GBL           (0x00500000)
+#define PSB_2D_SRCALPHA_OP_ZERO          (0x00600000)
+#define PSB_2D_SRCALPHA_INVERT           (0x00800000)
+#define PSB_2D_SRCALPHA_INVERT_CLR       (0xFF7FFFFF)
+#define PSB_2D_DSTALPHA_OP_MASK          (0x07000000)
+#define PSB_2D_DSTALPHA_OP_CLRMASK       (0xF8FFFFFF)
+#define PSB_2D_DSTALPHA_OP_SHIFT         (24)
+#define PSB_2D_DSTALPHA_OP_ONE           (0x00000000)
+#define PSB_2D_DSTALPHA_OP_SRC           (0x01000000)
+#define PSB_2D_DSTALPHA_OP_DST           (0x02000000)
+#define PSB_2D_DSTALPHA_OP_SG            (0x03000000)
+#define PSB_2D_DSTALPHA_OP_DG            (0x04000000)
+#define PSB_2D_DSTALPHA_OP_GBL           (0x05000000)
+#define PSB_2D_DSTALPHA_OP_ZERO          (0x06000000)
+#define PSB_2D_DSTALPHA_INVERT           (0x08000000)
+#define PSB_2D_DSTALPHA_INVERT_CLR       (0xF7FFFFFF)
+
+#define PSB_2D_PRE_MULTIPLICATION_ENABLE  (0x10000000)
+#define PSB_2D_PRE_MULTIPLICATION_CLRMASK (0xEFFFFFFF)
+#define PSB_2D_ZERO_SOURCE_ALPHA_ENABLE   (0x20000000)
+#define PSB_2D_ZERO_SOURCE_ALPHA_CLRMASK  (0xDFFFFFFF)
+
+/*
+ *Source Offset (PSB_2D_SRC_OFF_BH)
+ */
+#define PSB_2D_SRCOFF_XSTART_MASK        ((0x00000FFF) << 12)
+#define PSB_2D_SRCOFF_XSTART_SHIFT       (12)
+#define PSB_2D_SRCOFF_YSTART_MASK        (0x00000FFF)
+#define PSB_2D_SRCOFF_YSTART_SHIFT       (0)
+
+/*
+ * Mask Offset (PSB_2D_MASK_OFF_BH)
+ */
+#define PSB_2D_MASKOFF_XSTART_MASK       ((0x00000FFF) << 12)
+#define PSB_2D_MASKOFF_XSTART_SHIFT      (12)
+#define PSB_2D_MASKOFF_YSTART_MASK       (0x00000FFF)
+#define PSB_2D_MASKOFF_YSTART_SHIFT      (0)
+
+/*
+ * 2D Fence (see PSB_2D_FENCE_BH): bits 0:27 are ignored
+ */
+
+/*
+ *Blit Rectangle (PSB_2D_BLIT_BH)
+ */
+
+#define PSB_2D_ROT_MASK                  (3<<25)
+#define PSB_2D_ROT_CLRMASK               (~PSB_2D_ROT_MASK)
+#define PSB_2D_ROT_NONE                  (0<<25)
+#define PSB_2D_ROT_90DEGS                (1<<25)
+#define PSB_2D_ROT_180DEGS               (2<<25)
+#define PSB_2D_ROT_270DEGS               (3<<25)
+
+#define PSB_2D_COPYORDER_MASK            (3<<23)
+#define PSB_2D_COPYORDER_CLRMASK         (~PSB_2D_COPYORDER_MASK)
+#define PSB_2D_COPYORDER_TL2BR           (0<<23)
+#define PSB_2D_COPYORDER_BR2TL           (1<<23)
+#define PSB_2D_COPYORDER_TR2BL           (2<<23)
+#define PSB_2D_COPYORDER_BL2TR           (3<<23)
+
+#define PSB_2D_DSTCK_CLRMASK             (0xFF9FFFFF)
+#define PSB_2D_DSTCK_DISABLE             (0x00000000)
+#define PSB_2D_DSTCK_PASS                (0x00200000)
+#define PSB_2D_DSTCK_REJECT              (0x00400000)
+
+#define PSB_2D_SRCCK_CLRMASK             (0xFFE7FFFF)
+#define PSB_2D_SRCCK_DISABLE             (0x00000000)
+#define PSB_2D_SRCCK_PASS                (0x00080000)
+#define PSB_2D_SRCCK_REJECT              (0x00100000)
+
+#define PSB_2D_CLIP_ENABLE               (0x00040000)
+
+#define PSB_2D_ALPHA_ENABLE              (0x00020000)
+
+#define PSB_2D_PAT_CLRMASK               (0xFFFEFFFF)
+#define PSB_2D_PAT_MASK                  (0x00010000)
+#define PSB_2D_USE_PAT                   (0x00010000)
+#define PSB_2D_USE_FILL                  (0x00000000)
+/*
+ * Tungsten Graphics note on rop codes: If rop A and rop B are
+ * identical, the mask surface will not be read and need not be
+ * set up.
+ */
+
+#define PSB_2D_ROP3B_MASK                (0x0000FF00)
+#define PSB_2D_ROP3B_CLRMASK             (0xFFFF00FF)
+#define PSB_2D_ROP3B_SHIFT               (8)
+/* rop code A */
+#define PSB_2D_ROP3A_MASK                (0x000000FF)
+#define PSB_2D_ROP3A_CLRMASK             (0xFFFFFF00)
+#define PSB_2D_ROP3A_SHIFT               (0)
+
+#define PSB_2D_ROP4_MASK                 (0x0000FFFF)
+/*
+ *	DWORD0:	(Only pass if Pattern control == Use Fill Colour)
+ *	Fill Colour RGBA8888
+ */
+#define PSB_2D_FILLCOLOUR_MASK           (0xFFFFFFFF)
+#define PSB_2D_FILLCOLOUR_SHIFT          (0)
+/*
+ *	DWORD1: (Always Present)
+ *	X Start (Dest)
+ *	Y Start (Dest)
+ */
+#define PSB_2D_DST_XSTART_MASK           (0x00FFF000)
+#define PSB_2D_DST_XSTART_CLRMASK        (0xFF000FFF)
+#define PSB_2D_DST_XSTART_SHIFT          (12)
+#define PSB_2D_DST_YSTART_MASK           (0x00000FFF)
+#define PSB_2D_DST_YSTART_CLRMASK        (0xFFFFF000)
+#define PSB_2D_DST_YSTART_SHIFT          (0)
+/*
+ *	DWORD2: (Always Present)
+ *	X Size (Dest)
+ *	Y Size (Dest)
+ */
+#define PSB_2D_DST_XSIZE_MASK            (0x00FFF000)
+#define PSB_2D_DST_XSIZE_CLRMASK         (0xFF000FFF)
+#define PSB_2D_DST_XSIZE_SHIFT           (12)
+#define PSB_2D_DST_YSIZE_MASK            (0x00000FFF)
+#define PSB_2D_DST_YSIZE_CLRMASK         (0xFFFFF000)
+#define PSB_2D_DST_YSIZE_SHIFT           (0)
+
+/*
+ * Source Surface (PSB_2D_SRC_SURF_BH)
+ */
+/*
+ *      WORD 0
+ */
+
+#define PSB_2D_SRC_FORMAT_MASK           (0x00078000)
+#define PSB_2D_SRC_1_PAL                 (0x00000000)
+#define PSB_2D_SRC_2_PAL                 (0x00008000)
+#define PSB_2D_SRC_4_PAL                 (0x00010000)
+#define PSB_2D_SRC_8_PAL                 (0x00018000)
+#define PSB_2D_SRC_8_ALPHA               (0x00020000)
+#define PSB_2D_SRC_4_ALPHA               (0x00028000)
+#define PSB_2D_SRC_332RGB                (0x00030000)
+#define PSB_2D_SRC_4444ARGB              (0x00038000)
+#define PSB_2D_SRC_555RGB                (0x00040000)
+#define PSB_2D_SRC_1555ARGB              (0x00048000)
+#define PSB_2D_SRC_565RGB                (0x00050000)
+#define PSB_2D_SRC_0888ARGB              (0x00058000)
+#define PSB_2D_SRC_8888ARGB              (0x00060000)
+#define PSB_2D_SRC_8888UYVY              (0x00068000)
+#define PSB_2D_SRC_RESERVED              (0x00070000)
+#define PSB_2D_SRC_1555ARGB_LOOKUP       (0x00078000)
+
+#define PSB_2D_SRC_STRIDE_MASK           (0x00007FFF)
+#define PSB_2D_SRC_STRIDE_CLRMASK        (0xFFFF8000)
+#define PSB_2D_SRC_STRIDE_SHIFT          (0)
+/*
+ *  WORD 1 - Base Address
+ */
+#define PSB_2D_SRC_ADDR_MASK             (0x0FFFFFFC)
+#define PSB_2D_SRC_ADDR_CLRMASK          (0x00000003)
+#define PSB_2D_SRC_ADDR_SHIFT            (2)
+#define PSB_2D_SRC_ADDR_ALIGNSHIFT       (2)
+
+/*
+ * Pattern Surface (PSB_2D_PAT_SURF_BH)
+ */
+/*
+ *  WORD 0
+ */
+
+#define PSB_2D_PAT_FORMAT_MASK           (0x00078000)
+#define PSB_2D_PAT_1_PAL                 (0x00000000)
+#define PSB_2D_PAT_2_PAL                 (0x00008000)
+#define PSB_2D_PAT_4_PAL                 (0x00010000)
+#define PSB_2D_PAT_8_PAL                 (0x00018000)
+#define PSB_2D_PAT_8_ALPHA               (0x00020000)
+#define PSB_2D_PAT_4_ALPHA               (0x00028000)
+#define PSB_2D_PAT_332RGB                (0x00030000)
+#define PSB_2D_PAT_4444ARGB              (0x00038000)
+#define PSB_2D_PAT_555RGB                (0x00040000)
+#define PSB_2D_PAT_1555ARGB              (0x00048000)
+#define PSB_2D_PAT_565RGB                (0x00050000)
+#define PSB_2D_PAT_0888ARGB              (0x00058000)
+#define PSB_2D_PAT_8888ARGB              (0x00060000)
+
+#define PSB_2D_PAT_STRIDE_MASK           (0x00007FFF)
+#define PSB_2D_PAT_STRIDE_CLRMASK        (0xFFFF8000)
+#define PSB_2D_PAT_STRIDE_SHIFT          (0)
+/*
+ *  WORD 1 - Base Address
+ */
+#define PSB_2D_PAT_ADDR_MASK             (0x0FFFFFFC)
+#define PSB_2D_PAT_ADDR_CLRMASK          (0x00000003)
+#define PSB_2D_PAT_ADDR_SHIFT            (2)
+#define PSB_2D_PAT_ADDR_ALIGNSHIFT       (2)
+
+/*
+ * Destination Surface (PSB_2D_DST_SURF_BH)
+ */
+/*
+ * WORD 0
+ */
+
+#define PSB_2D_DST_FORMAT_MASK           (0x00078000)
+#define PSB_2D_DST_332RGB                (0x00030000)
+#define PSB_2D_DST_4444ARGB              (0x00038000)
+#define PSB_2D_DST_555RGB                (0x00040000)
+#define PSB_2D_DST_1555ARGB              (0x00048000)
+#define PSB_2D_DST_565RGB                (0x00050000)
+#define PSB_2D_DST_0888ARGB              (0x00058000)
+#define PSB_2D_DST_8888ARGB              (0x00060000)
+#define PSB_2D_DST_8888AYUV              (0x00070000)
+
+#define PSB_2D_DST_STRIDE_MASK           (0x00007FFF)
+#define PSB_2D_DST_STRIDE_CLRMASK        (0xFFFF8000)
+#define PSB_2D_DST_STRIDE_SHIFT          (0)
+/*
+ * WORD 1 - Base Address
+ */
+#define PSB_2D_DST_ADDR_MASK             (0x0FFFFFFC)
+#define PSB_2D_DST_ADDR_CLRMASK          (0x00000003)
+#define PSB_2D_DST_ADDR_SHIFT            (2)
+#define PSB_2D_DST_ADDR_ALIGNSHIFT       (2)
+
+/*
+ * Mask Surface (PSB_2D_MASK_SURF_BH)
+ */
+/*
+ * WORD 0
+ */
+#define PSB_2D_MASK_STRIDE_MASK          (0x00007FFF)
+#define PSB_2D_MASK_STRIDE_CLRMASK       (0xFFFF8000)
+#define PSB_2D_MASK_STRIDE_SHIFT         (0)
+/*
+ *  WORD 1 - Base Address
+ */
+#define PSB_2D_MASK_ADDR_MASK            (0x0FFFFFFC)
+#define PSB_2D_MASK_ADDR_CLRMASK         (0x00000003)
+#define PSB_2D_MASK_ADDR_SHIFT           (2)
+#define PSB_2D_MASK_ADDR_ALIGNSHIFT      (2)
+
+/*
+ * Source Palette (PSB_2D_SRC_PAL_BH)
+ */
+
+#define PSB_2D_SRCPAL_ADDR_SHIFT         (0)
+#define PSB_2D_SRCPAL_ADDR_CLRMASK       (0xF0000007)
+#define PSB_2D_SRCPAL_ADDR_MASK          (0x0FFFFFF8)
+#define PSB_2D_SRCPAL_BYTEALIGN          (1024)
+
+/*
+ * Pattern Palette (PSB_2D_PAT_PAL_BH)
+ */
+
+#define PSB_2D_PATPAL_ADDR_SHIFT         (0)
+#define PSB_2D_PATPAL_ADDR_CLRMASK       (0xF0000007)
+#define PSB_2D_PATPAL_ADDR_MASK          (0x0FFFFFF8)
+#define PSB_2D_PATPAL_BYTEALIGN          (1024)
+
+/*
+ * Rop3 Codes (2 LS bytes)
+ */
+
+#define PSB_2D_ROP3_SRCCOPY              (0xCCCC)
+#define PSB_2D_ROP3_PATCOPY              (0xF0F0)
+#define PSB_2D_ROP3_WHITENESS            (0xFFFF)
+#define PSB_2D_ROP3_BLACKNESS            (0x0000)
+#define PSB_2D_ROP3_SRC                  (0xCC)
+#define PSB_2D_ROP3_PAT                  (0xF0)
+#define PSB_2D_ROP3_DST                  (0xAA)
+
+/*
+ * Sizes.
+ */
+
+#define PSB_SCENE_HW_COOKIE_SIZE 16
+#define PSB_TA_MEM_HW_COOKIE_SIZE 16
+
+/*
+ * Scene stuff.
+ */
+
+#define PSB_NUM_HW_SCENES          2
+
+/*
+ * Scheduler completion actions.
+ */
+
+#define PSB_RASTER_BLOCK 0
+#define PSB_RASTER       1
+#define PSB_RETURN       2
+#define PSB_TA       3
+
+#define PNW_IMR_ADDRESS_MASK 0x00fffffcu
+#define PNW_IMR_ADDRESS_SHIFT 8
+#define PNW_IMR_MSG_PORT      3
+#define PNW_IMR3L_MSG_REGADDR 0x4C
+#define PNW_IMR3H_MSG_REGADDR 0x4D
+#define PNW_IMR4L_MSG_REGADDR 0x50
+#define PNW_IMR4H_MSG_REGADDR 0x51
+#define TNG_IMR11L_MSG_REGADDR 0xAC
+#define TNG_IMR11H_MSG_REGADDR 0xAD
+#define TNG_IMR11RAC_MSG_REGADDR 0xAE
+
+#define TNG_IMR_ADDRESS_MASK 0x03ffffff
+#define TNG_IMR_ADDRESS_SHIFT 10
+#define TNG_IMR_MSG_PORT      3
+#define TNG_IMR_MSG_REGBASE   0x80
+#define TNG_IMR5L_MSG_REGADDR 0x94
+#define TNG_IMR5H_MSG_REGADDR 0x95
+
+#define TNG_IMR6L_MSG_REGADDR 0x98
+#define TNG_IMR6H_MSG_REGADDR 0x99
+#define TNG_IMR6_RAC_MSG_REGADDR 0x9a
+#define TNG_IMR6_WAC_MSG_REGADDR 0x9b
+
+#endif
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/psb_socket.c b/drivers/external_drivers/intel_media/display/tng/drv/psb_socket.c
new file mode 100644
index 0000000..194a7d0
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/psb_socket.c
@@ -0,0 +1,387 @@
+/*
+ * Copyright (c) 2009, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Copyright (C) 2004 Red Hat, Inc.  All rights reserved.
+ * Copyright (C) 2004 Novell, Inc.  All rights reserved.
+ * Copyright (C) 2004 IBM, Inc. All rights reserved.
+ * Copyright (C) 2009 Intel Corporation.  All rights reserved.
+ *
+ * Licensed under the GNU GPL v2.
+ *
+ * Authors:
+ *	Robert Love		<rml@novell.com>
+ *	Kay Sievers		<kay.sievers@vrfy.org>
+ *	Arjan van de Ven	<arjanv@redhat.com>
+ *	Greg Kroah-Hartman	<greg@kroah.com>
+ *
+ * Notes:
+ *      Adapted from existing kobj event socket code to enable
+ *      mutlicast usermode communication for gfx driver to mutiple
+ *      usermode threads via different socket broadcast groups.
+ *      Original kobject uevent code does not allow for different
+ *      broadcast groups.  Due to the frequency of usermode events
+ *      generated by some gfx subsystems it is necessary to open
+ *      a new dedicated socket with multicast group support.  In
+ *      the future it is hoped that this code can be removed
+ *      and either a new netlink protocol type added for graphics
+ *      or conversely to simply enable group routing to be leveraged
+ *      on the existing kobject uevent infrastructure.
+ */
+
+#include <linux/version.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/socket.h>
+#include <linux/skbuff.h>
+#include <linux/netlink.h>
+#include <net/sock.h>
+#include "psb_umevents.h"
+
+#define NETLINK_PSB_KOBJECT_UEVENT	31
+
+u64 psb_uevent_seqnum;
+char psb_uevent_helper[UEVENT_HELPER_PATH_LEN] = CONFIG_UEVENT_HELPER_PATH;
+static DEFINE_SPINLOCK(sequence_lock);
+#if defined(CONFIG_NET)
+static struct sock *uevent_sock;
+#endif
+
+/* the strings here must match the enum in include/linux/kobject.h */
+static const char *psb_kobject_actions[] = {
+	[KOBJ_ADD] = "add",
+	[KOBJ_REMOVE] = "remove",
+	[KOBJ_CHANGE] = "change",
+	[KOBJ_MOVE] = "move",
+	[KOBJ_ONLINE] = "online",
+	[KOBJ_OFFLINE] = "offline",
+};
+
+/**
+ * kobject_action_type - translate action string to numeric type
+ *
+ * @buf: buffer containing the action string, newline is ignored
+ * @len: length of buffer
+ * @type: pointer to the location to store the action type
+ *
+ * Returns 0 if the action string was recognized.
+ */
+int psb_kobject_action_type(const char *buf, size_t count,
+			    enum kobject_action *type)
+{
+	enum kobject_action action;
+	int ret = -EINVAL;
+
+	if (count && (buf[count - 1] == '\n' || buf[count - 1] == '\0'))
+		count--;
+
+	if (!count)
+		goto out;
+
+	for (action = 0; action < ARRAY_SIZE(psb_kobject_actions); action++) {
+		if (strncmp(psb_kobject_actions[action], buf, count) != 0)
+			continue;
+		if (psb_kobject_actions[action][count] != '\0')
+			continue;
+		*type = action;
+		ret = 0;
+		break;
+	}
+ out:
+	return ret;
+}
+
+/**
+ * psb_kobject_uevent_env - send an uevent with environmental data
+ *
+ * @action: action that is happening
+ * @kobj: struct kobject that the action is happening to
+ * @envp_ext: pointer to environmental data
+ *
+ * Returns 0 if kobject_uevent() is completed with success or the
+ * corresponding error when it fails.
+ */
+int psb_kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
+			   char *envp_ext[], int dst_group_id)
+{
+	struct kobj_uevent_env *env;
+	const char *action_string = psb_kobject_actions[action];
+	const char *devpath = NULL;
+	const char *subsystem;
+	struct kobject *top_kobj;
+	struct kset *kset;
+	const struct kset_uevent_ops *uevent_ops;
+	u64 seq;
+	int i = 0;
+	int retval = 0;
+
+	pr_debug("kobject: '%s' (%p): %s\n",
+		 kobject_name(kobj), kobj, __func__);
+
+	/* search the kset we belong to */
+	top_kobj = kobj;
+	while (!top_kobj->kset && top_kobj->parent)
+		top_kobj = top_kobj->parent;
+
+	if (!top_kobj->kset) {
+		pr_debug("kobject: '%s' (%p): %s: attempted to send uevent "
+			 "without kset!\n", kobject_name(kobj), kobj, __func__);
+		return -EINVAL;
+	}
+
+	kset = top_kobj->kset;
+	uevent_ops = (const struct kset_uevent_ops *)kset->uevent_ops;
+
+	/* skip the event, if uevent_suppress is set */
+	if (kobj->uevent_suppress) {
+		pr_debug("kobject: '%s' (%p): %s: uevent_suppress "
+			 "caused the event to drop!\n",
+			 kobject_name(kobj), kobj, __func__);
+		return 0;
+	}
+	/* skip the event, if the filter returns zero. */
+	if (uevent_ops && uevent_ops->filter)
+		if (!uevent_ops->filter(kset, kobj)) {
+			pr_debug("kobject: '%s' (%p): %s: filter function "
+				 "caused the event to drop!\n",
+				 kobject_name(kobj), kobj, __func__);
+			return 0;
+		}
+
+	/* originating subsystem */
+	if (uevent_ops && uevent_ops->name)
+		subsystem = uevent_ops->name(kset, kobj);
+	else
+		subsystem = kobject_name(&kset->kobj);
+	if (!subsystem) {
+		pr_debug("kobject: '%s' (%p): %s: unset subsystem caused the "
+			 "event to drop!\n", kobject_name(kobj), kobj,
+			 __func__);
+		return 0;
+	}
+
+	/* environment buffer */
+	env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
+	if (!env)
+		return -ENOMEM;
+
+	/* complete object path */
+	devpath = kobject_get_path(kobj, GFP_KERNEL);
+	if (!devpath) {
+		retval = -ENOENT;
+		goto exit;
+	}
+
+	/* default keys */
+	retval = add_uevent_var(env, "ACTION=%s", action_string);
+	if (retval)
+		goto exit;
+	retval = add_uevent_var(env, "DEVPATH=%s", devpath);
+	if (retval)
+		goto exit;
+	retval = add_uevent_var(env, "SUBSYSTEM=%s", subsystem);
+	if (retval)
+		goto exit;
+
+	/* keys passed in from the caller */
+	if (envp_ext) {
+		for (i = 0; envp_ext[i]; i++) {
+			retval = add_uevent_var(env, "%s", envp_ext[i]);
+			if (retval)
+				goto exit;
+		}
+	}
+
+	/* let the kset specific function add its stuff */
+	if (uevent_ops && uevent_ops->uevent) {
+		retval = uevent_ops->uevent(kset, kobj, env);
+		if (retval) {
+			pr_debug("kobject: '%s' (%p): %s: uevent() returned "
+				 "%d\n", kobject_name(kobj), kobj,
+				 __func__, retval);
+			goto exit;
+		}
+	}
+
+	/*
+	 * Mark "add" and "remove" events in the object to ensure proper
+	 * events to userspace during automatic cleanup. If the object did
+	 * send an "add" event, "remove" will automatically generated by
+	 * the core, if not already done by the caller.
+	 */
+	if (action == KOBJ_ADD)
+		kobj->state_add_uevent_sent = 1;
+	else if (action == KOBJ_REMOVE)
+		kobj->state_remove_uevent_sent = 1;
+
+	/* we will send an event, so request a new sequence number */
+	spin_lock(&sequence_lock);
+	seq = ++psb_uevent_seqnum;
+	spin_unlock(&sequence_lock);
+	retval = add_uevent_var(env, "SEQNUM=%llu", (unsigned long long)seq);
+	if (retval)
+		goto exit;
+
+#if defined(CONFIG_NET)
+	/* send netlink message */
+	if (uevent_sock) {
+		struct sk_buff *skb;
+		size_t len;
+
+		/* allocate message with the maximum possible size */
+		len = strlen(action_string) + strlen(devpath) + 2;
+		skb = alloc_skb(len + env->buflen, GFP_KERNEL);
+		if (skb) {
+			char *scratch;
+
+			/* add header */
+			scratch = skb_put(skb, len);
+			sprintf(scratch, "%s@%s", action_string, devpath);
+
+			/* copy keys to our continuous event payload buffer */
+			for (i = 0; i < env->envp_idx; i++) {
+				len = strlen(env->envp[i]) + 1;
+				scratch = skb_put(skb, len);
+				strcpy(scratch, env->envp[i]);
+			}
+
+			NETLINK_CB(skb).dst_group = dst_group_id;
+			retval = netlink_broadcast(uevent_sock, skb, 0,
+						   dst_group_id, GFP_KERNEL);
+
+			/* ENOBUFS should be handled in userspace */
+			if (retval == -ENOBUFS)
+				retval = 0;
+		} else
+			retval = -ENOMEM;
+	}
+#endif
+
+	/* call psb_uevent_helper, usually only enabled during early boot */
+	if (psb_uevent_helper[0]) {
+		char *argv[3];
+
+		argv[0] = psb_uevent_helper;
+		argv[1] = (char *)subsystem;
+		argv[2] = NULL;
+		retval = add_uevent_var(env, "HOME=/");
+		if (retval)
+			goto exit;
+		retval = add_uevent_var(env,
+					"PATH=/sbin:/bin:/usr/sbin:/usr/bin");
+		if (retval)
+			goto exit;
+
+		retval = call_usermodehelper(argv[0], argv,
+					     env->envp, UMH_WAIT_EXEC);
+	}
+
+ exit:
+	kfree(devpath);
+	kfree(env);
+	return retval;
+}
+
+/*EXPORT_SYMBOL_GPL(psb_kobject_uevent_env); */
+
+/**
+ * psb_kobject_uevent - notify userspace by ending an uevent
+ *
+ * @action: action that is happening
+ * @kobj: struct kobject that the action is happening to
+ *
+ * Returns 0 if psb_kobject_uevent() is completed with success or the
+ * corresponding error when it fails.
+ */
+int psb_kobject_uevent(struct kobject *kobj, enum kobject_action action,
+		       int dst_group_id)
+{
+	return psb_kobject_uevent_env(kobj, action, NULL, dst_group_id);
+}
+
+/*EXPORT_SYMBOL_GPL(psb_kobject_uevent); */
+
+/**
+ * psb_add_uevent_var - add key value string to the environment buffer
+ * @env: environment buffer structure
+ * @format: printf format for the key=value pair
+ *
+ * Returns 0 if environment variable was added successfully or -ENOMEM
+ * if no space was available.
+ */
+int psb_add_uevent_var(struct kobj_uevent_env *env, const char *format, ...)
+{
+	va_list args;
+	int len;
+
+	if (env->envp_idx >= ARRAY_SIZE(env->envp)) {
+		WARN(1, KERN_ERR "psb_add_uevent_var: too many keys\n");
+		return -ENOMEM;
+	}
+
+	va_start(args, format);
+	len = vsnprintf(&env->buf[env->buflen],
+			sizeof(env->buf) - env->buflen, format, args);
+	va_end(args);
+
+	if (len >= (sizeof(env->buf) - env->buflen)) {
+		WARN(1, KERN_ERR "psb_add_uevent_var: buffer size too small\n");
+		return -ENOMEM;
+	}
+
+	env->envp[env->envp_idx++] = &env->buf[env->buflen];
+	env->buflen += len + 1;
+	return 0;
+}
+
+/*EXPORT_SYMBOL_GPL(psb_add_uevent_var);*/
+
+#if defined(CONFIG_NET)
+int __init psb_kobject_uevent_init(void)
+{
+	/* This should be the 15, but 3 seems to work better.  Why? WHY!? */
+	/* uevent_sock = netlink_kernel_create(&init_net,
+	   NETLINK_PSB_KOBJECT_UEVENT,
+	   DRM_GFX_SOCKET_GROUPS,
+	   NULL, NULL, THIS_MODULE); */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0))
+	uevent_sock = netlink_kernel_create(&init_net, NETLINK_PSB_KOBJECT_UEVENT, 0x3,	/* 3 is for hotplug & dpst */
+					    NULL, NULL, THIS_MODULE);
+#else
+	struct netlink_kernel_cfg netlnk_cfg;
+	memset(&netlnk_cfg, 0, sizeof(struct netlink_kernel_cfg));
+	netlnk_cfg.groups = 0x3;
+	netlnk_cfg.flags = NL_CFG_F_NONROOT_RECV;
+	uevent_sock = netlink_kernel_create(&init_net, NETLINK_PSB_KOBJECT_UEVENT,
+						&netlnk_cfg);
+#endif
+	if (!uevent_sock) {
+		printk(KERN_ERR "psb_kobject_uevent: failed create socket!\n");
+		return -ENODEV;
+	}
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0))
+	netlink_set_nonroot(NETLINK_PSB_KOBJECT_UEVENT, NL_NONROOT_RECV);
+#endif
+	return 0;
+}
+
+#ifndef MODULE
+postcore_initcall(psb_kobject_uevent_init);
+#endif
+
+#endif
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/psb_umevents.c b/drivers/external_drivers/intel_media/display/tng/drv/psb_umevents.c
new file mode 100644
index 0000000..f5d5f30
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/psb_umevents.c
@@ -0,0 +1,506 @@
+/*
+ * Copyright © 2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *    James C. Gualario <james.c.gualario@intel.com>
+ *
+ */
+#include "psb_umevents.h"
+/**
+ * define sysfs operations supported by umevent objects.
+ *
+ */
+static const struct sysfs_ops umevent_obj_sysfs_ops = {
+	.show = psb_umevent_attr_show,
+	.store = psb_umevent_attr_store,
+};
+
+/**
+ * define the data attributes we will expose through sysfs.
+ *
+ */
+static struct umevent_attribute data_0 =
+__ATTR(data_0_val, 0664, psb_umevent_attr_show_imp,
+       psb_umevent_attr_store_imp);
+static struct umevent_attribute data_1 =
+__ATTR(data_1_val, 0664, psb_umevent_attr_show_imp,
+       psb_umevent_attr_store_imp);
+static struct umevent_attribute data_2 =
+__ATTR(data_2_val, 0664, psb_umevent_attr_show_imp,
+       psb_umevent_attr_store_imp);
+static struct umevent_attribute data_3 =
+__ATTR(data_3_val, 0664, psb_umevent_attr_show_imp,
+       psb_umevent_attr_store_imp);
+static struct umevent_attribute data_4 =
+__ATTR(data_4_val, 0664, psb_umevent_attr_show_imp,
+       psb_umevent_attr_store_imp);
+static struct umevent_attribute data_5 =
+__ATTR(data_5_val, 0664, psb_umevent_attr_show_imp,
+       psb_umevent_attr_store_imp);
+static struct umevent_attribute data_6 =
+__ATTR(data_6_val, 0664, psb_umevent_attr_show_imp,
+       psb_umevent_attr_store_imp);
+static struct umevent_attribute data_7 =
+__ATTR(data_7_val, 0664, psb_umevent_attr_show_imp,
+       psb_umevent_attr_store_imp);
+/**
+ * define the structure used to seed our ktype.
+ *
+ */
+static struct attribute *umevent_obj_default_attrs[] = {
+	&data_0.attr,
+	&data_1.attr,
+	&data_2.attr,
+	&data_3.attr,
+	&data_4.attr,
+	&data_5.attr,
+	&data_6.attr,
+	&data_7.attr,
+	NULL,			/* need to NULL terminate the list of attributes */
+};
+
+/**
+ * specify the ktype for our kobjects.
+ *
+ */
+static struct kobj_type umevent_obj_ktype = {
+	.sysfs_ops = &umevent_obj_sysfs_ops,
+	.release = psb_umevent_obj_release,
+	.default_attrs = umevent_obj_default_attrs,
+};
+
+/**
+ * psb_umevent_attr_show - default kobject show function
+ *
+ * @kobj: kobject associated with the show operation
+ * @attr: attribute being requested
+ * @buf: pointer to the return buffer
+ *
+ */
+ssize_t psb_umevent_attr_show(struct kobject *kobj,
+			      struct attribute *attr, char *buf)
+{
+	struct umevent_attribute *attribute;
+	struct umevent_obj *any_umevent_obj;
+	attribute = to_umevent_attr(attr);
+	any_umevent_obj = to_umevent_obj(kobj);
+	if (!attribute->show)
+		return -EIO;
+
+	return attribute->show(any_umevent_obj, attribute, buf);
+}
+
+/**
+ * psb_umevent_attr_store - default kobject store function
+ *
+ * @kobj: kobject associated with the store operation
+ * @attr: attribute being requested
+ * @buf: input data to write to attribute
+ * @len: character count
+ *
+ */
+ssize_t psb_umevent_attr_store(struct kobject * kobj,
+			       struct attribute * attr,
+			       const char *buf, size_t len)
+{
+	struct umevent_attribute *attribute;
+	struct umevent_obj *any_umevent_obj;
+	attribute = to_umevent_attr(attr);
+	any_umevent_obj = to_umevent_obj(kobj);
+	if (!attribute->store)
+		return -EIO;
+
+	return attribute->store(any_umevent_obj, attribute, buf, len);
+}
+
+/**
+ * psb_umevent_obj_release - kobject release funtion
+ *
+ * @kobj: kobject to be released.
+ */
+void psb_umevent_obj_release(struct kobject *kobj)
+{
+	struct umevent_obj *any_umevent_obj;
+	any_umevent_obj = to_umevent_obj(kobj);
+	kfree(any_umevent_obj);
+}
+
+/**
+ *  psb_umevent_attr_show_imp - attribute show implementation
+ *
+ * @any_umevent_obj: kobject managed data to read from
+ * @attr: attribute being requested
+ * @buf: pointer to the return buffer
+ *
+ */
+ssize_t psb_umevent_attr_show_imp(struct umevent_obj
+				  *any_umevent_obj,
+				  struct umevent_attribute *attr, char *buf)
+{
+	int var;
+
+	if (strcmp(attr->attr.name, "data_0_val") == 0)
+		var = any_umevent_obj->data_0_val;
+	else if (strcmp(attr->attr.name, "data_1_val") == 0)
+		var = any_umevent_obj->data_1_val;
+	else if (strcmp(attr->attr.name, "data_2_val") == 0)
+		var = any_umevent_obj->data_2_val;
+	else if (strcmp(attr->attr.name, "data_3_val") == 0)
+		var = any_umevent_obj->data_3_val;
+	else if (strcmp(attr->attr.name, "data_4_val") == 0)
+		var = any_umevent_obj->data_4_val;
+	else if (strcmp(attr->attr.name, "data_5_val") == 0)
+		var = any_umevent_obj->data_5_val;
+	else if (strcmp(attr->attr.name, "data_6_val") == 0)
+		var = any_umevent_obj->data_6_val;
+	else
+		var = any_umevent_obj->data_7_val;
+
+	return sprintf(buf, "%d\n", var);
+}
+
+/**
+ * psb_umevent_attr_store_imp - attribute store implementation
+ *
+ * @any_umevent_obj: kobject managed data to write to
+ * @attr: attribute being requested
+ * @buf: input data to write to attribute
+ * @count: character count
+ *
+ */
+ssize_t psb_umevent_attr_store_imp(struct umevent_obj
+				   * any_umevent_obj,
+				   struct umevent_attribute * attr,
+				   const char *buf, size_t count)
+{
+	int var;
+
+	sscanf(buf, "%du", &var);
+	if (strcmp(attr->attr.name, "data_0_val") == 0)
+		any_umevent_obj->data_0_val = var;
+	else if (strcmp(attr->attr.name, "data_1_val") == 0)
+		any_umevent_obj->data_1_val = var;
+	else if (strcmp(attr->attr.name, "data_2_val") == 0)
+		any_umevent_obj->data_2_val = var;
+	else if (strcmp(attr->attr.name, "data_3_val") == 0)
+		any_umevent_obj->data_3_val = var;
+	else if (strcmp(attr->attr.name, "data_4_val") == 0)
+		any_umevent_obj->data_4_val = var;
+	else if (strcmp(attr->attr.name, "data_5_val") == 0)
+		any_umevent_obj->data_5_val = var;
+	else if (strcmp(attr->attr.name, "data_6_val") == 0)
+		any_umevent_obj->data_6_val = var;
+	else
+		any_umevent_obj->data_7_val = var;
+	return count;
+}
+
+/**
+ * psb_create_umevent_obj - create and track new event objects
+ *
+ * @name: name to give to new sysfs / kobject entry
+ * @list: event object list to track the kobject in
+ */
+struct umevent_obj *psb_create_umevent_obj(const char *name, struct umevent_list
+					   *list)
+{
+	struct umevent_obj *new_umevent_obj;
+	int retval;
+	new_umevent_obj = kzalloc(sizeof(*new_umevent_obj), GFP_KERNEL);
+	if (!new_umevent_obj)
+		return NULL;
+
+	new_umevent_obj->kobj.kset = list->umevent_disp_pool;
+	retval = kobject_init_and_add(&new_umevent_obj->kobj,
+				      &umevent_obj_ktype, NULL, "%s", name);
+	if (retval) {
+		kobject_put(&new_umevent_obj->kobj);
+		return NULL;
+	}
+	psb_umevent_add_to_list(list, new_umevent_obj);
+	return new_umevent_obj;
+}
+
+/*EXPORT_SYMBOL(psb_create_umevent_obj); */
+/**
+ * psb_umevent_notify - info user mode of a new device
+ *
+ * @notify_disp_obj: event object to perform notification for
+ *
+ */
+void psb_umevent_notify(struct umevent_obj *notify_disp_obj)
+{
+	kobject_uevent(&notify_disp_obj->kobj, KOBJ_ADD);
+}
+
+/*EXPORT_SYMBOL(psb_umevent_notify); */
+
+void psb_umevent_notify_gfxsock(struct umevent_obj *notify_disp_obj,
+				int dst_group_id)
+{
+	psb_kobject_uevent(&notify_disp_obj->kobj, KOBJ_ADD, dst_group_id);
+}
+
+/*EXPORT_SYMBOL(psb_umevent_notify_gfxsock);*/
+/**
+ * psb_umevent_notify_change - notify user mode of a change to a device
+ *
+ * @notify_disp_obj: event object to perform notification for
+ *
+ */
+void psb_umevent_notify_change(struct umevent_obj *notify_disp_obj)
+{
+	kobject_uevent(&notify_disp_obj->kobj, KOBJ_CHANGE);
+}
+
+/*EXPORT_SYMBOL(psb_umevent_notify_change); */
+/**
+ * psb_umevent_notify_change - notify user mode of a change to a device
+ *
+ * @notify_disp_obj: event object to perform notification for
+ *
+ */
+void psb_umevent_notify_change_gfxsock(struct umevent_obj *notify_disp_obj,
+				       int dst_group_id)
+{
+	psb_kobject_uevent(&notify_disp_obj->kobj, KOBJ_CHANGE, dst_group_id);
+}
+
+/*EXPORT_SYMBOL(psb_umevent_notify_change_gfxsock); */
+/**
+ * psb_destroy_umvent_obj - decrement ref count on event so kernel can kill it
+ *
+ * @any_umevent_obj: event object to destroy
+ *
+ */
+void psb_destroy_umevent_obj(struct umevent_obj
+			     *any_umevent_obj)
+{
+	kobject_put(&any_umevent_obj->kobj);
+}
+
+/**
+ *
+ * psb_umevent_init - init the event pool
+ *
+ * @parent_kobj: parent kobject to associate new kset with
+ * @new_umevent_list: event list to associate kset with
+ * @name: name to give to new sysfs entry
+ *
+ */
+int psb_umevent_init(struct kobject *parent_kobj,
+		     struct umevent_list *new_umevent_list, const char *name)
+{
+	psb_umevent_init_list(new_umevent_list);
+	new_umevent_list->umevent_disp_pool = kset_create_and_add(name, NULL,
+								  parent_kobj);
+	if (!new_umevent_list->umevent_disp_pool)
+		return -ENOMEM;
+
+	return 0;
+}
+
+/*EXPORT_SYMBOL(psb_umevent_init); */
+/**
+ *
+ * psb_umevent_cleanup - cleanup all event objects
+ *
+ * @kill_list: list of events to destroy
+ *
+ */
+void psb_umevent_cleanup(struct umevent_list *kill_list)
+{
+	psb_umevent_destroy_list(kill_list);
+}
+
+/*EXPORT_SYMBOL(psb_umevent_cleanup); */
+/**
+ * psb_umevent_add_to_list - add an event to the event list
+ *
+ * @list: list to add the event to
+ * @umevent_obj_to_add: event to add
+ *
+ */
+void psb_umevent_add_to_list(struct umevent_list *list,
+			     struct umevent_obj *umevent_obj_to_add)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&list->list_lock, flags);
+	list_add(&umevent_obj_to_add->head, &list->head);
+	spin_unlock_irqrestore(&list->list_lock, flags);
+}
+
+/**
+ * psb_umevent_init_list - initialize event list
+ *
+ * @list: list to initialize
+ *
+ */
+void psb_umevent_init_list(struct umevent_list *list)
+{
+	spin_lock_init(&list->list_lock);
+	INIT_LIST_HEAD(&list->head);
+}
+
+/**
+ * psb_umevent_create_list - allocate an event list
+ *
+ */
+struct umevent_list *psb_umevent_create_list()
+{
+	struct umevent_list *new_umevent_list;
+	new_umevent_list = NULL;
+	new_umevent_list = kmalloc(sizeof(struct umevent_list), GFP_ATOMIC);
+	return new_umevent_list;
+}
+
+/*EXPORT_SYMBOL(psb_umevent_create_list); */
+/**
+ * psb_umevent_destroy_list - destroy a list and clean up all mem
+ *
+ * @list: list to destroy and clean up after
+ *
+ */
+void psb_umevent_destroy_list(struct umevent_list *list)
+{
+	struct umevent_obj *umevent_obj_curr;
+	struct list_head *node;
+	struct list_head *node_kill;
+	int i;
+	i = 0;
+	node = NULL;
+	node_kill = NULL;
+	node = list->head.next;
+	while (node != (&list->head)) {
+		umevent_obj_curr = list_entry(node, struct umevent_obj, head);
+		node_kill = node;
+		node = umevent_obj_curr->head.next;
+		list_del(node_kill);
+		psb_destroy_umevent_obj(umevent_obj_curr);
+		umevent_obj_curr = NULL;
+		i++;
+	}
+	kset_unregister(list->umevent_disp_pool);
+	kfree(list);
+}
+
+/**
+ * psb_umevent_remove_from_list - remove an event from tracking list
+ *
+ * @list: list to remove the event from
+ * @disp_to_remove: name of event to remove.
+ *
+ */
+void psb_umevent_remove_from_list(struct umevent_list *list,
+				  const char *disp_to_remove)
+{
+	struct umevent_obj *umevent_obj_curr = NULL;
+	struct list_head *node = NULL;
+	struct list_head *node_kill = NULL;
+	int i = 0;
+	int found_match = 0;
+	i = 0;
+	node = NULL;
+	node_kill = NULL;
+	node = list->head.next;
+	while (node != (&list->head)) {
+		umevent_obj_curr = list_entry(node, struct umevent_obj, head);
+		if (strcmp(umevent_obj_curr->kobj.name, disp_to_remove) == 0) {
+			found_match = 1;
+			break;
+		}
+		node = NULL;
+		node = umevent_obj_curr->head.next;
+		i++;
+	}
+	if (found_match == 1) {
+		node_kill = node;
+		node = umevent_obj_curr->head.next;
+		list_del(node_kill);
+		psb_destroy_umevent_obj(umevent_obj_curr);
+		umevent_obj_curr = NULL;
+	}
+}
+
+/*EXPORT_SYMBOL(psb_umevent_remove_from_list); */
+/**
+ * psb_umevent_find_obj - find an event in a tracking list
+ *
+ * @name: name of the event to find
+ * @list: list to find the event in
+ *
+ */
+struct umevent_obj *psb_umevent_find_obj(const char *name,
+					 struct umevent_list *list)
+{
+	struct umevent_obj *umevent_obj_curr = NULL;
+	struct list_head *node = NULL;
+	struct list_head *node_find = NULL;
+	int i = 0;
+	int found_match = 0;
+	i = 0;
+	node = NULL;
+	node_find = NULL;
+	node = list->head.next;
+	while (node != (&list->head)) {
+		umevent_obj_curr = list_entry(node, struct umevent_obj, head);
+		if (strcmp(umevent_obj_curr->kobj.name, name) == 0) {
+			found_match = 1;
+			break;
+		}
+		node = NULL;
+		node = umevent_obj_curr->head.next;
+		i++;
+	}
+	if (found_match == 1)
+		return umevent_obj_curr;
+
+	return NULL;
+}
+
+/*EXPORT_SYMBOL(psb_umevent_find_obj); */
+/**
+ * psb_umevent_debug_dump_list - debug list dump
+ *
+ * @list: list to dump
+ *
+ */
+void psb_umevent_debug_dump_list(struct umevent_list *list)
+{
+	struct umevent_obj *umevent_obj_curr;
+	unsigned long flags;
+	struct list_head *node;
+	int i;
+	spin_lock_irqsave(&list->list_lock, flags);
+	i = 0;
+	node = NULL;
+	node = list->head.next;
+	while (node != (&list->head)) {
+		umevent_obj_curr = list_entry(node, struct umevent_obj, head);
+		/*TBD: DUMP ANY REQUIRED VALUES WITH PRINTK */
+		node = NULL;
+		node = umevent_obj_curr->head.next;
+		i++;
+	}
+	spin_unlock_irqrestore(&list->list_lock, flags);
+}
+
+void psb_sysfs_uevent(struct drm_device *dev, char *event_string)
+{
+	char *envp[] = { event_string, NULL };
+
+	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, envp);
+}
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/psb_umevents.h b/drivers/external_drivers/intel_media/display/tng/drv/psb_umevents.h
new file mode 100644
index 0000000..2a72077
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/psb_umevents.h
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2009, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *    James C. Gualario <james.c.gualario@intel.com>
+ *
+ */
+#ifndef _PSB_UMEVENT_H_
+#define _PSB_UMEVENT_H_
+/**
+ * required includes
+ *
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <drm/drmP.h>
+#include <drm/drm_core.h>
+#include <drm/drm_pciids.h>
+#include <linux/spinlock.h>
+/**
+ * event groups for routing to different user mode threads
+ *
+ */
+#define DRM_DPST_SOCKET_GROUP_ID 1
+#define DRM_HOTPLUG_SOCKET_GROUP_ID 2
+#define DRM_HDMI_AUDIO_SOCKET_GROUP 4
+#define DRM_HDMI_HDCP_SOCKET_GROUP 8
+#define DRM_GFX_SOCKET_GROUPS 15
+/**
+ * event structure managed by kobjects
+ *
+ */
+struct umevent_obj {
+	struct kobject kobj;
+	struct list_head head;
+	int data_0_val;
+	int data_1_val;
+	int data_2_val;
+	int data_3_val;
+	int data_4_val;
+	int data_5_val;
+	int data_6_val;
+	int data_7_val;
+};
+/**
+ * event tracking list element
+ *
+ */
+struct umevent_list {
+	struct list_head head;
+	struct kset *umevent_disp_pool;
+	spinlock_t list_lock;
+};
+/**
+ * to go back and forth between kobjects and their main container
+ *
+ */
+#define to_umevent_obj(x) \
+	container_of(x, struct umevent_obj, kobj)
+
+/**
+ * event attributes exposed via sysfs
+ *
+ */
+struct umevent_attribute {
+	struct attribute attr;
+	 ssize_t(*show) (struct umevent_obj * any_umevent_obj,
+			 struct umevent_attribute * attr, char *buf);
+	 ssize_t(*store) (struct umevent_obj * any_umevent_obj,
+			  struct umevent_attribute * attr,
+			  const char *buf, size_t count);
+};
+/**
+ * to go back and forth between the attribute passed to us by the OS
+ * and the umevent_attribute
+ *
+ */
+#define to_umevent_attr(x) \
+	container_of(x, struct umevent_attribute, \
+	attr)
+
+/**
+ * umevent function prototypes
+ *
+ */
+extern struct umevent_obj *psb_create_umevent_obj(const char *name, struct umevent_list
+						  *list);
+extern ssize_t psb_umevent_attr_show(struct kobject *kobj,
+				     struct attribute *attr, char *buf);
+extern ssize_t psb_umevent_attr_store(struct kobject *kobj,
+				      struct attribute *attr,
+				      const char *buf, size_t len);
+extern ssize_t psb_umevent_attr_show_imp(struct umevent_obj
+					 *any_umevent_obj,
+					 struct umevent_attribute *attr,
+					 char *buf);
+extern ssize_t psb_umevent_attr_store_imp(struct umevent_obj
+					  *any_umevent_obj,
+					  struct umevent_attribute *attr,
+					  const char *buf, size_t count);
+extern void psb_umevent_cleanup(struct umevent_list *kill_list);
+extern int psb_umevent_init(struct kobject *parent_kobj,
+			    struct umevent_list *new_umevent_list,
+			    const char *name);
+extern void psb_umevent_init_list(struct umevent_list *list);
+extern void psb_umevent_debug_dump_list(struct umevent_list *list);
+extern void psb_umevent_add_to_list(struct umevent_list *list, struct umevent_obj
+				    *umevent_obj_to_add);
+extern void psb_umevent_destroy_list(struct umevent_list *list);
+extern struct umevent_list *psb_umevent_create_list(void);
+extern void psb_umevent_notify(struct umevent_obj *notify_disp_obj);
+extern void psb_umevent_notify_gfxsock(struct umevent_obj *notify_disp_obj,
+				       int dst_group_id);
+extern void psb_umevent_obj_release(struct kobject *kobj);
+extern void psb_umevent_remove_from_list(struct umevent_list *list,
+					 const char *disp_to_remove);
+extern void psb_umevent_workqueue_dispatch(int work_type, const char *name,
+					   struct umevent_list *list);
+extern void psb_umevent_notify_change(struct umevent_obj *notify_disp_obj);
+extern void psb_umevent_notify_change_gfxsock(struct umevent_obj
+					      *notify_disp_obj,
+					      int dst_group_id);
+extern struct umevent_obj *psb_umevent_find_obj(const char *name, struct umevent_list
+						*list);
+/**
+ * socket function prototypes
+ *
+ */
+extern int psb_kobject_uevent(struct kobject *kobj,
+			      enum kobject_action action, int dst_group_id);
+extern int psb_kobject_uevent_env(struct kobject *kobj,
+				  enum kobject_action action,
+				  char *envp[], int dst_group_id);
+int psb_add_uevent_var(struct kobj_uevent_env *env, const char *format, ...)
+    __attribute__ ((format(printf, 2, 3)));
+int psb_kobject_action_type(const char *buf,
+			    size_t count, enum kobject_action *type);
+
+extern void psb_sysfs_uevent(struct drm_device *dev, char *event_string);
+
+#if defined(CONFIG_NET)
+int psb_kobject_uevent_init(void);
+#endif
+
+#endif
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/sdc16x25_8_cmd.c b/drivers/external_drivers/intel_media/display/tng/drv/sdc16x25_8_cmd.c
new file mode 100644
index 0000000..1756bdf
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/sdc16x25_8_cmd.c
@@ -0,0 +1,414 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eckhart Koeppen <eckhart.koeppen@intel.com>
+ */
+
+#include "mdfld_dsi_dbi.h"
+#include "mdfld_output.h"
+#include "mdfld_dsi_pkg_sender.h"
+
+#include "displays/sdc16x25_8_cmd.h"
+
+#define WIDTH 1600
+#define HEIGHT 2560
+
+static int mipi_reset_gpio;
+static int bias_en_gpio;
+
+static u8 sdc16x25_8_eight_lane_enable[] = { 0xf2, 0x03, 0x00, 0x01, 0xa4, 0x03, 0x05, 0xa0 };
+static u8 sdc16x25_8_test_key_enable[] = { 0xf0, 0x5a, 0x5a };
+static u8 sdc16x25_8_test_key_disable[] = { 0xf0, 0xa5, 0xa5 };
+static u8 sdc16x25_8_mcs_column_addr[] = { 0x2a, 0x00, 0x00, (WIDTH - 1) >> 8, (WIDTH - 1)  & 0xff };
+static u8 sdc16x25_8_mcs_page_addr[] = { 0x2b, 0x00, 0x00, (HEIGHT - 1) >> 8, (HEIGHT -1) & 0xff };
+
+static
+int sdc16x25_8_cmd_drv_ic_init(struct mdfld_dsi_config *dsi_config)
+{
+	struct mdfld_dsi_pkg_sender *sender =
+		mdfld_dsi_get_pkg_sender(dsi_config);
+	int ret;
+	u8 cmd;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	/* interface control: dual DSI */
+	cmd = sdc16x25_8_test_key_enable[0];
+	ret = mdfld_dsi_send_mcs_long_lp(sender, sdc16x25_8_test_key_enable, sizeof(sdc16x25_8_test_key_enable),
+					 MDFLD_DSI_SEND_PACKAGE);
+	if (ret)
+		goto err_out;
+
+	cmd = sdc16x25_8_eight_lane_enable[0];
+	ret = mdfld_dsi_send_mcs_long_lp(sender, sdc16x25_8_eight_lane_enable, sizeof(sdc16x25_8_eight_lane_enable),
+					 MDFLD_DSI_SEND_PACKAGE);
+	if (ret)
+		goto err_out;
+
+	cmd = sdc16x25_8_test_key_disable[0];
+	ret = mdfld_dsi_send_mcs_long_lp(sender, sdc16x25_8_test_key_disable, sizeof(sdc16x25_8_test_key_disable),
+					 MDFLD_DSI_SEND_PACKAGE);
+	if (ret)
+		goto err_out;
+
+	msleep(200);
+
+	/* exit sleep */
+	cmd = exit_sleep_mode;
+	ret = mdfld_dsi_send_mcs_short_lp(sender,
+					  cmd, 0, 0, MDFLD_DSI_SEND_PACKAGE);
+	if (ret)
+		goto err_out;
+	msleep(200);
+
+	/* send display brightness */
+	cmd = write_display_brightness;
+	ret = mdfld_dsi_send_mcs_short_lp(sender,
+					  cmd, 0xff, 1, MDFLD_DSI_SEND_PACKAGE);
+	if (ret)
+		goto err_out;
+
+	/* display control */
+	cmd = write_ctrl_display;
+	ret = mdfld_dsi_send_mcs_short_lp(sender,
+					  cmd, 0x20, 1, MDFLD_DSI_SEND_PACKAGE);
+	if (ret)
+		goto err_out;
+
+	/* tear on*/
+	cmd = set_tear_on;
+	ret = mdfld_dsi_send_mcs_short_lp(sender,
+					  cmd, 0x0, 1, MDFLD_DSI_SEND_PACKAGE);
+	if (ret)
+		goto err_out;
+
+	/* column address */
+	cmd = sdc16x25_8_mcs_column_addr[0];
+	ret = mdfld_dsi_send_mcs_long_lp(sender, sdc16x25_8_mcs_column_addr, 5,
+					 MDFLD_DSI_SEND_PACKAGE);
+	if (ret)
+		goto err_out;
+
+	/* page address */
+	cmd = sdc16x25_8_mcs_page_addr[0];
+	ret = mdfld_dsi_send_mcs_long_lp(sender, sdc16x25_8_mcs_page_addr, 5,
+					 MDFLD_DSI_SEND_PACKAGE);
+	if (ret)
+		goto err_out;
+
+	return 0;
+
+err_out:
+	DRM_ERROR("failed to send command %#x\n", cmd);
+	return ret;
+}
+
+static
+void sdc16x25_8_cmd_controller_init(
+				    struct mdfld_dsi_config *dsi_config)
+{
+	struct mdfld_dsi_hw_context *hw_ctx =
+		&dsi_config->dsi_hw_context;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	/*reconfig lane configuration*/
+	dsi_config->lane_count = 4;
+	dsi_config->lane_config = MDFLD_DSI_DATA_LANE_4_0;
+	hw_ctx->cck_div = 1;
+	hw_ctx->pll_bypass_mode = 0;
+
+	hw_ctx->mipi_control = 0x0;
+	hw_ctx->intr_en = 0xFFFFFFFF;
+	hw_ctx->hs_tx_timeout = 0xFFFFFF;
+	hw_ctx->lp_rx_timeout = 0xFFFFFF;
+	hw_ctx->device_reset_timer = 0xffff;
+	hw_ctx->turn_around_timeout = 0x3f;
+	hw_ctx->high_low_switch_count = 0x2b;
+	hw_ctx->clk_lane_switch_time_cnt =  0x2b0014;
+	hw_ctx->lp_byteclk = 0x6;
+	hw_ctx->dphy_param = 0x2a18681f;
+	hw_ctx->eot_disable = 0x3;
+	hw_ctx->init_count = 0xf0;
+	hw_ctx->dbi_bw_ctrl = 1024;
+	hw_ctx->hs_ls_dbi_enable = 0x0;
+	hw_ctx->dsi_func_prg = ((DBI_DATA_WIDTH_OPT2 << 13) |
+				dsi_config->lane_count);
+
+	hw_ctx->mipi = SEL_FLOPPED_HSTX
+			| PASS_FROM_SPHY_TO_AFE
+			| DUAL_LINK_ENABLE
+			| DUAL_LINK_CAPABLE
+			;
+	hw_ctx->video_mode_format = 0xf;
+}
+
+static int
+sdc16x25_8_cmd_panel_connection_detect(struct mdfld_dsi_config *dsi_config)
+{
+	int status;
+	int pipe = dsi_config->pipe;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (pipe == 0) {
+		status = MDFLD_DSI_PANEL_CONNECTED;
+	} else {
+		DRM_INFO("%s: do NOT support dual panel\n", __func__);
+		status = MDFLD_DSI_PANEL_DISCONNECTED;
+	}
+
+	return status;
+}
+
+static
+int sdc16x25_8_cmd_power_on(
+			    struct mdfld_dsi_config *dsi_config)
+{
+	struct mdfld_dsi_pkg_sender *sender =
+		mdfld_dsi_get_pkg_sender(dsi_config);
+	int ret;
+	u8 cmd;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (!sender) {
+		DRM_ERROR("Failed to get DSI packet sender\n");
+		return -EINVAL;
+	}
+
+	/* display on */
+	cmd = set_display_on;
+	ret = mdfld_dsi_send_mcs_short_lp(sender,
+					  cmd, 0, 0, MDFLD_DSI_SEND_PACKAGE);
+	if (ret)
+		goto err_out;
+	return 0;
+
+err_out:
+	DRM_ERROR("failed to send command %#x\n", cmd);
+	return ret;
+}
+
+static int sdc16x25_8_cmd_power_off(
+				    struct mdfld_dsi_config *dsi_config)
+{
+	struct mdfld_dsi_pkg_sender *sender =
+		mdfld_dsi_get_pkg_sender(dsi_config);
+	int err;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (!sender) {
+		DRM_ERROR("Failed to get DSI packet sender\n");
+		return -EINVAL;
+	}
+
+	err = mdfld_dsi_send_mcs_short_lp(sender,
+					  set_display_off, 0, 0,
+					  MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Set Display Off\n", __func__, __LINE__);
+		goto power_off_err;
+	}
+
+	msleep(35);
+
+	err = mdfld_dsi_send_mcs_short_lp(sender,
+					  enter_sleep_mode, 0, 0,
+					  MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Enter Sleep Mode\n", __func__, __LINE__);
+		goto power_off_err;
+	}
+
+	msleep(120);
+
+	if (bias_en_gpio)
+		gpio_set_value_cansleep(bias_en_gpio, 0);
+	usleep_range(1000, 1500);
+	return 0;
+power_off_err:
+	err = -EIO;
+	return err;
+}
+
+static
+int sdc16x25_8_cmd_set_brightness(
+				  struct mdfld_dsi_config *dsi_config,
+				  int level)
+{
+	struct mdfld_dsi_pkg_sender *sender =
+		mdfld_dsi_get_pkg_sender(dsi_config);
+	u8 duty_val = 0;
+
+	PSB_DEBUG_ENTRY("level = %d\n", level);
+
+	if (!sender) {
+		DRM_ERROR("Failed to get DSI packet sender\n");
+		return -EINVAL;
+	}
+
+	duty_val = (0xFF * level) / 255;
+	mdfld_dsi_send_mcs_short_lp(sender,
+				    write_display_brightness, duty_val, 1,
+				    MDFLD_DSI_SEND_PACKAGE);
+	return 0;
+}
+
+static void _get_panel_reset_gpio(void)
+{
+	int ret = 0;
+	if (mipi_reset_gpio == 0) {
+		ret = get_gpio_by_name("disp0_rst");
+		if (ret < 0) {
+			DRM_ERROR("Faild to get panel reset gpio, " \
+				  "use default reset pin\n");
+			return;
+		}
+		mipi_reset_gpio = ret;
+		ret = gpio_request(mipi_reset_gpio, "mipi_display");
+		if (ret) {
+			DRM_ERROR("Faild to request panel reset gpio\n");
+			return;
+		}
+		gpio_direction_output(mipi_reset_gpio, 0);
+	}
+}
+
+static
+int sdc16x25_8_cmd_panel_reset(
+			       struct mdfld_dsi_config *dsi_config)
+{
+	int ret = 0;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (bias_en_gpio == 0) {
+		bias_en_gpio = 189;
+		ret = gpio_request(bias_en_gpio, "bias_enable");
+		if (ret) {
+			DRM_ERROR("Faild to request bias_enable gpio\n");
+			return -EINVAL;
+		}
+		gpio_direction_output(bias_en_gpio, 0);
+	}
+
+	_get_panel_reset_gpio();
+
+	gpio_direction_output(bias_en_gpio, 0);
+	gpio_direction_output(mipi_reset_gpio, 0);
+	gpio_set_value_cansleep(bias_en_gpio, 0);
+	gpio_set_value_cansleep(mipi_reset_gpio, 0);
+	msleep(15);
+	gpio_set_value_cansleep(bias_en_gpio, 1);
+	gpio_set_value_cansleep(mipi_reset_gpio, 1);
+	msleep(5);
+	return 0;
+}
+
+static
+int sdc16x25_8_cmd_exit_deep_standby(
+				     struct mdfld_dsi_config *dsi_config)
+{
+	PSB_DEBUG_ENTRY("\n");
+
+	if (bias_en_gpio)
+		gpio_set_value_cansleep(bias_en_gpio, 1);
+	_get_panel_reset_gpio();
+	gpio_direction_output(mipi_reset_gpio, 0);
+
+	gpio_set_value_cansleep(mipi_reset_gpio, 0);
+	msleep(15);
+	gpio_set_value_cansleep(mipi_reset_gpio, 1);
+	msleep(5);
+	return 0;
+}
+
+static
+struct drm_display_mode *sdc16x25_8_cmd_get_config_mode(void)
+{
+	struct drm_display_mode *mode;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+	if (!mode)
+		return NULL;
+
+	mode->hdisplay = WIDTH;
+
+	mode->hsync_start = mode->hdisplay + 48;
+	mode->hsync_end = mode->hsync_start + 32;
+	mode->htotal = mode->hsync_end + 80;
+
+	mode->vdisplay = HEIGHT;
+	mode->vsync_start = mode->vdisplay + 3;
+	mode->vsync_end = mode->vsync_start + 33;
+	mode->vtotal = mode->vsync_end + 10;
+
+	mode->vrefresh = 60;
+	mode->clock =  mode->vrefresh * mode->vtotal * mode->htotal / 1000;
+	mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+	drm_mode_set_name(mode);
+	drm_mode_set_crtcinfo(mode, 0);
+
+	return mode;
+}
+
+
+static void sdc16x25_8_cmd_get_panel_info(int pipe, struct panel_info *pi)
+{
+	PSB_DEBUG_ENTRY("\n");
+
+	if (pipe == 0) {
+		pi->width_mm = 130;
+		pi->height_mm = 181;
+		pi->panel_180_rotation = true;
+	}
+}
+
+void sdc16x25_8_cmd_init(struct drm_device *dev, struct panel_funcs *p_funcs)
+{
+	if (!dev || !p_funcs) {
+		DRM_ERROR("Invalid parameters\n");
+		return;
+	}
+	PSB_DEBUG_ENTRY("\n");
+	p_funcs->reset = sdc16x25_8_cmd_panel_reset;
+	p_funcs->power_on = sdc16x25_8_cmd_power_on;
+	p_funcs->power_off = sdc16x25_8_cmd_power_off;
+	p_funcs->drv_ic_init = sdc16x25_8_cmd_drv_ic_init;
+	p_funcs->get_config_mode = sdc16x25_8_cmd_get_config_mode;
+	p_funcs->get_panel_info = sdc16x25_8_cmd_get_panel_info;
+	p_funcs->dsi_controller_init =
+		sdc16x25_8_cmd_controller_init;
+	p_funcs->detect =
+		sdc16x25_8_cmd_panel_connection_detect;
+	p_funcs->set_brightness =
+		sdc16x25_8_cmd_set_brightness;
+	p_funcs->exit_deep_standby =
+		sdc16x25_8_cmd_exit_deep_standby;
+
+}
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/sdc25x16_cmd.c b/drivers/external_drivers/intel_media/display/tng/drv/sdc25x16_cmd.c
new file mode 100644
index 0000000..2c55c85
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/sdc25x16_cmd.c
@@ -0,0 +1,435 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Faxing Lu <faxing.lu@intel.com>
+ */
+
+#include "mdfld_dsi_dbi.h"
+#include "mdfld_output.h"
+#include "mdfld_dsi_pkg_sender.h"
+#include <asm/intel_scu_pmic.h>
+#include "displays/sdc25x16_cmd.h"
+
+static int vdd_1_8v_gpio;
+
+static u8 sdc_column_addr[] = {
+			0x2a, 0x00, 0x00, 0x04, 0xff};
+static u8 sdc_page_addr[] = {
+			0x2b, 0x00, 0x00, 0x06, 0x3f};
+static	u8 sdc_set_300nit[34] = { 0x83,
+								0x80, 0x80,
+								0x80, 0x80,
+								0x80, 0x80,
+								0x80, 0x00,
+								0x80, 0x80,
+								0x00,
+								0x80, 0x80,
+								0x80, 0x80,
+								0x80, 0x80,
+								0x80, 0x00,
+								0x80, 0x80,
+								0x00,
+								0x80, 0x80,
+								0x80, 0x80,
+								0x80, 0x80,
+								0x80, 0x00,
+								0x80, 0x80,
+								0x00};
+static	u8 sdc_set_AID[] = { 0x85, 0x06, 0x00 };
+
+static
+int sdc25x16_cmd_drv_ic_init(struct mdfld_dsi_config *dsi_config)
+{
+	struct mdfld_dsi_pkg_sender *sender
+		= mdfld_dsi_get_pkg_sender(dsi_config);
+	int err = 0;
+
+	PSB_DEBUG_ENTRY("\n");
+	if (!sender) {
+		DRM_ERROR("Cannot get sender\n");
+		return -EINVAL;
+	}
+	err = mdfld_dsi_send_gen_long_hs(sender,
+			sdc_set_300nit,
+			34, MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Set 300nit\n",
+		__func__, __LINE__);
+		goto ic_init_err;
+	}
+	err = mdfld_dsi_send_gen_long_hs(sender,
+			sdc_set_AID,
+			3, MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Set AID\n",
+		__func__, __LINE__);
+		goto ic_init_err;
+	}
+
+	err = mdfld_dsi_send_gen_short_hs(sender,
+			0xB0, 0x34, 2,
+			MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Set global para.53rd\n",
+		__func__, __LINE__);
+		goto ic_init_err;
+	}
+
+	err = mdfld_dsi_send_gen_short_hs(sender,
+			0xBB, 0x19, 2,
+			MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Set ELVSS\n",
+		__func__, __LINE__);
+		goto ic_init_err;
+	}
+	err = mdfld_dsi_send_gen_short_hs(sender,
+			0xB0, 0x2E, 2,
+			MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Set global para.47th\n",
+		__func__, __LINE__);
+		goto ic_init_err;
+	}
+	err = mdfld_dsi_send_gen_short_hs(sender,
+			0xBB, 0x01, 2,
+			MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Gamma Update\n",
+		__func__, __LINE__);
+		goto ic_init_err;
+	}
+	msleep(5);
+	/* Sleep Out */
+	err = mdfld_dsi_send_mcs_short_hs(sender, exit_sleep_mode, 0, 0,
+			MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Exit Sleep Mode\n", __func__, __LINE__);
+		goto ic_init_err;
+	}
+
+	msleep(80);
+	err = mdfld_dsi_send_mcs_short_hs(sender,
+			write_display_brightness, 0xff, 1,
+			MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Set Brightness\n",
+		__func__, __LINE__);
+		goto ic_init_err;
+	}
+	/* Write control display */
+	err = mdfld_dsi_send_mcs_short_hs(sender, write_ctrl_display,
+			0x20, 1,
+			MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Write Control Display\n", __func__,
+				__LINE__);
+		goto ic_init_err;
+	}
+
+	err = mdfld_dsi_send_mcs_long_hs(sender,
+			sdc_column_addr,
+			5, MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Set Column Address\n",
+		__func__, __LINE__);
+		goto ic_init_err;
+	}
+	err = mdfld_dsi_send_mcs_long_hs(sender,
+			sdc_page_addr,
+			5, MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Set Page Address\n",
+		__func__, __LINE__);
+		goto ic_init_err;
+	}
+	err = mdfld_dsi_send_mcs_short_hs(sender,
+			set_tear_on, 0x0, 1,
+			MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Set Tear On\n",
+		__func__, __LINE__);
+		goto ic_init_err;
+	}
+	return 0;
+ic_init_err:
+	err = -EIO;
+	return err;
+}
+
+static
+void sdc25x16_cmd_controller_init(
+		struct mdfld_dsi_config *dsi_config)
+{
+	struct mdfld_dsi_hw_context *hw_ctx =
+				&dsi_config->dsi_hw_context;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	/*reconfig lane configuration*/
+	dsi_config->lane_count = 4;
+	dsi_config->lane_config = MDFLD_DSI_DATA_LANE_4_0;
+	hw_ctx->cck_div = 1;
+	hw_ctx->pll_bypass_mode = 0;
+
+	hw_ctx->mipi_control = 0x0;
+	hw_ctx->intr_en = 0xFFFFFFFF;
+	hw_ctx->hs_tx_timeout = 0xFFFFFF;
+	hw_ctx->lp_rx_timeout = 0xFFFFFF;
+	hw_ctx->device_reset_timer = 0xffff;
+	hw_ctx->turn_around_timeout = 0x3f;
+	hw_ctx->high_low_switch_count = 0x2b;
+	hw_ctx->clk_lane_switch_time_cnt =  0x2b0014;
+	hw_ctx->lp_byteclk = 0x6;
+	hw_ctx->dphy_param = 0x2a18681f;
+	hw_ctx->eot_disable = 0x1 | BIT8;
+	hw_ctx->init_count = 0xf0;
+	hw_ctx->dbi_bw_ctrl = 1024;
+	hw_ctx->hs_ls_dbi_enable = 0x0;
+	hw_ctx->dsi_func_prg = ((DBI_DATA_WIDTH_OPT2 << 13) |
+				dsi_config->lane_count);
+
+	hw_ctx->mipi = SEL_FLOPPED_HSTX	| PASS_FROM_SPHY_TO_AFE |
+		DUAL_LINK_ENABLE | DUAL_LINK_CAPABLE | TE_TRIGGER_GPIO_PIN | BANDGAP_CHICKEN_BIT;
+	hw_ctx->video_mode_format = 0xf;
+}
+static
+int sdc25x16_cmd_panel_connection_detect(
+	struct mdfld_dsi_config *dsi_config)
+{
+	int status;
+	int pipe = dsi_config->pipe;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (pipe == 0) {
+		status = MDFLD_DSI_PANEL_CONNECTED;
+	} else {
+		DRM_INFO("%s: do NOT support dual panel\n",
+		__func__);
+		status = MDFLD_DSI_PANEL_DISCONNECTED;
+	}
+
+	return status;
+}
+
+static
+int sdc25x16_cmd_power_on(
+	struct mdfld_dsi_config *dsi_config)
+{
+	struct mdfld_dsi_pkg_sender *sender =
+		mdfld_dsi_get_pkg_sender(dsi_config);
+	int err = 0;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (!sender) {
+		DRM_ERROR("Failed to get DSI packet sender\n");
+		return -EINVAL;
+	}
+	/* Set Display on 0x29 */
+	err = mdfld_dsi_send_mcs_short_hs(sender, set_display_on, 0, 0,
+			MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Set Display On\n", __func__, __LINE__);
+		goto power_err;
+	}
+power_err:
+	return err;
+}
+
+static int sdc25x16_cmd_power_off(
+		struct mdfld_dsi_config *dsi_config)
+{
+	struct mdfld_dsi_pkg_sender *sender =
+		mdfld_dsi_get_pkg_sender(dsi_config);
+	int err;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (!sender) {
+		DRM_ERROR("Failed to get DSI packet sender\n");
+		return -EINVAL;
+	}
+
+	err = mdfld_dsi_send_mcs_short_hs(sender,
+			set_display_off, 0, 0,
+			MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Set Display Off\n",
+		__func__, __LINE__);
+		goto power_off_err;
+	}
+	msleep(150);
+
+	err = mdfld_dsi_send_mcs_short_hs(sender,
+			enter_sleep_mode, 0, 0,
+			MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Enter Sleep Mode\n",
+		__func__, __LINE__);
+		goto power_off_err;
+	}
+	msleep(120);
+
+	return 0;
+power_off_err:
+	err = -EIO;
+	return err;
+}
+
+static
+int sdc25x16_cmd_set_brightness(
+		struct mdfld_dsi_config *dsi_config,
+		int level)
+{
+	struct mdfld_dsi_pkg_sender *sender =
+		mdfld_dsi_get_pkg_sender(dsi_config);
+	u8 duty_val = 0;
+
+	PSB_DEBUG_ENTRY("level = %d\n", level);
+
+	if (!sender) {
+		DRM_ERROR("Failed to get DSI packet sender\n");
+		return -EINVAL;
+	}
+
+	duty_val = (0xFF * level) / 255;
+	mdfld_dsi_send_mcs_short_hs(sender,
+		write_display_brightness, duty_val, 1,
+		MDFLD_DSI_SEND_PACKAGE);
+	return 0;
+}
+
+static
+int sdc25x16_cmd_panel_reset(
+		struct mdfld_dsi_config *dsi_config)
+{
+	int ret = 0;
+
+	msleep(30);
+	if (vdd_1_8v_gpio == 0) {
+		vdd_1_8v_gpio = 155;
+		ret = gpio_request(vdd_1_8v_gpio, "vdd_1_8v_gpio");
+		if (ret) {
+			DRM_ERROR("Faild to request vdd_1_8v gpio\n");
+			return -EINVAL;
+		}
+	}
+	gpio_direction_output(vdd_1_8v_gpio, 0);
+	gpio_set_value_cansleep(vdd_1_8v_gpio, 0);
+	usleep_range(2000, 2500);
+	gpio_set_value_cansleep(vdd_1_8v_gpio, 1);
+	msleep(800);
+	return 0;
+}
+
+static
+int sdc25x16_cmd_exit_deep_standby(
+		struct mdfld_dsi_config *dsi_config)
+{
+	static bool bFirst = true;
+
+	PSB_DEBUG_ENTRY("\n");
+	if (bFirst) bFirst = false;
+	else {
+		msleep(30);
+		gpio_direction_output(vdd_1_8v_gpio, 0);
+		gpio_set_value_cansleep(vdd_1_8v_gpio, 0);
+		usleep_range(2000, 2500);
+		gpio_set_value_cansleep(vdd_1_8v_gpio, 1);
+		usleep_range(2000, 2500);
+	}
+	return 0;
+}
+
+static
+struct drm_display_mode *sdc25x16_cmd_get_config_mode(void)
+{
+	struct drm_display_mode *mode;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+	if (!mode)
+		return NULL;
+
+	mode->hdisplay = 2560;
+
+	mode->hsync_start = mode->hdisplay + 48;
+	mode->hsync_end = mode->hsync_start + 32;
+	mode->htotal = mode->hsync_end + 80;
+
+	mode->vdisplay = 1600;
+	mode->vsync_start = mode->vdisplay + 3;
+	mode->vsync_end = mode->vsync_start + 33;
+	mode->vtotal = mode->vsync_end + 10;
+
+
+	mode->vrefresh = 60;
+	mode->clock =  mode->vrefresh * mode->vtotal * mode->htotal / 1000;
+	mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+	drm_mode_set_name(mode);
+	drm_mode_set_crtcinfo(mode, 0);
+
+	return mode;
+}
+
+static
+void sdc25x16_cmd_get_panel_info(int pipe,
+		struct panel_info *pi)
+{
+	PSB_DEBUG_ENTRY("\n");
+
+	if (pipe == 0) {
+		pi->width_mm = 130;
+		pi->height_mm = 181;
+	}
+}
+
+void sdc25x16_cmd_init(struct drm_device *dev,
+		struct panel_funcs *p_funcs)
+{
+	if (!dev || !p_funcs) {
+		DRM_ERROR("Invalid parameters\n");
+		return;
+	}
+	PSB_DEBUG_ENTRY("\n");
+	p_funcs->reset = sdc25x16_cmd_panel_reset;
+	p_funcs->power_on = sdc25x16_cmd_power_on;
+	p_funcs->power_off = sdc25x16_cmd_power_off;
+	p_funcs->drv_ic_init = sdc25x16_cmd_drv_ic_init;
+	p_funcs->get_config_mode = sdc25x16_cmd_get_config_mode;
+	p_funcs->get_panel_info = sdc25x16_cmd_get_panel_info;
+	p_funcs->dsi_controller_init =
+			sdc25x16_cmd_controller_init;
+	p_funcs->detect =
+			sdc25x16_cmd_panel_connection_detect;
+	p_funcs->set_brightness =
+			sdc25x16_cmd_set_brightness;
+	p_funcs->exit_deep_standby =
+				sdc25x16_cmd_exit_deep_standby;
+
+}
+
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/sharp10x19_cmd.c b/drivers/external_drivers/intel_media/display/tng/drv/sharp10x19_cmd.c
new file mode 100644
index 0000000..e9ecd1c
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/sharp10x19_cmd.c
@@ -0,0 +1,623 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Faxing Lu <faxing.lu@intel.com>
+ */
+
+#include "mdfld_dsi_dbi.h"
+#include "mdfld_dsi_esd.h"
+#include <asm/intel_scu_pmic.h>
+#include <asm/intel_mid_rpmsg.h>
+#include <asm/intel_mid_remoteproc.h>
+
+#include "displays/sharp10x19_cmd.h"
+
+/* The register to control secure I2C FLIS pin */
+#define SECURE_I2C_FLIS_REG	0xFF0C1D30
+
+#define EXPANDER_BUS_NUMBER 7
+
+static int mipi_reset_gpio;
+static int mipic_reset_gpio;
+static int bias_en_gpio;
+
+#define sharp10x19_remove_nvm_reload 0xd6
+static	u8 sharp10x19_mcs_column_addr[] = { 0x2a, 0x00, 0x00, 0x04, 0x37 };
+static	u8 sharp10x19_mcs_page_addr[] = { 0x2b, 0x00, 0x00, 0x07, 0x7f };
+
+static int sharp10x19_cmd_drv_ic_init(struct mdfld_dsi_config *dsi_config)
+{
+	struct mdfld_dsi_pkg_sender *sender =
+		mdfld_dsi_get_pkg_sender(dsi_config);
+	struct drm_device *dev = dsi_config->dev;
+	int ret;
+	u8 cmd;
+	int i;
+	int loop = 1;
+
+	if (!sender) {
+		DRM_ERROR("Failed to get DSI packet sender\n");
+		return -EINVAL;
+	}
+	if (is_dual_panel(dev))
+		loop = 2;
+	for(i = 0; i < loop; i++) {
+		if (i == 0)
+			sender->work_for_slave_panel = false;
+		else
+			sender->work_for_slave_panel = true;
+
+		/* exit sleep */
+		cmd = exit_sleep_mode;
+		ret = mdfld_dsi_send_mcs_short_hs(sender,
+						  cmd, 0, 0, MDFLD_DSI_SEND_PACKAGE);
+		if (ret)
+			goto err_out;
+		msleep(120);
+
+		/* unlock MCW */
+		cmd = access_protect;
+		ret = mdfld_dsi_send_mcs_short_hs(sender,
+						  cmd, 0x0, 1, MDFLD_DSI_SEND_PACKAGE);
+		if (ret)
+			goto err_out;
+
+		/* reload NVM */
+		cmd = sharp10x19_remove_nvm_reload;
+		ret = mdfld_dsi_send_mcs_short_hs(sender,
+						  cmd, 0x1, 1, MDFLD_DSI_SEND_PACKAGE);
+		if (ret)
+			goto err_out;
+
+		/* send display brightness */
+		cmd = write_display_brightness;
+		ret = mdfld_dsi_send_mcs_short_hs(sender,
+						  cmd, 0xff, 1, MDFLD_DSI_SEND_PACKAGE);
+		if (ret)
+			goto err_out;
+
+		/* display control */
+		cmd = write_ctrl_display;
+		ret = mdfld_dsi_send_mcs_short_hs(sender,
+						  cmd, 0x0c, 1, MDFLD_DSI_SEND_PACKAGE);
+		if (ret)
+			goto err_out;
+
+		/* CABC */
+		cmd = 0x55;
+		ret = mdfld_dsi_send_mcs_short_hs(sender,
+						  cmd, 0x0, 1, MDFLD_DSI_SEND_PACKAGE);
+		if (ret)
+			goto err_out;
+
+		/* tear on*/
+		cmd = set_tear_on;
+		ret = mdfld_dsi_send_mcs_short_hs(sender,
+						  cmd, 0x0, 1, MDFLD_DSI_SEND_PACKAGE);
+		if (ret)
+			goto err_out;
+
+		/* column address */
+		cmd = 0x2a;
+		ret = mdfld_dsi_send_mcs_long_hs(sender, sharp10x19_mcs_column_addr, 5,
+						 MDFLD_DSI_SEND_PACKAGE);
+		if (ret)
+			goto err_out;
+
+		/* page address */
+		cmd = 0x2b;
+		ret = mdfld_dsi_send_mcs_long_hs(sender, sharp10x19_mcs_page_addr, 5,
+						 MDFLD_DSI_SEND_PACKAGE);
+		if (ret)
+			goto err_out;
+	}
+	sender->work_for_slave_panel = false;
+	return 0;
+
+err_out:
+	sender->work_for_slave_panel = false;
+	DRM_ERROR("failed to send command %#x\n", cmd);
+	return ret;
+}
+
+static void sharp10x19_cmd_controller_init(struct mdfld_dsi_config *dsi_config)
+{
+	struct mdfld_dsi_hw_context *hw_ctx =
+				&dsi_config->dsi_hw_context;
+	struct drm_device *dev = dsi_config->dev;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	/*reconfig lane configuration*/
+	dsi_config->lane_count = 4;
+	dsi_config->lane_config = MDFLD_DSI_DATA_LANE_4_0;
+	hw_ctx->cck_div = 1;
+	hw_ctx->pll_bypass_mode = 0;
+
+	hw_ctx->mipi_control = 0x0;
+	hw_ctx->intr_en = 0xFFFFFFFF;
+	hw_ctx->hs_tx_timeout = 0xFFFFFF;
+	hw_ctx->lp_rx_timeout = 0xFFFFFF;
+	hw_ctx->device_reset_timer = 0xffff;
+	hw_ctx->turn_around_timeout = 0x14;
+	if (is_dual_panel(dev)) {
+		hw_ctx->high_low_switch_count = 0x2B;
+		hw_ctx->clk_lane_switch_time_cnt =  0x2b0014;
+		hw_ctx->eot_disable = 0x0;
+	} else {
+		hw_ctx->high_low_switch_count = 0x2c;
+		hw_ctx->clk_lane_switch_time_cnt =  0x2e0016;
+		hw_ctx->eot_disable = 0x1;
+	}
+	hw_ctx->lp_byteclk = 0x6;
+	hw_ctx->dphy_param = 0x2a18681f;
+
+	hw_ctx->init_count = 0xf0;
+	hw_ctx->dbi_bw_ctrl = calculate_dbi_bw_ctrl(dsi_config->lane_count);
+	hw_ctx->hs_ls_dbi_enable = 0x0;
+	hw_ctx->dsi_func_prg = ((DBI_DATA_WIDTH_OPT2 << 13) |
+				dsi_config->lane_count);
+	if (is_dual_panel(dev))
+		hw_ctx->mipi = SEL_FLOPPED_HSTX	| PASS_FROM_SPHY_TO_AFE |
+			DUAL_LINK_ENABLE | DUAL_LINK_CAPABLE;
+	else
+		hw_ctx->mipi = PASS_FROM_SPHY_TO_AFE |
+			BANDGAP_CHICKEN_BIT | TE_TRIGGER_GPIO_PIN;
+
+	hw_ctx->video_mode_format = 0xf;
+
+}
+
+static int
+sharp10x19_cmd_panel_connection_detect(struct mdfld_dsi_config *dsi_config)
+{
+	int status;
+	int pipe = dsi_config->pipe;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (pipe == 0) {
+		status = MDFLD_DSI_PANEL_CONNECTED;
+	} else {
+		DRM_INFO("%s: do NOT support dual panel\n", __func__);
+		status = MDFLD_DSI_PANEL_DISCONNECTED;
+	}
+
+	return status;
+}
+
+static int sharp10x19_cmd_power_on(struct mdfld_dsi_config *dsi_config)
+{
+	struct mdfld_dsi_pkg_sender *sender =
+		mdfld_dsi_get_pkg_sender(dsi_config);
+	struct drm_device *dev = dsi_config->dev;
+	int ret;
+	u8 cmd;
+	int i;
+	int loop = 1;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (!sender) {
+		DRM_ERROR("Failed to get DSI packet sender\n");
+		return -EINVAL;
+	}
+	if (is_dual_panel(dev))
+		loop = 2;
+	for(i = 0; i < loop; i++) {
+		if (i == 0)
+			sender->work_for_slave_panel = false;
+		else
+			sender->work_for_slave_panel = true;
+		/* address mode */
+		cmd = set_address_mode;
+		ret = mdfld_dsi_send_mcs_short_hs(sender,
+						  cmd, 0x0, 1, MDFLD_DSI_SEND_PACKAGE);
+		if (ret)
+			goto err_out;
+
+		/* pixel format*/
+		cmd = set_pixel_format;
+		ret = mdfld_dsi_send_mcs_short_hs(sender,
+						  cmd, 0x77, 1, MDFLD_DSI_SEND_PACKAGE);
+		if (ret)
+			goto err_out;
+
+		/* display on */
+		cmd = set_display_on;
+		ret = mdfld_dsi_send_mcs_short_hs(sender,
+						  cmd, 0, 0, MDFLD_DSI_SEND_PACKAGE);
+		if (ret)
+			goto err_out;
+	}
+	sender->work_for_slave_panel = false;
+	return 0;
+
+err_out:
+	sender->work_for_slave_panel = false;
+	DRM_ERROR("failed to send command %#x\n", cmd);
+	return ret;
+}
+
+static void __vpro2_power_ctrl(bool on)
+{
+	u8 addr, value;
+	addr = 0xad;
+	if (intel_scu_ipc_ioread8(addr, &value))
+		DRM_ERROR("%s: %d: failed to read vPro2\n", __func__, __LINE__);
+
+	/* Control vPROG2 power rail with 2.85v. */
+	if (on)
+		value |= 0x1;
+	else
+		value &= ~0x1;
+
+	if (intel_scu_ipc_iowrite8(addr, value))
+		DRM_ERROR("%s: %d: failed to write vPro2\n",
+				__func__, __LINE__);
+}
+
+static int sharp10x19_cmd_power_off(struct mdfld_dsi_config *dsi_config)
+{
+	struct mdfld_dsi_pkg_sender *sender =
+		mdfld_dsi_get_pkg_sender(dsi_config);
+	struct drm_device *dev = dsi_config->dev;
+	int err;
+	int i;
+	int loop = 1;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (!sender) {
+		DRM_ERROR("Failed to get DSI packet sender\n");
+		return -EINVAL;
+	}
+	if (is_dual_panel(dev))
+		loop = 2;
+
+	for(i = 0; i < loop; i++) {
+		if (i == 0)
+			sender->work_for_slave_panel = false;
+		else
+			sender->work_for_slave_panel = true;
+
+		err = mdfld_dsi_send_mcs_short_hs(sender,
+				set_display_off, 0, 0,
+				MDFLD_DSI_SEND_PACKAGE);
+		if (err) {
+			DRM_ERROR("%s: %d: Set Display Off\n", __func__, __LINE__);
+			goto power_off_err;
+		}
+		usleep_range(20000, 20100);
+
+		err = mdfld_dsi_send_mcs_short_hs(sender,
+				set_tear_off, 0, 0,
+				MDFLD_DSI_SEND_PACKAGE);
+		if (err) {
+			DRM_ERROR("%s: %d: Set Tear Off\n", __func__, __LINE__);
+			goto power_off_err;
+		}
+
+		err = mdfld_dsi_send_mcs_short_hs(sender,
+				enter_sleep_mode, 0, 0,
+				MDFLD_DSI_SEND_PACKAGE);
+		if (err) {
+			DRM_ERROR("%s: %d: Enter Sleep Mode\n", __func__, __LINE__);
+			goto power_off_err;
+		}
+
+		msleep(60);
+
+		err = mdfld_dsi_send_gen_short_hs(sender,
+			access_protect, 4, 2,
+			MDFLD_DSI_SEND_PACKAGE);
+		if (err) {
+			DRM_ERROR("%s: %d: Set Access Protect\n", __func__, __LINE__);
+			goto power_off_err;
+		}
+
+		err = mdfld_dsi_send_gen_short_hs(sender, low_power_mode, 1, 2,
+						  MDFLD_DSI_SEND_PACKAGE);
+		if (err) {
+			DRM_ERROR("%s: %d: Set Low Power Mode\n", __func__, __LINE__);
+			goto power_off_err;
+		}
+		if (bias_en_gpio)
+			gpio_set_value_cansleep(bias_en_gpio, 0);
+		usleep_range(1000, 1500);
+	}
+	sender->work_for_slave_panel = false;
+	return 0;
+power_off_err:
+	sender->work_for_slave_panel = false;
+	err = -EIO;
+	return err;
+}
+
+static int
+sharp10x19_cmd_set_brightness( struct mdfld_dsi_config *dsi_config, int level)
+{
+	struct mdfld_dsi_pkg_sender *sender =
+		mdfld_dsi_get_pkg_sender(dsi_config);
+	u8 duty_val = 0;
+	struct drm_device *dev = dsi_config->dev;
+
+	PSB_DEBUG_ENTRY("level = %d\n", level);
+
+	if (!sender) {
+		DRM_ERROR("Failed to get DSI packet sender\n");
+		return -EINVAL;
+	}
+
+	duty_val = (0xFF * level) / 255;
+	mdfld_dsi_send_mcs_short_hs(sender,
+			write_display_brightness, duty_val, 1,
+			MDFLD_DSI_SEND_PACKAGE);
+	if (is_dual_panel(dev)) {
+		sender->work_for_slave_panel = true;
+		mdfld_dsi_send_mcs_short_hs(sender,
+				write_display_brightness, duty_val, 1,
+				MDFLD_DSI_SEND_PACKAGE);
+		sender->work_for_slave_panel = false;
+	}
+	return 0;
+}
+
+static void _get_panel_reset_gpio(bool is_dual_panel)
+{
+	int ret = 0;
+	if (mipi_reset_gpio == 0) {
+		ret = get_gpio_by_name("disp0_rst");
+		if (ret < 0) {
+			DRM_ERROR("Faild to get panel reset gpio, " \
+				  "use default reset pin\n");
+			return;
+		}
+		mipi_reset_gpio = ret;
+		ret = gpio_request(mipi_reset_gpio, "mipi_display");
+		if (ret) {
+			DRM_ERROR("Faild to request panel reset gpio\n");
+			return;
+		}
+		gpio_direction_output(mipi_reset_gpio, 0);
+		pr_info("gpio_reseta=%d\n", mipi_reset_gpio);
+	}
+	if (is_dual_panel && (mipic_reset_gpio == 0)) {
+		ret = 155;
+		if (ret < 0) {
+			DRM_ERROR("Faild to get panel reset gpio, " \
+				  "use default reset pin\n");
+			return;
+		}
+		mipic_reset_gpio = ret;
+		ret = gpio_request(mipic_reset_gpio, "mipic_display");
+		if (ret) {
+			DRM_ERROR("Faild to request panel reset gpio(c)\n");
+			return;
+		}
+		gpio_direction_output(mipic_reset_gpio, 0);
+		pr_info("gpio_resetc=%d\n", mipic_reset_gpio);
+	}
+
+}
+
+static int sharp10x19_cmd_panel_reset(struct mdfld_dsi_config *dsi_config)
+{
+	int ret = 0;
+	u8 *vaddr = NULL, *vaddr1 = NULL;
+	struct drm_device *dev = dsi_config->dev;
+	int reg_value_scl = 0;
+
+	PSB_DEBUG_ENTRY("\n");
+	if (is_dual_panel(dev)) {
+		struct i2c_adapter *adapter;
+		u8 i2_data[4];
+		adapter = i2c_get_adapter(EXPANDER_BUS_NUMBER);
+		if (adapter) {
+			i2_data[0] = 0x4;
+			i2_data[1] = 0x0;
+			i2c_clients_command(adapter, 1, i2_data);
+			i2_data[0] = 0x5;
+			i2_data[1] = 0x3;
+			i2c_clients_command(adapter, 1, i2_data);
+		}
+	}
+	/* Because when reset touchscreen panel, touchscreen will pull i2c bus
+	 * to low, sometime this operation will cause i2c bus enter into wrong
+	 * status, so before reset, switch i2c scl pin */
+	vaddr1 = ioremap(SECURE_I2C_FLIS_REG, 4);
+	reg_value_scl = ioread32(vaddr1);
+	reg_value_scl &= ~0x1000;
+	rpmsg_send_generic_raw_command(RP_INDIRECT_WRITE, 0,
+					(u8 *)&reg_value_scl, 4,
+					NULL, 0,
+					SECURE_I2C_FLIS_REG, 0);
+
+	__vpro2_power_ctrl(true);
+	usleep_range(2000, 2500);
+
+	if (bias_en_gpio == 0) {
+		bias_en_gpio = 189;
+		ret = gpio_request(bias_en_gpio, "bias_enable");
+		if (ret) {
+			DRM_ERROR("Faild to request bias_enable gpio\n");
+			return -EINVAL;
+		}
+		gpio_direction_output(bias_en_gpio, 0);
+		pr_info("gpio_bias_enable=%d\n", bias_en_gpio);
+	}
+
+	_get_panel_reset_gpio(is_dual_panel(dev));
+
+	gpio_direction_output(bias_en_gpio, 0);
+	gpio_direction_output(mipi_reset_gpio, 0);
+	if (is_dual_panel(dev))
+		gpio_direction_output(mipic_reset_gpio, 0);
+	gpio_set_value_cansleep(bias_en_gpio, 0);
+	gpio_set_value_cansleep(mipi_reset_gpio, 0);
+	if (is_dual_panel(dev))
+		gpio_set_value_cansleep(mipic_reset_gpio, 0);
+	usleep_range(2000, 2500);
+	gpio_set_value_cansleep(bias_en_gpio, 1);
+	usleep_range(2000, 2500);
+	gpio_set_value_cansleep(mipi_reset_gpio, 1);
+	usleep_range(2000, 2500);
+	if (is_dual_panel(dev)) {
+		gpio_set_value_cansleep(mipic_reset_gpio, 1);
+		usleep_range(3000, 3500);
+	}
+	vaddr = ioremap(0xff0c2d00, 0x60);
+	iowrite32(0x3221, vaddr + 0x1c);
+	usleep_range(2000, 2500);
+	iounmap(vaddr);
+
+	/* switch i2c scl pin back */
+	reg_value_scl |= 0x1000;
+	rpmsg_send_generic_raw_command(RP_INDIRECT_WRITE, 0,
+					(u8 *)&reg_value_scl, 4,
+					NULL, 0,
+					SECURE_I2C_FLIS_REG, 0);
+	iounmap(vaddr1);
+	return 0;
+}
+
+static int sharp10x19_cmd_exit_deep_standby(struct mdfld_dsi_config *dsi_config)
+{
+	struct drm_device *dev = dsi_config->dev;
+	PSB_DEBUG_ENTRY("\n");
+
+	if (bias_en_gpio)
+		gpio_set_value_cansleep(bias_en_gpio, 1);
+	_get_panel_reset_gpio(is_dual_panel(dev));
+	gpio_direction_output(mipi_reset_gpio, 0);
+
+	gpio_set_value_cansleep(mipi_reset_gpio, 0);
+	usleep_range(1000, 1500);
+	gpio_set_value_cansleep(mipi_reset_gpio, 1);
+	usleep_range(3000, 3500);
+	if (is_dual_panel(dev)) {
+		gpio_direction_output(mipic_reset_gpio, 0);
+		gpio_set_value_cansleep(mipic_reset_gpio, 0);
+		usleep_range(1000, 1500);
+		gpio_set_value_cansleep(mipic_reset_gpio, 1);
+		usleep_range(3000, 3500);
+	}
+	return 0;
+}
+
+static struct drm_display_mode *sharp10x19_dual_cmd_get_config_mode(void)
+{
+	struct drm_display_mode *mode;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+	if (!mode)
+		return NULL;
+
+	mode->hdisplay = 2160;
+	mode->hsync_start = mode->hdisplay + 8;
+	mode->hsync_end = mode->hsync_start + 24;
+	mode->htotal = mode->hsync_end + 8;
+
+	mode->vdisplay = 1920;
+	mode->vsync_start = 1923;
+	mode->vsync_end = 1926;
+	mode->vtotal = 1987;
+
+	mode->vrefresh = 60;
+	mode->clock =  mode->vrefresh * mode->vtotal * mode->htotal / 1000;
+	mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+	drm_mode_set_name(mode);
+	drm_mode_set_crtcinfo(mode, 0);
+
+	return mode;
+}
+
+static struct drm_display_mode *sharp10x19_cmd_get_config_mode(void)
+{
+	struct drm_display_mode *mode;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+	if (!mode)
+		return NULL;
+
+	mode->hdisplay = 1080;
+	mode->hsync_start = mode->hdisplay + 8;
+	mode->hsync_end = mode->hsync_start + 24;
+	mode->htotal = mode->hsync_end + 8;
+
+	mode->vdisplay = 1920;
+	mode->vsync_start = 1923;
+	mode->vsync_end = 1926;
+	mode->vtotal = 1987;
+
+	mode->vrefresh = 60;
+	mode->clock =  mode->vrefresh * mode->vtotal * mode->htotal / 1000;
+	mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+	drm_mode_set_name(mode);
+	drm_mode_set_crtcinfo(mode, 0);
+
+	return mode;
+}
+
+
+static void sharp10x19_cmd_get_panel_info(int pipe, struct panel_info *pi)
+{
+	PSB_DEBUG_ENTRY("\n");
+
+	if (pipe == 0) {
+		pi->width_mm = PANEL_4DOT3_WIDTH;
+		pi->height_mm = PANEL_4DOT3_HEIGHT;
+	}
+}
+
+void sharp10x19_cmd_init(struct drm_device *dev, struct panel_funcs *p_funcs)
+{
+	if (!dev || !p_funcs) {
+		DRM_ERROR("Invalid parameters\n");
+		return;
+	}
+
+	PSB_DEBUG_ENTRY("\n");
+	if (is_dual_panel(dev))
+		p_funcs->get_config_mode =
+			sharp10x19_dual_cmd_get_config_mode;
+	else
+		p_funcs->get_config_mode =
+			sharp10x19_cmd_get_config_mode;
+
+	p_funcs->reset = sharp10x19_cmd_panel_reset;
+	p_funcs->power_on = sharp10x19_cmd_power_on;
+	p_funcs->power_off = sharp10x19_cmd_power_off;
+	p_funcs->drv_ic_init = sharp10x19_cmd_drv_ic_init;
+	p_funcs->get_panel_info = sharp10x19_cmd_get_panel_info;
+	p_funcs->dsi_controller_init = sharp10x19_cmd_controller_init;
+	p_funcs->detect = sharp10x19_cmd_panel_connection_detect;
+	p_funcs->set_brightness = sharp10x19_cmd_set_brightness;
+	p_funcs->exit_deep_standby = sharp10x19_cmd_exit_deep_standby;
+}
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/sharp25x16_cmd.c b/drivers/external_drivers/intel_media/display/tng/drv/sharp25x16_cmd.c
new file mode 100644
index 0000000..c9aafc6
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/sharp25x16_cmd.c
@@ -0,0 +1,398 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Faxing Lu<faxing.lu@intel.com>
+ */
+
+#include "mdfld_dsi_dbi.h"
+#include "mdfld_dsi_esd.h"
+#include <asm/intel_scu_pmic.h>
+#include <linux/i2c.h>
+#include <linux/i2c/pca953x.h>
+
+#include "displays/sharp25x16_cmd.h"
+
+static int mipi_reset_gpio;
+
+static u8 sharp_mode_set_data[7][3] = {
+			{0x10, 0x00, 0x2f},
+			{0x10, 0x01, 0x01},
+			{0x10, 0x07, 0x00},
+			{0x70, 0x00, 0x70},
+			{0x00, 0x1f, 0x00},
+			{0x20, 0x2e, 0x12},
+			{0x20, 0x2a, 0x0}
+			};
+static u8 sharp_clumn_addr_left[] = {
+			0x2a, 0x00, 0x00, 0x09, 0xff};
+static u8 sharp_page_addr_left[] = {
+			0x2b, 0x00, 0x00, 0x06, 0x3f};
+static u8 sharp_set_brightness[3] =
+			{0x20, 0x2a, 0x0};
+static
+int sharp25x16_cmd_drv_ic_init(struct mdfld_dsi_config *dsi_config)
+{
+	struct mdfld_dsi_pkg_sender *sender
+		= mdfld_dsi_get_pkg_sender(dsi_config);
+	struct drm_device *dev = dsi_config->dev;
+	int err = 0;
+	int i;
+
+	PSB_DEBUG_ENTRY("\n");
+	if (!sender) {
+		DRM_ERROR("Cannot get sender\n");
+		return -EINVAL;
+	}
+	for (i = 0; i < 7; i++) {
+		err = mdfld_dsi_send_gen_long_hs(sender, sharp_mode_set_data[i],
+				3,
+				MDFLD_DSI_SEND_PACKAGE);
+		if (err) {
+			DRM_ERROR("%s: %d: Set Mode data\n", __func__, __LINE__);
+			goto ic_init_err;
+		}
+		REG_WRITE(MIPIA_HS_GEN_CTRL_REG, 5);
+	}
+
+	err = mdfld_dsi_send_mcs_long_hs(sender,
+			sharp_clumn_addr_left,
+			5, MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Set Clumn Address\n",
+		__func__, __LINE__);
+		goto ic_init_err;
+	}
+	REG_WRITE(MIPIA_HS_GEN_CTRL_REG, 5);
+	err = mdfld_dsi_send_mcs_long_hs(sender,
+			sharp_page_addr_left,
+			5, MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Set Page Address\n",
+		__func__, __LINE__);
+		goto ic_init_err;
+	}
+	REG_WRITE(MIPIA_HS_GEN_CTRL_REG, 5);
+
+	err = mdfld_dsi_send_mcs_short_hs(sender,
+			set_tear_on, 0x00, 1,
+			MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Set Tear On\n",
+		__func__, __LINE__);
+		goto ic_init_err;
+	}
+	return 0;
+
+ic_init_err:
+	err = -EIO;
+	return err;
+}
+
+static
+void sharp25x16_cmd_controller_init(
+		struct mdfld_dsi_config *dsi_config)
+{
+	struct mdfld_dsi_hw_context *hw_ctx =
+				&dsi_config->dsi_hw_context;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	/*reconfig lane configuration*/
+	dsi_config->lane_count = 4;
+	dsi_config->lane_config = MDFLD_DSI_DATA_LANE_4_0;
+	hw_ctx->cck_div = 1;
+	hw_ctx->pll_bypass_mode = 0;
+
+	hw_ctx->mipi_control = 0x0;
+	hw_ctx->intr_en = 0xFFFFFFFF;
+	hw_ctx->hs_tx_timeout = 0xFFFFFF;
+	hw_ctx->lp_rx_timeout = 0xFFFFFF;
+	hw_ctx->device_reset_timer = 0xffff;
+	hw_ctx->turn_around_timeout = 0x3f;
+	hw_ctx->high_low_switch_count = 0x40;
+	hw_ctx->clk_lane_switch_time_cnt = 0x16002d;
+	hw_ctx->lp_byteclk = 0x5;
+	hw_ctx->dphy_param = 0x3c1fc51f;
+	hw_ctx->eot_disable = 0x0;
+	hw_ctx->init_count = 0xfa0;
+	hw_ctx->dbi_bw_ctrl = 1024;
+	hw_ctx->hs_ls_dbi_enable = 0x0;
+	hw_ctx->dsi_func_prg = ((DBI_DATA_WIDTH_OPT2 << 13) |
+				dsi_config->lane_count);
+
+	hw_ctx->mipi = SEL_FLOPPED_HSTX	| PASS_FROM_SPHY_TO_AFE |
+		DUAL_LINK_ENABLE | DUAL_LINK_CAPABLE | TE_TRIGGER_GPIO_PIN |
+		DUAL_LINK_MODE_PIXEL_ALTER;
+	hw_ctx->video_mode_format = 0xf;
+
+}
+static
+int sharp25x16_cmd_panel_connection_detect(
+	struct mdfld_dsi_config *dsi_config)
+{
+	int status;
+	int pipe = dsi_config->pipe;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (pipe == 0) {
+		status = MDFLD_DSI_PANEL_CONNECTED;
+	} else {
+		DRM_INFO("%s: do NOT support dual panel\n",
+		__func__);
+		status = MDFLD_DSI_PANEL_DISCONNECTED;
+	}
+
+	return status;
+}
+
+static
+int sharp25x16_cmd_power_on(
+	struct mdfld_dsi_config *dsi_config)
+{
+
+	struct mdfld_dsi_pkg_sender *sender =
+		mdfld_dsi_get_pkg_sender(dsi_config);
+	int err = 0;
+
+	PSB_DEBUG_ENTRY("\n");
+	if (!sender) {
+		DRM_ERROR("Failed to get DSI packet sender\n");
+		return -EINVAL;
+	}
+	msleep(150);
+	err = mdfld_dsi_send_mcs_short_hs(sender,
+		set_address_mode, 0x0, 1,
+		MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Set Address Mode\n",
+		__func__, __LINE__);
+		goto power_err;
+	}
+	err = mdfld_dsi_send_mcs_short_hs(sender,
+			set_pixel_format, 0x77, 1,
+			MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Set Pixel format\n",
+		__func__, __LINE__);
+		goto power_err;
+	}
+
+	/* Sleep Out */
+	err = mdfld_dsi_send_mcs_short_hs(sender, exit_sleep_mode, 0, 0,
+			MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Exit Sleep Mode\n", __func__, __LINE__);
+		goto power_err;
+	}
+
+	msleep(20);
+	/* Set Display on 0x29 */
+	err = mdfld_dsi_send_mcs_short_hs(sender, set_display_on, 0, 0,
+			MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Set Display On\n", __func__, __LINE__);
+		goto power_err;
+	}
+	msleep(150);
+
+power_err:
+	return err;
+}
+static int sharp25x16_cmd_power_off(
+		struct mdfld_dsi_config *dsi_config)
+{
+	struct mdfld_dsi_pkg_sender *sender =
+		mdfld_dsi_get_pkg_sender(dsi_config);
+	int err;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	msleep(100);
+	if (!sender) {
+		DRM_ERROR("Failed to get DSI packet sender\n");
+		return -EINVAL;
+	}
+	err = mdfld_dsi_send_mcs_short_hs(sender,
+			set_display_off, 0, 0,
+			MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Set Display Off\n",
+		__func__, __LINE__);
+		goto power_off_err;
+	}
+	err = mdfld_dsi_send_mcs_short_hs(sender,
+			enter_sleep_mode, 0, 0,
+			MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Enter Sleep Mode\n",
+		__func__, __LINE__);
+		goto power_off_err;
+	}
+	/* enable it after AOB re-work
+	* gpio_set_value_cansleep(mipi_reset_gpio, 0);
+	*/
+	msleep(100);
+	return 0;
+power_off_err:
+	err = -EIO;
+	return err;
+}
+
+static
+int sharp25x16_cmd_set_brightness(
+		struct mdfld_dsi_config *dsi_config,
+		int level)
+{
+	struct mdfld_dsi_pkg_sender *sender =
+		mdfld_dsi_get_pkg_sender(dsi_config);
+	struct drm_device *dev = dsi_config->dev;
+	u8 duty_val = 0;
+
+	PSB_DEBUG_ENTRY("level = %d\n", level);
+
+	if (!sender) {
+		DRM_ERROR("Failed to get DSI packet sender\n");
+		return -EINVAL;
+	}
+	duty_val = (0xFF * level) / 255;
+	if (duty_val < 12)
+		duty_val = 0;
+	sharp_set_brightness[2] = duty_val;
+	mdfld_dsi_send_gen_long_hs(sender, sharp_set_brightness,
+				3,
+				MDFLD_DSI_SEND_PACKAGE);
+
+	REG_WRITE(MIPIA_HS_GEN_CTRL_REG, 5);
+	return 0;
+}
+
+static
+int sharp25x16_cmd_panel_reset(
+		struct mdfld_dsi_config *dsi_config)
+{
+	int ret = 0;
+
+	PSB_DEBUG_ENTRY("\n");
+	msleep(10);
+	if (mipi_reset_gpio == 0) {
+		ret = get_gpio_by_name("disp0_rst");
+		if (ret < 0) {
+			DRM_ERROR("Faild to get panel reset gpio, " \
+				  "use default reset pin\n");
+			return 0;
+		}
+		mipi_reset_gpio = ret;
+		ret = gpio_request(mipi_reset_gpio, "mipi_display");
+		if (ret) {
+			DRM_ERROR("Faild to request panel reset gpio\n");
+			return 0;
+		}
+	}
+	gpio_direction_output(mipi_reset_gpio, 0);
+	usleep_range(1000, 1500);
+	gpio_set_value_cansleep(mipi_reset_gpio, 1);
+
+	return 0;
+}
+
+static
+int sharp25x16_cmd_exit_deep_standby(
+		struct mdfld_dsi_config *dsi_config)
+{
+	PSB_DEBUG_ENTRY("\n");
+	gpio_set_value_cansleep(mipi_reset_gpio, 0);
+	usleep_range(1000, 1500);
+	gpio_set_value_cansleep(mipi_reset_gpio, 1);
+	return 0;
+}
+
+static
+struct drm_display_mode *sharp25x16_cmd_get_config_mode(void)
+{
+	struct drm_display_mode *mode;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+	if (!mode)
+		return NULL;
+
+	mode->hdisplay = 2560;
+
+	mode->hsync_start = mode->hdisplay + 48;
+	mode->hsync_end = mode->hsync_start + 32;
+	mode->htotal = mode->hsync_end + 80;
+
+	mode->vdisplay = 1600;
+	mode->vsync_start = mode->vdisplay + 3;
+	mode->vsync_end = mode->vsync_start + 33;
+	mode->vtotal = mode->vsync_end + 10;
+
+
+	mode->vrefresh = 60;
+	mode->clock =  mode->vrefresh * mode->vtotal * mode->htotal / 1000;
+	mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+	drm_mode_set_name(mode);
+	drm_mode_set_crtcinfo(mode, 0);
+
+	return mode;
+}
+
+static
+void sharp25x16_cmd_get_panel_info(int pipe,
+		struct panel_info *pi)
+{
+	PSB_DEBUG_ENTRY("\n");
+
+	if (pipe == 0) {
+		pi->width_mm = 217;
+		pi->height_mm = 136;
+	}
+}
+
+void sharp25x16_cmd_init(struct drm_device *dev,
+		struct panel_funcs *p_funcs)
+{
+	if (!dev || !p_funcs) {
+		DRM_ERROR("Invalid parameters\n");
+		return;
+	}
+	PSB_DEBUG_ENTRY("\n");
+	p_funcs->reset = sharp25x16_cmd_panel_reset;
+	p_funcs->power_on = sharp25x16_cmd_power_on;
+	p_funcs->power_off = sharp25x16_cmd_power_off;
+	p_funcs->drv_ic_init = sharp25x16_cmd_drv_ic_init;
+	p_funcs->get_config_mode = sharp25x16_cmd_get_config_mode;
+	p_funcs->get_panel_info = sharp25x16_cmd_get_panel_info;
+	p_funcs->dsi_controller_init =
+			sharp25x16_cmd_controller_init;
+	p_funcs->detect =
+			sharp25x16_cmd_panel_connection_detect;
+	p_funcs->set_brightness =
+			sharp25x16_cmd_set_brightness;
+	p_funcs->exit_deep_standby =
+				sharp25x16_cmd_exit_deep_standby;
+
+}
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/sharp25x16_vid.c b/drivers/external_drivers/intel_media/display/tng/drv/sharp25x16_vid.c
new file mode 100644
index 0000000..bec7925
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/sharp25x16_vid.c
@@ -0,0 +1,348 @@
+/*
+ * Copyright (C) 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Faxing Lu <faxing.lu@intel.com>
+ */
+
+#include <asm/intel_scu_pmic.h>
+
+#include "displays/sharp25x16_vid.h"
+static int mipi_reset_gpio;
+
+static u8 sharp_mode_set_data[7][3] = {
+			{0x10, 0x00, 0x3f},
+			{0x10, 0x01, 0x00},
+			{0x10, 0x07, 0x00},
+			{0x70, 0x00, 0x70},
+			{0x00, 0x1f, 0x00},
+			{0x20, 0x2e, 0x12},
+			{0x20, 0x2a, 0x00}
+			};
+static u8 sharp_set_brightness[3] =
+			{0x20, 0x2a, 0x0};
+int mdfld_dsi_sharp25x16_ic_init(struct mdfld_dsi_config *dsi_config)
+{
+	struct mdfld_dsi_pkg_sender *sender
+		= mdfld_dsi_get_pkg_sender(dsi_config);
+	struct drm_device *dev = dsi_config->dev;
+	int err = 0;
+	int i;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (!sender) {
+		DRM_ERROR("Cannot get sender\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < 7; i++) {
+		err = mdfld_dsi_send_gen_long_hs(sender, sharp_mode_set_data[i],
+				3,
+				MDFLD_DSI_SEND_PACKAGE);
+		if (err) {
+			DRM_ERROR("%s: %d: Set Mode data\n", __func__, __LINE__);
+			goto ic_init_err;
+		}
+		REG_WRITE(MIPIA_HS_GEN_CTRL_REG, 5);
+	}
+	return 0;
+
+ic_init_err:
+	err = -EIO;
+	return err;
+}
+
+static
+void mdfld_dsi_sharp25x16_dsi_controller_init(struct mdfld_dsi_config *dsi_config)
+{
+	struct mdfld_dsi_hw_context *hw_ctx =
+		&dsi_config->dsi_hw_context;
+	/* Virtual channel number */
+	int mipi_vc = 0;
+	int mipi_pixel_format = 0x4;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	/*reconfig lane configuration*/
+	dsi_config->lane_count = 4;
+	dsi_config->lane_config = MDFLD_DSI_DATA_LANE_4_0;
+	hw_ctx->pll_bypass_mode = 0;
+	/* This is for 400 mhz.  Set it to 0 for 800mhz */
+	hw_ctx->cck_div = 1;
+
+	hw_ctx->mipi_control = 0;
+	hw_ctx->intr_en = 0xFFFFFFFF;
+	hw_ctx->hs_tx_timeout = 0xFFFFFF;
+	hw_ctx->lp_rx_timeout = 0xFFFFFF;
+	hw_ctx->device_reset_timer = 0xffff;
+	hw_ctx->turn_around_timeout = 0x3f;
+	hw_ctx->high_low_switch_count = 0x40;
+	hw_ctx->clk_lane_switch_time_cnt =  0x16002d;
+	hw_ctx->lp_byteclk = 0x5;
+	hw_ctx->dphy_param = 0x3c1fc51f;
+	hw_ctx->eot_disable = 0x2;
+	hw_ctx->init_count = 0xfa0;
+	hw_ctx->dbi_bw_ctrl = 0x820;
+
+	/*setup video mode format*/
+	hw_ctx->video_mode_format = 0xf;
+
+	/*set up func_prg*/
+	hw_ctx->dsi_func_prg = ((mipi_pixel_format << 7) | (mipi_vc << 3) |
+			dsi_config->lane_count);
+
+	/*setup mipi port configuration*/
+	hw_ctx->mipi = MIPI_PORT_EN | PASS_FROM_SPHY_TO_AFE |
+		dsi_config->lane_config |
+		DUAL_LINK_ENABLE | DUAL_LINK_CAPABLE | DUAL_LINK_MODE_PIXEL_ALTER;
+}
+
+static int mdfld_dsi_sharp25x16_detect(struct mdfld_dsi_config *dsi_config)
+{
+	int status;
+	int pipe = dsi_config->pipe;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (pipe == 0) {
+		status = MDFLD_DSI_PANEL_CONNECTED;
+	} else {
+		DRM_INFO("%s: do NOT support dual panel\n", __func__);
+		status = MDFLD_DSI_PANEL_DISCONNECTED;
+	}
+
+	return status;
+}
+
+static int mdfld_dsi_sharp25x16_power_on(struct mdfld_dsi_config *dsi_config)
+{
+	struct mdfld_dsi_pkg_sender *sender =
+		mdfld_dsi_get_pkg_sender(dsi_config);
+	int err;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (!sender) {
+		DRM_ERROR("Failed to get DSI packet sender\n");
+		return -EINVAL;
+	}
+	/* Sleep Out */
+	err = mdfld_dsi_send_mcs_short_hs(sender, exit_sleep_mode, 0, 0,
+			MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Exit Sleep Mode\n", __func__, __LINE__);
+		goto power_on_err;
+	}
+	/* Wait for 6 frames after exit_sleep_mode. */
+	msleep(100);
+
+	/* Set Display on */
+	err = mdfld_dsi_send_mcs_short_hs(sender, set_display_on, 0, 0,
+			MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Set Display On\n", __func__, __LINE__);
+		goto power_on_err;
+	}
+	/* Wait for 1 frame after set_display_on. */
+	msleep(20);
+
+	/* Send TURN_ON packet */
+	err = mdfld_dsi_send_dpi_spk_pkg_hs(sender, MDFLD_DSI_DPI_SPK_TURN_ON);
+	if (err) {
+		DRM_ERROR("Failed to send turn on packet\n");
+		goto power_on_err;
+	}
+	return 0;
+
+power_on_err:
+	err = -EIO;
+	return err;
+}
+
+static int mdfld_dsi_sharp25x16_power_off(struct mdfld_dsi_config *dsi_config)
+{
+	struct mdfld_dsi_pkg_sender *sender =
+		mdfld_dsi_get_pkg_sender(dsi_config);
+	int err;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (!sender) {
+		DRM_ERROR("Failed to get DSI packet sender\n");
+		return -EINVAL;
+	}
+
+	/*send SHUT_DOWN packet */
+	err = mdfld_dsi_send_dpi_spk_pkg_hs(sender,
+			MDFLD_DSI_DPI_SPK_SHUT_DOWN);
+	if (err) {
+		DRM_ERROR("Failed to send turn off packet\n");
+		goto power_off_err;
+	}
+	/* According HW DSI spec, need to wait for 100ms. */
+	msleep(100);
+
+	/* Set Display off */
+	err = mdfld_dsi_send_mcs_short_hs(sender, set_display_off, 0, 0,
+			MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Set Display On\n", __func__, __LINE__);
+		goto power_off_err;
+	}
+	/* Wait for 1 frame after set_display_on. */
+	msleep(20);
+
+	/* Sleep In */
+	err = mdfld_dsi_send_mcs_short_hs(sender, enter_sleep_mode, 0, 0,
+			MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Exit Sleep Mode\n", __func__, __LINE__);
+		goto power_off_err;
+	}
+	/* Wait for 3 frames after enter_sleep_mode. */
+	msleep(51);
+
+	/* enable it after AOB re-work
+	* gpio_set_value_cansleep(mipi_reset_gpio, 0);
+	*/
+	return 0;
+
+power_off_err:
+	err = -EIO;
+	return err;
+}
+
+static int mdfld_dsi_sharp25x16_panel_reset(struct mdfld_dsi_config *dsi_config)
+{
+	int ret = 0;
+
+	PSB_DEBUG_ENTRY("\n");
+	msleep(10);
+	if (mipi_reset_gpio == 0) {
+		ret = get_gpio_by_name("disp0_rst");
+		if (ret < 0) {
+			DRM_ERROR("Faild to get panel reset gpio, " \
+				  "use default reset pin\n");
+			return 0;
+		}
+		mipi_reset_gpio = ret;
+		ret = gpio_request(mipi_reset_gpio, "mipi_display");
+		if (ret) {
+			DRM_ERROR("Faild to request panel reset gpio\n");
+			return 0;
+		}
+	}
+	gpio_direction_output(mipi_reset_gpio, 0);
+	usleep_range(1000, 1500);
+	gpio_set_value_cansleep(mipi_reset_gpio, 1);
+
+	return 0;
+}
+
+static struct drm_display_mode *sharp25x16_vid_get_config_mode(void)
+{
+	struct drm_display_mode *mode;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+	if (!mode)
+		return NULL;
+
+	mode->hdisplay = 2560;
+
+	mode->hsync_start = mode->hdisplay + 8;
+	mode->hsync_end = mode->hsync_start + 30;
+	mode->htotal = mode->hsync_end + 32;
+
+	mode->vdisplay = 1600;
+	mode->vsync_start = mode->vdisplay + 12;
+	mode->vsync_end = mode->vsync_start + 4;
+	mode->vtotal = mode->vsync_end + 4;
+
+	mode->vrefresh = 60;
+	mode->clock =  mode->vrefresh * mode->vtotal *
+		mode->htotal / 1000;
+
+	drm_mode_set_name(mode);
+	drm_mode_set_crtcinfo(mode, 0);
+
+	mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+	return mode;
+}
+
+static int mdfld_dsi_sharp25x16_set_brightness(struct mdfld_dsi_config *dsi_config,
+		int level)
+{
+	struct mdfld_dsi_pkg_sender *sender =
+		mdfld_dsi_get_pkg_sender(dsi_config);
+	struct drm_device *dev = dsi_config->dev;
+	u8 duty_val = 0;
+
+	PSB_DEBUG_ENTRY("level = %d\n", level);
+
+	if (!sender) {
+		DRM_ERROR("Failed to get DSI packet sender\n");
+		return -EINVAL;
+	}
+
+	duty_val = (0xFF * level) / 255;
+	if (duty_val < 12)
+		duty_val = 0;
+	sharp_set_brightness[2] = duty_val;
+	mdfld_dsi_send_gen_long_hs(sender, sharp_set_brightness,
+				3,
+				MDFLD_DSI_SEND_PACKAGE);
+
+	REG_WRITE(MIPIA_HS_GEN_CTRL_REG, 5);
+	return 0;
+}
+
+static void sharp25x16_vid_get_panel_info(int pipe, struct panel_info *pi)
+{
+	if (!pi)
+		return;
+
+	if (pipe == 0) {
+		pi->width_mm = 217;
+		pi->height_mm = 136;
+	}
+
+	return;
+}
+
+void sharp25x16_vid_init(struct drm_device *dev, struct panel_funcs *p_funcs)
+{
+	PSB_DEBUG_ENTRY("\n");
+
+	p_funcs->get_config_mode = sharp25x16_vid_get_config_mode;
+	p_funcs->get_panel_info = sharp25x16_vid_get_panel_info;
+	p_funcs->reset = mdfld_dsi_sharp25x16_panel_reset;
+	p_funcs->drv_ic_init = mdfld_dsi_sharp25x16_ic_init;
+	p_funcs->dsi_controller_init = mdfld_dsi_sharp25x16_dsi_controller_init;
+	p_funcs->detect = mdfld_dsi_sharp25x16_detect;
+	p_funcs->power_on = mdfld_dsi_sharp25x16_power_on;
+	p_funcs->power_off = mdfld_dsi_sharp25x16_power_off;
+	p_funcs->set_brightness = mdfld_dsi_sharp25x16_set_brightness;
+}
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/sharp5_cmd.c b/drivers/external_drivers/intel_media/display/tng/drv/sharp5_cmd.c
new file mode 100644
index 0000000..2ee163b
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/sharp5_cmd.c
@@ -0,0 +1,475 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Faxing Lu <faxing.lu@intel.com>
+ */
+
+#include "mdfld_dsi_dbi.h"
+#include "mdfld_dsi_esd.h"
+#include <asm/intel_scu_pmic.h>
+#include <asm/intel_mid_rpmsg.h>
+#include <asm/intel_mid_remoteproc.h>
+
+#include "displays/sharp5_cmd.h"
+
+/* The register to control secure I2C FLIS pin */
+#define SECURE_I2C_FLIS_REG	0xFF0C1D30
+
+static int mipi_reset_gpio;
+static int bias_en_gpio;
+
+#define sharp5_remove_nvm_reload 0xd6
+static	u8 sharp5_mcs_column_addr[] = { 0x2a, 0x00, 0x00, 0x04, 0x37 };
+static	u8 sharp5_mcs_page_addr[] = { 0x2b, 0x00, 0x00, 0x07, 0x7f };
+
+static int sharp5_cmd_drv_ic_init(struct mdfld_dsi_config *dsi_config)
+{
+	struct mdfld_dsi_pkg_sender *sender =
+		mdfld_dsi_get_pkg_sender(dsi_config);
+	int ret;
+	u8 cmd;
+
+	/* exit sleep */
+	cmd = exit_sleep_mode;
+	ret = mdfld_dsi_send_mcs_short_hs(sender,
+					  cmd, 0, 0, MDFLD_DSI_SEND_PACKAGE);
+	if (ret)
+		goto err_out;
+	msleep(120);
+
+	/* unlock MCW */
+	cmd = access_protect;
+	ret = mdfld_dsi_send_mcs_short_hs(sender,
+					  cmd, 0x0, 1, MDFLD_DSI_SEND_PACKAGE);
+	if (ret)
+		goto err_out;
+
+	/* reload NVM */
+	cmd = sharp5_remove_nvm_reload;
+	ret = mdfld_dsi_send_mcs_short_hs(sender,
+					  cmd, 0x1, 1, MDFLD_DSI_SEND_PACKAGE);
+	if (ret)
+		goto err_out;
+
+	/* send display brightness */
+	cmd = write_display_brightness;
+	ret = mdfld_dsi_send_mcs_short_hs(sender,
+					  cmd, 0xff, 1, MDFLD_DSI_SEND_PACKAGE);
+	if (ret)
+		goto err_out;
+
+	/* display control */
+	cmd = write_ctrl_display;
+	ret = mdfld_dsi_send_mcs_short_hs(sender,
+					  cmd, 0x0c, 1, MDFLD_DSI_SEND_PACKAGE);
+	if (ret)
+		goto err_out;
+
+	/* CABC */
+	cmd = 0x55;
+	ret = mdfld_dsi_send_mcs_short_hs(sender,
+					  cmd, 0x0, 1, MDFLD_DSI_SEND_PACKAGE);
+	if (ret)
+		goto err_out;
+
+	/* tear on*/
+	cmd = set_tear_on;
+	ret = mdfld_dsi_send_mcs_short_hs(sender,
+					  cmd, 0x0, 1, MDFLD_DSI_SEND_PACKAGE);
+	if (ret)
+		goto err_out;
+
+	/* column address */
+	cmd = 0x2a;
+	ret = mdfld_dsi_send_mcs_long_hs(sender, sharp5_mcs_column_addr, 5,
+					 MDFLD_DSI_SEND_PACKAGE);
+	if (ret)
+		goto err_out;
+
+	/* page address */
+	cmd = 0x2b;
+	ret = mdfld_dsi_send_mcs_long_hs(sender, sharp5_mcs_page_addr, 5,
+					 MDFLD_DSI_SEND_PACKAGE);
+	if (ret)
+		goto err_out;
+
+	return 0;
+
+err_out:
+	DRM_ERROR("failed to send command %#x\n", cmd);
+	return ret;
+}
+
+static void sharp5_cmd_controller_init(struct mdfld_dsi_config *dsi_config)
+{
+	struct mdfld_dsi_hw_context *hw_ctx =
+				&dsi_config->dsi_hw_context;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	/*reconfig lane configuration*/
+	dsi_config->lane_count = 4;
+	dsi_config->lane_config = MDFLD_DSI_DATA_LANE_4_0;
+	hw_ctx->cck_div = 1;
+	hw_ctx->pll_bypass_mode = 0;
+
+	hw_ctx->mipi_control = 0x0;
+	hw_ctx->intr_en = 0xFFFFFFFF;
+	hw_ctx->hs_tx_timeout = 0xFFFFFF;
+	hw_ctx->lp_rx_timeout = 0xFFFFFF;
+	hw_ctx->device_reset_timer = 0xffff;
+	hw_ctx->turn_around_timeout = 0x14;
+	hw_ctx->high_low_switch_count = 0x2B;
+	hw_ctx->clk_lane_switch_time_cnt =  0x2b0014;
+	hw_ctx->lp_byteclk = 0x6;
+	hw_ctx->dphy_param = 0x2a18681f;
+	hw_ctx->eot_disable = 0x0;
+	hw_ctx->init_count = 0xf0;
+	hw_ctx->dbi_bw_ctrl = 1100;
+	hw_ctx->hs_ls_dbi_enable = 0x0;
+	hw_ctx->dsi_func_prg = ((DBI_DATA_WIDTH_OPT2 << 13) |
+				dsi_config->lane_count);
+
+	hw_ctx->mipi = PASS_FROM_SPHY_TO_AFE |
+			BANDGAP_CHICKEN_BIT |
+		TE_TRIGGER_GPIO_PIN;
+	hw_ctx->video_mode_format = 0xf;
+}
+
+static int
+sharp5_cmd_panel_connection_detect(struct mdfld_dsi_config *dsi_config)
+{
+	int status;
+	int pipe = dsi_config->pipe;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (pipe == 0) {
+		status = MDFLD_DSI_PANEL_CONNECTED;
+	} else {
+		DRM_INFO("%s: do NOT support dual panel\n", __func__);
+		status = MDFLD_DSI_PANEL_DISCONNECTED;
+	}
+
+	return status;
+}
+
+static int sharp5_cmd_power_on(struct mdfld_dsi_config *dsi_config)
+{
+	struct mdfld_dsi_pkg_sender *sender =
+		mdfld_dsi_get_pkg_sender(dsi_config);
+	int ret;
+	u8 cmd;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (!sender) {
+		DRM_ERROR("Failed to get DSI packet sender\n");
+		return -EINVAL;
+	}
+
+	/* address mode */
+	cmd = set_address_mode;
+	ret = mdfld_dsi_send_mcs_short_hs(sender,
+					  cmd, 0x0, 1, MDFLD_DSI_SEND_PACKAGE);
+	if (ret)
+		goto err_out;
+
+	/* pixel format*/
+	cmd = set_pixel_format;
+	ret = mdfld_dsi_send_mcs_short_hs(sender,
+					  cmd, 0x77, 1, MDFLD_DSI_SEND_PACKAGE);
+	if (ret)
+		goto err_out;
+
+	/* display on */
+	cmd = set_display_on;
+	ret = mdfld_dsi_send_mcs_short_hs(sender,
+					  cmd, 0, 0, MDFLD_DSI_SEND_PACKAGE);
+	if (ret)
+		goto err_out;
+
+	return 0;
+
+err_out:
+	DRM_ERROR("failed to send command %#x\n", cmd);
+	return ret;
+}
+
+static void __vpro2_power_ctrl(bool on)
+{
+	u8 addr, value;
+	addr = 0xad;
+	if (intel_scu_ipc_ioread8(addr, &value))
+		DRM_ERROR("%s: %d: failed to read vPro2\n", __func__, __LINE__);
+
+	/* Control vPROG2 power rail with 2.85v. */
+	if (on)
+		value |= 0x1;
+	else
+		value &= ~0x1;
+
+	if (intel_scu_ipc_iowrite8(addr, value))
+		DRM_ERROR("%s: %d: failed to write vPro2\n",
+				__func__, __LINE__);
+}
+
+static int sharp5_cmd_power_off(struct mdfld_dsi_config *dsi_config)
+{
+	struct mdfld_dsi_pkg_sender *sender =
+		mdfld_dsi_get_pkg_sender(dsi_config);
+	int err;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	if (!sender) {
+		DRM_ERROR("Failed to get DSI packet sender\n");
+		return -EINVAL;
+	}
+
+	err = mdfld_dsi_send_mcs_short_hs(sender,
+			set_display_off, 0, 0,
+			MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Set Display Off\n", __func__, __LINE__);
+		goto power_off_err;
+	}
+	usleep_range(20000, 20100);
+
+	err = mdfld_dsi_send_mcs_short_hs(sender,
+			set_tear_off, 0, 0,
+			MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Set Tear Off\n", __func__, __LINE__);
+		goto power_off_err;
+	}
+
+	err = mdfld_dsi_send_mcs_short_hs(sender,
+			enter_sleep_mode, 0, 0,
+			MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Enter Sleep Mode\n", __func__, __LINE__);
+		goto power_off_err;
+	}
+
+	msleep(60);
+
+	err = mdfld_dsi_send_gen_short_hs(sender,
+		access_protect, 4, 2,
+		MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Set Access Protect\n", __func__, __LINE__);
+		goto power_off_err;
+	}
+
+	err = mdfld_dsi_send_gen_short_hs(sender, low_power_mode, 1, 2,
+					  MDFLD_DSI_SEND_PACKAGE);
+	if (err) {
+		DRM_ERROR("%s: %d: Set Low Power Mode\n", __func__, __LINE__);
+		goto power_off_err;
+	}
+	if (bias_en_gpio)
+		gpio_set_value_cansleep(bias_en_gpio, 0);
+	usleep_range(1000, 1500);
+
+	return 0;
+power_off_err:
+	err = -EIO;
+	return err;
+}
+
+static int
+sharp5_cmd_set_brightness( struct mdfld_dsi_config *dsi_config, int level)
+{
+	struct mdfld_dsi_pkg_sender *sender =
+		mdfld_dsi_get_pkg_sender(dsi_config);
+	u8 duty_val = 0;
+
+	PSB_DEBUG_ENTRY("level = %d\n", level);
+
+	if (!sender) {
+		DRM_ERROR("Failed to get DSI packet sender\n");
+		return -EINVAL;
+	}
+
+	duty_val = (0xFF * level) / 255;
+	mdfld_dsi_send_mcs_short_hs(sender,
+			write_display_brightness, duty_val, 1,
+			MDFLD_DSI_SEND_PACKAGE);
+	return 0;
+}
+
+static void _get_panel_reset_gpio(void)
+{
+	int ret = 0;
+	if (mipi_reset_gpio == 0) {
+		ret = get_gpio_by_name("disp0_rst");
+		if (ret < 0) {
+			DRM_ERROR("Faild to get panel reset gpio, " \
+				  "use default reset pin\n");
+			return;
+		}
+		mipi_reset_gpio = ret;
+		ret = gpio_request(mipi_reset_gpio, "mipi_display");
+		if (ret) {
+			DRM_ERROR("Faild to request panel reset gpio\n");
+			return;
+		}
+		gpio_direction_output(mipi_reset_gpio, 0);
+	}
+}
+
+static int sharp5_cmd_panel_reset(struct mdfld_dsi_config *dsi_config)
+{
+	int ret = 0;
+	u8 *vaddr = NULL, *vaddr1 = NULL;
+	int reg_value_scl = 0;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	/* Because when reset touchscreen panel, touchscreen will pull i2c bus
+	 * to low, sometime this operation will cause i2c bus enter into wrong
+	 * status, so before reset, switch i2c scl pin */
+	vaddr1 = ioremap(SECURE_I2C_FLIS_REG, 4);
+	reg_value_scl = ioread32(vaddr1);
+	reg_value_scl &= ~0x1000;
+	rpmsg_send_generic_raw_command(RP_INDIRECT_WRITE, 0,
+					(u8 *)&reg_value_scl, 4,
+					NULL, 0,
+					SECURE_I2C_FLIS_REG, 0);
+
+	__vpro2_power_ctrl(true);
+	usleep_range(2000, 2500);
+
+	if (bias_en_gpio == 0) {
+		bias_en_gpio = 189;
+		ret = gpio_request(bias_en_gpio, "bias_enable");
+		if (ret) {
+			DRM_ERROR("Faild to request bias_enable gpio\n");
+			return -EINVAL;
+		}
+		gpio_direction_output(bias_en_gpio, 0);
+	}
+
+	_get_panel_reset_gpio();
+
+	gpio_direction_output(bias_en_gpio, 0);
+	gpio_direction_output(mipi_reset_gpio, 0);
+	gpio_set_value_cansleep(bias_en_gpio, 0);
+	gpio_set_value_cansleep(mipi_reset_gpio, 0);
+	usleep_range(2000, 2500);
+	gpio_set_value_cansleep(bias_en_gpio, 1);
+	usleep_range(2000, 2500);
+	gpio_set_value_cansleep(mipi_reset_gpio, 1);
+	usleep_range(3000, 3500);
+	vaddr = ioremap(0xff0c2d00, 0x60);
+	iowrite32(0x3221, vaddr + 0x1c);
+	usleep_range(2000, 2500);
+	iounmap(vaddr);
+
+	/* switch i2c scl pin back */
+	reg_value_scl |= 0x1000;
+	rpmsg_send_generic_raw_command(RP_INDIRECT_WRITE, 0,
+					(u8 *)&reg_value_scl, 4,
+					NULL, 0,
+					SECURE_I2C_FLIS_REG, 0);
+	iounmap(vaddr1);
+	return 0;
+}
+
+static int sharp5_cmd_exit_deep_standby(struct mdfld_dsi_config *dsi_config)
+{
+	PSB_DEBUG_ENTRY("\n");
+
+	if (bias_en_gpio)
+		gpio_set_value_cansleep(bias_en_gpio, 1);
+	_get_panel_reset_gpio();
+	gpio_direction_output(mipi_reset_gpio, 0);
+
+	gpio_set_value_cansleep(mipi_reset_gpio, 0);
+	usleep_range(1000, 1500);
+	gpio_set_value_cansleep(mipi_reset_gpio, 1);
+	usleep_range(3000, 3500);
+	return 0;
+}
+
+static struct drm_display_mode *sharp5_cmd_get_config_mode(void)
+{
+	struct drm_display_mode *mode;
+
+	PSB_DEBUG_ENTRY("\n");
+
+	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+	if (!mode)
+		return NULL;
+
+	mode->hdisplay = 1080;
+	mode->hsync_start = 1168;
+	mode->hsync_end = 1200;
+	mode->htotal = 1496;
+
+	mode->vdisplay = 1920;
+	mode->vsync_start = 1923;
+	mode->vsync_end = 1926;
+	mode->vtotal = 1987;
+
+	mode->vrefresh = 60;
+	mode->clock =  mode->vrefresh * mode->vtotal * mode->htotal / 1000;
+	mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+	drm_mode_set_name(mode);
+	drm_mode_set_crtcinfo(mode, 0);
+
+	return mode;
+}
+
+static void sharp5_cmd_get_panel_info(int pipe, struct panel_info *pi)
+{
+	PSB_DEBUG_ENTRY("\n");
+
+	if (pipe == 0) {
+		pi->width_mm = 58;
+		pi->height_mm = 103;
+	}
+}
+
+void sharp5_cmd_init(struct drm_device *dev, struct panel_funcs *p_funcs)
+{
+	if (!dev || !p_funcs) {
+		DRM_ERROR("Invalid parameters\n");
+		return;
+	}
+
+	PSB_DEBUG_ENTRY("\n");
+
+	p_funcs->reset = sharp5_cmd_panel_reset;
+	p_funcs->power_on = sharp5_cmd_power_on;
+	p_funcs->power_off = sharp5_cmd_power_off;
+	p_funcs->drv_ic_init = sharp5_cmd_drv_ic_init;
+	p_funcs->get_config_mode = sharp5_cmd_get_config_mode;
+	p_funcs->get_panel_info = sharp5_cmd_get_panel_info;
+	p_funcs->dsi_controller_init = sharp5_cmd_controller_init;
+	p_funcs->detect = sharp5_cmd_panel_connection_detect;
+	p_funcs->set_brightness = sharp5_cmd_set_brightness;
+	p_funcs->exit_deep_standby = sharp5_cmd_exit_deep_standby;
+}
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/tng_wa.c b/drivers/external_drivers/intel_media/display/tng/drv/tng_wa.c
new file mode 100755
index 0000000..cdc54cd
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/tng_wa.c
@@ -0,0 +1,235 @@
+/**************************************************************************
+ * Copyright (c) 2012, Intel Corporation.
+ * All Rights Reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Hitesh K. Patel <hitesh.k.patel@intel.com>
+ */
+
+#include "tng_wa.h"
+
+/* psb_intel_reg.h - for BIT* definitions */
+#include "psb_intel_reg.h"
+
+/* TNG A0 Workarounds */
+
+static void apply_HSD_4568161_4582997(struct drm_device *dev)
+{
+	/* HSD - 4568161: Local idle gating on SLC core clock
+	   causes SLC to drop video mem request if clock is
+	   heavily Throttling
+	   Workaround: SW should set GFX_CG_DIS_0[8] at offset
+	   0x160000 whenever powering up GFX */
+	/* HSD 4582997: Trunk and idle clock gating must be disabled
+	   Workaround: The driver should set GFX_CG_DIS_0[1:0] = 2'b11
+	   at MMADR offset 0x160000 */
+
+	uint32_t GFX_CG_DIS_0_OFFSET = 0x160000 - GFX_WRAPPER_OFFSET;
+	uint32_t GFX_CG_DIS_0_DATA = WRAPPER_REG_READ(GFX_CG_DIS_0_OFFSET);
+
+	GFX_CG_DIS_0_DATA |= (BIT0 | BIT1 | BIT8);
+
+	WRAPPER_REG_WRITE(GFX_CG_DIS_0_OFFSET, GFX_CG_DIS_0_DATA);
+}
+
+static void apply_HSD_3940227_4568479(struct drm_device *dev)
+{
+	/* HSD - 3940227: SLC wrapper must support breaking 4K
+	   crossing VSP brusts on SLC path for IMG sepc compliancy
+	   Workaround: Read-Modify-Write to set GBYPASSENABLE[2]
+	   and GBYPASSENABLE[8] on same write at MMADR offset
+	   0x2850 before powering up VSP */
+	/**
+	 * HSD - 4568479: VED issues write fence cache hint incorrectly
+	 * Workaround: Read-Modify-Write to set GBYPASSENABLE[0] and
+	 * GBYPASSENABLE[8] on same write at MMADR offset 0x2850
+	 * before powering up VSP
+	 */
+	uint32_t GBYPASSENABLE_OFFSET = 0x2850 - PSB_VDC_OFFSET;
+	uint32_t GBYPASSENABLE_DATA = REG_READ(GBYPASSENABLE_OFFSET);
+
+	GBYPASSENABLE_DATA |= (BIT0 | BIT2 | BIT8);
+
+	REG_WRITE(GBYPASSENABLE_OFFSET, GBYPASSENABLE_DATA);
+}
+
+#if (defined HSD_4568152) && HSD_4568152
+/* Not needed if apply 3940227 and 4568479 */
+static void apply_HSD_4568152(struct drm_device *dev)
+{
+	/* HSD - 4568152: VP8 test fail with SLC due to bad cache policy
+	   Workaround: SW must set RGX_CR_SLC_CTRL_BYPASS[6] at
+	   MMADR offset 0x103828 before powering up VED for VP8 decode */
+	uint32_t RGX_CR_SLC_CTRL_BYPASS_OFFSET = 0x103828 - RGX_OFFSET;
+	uint32_t RGX_CR_SLC_CTRL_BYPASS_DATA = RGX_REG_READ(
+					RGX_CR_SLC_CTRL_BYPASS_OFFSET);
+
+	RGX_CR_SLC_CTRL_BYPASS_DATA |= BIT6;
+
+	RGX_REG_WRITE(RGX_CR_SLC_CTRL_BYPASS_OFFSET,
+			RGX_CR_SLC_CTRL_BYPASS_DATA);
+}
+#endif
+
+static void apply_HSD_4568473(struct drm_device *dev)
+{
+	/* HSD 4568473: PFI credits
+	   Workaround: GCLIP_CONTROL[19:16] should be programmed to
+	   4'b1110 and GCILP_CONTROL[23] should be programmed to
+	   1'b1 on same write (MMADDR offset 0x160020) */
+
+	int GCLIP_CONTROL_OFFSET = 0x160020 - GFX_WRAPPER_OFFSET;
+	int GCLIP_CONTROL_DATA = WRAPPER_REG_READ(GCLIP_CONTROL_OFFSET);
+
+	GCLIP_CONTROL_DATA &= ~(BIT16);
+	GCLIP_CONTROL_DATA |= (BIT23 | BIT19 | BIT18 | BIT17);
+
+	WRAPPER_REG_WRITE(GCLIP_CONTROL_OFFSET, GCLIP_CONTROL_DATA);
+}
+
+static void apply_HSD_4582616(struct drm_device *dev)
+{
+	/* HSD 45682616: czclk remains gated for ~5us after pipeA
+	   framestart in S0i1-display mode, could lead to underflows
+	   Workaround: offset 70404[5] must be set to '1' when
+	   Display is powered on */
+
+	uint32_t DISPLAY_OFFSET = 0x70404 - PSB_VDC_OFFSET;
+	uint32_t DISPLAY_DATA = REG_READ(DISPLAY_OFFSET);
+
+	DISPLAY_DATA |= BIT5;
+
+	REG_WRITE(DISPLAY_OFFSET, DISPLAY_DATA);
+}
+
+#if (defined NO_HSD_WORKAROUND) && NO_HSD_WORKAROUND
+/* No need as this is default setting of HW */
+static void apply_NO_HSD_Workaround(struct drm_device *dev)
+{
+	/* HSD (Not specified): SLC brust disable
+	   Workaround: RGX_CR_SLC_CTRL_MISC[0] at MMADR
+	   offset 0x103800 must be programmed to 1'b1 */
+	uint32_t RGX_CR_SLC_CTRL_MISC_OFFSET = 0x103800 - RGX_OFFSET;
+	uint32_t RGX_CR_SLC_CTRL_MISC_DATA = RGX_REG_READ(
+					RGX_CR_SLC_CTRL_MISC_OFFSET);
+
+	RGX_CR_SLC_CTRL_MISC_DATA |= BIT0;
+
+	RGX_REG_WRITE(RGX_CR_SLC_CTRL_MISC_OFFSET,
+			RGX_CR_SLC_CTRL_MISC_DATA);
+}
+#endif
+
+/* Apply the TNG A0 Workaround */
+void apply_TNG_A0_workarounds(int islands, int pre_po)
+{
+	struct drm_device *dev = gpDrmDevice;
+
+	if (!dev)
+		return;
+
+	/* Only apply workaround on power up. */
+
+	switch (islands) {
+	case OSPM_DISPLAY_ISLAND:
+		/*  When display is powered-on. */
+		if (!pre_po)
+			apply_HSD_4582616(dev);
+		break;
+
+	case OSPM_GRAPHICS_ISLAND:
+		/* Before GFX is powered-on. */
+		if (pre_po) {
+			apply_HSD_4568161_4582997(dev);
+			apply_HSD_4568473(dev);
+		}
+		break;
+
+	case OSPM_VIDEO_ENC_ISLAND:
+	case OSPM_VIDEO_VPP_ISLAND:
+	case OSPM_VIDEO_DEC_ISLAND:
+		/*  Before powering up VED for VP8 decode */
+		if (pre_po)
+			apply_HSD_3940227_4568479(dev);
+		break;
+
+	default:
+		break;
+	}
+}
+
+/* ANN A0 Workarounds */
+
+static void apply_HSD_4613012_5129793(struct drm_device *dev)
+{
+	/* HSD - 4613012: CLONE of TNG B0 4585013
+	   GCLIP_CONTROL is rest to incorrect default values
+	   HSD - 5129793: GFX wrapper RTL initializing PFI Credits
+	   to 16 instead of 25
+	   Workaround: program following bits when GFX is powerd on or
+	   coming out of reset:
+	   GCLIP_CONTROL[8] to 1'b1
+	   GCLIP_CONTROL[20:16] to 5'h19
+	   GCILP_CONTROL[23] to 1'b1
+	   (MMADDR offset 0x160020) */
+
+	int GCLIP_CONTROL_OFFSET = 0x160020 - GFX_WRAPPER_OFFSET;
+	int GCLIP_CONTROL_DATA = WRAPPER_REG_READ(GCLIP_CONTROL_OFFSET);
+
+	GCLIP_CONTROL_DATA &= ~(BIT18 | BIT17);
+	GCLIP_CONTROL_DATA |= (BIT23 | BIT20 | BIT19 | BIT16 | BIT8);
+
+	WRAPPER_REG_WRITE(GCLIP_CONTROL_OFFSET, GCLIP_CONTROL_DATA);
+}
+
+/* Apply the ANN A0 Workaround */
+void apply_ANN_A0_workarounds(int islands, int pre_po)
+{
+	struct drm_device *dev = gpDrmDevice;
+
+	if (!dev)
+		return;
+
+	/* Only apply workaround on power up. */
+
+	switch (islands) {
+	case OSPM_DISPLAY_ISLAND:
+		/*  When display is powered-on. */
+		break;
+
+	case OSPM_GRAPHICS_ISLAND:
+		/* Before GFX is powered-on. */
+		if (pre_po) {
+			apply_HSD_4613012_5129793(dev);
+		}
+		break;
+
+	case OSPM_VIDEO_ENC_ISLAND:
+	case OSPM_VIDEO_VPP_ISLAND:
+	case OSPM_VIDEO_DEC_ISLAND:
+		/*  Before powering up VED for VP8 decode */
+		break;
+
+	default:
+		break;
+	}
+}
diff --git a/drivers/external_drivers/intel_media/display/tng/drv/tng_wa.h b/drivers/external_drivers/intel_media/display/tng/drv/tng_wa.h
new file mode 100644
index 0000000..6daccb6
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/drv/tng_wa.h
@@ -0,0 +1,41 @@
+/**************************************************************************
+ * Copyright (c) 2012, Intel Corporation.
+ * All Rights Reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Hitesh K. Patel <hitesh.k.patel@intel.com>
+ */
+
+#ifndef _TNG_WORKAROUNDS_H_
+#define _TNG_WORKAROUNDS_H_
+
+#include "psb_drv.h"
+
+extern struct drm_device *gpDrmDevice;
+
+/* Apply the TNG A0 Workaround */
+void apply_TNG_A0_workarounds(int islands, int pre_po);
+
+/* Apply the ANN A0 Workaround */
+void apply_ANN_A0_workarounds(int islands, int pre_po);
+
+#endif	/* _TNG_WORKAROUNDS_H_ */
diff --git a/drivers/external_drivers/intel_media/display/tng/fbdrv/Makefile b/drivers/external_drivers/intel_media/display/tng/fbdrv/Makefile
new file mode 100644
index 0000000..462260c
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/fbdrv/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_MRFL_DISPLAY) += mrfl_display_controller.o
+
diff --git a/drivers/external_drivers/intel_media/display/tng/fbdrv/mrfl_display_controller.c b/drivers/external_drivers/intel_media/display/tng/fbdrv/mrfl_display_controller.c
new file mode 100644
index 0000000..e8ad07d
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/fbdrv/mrfl_display_controller.c
@@ -0,0 +1,420 @@
+/*
+ * Merrifield Display Controller Driver
+ *
+ * Copyright (C) 2011 Intel Corporation
+ * Author: Mark F. Brown <mark.f.brown@intel.com>
+ * Author: Joel Rosenzweig <joel.b.rosenzweig@intel.com>
+ * This code is based from goldfish_fb.c from Google Android
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include "mrfl_display_controller.h"
+
+#define DRV_NAME "Merrifield Display Controller"
+
+struct merrifield_fb {
+	void __iomem *io_virt;
+	uint32_t reg_base;
+	int rotation;
+	struct fb_info fb;
+	u32 cmap[16];
+};
+
+static struct fb_fix_screeninfo merrifield_fb_fix = {
+	.id = "mrfl_fb",
+	.type = FB_TYPE_PACKED_PIXELS,
+	.ypanstep = 1,
+	.visual = FB_VISUAL_PSEUDOCOLOR,
+	.accel = FB_ACCEL_NONE,
+};
+
+void __iomem *lcd_io_virt_global;
+
+static inline u32 convert_bitfield(int val, struct fb_bitfield *bf)
+{
+	unsigned int mask = (1 << bf->length) - 1;
+
+	return (val >> (16 - bf->length) & mask) << bf->offset;
+}
+
+static int merrifield_fb_setcolreg(unsigned int regno, unsigned int red,
+				   unsigned int green, unsigned int blue,
+				   unsigned int transp, struct fb_info *info)
+{
+	struct merrifield_fb *fb = container_of(info,
+						struct merrifield_fb, fb);
+
+	if (regno < 16) {
+		fb->cmap[regno] = convert_bitfield(transp, &fb->fb.var.transp) |
+		    convert_bitfield(blue, &fb->fb.var.blue) |
+		    convert_bitfield(green, &fb->fb.var.green) |
+		    convert_bitfield(red, &fb->fb.var.red);
+		return 0;
+	} else
+		return 1;
+
+}
+
+static int merrifield_fb_check_var(struct fb_var_screeninfo *var,
+				   struct fb_info *info)
+{
+	if ((var->rotate & 1) != (info->var.rotate & 1)) {
+		if ((var->xres != info->var.yres) ||
+		    (var->yres != info->var.xres) ||
+		    (var->xres_virtual != info->var.yres) ||
+		    (var->yres_virtual > info->var.xres * 2) ||
+		    (var->yres_virtual < info->var.xres)) {
+			return -EINVAL;
+		}
+	} else {
+		if ((var->xres != info->var.xres) ||
+		    (var->yres != info->var.yres) ||
+		    (var->xres_virtual != info->var.xres) ||
+		    (var->yres_virtual > info->var.yres * 2) ||
+		    (var->yres_virtual < info->var.yres)) {
+			return -EINVAL;
+		}
+	}
+
+	if ((var->xoffset != info->var.xoffset) ||
+	    (var->bits_per_pixel != info->var.bits_per_pixel) ||
+	    (var->grayscale != info->var.grayscale)) {
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int merrifield_fb_set_par(struct fb_info *info)
+{
+	struct merrifield_fb *fb = container_of(info,
+						struct merrifield_fb, fb);
+
+	if (fb->rotation != fb->fb.var.rotate) {
+		info->fix.line_length = info->var.xres * 2;
+		fb->rotation = fb->fb.var.rotate;
+	}
+
+	return 0;
+}
+
+static int merrifield_fb_pan_display(struct fb_var_screeninfo *var,
+				     struct fb_info *info)
+{
+	/* TODO convert magic numbers to macro definitions */
+	if (var->yoffset == 0)
+		/* surface address register */
+		iowrite32(0x0, lcd_io_virt_global + 0x7019C);
+	else
+		/* surface address register */
+		iowrite32(var->xres * (var->bits_per_pixel / 8) * var->yres,
+			  lcd_io_virt_global + 0x7019C);
+
+	return 0;
+}
+
+static struct fb_ops merrifield_fb_ops = {
+	.owner = THIS_MODULE,
+	.fb_check_var = merrifield_fb_check_var,
+	.fb_set_par = merrifield_fb_set_par,
+	.fb_setcolreg = merrifield_fb_setcolreg,
+	.fb_pan_display = merrifield_fb_pan_display,
+	.fb_fillrect = cfb_fillrect,
+	.fb_copyarea = cfb_copyarea,
+	.fb_imageblit = cfb_imageblit,
+};
+
+static void init_display_controller_registers(void __iomem *io_virt)
+{
+
+	int width = H_ACTIVE;
+	int height = V_ACTIVE;
+	int viewport_width = H_ACTIVE;
+	int viewport_height = V_ACTIVE;
+
+	/* TODO convert magic numbers to macro definitions */
+	/* Programming LNC CRC Registers */
+	iowrite32(0x80135937, io_virt + 0x60050);
+	iowrite32(0x0048efae, io_virt + 0x60054);
+	iowrite32(0x004ad4cb, io_virt + 0x60058);
+	iowrite32(0x0, io_virt + 0x6005c);
+	/* Program clocks and check for clock lock */
+	/* Program DSI PLL */
+	iowrite32(0x0, io_virt + 0x0F014);
+	iowrite32(0x000000c1, io_virt + 0xF040);
+	iowrite32(0x00800000, io_virt + 0xF014);
+	iowrite32(0x80800000, io_virt + 0xF014);
+	iowrite32(0x0, io_virt + 0x62190);
+	/* Enable MIPI port */
+	iowrite32(0x80810000, io_virt + 0x61190);
+	iowrite32(0x270f04, io_virt + 0x61210);
+	/* MIPI DPHY PARAM REG X */
+	iowrite32(0xb14540c, io_virt + 0xB080);
+	/* Data lanes - 2 RGB 888 X */
+	iowrite32((RGB888 << 7) | 0x12, io_virt + 0xB00c);
+	/* Video mode - Burst mode X */
+	iowrite32(0x5, io_virt + 0xB058);
+	/* MIPI control register X */
+	iowrite32(0x18, io_virt + 0xB104);
+	/* Interrupt enable X */
+	iowrite32(0xffffffff, io_virt + 0xB008);
+	/* MIPI HS-TX timeout reg X */
+	iowrite32(0x3fffff, io_virt + 0xB010);
+	/* LP reception timeout reg X */
+	iowrite32(0xffff, io_virt + 0xB014);
+	/* Turnaround timeout X */
+	iowrite32(0x1f, io_virt + 0xB018);
+	/* Reset timeout X */
+	iowrite32(0xff, io_virt + 0xB01c);
+	/* HS to LP timeout reg X */
+	iowrite32(0x46, io_virt + 0xB044);
+	/* MIPI Clock lane switching time count */
+	iowrite32(0xa0014, io_virt + 0xB088);
+	/* DBI bandwidth control register */
+	iowrite32(0x400, io_virt + 0xB084);
+	/* Master_init_timer X */
+	iowrite32(0x7d0, io_virt + 0xB050);
+	/* Disable clock stopping X */
+	iowrite32(0x0, io_virt + 0xB05C);
+	/* LP Byte clock X */
+	iowrite32(0x4, io_virt + 0xB060);
+	/* DPI resolution X */
+	iowrite32((height << 16) | width, io_virt + 0xB020);
+	/* Horizontal sync padding X */
+	iowrite32(0x4, io_virt + 0xB028);
+	/* Horizontal back porch X */
+	iowrite32(0xe, io_virt + 0xB02c);
+	/* Horizontal Front porch X */
+	iowrite32(0x8, io_virt + 0xB030);
+	/* Horizontal active area count X */
+	iowrite32(0x2d0, io_virt + 0xB034);
+	/* Vertical Sync padding X */
+	iowrite32(0x4, io_virt + 0xB038);
+	/* Vertical back porch X */
+	iowrite32(0x8, io_virt + 0xB03c);
+	/* Vertical front porch X */
+	iowrite32(0x7, io_virt + 0xB040);
+	/* Turn on DPI */
+	iowrite32(0x1, io_virt + 0xB000);
+	/* Turn on DPI Control register */
+	iowrite32(0x2, io_virt + 0xB048);
+	/* Programming Pipe A */
+	iowrite32((((H_ACTIVE + 80 - 1) << 16) | (H_ACTIVE - 1)),
+		  io_virt + 0x60000);
+	iowrite32((((H_ACTIVE + 80 - 1) << 16) | (H_ACTIVE - 1)),
+		  io_virt + 0x60004);
+	iowrite32(((H_ACTIVE + 48 - 1) << 16) | (H_ACTIVE + 8 - 1),
+		  io_virt + 0x60008);
+	iowrite32((((V_ACTIVE + 5) << 16) | (V_ACTIVE - 1)), io_virt + 0x6000C);
+	iowrite32((((V_ACTIVE + 5) << 16) | (V_ACTIVE - 1)), io_virt + 0x60010);
+	iowrite32((((V_ACTIVE + 2) << 16) | (V_ACTIVE + 1)), io_virt + 0x60014);
+	iowrite32((((H_ACTIVE - 1) << 16) | (V_ACTIVE - 1)), io_virt + 0x6001C);
+	iowrite32(0x7b1dffff, io_virt + 0x70500);
+	iowrite32(0x6c000000, io_virt + 0x70504);
+	iowrite32(0x0, io_virt + 0x701D0);
+	iowrite32(0x0, io_virt + 0x701D4);
+	/* 0x5 == RGB565 */
+	/* 0xa == RGB888 24-bit RGB no Alpha */
+	/* 0xf == RGBA8888 32-bit RGB */
+	/* Enable Display Sprite A */
+	iowrite32(0x80000000 | (0x5 << 26), io_virt + 0x70180);
+	/* Stride */
+	iowrite32(0x00000780, io_virt + 0x70188);
+	/* Linear offset register */
+	iowrite32(0x0, io_virt + 0x70184);
+	/* Position */
+	iowrite32(0x0, io_virt + 0x7018C);
+	/* Width and height X */
+	iowrite32(((viewport_height - 1) << 16) | (viewport_width - 1),
+		  io_virt + 0x70190);
+	/* Surface address register */
+	iowrite32(0x0, io_virt + 0x7019C);
+	/* Disable VGA plane */
+	iowrite32(0x80000000, io_virt + 0x71400);
+	/* Pipe A Enable */
+	iowrite32(0x80000000, io_virt + 0x70008);
+	/* Pipe A Status register */
+	iowrite32(0xb000ffff, io_virt + 0x70024);
+
+}
+
+static int merrifield_fb_probe(struct pci_dev *pdev,
+			       const struct pci_device_id *id)
+{
+	int err;
+	void __iomem *io_virt;
+	struct fb_info *info;
+	int fb_base = 0;
+	int width = H_ACTIVE;
+	int height = V_ACTIVE;
+
+	err = pci_enable_device(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "failed to enable device!\n");
+		goto failure;
+	}
+
+	info = framebuffer_alloc(sizeof *info, &pdev->dev);
+	if (!info) {
+		dev_err(&pdev->dev, "framebuffer allocation failure.\n");
+		goto failure;
+	}
+
+	merrifield_fb_fix.mmio_start = pci_resource_start(pdev, 0);
+	merrifield_fb_fix.mmio_len = pci_resource_len(pdev, 0);
+
+	if (!request_mem_region(merrifield_fb_fix.mmio_start,
+				merrifield_fb_fix.mmio_len, DRV_NAME)) {
+		dev_err(&pdev->dev, "mmio request_mem_region failure!\n");
+		goto failure;
+	}
+
+	info->par = kzalloc(sizeof *info->par, GFP_KERNEL);
+	if (info->par == NULL) {
+		dev_err(&pdev->dev, "failed to allocate info->par\n");
+		goto failure;
+	}
+	lcd_io_virt_global = io_virt =
+	    ioremap_nocache(merrifield_fb_fix.mmio_start,
+			    merrifield_fb_fix.mmio_len);
+
+	pci_read_config_dword(pdev, 0x5C, &fb_base);
+
+	/* Allocate enough for up 2 x 16-bit frame buffers at
+	 * our given resolution which is used for double buffering */
+	merrifield_fb_fix.smem_start = fb_base;
+	merrifield_fb_fix.smem_len = H_ACTIVE * V_ACTIVE * 4;
+
+	info->screen_base = ioremap_nocache(merrifield_fb_fix.smem_start,
+					    merrifield_fb_fix.smem_len);
+	info->fix = merrifield_fb_fix;
+	info->fbops = &merrifield_fb_ops;
+	info->flags =
+	    FBINFO_DEFAULT | FBINFO_HWACCEL_DISABLED | FBINFO_FLAG_DEFAULT;
+	strcat(info->fix.id, DRV_NAME);
+	info->var.activate = FB_ACTIVATE_NOW;
+	info->device = &pdev->dev;
+
+	err = fb_alloc_cmap(&info->cmap, 256, 0);
+	if (err < 0) {
+		dev_err(&pdev->dev, "cmap allocation failure.\n");
+		goto failure;
+	}
+
+	/* RGB 5:6:5 */
+	info->pseudo_palette = &info->cmap;
+	info->cmap.len = 16;
+	info->fix.type = FB_TYPE_PACKED_PIXELS;
+	info->fix.visual = FB_VISUAL_TRUECOLOR;
+	info->fix.line_length = width * 2;
+	info->fix.accel = FB_ACCEL_NONE;
+	info->fix.ypanstep = 1;
+	info->fix.smem_start = merrifield_fb_fix.smem_start;
+	info->fix.smem_len = merrifield_fb_fix.smem_len;
+	info->var.xres = width;
+	info->var.yres = height;
+	info->var.xres_virtual = width;
+	info->var.yres_virtual = height * 2;
+	info->var.bits_per_pixel = 16;
+	info->var.height = height;
+	info->var.width = width;
+	info->var.red.offset = 11;
+	info->var.red.length = 5;
+	info->var.green.offset = 5;
+	info->var.green.length = 6;
+	info->var.blue.offset = 0;
+	info->var.blue.length = 5;
+
+	err = fb_set_var(info, &info->var);
+	if (err) {
+		dev_err(&pdev->dev, "error setting var info\n");
+		goto failure;
+	}
+
+	info->pixmap.addr = kmalloc(4096, GFP_KERNEL);
+	if (!info->pixmap.addr) {
+		dev_err(&pdev->dev, "pixmap allocation failure\n");
+		goto failure;
+	}
+
+	info->pixmap.size = 4096;
+	info->pixmap.buf_align = 4;
+	info->pixmap.scan_align = 1;
+	info->pixmap.access_align = 32;
+	info->pixmap.flags = FB_PIXMAP_SYSTEM;
+
+	pci_set_drvdata(pdev, info);
+	platform_set_drvdata(pdev, info);
+	if (register_framebuffer(info) < 0) {
+		dev_err(&pdev->dev, "could not register framebuffer\n");
+		goto failure;
+	}
+
+	init_display_controller_registers(io_virt);
+
+	return 0;
+
+ failure:
+	/* TODO clean-up routine */
+	BUG();
+}
+
+static void merrifield_fb_remove(struct pci_dev *pdev)
+{
+	/* TODO add teardown routine */
+	BUG();
+}
+
+static DEFINE_PCI_DEVICE_TABLE(merrifield_fb_devices) = {
+	{
+	PCI_VENDOR_ID_INTEL, 0x1180, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
+	0,}
+};
+
+MODULE_DEVICE_TABLE(pci, merrifield_fb_devices);
+
+static struct pci_driver merrifield_fb_driver = {
+	.name = DRV_NAME,
+	.id_table = merrifield_fb_devices,
+	.probe = merrifield_fb_probe,
+	.remove = merrifield_fb_remove,
+};
+
+static int __init merrifield_fb_init(void)
+{
+	return pci_register_driver(&merrifield_fb_driver);
+}
+
+static void __exit merrifield_fb_exit(void)
+{
+	pci_unregister_driver(&merrifield_fb_driver);
+}
+
+module_init(merrifield_fb_init);
+module_exit(merrifield_fb_exit);
diff --git a/drivers/external_drivers/intel_media/display/tng/fbdrv/mrfl_display_controller.h b/drivers/external_drivers/intel_media/display/tng/fbdrv/mrfl_display_controller.h
new file mode 100644
index 0000000..3b387eb
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/fbdrv/mrfl_display_controller.h
@@ -0,0 +1,9 @@
+#ifndef __MRFL_DISPLAY_CONTROLLER_H
+#define __MRFL_DISPLAY_CONTROLLER_H
+
+#define H_ACTIVE 480
+#define V_ACTIVE 800
+#define RGB888 0x4
+#define RGB565 0x1
+
+#endif
diff --git a/drivers/external_drivers/intel_media/display/tng/interface/bufferclass_interface.h b/drivers/external_drivers/intel_media/display/tng/interface/bufferclass_interface.h
new file mode 100644
index 0000000..e4f41e0
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/interface/bufferclass_interface.h
@@ -0,0 +1,45 @@
+/*****************************************************************************
+ *
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ ******************************************************************************/
+
+#ifndef __BC_EXPORT_H__
+#define __BC_EXPORT_H__
+
+#include <linux/ioctl.h>
+#include <drm/drmP.h>
+
+struct psb_fpriv {
+	int bcd_index;
+	struct ttm_object_file *tfile;
+};
+
+int BCVideoDestroyBuffers(int id);
+int BCVideoModInit(void);
+int BCVideoModCleanup(void);
+void BCVideoQueryIoctls(struct drm_ioctl_desc *ioctls);
+
+struct psb_fpriv *BCVideoGetPriv(struct drm_file *file);
+void BCVideoSetPriv(struct drm_file *file, void *fpriv);
+
+#endif
diff --git a/drivers/external_drivers/intel_media/display/tng/interface/bufferclass_video_linux.h b/drivers/external_drivers/intel_media/display/tng/interface/bufferclass_video_linux.h
new file mode 100644
index 0000000..36be669
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/interface/bufferclass_video_linux.h
@@ -0,0 +1,71 @@
+/*****************************************************************************
+ *
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ ******************************************************************************/
+
+#ifndef __BC_VIDEO_LINUX_H__
+#define __BC_VIDEO_LINUX_H__
+
+#include <linux/ioctl.h>
+
+#define BC_FOURCC(a, b, c, d) \
+	((unsigned long) ((a) | (b)<<8 | (c)<<16 | (d)<<24))
+
+#define BC_PIX_FMT_NV12     BC_FOURCC('N', 'V', '1', '2')	/*YUV 4:2:0 */
+#define BC_PIX_FMT_UYVY     BC_FOURCC('U', 'Y', 'V', 'Y')	/*YUV 4:2:2 */
+#define BC_PIX_FMT_YUYV     BC_FOURCC('Y', 'U', 'Y', 'V')	/*YUV 4:2:2 */
+#define BC_PIX_FMT_RGB565   BC_FOURCC('R', 'G', 'B', 'P')	/*RGB 5:6:5 */
+
+int FillBuffer(unsigned int uiBufferIndex);
+
+typedef struct BC_Video_ioctl_package_TAG {
+	int ioctl_cmd;
+	int device_id;
+	int inputparam;
+	int outputparam;
+} BC_Video_ioctl_package;
+
+typedef struct bc_buf_ptr {
+	unsigned int index;
+	int size;
+	unsigned long pa;
+	unsigned long handle;
+} bc_buf_ptr_t;
+
+#define BC_Video_ioctl_fill_buffer              0
+#define BC_Video_ioctl_get_buffer_count         1
+/*get physical address by index */
+#define BC_Video_ioctl_get_buffer_phyaddr       2
+/*get index by physical address */
+#define BC_Video_ioctl_get_buffer_index         3
+#define BC_Video_ioctl_request_buffers          4
+#define BC_Video_ioctl_set_buffer_phyaddr       5
+#define BC_Video_ioctl_release_buffer_device    6
+
+#define BC_Video_ioctl_alloc_buffer             7
+#define BC_Video_ioctl_free_buffer              8
+#define BC_Video_ioctl_get_buffer_handle        9
+
+int BC_Camera_Bridge(BC_Video_ioctl_package *psBridge, unsigned long pAddr);
+
+#endif
diff --git a/drivers/external_drivers/intel_media/display/tng/interface/display_callbacks.h b/drivers/external_drivers/intel_media/display/tng/interface/display_callbacks.h
new file mode 100755
index 0000000..f8c2282
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/interface/display_callbacks.h
@@ -0,0 +1,96 @@
+/*****************************************************************************
+ *
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ ******************************************************************************/
+
+#ifndef __DC_CALLBACKS_H__
+#define __DC_CALLBACKS_H__
+
+#include <drm/drmP.h>
+#include <displayclass_interface.h>
+#include "img_types.h"
+#include "psb_drm.h"
+
+struct psb_framebuffer;
+
+typedef int (*pfn_vsync_handler) (struct drm_device *dev, int pipe);
+
+void DCCBGetFramebuffer(struct drm_device *dev, struct psb_framebuffer **);
+int DCChangeFrameBuffer(struct drm_device *dev,
+			struct psb_framebuffer *psbfb);
+int DCCBEnableVSyncInterrupt(struct drm_device *dev, int pipe);
+void DCCBDisableVSyncInterrupt(struct drm_device *dev, int pipe);
+void DCCBInstallVSyncISR(struct drm_device *dev,
+			 pfn_vsync_handler pVsyncHandler);
+void DCCBUninstallVSyncISR(struct drm_device *dev);
+void DCCBFlipToSurface(struct drm_device *dev,
+				unsigned long uiAddr,
+				unsigned long uiFormat,
+				unsigned long uiStride,
+		       unsigned int pipeflag);
+void DCCBFlipOverlay(struct drm_device *dev,
+			struct intel_dc_overlay_ctx *ctx);
+void DCCBFlipSprite(struct drm_device *dev,
+			struct intel_dc_sprite_ctx *ctx);
+void DCCBFlipPrimary(struct drm_device *dev,
+			struct intel_dc_primary_ctx *ctx);
+void DCCBFlipCursor(struct drm_device *dev,
+			struct intel_dc_cursor_ctx *ctx);
+void DCCBSetupZorder(struct drm_device *dev,
+			struct intel_dc_plane_zorder *zorder,
+			int pipe);
+void DCCBWaitForDbiFifoEmpty(struct drm_device *dev, int pipe);
+void DCCBAvoidFlipInVblankInterval(struct drm_device *dev, int pipe);
+int DCCBUpdateDbiPanel(struct drm_device *dev, int pipe);
+int DCCBOverlayDisableAndWait(struct drm_device *dev, u32 ctx,
+			int index);
+int DCCBOverlayEnable(struct drm_device *dev, u32 ctx,
+			int index, int enabled);
+int DCCBSpriteEnable(struct drm_device *dev, u32 ctx,
+			int index, int enabled);
+int DCCBPrimaryEnable(struct drm_device *dev, u32 ctx,
+			int index, int enabled);
+int DCCBCursorDisable(struct drm_device *dev, int index);
+void DCCBFlipDSRCb(struct drm_device *dev);
+void DCCBUnblankDisplay(struct drm_device *dev);
+int DCCBgttMapMemory(struct drm_device *dev,
+		     unsigned int hHandle,
+		     unsigned int ui32TaskId,
+		     IMG_SYS_PHYADDR *pPages,
+		     unsigned int ui32PagesNum, unsigned int *ui32Offset);
+int DCCBgttUnmapMemory(struct drm_device *dev,
+		       unsigned int hHandle, unsigned int ui32TaskId);
+int DCCBgttCleanupMemoryOnTask(struct drm_device *dev,
+				unsigned int ui32TaskId);
+bool DCChangeSwapChainProperty(unsigned long *psSwapChainGTTOffset,
+			int pipe);
+u32 DCCBGetPipeCount(void);
+void DCCBSetPipeToOvadd(u32 *ovadd, int pipe);
+bool DCCBIsSuspended(struct drm_device *dev);
+int DCCBIsPipeActive(struct drm_device *dev, int pipe);
+
+void DCCBDsrForbid(struct drm_device *dev, int pipe);
+void DCCBDsrAllow(struct drm_device *dev, int pipe);
+int DCCBUpdateCursorPos(struct drm_device *dev, int pipe, uint32_t pos);
+int DCCBDumpPipeStatus(struct drm_device *dev, int pipe);
+#endif				/* __DC_CALLBACKS_H__ */
diff --git a/drivers/external_drivers/intel_media/display/tng/interface/displayclass_interface.h b/drivers/external_drivers/intel_media/display/tng/interface/displayclass_interface.h
new file mode 100755
index 0000000..787a48a
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/interface/displayclass_interface.h
@@ -0,0 +1,79 @@
+/*****************************************************************************
+ *
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ ******************************************************************************/
+
+#ifndef __DC_INTERFACE_H__
+#define __DC_INTERFACE_H__
+
+/*NOTE: this file is exported to user mode*/
+
+#ifdef __KERNEL__
+
+#include <drm/drmP.h>
+#include <drm/drm_fb_helper.h>
+#include "ttm/ttm_bo_driver.h"
+#include <ttm/ttm_bo_api.h>
+
+/*
+ * TODO: remove these macros as we don't need swapchains anymore
+ */
+#define PVRSRV_SWAPCHAIN_ATTACHED_PLANE_NONE (0 << 0)
+#define PVRSRV_SWAPCHAIN_ATTACHED_PLANE_A    (1 << 0)
+#define PVRSRV_SWAPCHAIN_ATTACHED_PLANE_B    (1 << 1)
+#define PVRSRV_SWAPCHAIN_ATTACHED_PLANE_C    (1 << 2)
+
+/*
+ * TODO: move this definition back to psb_fb.h
+ * This is NOT a part of IMG display class
+ */
+struct psb_framebuffer {
+	struct drm_framebuffer base;
+	struct address_space *addr_space;
+	struct ttm_buffer_object *bo;
+	struct fb_info *fbdev;
+	uint32_t tt_pages;
+	uint32_t stolen_base;
+	void *vram_addr;
+	/* struct ttm_bo_kmap_obj kmap; */
+	void *hKernelMemInfo;
+	uint32_t depth;
+	uint32_t size;
+	uint32_t offset;
+	uint32_t user_virtual_addr;  /* user space address */
+};
+
+#endif /*__KERNEL__*/
+
+void DCAttachPipe(uint32_t uiPipe);
+void DCUnAttachPipe(uint32_t uiPipe);
+void DC_MRFLD_onPowerOn(uint32_t iPipe);
+void DC_MRFLD_onPowerOff(uint32_t iPipe);
+int DC_MRFLD_Enable_Plane(int type, int index, uint32_t ctx);
+int DC_MRFLD_Disable_Plane(int type, int index, uint32_t ctx);
+bool DC_MRFLD_Is_Plane_Disabled(int type, int index, uint32_t ctx);
+void DCLockMutex(void);
+void DCUnLockMutex(void);
+int DCUpdateCursorPos(uint32_t pipe, uint32_t pos);
+
+#endif				/* __DC_INTERFACE_H__ */
diff --git a/drivers/external_drivers/intel_media/display/tng/interface/drm_shared.h b/drivers/external_drivers/intel_media/display/tng/interface/drm_shared.h
new file mode 100644
index 0000000..6bbf686
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/interface/drm_shared.h
@@ -0,0 +1,102 @@
+/**************************************************************************
+ * Copyright (c) 2007, Intel Corporation.
+ * All Rights Reserved.
+ * Copyright (c) 2008, Tungsten Graphics Inc.  Cedar Park, TX., USA.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+#include <img_types.h>
+
+#ifndef _DRM_SHARED_H_
+#define _DRM_SHARED_H_
+
+
+/* Controlling the kernel modesetting buffers */
+
+#define DRM_PSB_KMS_OFF		0x00
+#define DRM_PSB_KMS_ON		0x01
+#define DRM_PSB_VT_LEAVE        0x02
+#define DRM_PSB_VT_ENTER        0x03
+#define DRM_PSB_EXTENSION       0x06
+#define DRM_PSB_SIZES           0x07
+#define DRM_PSB_FUSE_REG	0x08
+#define DRM_PSB_VBT		0x09
+#define DRM_PSB_DC_STATE	0x0A
+#define DRM_PSB_ADB		0x0B
+#define DRM_PSB_MODE_OPERATION	0x0C
+#define DRM_PSB_STOLEN_MEMORY	0x0D
+#define DRM_PSB_REGISTER_RW	0x0E
+#define DRM_PSB_GTT_MAP         0x0F
+#define DRM_PSB_GTT_UNMAP       0x10
+#define DRM_PSB_GETPAGEADDRS	0x11
+/**
+ * NOTE: Add new commands here, but increment
+ * the values below and increment their
+ * corresponding defines where they're
+ * defined elsewhere.
+ */
+#define DRM_PVR_RESERVED1	0x12
+#define DRM_PVR_RESERVED2	0x13
+#define DRM_PVR_RESERVED3	0x14
+#define DRM_PVR_RESERVED4	0x15
+#define DRM_PVR_RESERVED5	0x16
+
+#define DRM_PSB_HIST_ENABLE	0x17
+#define DRM_PSB_HIST_STATUS	0x18
+#define DRM_PSB_UPDATE_GUARD	0x19
+#define DRM_PSB_INIT_COMM	0x1A
+#define DRM_PSB_DPST		0x1B
+#define DRM_PSB_GAMMA		0x1C
+#define DRM_PSB_DPST_BL		0x1D
+
+#define DRM_PVR_RESERVED6	0x1E
+
+#define DRM_PSB_GET_PIPE_FROM_CRTC_ID 0x1F
+#define DRM_PSB_DPU_QUERY 0x20
+#define DRM_PSB_DPU_DSR_ON 0x21
+#define DRM_PSB_DPU_DSR_OFF 0x22
+#define DRM_PSB_HDMI_FB_CMD 0x23
+
+
+#if 0
+struct psb_gtt_mapping_arg {
+	void *hKernelMemInfo;
+	uint32_t offset_pages;
+	uint32_t page_align;
+};
+#endif
+
+#ifdef __KERNEL__
+    /*****************************
+     *  Clone  of a similarly named record in  pvrsrv_devmem_miw.h in
+     *      user space for use by the gtt mapping ioctl.  This one is
+     *      neutrally  typed.  This is what  actually appears in  the
+     *      'hKernelMemInfo' field above.
+     */
+typedef struct tagPVRSRVMEMINFO {
+    void               *hMemDesc;
+    IMG_DEV_VIRTADDR    sDevVirtAddr;
+
+#if !defined(PVRSRV_NO_MEMINFO_CPU_VIRT_ADDR)
+    void               *pvCpuVirtAddr;
+#endif
+
+    unsigned long long  uiAllocationSize;
+    unsigned long       uiMemAllocFlags;
+} PVRSRV_MEMINFO;
+#endif
+
+#endif
diff --git a/drivers/external_drivers/intel_media/display/tng/interface/pvrsrv_interface.h b/drivers/external_drivers/intel_media/display/tng/interface/pvrsrv_interface.h
new file mode 100644
index 0000000..5b7aa02
--- /dev/null
+++ b/drivers/external_drivers/intel_media/display/tng/interface/pvrsrv_interface.h
@@ -0,0 +1,42 @@
+/*****************************************************************************
+ *
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ ******************************************************************************/
+
+#if !defined(__PVR_DRM_EXPORT_H__)
+#define __PVR_DRM_EXPORT_H__
+
+int PVRSRVOpen(struct drm_device *dev, struct drm_file *pFile);
+int PVRSRVDrmLoad(struct drm_device *dev, unsigned long flags);
+int PVRSRVDrmUnload(struct drm_device *dev);
+void PVRSRVDrmPostClose(struct drm_device *dev, struct drm_file *file);
+void PVRSRVQueryIoctls(struct drm_ioctl_desc *ioctls);
+
+unsigned int PVRSRVGetMeminfoSize(void *hKernelMemInfo);
+void *PVRSRVGetMeminfoCPUAddr(void *hMemHandle);
+int PVRSRVGetMeminfoPages(void *hMemHandle, int npages, struct page ***pages);
+int PVRSRVGetMeminfoPfn(void *hMemHandle, int npages, unsigned long **pfns);
+int PVRSRVMMap(struct file *pFile, struct vm_area_struct *ps_vma);
+int PVRSRVInterrupt(struct drm_device *dev);
+
+#endif
diff --git a/drivers/external_drivers/intel_media/graphics/dfrgx/Kbuild b/drivers/external_drivers/intel_media/graphics/dfrgx/Kbuild
new file mode 100644
index 0000000..349f4a9
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/dfrgx/Kbuild
@@ -0,0 +1,102 @@
+# Kbuild - the "makefile" used to build the dfrgx software.
+#
+# Invocations
+# 1.  usual: as a part of a full kernel build if CONFIG_GFX_RGX_DEVFREQ is
+# defined as "y" or "m".  This file ("Kbuild") is selected by the kernel
+# build system because "Kbuild" has priority over "Makefile".
+#
+# In which case, either:
+# -- (CONFIG_GFX_RGX_DEVFREQ=y) the dfrgx software is built-in to the kernel.
+# or
+# -- (CONFIG_GFX_RGX_DEVFREQ=m) the dfrgx software is built as a module into
+#    dfrgx.ko .  HOWEVER, some portions of dfrgx (e.g., hooks into the
+#    graphics device driver) are still built into the kernel.
+#
+# 2.  Alternate invocation: The module may be built separately from the rest of
+# the kernel (typically to reduce debug cycle time during development):
+# File "Makefile" in this directory is invoked from the command line,
+# defines DF_RGX_EXT_MOD_BUILD as "y", and then causes Kbuild to be invoked.
+# The kernel against which the module will be loaded should have been created
+# in the usual way with CONFIG_GFX_RGX_DEVFREQ=m.
+# Requires one of the following have been done:
+# -- "make modules_prepare"    (ok, but does not set up Module.symvers)
+# -- "make" or "make bzImage" -- regular kernel build to establish build
+#    environment.
+
+# To add verbosity during build:
+#   make KBUILD_VERBOSE=1
+
+ifeq ($(DF_RGX_EXT_MOD_BUILD),y)
+CONFIG_GFX_RGX_DEVFREQ := m
+endif
+
+# MY_DEBUG - 1 to force compilation to include "-g".
+MY_DEBUG := 1
+
+# THERMAL_DEBUG for force_states and get_available_states sysfs entries
+THERMAL_DEBUG := 1
+
+ifeq ($(THERMAL_DEBUG),1)
+ccflags-y += -DTHERMAL_DEBUG
+endif
+
+ccflags-y += -Werror
+
+# This makefile written for dir: drivers/staging/intel_media/graphics/dfrgx
+
+ifneq ($(MY_DEBUG),)
+ifneq ($(MY_DEBUG),0)
+# Causes build errors: ## ccflags-y += -O0 -fno-inline
+ifndef CONFIG_DEBUG_INFO
+# If CONFIG_DEBUG_INFO, then "-g" is already present by default.
+ccflags-y += -g
+endif
+endif
+endif
+
+obj-$(CONFIG_GFX_RGX_DEVFREQ)	+= dfrgx.o
+
+dfrgx-y :=
+dfrgx-y += df_rgx.o
+dfrgx-y += dev_freq_graphics_pm.o
+dfrgx-y += df_rgx_utils.o
+dfrgx-y += df_rgx_burst.o
+dfrgx-y += dev_freq_attrib.o
+
+include drivers/external_drivers/intel_media/graphics/dfrgx/dependencies/Makefile
+
+my_warning_flags :=
+my_warning_flags += -Wall -fmessage-length=0 -Wunused-parameter
+
+my_warning_flags += -Wextra -Wno-sign-compare -Wformat-nonliteral -Wformat-security -fdiagnostics-show-option -Wdeclaration-after-statement -Wmissing-format-attribute -Wpointer-arith -Wlogical-op -Wbad-function-cast -Wmissing-prototypes
+
+# Turn off things that too many kernel headers cause.
+my_warning_flags += -Wno-unused-parameter -Wno-pointer-arith -Wno-bad-function-cast
+
+## my_warning_flags += -Wundef -Wc++-compat
+
+ccflags-y += $(my_warning_flags)
+
+# To request an assembly listing:
+## ccflags-y += -Wa,-alh=$(PWD)/q.lst
+
+# Variable c_flags is the embodiment of the kbuild compilation options.
+
+# TOP_REL_* - directory spec relative to top directory.
+#    Used for makefile include references.
+#    Used for -I header file inclusion.
+
+CFLAGS_df_rgx.o += -Idrivers/devfreq
+
+# Fixme: Replace the hardcoded path with something more intelligent approach
+CFLAGS_df_rgx.o += \
+	-Idrivers/external_drivers/intel_media/display/tng/drv \
+	-Idrivers/external_drivers/intel_media/graphics/dfrgx \
+	-I$(INCLUDES_RGX)
+
+CFLAGS_dev_freq_graphics_pm.o += -I$(INCLUDES_RGX)
+
+CFLAGS_df_rgx_burst.o += \
+	-Idrivers/external_drivers/intel_media/display/tng/drv \
+	-I$(INCLUDES_RGX)
+
diff --git a/drivers/external_drivers/intel_media/graphics/dfrgx/Makefile b/drivers/external_drivers/intel_media/graphics/dfrgx/Makefile
new file mode 100644
index 0000000..3e98ef7
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/dfrgx/Makefile
@@ -0,0 +1,91 @@
+# NOTE: This file is *not* invoked by the kernel build system,
+# as the presence of file "Kbuild" takes precedence.  Rather, this
+# file can be invoked by a "make" in this directory to build the
+# module stand-alone.
+#
+# See commentary in file "Kbuild".
+
+# For development and test, be more verbose for build of this module.
+export KBUILD_VERBOSE := 1
+
+ifeq ($(KERNELRELEASE),)
+
+ifeq ($(wildcard $(ANDROID_BUILD_TOP)),)
+$(error Error: required directory not present: ANDROID_BUILD_TOP = $(ANDROID_BUILD_TOP))
+endif
+
+ifeq ($(wildcard $(ANDROID_PRODUCT_OUT)),)
+$(error Error: required directory not present: ANDROID_PRODUCT_OUT = $(ANDROID_PRODUCT_OUT))
+endif
+
+# Assume the source tree is where the running kernel was built
+# You should set KERNELDIR in the environment if it's elsewhere
+KERNEL_SRC_DIR ?= linux/kernel
+KERNELDIR ?= $(ANDROID_BUILD_TOP)/$(KERNEL_SRC_DIR)
+
+MODULE_SRC  := $(ANDROID_PRODUCT_OUT)/kernel_modules
+MODULE_DEST := $(ANDROID_PRODUCT_OUT)/root/lib/modules
+
+INSTALL_MOD_PATH=$(MODULE_SRC)
+export INSTALL_MOD_PATH
+
+# The current directory is passed to sub-makes as argument
+CURDIR := $(shell pwd)
+
+# Note: This export of KBUILD_OUTPUT is equivalent to -O=the_same
+export KBUILD_OUTPUT := $(ANDROID_PRODUCT_OUT)/linux/kernel
+
+# In this makefile (used only for external module builds), force
+# DF_RGX_EXT_MOD_BUILD=y to allow standalone module builds for development
+# and testing.
+
+default:	modules
+
+make_and_install: modules
+	$(MAKE) modules_install
+
+modules:
+	$(MAKE) DF_RGX_EXT_MOD_BUILD=y -C $(KERNELDIR) M=$(CURDIR) modules
+
+modules_install:
+	$(MAKE) DF_RGX_EXT_MOD_BUILD=y -C $(KERNELDIR) M=$(CURDIR) modules_install
+	cp -vpf df_rgx.ko $(MODULE_DEST)
+
+clean:
+	$(MAKE) DF_RGX_EXT_MOD_BUILD=y -C $(KERNELDIR) M=$(CURDIR) $@
+	rm -f m.log
+	rm -f $(MODULE_DEST)/df_rgx.ko
+
+
+cleanx:
+	rm -f .*.o.d built-in.o .built-in.o.cmd
+	rm -f *.o *~ core .depend .*.cmd *.ko *.mod.c
+	rm -f Module.symvers Module.markers modules.order
+	rm -f *.lst m.log
+	rm -rf .tmp_versions
+
+.PHONY: modules modules_install clean
+
+# Required once per boot before pushing module.
+adb_rw:
+	adb shell mount -o rw,remount /
+
+# Push the module to its home on the device-under-test.
+adb_push:
+	adb push df_rgx.ko /lib/modules/df_rgx.ko
+
+# Notes about module installation location:
+# Default directory is: /lib/modules/<kernel-version>/extra
+#
+# If specified, INSTALL_MOD_PATH is a prefix for the above (used only by default definition of MODLIB).
+#   Definition not necessary if MODLIB also being defined.
+# MODLIB - Initial part of directory specification.  Default is /lib/modules/<kernel-version>.
+# INSTALL_MOD_DIR - default is "extra" for out-of-tree, "kernel" for in-tree.
+# install-dir - Default definition is INSTALL_MOD_DIR if non-blank, else "extra".
+#   Same as INSTALL_MOD_DIR, but only for directory creation.
+#
+# The command line assignment overrides all makefile assignments.
+#
+## make MODLIB=${ANDROID_PRODUCT_OUT}/root/lib/modules INSTALL_MOD_DIR= install-dir=. modules_install
+
+endif
diff --git a/drivers/external_drivers/intel_media/graphics/dfrgx/dependencies/Makefile b/drivers/external_drivers/intel_media/graphics/dfrgx/dependencies/Makefile
new file mode 100644
index 0000000..c2368b2
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/dfrgx/dependencies/Makefile
@@ -0,0 +1,11 @@
+INCLUDES_RGX += \
+	-Idrivers/external_drivers/intel_media/graphics/dfrgx \
+	-Idrivers/external_drivers/intel_media/graphics/rgx/services/server/include \
+	-Idrivers/external_drivers/intel_media/graphics/rgx/services/shared/include \
+	-Idrivers/external_drivers/intel_media/graphics/rgx/services/include \
+	-Idrivers/external_drivers/intel_media/graphics/rgx/services/include/shared \
+	-Idrivers/external_drivers/intel_media/graphics/rgx/include \
+	-Idrivers/external_drivers/intel_media/graphics/rgx/hwdefs \
+	-Idrivers/external_drivers/intel_media/graphics/rgx/hwdefs/km \
+	-Idrivers/external_drivers/intel_media/graphics/rgx/services/system/rgx_nohw \
+	-Idrivers/external_drivers/intel_media/graphics/rgx/services/3rdparty/intel_devfreq
diff --git a/drivers/external_drivers/intel_media/graphics/dfrgx/dev_freq_attrib.c b/drivers/external_drivers/intel_media/graphics/dfrgx/dev_freq_attrib.c
new file mode 100644
index 0000000..1d526cf
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/dfrgx/dev_freq_attrib.c
@@ -0,0 +1,292 @@
+/**************************************************************************
+ * Copyright (c) 2012, Intel Corporation.
+ * All Rights Reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Javier Torres Castillo <javier.torres.castillo@intel.com>
+ */
+#include <linux/platform_device.h>
+#include <linux/stat.h>
+#include <linux/kernel.h>
+#include "dev_freq_attrib.h"
+#include "df_rgx_defs.h"
+#include "dev_freq_debug.h"
+#include "df_rgx_burst.h"
+
+extern struct gpu_freq_thresholds a_governor_profile[3];
+/**
+ * show_dynamic_turbo_state() - Read function for sysfs
+ * sys/devices/platform/dfrgx/dynamic_turbo_state.
+ * @dev: platform device.
+ * @attr: 1 Attribute associated to this entry.
+ * @buf: Buffer to write to.
+ */
+static ssize_t show_dynamic_turbo_state(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	struct busfreq_data *bfdata = dev_get_drvdata(dev);
+	int dt_state = dfrgx_burst_is_enabled(&bfdata->g_dfrgx_data);
+
+	DFRGX_DPF(DFRGX_DEBUG_HIGH, "%s: value = %d\n",
+				__func__, dt_state);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", dt_state);
+}
+
+/**
+ * store_dynamic_turbo_state() - Write function for sysfs
+ * sys/devices/platform/dfrgx/dynamic_turbo_state.
+ * @dev: platform device.
+ * @attr: 1 Attribute associated to this entry.
+ * @buf: Buffer to write to.
+ */
+static ssize_t store_dynamic_turbo_state(struct device *dev,
+			struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+	struct busfreq_data *bfdata = dev_get_drvdata(dev);
+	int dt_state;
+	int ret = -EINVAL;
+
+	ret = sscanf(buf, "%d", &dt_state);
+
+	DFRGX_DPF(DFRGX_DEBUG_HIGH, "%s:  count = %zu, ret = %u , state = %d\n",
+				__func__, count, ret, dt_state);
+	if (ret != 1)
+		goto out;
+
+	dfrgx_burst_set_enable(&bfdata->g_dfrgx_data, dt_state);
+
+	ret = count;
+out:
+	return ret;
+}
+
+/**
+ * show_profiling_state() - Read function for sysfs
+ * sys/devices/platform/dfrgx/profiling_state.
+ * @dev: platform device.
+ * @attr: 1 Attribute associated to this entry.
+ * @buf: Buffer to write to.
+ */
+static ssize_t show_profiling_state(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	struct busfreq_data *bfdata = dev_get_drvdata(dev);
+	int profiling_state = dfrgx_profiling_is_enabled(&bfdata->g_dfrgx_data);
+
+	DFRGX_DPF(DFRGX_DEBUG_HIGH, "%s: value = %d\n",
+				__func__, profiling_state);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", profiling_state);
+}
+
+/**
+ * store_profiling_state() - Write function for sysfs
+ * sys/devices/platform/dfrgx/profiling_state.
+ * @dev: platform device.
+ * @attr: 1 Attribute associated to this entry.
+ * @buf: Buffer to write to.
+ */
+static ssize_t store_profiling_state(struct device *dev,
+			struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+	struct busfreq_data *bfdata = dev_get_drvdata(dev);
+	int profiling_state;
+	int ret = -EINVAL;
+
+	ret = sscanf(buf, "%d", &profiling_state);
+
+	DFRGX_DPF(DFRGX_DEBUG_HIGH, "%s: count = %zu, ret = %u , state = %d\n",
+				__func__, count, ret, profiling_state);
+	if (ret != 1)
+		goto out;
+
+	dfrgx_profiling_set_enable(&bfdata->g_dfrgx_data, profiling_state);
+
+	ret = count;
+out:
+	return ret;
+}
+
+/**
+ * show_profiling_stats() - Read function for sysfs
+ * sys/devices/platform/dfrgx/profiling_show_stats.
+ * @dev: platform device.
+ * @attr: 1 Attribute associated to this entry.
+ * @buf: Buffer to write to.
+ */
+static ssize_t show_profiling_stats(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	int ret = 0;
+	DFRGX_DPF(DFRGX_DEBUG_HIGH, "%s\n",
+				__func__);
+
+	ret = gpu_profiling_records_show(buf);
+
+	return ret;
+}
+
+/**
+ * reset_profiling_stats() - Command function to reset stats.
+ * Use cat sysfs sys/devices/platform/dfrgx/profiling_reset_stats
+ * to perform this action.
+ * @dev: platform device.
+ * @attr: 1 Attribute associated to this entry.
+ * @buf: Buffer to write to. Ignored.
+ */
+static ssize_t reset_profiling_stats(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+
+	DFRGX_DPF(DFRGX_DEBUG_HIGH, "%s\n",
+				__func__);
+
+	gpu_profiling_records_restart();
+
+	return 0;
+}
+
+/**
+ * show_turbo_profile() - Read function for sysfs
+ * sys/devices/platform/dfrgx/custom_turbo_profile.
+ * @dev: platform device.
+ * @attr: 1 Attribute associated to this entry.
+ * @buf: Buffer to write to.
+ */
+static ssize_t show_turbo_profile(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	DFRGX_DPF(DFRGX_DEBUG_HIGH, "%s\n",
+				__func__);
+
+	return scnprintf(buf, PAGE_SIZE, "%d %d\n",
+		a_governor_profile[DFRGX_TURBO_PROFILE_CUSTOM].util_th_low,
+		a_governor_profile[DFRGX_TURBO_PROFILE_CUSTOM].util_th_high);
+}
+
+/**
+ * store_turbo_profile() - Write function for sysfs
+ * sys/devices/platform/dfrgx/custom_turbo_profile.
+ * @dev: platform device.
+ * @attr: 1 Attribute associated to this entry.
+ * @buf: Buffer to write to.
+ */
+static ssize_t store_turbo_profile(struct device *dev,
+			struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+	struct busfreq_data *bfdata = dev_get_drvdata(dev);
+	int ret = -EINVAL;
+	int low_th = 0, high_th = 0;
+
+	ret = sscanf(buf, "%d %d", &low_th, &high_th);
+
+	DFRGX_DPF(DFRGX_DEBUG_HIGH, "%s: count = %zu, ret = %u\n",
+				__func__, count, ret);
+	if (ret != 2)
+		goto out;
+
+	if (low_th > high_th) {
+		ret = 0;
+		goto out;
+	}
+
+	a_governor_profile[DFRGX_TURBO_PROFILE_CUSTOM].util_th_low = low_th;
+	a_governor_profile[DFRGX_TURBO_PROFILE_CUSTOM].util_th_high = high_th;
+
+	bfdata->g_dfrgx_data.g_profile_index = DFRGX_TURBO_PROFILE_CUSTOM;
+	ret = count;
+
+out:
+	return ret;
+}
+
+static const struct device_attribute devfreq_attrs[] = {
+	__ATTR(dynamic_turbo_state, S_IRUGO | S_IWUSR,
+		show_dynamic_turbo_state, store_dynamic_turbo_state),
+	__ATTR(profiling_state, S_IRUGO | S_IWUSR,
+		show_profiling_state, store_profiling_state),
+	__ATTR(profiling_show_stats, S_IRUGO,
+		show_profiling_stats, NULL),
+	__ATTR(profiling_reset_stats, S_IRUGO,
+		reset_profiling_stats, NULL),
+	__ATTR(custom_turbo_profile, S_IRUGO | S_IWUSR,
+		show_turbo_profile, store_turbo_profile),
+	__ATTR(empty, 0, NULL, NULL)
+};
+
+/**
+ * dev_freq_add_attributes_to_sysfs() - Creates all the sysfs entries
+ * for the specified platform device.
+ * @dev: platform device.
+ */
+int dev_freq_add_attributes_to_sysfs(struct device *device)
+{
+	int error = 0;
+	int i = 0;
+
+	if (!device)
+		return -1;
+
+	for (i = 0; devfreq_attrs[i].attr.mode != 0; i++) {
+		error = device_create_file(device, &devfreq_attrs[i]);
+		if (error) {
+			DFRGX_DPF(DFRGX_DEBUG_HIGH, "%s: could not"
+				"create device file error = %0x\n",
+				__func__, error);
+			break;
+		}
+	}
+
+	/*Were all the files created?*/
+	if (devfreq_attrs[i].attr.mode != 0) {
+		int j = 0;
+		for (j = 0; j < i; j++)
+			device_remove_file(device, &devfreq_attrs[i]);
+	}
+
+	return error;
+}
+
+/**
+ * dev_freq_remove_attributes_on_sysfs() - Removes all the sysfs entries
+ * for the specified
+ * platform device.
+ * @dev: platform device.
+ */
+void dev_freq_remove_attributes_on_sysfs(struct device *device)
+{
+	int i = 0;
+
+	if (device) {
+		for (i = 0; devfreq_attrs[i].attr.mode != 0; i++)
+			device_remove_file(device, &devfreq_attrs[i]);
+	}
+}
diff --git a/drivers/external_drivers/intel_media/graphics/dfrgx/dev_freq_attrib.h b/drivers/external_drivers/intel_media/graphics/dfrgx/dev_freq_attrib.h
new file mode 100644
index 0000000..15d5648
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/dfrgx/dev_freq_attrib.h
@@ -0,0 +1,31 @@
+/**************************************************************************
+ * Copyright (c) 2012, Intel Corporation.
+ * All Rights Reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Javier Torres Castillo <javier.torres.castillo@intel.com>
+ */
+
+#include <linux/device.h>
+
+int dev_freq_add_attributes_to_sysfs(struct device *device);
+void dev_freq_remove_attributes_on_sysfs(struct device *device);
diff --git a/drivers/external_drivers/intel_media/graphics/dfrgx/dev_freq_debug.h b/drivers/external_drivers/intel_media/graphics/dfrgx/dev_freq_debug.h
new file mode 100644
index 0000000..1fd6197
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/dfrgx/dev_freq_debug.h
@@ -0,0 +1,48 @@
+/**************************************************************************
+ * Copyright (c) 2012, Intel Corporation.
+ * All Rights Reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Javier Torres Castillo <javier.torres.castillo@intel.com>
+ */
+#if !defined DEVFREQ_DEBUG_H
+#define  DEVFREQ_DEBUG_H
+#include <linux/kernel.h>
+#define DF_RGX_DEV    "dfrgx"
+#define DFRGX_ALERT    KERN_ALERT DF_RGX_DEV ": "
+#define DFRGX_DEBUG_MASK	0x01
+#define DFRGX_DEBUG_HIGH	0x01
+#define DFRGX_DEBUG_MED		0x02
+#define DFRGX_DEBUG_LOW		0x04
+
+#define DFRGX_DEBUG 0
+
+#if (defined DFRGX_DEBUG) && DFRGX_DEBUG
+#define DFRGX_DPF(mask, ...) if (mask & DFRGX_DEBUG_MASK) \
+		{ \
+			printk(DFRGX_ALERT __VA_ARGS__); \
+		}
+#else
+#define DFRGX_DPF(mask, ...)
+#endif
+#endif /*DEVFREQ_DEBUG_H*/
+
diff --git a/drivers/external_drivers/intel_media/graphics/dfrgx/dev_freq_graphics_pm.c b/drivers/external_drivers/intel_media/graphics/dfrgx/dev_freq_graphics_pm.c
new file mode 100644
index 0000000..8f4e2d3
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/dfrgx/dev_freq_graphics_pm.c
@@ -0,0 +1,60 @@
+/**************************************************************************
+ * Copyright (c) 2012, Intel Corporation.
+ * All Rights Reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Javier Torres Castillo <javier.torres.castillo@intel.com>
+ */
+
+#include <rgxdf.h>
+#include "dev_freq_graphics_pm.h"
+#include "dev_freq_debug.h"
+
+/*dfrgx copy of RGX power state ON/OFF*/
+static int df_rgx_active = 0;
+
+/**
+ * df_rgx_is_active() - Determines if RGX device
+ * is either ON or OFF, it queries the state.
+ */
+int df_rgx_is_active(void)
+{
+	DFRGX_DPF(DFRGX_DEBUG_LOW, "%s\n",
+		__func__);
+
+	/*Initially We need to know the state of RGX ON/OFF,
+	 *then We keep a local copy of it
+	*/
+	if (rgx_is_device_powered()) {
+		DFRGX_DPF(DFRGX_DEBUG_MED, "%s: RGX is Powered ON\n",
+		__func__);
+		df_rgx_active = 1;
+	} else {
+		DFRGX_DPF(DFRGX_DEBUG_HIGH, "%s: RGX is Powered OFF,"
+		" dev freq will not change freq\n",
+		__func__);
+		df_rgx_active = 0;
+	}
+
+	return df_rgx_active;
+}
+
diff --git a/drivers/external_drivers/intel_media/graphics/dfrgx/dev_freq_graphics_pm.h b/drivers/external_drivers/intel_media/graphics/dfrgx/dev_freq_graphics_pm.h
new file mode 100644
index 0000000..371c097
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/dfrgx/dev_freq_graphics_pm.h
@@ -0,0 +1,34 @@
+/**************************************************************************
+ * Copyright (c) 2012, Intel Corporation.
+ * All Rights Reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Javier Torres Castillo <javier.torres.castillo@intel.com>
+ */
+
+#if !defined DEVFREQ_GRAPHICS_PM_H
+#define DEVFREQ_GRAPHICS_PM_H
+
+/*PM on dfrgx*/
+int df_rgx_is_active(void);
+
+#endif /*DEVFREQ_GRAPHICS_PM_H*/
diff --git a/drivers/external_drivers/intel_media/graphics/dfrgx/df_rgx.c b/drivers/external_drivers/intel_media/graphics/dfrgx/df_rgx.c
new file mode 100644
index 0000000..fc7ba54
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/dfrgx/df_rgx.c
@@ -0,0 +1,1015 @@
+/**************************************************************************
+ * Copyright (c) 2012, Intel Corporation.
+ * All Rights Reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *     Dale B. Stimson <dale.b.stimson@intel.com>
+ *     Javier Torres Castillo <javier.torres.castillo@intel.com>
+ *
+ * df_rgx.c - devfreq driver for IMG rgx graphics in Tangier.
+ * Description:
+ *  Early devfreq driver for rgx.  Utilization measures and on-demand
+ *  frequency control will be added later.  For now, only thermal
+ *  conditions and sysfs file inputs are taken into account.
+ *
+ *  This driver currently only allows frequencies between 200MHz and
+ *  533 MHz.
+ *
+ *  This driver observes the limits set by the values in:
+ *
+ *      sysfs file                           initial value (KHz)
+ *      ---------------------------------    -------------------
+ *      /sys/class/devfreq/dfrgx/min_freq    200000
+ *      /sys/class/devfreq/dfrgx/max_freq    320000, 533000 on B0
+ *  and provides current frequency from:
+ *      /sys/class/devfreq/dfrgx/cur_freq
+ *
+ *  With current development silicon, instability is a real possibility
+ *  at 400 MHz and higher.
+ *
+ *  While the driver is informed that a thermal condition exists, it
+ *  reduces the gpu frequency to 200 MHz.
+ *
+ *  Temporary:
+ *      No use of performance counters.
+ *      No utilization computation.
+ *      Uses governor "devfreq_powersave", although with throttling if hot.
+ *
+ *  It would be nice to have more sysfs or debugfs files for testing purposes.
+ *
+ *  All DEBUG printk messages start with "dfrgx:" for easy searching of
+ *  dmesg output.
+ *
+ *  To test with the module: insmod /lib/modules/dfrgx.ko
+ *  To unload module: rmmod dfrgx
+ *
+ *  See files under /sys/class/devfreq/dfrgx .
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+
+#include <linux/thermal.h>
+#include <asm/errno.h>
+
+#include <linux/opp.h>
+#include <linux/devfreq.h>
+
+#include <governor.h>
+
+#include <rgxdf.h>
+#include <ospm/gfx_freq.h>
+#include "dev_freq_debug.h"
+#include "dev_freq_graphics_pm.h"
+#include "df_rgx_defs.h"
+#include "df_rgx_burst.h"
+#define DFRGX_GLOBAL_ENABLE_DEFAULT 1
+
+#define DF_RGX_NAME_DEV    "dfrgx"
+#define DF_RGX_NAME_DRIVER "dfrgxdrv"
+
+#define DFRGX_HEADING DF_RGX_NAME_DEV ": "
+
+/* DF_RGX_POLLING_INTERVAL_MS - Polling interval in milliseconds.
+ * FIXME - Need to have this be 5 ms, but have to workaround HZ tick usage.
+ */
+#define DF_RGX_POLLING_INTERVAL_MS 50
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
+/**
+ * Potential governors:
+ *     #define GOVERNOR_TO_USE "performance"
+ *     #define GOVERNOR_TO_USE "simple_ondemand"
+ *     #define GOVERNOR_TO_USE "userspace"
+ *     #define GOVERNOR_TO_USE "powersave"
+ */
+#define GOVERNOR_TO_USE "simple_ondemand"
+#else
+/**
+ * Potential governors:
+ *     #define GOVERNOR_TO_USE devfreq_simple_ondemand
+ *     #define GOVERNOR_TO_USE devfreq_performance
+ *     #define GOVERNOR_TO_USE devfreq_powersave
+ */
+#define GOVERNOR_TO_USE devfreq_simple_ondemand
+#endif
+
+
+/*is tng a0 hw*/
+extern int is_tng_a0;
+
+/* df_rgx_created_dev - Pointer to created device, if any. */
+static struct platform_device *df_rgx_created_dev;
+
+void df_rgx_init_available_freq_table(struct device *dev);
+int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt);
+
+
+
+/**
+ * Module parameters:
+ *
+ * - can be updated (if permission allows) via writing:
+ *     /sys/module/dfrgx/parameters/<name>
+ * - can be set at module load time:
+ *     insmod /lib/modules/dfrgx.ko enable=0
+ * - For built-in drivers, can be on kernel command line:
+ *     dfrgx.enable=0
+ */
+
+/**
+ * module parameter "enable" is not writable in sysfs as there is presently
+ * no code to detect the transition between 0 and 1.
+ */
+static unsigned int mprm_enable = DFRGX_GLOBAL_ENABLE_DEFAULT;
+module_param_named(enable, mprm_enable, uint, S_IRUGO);
+
+static unsigned int mprm_verbosity = 2;
+module_param_named(verbosity, mprm_verbosity, uint, S_IRUGO|S_IWUSR);
+
+
+#define DRIVER_AUTHOR "Intel Corporation"
+#define DRIVER_DESC "devfreq driver for rgx graphics"
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+
+/**
+ *  MODULE_VERSION - Allows specification of a module version.
+ *  Version of form [<epoch>:]<version>[-<extra-version>].
+ *  Or for CVS/RCS ID version, everything but the number is stripped.
+ * <epoch>: A (small) unsigned integer which allows you to start versions
+ *          anew. If not mentioned, it's zero.  eg. "2:1.0" is after
+ *          "1:2.0".
+ * <version>: The <version> may contain only alphanumerics and the
+ *          character `.'.  Ordered by numeric sort for numeric parts,
+ *          ascii sort for ascii parts (as per RPM or DEB algorithm).
+ * <extraversion>: Like <version>, but inserted for local
+ *          customizations, eg "rh3" or "rusty1".
+
+ * Using this automatically adds a checksum of the .c files and the
+ * local headers in "srcversion".
+ *
+ * Also, if the module is under drivers/staging, this causes a warning to
+ * be issued:
+ *     <mname>: module is from the staging directory, the quality is unknown,
+ *     you have been warned.
+ *
+ * Example invocation:
+ *     MODULE_VERSION("0.1");
+ */
+
+/**
+ * df_rgx_bus_target - Request setting of a new frequency.
+ * @*p_freq: Input: desired frequency in KHz, output: realized freq in KHz.
+ * @flags: DEVFREQ_FLAG_* - not used by this implementation.
+ */
+static int df_rgx_bus_target(struct device *dev, unsigned long *p_freq,
+			      u32 flags)
+{
+	struct platform_device	*pdev;
+	struct busfreq_data	*bfdata;
+	struct df_rgx_data_s	*pdfrgx_data;
+	struct devfreq		*df;
+	unsigned long desired_freq = 0;
+	int ret = 0;
+	int adjust_curfreq = 0;
+	int set_freq = 0;
+	(void) flags;
+
+	pdev = container_of(dev, struct platform_device, dev);
+	bfdata = platform_get_drvdata(pdev);
+
+	if (bfdata && bfdata->devfreq) {
+		int gpu_defer_req = 0;
+		df = bfdata->devfreq;
+		pdfrgx_data = &bfdata->g_dfrgx_data;
+		if (!pdfrgx_data || !pdfrgx_data->g_initialized)
+			goto out;
+
+		desired_freq = *p_freq;
+
+		/* Governor changed, will be updated after updatedevfreq() */
+		if (strncmp(df->governor->name,
+			bfdata->prev_governor, DEVFREQ_NAME_LEN)) {
+			DFRGX_DPF(DFRGX_DEBUG_HIGH, "%s: Governor changed,"
+				" prev : %s, current : %s!\n",
+				__func__,
+				bfdata->prev_governor,
+				df->governor->name);
+
+			if (dfrgx_burst_is_enabled(pdfrgx_data))
+				dfrgx_burst_set_enable(&bfdata->g_dfrgx_data, 0);
+
+			df_rgx_set_governor_profile(df->governor->name, pdfrgx_data);
+			strncpy(bfdata->prev_governor, df->governor->name, DEVFREQ_NAME_LEN);
+
+			DFRGX_DPF(DFRGX_DEBUG_HIGH, "%s: Governors should be "
+					"the same now, prev : %s, current : %s!\n",
+					__func__,
+					bfdata->prev_governor,
+					df->governor->name);
+
+			set_freq = 1;
+		} else if (df->min_freq != pdfrgx_data->g_freq_mhz_min) {
+			int new_index = -1;
+
+			if (dfrgx_burst_is_enabled(pdfrgx_data))
+				dfrgx_burst_set_enable(&bfdata->g_dfrgx_data, 0);
+
+			new_index = df_rgx_get_util_record_index_by_freq(df->min_freq);
+			if (new_index > -1) {
+				mutex_lock(&pdfrgx_data->g_mutex_sts);
+				pdfrgx_data->g_freq_mhz_min = df->min_freq;
+				bfdata->gbp_cooldv_latest_freq_min = df->min_freq;
+				pdfrgx_data->g_min_freq_index = new_index;
+				mutex_unlock(&pdfrgx_data->g_mutex_sts);
+			}
+
+			DFRGX_DPF(DFRGX_DEBUG_HIGH, "%s:Min freq changed!,"
+					" prev_freq %lu, min_freq %lu\n",
+					__func__,
+					df->previous_freq,
+					df->min_freq);
+
+			if (df->previous_freq < df->min_freq) {
+				desired_freq = df->min_freq;
+				adjust_curfreq = 1;
+			}
+		} else if (df->max_freq != pdfrgx_data->g_freq_mhz_max) {
+			int new_index = -1;
+
+			if (dfrgx_burst_is_enabled(pdfrgx_data))
+				dfrgx_burst_set_enable(&bfdata->g_dfrgx_data, 0);
+
+			new_index = df_rgx_get_util_record_index_by_freq(df->max_freq);
+			if (new_index > -1) {
+				mutex_lock(&pdfrgx_data->g_mutex_sts);
+				pdfrgx_data->g_freq_mhz_max = df->max_freq;
+				bfdata->gbp_cooldv_latest_freq_max = df->max_freq;
+				pdfrgx_data->g_max_freq_index = new_index;
+				mutex_unlock(&pdfrgx_data->g_mutex_sts);
+			}
+
+			DFRGX_DPF(DFRGX_DEBUG_HIGH, "%s:Max freq changed!,"
+				" prev_freq %lu, max_freq %lu\n",
+				__func__,
+				df->previous_freq,
+				df->max_freq);
+
+			if (df->previous_freq > df->max_freq) {
+				desired_freq = df->max_freq;
+				adjust_curfreq  = 1;
+			}
+		} else if (!strncmp(df->governor->name,
+				"simple_ondemand", DEVFREQ_NAME_LEN)) {
+			*p_freq = df->previous_freq;
+			goto out;
+		}
+
+		/* set_freq changed on userspace governor*/
+		if (!strncmp(df->governor->name, "userspace", DEVFREQ_NAME_LEN)) {
+			/* update userspace freq*/
+			struct userspace_gov_data *data = df->data;
+
+			DFRGX_DPF(DFRGX_DEBUG_HIGH, "%s:userspace governor,"
+				" desired %lu, data->user_frequency %lu, input_freq = %lu\n",
+				__func__,
+				desired_freq,
+				data->user_frequency,
+				*p_freq);
+
+			data->valid = 1;
+			data->user_frequency = desired_freq;
+			set_freq = 1;
+		}
+
+		if (adjust_curfreq)
+			set_freq = 1;
+
+		if (set_freq) {
+			/* Freq will be reflected once GPU is back on*/
+			if (!df_rgx_is_active()) {
+				bfdata->bf_desired_freq = desired_freq;
+				mutex_lock(&bfdata->lock);
+				bfdata->b_need_freq_update = 1;
+				mutex_unlock(&bfdata->lock);
+				*p_freq = desired_freq;
+				gpu_defer_req = 1;
+			} else {
+				ret = df_rgx_set_freq_khz(bfdata, desired_freq);
+				if (ret > 0) {
+					*p_freq = ret;
+					ret = 0;
+				}
+			}
+		} else {
+			*p_freq = df->previous_freq;
+		}
+
+		if ((!strncmp(df->governor->name,
+				"simple_ondemand", DEVFREQ_NAME_LEN)
+			&& !dfrgx_burst_is_enabled(&bfdata->g_dfrgx_data))
+			|| gpu_defer_req)
+			dfrgx_burst_set_enable(&bfdata->g_dfrgx_data, 1);
+	}
+
+out:
+	return ret;
+}
+
+/**
+ * df_rgx_bus_get_dev_status() - Update current status, including:
+ * - stat->current_frequency - Frequency in KHz.
+ * - stat->total_time
+ * - stat->busy_time
+ * Note: total_time and busy_time have arbitrary units, as they are
+ * used only as ratios.
+ * Utilization is busy_time / total_time .
+ */
+static int df_rgx_bus_get_dev_status(struct device *dev,
+				      struct devfreq_dev_status *stat)
+{
+	struct busfreq_data *bfdata = dev_get_drvdata(dev);
+
+	DFRGX_DPF(DFRGX_DEBUG_LOW, "%s: entry\n", __func__);
+
+	stat->current_frequency = bfdata->bf_freq_mhz_rlzd * 1000;
+
+	/* FIXME - Compute real utilization statistics. */
+	stat->total_time = 100;
+	stat->busy_time = 50;
+
+	return 0;
+}
+
+/**
+ * tcd_get_max_state() - thermal cooling device callback get_max_state.
+ * @tcd: Thermal cooling device structure.
+ * @pms: Pointer to integer through which output value is stored.
+ *
+ * Invoked via interrupt/callback.
+ * Function return value: 0 if success, otherwise -error.
+ * Execution context: non-atomic
+ */
+static int tcd_get_max_state(struct thermal_cooling_device *tcd,
+	unsigned long *pms)
+{
+	*pms = THERMAL_COOLING_DEVICE_MAX_STATE - 1;
+
+	return 0;
+}
+
+/**
+ * tcd_get_cur_state() - thermal cooling device callback get_cur_state.
+ * @tcd: Thermal cooling device structure.
+ * @pcs: Pointer to integer through which output value is stored.
+ *
+ * Invoked via interrupt/callback.
+ * Function return value: 0 if success, otherwise -error.
+ * Execution context: non-atomic
+ */
+static int tcd_get_cur_state(struct thermal_cooling_device *tcd,
+	unsigned long *pcs)
+{
+	struct busfreq_data *bfdata = (struct busfreq_data *) tcd->devdata;
+	*pcs = bfdata->gbp_cooldv_state_cur;
+
+	return 0;
+}
+
+/**
+ * tcd_set_cur_state() - thermal cooling
+ * device callback set_cur_state.
+ * @tcd: Thermal cooling device structure.
+ * @cs: Input state.
+ *
+ * Invoked via interrupt/callback.
+ * Function return value: 0 if success, otherwise -error.
+ * Execution context: non-atomic
+ */
+static int tcd_set_cur_state(struct thermal_cooling_device *tcd,
+	unsigned long cs)
+{
+	struct busfreq_data *bfdata;
+	struct devfreq *df;
+	int ret = 0;
+
+	bfdata = (struct busfreq_data *) tcd->devdata;
+
+	if (cs >= THERMAL_COOLING_DEVICE_MAX_STATE)
+		cs = THERMAL_COOLING_DEVICE_MAX_STATE - 1;
+
+	/*If different state*/
+	if (bfdata->gbp_cooldv_state_cur != cs) {
+		int new_index = -1;
+
+		/* Dynamic turbo is not enabled so try
+		* to change the state
+		*/
+		if (!bfdata->g_dfrgx_data.g_enable) {
+
+			if(!df_rgx_is_active()) {
+				return -EBUSY;
+			}
+
+			/* If thermal state is specified explicitely
+			* then suspend burst/unburst thread
+			* because the user needs the GPU to run
+			* at specific frequency/thermal state level
+			*/
+
+			ret = df_rgx_set_freq_khz(bfdata,
+				bfdata->gpudata[cs].freq_limit);
+			if (ret <= 0)
+				return ret;
+		} else {
+			/* In this case we want to limit the max_freq
+			* to the thermal state limit
+			*/
+			int b_update_freq = 0;
+			df = bfdata->devfreq;
+
+			if (!cs) {
+				/* We are back in normal operation so set initial values*/
+				df->max_freq = bfdata->gbp_cooldv_latest_freq_max;
+				df->min_freq = bfdata->gbp_cooldv_latest_freq_min;
+				b_update_freq = 1;
+			}
+			else {
+				dfrgx_burst_set_enable(&bfdata->g_dfrgx_data, 0);
+				df->max_freq = bfdata->gpudata[cs].freq_limit;
+
+				if (df->previous_freq > df->max_freq)
+					b_update_freq = 1;
+
+				if (bfdata->gpudata[cs].freq_limit < df->min_freq) {
+					df->min_freq = bfdata->gpudata[cs].freq_limit;
+					new_index = df_rgx_get_util_record_index_by_freq(df->min_freq);
+
+					if (new_index > -1) {
+						bfdata->g_dfrgx_data.g_freq_mhz_min = df->min_freq;
+						bfdata->g_dfrgx_data.g_min_freq_index = new_index;
+					}
+					b_update_freq = 1;
+				}
+
+				new_index = df_rgx_get_util_record_index_by_freq(df->max_freq);
+
+				if (new_index > -1) {
+					bfdata->g_dfrgx_data.g_freq_mhz_max = df->max_freq;
+					bfdata->g_dfrgx_data.g_max_freq_index = new_index;
+				}
+
+				dfrgx_burst_set_enable(&bfdata->g_dfrgx_data, 1);
+			}
+
+			if (b_update_freq) {
+				/* Pick the min freq this time*/
+				bfdata->bf_desired_freq = df->min_freq;
+				mutex_lock(&bfdata->lock);
+				bfdata->b_need_freq_update = 1;
+				mutex_unlock(&bfdata->lock);
+			}
+		}
+
+		bfdata->gbp_cooldv_state_prev = bfdata->gbp_cooldv_state_cur;
+		bfdata->gbp_cooldv_state_cur = cs;
+
+		DFRGX_DPF(DFRGX_DEBUG_HIGH, "Thermal state changed from %d to %d\n",
+				bfdata->gbp_cooldv_state_prev,
+				bfdata->gbp_cooldv_state_cur);
+
+	}
+
+	return 0;
+}
+
+
+unsigned long voltage_gfx= 0.95;
+void df_rgx_init_available_freq_table(struct device *dev)
+{
+	int i = 0;
+	if (!is_tng_a0) {
+		for(i = 0; i < NUMBER_OF_LEVELS_B0; i++)
+			opp_add(dev, a_available_state_freq[i].freq, voltage_gfx);
+	} else {
+		for(i = 0; i < NUMBER_OF_LEVELS; i++)
+			opp_add(dev, a_available_state_freq[i].freq, voltage_gfx);
+	}
+}
+/**
+ * tcd_get_available_states() - thermal cooling device
+ * callback get_available_states.
+ * @tcd: Thermal cooling device structure.
+ * @pcs: Pointer to char through which output values are stored.
+ *
+ * Invoked via interrupt/callback.
+ * Function return value: 0 if success, otherwise -error.
+ * Execution context: non-atomic
+ */
+static int tcd_get_available_states(struct thermal_cooling_device *tcd,
+	char *buf)
+{
+	int ret = 0;
+
+	if (!is_tng_a0) {
+	ret = scnprintf(buf, PAGE_SIZE,
+			"%lu %lu %lu %lu %lu %lu %lu %lu\n",
+			a_available_state_freq[0].freq,
+			a_available_state_freq[1].freq,
+			a_available_state_freq[2].freq,
+			a_available_state_freq[3].freq,
+			a_available_state_freq[4].freq,
+			a_available_state_freq[5].freq,
+			a_available_state_freq[6].freq,
+			a_available_state_freq[7].freq);
+	} else {
+		ret = scnprintf(buf, PAGE_SIZE,
+			"%lu %lu %lu %lu\n",
+			a_available_state_freq[0].freq,
+			a_available_state_freq[1].freq,
+			a_available_state_freq[2].freq,
+			a_available_state_freq[3].freq);
+	}
+
+	return ret;
+}
+
+#if defined(THERMAL_DEBUG)
+/**
+ * tcd_get_force_state_override() - thermal cooling
+ * device callback get_force_state_override.
+ * @tcd: Thermal cooling device structure.
+ * @pcs: Pointer to char through which output values are stored.
+ *
+ * Invoked via interrupt/callback.
+ * Function return value: 0 if success, otherwise -error.
+ * Execution context: non-atomic
+ */
+static int tcd_get_force_state_override(struct thermal_cooling_device *tcd,
+	char *buf)
+{
+	struct busfreq_data *bfdata = (struct busfreq_data *) tcd->devdata;
+
+	return scnprintf(buf, PAGE_SIZE,
+			"%lu %lu %lu %lu\n",
+			bfdata->gpudata[0].freq_limit,
+			bfdata->gpudata[1].freq_limit,
+			bfdata->gpudata[2].freq_limit,
+			bfdata->gpudata[3].freq_limit);
+}
+
+/**
+ * tcd_set_force_state_override() - thermal cooling device
+ * callback set_force_state_override.
+ * @tcd: Thermal cooling device structure.
+ * @pcs: Pointer to char containing the input values.
+ *
+ * Invoked via interrupt/callback.
+ * Function return value: 0 if success, otherwise -error.
+ * Execution context: non-atomic
+ */
+static int tcd_set_force_state_override(struct thermal_cooling_device *tcd,
+	char *buf)
+{
+	struct busfreq_data *bfdata = (struct busfreq_data *) tcd->devdata;
+	unsigned long int freqs[THERMAL_COOLING_DEVICE_MAX_STATE];
+	unsigned long int prev_freq = DFRGX_FREQ_320_MHZ;
+	int i = 0;
+
+	if (!is_tng_a0)
+		prev_freq = DFRGX_FREQ_533_MHZ;
+
+	sscanf(buf, "%lu %lu %lu %lu\n", &freqs[0],
+			 &freqs[1],
+			 &freqs[2],
+			 &freqs[3]);
+
+	DFRGX_DPF(DFRGX_DEBUG_HIGH, "%s values: %lu %lu %lu %lu\n", __func__,
+			freqs[0],
+			freqs[1],
+			freqs[2],
+			freqs[3]);
+
+	for (i = 0; (i < THERMAL_COOLING_DEVICE_MAX_STATE) &&
+				df_rgx_is_valid_freq(freqs[i]) &&
+				prev_freq >= freqs[i]; i++) {
+		prev_freq = freqs[i];
+	}
+
+	if (i < THERMAL_COOLING_DEVICE_MAX_STATE)
+		return -EINVAL;
+
+	for (i = 0; i < THERMAL_COOLING_DEVICE_MAX_STATE; i++)
+		bfdata->gpudata[i].freq_limit = freqs[i];
+
+	return 0;
+}
+
+#endif /*THERMAL_DEBUG*/
+
+/**
+ * df_rgx_bus_exit() - An optional callback that is called when devfreq is
+ * removing the devfreq object due to error or from devfreq_remove_device()
+ * call. If the user has registered devfreq->nb at a notifier-head, this is
+ * the time to unregister it.
+ */
+static void df_rgx_bus_exit(struct device *dev)
+{
+	struct busfreq_data *bfdata = dev_get_drvdata(dev);
+	(void) bfdata;
+
+	DFRGX_DPF(DFRGX_DEBUG_LOW, "%s: entry\n", __func__);
+
+	/*  devfreq_unregister_opp_notifier(dev, bfdata->devfreq); */
+}
+
+
+static struct devfreq_dev_profile df_rgx_devfreq_profile = {
+	.initial_freq	= DF_RGX_INITIAL_FREQ_KHZ,
+	.polling_ms	= DF_RGX_POLLING_INTERVAL_MS,
+	.target		= df_rgx_bus_target,
+	.get_dev_status	= df_rgx_bus_get_dev_status,
+	.exit		= df_rgx_bus_exit,
+};
+
+
+/**
+ * busfreq_mon_reset() - Initialize or reset monitoring
+ * hardware state as desired.
+ */
+static void busfreq_mon_reset(struct busfreq_data *bfdata)
+{
+	/*  FIXME - reset monitoring? */
+}
+
+
+static int df_rgx_busfreq_pm_notifier_event(struct notifier_block *this,
+		unsigned long event, void *ptr)
+{
+	struct busfreq_data *bfdata = container_of(this, struct busfreq_data,
+						 pm_notifier);
+	DFRGX_DPF(DFRGX_DEBUG_LOW, "%s: entry\n", __func__);
+
+	switch (event) {
+	case PM_SUSPEND_PREPARE:
+		/* Set Fastest and Deactivate DVFS */
+		mutex_lock(&bfdata->lock);
+		bfdata->disabled = true;
+		mutex_unlock(&bfdata->lock);
+		return NOTIFY_OK;
+	case PM_POST_RESTORE:
+	case PM_POST_SUSPEND:
+		/* Reactivate */
+		mutex_lock(&bfdata->lock);
+		bfdata->disabled = false;
+		mutex_unlock(&bfdata->lock);
+		return NOTIFY_OK;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static int df_rgx_busfreq_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct busfreq_data *bfdata;
+	struct devfreq *df;
+	int error = 0;
+	int sts = 0;
+
+	DFRGX_DPF(DFRGX_DEBUG_LOW, "%s: entry\n", __func__);
+
+	/* dev_err(dev, "example.\n"); */
+
+	bfdata = kzalloc(sizeof(struct busfreq_data), GFP_KERNEL);
+	if (bfdata == NULL) {
+		dev_err(dev, "Cannot allocate memory.\n");
+		return -ENOMEM;
+	}
+
+	bfdata->pm_notifier.notifier_call = df_rgx_busfreq_pm_notifier_event;
+	bfdata->dev = dev;
+	mutex_init(&bfdata->lock);
+
+	platform_set_drvdata(pdev, bfdata);
+
+	busfreq_mon_reset(bfdata);
+
+	df = devfreq_add_device(dev, &df_rgx_devfreq_profile,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
+					   GOVERNOR_TO_USE, NULL);
+#else
+					   &GOVERNOR_TO_USE, NULL);
+#endif
+
+	if (IS_ERR(df)) {
+		sts = PTR_ERR(bfdata->devfreq);
+		goto err_000;
+	}
+
+	strncpy(bfdata->prev_governor, df->governor->name, DEVFREQ_NAME_LEN);
+
+	bfdata->devfreq = df;
+
+	df->previous_freq = DF_RGX_FREQ_KHZ_MIN_INITIAL;
+	bfdata->bf_prev_freq_rlzd = DF_RGX_FREQ_KHZ_MIN_INITIAL;
+
+	/* Set min/max freq depending on stepping/SKU */
+	if (RGXGetDRMDeviceID() == 0x1182) {
+		/* TNG SKU3 */
+		df->min_freq = DFRGX_FREQ_200_MHZ;
+		df->max_freq = DFRGX_FREQ_266_MHZ;
+	}
+	else if (is_tng_a0) {
+		df->min_freq = DF_RGX_FREQ_KHZ_MIN_INITIAL;
+		df->max_freq = DF_RGX_FREQ_KHZ_MAX_INITIAL;
+	}
+	else {
+		df->min_freq = DFRGX_FREQ_457_MHZ;
+		df->max_freq = DF_RGX_FREQ_KHZ_MAX;
+	}
+	DFRGX_DPF(DFRGX_DEBUG_HIGH, "%s: dev_id = 0x%x, min_freq = %lu, max_freq = %lu\n",
+		__func__, RGXGetDRMDeviceID(), df->min_freq, df->max_freq);
+
+	bfdata->gbp_cooldv_state_override = -1;
+
+	/* Thermal freq-state mapping after characterization */
+	bfdata->gpudata[0].freq_limit = DFRGX_FREQ_533_MHZ;
+	bfdata->gpudata[1].freq_limit = DFRGX_FREQ_457_MHZ;
+	bfdata->gpudata[2].freq_limit = DFRGX_FREQ_200_MHZ;
+	bfdata->gpudata[3].freq_limit = DFRGX_FREQ_200_MHZ;
+
+
+	df_rgx_init_available_freq_table(dev);
+
+
+	{
+		static const char *tcd_type = "gpu_burst";
+		static const struct thermal_cooling_device_ops tcd_ops = {
+			.get_max_state = tcd_get_max_state,
+			.get_cur_state = tcd_get_cur_state,
+			.set_cur_state = tcd_set_cur_state,
+#if defined(THERMAL_DEBUG)
+			.get_force_state_override =
+				tcd_get_force_state_override,
+			.set_force_state_override =
+				tcd_set_force_state_override,
+#else
+			.get_force_state_override = NULL,
+			.set_force_state_override = NULL,
+#endif
+			.get_available_states =
+				tcd_get_available_states,
+		};
+		struct thermal_cooling_device *tcdhdl;
+
+		/**
+		* Example: Thermal zone "type"s and temps milli-deg-C.
+		* These are just examples and are not specific
+		*to our usage.
+		*   type              temp
+		*   --------          -------
+		*   skin0             15944
+		*   skin1             22407
+		*   msicdie           37672
+		*
+		* See /sys/class/thermal/thermal_zone<i>
+		* See /sys/class/thermal/cooling_device<i>
+		*/
+
+		tcdhdl = thermal_cooling_device_register(
+			(char *) tcd_type, bfdata, &tcd_ops);
+		if (IS_ERR(tcdhdl)) {
+			DFRGX_DPF(DFRGX_DEBUG_HIGH, "Cooling device"
+				" registration failed: %ld\n",
+				-PTR_ERR(tcdhdl));
+			sts = PTR_ERR(tcdhdl);
+			goto err_001;
+		}
+		bfdata->gbp_cooldv_hdl = tcdhdl;
+	}
+
+	sts = register_pm_notifier(&bfdata->pm_notifier);
+	if (sts) {
+		dev_err(dev, "Failed to setup pm notifier\n");
+		goto err_002;
+	}
+
+	bfdata->g_dfrgx_data.bus_freq_data = bfdata;
+	bfdata->g_dfrgx_data.g_enable = mprm_enable;
+	bfdata->g_dfrgx_data.gpu_utilization_record_index = 
+		df_rgx_get_util_record_index_by_freq(df->min_freq);
+	bfdata->g_dfrgx_data.g_min_freq_index = 
+		df_rgx_get_util_record_index_by_freq(df->min_freq);
+	bfdata->g_dfrgx_data.g_freq_mhz_min = df->min_freq;
+	bfdata->g_dfrgx_data.g_max_freq_index =
+		df_rgx_get_util_record_index_by_freq(df->max_freq);
+	bfdata->g_dfrgx_data.g_freq_mhz_max = df->max_freq;
+	bfdata->gbp_cooldv_latest_freq_min = df->min_freq;
+	bfdata->gbp_cooldv_latest_freq_max = df->max_freq;
+
+	df_rgx_set_governor_profile(df->governor->name,
+			&bfdata->g_dfrgx_data);
+
+	error = dfrgx_burst_init(&bfdata->g_dfrgx_data);
+
+	if (error) {
+		DFRGX_DPF(DFRGX_DEBUG_HIGH, "%s: dfrgx_burst_init failed!"
+		", no utilization data\n", __func__);
+		sts = -1;
+		goto err_002;
+	}
+
+	/*Set the initial frequency at 457MHZ in B0/ 200MHZ otherwise*/
+	{
+		int ret = 0;
+		if (!df_rgx_is_active()) {
+				/*Change the freq once it is active*/
+				bfdata->bf_desired_freq = df->min_freq;
+				mutex_lock(&bfdata->lock);
+				bfdata->b_need_freq_update = 1;
+				mutex_unlock(&bfdata->lock);
+		} else {
+			ret = df_rgx_set_freq_khz(bfdata, df->min_freq);
+			if (ret < 0) {
+				DFRGX_DPF(DFRGX_DEBUG_HIGH,
+					"%s: could not initialize freq: %0x error\n",
+					__func__, ret);
+			}
+		}
+	}
+
+	DFRGX_DPF(DFRGX_DEBUG_HIGH, "%s: success\n", __func__);
+
+	return 0;
+
+err_002:
+	thermal_cooling_device_unregister(bfdata->gbp_cooldv_hdl);
+	bfdata->gbp_cooldv_hdl = NULL;
+err_001:
+	devfreq_remove_device(bfdata->devfreq);
+err_000:
+	platform_set_drvdata(pdev, NULL);
+	mutex_destroy(&bfdata->lock);
+	kfree(bfdata);
+	return sts;
+}
+
+static int df_rgx_busfreq_remove(struct platform_device *pdev)
+{
+	struct busfreq_data *bfdata = platform_get_drvdata(pdev);
+
+	dfrgx_burst_deinit(&bfdata->g_dfrgx_data);
+
+	unregister_pm_notifier(&bfdata->pm_notifier);
+	devfreq_remove_device(bfdata->devfreq);
+	mutex_destroy(&bfdata->lock);
+	kfree(bfdata);
+
+	return 0;
+}
+
+static int df_rgx_busfreq_resume(struct device *dev)
+{
+	struct busfreq_data *bfdata = dev_get_drvdata(dev);
+
+	DFRGX_DPF(DFRGX_DEBUG_LOW, "%s: entry\n", __func__);
+
+	busfreq_mon_reset(bfdata);
+	return 0;
+}
+
+
+static const struct dev_pm_ops df_rgx_busfreq_pm = {
+	.resume	= df_rgx_busfreq_resume,
+};
+
+static const struct platform_device_id df_rgx_busfreq_id[] = {
+	{ DF_RGX_NAME_DEV, 0 },
+	{ "", 0 },
+};
+
+
+static struct platform_driver df_rgx_busfreq_driver = {
+	.probe	= df_rgx_busfreq_probe,
+	.remove	= df_rgx_busfreq_remove,
+	.id_table = df_rgx_busfreq_id,
+	.driver = {
+		.name	= DF_RGX_NAME_DRIVER,
+		.owner	= THIS_MODULE,
+		.pm	= &df_rgx_busfreq_pm,
+	},
+};
+
+
+static struct platform_device * __init df_rgx_busfreq_device_create(void)
+{
+	struct platform_device *pdev;
+	int ret;
+
+	pdev = platform_device_alloc(DF_RGX_NAME_DEV, -1);
+	if (!pdev) {
+		pr_err("%s: platform_device_alloc failed\n",
+			DF_RGX_NAME_DEV);
+		return NULL;
+	}
+
+	ret = platform_device_add(pdev);
+	if (ret < 0) {
+		pr_err("%s: platform_device_add failed\n",
+			DF_RGX_NAME_DEV);
+		platform_device_put(pdev);
+		return ERR_PTR(ret);
+	}
+
+	return pdev;
+}
+
+static int __init df_rgx_busfreq_init(void)
+{
+	struct platform_device *pdev;
+	int ret;
+
+	if (!mprm_enable) {
+		DFRGX_DPF(DFRGX_DEBUG_HIGH, "%s: %s: disabled\n",
+			DF_RGX_NAME_DRIVER, __func__);
+		return -ENODEV;
+	}
+
+	DFRGX_DPF(DFRGX_DEBUG_HIGH, "%s: %s: starting\n",
+		DF_RGX_NAME_DRIVER, __func__);
+
+	pdev = df_rgx_busfreq_device_create();
+	if (IS_ERR(pdev))
+		return PTR_ERR(pdev);
+	if (!pdev)
+		return -ENOMEM;
+
+	df_rgx_created_dev = pdev;
+
+	ret = platform_driver_register(&df_rgx_busfreq_driver);
+
+	DFRGX_DPF(DFRGX_DEBUG_HIGH, "%s: %s: success\n",
+		DF_RGX_NAME_DRIVER, __func__);
+
+	return ret;
+}
+late_initcall(df_rgx_busfreq_init);
+
+static void __exit df_rgx_busfreq_exit(void)
+{
+	struct platform_device *pdev = df_rgx_created_dev;
+	struct busfreq_data *bfdata = platform_get_drvdata(pdev);
+
+	DFRGX_DPF(DFRGX_DEBUG_LOW, "%s:\n", __func__);
+
+	if (bfdata && bfdata->gbp_cooldv_hdl) {
+		thermal_cooling_device_unregister(bfdata->gbp_cooldv_hdl);
+		bfdata->gbp_cooldv_hdl = NULL;
+	}
+
+	platform_driver_unregister(&df_rgx_busfreq_driver);
+
+	/* Most state reset is done by function df_rgx_busfreq_remove,
+	 * including invocation of:
+	 * - unregister_pm_notifier
+	 * - devfreq_remove_device
+	 * - mutex_destroy(&bfdata->lock);
+	 * - kfree(bfdata);
+	*/
+
+	if (pdev)
+		platform_device_unregister(pdev);
+}
+module_exit(df_rgx_busfreq_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("RGX busfreq driver with devfreq framework");
+MODULE_AUTHOR("Dale B Stimson <dale.b.stimson@intel.com>");
diff --git a/drivers/external_drivers/intel_media/graphics/dfrgx/df_rgx_burst.c b/drivers/external_drivers/intel_media/graphics/dfrgx/df_rgx_burst.c
new file mode 100644
index 0000000..6f7bbc2
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/dfrgx/df_rgx_burst.c
@@ -0,0 +1,916 @@
+/**************************************************************************
+ * Copyright (c) 2012, Intel Corporation.
+ * All Rights Reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Dale B. Stimson <dale.b.stimson@intel.com>
+ *    Jari Luoma-aho  <jari.luoma-aho@intel.com>
+ *    Jari Nippula    <jari.nippula@intel.com>
+ *    Javier Torres Castillo <javier.torres.castillo@intel.com>
+ */
+
+#include <linux/devfreq.h>
+#include <ospm/gfx_freq.h>
+#include <dfrgx_utilstats.h>
+#include <rgxdf.h>
+#include "dev_freq_debug.h"
+#include "df_rgx_defs.h"
+#include "df_rgx_burst.h"
+#include "dfrgx_interface.h"
+#include "dev_freq_attrib.h"
+
+#define MAX_NUM_SAMPLES		10
+
+/*Profiling Information - */
+struct gpu_profiling_record a_profiling_info[NUMBER_OF_LEVELS_B0];
+
+/**
+ * gpu_profiling_records_init() - Initializes profiling array.
+ */
+static void gpu_profiling_records_init(void)
+{
+	gpu_profiling_records_restart();
+}
+
+/**
+ * gpu_profiling_records_restart() - Memset to 0, profiling stats array.
+ */
+void gpu_profiling_records_restart(void)
+{
+	memset(a_profiling_info, 0, sizeof(a_profiling_info));
+}
+
+/**
+ * gpu_profiling_records_update_entry() - Updates the profiling calculations.
+ * @util_index: Frequency level index.
+ * @is_current_entry: 1 if We want to update the current freq level,
+ * 0 if it the previous.
+ * realized frequency in KHz.
+ */
+static void gpu_profiling_records_update_entry(int util_index ,
+						int is_current_entry)
+{
+	long long time_diff_ms;
+	ktime_t time_now = ktime_get();
+
+	DFRGX_DPF(DFRGX_DEBUG_LOW, "%s: index %d, current %d\n",
+		__func__, util_index, is_current_entry);
+
+	if (is_current_entry) {
+		a_profiling_info[util_index].last_timestamp_ns = time_now;
+	} else {
+		ktime_t time_diff_ns = ktime_sub(time_now,
+			a_profiling_info[util_index].last_timestamp_ns);
+		time_diff_ms = ktime_to_ms(time_diff_ns);
+		a_profiling_info[util_index].time_ms += time_diff_ms;
+	}
+
+}
+
+/**
+ * gpu_profiling_records_show() - Shows profiling stats.
+ * @buf: Buffer for sysfs entry.
+ */
+int gpu_profiling_records_show(char *buf)
+{
+	int i;
+	int ret = 0;
+
+	DFRGX_DPF(DFRGX_DEBUG_HIGH, "%s\n",
+		__func__);
+
+	for (i = 0; i < NUMBER_OF_LEVELS_B0; i++) {
+		ret += sprintf((buf+ret), "Time for %lu KHZ : %llu ms\n",
+			a_available_state_freq[i].freq,
+			a_profiling_info[i].time_ms);
+	}
+	return ret;
+}
+
+/**
+ * set_desired_frequency_khz() - Set gpu frequency.
+ * @bfdata: Pointer to private data structure
+ * @freq_khz: Desired frequency in KHz (not MHz).
+ * Returns: <0 if error, 0 if success, but no frequency update, or
+ * realized frequency in KHz.
+ */
+static long set_desired_frequency_khz(struct busfreq_data *bfdata,
+	unsigned long freq_khz)
+{
+	int sts;
+	struct devfreq *df;
+	unsigned long freq_req;
+	unsigned long freq_limited;
+	unsigned long freq_mhz;
+	unsigned long prev_freq = 0;
+	unsigned int freq_mhz_quantized;
+	u32 freq_code;
+	int prev_util_record_index = -1;
+	int gfx_pcs_result = 0;
+
+	sts = 0;
+
+	/* Warning - this function may be called from devfreq_add_device,
+	 * but if it is, bfdata->devfreq will not yet be set.
+	 */
+	df = bfdata->devfreq;
+
+	if (!df) {
+		/*
+		* Initial call, so set initial frequency.
+		* Limits from min_freq
+		* and max_freq would not have been applied by caller.
+		*/
+		freq_req = DF_RGX_INITIAL_FREQ_KHZ;
+	} else if ((freq_khz == 0) &&  bfdata->bf_prev_freq_rlzd)
+		freq_req =  bfdata->bf_prev_freq_rlzd;
+	else
+		freq_req = freq_khz;
+
+	DFRGX_DPF(DFRGX_DEBUG_HIGH, "%s: entry, caller requesting %luKHz\n",
+		__func__, freq_khz);
+
+	if (freq_req < DF_RGX_FREQ_KHZ_MIN)
+		freq_limited = DF_RGX_FREQ_KHZ_MIN;
+	else if (freq_req > DF_RGX_FREQ_KHZ_MAX)
+		freq_limited = DF_RGX_FREQ_KHZ_MAX;
+	else
+		freq_limited = freq_req;
+
+	freq_mhz = freq_limited / 1000;
+
+	/* follow the right lock order: pvr_power_lock -> bus_freq_data_lock */
+	gfx_pcs_result = RGXPreClockSpeed();
+	if (gfx_pcs_result) {
+		DFRGX_DPF(DFRGX_DEBUG_HIGH,
+			"%s: Could not perform pre-clock-speed change\n",
+			__func__);
+		sts = -EBUSY;
+		goto out;
+	}
+
+	mutex_lock(&bfdata->lock);
+
+	if (bfdata->disabled)
+		goto lock_out;
+
+	freq_code = gpu_freq_mhz_to_code(freq_mhz, &freq_mhz_quantized);
+
+	if ((bfdata->bf_freq_mhz_rlzd != freq_mhz_quantized) || bfdata->b_resumed ) {
+		sts = gpu_freq_set_from_code(freq_code);
+		if (sts < 0) {
+			DFRGX_DPF(DFRGX_DEBUG_MED,
+				"%s: error (%d) from gpu_freq_set_from_code for %dMHz\n",
+				__func__, sts, freq_mhz_quantized);
+			goto lock_out;
+		} else {
+			bfdata->bf_freq_mhz_rlzd = sts;
+			DFRGX_DPF(DFRGX_DEBUG_HIGH, "%s: freq MHz"
+				"(requested, realized) = %u, %lu\n",
+				__func__, freq_mhz_quantized,
+				bfdata->bf_freq_mhz_rlzd);
+			bfdata->b_resumed = 0;
+			bfdata->b_need_freq_update = 0;
+		}
+
+		/*
+		 * Inform gfx driver that clock speed changed.
+		 * This operation can't be failed
+		 */
+		RGXUpdateClockSpeed((bfdata->bf_freq_mhz_rlzd * 1000000));
+
+		if (df) {
+
+			prev_freq = bfdata->bf_prev_freq_rlzd;
+			/*
+			 * Setting df->previous_freq will be redundant
+			 * when called from target dispatch function, but
+			 * not otherwise.
+			 */
+
+			bfdata->bf_prev_freq_rlzd =
+				bfdata->bf_freq_mhz_rlzd * 1000;
+			df->previous_freq = bfdata->bf_prev_freq_rlzd;
+			bfdata->bf_desired_freq =
+				bfdata->bf_prev_freq_rlzd;
+		}
+	}
+
+	sts = bfdata->bf_freq_mhz_rlzd * 1000;
+
+	/* Update our record accordingly*/
+	bfdata->g_dfrgx_data.gpu_utilization_record_index =
+		df_rgx_get_util_record_index_by_freq(sts);
+
+	if (bfdata->g_dfrgx_data.g_profiling_enable) {
+		/* for profiling purposes*/
+		prev_util_record_index =
+			df_rgx_get_util_record_index_by_freq(prev_freq);
+		if ((prev_util_record_index >= 0)
+			&& (prev_util_record_index < NUMBER_OF_LEVELS_B0))
+			gpu_profiling_records_update_entry(prev_util_record_index, 0);
+
+		if (bfdata->g_dfrgx_data.gpu_utilization_record_index >= 0)
+			gpu_profiling_records_update_entry(bfdata->g_dfrgx_data.gpu_utilization_record_index, 1);
+	}
+
+lock_out:
+	mutex_unlock(&bfdata->lock);
+	RGXPostClockSpeed();
+out:
+	return sts;
+}
+
+/**
+ * df_rgx_set_freq_khz() - Set gpu frequency, public version of set_desired_frequency_khz .
+ * @bfdata: Pointer to private data structure
+ * @freq_khz: Desired frequency in KHz (not MHz).
+ * Returns: <0 if error, 0 if success, but no frequency update, or
+ * realized frequency in KHz.
+ */
+long df_rgx_set_freq_khz(struct busfreq_data *bfdata,
+	unsigned long freq_khz)
+{
+	struct devfreq *df = bfdata->devfreq;
+	unsigned long ret = 0;
+
+	ret = set_desired_frequency_khz(bfdata, freq_khz);
+
+	if (!df)
+		goto go_ret;
+
+	if (!strncmp(df->governor->name, "userspace", DEVFREQ_NAME_LEN)) {
+		/* update userspace freq*/
+		struct userspace_gov_data *data = df->data;
+
+		data->user_frequency = ret;
+	}
+
+go_ret:
+	return ret;
+}
+
+/**
+ * wake_thread() - Wake the work thread.
+ * @df_rgx_data_s: dfrgx burst handle.
+ */
+static void dfrgx_add_sample_data(struct df_rgx_data_s *g_dfrgx,
+				struct gpu_util_stats util_stats_sample)
+{
+	static int num_samples;
+	static int sum_samples_active;
+	int ret = 0;
+	int active_high = util_stats_sample.ui64GpuStatActiveHigh;
+
+	/*There might be a case where util_stats_sample.ui64GpuStatCumulative is actually zero
+	* due to the getutilstats functionality assumes certain conditions in the driver making low
+	* high and blocked values actually 0 */
+	if (util_stats_sample.ui64GpuStatCumulative == 0) {
+		DFRGX_DPF(DFRGX_DEBUG_HIGH, "%s: Ignoring util stats from gpu!\n",
+				__func__);
+		return;
+	}
+
+	/* convert ui64GpuStatActiveHigh time period to a 0.01% precision ratio */
+	active_high *= 10000;
+	do_div(active_high, util_stats_sample.ui64GpuStatCumulative);
+
+	sum_samples_active += (active_high / 100);
+	num_samples++;
+
+	/* When we collect MAX_NUM_SAMPLES samples we need to decide
+	* bursting or unbursting
+	*/
+	if (num_samples == MAX_NUM_SAMPLES) {
+		int average_active_util = sum_samples_active / MAX_NUM_SAMPLES;
+		unsigned int burst = DFRGX_NO_BURST_REQ;
+
+		/* Reset */
+		sum_samples_active = 0;
+		num_samples = 0;
+
+		DFRGX_DPF(DFRGX_DEBUG_LOW, "%s: Average Active: %d !\n",
+		__func__, average_active_util);
+
+		burst = df_rgx_request_burst(g_dfrgx, average_active_util);
+
+		if (burst == DFRGX_NO_BURST_REQ) {
+			DFRGX_DPF(DFRGX_DEBUG_LOW, "%s: NO BURST REQ!\n",
+				__func__);
+		} else if (df_rgx_is_active()) {
+			ret = set_desired_frequency_khz(g_dfrgx->bus_freq_data,
+				a_available_state_freq[g_dfrgx->gpu_utilization_record_index].freq);
+
+			if (ret <= 0) {
+				DFRGX_DPF(DFRGX_DEBUG_LOW, "%s: Failed to burst/unburst at : %lu !\n",
+				__func__, a_available_state_freq[g_dfrgx->gpu_utilization_record_index].freq);
+
+			}
+		}
+
+	}
+}
+
+
+/**
+ * wake_thread() - Wake the work thread.
+ * @df_rgx_data_s: dfrgx burst handle.
+ */
+static void wake_thread(struct df_rgx_data_s *g_dfrgx)
+{
+	DFRGX_DPF(DFRGX_DEBUG_LOW, "%s: time to wake up!\n",
+			__func__);
+	if (g_dfrgx->g_task)
+		wake_up_process(g_dfrgx->g_task);
+}
+
+/**
+ * df_rgx_action() - Perform utilization stats polling and freq burst.
+ * @df_rgx_data_s: dfrgx burst handle.
+ */
+static int df_rgx_action(struct df_rgx_data_s *g_dfrgx)
+{
+	struct gpu_util_stats util_stats;
+
+	/*Initialize the data*/
+	memset(&util_stats, 0, sizeof(struct gpu_util_stats));
+
+	DFRGX_DPF(DFRGX_DEBUG_LOW, "%s: !\n",
+		__func__);
+
+	/*Let's check if We can query the utilisation numbers now*/
+	if (!g_dfrgx->g_initialized) {
+		DFRGX_DPF(DFRGX_DEBUG_HIGH, "%s: Not initialized yet !\n",
+		__func__);
+		goto go_out;
+	}
+
+	if (!df_rgx_is_active()) {
+		DFRGX_DPF(DFRGX_DEBUG_LOW, "%s: RGX not Active!\n",
+		__func__);
+		goto go_out;
+	}
+
+	/* This will happen when min or max freq are modified or using userpace governor*/
+	if(g_dfrgx->bus_freq_data->bf_desired_freq && g_dfrgx->bus_freq_data->b_need_freq_update)
+	{
+		int ret = 0;
+
+		DFRGX_DPF(DFRGX_DEBUG_HIGH, "%s: desiredfreq: %lu, prevrlzd %lu, prevfreq %lu !\n",
+			__func__,g_dfrgx->bus_freq_data->bf_desired_freq,
+			g_dfrgx->bus_freq_data->bf_prev_freq_rlzd, 
+			g_dfrgx->bus_freq_data->devfreq->previous_freq);
+
+		ret = set_desired_frequency_khz(g_dfrgx->bus_freq_data,
+			g_dfrgx->bus_freq_data->bf_desired_freq);
+
+		if (ret <= 0) {
+			DFRGX_DPF(DFRGX_DEBUG_LOW, "%s: Failed to set at : %lu !\n",
+			__func__,g_dfrgx->bus_freq_data->bf_desired_freq);
+		}
+		/*freq was changed and We don't need this thread working for now, so let it be disabled*/
+		else if (dfrgx_burst_is_enabled(g_dfrgx)
+				&& g_dfrgx->g_profile_index != DFRGX_TURBO_PROFILE_SIMPLE_ON_DEMAND
+				&& g_dfrgx->g_profile_index != DFRGX_TURBO_PROFILE_CUSTOM) {
+			/*freq was changed and We don't need this thread
+			* working for now, so let it be disabled
+			*/
+
+			dfrgx_burst_set_enable(g_dfrgx, 0);
+			return 1;
+		}
+	}
+
+	/*So don't need to do any utilization polling when
+	* simple_on_demand is not the current governor
+	*/
+	if (g_dfrgx->g_profile_index != DFRGX_TURBO_PROFILE_SIMPLE_ON_DEMAND
+		&& g_dfrgx->g_profile_index != DFRGX_TURBO_PROFILE_CUSTOM)
+		return 1;
+
+	if (gpu_rgx_get_util_stats(&util_stats)) {
+		DFRGX_DPF(DFRGX_DEBUG_LOW, "%s: Active: %llu, "
+			"Blocked: %llu, Idle: %llu !\n",
+			__func__,
+			util_stats.ui64GpuStatActiveHigh,
+			util_stats.ui64GpuStatBlocked,
+			util_stats.ui64GpuStatIdle);
+
+		dfrgx_add_sample_data(g_dfrgx, util_stats);
+
+	} else {
+		DFRGX_DPF(DFRGX_DEBUG_MED, "%s: Invalid Util stats !\n",
+		__func__);
+	}
+go_out:
+	return 1;
+}
+
+/**
+ * work_thread_loop() - the main loop for the worker thread.
+ * @pvd: The "void *" private data provided to kthread_create.
+ *       This can be cast to the gbprv handle.
+ *
+ * Upon return, thread will exit.
+ */
+static int df_rgx_worker_thread(void *pvd)
+{
+	struct df_rgx_data_s *g_dfrgx = (struct df_rgx_data_s *) pvd;
+	int rva;
+
+	DFRGX_DPF(DFRGX_DEBUG_LOW, "%s: kernel thread started !\n",
+		__func__);
+
+	for (;;) {
+
+		/**
+		 * Synchronization is via a call to:
+		 * int wake_up_process(struct task_struct *p)
+		 */
+		set_current_state(TASK_INTERRUPTIBLE);
+
+		schedule();
+
+		rva = df_rgx_action(g_dfrgx);
+		if (rva == 0) {
+			/* Thread exit requested */
+			break;
+		}
+
+		if (kthread_should_stop())
+			break;
+	}
+
+	DFRGX_DPF(DFRGX_DEBUG_LOW, "%s: kernel thread stopping !\n",
+		__func__);
+
+	return 0;
+}
+
+/**
+ * df_rgx_create_worker_thread() - Create work thread.
+ * @df_rgx_data_s: dfrgx burst handle.
+ *
+ * This thread is not truely a "real-time" thread, in that there will be no
+ * catastrophe if its execution is somewhat delayed.  However, knowing that
+ * the nominal execution interval for this timer-woken thread is 5 msecs and
+ * knowing that the thread execution will be very short, it seems appropriate
+ * to request an elevated scheduling priority.  Perhaps a consensus will be
+ * reached as to whether or not this is truely a good idea.
+ *
+ * Function return value:  < 0 if error, otherwise 0.
+ */
+static int df_rgx_create_worker_thread(struct df_rgx_data_s *g_dfrgx)
+{
+
+	struct task_struct *tskhdl;
+	DFRGX_DPF(DFRGX_DEBUG_LOW, "%s: creating the thread !\n",
+		__func__);
+
+	if (!g_dfrgx->g_task) {
+		tskhdl = kthread_create(df_rgx_worker_thread,
+			(void *) g_dfrgx, "kdfrgx");
+
+		/* Keep a reference on task structure. */
+		get_task_struct(tskhdl);
+
+		if (IS_ERR(tskhdl) || !tskhdl) {
+			DFRGX_DPF(DFRGX_DEBUG_HIGH, "%s: kernel thread"
+			" create fail !\n",
+			__func__);
+			return PTR_ERR(tskhdl);
+		}
+
+		g_dfrgx->g_task = tskhdl;
+		wake_up_process(tskhdl);
+
+	}
+
+	return 0;
+}
+
+/**
+ * df_rgx_stop_worker_thread - kill the worker thread.
+ * @df_rgx_data_s: dfrgx burst handle.
+ */
+static void df_rgx_stop_worker_thread(struct df_rgx_data_s *g_dfrgx)
+{
+	if (g_dfrgx->g_task) {
+		/* kthread_stop will not return until the thread is gone. */
+		kthread_stop(g_dfrgx->g_task);
+
+		put_task_struct(g_dfrgx->g_task);
+		g_dfrgx->g_task = NULL;
+	}
+}
+
+/**
+ * hrt_start() - start (or restart) timer.
+ * @df_rgx_data_s: dfrgx burst handle.
+ */
+static void hrt_start(struct df_rgx_data_s *g_dfrgx)
+{
+	if (g_dfrgx->g_enable) {
+		if (g_dfrgx->g_timer_is_enabled) {
+			/* Due to the gbp_timer is auto-restart timer
+			 * in most case, we must use hrtimer_cancel
+			 * it at first if it is in active state, to avoid
+			 * hitting the BUG_ON(timer->state !=
+			 * HRTIMER_STATE_CALLBACK) in hrtimer.c.
+			*/
+			hrtimer_cancel(&g_dfrgx->g_timer);
+		} else {
+			g_dfrgx->g_timer_is_enabled = 1;
+		}
+
+		hrtimer_start(&g_dfrgx->g_timer, g_dfrgx->g_hrt_period,
+			HRTIMER_MODE_REL);
+	}
+}
+
+/**
+ * hrt_cancel() - cancel a timer.
+ * @df_rgx_data_s: dfrgx burst handle.
+ */
+static void hrt_cancel(struct df_rgx_data_s *g_dfrgx)
+{
+	/* The timer can be restarted with hrtimer_start. */
+	hrtimer_cancel(&g_dfrgx->g_timer);
+
+	g_dfrgx->g_timer_is_enabled = 0;
+}
+
+/**
+ * hrt_event_processor() - Process timer-driven things.
+ * Called by kernel hrtimer system when the timer expires.
+ * @hrthdl: Pointer to the associated hrtimer struct.
+ *
+ * Execution context: hard irq level.
+ * Invoked via interrupt/callback.
+ */
+static enum hrtimer_restart hrt_event_processor(struct hrtimer *hrthdl)
+{
+	struct df_rgx_data_s *g_dfrgx =
+		container_of(hrthdl, struct df_rgx_data_s, g_timer);
+	ktime_t mc_now;
+
+	DFRGX_DPF(DFRGX_DEBUG_LOW, "%s: time is up! -- init %d,"
+				" enable %d, suspended %d !\n",
+				__func__,
+				g_dfrgx->g_initialized,
+				g_dfrgx->g_enable,
+				g_dfrgx->g_suspended);
+
+	if (g_dfrgx->g_initialized && g_dfrgx->g_enable &&
+		!g_dfrgx->g_suspended) {
+		wake_thread(g_dfrgx);
+	}
+
+	DFRGX_DPF(DFRGX_DEBUG_LOW, "%s: time is up! --"
+				" timer_enabled %d !\n",
+				__func__,
+				g_dfrgx->g_timer_is_enabled);
+
+	if (!g_dfrgx->g_timer_is_enabled)
+		return HRTIMER_NORESTART;
+
+	mc_now = ktime_get();
+	hrtimer_forward(hrthdl, mc_now, g_dfrgx->g_hrt_period);
+
+	return HRTIMER_RESTART;
+}
+
+/**
+ * dfrgx_burst_resume() - Callback for gfx hw transition from state D3.
+ * @df_rgx_data_s: dfrgx burst handle.
+ *
+ * Device power on.  Assume the device has retained no state.
+ * Invoked via interrupt/callback.
+ * Execution context: non-atomic
+ */
+static int dfrgx_burst_resume(struct df_rgx_data_s *g_dfrgx)
+{
+
+	if (!g_dfrgx || !g_dfrgx->g_initialized || !g_dfrgx->g_enable)
+		return 0;
+
+	DFRGX_DPF(DFRGX_DEBUG_LOW, "%s: resume!\n",
+				__func__);
+
+	g_dfrgx->g_suspended = 0;
+	
+	/*Need to update the freq after coming back from D0i3/S0i3*/
+	mutex_lock(&g_dfrgx->bus_freq_data->lock);
+	g_dfrgx->bus_freq_data->b_resumed = 1;
+	g_dfrgx->bus_freq_data->b_need_freq_update = 1;
+	mutex_unlock(&g_dfrgx->bus_freq_data->lock);
+
+	hrt_start(g_dfrgx);
+
+	return 0;
+}
+
+/**
+ * dfrgx_burst_suspend() - Callback for gfx hw transition from state D3.
+ * @df_rgx_data_s: dfrgx burst handle.
+ *
+ * Device power on.  Assume the device has retained no state.
+ * Invoked via interrupt/callback.
+ * Execution context: non-atomic
+ */
+static int dfrgx_burst_suspend(struct df_rgx_data_s *g_dfrgx)
+{
+	if (!g_dfrgx || !g_dfrgx->g_initialized || !g_dfrgx->g_enable)
+		return 0;
+
+	DFRGX_DPF(DFRGX_DEBUG_LOW, "%s: suspend!\n",
+				__func__);
+
+	g_dfrgx->g_suspended = 1;
+	hrt_cancel(g_dfrgx);
+
+	return 0;
+}
+
+/**
+ * frgx_burst_set_enable() - Is burst enabled?.
+ * @df_rgx_data_s: dfrgx burst handle.
+ *
+ * Device power on.  Assume the device has retained no state.
+ * Invoked via interrupt/callback.
+ * Execution context: non-atomic
+ */
+void dfrgx_burst_set_enable(struct df_rgx_data_s *g_dfrgx, int enable)
+{
+	DFRGX_DPF(DFRGX_DEBUG_HIGH, "%s enable: %d\n",
+				__func__, enable);
+
+	if (!g_dfrgx || !g_dfrgx->g_initialized)
+		return;
+
+
+	mutex_lock(&g_dfrgx->g_mutex_sts);
+	if (g_dfrgx->g_enable != enable) {
+		g_dfrgx->g_enable = enable;
+
+		if (g_dfrgx->g_enable)
+			hrt_start(g_dfrgx);
+		else
+			hrt_cancel(g_dfrgx);
+	}
+	mutex_unlock(&g_dfrgx->g_mutex_sts);
+}
+
+/**
+ * dfrgx_burst_is_enabled() - Is burst enabled?.
+ * @df_rgx_data_s: dfrgx burst handle.
+ *
+ * Device power on.  Assume the device has retained no state.
+ * Invoked via interrupt/callback.
+ * Execution context: non-atomic
+ */
+int dfrgx_burst_is_enabled(struct df_rgx_data_s *g_dfrgx)
+{
+	int enabled;
+
+	DFRGX_DPF(DFRGX_DEBUG_HIGH, "%s\n",
+				__func__);
+
+	if (!g_dfrgx || !g_dfrgx->g_initialized)
+		return 0;
+
+	mutex_lock(&g_dfrgx->g_mutex_sts);
+	enabled = g_dfrgx->g_enable;
+	mutex_unlock(&g_dfrgx->g_mutex_sts);
+
+	return enabled;
+}
+
+/**
+ * dfrgx_profiling_is_enabled() - Is profiling enabled?.
+ * @df_rgx_data_s: dfrgx burst handle.
+ *
+ * Device power on.  Assume the device has retained no state.
+ * Invoked via interrupt/callback.
+ * Execution context: non-atomic
+ */
+void dfrgx_profiling_set_enable(struct df_rgx_data_s *g_dfrgx, int enable)
+{
+	DFRGX_DPF(DFRGX_DEBUG_HIGH, "%s enable: %d\n",
+				__func__,
+				enable);
+
+	if (!g_dfrgx || !g_dfrgx->g_initialized)
+		return;
+
+	if (g_dfrgx->g_profiling_enable != enable)
+		g_dfrgx->g_profiling_enable = enable;
+}
+
+/**
+ * dfrgx_profiling_is_enabled() - Is profiling enabled?.
+ * @df_rgx_data_s: dfrgx burst handle.
+ *
+ * Device power on.  Assume the device has retained no state.
+ * Invoked via interrupt/callback.
+ * Execution context: non-atomic
+ */
+int dfrgx_profiling_is_enabled(struct df_rgx_data_s *g_dfrgx)
+{
+	int enabled;
+
+	DFRGX_DPF(DFRGX_DEBUG_HIGH, "%s\n",
+				__func__);
+
+	if (!g_dfrgx || !g_dfrgx->g_initialized)
+		return 0;
+
+	enabled = g_dfrgx->g_profiling_enable;
+
+	return enabled;
+}
+
+
+/**
+ * dfrgx_power_state_set() - Callback informing of gfx
+ * hw power state change.
+ * @df_rgx_data_s: dfrgx burst handle.
+ * @st_on: 1 if powering on, 0 if powering down.
+ */
+static void dfrgx_power_state_set(struct df_rgx_data_s *g_dfrgx,
+				int st_on)
+{
+	if (g_dfrgx->g_enable) {
+		if (st_on)
+			dfrgx_burst_resume(g_dfrgx);
+		else
+			dfrgx_burst_suspend(g_dfrgx);
+	}
+}
+
+
+/**
+ * dfrgx_burst_init() - dfrgx burst module initialization.
+ * @df_rgx_data_s: dfrgx burst handle.
+ *
+ * Invokes sub-function to initialize.  If failure, invokes cleanup.
+ *
+ * Function return value: negative to abort module installation.
+ */
+int dfrgx_burst_init(struct df_rgx_data_s *g_dfrgx)
+{
+	int sts = 0;
+	unsigned int error = 0;
+
+	DFRGX_DPF(DFRGX_DEBUG_LOW, "%s:gpu burst mode initialization"
+				" -- begin !\n",
+				__func__);
+
+	mutex_init(&g_dfrgx->g_mutex_sts);
+
+	/* No 3D activity is initialized to run until kernel finish
+	 * its initialization. so here make the gburst status consistent
+	 * with HW
+	 * */
+	g_dfrgx->g_suspended = 1;
+
+	g_dfrgx->g_hrt_period = ktime_set(0,
+		DFRGX_BURST_TIMER_PERIOD_DEFAULT_USECS * NSEC_PER_USEC);
+
+	{
+		struct dfrgx_interface_s dfrgx_interface;
+
+		dfrgx_interface.dfrgx_priv = g_dfrgx;
+		dfrgx_interface.dfrgx_power_state_set = dfrgx_power_state_set;
+
+		dfrgx_interface_set_data(&dfrgx_interface);
+	}
+
+	error = dev_freq_add_attributes_to_sysfs(g_dfrgx->bus_freq_data->dev);
+	if (error)
+		goto attrib_creation_failed;
+
+	/* Initialize profiling info*/
+	g_dfrgx->g_profiling_enable = 0;
+	gpu_profiling_records_init();
+
+	/* Initialize timer.  This does not start the timer. */
+	hrtimer_init(&g_dfrgx->g_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	g_dfrgx->g_timer.function = &hrt_event_processor;
+
+	/* FIXME: Need to re-think this*/
+	msleep(500);
+
+	error = gpu_rgx_utilstats_init_obj();
+	if (error) {
+		DFRGX_DPF(DFRGX_DEBUG_HIGH, "%s:gpu_rgx_utilstats_init_obj"
+				" -- failed!\n",
+				__func__);
+		sts = -EAGAIN;
+		goto error_init_obj;
+	}
+
+	if (g_dfrgx->g_enable) {
+		hrt_start(g_dfrgx);
+		sts = df_rgx_create_worker_thread(g_dfrgx);
+		if (sts < 0) {
+			/* abort init if unable to create thread. */
+			DFRGX_DPF(DFRGX_DEBUG_HIGH, "%s:thread creation"
+				" failed %d !\n",
+				__func__,
+				-sts);
+			goto err_thread_creation;
+
+		}
+	}
+
+	g_dfrgx->g_initialized = 1;
+
+	/* Initialize to suspended state */
+	/* Allows system to enter sleep states while charging */
+	dfrgx_burst_suspend(g_dfrgx);
+
+	DFRGX_DPF(DFRGX_DEBUG_LOW, "%s:gpu burst mode initialization"
+				" -- done!\n",
+				__func__);
+
+	return 0;
+
+error_init_obj:
+	df_rgx_stop_worker_thread(g_dfrgx);
+
+attrib_creation_failed:
+err_thread_creation:
+
+	{
+		struct dfrgx_interface_s dfrgx_interface;
+		memset(&dfrgx_interface, 0, sizeof(struct dfrgx_interface_s));
+		dfrgx_interface_set_data(&dfrgx_interface);
+	}
+
+	mutex_destroy(&g_dfrgx->g_mutex_sts);
+
+	return sts;
+}
+
+/**
+ * dfrgx_burst_deinit() - dfrgx burst module initialization.
+ * @df_rgx_data_s: dfrgx burst handle.
+ *
+ * Invokes sub-function to initialize.  If failure, invokes cleanup.
+ *
+ * Function return value: negative to abort module installation.
+ */
+void dfrgx_burst_deinit(struct df_rgx_data_s *g_dfrgx)
+{
+
+	DFRGX_DPF(DFRGX_DEBUG_LOW, "%s:gpu burst mode deinitialization"
+				" -- begin !\n",
+				__func__);
+
+	{
+		struct dfrgx_interface_s dfrgx_interface;
+		memset(&dfrgx_interface, 0, sizeof(struct dfrgx_interface_s));
+		dfrgx_interface_set_data(&dfrgx_interface);
+	}
+
+	dev_freq_remove_attributes_on_sysfs(g_dfrgx->bus_freq_data->dev);
+
+	gpu_rgx_utilstats_deinit_obj();
+
+	hrt_cancel(g_dfrgx);
+
+	g_dfrgx->g_timer.function = NULL;
+
+	df_rgx_stop_worker_thread(g_dfrgx);
+
+	mutex_destroy(&g_dfrgx->g_mutex_sts);
+	g_dfrgx->g_initialized = 0;
+
+	DFRGX_DPF(DFRGX_DEBUG_LOW, "%s:gpu burst mode deinitialization"
+				" -- done!\n",
+				__func__);
+
+	return;
+
+}
+
diff --git a/drivers/external_drivers/intel_media/graphics/dfrgx/df_rgx_burst.h b/drivers/external_drivers/intel_media/graphics/dfrgx/df_rgx_burst.h
new file mode 100644
index 0000000..6a75062
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/dfrgx/df_rgx_burst.h
@@ -0,0 +1,73 @@
+/**************************************************************************
+ * Copyright (c) 2012, Intel Corporation.
+ * All Rights Reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Javier Torres Castillo <javier.torres.castillo@intel.com>
+ */
+
+#if !defined DFRGX_BURST_H
+#define  DFRGX_BURST_H
+#include <linux/types.h>
+#include <linux/ctype.h>
+#include <linux/hrtimer.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/ktime.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/thermal.h>
+#include <linux/uaccess.h>
+#include <linux/version.h>
+#include <linux/proc_fs.h>
+#include <linux/module.h>
+
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/hugetlb.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/delay.h>
+
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <asm/hardirq.h>
+#include <linux/timer.h>
+#include "df_rgx_defs.h"
+#include "dev_freq_graphics_pm.h"
+
+#define DFRGX_BURST_TIMER_PERIOD_DEFAULT_USECS 5000
+
+
+int dfrgx_burst_init(struct df_rgx_data_s *g_dfrgx);
+void dfrgx_burst_deinit(struct df_rgx_data_s *g_dfrgx);
+void dfrgx_burst_set_enable(struct df_rgx_data_s *g_dfrgx, int enable);
+void dfrgx_profiling_set_enable(struct df_rgx_data_s *g_dfrgx, int enable);
+int dfrgx_burst_is_enabled(struct df_rgx_data_s *g_dfrgx);
+int dfrgx_profiling_is_enabled(struct df_rgx_data_s *g_dfrgx);
+void gpu_profiling_records_restart(void);
+int gpu_profiling_records_show(char *buf);
+
+#endif /*DFRGX_BURST*/
+
diff --git a/drivers/external_drivers/intel_media/graphics/dfrgx/df_rgx_defs.h b/drivers/external_drivers/intel_media/graphics/dfrgx/df_rgx_defs.h
new file mode 100644
index 0000000..12b1f4b
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/dfrgx/df_rgx_defs.h
@@ -0,0 +1,210 @@
+/**************************************************************************
+ * Copyright (c) 2012, Intel Corporation.
+ * All Rights Reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Javier Torres Castillo <javier.torres.castillo@intel.com>
+ */
+
+#if !defined DF_RGX_DEFS_H
+#define DF_RGX_DEFS_H
+
+#include <linux/device.h>
+#include <linux/notifier.h>
+#include <linux/hrtimer.h>
+#include <linux/devfreq.h>
+
+/**
+ * THERMAL_COOLING_DEVICE_MAX_STATE - The maximum cooling state that this
+ * driver (as a thermal cooling device by reducing frequency) supports.
+ */
+#define THERMAL_COOLING_DEVICE_MAX_STATE	4
+#define NUMBER_OF_LEVELS_B0			8
+#define NUMBER_OF_LEVELS			4
+
+#define DF_RGX_FREQ_KHZ_MIN             200000
+#define DF_RGX_FREQ_KHZ_MAX             533000
+
+#define DF_RGX_FREQ_KHZ_MIN_INITIAL     DF_RGX_FREQ_KHZ_MIN
+
+#define DF_RGX_FREQ_KHZ_MAX_INITIAL     320000
+
+#define DF_RGX_INITIAL_FREQ_KHZ         320000
+
+#define DF_RGX_THERMAL_LIMITED_FREQ_KHZ 200000
+
+typedef enum _DFRGX_FREQ_ {
+	DFRGX_FREQ_200_MHZ = 200000,
+	DFRGX_FREQ_213_MHZ = 213000,
+	DFRGX_FREQ_266_MHZ = 266000,
+	DFRGX_FREQ_320_MHZ = 320000,
+	DFRGX_FREQ_355_MHZ = 355000,
+	DFRGX_FREQ_400_MHZ = 400000,
+	DFRGX_FREQ_457_MHZ = 457000,
+	DFRGX_FREQ_533_MHZ = 533000,
+} DFRGX_FREQ;
+
+typedef enum _DFRGX_TURBO_PROFILE_ {
+	DFRGX_TURBO_PROFILE_SIMPLE_ON_DEMAND	= 0,
+	DFRGX_TURBO_PROFILE_POWERSAVE		= 1,
+	DFRGX_TURBO_PROFILE_CUSTOM		= 2,
+	DFRGX_TURBO_PROFILE_USERSPACE		= 3,
+	DFRGX_TURBO_PROFILE_PERFORMANCE 	= 4,
+	DFRGX_TURBO_PROFILE_MAX			= 5,
+} DFRGX_TURBO_PROFILE;
+
+typedef enum _DFRGX_BURST_MODE_ {
+	DFRGX_NO_BURST_REQ	= 0,
+	DFRGX_BURST_REQ		= 1,
+	DFRGX_UNBURST_REQ	= 2,
+} DFRGX_BURST_MODE;
+
+struct gpu_util_stats {
+	/* if TRUE, statistict are valid, otherwise
+	* there was not enough data to calculate the times
+	*/
+	unsigned int				bValid;
+	/* GPU active time expressed in ms */
+	unsigned long long			ui64GpuStatActiveHigh;
+	/* GPU active time expressed in ms */
+	unsigned long long			ui64GpuStatActiveLow;
+	/* GPU blocked time expressed in ms */
+	unsigned long long			ui64GpuStatBlocked;
+	/* GPU idle time expressed in ms */
+	unsigned long long			ui64GpuStatIdle;
+	/* GPU time cumulative total in ms */
+	unsigned long long			ui64GpuStatCumulative;
+};
+
+/**
+ * struct gpu_profiling_record - profiling information
+ */
+struct gpu_profiling_record {
+	ktime_t		last_timestamp_ns;
+	long long	time_ms;
+};
+
+struct gpu_utilization_record {
+	unsigned long		freq;
+	int			code;
+};
+
+struct gpu_freq_thresholds {
+	/*lower limit utilization percentage, unburst it!*/
+	int			util_th_low;
+	/*upper limit utilization percentage, burst it!*/
+	int			util_th_high;
+};
+
+struct gpu_data {
+	unsigned long int     freq_limit;
+};
+
+/**
+ * struct df_rgx_data_s - dfrgx burst private data
+ */
+struct df_rgx_data_s {
+
+	struct busfreq_data		*bus_freq_data;
+	struct hrtimer			g_timer;
+
+	/* gbp_task - pointer to task structure for work thread or NULL. */
+	struct task_struct		*g_task;
+
+	/* gbp_hrt_period - Period for timer interrupts as a ktime_t. */
+	ktime_t				g_hrt_period;
+	int				g_initialized;
+	int				g_suspended;
+	int				g_thread_check_utilization;
+
+
+	/* gbp_enable - Usually 1.  If 0, gpu burst is disabled. */
+	int				g_enable;
+	int				g_profiling_enable;
+	int				g_timer_is_enabled;
+
+	struct mutex			g_mutex_sts;
+	unsigned long			g_recommended_freq_level;
+	unsigned long int		g_freq_mhz_min;
+	unsigned long int		g_freq_mhz_max;
+	int				gpu_utilization_record_index;
+	int				g_min_freq_index;
+	int				g_max_freq_index;
+	int				g_profile_index;
+};
+
+struct busfreq_data {
+	struct df_rgx_data_s g_dfrgx_data;
+	struct device        *dev;
+	struct devfreq       *devfreq;
+	struct notifier_block pm_notifier;
+	struct mutex          lock;
+	bool                  disabled;
+	unsigned long int     bf_freq_mhz_rlzd;
+	unsigned long int     bf_prev_freq_rlzd;
+	unsigned long int     bf_desired_freq;
+	unsigned int	      b_resumed;
+	unsigned int	      b_need_freq_update;
+	char		      prev_governor[DEVFREQ_NAME_LEN + 1];
+
+	struct thermal_cooling_device *gbp_cooldv_hdl;
+	int                   gbp_cooldv_state_cur;
+	int                   gbp_cooldv_state_prev;
+	int                   gbp_cooldv_state_highest;
+	int                   gbp_cooldv_state_override;
+	unsigned int	      gbp_cooldv_latest_freq_max;
+	unsigned int	      gbp_cooldv_latest_freq_min;
+	struct gpu_data	      gpudata[THERMAL_COOLING_DEVICE_MAX_STATE];
+};
+
+/**
+ * struct userspace_gov_data - Must the same as  userspace_data
+ */
+struct userspace_gov_data {
+	unsigned long user_frequency;
+	bool valid;
+};
+
+
+/*Available states - freq mapping table*/
+static const struct gpu_utilization_record a_available_state_freq[] = {
+					{DFRGX_FREQ_200_MHZ, 0xF},
+					/*Need a proper value for this freq*/
+					{DFRGX_FREQ_213_MHZ, 0xE},
+					{DFRGX_FREQ_266_MHZ, 0xB},
+					{DFRGX_FREQ_320_MHZ, 0x9},
+					{DFRGX_FREQ_355_MHZ, 0x8},
+					{DFRGX_FREQ_400_MHZ, 0x7},
+					{DFRGX_FREQ_457_MHZ, 0x6},
+					{DFRGX_FREQ_533_MHZ, 0x5}
+					};
+
+unsigned int df_rgx_is_valid_freq(unsigned long int freq);
+unsigned int df_rgx_request_burst(struct df_rgx_data_s *pdfrgx_data,
+					int util_percentage);
+int df_rgx_get_util_record_index_by_freq(unsigned long freq);
+long df_rgx_set_freq_khz(struct busfreq_data *bfdata,
+				unsigned long freq_khz);
+int df_rgx_set_governor_profile(const char *governor_name,
+					struct df_rgx_data_s *g_dfrgx);
+
+#endif /*DF_RGX_DEFS_H*/
diff --git a/drivers/external_drivers/intel_media/graphics/dfrgx/df_rgx_utils.c b/drivers/external_drivers/intel_media/graphics/dfrgx/df_rgx_utils.c
new file mode 100644
index 0000000..bb74cb1
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/dfrgx/df_rgx_utils.c
@@ -0,0 +1,165 @@
+/**************************************************************************
+ * Copyright (c) 2012, Intel Corporation.
+ * All Rights Reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Javier Torres Castillo <javier.torres.castillo@intel.com>
+ */
+#include "df_rgx_defs.h"
+#include "dev_freq_debug.h"
+
+extern int is_tng_a0;
+
+struct gpu_freq_thresholds a_governor_profile[] = {
+			/* low, high thresholds for Performance profile */
+			{67, 85},
+			/* low, high thresholds for Power Save profile*/
+			{80, 95},
+			/* low, high Custom thresholds */
+			{50, 100},
+			/* low, high Performance */
+			{25, 45}
+			};
+/**
+ * df_rgx_is_valid_freq() - Determines if We are about to use
+ * a valid frequency.
+ * @freq: frequency to be validated.
+ *
+ * Function return value: 1 if Valid 0 if not.
+ */
+unsigned int df_rgx_is_valid_freq(unsigned long int freq)
+{
+	unsigned int valid = 0;
+	int i;
+	int a_size = NUMBER_OF_LEVELS;
+
+	if(!is_tng_a0)
+		a_size = NUMBER_OF_LEVELS_B0;
+
+	DFRGX_DPF(DFRGX_DEBUG_HIGH, "%s freq: %lu\n",
+			__func__, freq);
+
+	for (i = 0; i < a_size; i++) {
+		if (freq == a_available_state_freq[i].freq) {
+			valid = 1;
+			break;
+		}
+	}
+
+	DFRGX_DPF(DFRGX_DEBUG_HIGH, "%s valid: %u\n",
+			__func__, valid);
+
+	return valid;
+}
+
+/**
+ * df_rgx_get_util_record_index_by_freq() - Obtains the index to a
+ * a record from the avalable frequencies table.
+ * @freq: frequency to be validated.
+ *
+ * Function return value: the index if found, -1 if not.
+ */
+int df_rgx_get_util_record_index_by_freq(unsigned long freq)
+{
+	int n_levels = NUMBER_OF_LEVELS;
+	int i = 0;
+
+	if(!is_tng_a0)
+		n_levels = NUMBER_OF_LEVELS_B0;
+
+	for (i = 0; i < n_levels; i++) {
+		if (freq == a_available_state_freq[i].freq)
+			break;
+	}
+
+	if (i == n_levels)
+		i = -1;
+
+	return i;
+}
+
+/**
+ * df_rgx_request_burst() - Decides if dfrgx needs to BURST, UNBURST
+ * or keep the current frequency level.
+ * @pdfrgx_data: Dynamic turbo information
+ * @util_percentage: percentage of utilization in active state.
+ * Function return value: DFRGX_NO_BURST_REQ, DFRGX_BURST_REQ,
+ * DFRGX_UNBURST_REQ.
+ */
+unsigned int df_rgx_request_burst(struct df_rgx_data_s *pdfrgx_data,
+			int util_percentage)
+{
+	int current_index = pdfrgx_data->gpu_utilization_record_index;
+	unsigned long freq = a_available_state_freq[current_index].freq;
+	int new_index;
+	unsigned int burst = DFRGX_NO_BURST_REQ;
+	int n_levels = NUMBER_OF_LEVELS;
+
+	if(!is_tng_a0)
+		n_levels = NUMBER_OF_LEVELS_B0;
+
+	new_index = df_rgx_get_util_record_index_by_freq(freq);
+
+	if (new_index < 0)
+		goto out;
+
+	/* Decide unburst/burst based on utilization*/
+	if (util_percentage > a_governor_profile[pdfrgx_data->g_profile_index].util_th_high
+		&& new_index < pdfrgx_data->g_max_freq_index) {
+		/* Provide recommended burst*/
+		pdfrgx_data->gpu_utilization_record_index = new_index+1;
+		burst = DFRGX_BURST_REQ;
+	} else if (util_percentage < a_governor_profile[pdfrgx_data->g_profile_index].util_th_low
+		&& new_index > pdfrgx_data->g_min_freq_index) {
+		/* Provide recommended unburst*/
+		pdfrgx_data->gpu_utilization_record_index = new_index-1;
+		burst = DFRGX_UNBURST_REQ;
+	}
+
+out:
+	return burst;
+}
+
+/**
+ * df_rgx_set_governor_profile() -Updates the thresholds based on the governor.
+ * @governor_name: governor id
+ * @g_dfrgx_data: Dynamic turbo information
+ * Function return value: 1 if changed, 0 otherwise.
+ */
+int df_rgx_set_governor_profile(const char *governor_name,
+			struct df_rgx_data_s *g_dfrgx)
+{
+	int ret = 0;
+
+	if (!strncmp(governor_name, "performance", DEVFREQ_NAME_LEN))
+		g_dfrgx->g_profile_index = DFRGX_TURBO_PROFILE_PERFORMANCE;
+	else if (!strncmp(governor_name, "powersave", DEVFREQ_NAME_LEN))
+		g_dfrgx->g_profile_index = DFRGX_TURBO_PROFILE_POWERSAVE;
+	else if (!strncmp(governor_name, "simple_ondemand", DEVFREQ_NAME_LEN)) {
+		g_dfrgx->g_profile_index = DFRGX_TURBO_PROFILE_SIMPLE_ON_DEMAND;
+		ret = 1;
+	} else if (!strncmp(governor_name, "userspace", DEVFREQ_NAME_LEN))
+		g_dfrgx->g_profile_index = DFRGX_TURBO_PROFILE_USERSPACE;
+
+	return ret;
+}
+
diff --git a/drivers/external_drivers/intel_media/graphics/gburst/Kbuild b/drivers/external_drivers/intel_media/graphics/gburst/Kbuild
new file mode 100644
index 0000000..14ba1c0
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/gburst/Kbuild
@@ -0,0 +1,84 @@
+# Kbuild - the "makefile" used to build the gburst software.
+#
+# Invocations
+# 1.  usual: as a part of a full kernel build if CONFIG_GPU_BURST is
+# defined as "y" or "m".  This file ("Kbuild") is selected by the kernel
+# build system because "Kbuild" has priority over "Makefile".
+#
+# In which case, either:
+# -- (CONFIG_GPU_BURST=y) the gburst software is built-in to the kernel.
+# or
+# -- (CONFIG_GPU_BURST=m) the gburst software is built as a module into
+#    gburst.ko .  HOWEVER, some portions of gburst (e.g., hooks into the
+#    graphics device driver) are still built into the kernel.
+#
+# 2.  Alternate invocation: The module may be built separately from the rest of
+# the kernel (typically to reduce debug cycle time during development):
+# File "Makefile" in this directory is invoked from the command line,
+# defines GBURST_EXT_MOD_BUILD as "y", and then causes Kbuild to be invoked.
+# The kernel against which the module will be loaded should have been created
+# in the usual way with CONFIG_GPU_BURST=m.
+# Requires one of the following have been done:
+# -- "make modules_prepare"    (ok, but does not set up Module.symvers)
+# -- "make" or "make bzImage" -- regular kernel build to establish build
+#    environment.
+
+# To add verbosity during build:
+#   make KBUILD_VERBOSE=1
+
+ifeq ($(GBURST_EXT_MOD_BUILD),y)
+CONFIG_GPU_BURST := m
+endif
+
+# MY_DEBUG - 1 to force compilation to include "-g".
+MY_DEBUG := 1
+
+# This makefile written for dir: drivers/staging/intel_media/graphics/gburst
+
+# TOP_REL_* - directory spec relative to top directory.
+#    Used for makefile include references.
+#    Used for -I header file inclusion.
+
+TOP_REL_DRIVERS_STAGING := drivers/staging
+
+ifneq ($(MY_DEBUG),)
+ifneq ($(MY_DEBUG),0)
+# Causes build errors: ## ccflags-y += -O0 -fno-inline
+ifndef CONFIG_DEBUG_INFO
+# If CONFIG_DEBUG_INFO, then "-g" is already present by default.
+ccflags-y += -g
+endif
+endif
+endif
+
+obj-$(CONFIG_GPU_BURST)	+= gburst.o
+
+gburst-y :=
+gburst-y += gburstm.o
+gburst-y += gburst_stats.o
+gburst-y += utilf.o
+
+gburst_warning_flags :=
+gburst_warning_flags += -Wall -fmessage-length=0 -Wunused-parameter
+
+gburst_warning_flags += -Wextra -Wno-sign-compare -Wformat-nonliteral -Wformat-security -fdiagnostics-show-option -Wdeclaration-after-statement -Wmissing-format-attribute -Wpointer-arith -Wshadow -Wlogical-op -Wbad-function-cast -Wmissing-prototypes -Wwrite-strings
+
+# Turn off things that too many kernel headers cause.
+gburst_warning_flags += -Wno-unused-parameter -Wno-pointer-arith -Wno-bad-function-cast
+
+## gburst_warning_flags += -Wundef -Wc++-compat
+
+ccflags-y += $(gburst_warning_flags)
+
+# To request an assembly listing:
+## ccflags-y += -Wa,-alh=$(PWD)/q.lst
+
+# Variable c_flags is the embodiment of the kbuild compilation options.
+
+TOP_REL_DEV_SGX   := $(TOP_REL_DRIVERS_STAGING)/mrst/pvr/services4/srvkm/devices/sgx
+
+CFLAGS_gburst_stats.o += -I$(TOP_REL_DEV_SGX)
+
+TOP_REL_SYSCONFIG   := $(TOP_REL_DRIVERS_STAGING)/mrst/pvr/services4/system/intel_drm
+
+CFLAGS_gburstm.o += -I$(TOP_REL_SYSCONFIG)
diff --git a/drivers/external_drivers/intel_media/graphics/gburst/Makefile b/drivers/external_drivers/intel_media/graphics/gburst/Makefile
new file mode 100644
index 0000000..ae6f2b9
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/gburst/Makefile
@@ -0,0 +1,90 @@
+# NOTE: This file is *not* invoked by the kernel build system,
+# as the presence of file "Kbuild" takes precedence.  Rather, this
+# file can be invoked by a "make" in this directory to build the
+# module stand-alone.
+#
+# See commentary in file "Kbuild".
+
+# For development and test, be more verbose for build of this module.
+export KBUILD_VERBOSE := 1
+
+ifeq ($(KERNELRELEASE),)
+
+ifeq ($(wildcard $(ANDROID_BUILD_TOP)),)
+$(error Error: required directory not present: ANDROID_BUILD_TOP = $(ANDROID_BUILD_TOP))
+endif
+
+ifeq ($(wildcard $(ANDROID_PRODUCT_OUT)),)
+$(error Error: required directory not present: ANDROID_PRODUCT_OUT = $(ANDROID_PRODUCT_OUT))
+endif
+
+# Assume the source tree is where the running kernel was built
+# You should set KERNELDIR in the environment if it's elsewhere
+KERNELDIR ?= $(ANDROID_BUILD_TOP)/$(KERNEL_SRC_DIR)
+
+MODULE_SRC  := $(ANDROID_PRODUCT_OUT)/kernel_modules
+MODULE_DEST := $(ANDROID_PRODUCT_OUT)/root/lib/modules
+
+INSTALL_MOD_PATH=$(MODULE_SRC)
+export INSTALL_MOD_PATH
+
+# The current directory is passed to sub-makes as argument
+CURDIR := $(shell pwd)
+
+# Note: This export of KBUILD_OUTPUT is equivalent to -O=the_same
+export KBUILD_OUTPUT := $(ANDROID_PRODUCT_OUT)/linux/kernel
+
+# In this makefile (used only for external module builds), force
+# GBURST_EXT_MOD_BUILD=y to allow standalone module builds for development
+# and testing.
+
+default:	modules
+
+make_and_install: modules
+	$(MAKE) modules_install
+
+modules:
+	$(MAKE) GBURST_EXT_MOD_BUILD=y -C $(KERNELDIR) M=$(CURDIR) modules
+
+modules_install:
+	$(MAKE) GBURST_EXT_MOD_BUILD=y -C $(KERNELDIR) M=$(CURDIR) modules_install
+	cp -vpf gburst.ko $(MODULE_DEST)
+
+clean:
+	$(MAKE) GBURST_EXT_MOD_BUILD=y -C $(KERNELDIR) M=$(CURDIR) $@
+	rm -f m.log
+	rm -f $(MODULE_DEST)/gburst.ko
+
+
+cleanx:
+	rm -f .*.o.d built-in.o .built-in.o.cmd
+	rm -f *.o *~ core .depend .*.cmd *.ko *.mod.c
+	rm -f Module.symvers Module.markers modules.order
+	rm -f *.lst m.log
+	rm -rf .tmp_versions
+
+.PHONY: modules modules_install clean
+
+# Required once per boot before pushing module.
+adb_rw:
+	adb shell mount -o rw,remount /
+
+# Push the module to its home on the device-under-test.
+adb_push:
+	adb push gburst.ko /lib/modules/gburst.ko
+
+# Notes about module installation location:
+# Default directory is: /lib/modules/<kernel-version>/extra
+#
+# If specified, INSTALL_MOD_PATH is a prefix for the above (used only by default definition of MODLIB).
+#   Definition not necessary if MODLIB also being defined.
+# MODLIB - Initial part of directory specification.  Default is /lib/modules/<kernel-version>.
+# INSTALL_MOD_DIR - default is "extra" for out-of-tree, "kernel" for in-tree.
+# install-dir - Default definition is INSTALL_MOD_DIR if non-blank, else "extra".
+#   Same as INSTALL_MOD_DIR, but only for directory creation.
+#
+# The command line assignment overrides all makefile assignments.
+#
+## make MODLIB=${ANDROID_PRODUCT_OUT}/root/lib/modules INSTALL_MOD_DIR= install-dir=. modules_install
+
+endif
diff --git a/drivers/external_drivers/intel_media/graphics/gburst/Readme.txt b/drivers/external_drivers/intel_media/graphics/gburst/Readme.txt
new file mode 100644
index 0000000..e297af8
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/gburst/Readme.txt
@@ -0,0 +1,222 @@
+High level flow
+---------------
+
+================================================================================
+
+The following diagram provides the high level flow of graphics driver
+interaction with other system components for burst entry/exit and
+notification of changes to GPU frequency.
+
+                                                   |-----------------|
+                                                   |     Android     |
+             ---(ADC Input)----------------------->|     Thermal     |
+                                                   |     Manager     |
+                                                   |-----------------|
+                                                            |
+             Thermal State via                              |
+             Sysfs/class/system/thermal/coolingdevice/      |
+             {maxstate,cstate}                              |
+             3 -- Critical Thermal Condition                |
+             2 -- Alert                                     |
+             1 -- Warning                                   |
+             0 -- Normal Thermal Condition                  |
+                                                            |
+             Graphics Driver                                v
+|---------|  must request MSI    |------------|    |-----------------|
+|         |  for IRQ TBD         |            |    |                 |
+|   SCU   |                      |  Graphics  |    |    Frequency    |
+|         |--(MSI Interrupt)---->|   Driver   |--->|      Change     |
+|         |                      |            |    |       ISR       |
+|---------|                      |------------|    |-----------------|
+     ^                                  |                   ^
+     |                                  |                   |
+     |   Frequency Change               |                   |
+     |   1.  boost up/down              |                   |
+     |   2.  throttling                 |                   |
+     |                                  |                   |
+|---------|                             |                   |
+|         |<--(MBI Write PWRGT_CNT)-----|                   |
+|  PUnit  |                                                 |
+|         |----------------------------(MBI Read PWRGT_STS)--
+|---------|
+
+
+In the above diagram, the graphics driver requires the following:
+
+1. Initialization of the PUnit PWRGT_CNT register to receive interrupts
+   notification of frequency change events.  Note: this bit must be set on
+   every write to this register when interrupt notification is desired.
+   For the Clovertrail this is every write to the register other than
+   D3 entry.
+2. MSI interrupt support from the SCU to route PUnit interrupt events
+   via virtual ioapic.
+3. Monitoring of Android Thermal Management controlled state changes to
+   the cooling device.
+4. Reading the frequency and power status from the PUnit PWRGT_STS register.
+
+================================================================================
+Registers:
+================================================================================
+Register PWRGT_CNT -- write-only (except to determine toggle bit state)
+================================================================================
+--  The graphics driver must remember the last-written value to this register
+    and should restore the register value after all S0ix transitions.
+--  This register is write only by the graphics driver (except to determine
+    toggle bit state)and is accessed via
+    Message Bus Interface (MBI).  See MBI write below for more details.
+--  PWRGT_CNT[31] (the toggle bit) is toggled on every write (so the firmware
+    can detect that the register has been written).
+    The value of the toggle bit is preserved across D3.
+--  During driver initialization (and exit from D3) the following values
+    are written to the PWRGT_CNT register:
+--  Upon D3 entry, all bits are set to 0 except toggle.
+    See the CLV Gfx Burst HAS for more details.  (FIXME)
+--  Upon D3 exit (transition from D3 to S0), entry, all bits are set to 0 except
+    toggle and PWRGT_CNT[30] (which is interrupt enable).
+    Because the PWRGT_CNT[27:24] is being set to 0, frequency will be
+    0000b == 400 MHz (e.g., burst mode off).
+
+PWRGT_CNT bits          Description
+--------------          -------------------------------------------------------
+PWRGT_CNT[31]           Required to be set by the graphics driver for each
+                        write to the PWRGT_CNT register to signal the PUnit.
+                        During initialization this bit is set to 1.
+                        Each subsequent write to the PWRGT_CNT register must
+                        toggle this bit.
+                        The driver always preserves the state of this bit
+                        including across D3 (Power) Entry/Exit.
+PWRGT_CNT[30]           Enables notification of graphics frequency changes to
+                        the graphics driver via SCU.  Always set to 1 by
+                        the graphics driver unless D3 entry or Driver Unload,
+                        in which case, set to 0.
+PWRGT_CNT[29]           Reserved the graphics driver will always set to 0
+PWRGT_CNT[28]           Enable/disable automatic burst entry.  The graphics
+                        driver will set this bit to 1 at initialization and
+                        to 0 upon driver unload.
+
+FIXME - This is *automatic* burst mode.  Not wanted when driver is involved, right?
+
+PWRGT_CNT[27:24]        Burst Entry/Exit request.  The graphics driver will
+                        set these bits to 0000b (400MHz operation) at
+                        initialization and upon driver unload.
+                        0000b -- Burst Exit request and IA SW preference
+                            for 400 MHz Graphics Clock.
+                        0001b -- Burst Entry request and IA SW preference
+                            for 533 MHz Graphics Clock
+                        Anything else -- reserved
+PWRGT_CNT[23:0]         Reserved.  The graphics must set these bits to 0 always.
+
+================================================================================
+Register PWRGT_STS -- read-only
+================================================================================
+
+On successful read of the PWRGT_STS register the bits below are returned.
+See CLV GFX Burst HAS for more details on register values.
+
+PWRGT_STS bits          Description
+--------------          -------------------------------------------------------
+PWRGT_STS[31]           Fuse status to indicate if bursting is supported on
+                        the SKU.
+                        1b = burst available.  0 = burst not available.
+PWRGT_STS[30]           Graphics Clock Change Interrupt setting.
+                        1b = interrupt enabled for graphics driver.
+PWRGT_STS[29]           Reserved.  Graphics driver should ignore this bit.
+PWRGT_STS[28]           Automatic Burst Entry Enable Setting.
+                        1b = PUnit FW performs automatic burst entry under
+                        appropriate conditions.
+PWRGT_STS[27:24]        Graphics Driver Burst Request Setting.
+                        0001b = Burst Entry Request has been processed and
+                            graphics frequency preference is for 533Mhz.
+                        0000b = Burst Exit request has been processed and
+                            graphics frequency preference is 400Mhz.
+
+PWRGT_STS[23:20]        Graphics Clock / Throttle Status
+    0001b = Graphics Clock is 533 MHz Unthrottled
+    0000b = Graphics Clock is 400 MHz Unthrottled
+    1001b = Graphics Clock is 400 MHz at 12.5% Throttled (350 MHz effective)
+    1010b = Graphics Clock is 400 MHz at 25% Throttled (300 MHz effective)
+    1011b = Graphics Clock is 400 MHz at 37.5% Throttled (250 MHz effective)
+    1100b = Graphics Clock is 400 MHz at 50% Throttled (200 MHz effective)
+    1101b = Graphics Clock is 400 MHz at 62.5% Throttled (150 MHz effective)
+    1110b = Graphics Clock is 400 MHz at 75% Throttled (100 MHz effective)
+    1111b = Graphics Clock is 400 MHz at 87.5% Throttled (50 MHz effective)
+PWRGT_STS[19:0]         Reserved
+
+================================================================================
+Thermal cooling device interface
+================================================================================
+The following taken from Document/thermal/sysfs-api.txt in the Linux kernel
+describes device registration interface:
+
+1.2.1 struct thermal_cooling_device *thermal_cooling_device_register(char *name,
+                void *devdata, struct thermal_cooling_device_ops *)
+
+    This interface function adds a new thermal cooling device
+    (fan/processor/...) to /sys/class/thermal/ folder as cooling_device[0-*].
+    It tries to bind itself to all the thermal zone devices register
+    at the same time.
+    name: the cooling device name.
+    devdata: device private data.
+    ops: thermal cooling devices call-backs.
+        .get_max_state: get the Maximum throttle state of the cooling device.
+        .get_cur_state: get the Current throttle state of the cooling device.
+        .set_cur_state: set the Current throttle state of the cooling device.
+
+================================================================================
+Burst Determination Flow
+================================================================================
+
+/----------\   |-----------|   |------------|
+|  Linux   |   |  cpugpu_  |   |  Read      |
+|  Kernel  |-->|  thread   |-->|  PWRGT_STS |
+|  Timer   |   |           |   |  Graphics  |
+|   5ms    |   |           |   |            |
+\----------/   |-----------|   |------------|
+                                      |
+       |------------------------------|
+       v
+  /----------\
+ /  If        \  FIXME - If already active, then exit burst
+|  Burst       |---(Yes)--->---------------------------|
+|  Entry       |                                       |
+ \ Prohibited /                                        |
+  \----------/                                         |
+       |                                               |
+       v                                               |
+|--------------|                                       |
+|  Read        |                                       |
+|  Performance |                                       |
+|  Counters    |                                       |
+|--------------|                                       |
+       |                                               |
+       v                                               |
+|--------------|                                       |
+|  Compute Max |    [Don't know if this is             |
+|  Utilization |    precisely what is done.]           |
+|  Over last   |                                       |
+|  10 samples  |                                       |
+|--------------|                                       |
+       |                                               |
+       v                                               |
+  /------------\              /-----------\            |
+ /  Burst not   \            /  Burst      \           |
+|  Active and    |--(No)--->|  Active and   |--(No)--->v
+|  Utilization > |          | Utilization < |          |
+ \ Threshold    /            \ Threshold   /           |
+  \------------/              \-----------/            |
+       |                           |                   |
+     (Yes)                       (Yes)                 |
+       |                           |                   |
+       v                           v                   |
+|--------------|            |--------------|           |
+|  Request     |            |  Request     |           |
+|  Burst       |            |  Burst       |           |
+|  Entry       |            |  Exit        |           |
+|--------------|            |--------------|           |
+       |                           |                   |
+       |---------------------------|-------------------|
+                                   v
+                            |--------------|
+                            |  Exit        |
+                            |--------------|
+================================================================================
diff --git a/drivers/external_drivers/intel_media/graphics/gburst/gburst.h b/drivers/external_drivers/intel_media/graphics/gburst/gburst.h
new file mode 100644
index 0000000..5b00e82
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/gburst/gburst.h
@@ -0,0 +1,81 @@
+/**************************************************************************
+ * Copyright (c) 2012, Intel Corporation.
+ * All Rights Reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Dale B. Stimson <dale.b.stimson@intel.com>
+ *    Jari Luoma-aho  <jari.luoma-aho@intel.com>
+ *    Jari Nippula    <jari.nippula@intel.com>
+ *
+ */
+
+#if !defined GBURST_H
+#define GBURST_H
+
+#if (defined CONFIG_GPU_BURST) || (defined CONFIG_GPU_BURST_MODULE)
+
+#include <linux/types.h>
+#include <linux/proc_fs.h>
+
+/* Global variables */
+
+/**
+ * gburst_debug_msg_on - Enables some debug messages.
+ *
+ * Initialized to zero.  May be set via /proc
+ * If set, one function in gburst_stats.c
+ * uses printk to output index, perf_counter->utilization.
+ */
+extern int gburst_debug_msg_on;
+
+/* Functions for gathering utilization information. */
+
+/**
+ * gburst_stats_gpu_freq_mhz_info() - Give cpu frequency to stats and to
+ * graphics system.
+ * @freq_MHz: Frequency in MHz.
+ */
+int gburst_stats_gpu_freq_mhz_info(int freq_MHz);
+
+void gburst_stats_gfx_hw_perf_max_values_clear(void);
+
+int gburst_stats_gfx_hw_perf_max_values_to_string(int ix_in,
+	char *buf, size_t buflen);
+
+int gburst_stats_gfx_hw_perf_counters_to_string(int ix_in,
+	char *buf, size_t buflen);
+
+int gburst_stats_gfx_hw_perf_counters_set(const char *buf);
+
+int gburst_stats_gfx_hw_perf_record();
+
+int gburst_stats_active_counters_from_string(const char *buf, int nbytes);
+
+int gburst_stats_active_counters_to_string(char *buf, int breq);
+
+int gburst_stats_shutdown(void);
+
+void gburst_stats_cleanup_gfx_load_data(void);
+
+#endif /* if (defined CONFIG_GPU_BURST) || (defined CONFIG_GPU_BURST_MODULE) */
+
+#endif /* if !defined GBURST_H */
diff --git a/drivers/external_drivers/intel_media/graphics/gburst/gburst_stats.c b/drivers/external_drivers/intel_media/graphics/gburst/gburst_stats.c
new file mode 100644
index 0000000..22f8d78
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/gburst/gburst_stats.c
@@ -0,0 +1,526 @@
+/**************************************************************************
+ * Copyright (c) 2012, Intel Corporation.
+ * All Rights Reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Hitesh K. Patel <hitesh.k.patel@intel.com>
+ *    Dale B. Stimson <dale.b.stimson@intel.com>
+ *    Jari Luoma-aho  <jari.luoma-aho@intel.com>
+ *    Jari Nippula    <jari.nippula@intel.com>
+ *
+ */
+
+#if (defined CONFIG_GPU_BURST) || (defined CONFIG_GPU_BURST_MODULE)
+
+#include <stddef.h>
+#include <linux/ctype.h>
+
+#include "utilf.h"
+
+#include "gburst.h"
+#include <gburst_hw.h>
+
+#define MAX_NUM_CORES 2
+#define MAX_NUM_COUNTERS 8
+
+
+/**
+ * struct pcel_s -- Performance counter element.
+ * Data for each counter.
+ */
+struct pcel_s {
+	/* for calculating delta */
+	uint32_t last_value;
+	/* pce_time_stamp - gpu clock divided by 16. */
+	uint32_t pce_time_stamp;
+};
+
+struct gburst_stats_data_s {
+	/**
+	 * gsd_stats_initialized - Set to !0 when initialization has been
+	 * completed.
+	 */
+	int           gsd_stats_initialized;
+
+	int           gsd_gpu_freq_mhz;
+	unsigned int  gsd_num_cores;
+	unsigned int  gsd_num_counters_hw;
+	unsigned int  gsd_first_active_counter;
+	unsigned int  gsd_last_active_counter;
+	unsigned int  gsd_num_counters;
+
+	/* gsd_pcd - performance counter data */
+	struct pcel_s gsd_pcd[MAX_NUM_CORES][MAX_NUM_COUNTERS];
+};
+
+
+/* Variables visible at file scope */
+
+struct gburst_stats_data_s gsdat;
+
+
+/* Forward references */
+static int gburst_stats_init(void);
+
+
+/**
+ * gburst_stats_initialization_complete() - Attempt initialization,
+ * if not already done.
+ *
+ * Function return value:  1 if initialized, 0 if not.
+ */
+static inline int gburst_stats_initialization_complete(void)
+{
+	if (!gsdat.gsd_stats_initialized)
+		return gburst_stats_init();
+	return 1;
+}
+
+
+/**
+ * gpu_init_perf_counters() - Initialize local data that describes the
+ * performance counters.  Causes old history to be forgotten.
+ */
+static void gpu_init_perf_counters(void)
+{
+
+	/* All zero, except for counter limit max. */
+	memset(&gsdat.gsd_pcd, 0, sizeof(gsdat.gsd_pcd));
+
+}
+
+
+/**
+ * update_perf_counter_value() - Store one hw counter into database.
+ * @ix_core - gpu core index
+ * @ndx     - our counter index
+ * @value   - raw value of counter, as read from device.
+ * @time_stamp - clocks divided by 16
+ * @counters_storable - non-zero to store and use counter value.
+ *                  Ancilliary actions may be taken regardless.
+ * Called from function gburst_stats_gfx_hw_perf_record.
+ */
+static uint32_t update_perf_counter_value(int ix_core, int ndx,
+	uint32_t value, uint32_t time_stamp, uint32_t counters_storable)
+{
+	struct pcel_s *pce;
+	int value_index;
+	uint32_t numerator;
+	uint32_t denominator;
+	uint32_t utilization;
+	uint32_t counter_coeff;
+
+	numerator = 0;
+	denominator = 0;
+	utilization = 0;
+	pce = &gsdat.gsd_pcd[ix_core][ndx];
+
+	if (counters_storable) {
+		/* Update counter data history only when periodic event arrives */
+		counter_coeff = gburst_hw_inq_counter_coeff(ndx);
+		if ((time_stamp > pce->pce_time_stamp)
+			&& (counter_coeff != 0)
+			&& (value > pce->last_value)) {
+
+			/*  Calculate counter utilization percentage */
+			numerator = 100*(value - pce->last_value);
+			denominator = counter_coeff * (time_stamp -
+				pce->pce_time_stamp);
+			utilization = (uint32_t) (numerator / denominator);
+		}
+		pce->pce_time_stamp = time_stamp;
+		pce->last_value = value;
+	} else if (time_stamp < pce->pce_time_stamp ||
+				value < pce->last_value) {
+		/* counter reset or roll over between periodic events
+		=> start over with new counter values */
+		pce->pce_time_stamp = time_stamp;
+		pce->last_value = value;
+	}
+
+	if (gburst_debug_msg_on)
+		printk(KERN_ALERT "GBUTIL: %d %d %d\n",
+			ix_core, ndx, utilization);
+
+	return utilization;
+}
+
+
+/**
+ * gburst_stats_gfx_hw_perf_counters_set() -- Specify counter visibility.
+ * @buf: A null terminated string that specifies a configuration of counters
+ * to be used for utilization computations.
+ *
+ * Function return value: < 0 for error, otherwise 0.
+ *
+ * This function allows changing which counters are visible.
+ *
+ * This function scans counter specifications from a null-terminated string
+ * which is expected to be from write to a /proc file (with explicit null
+ * termination added by this function's caller.
+ *
+ * The input string specifies which counters are to be visible as
+ * whitespace-separated (e.g. space,tab,newline) groups of: %u:%u:%u:%u:%u:%u
+ * which correspond to counter:group:bit:coeff:cntrbit:summux.
+ * then assigns the values into data structures for all cores.
+ * These per-counter values are:
+ * 1. counter - An index into this module's counter data arrays.
+ * 2. group -- The hardware "group" from which this counter is taken.
+ * 3. bit   -- The hardware bit (for this group) that selects this counter.
+ * 4. coeff -- A counter specific increment value.
+ * 5. cntrbit -- Counter bits, MSB of 16-bits group to be output to counter mux
+ * 6. summux -- SumMux register value, 1=Sum, 0=Mux. Selected counter bits are
+ *                either summed or the MSB is counted to counter
+ * Example input string: "1:1:0:16:3:1   6:0:24:32:0:0"
+ */
+int gburst_stats_gfx_hw_perf_counters_set(const char *buf)
+{
+	int i;
+	int sts;
+	uint32_t counter;
+	uint32_t group;
+	uint32_t bit;
+	uint32_t coeff;
+	unsigned int ix_core;
+	uint32_t cntrbits;
+	uint32_t summux;	
+	const int svix_counter = 0;
+	const int svix_group = 1;
+	const int svix_bit = 2;
+	const int svix_coeff = 3;
+	const int nitems = 6;
+	const int svix_cntrbits = 4;
+	const int svix_summux = 5;
+	int sval[nitems];
+	const char *pstr;
+
+	if (!gburst_stats_initialization_complete())
+		return -EINVAL;
+
+	pstr = buf;
+	for (;;) {
+		int nchrs = 0;
+
+		while (isspace(*pstr))
+			pstr++;
+
+		/**
+		 * The "%n" transfer will affect the return value, and will
+		 * only be done if there are enough characters in the input
+		 * string (e.g., terminating newline) to reach it.
+		 */
+		i = sscanf(pstr, " %u:%u:%u:%u:%u:%u%n",
+			sval+0, sval+1, sval+2, sval+3, sval+4, sval+5, &nchrs);
+		pstr += nchrs;
+		if ((*pstr != '\0') && !isspace(*pstr))
+			return -EINVAL;
+
+		if (i < 6)
+			return -EINVAL;
+
+		counter = sval[svix_counter];
+		group = sval[svix_group];
+		bit = sval[svix_bit];
+		coeff = sval[svix_coeff];
+		cntrbits = sval[svix_cntrbits];
+		summux = sval[svix_summux];
+
+		printk(KERN_INFO "#:%u, g:%u, b:%u, c:%u, cb:%u, s:%u\n",
+			counter, group, bit, coeff, cntrbits, summux);
+
+		/* Must call to gburst_hw_set_perf_status_periodic, below. */
+		sts = gburst_hw_set_counter_id(counter, group, bit, cntrbits, summux);
+		if (sts < 0)
+			return sts;
+
+		for (ix_core = 0; ix_core < gsdat.gsd_num_cores; ix_core++) {
+			gburst_hw_set_counter_coeff(counter, coeff);
+		}
+
+		/*
+		 * When all characters are handled continue to call
+		 * gburst_hw_set_perf_status_periodic()
+		 */
+		if (*pstr == '\n')
+			break;
+
+	}
+
+	return gburst_hw_set_perf_status_periodic(1);
+}
+
+
+/**
+ * gburst_stats_gfx_hw_perf_record() - Record performance info.
+ *
+ * Function return value: Utilization value (0-100) or
+ * < 0 if error (indicating not inited).
+ */
+int gburst_stats_gfx_hw_perf_record()
+{
+	int icx;            /* counter index */
+	int i;
+	int ndx;
+	int sts;
+	struct pcel_s *pce;
+	unsigned int ix_core;
+	uint32_t curr_util;
+	uint32_t sgx_util;
+	int ix_roff;
+	int ix_woff;
+
+	if (!gburst_stats_initialization_complete())
+		return -EINVAL;
+
+	if (gburst_hw_is_access_denied())
+		return -EINVAL;
+
+	curr_util = 0;
+	sgx_util = 0;
+
+	/**
+	 * The incoming data is in a circular buffer, with ix_roff the
+	 * index for reading (which this function does) and ix_woff the index
+	 * for writing.
+	 * Index ix_roff will be updated as processing is done.  The value
+	 * for index ix_woff captured here is used for determination of
+	 * buffer empty, as it would be undesirable to use the updated write
+	 * index and thereby possibly continue processing around the buffer
+	 * without end.  The buffer is empty when ix_roff == ix_woff.
+	 */
+	sts = gburst_hw_mutex_lock();
+	if (sts < 0)
+		return sts;
+
+	sts = gburst_hw_perf_data_get_indices(&ix_roff, &ix_woff);
+
+	if (sts < 0) {
+		gburst_hw_mutex_unlock();
+		return sts;
+	}
+
+	sts = gburst_hw_inq_num_counters(&gsdat.gsd_first_active_counter,
+		&gsdat.gsd_last_active_counter);
+	if (sts < 0) {
+		gburst_hw_mutex_unlock();
+		return sts;
+	}
+
+	if (ix_woff == ix_roff) {
+		gburst_hw_mutex_unlock();
+		return 0; /* return with zero utilization */
+	}
+
+	if (gburst_debug_msg_on)
+		printk(KERN_INFO "START_UTIL_CALC %d %d\n", ix_roff, ix_woff);
+
+	while (ix_woff != ix_roff) {
+		/* Time stamp is gpu clock divided by 16. */
+		uint32_t time_stamp;
+		uint32_t counters_storable;
+		uint32_t *pdat_base;
+		uint32_t *pdat;
+
+		/**
+		 * Get information a single entry in the circular buffer, which
+		 * includes information such as timestamp and all counter
+		 * values.
+		 */
+		sts = gburst_hw_perf_data_get_data(&time_stamp,
+			&counters_storable, &pdat_base);
+		if (sts < 0) {
+			gburst_hw_mutex_unlock();
+			return sts;
+		}
+
+		/**
+		 * For each active counter, store its value in the database
+		 * along with the current timestamp.
+		 */
+		pdat = pdat_base;
+		/* For each core */
+		for (ix_core = 0; ix_core < gsdat.gsd_num_cores;
+			ix_core++, pdat += gsdat.gsd_num_counters_hw) {
+			/* For each counter */
+			curr_util = update_perf_counter_value(
+				ix_core, gsdat.gsd_first_active_counter,
+				pdat[gsdat.gsd_first_active_counter],
+				time_stamp, counters_storable);
+			if (sgx_util < curr_util)
+				sgx_util = curr_util;
+
+			curr_util = update_perf_counter_value(
+				ix_core, gsdat.gsd_last_active_counter,
+				pdat[gsdat.gsd_last_active_counter],
+				time_stamp, counters_storable);
+			if (sgx_util < curr_util)
+				sgx_util = curr_util;
+
+		}
+		sts = gburst_hw_perf_data_read_index_incr(&ix_roff);
+		if (sts < 0) {
+			gburst_hw_mutex_unlock();
+			return sts;
+		}
+	}
+
+	gburst_hw_mutex_unlock();
+
+	return sgx_util;
+}
+
+
+/**
+ * gburst_stats_gfx_hw_perf_counters_to_string() -- output values to string.
+ * @ix_in: Initial index with buf at which to store string.
+ * @buf: A buffer to hold the output string.
+ * @buflen: Length of buf.
+ *
+ * Output string is guaranteed to be null-terminated.
+ *
+ * Function return value:
+ * negative error code or number of characters in output string (even
+ * if only part of the string would fit in buffer).
+ */
+int gburst_stats_gfx_hw_perf_counters_to_string(int ix_in, char *buf,
+	size_t buflen)
+{
+	int ix;
+	int ndx;
+	int sts;
+	int ix_core;
+	int ctr_grp;
+	int ctr_bit;
+	int cntrbits;
+	int summux;
+
+	if (!gburst_stats_initialization_complete())
+		return -EINVAL;
+
+	ix = ix_in;
+
+	ix = ut_isnprintf(ix, buf, buflen, "cix   grp    bit   coeff cbits sum\n");
+	ix = ut_isnprintf(ix, buf, buflen, "==================================\n");
+
+	for (ix_core = 0; ix_core < gsdat.gsd_num_cores; ix_core++) {
+		ix = ut_isnprintf(ix, buf, buflen, "Core %d:\n", ix_core);
+		for (ndx = 0; ndx < gsdat.gsd_num_counters; ndx++) {
+			sts = gburst_hw_inq_counter_id(ndx, &ctr_grp, &ctr_bit,
+				&cntrbits, &summux);
+			if (sts < 0)
+				return sts;
+
+			if (ndx == gsdat.gsd_first_active_counter ||
+				ndx == gsdat.gsd_last_active_counter)
+				ix = ut_isnprintf(ix, buf, buflen,
+				"%u:  %3u     %2u     %5u   %3u  %2u  *\n", ndx,
+				ctr_grp, ctr_bit,
+				gburst_hw_inq_counter_coeff(ndx),
+				cntrbits, summux);
+			else
+				ix = ut_isnprintf(ix, buf, buflen,
+				"%u:  %3u     %2u     %5u   %3u  %2u\n", ndx,
+				ctr_grp, ctr_bit,
+				gburst_hw_inq_counter_coeff(ndx),
+				cntrbits, summux);
+
+		}
+	}
+
+	return ix;
+}
+
+
+
+/**
+ * gburst_stats_gpu_freq_mhz_info() - Set gpu frequency.
+ * @freq_mhz: gpu frequency in MHz.
+ *
+ * The specified gpu frequency is used to update local data and potentially
+ * the graphics driver.
+ */
+int gburst_stats_gpu_freq_mhz_info(int freq_mhz)
+{
+	gsdat.gsd_gpu_freq_mhz = freq_mhz;
+
+	return gburst_hw_gpu_freq_mhz_info(freq_mhz);
+}
+
+
+/**
+ * gburst_stats_cleanup_gfx_load_data() -- clean-up gpu load information
+ * storage from all data.
+ */
+void gburst_stats_cleanup_gfx_load_data(void)
+{
+	gburst_hw_flush_buffer();
+}
+
+
+/**
+ * gburst_stats_init() -- Attempt initialization.
+ * Function return value:  1 if initialized, 0 if not.
+ */
+static int gburst_stats_init(void)
+{
+	int ncores;
+	int sts;
+
+	sts = gburst_hw_init();
+	if (sts <= 0)
+		return 0;
+
+	ncores = gburst_hw_inq_num_cores();
+
+	if (ncores > MAX_NUM_CORES) {
+		printk(KERN_ALERT
+			"%s: warning: %u cores present, limiting to %u\n",
+			__FILE__, gsdat.gsd_num_cores, MAX_NUM_CORES);
+		gsdat.gsd_num_cores = MAX_NUM_CORES;
+	} else {
+		gsdat.gsd_num_cores = ncores;
+	}
+
+	gsdat.gsd_num_counters_hw =
+	gburst_hw_inq_num_counters(&gsdat.gsd_first_active_counter,
+		&gsdat.gsd_last_active_counter);
+
+	if (gsdat.gsd_num_counters_hw > MAX_NUM_COUNTERS) {
+		printk(KERN_ALERT
+			"%s: warning: %u counters present, limiting to %u\n",
+			__FILE__, gsdat.gsd_num_counters, MAX_NUM_COUNTERS);
+		gsdat.gsd_num_counters = MAX_NUM_COUNTERS;
+	} else {
+		gsdat.gsd_num_counters = gsdat.gsd_num_counters_hw;
+	}
+
+	gpu_init_perf_counters();
+
+	sts = gburst_hw_set_perf_status_periodic(1);
+	if (sts < 0)
+		return 0;
+
+	gsdat.gsd_stats_initialized = 1;
+
+	return 1;
+}
+#endif /* if (defined CONFIG_GPU_BURST) || (defined CONFIG_GPU_BURST_MODULE) */
diff --git a/drivers/external_drivers/intel_media/graphics/gburst/gburstm.c b/drivers/external_drivers/intel_media/graphics/gburst/gburstm.c
new file mode 100644
index 0000000..7349f0c
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/gburst/gburstm.c
@@ -0,0 +1,3339 @@
+/**************************************************************************
+ * Copyright (c) 2012, Intel Corporation.
+ * All Rights Reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Dale B. Stimson <dale.b.stimson@intel.com>
+ *    Jari Luoma-aho  <jari.luoma-aho@intel.com>
+ *    Jari Nippula    <jari.nippula@intel.com>
+ */
+
+/**
+ *  To-do:
+ * - Select appropriate loglevel for each printk, instead of all ALERT.
+ * - Verify thermal cooling device properly bound to thermal zones.
+ *   At the moment, this is waiting on kernel work external to this driver.
+ *   -  The four temperature states known to the firmware are
+ *       normal, warning, alert, and critical.
+ * - Check all smp_rmb, smp_wmb, smp_mb
+ * - Access to gbprv->gbp_task protected enough?
+ * - Comment functions with particular requirements for execution context:
+ *   - Execution context: non-atomic
+ *   - Execution context: hard irq level
+	if (gbprv->gbp_task)
+ * - Preference "long battery life" should disable burst.
+ * - Low battery state should disable burst.
+ * - Check TSC freq. properly (timestamp function). Now assumes 2GHz.
+ */
+
+
+#if (defined CONFIG_GPU_BURST) || (defined CONFIG_GPU_BURST_MODULE)
+
+#include <linux/types.h>
+#include <linux/ctype.h>
+#include <linux/hrtimer.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/ktime.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/thermal.h>
+#include <linux/uaccess.h>
+#include <linux/version.h>
+#include <asm/intel-mid.h>
+#include <linux/proc_fs.h>
+#include <linux/module.h>
+
+#define GBURST_DEBUG 1
+
+#include "utilf.h"
+
+#include <gburst_interface.h>
+#include "gburst.h"
+
+#define GBURST_GLOBAL_ENABLE_DEFAULT 1
+
+#define GBURST_DRIVER_NAME "gburst"
+
+#define GBURST_HEADING GBURST_DRIVER_NAME ": "
+
+#define GBURST_ALERT KERN_ALERT GBURST_DRIVER_NAME ": "
+
+#define GBURST_VERBOSITY_WHYMSG 3
+
+/**
+ * GBURST_PFS_NAME_DIR_GBURST - directory name under /proc for gburst.
+ */
+#define GBURST_PFS_NAME_DIR_GBURST          "gburst"
+
+/**
+ * GBURST_IRQ_LEVEL - could possibly be obtained dynamically via access to
+ * platform device or via call to arch/x86/platform/intel-mid/mrst.c.
+ */
+#define GBURST_IRQ_LEVEL 73
+
+
+/**
+ * GBURST_GPU_FREQ_* - frequency specifications.
+ * Request is in PWRGT_CNT and PWRGT_STS, bits 27:24.
+ * As realized is in PWRGT_STS, bits 23:20.
+ *
+ *   Values for request or realized:
+ *   0001b = Graphics Clock is 533 MHz Unthrottled
+ *   0000b = Graphics Clock is 400 MHz Unthrottled
+ *
+ *   Values only used for realized in PWRGT_STS (not in PWRGT_CNT):
+ *   1001b = Graphics Clock is 400 MHz at 12.5% Throttled (350 MHz effective)
+ *   1010b = Graphics Clock is 400 MHz at 25%   Throttled (300 MHz effective)
+ *   1011b = Graphics Clock is 400 MHz at 37.5% Throttled (250 MHz effective)
+ *   1100b = Graphics Clock is 400 MHz at 50%   Throttled (200 MHz effective)
+ *   1101b = Graphics Clock is 400 MHz at 62.5% Throttled (150 MHz effective)
+ *   1110b = Graphics Clock is 400 MHz at 75%   Throttled (100 MHz effective)
+ *   1111b = Graphics Clock is 400 MHz at 87.5% Throttled ( 50 MHz effective)
+ */
+
+#define GBURST_GPU_FREQ_400          0x00
+#define GBURST_GPU_FREQ_533          0x01
+/* The following names are not fully descriptive, but oh, well. */
+#define GBURST_GPU_FREQ_350          0x09
+#define GBURST_GPU_FREQ_300          0x0a
+#define GBURST_GPU_FREQ_250          0x0b
+#define GBURST_GPU_FREQ_200          0x0c
+#define GBURST_GPU_FREQ_150          0x0d
+#define GBURST_GPU_FREQ_100          0x0e
+#define GBURST_GPU_FREQ_50           0x0f
+
+#define GBURST_GPU_FREQ_LEN          0x10
+
+/* MS bit of realized frequency field indicates throttled frequency. */
+#define GBURST_GPU_FREQ_THROTTLE_BIT 0x08
+
+/**
+ * gpu burst register addresses.
+ */
+#define PWRGT_CNT_PORT 0x4
+#define PWRGT_CNT_ADDR 0x60
+#define PWRGT_STS_PORT 0x4
+#define PWRGT_STS_ADDR 0x61
+
+/**
+ * gpu burst register PWRGT_CNT bits and fields.
+ * From the perspective of the OS, this register is intended only for
+ * writing and should be read only if necessary to obtain the current state
+ * of the toggle bit.
+ */
+
+/* PWRGT_CNT_TOGGLE_BIT - toggle on every write so fw can detect change. */
+#define PWRGT_CNT_TOGGLE_BIT              0x80000000
+
+/* PWRGT_CNT_INT_ENABLE_BIT - enable interrupt for freq chg notification */
+#define PWRGT_CNT_INT_ENABLE_BIT          0x40000000
+
+#define PWRGT_CNT_RESERVED_1_BIT          0x20000000
+
+/**
+ * PWRGT_CNT_ENABLE_AUTO_BURST_ENTRY_BIT -
+ * If set and the driver has requested gpu burst mode, but the request was
+ * denied by the firmware due to burst mode inhibitors (such as high temp),
+ * then when the inhibitors go away, automatically enter the previously
+ * requested mode.
+ * If not set, do not automatically enter the burst mode in that case.
+ */
+#define PWRGT_CNT_ENABLE_AUTO_BURST_ENTRY_BIT  0x10000000
+
+/**
+ * PWRGT_CNT_BURST_REQUEST_* - Burst entry/exit request from OS to fw,
+ * bits 27:24
+ */
+#define PWRGT_CNT_BURST_REQUEST_M         0x0F000000
+#define PWRGT_CNT_BURST_REQUEST_S         4
+#define PWRGT_CNT_BURST_REQUEST_P         24
+
+/* Values in the field are: GBURST_GPU_FREQ_*. */
+
+#define PWRGT_CNT_BURST_REQUEST_M_400 \
+	(GBURST_GPU_FREQ_400 << PWRGT_CNT_BURST_REQUEST_P)
+#define PWRGT_CNT_BURST_REQUEST_M_533 \
+	(GBURST_GPU_FREQ_533 << PWRGT_CNT_BURST_REQUEST_P)
+
+/* All other PWRGT_CNT bits are reserved. */
+
+/**
+ * gpu burst register PWRGT_STS bits and fields.
+ * From the perspective of the OS, this register is intended only for
+ * reading.  Except for bit 31 and new field 23:20, it more or less
+ * reflects the state of what was written to PWRGT_CNT as so far *realized*
+ * by the firmware.
+ */
+
+#define PWRGT_STS_BURST_SUPPORT_PRESENT_BIT   0x80000000
+
+/* PWRGT_STS_INT_ENABLE_BIT - interrupt enabled for freq chg notification */
+#define PWRGT_STS_INT_ENABLE_BIT          0x40000000
+
+#define PWRGT_STS_RESERVED_1_BIT          0x20000000
+
+/**
+ * PWRGT_STS_ENABLE_AUTO_BURST_ENTRY_BIT - Reflects previously set value
+ * of PWRGT_CNT_ENABLE_AUTO_BURST_ENTRY_BIT in PWRGT_CNT.
+ * See description of PWRGT_CNT_ENABLE_AUTO_BURST_ENTRY_BIT.
+ */
+#define PWRGT_STS_ENABLE_AUTO_BURST_ENTRY_BIT  0x10000000
+
+/**
+ * PWRGT_STS_BURST_REQUEST_M - Field containing GBURST_GPU_FREQ_*.
+ * as requested via PWRGT_CNT, bits 27:24
+ */
+#define PWRGT_STS_BURST_REQUEST_M         0x0F000000
+#define PWRGT_STS_BURST_REQUEST_S         4
+#define PWRGT_STS_BURST_REQUEST_P         24
+
+/**
+ * PWRGT_STS_BURST_REALIZED_M - Field containing GBURST_GPU_FREQ_*.
+ * as realized, based on request and firmware decisions,
+ * bits 23:20
+ */
+#define PWRGT_STS_BURST_REALIZED_M        0x00F00000
+#define PWRGT_STS_BURST_REALIZED_S        4
+#define PWRGT_STS_BURST_REALIZED_P        20
+
+#define PWRGT_STS_BURST_REALIZED_M_400 \
+	(GBURST_GPU_FREQ_400 << PWRGT_STS_BURST_REALIZED_P)
+
+#define PWRGT_STS_BURST_REALIZED_M_533 \
+	(GBURST_GPU_FREQ_533 << PWRGT_STS_BURST_REALIZED_P)
+
+#define PWRGT_STS_FREQ_THROTTLE_M (GBURST_GPU_FREQ_THROTTLE_BIT << \
+	PWRGT_STS_BURST_REALIZED_P)
+
+/* Macros to test for states */
+
+#define GBURST_BURST_REQUESTED(gbprv) ((gbprv->gbp_pwrgt_cnt_last_written \
+	& PWRGT_CNT_BURST_REQUEST_M) == PWRGT_CNT_BURST_REQUEST_M_533)
+
+#define GBURST_BURST_REALIZED(gbprv) ((gbprv->gbp_pwrgt_sts_last_read \
+	& PWRGT_STS_BURST_REALIZED_M) == PWRGT_STS_BURST_REALIZED_M_533)
+
+#define GBURST_BURST_THROTTLED(gbprv) (gbprv->gbp_pwrgt_sts_last_read \
+	& PWRGT_STS_FREQ_THROTTLE_M)
+
+
+/**
+ * THERMAL_COOLING_DEVICE_MAX_STATE - The maximum cooling state that
+ * gburst (as a thermal cooling device by non-bursting) support.
+ */
+#define THERMAL_COOLING_DEVICE_MAX_STATE 1
+
+
+#define GBURST_TIMER_PERIOD_DEFAULT_USECS 5000
+
+#define GBURST_THRESHOLD_DEFAULT_HIGH       80
+#define GBURST_THRESHOLD_DEFAULT_DOWN_DIFF  20
+
+/**
+ * Burst dynamic control parameters
+ * VSYNC_FRAMES      - number of frames rendered within vsync time before
+ * we deny burst request even if utilization would indicate otherwise
+ * FRAME_TIME_BUFFER - additional buffer after frame 'ready' before the
+ * next vsync event (as CPU cycles)
+ * FRAME_DURATION    - frame time as max. CPU freq cycles, given system
+ * latencies this value is taken as 17ms
+ * OFFSCREEN_TIME    - time to last resume even after which we infer that
+ * we have an offscreen rendering case (or a very long frame rendering)
+ */
+#define VSYNC_FRAMES         1
+#define FRAME_TIME_BUFFER    ((unsigned long long)(0))
+#define OFFSCREEN_FRAMES     ((unsigned long long)(20))
+#define FRAME_DURATION       ((unsigned long long)(34000000))
+#define OFFSCREEN_TIME       (OFFSCREEN_FRAMES*FRAME_DURATION)
+
+/**
+ * timestamp - Helper used when storing internal timestamps used for burst control
+ */
+unsigned long long timestamp(void)
+{
+	unsigned int a, d;
+	__asm__ volatile("rdtsc" : "=a" (a), "=d" (d));
+	return ((unsigned long long)a) | (((unsigned long long)d) << 32);
+}
+
+
+/**
+ * pfs_data - Structure to describe one file under /proc/gburst.
+ */
+struct pfs_data {
+	const char   *pfd_file_name;
+	ssize_t (*pfd_func_read) (struct file *, char __user *, size_t, loff_t *);
+	ssize_t (*pfd_func_write) (struct file *, const char __user *, size_t, loff_t *);
+	mode_t        pfd_mode;
+};
+
+
+/*
+ * Forward references for procfs read and write functions:
+ */
+
+static int pfs_debug_message_read(struct file *file, char __user *buf,
+        size_t nbytes,loff_t *ppos);
+static int pfs_debug_message_write(struct file *file,
+	const char *buffer, size_t count, loff_t *ppos);
+static int pfs_disable_read(struct file *file, char __user *buf,
+        size_t nbytes,loff_t *ppos);
+static int pfs_disable_write(struct file *file,
+	const char *buffer, size_t count, loff_t *ppos);
+static int pfs_dump_read(struct file *file, char __user *buf,
+        size_t nbytes,loff_t *ppos);
+static int pfs_enable_read(struct file *file, char __user *buf,
+        size_t nbytes,loff_t *ppos);
+static int pfs_enable_write(struct file *file,
+	const char *buffer, size_t count, loff_t *ppos);
+static int pfs_gpu_monitored_counters_read(struct file *file, char __user *buf,
+        size_t nbytes,loff_t *ppos);
+static int pfs_gpu_monitored_counters_write(struct file *file,
+	const char *buffer, size_t count, loff_t *ppos);
+static int pfs_pwrgt_sts_read(struct file *file, char __user *buf,
+        size_t nbytes,loff_t *ppos);
+static int pfs_state_times_read(struct file *file, char __user *buf,
+        size_t nbytes,loff_t *ppos);
+static int pfs_state_times_write(struct file *file,
+	const char *buffer, size_t count, loff_t *ppos);
+static int pfs_thermal_override_read(struct file *file, char __user *buf,
+        size_t nbytes,loff_t *ppos);
+static int pfs_thermal_override_write(struct file *file,
+	const char *buffer, size_t count, loff_t *ppos);
+static int pfs_thermal_state_read(struct file *file, char __user *buf,
+        size_t nbytes,loff_t *ppos);
+static int pfs_gb_threshold_down_diff_read(struct file *file, char __user *buf,
+        size_t nbytes,loff_t *ppos);
+static int pfs_gb_threshold_down_diff_write(struct file *file,
+	const char *buffer, size_t count, loff_t *ppos);
+static int pfs_gb_threshold_high_read(struct file *file, char __user *buf,
+        size_t nbytes,loff_t *ppos);
+static int pfs_gb_threshold_high_write(struct file *file,
+	const char *buffer, size_t count, loff_t *ppos);
+static int pfs_gb_threshold_low_read(struct file *file, char __user *buf,
+        size_t nbytes,loff_t *ppos);
+static int pfs_gb_threshold_low_write(struct file *file,
+	const char *buffer, size_t count, loff_t *ppos);
+static int pfs_timer_period_usecs_read(struct file *file, char __user *buf,
+        size_t nbytes,loff_t *ppos);
+static int pfs_timer_period_usecs_write(struct file *file,
+	const char *buffer, size_t count, loff_t *ppos);
+static int pfs_utilization_read(struct file *file, char __user *buf,
+        size_t nbytes,loff_t *ppos);
+static int pfs_utilization_override_read(struct file *file, char __user *buf,
+        size_t nbytes,loff_t *ppos);
+static int pfs_utilization_override_write(struct file *file,
+	const char *buffer, size_t count, loff_t *ppos);
+static int pfs_verbosity_read(struct file *file, char __user *buf,
+        size_t nbytes,loff_t *ppos);
+static int pfs_verbosity_write(struct file *file,
+	const char *buffer, size_t count, loff_t *ppos);
+
+
+/**
+ * pfs_tab -- table specifying each gburst file under /proc/gburst.
+ */
+static const struct pfs_data pfs_tab[] = {
+	{ "debug_message",
+		pfs_debug_message_read,
+		pfs_debug_message_write,
+		0644, },
+	{ "disable",
+		pfs_disable_read,
+		pfs_disable_write,
+		0666, },
+	{ "dump",
+		pfs_dump_read,
+		NULL,
+		0644, },
+	{ "enable",
+		pfs_enable_read,
+		pfs_enable_write,
+		0644, },
+	{ "monitored",
+		pfs_gpu_monitored_counters_read,
+		pfs_gpu_monitored_counters_write,
+		0644, },
+	{ "pwrgt_sts",
+		pfs_pwrgt_sts_read,
+		NULL,
+		0644, },
+	{ "state_times",
+		pfs_state_times_read,
+		pfs_state_times_write,
+		0644, },
+	{ "thermal_override",
+		pfs_thermal_override_read,
+		pfs_thermal_override_write,
+		0644, },
+	{ "thermal_state",
+		pfs_thermal_state_read,
+		NULL,
+		0644, },
+	{ "threshold_down_diff",
+		pfs_gb_threshold_down_diff_read,
+		pfs_gb_threshold_down_diff_write,
+		0644, },
+	{ "threshold_high",
+		pfs_gb_threshold_high_read,
+		pfs_gb_threshold_high_write,
+		0644, },
+	{ "threshold_low",
+		pfs_gb_threshold_low_read,
+		pfs_gb_threshold_low_write,
+		0644, },
+	{ "timer_period_usecs",
+		pfs_timer_period_usecs_read,
+		pfs_timer_period_usecs_write,
+		0644, },
+	{ "utilization",
+		pfs_utilization_read,
+		NULL,
+		0644, },
+	{ "utilization_override",
+		pfs_utilization_override_read,
+		pfs_utilization_override_write,
+		0644, },
+	{ "verbosity",
+		pfs_verbosity_read,
+		pfs_verbosity_write,
+		0644, },
+};
+
+
+/**
+ * freq_mhz_table - "local data" array translating from frequency code to
+ * associated frequency in MHz.
+ */
+static const int freq_mhz_table[GBURST_GPU_FREQ_LEN] = {
+	[GBURST_GPU_FREQ_400] = 400,
+	[GBURST_GPU_FREQ_533] = 533,
+	[GBURST_GPU_FREQ_350] = 350,
+	[GBURST_GPU_FREQ_300] = 300,
+	[GBURST_GPU_FREQ_250] = 250,
+	[GBURST_GPU_FREQ_200] = 200,
+	[GBURST_GPU_FREQ_150] = 150,
+	[GBURST_GPU_FREQ_100] = 100,
+	[GBURST_GPU_FREQ_50]  =  50,
+};
+
+struct gb_state_times_s {
+	struct timespec         gst_uptime;
+	struct timespec         gst_time_gfx_power;
+	struct timespec         gst_time_burst_requested;
+	struct timespec         gst_time_burst_realized;
+	struct timespec         gst_time_throttled;
+};
+
+
+/**
+ * struct gburst_pvt_s - gburst private data
+ */
+struct gburst_pvt_s {
+	struct hrtimer          gbp_timer;
+	struct proc_dir_entry  *gbp_proc_parent;
+	struct proc_dir_entry  *gbp_proc_gburst;
+	struct thermal_cooling_device *gbp_cooldv_hdl;
+
+	/* gbp_task - pointer to task structure for work thread or NULL. */
+	struct task_struct     *gbp_task;
+
+	struct mutex            gbp_mutex_pwrgt_sts;
+
+	/* gbp_hrt_period - Period for timer interrupts as a ktime_t. */
+	ktime_t                 gbp_hrt_period;
+
+	/* gbp_pfs_handle */
+	struct proc_dir_entry  *gbp_pfs_handle[ARRAY_SIZE(pfs_tab)];
+
+	/**
+	 * Multipe time values, all updated at once.
+	 * All access to these times protected by mutex.
+	 */
+	struct mutex            gbp_state_times_mutex;
+	struct gb_state_times_s gbp_state_times;
+	int                     gbp_state_time_header;
+
+	int                     gbp_initialized;
+	int                     gbp_suspended;
+	int                     gbp_thread_check_utilization;
+
+#if GBURST_DEBUG
+	unsigned int            gbp_interrupt_count;
+	unsigned int            gbp_thread_work_count;
+	unsigned int            gbp_thermal_state_change_count;
+
+	unsigned int            gbp_suspend_count;
+	unsigned int            gbp_resume_count;
+#endif /* if GBURST_DEBUG */
+
+	int                     gbp_cooldv_state_cur;
+	int                     gbp_cooldv_state_prev;
+	int                     gbp_cooldv_state_highest;
+	int                     gbp_cooldv_state_override;
+
+	/*  1 if disable requested via /proc/gburst/disable */
+	int                     gbp_request_disable;
+
+	/*  1 if enable requested via /proc/gburst/enable */
+	int                     gbp_request_enable;
+
+	/* gbp_enable - Usually 1.  If 0, gpu burst is disabled. */
+	int                     gbp_enable;
+	int                     gbp_timer_is_enabled;
+
+	/**
+	 * Utilization and threshold values, in percent, 0 to 100, or
+	 * -1 if utilization not yet read.
+	 */
+	int                     gbp_utilization_percentage;
+	int                     gbp_utilization_override;
+
+	int                     gbp_burst_th_high;
+
+	/**
+	 * gbp_burst_th_down_diff and gbp_burst_th_low
+	 * are related.  One of them (selected by gbp_thold_via) is definitive
+	 * and the other is computed from the high and definitive values.
+	 */
+	enum {
+		GBP_THOLD_VIA_LOW,
+		GBP_THOLD_VIA_DOWN_DIFF
+	}                       gbp_thold_via;
+
+	int                     gbp_burst_th_down_diff;
+	int                     gbp_burst_th_low;
+
+	u32                     gbp_pwrgt_cnt_toggle_bit;
+	u32                     gbp_pwrgt_sts_last_read;
+	u32                     gbp_pwrgt_cnt_last_written;
+
+	/**
+	 * Burst dynamic control parameters
+	 */
+	unsigned long long	gbp_resume_time;
+	int			gbp_offscreen_rendering;
+	int			gbp_num_of_vsync_limited_frames;
+};
+
+
+/* Global variables.  */
+int gburst_debug_msg_on;
+
+static struct gburst_pvt_s gburst_private_data;
+
+/**
+ * gburst_private_ptr - Static place to save handle for access at module unload.
+ * There will never be more than a single instantiation of this driver.
+ */
+static struct gburst_pvt_s *gburst_private_ptr;
+
+
+/**
+ * Module parameters:
+ *
+ * - can be updated (if permission allows) via writing:
+ *     /sys/module/gburst/parameters/<name>
+ * - can be set at module load time:
+ *     insmod /lib/modules/gburst.ko enable=0
+ * - For built-in modules, can be on kernel command line:
+ *     gburst.enable=0
+ */
+
+/**
+ * module parameter "enable" is not writable in sysfs as there is presently
+ * no code to detect the transition between 0 and 1.
+ */
+static unsigned int mprm_enable = GBURST_GLOBAL_ENABLE_DEFAULT;
+module_param_named(enable, mprm_enable, uint, S_IRUGO);
+
+static unsigned int mprm_verbosity = 1;
+module_param_named(verbosity, mprm_verbosity, uint, S_IRUGO|S_IWUSR);
+
+
+#define DRIVER_AUTHOR "Intel Corporation"
+#define DRIVER_DESC "gpu burst driver for Intel Clover Trail Plus"
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+
+/**
+ *  MODULE_VERSION - Allows specification of a module version.
+ *  Version of form [<epoch>:]<version>[-<extra-version>].
+ *  Or for CVS/RCS ID version, everything but the number is stripped.
+ * <epoch>: A (small) unsigned integer which allows you to start versions
+ *          anew. If not mentioned, it's zero.  eg. "2:1.0" is after
+ *          "1:2.0".
+ * <version>: The <version> may contain only alphanumerics and the
+ *          character `.'.  Ordered by numeric sort for numeric parts,
+ *          ascii sort for ascii parts (as per RPM or DEB algorithm).
+ * <extraversion>: Like <version>, but inserted for local
+ *          customizations, eg "rh3" or "rusty1".
+
+ * Using this automatically adds a checksum of the .c files and the
+ * local headers in "srcversion".
+ *
+ * Also, if the module is under drivers/staging, this causes a warning to
+ * be issued:
+ *     <mname>: module is from the staging directory, the quality is unknown,
+ *     you have been warned.
+ *
+ * Example invocation:
+ *     MODULE_VERSION("0.1");
+ */
+
+
+/**
+ * update_state_times() - Uptime times, current states.
+ * @gbprv: gb handle.
+ * @gst:   NULL or pointer to receive struct gb_state_times_s output.
+ */
+static void update_state_times(struct gburst_pvt_s *gbprv,
+	struct gb_state_times_s *gst)
+{
+	struct timespec ts;
+	time_t delta_sec;
+	s64 delta_nsec;
+	struct gb_state_times_s *pst = &gbprv->gbp_state_times;
+
+	mutex_lock(&gbprv->gbp_state_times_mutex);
+
+	get_monotonic_boottime(&ts);
+
+	delta_sec = ts.tv_sec - pst->gst_uptime.tv_sec;
+	delta_nsec = ts.tv_nsec - pst->gst_uptime.tv_nsec;
+
+	pst->gst_uptime = ts;
+
+	if (GBURST_BURST_REQUESTED(gbprv)) {
+		set_normalized_timespec(&pst->gst_time_burst_requested,
+			pst->gst_time_burst_requested.tv_sec + delta_sec,
+			pst->gst_time_burst_requested.tv_nsec + delta_nsec);
+	}
+
+	if (!gbprv->gbp_suspended) {
+		set_normalized_timespec(&pst->gst_time_gfx_power,
+			pst->gst_time_gfx_power.tv_sec + delta_sec,
+			pst->gst_time_gfx_power.tv_nsec + delta_nsec);
+	}
+
+	if (GBURST_BURST_REALIZED(gbprv)) {
+		set_normalized_timespec(&pst->gst_time_burst_realized,
+			pst->gst_time_burst_realized.tv_sec + delta_sec,
+			pst->gst_time_burst_realized.tv_nsec + delta_nsec);
+	}
+
+	if (GBURST_BURST_THROTTLED(gbprv)) {
+		set_normalized_timespec(&pst->gst_time_throttled,
+			pst->gst_time_throttled.tv_sec + delta_sec,
+			pst->gst_time_throttled.tv_nsec + delta_nsec);
+	}
+
+	if (gst)
+		*gst = *pst;
+
+	mutex_unlock(&gbprv->gbp_state_times_mutex);
+}
+
+
+/**
+ * write_PWRGT_CNT() - Write PUnit register PWRGT_CNT via MBI
+ * (Message Bus Interface).
+ * @gbprv: gb handle.
+ * @value: value to be written to the register.
+ */
+static void write_PWRGT_CNT(struct gburst_pvt_s *gbprv, u32 value)
+{
+	u32 wvl;
+
+	/*
+	 * Change the state of the toggle bit.  Its state must be reversed
+	 * for every write to this register.
+	 */
+	gbprv->gbp_pwrgt_cnt_toggle_bit ^= PWRGT_CNT_TOGGLE_BIT;
+
+	wvl = (value & ~PWRGT_CNT_TOGGLE_BIT) | gbprv->gbp_pwrgt_cnt_toggle_bit;
+
+	intel_mid_msgbus_write32(PWRGT_CNT_PORT, PWRGT_CNT_ADDR, wvl);
+
+	/**
+	 * If the requested burst state is being changed from before, update
+	 * the cumulative times spent in particular states.
+	 */
+	if ((wvl ^ gbprv->gbp_pwrgt_cnt_last_written)
+		& PWRGT_CNT_BURST_REQUEST_M) {
+		/**
+		  * Uptime cumulative times.
+		  * Important: Not yet changed:
+		  * gbprv->gbp_pwrgt_cnt_last_written
+		  */
+		update_state_times(gbprv, NULL);
+	}
+
+	gbprv->gbp_pwrgt_cnt_last_written = wvl;
+}
+
+
+/**
+ * read_PWRGT_CNT_toggle() - Read PUnit register PWRGT_CNT via MBI
+ * (Message Bus Interface).
+ * Warning:  The HAS specifies that this register may be read only in
+ * order to determine the current setting of the toggle bit (bit 31).
+ * @gbprv: gb handle.
+ */
+static void read_PWRGT_CNT_toggle(struct gburst_pvt_s *gbprv)
+{
+	u32 uval;
+
+	uval = intel_mid_msgbus_read32(PWRGT_CNT_PORT, PWRGT_CNT_ADDR);
+
+	gbprv->gbp_pwrgt_cnt_toggle_bit = (uval & PWRGT_CNT_TOGGLE_BIT);
+}
+
+
+/**
+ * generate_freq_string() - Convert frequency enum to a string.
+ * @freq_enum: Frequency enum: GBURST_GPU_FREQ_* .
+ *             Must be in the range 0 to 15.
+ * @sbuf: Buffer in which string result is returned.
+ * @slen: size of sbuf.
+ * Function return value: String describing the frequency.
+ */
+static const char *generate_freq_string(u32 freq_enum, char *sbuf, int slen)
+{
+	if (freq_mhz_table[freq_enum] != 0)
+		snprintf(sbuf, slen, "%d MHz", freq_mhz_table[freq_enum]);
+	else
+		snprintf(sbuf, slen, "unrecognized_code_%u", freq_enum);
+
+	return sbuf;
+}
+
+
+/**
+ * read_PWRGT_STS_simple() - Read register PWRGT_STS.
+ * Restriction to non-atomic context by intel_mid_msgbus_read32 use of
+ * pci_get_bus_and_slot.
+ * Execution context: non-atomic
+ */
+static inline u32 read_PWRGT_STS_simple(void)
+{
+	return intel_mid_msgbus_read32(PWRGT_STS_PORT, PWRGT_STS_ADDR);
+}
+
+
+/**
+ * hrt_start() - start (or restart) timer.
+ * @gbprv: gb handle.
+ */
+static void hrt_start(struct gburst_pvt_s *gbprv)
+{
+	if (gbprv->gbp_enable) {
+		if (gbprv->gbp_timer_is_enabled) {
+			/* Due to the gbp_timer is auto-restart timer
+			 * in most case, we must use hrtimer_cancel
+			 * it at first if it is in active state, to avoid
+			 * hitting the BUG_ON(timer->state !=
+			 * HRTIMER_STATE_CALLBACK) in hrtimer.c.
+			*/
+			hrtimer_cancel(&gbprv->gbp_timer);
+		} else {
+			gbprv->gbp_timer_is_enabled = 1;
+		}
+
+		hrtimer_start(&gbprv->gbp_timer, gbprv->gbp_hrt_period,
+			HRTIMER_MODE_REL);
+	}
+}
+
+
+/**
+ * hrt_cancel() - cancel a timer.
+ * @gbprv: gb handle.
+ */
+static void hrt_cancel(struct gburst_pvt_s *gbprv)
+{
+	/* The timer can be restarted with hrtimer_start. */
+	hrtimer_cancel(&gbprv->gbp_timer);
+
+	gbprv->gbp_timer_is_enabled = 0;
+}
+
+
+/**
+ * set_state_pwrgt_cnt() - write bits to pwrgt control register.
+ * @gbprv: gb handle.
+ * @more_bits_to_set: Additional bits to set, beyond those that are set
+ *     automatically.
+ */
+static void set_state_pwrgt_cnt(struct gburst_pvt_s *gbprv,
+	u32 more_bits_to_set)
+{
+	u32 gt_cnt;
+
+	gt_cnt = 0;
+
+	smp_rmb();
+	if (gbprv->gbp_initialized && gbprv->gbp_enable) {
+		gt_cnt |= PWRGT_CNT_INT_ENABLE_BIT | more_bits_to_set;
+
+		if ((gt_cnt & PWRGT_CNT_BURST_REQUEST_M)
+			== PWRGT_CNT_BURST_REQUEST_M_533)
+			gt_cnt |= PWRGT_CNT_ENABLE_AUTO_BURST_ENTRY_BIT;
+	}
+
+	write_PWRGT_CNT(gbprv, gt_cnt);
+}
+
+
+/**
+ * read_and_process_PWRGT_STS() - Read PUnit register PWRGT_STS
+ * @gbprv: gb handle.
+ */
+static u32 read_and_process_PWRGT_STS(struct gburst_pvt_s *gbprv)
+{
+	u32 uval;
+	u32 valprv;
+
+	mutex_lock(&gbprv->gbp_mutex_pwrgt_sts);
+
+	uval = read_PWRGT_STS_simple();
+
+	valprv = gbprv->gbp_pwrgt_sts_last_read;
+
+	/**
+	 * If either the burst_request or the burst_realized states have
+	 * changed (as evidenced by their bit fields within this register),
+	 * then process the new state.
+	 */
+	if ((uval ^ valprv) & (PWRGT_STS_BURST_REQUEST_M
+			| PWRGT_STS_BURST_REALIZED_M)) {
+		int freq_code;
+		int freq_mhz;
+
+		/**
+		  * Uptime cumulative times.
+		  * Important: Not yet changed: gbprv->gbp_pwrgt_sts_last_read
+		  */
+		update_state_times(gbprv, NULL);
+
+		freq_code = (uval & PWRGT_STS_BURST_REALIZED_M) >>
+			PWRGT_STS_BURST_REALIZED_P;
+		freq_mhz = freq_mhz_table[freq_code];
+
+		/**
+		 * If either the burst_realized state has changed (as
+		 * evidenced by their bit fields within this register), then
+		 * process the new state.
+		 */
+		if (!gbprv->gbp_suspended &&
+			((uval ^ valprv) & PWRGT_STS_BURST_REALIZED_M)
+			&& (freq_mhz != 0)) {
+#if GBURST_UPDATE_GPU_TIMING
+			PVRSRV_ERROR eError;
+			eError = PVRSRVPowerLock(KERNEL_ID, IMG_FALSE);
+			if (eError == PVRSRV_OK) {
+				/**
+				 * Tell graphics subsystem the updated frequency,
+				 * including both pvr km and utilization computations
+				 * (which may or may not use the information).
+				 */
+				gburst_stats_gpu_freq_mhz_info(freq_mhz);
+				PVRSRVPowerUnlock(KERNEL_ID);
+			}
+#else
+			/**
+			 * Tell graphics subsystem the updated frequency,
+			 * including both pvr km and utilization computations
+			 * (which may or may not use the information).
+			 */
+			gburst_stats_gpu_freq_mhz_info(freq_mhz);
+#endif
+		}
+
+		if (mprm_verbosity >= 2) {
+			int freq_code_req;
+			freq_code_req = (uval & PWRGT_STS_BURST_REQUEST_M) >>
+				PWRGT_STS_BURST_REQUEST_P;
+
+			if (freq_code_req != freq_code) {
+				printk(GBURST_ALERT
+					"freq req/rlzd = %d/%d MHz\n",
+					freq_mhz_table[freq_code_req],
+					freq_mhz);
+			} else {
+				printk(GBURST_ALERT "freq = %d MHz\n",
+					freq_mhz);
+			}
+		}
+
+		/* If GPU clock throttling is entered or exited... */
+		if ((valprv ^ uval) & PWRGT_STS_FREQ_THROTTLE_M) {
+			if (uval & PWRGT_STS_FREQ_THROTTLE_M) {
+				/* GPU clock throttling state entered... */
+				hrt_cancel(gbprv);
+
+				if ((gbprv->gbp_pwrgt_cnt_last_written
+					& PWRGT_CNT_BURST_REQUEST_M)
+					!= PWRGT_CNT_BURST_REQUEST_M_400) {
+					/**
+					 * Remove any outstanding burst
+					 * request.
+					 */
+					set_state_pwrgt_cnt(gbprv,
+						PWRGT_CNT_BURST_REQUEST_M_400);
+				}
+			} else  {
+				/* GPU clock throttling state exited... */
+				hrt_start(gbprv);
+			}
+		}
+	}
+
+	gbprv->gbp_pwrgt_sts_last_read = uval;
+
+	mutex_unlock(&gbprv->gbp_mutex_pwrgt_sts);
+
+	return uval;
+}
+
+
+#define GBURST_VERBOSE_EXPLANATION 1
+
+/**
+ * desired_burst_state_query() - determine desired burst state.
+ * @gbprv: gb handle.
+ * @p_whymsg: An explanatory string.
+ * @sbuf: A buffer that may be used to store a string.
+ * @slen: length of sbuf.
+ *
+ * Function return values:
+ * 0 -> request un-burst,
+ * 1 -> request burst,
+ * 2 -> no change.
+ */
+static int desired_burst_state_query(struct gburst_pvt_s *gbprv,
+	const char **p_whymsg
+#if GBURST_VERBOSE_EXPLANATION
+	, char *sbuf, int slen
+#endif /* if GBURST_VERBOSE_EXPLANATION */
+	)
+{
+	int utilpct;
+	int thermal_state;
+
+	if (gbprv->gbp_utilization_override >= 0)
+		utilpct = gbprv->gbp_utilization_override;
+	else
+		utilpct = gbprv->gbp_utilization_percentage;
+
+	if (gbprv->gbp_cooldv_state_override >= 0)
+		thermal_state = gbprv->gbp_cooldv_state_override;
+	else
+		thermal_state = gbprv->gbp_cooldv_state_cur;
+
+	smp_rmb();
+	if (!gbprv->gbp_initialized) {
+		*p_whymsg = "!gbprv->gbp_initialized";
+		return 0;
+	}
+
+	if (!gbprv->gbp_enable) {
+		*p_whymsg = "!enable";
+		return 0;
+	}
+
+	if (gbprv->gbp_suspended) {
+		*p_whymsg = "suspended";
+		return 0;
+	}
+
+	if (thermal_state != 0) {
+#if GBURST_VERBOSE_EXPLANATION
+		if (mprm_verbosity >= GBURST_VERBOSITY_WHYMSG) {
+			if (gbprv->gbp_cooldv_state_override >= 0) {
+				snprintf(sbuf, slen,
+					"thermal_state = %d (%d)",
+					gbprv->gbp_cooldv_state_override,
+					gbprv->gbp_cooldv_state_cur);
+			} else {
+				snprintf(sbuf, slen,
+					"thermal_state = %d",
+					gbprv->gbp_cooldv_state_cur);
+			}
+			*p_whymsg = sbuf;
+		} else
+#endif /* if !GBURST_VERBOSE_EXPLANATION */
+		{
+			*p_whymsg = "thermal_state != 0";
+		}
+		return 0;
+	}
+
+	/**
+	 * Utilization values and utilization thresholds are represented as
+	 * a number from 0 through 100, which is considered a percentage of
+	 * nominal maximum utilization.
+	 *
+	 * When current utilization (range 0..100) falls below
+	 * gbprv->gbp_burst_th_low (which is known as threshold_low), then
+	 * this driver removes any outstanding request for gpu clock burst.
+	 *
+	 * When current utilization (range 0..100) rises above
+	 * gbprv->gbp_burst_th_high (which is known as threshold_high), then
+	 * (if not already doing so) this driver submits a request for gpu
+	 * clock burst.
+	 *
+	 * Normally, the threshold_low is less than threshold_high,and the
+	 * difference between them represents a range of values that provide
+	 * hystersis.
+	 *
+	 * In order to facilitate testing and validation, the following
+	 * special case "magic" numbers are implemented:
+	 *
+	 *   threshold_low == threshold_high == 0
+	 *       Force a request for burst mode.
+	 *   threshold_low == threshold_high == 100
+	 *       Force no request for burst mode.
+	 */
+
+	if ((gbprv->gbp_burst_th_low == 100)
+		&& (gbprv->gbp_burst_th_high == 100)) {
+		/**
+		 * Threshold values (normal range 0 to 100) are both set to
+		 * 100, so force that no request be made for burst.
+		 */
+#if GBURST_VERBOSE_EXPLANATION
+		if (mprm_verbosity >= GBURST_VERBOSITY_WHYMSG) {
+			snprintf(sbuf, slen,
+				"util == %d, forced_non_burst_request ",
+				gbprv->gbp_utilization_percentage);
+			*p_whymsg = sbuf;
+		} else
+#endif
+		{
+			*p_whymsg = "forced_non_burst_request";
+		}
+		return 0;
+	}
+
+	if ((gbprv->gbp_burst_th_low == 0)
+		&& (gbprv->gbp_burst_th_high == 0)) {
+		/**
+		 * Threshold values (normal range 0 to 100) are both set to
+		 * 0, so force that a request will be made for burst.
+		 */
+#if GBURST_VERBOSE_EXPLANATION
+		if (mprm_verbosity >= GBURST_VERBOSITY_WHYMSG) {
+			snprintf(sbuf, slen,
+				"util == %d, forced_burst_request ",
+				gbprv->gbp_utilization_percentage);
+			*p_whymsg = sbuf;
+		} else
+#endif
+		{
+			*p_whymsg = "forced_burst_request";
+		}
+		return 1;
+	}
+
+	if (utilpct <= gbprv->gbp_burst_th_low) {
+#if GBURST_VERBOSE_EXPLANATION
+		if (mprm_verbosity >= GBURST_VERBOSITY_WHYMSG) {
+			if (gbprv->gbp_utilization_override >= 0) {
+				snprintf(sbuf, slen,
+					"util (%d (%d)) <= threshold_low (%d)",
+					gbprv->gbp_utilization_override,
+					gbprv->gbp_utilization_percentage,
+					gbprv->gbp_burst_th_low);
+			} else {
+				snprintf(sbuf, slen,
+					"util (%d) <= threshold_low (%d)",
+					gbprv->gbp_utilization_percentage,
+					gbprv->gbp_burst_th_low);
+			}
+			*p_whymsg = sbuf;
+		} else
+#endif
+		{
+			*p_whymsg = "below threshold_low";
+		}
+		return 0;
+	}
+
+	if (utilpct >= gbprv->gbp_burst_th_high) {
+#if GBURST_VERBOSE_EXPLANATION
+		if (mprm_verbosity >= GBURST_VERBOSITY_WHYMSG) {
+			if (gbprv->gbp_utilization_override >= 0) {
+				snprintf(sbuf, slen,
+					"util (%d (%d)) >= threshold_high (%d)",
+					gbprv->gbp_utilization_override,
+					gbprv->gbp_utilization_percentage,
+					gbprv->gbp_burst_th_high);
+			} else {
+				snprintf(sbuf, slen,
+					"util (%d) >= threshold_high (%d)",
+					gbprv->gbp_utilization_percentage,
+					gbprv->gbp_burst_th_high);
+			}
+			*p_whymsg = sbuf;
+		} else
+#endif
+		{
+			*p_whymsg = "above threshold_high";
+		}
+		return 1;
+	}
+
+	/* No change, return same as before. */
+	*p_whymsg = "same as before";
+	return 2;
+}
+
+
+/**
+ * request_desired_burst_mode() - Determine and issue a request for the
+ * desired burst state.
+ * @gbprv: gb handle.
+ */
+static void request_desired_burst_mode(struct gburst_pvt_s *gbprv)
+{
+	int rva;
+	u32 reqbits;
+	const char *whymsg;
+	int burst_request_prev;
+#if GBURST_VERBOSE_EXPLANATION
+	char sbuf[64];
+#endif /* if GBURST_VERBOSE_EXPLANATION */
+
+	smp_rmb();
+	if (!gbprv->gbp_initialized)
+		return;
+
+	if (gbprv->gbp_offscreen_rendering) {
+		if (!GBURST_BURST_REQUESTED(gbprv)) {
+			reqbits = PWRGT_CNT_BURST_REQUEST_M_533;
+			set_state_pwrgt_cnt(gbprv, reqbits);
+		}
+	} else {
+
+		rva = desired_burst_state_query(gbprv, &whymsg
+#if GBURST_VERBOSE_EXPLANATION
+			, sbuf, sizeof(sbuf)
+#endif /* if GBURST_VERBOSE_EXPLANATION */
+			);
+
+		/**
+		* The value returned by desired_burst_state_query indicates
+		* the desired burst state, vis a vis the presentvburst state.
+		* 0 -> request un-burst
+		* 1 -> request burst
+		* 2 -> no change.
+		*/
+
+		/* Get previous burst_request state. */
+		burst_request_prev = GBURST_BURST_REQUESTED(gbprv);
+
+		/**
+		 * If desired burst_request state changed, then issue the request.
+		 */
+		if ((rva != 2) && (rva != burst_request_prev)) {
+			if ((rva) && (gbprv->gbp_num_of_vsync_limited_frames < VSYNC_FRAMES))
+				reqbits = PWRGT_CNT_BURST_REQUEST_M_533;
+			else
+				reqbits = PWRGT_CNT_BURST_REQUEST_M_400;
+
+			set_state_pwrgt_cnt(gbprv, reqbits);
+		}
+	}
+}
+
+
+/**
+ * wake_thread() - Wake the work thread.
+ * @gbprv: gb handle.
+ */
+static void wake_thread(struct gburst_pvt_s *gbprv)
+{
+	if (gbprv->gbp_task)
+		wake_up_process(gbprv->gbp_task);
+}
+
+
+/**
+ * hrt_event_processor() - Process timer-driven things.
+ * Called by kernel hrtimer system when the timer expires.
+ * @hrthdl: Pointer to the associated hrtimer struct.
+ *
+ * Execution context: hard irq level.
+ * Invoked via interrupt/callback.
+ */
+static enum hrtimer_restart hrt_event_processor(struct hrtimer *hrthdl)
+{
+	struct gburst_pvt_s *gbprv =
+		container_of(hrthdl, struct gburst_pvt_s, gbp_timer);
+	ktime_t mc_now;
+
+	smp_rmb();
+
+	if (gbprv->gbp_initialized && gbprv->gbp_enable &&
+		!gbprv->gbp_suspended) {
+		gbprv->gbp_thread_check_utilization = 1;
+		smp_wmb();
+		wake_thread(gbprv);
+	}
+
+	if (!gbprv->gbp_timer_is_enabled)
+		return HRTIMER_NORESTART;
+
+	mc_now = ktime_get();
+	hrtimer_forward(hrthdl, mc_now, gbprv->gbp_hrt_period);
+
+	return HRTIMER_RESTART;
+}
+
+
+/**
+ * thread_action() - Perform desired thread actions when woken due to
+ * interrupt or timer expiration.
+ * @gbprv: gb handle.
+ *
+ * Called only from work_thread_loop.
+ * Function return value:
+ * 0 to request thread exit.  Either an error or gpu burst is disabled.
+ * 1 otherwise.
+ */
+static int thread_action(struct gburst_pvt_s *gbprv)
+{
+	int gpustate;
+	int utilpct;
+	unsigned long long ctime;
+	unsigned long long delta;
+
+	smp_rmb();
+	if (!gbprv->gbp_initialized || gbprv->gbp_suspended)
+		return 1;
+
+#if GBURST_DEBUG
+	gbprv->gbp_thread_work_count++;
+#endif /* if GBURST_DEBUG */
+
+	if (!gbprv->gbp_thread_check_utilization) {
+		read_and_process_PWRGT_STS(gbprv);
+		return 1;
+	}
+
+	gbprv->gbp_thread_check_utilization = 0;
+
+	ctime = timestamp();
+	delta = ctime - gbprv->gbp_resume_time;
+
+	if (delta > FRAME_DURATION)
+		gbprv->gbp_num_of_vsync_limited_frames = 0;
+	if (delta > OFFSCREEN_TIME) {
+		gbprv->gbp_offscreen_rendering = 1;
+		hrt_cancel(gbprv);
+	}
+
+	if (!gbprv->gbp_offscreen_rendering) {
+		utilpct = gburst_stats_gfx_hw_perf_record();
+
+		if (mprm_verbosity >= 4)
+			printk(GBURST_ALERT "util: %d %%\n", utilpct);
+
+		if (utilpct < 0) {
+			/**
+			 * This should only fail if not initialized and is
+			 * most likely because some initialization has
+			 * yet to complete.
+			 */
+			if (gbprv->gbp_utilization_percentage >= 0) {
+				/* Only fail if already succeeded once. */
+				printk(GBURST_ALERT "obtaining counters failed\n");
+				return 0;
+			}
+		} else
+			gbprv->gbp_utilization_percentage = utilpct;
+	}
+
+	/* Read current status. */
+	read_and_process_PWRGT_STS(gbprv);
+	request_desired_burst_mode(gbprv);
+
+	return 1;
+}
+
+
+/**
+ * work_thread_loop() - the main loop for the worker thread.
+ * @pvd: The "void *" private data provided to kthread_create.
+ *       This can be cast to the gbprv handle.
+ *
+ * Upon return, thread will exit.
+ */
+static int work_thread_loop(void *pvd)
+{
+	struct gburst_pvt_s *gbprv = (struct gburst_pvt_s *) pvd;
+	int rva;
+
+	if (mprm_verbosity >= 4)
+		printk(GBURST_ALERT "kernel thread started\n");
+
+	for ( ; ; ) {
+		/**
+		 * Synchronization is via a call to:
+		 * int wake_up_process(struct task_struct *p)
+		 */
+		set_current_state(TASK_INTERRUPTIBLE);
+
+		if (kthread_should_stop())
+			break;
+
+		schedule();
+
+		if (kthread_should_stop())
+			break;
+
+		rva = thread_action(gbprv);
+		if (rva == 0) {
+			/* Thread exit requested */
+			break;
+		}
+	}
+
+	if (mprm_verbosity >= 4)
+		printk(GBURST_ALERT "kernel thread stopping\n");
+
+	return 0;
+}
+
+
+/**
+ * work_thread_create() - Create work thread.
+ * @gbprv: gb handle.
+ *
+ * This thread is not truely a "real-time" thread, in that there will be no
+ * catastrophe if its execution is somewhat delayed.  However, knowing that
+ * the nominal execution interval for this timer-woken thread is 5 msecs and
+ * knowing that the thread execution will be very short, it seems appropriate
+ * to request an elevated scheduling priority.  Perhaps a consensus will be
+ * reached as to whether or not this is truely a good idea.
+ *
+ * Function return value:  < 0 if error, otherwise 0.
+ */
+static int work_thread_create(struct gburst_pvt_s *gbprv)
+{
+	struct task_struct *tskhdl;
+
+	if (!gbprv->gbp_task) {
+		tskhdl = kthread_create(work_thread_loop,
+			(void *) gbprv, "kernel_gburst");
+
+		/* Keep a reference on task structure. */
+		get_task_struct(tskhdl);
+
+		if (IS_ERR(tskhdl)) {
+			printk(GBURST_ALERT "kernel thread create fail\n");
+			return PTR_ERR(tskhdl);
+		}
+
+		{
+			/**
+			 *  Potential values for policy:
+			 *    SCHED_NORMAL
+			 *    SCHED_FIFO
+			 *    SCHED_RR
+			 *    SCHED_BATCH
+			 *    SCHED_IDLE
+			 * optionally OR'd with
+			 *   SCHED_RESET_ON_FORK
+			 * Valid priorities for SCHED_FIFO and SCHED_RR are
+			 * 1..MAX_USER_RT_PRIO-1,
+			 * valid priority for SCHED_NORMAL, SCHED_BATCH, and
+			 * SCHED_IDLE is 0.
+			 */
+
+			/**
+			 * An alternative should normal thread priority be
+			 * desired would be the following.
+			 *     static const int sc_policy = SCHED_NORMAL;
+			 *     static const struct sched_param sc_param = {
+			 *             .sched_priority = 0,
+			 *     };
+			 */
+
+			/**
+			 * It seems advisable to run our kernel thread
+			 * at elevated priority.
+			 */
+			static const int sc_policy = SCHED_RR;
+			static const struct sched_param sc_param = {
+				.sched_priority = 1,
+			};
+			int rva;
+
+			rva = sched_setscheduler_nocheck(tskhdl,
+				sc_policy, &sc_param);
+			if (rva < 0) {
+				printk(GBURST_ALERT
+					"task priority set failed,"
+					" code: %d\n", -rva);
+			}
+		}
+
+		gbprv->gbp_task = tskhdl;
+
+		wake_up_process(tskhdl);
+	}
+
+	return 0;
+}
+
+
+/**
+ * gburst_thread_stop - kill the worker thread.
+ * @gbprv: gb handle.
+ */
+static void gburst_thread_stop(struct gburst_pvt_s *gbprv)
+{
+	if (gbprv->gbp_task) {
+		/* kthread_stop will not return until the thread is gone. */
+		kthread_stop(gbprv->gbp_task);
+
+		put_task_struct(gbprv->gbp_task);
+		gbprv->gbp_task = NULL;
+	}
+}
+
+
+/**
+ * gburst_enable_set() - gburst enable/disable.
+ * @gbprv: gb handle.
+ *
+ * Typically triggered through a /proc reference.
+ * When disabled, overhead is reduced by turning off the kernel thread
+ * and timer.
+ */
+static void gburst_enable_set(struct gburst_pvt_s *gbprv)
+{
+	int flgenb;
+
+	flgenb = gbprv->gbp_request_enable &&
+		!gbprv->gbp_request_disable;
+
+	if (gbprv->gbp_enable == flgenb)
+		return;
+
+	gbprv->gbp_enable = flgenb;
+
+	if (!gbprv->gbp_enable) {
+		hrt_cancel(gbprv);
+
+		/* stop the thread */
+		if (gbprv->gbp_task) {
+			/* Stop thread. */
+			gburst_thread_stop(gbprv);
+		}
+	} else {
+		work_thread_create(gbprv);
+		hrt_start(gbprv);
+	}
+
+	request_desired_burst_mode(gbprv);
+}
+
+
+/**
+ * state_times_to_string - Return string describing state_times.
+ * @gbprv:  data structure handle
+ * @prhdg:  non-zero to include a heading.
+ * @slen:   length of buffer
+ * @sbuf:   buffer to receive string.
+ */
+static int state_times_to_string(struct gburst_pvt_s *gbprv, const char *pfx,
+	int prhdg, int ix, int slen, char *sbuf)
+{
+	struct gb_state_times_s gst;
+	struct gb_state_times_s *pst = &gst;
+
+	update_state_times(gbprv, pst);
+
+	if (prhdg) {
+		ix = ut_isnprintf(ix, sbuf, slen, "%s%s\n", pfx,
+			"        uptime      gfx_power burst_request "
+			" burst_realized  gpu_throttled");
+	}
+	if (prhdg == 2) {
+		ix = ut_isnprintf(ix, sbuf, slen, "%s%s\n",
+			pfx,
+			"-------------- -------------- --------------"
+			" -------------- --------------");
+	}
+
+	ix = ut_isnprintf(ix, sbuf, slen,
+		"%s%7lu.%06lu %7lu.%06lu %7lu.%06lu %7lu.%06lu %7lu.%06lu\n",
+		pfx,
+		pst->gst_uptime.tv_sec,
+		pst->gst_uptime.tv_nsec / NSEC_PER_USEC,
+		pst->gst_time_gfx_power.tv_sec,
+		pst->gst_time_gfx_power.tv_nsec / NSEC_PER_USEC,
+		pst->gst_time_burst_requested.tv_sec,
+		pst->gst_time_burst_requested.tv_nsec / NSEC_PER_USEC,
+		pst->gst_time_burst_realized.tv_sec,
+		pst->gst_time_burst_realized.tv_nsec / NSEC_PER_USEC,
+		pst->gst_time_throttled.tv_sec,
+		pst->gst_time_throttled.tv_nsec / NSEC_PER_USEC);
+
+	return ix;
+}
+
+
+/**
+ * copy_from_user_nt() - Like copy_from_user, but ensures null termination,
+ * plus accepts as an input the size of the destination buffer.
+ * @tbuf: kernel buffer to receive data
+ * @tlen: length of kernel buffer
+ * @ubuf: user-space buffer
+ * @ucnt: Number of bytes in ubuf.
+ */
+static int copy_from_user_nt(char *tbuf, size_t tlen,
+		const void __user *ubuf, unsigned long ucnt)
+{
+	if ((ucnt >= tlen) || copy_from_user(tbuf, ubuf, ucnt))
+		return -EINVAL;
+
+	tbuf[ucnt] = '\0';
+
+	return 0;
+}
+
+
+/**
+ * threshold_derive_low() - Compute low, given high and down_diff.
+ * @gbprv: gb handle.
+ *
+ * The bottom threshold may be specified either as a specific value ("low") or
+ * as a delta ("down_diff") below the high threshold's current value.  This is
+ * done because of differing desires of testers versus those characterizing
+ * utilization numbers.  Ultimately, the "low" value is computed and used
+ * in utilization comparisons.
+ */
+static void threshold_derive_low(struct gburst_pvt_s *gbprv)
+{
+	int tlow;
+
+	tlow = gbprv->gbp_burst_th_high - gbprv->gbp_burst_th_down_diff;
+	if (tlow < 0)
+		tlow = 0;
+
+	gbprv->gbp_burst_th_low = tlow;
+}
+
+
+/**
+ * threshold_derive_down_diff() - Compute down_diff, given high and low.
+ * @gbprv: gb handle.
+ *
+ * The bottom threshold may be specified either as a specific value ("low") or
+ * as a delta ("down_diff") below the high threshold's current value.  This is
+ * done because of differing desires of testers versus those characterizing
+ * utilization numbers.  Ultimately, the "low" value is computed and used
+ * in utilization comparisons.
+ */
+static void threshold_derive_down_diff(struct gburst_pvt_s *gbprv)
+{
+	int tdd;
+
+	tdd = gbprv->gbp_burst_th_high - gbprv->gbp_burst_th_low;
+	if (tdd < 0)
+		tdd = 0;
+
+	gbprv->gbp_burst_th_down_diff = tdd;
+}
+
+
+/**
+ * threshold_derive_either() - Compute either down_diff or low, given
+ * high and the other.
+ * @gbprv: gb handle.
+ *
+ * The bottom threshold may be specified either as a specific value ("low") or
+ * as a delta ("down_diff") below the high threshold's current value.  This is
+ * done because of differing desires of testers versus those characterizing
+ * utilization numbers.  Ultimately, the "low" value is computed and used
+ * in utilization comparisons.
+ */
+static void threshold_derive_either(struct gburst_pvt_s *gbprv)
+{
+	if (gbprv->gbp_thold_via == GBP_THOLD_VIA_LOW)
+		threshold_derive_down_diff(gbprv);
+	else /* if (gbprv->gbp_thold_via == GBP_THOLD_VIA_DOWN_DIFF) */
+		threshold_derive_low(gbprv);
+}
+
+
+/**
+ * generate_dump_string() - Dump all status variables for gpu burst.
+ * Useful during development and test.
+ * @gbprv: gb handle.
+ * @buflen: Length of buf.
+ * @buf: Buffer to receive output string.
+ *
+ * Function return value: negative if error or number of character stored
+ * in buf.
+ *
+ * Side effect: Output of dump string via printk.
+ * Useful during development.  Candidate for removal later.
+ */
+static int generate_dump_string(struct gburst_pvt_s *gbprv, size_t buflen,
+	char *buf)
+{
+	u32 pwrgt_sts;
+	u32 tmpv0;
+	int ix;
+
+	ix = 0;
+
+	/* Get Punit Status Register */
+	pwrgt_sts = read_and_process_PWRGT_STS(gbprv);
+
+	if (!gbprv->gbp_enable) {
+		ix = ut_isnprintf(ix, buf, buflen,
+			GBURST_HEADING
+			"enable = %d\n",
+			gbprv->gbp_enable);
+	}
+
+	ix = ut_isnprintf(ix, buf, buflen,
+		GBURST_HEADING
+		"thermal_state = %d\n",
+		gbprv->gbp_cooldv_state_cur);
+
+	if (gbprv->gbp_cooldv_state_highest > gbprv->gbp_cooldv_state_cur) {
+		ix = ut_isnprintf(ix, buf, buflen,
+			GBURST_HEADING
+			"thermal_state_highest = %d\n",
+			gbprv->gbp_cooldv_state_highest);
+	}
+
+	if (gbprv->gbp_cooldv_state_override >= 0) {
+		ix = ut_isnprintf(ix, buf, buflen,
+			GBURST_HEADING
+			"thermal_override = %d\n",
+			gbprv->gbp_cooldv_state_override);
+	}
+
+	ix = ut_isnprintf(ix, buf, buflen,
+		GBURST_HEADING
+		"utilization_threshold_low = %d\n",
+		gbprv->gbp_burst_th_low);
+
+	ix = ut_isnprintf(ix, buf, buflen,
+		GBURST_HEADING
+		"utilization_threshold_high = %d\n",
+		gbprv->gbp_burst_th_high);
+
+	ix = ut_isnprintf(ix, buf, buflen,
+		GBURST_HEADING
+		"utilization = %d\n",
+		gbprv->gbp_utilization_percentage);
+
+	if (gbprv->gbp_utilization_override >= 0) {
+		ix = ut_isnprintf(ix, buf, buflen,
+			GBURST_HEADING
+			"utilization_override = %d\n",
+			gbprv->gbp_utilization_override);
+	}
+
+	ix = state_times_to_string(gbprv, GBURST_HEADING, 1, ix, buflen, buf);
+
+	if (!gbprv->gbp_task) {
+		ix = ut_isnprintf(ix, buf, buflen,
+			GBURST_HEADING
+			"task_handle = %p\n",
+			gbprv->gbp_task);
+	}
+
+#if GBURST_DEBUG
+	ix = ut_isnprintf(ix, buf, buflen,
+		GBURST_HEADING
+		"counts (thrd work, intr, thrm_chg) = %u, %u, %u\n",
+		gbprv->gbp_thread_work_count,
+		gbprv->gbp_interrupt_count,
+		gbprv->gbp_thermal_state_change_count);
+
+	if (gbprv->gbp_suspend_count || gbprv->gbp_resume_count) {
+		ix = ut_isnprintf(ix, buf, buflen,
+			GBURST_HEADING
+			"suspend_resume_count = %u, %u\n",
+			gbprv->gbp_suspend_count,
+			gbprv->gbp_resume_count);
+	}
+
+	if (gbprv->gbp_suspended) {
+		ix = ut_isnprintf(ix, buf, buflen,
+			GBURST_HEADING
+			"gfx_suspended = %u\n",
+			gbprv->gbp_suspended);
+	}
+#endif /* if GBURST_DEBUG */
+
+	ix = ut_isnprintf(ix, buf, buflen,
+		GBURST_HEADING "hrt_enable_state = %d\n",
+		gbprv->gbp_timer_is_enabled);
+
+	tmpv0 = hrtimer_active(&gbprv->gbp_timer);
+	if (gbprv->gbp_timer_is_enabled != tmpv0) {
+		ix = ut_isnprintf(ix, buf, buflen,
+			GBURST_HEADING "hrt_enable_state_actual = %d\n",
+			tmpv0);
+	}
+
+	ix = ut_isnprintf(ix, buf, buflen,
+		GBURST_HEADING
+		"vsync_limited_frames mode = %d\n",
+		(gbprv->gbp_num_of_vsync_limited_frames >= VSYNC_FRAMES));
+
+	ix = ut_isnprintf(ix, buf, buflen,
+		GBURST_HEADING
+		"offscreen_rendering mode = %d\n",
+		gbprv->gbp_offscreen_rendering);
+
+	ix = ut_isnprintf(ix, buf, buflen,
+		GBURST_HEADING
+		"PWRGT_CNT_last_write = %#8.8x\n",
+		gbprv->gbp_pwrgt_cnt_last_written);
+
+	ix = ut_isnprintf(ix, buf, buflen,
+		GBURST_HEADING
+		"PWRGT_STS = %#8.8x\n", pwrgt_sts);
+
+	{
+		u32 tmpv1;
+		char sbuf0[32];
+		char sbuf1[32];
+
+		ix = ut_isnprintf(ix, buf, buflen,
+			GBURST_HEADING
+			"gpu_burst_interrupt enable = %d\n",
+			!!(pwrgt_sts & PWRGT_STS_INT_ENABLE_BIT));
+
+		if (pwrgt_sts & PWRGT_STS_RESERVED_1_BIT)
+			ix = ut_isnprintf(ix, buf, buflen,
+				GBURST_HEADING
+				"reserved_bit_29 = 1\n");
+
+		if (pwrgt_sts & PWRGT_STS_ENABLE_AUTO_BURST_ENTRY_BIT)
+			ix = ut_isnprintf(ix, buf, buflen,
+				GBURST_HEADING
+				"ENABLE_AUTO_BURST_ENTRY = 1\n");
+
+		tmpv0 = (pwrgt_sts & PWRGT_STS_BURST_REQUEST_M) >>
+			PWRGT_STS_BURST_REQUEST_P;
+		tmpv1 = (pwrgt_sts & PWRGT_STS_BURST_REALIZED_M) >>
+			PWRGT_STS_BURST_REALIZED_P;
+
+		ix = ut_isnprintf(ix, buf, buflen,
+			GBURST_HEADING
+			"frequency_requested = %s\n",
+			generate_freq_string(tmpv0, sbuf0, sizeof(sbuf0)));
+
+		ix = ut_isnprintf(ix, buf, buflen,
+			GBURST_HEADING
+			"frequency_realized = %s\n",
+			generate_freq_string(tmpv1, sbuf1, sizeof(sbuf1)));
+	}
+
+	if (ix >= buflen)
+		return -EINVAL;
+
+	printk(GBURST_ALERT
+		"Begin - Read from /proc/gburst/dump\n"
+		"%s"
+		GBURST_HEADING
+		"End   - Read from /proc/gburst/dump\n",
+		buf);
+
+	return ix;
+}
+
+
+/**
+ * pfs_debug_message_read - Procfs read function for
+ * /proc/gburst/debug_message read
+ * Parameters are the standard ones for procfs read functions.
+ * @buf: buffer into which output can be provided for read function.
+ * @start: May be used to provide multiple buffers of output.
+ * @offset: May be used to provide multiple buffers of output.
+ * @breq: Number of bytes available in buf.
+ * @eof: Set by this function to indicate EOF.
+ * @pvd: Private data (in this case, gbprv).
+ *
+ * Return current enable state (0 or 1) of some debug messages.
+ * Associated variable to control debug messages: gburst_debug_msg_on
+ */
+static int pfs_debug_message_read(struct file *file, char __user *buf,
+        size_t nbytes,loff_t *ppos)
+{
+	char msg[128];
+	int res;
+
+	res = scnprintf(msg, sizeof(msg), "%d\n", gburst_debug_msg_on);
+	return simple_read_from_buffer(buf, nbytes, ppos, msg, res);
+}
+
+
+/**
+ * pfs_debug_message_write() - Procfs writer function for
+ * /proc/gburst/debug_message write
+ * Parameters are the standard ones for procfs read functions.
+ * @file: Pointer to procfs associated struct file.
+ * @buffer: buffer with data written to the proc file, input to this function.
+ * @count: number of bytes of data present in buffer.
+ * @pvd: Private data (in this case, gbprv).
+ *
+ * Control for some debug messages: Non-zero to enable, zero to disable.
+ * Associated variable to control debug messages: gburst_debug_msg_on
+ */
+static int pfs_debug_message_write(struct file *file,
+	const char *buffer, size_t count, loff_t *ppos)
+{
+	unsigned int uval;
+	int rva;
+
+	rva = kstrtouint_from_user(buffer, count, 0, &uval);
+	if (rva < 0)
+		return rva;
+
+	gburst_debug_msg_on = uval;
+
+	return count;
+}
+
+
+/**
+ * pfs_disable_read - Procfs read function for
+ * /proc/gburst/disable -- Global and non-privileged disable for gpu burst.
+ * This is a non-privileged override that provides a way for any application
+ * (especially those that require exclusive use of the performance counters)
+ * to disable gburst.
+ *
+ * Parameters are the standard ones for procfs read functions.
+ * @buf: buffer into which output can be provided for read function.
+ * @start: May be used to provide multiple buffers of output.
+ * @offset: May be used to provide multiple buffers of output.
+ * @breq: Number of bytes available in buf.
+ * @eof: Set by this function to indicate EOF.
+ * @pvd: Private data (in this case, gbprv).
+ *
+ * Read: Returns current state of this *disable* flag as "0" or "1".
+ * Write:
+ * 0: Revert any previous disable done via /proc/gburst/disable.
+ *    Also happens when writing 1 to /proc/gburst/enable .
+ * 1: Disable gpu burst mode.  Stop the timer.  Stop the background thread.
+ */
+static int pfs_disable_read(struct file *file, char __user *buf,
+        size_t nbytes,loff_t *ppos)
+{
+	char msg[128];
+	int res;
+	struct gburst_pvt_s *gbprv = (struct gburst_pvt_s *)PDE_DATA(file_inode(file));
+
+	res = scnprintf(msg, sizeof(msg), "%d\n", gbprv->gbp_request_disable);
+	return simple_read_from_buffer(buf, nbytes, ppos, msg, res);
+}
+
+
+/**
+ * pfs_disable_write - Procfs write function for
+ * /proc/gburst/disable -- Global and non-privileged disable for gpu burst.
+ * This is a non-privileged override that provides a way for any application
+ * (especially those that require exclusive use of the performance counters)
+ * to disable gburst.
+ *
+ * Parameters are the standard ones for procfs read functions.
+ * @file: Pointer to procfs associated struct file.
+ * @buffer: buffer with data written to the proc file, input to this function.
+ * @count: number of bytes of data present in buffer.
+ * @pvd: Private data (in this case, gbprv).
+ *
+ * Read: Returns current state of this *disable* flag as "0" or "1".
+ * Write:
+ * 0: Revert any previous disable done via /proc/gburst/disable.
+ *    Also happens when writing 1 to /proc/gburst/enable .
+ * 1: Disable gpu burst mode.  Stop the timer.  Stop the background thread.
+ */
+static int pfs_disable_write(struct file *file,
+	const char *buffer, size_t count, loff_t *ppos)
+{
+	struct gburst_pvt_s *gbprv = (struct gburst_pvt_s *)PDE_DATA(file_inode(file));
+	unsigned int uval;
+	int rva;
+
+	rva = kstrtouint_from_user(buffer, count, 0, &uval);
+	if (rva < 0)
+		return rva;
+
+	if (gbprv->gbp_request_disable != uval) {
+		gbprv->gbp_request_disable = uval;
+
+		gburst_enable_set(gbprv);
+	}
+
+	return count;
+}
+
+
+/**
+ * pfs_dump_read() - Procfs read function for
+ * /proc/gburst/dump
+ * Parameters are the standard ones for procfs read functions.
+ * @buf: buffer into which output can be provided for read function.
+ * @start: May be used to provide multiple buffers of output.
+ * @offset: May be used to provide multiple buffers of output.
+ * @breq: Number of bytes available in buf.
+ * @eof: Set by this function to indicate EOF.
+ * @pvd: Private data (in this case, gbprv).
+ *
+ * read: return a verbose textual status dump.
+ * write: null.
+ */
+static int pfs_dump_read(struct file *file, char __user *buf,
+        size_t nbytes,loff_t *ppos)
+{
+	char msg[4096];
+	int res;
+	struct gburst_pvt_s *gbprv = (struct gburst_pvt_s *)PDE_DATA(file_inode(file));
+
+	res = generate_dump_string(gbprv, nbytes, msg);
+	return simple_read_from_buffer(buf, nbytes, ppos, msg, res);
+}
+
+
+/**
+ * pfs_enable_read - Procfs read function for
+ * /proc/gburst/enable -- Global enable/disable for gpu burst.
+ * Parameters are the standard ones for procfs read functions.
+ * @buf: buffer into which output can be provided for read function.
+ * @start: May be used to provide multiple buffers of output.
+ * @offset: May be used to provide multiple buffers of output.
+ * @breq: Number of bytes available in buf.
+ * @eof: Set by this function to indicate EOF.
+ * @pvd: Private data (in this case, gbprv).
+ *
+ * Read: Returns current state as "0" or "1".
+ * Write:
+ * 0: Disable gpu burst mode.  Stop the timer.  Stop the background thread.
+ * 1: Enable gpu burst mode.
+ *    Also, revert any previous disable done via /proc/gburst/disable.
+ */
+static int pfs_enable_read(struct file *file, char __user *buf,
+        size_t nbytes,loff_t *ppos)
+{
+	char msg[128];
+	int res;
+	struct gburst_pvt_s *gbprv = (struct gburst_pvt_s *)PDE_DATA(file_inode(file));
+
+	res = scnprintf(msg, sizeof(msg), "%d\n", gbprv->gbp_enable);
+	return simple_read_from_buffer(buf, nbytes, ppos, msg, res);
+}
+
+
+/**
+ * pfs_enable_write - Procfs write function for
+ * /proc/gburst/enable -- Global enable/disable for gpu burst.
+ * Parameters are the standard ones for procfs read functions.
+ * @file: Pointer to procfs associated struct file.
+ * @buffer: buffer with data written to the proc file, input to this function.
+ * @count: number of bytes of data present in buffer.
+ * @pvd: Private data (in this case, gbprv).
+ *
+ * Read: Returns current state as "0" or "1".
+ * Write:
+ * 0: Disable gpu burst mode.  Stop the timer.  Stop the background thread.
+ * 1: Enable gpu burst mode.
+ *    Also, revert any previous disable done via /proc/gburst/disable.
+ */
+static int pfs_enable_write(struct file *file,
+	const char *buffer, size_t count, loff_t *ppos)
+{
+	struct gburst_pvt_s *gbprv = (struct gburst_pvt_s *)PDE_DATA(file_inode(file));
+	unsigned int uval;
+	int rva;
+
+	rva = kstrtouint_from_user(buffer, count, 0, &uval);
+	if (rva < 0)
+		return rva;
+
+	/*  Setting enable also clears disable. */
+	if ((uval && gbprv->gbp_request_disable) ||
+		(gbprv->gbp_request_enable != uval)) {
+
+		gbprv->gbp_request_enable = uval;
+
+		if (uval)
+			gbprv->gbp_request_disable = 0;
+
+		gburst_enable_set(gbprv);
+
+		if (!gbprv->gbp_enable)
+			printk(GBURST_ALERT "gpu burst globally disabled via procfs.\n");
+		else
+			printk(GBURST_ALERT "gpu burst globally enabled via procfs.\n");
+	}
+
+	return count;
+}
+
+
+/**
+ * pfs_gpu_monitored_counters_read - Procfs read function for
+ * /proc/gburst/monitored_counters -- write or read which hardware counters are
+ * monitored and therefore available to be considered in performance
+ * calculation.
+ * Parameters are the standard ones for procfs read functions.
+ * @buf: buffer into which output can be provided for read function.
+ * @start: May be used to provide multiple buffers of output.
+ * @offset: May be used to provide multiple buffers of output.
+ * @breq: Number of bytes available in buf.
+ * @eof: Set by this function to indicate EOF.
+ * @pvd: Private data (in this case, gbprv).
+ *
+ * The output string is formatted in a way acceptable to use as an input
+ * string for writing to this proc file.
+ *
+ * See function pfs_gpu_monitored_counters_write.
+ */
+static int pfs_gpu_monitored_counters_read(struct file *file, char __user *buf,
+        size_t nbytes,loff_t *ppos)
+{
+	char msg[1024];
+	int ix;
+
+	ix = gburst_stats_gfx_hw_perf_counters_to_string(0, msg, sizeof(msg));
+	if (ix < 0)
+		return ix;
+
+	if (ix >= sizeof(msg))
+		return -EINVAL;
+
+	return simple_read_from_buffer(buf, nbytes, ppos, msg, ix);
+}
+
+
+/**
+ * pfs_gpu_monitored_counters_write - Procfs write function for
+ * /proc/gburst/monitored_counters -- write or read which hardware counters are
+ * monitored and therefore available to be considered in performance
+ * calculation.
+ * Parameters are the standard ones for procfs read functions.
+ * @file: Pointer to procfs associated struct file.
+ * @buffer: buffer with data written to the proc file, input to this function.
+ * @count: number of bytes of data present in buffer.
+ * @pvd: Private data (in this case, gbprv).
+ *
+ * See comments at function gburst_stats_gfx_hw_perf_counters_set
+ * definition, from which the following format description is excerpted:
+ *
+ * The input string specifies which counters are to be visible as
+ * whitespace-separated (e.g., space, tab, newline) groups of: %u:%u:%u:%u
+ * which correspond to counter:group:bit:coeff .
+ * then assigns the values into data structures for all cores.
+ * These per-counter values are:
+ * 1.  counter - An index into this module's counter data arrays.
+ * 2.  group -- The hardware "group" from which this counter is taken.
+ * 3.  bit   -- The hardware bit (for this group) that selects this counter.
+ * 4.  coeff -- A counter specific increment value.
+ * Example input string: "1:0:1:16   6:0:24:32"
+ */
+static int pfs_gpu_monitored_counters_write(struct file *file,
+	const char *buffer, size_t count, loff_t *ppos)
+{
+	char buf[128];
+	int rva;
+
+	if (copy_from_user_nt(buf, sizeof(buf), buffer, count))
+		return -EINVAL;
+
+	rva = gburst_stats_gfx_hw_perf_counters_set(buf);
+	if (rva < 0)
+		return rva;
+
+	return count;
+}
+
+
+/**
+ * pfs_pwrgt_sts_read() - Procfs read function for
+ * /proc/gburst/pwrgt_sts -- Return register PWRGT_STS contents.
+ * Parameters are the standard ones for procfs read functions.
+ * @buf: buffer into which output can be provided for read function.
+ * @start: May be used to provide multiple buffers of output.
+ * @offset: May be used to provide multiple buffers of output.
+ * @breq: Number of bytes available in buf.
+ * @eof: Set by this function to indicate EOF.
+ * @pvd: Private data (in this case, gbprv).
+ *
+ * Read: Returns register PWRGT_STS content as a hex number
+ * with a leading "0x".
+ * Write: N/A
+ */
+static int pfs_pwrgt_sts_read(struct file *file, char __user *buf,
+        size_t nbytes,loff_t *ppos)
+{
+	char msg[128];
+	int res;
+	struct gburst_pvt_s *gbprv = (struct gburst_pvt_s *)PDE_DATA(file_inode(file));
+
+	read_and_process_PWRGT_STS(gbprv);
+
+	res = scnprintf(msg, sizeof(msg), "%#x\n", gbprv->gbp_pwrgt_sts_last_read);
+	return simple_read_from_buffer(buf, nbytes, ppos, msg, res);
+}
+
+
+/**
+ * pfs_thermal_override_read() - Procfs read function for
+ * /proc/gburst/thermal_override -- thermal state override.  For testing.
+ * Parameters are the standard ones for procfs read functions.
+ * @buf: buffer into which output can be provided for read function.
+ * @start: May be used to provide multiple buffers of output.
+ * @offset: May be used to provide multiple buffers of output.
+ * @breq: Number of bytes available in buf.
+ * @eof: Set by this function to indicate EOF.
+ * @pvd: Private data (in this case, gbprv).
+ *
+ * The maximum of the current thermal state and the thermal override state
+ * are used to make thermal decisions in the driver.
+ * Read: Returns thermal override state.  0 is base state, > 0 indicates
+ * hot.  Currently 0..1.
+ * Initial value is 0.
+ * Write: >= 0 to set thermal override state, -1 to reset.
+ * Silently limited to range -1, 0..1.
+ * Warning: setting override to 0 disables OS gpu burst thermal controls,
+ * although firmware controls will still be in effect.
+ */
+static int pfs_thermal_override_read(struct file *file, char __user *buf,
+        size_t nbytes,loff_t *ppos)
+{
+	char msg[128];
+	int res;
+	struct gburst_pvt_s *gbprv = (struct gburst_pvt_s *)PDE_DATA(file_inode(file));
+
+	res = scnprintf(msg, sizeof(msg), "%d\n", gbprv->gbp_cooldv_state_override);
+	return simple_read_from_buffer(buf, nbytes, ppos, msg, res);
+}
+
+
+/**
+ * pfs_thermal_override_write() - Procfs write function for
+ * /proc/gburst/thermal_override -- thermal state override.  For testing.
+ * Parameters are the standard ones for procfs read functions.
+ * @file: Pointer to procfs associated struct file.
+ * @buffer: buffer with data written to the proc file, input to this function.
+ * @count: number of bytes of data present in buffer.
+ * @pvd: Private data (in this case, gbprv).
+ *
+ * The maximum of the current thermal state and the thermal override state
+ * are used to make thermal decisions in the driver.
+ * Read: Returns thermal override state.  0 is base state, > 0 indicates
+ * hot.  Currently 0..1.
+ * Initial value is 0.
+ * Write: >= 0 to set thermal override state, -1 to reset.
+ * Silently limited to range -1, 0..1.
+ * Warning: setting override to 0 disables OS gpu burst thermal controls,
+ * although firmware controls will still be in effect.
+ */
+static int pfs_thermal_override_write(struct file *file,
+	const char *buffer, size_t count, loff_t *ppos)
+{
+	struct gburst_pvt_s *gbprv = (struct gburst_pvt_s *)PDE_DATA(file_inode(file));
+	int sval;
+	int rva;
+
+	rva = kstrtoint_from_user(buffer, count, 0, &sval);
+	if (rva < 0)
+		return rva;
+
+	if (sval < 0) {
+		gbprv->gbp_cooldv_state_override = -1;
+	} else if (sval <= 1) {
+		gbprv->gbp_cooldv_state_override = sval;
+	} else {
+		gbprv->gbp_cooldv_state_override = -1;
+		return -EINVAL;
+	}
+
+	return count;
+}
+
+
+/**
+ * pfs_thermal_state_read() - Procfs read function for
+ * /proc/gburst/thermal_state -- current thermal state.
+ * Parameters are the standard ones for procfs read functions.
+ * @buf: buffer into which output can be provided for read function.
+ * @start: May be used to provide multiple buffers of output.
+ * @offset: May be used to provide multiple buffers of output.
+ * @breq: Number of bytes available in buf.
+ * @eof: Set by this function to indicate EOF.
+ * @pvd: Private data (in this case, gbprv).
+ *
+ * Read: Returns current thermal state.  0 is base state, > 0 indicates
+ * hot.  Currently 0..1.
+ * This value read does not reflect any override that may be in effect.
+ * Write: N/A
+ */
+static int pfs_thermal_state_read(struct file *file, char __user *buf,
+		size_t nbytes,loff_t *ppos)
+{
+	char msg[128];
+	int res;
+	struct gburst_pvt_s *gbprv = (struct gburst_pvt_s *)PDE_DATA(file_inode(file));
+
+	res = scnprintf(msg, sizeof(msg), "%d\n", gbprv->gbp_cooldv_state_cur);
+	return simple_read_from_buffer(buf, nbytes, ppos, msg, res);
+}
+
+
+/**
+ * pfs_gb_threshold_down_diff_read() - Procfs read function for
+ * /proc/gburst/threshold_down_diff -- read/write un-burst trigger threshold
+ * as a "down_diff" from the high threshold.  (0-100).
+ * Parameters are the standard ones for procfs read functions.
+ * @buf: buffer into which output can be provided for read function.
+ * @start: May be used to provide multiple buffers of output.
+ * @offset: May be used to provide multiple buffers of output.
+ * @breq: Number of bytes available in buf.
+ * @eof: Set by this function to indicate EOF.
+ * @pvd: Private data (in this case, gbprv).
+ */
+static int pfs_gb_threshold_down_diff_read(struct file *file, char __user *buf,
+        size_t nbytes,loff_t *ppos)
+{
+	char msg[128];
+	int res;
+	struct gburst_pvt_s *gbprv = (struct gburst_pvt_s *)PDE_DATA(file_inode(file));
+
+	res = scnprintf(msg, sizeof(msg), "%d\n", gbprv->gbp_burst_th_down_diff);
+	return simple_read_from_buffer(buf, nbytes, ppos, msg, res);
+}
+
+
+/**
+ * pfs_gb_threshold_down_diff_write() - Procfs write function for
+ * /proc/gburst/threshold_down_diff -- read/write un-burst trigger threshold
+ * as a "down_diff" from the high threshold.  (0-100).
+ * Parameters are the standard ones for procfs read functions.
+ * @file: Pointer to procfs associated struct file.
+ * @buffer: buffer with data written to the proc file, input to this function.
+ * @count: number of bytes of data present in buffer.
+ * @pvd: Private data (in this case, gbprv).
+ *
+ */
+static int pfs_gb_threshold_down_diff_write(struct file *file,
+	const char *buffer, size_t count, loff_t *ppos)
+{
+	struct gburst_pvt_s *gbprv = (struct gburst_pvt_s *)PDE_DATA(file_inode(file));
+	unsigned int uval;
+	int rva;
+
+	rva = kstrtouint_from_user(buffer, count, 0, &uval);
+	if (rva < 0)
+		return rva;
+
+	if (uval > 100)
+		return -EINVAL;
+
+	printk(GBURST_ALERT "Changing threshold_down_diff from %d to %d\n",
+		gbprv->gbp_burst_th_down_diff, uval);
+
+	gbprv->gbp_burst_th_down_diff = uval;
+	gbprv->gbp_thold_via = GBP_THOLD_VIA_DOWN_DIFF;
+
+	threshold_derive_low(gbprv);
+
+	return count;
+}
+
+
+/**
+ * pfs_gb_threshold_high_write - Procfs write function for
+ * /proc/gburst/threshold_high -- read/write burst trigger threshold (0-100).
+ * Parameters are the standard ones for procfs read functions.
+ * @file: Pointer to procfs associated struct file.
+ * @buffer: buffer with data written to the proc file, input to this function.
+ * @count: number of bytes of data present in buffer.
+ * @pvd: Private data (in this case, gbprv).
+ *
+ */
+static int pfs_gb_threshold_high_write(struct file *file,
+	const char *buffer, size_t count, loff_t *ppos)
+{
+	struct gburst_pvt_s *gbprv = (struct gburst_pvt_s *)PDE_DATA(file_inode(file));
+	unsigned int uval;
+	int rva;
+
+	rva = kstrtouint_from_user(buffer, count, 0, &uval);
+	if (rva < 0)
+		return rva;
+
+	if (uval > 100)
+		return -EINVAL;
+
+	if (gbprv->gbp_burst_th_high != uval) {
+		printk(GBURST_ALERT "Changing threshold_high from %u to %u\n",
+			gbprv->gbp_burst_th_high, uval);
+	}
+
+	gbprv->gbp_burst_th_high = uval;
+
+	threshold_derive_either(gbprv);
+
+	return count;
+}
+
+
+/**
+ * pfs_gb_threshold_high_read - Procfs read function for
+ * /proc/gburst/threshold_high -- read/write burst trigger threshold (0-100).
+ * Parameters are the standard ones for procfs read functions.
+ * @buf: buffer into which output can be provided for read function.
+ * @start: May be used to provide multiple buffers of output.
+ * @offset: May be used to provide multiple buffers of output.
+ * @breq: Number of bytes available in buf.
+ * @eof: Set by this function to indicate EOF.
+ * @pvd: Private data (in this case, gbprv).
+ *
+ */
+static int pfs_gb_threshold_high_read(struct file *file, char __user *buf,
+        size_t nbytes,loff_t *ppos)
+{
+	char msg[128];
+	int res;
+	struct gburst_pvt_s *gbprv = (struct gburst_pvt_s *)PDE_DATA(file_inode(file));
+
+	res = scnprintf(msg, sizeof(msg), "%d\n", gbprv->gbp_burst_th_high);
+	return simple_read_from_buffer(buf, nbytes, ppos, msg, res);
+}
+
+
+/**
+ * pfs_gb_threshold_low_read() - Procfs read function for
+ * /proc/gburst/threshold_low -- read/write un-burst trigger threshold (0-100).
+ * Parameters are the standard ones for procfs read functions.
+ * @buf: buffer into which output can be provided for read function.
+ * @start: May be used to provide multiple buffers of output.
+ * @offset: May be used to provide multiple buffers of output.
+ * @breq: Number of bytes available in buf.
+ * @eof: Set by this function to indicate EOF.
+ * @pvd: Private data (in this case, gbprv).
+ */
+static int pfs_gb_threshold_low_read(struct file *file, char __user *buf,
+        size_t nbytes,loff_t *ppos)
+{
+	char msg[128];
+	int res;
+	struct gburst_pvt_s *gbprv = (struct gburst_pvt_s *)PDE_DATA(file_inode(file));
+
+	res = scnprintf(msg, sizeof(msg), "%d\n", gbprv->gbp_burst_th_low);
+	return simple_read_from_buffer(buf, nbytes, ppos, msg, res);
+}
+
+
+/**
+ * pfs_gb_threshold_low_write() - Procfs write function for
+ * /proc/gburst/threshold_low -- read/write un-burst trigger threshold (0-100).
+ * Parameters are the standard ones for procfs read functions.
+ * @file: Pointer to procfs associated struct file.
+ * @buffer: buffer with data written to the proc file, input to this function.
+ * @count: number of bytes of data present in buffer.
+ * @pvd: Private data (in this case, gbprv).
+ */
+static int pfs_gb_threshold_low_write(struct file *file,
+	const char *buffer, size_t count, loff_t *ppos)
+{
+	struct gburst_pvt_s *gbprv = (struct gburst_pvt_s *)PDE_DATA(file_inode(file));
+	unsigned int uval;
+	int rva;
+
+	rva = kstrtouint_from_user(buffer, count, 0, &uval);
+	if (rva < 0)
+		return rva;
+
+	if (uval > 100)
+		return -EINVAL;
+
+	printk(GBURST_ALERT "Changing threshold_low from %d to %d\n",
+		gbprv->gbp_burst_th_low, uval);
+
+	gbprv->gbp_burst_th_low = uval;
+	gbprv->gbp_thold_via = GBP_THOLD_VIA_LOW;
+
+	threshold_derive_down_diff(gbprv);
+
+	return count;
+}
+
+
+/**
+ * pfs_state_times_read() - Procfs read function for
+ * /proc/gburst/state_times -- uptime and times in states, seconds.
+ * Parameters are the standard ones for procfs read functions.
+ * @buf: buffer into which output can be provided for read function.
+ * @start: May be used to provide multiple buffers of output.
+ * @offset: May be used to provide multiple buffers of output.
+ * @breq: Number of bytes available in buf.
+ * @eof: Set by this function to indicate EOF.
+ * @pvd: Private data (in this case, gbprv).
+ *
+ * Write: 0, 1, 2 for increasing verbosity of header line output.
+ * Read:
+ * Three times, which all include time when system or device is suspended:
+ * - uptime - time since boot
+ * - cumulative time that burst has been requested.
+ * - cumulative time that burst has been realized.
+ * - cumulative time that gpu frequency has been throttled.
+ *
+ * The time values are stored internally with nanosecond resolution,
+ * but are returned by this read function to microsecond resolution.
+ * The times are expressed for this interface as <seconds>.<fraction>.
+ * Example output:
+ * "  12721.541788       1.349854       1.349790       0.000000"
+ */
+static int pfs_state_times_read(struct file *file, char __user *buf,
+        size_t nbytes,loff_t *ppos)
+{
+	char msg[128];
+	int res;
+	struct gburst_pvt_s *gbprv = (struct gburst_pvt_s *)PDE_DATA(file_inode(file));
+
+	res = state_times_to_string(gbprv, "", 0, gbprv->gbp_state_time_header,
+		sizeof(msg), msg);
+	return simple_read_from_buffer(buf, nbytes, ppos, msg, res);
+}
+
+
+/**
+ * pfs_state_times_write() - Procfs write function for
+ * /proc/gburst/state_times -- uptime and times in states, seconds.
+ * Parameters are the standard ones for procfs read functions.
+ * @file: Pointer to procfs associated struct file.
+ * @buffer: buffer with data written to the proc file, input to this function.
+ * @count: number of bytes of data present in buffer.
+ * @pvd: Private data (in this case, gbprv).
+ *
+ * Write: 0, 1, 2 for increasing verbosity of header line output.
+ * Read:
+ * Three times, which all include time when system or device is suspended:
+ * - uptime - time since boot
+ * - cumulative time that burst has been requested.
+ * - cumulative time that burst has been realized.
+ * - cumulative time that gpu frequency has been throttled.
+ *
+ * The time values are stored internally with nanosecond resolution,
+ * but are returned by this read function to microsecond resolution.
+ * The times are expressed for this interface as <seconds>.<fraction>.
+ * Example output:
+ * "  12721.541788       1.349854       1.349790       0.000000"
+ */
+static int pfs_state_times_write(struct file *file,
+	const char *buffer, size_t count, loff_t *ppos)
+{
+	struct gburst_pvt_s *gbprv = (struct gburst_pvt_s *)PDE_DATA(file_inode(file));
+	unsigned int uval;
+	int rva;
+
+	rva = kstrtouint_from_user(buffer, count, 0, &uval);
+	if (rva < 0)
+		return rva;
+
+	gbprv->gbp_state_time_header = uval;
+
+	return count;
+}
+
+
+/**
+ * pfs_timer_period_usecs_read() - Procfs read function for
+ * /proc/gburst/timer_period_usecs -- Kernel thread's timer period, usecs.
+ * Parameters are the standard ones for procfs read functions.
+ * @buf: buffer into which output can be provided for read function.
+ * @start: May be used to provide multiple buffers of output.
+ * @offset: May be used to provide multiple buffers of output.
+ * @breq: Number of bytes available in buf.
+ * @eof: Set by this function to indicate EOF.
+ * @pvd: Private data (in this case, gbprv).
+ *
+ * Read/write.
+ */
+static int pfs_timer_period_usecs_read(struct file *file, char __user *buf,
+        size_t nbytes,loff_t *ppos)
+{
+	char msg[128];
+	int res;
+	struct gburst_pvt_s *gbprv = (struct gburst_pvt_s *)PDE_DATA(file_inode(file));
+	int period_usecs;
+
+	period_usecs = ktime_to_us(gbprv->gbp_hrt_period);
+	if (mprm_verbosity >= 3)
+		printk(GBURST_ALERT "timer period = %u microseconds\n",
+			period_usecs);
+
+	res = scnprintf(msg, sizeof(msg), "%u microseconds\n", period_usecs);
+	return simple_read_from_buffer(buf, nbytes, ppos, msg, res);
+}
+
+
+/**
+ * pfs_timer_period_usecs_write() - Procfs write function for
+ * /proc/gburst/timer_period_usecs -- Kernel thread's timer period, usecs.
+ * Parameters are the standard ones for procfs read functions.
+ * @file: Pointer to procfs associated struct file.
+ * @buffer: buffer with data written to the proc file, input to this function.
+ * @count: number of bytes of data present in buffer.
+ * @pvd: Private data (in this case, gbprv).
+ *
+ * Read/write.
+ */
+static int pfs_timer_period_usecs_write(struct file *file,
+	const char *buffer, size_t count, loff_t *ppos)
+{
+	struct gburst_pvt_s *gbprv = (struct gburst_pvt_s *)PDE_DATA(file_inode(file));
+	unsigned int uval;
+	int rva;
+
+	/* Get period in microseconds. */
+	rva = kstrtouint_from_user(buffer, count, 0, &uval);
+	if (rva < 0)
+		return rva;
+
+	if (uval > USEC_PER_SEC)
+		uval = USEC_PER_SEC;
+
+	gbprv->gbp_hrt_period = ktime_set(0, uval * NSEC_PER_USEC);
+
+	/**
+	 * Change the time immediately, abandoning current interval.
+	 * If this call was not made, then change would take place
+	 * after the next timer expiration.
+	 */
+	hrt_start(gbprv);
+
+	return count;
+}
+
+
+/**
+ * pfs_utilization_read() - Procfs read function for
+ * /proc/gburst/utilization -- current utilization.  (0 to 100).
+ * Parameters are the standard ones for procfs read functions.
+ * @buf: buffer into which output can be provided for read function.
+ * @start: May be used to provide multiple buffers of output.
+ * @offset: May be used to provide multiple buffers of output.
+ * @breq: Number of bytes available in buf.
+ * @eof: Set by this function to indicate EOF.
+ * @pvd: Private data (in this case, gbprv).
+ *
+ * Read: Returns current utilization as a number from 0 to 100 or -1
+ * if no value is available.
+ * This value read does not reflect any override that may be in effect.
+ * Write: N/A
+ */
+static int pfs_utilization_read(struct file *file, char __user *buf,
+        size_t nbytes,loff_t *ppos)
+{
+	char msg[128];
+	int res;
+	struct gburst_pvt_s *gbprv = (struct gburst_pvt_s *)PDE_DATA(file_inode(file));
+
+	res = scnprintf(msg, sizeof(msg), "%d\n", gbprv->gbp_utilization_percentage);
+	return simple_read_from_buffer(buf, nbytes, ppos, msg, res);
+}
+
+
+/**
+ * pfs_utilization_override_read() - Procfs read function for
+ * /proc/gburst/utilization_override -- utilization override.  For testing.
+ * Parameters are the standard ones for procfs read functions.
+ * @buf: buffer into which output can be provided for read function.
+ * @start: May be used to provide multiple buffers of output.
+ * @offset: May be used to provide multiple buffers of output.
+ * @breq: Number of bytes available in buf.
+ * @eof: Set by this function to indicate EOF.
+ * @pvd: Private data (in this case, gbprv).
+ *
+ * If set, a utilization override value to be used instead of actual
+ * utilization.
+ * Read: Returns current utilization override as a number from 0 to 100
+ * or -1 if override state is not set.
+ * Write:
+ * 0 to 100 - utilization override value or -1 to reset override.
+ */
+static int pfs_utilization_override_read(struct file *file, char __user *buf,
+        size_t nbytes,loff_t *ppos)
+{
+	char msg[128];
+	int res;
+	struct gburst_pvt_s *gbprv = (struct gburst_pvt_s *)PDE_DATA(file_inode(file));
+
+	res = scnprintf(msg, sizeof(msg), "%d\n", gbprv->gbp_utilization_override);
+	return simple_read_from_buffer(buf, nbytes, ppos, msg, res);
+}
+
+
+/**
+ * pfs_utilization_override_write - Procfs write function for
+ * /proc/gburst/utilization_override -- utilization override.  For testing.
+ * Parameters are the standard ones for procfs read functions.
+ * @file: Pointer to procfs associated struct file.
+ * @buffer: buffer with data written to the proc file, input to this function.
+ * @count: number of bytes of data present in buffer.
+ * @pvd: Private data (in this case, gbprv).
+ *
+ * If set, a utilization override value to be used instead of actual
+ * utilization.
+ * Read: Returns current utilization override as a number from 0 to 100
+ * or -1 if override state is not set.
+ * Write:
+ * 0 to 100 - utilization override value or -1 to reset override.
+ */
+static int pfs_utilization_override_write(struct file *file,
+	const char *buffer, size_t count, loff_t *ppos)
+{
+	struct gburst_pvt_s *gbprv = (struct gburst_pvt_s *)PDE_DATA(file_inode(file));
+	int sval;
+	int rva;
+
+	rva = kstrtoint_from_user(buffer, count, 0, &sval);
+	if (rva < 0) {
+		printk(GBURST_ALERT "read int failed\n");
+		return rva;
+	}
+
+	if (sval < 0) {
+		gbprv->gbp_utilization_override = -1;
+	} else if (sval <= 100) {
+		gbprv->gbp_utilization_override = sval;
+	} else {
+		if (mprm_verbosity >= 4)
+			printk(GBURST_ALERT "invalid value\n");
+		return -EINVAL;
+	}
+
+	return count;
+}
+
+
+/**
+ * pfs_verbosity_read() - Procfs read function for
+ * /proc/gburst/verbosity -- Specify routine verbosity.
+ * Parameters are the standard ones for procfs read functions.
+ * @buf: buffer into which output can be provided for read function.
+ * @start: May be used to provide multiple buffers of output.
+ * @offset: May be used to provide multiple buffers of output.
+ * @breq: Number of bytes available in buf.
+ * @eof: Set by this function to indicate EOF.
+ * @pvd: Private data (in this case, gbprv).
+ *
+ * Associated variable to control verbosity of message issuance.
+ * read:  number >= 0 indicating verbosity, with 0 being most quiet.
+ * write: number >= 0 indicating verbosity, with 0 being most quiet.
+ */
+static int pfs_verbosity_read(struct file *file, char __user *buf,
+        size_t nbytes,loff_t *ppos)
+{
+	char msg[128];
+	int res;
+
+	res = scnprintf(msg, sizeof(msg), "%d\n", mprm_verbosity);
+	return simple_read_from_buffer(buf, nbytes, ppos, msg, res);
+
+}
+
+
+/**
+ * pfs_verbosity_write() - Procfs write function for
+ * /proc/gburst/verbosity -- Specify routine verbosity.
+ * Parameters are the standard ones for procfs read functions.
+ * @file: Pointer to procfs associated struct file.
+ * @buffer: buffer with data written to the proc file, input to this function.
+ * @count: number of bytes of data present in buffer.
+ * @pvd: Private data (in this case, gbprv).
+ *
+ * Associated variable to control verbosity of message issuance.
+ * read:  number >= 0 indicating verbosity, with 0 being most quiet.
+ * write: number >= 0 indicating verbosity, with 0 being most quiet.
+ */
+static int pfs_verbosity_write(struct file *file,
+	const char *buffer, size_t count, loff_t *ppos)
+{
+	unsigned int uval;
+	int rva;
+
+	rva = kstrtouint_from_user(buffer, count, 0, &uval);
+	if (rva < 0)
+		return rva;
+
+	mprm_verbosity = uval;
+
+	return count;
+}
+
+
+/**
+ * pfs_init() - Create all /proc/gburst entries.
+ * @gbprv: gb handle.
+ *
+ * Table driven from pfs_tab.
+ */
+static void pfs_init(struct gburst_pvt_s *gbprv)
+{
+	struct proc_dir_entry *pdehdl;
+	const struct pfs_data *pfsdat;
+	int fmode;
+	int ix;
+	struct file_operations *pfd_proc_fops;
+
+	/* Create /proc/gburst */
+	if (!gbprv->gbp_proc_gburst) {
+		/**
+		 * gbprv->gbp_proc_parent will be NULL at this point,
+		 * which means to create at the top level of /proc.
+		 */
+		gbprv->gbp_proc_gburst = proc_mkdir(GBURST_PFS_NAME_DIR_GBURST,
+			gbprv->gbp_proc_parent);
+		if (!gbprv->gbp_proc_gburst) {
+			printk(GBURST_ALERT "Error creating gburst proc dir: %s\n",
+				GBURST_PFS_NAME_DIR_GBURST);
+			return;
+		}
+	}
+
+	for (ix = 0; ix < ARRAY_SIZE(pfs_tab); ix++) {
+		pfsdat = pfs_tab + ix;
+
+		fmode = pfsdat->pfd_mode;
+		if (!pfsdat->pfd_func_read)
+			fmode &= ~0444;
+		if (!pfsdat->pfd_func_write)
+			fmode &= ~0222;
+
+                pfd_proc_fops = kzalloc(sizeof(struct file_operations), GFP_KERNEL);
+                if(!pfd_proc_fops) {
+                    printk(GBURST_ALERT "Error creating gburst proc file: %s\n",
+                              pfsdat->pfd_file_name);
+                    continue;
+                }
+                pfd_proc_fops->read = pfsdat->pfd_func_read;
+                pfd_proc_fops->write = pfsdat->pfd_func_write;
+
+                pdehdl = proc_create_data(pfsdat->pfd_file_name, fmode,
+                               gbprv->gbp_proc_gburst, pfd_proc_fops, gbprv);
+
+		gbprv->gbp_pfs_handle[ix] = pdehdl;
+		if (!pdehdl) {
+			printk(GBURST_ALERT "Error creating gburst proc file: %s\n",
+				pfsdat->pfd_file_name);
+		}
+	}
+}
+
+
+/**
+ * pfs_cleanup() - Remove /proc/gburst files (e.g., at module exit).
+ * @gbprv: gb handle.
+ */
+static void pfs_cleanup(struct gburst_pvt_s *gbprv)
+{
+	struct proc_dir_entry *pde_root = gbprv->gbp_proc_gburst;
+	int ix;
+
+	if (!pde_root)
+		return;
+
+	for (ix = 0; ix < ARRAY_SIZE(pfs_tab); ix++) {
+		if (gbprv->gbp_pfs_handle[ix]) {
+			const struct pfs_data *pfsdat = pfs_tab + ix;
+
+			/**
+			 * Function remove_proc_entry may wait for completion
+			 * of in-progress operations on the corresponding
+			 * proc file.
+			 */
+			remove_proc_entry(pfsdat->pfd_file_name, pde_root);
+			gbprv->gbp_pfs_handle[ix] = NULL;
+		}
+	}
+
+	remove_proc_entry(GBURST_PFS_NAME_DIR_GBURST, gbprv->gbp_proc_parent);
+}
+
+
+/**
+ * gburst_irq_handler() - Interrupt handler for frequency change interrupts.
+ * @irq: The irq for this interrupt.
+ * @pvd: Private data for this interrupt.  In this case, gbprv.
+ *
+ * Execution context: hard irq level
+ *
+ * Warning: Currently, reading or writing registers (via intel_mid_msgbus_read32
+ * and intel_mid_msgbus_write32 family) must not be done at interrupt level.
+ */
+static irqreturn_t gburst_irq_handler(int irq, void *pvd)
+{
+	struct gburst_pvt_s *gbprv = (struct gburst_pvt_s *) pvd;
+
+	smp_rmb();
+	if (!gbprv->gbp_initialized)
+		return IRQ_HANDLED;
+
+#if GBURST_DEBUG
+	gbprv->gbp_interrupt_count++;
+#endif /* if GBURST_DEBUG */
+
+	if (gbprv->gbp_suspended) {
+		/* Interrupt while suspended is not an abnormal event. */
+		return IRQ_HANDLED;
+	}
+
+	wake_thread(gbprv);
+
+	/**
+	 * Potential return values from an interrupt handler:
+	 * IRQ_NONE        -- Not our interrupt
+	 * IRQ_HANDLED     -- Interrupt handled.
+	 * IRQ_WAKE_THREAD -- Pass on to this interrupt's kernel thread.
+	 */
+	return IRQ_HANDLED;
+}
+
+
+/**
+ * gburst_suspend() - Callback for gfx hw transition to state D3.
+ * @gbprv: gb handle.
+ *
+ * Invoked via interrupt/callback.
+ * Execution context: non-atomic
+ */
+static int gburst_suspend(struct gburst_pvt_s *gbprv)
+{
+	unsigned long long ctime;
+
+	smp_rmb();
+	if (!gbprv || !gbprv->gbp_initialized)
+		return 0;
+
+	if (mprm_verbosity >= 3)
+		printk(GBURST_ALERT "suspend\n");
+
+#if GBURST_DEBUG
+	gbprv->gbp_suspend_count++;
+#endif
+
+	/* Must update times before changing flag. */
+	update_state_times(gbprv, NULL);
+	mutex_lock(&gbprv->gbp_mutex_pwrgt_sts);
+	gbprv->gbp_suspended = 1;
+	mutex_unlock(&gbprv->gbp_mutex_pwrgt_sts);
+	smp_wmb();
+
+	/* Cancel timer events. */
+	hrt_cancel(gbprv);
+
+	ctime = timestamp();
+	if ((ctime-gbprv->gbp_resume_time+FRAME_TIME_BUFFER) < FRAME_DURATION)
+		gbprv->gbp_num_of_vsync_limited_frames += 1;
+	else
+		gbprv->gbp_num_of_vsync_limited_frames = 0;
+
+	write_PWRGT_CNT(gbprv, 0);
+
+/*  GBURST_TIMING_BEFORE_SUSPEND - Code currently under test. */
+#define GBURST_TIMING_BEFORE_SUSPEND 1
+
+#if GBURST_TIMING_BEFORE_SUSPEND
+	/*
+	 * Before suspend takes place, tell gfx hw driver that gpu
+	 * frequency is normal.  It is hoped that this will lead to
+	 * better stability.
+	 * No additional call to update_state_times is desired here, as
+	 * the delta time should be insignificant from the previous call.
+	 */
+
+	mutex_lock(&gbprv->gbp_mutex_pwrgt_sts);
+
+	if ((gbprv->gbp_pwrgt_sts_last_read & PWRGT_STS_BURST_REALIZED_M)
+		!= PWRGT_STS_BURST_REALIZED_M_400) {
+		/*  Notify driver. */
+		gburst_stats_gpu_freq_mhz_info(
+			freq_mhz_table[GBURST_GPU_FREQ_400]);
+	}
+
+	gbprv->gbp_pwrgt_sts_last_read = (gbprv->gbp_pwrgt_sts_last_read &
+		~(PWRGT_STS_BURST_REQUEST_M | PWRGT_STS_BURST_REALIZED_M))
+		| (GBURST_GPU_FREQ_400 << PWRGT_STS_BURST_REQUEST_P)
+		| (GBURST_GPU_FREQ_400 << PWRGT_STS_BURST_REALIZED_P);
+
+	mutex_unlock(&gbprv->gbp_mutex_pwrgt_sts);
+#endif /* if GBURST_TIMING_BEFORE_SUSPEND */
+
+	/* Clear current utilization value before suspend as there has
+	been 5ms GPU idle period anyway due to suspend request */
+	if (gbprv->gbp_utilization_percentage > 0)
+		gbprv->gbp_utilization_percentage = 0;
+
+	/* Clear utilization check request to avoid extra calculation
+	on next freq change wakeup in case timer expires during suspend
+	sequence */
+	gbprv->gbp_thread_check_utilization = 0;
+
+	/* Clean up GFX load information storage from old and obsolete data */
+	gburst_stats_cleanup_gfx_load_data();
+
+	return 0;
+}
+
+
+/**
+ * gburst_resume() - Callback for gfx hw transition from state D3.
+ * @gbprv: gb handle.
+ *
+ * Device power on.  Assume the device has retained no state.
+ * Invoked via interrupt/callback.
+ * Execution context: non-atomic
+ */
+static int gburst_resume(struct gburst_pvt_s *gbprv)
+{
+	smp_rmb();
+
+	if (!gbprv || !gbprv->gbp_initialized)
+		return 0;
+
+	if (mprm_verbosity >= 3)
+		printk(GBURST_ALERT "resume\n");
+
+#if GBURST_DEBUG
+	gbprv->gbp_resume_count++;
+#endif /* if GBURST_DEBUG */
+
+	update_state_times(gbprv, NULL);
+
+	read_PWRGT_CNT_toggle(gbprv);
+
+	/* Assume thermal state is current or will be updated soon. */
+
+	/* PWRGT_CNT to 0 except toggle and interrupt enable. */
+        if ((gbprv->gbp_burst_th_low == 0) &&
+             (gbprv->gbp_burst_th_high == 0)) {
+	        set_state_pwrgt_cnt(gbprv, PWRGT_CNT_BURST_REQUEST_M_533);
+	} else {
+		set_state_pwrgt_cnt(gbprv, PWRGT_CNT_BURST_REQUEST_M_400);
+	}
+
+	gbprv->gbp_suspended = 0;
+	gbprv->gbp_resume_time = timestamp();
+	gbprv->gbp_offscreen_rendering = 0;
+
+	smp_wmb();
+
+	mutex_lock(&gbprv->gbp_mutex_pwrgt_sts);
+	if (!(read_PWRGT_STS_simple() & PWRGT_STS_FREQ_THROTTLE_M)) {
+		hrt_start(gbprv);
+	}
+	mutex_unlock(&gbprv->gbp_mutex_pwrgt_sts);
+
+	return 0;
+}
+
+
+/**
+ * gburst_power_state_set() - Callback informing of gfx hw power state change.
+ * @gbprv: gb handle.
+ * @st_on: 1 if powering on, 0 if powering down.
+ */
+static void gburst_power_state_set(struct gburst_pvt_s *gbprv, int st_on)
+{
+	if (gbprv->gbp_enable) {
+		if (st_on)
+			gburst_resume(gbprv);
+		else
+			gburst_suspend(gbprv);
+	}
+}
+
+
+/**
+ * tcd_get_max_state() - thermal cooling device callback get_max_state.
+ * @tcd: Thermal cooling device structure.
+ * @pms: Pointer to integer through which output value is stored.
+ *
+ * Invoked via interrupt/callback.
+ * Function return value: 0 if success, otherwise -error.
+ * Execution context: non-atomic
+ */
+static int tcd_get_max_state(struct thermal_cooling_device *tcd,
+	unsigned long *pms)
+{
+	*pms = THERMAL_COOLING_DEVICE_MAX_STATE;
+
+	return 0;
+}
+
+
+/**
+ * tcd_get_cur_state() - thermal cooling device callback get_cur_state.
+ * @tcd: Thermal cooling device structure.
+ * @pcs: Pointer to integer through which output value is stored.
+ *
+ * Invoked via interrupt/callback.
+ * Function return value: 0 if success, otherwise -error.
+ * Execution context: non-atomic
+ */
+static int tcd_get_cur_state(struct thermal_cooling_device *tcd,
+	unsigned long *pcs)
+{
+	struct gburst_pvt_s *gbprv = (struct gburst_pvt_s *) tcd->devdata;
+	*pcs = gbprv->gbp_cooldv_state_cur;
+
+	return 0;
+}
+
+
+/**
+ * tcd_set_cur_state() - thermal cooling device callback set_cur_state.
+ * @tcd: Thermal cooling device structure.
+ * @cs: Input state.
+ *
+ * Invoked via interrupt/callback.
+ * Function return value: 0 if success, otherwise -error.
+ * Execution context: non-atomic
+ */
+static int tcd_set_cur_state(struct thermal_cooling_device *tcd,
+	unsigned long cs)
+{
+	struct gburst_pvt_s *gbprv = (struct gburst_pvt_s *) tcd->devdata;
+	if (cs > THERMAL_COOLING_DEVICE_MAX_STATE)
+		cs = THERMAL_COOLING_DEVICE_MAX_STATE;
+
+	/* If state change between zero and non-zero... */
+	if (!!gbprv->gbp_cooldv_state_cur != !!cs) {
+		gbprv->gbp_cooldv_state_prev = gbprv->gbp_cooldv_state_cur;
+		gbprv->gbp_cooldv_state_cur = cs;
+
+		if (gbprv->gbp_cooldv_state_highest <
+			gbprv->gbp_cooldv_state_cur) {
+			gbprv->gbp_cooldv_state_highest =
+				gbprv->gbp_cooldv_state_cur;
+		}
+
+#if GBURST_DEBUG
+		gbprv->gbp_thermal_state_change_count++;
+#endif
+
+		if (mprm_verbosity >= 2)
+			printk(GBURST_ALERT "Thermal state changed from %d to %d\n",
+				gbprv->gbp_cooldv_state_prev,
+				gbprv->gbp_cooldv_state_cur);
+
+		request_desired_burst_mode(gbprv);
+	}
+
+	return 0;
+}
+
+
+/**
+ * gburst_cleanup() -- Clean up module data structures, release resources, etc.
+ * @gbprv: gb handle.
+ */
+static void __exit gburst_cleanup(struct gburst_pvt_s *gbprv)
+{
+	if (!gbprv)
+		return;
+
+	gbprv->gbp_initialized = 0;
+
+	smp_mb();
+
+	{
+		struct gburst_interface_s gd_interface;
+		memset(&gd_interface, 0, sizeof(struct gburst_interface_s));
+		gburst_interface_set_data(&gd_interface);
+	}
+
+	if (gbprv->gbp_cooldv_hdl) {
+		thermal_cooling_device_unregister(gbprv->gbp_cooldv_hdl);
+		gbprv->gbp_cooldv_hdl = NULL;
+	}
+
+	write_PWRGT_CNT(gbprv, 0);
+
+	/**
+	 * Interestingly, free_irq will (if defined CONFIG_DEBUG_SHIRQ,
+	 * for shared interrupts) call the irq handler spuriously
+	 * just in order to make sure the driver handles the
+	 * spurious call correctly.
+	 */
+	free_irq(GBURST_IRQ_LEVEL, gbprv);
+
+	/* De-install /proc/gburst entries. */
+	if (gbprv->gbp_proc_gburst)
+		pfs_cleanup(gbprv);
+
+	hrtimer_cancel(&gbprv->gbp_timer);
+
+	/* stop the thread */
+	if (gbprv->gbp_task)
+		gburst_thread_stop(gbprv);
+
+	mutex_destroy(&gbprv->gbp_state_times_mutex);
+	mutex_destroy(&gbprv->gbp_mutex_pwrgt_sts);
+}
+
+
+/**
+ * gburst_init() - gburst module initialization.
+ * @gbprv: gb handle.
+ *
+ * Invokes sub-function to initialize.  If failure, invokes cleanup.
+ *
+ * Function return value: negative to abort module installation.
+ */
+static int gburst_init(struct gburst_pvt_s *gbprv)
+{
+	u32 gt_sts;
+	int sts;
+
+	gt_sts = read_PWRGT_STS_simple();
+
+	if (!(gt_sts & PWRGT_STS_BURST_SUPPORT_PRESENT_BIT)) {
+		printk(GBURST_ALERT "gpu burst mode is not supported\n");
+		return -ENODEV;
+	}
+
+	printk(GBURST_ALERT "gpu burst mode initialization -- begin\n");
+
+	mutex_init(&gbprv->gbp_mutex_pwrgt_sts);
+	mutex_init(&gbprv->gbp_state_times_mutex);
+
+	gbprv->gbp_request_disable = 0;
+	gbprv->gbp_request_enable = mprm_enable;
+
+	gbprv->gbp_enable = gbprv->gbp_request_enable &&
+		!gbprv->gbp_request_disable;
+
+	gburst_debug_msg_on = 0;
+
+	/* -1 indicates that no utilization value has been seen. */
+	gbprv->gbp_utilization_percentage = -1;
+
+	/* -1 indicates no override is in effect. */
+	gbprv->gbp_utilization_override = -1;
+
+	/* No 3D activity is initialized to run until kernel finish
+	 * its initialization. so here make the gburst status consistent
+	 * with HW
+	 * */
+	gbprv->gbp_suspended = 1;
+
+	read_PWRGT_CNT_toggle(gbprv);
+
+	{       /* Not a shared interrupt, so no IRQF_SHARED. */
+		const unsigned long request_flags = IRQF_TRIGGER_RISING;
+
+		sts = request_irq(GBURST_IRQ_LEVEL, gburst_irq_handler,
+			request_flags, GBURST_DRIVER_NAME, gbprv);
+		if (sts != 0) {
+			printk(GBURST_ALERT "Interrupt assignment failed: %d\n",
+				GBURST_IRQ_LEVEL);
+			sts = -ENXIO;
+			goto err_interrupt;
+		}
+	}
+
+	/* Set default values for GPU burst control and counters monitored */
+	gbprv->gbp_burst_th_high = GBURST_THRESHOLD_DEFAULT_HIGH;
+
+	gbprv->gbp_thold_via = GBP_THOLD_VIA_DOWN_DIFF;
+	gbprv->gbp_burst_th_down_diff = GBURST_THRESHOLD_DEFAULT_DOWN_DIFF;
+
+	threshold_derive_either(gbprv);
+
+	gbprv->gbp_hrt_period = ktime_set(0,
+		GBURST_TIMER_PERIOD_DEFAULT_USECS * NSEC_PER_USEC);
+
+	{
+		struct gburst_interface_s gd_interface;
+
+		gd_interface.gbs_priv = gbprv;
+		gd_interface.gbs_power_state_set = gburst_power_state_set;
+
+		gburst_interface_set_data(&gd_interface);
+	}
+
+	gbprv->gbp_cooldv_state_override = -1;
+
+	/* Burst dynamic control parameters*/
+	gbprv->gbp_resume_time = 0;
+	gbprv->gbp_offscreen_rendering = 0;
+	gbprv->gbp_num_of_vsync_limited_frames = 0;
+
+	/* Initialize timer.  This does not start the timer. */
+	hrtimer_init(&gbprv->gbp_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	gbprv->gbp_timer.function = hrt_event_processor;
+
+	{
+		static const char *tcd_type = "gpu_burst";
+		static const struct thermal_cooling_device_ops tcd_ops = {
+			.get_max_state = tcd_get_max_state,
+			.get_cur_state = tcd_get_cur_state,
+			.set_cur_state = tcd_set_cur_state,
+		};
+		struct thermal_cooling_device *tcdhdl;
+
+		/**
+		  * Example: Thermal zone "type"s and temps milli-deg-C.
+		  * These are just examples and are not specific to our usage.
+		  *   type              temp
+		  *   --------          -------
+		  *   skin0             15944
+		  *   skin1             22407
+		  *   msicdie           37672
+		  *
+		  * See /sys/class/thermal/thermal_zone<i>
+		  * See /sys/class/thermal/cooling_device<i>
+		  */
+
+		tcdhdl = thermal_cooling_device_register(
+			(char *) tcd_type, gbprv, &tcd_ops);
+		if (IS_ERR(tcdhdl)) {
+			printk(GBURST_ALERT "Cooling device registration failed: %ld\n",
+				-PTR_ERR(tcdhdl));
+			sts = PTR_ERR(tcdhdl);
+			goto err_thermal;
+		}
+		gbprv->gbp_cooldv_hdl = tcdhdl;
+	}
+
+	if (gbprv->gbp_enable) {
+		sts = work_thread_create(gbprv);
+		if (sts < 0) {
+			/* abort init if unable to create thread. */
+			printk(GBURST_ALERT "Thread creation failed: %d\n",
+				-sts);
+			goto err_thread_creation;
+
+		}
+	}
+
+	/**
+	 * Ensure that all preceding stores are realized before
+	 * any following stores.
+	 */
+	smp_wmb();
+
+	gbprv->gbp_initialized = 1;
+
+	smp_wmb();
+
+	pfs_init(gbprv);
+
+	printk(GBURST_ALERT "gpu burst mode initialization -- done\n");
+
+	return 0;
+
+err_thread_creation:
+	thermal_cooling_device_unregister(gbprv->gbp_cooldv_hdl);
+	gbprv->gbp_cooldv_hdl = NULL;
+
+err_thermal:
+	{
+		struct gburst_interface_s gd_interface;
+		memset(&gd_interface, 0, sizeof(struct gburst_interface_s));
+		gburst_interface_set_data(&gd_interface);
+	}
+
+	free_irq(GBURST_IRQ_LEVEL, gbprv);
+
+err_interrupt:
+	mutex_destroy(&gbprv->gbp_state_times_mutex);
+	mutex_destroy(&gbprv->gbp_mutex_pwrgt_sts);
+
+	return sts;
+}
+
+
+/**
+ * gburst_module_init() - Classic module init function.
+ *
+ * Calls lower-level initialization.
+ *
+ * Function return value: negative to abort module installation.
+ */
+int gburst_module_init(void)
+{
+	struct gburst_pvt_s *gbprv;
+	int rva;
+
+	memset(&gburst_private_data, 0, sizeof(gburst_private_data));
+	gburst_private_ptr = &gburst_private_data;
+	gbprv = gburst_private_ptr;
+
+	rva = gburst_init(gbprv);
+
+	return rva;
+}
+EXPORT_SYMBOL(gburst_module_init);
+
+
+/**
+ * gburst_module_exit() - Classic module exit function.
+ */
+void __exit gburst_module_exit(void)
+{
+	gburst_cleanup(gburst_private_ptr);
+}
+EXPORT_SYMBOL(gburst_module_exit);
+
+
+#if 0
+#if (defined MODULE)
+module_init(gburst_module_init);
+#else
+/**
+ * Ensure that init happens for this module after init happens for the
+ * graphics driver, which in turn is observed to potentially be done late
+ * via late_initcall (which is in order right before the late_initcall_sync
+ * that this module may use).
+ */
+
+late_initcall_sync(gburst_module_init);
+#endif
+#endif
+
+/* module_exit(gburst_module_exit); */
+
+#endif /* if (defined CONFIG_GPU_BURST) || (defined CONFIG_GPU_BURST_MODULE) */
diff --git a/drivers/external_drivers/intel_media/graphics/gburst/utilf.c b/drivers/external_drivers/intel_media/graphics/gburst/utilf.c
new file mode 100644
index 0000000..4f674aa
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/gburst/utilf.c
@@ -0,0 +1,75 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "utilf.h"
+
+
+/**
+ * ut_isnprintf -- Like snprintf, except also accepts initial buffer index.
+ * @ix: Index within the buffer at which to begin output.
+ * @pbuf: Pointer to output buffer
+ * @bdim: Dimension of output buffer
+ * @fmt: snprintf-style format string.
+ * @...:  snprintf-style variable argument list.
+ *
+ * Except when input ix == -1, the output buffer is guaranteed to be
+ * null-terminated, even if underlying functions do not do so.
+ *
+ * If the specified initial index is negative, then an immediate error
+ * return is done.  One can therefore make multiple sequential calls to this
+ * function without having to check the return value until the very end.
+ * Example:
+ *      char sbuf[some_size];
+ *      int ix = 0;
+ *      ix = ut_isnprintf(ix, sbuf, sizeof(sbuf), "%s", arga);
+ *      ix = ut_isnprintf(ix, sbuf, sizeof(sbuf), "%s", argb);
+ *      if (ix < 0)
+ *              error_generic();
+ *      if (ix >= sizeof(sbuf)
+ *              error_buffer_too_small();
+ * Function return value:
+ * -1 if error:
+ * --  Underlying vsnprintf call gives error return.  Should not happen.
+ * --  Input parameter ix is negative.
+ * otherwise, the return value is the number of characters which would
+ * be generated for the given input, excluding the trailing '\0', as
+ * per ISO C99 (i.e., this may exceed the actual buffer length).
+ */
+int ut_isnprintf(int ix, char *pbuf, size_t bdim, const char *fmt, ...)
+{
+	va_list ap;
+	int nchrs;
+	int ii;
+
+	if (ix < 0)
+		return -1;
+
+	/**
+	 * If already past end of buffer, pass dimension of 0 to vsnprintf
+	 * so it will tell us how many additional characters are required.
+	 */
+	if (ix > bdim)
+		ii = bdim;
+	else
+		ii = ix;
+
+	va_start(ap, fmt);
+	nchrs = vsnprintf(pbuf+ii, bdim-ii, fmt, ap);
+	va_end(ap);
+
+	/**
+	 * Check in case vsnprintf returns an error value (not thought to
+	 * happen).
+	 */
+	if (nchrs < 0)
+		pbuf[ii] = '\0';
+	else {
+		nchrs += ix;
+		if (nchrs < bdim)
+			pbuf[nchrs] = '\0';
+		else
+			pbuf[bdim-1] = '\0';
+	}
+
+	return nchrs;
+}
diff --git a/drivers/external_drivers/intel_media/graphics/gburst/utilf.h b/drivers/external_drivers/intel_media/graphics/gburst/utilf.h
new file mode 100644
index 0000000..a880fa9
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/gburst/utilf.h
@@ -0,0 +1,41 @@
+#if !defined UTILF_H
+#define UTILF_H
+
+#include <linux/types.h>
+
+/**
+ * ut_isnprintf -- Like snprintf, except also accepts initial buffer index.
+ * @ix: Index within the buffer at which to begin output.
+ * @pbuf: Pointer to output buffer
+ * @bdim: Dimension of output buffer
+ * @fmt: snprintf-style format string.
+ * @...:  snprintf-style variable argument list.
+ *
+ * Except when input ix == -1, the output buffer is guaranteed to be
+ * null-terminated, even if underlying functions do not do so.
+ *
+ * If the specified initial index is negative, then an immediate error
+ * return is done.  One can therefore make multiple sequential calls to this
+ * function without having to check the return value until the very end.
+ * Example:
+ *      char sbuf[some_size];
+ *      int ix = 0;
+ *      ix = ut_isnprintf(ix, sbuf, sizeof(sbuf), "%s", arga);
+ *      ix = ut_isnprintf(ix, sbuf, sizeof(sbuf), "%s", argb);
+ *      if (ix < 0)
+ *              error_generic();
+ *      if (ix >= sizeof(sbuf)
+ *              error_buffer_too_small();
+ * Function return value:
+ * -1 if error:
+ * --  Underlying vsnprintf call gives error return.  Should not happen.
+ * --  Input parameter ix is negative.
+ * otherwise, the return value is the number of characters which would
+ * be generated for the given input, excluding the trailing '\0', as
+ * per ISO C99 (i.e., this may exceed the actual buffer length).
+ */
+int ut_isnprintf(int ix, char *pbuf, size_t bdim, const char *fmt, ...)
+	__attribute__((format(__printf__, 4, 5)));
+
+
+#endif /* if !defined UTILF_H */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/breakpoint_bridge/common_breakpoint_bridge.h b/drivers/external_drivers/intel_media/graphics/rgx/generated/breakpoint_bridge/common_breakpoint_bridge.h
new file mode 100644
index 0000000..835666d
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/breakpoint_bridge/common_breakpoint_bridge.h
@@ -0,0 +1,158 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common bridge header for breakpoint
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures that are used by both
+                the client and sever side of the bridge for breakpoint
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_BREAKPOINT_BRIDGE_H
+#define COMMON_BREAKPOINT_BRIDGE_H
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+
+
+#define PVRSRV_BRIDGE_BREAKPOINT_CMD_FIRST			0
+#define PVRSRV_BRIDGE_BREAKPOINT_RGXSETBREAKPOINT			PVRSRV_BRIDGE_BREAKPOINT_CMD_FIRST+0
+#define PVRSRV_BRIDGE_BREAKPOINT_RGXCLEARBREAKPOINT			PVRSRV_BRIDGE_BREAKPOINT_CMD_FIRST+1
+#define PVRSRV_BRIDGE_BREAKPOINT_RGXENABLEBREAKPOINT			PVRSRV_BRIDGE_BREAKPOINT_CMD_FIRST+2
+#define PVRSRV_BRIDGE_BREAKPOINT_RGXDISABLEBREAKPOINT			PVRSRV_BRIDGE_BREAKPOINT_CMD_FIRST+3
+#define PVRSRV_BRIDGE_BREAKPOINT_RGXOVERALLOCATEBPREGISTERS			PVRSRV_BRIDGE_BREAKPOINT_CMD_FIRST+4
+#define PVRSRV_BRIDGE_BREAKPOINT_CMD_LAST			(PVRSRV_BRIDGE_BREAKPOINT_CMD_FIRST+4)
+
+
+/*******************************************
+            RGXSetBreakpoint          
+ *******************************************/
+
+/* Bridge in structure for RGXSetBreakpoint */
+typedef struct PVRSRV_BRIDGE_IN_RGXSETBREAKPOINT_TAG
+{
+	IMG_HANDLE hDevNode;
+	IMG_HANDLE hPrivData;
+	IMG_UINT32 eFWDataMaster;
+	IMG_UINT32 ui32BreakpointAddr;
+	IMG_UINT32 ui32HandlerAddr;
+	IMG_UINT32 ui32DM;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXSETBREAKPOINT;
+
+
+/* Bridge out structure for RGXSetBreakpoint */
+typedef struct PVRSRV_BRIDGE_OUT_RGXSETBREAKPOINT_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXSETBREAKPOINT;
+
+/*******************************************
+            RGXClearBreakpoint          
+ *******************************************/
+
+/* Bridge in structure for RGXClearBreakpoint */
+typedef struct PVRSRV_BRIDGE_IN_RGXCLEARBREAKPOINT_TAG
+{
+	IMG_HANDLE hDevNode;
+	IMG_HANDLE hPrivData;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCLEARBREAKPOINT;
+
+
+/* Bridge out structure for RGXClearBreakpoint */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCLEARBREAKPOINT_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCLEARBREAKPOINT;
+
+/*******************************************
+            RGXEnableBreakpoint          
+ *******************************************/
+
+/* Bridge in structure for RGXEnableBreakpoint */
+typedef struct PVRSRV_BRIDGE_IN_RGXENABLEBREAKPOINT_TAG
+{
+	IMG_HANDLE hDevNode;
+	IMG_HANDLE hPrivData;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXENABLEBREAKPOINT;
+
+
+/* Bridge out structure for RGXEnableBreakpoint */
+typedef struct PVRSRV_BRIDGE_OUT_RGXENABLEBREAKPOINT_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXENABLEBREAKPOINT;
+
+/*******************************************
+            RGXDisableBreakpoint          
+ *******************************************/
+
+/* Bridge in structure for RGXDisableBreakpoint */
+typedef struct PVRSRV_BRIDGE_IN_RGXDISABLEBREAKPOINT_TAG
+{
+	IMG_HANDLE hDevNode;
+	IMG_HANDLE hPrivData;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDISABLEBREAKPOINT;
+
+
+/* Bridge out structure for RGXDisableBreakpoint */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDISABLEBREAKPOINT_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDISABLEBREAKPOINT;
+
+/*******************************************
+            RGXOverallocateBPRegisters          
+ *******************************************/
+
+/* Bridge in structure for RGXOverallocateBPRegisters */
+typedef struct PVRSRV_BRIDGE_IN_RGXOVERALLOCATEBPREGISTERS_TAG
+{
+	IMG_HANDLE hDevNode;
+	IMG_UINT32 ui32TempRegs;
+	IMG_UINT32 ui32SharedRegs;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXOVERALLOCATEBPREGISTERS;
+
+
+/* Bridge out structure for RGXOverallocateBPRegisters */
+typedef struct PVRSRV_BRIDGE_OUT_RGXOVERALLOCATEBPREGISTERS_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXOVERALLOCATEBPREGISTERS;
+
+#endif /* COMMON_BREAKPOINT_BRIDGE_H */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/breakpoint_bridge/server_breakpoint_bridge.c b/drivers/external_drivers/intel_media/graphics/rgx/generated/breakpoint_bridge/server_breakpoint_bridge.c
new file mode 100644
index 0000000..e376604
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/breakpoint_bridge/server_breakpoint_bridge.c
@@ -0,0 +1,392 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server bridge for breakpoint
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for breakpoint
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <asm/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxbreakpoint.h"
+
+
+#include "common_breakpoint_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#if defined (SUPPORT_AUTH)
+#include "osauth.h"
+#endif
+
+#include <linux/slab.h>
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+ 
+static IMG_INT
+PVRSRVBridgeRGXSetBreakpoint(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXSETBREAKPOINT *psRGXSetBreakpointIN,
+					  PVRSRV_BRIDGE_OUT_RGXSETBREAKPOINT *psRGXSetBreakpointOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hDevNodeInt = IMG_NULL;
+	IMG_HANDLE hPrivDataInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXSetBreakpointOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &hDevNodeInt,
+											psRGXSetBreakpointIN->hDevNode,
+											PVRSRV_HANDLE_TYPE_DEV_NODE);
+					if(psRGXSetBreakpointOUT->eError != PVRSRV_OK)
+					{
+						goto RGXSetBreakpoint_exit;
+					}
+				}
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXSetBreakpointOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &hPrivDataInt,
+											psRGXSetBreakpointIN->hPrivData,
+											PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+					if(psRGXSetBreakpointOUT->eError != PVRSRV_OK)
+					{
+						goto RGXSetBreakpoint_exit;
+					}
+				}
+
+
+	psRGXSetBreakpointOUT->eError =
+		PVRSRVRGXSetBreakpointKM(
+					hDevNodeInt,
+					hPrivDataInt,
+					psRGXSetBreakpointIN->eFWDataMaster,
+					psRGXSetBreakpointIN->ui32BreakpointAddr,
+					psRGXSetBreakpointIN->ui32HandlerAddr,
+					psRGXSetBreakpointIN->ui32DM);
+
+
+
+
+RGXSetBreakpoint_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXClearBreakpoint(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXCLEARBREAKPOINT *psRGXClearBreakpointIN,
+					  PVRSRV_BRIDGE_OUT_RGXCLEARBREAKPOINT *psRGXClearBreakpointOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hDevNodeInt = IMG_NULL;
+	IMG_HANDLE hPrivDataInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXClearBreakpointOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &hDevNodeInt,
+											psRGXClearBreakpointIN->hDevNode,
+											PVRSRV_HANDLE_TYPE_DEV_NODE);
+					if(psRGXClearBreakpointOUT->eError != PVRSRV_OK)
+					{
+						goto RGXClearBreakpoint_exit;
+					}
+				}
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXClearBreakpointOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &hPrivDataInt,
+											psRGXClearBreakpointIN->hPrivData,
+											PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+					if(psRGXClearBreakpointOUT->eError != PVRSRV_OK)
+					{
+						goto RGXClearBreakpoint_exit;
+					}
+				}
+
+
+	psRGXClearBreakpointOUT->eError =
+		PVRSRVRGXClearBreakpointKM(
+					hDevNodeInt,
+					hPrivDataInt);
+
+
+
+
+RGXClearBreakpoint_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXEnableBreakpoint(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXENABLEBREAKPOINT *psRGXEnableBreakpointIN,
+					  PVRSRV_BRIDGE_OUT_RGXENABLEBREAKPOINT *psRGXEnableBreakpointOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hDevNodeInt = IMG_NULL;
+	IMG_HANDLE hPrivDataInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXEnableBreakpointOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &hDevNodeInt,
+											psRGXEnableBreakpointIN->hDevNode,
+											PVRSRV_HANDLE_TYPE_DEV_NODE);
+					if(psRGXEnableBreakpointOUT->eError != PVRSRV_OK)
+					{
+						goto RGXEnableBreakpoint_exit;
+					}
+				}
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXEnableBreakpointOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &hPrivDataInt,
+											psRGXEnableBreakpointIN->hPrivData,
+											PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+					if(psRGXEnableBreakpointOUT->eError != PVRSRV_OK)
+					{
+						goto RGXEnableBreakpoint_exit;
+					}
+				}
+
+
+	psRGXEnableBreakpointOUT->eError =
+		PVRSRVRGXEnableBreakpointKM(
+					hDevNodeInt,
+					hPrivDataInt);
+
+
+
+
+RGXEnableBreakpoint_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXDisableBreakpoint(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXDISABLEBREAKPOINT *psRGXDisableBreakpointIN,
+					  PVRSRV_BRIDGE_OUT_RGXDISABLEBREAKPOINT *psRGXDisableBreakpointOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hDevNodeInt = IMG_NULL;
+	IMG_HANDLE hPrivDataInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXDisableBreakpointOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &hDevNodeInt,
+											psRGXDisableBreakpointIN->hDevNode,
+											PVRSRV_HANDLE_TYPE_DEV_NODE);
+					if(psRGXDisableBreakpointOUT->eError != PVRSRV_OK)
+					{
+						goto RGXDisableBreakpoint_exit;
+					}
+				}
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXDisableBreakpointOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &hPrivDataInt,
+											psRGXDisableBreakpointIN->hPrivData,
+											PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+					if(psRGXDisableBreakpointOUT->eError != PVRSRV_OK)
+					{
+						goto RGXDisableBreakpoint_exit;
+					}
+				}
+
+
+	psRGXDisableBreakpointOUT->eError =
+		PVRSRVRGXDisableBreakpointKM(
+					hDevNodeInt,
+					hPrivDataInt);
+
+
+
+
+RGXDisableBreakpoint_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXOverallocateBPRegisters(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXOVERALLOCATEBPREGISTERS *psRGXOverallocateBPRegistersIN,
+					  PVRSRV_BRIDGE_OUT_RGXOVERALLOCATEBPREGISTERS *psRGXOverallocateBPRegistersOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hDevNodeInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXOverallocateBPRegistersOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &hDevNodeInt,
+											psRGXOverallocateBPRegistersIN->hDevNode,
+											PVRSRV_HANDLE_TYPE_DEV_NODE);
+					if(psRGXOverallocateBPRegistersOUT->eError != PVRSRV_OK)
+					{
+						goto RGXOverallocateBPRegisters_exit;
+					}
+				}
+
+
+	psRGXOverallocateBPRegistersOUT->eError =
+		PVRSRVRGXOverallocateBPRegistersKM(
+					hDevNodeInt,
+					psRGXOverallocateBPRegistersIN->ui32TempRegs,
+					psRGXOverallocateBPRegistersIN->ui32SharedRegs);
+
+
+
+
+RGXOverallocateBPRegisters_exit:
+
+	return 0;
+}
+
+
+
+/* *************************************************************************** 
+ * Server bridge dispatch related glue 
+ */
+
+
+PVRSRV_ERROR InitBREAKPOINTBridge(IMG_VOID);
+PVRSRV_ERROR DeinitBREAKPOINTBridge(IMG_VOID);
+
+/*
+ * Register all BREAKPOINT functions with services
+ */
+PVRSRV_ERROR InitBREAKPOINTBridge(IMG_VOID)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_BREAKPOINT, PVRSRV_BRIDGE_BREAKPOINT_RGXSETBREAKPOINT, PVRSRVBridgeRGXSetBreakpoint,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_BREAKPOINT, PVRSRV_BRIDGE_BREAKPOINT_RGXCLEARBREAKPOINT, PVRSRVBridgeRGXClearBreakpoint,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_BREAKPOINT, PVRSRV_BRIDGE_BREAKPOINT_RGXENABLEBREAKPOINT, PVRSRVBridgeRGXEnableBreakpoint,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_BREAKPOINT, PVRSRV_BRIDGE_BREAKPOINT_RGXDISABLEBREAKPOINT, PVRSRVBridgeRGXDisableBreakpoint,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_BREAKPOINT, PVRSRV_BRIDGE_BREAKPOINT_RGXOVERALLOCATEBPREGISTERS, PVRSRVBridgeRGXOverallocateBPRegisters,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all breakpoint functions with services
+ */
+PVRSRV_ERROR DeinitBREAKPOINTBridge(IMG_VOID)
+{
+	return PVRSRV_OK;
+}
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/cachegeneric_bridge/common_cachegeneric_bridge.h b/drivers/external_drivers/intel_media/graphics/rgx/generated/cachegeneric_bridge/common_cachegeneric_bridge.h
new file mode 100644
index 0000000..8d3fd02
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/cachegeneric_bridge/common_cachegeneric_bridge.h
@@ -0,0 +1,76 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common bridge header for cachegeneric
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures that are used by both
+                the client and sever side of the bridge for cachegeneric
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_CACHEGENERIC_BRIDGE_H
+#define COMMON_CACHEGENERIC_BRIDGE_H
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "cache_external.h"
+
+
+#define PVRSRV_BRIDGE_CACHEGENERIC_CMD_FIRST			0
+#define PVRSRV_BRIDGE_CACHEGENERIC_CACHEOPQUEUE			PVRSRV_BRIDGE_CACHEGENERIC_CMD_FIRST+0
+#define PVRSRV_BRIDGE_CACHEGENERIC_CMD_LAST			(PVRSRV_BRIDGE_CACHEGENERIC_CMD_FIRST+0)
+
+
+/*******************************************
+            CacheOpQueue          
+ *******************************************/
+
+/* Bridge in structure for CacheOpQueue */
+typedef struct PVRSRV_BRIDGE_IN_CACHEOPQUEUE_TAG
+{
+	PVRSRV_CACHE_OP iuCacheOp;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_CACHEOPQUEUE;
+
+
+/* Bridge out structure for CacheOpQueue */
+typedef struct PVRSRV_BRIDGE_OUT_CACHEOPQUEUE_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_CACHEOPQUEUE;
+
+#endif /* COMMON_CACHEGENERIC_BRIDGE_H */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/cachegeneric_bridge/server_cachegeneric_bridge.c b/drivers/external_drivers/intel_media/graphics/rgx/generated/cachegeneric_bridge/server_cachegeneric_bridge.c
new file mode 100644
index 0000000..359074a
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/cachegeneric_bridge/server_cachegeneric_bridge.c
@@ -0,0 +1,131 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server bridge for cachegeneric
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for cachegeneric
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <asm/uaccess.h>
+
+#include "img_defs.h"
+
+#include "cache_generic.h"
+
+
+#include "common_cachegeneric_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#if defined (SUPPORT_AUTH)
+#include "osauth.h"
+#endif
+
+#include <linux/slab.h>
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+ 
+static IMG_INT
+PVRSRVBridgeCacheOpQueue(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_CACHEOPQUEUE *psCacheOpQueueIN,
+					  PVRSRV_BRIDGE_OUT_CACHEOPQUEUE *psCacheOpQueueOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+
+
+
+
+
+	psCacheOpQueueOUT->eError =
+		CacheOpQueue(
+					psCacheOpQueueIN->iuCacheOp);
+
+
+
+
+
+	return 0;
+}
+
+
+
+/* *************************************************************************** 
+ * Server bridge dispatch related glue 
+ */
+
+
+PVRSRV_ERROR InitCACHEGENERICBridge(IMG_VOID);
+PVRSRV_ERROR DeinitCACHEGENERICBridge(IMG_VOID);
+
+/*
+ * Register all CACHEGENERIC functions with services
+ */
+PVRSRV_ERROR InitCACHEGENERICBridge(IMG_VOID)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_CACHEGENERIC, PVRSRV_BRIDGE_CACHEGENERIC_CACHEOPQUEUE, PVRSRVBridgeCacheOpQueue,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all cachegeneric functions with services
+ */
+PVRSRV_ERROR DeinitCACHEGENERICBridge(IMG_VOID)
+{
+	return PVRSRV_OK;
+}
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/cmm_bridge/common_cmm_bridge.h b/drivers/external_drivers/intel_media/graphics/rgx/generated/cmm_bridge/common_cmm_bridge.h
new file mode 100644
index 0000000..c0ec88d
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/cmm_bridge/common_cmm_bridge.h
@@ -0,0 +1,115 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common bridge header for cmm
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures that are used by both
+                the client and sever side of the bridge for cmm
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_CMM_BRIDGE_H
+#define COMMON_CMM_BRIDGE_H
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "devicemem_typedefs.h"
+
+
+#define PVRSRV_BRIDGE_CMM_CMD_FIRST			0
+#define PVRSRV_BRIDGE_CMM_DEVMEMINTCTXEXPORT			PVRSRV_BRIDGE_CMM_CMD_FIRST+0
+#define PVRSRV_BRIDGE_CMM_DEVMEMINTCTXUNEXPORT			PVRSRV_BRIDGE_CMM_CMD_FIRST+1
+#define PVRSRV_BRIDGE_CMM_DEVMEMINTCTXIMPORT			PVRSRV_BRIDGE_CMM_CMD_FIRST+2
+#define PVRSRV_BRIDGE_CMM_CMD_LAST			(PVRSRV_BRIDGE_CMM_CMD_FIRST+2)
+
+
+/*******************************************
+            DevmemIntCtxExport          
+ *******************************************/
+
+/* Bridge in structure for DevmemIntCtxExport */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTCTXEXPORT_TAG
+{
+	IMG_HANDLE hDevMemServerContext;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTCTXEXPORT;
+
+
+/* Bridge out structure for DevmemIntCtxExport */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTCTXEXPORT_TAG
+{
+	IMG_HANDLE hDevMemIntCtxExport;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTCTXEXPORT;
+
+/*******************************************
+            DevmemIntCtxUnexport          
+ *******************************************/
+
+/* Bridge in structure for DevmemIntCtxUnexport */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTCTXUNEXPORT_TAG
+{
+	IMG_HANDLE hDevMemIntCtxExport;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTCTXUNEXPORT;
+
+
+/* Bridge out structure for DevmemIntCtxUnexport */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTCTXUNEXPORT_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTCTXUNEXPORT;
+
+/*******************************************
+            DevmemIntCtxImport          
+ *******************************************/
+
+/* Bridge in structure for DevmemIntCtxImport */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTCTXIMPORT_TAG
+{
+	IMG_HANDLE hDevMemIntCtxExport;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTCTXIMPORT;
+
+
+/* Bridge out structure for DevmemIntCtxImport */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTCTXIMPORT_TAG
+{
+	IMG_HANDLE hDevMemServerContext;
+	IMG_HANDLE hPrivData;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTCTXIMPORT;
+
+#endif /* COMMON_CMM_BRIDGE_H */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/cmm_bridge/server_cmm_bridge.c b/drivers/external_drivers/intel_media/graphics/rgx/generated/cmm_bridge/server_cmm_bridge.c
new file mode 100644
index 0000000..5c3fe15
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/cmm_bridge/server_cmm_bridge.c
@@ -0,0 +1,393 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server bridge for cmm
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for cmm
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <asm/uaccess.h>
+
+#include "img_defs.h"
+
+#include "pmr.h"
+#include "devicemem_server.h"
+
+
+#include "common_cmm_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#if defined (SUPPORT_AUTH)
+#include "osauth.h"
+#endif
+
+#include <linux/slab.h>
+
+
+static PVRSRV_ERROR ReleaseDevMemIntCtxExport(IMG_VOID *pvData)
+{
+	PVR_UNREFERENCED_PARAMETER(pvData);
+
+	return PVRSRV_OK;
+}
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+ 
+static IMG_INT
+PVRSRVBridgeDevmemIntCtxExport(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DEVMEMINTCTXEXPORT *psDevmemIntCtxExportIN,
+					  PVRSRV_BRIDGE_OUT_DEVMEMINTCTXEXPORT *psDevmemIntCtxExportOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	DEVMEMINT_CTX * psDevMemServerContextInt = IMG_NULL;
+	DEVMEMINT_CTX_EXPORT * psDevMemIntCtxExportInt = IMG_NULL;
+	IMG_HANDLE hDevMemIntCtxExportInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psDevmemIntCtxExportOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psDevMemServerContextInt,
+											psDevmemIntCtxExportIN->hDevMemServerContext,
+											PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+					if(psDevmemIntCtxExportOUT->eError != PVRSRV_OK)
+					{
+						goto DevmemIntCtxExport_exit;
+					}
+				}
+
+
+	psDevmemIntCtxExportOUT->eError =
+		DevmemIntCtxExport(
+					psDevMemServerContextInt,
+					&psDevMemIntCtxExportInt);
+	/* Exit early if bridged call fails */
+	if(psDevmemIntCtxExportOUT->eError != PVRSRV_OK)
+	{
+		goto DevmemIntCtxExport_exit;
+	}
+
+
+	/*
+	 * For cases where we need a cross process handle we actually allocate two.
+	 * 
+	 * The first one is a connection specific handle and it gets given the real
+	 * release function. This handle does *NOT* get returned to the caller. It's
+	 * purpose is to release any leaked resources when we either have a bad or
+	 * abnormally terminated client. If we didn't do this then the resource
+	 * wouldn't be freed until driver unload. If the resource is freed normally,
+	 * this handle can be looked up via the cross process handle and then
+	 * released accordingly.
+	 * 
+	 * The second one is a cross process handle and it gets given a noop release
+	 * function. This handle does get returned to the caller.
+	 */
+	psDevmemIntCtxExportOUT->eError = PVRSRVAllocHandle(psConnection->psHandleBase,
+							&hDevMemIntCtxExportInt,
+							(IMG_VOID *) psDevMemIntCtxExportInt,
+							PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX_EXPORT,
+							PVRSRV_HANDLE_ALLOC_FLAG_SHARED
+							,(PFN_HANDLE_RELEASE)&DevmemIntCtxUnexport);
+	if (psDevmemIntCtxExportOUT->eError != PVRSRV_OK)
+	{
+		goto DevmemIntCtxExport_exit;
+	}
+
+	psDevmemIntCtxExportOUT->eError = PVRSRVAllocHandle(KERNEL_HANDLE_BASE,
+							&psDevmemIntCtxExportOUT->hDevMemIntCtxExport,
+							(IMG_VOID *) psDevMemIntCtxExportInt,
+							PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX_EXPORT,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+							(PFN_HANDLE_RELEASE)&ReleaseDevMemIntCtxExport);
+	if (psDevmemIntCtxExportOUT->eError != PVRSRV_OK)
+	{
+		goto DevmemIntCtxExport_exit;
+	}
+
+
+
+DevmemIntCtxExport_exit:
+	if (psDevmemIntCtxExportOUT->eError != PVRSRV_OK)
+	{
+		if (psDevmemIntCtxExportOUT->hDevMemIntCtxExport)
+		{
+			PVRSRV_ERROR eError = PVRSRVReleaseHandle(KERNEL_HANDLE_BASE,
+						(IMG_HANDLE) psDevmemIntCtxExportOUT->hDevMemIntCtxExport,
+						PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX_EXPORT);
+
+			/* Releasing the handle should free/destroy/release the resource. This should never fail... */
+			PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY));
+
+		}
+
+		if (hDevMemIntCtxExportInt)
+		{
+			PVRSRV_ERROR eError = PVRSRVReleaseHandle(psConnection->psHandleBase,
+						hDevMemIntCtxExportInt,
+						PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX_EXPORT);
+
+			/* Releasing the handle should free/destroy/release the resource. This should never fail... */
+			PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY));
+
+			/* Avoid freeing/destroying/releasing the resource a second time below */
+			psDevMemIntCtxExportInt = IMG_NULL;
+		}
+
+		if (psDevMemIntCtxExportInt)
+		{
+			DevmemIntCtxUnexport(psDevMemIntCtxExportInt);
+		}
+	}
+
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDevmemIntCtxUnexport(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DEVMEMINTCTXUNEXPORT *psDevmemIntCtxUnexportIN,
+					  PVRSRV_BRIDGE_OUT_DEVMEMINTCTXUNEXPORT *psDevmemIntCtxUnexportOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	DEVMEMINT_CTX_EXPORT * psDevMemIntCtxExportInt = IMG_NULL;
+	IMG_HANDLE hDevMemIntCtxExportInt = IMG_NULL;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+
+
+
+
+
+
+	psDevmemIntCtxUnexportOUT->eError =
+		PVRSRVLookupHandle(KERNEL_HANDLE_BASE,
+					(IMG_VOID **) &psDevMemIntCtxExportInt,
+					(IMG_HANDLE) psDevmemIntCtxUnexportIN->hDevMemIntCtxExport,
+					PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX_EXPORT);
+	PVR_ASSERT(psDevmemIntCtxUnexportOUT->eError == PVRSRV_OK);
+
+	/*
+	 * Find the connection specific handle that represents the same data
+	 * as the cross process handle as releasing it will actually call the
+	 * data's real release function (see the function where the cross
+	 * process handle is allocated for more details).
+	 */
+	psDevmemIntCtxUnexportOUT->eError =
+		PVRSRVFindHandle(psConnection->psHandleBase,
+					&hDevMemIntCtxExportInt,
+					psDevMemIntCtxExportInt,
+					PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX_EXPORT);
+	PVR_ASSERT(psDevmemIntCtxUnexportOUT->eError == PVRSRV_OK);
+
+	psDevmemIntCtxUnexportOUT->eError =
+		PVRSRVReleaseHandle(psConnection->psHandleBase,
+					hDevMemIntCtxExportInt,
+					PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX_EXPORT);
+	PVR_ASSERT((psDevmemIntCtxUnexportOUT->eError == PVRSRV_OK) || (psDevmemIntCtxUnexportOUT->eError == PVRSRV_ERROR_RETRY));
+
+	psDevmemIntCtxUnexportOUT->eError =
+		PVRSRVReleaseHandle(KERNEL_HANDLE_BASE,
+					(IMG_HANDLE) psDevmemIntCtxUnexportIN->hDevMemIntCtxExport,
+					PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX_EXPORT);
+	if ((psDevmemIntCtxUnexportOUT->eError != PVRSRV_OK) && (psDevmemIntCtxUnexportOUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_ASSERT(0);
+		goto DevmemIntCtxUnexport_exit;
+	}
+
+
+
+DevmemIntCtxUnexport_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDevmemIntCtxImport(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DEVMEMINTCTXIMPORT *psDevmemIntCtxImportIN,
+					  PVRSRV_BRIDGE_OUT_DEVMEMINTCTXIMPORT *psDevmemIntCtxImportOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	DEVMEMINT_CTX_EXPORT * psDevMemIntCtxExportInt = IMG_NULL;
+	DEVMEMINT_CTX * psDevMemServerContextInt = IMG_NULL;
+	IMG_HANDLE hPrivDataInt = IMG_NULL;
+
+
+
+	psDevmemIntCtxImportOUT->hDevMemServerContext = IMG_NULL;
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psDevmemIntCtxImportOUT->eError =
+						PVRSRVLookupHandle(KERNEL_HANDLE_BASE,
+											(IMG_VOID **) &psDevMemIntCtxExportInt,
+											psDevmemIntCtxImportIN->hDevMemIntCtxExport,
+											PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX_EXPORT);
+					if(psDevmemIntCtxImportOUT->eError != PVRSRV_OK)
+					{
+						goto DevmemIntCtxImport_exit;
+					}
+				}
+
+
+	psDevmemIntCtxImportOUT->eError =
+		DevmemIntCtxImport(
+					psDevMemIntCtxExportInt,
+					&psDevMemServerContextInt,
+					&hPrivDataInt);
+	/* Exit early if bridged call fails */
+	if(psDevmemIntCtxImportOUT->eError != PVRSRV_OK)
+	{
+		goto DevmemIntCtxImport_exit;
+	}
+
+
+	psDevmemIntCtxImportOUT->eError = PVRSRVAllocHandle(psConnection->psHandleBase,
+							&psDevmemIntCtxImportOUT->hDevMemServerContext,
+							(IMG_VOID *) psDevMemServerContextInt,
+							PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+							,(PFN_HANDLE_RELEASE)&DevmemIntCtxDestroy);
+	if (psDevmemIntCtxImportOUT->eError != PVRSRV_OK)
+	{
+		goto DevmemIntCtxImport_exit;
+	}
+
+
+	psDevmemIntCtxImportOUT->eError = PVRSRVAllocSubHandle(psConnection->psHandleBase,
+							&psDevmemIntCtxImportOUT->hPrivData,
+							(IMG_VOID *) hPrivDataInt,
+							PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+							,psDevmemIntCtxImportOUT->hDevMemServerContext);
+	if (psDevmemIntCtxImportOUT->eError != PVRSRV_OK)
+	{
+		goto DevmemIntCtxImport_exit;
+	}
+
+
+
+
+DevmemIntCtxImport_exit:
+	if (psDevmemIntCtxImportOUT->eError != PVRSRV_OK)
+	{
+		if (psDevmemIntCtxImportOUT->hDevMemServerContext)
+		{
+			PVRSRV_ERROR eError = PVRSRVReleaseHandle(psConnection->psHandleBase,
+						(IMG_HANDLE) psDevmemIntCtxImportOUT->hDevMemServerContext,
+						PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+
+			/* Releasing the handle should free/destroy/release the resource. This should never fail... */
+			PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY));
+
+			/* Avoid freeing/destroying/releasing the resource a second time below */
+			psDevMemServerContextInt = IMG_NULL;
+		}
+
+
+		if (psDevMemServerContextInt)
+		{
+			DevmemIntCtxDestroy(psDevMemServerContextInt);
+		}
+	}
+
+
+	return 0;
+}
+
+
+
+/* *************************************************************************** 
+ * Server bridge dispatch related glue 
+ */
+
+
+PVRSRV_ERROR InitCMMBridge(IMG_VOID);
+PVRSRV_ERROR DeinitCMMBridge(IMG_VOID);
+
+/*
+ * Register all CMM functions with services
+ */
+PVRSRV_ERROR InitCMMBridge(IMG_VOID)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_CMM, PVRSRV_BRIDGE_CMM_DEVMEMINTCTXEXPORT, PVRSRVBridgeDevmemIntCtxExport,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_CMM, PVRSRV_BRIDGE_CMM_DEVMEMINTCTXUNEXPORT, PVRSRVBridgeDevmemIntCtxUnexport,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_CMM, PVRSRV_BRIDGE_CMM_DEVMEMINTCTXIMPORT, PVRSRVBridgeDevmemIntCtxImport,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all cmm functions with services
+ */
+PVRSRV_ERROR DeinitCMMBridge(IMG_VOID)
+{
+	return PVRSRV_OK;
+}
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/dc_bridge/common_dc_bridge.h b/drivers/external_drivers/intel_media/graphics/rgx/generated/dc_bridge/common_dc_bridge.h
new file mode 100644
index 0000000..1bc77f0
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/dc_bridge/common_dc_bridge.h
@@ -0,0 +1,579 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common bridge header for dc
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures that are used by both
+                the client and sever side of the bridge for dc
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_DC_BRIDGE_H
+#define COMMON_DC_BRIDGE_H
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "pvrsrv_surface.h"
+#include "dc_external.h"
+#include "dc_common.h"
+
+
+#define PVRSRV_BRIDGE_DC_CMD_FIRST			0
+#define PVRSRV_BRIDGE_DC_DCDEVICESQUERYCOUNT			PVRSRV_BRIDGE_DC_CMD_FIRST+0
+#define PVRSRV_BRIDGE_DC_DCDEVICESENUMERATE			PVRSRV_BRIDGE_DC_CMD_FIRST+1
+#define PVRSRV_BRIDGE_DC_DCDEVICEACQUIRE			PVRSRV_BRIDGE_DC_CMD_FIRST+2
+#define PVRSRV_BRIDGE_DC_DCDEVICERELEASE			PVRSRV_BRIDGE_DC_CMD_FIRST+3
+#define PVRSRV_BRIDGE_DC_DCGETINFO			PVRSRV_BRIDGE_DC_CMD_FIRST+4
+#define PVRSRV_BRIDGE_DC_DCPANELQUERYCOUNT			PVRSRV_BRIDGE_DC_CMD_FIRST+5
+#define PVRSRV_BRIDGE_DC_DCPANELQUERY			PVRSRV_BRIDGE_DC_CMD_FIRST+6
+#define PVRSRV_BRIDGE_DC_DCFORMATQUERY			PVRSRV_BRIDGE_DC_CMD_FIRST+7
+#define PVRSRV_BRIDGE_DC_DCDIMQUERY			PVRSRV_BRIDGE_DC_CMD_FIRST+8
+#define PVRSRV_BRIDGE_DC_DCSETBLANK			PVRSRV_BRIDGE_DC_CMD_FIRST+9
+#define PVRSRV_BRIDGE_DC_DCSETVSYNCREPORTING			PVRSRV_BRIDGE_DC_CMD_FIRST+10
+#define PVRSRV_BRIDGE_DC_DCLASTVSYNCQUERY			PVRSRV_BRIDGE_DC_CMD_FIRST+11
+#define PVRSRV_BRIDGE_DC_DCSYSTEMBUFFERACQUIRE			PVRSRV_BRIDGE_DC_CMD_FIRST+12
+#define PVRSRV_BRIDGE_DC_DCSYSTEMBUFFERRELEASE			PVRSRV_BRIDGE_DC_CMD_FIRST+13
+#define PVRSRV_BRIDGE_DC_DCDISPLAYCONTEXTCREATE			PVRSRV_BRIDGE_DC_CMD_FIRST+14
+#define PVRSRV_BRIDGE_DC_DCDISPLAYCONTEXTCONFIGURECHECK			PVRSRV_BRIDGE_DC_CMD_FIRST+15
+#define PVRSRV_BRIDGE_DC_DCDISPLAYCONTEXTCONFIGURE			PVRSRV_BRIDGE_DC_CMD_FIRST+16
+#define PVRSRV_BRIDGE_DC_DCDISPLAYCONTEXTDESTROY			PVRSRV_BRIDGE_DC_CMD_FIRST+17
+#define PVRSRV_BRIDGE_DC_DCBUFFERALLOC			PVRSRV_BRIDGE_DC_CMD_FIRST+18
+#define PVRSRV_BRIDGE_DC_DCBUFFERIMPORT			PVRSRV_BRIDGE_DC_CMD_FIRST+19
+#define PVRSRV_BRIDGE_DC_DCBUFFERFREE			PVRSRV_BRIDGE_DC_CMD_FIRST+20
+#define PVRSRV_BRIDGE_DC_DCBUFFERUNIMPORT			PVRSRV_BRIDGE_DC_CMD_FIRST+21
+#define PVRSRV_BRIDGE_DC_DCBUFFERPIN			PVRSRV_BRIDGE_DC_CMD_FIRST+22
+#define PVRSRV_BRIDGE_DC_DCBUFFERUNPIN			PVRSRV_BRIDGE_DC_CMD_FIRST+23
+#define PVRSRV_BRIDGE_DC_DCBUFFERACQUIRE			PVRSRV_BRIDGE_DC_CMD_FIRST+24
+#define PVRSRV_BRIDGE_DC_DCBUFFERRELEASE			PVRSRV_BRIDGE_DC_CMD_FIRST+25
+#define PVRSRV_BRIDGE_DC_CMD_LAST			(PVRSRV_BRIDGE_DC_CMD_FIRST+25)
+
+
+/*******************************************
+            DCDevicesQueryCount          
+ *******************************************/
+
+/* Bridge in structure for DCDevicesQueryCount */
+typedef struct PVRSRV_BRIDGE_IN_DCDEVICESQUERYCOUNT_TAG
+{
+	 IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DCDEVICESQUERYCOUNT;
+
+
+/* Bridge out structure for DCDevicesQueryCount */
+typedef struct PVRSRV_BRIDGE_OUT_DCDEVICESQUERYCOUNT_TAG
+{
+	IMG_UINT32 ui32DeviceCount;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DCDEVICESQUERYCOUNT;
+
+/*******************************************
+            DCDevicesEnumerate          
+ *******************************************/
+
+/* Bridge in structure for DCDevicesEnumerate */
+typedef struct PVRSRV_BRIDGE_IN_DCDEVICESENUMERATE_TAG
+{
+	IMG_UINT32 ui32DeviceArraySize;
+	/* Output pointer pui32DeviceIndex is also an implied input */
+	IMG_UINT32 * pui32DeviceIndex;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DCDEVICESENUMERATE;
+
+
+/* Bridge out structure for DCDevicesEnumerate */
+typedef struct PVRSRV_BRIDGE_OUT_DCDEVICESENUMERATE_TAG
+{
+	IMG_UINT32 ui32DeviceCount;
+	IMG_UINT32 * pui32DeviceIndex;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DCDEVICESENUMERATE;
+
+/*******************************************
+            DCDeviceAcquire          
+ *******************************************/
+
+/* Bridge in structure for DCDeviceAcquire */
+typedef struct PVRSRV_BRIDGE_IN_DCDEVICEACQUIRE_TAG
+{
+	IMG_UINT32 ui32DeviceIndex;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DCDEVICEACQUIRE;
+
+
+/* Bridge out structure for DCDeviceAcquire */
+typedef struct PVRSRV_BRIDGE_OUT_DCDEVICEACQUIRE_TAG
+{
+	IMG_HANDLE hDevice;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DCDEVICEACQUIRE;
+
+/*******************************************
+            DCDeviceRelease          
+ *******************************************/
+
+/* Bridge in structure for DCDeviceRelease */
+typedef struct PVRSRV_BRIDGE_IN_DCDEVICERELEASE_TAG
+{
+	IMG_HANDLE hDevice;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DCDEVICERELEASE;
+
+
+/* Bridge out structure for DCDeviceRelease */
+typedef struct PVRSRV_BRIDGE_OUT_DCDEVICERELEASE_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DCDEVICERELEASE;
+
+/*******************************************
+            DCGetInfo          
+ *******************************************/
+
+/* Bridge in structure for DCGetInfo */
+typedef struct PVRSRV_BRIDGE_IN_DCGETINFO_TAG
+{
+	IMG_HANDLE hDevice;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DCGETINFO;
+
+
+/* Bridge out structure for DCGetInfo */
+typedef struct PVRSRV_BRIDGE_OUT_DCGETINFO_TAG
+{
+	DC_DISPLAY_INFO sDisplayInfo;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DCGETINFO;
+
+/*******************************************
+            DCPanelQueryCount          
+ *******************************************/
+
+/* Bridge in structure for DCPanelQueryCount */
+typedef struct PVRSRV_BRIDGE_IN_DCPANELQUERYCOUNT_TAG
+{
+	IMG_HANDLE hDevice;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DCPANELQUERYCOUNT;
+
+
+/* Bridge out structure for DCPanelQueryCount */
+typedef struct PVRSRV_BRIDGE_OUT_DCPANELQUERYCOUNT_TAG
+{
+	IMG_UINT32 ui32NumPanels;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DCPANELQUERYCOUNT;
+
+/*******************************************
+            DCPanelQuery          
+ *******************************************/
+
+/* Bridge in structure for DCPanelQuery */
+typedef struct PVRSRV_BRIDGE_IN_DCPANELQUERY_TAG
+{
+	IMG_HANDLE hDevice;
+	IMG_UINT32 ui32PanelsArraySize;
+	/* Output pointer psPanelInfo is also an implied input */
+	PVRSRV_PANEL_INFO * psPanelInfo;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DCPANELQUERY;
+
+
+/* Bridge out structure for DCPanelQuery */
+typedef struct PVRSRV_BRIDGE_OUT_DCPANELQUERY_TAG
+{
+	IMG_UINT32 ui32NumPanels;
+	PVRSRV_PANEL_INFO * psPanelInfo;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DCPANELQUERY;
+
+/*******************************************
+            DCFormatQuery          
+ *******************************************/
+
+/* Bridge in structure for DCFormatQuery */
+typedef struct PVRSRV_BRIDGE_IN_DCFORMATQUERY_TAG
+{
+	IMG_HANDLE hDevice;
+	IMG_UINT32 ui32NumFormats;
+	PVRSRV_SURFACE_FORMAT * psFormat;
+	/* Output pointer pui32Supported is also an implied input */
+	IMG_UINT32 * pui32Supported;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DCFORMATQUERY;
+
+
+/* Bridge out structure for DCFormatQuery */
+typedef struct PVRSRV_BRIDGE_OUT_DCFORMATQUERY_TAG
+{
+	IMG_UINT32 * pui32Supported;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DCFORMATQUERY;
+
+/*******************************************
+            DCDimQuery          
+ *******************************************/
+
+/* Bridge in structure for DCDimQuery */
+typedef struct PVRSRV_BRIDGE_IN_DCDIMQUERY_TAG
+{
+	IMG_HANDLE hDevice;
+	IMG_UINT32 ui32NumDims;
+	PVRSRV_SURFACE_DIMS * psDim;
+	/* Output pointer pui32Supported is also an implied input */
+	IMG_UINT32 * pui32Supported;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DCDIMQUERY;
+
+
+/* Bridge out structure for DCDimQuery */
+typedef struct PVRSRV_BRIDGE_OUT_DCDIMQUERY_TAG
+{
+	IMG_UINT32 * pui32Supported;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DCDIMQUERY;
+
+/*******************************************
+            DCSetBlank          
+ *******************************************/
+
+/* Bridge in structure for DCSetBlank */
+typedef struct PVRSRV_BRIDGE_IN_DCSETBLANK_TAG
+{
+	IMG_HANDLE hDevice;
+	IMG_BOOL bEnabled;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DCSETBLANK;
+
+
+/* Bridge out structure for DCSetBlank */
+typedef struct PVRSRV_BRIDGE_OUT_DCSETBLANK_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DCSETBLANK;
+
+/*******************************************
+            DCSetVSyncReporting          
+ *******************************************/
+
+/* Bridge in structure for DCSetVSyncReporting */
+typedef struct PVRSRV_BRIDGE_IN_DCSETVSYNCREPORTING_TAG
+{
+	IMG_HANDLE hDevice;
+	IMG_BOOL bEnabled;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DCSETVSYNCREPORTING;
+
+
+/* Bridge out structure for DCSetVSyncReporting */
+typedef struct PVRSRV_BRIDGE_OUT_DCSETVSYNCREPORTING_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DCSETVSYNCREPORTING;
+
+/*******************************************
+            DCLastVSyncQuery          
+ *******************************************/
+
+/* Bridge in structure for DCLastVSyncQuery */
+typedef struct PVRSRV_BRIDGE_IN_DCLASTVSYNCQUERY_TAG
+{
+	IMG_HANDLE hDevice;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DCLASTVSYNCQUERY;
+
+
+/* Bridge out structure for DCLastVSyncQuery */
+typedef struct PVRSRV_BRIDGE_OUT_DCLASTVSYNCQUERY_TAG
+{
+	IMG_INT64 i64Timestamp;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DCLASTVSYNCQUERY;
+
+/*******************************************
+            DCSystemBufferAcquire          
+ *******************************************/
+
+/* Bridge in structure for DCSystemBufferAcquire */
+typedef struct PVRSRV_BRIDGE_IN_DCSYSTEMBUFFERACQUIRE_TAG
+{
+	IMG_HANDLE hDevice;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DCSYSTEMBUFFERACQUIRE;
+
+
+/* Bridge out structure for DCSystemBufferAcquire */
+typedef struct PVRSRV_BRIDGE_OUT_DCSYSTEMBUFFERACQUIRE_TAG
+{
+	IMG_UINT32 ui32Stride;
+	IMG_HANDLE hBuffer;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DCSYSTEMBUFFERACQUIRE;
+
+/*******************************************
+            DCSystemBufferRelease          
+ *******************************************/
+
+/* Bridge in structure for DCSystemBufferRelease */
+typedef struct PVRSRV_BRIDGE_IN_DCSYSTEMBUFFERRELEASE_TAG
+{
+	IMG_HANDLE hBuffer;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DCSYSTEMBUFFERRELEASE;
+
+
+/* Bridge out structure for DCSystemBufferRelease */
+typedef struct PVRSRV_BRIDGE_OUT_DCSYSTEMBUFFERRELEASE_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DCSYSTEMBUFFERRELEASE;
+
+/*******************************************
+            DCDisplayContextCreate          
+ *******************************************/
+
+/* Bridge in structure for DCDisplayContextCreate */
+typedef struct PVRSRV_BRIDGE_IN_DCDISPLAYCONTEXTCREATE_TAG
+{
+	IMG_HANDLE hDevice;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DCDISPLAYCONTEXTCREATE;
+
+
+/* Bridge out structure for DCDisplayContextCreate */
+typedef struct PVRSRV_BRIDGE_OUT_DCDISPLAYCONTEXTCREATE_TAG
+{
+	IMG_HANDLE hDisplayContext;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DCDISPLAYCONTEXTCREATE;
+
+/*******************************************
+            DCDisplayContextConfigureCheck          
+ *******************************************/
+
+/* Bridge in structure for DCDisplayContextConfigureCheck */
+typedef struct PVRSRV_BRIDGE_IN_DCDISPLAYCONTEXTCONFIGURECHECK_TAG
+{
+	IMG_HANDLE hDisplayContext;
+	IMG_UINT32 ui32PipeCount;
+	PVRSRV_SURFACE_CONFIG_INFO * psSurfInfo;
+	IMG_HANDLE * phBuffers;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DCDISPLAYCONTEXTCONFIGURECHECK;
+
+
+/* Bridge out structure for DCDisplayContextConfigureCheck */
+typedef struct PVRSRV_BRIDGE_OUT_DCDISPLAYCONTEXTCONFIGURECHECK_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DCDISPLAYCONTEXTCONFIGURECHECK;
+
+/*******************************************
+            DCDisplayContextConfigure          
+ *******************************************/
+
+/* Bridge in structure for DCDisplayContextConfigure */
+typedef struct PVRSRV_BRIDGE_IN_DCDISPLAYCONTEXTCONFIGURE_TAG
+{
+	IMG_HANDLE hDisplayContext;
+	IMG_UINT32 ui32PipeCount;
+	PVRSRV_SURFACE_CONFIG_INFO * psSurfInfo;
+	IMG_HANDLE * phBuffers;
+	IMG_UINT32 ui32SyncCount;
+	IMG_HANDLE * phSync;
+	IMG_BOOL * pbUpdate;
+	IMG_UINT32 ui32DisplayPeriod;
+	IMG_UINT32 ui32MaxDepth;
+	IMG_INT32 i32AcquireFd;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DCDISPLAYCONTEXTCONFIGURE;
+
+
+/* Bridge out structure for DCDisplayContextConfigure */
+typedef struct PVRSRV_BRIDGE_OUT_DCDISPLAYCONTEXTCONFIGURE_TAG
+{
+	IMG_INT32 i32ReleaseFd;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DCDISPLAYCONTEXTCONFIGURE;
+
+/*******************************************
+            DCDisplayContextDestroy          
+ *******************************************/
+
+/* Bridge in structure for DCDisplayContextDestroy */
+typedef struct PVRSRV_BRIDGE_IN_DCDISPLAYCONTEXTDESTROY_TAG
+{
+	IMG_HANDLE hDisplayContext;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DCDISPLAYCONTEXTDESTROY;
+
+
+/* Bridge out structure for DCDisplayContextDestroy */
+typedef struct PVRSRV_BRIDGE_OUT_DCDISPLAYCONTEXTDESTROY_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DCDISPLAYCONTEXTDESTROY;
+
+/*******************************************
+            DCBufferAlloc          
+ *******************************************/
+
+/* Bridge in structure for DCBufferAlloc */
+typedef struct PVRSRV_BRIDGE_IN_DCBUFFERALLOC_TAG
+{
+	IMG_HANDLE hDisplayContext;
+	DC_BUFFER_CREATE_INFO sSurfInfo;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DCBUFFERALLOC;
+
+
+/* Bridge out structure for DCBufferAlloc */
+typedef struct PVRSRV_BRIDGE_OUT_DCBUFFERALLOC_TAG
+{
+	IMG_UINT32 ui32Stride;
+	IMG_HANDLE hBuffer;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DCBUFFERALLOC;
+
+/*******************************************
+            DCBufferImport          
+ *******************************************/
+
+/* Bridge in structure for DCBufferImport */
+typedef struct PVRSRV_BRIDGE_IN_DCBUFFERIMPORT_TAG
+{
+	IMG_HANDLE hDisplayContext;
+	IMG_UINT32 ui32NumPlanes;
+	IMG_HANDLE * phImport;
+	DC_BUFFER_IMPORT_INFO sSurfAttrib;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DCBUFFERIMPORT;
+
+
+/* Bridge out structure for DCBufferImport */
+typedef struct PVRSRV_BRIDGE_OUT_DCBUFFERIMPORT_TAG
+{
+	IMG_HANDLE hBuffer;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DCBUFFERIMPORT;
+
+/*******************************************
+            DCBufferFree          
+ *******************************************/
+
+/* Bridge in structure for DCBufferFree */
+typedef struct PVRSRV_BRIDGE_IN_DCBUFFERFREE_TAG
+{
+	IMG_HANDLE hBuffer;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DCBUFFERFREE;
+
+
+/* Bridge out structure for DCBufferFree */
+typedef struct PVRSRV_BRIDGE_OUT_DCBUFFERFREE_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DCBUFFERFREE;
+
+/*******************************************
+            DCBufferUnimport          
+ *******************************************/
+
+/* Bridge in structure for DCBufferUnimport */
+typedef struct PVRSRV_BRIDGE_IN_DCBUFFERUNIMPORT_TAG
+{
+	IMG_HANDLE hBuffer;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DCBUFFERUNIMPORT;
+
+
+/* Bridge out structure for DCBufferUnimport */
+typedef struct PVRSRV_BRIDGE_OUT_DCBUFFERUNIMPORT_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DCBUFFERUNIMPORT;
+
+/*******************************************
+            DCBufferPin          
+ *******************************************/
+
+/* Bridge in structure for DCBufferPin */
+typedef struct PVRSRV_BRIDGE_IN_DCBUFFERPIN_TAG
+{
+	IMG_HANDLE hBuffer;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DCBUFFERPIN;
+
+
+/* Bridge out structure for DCBufferPin */
+typedef struct PVRSRV_BRIDGE_OUT_DCBUFFERPIN_TAG
+{
+	IMG_HANDLE hPinHandle;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DCBUFFERPIN;
+
+/*******************************************
+            DCBufferUnpin          
+ *******************************************/
+
+/* Bridge in structure for DCBufferUnpin */
+typedef struct PVRSRV_BRIDGE_IN_DCBUFFERUNPIN_TAG
+{
+	IMG_HANDLE hPinHandle;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DCBUFFERUNPIN;
+
+
+/* Bridge out structure for DCBufferUnpin */
+typedef struct PVRSRV_BRIDGE_OUT_DCBUFFERUNPIN_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DCBUFFERUNPIN;
+
+/*******************************************
+            DCBufferAcquire          
+ *******************************************/
+
+/* Bridge in structure for DCBufferAcquire */
+typedef struct PVRSRV_BRIDGE_IN_DCBUFFERACQUIRE_TAG
+{
+	IMG_HANDLE hBuffer;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DCBUFFERACQUIRE;
+
+
+/* Bridge out structure for DCBufferAcquire */
+typedef struct PVRSRV_BRIDGE_OUT_DCBUFFERACQUIRE_TAG
+{
+	IMG_HANDLE hExtMem;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DCBUFFERACQUIRE;
+
+/*******************************************
+            DCBufferRelease          
+ *******************************************/
+
+/* Bridge in structure for DCBufferRelease */
+typedef struct PVRSRV_BRIDGE_IN_DCBUFFERRELEASE_TAG
+{
+	IMG_HANDLE hExtMem;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DCBUFFERRELEASE;
+
+
+/* Bridge out structure for DCBufferRelease */
+typedef struct PVRSRV_BRIDGE_OUT_DCBUFFERRELEASE_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DCBUFFERRELEASE;
+
+#endif /* COMMON_DC_BRIDGE_H */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/dc_bridge/server_dc_bridge.c b/drivers/external_drivers/intel_media/graphics/rgx/generated/dc_bridge/server_dc_bridge.c
new file mode 100644
index 0000000..4bbbf48
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/dc_bridge/server_dc_bridge.c
@@ -0,0 +1,1798 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server bridge for dc
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for dc
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <asm/uaccess.h>
+
+#include "img_defs.h"
+
+#include "dc_server.h"
+
+
+#include "common_dc_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#if defined (SUPPORT_AUTH)
+#include "osauth.h"
+#endif
+
+#include <linux/slab.h>
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+ 
+static IMG_INT
+PVRSRVBridgeDCDevicesQueryCount(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DCDEVICESQUERYCOUNT *psDCDevicesQueryCountIN,
+					  PVRSRV_BRIDGE_OUT_DCDEVICESQUERYCOUNT *psDCDevicesQueryCountOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	PVR_UNREFERENCED_PARAMETER(psDCDevicesQueryCountIN);
+
+
+
+
+
+
+	psDCDevicesQueryCountOUT->eError =
+		DCDevicesQueryCount(
+					&psDCDevicesQueryCountOUT->ui32DeviceCount);
+
+
+
+
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDCDevicesEnumerate(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DCDEVICESENUMERATE *psDCDevicesEnumerateIN,
+					  PVRSRV_BRIDGE_OUT_DCDEVICESENUMERATE *psDCDevicesEnumerateOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_UINT32 *pui32DeviceIndexInt = IMG_NULL;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	psDCDevicesEnumerateOUT->pui32DeviceIndex = psDCDevicesEnumerateIN->pui32DeviceIndex;
+
+
+	if (psDCDevicesEnumerateIN->ui32DeviceArraySize != 0)
+	{
+		pui32DeviceIndexInt = OSAllocMem(psDCDevicesEnumerateIN->ui32DeviceArraySize * sizeof(IMG_UINT32));
+		if (!pui32DeviceIndexInt)
+		{
+			psDCDevicesEnumerateOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto DCDevicesEnumerate_exit;
+		}
+	}
+
+
+
+
+	psDCDevicesEnumerateOUT->eError =
+		DCDevicesEnumerate(
+					psDCDevicesEnumerateIN->ui32DeviceArraySize,
+					&psDCDevicesEnumerateOUT->ui32DeviceCount,
+					pui32DeviceIndexInt);
+
+
+
+	if ( !OSAccessOK(PVR_VERIFY_WRITE, (IMG_VOID*) psDCDevicesEnumerateOUT->pui32DeviceIndex, (psDCDevicesEnumerateOUT->ui32DeviceCount * sizeof(IMG_UINT32))) 
+		|| (OSCopyToUser(NULL, psDCDevicesEnumerateOUT->pui32DeviceIndex, pui32DeviceIndexInt,
+		(psDCDevicesEnumerateOUT->ui32DeviceCount * sizeof(IMG_UINT32))) != PVRSRV_OK) )
+	{
+		psDCDevicesEnumerateOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+		goto DCDevicesEnumerate_exit;
+	}
+
+
+DCDevicesEnumerate_exit:
+	if (pui32DeviceIndexInt)
+		OSFreeMem(pui32DeviceIndexInt);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDCDeviceAcquire(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DCDEVICEACQUIRE *psDCDeviceAcquireIN,
+					  PVRSRV_BRIDGE_OUT_DCDEVICEACQUIRE *psDCDeviceAcquireOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	DC_DEVICE * psDeviceInt = IMG_NULL;
+
+
+
+
+
+
+
+	psDCDeviceAcquireOUT->eError =
+		DCDeviceAcquire(
+					psDCDeviceAcquireIN->ui32DeviceIndex,
+					&psDeviceInt);
+	/* Exit early if bridged call fails */
+	if(psDCDeviceAcquireOUT->eError != PVRSRV_OK)
+	{
+		goto DCDeviceAcquire_exit;
+	}
+
+
+	psDCDeviceAcquireOUT->eError = PVRSRVAllocHandle(psConnection->psHandleBase,
+							&psDCDeviceAcquireOUT->hDevice,
+							(IMG_VOID *) psDeviceInt,
+							PVRSRV_HANDLE_TYPE_DC_DEVICE,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+							,(PFN_HANDLE_RELEASE)&DCDeviceRelease);
+	if (psDCDeviceAcquireOUT->eError != PVRSRV_OK)
+	{
+		goto DCDeviceAcquire_exit;
+	}
+
+
+
+
+DCDeviceAcquire_exit:
+	if (psDCDeviceAcquireOUT->eError != PVRSRV_OK)
+	{
+		if (psDeviceInt)
+		{
+			DCDeviceRelease(psDeviceInt);
+		}
+	}
+
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDCDeviceRelease(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DCDEVICERELEASE *psDCDeviceReleaseIN,
+					  PVRSRV_BRIDGE_OUT_DCDEVICERELEASE *psDCDeviceReleaseOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+	psDCDeviceReleaseOUT->eError =
+		PVRSRVReleaseHandle(psConnection->psHandleBase,
+					(IMG_HANDLE) psDCDeviceReleaseIN->hDevice,
+					PVRSRV_HANDLE_TYPE_DC_DEVICE);
+	if ((psDCDeviceReleaseOUT->eError != PVRSRV_OK) && (psDCDeviceReleaseOUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_ASSERT(0);
+		goto DCDeviceRelease_exit;
+	}
+
+
+
+DCDeviceRelease_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDCGetInfo(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DCGETINFO *psDCGetInfoIN,
+					  PVRSRV_BRIDGE_OUT_DCGETINFO *psDCGetInfoOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	DC_DEVICE * psDeviceInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psDCGetInfoOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psDeviceInt,
+											psDCGetInfoIN->hDevice,
+											PVRSRV_HANDLE_TYPE_DC_DEVICE);
+					if(psDCGetInfoOUT->eError != PVRSRV_OK)
+					{
+						goto DCGetInfo_exit;
+					}
+				}
+
+
+	psDCGetInfoOUT->eError =
+		DCGetInfo(
+					psDeviceInt,
+					&psDCGetInfoOUT->sDisplayInfo);
+
+
+
+
+DCGetInfo_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDCPanelQueryCount(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DCPANELQUERYCOUNT *psDCPanelQueryCountIN,
+					  PVRSRV_BRIDGE_OUT_DCPANELQUERYCOUNT *psDCPanelQueryCountOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	DC_DEVICE * psDeviceInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psDCPanelQueryCountOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psDeviceInt,
+											psDCPanelQueryCountIN->hDevice,
+											PVRSRV_HANDLE_TYPE_DC_DEVICE);
+					if(psDCPanelQueryCountOUT->eError != PVRSRV_OK)
+					{
+						goto DCPanelQueryCount_exit;
+					}
+				}
+
+
+	psDCPanelQueryCountOUT->eError =
+		DCPanelQueryCount(
+					psDeviceInt,
+					&psDCPanelQueryCountOUT->ui32NumPanels);
+
+
+
+
+DCPanelQueryCount_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDCPanelQuery(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DCPANELQUERY *psDCPanelQueryIN,
+					  PVRSRV_BRIDGE_OUT_DCPANELQUERY *psDCPanelQueryOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	DC_DEVICE * psDeviceInt = IMG_NULL;
+	PVRSRV_PANEL_INFO *psPanelInfoInt = IMG_NULL;
+
+
+	psDCPanelQueryOUT->psPanelInfo = psDCPanelQueryIN->psPanelInfo;
+
+
+	if (psDCPanelQueryIN->ui32PanelsArraySize != 0)
+	{
+		psPanelInfoInt = OSAllocMem(psDCPanelQueryIN->ui32PanelsArraySize * sizeof(PVRSRV_PANEL_INFO));
+		if (!psPanelInfoInt)
+		{
+			psDCPanelQueryOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto DCPanelQuery_exit;
+		}
+	}
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psDCPanelQueryOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psDeviceInt,
+											psDCPanelQueryIN->hDevice,
+											PVRSRV_HANDLE_TYPE_DC_DEVICE);
+					if(psDCPanelQueryOUT->eError != PVRSRV_OK)
+					{
+						goto DCPanelQuery_exit;
+					}
+				}
+
+
+	psDCPanelQueryOUT->eError =
+		DCPanelQuery(
+					psDeviceInt,
+					psDCPanelQueryIN->ui32PanelsArraySize,
+					&psDCPanelQueryOUT->ui32NumPanels,
+					psPanelInfoInt);
+
+
+
+	if ( !OSAccessOK(PVR_VERIFY_WRITE, (IMG_VOID*) psDCPanelQueryOUT->psPanelInfo, (psDCPanelQueryOUT->ui32NumPanels * sizeof(PVRSRV_PANEL_INFO))) 
+		|| (OSCopyToUser(NULL, psDCPanelQueryOUT->psPanelInfo, psPanelInfoInt,
+		(psDCPanelQueryOUT->ui32NumPanels * sizeof(PVRSRV_PANEL_INFO))) != PVRSRV_OK) )
+	{
+		psDCPanelQueryOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+		goto DCPanelQuery_exit;
+	}
+
+
+DCPanelQuery_exit:
+	if (psPanelInfoInt)
+		OSFreeMem(psPanelInfoInt);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDCFormatQuery(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DCFORMATQUERY *psDCFormatQueryIN,
+					  PVRSRV_BRIDGE_OUT_DCFORMATQUERY *psDCFormatQueryOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	DC_DEVICE * psDeviceInt = IMG_NULL;
+	PVRSRV_SURFACE_FORMAT *psFormatInt = IMG_NULL;
+	IMG_UINT32 *pui32SupportedInt = IMG_NULL;
+
+
+	psDCFormatQueryOUT->pui32Supported = psDCFormatQueryIN->pui32Supported;
+
+
+	if (psDCFormatQueryIN->ui32NumFormats != 0)
+	{
+		psFormatInt = OSAllocMem(psDCFormatQueryIN->ui32NumFormats * sizeof(PVRSRV_SURFACE_FORMAT));
+		if (!psFormatInt)
+		{
+			psDCFormatQueryOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto DCFormatQuery_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psDCFormatQueryIN->psFormat, psDCFormatQueryIN->ui32NumFormats * sizeof(PVRSRV_SURFACE_FORMAT))
+				|| (OSCopyFromUser(NULL, psFormatInt, psDCFormatQueryIN->psFormat,
+				psDCFormatQueryIN->ui32NumFormats * sizeof(PVRSRV_SURFACE_FORMAT)) != PVRSRV_OK) )
+			{
+				psDCFormatQueryOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto DCFormatQuery_exit;
+			}
+	if (psDCFormatQueryIN->ui32NumFormats != 0)
+	{
+		pui32SupportedInt = OSAllocMem(psDCFormatQueryIN->ui32NumFormats * sizeof(IMG_UINT32));
+		if (!pui32SupportedInt)
+		{
+			psDCFormatQueryOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto DCFormatQuery_exit;
+		}
+	}
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psDCFormatQueryOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psDeviceInt,
+											psDCFormatQueryIN->hDevice,
+											PVRSRV_HANDLE_TYPE_DC_DEVICE);
+					if(psDCFormatQueryOUT->eError != PVRSRV_OK)
+					{
+						goto DCFormatQuery_exit;
+					}
+				}
+
+
+	psDCFormatQueryOUT->eError =
+		DCFormatQuery(
+					psDeviceInt,
+					psDCFormatQueryIN->ui32NumFormats,
+					psFormatInt,
+					pui32SupportedInt);
+
+
+
+	if ( !OSAccessOK(PVR_VERIFY_WRITE, (IMG_VOID*) psDCFormatQueryOUT->pui32Supported, (psDCFormatQueryIN->ui32NumFormats * sizeof(IMG_UINT32))) 
+		|| (OSCopyToUser(NULL, psDCFormatQueryOUT->pui32Supported, pui32SupportedInt,
+		(psDCFormatQueryIN->ui32NumFormats * sizeof(IMG_UINT32))) != PVRSRV_OK) )
+	{
+		psDCFormatQueryOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+		goto DCFormatQuery_exit;
+	}
+
+
+DCFormatQuery_exit:
+	if (psFormatInt)
+		OSFreeMem(psFormatInt);
+	if (pui32SupportedInt)
+		OSFreeMem(pui32SupportedInt);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDCDimQuery(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DCDIMQUERY *psDCDimQueryIN,
+					  PVRSRV_BRIDGE_OUT_DCDIMQUERY *psDCDimQueryOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	DC_DEVICE * psDeviceInt = IMG_NULL;
+	PVRSRV_SURFACE_DIMS *psDimInt = IMG_NULL;
+	IMG_UINT32 *pui32SupportedInt = IMG_NULL;
+
+
+	psDCDimQueryOUT->pui32Supported = psDCDimQueryIN->pui32Supported;
+
+
+	if (psDCDimQueryIN->ui32NumDims != 0)
+	{
+		psDimInt = OSAllocMem(psDCDimQueryIN->ui32NumDims * sizeof(PVRSRV_SURFACE_DIMS));
+		if (!psDimInt)
+		{
+			psDCDimQueryOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto DCDimQuery_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psDCDimQueryIN->psDim, psDCDimQueryIN->ui32NumDims * sizeof(PVRSRV_SURFACE_DIMS))
+				|| (OSCopyFromUser(NULL, psDimInt, psDCDimQueryIN->psDim,
+				psDCDimQueryIN->ui32NumDims * sizeof(PVRSRV_SURFACE_DIMS)) != PVRSRV_OK) )
+			{
+				psDCDimQueryOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto DCDimQuery_exit;
+			}
+	if (psDCDimQueryIN->ui32NumDims != 0)
+	{
+		pui32SupportedInt = OSAllocMem(psDCDimQueryIN->ui32NumDims * sizeof(IMG_UINT32));
+		if (!pui32SupportedInt)
+		{
+			psDCDimQueryOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto DCDimQuery_exit;
+		}
+	}
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psDCDimQueryOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psDeviceInt,
+											psDCDimQueryIN->hDevice,
+											PVRSRV_HANDLE_TYPE_DC_DEVICE);
+					if(psDCDimQueryOUT->eError != PVRSRV_OK)
+					{
+						goto DCDimQuery_exit;
+					}
+				}
+
+
+	psDCDimQueryOUT->eError =
+		DCDimQuery(
+					psDeviceInt,
+					psDCDimQueryIN->ui32NumDims,
+					psDimInt,
+					pui32SupportedInt);
+
+
+
+	if ( !OSAccessOK(PVR_VERIFY_WRITE, (IMG_VOID*) psDCDimQueryOUT->pui32Supported, (psDCDimQueryIN->ui32NumDims * sizeof(IMG_UINT32))) 
+		|| (OSCopyToUser(NULL, psDCDimQueryOUT->pui32Supported, pui32SupportedInt,
+		(psDCDimQueryIN->ui32NumDims * sizeof(IMG_UINT32))) != PVRSRV_OK) )
+	{
+		psDCDimQueryOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+		goto DCDimQuery_exit;
+	}
+
+
+DCDimQuery_exit:
+	if (psDimInt)
+		OSFreeMem(psDimInt);
+	if (pui32SupportedInt)
+		OSFreeMem(pui32SupportedInt);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDCSetBlank(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DCSETBLANK *psDCSetBlankIN,
+					  PVRSRV_BRIDGE_OUT_DCSETBLANK *psDCSetBlankOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	DC_DEVICE * psDeviceInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psDCSetBlankOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psDeviceInt,
+											psDCSetBlankIN->hDevice,
+											PVRSRV_HANDLE_TYPE_DC_DEVICE);
+					if(psDCSetBlankOUT->eError != PVRSRV_OK)
+					{
+						goto DCSetBlank_exit;
+					}
+				}
+
+
+	psDCSetBlankOUT->eError =
+		DCSetBlank(
+					psDeviceInt,
+					psDCSetBlankIN->bEnabled);
+
+
+
+
+DCSetBlank_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDCSetVSyncReporting(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DCSETVSYNCREPORTING *psDCSetVSyncReportingIN,
+					  PVRSRV_BRIDGE_OUT_DCSETVSYNCREPORTING *psDCSetVSyncReportingOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	DC_DEVICE * psDeviceInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psDCSetVSyncReportingOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psDeviceInt,
+											psDCSetVSyncReportingIN->hDevice,
+											PVRSRV_HANDLE_TYPE_DC_DEVICE);
+					if(psDCSetVSyncReportingOUT->eError != PVRSRV_OK)
+					{
+						goto DCSetVSyncReporting_exit;
+					}
+				}
+
+
+	psDCSetVSyncReportingOUT->eError =
+		DCSetVSyncReporting(
+					psDeviceInt,
+					psDCSetVSyncReportingIN->bEnabled);
+
+
+
+
+DCSetVSyncReporting_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDCLastVSyncQuery(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DCLASTVSYNCQUERY *psDCLastVSyncQueryIN,
+					  PVRSRV_BRIDGE_OUT_DCLASTVSYNCQUERY *psDCLastVSyncQueryOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	DC_DEVICE * psDeviceInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psDCLastVSyncQueryOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psDeviceInt,
+											psDCLastVSyncQueryIN->hDevice,
+											PVRSRV_HANDLE_TYPE_DC_DEVICE);
+					if(psDCLastVSyncQueryOUT->eError != PVRSRV_OK)
+					{
+						goto DCLastVSyncQuery_exit;
+					}
+				}
+
+
+	psDCLastVSyncQueryOUT->eError =
+		DCLastVSyncQuery(
+					psDeviceInt,
+					&psDCLastVSyncQueryOUT->i64Timestamp);
+
+
+
+
+DCLastVSyncQuery_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDCSystemBufferAcquire(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DCSYSTEMBUFFERACQUIRE *psDCSystemBufferAcquireIN,
+					  PVRSRV_BRIDGE_OUT_DCSYSTEMBUFFERACQUIRE *psDCSystemBufferAcquireOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	DC_DEVICE * psDeviceInt = IMG_NULL;
+	DC_BUFFER * psBufferInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psDCSystemBufferAcquireOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psDeviceInt,
+											psDCSystemBufferAcquireIN->hDevice,
+											PVRSRV_HANDLE_TYPE_DC_DEVICE);
+					if(psDCSystemBufferAcquireOUT->eError != PVRSRV_OK)
+					{
+						goto DCSystemBufferAcquire_exit;
+					}
+				}
+
+
+	psDCSystemBufferAcquireOUT->eError =
+		DCSystemBufferAcquire(
+					psDeviceInt,
+					&psDCSystemBufferAcquireOUT->ui32Stride,
+					&psBufferInt);
+	/* Exit early if bridged call fails */
+	if(psDCSystemBufferAcquireOUT->eError != PVRSRV_OK)
+	{
+		goto DCSystemBufferAcquire_exit;
+	}
+
+
+	psDCSystemBufferAcquireOUT->eError = PVRSRVAllocHandle(psConnection->psHandleBase,
+							&psDCSystemBufferAcquireOUT->hBuffer,
+							(IMG_VOID *) psBufferInt,
+							PVRSRV_HANDLE_TYPE_DC_BUFFER,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+							,(PFN_HANDLE_RELEASE)&DCSystemBufferRelease);
+	if (psDCSystemBufferAcquireOUT->eError != PVRSRV_OK)
+	{
+		goto DCSystemBufferAcquire_exit;
+	}
+
+
+
+
+DCSystemBufferAcquire_exit:
+	if (psDCSystemBufferAcquireOUT->eError != PVRSRV_OK)
+	{
+		if (psBufferInt)
+		{
+			DCSystemBufferRelease(psBufferInt);
+		}
+	}
+
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDCSystemBufferRelease(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DCSYSTEMBUFFERRELEASE *psDCSystemBufferReleaseIN,
+					  PVRSRV_BRIDGE_OUT_DCSYSTEMBUFFERRELEASE *psDCSystemBufferReleaseOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+	psDCSystemBufferReleaseOUT->eError =
+		PVRSRVReleaseHandle(psConnection->psHandleBase,
+					(IMG_HANDLE) psDCSystemBufferReleaseIN->hBuffer,
+					PVRSRV_HANDLE_TYPE_DC_BUFFER);
+	if ((psDCSystemBufferReleaseOUT->eError != PVRSRV_OK) && (psDCSystemBufferReleaseOUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_ASSERT(0);
+		goto DCSystemBufferRelease_exit;
+	}
+
+
+
+DCSystemBufferRelease_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDCDisplayContextCreate(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DCDISPLAYCONTEXTCREATE *psDCDisplayContextCreateIN,
+					  PVRSRV_BRIDGE_OUT_DCDISPLAYCONTEXTCREATE *psDCDisplayContextCreateOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	DC_DEVICE * psDeviceInt = IMG_NULL;
+	DC_DISPLAY_CONTEXT * psDisplayContextInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psDCDisplayContextCreateOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psDeviceInt,
+											psDCDisplayContextCreateIN->hDevice,
+											PVRSRV_HANDLE_TYPE_DC_DEVICE);
+					if(psDCDisplayContextCreateOUT->eError != PVRSRV_OK)
+					{
+						goto DCDisplayContextCreate_exit;
+					}
+				}
+
+
+	psDCDisplayContextCreateOUT->eError =
+		DCDisplayContextCreate(
+					psDeviceInt,
+					&psDisplayContextInt);
+	/* Exit early if bridged call fails */
+	if(psDCDisplayContextCreateOUT->eError != PVRSRV_OK)
+	{
+		goto DCDisplayContextCreate_exit;
+	}
+
+
+	psDCDisplayContextCreateOUT->eError = PVRSRVAllocHandle(psConnection->psHandleBase,
+							&psDCDisplayContextCreateOUT->hDisplayContext,
+							(IMG_VOID *) psDisplayContextInt,
+							PVRSRV_HANDLE_TYPE_DC_DISPLAY_CONTEXT,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+							,(PFN_HANDLE_RELEASE)&DCDisplayContextDestroy);
+	if (psDCDisplayContextCreateOUT->eError != PVRSRV_OK)
+	{
+		goto DCDisplayContextCreate_exit;
+	}
+
+
+
+
+DCDisplayContextCreate_exit:
+	if (psDCDisplayContextCreateOUT->eError != PVRSRV_OK)
+	{
+		if (psDisplayContextInt)
+		{
+			DCDisplayContextDestroy(psDisplayContextInt);
+		}
+	}
+
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDCDisplayContextConfigureCheck(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DCDISPLAYCONTEXTCONFIGURECHECK *psDCDisplayContextConfigureCheckIN,
+					  PVRSRV_BRIDGE_OUT_DCDISPLAYCONTEXTCONFIGURECHECK *psDCDisplayContextConfigureCheckOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	DC_DISPLAY_CONTEXT * psDisplayContextInt = IMG_NULL;
+	PVRSRV_SURFACE_CONFIG_INFO *psSurfInfoInt = IMG_NULL;
+	DC_BUFFER * *psBuffersInt = IMG_NULL;
+	IMG_HANDLE *hBuffersInt2 = IMG_NULL;
+
+
+
+
+	if (psDCDisplayContextConfigureCheckIN->ui32PipeCount != 0)
+	{
+		psSurfInfoInt = OSAllocMem(psDCDisplayContextConfigureCheckIN->ui32PipeCount * sizeof(PVRSRV_SURFACE_CONFIG_INFO));
+		if (!psSurfInfoInt)
+		{
+			psDCDisplayContextConfigureCheckOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto DCDisplayContextConfigureCheck_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psDCDisplayContextConfigureCheckIN->psSurfInfo, psDCDisplayContextConfigureCheckIN->ui32PipeCount * sizeof(PVRSRV_SURFACE_CONFIG_INFO))
+				|| (OSCopyFromUser(NULL, psSurfInfoInt, psDCDisplayContextConfigureCheckIN->psSurfInfo,
+				psDCDisplayContextConfigureCheckIN->ui32PipeCount * sizeof(PVRSRV_SURFACE_CONFIG_INFO)) != PVRSRV_OK) )
+			{
+				psDCDisplayContextConfigureCheckOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto DCDisplayContextConfigureCheck_exit;
+			}
+	if (psDCDisplayContextConfigureCheckIN->ui32PipeCount != 0)
+	{
+		psBuffersInt = OSAllocMem(psDCDisplayContextConfigureCheckIN->ui32PipeCount * sizeof(DC_BUFFER *));
+		if (!psBuffersInt)
+		{
+			psDCDisplayContextConfigureCheckOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto DCDisplayContextConfigureCheck_exit;
+		}
+		hBuffersInt2 = OSAllocMem(psDCDisplayContextConfigureCheckIN->ui32PipeCount * sizeof(IMG_HANDLE));
+		if (!hBuffersInt2)
+		{
+			psDCDisplayContextConfigureCheckOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto DCDisplayContextConfigureCheck_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psDCDisplayContextConfigureCheckIN->phBuffers, psDCDisplayContextConfigureCheckIN->ui32PipeCount * sizeof(IMG_HANDLE))
+				|| (OSCopyFromUser(NULL, hBuffersInt2, psDCDisplayContextConfigureCheckIN->phBuffers,
+				psDCDisplayContextConfigureCheckIN->ui32PipeCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) )
+			{
+				psDCDisplayContextConfigureCheckOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto DCDisplayContextConfigureCheck_exit;
+			}
+
+
+
+				{
+					/* Look up the address from the handle */
+					psDCDisplayContextConfigureCheckOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psDisplayContextInt,
+											psDCDisplayContextConfigureCheckIN->hDisplayContext,
+											PVRSRV_HANDLE_TYPE_DC_DISPLAY_CONTEXT);
+					if(psDCDisplayContextConfigureCheckOUT->eError != PVRSRV_OK)
+					{
+						goto DCDisplayContextConfigureCheck_exit;
+					}
+				}
+
+
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psDCDisplayContextConfigureCheckIN->ui32PipeCount;i++)
+		{
+				{
+					/* Look up the address from the handle */
+					psDCDisplayContextConfigureCheckOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psBuffersInt[i],
+											hBuffersInt2[i],
+											PVRSRV_HANDLE_TYPE_DC_BUFFER);
+					if(psDCDisplayContextConfigureCheckOUT->eError != PVRSRV_OK)
+					{
+						goto DCDisplayContextConfigureCheck_exit;
+					}
+				}
+
+		}
+	}
+
+	psDCDisplayContextConfigureCheckOUT->eError =
+		DCDisplayContextConfigureCheck(
+					psDisplayContextInt,
+					psDCDisplayContextConfigureCheckIN->ui32PipeCount,
+					psSurfInfoInt,
+					psBuffersInt);
+
+
+
+
+DCDisplayContextConfigureCheck_exit:
+	if (psSurfInfoInt)
+		OSFreeMem(psSurfInfoInt);
+	if (psBuffersInt)
+		OSFreeMem(psBuffersInt);
+	if (hBuffersInt2)
+		OSFreeMem(hBuffersInt2);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDCDisplayContextConfigure(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DCDISPLAYCONTEXTCONFIGURE *psDCDisplayContextConfigureIN,
+					  PVRSRV_BRIDGE_OUT_DCDISPLAYCONTEXTCONFIGURE *psDCDisplayContextConfigureOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	DC_DISPLAY_CONTEXT * psDisplayContextInt = IMG_NULL;
+	PVRSRV_SURFACE_CONFIG_INFO *psSurfInfoInt = IMG_NULL;
+	DC_BUFFER * *psBuffersInt = IMG_NULL;
+	IMG_HANDLE *hBuffersInt2 = IMG_NULL;
+	SERVER_SYNC_PRIMITIVE * *psSyncInt = IMG_NULL;
+	IMG_HANDLE *hSyncInt2 = IMG_NULL;
+	IMG_BOOL *bUpdateInt = IMG_NULL;
+
+
+
+
+	if (psDCDisplayContextConfigureIN->ui32PipeCount != 0)
+	{
+		psSurfInfoInt = OSAllocMem(psDCDisplayContextConfigureIN->ui32PipeCount * sizeof(PVRSRV_SURFACE_CONFIG_INFO));
+		if (!psSurfInfoInt)
+		{
+			psDCDisplayContextConfigureOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto DCDisplayContextConfigure_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psDCDisplayContextConfigureIN->psSurfInfo, psDCDisplayContextConfigureIN->ui32PipeCount * sizeof(PVRSRV_SURFACE_CONFIG_INFO))
+				|| (OSCopyFromUser(NULL, psSurfInfoInt, psDCDisplayContextConfigureIN->psSurfInfo,
+				psDCDisplayContextConfigureIN->ui32PipeCount * sizeof(PVRSRV_SURFACE_CONFIG_INFO)) != PVRSRV_OK) )
+			{
+				psDCDisplayContextConfigureOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto DCDisplayContextConfigure_exit;
+			}
+	if (psDCDisplayContextConfigureIN->ui32PipeCount != 0)
+	{
+		psBuffersInt = OSAllocMem(psDCDisplayContextConfigureIN->ui32PipeCount * sizeof(DC_BUFFER *));
+		if (!psBuffersInt)
+		{
+			psDCDisplayContextConfigureOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto DCDisplayContextConfigure_exit;
+		}
+		hBuffersInt2 = OSAllocMem(psDCDisplayContextConfigureIN->ui32PipeCount * sizeof(IMG_HANDLE));
+		if (!hBuffersInt2)
+		{
+			psDCDisplayContextConfigureOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto DCDisplayContextConfigure_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psDCDisplayContextConfigureIN->phBuffers, psDCDisplayContextConfigureIN->ui32PipeCount * sizeof(IMG_HANDLE))
+				|| (OSCopyFromUser(NULL, hBuffersInt2, psDCDisplayContextConfigureIN->phBuffers,
+				psDCDisplayContextConfigureIN->ui32PipeCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) )
+			{
+				psDCDisplayContextConfigureOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto DCDisplayContextConfigure_exit;
+			}
+	if (psDCDisplayContextConfigureIN->ui32SyncCount != 0)
+	{
+		psSyncInt = OSAllocMem(psDCDisplayContextConfigureIN->ui32SyncCount * sizeof(SERVER_SYNC_PRIMITIVE *));
+		if (!psSyncInt)
+		{
+			psDCDisplayContextConfigureOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto DCDisplayContextConfigure_exit;
+		}
+		hSyncInt2 = OSAllocMem(psDCDisplayContextConfigureIN->ui32SyncCount * sizeof(IMG_HANDLE));
+		if (!hSyncInt2)
+		{
+			psDCDisplayContextConfigureOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto DCDisplayContextConfigure_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psDCDisplayContextConfigureIN->phSync, psDCDisplayContextConfigureIN->ui32SyncCount * sizeof(IMG_HANDLE))
+				|| (OSCopyFromUser(NULL, hSyncInt2, psDCDisplayContextConfigureIN->phSync,
+				psDCDisplayContextConfigureIN->ui32SyncCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) )
+			{
+				psDCDisplayContextConfigureOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto DCDisplayContextConfigure_exit;
+			}
+	if (psDCDisplayContextConfigureIN->ui32SyncCount != 0)
+	{
+		bUpdateInt = OSAllocMem(psDCDisplayContextConfigureIN->ui32SyncCount * sizeof(IMG_BOOL));
+		if (!bUpdateInt)
+		{
+			psDCDisplayContextConfigureOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto DCDisplayContextConfigure_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psDCDisplayContextConfigureIN->pbUpdate, psDCDisplayContextConfigureIN->ui32SyncCount * sizeof(IMG_BOOL))
+				|| (OSCopyFromUser(NULL, bUpdateInt, psDCDisplayContextConfigureIN->pbUpdate,
+				psDCDisplayContextConfigureIN->ui32SyncCount * sizeof(IMG_BOOL)) != PVRSRV_OK) )
+			{
+				psDCDisplayContextConfigureOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto DCDisplayContextConfigure_exit;
+			}
+
+
+
+				{
+					/* Look up the address from the handle */
+					psDCDisplayContextConfigureOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psDisplayContextInt,
+											psDCDisplayContextConfigureIN->hDisplayContext,
+											PVRSRV_HANDLE_TYPE_DC_DISPLAY_CONTEXT);
+					if(psDCDisplayContextConfigureOUT->eError != PVRSRV_OK)
+					{
+						goto DCDisplayContextConfigure_exit;
+					}
+				}
+
+
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psDCDisplayContextConfigureIN->ui32PipeCount;i++)
+		{
+				{
+					/* Look up the address from the handle */
+					psDCDisplayContextConfigureOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psBuffersInt[i],
+											hBuffersInt2[i],
+											PVRSRV_HANDLE_TYPE_DC_BUFFER);
+					if(psDCDisplayContextConfigureOUT->eError != PVRSRV_OK)
+					{
+						goto DCDisplayContextConfigure_exit;
+					}
+				}
+
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psDCDisplayContextConfigureIN->ui32SyncCount;i++)
+		{
+				{
+					/* Look up the address from the handle */
+					psDCDisplayContextConfigureOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psSyncInt[i],
+											hSyncInt2[i],
+											PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+					if(psDCDisplayContextConfigureOUT->eError != PVRSRV_OK)
+					{
+						goto DCDisplayContextConfigure_exit;
+					}
+				}
+
+		}
+	}
+
+	psDCDisplayContextConfigureOUT->eError =
+		DCDisplayContextConfigure(
+					psDisplayContextInt,
+					psDCDisplayContextConfigureIN->ui32PipeCount,
+					psSurfInfoInt,
+					psBuffersInt,
+					psDCDisplayContextConfigureIN->ui32SyncCount,
+					psSyncInt,
+					bUpdateInt,
+					psDCDisplayContextConfigureIN->ui32DisplayPeriod,
+					psDCDisplayContextConfigureIN->ui32MaxDepth,
+					psDCDisplayContextConfigureIN->i32AcquireFd,
+					&psDCDisplayContextConfigureOUT->i32ReleaseFd);
+
+
+
+
+DCDisplayContextConfigure_exit:
+	if (psSurfInfoInt)
+		OSFreeMem(psSurfInfoInt);
+	if (psBuffersInt)
+		OSFreeMem(psBuffersInt);
+	if (hBuffersInt2)
+		OSFreeMem(hBuffersInt2);
+	if (psSyncInt)
+		OSFreeMem(psSyncInt);
+	if (hSyncInt2)
+		OSFreeMem(hSyncInt2);
+	if (bUpdateInt)
+		OSFreeMem(bUpdateInt);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDCDisplayContextDestroy(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DCDISPLAYCONTEXTDESTROY *psDCDisplayContextDestroyIN,
+					  PVRSRV_BRIDGE_OUT_DCDISPLAYCONTEXTDESTROY *psDCDisplayContextDestroyOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+	psDCDisplayContextDestroyOUT->eError =
+		PVRSRVReleaseHandle(psConnection->psHandleBase,
+					(IMG_HANDLE) psDCDisplayContextDestroyIN->hDisplayContext,
+					PVRSRV_HANDLE_TYPE_DC_DISPLAY_CONTEXT);
+	if ((psDCDisplayContextDestroyOUT->eError != PVRSRV_OK) && (psDCDisplayContextDestroyOUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_ASSERT(0);
+		goto DCDisplayContextDestroy_exit;
+	}
+
+
+
+DCDisplayContextDestroy_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDCBufferAlloc(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DCBUFFERALLOC *psDCBufferAllocIN,
+					  PVRSRV_BRIDGE_OUT_DCBUFFERALLOC *psDCBufferAllocOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	DC_DISPLAY_CONTEXT * psDisplayContextInt = IMG_NULL;
+	DC_BUFFER * psBufferInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psDCBufferAllocOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psDisplayContextInt,
+											psDCBufferAllocIN->hDisplayContext,
+											PVRSRV_HANDLE_TYPE_DC_DISPLAY_CONTEXT);
+					if(psDCBufferAllocOUT->eError != PVRSRV_OK)
+					{
+						goto DCBufferAlloc_exit;
+					}
+				}
+
+
+	psDCBufferAllocOUT->eError =
+		DCBufferAlloc(
+					psDisplayContextInt,
+					&psDCBufferAllocIN->sSurfInfo,
+					&psDCBufferAllocOUT->ui32Stride,
+					&psBufferInt);
+	/* Exit early if bridged call fails */
+	if(psDCBufferAllocOUT->eError != PVRSRV_OK)
+	{
+		goto DCBufferAlloc_exit;
+	}
+
+
+	psDCBufferAllocOUT->eError = PVRSRVAllocHandle(psConnection->psHandleBase,
+							&psDCBufferAllocOUT->hBuffer,
+							(IMG_VOID *) psBufferInt,
+							PVRSRV_HANDLE_TYPE_DC_BUFFER,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+							,(PFN_HANDLE_RELEASE)&DCBufferFree);
+	if (psDCBufferAllocOUT->eError != PVRSRV_OK)
+	{
+		goto DCBufferAlloc_exit;
+	}
+
+
+
+
+DCBufferAlloc_exit:
+	if (psDCBufferAllocOUT->eError != PVRSRV_OK)
+	{
+		if (psBufferInt)
+		{
+			DCBufferFree(psBufferInt);
+		}
+	}
+
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDCBufferImport(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DCBUFFERIMPORT *psDCBufferImportIN,
+					  PVRSRV_BRIDGE_OUT_DCBUFFERIMPORT *psDCBufferImportOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	DC_DISPLAY_CONTEXT * psDisplayContextInt = IMG_NULL;
+	PMR * *psImportInt = IMG_NULL;
+	IMG_HANDLE *hImportInt2 = IMG_NULL;
+	DC_BUFFER * psBufferInt = IMG_NULL;
+
+
+
+
+	if (psDCBufferImportIN->ui32NumPlanes != 0)
+	{
+		psImportInt = OSAllocMem(psDCBufferImportIN->ui32NumPlanes * sizeof(PMR *));
+		if (!psImportInt)
+		{
+			psDCBufferImportOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto DCBufferImport_exit;
+		}
+		hImportInt2 = OSAllocMem(psDCBufferImportIN->ui32NumPlanes * sizeof(IMG_HANDLE));
+		if (!hImportInt2)
+		{
+			psDCBufferImportOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto DCBufferImport_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psDCBufferImportIN->phImport, psDCBufferImportIN->ui32NumPlanes * sizeof(IMG_HANDLE))
+				|| (OSCopyFromUser(NULL, hImportInt2, psDCBufferImportIN->phImport,
+				psDCBufferImportIN->ui32NumPlanes * sizeof(IMG_HANDLE)) != PVRSRV_OK) )
+			{
+				psDCBufferImportOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto DCBufferImport_exit;
+			}
+
+
+
+				{
+					/* Look up the address from the handle */
+					psDCBufferImportOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psDisplayContextInt,
+											psDCBufferImportIN->hDisplayContext,
+											PVRSRV_HANDLE_TYPE_DC_DISPLAY_CONTEXT);
+					if(psDCBufferImportOUT->eError != PVRSRV_OK)
+					{
+						goto DCBufferImport_exit;
+					}
+				}
+
+
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psDCBufferImportIN->ui32NumPlanes;i++)
+		{
+				{
+					/* Look up the address from the handle */
+					psDCBufferImportOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psImportInt[i],
+											hImportInt2[i],
+											PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+					if(psDCBufferImportOUT->eError != PVRSRV_OK)
+					{
+						goto DCBufferImport_exit;
+					}
+				}
+
+		}
+	}
+
+	psDCBufferImportOUT->eError =
+		DCBufferImport(
+					psDisplayContextInt,
+					psDCBufferImportIN->ui32NumPlanes,
+					psImportInt,
+					&psDCBufferImportIN->sSurfAttrib,
+					&psBufferInt);
+	/* Exit early if bridged call fails */
+	if(psDCBufferImportOUT->eError != PVRSRV_OK)
+	{
+		goto DCBufferImport_exit;
+	}
+
+
+	psDCBufferImportOUT->eError = PVRSRVAllocHandle(psConnection->psHandleBase,
+							&psDCBufferImportOUT->hBuffer,
+							(IMG_VOID *) psBufferInt,
+							PVRSRV_HANDLE_TYPE_DC_BUFFER,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+							,(PFN_HANDLE_RELEASE)&DCBufferFree);
+	if (psDCBufferImportOUT->eError != PVRSRV_OK)
+	{
+		goto DCBufferImport_exit;
+	}
+
+
+
+
+DCBufferImport_exit:
+	if (psDCBufferImportOUT->eError != PVRSRV_OK)
+	{
+		if (psBufferInt)
+		{
+			DCBufferFree(psBufferInt);
+		}
+	}
+
+	if (psImportInt)
+		OSFreeMem(psImportInt);
+	if (hImportInt2)
+		OSFreeMem(hImportInt2);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDCBufferFree(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DCBUFFERFREE *psDCBufferFreeIN,
+					  PVRSRV_BRIDGE_OUT_DCBUFFERFREE *psDCBufferFreeOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+	psDCBufferFreeOUT->eError =
+		PVRSRVReleaseHandle(psConnection->psHandleBase,
+					(IMG_HANDLE) psDCBufferFreeIN->hBuffer,
+					PVRSRV_HANDLE_TYPE_DC_BUFFER);
+	if ((psDCBufferFreeOUT->eError != PVRSRV_OK) && (psDCBufferFreeOUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_ASSERT(0);
+		goto DCBufferFree_exit;
+	}
+
+
+
+DCBufferFree_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDCBufferUnimport(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DCBUFFERUNIMPORT *psDCBufferUnimportIN,
+					  PVRSRV_BRIDGE_OUT_DCBUFFERUNIMPORT *psDCBufferUnimportOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+	psDCBufferUnimportOUT->eError =
+		PVRSRVReleaseHandle(psConnection->psHandleBase,
+					(IMG_HANDLE) psDCBufferUnimportIN->hBuffer,
+					PVRSRV_HANDLE_TYPE_DC_BUFFER);
+	if ((psDCBufferUnimportOUT->eError != PVRSRV_OK) && (psDCBufferUnimportOUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_ASSERT(0);
+		goto DCBufferUnimport_exit;
+	}
+
+
+
+DCBufferUnimport_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDCBufferPin(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DCBUFFERPIN *psDCBufferPinIN,
+					  PVRSRV_BRIDGE_OUT_DCBUFFERPIN *psDCBufferPinOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	DC_BUFFER * psBufferInt = IMG_NULL;
+	DC_PIN_HANDLE hPinHandleInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psDCBufferPinOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psBufferInt,
+											psDCBufferPinIN->hBuffer,
+											PVRSRV_HANDLE_TYPE_DC_BUFFER);
+					if(psDCBufferPinOUT->eError != PVRSRV_OK)
+					{
+						goto DCBufferPin_exit;
+					}
+				}
+
+
+	psDCBufferPinOUT->eError =
+		DCBufferPin(
+					psBufferInt,
+					&hPinHandleInt);
+	/* Exit early if bridged call fails */
+	if(psDCBufferPinOUT->eError != PVRSRV_OK)
+	{
+		goto DCBufferPin_exit;
+	}
+
+
+	psDCBufferPinOUT->eError = PVRSRVAllocHandle(psConnection->psHandleBase,
+							&psDCBufferPinOUT->hPinHandle,
+							(IMG_VOID *) hPinHandleInt,
+							PVRSRV_HANDLE_TYPE_DC_PIN_HANDLE,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+							,(PFN_HANDLE_RELEASE)&DCBufferUnpin);
+	if (psDCBufferPinOUT->eError != PVRSRV_OK)
+	{
+		goto DCBufferPin_exit;
+	}
+
+
+
+
+DCBufferPin_exit:
+	if (psDCBufferPinOUT->eError != PVRSRV_OK)
+	{
+		if (hPinHandleInt)
+		{
+			DCBufferUnpin(hPinHandleInt);
+		}
+	}
+
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDCBufferUnpin(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DCBUFFERUNPIN *psDCBufferUnpinIN,
+					  PVRSRV_BRIDGE_OUT_DCBUFFERUNPIN *psDCBufferUnpinOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+	psDCBufferUnpinOUT->eError =
+		PVRSRVReleaseHandle(psConnection->psHandleBase,
+					(IMG_HANDLE) psDCBufferUnpinIN->hPinHandle,
+					PVRSRV_HANDLE_TYPE_DC_PIN_HANDLE);
+	if ((psDCBufferUnpinOUT->eError != PVRSRV_OK) && (psDCBufferUnpinOUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_ASSERT(0);
+		goto DCBufferUnpin_exit;
+	}
+
+
+
+DCBufferUnpin_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDCBufferAcquire(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DCBUFFERACQUIRE *psDCBufferAcquireIN,
+					  PVRSRV_BRIDGE_OUT_DCBUFFERACQUIRE *psDCBufferAcquireOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	DC_BUFFER * psBufferInt = IMG_NULL;
+	PMR * psExtMemInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psDCBufferAcquireOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psBufferInt,
+											psDCBufferAcquireIN->hBuffer,
+											PVRSRV_HANDLE_TYPE_DC_BUFFER);
+					if(psDCBufferAcquireOUT->eError != PVRSRV_OK)
+					{
+						goto DCBufferAcquire_exit;
+					}
+				}
+
+
+	psDCBufferAcquireOUT->eError =
+		DCBufferAcquire(
+					psBufferInt,
+					&psExtMemInt);
+	/* Exit early if bridged call fails */
+	if(psDCBufferAcquireOUT->eError != PVRSRV_OK)
+	{
+		goto DCBufferAcquire_exit;
+	}
+
+
+	psDCBufferAcquireOUT->eError = PVRSRVAllocHandle(psConnection->psHandleBase,
+							&psDCBufferAcquireOUT->hExtMem,
+							(IMG_VOID *) psExtMemInt,
+							PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+							,(PFN_HANDLE_RELEASE)&DCBufferRelease);
+	if (psDCBufferAcquireOUT->eError != PVRSRV_OK)
+	{
+		goto DCBufferAcquire_exit;
+	}
+
+
+
+
+DCBufferAcquire_exit:
+	if (psDCBufferAcquireOUT->eError != PVRSRV_OK)
+	{
+		if (psExtMemInt)
+		{
+			DCBufferRelease(psExtMemInt);
+		}
+	}
+
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDCBufferRelease(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DCBUFFERRELEASE *psDCBufferReleaseIN,
+					  PVRSRV_BRIDGE_OUT_DCBUFFERRELEASE *psDCBufferReleaseOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+	psDCBufferReleaseOUT->eError =
+		PVRSRVReleaseHandle(psConnection->psHandleBase,
+					(IMG_HANDLE) psDCBufferReleaseIN->hExtMem,
+					PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT);
+	if ((psDCBufferReleaseOUT->eError != PVRSRV_OK) && (psDCBufferReleaseOUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_ASSERT(0);
+		goto DCBufferRelease_exit;
+	}
+
+
+
+DCBufferRelease_exit:
+
+	return 0;
+}
+
+
+
+/* *************************************************************************** 
+ * Server bridge dispatch related glue 
+ */
+
+
+PVRSRV_ERROR InitDCBridge(IMG_VOID);
+PVRSRV_ERROR DeinitDCBridge(IMG_VOID);
+
+/*
+ * Register all DC functions with services
+ */
+PVRSRV_ERROR InitDCBridge(IMG_VOID)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_DC, PVRSRV_BRIDGE_DC_DCDEVICESQUERYCOUNT, PVRSRVBridgeDCDevicesQueryCount,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_DC, PVRSRV_BRIDGE_DC_DCDEVICESENUMERATE, PVRSRVBridgeDCDevicesEnumerate,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_DC, PVRSRV_BRIDGE_DC_DCDEVICEACQUIRE, PVRSRVBridgeDCDeviceAcquire,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_DC, PVRSRV_BRIDGE_DC_DCDEVICERELEASE, PVRSRVBridgeDCDeviceRelease,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_DC, PVRSRV_BRIDGE_DC_DCGETINFO, PVRSRVBridgeDCGetInfo,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_DC, PVRSRV_BRIDGE_DC_DCPANELQUERYCOUNT, PVRSRVBridgeDCPanelQueryCount,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_DC, PVRSRV_BRIDGE_DC_DCPANELQUERY, PVRSRVBridgeDCPanelQuery,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_DC, PVRSRV_BRIDGE_DC_DCFORMATQUERY, PVRSRVBridgeDCFormatQuery,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_DC, PVRSRV_BRIDGE_DC_DCDIMQUERY, PVRSRVBridgeDCDimQuery,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_DC, PVRSRV_BRIDGE_DC_DCSETBLANK, PVRSRVBridgeDCSetBlank,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_DC, PVRSRV_BRIDGE_DC_DCSETVSYNCREPORTING, PVRSRVBridgeDCSetVSyncReporting,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_DC, PVRSRV_BRIDGE_DC_DCLASTVSYNCQUERY, PVRSRVBridgeDCLastVSyncQuery,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_DC, PVRSRV_BRIDGE_DC_DCSYSTEMBUFFERACQUIRE, PVRSRVBridgeDCSystemBufferAcquire,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_DC, PVRSRV_BRIDGE_DC_DCSYSTEMBUFFERRELEASE, PVRSRVBridgeDCSystemBufferRelease,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_DC, PVRSRV_BRIDGE_DC_DCDISPLAYCONTEXTCREATE, PVRSRVBridgeDCDisplayContextCreate,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_DC, PVRSRV_BRIDGE_DC_DCDISPLAYCONTEXTCONFIGURECHECK, PVRSRVBridgeDCDisplayContextConfigureCheck,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_DC, PVRSRV_BRIDGE_DC_DCDISPLAYCONTEXTCONFIGURE, PVRSRVBridgeDCDisplayContextConfigure,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_DC, PVRSRV_BRIDGE_DC_DCDISPLAYCONTEXTDESTROY, PVRSRVBridgeDCDisplayContextDestroy,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_DC, PVRSRV_BRIDGE_DC_DCBUFFERALLOC, PVRSRVBridgeDCBufferAlloc,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_DC, PVRSRV_BRIDGE_DC_DCBUFFERIMPORT, PVRSRVBridgeDCBufferImport,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_DC, PVRSRV_BRIDGE_DC_DCBUFFERFREE, PVRSRVBridgeDCBufferFree,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_DC, PVRSRV_BRIDGE_DC_DCBUFFERUNIMPORT, PVRSRVBridgeDCBufferUnimport,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_DC, PVRSRV_BRIDGE_DC_DCBUFFERPIN, PVRSRVBridgeDCBufferPin,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_DC, PVRSRV_BRIDGE_DC_DCBUFFERUNPIN, PVRSRVBridgeDCBufferUnpin,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_DC, PVRSRV_BRIDGE_DC_DCBUFFERACQUIRE, PVRSRVBridgeDCBufferAcquire,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_DC, PVRSRV_BRIDGE_DC_DCBUFFERRELEASE, PVRSRVBridgeDCBufferRelease,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all dc functions with services
+ */
+PVRSRV_ERROR DeinitDCBridge(IMG_VOID)
+{
+	return PVRSRV_OK;
+}
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/ddevicememhistory_bridge/client_devicememhistory_bridge.c b/drivers/external_drivers/intel_media/graphics/rgx/generated/ddevicememhistory_bridge/client_devicememhistory_bridge.c
new file mode 100644
index 0000000..21225f7
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/ddevicememhistory_bridge/client_devicememhistory_bridge.c
@@ -0,0 +1,88 @@
+/*************************************************************************/ /*!
+@Title          Direct client bridge for devicememhistory
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "client_devicememhistory_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "img_types.h"
+#include "mm_common.h"
+
+#include "devicemem_history_server.h"
+
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevicememHistoryMap(IMG_HANDLE hBridge,
+								 IMG_DEV_VIRTADDR sDevVAddr,
+								 IMG_DEVMEM_SIZE_T uiSize,
+								 const IMG_CHAR *puiText)
+{
+	PVRSRV_ERROR eError;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+
+	eError =
+		DevicememHistoryMapKM(
+					sDevVAddr,
+					uiSize,
+					puiText);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevicememHistoryUnmap(IMG_HANDLE hBridge,
+								   IMG_DEV_VIRTADDR sDevVAddr,
+								   IMG_DEVMEM_SIZE_T uiSize,
+								   const IMG_CHAR *puiText)
+{
+	PVRSRV_ERROR eError;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+
+	eError =
+		DevicememHistoryUnmapKM(
+					sDevVAddr,
+					uiSize,
+					puiText);
+
+	return eError;
+}
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/debugmisc_bridge/common_debugmisc_bridge.h b/drivers/external_drivers/intel_media/graphics/rgx/generated/debugmisc_bridge/common_debugmisc_bridge.h
new file mode 100644
index 0000000..ef103fb
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/debugmisc_bridge/common_debugmisc_bridge.h
@@ -0,0 +1,139 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common bridge header for debugmisc
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures that are used by both
+                the client and sever side of the bridge for debugmisc
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_DEBUGMISC_BRIDGE_H
+#define COMMON_DEBUGMISC_BRIDGE_H
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "devicemem_typedefs.h"
+#include "rgx_bridge.h"
+#include "pvrsrv_memallocflags.h"
+
+
+#define PVRSRV_BRIDGE_DEBUGMISC_CMD_FIRST			0
+#define PVRSRV_BRIDGE_DEBUGMISC_DEBUGMISCSLCSETBYPASSSTATE			PVRSRV_BRIDGE_DEBUGMISC_CMD_FIRST+0
+#define PVRSRV_BRIDGE_DEBUGMISC_RGXDEBUGMISCSETFWLOG			PVRSRV_BRIDGE_DEBUGMISC_CMD_FIRST+1
+#define PVRSRV_BRIDGE_DEBUGMISC_RGXDEBUGMISCDUMPFREELISTPAGELIST			PVRSRV_BRIDGE_DEBUGMISC_CMD_FIRST+2
+#define PVRSRV_BRIDGE_DEBUGMISC_PHYSMEMIMPORTSECBUF			PVRSRV_BRIDGE_DEBUGMISC_CMD_FIRST+3
+#define PVRSRV_BRIDGE_DEBUGMISC_CMD_LAST			(PVRSRV_BRIDGE_DEBUGMISC_CMD_FIRST+3)
+
+
+/*******************************************
+            DebugMiscSLCSetBypassState          
+ *******************************************/
+
+/* Bridge in structure for DebugMiscSLCSetBypassState */
+typedef struct PVRSRV_BRIDGE_IN_DEBUGMISCSLCSETBYPASSSTATE_TAG
+{
+	IMG_HANDLE hDevNode;
+	IMG_UINT32 ui32Flags;
+	IMG_BOOL bIsBypassed;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEBUGMISCSLCSETBYPASSSTATE;
+
+
+/* Bridge out structure for DebugMiscSLCSetBypassState */
+typedef struct PVRSRV_BRIDGE_OUT_DEBUGMISCSLCSETBYPASSSTATE_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEBUGMISCSLCSETBYPASSSTATE;
+
+/*******************************************
+            RGXDebugMiscSetFWLog          
+ *******************************************/
+
+/* Bridge in structure for RGXDebugMiscSetFWLog */
+typedef struct PVRSRV_BRIDGE_IN_RGXDEBUGMISCSETFWLOG_TAG
+{
+	IMG_HANDLE hDevNode;
+	IMG_UINT32 ui32RGXFWLogType;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDEBUGMISCSETFWLOG;
+
+
+/* Bridge out structure for RGXDebugMiscSetFWLog */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDEBUGMISCSETFWLOG_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDEBUGMISCSETFWLOG;
+
+/*******************************************
+            RGXDebugMiscDumpFreelistPageList          
+ *******************************************/
+
+/* Bridge in structure for RGXDebugMiscDumpFreelistPageList */
+typedef struct PVRSRV_BRIDGE_IN_RGXDEBUGMISCDUMPFREELISTPAGELIST_TAG
+{
+	IMG_HANDLE hDevNode;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDEBUGMISCDUMPFREELISTPAGELIST;
+
+
+/* Bridge out structure for RGXDebugMiscDumpFreelistPageList */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDEBUGMISCDUMPFREELISTPAGELIST_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDEBUGMISCDUMPFREELISTPAGELIST;
+
+/*******************************************
+            PhysmemImportSecBuf          
+ *******************************************/
+
+/* Bridge in structure for PhysmemImportSecBuf */
+typedef struct PVRSRV_BRIDGE_IN_PHYSMEMIMPORTSECBUF_TAG
+{
+	IMG_HANDLE hDevNode;
+	IMG_DEVMEM_SIZE_T uiSize;
+	IMG_UINT32 ui32Log2PageSize;
+	PVRSRV_MEMALLOCFLAGS_T uiFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PHYSMEMIMPORTSECBUF;
+
+
+/* Bridge out structure for PhysmemImportSecBuf */
+typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTSECBUF_TAG
+{
+	IMG_HANDLE hPMRPtr;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTSECBUF;
+
+#endif /* COMMON_DEBUGMISC_BRIDGE_H */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/debugmisc_bridge/server_debugmisc_bridge.c b/drivers/external_drivers/intel_media/graphics/rgx/generated/debugmisc_bridge/server_debugmisc_bridge.c
new file mode 100644
index 0000000..09dbc01
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/debugmisc_bridge/server_debugmisc_bridge.c
@@ -0,0 +1,314 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server bridge for debugmisc
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for debugmisc
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <asm/uaccess.h>
+
+#include "img_defs.h"
+
+#include "devicemem_server.h"
+#include "debugmisc_server.h"
+#include "pmr.h"
+#include "physmem_osmem.h"
+
+
+#include "common_debugmisc_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#if defined (SUPPORT_AUTH)
+#include "osauth.h"
+#endif
+
+#include <linux/slab.h>
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+ 
+static IMG_INT
+PVRSRVBridgeDebugMiscSLCSetBypassState(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DEBUGMISCSLCSETBYPASSSTATE *psDebugMiscSLCSetBypassStateIN,
+					  PVRSRV_BRIDGE_OUT_DEBUGMISCSLCSETBYPASSSTATE *psDebugMiscSLCSetBypassStateOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hDevNodeInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psDebugMiscSLCSetBypassStateOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &hDevNodeInt,
+											psDebugMiscSLCSetBypassStateIN->hDevNode,
+											PVRSRV_HANDLE_TYPE_DEV_NODE);
+					if(psDebugMiscSLCSetBypassStateOUT->eError != PVRSRV_OK)
+					{
+						goto DebugMiscSLCSetBypassState_exit;
+					}
+				}
+
+
+	psDebugMiscSLCSetBypassStateOUT->eError =
+		PVRSRVDebugMiscSLCSetBypassStateKM(
+					hDevNodeInt,
+					psDebugMiscSLCSetBypassStateIN->ui32Flags,
+					psDebugMiscSLCSetBypassStateIN->bIsBypassed);
+
+
+
+
+DebugMiscSLCSetBypassState_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXDebugMiscSetFWLog(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXDEBUGMISCSETFWLOG *psRGXDebugMiscSetFWLogIN,
+					  PVRSRV_BRIDGE_OUT_RGXDEBUGMISCSETFWLOG *psRGXDebugMiscSetFWLogOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hDevNodeInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXDebugMiscSetFWLogOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &hDevNodeInt,
+											psRGXDebugMiscSetFWLogIN->hDevNode,
+											PVRSRV_HANDLE_TYPE_DEV_NODE);
+					if(psRGXDebugMiscSetFWLogOUT->eError != PVRSRV_OK)
+					{
+						goto RGXDebugMiscSetFWLog_exit;
+					}
+				}
+
+
+	psRGXDebugMiscSetFWLogOUT->eError =
+		PVRSRVRGXDebugMiscSetFWLogKM(
+					hDevNodeInt,
+					psRGXDebugMiscSetFWLogIN->ui32RGXFWLogType);
+
+
+
+
+RGXDebugMiscSetFWLog_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXDebugMiscDumpFreelistPageList(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXDEBUGMISCDUMPFREELISTPAGELIST *psRGXDebugMiscDumpFreelistPageListIN,
+					  PVRSRV_BRIDGE_OUT_RGXDEBUGMISCDUMPFREELISTPAGELIST *psRGXDebugMiscDumpFreelistPageListOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hDevNodeInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXDebugMiscDumpFreelistPageListOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &hDevNodeInt,
+											psRGXDebugMiscDumpFreelistPageListIN->hDevNode,
+											PVRSRV_HANDLE_TYPE_DEV_NODE);
+					if(psRGXDebugMiscDumpFreelistPageListOUT->eError != PVRSRV_OK)
+					{
+						goto RGXDebugMiscDumpFreelistPageList_exit;
+					}
+				}
+
+
+	psRGXDebugMiscDumpFreelistPageListOUT->eError =
+		PVRSRVRGXDebugMiscDumpFreelistPageListKM(
+					hDevNodeInt);
+
+
+
+
+RGXDebugMiscDumpFreelistPageList_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePhysmemImportSecBuf(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PHYSMEMIMPORTSECBUF *psPhysmemImportSecBufIN,
+					  PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTSECBUF *psPhysmemImportSecBufOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hDevNodeInt = IMG_NULL;
+	PMR * psPMRPtrInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psPhysmemImportSecBufOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &hDevNodeInt,
+											psPhysmemImportSecBufIN->hDevNode,
+											PVRSRV_HANDLE_TYPE_DEV_NODE);
+					if(psPhysmemImportSecBufOUT->eError != PVRSRV_OK)
+					{
+						goto PhysmemImportSecBuf_exit;
+					}
+				}
+
+
+	psPhysmemImportSecBufOUT->eError =
+		PhysmemNewTDSecureBufPMR(
+					hDevNodeInt,
+					psPhysmemImportSecBufIN->uiSize,
+					psPhysmemImportSecBufIN->ui32Log2PageSize,
+					psPhysmemImportSecBufIN->uiFlags,
+					&psPMRPtrInt);
+	/* Exit early if bridged call fails */
+	if(psPhysmemImportSecBufOUT->eError != PVRSRV_OK)
+	{
+		goto PhysmemImportSecBuf_exit;
+	}
+
+
+	psPhysmemImportSecBufOUT->eError = PVRSRVAllocHandle(psConnection->psHandleBase,
+							&psPhysmemImportSecBufOUT->hPMRPtr,
+							(IMG_VOID *) psPMRPtrInt,
+							PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+							,(PFN_HANDLE_RELEASE)&PMRUnrefPMR);
+	if (psPhysmemImportSecBufOUT->eError != PVRSRV_OK)
+	{
+		goto PhysmemImportSecBuf_exit;
+	}
+
+
+
+
+PhysmemImportSecBuf_exit:
+	if (psPhysmemImportSecBufOUT->eError != PVRSRV_OK)
+	{
+		if (psPMRPtrInt)
+		{
+			PMRUnrefPMR(psPMRPtrInt);
+		}
+	}
+
+
+	return 0;
+}
+
+
+
+/* *************************************************************************** 
+ * Server bridge dispatch related glue 
+ */
+
+
+PVRSRV_ERROR InitDEBUGMISCBridge(IMG_VOID);
+PVRSRV_ERROR DeinitDEBUGMISCBridge(IMG_VOID);
+
+/*
+ * Register all DEBUGMISC functions with services
+ */
+PVRSRV_ERROR InitDEBUGMISCBridge(IMG_VOID)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_DEBUGMISC, PVRSRV_BRIDGE_DEBUGMISC_DEBUGMISCSLCSETBYPASSSTATE, PVRSRVBridgeDebugMiscSLCSetBypassState,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_DEBUGMISC, PVRSRV_BRIDGE_DEBUGMISC_RGXDEBUGMISCSETFWLOG, PVRSRVBridgeRGXDebugMiscSetFWLog,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_DEBUGMISC, PVRSRV_BRIDGE_DEBUGMISC_RGXDEBUGMISCDUMPFREELISTPAGELIST, PVRSRVBridgeRGXDebugMiscDumpFreelistPageList,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_DEBUGMISC, PVRSRV_BRIDGE_DEBUGMISC_PHYSMEMIMPORTSECBUF, PVRSRVBridgePhysmemImportSecBuf,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all debugmisc functions with services
+ */
+PVRSRV_ERROR DeinitDEBUGMISCBridge(IMG_VOID)
+{
+	return PVRSRV_OK;
+}
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/devicememhistory_bridge/client_devicememhistory_bridge.h b/drivers/external_drivers/intel_media/graphics/rgx/generated/devicememhistory_bridge/client_devicememhistory_bridge.h
new file mode 100644
index 0000000..c5ae4f7
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/devicememhistory_bridge/client_devicememhistory_bridge.h
@@ -0,0 +1,63 @@
+/*************************************************************************/ /*!
+@File
+@Title          Client bridge header for devicememhistory
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Exports the client bridge functions for devicememhistory
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef CLIENT_DEVICEMEMHISTORY_BRIDGE_H
+#define CLIENT_DEVICEMEMHISTORY_BRIDGE_H
+
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+
+#include "common_devicememhistory_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevicememHistoryMap(IMG_HANDLE hBridge,
+								 IMG_DEV_VIRTADDR sDevVAddr,
+								 IMG_DEVMEM_SIZE_T uiSize,
+								 const IMG_CHAR *puiText);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevicememHistoryUnmap(IMG_HANDLE hBridge,
+								   IMG_DEV_VIRTADDR sDevVAddr,
+								   IMG_DEVMEM_SIZE_T uiSize,
+								   const IMG_CHAR *puiText);
+
+
+#endif /* CLIENT_DEVICEMEMHISTORY_BRIDGE_H */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/devicememhistory_bridge/common_devicememhistory_bridge.h b/drivers/external_drivers/intel_media/graphics/rgx/generated/devicememhistory_bridge/common_devicememhistory_bridge.h
new file mode 100644
index 0000000..62ee411
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/devicememhistory_bridge/common_devicememhistory_bridge.h
@@ -0,0 +1,99 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common bridge header for devicememhistory
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures that are used by both
+                the client and sever side of the bridge for devicememhistory
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_DEVICEMEMHISTORY_BRIDGE_H
+#define COMMON_DEVICEMEMHISTORY_BRIDGE_H
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "img_types.h"
+#include "mm_common.h"
+
+
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST			0
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAP			PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+0
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAP			PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+1
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_LAST			(PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+1)
+
+
+/*******************************************
+            DevicememHistoryMap          
+ *******************************************/
+
+/* Bridge in structure for DevicememHistoryMap */
+typedef struct PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAP_TAG
+{
+	IMG_DEV_VIRTADDR sDevVAddr;
+	IMG_DEVMEM_SIZE_T uiSize;
+	const IMG_CHAR * puiText;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAP;
+
+
+/* Bridge out structure for DevicememHistoryMap */
+typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAP_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAP;
+
+/*******************************************
+            DevicememHistoryUnmap          
+ *******************************************/
+
+/* Bridge in structure for DevicememHistoryUnmap */
+typedef struct PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAP_TAG
+{
+	IMG_DEV_VIRTADDR sDevVAddr;
+	IMG_DEVMEM_SIZE_T uiSize;
+	const IMG_CHAR * puiText;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAP;
+
+
+/* Bridge out structure for DevicememHistoryUnmap */
+typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAP_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAP;
+
+#endif /* COMMON_DEVICEMEMHISTORY_BRIDGE_H */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/devicememhistory_bridge/server_devicememhistory_bridge.c b/drivers/external_drivers/intel_media/graphics/rgx/generated/devicememhistory_bridge/server_devicememhistory_bridge.c
new file mode 100644
index 0000000..95a19e7
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/devicememhistory_bridge/server_devicememhistory_bridge.c
@@ -0,0 +1,217 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server bridge for devicememhistory
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for devicememhistory
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <asm/uaccess.h>
+
+#include "img_defs.h"
+
+#include "devicemem_history_server.h"
+
+
+#include "common_devicememhistory_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#if defined (SUPPORT_AUTH)
+#include "osauth.h"
+#endif
+
+#include <linux/slab.h>
+
+#include "lock.h"
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+ 
+static IMG_INT
+PVRSRVBridgeDevicememHistoryMap(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAP *psDevicememHistoryMapIN,
+					  PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAP *psDevicememHistoryMapOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_CHAR *uiTextInt = IMG_NULL;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+
+
+	
+	{
+		uiTextInt = OSAllocMem(DEVICEMEM_HISTORY_TEXT_BUFSZ * sizeof(IMG_CHAR));
+		if (!uiTextInt)
+		{
+			psDevicememHistoryMapOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto DevicememHistoryMap_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psDevicememHistoryMapIN->puiText, DEVICEMEM_HISTORY_TEXT_BUFSZ * sizeof(IMG_CHAR))
+				|| (OSCopyFromUser(NULL, uiTextInt, psDevicememHistoryMapIN->puiText,
+				DEVICEMEM_HISTORY_TEXT_BUFSZ * sizeof(IMG_CHAR)) != PVRSRV_OK) )
+			{
+				psDevicememHistoryMapOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto DevicememHistoryMap_exit;
+			}
+
+
+
+	psDevicememHistoryMapOUT->eError =
+		DevicememHistoryMapKM(
+					psDevicememHistoryMapIN->sDevVAddr,
+					psDevicememHistoryMapIN->uiSize,
+					uiTextInt);
+
+
+
+
+DevicememHistoryMap_exit:
+	if (uiTextInt)
+		OSFreeMem(uiTextInt);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDevicememHistoryUnmap(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAP *psDevicememHistoryUnmapIN,
+					  PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAP *psDevicememHistoryUnmapOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_CHAR *uiTextInt = IMG_NULL;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+
+
+	
+	{
+		uiTextInt = OSAllocMem(DEVICEMEM_HISTORY_TEXT_BUFSZ * sizeof(IMG_CHAR));
+		if (!uiTextInt)
+		{
+			psDevicememHistoryUnmapOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto DevicememHistoryUnmap_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psDevicememHistoryUnmapIN->puiText, DEVICEMEM_HISTORY_TEXT_BUFSZ * sizeof(IMG_CHAR))
+				|| (OSCopyFromUser(NULL, uiTextInt, psDevicememHistoryUnmapIN->puiText,
+				DEVICEMEM_HISTORY_TEXT_BUFSZ * sizeof(IMG_CHAR)) != PVRSRV_OK) )
+			{
+				psDevicememHistoryUnmapOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto DevicememHistoryUnmap_exit;
+			}
+
+
+
+	psDevicememHistoryUnmapOUT->eError =
+		DevicememHistoryUnmapKM(
+					psDevicememHistoryUnmapIN->sDevVAddr,
+					psDevicememHistoryUnmapIN->uiSize,
+					uiTextInt);
+
+
+
+
+DevicememHistoryUnmap_exit:
+	if (uiTextInt)
+		OSFreeMem(uiTextInt);
+
+	return 0;
+}
+
+
+
+/* *************************************************************************** 
+ * Server bridge dispatch related glue 
+ */
+
+static POS_LOCK pDEVICEMEMHISTORYBridgeLock;
+static IMG_BYTE pbyDEVICEMEMHISTORYBridgeBuffer[56 +  4];
+
+PVRSRV_ERROR InitDEVICEMEMHISTORYBridge(IMG_VOID);
+PVRSRV_ERROR DeinitDEVICEMEMHISTORYBridge(IMG_VOID);
+
+/*
+ * Register all DEVICEMEMHISTORY functions with services
+ */
+PVRSRV_ERROR InitDEVICEMEMHISTORYBridge(IMG_VOID)
+{
+	PVR_LOGR_IF_ERROR(OSLockCreate(&pDEVICEMEMHISTORYBridgeLock, LOCK_TYPE_PASSIVE), "OSLockCreate");
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAP, PVRSRVBridgeDevicememHistoryMap,
+					pDEVICEMEMHISTORYBridgeLock, pbyDEVICEMEMHISTORYBridgeBuffer,
+					56,  4);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAP, PVRSRVBridgeDevicememHistoryUnmap,
+					pDEVICEMEMHISTORYBridgeLock, pbyDEVICEMEMHISTORYBridgeBuffer,
+					56,  4);
+
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all devicememhistory functions with services
+ */
+PVRSRV_ERROR DeinitDEVICEMEMHISTORYBridge(IMG_VOID)
+{
+	PVR_LOGR_IF_ERROR(OSLockDestroy(pDEVICEMEMHISTORYBridgeLock), "OSLockDestroy");
+	return PVRSRV_OK;
+}
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/dmabuf_bridge/common_dmabuf_bridge.h b/drivers/external_drivers/intel_media/graphics/rgx/generated/dmabuf_bridge/common_dmabuf_bridge.h
new file mode 100644
index 0000000..a2fa1d1
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/dmabuf_bridge/common_dmabuf_bridge.h
@@ -0,0 +1,80 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common bridge header for dmabuf
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures that are used by both
+                the client and sever side of the bridge for dmabuf
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_DMABUF_BRIDGE_H
+#define COMMON_DMABUF_BRIDGE_H
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "pvrsrv_memallocflags.h"
+
+
+#define PVRSRV_BRIDGE_DMABUF_CMD_FIRST			0
+#define PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUF			PVRSRV_BRIDGE_DMABUF_CMD_FIRST+0
+#define PVRSRV_BRIDGE_DMABUF_CMD_LAST			(PVRSRV_BRIDGE_DMABUF_CMD_FIRST+0)
+
+
+/*******************************************
+            PhysmemImportDmaBuf          
+ *******************************************/
+
+/* Bridge in structure for PhysmemImportDmaBuf */
+typedef struct PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUF_TAG
+{
+	IMG_INT ifd;
+	PVRSRV_MEMALLOCFLAGS_T uiFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUF;
+
+
+/* Bridge out structure for PhysmemImportDmaBuf */
+typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUF_TAG
+{
+	IMG_HANDLE hPMRPtr;
+	IMG_DEVMEM_SIZE_T uiSize;
+	IMG_DEVMEM_ALIGN_T sAlign;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUF;
+
+#endif /* COMMON_DMABUF_BRIDGE_H */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/dmabuf_bridge/server_dmabuf_bridge.c b/drivers/external_drivers/intel_media/graphics/rgx/generated/dmabuf_bridge/server_dmabuf_bridge.c
new file mode 100644
index 0000000..f8d8100
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/dmabuf_bridge/server_dmabuf_bridge.c
@@ -0,0 +1,162 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server bridge for dmabuf
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for dmabuf
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <asm/uaccess.h>
+
+#include "img_defs.h"
+
+#include "physmem_dmabuf.h"
+#include "pmr.h"
+
+
+#include "common_dmabuf_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#if defined (SUPPORT_AUTH)
+#include "osauth.h"
+#endif
+
+#include <linux/slab.h>
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+ 
+static IMG_INT
+PVRSRVBridgePhysmemImportDmaBuf(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUF *psPhysmemImportDmaBufIN,
+					  PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUF *psPhysmemImportDmaBufOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	PMR * psPMRPtrInt = IMG_NULL;
+
+
+
+
+
+
+
+	psPhysmemImportDmaBufOUT->eError =
+		PhysmemImportDmaBuf(psConnection,
+					psPhysmemImportDmaBufIN->ifd,
+					psPhysmemImportDmaBufIN->uiFlags,
+					&psPMRPtrInt,
+					&psPhysmemImportDmaBufOUT->uiSize,
+					&psPhysmemImportDmaBufOUT->sAlign);
+	/* Exit early if bridged call fails */
+	if(psPhysmemImportDmaBufOUT->eError != PVRSRV_OK)
+	{
+		goto PhysmemImportDmaBuf_exit;
+	}
+
+
+	psPhysmemImportDmaBufOUT->eError = PVRSRVAllocHandle(psConnection->psHandleBase,
+							&psPhysmemImportDmaBufOUT->hPMRPtr,
+							(IMG_VOID *) psPMRPtrInt,
+							PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+							,(PFN_HANDLE_RELEASE)&PMRUnrefPMR);
+	if (psPhysmemImportDmaBufOUT->eError != PVRSRV_OK)
+	{
+		goto PhysmemImportDmaBuf_exit;
+	}
+
+
+
+
+PhysmemImportDmaBuf_exit:
+	if (psPhysmemImportDmaBufOUT->eError != PVRSRV_OK)
+	{
+		if (psPMRPtrInt)
+		{
+			PMRUnrefPMR(psPMRPtrInt);
+		}
+	}
+
+
+	return 0;
+}
+
+
+
+/* *************************************************************************** 
+ * Server bridge dispatch related glue 
+ */
+
+
+PVRSRV_ERROR InitDMABUFBridge(IMG_VOID);
+PVRSRV_ERROR DeinitDMABUFBridge(IMG_VOID);
+
+/*
+ * Register all DMABUF functions with services
+ */
+PVRSRV_ERROR InitDMABUFBridge(IMG_VOID)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUF, PVRSRVBridgePhysmemImportDmaBuf,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all dmabuf functions with services
+ */
+PVRSRV_ERROR DeinitDMABUFBridge(IMG_VOID)
+{
+	return PVRSRV_OK;
+}
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/dmm_bridge/client_mm_bridge.c b/drivers/external_drivers/intel_media/graphics/rgx/generated/dmm_bridge/client_mm_bridge.c
new file mode 100644
index 0000000..47ab7c1
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/dmm_bridge/client_mm_bridge.c
@@ -0,0 +1,555 @@
+/*************************************************************************/ /*!
+@Title          Direct client bridge for mm
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "client_mm_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "pvrsrv_memallocflags.h"
+#include "devicemem_typedefs.h"
+
+#include "devicemem_server.h"
+#include "pmr.h"
+#include "devicemem_heapcfg.h"
+#include "physmem.h"
+
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRExportPMR(IMG_HANDLE hBridge,
+							  IMG_HANDLE hPMR,
+							  IMG_HANDLE *phPMRExport,
+							  IMG_UINT64 *pui64Size,
+							  IMG_UINT32 *pui32Log2Contig,
+							  IMG_UINT64 *pui64Password)
+{
+	PVRSRV_ERROR eError;
+	PMR * psPMRInt;
+	PMR_EXPORT * psPMRExportInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRInt = (PMR *) hPMR;
+
+	eError =
+		PMRExportPMR(
+					psPMRInt,
+					&psPMRExportInt,
+					pui64Size,
+					pui32Log2Contig,
+					pui64Password);
+
+	*phPMRExport = psPMRExportInt;
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRUnexportPMR(IMG_HANDLE hBridge,
+							    IMG_HANDLE hPMRExport)
+{
+	PVRSRV_ERROR eError;
+	PMR_EXPORT * psPMRExportInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRExportInt = (PMR_EXPORT *) hPMRExport;
+
+	eError =
+		PMRUnexportPMR(
+					psPMRExportInt);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRGetUID(IMG_HANDLE hBridge,
+						       IMG_HANDLE hPMR,
+						       IMG_UINT64 *pui64UID)
+{
+	PVRSRV_ERROR eError;
+	PMR * psPMRInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRInt = (PMR *) hPMR;
+
+	eError =
+		PMRGetUID(
+					psPMRInt,
+					pui64UID);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRMakeServerExportClientExport(IMG_HANDLE hBridge,
+									     DEVMEM_SERVER_EXPORTCOOKIE hPMRServerExport,
+									     IMG_HANDLE *phPMRExportOut,
+									     IMG_UINT64 *pui64Size,
+									     IMG_UINT32 *pui32Log2Contig,
+									     IMG_UINT64 *pui64Password)
+{
+	PVRSRV_ERROR eError;
+	DEVMEM_EXPORTCOOKIE * psPMRServerExportInt;
+	PMR_EXPORT * psPMRExportOutInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRServerExportInt = (DEVMEM_EXPORTCOOKIE *) hPMRServerExport;
+
+	eError =
+		PMRMakeServerExportClientExport(
+					psPMRServerExportInt,
+					&psPMRExportOutInt,
+					pui64Size,
+					pui32Log2Contig,
+					pui64Password);
+
+	*phPMRExportOut = psPMRExportOutInt;
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRUnmakeServerExportClientExport(IMG_HANDLE hBridge,
+									       IMG_HANDLE hPMRExport)
+{
+	PVRSRV_ERROR eError;
+	PMR_EXPORT * psPMRExportInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRExportInt = (PMR_EXPORT *) hPMRExport;
+
+	eError =
+		PMRUnmakeServerExportClientExport(
+					psPMRExportInt);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRImportPMR(IMG_HANDLE hBridge,
+							  IMG_HANDLE hPMRExport,
+							  IMG_UINT64 ui64uiPassword,
+							  IMG_UINT64 ui64uiSize,
+							  IMG_UINT32 ui32uiLog2Contig,
+							  IMG_HANDLE *phPMR)
+{
+	PVRSRV_ERROR eError;
+	PMR_EXPORT * psPMRExportInt;
+	PMR * psPMRInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRExportInt = (PMR_EXPORT *) hPMRExport;
+
+	eError =
+		PMRImportPMR(
+					psPMRExportInt,
+					ui64uiPassword,
+					ui64uiSize,
+					ui32uiLog2Contig,
+					&psPMRInt);
+
+	*phPMR = psPMRInt;
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntCtxCreate(IMG_HANDLE hBridge,
+								IMG_HANDLE hDeviceNode,
+								IMG_HANDLE *phDevMemServerContext,
+								IMG_HANDLE *phPrivData)
+{
+	PVRSRV_ERROR eError;
+	IMG_HANDLE hDeviceNodeInt;
+	DEVMEMINT_CTX * psDevMemServerContextInt;
+	IMG_HANDLE hPrivDataInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	hDeviceNodeInt = (IMG_HANDLE) hDeviceNode;
+
+	eError =
+		DevmemIntCtxCreate(
+					hDeviceNodeInt,
+					&psDevMemServerContextInt,
+					&hPrivDataInt);
+
+	*phDevMemServerContext = psDevMemServerContextInt;
+	*phPrivData = hPrivDataInt;
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntCtxDestroy(IMG_HANDLE hBridge,
+								 IMG_HANDLE hDevmemServerContext)
+{
+	PVRSRV_ERROR eError;
+	DEVMEMINT_CTX * psDevmemServerContextInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psDevmemServerContextInt = (DEVMEMINT_CTX *) hDevmemServerContext;
+
+	eError =
+		DevmemIntCtxDestroy(
+					psDevmemServerContextInt);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntHeapCreate(IMG_HANDLE hBridge,
+								 IMG_HANDLE hDevmemCtx,
+								 IMG_DEV_VIRTADDR sHeapBaseAddr,
+								 IMG_DEVMEM_SIZE_T uiHeapLength,
+								 IMG_UINT32 ui32Log2DataPageSize,
+								 IMG_HANDLE *phDevmemHeapPtr)
+{
+	PVRSRV_ERROR eError;
+	DEVMEMINT_CTX * psDevmemCtxInt;
+	DEVMEMINT_HEAP * psDevmemHeapPtrInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx;
+
+	eError =
+		DevmemIntHeapCreate(
+					psDevmemCtxInt,
+					sHeapBaseAddr,
+					uiHeapLength,
+					ui32Log2DataPageSize,
+					&psDevmemHeapPtrInt);
+
+	*phDevmemHeapPtr = psDevmemHeapPtrInt;
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntHeapDestroy(IMG_HANDLE hBridge,
+								  IMG_HANDLE hDevmemHeap)
+{
+	PVRSRV_ERROR eError;
+	DEVMEMINT_HEAP * psDevmemHeapInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psDevmemHeapInt = (DEVMEMINT_HEAP *) hDevmemHeap;
+
+	eError =
+		DevmemIntHeapDestroy(
+					psDevmemHeapInt);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntMapPMR(IMG_HANDLE hBridge,
+							     IMG_HANDLE hDevmemServerHeap,
+							     IMG_HANDLE hReservation,
+							     IMG_HANDLE hPMR,
+							     PVRSRV_MEMALLOCFLAGS_T uiMapFlags,
+							     IMG_HANDLE *phMapping)
+{
+	PVRSRV_ERROR eError;
+	DEVMEMINT_HEAP * psDevmemServerHeapInt;
+	DEVMEMINT_RESERVATION * psReservationInt;
+	PMR * psPMRInt;
+	DEVMEMINT_MAPPING * psMappingInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psDevmemServerHeapInt = (DEVMEMINT_HEAP *) hDevmemServerHeap;
+	psReservationInt = (DEVMEMINT_RESERVATION *) hReservation;
+	psPMRInt = (PMR *) hPMR;
+
+	eError =
+		DevmemIntMapPMR(
+					psDevmemServerHeapInt,
+					psReservationInt,
+					psPMRInt,
+					uiMapFlags,
+					&psMappingInt);
+
+	*phMapping = psMappingInt;
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnmapPMR(IMG_HANDLE hBridge,
+							       IMG_HANDLE hMapping)
+{
+	PVRSRV_ERROR eError;
+	DEVMEMINT_MAPPING * psMappingInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psMappingInt = (DEVMEMINT_MAPPING *) hMapping;
+
+	eError =
+		DevmemIntUnmapPMR(
+					psMappingInt);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntReserveRange(IMG_HANDLE hBridge,
+								   IMG_HANDLE hDevmemServerHeap,
+								   IMG_DEV_VIRTADDR sAddress,
+								   IMG_DEVMEM_SIZE_T uiLength,
+								   IMG_HANDLE *phReservation)
+{
+	PVRSRV_ERROR eError;
+	DEVMEMINT_HEAP * psDevmemServerHeapInt;
+	DEVMEMINT_RESERVATION * psReservationInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psDevmemServerHeapInt = (DEVMEMINT_HEAP *) hDevmemServerHeap;
+
+	eError =
+		DevmemIntReserveRange(
+					psDevmemServerHeapInt,
+					sAddress,
+					uiLength,
+					&psReservationInt);
+
+	*phReservation = psReservationInt;
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnreserveRange(IMG_HANDLE hBridge,
+								     IMG_HANDLE hReservation)
+{
+	PVRSRV_ERROR eError;
+	DEVMEMINT_RESERVATION * psReservationInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psReservationInt = (DEVMEMINT_RESERVATION *) hReservation;
+
+	eError =
+		DevmemIntUnreserveRange(
+					psReservationInt);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePhysmemNewRamBackedPMR(IMG_HANDLE hBridge,
+								    IMG_HANDLE hDeviceNode,
+								    IMG_DEVMEM_SIZE_T uiSize,
+								    IMG_DEVMEM_SIZE_T uiChunkSize,
+								    IMG_UINT32 ui32NumPhysChunks,
+								    IMG_UINT32 ui32NumVirtChunks,
+								    IMG_BOOL *pbMappingTable,
+								    IMG_UINT32 ui32Log2PageSize,
+								    PVRSRV_MEMALLOCFLAGS_T uiFlags,
+								    IMG_HANDLE *phPMRPtr)
+{
+	PVRSRV_ERROR eError;
+	IMG_HANDLE hDeviceNodeInt;
+	PMR * psPMRPtrInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	hDeviceNodeInt = (IMG_HANDLE) hDeviceNode;
+
+	eError =
+		PhysmemNewRamBackedPMR(
+					hDeviceNodeInt,
+					uiSize,
+					uiChunkSize,
+					ui32NumPhysChunks,
+					ui32NumVirtChunks,
+					pbMappingTable,
+					ui32Log2PageSize,
+					uiFlags,
+					&psPMRPtrInt);
+
+	*phPMRPtr = psPMRPtrInt;
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRLocalImportPMR(IMG_HANDLE hBridge,
+							       IMG_HANDLE hExtHandle,
+							       IMG_HANDLE *phPMR,
+							       IMG_DEVMEM_SIZE_T *puiSize,
+							       IMG_DEVMEM_ALIGN_T *psAlign)
+{
+	PVRSRV_ERROR eError;
+	PMR * psExtHandleInt;
+	PMR * psPMRInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psExtHandleInt = (PMR *) hExtHandle;
+
+	eError =
+		PMRLocalImportPMR(
+					psExtHandleInt,
+					&psPMRInt,
+					puiSize,
+					psAlign);
+
+	*phPMR = psPMRInt;
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRUnrefPMR(IMG_HANDLE hBridge,
+							 IMG_HANDLE hPMR)
+{
+	PVRSRV_ERROR eError;
+	PMR * psPMRInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRInt = (PMR *) hPMR;
+
+	eError =
+		PMRUnrefPMR(
+					psPMRInt);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemSLCFlushInvalRequest(IMG_HANDLE hBridge,
+									IMG_HANDLE hDeviceNode,
+									IMG_HANDLE hPmr)
+{
+	PVRSRV_ERROR eError;
+	IMG_HANDLE hDeviceNodeInt;
+	PMR * psPmrInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	hDeviceNodeInt = (IMG_HANDLE) hDeviceNode;
+	psPmrInt = (PMR *) hPmr;
+
+	eError =
+		DevmemSLCFlushInvalRequest(
+					hDeviceNodeInt,
+					psPmrInt);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIsVDevAddrValid(IMG_HANDLE hBridge,
+								   IMG_HANDLE hDevmemCtx,
+								   IMG_DEV_VIRTADDR sAddress)
+{
+	PVRSRV_ERROR eError;
+	DEVMEMINT_CTX * psDevmemCtxInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx;
+
+	eError =
+		DevmemIntIsVDevAddrValid(
+					psDevmemCtxInt,
+					sAddress);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHeapCfgHeapConfigCount(IMG_HANDLE hBridge,
+								    IMG_HANDLE hDeviceNode,
+								    IMG_UINT32 *pui32NumHeapConfigs)
+{
+	PVRSRV_ERROR eError;
+	IMG_HANDLE hDeviceNodeInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	hDeviceNodeInt = (IMG_HANDLE) hDeviceNode;
+
+	eError =
+		HeapCfgHeapConfigCount(
+					hDeviceNodeInt,
+					pui32NumHeapConfigs);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHeapCfgHeapCount(IMG_HANDLE hBridge,
+							      IMG_HANDLE hDeviceNode,
+							      IMG_UINT32 ui32HeapConfigIndex,
+							      IMG_UINT32 *pui32NumHeaps)
+{
+	PVRSRV_ERROR eError;
+	IMG_HANDLE hDeviceNodeInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	hDeviceNodeInt = (IMG_HANDLE) hDeviceNode;
+
+	eError =
+		HeapCfgHeapCount(
+					hDeviceNodeInt,
+					ui32HeapConfigIndex,
+					pui32NumHeaps);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHeapCfgHeapConfigName(IMG_HANDLE hBridge,
+								   IMG_HANDLE hDeviceNode,
+								   IMG_UINT32 ui32HeapConfigIndex,
+								   IMG_UINT32 ui32HeapConfigNameBufSz,
+								   IMG_CHAR *puiHeapConfigName)
+{
+	PVRSRV_ERROR eError;
+	IMG_HANDLE hDeviceNodeInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	hDeviceNodeInt = (IMG_HANDLE) hDeviceNode;
+
+	eError =
+		HeapCfgHeapConfigName(
+					hDeviceNodeInt,
+					ui32HeapConfigIndex,
+					ui32HeapConfigNameBufSz,
+					puiHeapConfigName);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHeapCfgHeapDetails(IMG_HANDLE hBridge,
+								IMG_HANDLE hDeviceNode,
+								IMG_UINT32 ui32HeapConfigIndex,
+								IMG_UINT32 ui32HeapIndex,
+								IMG_UINT32 ui32HeapNameBufSz,
+								IMG_CHAR *puiHeapNameOut,
+								IMG_DEV_VIRTADDR *psDevVAddrBase,
+								IMG_DEVMEM_SIZE_T *puiHeapLength,
+								IMG_UINT32 *pui32Log2DataPageSizeOut,
+								IMG_UINT32 *pui32Log2ImportAlignmentOut)
+{
+	PVRSRV_ERROR eError;
+	IMG_HANDLE hDeviceNodeInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	hDeviceNodeInt = (IMG_HANDLE) hDeviceNode;
+
+	eError =
+		HeapCfgHeapDetails(
+					hDeviceNodeInt,
+					ui32HeapConfigIndex,
+					ui32HeapIndex,
+					ui32HeapNameBufSz,
+					puiHeapNameOut,
+					psDevVAddrBase,
+					puiHeapLength,
+					pui32Log2DataPageSizeOut,
+					pui32Log2ImportAlignmentOut);
+
+	return eError;
+}
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/dmm_bridge/client_mm_bridge.h b/drivers/external_drivers/intel_media/graphics/rgx/generated/dmm_bridge/client_mm_bridge.h
new file mode 100644
index 0000000..f9d847b
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/dmm_bridge/client_mm_bridge.h
@@ -0,0 +1,170 @@
+/*************************************************************************/ /*!
+@Title          Direct client bridge header for mm
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef CLIENT_MM_BRIDGE_H
+#define CLIENT_MM_BRIDGE_H
+
+#include "common_mm_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRExportPMR(IMG_HANDLE hBridge,
+							  IMG_HANDLE hPMR,
+							  IMG_HANDLE *phPMRExport,
+							  IMG_UINT64 *pui64Size,
+							  IMG_UINT32 *pui32Log2Contig,
+							  IMG_UINT64 *pui64Password);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRUnexportPMR(IMG_HANDLE hBridge,
+							    IMG_HANDLE hPMRExport);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRGetUID(IMG_HANDLE hBridge,
+						       IMG_HANDLE hPMR,
+						       IMG_UINT64 *pui64UID);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRMakeServerExportClientExport(IMG_HANDLE hBridge,
+									     DEVMEM_SERVER_EXPORTCOOKIE hPMRServerExport,
+									     IMG_HANDLE *phPMRExportOut,
+									     IMG_UINT64 *pui64Size,
+									     IMG_UINT32 *pui32Log2Contig,
+									     IMG_UINT64 *pui64Password);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRUnmakeServerExportClientExport(IMG_HANDLE hBridge,
+									       IMG_HANDLE hPMRExport);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRImportPMR(IMG_HANDLE hBridge,
+							  IMG_HANDLE hPMRExport,
+							  IMG_UINT64 ui64uiPassword,
+							  IMG_UINT64 ui64uiSize,
+							  IMG_UINT32 ui32uiLog2Contig,
+							  IMG_HANDLE *phPMR);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntCtxCreate(IMG_HANDLE hBridge,
+								IMG_HANDLE hDeviceNode,
+								IMG_HANDLE *phDevMemServerContext,
+								IMG_HANDLE *phPrivData);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntCtxDestroy(IMG_HANDLE hBridge,
+								 IMG_HANDLE hDevmemServerContext);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntHeapCreate(IMG_HANDLE hBridge,
+								 IMG_HANDLE hDevmemCtx,
+								 IMG_DEV_VIRTADDR sHeapBaseAddr,
+								 IMG_DEVMEM_SIZE_T uiHeapLength,
+								 IMG_UINT32 ui32Log2DataPageSize,
+								 IMG_HANDLE *phDevmemHeapPtr);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntHeapDestroy(IMG_HANDLE hBridge,
+								  IMG_HANDLE hDevmemHeap);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntMapPMR(IMG_HANDLE hBridge,
+							     IMG_HANDLE hDevmemServerHeap,
+							     IMG_HANDLE hReservation,
+							     IMG_HANDLE hPMR,
+							     PVRSRV_MEMALLOCFLAGS_T uiMapFlags,
+							     IMG_HANDLE *phMapping);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnmapPMR(IMG_HANDLE hBridge,
+							       IMG_HANDLE hMapping);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntReserveRange(IMG_HANDLE hBridge,
+								   IMG_HANDLE hDevmemServerHeap,
+								   IMG_DEV_VIRTADDR sAddress,
+								   IMG_DEVMEM_SIZE_T uiLength,
+								   IMG_HANDLE *phReservation);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnreserveRange(IMG_HANDLE hBridge,
+								     IMG_HANDLE hReservation);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePhysmemNewRamBackedPMR(IMG_HANDLE hBridge,
+								    IMG_HANDLE hDeviceNode,
+								    IMG_DEVMEM_SIZE_T uiSize,
+								    IMG_DEVMEM_SIZE_T uiChunkSize,
+								    IMG_UINT32 ui32NumPhysChunks,
+								    IMG_UINT32 ui32NumVirtChunks,
+								    IMG_BOOL *pbMappingTable,
+								    IMG_UINT32 ui32Log2PageSize,
+								    PVRSRV_MEMALLOCFLAGS_T uiFlags,
+								    IMG_HANDLE *phPMRPtr);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRLocalImportPMR(IMG_HANDLE hBridge,
+							       IMG_HANDLE hExtHandle,
+							       IMG_HANDLE *phPMR,
+							       IMG_DEVMEM_SIZE_T *puiSize,
+							       IMG_DEVMEM_ALIGN_T *psAlign);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRUnrefPMR(IMG_HANDLE hBridge,
+							 IMG_HANDLE hPMR);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemSLCFlushInvalRequest(IMG_HANDLE hBridge,
+									IMG_HANDLE hDeviceNode,
+									IMG_HANDLE hPmr);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIsVDevAddrValid(IMG_HANDLE hBridge,
+								   IMG_HANDLE hDevmemCtx,
+								   IMG_DEV_VIRTADDR sAddress);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHeapCfgHeapConfigCount(IMG_HANDLE hBridge,
+								    IMG_HANDLE hDeviceNode,
+								    IMG_UINT32 *pui32NumHeapConfigs);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHeapCfgHeapCount(IMG_HANDLE hBridge,
+							      IMG_HANDLE hDeviceNode,
+							      IMG_UINT32 ui32HeapConfigIndex,
+							      IMG_UINT32 *pui32NumHeaps);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHeapCfgHeapConfigName(IMG_HANDLE hBridge,
+								   IMG_HANDLE hDeviceNode,
+								   IMG_UINT32 ui32HeapConfigIndex,
+								   IMG_UINT32 ui32HeapConfigNameBufSz,
+								   IMG_CHAR *puiHeapConfigName);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHeapCfgHeapDetails(IMG_HANDLE hBridge,
+								IMG_HANDLE hDeviceNode,
+								IMG_UINT32 ui32HeapConfigIndex,
+								IMG_UINT32 ui32HeapIndex,
+								IMG_UINT32 ui32HeapNameBufSz,
+								IMG_CHAR *puiHeapNameOut,
+								IMG_DEV_VIRTADDR *psDevVAddrBase,
+								IMG_DEVMEM_SIZE_T *puiHeapLength,
+								IMG_UINT32 *pui32Log2DataPageSizeOut,
+								IMG_UINT32 *pui32Log2ImportAlignmentOut);
+
+
+#endif /* CLIENT_MM_BRIDGE_H */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/dpdumpmm_bridge/client_pdumpmm_bridge.c b/drivers/external_drivers/intel_media/graphics/rgx/generated/dpdumpmm_bridge/client_pdumpmm_bridge.c
new file mode 100644
index 0000000..a6dc4d0
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/dpdumpmm_bridge/client_pdumpmm_bridge.c
@@ -0,0 +1,256 @@
+/*************************************************************************/ /*!
+@Title          Direct client bridge for pdumpmm
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "client_pdumpmm_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "pdump.h"
+#include "pdumpdefs.h"
+#include "pvrsrv_memallocflags.h"
+#include "devicemem_typedefs.h"
+
+#include "devicemem_server.h"
+#include "pmr.h"
+#include "physmem.h"
+
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpLoadMem(IMG_HANDLE hBridge,
+							     IMG_HANDLE hPMR,
+							     IMG_DEVMEM_OFFSET_T uiOffset,
+							     IMG_DEVMEM_SIZE_T uiSize,
+							     IMG_UINT32 ui32PDumpFlags,
+							     IMG_BOOL bbZero)
+{
+	PVRSRV_ERROR eError;
+	PMR * psPMRInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRInt = (PMR *) hPMR;
+
+	eError =
+		PMRPDumpLoadMem(
+					psPMRInt,
+					uiOffset,
+					uiSize,
+					ui32PDumpFlags,
+					bbZero);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpLoadMemValue32(IMG_HANDLE hBridge,
+								    IMG_HANDLE hPMR,
+								    IMG_DEVMEM_OFFSET_T uiOffset,
+								    IMG_UINT32 ui32Value,
+								    IMG_UINT32 ui32PDumpFlags)
+{
+	PVRSRV_ERROR eError;
+	PMR * psPMRInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRInt = (PMR *) hPMR;
+
+	eError =
+		PMRPDumpLoadMemValue32(
+					psPMRInt,
+					uiOffset,
+					ui32Value,
+					ui32PDumpFlags);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpLoadMemValue64(IMG_HANDLE hBridge,
+								    IMG_HANDLE hPMR,
+								    IMG_DEVMEM_OFFSET_T uiOffset,
+								    IMG_UINT64 ui64Value,
+								    IMG_UINT32 ui32PDumpFlags)
+{
+	PVRSRV_ERROR eError;
+	PMR * psPMRInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRInt = (PMR *) hPMR;
+
+	eError =
+		PMRPDumpLoadMemValue64(
+					psPMRInt,
+					uiOffset,
+					ui64Value,
+					ui32PDumpFlags);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpSaveToFile(IMG_HANDLE hBridge,
+								IMG_HANDLE hPMR,
+								IMG_DEVMEM_OFFSET_T uiOffset,
+								IMG_DEVMEM_SIZE_T uiSize,
+								IMG_UINT32 ui32ArraySize,
+								const IMG_CHAR *puiFileName)
+{
+	PVRSRV_ERROR eError;
+	PMR * psPMRInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRInt = (PMR *) hPMR;
+
+	eError =
+		PMRPDumpSaveToFile(
+					psPMRInt,
+					uiOffset,
+					uiSize,
+					ui32ArraySize,
+					puiFileName);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpSymbolicAddr(IMG_HANDLE hBridge,
+								  IMG_HANDLE hPMR,
+								  IMG_DEVMEM_OFFSET_T uiOffset,
+								  IMG_UINT32 ui32MemspaceNameLen,
+								  IMG_CHAR *puiMemspaceName,
+								  IMG_UINT32 ui32SymbolicAddrLen,
+								  IMG_CHAR *puiSymbolicAddr,
+								  IMG_DEVMEM_OFFSET_T *puiNewOffset,
+								  IMG_DEVMEM_OFFSET_T *puiNextSymName)
+{
+	PVRSRV_ERROR eError;
+	PMR * psPMRInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRInt = (PMR *) hPMR;
+
+	eError =
+		PMR_PDumpSymbolicAddr(
+					psPMRInt,
+					uiOffset,
+					ui32MemspaceNameLen,
+					puiMemspaceName,
+					ui32SymbolicAddrLen,
+					puiSymbolicAddr,
+					puiNewOffset,
+					puiNextSymName);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpPol32(IMG_HANDLE hBridge,
+							   IMG_HANDLE hPMR,
+							   IMG_DEVMEM_OFFSET_T uiOffset,
+							   IMG_UINT32 ui32Value,
+							   IMG_UINT32 ui32Mask,
+							   PDUMP_POLL_OPERATOR eOperator,
+							   IMG_UINT32 ui32PDumpFlags)
+{
+	PVRSRV_ERROR eError;
+	PMR * psPMRInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRInt = (PMR *) hPMR;
+
+	eError =
+		PMRPDumpPol32(
+					psPMRInt,
+					uiOffset,
+					ui32Value,
+					ui32Mask,
+					eOperator,
+					ui32PDumpFlags);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpCBP(IMG_HANDLE hBridge,
+							 IMG_HANDLE hPMR,
+							 IMG_DEVMEM_OFFSET_T uiReadOffset,
+							 IMG_DEVMEM_OFFSET_T uiWriteOffset,
+							 IMG_DEVMEM_SIZE_T uiPacketSize,
+							 IMG_DEVMEM_SIZE_T uiBufferSize)
+{
+	PVRSRV_ERROR eError;
+	PMR * psPMRInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRInt = (PMR *) hPMR;
+
+	eError =
+		PMRPDumpCBP(
+					psPMRInt,
+					uiReadOffset,
+					uiWriteOffset,
+					uiPacketSize,
+					uiBufferSize);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntPDumpSaveToFileVirtual(IMG_HANDLE hBridge,
+									     IMG_HANDLE hDevmemServerContext,
+									     IMG_DEV_VIRTADDR sAddress,
+									     IMG_DEVMEM_SIZE_T uiSize,
+									     IMG_UINT32 ui32ArraySize,
+									     const IMG_CHAR *puiFileName,
+									     IMG_UINT32 ui32FileOffset,
+									     IMG_UINT32 ui32PDumpFlags)
+{
+	PVRSRV_ERROR eError;
+	DEVMEMINT_CTX * psDevmemServerContextInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psDevmemServerContextInt = (DEVMEMINT_CTX *) hDevmemServerContext;
+
+	eError =
+		DevmemIntPDumpSaveToFileVirtual(
+					psDevmemServerContextInt,
+					sAddress,
+					uiSize,
+					ui32ArraySize,
+					puiFileName,
+					ui32FileOffset,
+					ui32PDumpFlags);
+
+	return eError;
+}
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/dpvrtl_bridge/client_pvrtl_bridge.c b/drivers/external_drivers/intel_media/graphics/rgx/generated/dpvrtl_bridge/client_pvrtl_bridge.c
new file mode 100644
index 0000000..252deee
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/dpvrtl_bridge/client_pvrtl_bridge.c
@@ -0,0 +1,154 @@
+/*************************************************************************/ /*!
+@Title          Direct client bridge for pvrtl
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "client_pvrtl_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "devicemem_typedefs.h"
+#include "pvr_tl.h"
+
+#include "tlserver.h"
+
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLConnect(IMG_HANDLE hBridge)
+{
+	PVRSRV_ERROR eError;
+
+
+	eError =
+		TLServerConnectKM(hBridge
+					);
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLDisconnect(IMG_HANDLE hBridge)
+{
+	PVRSRV_ERROR eError;
+
+
+	eError =
+		TLServerDisconnectKM(hBridge
+					);
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLOpenStream(IMG_HANDLE hBridge,
+							  IMG_CHAR *puiName,
+							  IMG_UINT32 ui32Mode,
+							  IMG_HANDLE *phSD,
+							  DEVMEM_SERVER_EXPORTCOOKIE *phClientBUFExportCookie)
+{
+	PVRSRV_ERROR eError;
+	TL_STREAM_DESC * psSDInt;
+	DEVMEM_EXPORTCOOKIE * psClientBUFExportCookieInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+
+	eError =
+		TLServerOpenStreamKM(
+					puiName,
+					ui32Mode,
+					&psSDInt,
+					&psClientBUFExportCookieInt);
+
+	*phSD = psSDInt;
+	*phClientBUFExportCookie = psClientBUFExportCookieInt;
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLCloseStream(IMG_HANDLE hBridge,
+							   IMG_HANDLE hSD)
+{
+	PVRSRV_ERROR eError;
+	TL_STREAM_DESC * psSDInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psSDInt = (TL_STREAM_DESC *) hSD;
+
+	eError =
+		TLServerCloseStreamKM(
+					psSDInt);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLAcquireData(IMG_HANDLE hBridge,
+							   IMG_HANDLE hSD,
+							   IMG_UINT32 *pui32ReadOffset,
+							   IMG_UINT32 *pui32ReadLen)
+{
+	PVRSRV_ERROR eError;
+	TL_STREAM_DESC * psSDInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psSDInt = (TL_STREAM_DESC *) hSD;
+
+	eError =
+		TLServerAcquireDataKM(
+					psSDInt,
+					pui32ReadOffset,
+					pui32ReadLen);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLReleaseData(IMG_HANDLE hBridge,
+							   IMG_HANDLE hSD,
+							   IMG_UINT32 ui32ReadOffset,
+							   IMG_UINT32 ui32ReadLen)
+{
+	PVRSRV_ERROR eError;
+	TL_STREAM_DESC * psSDInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psSDInt = (TL_STREAM_DESC *) hSD;
+
+	eError =
+		TLServerReleaseDataKM(
+					psSDInt,
+					ui32ReadOffset,
+					ui32ReadLen);
+
+	return eError;
+}
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/dri_bridge/client_ri_bridge.c b/drivers/external_drivers/intel_media/graphics/rgx/generated/dri_bridge/client_ri_bridge.c
new file mode 100644
index 0000000..1ef8edd
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/dri_bridge/client_ri_bridge.c
@@ -0,0 +1,181 @@
+/*************************************************************************/ /*!
+@Title          Direct client bridge for ri
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "client_ri_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "ri_typedefs.h"
+
+#include "ri_server.h"
+
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIWritePMREntry(IMG_HANDLE hBridge,
+							     IMG_HANDLE hPMRHandle,
+							     IMG_UINT32 ui32TextASize,
+							     const IMG_CHAR *puiTextA,
+							     IMG_UINT64 ui64LogicalSize)
+{
+	PVRSRV_ERROR eError;
+	PMR * psPMRHandleInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRHandleInt = (PMR *) hPMRHandle;
+
+	eError =
+		RIWritePMREntryKM(
+					psPMRHandleInt,
+					ui32TextASize,
+					puiTextA,
+					ui64LogicalSize);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIWriteMEMDESCEntry(IMG_HANDLE hBridge,
+								 IMG_HANDLE hPMRHandle,
+								 IMG_UINT32 ui32TextBSize,
+								 const IMG_CHAR *puiTextB,
+								 IMG_UINT64 ui64Offset,
+								 IMG_UINT64 ui64Size,
+								 IMG_BOOL bIsImport,
+								 IMG_BOOL bIsExportable,
+								 IMG_HANDLE *phRIHandle)
+{
+	PVRSRV_ERROR eError;
+	PMR * psPMRHandleInt;
+	RI_HANDLE psRIHandleInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRHandleInt = (PMR *) hPMRHandle;
+
+	eError =
+		RIWriteMEMDESCEntryKM(
+					psPMRHandleInt,
+					ui32TextBSize,
+					puiTextB,
+					ui64Offset,
+					ui64Size,
+					bIsImport,
+					bIsExportable,
+					&psRIHandleInt);
+
+	*phRIHandle = psRIHandleInt;
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIUpdateMEMDESCAddr(IMG_HANDLE hBridge,
+								 IMG_HANDLE hRIHandle,
+								 IMG_DEV_VIRTADDR sAddr)
+{
+	PVRSRV_ERROR eError;
+	RI_HANDLE psRIHandleInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psRIHandleInt = (RI_HANDLE) hRIHandle;
+
+	eError =
+		RIUpdateMEMDESCAddrKM(
+					psRIHandleInt,
+					sAddr);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIDeleteMEMDESCEntry(IMG_HANDLE hBridge,
+								  IMG_HANDLE hRIHandle)
+{
+	PVRSRV_ERROR eError;
+	RI_HANDLE psRIHandleInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psRIHandleInt = (RI_HANDLE) hRIHandle;
+
+	eError =
+		RIDeleteMEMDESCEntryKM(
+					psRIHandleInt);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIDumpList(IMG_HANDLE hBridge,
+							IMG_HANDLE hPMRHandle)
+{
+	PVRSRV_ERROR eError;
+	PMR * psPMRHandleInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRHandleInt = (PMR *) hPMRHandle;
+
+	eError =
+		RIDumpListKM(
+					psPMRHandleInt);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIDumpAll(IMG_HANDLE hBridge)
+{
+	PVRSRV_ERROR eError;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+
+	eError =
+		RIDumpAllKM(
+					);
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIDumpProcess(IMG_HANDLE hBridge,
+							   IMG_PID ui32Pid)
+{
+	PVRSRV_ERROR eError;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+
+	eError =
+		RIDumpProcessKM(
+					ui32Pid);
+
+	return eError;
+}
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/dsync_bridge/client_sync_bridge.c b/drivers/external_drivers/intel_media/graphics/rgx/generated/dsync_bridge/client_sync_bridge.c
new file mode 100644
index 0000000..36e7574
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/dsync_bridge/client_sync_bridge.c
@@ -0,0 +1,492 @@
+/*************************************************************************/ /*!
+@Title          Direct client bridge for sync
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "client_sync_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "pdump.h"
+#include "pdumpdefs.h"
+#include "devicemem_typedefs.h"
+
+#include "sync_server.h"
+#include "pdump.h"
+
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeAllocSyncPrimitiveBlock(IMG_HANDLE hBridge,
+								     IMG_HANDLE hDevNode,
+								     IMG_HANDLE *phSyncHandle,
+								     IMG_UINT32 *pui32SyncPrimVAddr,
+								     IMG_UINT32 *pui32SyncPrimBlockSize,
+								     DEVMEM_SERVER_EXPORTCOOKIE *phExportCookie)
+{
+	PVRSRV_ERROR eError;
+	IMG_HANDLE hDevNodeInt;
+	SYNC_PRIMITIVE_BLOCK * psSyncHandleInt;
+	DEVMEM_EXPORTCOOKIE * psExportCookieInt;
+
+	hDevNodeInt = (IMG_HANDLE) hDevNode;
+
+	eError =
+		PVRSRVAllocSyncPrimitiveBlockKM(hBridge
+		,
+					hDevNodeInt,
+					&psSyncHandleInt,
+					pui32SyncPrimVAddr,
+					pui32SyncPrimBlockSize,
+					&psExportCookieInt);
+
+	*phSyncHandle = psSyncHandleInt;
+	*phExportCookie = psExportCookieInt;
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeFreeSyncPrimitiveBlock(IMG_HANDLE hBridge,
+								    IMG_HANDLE hSyncHandle)
+{
+	PVRSRV_ERROR eError;
+	SYNC_PRIMITIVE_BLOCK * psSyncHandleInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle;
+
+	eError =
+		PVRSRVFreeSyncPrimitiveBlockKM(
+					psSyncHandleInt);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimSet(IMG_HANDLE hBridge,
+							 IMG_HANDLE hSyncHandle,
+							 IMG_UINT32 ui32Index,
+							 IMG_UINT32 ui32Value)
+{
+	PVRSRV_ERROR eError;
+	SYNC_PRIMITIVE_BLOCK * psSyncHandleInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle;
+
+	eError =
+		PVRSRVSyncPrimSetKM(
+					psSyncHandleInt,
+					ui32Index,
+					ui32Value);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeServerSyncPrimSet(IMG_HANDLE hBridge,
+							       IMG_HANDLE hSyncHandle,
+							       IMG_UINT32 ui32Value)
+{
+	PVRSRV_ERROR eError;
+	SERVER_SYNC_PRIMITIVE * psSyncHandleInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psSyncHandleInt = (SERVER_SYNC_PRIMITIVE *) hSyncHandle;
+
+	eError =
+		PVRSRVServerSyncPrimSetKM(
+					psSyncHandleInt,
+					ui32Value);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncRecordRemoveByHandle(IMG_HANDLE hBridge,
+								      IMG_HANDLE hhRecord)
+{
+	PVRSRV_ERROR eError;
+	SYNC_RECORD_HANDLE pshRecordInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	pshRecordInt = (SYNC_RECORD_HANDLE) hhRecord;
+
+	eError =
+		PVRSRVSyncRecordRemoveByHandleKM(
+					pshRecordInt);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncRecordAdd(IMG_HANDLE hBridge,
+							   IMG_HANDLE *phhRecord,
+							   IMG_HANDLE hhServerSyncPrimBlock,
+							   IMG_UINT32 ui32ui32FwBlockAddr,
+							   IMG_UINT32 ui32ui32SyncOffset,
+							   IMG_BOOL bbServerSync,
+							   IMG_UINT32 ui32ClassNameSize,
+							   const IMG_CHAR *puiClassName)
+{
+	PVRSRV_ERROR eError;
+	SYNC_RECORD_HANDLE pshRecordInt;
+	SYNC_PRIMITIVE_BLOCK * pshServerSyncPrimBlockInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	pshServerSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK *) hhServerSyncPrimBlock;
+
+	eError =
+		PVRSRVSyncRecordAddKM(
+					&pshRecordInt,
+					pshServerSyncPrimBlockInt,
+					ui32ui32FwBlockAddr,
+					ui32ui32SyncOffset,
+					bbServerSync,
+					ui32ClassNameSize,
+					puiClassName);
+
+	*phhRecord = pshRecordInt;
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeServerSyncAlloc(IMG_HANDLE hBridge,
+							     IMG_HANDLE hDevNode,
+							     IMG_HANDLE *phSyncHandle,
+							     IMG_UINT32 *pui32SyncPrimVAddr,
+							     IMG_UINT32 ui32ClassNameSize,
+							     const IMG_CHAR *puiClassName)
+{
+	PVRSRV_ERROR eError;
+	IMG_HANDLE hDevNodeInt;
+	SERVER_SYNC_PRIMITIVE * psSyncHandleInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	hDevNodeInt = (IMG_HANDLE) hDevNode;
+
+	eError =
+		PVRSRVServerSyncAllocKM(
+					hDevNodeInt,
+					&psSyncHandleInt,
+					pui32SyncPrimVAddr,
+					ui32ClassNameSize,
+					puiClassName);
+
+	*phSyncHandle = psSyncHandleInt;
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeServerSyncFree(IMG_HANDLE hBridge,
+							    IMG_HANDLE hSyncHandle)
+{
+	PVRSRV_ERROR eError;
+	SERVER_SYNC_PRIMITIVE * psSyncHandleInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psSyncHandleInt = (SERVER_SYNC_PRIMITIVE *) hSyncHandle;
+
+	eError =
+		PVRSRVServerSyncFreeKM(
+					psSyncHandleInt);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeServerSyncQueueHWOp(IMG_HANDLE hBridge,
+								 IMG_HANDLE hSyncHandle,
+								 IMG_BOOL bbUpdate,
+								 IMG_UINT32 *pui32FenceValue,
+								 IMG_UINT32 *pui32UpdateValue)
+{
+	PVRSRV_ERROR eError;
+	SERVER_SYNC_PRIMITIVE * psSyncHandleInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psSyncHandleInt = (SERVER_SYNC_PRIMITIVE *) hSyncHandle;
+
+	eError =
+		PVRSRVServerSyncQueueHWOpKM(
+					psSyncHandleInt,
+					bbUpdate,
+					pui32FenceValue,
+					pui32UpdateValue);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeServerSyncGetStatus(IMG_HANDLE hBridge,
+								 IMG_UINT32 ui32SyncCount,
+								 IMG_HANDLE *phSyncHandle,
+								 IMG_UINT32 *pui32UID,
+								 IMG_UINT32 *pui32FWAddr,
+								 IMG_UINT32 *pui32CurrentOp,
+								 IMG_UINT32 *pui32NextOp)
+{
+	PVRSRV_ERROR eError;
+	SERVER_SYNC_PRIMITIVE * *psSyncHandleInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psSyncHandleInt = (SERVER_SYNC_PRIMITIVE **) phSyncHandle;
+
+	eError =
+		PVRSRVServerSyncGetStatusKM(
+					ui32SyncCount,
+					psSyncHandleInt,
+					pui32UID,
+					pui32FWAddr,
+					pui32CurrentOp,
+					pui32NextOp);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpCreate(IMG_HANDLE hBridge,
+							      IMG_UINT32 ui32SyncBlockCount,
+							      IMG_HANDLE *phBlockList,
+							      IMG_UINT32 ui32ClientSyncCount,
+							      IMG_UINT32 *pui32SyncBlockIndex,
+							      IMG_UINT32 *pui32Index,
+							      IMG_UINT32 ui32ServerSyncCount,
+							      IMG_HANDLE *phServerSync,
+							      IMG_HANDLE *phServerCookie)
+{
+	PVRSRV_ERROR eError;
+	SYNC_PRIMITIVE_BLOCK * *psBlockListInt;
+	SERVER_SYNC_PRIMITIVE * *psServerSyncInt;
+	SERVER_OP_COOKIE * psServerCookieInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psBlockListInt = (SYNC_PRIMITIVE_BLOCK **) phBlockList;
+	psServerSyncInt = (SERVER_SYNC_PRIMITIVE **) phServerSync;
+
+	eError =
+		PVRSRVSyncPrimOpCreateKM(
+					ui32SyncBlockCount,
+					psBlockListInt,
+					ui32ClientSyncCount,
+					pui32SyncBlockIndex,
+					pui32Index,
+					ui32ServerSyncCount,
+					psServerSyncInt,
+					&psServerCookieInt);
+
+	*phServerCookie = psServerCookieInt;
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpTake(IMG_HANDLE hBridge,
+							    IMG_HANDLE hServerCookie,
+							    IMG_UINT32 ui32ClientSyncCount,
+							    IMG_UINT32 *pui32Flags,
+							    IMG_UINT32 *pui32FenceValue,
+							    IMG_UINT32 *pui32UpdateValue,
+							    IMG_UINT32 ui32ServerSyncCount,
+							    IMG_UINT32 *pui32ServerFlags)
+{
+	PVRSRV_ERROR eError;
+	SERVER_OP_COOKIE * psServerCookieInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psServerCookieInt = (SERVER_OP_COOKIE *) hServerCookie;
+
+	eError =
+		PVRSRVSyncPrimOpTakeKM(
+					psServerCookieInt,
+					ui32ClientSyncCount,
+					pui32Flags,
+					pui32FenceValue,
+					pui32UpdateValue,
+					ui32ServerSyncCount,
+					pui32ServerFlags);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpReady(IMG_HANDLE hBridge,
+							     IMG_HANDLE hServerCookie,
+							     IMG_BOOL *pbReady)
+{
+	PVRSRV_ERROR eError;
+	SERVER_OP_COOKIE * psServerCookieInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psServerCookieInt = (SERVER_OP_COOKIE *) hServerCookie;
+
+	eError =
+		PVRSRVSyncPrimOpReadyKM(
+					psServerCookieInt,
+					pbReady);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpComplete(IMG_HANDLE hBridge,
+								IMG_HANDLE hServerCookie)
+{
+	PVRSRV_ERROR eError;
+	SERVER_OP_COOKIE * psServerCookieInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psServerCookieInt = (SERVER_OP_COOKIE *) hServerCookie;
+
+	eError =
+		PVRSRVSyncPrimOpCompleteKM(
+					psServerCookieInt);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpDestroy(IMG_HANDLE hBridge,
+							       IMG_HANDLE hServerCookie)
+{
+	PVRSRV_ERROR eError;
+	SERVER_OP_COOKIE * psServerCookieInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psServerCookieInt = (SERVER_OP_COOKIE *) hServerCookie;
+
+	eError =
+		PVRSRVSyncPrimOpDestroyKM(
+					psServerCookieInt);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimPDump(IMG_HANDLE hBridge,
+							   IMG_HANDLE hSyncHandle,
+							   IMG_UINT32 ui32Offset)
+{
+	PVRSRV_ERROR eError;
+	SYNC_PRIMITIVE_BLOCK * psSyncHandleInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle;
+
+	eError =
+		PVRSRVSyncPrimPDumpKM(
+					psSyncHandleInt,
+					ui32Offset);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimPDumpValue(IMG_HANDLE hBridge,
+								IMG_HANDLE hSyncHandle,
+								IMG_UINT32 ui32Offset,
+								IMG_UINT32 ui32Value)
+{
+	PVRSRV_ERROR eError;
+	SYNC_PRIMITIVE_BLOCK * psSyncHandleInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle;
+
+	eError =
+		PVRSRVSyncPrimPDumpValueKM(
+					psSyncHandleInt,
+					ui32Offset,
+					ui32Value);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimPDumpPol(IMG_HANDLE hBridge,
+							      IMG_HANDLE hSyncHandle,
+							      IMG_UINT32 ui32Offset,
+							      IMG_UINT32 ui32Value,
+							      IMG_UINT32 ui32Mask,
+							      PDUMP_POLL_OPERATOR eOperator,
+							      PDUMP_FLAGS_T uiPDumpFlags)
+{
+	PVRSRV_ERROR eError;
+	SYNC_PRIMITIVE_BLOCK * psSyncHandleInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle;
+
+	eError =
+		PVRSRVSyncPrimPDumpPolKM(
+					psSyncHandleInt,
+					ui32Offset,
+					ui32Value,
+					ui32Mask,
+					eOperator,
+					uiPDumpFlags);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpPDumpPol(IMG_HANDLE hBridge,
+								IMG_HANDLE hServerCookie,
+								PDUMP_POLL_OPERATOR eOperator,
+								PDUMP_FLAGS_T uiPDumpFlags)
+{
+	PVRSRV_ERROR eError;
+	SERVER_OP_COOKIE * psServerCookieInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psServerCookieInt = (SERVER_OP_COOKIE *) hServerCookie;
+
+	eError =
+		PVRSRVSyncPrimOpPDumpPolKM(
+					psServerCookieInt,
+					eOperator,
+					uiPDumpFlags);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimPDumpCBP(IMG_HANDLE hBridge,
+							      IMG_HANDLE hSyncHandle,
+							      IMG_UINT32 ui32Offset,
+							      IMG_DEVMEM_OFFSET_T uiWriteOffset,
+							      IMG_DEVMEM_SIZE_T uiPacketSize,
+							      IMG_DEVMEM_SIZE_T uiBufferSize)
+{
+	PVRSRV_ERROR eError;
+	SYNC_PRIMITIVE_BLOCK * psSyncHandleInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle;
+
+	eError =
+		PVRSRVSyncPrimPDumpCBPKM(
+					psSyncHandleInt,
+					ui32Offset,
+					uiWriteOffset,
+					uiPacketSize,
+					uiBufferSize);
+
+	return eError;
+}
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/dsync_bridge/client_sync_bridge.h b/drivers/external_drivers/intel_media/graphics/rgx/generated/dsync_bridge/client_sync_bridge.h
new file mode 100644
index 0000000..e80501c
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/dsync_bridge/client_sync_bridge.h
@@ -0,0 +1,161 @@
+/*************************************************************************/ /*!
+@Title          Direct client bridge header for sync
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef CLIENT_SYNC_BRIDGE_H
+#define CLIENT_SYNC_BRIDGE_H
+
+#include "common_sync_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeAllocSyncPrimitiveBlock(IMG_HANDLE hBridge,
+								     IMG_HANDLE hDevNode,
+								     IMG_HANDLE *phSyncHandle,
+								     IMG_UINT32 *pui32SyncPrimVAddr,
+								     IMG_UINT32 *pui32SyncPrimBlockSize,
+								     DEVMEM_SERVER_EXPORTCOOKIE *phExportCookie);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeFreeSyncPrimitiveBlock(IMG_HANDLE hBridge,
+								    IMG_HANDLE hSyncHandle);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimSet(IMG_HANDLE hBridge,
+							 IMG_HANDLE hSyncHandle,
+							 IMG_UINT32 ui32Index,
+							 IMG_UINT32 ui32Value);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeServerSyncPrimSet(IMG_HANDLE hBridge,
+							       IMG_HANDLE hSyncHandle,
+							       IMG_UINT32 ui32Value);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncRecordRemoveByHandle(IMG_HANDLE hBridge,
+								      IMG_HANDLE hhRecord);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncRecordAdd(IMG_HANDLE hBridge,
+							   IMG_HANDLE *phhRecord,
+							   IMG_HANDLE hhServerSyncPrimBlock,
+							   IMG_UINT32 ui32ui32FwBlockAddr,
+							   IMG_UINT32 ui32ui32SyncOffset,
+							   IMG_BOOL bbServerSync,
+							   IMG_UINT32 ui32ClassNameSize,
+							   const IMG_CHAR *puiClassName);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeServerSyncAlloc(IMG_HANDLE hBridge,
+							     IMG_HANDLE hDevNode,
+							     IMG_HANDLE *phSyncHandle,
+							     IMG_UINT32 *pui32SyncPrimVAddr,
+							     IMG_UINT32 ui32ClassNameSize,
+							     const IMG_CHAR *puiClassName);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeServerSyncFree(IMG_HANDLE hBridge,
+							    IMG_HANDLE hSyncHandle);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeServerSyncQueueHWOp(IMG_HANDLE hBridge,
+								 IMG_HANDLE hSyncHandle,
+								 IMG_BOOL bbUpdate,
+								 IMG_UINT32 *pui32FenceValue,
+								 IMG_UINT32 *pui32UpdateValue);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeServerSyncGetStatus(IMG_HANDLE hBridge,
+								 IMG_UINT32 ui32SyncCount,
+								 IMG_HANDLE *phSyncHandle,
+								 IMG_UINT32 *pui32UID,
+								 IMG_UINT32 *pui32FWAddr,
+								 IMG_UINT32 *pui32CurrentOp,
+								 IMG_UINT32 *pui32NextOp);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpCreate(IMG_HANDLE hBridge,
+							      IMG_UINT32 ui32SyncBlockCount,
+							      IMG_HANDLE *phBlockList,
+							      IMG_UINT32 ui32ClientSyncCount,
+							      IMG_UINT32 *pui32SyncBlockIndex,
+							      IMG_UINT32 *pui32Index,
+							      IMG_UINT32 ui32ServerSyncCount,
+							      IMG_HANDLE *phServerSync,
+							      IMG_HANDLE *phServerCookie);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpTake(IMG_HANDLE hBridge,
+							    IMG_HANDLE hServerCookie,
+							    IMG_UINT32 ui32ClientSyncCount,
+							    IMG_UINT32 *pui32Flags,
+							    IMG_UINT32 *pui32FenceValue,
+							    IMG_UINT32 *pui32UpdateValue,
+							    IMG_UINT32 ui32ServerSyncCount,
+							    IMG_UINT32 *pui32ServerFlags);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpReady(IMG_HANDLE hBridge,
+							     IMG_HANDLE hServerCookie,
+							     IMG_BOOL *pbReady);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpComplete(IMG_HANDLE hBridge,
+								IMG_HANDLE hServerCookie);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpDestroy(IMG_HANDLE hBridge,
+							       IMG_HANDLE hServerCookie);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimPDump(IMG_HANDLE hBridge,
+							   IMG_HANDLE hSyncHandle,
+							   IMG_UINT32 ui32Offset);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimPDumpValue(IMG_HANDLE hBridge,
+								IMG_HANDLE hSyncHandle,
+								IMG_UINT32 ui32Offset,
+								IMG_UINT32 ui32Value);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimPDumpPol(IMG_HANDLE hBridge,
+							      IMG_HANDLE hSyncHandle,
+							      IMG_UINT32 ui32Offset,
+							      IMG_UINT32 ui32Value,
+							      IMG_UINT32 ui32Mask,
+							      PDUMP_POLL_OPERATOR eOperator,
+							      PDUMP_FLAGS_T uiPDumpFlags);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpPDumpPol(IMG_HANDLE hBridge,
+								IMG_HANDLE hServerCookie,
+								PDUMP_POLL_OPERATOR eOperator,
+								PDUMP_FLAGS_T uiPDumpFlags);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimPDumpCBP(IMG_HANDLE hBridge,
+							      IMG_HANDLE hSyncHandle,
+							      IMG_UINT32 ui32Offset,
+							      IMG_DEVMEM_OFFSET_T uiWriteOffset,
+							      IMG_DEVMEM_SIZE_T uiPacketSize,
+							      IMG_DEVMEM_SIZE_T uiBufferSize);
+
+
+#endif /* CLIENT_SYNC_BRIDGE_H */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/mm_bridge/common_mm_bridge.h b/drivers/external_drivers/intel_media/graphics/rgx/generated/mm_bridge/common_mm_bridge.h
new file mode 100644
index 0000000..0f35c46
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/mm_bridge/common_mm_bridge.h
@@ -0,0 +1,530 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common bridge header for mm
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures that are used by both
+                the client and sever side of the bridge for mm
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_MM_BRIDGE_H
+#define COMMON_MM_BRIDGE_H
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "pvrsrv_memallocflags.h"
+#include "devicemem_typedefs.h"
+
+
+#define PVRSRV_BRIDGE_MM_CMD_FIRST			0
+#define PVRSRV_BRIDGE_MM_PMREXPORTPMR			PVRSRV_BRIDGE_MM_CMD_FIRST+0
+#define PVRSRV_BRIDGE_MM_PMRUNEXPORTPMR			PVRSRV_BRIDGE_MM_CMD_FIRST+1
+#define PVRSRV_BRIDGE_MM_PMRGETUID			PVRSRV_BRIDGE_MM_CMD_FIRST+2
+#define PVRSRV_BRIDGE_MM_PMRMAKESERVEREXPORTCLIENTEXPORT			PVRSRV_BRIDGE_MM_CMD_FIRST+3
+#define PVRSRV_BRIDGE_MM_PMRUNMAKESERVEREXPORTCLIENTEXPORT			PVRSRV_BRIDGE_MM_CMD_FIRST+4
+#define PVRSRV_BRIDGE_MM_PMRIMPORTPMR			PVRSRV_BRIDGE_MM_CMD_FIRST+5
+#define PVRSRV_BRIDGE_MM_DEVMEMINTCTXCREATE			PVRSRV_BRIDGE_MM_CMD_FIRST+6
+#define PVRSRV_BRIDGE_MM_DEVMEMINTCTXDESTROY			PVRSRV_BRIDGE_MM_CMD_FIRST+7
+#define PVRSRV_BRIDGE_MM_DEVMEMINTHEAPCREATE			PVRSRV_BRIDGE_MM_CMD_FIRST+8
+#define PVRSRV_BRIDGE_MM_DEVMEMINTHEAPDESTROY			PVRSRV_BRIDGE_MM_CMD_FIRST+9
+#define PVRSRV_BRIDGE_MM_DEVMEMINTMAPPMR			PVRSRV_BRIDGE_MM_CMD_FIRST+10
+#define PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPMR			PVRSRV_BRIDGE_MM_CMD_FIRST+11
+#define PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGE			PVRSRV_BRIDGE_MM_CMD_FIRST+12
+#define PVRSRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGE			PVRSRV_BRIDGE_MM_CMD_FIRST+13
+#define PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDPMR			PVRSRV_BRIDGE_MM_CMD_FIRST+14
+#define PVRSRV_BRIDGE_MM_PMRLOCALIMPORTPMR			PVRSRV_BRIDGE_MM_CMD_FIRST+15
+#define PVRSRV_BRIDGE_MM_PMRUNREFPMR			PVRSRV_BRIDGE_MM_CMD_FIRST+16
+#define PVRSRV_BRIDGE_MM_DEVMEMSLCFLUSHINVALREQUEST			PVRSRV_BRIDGE_MM_CMD_FIRST+17
+#define PVRSRV_BRIDGE_MM_DEVMEMISVDEVADDRVALID			PVRSRV_BRIDGE_MM_CMD_FIRST+18
+#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGCOUNT			PVRSRV_BRIDGE_MM_CMD_FIRST+19
+#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPCOUNT			PVRSRV_BRIDGE_MM_CMD_FIRST+20
+#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGNAME			PVRSRV_BRIDGE_MM_CMD_FIRST+21
+#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPDETAILS			PVRSRV_BRIDGE_MM_CMD_FIRST+22
+#define PVRSRV_BRIDGE_MM_CMD_LAST			(PVRSRV_BRIDGE_MM_CMD_FIRST+22)
+
+
+/*******************************************
+            PMRExportPMR          
+ *******************************************/
+
+/* Bridge in structure for PMRExportPMR */
+typedef struct PVRSRV_BRIDGE_IN_PMREXPORTPMR_TAG
+{
+	IMG_HANDLE hPMR;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMREXPORTPMR;
+
+
+/* Bridge out structure for PMRExportPMR */
+typedef struct PVRSRV_BRIDGE_OUT_PMREXPORTPMR_TAG
+{
+	IMG_HANDLE hPMRExport;
+	IMG_UINT64 ui64Size;
+	IMG_UINT32 ui32Log2Contig;
+	IMG_UINT64 ui64Password;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMREXPORTPMR;
+
+/*******************************************
+            PMRUnexportPMR          
+ *******************************************/
+
+/* Bridge in structure for PMRUnexportPMR */
+typedef struct PVRSRV_BRIDGE_IN_PMRUNEXPORTPMR_TAG
+{
+	IMG_HANDLE hPMRExport;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRUNEXPORTPMR;
+
+
+/* Bridge out structure for PMRUnexportPMR */
+typedef struct PVRSRV_BRIDGE_OUT_PMRUNEXPORTPMR_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRUNEXPORTPMR;
+
+/*******************************************
+            PMRGetUID          
+ *******************************************/
+
+/* Bridge in structure for PMRGetUID */
+typedef struct PVRSRV_BRIDGE_IN_PMRGETUID_TAG
+{
+	IMG_HANDLE hPMR;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRGETUID;
+
+
+/* Bridge out structure for PMRGetUID */
+typedef struct PVRSRV_BRIDGE_OUT_PMRGETUID_TAG
+{
+	IMG_UINT64 ui64UID;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRGETUID;
+
+/*******************************************
+            PMRMakeServerExportClientExport          
+ *******************************************/
+
+/* Bridge in structure for PMRMakeServerExportClientExport */
+typedef struct PVRSRV_BRIDGE_IN_PMRMAKESERVEREXPORTCLIENTEXPORT_TAG
+{
+	DEVMEM_SERVER_EXPORTCOOKIE hPMRServerExport;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRMAKESERVEREXPORTCLIENTEXPORT;
+
+
+/* Bridge out structure for PMRMakeServerExportClientExport */
+typedef struct PVRSRV_BRIDGE_OUT_PMRMAKESERVEREXPORTCLIENTEXPORT_TAG
+{
+	IMG_HANDLE hPMRExportOut;
+	IMG_UINT64 ui64Size;
+	IMG_UINT32 ui32Log2Contig;
+	IMG_UINT64 ui64Password;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRMAKESERVEREXPORTCLIENTEXPORT;
+
+/*******************************************
+            PMRUnmakeServerExportClientExport          
+ *******************************************/
+
+/* Bridge in structure for PMRUnmakeServerExportClientExport */
+typedef struct PVRSRV_BRIDGE_IN_PMRUNMAKESERVEREXPORTCLIENTEXPORT_TAG
+{
+	IMG_HANDLE hPMRExport;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRUNMAKESERVEREXPORTCLIENTEXPORT;
+
+
+/* Bridge out structure for PMRUnmakeServerExportClientExport */
+typedef struct PVRSRV_BRIDGE_OUT_PMRUNMAKESERVEREXPORTCLIENTEXPORT_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRUNMAKESERVEREXPORTCLIENTEXPORT;
+
+/*******************************************
+            PMRImportPMR          
+ *******************************************/
+
+/* Bridge in structure for PMRImportPMR */
+typedef struct PVRSRV_BRIDGE_IN_PMRIMPORTPMR_TAG
+{
+	IMG_HANDLE hPMRExport;
+	IMG_UINT64 ui64uiPassword;
+	IMG_UINT64 ui64uiSize;
+	IMG_UINT32 ui32uiLog2Contig;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRIMPORTPMR;
+
+
+/* Bridge out structure for PMRImportPMR */
+typedef struct PVRSRV_BRIDGE_OUT_PMRIMPORTPMR_TAG
+{
+	IMG_HANDLE hPMR;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRIMPORTPMR;
+
+/*******************************************
+            DevmemIntCtxCreate          
+ *******************************************/
+
+/* Bridge in structure for DevmemIntCtxCreate */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTCTXCREATE_TAG
+{
+	IMG_HANDLE hDeviceNode;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTCTXCREATE;
+
+
+/* Bridge out structure for DevmemIntCtxCreate */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTCTXCREATE_TAG
+{
+	IMG_HANDLE hDevMemServerContext;
+	IMG_HANDLE hPrivData;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTCTXCREATE;
+
+/*******************************************
+            DevmemIntCtxDestroy          
+ *******************************************/
+
+/* Bridge in structure for DevmemIntCtxDestroy */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTCTXDESTROY_TAG
+{
+	IMG_HANDLE hDevmemServerContext;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTCTXDESTROY;
+
+
+/* Bridge out structure for DevmemIntCtxDestroy */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTCTXDESTROY_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTCTXDESTROY;
+
+/*******************************************
+            DevmemIntHeapCreate          
+ *******************************************/
+
+/* Bridge in structure for DevmemIntHeapCreate */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE_TAG
+{
+	IMG_HANDLE hDevmemCtx;
+	IMG_DEV_VIRTADDR sHeapBaseAddr;
+	IMG_DEVMEM_SIZE_T uiHeapLength;
+	IMG_UINT32 ui32Log2DataPageSize;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE;
+
+
+/* Bridge out structure for DevmemIntHeapCreate */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPCREATE_TAG
+{
+	IMG_HANDLE hDevmemHeapPtr;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPCREATE;
+
+/*******************************************
+            DevmemIntHeapDestroy          
+ *******************************************/
+
+/* Bridge in structure for DevmemIntHeapDestroy */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTHEAPDESTROY_TAG
+{
+	IMG_HANDLE hDevmemHeap;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTHEAPDESTROY;
+
+
+/* Bridge out structure for DevmemIntHeapDestroy */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPDESTROY_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPDESTROY;
+
+/*******************************************
+            DevmemIntMapPMR          
+ *******************************************/
+
+/* Bridge in structure for DevmemIntMapPMR */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTMAPPMR_TAG
+{
+	IMG_HANDLE hDevmemServerHeap;
+	IMG_HANDLE hReservation;
+	IMG_HANDLE hPMR;
+	PVRSRV_MEMALLOCFLAGS_T uiMapFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTMAPPMR;
+
+
+/* Bridge out structure for DevmemIntMapPMR */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR_TAG
+{
+	IMG_HANDLE hMapping;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR;
+
+/*******************************************
+            DevmemIntUnmapPMR          
+ *******************************************/
+
+/* Bridge in structure for DevmemIntUnmapPMR */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPMR_TAG
+{
+	IMG_HANDLE hMapping;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPMR;
+
+
+/* Bridge out structure for DevmemIntUnmapPMR */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPMR_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPMR;
+
+/*******************************************
+            DevmemIntReserveRange          
+ *******************************************/
+
+/* Bridge in structure for DevmemIntReserveRange */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGE_TAG
+{
+	IMG_HANDLE hDevmemServerHeap;
+	IMG_DEV_VIRTADDR sAddress;
+	IMG_DEVMEM_SIZE_T uiLength;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGE;
+
+
+/* Bridge out structure for DevmemIntReserveRange */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGE_TAG
+{
+	IMG_HANDLE hReservation;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGE;
+
+/*******************************************
+            DevmemIntUnreserveRange          
+ *******************************************/
+
+/* Bridge in structure for DevmemIntUnreserveRange */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNRESERVERANGE_TAG
+{
+	IMG_HANDLE hReservation;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTUNRESERVERANGE;
+
+
+/* Bridge out structure for DevmemIntUnreserveRange */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGE_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGE;
+
+/*******************************************
+            PhysmemNewRamBackedPMR          
+ *******************************************/
+
+/* Bridge in structure for PhysmemNewRamBackedPMR */
+typedef struct PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDPMR_TAG
+{
+	IMG_HANDLE hDeviceNode;
+	IMG_DEVMEM_SIZE_T uiSize;
+	IMG_DEVMEM_SIZE_T uiChunkSize;
+	IMG_UINT32 ui32NumPhysChunks;
+	IMG_UINT32 ui32NumVirtChunks;
+	IMG_BOOL * pbMappingTable;
+	IMG_UINT32 ui32Log2PageSize;
+	PVRSRV_MEMALLOCFLAGS_T uiFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDPMR;
+
+
+/* Bridge out structure for PhysmemNewRamBackedPMR */
+typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDPMR_TAG
+{
+	IMG_HANDLE hPMRPtr;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDPMR;
+
+/*******************************************
+            PMRLocalImportPMR          
+ *******************************************/
+
+/* Bridge in structure for PMRLocalImportPMR */
+typedef struct PVRSRV_BRIDGE_IN_PMRLOCALIMPORTPMR_TAG
+{
+	IMG_HANDLE hExtHandle;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRLOCALIMPORTPMR;
+
+
+/* Bridge out structure for PMRLocalImportPMR */
+typedef struct PVRSRV_BRIDGE_OUT_PMRLOCALIMPORTPMR_TAG
+{
+	IMG_HANDLE hPMR;
+	IMG_DEVMEM_SIZE_T uiSize;
+	IMG_DEVMEM_ALIGN_T sAlign;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRLOCALIMPORTPMR;
+
+/*******************************************
+            PMRUnrefPMR          
+ *******************************************/
+
+/* Bridge in structure for PMRUnrefPMR */
+typedef struct PVRSRV_BRIDGE_IN_PMRUNREFPMR_TAG
+{
+	IMG_HANDLE hPMR;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRUNREFPMR;
+
+
+/* Bridge out structure for PMRUnrefPMR */
+typedef struct PVRSRV_BRIDGE_OUT_PMRUNREFPMR_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRUNREFPMR;
+
+/*******************************************
+            DevmemSLCFlushInvalRequest          
+ *******************************************/
+
+/* Bridge in structure for DevmemSLCFlushInvalRequest */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMSLCFLUSHINVALREQUEST_TAG
+{
+	IMG_HANDLE hDeviceNode;
+	IMG_HANDLE hPmr;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMSLCFLUSHINVALREQUEST;
+
+
+/* Bridge out structure for DevmemSLCFlushInvalRequest */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMSLCFLUSHINVALREQUEST_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMSLCFLUSHINVALREQUEST;
+
+/*******************************************
+            DevmemIsVDevAddrValid          
+ *******************************************/
+
+/* Bridge in structure for DevmemIsVDevAddrValid */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMISVDEVADDRVALID_TAG
+{
+	IMG_HANDLE hDevmemCtx;
+	IMG_DEV_VIRTADDR sAddress;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMISVDEVADDRVALID;
+
+
+/* Bridge out structure for DevmemIsVDevAddrValid */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMISVDEVADDRVALID_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMISVDEVADDRVALID;
+
+/*******************************************
+            HeapCfgHeapConfigCount          
+ *******************************************/
+
+/* Bridge in structure for HeapCfgHeapConfigCount */
+typedef struct PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGCOUNT_TAG
+{
+	IMG_HANDLE hDeviceNode;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGCOUNT;
+
+
+/* Bridge out structure for HeapCfgHeapConfigCount */
+typedef struct PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGCOUNT_TAG
+{
+	IMG_UINT32 ui32NumHeapConfigs;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGCOUNT;
+
+/*******************************************
+            HeapCfgHeapCount          
+ *******************************************/
+
+/* Bridge in structure for HeapCfgHeapCount */
+typedef struct PVRSRV_BRIDGE_IN_HEAPCFGHEAPCOUNT_TAG
+{
+	IMG_HANDLE hDeviceNode;
+	IMG_UINT32 ui32HeapConfigIndex;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_HEAPCFGHEAPCOUNT;
+
+
+/* Bridge out structure for HeapCfgHeapCount */
+typedef struct PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCOUNT_TAG
+{
+	IMG_UINT32 ui32NumHeaps;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCOUNT;
+
+/*******************************************
+            HeapCfgHeapConfigName          
+ *******************************************/
+
+/* Bridge in structure for HeapCfgHeapConfigName */
+typedef struct PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGNAME_TAG
+{
+	IMG_HANDLE hDeviceNode;
+	IMG_UINT32 ui32HeapConfigIndex;
+	IMG_UINT32 ui32HeapConfigNameBufSz;
+	/* Output pointer puiHeapConfigName is also an implied input */
+	IMG_CHAR * puiHeapConfigName;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGNAME;
+
+
+/* Bridge out structure for HeapCfgHeapConfigName */
+typedef struct PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGNAME_TAG
+{
+	IMG_CHAR * puiHeapConfigName;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGNAME;
+
+/*******************************************
+            HeapCfgHeapDetails          
+ *******************************************/
+
+/* Bridge in structure for HeapCfgHeapDetails */
+typedef struct PVRSRV_BRIDGE_IN_HEAPCFGHEAPDETAILS_TAG
+{
+	IMG_HANDLE hDeviceNode;
+	IMG_UINT32 ui32HeapConfigIndex;
+	IMG_UINT32 ui32HeapIndex;
+	IMG_UINT32 ui32HeapNameBufSz;
+	/* Output pointer puiHeapNameOut is also an implied input */
+	IMG_CHAR * puiHeapNameOut;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_HEAPCFGHEAPDETAILS;
+
+
+/* Bridge out structure for HeapCfgHeapDetails */
+typedef struct PVRSRV_BRIDGE_OUT_HEAPCFGHEAPDETAILS_TAG
+{
+	IMG_CHAR * puiHeapNameOut;
+	IMG_DEV_VIRTADDR sDevVAddrBase;
+	IMG_DEVMEM_SIZE_T uiHeapLength;
+	IMG_UINT32 ui32Log2DataPageSizeOut;
+	IMG_UINT32 ui32Log2ImportAlignmentOut;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_HEAPCFGHEAPDETAILS;
+
+#endif /* COMMON_MM_BRIDGE_H */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/mm_bridge/server_mm_bridge.c b/drivers/external_drivers/intel_media/graphics/rgx/generated/mm_bridge/server_mm_bridge.c
new file mode 100644
index 0000000..03e3e33
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/mm_bridge/server_mm_bridge.c
@@ -0,0 +1,1699 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server bridge for mm
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for mm
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <asm/uaccess.h>
+
+#include "img_defs.h"
+
+#include "devicemem_server.h"
+#include "pmr.h"
+#include "devicemem_heapcfg.h"
+#include "physmem.h"
+
+
+#include "common_mm_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#if defined (SUPPORT_AUTH)
+#include "osauth.h"
+#endif
+
+#include <linux/slab.h>
+
+
+static PVRSRV_ERROR ReleasePMRExport(IMG_VOID *pvData)
+{
+	PVR_UNREFERENCED_PARAMETER(pvData);
+
+	return PVRSRV_OK;
+}
+static PVRSRV_ERROR ReleasePMRExportOut(IMG_VOID *pvData)
+{
+	PVR_UNREFERENCED_PARAMETER(pvData);
+
+	return PVRSRV_OK;
+}
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+ 
+static IMG_INT
+PVRSRVBridgePMRExportPMR(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PMREXPORTPMR *psPMRExportPMRIN,
+					  PVRSRV_BRIDGE_OUT_PMREXPORTPMR *psPMRExportPMROUT,
+					 CONNECTION_DATA *psConnection)
+{
+	PMR * psPMRInt = IMG_NULL;
+	PMR_EXPORT * psPMRExportInt = IMG_NULL;
+	IMG_HANDLE hPMRExportInt = IMG_NULL;
+
+
+
+
+
+	PMRLock();
+
+
+				{
+					/* Look up the address from the handle */
+					psPMRExportPMROUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psPMRInt,
+											psPMRExportPMRIN->hPMR,
+											PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+					if(psPMRExportPMROUT->eError != PVRSRV_OK)
+					{
+						PMRUnlock();
+						goto PMRExportPMR_exit;
+					}
+				}
+
+
+	psPMRExportPMROUT->eError =
+		PMRExportPMR(
+					psPMRInt,
+					&psPMRExportInt,
+					&psPMRExportPMROUT->ui64Size,
+					&psPMRExportPMROUT->ui32Log2Contig,
+					&psPMRExportPMROUT->ui64Password);
+	/* Exit early if bridged call fails */
+	if(psPMRExportPMROUT->eError != PVRSRV_OK)
+	{
+		PMRUnlock();
+		goto PMRExportPMR_exit;
+	}
+	PMRUnlock();
+
+
+	/*
+	 * For cases where we need a cross process handle we actually allocate two.
+	 * 
+	 * The first one is a connection specific handle and it gets given the real
+	 * release function. This handle does *NOT* get returned to the caller. It's
+	 * purpose is to release any leaked resources when we either have a bad or
+	 * abnormally terminated client. If we didn't do this then the resource
+	 * wouldn't be freed until driver unload. If the resource is freed normally,
+	 * this handle can be looked up via the cross process handle and then
+	 * released accordingly.
+	 * 
+	 * The second one is a cross process handle and it gets given a noop release
+	 * function. This handle does get returned to the caller.
+	 */
+	psPMRExportPMROUT->eError = PVRSRVAllocHandle(psConnection->psHandleBase,
+							&hPMRExportInt,
+							(IMG_VOID *) psPMRExportInt,
+							PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT,
+							PVRSRV_HANDLE_ALLOC_FLAG_SHARED
+							,(PFN_HANDLE_RELEASE)&PMRUnexportPMR);
+	if (psPMRExportPMROUT->eError != PVRSRV_OK)
+	{
+		goto PMRExportPMR_exit;
+	}
+
+	psPMRExportPMROUT->eError = PVRSRVAllocHandle(KERNEL_HANDLE_BASE,
+							&psPMRExportPMROUT->hPMRExport,
+							(IMG_VOID *) psPMRExportInt,
+							PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+							(PFN_HANDLE_RELEASE)&ReleasePMRExport);
+	if (psPMRExportPMROUT->eError != PVRSRV_OK)
+	{
+		goto PMRExportPMR_exit;
+	}
+
+
+
+PMRExportPMR_exit:
+	if (psPMRExportPMROUT->eError != PVRSRV_OK)
+	{
+		if (psPMRExportPMROUT->hPMRExport)
+		{
+			PVRSRV_ERROR eError = PVRSRVReleaseHandle(KERNEL_HANDLE_BASE,
+						(IMG_HANDLE) psPMRExportPMROUT->hPMRExport,
+						PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT);
+
+			/* Releasing the handle should free/destroy/release the resource. This should never fail... */
+			PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY));
+
+		}
+
+		if (hPMRExportInt)
+		{
+			PVRSRV_ERROR eError = PVRSRVReleaseHandle(psConnection->psHandleBase,
+						hPMRExportInt,
+						PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT);
+
+			/* Releasing the handle should free/destroy/release the resource. This should never fail... */
+			PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY));
+
+			/* Avoid freeing/destroying/releasing the resource a second time below */
+			psPMRExportInt = IMG_NULL;
+		}
+
+		if (psPMRExportInt)
+		{
+			PMRUnexportPMR(psPMRExportInt);
+		}
+	}
+
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePMRUnexportPMR(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PMRUNEXPORTPMR *psPMRUnexportPMRIN,
+					  PVRSRV_BRIDGE_OUT_PMRUNEXPORTPMR *psPMRUnexportPMROUT,
+					 CONNECTION_DATA *psConnection)
+{
+	PMR_EXPORT * psPMRExportInt = IMG_NULL;
+	IMG_HANDLE hPMRExportInt = IMG_NULL;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+
+
+
+	PMRLock();
+
+
+
+	psPMRUnexportPMROUT->eError =
+		PVRSRVLookupHandle(KERNEL_HANDLE_BASE,
+					(IMG_VOID **) &psPMRExportInt,
+					(IMG_HANDLE) psPMRUnexportPMRIN->hPMRExport,
+					PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT);
+	PVR_ASSERT(psPMRUnexportPMROUT->eError == PVRSRV_OK);
+
+	/*
+	 * Find the connection specific handle that represents the same data
+	 * as the cross process handle as releasing it will actually call the
+	 * data's real release function (see the function where the cross
+	 * process handle is allocated for more details).
+	 */
+	psPMRUnexportPMROUT->eError =
+		PVRSRVFindHandle(psConnection->psHandleBase,
+					&hPMRExportInt,
+					psPMRExportInt,
+					PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT);
+	PVR_ASSERT(psPMRUnexportPMROUT->eError == PVRSRV_OK);
+
+	psPMRUnexportPMROUT->eError =
+		PVRSRVReleaseHandle(psConnection->psHandleBase,
+					hPMRExportInt,
+					PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT);
+	PVR_ASSERT((psPMRUnexportPMROUT->eError == PVRSRV_OK) || (psPMRUnexportPMROUT->eError == PVRSRV_ERROR_RETRY));
+
+	psPMRUnexportPMROUT->eError =
+		PVRSRVReleaseHandle(KERNEL_HANDLE_BASE,
+					(IMG_HANDLE) psPMRUnexportPMRIN->hPMRExport,
+					PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT);
+	if ((psPMRUnexportPMROUT->eError != PVRSRV_OK) && (psPMRUnexportPMROUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_ASSERT(0);
+		PMRUnlock();
+		goto PMRUnexportPMR_exit;
+	}
+
+	PMRUnlock();
+
+
+PMRUnexportPMR_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePMRGetUID(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PMRGETUID *psPMRGetUIDIN,
+					  PVRSRV_BRIDGE_OUT_PMRGETUID *psPMRGetUIDOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	PMR * psPMRInt = IMG_NULL;
+
+
+
+
+
+	PMRLock();
+
+
+				{
+					/* Look up the address from the handle */
+					psPMRGetUIDOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psPMRInt,
+											psPMRGetUIDIN->hPMR,
+											PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+					if(psPMRGetUIDOUT->eError != PVRSRV_OK)
+					{
+						PMRUnlock();
+						goto PMRGetUID_exit;
+					}
+				}
+
+
+	psPMRGetUIDOUT->eError =
+		PMRGetUID(
+					psPMRInt,
+					&psPMRGetUIDOUT->ui64UID);
+	PMRUnlock();
+
+
+
+
+PMRGetUID_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePMRMakeServerExportClientExport(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PMRMAKESERVEREXPORTCLIENTEXPORT *psPMRMakeServerExportClientExportIN,
+					  PVRSRV_BRIDGE_OUT_PMRMAKESERVEREXPORTCLIENTEXPORT *psPMRMakeServerExportClientExportOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	DEVMEM_EXPORTCOOKIE * psPMRServerExportInt = IMG_NULL;
+	PMR_EXPORT * psPMRExportOutInt = IMG_NULL;
+	IMG_HANDLE hPMRExportOutInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psPMRMakeServerExportClientExportOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psPMRServerExportInt,
+											psPMRMakeServerExportClientExportIN->hPMRServerExport,
+											PVRSRV_HANDLE_TYPE_SERVER_EXPORTCOOKIE);
+					if(psPMRMakeServerExportClientExportOUT->eError != PVRSRV_OK)
+					{
+						goto PMRMakeServerExportClientExport_exit;
+					}
+				}
+
+
+	psPMRMakeServerExportClientExportOUT->eError =
+		PMRMakeServerExportClientExport(
+					psPMRServerExportInt,
+					&psPMRExportOutInt,
+					&psPMRMakeServerExportClientExportOUT->ui64Size,
+					&psPMRMakeServerExportClientExportOUT->ui32Log2Contig,
+					&psPMRMakeServerExportClientExportOUT->ui64Password);
+	/* Exit early if bridged call fails */
+	if(psPMRMakeServerExportClientExportOUT->eError != PVRSRV_OK)
+	{
+		goto PMRMakeServerExportClientExport_exit;
+	}
+
+
+	/*
+	 * For cases where we need a cross process handle we actually allocate two.
+	 * 
+	 * The first one is a connection specific handle and it gets given the real
+	 * release function. This handle does *NOT* get returned to the caller. It's
+	 * purpose is to release any leaked resources when we either have a bad or
+	 * abnormally terminated client. If we didn't do this then the resource
+	 * wouldn't be freed until driver unload. If the resource is freed normally,
+	 * this handle can be looked up via the cross process handle and then
+	 * released accordingly.
+	 * 
+	 * The second one is a cross process handle and it gets given a noop release
+	 * function. This handle does get returned to the caller.
+	 */
+	psPMRMakeServerExportClientExportOUT->eError = PVRSRVAllocHandle(psConnection->psHandleBase,
+							&hPMRExportOutInt,
+							(IMG_VOID *) psPMRExportOutInt,
+							PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT,
+							PVRSRV_HANDLE_ALLOC_FLAG_SHARED
+							,(PFN_HANDLE_RELEASE)&PMRUnmakeServerExportClientExport);
+	if (psPMRMakeServerExportClientExportOUT->eError != PVRSRV_OK)
+	{
+		goto PMRMakeServerExportClientExport_exit;
+	}
+
+	psPMRMakeServerExportClientExportOUT->eError = PVRSRVAllocHandle(KERNEL_HANDLE_BASE,
+							&psPMRMakeServerExportClientExportOUT->hPMRExportOut,
+							(IMG_VOID *) psPMRExportOutInt,
+							PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+							(PFN_HANDLE_RELEASE)&ReleasePMRExportOut);
+	if (psPMRMakeServerExportClientExportOUT->eError != PVRSRV_OK)
+	{
+		goto PMRMakeServerExportClientExport_exit;
+	}
+
+
+
+PMRMakeServerExportClientExport_exit:
+	if (psPMRMakeServerExportClientExportOUT->eError != PVRSRV_OK)
+	{
+		if (psPMRMakeServerExportClientExportOUT->hPMRExportOut)
+		{
+			PVRSRV_ERROR eError = PVRSRVReleaseHandle(KERNEL_HANDLE_BASE,
+						(IMG_HANDLE) psPMRMakeServerExportClientExportOUT->hPMRExportOut,
+						PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT);
+
+			/* Releasing the handle should free/destroy/release the resource. This should never fail... */
+			PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY));
+
+		}
+
+		if (hPMRExportOutInt)
+		{
+			PVRSRV_ERROR eError = PVRSRVReleaseHandle(psConnection->psHandleBase,
+						hPMRExportOutInt,
+						PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT);
+
+			/* Releasing the handle should free/destroy/release the resource. This should never fail... */
+			PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY));
+
+			/* Avoid freeing/destroying/releasing the resource a second time below */
+			psPMRExportOutInt = IMG_NULL;
+		}
+
+		if (psPMRExportOutInt)
+		{
+			PMRUnmakeServerExportClientExport(psPMRExportOutInt);
+		}
+	}
+
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePMRUnmakeServerExportClientExport(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PMRUNMAKESERVEREXPORTCLIENTEXPORT *psPMRUnmakeServerExportClientExportIN,
+					  PVRSRV_BRIDGE_OUT_PMRUNMAKESERVEREXPORTCLIENTEXPORT *psPMRUnmakeServerExportClientExportOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	PMR_EXPORT * psPMRExportInt = IMG_NULL;
+	IMG_HANDLE hPMRExportInt = IMG_NULL;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+
+
+
+
+
+
+	psPMRUnmakeServerExportClientExportOUT->eError =
+		PVRSRVLookupHandle(KERNEL_HANDLE_BASE,
+					(IMG_VOID **) &psPMRExportInt,
+					(IMG_HANDLE) psPMRUnmakeServerExportClientExportIN->hPMRExport,
+					PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT);
+	PVR_ASSERT(psPMRUnmakeServerExportClientExportOUT->eError == PVRSRV_OK);
+
+	/*
+	 * Find the connection specific handle that represents the same data
+	 * as the cross process handle as releasing it will actually call the
+	 * data's real release function (see the function where the cross
+	 * process handle is allocated for more details).
+	 */
+	psPMRUnmakeServerExportClientExportOUT->eError =
+		PVRSRVFindHandle(psConnection->psHandleBase,
+					&hPMRExportInt,
+					psPMRExportInt,
+					PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT);
+	PVR_ASSERT(psPMRUnmakeServerExportClientExportOUT->eError == PVRSRV_OK);
+
+	psPMRUnmakeServerExportClientExportOUT->eError =
+		PVRSRVReleaseHandle(psConnection->psHandleBase,
+					hPMRExportInt,
+					PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT);
+	PVR_ASSERT((psPMRUnmakeServerExportClientExportOUT->eError == PVRSRV_OK) || (psPMRUnmakeServerExportClientExportOUT->eError == PVRSRV_ERROR_RETRY));
+
+	psPMRUnmakeServerExportClientExportOUT->eError =
+		PVRSRVReleaseHandle(KERNEL_HANDLE_BASE,
+					(IMG_HANDLE) psPMRUnmakeServerExportClientExportIN->hPMRExport,
+					PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT);
+	if ((psPMRUnmakeServerExportClientExportOUT->eError != PVRSRV_OK) && (psPMRUnmakeServerExportClientExportOUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_ASSERT(0);
+		goto PMRUnmakeServerExportClientExport_exit;
+	}
+
+
+
+PMRUnmakeServerExportClientExport_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePMRImportPMR(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PMRIMPORTPMR *psPMRImportPMRIN,
+					  PVRSRV_BRIDGE_OUT_PMRIMPORTPMR *psPMRImportPMROUT,
+					 CONNECTION_DATA *psConnection)
+{
+	PMR_EXPORT * psPMRExportInt = IMG_NULL;
+	PMR * psPMRInt = IMG_NULL;
+
+
+
+
+#if defined (SUPPORT_AUTH)
+	psPMRImportPMROUT->eError = OSCheckAuthentication(psConnection, 1);
+	if (psPMRImportPMROUT->eError != PVRSRV_OK)
+	{
+		goto PMRImportPMR_exit;
+	}
+#endif
+
+	PMRLock();
+
+
+				{
+					/* Look up the address from the handle */
+					psPMRImportPMROUT->eError =
+						PVRSRVLookupHandle(KERNEL_HANDLE_BASE,
+											(IMG_VOID **) &psPMRExportInt,
+											psPMRImportPMRIN->hPMRExport,
+											PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT);
+					if(psPMRImportPMROUT->eError != PVRSRV_OK)
+					{
+						PMRUnlock();
+						goto PMRImportPMR_exit;
+					}
+				}
+
+
+	psPMRImportPMROUT->eError =
+		PMRImportPMR(
+					psPMRExportInt,
+					psPMRImportPMRIN->ui64uiPassword,
+					psPMRImportPMRIN->ui64uiSize,
+					psPMRImportPMRIN->ui32uiLog2Contig,
+					&psPMRInt);
+	/* Exit early if bridged call fails */
+	if(psPMRImportPMROUT->eError != PVRSRV_OK)
+	{
+		PMRUnlock();
+		goto PMRImportPMR_exit;
+	}
+	PMRUnlock();
+
+
+	psPMRImportPMROUT->eError = PVRSRVAllocHandle(psConnection->psHandleBase,
+							&psPMRImportPMROUT->hPMR,
+							(IMG_VOID *) psPMRInt,
+							PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+							,(PFN_HANDLE_RELEASE)&PMRUnrefPMR);
+	if (psPMRImportPMROUT->eError != PVRSRV_OK)
+	{
+		goto PMRImportPMR_exit;
+	}
+
+
+
+
+PMRImportPMR_exit:
+	if (psPMRImportPMROUT->eError != PVRSRV_OK)
+	{
+		if (psPMRInt)
+		{
+			PMRUnrefPMR(psPMRInt);
+		}
+	}
+
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDevmemIntCtxCreate(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DEVMEMINTCTXCREATE *psDevmemIntCtxCreateIN,
+					  PVRSRV_BRIDGE_OUT_DEVMEMINTCTXCREATE *psDevmemIntCtxCreateOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hDeviceNodeInt = IMG_NULL;
+	DEVMEMINT_CTX * psDevMemServerContextInt = IMG_NULL;
+	IMG_HANDLE hPrivDataInt = IMG_NULL;
+
+
+
+	psDevmemIntCtxCreateOUT->hDevMemServerContext = IMG_NULL;
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psDevmemIntCtxCreateOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &hDeviceNodeInt,
+											psDevmemIntCtxCreateIN->hDeviceNode,
+											PVRSRV_HANDLE_TYPE_DEV_NODE);
+					if(psDevmemIntCtxCreateOUT->eError != PVRSRV_OK)
+					{
+						goto DevmemIntCtxCreate_exit;
+					}
+				}
+
+
+	psDevmemIntCtxCreateOUT->eError =
+		DevmemIntCtxCreate(
+					hDeviceNodeInt,
+					&psDevMemServerContextInt,
+					&hPrivDataInt);
+	/* Exit early if bridged call fails */
+	if(psDevmemIntCtxCreateOUT->eError != PVRSRV_OK)
+	{
+		goto DevmemIntCtxCreate_exit;
+	}
+
+
+	psDevmemIntCtxCreateOUT->eError = PVRSRVAllocHandle(psConnection->psHandleBase,
+							&psDevmemIntCtxCreateOUT->hDevMemServerContext,
+							(IMG_VOID *) psDevMemServerContextInt,
+							PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+							,(PFN_HANDLE_RELEASE)&DevmemIntCtxDestroy);
+	if (psDevmemIntCtxCreateOUT->eError != PVRSRV_OK)
+	{
+		goto DevmemIntCtxCreate_exit;
+	}
+
+
+	psDevmemIntCtxCreateOUT->eError = PVRSRVAllocSubHandle(psConnection->psHandleBase,
+							&psDevmemIntCtxCreateOUT->hPrivData,
+							(IMG_VOID *) hPrivDataInt,
+							PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+							,psDevmemIntCtxCreateOUT->hDevMemServerContext);
+	if (psDevmemIntCtxCreateOUT->eError != PVRSRV_OK)
+	{
+		goto DevmemIntCtxCreate_exit;
+	}
+
+
+
+
+DevmemIntCtxCreate_exit:
+	if (psDevmemIntCtxCreateOUT->eError != PVRSRV_OK)
+	{
+		if (psDevmemIntCtxCreateOUT->hDevMemServerContext)
+		{
+			PVRSRV_ERROR eError = PVRSRVReleaseHandle(psConnection->psHandleBase,
+						(IMG_HANDLE) psDevmemIntCtxCreateOUT->hDevMemServerContext,
+						PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+
+			/* Releasing the handle should free/destroy/release the resource. This should never fail... */
+			PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY));
+
+			/* Avoid freeing/destroying/releasing the resource a second time below */
+			psDevMemServerContextInt = IMG_NULL;
+		}
+
+
+		if (psDevMemServerContextInt)
+		{
+			DevmemIntCtxDestroy(psDevMemServerContextInt);
+		}
+	}
+
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDevmemIntCtxDestroy(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DEVMEMINTCTXDESTROY *psDevmemIntCtxDestroyIN,
+					  PVRSRV_BRIDGE_OUT_DEVMEMINTCTXDESTROY *psDevmemIntCtxDestroyOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+	psDevmemIntCtxDestroyOUT->eError =
+		PVRSRVReleaseHandle(psConnection->psHandleBase,
+					(IMG_HANDLE) psDevmemIntCtxDestroyIN->hDevmemServerContext,
+					PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+	if ((psDevmemIntCtxDestroyOUT->eError != PVRSRV_OK) && (psDevmemIntCtxDestroyOUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_ASSERT(0);
+		goto DevmemIntCtxDestroy_exit;
+	}
+
+
+
+DevmemIntCtxDestroy_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDevmemIntHeapCreate(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE *psDevmemIntHeapCreateIN,
+					  PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPCREATE *psDevmemIntHeapCreateOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	DEVMEMINT_CTX * psDevmemCtxInt = IMG_NULL;
+	DEVMEMINT_HEAP * psDevmemHeapPtrInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psDevmemIntHeapCreateOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psDevmemCtxInt,
+											psDevmemIntHeapCreateIN->hDevmemCtx,
+											PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+					if(psDevmemIntHeapCreateOUT->eError != PVRSRV_OK)
+					{
+						goto DevmemIntHeapCreate_exit;
+					}
+				}
+
+
+	psDevmemIntHeapCreateOUT->eError =
+		DevmemIntHeapCreate(
+					psDevmemCtxInt,
+					psDevmemIntHeapCreateIN->sHeapBaseAddr,
+					psDevmemIntHeapCreateIN->uiHeapLength,
+					psDevmemIntHeapCreateIN->ui32Log2DataPageSize,
+					&psDevmemHeapPtrInt);
+	/* Exit early if bridged call fails */
+	if(psDevmemIntHeapCreateOUT->eError != PVRSRV_OK)
+	{
+		goto DevmemIntHeapCreate_exit;
+	}
+
+
+	psDevmemIntHeapCreateOUT->eError = PVRSRVAllocHandle(psConnection->psHandleBase,
+							&psDevmemIntHeapCreateOUT->hDevmemHeapPtr,
+							(IMG_VOID *) psDevmemHeapPtrInt,
+							PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+							,(PFN_HANDLE_RELEASE)&DevmemIntHeapDestroy);
+	if (psDevmemIntHeapCreateOUT->eError != PVRSRV_OK)
+	{
+		goto DevmemIntHeapCreate_exit;
+	}
+
+
+
+
+DevmemIntHeapCreate_exit:
+	if (psDevmemIntHeapCreateOUT->eError != PVRSRV_OK)
+	{
+		if (psDevmemHeapPtrInt)
+		{
+			DevmemIntHeapDestroy(psDevmemHeapPtrInt);
+		}
+	}
+
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDevmemIntHeapDestroy(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DEVMEMINTHEAPDESTROY *psDevmemIntHeapDestroyIN,
+					  PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPDESTROY *psDevmemIntHeapDestroyOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+	psDevmemIntHeapDestroyOUT->eError =
+		PVRSRVReleaseHandle(psConnection->psHandleBase,
+					(IMG_HANDLE) psDevmemIntHeapDestroyIN->hDevmemHeap,
+					PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP);
+	if ((psDevmemIntHeapDestroyOUT->eError != PVRSRV_OK) && (psDevmemIntHeapDestroyOUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_ASSERT(0);
+		goto DevmemIntHeapDestroy_exit;
+	}
+
+
+
+DevmemIntHeapDestroy_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDevmemIntMapPMR(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DEVMEMINTMAPPMR *psDevmemIntMapPMRIN,
+					  PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR *psDevmemIntMapPMROUT,
+					 CONNECTION_DATA *psConnection)
+{
+	DEVMEMINT_HEAP * psDevmemServerHeapInt = IMG_NULL;
+	DEVMEMINT_RESERVATION * psReservationInt = IMG_NULL;
+	PMR * psPMRInt = IMG_NULL;
+	DEVMEMINT_MAPPING * psMappingInt = IMG_NULL;
+
+
+
+
+
+	PMRLock();
+
+
+				{
+					/* Look up the address from the handle */
+					psDevmemIntMapPMROUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psDevmemServerHeapInt,
+											psDevmemIntMapPMRIN->hDevmemServerHeap,
+											PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP);
+					if(psDevmemIntMapPMROUT->eError != PVRSRV_OK)
+					{
+						PMRUnlock();
+						goto DevmemIntMapPMR_exit;
+					}
+				}
+
+
+				{
+					/* Look up the address from the handle */
+					psDevmemIntMapPMROUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psReservationInt,
+											psDevmemIntMapPMRIN->hReservation,
+											PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION);
+					if(psDevmemIntMapPMROUT->eError != PVRSRV_OK)
+					{
+						PMRUnlock();
+						goto DevmemIntMapPMR_exit;
+					}
+				}
+
+
+				{
+					/* Look up the address from the handle */
+					psDevmemIntMapPMROUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psPMRInt,
+											psDevmemIntMapPMRIN->hPMR,
+											PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+					if(psDevmemIntMapPMROUT->eError != PVRSRV_OK)
+					{
+						PMRUnlock();
+						goto DevmemIntMapPMR_exit;
+					}
+				}
+
+
+	psDevmemIntMapPMROUT->eError =
+		DevmemIntMapPMR(
+					psDevmemServerHeapInt,
+					psReservationInt,
+					psPMRInt,
+					psDevmemIntMapPMRIN->uiMapFlags,
+					&psMappingInt);
+	/* Exit early if bridged call fails */
+	if(psDevmemIntMapPMROUT->eError != PVRSRV_OK)
+	{
+		PMRUnlock();
+		goto DevmemIntMapPMR_exit;
+	}
+	PMRUnlock();
+
+
+	psDevmemIntMapPMROUT->eError = PVRSRVAllocHandle(psConnection->psHandleBase,
+							&psDevmemIntMapPMROUT->hMapping,
+							(IMG_VOID *) psMappingInt,
+							PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+							,(PFN_HANDLE_RELEASE)&DevmemIntUnmapPMR);
+	if (psDevmemIntMapPMROUT->eError != PVRSRV_OK)
+	{
+		goto DevmemIntMapPMR_exit;
+	}
+
+
+
+
+DevmemIntMapPMR_exit:
+	if (psDevmemIntMapPMROUT->eError != PVRSRV_OK)
+	{
+		if (psMappingInt)
+		{
+			DevmemIntUnmapPMR(psMappingInt);
+		}
+	}
+
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDevmemIntUnmapPMR(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPMR *psDevmemIntUnmapPMRIN,
+					  PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPMR *psDevmemIntUnmapPMROUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+	PMRLock();
+
+
+
+
+	psDevmemIntUnmapPMROUT->eError =
+		PVRSRVReleaseHandle(psConnection->psHandleBase,
+					(IMG_HANDLE) psDevmemIntUnmapPMRIN->hMapping,
+					PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING);
+	if ((psDevmemIntUnmapPMROUT->eError != PVRSRV_OK) && (psDevmemIntUnmapPMROUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_ASSERT(0);
+		PMRUnlock();
+		goto DevmemIntUnmapPMR_exit;
+	}
+
+	PMRUnlock();
+
+
+DevmemIntUnmapPMR_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDevmemIntReserveRange(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGE *psDevmemIntReserveRangeIN,
+					  PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGE *psDevmemIntReserveRangeOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	DEVMEMINT_HEAP * psDevmemServerHeapInt = IMG_NULL;
+	DEVMEMINT_RESERVATION * psReservationInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psDevmemIntReserveRangeOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psDevmemServerHeapInt,
+											psDevmemIntReserveRangeIN->hDevmemServerHeap,
+											PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP);
+					if(psDevmemIntReserveRangeOUT->eError != PVRSRV_OK)
+					{
+						goto DevmemIntReserveRange_exit;
+					}
+				}
+
+
+	psDevmemIntReserveRangeOUT->eError =
+		DevmemIntReserveRange(
+					psDevmemServerHeapInt,
+					psDevmemIntReserveRangeIN->sAddress,
+					psDevmemIntReserveRangeIN->uiLength,
+					&psReservationInt);
+	/* Exit early if bridged call fails */
+	if(psDevmemIntReserveRangeOUT->eError != PVRSRV_OK)
+	{
+		goto DevmemIntReserveRange_exit;
+	}
+
+
+	psDevmemIntReserveRangeOUT->eError = PVRSRVAllocHandle(psConnection->psHandleBase,
+							&psDevmemIntReserveRangeOUT->hReservation,
+							(IMG_VOID *) psReservationInt,
+							PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+							,(PFN_HANDLE_RELEASE)&DevmemIntUnreserveRange);
+	if (psDevmemIntReserveRangeOUT->eError != PVRSRV_OK)
+	{
+		goto DevmemIntReserveRange_exit;
+	}
+
+
+
+
+DevmemIntReserveRange_exit:
+	if (psDevmemIntReserveRangeOUT->eError != PVRSRV_OK)
+	{
+		if (psReservationInt)
+		{
+			DevmemIntUnreserveRange(psReservationInt);
+		}
+	}
+
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDevmemIntUnreserveRange(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DEVMEMINTUNRESERVERANGE *psDevmemIntUnreserveRangeIN,
+					  PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGE *psDevmemIntUnreserveRangeOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+	psDevmemIntUnreserveRangeOUT->eError =
+		PVRSRVReleaseHandle(psConnection->psHandleBase,
+					(IMG_HANDLE) psDevmemIntUnreserveRangeIN->hReservation,
+					PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION);
+	if ((psDevmemIntUnreserveRangeOUT->eError != PVRSRV_OK) && (psDevmemIntUnreserveRangeOUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_ASSERT(0);
+		goto DevmemIntUnreserveRange_exit;
+	}
+
+
+
+DevmemIntUnreserveRange_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePhysmemNewRamBackedPMR(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDPMR *psPhysmemNewRamBackedPMRIN,
+					  PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDPMR *psPhysmemNewRamBackedPMROUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hDeviceNodeInt = IMG_NULL;
+	IMG_BOOL *bMappingTableInt = IMG_NULL;
+	PMR * psPMRPtrInt = IMG_NULL;
+
+
+
+
+	if (psPhysmemNewRamBackedPMRIN->ui32NumVirtChunks != 0)
+	{
+		bMappingTableInt = OSAllocMem(psPhysmemNewRamBackedPMRIN->ui32NumVirtChunks * sizeof(IMG_BOOL));
+		if (!bMappingTableInt)
+		{
+			psPhysmemNewRamBackedPMROUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto PhysmemNewRamBackedPMR_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psPhysmemNewRamBackedPMRIN->pbMappingTable, psPhysmemNewRamBackedPMRIN->ui32NumVirtChunks * sizeof(IMG_BOOL))
+				|| (OSCopyFromUser(NULL, bMappingTableInt, psPhysmemNewRamBackedPMRIN->pbMappingTable,
+				psPhysmemNewRamBackedPMRIN->ui32NumVirtChunks * sizeof(IMG_BOOL)) != PVRSRV_OK) )
+			{
+				psPhysmemNewRamBackedPMROUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto PhysmemNewRamBackedPMR_exit;
+			}
+
+	PMRLock();
+
+
+				{
+					/* Look up the address from the handle */
+					psPhysmemNewRamBackedPMROUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &hDeviceNodeInt,
+											psPhysmemNewRamBackedPMRIN->hDeviceNode,
+											PVRSRV_HANDLE_TYPE_DEV_NODE);
+					if(psPhysmemNewRamBackedPMROUT->eError != PVRSRV_OK)
+					{
+						PMRUnlock();
+						goto PhysmemNewRamBackedPMR_exit;
+					}
+				}
+
+
+	psPhysmemNewRamBackedPMROUT->eError =
+		PhysmemNewRamBackedPMR(
+					hDeviceNodeInt,
+					psPhysmemNewRamBackedPMRIN->uiSize,
+					psPhysmemNewRamBackedPMRIN->uiChunkSize,
+					psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks,
+					psPhysmemNewRamBackedPMRIN->ui32NumVirtChunks,
+					bMappingTableInt,
+					psPhysmemNewRamBackedPMRIN->ui32Log2PageSize,
+					psPhysmemNewRamBackedPMRIN->uiFlags,
+					&psPMRPtrInt);
+	/* Exit early if bridged call fails */
+	if(psPhysmemNewRamBackedPMROUT->eError != PVRSRV_OK)
+	{
+		PMRUnlock();
+		goto PhysmemNewRamBackedPMR_exit;
+	}
+	PMRUnlock();
+
+
+	psPhysmemNewRamBackedPMROUT->eError = PVRSRVAllocHandle(psConnection->psHandleBase,
+							&psPhysmemNewRamBackedPMROUT->hPMRPtr,
+							(IMG_VOID *) psPMRPtrInt,
+							PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+							,(PFN_HANDLE_RELEASE)&PMRUnrefPMR);
+	if (psPhysmemNewRamBackedPMROUT->eError != PVRSRV_OK)
+	{
+		goto PhysmemNewRamBackedPMR_exit;
+	}
+
+
+
+
+PhysmemNewRamBackedPMR_exit:
+	if (psPhysmemNewRamBackedPMROUT->eError != PVRSRV_OK)
+	{
+		if (psPMRPtrInt)
+		{
+			PMRUnrefPMR(psPMRPtrInt);
+		}
+	}
+
+	if (bMappingTableInt)
+		OSFreeMem(bMappingTableInt);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePMRLocalImportPMR(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PMRLOCALIMPORTPMR *psPMRLocalImportPMRIN,
+					  PVRSRV_BRIDGE_OUT_PMRLOCALIMPORTPMR *psPMRLocalImportPMROUT,
+					 CONNECTION_DATA *psConnection)
+{
+	PMR * psExtHandleInt = IMG_NULL;
+	PMR * psPMRInt = IMG_NULL;
+
+
+
+
+
+	PMRLock();
+
+
+				{
+					/* Look up the address from the handle */
+					psPMRLocalImportPMROUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psExtHandleInt,
+											psPMRLocalImportPMRIN->hExtHandle,
+											PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT);
+					if(psPMRLocalImportPMROUT->eError != PVRSRV_OK)
+					{
+						PMRUnlock();
+						goto PMRLocalImportPMR_exit;
+					}
+				}
+
+
+	psPMRLocalImportPMROUT->eError =
+		PMRLocalImportPMR(
+					psExtHandleInt,
+					&psPMRInt,
+					&psPMRLocalImportPMROUT->uiSize,
+					&psPMRLocalImportPMROUT->sAlign);
+	/* Exit early if bridged call fails */
+	if(psPMRLocalImportPMROUT->eError != PVRSRV_OK)
+	{
+		PMRUnlock();
+		goto PMRLocalImportPMR_exit;
+	}
+	PMRUnlock();
+
+
+	psPMRLocalImportPMROUT->eError = PVRSRVAllocHandle(psConnection->psHandleBase,
+							&psPMRLocalImportPMROUT->hPMR,
+							(IMG_VOID *) psPMRInt,
+							PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+							,(PFN_HANDLE_RELEASE)&PMRUnrefPMR);
+	if (psPMRLocalImportPMROUT->eError != PVRSRV_OK)
+	{
+		goto PMRLocalImportPMR_exit;
+	}
+
+
+
+
+PMRLocalImportPMR_exit:
+	if (psPMRLocalImportPMROUT->eError != PVRSRV_OK)
+	{
+		if (psPMRInt)
+		{
+			PMRUnrefPMR(psPMRInt);
+		}
+	}
+
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePMRUnrefPMR(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PMRUNREFPMR *psPMRUnrefPMRIN,
+					  PVRSRV_BRIDGE_OUT_PMRUNREFPMR *psPMRUnrefPMROUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+	PMRLock();
+
+
+
+
+	psPMRUnrefPMROUT->eError =
+		PVRSRVReleaseHandle(psConnection->psHandleBase,
+					(IMG_HANDLE) psPMRUnrefPMRIN->hPMR,
+					PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+	if ((psPMRUnrefPMROUT->eError != PVRSRV_OK) && (psPMRUnrefPMROUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_ASSERT(0);
+		PMRUnlock();
+		goto PMRUnrefPMR_exit;
+	}
+
+	PMRUnlock();
+
+
+PMRUnrefPMR_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDevmemSLCFlushInvalRequest(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DEVMEMSLCFLUSHINVALREQUEST *psDevmemSLCFlushInvalRequestIN,
+					  PVRSRV_BRIDGE_OUT_DEVMEMSLCFLUSHINVALREQUEST *psDevmemSLCFlushInvalRequestOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hDeviceNodeInt = IMG_NULL;
+	PMR * psPmrInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psDevmemSLCFlushInvalRequestOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &hDeviceNodeInt,
+											psDevmemSLCFlushInvalRequestIN->hDeviceNode,
+											PVRSRV_HANDLE_TYPE_DEV_NODE);
+					if(psDevmemSLCFlushInvalRequestOUT->eError != PVRSRV_OK)
+					{
+						goto DevmemSLCFlushInvalRequest_exit;
+					}
+				}
+
+
+				{
+					/* Look up the address from the handle */
+					psDevmemSLCFlushInvalRequestOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psPmrInt,
+											psDevmemSLCFlushInvalRequestIN->hPmr,
+											PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+					if(psDevmemSLCFlushInvalRequestOUT->eError != PVRSRV_OK)
+					{
+						goto DevmemSLCFlushInvalRequest_exit;
+					}
+				}
+
+
+	psDevmemSLCFlushInvalRequestOUT->eError =
+		DevmemSLCFlushInvalRequest(
+					hDeviceNodeInt,
+					psPmrInt);
+
+
+
+
+DevmemSLCFlushInvalRequest_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDevmemIsVDevAddrValid(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DEVMEMISVDEVADDRVALID *psDevmemIsVDevAddrValidIN,
+					  PVRSRV_BRIDGE_OUT_DEVMEMISVDEVADDRVALID *psDevmemIsVDevAddrValidOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	DEVMEMINT_CTX * psDevmemCtxInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psDevmemIsVDevAddrValidOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psDevmemCtxInt,
+											psDevmemIsVDevAddrValidIN->hDevmemCtx,
+											PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+					if(psDevmemIsVDevAddrValidOUT->eError != PVRSRV_OK)
+					{
+						goto DevmemIsVDevAddrValid_exit;
+					}
+				}
+
+
+	psDevmemIsVDevAddrValidOUT->eError =
+		DevmemIntIsVDevAddrValid(
+					psDevmemCtxInt,
+					psDevmemIsVDevAddrValidIN->sAddress);
+
+
+
+
+DevmemIsVDevAddrValid_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeHeapCfgHeapConfigCount(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGCOUNT *psHeapCfgHeapConfigCountIN,
+					  PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGCOUNT *psHeapCfgHeapConfigCountOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hDeviceNodeInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psHeapCfgHeapConfigCountOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &hDeviceNodeInt,
+											psHeapCfgHeapConfigCountIN->hDeviceNode,
+											PVRSRV_HANDLE_TYPE_DEV_NODE);
+					if(psHeapCfgHeapConfigCountOUT->eError != PVRSRV_OK)
+					{
+						goto HeapCfgHeapConfigCount_exit;
+					}
+				}
+
+
+	psHeapCfgHeapConfigCountOUT->eError =
+		HeapCfgHeapConfigCount(
+					hDeviceNodeInt,
+					&psHeapCfgHeapConfigCountOUT->ui32NumHeapConfigs);
+
+
+
+
+HeapCfgHeapConfigCount_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeHeapCfgHeapCount(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_HEAPCFGHEAPCOUNT *psHeapCfgHeapCountIN,
+					  PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCOUNT *psHeapCfgHeapCountOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hDeviceNodeInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psHeapCfgHeapCountOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &hDeviceNodeInt,
+											psHeapCfgHeapCountIN->hDeviceNode,
+											PVRSRV_HANDLE_TYPE_DEV_NODE);
+					if(psHeapCfgHeapCountOUT->eError != PVRSRV_OK)
+					{
+						goto HeapCfgHeapCount_exit;
+					}
+				}
+
+
+	psHeapCfgHeapCountOUT->eError =
+		HeapCfgHeapCount(
+					hDeviceNodeInt,
+					psHeapCfgHeapCountIN->ui32HeapConfigIndex,
+					&psHeapCfgHeapCountOUT->ui32NumHeaps);
+
+
+
+
+HeapCfgHeapCount_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeHeapCfgHeapConfigName(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGNAME *psHeapCfgHeapConfigNameIN,
+					  PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGNAME *psHeapCfgHeapConfigNameOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hDeviceNodeInt = IMG_NULL;
+	IMG_CHAR *puiHeapConfigNameInt = IMG_NULL;
+
+
+	psHeapCfgHeapConfigNameOUT->puiHeapConfigName = psHeapCfgHeapConfigNameIN->puiHeapConfigName;
+
+
+	if (psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz != 0)
+	{
+		puiHeapConfigNameInt = OSAllocMem(psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz * sizeof(IMG_CHAR));
+		if (!puiHeapConfigNameInt)
+		{
+			psHeapCfgHeapConfigNameOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto HeapCfgHeapConfigName_exit;
+		}
+	}
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psHeapCfgHeapConfigNameOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &hDeviceNodeInt,
+											psHeapCfgHeapConfigNameIN->hDeviceNode,
+											PVRSRV_HANDLE_TYPE_DEV_NODE);
+					if(psHeapCfgHeapConfigNameOUT->eError != PVRSRV_OK)
+					{
+						goto HeapCfgHeapConfigName_exit;
+					}
+				}
+
+
+	psHeapCfgHeapConfigNameOUT->eError =
+		HeapCfgHeapConfigName(
+					hDeviceNodeInt,
+					psHeapCfgHeapConfigNameIN->ui32HeapConfigIndex,
+					psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz,
+					puiHeapConfigNameInt);
+
+
+
+	if ( !OSAccessOK(PVR_VERIFY_WRITE, (IMG_VOID*) psHeapCfgHeapConfigNameOUT->puiHeapConfigName, (psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz * sizeof(IMG_CHAR))) 
+		|| (OSCopyToUser(NULL, psHeapCfgHeapConfigNameOUT->puiHeapConfigName, puiHeapConfigNameInt,
+		(psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz * sizeof(IMG_CHAR))) != PVRSRV_OK) )
+	{
+		psHeapCfgHeapConfigNameOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+		goto HeapCfgHeapConfigName_exit;
+	}
+
+
+HeapCfgHeapConfigName_exit:
+	if (puiHeapConfigNameInt)
+		OSFreeMem(puiHeapConfigNameInt);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeHeapCfgHeapDetails(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_HEAPCFGHEAPDETAILS *psHeapCfgHeapDetailsIN,
+					  PVRSRV_BRIDGE_OUT_HEAPCFGHEAPDETAILS *psHeapCfgHeapDetailsOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hDeviceNodeInt = IMG_NULL;
+	IMG_CHAR *puiHeapNameOutInt = IMG_NULL;
+
+
+	psHeapCfgHeapDetailsOUT->puiHeapNameOut = psHeapCfgHeapDetailsIN->puiHeapNameOut;
+
+
+	if (psHeapCfgHeapDetailsIN->ui32HeapNameBufSz != 0)
+	{
+		puiHeapNameOutInt = OSAllocMem(psHeapCfgHeapDetailsIN->ui32HeapNameBufSz * sizeof(IMG_CHAR));
+		if (!puiHeapNameOutInt)
+		{
+			psHeapCfgHeapDetailsOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto HeapCfgHeapDetails_exit;
+		}
+	}
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psHeapCfgHeapDetailsOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &hDeviceNodeInt,
+											psHeapCfgHeapDetailsIN->hDeviceNode,
+											PVRSRV_HANDLE_TYPE_DEV_NODE);
+					if(psHeapCfgHeapDetailsOUT->eError != PVRSRV_OK)
+					{
+						goto HeapCfgHeapDetails_exit;
+					}
+				}
+
+
+	psHeapCfgHeapDetailsOUT->eError =
+		HeapCfgHeapDetails(
+					hDeviceNodeInt,
+					psHeapCfgHeapDetailsIN->ui32HeapConfigIndex,
+					psHeapCfgHeapDetailsIN->ui32HeapIndex,
+					psHeapCfgHeapDetailsIN->ui32HeapNameBufSz,
+					puiHeapNameOutInt,
+					&psHeapCfgHeapDetailsOUT->sDevVAddrBase,
+					&psHeapCfgHeapDetailsOUT->uiHeapLength,
+					&psHeapCfgHeapDetailsOUT->ui32Log2DataPageSizeOut,
+					&psHeapCfgHeapDetailsOUT->ui32Log2ImportAlignmentOut);
+
+
+
+	if ( !OSAccessOK(PVR_VERIFY_WRITE, (IMG_VOID*) psHeapCfgHeapDetailsOUT->puiHeapNameOut, (psHeapCfgHeapDetailsIN->ui32HeapNameBufSz * sizeof(IMG_CHAR))) 
+		|| (OSCopyToUser(NULL, psHeapCfgHeapDetailsOUT->puiHeapNameOut, puiHeapNameOutInt,
+		(psHeapCfgHeapDetailsIN->ui32HeapNameBufSz * sizeof(IMG_CHAR))) != PVRSRV_OK) )
+	{
+		psHeapCfgHeapDetailsOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+		goto HeapCfgHeapDetails_exit;
+	}
+
+
+HeapCfgHeapDetails_exit:
+	if (puiHeapNameOutInt)
+		OSFreeMem(puiHeapNameOutInt);
+
+	return 0;
+}
+
+
+
+/* *************************************************************************** 
+ * Server bridge dispatch related glue 
+ */
+
+
+PVRSRV_ERROR InitMMBridge(IMG_VOID);
+PVRSRV_ERROR DeinitMMBridge(IMG_VOID);
+
+/*
+ * Register all MM functions with services
+ */
+PVRSRV_ERROR InitMMBridge(IMG_VOID)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMREXPORTPMR, PVRSRVBridgePMRExportPMR,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNEXPORTPMR, PVRSRVBridgePMRUnexportPMR,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRGETUID, PVRSRVBridgePMRGetUID,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRMAKESERVEREXPORTCLIENTEXPORT, PVRSRVBridgePMRMakeServerExportClientExport,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNMAKESERVEREXPORTCLIENTEXPORT, PVRSRVBridgePMRUnmakeServerExportClientExport,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRIMPORTPMR, PVRSRVBridgePMRImportPMR,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTCTXCREATE, PVRSRVBridgeDevmemIntCtxCreate,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTCTXDESTROY, PVRSRVBridgeDevmemIntCtxDestroy,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTHEAPCREATE, PVRSRVBridgeDevmemIntHeapCreate,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTHEAPDESTROY, PVRSRVBridgeDevmemIntHeapDestroy,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTMAPPMR, PVRSRVBridgeDevmemIntMapPMR,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPMR, PVRSRVBridgeDevmemIntUnmapPMR,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGE, PVRSRVBridgeDevmemIntReserveRange,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGE, PVRSRVBridgeDevmemIntUnreserveRange,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDPMR, PVRSRVBridgePhysmemNewRamBackedPMR,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRLOCALIMPORTPMR, PVRSRVBridgePMRLocalImportPMR,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNREFPMR, PVRSRVBridgePMRUnrefPMR,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMSLCFLUSHINVALREQUEST, PVRSRVBridgeDevmemSLCFlushInvalRequest,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMISVDEVADDRVALID, PVRSRVBridgeDevmemIsVDevAddrValid,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGCOUNT, PVRSRVBridgeHeapCfgHeapConfigCount,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPCOUNT, PVRSRVBridgeHeapCfgHeapCount,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGNAME, PVRSRVBridgeHeapCfgHeapConfigName,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPDETAILS, PVRSRVBridgeHeapCfgHeapDetails,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all mm functions with services
+ */
+PVRSRV_ERROR DeinitMMBridge(IMG_VOID)
+{
+	return PVRSRV_OK;
+}
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/pdump_bridge/common_pdump_bridge.h b/drivers/external_drivers/intel_media/graphics/rgx/generated/pdump_bridge/common_pdump_bridge.h
new file mode 100644
index 0000000..0c86fc5
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/pdump_bridge/common_pdump_bridge.h
@@ -0,0 +1,107 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common bridge header for pdump
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures that are used by both
+                the client and sever side of the bridge for pdump
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_PDUMP_BRIDGE_H
+#define COMMON_PDUMP_BRIDGE_H
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "devicemem_typedefs.h"
+#include "pdumpdefs.h"
+
+
+#define PVRSRV_BRIDGE_PDUMP_CMD_FIRST			0
+#define PVRSRV_BRIDGE_PDUMP_DEVMEMPDUMPBITMAP			PVRSRV_BRIDGE_PDUMP_CMD_FIRST+0
+#define PVRSRV_BRIDGE_PDUMP_PVRSRVPDUMPCOMMENT			PVRSRV_BRIDGE_PDUMP_CMD_FIRST+1
+#define PVRSRV_BRIDGE_PDUMP_CMD_LAST			(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+1)
+
+
+/*******************************************
+            DevmemPDumpBitmap          
+ *******************************************/
+
+/* Bridge in structure for DevmemPDumpBitmap */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMPDUMPBITMAP_TAG
+{
+	IMG_HANDLE hDeviceNode;
+	IMG_CHAR * puiFileName;
+	IMG_UINT32 ui32FileOffset;
+	IMG_UINT32 ui32Width;
+	IMG_UINT32 ui32Height;
+	IMG_UINT32 ui32StrideInBytes;
+	IMG_DEV_VIRTADDR sDevBaseAddr;
+	IMG_HANDLE hDevmemCtx;
+	IMG_UINT32 ui32Size;
+	PDUMP_PIXEL_FORMAT ePixelFormat;
+	IMG_UINT32 ui32AddrMode;
+	IMG_UINT32 ui32PDumpFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMPDUMPBITMAP;
+
+
+/* Bridge out structure for DevmemPDumpBitmap */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMPDUMPBITMAP_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMPDUMPBITMAP;
+
+/*******************************************
+            PVRSRVPDumpComment          
+ *******************************************/
+
+/* Bridge in structure for PVRSRVPDumpComment */
+typedef struct PVRSRV_BRIDGE_IN_PVRSRVPDUMPCOMMENT_TAG
+{
+	IMG_CHAR * puiComment;
+	IMG_UINT32 ui32Flags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PVRSRVPDUMPCOMMENT;
+
+
+/* Bridge out structure for PVRSRVPDumpComment */
+typedef struct PVRSRV_BRIDGE_OUT_PVRSRVPDUMPCOMMENT_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PVRSRVPDUMPCOMMENT;
+
+#endif /* COMMON_PDUMP_BRIDGE_H */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/pdump_bridge/server_pdump_bridge.c b/drivers/external_drivers/intel_media/graphics/rgx/generated/pdump_bridge/server_pdump_bridge.c
new file mode 100644
index 0000000..ee5e089
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/pdump_bridge/server_pdump_bridge.c
@@ -0,0 +1,250 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server bridge for pdump
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for pdump
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <asm/uaccess.h>
+
+#include "img_defs.h"
+
+#include "devicemem_server.h"
+#include "pdump_km.h"
+
+
+#include "common_pdump_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#if defined (SUPPORT_AUTH)
+#include "osauth.h"
+#endif
+
+#include <linux/slab.h>
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+ 
+static IMG_INT
+PVRSRVBridgeDevmemPDumpBitmap(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DEVMEMPDUMPBITMAP *psDevmemPDumpBitmapIN,
+					  PVRSRV_BRIDGE_OUT_DEVMEMPDUMPBITMAP *psDevmemPDumpBitmapOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hDeviceNodeInt = IMG_NULL;
+	IMG_CHAR *uiFileNameInt = IMG_NULL;
+	DEVMEMINT_CTX * psDevmemCtxInt = IMG_NULL;
+
+
+
+
+	
+	{
+		uiFileNameInt = OSAllocMem(PVRSRV_PDUMP_MAX_FILENAME_SIZE * sizeof(IMG_CHAR));
+		if (!uiFileNameInt)
+		{
+			psDevmemPDumpBitmapOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto DevmemPDumpBitmap_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psDevmemPDumpBitmapIN->puiFileName, PVRSRV_PDUMP_MAX_FILENAME_SIZE * sizeof(IMG_CHAR))
+				|| (OSCopyFromUser(NULL, uiFileNameInt, psDevmemPDumpBitmapIN->puiFileName,
+				PVRSRV_PDUMP_MAX_FILENAME_SIZE * sizeof(IMG_CHAR)) != PVRSRV_OK) )
+			{
+				psDevmemPDumpBitmapOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto DevmemPDumpBitmap_exit;
+			}
+
+
+
+				{
+					/* Look up the address from the handle */
+					psDevmemPDumpBitmapOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &hDeviceNodeInt,
+											psDevmemPDumpBitmapIN->hDeviceNode,
+											PVRSRV_HANDLE_TYPE_DEV_NODE);
+					if(psDevmemPDumpBitmapOUT->eError != PVRSRV_OK)
+					{
+						goto DevmemPDumpBitmap_exit;
+					}
+				}
+
+
+				{
+					/* Look up the address from the handle */
+					psDevmemPDumpBitmapOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psDevmemCtxInt,
+											psDevmemPDumpBitmapIN->hDevmemCtx,
+											PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+					if(psDevmemPDumpBitmapOUT->eError != PVRSRV_OK)
+					{
+						goto DevmemPDumpBitmap_exit;
+					}
+				}
+
+
+	psDevmemPDumpBitmapOUT->eError =
+		DevmemIntPDumpBitmap(
+					hDeviceNodeInt,
+					uiFileNameInt,
+					psDevmemPDumpBitmapIN->ui32FileOffset,
+					psDevmemPDumpBitmapIN->ui32Width,
+					psDevmemPDumpBitmapIN->ui32Height,
+					psDevmemPDumpBitmapIN->ui32StrideInBytes,
+					psDevmemPDumpBitmapIN->sDevBaseAddr,
+					psDevmemCtxInt,
+					psDevmemPDumpBitmapIN->ui32Size,
+					psDevmemPDumpBitmapIN->ePixelFormat,
+					psDevmemPDumpBitmapIN->ui32AddrMode,
+					psDevmemPDumpBitmapIN->ui32PDumpFlags);
+
+
+
+
+DevmemPDumpBitmap_exit:
+	if (uiFileNameInt)
+		OSFreeMem(uiFileNameInt);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePVRSRVPDumpComment(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PVRSRVPDUMPCOMMENT *psPVRSRVPDumpCommentIN,
+					  PVRSRV_BRIDGE_OUT_PVRSRVPDUMPCOMMENT *psPVRSRVPDumpCommentOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_CHAR *uiCommentInt = IMG_NULL;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+
+
+	
+	{
+		uiCommentInt = OSAllocMem(PVRSRV_PDUMP_MAX_COMMENT_SIZE * sizeof(IMG_CHAR));
+		if (!uiCommentInt)
+		{
+			psPVRSRVPDumpCommentOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto PVRSRVPDumpComment_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psPVRSRVPDumpCommentIN->puiComment, PVRSRV_PDUMP_MAX_COMMENT_SIZE * sizeof(IMG_CHAR))
+				|| (OSCopyFromUser(NULL, uiCommentInt, psPVRSRVPDumpCommentIN->puiComment,
+				PVRSRV_PDUMP_MAX_COMMENT_SIZE * sizeof(IMG_CHAR)) != PVRSRV_OK) )
+			{
+				psPVRSRVPDumpCommentOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto PVRSRVPDumpComment_exit;
+			}
+
+
+
+	psPVRSRVPDumpCommentOUT->eError =
+		PDumpCommentKM(
+					uiCommentInt,
+					psPVRSRVPDumpCommentIN->ui32Flags);
+
+
+
+
+PVRSRVPDumpComment_exit:
+	if (uiCommentInt)
+		OSFreeMem(uiCommentInt);
+
+	return 0;
+}
+
+
+
+/* *************************************************************************** 
+ * Server bridge dispatch related glue 
+ */
+
+
+PVRSRV_ERROR InitPDUMPBridge(IMG_VOID);
+PVRSRV_ERROR DeinitPDUMPBridge(IMG_VOID);
+
+/*
+ * Register all PDUMP functions with services
+ */
+PVRSRV_ERROR InitPDUMPBridge(IMG_VOID)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP, PVRSRV_BRIDGE_PDUMP_DEVMEMPDUMPBITMAP, PVRSRVBridgeDevmemPDumpBitmap,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP, PVRSRV_BRIDGE_PDUMP_PVRSRVPDUMPCOMMENT, PVRSRVBridgePVRSRVPDumpComment,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all pdump functions with services
+ */
+PVRSRV_ERROR DeinitPDUMPBridge(IMG_VOID)
+{
+	return PVRSRV_OK;
+}
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/pdumpctrl_bridge/common_pdumpctrl_bridge.h b/drivers/external_drivers/intel_media/graphics/rgx/generated/pdumpctrl_bridge/common_pdumpctrl_bridge.h
new file mode 100644
index 0000000..c56b3d8
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/pdumpctrl_bridge/common_pdumpctrl_bridge.h
@@ -0,0 +1,190 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common bridge header for pdumpctrl
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures that are used by both
+                the client and sever side of the bridge for pdumpctrl
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_PDUMPCTRL_BRIDGE_H
+#define COMMON_PDUMPCTRL_BRIDGE_H
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "services.h"
+
+
+#define PVRSRV_BRIDGE_PDUMPCTRL_CMD_FIRST			0
+#define PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPISCAPTURING			PVRSRV_BRIDGE_PDUMPCTRL_CMD_FIRST+0
+#define PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPSETFRAME			PVRSRV_BRIDGE_PDUMPCTRL_CMD_FIRST+1
+#define PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPGETFRAME			PVRSRV_BRIDGE_PDUMPCTRL_CMD_FIRST+2
+#define PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS			PVRSRV_BRIDGE_PDUMPCTRL_CMD_FIRST+3
+#define PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPISLASTCAPTUREFRAME			PVRSRV_BRIDGE_PDUMPCTRL_CMD_FIRST+4
+#define PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPSTARTINITPHASE			PVRSRV_BRIDGE_PDUMPCTRL_CMD_FIRST+5
+#define PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPSTOPINITPHASE			PVRSRV_BRIDGE_PDUMPCTRL_CMD_FIRST+6
+#define PVRSRV_BRIDGE_PDUMPCTRL_CMD_LAST			(PVRSRV_BRIDGE_PDUMPCTRL_CMD_FIRST+6)
+
+
+/*******************************************
+            PVRSRVPDumpIsCapturing          
+ *******************************************/
+
+/* Bridge in structure for PVRSRVPDumpIsCapturing */
+typedef struct PVRSRV_BRIDGE_IN_PVRSRVPDUMPISCAPTURING_TAG
+{
+	 IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PVRSRVPDUMPISCAPTURING;
+
+
+/* Bridge out structure for PVRSRVPDumpIsCapturing */
+typedef struct PVRSRV_BRIDGE_OUT_PVRSRVPDUMPISCAPTURING_TAG
+{
+	IMG_BOOL bIsCapturing;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PVRSRVPDUMPISCAPTURING;
+
+/*******************************************
+            PVRSRVPDumpSetFrame          
+ *******************************************/
+
+/* Bridge in structure for PVRSRVPDumpSetFrame */
+typedef struct PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETFRAME_TAG
+{
+	IMG_UINT32 ui32Frame;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETFRAME;
+
+
+/* Bridge out structure for PVRSRVPDumpSetFrame */
+typedef struct PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETFRAME_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETFRAME;
+
+/*******************************************
+            PVRSRVPDumpGetFrame          
+ *******************************************/
+
+/* Bridge in structure for PVRSRVPDumpGetFrame */
+typedef struct PVRSRV_BRIDGE_IN_PVRSRVPDUMPGETFRAME_TAG
+{
+	 IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PVRSRVPDUMPGETFRAME;
+
+
+/* Bridge out structure for PVRSRVPDumpGetFrame */
+typedef struct PVRSRV_BRIDGE_OUT_PVRSRVPDUMPGETFRAME_TAG
+{
+	IMG_UINT32 ui32Frame;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PVRSRVPDUMPGETFRAME;
+
+/*******************************************
+            PVRSRVPDumpSetDefaultCaptureParams          
+ *******************************************/
+
+/* Bridge in structure for PVRSRVPDumpSetDefaultCaptureParams */
+typedef struct PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS_TAG
+{
+	IMG_UINT32 ui32Mode;
+	IMG_UINT32 ui32Start;
+	IMG_UINT32 ui32End;
+	IMG_UINT32 ui32Interval;
+	IMG_UINT32 ui32MaxParamFileSize;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS;
+
+
+/* Bridge out structure for PVRSRVPDumpSetDefaultCaptureParams */
+typedef struct PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS;
+
+/*******************************************
+            PVRSRVPDumpIsLastCaptureFrame          
+ *******************************************/
+
+/* Bridge in structure for PVRSRVPDumpIsLastCaptureFrame */
+typedef struct PVRSRV_BRIDGE_IN_PVRSRVPDUMPISLASTCAPTUREFRAME_TAG
+{
+	 IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PVRSRVPDUMPISLASTCAPTUREFRAME;
+
+
+/* Bridge out structure for PVRSRVPDumpIsLastCaptureFrame */
+typedef struct PVRSRV_BRIDGE_OUT_PVRSRVPDUMPISLASTCAPTUREFRAME_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PVRSRVPDUMPISLASTCAPTUREFRAME;
+
+/*******************************************
+            PVRSRVPDumpStartInitPhase          
+ *******************************************/
+
+/* Bridge in structure for PVRSRVPDumpStartInitPhase */
+typedef struct PVRSRV_BRIDGE_IN_PVRSRVPDUMPSTARTINITPHASE_TAG
+{
+	 IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PVRSRVPDUMPSTARTINITPHASE;
+
+
+/* Bridge out structure for PVRSRVPDumpStartInitPhase */
+typedef struct PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSTARTINITPHASE_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSTARTINITPHASE;
+
+/*******************************************
+            PVRSRVPDumpStopInitPhase          
+ *******************************************/
+
+/* Bridge in structure for PVRSRVPDumpStopInitPhase */
+typedef struct PVRSRV_BRIDGE_IN_PVRSRVPDUMPSTOPINITPHASE_TAG
+{
+	IMG_MODULE_ID eModuleID;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PVRSRVPDUMPSTOPINITPHASE;
+
+
+/* Bridge out structure for PVRSRVPDumpStopInitPhase */
+typedef struct PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSTOPINITPHASE_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSTOPINITPHASE;
+
+#endif /* COMMON_PDUMPCTRL_BRIDGE_H */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/pdumpctrl_bridge/server_pdumpctrl_bridge.c b/drivers/external_drivers/intel_media/graphics/rgx/generated/pdumpctrl_bridge/server_pdumpctrl_bridge.c
new file mode 100644
index 0000000..3e544b7
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/pdumpctrl_bridge/server_pdumpctrl_bridge.c
@@ -0,0 +1,316 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server bridge for pdumpctrl
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for pdumpctrl
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <asm/uaccess.h>
+
+#include "img_defs.h"
+
+#include "pdump_km.h"
+
+
+#include "common_pdumpctrl_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#if defined (SUPPORT_AUTH)
+#include "osauth.h"
+#endif
+
+#include <linux/slab.h>
+
+#include "lock.h"
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+ 
+static IMG_INT
+PVRSRVBridgePVRSRVPDumpIsCapturing(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PVRSRVPDUMPISCAPTURING *psPVRSRVPDumpIsCapturingIN,
+					  PVRSRV_BRIDGE_OUT_PVRSRVPDUMPISCAPTURING *psPVRSRVPDumpIsCapturingOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	PVR_UNREFERENCED_PARAMETER(psPVRSRVPDumpIsCapturingIN);
+
+
+
+
+
+
+	psPVRSRVPDumpIsCapturingOUT->eError =
+		PDumpIsCaptureFrameKM(
+					&psPVRSRVPDumpIsCapturingOUT->bIsCapturing);
+
+
+
+
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePVRSRVPDumpSetFrame(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETFRAME *psPVRSRVPDumpSetFrameIN,
+					  PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETFRAME *psPVRSRVPDumpSetFrameOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+	psPVRSRVPDumpSetFrameOUT->eError =
+		PDumpSetFrameKM(psConnection,
+					psPVRSRVPDumpSetFrameIN->ui32Frame);
+
+
+
+
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePVRSRVPDumpGetFrame(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PVRSRVPDUMPGETFRAME *psPVRSRVPDumpGetFrameIN,
+					  PVRSRV_BRIDGE_OUT_PVRSRVPDUMPGETFRAME *psPVRSRVPDumpGetFrameOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+	PVR_UNREFERENCED_PARAMETER(psPVRSRVPDumpGetFrameIN);
+
+
+
+
+
+
+	psPVRSRVPDumpGetFrameOUT->eError =
+		PDumpGetFrameKM(psConnection,
+					&psPVRSRVPDumpGetFrameOUT->ui32Frame);
+
+
+
+
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePVRSRVPDumpSetDefaultCaptureParams(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS *psPVRSRVPDumpSetDefaultCaptureParamsIN,
+					  PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS *psPVRSRVPDumpSetDefaultCaptureParamsOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+
+
+
+
+
+	psPVRSRVPDumpSetDefaultCaptureParamsOUT->eError =
+		PDumpSetDefaultCaptureParamsKM(
+					psPVRSRVPDumpSetDefaultCaptureParamsIN->ui32Mode,
+					psPVRSRVPDumpSetDefaultCaptureParamsIN->ui32Start,
+					psPVRSRVPDumpSetDefaultCaptureParamsIN->ui32End,
+					psPVRSRVPDumpSetDefaultCaptureParamsIN->ui32Interval,
+					psPVRSRVPDumpSetDefaultCaptureParamsIN->ui32MaxParamFileSize);
+
+
+
+
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePVRSRVPDumpIsLastCaptureFrame(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PVRSRVPDUMPISLASTCAPTUREFRAME *psPVRSRVPDumpIsLastCaptureFrameIN,
+					  PVRSRV_BRIDGE_OUT_PVRSRVPDUMPISLASTCAPTUREFRAME *psPVRSRVPDumpIsLastCaptureFrameOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	PVR_UNREFERENCED_PARAMETER(psPVRSRVPDumpIsLastCaptureFrameIN);
+
+
+
+
+
+
+	psPVRSRVPDumpIsLastCaptureFrameOUT->eError =
+		PDumpIsLastCaptureFrameKM(
+					);
+
+
+
+
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePVRSRVPDumpStartInitPhase(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PVRSRVPDUMPSTARTINITPHASE *psPVRSRVPDumpStartInitPhaseIN,
+					  PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSTARTINITPHASE *psPVRSRVPDumpStartInitPhaseOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	PVR_UNREFERENCED_PARAMETER(psPVRSRVPDumpStartInitPhaseIN);
+
+
+
+
+
+
+	psPVRSRVPDumpStartInitPhaseOUT->eError =
+		PDumpStartInitPhaseKM(
+					);
+
+
+
+
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePVRSRVPDumpStopInitPhase(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PVRSRVPDUMPSTOPINITPHASE *psPVRSRVPDumpStopInitPhaseIN,
+					  PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSTOPINITPHASE *psPVRSRVPDumpStopInitPhaseOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+
+
+
+
+
+	psPVRSRVPDumpStopInitPhaseOUT->eError =
+		PDumpStopInitPhaseKM(
+					psPVRSRVPDumpStopInitPhaseIN->eModuleID);
+
+
+
+
+
+	return 0;
+}
+
+
+
+/* *************************************************************************** 
+ * Server bridge dispatch related glue 
+ */
+
+static POS_LOCK pPDUMPCTRLBridgeLock;
+static IMG_BYTE pbyPDUMPCTRLBridgeBuffer[20 +  8];
+
+PVRSRV_ERROR InitPDUMPCTRLBridge(IMG_VOID);
+PVRSRV_ERROR DeinitPDUMPCTRLBridge(IMG_VOID);
+
+/*
+ * Register all PDUMPCTRL functions with services
+ */
+PVRSRV_ERROR InitPDUMPCTRLBridge(IMG_VOID)
+{
+	PVR_LOGR_IF_ERROR(OSLockCreate(&pPDUMPCTRLBridgeLock, LOCK_TYPE_PASSIVE), "OSLockCreate");
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL, PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPISCAPTURING, PVRSRVBridgePVRSRVPDumpIsCapturing,
+					pPDUMPCTRLBridgeLock, pbyPDUMPCTRLBridgeBuffer,
+					20,  8);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL, PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPSETFRAME, PVRSRVBridgePVRSRVPDumpSetFrame,
+					pPDUMPCTRLBridgeLock, pbyPDUMPCTRLBridgeBuffer,
+					20,  8);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL, PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPGETFRAME, PVRSRVBridgePVRSRVPDumpGetFrame,
+					pPDUMPCTRLBridgeLock, pbyPDUMPCTRLBridgeBuffer,
+					20,  8);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL, PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS, PVRSRVBridgePVRSRVPDumpSetDefaultCaptureParams,
+					pPDUMPCTRLBridgeLock, pbyPDUMPCTRLBridgeBuffer,
+					20,  8);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL, PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPISLASTCAPTUREFRAME, PVRSRVBridgePVRSRVPDumpIsLastCaptureFrame,
+					pPDUMPCTRLBridgeLock, pbyPDUMPCTRLBridgeBuffer,
+					20,  8);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL, PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPSTARTINITPHASE, PVRSRVBridgePVRSRVPDumpStartInitPhase,
+					pPDUMPCTRLBridgeLock, pbyPDUMPCTRLBridgeBuffer,
+					20,  8);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL, PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPSTOPINITPHASE, PVRSRVBridgePVRSRVPDumpStopInitPhase,
+					pPDUMPCTRLBridgeLock, pbyPDUMPCTRLBridgeBuffer,
+					20,  8);
+
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all pdumpctrl functions with services
+ */
+PVRSRV_ERROR DeinitPDUMPCTRLBridge(IMG_VOID)
+{
+	PVR_LOGR_IF_ERROR(OSLockDestroy(pPDUMPCTRLBridgeLock), "OSLockDestroy");
+	return PVRSRV_OK;
+}
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/pdumpmm_bridge/client_pdumpmm_bridge.h b/drivers/external_drivers/intel_media/graphics/rgx/generated/pdumpmm_bridge/client_pdumpmm_bridge.h
new file mode 100644
index 0000000..fdcabc8
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/pdumpmm_bridge/client_pdumpmm_bridge.h
@@ -0,0 +1,113 @@
+/*************************************************************************/ /*!
+@File
+@Title          Client bridge header for pdumpmm
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Exports the client bridge functions for pdumpmm
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef CLIENT_PDUMPMM_BRIDGE_H
+#define CLIENT_PDUMPMM_BRIDGE_H
+
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+
+#include "common_pdumpmm_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpLoadMem(IMG_HANDLE hBridge,
+							     IMG_HANDLE hPMR,
+							     IMG_DEVMEM_OFFSET_T uiOffset,
+							     IMG_DEVMEM_SIZE_T uiSize,
+							     IMG_UINT32 ui32PDumpFlags,
+							     IMG_BOOL bbZero);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpLoadMemValue32(IMG_HANDLE hBridge,
+								    IMG_HANDLE hPMR,
+								    IMG_DEVMEM_OFFSET_T uiOffset,
+								    IMG_UINT32 ui32Value,
+								    IMG_UINT32 ui32PDumpFlags);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpLoadMemValue64(IMG_HANDLE hBridge,
+								    IMG_HANDLE hPMR,
+								    IMG_DEVMEM_OFFSET_T uiOffset,
+								    IMG_UINT64 ui64Value,
+								    IMG_UINT32 ui32PDumpFlags);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpSaveToFile(IMG_HANDLE hBridge,
+								IMG_HANDLE hPMR,
+								IMG_DEVMEM_OFFSET_T uiOffset,
+								IMG_DEVMEM_SIZE_T uiSize,
+								IMG_UINT32 ui32ArraySize,
+								const IMG_CHAR *puiFileName);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpSymbolicAddr(IMG_HANDLE hBridge,
+								  IMG_HANDLE hPMR,
+								  IMG_DEVMEM_OFFSET_T uiOffset,
+								  IMG_UINT32 ui32MemspaceNameLen,
+								  IMG_CHAR *puiMemspaceName,
+								  IMG_UINT32 ui32SymbolicAddrLen,
+								  IMG_CHAR *puiSymbolicAddr,
+								  IMG_DEVMEM_OFFSET_T *puiNewOffset,
+								  IMG_DEVMEM_OFFSET_T *puiNextSymName);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpPol32(IMG_HANDLE hBridge,
+							   IMG_HANDLE hPMR,
+							   IMG_DEVMEM_OFFSET_T uiOffset,
+							   IMG_UINT32 ui32Value,
+							   IMG_UINT32 ui32Mask,
+							   PDUMP_POLL_OPERATOR eOperator,
+							   IMG_UINT32 ui32PDumpFlags);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpCBP(IMG_HANDLE hBridge,
+							 IMG_HANDLE hPMR,
+							 IMG_DEVMEM_OFFSET_T uiReadOffset,
+							 IMG_DEVMEM_OFFSET_T uiWriteOffset,
+							 IMG_DEVMEM_SIZE_T uiPacketSize,
+							 IMG_DEVMEM_SIZE_T uiBufferSize);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntPDumpSaveToFileVirtual(IMG_HANDLE hBridge,
+									     IMG_HANDLE hDevmemServerContext,
+									     IMG_DEV_VIRTADDR sAddress,
+									     IMG_DEVMEM_SIZE_T uiSize,
+									     IMG_UINT32 ui32ArraySize,
+									     const IMG_CHAR *puiFileName,
+									     IMG_UINT32 ui32FileOffset,
+									     IMG_UINT32 ui32PDumpFlags);
+
+
+#endif /* CLIENT_PDUMPMM_BRIDGE_H */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/pdumpmm_bridge/common_pdumpmm_bridge.h b/drivers/external_drivers/intel_media/graphics/rgx/generated/pdumpmm_bridge/common_pdumpmm_bridge.h
new file mode 100644
index 0000000..8aa4862
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/pdumpmm_bridge/common_pdumpmm_bridge.h
@@ -0,0 +1,245 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common bridge header for pdumpmm
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures that are used by both
+                the client and sever side of the bridge for pdumpmm
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_PDUMPMM_BRIDGE_H
+#define COMMON_PDUMPMM_BRIDGE_H
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "pdump.h"
+#include "pdumpdefs.h"
+#include "pvrsrv_memallocflags.h"
+#include "devicemem_typedefs.h"
+
+
+#define PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST			0
+#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEM			PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+0
+#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEMVALUE32			PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+1
+#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEMVALUE64			PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+2
+#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPSAVETOFILE			PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+3
+#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPSYMBOLICADDR			PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+4
+#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPPOL32			PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+5
+#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPCBP			PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+6
+#define PVRSRV_BRIDGE_PDUMPMM_DEVMEMINTPDUMPSAVETOFILEVIRTUAL			PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+7
+#define PVRSRV_BRIDGE_PDUMPMM_CMD_LAST			(PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+7)
+
+
+/*******************************************
+            PMRPDumpLoadMem          
+ *******************************************/
+
+/* Bridge in structure for PMRPDumpLoadMem */
+typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEM_TAG
+{
+	IMG_HANDLE hPMR;
+	IMG_DEVMEM_OFFSET_T uiOffset;
+	IMG_DEVMEM_SIZE_T uiSize;
+	IMG_UINT32 ui32PDumpFlags;
+	IMG_BOOL bbZero;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEM;
+
+
+/* Bridge out structure for PMRPDumpLoadMem */
+typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEM_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEM;
+
+/*******************************************
+            PMRPDumpLoadMemValue32          
+ *******************************************/
+
+/* Bridge in structure for PMRPDumpLoadMemValue32 */
+typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE32_TAG
+{
+	IMG_HANDLE hPMR;
+	IMG_DEVMEM_OFFSET_T uiOffset;
+	IMG_UINT32 ui32Value;
+	IMG_UINT32 ui32PDumpFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE32;
+
+
+/* Bridge out structure for PMRPDumpLoadMemValue32 */
+typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE32_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE32;
+
+/*******************************************
+            PMRPDumpLoadMemValue64          
+ *******************************************/
+
+/* Bridge in structure for PMRPDumpLoadMemValue64 */
+typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE64_TAG
+{
+	IMG_HANDLE hPMR;
+	IMG_DEVMEM_OFFSET_T uiOffset;
+	IMG_UINT64 ui64Value;
+	IMG_UINT32 ui32PDumpFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE64;
+
+
+/* Bridge out structure for PMRPDumpLoadMemValue64 */
+typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE64_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE64;
+
+/*******************************************
+            PMRPDumpSaveToFile          
+ *******************************************/
+
+/* Bridge in structure for PMRPDumpSaveToFile */
+typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPSAVETOFILE_TAG
+{
+	IMG_HANDLE hPMR;
+	IMG_DEVMEM_OFFSET_T uiOffset;
+	IMG_DEVMEM_SIZE_T uiSize;
+	IMG_UINT32 ui32ArraySize;
+	const IMG_CHAR * puiFileName;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRPDUMPSAVETOFILE;
+
+
+/* Bridge out structure for PMRPDumpSaveToFile */
+typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPSAVETOFILE_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRPDUMPSAVETOFILE;
+
+/*******************************************
+            PMRPDumpSymbolicAddr          
+ *******************************************/
+
+/* Bridge in structure for PMRPDumpSymbolicAddr */
+typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPSYMBOLICADDR_TAG
+{
+	IMG_HANDLE hPMR;
+	IMG_DEVMEM_OFFSET_T uiOffset;
+	IMG_UINT32 ui32MemspaceNameLen;
+	IMG_UINT32 ui32SymbolicAddrLen;
+	/* Output pointer puiMemspaceName is also an implied input */
+	IMG_CHAR * puiMemspaceName;
+	/* Output pointer puiSymbolicAddr is also an implied input */
+	IMG_CHAR * puiSymbolicAddr;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRPDUMPSYMBOLICADDR;
+
+
+/* Bridge out structure for PMRPDumpSymbolicAddr */
+typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPSYMBOLICADDR_TAG
+{
+	IMG_CHAR * puiMemspaceName;
+	IMG_CHAR * puiSymbolicAddr;
+	IMG_DEVMEM_OFFSET_T uiNewOffset;
+	IMG_DEVMEM_OFFSET_T uiNextSymName;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRPDUMPSYMBOLICADDR;
+
+/*******************************************
+            PMRPDumpPol32          
+ *******************************************/
+
+/* Bridge in structure for PMRPDumpPol32 */
+typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPPOL32_TAG
+{
+	IMG_HANDLE hPMR;
+	IMG_DEVMEM_OFFSET_T uiOffset;
+	IMG_UINT32 ui32Value;
+	IMG_UINT32 ui32Mask;
+	PDUMP_POLL_OPERATOR eOperator;
+	IMG_UINT32 ui32PDumpFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRPDUMPPOL32;
+
+
+/* Bridge out structure for PMRPDumpPol32 */
+typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPPOL32_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRPDUMPPOL32;
+
+/*******************************************
+            PMRPDumpCBP          
+ *******************************************/
+
+/* Bridge in structure for PMRPDumpCBP */
+typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPCBP_TAG
+{
+	IMG_HANDLE hPMR;
+	IMG_DEVMEM_OFFSET_T uiReadOffset;
+	IMG_DEVMEM_OFFSET_T uiWriteOffset;
+	IMG_DEVMEM_SIZE_T uiPacketSize;
+	IMG_DEVMEM_SIZE_T uiBufferSize;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRPDUMPCBP;
+
+
+/* Bridge out structure for PMRPDumpCBP */
+typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPCBP_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRPDUMPCBP;
+
+/*******************************************
+            DevmemIntPDumpSaveToFileVirtual          
+ *******************************************/
+
+/* Bridge in structure for DevmemIntPDumpSaveToFileVirtual */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTPDUMPSAVETOFILEVIRTUAL_TAG
+{
+	IMG_HANDLE hDevmemServerContext;
+	IMG_DEV_VIRTADDR sAddress;
+	IMG_DEVMEM_SIZE_T uiSize;
+	IMG_UINT32 ui32ArraySize;
+	const IMG_CHAR * puiFileName;
+	IMG_UINT32 ui32FileOffset;
+	IMG_UINT32 ui32PDumpFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTPDUMPSAVETOFILEVIRTUAL;
+
+
+/* Bridge out structure for DevmemIntPDumpSaveToFileVirtual */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTPDUMPSAVETOFILEVIRTUAL_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTPDUMPSAVETOFILEVIRTUAL;
+
+#endif /* COMMON_PDUMPMM_BRIDGE_H */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/pdumpmm_bridge/server_pdumpmm_bridge.c b/drivers/external_drivers/intel_media/graphics/rgx/generated/pdumpmm_bridge/server_pdumpmm_bridge.c
new file mode 100644
index 0000000..4c9e338
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/pdumpmm_bridge/server_pdumpmm_bridge.c
@@ -0,0 +1,586 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server bridge for pdumpmm
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for pdumpmm
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <asm/uaccess.h>
+
+#include "img_defs.h"
+
+#include "devicemem_server.h"
+#include "pmr.h"
+#include "physmem.h"
+
+
+#include "common_pdumpmm_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#if defined (SUPPORT_AUTH)
+#include "osauth.h"
+#endif
+
+#include <linux/slab.h>
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+ 
+static IMG_INT
+PVRSRVBridgePMRPDumpLoadMem(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEM *psPMRPDumpLoadMemIN,
+					  PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEM *psPMRPDumpLoadMemOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	PMR * psPMRInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psPMRPDumpLoadMemOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psPMRInt,
+											psPMRPDumpLoadMemIN->hPMR,
+											PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+					if(psPMRPDumpLoadMemOUT->eError != PVRSRV_OK)
+					{
+						goto PMRPDumpLoadMem_exit;
+					}
+				}
+
+
+	psPMRPDumpLoadMemOUT->eError =
+		PMRPDumpLoadMem(
+					psPMRInt,
+					psPMRPDumpLoadMemIN->uiOffset,
+					psPMRPDumpLoadMemIN->uiSize,
+					psPMRPDumpLoadMemIN->ui32PDumpFlags,
+					psPMRPDumpLoadMemIN->bbZero);
+
+
+
+
+PMRPDumpLoadMem_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePMRPDumpLoadMemValue32(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE32 *psPMRPDumpLoadMemValue32IN,
+					  PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE32 *psPMRPDumpLoadMemValue32OUT,
+					 CONNECTION_DATA *psConnection)
+{
+	PMR * psPMRInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psPMRPDumpLoadMemValue32OUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psPMRInt,
+											psPMRPDumpLoadMemValue32IN->hPMR,
+											PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+					if(psPMRPDumpLoadMemValue32OUT->eError != PVRSRV_OK)
+					{
+						goto PMRPDumpLoadMemValue32_exit;
+					}
+				}
+
+
+	psPMRPDumpLoadMemValue32OUT->eError =
+		PMRPDumpLoadMemValue32(
+					psPMRInt,
+					psPMRPDumpLoadMemValue32IN->uiOffset,
+					psPMRPDumpLoadMemValue32IN->ui32Value,
+					psPMRPDumpLoadMemValue32IN->ui32PDumpFlags);
+
+
+
+
+PMRPDumpLoadMemValue32_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePMRPDumpLoadMemValue64(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE64 *psPMRPDumpLoadMemValue64IN,
+					  PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE64 *psPMRPDumpLoadMemValue64OUT,
+					 CONNECTION_DATA *psConnection)
+{
+	PMR * psPMRInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psPMRPDumpLoadMemValue64OUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psPMRInt,
+											psPMRPDumpLoadMemValue64IN->hPMR,
+											PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+					if(psPMRPDumpLoadMemValue64OUT->eError != PVRSRV_OK)
+					{
+						goto PMRPDumpLoadMemValue64_exit;
+					}
+				}
+
+
+	psPMRPDumpLoadMemValue64OUT->eError =
+		PMRPDumpLoadMemValue64(
+					psPMRInt,
+					psPMRPDumpLoadMemValue64IN->uiOffset,
+					psPMRPDumpLoadMemValue64IN->ui64Value,
+					psPMRPDumpLoadMemValue64IN->ui32PDumpFlags);
+
+
+
+
+PMRPDumpLoadMemValue64_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePMRPDumpSaveToFile(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PMRPDUMPSAVETOFILE *psPMRPDumpSaveToFileIN,
+					  PVRSRV_BRIDGE_OUT_PMRPDUMPSAVETOFILE *psPMRPDumpSaveToFileOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	PMR * psPMRInt = IMG_NULL;
+	IMG_CHAR *uiFileNameInt = IMG_NULL;
+
+
+
+
+	if (psPMRPDumpSaveToFileIN->ui32ArraySize != 0)
+	{
+		uiFileNameInt = OSAllocMem(psPMRPDumpSaveToFileIN->ui32ArraySize * sizeof(IMG_CHAR));
+		if (!uiFileNameInt)
+		{
+			psPMRPDumpSaveToFileOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto PMRPDumpSaveToFile_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psPMRPDumpSaveToFileIN->puiFileName, psPMRPDumpSaveToFileIN->ui32ArraySize * sizeof(IMG_CHAR))
+				|| (OSCopyFromUser(NULL, uiFileNameInt, psPMRPDumpSaveToFileIN->puiFileName,
+				psPMRPDumpSaveToFileIN->ui32ArraySize * sizeof(IMG_CHAR)) != PVRSRV_OK) )
+			{
+				psPMRPDumpSaveToFileOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto PMRPDumpSaveToFile_exit;
+			}
+
+
+
+				{
+					/* Look up the address from the handle */
+					psPMRPDumpSaveToFileOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psPMRInt,
+											psPMRPDumpSaveToFileIN->hPMR,
+											PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+					if(psPMRPDumpSaveToFileOUT->eError != PVRSRV_OK)
+					{
+						goto PMRPDumpSaveToFile_exit;
+					}
+				}
+
+
+	psPMRPDumpSaveToFileOUT->eError =
+		PMRPDumpSaveToFile(
+					psPMRInt,
+					psPMRPDumpSaveToFileIN->uiOffset,
+					psPMRPDumpSaveToFileIN->uiSize,
+					psPMRPDumpSaveToFileIN->ui32ArraySize,
+					uiFileNameInt);
+
+
+
+
+PMRPDumpSaveToFile_exit:
+	if (uiFileNameInt)
+		OSFreeMem(uiFileNameInt);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePMRPDumpSymbolicAddr(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PMRPDUMPSYMBOLICADDR *psPMRPDumpSymbolicAddrIN,
+					  PVRSRV_BRIDGE_OUT_PMRPDUMPSYMBOLICADDR *psPMRPDumpSymbolicAddrOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	PMR * psPMRInt = IMG_NULL;
+	IMG_CHAR *puiMemspaceNameInt = IMG_NULL;
+	IMG_CHAR *puiSymbolicAddrInt = IMG_NULL;
+
+
+	psPMRPDumpSymbolicAddrOUT->puiMemspaceName = psPMRPDumpSymbolicAddrIN->puiMemspaceName;
+	psPMRPDumpSymbolicAddrOUT->puiSymbolicAddr = psPMRPDumpSymbolicAddrIN->puiSymbolicAddr;
+
+
+	if (psPMRPDumpSymbolicAddrIN->ui32MemspaceNameLen != 0)
+	{
+		puiMemspaceNameInt = OSAllocMem(psPMRPDumpSymbolicAddrIN->ui32MemspaceNameLen * sizeof(IMG_CHAR));
+		if (!puiMemspaceNameInt)
+		{
+			psPMRPDumpSymbolicAddrOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto PMRPDumpSymbolicAddr_exit;
+		}
+	}
+
+	if (psPMRPDumpSymbolicAddrIN->ui32SymbolicAddrLen != 0)
+	{
+		puiSymbolicAddrInt = OSAllocMem(psPMRPDumpSymbolicAddrIN->ui32SymbolicAddrLen * sizeof(IMG_CHAR));
+		if (!puiSymbolicAddrInt)
+		{
+			psPMRPDumpSymbolicAddrOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto PMRPDumpSymbolicAddr_exit;
+		}
+	}
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psPMRPDumpSymbolicAddrOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psPMRInt,
+											psPMRPDumpSymbolicAddrIN->hPMR,
+											PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+					if(psPMRPDumpSymbolicAddrOUT->eError != PVRSRV_OK)
+					{
+						goto PMRPDumpSymbolicAddr_exit;
+					}
+				}
+
+
+	psPMRPDumpSymbolicAddrOUT->eError =
+		PMR_PDumpSymbolicAddr(
+					psPMRInt,
+					psPMRPDumpSymbolicAddrIN->uiOffset,
+					psPMRPDumpSymbolicAddrIN->ui32MemspaceNameLen,
+					puiMemspaceNameInt,
+					psPMRPDumpSymbolicAddrIN->ui32SymbolicAddrLen,
+					puiSymbolicAddrInt,
+					&psPMRPDumpSymbolicAddrOUT->uiNewOffset,
+					&psPMRPDumpSymbolicAddrOUT->uiNextSymName);
+
+
+
+	if ( !OSAccessOK(PVR_VERIFY_WRITE, (IMG_VOID*) psPMRPDumpSymbolicAddrOUT->puiMemspaceName, (psPMRPDumpSymbolicAddrIN->ui32MemspaceNameLen * sizeof(IMG_CHAR))) 
+		|| (OSCopyToUser(NULL, psPMRPDumpSymbolicAddrOUT->puiMemspaceName, puiMemspaceNameInt,
+		(psPMRPDumpSymbolicAddrIN->ui32MemspaceNameLen * sizeof(IMG_CHAR))) != PVRSRV_OK) )
+	{
+		psPMRPDumpSymbolicAddrOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+		goto PMRPDumpSymbolicAddr_exit;
+	}
+
+	if ( !OSAccessOK(PVR_VERIFY_WRITE, (IMG_VOID*) psPMRPDumpSymbolicAddrOUT->puiSymbolicAddr, (psPMRPDumpSymbolicAddrIN->ui32SymbolicAddrLen * sizeof(IMG_CHAR))) 
+		|| (OSCopyToUser(NULL, psPMRPDumpSymbolicAddrOUT->puiSymbolicAddr, puiSymbolicAddrInt,
+		(psPMRPDumpSymbolicAddrIN->ui32SymbolicAddrLen * sizeof(IMG_CHAR))) != PVRSRV_OK) )
+	{
+		psPMRPDumpSymbolicAddrOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+		goto PMRPDumpSymbolicAddr_exit;
+	}
+
+
+PMRPDumpSymbolicAddr_exit:
+	if (puiMemspaceNameInt)
+		OSFreeMem(puiMemspaceNameInt);
+	if (puiSymbolicAddrInt)
+		OSFreeMem(puiSymbolicAddrInt);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePMRPDumpPol32(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PMRPDUMPPOL32 *psPMRPDumpPol32IN,
+					  PVRSRV_BRIDGE_OUT_PMRPDUMPPOL32 *psPMRPDumpPol32OUT,
+					 CONNECTION_DATA *psConnection)
+{
+	PMR * psPMRInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psPMRPDumpPol32OUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psPMRInt,
+											psPMRPDumpPol32IN->hPMR,
+											PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+					if(psPMRPDumpPol32OUT->eError != PVRSRV_OK)
+					{
+						goto PMRPDumpPol32_exit;
+					}
+				}
+
+
+	psPMRPDumpPol32OUT->eError =
+		PMRPDumpPol32(
+					psPMRInt,
+					psPMRPDumpPol32IN->uiOffset,
+					psPMRPDumpPol32IN->ui32Value,
+					psPMRPDumpPol32IN->ui32Mask,
+					psPMRPDumpPol32IN->eOperator,
+					psPMRPDumpPol32IN->ui32PDumpFlags);
+
+
+
+
+PMRPDumpPol32_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePMRPDumpCBP(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PMRPDUMPCBP *psPMRPDumpCBPIN,
+					  PVRSRV_BRIDGE_OUT_PMRPDUMPCBP *psPMRPDumpCBPOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	PMR * psPMRInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psPMRPDumpCBPOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psPMRInt,
+											psPMRPDumpCBPIN->hPMR,
+											PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+					if(psPMRPDumpCBPOUT->eError != PVRSRV_OK)
+					{
+						goto PMRPDumpCBP_exit;
+					}
+				}
+
+
+	psPMRPDumpCBPOUT->eError =
+		PMRPDumpCBP(
+					psPMRInt,
+					psPMRPDumpCBPIN->uiReadOffset,
+					psPMRPDumpCBPIN->uiWriteOffset,
+					psPMRPDumpCBPIN->uiPacketSize,
+					psPMRPDumpCBPIN->uiBufferSize);
+
+
+
+
+PMRPDumpCBP_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDevmemIntPDumpSaveToFileVirtual(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DEVMEMINTPDUMPSAVETOFILEVIRTUAL *psDevmemIntPDumpSaveToFileVirtualIN,
+					  PVRSRV_BRIDGE_OUT_DEVMEMINTPDUMPSAVETOFILEVIRTUAL *psDevmemIntPDumpSaveToFileVirtualOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	DEVMEMINT_CTX * psDevmemServerContextInt = IMG_NULL;
+	IMG_CHAR *uiFileNameInt = IMG_NULL;
+
+
+
+
+	if (psDevmemIntPDumpSaveToFileVirtualIN->ui32ArraySize != 0)
+	{
+		uiFileNameInt = OSAllocMem(psDevmemIntPDumpSaveToFileVirtualIN->ui32ArraySize * sizeof(IMG_CHAR));
+		if (!uiFileNameInt)
+		{
+			psDevmemIntPDumpSaveToFileVirtualOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto DevmemIntPDumpSaveToFileVirtual_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psDevmemIntPDumpSaveToFileVirtualIN->puiFileName, psDevmemIntPDumpSaveToFileVirtualIN->ui32ArraySize * sizeof(IMG_CHAR))
+				|| (OSCopyFromUser(NULL, uiFileNameInt, psDevmemIntPDumpSaveToFileVirtualIN->puiFileName,
+				psDevmemIntPDumpSaveToFileVirtualIN->ui32ArraySize * sizeof(IMG_CHAR)) != PVRSRV_OK) )
+			{
+				psDevmemIntPDumpSaveToFileVirtualOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto DevmemIntPDumpSaveToFileVirtual_exit;
+			}
+
+
+
+				{
+					/* Look up the address from the handle */
+					psDevmemIntPDumpSaveToFileVirtualOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psDevmemServerContextInt,
+											psDevmemIntPDumpSaveToFileVirtualIN->hDevmemServerContext,
+											PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+					if(psDevmemIntPDumpSaveToFileVirtualOUT->eError != PVRSRV_OK)
+					{
+						goto DevmemIntPDumpSaveToFileVirtual_exit;
+					}
+				}
+
+
+	psDevmemIntPDumpSaveToFileVirtualOUT->eError =
+		DevmemIntPDumpSaveToFileVirtual(
+					psDevmemServerContextInt,
+					psDevmemIntPDumpSaveToFileVirtualIN->sAddress,
+					psDevmemIntPDumpSaveToFileVirtualIN->uiSize,
+					psDevmemIntPDumpSaveToFileVirtualIN->ui32ArraySize,
+					uiFileNameInt,
+					psDevmemIntPDumpSaveToFileVirtualIN->ui32FileOffset,
+					psDevmemIntPDumpSaveToFileVirtualIN->ui32PDumpFlags);
+
+
+
+
+DevmemIntPDumpSaveToFileVirtual_exit:
+	if (uiFileNameInt)
+		OSFreeMem(uiFileNameInt);
+
+	return 0;
+}
+
+
+
+/* *************************************************************************** 
+ * Server bridge dispatch related glue 
+ */
+
+
+PVRSRV_ERROR InitPDUMPMMBridge(IMG_VOID);
+PVRSRV_ERROR DeinitPDUMPMMBridge(IMG_VOID);
+
+/*
+ * Register all PDUMPMM functions with services
+ */
+PVRSRV_ERROR InitPDUMPMMBridge(IMG_VOID)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEM, PVRSRVBridgePMRPDumpLoadMem,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEMVALUE32, PVRSRVBridgePMRPDumpLoadMemValue32,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEMVALUE64, PVRSRVBridgePMRPDumpLoadMemValue64,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPSAVETOFILE, PVRSRVBridgePMRPDumpSaveToFile,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPSYMBOLICADDR, PVRSRVBridgePMRPDumpSymbolicAddr,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPPOL32, PVRSRVBridgePMRPDumpPol32,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPCBP, PVRSRVBridgePMRPDumpCBP,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_DEVMEMINTPDUMPSAVETOFILEVIRTUAL, PVRSRVBridgeDevmemIntPDumpSaveToFileVirtual,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all pdumpmm functions with services
+ */
+PVRSRV_ERROR DeinitPDUMPMMBridge(IMG_VOID)
+{
+	return PVRSRV_OK;
+}
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/pvrtl_bridge/client_pvrtl_bridge.h b/drivers/external_drivers/intel_media/graphics/rgx/generated/pvrtl_bridge/client_pvrtl_bridge.h
new file mode 100644
index 0000000..5fe114a
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/pvrtl_bridge/client_pvrtl_bridge.h
@@ -0,0 +1,76 @@
+/*************************************************************************/ /*!
+@File
+@Title          Client bridge header for pvrtl
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Exports the client bridge functions for pvrtl
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef CLIENT_PVRTL_BRIDGE_H
+#define CLIENT_PVRTL_BRIDGE_H
+
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+
+#include "common_pvrtl_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLConnect(IMG_HANDLE hBridge);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLDisconnect(IMG_HANDLE hBridge);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLOpenStream(IMG_HANDLE hBridge,
+							  IMG_CHAR *puiName,
+							  IMG_UINT32 ui32Mode,
+							  IMG_HANDLE *phSD,
+							  DEVMEM_SERVER_EXPORTCOOKIE *phClientBUFExportCookie);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLCloseStream(IMG_HANDLE hBridge,
+							   IMG_HANDLE hSD);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLAcquireData(IMG_HANDLE hBridge,
+							   IMG_HANDLE hSD,
+							   IMG_UINT32 *pui32ReadOffset,
+							   IMG_UINT32 *pui32ReadLen);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLReleaseData(IMG_HANDLE hBridge,
+							   IMG_HANDLE hSD,
+							   IMG_UINT32 ui32ReadOffset,
+							   IMG_UINT32 ui32ReadLen);
+
+
+#endif /* CLIENT_PVRTL_BRIDGE_H */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/pvrtl_bridge/common_pvrtl_bridge.h b/drivers/external_drivers/intel_media/graphics/rgx/generated/pvrtl_bridge/common_pvrtl_bridge.h
new file mode 100644
index 0000000..37033e1
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/pvrtl_bridge/common_pvrtl_bridge.h
@@ -0,0 +1,174 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common bridge header for pvrtl
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures that are used by both
+                the client and sever side of the bridge for pvrtl
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_PVRTL_BRIDGE_H
+#define COMMON_PVRTL_BRIDGE_H
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "devicemem_typedefs.h"
+#include "pvr_tl.h"
+
+
+#define PVRSRV_BRIDGE_PVRTL_CMD_FIRST			0
+#define PVRSRV_BRIDGE_PVRTL_TLCONNECT			PVRSRV_BRIDGE_PVRTL_CMD_FIRST+0
+#define PVRSRV_BRIDGE_PVRTL_TLDISCONNECT			PVRSRV_BRIDGE_PVRTL_CMD_FIRST+1
+#define PVRSRV_BRIDGE_PVRTL_TLOPENSTREAM			PVRSRV_BRIDGE_PVRTL_CMD_FIRST+2
+#define PVRSRV_BRIDGE_PVRTL_TLCLOSESTREAM			PVRSRV_BRIDGE_PVRTL_CMD_FIRST+3
+#define PVRSRV_BRIDGE_PVRTL_TLACQUIREDATA			PVRSRV_BRIDGE_PVRTL_CMD_FIRST+4
+#define PVRSRV_BRIDGE_PVRTL_TLRELEASEDATA			PVRSRV_BRIDGE_PVRTL_CMD_FIRST+5
+#define PVRSRV_BRIDGE_PVRTL_CMD_LAST			(PVRSRV_BRIDGE_PVRTL_CMD_FIRST+5)
+
+
+/*******************************************
+            TLConnect          
+ *******************************************/
+
+/* Bridge in structure for TLConnect */
+typedef struct PVRSRV_BRIDGE_IN_TLCONNECT_TAG
+{
+	 IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_TLCONNECT;
+
+
+/* Bridge out structure for TLConnect */
+typedef struct PVRSRV_BRIDGE_OUT_TLCONNECT_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_TLCONNECT;
+
+/*******************************************
+            TLDisconnect          
+ *******************************************/
+
+/* Bridge in structure for TLDisconnect */
+typedef struct PVRSRV_BRIDGE_IN_TLDISCONNECT_TAG
+{
+	 IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_TLDISCONNECT;
+
+
+/* Bridge out structure for TLDisconnect */
+typedef struct PVRSRV_BRIDGE_OUT_TLDISCONNECT_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_TLDISCONNECT;
+
+/*******************************************
+            TLOpenStream          
+ *******************************************/
+
+/* Bridge in structure for TLOpenStream */
+typedef struct PVRSRV_BRIDGE_IN_TLOPENSTREAM_TAG
+{
+	IMG_CHAR * puiName;
+	IMG_UINT32 ui32Mode;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_TLOPENSTREAM;
+
+
+/* Bridge out structure for TLOpenStream */
+typedef struct PVRSRV_BRIDGE_OUT_TLOPENSTREAM_TAG
+{
+	IMG_HANDLE hSD;
+	DEVMEM_SERVER_EXPORTCOOKIE hClientBUFExportCookie;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_TLOPENSTREAM;
+
+/*******************************************
+            TLCloseStream          
+ *******************************************/
+
+/* Bridge in structure for TLCloseStream */
+typedef struct PVRSRV_BRIDGE_IN_TLCLOSESTREAM_TAG
+{
+	IMG_HANDLE hSD;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_TLCLOSESTREAM;
+
+
+/* Bridge out structure for TLCloseStream */
+typedef struct PVRSRV_BRIDGE_OUT_TLCLOSESTREAM_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_TLCLOSESTREAM;
+
+/*******************************************
+            TLAcquireData          
+ *******************************************/
+
+/* Bridge in structure for TLAcquireData */
+typedef struct PVRSRV_BRIDGE_IN_TLACQUIREDATA_TAG
+{
+	IMG_HANDLE hSD;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_TLACQUIREDATA;
+
+
+/* Bridge out structure for TLAcquireData */
+typedef struct PVRSRV_BRIDGE_OUT_TLACQUIREDATA_TAG
+{
+	IMG_UINT32 ui32ReadOffset;
+	IMG_UINT32 ui32ReadLen;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_TLACQUIREDATA;
+
+/*******************************************
+            TLReleaseData          
+ *******************************************/
+
+/* Bridge in structure for TLReleaseData */
+typedef struct PVRSRV_BRIDGE_IN_TLRELEASEDATA_TAG
+{
+	IMG_HANDLE hSD;
+	IMG_UINT32 ui32ReadOffset;
+	IMG_UINT32 ui32ReadLen;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_TLRELEASEDATA;
+
+
+/* Bridge out structure for TLReleaseData */
+typedef struct PVRSRV_BRIDGE_OUT_TLRELEASEDATA_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_TLRELEASEDATA;
+
+#endif /* COMMON_PVRTL_BRIDGE_H */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/pvrtl_bridge/server_pvrtl_bridge.c b/drivers/external_drivers/intel_media/graphics/rgx/generated/pvrtl_bridge/server_pvrtl_bridge.c
new file mode 100644
index 0000000..89709ad
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/pvrtl_bridge/server_pvrtl_bridge.c
@@ -0,0 +1,397 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server bridge for pvrtl
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for pvrtl
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <asm/uaccess.h>
+
+#include "img_defs.h"
+
+#include "tlserver.h"
+
+
+#include "common_pvrtl_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#if defined (SUPPORT_AUTH)
+#include "osauth.h"
+#endif
+
+#include <linux/slab.h>
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+ 
+static IMG_INT
+PVRSRVBridgeTLConnect(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_TLCONNECT *psTLConnectIN,
+					  PVRSRV_BRIDGE_OUT_TLCONNECT *psTLConnectOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+	PVR_UNREFERENCED_PARAMETER(psTLConnectIN);
+
+
+
+
+
+
+	psTLConnectOUT->eError =
+		TLServerConnectKM(psConnection
+					);
+
+
+
+
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeTLDisconnect(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_TLDISCONNECT *psTLDisconnectIN,
+					  PVRSRV_BRIDGE_OUT_TLDISCONNECT *psTLDisconnectOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+	PVR_UNREFERENCED_PARAMETER(psTLDisconnectIN);
+
+
+
+
+
+
+	psTLDisconnectOUT->eError =
+		TLServerDisconnectKM(psConnection
+					);
+
+
+
+
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeTLOpenStream(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_TLOPENSTREAM *psTLOpenStreamIN,
+					  PVRSRV_BRIDGE_OUT_TLOPENSTREAM *psTLOpenStreamOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_CHAR *uiNameInt = IMG_NULL;
+	TL_STREAM_DESC * psSDInt = IMG_NULL;
+	DEVMEM_EXPORTCOOKIE * psClientBUFExportCookieInt = IMG_NULL;
+
+
+
+	psTLOpenStreamOUT->hSD = IMG_NULL;
+
+	
+	{
+		uiNameInt = OSAllocMem(PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR));
+		if (!uiNameInt)
+		{
+			psTLOpenStreamOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto TLOpenStream_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psTLOpenStreamIN->puiName, PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR))
+				|| (OSCopyFromUser(NULL, uiNameInt, psTLOpenStreamIN->puiName,
+				PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) != PVRSRV_OK) )
+			{
+				psTLOpenStreamOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto TLOpenStream_exit;
+			}
+
+
+
+	psTLOpenStreamOUT->eError =
+		TLServerOpenStreamKM(
+					uiNameInt,
+					psTLOpenStreamIN->ui32Mode,
+					&psSDInt,
+					&psClientBUFExportCookieInt);
+	/* Exit early if bridged call fails */
+	if(psTLOpenStreamOUT->eError != PVRSRV_OK)
+	{
+		goto TLOpenStream_exit;
+	}
+
+
+	psTLOpenStreamOUT->eError = PVRSRVAllocHandle(psConnection->psHandleBase,
+							&psTLOpenStreamOUT->hSD,
+							(IMG_VOID *) psSDInt,
+							PVRSRV_HANDLE_TYPE_PVR_TL_SD,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+							,(PFN_HANDLE_RELEASE)&TLServerCloseStreamKM);
+	if (psTLOpenStreamOUT->eError != PVRSRV_OK)
+	{
+		goto TLOpenStream_exit;
+	}
+
+
+	psTLOpenStreamOUT->eError = PVRSRVAllocSubHandle(psConnection->psHandleBase,
+							&psTLOpenStreamOUT->hClientBUFExportCookie,
+							(IMG_VOID *) psClientBUFExportCookieInt,
+							PVRSRV_HANDLE_TYPE_SERVER_EXPORTCOOKIE,
+							PVRSRV_HANDLE_ALLOC_FLAG_NONE
+							,psTLOpenStreamOUT->hSD);
+	if (psTLOpenStreamOUT->eError != PVRSRV_OK)
+	{
+		goto TLOpenStream_exit;
+	}
+
+
+
+
+TLOpenStream_exit:
+	if (psTLOpenStreamOUT->eError != PVRSRV_OK)
+	{
+		if (psTLOpenStreamOUT->hSD)
+		{
+			PVRSRV_ERROR eError = PVRSRVReleaseHandle(psConnection->psHandleBase,
+						(IMG_HANDLE) psTLOpenStreamOUT->hSD,
+						PVRSRV_HANDLE_TYPE_PVR_TL_SD);
+
+			/* Releasing the handle should free/destroy/release the resource. This should never fail... */
+			PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY));
+
+			/* Avoid freeing/destroying/releasing the resource a second time below */
+			psSDInt = IMG_NULL;
+		}
+
+
+		if (psSDInt)
+		{
+			TLServerCloseStreamKM(psSDInt);
+		}
+	}
+
+	if (uiNameInt)
+		OSFreeMem(uiNameInt);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeTLCloseStream(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_TLCLOSESTREAM *psTLCloseStreamIN,
+					  PVRSRV_BRIDGE_OUT_TLCLOSESTREAM *psTLCloseStreamOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+	psTLCloseStreamOUT->eError =
+		PVRSRVReleaseHandle(psConnection->psHandleBase,
+					(IMG_HANDLE) psTLCloseStreamIN->hSD,
+					PVRSRV_HANDLE_TYPE_PVR_TL_SD);
+	if ((psTLCloseStreamOUT->eError != PVRSRV_OK) && (psTLCloseStreamOUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_ASSERT(0);
+		goto TLCloseStream_exit;
+	}
+
+
+
+TLCloseStream_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeTLAcquireData(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_TLACQUIREDATA *psTLAcquireDataIN,
+					  PVRSRV_BRIDGE_OUT_TLACQUIREDATA *psTLAcquireDataOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	TL_STREAM_DESC * psSDInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psTLAcquireDataOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psSDInt,
+											psTLAcquireDataIN->hSD,
+											PVRSRV_HANDLE_TYPE_PVR_TL_SD);
+					if(psTLAcquireDataOUT->eError != PVRSRV_OK)
+					{
+						goto TLAcquireData_exit;
+					}
+				}
+
+
+	psTLAcquireDataOUT->eError =
+		TLServerAcquireDataKM(
+					psSDInt,
+					&psTLAcquireDataOUT->ui32ReadOffset,
+					&psTLAcquireDataOUT->ui32ReadLen);
+
+
+
+
+TLAcquireData_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeTLReleaseData(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_TLRELEASEDATA *psTLReleaseDataIN,
+					  PVRSRV_BRIDGE_OUT_TLRELEASEDATA *psTLReleaseDataOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	TL_STREAM_DESC * psSDInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psTLReleaseDataOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psSDInt,
+											psTLReleaseDataIN->hSD,
+											PVRSRV_HANDLE_TYPE_PVR_TL_SD);
+					if(psTLReleaseDataOUT->eError != PVRSRV_OK)
+					{
+						goto TLReleaseData_exit;
+					}
+				}
+
+
+	psTLReleaseDataOUT->eError =
+		TLServerReleaseDataKM(
+					psSDInt,
+					psTLReleaseDataIN->ui32ReadOffset,
+					psTLReleaseDataIN->ui32ReadLen);
+
+
+
+
+TLReleaseData_exit:
+
+	return 0;
+}
+
+
+
+/* *************************************************************************** 
+ * Server bridge dispatch related glue 
+ */
+
+
+PVRSRV_ERROR InitPVRTLBridge(IMG_VOID);
+PVRSRV_ERROR DeinitPVRTLBridge(IMG_VOID);
+
+/*
+ * Register all PVRTL functions with services
+ */
+PVRSRV_ERROR InitPVRTLBridge(IMG_VOID)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLCONNECT, PVRSRVBridgeTLConnect,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLDISCONNECT, PVRSRVBridgeTLDisconnect,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLOPENSTREAM, PVRSRVBridgeTLOpenStream,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLCLOSESTREAM, PVRSRVBridgeTLCloseStream,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLACQUIREDATA, PVRSRVBridgeTLAcquireData,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLRELEASEDATA, PVRSRVBridgeTLReleaseData,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all pvrtl functions with services
+ */
+PVRSRV_ERROR DeinitPVRTLBridge(IMG_VOID)
+{
+	return PVRSRV_OK;
+}
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/regconfig_bridge/common_regconfig_bridge.h b/drivers/external_drivers/intel_media/graphics/rgx/generated/regconfig_bridge/common_regconfig_bridge.h
new file mode 100644
index 0000000..edda33f
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/regconfig_bridge/common_regconfig_bridge.h
@@ -0,0 +1,151 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common bridge header for regconfig
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures that are used by both
+                the client and sever side of the bridge for regconfig
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_REGCONFIG_BRIDGE_H
+#define COMMON_REGCONFIG_BRIDGE_H
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+
+
+#define PVRSRV_BRIDGE_REGCONFIG_CMD_FIRST			0
+#define PVRSRV_BRIDGE_REGCONFIG_RGXSETREGCONFIGPI			PVRSRV_BRIDGE_REGCONFIG_CMD_FIRST+0
+#define PVRSRV_BRIDGE_REGCONFIG_RGXADDREGCONFIG			PVRSRV_BRIDGE_REGCONFIG_CMD_FIRST+1
+#define PVRSRV_BRIDGE_REGCONFIG_RGXCLEARREGCONFIG			PVRSRV_BRIDGE_REGCONFIG_CMD_FIRST+2
+#define PVRSRV_BRIDGE_REGCONFIG_RGXENABLEREGCONFIG			PVRSRV_BRIDGE_REGCONFIG_CMD_FIRST+3
+#define PVRSRV_BRIDGE_REGCONFIG_RGXDISABLEREGCONFIG			PVRSRV_BRIDGE_REGCONFIG_CMD_FIRST+4
+#define PVRSRV_BRIDGE_REGCONFIG_CMD_LAST			(PVRSRV_BRIDGE_REGCONFIG_CMD_FIRST+4)
+
+
+/*******************************************
+            RGXSetRegConfigPI          
+ *******************************************/
+
+/* Bridge in structure for RGXSetRegConfigPI */
+typedef struct PVRSRV_BRIDGE_IN_RGXSETREGCONFIGPI_TAG
+{
+	IMG_HANDLE hDevNode;
+	IMG_UINT8 ui8RegPowerIsland;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXSETREGCONFIGPI;
+
+
+/* Bridge out structure for RGXSetRegConfigPI */
+typedef struct PVRSRV_BRIDGE_OUT_RGXSETREGCONFIGPI_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXSETREGCONFIGPI;
+
+/*******************************************
+            RGXAddRegconfig          
+ *******************************************/
+
+/* Bridge in structure for RGXAddRegconfig */
+typedef struct PVRSRV_BRIDGE_IN_RGXADDREGCONFIG_TAG
+{
+	IMG_HANDLE hDevNode;
+	IMG_UINT32 ui32RegAddr;
+	IMG_UINT64 ui64RegValue;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXADDREGCONFIG;
+
+
+/* Bridge out structure for RGXAddRegconfig */
+typedef struct PVRSRV_BRIDGE_OUT_RGXADDREGCONFIG_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXADDREGCONFIG;
+
+/*******************************************
+            RGXClearRegConfig          
+ *******************************************/
+
+/* Bridge in structure for RGXClearRegConfig */
+typedef struct PVRSRV_BRIDGE_IN_RGXCLEARREGCONFIG_TAG
+{
+	IMG_HANDLE hDevNode;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCLEARREGCONFIG;
+
+
+/* Bridge out structure for RGXClearRegConfig */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCLEARREGCONFIG_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCLEARREGCONFIG;
+
+/*******************************************
+            RGXEnableRegConfig          
+ *******************************************/
+
+/* Bridge in structure for RGXEnableRegConfig */
+typedef struct PVRSRV_BRIDGE_IN_RGXENABLEREGCONFIG_TAG
+{
+	IMG_HANDLE hDevNode;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXENABLEREGCONFIG;
+
+
+/* Bridge out structure for RGXEnableRegConfig */
+typedef struct PVRSRV_BRIDGE_OUT_RGXENABLEREGCONFIG_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXENABLEREGCONFIG;
+
+/*******************************************
+            RGXDisableRegConfig          
+ *******************************************/
+
+/* Bridge in structure for RGXDisableRegConfig */
+typedef struct PVRSRV_BRIDGE_IN_RGXDISABLEREGCONFIG_TAG
+{
+	IMG_HANDLE hDevNode;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDISABLEREGCONFIG;
+
+
+/* Bridge out structure for RGXDisableRegConfig */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDISABLEREGCONFIG_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDISABLEREGCONFIG;
+
+#endif /* COMMON_REGCONFIG_BRIDGE_H */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/regconfig_bridge/server_regconfig_bridge.c b/drivers/external_drivers/intel_media/graphics/rgx/generated/regconfig_bridge/server_regconfig_bridge.c
new file mode 100644
index 0000000..5c23842
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/regconfig_bridge/server_regconfig_bridge.c
@@ -0,0 +1,325 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server bridge for regconfig
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for regconfig
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <asm/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxregconfig.h"
+
+
+#include "common_regconfig_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#if defined (SUPPORT_AUTH)
+#include "osauth.h"
+#endif
+
+#include <linux/slab.h>
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+ 
+static IMG_INT
+PVRSRVBridgeRGXSetRegConfigPI(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXSETREGCONFIGPI *psRGXSetRegConfigPIIN,
+					  PVRSRV_BRIDGE_OUT_RGXSETREGCONFIGPI *psRGXSetRegConfigPIOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hDevNodeInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXSetRegConfigPIOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &hDevNodeInt,
+											psRGXSetRegConfigPIIN->hDevNode,
+											PVRSRV_HANDLE_TYPE_DEV_NODE);
+					if(psRGXSetRegConfigPIOUT->eError != PVRSRV_OK)
+					{
+						goto RGXSetRegConfigPI_exit;
+					}
+				}
+
+
+	psRGXSetRegConfigPIOUT->eError =
+		PVRSRVRGXSetRegConfigPIKM(
+					hDevNodeInt,
+					psRGXSetRegConfigPIIN->ui8RegPowerIsland);
+
+
+
+
+RGXSetRegConfigPI_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXAddRegconfig(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXADDREGCONFIG *psRGXAddRegconfigIN,
+					  PVRSRV_BRIDGE_OUT_RGXADDREGCONFIG *psRGXAddRegconfigOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hDevNodeInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXAddRegconfigOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &hDevNodeInt,
+											psRGXAddRegconfigIN->hDevNode,
+											PVRSRV_HANDLE_TYPE_DEV_NODE);
+					if(psRGXAddRegconfigOUT->eError != PVRSRV_OK)
+					{
+						goto RGXAddRegconfig_exit;
+					}
+				}
+
+
+	psRGXAddRegconfigOUT->eError =
+		PVRSRVRGXAddRegConfigKM(
+					hDevNodeInt,
+					psRGXAddRegconfigIN->ui32RegAddr,
+					psRGXAddRegconfigIN->ui64RegValue);
+
+
+
+
+RGXAddRegconfig_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXClearRegConfig(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXCLEARREGCONFIG *psRGXClearRegConfigIN,
+					  PVRSRV_BRIDGE_OUT_RGXCLEARREGCONFIG *psRGXClearRegConfigOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hDevNodeInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXClearRegConfigOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &hDevNodeInt,
+											psRGXClearRegConfigIN->hDevNode,
+											PVRSRV_HANDLE_TYPE_DEV_NODE);
+					if(psRGXClearRegConfigOUT->eError != PVRSRV_OK)
+					{
+						goto RGXClearRegConfig_exit;
+					}
+				}
+
+
+	psRGXClearRegConfigOUT->eError =
+		PVRSRVRGXClearRegConfigKM(
+					hDevNodeInt);
+
+
+
+
+RGXClearRegConfig_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXEnableRegConfig(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXENABLEREGCONFIG *psRGXEnableRegConfigIN,
+					  PVRSRV_BRIDGE_OUT_RGXENABLEREGCONFIG *psRGXEnableRegConfigOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hDevNodeInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXEnableRegConfigOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &hDevNodeInt,
+											psRGXEnableRegConfigIN->hDevNode,
+											PVRSRV_HANDLE_TYPE_DEV_NODE);
+					if(psRGXEnableRegConfigOUT->eError != PVRSRV_OK)
+					{
+						goto RGXEnableRegConfig_exit;
+					}
+				}
+
+
+	psRGXEnableRegConfigOUT->eError =
+		PVRSRVRGXEnableRegConfigKM(
+					hDevNodeInt);
+
+
+
+
+RGXEnableRegConfig_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXDisableRegConfig(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXDISABLEREGCONFIG *psRGXDisableRegConfigIN,
+					  PVRSRV_BRIDGE_OUT_RGXDISABLEREGCONFIG *psRGXDisableRegConfigOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hDevNodeInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXDisableRegConfigOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &hDevNodeInt,
+											psRGXDisableRegConfigIN->hDevNode,
+											PVRSRV_HANDLE_TYPE_DEV_NODE);
+					if(psRGXDisableRegConfigOUT->eError != PVRSRV_OK)
+					{
+						goto RGXDisableRegConfig_exit;
+					}
+				}
+
+
+	psRGXDisableRegConfigOUT->eError =
+		PVRSRVRGXDisableRegConfigKM(
+					hDevNodeInt);
+
+
+
+
+RGXDisableRegConfig_exit:
+
+	return 0;
+}
+
+
+
+/* *************************************************************************** 
+ * Server bridge dispatch related glue 
+ */
+
+
+PVRSRV_ERROR InitREGCONFIGBridge(IMG_VOID);
+PVRSRV_ERROR DeinitREGCONFIGBridge(IMG_VOID);
+
+/*
+ * Register all REGCONFIG functions with services
+ */
+PVRSRV_ERROR InitREGCONFIGBridge(IMG_VOID)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_REGCONFIG, PVRSRV_BRIDGE_REGCONFIG_RGXSETREGCONFIGPI, PVRSRVBridgeRGXSetRegConfigPI,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_REGCONFIG, PVRSRV_BRIDGE_REGCONFIG_RGXADDREGCONFIG, PVRSRVBridgeRGXAddRegconfig,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_REGCONFIG, PVRSRV_BRIDGE_REGCONFIG_RGXCLEARREGCONFIG, PVRSRVBridgeRGXClearRegConfig,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_REGCONFIG, PVRSRV_BRIDGE_REGCONFIG_RGXENABLEREGCONFIG, PVRSRVBridgeRGXEnableRegConfig,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_REGCONFIG, PVRSRV_BRIDGE_REGCONFIG_RGXDISABLEREGCONFIG, PVRSRVBridgeRGXDisableRegConfig,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all regconfig functions with services
+ */
+PVRSRV_ERROR DeinitREGCONFIGBridge(IMG_VOID)
+{
+	return PVRSRV_OK;
+}
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/rgxcmp_bridge/common_rgxcmp_bridge.h b/drivers/external_drivers/intel_media/graphics/rgx/generated/rgxcmp_bridge/common_rgxcmp_bridge.h
new file mode 100644
index 0000000..c3806ed
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/rgxcmp_bridge/common_rgxcmp_bridge.h
@@ -0,0 +1,202 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common bridge header for rgxcmp
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures that are used by both
+                the client and sever side of the bridge for rgxcmp
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_RGXCMP_BRIDGE_H
+#define COMMON_RGXCMP_BRIDGE_H
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+#include "sync_external.h"
+#include "rgx_fwif_shared.h"
+
+
+#define PVRSRV_BRIDGE_RGXCMP_CMD_FIRST			0
+#define PVRSRV_BRIDGE_RGXCMP_RGXCREATECOMPUTECONTEXT			PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXCMP_RGXDESTROYCOMPUTECONTEXT			PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXCMP_RGXKICKCDM			PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RGXCMP_RGXFLUSHCOMPUTEDATA			PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPRIORITY			PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+4
+#define PVRSRV_BRIDGE_RGXCMP_RGXKICKSYNCCDM			PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+5
+#define PVRSRV_BRIDGE_RGXCMP_CMD_LAST			(PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+5)
+
+
+/*******************************************
+            RGXCreateComputeContext          
+ *******************************************/
+
+/* Bridge in structure for RGXCreateComputeContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATECOMPUTECONTEXT_TAG
+{
+	IMG_HANDLE hDevNode;
+	IMG_UINT32 ui32Priority;
+	IMG_DEV_VIRTADDR sMCUFenceAddr;
+	IMG_UINT32 ui32FrameworkCmdize;
+	IMG_BYTE * psFrameworkCmd;
+	IMG_HANDLE hPrivData;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCREATECOMPUTECONTEXT;
+
+
+/* Bridge out structure for RGXCreateComputeContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATECOMPUTECONTEXT_TAG
+{
+	IMG_HANDLE hComputeContext;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCREATECOMPUTECONTEXT;
+
+/*******************************************
+            RGXDestroyComputeContext          
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyComputeContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYCOMPUTECONTEXT_TAG
+{
+	IMG_HANDLE hComputeContext;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYCOMPUTECONTEXT;
+
+
+/* Bridge out structure for RGXDestroyComputeContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYCOMPUTECONTEXT_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYCOMPUTECONTEXT;
+
+/*******************************************
+            RGXKickCDM          
+ *******************************************/
+
+/* Bridge in structure for RGXKickCDM */
+typedef struct PVRSRV_BRIDGE_IN_RGXKICKCDM_TAG
+{
+	IMG_HANDLE hComputeContext;
+	IMG_UINT32 ui32ClientFenceCount;
+	PRGXFWIF_UFO_ADDR * psClientFenceUFOAddress;
+	IMG_UINT32 * pui32ClientFenceValue;
+	IMG_UINT32 ui32ClientUpdateCount;
+	PRGXFWIF_UFO_ADDR * psClientUpdateUFOAddress;
+	IMG_UINT32 * pui32ClientUpdateValue;
+	IMG_UINT32 ui32ServerSyncCount;
+	IMG_UINT32 * pui32ServerSyncFlags;
+	IMG_HANDLE * phServerSyncs;
+	IMG_UINT32 ui32CmdSize;
+	IMG_BYTE * psDMCmd;
+	IMG_BOOL bbPDumpContinuous;
+	IMG_UINT32 ui32ExternalJobReference;
+	IMG_UINT32 ui32InternalJobReference;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXKICKCDM;
+
+
+/* Bridge out structure for RGXKickCDM */
+typedef struct PVRSRV_BRIDGE_OUT_RGXKICKCDM_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXKICKCDM;
+
+/*******************************************
+            RGXFlushComputeData          
+ *******************************************/
+
+/* Bridge in structure for RGXFlushComputeData */
+typedef struct PVRSRV_BRIDGE_IN_RGXFLUSHCOMPUTEDATA_TAG
+{
+	IMG_HANDLE hComputeContext;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXFLUSHCOMPUTEDATA;
+
+
+/* Bridge out structure for RGXFlushComputeData */
+typedef struct PVRSRV_BRIDGE_OUT_RGXFLUSHCOMPUTEDATA_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXFLUSHCOMPUTEDATA;
+
+/*******************************************
+            RGXSetComputeContextPriority          
+ *******************************************/
+
+/* Bridge in structure for RGXSetComputeContextPriority */
+typedef struct PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPRIORITY_TAG
+{
+	IMG_HANDLE hComputeContext;
+	IMG_UINT32 ui32Priority;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPRIORITY;
+
+
+/* Bridge out structure for RGXSetComputeContextPriority */
+typedef struct PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPRIORITY_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPRIORITY;
+
+/*******************************************
+            RGXKickSyncCDM          
+ *******************************************/
+
+/* Bridge in structure for RGXKickSyncCDM */
+typedef struct PVRSRV_BRIDGE_IN_RGXKICKSYNCCDM_TAG
+{
+	IMG_HANDLE hComputeContext;
+	IMG_UINT32 ui32ClientFenceCount;
+	PRGXFWIF_UFO_ADDR * psClientFenceUFOAddress;
+	IMG_UINT32 * pui32ClientFenceValue;
+	IMG_UINT32 ui32ClientUpdateCount;
+	PRGXFWIF_UFO_ADDR * psClientUpdateUFOAddress;
+	IMG_UINT32 * pui32ClientUpdateValue;
+	IMG_UINT32 ui32ServerSyncCount;
+	IMG_UINT32 * pui32ServerSyncFlags;
+	IMG_HANDLE * phServerSyncs;
+	IMG_UINT32 ui32NumCheckFenceFDs;
+	IMG_INT32 * pi32CheckFenceFDs;
+	IMG_INT32 i32UpdateFenceFD;
+	IMG_BOOL bbPDumpContinuous;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXKICKSYNCCDM;
+
+
+/* Bridge out structure for RGXKickSyncCDM */
+typedef struct PVRSRV_BRIDGE_OUT_RGXKICKSYNCCDM_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXKICKSYNCCDM;
+
+#endif /* COMMON_RGXCMP_BRIDGE_H */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/rgxcmp_bridge/server_rgxcmp_bridge.c b/drivers/external_drivers/intel_media/graphics/rgx/generated/rgxcmp_bridge/server_rgxcmp_bridge.c
new file mode 100644
index 0000000..406e019
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/rgxcmp_bridge/server_rgxcmp_bridge.c
@@ -0,0 +1,840 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server bridge for rgxcmp
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for rgxcmp
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <asm/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxcompute.h"
+
+
+#include "common_rgxcmp_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#if defined (SUPPORT_AUTH)
+#include "osauth.h"
+#endif
+
+#include <linux/slab.h>
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+ 
+static IMG_INT
+PVRSRVBridgeRGXCreateComputeContext(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXCREATECOMPUTECONTEXT *psRGXCreateComputeContextIN,
+					  PVRSRV_BRIDGE_OUT_RGXCREATECOMPUTECONTEXT *psRGXCreateComputeContextOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hDevNodeInt = IMG_NULL;
+	IMG_BYTE *psFrameworkCmdInt = IMG_NULL;
+	IMG_HANDLE hPrivDataInt = IMG_NULL;
+	RGX_SERVER_COMPUTE_CONTEXT * psComputeContextInt = IMG_NULL;
+
+
+
+
+	if (psRGXCreateComputeContextIN->ui32FrameworkCmdize != 0)
+	{
+		psFrameworkCmdInt = OSAllocMem(psRGXCreateComputeContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE));
+		if (!psFrameworkCmdInt)
+		{
+			psRGXCreateComputeContextOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXCreateComputeContext_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXCreateComputeContextIN->psFrameworkCmd, psRGXCreateComputeContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE))
+				|| (OSCopyFromUser(NULL, psFrameworkCmdInt, psRGXCreateComputeContextIN->psFrameworkCmd,
+				psRGXCreateComputeContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE)) != PVRSRV_OK) )
+			{
+				psRGXCreateComputeContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXCreateComputeContext_exit;
+			}
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXCreateComputeContextOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &hDevNodeInt,
+											psRGXCreateComputeContextIN->hDevNode,
+											PVRSRV_HANDLE_TYPE_DEV_NODE);
+					if(psRGXCreateComputeContextOUT->eError != PVRSRV_OK)
+					{
+						goto RGXCreateComputeContext_exit;
+					}
+				}
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXCreateComputeContextOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &hPrivDataInt,
+											psRGXCreateComputeContextIN->hPrivData,
+											PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+					if(psRGXCreateComputeContextOUT->eError != PVRSRV_OK)
+					{
+						goto RGXCreateComputeContext_exit;
+					}
+				}
+
+
+	psRGXCreateComputeContextOUT->eError =
+		PVRSRVRGXCreateComputeContextKM(psConnection,
+					hDevNodeInt,
+					psRGXCreateComputeContextIN->ui32Priority,
+					psRGXCreateComputeContextIN->sMCUFenceAddr,
+					psRGXCreateComputeContextIN->ui32FrameworkCmdize,
+					psFrameworkCmdInt,
+					hPrivDataInt,
+					&psComputeContextInt);
+	/* Exit early if bridged call fails */
+	if(psRGXCreateComputeContextOUT->eError != PVRSRV_OK)
+	{
+		goto RGXCreateComputeContext_exit;
+	}
+
+
+	psRGXCreateComputeContextOUT->eError = PVRSRVAllocHandle(psConnection->psHandleBase,
+							&psRGXCreateComputeContextOUT->hComputeContext,
+							(IMG_VOID *) psComputeContextInt,
+							PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+							,(PFN_HANDLE_RELEASE)&PVRSRVRGXDestroyComputeContextKM);
+	if (psRGXCreateComputeContextOUT->eError != PVRSRV_OK)
+	{
+		goto RGXCreateComputeContext_exit;
+	}
+
+
+
+
+RGXCreateComputeContext_exit:
+	if (psRGXCreateComputeContextOUT->eError != PVRSRV_OK)
+	{
+		if (psComputeContextInt)
+		{
+			PVRSRVRGXDestroyComputeContextKM(psComputeContextInt);
+		}
+	}
+
+	if (psFrameworkCmdInt)
+		OSFreeMem(psFrameworkCmdInt);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXDestroyComputeContext(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXDESTROYCOMPUTECONTEXT *psRGXDestroyComputeContextIN,
+					  PVRSRV_BRIDGE_OUT_RGXDESTROYCOMPUTECONTEXT *psRGXDestroyComputeContextOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+	psRGXDestroyComputeContextOUT->eError =
+		PVRSRVReleaseHandle(psConnection->psHandleBase,
+					(IMG_HANDLE) psRGXDestroyComputeContextIN->hComputeContext,
+					PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT);
+	if ((psRGXDestroyComputeContextOUT->eError != PVRSRV_OK) && (psRGXDestroyComputeContextOUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_ASSERT(0);
+		goto RGXDestroyComputeContext_exit;
+	}
+
+
+
+RGXDestroyComputeContext_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXKickCDM(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXKICKCDM *psRGXKickCDMIN,
+					  PVRSRV_BRIDGE_OUT_RGXKICKCDM *psRGXKickCDMOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	RGX_SERVER_COMPUTE_CONTEXT * psComputeContextInt = IMG_NULL;
+	PRGXFWIF_UFO_ADDR *sClientFenceUFOAddressInt = IMG_NULL;
+	IMG_UINT32 *ui32ClientFenceValueInt = IMG_NULL;
+	PRGXFWIF_UFO_ADDR *sClientUpdateUFOAddressInt = IMG_NULL;
+	IMG_UINT32 *ui32ClientUpdateValueInt = IMG_NULL;
+	IMG_UINT32 *ui32ServerSyncFlagsInt = IMG_NULL;
+	SERVER_SYNC_PRIMITIVE * *psServerSyncsInt = IMG_NULL;
+	IMG_HANDLE *hServerSyncsInt2 = IMG_NULL;
+	IMG_BYTE *psDMCmdInt = IMG_NULL;
+
+
+
+
+	if (psRGXKickCDMIN->ui32ClientFenceCount != 0)
+	{
+		sClientFenceUFOAddressInt = OSAllocMem(psRGXKickCDMIN->ui32ClientFenceCount * sizeof(PRGXFWIF_UFO_ADDR));
+		if (!sClientFenceUFOAddressInt)
+		{
+			psRGXKickCDMOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXKickCDM_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXKickCDMIN->psClientFenceUFOAddress, psRGXKickCDMIN->ui32ClientFenceCount * sizeof(PRGXFWIF_UFO_ADDR))
+				|| (OSCopyFromUser(NULL, sClientFenceUFOAddressInt, psRGXKickCDMIN->psClientFenceUFOAddress,
+				psRGXKickCDMIN->ui32ClientFenceCount * sizeof(PRGXFWIF_UFO_ADDR)) != PVRSRV_OK) )
+			{
+				psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXKickCDM_exit;
+			}
+	if (psRGXKickCDMIN->ui32ClientFenceCount != 0)
+	{
+		ui32ClientFenceValueInt = OSAllocMem(psRGXKickCDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32));
+		if (!ui32ClientFenceValueInt)
+		{
+			psRGXKickCDMOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXKickCDM_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXKickCDMIN->pui32ClientFenceValue, psRGXKickCDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32))
+				|| (OSCopyFromUser(NULL, ui32ClientFenceValueInt, psRGXKickCDMIN->pui32ClientFenceValue,
+				psRGXKickCDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK) )
+			{
+				psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXKickCDM_exit;
+			}
+	if (psRGXKickCDMIN->ui32ClientUpdateCount != 0)
+	{
+		sClientUpdateUFOAddressInt = OSAllocMem(psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(PRGXFWIF_UFO_ADDR));
+		if (!sClientUpdateUFOAddressInt)
+		{
+			psRGXKickCDMOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXKickCDM_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXKickCDMIN->psClientUpdateUFOAddress, psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(PRGXFWIF_UFO_ADDR))
+				|| (OSCopyFromUser(NULL, sClientUpdateUFOAddressInt, psRGXKickCDMIN->psClientUpdateUFOAddress,
+				psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(PRGXFWIF_UFO_ADDR)) != PVRSRV_OK) )
+			{
+				psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXKickCDM_exit;
+			}
+	if (psRGXKickCDMIN->ui32ClientUpdateCount != 0)
+	{
+		ui32ClientUpdateValueInt = OSAllocMem(psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32));
+		if (!ui32ClientUpdateValueInt)
+		{
+			psRGXKickCDMOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXKickCDM_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXKickCDMIN->pui32ClientUpdateValue, psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32))
+				|| (OSCopyFromUser(NULL, ui32ClientUpdateValueInt, psRGXKickCDMIN->pui32ClientUpdateValue,
+				psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK) )
+			{
+				psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXKickCDM_exit;
+			}
+	if (psRGXKickCDMIN->ui32ServerSyncCount != 0)
+	{
+		ui32ServerSyncFlagsInt = OSAllocMem(psRGXKickCDMIN->ui32ServerSyncCount * sizeof(IMG_UINT32));
+		if (!ui32ServerSyncFlagsInt)
+		{
+			psRGXKickCDMOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXKickCDM_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXKickCDMIN->pui32ServerSyncFlags, psRGXKickCDMIN->ui32ServerSyncCount * sizeof(IMG_UINT32))
+				|| (OSCopyFromUser(NULL, ui32ServerSyncFlagsInt, psRGXKickCDMIN->pui32ServerSyncFlags,
+				psRGXKickCDMIN->ui32ServerSyncCount * sizeof(IMG_UINT32)) != PVRSRV_OK) )
+			{
+				psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXKickCDM_exit;
+			}
+	if (psRGXKickCDMIN->ui32ServerSyncCount != 0)
+	{
+		psServerSyncsInt = OSAllocMem(psRGXKickCDMIN->ui32ServerSyncCount * sizeof(SERVER_SYNC_PRIMITIVE *));
+		if (!psServerSyncsInt)
+		{
+			psRGXKickCDMOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXKickCDM_exit;
+		}
+		hServerSyncsInt2 = OSAllocMem(psRGXKickCDMIN->ui32ServerSyncCount * sizeof(IMG_HANDLE));
+		if (!hServerSyncsInt2)
+		{
+			psRGXKickCDMOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXKickCDM_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXKickCDMIN->phServerSyncs, psRGXKickCDMIN->ui32ServerSyncCount * sizeof(IMG_HANDLE))
+				|| (OSCopyFromUser(NULL, hServerSyncsInt2, psRGXKickCDMIN->phServerSyncs,
+				psRGXKickCDMIN->ui32ServerSyncCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) )
+			{
+				psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXKickCDM_exit;
+			}
+	if (psRGXKickCDMIN->ui32CmdSize != 0)
+	{
+		psDMCmdInt = OSAllocMem(psRGXKickCDMIN->ui32CmdSize * sizeof(IMG_BYTE));
+		if (!psDMCmdInt)
+		{
+			psRGXKickCDMOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXKickCDM_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXKickCDMIN->psDMCmd, psRGXKickCDMIN->ui32CmdSize * sizeof(IMG_BYTE))
+				|| (OSCopyFromUser(NULL, psDMCmdInt, psRGXKickCDMIN->psDMCmd,
+				psRGXKickCDMIN->ui32CmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK) )
+			{
+				psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXKickCDM_exit;
+			}
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXKickCDMOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psComputeContextInt,
+											psRGXKickCDMIN->hComputeContext,
+											PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT);
+					if(psRGXKickCDMOUT->eError != PVRSRV_OK)
+					{
+						goto RGXKickCDM_exit;
+					}
+				}
+
+
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXKickCDMIN->ui32ServerSyncCount;i++)
+		{
+				{
+					/* Look up the address from the handle */
+					psRGXKickCDMOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psServerSyncsInt[i],
+											hServerSyncsInt2[i],
+											PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+					if(psRGXKickCDMOUT->eError != PVRSRV_OK)
+					{
+						goto RGXKickCDM_exit;
+					}
+				}
+
+		}
+	}
+
+	psRGXKickCDMOUT->eError =
+		PVRSRVRGXKickCDMKM(
+					psComputeContextInt,
+					psRGXKickCDMIN->ui32ClientFenceCount,
+					sClientFenceUFOAddressInt,
+					ui32ClientFenceValueInt,
+					psRGXKickCDMIN->ui32ClientUpdateCount,
+					sClientUpdateUFOAddressInt,
+					ui32ClientUpdateValueInt,
+					psRGXKickCDMIN->ui32ServerSyncCount,
+					ui32ServerSyncFlagsInt,
+					psServerSyncsInt,
+					psRGXKickCDMIN->ui32CmdSize,
+					psDMCmdInt,
+					psRGXKickCDMIN->bbPDumpContinuous,
+					psRGXKickCDMIN->ui32ExternalJobReference,
+					psRGXKickCDMIN->ui32InternalJobReference);
+
+
+
+
+RGXKickCDM_exit:
+	if (sClientFenceUFOAddressInt)
+		OSFreeMem(sClientFenceUFOAddressInt);
+	if (ui32ClientFenceValueInt)
+		OSFreeMem(ui32ClientFenceValueInt);
+	if (sClientUpdateUFOAddressInt)
+		OSFreeMem(sClientUpdateUFOAddressInt);
+	if (ui32ClientUpdateValueInt)
+		OSFreeMem(ui32ClientUpdateValueInt);
+	if (ui32ServerSyncFlagsInt)
+		OSFreeMem(ui32ServerSyncFlagsInt);
+	if (psServerSyncsInt)
+		OSFreeMem(psServerSyncsInt);
+	if (hServerSyncsInt2)
+		OSFreeMem(hServerSyncsInt2);
+	if (psDMCmdInt)
+		OSFreeMem(psDMCmdInt);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXFlushComputeData(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXFLUSHCOMPUTEDATA *psRGXFlushComputeDataIN,
+					  PVRSRV_BRIDGE_OUT_RGXFLUSHCOMPUTEDATA *psRGXFlushComputeDataOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	RGX_SERVER_COMPUTE_CONTEXT * psComputeContextInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXFlushComputeDataOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psComputeContextInt,
+											psRGXFlushComputeDataIN->hComputeContext,
+											PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT);
+					if(psRGXFlushComputeDataOUT->eError != PVRSRV_OK)
+					{
+						goto RGXFlushComputeData_exit;
+					}
+				}
+
+
+	psRGXFlushComputeDataOUT->eError =
+		PVRSRVRGXFlushComputeDataKM(
+					psComputeContextInt);
+
+
+
+
+RGXFlushComputeData_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXSetComputeContextPriority(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPRIORITY *psRGXSetComputeContextPriorityIN,
+					  PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPRIORITY *psRGXSetComputeContextPriorityOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	RGX_SERVER_COMPUTE_CONTEXT * psComputeContextInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXSetComputeContextPriorityOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psComputeContextInt,
+											psRGXSetComputeContextPriorityIN->hComputeContext,
+											PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT);
+					if(psRGXSetComputeContextPriorityOUT->eError != PVRSRV_OK)
+					{
+						goto RGXSetComputeContextPriority_exit;
+					}
+				}
+
+
+	psRGXSetComputeContextPriorityOUT->eError =
+		PVRSRVRGXSetComputeContextPriorityKM(psConnection,
+					psComputeContextInt,
+					psRGXSetComputeContextPriorityIN->ui32Priority);
+
+
+
+
+RGXSetComputeContextPriority_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXKickSyncCDM(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXKICKSYNCCDM *psRGXKickSyncCDMIN,
+					  PVRSRV_BRIDGE_OUT_RGXKICKSYNCCDM *psRGXKickSyncCDMOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	RGX_SERVER_COMPUTE_CONTEXT * psComputeContextInt = IMG_NULL;
+	PRGXFWIF_UFO_ADDR *sClientFenceUFOAddressInt = IMG_NULL;
+	IMG_UINT32 *ui32ClientFenceValueInt = IMG_NULL;
+	PRGXFWIF_UFO_ADDR *sClientUpdateUFOAddressInt = IMG_NULL;
+	IMG_UINT32 *ui32ClientUpdateValueInt = IMG_NULL;
+	IMG_UINT32 *ui32ServerSyncFlagsInt = IMG_NULL;
+	SERVER_SYNC_PRIMITIVE * *psServerSyncsInt = IMG_NULL;
+	IMG_HANDLE *hServerSyncsInt2 = IMG_NULL;
+	IMG_INT32 *i32CheckFenceFDsInt = IMG_NULL;
+
+
+
+
+	if (psRGXKickSyncCDMIN->ui32ClientFenceCount != 0)
+	{
+		sClientFenceUFOAddressInt = OSAllocMem(psRGXKickSyncCDMIN->ui32ClientFenceCount * sizeof(PRGXFWIF_UFO_ADDR));
+		if (!sClientFenceUFOAddressInt)
+		{
+			psRGXKickSyncCDMOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXKickSyncCDM_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXKickSyncCDMIN->psClientFenceUFOAddress, psRGXKickSyncCDMIN->ui32ClientFenceCount * sizeof(PRGXFWIF_UFO_ADDR))
+				|| (OSCopyFromUser(NULL, sClientFenceUFOAddressInt, psRGXKickSyncCDMIN->psClientFenceUFOAddress,
+				psRGXKickSyncCDMIN->ui32ClientFenceCount * sizeof(PRGXFWIF_UFO_ADDR)) != PVRSRV_OK) )
+			{
+				psRGXKickSyncCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXKickSyncCDM_exit;
+			}
+	if (psRGXKickSyncCDMIN->ui32ClientFenceCount != 0)
+	{
+		ui32ClientFenceValueInt = OSAllocMem(psRGXKickSyncCDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32));
+		if (!ui32ClientFenceValueInt)
+		{
+			psRGXKickSyncCDMOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXKickSyncCDM_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXKickSyncCDMIN->pui32ClientFenceValue, psRGXKickSyncCDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32))
+				|| (OSCopyFromUser(NULL, ui32ClientFenceValueInt, psRGXKickSyncCDMIN->pui32ClientFenceValue,
+				psRGXKickSyncCDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK) )
+			{
+				psRGXKickSyncCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXKickSyncCDM_exit;
+			}
+	if (psRGXKickSyncCDMIN->ui32ClientUpdateCount != 0)
+	{
+		sClientUpdateUFOAddressInt = OSAllocMem(psRGXKickSyncCDMIN->ui32ClientUpdateCount * sizeof(PRGXFWIF_UFO_ADDR));
+		if (!sClientUpdateUFOAddressInt)
+		{
+			psRGXKickSyncCDMOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXKickSyncCDM_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXKickSyncCDMIN->psClientUpdateUFOAddress, psRGXKickSyncCDMIN->ui32ClientUpdateCount * sizeof(PRGXFWIF_UFO_ADDR))
+				|| (OSCopyFromUser(NULL, sClientUpdateUFOAddressInt, psRGXKickSyncCDMIN->psClientUpdateUFOAddress,
+				psRGXKickSyncCDMIN->ui32ClientUpdateCount * sizeof(PRGXFWIF_UFO_ADDR)) != PVRSRV_OK) )
+			{
+				psRGXKickSyncCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXKickSyncCDM_exit;
+			}
+	if (psRGXKickSyncCDMIN->ui32ClientUpdateCount != 0)
+	{
+		ui32ClientUpdateValueInt = OSAllocMem(psRGXKickSyncCDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32));
+		if (!ui32ClientUpdateValueInt)
+		{
+			psRGXKickSyncCDMOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXKickSyncCDM_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXKickSyncCDMIN->pui32ClientUpdateValue, psRGXKickSyncCDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32))
+				|| (OSCopyFromUser(NULL, ui32ClientUpdateValueInt, psRGXKickSyncCDMIN->pui32ClientUpdateValue,
+				psRGXKickSyncCDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK) )
+			{
+				psRGXKickSyncCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXKickSyncCDM_exit;
+			}
+	if (psRGXKickSyncCDMIN->ui32ServerSyncCount != 0)
+	{
+		ui32ServerSyncFlagsInt = OSAllocMem(psRGXKickSyncCDMIN->ui32ServerSyncCount * sizeof(IMG_UINT32));
+		if (!ui32ServerSyncFlagsInt)
+		{
+			psRGXKickSyncCDMOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXKickSyncCDM_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXKickSyncCDMIN->pui32ServerSyncFlags, psRGXKickSyncCDMIN->ui32ServerSyncCount * sizeof(IMG_UINT32))
+				|| (OSCopyFromUser(NULL, ui32ServerSyncFlagsInt, psRGXKickSyncCDMIN->pui32ServerSyncFlags,
+				psRGXKickSyncCDMIN->ui32ServerSyncCount * sizeof(IMG_UINT32)) != PVRSRV_OK) )
+			{
+				psRGXKickSyncCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXKickSyncCDM_exit;
+			}
+	if (psRGXKickSyncCDMIN->ui32ServerSyncCount != 0)
+	{
+		psServerSyncsInt = OSAllocMem(psRGXKickSyncCDMIN->ui32ServerSyncCount * sizeof(SERVER_SYNC_PRIMITIVE *));
+		if (!psServerSyncsInt)
+		{
+			psRGXKickSyncCDMOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXKickSyncCDM_exit;
+		}
+		hServerSyncsInt2 = OSAllocMem(psRGXKickSyncCDMIN->ui32ServerSyncCount * sizeof(IMG_HANDLE));
+		if (!hServerSyncsInt2)
+		{
+			psRGXKickSyncCDMOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXKickSyncCDM_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXKickSyncCDMIN->phServerSyncs, psRGXKickSyncCDMIN->ui32ServerSyncCount * sizeof(IMG_HANDLE))
+				|| (OSCopyFromUser(NULL, hServerSyncsInt2, psRGXKickSyncCDMIN->phServerSyncs,
+				psRGXKickSyncCDMIN->ui32ServerSyncCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) )
+			{
+				psRGXKickSyncCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXKickSyncCDM_exit;
+			}
+	if (psRGXKickSyncCDMIN->ui32NumCheckFenceFDs != 0)
+	{
+		i32CheckFenceFDsInt = OSAllocMem(psRGXKickSyncCDMIN->ui32NumCheckFenceFDs * sizeof(IMG_INT32));
+		if (!i32CheckFenceFDsInt)
+		{
+			psRGXKickSyncCDMOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXKickSyncCDM_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXKickSyncCDMIN->pi32CheckFenceFDs, psRGXKickSyncCDMIN->ui32NumCheckFenceFDs * sizeof(IMG_INT32))
+				|| (OSCopyFromUser(NULL, i32CheckFenceFDsInt, psRGXKickSyncCDMIN->pi32CheckFenceFDs,
+				psRGXKickSyncCDMIN->ui32NumCheckFenceFDs * sizeof(IMG_INT32)) != PVRSRV_OK) )
+			{
+				psRGXKickSyncCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXKickSyncCDM_exit;
+			}
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXKickSyncCDMOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psComputeContextInt,
+											psRGXKickSyncCDMIN->hComputeContext,
+											PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT);
+					if(psRGXKickSyncCDMOUT->eError != PVRSRV_OK)
+					{
+						goto RGXKickSyncCDM_exit;
+					}
+				}
+
+
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXKickSyncCDMIN->ui32ServerSyncCount;i++)
+		{
+				{
+					/* Look up the address from the handle */
+					psRGXKickSyncCDMOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psServerSyncsInt[i],
+											hServerSyncsInt2[i],
+											PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+					if(psRGXKickSyncCDMOUT->eError != PVRSRV_OK)
+					{
+						goto RGXKickSyncCDM_exit;
+					}
+				}
+
+		}
+	}
+
+	psRGXKickSyncCDMOUT->eError =
+		PVRSRVRGXKickSyncCDMKM(
+					psComputeContextInt,
+					psRGXKickSyncCDMIN->ui32ClientFenceCount,
+					sClientFenceUFOAddressInt,
+					ui32ClientFenceValueInt,
+					psRGXKickSyncCDMIN->ui32ClientUpdateCount,
+					sClientUpdateUFOAddressInt,
+					ui32ClientUpdateValueInt,
+					psRGXKickSyncCDMIN->ui32ServerSyncCount,
+					ui32ServerSyncFlagsInt,
+					psServerSyncsInt,
+					psRGXKickSyncCDMIN->ui32NumCheckFenceFDs,
+					i32CheckFenceFDsInt,
+					psRGXKickSyncCDMIN->i32UpdateFenceFD,
+					psRGXKickSyncCDMIN->bbPDumpContinuous);
+
+
+
+
+RGXKickSyncCDM_exit:
+	if (sClientFenceUFOAddressInt)
+		OSFreeMem(sClientFenceUFOAddressInt);
+	if (ui32ClientFenceValueInt)
+		OSFreeMem(ui32ClientFenceValueInt);
+	if (sClientUpdateUFOAddressInt)
+		OSFreeMem(sClientUpdateUFOAddressInt);
+	if (ui32ClientUpdateValueInt)
+		OSFreeMem(ui32ClientUpdateValueInt);
+	if (ui32ServerSyncFlagsInt)
+		OSFreeMem(ui32ServerSyncFlagsInt);
+	if (psServerSyncsInt)
+		OSFreeMem(psServerSyncsInt);
+	if (hServerSyncsInt2)
+		OSFreeMem(hServerSyncsInt2);
+	if (i32CheckFenceFDsInt)
+		OSFreeMem(i32CheckFenceFDsInt);
+
+	return 0;
+}
+
+
+
+/* *************************************************************************** 
+ * Server bridge dispatch related glue 
+ */
+
+
+PVRSRV_ERROR InitRGXCMPBridge(IMG_VOID);
+PVRSRV_ERROR DeinitRGXCMPBridge(IMG_VOID);
+
+/*
+ * Register all RGXCMP functions with services
+ */
+PVRSRV_ERROR InitRGXCMPBridge(IMG_VOID)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXCREATECOMPUTECONTEXT, PVRSRVBridgeRGXCreateComputeContext,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXDESTROYCOMPUTECONTEXT, PVRSRVBridgeRGXDestroyComputeContext,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXKICKCDM, PVRSRVBridgeRGXKickCDM,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXFLUSHCOMPUTEDATA, PVRSRVBridgeRGXFlushComputeData,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPRIORITY, PVRSRVBridgeRGXSetComputeContextPriority,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXKICKSYNCCDM, PVRSRVBridgeRGXKickSyncCDM,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all rgxcmp functions with services
+ */
+PVRSRV_ERROR DeinitRGXCMPBridge(IMG_VOID)
+{
+	return PVRSRV_OK;
+}
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/rgxhwperf_bridge/common_rgxhwperf_bridge.h b/drivers/external_drivers/intel_media/graphics/rgx/generated/rgxhwperf_bridge/common_rgxhwperf_bridge.h
new file mode 100644
index 0000000..7e13a61
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/rgxhwperf_bridge/common_rgxhwperf_bridge.h
@@ -0,0 +1,141 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common bridge header for rgxhwperf
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures that are used by both
+                the client and sever side of the bridge for rgxhwperf
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_RGXHWPERF_BRIDGE_H
+#define COMMON_RGXHWPERF_BRIDGE_H
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+#include "rgx_hwperf_km.h"
+
+
+#define PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST			0
+#define PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERF			PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGENABLEHWPERFCOUNTERS			PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERFCOUNTERS			PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGCUSTOMCOUNTERS			PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RGXHWPERF_CMD_LAST			(PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+3)
+
+
+/*******************************************
+            RGXCtrlHWPerf          
+ *******************************************/
+
+/* Bridge in structure for RGXCtrlHWPerf */
+typedef struct PVRSRV_BRIDGE_IN_RGXCTRLHWPERF_TAG
+{
+	IMG_HANDLE hDevNode;
+	IMG_BOOL bToggle;
+	IMG_UINT64 ui64Mask;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCTRLHWPERF;
+
+
+/* Bridge out structure for RGXCtrlHWPerf */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCTRLHWPERF_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCTRLHWPERF;
+
+/*******************************************
+            RGXConfigEnableHWPerfCounters          
+ *******************************************/
+
+/* Bridge in structure for RGXConfigEnableHWPerfCounters */
+typedef struct PVRSRV_BRIDGE_IN_RGXCONFIGENABLEHWPERFCOUNTERS_TAG
+{
+	IMG_HANDLE hDevNode;
+	IMG_UINT32 ui32ArrayLen;
+	RGX_HWPERF_CONFIG_CNTBLK * psBlockConfigs;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCONFIGENABLEHWPERFCOUNTERS;
+
+
+/* Bridge out structure for RGXConfigEnableHWPerfCounters */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCONFIGENABLEHWPERFCOUNTERS_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCONFIGENABLEHWPERFCOUNTERS;
+
+/*******************************************
+            RGXCtrlHWPerfCounters          
+ *******************************************/
+
+/* Bridge in structure for RGXCtrlHWPerfCounters */
+typedef struct PVRSRV_BRIDGE_IN_RGXCTRLHWPERFCOUNTERS_TAG
+{
+	IMG_HANDLE hDevNode;
+	IMG_BOOL bEnable;
+	IMG_UINT32 ui32ArrayLen;
+	IMG_UINT16 * pui16BlockIDs;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCTRLHWPERFCOUNTERS;
+
+
+/* Bridge out structure for RGXCtrlHWPerfCounters */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCTRLHWPERFCOUNTERS_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCTRLHWPERFCOUNTERS;
+
+/*******************************************
+            RGXConfigCustomCounters          
+ *******************************************/
+
+/* Bridge in structure for RGXConfigCustomCounters */
+typedef struct PVRSRV_BRIDGE_IN_RGXCONFIGCUSTOMCOUNTERS_TAG
+{
+	IMG_HANDLE hDevNode;
+	IMG_UINT16 ui16CustomBlockID;
+	IMG_UINT16 ui16NumCustomCounters;
+	IMG_UINT32 * pui32CustomCounterIDs;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCONFIGCUSTOMCOUNTERS;
+
+
+/* Bridge out structure for RGXConfigCustomCounters */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCONFIGCUSTOMCOUNTERS_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCONFIGCUSTOMCOUNTERS;
+
+#endif /* COMMON_RGXHWPERF_BRIDGE_H */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/rgxhwperf_bridge/server_rgxhwperf_bridge.c b/drivers/external_drivers/intel_media/graphics/rgx/generated/rgxhwperf_bridge/server_rgxhwperf_bridge.c
new file mode 100644
index 0000000..1639e9b
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/rgxhwperf_bridge/server_rgxhwperf_bridge.c
@@ -0,0 +1,357 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server bridge for rgxhwperf
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for rgxhwperf
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <asm/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxhwperf.h"
+
+
+#include "common_rgxhwperf_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#if defined (SUPPORT_AUTH)
+#include "osauth.h"
+#endif
+
+#include <linux/slab.h>
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+ 
+static IMG_INT
+PVRSRVBridgeRGXCtrlHWPerf(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXCTRLHWPERF *psRGXCtrlHWPerfIN,
+					  PVRSRV_BRIDGE_OUT_RGXCTRLHWPERF *psRGXCtrlHWPerfOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hDevNodeInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXCtrlHWPerfOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &hDevNodeInt,
+											psRGXCtrlHWPerfIN->hDevNode,
+											PVRSRV_HANDLE_TYPE_DEV_NODE);
+					if(psRGXCtrlHWPerfOUT->eError != PVRSRV_OK)
+					{
+						goto RGXCtrlHWPerf_exit;
+					}
+				}
+
+
+	psRGXCtrlHWPerfOUT->eError =
+		PVRSRVRGXCtrlHWPerfKM(
+					hDevNodeInt,
+					psRGXCtrlHWPerfIN->bToggle,
+					psRGXCtrlHWPerfIN->ui64Mask);
+
+
+
+
+RGXCtrlHWPerf_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXConfigEnableHWPerfCounters(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXCONFIGENABLEHWPERFCOUNTERS *psRGXConfigEnableHWPerfCountersIN,
+					  PVRSRV_BRIDGE_OUT_RGXCONFIGENABLEHWPERFCOUNTERS *psRGXConfigEnableHWPerfCountersOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hDevNodeInt = IMG_NULL;
+	RGX_HWPERF_CONFIG_CNTBLK *psBlockConfigsInt = IMG_NULL;
+
+
+
+
+	if (psRGXConfigEnableHWPerfCountersIN->ui32ArrayLen != 0)
+	{
+		psBlockConfigsInt = OSAllocMem(psRGXConfigEnableHWPerfCountersIN->ui32ArrayLen * sizeof(RGX_HWPERF_CONFIG_CNTBLK));
+		if (!psBlockConfigsInt)
+		{
+			psRGXConfigEnableHWPerfCountersOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXConfigEnableHWPerfCounters_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXConfigEnableHWPerfCountersIN->psBlockConfigs, psRGXConfigEnableHWPerfCountersIN->ui32ArrayLen * sizeof(RGX_HWPERF_CONFIG_CNTBLK))
+				|| (OSCopyFromUser(NULL, psBlockConfigsInt, psRGXConfigEnableHWPerfCountersIN->psBlockConfigs,
+				psRGXConfigEnableHWPerfCountersIN->ui32ArrayLen * sizeof(RGX_HWPERF_CONFIG_CNTBLK)) != PVRSRV_OK) )
+			{
+				psRGXConfigEnableHWPerfCountersOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXConfigEnableHWPerfCounters_exit;
+			}
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXConfigEnableHWPerfCountersOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &hDevNodeInt,
+											psRGXConfigEnableHWPerfCountersIN->hDevNode,
+											PVRSRV_HANDLE_TYPE_DEV_NODE);
+					if(psRGXConfigEnableHWPerfCountersOUT->eError != PVRSRV_OK)
+					{
+						goto RGXConfigEnableHWPerfCounters_exit;
+					}
+				}
+
+
+	psRGXConfigEnableHWPerfCountersOUT->eError =
+		PVRSRVRGXConfigEnableHWPerfCountersKM(
+					hDevNodeInt,
+					psRGXConfigEnableHWPerfCountersIN->ui32ArrayLen,
+					psBlockConfigsInt);
+
+
+
+
+RGXConfigEnableHWPerfCounters_exit:
+	if (psBlockConfigsInt)
+		OSFreeMem(psBlockConfigsInt);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXCtrlHWPerfCounters(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXCTRLHWPERFCOUNTERS *psRGXCtrlHWPerfCountersIN,
+					  PVRSRV_BRIDGE_OUT_RGXCTRLHWPERFCOUNTERS *psRGXCtrlHWPerfCountersOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hDevNodeInt = IMG_NULL;
+	IMG_UINT16 *ui16BlockIDsInt = IMG_NULL;
+
+
+
+
+	if (psRGXCtrlHWPerfCountersIN->ui32ArrayLen != 0)
+	{
+		ui16BlockIDsInt = OSAllocMem(psRGXCtrlHWPerfCountersIN->ui32ArrayLen * sizeof(IMG_UINT16));
+		if (!ui16BlockIDsInt)
+		{
+			psRGXCtrlHWPerfCountersOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXCtrlHWPerfCounters_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXCtrlHWPerfCountersIN->pui16BlockIDs, psRGXCtrlHWPerfCountersIN->ui32ArrayLen * sizeof(IMG_UINT16))
+				|| (OSCopyFromUser(NULL, ui16BlockIDsInt, psRGXCtrlHWPerfCountersIN->pui16BlockIDs,
+				psRGXCtrlHWPerfCountersIN->ui32ArrayLen * sizeof(IMG_UINT16)) != PVRSRV_OK) )
+			{
+				psRGXCtrlHWPerfCountersOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXCtrlHWPerfCounters_exit;
+			}
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXCtrlHWPerfCountersOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &hDevNodeInt,
+											psRGXCtrlHWPerfCountersIN->hDevNode,
+											PVRSRV_HANDLE_TYPE_DEV_NODE);
+					if(psRGXCtrlHWPerfCountersOUT->eError != PVRSRV_OK)
+					{
+						goto RGXCtrlHWPerfCounters_exit;
+					}
+				}
+
+
+	psRGXCtrlHWPerfCountersOUT->eError =
+		PVRSRVRGXCtrlHWPerfCountersKM(
+					hDevNodeInt,
+					psRGXCtrlHWPerfCountersIN->bEnable,
+					psRGXCtrlHWPerfCountersIN->ui32ArrayLen,
+					ui16BlockIDsInt);
+
+
+
+
+RGXCtrlHWPerfCounters_exit:
+	if (ui16BlockIDsInt)
+		OSFreeMem(ui16BlockIDsInt);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXConfigCustomCounters(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXCONFIGCUSTOMCOUNTERS *psRGXConfigCustomCountersIN,
+					  PVRSRV_BRIDGE_OUT_RGXCONFIGCUSTOMCOUNTERS *psRGXConfigCustomCountersOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hDevNodeInt = IMG_NULL;
+	IMG_UINT32 *ui32CustomCounterIDsInt = IMG_NULL;
+
+
+
+
+	if (psRGXConfigCustomCountersIN->ui16NumCustomCounters != 0)
+	{
+		ui32CustomCounterIDsInt = OSAllocMem(psRGXConfigCustomCountersIN->ui16NumCustomCounters * sizeof(IMG_UINT32));
+		if (!ui32CustomCounterIDsInt)
+		{
+			psRGXConfigCustomCountersOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXConfigCustomCounters_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXConfigCustomCountersIN->pui32CustomCounterIDs, psRGXConfigCustomCountersIN->ui16NumCustomCounters * sizeof(IMG_UINT32))
+				|| (OSCopyFromUser(NULL, ui32CustomCounterIDsInt, psRGXConfigCustomCountersIN->pui32CustomCounterIDs,
+				psRGXConfigCustomCountersIN->ui16NumCustomCounters * sizeof(IMG_UINT32)) != PVRSRV_OK) )
+			{
+				psRGXConfigCustomCountersOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXConfigCustomCounters_exit;
+			}
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXConfigCustomCountersOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &hDevNodeInt,
+											psRGXConfigCustomCountersIN->hDevNode,
+											PVRSRV_HANDLE_TYPE_DEV_NODE);
+					if(psRGXConfigCustomCountersOUT->eError != PVRSRV_OK)
+					{
+						goto RGXConfigCustomCounters_exit;
+					}
+				}
+
+
+	psRGXConfigCustomCountersOUT->eError =
+		PVRSRVRGXConfigCustomCountersKM(
+					hDevNodeInt,
+					psRGXConfigCustomCountersIN->ui16CustomBlockID,
+					psRGXConfigCustomCountersIN->ui16NumCustomCounters,
+					ui32CustomCounterIDsInt);
+
+
+
+
+RGXConfigCustomCounters_exit:
+	if (ui32CustomCounterIDsInt)
+		OSFreeMem(ui32CustomCounterIDsInt);
+
+	return 0;
+}
+
+
+
+/* *************************************************************************** 
+ * Server bridge dispatch related glue 
+ */
+
+
+PVRSRV_ERROR InitRGXHWPERFBridge(IMG_VOID);
+PVRSRV_ERROR DeinitRGXHWPERFBridge(IMG_VOID);
+
+/*
+ * Register all RGXHWPERF functions with services
+ */
+PVRSRV_ERROR InitRGXHWPERFBridge(IMG_VOID)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERF, PVRSRVBridgeRGXCtrlHWPerf,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGENABLEHWPERFCOUNTERS, PVRSRVBridgeRGXConfigEnableHWPerfCounters,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERFCOUNTERS, PVRSRVBridgeRGXCtrlHWPerfCounters,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGCUSTOMCOUNTERS, PVRSRVBridgeRGXConfigCustomCounters,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all rgxhwperf functions with services
+ */
+PVRSRV_ERROR DeinitRGXHWPERFBridge(IMG_VOID)
+{
+	return PVRSRV_OK;
+}
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/rgxinit_bridge/common_rgxinit_bridge.h b/drivers/external_drivers/intel_media/graphics/rgx/generated/rgxinit_bridge/common_rgxinit_bridge.h
new file mode 100644
index 0000000..9097cb5
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/rgxinit_bridge/common_rgxinit_bridge.h
@@ -0,0 +1,201 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common bridge header for rgxinit
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures that are used by both
+                the client and sever side of the bridge for rgxinit
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_RGXINIT_BRIDGE_H
+#define COMMON_RGXINIT_BRIDGE_H
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+#include "rgxscript.h"
+#include "devicemem_typedefs.h"
+#include "rgx_fwif_shared.h"
+#include "rgx_fwif.h"
+
+
+#define PVRSRV_BRIDGE_RGXINIT_CMD_FIRST			0
+#define PVRSRV_BRIDGE_RGXINIT_RGXINITALLOCFWIMGMEM			PVRSRV_BRIDGE_RGXINIT_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXINIT_RGXINITFIRMWARE			PVRSRV_BRIDGE_RGXINIT_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXINIT_RGXINITLOADFWIMAGE			PVRSRV_BRIDGE_RGXINIT_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RGXINIT_RGXINITDEVPART2			PVRSRV_BRIDGE_RGXINIT_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RGXINIT_GPUVIRTPOPULATELMASUBARENAS			PVRSRV_BRIDGE_RGXINIT_CMD_FIRST+4
+#define PVRSRV_BRIDGE_RGXINIT_CMD_LAST			(PVRSRV_BRIDGE_RGXINIT_CMD_FIRST+4)
+
+
+/*******************************************
+            RGXInitAllocFWImgMem          
+ *******************************************/
+
+/* Bridge in structure for RGXInitAllocFWImgMem */
+typedef struct PVRSRV_BRIDGE_IN_RGXINITALLOCFWIMGMEM_TAG
+{
+	IMG_HANDLE hDevNode;
+	IMG_DEVMEM_SIZE_T uiFWCodeLen;
+	IMG_DEVMEM_SIZE_T uiFWDataLen;
+	IMG_DEVMEM_SIZE_T uiFWCoremem;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXINITALLOCFWIMGMEM;
+
+
+/* Bridge out structure for RGXInitAllocFWImgMem */
+typedef struct PVRSRV_BRIDGE_OUT_RGXINITALLOCFWIMGMEM_TAG
+{
+	DEVMEM_SERVER_EXPORTCOOKIE hFWCodeAllocServerExportCookie;
+	IMG_DEV_VIRTADDR sFWCodeDevVAddrBase;
+	DEVMEM_SERVER_EXPORTCOOKIE hFWDataAllocServerExportCookie;
+	IMG_DEV_VIRTADDR sFWDataDevVAddrBase;
+	DEVMEM_SERVER_EXPORTCOOKIE hFWCorememAllocServerExportCookie;
+	IMG_DEV_VIRTADDR sFWCorememDevVAddrBase;
+	RGXFWIF_DEV_VIRTADDR sFWCorememMetaVAddrBase;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXINITALLOCFWIMGMEM;
+
+/*******************************************
+            RGXInitFirmware          
+ *******************************************/
+
+/* Bridge in structure for RGXInitFirmware */
+typedef struct PVRSRV_BRIDGE_IN_RGXINITFIRMWARE_TAG
+{
+	IMG_HANDLE hDevNode;
+	IMG_BOOL bEnableSignatureChecks;
+	IMG_UINT32 ui32SignatureChecksBufSize;
+	IMG_UINT32 ui32HWPerfFWBufSizeKB;
+	IMG_UINT64 ui64HWPerfFilter;
+	IMG_UINT32 ui32RGXFWAlignChecksSize;
+	IMG_UINT32 * pui32RGXFWAlignChecks;
+	IMG_UINT32 ui32ConfigFlags;
+	IMG_UINT32 ui32LogType;
+	IMG_UINT32 ui32FilterFlags;
+	IMG_UINT32 ui32JonesDisableMask;
+	IMG_UINT32 ui32ui32HWRDebugDumpLimit;
+	RGXFWIF_COMPCHECKS_BVNC sClientBVNC;
+	IMG_UINT32 ui32HWPerfCountersDataSize;
+	RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXINITFIRMWARE;
+
+
+/* Bridge out structure for RGXInitFirmware */
+typedef struct PVRSRV_BRIDGE_OUT_RGXINITFIRMWARE_TAG
+{
+	RGXFWIF_DEV_VIRTADDR spsRGXFwInit;
+	DEVMEM_SERVER_EXPORTCOOKIE hHWPerfDataAllocServerExportCookie;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXINITFIRMWARE;
+
+/*******************************************
+            RGXInitLoadFWImage          
+ *******************************************/
+
+/* Bridge in structure for RGXInitLoadFWImage */
+typedef struct PVRSRV_BRIDGE_IN_RGXINITLOADFWIMAGE_TAG
+{
+	IMG_HANDLE hImgDestImport;
+	IMG_HANDLE hImgSrcImport;
+	IMG_UINT64 ui64ImgLen;
+	IMG_HANDLE hSigImport;
+	IMG_UINT64 ui64SigLen;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXINITLOADFWIMAGE;
+
+
+/* Bridge out structure for RGXInitLoadFWImage */
+typedef struct PVRSRV_BRIDGE_OUT_RGXINITLOADFWIMAGE_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXINITLOADFWIMAGE;
+
+/*******************************************
+            RGXInitDevPart2          
+ *******************************************/
+
+/* Bridge in structure for RGXInitDevPart2 */
+typedef struct PVRSRV_BRIDGE_IN_RGXINITDEVPART2_TAG
+{
+	IMG_HANDLE hDevNode;
+	RGX_INIT_COMMAND * psInitScript;
+	RGX_INIT_COMMAND * psDbgScript;
+	RGX_INIT_COMMAND * psDbgBusScript;
+	RGX_INIT_COMMAND * psDeinitScript;
+	IMG_UINT32 ui32ui32KernelCatBaseIdReg;
+	IMG_UINT32 ui32KernelCatBaseId;
+	IMG_UINT32 ui32KernelCatBaseReg;
+	IMG_UINT32 ui32KernelCatBaseWordSize;
+	IMG_UINT32 ui32KernelCatBaseAlignShift;
+	IMG_UINT32 ui32KernelCatBaseShift;
+	IMG_UINT64 ui64KernelCatBaseMask;
+	IMG_UINT32 ui32DeviceFlags;
+	IMG_UINT32 ui32RGXActivePMConf;
+	DEVMEM_SERVER_EXPORTCOOKIE hFWCodeAllocServerExportCookie;
+	DEVMEM_SERVER_EXPORTCOOKIE hFWDataAllocServerExportCookie;
+	DEVMEM_SERVER_EXPORTCOOKIE hFWCorememAllocServerExportCookie;
+	DEVMEM_SERVER_EXPORTCOOKIE hHWPerfDataAllocServerExportCookie;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXINITDEVPART2;
+
+
+/* Bridge out structure for RGXInitDevPart2 */
+typedef struct PVRSRV_BRIDGE_OUT_RGXINITDEVPART2_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXINITDEVPART2;
+
+/*******************************************
+            GPUVIRTPopulateLMASubArenas          
+ *******************************************/
+
+/* Bridge in structure for GPUVIRTPopulateLMASubArenas */
+typedef struct PVRSRV_BRIDGE_IN_GPUVIRTPOPULATELMASUBARENAS_TAG
+{
+	IMG_HANDLE hDevNode;
+	IMG_UINT32 ui32NumElements;
+	IMG_UINT32 * pui32Elements;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_GPUVIRTPOPULATELMASUBARENAS;
+
+
+/* Bridge out structure for GPUVIRTPopulateLMASubArenas */
+typedef struct PVRSRV_BRIDGE_OUT_GPUVIRTPOPULATELMASUBARENAS_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_GPUVIRTPOPULATELMASUBARENAS;
+
+#endif /* COMMON_RGXINIT_BRIDGE_H */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/rgxinit_bridge/server_rgxinit_bridge.c b/drivers/external_drivers/intel_media/graphics/rgx/generated/rgxinit_bridge/server_rgxinit_bridge.c
new file mode 100644
index 0000000..5f0d05d
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/rgxinit_bridge/server_rgxinit_bridge.c
@@ -0,0 +1,733 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server bridge for rgxinit
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for rgxinit
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <asm/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxinit.h"
+
+
+#include "common_rgxinit_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#if defined (SUPPORT_AUTH)
+#include "osauth.h"
+#endif
+
+#include <linux/slab.h>
+
+
+static PVRSRV_ERROR ReleaseFWCodeAllocServerExportCookie(IMG_VOID *pvData)
+{
+	PVR_UNREFERENCED_PARAMETER(pvData);
+
+	return PVRSRV_OK;
+}
+static PVRSRV_ERROR ReleaseFWDataAllocServerExportCookie(IMG_VOID *pvData)
+{
+	PVR_UNREFERENCED_PARAMETER(pvData);
+
+	return PVRSRV_OK;
+}
+static PVRSRV_ERROR ReleaseFWCorememAllocServerExportCookie(IMG_VOID *pvData)
+{
+	PVR_UNREFERENCED_PARAMETER(pvData);
+
+	return PVRSRV_OK;
+}
+static PVRSRV_ERROR ReleaseHWPerfDataAllocServerExportCookie(IMG_VOID *pvData)
+{
+	PVR_UNREFERENCED_PARAMETER(pvData);
+
+	return PVRSRV_OK;
+}
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+ 
+static IMG_INT
+PVRSRVBridgeRGXInitAllocFWImgMem(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXINITALLOCFWIMGMEM *psRGXInitAllocFWImgMemIN,
+					  PVRSRV_BRIDGE_OUT_RGXINITALLOCFWIMGMEM *psRGXInitAllocFWImgMemOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hDevNodeInt = IMG_NULL;
+	DEVMEM_EXPORTCOOKIE * psFWCodeAllocServerExportCookieInt = IMG_NULL;
+	DEVMEM_EXPORTCOOKIE * psFWDataAllocServerExportCookieInt = IMG_NULL;
+	DEVMEM_EXPORTCOOKIE * psFWCorememAllocServerExportCookieInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXInitAllocFWImgMemOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &hDevNodeInt,
+											psRGXInitAllocFWImgMemIN->hDevNode,
+											PVRSRV_HANDLE_TYPE_DEV_NODE);
+					if(psRGXInitAllocFWImgMemOUT->eError != PVRSRV_OK)
+					{
+						goto RGXInitAllocFWImgMem_exit;
+					}
+				}
+
+
+	psRGXInitAllocFWImgMemOUT->eError =
+		PVRSRVRGXInitAllocFWImgMemKM(
+					hDevNodeInt,
+					psRGXInitAllocFWImgMemIN->uiFWCodeLen,
+					psRGXInitAllocFWImgMemIN->uiFWDataLen,
+					psRGXInitAllocFWImgMemIN->uiFWCoremem,
+					&psFWCodeAllocServerExportCookieInt,
+					&psRGXInitAllocFWImgMemOUT->sFWCodeDevVAddrBase,
+					&psFWDataAllocServerExportCookieInt,
+					&psRGXInitAllocFWImgMemOUT->sFWDataDevVAddrBase,
+					&psFWCorememAllocServerExportCookieInt,
+					&psRGXInitAllocFWImgMemOUT->sFWCorememDevVAddrBase,
+					&psRGXInitAllocFWImgMemOUT->sFWCorememMetaVAddrBase);
+	/* Exit early if bridged call fails */
+	if(psRGXInitAllocFWImgMemOUT->eError != PVRSRV_OK)
+	{
+		goto RGXInitAllocFWImgMem_exit;
+	}
+
+
+	psRGXInitAllocFWImgMemOUT->eError = PVRSRVAllocHandle(psConnection->psHandleBase,
+							&psRGXInitAllocFWImgMemOUT->hFWCodeAllocServerExportCookie,
+							(IMG_VOID *) psFWCodeAllocServerExportCookieInt,
+							PVRSRV_HANDLE_TYPE_SERVER_EXPORTCOOKIE,
+							PVRSRV_HANDLE_ALLOC_FLAG_NONE
+							,(PFN_HANDLE_RELEASE)&ReleaseFWCodeAllocServerExportCookie);
+	if (psRGXInitAllocFWImgMemOUT->eError != PVRSRV_OK)
+	{
+		goto RGXInitAllocFWImgMem_exit;
+	}
+
+
+	psRGXInitAllocFWImgMemOUT->eError = PVRSRVAllocHandle(psConnection->psHandleBase,
+							&psRGXInitAllocFWImgMemOUT->hFWDataAllocServerExportCookie,
+							(IMG_VOID *) psFWDataAllocServerExportCookieInt,
+							PVRSRV_HANDLE_TYPE_SERVER_EXPORTCOOKIE,
+							PVRSRV_HANDLE_ALLOC_FLAG_NONE
+							,(PFN_HANDLE_RELEASE)&ReleaseFWDataAllocServerExportCookie);
+	if (psRGXInitAllocFWImgMemOUT->eError != PVRSRV_OK)
+	{
+		goto RGXInitAllocFWImgMem_exit;
+	}
+
+
+	psRGXInitAllocFWImgMemOUT->eError = PVRSRVAllocHandle(psConnection->psHandleBase,
+							&psRGXInitAllocFWImgMemOUT->hFWCorememAllocServerExportCookie,
+							(IMG_VOID *) psFWCorememAllocServerExportCookieInt,
+							PVRSRV_HANDLE_TYPE_SERVER_EXPORTCOOKIE,
+							PVRSRV_HANDLE_ALLOC_FLAG_NONE
+							,(PFN_HANDLE_RELEASE)&ReleaseFWCorememAllocServerExportCookie);
+	if (psRGXInitAllocFWImgMemOUT->eError != PVRSRV_OK)
+	{
+		goto RGXInitAllocFWImgMem_exit;
+	}
+
+
+
+
+RGXInitAllocFWImgMem_exit:
+	if (psRGXInitAllocFWImgMemOUT->eError != PVRSRV_OK)
+	{
+	}
+
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXInitFirmware(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXINITFIRMWARE *psRGXInitFirmwareIN,
+					  PVRSRV_BRIDGE_OUT_RGXINITFIRMWARE *psRGXInitFirmwareOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hDevNodeInt = IMG_NULL;
+	IMG_UINT32 *ui32RGXFWAlignChecksInt = IMG_NULL;
+	DEVMEM_EXPORTCOOKIE * psHWPerfDataAllocServerExportCookieInt = IMG_NULL;
+
+
+
+
+	if (psRGXInitFirmwareIN->ui32RGXFWAlignChecksSize != 0)
+	{
+		ui32RGXFWAlignChecksInt = OSAllocMem(psRGXInitFirmwareIN->ui32RGXFWAlignChecksSize * sizeof(IMG_UINT32));
+		if (!ui32RGXFWAlignChecksInt)
+		{
+			psRGXInitFirmwareOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXInitFirmware_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXInitFirmwareIN->pui32RGXFWAlignChecks, psRGXInitFirmwareIN->ui32RGXFWAlignChecksSize * sizeof(IMG_UINT32))
+				|| (OSCopyFromUser(NULL, ui32RGXFWAlignChecksInt, psRGXInitFirmwareIN->pui32RGXFWAlignChecks,
+				psRGXInitFirmwareIN->ui32RGXFWAlignChecksSize * sizeof(IMG_UINT32)) != PVRSRV_OK) )
+			{
+				psRGXInitFirmwareOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXInitFirmware_exit;
+			}
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXInitFirmwareOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &hDevNodeInt,
+											psRGXInitFirmwareIN->hDevNode,
+											PVRSRV_HANDLE_TYPE_DEV_NODE);
+					if(psRGXInitFirmwareOUT->eError != PVRSRV_OK)
+					{
+						goto RGXInitFirmware_exit;
+					}
+				}
+
+
+	psRGXInitFirmwareOUT->eError =
+		PVRSRVRGXInitFirmwareKM(
+					hDevNodeInt,
+					&psRGXInitFirmwareOUT->spsRGXFwInit,
+					psRGXInitFirmwareIN->bEnableSignatureChecks,
+					psRGXInitFirmwareIN->ui32SignatureChecksBufSize,
+					psRGXInitFirmwareIN->ui32HWPerfFWBufSizeKB,
+					psRGXInitFirmwareIN->ui64HWPerfFilter,
+					psRGXInitFirmwareIN->ui32RGXFWAlignChecksSize,
+					ui32RGXFWAlignChecksInt,
+					psRGXInitFirmwareIN->ui32ConfigFlags,
+					psRGXInitFirmwareIN->ui32LogType,
+					psRGXInitFirmwareIN->ui32FilterFlags,
+					psRGXInitFirmwareIN->ui32JonesDisableMask,
+					psRGXInitFirmwareIN->ui32ui32HWRDebugDumpLimit,
+					&psRGXInitFirmwareIN->sClientBVNC,
+					psRGXInitFirmwareIN->ui32HWPerfCountersDataSize,
+					&psHWPerfDataAllocServerExportCookieInt,
+					psRGXInitFirmwareIN->eRGXRDPowerIslandConf);
+	/* Exit early if bridged call fails */
+	if(psRGXInitFirmwareOUT->eError != PVRSRV_OK)
+	{
+		goto RGXInitFirmware_exit;
+	}
+
+
+	psRGXInitFirmwareOUT->eError = PVRSRVAllocHandle(psConnection->psHandleBase,
+							&psRGXInitFirmwareOUT->hHWPerfDataAllocServerExportCookie,
+							(IMG_VOID *) psHWPerfDataAllocServerExportCookieInt,
+							PVRSRV_HANDLE_TYPE_SERVER_EXPORTCOOKIE,
+							PVRSRV_HANDLE_ALLOC_FLAG_NONE
+							,(PFN_HANDLE_RELEASE)&ReleaseHWPerfDataAllocServerExportCookie);
+	if (psRGXInitFirmwareOUT->eError != PVRSRV_OK)
+	{
+		goto RGXInitFirmware_exit;
+	}
+
+
+
+
+RGXInitFirmware_exit:
+	if (psRGXInitFirmwareOUT->eError != PVRSRV_OK)
+	{
+	}
+
+	if (ui32RGXFWAlignChecksInt)
+		OSFreeMem(ui32RGXFWAlignChecksInt);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXInitLoadFWImage(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXINITLOADFWIMAGE *psRGXInitLoadFWImageIN,
+					  PVRSRV_BRIDGE_OUT_RGXINITLOADFWIMAGE *psRGXInitLoadFWImageOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	PMR * psImgDestImportInt = IMG_NULL;
+	PMR * psImgSrcImportInt = IMG_NULL;
+	PMR * psSigImportInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXInitLoadFWImageOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psImgDestImportInt,
+											psRGXInitLoadFWImageIN->hImgDestImport,
+											PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+					if(psRGXInitLoadFWImageOUT->eError != PVRSRV_OK)
+					{
+						goto RGXInitLoadFWImage_exit;
+					}
+				}
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXInitLoadFWImageOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psImgSrcImportInt,
+											psRGXInitLoadFWImageIN->hImgSrcImport,
+											PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+					if(psRGXInitLoadFWImageOUT->eError != PVRSRV_OK)
+					{
+						goto RGXInitLoadFWImage_exit;
+					}
+				}
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXInitLoadFWImageOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psSigImportInt,
+											psRGXInitLoadFWImageIN->hSigImport,
+											PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+					if(psRGXInitLoadFWImageOUT->eError != PVRSRV_OK)
+					{
+						goto RGXInitLoadFWImage_exit;
+					}
+				}
+
+
+	psRGXInitLoadFWImageOUT->eError =
+		PVRSRVRGXInitLoadFWImageKM(
+					psImgDestImportInt,
+					psImgSrcImportInt,
+					psRGXInitLoadFWImageIN->ui64ImgLen,
+					psSigImportInt,
+					psRGXInitLoadFWImageIN->ui64SigLen);
+
+
+
+
+RGXInitLoadFWImage_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXInitDevPart2(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXINITDEVPART2 *psRGXInitDevPart2IN,
+					  PVRSRV_BRIDGE_OUT_RGXINITDEVPART2 *psRGXInitDevPart2OUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hDevNodeInt = IMG_NULL;
+	RGX_INIT_COMMAND *psInitScriptInt = IMG_NULL;
+	RGX_INIT_COMMAND *psDbgScriptInt = IMG_NULL;
+	RGX_INIT_COMMAND *psDbgBusScriptInt = IMG_NULL;
+	RGX_INIT_COMMAND *psDeinitScriptInt = IMG_NULL;
+	DEVMEM_EXPORTCOOKIE * psFWCodeAllocServerExportCookieInt = IMG_NULL;
+	DEVMEM_EXPORTCOOKIE * psFWDataAllocServerExportCookieInt = IMG_NULL;
+	DEVMEM_EXPORTCOOKIE * psFWCorememAllocServerExportCookieInt = IMG_NULL;
+	DEVMEM_EXPORTCOOKIE * psHWPerfDataAllocServerExportCookieInt = IMG_NULL;
+
+
+
+
+	
+	{
+		psInitScriptInt = OSAllocMem(RGX_MAX_INIT_COMMANDS * sizeof(RGX_INIT_COMMAND));
+		if (!psInitScriptInt)
+		{
+			psRGXInitDevPart2OUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXInitDevPart2_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXInitDevPart2IN->psInitScript, RGX_MAX_INIT_COMMANDS * sizeof(RGX_INIT_COMMAND))
+				|| (OSCopyFromUser(NULL, psInitScriptInt, psRGXInitDevPart2IN->psInitScript,
+				RGX_MAX_INIT_COMMANDS * sizeof(RGX_INIT_COMMAND)) != PVRSRV_OK) )
+			{
+				psRGXInitDevPart2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXInitDevPart2_exit;
+			}
+	
+	{
+		psDbgScriptInt = OSAllocMem(RGX_MAX_DEBUG_COMMANDS * sizeof(RGX_INIT_COMMAND));
+		if (!psDbgScriptInt)
+		{
+			psRGXInitDevPart2OUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXInitDevPart2_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXInitDevPart2IN->psDbgScript, RGX_MAX_DEBUG_COMMANDS * sizeof(RGX_INIT_COMMAND))
+				|| (OSCopyFromUser(NULL, psDbgScriptInt, psRGXInitDevPart2IN->psDbgScript,
+				RGX_MAX_DEBUG_COMMANDS * sizeof(RGX_INIT_COMMAND)) != PVRSRV_OK) )
+			{
+				psRGXInitDevPart2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXInitDevPart2_exit;
+			}
+	
+	{
+		psDbgBusScriptInt = OSAllocMem(RGX_MAX_DBGBUS_COMMANDS * sizeof(RGX_INIT_COMMAND));
+		if (!psDbgBusScriptInt)
+		{
+			psRGXInitDevPart2OUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXInitDevPart2_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXInitDevPart2IN->psDbgBusScript, RGX_MAX_DBGBUS_COMMANDS * sizeof(RGX_INIT_COMMAND))
+				|| (OSCopyFromUser(NULL, psDbgBusScriptInt, psRGXInitDevPart2IN->psDbgBusScript,
+				RGX_MAX_DBGBUS_COMMANDS * sizeof(RGX_INIT_COMMAND)) != PVRSRV_OK) )
+			{
+				psRGXInitDevPart2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXInitDevPart2_exit;
+			}
+	
+	{
+		psDeinitScriptInt = OSAllocMem(RGX_MAX_DEINIT_COMMANDS * sizeof(RGX_INIT_COMMAND));
+		if (!psDeinitScriptInt)
+		{
+			psRGXInitDevPart2OUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXInitDevPart2_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXInitDevPart2IN->psDeinitScript, RGX_MAX_DEINIT_COMMANDS * sizeof(RGX_INIT_COMMAND))
+				|| (OSCopyFromUser(NULL, psDeinitScriptInt, psRGXInitDevPart2IN->psDeinitScript,
+				RGX_MAX_DEINIT_COMMANDS * sizeof(RGX_INIT_COMMAND)) != PVRSRV_OK) )
+			{
+				psRGXInitDevPart2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXInitDevPart2_exit;
+			}
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXInitDevPart2OUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &hDevNodeInt,
+											psRGXInitDevPart2IN->hDevNode,
+											PVRSRV_HANDLE_TYPE_DEV_NODE);
+					if(psRGXInitDevPart2OUT->eError != PVRSRV_OK)
+					{
+						goto RGXInitDevPart2_exit;
+					}
+				}
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXInitDevPart2OUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psFWCodeAllocServerExportCookieInt,
+											psRGXInitDevPart2IN->hFWCodeAllocServerExportCookie,
+											PVRSRV_HANDLE_TYPE_SERVER_EXPORTCOOKIE);
+					if(psRGXInitDevPart2OUT->eError != PVRSRV_OK)
+					{
+						goto RGXInitDevPart2_exit;
+					}
+				}
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXInitDevPart2OUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psFWDataAllocServerExportCookieInt,
+											psRGXInitDevPart2IN->hFWDataAllocServerExportCookie,
+											PVRSRV_HANDLE_TYPE_SERVER_EXPORTCOOKIE);
+					if(psRGXInitDevPart2OUT->eError != PVRSRV_OK)
+					{
+						goto RGXInitDevPart2_exit;
+					}
+				}
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXInitDevPart2OUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psFWCorememAllocServerExportCookieInt,
+											psRGXInitDevPart2IN->hFWCorememAllocServerExportCookie,
+											PVRSRV_HANDLE_TYPE_SERVER_EXPORTCOOKIE);
+					if(psRGXInitDevPart2OUT->eError != PVRSRV_OK)
+					{
+						goto RGXInitDevPart2_exit;
+					}
+				}
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXInitDevPart2OUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psHWPerfDataAllocServerExportCookieInt,
+											psRGXInitDevPart2IN->hHWPerfDataAllocServerExportCookie,
+											PVRSRV_HANDLE_TYPE_SERVER_EXPORTCOOKIE);
+					if(psRGXInitDevPart2OUT->eError != PVRSRV_OK)
+					{
+						goto RGXInitDevPart2_exit;
+					}
+				}
+
+
+	psRGXInitDevPart2OUT->eError =
+		PVRSRVRGXInitDevPart2KM(
+					hDevNodeInt,
+					psInitScriptInt,
+					psDbgScriptInt,
+					psDbgBusScriptInt,
+					psDeinitScriptInt,
+					psRGXInitDevPart2IN->ui32ui32KernelCatBaseIdReg,
+					psRGXInitDevPart2IN->ui32KernelCatBaseId,
+					psRGXInitDevPart2IN->ui32KernelCatBaseReg,
+					psRGXInitDevPart2IN->ui32KernelCatBaseWordSize,
+					psRGXInitDevPart2IN->ui32KernelCatBaseAlignShift,
+					psRGXInitDevPart2IN->ui32KernelCatBaseShift,
+					psRGXInitDevPart2IN->ui64KernelCatBaseMask,
+					psRGXInitDevPart2IN->ui32DeviceFlags,
+					psRGXInitDevPart2IN->ui32RGXActivePMConf,
+					psFWCodeAllocServerExportCookieInt,
+					psFWDataAllocServerExportCookieInt,
+					psFWCorememAllocServerExportCookieInt,
+					psHWPerfDataAllocServerExportCookieInt);
+
+
+	psRGXInitDevPart2OUT->eError =
+		PVRSRVReleaseHandle(psConnection->psHandleBase,
+					(IMG_HANDLE) psRGXInitDevPart2IN->hFWCodeAllocServerExportCookie,
+					PVRSRV_HANDLE_TYPE_SERVER_EXPORTCOOKIE);
+	if ((psRGXInitDevPart2OUT->eError != PVRSRV_OK) && (psRGXInitDevPart2OUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_ASSERT(0);
+		goto RGXInitDevPart2_exit;
+	}
+
+	psRGXInitDevPart2OUT->eError =
+		PVRSRVReleaseHandle(psConnection->psHandleBase,
+					(IMG_HANDLE) psRGXInitDevPart2IN->hFWDataAllocServerExportCookie,
+					PVRSRV_HANDLE_TYPE_SERVER_EXPORTCOOKIE);
+	if ((psRGXInitDevPart2OUT->eError != PVRSRV_OK) && (psRGXInitDevPart2OUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_ASSERT(0);
+		goto RGXInitDevPart2_exit;
+	}
+
+	psRGXInitDevPart2OUT->eError =
+		PVRSRVReleaseHandle(psConnection->psHandleBase,
+					(IMG_HANDLE) psRGXInitDevPart2IN->hFWCorememAllocServerExportCookie,
+					PVRSRV_HANDLE_TYPE_SERVER_EXPORTCOOKIE);
+	if ((psRGXInitDevPart2OUT->eError != PVRSRV_OK) && (psRGXInitDevPart2OUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_ASSERT(0);
+		goto RGXInitDevPart2_exit;
+	}
+
+	psRGXInitDevPart2OUT->eError =
+		PVRSRVReleaseHandle(psConnection->psHandleBase,
+					(IMG_HANDLE) psRGXInitDevPart2IN->hHWPerfDataAllocServerExportCookie,
+					PVRSRV_HANDLE_TYPE_SERVER_EXPORTCOOKIE);
+	if ((psRGXInitDevPart2OUT->eError != PVRSRV_OK) && (psRGXInitDevPart2OUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_ASSERT(0);
+		goto RGXInitDevPart2_exit;
+	}
+
+
+
+RGXInitDevPart2_exit:
+	if (psInitScriptInt)
+		OSFreeMem(psInitScriptInt);
+	if (psDbgScriptInt)
+		OSFreeMem(psDbgScriptInt);
+	if (psDbgBusScriptInt)
+		OSFreeMem(psDbgBusScriptInt);
+	if (psDeinitScriptInt)
+		OSFreeMem(psDeinitScriptInt);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeGPUVIRTPopulateLMASubArenas(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_GPUVIRTPOPULATELMASUBARENAS *psGPUVIRTPopulateLMASubArenasIN,
+					  PVRSRV_BRIDGE_OUT_GPUVIRTPOPULATELMASUBARENAS *psGPUVIRTPopulateLMASubArenasOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hDevNodeInt = IMG_NULL;
+	IMG_UINT32 *ui32ElementsInt = IMG_NULL;
+
+
+
+
+	if (psGPUVIRTPopulateLMASubArenasIN->ui32NumElements != 0)
+	{
+		ui32ElementsInt = OSAllocMem(psGPUVIRTPopulateLMASubArenasIN->ui32NumElements * sizeof(IMG_UINT32));
+		if (!ui32ElementsInt)
+		{
+			psGPUVIRTPopulateLMASubArenasOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto GPUVIRTPopulateLMASubArenas_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psGPUVIRTPopulateLMASubArenasIN->pui32Elements, psGPUVIRTPopulateLMASubArenasIN->ui32NumElements * sizeof(IMG_UINT32))
+				|| (OSCopyFromUser(NULL, ui32ElementsInt, psGPUVIRTPopulateLMASubArenasIN->pui32Elements,
+				psGPUVIRTPopulateLMASubArenasIN->ui32NumElements * sizeof(IMG_UINT32)) != PVRSRV_OK) )
+			{
+				psGPUVIRTPopulateLMASubArenasOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto GPUVIRTPopulateLMASubArenas_exit;
+			}
+
+
+
+				{
+					/* Look up the address from the handle */
+					psGPUVIRTPopulateLMASubArenasOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &hDevNodeInt,
+											psGPUVIRTPopulateLMASubArenasIN->hDevNode,
+											PVRSRV_HANDLE_TYPE_DEV_NODE);
+					if(psGPUVIRTPopulateLMASubArenasOUT->eError != PVRSRV_OK)
+					{
+						goto GPUVIRTPopulateLMASubArenas_exit;
+					}
+				}
+
+
+	psGPUVIRTPopulateLMASubArenasOUT->eError =
+		PVRSRVGPUVIRTPopulateLMASubArenasKM(
+					hDevNodeInt,
+					psGPUVIRTPopulateLMASubArenasIN->ui32NumElements,
+					ui32ElementsInt);
+
+
+
+
+GPUVIRTPopulateLMASubArenas_exit:
+	if (ui32ElementsInt)
+		OSFreeMem(ui32ElementsInt);
+
+	return 0;
+}
+
+
+
+/* *************************************************************************** 
+ * Server bridge dispatch related glue 
+ */
+
+
+PVRSRV_ERROR InitRGXINITBridge(IMG_VOID);
+PVRSRV_ERROR DeinitRGXINITBridge(IMG_VOID);
+
+/*
+ * Register all RGXINIT functions with services
+ */
+PVRSRV_ERROR InitRGXINITBridge(IMG_VOID)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXINIT, PVRSRV_BRIDGE_RGXINIT_RGXINITALLOCFWIMGMEM, PVRSRVBridgeRGXInitAllocFWImgMem,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXINIT, PVRSRV_BRIDGE_RGXINIT_RGXINITFIRMWARE, PVRSRVBridgeRGXInitFirmware,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXINIT, PVRSRV_BRIDGE_RGXINIT_RGXINITLOADFWIMAGE, PVRSRVBridgeRGXInitLoadFWImage,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXINIT, PVRSRV_BRIDGE_RGXINIT_RGXINITDEVPART2, PVRSRVBridgeRGXInitDevPart2,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXINIT, PVRSRV_BRIDGE_RGXINIT_GPUVIRTPOPULATELMASUBARENAS, PVRSRVBridgeGPUVIRTPopulateLMASubArenas,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all rgxinit functions with services
+ */
+PVRSRV_ERROR DeinitRGXINITBridge(IMG_VOID)
+{
+	return PVRSRV_OK;
+}
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/rgxpdump_bridge/common_rgxpdump_bridge.h b/drivers/external_drivers/intel_media/graphics/rgx/generated/rgxpdump_bridge/common_rgxpdump_bridge.h
new file mode 100644
index 0000000..a1d6a35
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/rgxpdump_bridge/common_rgxpdump_bridge.h
@@ -0,0 +1,96 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common bridge header for rgxpdump
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures that are used by both
+                the client and sever side of the bridge for rgxpdump
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_RGXPDUMP_BRIDGE_H
+#define COMMON_RGXPDUMP_BRIDGE_H
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+
+
+#define PVRSRV_BRIDGE_RGXPDUMP_CMD_FIRST			0
+#define PVRSRV_BRIDGE_RGXPDUMP_PDUMPTRACEBUFFER			PVRSRV_BRIDGE_RGXPDUMP_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXPDUMP_PDUMPSIGNATUREBUFFER			PVRSRV_BRIDGE_RGXPDUMP_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXPDUMP_CMD_LAST			(PVRSRV_BRIDGE_RGXPDUMP_CMD_FIRST+1)
+
+
+/*******************************************
+            PDumpTraceBuffer          
+ *******************************************/
+
+/* Bridge in structure for PDumpTraceBuffer */
+typedef struct PVRSRV_BRIDGE_IN_PDUMPTRACEBUFFER_TAG
+{
+	IMG_HANDLE hDeviceNode;
+	IMG_UINT32 ui32PDumpFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PDUMPTRACEBUFFER;
+
+
+/* Bridge out structure for PDumpTraceBuffer */
+typedef struct PVRSRV_BRIDGE_OUT_PDUMPTRACEBUFFER_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PDUMPTRACEBUFFER;
+
+/*******************************************
+            PDumpSignatureBuffer          
+ *******************************************/
+
+/* Bridge in structure for PDumpSignatureBuffer */
+typedef struct PVRSRV_BRIDGE_IN_PDUMPSIGNATUREBUFFER_TAG
+{
+	IMG_HANDLE hDeviceNode;
+	IMG_UINT32 ui32PDumpFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PDUMPSIGNATUREBUFFER;
+
+
+/* Bridge out structure for PDumpSignatureBuffer */
+typedef struct PVRSRV_BRIDGE_OUT_PDUMPSIGNATUREBUFFER_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PDUMPSIGNATUREBUFFER;
+
+#endif /* COMMON_RGXPDUMP_BRIDGE_H */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/rgxpdump_bridge/server_rgxpdump_bridge.c b/drivers/external_drivers/intel_media/graphics/rgx/generated/rgxpdump_bridge/server_rgxpdump_bridge.c
new file mode 100644
index 0000000..648b7db
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/rgxpdump_bridge/server_rgxpdump_bridge.c
@@ -0,0 +1,192 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server bridge for rgxpdump
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for rgxpdump
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <asm/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxpdump.h"
+
+
+#include "common_rgxpdump_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#if defined (SUPPORT_AUTH)
+#include "osauth.h"
+#endif
+
+#include <linux/slab.h>
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+ 
+static IMG_INT
+PVRSRVBridgePDumpTraceBuffer(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PDUMPTRACEBUFFER *psPDumpTraceBufferIN,
+					  PVRSRV_BRIDGE_OUT_PDUMPTRACEBUFFER *psPDumpTraceBufferOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hDeviceNodeInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psPDumpTraceBufferOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &hDeviceNodeInt,
+											psPDumpTraceBufferIN->hDeviceNode,
+											PVRSRV_HANDLE_TYPE_DEV_NODE);
+					if(psPDumpTraceBufferOUT->eError != PVRSRV_OK)
+					{
+						goto PDumpTraceBuffer_exit;
+					}
+				}
+
+
+	psPDumpTraceBufferOUT->eError =
+		PVRSRVPDumpTraceBufferKM(
+					hDeviceNodeInt,
+					psPDumpTraceBufferIN->ui32PDumpFlags);
+
+
+
+
+PDumpTraceBuffer_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePDumpSignatureBuffer(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PDUMPSIGNATUREBUFFER *psPDumpSignatureBufferIN,
+					  PVRSRV_BRIDGE_OUT_PDUMPSIGNATUREBUFFER *psPDumpSignatureBufferOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hDeviceNodeInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psPDumpSignatureBufferOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &hDeviceNodeInt,
+											psPDumpSignatureBufferIN->hDeviceNode,
+											PVRSRV_HANDLE_TYPE_DEV_NODE);
+					if(psPDumpSignatureBufferOUT->eError != PVRSRV_OK)
+					{
+						goto PDumpSignatureBuffer_exit;
+					}
+				}
+
+
+	psPDumpSignatureBufferOUT->eError =
+		PVRSRVPDumpSignatureBufferKM(
+					hDeviceNodeInt,
+					psPDumpSignatureBufferIN->ui32PDumpFlags);
+
+
+
+
+PDumpSignatureBuffer_exit:
+
+	return 0;
+}
+
+
+
+/* *************************************************************************** 
+ * Server bridge dispatch related glue 
+ */
+
+
+PVRSRV_ERROR InitRGXPDUMPBridge(IMG_VOID);
+PVRSRV_ERROR DeinitRGXPDUMPBridge(IMG_VOID);
+
+/*
+ * Register all RGXPDUMP functions with services
+ */
+PVRSRV_ERROR InitRGXPDUMPBridge(IMG_VOID)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP, PVRSRV_BRIDGE_RGXPDUMP_PDUMPTRACEBUFFER, PVRSRVBridgePDumpTraceBuffer,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP, PVRSRV_BRIDGE_RGXPDUMP_PDUMPSIGNATUREBUFFER, PVRSRVBridgePDumpSignatureBuffer,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all rgxpdump functions with services
+ */
+PVRSRV_ERROR DeinitRGXPDUMPBridge(IMG_VOID)
+{
+	return PVRSRV_OK;
+}
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/rgxta3d_bridge/common_rgxta3d_bridge.h b/drivers/external_drivers/intel_media/graphics/rgx/generated/rgxta3d_bridge/common_rgxta3d_bridge.h
new file mode 100644
index 0000000..90e6ced
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/rgxta3d_bridge/common_rgxta3d_bridge.h
@@ -0,0 +1,522 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common bridge header for rgxta3d
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures that are used by both
+                the client and sever side of the bridge for rgxta3d
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_RGXTA3D_BRIDGE_H
+#define COMMON_RGXTA3D_BRIDGE_H
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+#include "sync_external.h"
+#include "rgx_fwif_shared.h"
+
+
+#define PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST			0
+#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATA			PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYHWRTDATA			PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERTARGET			PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERTARGET			PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATEZSBUFFER			PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+4
+#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYZSBUFFER			PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+5
+#define PVRSRV_BRIDGE_RGXTA3D_RGXPOPULATEZSBUFFER			PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+6
+#define PVRSRV_BRIDGE_RGXTA3D_RGXUNPOPULATEZSBUFFER			PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+7
+#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATEFREELIST			PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+8
+#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYFREELIST			PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+9
+#define PVRSRV_BRIDGE_RGXTA3D_RGXADDBLOCKTOFREELIST			PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+10
+#define PVRSRV_BRIDGE_RGXTA3D_RGXREMOVEBLOCKFROMFREELIST			PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+11
+#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERCONTEXT			PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+12
+#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERCONTEXT			PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+13
+#define PVRSRV_BRIDGE_RGXTA3D_RGXKICKTA3D			PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+14
+#define PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPRIORITY			PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+15
+#define PVRSRV_BRIDGE_RGXTA3D_RGXGETLASTRENDERCONTEXTRESETREASON			PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+16
+#define PVRSRV_BRIDGE_RGXTA3D_RGXGETPARTIALRENDERCOUNT			PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+17
+#define PVRSRV_BRIDGE_RGXTA3D_RGXKICKSYNCTA			PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+18
+#define PVRSRV_BRIDGE_RGXTA3D_CMD_LAST			(PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+18)
+
+
+/*******************************************
+            RGXCreateHWRTData          
+ *******************************************/
+
+/* Bridge in structure for RGXCreateHWRTData */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATA_TAG
+{
+	IMG_HANDLE hDevNode;
+	IMG_UINT32 ui32RenderTarget;
+	IMG_DEV_VIRTADDR sPMMlistDevVAddr;
+	IMG_DEV_VIRTADDR sVFPPageTableAddr;
+	IMG_HANDLE * phapsFreeLists;
+	IMG_UINT32 ui32PPPScreen;
+	IMG_UINT32 ui32PPPGridOffset;
+	IMG_UINT64 ui64PPPMultiSampleCtl;
+	IMG_UINT32 ui32TPCStride;
+	IMG_DEV_VIRTADDR sTailPtrsDevVAddr;
+	IMG_UINT32 ui32TPCSize;
+	IMG_UINT32 ui32TEScreen;
+	IMG_UINT32 ui32TEAA;
+	IMG_UINT32 ui32TEMTILE1;
+	IMG_UINT32 ui32TEMTILE2;
+	IMG_UINT32 ui32MTileStride;
+	IMG_UINT32 ui32ui32ISPMergeLowerX;
+	IMG_UINT32 ui32ui32ISPMergeLowerY;
+	IMG_UINT32 ui32ui32ISPMergeUpperX;
+	IMG_UINT32 ui32ui32ISPMergeUpperY;
+	IMG_UINT32 ui32ui32ISPMergeScaleX;
+	IMG_UINT32 ui32ui32ISPMergeScaleY;
+	IMG_UINT16 ui16MaxRTs;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATA;
+
+
+/* Bridge out structure for RGXCreateHWRTData */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATA_TAG
+{
+	IMG_HANDLE hCleanupCookie;
+	IMG_HANDLE hRTACtlMemDesc;
+	IMG_HANDLE hsHWRTDataMemDesc;
+	IMG_UINT32 ui32FWHWRTData;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATA;
+
+/*******************************************
+            RGXDestroyHWRTData          
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyHWRTData */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYHWRTDATA_TAG
+{
+	IMG_HANDLE hCleanupCookie;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYHWRTDATA;
+
+
+/* Bridge out structure for RGXDestroyHWRTData */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYHWRTDATA_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYHWRTDATA;
+
+/*******************************************
+            RGXCreateRenderTarget          
+ *******************************************/
+
+/* Bridge in structure for RGXCreateRenderTarget */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATERENDERTARGET_TAG
+{
+	IMG_HANDLE hDevNode;
+	IMG_DEV_VIRTADDR spsVHeapTableDevVAddr;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCREATERENDERTARGET;
+
+
+/* Bridge out structure for RGXCreateRenderTarget */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATERENDERTARGET_TAG
+{
+	IMG_HANDLE hsRenderTargetMemDesc;
+	IMG_UINT32 ui32sRenderTargetFWDevVAddr;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCREATERENDERTARGET;
+
+/*******************************************
+            RGXDestroyRenderTarget          
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyRenderTarget */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYRENDERTARGET_TAG
+{
+	IMG_HANDLE hsRenderTargetMemDesc;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYRENDERTARGET;
+
+
+/* Bridge out structure for RGXDestroyRenderTarget */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERTARGET_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERTARGET;
+
+/*******************************************
+            RGXCreateZSBuffer          
+ *******************************************/
+
+/* Bridge in structure for RGXCreateZSBuffer */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATEZSBUFFER_TAG
+{
+	IMG_HANDLE hDevNode;
+	IMG_HANDLE hReservation;
+	IMG_HANDLE hPMR;
+	PVRSRV_MEMALLOCFLAGS_T uiMapFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCREATEZSBUFFER;
+
+
+/* Bridge out structure for RGXCreateZSBuffer */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEZSBUFFER_TAG
+{
+	IMG_HANDLE hsZSBufferKM;
+	IMG_UINT32 ui32sZSBufferFWDevVAddr;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCREATEZSBUFFER;
+
+/*******************************************
+            RGXDestroyZSBuffer          
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyZSBuffer */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYZSBUFFER_TAG
+{
+	IMG_HANDLE hsZSBufferMemDesc;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYZSBUFFER;
+
+
+/* Bridge out structure for RGXDestroyZSBuffer */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYZSBUFFER_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYZSBUFFER;
+
+/*******************************************
+            RGXPopulateZSBuffer          
+ *******************************************/
+
+/* Bridge in structure for RGXPopulateZSBuffer */
+typedef struct PVRSRV_BRIDGE_IN_RGXPOPULATEZSBUFFER_TAG
+{
+	IMG_HANDLE hsZSBufferKM;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXPOPULATEZSBUFFER;
+
+
+/* Bridge out structure for RGXPopulateZSBuffer */
+typedef struct PVRSRV_BRIDGE_OUT_RGXPOPULATEZSBUFFER_TAG
+{
+	IMG_HANDLE hsPopulation;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXPOPULATEZSBUFFER;
+
+/*******************************************
+            RGXUnpopulateZSBuffer          
+ *******************************************/
+
+/* Bridge in structure for RGXUnpopulateZSBuffer */
+typedef struct PVRSRV_BRIDGE_IN_RGXUNPOPULATEZSBUFFER_TAG
+{
+	IMG_HANDLE hsPopulation;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXUNPOPULATEZSBUFFER;
+
+
+/* Bridge out structure for RGXUnpopulateZSBuffer */
+typedef struct PVRSRV_BRIDGE_OUT_RGXUNPOPULATEZSBUFFER_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXUNPOPULATEZSBUFFER;
+
+/*******************************************
+            RGXCreateFreeList          
+ *******************************************/
+
+/* Bridge in structure for RGXCreateFreeList */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATEFREELIST_TAG
+{
+	IMG_HANDLE hDevNode;
+	IMG_UINT32 ui32ui32MaxFLPages;
+	IMG_UINT32 ui32ui32InitFLPages;
+	IMG_UINT32 ui32ui32GrowFLPages;
+	IMG_BOOL bbFreeListCheck;
+	IMG_DEV_VIRTADDR spsFreeListDevVAddr;
+	IMG_HANDLE hsFreeListPMR;
+	IMG_DEVMEM_OFFSET_T uiPMROffset;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCREATEFREELIST;
+
+
+/* Bridge out structure for RGXCreateFreeList */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST_TAG
+{
+	IMG_HANDLE hCleanupCookie;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST;
+
+/*******************************************
+            RGXDestroyFreeList          
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyFreeList */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYFREELIST_TAG
+{
+	IMG_HANDLE hCleanupCookie;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYFREELIST;
+
+
+/* Bridge out structure for RGXDestroyFreeList */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYFREELIST_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYFREELIST;
+
+/*******************************************
+            RGXAddBlockToFreeList          
+ *******************************************/
+
+/* Bridge in structure for RGXAddBlockToFreeList */
+typedef struct PVRSRV_BRIDGE_IN_RGXADDBLOCKTOFREELIST_TAG
+{
+	IMG_HANDLE hsFreeList;
+	IMG_UINT32 ui3232NumPages;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXADDBLOCKTOFREELIST;
+
+
+/* Bridge out structure for RGXAddBlockToFreeList */
+typedef struct PVRSRV_BRIDGE_OUT_RGXADDBLOCKTOFREELIST_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXADDBLOCKTOFREELIST;
+
+/*******************************************
+            RGXRemoveBlockFromFreeList          
+ *******************************************/
+
+/* Bridge in structure for RGXRemoveBlockFromFreeList */
+typedef struct PVRSRV_BRIDGE_IN_RGXREMOVEBLOCKFROMFREELIST_TAG
+{
+	IMG_HANDLE hsFreeList;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXREMOVEBLOCKFROMFREELIST;
+
+
+/* Bridge out structure for RGXRemoveBlockFromFreeList */
+typedef struct PVRSRV_BRIDGE_OUT_RGXREMOVEBLOCKFROMFREELIST_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXREMOVEBLOCKFROMFREELIST;
+
+/*******************************************
+            RGXCreateRenderContext          
+ *******************************************/
+
+/* Bridge in structure for RGXCreateRenderContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT_TAG
+{
+	IMG_HANDLE hDevNode;
+	IMG_UINT32 ui32Priority;
+	IMG_DEV_VIRTADDR sMCUFenceAddr;
+	IMG_DEV_VIRTADDR sVDMCallStackAddr;
+	IMG_UINT32 ui32FrameworkCmdize;
+	IMG_BYTE * psFrameworkCmd;
+	IMG_HANDLE hPrivData;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT;
+
+
+/* Bridge out structure for RGXCreateRenderContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT_TAG
+{
+	IMG_HANDLE hRenderContext;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT;
+
+/*******************************************
+            RGXDestroyRenderContext          
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyRenderContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYRENDERCONTEXT_TAG
+{
+	IMG_HANDLE hCleanupCookie;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYRENDERCONTEXT;
+
+
+/* Bridge out structure for RGXDestroyRenderContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERCONTEXT_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERCONTEXT;
+
+/*******************************************
+            RGXKickTA3D          
+ *******************************************/
+
+/* Bridge in structure for RGXKickTA3D */
+typedef struct PVRSRV_BRIDGE_IN_RGXKICKTA3D_TAG
+{
+	IMG_HANDLE hRenderContext;
+	IMG_UINT32 ui32ClientTAFenceCount;
+	PRGXFWIF_UFO_ADDR * psClientTAFenceUFOAddress;
+	IMG_UINT32 * pui32ClientTAFenceValue;
+	IMG_UINT32 ui32ClientTAUpdateCount;
+	PRGXFWIF_UFO_ADDR * psClientTAUpdateUFOAddress;
+	IMG_UINT32 * pui32ClientTAUpdateValue;
+	IMG_UINT32 ui32ServerTASyncPrims;
+	IMG_UINT32 * pui32ServerTASyncFlags;
+	IMG_HANDLE * phServerTASyncs;
+	IMG_UINT32 ui32Client3DFenceCount;
+	PRGXFWIF_UFO_ADDR * psClient3DFenceUFOAddress;
+	IMG_UINT32 * pui32Client3DFenceValue;
+	IMG_UINT32 ui32Client3DUpdateCount;
+	PRGXFWIF_UFO_ADDR * psClient3DUpdateUFOAddress;
+	IMG_UINT32 * pui32Client3DUpdateValue;
+	IMG_UINT32 ui32Server3DSyncPrims;
+	IMG_UINT32 * pui32Server3DSyncFlags;
+	IMG_HANDLE * phServer3DSyncs;
+	PRGXFWIF_UFO_ADDR sPRFenceUFOAddress;
+	IMG_UINT32 ui32FRFenceValue;
+	IMG_UINT32 ui32NumCheckFenceFDs;
+	IMG_INT32 * pi32CheckFenceFDs;
+	IMG_INT32 i32UpdateFenceFD;
+	IMG_UINT32 ui32TACmdSize;
+	IMG_BYTE * psTACmd;
+	IMG_UINT32 ui323DPRCmdSize;
+	IMG_BYTE * ps3DPRCmd;
+	IMG_UINT32 ui323DCmdSize;
+	IMG_BYTE * ps3DCmd;
+	IMG_UINT32 ui32ExternalJobReference;
+	IMG_UINT32 ui32InternalJobReference;
+	IMG_BOOL bbLastTAInScene;
+	IMG_BOOL bbKickTA;
+	IMG_BOOL bbKickPR;
+	IMG_BOOL bbKick3D;
+	IMG_BOOL bbAbort;
+	IMG_BOOL bbPDumpContinuous;
+	IMG_HANDLE hRTDataCleanup;
+	IMG_HANDLE hZBuffer;
+	IMG_HANDLE hSBuffer;
+	IMG_BOOL bbCommitRefCountsTA;
+	IMG_BOOL bbCommitRefCounts3D;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXKICKTA3D;
+
+
+/* Bridge out structure for RGXKickTA3D */
+typedef struct PVRSRV_BRIDGE_OUT_RGXKICKTA3D_TAG
+{
+	IMG_BOOL bbCommittedRefCountsTA;
+	IMG_BOOL bbCommittedRefCounts3D;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXKICKTA3D;
+
+/*******************************************
+            RGXSetRenderContextPriority          
+ *******************************************/
+
+/* Bridge in structure for RGXSetRenderContextPriority */
+typedef struct PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPRIORITY_TAG
+{
+	IMG_HANDLE hRenderContext;
+	IMG_UINT32 ui32Priority;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPRIORITY;
+
+
+/* Bridge out structure for RGXSetRenderContextPriority */
+typedef struct PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPRIORITY_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPRIORITY;
+
+/*******************************************
+            RGXGetLastRenderContextResetReason          
+ *******************************************/
+
+/* Bridge in structure for RGXGetLastRenderContextResetReason */
+typedef struct PVRSRV_BRIDGE_IN_RGXGETLASTRENDERCONTEXTRESETREASON_TAG
+{
+	IMG_HANDLE hRenderContext;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXGETLASTRENDERCONTEXTRESETREASON;
+
+
+/* Bridge out structure for RGXGetLastRenderContextResetReason */
+typedef struct PVRSRV_BRIDGE_OUT_RGXGETLASTRENDERCONTEXTRESETREASON_TAG
+{
+	IMG_UINT32 ui32LastResetReason;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXGETLASTRENDERCONTEXTRESETREASON;
+
+/*******************************************
+            RGXGetPartialRenderCount          
+ *******************************************/
+
+/* Bridge in structure for RGXGetPartialRenderCount */
+typedef struct PVRSRV_BRIDGE_IN_RGXGETPARTIALRENDERCOUNT_TAG
+{
+	IMG_HANDLE hHWRTDataMemDesc;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXGETPARTIALRENDERCOUNT;
+
+
+/* Bridge out structure for RGXGetPartialRenderCount */
+typedef struct PVRSRV_BRIDGE_OUT_RGXGETPARTIALRENDERCOUNT_TAG
+{
+	IMG_UINT32 ui32NumPartialRenders;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXGETPARTIALRENDERCOUNT;
+
+/*******************************************
+            RGXKickSyncTA          
+ *******************************************/
+
+/* Bridge in structure for RGXKickSyncTA */
+typedef struct PVRSRV_BRIDGE_IN_RGXKICKSYNCTA_TAG
+{
+	IMG_HANDLE hRenderContext;
+	IMG_UINT32 ui32ClientTAFenceCount;
+	PRGXFWIF_UFO_ADDR * psClientTAFenceUFOAddress;
+	IMG_UINT32 * pui32ClientTAFenceValue;
+	IMG_UINT32 ui32ClientTAUpdateCount;
+	PRGXFWIF_UFO_ADDR * psClientTAUpdateUFOAddress;
+	IMG_UINT32 * pui32ClientTAUpdateValue;
+	IMG_UINT32 ui32ServerTASyncPrims;
+	IMG_UINT32 * pui32ServerTASyncFlags;
+	IMG_HANDLE * phServerTASyncs;
+	IMG_UINT32 ui32Client3DFenceCount;
+	PRGXFWIF_UFO_ADDR * psClient3DFenceUFOAddress;
+	IMG_UINT32 * pui32Client3DFenceValue;
+	IMG_UINT32 ui32Client3DUpdateCount;
+	PRGXFWIF_UFO_ADDR * psClient3DUpdateUFOAddress;
+	IMG_UINT32 * pui32Client3DUpdateValue;
+	IMG_UINT32 ui32Server3DSyncPrims;
+	IMG_UINT32 * pui32Server3DSyncFlags;
+	IMG_HANDLE * phServer3DSyncs;
+	IMG_UINT32 ui32NumCheckFenceFDs;
+	IMG_INT32 * pi32CheckFenceFDs;
+	IMG_INT32 i32UpdateFenceFD;
+	IMG_BOOL bbPDumpContinuous;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXKICKSYNCTA;
+
+
+/* Bridge out structure for RGXKickSyncTA */
+typedef struct PVRSRV_BRIDGE_OUT_RGXKICKSYNCTA_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXKICKSYNCTA;
+
+#endif /* COMMON_RGXTA3D_BRIDGE_H */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/rgxta3d_bridge/server_rgxta3d_bridge.c b/drivers/external_drivers/intel_media/graphics/rgx/generated/rgxta3d_bridge/server_rgxta3d_bridge.c
new file mode 100644
index 0000000..94451ad
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/rgxta3d_bridge/server_rgxta3d_bridge.c
@@ -0,0 +1,2189 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server bridge for rgxta3d
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for rgxta3d
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <asm/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxta3d.h"
+
+
+#include "common_rgxta3d_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#if defined (SUPPORT_AUTH)
+#include "osauth.h"
+#endif
+
+#include <linux/slab.h>
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+ 
+static IMG_INT
+PVRSRVBridgeRGXCreateHWRTData(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATA *psRGXCreateHWRTDataIN,
+					  PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATA *psRGXCreateHWRTDataOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hDevNodeInt = IMG_NULL;
+	RGX_FREELIST * *psapsFreeListsInt = IMG_NULL;
+	IMG_HANDLE *hapsFreeListsInt2 = IMG_NULL;
+	RGX_RTDATA_CLEANUP_DATA * psCleanupCookieInt = IMG_NULL;
+	DEVMEM_MEMDESC * psRTACtlMemDescInt = IMG_NULL;
+	DEVMEM_MEMDESC * pssHWRTDataMemDescInt = IMG_NULL;
+
+
+
+	psRGXCreateHWRTDataOUT->hCleanupCookie = IMG_NULL;
+
+	
+	{
+		psapsFreeListsInt = OSAllocMem(RGXFW_MAX_FREELISTS * sizeof(RGX_FREELIST *));
+		if (!psapsFreeListsInt)
+		{
+			psRGXCreateHWRTDataOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXCreateHWRTData_exit;
+		}
+		hapsFreeListsInt2 = OSAllocMem(RGXFW_MAX_FREELISTS * sizeof(IMG_HANDLE));
+		if (!hapsFreeListsInt2)
+		{
+			psRGXCreateHWRTDataOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXCreateHWRTData_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXCreateHWRTDataIN->phapsFreeLists, RGXFW_MAX_FREELISTS * sizeof(IMG_HANDLE))
+				|| (OSCopyFromUser(NULL, hapsFreeListsInt2, psRGXCreateHWRTDataIN->phapsFreeLists,
+				RGXFW_MAX_FREELISTS * sizeof(IMG_HANDLE)) != PVRSRV_OK) )
+			{
+				psRGXCreateHWRTDataOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXCreateHWRTData_exit;
+			}
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXCreateHWRTDataOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &hDevNodeInt,
+											psRGXCreateHWRTDataIN->hDevNode,
+											PVRSRV_HANDLE_TYPE_DEV_NODE);
+					if(psRGXCreateHWRTDataOUT->eError != PVRSRV_OK)
+					{
+						goto RGXCreateHWRTData_exit;
+					}
+				}
+
+
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<RGXFW_MAX_FREELISTS;i++)
+		{
+				{
+					/* Look up the address from the handle */
+					psRGXCreateHWRTDataOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psapsFreeListsInt[i],
+											hapsFreeListsInt2[i],
+											PVRSRV_HANDLE_TYPE_RGX_FREELIST);
+					if(psRGXCreateHWRTDataOUT->eError != PVRSRV_OK)
+					{
+						goto RGXCreateHWRTData_exit;
+					}
+				}
+
+		}
+	}
+
+	psRGXCreateHWRTDataOUT->eError =
+		RGXCreateHWRTData(
+					hDevNodeInt,
+					psRGXCreateHWRTDataIN->ui32RenderTarget,
+					psRGXCreateHWRTDataIN->sPMMlistDevVAddr,
+					psRGXCreateHWRTDataIN->sVFPPageTableAddr,
+					psapsFreeListsInt,
+					&psCleanupCookieInt,
+					&psRTACtlMemDescInt,
+					psRGXCreateHWRTDataIN->ui32PPPScreen,
+					psRGXCreateHWRTDataIN->ui32PPPGridOffset,
+					psRGXCreateHWRTDataIN->ui64PPPMultiSampleCtl,
+					psRGXCreateHWRTDataIN->ui32TPCStride,
+					psRGXCreateHWRTDataIN->sTailPtrsDevVAddr,
+					psRGXCreateHWRTDataIN->ui32TPCSize,
+					psRGXCreateHWRTDataIN->ui32TEScreen,
+					psRGXCreateHWRTDataIN->ui32TEAA,
+					psRGXCreateHWRTDataIN->ui32TEMTILE1,
+					psRGXCreateHWRTDataIN->ui32TEMTILE2,
+					psRGXCreateHWRTDataIN->ui32MTileStride,
+					psRGXCreateHWRTDataIN->ui32ui32ISPMergeLowerX,
+					psRGXCreateHWRTDataIN->ui32ui32ISPMergeLowerY,
+					psRGXCreateHWRTDataIN->ui32ui32ISPMergeUpperX,
+					psRGXCreateHWRTDataIN->ui32ui32ISPMergeUpperY,
+					psRGXCreateHWRTDataIN->ui32ui32ISPMergeScaleX,
+					psRGXCreateHWRTDataIN->ui32ui32ISPMergeScaleY,
+					psRGXCreateHWRTDataIN->ui16MaxRTs,
+					&pssHWRTDataMemDescInt,
+					&psRGXCreateHWRTDataOUT->ui32FWHWRTData);
+	/* Exit early if bridged call fails */
+	if(psRGXCreateHWRTDataOUT->eError != PVRSRV_OK)
+	{
+		goto RGXCreateHWRTData_exit;
+	}
+
+
+	psRGXCreateHWRTDataOUT->eError = PVRSRVAllocHandle(psConnection->psHandleBase,
+							&psRGXCreateHWRTDataOUT->hCleanupCookie,
+							(IMG_VOID *) psCleanupCookieInt,
+							PVRSRV_HANDLE_TYPE_RGX_RTDATA_CLEANUP,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+							,(PFN_HANDLE_RELEASE)&RGXDestroyHWRTData);
+	if (psRGXCreateHWRTDataOUT->eError != PVRSRV_OK)
+	{
+		goto RGXCreateHWRTData_exit;
+	}
+
+
+	psRGXCreateHWRTDataOUT->eError = PVRSRVAllocSubHandle(psConnection->psHandleBase,
+							&psRGXCreateHWRTDataOUT->hRTACtlMemDesc,
+							(IMG_VOID *) psRTACtlMemDescInt,
+							PVRSRV_HANDLE_TYPE_RGX_FW_MEMDESC,
+							PVRSRV_HANDLE_ALLOC_FLAG_NONE
+							,psRGXCreateHWRTDataOUT->hCleanupCookie);
+	if (psRGXCreateHWRTDataOUT->eError != PVRSRV_OK)
+	{
+		goto RGXCreateHWRTData_exit;
+	}
+
+
+	psRGXCreateHWRTDataOUT->eError = PVRSRVAllocSubHandle(psConnection->psHandleBase,
+							&psRGXCreateHWRTDataOUT->hsHWRTDataMemDesc,
+							(IMG_VOID *) pssHWRTDataMemDescInt,
+							PVRSRV_HANDLE_TYPE_RGX_FW_MEMDESC,
+							PVRSRV_HANDLE_ALLOC_FLAG_NONE
+							,psRGXCreateHWRTDataOUT->hCleanupCookie);
+	if (psRGXCreateHWRTDataOUT->eError != PVRSRV_OK)
+	{
+		goto RGXCreateHWRTData_exit;
+	}
+
+
+
+
+RGXCreateHWRTData_exit:
+	if (psRGXCreateHWRTDataOUT->eError != PVRSRV_OK)
+	{
+		if (psRGXCreateHWRTDataOUT->hCleanupCookie)
+		{
+			PVRSRV_ERROR eError = PVRSRVReleaseHandle(psConnection->psHandleBase,
+						(IMG_HANDLE) psRGXCreateHWRTDataOUT->hCleanupCookie,
+						PVRSRV_HANDLE_TYPE_RGX_RTDATA_CLEANUP);
+
+			/* Releasing the handle should free/destroy/release the resource. This should never fail... */
+			PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY));
+
+			/* Avoid freeing/destroying/releasing the resource a second time below */
+			psCleanupCookieInt = IMG_NULL;
+		}
+
+
+		if (psCleanupCookieInt)
+		{
+			RGXDestroyHWRTData(psCleanupCookieInt);
+		}
+	}
+
+	if (psapsFreeListsInt)
+		OSFreeMem(psapsFreeListsInt);
+	if (hapsFreeListsInt2)
+		OSFreeMem(hapsFreeListsInt2);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXDestroyHWRTData(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXDESTROYHWRTDATA *psRGXDestroyHWRTDataIN,
+					  PVRSRV_BRIDGE_OUT_RGXDESTROYHWRTDATA *psRGXDestroyHWRTDataOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+	psRGXDestroyHWRTDataOUT->eError =
+		PVRSRVReleaseHandle(psConnection->psHandleBase,
+					(IMG_HANDLE) psRGXDestroyHWRTDataIN->hCleanupCookie,
+					PVRSRV_HANDLE_TYPE_RGX_RTDATA_CLEANUP);
+	if ((psRGXDestroyHWRTDataOUT->eError != PVRSRV_OK) && (psRGXDestroyHWRTDataOUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_ASSERT(0);
+		goto RGXDestroyHWRTData_exit;
+	}
+
+
+
+RGXDestroyHWRTData_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXCreateRenderTarget(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXCREATERENDERTARGET *psRGXCreateRenderTargetIN,
+					  PVRSRV_BRIDGE_OUT_RGXCREATERENDERTARGET *psRGXCreateRenderTargetOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hDevNodeInt = IMG_NULL;
+	RGX_RT_CLEANUP_DATA * pssRenderTargetMemDescInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXCreateRenderTargetOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &hDevNodeInt,
+											psRGXCreateRenderTargetIN->hDevNode,
+											PVRSRV_HANDLE_TYPE_DEV_NODE);
+					if(psRGXCreateRenderTargetOUT->eError != PVRSRV_OK)
+					{
+						goto RGXCreateRenderTarget_exit;
+					}
+				}
+
+
+	psRGXCreateRenderTargetOUT->eError =
+		RGXCreateRenderTarget(
+					hDevNodeInt,
+					psRGXCreateRenderTargetIN->spsVHeapTableDevVAddr,
+					&pssRenderTargetMemDescInt,
+					&psRGXCreateRenderTargetOUT->ui32sRenderTargetFWDevVAddr);
+	/* Exit early if bridged call fails */
+	if(psRGXCreateRenderTargetOUT->eError != PVRSRV_OK)
+	{
+		goto RGXCreateRenderTarget_exit;
+	}
+
+
+	psRGXCreateRenderTargetOUT->eError = PVRSRVAllocHandle(psConnection->psHandleBase,
+							&psRGXCreateRenderTargetOUT->hsRenderTargetMemDesc,
+							(IMG_VOID *) pssRenderTargetMemDescInt,
+							PVRSRV_HANDLE_TYPE_RGX_FWIF_RENDERTARGET,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+							,(PFN_HANDLE_RELEASE)&RGXDestroyRenderTarget);
+	if (psRGXCreateRenderTargetOUT->eError != PVRSRV_OK)
+	{
+		goto RGXCreateRenderTarget_exit;
+	}
+
+
+
+
+RGXCreateRenderTarget_exit:
+	if (psRGXCreateRenderTargetOUT->eError != PVRSRV_OK)
+	{
+		if (pssRenderTargetMemDescInt)
+		{
+			RGXDestroyRenderTarget(pssRenderTargetMemDescInt);
+		}
+	}
+
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXDestroyRenderTarget(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXDESTROYRENDERTARGET *psRGXDestroyRenderTargetIN,
+					  PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERTARGET *psRGXDestroyRenderTargetOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+	psRGXDestroyRenderTargetOUT->eError =
+		PVRSRVReleaseHandle(psConnection->psHandleBase,
+					(IMG_HANDLE) psRGXDestroyRenderTargetIN->hsRenderTargetMemDesc,
+					PVRSRV_HANDLE_TYPE_RGX_FWIF_RENDERTARGET);
+	if ((psRGXDestroyRenderTargetOUT->eError != PVRSRV_OK) && (psRGXDestroyRenderTargetOUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_ASSERT(0);
+		goto RGXDestroyRenderTarget_exit;
+	}
+
+
+
+RGXDestroyRenderTarget_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXCreateZSBuffer(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXCREATEZSBUFFER *psRGXCreateZSBufferIN,
+					  PVRSRV_BRIDGE_OUT_RGXCREATEZSBUFFER *psRGXCreateZSBufferOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hDevNodeInt = IMG_NULL;
+	DEVMEMINT_RESERVATION * psReservationInt = IMG_NULL;
+	PMR * psPMRInt = IMG_NULL;
+	RGX_ZSBUFFER_DATA * pssZSBufferKMInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXCreateZSBufferOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &hDevNodeInt,
+											psRGXCreateZSBufferIN->hDevNode,
+											PVRSRV_HANDLE_TYPE_DEV_NODE);
+					if(psRGXCreateZSBufferOUT->eError != PVRSRV_OK)
+					{
+						goto RGXCreateZSBuffer_exit;
+					}
+				}
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXCreateZSBufferOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psReservationInt,
+											psRGXCreateZSBufferIN->hReservation,
+											PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION);
+					if(psRGXCreateZSBufferOUT->eError != PVRSRV_OK)
+					{
+						goto RGXCreateZSBuffer_exit;
+					}
+				}
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXCreateZSBufferOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psPMRInt,
+											psRGXCreateZSBufferIN->hPMR,
+											PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+					if(psRGXCreateZSBufferOUT->eError != PVRSRV_OK)
+					{
+						goto RGXCreateZSBuffer_exit;
+					}
+				}
+
+
+	psRGXCreateZSBufferOUT->eError =
+		RGXCreateZSBufferKM(
+					hDevNodeInt,
+					psReservationInt,
+					psPMRInt,
+					psRGXCreateZSBufferIN->uiMapFlags,
+					&pssZSBufferKMInt,
+					&psRGXCreateZSBufferOUT->ui32sZSBufferFWDevVAddr);
+	/* Exit early if bridged call fails */
+	if(psRGXCreateZSBufferOUT->eError != PVRSRV_OK)
+	{
+		goto RGXCreateZSBuffer_exit;
+	}
+
+
+	psRGXCreateZSBufferOUT->eError = PVRSRVAllocHandle(psConnection->psHandleBase,
+							&psRGXCreateZSBufferOUT->hsZSBufferKM,
+							(IMG_VOID *) pssZSBufferKMInt,
+							PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+							,(PFN_HANDLE_RELEASE)&RGXDestroyZSBufferKM);
+	if (psRGXCreateZSBufferOUT->eError != PVRSRV_OK)
+	{
+		goto RGXCreateZSBuffer_exit;
+	}
+
+
+
+
+RGXCreateZSBuffer_exit:
+	if (psRGXCreateZSBufferOUT->eError != PVRSRV_OK)
+	{
+		if (pssZSBufferKMInt)
+		{
+			RGXDestroyZSBufferKM(pssZSBufferKMInt);
+		}
+	}
+
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXDestroyZSBuffer(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXDESTROYZSBUFFER *psRGXDestroyZSBufferIN,
+					  PVRSRV_BRIDGE_OUT_RGXDESTROYZSBUFFER *psRGXDestroyZSBufferOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+	psRGXDestroyZSBufferOUT->eError =
+		PVRSRVReleaseHandle(psConnection->psHandleBase,
+					(IMG_HANDLE) psRGXDestroyZSBufferIN->hsZSBufferMemDesc,
+					PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER);
+	if ((psRGXDestroyZSBufferOUT->eError != PVRSRV_OK) && (psRGXDestroyZSBufferOUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_ASSERT(0);
+		goto RGXDestroyZSBuffer_exit;
+	}
+
+
+
+RGXDestroyZSBuffer_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXPopulateZSBuffer(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXPOPULATEZSBUFFER *psRGXPopulateZSBufferIN,
+					  PVRSRV_BRIDGE_OUT_RGXPOPULATEZSBUFFER *psRGXPopulateZSBufferOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	RGX_ZSBUFFER_DATA * pssZSBufferKMInt = IMG_NULL;
+	RGX_POPULATION * pssPopulationInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXPopulateZSBufferOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &pssZSBufferKMInt,
+											psRGXPopulateZSBufferIN->hsZSBufferKM,
+											PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER);
+					if(psRGXPopulateZSBufferOUT->eError != PVRSRV_OK)
+					{
+						goto RGXPopulateZSBuffer_exit;
+					}
+				}
+
+
+	psRGXPopulateZSBufferOUT->eError =
+		RGXPopulateZSBufferKM(
+					pssZSBufferKMInt,
+					&pssPopulationInt);
+	/* Exit early if bridged call fails */
+	if(psRGXPopulateZSBufferOUT->eError != PVRSRV_OK)
+	{
+		goto RGXPopulateZSBuffer_exit;
+	}
+
+
+	psRGXPopulateZSBufferOUT->eError = PVRSRVAllocHandle(psConnection->psHandleBase,
+							&psRGXPopulateZSBufferOUT->hsPopulation,
+							(IMG_VOID *) pssPopulationInt,
+							PVRSRV_HANDLE_TYPE_RGX_POPULATION,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+							,(PFN_HANDLE_RELEASE)&RGXUnpopulateZSBufferKM);
+	if (psRGXPopulateZSBufferOUT->eError != PVRSRV_OK)
+	{
+		goto RGXPopulateZSBuffer_exit;
+	}
+
+
+
+
+RGXPopulateZSBuffer_exit:
+	if (psRGXPopulateZSBufferOUT->eError != PVRSRV_OK)
+	{
+		if (pssPopulationInt)
+		{
+			RGXUnpopulateZSBufferKM(pssPopulationInt);
+		}
+	}
+
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXUnpopulateZSBuffer(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXUNPOPULATEZSBUFFER *psRGXUnpopulateZSBufferIN,
+					  PVRSRV_BRIDGE_OUT_RGXUNPOPULATEZSBUFFER *psRGXUnpopulateZSBufferOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+	psRGXUnpopulateZSBufferOUT->eError =
+		PVRSRVReleaseHandle(psConnection->psHandleBase,
+					(IMG_HANDLE) psRGXUnpopulateZSBufferIN->hsPopulation,
+					PVRSRV_HANDLE_TYPE_RGX_POPULATION);
+	if ((psRGXUnpopulateZSBufferOUT->eError != PVRSRV_OK) && (psRGXUnpopulateZSBufferOUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_ASSERT(0);
+		goto RGXUnpopulateZSBuffer_exit;
+	}
+
+
+
+RGXUnpopulateZSBuffer_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXCreateFreeList(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXCREATEFREELIST *psRGXCreateFreeListIN,
+					  PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST *psRGXCreateFreeListOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hDevNodeInt = IMG_NULL;
+	PMR * pssFreeListPMRInt = IMG_NULL;
+	RGX_FREELIST * psCleanupCookieInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXCreateFreeListOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &hDevNodeInt,
+											psRGXCreateFreeListIN->hDevNode,
+											PVRSRV_HANDLE_TYPE_DEV_NODE);
+					if(psRGXCreateFreeListOUT->eError != PVRSRV_OK)
+					{
+						goto RGXCreateFreeList_exit;
+					}
+				}
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXCreateFreeListOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &pssFreeListPMRInt,
+											psRGXCreateFreeListIN->hsFreeListPMR,
+											PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+					if(psRGXCreateFreeListOUT->eError != PVRSRV_OK)
+					{
+						goto RGXCreateFreeList_exit;
+					}
+				}
+
+
+	psRGXCreateFreeListOUT->eError =
+		RGXCreateFreeList(
+					hDevNodeInt,
+					psRGXCreateFreeListIN->ui32ui32MaxFLPages,
+					psRGXCreateFreeListIN->ui32ui32InitFLPages,
+					psRGXCreateFreeListIN->ui32ui32GrowFLPages,
+					psRGXCreateFreeListIN->bbFreeListCheck,
+					psRGXCreateFreeListIN->spsFreeListDevVAddr,
+					pssFreeListPMRInt,
+					psRGXCreateFreeListIN->uiPMROffset,
+					&psCleanupCookieInt);
+	/* Exit early if bridged call fails */
+	if(psRGXCreateFreeListOUT->eError != PVRSRV_OK)
+	{
+		goto RGXCreateFreeList_exit;
+	}
+
+
+	psRGXCreateFreeListOUT->eError = PVRSRVAllocHandle(psConnection->psHandleBase,
+							&psRGXCreateFreeListOUT->hCleanupCookie,
+							(IMG_VOID *) psCleanupCookieInt,
+							PVRSRV_HANDLE_TYPE_RGX_FREELIST,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+							,(PFN_HANDLE_RELEASE)&RGXDestroyFreeList);
+	if (psRGXCreateFreeListOUT->eError != PVRSRV_OK)
+	{
+		goto RGXCreateFreeList_exit;
+	}
+
+
+
+
+RGXCreateFreeList_exit:
+	if (psRGXCreateFreeListOUT->eError != PVRSRV_OK)
+	{
+		if (psCleanupCookieInt)
+		{
+			RGXDestroyFreeList(psCleanupCookieInt);
+		}
+	}
+
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXDestroyFreeList(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXDESTROYFREELIST *psRGXDestroyFreeListIN,
+					  PVRSRV_BRIDGE_OUT_RGXDESTROYFREELIST *psRGXDestroyFreeListOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+	psRGXDestroyFreeListOUT->eError =
+		PVRSRVReleaseHandle(psConnection->psHandleBase,
+					(IMG_HANDLE) psRGXDestroyFreeListIN->hCleanupCookie,
+					PVRSRV_HANDLE_TYPE_RGX_FREELIST);
+	if ((psRGXDestroyFreeListOUT->eError != PVRSRV_OK) && (psRGXDestroyFreeListOUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_ASSERT(0);
+		goto RGXDestroyFreeList_exit;
+	}
+
+
+
+RGXDestroyFreeList_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXAddBlockToFreeList(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXADDBLOCKTOFREELIST *psRGXAddBlockToFreeListIN,
+					  PVRSRV_BRIDGE_OUT_RGXADDBLOCKTOFREELIST *psRGXAddBlockToFreeListOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	RGX_FREELIST * pssFreeListInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXAddBlockToFreeListOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &pssFreeListInt,
+											psRGXAddBlockToFreeListIN->hsFreeList,
+											PVRSRV_HANDLE_TYPE_RGX_FREELIST);
+					if(psRGXAddBlockToFreeListOUT->eError != PVRSRV_OK)
+					{
+						goto RGXAddBlockToFreeList_exit;
+					}
+				}
+
+
+	psRGXAddBlockToFreeListOUT->eError =
+		RGXAddBlockToFreeListKM(
+					pssFreeListInt,
+					psRGXAddBlockToFreeListIN->ui3232NumPages);
+
+
+
+
+RGXAddBlockToFreeList_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXRemoveBlockFromFreeList(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXREMOVEBLOCKFROMFREELIST *psRGXRemoveBlockFromFreeListIN,
+					  PVRSRV_BRIDGE_OUT_RGXREMOVEBLOCKFROMFREELIST *psRGXRemoveBlockFromFreeListOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	RGX_FREELIST * pssFreeListInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXRemoveBlockFromFreeListOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &pssFreeListInt,
+											psRGXRemoveBlockFromFreeListIN->hsFreeList,
+											PVRSRV_HANDLE_TYPE_RGX_FREELIST);
+					if(psRGXRemoveBlockFromFreeListOUT->eError != PVRSRV_OK)
+					{
+						goto RGXRemoveBlockFromFreeList_exit;
+					}
+				}
+
+
+	psRGXRemoveBlockFromFreeListOUT->eError =
+		RGXRemoveBlockFromFreeListKM(
+					pssFreeListInt);
+
+
+
+
+RGXRemoveBlockFromFreeList_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXCreateRenderContext(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT *psRGXCreateRenderContextIN,
+					  PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT *psRGXCreateRenderContextOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hDevNodeInt = IMG_NULL;
+	IMG_BYTE *psFrameworkCmdInt = IMG_NULL;
+	IMG_HANDLE hPrivDataInt = IMG_NULL;
+	RGX_SERVER_RENDER_CONTEXT * psRenderContextInt = IMG_NULL;
+
+
+
+
+	if (psRGXCreateRenderContextIN->ui32FrameworkCmdize != 0)
+	{
+		psFrameworkCmdInt = OSAllocMem(psRGXCreateRenderContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE));
+		if (!psFrameworkCmdInt)
+		{
+			psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXCreateRenderContext_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXCreateRenderContextIN->psFrameworkCmd, psRGXCreateRenderContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE))
+				|| (OSCopyFromUser(NULL, psFrameworkCmdInt, psRGXCreateRenderContextIN->psFrameworkCmd,
+				psRGXCreateRenderContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE)) != PVRSRV_OK) )
+			{
+				psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXCreateRenderContext_exit;
+			}
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXCreateRenderContextOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &hDevNodeInt,
+											psRGXCreateRenderContextIN->hDevNode,
+											PVRSRV_HANDLE_TYPE_DEV_NODE);
+					if(psRGXCreateRenderContextOUT->eError != PVRSRV_OK)
+					{
+						goto RGXCreateRenderContext_exit;
+					}
+				}
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXCreateRenderContextOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &hPrivDataInt,
+											psRGXCreateRenderContextIN->hPrivData,
+											PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+					if(psRGXCreateRenderContextOUT->eError != PVRSRV_OK)
+					{
+						goto RGXCreateRenderContext_exit;
+					}
+				}
+
+
+	psRGXCreateRenderContextOUT->eError =
+		PVRSRVRGXCreateRenderContextKM(psConnection,
+					hDevNodeInt,
+					psRGXCreateRenderContextIN->ui32Priority,
+					psRGXCreateRenderContextIN->sMCUFenceAddr,
+					psRGXCreateRenderContextIN->sVDMCallStackAddr,
+					psRGXCreateRenderContextIN->ui32FrameworkCmdize,
+					psFrameworkCmdInt,
+					hPrivDataInt,
+					&psRenderContextInt);
+	/* Exit early if bridged call fails */
+	if(psRGXCreateRenderContextOUT->eError != PVRSRV_OK)
+	{
+		goto RGXCreateRenderContext_exit;
+	}
+
+
+	psRGXCreateRenderContextOUT->eError = PVRSRVAllocHandle(psConnection->psHandleBase,
+							&psRGXCreateRenderContextOUT->hRenderContext,
+							(IMG_VOID *) psRenderContextInt,
+							PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+							,(PFN_HANDLE_RELEASE)&PVRSRVRGXDestroyRenderContextKM);
+	if (psRGXCreateRenderContextOUT->eError != PVRSRV_OK)
+	{
+		goto RGXCreateRenderContext_exit;
+	}
+
+
+
+
+RGXCreateRenderContext_exit:
+	if (psRGXCreateRenderContextOUT->eError != PVRSRV_OK)
+	{
+		if (psRenderContextInt)
+		{
+			PVRSRVRGXDestroyRenderContextKM(psRenderContextInt);
+		}
+	}
+
+	if (psFrameworkCmdInt)
+		OSFreeMem(psFrameworkCmdInt);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXDestroyRenderContext(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXDESTROYRENDERCONTEXT *psRGXDestroyRenderContextIN,
+					  PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERCONTEXT *psRGXDestroyRenderContextOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+	psRGXDestroyRenderContextOUT->eError =
+		PVRSRVReleaseHandle(psConnection->psHandleBase,
+					(IMG_HANDLE) psRGXDestroyRenderContextIN->hCleanupCookie,
+					PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT);
+	if ((psRGXDestroyRenderContextOUT->eError != PVRSRV_OK) && (psRGXDestroyRenderContextOUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_ASSERT(0);
+		goto RGXDestroyRenderContext_exit;
+	}
+
+
+
+RGXDestroyRenderContext_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXKickTA3D(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXKICKTA3D *psRGXKickTA3DIN,
+					  PVRSRV_BRIDGE_OUT_RGXKICKTA3D *psRGXKickTA3DOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	RGX_SERVER_RENDER_CONTEXT * psRenderContextInt = IMG_NULL;
+	PRGXFWIF_UFO_ADDR *sClientTAFenceUFOAddressInt = IMG_NULL;
+	IMG_UINT32 *ui32ClientTAFenceValueInt = IMG_NULL;
+	PRGXFWIF_UFO_ADDR *sClientTAUpdateUFOAddressInt = IMG_NULL;
+	IMG_UINT32 *ui32ClientTAUpdateValueInt = IMG_NULL;
+	IMG_UINT32 *ui32ServerTASyncFlagsInt = IMG_NULL;
+	SERVER_SYNC_PRIMITIVE * *psServerTASyncsInt = IMG_NULL;
+	IMG_HANDLE *hServerTASyncsInt2 = IMG_NULL;
+	PRGXFWIF_UFO_ADDR *sClient3DFenceUFOAddressInt = IMG_NULL;
+	IMG_UINT32 *ui32Client3DFenceValueInt = IMG_NULL;
+	PRGXFWIF_UFO_ADDR *sClient3DUpdateUFOAddressInt = IMG_NULL;
+	IMG_UINT32 *ui32Client3DUpdateValueInt = IMG_NULL;
+	IMG_UINT32 *ui32Server3DSyncFlagsInt = IMG_NULL;
+	SERVER_SYNC_PRIMITIVE * *psServer3DSyncsInt = IMG_NULL;
+	IMG_HANDLE *hServer3DSyncsInt2 = IMG_NULL;
+	IMG_INT32 *i32CheckFenceFDsInt = IMG_NULL;
+	IMG_BYTE *psTACmdInt = IMG_NULL;
+	IMG_BYTE *ps3DPRCmdInt = IMG_NULL;
+	IMG_BYTE *ps3DCmdInt = IMG_NULL;
+	RGX_RTDATA_CLEANUP_DATA * psRTDataCleanupInt = IMG_NULL;
+	RGX_ZSBUFFER_DATA * psZBufferInt = IMG_NULL;
+	RGX_ZSBUFFER_DATA * psSBufferInt = IMG_NULL;
+
+
+
+
+	if (psRGXKickTA3DIN->ui32ClientTAFenceCount != 0)
+	{
+		sClientTAFenceUFOAddressInt = OSAllocMem(psRGXKickTA3DIN->ui32ClientTAFenceCount * sizeof(PRGXFWIF_UFO_ADDR));
+		if (!sClientTAFenceUFOAddressInt)
+		{
+			psRGXKickTA3DOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXKickTA3D_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXKickTA3DIN->psClientTAFenceUFOAddress, psRGXKickTA3DIN->ui32ClientTAFenceCount * sizeof(PRGXFWIF_UFO_ADDR))
+				|| (OSCopyFromUser(NULL, sClientTAFenceUFOAddressInt, psRGXKickTA3DIN->psClientTAFenceUFOAddress,
+				psRGXKickTA3DIN->ui32ClientTAFenceCount * sizeof(PRGXFWIF_UFO_ADDR)) != PVRSRV_OK) )
+			{
+				psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXKickTA3D_exit;
+			}
+	if (psRGXKickTA3DIN->ui32ClientTAFenceCount != 0)
+	{
+		ui32ClientTAFenceValueInt = OSAllocMem(psRGXKickTA3DIN->ui32ClientTAFenceCount * sizeof(IMG_UINT32));
+		if (!ui32ClientTAFenceValueInt)
+		{
+			psRGXKickTA3DOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXKickTA3D_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXKickTA3DIN->pui32ClientTAFenceValue, psRGXKickTA3DIN->ui32ClientTAFenceCount * sizeof(IMG_UINT32))
+				|| (OSCopyFromUser(NULL, ui32ClientTAFenceValueInt, psRGXKickTA3DIN->pui32ClientTAFenceValue,
+				psRGXKickTA3DIN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK) )
+			{
+				psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXKickTA3D_exit;
+			}
+	if (psRGXKickTA3DIN->ui32ClientTAUpdateCount != 0)
+	{
+		sClientTAUpdateUFOAddressInt = OSAllocMem(psRGXKickTA3DIN->ui32ClientTAUpdateCount * sizeof(PRGXFWIF_UFO_ADDR));
+		if (!sClientTAUpdateUFOAddressInt)
+		{
+			psRGXKickTA3DOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXKickTA3D_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXKickTA3DIN->psClientTAUpdateUFOAddress, psRGXKickTA3DIN->ui32ClientTAUpdateCount * sizeof(PRGXFWIF_UFO_ADDR))
+				|| (OSCopyFromUser(NULL, sClientTAUpdateUFOAddressInt, psRGXKickTA3DIN->psClientTAUpdateUFOAddress,
+				psRGXKickTA3DIN->ui32ClientTAUpdateCount * sizeof(PRGXFWIF_UFO_ADDR)) != PVRSRV_OK) )
+			{
+				psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXKickTA3D_exit;
+			}
+	if (psRGXKickTA3DIN->ui32ClientTAUpdateCount != 0)
+	{
+		ui32ClientTAUpdateValueInt = OSAllocMem(psRGXKickTA3DIN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32));
+		if (!ui32ClientTAUpdateValueInt)
+		{
+			psRGXKickTA3DOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXKickTA3D_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXKickTA3DIN->pui32ClientTAUpdateValue, psRGXKickTA3DIN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32))
+				|| (OSCopyFromUser(NULL, ui32ClientTAUpdateValueInt, psRGXKickTA3DIN->pui32ClientTAUpdateValue,
+				psRGXKickTA3DIN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK) )
+			{
+				psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXKickTA3D_exit;
+			}
+	if (psRGXKickTA3DIN->ui32ServerTASyncPrims != 0)
+	{
+		ui32ServerTASyncFlagsInt = OSAllocMem(psRGXKickTA3DIN->ui32ServerTASyncPrims * sizeof(IMG_UINT32));
+		if (!ui32ServerTASyncFlagsInt)
+		{
+			psRGXKickTA3DOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXKickTA3D_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXKickTA3DIN->pui32ServerTASyncFlags, psRGXKickTA3DIN->ui32ServerTASyncPrims * sizeof(IMG_UINT32))
+				|| (OSCopyFromUser(NULL, ui32ServerTASyncFlagsInt, psRGXKickTA3DIN->pui32ServerTASyncFlags,
+				psRGXKickTA3DIN->ui32ServerTASyncPrims * sizeof(IMG_UINT32)) != PVRSRV_OK) )
+			{
+				psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXKickTA3D_exit;
+			}
+	if (psRGXKickTA3DIN->ui32ServerTASyncPrims != 0)
+	{
+		psServerTASyncsInt = OSAllocMem(psRGXKickTA3DIN->ui32ServerTASyncPrims * sizeof(SERVER_SYNC_PRIMITIVE *));
+		if (!psServerTASyncsInt)
+		{
+			psRGXKickTA3DOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXKickTA3D_exit;
+		}
+		hServerTASyncsInt2 = OSAllocMem(psRGXKickTA3DIN->ui32ServerTASyncPrims * sizeof(IMG_HANDLE));
+		if (!hServerTASyncsInt2)
+		{
+			psRGXKickTA3DOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXKickTA3D_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXKickTA3DIN->phServerTASyncs, psRGXKickTA3DIN->ui32ServerTASyncPrims * sizeof(IMG_HANDLE))
+				|| (OSCopyFromUser(NULL, hServerTASyncsInt2, psRGXKickTA3DIN->phServerTASyncs,
+				psRGXKickTA3DIN->ui32ServerTASyncPrims * sizeof(IMG_HANDLE)) != PVRSRV_OK) )
+			{
+				psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXKickTA3D_exit;
+			}
+	if (psRGXKickTA3DIN->ui32Client3DFenceCount != 0)
+	{
+		sClient3DFenceUFOAddressInt = OSAllocMem(psRGXKickTA3DIN->ui32Client3DFenceCount * sizeof(PRGXFWIF_UFO_ADDR));
+		if (!sClient3DFenceUFOAddressInt)
+		{
+			psRGXKickTA3DOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXKickTA3D_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXKickTA3DIN->psClient3DFenceUFOAddress, psRGXKickTA3DIN->ui32Client3DFenceCount * sizeof(PRGXFWIF_UFO_ADDR))
+				|| (OSCopyFromUser(NULL, sClient3DFenceUFOAddressInt, psRGXKickTA3DIN->psClient3DFenceUFOAddress,
+				psRGXKickTA3DIN->ui32Client3DFenceCount * sizeof(PRGXFWIF_UFO_ADDR)) != PVRSRV_OK) )
+			{
+				psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXKickTA3D_exit;
+			}
+	if (psRGXKickTA3DIN->ui32Client3DFenceCount != 0)
+	{
+		ui32Client3DFenceValueInt = OSAllocMem(psRGXKickTA3DIN->ui32Client3DFenceCount * sizeof(IMG_UINT32));
+		if (!ui32Client3DFenceValueInt)
+		{
+			psRGXKickTA3DOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXKickTA3D_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXKickTA3DIN->pui32Client3DFenceValue, psRGXKickTA3DIN->ui32Client3DFenceCount * sizeof(IMG_UINT32))
+				|| (OSCopyFromUser(NULL, ui32Client3DFenceValueInt, psRGXKickTA3DIN->pui32Client3DFenceValue,
+				psRGXKickTA3DIN->ui32Client3DFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK) )
+			{
+				psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXKickTA3D_exit;
+			}
+	if (psRGXKickTA3DIN->ui32Client3DUpdateCount != 0)
+	{
+		sClient3DUpdateUFOAddressInt = OSAllocMem(psRGXKickTA3DIN->ui32Client3DUpdateCount * sizeof(PRGXFWIF_UFO_ADDR));
+		if (!sClient3DUpdateUFOAddressInt)
+		{
+			psRGXKickTA3DOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXKickTA3D_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXKickTA3DIN->psClient3DUpdateUFOAddress, psRGXKickTA3DIN->ui32Client3DUpdateCount * sizeof(PRGXFWIF_UFO_ADDR))
+				|| (OSCopyFromUser(NULL, sClient3DUpdateUFOAddressInt, psRGXKickTA3DIN->psClient3DUpdateUFOAddress,
+				psRGXKickTA3DIN->ui32Client3DUpdateCount * sizeof(PRGXFWIF_UFO_ADDR)) != PVRSRV_OK) )
+			{
+				psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXKickTA3D_exit;
+			}
+	if (psRGXKickTA3DIN->ui32Client3DUpdateCount != 0)
+	{
+		ui32Client3DUpdateValueInt = OSAllocMem(psRGXKickTA3DIN->ui32Client3DUpdateCount * sizeof(IMG_UINT32));
+		if (!ui32Client3DUpdateValueInt)
+		{
+			psRGXKickTA3DOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXKickTA3D_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXKickTA3DIN->pui32Client3DUpdateValue, psRGXKickTA3DIN->ui32Client3DUpdateCount * sizeof(IMG_UINT32))
+				|| (OSCopyFromUser(NULL, ui32Client3DUpdateValueInt, psRGXKickTA3DIN->pui32Client3DUpdateValue,
+				psRGXKickTA3DIN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK) )
+			{
+				psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXKickTA3D_exit;
+			}
+	if (psRGXKickTA3DIN->ui32Server3DSyncPrims != 0)
+	{
+		ui32Server3DSyncFlagsInt = OSAllocMem(psRGXKickTA3DIN->ui32Server3DSyncPrims * sizeof(IMG_UINT32));
+		if (!ui32Server3DSyncFlagsInt)
+		{
+			psRGXKickTA3DOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXKickTA3D_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXKickTA3DIN->pui32Server3DSyncFlags, psRGXKickTA3DIN->ui32Server3DSyncPrims * sizeof(IMG_UINT32))
+				|| (OSCopyFromUser(NULL, ui32Server3DSyncFlagsInt, psRGXKickTA3DIN->pui32Server3DSyncFlags,
+				psRGXKickTA3DIN->ui32Server3DSyncPrims * sizeof(IMG_UINT32)) != PVRSRV_OK) )
+			{
+				psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXKickTA3D_exit;
+			}
+	if (psRGXKickTA3DIN->ui32Server3DSyncPrims != 0)
+	{
+		psServer3DSyncsInt = OSAllocMem(psRGXKickTA3DIN->ui32Server3DSyncPrims * sizeof(SERVER_SYNC_PRIMITIVE *));
+		if (!psServer3DSyncsInt)
+		{
+			psRGXKickTA3DOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXKickTA3D_exit;
+		}
+		hServer3DSyncsInt2 = OSAllocMem(psRGXKickTA3DIN->ui32Server3DSyncPrims * sizeof(IMG_HANDLE));
+		if (!hServer3DSyncsInt2)
+		{
+			psRGXKickTA3DOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXKickTA3D_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXKickTA3DIN->phServer3DSyncs, psRGXKickTA3DIN->ui32Server3DSyncPrims * sizeof(IMG_HANDLE))
+				|| (OSCopyFromUser(NULL, hServer3DSyncsInt2, psRGXKickTA3DIN->phServer3DSyncs,
+				psRGXKickTA3DIN->ui32Server3DSyncPrims * sizeof(IMG_HANDLE)) != PVRSRV_OK) )
+			{
+				psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXKickTA3D_exit;
+			}
+	if (psRGXKickTA3DIN->ui32NumCheckFenceFDs != 0)
+	{
+		i32CheckFenceFDsInt = OSAllocMem(psRGXKickTA3DIN->ui32NumCheckFenceFDs * sizeof(IMG_INT32));
+		if (!i32CheckFenceFDsInt)
+		{
+			psRGXKickTA3DOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXKickTA3D_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXKickTA3DIN->pi32CheckFenceFDs, psRGXKickTA3DIN->ui32NumCheckFenceFDs * sizeof(IMG_INT32))
+				|| (OSCopyFromUser(NULL, i32CheckFenceFDsInt, psRGXKickTA3DIN->pi32CheckFenceFDs,
+				psRGXKickTA3DIN->ui32NumCheckFenceFDs * sizeof(IMG_INT32)) != PVRSRV_OK) )
+			{
+				psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXKickTA3D_exit;
+			}
+	if (psRGXKickTA3DIN->ui32TACmdSize != 0)
+	{
+		psTACmdInt = OSAllocMem(psRGXKickTA3DIN->ui32TACmdSize * sizeof(IMG_BYTE));
+		if (!psTACmdInt)
+		{
+			psRGXKickTA3DOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXKickTA3D_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXKickTA3DIN->psTACmd, psRGXKickTA3DIN->ui32TACmdSize * sizeof(IMG_BYTE))
+				|| (OSCopyFromUser(NULL, psTACmdInt, psRGXKickTA3DIN->psTACmd,
+				psRGXKickTA3DIN->ui32TACmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK) )
+			{
+				psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXKickTA3D_exit;
+			}
+	if (psRGXKickTA3DIN->ui323DPRCmdSize != 0)
+	{
+		ps3DPRCmdInt = OSAllocMem(psRGXKickTA3DIN->ui323DPRCmdSize * sizeof(IMG_BYTE));
+		if (!ps3DPRCmdInt)
+		{
+			psRGXKickTA3DOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXKickTA3D_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXKickTA3DIN->ps3DPRCmd, psRGXKickTA3DIN->ui323DPRCmdSize * sizeof(IMG_BYTE))
+				|| (OSCopyFromUser(NULL, ps3DPRCmdInt, psRGXKickTA3DIN->ps3DPRCmd,
+				psRGXKickTA3DIN->ui323DPRCmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK) )
+			{
+				psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXKickTA3D_exit;
+			}
+	if (psRGXKickTA3DIN->ui323DCmdSize != 0)
+	{
+		ps3DCmdInt = OSAllocMem(psRGXKickTA3DIN->ui323DCmdSize * sizeof(IMG_BYTE));
+		if (!ps3DCmdInt)
+		{
+			psRGXKickTA3DOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXKickTA3D_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXKickTA3DIN->ps3DCmd, psRGXKickTA3DIN->ui323DCmdSize * sizeof(IMG_BYTE))
+				|| (OSCopyFromUser(NULL, ps3DCmdInt, psRGXKickTA3DIN->ps3DCmd,
+				psRGXKickTA3DIN->ui323DCmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK) )
+			{
+				psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXKickTA3D_exit;
+			}
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXKickTA3DOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psRenderContextInt,
+											psRGXKickTA3DIN->hRenderContext,
+											PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT);
+					if(psRGXKickTA3DOUT->eError != PVRSRV_OK)
+					{
+						PVR_DPF((PVR_DBG_ERROR, "%s: Failed to lookup RenderContext handle\n", __FUNCTION__));
+						goto RGXKickTA3D_exit;
+					}
+				}
+
+
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXKickTA3DIN->ui32ServerTASyncPrims;i++)
+		{
+				{
+					/* Look up the address from the handle */
+					psRGXKickTA3DOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psServerTASyncsInt[i],
+											hServerTASyncsInt2[i],
+											PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+					if(psRGXKickTA3DOUT->eError != PVRSRV_OK)
+					{
+						PVR_DPF((PVR_DBG_ERROR, "%s: Failed to lookup TA Sync handle[%d]\n", __FUNCTION__, i));
+						goto RGXKickTA3D_exit;
+					}
+				}
+
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXKickTA3DIN->ui32Server3DSyncPrims;i++)
+		{
+				{
+					/* Look up the address from the handle */
+					psRGXKickTA3DOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psServer3DSyncsInt[i],
+											hServer3DSyncsInt2[i],
+											PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+					if(psRGXKickTA3DOUT->eError != PVRSRV_OK)
+					{
+						PVR_DPF((PVR_DBG_ERROR, "%s: Failed to lookup 3D Sync handle[%d]\n", __FUNCTION__, i));
+						goto RGXKickTA3D_exit;
+					}
+				}
+
+		}
+	}
+
+				if (psRGXKickTA3DIN->hRTDataCleanup)
+				{
+					/* Look up the address from the handle */
+					psRGXKickTA3DOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psRTDataCleanupInt,
+											psRGXKickTA3DIN->hRTDataCleanup,
+											PVRSRV_HANDLE_TYPE_RGX_RTDATA_CLEANUP);
+					if(psRGXKickTA3DOUT->eError != PVRSRV_OK)
+					{
+						PVR_DPF((PVR_DBG_ERROR, "%s: Failed to lookup Cleanup handle\n", __FUNCTION__));
+						goto RGXKickTA3D_exit;
+					}
+				}
+
+
+				if (psRGXKickTA3DIN->hZBuffer)
+				{
+					/* Look up the address from the handle */
+					psRGXKickTA3DOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psZBufferInt,
+											psRGXKickTA3DIN->hZBuffer,
+											PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER);
+					if(psRGXKickTA3DOUT->eError != PVRSRV_OK)
+					{
+						PVR_DPF((PVR_DBG_ERROR, "%s: Failed to lookup Z Buffer handle\n", __FUNCTION__));
+						goto RGXKickTA3D_exit;
+					}
+				}
+
+
+				if (psRGXKickTA3DIN->hSBuffer)
+				{
+					/* Look up the address from the handle */
+					psRGXKickTA3DOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psSBufferInt,
+											psRGXKickTA3DIN->hSBuffer,
+											PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER);
+					if(psRGXKickTA3DOUT->eError != PVRSRV_OK)
+					{
+						PVR_DPF((PVR_DBG_ERROR, "%s: Failed to lookup S Buffer handle\n", __FUNCTION__));
+						goto RGXKickTA3D_exit;
+					}
+				}
+
+
+	psRGXKickTA3DOUT->eError =
+		PVRSRVRGXKickTA3DKM(
+					psRenderContextInt,
+					psRGXKickTA3DIN->ui32ClientTAFenceCount,
+					sClientTAFenceUFOAddressInt,
+					ui32ClientTAFenceValueInt,
+					psRGXKickTA3DIN->ui32ClientTAUpdateCount,
+					sClientTAUpdateUFOAddressInt,
+					ui32ClientTAUpdateValueInt,
+					psRGXKickTA3DIN->ui32ServerTASyncPrims,
+					ui32ServerTASyncFlagsInt,
+					psServerTASyncsInt,
+					psRGXKickTA3DIN->ui32Client3DFenceCount,
+					sClient3DFenceUFOAddressInt,
+					ui32Client3DFenceValueInt,
+					psRGXKickTA3DIN->ui32Client3DUpdateCount,
+					sClient3DUpdateUFOAddressInt,
+					ui32Client3DUpdateValueInt,
+					psRGXKickTA3DIN->ui32Server3DSyncPrims,
+					ui32Server3DSyncFlagsInt,
+					psServer3DSyncsInt,
+					psRGXKickTA3DIN->sPRFenceUFOAddress,
+					psRGXKickTA3DIN->ui32FRFenceValue,
+					psRGXKickTA3DIN->ui32NumCheckFenceFDs,
+					i32CheckFenceFDsInt,
+					psRGXKickTA3DIN->i32UpdateFenceFD,
+					psRGXKickTA3DIN->ui32TACmdSize,
+					psTACmdInt,
+					psRGXKickTA3DIN->ui323DPRCmdSize,
+					ps3DPRCmdInt,
+					psRGXKickTA3DIN->ui323DCmdSize,
+					ps3DCmdInt,
+					psRGXKickTA3DIN->ui32ExternalJobReference,
+					psRGXKickTA3DIN->ui32InternalJobReference,
+					psRGXKickTA3DIN->bbLastTAInScene,
+					psRGXKickTA3DIN->bbKickTA,
+					psRGXKickTA3DIN->bbKickPR,
+					psRGXKickTA3DIN->bbKick3D,
+					psRGXKickTA3DIN->bbAbort,
+					psRGXKickTA3DIN->bbPDumpContinuous,
+					psRTDataCleanupInt,
+					psZBufferInt,
+					psSBufferInt,
+					psRGXKickTA3DIN->bbCommitRefCountsTA,
+					psRGXKickTA3DIN->bbCommitRefCounts3D,
+					&psRGXKickTA3DOUT->bbCommittedRefCountsTA,
+					&psRGXKickTA3DOUT->bbCommittedRefCounts3D);
+
+
+
+
+RGXKickTA3D_exit:
+	if (sClientTAFenceUFOAddressInt)
+		OSFreeMem(sClientTAFenceUFOAddressInt);
+	if (ui32ClientTAFenceValueInt)
+		OSFreeMem(ui32ClientTAFenceValueInt);
+	if (sClientTAUpdateUFOAddressInt)
+		OSFreeMem(sClientTAUpdateUFOAddressInt);
+	if (ui32ClientTAUpdateValueInt)
+		OSFreeMem(ui32ClientTAUpdateValueInt);
+	if (ui32ServerTASyncFlagsInt)
+		OSFreeMem(ui32ServerTASyncFlagsInt);
+	if (psServerTASyncsInt)
+		OSFreeMem(psServerTASyncsInt);
+	if (hServerTASyncsInt2)
+		OSFreeMem(hServerTASyncsInt2);
+	if (sClient3DFenceUFOAddressInt)
+		OSFreeMem(sClient3DFenceUFOAddressInt);
+	if (ui32Client3DFenceValueInt)
+		OSFreeMem(ui32Client3DFenceValueInt);
+	if (sClient3DUpdateUFOAddressInt)
+		OSFreeMem(sClient3DUpdateUFOAddressInt);
+	if (ui32Client3DUpdateValueInt)
+		OSFreeMem(ui32Client3DUpdateValueInt);
+	if (ui32Server3DSyncFlagsInt)
+		OSFreeMem(ui32Server3DSyncFlagsInt);
+	if (psServer3DSyncsInt)
+		OSFreeMem(psServer3DSyncsInt);
+	if (hServer3DSyncsInt2)
+		OSFreeMem(hServer3DSyncsInt2);
+	if (i32CheckFenceFDsInt)
+		OSFreeMem(i32CheckFenceFDsInt);
+	if (psTACmdInt)
+		OSFreeMem(psTACmdInt);
+	if (ps3DPRCmdInt)
+		OSFreeMem(ps3DPRCmdInt);
+	if (ps3DCmdInt)
+		OSFreeMem(ps3DCmdInt);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXSetRenderContextPriority(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPRIORITY *psRGXSetRenderContextPriorityIN,
+					  PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPRIORITY *psRGXSetRenderContextPriorityOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	RGX_SERVER_RENDER_CONTEXT * psRenderContextInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXSetRenderContextPriorityOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psRenderContextInt,
+											psRGXSetRenderContextPriorityIN->hRenderContext,
+											PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT);
+					if(psRGXSetRenderContextPriorityOUT->eError != PVRSRV_OK)
+					{
+						goto RGXSetRenderContextPriority_exit;
+					}
+				}
+
+
+	psRGXSetRenderContextPriorityOUT->eError =
+		PVRSRVRGXSetRenderContextPriorityKM(psConnection,
+					psRenderContextInt,
+					psRGXSetRenderContextPriorityIN->ui32Priority);
+
+
+
+
+RGXSetRenderContextPriority_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXGetLastRenderContextResetReason(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXGETLASTRENDERCONTEXTRESETREASON *psRGXGetLastRenderContextResetReasonIN,
+					  PVRSRV_BRIDGE_OUT_RGXGETLASTRENDERCONTEXTRESETREASON *psRGXGetLastRenderContextResetReasonOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	RGX_SERVER_RENDER_CONTEXT * psRenderContextInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXGetLastRenderContextResetReasonOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psRenderContextInt,
+											psRGXGetLastRenderContextResetReasonIN->hRenderContext,
+											PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT);
+					if(psRGXGetLastRenderContextResetReasonOUT->eError != PVRSRV_OK)
+					{
+						goto RGXGetLastRenderContextResetReason_exit;
+					}
+				}
+
+
+	psRGXGetLastRenderContextResetReasonOUT->eError =
+		PVRSRVRGXGetLastRenderContextResetReasonKM(
+					psRenderContextInt,
+					&psRGXGetLastRenderContextResetReasonOUT->ui32LastResetReason);
+
+
+
+
+RGXGetLastRenderContextResetReason_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXGetPartialRenderCount(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXGETPARTIALRENDERCOUNT *psRGXGetPartialRenderCountIN,
+					  PVRSRV_BRIDGE_OUT_RGXGETPARTIALRENDERCOUNT *psRGXGetPartialRenderCountOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	DEVMEM_MEMDESC * psHWRTDataMemDescInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXGetPartialRenderCountOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psHWRTDataMemDescInt,
+											psRGXGetPartialRenderCountIN->hHWRTDataMemDesc,
+											PVRSRV_HANDLE_TYPE_RGX_FW_MEMDESC);
+					if(psRGXGetPartialRenderCountOUT->eError != PVRSRV_OK)
+					{
+						goto RGXGetPartialRenderCount_exit;
+					}
+				}
+
+
+	psRGXGetPartialRenderCountOUT->eError =
+		PVRSRVRGXGetPartialRenderCountKM(
+					psHWRTDataMemDescInt,
+					&psRGXGetPartialRenderCountOUT->ui32NumPartialRenders);
+
+
+
+
+RGXGetPartialRenderCount_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXKickSyncTA(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXKICKSYNCTA *psRGXKickSyncTAIN,
+					  PVRSRV_BRIDGE_OUT_RGXKICKSYNCTA *psRGXKickSyncTAOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	RGX_SERVER_RENDER_CONTEXT * psRenderContextInt = IMG_NULL;
+	PRGXFWIF_UFO_ADDR *sClientTAFenceUFOAddressInt = IMG_NULL;
+	IMG_UINT32 *ui32ClientTAFenceValueInt = IMG_NULL;
+	PRGXFWIF_UFO_ADDR *sClientTAUpdateUFOAddressInt = IMG_NULL;
+	IMG_UINT32 *ui32ClientTAUpdateValueInt = IMG_NULL;
+	IMG_UINT32 *ui32ServerTASyncFlagsInt = IMG_NULL;
+	SERVER_SYNC_PRIMITIVE * *psServerTASyncsInt = IMG_NULL;
+	IMG_HANDLE *hServerTASyncsInt2 = IMG_NULL;
+	PRGXFWIF_UFO_ADDR *sClient3DFenceUFOAddressInt = IMG_NULL;
+	IMG_UINT32 *ui32Client3DFenceValueInt = IMG_NULL;
+	PRGXFWIF_UFO_ADDR *sClient3DUpdateUFOAddressInt = IMG_NULL;
+	IMG_UINT32 *ui32Client3DUpdateValueInt = IMG_NULL;
+	IMG_UINT32 *ui32Server3DSyncFlagsInt = IMG_NULL;
+	SERVER_SYNC_PRIMITIVE * *psServer3DSyncsInt = IMG_NULL;
+	IMG_HANDLE *hServer3DSyncsInt2 = IMG_NULL;
+	IMG_INT32 *i32CheckFenceFDsInt = IMG_NULL;
+
+
+
+
+	if (psRGXKickSyncTAIN->ui32ClientTAFenceCount != 0)
+	{
+		sClientTAFenceUFOAddressInt = OSAllocMem(psRGXKickSyncTAIN->ui32ClientTAFenceCount * sizeof(PRGXFWIF_UFO_ADDR));
+		if (!sClientTAFenceUFOAddressInt)
+		{
+			psRGXKickSyncTAOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXKickSyncTA_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXKickSyncTAIN->psClientTAFenceUFOAddress, psRGXKickSyncTAIN->ui32ClientTAFenceCount * sizeof(PRGXFWIF_UFO_ADDR))
+				|| (OSCopyFromUser(NULL, sClientTAFenceUFOAddressInt, psRGXKickSyncTAIN->psClientTAFenceUFOAddress,
+				psRGXKickSyncTAIN->ui32ClientTAFenceCount * sizeof(PRGXFWIF_UFO_ADDR)) != PVRSRV_OK) )
+			{
+				psRGXKickSyncTAOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXKickSyncTA_exit;
+			}
+	if (psRGXKickSyncTAIN->ui32ClientTAFenceCount != 0)
+	{
+		ui32ClientTAFenceValueInt = OSAllocMem(psRGXKickSyncTAIN->ui32ClientTAFenceCount * sizeof(IMG_UINT32));
+		if (!ui32ClientTAFenceValueInt)
+		{
+			psRGXKickSyncTAOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXKickSyncTA_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXKickSyncTAIN->pui32ClientTAFenceValue, psRGXKickSyncTAIN->ui32ClientTAFenceCount * sizeof(IMG_UINT32))
+				|| (OSCopyFromUser(NULL, ui32ClientTAFenceValueInt, psRGXKickSyncTAIN->pui32ClientTAFenceValue,
+				psRGXKickSyncTAIN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK) )
+			{
+				psRGXKickSyncTAOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXKickSyncTA_exit;
+			}
+	if (psRGXKickSyncTAIN->ui32ClientTAUpdateCount != 0)
+	{
+		sClientTAUpdateUFOAddressInt = OSAllocMem(psRGXKickSyncTAIN->ui32ClientTAUpdateCount * sizeof(PRGXFWIF_UFO_ADDR));
+		if (!sClientTAUpdateUFOAddressInt)
+		{
+			psRGXKickSyncTAOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXKickSyncTA_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXKickSyncTAIN->psClientTAUpdateUFOAddress, psRGXKickSyncTAIN->ui32ClientTAUpdateCount * sizeof(PRGXFWIF_UFO_ADDR))
+				|| (OSCopyFromUser(NULL, sClientTAUpdateUFOAddressInt, psRGXKickSyncTAIN->psClientTAUpdateUFOAddress,
+				psRGXKickSyncTAIN->ui32ClientTAUpdateCount * sizeof(PRGXFWIF_UFO_ADDR)) != PVRSRV_OK) )
+			{
+				psRGXKickSyncTAOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXKickSyncTA_exit;
+			}
+	if (psRGXKickSyncTAIN->ui32ClientTAUpdateCount != 0)
+	{
+		ui32ClientTAUpdateValueInt = OSAllocMem(psRGXKickSyncTAIN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32));
+		if (!ui32ClientTAUpdateValueInt)
+		{
+			psRGXKickSyncTAOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXKickSyncTA_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXKickSyncTAIN->pui32ClientTAUpdateValue, psRGXKickSyncTAIN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32))
+				|| (OSCopyFromUser(NULL, ui32ClientTAUpdateValueInt, psRGXKickSyncTAIN->pui32ClientTAUpdateValue,
+				psRGXKickSyncTAIN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK) )
+			{
+				psRGXKickSyncTAOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXKickSyncTA_exit;
+			}
+	if (psRGXKickSyncTAIN->ui32ServerTASyncPrims != 0)
+	{
+		ui32ServerTASyncFlagsInt = OSAllocMem(psRGXKickSyncTAIN->ui32ServerTASyncPrims * sizeof(IMG_UINT32));
+		if (!ui32ServerTASyncFlagsInt)
+		{
+			psRGXKickSyncTAOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXKickSyncTA_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXKickSyncTAIN->pui32ServerTASyncFlags, psRGXKickSyncTAIN->ui32ServerTASyncPrims * sizeof(IMG_UINT32))
+				|| (OSCopyFromUser(NULL, ui32ServerTASyncFlagsInt, psRGXKickSyncTAIN->pui32ServerTASyncFlags,
+				psRGXKickSyncTAIN->ui32ServerTASyncPrims * sizeof(IMG_UINT32)) != PVRSRV_OK) )
+			{
+				psRGXKickSyncTAOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXKickSyncTA_exit;
+			}
+	if (psRGXKickSyncTAIN->ui32ServerTASyncPrims != 0)
+	{
+		psServerTASyncsInt = OSAllocMem(psRGXKickSyncTAIN->ui32ServerTASyncPrims * sizeof(SERVER_SYNC_PRIMITIVE *));
+		if (!psServerTASyncsInt)
+		{
+			psRGXKickSyncTAOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXKickSyncTA_exit;
+		}
+		hServerTASyncsInt2 = OSAllocMem(psRGXKickSyncTAIN->ui32ServerTASyncPrims * sizeof(IMG_HANDLE));
+		if (!hServerTASyncsInt2)
+		{
+			psRGXKickSyncTAOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXKickSyncTA_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXKickSyncTAIN->phServerTASyncs, psRGXKickSyncTAIN->ui32ServerTASyncPrims * sizeof(IMG_HANDLE))
+				|| (OSCopyFromUser(NULL, hServerTASyncsInt2, psRGXKickSyncTAIN->phServerTASyncs,
+				psRGXKickSyncTAIN->ui32ServerTASyncPrims * sizeof(IMG_HANDLE)) != PVRSRV_OK) )
+			{
+				psRGXKickSyncTAOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXKickSyncTA_exit;
+			}
+	if (psRGXKickSyncTAIN->ui32Client3DFenceCount != 0)
+	{
+		sClient3DFenceUFOAddressInt = OSAllocMem(psRGXKickSyncTAIN->ui32Client3DFenceCount * sizeof(PRGXFWIF_UFO_ADDR));
+		if (!sClient3DFenceUFOAddressInt)
+		{
+			psRGXKickSyncTAOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXKickSyncTA_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXKickSyncTAIN->psClient3DFenceUFOAddress, psRGXKickSyncTAIN->ui32Client3DFenceCount * sizeof(PRGXFWIF_UFO_ADDR))
+				|| (OSCopyFromUser(NULL, sClient3DFenceUFOAddressInt, psRGXKickSyncTAIN->psClient3DFenceUFOAddress,
+				psRGXKickSyncTAIN->ui32Client3DFenceCount * sizeof(PRGXFWIF_UFO_ADDR)) != PVRSRV_OK) )
+			{
+				psRGXKickSyncTAOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXKickSyncTA_exit;
+			}
+	if (psRGXKickSyncTAIN->ui32Client3DFenceCount != 0)
+	{
+		ui32Client3DFenceValueInt = OSAllocMem(psRGXKickSyncTAIN->ui32Client3DFenceCount * sizeof(IMG_UINT32));
+		if (!ui32Client3DFenceValueInt)
+		{
+			psRGXKickSyncTAOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXKickSyncTA_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXKickSyncTAIN->pui32Client3DFenceValue, psRGXKickSyncTAIN->ui32Client3DFenceCount * sizeof(IMG_UINT32))
+				|| (OSCopyFromUser(NULL, ui32Client3DFenceValueInt, psRGXKickSyncTAIN->pui32Client3DFenceValue,
+				psRGXKickSyncTAIN->ui32Client3DFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK) )
+			{
+				psRGXKickSyncTAOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXKickSyncTA_exit;
+			}
+	if (psRGXKickSyncTAIN->ui32Client3DUpdateCount != 0)
+	{
+		sClient3DUpdateUFOAddressInt = OSAllocMem(psRGXKickSyncTAIN->ui32Client3DUpdateCount * sizeof(PRGXFWIF_UFO_ADDR));
+		if (!sClient3DUpdateUFOAddressInt)
+		{
+			psRGXKickSyncTAOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXKickSyncTA_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXKickSyncTAIN->psClient3DUpdateUFOAddress, psRGXKickSyncTAIN->ui32Client3DUpdateCount * sizeof(PRGXFWIF_UFO_ADDR))
+				|| (OSCopyFromUser(NULL, sClient3DUpdateUFOAddressInt, psRGXKickSyncTAIN->psClient3DUpdateUFOAddress,
+				psRGXKickSyncTAIN->ui32Client3DUpdateCount * sizeof(PRGXFWIF_UFO_ADDR)) != PVRSRV_OK) )
+			{
+				psRGXKickSyncTAOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXKickSyncTA_exit;
+			}
+	if (psRGXKickSyncTAIN->ui32Client3DUpdateCount != 0)
+	{
+		ui32Client3DUpdateValueInt = OSAllocMem(psRGXKickSyncTAIN->ui32Client3DUpdateCount * sizeof(IMG_UINT32));
+		if (!ui32Client3DUpdateValueInt)
+		{
+			psRGXKickSyncTAOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXKickSyncTA_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXKickSyncTAIN->pui32Client3DUpdateValue, psRGXKickSyncTAIN->ui32Client3DUpdateCount * sizeof(IMG_UINT32))
+				|| (OSCopyFromUser(NULL, ui32Client3DUpdateValueInt, psRGXKickSyncTAIN->pui32Client3DUpdateValue,
+				psRGXKickSyncTAIN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK) )
+			{
+				psRGXKickSyncTAOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXKickSyncTA_exit;
+			}
+	if (psRGXKickSyncTAIN->ui32Server3DSyncPrims != 0)
+	{
+		ui32Server3DSyncFlagsInt = OSAllocMem(psRGXKickSyncTAIN->ui32Server3DSyncPrims * sizeof(IMG_UINT32));
+		if (!ui32Server3DSyncFlagsInt)
+		{
+			psRGXKickSyncTAOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXKickSyncTA_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXKickSyncTAIN->pui32Server3DSyncFlags, psRGXKickSyncTAIN->ui32Server3DSyncPrims * sizeof(IMG_UINT32))
+				|| (OSCopyFromUser(NULL, ui32Server3DSyncFlagsInt, psRGXKickSyncTAIN->pui32Server3DSyncFlags,
+				psRGXKickSyncTAIN->ui32Server3DSyncPrims * sizeof(IMG_UINT32)) != PVRSRV_OK) )
+			{
+				psRGXKickSyncTAOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXKickSyncTA_exit;
+			}
+	if (psRGXKickSyncTAIN->ui32Server3DSyncPrims != 0)
+	{
+		psServer3DSyncsInt = OSAllocMem(psRGXKickSyncTAIN->ui32Server3DSyncPrims * sizeof(SERVER_SYNC_PRIMITIVE *));
+		if (!psServer3DSyncsInt)
+		{
+			psRGXKickSyncTAOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXKickSyncTA_exit;
+		}
+		hServer3DSyncsInt2 = OSAllocMem(psRGXKickSyncTAIN->ui32Server3DSyncPrims * sizeof(IMG_HANDLE));
+		if (!hServer3DSyncsInt2)
+		{
+			psRGXKickSyncTAOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXKickSyncTA_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXKickSyncTAIN->phServer3DSyncs, psRGXKickSyncTAIN->ui32Server3DSyncPrims * sizeof(IMG_HANDLE))
+				|| (OSCopyFromUser(NULL, hServer3DSyncsInt2, psRGXKickSyncTAIN->phServer3DSyncs,
+				psRGXKickSyncTAIN->ui32Server3DSyncPrims * sizeof(IMG_HANDLE)) != PVRSRV_OK) )
+			{
+				psRGXKickSyncTAOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXKickSyncTA_exit;
+			}
+	if (psRGXKickSyncTAIN->ui32NumCheckFenceFDs != 0)
+	{
+		i32CheckFenceFDsInt = OSAllocMem(psRGXKickSyncTAIN->ui32NumCheckFenceFDs * sizeof(IMG_INT32));
+		if (!i32CheckFenceFDsInt)
+		{
+			psRGXKickSyncTAOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXKickSyncTA_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXKickSyncTAIN->pi32CheckFenceFDs, psRGXKickSyncTAIN->ui32NumCheckFenceFDs * sizeof(IMG_INT32))
+				|| (OSCopyFromUser(NULL, i32CheckFenceFDsInt, psRGXKickSyncTAIN->pi32CheckFenceFDs,
+				psRGXKickSyncTAIN->ui32NumCheckFenceFDs * sizeof(IMG_INT32)) != PVRSRV_OK) )
+			{
+				psRGXKickSyncTAOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXKickSyncTA_exit;
+			}
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXKickSyncTAOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psRenderContextInt,
+											psRGXKickSyncTAIN->hRenderContext,
+											PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT);
+					if(psRGXKickSyncTAOUT->eError != PVRSRV_OK)
+					{
+						goto RGXKickSyncTA_exit;
+					}
+				}
+
+
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXKickSyncTAIN->ui32ServerTASyncPrims;i++)
+		{
+				{
+					/* Look up the address from the handle */
+					psRGXKickSyncTAOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psServerTASyncsInt[i],
+											hServerTASyncsInt2[i],
+											PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+					if(psRGXKickSyncTAOUT->eError != PVRSRV_OK)
+					{
+						goto RGXKickSyncTA_exit;
+					}
+				}
+
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXKickSyncTAIN->ui32Server3DSyncPrims;i++)
+		{
+				{
+					/* Look up the address from the handle */
+					psRGXKickSyncTAOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psServer3DSyncsInt[i],
+											hServer3DSyncsInt2[i],
+											PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+					if(psRGXKickSyncTAOUT->eError != PVRSRV_OK)
+					{
+						goto RGXKickSyncTA_exit;
+					}
+				}
+
+		}
+	}
+
+	psRGXKickSyncTAOUT->eError =
+		PVRSRVRGXKickSyncTAKM(
+					psRenderContextInt,
+					psRGXKickSyncTAIN->ui32ClientTAFenceCount,
+					sClientTAFenceUFOAddressInt,
+					ui32ClientTAFenceValueInt,
+					psRGXKickSyncTAIN->ui32ClientTAUpdateCount,
+					sClientTAUpdateUFOAddressInt,
+					ui32ClientTAUpdateValueInt,
+					psRGXKickSyncTAIN->ui32ServerTASyncPrims,
+					ui32ServerTASyncFlagsInt,
+					psServerTASyncsInt,
+					psRGXKickSyncTAIN->ui32Client3DFenceCount,
+					sClient3DFenceUFOAddressInt,
+					ui32Client3DFenceValueInt,
+					psRGXKickSyncTAIN->ui32Client3DUpdateCount,
+					sClient3DUpdateUFOAddressInt,
+					ui32Client3DUpdateValueInt,
+					psRGXKickSyncTAIN->ui32Server3DSyncPrims,
+					ui32Server3DSyncFlagsInt,
+					psServer3DSyncsInt,
+					psRGXKickSyncTAIN->ui32NumCheckFenceFDs,
+					i32CheckFenceFDsInt,
+					psRGXKickSyncTAIN->i32UpdateFenceFD,
+					psRGXKickSyncTAIN->bbPDumpContinuous);
+
+
+
+
+RGXKickSyncTA_exit:
+	if (sClientTAFenceUFOAddressInt)
+		OSFreeMem(sClientTAFenceUFOAddressInt);
+	if (ui32ClientTAFenceValueInt)
+		OSFreeMem(ui32ClientTAFenceValueInt);
+	if (sClientTAUpdateUFOAddressInt)
+		OSFreeMem(sClientTAUpdateUFOAddressInt);
+	if (ui32ClientTAUpdateValueInt)
+		OSFreeMem(ui32ClientTAUpdateValueInt);
+	if (ui32ServerTASyncFlagsInt)
+		OSFreeMem(ui32ServerTASyncFlagsInt);
+	if (psServerTASyncsInt)
+		OSFreeMem(psServerTASyncsInt);
+	if (hServerTASyncsInt2)
+		OSFreeMem(hServerTASyncsInt2);
+	if (sClient3DFenceUFOAddressInt)
+		OSFreeMem(sClient3DFenceUFOAddressInt);
+	if (ui32Client3DFenceValueInt)
+		OSFreeMem(ui32Client3DFenceValueInt);
+	if (sClient3DUpdateUFOAddressInt)
+		OSFreeMem(sClient3DUpdateUFOAddressInt);
+	if (ui32Client3DUpdateValueInt)
+		OSFreeMem(ui32Client3DUpdateValueInt);
+	if (ui32Server3DSyncFlagsInt)
+		OSFreeMem(ui32Server3DSyncFlagsInt);
+	if (psServer3DSyncsInt)
+		OSFreeMem(psServer3DSyncsInt);
+	if (hServer3DSyncsInt2)
+		OSFreeMem(hServer3DSyncsInt2);
+	if (i32CheckFenceFDsInt)
+		OSFreeMem(i32CheckFenceFDsInt);
+
+	return 0;
+}
+
+
+
+/* *************************************************************************** 
+ * Server bridge dispatch related glue 
+ */
+
+
+PVRSRV_ERROR InitRGXTA3DBridge(IMG_VOID);
+PVRSRV_ERROR DeinitRGXTA3DBridge(IMG_VOID);
+
+/*
+ * Register all RGXTA3D functions with services
+ */
+PVRSRV_ERROR InitRGXTA3DBridge(IMG_VOID)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATA, PVRSRVBridgeRGXCreateHWRTData,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYHWRTDATA, PVRSRVBridgeRGXDestroyHWRTData,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERTARGET, PVRSRVBridgeRGXCreateRenderTarget,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERTARGET, PVRSRVBridgeRGXDestroyRenderTarget,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEZSBUFFER, PVRSRVBridgeRGXCreateZSBuffer,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYZSBUFFER, PVRSRVBridgeRGXDestroyZSBuffer,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXPOPULATEZSBUFFER, PVRSRVBridgeRGXPopulateZSBuffer,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXUNPOPULATEZSBUFFER, PVRSRVBridgeRGXUnpopulateZSBuffer,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEFREELIST, PVRSRVBridgeRGXCreateFreeList,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYFREELIST, PVRSRVBridgeRGXDestroyFreeList,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXADDBLOCKTOFREELIST, PVRSRVBridgeRGXAddBlockToFreeList,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXREMOVEBLOCKFROMFREELIST, PVRSRVBridgeRGXRemoveBlockFromFreeList,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERCONTEXT, PVRSRVBridgeRGXCreateRenderContext,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERCONTEXT, PVRSRVBridgeRGXDestroyRenderContext,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXKICKTA3D, PVRSRVBridgeRGXKickTA3D,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPRIORITY, PVRSRVBridgeRGXSetRenderContextPriority,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXGETLASTRENDERCONTEXTRESETREASON, PVRSRVBridgeRGXGetLastRenderContextResetReason,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXGETPARTIALRENDERCOUNT, PVRSRVBridgeRGXGetPartialRenderCount,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXKICKSYNCTA, PVRSRVBridgeRGXKickSyncTA,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all rgxta3d functions with services
+ */
+PVRSRV_ERROR DeinitRGXTA3DBridge(IMG_VOID)
+{
+	return PVRSRV_OK;
+}
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/rgxtq_bridge/common_rgxtq_bridge.h b/drivers/external_drivers/intel_media/graphics/rgx/generated/rgxtq_bridge/common_rgxtq_bridge.h
new file mode 100644
index 0000000..ce9d3be
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/rgxtq_bridge/common_rgxtq_bridge.h
@@ -0,0 +1,188 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common bridge header for rgxtq
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures that are used by both
+                the client and sever side of the bridge for rgxtq
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_RGXTQ_BRIDGE_H
+#define COMMON_RGXTQ_BRIDGE_H
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+#include "sync_external.h"
+#include "rgx_fwif_shared.h"
+
+
+#define PVRSRV_BRIDGE_RGXTQ_CMD_FIRST			0
+#define PVRSRV_BRIDGE_RGXTQ_RGXCREATETRANSFERCONTEXT			PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXTQ_RGXDESTROYTRANSFERCONTEXT			PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXTQ_RGXSUBMITTRANSFER			PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPRIORITY			PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RGXTQ_RGXKICKSYNCTRANSFER			PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+4
+#define PVRSRV_BRIDGE_RGXTQ_CMD_LAST			(PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+4)
+
+
+/*******************************************
+            RGXCreateTransferContext          
+ *******************************************/
+
+/* Bridge in structure for RGXCreateTransferContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATETRANSFERCONTEXT_TAG
+{
+	IMG_HANDLE hDevNode;
+	IMG_UINT32 ui32Priority;
+	IMG_DEV_VIRTADDR sMCUFenceAddr;
+	IMG_UINT32 ui32FrameworkCmdize;
+	IMG_BYTE * psFrameworkCmd;
+	IMG_HANDLE hPrivData;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCREATETRANSFERCONTEXT;
+
+
+/* Bridge out structure for RGXCreateTransferContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATETRANSFERCONTEXT_TAG
+{
+	IMG_HANDLE hTransferContext;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCREATETRANSFERCONTEXT;
+
+/*******************************************
+            RGXDestroyTransferContext          
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyTransferContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYTRANSFERCONTEXT_TAG
+{
+	IMG_HANDLE hTransferContext;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYTRANSFERCONTEXT;
+
+
+/* Bridge out structure for RGXDestroyTransferContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYTRANSFERCONTEXT_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYTRANSFERCONTEXT;
+
+/*******************************************
+            RGXSubmitTransfer          
+ *******************************************/
+
+/* Bridge in structure for RGXSubmitTransfer */
+typedef struct PVRSRV_BRIDGE_IN_RGXSUBMITTRANSFER_TAG
+{
+	IMG_HANDLE hTransferContext;
+	IMG_UINT32 ui32PrepareCount;
+	IMG_UINT32 * pui32ClientFenceCount;
+	PRGXFWIF_UFO_ADDR* * psFenceUFOAddress;
+	IMG_UINT32* * pui32FenceValue;
+	IMG_UINT32 * pui32ClientUpdateCount;
+	PRGXFWIF_UFO_ADDR* * psUpdateUFOAddress;
+	IMG_UINT32* * pui32UpdateValue;
+	IMG_UINT32 * pui32ServerSyncCount;
+	IMG_UINT32* * pui32ServerSyncFlags;
+	IMG_HANDLE* * phServerSync;
+	IMG_UINT32 ui32NumCheckFenceFDs;
+	IMG_INT32 * pi32CheckFenceFDs;
+	IMG_INT32 i32UpdateFenceFD;
+	IMG_UINT32 * pui32CommandSize;
+	IMG_UINT8* * pui8FWCommand;
+	IMG_UINT32 * pui32TQPrepareFlags;
+	IMG_UINT32 ui32ExternalJobReference;
+	IMG_UINT32 ui32InternalJobReference;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXSUBMITTRANSFER;
+
+
+/* Bridge out structure for RGXSubmitTransfer */
+typedef struct PVRSRV_BRIDGE_OUT_RGXSUBMITTRANSFER_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXSUBMITTRANSFER;
+
+/*******************************************
+            RGXSetTransferContextPriority          
+ *******************************************/
+
+/* Bridge in structure for RGXSetTransferContextPriority */
+typedef struct PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPRIORITY_TAG
+{
+	IMG_HANDLE hTransferContext;
+	IMG_UINT32 ui32Priority;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPRIORITY;
+
+
+/* Bridge out structure for RGXSetTransferContextPriority */
+typedef struct PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPRIORITY_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPRIORITY;
+
+/*******************************************
+            RGXKickSyncTransfer          
+ *******************************************/
+
+/* Bridge in structure for RGXKickSyncTransfer */
+typedef struct PVRSRV_BRIDGE_IN_RGXKICKSYNCTRANSFER_TAG
+{
+	IMG_HANDLE hTransferContext;
+	IMG_UINT32 ui32ClientFenceCount;
+	PRGXFWIF_UFO_ADDR * psClientFenceUFOAddress;
+	IMG_UINT32 * pui32ClientFenceValue;
+	IMG_UINT32 ui32ClientUpdateCount;
+	PRGXFWIF_UFO_ADDR * psClientUpdateUFOAddress;
+	IMG_UINT32 * pui32ClientUpdateValue;
+	IMG_UINT32 ui32ServerSyncCount;
+	IMG_UINT32 * pui32ServerSyncFlags;
+	IMG_HANDLE * phServerSyncs;
+	IMG_UINT32 ui32NumCheckFenceFDs;
+	IMG_INT32 * pi32CheckFenceFDs;
+	IMG_INT32 i32UpdateFenceFD;
+	IMG_UINT32 ui32TQPrepareFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXKICKSYNCTRANSFER;
+
+
+/* Bridge out structure for RGXKickSyncTransfer */
+typedef struct PVRSRV_BRIDGE_OUT_RGXKICKSYNCTRANSFER_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXKICKSYNCTRANSFER;
+
+#endif /* COMMON_RGXTQ_BRIDGE_H */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/rgxtq_bridge/server_rgxtq_bridge.c b/drivers/external_drivers/intel_media/graphics/rgx/generated/rgxtq_bridge/server_rgxtq_bridge.c
new file mode 100644
index 0000000..b12acdc
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/rgxtq_bridge/server_rgxtq_bridge.c
@@ -0,0 +1,1351 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server bridge for rgxtq
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for rgxtq
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <asm/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxtransfer.h"
+
+
+#include "common_rgxtq_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#if defined (SUPPORT_AUTH)
+#include "osauth.h"
+#endif
+
+#include <linux/slab.h>
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+ 
+static IMG_INT
+PVRSRVBridgeRGXCreateTransferContext(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXCREATETRANSFERCONTEXT *psRGXCreateTransferContextIN,
+					  PVRSRV_BRIDGE_OUT_RGXCREATETRANSFERCONTEXT *psRGXCreateTransferContextOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hDevNodeInt = IMG_NULL;
+	IMG_BYTE *psFrameworkCmdInt = IMG_NULL;
+	IMG_HANDLE hPrivDataInt = IMG_NULL;
+	RGX_SERVER_TQ_CONTEXT * psTransferContextInt = IMG_NULL;
+
+
+
+
+	if (psRGXCreateTransferContextIN->ui32FrameworkCmdize != 0)
+	{
+		psFrameworkCmdInt = OSAllocMem(psRGXCreateTransferContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE));
+		if (!psFrameworkCmdInt)
+		{
+			psRGXCreateTransferContextOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXCreateTransferContext_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXCreateTransferContextIN->psFrameworkCmd, psRGXCreateTransferContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE))
+				|| (OSCopyFromUser(NULL, psFrameworkCmdInt, psRGXCreateTransferContextIN->psFrameworkCmd,
+				psRGXCreateTransferContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE)) != PVRSRV_OK) )
+			{
+				psRGXCreateTransferContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXCreateTransferContext_exit;
+			}
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXCreateTransferContextOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &hDevNodeInt,
+											psRGXCreateTransferContextIN->hDevNode,
+											PVRSRV_HANDLE_TYPE_DEV_NODE);
+					if(psRGXCreateTransferContextOUT->eError != PVRSRV_OK)
+					{
+						goto RGXCreateTransferContext_exit;
+					}
+				}
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXCreateTransferContextOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &hPrivDataInt,
+											psRGXCreateTransferContextIN->hPrivData,
+											PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+					if(psRGXCreateTransferContextOUT->eError != PVRSRV_OK)
+					{
+						goto RGXCreateTransferContext_exit;
+					}
+				}
+
+
+	psRGXCreateTransferContextOUT->eError =
+		PVRSRVRGXCreateTransferContextKM(psConnection,
+					hDevNodeInt,
+					psRGXCreateTransferContextIN->ui32Priority,
+					psRGXCreateTransferContextIN->sMCUFenceAddr,
+					psRGXCreateTransferContextIN->ui32FrameworkCmdize,
+					psFrameworkCmdInt,
+					hPrivDataInt,
+					&psTransferContextInt);
+	/* Exit early if bridged call fails */
+	if(psRGXCreateTransferContextOUT->eError != PVRSRV_OK)
+	{
+		goto RGXCreateTransferContext_exit;
+	}
+
+
+	psRGXCreateTransferContextOUT->eError = PVRSRVAllocHandle(psConnection->psHandleBase,
+							&psRGXCreateTransferContextOUT->hTransferContext,
+							(IMG_VOID *) psTransferContextInt,
+							PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+							,(PFN_HANDLE_RELEASE)&PVRSRVRGXDestroyTransferContextKM);
+	if (psRGXCreateTransferContextOUT->eError != PVRSRV_OK)
+	{
+		goto RGXCreateTransferContext_exit;
+	}
+
+
+
+
+RGXCreateTransferContext_exit:
+	if (psRGXCreateTransferContextOUT->eError != PVRSRV_OK)
+	{
+		if (psTransferContextInt)
+		{
+			PVRSRVRGXDestroyTransferContextKM(psTransferContextInt);
+		}
+	}
+
+	if (psFrameworkCmdInt)
+		OSFreeMem(psFrameworkCmdInt);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXDestroyTransferContext(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXDESTROYTRANSFERCONTEXT *psRGXDestroyTransferContextIN,
+					  PVRSRV_BRIDGE_OUT_RGXDESTROYTRANSFERCONTEXT *psRGXDestroyTransferContextOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+	psRGXDestroyTransferContextOUT->eError =
+		PVRSRVReleaseHandle(psConnection->psHandleBase,
+					(IMG_HANDLE) psRGXDestroyTransferContextIN->hTransferContext,
+					PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT);
+	if ((psRGXDestroyTransferContextOUT->eError != PVRSRV_OK) && (psRGXDestroyTransferContextOUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_ASSERT(0);
+		goto RGXDestroyTransferContext_exit;
+	}
+
+
+
+RGXDestroyTransferContext_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXSubmitTransfer(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXSUBMITTRANSFER *psRGXSubmitTransferIN,
+					  PVRSRV_BRIDGE_OUT_RGXSUBMITTRANSFER *psRGXSubmitTransferOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	RGX_SERVER_TQ_CONTEXT * psTransferContextInt = IMG_NULL;
+	IMG_UINT32 *ui32ClientFenceCountInt = IMG_NULL;
+	PRGXFWIF_UFO_ADDR **sFenceUFOAddressInt = IMG_NULL;
+	IMG_UINT32 **ui32FenceValueInt = IMG_NULL;
+	IMG_UINT32 *ui32ClientUpdateCountInt = IMG_NULL;
+	PRGXFWIF_UFO_ADDR **sUpdateUFOAddressInt = IMG_NULL;
+	IMG_UINT32 **ui32UpdateValueInt = IMG_NULL;
+	IMG_UINT32 *ui32ServerSyncCountInt = IMG_NULL;
+	IMG_UINT32 **ui32ServerSyncFlagsInt = IMG_NULL;
+	SERVER_SYNC_PRIMITIVE * **psServerSyncInt = IMG_NULL;
+	IMG_HANDLE **hServerSyncInt2 = IMG_NULL;
+	IMG_INT32 *i32CheckFenceFDsInt = IMG_NULL;
+	IMG_UINT32 *ui32CommandSizeInt = IMG_NULL;
+	IMG_UINT8 **ui8FWCommandInt = IMG_NULL;
+	IMG_UINT32 *ui32TQPrepareFlagsInt = IMG_NULL;
+
+
+
+
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		ui32ClientFenceCountInt = OSAllocMem(psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32));
+		if (!ui32ClientFenceCountInt)
+		{
+			psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXSubmitTransfer_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXSubmitTransferIN->pui32ClientFenceCount, psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32))
+				|| (OSCopyFromUser(NULL, ui32ClientFenceCountInt, psRGXSubmitTransferIN->pui32ClientFenceCount,
+				psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32)) != PVRSRV_OK) )
+			{
+				psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXSubmitTransfer_exit;
+			}
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		IMG_UINT32 ui32Pass=0;
+		IMG_UINT32 i;
+		IMG_UINT32 ui32AllocSize=0;
+		IMG_UINT32 ui32Size;
+		IMG_UINT8 *pui8Ptr = IMG_NULL;
+
+		/*
+			Two pass loop, 1st find out the size and 2nd allocation and set offsets.
+			Keeps allocation cost down and simplifies the free path
+		*/
+		for (ui32Pass=0;ui32Pass<2;ui32Pass++)
+		{
+			ui32Size = psRGXSubmitTransferIN->ui32PrepareCount * sizeof(PRGXFWIF_UFO_ADDR *);
+			if (ui32Pass == 0)
+			{
+				ui32AllocSize += ui32Size;
+			}
+			else
+			{
+				pui8Ptr = OSAllocMem(ui32AllocSize);
+				if (pui8Ptr == IMG_NULL)
+				{
+					psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+					goto RGXSubmitTransfer_exit;
+				}
+				sFenceUFOAddressInt = (PRGXFWIF_UFO_ADDR **) pui8Ptr;
+				pui8Ptr += ui32Size;
+			}
+			
+			for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+			{
+				ui32Size = ui32ClientFenceCountInt[i] * sizeof(PRGXFWIF_UFO_ADDR);		
+				if (ui32Size)
+				{
+					if (ui32Pass == 0)
+					{
+						ui32AllocSize += ui32Size;
+					}
+					else
+					{
+						sFenceUFOAddressInt[i] = (PRGXFWIF_UFO_ADDR *) pui8Ptr;
+						pui8Ptr += ui32Size;
+					}
+				}
+			}
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+		PRGXFWIF_UFO_ADDR **psPtr;
+
+		/* Loop over all the pointers in the array copying the data into the kernel */
+		for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+		{
+			/* Copy the pointer over from the client side */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) &psRGXSubmitTransferIN->psFenceUFOAddress[i], sizeof(PRGXFWIF_UFO_ADDR **))
+				|| (OSCopyFromUser(NULL, &psPtr, &psRGXSubmitTransferIN->psFenceUFOAddress[i],
+				sizeof(PRGXFWIF_UFO_ADDR **)) != PVRSRV_OK) )
+			{
+				psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXSubmitTransfer_exit;
+			}
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psPtr, (psRGXSubmitTransferIN->pui32ClientFenceCount[i] * sizeof(PRGXFWIF_UFO_ADDR)))
+				|| (OSCopyFromUser(NULL, (sFenceUFOAddressInt[i]), psPtr,
+				(psRGXSubmitTransferIN->pui32ClientFenceCount[i] * sizeof(PRGXFWIF_UFO_ADDR))) != PVRSRV_OK) )
+			{
+				psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXSubmitTransfer_exit;
+			}
+		}
+	}
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		IMG_UINT32 ui32Pass=0;
+		IMG_UINT32 i;
+		IMG_UINT32 ui32AllocSize=0;
+		IMG_UINT32 ui32Size;
+		IMG_UINT8 *pui8Ptr = IMG_NULL;
+
+		/*
+			Two pass loop, 1st find out the size and 2nd allocation and set offsets.
+			Keeps allocation cost down and simplifies the free path
+		*/
+		for (ui32Pass=0;ui32Pass<2;ui32Pass++)
+		{
+			ui32Size = psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32 *);
+			if (ui32Pass == 0)
+			{
+				ui32AllocSize += ui32Size;
+			}
+			else
+			{
+				pui8Ptr = OSAllocMem(ui32AllocSize);
+				if (pui8Ptr == IMG_NULL)
+				{
+					psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+					goto RGXSubmitTransfer_exit;
+				}
+				ui32FenceValueInt = (IMG_UINT32 **) pui8Ptr;
+				pui8Ptr += ui32Size;
+			}
+			
+			for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+			{
+				ui32Size = ui32ClientFenceCountInt[i] * sizeof(IMG_UINT32);		
+				if (ui32Size)
+				{
+					if (ui32Pass == 0)
+					{
+						ui32AllocSize += ui32Size;
+					}
+					else
+					{
+						ui32FenceValueInt[i] = (IMG_UINT32 *) pui8Ptr;
+						pui8Ptr += ui32Size;
+					}
+				}
+			}
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+		IMG_UINT32 **psPtr;
+
+		/* Loop over all the pointers in the array copying the data into the kernel */
+		for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+		{
+			/* Copy the pointer over from the client side */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) &psRGXSubmitTransferIN->pui32FenceValue[i], sizeof(IMG_UINT32 **))
+				|| (OSCopyFromUser(NULL, &psPtr, &psRGXSubmitTransferIN->pui32FenceValue[i],
+				sizeof(IMG_UINT32 **)) != PVRSRV_OK) )
+			{
+				psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXSubmitTransfer_exit;
+			}
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psPtr, (psRGXSubmitTransferIN->pui32ClientFenceCount[i] * sizeof(IMG_UINT32)))
+				|| (OSCopyFromUser(NULL, (ui32FenceValueInt[i]), psPtr,
+				(psRGXSubmitTransferIN->pui32ClientFenceCount[i] * sizeof(IMG_UINT32))) != PVRSRV_OK) )
+			{
+				psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXSubmitTransfer_exit;
+			}
+		}
+	}
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		ui32ClientUpdateCountInt = OSAllocMem(psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32));
+		if (!ui32ClientUpdateCountInt)
+		{
+			psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXSubmitTransfer_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXSubmitTransferIN->pui32ClientUpdateCount, psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32))
+				|| (OSCopyFromUser(NULL, ui32ClientUpdateCountInt, psRGXSubmitTransferIN->pui32ClientUpdateCount,
+				psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32)) != PVRSRV_OK) )
+			{
+				psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXSubmitTransfer_exit;
+			}
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		IMG_UINT32 ui32Pass=0;
+		IMG_UINT32 i;
+		IMG_UINT32 ui32AllocSize=0;
+		IMG_UINT32 ui32Size;
+		IMG_UINT8 *pui8Ptr = IMG_NULL;
+
+		/*
+			Two pass loop, 1st find out the size and 2nd allocation and set offsets.
+			Keeps allocation cost down and simplifies the free path
+		*/
+		for (ui32Pass=0;ui32Pass<2;ui32Pass++)
+		{
+			ui32Size = psRGXSubmitTransferIN->ui32PrepareCount * sizeof(PRGXFWIF_UFO_ADDR *);
+			if (ui32Pass == 0)
+			{
+				ui32AllocSize += ui32Size;
+			}
+			else
+			{
+				pui8Ptr = OSAllocMem(ui32AllocSize);
+				if (pui8Ptr == IMG_NULL)
+				{
+					psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+					goto RGXSubmitTransfer_exit;
+				}
+				sUpdateUFOAddressInt = (PRGXFWIF_UFO_ADDR **) pui8Ptr;
+				pui8Ptr += ui32Size;
+			}
+			
+			for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+			{
+				ui32Size = ui32ClientUpdateCountInt[i] * sizeof(PRGXFWIF_UFO_ADDR);		
+				if (ui32Size)
+				{
+					if (ui32Pass == 0)
+					{
+						ui32AllocSize += ui32Size;
+					}
+					else
+					{
+						sUpdateUFOAddressInt[i] = (PRGXFWIF_UFO_ADDR *) pui8Ptr;
+						pui8Ptr += ui32Size;
+					}
+				}
+			}
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+		PRGXFWIF_UFO_ADDR **psPtr;
+
+		/* Loop over all the pointers in the array copying the data into the kernel */
+		for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+		{
+			/* Copy the pointer over from the client side */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) &psRGXSubmitTransferIN->psUpdateUFOAddress[i], sizeof(PRGXFWIF_UFO_ADDR **))
+				|| (OSCopyFromUser(NULL, &psPtr, &psRGXSubmitTransferIN->psUpdateUFOAddress[i],
+				sizeof(PRGXFWIF_UFO_ADDR **)) != PVRSRV_OK) )
+			{
+				psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXSubmitTransfer_exit;
+			}
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psPtr, (psRGXSubmitTransferIN->pui32ClientUpdateCount[i] * sizeof(PRGXFWIF_UFO_ADDR)))
+				|| (OSCopyFromUser(NULL, (sUpdateUFOAddressInt[i]), psPtr,
+				(psRGXSubmitTransferIN->pui32ClientUpdateCount[i] * sizeof(PRGXFWIF_UFO_ADDR))) != PVRSRV_OK) )
+			{
+				psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXSubmitTransfer_exit;
+			}
+		}
+	}
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		IMG_UINT32 ui32Pass=0;
+		IMG_UINT32 i;
+		IMG_UINT32 ui32AllocSize=0;
+		IMG_UINT32 ui32Size;
+		IMG_UINT8 *pui8Ptr = IMG_NULL;
+
+		/*
+			Two pass loop, 1st find out the size and 2nd allocation and set offsets.
+			Keeps allocation cost down and simplifies the free path
+		*/
+		for (ui32Pass=0;ui32Pass<2;ui32Pass++)
+		{
+			ui32Size = psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32 *);
+			if (ui32Pass == 0)
+			{
+				ui32AllocSize += ui32Size;
+			}
+			else
+			{
+				pui8Ptr = OSAllocMem(ui32AllocSize);
+				if (pui8Ptr == IMG_NULL)
+				{
+					psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+					goto RGXSubmitTransfer_exit;
+				}
+				ui32UpdateValueInt = (IMG_UINT32 **) pui8Ptr;
+				pui8Ptr += ui32Size;
+			}
+			
+			for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+			{
+				ui32Size = ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32);		
+				if (ui32Size)
+				{
+					if (ui32Pass == 0)
+					{
+						ui32AllocSize += ui32Size;
+					}
+					else
+					{
+						ui32UpdateValueInt[i] = (IMG_UINT32 *) pui8Ptr;
+						pui8Ptr += ui32Size;
+					}
+				}
+			}
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+		IMG_UINT32 **psPtr;
+
+		/* Loop over all the pointers in the array copying the data into the kernel */
+		for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+		{
+			/* Copy the pointer over from the client side */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) &psRGXSubmitTransferIN->pui32UpdateValue[i], sizeof(IMG_UINT32 **))
+				|| (OSCopyFromUser(NULL, &psPtr, &psRGXSubmitTransferIN->pui32UpdateValue[i],
+				sizeof(IMG_UINT32 **)) != PVRSRV_OK) )
+			{
+				psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXSubmitTransfer_exit;
+			}
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psPtr, (psRGXSubmitTransferIN->pui32ClientUpdateCount[i] * sizeof(IMG_UINT32)))
+				|| (OSCopyFromUser(NULL, (ui32UpdateValueInt[i]), psPtr,
+				(psRGXSubmitTransferIN->pui32ClientUpdateCount[i] * sizeof(IMG_UINT32))) != PVRSRV_OK) )
+			{
+				psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXSubmitTransfer_exit;
+			}
+		}
+	}
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		ui32ServerSyncCountInt = OSAllocMem(psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32));
+		if (!ui32ServerSyncCountInt)
+		{
+			psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXSubmitTransfer_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXSubmitTransferIN->pui32ServerSyncCount, psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32))
+				|| (OSCopyFromUser(NULL, ui32ServerSyncCountInt, psRGXSubmitTransferIN->pui32ServerSyncCount,
+				psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32)) != PVRSRV_OK) )
+			{
+				psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXSubmitTransfer_exit;
+			}
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		IMG_UINT32 ui32Pass=0;
+		IMG_UINT32 i;
+		IMG_UINT32 ui32AllocSize=0;
+		IMG_UINT32 ui32Size;
+		IMG_UINT8 *pui8Ptr = IMG_NULL;
+
+		/*
+			Two pass loop, 1st find out the size and 2nd allocation and set offsets.
+			Keeps allocation cost down and simplifies the free path
+		*/
+		for (ui32Pass=0;ui32Pass<2;ui32Pass++)
+		{
+			ui32Size = psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32 *);
+			if (ui32Pass == 0)
+			{
+				ui32AllocSize += ui32Size;
+			}
+			else
+			{
+				pui8Ptr = OSAllocMem(ui32AllocSize);
+				if (pui8Ptr == IMG_NULL)
+				{
+					psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+					goto RGXSubmitTransfer_exit;
+				}
+				ui32ServerSyncFlagsInt = (IMG_UINT32 **) pui8Ptr;
+				pui8Ptr += ui32Size;
+			}
+			
+			for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+			{
+				ui32Size = ui32ServerSyncCountInt[i] * sizeof(IMG_UINT32);		
+				if (ui32Size)
+				{
+					if (ui32Pass == 0)
+					{
+						ui32AllocSize += ui32Size;
+					}
+					else
+					{
+						ui32ServerSyncFlagsInt[i] = (IMG_UINT32 *) pui8Ptr;
+						pui8Ptr += ui32Size;
+					}
+				}
+			}
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+		IMG_UINT32 **psPtr;
+
+		/* Loop over all the pointers in the array copying the data into the kernel */
+		for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+		{
+			/* Copy the pointer over from the client side */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) &psRGXSubmitTransferIN->pui32ServerSyncFlags[i], sizeof(IMG_UINT32 **))
+				|| (OSCopyFromUser(NULL, &psPtr, &psRGXSubmitTransferIN->pui32ServerSyncFlags[i],
+				sizeof(IMG_UINT32 **)) != PVRSRV_OK) )
+			{
+				psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXSubmitTransfer_exit;
+			}
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psPtr, (psRGXSubmitTransferIN->pui32ServerSyncCount[i] * sizeof(IMG_UINT32)))
+				|| (OSCopyFromUser(NULL, (ui32ServerSyncFlagsInt[i]), psPtr,
+				(psRGXSubmitTransferIN->pui32ServerSyncCount[i] * sizeof(IMG_UINT32))) != PVRSRV_OK) )
+			{
+				psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXSubmitTransfer_exit;
+			}
+		}
+	}
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		IMG_UINT32 ui32Pass=0;
+		IMG_UINT32 i;
+		IMG_UINT32 ui32AllocSize=0;
+		IMG_UINT32 ui32Size;
+		IMG_UINT8 *pui8Ptr = IMG_NULL;
+		IMG_UINT32 ui32AllocSize2=0;
+		IMG_UINT32 ui32Size2;
+		IMG_UINT8 *pui8Ptr2 = IMG_NULL;
+
+		/*
+			Two pass loop, 1st find out the size and 2nd allocation and set offsets.
+			Keeps allocation cost down and simplifies the free path
+		*/
+		for (ui32Pass=0;ui32Pass<2;ui32Pass++)
+		{
+			ui32Size = psRGXSubmitTransferIN->ui32PrepareCount * sizeof(SERVER_SYNC_PRIMITIVE * *);
+			ui32Size2 = psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_HANDLE *);
+			if (ui32Pass == 0)
+			{
+				ui32AllocSize += ui32Size;
+				ui32AllocSize2 += ui32Size2;
+			}
+			else
+			{
+				pui8Ptr = OSAllocMem(ui32AllocSize);
+				if (pui8Ptr == IMG_NULL)
+				{
+					psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+					goto RGXSubmitTransfer_exit;
+				}
+				psServerSyncInt = (SERVER_SYNC_PRIMITIVE * **) pui8Ptr;
+				pui8Ptr += ui32Size;
+				pui8Ptr2 = OSAllocMem(ui32AllocSize2);
+				if (pui8Ptr2 == IMG_NULL)
+				{
+					psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+					goto RGXSubmitTransfer_exit;
+				}
+				hServerSyncInt2 = (IMG_HANDLE **) pui8Ptr2;
+				pui8Ptr2 += ui32Size2;
+			}
+			
+			for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+			{
+				ui32Size = ui32ServerSyncCountInt[i] * sizeof(SERVER_SYNC_PRIMITIVE *);		
+				ui32Size2 = ui32ServerSyncCountInt[i] * sizeof(IMG_HANDLE);		
+				if (ui32Size)
+				{
+					if (ui32Pass == 0)
+					{
+						ui32AllocSize += ui32Size;
+						ui32AllocSize2 += ui32Size2;
+					}
+					else
+					{
+						psServerSyncInt[i] = (SERVER_SYNC_PRIMITIVE * *) pui8Ptr;
+						pui8Ptr += ui32Size;
+						hServerSyncInt2[i] = (IMG_HANDLE *) pui8Ptr2;
+						pui8Ptr2 += ui32Size2;
+					}
+				}
+			}
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+		IMG_HANDLE **psPtr;
+
+		/* Loop over all the pointers in the array copying the data into the kernel */
+		for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+		{
+			/* Copy the pointer over from the client side */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) &psRGXSubmitTransferIN->phServerSync[i], sizeof(IMG_HANDLE **))
+				|| (OSCopyFromUser(NULL, &psPtr, &psRGXSubmitTransferIN->phServerSync[i],
+				sizeof(IMG_HANDLE **)) != PVRSRV_OK) )
+			{
+				psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXSubmitTransfer_exit;
+			}
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psPtr, (psRGXSubmitTransferIN->pui32ServerSyncCount[i] * sizeof(IMG_HANDLE)))
+				|| (OSCopyFromUser(NULL, (hServerSyncInt2[i]), psPtr,
+				(psRGXSubmitTransferIN->pui32ServerSyncCount[i] * sizeof(IMG_HANDLE))) != PVRSRV_OK) )
+			{
+				psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXSubmitTransfer_exit;
+			}
+		}
+	}
+	if (psRGXSubmitTransferIN->ui32NumCheckFenceFDs != 0)
+	{
+		i32CheckFenceFDsInt = OSAllocMem(psRGXSubmitTransferIN->ui32NumCheckFenceFDs * sizeof(IMG_INT32));
+		if (!i32CheckFenceFDsInt)
+		{
+			psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXSubmitTransfer_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXSubmitTransferIN->pi32CheckFenceFDs, psRGXSubmitTransferIN->ui32NumCheckFenceFDs * sizeof(IMG_INT32))
+				|| (OSCopyFromUser(NULL, i32CheckFenceFDsInt, psRGXSubmitTransferIN->pi32CheckFenceFDs,
+				psRGXSubmitTransferIN->ui32NumCheckFenceFDs * sizeof(IMG_INT32)) != PVRSRV_OK) )
+			{
+				psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXSubmitTransfer_exit;
+			}
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		ui32CommandSizeInt = OSAllocMem(psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32));
+		if (!ui32CommandSizeInt)
+		{
+			psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXSubmitTransfer_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXSubmitTransferIN->pui32CommandSize, psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32))
+				|| (OSCopyFromUser(NULL, ui32CommandSizeInt, psRGXSubmitTransferIN->pui32CommandSize,
+				psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32)) != PVRSRV_OK) )
+			{
+				psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXSubmitTransfer_exit;
+			}
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		IMG_UINT32 ui32Pass=0;
+		IMG_UINT32 i;
+		IMG_UINT32 ui32AllocSize=0;
+		IMG_UINT32 ui32Size;
+		IMG_UINT8 *pui8Ptr = IMG_NULL;
+
+		/*
+			Two pass loop, 1st find out the size and 2nd allocation and set offsets.
+			Keeps allocation cost down and simplifies the free path
+		*/
+		for (ui32Pass=0;ui32Pass<2;ui32Pass++)
+		{
+			ui32Size = psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT8 *);
+			if (ui32Pass == 0)
+			{
+				ui32AllocSize += ui32Size;
+			}
+			else
+			{
+				pui8Ptr = OSAllocMem(ui32AllocSize);
+				if (pui8Ptr == IMG_NULL)
+				{
+					psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+					goto RGXSubmitTransfer_exit;
+				}
+				ui8FWCommandInt = (IMG_UINT8 **) pui8Ptr;
+				pui8Ptr += ui32Size;
+			}
+			
+			for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+			{
+				ui32Size = ui32CommandSizeInt[i] * sizeof(IMG_UINT8);		
+				if (ui32Size)
+				{
+					if (ui32Pass == 0)
+					{
+						ui32AllocSize += ui32Size;
+					}
+					else
+					{
+						ui8FWCommandInt[i] = (IMG_UINT8 *) pui8Ptr;
+						pui8Ptr += ui32Size;
+					}
+				}
+			}
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+		IMG_UINT8 **psPtr;
+
+		/* Loop over all the pointers in the array copying the data into the kernel */
+		for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+		{
+			/* Copy the pointer over from the client side */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) &psRGXSubmitTransferIN->pui8FWCommand[i], sizeof(IMG_UINT8 **))
+				|| (OSCopyFromUser(NULL, &psPtr, &psRGXSubmitTransferIN->pui8FWCommand[i],
+				sizeof(IMG_UINT8 **)) != PVRSRV_OK) )
+			{
+				psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXSubmitTransfer_exit;
+			}
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psPtr, (psRGXSubmitTransferIN->pui32CommandSize[i] * sizeof(IMG_UINT8)))
+				|| (OSCopyFromUser(NULL, (ui8FWCommandInt[i]), psPtr,
+				(psRGXSubmitTransferIN->pui32CommandSize[i] * sizeof(IMG_UINT8))) != PVRSRV_OK) )
+			{
+				psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXSubmitTransfer_exit;
+			}
+		}
+	}
+	if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+	{
+		ui32TQPrepareFlagsInt = OSAllocMem(psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32));
+		if (!ui32TQPrepareFlagsInt)
+		{
+			psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXSubmitTransfer_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXSubmitTransferIN->pui32TQPrepareFlags, psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32))
+				|| (OSCopyFromUser(NULL, ui32TQPrepareFlagsInt, psRGXSubmitTransferIN->pui32TQPrepareFlags,
+				psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32)) != PVRSRV_OK) )
+			{
+				psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXSubmitTransfer_exit;
+			}
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXSubmitTransferOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psTransferContextInt,
+											psRGXSubmitTransferIN->hTransferContext,
+											PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT);
+					if(psRGXSubmitTransferOUT->eError != PVRSRV_OK)
+					{
+						goto RGXSubmitTransfer_exit;
+					}
+				}
+
+
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+		{
+			IMG_UINT32 j;
+			for (j=0;j<ui32ServerSyncCountInt[i];j++)
+			{
+				{
+					/* Look up the address from the handle */
+					psRGXSubmitTransferOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psServerSyncInt[i][j],
+											hServerSyncInt2[i][j],
+											PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+					if(psRGXSubmitTransferOUT->eError != PVRSRV_OK)
+					{
+						goto RGXSubmitTransfer_exit;
+					}
+				}
+
+			}
+		}
+	}
+
+	psRGXSubmitTransferOUT->eError =
+		PVRSRVRGXSubmitTransferKM(
+					psTransferContextInt,
+					psRGXSubmitTransferIN->ui32PrepareCount,
+					ui32ClientFenceCountInt,
+					sFenceUFOAddressInt,
+					ui32FenceValueInt,
+					ui32ClientUpdateCountInt,
+					sUpdateUFOAddressInt,
+					ui32UpdateValueInt,
+					ui32ServerSyncCountInt,
+					ui32ServerSyncFlagsInt,
+					psServerSyncInt,
+					psRGXSubmitTransferIN->ui32NumCheckFenceFDs,
+					i32CheckFenceFDsInt,
+					psRGXSubmitTransferIN->i32UpdateFenceFD,
+					ui32CommandSizeInt,
+					ui8FWCommandInt,
+					ui32TQPrepareFlagsInt,
+					psRGXSubmitTransferIN->ui32ExternalJobReference,
+					psRGXSubmitTransferIN->ui32InternalJobReference);
+
+
+
+
+RGXSubmitTransfer_exit:
+	if (ui32ClientFenceCountInt)
+		OSFreeMem(ui32ClientFenceCountInt);
+	if (sFenceUFOAddressInt)
+		OSFreeMem(sFenceUFOAddressInt);
+	if (ui32FenceValueInt)
+		OSFreeMem(ui32FenceValueInt);
+	if (ui32ClientUpdateCountInt)
+		OSFreeMem(ui32ClientUpdateCountInt);
+	if (sUpdateUFOAddressInt)
+		OSFreeMem(sUpdateUFOAddressInt);
+	if (ui32UpdateValueInt)
+		OSFreeMem(ui32UpdateValueInt);
+	if (ui32ServerSyncCountInt)
+		OSFreeMem(ui32ServerSyncCountInt);
+	if (ui32ServerSyncFlagsInt)
+		OSFreeMem(ui32ServerSyncFlagsInt);
+	if (psServerSyncInt)
+		OSFreeMem(psServerSyncInt);
+	if (hServerSyncInt2)
+		OSFreeMem(hServerSyncInt2);
+	if (i32CheckFenceFDsInt)
+		OSFreeMem(i32CheckFenceFDsInt);
+	if (ui32CommandSizeInt)
+		OSFreeMem(ui32CommandSizeInt);
+	if (ui8FWCommandInt)
+		OSFreeMem(ui8FWCommandInt);
+	if (ui32TQPrepareFlagsInt)
+		OSFreeMem(ui32TQPrepareFlagsInt);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXSetTransferContextPriority(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPRIORITY *psRGXSetTransferContextPriorityIN,
+					  PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPRIORITY *psRGXSetTransferContextPriorityOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	RGX_SERVER_TQ_CONTEXT * psTransferContextInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXSetTransferContextPriorityOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psTransferContextInt,
+											psRGXSetTransferContextPriorityIN->hTransferContext,
+											PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT);
+					if(psRGXSetTransferContextPriorityOUT->eError != PVRSRV_OK)
+					{
+						goto RGXSetTransferContextPriority_exit;
+					}
+				}
+
+
+	psRGXSetTransferContextPriorityOUT->eError =
+		PVRSRVRGXSetTransferContextPriorityKM(psConnection,
+					psTransferContextInt,
+					psRGXSetTransferContextPriorityIN->ui32Priority);
+
+
+
+
+RGXSetTransferContextPriority_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXKickSyncTransfer(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXKICKSYNCTRANSFER *psRGXKickSyncTransferIN,
+					  PVRSRV_BRIDGE_OUT_RGXKICKSYNCTRANSFER *psRGXKickSyncTransferOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	RGX_SERVER_TQ_CONTEXT * psTransferContextInt = IMG_NULL;
+	PRGXFWIF_UFO_ADDR *sClientFenceUFOAddressInt = IMG_NULL;
+	IMG_UINT32 *ui32ClientFenceValueInt = IMG_NULL;
+	PRGXFWIF_UFO_ADDR *sClientUpdateUFOAddressInt = IMG_NULL;
+	IMG_UINT32 *ui32ClientUpdateValueInt = IMG_NULL;
+	IMG_UINT32 *ui32ServerSyncFlagsInt = IMG_NULL;
+	SERVER_SYNC_PRIMITIVE * *psServerSyncsInt = IMG_NULL;
+	IMG_HANDLE *hServerSyncsInt2 = IMG_NULL;
+	IMG_INT32 *i32CheckFenceFDsInt = IMG_NULL;
+
+
+
+
+	if (psRGXKickSyncTransferIN->ui32ClientFenceCount != 0)
+	{
+		sClientFenceUFOAddressInt = OSAllocMem(psRGXKickSyncTransferIN->ui32ClientFenceCount * sizeof(PRGXFWIF_UFO_ADDR));
+		if (!sClientFenceUFOAddressInt)
+		{
+			psRGXKickSyncTransferOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXKickSyncTransfer_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXKickSyncTransferIN->psClientFenceUFOAddress, psRGXKickSyncTransferIN->ui32ClientFenceCount * sizeof(PRGXFWIF_UFO_ADDR))
+				|| (OSCopyFromUser(NULL, sClientFenceUFOAddressInt, psRGXKickSyncTransferIN->psClientFenceUFOAddress,
+				psRGXKickSyncTransferIN->ui32ClientFenceCount * sizeof(PRGXFWIF_UFO_ADDR)) != PVRSRV_OK) )
+			{
+				psRGXKickSyncTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXKickSyncTransfer_exit;
+			}
+	if (psRGXKickSyncTransferIN->ui32ClientFenceCount != 0)
+	{
+		ui32ClientFenceValueInt = OSAllocMem(psRGXKickSyncTransferIN->ui32ClientFenceCount * sizeof(IMG_UINT32));
+		if (!ui32ClientFenceValueInt)
+		{
+			psRGXKickSyncTransferOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXKickSyncTransfer_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXKickSyncTransferIN->pui32ClientFenceValue, psRGXKickSyncTransferIN->ui32ClientFenceCount * sizeof(IMG_UINT32))
+				|| (OSCopyFromUser(NULL, ui32ClientFenceValueInt, psRGXKickSyncTransferIN->pui32ClientFenceValue,
+				psRGXKickSyncTransferIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK) )
+			{
+				psRGXKickSyncTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXKickSyncTransfer_exit;
+			}
+	if (psRGXKickSyncTransferIN->ui32ClientUpdateCount != 0)
+	{
+		sClientUpdateUFOAddressInt = OSAllocMem(psRGXKickSyncTransferIN->ui32ClientUpdateCount * sizeof(PRGXFWIF_UFO_ADDR));
+		if (!sClientUpdateUFOAddressInt)
+		{
+			psRGXKickSyncTransferOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXKickSyncTransfer_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXKickSyncTransferIN->psClientUpdateUFOAddress, psRGXKickSyncTransferIN->ui32ClientUpdateCount * sizeof(PRGXFWIF_UFO_ADDR))
+				|| (OSCopyFromUser(NULL, sClientUpdateUFOAddressInt, psRGXKickSyncTransferIN->psClientUpdateUFOAddress,
+				psRGXKickSyncTransferIN->ui32ClientUpdateCount * sizeof(PRGXFWIF_UFO_ADDR)) != PVRSRV_OK) )
+			{
+				psRGXKickSyncTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXKickSyncTransfer_exit;
+			}
+	if (psRGXKickSyncTransferIN->ui32ClientUpdateCount != 0)
+	{
+		ui32ClientUpdateValueInt = OSAllocMem(psRGXKickSyncTransferIN->ui32ClientUpdateCount * sizeof(IMG_UINT32));
+		if (!ui32ClientUpdateValueInt)
+		{
+			psRGXKickSyncTransferOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXKickSyncTransfer_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXKickSyncTransferIN->pui32ClientUpdateValue, psRGXKickSyncTransferIN->ui32ClientUpdateCount * sizeof(IMG_UINT32))
+				|| (OSCopyFromUser(NULL, ui32ClientUpdateValueInt, psRGXKickSyncTransferIN->pui32ClientUpdateValue,
+				psRGXKickSyncTransferIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK) )
+			{
+				psRGXKickSyncTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXKickSyncTransfer_exit;
+			}
+	if (psRGXKickSyncTransferIN->ui32ServerSyncCount != 0)
+	{
+		ui32ServerSyncFlagsInt = OSAllocMem(psRGXKickSyncTransferIN->ui32ServerSyncCount * sizeof(IMG_UINT32));
+		if (!ui32ServerSyncFlagsInt)
+		{
+			psRGXKickSyncTransferOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXKickSyncTransfer_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXKickSyncTransferIN->pui32ServerSyncFlags, psRGXKickSyncTransferIN->ui32ServerSyncCount * sizeof(IMG_UINT32))
+				|| (OSCopyFromUser(NULL, ui32ServerSyncFlagsInt, psRGXKickSyncTransferIN->pui32ServerSyncFlags,
+				psRGXKickSyncTransferIN->ui32ServerSyncCount * sizeof(IMG_UINT32)) != PVRSRV_OK) )
+			{
+				psRGXKickSyncTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXKickSyncTransfer_exit;
+			}
+	if (psRGXKickSyncTransferIN->ui32ServerSyncCount != 0)
+	{
+		psServerSyncsInt = OSAllocMem(psRGXKickSyncTransferIN->ui32ServerSyncCount * sizeof(SERVER_SYNC_PRIMITIVE *));
+		if (!psServerSyncsInt)
+		{
+			psRGXKickSyncTransferOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXKickSyncTransfer_exit;
+		}
+		hServerSyncsInt2 = OSAllocMem(psRGXKickSyncTransferIN->ui32ServerSyncCount * sizeof(IMG_HANDLE));
+		if (!hServerSyncsInt2)
+		{
+			psRGXKickSyncTransferOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXKickSyncTransfer_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXKickSyncTransferIN->phServerSyncs, psRGXKickSyncTransferIN->ui32ServerSyncCount * sizeof(IMG_HANDLE))
+				|| (OSCopyFromUser(NULL, hServerSyncsInt2, psRGXKickSyncTransferIN->phServerSyncs,
+				psRGXKickSyncTransferIN->ui32ServerSyncCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) )
+			{
+				psRGXKickSyncTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXKickSyncTransfer_exit;
+			}
+	if (psRGXKickSyncTransferIN->ui32NumCheckFenceFDs != 0)
+	{
+		i32CheckFenceFDsInt = OSAllocMem(psRGXKickSyncTransferIN->ui32NumCheckFenceFDs * sizeof(IMG_INT32));
+		if (!i32CheckFenceFDsInt)
+		{
+			psRGXKickSyncTransferOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RGXKickSyncTransfer_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXKickSyncTransferIN->pi32CheckFenceFDs, psRGXKickSyncTransferIN->ui32NumCheckFenceFDs * sizeof(IMG_INT32))
+				|| (OSCopyFromUser(NULL, i32CheckFenceFDsInt, psRGXKickSyncTransferIN->pi32CheckFenceFDs,
+				psRGXKickSyncTransferIN->ui32NumCheckFenceFDs * sizeof(IMG_INT32)) != PVRSRV_OK) )
+			{
+				psRGXKickSyncTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RGXKickSyncTransfer_exit;
+			}
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXKickSyncTransferOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psTransferContextInt,
+											psRGXKickSyncTransferIN->hTransferContext,
+											PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT);
+					if(psRGXKickSyncTransferOUT->eError != PVRSRV_OK)
+					{
+						goto RGXKickSyncTransfer_exit;
+					}
+				}
+
+
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psRGXKickSyncTransferIN->ui32ServerSyncCount;i++)
+		{
+				{
+					/* Look up the address from the handle */
+					psRGXKickSyncTransferOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psServerSyncsInt[i],
+											hServerSyncsInt2[i],
+											PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+					if(psRGXKickSyncTransferOUT->eError != PVRSRV_OK)
+					{
+						goto RGXKickSyncTransfer_exit;
+					}
+				}
+
+		}
+	}
+
+	psRGXKickSyncTransferOUT->eError =
+		PVRSRVRGXKickSyncTransferKM(
+					psTransferContextInt,
+					psRGXKickSyncTransferIN->ui32ClientFenceCount,
+					sClientFenceUFOAddressInt,
+					ui32ClientFenceValueInt,
+					psRGXKickSyncTransferIN->ui32ClientUpdateCount,
+					sClientUpdateUFOAddressInt,
+					ui32ClientUpdateValueInt,
+					psRGXKickSyncTransferIN->ui32ServerSyncCount,
+					ui32ServerSyncFlagsInt,
+					psServerSyncsInt,
+					psRGXKickSyncTransferIN->ui32NumCheckFenceFDs,
+					i32CheckFenceFDsInt,
+					psRGXKickSyncTransferIN->i32UpdateFenceFD,
+					psRGXKickSyncTransferIN->ui32TQPrepareFlags);
+
+
+
+
+RGXKickSyncTransfer_exit:
+	if (sClientFenceUFOAddressInt)
+		OSFreeMem(sClientFenceUFOAddressInt);
+	if (ui32ClientFenceValueInt)
+		OSFreeMem(ui32ClientFenceValueInt);
+	if (sClientUpdateUFOAddressInt)
+		OSFreeMem(sClientUpdateUFOAddressInt);
+	if (ui32ClientUpdateValueInt)
+		OSFreeMem(ui32ClientUpdateValueInt);
+	if (ui32ServerSyncFlagsInt)
+		OSFreeMem(ui32ServerSyncFlagsInt);
+	if (psServerSyncsInt)
+		OSFreeMem(psServerSyncsInt);
+	if (hServerSyncsInt2)
+		OSFreeMem(hServerSyncsInt2);
+	if (i32CheckFenceFDsInt)
+		OSFreeMem(i32CheckFenceFDsInt);
+
+	return 0;
+}
+
+
+
+/* *************************************************************************** 
+ * Server bridge dispatch related glue 
+ */
+
+
+PVRSRV_ERROR InitRGXTQBridge(IMG_VOID);
+PVRSRV_ERROR DeinitRGXTQBridge(IMG_VOID);
+
+/*
+ * Register all RGXTQ functions with services
+ */
+PVRSRV_ERROR InitRGXTQBridge(IMG_VOID)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXCREATETRANSFERCONTEXT, PVRSRVBridgeRGXCreateTransferContext,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXDESTROYTRANSFERCONTEXT, PVRSRVBridgeRGXDestroyTransferContext,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXSUBMITTRANSFER, PVRSRVBridgeRGXSubmitTransfer,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPRIORITY, PVRSRVBridgeRGXSetTransferContextPriority,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXKICKSYNCTRANSFER, PVRSRVBridgeRGXKickSyncTransfer,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all rgxtq functions with services
+ */
+PVRSRV_ERROR DeinitRGXTQBridge(IMG_VOID)
+{
+	return PVRSRV_OK;
+}
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/ri_bridge/client_ri_bridge.h b/drivers/external_drivers/intel_media/graphics/rgx/generated/ri_bridge/client_ri_bridge.h
new file mode 100644
index 0000000..ec290bf
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/ri_bridge/client_ri_bridge.h
@@ -0,0 +1,84 @@
+/*************************************************************************/ /*!
+@File
+@Title          Client bridge header for ri
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Exports the client bridge functions for ri
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef CLIENT_RI_BRIDGE_H
+#define CLIENT_RI_BRIDGE_H
+
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+
+#include "common_ri_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIWritePMREntry(IMG_HANDLE hBridge,
+							     IMG_HANDLE hPMRHandle,
+							     IMG_UINT32 ui32TextASize,
+							     const IMG_CHAR *puiTextA,
+							     IMG_UINT64 ui64LogicalSize);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIWriteMEMDESCEntry(IMG_HANDLE hBridge,
+								 IMG_HANDLE hPMRHandle,
+								 IMG_UINT32 ui32TextBSize,
+								 const IMG_CHAR *puiTextB,
+								 IMG_UINT64 ui64Offset,
+								 IMG_UINT64 ui64Size,
+								 IMG_BOOL bIsImport,
+								 IMG_BOOL bIsExportable,
+								 IMG_HANDLE *phRIHandle);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIUpdateMEMDESCAddr(IMG_HANDLE hBridge,
+								 IMG_HANDLE hRIHandle,
+								 IMG_DEV_VIRTADDR sAddr);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIDeleteMEMDESCEntry(IMG_HANDLE hBridge,
+								  IMG_HANDLE hRIHandle);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIDumpList(IMG_HANDLE hBridge,
+							IMG_HANDLE hPMRHandle);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIDumpAll(IMG_HANDLE hBridge);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIDumpProcess(IMG_HANDLE hBridge,
+							   IMG_PID ui32Pid);
+
+
+#endif /* CLIENT_RI_BRIDGE_H */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/ri_bridge/common_ri_bridge.h b/drivers/external_drivers/intel_media/graphics/rgx/generated/ri_bridge/common_ri_bridge.h
new file mode 100644
index 0000000..415360d
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/ri_bridge/common_ri_bridge.h
@@ -0,0 +1,195 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common bridge header for ri
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures that are used by both
+                the client and sever side of the bridge for ri
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_RI_BRIDGE_H
+#define COMMON_RI_BRIDGE_H
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "ri_typedefs.h"
+
+
+#define PVRSRV_BRIDGE_RI_CMD_FIRST			0
+#define PVRSRV_BRIDGE_RI_RIWRITEPMRENTRY			PVRSRV_BRIDGE_RI_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RI_RIWRITEMEMDESCENTRY			PVRSRV_BRIDGE_RI_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RI_RIUPDATEMEMDESCADDR			PVRSRV_BRIDGE_RI_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RI_RIDELETEMEMDESCENTRY			PVRSRV_BRIDGE_RI_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RI_RIDUMPLIST			PVRSRV_BRIDGE_RI_CMD_FIRST+4
+#define PVRSRV_BRIDGE_RI_RIDUMPALL			PVRSRV_BRIDGE_RI_CMD_FIRST+5
+#define PVRSRV_BRIDGE_RI_RIDUMPPROCESS			PVRSRV_BRIDGE_RI_CMD_FIRST+6
+#define PVRSRV_BRIDGE_RI_CMD_LAST			(PVRSRV_BRIDGE_RI_CMD_FIRST+6)
+
+
+/*******************************************
+            RIWritePMREntry          
+ *******************************************/
+
+/* Bridge in structure for RIWritePMREntry */
+typedef struct PVRSRV_BRIDGE_IN_RIWRITEPMRENTRY_TAG
+{
+	IMG_HANDLE hPMRHandle;
+	IMG_UINT32 ui32TextASize;
+	const IMG_CHAR * puiTextA;
+	IMG_UINT64 ui64LogicalSize;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RIWRITEPMRENTRY;
+
+
+/* Bridge out structure for RIWritePMREntry */
+typedef struct PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRY_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRY;
+
+/*******************************************
+            RIWriteMEMDESCEntry          
+ *******************************************/
+
+/* Bridge in structure for RIWriteMEMDESCEntry */
+typedef struct PVRSRV_BRIDGE_IN_RIWRITEMEMDESCENTRY_TAG
+{
+	IMG_HANDLE hPMRHandle;
+	IMG_UINT32 ui32TextBSize;
+	const IMG_CHAR * puiTextB;
+	IMG_UINT64 ui64Offset;
+	IMG_UINT64 ui64Size;
+	IMG_BOOL bIsImport;
+	IMG_BOOL bIsExportable;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RIWRITEMEMDESCENTRY;
+
+
+/* Bridge out structure for RIWriteMEMDESCEntry */
+typedef struct PVRSRV_BRIDGE_OUT_RIWRITEMEMDESCENTRY_TAG
+{
+	IMG_HANDLE hRIHandle;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RIWRITEMEMDESCENTRY;
+
+/*******************************************
+            RIUpdateMEMDESCAddr          
+ *******************************************/
+
+/* Bridge in structure for RIUpdateMEMDESCAddr */
+typedef struct PVRSRV_BRIDGE_IN_RIUPDATEMEMDESCADDR_TAG
+{
+	IMG_HANDLE hRIHandle;
+	IMG_DEV_VIRTADDR sAddr;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RIUPDATEMEMDESCADDR;
+
+
+/* Bridge out structure for RIUpdateMEMDESCAddr */
+typedef struct PVRSRV_BRIDGE_OUT_RIUPDATEMEMDESCADDR_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RIUPDATEMEMDESCADDR;
+
+/*******************************************
+            RIDeleteMEMDESCEntry          
+ *******************************************/
+
+/* Bridge in structure for RIDeleteMEMDESCEntry */
+typedef struct PVRSRV_BRIDGE_IN_RIDELETEMEMDESCENTRY_TAG
+{
+	IMG_HANDLE hRIHandle;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RIDELETEMEMDESCENTRY;
+
+
+/* Bridge out structure for RIDeleteMEMDESCEntry */
+typedef struct PVRSRV_BRIDGE_OUT_RIDELETEMEMDESCENTRY_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RIDELETEMEMDESCENTRY;
+
+/*******************************************
+            RIDumpList          
+ *******************************************/
+
+/* Bridge in structure for RIDumpList */
+typedef struct PVRSRV_BRIDGE_IN_RIDUMPLIST_TAG
+{
+	IMG_HANDLE hPMRHandle;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RIDUMPLIST;
+
+
+/* Bridge out structure for RIDumpList */
+typedef struct PVRSRV_BRIDGE_OUT_RIDUMPLIST_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RIDUMPLIST;
+
+/*******************************************
+            RIDumpAll          
+ *******************************************/
+
+/* Bridge in structure for RIDumpAll */
+typedef struct PVRSRV_BRIDGE_IN_RIDUMPALL_TAG
+{
+	 IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RIDUMPALL;
+
+
+/* Bridge out structure for RIDumpAll */
+typedef struct PVRSRV_BRIDGE_OUT_RIDUMPALL_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RIDUMPALL;
+
+/*******************************************
+            RIDumpProcess          
+ *******************************************/
+
+/* Bridge in structure for RIDumpProcess */
+typedef struct PVRSRV_BRIDGE_IN_RIDUMPPROCESS_TAG
+{
+	IMG_PID ui32Pid;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RIDUMPPROCESS;
+
+
+/* Bridge out structure for RIDumpProcess */
+typedef struct PVRSRV_BRIDGE_OUT_RIDUMPPROCESS_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RIDUMPPROCESS;
+
+#endif /* COMMON_RI_BRIDGE_H */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/ri_bridge/server_ri_bridge.c b/drivers/external_drivers/intel_media/graphics/rgx/generated/ri_bridge/server_ri_bridge.c
new file mode 100644
index 0000000..ca370d9
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/ri_bridge/server_ri_bridge.c
@@ -0,0 +1,456 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server bridge for ri
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for ri
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <asm/uaccess.h>
+
+#include "img_defs.h"
+
+#include "ri_server.h"
+
+
+#include "common_ri_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#if defined (SUPPORT_AUTH)
+#include "osauth.h"
+#endif
+
+#include <linux/slab.h>
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+ 
+static IMG_INT
+PVRSRVBridgeRIWritePMREntry(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RIWRITEPMRENTRY *psRIWritePMREntryIN,
+					  PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRY *psRIWritePMREntryOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	PMR * psPMRHandleInt = IMG_NULL;
+	IMG_CHAR *uiTextAInt = IMG_NULL;
+
+
+
+
+	if (psRIWritePMREntryIN->ui32TextASize != 0)
+	{
+		uiTextAInt = OSAllocMem(psRIWritePMREntryIN->ui32TextASize * sizeof(IMG_CHAR));
+		if (!uiTextAInt)
+		{
+			psRIWritePMREntryOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RIWritePMREntry_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRIWritePMREntryIN->puiTextA, psRIWritePMREntryIN->ui32TextASize * sizeof(IMG_CHAR))
+				|| (OSCopyFromUser(NULL, uiTextAInt, psRIWritePMREntryIN->puiTextA,
+				psRIWritePMREntryIN->ui32TextASize * sizeof(IMG_CHAR)) != PVRSRV_OK) )
+			{
+				psRIWritePMREntryOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RIWritePMREntry_exit;
+			}
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRIWritePMREntryOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psPMRHandleInt,
+											psRIWritePMREntryIN->hPMRHandle,
+											PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+					if(psRIWritePMREntryOUT->eError != PVRSRV_OK)
+					{
+						goto RIWritePMREntry_exit;
+					}
+				}
+
+
+	psRIWritePMREntryOUT->eError =
+		RIWritePMREntryKM(
+					psPMRHandleInt,
+					psRIWritePMREntryIN->ui32TextASize,
+					uiTextAInt,
+					psRIWritePMREntryIN->ui64LogicalSize);
+
+
+
+
+RIWritePMREntry_exit:
+	if (uiTextAInt)
+		OSFreeMem(uiTextAInt);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRIWriteMEMDESCEntry(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RIWRITEMEMDESCENTRY *psRIWriteMEMDESCEntryIN,
+					  PVRSRV_BRIDGE_OUT_RIWRITEMEMDESCENTRY *psRIWriteMEMDESCEntryOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	PMR * psPMRHandleInt = IMG_NULL;
+	IMG_CHAR *uiTextBInt = IMG_NULL;
+	RI_HANDLE psRIHandleInt = IMG_NULL;
+
+
+
+
+	if (psRIWriteMEMDESCEntryIN->ui32TextBSize != 0)
+	{
+		uiTextBInt = OSAllocMem(psRIWriteMEMDESCEntryIN->ui32TextBSize * sizeof(IMG_CHAR));
+		if (!uiTextBInt)
+		{
+			psRIWriteMEMDESCEntryOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto RIWriteMEMDESCEntry_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRIWriteMEMDESCEntryIN->puiTextB, psRIWriteMEMDESCEntryIN->ui32TextBSize * sizeof(IMG_CHAR))
+				|| (OSCopyFromUser(NULL, uiTextBInt, psRIWriteMEMDESCEntryIN->puiTextB,
+				psRIWriteMEMDESCEntryIN->ui32TextBSize * sizeof(IMG_CHAR)) != PVRSRV_OK) )
+			{
+				psRIWriteMEMDESCEntryOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto RIWriteMEMDESCEntry_exit;
+			}
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRIWriteMEMDESCEntryOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psPMRHandleInt,
+											psRIWriteMEMDESCEntryIN->hPMRHandle,
+											PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+					if(psRIWriteMEMDESCEntryOUT->eError != PVRSRV_OK)
+					{
+						goto RIWriteMEMDESCEntry_exit;
+					}
+				}
+
+
+	psRIWriteMEMDESCEntryOUT->eError =
+		RIWriteMEMDESCEntryKM(
+					psPMRHandleInt,
+					psRIWriteMEMDESCEntryIN->ui32TextBSize,
+					uiTextBInt,
+					psRIWriteMEMDESCEntryIN->ui64Offset,
+					psRIWriteMEMDESCEntryIN->ui64Size,
+					psRIWriteMEMDESCEntryIN->bIsImport,
+					psRIWriteMEMDESCEntryIN->bIsExportable,
+					&psRIHandleInt);
+	/* Exit early if bridged call fails */
+	if(psRIWriteMEMDESCEntryOUT->eError != PVRSRV_OK)
+	{
+		goto RIWriteMEMDESCEntry_exit;
+	}
+
+
+	psRIWriteMEMDESCEntryOUT->eError = PVRSRVAllocHandle(psConnection->psHandleBase,
+							&psRIWriteMEMDESCEntryOUT->hRIHandle,
+							(IMG_VOID *) psRIHandleInt,
+							PVRSRV_HANDLE_TYPE_RI_HANDLE,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+							,(PFN_HANDLE_RELEASE)&RIDeleteMEMDESCEntryKM);
+	if (psRIWriteMEMDESCEntryOUT->eError != PVRSRV_OK)
+	{
+		goto RIWriteMEMDESCEntry_exit;
+	}
+
+
+
+
+RIWriteMEMDESCEntry_exit:
+	if (psRIWriteMEMDESCEntryOUT->eError != PVRSRV_OK)
+	{
+		if (psRIHandleInt)
+		{
+			RIDeleteMEMDESCEntryKM(psRIHandleInt);
+		}
+	}
+
+	if (uiTextBInt)
+		OSFreeMem(uiTextBInt);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRIUpdateMEMDESCAddr(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RIUPDATEMEMDESCADDR *psRIUpdateMEMDESCAddrIN,
+					  PVRSRV_BRIDGE_OUT_RIUPDATEMEMDESCADDR *psRIUpdateMEMDESCAddrOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	RI_HANDLE psRIHandleInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRIUpdateMEMDESCAddrOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psRIHandleInt,
+											psRIUpdateMEMDESCAddrIN->hRIHandle,
+											PVRSRV_HANDLE_TYPE_RI_HANDLE);
+					if(psRIUpdateMEMDESCAddrOUT->eError != PVRSRV_OK)
+					{
+						goto RIUpdateMEMDESCAddr_exit;
+					}
+				}
+
+
+	psRIUpdateMEMDESCAddrOUT->eError =
+		RIUpdateMEMDESCAddrKM(
+					psRIHandleInt,
+					psRIUpdateMEMDESCAddrIN->sAddr);
+
+
+
+
+RIUpdateMEMDESCAddr_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRIDeleteMEMDESCEntry(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RIDELETEMEMDESCENTRY *psRIDeleteMEMDESCEntryIN,
+					  PVRSRV_BRIDGE_OUT_RIDELETEMEMDESCENTRY *psRIDeleteMEMDESCEntryOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+	psRIDeleteMEMDESCEntryOUT->eError =
+		PVRSRVReleaseHandle(psConnection->psHandleBase,
+					(IMG_HANDLE) psRIDeleteMEMDESCEntryIN->hRIHandle,
+					PVRSRV_HANDLE_TYPE_RI_HANDLE);
+	if ((psRIDeleteMEMDESCEntryOUT->eError != PVRSRV_OK) && (psRIDeleteMEMDESCEntryOUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_ASSERT(0);
+		goto RIDeleteMEMDESCEntry_exit;
+	}
+
+
+
+RIDeleteMEMDESCEntry_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRIDumpList(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RIDUMPLIST *psRIDumpListIN,
+					  PVRSRV_BRIDGE_OUT_RIDUMPLIST *psRIDumpListOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	PMR * psPMRHandleInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRIDumpListOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psPMRHandleInt,
+											psRIDumpListIN->hPMRHandle,
+											PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+					if(psRIDumpListOUT->eError != PVRSRV_OK)
+					{
+						goto RIDumpList_exit;
+					}
+				}
+
+
+	psRIDumpListOUT->eError =
+		RIDumpListKM(
+					psPMRHandleInt);
+
+
+
+
+RIDumpList_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRIDumpAll(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RIDUMPALL *psRIDumpAllIN,
+					  PVRSRV_BRIDGE_OUT_RIDUMPALL *psRIDumpAllOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	PVR_UNREFERENCED_PARAMETER(psRIDumpAllIN);
+
+
+
+
+
+
+	psRIDumpAllOUT->eError =
+		RIDumpAllKM(
+					);
+
+
+
+
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRIDumpProcess(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RIDUMPPROCESS *psRIDumpProcessIN,
+					  PVRSRV_BRIDGE_OUT_RIDUMPPROCESS *psRIDumpProcessOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+
+
+
+
+
+	psRIDumpProcessOUT->eError =
+		RIDumpProcessKM(
+					psRIDumpProcessIN->ui32Pid);
+
+
+
+
+
+	return 0;
+}
+
+
+
+/* *************************************************************************** 
+ * Server bridge dispatch related glue 
+ */
+
+
+PVRSRV_ERROR InitRIBridge(IMG_VOID);
+PVRSRV_ERROR DeinitRIBridge(IMG_VOID);
+
+/*
+ * Register all RI functions with services
+ */
+PVRSRV_ERROR InitRIBridge(IMG_VOID)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEPMRENTRY, PVRSRVBridgeRIWritePMREntry,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEMEMDESCENTRY, PVRSRVBridgeRIWriteMEMDESCEntry,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIUPDATEMEMDESCADDR, PVRSRVBridgeRIUpdateMEMDESCAddr,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDELETEMEMDESCENTRY, PVRSRVBridgeRIDeleteMEMDESCEntry,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPLIST, PVRSRVBridgeRIDumpList,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPALL, PVRSRVBridgeRIDumpAll,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPPROCESS, PVRSRVBridgeRIDumpProcess,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all ri functions with services
+ */
+PVRSRV_ERROR DeinitRIBridge(IMG_VOID)
+{
+	return PVRSRV_OK;
+}
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/smm_bridge/common_smm_bridge.h b/drivers/external_drivers/intel_media/graphics/rgx/generated/smm_bridge/common_smm_bridge.h
new file mode 100644
index 0000000..52c5b1c
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/smm_bridge/common_smm_bridge.h
@@ -0,0 +1,115 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common bridge header for smm
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures that are used by both
+                the client and sever side of the bridge for smm
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_SMM_BRIDGE_H
+#define COMMON_SMM_BRIDGE_H
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+
+
+#define PVRSRV_BRIDGE_SMM_CMD_FIRST			0
+#define PVRSRV_BRIDGE_SMM_PMRSECUREEXPORTPMR			PVRSRV_BRIDGE_SMM_CMD_FIRST+0
+#define PVRSRV_BRIDGE_SMM_PMRSECUREUNEXPORTPMR			PVRSRV_BRIDGE_SMM_CMD_FIRST+1
+#define PVRSRV_BRIDGE_SMM_PMRSECUREIMPORTPMR			PVRSRV_BRIDGE_SMM_CMD_FIRST+2
+#define PVRSRV_BRIDGE_SMM_CMD_LAST			(PVRSRV_BRIDGE_SMM_CMD_FIRST+2)
+
+
+/*******************************************
+            PMRSecureExportPMR          
+ *******************************************/
+
+/* Bridge in structure for PMRSecureExportPMR */
+typedef struct PVRSRV_BRIDGE_IN_PMRSECUREEXPORTPMR_TAG
+{
+	IMG_HANDLE hPMR;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRSECUREEXPORTPMR;
+
+
+/* Bridge out structure for PMRSecureExportPMR */
+typedef struct PVRSRV_BRIDGE_OUT_PMRSECUREEXPORTPMR_TAG
+{
+	IMG_SECURE_TYPE Export;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRSECUREEXPORTPMR;
+
+/*******************************************
+            PMRSecureUnexportPMR          
+ *******************************************/
+
+/* Bridge in structure for PMRSecureUnexportPMR */
+typedef struct PVRSRV_BRIDGE_IN_PMRSECUREUNEXPORTPMR_TAG
+{
+	IMG_HANDLE hPMR;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRSECUREUNEXPORTPMR;
+
+
+/* Bridge out structure for PMRSecureUnexportPMR */
+typedef struct PVRSRV_BRIDGE_OUT_PMRSECUREUNEXPORTPMR_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRSECUREUNEXPORTPMR;
+
+/*******************************************
+            PMRSecureImportPMR          
+ *******************************************/
+
+/* Bridge in structure for PMRSecureImportPMR */
+typedef struct PVRSRV_BRIDGE_IN_PMRSECUREIMPORTPMR_TAG
+{
+	IMG_SECURE_TYPE Export;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRSECUREIMPORTPMR;
+
+
+/* Bridge out structure for PMRSecureImportPMR */
+typedef struct PVRSRV_BRIDGE_OUT_PMRSECUREIMPORTPMR_TAG
+{
+	IMG_HANDLE hPMR;
+	IMG_DEVMEM_SIZE_T uiSize;
+	IMG_DEVMEM_ALIGN_T sAlign;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRSECUREIMPORTPMR;
+
+#endif /* COMMON_SMM_BRIDGE_H */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/smm_bridge/server_smm_bridge.c b/drivers/external_drivers/intel_media/graphics/rgx/generated/smm_bridge/server_smm_bridge.c
new file mode 100644
index 0000000..4fb7d5c
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/smm_bridge/server_smm_bridge.c
@@ -0,0 +1,281 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server bridge for smm
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for smm
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <asm/uaccess.h>
+
+#include "img_defs.h"
+
+#include "pmr.h"
+#include "secure_export.h"
+
+
+#include "common_smm_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#if defined (SUPPORT_AUTH)
+#include "osauth.h"
+#endif
+
+#include <linux/slab.h>
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+ 
+static IMG_INT
+PVRSRVBridgePMRSecureExportPMR(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PMRSECUREEXPORTPMR *psPMRSecureExportPMRIN,
+					  PVRSRV_BRIDGE_OUT_PMRSECUREEXPORTPMR *psPMRSecureExportPMROUT,
+					 CONNECTION_DATA *psConnection)
+{
+	PMR * psPMRInt = IMG_NULL;
+	PMR * psPMROutInt = IMG_NULL;
+	IMG_HANDLE hPMROutInt = IMG_NULL;
+	CONNECTION_DATA *psSecureConnection;
+
+
+
+
+
+	PMRLock();
+
+
+				{
+					/* Look up the address from the handle */
+					psPMRSecureExportPMROUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psPMRInt,
+											psPMRSecureExportPMRIN->hPMR,
+											PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+					if(psPMRSecureExportPMROUT->eError != PVRSRV_OK)
+					{
+						PMRUnlock();
+						goto PMRSecureExportPMR_exit;
+					}
+				}
+
+
+	psPMRSecureExportPMROUT->eError =
+		PMRSecureExportPMR(psConnection,
+					psPMRInt,
+					&psPMRSecureExportPMROUT->Export,
+					&psPMROutInt, &psSecureConnection);
+	/* Exit early if bridged call fails */
+	if(psPMRSecureExportPMROUT->eError != PVRSRV_OK)
+	{
+		PMRUnlock();
+		goto PMRSecureExportPMR_exit;
+	}
+	PMRUnlock();
+
+
+	psPMRSecureExportPMROUT->eError = PVRSRVAllocHandle(psSecureConnection->psHandleBase,
+							&hPMROutInt,
+							(IMG_VOID *) psPMROutInt,
+							PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_SECURE_EXPORT,
+							PVRSRV_HANDLE_ALLOC_FLAG_SHARED
+							,(PFN_HANDLE_RELEASE)&PMRSecureUnexportPMR);
+	if (psPMRSecureExportPMROUT->eError != PVRSRV_OK)
+	{
+		goto PMRSecureExportPMR_exit;
+	}
+
+
+
+
+PMRSecureExportPMR_exit:
+	if (psPMRSecureExportPMROUT->eError != PVRSRV_OK)
+	{
+		if (psPMROutInt)
+		{
+			PMRSecureUnexportPMR(psPMROutInt);
+		}
+	}
+
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePMRSecureUnexportPMR(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PMRSECUREUNEXPORTPMR *psPMRSecureUnexportPMRIN,
+					  PVRSRV_BRIDGE_OUT_PMRSECUREUNEXPORTPMR *psPMRSecureUnexportPMROUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+	PMRLock();
+
+
+
+
+	psPMRSecureUnexportPMROUT->eError =
+		PVRSRVReleaseHandle(psConnection->psHandleBase,
+					(IMG_HANDLE) psPMRSecureUnexportPMRIN->hPMR,
+					PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_SECURE_EXPORT);
+	if ((psPMRSecureUnexportPMROUT->eError != PVRSRV_OK) && (psPMRSecureUnexportPMROUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_ASSERT(0);
+		PMRUnlock();
+		goto PMRSecureUnexportPMR_exit;
+	}
+
+	PMRUnlock();
+
+
+PMRSecureUnexportPMR_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgePMRSecureImportPMR(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_PMRSECUREIMPORTPMR *psPMRSecureImportPMRIN,
+					  PVRSRV_BRIDGE_OUT_PMRSECUREIMPORTPMR *psPMRSecureImportPMROUT,
+					 CONNECTION_DATA *psConnection)
+{
+	PMR * psPMRInt = IMG_NULL;
+
+
+
+
+
+	PMRLock();
+
+
+	psPMRSecureImportPMROUT->eError =
+		PMRSecureImportPMR(
+					psPMRSecureImportPMRIN->Export,
+					&psPMRInt,
+					&psPMRSecureImportPMROUT->uiSize,
+					&psPMRSecureImportPMROUT->sAlign);
+	/* Exit early if bridged call fails */
+	if(psPMRSecureImportPMROUT->eError != PVRSRV_OK)
+	{
+		PMRUnlock();
+		goto PMRSecureImportPMR_exit;
+	}
+	PMRUnlock();
+
+
+	psPMRSecureImportPMROUT->eError = PVRSRVAllocHandle(psConnection->psHandleBase,
+							&psPMRSecureImportPMROUT->hPMR,
+							(IMG_VOID *) psPMRInt,
+							PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+							,(PFN_HANDLE_RELEASE)&PMRUnrefPMR);
+	if (psPMRSecureImportPMROUT->eError != PVRSRV_OK)
+	{
+		goto PMRSecureImportPMR_exit;
+	}
+
+
+
+
+PMRSecureImportPMR_exit:
+	if (psPMRSecureImportPMROUT->eError != PVRSRV_OK)
+	{
+		if (psPMRInt)
+		{
+			PMRUnrefPMR(psPMRInt);
+		}
+	}
+
+
+	return 0;
+}
+
+
+
+/* *************************************************************************** 
+ * Server bridge dispatch related glue 
+ */
+
+
+PVRSRV_ERROR InitSMMBridge(IMG_VOID);
+PVRSRV_ERROR DeinitSMMBridge(IMG_VOID);
+
+/*
+ * Register all SMM functions with services
+ */
+PVRSRV_ERROR InitSMMBridge(IMG_VOID)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SMM, PVRSRV_BRIDGE_SMM_PMRSECUREEXPORTPMR, PVRSRVBridgePMRSecureExportPMR,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SMM, PVRSRV_BRIDGE_SMM_PMRSECUREUNEXPORTPMR, PVRSRVBridgePMRSecureUnexportPMR,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SMM, PVRSRV_BRIDGE_SMM_PMRSECUREIMPORTPMR, PVRSRVBridgePMRSecureImportPMR,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all smm functions with services
+ */
+PVRSRV_ERROR DeinitSMMBridge(IMG_VOID)
+{
+	return PVRSRV_OK;
+}
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/srvcore_bridge/common_srvcore_bridge.h b/drivers/external_drivers/intel_media/graphics/rgx/generated/srvcore_bridge/common_srvcore_bridge.h
new file mode 100644
index 0000000..c37fb1c
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/srvcore_bridge/common_srvcore_bridge.h
@@ -0,0 +1,387 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common bridge header for srvcore
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures that are used by both
+                the client and sever side of the bridge for srvcore
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_SRVCORE_BRIDGE_H
+#define COMMON_SRVCORE_BRIDGE_H
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "pvrsrv_device_types.h"
+#include "cache_external.h"
+
+
+#define PVRSRV_BRIDGE_SRVCORE_CMD_FIRST			0
+#define PVRSRV_BRIDGE_SRVCORE_CONNECT			PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+0
+#define PVRSRV_BRIDGE_SRVCORE_DISCONNECT			PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+1
+#define PVRSRV_BRIDGE_SRVCORE_ENUMERATEDEVICES			PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+2
+#define PVRSRV_BRIDGE_SRVCORE_ACQUIREDEVICEDATA			PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+3
+#define PVRSRV_BRIDGE_SRVCORE_RELEASEDEVICEDATA			PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+4
+#define PVRSRV_BRIDGE_SRVCORE_INITSRVDISCONNECT			PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+5
+#define PVRSRV_BRIDGE_SRVCORE_ACQUIREGLOBALEVENTOBJECT			PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+6
+#define PVRSRV_BRIDGE_SRVCORE_RELEASEGLOBALEVENTOBJECT			PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+7
+#define PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTOPEN			PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+8
+#define PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAIT			PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+9
+#define PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTCLOSE			PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+10
+#define PVRSRV_BRIDGE_SRVCORE_DUMPDEBUGINFO			PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+11
+#define PVRSRV_BRIDGE_SRVCORE_GETDEVCLOCKSPEED			PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+12
+#define PVRSRV_BRIDGE_SRVCORE_HWOPTIMEOUT			PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+13
+#define PVRSRV_BRIDGE_SRVCORE_KICKDEVICES			PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+14
+#define PVRSRV_BRIDGE_SRVCORE_RESETHWRLOGS			PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+15
+#define PVRSRV_BRIDGE_SRVCORE_SOFTRESET			PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+16
+#define PVRSRV_BRIDGE_SRVCORE_CMD_LAST			(PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+16)
+
+
+/*******************************************
+            Connect          
+ *******************************************/
+
+/* Bridge in structure for Connect */
+typedef struct PVRSRV_BRIDGE_IN_CONNECT_TAG
+{
+	IMG_UINT32 ui32Flags;
+	IMG_UINT32 ui32ClientBuildOptions;
+	IMG_UINT32 ui32ClientDDKVersion;
+	IMG_UINT32 ui32ClientDDKBuild;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_CONNECT;
+
+
+/* Bridge out structure for Connect */
+typedef struct PVRSRV_BRIDGE_OUT_CONNECT_TAG
+{
+	IMG_UINT8 ui8KernelArch;
+	IMG_UINT32 ui32Log2PageSize;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_CONNECT;
+
+/*******************************************
+            Disconnect          
+ *******************************************/
+
+/* Bridge in structure for Disconnect */
+typedef struct PVRSRV_BRIDGE_IN_DISCONNECT_TAG
+{
+	 IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DISCONNECT;
+
+
+/* Bridge out structure for Disconnect */
+typedef struct PVRSRV_BRIDGE_OUT_DISCONNECT_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DISCONNECT;
+
+/*******************************************
+            EnumerateDevices          
+ *******************************************/
+
+/* Bridge in structure for EnumerateDevices */
+typedef struct PVRSRV_BRIDGE_IN_ENUMERATEDEVICES_TAG
+{
+	/* Output pointer peDeviceType is also an implied input */
+	PVRSRV_DEVICE_TYPE * peDeviceType;
+	/* Output pointer peDeviceClass is also an implied input */
+	PVRSRV_DEVICE_CLASS * peDeviceClass;
+	/* Output pointer pui32DeviceIndex is also an implied input */
+	IMG_UINT32 * pui32DeviceIndex;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_ENUMERATEDEVICES;
+
+
+/* Bridge out structure for EnumerateDevices */
+typedef struct PVRSRV_BRIDGE_OUT_ENUMERATEDEVICES_TAG
+{
+	IMG_UINT32 ui32NumDevices;
+	PVRSRV_DEVICE_TYPE * peDeviceType;
+	PVRSRV_DEVICE_CLASS * peDeviceClass;
+	IMG_UINT32 * pui32DeviceIndex;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_ENUMERATEDEVICES;
+
+/*******************************************
+            AcquireDeviceData          
+ *******************************************/
+
+/* Bridge in structure for AcquireDeviceData */
+typedef struct PVRSRV_BRIDGE_IN_ACQUIREDEVICEDATA_TAG
+{
+	IMG_UINT32 ui32DevIndex;
+	PVRSRV_DEVICE_TYPE eDeviceType;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_ACQUIREDEVICEDATA;
+
+
+/* Bridge out structure for AcquireDeviceData */
+typedef struct PVRSRV_BRIDGE_OUT_ACQUIREDEVICEDATA_TAG
+{
+	IMG_HANDLE hDevCookie;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_ACQUIREDEVICEDATA;
+
+/*******************************************
+            ReleaseDeviceData          
+ *******************************************/
+
+/* Bridge in structure for ReleaseDeviceData */
+typedef struct PVRSRV_BRIDGE_IN_RELEASEDEVICEDATA_TAG
+{
+	IMG_HANDLE hDevCookie;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RELEASEDEVICEDATA;
+
+
+/* Bridge out structure for ReleaseDeviceData */
+typedef struct PVRSRV_BRIDGE_OUT_RELEASEDEVICEDATA_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RELEASEDEVICEDATA;
+
+/*******************************************
+            InitSrvDisconnect          
+ *******************************************/
+
+/* Bridge in structure for InitSrvDisconnect */
+typedef struct PVRSRV_BRIDGE_IN_INITSRVDISCONNECT_TAG
+{
+	IMG_BOOL bInitSuccesful;
+	IMG_UINT32 ui32ClientBuildOptions;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_INITSRVDISCONNECT;
+
+
+/* Bridge out structure for InitSrvDisconnect */
+typedef struct PVRSRV_BRIDGE_OUT_INITSRVDISCONNECT_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_INITSRVDISCONNECT;
+
+/*******************************************
+            AcquireGlobalEventObject          
+ *******************************************/
+
+/* Bridge in structure for AcquireGlobalEventObject */
+typedef struct PVRSRV_BRIDGE_IN_ACQUIREGLOBALEVENTOBJECT_TAG
+{
+	 IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_ACQUIREGLOBALEVENTOBJECT;
+
+
+/* Bridge out structure for AcquireGlobalEventObject */
+typedef struct PVRSRV_BRIDGE_OUT_ACQUIREGLOBALEVENTOBJECT_TAG
+{
+	IMG_HANDLE hGlobalEventObject;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_ACQUIREGLOBALEVENTOBJECT;
+
+/*******************************************
+            ReleaseGlobalEventObject          
+ *******************************************/
+
+/* Bridge in structure for ReleaseGlobalEventObject */
+typedef struct PVRSRV_BRIDGE_IN_RELEASEGLOBALEVENTOBJECT_TAG
+{
+	IMG_HANDLE hGlobalEventObject;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RELEASEGLOBALEVENTOBJECT;
+
+
+/* Bridge out structure for ReleaseGlobalEventObject */
+typedef struct PVRSRV_BRIDGE_OUT_RELEASEGLOBALEVENTOBJECT_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RELEASEGLOBALEVENTOBJECT;
+
+/*******************************************
+            EventObjectOpen          
+ *******************************************/
+
+/* Bridge in structure for EventObjectOpen */
+typedef struct PVRSRV_BRIDGE_IN_EVENTOBJECTOPEN_TAG
+{
+	IMG_HANDLE hEventObject;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_EVENTOBJECTOPEN;
+
+
+/* Bridge out structure for EventObjectOpen */
+typedef struct PVRSRV_BRIDGE_OUT_EVENTOBJECTOPEN_TAG
+{
+	IMG_HANDLE hOSEvent;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_EVENTOBJECTOPEN;
+
+/*******************************************
+            EventObjectWait          
+ *******************************************/
+
+/* Bridge in structure for EventObjectWait */
+typedef struct PVRSRV_BRIDGE_IN_EVENTOBJECTWAIT_TAG
+{
+	IMG_HANDLE hOSEventKM;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_EVENTOBJECTWAIT;
+
+
+/* Bridge out structure for EventObjectWait */
+typedef struct PVRSRV_BRIDGE_OUT_EVENTOBJECTWAIT_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_EVENTOBJECTWAIT;
+
+/*******************************************
+            EventObjectClose          
+ *******************************************/
+
+/* Bridge in structure for EventObjectClose */
+typedef struct PVRSRV_BRIDGE_IN_EVENTOBJECTCLOSE_TAG
+{
+	IMG_HANDLE hOSEventKM;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_EVENTOBJECTCLOSE;
+
+
+/* Bridge out structure for EventObjectClose */
+typedef struct PVRSRV_BRIDGE_OUT_EVENTOBJECTCLOSE_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_EVENTOBJECTCLOSE;
+
+/*******************************************
+            DumpDebugInfo          
+ *******************************************/
+
+/* Bridge in structure for DumpDebugInfo */
+typedef struct PVRSRV_BRIDGE_IN_DUMPDEBUGINFO_TAG
+{
+	IMG_UINT32 ui32ui32VerbLevel;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DUMPDEBUGINFO;
+
+
+/* Bridge out structure for DumpDebugInfo */
+typedef struct PVRSRV_BRIDGE_OUT_DUMPDEBUGINFO_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DUMPDEBUGINFO;
+
+/*******************************************
+            GetDevClockSpeed          
+ *******************************************/
+
+/* Bridge in structure for GetDevClockSpeed */
+typedef struct PVRSRV_BRIDGE_IN_GETDEVCLOCKSPEED_TAG
+{
+	IMG_HANDLE hDevNode;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_GETDEVCLOCKSPEED;
+
+
+/* Bridge out structure for GetDevClockSpeed */
+typedef struct PVRSRV_BRIDGE_OUT_GETDEVCLOCKSPEED_TAG
+{
+	IMG_UINT32 ui32ui32ClockSpeed;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_GETDEVCLOCKSPEED;
+
+/*******************************************
+            HWOpTimeout          
+ *******************************************/
+
+/* Bridge in structure for HWOpTimeout */
+typedef struct PVRSRV_BRIDGE_IN_HWOPTIMEOUT_TAG
+{
+	 IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_HWOPTIMEOUT;
+
+
+/* Bridge out structure for HWOpTimeout */
+typedef struct PVRSRV_BRIDGE_OUT_HWOPTIMEOUT_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_HWOPTIMEOUT;
+
+/*******************************************
+            KickDevices          
+ *******************************************/
+
+/* Bridge in structure for KickDevices */
+typedef struct PVRSRV_BRIDGE_IN_KICKDEVICES_TAG
+{
+	 IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_KICKDEVICES;
+
+
+/* Bridge out structure for KickDevices */
+typedef struct PVRSRV_BRIDGE_OUT_KICKDEVICES_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_KICKDEVICES;
+
+/*******************************************
+            ResetHWRLogs          
+ *******************************************/
+
+/* Bridge in structure for ResetHWRLogs */
+typedef struct PVRSRV_BRIDGE_IN_RESETHWRLOGS_TAG
+{
+	IMG_HANDLE hDevNode;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RESETHWRLOGS;
+
+
+/* Bridge out structure for ResetHWRLogs */
+typedef struct PVRSRV_BRIDGE_OUT_RESETHWRLOGS_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RESETHWRLOGS;
+
+/*******************************************
+            SoftReset          
+ *******************************************/
+
+/* Bridge in structure for SoftReset */
+typedef struct PVRSRV_BRIDGE_IN_SOFTRESET_TAG
+{
+	IMG_HANDLE hDevNode;
+	IMG_UINT64 ui64ResetValue1;
+	IMG_UINT64 ui64ResetValue2;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SOFTRESET;
+
+
+/* Bridge out structure for SoftReset */
+typedef struct PVRSRV_BRIDGE_OUT_SOFTRESET_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SOFTRESET;
+
+#endif /* COMMON_SRVCORE_BRIDGE_H */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/srvcore_bridge/server_srvcore_bridge.c b/drivers/external_drivers/intel_media/graphics/rgx/generated/srvcore_bridge/server_srvcore_bridge.c
new file mode 100644
index 0000000..6b21060
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/srvcore_bridge/server_srvcore_bridge.c
@@ -0,0 +1,867 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server bridge for srvcore
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for srvcore
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <asm/uaccess.h>
+
+#include "img_defs.h"
+
+#include "srvcore.h"
+#include "pvrsrv.h"
+
+
+#include "common_srvcore_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#if defined (SUPPORT_AUTH)
+#include "osauth.h"
+#endif
+
+#include <linux/slab.h>
+
+
+static PVRSRV_ERROR ReleaseDevCookie(IMG_VOID *pvData)
+{
+	PVR_UNREFERENCED_PARAMETER(pvData);
+
+	return PVRSRV_OK;
+}
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+ 
+static IMG_INT
+PVRSRVBridgeConnect(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_CONNECT *psConnectIN,
+					  PVRSRV_BRIDGE_OUT_CONNECT *psConnectOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+	psConnectOUT->eError =
+		PVRSRVConnectKM(psConnection,
+					psConnectIN->ui32Flags,
+					psConnectIN->ui32ClientBuildOptions,
+					psConnectIN->ui32ClientDDKVersion,
+					psConnectIN->ui32ClientDDKBuild,
+					&psConnectOUT->ui8KernelArch,
+					&psConnectOUT->ui32Log2PageSize);
+
+
+
+
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDisconnect(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DISCONNECT *psDisconnectIN,
+					  PVRSRV_BRIDGE_OUT_DISCONNECT *psDisconnectOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	PVR_UNREFERENCED_PARAMETER(psDisconnectIN);
+
+
+
+
+
+
+	psDisconnectOUT->eError =
+		PVRSRVDisconnectKM(
+					);
+
+
+
+
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeEnumerateDevices(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_ENUMERATEDEVICES *psEnumerateDevicesIN,
+					  PVRSRV_BRIDGE_OUT_ENUMERATEDEVICES *psEnumerateDevicesOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	PVRSRV_DEVICE_TYPE *peDeviceTypeInt = IMG_NULL;
+	PVRSRV_DEVICE_CLASS *peDeviceClassInt = IMG_NULL;
+	IMG_UINT32 *pui32DeviceIndexInt = IMG_NULL;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	PVR_UNREFERENCED_PARAMETER(psEnumerateDevicesIN);
+
+	psEnumerateDevicesOUT->peDeviceType = psEnumerateDevicesIN->peDeviceType;
+	psEnumerateDevicesOUT->peDeviceClass = psEnumerateDevicesIN->peDeviceClass;
+	psEnumerateDevicesOUT->pui32DeviceIndex = psEnumerateDevicesIN->pui32DeviceIndex;
+
+
+	
+	{
+		peDeviceTypeInt = OSAllocMem(PVRSRV_MAX_DEVICES * sizeof(PVRSRV_DEVICE_TYPE));
+		if (!peDeviceTypeInt)
+		{
+			psEnumerateDevicesOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto EnumerateDevices_exit;
+		}
+	}
+
+	
+	{
+		peDeviceClassInt = OSAllocMem(PVRSRV_MAX_DEVICES * sizeof(PVRSRV_DEVICE_CLASS));
+		if (!peDeviceClassInt)
+		{
+			psEnumerateDevicesOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto EnumerateDevices_exit;
+		}
+	}
+
+	
+	{
+		pui32DeviceIndexInt = OSAllocMem(PVRSRV_MAX_DEVICES * sizeof(IMG_UINT32));
+		if (!pui32DeviceIndexInt)
+		{
+			psEnumerateDevicesOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto EnumerateDevices_exit;
+		}
+	}
+
+
+
+
+	psEnumerateDevicesOUT->eError =
+		PVRSRVEnumerateDevicesKM(
+					&psEnumerateDevicesOUT->ui32NumDevices,
+					peDeviceTypeInt,
+					peDeviceClassInt,
+					pui32DeviceIndexInt);
+
+
+
+	if ( !OSAccessOK(PVR_VERIFY_WRITE, (IMG_VOID*) psEnumerateDevicesOUT->peDeviceType, (PVRSRV_MAX_DEVICES * sizeof(PVRSRV_DEVICE_TYPE))) 
+		|| (OSCopyToUser(NULL, psEnumerateDevicesOUT->peDeviceType, peDeviceTypeInt,
+		(PVRSRV_MAX_DEVICES * sizeof(PVRSRV_DEVICE_TYPE))) != PVRSRV_OK) )
+	{
+		psEnumerateDevicesOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+		goto EnumerateDevices_exit;
+	}
+
+	if ( !OSAccessOK(PVR_VERIFY_WRITE, (IMG_VOID*) psEnumerateDevicesOUT->peDeviceClass, (PVRSRV_MAX_DEVICES * sizeof(PVRSRV_DEVICE_CLASS))) 
+		|| (OSCopyToUser(NULL, psEnumerateDevicesOUT->peDeviceClass, peDeviceClassInt,
+		(PVRSRV_MAX_DEVICES * sizeof(PVRSRV_DEVICE_CLASS))) != PVRSRV_OK) )
+	{
+		psEnumerateDevicesOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+		goto EnumerateDevices_exit;
+	}
+
+	if ( !OSAccessOK(PVR_VERIFY_WRITE, (IMG_VOID*) psEnumerateDevicesOUT->pui32DeviceIndex, (PVRSRV_MAX_DEVICES * sizeof(IMG_UINT32))) 
+		|| (OSCopyToUser(NULL, psEnumerateDevicesOUT->pui32DeviceIndex, pui32DeviceIndexInt,
+		(PVRSRV_MAX_DEVICES * sizeof(IMG_UINT32))) != PVRSRV_OK) )
+	{
+		psEnumerateDevicesOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+		goto EnumerateDevices_exit;
+	}
+
+
+EnumerateDevices_exit:
+	if (peDeviceTypeInt)
+		OSFreeMem(peDeviceTypeInt);
+	if (peDeviceClassInt)
+		OSFreeMem(peDeviceClassInt);
+	if (pui32DeviceIndexInt)
+		OSFreeMem(pui32DeviceIndexInt);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeAcquireDeviceData(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_ACQUIREDEVICEDATA *psAcquireDeviceDataIN,
+					  PVRSRV_BRIDGE_OUT_ACQUIREDEVICEDATA *psAcquireDeviceDataOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hDevCookieInt = IMG_NULL;
+
+
+
+
+
+
+
+	psAcquireDeviceDataOUT->eError =
+		PVRSRVAcquireDeviceDataKM(
+					psAcquireDeviceDataIN->ui32DevIndex,
+					psAcquireDeviceDataIN->eDeviceType,
+					&hDevCookieInt);
+	/* Exit early if bridged call fails */
+	if(psAcquireDeviceDataOUT->eError != PVRSRV_OK)
+	{
+		goto AcquireDeviceData_exit;
+	}
+
+
+	psAcquireDeviceDataOUT->eError = PVRSRVAllocHandle(psConnection->psHandleBase,
+							&psAcquireDeviceDataOUT->hDevCookie,
+							(IMG_VOID *) hDevCookieInt,
+							PVRSRV_HANDLE_TYPE_DEV_NODE,
+							PVRSRV_HANDLE_ALLOC_FLAG_SHARED
+							,(PFN_HANDLE_RELEASE)&ReleaseDevCookie);
+	if (psAcquireDeviceDataOUT->eError != PVRSRV_OK)
+	{
+		goto AcquireDeviceData_exit;
+	}
+
+
+
+
+AcquireDeviceData_exit:
+	if (psAcquireDeviceDataOUT->eError != PVRSRV_OK)
+	{
+	}
+
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeReleaseDeviceData(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RELEASEDEVICEDATA *psReleaseDeviceDataIN,
+					  PVRSRV_BRIDGE_OUT_RELEASEDEVICEDATA *psReleaseDeviceDataOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hDevCookieInt = IMG_NULL;
+
+
+
+
+
+
+
+	psReleaseDeviceDataOUT->eError =
+		PVRSRVReleaseDeviceDataKM(
+					hDevCookieInt);
+
+
+	psReleaseDeviceDataOUT->eError =
+		PVRSRVReleaseHandle(psConnection->psHandleBase,
+					(IMG_HANDLE) psReleaseDeviceDataIN->hDevCookie,
+					PVRSRV_HANDLE_TYPE_DEV_NODE);
+	if ((psReleaseDeviceDataOUT->eError != PVRSRV_OK) && (psReleaseDeviceDataOUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_ASSERT(0);
+		goto ReleaseDeviceData_exit;
+	}
+
+
+
+ReleaseDeviceData_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeInitSrvDisconnect(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_INITSRVDISCONNECT *psInitSrvDisconnectIN,
+					  PVRSRV_BRIDGE_OUT_INITSRVDISCONNECT *psInitSrvDisconnectOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+	psInitSrvDisconnectOUT->eError =
+		PVRSRVInitSrvDisconnectKM(psConnection,
+					psInitSrvDisconnectIN->bInitSuccesful,
+					psInitSrvDisconnectIN->ui32ClientBuildOptions);
+
+
+
+
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeAcquireGlobalEventObject(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_ACQUIREGLOBALEVENTOBJECT *psAcquireGlobalEventObjectIN,
+					  PVRSRV_BRIDGE_OUT_ACQUIREGLOBALEVENTOBJECT *psAcquireGlobalEventObjectOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hGlobalEventObjectInt = IMG_NULL;
+
+	PVR_UNREFERENCED_PARAMETER(psAcquireGlobalEventObjectIN);
+
+
+
+
+
+
+	psAcquireGlobalEventObjectOUT->eError =
+		AcquireGlobalEventObjectServer(
+					&hGlobalEventObjectInt);
+	/* Exit early if bridged call fails */
+	if(psAcquireGlobalEventObjectOUT->eError != PVRSRV_OK)
+	{
+		goto AcquireGlobalEventObject_exit;
+	}
+
+
+	psAcquireGlobalEventObjectOUT->eError = PVRSRVAllocHandle(psConnection->psHandleBase,
+							&psAcquireGlobalEventObjectOUT->hGlobalEventObject,
+							(IMG_VOID *) hGlobalEventObjectInt,
+							PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+							,(PFN_HANDLE_RELEASE)&ReleaseGlobalEventObjectServer);
+	if (psAcquireGlobalEventObjectOUT->eError != PVRSRV_OK)
+	{
+		goto AcquireGlobalEventObject_exit;
+	}
+
+
+
+
+AcquireGlobalEventObject_exit:
+	if (psAcquireGlobalEventObjectOUT->eError != PVRSRV_OK)
+	{
+		if (hGlobalEventObjectInt)
+		{
+			ReleaseGlobalEventObjectServer(hGlobalEventObjectInt);
+		}
+	}
+
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeReleaseGlobalEventObject(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RELEASEGLOBALEVENTOBJECT *psReleaseGlobalEventObjectIN,
+					  PVRSRV_BRIDGE_OUT_RELEASEGLOBALEVENTOBJECT *psReleaseGlobalEventObjectOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+	psReleaseGlobalEventObjectOUT->eError =
+		PVRSRVReleaseHandle(psConnection->psHandleBase,
+					(IMG_HANDLE) psReleaseGlobalEventObjectIN->hGlobalEventObject,
+					PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT);
+	if ((psReleaseGlobalEventObjectOUT->eError != PVRSRV_OK) && (psReleaseGlobalEventObjectOUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_ASSERT(0);
+		goto ReleaseGlobalEventObject_exit;
+	}
+
+
+
+ReleaseGlobalEventObject_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeEventObjectOpen(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_EVENTOBJECTOPEN *psEventObjectOpenIN,
+					  PVRSRV_BRIDGE_OUT_EVENTOBJECTOPEN *psEventObjectOpenOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hEventObjectInt = IMG_NULL;
+	IMG_HANDLE hOSEventInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psEventObjectOpenOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &hEventObjectInt,
+											psEventObjectOpenIN->hEventObject,
+											PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT);
+					if(psEventObjectOpenOUT->eError != PVRSRV_OK)
+					{
+						goto EventObjectOpen_exit;
+					}
+				}
+
+
+	psEventObjectOpenOUT->eError =
+		OSEventObjectOpen(
+					hEventObjectInt,
+					&hOSEventInt);
+	/* Exit early if bridged call fails */
+	if(psEventObjectOpenOUT->eError != PVRSRV_OK)
+	{
+		goto EventObjectOpen_exit;
+	}
+
+
+	psEventObjectOpenOUT->eError = PVRSRVAllocHandle(psConnection->psHandleBase,
+							&psEventObjectOpenOUT->hOSEvent,
+							(IMG_VOID *) hOSEventInt,
+							PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+							,(PFN_HANDLE_RELEASE)&OSEventObjectClose);
+	if (psEventObjectOpenOUT->eError != PVRSRV_OK)
+	{
+		goto EventObjectOpen_exit;
+	}
+
+
+
+
+EventObjectOpen_exit:
+	if (psEventObjectOpenOUT->eError != PVRSRV_OK)
+	{
+		if (hOSEventInt)
+		{
+			OSEventObjectClose(hOSEventInt);
+		}
+	}
+
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeEventObjectWait(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_EVENTOBJECTWAIT *psEventObjectWaitIN,
+					  PVRSRV_BRIDGE_OUT_EVENTOBJECTWAIT *psEventObjectWaitOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hOSEventKMInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psEventObjectWaitOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &hOSEventKMInt,
+											psEventObjectWaitIN->hOSEventKM,
+											PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT);
+					if(psEventObjectWaitOUT->eError != PVRSRV_OK)
+					{
+						goto EventObjectWait_exit;
+					}
+				}
+
+
+	psEventObjectWaitOUT->eError =
+		OSEventObjectWait(
+					hOSEventKMInt);
+
+
+
+
+EventObjectWait_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeEventObjectClose(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_EVENTOBJECTCLOSE *psEventObjectCloseIN,
+					  PVRSRV_BRIDGE_OUT_EVENTOBJECTCLOSE *psEventObjectCloseOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+	psEventObjectCloseOUT->eError =
+		PVRSRVReleaseHandle(psConnection->psHandleBase,
+					(IMG_HANDLE) psEventObjectCloseIN->hOSEventKM,
+					PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT);
+	if ((psEventObjectCloseOUT->eError != PVRSRV_OK) && (psEventObjectCloseOUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_ASSERT(0);
+		goto EventObjectClose_exit;
+	}
+
+
+
+EventObjectClose_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDumpDebugInfo(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_DUMPDEBUGINFO *psDumpDebugInfoIN,
+					  PVRSRV_BRIDGE_OUT_DUMPDEBUGINFO *psDumpDebugInfoOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+
+
+
+
+
+	psDumpDebugInfoOUT->eError =
+		PVRSRVDumpDebugInfoKM(
+					psDumpDebugInfoIN->ui32ui32VerbLevel);
+
+
+
+
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeGetDevClockSpeed(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_GETDEVCLOCKSPEED *psGetDevClockSpeedIN,
+					  PVRSRV_BRIDGE_OUT_GETDEVCLOCKSPEED *psGetDevClockSpeedOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hDevNodeInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psGetDevClockSpeedOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &hDevNodeInt,
+											psGetDevClockSpeedIN->hDevNode,
+											PVRSRV_HANDLE_TYPE_DEV_NODE);
+					if(psGetDevClockSpeedOUT->eError != PVRSRV_OK)
+					{
+						goto GetDevClockSpeed_exit;
+					}
+				}
+
+
+	psGetDevClockSpeedOUT->eError =
+		PVRSRVGetDevClockSpeedKM(
+					hDevNodeInt,
+					&psGetDevClockSpeedOUT->ui32ui32ClockSpeed);
+
+
+
+
+GetDevClockSpeed_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeHWOpTimeout(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_HWOPTIMEOUT *psHWOpTimeoutIN,
+					  PVRSRV_BRIDGE_OUT_HWOPTIMEOUT *psHWOpTimeoutOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	PVR_UNREFERENCED_PARAMETER(psHWOpTimeoutIN);
+
+
+
+
+
+
+	psHWOpTimeoutOUT->eError =
+		PVRSRVHWOpTimeoutKM(
+					);
+
+
+
+
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeKickDevices(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_KICKDEVICES *psKickDevicesIN,
+					  PVRSRV_BRIDGE_OUT_KICKDEVICES *psKickDevicesOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	PVR_UNREFERENCED_PARAMETER(psKickDevicesIN);
+
+
+
+
+
+
+	psKickDevicesOUT->eError =
+		PVRSRVKickDevicesKM(
+					);
+
+
+
+
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeResetHWRLogs(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RESETHWRLOGS *psResetHWRLogsIN,
+					  PVRSRV_BRIDGE_OUT_RESETHWRLOGS *psResetHWRLogsOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hDevNodeInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psResetHWRLogsOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &hDevNodeInt,
+											psResetHWRLogsIN->hDevNode,
+											PVRSRV_HANDLE_TYPE_DEV_NODE);
+					if(psResetHWRLogsOUT->eError != PVRSRV_OK)
+					{
+						goto ResetHWRLogs_exit;
+					}
+				}
+
+
+	psResetHWRLogsOUT->eError =
+		PVRSRVResetHWRLogsKM(
+					hDevNodeInt);
+
+
+
+
+ResetHWRLogs_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeSoftReset(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_SOFTRESET *psSoftResetIN,
+					  PVRSRV_BRIDGE_OUT_SOFTRESET *psSoftResetOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hDevNodeInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psSoftResetOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &hDevNodeInt,
+											psSoftResetIN->hDevNode,
+											PVRSRV_HANDLE_TYPE_DEV_NODE);
+					if(psSoftResetOUT->eError != PVRSRV_OK)
+					{
+						goto SoftReset_exit;
+					}
+				}
+
+
+	psSoftResetOUT->eError =
+		PVRSRVSoftResetKM(
+					hDevNodeInt,
+					psSoftResetIN->ui64ResetValue1,
+					psSoftResetIN->ui64ResetValue2);
+
+
+
+
+SoftReset_exit:
+
+	return 0;
+}
+
+
+
+/* *************************************************************************** 
+ * Server bridge dispatch related glue 
+ */
+
+
+PVRSRV_ERROR InitSRVCOREBridge(IMG_VOID);
+PVRSRV_ERROR DeinitSRVCOREBridge(IMG_VOID);
+
+/*
+ * Register all SRVCORE functions with services
+ */
+PVRSRV_ERROR InitSRVCOREBridge(IMG_VOID)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_CONNECT, PVRSRVBridgeConnect,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_DISCONNECT, PVRSRVBridgeDisconnect,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_ENUMERATEDEVICES, PVRSRVBridgeEnumerateDevices,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_ACQUIREDEVICEDATA, PVRSRVBridgeAcquireDeviceData,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_RELEASEDEVICEDATA, PVRSRVBridgeReleaseDeviceData,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_INITSRVDISCONNECT, PVRSRVBridgeInitSrvDisconnect,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_ACQUIREGLOBALEVENTOBJECT, PVRSRVBridgeAcquireGlobalEventObject,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_RELEASEGLOBALEVENTOBJECT, PVRSRVBridgeReleaseGlobalEventObject,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTOPEN, PVRSRVBridgeEventObjectOpen,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAIT, PVRSRVBridgeEventObjectWait,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTCLOSE, PVRSRVBridgeEventObjectClose,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_DUMPDEBUGINFO, PVRSRVBridgeDumpDebugInfo,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_GETDEVCLOCKSPEED, PVRSRVBridgeGetDevClockSpeed,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_HWOPTIMEOUT, PVRSRVBridgeHWOpTimeout,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_KICKDEVICES, PVRSRVBridgeKickDevices,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_RESETHWRLOGS, PVRSRVBridgeResetHWRLogs,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_SOFTRESET, PVRSRVBridgeSoftReset,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all srvcore functions with services
+ */
+PVRSRV_ERROR DeinitSRVCOREBridge(IMG_VOID)
+{
+	return PVRSRV_OK;
+}
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/sync_bridge/common_sync_bridge.h b/drivers/external_drivers/intel_media/graphics/rgx/generated/sync_bridge/common_sync_bridge.h
new file mode 100644
index 0000000..dc28674
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/sync_bridge/common_sync_bridge.h
@@ -0,0 +1,481 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common bridge header for sync
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures that are used by both
+                the client and sever side of the bridge for sync
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_SYNC_BRIDGE_H
+#define COMMON_SYNC_BRIDGE_H
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "pdump.h"
+#include "pdumpdefs.h"
+#include "devicemem_typedefs.h"
+
+
+#define PVRSRV_BRIDGE_SYNC_CMD_FIRST			0
+#define PVRSRV_BRIDGE_SYNC_ALLOCSYNCPRIMITIVEBLOCK			PVRSRV_BRIDGE_SYNC_CMD_FIRST+0
+#define PVRSRV_BRIDGE_SYNC_FREESYNCPRIMITIVEBLOCK			PVRSRV_BRIDGE_SYNC_CMD_FIRST+1
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMSET			PVRSRV_BRIDGE_SYNC_CMD_FIRST+2
+#define PVRSRV_BRIDGE_SYNC_SERVERSYNCPRIMSET			PVRSRV_BRIDGE_SYNC_CMD_FIRST+3
+#define PVRSRV_BRIDGE_SYNC_SYNCRECORDREMOVEBYHANDLE			PVRSRV_BRIDGE_SYNC_CMD_FIRST+4
+#define PVRSRV_BRIDGE_SYNC_SYNCRECORDADD			PVRSRV_BRIDGE_SYNC_CMD_FIRST+5
+#define PVRSRV_BRIDGE_SYNC_SERVERSYNCALLOC			PVRSRV_BRIDGE_SYNC_CMD_FIRST+6
+#define PVRSRV_BRIDGE_SYNC_SERVERSYNCFREE			PVRSRV_BRIDGE_SYNC_CMD_FIRST+7
+#define PVRSRV_BRIDGE_SYNC_SERVERSYNCQUEUEHWOP			PVRSRV_BRIDGE_SYNC_CMD_FIRST+8
+#define PVRSRV_BRIDGE_SYNC_SERVERSYNCGETSTATUS			PVRSRV_BRIDGE_SYNC_CMD_FIRST+9
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMOPCREATE			PVRSRV_BRIDGE_SYNC_CMD_FIRST+10
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMOPTAKE			PVRSRV_BRIDGE_SYNC_CMD_FIRST+11
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMOPREADY			PVRSRV_BRIDGE_SYNC_CMD_FIRST+12
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMOPCOMPLETE			PVRSRV_BRIDGE_SYNC_CMD_FIRST+13
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMOPDESTROY			PVRSRV_BRIDGE_SYNC_CMD_FIRST+14
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMP			PVRSRV_BRIDGE_SYNC_CMD_FIRST+15
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPVALUE			PVRSRV_BRIDGE_SYNC_CMD_FIRST+16
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPPOL			PVRSRV_BRIDGE_SYNC_CMD_FIRST+17
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMOPPDUMPPOL			PVRSRV_BRIDGE_SYNC_CMD_FIRST+18
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPCBP			PVRSRV_BRIDGE_SYNC_CMD_FIRST+19
+#define PVRSRV_BRIDGE_SYNC_CMD_LAST			(PVRSRV_BRIDGE_SYNC_CMD_FIRST+19)
+
+
+/*******************************************
+            AllocSyncPrimitiveBlock          
+ *******************************************/
+
+/* Bridge in structure for AllocSyncPrimitiveBlock */
+typedef struct PVRSRV_BRIDGE_IN_ALLOCSYNCPRIMITIVEBLOCK_TAG
+{
+	IMG_HANDLE hDevNode;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_ALLOCSYNCPRIMITIVEBLOCK;
+
+
+/* Bridge out structure for AllocSyncPrimitiveBlock */
+typedef struct PVRSRV_BRIDGE_OUT_ALLOCSYNCPRIMITIVEBLOCK_TAG
+{
+	IMG_HANDLE hSyncHandle;
+	IMG_UINT32 ui32SyncPrimVAddr;
+	IMG_UINT32 ui32SyncPrimBlockSize;
+	DEVMEM_SERVER_EXPORTCOOKIE hExportCookie;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_ALLOCSYNCPRIMITIVEBLOCK;
+
+/*******************************************
+            FreeSyncPrimitiveBlock          
+ *******************************************/
+
+/* Bridge in structure for FreeSyncPrimitiveBlock */
+typedef struct PVRSRV_BRIDGE_IN_FREESYNCPRIMITIVEBLOCK_TAG
+{
+	IMG_HANDLE hSyncHandle;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_FREESYNCPRIMITIVEBLOCK;
+
+
+/* Bridge out structure for FreeSyncPrimitiveBlock */
+typedef struct PVRSRV_BRIDGE_OUT_FREESYNCPRIMITIVEBLOCK_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_FREESYNCPRIMITIVEBLOCK;
+
+/*******************************************
+            SyncPrimSet          
+ *******************************************/
+
+/* Bridge in structure for SyncPrimSet */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMSET_TAG
+{
+	IMG_HANDLE hSyncHandle;
+	IMG_UINT32 ui32Index;
+	IMG_UINT32 ui32Value;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMSET;
+
+
+/* Bridge out structure for SyncPrimSet */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMSET_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMSET;
+
+/*******************************************
+            ServerSyncPrimSet          
+ *******************************************/
+
+/* Bridge in structure for ServerSyncPrimSet */
+typedef struct PVRSRV_BRIDGE_IN_SERVERSYNCPRIMSET_TAG
+{
+	IMG_HANDLE hSyncHandle;
+	IMG_UINT32 ui32Value;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SERVERSYNCPRIMSET;
+
+
+/* Bridge out structure for ServerSyncPrimSet */
+typedef struct PVRSRV_BRIDGE_OUT_SERVERSYNCPRIMSET_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SERVERSYNCPRIMSET;
+
+/*******************************************
+            SyncRecordRemoveByHandle          
+ *******************************************/
+
+/* Bridge in structure for SyncRecordRemoveByHandle */
+typedef struct PVRSRV_BRIDGE_IN_SYNCRECORDREMOVEBYHANDLE_TAG
+{
+	IMG_HANDLE hhRecord;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCRECORDREMOVEBYHANDLE;
+
+
+/* Bridge out structure for SyncRecordRemoveByHandle */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCRECORDREMOVEBYHANDLE_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCRECORDREMOVEBYHANDLE;
+
+/*******************************************
+            SyncRecordAdd          
+ *******************************************/
+
+/* Bridge in structure for SyncRecordAdd */
+typedef struct PVRSRV_BRIDGE_IN_SYNCRECORDADD_TAG
+{
+	IMG_HANDLE hhServerSyncPrimBlock;
+	IMG_UINT32 ui32ui32FwBlockAddr;
+	IMG_UINT32 ui32ui32SyncOffset;
+	IMG_BOOL bbServerSync;
+	IMG_UINT32 ui32ClassNameSize;
+	const IMG_CHAR * puiClassName;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCRECORDADD;
+
+
+/* Bridge out structure for SyncRecordAdd */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCRECORDADD_TAG
+{
+	IMG_HANDLE hhRecord;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCRECORDADD;
+
+/*******************************************
+            ServerSyncAlloc          
+ *******************************************/
+
+/* Bridge in structure for ServerSyncAlloc */
+typedef struct PVRSRV_BRIDGE_IN_SERVERSYNCALLOC_TAG
+{
+	IMG_HANDLE hDevNode;
+	IMG_UINT32 ui32ClassNameSize;
+	const IMG_CHAR * puiClassName;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SERVERSYNCALLOC;
+
+
+/* Bridge out structure for ServerSyncAlloc */
+typedef struct PVRSRV_BRIDGE_OUT_SERVERSYNCALLOC_TAG
+{
+	IMG_HANDLE hSyncHandle;
+	IMG_UINT32 ui32SyncPrimVAddr;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SERVERSYNCALLOC;
+
+/*******************************************
+            ServerSyncFree          
+ *******************************************/
+
+/* Bridge in structure for ServerSyncFree */
+typedef struct PVRSRV_BRIDGE_IN_SERVERSYNCFREE_TAG
+{
+	IMG_HANDLE hSyncHandle;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SERVERSYNCFREE;
+
+
+/* Bridge out structure for ServerSyncFree */
+typedef struct PVRSRV_BRIDGE_OUT_SERVERSYNCFREE_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SERVERSYNCFREE;
+
+/*******************************************
+            ServerSyncQueueHWOp          
+ *******************************************/
+
+/* Bridge in structure for ServerSyncQueueHWOp */
+typedef struct PVRSRV_BRIDGE_IN_SERVERSYNCQUEUEHWOP_TAG
+{
+	IMG_HANDLE hSyncHandle;
+	IMG_BOOL bbUpdate;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SERVERSYNCQUEUEHWOP;
+
+
+/* Bridge out structure for ServerSyncQueueHWOp */
+typedef struct PVRSRV_BRIDGE_OUT_SERVERSYNCQUEUEHWOP_TAG
+{
+	IMG_UINT32 ui32FenceValue;
+	IMG_UINT32 ui32UpdateValue;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SERVERSYNCQUEUEHWOP;
+
+/*******************************************
+            ServerSyncGetStatus          
+ *******************************************/
+
+/* Bridge in structure for ServerSyncGetStatus */
+typedef struct PVRSRV_BRIDGE_IN_SERVERSYNCGETSTATUS_TAG
+{
+	IMG_UINT32 ui32SyncCount;
+	IMG_HANDLE * phSyncHandle;
+	/* Output pointer pui32UID is also an implied input */
+	IMG_UINT32 * pui32UID;
+	/* Output pointer pui32FWAddr is also an implied input */
+	IMG_UINT32 * pui32FWAddr;
+	/* Output pointer pui32CurrentOp is also an implied input */
+	IMG_UINT32 * pui32CurrentOp;
+	/* Output pointer pui32NextOp is also an implied input */
+	IMG_UINT32 * pui32NextOp;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SERVERSYNCGETSTATUS;
+
+
+/* Bridge out structure for ServerSyncGetStatus */
+typedef struct PVRSRV_BRIDGE_OUT_SERVERSYNCGETSTATUS_TAG
+{
+	IMG_UINT32 * pui32UID;
+	IMG_UINT32 * pui32FWAddr;
+	IMG_UINT32 * pui32CurrentOp;
+	IMG_UINT32 * pui32NextOp;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SERVERSYNCGETSTATUS;
+
+/*******************************************
+            SyncPrimOpCreate          
+ *******************************************/
+
+/* Bridge in structure for SyncPrimOpCreate */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMOPCREATE_TAG
+{
+	IMG_UINT32 ui32SyncBlockCount;
+	IMG_HANDLE * phBlockList;
+	IMG_UINT32 ui32ClientSyncCount;
+	IMG_UINT32 * pui32SyncBlockIndex;
+	IMG_UINT32 * pui32Index;
+	IMG_UINT32 ui32ServerSyncCount;
+	IMG_HANDLE * phServerSync;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMOPCREATE;
+
+
+/* Bridge out structure for SyncPrimOpCreate */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMOPCREATE_TAG
+{
+	IMG_HANDLE hServerCookie;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMOPCREATE;
+
+/*******************************************
+            SyncPrimOpTake          
+ *******************************************/
+
+/* Bridge in structure for SyncPrimOpTake */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMOPTAKE_TAG
+{
+	IMG_HANDLE hServerCookie;
+	IMG_UINT32 ui32ClientSyncCount;
+	IMG_UINT32 * pui32Flags;
+	IMG_UINT32 * pui32FenceValue;
+	IMG_UINT32 * pui32UpdateValue;
+	IMG_UINT32 ui32ServerSyncCount;
+	IMG_UINT32 * pui32ServerFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMOPTAKE;
+
+
+/* Bridge out structure for SyncPrimOpTake */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMOPTAKE_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMOPTAKE;
+
+/*******************************************
+            SyncPrimOpReady          
+ *******************************************/
+
+/* Bridge in structure for SyncPrimOpReady */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMOPREADY_TAG
+{
+	IMG_HANDLE hServerCookie;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMOPREADY;
+
+
+/* Bridge out structure for SyncPrimOpReady */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMOPREADY_TAG
+{
+	IMG_BOOL bReady;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMOPREADY;
+
+/*******************************************
+            SyncPrimOpComplete          
+ *******************************************/
+
+/* Bridge in structure for SyncPrimOpComplete */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMOPCOMPLETE_TAG
+{
+	IMG_HANDLE hServerCookie;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMOPCOMPLETE;
+
+
+/* Bridge out structure for SyncPrimOpComplete */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMOPCOMPLETE_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMOPCOMPLETE;
+
+/*******************************************
+            SyncPrimOpDestroy          
+ *******************************************/
+
+/* Bridge in structure for SyncPrimOpDestroy */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMOPDESTROY_TAG
+{
+	IMG_HANDLE hServerCookie;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMOPDESTROY;
+
+
+/* Bridge out structure for SyncPrimOpDestroy */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMOPDESTROY_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMOPDESTROY;
+
+/*******************************************
+            SyncPrimPDump          
+ *******************************************/
+
+/* Bridge in structure for SyncPrimPDump */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMPDUMP_TAG
+{
+	IMG_HANDLE hSyncHandle;
+	IMG_UINT32 ui32Offset;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMPDUMP;
+
+
+/* Bridge out structure for SyncPrimPDump */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMP_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMP;
+
+/*******************************************
+            SyncPrimPDumpValue          
+ *******************************************/
+
+/* Bridge in structure for SyncPrimPDumpValue */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPVALUE_TAG
+{
+	IMG_HANDLE hSyncHandle;
+	IMG_UINT32 ui32Offset;
+	IMG_UINT32 ui32Value;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPVALUE;
+
+
+/* Bridge out structure for SyncPrimPDumpValue */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPVALUE_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPVALUE;
+
+/*******************************************
+            SyncPrimPDumpPol          
+ *******************************************/
+
+/* Bridge in structure for SyncPrimPDumpPol */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPPOL_TAG
+{
+	IMG_HANDLE hSyncHandle;
+	IMG_UINT32 ui32Offset;
+	IMG_UINT32 ui32Value;
+	IMG_UINT32 ui32Mask;
+	PDUMP_POLL_OPERATOR eOperator;
+	PDUMP_FLAGS_T uiPDumpFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPPOL;
+
+
+/* Bridge out structure for SyncPrimPDumpPol */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPPOL_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPPOL;
+
+/*******************************************
+            SyncPrimOpPDumpPol          
+ *******************************************/
+
+/* Bridge in structure for SyncPrimOpPDumpPol */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMOPPDUMPPOL_TAG
+{
+	IMG_HANDLE hServerCookie;
+	PDUMP_POLL_OPERATOR eOperator;
+	PDUMP_FLAGS_T uiPDumpFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMOPPDUMPPOL;
+
+
+/* Bridge out structure for SyncPrimOpPDumpPol */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMOPPDUMPPOL_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMOPPDUMPPOL;
+
+/*******************************************
+            SyncPrimPDumpCBP          
+ *******************************************/
+
+/* Bridge in structure for SyncPrimPDumpCBP */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPCBP_TAG
+{
+	IMG_HANDLE hSyncHandle;
+	IMG_UINT32 ui32Offset;
+	IMG_DEVMEM_OFFSET_T uiWriteOffset;
+	IMG_DEVMEM_SIZE_T uiPacketSize;
+	IMG_DEVMEM_SIZE_T uiBufferSize;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPCBP;
+
+
+/* Bridge out structure for SyncPrimPDumpCBP */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPCBP_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPCBP;
+
+#endif /* COMMON_SYNC_BRIDGE_H */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/sync_bridge/server_sync_bridge.c b/drivers/external_drivers/intel_media/graphics/rgx/generated/sync_bridge/server_sync_bridge.c
new file mode 100644
index 0000000..a7f79c4
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/sync_bridge/server_sync_bridge.c
@@ -0,0 +1,1548 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server bridge for sync
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for sync
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <asm/uaccess.h>
+
+#include "img_defs.h"
+
+#include "sync_server.h"
+#include "pdump.h"
+
+
+#include "common_sync_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#if defined (SUPPORT_AUTH)
+#include "osauth.h"
+#endif
+
+#include <linux/slab.h>
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+ 
+static IMG_INT
+PVRSRVBridgeAllocSyncPrimitiveBlock(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_ALLOCSYNCPRIMITIVEBLOCK *psAllocSyncPrimitiveBlockIN,
+					  PVRSRV_BRIDGE_OUT_ALLOCSYNCPRIMITIVEBLOCK *psAllocSyncPrimitiveBlockOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hDevNodeInt = IMG_NULL;
+	SYNC_PRIMITIVE_BLOCK * psSyncHandleInt = IMG_NULL;
+	DEVMEM_EXPORTCOOKIE * psExportCookieInt = IMG_NULL;
+
+
+
+	psAllocSyncPrimitiveBlockOUT->hSyncHandle = IMG_NULL;
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psAllocSyncPrimitiveBlockOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &hDevNodeInt,
+											psAllocSyncPrimitiveBlockIN->hDevNode,
+											PVRSRV_HANDLE_TYPE_DEV_NODE);
+					if(psAllocSyncPrimitiveBlockOUT->eError != PVRSRV_OK)
+					{
+						goto AllocSyncPrimitiveBlock_exit;
+					}
+				}
+
+
+	psAllocSyncPrimitiveBlockOUT->eError =
+		PVRSRVAllocSyncPrimitiveBlockKM(psConnection,
+					hDevNodeInt,
+					&psSyncHandleInt,
+					&psAllocSyncPrimitiveBlockOUT->ui32SyncPrimVAddr,
+					&psAllocSyncPrimitiveBlockOUT->ui32SyncPrimBlockSize,
+					&psExportCookieInt);
+	/* Exit early if bridged call fails */
+	if(psAllocSyncPrimitiveBlockOUT->eError != PVRSRV_OK)
+	{
+		goto AllocSyncPrimitiveBlock_exit;
+	}
+
+
+	psAllocSyncPrimitiveBlockOUT->eError = PVRSRVAllocHandle(psConnection->psHandleBase,
+							&psAllocSyncPrimitiveBlockOUT->hSyncHandle,
+							(IMG_VOID *) psSyncHandleInt,
+							PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+							,(PFN_HANDLE_RELEASE)&PVRSRVFreeSyncPrimitiveBlockKM);
+	if (psAllocSyncPrimitiveBlockOUT->eError != PVRSRV_OK)
+	{
+		goto AllocSyncPrimitiveBlock_exit;
+	}
+
+
+	psAllocSyncPrimitiveBlockOUT->eError = PVRSRVAllocSubHandle(psConnection->psHandleBase,
+							&psAllocSyncPrimitiveBlockOUT->hExportCookie,
+							(IMG_VOID *) psExportCookieInt,
+							PVRSRV_HANDLE_TYPE_SERVER_EXPORTCOOKIE,
+							PVRSRV_HANDLE_ALLOC_FLAG_NONE
+							,psAllocSyncPrimitiveBlockOUT->hSyncHandle);
+	if (psAllocSyncPrimitiveBlockOUT->eError != PVRSRV_OK)
+	{
+		goto AllocSyncPrimitiveBlock_exit;
+	}
+
+
+
+
+AllocSyncPrimitiveBlock_exit:
+	if (psAllocSyncPrimitiveBlockOUT->eError != PVRSRV_OK)
+	{
+		if (psAllocSyncPrimitiveBlockOUT->hSyncHandle)
+		{
+			PVRSRV_ERROR eError = PVRSRVReleaseHandle(psConnection->psHandleBase,
+						(IMG_HANDLE) psAllocSyncPrimitiveBlockOUT->hSyncHandle,
+						PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+
+			/* Releasing the handle should free/destroy/release the resource. This should never fail... */
+			PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY));
+
+			/* Avoid freeing/destroying/releasing the resource a second time below */
+			psSyncHandleInt = IMG_NULL;
+		}
+
+
+		if (psSyncHandleInt)
+		{
+			PVRSRVFreeSyncPrimitiveBlockKM(psSyncHandleInt);
+		}
+	}
+
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeFreeSyncPrimitiveBlock(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_FREESYNCPRIMITIVEBLOCK *psFreeSyncPrimitiveBlockIN,
+					  PVRSRV_BRIDGE_OUT_FREESYNCPRIMITIVEBLOCK *psFreeSyncPrimitiveBlockOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+	psFreeSyncPrimitiveBlockOUT->eError =
+		PVRSRVReleaseHandle(psConnection->psHandleBase,
+					(IMG_HANDLE) psFreeSyncPrimitiveBlockIN->hSyncHandle,
+					PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+	if ((psFreeSyncPrimitiveBlockOUT->eError != PVRSRV_OK) && (psFreeSyncPrimitiveBlockOUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_ASSERT(0);
+		goto FreeSyncPrimitiveBlock_exit;
+	}
+
+
+
+FreeSyncPrimitiveBlock_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeSyncPrimSet(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_SYNCPRIMSET *psSyncPrimSetIN,
+					  PVRSRV_BRIDGE_OUT_SYNCPRIMSET *psSyncPrimSetOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	SYNC_PRIMITIVE_BLOCK * psSyncHandleInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psSyncPrimSetOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psSyncHandleInt,
+											psSyncPrimSetIN->hSyncHandle,
+											PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+					if(psSyncPrimSetOUT->eError != PVRSRV_OK)
+					{
+						goto SyncPrimSet_exit;
+					}
+				}
+
+
+	psSyncPrimSetOUT->eError =
+		PVRSRVSyncPrimSetKM(
+					psSyncHandleInt,
+					psSyncPrimSetIN->ui32Index,
+					psSyncPrimSetIN->ui32Value);
+
+
+
+
+SyncPrimSet_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeServerSyncPrimSet(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_SERVERSYNCPRIMSET *psServerSyncPrimSetIN,
+					  PVRSRV_BRIDGE_OUT_SERVERSYNCPRIMSET *psServerSyncPrimSetOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	SERVER_SYNC_PRIMITIVE * psSyncHandleInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psServerSyncPrimSetOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psSyncHandleInt,
+											psServerSyncPrimSetIN->hSyncHandle,
+											PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+					if(psServerSyncPrimSetOUT->eError != PVRSRV_OK)
+					{
+						goto ServerSyncPrimSet_exit;
+					}
+				}
+
+
+	psServerSyncPrimSetOUT->eError =
+		PVRSRVServerSyncPrimSetKM(
+					psSyncHandleInt,
+					psServerSyncPrimSetIN->ui32Value);
+
+
+
+
+ServerSyncPrimSet_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeSyncRecordRemoveByHandle(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_SYNCRECORDREMOVEBYHANDLE *psSyncRecordRemoveByHandleIN,
+					  PVRSRV_BRIDGE_OUT_SYNCRECORDREMOVEBYHANDLE *psSyncRecordRemoveByHandleOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+	psSyncRecordRemoveByHandleOUT->eError =
+		PVRSRVReleaseHandle(psConnection->psHandleBase,
+					(IMG_HANDLE) psSyncRecordRemoveByHandleIN->hhRecord,
+					PVRSRV_HANDLE_TYPE_SYNC_RECORD_HANDLE);
+	if ((psSyncRecordRemoveByHandleOUT->eError != PVRSRV_OK) && (psSyncRecordRemoveByHandleOUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_ASSERT(0);
+		goto SyncRecordRemoveByHandle_exit;
+	}
+
+
+
+SyncRecordRemoveByHandle_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeSyncRecordAdd(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_SYNCRECORDADD *psSyncRecordAddIN,
+					  PVRSRV_BRIDGE_OUT_SYNCRECORDADD *psSyncRecordAddOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	SYNC_RECORD_HANDLE pshRecordInt = IMG_NULL;
+	SYNC_PRIMITIVE_BLOCK * pshServerSyncPrimBlockInt = IMG_NULL;
+	IMG_CHAR *uiClassNameInt = IMG_NULL;
+
+
+
+
+	if (psSyncRecordAddIN->ui32ClassNameSize != 0)
+	{
+		uiClassNameInt = OSAllocMem(psSyncRecordAddIN->ui32ClassNameSize * sizeof(IMG_CHAR));
+		if (!uiClassNameInt)
+		{
+			psSyncRecordAddOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto SyncRecordAdd_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psSyncRecordAddIN->puiClassName, psSyncRecordAddIN->ui32ClassNameSize * sizeof(IMG_CHAR))
+				|| (OSCopyFromUser(NULL, uiClassNameInt, psSyncRecordAddIN->puiClassName,
+				psSyncRecordAddIN->ui32ClassNameSize * sizeof(IMG_CHAR)) != PVRSRV_OK) )
+			{
+				psSyncRecordAddOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto SyncRecordAdd_exit;
+			}
+
+
+
+				{
+					/* Look up the address from the handle */
+					psSyncRecordAddOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &pshServerSyncPrimBlockInt,
+											psSyncRecordAddIN->hhServerSyncPrimBlock,
+											PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+					if(psSyncRecordAddOUT->eError != PVRSRV_OK)
+					{
+						goto SyncRecordAdd_exit;
+					}
+				}
+
+
+	psSyncRecordAddOUT->eError =
+		PVRSRVSyncRecordAddKM(
+					&pshRecordInt,
+					pshServerSyncPrimBlockInt,
+					psSyncRecordAddIN->ui32ui32FwBlockAddr,
+					psSyncRecordAddIN->ui32ui32SyncOffset,
+					psSyncRecordAddIN->bbServerSync,
+					psSyncRecordAddIN->ui32ClassNameSize,
+					uiClassNameInt);
+	/* Exit early if bridged call fails */
+	if(psSyncRecordAddOUT->eError != PVRSRV_OK)
+	{
+		goto SyncRecordAdd_exit;
+	}
+
+
+	psSyncRecordAddOUT->eError = PVRSRVAllocHandle(psConnection->psHandleBase,
+							&psSyncRecordAddOUT->hhRecord,
+							(IMG_VOID *) pshRecordInt,
+							PVRSRV_HANDLE_TYPE_SYNC_RECORD_HANDLE,
+							PVRSRV_HANDLE_ALLOC_FLAG_NONE
+							,(PFN_HANDLE_RELEASE)&PVRSRVSyncRecordRemoveByHandleKM);
+	if (psSyncRecordAddOUT->eError != PVRSRV_OK)
+	{
+		goto SyncRecordAdd_exit;
+	}
+
+
+
+
+SyncRecordAdd_exit:
+	if (psSyncRecordAddOUT->eError != PVRSRV_OK)
+	{
+		if (pshRecordInt)
+		{
+			PVRSRVSyncRecordRemoveByHandleKM(pshRecordInt);
+		}
+	}
+
+	if (uiClassNameInt)
+		OSFreeMem(uiClassNameInt);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeServerSyncAlloc(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_SERVERSYNCALLOC *psServerSyncAllocIN,
+					  PVRSRV_BRIDGE_OUT_SERVERSYNCALLOC *psServerSyncAllocOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hDevNodeInt = IMG_NULL;
+	SERVER_SYNC_PRIMITIVE * psSyncHandleInt = IMG_NULL;
+	IMG_CHAR *uiClassNameInt = IMG_NULL;
+
+
+
+
+	if (psServerSyncAllocIN->ui32ClassNameSize != 0)
+	{
+		uiClassNameInt = OSAllocMem(psServerSyncAllocIN->ui32ClassNameSize * sizeof(IMG_CHAR));
+		if (!uiClassNameInt)
+		{
+			psServerSyncAllocOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto ServerSyncAlloc_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psServerSyncAllocIN->puiClassName, psServerSyncAllocIN->ui32ClassNameSize * sizeof(IMG_CHAR))
+				|| (OSCopyFromUser(NULL, uiClassNameInt, psServerSyncAllocIN->puiClassName,
+				psServerSyncAllocIN->ui32ClassNameSize * sizeof(IMG_CHAR)) != PVRSRV_OK) )
+			{
+				psServerSyncAllocOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto ServerSyncAlloc_exit;
+			}
+
+
+
+				{
+					/* Look up the address from the handle */
+					psServerSyncAllocOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &hDevNodeInt,
+											psServerSyncAllocIN->hDevNode,
+											PVRSRV_HANDLE_TYPE_DEV_NODE);
+					if(psServerSyncAllocOUT->eError != PVRSRV_OK)
+					{
+						goto ServerSyncAlloc_exit;
+					}
+				}
+
+
+	psServerSyncAllocOUT->eError =
+		PVRSRVServerSyncAllocKM(
+					hDevNodeInt,
+					&psSyncHandleInt,
+					&psServerSyncAllocOUT->ui32SyncPrimVAddr,
+					psServerSyncAllocIN->ui32ClassNameSize,
+					uiClassNameInt);
+	/* Exit early if bridged call fails */
+	if(psServerSyncAllocOUT->eError != PVRSRV_OK)
+	{
+		goto ServerSyncAlloc_exit;
+	}
+
+
+	psServerSyncAllocOUT->eError = PVRSRVAllocHandle(psConnection->psHandleBase,
+							&psServerSyncAllocOUT->hSyncHandle,
+							(IMG_VOID *) psSyncHandleInt,
+							PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+							,(PFN_HANDLE_RELEASE)&PVRSRVServerSyncFreeKM);
+	if (psServerSyncAllocOUT->eError != PVRSRV_OK)
+	{
+		goto ServerSyncAlloc_exit;
+	}
+
+
+
+
+ServerSyncAlloc_exit:
+	if (psServerSyncAllocOUT->eError != PVRSRV_OK)
+	{
+		if (psSyncHandleInt)
+		{
+			PVRSRVServerSyncFreeKM(psSyncHandleInt);
+		}
+	}
+
+	if (uiClassNameInt)
+		OSFreeMem(uiClassNameInt);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeServerSyncFree(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_SERVERSYNCFREE *psServerSyncFreeIN,
+					  PVRSRV_BRIDGE_OUT_SERVERSYNCFREE *psServerSyncFreeOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+	psServerSyncFreeOUT->eError =
+		PVRSRVReleaseHandle(psConnection->psHandleBase,
+					(IMG_HANDLE) psServerSyncFreeIN->hSyncHandle,
+					PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+	if ((psServerSyncFreeOUT->eError != PVRSRV_OK) && (psServerSyncFreeOUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_ASSERT(0);
+		goto ServerSyncFree_exit;
+	}
+
+
+
+ServerSyncFree_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeServerSyncQueueHWOp(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_SERVERSYNCQUEUEHWOP *psServerSyncQueueHWOpIN,
+					  PVRSRV_BRIDGE_OUT_SERVERSYNCQUEUEHWOP *psServerSyncQueueHWOpOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	SERVER_SYNC_PRIMITIVE * psSyncHandleInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psServerSyncQueueHWOpOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psSyncHandleInt,
+											psServerSyncQueueHWOpIN->hSyncHandle,
+											PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+					if(psServerSyncQueueHWOpOUT->eError != PVRSRV_OK)
+					{
+						goto ServerSyncQueueHWOp_exit;
+					}
+				}
+
+
+	psServerSyncQueueHWOpOUT->eError =
+		PVRSRVServerSyncQueueHWOpKM(
+					psSyncHandleInt,
+					psServerSyncQueueHWOpIN->bbUpdate,
+					&psServerSyncQueueHWOpOUT->ui32FenceValue,
+					&psServerSyncQueueHWOpOUT->ui32UpdateValue);
+
+
+
+
+ServerSyncQueueHWOp_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeServerSyncGetStatus(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_SERVERSYNCGETSTATUS *psServerSyncGetStatusIN,
+					  PVRSRV_BRIDGE_OUT_SERVERSYNCGETSTATUS *psServerSyncGetStatusOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	SERVER_SYNC_PRIMITIVE * *psSyncHandleInt = IMG_NULL;
+	IMG_HANDLE *hSyncHandleInt2 = IMG_NULL;
+	IMG_UINT32 *pui32UIDInt = IMG_NULL;
+	IMG_UINT32 *pui32FWAddrInt = IMG_NULL;
+	IMG_UINT32 *pui32CurrentOpInt = IMG_NULL;
+	IMG_UINT32 *pui32NextOpInt = IMG_NULL;
+
+
+	psServerSyncGetStatusOUT->pui32UID = psServerSyncGetStatusIN->pui32UID;
+	psServerSyncGetStatusOUT->pui32FWAddr = psServerSyncGetStatusIN->pui32FWAddr;
+	psServerSyncGetStatusOUT->pui32CurrentOp = psServerSyncGetStatusIN->pui32CurrentOp;
+	psServerSyncGetStatusOUT->pui32NextOp = psServerSyncGetStatusIN->pui32NextOp;
+
+
+	if (psServerSyncGetStatusIN->ui32SyncCount != 0)
+	{
+		psSyncHandleInt = OSAllocMem(psServerSyncGetStatusIN->ui32SyncCount * sizeof(SERVER_SYNC_PRIMITIVE *));
+		if (!psSyncHandleInt)
+		{
+			psServerSyncGetStatusOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto ServerSyncGetStatus_exit;
+		}
+		hSyncHandleInt2 = OSAllocMem(psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_HANDLE));
+		if (!hSyncHandleInt2)
+		{
+			psServerSyncGetStatusOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto ServerSyncGetStatus_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psServerSyncGetStatusIN->phSyncHandle, psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_HANDLE))
+				|| (OSCopyFromUser(NULL, hSyncHandleInt2, psServerSyncGetStatusIN->phSyncHandle,
+				psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) )
+			{
+				psServerSyncGetStatusOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto ServerSyncGetStatus_exit;
+			}
+	if (psServerSyncGetStatusIN->ui32SyncCount != 0)
+	{
+		pui32UIDInt = OSAllocMem(psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32));
+		if (!pui32UIDInt)
+		{
+			psServerSyncGetStatusOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto ServerSyncGetStatus_exit;
+		}
+	}
+
+	if (psServerSyncGetStatusIN->ui32SyncCount != 0)
+	{
+		pui32FWAddrInt = OSAllocMem(psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32));
+		if (!pui32FWAddrInt)
+		{
+			psServerSyncGetStatusOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto ServerSyncGetStatus_exit;
+		}
+	}
+
+	if (psServerSyncGetStatusIN->ui32SyncCount != 0)
+	{
+		pui32CurrentOpInt = OSAllocMem(psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32));
+		if (!pui32CurrentOpInt)
+		{
+			psServerSyncGetStatusOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto ServerSyncGetStatus_exit;
+		}
+	}
+
+	if (psServerSyncGetStatusIN->ui32SyncCount != 0)
+	{
+		pui32NextOpInt = OSAllocMem(psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32));
+		if (!pui32NextOpInt)
+		{
+			psServerSyncGetStatusOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto ServerSyncGetStatus_exit;
+		}
+	}
+
+
+
+
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psServerSyncGetStatusIN->ui32SyncCount;i++)
+		{
+				{
+					/* Look up the address from the handle */
+					psServerSyncGetStatusOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psSyncHandleInt[i],
+											hSyncHandleInt2[i],
+											PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+					if(psServerSyncGetStatusOUT->eError != PVRSRV_OK)
+					{
+						goto ServerSyncGetStatus_exit;
+					}
+				}
+
+		}
+	}
+
+	psServerSyncGetStatusOUT->eError =
+		PVRSRVServerSyncGetStatusKM(
+					psServerSyncGetStatusIN->ui32SyncCount,
+					psSyncHandleInt,
+					pui32UIDInt,
+					pui32FWAddrInt,
+					pui32CurrentOpInt,
+					pui32NextOpInt);
+
+
+
+	if ( !OSAccessOK(PVR_VERIFY_WRITE, (IMG_VOID*) psServerSyncGetStatusOUT->pui32UID, (psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32))) 
+		|| (OSCopyToUser(NULL, psServerSyncGetStatusOUT->pui32UID, pui32UIDInt,
+		(psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32))) != PVRSRV_OK) )
+	{
+		psServerSyncGetStatusOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+		goto ServerSyncGetStatus_exit;
+	}
+
+	if ( !OSAccessOK(PVR_VERIFY_WRITE, (IMG_VOID*) psServerSyncGetStatusOUT->pui32FWAddr, (psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32))) 
+		|| (OSCopyToUser(NULL, psServerSyncGetStatusOUT->pui32FWAddr, pui32FWAddrInt,
+		(psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32))) != PVRSRV_OK) )
+	{
+		psServerSyncGetStatusOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+		goto ServerSyncGetStatus_exit;
+	}
+
+	if ( !OSAccessOK(PVR_VERIFY_WRITE, (IMG_VOID*) psServerSyncGetStatusOUT->pui32CurrentOp, (psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32))) 
+		|| (OSCopyToUser(NULL, psServerSyncGetStatusOUT->pui32CurrentOp, pui32CurrentOpInt,
+		(psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32))) != PVRSRV_OK) )
+	{
+		psServerSyncGetStatusOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+		goto ServerSyncGetStatus_exit;
+	}
+
+	if ( !OSAccessOK(PVR_VERIFY_WRITE, (IMG_VOID*) psServerSyncGetStatusOUT->pui32NextOp, (psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32))) 
+		|| (OSCopyToUser(NULL, psServerSyncGetStatusOUT->pui32NextOp, pui32NextOpInt,
+		(psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32))) != PVRSRV_OK) )
+	{
+		psServerSyncGetStatusOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+		goto ServerSyncGetStatus_exit;
+	}
+
+
+ServerSyncGetStatus_exit:
+	if (psSyncHandleInt)
+		OSFreeMem(psSyncHandleInt);
+	if (hSyncHandleInt2)
+		OSFreeMem(hSyncHandleInt2);
+	if (pui32UIDInt)
+		OSFreeMem(pui32UIDInt);
+	if (pui32FWAddrInt)
+		OSFreeMem(pui32FWAddrInt);
+	if (pui32CurrentOpInt)
+		OSFreeMem(pui32CurrentOpInt);
+	if (pui32NextOpInt)
+		OSFreeMem(pui32NextOpInt);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeSyncPrimOpCreate(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_SYNCPRIMOPCREATE *psSyncPrimOpCreateIN,
+					  PVRSRV_BRIDGE_OUT_SYNCPRIMOPCREATE *psSyncPrimOpCreateOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	SYNC_PRIMITIVE_BLOCK * *psBlockListInt = IMG_NULL;
+	IMG_HANDLE *hBlockListInt2 = IMG_NULL;
+	IMG_UINT32 *ui32SyncBlockIndexInt = IMG_NULL;
+	IMG_UINT32 *ui32IndexInt = IMG_NULL;
+	SERVER_SYNC_PRIMITIVE * *psServerSyncInt = IMG_NULL;
+	IMG_HANDLE *hServerSyncInt2 = IMG_NULL;
+	SERVER_OP_COOKIE * psServerCookieInt = IMG_NULL;
+
+
+
+
+	if (psSyncPrimOpCreateIN->ui32SyncBlockCount != 0)
+	{
+		psBlockListInt = OSAllocMem(psSyncPrimOpCreateIN->ui32SyncBlockCount * sizeof(SYNC_PRIMITIVE_BLOCK *));
+		if (!psBlockListInt)
+		{
+			psSyncPrimOpCreateOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto SyncPrimOpCreate_exit;
+		}
+		hBlockListInt2 = OSAllocMem(psSyncPrimOpCreateIN->ui32SyncBlockCount * sizeof(IMG_HANDLE));
+		if (!hBlockListInt2)
+		{
+			psSyncPrimOpCreateOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto SyncPrimOpCreate_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psSyncPrimOpCreateIN->phBlockList, psSyncPrimOpCreateIN->ui32SyncBlockCount * sizeof(IMG_HANDLE))
+				|| (OSCopyFromUser(NULL, hBlockListInt2, psSyncPrimOpCreateIN->phBlockList,
+				psSyncPrimOpCreateIN->ui32SyncBlockCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) )
+			{
+				psSyncPrimOpCreateOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto SyncPrimOpCreate_exit;
+			}
+	if (psSyncPrimOpCreateIN->ui32ClientSyncCount != 0)
+	{
+		ui32SyncBlockIndexInt = OSAllocMem(psSyncPrimOpCreateIN->ui32ClientSyncCount * sizeof(IMG_UINT32));
+		if (!ui32SyncBlockIndexInt)
+		{
+			psSyncPrimOpCreateOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto SyncPrimOpCreate_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psSyncPrimOpCreateIN->pui32SyncBlockIndex, psSyncPrimOpCreateIN->ui32ClientSyncCount * sizeof(IMG_UINT32))
+				|| (OSCopyFromUser(NULL, ui32SyncBlockIndexInt, psSyncPrimOpCreateIN->pui32SyncBlockIndex,
+				psSyncPrimOpCreateIN->ui32ClientSyncCount * sizeof(IMG_UINT32)) != PVRSRV_OK) )
+			{
+				psSyncPrimOpCreateOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto SyncPrimOpCreate_exit;
+			}
+	if (psSyncPrimOpCreateIN->ui32ClientSyncCount != 0)
+	{
+		ui32IndexInt = OSAllocMem(psSyncPrimOpCreateIN->ui32ClientSyncCount * sizeof(IMG_UINT32));
+		if (!ui32IndexInt)
+		{
+			psSyncPrimOpCreateOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto SyncPrimOpCreate_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psSyncPrimOpCreateIN->pui32Index, psSyncPrimOpCreateIN->ui32ClientSyncCount * sizeof(IMG_UINT32))
+				|| (OSCopyFromUser(NULL, ui32IndexInt, psSyncPrimOpCreateIN->pui32Index,
+				psSyncPrimOpCreateIN->ui32ClientSyncCount * sizeof(IMG_UINT32)) != PVRSRV_OK) )
+			{
+				psSyncPrimOpCreateOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto SyncPrimOpCreate_exit;
+			}
+	if (psSyncPrimOpCreateIN->ui32ServerSyncCount != 0)
+	{
+		psServerSyncInt = OSAllocMem(psSyncPrimOpCreateIN->ui32ServerSyncCount * sizeof(SERVER_SYNC_PRIMITIVE *));
+		if (!psServerSyncInt)
+		{
+			psSyncPrimOpCreateOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto SyncPrimOpCreate_exit;
+		}
+		hServerSyncInt2 = OSAllocMem(psSyncPrimOpCreateIN->ui32ServerSyncCount * sizeof(IMG_HANDLE));
+		if (!hServerSyncInt2)
+		{
+			psSyncPrimOpCreateOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto SyncPrimOpCreate_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psSyncPrimOpCreateIN->phServerSync, psSyncPrimOpCreateIN->ui32ServerSyncCount * sizeof(IMG_HANDLE))
+				|| (OSCopyFromUser(NULL, hServerSyncInt2, psSyncPrimOpCreateIN->phServerSync,
+				psSyncPrimOpCreateIN->ui32ServerSyncCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) )
+			{
+				psSyncPrimOpCreateOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto SyncPrimOpCreate_exit;
+			}
+
+
+
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psSyncPrimOpCreateIN->ui32SyncBlockCount;i++)
+		{
+				{
+					/* Look up the address from the handle */
+					psSyncPrimOpCreateOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psBlockListInt[i],
+											hBlockListInt2[i],
+											PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+					if(psSyncPrimOpCreateOUT->eError != PVRSRV_OK)
+					{
+						goto SyncPrimOpCreate_exit;
+					}
+				}
+
+		}
+	}
+
+	{
+		IMG_UINT32 i;
+
+		for (i=0;i<psSyncPrimOpCreateIN->ui32ServerSyncCount;i++)
+		{
+				{
+					/* Look up the address from the handle */
+					psSyncPrimOpCreateOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psServerSyncInt[i],
+											hServerSyncInt2[i],
+											PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+					if(psSyncPrimOpCreateOUT->eError != PVRSRV_OK)
+					{
+						goto SyncPrimOpCreate_exit;
+					}
+				}
+
+		}
+	}
+
+	psSyncPrimOpCreateOUT->eError =
+		PVRSRVSyncPrimOpCreateKM(
+					psSyncPrimOpCreateIN->ui32SyncBlockCount,
+					psBlockListInt,
+					psSyncPrimOpCreateIN->ui32ClientSyncCount,
+					ui32SyncBlockIndexInt,
+					ui32IndexInt,
+					psSyncPrimOpCreateIN->ui32ServerSyncCount,
+					psServerSyncInt,
+					&psServerCookieInt);
+	/* Exit early if bridged call fails */
+	if(psSyncPrimOpCreateOUT->eError != PVRSRV_OK)
+	{
+		goto SyncPrimOpCreate_exit;
+	}
+
+
+	psSyncPrimOpCreateOUT->eError = PVRSRVAllocHandle(psConnection->psHandleBase,
+							&psSyncPrimOpCreateOUT->hServerCookie,
+							(IMG_VOID *) psServerCookieInt,
+							PVRSRV_HANDLE_TYPE_SERVER_OP_COOKIE,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+							,(PFN_HANDLE_RELEASE)&PVRSRVSyncPrimOpDestroyKM);
+	if (psSyncPrimOpCreateOUT->eError != PVRSRV_OK)
+	{
+		goto SyncPrimOpCreate_exit;
+	}
+
+
+
+
+SyncPrimOpCreate_exit:
+	if (psSyncPrimOpCreateOUT->eError != PVRSRV_OK)
+	{
+		if (psServerCookieInt)
+		{
+			PVRSRVSyncPrimOpDestroyKM(psServerCookieInt);
+		}
+	}
+
+	if (psBlockListInt)
+		OSFreeMem(psBlockListInt);
+	if (hBlockListInt2)
+		OSFreeMem(hBlockListInt2);
+	if (ui32SyncBlockIndexInt)
+		OSFreeMem(ui32SyncBlockIndexInt);
+	if (ui32IndexInt)
+		OSFreeMem(ui32IndexInt);
+	if (psServerSyncInt)
+		OSFreeMem(psServerSyncInt);
+	if (hServerSyncInt2)
+		OSFreeMem(hServerSyncInt2);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeSyncPrimOpTake(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_SYNCPRIMOPTAKE *psSyncPrimOpTakeIN,
+					  PVRSRV_BRIDGE_OUT_SYNCPRIMOPTAKE *psSyncPrimOpTakeOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	SERVER_OP_COOKIE * psServerCookieInt = IMG_NULL;
+	IMG_UINT32 *ui32FlagsInt = IMG_NULL;
+	IMG_UINT32 *ui32FenceValueInt = IMG_NULL;
+	IMG_UINT32 *ui32UpdateValueInt = IMG_NULL;
+	IMG_UINT32 *ui32ServerFlagsInt = IMG_NULL;
+
+
+
+
+	if (psSyncPrimOpTakeIN->ui32ClientSyncCount != 0)
+	{
+		ui32FlagsInt = OSAllocMem(psSyncPrimOpTakeIN->ui32ClientSyncCount * sizeof(IMG_UINT32));
+		if (!ui32FlagsInt)
+		{
+			psSyncPrimOpTakeOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto SyncPrimOpTake_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psSyncPrimOpTakeIN->pui32Flags, psSyncPrimOpTakeIN->ui32ClientSyncCount * sizeof(IMG_UINT32))
+				|| (OSCopyFromUser(NULL, ui32FlagsInt, psSyncPrimOpTakeIN->pui32Flags,
+				psSyncPrimOpTakeIN->ui32ClientSyncCount * sizeof(IMG_UINT32)) != PVRSRV_OK) )
+			{
+				psSyncPrimOpTakeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto SyncPrimOpTake_exit;
+			}
+	if (psSyncPrimOpTakeIN->ui32ClientSyncCount != 0)
+	{
+		ui32FenceValueInt = OSAllocMem(psSyncPrimOpTakeIN->ui32ClientSyncCount * sizeof(IMG_UINT32));
+		if (!ui32FenceValueInt)
+		{
+			psSyncPrimOpTakeOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto SyncPrimOpTake_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psSyncPrimOpTakeIN->pui32FenceValue, psSyncPrimOpTakeIN->ui32ClientSyncCount * sizeof(IMG_UINT32))
+				|| (OSCopyFromUser(NULL, ui32FenceValueInt, psSyncPrimOpTakeIN->pui32FenceValue,
+				psSyncPrimOpTakeIN->ui32ClientSyncCount * sizeof(IMG_UINT32)) != PVRSRV_OK) )
+			{
+				psSyncPrimOpTakeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto SyncPrimOpTake_exit;
+			}
+	if (psSyncPrimOpTakeIN->ui32ClientSyncCount != 0)
+	{
+		ui32UpdateValueInt = OSAllocMem(psSyncPrimOpTakeIN->ui32ClientSyncCount * sizeof(IMG_UINT32));
+		if (!ui32UpdateValueInt)
+		{
+			psSyncPrimOpTakeOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto SyncPrimOpTake_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psSyncPrimOpTakeIN->pui32UpdateValue, psSyncPrimOpTakeIN->ui32ClientSyncCount * sizeof(IMG_UINT32))
+				|| (OSCopyFromUser(NULL, ui32UpdateValueInt, psSyncPrimOpTakeIN->pui32UpdateValue,
+				psSyncPrimOpTakeIN->ui32ClientSyncCount * sizeof(IMG_UINT32)) != PVRSRV_OK) )
+			{
+				psSyncPrimOpTakeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto SyncPrimOpTake_exit;
+			}
+	if (psSyncPrimOpTakeIN->ui32ServerSyncCount != 0)
+	{
+		ui32ServerFlagsInt = OSAllocMem(psSyncPrimOpTakeIN->ui32ServerSyncCount * sizeof(IMG_UINT32));
+		if (!ui32ServerFlagsInt)
+		{
+			psSyncPrimOpTakeOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+			goto SyncPrimOpTake_exit;
+		}
+	}
+
+			/* Copy the data over */
+			if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psSyncPrimOpTakeIN->pui32ServerFlags, psSyncPrimOpTakeIN->ui32ServerSyncCount * sizeof(IMG_UINT32))
+				|| (OSCopyFromUser(NULL, ui32ServerFlagsInt, psSyncPrimOpTakeIN->pui32ServerFlags,
+				psSyncPrimOpTakeIN->ui32ServerSyncCount * sizeof(IMG_UINT32)) != PVRSRV_OK) )
+			{
+				psSyncPrimOpTakeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+				goto SyncPrimOpTake_exit;
+			}
+
+
+
+				{
+					/* Look up the address from the handle */
+					psSyncPrimOpTakeOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psServerCookieInt,
+											psSyncPrimOpTakeIN->hServerCookie,
+											PVRSRV_HANDLE_TYPE_SERVER_OP_COOKIE);
+					if(psSyncPrimOpTakeOUT->eError != PVRSRV_OK)
+					{
+						goto SyncPrimOpTake_exit;
+					}
+				}
+
+
+	psSyncPrimOpTakeOUT->eError =
+		PVRSRVSyncPrimOpTakeKM(
+					psServerCookieInt,
+					psSyncPrimOpTakeIN->ui32ClientSyncCount,
+					ui32FlagsInt,
+					ui32FenceValueInt,
+					ui32UpdateValueInt,
+					psSyncPrimOpTakeIN->ui32ServerSyncCount,
+					ui32ServerFlagsInt);
+
+
+
+
+SyncPrimOpTake_exit:
+	if (ui32FlagsInt)
+		OSFreeMem(ui32FlagsInt);
+	if (ui32FenceValueInt)
+		OSFreeMem(ui32FenceValueInt);
+	if (ui32UpdateValueInt)
+		OSFreeMem(ui32UpdateValueInt);
+	if (ui32ServerFlagsInt)
+		OSFreeMem(ui32ServerFlagsInt);
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeSyncPrimOpReady(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_SYNCPRIMOPREADY *psSyncPrimOpReadyIN,
+					  PVRSRV_BRIDGE_OUT_SYNCPRIMOPREADY *psSyncPrimOpReadyOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	SERVER_OP_COOKIE * psServerCookieInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psSyncPrimOpReadyOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psServerCookieInt,
+											psSyncPrimOpReadyIN->hServerCookie,
+											PVRSRV_HANDLE_TYPE_SERVER_OP_COOKIE);
+					if(psSyncPrimOpReadyOUT->eError != PVRSRV_OK)
+					{
+						goto SyncPrimOpReady_exit;
+					}
+				}
+
+
+	psSyncPrimOpReadyOUT->eError =
+		PVRSRVSyncPrimOpReadyKM(
+					psServerCookieInt,
+					&psSyncPrimOpReadyOUT->bReady);
+
+
+
+
+SyncPrimOpReady_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeSyncPrimOpComplete(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_SYNCPRIMOPCOMPLETE *psSyncPrimOpCompleteIN,
+					  PVRSRV_BRIDGE_OUT_SYNCPRIMOPCOMPLETE *psSyncPrimOpCompleteOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	SERVER_OP_COOKIE * psServerCookieInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psSyncPrimOpCompleteOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psServerCookieInt,
+											psSyncPrimOpCompleteIN->hServerCookie,
+											PVRSRV_HANDLE_TYPE_SERVER_OP_COOKIE);
+					if(psSyncPrimOpCompleteOUT->eError != PVRSRV_OK)
+					{
+						goto SyncPrimOpComplete_exit;
+					}
+				}
+
+
+	psSyncPrimOpCompleteOUT->eError =
+		PVRSRVSyncPrimOpCompleteKM(
+					psServerCookieInt);
+
+
+
+
+SyncPrimOpComplete_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeSyncPrimOpDestroy(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_SYNCPRIMOPDESTROY *psSyncPrimOpDestroyIN,
+					  PVRSRV_BRIDGE_OUT_SYNCPRIMOPDESTROY *psSyncPrimOpDestroyOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+	psSyncPrimOpDestroyOUT->eError =
+		PVRSRVReleaseHandle(psConnection->psHandleBase,
+					(IMG_HANDLE) psSyncPrimOpDestroyIN->hServerCookie,
+					PVRSRV_HANDLE_TYPE_SERVER_OP_COOKIE);
+	if ((psSyncPrimOpDestroyOUT->eError != PVRSRV_OK) && (psSyncPrimOpDestroyOUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_ASSERT(0);
+		goto SyncPrimOpDestroy_exit;
+	}
+
+
+
+SyncPrimOpDestroy_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeSyncPrimPDump(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_SYNCPRIMPDUMP *psSyncPrimPDumpIN,
+					  PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMP *psSyncPrimPDumpOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	SYNC_PRIMITIVE_BLOCK * psSyncHandleInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psSyncPrimPDumpOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psSyncHandleInt,
+											psSyncPrimPDumpIN->hSyncHandle,
+											PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+					if(psSyncPrimPDumpOUT->eError != PVRSRV_OK)
+					{
+						goto SyncPrimPDump_exit;
+					}
+				}
+
+
+	psSyncPrimPDumpOUT->eError =
+		PVRSRVSyncPrimPDumpKM(
+					psSyncHandleInt,
+					psSyncPrimPDumpIN->ui32Offset);
+
+
+
+
+SyncPrimPDump_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeSyncPrimPDumpValue(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPVALUE *psSyncPrimPDumpValueIN,
+					  PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPVALUE *psSyncPrimPDumpValueOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	SYNC_PRIMITIVE_BLOCK * psSyncHandleInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psSyncPrimPDumpValueOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psSyncHandleInt,
+											psSyncPrimPDumpValueIN->hSyncHandle,
+											PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+					if(psSyncPrimPDumpValueOUT->eError != PVRSRV_OK)
+					{
+						goto SyncPrimPDumpValue_exit;
+					}
+				}
+
+
+	psSyncPrimPDumpValueOUT->eError =
+		PVRSRVSyncPrimPDumpValueKM(
+					psSyncHandleInt,
+					psSyncPrimPDumpValueIN->ui32Offset,
+					psSyncPrimPDumpValueIN->ui32Value);
+
+
+
+
+SyncPrimPDumpValue_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeSyncPrimPDumpPol(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPPOL *psSyncPrimPDumpPolIN,
+					  PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPPOL *psSyncPrimPDumpPolOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	SYNC_PRIMITIVE_BLOCK * psSyncHandleInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psSyncPrimPDumpPolOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psSyncHandleInt,
+											psSyncPrimPDumpPolIN->hSyncHandle,
+											PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+					if(psSyncPrimPDumpPolOUT->eError != PVRSRV_OK)
+					{
+						goto SyncPrimPDumpPol_exit;
+					}
+				}
+
+
+	psSyncPrimPDumpPolOUT->eError =
+		PVRSRVSyncPrimPDumpPolKM(
+					psSyncHandleInt,
+					psSyncPrimPDumpPolIN->ui32Offset,
+					psSyncPrimPDumpPolIN->ui32Value,
+					psSyncPrimPDumpPolIN->ui32Mask,
+					psSyncPrimPDumpPolIN->eOperator,
+					psSyncPrimPDumpPolIN->uiPDumpFlags);
+
+
+
+
+SyncPrimPDumpPol_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeSyncPrimOpPDumpPol(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_SYNCPRIMOPPDUMPPOL *psSyncPrimOpPDumpPolIN,
+					  PVRSRV_BRIDGE_OUT_SYNCPRIMOPPDUMPPOL *psSyncPrimOpPDumpPolOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	SERVER_OP_COOKIE * psServerCookieInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psSyncPrimOpPDumpPolOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psServerCookieInt,
+											psSyncPrimOpPDumpPolIN->hServerCookie,
+											PVRSRV_HANDLE_TYPE_SERVER_OP_COOKIE);
+					if(psSyncPrimOpPDumpPolOUT->eError != PVRSRV_OK)
+					{
+						goto SyncPrimOpPDumpPol_exit;
+					}
+				}
+
+
+	psSyncPrimOpPDumpPolOUT->eError =
+		PVRSRVSyncPrimOpPDumpPolKM(
+					psServerCookieInt,
+					psSyncPrimOpPDumpPolIN->eOperator,
+					psSyncPrimOpPDumpPolIN->uiPDumpFlags);
+
+
+
+
+SyncPrimOpPDumpPol_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeSyncPrimPDumpCBP(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPCBP *psSyncPrimPDumpCBPIN,
+					  PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPCBP *psSyncPrimPDumpCBPOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	SYNC_PRIMITIVE_BLOCK * psSyncHandleInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psSyncPrimPDumpCBPOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psSyncHandleInt,
+											psSyncPrimPDumpCBPIN->hSyncHandle,
+											PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+					if(psSyncPrimPDumpCBPOUT->eError != PVRSRV_OK)
+					{
+						goto SyncPrimPDumpCBP_exit;
+					}
+				}
+
+
+	psSyncPrimPDumpCBPOUT->eError =
+		PVRSRVSyncPrimPDumpCBPKM(
+					psSyncHandleInt,
+					psSyncPrimPDumpCBPIN->ui32Offset,
+					psSyncPrimPDumpCBPIN->uiWriteOffset,
+					psSyncPrimPDumpCBPIN->uiPacketSize,
+					psSyncPrimPDumpCBPIN->uiBufferSize);
+
+
+
+
+SyncPrimPDumpCBP_exit:
+
+	return 0;
+}
+
+
+
+/* *************************************************************************** 
+ * Server bridge dispatch related glue 
+ */
+
+
+PVRSRV_ERROR InitSYNCBridge(IMG_VOID);
+PVRSRV_ERROR DeinitSYNCBridge(IMG_VOID);
+
+/*
+ * Register all SYNC functions with services
+ */
+PVRSRV_ERROR InitSYNCBridge(IMG_VOID)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_ALLOCSYNCPRIMITIVEBLOCK, PVRSRVBridgeAllocSyncPrimitiveBlock,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_FREESYNCPRIMITIVEBLOCK, PVRSRVBridgeFreeSyncPrimitiveBlock,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMSET, PVRSRVBridgeSyncPrimSet,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SERVERSYNCPRIMSET, PVRSRVBridgeServerSyncPrimSet,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCRECORDREMOVEBYHANDLE, PVRSRVBridgeSyncRecordRemoveByHandle,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCRECORDADD, PVRSRVBridgeSyncRecordAdd,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SERVERSYNCALLOC, PVRSRVBridgeServerSyncAlloc,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SERVERSYNCFREE, PVRSRVBridgeServerSyncFree,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SERVERSYNCQUEUEHWOP, PVRSRVBridgeServerSyncQueueHWOp,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SERVERSYNCGETSTATUS, PVRSRVBridgeServerSyncGetStatus,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMOPCREATE, PVRSRVBridgeSyncPrimOpCreate,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMOPTAKE, PVRSRVBridgeSyncPrimOpTake,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMOPREADY, PVRSRVBridgeSyncPrimOpReady,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMOPCOMPLETE, PVRSRVBridgeSyncPrimOpComplete,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMOPDESTROY, PVRSRVBridgeSyncPrimOpDestroy,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMP, PVRSRVBridgeSyncPrimPDump,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPVALUE, PVRSRVBridgeSyncPrimPDumpValue,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPPOL, PVRSRVBridgeSyncPrimPDumpPol,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMOPPDUMPPOL, PVRSRVBridgeSyncPrimOpPDumpPol,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPCBP, PVRSRVBridgeSyncPrimPDumpCBP,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all sync functions with services
+ */
+PVRSRV_ERROR DeinitSYNCBridge(IMG_VOID)
+{
+	return PVRSRV_OK;
+}
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/syncsexport_bridge/common_syncsexport_bridge.h b/drivers/external_drivers/intel_media/graphics/rgx/generated/syncsexport_bridge/common_syncsexport_bridge.h
new file mode 100644
index 0000000..978b143
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/syncsexport_bridge/common_syncsexport_bridge.h
@@ -0,0 +1,114 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common bridge header for syncsexport
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures that are used by both
+                the client and sever side of the bridge for syncsexport
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_SYNCSEXPORT_BRIDGE_H
+#define COMMON_SYNCSEXPORT_BRIDGE_H
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+
+
+#define PVRSRV_BRIDGE_SYNCSEXPORT_CMD_FIRST			0
+#define PVRSRV_BRIDGE_SYNCSEXPORT_SYNCPRIMSERVERSECUREEXPORT			PVRSRV_BRIDGE_SYNCSEXPORT_CMD_FIRST+0
+#define PVRSRV_BRIDGE_SYNCSEXPORT_SYNCPRIMSERVERSECUREUNEXPORT			PVRSRV_BRIDGE_SYNCSEXPORT_CMD_FIRST+1
+#define PVRSRV_BRIDGE_SYNCSEXPORT_SYNCPRIMSERVERSECUREIMPORT			PVRSRV_BRIDGE_SYNCSEXPORT_CMD_FIRST+2
+#define PVRSRV_BRIDGE_SYNCSEXPORT_CMD_LAST			(PVRSRV_BRIDGE_SYNCSEXPORT_CMD_FIRST+2)
+
+
+/*******************************************
+            SyncPrimServerSecureExport          
+ *******************************************/
+
+/* Bridge in structure for SyncPrimServerSecureExport */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMSERVERSECUREEXPORT_TAG
+{
+	IMG_HANDLE hSyncHandle;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMSERVERSECUREEXPORT;
+
+
+/* Bridge out structure for SyncPrimServerSecureExport */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMSERVERSECUREEXPORT_TAG
+{
+	IMG_SECURE_TYPE Export;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMSERVERSECUREEXPORT;
+
+/*******************************************
+            SyncPrimServerSecureUnexport          
+ *******************************************/
+
+/* Bridge in structure for SyncPrimServerSecureUnexport */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMSERVERSECUREUNEXPORT_TAG
+{
+	IMG_HANDLE hExport;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMSERVERSECUREUNEXPORT;
+
+
+/* Bridge out structure for SyncPrimServerSecureUnexport */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMSERVERSECUREUNEXPORT_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMSERVERSECUREUNEXPORT;
+
+/*******************************************
+            SyncPrimServerSecureImport          
+ *******************************************/
+
+/* Bridge in structure for SyncPrimServerSecureImport */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMSERVERSECUREIMPORT_TAG
+{
+	IMG_SECURE_TYPE Export;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMSERVERSECUREIMPORT;
+
+
+/* Bridge out structure for SyncPrimServerSecureImport */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMSERVERSECUREIMPORT_TAG
+{
+	IMG_HANDLE hSyncHandle;
+	IMG_UINT32 ui32SyncPrimVAddr;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMSERVERSECUREIMPORT;
+
+#endif /* COMMON_SYNCSEXPORT_BRIDGE_H */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/syncsexport_bridge/server_syncsexport_bridge.c b/drivers/external_drivers/intel_media/graphics/rgx/generated/syncsexport_bridge/server_syncsexport_bridge.c
new file mode 100644
index 0000000..095cbd7
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/syncsexport_bridge/server_syncsexport_bridge.c
@@ -0,0 +1,269 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server bridge for syncsexport
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for syncsexport
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <asm/uaccess.h>
+
+#include "img_defs.h"
+
+#include "sync_server.h"
+
+
+#include "common_syncsexport_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#if defined (SUPPORT_AUTH)
+#include "osauth.h"
+#endif
+
+#include <linux/slab.h>
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+ 
+static IMG_INT
+PVRSRVBridgeSyncPrimServerSecureExport(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_SYNCPRIMSERVERSECUREEXPORT *psSyncPrimServerSecureExportIN,
+					  PVRSRV_BRIDGE_OUT_SYNCPRIMSERVERSECUREEXPORT *psSyncPrimServerSecureExportOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	SERVER_SYNC_PRIMITIVE * psSyncHandleInt = IMG_NULL;
+	SERVER_SYNC_EXPORT * psExportInt = IMG_NULL;
+	IMG_HANDLE hExportInt = IMG_NULL;
+	CONNECTION_DATA *psSecureConnection;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psSyncPrimServerSecureExportOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &psSyncHandleInt,
+											psSyncPrimServerSecureExportIN->hSyncHandle,
+											PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+					if(psSyncPrimServerSecureExportOUT->eError != PVRSRV_OK)
+					{
+						goto SyncPrimServerSecureExport_exit;
+					}
+				}
+
+
+	psSyncPrimServerSecureExportOUT->eError =
+		PVRSRVSyncPrimServerSecureExportKM(psConnection,
+					psSyncHandleInt,
+					&psSyncPrimServerSecureExportOUT->Export,
+					&psExportInt, &psSecureConnection);
+	/* Exit early if bridged call fails */
+	if(psSyncPrimServerSecureExportOUT->eError != PVRSRV_OK)
+	{
+		goto SyncPrimServerSecureExport_exit;
+	}
+
+
+	psSyncPrimServerSecureExportOUT->eError = PVRSRVAllocHandle(psSecureConnection->psHandleBase,
+							&hExportInt,
+							(IMG_VOID *) psExportInt,
+							PVRSRV_HANDLE_TYPE_SERVER_SYNC_EXPORT,
+							PVRSRV_HANDLE_ALLOC_FLAG_SHARED
+							,(PFN_HANDLE_RELEASE)&PVRSRVSyncPrimServerSecureUnexportKM);
+	if (psSyncPrimServerSecureExportOUT->eError != PVRSRV_OK)
+	{
+		goto SyncPrimServerSecureExport_exit;
+	}
+
+
+
+
+SyncPrimServerSecureExport_exit:
+	if (psSyncPrimServerSecureExportOUT->eError != PVRSRV_OK)
+	{
+		if (psExportInt)
+		{
+			PVRSRVSyncPrimServerSecureUnexportKM(psExportInt);
+		}
+	}
+
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeSyncPrimServerSecureUnexport(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_SYNCPRIMSERVERSECUREUNEXPORT *psSyncPrimServerSecureUnexportIN,
+					  PVRSRV_BRIDGE_OUT_SYNCPRIMSERVERSECUREUNEXPORT *psSyncPrimServerSecureUnexportOUT,
+					 CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+	psSyncPrimServerSecureUnexportOUT->eError =
+		PVRSRVReleaseHandle(psConnection->psHandleBase,
+					(IMG_HANDLE) psSyncPrimServerSecureUnexportIN->hExport,
+					PVRSRV_HANDLE_TYPE_SERVER_SYNC_EXPORT);
+	if ((psSyncPrimServerSecureUnexportOUT->eError != PVRSRV_OK) && (psSyncPrimServerSecureUnexportOUT->eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_ASSERT(0);
+		goto SyncPrimServerSecureUnexport_exit;
+	}
+
+
+
+SyncPrimServerSecureUnexport_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeSyncPrimServerSecureImport(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_SYNCPRIMSERVERSECUREIMPORT *psSyncPrimServerSecureImportIN,
+					  PVRSRV_BRIDGE_OUT_SYNCPRIMSERVERSECUREIMPORT *psSyncPrimServerSecureImportOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	SERVER_SYNC_PRIMITIVE * psSyncHandleInt = IMG_NULL;
+
+
+
+
+
+
+
+	psSyncPrimServerSecureImportOUT->eError =
+		PVRSRVSyncPrimServerSecureImportKM(
+					psSyncPrimServerSecureImportIN->Export,
+					&psSyncHandleInt,
+					&psSyncPrimServerSecureImportOUT->ui32SyncPrimVAddr);
+	/* Exit early if bridged call fails */
+	if(psSyncPrimServerSecureImportOUT->eError != PVRSRV_OK)
+	{
+		goto SyncPrimServerSecureImport_exit;
+	}
+
+
+	psSyncPrimServerSecureImportOUT->eError = PVRSRVAllocHandle(psConnection->psHandleBase,
+							&psSyncPrimServerSecureImportOUT->hSyncHandle,
+							(IMG_VOID *) psSyncHandleInt,
+							PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+							,(PFN_HANDLE_RELEASE)&PVRSRVServerSyncFreeKM);
+	if (psSyncPrimServerSecureImportOUT->eError != PVRSRV_OK)
+	{
+		goto SyncPrimServerSecureImport_exit;
+	}
+
+
+
+
+SyncPrimServerSecureImport_exit:
+	if (psSyncPrimServerSecureImportOUT->eError != PVRSRV_OK)
+	{
+		if (psSyncHandleInt)
+		{
+			PVRSRVServerSyncFreeKM(psSyncHandleInt);
+		}
+	}
+
+
+	return 0;
+}
+
+
+
+/* *************************************************************************** 
+ * Server bridge dispatch related glue 
+ */
+
+
+PVRSRV_ERROR InitSYNCSEXPORTBridge(IMG_VOID);
+PVRSRV_ERROR DeinitSYNCSEXPORTBridge(IMG_VOID);
+
+/*
+ * Register all SYNCSEXPORT functions with services
+ */
+PVRSRV_ERROR InitSYNCSEXPORTBridge(IMG_VOID)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCSEXPORT, PVRSRV_BRIDGE_SYNCSEXPORT_SYNCPRIMSERVERSECUREEXPORT, PVRSRVBridgeSyncPrimServerSecureExport,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCSEXPORT, PVRSRV_BRIDGE_SYNCSEXPORT_SYNCPRIMSERVERSECUREUNEXPORT, PVRSRVBridgeSyncPrimServerSecureUnexport,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCSEXPORT, PVRSRV_BRIDGE_SYNCSEXPORT_SYNCPRIMSERVERSECUREIMPORT, PVRSRVBridgeSyncPrimServerSecureImport,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all syncsexport functions with services
+ */
+PVRSRV_ERROR DeinitSYNCSEXPORTBridge(IMG_VOID)
+{
+	return PVRSRV_OK;
+}
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/timerquery_bridge/common_timerquery_bridge.h b/drivers/external_drivers/intel_media/graphics/rgx/generated/timerquery_bridge/common_timerquery_bridge.h
new file mode 100644
index 0000000..ffd457a
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/timerquery_bridge/common_timerquery_bridge.h
@@ -0,0 +1,135 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common bridge header for timerquery
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Declares common defines and structures that are used by both
+                the client and sever side of the bridge for timerquery
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef COMMON_TIMERQUERY_BRIDGE_H
+#define COMMON_TIMERQUERY_BRIDGE_H
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+
+
+#define PVRSRV_BRIDGE_TIMERQUERY_CMD_FIRST			0
+#define PVRSRV_BRIDGE_TIMERQUERY_RGXBEGINTIMERQUERY			PVRSRV_BRIDGE_TIMERQUERY_CMD_FIRST+0
+#define PVRSRV_BRIDGE_TIMERQUERY_RGXENDTIMERQUERY			PVRSRV_BRIDGE_TIMERQUERY_CMD_FIRST+1
+#define PVRSRV_BRIDGE_TIMERQUERY_RGXQUERYTIMER			PVRSRV_BRIDGE_TIMERQUERY_CMD_FIRST+2
+#define PVRSRV_BRIDGE_TIMERQUERY_RGXCURRENTTIME			PVRSRV_BRIDGE_TIMERQUERY_CMD_FIRST+3
+#define PVRSRV_BRIDGE_TIMERQUERY_CMD_LAST			(PVRSRV_BRIDGE_TIMERQUERY_CMD_FIRST+3)
+
+
+/*******************************************
+            RGXBeginTimerQuery          
+ *******************************************/
+
+/* Bridge in structure for RGXBeginTimerQuery */
+typedef struct PVRSRV_BRIDGE_IN_RGXBEGINTIMERQUERY_TAG
+{
+	IMG_HANDLE hDevNode;
+	IMG_UINT32 ui32QueryId;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXBEGINTIMERQUERY;
+
+
+/* Bridge out structure for RGXBeginTimerQuery */
+typedef struct PVRSRV_BRIDGE_OUT_RGXBEGINTIMERQUERY_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXBEGINTIMERQUERY;
+
+/*******************************************
+            RGXEndTimerQuery          
+ *******************************************/
+
+/* Bridge in structure for RGXEndTimerQuery */
+typedef struct PVRSRV_BRIDGE_IN_RGXENDTIMERQUERY_TAG
+{
+	IMG_HANDLE hDevNode;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXENDTIMERQUERY;
+
+
+/* Bridge out structure for RGXEndTimerQuery */
+typedef struct PVRSRV_BRIDGE_OUT_RGXENDTIMERQUERY_TAG
+{
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXENDTIMERQUERY;
+
+/*******************************************
+            RGXQueryTimer          
+ *******************************************/
+
+/* Bridge in structure for RGXQueryTimer */
+typedef struct PVRSRV_BRIDGE_IN_RGXQUERYTIMER_TAG
+{
+	IMG_HANDLE hDevNode;
+	IMG_UINT32 ui32QueryId;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXQUERYTIMER;
+
+
+/* Bridge out structure for RGXQueryTimer */
+typedef struct PVRSRV_BRIDGE_OUT_RGXQUERYTIMER_TAG
+{
+	IMG_UINT64 ui64StartTime;
+	IMG_UINT64 ui64EndTime;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXQUERYTIMER;
+
+/*******************************************
+            RGXCurrentTime          
+ *******************************************/
+
+/* Bridge in structure for RGXCurrentTime */
+typedef struct PVRSRV_BRIDGE_IN_RGXCURRENTTIME_TAG
+{
+	IMG_HANDLE hDevNode;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCURRENTTIME;
+
+
+/* Bridge out structure for RGXCurrentTime */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCURRENTTIME_TAG
+{
+	IMG_UINT64 ui64Time;
+	PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCURRENTTIME;
+
+#endif /* COMMON_TIMERQUERY_BRIDGE_H */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/generated/timerquery_bridge/server_timerquery_bridge.c b/drivers/external_drivers/intel_media/graphics/rgx/generated/timerquery_bridge/server_timerquery_bridge.c
new file mode 100644
index 0000000..6b64f9d
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/generated/timerquery_bridge/server_timerquery_bridge.c
@@ -0,0 +1,283 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server bridge for timerquery
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side of the bridge for timerquery
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+#include <asm/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxtimerquery.h"
+
+
+#include "common_timerquery_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#include "rgx_bridge.h"
+#include "srvcore.h"
+#include "handle.h"
+
+#if defined (SUPPORT_AUTH)
+#include "osauth.h"
+#endif
+
+#include <linux/slab.h>
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+ 
+static IMG_INT
+PVRSRVBridgeRGXBeginTimerQuery(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXBEGINTIMERQUERY *psRGXBeginTimerQueryIN,
+					  PVRSRV_BRIDGE_OUT_RGXBEGINTIMERQUERY *psRGXBeginTimerQueryOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hDevNodeInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXBeginTimerQueryOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &hDevNodeInt,
+											psRGXBeginTimerQueryIN->hDevNode,
+											PVRSRV_HANDLE_TYPE_DEV_NODE);
+					if(psRGXBeginTimerQueryOUT->eError != PVRSRV_OK)
+					{
+						goto RGXBeginTimerQuery_exit;
+					}
+				}
+
+
+	psRGXBeginTimerQueryOUT->eError =
+		PVRSRVRGXBeginTimerQueryKM(
+					hDevNodeInt,
+					psRGXBeginTimerQueryIN->ui32QueryId);
+
+
+
+
+RGXBeginTimerQuery_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXEndTimerQuery(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXENDTIMERQUERY *psRGXEndTimerQueryIN,
+					  PVRSRV_BRIDGE_OUT_RGXENDTIMERQUERY *psRGXEndTimerQueryOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hDevNodeInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXEndTimerQueryOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &hDevNodeInt,
+											psRGXEndTimerQueryIN->hDevNode,
+											PVRSRV_HANDLE_TYPE_DEV_NODE);
+					if(psRGXEndTimerQueryOUT->eError != PVRSRV_OK)
+					{
+						goto RGXEndTimerQuery_exit;
+					}
+				}
+
+
+	psRGXEndTimerQueryOUT->eError =
+		PVRSRVRGXEndTimerQueryKM(
+					hDevNodeInt);
+
+
+
+
+RGXEndTimerQuery_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXQueryTimer(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXQUERYTIMER *psRGXQueryTimerIN,
+					  PVRSRV_BRIDGE_OUT_RGXQUERYTIMER *psRGXQueryTimerOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hDevNodeInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXQueryTimerOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &hDevNodeInt,
+											psRGXQueryTimerIN->hDevNode,
+											PVRSRV_HANDLE_TYPE_DEV_NODE);
+					if(psRGXQueryTimerOUT->eError != PVRSRV_OK)
+					{
+						goto RGXQueryTimer_exit;
+					}
+				}
+
+
+	psRGXQueryTimerOUT->eError =
+		PVRSRVRGXQueryTimerKM(
+					hDevNodeInt,
+					psRGXQueryTimerIN->ui32QueryId,
+					&psRGXQueryTimerOUT->ui64StartTime,
+					&psRGXQueryTimerOUT->ui64EndTime);
+
+
+
+
+RGXQueryTimer_exit:
+
+	return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXCurrentTime(IMG_UINT32 ui32DispatchTableEntry,
+					  PVRSRV_BRIDGE_IN_RGXCURRENTTIME *psRGXCurrentTimeIN,
+					  PVRSRV_BRIDGE_OUT_RGXCURRENTTIME *psRGXCurrentTimeOUT,
+					 CONNECTION_DATA *psConnection)
+{
+	IMG_HANDLE hDevNodeInt = IMG_NULL;
+
+
+
+
+
+
+
+				{
+					/* Look up the address from the handle */
+					psRGXCurrentTimeOUT->eError =
+						PVRSRVLookupHandle(psConnection->psHandleBase,
+											(IMG_VOID **) &hDevNodeInt,
+											psRGXCurrentTimeIN->hDevNode,
+											PVRSRV_HANDLE_TYPE_DEV_NODE);
+					if(psRGXCurrentTimeOUT->eError != PVRSRV_OK)
+					{
+						goto RGXCurrentTime_exit;
+					}
+				}
+
+
+	psRGXCurrentTimeOUT->eError =
+		PVRSRVRGXCurrentTime(
+					hDevNodeInt,
+					&psRGXCurrentTimeOUT->ui64Time);
+
+
+
+
+RGXCurrentTime_exit:
+
+	return 0;
+}
+
+
+
+/* *************************************************************************** 
+ * Server bridge dispatch related glue 
+ */
+
+
+PVRSRV_ERROR InitTIMERQUERYBridge(IMG_VOID);
+PVRSRV_ERROR DeinitTIMERQUERYBridge(IMG_VOID);
+
+/*
+ * Register all TIMERQUERY functions with services
+ */
+PVRSRV_ERROR InitTIMERQUERYBridge(IMG_VOID)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_TIMERQUERY, PVRSRV_BRIDGE_TIMERQUERY_RGXBEGINTIMERQUERY, PVRSRVBridgeRGXBeginTimerQuery,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_TIMERQUERY, PVRSRV_BRIDGE_TIMERQUERY_RGXENDTIMERQUERY, PVRSRVBridgeRGXEndTimerQuery,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_TIMERQUERY, PVRSRV_BRIDGE_TIMERQUERY_RGXQUERYTIMER, PVRSRVBridgeRGXQueryTimer,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_TIMERQUERY, PVRSRV_BRIDGE_TIMERQUERY_RGXCURRENTTIME, PVRSRVBridgeRGXCurrentTime,
+					IMG_NULL, IMG_NULL,
+					0, 0);
+
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all timerquery functions with services
+ */
+PVRSRV_ERROR DeinitTIMERQUERYBridge(IMG_VOID)
+{
+	return PVRSRV_OK;
+}
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/configs/rgxconfig_km_1.V.2.0.h b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/configs/rgxconfig_km_1.V.2.0.h
new file mode 100644
index 0000000..78a8b4c
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/configs/rgxconfig_km_1.V.2.0.h
@@ -0,0 +1,69 @@
+/*************************************************************************/ /*!
+@Title          RGX Config BVNC 1.V.2.0
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_1_V_2_0_H_
+#define _RGXCONFIG_KM_1_V_2_0_H_
+
+/***** Automatically generated file (3/25/2015 5:08:35 AM): Do not edit manually ********************/
+/***** Timestamp:  (3/25/2015 5:08:35 AM)************************************************************/
+
+#define RGX_BNC_KM_B 1
+#define RGX_BNC_KM_N 2
+#define RGX_BNC_KM_C 0
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_NUM_CLUSTERS (2)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (128*1024)
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40)
+#define RGX_FEATURE_AXI_ACELITE 
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_TLA 
+#define RGX_FEATURE_GS_RTA_SUPPORT 
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (3)
+#define RGX_FEATURE_META (MTP218)
+#define RGX_FEATURE_META_COREMEM_SIZE (0)
+#define RGX_FEATURE_COMPUTE 
+
+
+#endif /* _RGXCONFIG_1_V_2_0_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/configs/rgxconfig_km_1.V.2.20.h b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/configs/rgxconfig_km_1.V.2.20.h
new file mode 100644
index 0000000..ea00da1
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/configs/rgxconfig_km_1.V.2.20.h
@@ -0,0 +1,69 @@
+/*************************************************************************/ /*!
+@Title          RGX Config BVNC 1.V.2.20
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_1_V_2_20_H_
+#define _RGXCONFIG_KM_1_V_2_20_H_
+
+/***** Automatically generated file (3/25/2015 5:08:35 AM): Do not edit manually ********************/
+/***** Timestamp:  (3/25/2015 5:08:35 AM)************************************************************/
+
+#define RGX_BNC_KM_B 1
+#define RGX_BNC_KM_N 2
+#define RGX_BNC_KM_C 20
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_NUM_CLUSTERS (2)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (128*1024)
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40)
+#define RGX_FEATURE_AXI_ACELITE 
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_TLA 
+#define RGX_FEATURE_GS_RTA_SUPPORT 
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (3)
+#define RGX_FEATURE_META (MTP218)
+#define RGX_FEATURE_META_COREMEM_SIZE (0)
+#define RGX_FEATURE_COMPUTE 
+
+
+#endif /* _RGXCONFIG_1_V_2_20_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/configs/rgxconfig_km_1.V.2.30.h b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/configs/rgxconfig_km_1.V.2.30.h
new file mode 100644
index 0000000..83e5408
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/configs/rgxconfig_km_1.V.2.30.h
@@ -0,0 +1,70 @@
+/*************************************************************************/ /*!
+@Title          RGX Config BVNC 1.V.2.30
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_1_V_2_30_H_
+#define _RGXCONFIG_KM_1_V_2_30_H_
+
+/***** Automatically generated file (3/25/2015 5:08:35 AM): Do not edit manually ********************/
+/***** Timestamp:  (3/25/2015 5:08:35 AM)************************************************************/
+
+#define RGX_BNC_KM_B 1
+#define RGX_BNC_KM_N 2
+#define RGX_BNC_KM_C 30
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_NUM_CLUSTERS (2)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (128*1024)
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40)
+#define RGX_FEATURE_AXI_ACELITE 
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_TLA 
+#define RGX_FEATURE_GS_RTA_SUPPORT 
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (3)
+#define RGX_FEATURE_META (MTP218)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (1)
+#define RGX_FEATURE_META_COREMEM_SIZE (0)
+#define RGX_FEATURE_COMPUTE 
+
+
+#endif /* _RGXCONFIG_1_V_2_30_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/configs/rgxconfig_km_1.V.2.5.h b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/configs/rgxconfig_km_1.V.2.5.h
new file mode 100644
index 0000000..35dbddc
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/configs/rgxconfig_km_1.V.2.5.h
@@ -0,0 +1,69 @@
+/*************************************************************************/ /*!
+@Title          RGX Config BVNC 1.V.2.5
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_1_V_2_5_H_
+#define _RGXCONFIG_KM_1_V_2_5_H_
+
+/***** Automatically generated file (3/25/2015 5:08:35 AM): Do not edit manually ********************/
+/***** Timestamp:  (3/25/2015 5:08:35 AM)************************************************************/
+
+#define RGX_BNC_KM_B 1
+#define RGX_BNC_KM_N 2
+#define RGX_BNC_KM_C 5
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_NUM_CLUSTERS (2)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (128*1024)
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40)
+#define RGX_FEATURE_AXI_ACELITE 
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_TLA 
+#define RGX_FEATURE_GS_RTA_SUPPORT 
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (3)
+#define RGX_FEATURE_META (MTP218)
+#define RGX_FEATURE_META_COREMEM_SIZE (0)
+#define RGX_FEATURE_COMPUTE 
+
+
+#endif /* _RGXCONFIG_1_V_2_5_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/configs/rgxconfig_km_1.V.4.12.h b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/configs/rgxconfig_km_1.V.4.12.h
new file mode 100644
index 0000000..be9f47d
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/configs/rgxconfig_km_1.V.4.12.h
@@ -0,0 +1,69 @@
+/*************************************************************************/ /*!
+@Title          RGX Config BVNC 1.V.4.12
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_1_V_4_12_H_
+#define _RGXCONFIG_KM_1_V_4_12_H_
+
+/***** Automatically generated file (3/25/2015 5:08:35 AM): Do not edit manually ********************/
+/***** Timestamp:  (3/25/2015 5:08:35 AM)************************************************************/
+
+#define RGX_BNC_KM_B 1
+#define RGX_BNC_KM_N 4
+#define RGX_BNC_KM_C 12
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_NUM_CLUSTERS (4)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (256*1024)
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_TLA 
+#define RGX_FEATURE_GS_RTA_SUPPORT 
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (3)
+#define RGX_FEATURE_META (MTP218)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (1)
+#define RGX_FEATURE_META_COREMEM_SIZE (0)
+#define RGX_FEATURE_COMPUTE 
+
+
+#endif /* _RGXCONFIG_1_V_4_12_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/configs/rgxconfig_km_1.V.4.15.h b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/configs/rgxconfig_km_1.V.4.15.h
new file mode 100644
index 0000000..7238641
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/configs/rgxconfig_km_1.V.4.15.h
@@ -0,0 +1,69 @@
+/*************************************************************************/ /*!
+@Title          RGX Config BVNC 1.V.4.15
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_1_V_4_15_H_
+#define _RGXCONFIG_KM_1_V_4_15_H_
+
+/***** Automatically generated file (3/25/2015 5:08:35 AM): Do not edit manually ********************/
+/***** Timestamp:  (3/25/2015 5:08:35 AM)************************************************************/
+
+#define RGX_BNC_KM_B 1
+#define RGX_BNC_KM_N 4
+#define RGX_BNC_KM_C 15
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_NUM_CLUSTERS (4)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (256*1024)
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40)
+#define RGX_FEATURE_AXI_ACELITE 
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_TLA 
+#define RGX_FEATURE_GS_RTA_SUPPORT 
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (3)
+#define RGX_FEATURE_META (MTP218)
+#define RGX_FEATURE_META_COREMEM_SIZE (0)
+#define RGX_FEATURE_COMPUTE 
+
+
+#endif /* _RGXCONFIG_1_V_4_15_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/configs/rgxconfig_km_1.V.4.19.h b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/configs/rgxconfig_km_1.V.4.19.h
new file mode 100644
index 0000000..34fa658
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/configs/rgxconfig_km_1.V.4.19.h
@@ -0,0 +1,70 @@
+/*************************************************************************/ /*!
+@Title          RGX Config BVNC 1.V.4.19
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_1_V_4_19_H_
+#define _RGXCONFIG_KM_1_V_4_19_H_
+
+/***** Automatically generated file (3/25/2015 5:08:35 AM): Do not edit manually ********************/
+/***** Timestamp:  (3/25/2015 5:08:35 AM)************************************************************/
+
+#define RGX_BNC_KM_B 1
+#define RGX_BNC_KM_N 4
+#define RGX_BNC_KM_C 19
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_NUM_CLUSTERS (4)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (128*1024)
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40)
+#define RGX_FEATURE_AXI_ACELITE 
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_TLA 
+#define RGX_FEATURE_GS_RTA_SUPPORT 
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (3)
+#define RGX_FEATURE_META (MTP218)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (1)
+#define RGX_FEATURE_META_COREMEM_SIZE (0)
+#define RGX_FEATURE_COMPUTE 
+
+
+#endif /* _RGXCONFIG_1_V_4_19_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/configs/rgxconfig_km_1.V.4.5.h b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/configs/rgxconfig_km_1.V.4.5.h
new file mode 100644
index 0000000..7c52ec4
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/configs/rgxconfig_km_1.V.4.5.h
@@ -0,0 +1,69 @@
+/*************************************************************************/ /*!
+@Title          RGX Config BVNC 1.V.4.5
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_1_V_4_5_H_
+#define _RGXCONFIG_KM_1_V_4_5_H_
+
+/***** Automatically generated file (3/25/2015 5:08:35 AM): Do not edit manually ********************/
+/***** Timestamp:  (3/25/2015 5:08:35 AM)************************************************************/
+
+#define RGX_BNC_KM_B 1
+#define RGX_BNC_KM_N 4
+#define RGX_BNC_KM_C 5
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_NUM_CLUSTERS (4)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (128*1024)
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_TLA 
+#define RGX_FEATURE_GS_RTA_SUPPORT 
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (3)
+#define RGX_FEATURE_META (MTP218)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (1)
+#define RGX_FEATURE_META_COREMEM_SIZE (0)
+#define RGX_FEATURE_COMPUTE 
+
+
+#endif /* _RGXCONFIG_1_V_4_5_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/configs/rgxconfig_km_1.V.4.6.h b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/configs/rgxconfig_km_1.V.4.6.h
new file mode 100644
index 0000000..a385f5d
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/configs/rgxconfig_km_1.V.4.6.h
@@ -0,0 +1,68 @@
+/*************************************************************************/ /*!
+@Title          RGX Config BVNC 1.V.4.6
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_1_V_4_6_H_
+#define _RGXCONFIG_KM_1_V_4_6_H_
+
+/***** Automatically generated file (3/25/2015 5:08:35 AM): Do not edit manually ********************/
+/***** Timestamp:  (3/25/2015 5:08:35 AM)************************************************************/
+
+#define RGX_BNC_KM_B 1
+#define RGX_BNC_KM_N 4
+#define RGX_BNC_KM_C 6
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_NUM_CLUSTERS (4)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (128*1024)
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_TLA 
+#define RGX_FEATURE_GS_RTA_SUPPORT 
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (3)
+#define RGX_FEATURE_META (MTP218)
+#define RGX_FEATURE_META_COREMEM_SIZE (0)
+#define RGX_FEATURE_COMPUTE 
+
+
+#endif /* _RGXCONFIG_1_V_4_6_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/configs/rgxconfig_km_12.V.1.20.h b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/configs/rgxconfig_km_12.V.1.20.h
new file mode 100644
index 0000000..0f5ef18
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/configs/rgxconfig_km_12.V.1.20.h
@@ -0,0 +1,67 @@
+/*************************************************************************/ /*!
+@Title          RGX Config BVNC 12.V.1.20
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_12_V_1_20_H_
+#define _RGXCONFIG_KM_12_V_1_20_H_
+
+/***** Automatically generated file (3/25/2015 5:08:36 AM): Do not edit manually ********************/
+/***** Timestamp:  (3/25/2015 5:08:36 AM)************************************************************/
+
+#define RGX_BNC_KM_B 12
+#define RGX_BNC_KM_N 1
+#define RGX_BNC_KM_C 20
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40)
+#define RGX_FEATURE_AXI_ACELITE 
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_NUM_CLUSTERS (1)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (1)
+#define RGX_FEATURE_META (LTP217)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (0*1024)
+#define RGX_FEATURE_META_COREMEM_SIZE (0)
+#define RGX_FEATURE_COMPUTE 
+
+
+#endif /* _RGXCONFIG_12_V_1_20_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/configs/rgxconfig_km_4.V.2.51.h b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/configs/rgxconfig_km_4.V.2.51.h
new file mode 100644
index 0000000..3230b74
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/configs/rgxconfig_km_4.V.2.51.h
@@ -0,0 +1,72 @@
+/*************************************************************************/ /*!
+@Title          RGX Config BVNC 4.V.2.51
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_4_V_2_51_H_
+#define _RGXCONFIG_KM_4_V_2_51_H_
+
+/***** Automatically generated file (3/25/2015 5:08:36 AM): Do not edit manually ********************/
+/***** Timestamp:  (3/25/2015 5:08:36 AM)************************************************************/
+
+#define RGX_BNC_KM_B 4
+#define RGX_BNC_KM_N 2
+#define RGX_BNC_KM_C 51
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_NUM_CLUSTERS (2)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (128*1024)
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40)
+#define RGX_FEATURE_AXI_ACELITE 
+#define RGX_FEATURE_CLUSTER_GROUPING 
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_TLA 
+#define RGX_FEATURE_GS_RTA_SUPPORT 
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (8)
+#define RGX_FEATURE_META (LTP218)
+#define RGX_FEATURE_XT_TOP_INFRASTRUCTURE 
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (2)
+#define RGX_FEATURE_META_COREMEM_SIZE (32)
+#define RGX_FEATURE_COMPUTE 
+
+
+#endif /* _RGXCONFIG_4_V_2_51_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/configs/rgxconfig_km_4.V.2.52.h b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/configs/rgxconfig_km_4.V.2.52.h
new file mode 100644
index 0000000..dc439c2
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/configs/rgxconfig_km_4.V.2.52.h
@@ -0,0 +1,73 @@
+/*************************************************************************/ /*!
+@Title          RGX Config BVNC 4.V.2.52
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_4_V_2_52_H_
+#define _RGXCONFIG_KM_4_V_2_52_H_
+
+/***** Automatically generated file (3/25/2015 5:08:36 AM): Do not edit manually ********************/
+/***** Timestamp:  (3/25/2015 5:08:36 AM)************************************************************/
+
+#define RGX_BNC_KM_B 4
+#define RGX_BNC_KM_N 2
+#define RGX_BNC_KM_C 52
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_NUM_CLUSTERS (2)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (128*1024)
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40)
+#define RGX_FEATURE_AXI_ACELITE 
+#define RGX_FEATURE_CLUSTER_GROUPING 
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_TLA 
+#define RGX_FEATURE_GS_RTA_SUPPORT 
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (8)
+#define RGX_FEATURE_META (LTP218)
+#define RGX_FEATURE_XT_TOP_INFRASTRUCTURE 
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (2)
+#define RGX_FEATURE_GPU_VIRTUALISATION 
+#define RGX_FEATURE_META_COREMEM_SIZE (32)
+#define RGX_FEATURE_COMPUTE 
+
+
+#endif /* _RGXCONFIG_4_V_2_52_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/configs/rgxconfig_km_4.V.2.57.h b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/configs/rgxconfig_km_4.V.2.57.h
new file mode 100644
index 0000000..55f9261
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/configs/rgxconfig_km_4.V.2.57.h
@@ -0,0 +1,73 @@
+/*************************************************************************/ /*!
+@Title          RGX Config BVNC 4.V.2.57
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_4_V_2_57_H_
+#define _RGXCONFIG_KM_4_V_2_57_H_
+
+/***** Automatically generated file (3/25/2015 5:08:36 AM): Do not edit manually ********************/
+/***** Timestamp:  (3/25/2015 5:08:36 AM)************************************************************/
+
+#define RGX_BNC_KM_B 4
+#define RGX_BNC_KM_N 2
+#define RGX_BNC_KM_C 57
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_NUM_CLUSTERS (2)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (128*1024)
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40)
+#define RGX_FEATURE_AXI_ACELITE 
+#define RGX_FEATURE_CLUSTER_GROUPING 
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_TLA 
+#define RGX_FEATURE_GS_RTA_SUPPORT 
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (8)
+#define RGX_FEATURE_META (LTP218)
+#define RGX_FEATURE_XT_TOP_INFRASTRUCTURE 
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (2)
+#define RGX_FEATURE_GPU_VIRTUALISATION 
+#define RGX_FEATURE_META_COREMEM_SIZE (32)
+#define RGX_FEATURE_COMPUTE 
+
+
+#endif /* _RGXCONFIG_4_V_2_57_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/configs/rgxconfig_km_4.V.2.58.h b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/configs/rgxconfig_km_4.V.2.58.h
new file mode 100644
index 0000000..8c44433
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/configs/rgxconfig_km_4.V.2.58.h
@@ -0,0 +1,73 @@
+/*************************************************************************/ /*!
+@Title          RGX Config BVNC 4.V.2.58
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_4_V_2_58_H_
+#define _RGXCONFIG_KM_4_V_2_58_H_
+
+/***** Automatically generated file (3/25/2015 5:08:37 AM): Do not edit manually ********************/
+/***** Timestamp:  (3/25/2015 5:08:37 AM)************************************************************/
+
+#define RGX_BNC_KM_B 4
+#define RGX_BNC_KM_N 2
+#define RGX_BNC_KM_C 58
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_NUM_CLUSTERS (2)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (128*1024)
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40)
+#define RGX_FEATURE_AXI_ACELITE 
+#define RGX_FEATURE_CLUSTER_GROUPING 
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_TLA 
+#define RGX_FEATURE_GS_RTA_SUPPORT 
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (8)
+#define RGX_FEATURE_META (LTP218)
+#define RGX_FEATURE_XT_TOP_INFRASTRUCTURE 
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (2)
+#define RGX_FEATURE_GPU_VIRTUALISATION 
+#define RGX_FEATURE_META_COREMEM_SIZE (32)
+#define RGX_FEATURE_COMPUTE 
+
+
+#endif /* _RGXCONFIG_4_V_2_58_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/configs/rgxconfig_km_4.V.4.53.h b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/configs/rgxconfig_km_4.V.4.53.h
new file mode 100644
index 0000000..2b408a3
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/configs/rgxconfig_km_4.V.4.53.h
@@ -0,0 +1,72 @@
+/*************************************************************************/ /*!
+@Title          RGX Config BVNC 4.V.4.53
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_4_V_4_53_H_
+#define _RGXCONFIG_KM_4_V_4_53_H_
+
+/***** Automatically generated file (3/25/2015 5:08:36 AM): Do not edit manually ********************/
+/***** Timestamp:  (3/25/2015 5:08:36 AM)************************************************************/
+
+#define RGX_BNC_KM_B 4
+#define RGX_BNC_KM_N 4
+#define RGX_BNC_KM_C 53
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_NUM_CLUSTERS (4)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (128*1024)
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40)
+#define RGX_FEATURE_AXI_ACELITE 
+#define RGX_FEATURE_CLUSTER_GROUPING 
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_TLA 
+#define RGX_FEATURE_GS_RTA_SUPPORT 
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (8)
+#define RGX_FEATURE_META (LTP218)
+#define RGX_FEATURE_XT_TOP_INFRASTRUCTURE 
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (2)
+#define RGX_FEATURE_META_COREMEM_SIZE (32)
+#define RGX_FEATURE_COMPUTE 
+
+
+#endif /* _RGXCONFIG_4_V_4_53_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/configs/rgxconfig_km_4.V.6.62.h b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/configs/rgxconfig_km_4.V.6.62.h
new file mode 100644
index 0000000..5ef29d2
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/configs/rgxconfig_km_4.V.6.62.h
@@ -0,0 +1,74 @@
+/*************************************************************************/ /*!
+@Title          RGX Config BVNC 4.V.6.62
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_4_V_6_62_H_
+#define _RGXCONFIG_KM_4_V_6_62_H_
+
+/***** Automatically generated file (3/25/2015 5:08:36 AM): Do not edit manually ********************/
+/***** Timestamp:  (3/25/2015 5:08:36 AM)************************************************************/
+
+#define RGX_BNC_KM_B 4
+#define RGX_BNC_KM_N 6
+#define RGX_BNC_KM_C 62
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (128*1024)
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40)
+#define RGX_FEATURE_NUM_CLUSTERS (6)
+#define RGX_FEATURE_AXI_ACELITE 
+#define RGX_FEATURE_CLUSTER_GROUPING 
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_SLC_BANKS (4)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_TLA 
+#define RGX_FEATURE_GS_RTA_SUPPORT 
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (8)
+#define RGX_FEATURE_META (LTP218)
+#define RGX_FEATURE_XT_TOP_INFRASTRUCTURE 
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (2)
+#define RGX_FEATURE_GPU_VIRTUALISATION 
+#define RGX_FEATURE_META_COREMEM_SIZE (32)
+#define RGX_FEATURE_COMPUTE 
+
+
+#endif /* _RGXCONFIG_4_V_6_62_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/configs/rgxconfig_km_5.V.1.46.h b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/configs/rgxconfig_km_5.V.1.46.h
new file mode 100644
index 0000000..64541c4
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/configs/rgxconfig_km_5.V.1.46.h
@@ -0,0 +1,70 @@
+/*************************************************************************/ /*!
+@Title          RGX Config BVNC 5.V.1.46
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_5_V_1_46_H_
+#define _RGXCONFIG_KM_5_V_1_46_H_
+
+/***** Automatically generated file (3/25/2015 5:08:36 AM): Do not edit manually ********************/
+/***** Timestamp:  (3/25/2015 5:08:36 AM)************************************************************/
+
+#define RGX_BNC_KM_B 5
+#define RGX_BNC_KM_N 1
+#define RGX_BNC_KM_C 46
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40)
+#define RGX_FEATURE_AXI_ACELITE 
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_NUM_CLUSTERS (1)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (16*1024)
+#define RGX_FEATURE_SLC_BANKS (1)
+#define RGX_FEATURE_GS_RTA_SUPPORT 
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (1)
+#define RGX_FEATURE_META (LTP217)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (1)
+#define RGX_FEATURE_META_COREMEM_SIZE (0)
+#define RGX_FEATURE_COMPUTE 
+
+
+#endif /* _RGXCONFIG_5_V_1_46_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/configs/rgxconfig_km_8.V.2.34.h b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/configs/rgxconfig_km_8.V.2.34.h
new file mode 100644
index 0000000..1df1eb7
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/configs/rgxconfig_km_8.V.2.34.h
@@ -0,0 +1,79 @@
+/*************************************************************************/ /*!
+@Title          RGX Config BVNC 8.V.2.34
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_8_V_2_34_H_
+#define _RGXCONFIG_KM_8_V_2_34_H_
+
+/***** Automatically generated file (3/25/2015 5:08:37 AM): Do not edit manually ********************/
+/***** Timestamp:  (3/25/2015 5:08:37 AM)************************************************************/
+
+#define RGX_BNC_KM_B 8
+#define RGX_BNC_KM_N 2
+#define RGX_BNC_KM_C 34
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_NUM_CLUSTERS (2)
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40)
+#define RGX_FEATURE_AXI_ACELITE 
+#define RGX_FEATURE_CLUSTER_GROUPING 
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_SCALABLE_TE_ARCH (1)
+#define RGX_FEATURE_GS_RTA_SUPPORT 
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (4)
+#define RGX_FEATURE_META (LTP218)
+#define RGX_FEATURE_SLC_VIVT 
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (128)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_S7_CACHE_HIERARCHY 
+#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE 
+#define RGX_FEATURE_SLC_BANKS (2)
+#define RGX_FEATURE_META_DMA_CHANNEL_COUNT (4)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (3)
+#define RGX_FEATURE_META_COREMEM_BANKS (8)
+#define RGX_FEATURE_GPU_VIRTUALISATION 
+#define RGX_FEATURE_META_DMA 
+#define RGX_FEATURE_META_COREMEM_SIZE (64)
+#define RGX_FEATURE_COMPUTE 
+
+
+#endif /* _RGXCONFIG_8_V_2_34_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/configs/rgxconfig_km_8.V.4.38.h b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/configs/rgxconfig_km_8.V.4.38.h
new file mode 100644
index 0000000..c2f3365
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/configs/rgxconfig_km_8.V.4.38.h
@@ -0,0 +1,79 @@
+/*************************************************************************/ /*!
+@Title          RGX Config BVNC 8.V.4.38
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCONFIG_KM_8_V_4_38_H_
+#define _RGXCONFIG_KM_8_V_4_38_H_
+
+/***** Automatically generated file (3/25/2015 5:08:37 AM): Do not edit manually ********************/
+/***** Timestamp:  (3/25/2015 5:08:37 AM)************************************************************/
+
+#define RGX_BNC_KM_B 8
+#define RGX_BNC_KM_N 4
+#define RGX_BNC_KM_C 38
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_NUM_CLUSTERS (4)
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40)
+#define RGX_FEATURE_AXI_ACELITE 
+#define RGX_FEATURE_CLUSTER_GROUPING 
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_SCALABLE_TE_ARCH (1)
+#define RGX_FEATURE_GS_RTA_SUPPORT 
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (8)
+#define RGX_FEATURE_META (LTP218)
+#define RGX_FEATURE_SLC_VIVT 
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (256)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_S7_CACHE_HIERARCHY 
+#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE 
+#define RGX_FEATURE_SLC_BANKS (2)
+#define RGX_FEATURE_META_DMA_CHANNEL_COUNT (4)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (3)
+#define RGX_FEATURE_META_COREMEM_BANKS (8)
+#define RGX_FEATURE_GPU_VIRTUALISATION 
+#define RGX_FEATURE_META_DMA 
+#define RGX_FEATURE_META_COREMEM_SIZE (64)
+#define RGX_FEATURE_COMPUTE 
+
+
+#endif /* _RGXCONFIG_8_V_4_38_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_1.32.4.19.h b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_1.32.4.19.h
new file mode 100644
index 0000000..f078e42
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_1.32.4.19.h
@@ -0,0 +1,72 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 1.32.4.19
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_1_32_4_19_H_
+#define _RGXCORE_KM_1_32_4_19_H_
+
+/***** Automatically generated file (3/4/2015 2:27:37 PM): Do not edit manually ********************/
+/***** Timestamp:  (3/4/2015 2:27:37 PM)************************************************************/
+/***** CS: @2615289 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 1.32.4.19 
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 1
+#define RGX_BVNC_KM_V 32
+#define RGX_BVNC_KM_N 4
+#define RGX_BVNC_KM_C 19
+
+/******************************************************************************
+ * Errata 
+ *****************************************************************************/
+
+#define FIX_HW_BRN_38344
+
+
+ 
+/******************************************************************************
+ * Enhancements 
+ *****************************************************************************/
+
+
+
+#endif /* _RGXCORE_KM_1_32_4_19_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_1.33.2.5.h b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_1.33.2.5.h
new file mode 100644
index 0000000..f9939a1
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_1.33.2.5.h
@@ -0,0 +1,72 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 1.33.2.5
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_1_33_2_5_H_
+#define _RGXCORE_KM_1_33_2_5_H_
+
+/***** Automatically generated file (3/4/2015 2:27:36 PM): Do not edit manually ********************/
+/***** Timestamp:  (3/4/2015 2:27:36 PM)************************************************************/
+/***** CS: @2106753 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 1.33.2.5 
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 1
+#define RGX_BVNC_KM_V 33
+#define RGX_BVNC_KM_N 2
+#define RGX_BVNC_KM_C 5
+
+/******************************************************************************
+ * Errata 
+ *****************************************************************************/
+
+#define FIX_HW_BRN_38344
+
+
+ 
+/******************************************************************************
+ * Enhancements 
+ *****************************************************************************/
+
+
+
+#endif /* _RGXCORE_KM_1_33_2_5_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_1.39.4.19.h b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_1.39.4.19.h
new file mode 100644
index 0000000..cd293a5
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_1.39.4.19.h
@@ -0,0 +1,72 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 1.39.4.19
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_1_39_4_19_H_
+#define _RGXCORE_KM_1_39_4_19_H_
+
+/***** Automatically generated file (3/4/2015 2:27:37 PM): Do not edit manually ********************/
+/***** Timestamp:  (3/4/2015 2:27:37 PM)************************************************************/
+/***** CS: @2784771 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 1.39.4.19 
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 1
+#define RGX_BVNC_KM_V 39
+#define RGX_BVNC_KM_N 4
+#define RGX_BVNC_KM_C 19
+
+/******************************************************************************
+ * Errata 
+ *****************************************************************************/
+
+#define FIX_HW_BRN_38344
+
+
+ 
+/******************************************************************************
+ * Enhancements 
+ *****************************************************************************/
+
+
+
+#endif /* _RGXCORE_KM_1_39_4_19_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_1.48.2.0.h b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_1.48.2.0.h
new file mode 100644
index 0000000..e970ddb
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_1.48.2.0.h
@@ -0,0 +1,72 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 1.48.2.0
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_1_48_2_0_H_
+#define _RGXCORE_KM_1_48_2_0_H_
+
+/***** Automatically generated file (3/4/2015 2:27:36 PM): Do not edit manually ********************/
+/***** Timestamp:  (3/4/2015 2:27:36 PM)************************************************************/
+/***** CS: @2523218 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 1.48.2.0 
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 1
+#define RGX_BVNC_KM_V 48
+#define RGX_BVNC_KM_N 2
+#define RGX_BVNC_KM_C 0
+
+/******************************************************************************
+ * Errata 
+ *****************************************************************************/
+
+#define FIX_HW_BRN_38344
+
+
+ 
+/******************************************************************************
+ * Enhancements 
+ *****************************************************************************/
+
+
+
+#endif /* _RGXCORE_KM_1_48_2_0_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_1.72.4.12.h b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_1.72.4.12.h
new file mode 100644
index 0000000..5dc662d8
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_1.72.4.12.h
@@ -0,0 +1,71 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 1.72.4.12
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_1_72_4_12_H_
+#define _RGXCORE_KM_1_72_4_12_H_
+
+/***** Automatically generated file (3/4/2015 2:27:34 PM): Do not edit manually ********************/
+/***** Timestamp:  (3/4/2015 2:27:34 PM)************************************************************/
+/***** CS: @2646650 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 1.72.4.12 
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 1
+#define RGX_BVNC_KM_V 72
+#define RGX_BVNC_KM_N 4
+#define RGX_BVNC_KM_C 12
+
+/******************************************************************************
+ * Errata 
+ *****************************************************************************/
+
+
+
+ 
+/******************************************************************************
+ * Enhancements 
+ *****************************************************************************/
+
+
+
+#endif /* _RGXCORE_KM_1_72_4_12_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_1.75.2.20.h b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_1.75.2.20.h
new file mode 100644
index 0000000..ece76e0
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_1.75.2.20.h
@@ -0,0 +1,71 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 1.75.2.20
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_1_75_2_20_H_
+#define _RGXCORE_KM_1_75_2_20_H_
+
+/***** Automatically generated file (3/4/2015 2:27:36 PM): Do not edit manually ********************/
+/***** Timestamp:  (3/4/2015 2:27:36 PM)************************************************************/
+/***** CS: @2309075 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 1.75.2.20 
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 1
+#define RGX_BVNC_KM_V 75
+#define RGX_BVNC_KM_N 2
+#define RGX_BVNC_KM_C 20
+
+/******************************************************************************
+ * Errata 
+ *****************************************************************************/
+
+
+
+ 
+/******************************************************************************
+ * Enhancements 
+ *****************************************************************************/
+
+
+
+#endif /* _RGXCORE_KM_1_75_2_20_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_1.75.2.30.h b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_1.75.2.30.h
new file mode 100644
index 0000000..f03f0a5
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_1.75.2.30.h
@@ -0,0 +1,71 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 1.75.2.30
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_1_75_2_30_H_
+#define _RGXCORE_KM_1_75_2_30_H_
+
+/***** Automatically generated file (3/4/2015 2:27:33 PM): Do not edit manually ********************/
+/***** Timestamp:  (3/4/2015 2:27:33 PM)************************************************************/
+/***** CS: @2309075 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 1.75.2.30 
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 1
+#define RGX_BVNC_KM_V 75
+#define RGX_BVNC_KM_N 2
+#define RGX_BVNC_KM_C 30
+
+/******************************************************************************
+ * Errata 
+ *****************************************************************************/
+
+
+
+ 
+/******************************************************************************
+ * Enhancements 
+ *****************************************************************************/
+
+
+
+#endif /* _RGXCORE_KM_1_75_2_30_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_1.76.4.6.h b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_1.76.4.6.h
new file mode 100644
index 0000000..1ba4058
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_1.76.4.6.h
@@ -0,0 +1,73 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 1.76.4.6
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_1_76_4_6_H_
+#define _RGXCORE_KM_1_76_4_6_H_
+
+/***** Automatically generated file (3/4/2015 2:27:34 PM): Do not edit manually ********************/
+/***** Timestamp:  (3/4/2015 2:27:34 PM)************************************************************/
+/***** CS: @2318404 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 1.76.4.6 
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 1
+#define RGX_BVNC_KM_V 76
+#define RGX_BVNC_KM_N 4
+#define RGX_BVNC_KM_C 6
+
+/******************************************************************************
+ * Errata 
+ *****************************************************************************/
+
+#define FIX_HW_BRN_38344
+#define FIX_HW_BRN_42480
+
+
+ 
+/******************************************************************************
+ * Enhancements 
+ *****************************************************************************/
+
+
+
+#endif /* _RGXCORE_KM_1_76_4_6_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_1.81.4.15.h b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_1.81.4.15.h
new file mode 100644
index 0000000..c285483
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_1.81.4.15.h
@@ -0,0 +1,72 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 1.81.4.15
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_1_81_4_15_H_
+#define _RGXCORE_KM_1_81_4_15_H_
+
+/***** Automatically generated file (3/4/2015 2:27:36 PM): Do not edit manually ********************/
+/***** Timestamp:  (3/4/2015 2:27:36 PM)************************************************************/
+/***** CS: @2373516 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 1.81.4.15 
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 1
+#define RGX_BVNC_KM_V 81
+#define RGX_BVNC_KM_N 4
+#define RGX_BVNC_KM_C 15
+
+/******************************************************************************
+ * Errata 
+ *****************************************************************************/
+
+#define FIX_HW_BRN_38344
+
+
+ 
+/******************************************************************************
+ * Enhancements 
+ *****************************************************************************/
+
+
+
+#endif /* _RGXCORE_KM_1_81_4_15_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_1.82.4.5.h b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_1.82.4.5.h
new file mode 100644
index 0000000..5254ea8
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_1.82.4.5.h
@@ -0,0 +1,71 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 1.82.4.5
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_1_82_4_5_H_
+#define _RGXCORE_KM_1_82_4_5_H_
+
+/***** Automatically generated file (3/4/2015 2:27:34 PM): Do not edit manually ********************/
+/***** Timestamp:  (3/4/2015 2:27:34 PM)************************************************************/
+/***** CS: @2503111 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 1.82.4.5 
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 1
+#define RGX_BVNC_KM_V 82
+#define RGX_BVNC_KM_N 4
+#define RGX_BVNC_KM_C 5
+
+/******************************************************************************
+ * Errata 
+ *****************************************************************************/
+
+
+
+ 
+/******************************************************************************
+ * Enhancements 
+ *****************************************************************************/
+
+
+
+#endif /* _RGXCORE_KM_1_82_4_5_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_12.5.1.20.h b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_12.5.1.20.h
new file mode 100644
index 0000000..79d82e1
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_12.5.1.20.h
@@ -0,0 +1,71 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 12.5.1.20
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_12_5_1_20_H_
+#define _RGXCORE_KM_12_5_1_20_H_
+
+/***** Automatically generated file (4/27/2015 2:03:36 PM): Do not edit manually ********************/
+/***** Timestamp:  (4/27/2015 2:03:36 PM)************************************************************/
+/***** CS: @3146507 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 12.5.1.20 
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 12
+#define RGX_BVNC_KM_V 5
+#define RGX_BVNC_KM_N 1
+#define RGX_BVNC_KM_C 20
+
+/******************************************************************************
+ * Errata 
+ *****************************************************************************/
+
+
+
+ 
+/******************************************************************************
+ * Enhancements 
+ *****************************************************************************/
+
+
+
+#endif /* _RGXCORE_KM_12_5_1_20_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_4.29.2.51.h b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_4.29.2.51.h
new file mode 100644
index 0000000..5b5c677
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_4.29.2.51.h
@@ -0,0 +1,71 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 4.29.2.51
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_4_29_2_51_H_
+#define _RGXCORE_KM_4_29_2_51_H_
+
+/***** Automatically generated file (3/2/2015 6:26:08 PM): Do not edit manually ********************/
+/***** Timestamp:  (3/2/2015 6:26:08 PM)************************************************************/
+/***** CS: @2944502 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 4.29.2.51 
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 4
+#define RGX_BVNC_KM_V 29
+#define RGX_BVNC_KM_N 2
+#define RGX_BVNC_KM_C 51
+
+/******************************************************************************
+ * Errata 
+ *****************************************************************************/
+
+
+
+ 
+/******************************************************************************
+ * Enhancements 
+ *****************************************************************************/
+
+
+
+#endif /* _RGXCORE_KM_4_29_2_51_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_4.32.2.52.h b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_4.32.2.52.h
new file mode 100644
index 0000000..2538f13
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_4.32.2.52.h
@@ -0,0 +1,71 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 4.32.2.52
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_4_32_2_52_H_
+#define _RGXCORE_KM_4_32_2_52_H_
+
+/***** Automatically generated file (3/2/2015 6:26:04 PM): Do not edit manually ********************/
+/***** Timestamp:  (3/2/2015 6:26:04 PM)************************************************************/
+/***** CS: @2966609 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 4.32.2.52 
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 4
+#define RGX_BVNC_KM_V 32
+#define RGX_BVNC_KM_N 2
+#define RGX_BVNC_KM_C 52
+
+/******************************************************************************
+ * Errata 
+ *****************************************************************************/
+
+
+
+ 
+/******************************************************************************
+ * Enhancements 
+ *****************************************************************************/
+
+
+
+#endif /* _RGXCORE_KM_4_32_2_52_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_4.40.2.51.h b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_4.40.2.51.h
new file mode 100644
index 0000000..b1ac04e
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_4.40.2.51.h
@@ -0,0 +1,71 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 4.40.2.51
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_4_40_2_51_H_
+#define _RGXCORE_KM_4_40_2_51_H_
+
+/***** Automatically generated file (3/2/2015 6:26:02 PM): Do not edit manually ********************/
+/***** Timestamp:  (3/2/2015 6:26:02 PM)************************************************************/
+/***** CS: @3254374 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 4.40.2.51 
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 4
+#define RGX_BVNC_KM_V 40
+#define RGX_BVNC_KM_N 2
+#define RGX_BVNC_KM_C 51
+
+/******************************************************************************
+ * Errata 
+ *****************************************************************************/
+
+
+
+ 
+/******************************************************************************
+ * Enhancements 
+ *****************************************************************************/
+
+
+
+#endif /* _RGXCORE_KM_4_40_2_51_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_4.41.2.57.h b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_4.41.2.57.h
new file mode 100644
index 0000000..203fcc9
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_4.41.2.57.h
@@ -0,0 +1,71 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 4.41.2.57
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_4_41_2_57_H_
+#define _RGXCORE_KM_4_41_2_57_H_
+
+/***** Automatically generated file (3/2/2015 6:26:02 PM): Do not edit manually ********************/
+/***** Timestamp:  (3/2/2015 6:26:02 PM)************************************************************/
+/***** CS: @3254338 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 4.41.2.57 
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 4
+#define RGX_BVNC_KM_V 41
+#define RGX_BVNC_KM_N 2
+#define RGX_BVNC_KM_C 57
+
+/******************************************************************************
+ * Errata 
+ *****************************************************************************/
+
+
+
+ 
+/******************************************************************************
+ * Enhancements 
+ *****************************************************************************/
+
+
+
+#endif /* _RGXCORE_KM_4_41_2_57_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_4.42.4.53.h b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_4.42.4.53.h
new file mode 100644
index 0000000..ed4c16d
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_4.42.4.53.h
@@ -0,0 +1,71 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 4.42.4.53
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_4_42_4_53_H_
+#define _RGXCORE_KM_4_42_4_53_H_
+
+/***** Automatically generated file (3/2/2015 6:26:02 PM): Do not edit manually ********************/
+/***** Timestamp:  (3/2/2015 6:26:02 PM)************************************************************/
+/***** CS: @3250390 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 4.42.4.53 
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 4
+#define RGX_BVNC_KM_V 42
+#define RGX_BVNC_KM_N 4
+#define RGX_BVNC_KM_C 53
+
+/******************************************************************************
+ * Errata 
+ *****************************************************************************/
+
+
+
+ 
+/******************************************************************************
+ * Enhancements 
+ *****************************************************************************/
+
+
+
+#endif /* _RGXCORE_KM_4_42_4_53_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_4.43.6.62.h b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_4.43.6.62.h
new file mode 100644
index 0000000..bb4bd10
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_4.43.6.62.h
@@ -0,0 +1,71 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 4.43.6.62
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_4_43_6_62_H_
+#define _RGXCORE_KM_4_43_6_62_H_
+
+/***** Automatically generated file (3/2/2015 6:26:02 PM): Do not edit manually ********************/
+/***** Timestamp:  (3/2/2015 6:26:02 PM)************************************************************/
+/***** CS: @3253129 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 4.43.6.62 
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 4
+#define RGX_BVNC_KM_V 43
+#define RGX_BVNC_KM_N 6
+#define RGX_BVNC_KM_C 62
+
+/******************************************************************************
+ * Errata 
+ *****************************************************************************/
+
+
+
+ 
+/******************************************************************************
+ * Enhancements 
+ *****************************************************************************/
+
+
+
+#endif /* _RGXCORE_KM_4_43_6_62_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_4.45.2.58.h b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_4.45.2.58.h
new file mode 100644
index 0000000..6c44af5
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_4.45.2.58.h
@@ -0,0 +1,71 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 4.45.2.58
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_4_45_2_58_H_
+#define _RGXCORE_KM_4_45_2_58_H_
+
+/***** Automatically generated file (3/23/2015 2:49:16 PM): Do not edit manually ********************/
+/***** Timestamp:  (3/23/2015 2:49:16 PM)************************************************************/
+/***** CS: @3478233 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 4.45.2.58 
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 4
+#define RGX_BVNC_KM_V 45
+#define RGX_BVNC_KM_N 2
+#define RGX_BVNC_KM_C 58
+
+/******************************************************************************
+ * Errata 
+ *****************************************************************************/
+
+
+
+ 
+/******************************************************************************
+ * Enhancements 
+ *****************************************************************************/
+
+
+
+#endif /* _RGXCORE_KM_4_45_2_58_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_5.11.1.46.h b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_5.11.1.46.h
new file mode 100644
index 0000000..ef21655
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_5.11.1.46.h
@@ -0,0 +1,72 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 5.11.1.46
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_5_11_1_46_H_
+#define _RGXCORE_KM_5_11_1_46_H_
+
+/***** Automatically generated file (4/2/2015 11:53:03 AM): Do not edit manually ********************/
+/***** Timestamp:  (4/2/2015 11:53:03 AM)************************************************************/
+/***** CS: @3485232 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 5.11.1.46 
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 5
+#define RGX_BVNC_KM_V 11
+#define RGX_BVNC_KM_N 1
+#define RGX_BVNC_KM_C 46
+
+/******************************************************************************
+ * Errata 
+ *****************************************************************************/
+
+#define FIX_HW_BRN_42480
+
+
+ 
+/******************************************************************************
+ * Enhancements 
+ *****************************************************************************/
+
+
+
+#endif /* _RGXCORE_KM_5_11_1_46_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_5.9.1.46.h b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_5.9.1.46.h
new file mode 100644
index 0000000..9105e8c
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_5.9.1.46.h
@@ -0,0 +1,72 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 5.9.1.46
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_5_9_1_46_H_
+#define _RGXCORE_KM_5_9_1_46_H_
+
+/***** Automatically generated file (3/4/2015 2:27:37 PM): Do not edit manually ********************/
+/***** Timestamp:  (3/4/2015 2:27:37 PM)************************************************************/
+/***** CS: @2967148 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 5.9.1.46 
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 5
+#define RGX_BVNC_KM_V 9
+#define RGX_BVNC_KM_N 1
+#define RGX_BVNC_KM_C 46
+
+/******************************************************************************
+ * Errata 
+ *****************************************************************************/
+
+#define FIX_HW_BRN_38344
+
+
+ 
+/******************************************************************************
+ * Enhancements 
+ *****************************************************************************/
+
+
+
+#endif /* _RGXCORE_KM_5_9_1_46_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_8.21.2.34.h b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_8.21.2.34.h
new file mode 100644
index 0000000..11fb9bc
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_8.21.2.34.h
@@ -0,0 +1,72 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 8.21.2.34
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_8_21_2_34_H_
+#define _RGXCORE_KM_8_21_2_34_H_
+
+/***** Automatically generated file (21/04/2015 18:47:55): Do not edit manually ********************/
+/***** Timestamp:  (21/04/2015 18:47:55)************************************************************/
+/***** CS: @3480967 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 8.21.2.34 
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 8
+#define RGX_BVNC_KM_V 21
+#define RGX_BVNC_KM_N 2
+#define RGX_BVNC_KM_C 34
+
+/******************************************************************************
+ * Errata 
+ *****************************************************************************/
+
+
+
+ 
+/******************************************************************************
+ * Enhancements 
+ *****************************************************************************/
+#define HW_ERN_45914
+
+
+
+#endif /* _RGXCORE_KM_8_21_2_34_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_8.41.4.38.h b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_8.41.4.38.h
new file mode 100644
index 0000000..5d26a7f
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/cores/rgxcore_km_8.41.4.38.h
@@ -0,0 +1,72 @@
+/*************************************************************************/ /*!
+@Title          RGX Core BVNC 8.41.4.38
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_8_41_4_38_H_
+#define _RGXCORE_KM_8_41_4_38_H_
+
+/***** Automatically generated file (5/12/2015 5:46:10 AM): Do not edit manually ********************/
+/***** Timestamp:  (5/12/2015 5:46:10 AM)************************************************************/
+/***** CS: @3498807 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 8.41.4.38 
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 8
+#define RGX_BVNC_KM_V 41
+#define RGX_BVNC_KM_N 4
+#define RGX_BVNC_KM_C 38
+
+/******************************************************************************
+ * Errata 
+ *****************************************************************************/
+
+
+
+ 
+/******************************************************************************
+ * Enhancements 
+ *****************************************************************************/
+#define HW_ERN_45914
+
+
+
+#endif /* _RGXCORE_KM_8_41_4_38_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/rgx_cr_defs_km.h b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/rgx_cr_defs_km.h
new file mode 100644
index 0000000..6b0c403
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/rgx_cr_defs_km.h
@@ -0,0 +1,3836 @@
+/*************************************************************************/ /*!
+@Title          Hardware definition file rgx_cr_defs_km.h
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/*               ****   Autogenerated C -- do not edit    ****               */
+
+/*
+ * Generated by regconv version MAIN@3250555
+ *   from files:
+ */
+
+#if !defined(__IMG_EXPLICIT_INCLUDE_HWDEFS)
+#error This file may only be included if explicitly defined
+#endif
+
+#ifndef _RGX_CR_DEFS_KM_H_
+#define _RGX_CR_DEFS_KM_H_
+
+#include "img_types.h"
+
+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE)
+/*
+    Register RGX_CR_CLK_CTRL
+*/
+#define RGX_CR_CLK_CTRL                                   (0x0000U)
+#define RGX_CR_CLK_CTRL_MASKFULL                          (IMG_UINT64_C(0xFFFFC3003F3F3F0F))
+#if defined(RGX_FEATURE_CLUSTER_GROUPING)
+#define RGX_CR_CLK_CTRL_BIF_TEXAS_SHIFT                   (62U)
+#define RGX_CR_CLK_CTRL_BIF_TEXAS_CLRMSK                  (IMG_UINT64_C(0X3FFFFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_BIF_TEXAS_OFF                     (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_BIF_TEXAS_ON                      (IMG_UINT64_C(0x4000000000000000))
+#define RGX_CR_CLK_CTRL_BIF_TEXAS_AUTO                    (IMG_UINT64_C(0x8000000000000000))
+#define RGX_CR_CLK_CTRL_IPP_SHIFT                         (60U)
+#define RGX_CR_CLK_CTRL_IPP_CLRMSK                        (IMG_UINT64_C(0XCFFFFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_IPP_OFF                           (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_IPP_ON                            (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_CLK_CTRL_IPP_AUTO                          (IMG_UINT64_C(0x2000000000000000))
+#endif /* RGX_FEATURE_CLUSTER_GROUPING */
+
+#define RGX_CR_CLK_CTRL_FBC_SHIFT                         (58U)
+#define RGX_CR_CLK_CTRL_FBC_CLRMSK                        (IMG_UINT64_C(0XF3FFFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_FBC_OFF                           (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_FBC_ON                            (IMG_UINT64_C(0x0400000000000000))
+#define RGX_CR_CLK_CTRL_FBC_AUTO                          (IMG_UINT64_C(0x0800000000000000))
+#define RGX_CR_CLK_CTRL_FBDC_SHIFT                        (56U)
+#define RGX_CR_CLK_CTRL_FBDC_CLRMSK                       (IMG_UINT64_C(0XFCFFFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_FBDC_OFF                          (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_FBDC_ON                           (IMG_UINT64_C(0x0100000000000000))
+#define RGX_CR_CLK_CTRL_FBDC_AUTO                         (IMG_UINT64_C(0x0200000000000000))
+#define RGX_CR_CLK_CTRL_FB_TLCACHE_SHIFT                  (54U)
+#define RGX_CR_CLK_CTRL_FB_TLCACHE_CLRMSK                 (IMG_UINT64_C(0XFF3FFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_FB_TLCACHE_OFF                    (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_FB_TLCACHE_ON                     (IMG_UINT64_C(0x0040000000000000))
+#define RGX_CR_CLK_CTRL_FB_TLCACHE_AUTO                   (IMG_UINT64_C(0x0080000000000000))
+#define RGX_CR_CLK_CTRL_USCS_SHIFT                        (52U)
+#define RGX_CR_CLK_CTRL_USCS_CLRMSK                       (IMG_UINT64_C(0XFFCFFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_USCS_OFF                          (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_USCS_ON                           (IMG_UINT64_C(0x0010000000000000))
+#define RGX_CR_CLK_CTRL_USCS_AUTO                         (IMG_UINT64_C(0x0020000000000000))
+#define RGX_CR_CLK_CTRL_PBE_SHIFT                         (50U)
+#define RGX_CR_CLK_CTRL_PBE_CLRMSK                        (IMG_UINT64_C(0XFFF3FFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_PBE_OFF                           (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_PBE_ON                            (IMG_UINT64_C(0x0004000000000000))
+#define RGX_CR_CLK_CTRL_PBE_AUTO                          (IMG_UINT64_C(0x0008000000000000))
+#define RGX_CR_CLK_CTRL_MCU_L1_SHIFT                      (48U)
+#define RGX_CR_CLK_CTRL_MCU_L1_CLRMSK                     (IMG_UINT64_C(0XFFFCFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_MCU_L1_OFF                        (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_MCU_L1_ON                         (IMG_UINT64_C(0x0001000000000000))
+#define RGX_CR_CLK_CTRL_MCU_L1_AUTO                       (IMG_UINT64_C(0x0002000000000000))
+#define RGX_CR_CLK_CTRL_CDM_SHIFT                         (46U)
+#define RGX_CR_CLK_CTRL_CDM_CLRMSK                        (IMG_UINT64_C(0XFFFF3FFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_CDM_OFF                           (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_CDM_ON                            (IMG_UINT64_C(0x0000400000000000))
+#define RGX_CR_CLK_CTRL_CDM_AUTO                          (IMG_UINT64_C(0x0000800000000000))
+#define RGX_CR_CLK_CTRL_BIF_SHIFT                         (40U)
+#define RGX_CR_CLK_CTRL_BIF_CLRMSK                        (IMG_UINT64_C(0XFFFFFCFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_BIF_OFF                           (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_BIF_ON                            (IMG_UINT64_C(0x0000010000000000))
+#define RGX_CR_CLK_CTRL_BIF_AUTO                          (IMG_UINT64_C(0x0000020000000000))
+#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_SHIFT               (28U)
+#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_CLRMSK              (IMG_UINT64_C(0XFFFFFFFFCFFFFFFF))
+#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_OFF                 (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_ON                  (IMG_UINT64_C(0x0000000010000000))
+#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_AUTO                (IMG_UINT64_C(0x0000000020000000))
+#define RGX_CR_CLK_CTRL_MCU_L0_SHIFT                      (26U)
+#define RGX_CR_CLK_CTRL_MCU_L0_CLRMSK                     (IMG_UINT64_C(0XFFFFFFFFF3FFFFFF))
+#define RGX_CR_CLK_CTRL_MCU_L0_OFF                        (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_MCU_L0_ON                         (IMG_UINT64_C(0x0000000004000000))
+#define RGX_CR_CLK_CTRL_MCU_L0_AUTO                       (IMG_UINT64_C(0x0000000008000000))
+#define RGX_CR_CLK_CTRL_TPU_SHIFT                         (24U)
+#define RGX_CR_CLK_CTRL_TPU_CLRMSK                        (IMG_UINT64_C(0XFFFFFFFFFCFFFFFF))
+#define RGX_CR_CLK_CTRL_TPU_OFF                           (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_TPU_ON                            (IMG_UINT64_C(0x0000000001000000))
+#define RGX_CR_CLK_CTRL_TPU_AUTO                          (IMG_UINT64_C(0x0000000002000000))
+#define RGX_CR_CLK_CTRL_USC_SHIFT                         (20U)
+#define RGX_CR_CLK_CTRL_USC_CLRMSK                        (IMG_UINT64_C(0XFFFFFFFFFFCFFFFF))
+#define RGX_CR_CLK_CTRL_USC_OFF                           (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_USC_ON                            (IMG_UINT64_C(0x0000000000100000))
+#define RGX_CR_CLK_CTRL_USC_AUTO                          (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_CLK_CTRL_TLA_SHIFT                         (18U)
+#define RGX_CR_CLK_CTRL_TLA_CLRMSK                        (IMG_UINT64_C(0XFFFFFFFFFFF3FFFF))
+#define RGX_CR_CLK_CTRL_TLA_OFF                           (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_TLA_ON                            (IMG_UINT64_C(0x0000000000040000))
+#define RGX_CR_CLK_CTRL_TLA_AUTO                          (IMG_UINT64_C(0x0000000000080000))
+#define RGX_CR_CLK_CTRL_SLC_SHIFT                         (16U)
+#define RGX_CR_CLK_CTRL_SLC_CLRMSK                        (IMG_UINT64_C(0XFFFFFFFFFFFCFFFF))
+#define RGX_CR_CLK_CTRL_SLC_OFF                           (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_SLC_ON                            (IMG_UINT64_C(0x0000000000010000))
+#define RGX_CR_CLK_CTRL_SLC_AUTO                          (IMG_UINT64_C(0x0000000000020000))
+#define RGX_CR_CLK_CTRL_PDS_SHIFT                         (12U)
+#define RGX_CR_CLK_CTRL_PDS_CLRMSK                        (IMG_UINT64_C(0XFFFFFFFFFFFFCFFF))
+#define RGX_CR_CLK_CTRL_PDS_OFF                           (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_PDS_ON                            (IMG_UINT64_C(0x0000000000001000))
+#define RGX_CR_CLK_CTRL_PDS_AUTO                          (IMG_UINT64_C(0x0000000000002000))
+#define RGX_CR_CLK_CTRL_VDM_SHIFT                         (10U)
+#define RGX_CR_CLK_CTRL_VDM_CLRMSK                        (IMG_UINT64_C(0XFFFFFFFFFFFFF3FF))
+#define RGX_CR_CLK_CTRL_VDM_OFF                           (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_VDM_ON                            (IMG_UINT64_C(0x0000000000000400))
+#define RGX_CR_CLK_CTRL_VDM_AUTO                          (IMG_UINT64_C(0x0000000000000800))
+#define RGX_CR_CLK_CTRL_PM_SHIFT                          (8U)
+#define RGX_CR_CLK_CTRL_PM_CLRMSK                         (IMG_UINT64_C(0XFFFFFFFFFFFFFCFF))
+#define RGX_CR_CLK_CTRL_PM_OFF                            (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_PM_ON                             (IMG_UINT64_C(0x0000000000000100))
+#define RGX_CR_CLK_CTRL_PM_AUTO                           (IMG_UINT64_C(0x0000000000000200))
+#define RGX_CR_CLK_CTRL_TSP_SHIFT                         (2U)
+#define RGX_CR_CLK_CTRL_TSP_CLRMSK                        (IMG_UINT64_C(0XFFFFFFFFFFFFFFF3))
+#define RGX_CR_CLK_CTRL_TSP_OFF                           (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_TSP_ON                            (IMG_UINT64_C(0x0000000000000004))
+#define RGX_CR_CLK_CTRL_TSP_AUTO                          (IMG_UINT64_C(0x0000000000000008))
+#define RGX_CR_CLK_CTRL_ISP_SHIFT                         (0U)
+#define RGX_CR_CLK_CTRL_ISP_CLRMSK                        (IMG_UINT64_C(0XFFFFFFFFFFFFFFFC))
+#define RGX_CR_CLK_CTRL_ISP_OFF                           (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_ISP_ON                            (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_CLK_CTRL_ISP_AUTO                          (IMG_UINT64_C(0x0000000000000002))
+#endif /* RGX_FEATURE_S7_TOP_INFRASTRUCTURE */ 
+
+
+#if !defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE)
+/*
+    Register RGX_CR_CLK_CTRL
+*/
+#define RGX_CR_CLK_CTRL                                   (0x0000U)
+#define RGX_CR_CLK_CTRL_MASKFULL                          (IMG_UINT64_C(0xFFFFFF003F3FFFFF))
+#if defined(RGX_FEATURE_CLUSTER_GROUPING)
+#define RGX_CR_CLK_CTRL_BIF_TEXAS_SHIFT                   (62U)
+#define RGX_CR_CLK_CTRL_BIF_TEXAS_CLRMSK                  (IMG_UINT64_C(0X3FFFFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_BIF_TEXAS_OFF                     (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_BIF_TEXAS_ON                      (IMG_UINT64_C(0x4000000000000000))
+#define RGX_CR_CLK_CTRL_BIF_TEXAS_AUTO                    (IMG_UINT64_C(0x8000000000000000))
+#define RGX_CR_CLK_CTRL_IPP_SHIFT                         (60U)
+#define RGX_CR_CLK_CTRL_IPP_CLRMSK                        (IMG_UINT64_C(0XCFFFFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_IPP_OFF                           (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_IPP_ON                            (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_CLK_CTRL_IPP_AUTO                          (IMG_UINT64_C(0x2000000000000000))
+#endif /* RGX_FEATURE_CLUSTER_GROUPING */
+
+#define RGX_CR_CLK_CTRL_FBC_SHIFT                         (58U)
+#define RGX_CR_CLK_CTRL_FBC_CLRMSK                        (IMG_UINT64_C(0XF3FFFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_FBC_OFF                           (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_FBC_ON                            (IMG_UINT64_C(0x0400000000000000))
+#define RGX_CR_CLK_CTRL_FBC_AUTO                          (IMG_UINT64_C(0x0800000000000000))
+#define RGX_CR_CLK_CTRL_FBDC_SHIFT                        (56U)
+#define RGX_CR_CLK_CTRL_FBDC_CLRMSK                       (IMG_UINT64_C(0XFCFFFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_FBDC_OFF                          (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_FBDC_ON                           (IMG_UINT64_C(0x0100000000000000))
+#define RGX_CR_CLK_CTRL_FBDC_AUTO                         (IMG_UINT64_C(0x0200000000000000))
+#define RGX_CR_CLK_CTRL_FB_TLCACHE_SHIFT                  (54U)
+#define RGX_CR_CLK_CTRL_FB_TLCACHE_CLRMSK                 (IMG_UINT64_C(0XFF3FFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_FB_TLCACHE_OFF                    (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_FB_TLCACHE_ON                     (IMG_UINT64_C(0x0040000000000000))
+#define RGX_CR_CLK_CTRL_FB_TLCACHE_AUTO                   (IMG_UINT64_C(0x0080000000000000))
+#define RGX_CR_CLK_CTRL_USCS_SHIFT                        (52U)
+#define RGX_CR_CLK_CTRL_USCS_CLRMSK                       (IMG_UINT64_C(0XFFCFFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_USCS_OFF                          (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_USCS_ON                           (IMG_UINT64_C(0x0010000000000000))
+#define RGX_CR_CLK_CTRL_USCS_AUTO                         (IMG_UINT64_C(0x0020000000000000))
+#define RGX_CR_CLK_CTRL_PBE_SHIFT                         (50U)
+#define RGX_CR_CLK_CTRL_PBE_CLRMSK                        (IMG_UINT64_C(0XFFF3FFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_PBE_OFF                           (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_PBE_ON                            (IMG_UINT64_C(0x0004000000000000))
+#define RGX_CR_CLK_CTRL_PBE_AUTO                          (IMG_UINT64_C(0x0008000000000000))
+#define RGX_CR_CLK_CTRL_MCU_L1_SHIFT                      (48U)
+#define RGX_CR_CLK_CTRL_MCU_L1_CLRMSK                     (IMG_UINT64_C(0XFFFCFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_MCU_L1_OFF                        (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_MCU_L1_ON                         (IMG_UINT64_C(0x0001000000000000))
+#define RGX_CR_CLK_CTRL_MCU_L1_AUTO                       (IMG_UINT64_C(0x0002000000000000))
+#define RGX_CR_CLK_CTRL_CDM_SHIFT                         (46U)
+#define RGX_CR_CLK_CTRL_CDM_CLRMSK                        (IMG_UINT64_C(0XFFFF3FFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_CDM_OFF                           (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_CDM_ON                            (IMG_UINT64_C(0x0000400000000000))
+#define RGX_CR_CLK_CTRL_CDM_AUTO                          (IMG_UINT64_C(0x0000800000000000))
+#define RGX_CR_CLK_CTRL_SIDEKICK_SHIFT                    (44U)
+#define RGX_CR_CLK_CTRL_SIDEKICK_CLRMSK                   (IMG_UINT64_C(0XFFFFCFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_SIDEKICK_OFF                      (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_SIDEKICK_ON                       (IMG_UINT64_C(0x0000100000000000))
+#define RGX_CR_CLK_CTRL_SIDEKICK_AUTO                     (IMG_UINT64_C(0x0000200000000000))
+#define RGX_CR_CLK_CTRL_BIF_SIDEKICK_SHIFT                (42U)
+#define RGX_CR_CLK_CTRL_BIF_SIDEKICK_CLRMSK               (IMG_UINT64_C(0XFFFFF3FFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_BIF_SIDEKICK_OFF                  (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_BIF_SIDEKICK_ON                   (IMG_UINT64_C(0x0000040000000000))
+#define RGX_CR_CLK_CTRL_BIF_SIDEKICK_AUTO                 (IMG_UINT64_C(0x0000080000000000))
+#define RGX_CR_CLK_CTRL_BIF_SHIFT                         (40U)
+#define RGX_CR_CLK_CTRL_BIF_CLRMSK                        (IMG_UINT64_C(0XFFFFFCFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_BIF_OFF                           (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_BIF_ON                            (IMG_UINT64_C(0x0000010000000000))
+#define RGX_CR_CLK_CTRL_BIF_AUTO                          (IMG_UINT64_C(0x0000020000000000))
+#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_SHIFT               (28U)
+#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_CLRMSK              (IMG_UINT64_C(0XFFFFFFFFCFFFFFFF))
+#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_OFF                 (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_ON                  (IMG_UINT64_C(0x0000000010000000))
+#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_AUTO                (IMG_UINT64_C(0x0000000020000000))
+#define RGX_CR_CLK_CTRL_MCU_L0_SHIFT                      (26U)
+#define RGX_CR_CLK_CTRL_MCU_L0_CLRMSK                     (IMG_UINT64_C(0XFFFFFFFFF3FFFFFF))
+#define RGX_CR_CLK_CTRL_MCU_L0_OFF                        (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_MCU_L0_ON                         (IMG_UINT64_C(0x0000000004000000))
+#define RGX_CR_CLK_CTRL_MCU_L0_AUTO                       (IMG_UINT64_C(0x0000000008000000))
+#define RGX_CR_CLK_CTRL_TPU_SHIFT                         (24U)
+#define RGX_CR_CLK_CTRL_TPU_CLRMSK                        (IMG_UINT64_C(0XFFFFFFFFFCFFFFFF))
+#define RGX_CR_CLK_CTRL_TPU_OFF                           (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_TPU_ON                            (IMG_UINT64_C(0x0000000001000000))
+#define RGX_CR_CLK_CTRL_TPU_AUTO                          (IMG_UINT64_C(0x0000000002000000))
+#define RGX_CR_CLK_CTRL_USC_SHIFT                         (20U)
+#define RGX_CR_CLK_CTRL_USC_CLRMSK                        (IMG_UINT64_C(0XFFFFFFFFFFCFFFFF))
+#define RGX_CR_CLK_CTRL_USC_OFF                           (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_USC_ON                            (IMG_UINT64_C(0x0000000000100000))
+#define RGX_CR_CLK_CTRL_USC_AUTO                          (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_CLK_CTRL_TLA_SHIFT                         (18U)
+#define RGX_CR_CLK_CTRL_TLA_CLRMSK                        (IMG_UINT64_C(0XFFFFFFFFFFF3FFFF))
+#define RGX_CR_CLK_CTRL_TLA_OFF                           (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_TLA_ON                            (IMG_UINT64_C(0x0000000000040000))
+#define RGX_CR_CLK_CTRL_TLA_AUTO                          (IMG_UINT64_C(0x0000000000080000))
+#define RGX_CR_CLK_CTRL_SLC_SHIFT                         (16U)
+#define RGX_CR_CLK_CTRL_SLC_CLRMSK                        (IMG_UINT64_C(0XFFFFFFFFFFFCFFFF))
+#define RGX_CR_CLK_CTRL_SLC_OFF                           (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_SLC_ON                            (IMG_UINT64_C(0x0000000000010000))
+#define RGX_CR_CLK_CTRL_SLC_AUTO                          (IMG_UINT64_C(0x0000000000020000))
+#define RGX_CR_CLK_CTRL_UVS_SHIFT                         (14U)
+#define RGX_CR_CLK_CTRL_UVS_CLRMSK                        (IMG_UINT64_C(0XFFFFFFFFFFFF3FFF))
+#define RGX_CR_CLK_CTRL_UVS_OFF                           (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_UVS_ON                            (IMG_UINT64_C(0x0000000000004000))
+#define RGX_CR_CLK_CTRL_UVS_AUTO                          (IMG_UINT64_C(0x0000000000008000))
+#define RGX_CR_CLK_CTRL_PDS_SHIFT                         (12U)
+#define RGX_CR_CLK_CTRL_PDS_CLRMSK                        (IMG_UINT64_C(0XFFFFFFFFFFFFCFFF))
+#define RGX_CR_CLK_CTRL_PDS_OFF                           (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_PDS_ON                            (IMG_UINT64_C(0x0000000000001000))
+#define RGX_CR_CLK_CTRL_PDS_AUTO                          (IMG_UINT64_C(0x0000000000002000))
+#define RGX_CR_CLK_CTRL_VDM_SHIFT                         (10U)
+#define RGX_CR_CLK_CTRL_VDM_CLRMSK                        (IMG_UINT64_C(0XFFFFFFFFFFFFF3FF))
+#define RGX_CR_CLK_CTRL_VDM_OFF                           (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_VDM_ON                            (IMG_UINT64_C(0x0000000000000400))
+#define RGX_CR_CLK_CTRL_VDM_AUTO                          (IMG_UINT64_C(0x0000000000000800))
+#define RGX_CR_CLK_CTRL_PM_SHIFT                          (8U)
+#define RGX_CR_CLK_CTRL_PM_CLRMSK                         (IMG_UINT64_C(0XFFFFFFFFFFFFFCFF))
+#define RGX_CR_CLK_CTRL_PM_OFF                            (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_PM_ON                             (IMG_UINT64_C(0x0000000000000100))
+#define RGX_CR_CLK_CTRL_PM_AUTO                           (IMG_UINT64_C(0x0000000000000200))
+#define RGX_CR_CLK_CTRL_GPP_SHIFT                         (6U)
+#define RGX_CR_CLK_CTRL_GPP_CLRMSK                        (IMG_UINT64_C(0XFFFFFFFFFFFFFF3F))
+#define RGX_CR_CLK_CTRL_GPP_OFF                           (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_GPP_ON                            (IMG_UINT64_C(0x0000000000000040))
+#define RGX_CR_CLK_CTRL_GPP_AUTO                          (IMG_UINT64_C(0x0000000000000080))
+#define RGX_CR_CLK_CTRL_TE_SHIFT                          (4U)
+#define RGX_CR_CLK_CTRL_TE_CLRMSK                         (IMG_UINT64_C(0XFFFFFFFFFFFFFFCF))
+#define RGX_CR_CLK_CTRL_TE_OFF                            (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_TE_ON                             (IMG_UINT64_C(0x0000000000000010))
+#define RGX_CR_CLK_CTRL_TE_AUTO                           (IMG_UINT64_C(0x0000000000000020))
+#define RGX_CR_CLK_CTRL_TSP_SHIFT                         (2U)
+#define RGX_CR_CLK_CTRL_TSP_CLRMSK                        (IMG_UINT64_C(0XFFFFFFFFFFFFFFF3))
+#define RGX_CR_CLK_CTRL_TSP_OFF                           (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_TSP_ON                            (IMG_UINT64_C(0x0000000000000004))
+#define RGX_CR_CLK_CTRL_TSP_AUTO                          (IMG_UINT64_C(0x0000000000000008))
+#define RGX_CR_CLK_CTRL_ISP_SHIFT                         (0U)
+#define RGX_CR_CLK_CTRL_ISP_CLRMSK                        (IMG_UINT64_C(0XFFFFFFFFFFFFFFFC))
+#define RGX_CR_CLK_CTRL_ISP_OFF                           (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_ISP_ON                            (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_CLK_CTRL_ISP_AUTO                          (IMG_UINT64_C(0x0000000000000002))
+#endif /* !defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) */
+
+
+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE)
+/*
+    Register RGX_CR_CLK_STATUS
+*/
+#define RGX_CR_CLK_STATUS                                 (0x0008U)
+#define RGX_CR_CLK_STATUS_MASKFULL                        (IMG_UINT64_C(0x00000001FF907773))
+#define RGX_CR_CLK_STATUS_MCU_FBTC_SHIFT                  (32U)
+#define RGX_CR_CLK_STATUS_MCU_FBTC_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFEFFFFFFFF))
+#define RGX_CR_CLK_STATUS_MCU_FBTC_GATED                  (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_MCU_FBTC_RUNNING                (IMG_UINT64_C(0x0000000100000000))
+#if defined(RGX_FEATURE_CLUSTER_GROUPING)
+#define RGX_CR_CLK_STATUS_BIF_TEXAS_SHIFT                 (31U)
+#define RGX_CR_CLK_STATUS_BIF_TEXAS_CLRMSK                (IMG_UINT64_C(0XFFFFFFFF7FFFFFFF))
+#define RGX_CR_CLK_STATUS_BIF_TEXAS_GATED                 (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_BIF_TEXAS_RUNNING               (IMG_UINT64_C(0x0000000080000000))
+#define RGX_CR_CLK_STATUS_IPP_SHIFT                       (30U)
+#define RGX_CR_CLK_STATUS_IPP_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFBFFFFFFF))
+#define RGX_CR_CLK_STATUS_IPP_GATED                       (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_IPP_RUNNING                     (IMG_UINT64_C(0x0000000040000000))
+#endif /* RGX_FEATURE_CLUSTER_GROUPING */
+
+#define RGX_CR_CLK_STATUS_FBC_SHIFT                       (29U)
+#define RGX_CR_CLK_STATUS_FBC_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFDFFFFFFF))
+#define RGX_CR_CLK_STATUS_FBC_GATED                       (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_FBC_RUNNING                     (IMG_UINT64_C(0x0000000020000000))
+#define RGX_CR_CLK_STATUS_FBDC_SHIFT                      (28U)
+#define RGX_CR_CLK_STATUS_FBDC_CLRMSK                     (IMG_UINT64_C(0XFFFFFFFFEFFFFFFF))
+#define RGX_CR_CLK_STATUS_FBDC_GATED                      (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_FBDC_RUNNING                    (IMG_UINT64_C(0x0000000010000000))
+#define RGX_CR_CLK_STATUS_FB_TLCACHE_SHIFT                (27U)
+#define RGX_CR_CLK_STATUS_FB_TLCACHE_CLRMSK               (IMG_UINT64_C(0XFFFFFFFFF7FFFFFF))
+#define RGX_CR_CLK_STATUS_FB_TLCACHE_GATED                (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_FB_TLCACHE_RUNNING              (IMG_UINT64_C(0x0000000008000000))
+#define RGX_CR_CLK_STATUS_USCS_SHIFT                      (26U)
+#define RGX_CR_CLK_STATUS_USCS_CLRMSK                     (IMG_UINT64_C(0XFFFFFFFFFBFFFFFF))
+#define RGX_CR_CLK_STATUS_USCS_GATED                      (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_USCS_RUNNING                    (IMG_UINT64_C(0x0000000004000000))
+#define RGX_CR_CLK_STATUS_PBE_SHIFT                       (25U)
+#define RGX_CR_CLK_STATUS_PBE_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFDFFFFFF))
+#define RGX_CR_CLK_STATUS_PBE_GATED                       (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_PBE_RUNNING                     (IMG_UINT64_C(0x0000000002000000))
+#define RGX_CR_CLK_STATUS_MCU_L1_SHIFT                    (24U)
+#define RGX_CR_CLK_STATUS_MCU_L1_CLRMSK                   (IMG_UINT64_C(0XFFFFFFFFFEFFFFFF))
+#define RGX_CR_CLK_STATUS_MCU_L1_GATED                    (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_MCU_L1_RUNNING                  (IMG_UINT64_C(0x0000000001000000))
+#define RGX_CR_CLK_STATUS_CDM_SHIFT                       (23U)
+#define RGX_CR_CLK_STATUS_CDM_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFF7FFFFF))
+#define RGX_CR_CLK_STATUS_CDM_GATED                       (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_CDM_RUNNING                     (IMG_UINT64_C(0x0000000000800000))
+#define RGX_CR_CLK_STATUS_BIF_SHIFT                       (20U)
+#define RGX_CR_CLK_STATUS_BIF_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFFEFFFFF))
+#define RGX_CR_CLK_STATUS_BIF_GATED                       (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_BIF_RUNNING                     (IMG_UINT64_C(0x0000000000100000))
+#define RGX_CR_CLK_STATUS_TPU_MCU_DEMUX_SHIFT             (14U)
+#define RGX_CR_CLK_STATUS_TPU_MCU_DEMUX_CLRMSK            (IMG_UINT64_C(0XFFFFFFFFFFFFBFFF))
+#define RGX_CR_CLK_STATUS_TPU_MCU_DEMUX_GATED             (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_TPU_MCU_DEMUX_RUNNING           (IMG_UINT64_C(0x0000000000004000))
+#define RGX_CR_CLK_STATUS_MCU_L0_SHIFT                    (13U)
+#define RGX_CR_CLK_STATUS_MCU_L0_CLRMSK                   (IMG_UINT64_C(0XFFFFFFFFFFFFDFFF))
+#define RGX_CR_CLK_STATUS_MCU_L0_GATED                    (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_MCU_L0_RUNNING                  (IMG_UINT64_C(0x0000000000002000))
+#define RGX_CR_CLK_STATUS_TPU_SHIFT                       (12U)
+#define RGX_CR_CLK_STATUS_TPU_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFFFFEFFF))
+#define RGX_CR_CLK_STATUS_TPU_GATED                       (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_TPU_RUNNING                     (IMG_UINT64_C(0x0000000000001000))
+#define RGX_CR_CLK_STATUS_USC_SHIFT                       (10U)
+#define RGX_CR_CLK_STATUS_USC_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFFFFFBFF))
+#define RGX_CR_CLK_STATUS_USC_GATED                       (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_USC_RUNNING                     (IMG_UINT64_C(0x0000000000000400))
+#define RGX_CR_CLK_STATUS_TLA_SHIFT                       (9U)
+#define RGX_CR_CLK_STATUS_TLA_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFFFFFDFF))
+#define RGX_CR_CLK_STATUS_TLA_GATED                       (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_TLA_RUNNING                     (IMG_UINT64_C(0x0000000000000200))
+#define RGX_CR_CLK_STATUS_SLC_SHIFT                       (8U)
+#define RGX_CR_CLK_STATUS_SLC_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFFFFFEFF))
+#define RGX_CR_CLK_STATUS_SLC_GATED                       (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_SLC_RUNNING                     (IMG_UINT64_C(0x0000000000000100))
+#define RGX_CR_CLK_STATUS_PDS_SHIFT                       (6U)
+#define RGX_CR_CLK_STATUS_PDS_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFFFFFFBF))
+#define RGX_CR_CLK_STATUS_PDS_GATED                       (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_PDS_RUNNING                     (IMG_UINT64_C(0x0000000000000040))
+#define RGX_CR_CLK_STATUS_VDM_SHIFT                       (5U)
+#define RGX_CR_CLK_STATUS_VDM_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFFFFFFDF))
+#define RGX_CR_CLK_STATUS_VDM_GATED                       (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_VDM_RUNNING                     (IMG_UINT64_C(0x0000000000000020))
+#define RGX_CR_CLK_STATUS_PM_SHIFT                        (4U)
+#define RGX_CR_CLK_STATUS_PM_CLRMSK                       (IMG_UINT64_C(0XFFFFFFFFFFFFFFEF))
+#define RGX_CR_CLK_STATUS_PM_GATED                        (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_PM_RUNNING                      (IMG_UINT64_C(0x0000000000000010))
+#define RGX_CR_CLK_STATUS_TSP_SHIFT                       (1U)
+#define RGX_CR_CLK_STATUS_TSP_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define RGX_CR_CLK_STATUS_TSP_GATED                       (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_TSP_RUNNING                     (IMG_UINT64_C(0x0000000000000002))
+#define RGX_CR_CLK_STATUS_ISP_SHIFT                       (0U)
+#define RGX_CR_CLK_STATUS_ISP_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_CLK_STATUS_ISP_GATED                       (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_ISP_RUNNING                     (IMG_UINT64_C(0x0000000000000001))
+#endif /* RGX_FEATURE_S7_TOP_INFRASTRUCTURE */ 
+
+
+#if !defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE)
+/*
+    Register RGX_CR_CLK_STATUS
+*/
+#define RGX_CR_CLK_STATUS                                 (0x0008U)
+#define RGX_CR_CLK_STATUS_MASKFULL                        (IMG_UINT64_C(0x00000001FFF077FF))
+#define RGX_CR_CLK_STATUS_MCU_FBTC_SHIFT                  (32U)
+#define RGX_CR_CLK_STATUS_MCU_FBTC_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFEFFFFFFFF))
+#define RGX_CR_CLK_STATUS_MCU_FBTC_GATED                  (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_MCU_FBTC_RUNNING                (IMG_UINT64_C(0x0000000100000000))
+#if defined(RGX_FEATURE_CLUSTER_GROUPING)
+#define RGX_CR_CLK_STATUS_BIF_TEXAS_SHIFT                 (31U)
+#define RGX_CR_CLK_STATUS_BIF_TEXAS_CLRMSK                (IMG_UINT64_C(0XFFFFFFFF7FFFFFFF))
+#define RGX_CR_CLK_STATUS_BIF_TEXAS_GATED                 (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_BIF_TEXAS_RUNNING               (IMG_UINT64_C(0x0000000080000000))
+#define RGX_CR_CLK_STATUS_IPP_SHIFT                       (30U)
+#define RGX_CR_CLK_STATUS_IPP_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFBFFFFFFF))
+#define RGX_CR_CLK_STATUS_IPP_GATED                       (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_IPP_RUNNING                     (IMG_UINT64_C(0x0000000040000000))
+#endif /* RGX_FEATURE_CLUSTER_GROUPING */
+
+#define RGX_CR_CLK_STATUS_FBC_SHIFT                       (29U)
+#define RGX_CR_CLK_STATUS_FBC_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFDFFFFFFF))
+#define RGX_CR_CLK_STATUS_FBC_GATED                       (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_FBC_RUNNING                     (IMG_UINT64_C(0x0000000020000000))
+#define RGX_CR_CLK_STATUS_FBDC_SHIFT                      (28U)
+#define RGX_CR_CLK_STATUS_FBDC_CLRMSK                     (IMG_UINT64_C(0XFFFFFFFFEFFFFFFF))
+#define RGX_CR_CLK_STATUS_FBDC_GATED                      (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_FBDC_RUNNING                    (IMG_UINT64_C(0x0000000010000000))
+#define RGX_CR_CLK_STATUS_FB_TLCACHE_SHIFT                (27U)
+#define RGX_CR_CLK_STATUS_FB_TLCACHE_CLRMSK               (IMG_UINT64_C(0XFFFFFFFFF7FFFFFF))
+#define RGX_CR_CLK_STATUS_FB_TLCACHE_GATED                (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_FB_TLCACHE_RUNNING              (IMG_UINT64_C(0x0000000008000000))
+#define RGX_CR_CLK_STATUS_USCS_SHIFT                      (26U)
+#define RGX_CR_CLK_STATUS_USCS_CLRMSK                     (IMG_UINT64_C(0XFFFFFFFFFBFFFFFF))
+#define RGX_CR_CLK_STATUS_USCS_GATED                      (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_USCS_RUNNING                    (IMG_UINT64_C(0x0000000004000000))
+#define RGX_CR_CLK_STATUS_PBE_SHIFT                       (25U)
+#define RGX_CR_CLK_STATUS_PBE_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFDFFFFFF))
+#define RGX_CR_CLK_STATUS_PBE_GATED                       (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_PBE_RUNNING                     (IMG_UINT64_C(0x0000000002000000))
+#define RGX_CR_CLK_STATUS_MCU_L1_SHIFT                    (24U)
+#define RGX_CR_CLK_STATUS_MCU_L1_CLRMSK                   (IMG_UINT64_C(0XFFFFFFFFFEFFFFFF))
+#define RGX_CR_CLK_STATUS_MCU_L1_GATED                    (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_MCU_L1_RUNNING                  (IMG_UINT64_C(0x0000000001000000))
+#define RGX_CR_CLK_STATUS_CDM_SHIFT                       (23U)
+#define RGX_CR_CLK_STATUS_CDM_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFF7FFFFF))
+#define RGX_CR_CLK_STATUS_CDM_GATED                       (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_CDM_RUNNING                     (IMG_UINT64_C(0x0000000000800000))
+#define RGX_CR_CLK_STATUS_SIDEKICK_SHIFT                  (22U)
+#define RGX_CR_CLK_STATUS_SIDEKICK_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFFBFFFFF))
+#define RGX_CR_CLK_STATUS_SIDEKICK_GATED                  (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_SIDEKICK_RUNNING                (IMG_UINT64_C(0x0000000000400000))
+#define RGX_CR_CLK_STATUS_BIF_SIDEKICK_SHIFT              (21U)
+#define RGX_CR_CLK_STATUS_BIF_SIDEKICK_CLRMSK             (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define RGX_CR_CLK_STATUS_BIF_SIDEKICK_GATED              (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_BIF_SIDEKICK_RUNNING            (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_CLK_STATUS_BIF_SHIFT                       (20U)
+#define RGX_CR_CLK_STATUS_BIF_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFFEFFFFF))
+#define RGX_CR_CLK_STATUS_BIF_GATED                       (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_BIF_RUNNING                     (IMG_UINT64_C(0x0000000000100000))
+#define RGX_CR_CLK_STATUS_TPU_MCU_DEMUX_SHIFT             (14U)
+#define RGX_CR_CLK_STATUS_TPU_MCU_DEMUX_CLRMSK            (IMG_UINT64_C(0XFFFFFFFFFFFFBFFF))
+#define RGX_CR_CLK_STATUS_TPU_MCU_DEMUX_GATED             (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_TPU_MCU_DEMUX_RUNNING           (IMG_UINT64_C(0x0000000000004000))
+#define RGX_CR_CLK_STATUS_MCU_L0_SHIFT                    (13U)
+#define RGX_CR_CLK_STATUS_MCU_L0_CLRMSK                   (IMG_UINT64_C(0XFFFFFFFFFFFFDFFF))
+#define RGX_CR_CLK_STATUS_MCU_L0_GATED                    (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_MCU_L0_RUNNING                  (IMG_UINT64_C(0x0000000000002000))
+#define RGX_CR_CLK_STATUS_TPU_SHIFT                       (12U)
+#define RGX_CR_CLK_STATUS_TPU_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFFFFEFFF))
+#define RGX_CR_CLK_STATUS_TPU_GATED                       (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_TPU_RUNNING                     (IMG_UINT64_C(0x0000000000001000))
+#define RGX_CR_CLK_STATUS_USC_SHIFT                       (10U)
+#define RGX_CR_CLK_STATUS_USC_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFFFFFBFF))
+#define RGX_CR_CLK_STATUS_USC_GATED                       (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_USC_RUNNING                     (IMG_UINT64_C(0x0000000000000400))
+#define RGX_CR_CLK_STATUS_TLA_SHIFT                       (9U)
+#define RGX_CR_CLK_STATUS_TLA_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFFFFFDFF))
+#define RGX_CR_CLK_STATUS_TLA_GATED                       (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_TLA_RUNNING                     (IMG_UINT64_C(0x0000000000000200))
+#define RGX_CR_CLK_STATUS_SLC_SHIFT                       (8U)
+#define RGX_CR_CLK_STATUS_SLC_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFFFFFEFF))
+#define RGX_CR_CLK_STATUS_SLC_GATED                       (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_SLC_RUNNING                     (IMG_UINT64_C(0x0000000000000100))
+#define RGX_CR_CLK_STATUS_UVS_SHIFT                       (7U)
+#define RGX_CR_CLK_STATUS_UVS_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFFFFFF7F))
+#define RGX_CR_CLK_STATUS_UVS_GATED                       (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_UVS_RUNNING                     (IMG_UINT64_C(0x0000000000000080))
+#define RGX_CR_CLK_STATUS_PDS_SHIFT                       (6U)
+#define RGX_CR_CLK_STATUS_PDS_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFFFFFFBF))
+#define RGX_CR_CLK_STATUS_PDS_GATED                       (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_PDS_RUNNING                     (IMG_UINT64_C(0x0000000000000040))
+#define RGX_CR_CLK_STATUS_VDM_SHIFT                       (5U)
+#define RGX_CR_CLK_STATUS_VDM_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFFFFFFDF))
+#define RGX_CR_CLK_STATUS_VDM_GATED                       (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_VDM_RUNNING                     (IMG_UINT64_C(0x0000000000000020))
+#define RGX_CR_CLK_STATUS_PM_SHIFT                        (4U)
+#define RGX_CR_CLK_STATUS_PM_CLRMSK                       (IMG_UINT64_C(0XFFFFFFFFFFFFFFEF))
+#define RGX_CR_CLK_STATUS_PM_GATED                        (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_PM_RUNNING                      (IMG_UINT64_C(0x0000000000000010))
+#define RGX_CR_CLK_STATUS_GPP_SHIFT                       (3U)
+#define RGX_CR_CLK_STATUS_GPP_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define RGX_CR_CLK_STATUS_GPP_GATED                       (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_GPP_RUNNING                     (IMG_UINT64_C(0x0000000000000008))
+#define RGX_CR_CLK_STATUS_TE_SHIFT                        (2U)
+#define RGX_CR_CLK_STATUS_TE_CLRMSK                       (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define RGX_CR_CLK_STATUS_TE_GATED                        (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_TE_RUNNING                      (IMG_UINT64_C(0x0000000000000004))
+#define RGX_CR_CLK_STATUS_TSP_SHIFT                       (1U)
+#define RGX_CR_CLK_STATUS_TSP_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define RGX_CR_CLK_STATUS_TSP_GATED                       (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_TSP_RUNNING                     (IMG_UINT64_C(0x0000000000000002))
+#define RGX_CR_CLK_STATUS_ISP_SHIFT                       (0U)
+#define RGX_CR_CLK_STATUS_ISP_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_CLK_STATUS_ISP_GATED                       (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_ISP_RUNNING                     (IMG_UINT64_C(0x0000000000000001))
+#endif /* !defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) */
+
+
+/*
+    Register RGX_CR_CORE_ID
+*/
+#define RGX_CR_CORE_ID                                    (0x0018U)
+#define RGX_CR_CORE_ID_MASKFULL                           (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_CORE_ID_ID_SHIFT                           (16U)
+#define RGX_CR_CORE_ID_ID_CLRMSK                          (0X0000FFFFU)
+#define RGX_CR_CORE_ID_CONFIG_SHIFT                       (0U)
+#define RGX_CR_CORE_ID_CONFIG_CLRMSK                      (0XFFFF0000U)
+
+
+/*
+    Register RGX_CR_CORE_REVISION
+*/
+#define RGX_CR_CORE_REVISION                              (0x0020U)
+#define RGX_CR_CORE_REVISION_MASKFULL                     (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_CORE_REVISION_DESIGNER_SHIFT               (24U)
+#define RGX_CR_CORE_REVISION_DESIGNER_CLRMSK              (0X00FFFFFFU)
+#define RGX_CR_CORE_REVISION_MAJOR_SHIFT                  (16U)
+#define RGX_CR_CORE_REVISION_MAJOR_CLRMSK                 (0XFF00FFFFU)
+#define RGX_CR_CORE_REVISION_MINOR_SHIFT                  (8U)
+#define RGX_CR_CORE_REVISION_MINOR_CLRMSK                 (0XFFFF00FFU)
+#define RGX_CR_CORE_REVISION_MAINTENANCE_SHIFT            (0U)
+#define RGX_CR_CORE_REVISION_MAINTENANCE_CLRMSK           (0XFFFFFF00U)
+
+
+/*
+    Register RGX_CR_DESIGNER_REV_FIELD1
+*/
+#define RGX_CR_DESIGNER_REV_FIELD1                        (0x0028U)
+#define RGX_CR_DESIGNER_REV_FIELD1_MASKFULL               (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_SHIFT (0U)
+#define RGX_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_CLRMSK (00000000U)
+
+
+/*
+    Register RGX_CR_DESIGNER_REV_FIELD2
+*/
+#define RGX_CR_DESIGNER_REV_FIELD2                        (0x0030U)
+#define RGX_CR_DESIGNER_REV_FIELD2_MASKFULL               (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_SHIFT (0U)
+#define RGX_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_CLRMSK (00000000U)
+
+
+/*
+    Register RGX_CR_CHANGESET_NUMBER
+*/
+#define RGX_CR_CHANGESET_NUMBER                           (0x0040U)
+#define RGX_CR_CHANGESET_NUMBER_MASKFULL                  (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_CHANGESET_NUMBER_CHANGESET_NUMBER_SHIFT    (0U)
+#define RGX_CR_CHANGESET_NUMBER_CHANGESET_NUMBER_CLRMSK   (IMG_UINT64_C(0000000000000000))
+
+
+/*
+    Register RGX_CR_CLK_XTPLUS_CTRL
+*/
+#define RGX_CR_CLK_XTPLUS_CTRL                            (0x0080U)
+#define RGX_CR_CLK_XTPLUS_CTRL_MASKFULL                   (IMG_UINT64_C(0x0000000FFFFFFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_ASTC_SHIFT                 (34U)
+#define RGX_CR_CLK_XTPLUS_CTRL_ASTC_CLRMSK                (IMG_UINT64_C(0XFFFFFFF3FFFFFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_ASTC_OFF                   (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_ASTC_ON                    (IMG_UINT64_C(0x0000000400000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_ASTC_AUTO                  (IMG_UINT64_C(0x0000000800000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_IPF_SHIFT                  (32U)
+#define RGX_CR_CLK_XTPLUS_CTRL_IPF_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFCFFFFFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_IPF_OFF                    (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_IPF_ON                     (IMG_UINT64_C(0x0000000100000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_IPF_AUTO                   (IMG_UINT64_C(0x0000000200000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_COMPUTE_SHIFT              (30U)
+#define RGX_CR_CLK_XTPLUS_CTRL_COMPUTE_CLRMSK             (IMG_UINT64_C(0XFFFFFFFF3FFFFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_COMPUTE_OFF                (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_COMPUTE_ON                 (IMG_UINT64_C(0x0000000040000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_COMPUTE_AUTO               (IMG_UINT64_C(0x0000000080000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_PIXEL_SHIFT                (28U)
+#define RGX_CR_CLK_XTPLUS_CTRL_PIXEL_CLRMSK               (IMG_UINT64_C(0XFFFFFFFFCFFFFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_PIXEL_OFF                  (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_PIXEL_ON                   (IMG_UINT64_C(0x0000000010000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_PIXEL_AUTO                 (IMG_UINT64_C(0x0000000020000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_VERTEX_SHIFT               (26U)
+#define RGX_CR_CLK_XTPLUS_CTRL_VERTEX_CLRMSK              (IMG_UINT64_C(0XFFFFFFFFF3FFFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_VERTEX_OFF                 (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_VERTEX_ON                  (IMG_UINT64_C(0x0000000004000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_VERTEX_AUTO                (IMG_UINT64_C(0x0000000008000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_USCPS_SHIFT                (24U)
+#define RGX_CR_CLK_XTPLUS_CTRL_USCPS_CLRMSK               (IMG_UINT64_C(0XFFFFFFFFFCFFFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_USCPS_OFF                  (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_USCPS_ON                   (IMG_UINT64_C(0x0000000001000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_USCPS_AUTO                 (IMG_UINT64_C(0x0000000002000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_PDS_SHARED_SHIFT           (22U)
+#define RGX_CR_CLK_XTPLUS_CTRL_PDS_SHARED_CLRMSK          (IMG_UINT64_C(0XFFFFFFFFFF3FFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_PDS_SHARED_OFF             (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_PDS_SHARED_ON              (IMG_UINT64_C(0x0000000000400000))
+#define RGX_CR_CLK_XTPLUS_CTRL_PDS_SHARED_AUTO            (IMG_UINT64_C(0x0000000000800000))
+#define RGX_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_SHIFT       (20U)
+#define RGX_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFFCFFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_OFF         (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_ON          (IMG_UINT64_C(0x0000000000100000))
+#define RGX_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_AUTO        (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_CLK_XTPLUS_CTRL_USC_SHARED_SHIFT           (18U)
+#define RGX_CR_CLK_XTPLUS_CTRL_USC_SHARED_CLRMSK          (IMG_UINT64_C(0XFFFFFFFFFFF3FFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_USC_SHARED_OFF             (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_USC_SHARED_ON              (IMG_UINT64_C(0x0000000000040000))
+#define RGX_CR_CLK_XTPLUS_CTRL_USC_SHARED_AUTO            (IMG_UINT64_C(0x0000000000080000))
+#define RGX_CR_CLK_XTPLUS_CTRL_GEOMETRY_SHIFT             (16U)
+#define RGX_CR_CLK_XTPLUS_CTRL_GEOMETRY_CLRMSK            (IMG_UINT64_C(0XFFFFFFFFFFFCFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_GEOMETRY_OFF               (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_GEOMETRY_ON                (IMG_UINT64_C(0x0000000000010000))
+#define RGX_CR_CLK_XTPLUS_CTRL_GEOMETRY_AUTO              (IMG_UINT64_C(0x0000000000020000))
+#define RGX_CR_CLK_XTPLUS_CTRL_RAST_SHIFT                 (14U)
+#define RGX_CR_CLK_XTPLUS_CTRL_RAST_CLRMSK                (IMG_UINT64_C(0XFFFFFFFFFFFF3FFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_RAST_OFF                   (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_RAST_ON                    (IMG_UINT64_C(0x0000000000004000))
+#define RGX_CR_CLK_XTPLUS_CTRL_RAST_AUTO                  (IMG_UINT64_C(0x0000000000008000))
+#define RGX_CR_CLK_XTPLUS_CTRL_UVB_SHIFT                  (12U)
+#define RGX_CR_CLK_XTPLUS_CTRL_UVB_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFFFFCFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_UVB_OFF                    (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_UVB_ON                     (IMG_UINT64_C(0x0000000000001000))
+#define RGX_CR_CLK_XTPLUS_CTRL_UVB_AUTO                   (IMG_UINT64_C(0x0000000000002000))
+#define RGX_CR_CLK_XTPLUS_CTRL_GPP_SHIFT                  (10U)
+#define RGX_CR_CLK_XTPLUS_CTRL_GPP_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFFFFF3FF))
+#define RGX_CR_CLK_XTPLUS_CTRL_GPP_OFF                    (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_GPP_ON                     (IMG_UINT64_C(0x0000000000000400))
+#define RGX_CR_CLK_XTPLUS_CTRL_GPP_AUTO                   (IMG_UINT64_C(0x0000000000000800))
+#define RGX_CR_CLK_XTPLUS_CTRL_VDM_PIPE_SHIFT             (8U)
+#define RGX_CR_CLK_XTPLUS_CTRL_VDM_PIPE_CLRMSK            (IMG_UINT64_C(0XFFFFFFFFFFFFFCFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_VDM_PIPE_OFF               (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_VDM_PIPE_ON                (IMG_UINT64_C(0x0000000000000100))
+#define RGX_CR_CLK_XTPLUS_CTRL_VDM_PIPE_AUTO              (IMG_UINT64_C(0x0000000000000200))
+#define RGX_CR_CLK_XTPLUS_CTRL_VDM_SHIFT                  (6U)
+#define RGX_CR_CLK_XTPLUS_CTRL_VDM_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFFFFFF3F))
+#define RGX_CR_CLK_XTPLUS_CTRL_VDM_OFF                    (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_VDM_ON                     (IMG_UINT64_C(0x0000000000000040))
+#define RGX_CR_CLK_XTPLUS_CTRL_VDM_AUTO                   (IMG_UINT64_C(0x0000000000000080))
+#define RGX_CR_CLK_XTPLUS_CTRL_TE3_SHIFT                  (4U)
+#define RGX_CR_CLK_XTPLUS_CTRL_TE3_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFFFFFFCF))
+#define RGX_CR_CLK_XTPLUS_CTRL_TE3_OFF                    (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_TE3_ON                     (IMG_UINT64_C(0x0000000000000010))
+#define RGX_CR_CLK_XTPLUS_CTRL_TE3_AUTO                   (IMG_UINT64_C(0x0000000000000020))
+#define RGX_CR_CLK_XTPLUS_CTRL_VCE_SHIFT                  (2U)
+#define RGX_CR_CLK_XTPLUS_CTRL_VCE_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFFFFFFF3))
+#define RGX_CR_CLK_XTPLUS_CTRL_VCE_OFF                    (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_VCE_ON                     (IMG_UINT64_C(0x0000000000000004))
+#define RGX_CR_CLK_XTPLUS_CTRL_VCE_AUTO                   (IMG_UINT64_C(0x0000000000000008))
+#define RGX_CR_CLK_XTPLUS_CTRL_VBS_SHIFT                  (0U)
+#define RGX_CR_CLK_XTPLUS_CTRL_VBS_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFFFFFFFC))
+#define RGX_CR_CLK_XTPLUS_CTRL_VBS_OFF                    (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_VBS_ON                     (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_CLK_XTPLUS_CTRL_VBS_AUTO                   (IMG_UINT64_C(0x0000000000000002))
+
+
+/*
+    Register RGX_CR_CLK_XTPLUS_STATUS
+*/
+#define RGX_CR_CLK_XTPLUS_STATUS                          (0x0088U)
+#define RGX_CR_CLK_XTPLUS_STATUS_MASKFULL                 (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_CLK_XTPLUS_STATUS_ASTC_SHIFT               (7U)
+#define RGX_CR_CLK_XTPLUS_STATUS_ASTC_CLRMSK              (IMG_UINT64_C(0XFFFFFFFFFFFFFF7F))
+#define RGX_CR_CLK_XTPLUS_STATUS_ASTC_GATED               (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_STATUS_ASTC_RUNNING             (IMG_UINT64_C(0x0000000000000080))
+#define RGX_CR_CLK_XTPLUS_STATUS_UVB_SHIFT                (6U)
+#define RGX_CR_CLK_XTPLUS_STATUS_UVB_CLRMSK               (IMG_UINT64_C(0XFFFFFFFFFFFFFFBF))
+#define RGX_CR_CLK_XTPLUS_STATUS_UVB_GATED                (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_STATUS_UVB_RUNNING              (IMG_UINT64_C(0x0000000000000040))
+#define RGX_CR_CLK_XTPLUS_STATUS_GPP_SHIFT                (5U)
+#define RGX_CR_CLK_XTPLUS_STATUS_GPP_CLRMSK               (IMG_UINT64_C(0XFFFFFFFFFFFFFFDF))
+#define RGX_CR_CLK_XTPLUS_STATUS_GPP_GATED                (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_STATUS_GPP_RUNNING              (IMG_UINT64_C(0x0000000000000020))
+#define RGX_CR_CLK_XTPLUS_STATUS_VDM_PIPE_SHIFT           (4U)
+#define RGX_CR_CLK_XTPLUS_STATUS_VDM_PIPE_CLRMSK          (IMG_UINT64_C(0XFFFFFFFFFFFFFFEF))
+#define RGX_CR_CLK_XTPLUS_STATUS_VDM_PIPE_GATED           (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_STATUS_VDM_PIPE_RUNNING         (IMG_UINT64_C(0x0000000000000010))
+#define RGX_CR_CLK_XTPLUS_STATUS_VDM_SHIFT                (3U)
+#define RGX_CR_CLK_XTPLUS_STATUS_VDM_CLRMSK               (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define RGX_CR_CLK_XTPLUS_STATUS_VDM_GATED                (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_STATUS_VDM_RUNNING              (IMG_UINT64_C(0x0000000000000008))
+#define RGX_CR_CLK_XTPLUS_STATUS_TE3_SHIFT                (2U)
+#define RGX_CR_CLK_XTPLUS_STATUS_TE3_CLRMSK               (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define RGX_CR_CLK_XTPLUS_STATUS_TE3_GATED                (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_STATUS_TE3_RUNNING              (IMG_UINT64_C(0x0000000000000004))
+#define RGX_CR_CLK_XTPLUS_STATUS_VCE_SHIFT                (1U)
+#define RGX_CR_CLK_XTPLUS_STATUS_VCE_CLRMSK               (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define RGX_CR_CLK_XTPLUS_STATUS_VCE_GATED                (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_STATUS_VCE_RUNNING              (IMG_UINT64_C(0x0000000000000002))
+#define RGX_CR_CLK_XTPLUS_STATUS_VBS_SHIFT                (0U)
+#define RGX_CR_CLK_XTPLUS_STATUS_VBS_CLRMSK               (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_CLK_XTPLUS_STATUS_VBS_GATED                (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_STATUS_VBS_RUNNING              (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+    Register RGX_CR_SOFT_RESET
+*/
+#define RGX_CR_SOFT_RESET                                 (0x0100U)
+#define RGX_CR_SOFT_RESET_MASKFULL                        (IMG_UINT64_C(0xFFE7FFFFFFFFFC1D))
+#if defined(RGX_FEATURE_CLUSTER_GROUPING)
+#define RGX_CR_SOFT_RESET_PHANTOM3_CORE_SHIFT             (63U)
+#define RGX_CR_SOFT_RESET_PHANTOM3_CORE_CLRMSK            (IMG_UINT64_C(0X7FFFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_PHANTOM3_CORE_EN                (IMG_UINT64_C(0X8000000000000000))
+#define RGX_CR_SOFT_RESET_PHANTOM2_CORE_SHIFT             (62U)
+#define RGX_CR_SOFT_RESET_PHANTOM2_CORE_CLRMSK            (IMG_UINT64_C(0XBFFFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_PHANTOM2_CORE_EN                (IMG_UINT64_C(0X4000000000000000))
+#define RGX_CR_SOFT_RESET_BERNADO2_CORE_SHIFT             (61U)
+#define RGX_CR_SOFT_RESET_BERNADO2_CORE_CLRMSK            (IMG_UINT64_C(0XDFFFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_BERNADO2_CORE_EN                (IMG_UINT64_C(0X2000000000000000))
+#endif /* RGX_FEATURE_CLUSTER_GROUPING */
+
+#define RGX_CR_SOFT_RESET_JONES_CORE_SHIFT                (60U)
+#define RGX_CR_SOFT_RESET_JONES_CORE_CLRMSK               (IMG_UINT64_C(0XEFFFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_JONES_CORE_EN                   (IMG_UINT64_C(0X1000000000000000))
+#define RGX_CR_SOFT_RESET_TILING_CORE_SHIFT               (59U)
+#define RGX_CR_SOFT_RESET_TILING_CORE_CLRMSK              (IMG_UINT64_C(0XF7FFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_TILING_CORE_EN                  (IMG_UINT64_C(0X0800000000000000))
+#define RGX_CR_SOFT_RESET_TE3_SHIFT                       (58U)
+#define RGX_CR_SOFT_RESET_TE3_CLRMSK                      (IMG_UINT64_C(0XFBFFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_TE3_EN                          (IMG_UINT64_C(0X0400000000000000))
+#define RGX_CR_SOFT_RESET_VCE_SHIFT                       (57U)
+#define RGX_CR_SOFT_RESET_VCE_CLRMSK                      (IMG_UINT64_C(0XFDFFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_VCE_EN                          (IMG_UINT64_C(0X0200000000000000))
+#define RGX_CR_SOFT_RESET_VBS_SHIFT                       (56U)
+#define RGX_CR_SOFT_RESET_VBS_CLRMSK                      (IMG_UINT64_C(0XFEFFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_VBS_EN                          (IMG_UINT64_C(0X0100000000000000))
+#if defined(RGX_FEATURE_RAY_TRACING)
+#define RGX_CR_SOFT_RESET_DPX1_CORE_SHIFT                 (55U)
+#define RGX_CR_SOFT_RESET_DPX1_CORE_CLRMSK                (IMG_UINT64_C(0XFF7FFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_DPX1_CORE_EN                    (IMG_UINT64_C(0X0080000000000000))
+#define RGX_CR_SOFT_RESET_DPX0_CORE_SHIFT                 (54U)
+#define RGX_CR_SOFT_RESET_DPX0_CORE_CLRMSK                (IMG_UINT64_C(0XFFBFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_DPX0_CORE_EN                    (IMG_UINT64_C(0X0040000000000000))
+#define RGX_CR_SOFT_RESET_FBA_SHIFT                       (53U)
+#define RGX_CR_SOFT_RESET_FBA_CLRMSK                      (IMG_UINT64_C(0XFFDFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_FBA_EN                          (IMG_UINT64_C(0X0020000000000000))
+#define RGX_CR_SOFT_RESET_SH_SHIFT                        (50U)
+#define RGX_CR_SOFT_RESET_SH_CLRMSK                       (IMG_UINT64_C(0XFFFBFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_SH_EN                           (IMG_UINT64_C(0X0004000000000000))
+#define RGX_CR_SOFT_RESET_VRDM_SHIFT                      (49U)
+#define RGX_CR_SOFT_RESET_VRDM_CLRMSK                     (IMG_UINT64_C(0XFFFDFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_VRDM_EN                         (IMG_UINT64_C(0X0002000000000000))
+#endif /* RGX_FEATURE_RAY_TRACING */
+
+#define RGX_CR_SOFT_RESET_MCU_FBTC_SHIFT                  (48U)
+#define RGX_CR_SOFT_RESET_MCU_FBTC_CLRMSK                 (IMG_UINT64_C(0XFFFEFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_MCU_FBTC_EN                     (IMG_UINT64_C(0X0001000000000000))
+#if defined(RGX_FEATURE_CLUSTER_GROUPING)
+#define RGX_CR_SOFT_RESET_PHANTOM1_CORE_SHIFT             (47U)
+#define RGX_CR_SOFT_RESET_PHANTOM1_CORE_CLRMSK            (IMG_UINT64_C(0XFFFF7FFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_PHANTOM1_CORE_EN                (IMG_UINT64_C(0X0000800000000000))
+#define RGX_CR_SOFT_RESET_PHANTOM0_CORE_SHIFT             (46U)
+#define RGX_CR_SOFT_RESET_PHANTOM0_CORE_CLRMSK            (IMG_UINT64_C(0XFFFFBFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_PHANTOM0_CORE_EN                (IMG_UINT64_C(0X0000400000000000))
+#define RGX_CR_SOFT_RESET_BERNADO1_CORE_SHIFT             (45U)
+#define RGX_CR_SOFT_RESET_BERNADO1_CORE_CLRMSK            (IMG_UINT64_C(0XFFFFDFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_BERNADO1_CORE_EN                (IMG_UINT64_C(0X0000200000000000))
+#define RGX_CR_SOFT_RESET_BERNADO0_CORE_SHIFT             (44U)
+#define RGX_CR_SOFT_RESET_BERNADO0_CORE_CLRMSK            (IMG_UINT64_C(0XFFFFEFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_BERNADO0_CORE_EN                (IMG_UINT64_C(0X0000100000000000))
+#define RGX_CR_SOFT_RESET_IPP_SHIFT                       (43U)
+#define RGX_CR_SOFT_RESET_IPP_CLRMSK                      (IMG_UINT64_C(0XFFFFF7FFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_IPP_EN                          (IMG_UINT64_C(0X0000080000000000))
+#define RGX_CR_SOFT_RESET_BIF_TEXAS_SHIFT                 (42U)
+#define RGX_CR_SOFT_RESET_BIF_TEXAS_CLRMSK                (IMG_UINT64_C(0XFFFFFBFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_BIF_TEXAS_EN                    (IMG_UINT64_C(0X0000040000000000))
+#define RGX_CR_SOFT_RESET_TORNADO_CORE_SHIFT              (41U)
+#define RGX_CR_SOFT_RESET_TORNADO_CORE_CLRMSK             (IMG_UINT64_C(0XFFFFFDFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_TORNADO_CORE_EN                 (IMG_UINT64_C(0X0000020000000000))
+#endif /* RGX_FEATURE_CLUSTER_GROUPING */
+
+#define RGX_CR_SOFT_RESET_DUST_H_CORE_SHIFT               (40U)
+#define RGX_CR_SOFT_RESET_DUST_H_CORE_CLRMSK              (IMG_UINT64_C(0XFFFFFEFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_DUST_H_CORE_EN                  (IMG_UINT64_C(0X0000010000000000))
+#define RGX_CR_SOFT_RESET_DUST_G_CORE_SHIFT               (39U)
+#define RGX_CR_SOFT_RESET_DUST_G_CORE_CLRMSK              (IMG_UINT64_C(0XFFFFFF7FFFFFFFFF))
+#define RGX_CR_SOFT_RESET_DUST_G_CORE_EN                  (IMG_UINT64_C(0X0000008000000000))
+#define RGX_CR_SOFT_RESET_DUST_F_CORE_SHIFT               (38U)
+#define RGX_CR_SOFT_RESET_DUST_F_CORE_CLRMSK              (IMG_UINT64_C(0XFFFFFFBFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_DUST_F_CORE_EN                  (IMG_UINT64_C(0X0000004000000000))
+#define RGX_CR_SOFT_RESET_DUST_E_CORE_SHIFT               (37U)
+#define RGX_CR_SOFT_RESET_DUST_E_CORE_CLRMSK              (IMG_UINT64_C(0XFFFFFFDFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_DUST_E_CORE_EN                  (IMG_UINT64_C(0X0000002000000000))
+#define RGX_CR_SOFT_RESET_DUST_D_CORE_SHIFT               (36U)
+#define RGX_CR_SOFT_RESET_DUST_D_CORE_CLRMSK              (IMG_UINT64_C(0XFFFFFFEFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_DUST_D_CORE_EN                  (IMG_UINT64_C(0X0000001000000000))
+#define RGX_CR_SOFT_RESET_DUST_C_CORE_SHIFT               (35U)
+#define RGX_CR_SOFT_RESET_DUST_C_CORE_CLRMSK              (IMG_UINT64_C(0XFFFFFFF7FFFFFFFF))
+#define RGX_CR_SOFT_RESET_DUST_C_CORE_EN                  (IMG_UINT64_C(0X0000000800000000))
+#define RGX_CR_SOFT_RESET_MMU_SHIFT                       (34U)
+#define RGX_CR_SOFT_RESET_MMU_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFBFFFFFFFF))
+#define RGX_CR_SOFT_RESET_MMU_EN                          (IMG_UINT64_C(0X0000000400000000))
+#define RGX_CR_SOFT_RESET_BIF1_SHIFT                      (33U)
+#define RGX_CR_SOFT_RESET_BIF1_CLRMSK                     (IMG_UINT64_C(0XFFFFFFFDFFFFFFFF))
+#define RGX_CR_SOFT_RESET_BIF1_EN                         (IMG_UINT64_C(0X0000000200000000))
+#define RGX_CR_SOFT_RESET_GARTEN_SHIFT                    (32U)
+#define RGX_CR_SOFT_RESET_GARTEN_CLRMSK                   (IMG_UINT64_C(0XFFFFFFFEFFFFFFFF))
+#define RGX_CR_SOFT_RESET_GARTEN_EN                       (IMG_UINT64_C(0X0000000100000000))
+#define RGX_CR_SOFT_RESET_RASCAL_CORE_SHIFT               (31U)
+#define RGX_CR_SOFT_RESET_RASCAL_CORE_CLRMSK              (IMG_UINT64_C(0XFFFFFFFF7FFFFFFF))
+#define RGX_CR_SOFT_RESET_RASCAL_CORE_EN                  (IMG_UINT64_C(0X0000000080000000))
+#define RGX_CR_SOFT_RESET_DUST_B_CORE_SHIFT               (30U)
+#define RGX_CR_SOFT_RESET_DUST_B_CORE_CLRMSK              (IMG_UINT64_C(0XFFFFFFFFBFFFFFFF))
+#define RGX_CR_SOFT_RESET_DUST_B_CORE_EN                  (IMG_UINT64_C(0X0000000040000000))
+#define RGX_CR_SOFT_RESET_DUST_A_CORE_SHIFT               (29U)
+#define RGX_CR_SOFT_RESET_DUST_A_CORE_CLRMSK              (IMG_UINT64_C(0XFFFFFFFFDFFFFFFF))
+#define RGX_CR_SOFT_RESET_DUST_A_CORE_EN                  (IMG_UINT64_C(0X0000000020000000))
+#define RGX_CR_SOFT_RESET_FB_TLCACHE_SHIFT                (28U)
+#define RGX_CR_SOFT_RESET_FB_TLCACHE_CLRMSK               (IMG_UINT64_C(0XFFFFFFFFEFFFFFFF))
+#define RGX_CR_SOFT_RESET_FB_TLCACHE_EN                   (IMG_UINT64_C(0X0000000010000000))
+#define RGX_CR_SOFT_RESET_SLC_SHIFT                       (27U)
+#define RGX_CR_SOFT_RESET_SLC_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFF7FFFFFF))
+#define RGX_CR_SOFT_RESET_SLC_EN                          (IMG_UINT64_C(0X0000000008000000))
+#define RGX_CR_SOFT_RESET_TLA_SHIFT                       (26U)
+#define RGX_CR_SOFT_RESET_TLA_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFBFFFFFF))
+#define RGX_CR_SOFT_RESET_TLA_EN                          (IMG_UINT64_C(0X0000000004000000))
+#define RGX_CR_SOFT_RESET_UVS_SHIFT                       (25U)
+#define RGX_CR_SOFT_RESET_UVS_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFDFFFFFF))
+#define RGX_CR_SOFT_RESET_UVS_EN                          (IMG_UINT64_C(0X0000000002000000))
+#define RGX_CR_SOFT_RESET_TE_SHIFT                        (24U)
+#define RGX_CR_SOFT_RESET_TE_CLRMSK                       (IMG_UINT64_C(0XFFFFFFFFFEFFFFFF))
+#define RGX_CR_SOFT_RESET_TE_EN                           (IMG_UINT64_C(0X0000000001000000))
+#define RGX_CR_SOFT_RESET_GPP_SHIFT                       (23U)
+#define RGX_CR_SOFT_RESET_GPP_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFF7FFFFF))
+#define RGX_CR_SOFT_RESET_GPP_EN                          (IMG_UINT64_C(0X0000000000800000))
+#define RGX_CR_SOFT_RESET_FBDC_SHIFT                      (22U)
+#define RGX_CR_SOFT_RESET_FBDC_CLRMSK                     (IMG_UINT64_C(0XFFFFFFFFFFBFFFFF))
+#define RGX_CR_SOFT_RESET_FBDC_EN                         (IMG_UINT64_C(0X0000000000400000))
+#define RGX_CR_SOFT_RESET_FBC_SHIFT                       (21U)
+#define RGX_CR_SOFT_RESET_FBC_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define RGX_CR_SOFT_RESET_FBC_EN                          (IMG_UINT64_C(0X0000000000200000))
+#define RGX_CR_SOFT_RESET_PM_SHIFT                        (20U)
+#define RGX_CR_SOFT_RESET_PM_CLRMSK                       (IMG_UINT64_C(0XFFFFFFFFFFEFFFFF))
+#define RGX_CR_SOFT_RESET_PM_EN                           (IMG_UINT64_C(0X0000000000100000))
+#define RGX_CR_SOFT_RESET_PBE_SHIFT                       (19U)
+#define RGX_CR_SOFT_RESET_PBE_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFFF7FFFF))
+#define RGX_CR_SOFT_RESET_PBE_EN                          (IMG_UINT64_C(0X0000000000080000))
+#define RGX_CR_SOFT_RESET_USC_SHARED_SHIFT                (18U)
+#define RGX_CR_SOFT_RESET_USC_SHARED_CLRMSK               (IMG_UINT64_C(0XFFFFFFFFFFFBFFFF))
+#define RGX_CR_SOFT_RESET_USC_SHARED_EN                   (IMG_UINT64_C(0X0000000000040000))
+#define RGX_CR_SOFT_RESET_MCU_L1_SHIFT                    (17U)
+#define RGX_CR_SOFT_RESET_MCU_L1_CLRMSK                   (IMG_UINT64_C(0XFFFFFFFFFFFDFFFF))
+#define RGX_CR_SOFT_RESET_MCU_L1_EN                       (IMG_UINT64_C(0X0000000000020000))
+#define RGX_CR_SOFT_RESET_BIF_SHIFT                       (16U)
+#define RGX_CR_SOFT_RESET_BIF_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFFFEFFFF))
+#define RGX_CR_SOFT_RESET_BIF_EN                          (IMG_UINT64_C(0X0000000000010000))
+#define RGX_CR_SOFT_RESET_CDM_SHIFT                       (15U)
+#define RGX_CR_SOFT_RESET_CDM_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFFFF7FFF))
+#define RGX_CR_SOFT_RESET_CDM_EN                          (IMG_UINT64_C(0X0000000000008000))
+#define RGX_CR_SOFT_RESET_VDM_SHIFT                       (14U)
+#define RGX_CR_SOFT_RESET_VDM_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFFFFBFFF))
+#define RGX_CR_SOFT_RESET_VDM_EN                          (IMG_UINT64_C(0X0000000000004000))
+#if defined(RGX_FEATURE_TESSELLATION)
+#define RGX_CR_SOFT_RESET_TESS_SHIFT                      (13U)
+#define RGX_CR_SOFT_RESET_TESS_CLRMSK                     (IMG_UINT64_C(0XFFFFFFFFFFFFDFFF))
+#define RGX_CR_SOFT_RESET_TESS_EN                         (IMG_UINT64_C(0X0000000000002000))
+#endif /* RGX_FEATURE_TESSELLATION */
+
+#define RGX_CR_SOFT_RESET_PDS_SHIFT                       (12U)
+#define RGX_CR_SOFT_RESET_PDS_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFFFFEFFF))
+#define RGX_CR_SOFT_RESET_PDS_EN                          (IMG_UINT64_C(0X0000000000001000))
+#define RGX_CR_SOFT_RESET_ISP_SHIFT                       (11U)
+#define RGX_CR_SOFT_RESET_ISP_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFFFFF7FF))
+#define RGX_CR_SOFT_RESET_ISP_EN                          (IMG_UINT64_C(0X0000000000000800))
+#define RGX_CR_SOFT_RESET_TSP_SHIFT                       (10U)
+#define RGX_CR_SOFT_RESET_TSP_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFFFFFBFF))
+#define RGX_CR_SOFT_RESET_TSP_EN                          (IMG_UINT64_C(0X0000000000000400))
+#define RGX_CR_SOFT_RESET_TPU_MCU_DEMUX_SHIFT             (4U)
+#define RGX_CR_SOFT_RESET_TPU_MCU_DEMUX_CLRMSK            (IMG_UINT64_C(0XFFFFFFFFFFFFFFEF))
+#define RGX_CR_SOFT_RESET_TPU_MCU_DEMUX_EN                (IMG_UINT64_C(0X0000000000000010))
+#define RGX_CR_SOFT_RESET_MCU_L0_SHIFT                    (3U)
+#define RGX_CR_SOFT_RESET_MCU_L0_CLRMSK                   (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define RGX_CR_SOFT_RESET_MCU_L0_EN                       (IMG_UINT64_C(0X0000000000000008))
+#define RGX_CR_SOFT_RESET_TPU_SHIFT                       (2U)
+#define RGX_CR_SOFT_RESET_TPU_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define RGX_CR_SOFT_RESET_TPU_EN                          (IMG_UINT64_C(0X0000000000000004))
+#define RGX_CR_SOFT_RESET_USC_SHIFT                       (0U)
+#define RGX_CR_SOFT_RESET_USC_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_SOFT_RESET_USC_EN                          (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register RGX_CR_SOFT_RESET2
+*/
+#define RGX_CR_SOFT_RESET2                                (0x0108U)
+#define RGX_CR_SOFT_RESET2_MASKFULL                       (IMG_UINT64_C(0x00000000000007FF))
+#define RGX_CR_SOFT_RESET2_ASTC_SHIFT                     (10U)
+#define RGX_CR_SOFT_RESET2_ASTC_CLRMSK                    (0XFFFFFBFFU)
+#define RGX_CR_SOFT_RESET2_ASTC_EN                        (0X00000400U)
+#define RGX_CR_SOFT_RESET2_BLACKPEARL_SHIFT               (9U)
+#define RGX_CR_SOFT_RESET2_BLACKPEARL_CLRMSK              (0XFFFFFDFFU)
+#define RGX_CR_SOFT_RESET2_BLACKPEARL_EN                  (0X00000200U)
+#define RGX_CR_SOFT_RESET2_USCPS_SHIFT                    (8U)
+#define RGX_CR_SOFT_RESET2_USCPS_CLRMSK                   (0XFFFFFEFFU)
+#define RGX_CR_SOFT_RESET2_USCPS_EN                       (0X00000100U)
+#define RGX_CR_SOFT_RESET2_IPF_SHIFT                      (7U)
+#define RGX_CR_SOFT_RESET2_IPF_CLRMSK                     (0XFFFFFF7FU)
+#define RGX_CR_SOFT_RESET2_IPF_EN                         (0X00000080U)
+#define RGX_CR_SOFT_RESET2_GEOMETRY_SHIFT                 (6U)
+#define RGX_CR_SOFT_RESET2_GEOMETRY_CLRMSK                (0XFFFFFFBFU)
+#define RGX_CR_SOFT_RESET2_GEOMETRY_EN                    (0X00000040U)
+#define RGX_CR_SOFT_RESET2_USC_SHARED_SHIFT               (5U)
+#define RGX_CR_SOFT_RESET2_USC_SHARED_CLRMSK              (0XFFFFFFDFU)
+#define RGX_CR_SOFT_RESET2_USC_SHARED_EN                  (0X00000020U)
+#define RGX_CR_SOFT_RESET2_PDS_SHARED_SHIFT               (4U)
+#define RGX_CR_SOFT_RESET2_PDS_SHARED_CLRMSK              (0XFFFFFFEFU)
+#define RGX_CR_SOFT_RESET2_PDS_SHARED_EN                  (0X00000010U)
+#define RGX_CR_SOFT_RESET2_BIF_BLACKPEARL_SHIFT           (3U)
+#define RGX_CR_SOFT_RESET2_BIF_BLACKPEARL_CLRMSK          (0XFFFFFFF7U)
+#define RGX_CR_SOFT_RESET2_BIF_BLACKPEARL_EN              (0X00000008U)
+#define RGX_CR_SOFT_RESET2_PIXEL_SHIFT                    (2U)
+#define RGX_CR_SOFT_RESET2_PIXEL_CLRMSK                   (0XFFFFFFFBU)
+#define RGX_CR_SOFT_RESET2_PIXEL_EN                       (0X00000004U)
+#define RGX_CR_SOFT_RESET2_COMPUTE_SHIFT                  (1U)
+#define RGX_CR_SOFT_RESET2_COMPUTE_CLRMSK                 (0XFFFFFFFDU)
+#define RGX_CR_SOFT_RESET2_COMPUTE_EN                     (0X00000002U)
+#define RGX_CR_SOFT_RESET2_VERTEX_SHIFT                   (0U)
+#define RGX_CR_SOFT_RESET2_VERTEX_CLRMSK                  (0XFFFFFFFEU)
+#define RGX_CR_SOFT_RESET2_VERTEX_EN                      (0X00000001U)
+
+
+/*
+    Register RGX_CR_EVENT_STATUS
+*/
+#define RGX_CR_EVENT_STATUS                               (0x0130U)
+#define RGX_CR_EVENT_STATUS_MASKFULL                      (IMG_UINT64_C(0x000000001FFEFFFF))
+#if defined(RGX_FEATURE_RAY_TRACING)
+#define RGX_CR_EVENT_STATUS_DPX_OUT_OF_MEMORY_SHIFT       (28U)
+#define RGX_CR_EVENT_STATUS_DPX_OUT_OF_MEMORY_CLRMSK      (0XEFFFFFFFU)
+#define RGX_CR_EVENT_STATUS_DPX_OUT_OF_MEMORY_EN          (0X10000000U)
+#define RGX_CR_EVENT_STATUS_DPX_MMU_PAGE_FAULT_SHIFT      (27U)
+#define RGX_CR_EVENT_STATUS_DPX_MMU_PAGE_FAULT_CLRMSK     (0XF7FFFFFFU)
+#define RGX_CR_EVENT_STATUS_DPX_MMU_PAGE_FAULT_EN         (0X08000000U)
+#define RGX_CR_EVENT_STATUS_RPM_OUT_OF_MEMORY_SHIFT       (26U)
+#define RGX_CR_EVENT_STATUS_RPM_OUT_OF_MEMORY_CLRMSK      (0XFBFFFFFFU)
+#define RGX_CR_EVENT_STATUS_RPM_OUT_OF_MEMORY_EN          (0X04000000U)
+#define RGX_CR_EVENT_STATUS_FBA_FC3_FINISHED_SHIFT        (25U)
+#define RGX_CR_EVENT_STATUS_FBA_FC3_FINISHED_CLRMSK       (0XFDFFFFFFU)
+#define RGX_CR_EVENT_STATUS_FBA_FC3_FINISHED_EN           (0X02000000U)
+#define RGX_CR_EVENT_STATUS_FBA_FC2_FINISHED_SHIFT        (24U)
+#define RGX_CR_EVENT_STATUS_FBA_FC2_FINISHED_CLRMSK       (0XFEFFFFFFU)
+#define RGX_CR_EVENT_STATUS_FBA_FC2_FINISHED_EN           (0X01000000U)
+#define RGX_CR_EVENT_STATUS_FBA_FC1_FINISHED_SHIFT        (23U)
+#define RGX_CR_EVENT_STATUS_FBA_FC1_FINISHED_CLRMSK       (0XFF7FFFFFU)
+#define RGX_CR_EVENT_STATUS_FBA_FC1_FINISHED_EN           (0X00800000U)
+#define RGX_CR_EVENT_STATUS_FBA_FC0_FINISHED_SHIFT        (22U)
+#define RGX_CR_EVENT_STATUS_FBA_FC0_FINISHED_CLRMSK       (0XFFBFFFFFU)
+#define RGX_CR_EVENT_STATUS_FBA_FC0_FINISHED_EN           (0X00400000U)
+#define RGX_CR_EVENT_STATUS_RDM_FC3_FINISHED_SHIFT        (21U)
+#define RGX_CR_EVENT_STATUS_RDM_FC3_FINISHED_CLRMSK       (0XFFDFFFFFU)
+#define RGX_CR_EVENT_STATUS_RDM_FC3_FINISHED_EN           (0X00200000U)
+#define RGX_CR_EVENT_STATUS_RDM_FC2_FINISHED_SHIFT        (20U)
+#define RGX_CR_EVENT_STATUS_RDM_FC2_FINISHED_CLRMSK       (0XFFEFFFFFU)
+#define RGX_CR_EVENT_STATUS_RDM_FC2_FINISHED_EN           (0X00100000U)
+#define RGX_CR_EVENT_STATUS_RDM_FC1_FINISHED_SHIFT        (19U)
+#define RGX_CR_EVENT_STATUS_RDM_FC1_FINISHED_CLRMSK       (0XFFF7FFFFU)
+#define RGX_CR_EVENT_STATUS_RDM_FC1_FINISHED_EN           (0X00080000U)
+#define RGX_CR_EVENT_STATUS_RDM_FC0_FINISHED_SHIFT        (18U)
+#define RGX_CR_EVENT_STATUS_RDM_FC0_FINISHED_CLRMSK       (0XFFFBFFFFU)
+#define RGX_CR_EVENT_STATUS_RDM_FC0_FINISHED_EN           (0X00040000U)
+#define RGX_CR_EVENT_STATUS_SHG_FINISHED_SHIFT            (17U)
+#define RGX_CR_EVENT_STATUS_SHG_FINISHED_CLRMSK           (0XFFFDFFFFU)
+#define RGX_CR_EVENT_STATUS_SHG_FINISHED_EN               (0X00020000U)
+#endif /* RGX_FEATURE_RAY_TRACING */
+
+#define RGX_CR_EVENT_STATUS_USC_TRIGGER_SHIFT             (15U)
+#define RGX_CR_EVENT_STATUS_USC_TRIGGER_CLRMSK            (0XFFFF7FFFU)
+#define RGX_CR_EVENT_STATUS_USC_TRIGGER_EN                (0X00008000U)
+#define RGX_CR_EVENT_STATUS_ZLS_FINISHED_SHIFT            (14U)
+#define RGX_CR_EVENT_STATUS_ZLS_FINISHED_CLRMSK           (0XFFFFBFFFU)
+#define RGX_CR_EVENT_STATUS_ZLS_FINISHED_EN               (0X00004000U)
+#define RGX_CR_EVENT_STATUS_GPIO_ACK_SHIFT                (13U)
+#define RGX_CR_EVENT_STATUS_GPIO_ACK_CLRMSK               (0XFFFFDFFFU)
+#define RGX_CR_EVENT_STATUS_GPIO_ACK_EN                   (0X00002000U)
+#define RGX_CR_EVENT_STATUS_GPIO_REQ_SHIFT                (12U)
+#define RGX_CR_EVENT_STATUS_GPIO_REQ_CLRMSK               (0XFFFFEFFFU)
+#define RGX_CR_EVENT_STATUS_GPIO_REQ_EN                   (0X00001000U)
+#define RGX_CR_EVENT_STATUS_POWER_ABORT_SHIFT             (11U)
+#define RGX_CR_EVENT_STATUS_POWER_ABORT_CLRMSK            (0XFFFFF7FFU)
+#define RGX_CR_EVENT_STATUS_POWER_ABORT_EN                (0X00000800U)
+#define RGX_CR_EVENT_STATUS_POWER_COMPLETE_SHIFT          (10U)
+#define RGX_CR_EVENT_STATUS_POWER_COMPLETE_CLRMSK         (0XFFFFFBFFU)
+#define RGX_CR_EVENT_STATUS_POWER_COMPLETE_EN             (0X00000400U)
+#define RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT_SHIFT          (9U)
+#define RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT_CLRMSK         (0XFFFFFDFFU)
+#define RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT_EN             (0X00000200U)
+#define RGX_CR_EVENT_STATUS_PM_3D_MEM_FREE_SHIFT          (8U)
+#define RGX_CR_EVENT_STATUS_PM_3D_MEM_FREE_CLRMSK         (0XFFFFFEFFU)
+#define RGX_CR_EVENT_STATUS_PM_3D_MEM_FREE_EN             (0X00000100U)
+#define RGX_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_SHIFT        (7U)
+#define RGX_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_CLRMSK       (0XFFFFFF7FU)
+#define RGX_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_EN           (0X00000080U)
+#define RGX_CR_EVENT_STATUS_TA_TERMINATE_SHIFT            (6U)
+#define RGX_CR_EVENT_STATUS_TA_TERMINATE_CLRMSK           (0XFFFFFFBFU)
+#define RGX_CR_EVENT_STATUS_TA_TERMINATE_EN               (0X00000040U)
+#define RGX_CR_EVENT_STATUS_TA_FINISHED_SHIFT             (5U)
+#define RGX_CR_EVENT_STATUS_TA_FINISHED_CLRMSK            (0XFFFFFFDFU)
+#define RGX_CR_EVENT_STATUS_TA_FINISHED_EN                (0X00000020U)
+#define RGX_CR_EVENT_STATUS_ISP_END_MACROTILE_SHIFT       (4U)
+#define RGX_CR_EVENT_STATUS_ISP_END_MACROTILE_CLRMSK      (0XFFFFFFEFU)
+#define RGX_CR_EVENT_STATUS_ISP_END_MACROTILE_EN          (0X00000010U)
+#define RGX_CR_EVENT_STATUS_PIXELBE_END_RENDER_SHIFT      (3U)
+#define RGX_CR_EVENT_STATUS_PIXELBE_END_RENDER_CLRMSK     (0XFFFFFFF7U)
+#define RGX_CR_EVENT_STATUS_PIXELBE_END_RENDER_EN         (0X00000008U)
+#define RGX_CR_EVENT_STATUS_COMPUTE_FINISHED_SHIFT        (2U)
+#define RGX_CR_EVENT_STATUS_COMPUTE_FINISHED_CLRMSK       (0XFFFFFFFBU)
+#define RGX_CR_EVENT_STATUS_COMPUTE_FINISHED_EN           (0X00000004U)
+#define RGX_CR_EVENT_STATUS_KERNEL_FINISHED_SHIFT         (1U)
+#define RGX_CR_EVENT_STATUS_KERNEL_FINISHED_CLRMSK        (0XFFFFFFFDU)
+#define RGX_CR_EVENT_STATUS_KERNEL_FINISHED_EN            (0X00000002U)
+#define RGX_CR_EVENT_STATUS_TLA_COMPLETE_SHIFT            (0U)
+#define RGX_CR_EVENT_STATUS_TLA_COMPLETE_CLRMSK           (0XFFFFFFFEU)
+#define RGX_CR_EVENT_STATUS_TLA_COMPLETE_EN               (0X00000001U)
+
+
+/*
+    Register RGX_CR_TIMER
+*/
+#define RGX_CR_TIMER                                      (0x0160U)
+#define RGX_CR_TIMER_MASKFULL                             (IMG_UINT64_C(0x8000FFFFFFFFFFFF))
+#define RGX_CR_TIMER_BIT31_SHIFT                          (63U)
+#define RGX_CR_TIMER_BIT31_CLRMSK                         (IMG_UINT64_C(0X7FFFFFFFFFFFFFFF))
+#define RGX_CR_TIMER_BIT31_EN                             (IMG_UINT64_C(0X8000000000000000))
+#define RGX_CR_TIMER_VALUE_SHIFT                          (0U)
+#define RGX_CR_TIMER_VALUE_CLRMSK                         (IMG_UINT64_C(0XFFFF000000000000))
+
+
+/*
+    Register RGX_CR_TLA_STATUS
+*/
+#define RGX_CR_TLA_STATUS                                 (0x0178U)
+#define RGX_CR_TLA_STATUS_MASKFULL                        (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_TLA_STATUS_BLIT_COUNT_SHIFT                (39U)
+#define RGX_CR_TLA_STATUS_BLIT_COUNT_CLRMSK               (IMG_UINT64_C(0X0000007FFFFFFFFF))
+#define RGX_CR_TLA_STATUS_REQUEST_SHIFT                   (7U)
+#define RGX_CR_TLA_STATUS_REQUEST_CLRMSK                  (IMG_UINT64_C(0XFFFFFF800000007F))
+#define RGX_CR_TLA_STATUS_FIFO_FULLNESS_SHIFT             (1U)
+#define RGX_CR_TLA_STATUS_FIFO_FULLNESS_CLRMSK            (IMG_UINT64_C(0XFFFFFFFFFFFFFF81))
+#define RGX_CR_TLA_STATUS_BUSY_SHIFT                      (0U)
+#define RGX_CR_TLA_STATUS_BUSY_CLRMSK                     (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_TLA_STATUS_BUSY_EN                         (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register RGX_CR_PM_PARTIAL_RENDER_ENABLE
+*/
+#define RGX_CR_PM_PARTIAL_RENDER_ENABLE                   (0x0338U)
+#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_MASKFULL          (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_OP_SHIFT          (0U)
+#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_OP_CLRMSK         (0XFFFFFFFEU)
+#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_OP_EN             (0X00000001U)
+
+
+/*
+    Register RGX_CR_SIDEKICK_IDLE
+*/
+#define RGX_CR_SIDEKICK_IDLE                              (0x03C8U)
+#define RGX_CR_SIDEKICK_IDLE_MASKFULL                     (IMG_UINT64_C(0x000000000000007F))
+#define RGX_CR_SIDEKICK_IDLE_FB_CDC_SHIFT                 (6U)
+#define RGX_CR_SIDEKICK_IDLE_FB_CDC_CLRMSK                (0XFFFFFFBFU)
+#define RGX_CR_SIDEKICK_IDLE_FB_CDC_EN                    (0X00000040U)
+#define RGX_CR_SIDEKICK_IDLE_MMU_SHIFT                    (5U)
+#define RGX_CR_SIDEKICK_IDLE_MMU_CLRMSK                   (0XFFFFFFDFU)
+#define RGX_CR_SIDEKICK_IDLE_MMU_EN                       (0X00000020U)
+#define RGX_CR_SIDEKICK_IDLE_BIF128_SHIFT                 (4U)
+#define RGX_CR_SIDEKICK_IDLE_BIF128_CLRMSK                (0XFFFFFFEFU)
+#define RGX_CR_SIDEKICK_IDLE_BIF128_EN                    (0X00000010U)
+#define RGX_CR_SIDEKICK_IDLE_TLA_SHIFT                    (3U)
+#define RGX_CR_SIDEKICK_IDLE_TLA_CLRMSK                   (0XFFFFFFF7U)
+#define RGX_CR_SIDEKICK_IDLE_TLA_EN                       (0X00000008U)
+#define RGX_CR_SIDEKICK_IDLE_GARTEN_SHIFT                 (2U)
+#define RGX_CR_SIDEKICK_IDLE_GARTEN_CLRMSK                (0XFFFFFFFBU)
+#define RGX_CR_SIDEKICK_IDLE_GARTEN_EN                    (0X00000004U)
+#define RGX_CR_SIDEKICK_IDLE_HOSTIF_SHIFT                 (1U)
+#define RGX_CR_SIDEKICK_IDLE_HOSTIF_CLRMSK                (0XFFFFFFFDU)
+#define RGX_CR_SIDEKICK_IDLE_HOSTIF_EN                    (0X00000002U)
+#define RGX_CR_SIDEKICK_IDLE_SOCIF_SHIFT                  (0U)
+#define RGX_CR_SIDEKICK_IDLE_SOCIF_CLRMSK                 (0XFFFFFFFEU)
+#define RGX_CR_SIDEKICK_IDLE_SOCIF_EN                     (0X00000001U)
+
+
+/*
+    Register RGX_CR_VDM_CONTEXT_STORE_STATUS
+*/
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS                   (0x0430U)
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_MASKFULL          (IMG_UINT64_C(0x00000000000000F3))
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_LAST_PIPE_SHIFT   (4U)
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_LAST_PIPE_CLRMSK  (0XFFFFFF0FU)
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_NEED_RESUME_SHIFT (1U)
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_NEED_RESUME_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_NEED_RESUME_EN    (0X00000002U)
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_COMPLETE_SHIFT    (0U)
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_COMPLETE_CLRMSK   (0XFFFFFFFEU)
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_COMPLETE_EN       (0X00000001U)
+
+
+/*
+    Register RGX_CR_VDM_CONTEXT_STORE_TASK0
+*/
+#define RGX_CR_VDM_CONTEXT_STORE_TASK0                    (0x0438U)
+#define RGX_CR_VDM_CONTEXT_STORE_TASK0_MASKFULL           (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE1_SHIFT   (32U)
+#define RGX_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE1_CLRMSK  (IMG_UINT64_C(0X00000000FFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE0_SHIFT   (0U)
+#define RGX_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE0_CLRMSK  (IMG_UINT64_C(0XFFFFFFFF00000000))
+
+
+/*
+    Register RGX_CR_VDM_CONTEXT_STORE_TASK1
+*/
+#define RGX_CR_VDM_CONTEXT_STORE_TASK1                    (0x0440U)
+#define RGX_CR_VDM_CONTEXT_STORE_TASK1_MASKFULL           (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_STORE_TASK1_PDS_STATE2_SHIFT   (0U)
+#define RGX_CR_VDM_CONTEXT_STORE_TASK1_PDS_STATE2_CLRMSK  (00000000U)
+
+
+/*
+    Register RGX_CR_VDM_CONTEXT_STORE_TASK2
+*/
+#define RGX_CR_VDM_CONTEXT_STORE_TASK2                    (0x0448U)
+#define RGX_CR_VDM_CONTEXT_STORE_TASK2_MASKFULL           (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT2_SHIFT  (32U)
+#define RGX_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT2_CLRMSK (IMG_UINT64_C(0X00000000FFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT1_SHIFT  (0U)
+#define RGX_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT1_CLRMSK (IMG_UINT64_C(0XFFFFFFFF00000000))
+
+
+/*
+    Register RGX_CR_VDM_CONTEXT_RESUME_TASK0
+*/
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK0                   (0x0450U)
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK0_MASKFULL          (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE1_SHIFT  (32U)
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE1_CLRMSK (IMG_UINT64_C(0X00000000FFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE0_SHIFT  (0U)
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE0_CLRMSK (IMG_UINT64_C(0XFFFFFFFF00000000))
+
+
+/*
+    Register RGX_CR_VDM_CONTEXT_RESUME_TASK1
+*/
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK1                   (0x0458U)
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK1_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK1_PDS_STATE2_SHIFT  (0U)
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK1_PDS_STATE2_CLRMSK (00000000U)
+
+
+/*
+    Register RGX_CR_VDM_CONTEXT_RESUME_TASK2
+*/
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK2                   (0x0460U)
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK2_MASKFULL          (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT2_SHIFT (32U)
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT2_CLRMSK (IMG_UINT64_C(0X00000000FFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT1_SHIFT (0U)
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT1_CLRMSK (IMG_UINT64_C(0XFFFFFFFF00000000))
+
+
+/*
+    Register RGX_CR_CDM_CONTEXT_STORE_STATUS
+*/
+#define RGX_CR_CDM_CONTEXT_STORE_STATUS                   (0x04A0U)
+#define RGX_CR_CDM_CONTEXT_STORE_STATUS_MASKFULL          (IMG_UINT64_C(0x0000000000000003))
+#define RGX_CR_CDM_CONTEXT_STORE_STATUS_NEED_RESUME_SHIFT (1U)
+#define RGX_CR_CDM_CONTEXT_STORE_STATUS_NEED_RESUME_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_CDM_CONTEXT_STORE_STATUS_NEED_RESUME_EN    (0X00000002U)
+#define RGX_CR_CDM_CONTEXT_STORE_STATUS_COMPLETE_SHIFT    (0U)
+#define RGX_CR_CDM_CONTEXT_STORE_STATUS_COMPLETE_CLRMSK   (0XFFFFFFFEU)
+#define RGX_CR_CDM_CONTEXT_STORE_STATUS_COMPLETE_EN       (0X00000001U)
+
+
+/*
+    Register RGX_CR_CDM_CONTEXT_PDS0
+*/
+#define RGX_CR_CDM_CONTEXT_PDS0                           (0x04A8U)
+#define RGX_CR_CDM_CONTEXT_PDS0_MASKFULL                  (IMG_UINT64_C(0xFFFFFFF0FFFFFFF0))
+#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_SHIFT           (36U)
+#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_CLRMSK          (IMG_UINT64_C(0X0000000FFFFFFFFF))
+#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_ALIGNSHIFT      (4U)
+#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_ALIGNSIZE       (16U)
+#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_SHIFT           (4U)
+#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_CLRMSK          (IMG_UINT64_C(0XFFFFFFFF0000000F))
+#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_ALIGNSHIFT      (4U)
+#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_ALIGNSIZE       (16U)
+
+
+#if defined(RGX_FEATURE_PDS_TEMPSIZE8)
+/*
+    Register RGX_CR_CDM_CONTEXT_PDS1
+*/
+#define RGX_CR_CDM_CONTEXT_PDS1                           (0x04B0U)
+#define RGX_CR_CDM_CONTEXT_PDS1_MASKFULL                  (IMG_UINT64_C(0x000000007FFFFFFF))
+#define RGX_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_SHIFT         (30U)
+#define RGX_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_CLRMSK        (0XBFFFFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_EN            (0X40000000U)
+#define RGX_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_SHIFT         (29U)
+#define RGX_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_CLRMSK        (0XDFFFFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_EN            (0X20000000U)
+#define RGX_CR_CDM_CONTEXT_PDS1_TARGET_SHIFT              (28U)
+#define RGX_CR_CDM_CONTEXT_PDS1_TARGET_CLRMSK             (0XEFFFFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1_TARGET_EN                 (0X10000000U)
+#define RGX_CR_CDM_CONTEXT_PDS1_UNIFIED_SIZE_SHIFT        (22U)
+#define RGX_CR_CDM_CONTEXT_PDS1_UNIFIED_SIZE_CLRMSK       (0XF03FFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SHARED_SHIFT       (21U)
+#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SHARED_CLRMSK      (0XFFDFFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SHARED_EN          (0X00200000U)
+#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SIZE_SHIFT         (12U)
+#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SIZE_CLRMSK        (0XFFE00FFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1_TEMP_SIZE_SHIFT           (7U)
+#define RGX_CR_CDM_CONTEXT_PDS1_TEMP_SIZE_CLRMSK          (0XFFFFF07FU)
+#define RGX_CR_CDM_CONTEXT_PDS1_DATA_SIZE_SHIFT           (1U)
+#define RGX_CR_CDM_CONTEXT_PDS1_DATA_SIZE_CLRMSK          (0XFFFFFF81U)
+#define RGX_CR_CDM_CONTEXT_PDS1_FENCE_SHIFT               (0U)
+#define RGX_CR_CDM_CONTEXT_PDS1_FENCE_CLRMSK              (0XFFFFFFFEU)
+#define RGX_CR_CDM_CONTEXT_PDS1_FENCE_EN                  (0X00000001U)
+#endif /* RGX_FEATURE_PDS_TEMPSIZE8 */ 
+
+
+#if !defined(RGX_FEATURE_PDS_TEMPSIZE8)
+/*
+    Register RGX_CR_CDM_CONTEXT_PDS1
+*/
+#define RGX_CR_CDM_CONTEXT_PDS1                           (0x04B0U)
+#define RGX_CR_CDM_CONTEXT_PDS1_MASKFULL                  (IMG_UINT64_C(0x000000003FFFFFFF))
+#define RGX_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_SHIFT         (29U)
+#define RGX_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_CLRMSK        (0XDFFFFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_EN            (0X20000000U)
+#define RGX_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_SHIFT         (28U)
+#define RGX_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_CLRMSK        (0XEFFFFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_EN            (0X10000000U)
+#define RGX_CR_CDM_CONTEXT_PDS1_TARGET_SHIFT              (27U)
+#define RGX_CR_CDM_CONTEXT_PDS1_TARGET_CLRMSK             (0XF7FFFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1_TARGET_EN                 (0X08000000U)
+#define RGX_CR_CDM_CONTEXT_PDS1_UNIFIED_SIZE_SHIFT        (21U)
+#define RGX_CR_CDM_CONTEXT_PDS1_UNIFIED_SIZE_CLRMSK       (0XF81FFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SHARED_SHIFT       (20U)
+#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SHARED_CLRMSK      (0XFFEFFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SHARED_EN          (0X00100000U)
+#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SIZE_SHIFT         (11U)
+#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SIZE_CLRMSK        (0XFFF007FFU)
+#define RGX_CR_CDM_CONTEXT_PDS1_TEMP_SIZE_SHIFT           (7U)
+#define RGX_CR_CDM_CONTEXT_PDS1_TEMP_SIZE_CLRMSK          (0XFFFFF87FU)
+#define RGX_CR_CDM_CONTEXT_PDS1_DATA_SIZE_SHIFT           (1U)
+#define RGX_CR_CDM_CONTEXT_PDS1_DATA_SIZE_CLRMSK          (0XFFFFFF81U)
+#define RGX_CR_CDM_CONTEXT_PDS1_FENCE_SHIFT               (0U)
+#define RGX_CR_CDM_CONTEXT_PDS1_FENCE_CLRMSK              (0XFFFFFFFEU)
+#define RGX_CR_CDM_CONTEXT_PDS1_FENCE_EN                  (0X00000001U)
+#endif /* !defined(RGX_FEATURE_PDS_TEMPSIZE8) */
+
+
+/*
+    Register RGX_CR_CDM_TERMINATE_PDS
+*/
+#define RGX_CR_CDM_TERMINATE_PDS                          (0x04B8U)
+#define RGX_CR_CDM_TERMINATE_PDS_MASKFULL                 (IMG_UINT64_C(0xFFFFFFF0FFFFFFF0))
+#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_SHIFT          (36U)
+#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_CLRMSK         (IMG_UINT64_C(0X0000000FFFFFFFFF))
+#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_ALIGNSHIFT     (4U)
+#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_ALIGNSIZE      (16U)
+#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_SHIFT          (4U)
+#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_CLRMSK         (IMG_UINT64_C(0XFFFFFFFF0000000F))
+#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_ALIGNSHIFT     (4U)
+#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_ALIGNSIZE      (16U)
+
+
+#if defined(RGX_FEATURE_PDS_TEMPSIZE8)
+/*
+    Register RGX_CR_CDM_TERMINATE_PDS1
+*/
+#define RGX_CR_CDM_TERMINATE_PDS1                         (0x04C0U)
+#define RGX_CR_CDM_TERMINATE_PDS1_MASKFULL                (IMG_UINT64_C(0x000000007FFFFFFF))
+#define RGX_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_SHIFT       (30U)
+#define RGX_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_CLRMSK      (0XBFFFFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_EN          (0X40000000U)
+#define RGX_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_SHIFT       (29U)
+#define RGX_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_CLRMSK      (0XDFFFFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_EN          (0X20000000U)
+#define RGX_CR_CDM_TERMINATE_PDS1_TARGET_SHIFT            (28U)
+#define RGX_CR_CDM_TERMINATE_PDS1_TARGET_CLRMSK           (0XEFFFFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1_TARGET_EN               (0X10000000U)
+#define RGX_CR_CDM_TERMINATE_PDS1_UNIFIED_SIZE_SHIFT      (22U)
+#define RGX_CR_CDM_TERMINATE_PDS1_UNIFIED_SIZE_CLRMSK     (0XF03FFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SHARED_SHIFT     (21U)
+#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SHARED_CLRMSK    (0XFFDFFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SHARED_EN        (0X00200000U)
+#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SIZE_SHIFT       (12U)
+#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SIZE_CLRMSK      (0XFFE00FFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1_TEMP_SIZE_SHIFT         (7U)
+#define RGX_CR_CDM_TERMINATE_PDS1_TEMP_SIZE_CLRMSK        (0XFFFFF07FU)
+#define RGX_CR_CDM_TERMINATE_PDS1_DATA_SIZE_SHIFT         (1U)
+#define RGX_CR_CDM_TERMINATE_PDS1_DATA_SIZE_CLRMSK        (0XFFFFFF81U)
+#define RGX_CR_CDM_TERMINATE_PDS1_FENCE_SHIFT             (0U)
+#define RGX_CR_CDM_TERMINATE_PDS1_FENCE_CLRMSK            (0XFFFFFFFEU)
+#define RGX_CR_CDM_TERMINATE_PDS1_FENCE_EN                (0X00000001U)
+#endif /* RGX_FEATURE_PDS_TEMPSIZE8 */ 
+
+
+#if !defined(RGX_FEATURE_PDS_TEMPSIZE8)
+/*
+    Register RGX_CR_CDM_TERMINATE_PDS1
+*/
+#define RGX_CR_CDM_TERMINATE_PDS1                         (0x04C0U)
+#define RGX_CR_CDM_TERMINATE_PDS1_MASKFULL                (IMG_UINT64_C(0x000000003FFFFFFF))
+#define RGX_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_SHIFT       (29U)
+#define RGX_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_CLRMSK      (0XDFFFFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_EN          (0X20000000U)
+#define RGX_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_SHIFT       (28U)
+#define RGX_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_CLRMSK      (0XEFFFFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_EN          (0X10000000U)
+#define RGX_CR_CDM_TERMINATE_PDS1_TARGET_SHIFT            (27U)
+#define RGX_CR_CDM_TERMINATE_PDS1_TARGET_CLRMSK           (0XF7FFFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1_TARGET_EN               (0X08000000U)
+#define RGX_CR_CDM_TERMINATE_PDS1_UNIFIED_SIZE_SHIFT      (21U)
+#define RGX_CR_CDM_TERMINATE_PDS1_UNIFIED_SIZE_CLRMSK     (0XF81FFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SHARED_SHIFT     (20U)
+#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SHARED_CLRMSK    (0XFFEFFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SHARED_EN        (0X00100000U)
+#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SIZE_SHIFT       (11U)
+#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SIZE_CLRMSK      (0XFFF007FFU)
+#define RGX_CR_CDM_TERMINATE_PDS1_TEMP_SIZE_SHIFT         (7U)
+#define RGX_CR_CDM_TERMINATE_PDS1_TEMP_SIZE_CLRMSK        (0XFFFFF87FU)
+#define RGX_CR_CDM_TERMINATE_PDS1_DATA_SIZE_SHIFT         (1U)
+#define RGX_CR_CDM_TERMINATE_PDS1_DATA_SIZE_CLRMSK        (0XFFFFFF81U)
+#define RGX_CR_CDM_TERMINATE_PDS1_FENCE_SHIFT             (0U)
+#define RGX_CR_CDM_TERMINATE_PDS1_FENCE_CLRMSK            (0XFFFFFFFEU)
+#define RGX_CR_CDM_TERMINATE_PDS1_FENCE_EN                (0X00000001U)
+#endif /* !defined(RGX_FEATURE_PDS_TEMPSIZE8) */
+
+
+/*
+    Register RGX_CR_META_SP_MSLVDATAX
+*/
+#define RGX_CR_META_SP_MSLVDATAX                          (0x0A00U)
+#define RGX_CR_META_SP_MSLVDATAX_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_META_SP_MSLVDATAX_MSLVDATAX_SHIFT          (0U)
+#define RGX_CR_META_SP_MSLVDATAX_MSLVDATAX_CLRMSK         (00000000U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVDATAT
+*/
+#define RGX_CR_META_SP_MSLVDATAT                          (0x0A08U)
+#define RGX_CR_META_SP_MSLVDATAT_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_META_SP_MSLVDATAT_MSLVDATAT_SHIFT          (0U)
+#define RGX_CR_META_SP_MSLVDATAT_MSLVDATAT_CLRMSK         (00000000U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVCTRL0
+*/
+#define RGX_CR_META_SP_MSLVCTRL0                          (0x0A10U)
+#define RGX_CR_META_SP_MSLVCTRL0_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_META_SP_MSLVCTRL0_ADDR_SHIFT               (2U)
+#define RGX_CR_META_SP_MSLVCTRL0_ADDR_CLRMSK              (0X00000003U)
+#define RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_SHIFT           (1U)
+#define RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_CLRMSK          (0XFFFFFFFDU)
+#define RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_EN              (0X00000002U)
+#define RGX_CR_META_SP_MSLVCTRL0_RD_SHIFT                 (0U)
+#define RGX_CR_META_SP_MSLVCTRL0_RD_CLRMSK                (0XFFFFFFFEU)
+#define RGX_CR_META_SP_MSLVCTRL0_RD_EN                    (0X00000001U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVCTRL1
+*/
+#define RGX_CR_META_SP_MSLVCTRL1                          (0x0A18U)
+#define RGX_CR_META_SP_MSLVCTRL1_MASKFULL                 (IMG_UINT64_C(0x00000000F7F4003F))
+#define RGX_CR_META_SP_MSLVCTRL1_DEFERRTHREAD_SHIFT       (30U)
+#define RGX_CR_META_SP_MSLVCTRL1_DEFERRTHREAD_CLRMSK      (0X3FFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_SHIFT    (29U)
+#define RGX_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_CLRMSK   (0XDFFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_EN       (0X20000000U)
+#define RGX_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_SHIFT   (28U)
+#define RGX_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_CLRMSK  (0XEFFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_EN      (0X10000000U)
+#define RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_SHIFT       (26U)
+#define RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_CLRMSK      (0XFBFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN          (0X04000000U)
+#define RGX_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_SHIFT       (25U)
+#define RGX_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_CLRMSK      (0XFDFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_EN          (0X02000000U)
+#define RGX_CR_META_SP_MSLVCTRL1_READY_SHIFT              (24U)
+#define RGX_CR_META_SP_MSLVCTRL1_READY_CLRMSK             (0XFEFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_READY_EN                 (0X01000000U)
+#define RGX_CR_META_SP_MSLVCTRL1_DEFERRID_SHIFT           (21U)
+#define RGX_CR_META_SP_MSLVCTRL1_DEFERRID_CLRMSK          (0XFF1FFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_DEFERR_SHIFT             (20U)
+#define RGX_CR_META_SP_MSLVCTRL1_DEFERR_CLRMSK            (0XFFEFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_DEFERR_EN                (0X00100000U)
+#define RGX_CR_META_SP_MSLVCTRL1_WR_ACTIVE_SHIFT          (18U)
+#define RGX_CR_META_SP_MSLVCTRL1_WR_ACTIVE_CLRMSK         (0XFFFBFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_WR_ACTIVE_EN             (0X00040000U)
+#define RGX_CR_META_SP_MSLVCTRL1_THREAD_SHIFT             (4U)
+#define RGX_CR_META_SP_MSLVCTRL1_THREAD_CLRMSK            (0XFFFFFFCFU)
+#define RGX_CR_META_SP_MSLVCTRL1_TRANS_SIZE_SHIFT         (2U)
+#define RGX_CR_META_SP_MSLVCTRL1_TRANS_SIZE_CLRMSK        (0XFFFFFFF3U)
+#define RGX_CR_META_SP_MSLVCTRL1_BYTE_ROUND_SHIFT         (0U)
+#define RGX_CR_META_SP_MSLVCTRL1_BYTE_ROUND_CLRMSK        (0XFFFFFFFCU)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVHANDSHKE
+*/
+#define RGX_CR_META_SP_MSLVHANDSHKE                       (0x0A50U)
+#define RGX_CR_META_SP_MSLVHANDSHKE_MASKFULL              (IMG_UINT64_C(0x000000000000000F))
+#define RGX_CR_META_SP_MSLVHANDSHKE_INPUT_SHIFT           (2U)
+#define RGX_CR_META_SP_MSLVHANDSHKE_INPUT_CLRMSK          (0XFFFFFFF3U)
+#define RGX_CR_META_SP_MSLVHANDSHKE_OUTPUT_SHIFT          (0U)
+#define RGX_CR_META_SP_MSLVHANDSHKE_OUTPUT_CLRMSK         (0XFFFFFFFCU)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVT0KICK
+*/
+#define RGX_CR_META_SP_MSLVT0KICK                         (0x0A80U)
+#define RGX_CR_META_SP_MSLVT0KICK_MASKFULL                (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT0KICK_MSLVT0KICK_SHIFT        (0U)
+#define RGX_CR_META_SP_MSLVT0KICK_MSLVT0KICK_CLRMSK       (0XFFFF0000U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVT0KICKI
+*/
+#define RGX_CR_META_SP_MSLVT0KICKI                        (0x0A88U)
+#define RGX_CR_META_SP_MSLVT0KICKI_MASKFULL               (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT0KICKI_MSLVT0KICKI_SHIFT      (0U)
+#define RGX_CR_META_SP_MSLVT0KICKI_MSLVT0KICKI_CLRMSK     (0XFFFF0000U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVT1KICK
+*/
+#define RGX_CR_META_SP_MSLVT1KICK                         (0x0A90U)
+#define RGX_CR_META_SP_MSLVT1KICK_MASKFULL                (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT1KICK_MSLVT1KICK_SHIFT        (0U)
+#define RGX_CR_META_SP_MSLVT1KICK_MSLVT1KICK_CLRMSK       (0XFFFF0000U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVT1KICKI
+*/
+#define RGX_CR_META_SP_MSLVT1KICKI                        (0x0A98U)
+#define RGX_CR_META_SP_MSLVT1KICKI_MASKFULL               (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT1KICKI_MSLVT1KICKI_SHIFT      (0U)
+#define RGX_CR_META_SP_MSLVT1KICKI_MSLVT1KICKI_CLRMSK     (0XFFFF0000U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVT2KICK
+*/
+#define RGX_CR_META_SP_MSLVT2KICK                         (0x0AA0U)
+#define RGX_CR_META_SP_MSLVT2KICK_MASKFULL                (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT2KICK_MSLVT2KICK_SHIFT        (0U)
+#define RGX_CR_META_SP_MSLVT2KICK_MSLVT2KICK_CLRMSK       (0XFFFF0000U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVT2KICKI
+*/
+#define RGX_CR_META_SP_MSLVT2KICKI                        (0x0AA8U)
+#define RGX_CR_META_SP_MSLVT2KICKI_MASKFULL               (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT2KICKI_MSLVT2KICKI_SHIFT      (0U)
+#define RGX_CR_META_SP_MSLVT2KICKI_MSLVT2KICKI_CLRMSK     (0XFFFF0000U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVT3KICK
+*/
+#define RGX_CR_META_SP_MSLVT3KICK                         (0x0AB0U)
+#define RGX_CR_META_SP_MSLVT3KICK_MASKFULL                (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT3KICK_MSLVT3KICK_SHIFT        (0U)
+#define RGX_CR_META_SP_MSLVT3KICK_MSLVT3KICK_CLRMSK       (0XFFFF0000U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVT3KICKI
+*/
+#define RGX_CR_META_SP_MSLVT3KICKI                        (0x0AB8U)
+#define RGX_CR_META_SP_MSLVT3KICKI_MASKFULL               (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT3KICKI_MSLVT3KICKI_SHIFT      (0U)
+#define RGX_CR_META_SP_MSLVT3KICKI_MSLVT3KICKI_CLRMSK     (0XFFFF0000U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVRST
+*/
+#define RGX_CR_META_SP_MSLVRST                            (0x0AC0U)
+#define RGX_CR_META_SP_MSLVRST_MASKFULL                   (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_META_SP_MSLVRST_SOFTRESET_SHIFT            (0U)
+#define RGX_CR_META_SP_MSLVRST_SOFTRESET_CLRMSK           (0XFFFFFFFEU)
+#define RGX_CR_META_SP_MSLVRST_SOFTRESET_EN               (0X00000001U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVIRQSTATUS
+*/
+#define RGX_CR_META_SP_MSLVIRQSTATUS                      (0x0AC8U)
+#define RGX_CR_META_SP_MSLVIRQSTATUS_MASKFULL             (IMG_UINT64_C(0x000000000000000C))
+#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_SHIFT      (3U)
+#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_CLRMSK     (0XFFFFFFF7U)
+#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_EN         (0X00000008U)
+#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_SHIFT      (2U)
+#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_CLRMSK     (0XFFFFFFFBU)
+#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_EN         (0X00000004U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVIRQENABLE
+*/
+#define RGX_CR_META_SP_MSLVIRQENABLE                      (0x0AD0U)
+#define RGX_CR_META_SP_MSLVIRQENABLE_MASKFULL             (IMG_UINT64_C(0x000000000000000C))
+#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT1_SHIFT         (3U)
+#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT1_CLRMSK        (0XFFFFFFF7U)
+#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT1_EN            (0X00000008U)
+#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT0_SHIFT         (2U)
+#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT0_CLRMSK        (0XFFFFFFFBU)
+#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT0_EN            (0X00000004U)
+
+
+/*
+    Register RGX_CR_META_SP_MSLVIRQLEVEL
+*/
+#define RGX_CR_META_SP_MSLVIRQLEVEL                       (0x0AD8U)
+#define RGX_CR_META_SP_MSLVIRQLEVEL_MASKFULL              (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_META_SP_MSLVIRQLEVEL_MODE_SHIFT            (0U)
+#define RGX_CR_META_SP_MSLVIRQLEVEL_MODE_CLRMSK           (0XFFFFFFFEU)
+#define RGX_CR_META_SP_MSLVIRQLEVEL_MODE_EN               (0X00000001U)
+
+
+/*
+    Register RGX_CR_MTS_SCHEDULE
+*/
+#define RGX_CR_MTS_SCHEDULE                               (0x0B00U)
+#define RGX_CR_MTS_SCHEDULE_MASKFULL                      (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_MTS_SCHEDULE_HOST_SHIFT                    (8U)
+#define RGX_CR_MTS_SCHEDULE_HOST_CLRMSK                   (0XFFFFFEFFU)
+#define RGX_CR_MTS_SCHEDULE_HOST_BG_TIMER                 (00000000U)
+#define RGX_CR_MTS_SCHEDULE_HOST_HOST                     (0X00000100U)
+#define RGX_CR_MTS_SCHEDULE_PRIORITY_SHIFT                (6U)
+#define RGX_CR_MTS_SCHEDULE_PRIORITY_CLRMSK               (0XFFFFFF3FU)
+#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT0                 (00000000U)
+#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT1                 (0X00000040U)
+#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT2                 (0X00000080U)
+#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT3                 (0X000000C0U)
+#define RGX_CR_MTS_SCHEDULE_CONTEXT_SHIFT                 (5U)
+#define RGX_CR_MTS_SCHEDULE_CONTEXT_CLRMSK                (0XFFFFFFDFU)
+#define RGX_CR_MTS_SCHEDULE_CONTEXT_BGCTX                 (00000000U)
+#define RGX_CR_MTS_SCHEDULE_CONTEXT_INTCTX                (0X00000020U)
+#define RGX_CR_MTS_SCHEDULE_TASK_SHIFT                    (4U)
+#define RGX_CR_MTS_SCHEDULE_TASK_CLRMSK                   (0XFFFFFFEFU)
+#define RGX_CR_MTS_SCHEDULE_TASK_NON_COUNTED              (00000000U)
+#define RGX_CR_MTS_SCHEDULE_TASK_COUNTED                  (0X00000010U)
+#define RGX_CR_MTS_SCHEDULE_DM_SHIFT                      (0U)
+#define RGX_CR_MTS_SCHEDULE_DM_CLRMSK                     (0XFFFFFFF0U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM0                        (00000000U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM1                        (0X00000001U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM2                        (0X00000002U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM3                        (0X00000003U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM4                        (0X00000004U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM5                        (0X00000005U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM6                        (0X00000006U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM7                        (0X00000007U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM_ALL                     (0X0000000FU)
+
+
+/*
+    Register RGX_CR_MTS_SCHEDULE1
+*/
+#define RGX_CR_MTS_SCHEDULE1                              (0x10B00U)
+#define RGX_CR_MTS_SCHEDULE1_MASKFULL                     (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_MTS_SCHEDULE1_HOST_SHIFT                   (8U)
+#define RGX_CR_MTS_SCHEDULE1_HOST_CLRMSK                  (0XFFFFFEFFU)
+#define RGX_CR_MTS_SCHEDULE1_HOST_BG_TIMER                (00000000U)
+#define RGX_CR_MTS_SCHEDULE1_HOST_HOST                    (0X00000100U)
+#define RGX_CR_MTS_SCHEDULE1_PRIORITY_SHIFT               (6U)
+#define RGX_CR_MTS_SCHEDULE1_PRIORITY_CLRMSK              (0XFFFFFF3FU)
+#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT0                (00000000U)
+#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT1                (0X00000040U)
+#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT2                (0X00000080U)
+#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT3                (0X000000C0U)
+#define RGX_CR_MTS_SCHEDULE1_CONTEXT_SHIFT                (5U)
+#define RGX_CR_MTS_SCHEDULE1_CONTEXT_CLRMSK               (0XFFFFFFDFU)
+#define RGX_CR_MTS_SCHEDULE1_CONTEXT_BGCTX                (00000000U)
+#define RGX_CR_MTS_SCHEDULE1_CONTEXT_INTCTX               (0X00000020U)
+#define RGX_CR_MTS_SCHEDULE1_TASK_SHIFT                   (4U)
+#define RGX_CR_MTS_SCHEDULE1_TASK_CLRMSK                  (0XFFFFFFEFU)
+#define RGX_CR_MTS_SCHEDULE1_TASK_NON_COUNTED             (00000000U)
+#define RGX_CR_MTS_SCHEDULE1_TASK_COUNTED                 (0X00000010U)
+#define RGX_CR_MTS_SCHEDULE1_DM_SHIFT                     (0U)
+#define RGX_CR_MTS_SCHEDULE1_DM_CLRMSK                    (0XFFFFFFF0U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM0                       (00000000U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM1                       (0X00000001U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM2                       (0X00000002U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM3                       (0X00000003U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM4                       (0X00000004U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM5                       (0X00000005U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM6                       (0X00000006U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM7                       (0X00000007U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM_ALL                    (0X0000000FU)
+
+
+/*
+    Register RGX_CR_MTS_SCHEDULE2
+*/
+#define RGX_CR_MTS_SCHEDULE2                              (0x20B00U)
+#define RGX_CR_MTS_SCHEDULE2_MASKFULL                     (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_MTS_SCHEDULE2_HOST_SHIFT                   (8U)
+#define RGX_CR_MTS_SCHEDULE2_HOST_CLRMSK                  (0XFFFFFEFFU)
+#define RGX_CR_MTS_SCHEDULE2_HOST_BG_TIMER                (00000000U)
+#define RGX_CR_MTS_SCHEDULE2_HOST_HOST                    (0X00000100U)
+#define RGX_CR_MTS_SCHEDULE2_PRIORITY_SHIFT               (6U)
+#define RGX_CR_MTS_SCHEDULE2_PRIORITY_CLRMSK              (0XFFFFFF3FU)
+#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT0                (00000000U)
+#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT1                (0X00000040U)
+#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT2                (0X00000080U)
+#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT3                (0X000000C0U)
+#define RGX_CR_MTS_SCHEDULE2_CONTEXT_SHIFT                (5U)
+#define RGX_CR_MTS_SCHEDULE2_CONTEXT_CLRMSK               (0XFFFFFFDFU)
+#define RGX_CR_MTS_SCHEDULE2_CONTEXT_BGCTX                (00000000U)
+#define RGX_CR_MTS_SCHEDULE2_CONTEXT_INTCTX               (0X00000020U)
+#define RGX_CR_MTS_SCHEDULE2_TASK_SHIFT                   (4U)
+#define RGX_CR_MTS_SCHEDULE2_TASK_CLRMSK                  (0XFFFFFFEFU)
+#define RGX_CR_MTS_SCHEDULE2_TASK_NON_COUNTED             (00000000U)
+#define RGX_CR_MTS_SCHEDULE2_TASK_COUNTED                 (0X00000010U)
+#define RGX_CR_MTS_SCHEDULE2_DM_SHIFT                     (0U)
+#define RGX_CR_MTS_SCHEDULE2_DM_CLRMSK                    (0XFFFFFFF0U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM0                       (00000000U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM1                       (0X00000001U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM2                       (0X00000002U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM3                       (0X00000003U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM4                       (0X00000004U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM5                       (0X00000005U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM6                       (0X00000006U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM7                       (0X00000007U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM_ALL                    (0X0000000FU)
+
+
+/*
+    Register RGX_CR_MTS_SCHEDULE3
+*/
+#define RGX_CR_MTS_SCHEDULE3                              (0x30B00U)
+#define RGX_CR_MTS_SCHEDULE3_MASKFULL                     (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_MTS_SCHEDULE3_HOST_SHIFT                   (8U)
+#define RGX_CR_MTS_SCHEDULE3_HOST_CLRMSK                  (0XFFFFFEFFU)
+#define RGX_CR_MTS_SCHEDULE3_HOST_BG_TIMER                (00000000U)
+#define RGX_CR_MTS_SCHEDULE3_HOST_HOST                    (0X00000100U)
+#define RGX_CR_MTS_SCHEDULE3_PRIORITY_SHIFT               (6U)
+#define RGX_CR_MTS_SCHEDULE3_PRIORITY_CLRMSK              (0XFFFFFF3FU)
+#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT0                (00000000U)
+#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT1                (0X00000040U)
+#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT2                (0X00000080U)
+#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT3                (0X000000C0U)
+#define RGX_CR_MTS_SCHEDULE3_CONTEXT_SHIFT                (5U)
+#define RGX_CR_MTS_SCHEDULE3_CONTEXT_CLRMSK               (0XFFFFFFDFU)
+#define RGX_CR_MTS_SCHEDULE3_CONTEXT_BGCTX                (00000000U)
+#define RGX_CR_MTS_SCHEDULE3_CONTEXT_INTCTX               (0X00000020U)
+#define RGX_CR_MTS_SCHEDULE3_TASK_SHIFT                   (4U)
+#define RGX_CR_MTS_SCHEDULE3_TASK_CLRMSK                  (0XFFFFFFEFU)
+#define RGX_CR_MTS_SCHEDULE3_TASK_NON_COUNTED             (00000000U)
+#define RGX_CR_MTS_SCHEDULE3_TASK_COUNTED                 (0X00000010U)
+#define RGX_CR_MTS_SCHEDULE3_DM_SHIFT                     (0U)
+#define RGX_CR_MTS_SCHEDULE3_DM_CLRMSK                    (0XFFFFFFF0U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM0                       (00000000U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM1                       (0X00000001U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM2                       (0X00000002U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM3                       (0X00000003U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM4                       (0X00000004U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM5                       (0X00000005U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM6                       (0X00000006U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM7                       (0X00000007U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM_ALL                    (0X0000000FU)
+
+
+/*
+    Register RGX_CR_MTS_SCHEDULE4
+*/
+#define RGX_CR_MTS_SCHEDULE4                              (0x40B00U)
+#define RGX_CR_MTS_SCHEDULE4_MASKFULL                     (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_MTS_SCHEDULE4_HOST_SHIFT                   (8U)
+#define RGX_CR_MTS_SCHEDULE4_HOST_CLRMSK                  (0XFFFFFEFFU)
+#define RGX_CR_MTS_SCHEDULE4_HOST_BG_TIMER                (00000000U)
+#define RGX_CR_MTS_SCHEDULE4_HOST_HOST                    (0X00000100U)
+#define RGX_CR_MTS_SCHEDULE4_PRIORITY_SHIFT               (6U)
+#define RGX_CR_MTS_SCHEDULE4_PRIORITY_CLRMSK              (0XFFFFFF3FU)
+#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT0                (00000000U)
+#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT1                (0X00000040U)
+#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT2                (0X00000080U)
+#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT3                (0X000000C0U)
+#define RGX_CR_MTS_SCHEDULE4_CONTEXT_SHIFT                (5U)
+#define RGX_CR_MTS_SCHEDULE4_CONTEXT_CLRMSK               (0XFFFFFFDFU)
+#define RGX_CR_MTS_SCHEDULE4_CONTEXT_BGCTX                (00000000U)
+#define RGX_CR_MTS_SCHEDULE4_CONTEXT_INTCTX               (0X00000020U)
+#define RGX_CR_MTS_SCHEDULE4_TASK_SHIFT                   (4U)
+#define RGX_CR_MTS_SCHEDULE4_TASK_CLRMSK                  (0XFFFFFFEFU)
+#define RGX_CR_MTS_SCHEDULE4_TASK_NON_COUNTED             (00000000U)
+#define RGX_CR_MTS_SCHEDULE4_TASK_COUNTED                 (0X00000010U)
+#define RGX_CR_MTS_SCHEDULE4_DM_SHIFT                     (0U)
+#define RGX_CR_MTS_SCHEDULE4_DM_CLRMSK                    (0XFFFFFFF0U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM0                       (00000000U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM1                       (0X00000001U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM2                       (0X00000002U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM3                       (0X00000003U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM4                       (0X00000004U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM5                       (0X00000005U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM6                       (0X00000006U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM7                       (0X00000007U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM_ALL                    (0X0000000FU)
+
+
+/*
+    Register RGX_CR_MTS_SCHEDULE5
+*/
+#define RGX_CR_MTS_SCHEDULE5                              (0x50B00U)
+#define RGX_CR_MTS_SCHEDULE5_MASKFULL                     (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_MTS_SCHEDULE5_HOST_SHIFT                   (8U)
+#define RGX_CR_MTS_SCHEDULE5_HOST_CLRMSK                  (0XFFFFFEFFU)
+#define RGX_CR_MTS_SCHEDULE5_HOST_BG_TIMER                (00000000U)
+#define RGX_CR_MTS_SCHEDULE5_HOST_HOST                    (0X00000100U)
+#define RGX_CR_MTS_SCHEDULE5_PRIORITY_SHIFT               (6U)
+#define RGX_CR_MTS_SCHEDULE5_PRIORITY_CLRMSK              (0XFFFFFF3FU)
+#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT0                (00000000U)
+#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT1                (0X00000040U)
+#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT2                (0X00000080U)
+#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT3                (0X000000C0U)
+#define RGX_CR_MTS_SCHEDULE5_CONTEXT_SHIFT                (5U)
+#define RGX_CR_MTS_SCHEDULE5_CONTEXT_CLRMSK               (0XFFFFFFDFU)
+#define RGX_CR_MTS_SCHEDULE5_CONTEXT_BGCTX                (00000000U)
+#define RGX_CR_MTS_SCHEDULE5_CONTEXT_INTCTX               (0X00000020U)
+#define RGX_CR_MTS_SCHEDULE5_TASK_SHIFT                   (4U)
+#define RGX_CR_MTS_SCHEDULE5_TASK_CLRMSK                  (0XFFFFFFEFU)
+#define RGX_CR_MTS_SCHEDULE5_TASK_NON_COUNTED             (00000000U)
+#define RGX_CR_MTS_SCHEDULE5_TASK_COUNTED                 (0X00000010U)
+#define RGX_CR_MTS_SCHEDULE5_DM_SHIFT                     (0U)
+#define RGX_CR_MTS_SCHEDULE5_DM_CLRMSK                    (0XFFFFFFF0U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM0                       (00000000U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM1                       (0X00000001U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM2                       (0X00000002U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM3                       (0X00000003U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM4                       (0X00000004U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM5                       (0X00000005U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM6                       (0X00000006U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM7                       (0X00000007U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM_ALL                    (0X0000000FU)
+
+
+/*
+    Register RGX_CR_MTS_SCHEDULE6
+*/
+#define RGX_CR_MTS_SCHEDULE6                              (0x60B00U)
+#define RGX_CR_MTS_SCHEDULE6_MASKFULL                     (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_MTS_SCHEDULE6_HOST_SHIFT                   (8U)
+#define RGX_CR_MTS_SCHEDULE6_HOST_CLRMSK                  (0XFFFFFEFFU)
+#define RGX_CR_MTS_SCHEDULE6_HOST_BG_TIMER                (00000000U)
+#define RGX_CR_MTS_SCHEDULE6_HOST_HOST                    (0X00000100U)
+#define RGX_CR_MTS_SCHEDULE6_PRIORITY_SHIFT               (6U)
+#define RGX_CR_MTS_SCHEDULE6_PRIORITY_CLRMSK              (0XFFFFFF3FU)
+#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT0                (00000000U)
+#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT1                (0X00000040U)
+#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT2                (0X00000080U)
+#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT3                (0X000000C0U)
+#define RGX_CR_MTS_SCHEDULE6_CONTEXT_SHIFT                (5U)
+#define RGX_CR_MTS_SCHEDULE6_CONTEXT_CLRMSK               (0XFFFFFFDFU)
+#define RGX_CR_MTS_SCHEDULE6_CONTEXT_BGCTX                (00000000U)
+#define RGX_CR_MTS_SCHEDULE6_CONTEXT_INTCTX               (0X00000020U)
+#define RGX_CR_MTS_SCHEDULE6_TASK_SHIFT                   (4U)
+#define RGX_CR_MTS_SCHEDULE6_TASK_CLRMSK                  (0XFFFFFFEFU)
+#define RGX_CR_MTS_SCHEDULE6_TASK_NON_COUNTED             (00000000U)
+#define RGX_CR_MTS_SCHEDULE6_TASK_COUNTED                 (0X00000010U)
+#define RGX_CR_MTS_SCHEDULE6_DM_SHIFT                     (0U)
+#define RGX_CR_MTS_SCHEDULE6_DM_CLRMSK                    (0XFFFFFFF0U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM0                       (00000000U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM1                       (0X00000001U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM2                       (0X00000002U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM3                       (0X00000003U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM4                       (0X00000004U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM5                       (0X00000005U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM6                       (0X00000006U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM7                       (0X00000007U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM_ALL                    (0X0000000FU)
+
+
+/*
+    Register RGX_CR_MTS_SCHEDULE7
+*/
+#define RGX_CR_MTS_SCHEDULE7                              (0x70B00U)
+#define RGX_CR_MTS_SCHEDULE7_MASKFULL                     (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_MTS_SCHEDULE7_HOST_SHIFT                   (8U)
+#define RGX_CR_MTS_SCHEDULE7_HOST_CLRMSK                  (0XFFFFFEFFU)
+#define RGX_CR_MTS_SCHEDULE7_HOST_BG_TIMER                (00000000U)
+#define RGX_CR_MTS_SCHEDULE7_HOST_HOST                    (0X00000100U)
+#define RGX_CR_MTS_SCHEDULE7_PRIORITY_SHIFT               (6U)
+#define RGX_CR_MTS_SCHEDULE7_PRIORITY_CLRMSK              (0XFFFFFF3FU)
+#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT0                (00000000U)
+#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT1                (0X00000040U)
+#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT2                (0X00000080U)
+#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT3                (0X000000C0U)
+#define RGX_CR_MTS_SCHEDULE7_CONTEXT_SHIFT                (5U)
+#define RGX_CR_MTS_SCHEDULE7_CONTEXT_CLRMSK               (0XFFFFFFDFU)
+#define RGX_CR_MTS_SCHEDULE7_CONTEXT_BGCTX                (00000000U)
+#define RGX_CR_MTS_SCHEDULE7_CONTEXT_INTCTX               (0X00000020U)
+#define RGX_CR_MTS_SCHEDULE7_TASK_SHIFT                   (4U)
+#define RGX_CR_MTS_SCHEDULE7_TASK_CLRMSK                  (0XFFFFFFEFU)
+#define RGX_CR_MTS_SCHEDULE7_TASK_NON_COUNTED             (00000000U)
+#define RGX_CR_MTS_SCHEDULE7_TASK_COUNTED                 (0X00000010U)
+#define RGX_CR_MTS_SCHEDULE7_DM_SHIFT                     (0U)
+#define RGX_CR_MTS_SCHEDULE7_DM_CLRMSK                    (0XFFFFFFF0U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM0                       (00000000U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM1                       (0X00000001U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM2                       (0X00000002U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM3                       (0X00000003U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM4                       (0X00000004U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM5                       (0X00000005U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM6                       (0X00000006U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM7                       (0X00000007U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM_ALL                    (0X0000000FU)
+
+
+/*
+    Register RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC
+*/
+#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC                 (0x0B30U)
+#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_MASKFULL        (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_SHIFT  (0U)
+#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK (0XFFFF0000U)
+
+
+/*
+    Register RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC
+*/
+#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC                 (0x0B38U)
+#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_MASKFULL        (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_SHIFT  (0U)
+#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK (0XFFFF0000U)
+
+
+/*
+    Register RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC
+*/
+#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC                (0x0B40U)
+#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_MASKFULL       (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_SHIFT (0U)
+#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK (0XFFFF0000U)
+
+
+/*
+    Register RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC
+*/
+#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC                (0x0B48U)
+#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_MASKFULL       (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_SHIFT (0U)
+#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK (0XFFFF0000U)
+
+
+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE)
+/*
+    Register RGX_CR_MTS_GARTEN_WRAPPER_CONFIG
+*/
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG                  (0x0B50U)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_MASKFULL         (IMG_UINT64_C(0x000FF0FFFFFFF701))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_SHIFT (44U)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_CLRMSK (IMG_UINT64_C(0XFFF00FFFFFFFFFFF))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_ADDR_SHIFT (12U)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PERSISTENCE_SHIFT (9U)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PERSISTENCE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF9FF))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_SLC_COHERENT_SHIFT (8U)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_SLC_COHERENT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFEFF))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_SLC_COHERENT_EN (IMG_UINT64_C(0X0000000000000100))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_SHIFT  (0U)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META   (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_MTS    (IMG_UINT64_C(0x0000000000000001))
+#endif /* RGX_FEATURE_S7_TOP_INFRASTRUCTURE */ 
+
+
+#if !defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE)
+/*
+    Register RGX_CR_MTS_GARTEN_WRAPPER_CONFIG
+*/
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG                  (0x0B50U)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_MASKFULL         (IMG_UINT64_C(0x0000FFFFFFFFF001))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_SHIFT (44U)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_CLRMSK (IMG_UINT64_C(0XFFFF0FFFFFFFFFFF))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_DM_SHIFT   (40U)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_DM_CLRMSK  (IMG_UINT64_C(0XFFFFF0FFFFFFFFFF))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_ADDR_SHIFT (12U)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_SHIFT  (0U)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META   (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_MTS    (IMG_UINT64_C(0x0000000000000001))
+#endif /* !defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) */
+
+
+/*
+    Register RGX_CR_MTS_INTCTX
+*/
+#define RGX_CR_MTS_INTCTX                                 (0x0B98U)
+#define RGX_CR_MTS_INTCTX_MASKFULL                        (IMG_UINT64_C(0x000000003FFFFFFF))
+#define RGX_CR_MTS_INTCTX_DM_HOST_SCHEDULE_SHIFT          (22U)
+#define RGX_CR_MTS_INTCTX_DM_HOST_SCHEDULE_CLRMSK         (0XC03FFFFFU)
+#define RGX_CR_MTS_INTCTX_DM_PTR_SHIFT                    (18U)
+#define RGX_CR_MTS_INTCTX_DM_PTR_CLRMSK                   (0XFFC3FFFFU)
+#define RGX_CR_MTS_INTCTX_THREAD_ACTIVE_SHIFT             (16U)
+#define RGX_CR_MTS_INTCTX_THREAD_ACTIVE_CLRMSK            (0XFFFCFFFFU)
+#define RGX_CR_MTS_INTCTX_DM_TIMER_SCHEDULE_SHIFT         (8U)
+#define RGX_CR_MTS_INTCTX_DM_TIMER_SCHEDULE_CLRMSK        (0XFFFF00FFU)
+#define RGX_CR_MTS_INTCTX_DM_INTERRUPT_SCHEDULE_SHIFT     (0U)
+#define RGX_CR_MTS_INTCTX_DM_INTERRUPT_SCHEDULE_CLRMSK    (0XFFFFFF00U)
+
+
+/*
+    Register RGX_CR_MTS_BGCTX
+*/
+#define RGX_CR_MTS_BGCTX                                  (0x0BA0U)
+#define RGX_CR_MTS_BGCTX_MASKFULL                         (IMG_UINT64_C(0x0000000000003FFF))
+#define RGX_CR_MTS_BGCTX_DM_PTR_SHIFT                     (10U)
+#define RGX_CR_MTS_BGCTX_DM_PTR_CLRMSK                    (0XFFFFC3FFU)
+#define RGX_CR_MTS_BGCTX_THREAD_ACTIVE_SHIFT              (8U)
+#define RGX_CR_MTS_BGCTX_THREAD_ACTIVE_CLRMSK             (0XFFFFFCFFU)
+#define RGX_CR_MTS_BGCTX_DM_NONCOUNTED_SCHEDULE_SHIFT     (0U)
+#define RGX_CR_MTS_BGCTX_DM_NONCOUNTED_SCHEDULE_CLRMSK    (0XFFFFFF00U)
+
+
+/*
+    Register RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE
+*/
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE                 (0x0BA8U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_MASKFULL        (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#if defined(RGX_FEATURE_RAY_TRACING)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM7_SHIFT       (56U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM7_CLRMSK      (IMG_UINT64_C(0X00FFFFFFFFFFFFFF))
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM6_SHIFT       (48U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM6_CLRMSK      (IMG_UINT64_C(0XFF00FFFFFFFFFFFF))
+#endif /* RGX_FEATURE_RAY_TRACING */
+
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM5_SHIFT       (40U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM5_CLRMSK      (IMG_UINT64_C(0XFFFF00FFFFFFFFFF))
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM4_SHIFT       (32U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM4_CLRMSK      (IMG_UINT64_C(0XFFFFFF00FFFFFFFF))
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM3_SHIFT       (24U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM3_CLRMSK      (IMG_UINT64_C(0XFFFFFFFF00FFFFFF))
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM2_SHIFT       (16U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM2_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFF00FFFF))
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM1_SHIFT       (8U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM1_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFFFF00FF))
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM0_SHIFT       (0U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM0_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFFFFFF00))
+
+
+/*
+    Register RGX_CR_MTS_GPU_INT_STATUS
+*/
+#define RGX_CR_MTS_GPU_INT_STATUS                         (0x0BB0U)
+#define RGX_CR_MTS_GPU_INT_STATUS_MASKFULL                (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_MTS_GPU_INT_STATUS_STATUS_SHIFT            (0U)
+#define RGX_CR_MTS_GPU_INT_STATUS_STATUS_CLRMSK           (00000000U)
+
+
+/*
+    Register RGX_CR_META_BOOT
+*/
+#define RGX_CR_META_BOOT                                  (0x0BF8U)
+#define RGX_CR_META_BOOT_MASKFULL                         (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_META_BOOT_MODE_SHIFT                       (0U)
+#define RGX_CR_META_BOOT_MODE_CLRMSK                      (0XFFFFFFFEU)
+#define RGX_CR_META_BOOT_MODE_EN                          (0X00000001U)
+
+
+/*
+    Register RGX_CR_GARTEN_SLC
+*/
+#define RGX_CR_GARTEN_SLC                                 (0x0BB8U)
+#define RGX_CR_GARTEN_SLC_MASKFULL                        (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_GARTEN_SLC_FORCE_COHERENCY_SHIFT           (0U)
+#define RGX_CR_GARTEN_SLC_FORCE_COHERENCY_CLRMSK          (0XFFFFFFFEU)
+#define RGX_CR_GARTEN_SLC_FORCE_COHERENCY_EN              (0X00000001U)
+
+
+#define RGX_CR_ISP_RENDER_DIR_TYPE_MASK                   (0x00000003U)
+/*
+ Top-left to bottom-right */
+#define RGX_CR_ISP_RENDER_DIR_TYPE_TL2BR                  (0x00000000U)
+/*
+ Top-right to bottom-left */
+#define RGX_CR_ISP_RENDER_DIR_TYPE_TR2BL                  (0x00000001U)
+/*
+ Bottom-left to top-right */
+#define RGX_CR_ISP_RENDER_DIR_TYPE_BL2TR                  (0x00000002U)
+/*
+ Bottom-right to top-left */
+#define RGX_CR_ISP_RENDER_DIR_TYPE_BR2TL                  (0x00000003U)
+
+
+#define RGX_CR_ISP_RENDER_MODE_TYPE_MASK                  (0x00000003U)
+/*
+ Normal render     */
+#define RGX_CR_ISP_RENDER_MODE_TYPE_NORM                  (0x00000000U)
+/*
+ Fast 2D render    */
+#define RGX_CR_ISP_RENDER_MODE_TYPE_FAST_2D               (0x00000002U)
+/*
+ Fast scale render */
+#define RGX_CR_ISP_RENDER_MODE_TYPE_FAST_SCALE            (0x00000003U)
+
+
+/*
+    Register RGX_CR_ISP_RENDER
+*/
+#define RGX_CR_ISP_RENDER                                 (0x0F08U)
+#define RGX_CR_ISP_RENDER_MASKFULL                        (IMG_UINT64_C(0x000000000000001F))
+#define RGX_CR_ISP_RENDER_RESUME_SHIFT                    (4U)
+#define RGX_CR_ISP_RENDER_RESUME_CLRMSK                   (0XFFFFFFEFU)
+#define RGX_CR_ISP_RENDER_RESUME_EN                       (0X00000010U)
+#define RGX_CR_ISP_RENDER_DIR_SHIFT                       (2U)
+#define RGX_CR_ISP_RENDER_DIR_CLRMSK                      (0XFFFFFFF3U)
+#define RGX_CR_ISP_RENDER_DIR_TL2BR                       (00000000U)
+#define RGX_CR_ISP_RENDER_DIR_TR2BL                       (0X00000004U)
+#define RGX_CR_ISP_RENDER_DIR_BL2TR                       (0X00000008U)
+#define RGX_CR_ISP_RENDER_DIR_BR2TL                       (0X0000000CU)
+#define RGX_CR_ISP_RENDER_MODE_SHIFT                      (0U)
+#define RGX_CR_ISP_RENDER_MODE_CLRMSK                     (0XFFFFFFFCU)
+#define RGX_CR_ISP_RENDER_MODE_NORM                       (00000000U)
+#define RGX_CR_ISP_RENDER_MODE_FAST_2D                    (0X00000002U)
+#define RGX_CR_ISP_RENDER_MODE_FAST_SCALE                 (0X00000003U)
+
+
+/*
+    Register RGX_CR_ISP_CTL
+*/
+#define RGX_CR_ISP_CTL                                    (0x0F38U)
+#define RGX_CR_ISP_CTL_MASKFULL                           (IMG_UINT64_C(0x0000000001FFF3FF))
+#define RGX_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_SHIFT          (23U)
+#define RGX_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_CLRMSK         (0XFE7FFFFFU)
+#define RGX_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_DX9            (00000000U)
+#define RGX_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_DX10           (0X00800000U)
+#define RGX_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_OGL            (0X01000000U)
+#define RGX_CR_ISP_CTL_NUM_TILES_PER_USC_SHIFT            (21U)
+#define RGX_CR_ISP_CTL_NUM_TILES_PER_USC_CLRMSK           (0XFF9FFFFFU)
+#define RGX_CR_ISP_CTL_DBIAS_IS_INT_SHIFT                 (20U)
+#define RGX_CR_ISP_CTL_DBIAS_IS_INT_CLRMSK                (0XFFEFFFFFU)
+#define RGX_CR_ISP_CTL_DBIAS_IS_INT_EN                    (0X00100000U)
+#define RGX_CR_ISP_CTL_OVERLAP_CHECK_MODE_SHIFT           (19U)
+#define RGX_CR_ISP_CTL_OVERLAP_CHECK_MODE_CLRMSK          (0XFFF7FFFFU)
+#define RGX_CR_ISP_CTL_OVERLAP_CHECK_MODE_EN              (0X00080000U)
+#define RGX_CR_ISP_CTL_PT_UPFRONT_DEPTH_DISABLE_SHIFT     (18U)
+#define RGX_CR_ISP_CTL_PT_UPFRONT_DEPTH_DISABLE_CLRMSK    (0XFFFBFFFFU)
+#define RGX_CR_ISP_CTL_PT_UPFRONT_DEPTH_DISABLE_EN        (0X00040000U)
+#define RGX_CR_ISP_CTL_PROCESS_EMPTY_TILES_SHIFT          (17U)
+#define RGX_CR_ISP_CTL_PROCESS_EMPTY_TILES_CLRMSK         (0XFFFDFFFFU)
+#define RGX_CR_ISP_CTL_PROCESS_EMPTY_TILES_EN             (0X00020000U)
+#define RGX_CR_ISP_CTL_SAMPLE_POS_SHIFT                   (16U)
+#define RGX_CR_ISP_CTL_SAMPLE_POS_CLRMSK                  (0XFFFEFFFFU)
+#define RGX_CR_ISP_CTL_SAMPLE_POS_EN                      (0X00010000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_SHIFT                  (12U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_CLRMSK                 (0XFFFF0FFFU)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_ONE               (00000000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_TWO               (0X00001000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_THREE             (0X00002000U)
+#if defined(RGX_FEATURE_CLUSTER_GROUPING)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_FOUR              (0X00003000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_FIVE              (0X00004000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_SIX               (0X00005000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_SEVEN             (0X00006000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_EIGHT             (0X00007000U)
+#endif /* RGX_FEATURE_CLUSTER_GROUPING */
+
+#define RGX_CR_ISP_CTL_VALID_ID_SHIFT                     (4U)
+#define RGX_CR_ISP_CTL_VALID_ID_CLRMSK                    (0XFFFFFC0FU)
+#define RGX_CR_ISP_CTL_UPASS_START_SHIFT                  (0U)
+#define RGX_CR_ISP_CTL_UPASS_START_CLRMSK                 (0XFFFFFFF0U)
+
+
+/*
+    Register RGX_CR_ISP_STORE0
+*/
+#define RGX_CR_ISP_STORE0                                 (0x1008U)
+#define RGX_CR_ISP_STORE0_MASKFULL                        (IMG_UINT64_C(0x000000007F3FF3FF))
+#define RGX_CR_ISP_STORE0_ACTIVE_SHIFT                    (30U)
+#define RGX_CR_ISP_STORE0_ACTIVE_CLRMSK                   (0XBFFFFFFFU)
+#define RGX_CR_ISP_STORE0_ACTIVE_EN                       (0X40000000U)
+#define RGX_CR_ISP_STORE0_EOR_SHIFT                       (29U)
+#define RGX_CR_ISP_STORE0_EOR_CLRMSK                      (0XDFFFFFFFU)
+#define RGX_CR_ISP_STORE0_EOR_EN                          (0X20000000U)
+#define RGX_CR_ISP_STORE0_TILE_LAST_SHIFT                 (28U)
+#define RGX_CR_ISP_STORE0_TILE_LAST_CLRMSK                (0XEFFFFFFFU)
+#define RGX_CR_ISP_STORE0_TILE_LAST_EN                    (0X10000000U)
+#define RGX_CR_ISP_STORE0_MT_SHIFT                        (24U)
+#define RGX_CR_ISP_STORE0_MT_CLRMSK                       (0XF0FFFFFFU)
+#define RGX_CR_ISP_STORE0_TILE_X_SHIFT                    (12U)
+#define RGX_CR_ISP_STORE0_TILE_X_CLRMSK                   (0XFFC00FFFU)
+#define RGX_CR_ISP_STORE0_TILE_Y_SHIFT                    (0U)
+#define RGX_CR_ISP_STORE0_TILE_Y_CLRMSK                   (0XFFFFFC00U)
+
+
+/*
+    Register RGX_CR_ISP_STORE1
+*/
+#define RGX_CR_ISP_STORE1                                 (0x1010U)
+#define RGX_CR_ISP_STORE1_MASKFULL                        (IMG_UINT64_C(0x000000007F3FF3FF))
+#define RGX_CR_ISP_STORE1_ACTIVE_SHIFT                    (30U)
+#define RGX_CR_ISP_STORE1_ACTIVE_CLRMSK                   (0XBFFFFFFFU)
+#define RGX_CR_ISP_STORE1_ACTIVE_EN                       (0X40000000U)
+#define RGX_CR_ISP_STORE1_EOR_SHIFT                       (29U)
+#define RGX_CR_ISP_STORE1_EOR_CLRMSK                      (0XDFFFFFFFU)
+#define RGX_CR_ISP_STORE1_EOR_EN                          (0X20000000U)
+#define RGX_CR_ISP_STORE1_TILE_LAST_SHIFT                 (28U)
+#define RGX_CR_ISP_STORE1_TILE_LAST_CLRMSK                (0XEFFFFFFFU)
+#define RGX_CR_ISP_STORE1_TILE_LAST_EN                    (0X10000000U)
+#define RGX_CR_ISP_STORE1_MT_SHIFT                        (24U)
+#define RGX_CR_ISP_STORE1_MT_CLRMSK                       (0XF0FFFFFFU)
+#define RGX_CR_ISP_STORE1_TILE_X_SHIFT                    (12U)
+#define RGX_CR_ISP_STORE1_TILE_X_CLRMSK                   (0XFFC00FFFU)
+#define RGX_CR_ISP_STORE1_TILE_Y_SHIFT                    (0U)
+#define RGX_CR_ISP_STORE1_TILE_Y_CLRMSK                   (0XFFFFFC00U)
+
+
+/*
+    Register RGX_CR_ISP_STORE2
+*/
+#define RGX_CR_ISP_STORE2                                 (0x1018U)
+#define RGX_CR_ISP_STORE2_MASKFULL                        (IMG_UINT64_C(0x000000007F3FF3FF))
+#define RGX_CR_ISP_STORE2_ACTIVE_SHIFT                    (30U)
+#define RGX_CR_ISP_STORE2_ACTIVE_CLRMSK                   (0XBFFFFFFFU)
+#define RGX_CR_ISP_STORE2_ACTIVE_EN                       (0X40000000U)
+#define RGX_CR_ISP_STORE2_EOR_SHIFT                       (29U)
+#define RGX_CR_ISP_STORE2_EOR_CLRMSK                      (0XDFFFFFFFU)
+#define RGX_CR_ISP_STORE2_EOR_EN                          (0X20000000U)
+#define RGX_CR_ISP_STORE2_TILE_LAST_SHIFT                 (28U)
+#define RGX_CR_ISP_STORE2_TILE_LAST_CLRMSK                (0XEFFFFFFFU)
+#define RGX_CR_ISP_STORE2_TILE_LAST_EN                    (0X10000000U)
+#define RGX_CR_ISP_STORE2_MT_SHIFT                        (24U)
+#define RGX_CR_ISP_STORE2_MT_CLRMSK                       (0XF0FFFFFFU)
+#define RGX_CR_ISP_STORE2_TILE_X_SHIFT                    (12U)
+#define RGX_CR_ISP_STORE2_TILE_X_CLRMSK                   (0XFFC00FFFU)
+#define RGX_CR_ISP_STORE2_TILE_Y_SHIFT                    (0U)
+#define RGX_CR_ISP_STORE2_TILE_Y_CLRMSK                   (0XFFFFFC00U)
+
+
+/*
+    Register RGX_CR_ISP_RESUME0
+*/
+#define RGX_CR_ISP_RESUME0                                (0x1020U)
+#define RGX_CR_ISP_RESUME0_MASKFULL                       (IMG_UINT64_C(0x00000000003FF3FF))
+#define RGX_CR_ISP_RESUME0_TILE_X_SHIFT                   (12U)
+#define RGX_CR_ISP_RESUME0_TILE_X_CLRMSK                  (0XFFC00FFFU)
+#define RGX_CR_ISP_RESUME0_TILE_Y_SHIFT                   (0U)
+#define RGX_CR_ISP_RESUME0_TILE_Y_CLRMSK                  (0XFFFFFC00U)
+
+
+/*
+    Register RGX_CR_ISP_RESUME1
+*/
+#define RGX_CR_ISP_RESUME1                                (0x1028U)
+#define RGX_CR_ISP_RESUME1_MASKFULL                       (IMG_UINT64_C(0x00000000003FF3FF))
+#define RGX_CR_ISP_RESUME1_TILE_X_SHIFT                   (12U)
+#define RGX_CR_ISP_RESUME1_TILE_X_CLRMSK                  (0XFFC00FFFU)
+#define RGX_CR_ISP_RESUME1_TILE_Y_SHIFT                   (0U)
+#define RGX_CR_ISP_RESUME1_TILE_Y_CLRMSK                  (0XFFFFFC00U)
+
+
+/*
+    Register RGX_CR_ISP_RESUME2
+*/
+#define RGX_CR_ISP_RESUME2                                (0x1030U)
+#define RGX_CR_ISP_RESUME2_MASKFULL                       (IMG_UINT64_C(0x00000000003FF3FF))
+#define RGX_CR_ISP_RESUME2_TILE_X_SHIFT                   (12U)
+#define RGX_CR_ISP_RESUME2_TILE_X_CLRMSK                  (0XFFC00FFFU)
+#define RGX_CR_ISP_RESUME2_TILE_Y_SHIFT                   (0U)
+#define RGX_CR_ISP_RESUME2_TILE_Y_CLRMSK                  (0XFFFFFC00U)
+
+
+/*
+    Register RGX_CR_ISP_STATUS
+*/
+#define RGX_CR_ISP_STATUS                                 (0x1038U)
+#define RGX_CR_ISP_STATUS_MASKFULL                        (IMG_UINT64_C(0x0000000000000007))
+#define RGX_CR_ISP_STATUS_SPLIT_MAX_SHIFT                 (2U)
+#define RGX_CR_ISP_STATUS_SPLIT_MAX_CLRMSK                (0XFFFFFFFBU)
+#define RGX_CR_ISP_STATUS_SPLIT_MAX_EN                    (0X00000004U)
+#define RGX_CR_ISP_STATUS_ACTIVE_SHIFT                    (1U)
+#define RGX_CR_ISP_STATUS_ACTIVE_CLRMSK                   (0XFFFFFFFDU)
+#define RGX_CR_ISP_STATUS_ACTIVE_EN                       (0X00000002U)
+#define RGX_CR_ISP_STATUS_EOR_SHIFT                       (0U)
+#define RGX_CR_ISP_STATUS_EOR_CLRMSK                      (0XFFFFFFFEU)
+#define RGX_CR_ISP_STATUS_EOR_EN                          (0X00000001U)
+
+
+/*
+    Register group: RGX_CR_BIF_CAT_BASE, with 8 repeats
+*/
+#define RGX_CR_BIF_CAT_BASE_REPEATCOUNT                   (8)
+/*
+    Register RGX_CR_BIF_CAT_BASE0
+*/
+#define RGX_CR_BIF_CAT_BASE0                              (0x1200U)
+#define RGX_CR_BIF_CAT_BASE0_MASKFULL                     (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_CR_BIF_CAT_BASE0_ADDR_SHIFT                   (12U)
+#define RGX_CR_BIF_CAT_BASE0_ADDR_CLRMSK                  (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_CAT_BASE0_ADDR_ALIGNSHIFT              (12U)
+#define RGX_CR_BIF_CAT_BASE0_ADDR_ALIGNSIZE               (4096U)
+
+
+/*
+    Register RGX_CR_BIF_CAT_BASE1
+*/
+#define RGX_CR_BIF_CAT_BASE1                              (0x1208U)
+#define RGX_CR_BIF_CAT_BASE1_MASKFULL                     (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_CR_BIF_CAT_BASE1_ADDR_SHIFT                   (12U)
+#define RGX_CR_BIF_CAT_BASE1_ADDR_CLRMSK                  (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_CAT_BASE1_ADDR_ALIGNSHIFT              (12U)
+#define RGX_CR_BIF_CAT_BASE1_ADDR_ALIGNSIZE               (4096U)
+
+
+/*
+    Register RGX_CR_BIF_CAT_BASE2
+*/
+#define RGX_CR_BIF_CAT_BASE2                              (0x1210U)
+#define RGX_CR_BIF_CAT_BASE2_MASKFULL                     (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_CR_BIF_CAT_BASE2_ADDR_SHIFT                   (12U)
+#define RGX_CR_BIF_CAT_BASE2_ADDR_CLRMSK                  (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_CAT_BASE2_ADDR_ALIGNSHIFT              (12U)
+#define RGX_CR_BIF_CAT_BASE2_ADDR_ALIGNSIZE               (4096U)
+
+
+/*
+    Register RGX_CR_BIF_CAT_BASE3
+*/
+#define RGX_CR_BIF_CAT_BASE3                              (0x1218U)
+#define RGX_CR_BIF_CAT_BASE3_MASKFULL                     (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_CR_BIF_CAT_BASE3_ADDR_SHIFT                   (12U)
+#define RGX_CR_BIF_CAT_BASE3_ADDR_CLRMSK                  (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_CAT_BASE3_ADDR_ALIGNSHIFT              (12U)
+#define RGX_CR_BIF_CAT_BASE3_ADDR_ALIGNSIZE               (4096U)
+
+
+/*
+    Register RGX_CR_BIF_CAT_BASE4
+*/
+#define RGX_CR_BIF_CAT_BASE4                              (0x1220U)
+#define RGX_CR_BIF_CAT_BASE4_MASKFULL                     (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_CR_BIF_CAT_BASE4_ADDR_SHIFT                   (12U)
+#define RGX_CR_BIF_CAT_BASE4_ADDR_CLRMSK                  (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_CAT_BASE4_ADDR_ALIGNSHIFT              (12U)
+#define RGX_CR_BIF_CAT_BASE4_ADDR_ALIGNSIZE               (4096U)
+
+
+/*
+    Register RGX_CR_BIF_CAT_BASE5
+*/
+#define RGX_CR_BIF_CAT_BASE5                              (0x1228U)
+#define RGX_CR_BIF_CAT_BASE5_MASKFULL                     (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_CR_BIF_CAT_BASE5_ADDR_SHIFT                   (12U)
+#define RGX_CR_BIF_CAT_BASE5_ADDR_CLRMSK                  (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_CAT_BASE5_ADDR_ALIGNSHIFT              (12U)
+#define RGX_CR_BIF_CAT_BASE5_ADDR_ALIGNSIZE               (4096U)
+
+
+/*
+    Register RGX_CR_BIF_CAT_BASE6
+*/
+#define RGX_CR_BIF_CAT_BASE6                              (0x1230U)
+#define RGX_CR_BIF_CAT_BASE6_MASKFULL                     (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_CR_BIF_CAT_BASE6_ADDR_SHIFT                   (12U)
+#define RGX_CR_BIF_CAT_BASE6_ADDR_CLRMSK                  (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_CAT_BASE6_ADDR_ALIGNSHIFT              (12U)
+#define RGX_CR_BIF_CAT_BASE6_ADDR_ALIGNSIZE               (4096U)
+
+
+/*
+    Register RGX_CR_BIF_CAT_BASE7
+*/
+#define RGX_CR_BIF_CAT_BASE7                              (0x1238U)
+#define RGX_CR_BIF_CAT_BASE7_MASKFULL                     (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_CR_BIF_CAT_BASE7_ADDR_SHIFT                   (12U)
+#define RGX_CR_BIF_CAT_BASE7_ADDR_CLRMSK                  (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_CAT_BASE7_ADDR_ALIGNSHIFT              (12U)
+#define RGX_CR_BIF_CAT_BASE7_ADDR_ALIGNSIZE               (4096U)
+
+
+/*
+    Register RGX_CR_BIF_CAT_BASE_INDEX
+*/
+#define RGX_CR_BIF_CAT_BASE_INDEX                         (0x1240U)
+#define RGX_CR_BIF_CAT_BASE_INDEX_MASKFULL                (IMG_UINT64_C(0x0007070707070707))
+#if defined(RGX_FEATURE_RAY_TRACING)
+#define RGX_CR_BIF_CAT_BASE_INDEX_RVTX_SHIFT              (48U)
+#define RGX_CR_BIF_CAT_BASE_INDEX_RVTX_CLRMSK             (IMG_UINT64_C(0XFFF8FFFFFFFFFFFF))
+#define RGX_CR_BIF_CAT_BASE_INDEX_RAY_SHIFT               (40U)
+#define RGX_CR_BIF_CAT_BASE_INDEX_RAY_CLRMSK              (IMG_UINT64_C(0XFFFFF8FFFFFFFFFF))
+#endif /* RGX_FEATURE_RAY_TRACING */
+
+#define RGX_CR_BIF_CAT_BASE_INDEX_HOST_SHIFT              (32U)
+#define RGX_CR_BIF_CAT_BASE_INDEX_HOST_CLRMSK             (IMG_UINT64_C(0XFFFFFFF8FFFFFFFF))
+#define RGX_CR_BIF_CAT_BASE_INDEX_TLA_SHIFT               (24U)
+#define RGX_CR_BIF_CAT_BASE_INDEX_TLA_CLRMSK              (IMG_UINT64_C(0XFFFFFFFFF8FFFFFF))
+#define RGX_CR_BIF_CAT_BASE_INDEX_CDM_SHIFT               (16U)
+#define RGX_CR_BIF_CAT_BASE_INDEX_CDM_CLRMSK              (IMG_UINT64_C(0XFFFFFFFFFFF8FFFF))
+#define RGX_CR_BIF_CAT_BASE_INDEX_PIXEL_SHIFT             (8U)
+#define RGX_CR_BIF_CAT_BASE_INDEX_PIXEL_CLRMSK            (IMG_UINT64_C(0XFFFFFFFFFFFFF8FF))
+#define RGX_CR_BIF_CAT_BASE_INDEX_TA_SHIFT                (0U)
+#define RGX_CR_BIF_CAT_BASE_INDEX_TA_CLRMSK               (IMG_UINT64_C(0XFFFFFFFFFFFFFFF8))
+
+
+/*
+    Register RGX_CR_BIF_PM_CAT_BASE_VCE0
+*/
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0                       (0x1248U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_MASKFULL              (IMG_UINT64_C(0x0FFFFFFFFFFFF003))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_INIT_PAGE_SHIFT       (40U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_INIT_PAGE_CLRMSK      (IMG_UINT64_C(0XF00000FFFFFFFFFF))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_ADDR_SHIFT            (12U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_ADDR_CLRMSK           (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_WRAP_SHIFT            (1U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_WRAP_CLRMSK           (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_WRAP_EN               (IMG_UINT64_C(0X0000000000000002))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_VALID_SHIFT           (0U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_VALID_CLRMSK          (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_VALID_EN              (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register RGX_CR_BIF_PM_CAT_BASE_TE0
+*/
+#define RGX_CR_BIF_PM_CAT_BASE_TE0                        (0x1250U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_MASKFULL               (IMG_UINT64_C(0x0FFFFFFFFFFFF003))
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_INIT_PAGE_SHIFT        (40U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_INIT_PAGE_CLRMSK       (IMG_UINT64_C(0XF00000FFFFFFFFFF))
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_ADDR_SHIFT             (12U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_ADDR_CLRMSK            (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_WRAP_SHIFT             (1U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_WRAP_CLRMSK            (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_WRAP_EN                (IMG_UINT64_C(0X0000000000000002))
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_VALID_SHIFT            (0U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_VALID_CLRMSK           (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_VALID_EN               (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register RGX_CR_BIF_PM_CAT_BASE_ALIST0
+*/
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0                     (0x1260U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_MASKFULL            (IMG_UINT64_C(0x0FFFFFFFFFFFF003))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_INIT_PAGE_SHIFT     (40U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_INIT_PAGE_CLRMSK    (IMG_UINT64_C(0XF00000FFFFFFFFFF))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_ADDR_SHIFT          (12U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_ADDR_CLRMSK         (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_WRAP_SHIFT          (1U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_WRAP_CLRMSK         (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_WRAP_EN             (IMG_UINT64_C(0X0000000000000002))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_VALID_SHIFT         (0U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_VALID_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_VALID_EN            (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register RGX_CR_BIF_PM_CAT_BASE_VCE1
+*/
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1                       (0x1268U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_MASKFULL              (IMG_UINT64_C(0x0FFFFFFFFFFFF003))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_INIT_PAGE_SHIFT       (40U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_INIT_PAGE_CLRMSK      (IMG_UINT64_C(0XF00000FFFFFFFFFF))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_ADDR_SHIFT            (12U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_ADDR_CLRMSK           (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_WRAP_SHIFT            (1U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_WRAP_CLRMSK           (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_WRAP_EN               (IMG_UINT64_C(0X0000000000000002))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_VALID_SHIFT           (0U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_VALID_CLRMSK          (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_VALID_EN              (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register RGX_CR_BIF_PM_CAT_BASE_TE1
+*/
+#define RGX_CR_BIF_PM_CAT_BASE_TE1                        (0x1270U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_MASKFULL               (IMG_UINT64_C(0x0FFFFFFFFFFFF003))
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_INIT_PAGE_SHIFT        (40U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_INIT_PAGE_CLRMSK       (IMG_UINT64_C(0XF00000FFFFFFFFFF))
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_ADDR_SHIFT             (12U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_ADDR_CLRMSK            (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_WRAP_SHIFT             (1U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_WRAP_CLRMSK            (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_WRAP_EN                (IMG_UINT64_C(0X0000000000000002))
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_VALID_SHIFT            (0U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_VALID_CLRMSK           (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_VALID_EN               (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register RGX_CR_BIF_PM_CAT_BASE_ALIST1
+*/
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1                     (0x1280U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_MASKFULL            (IMG_UINT64_C(0x0FFFFFFFFFFFF003))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_INIT_PAGE_SHIFT     (40U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_INIT_PAGE_CLRMSK    (IMG_UINT64_C(0XF00000FFFFFFFFFF))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_ADDR_SHIFT          (12U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_ADDR_CLRMSK         (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_WRAP_SHIFT          (1U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_WRAP_CLRMSK         (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_WRAP_EN             (IMG_UINT64_C(0X0000000000000002))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_VALID_SHIFT         (0U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_VALID_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_VALID_EN            (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register RGX_CR_BIF_MMU_ENTRY_STATUS
+*/
+#define RGX_CR_BIF_MMU_ENTRY_STATUS                       (0x1288U)
+#define RGX_CR_BIF_MMU_ENTRY_STATUS_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFF0F3))
+#define RGX_CR_BIF_MMU_ENTRY_STATUS_ADDRESS_SHIFT         (12U)
+#define RGX_CR_BIF_MMU_ENTRY_STATUS_ADDRESS_CLRMSK        (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_MMU_ENTRY_STATUS_CAT_BASE_SHIFT        (4U)
+#define RGX_CR_BIF_MMU_ENTRY_STATUS_CAT_BASE_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFFFFFFF0F))
+#define RGX_CR_BIF_MMU_ENTRY_STATUS_DATA_TYPE_SHIFT       (0U)
+#define RGX_CR_BIF_MMU_ENTRY_STATUS_DATA_TYPE_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFFFFFFFC))
+
+
+/*
+    Register RGX_CR_BIF_MMU_ENTRY
+*/
+#define RGX_CR_BIF_MMU_ENTRY                              (0x1290U)
+#define RGX_CR_BIF_MMU_ENTRY_MASKFULL                     (IMG_UINT64_C(0x0000000000000003))
+#define RGX_CR_BIF_MMU_ENTRY_ENABLE_SHIFT                 (1U)
+#define RGX_CR_BIF_MMU_ENTRY_ENABLE_CLRMSK                (0XFFFFFFFDU)
+#define RGX_CR_BIF_MMU_ENTRY_ENABLE_EN                    (0X00000002U)
+#define RGX_CR_BIF_MMU_ENTRY_PENDING_SHIFT                (0U)
+#define RGX_CR_BIF_MMU_ENTRY_PENDING_CLRMSK               (0XFFFFFFFEU)
+#define RGX_CR_BIF_MMU_ENTRY_PENDING_EN                   (0X00000001U)
+
+
+/*
+    Register RGX_CR_BIF_CTRL_INVAL
+*/
+#define RGX_CR_BIF_CTRL_INVAL                             (0x12A0U)
+#define RGX_CR_BIF_CTRL_INVAL_MASKFULL                    (IMG_UINT64_C(0x000000000000000F))
+#define RGX_CR_BIF_CTRL_INVAL_TLB1_SHIFT                  (3U)
+#define RGX_CR_BIF_CTRL_INVAL_TLB1_CLRMSK                 (0XFFFFFFF7U)
+#define RGX_CR_BIF_CTRL_INVAL_TLB1_EN                     (0X00000008U)
+#define RGX_CR_BIF_CTRL_INVAL_PC_SHIFT                    (2U)
+#define RGX_CR_BIF_CTRL_INVAL_PC_CLRMSK                   (0XFFFFFFFBU)
+#define RGX_CR_BIF_CTRL_INVAL_PC_EN                       (0X00000004U)
+#define RGX_CR_BIF_CTRL_INVAL_PD_SHIFT                    (1U)
+#define RGX_CR_BIF_CTRL_INVAL_PD_CLRMSK                   (0XFFFFFFFDU)
+#define RGX_CR_BIF_CTRL_INVAL_PD_EN                       (0X00000002U)
+#define RGX_CR_BIF_CTRL_INVAL_PT_SHIFT                    (0U)
+#define RGX_CR_BIF_CTRL_INVAL_PT_CLRMSK                   (0XFFFFFFFEU)
+#define RGX_CR_BIF_CTRL_INVAL_PT_EN                       (0X00000001U)
+
+
+/*
+    Register RGX_CR_BIF_CTRL
+*/
+#define RGX_CR_BIF_CTRL                                   (0x12A8U)
+#define RGX_CR_BIF_CTRL_MASKFULL                          (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_BIF_CTRL_ENABLE_MMU_QUEUE_BYPASS_SHIFT     (7U)
+#define RGX_CR_BIF_CTRL_ENABLE_MMU_QUEUE_BYPASS_CLRMSK    (0XFFFFFF7FU)
+#define RGX_CR_BIF_CTRL_ENABLE_MMU_QUEUE_BYPASS_EN        (0X00000080U)
+#define RGX_CR_BIF_CTRL_ENABLE_MMU_AUTO_PREFETCH_SHIFT    (6U)
+#define RGX_CR_BIF_CTRL_ENABLE_MMU_AUTO_PREFETCH_CLRMSK   (0XFFFFFFBFU)
+#define RGX_CR_BIF_CTRL_ENABLE_MMU_AUTO_PREFETCH_EN       (0X00000040U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF3_SHIFT              (5U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF3_CLRMSK             (0XFFFFFFDFU)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF3_EN                 (0X00000020U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF2_SHIFT              (4U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF2_CLRMSK             (0XFFFFFFEFU)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF2_EN                 (0X00000010U)
+#define RGX_CR_BIF_CTRL_PAUSE_BIF1_SHIFT                  (3U)
+#define RGX_CR_BIF_CTRL_PAUSE_BIF1_CLRMSK                 (0XFFFFFFF7U)
+#define RGX_CR_BIF_CTRL_PAUSE_BIF1_EN                     (0X00000008U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_PM_SHIFT                (2U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_PM_CLRMSK               (0XFFFFFFFBU)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_PM_EN                   (0X00000004U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF1_SHIFT              (1U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF1_CLRMSK             (0XFFFFFFFDU)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF1_EN                 (0X00000002U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF0_SHIFT              (0U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF0_CLRMSK             (0XFFFFFFFEU)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF0_EN                 (0X00000001U)
+
+
+/*
+    Register RGX_CR_BIF_FAULT_BANK0_MMU_STATUS
+*/
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS                 (0x12B0U)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_MASKFULL        (IMG_UINT64_C(0x000000000000F775))
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT  (12U)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK (0XFFFF0FFFU)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_SHIFT (8U)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_CLRMSK (0XFFFFF8FFU)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_SHIFT (5U)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_CLRMSK (0XFFFFFF9FU)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_SHIFT  (4U)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_EN     (0X00000010U)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_SHIFT (2U)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_EN (0X00000004U)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_SHIFT     (0U)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_CLRMSK    (0XFFFFFFFEU)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_EN        (0X00000001U)
+
+
+/*
+    Register RGX_CR_BIF_FAULT_BANK0_REQ_STATUS
+*/
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS                 (0x12B8U)
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_MASKFULL        (IMG_UINT64_C(0x0007FFFFFFFFFFF0))
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_RNW_SHIFT       (50U)
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_RNW_CLRMSK      (IMG_UINT64_C(0XFFFBFFFFFFFFFFFF))
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_RNW_EN          (IMG_UINT64_C(0X0004000000000000))
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_SHIFT    (44U)
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_CLRMSK   (IMG_UINT64_C(0XFFFC0FFFFFFFFFFF))
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_SHIFT    (40U)
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_CLRMSK   (IMG_UINT64_C(0XFFFFF0FFFFFFFFFF))
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_SHIFT   (4U)
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK  (IMG_UINT64_C(0XFFFFFF000000000F))
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSHIFT (4U)
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSIZE (16U)
+
+
+/*
+    Register RGX_CR_BIF_FAULT_BANK1_MMU_STATUS
+*/
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS                 (0x12C0U)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_MASKFULL        (IMG_UINT64_C(0x000000000000F775))
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_CAT_BASE_SHIFT  (12U)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_CAT_BASE_CLRMSK (0XFFFF0FFFU)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_PAGE_SIZE_SHIFT (8U)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_PAGE_SIZE_CLRMSK (0XFFFFF8FFU)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_DATA_TYPE_SHIFT (5U)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_DATA_TYPE_CLRMSK (0XFFFFFF9FU)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_RO_SHIFT  (4U)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_RO_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_RO_EN     (0X00000010U)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_PM_META_RO_SHIFT (2U)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_PM_META_RO_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_PM_META_RO_EN (0X00000004U)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_SHIFT     (0U)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_CLRMSK    (0XFFFFFFFEU)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_EN        (0X00000001U)
+
+
+/*
+    Register RGX_CR_BIF_FAULT_BANK1_REQ_STATUS
+*/
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS                 (0x12C8U)
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_MASKFULL        (IMG_UINT64_C(0x0007FFFFFFFFFFF0))
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_RNW_SHIFT       (50U)
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_RNW_CLRMSK      (IMG_UINT64_C(0XFFFBFFFFFFFFFFFF))
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_RNW_EN          (IMG_UINT64_C(0X0004000000000000))
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_SB_SHIFT    (44U)
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_SB_CLRMSK   (IMG_UINT64_C(0XFFFC0FFFFFFFFFFF))
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_ID_SHIFT    (40U)
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_ID_CLRMSK   (IMG_UINT64_C(0XFFFFF0FFFFFFFFFF))
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_SHIFT   (4U)
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_CLRMSK  (IMG_UINT64_C(0XFFFFFF000000000F))
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_ALIGNSHIFT (4U)
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_ALIGNSIZE (16U)
+
+
+/*
+    Register RGX_CR_BIF_MMU_STATUS
+*/
+#define RGX_CR_BIF_MMU_STATUS                             (0x12D0U)
+#define RGX_CR_BIF_MMU_STATUS_MASKFULL                    (IMG_UINT64_C(0x000000001FFFFFF7))
+#define RGX_CR_BIF_MMU_STATUS_PM_FAULT_SHIFT              (28U)
+#define RGX_CR_BIF_MMU_STATUS_PM_FAULT_CLRMSK             (0XEFFFFFFFU)
+#define RGX_CR_BIF_MMU_STATUS_PM_FAULT_EN                 (0X10000000U)
+#define RGX_CR_BIF_MMU_STATUS_PC_DATA_SHIFT               (20U)
+#define RGX_CR_BIF_MMU_STATUS_PC_DATA_CLRMSK              (0XF00FFFFFU)
+#define RGX_CR_BIF_MMU_STATUS_PD_DATA_SHIFT               (12U)
+#define RGX_CR_BIF_MMU_STATUS_PD_DATA_CLRMSK              (0XFFF00FFFU)
+#define RGX_CR_BIF_MMU_STATUS_PT_DATA_SHIFT               (4U)
+#define RGX_CR_BIF_MMU_STATUS_PT_DATA_CLRMSK              (0XFFFFF00FU)
+#define RGX_CR_BIF_MMU_STATUS_STALLED_SHIFT               (2U)
+#define RGX_CR_BIF_MMU_STATUS_STALLED_CLRMSK              (0XFFFFFFFBU)
+#define RGX_CR_BIF_MMU_STATUS_STALLED_EN                  (0X00000004U)
+#define RGX_CR_BIF_MMU_STATUS_PAUSED_SHIFT                (1U)
+#define RGX_CR_BIF_MMU_STATUS_PAUSED_CLRMSK               (0XFFFFFFFDU)
+#define RGX_CR_BIF_MMU_STATUS_PAUSED_EN                   (0X00000002U)
+#define RGX_CR_BIF_MMU_STATUS_BUSY_SHIFT                  (0U)
+#define RGX_CR_BIF_MMU_STATUS_BUSY_CLRMSK                 (0XFFFFFFFEU)
+#define RGX_CR_BIF_MMU_STATUS_BUSY_EN                     (0X00000001U)
+
+
+/*
+    Register RGX_CR_BIF_READS_EXT_STATUS
+*/
+#define RGX_CR_BIF_READS_EXT_STATUS                       (0x1320U)
+#define RGX_CR_BIF_READS_EXT_STATUS_MASKFULL              (IMG_UINT64_C(0x00000000007FFFFF))
+#define RGX_CR_BIF_READS_EXT_STATUS_MMU_SHIFT             (16U)
+#define RGX_CR_BIF_READS_EXT_STATUS_MMU_CLRMSK            (0XFF80FFFFU)
+#define RGX_CR_BIF_READS_EXT_STATUS_BANK1_SHIFT           (0U)
+#define RGX_CR_BIF_READS_EXT_STATUS_BANK1_CLRMSK          (0XFFFF0000U)
+
+
+/*
+    Register RGX_CR_BIF_READS_INT_STATUS
+*/
+#define RGX_CR_BIF_READS_INT_STATUS                       (0x1328U)
+#define RGX_CR_BIF_READS_INT_STATUS_MASKFULL              (IMG_UINT64_C(0x00000000007FFFFF))
+#define RGX_CR_BIF_READS_INT_STATUS_MMU_SHIFT             (16U)
+#define RGX_CR_BIF_READS_INT_STATUS_MMU_CLRMSK            (0XFF80FFFFU)
+#define RGX_CR_BIF_READS_INT_STATUS_BANK1_SHIFT           (0U)
+#define RGX_CR_BIF_READS_INT_STATUS_BANK1_CLRMSK          (0XFFFF0000U)
+
+
+/*
+    Register RGX_CR_BIFPM_READS_INT_STATUS
+*/
+#define RGX_CR_BIFPM_READS_INT_STATUS                     (0x1330U)
+#define RGX_CR_BIFPM_READS_INT_STATUS_MASKFULL            (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_BIFPM_READS_INT_STATUS_BANK0_SHIFT         (0U)
+#define RGX_CR_BIFPM_READS_INT_STATUS_BANK0_CLRMSK        (0XFFFF0000U)
+
+
+/*
+    Register RGX_CR_BIFPM_READS_EXT_STATUS
+*/
+#define RGX_CR_BIFPM_READS_EXT_STATUS                     (0x1338U)
+#define RGX_CR_BIFPM_READS_EXT_STATUS_MASKFULL            (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_BIFPM_READS_EXT_STATUS_BANK0_SHIFT         (0U)
+#define RGX_CR_BIFPM_READS_EXT_STATUS_BANK0_CLRMSK        (0XFFFF0000U)
+
+
+/*
+    Register RGX_CR_BIFPM_STATUS_MMU
+*/
+#define RGX_CR_BIFPM_STATUS_MMU                           (0x1350U)
+#define RGX_CR_BIFPM_STATUS_MMU_MASKFULL                  (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_BIFPM_STATUS_MMU_REQUESTS_SHIFT            (0U)
+#define RGX_CR_BIFPM_STATUS_MMU_REQUESTS_CLRMSK           (0XFFFFFF00U)
+
+
+/*
+    Register RGX_CR_BIF_STATUS_MMU
+*/
+#define RGX_CR_BIF_STATUS_MMU                             (0x1358U)
+#define RGX_CR_BIF_STATUS_MMU_MASKFULL                    (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_BIF_STATUS_MMU_REQUESTS_SHIFT              (0U)
+#define RGX_CR_BIF_STATUS_MMU_REQUESTS_CLRMSK             (0XFFFFFF00U)
+
+
+/*
+    Register RGX_CR_BIF_FAULT_READ
+*/
+#define RGX_CR_BIF_FAULT_READ                             (0x13E0U)
+#define RGX_CR_BIF_FAULT_READ_MASKFULL                    (IMG_UINT64_C(0x000000FFFFFFFFF0))
+#define RGX_CR_BIF_FAULT_READ_ADDRESS_SHIFT               (4U)
+#define RGX_CR_BIF_FAULT_READ_ADDRESS_CLRMSK              (IMG_UINT64_C(0XFFFFFF000000000F))
+#define RGX_CR_BIF_FAULT_READ_ADDRESS_ALIGNSHIFT          (4U)
+#define RGX_CR_BIF_FAULT_READ_ADDRESS_ALIGNSIZE           (16U)
+
+
+/*
+    Register RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS
+*/
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS           (0x1430U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_MASKFULL  (IMG_UINT64_C(0x000000000000F775))
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT (12U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK (0XFFFF0FFFU)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_SHIFT (8U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_CLRMSK (0XFFFFF8FFU)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_SHIFT (5U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_CLRMSK (0XFFFFFF9FU)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_SHIFT (4U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_EN (0X00000010U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_SHIFT (2U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_EN (0X00000004U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_SHIFT (0U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_EN  (0X00000001U)
+
+
+/*
+    Register RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS
+*/
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS           (0x1438U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_MASKFULL  (IMG_UINT64_C(0x0007FFFFFFFFFFF0))
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_RNW_SHIFT (50U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_RNW_CLRMSK (IMG_UINT64_C(0XFFFBFFFFFFFFFFFF))
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_RNW_EN    (IMG_UINT64_C(0X0004000000000000))
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_SHIFT (44U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_CLRMSK (IMG_UINT64_C(0XFFFC0FFFFFFFFFFF))
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_SHIFT (40U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_CLRMSK (IMG_UINT64_C(0XFFFFF0FFFFFFFFFF))
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_SHIFT (4U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0XFFFFFF000000000F))
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSHIFT (4U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSIZE (16U)
+
+
+/*
+    Register RGX_CR_MCU_FENCE
+*/
+#define RGX_CR_MCU_FENCE                                  (0x1740U)
+#define RGX_CR_MCU_FENCE_MASKFULL                         (IMG_UINT64_C(0x000007FFFFFFFFE0))
+#define RGX_CR_MCU_FENCE_DM_SHIFT                         (40U)
+#define RGX_CR_MCU_FENCE_DM_CLRMSK                        (IMG_UINT64_C(0XFFFFF8FFFFFFFFFF))
+#define RGX_CR_MCU_FENCE_DM_VERTEX                        (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_MCU_FENCE_DM_PIXEL                         (IMG_UINT64_C(0x0000010000000000))
+#define RGX_CR_MCU_FENCE_DM_COMPUTE                       (IMG_UINT64_C(0x0000020000000000))
+#if defined(RGX_FEATURE_RAY_TRACING)
+#define RGX_CR_MCU_FENCE_DM_RAY_VERTEX                    (IMG_UINT64_C(0x0000030000000000))
+#define RGX_CR_MCU_FENCE_DM_RAY                           (IMG_UINT64_C(0x0000040000000000))
+#endif /* RGX_FEATURE_RAY_TRACING */
+
+#define RGX_CR_MCU_FENCE_ADDR_SHIFT                       (5U)
+#define RGX_CR_MCU_FENCE_ADDR_CLRMSK                      (IMG_UINT64_C(0XFFFFFF000000001F))
+#define RGX_CR_MCU_FENCE_ADDR_ALIGNSHIFT                  (5U)
+#define RGX_CR_MCU_FENCE_ADDR_ALIGNSIZE                   (32U)
+
+
+/*
+    Register RGX_CR_SLC_CTRL_MISC
+*/
+#define RGX_CR_SLC_CTRL_MISC                              (0x3800U)
+#define RGX_CR_SLC_CTRL_MISC_MASKFULL                     (IMG_UINT64_C(0xFFFFFFFF00FF0105))
+#define RGX_CR_SLC_CTRL_MISC_SCRAMBLE_BITS_SHIFT          (32U)
+#define RGX_CR_SLC_CTRL_MISC_SCRAMBLE_BITS_CLRMSK         (IMG_UINT64_C(0X00000000FFFFFFFF))
+#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_SHIFT       (16U)
+#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFF00FFFF))
+#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_INTERLEAVED_64_BYTE (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_INTERLEAVED_128_BYTE (IMG_UINT64_C(0x0000000000010000))
+#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_SIMPLE_HASH1 (IMG_UINT64_C(0x0000000000100000))
+#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_SIMPLE_HASH2 (IMG_UINT64_C(0x0000000000110000))
+#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_PVR_HASH1   (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_PVR_HASH2_SCRAMBLE (IMG_UINT64_C(0x0000000000210000))
+#define RGX_CR_SLC_CTRL_MISC_PAUSE_SHIFT                  (8U)
+#define RGX_CR_SLC_CTRL_MISC_PAUSE_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFFFFFEFF))
+#define RGX_CR_SLC_CTRL_MISC_PAUSE_EN                     (IMG_UINT64_C(0X0000000000000100))
+#define RGX_CR_SLC_CTRL_MISC_ENABLE_LINE_USE_LIMIT_SHIFT  (2U)
+#define RGX_CR_SLC_CTRL_MISC_ENABLE_LINE_USE_LIMIT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define RGX_CR_SLC_CTRL_MISC_ENABLE_LINE_USE_LIMIT_EN     (IMG_UINT64_C(0X0000000000000004))
+#define RGX_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_SHIFT  (0U)
+#define RGX_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_EN     (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register RGX_CR_SLC_CTRL_FLUSH_INVAL
+*/
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL                       (0x3818U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_MASKFULL              (IMG_UINT64_C(0x00000000800007FF))
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_LAZY_SHIFT            (31U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_LAZY_CLRMSK           (0X7FFFFFFFU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_LAZY_EN               (0X80000000U)
+#if defined(RGX_FEATURE_RAY_TRACING)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_VERTEX_SHIFT   (10U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_VERTEX_CLRMSK  (0XFFFFFBFFU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_VERTEX_EN      (0X00000400U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_SHIFT          (9U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_CLRMSK         (0XFFFFFDFFU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_EN             (0X00000200U)
+#endif /* RGX_FEATURE_RAY_TRACING */
+
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_FRC_SHIFT          (8U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_FRC_CLRMSK         (0XFFFFFEFFU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_FRC_EN             (0X00000100U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXE_SHIFT          (7U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXE_CLRMSK         (0XFFFFFF7FU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXE_EN             (0X00000080U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXD_SHIFT          (6U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXD_CLRMSK         (0XFFFFFFBFU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXD_EN             (0X00000040U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_HOST_META_SHIFT    (5U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_HOST_META_CLRMSK   (0XFFFFFFDFU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_HOST_META_EN       (0X00000020U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_MMU_SHIFT          (4U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_MMU_CLRMSK         (0XFFFFFFEFU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_MMU_EN             (0X00000010U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_COMPUTE_SHIFT      (3U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_COMPUTE_CLRMSK     (0XFFFFFFF7U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_COMPUTE_EN         (0X00000008U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_PIXEL_SHIFT        (2U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_PIXEL_CLRMSK       (0XFFFFFFFBU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_PIXEL_EN           (0X00000004U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_TA_SHIFT           (1U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_TA_CLRMSK          (0XFFFFFFFDU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_TA_EN              (0X00000002U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_ALL_SHIFT             (0U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_ALL_CLRMSK            (0XFFFFFFFEU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_ALL_EN                (0X00000001U)
+
+
+/*
+    Register RGX_CR_SLC_STATUS0
+*/
+#define RGX_CR_SLC_STATUS0                                (0x3820U)
+#define RGX_CR_SLC_STATUS0_MASKFULL                       (IMG_UINT64_C(0x0000000000000007))
+#define RGX_CR_SLC_STATUS0_FLUSH_INVAL_PENDING_SHIFT      (2U)
+#define RGX_CR_SLC_STATUS0_FLUSH_INVAL_PENDING_CLRMSK     (0XFFFFFFFBU)
+#define RGX_CR_SLC_STATUS0_FLUSH_INVAL_PENDING_EN         (0X00000004U)
+#define RGX_CR_SLC_STATUS0_INVAL_PENDING_SHIFT            (1U)
+#define RGX_CR_SLC_STATUS0_INVAL_PENDING_CLRMSK           (0XFFFFFFFDU)
+#define RGX_CR_SLC_STATUS0_INVAL_PENDING_EN               (0X00000002U)
+#define RGX_CR_SLC_STATUS0_FLUSH_PENDING_SHIFT            (0U)
+#define RGX_CR_SLC_STATUS0_FLUSH_PENDING_CLRMSK           (0XFFFFFFFEU)
+#define RGX_CR_SLC_STATUS0_FLUSH_PENDING_EN               (0X00000001U)
+
+
+/*
+    Register RGX_CR_SLC_CTRL_BYPASS
+*/
+#define RGX_CR_SLC_CTRL_BYPASS                            (0x3828U)
+#define RGX_CR_SLC_CTRL_BYPASS_MASKFULL                   (IMG_UINT64_C(0x000000000FFFFFFF))
+#if defined(RGX_FEATURE_RAY_TRACING)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_VERTEX_SHIFT        (27U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_VERTEX_CLRMSK       (0XF7FFFFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_VERTEX_EN           (0X08000000U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_SHIFT               (26U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_CLRMSK              (0XFBFFFFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_EN                  (0X04000000U)
+#endif /* RGX_FEATURE_RAY_TRACING */
+
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_CPF_SHIFT          (25U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_CPF_CLRMSK         (0XFDFFFFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_CPF_EN             (0X02000000U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPU_SHIFT              (24U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPU_CLRMSK             (0XFEFFFFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPU_EN                 (0X01000000U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_FBDC_SHIFT             (23U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_FBDC_CLRMSK            (0XFF7FFFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_FBDC_EN                (0X00800000U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TLA_SHIFT              (22U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TLA_CLRMSK             (0XFFBFFFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TLA_EN                 (0X00400000U)
+#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_N_SHIFT             (21U)
+#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_N_CLRMSK            (0XFFDFFFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_N_EN                (0X00200000U)
+#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_SHIFT               (20U)
+#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_CLRMSK              (0XFFEFFFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_EN                  (0X00100000U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MCU_SHIFT              (19U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MCU_CLRMSK             (0XFFF7FFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MCU_EN                 (0X00080000U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_PDS_SHIFT              (18U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_PDS_CLRMSK             (0XFFFBFFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_PDS_EN                 (0X00040000U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPF_SHIFT              (17U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPF_CLRMSK             (0XFFFDFFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPF_EN                 (0X00020000U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_TPC_SHIFT           (16U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_TPC_CLRMSK          (0XFFFEFFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_TPC_EN              (0X00010000U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_OBJ_SHIFT          (15U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_OBJ_CLRMSK         (0XFFFF7FFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_OBJ_EN             (0X00008000U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_USC_SHIFT              (14U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_USC_CLRMSK             (0XFFFFBFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_USC_EN                 (0X00004000U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_META_SHIFT             (13U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_META_CLRMSK            (0XFFFFDFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_META_EN                (0X00002000U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_HOST_SHIFT             (12U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_HOST_CLRMSK            (0XFFFFEFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_HOST_EN                (0X00001000U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PT_SHIFT           (11U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PT_CLRMSK          (0XFFFFF7FFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PT_EN              (0X00000800U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PD_SHIFT           (10U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PD_CLRMSK          (0XFFFFFBFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PD_EN              (0X00000400U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PC_SHIFT           (9U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PC_CLRMSK          (0XFFFFFDFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PC_EN              (0X00000200U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_FRC_SHIFT               (8U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_FRC_CLRMSK              (0XFFFFFEFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_FRC_EN                  (0X00000100U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_VXE_SHIFT               (7U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_VXE_CLRMSK              (0XFFFFFF7FU)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_VXE_EN                  (0X00000080U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_VXD_SHIFT               (6U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_VXD_CLRMSK              (0XFFFFFFBFU)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_VXD_EN                  (0X00000040U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_HOST_META_SHIFT         (5U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_HOST_META_CLRMSK        (0XFFFFFFDFU)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_HOST_META_EN            (0X00000020U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_MMU_SHIFT               (4U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_MMU_CLRMSK              (0XFFFFFFEFU)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_MMU_EN                  (0X00000010U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_COMPUTE_SHIFT           (3U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_COMPUTE_CLRMSK          (0XFFFFFFF7U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_COMPUTE_EN              (0X00000008U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_PIXEL_SHIFT             (2U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_PIXEL_CLRMSK            (0XFFFFFFFBU)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_PIXEL_EN                (0X00000004U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_TA_SHIFT                (1U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_TA_CLRMSK               (0XFFFFFFFDU)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_TA_EN                   (0X00000002U)
+#define RGX_CR_SLC_CTRL_BYPASS_ALL_SHIFT                  (0U)
+#define RGX_CR_SLC_CTRL_BYPASS_ALL_CLRMSK                 (0XFFFFFFFEU)
+#define RGX_CR_SLC_CTRL_BYPASS_ALL_EN                     (0X00000001U)
+
+
+/*
+    Register RGX_CR_SLC_STATUS1
+*/
+#define RGX_CR_SLC_STATUS1                                (0x3870U)
+#define RGX_CR_SLC_STATUS1_MASKFULL                       (IMG_UINT64_C(0x800003FF03FFFFFF))
+#define RGX_CR_SLC_STATUS1_PAUSED_SHIFT                   (63U)
+#define RGX_CR_SLC_STATUS1_PAUSED_CLRMSK                  (IMG_UINT64_C(0X7FFFFFFFFFFFFFFF))
+#define RGX_CR_SLC_STATUS1_PAUSED_EN                      (IMG_UINT64_C(0X8000000000000000))
+#define RGX_CR_SLC_STATUS1_READS1_SHIFT                   (32U)
+#define RGX_CR_SLC_STATUS1_READS1_CLRMSK                  (IMG_UINT64_C(0XFFFFFC00FFFFFFFF))
+#define RGX_CR_SLC_STATUS1_READS0_SHIFT                   (16U)
+#define RGX_CR_SLC_STATUS1_READS0_CLRMSK                  (IMG_UINT64_C(0XFFFFFFFFFC00FFFF))
+#define RGX_CR_SLC_STATUS1_READS1_EXT_SHIFT               (8U)
+#define RGX_CR_SLC_STATUS1_READS1_EXT_CLRMSK              (IMG_UINT64_C(0XFFFFFFFFFFFF00FF))
+#define RGX_CR_SLC_STATUS1_READS0_EXT_SHIFT               (0U)
+#define RGX_CR_SLC_STATUS1_READS0_EXT_CLRMSK              (IMG_UINT64_C(0XFFFFFFFFFFFFFF00))
+
+
+/*
+    Register RGX_CR_SLC_IDLE
+*/
+#define RGX_CR_SLC_IDLE                                   (0x3898U)
+#define RGX_CR_SLC_IDLE_MASKFULL                          (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_SLC_IDLE_IMGBV4_SHIFT                      (7U)
+#define RGX_CR_SLC_IDLE_IMGBV4_CLRMSK                     (0XFFFFFF7FU)
+#define RGX_CR_SLC_IDLE_IMGBV4_EN                         (0X00000080U)
+#define RGX_CR_SLC_IDLE_CACHE_BANKS_SHIFT                 (6U)
+#define RGX_CR_SLC_IDLE_CACHE_BANKS_CLRMSK                (0XFFFFFFBFU)
+#define RGX_CR_SLC_IDLE_CACHE_BANKS_EN                    (0X00000040U)
+#define RGX_CR_SLC_IDLE_RBOFIFO_SHIFT                     (5U)
+#define RGX_CR_SLC_IDLE_RBOFIFO_CLRMSK                    (0XFFFFFFDFU)
+#define RGX_CR_SLC_IDLE_RBOFIFO_EN                        (0X00000020U)
+#define RGX_CR_SLC_IDLE_FRC_CONV_SHIFT                    (4U)
+#define RGX_CR_SLC_IDLE_FRC_CONV_CLRMSK                   (0XFFFFFFEFU)
+#define RGX_CR_SLC_IDLE_FRC_CONV_EN                       (0X00000010U)
+#define RGX_CR_SLC_IDLE_VXE_CONV_SHIFT                    (3U)
+#define RGX_CR_SLC_IDLE_VXE_CONV_CLRMSK                   (0XFFFFFFF7U)
+#define RGX_CR_SLC_IDLE_VXE_CONV_EN                       (0X00000008U)
+#define RGX_CR_SLC_IDLE_VXD_CONV_SHIFT                    (2U)
+#define RGX_CR_SLC_IDLE_VXD_CONV_CLRMSK                   (0XFFFFFFFBU)
+#define RGX_CR_SLC_IDLE_VXD_CONV_EN                       (0X00000004U)
+#define RGX_CR_SLC_IDLE_BIF1_CONV_SHIFT                   (1U)
+#define RGX_CR_SLC_IDLE_BIF1_CONV_CLRMSK                  (0XFFFFFFFDU)
+#define RGX_CR_SLC_IDLE_BIF1_CONV_EN                      (0X00000002U)
+#define RGX_CR_SLC_IDLE_CBAR_SHIFT                        (0U)
+#define RGX_CR_SLC_IDLE_CBAR_CLRMSK                       (0XFFFFFFFEU)
+#define RGX_CR_SLC_IDLE_CBAR_EN                           (0X00000001U)
+
+
+/*
+    Register RGX_CR_SLC_STATUS2
+*/
+#define RGX_CR_SLC_STATUS2                                (0x3908U)
+#define RGX_CR_SLC_STATUS2_MASKFULL                       (IMG_UINT64_C(0x000003FF03FFFFFF))
+#define RGX_CR_SLC_STATUS2_READS3_SHIFT                   (32U)
+#define RGX_CR_SLC_STATUS2_READS3_CLRMSK                  (IMG_UINT64_C(0XFFFFFC00FFFFFFFF))
+#define RGX_CR_SLC_STATUS2_READS2_SHIFT                   (16U)
+#define RGX_CR_SLC_STATUS2_READS2_CLRMSK                  (IMG_UINT64_C(0XFFFFFFFFFC00FFFF))
+#define RGX_CR_SLC_STATUS2_READS3_EXT_SHIFT               (8U)
+#define RGX_CR_SLC_STATUS2_READS3_EXT_CLRMSK              (IMG_UINT64_C(0XFFFFFFFFFFFF00FF))
+#define RGX_CR_SLC_STATUS2_READS2_EXT_SHIFT               (0U)
+#define RGX_CR_SLC_STATUS2_READS2_EXT_CLRMSK              (IMG_UINT64_C(0XFFFFFFFFFFFFFF00))
+
+
+/*
+    Register RGX_CR_SLC_CTRL_MISC2
+*/
+#define RGX_CR_SLC_CTRL_MISC2                             (0x3930U)
+#define RGX_CR_SLC_CTRL_MISC2_MASKFULL                    (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SLC_CTRL_MISC2_SCRAMBLE_BITS_SHIFT         (0U)
+#define RGX_CR_SLC_CTRL_MISC2_SCRAMBLE_BITS_CLRMSK        (00000000U)
+
+
+/*
+    Register RGX_CR_SLC_CROSSBAR_LOAD_BALANCE
+*/
+#define RGX_CR_SLC_CROSSBAR_LOAD_BALANCE                  (0x3938U)
+#define RGX_CR_SLC_CROSSBAR_LOAD_BALANCE_MASKFULL         (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_SLC_CROSSBAR_LOAD_BALANCE_BYPASS_SHIFT     (0U)
+#define RGX_CR_SLC_CROSSBAR_LOAD_BALANCE_BYPASS_CLRMSK    (0XFFFFFFFEU)
+#define RGX_CR_SLC_CROSSBAR_LOAD_BALANCE_BYPASS_EN        (0X00000001U)
+
+
+/*
+    Register RGX_CR_PERF_TA_PHASE
+*/
+#define RGX_CR_PERF_TA_PHASE                              (0x6008U)
+#define RGX_CR_PERF_TA_PHASE_MASKFULL                     (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_TA_PHASE_COUNT_SHIFT                  (0U)
+#define RGX_CR_PERF_TA_PHASE_COUNT_CLRMSK                 (00000000U)
+
+
+/*
+    Register RGX_CR_PERF_3D_PHASE
+*/
+#define RGX_CR_PERF_3D_PHASE                              (0x6010U)
+#define RGX_CR_PERF_3D_PHASE_MASKFULL                     (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_3D_PHASE_COUNT_SHIFT                  (0U)
+#define RGX_CR_PERF_3D_PHASE_COUNT_CLRMSK                 (00000000U)
+
+
+/*
+    Register RGX_CR_PERF_COMPUTE_PHASE
+*/
+#define RGX_CR_PERF_COMPUTE_PHASE                         (0x6018U)
+#define RGX_CR_PERF_COMPUTE_PHASE_MASKFULL                (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_COMPUTE_PHASE_COUNT_SHIFT             (0U)
+#define RGX_CR_PERF_COMPUTE_PHASE_COUNT_CLRMSK            (00000000U)
+
+
+/*
+    Register RGX_CR_PERF_TA_CYCLE
+*/
+#define RGX_CR_PERF_TA_CYCLE                              (0x6020U)
+#define RGX_CR_PERF_TA_CYCLE_MASKFULL                     (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_TA_CYCLE_COUNT_SHIFT                  (0U)
+#define RGX_CR_PERF_TA_CYCLE_COUNT_CLRMSK                 (00000000U)
+
+
+/*
+    Register RGX_CR_PERF_3D_CYCLE
+*/
+#define RGX_CR_PERF_3D_CYCLE                              (0x6028U)
+#define RGX_CR_PERF_3D_CYCLE_MASKFULL                     (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_3D_CYCLE_COUNT_SHIFT                  (0U)
+#define RGX_CR_PERF_3D_CYCLE_COUNT_CLRMSK                 (00000000U)
+
+
+/*
+    Register RGX_CR_PERF_COMPUTE_CYCLE
+*/
+#define RGX_CR_PERF_COMPUTE_CYCLE                         (0x6030U)
+#define RGX_CR_PERF_COMPUTE_CYCLE_MASKFULL                (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_COMPUTE_CYCLE_COUNT_SHIFT             (0U)
+#define RGX_CR_PERF_COMPUTE_CYCLE_COUNT_CLRMSK            (00000000U)
+
+
+/*
+    Register RGX_CR_PERF_TA_OR_3D_CYCLE
+*/
+#define RGX_CR_PERF_TA_OR_3D_CYCLE                        (0x6038U)
+#define RGX_CR_PERF_TA_OR_3D_CYCLE_MASKFULL               (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_TA_OR_3D_CYCLE_COUNT_SHIFT            (0U)
+#define RGX_CR_PERF_TA_OR_3D_CYCLE_COUNT_CLRMSK           (00000000U)
+
+
+/*
+    Register RGX_CR_PERF_INITIAL_TA_CYCLE
+*/
+#define RGX_CR_PERF_INITIAL_TA_CYCLE                      (0x6040U)
+#define RGX_CR_PERF_INITIAL_TA_CYCLE_MASKFULL             (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_INITIAL_TA_CYCLE_COUNT_SHIFT          (0U)
+#define RGX_CR_PERF_INITIAL_TA_CYCLE_COUNT_CLRMSK         (00000000U)
+
+
+/*
+    Register RGX_CR_PERF_SLC0_READ_STALL
+*/
+#define RGX_CR_PERF_SLC0_READ_STALL                       (0x60B8U)
+#define RGX_CR_PERF_SLC0_READ_STALL_MASKFULL              (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_SLC0_READ_STALL_COUNT_SHIFT           (0U)
+#define RGX_CR_PERF_SLC0_READ_STALL_COUNT_CLRMSK          (00000000U)
+
+
+/*
+    Register RGX_CR_PERF_SLC0_WRITE_STALL
+*/
+#define RGX_CR_PERF_SLC0_WRITE_STALL                      (0x60C0U)
+#define RGX_CR_PERF_SLC0_WRITE_STALL_MASKFULL             (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_SLC0_WRITE_STALL_COUNT_SHIFT          (0U)
+#define RGX_CR_PERF_SLC0_WRITE_STALL_COUNT_CLRMSK         (00000000U)
+
+
+/*
+    Register RGX_CR_PERF_SLC1_READ_STALL
+*/
+#define RGX_CR_PERF_SLC1_READ_STALL                       (0x60E0U)
+#define RGX_CR_PERF_SLC1_READ_STALL_MASKFULL              (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_SLC1_READ_STALL_COUNT_SHIFT           (0U)
+#define RGX_CR_PERF_SLC1_READ_STALL_COUNT_CLRMSK          (00000000U)
+
+
+/*
+    Register RGX_CR_PERF_SLC1_WRITE_STALL
+*/
+#define RGX_CR_PERF_SLC1_WRITE_STALL                      (0x60E8U)
+#define RGX_CR_PERF_SLC1_WRITE_STALL_MASKFULL             (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_SLC1_WRITE_STALL_COUNT_SHIFT          (0U)
+#define RGX_CR_PERF_SLC1_WRITE_STALL_COUNT_CLRMSK         (00000000U)
+
+
+/*
+    Register RGX_CR_PERF_SLC2_READ_STALL
+*/
+#define RGX_CR_PERF_SLC2_READ_STALL                       (0x6158U)
+#define RGX_CR_PERF_SLC2_READ_STALL_MASKFULL              (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_SLC2_READ_STALL_COUNT_SHIFT           (0U)
+#define RGX_CR_PERF_SLC2_READ_STALL_COUNT_CLRMSK          (00000000U)
+
+
+/*
+    Register RGX_CR_PERF_SLC2_WRITE_STALL
+*/
+#define RGX_CR_PERF_SLC2_WRITE_STALL                      (0x6160U)
+#define RGX_CR_PERF_SLC2_WRITE_STALL_MASKFULL             (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_SLC2_WRITE_STALL_COUNT_SHIFT          (0U)
+#define RGX_CR_PERF_SLC2_WRITE_STALL_COUNT_CLRMSK         (00000000U)
+
+
+/*
+    Register RGX_CR_PERF_SLC3_READ_STALL
+*/
+#define RGX_CR_PERF_SLC3_READ_STALL                       (0x6180U)
+#define RGX_CR_PERF_SLC3_READ_STALL_MASKFULL              (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_SLC3_READ_STALL_COUNT_SHIFT           (0U)
+#define RGX_CR_PERF_SLC3_READ_STALL_COUNT_CLRMSK          (00000000U)
+
+
+/*
+    Register RGX_CR_PERF_SLC3_WRITE_STALL
+*/
+#define RGX_CR_PERF_SLC3_WRITE_STALL                      (0x6188U)
+#define RGX_CR_PERF_SLC3_WRITE_STALL_MASKFULL             (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_SLC3_WRITE_STALL_COUNT_SHIFT          (0U)
+#define RGX_CR_PERF_SLC3_WRITE_STALL_COUNT_CLRMSK         (00000000U)
+
+
+/*
+    Register RGX_CR_PERF_3D_SPINUP
+*/
+#define RGX_CR_PERF_3D_SPINUP                             (0x6220U)
+#define RGX_CR_PERF_3D_SPINUP_MASKFULL                    (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_3D_SPINUP_CYCLES_SHIFT                (0U)
+#define RGX_CR_PERF_3D_SPINUP_CYCLES_CLRMSK               (00000000U)
+
+
+/*
+    Register RGX_CR_AXI_ACE_LITE_CONFIGURATION
+*/
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION                 (0x38C0U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_MASKFULL        (IMG_UINT64_C(0x0000001FFFFFFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITELINEUNIQUE_SHIFT (36U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITELINEUNIQUE_CLRMSK (IMG_UINT64_C(0XFFFFFFEFFFFFFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITELINEUNIQUE_EN (IMG_UINT64_C(0X0000001000000000))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITE_SHIFT (35U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITE_CLRMSK (IMG_UINT64_C(0XFFFFFFF7FFFFFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITE_EN (IMG_UINT64_C(0X0000000800000000))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_READ_SHIFT (34U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_READ_CLRMSK (IMG_UINT64_C(0XFFFFFFFBFFFFFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_READ_EN (IMG_UINT64_C(0X0000000400000000))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_CACHE_MAINTENANCE_SHIFT (30U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_CACHE_MAINTENANCE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC3FFFFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_COHERENT_SHIFT (26U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_COHERENT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFC3FFFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_COHERENT_SHIFT (22U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_COHERENT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFC3FFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_BARRIER_SHIFT (20U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_BARRIER_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFCFFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_BARRIER_SHIFT (18U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_BARRIER_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFF3FFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_CACHE_MAINTENANCE_SHIFT (16U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_CACHE_MAINTENANCE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFCFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_COHERENT_SHIFT (14U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_COHERENT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF3FFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_COHERENT_SHIFT (12U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_COHERENT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFCFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_NON_SNOOPING_SHIFT (10U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_NON_SNOOPING_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF3FF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_NON_SNOOPING_SHIFT (8U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_NON_SNOOPING_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFCFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_NON_SNOOPING_SHIFT (4U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_NON_SNOOPING_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFF0F))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_NON_SNOOPING_SHIFT (0U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_NON_SNOOPING_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF0))
+
+
+/*
+    Register RGX_CR_POWER_ESTIMATE_RESULT
+*/
+#define RGX_CR_POWER_ESTIMATE_RESULT                      (0x6328U)
+#define RGX_CR_POWER_ESTIMATE_RESULT_MASKFULL             (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_POWER_ESTIMATE_RESULT_VALUE_SHIFT          (0U)
+#define RGX_CR_POWER_ESTIMATE_RESULT_VALUE_CLRMSK         (00000000U)
+
+
+/*
+    Register RGX_CR_OCP_REVINFO
+*/
+#define RGX_CR_OCP_REVINFO                                (0x9000U)
+#define RGX_CR_OCP_REVINFO_MASKFULL                       (IMG_UINT64_C(0x00000007FFFFFFFF))
+#define RGX_CR_OCP_REVINFO_HWINFO_SYSBUS_SHIFT            (33U)
+#define RGX_CR_OCP_REVINFO_HWINFO_SYSBUS_CLRMSK           (IMG_UINT64_C(0XFFFFFFF9FFFFFFFF))
+#define RGX_CR_OCP_REVINFO_HWINFO_MEMBUS_SHIFT            (32U)
+#define RGX_CR_OCP_REVINFO_HWINFO_MEMBUS_CLRMSK           (IMG_UINT64_C(0XFFFFFFFEFFFFFFFF))
+#define RGX_CR_OCP_REVINFO_HWINFO_MEMBUS_EN               (IMG_UINT64_C(0X0000000100000000))
+#define RGX_CR_OCP_REVINFO_REVISION_SHIFT                 (0U)
+#define RGX_CR_OCP_REVINFO_REVISION_CLRMSK                (IMG_UINT64_C(0XFFFFFFFF00000000))
+
+
+/*
+    Register RGX_CR_OCP_SYSCONFIG
+*/
+#define RGX_CR_OCP_SYSCONFIG                              (0x9010U)
+#define RGX_CR_OCP_SYSCONFIG_MASKFULL                     (IMG_UINT64_C(0x0000000000000FFF))
+#define RGX_CR_OCP_SYSCONFIG_DUST2_STANDBY_MODE_SHIFT     (10U)
+#define RGX_CR_OCP_SYSCONFIG_DUST2_STANDBY_MODE_CLRMSK    (0XFFFFF3FFU)
+#define RGX_CR_OCP_SYSCONFIG_DUST1_STANDBY_MODE_SHIFT     (8U)
+#define RGX_CR_OCP_SYSCONFIG_DUST1_STANDBY_MODE_CLRMSK    (0XFFFFFCFFU)
+#define RGX_CR_OCP_SYSCONFIG_DUST0_STANDBY_MODE_SHIFT     (6U)
+#define RGX_CR_OCP_SYSCONFIG_DUST0_STANDBY_MODE_CLRMSK    (0XFFFFFF3FU)
+#define RGX_CR_OCP_SYSCONFIG_RASCAL_STANDBYMODE_SHIFT     (4U)
+#define RGX_CR_OCP_SYSCONFIG_RASCAL_STANDBYMODE_CLRMSK    (0XFFFFFFCFU)
+#define RGX_CR_OCP_SYSCONFIG_STANDBY_MODE_SHIFT           (2U)
+#define RGX_CR_OCP_SYSCONFIG_STANDBY_MODE_CLRMSK          (0XFFFFFFF3U)
+#define RGX_CR_OCP_SYSCONFIG_IDLE_MODE_SHIFT              (0U)
+#define RGX_CR_OCP_SYSCONFIG_IDLE_MODE_CLRMSK             (0XFFFFFFFCU)
+
+
+/*
+    Register RGX_CR_OCP_IRQSTATUS_RAW_0
+*/
+#define RGX_CR_OCP_IRQSTATUS_RAW_0                        (0x9020U)
+#define RGX_CR_OCP_IRQSTATUS_RAW_0_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQSTATUS_RAW_0_INIT_MINTERRUPT_RAW_SHIFT (0U)
+#define RGX_CR_OCP_IRQSTATUS_RAW_0_INIT_MINTERRUPT_RAW_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_OCP_IRQSTATUS_RAW_0_INIT_MINTERRUPT_RAW_EN (0X00000001U)
+
+
+/*
+    Register RGX_CR_OCP_IRQSTATUS_RAW_1
+*/
+#define RGX_CR_OCP_IRQSTATUS_RAW_1                        (0x9028U)
+#define RGX_CR_OCP_IRQSTATUS_RAW_1_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQSTATUS_RAW_1_TARGET_SINTERRUPT_RAW_SHIFT (0U)
+#define RGX_CR_OCP_IRQSTATUS_RAW_1_TARGET_SINTERRUPT_RAW_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_OCP_IRQSTATUS_RAW_1_TARGET_SINTERRUPT_RAW_EN (0X00000001U)
+
+
+/*
+    Register RGX_CR_OCP_IRQSTATUS_RAW_2
+*/
+#define RGX_CR_OCP_IRQSTATUS_RAW_2                        (0x9030U)
+#define RGX_CR_OCP_IRQSTATUS_RAW_2_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQSTATUS_RAW_2_RGX_IRQ_RAW_SHIFT      (0U)
+#define RGX_CR_OCP_IRQSTATUS_RAW_2_RGX_IRQ_RAW_CLRMSK     (0XFFFFFFFEU)
+#define RGX_CR_OCP_IRQSTATUS_RAW_2_RGX_IRQ_RAW_EN         (0X00000001U)
+
+
+/*
+    Register RGX_CR_OCP_IRQSTATUS_0
+*/
+#define RGX_CR_OCP_IRQSTATUS_0                            (0x9038U)
+#define RGX_CR_OCP_IRQSTATUS_0_MASKFULL                   (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQSTATUS_0_INIT_MINTERRUPT_STATUS_SHIFT (0U)
+#define RGX_CR_OCP_IRQSTATUS_0_INIT_MINTERRUPT_STATUS_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_OCP_IRQSTATUS_0_INIT_MINTERRUPT_STATUS_EN  (0X00000001U)
+
+
+/*
+    Register RGX_CR_OCP_IRQSTATUS_1
+*/
+#define RGX_CR_OCP_IRQSTATUS_1                            (0x9040U)
+#define RGX_CR_OCP_IRQSTATUS_1_MASKFULL                   (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQSTATUS_1_TARGET_SINTERRUPT_STATUS_SHIFT (0U)
+#define RGX_CR_OCP_IRQSTATUS_1_TARGET_SINTERRUPT_STATUS_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_OCP_IRQSTATUS_1_TARGET_SINTERRUPT_STATUS_EN (0X00000001U)
+
+
+/*
+    Register RGX_CR_OCP_IRQSTATUS_2
+*/
+#define RGX_CR_OCP_IRQSTATUS_2                            (0x9048U)
+#define RGX_CR_OCP_IRQSTATUS_2_MASKFULL                   (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQSTATUS_2_RGX_IRQ_STATUS_SHIFT       (0U)
+#define RGX_CR_OCP_IRQSTATUS_2_RGX_IRQ_STATUS_CLRMSK      (0XFFFFFFFEU)
+#define RGX_CR_OCP_IRQSTATUS_2_RGX_IRQ_STATUS_EN          (0X00000001U)
+
+
+/*
+    Register RGX_CR_OCP_IRQENABLE_SET_0
+*/
+#define RGX_CR_OCP_IRQENABLE_SET_0                        (0x9050U)
+#define RGX_CR_OCP_IRQENABLE_SET_0_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQENABLE_SET_0_INIT_MINTERRUPT_ENABLE_SHIFT (0U)
+#define RGX_CR_OCP_IRQENABLE_SET_0_INIT_MINTERRUPT_ENABLE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_OCP_IRQENABLE_SET_0_INIT_MINTERRUPT_ENABLE_EN (0X00000001U)
+
+
+/*
+    Register RGX_CR_OCP_IRQENABLE_SET_1
+*/
+#define RGX_CR_OCP_IRQENABLE_SET_1                        (0x9058U)
+#define RGX_CR_OCP_IRQENABLE_SET_1_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQENABLE_SET_1_TARGET_SINTERRUPT_ENABLE_SHIFT (0U)
+#define RGX_CR_OCP_IRQENABLE_SET_1_TARGET_SINTERRUPT_ENABLE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_OCP_IRQENABLE_SET_1_TARGET_SINTERRUPT_ENABLE_EN (0X00000001U)
+
+
+/*
+    Register RGX_CR_OCP_IRQENABLE_SET_2
+*/
+#define RGX_CR_OCP_IRQENABLE_SET_2                        (0x9060U)
+#define RGX_CR_OCP_IRQENABLE_SET_2_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQENABLE_SET_2_RGX_IRQ_ENABLE_SHIFT   (0U)
+#define RGX_CR_OCP_IRQENABLE_SET_2_RGX_IRQ_ENABLE_CLRMSK  (0XFFFFFFFEU)
+#define RGX_CR_OCP_IRQENABLE_SET_2_RGX_IRQ_ENABLE_EN      (0X00000001U)
+
+
+/*
+    Register RGX_CR_OCP_IRQENABLE_CLR_0
+*/
+#define RGX_CR_OCP_IRQENABLE_CLR_0                        (0x9068U)
+#define RGX_CR_OCP_IRQENABLE_CLR_0_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQENABLE_CLR_0_INIT_MINTERRUPT_DISABLE_SHIFT (0U)
+#define RGX_CR_OCP_IRQENABLE_CLR_0_INIT_MINTERRUPT_DISABLE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_OCP_IRQENABLE_CLR_0_INIT_MINTERRUPT_DISABLE_EN (0X00000001U)
+
+
+/*
+    Register RGX_CR_OCP_IRQENABLE_CLR_1
+*/
+#define RGX_CR_OCP_IRQENABLE_CLR_1                        (0x9070U)
+#define RGX_CR_OCP_IRQENABLE_CLR_1_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQENABLE_CLR_1_TARGET_SINTERRUPT_DISABLE_SHIFT (0U)
+#define RGX_CR_OCP_IRQENABLE_CLR_1_TARGET_SINTERRUPT_DISABLE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_OCP_IRQENABLE_CLR_1_TARGET_SINTERRUPT_DISABLE_EN (0X00000001U)
+
+
+/*
+    Register RGX_CR_OCP_IRQENABLE_CLR_2
+*/
+#define RGX_CR_OCP_IRQENABLE_CLR_2                        (0x9078U)
+#define RGX_CR_OCP_IRQENABLE_CLR_2_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQENABLE_CLR_2_RGX_IRQ_DISABLE_SHIFT  (0U)
+#define RGX_CR_OCP_IRQENABLE_CLR_2_RGX_IRQ_DISABLE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_OCP_IRQENABLE_CLR_2_RGX_IRQ_DISABLE_EN     (0X00000001U)
+
+
+/*
+    Register RGX_CR_OCP_IRQ_EVENT
+*/
+#define RGX_CR_OCP_IRQ_EVENT                              (0x9080U)
+#define RGX_CR_OCP_IRQ_EVENT_MASKFULL                     (IMG_UINT64_C(0x00000000000FFFFF))
+#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNEXPECTED_RDATA_SHIFT (19U)
+#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNEXPECTED_RDATA_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFF7FFFF))
+#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNEXPECTED_RDATA_EN (IMG_UINT64_C(0X0000000000080000))
+#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNSUPPORTED_MCMD_SHIFT (18U)
+#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNSUPPORTED_MCMD_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFBFFFF))
+#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNSUPPORTED_MCMD_EN (IMG_UINT64_C(0X0000000000040000))
+#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNEXPECTED_RDATA_SHIFT (17U)
+#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNEXPECTED_RDATA_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFDFFFF))
+#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNEXPECTED_RDATA_EN (IMG_UINT64_C(0X0000000000020000))
+#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNSUPPORTED_MCMD_SHIFT (16U)
+#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNSUPPORTED_MCMD_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFEFFFF))
+#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNSUPPORTED_MCMD_EN (IMG_UINT64_C(0X0000000000010000))
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_IMG_PAGE_BOUNDARY_CROSS_SHIFT (15U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_IMG_PAGE_BOUNDARY_CROSS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF7FFF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_IMG_PAGE_BOUNDARY_CROSS_EN (IMG_UINT64_C(0X0000000000008000))
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_RESP_ERR_FAIL_SHIFT (14U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_RESP_ERR_FAIL_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFBFFF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_RESP_ERR_FAIL_EN  (IMG_UINT64_C(0X0000000000004000))
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_UNUSED_TAGID_SHIFT (13U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_UNUSED_TAGID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFDFFF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_UNUSED_TAGID_EN   (IMG_UINT64_C(0X0000000000002000))
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RDATA_FIFO_OVERFILL_SHIFT (12U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RDATA_FIFO_OVERFILL_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFEFFF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RDATA_FIFO_OVERFILL_EN (IMG_UINT64_C(0X0000000000001000))
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_IMG_PAGE_BOUNDARY_CROSS_SHIFT (11U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_IMG_PAGE_BOUNDARY_CROSS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF7FF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_IMG_PAGE_BOUNDARY_CROSS_EN (IMG_UINT64_C(0X0000000000000800))
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_RESP_ERR_FAIL_SHIFT (10U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_RESP_ERR_FAIL_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFBFF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_RESP_ERR_FAIL_EN  (IMG_UINT64_C(0X0000000000000400))
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_UNUSED_TAGID_SHIFT (9U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_UNUSED_TAGID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFDFF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_UNUSED_TAGID_EN   (IMG_UINT64_C(0X0000000000000200))
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RDATA_FIFO_OVERFILL_SHIFT (8U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RDATA_FIFO_OVERFILL_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFEFF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RDATA_FIFO_OVERFILL_EN (IMG_UINT64_C(0X0000000000000100))
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_IMG_PAGE_BOUNDARY_CROSS_SHIFT (7U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_IMG_PAGE_BOUNDARY_CROSS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFF7F))
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_IMG_PAGE_BOUNDARY_CROSS_EN (IMG_UINT64_C(0X0000000000000080))
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_RESP_ERR_FAIL_SHIFT (6U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_RESP_ERR_FAIL_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFBF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_RESP_ERR_FAIL_EN  (IMG_UINT64_C(0X0000000000000040))
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_UNUSED_TAGID_SHIFT (5U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_UNUSED_TAGID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFDF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_UNUSED_TAGID_EN   (IMG_UINT64_C(0X0000000000000020))
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RDATA_FIFO_OVERFILL_SHIFT (4U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RDATA_FIFO_OVERFILL_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFEF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RDATA_FIFO_OVERFILL_EN (IMG_UINT64_C(0X0000000000000010))
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_IMG_PAGE_BOUNDARY_CROSS_SHIFT (3U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_IMG_PAGE_BOUNDARY_CROSS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_IMG_PAGE_BOUNDARY_CROSS_EN (IMG_UINT64_C(0X0000000000000008))
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_RESP_ERR_FAIL_SHIFT (2U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_RESP_ERR_FAIL_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_RESP_ERR_FAIL_EN  (IMG_UINT64_C(0X0000000000000004))
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_UNUSED_TAGID_SHIFT (1U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_UNUSED_TAGID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_UNUSED_TAGID_EN   (IMG_UINT64_C(0X0000000000000002))
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RDATA_FIFO_OVERFILL_SHIFT (0U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RDATA_FIFO_OVERFILL_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RDATA_FIFO_OVERFILL_EN (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register RGX_CR_OCP_DEBUG_CONFIG
+*/
+#define RGX_CR_OCP_DEBUG_CONFIG                           (0x9088U)
+#define RGX_CR_OCP_DEBUG_CONFIG_MASKFULL                  (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_DEBUG_CONFIG_REG_SHIFT                 (0U)
+#define RGX_CR_OCP_DEBUG_CONFIG_REG_CLRMSK                (0XFFFFFFFEU)
+#define RGX_CR_OCP_DEBUG_CONFIG_REG_EN                    (0X00000001U)
+
+
+/*
+    Register RGX_CR_OCP_DEBUG_STATUS
+*/
+#define RGX_CR_OCP_DEBUG_STATUS                           (0x9090U)
+#define RGX_CR_OCP_DEBUG_STATUS_MASKFULL                  (IMG_UINT64_C(0x001F1F77FFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SDISCACK_SHIFT    (51U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SDISCACK_CLRMSK   (IMG_UINT64_C(0XFFE7FFFFFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SCONNECT_SHIFT    (50U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SCONNECT_CLRMSK   (IMG_UINT64_C(0XFFFBFFFFFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SCONNECT_EN       (IMG_UINT64_C(0X0004000000000000))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_MCONNECT_SHIFT    (48U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_MCONNECT_CLRMSK   (IMG_UINT64_C(0XFFFCFFFFFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SDISCACK_SHIFT    (43U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SDISCACK_CLRMSK   (IMG_UINT64_C(0XFFFFE7FFFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SCONNECT_SHIFT    (42U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SCONNECT_CLRMSK   (IMG_UINT64_C(0XFFFFFBFFFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SCONNECT_EN       (IMG_UINT64_C(0X0000040000000000))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_MCONNECT_SHIFT    (40U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_MCONNECT_CLRMSK   (IMG_UINT64_C(0XFFFFFCFFFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_BUSY_SHIFT        (38U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_BUSY_CLRMSK       (IMG_UINT64_C(0XFFFFFFBFFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_BUSY_EN           (IMG_UINT64_C(0X0000004000000000))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_CMD_FIFO_FULL_SHIFT (37U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_CMD_FIFO_FULL_CLRMSK (IMG_UINT64_C(0XFFFFFFDFFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_CMD_FIFO_FULL_EN  (IMG_UINT64_C(0X0000002000000000))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SRESP_ERROR_SHIFT (36U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SRESP_ERROR_CLRMSK (IMG_UINT64_C(0XFFFFFFEFFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SRESP_ERROR_EN    (IMG_UINT64_C(0X0000001000000000))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_BUSY_SHIFT        (34U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_BUSY_CLRMSK       (IMG_UINT64_C(0XFFFFFFFBFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_BUSY_EN           (IMG_UINT64_C(0X0000000400000000))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_CMD_FIFO_FULL_SHIFT (33U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_CMD_FIFO_FULL_CLRMSK (IMG_UINT64_C(0XFFFFFFFDFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_CMD_FIFO_FULL_EN  (IMG_UINT64_C(0X0000000200000000))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SRESP_ERROR_SHIFT (32U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SRESP_ERROR_CLRMSK (IMG_UINT64_C(0XFFFFFFFEFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SRESP_ERROR_EN    (IMG_UINT64_C(0X0000000100000000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_RESERVED_SHIFT      (31U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_RESERVED_CLRMSK     (IMG_UINT64_C(0XFFFFFFFF7FFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_RESERVED_EN         (IMG_UINT64_C(0X0000000080000000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SWAIT_SHIFT         (30U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SWAIT_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFBFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SWAIT_EN            (IMG_UINT64_C(0X0000000040000000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MDISCREQ_SHIFT      (29U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MDISCREQ_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFDFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MDISCREQ_EN         (IMG_UINT64_C(0X0000000020000000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MDISCACK_SHIFT      (27U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MDISCACK_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFE7FFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SCONNECT_SHIFT      (26U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SCONNECT_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFBFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SCONNECT_EN         (IMG_UINT64_C(0X0000000004000000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MCONNECT_SHIFT      (24U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MCONNECT_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFCFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_RESERVED_SHIFT      (23U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_RESERVED_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFF7FFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_RESERVED_EN         (IMG_UINT64_C(0X0000000000800000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SWAIT_SHIFT         (22U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SWAIT_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFFBFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SWAIT_EN            (IMG_UINT64_C(0X0000000000400000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MDISCREQ_SHIFT      (21U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MDISCREQ_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MDISCREQ_EN         (IMG_UINT64_C(0X0000000000200000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MDISCACK_SHIFT      (19U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MDISCACK_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFE7FFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SCONNECT_SHIFT      (18U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SCONNECT_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFBFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SCONNECT_EN         (IMG_UINT64_C(0X0000000000040000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MCONNECT_SHIFT      (16U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MCONNECT_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFCFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_RESERVED_SHIFT      (15U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_RESERVED_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFF7FFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_RESERVED_EN         (IMG_UINT64_C(0X0000000000008000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SWAIT_SHIFT         (14U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SWAIT_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFFFFBFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SWAIT_EN            (IMG_UINT64_C(0X0000000000004000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MDISCREQ_SHIFT      (13U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MDISCREQ_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFDFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MDISCREQ_EN         (IMG_UINT64_C(0X0000000000002000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MDISCACK_SHIFT      (11U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MDISCACK_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFE7FF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SCONNECT_SHIFT      (10U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SCONNECT_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFBFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SCONNECT_EN         (IMG_UINT64_C(0X0000000000000400))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MCONNECT_SHIFT      (8U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MCONNECT_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFCFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_RESERVED_SHIFT      (7U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_RESERVED_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFF7F))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_RESERVED_EN         (IMG_UINT64_C(0X0000000000000080))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SWAIT_SHIFT         (6U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SWAIT_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFFFFFFBF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SWAIT_EN            (IMG_UINT64_C(0X0000000000000040))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MDISCREQ_SHIFT      (5U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MDISCREQ_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFFDF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MDISCREQ_EN         (IMG_UINT64_C(0X0000000000000020))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MDISCACK_SHIFT      (3U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MDISCACK_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFFE7))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SCONNECT_SHIFT      (2U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SCONNECT_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SCONNECT_EN         (IMG_UINT64_C(0X0000000000000004))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MCONNECT_SHIFT      (0U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MCONNECT_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFFFC))
+
+
+#define RGX_CR_BIF_TRUST_DM_TYPE_PM_ALIST_SHIFT           (6U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PM_ALIST_CLRMSK          (0XFFFFFFBFU)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PM_ALIST_EN              (0X00000040U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_HOST_SHIFT               (5U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_HOST_CLRMSK              (0XFFFFFFDFU)
+#define RGX_CR_BIF_TRUST_DM_TYPE_HOST_EN                  (0X00000020U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_META_SHIFT               (4U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_META_CLRMSK              (0XFFFFFFEFU)
+#define RGX_CR_BIF_TRUST_DM_TYPE_META_EN                  (0X00000010U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_ZLS_SHIFT             (3U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_ZLS_CLRMSK            (0XFFFFFFF7U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_ZLS_EN                (0X00000008U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_TE_SHIFT              (2U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_TE_CLRMSK             (0XFFFFFFFBU)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_TE_EN                 (0X00000004U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_VCE_SHIFT             (1U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_VCE_CLRMSK            (0XFFFFFFFDU)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_VCE_EN                (0X00000002U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_TLA_SHIFT                (0U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_TLA_CLRMSK               (0XFFFFFFFEU)
+#define RGX_CR_BIF_TRUST_DM_TYPE_TLA_EN                   (0X00000001U)
+
+
+#define RGX_CR_BIF_TRUST_DM_MASK                          (0x0000007FU)
+
+
+/*
+    Register RGX_CR_BIF_TRUST
+*/
+#define RGX_CR_BIF_TRUST                                  (0xA000U)
+#define RGX_CR_BIF_TRUST_MASKFULL                         (IMG_UINT64_C(0x00000000001FFFFF))
+#if defined(RGX_FEATURE_RAY_TRACING)
+#define RGX_CR_BIF_TRUST_OTHER_RAY_VERTEX_DM_TRUSTED_SHIFT (20U)
+#define RGX_CR_BIF_TRUST_OTHER_RAY_VERTEX_DM_TRUSTED_CLRMSK (0XFFEFFFFFU)
+#define RGX_CR_BIF_TRUST_OTHER_RAY_VERTEX_DM_TRUSTED_EN   (0X00100000U)
+#define RGX_CR_BIF_TRUST_MCU_RAY_VERTEX_DM_TRUSTED_SHIFT  (19U)
+#define RGX_CR_BIF_TRUST_MCU_RAY_VERTEX_DM_TRUSTED_CLRMSK (0XFFF7FFFFU)
+#define RGX_CR_BIF_TRUST_MCU_RAY_VERTEX_DM_TRUSTED_EN     (0X00080000U)
+#define RGX_CR_BIF_TRUST_OTHER_RAY_DM_TRUSTED_SHIFT       (18U)
+#define RGX_CR_BIF_TRUST_OTHER_RAY_DM_TRUSTED_CLRMSK      (0XFFFBFFFFU)
+#define RGX_CR_BIF_TRUST_OTHER_RAY_DM_TRUSTED_EN          (0X00040000U)
+#define RGX_CR_BIF_TRUST_MCU_RAY_DM_TRUSTED_SHIFT         (17U)
+#define RGX_CR_BIF_TRUST_MCU_RAY_DM_TRUSTED_CLRMSK        (0XFFFDFFFFU)
+#define RGX_CR_BIF_TRUST_MCU_RAY_DM_TRUSTED_EN            (0X00020000U)
+#endif /* RGX_FEATURE_RAY_TRACING */
+
+#define RGX_CR_BIF_TRUST_ENABLE_SHIFT                     (16U)
+#define RGX_CR_BIF_TRUST_ENABLE_CLRMSK                    (0XFFFEFFFFU)
+#define RGX_CR_BIF_TRUST_ENABLE_EN                        (0X00010000U)
+#define RGX_CR_BIF_TRUST_DM_TRUSTED_SHIFT                 (9U)
+#define RGX_CR_BIF_TRUST_DM_TRUSTED_CLRMSK                (0XFFFF01FFU)
+#define RGX_CR_BIF_TRUST_OTHER_COMPUTE_DM_TRUSTED_SHIFT   (8U)
+#define RGX_CR_BIF_TRUST_OTHER_COMPUTE_DM_TRUSTED_CLRMSK  (0XFFFFFEFFU)
+#define RGX_CR_BIF_TRUST_OTHER_COMPUTE_DM_TRUSTED_EN      (0X00000100U)
+#define RGX_CR_BIF_TRUST_MCU_COMPUTE_DM_TRUSTED_SHIFT     (7U)
+#define RGX_CR_BIF_TRUST_MCU_COMPUTE_DM_TRUSTED_CLRMSK    (0XFFFFFF7FU)
+#define RGX_CR_BIF_TRUST_MCU_COMPUTE_DM_TRUSTED_EN        (0X00000080U)
+#define RGX_CR_BIF_TRUST_PBE_COMPUTE_DM_TRUSTED_SHIFT     (6U)
+#define RGX_CR_BIF_TRUST_PBE_COMPUTE_DM_TRUSTED_CLRMSK    (0XFFFFFFBFU)
+#define RGX_CR_BIF_TRUST_PBE_COMPUTE_DM_TRUSTED_EN        (0X00000040U)
+#define RGX_CR_BIF_TRUST_OTHER_PIXEL_DM_TRUSTED_SHIFT     (5U)
+#define RGX_CR_BIF_TRUST_OTHER_PIXEL_DM_TRUSTED_CLRMSK    (0XFFFFFFDFU)
+#define RGX_CR_BIF_TRUST_OTHER_PIXEL_DM_TRUSTED_EN        (0X00000020U)
+#define RGX_CR_BIF_TRUST_MCU_PIXEL_DM_TRUSTED_SHIFT       (4U)
+#define RGX_CR_BIF_TRUST_MCU_PIXEL_DM_TRUSTED_CLRMSK      (0XFFFFFFEFU)
+#define RGX_CR_BIF_TRUST_MCU_PIXEL_DM_TRUSTED_EN          (0X00000010U)
+#define RGX_CR_BIF_TRUST_PBE_PIXEL_DM_TRUSTED_SHIFT       (3U)
+#define RGX_CR_BIF_TRUST_PBE_PIXEL_DM_TRUSTED_CLRMSK      (0XFFFFFFF7U)
+#define RGX_CR_BIF_TRUST_PBE_PIXEL_DM_TRUSTED_EN          (0X00000008U)
+#define RGX_CR_BIF_TRUST_OTHER_VERTEX_DM_TRUSTED_SHIFT    (2U)
+#define RGX_CR_BIF_TRUST_OTHER_VERTEX_DM_TRUSTED_CLRMSK   (0XFFFFFFFBU)
+#define RGX_CR_BIF_TRUST_OTHER_VERTEX_DM_TRUSTED_EN       (0X00000004U)
+#define RGX_CR_BIF_TRUST_MCU_VERTEX_DM_TRUSTED_SHIFT      (1U)
+#define RGX_CR_BIF_TRUST_MCU_VERTEX_DM_TRUSTED_CLRMSK     (0XFFFFFFFDU)
+#define RGX_CR_BIF_TRUST_MCU_VERTEX_DM_TRUSTED_EN         (0X00000002U)
+#define RGX_CR_BIF_TRUST_PBE_VERTEX_DM_TRUSTED_SHIFT      (0U)
+#define RGX_CR_BIF_TRUST_PBE_VERTEX_DM_TRUSTED_CLRMSK     (0XFFFFFFFEU)
+#define RGX_CR_BIF_TRUST_PBE_VERTEX_DM_TRUSTED_EN         (0X00000001U)
+
+
+/*
+    Register RGX_CR_SYS_BUS_SECURE
+*/
+#define RGX_CR_SYS_BUS_SECURE                             (0xA100U)
+#define RGX_CR_SYS_BUS_SECURE_MASKFULL                    (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_SYS_BUS_SECURE_ENABLE_SHIFT                (0U)
+#define RGX_CR_SYS_BUS_SECURE_ENABLE_CLRMSK               (0XFFFFFFFEU)
+#define RGX_CR_SYS_BUS_SECURE_ENABLE_EN                   (0X00000001U)
+
+
+/*
+    Register RGX_CR_CLK_CTRL2
+*/
+#define RGX_CR_CLK_CTRL2                                  (0xD200U)
+#define RGX_CR_CLK_CTRL2_MASKFULL                         (IMG_UINT64_C(0x0000000000000F33))
+#define RGX_CR_CLK_CTRL2_MCU_FBTC_SHIFT                   (10U)
+#define RGX_CR_CLK_CTRL2_MCU_FBTC_CLRMSK                  (IMG_UINT64_C(0XFFFFFFFFFFFFF3FF))
+#define RGX_CR_CLK_CTRL2_MCU_FBTC_OFF                     (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL2_MCU_FBTC_ON                      (IMG_UINT64_C(0x0000000000000400))
+#define RGX_CR_CLK_CTRL2_MCU_FBTC_AUTO                    (IMG_UINT64_C(0x0000000000000800))
+#if defined(RGX_FEATURE_RAY_TRACING)
+#define RGX_CR_CLK_CTRL2_VRDM_SHIFT                       (8U)
+#define RGX_CR_CLK_CTRL2_VRDM_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFFFFFCFF))
+#define RGX_CR_CLK_CTRL2_VRDM_OFF                         (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL2_VRDM_ON                          (IMG_UINT64_C(0x0000000000000100))
+#define RGX_CR_CLK_CTRL2_VRDM_AUTO                        (IMG_UINT64_C(0x0000000000000200))
+#define RGX_CR_CLK_CTRL2_SH_SHIFT                         (4U)
+#define RGX_CR_CLK_CTRL2_SH_CLRMSK                        (IMG_UINT64_C(0XFFFFFFFFFFFFFFCF))
+#define RGX_CR_CLK_CTRL2_SH_OFF                           (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL2_SH_ON                            (IMG_UINT64_C(0x0000000000000010))
+#define RGX_CR_CLK_CTRL2_SH_AUTO                          (IMG_UINT64_C(0x0000000000000020))
+#define RGX_CR_CLK_CTRL2_FBA_SHIFT                        (0U)
+#define RGX_CR_CLK_CTRL2_FBA_CLRMSK                       (IMG_UINT64_C(0XFFFFFFFFFFFFFFFC))
+#define RGX_CR_CLK_CTRL2_FBA_OFF                          (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL2_FBA_ON                           (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_CLK_CTRL2_FBA_AUTO                         (IMG_UINT64_C(0x0000000000000002))
+
+
+#endif /* RGX_FEATURE_RAY_TRACING */
+
+/*
+    Register RGX_CR_CLK_STATUS2
+*/
+#define RGX_CR_CLK_STATUS2                                (0xD208U)
+#define RGX_CR_CLK_STATUS2_MASKFULL                       (IMG_UINT64_C(0x0000000000000015))
+#if defined(RGX_FEATURE_RAY_TRACING)
+#define RGX_CR_CLK_STATUS2_VRDM_SHIFT                     (4U)
+#define RGX_CR_CLK_STATUS2_VRDM_CLRMSK                    (IMG_UINT64_C(0XFFFFFFFFFFFFFFEF))
+#define RGX_CR_CLK_STATUS2_VRDM_GATED                     (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS2_VRDM_RUNNING                   (IMG_UINT64_C(0x0000000000000010))
+#define RGX_CR_CLK_STATUS2_SH_SHIFT                       (2U)
+#define RGX_CR_CLK_STATUS2_SH_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define RGX_CR_CLK_STATUS2_SH_GATED                       (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS2_SH_RUNNING                     (IMG_UINT64_C(0x0000000000000004))
+#define RGX_CR_CLK_STATUS2_FBA_SHIFT                      (0U)
+#define RGX_CR_CLK_STATUS2_FBA_CLRMSK                     (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_CLK_STATUS2_FBA_GATED                      (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS2_FBA_RUNNING                    (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+    Register DPX_CR_BIF_FAULT_BANK_MMU_STATUS
+*/
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS                  (0xC5C8U)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_MASKFULL         (IMG_UINT64_C(0x000000000000F775))
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_CAT_BASE_SHIFT   (12U)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_CAT_BASE_CLRMSK  (0XFFFF0FFFU)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_PAGE_SIZE_SHIFT  (8U)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_PAGE_SIZE_CLRMSK (0XFFFFF8FFU)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_DATA_TYPE_SHIFT  (5U)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_DATA_TYPE_CLRMSK (0XFFFFFF9FU)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_RO_SHIFT   (4U)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_RO_CLRMSK  (0XFFFFFFEFU)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_RO_EN      (0X00000010U)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_PM_META_RO_SHIFT (2U)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_PM_META_RO_CLRMSK (0XFFFFFFFBU)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_PM_META_RO_EN (0X00000004U)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_SHIFT      (0U)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_CLRMSK     (0XFFFFFFFEU)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_EN         (0X00000001U)
+
+
+/*
+    Register DPX_CR_BIF_FAULT_BANK_REQ_STATUS
+*/
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS                  (0xC5D0U)
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_MASKFULL         (IMG_UINT64_C(0x03FFFFFFFFFFFFF0))
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_RNW_SHIFT        (57U)
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_RNW_CLRMSK       (IMG_UINT64_C(0XFDFFFFFFFFFFFFFF))
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_RNW_EN           (IMG_UINT64_C(0X0200000000000000))
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_SB_SHIFT     (44U)
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_SB_CLRMSK    (IMG_UINT64_C(0XFE000FFFFFFFFFFF))
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_ID_SHIFT     (40U)
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_ID_CLRMSK    (IMG_UINT64_C(0XFFFFF0FFFFFFFFFF))
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_SHIFT    (4U)
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_CLRMSK   (IMG_UINT64_C(0XFFFFFF000000000F))
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_ALIGNSHIFT (4U)
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_ALIGNSIZE (16U)
+
+
+#endif /* RGX_FEATURE_RAY_TRACING */
+
+/*
+    Register RGX_CR_MMU_FAULT_STATUS
+*/
+#define RGX_CR_MMU_FAULT_STATUS                           (0xE150U)
+#define RGX_CR_MMU_FAULT_STATUS_MASKFULL                  (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_MMU_FAULT_STATUS_ADDRESS_SHIFT             (28U)
+#define RGX_CR_MMU_FAULT_STATUS_ADDRESS_CLRMSK            (IMG_UINT64_C(0X000000000FFFFFFF))
+#define RGX_CR_MMU_FAULT_STATUS_CONTEXT_SHIFT             (20U)
+#define RGX_CR_MMU_FAULT_STATUS_CONTEXT_CLRMSK            (IMG_UINT64_C(0XFFFFFFFFF00FFFFF))
+#define RGX_CR_MMU_FAULT_STATUS_TAG_SB_SHIFT              (12U)
+#define RGX_CR_MMU_FAULT_STATUS_TAG_SB_CLRMSK             (IMG_UINT64_C(0XFFFFFFFFFFF00FFF))
+#define RGX_CR_MMU_FAULT_STATUS_REQ_ID_SHIFT              (6U)
+#define RGX_CR_MMU_FAULT_STATUS_REQ_ID_CLRMSK             (IMG_UINT64_C(0XFFFFFFFFFFFFF03F))
+#define RGX_CR_MMU_FAULT_STATUS_LEVEL_SHIFT               (4U)
+#define RGX_CR_MMU_FAULT_STATUS_LEVEL_CLRMSK              (IMG_UINT64_C(0XFFFFFFFFFFFFFFCF))
+#define RGX_CR_MMU_FAULT_STATUS_RNW_SHIFT                 (3U)
+#define RGX_CR_MMU_FAULT_STATUS_RNW_CLRMSK                (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define RGX_CR_MMU_FAULT_STATUS_RNW_EN                    (IMG_UINT64_C(0X0000000000000008))
+#define RGX_CR_MMU_FAULT_STATUS_TYPE_SHIFT                (1U)
+#define RGX_CR_MMU_FAULT_STATUS_TYPE_CLRMSK               (IMG_UINT64_C(0XFFFFFFFFFFFFFFF9))
+#define RGX_CR_MMU_FAULT_STATUS_FAULT_SHIFT               (0U)
+#define RGX_CR_MMU_FAULT_STATUS_FAULT_CLRMSK              (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_MMU_FAULT_STATUS_FAULT_EN                  (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register RGX_CR_MMU_FAULT_STATUS_META
+*/
+#define RGX_CR_MMU_FAULT_STATUS_META                      (0xE158U)
+#define RGX_CR_MMU_FAULT_STATUS_META_MASKFULL             (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_SHIFT        (28U)
+#define RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_CLRMSK       (IMG_UINT64_C(0X000000000FFFFFFF))
+#define RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_SHIFT        (20U)
+#define RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFF00FFFFF))
+#define RGX_CR_MMU_FAULT_STATUS_META_TAG_SB_SHIFT         (12U)
+#define RGX_CR_MMU_FAULT_STATUS_META_TAG_SB_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFFF00FFF))
+#define RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_SHIFT         (6U)
+#define RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFFFFF03F))
+#define RGX_CR_MMU_FAULT_STATUS_META_LEVEL_SHIFT          (4U)
+#define RGX_CR_MMU_FAULT_STATUS_META_LEVEL_CLRMSK         (IMG_UINT64_C(0XFFFFFFFFFFFFFFCF))
+#define RGX_CR_MMU_FAULT_STATUS_META_RNW_SHIFT            (3U)
+#define RGX_CR_MMU_FAULT_STATUS_META_RNW_CLRMSK           (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define RGX_CR_MMU_FAULT_STATUS_META_RNW_EN               (IMG_UINT64_C(0X0000000000000008))
+#define RGX_CR_MMU_FAULT_STATUS_META_TYPE_SHIFT           (1U)
+#define RGX_CR_MMU_FAULT_STATUS_META_TYPE_CLRMSK          (IMG_UINT64_C(0XFFFFFFFFFFFFFFF9))
+#define RGX_CR_MMU_FAULT_STATUS_META_FAULT_SHIFT          (0U)
+#define RGX_CR_MMU_FAULT_STATUS_META_FAULT_CLRMSK         (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_MMU_FAULT_STATUS_META_FAULT_EN             (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register RGX_CR_SLC3_CTRL_MISC
+*/
+#define RGX_CR_SLC3_CTRL_MISC                             (0xE200U)
+#define RGX_CR_SLC3_CTRL_MISC_MASKFULL                    (IMG_UINT64_C(0x0000000000000007))
+#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_SHIFT      (0U)
+#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_CLRMSK     (0XFFFFFFF8U)
+#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_LINEAR     (00000000U)
+#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_IN_PAGE_HASH (0X00000001U)
+#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_FIXED_PVR_HASH (0X00000002U)
+#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_SCRAMBLE_PVR_HASH (0X00000003U)
+#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_WEAVED_HASH (0X00000004U)
+
+
+/*
+    Register RGX_CR_SLC3_SCRAMBLE
+*/
+#define RGX_CR_SLC3_SCRAMBLE                              (0xE208U)
+#define RGX_CR_SLC3_SCRAMBLE_MASKFULL                     (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_SLC3_SCRAMBLE_BITS_SHIFT                   (0U)
+#define RGX_CR_SLC3_SCRAMBLE_BITS_CLRMSK                  (IMG_UINT64_C(0000000000000000))
+
+
+/*
+    Register RGX_CR_SLC3_SCRAMBLE2
+*/
+#define RGX_CR_SLC3_SCRAMBLE2                             (0xE210U)
+#define RGX_CR_SLC3_SCRAMBLE2_MASKFULL                    (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_SLC3_SCRAMBLE2_BITS_SHIFT                  (0U)
+#define RGX_CR_SLC3_SCRAMBLE2_BITS_CLRMSK                 (IMG_UINT64_C(0000000000000000))
+
+
+/*
+    Register RGX_CR_SLC3_SCRAMBLE3
+*/
+#define RGX_CR_SLC3_SCRAMBLE3                             (0xE218U)
+#define RGX_CR_SLC3_SCRAMBLE3_MASKFULL                    (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_SLC3_SCRAMBLE3_BITS_SHIFT                  (0U)
+#define RGX_CR_SLC3_SCRAMBLE3_BITS_CLRMSK                 (IMG_UINT64_C(0000000000000000))
+
+
+/*
+    Register RGX_CR_SLC3_SCRAMBLE4
+*/
+#define RGX_CR_SLC3_SCRAMBLE4                             (0xE260U)
+#define RGX_CR_SLC3_SCRAMBLE4_MASKFULL                    (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_SLC3_SCRAMBLE4_BITS_SHIFT                  (0U)
+#define RGX_CR_SLC3_SCRAMBLE4_BITS_CLRMSK                 (IMG_UINT64_C(0000000000000000))
+
+
+#endif /* _RGX_CR_DEFS_KM_H_ */
+
+/*****************************************************************************
+ End of file (rgx_cr_defs_km.h)
+*****************************************************************************/
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/rgxdefs_km.h b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/rgxdefs_km.h
new file mode 100644
index 0000000..83151cf
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/rgxdefs_km.h
@@ -0,0 +1,175 @@
+/*************************************************************************/ /*!
+@Title          Rogue hw definitions (kernel mode)
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXDEFS_KM_H_
+#define _RGXDEFS_KM_H_
+
+#include RGX_BVNC_CORE_KM_HEADER
+#include RGX_BNC_CONFIG_KM_HEADER
+
+#define __IMG_EXPLICIT_INCLUDE_HWDEFS
+#include "rgx_cr_defs_km.h"
+#undef __IMG_EXPLICIT_INCLUDE_HWDEFS
+
+/******************************************************************************
+ * Check for valid B.X.N.C
+ *****************************************************************************/
+#if !defined(RGX_BVNC_KM_B) || !defined(RGX_BVNC_KM_V) || !defined(RGX_BVNC_KM_N) || !defined(RGX_BVNC_KM_C)
+#error "Need to specify BVNC (RGX_BVNC_KM_B, RGX_BVNC_KM_V, RGX_BVNC_KM_N and RGX_BVNC_C)"
+#endif
+
+/* Check core/config compatibility */
+#if (RGX_BVNC_KM_B != RGX_BNC_KM_B) || (RGX_BVNC_KM_N != RGX_BNC_KM_N) || (RGX_BVNC_KM_C != RGX_BNC_KM_C) 
+#error "BVNC headers are mismatching (KM core/config)"
+#endif
+
+/******************************************************************************
+ * RGX Version name
+ *****************************************************************************/
+#define _RGX_BVNC_ST2(S)	#S
+#define _RGX_BVNC_ST(S)		_RGX_BVNC_ST2(S)
+#define RGX_BVNC_KM			_RGX_BVNC_ST(RGX_BVNC_KM_B) "." _RGX_BVNC_ST(RGX_BVNC_KM_V) "." _RGX_BVNC_ST(RGX_BVNC_KM_N) "." _RGX_BVNC_ST(RGX_BVNC_KM_C)
+#define RGX_BVNC_KM_V_ST	_RGX_BVNC_ST(RGX_BVNC_KM_V)
+
+/******************************************************************************
+ * RGX Defines
+ *****************************************************************************/
+
+/* META cores (required for the RGX_FEATURE_META) */
+#define MTP218   (1)
+#define MTP219   (2)
+#define LTP218   (3)
+#define LTP217   (4)
+
+/* META Core memory feature depending on META variants */
+#define RGX_META_COREMEM_32K      (32*1024)
+#define RGX_META_COREMEM_48K      (48*1024)
+#define RGX_META_COREMEM_64K      (64*1024)
+
+#define RGX_META_COREMEM_SIZE     (RGX_FEATURE_META_COREMEM_SIZE*1024)
+
+#if (RGX_FEATURE_META_COREMEM_SIZE != 0)
+#define RGX_META_COREMEM          (1)
+#define RGX_META_COREMEM_CODE     (1)
+#if !defined(FIX_HW_BRN_50767)
+#define RGX_META_COREMEM_DATA     (1)
+#endif
+#endif
+
+/* ISP requires valid state on all three pipes regardless of the number of
+ * active pipes/tiles in flight.
+ */
+#define RGX_MAX_NUM_PIPES	3
+
+#define ROGUE_CACHE_LINE_SIZE				((RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS)/8)
+
+#define MAX_HW_TA3DCONTEXTS	2
+
+
+/* useful extra defines for clock ctrl*/
+#define RGX_CR_CLK_CTRL_ALL_ON   (IMG_UINT64_C(0x5555555555555555)&RGX_CR_CLK_CTRL_MASKFULL)
+#define RGX_CR_CLK_CTRL_ALL_AUTO (IMG_UINT64_C(0xaaaaaaaaaaaaaaaa)&RGX_CR_CLK_CTRL_MASKFULL)
+
+#define RGX_CR_SOFT_RESET_DUST_n_CORE_EN	(RGX_CR_SOFT_RESET_DUST_A_CORE_EN | \
+											 RGX_CR_SOFT_RESET_DUST_B_CORE_EN | \
+											 RGX_CR_SOFT_RESET_DUST_C_CORE_EN | \
+											 RGX_CR_SOFT_RESET_DUST_D_CORE_EN | \
+											 RGX_CR_SOFT_RESET_DUST_E_CORE_EN | \
+											 RGX_CR_SOFT_RESET_DUST_F_CORE_EN | \
+											 RGX_CR_SOFT_RESET_DUST_G_CORE_EN | \
+											 RGX_CR_SOFT_RESET_DUST_H_CORE_EN)
+
+/* SOFT_RESET Rascal and DUSTs bits */
+#define RGX_CR_SOFT_RESET_RASCALDUSTS_EN	(RGX_CR_SOFT_RESET_RASCAL_CORE_EN | \
+											 RGX_CR_SOFT_RESET_DUST_n_CORE_EN)
+
+
+
+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE)
+/* SOFT_RESET steps as defined in the TRM */
+#define RGX_S7_SOFT_RESET_DUSTS (RGX_CR_SOFT_RESET_DUST_n_CORE_EN)
+
+#define RGX_S7_SOFT_RESET_JONES (RGX_CR_SOFT_RESET_PM_EN  | \
+                                 RGX_CR_SOFT_RESET_VDM_EN | \
+								 RGX_CR_SOFT_RESET_ISP_EN)
+
+#define RGX_S7_SOFT_RESET_JONES_ALL (RGX_S7_SOFT_RESET_JONES  | \
+									 RGX_CR_SOFT_RESET_BIF_EN | \
+                                     RGX_CR_SOFT_RESET_SLC_EN | \
+								     RGX_CR_SOFT_RESET_GARTEN_EN)
+
+#define RGX_S7_SOFT_RESET2 (RGX_CR_SOFT_RESET2_BLACKPEARL_EN | \
+                            RGX_CR_SOFT_RESET2_PIXEL_EN | \
+							RGX_CR_SOFT_RESET2_COMPUTE_EN | \
+							RGX_CR_SOFT_RESET2_VERTEX_EN)
+#endif
+
+
+#define RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT		(12)
+#define RGX_BIF_PM_PHYSICAL_PAGE_SIZE			(1 << RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT)
+
+#define RGX_BIF_PM_VIRTUAL_PAGE_ALIGNSHIFT		(14)
+#define RGX_BIF_PM_VIRTUAL_PAGE_SIZE			(1 << RGX_BIF_PM_VIRTUAL_PAGE_ALIGNSHIFT)
+
+/******************************************************************************
+ * WA HWBRNs
+ *****************************************************************************/
+#if defined(FIX_HW_BRN_36492)
+
+#undef RGX_CR_SOFT_RESET_SLC_EN
+#undef RGX_CR_SOFT_RESET_SLC_CLRMSK
+#undef RGX_CR_SOFT_RESET_SLC_SHIFT
+
+/* Remove the SOFT_RESET_SLC_EN bit from SOFT_RESET_MASKFULL */
+#undef RGX_CR_SOFT_RESET_MASKFULL 
+#define RGX_CR_SOFT_RESET_MASKFULL IMG_UINT64_C(0x000001FFF7FFFC1D)
+
+#endif /* FIX_HW_BRN_36492 */
+
+#define DPX_MAX_RAY_CONTEXTS 4 /* FIXME should this be in dpx file? */
+#define DPX_MAX_FBA_AP 16
+#define DPX_MAX_FBA_FILTER_WIDTH 24
+
+#if !defined(RGX_FEATURE_SLC_SIZE_IN_BYTES)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (RGX_FEATURE_SLC_SIZE_IN_KILOBYTES * 1024)
+#endif
+
+#endif /* _RGXDEFS_KM_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/rgxmmudefs_km.h b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/rgxmmudefs_km.h
new file mode 100644
index 0000000..7419db5
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/hwdefs/km/rgxmmudefs_km.h
@@ -0,0 +1,395 @@
+/*************************************************************************/ /*!
+@Title          Hardware definition file rgxmmudefs_km.h
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/*               ****   Autogenerated C -- do not edit    ****               */
+
+/*
+ * Generated by regconv version MAIN@2782796
+ *   from files:
+ *      rogue_bif.def 
+ *      rogue_bif.def 
+ */
+
+
+#ifndef _RGXMMUDEFS_KM_H_
+#define _RGXMMUDEFS_KM_H_
+
+#include "img_types.h"
+
+/*
+
+		Encoding of DM (note value 0x6 not used)
+	
+*/
+#define RGX_BIF_DM_ENCODING_VERTEX                        (0x00000000U)
+#define RGX_BIF_DM_ENCODING_PIXEL                         (0x00000001U)
+#define RGX_BIF_DM_ENCODING_COMPUTE                       (0x00000002U)
+#define RGX_BIF_DM_ENCODING_TLA                           (0x00000003U)
+#define RGX_BIF_DM_ENCODING_PB_VCE                        (0x00000004U)
+#define RGX_BIF_DM_ENCODING_PB_TE                         (0x00000005U)
+#define RGX_BIF_DM_ENCODING_META                          (0x00000007U)
+#define RGX_BIF_DM_ENCODING_HOST                          (0x00000008U)
+#define RGX_BIF_DM_ENCODING_PM_ALIST                      (0x00000009U)
+
+
+/*
+
+		Labelling of fields within virtual address
+	
+*/
+/*
+Page Catalogue entry #
+*/
+#define RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT                  (30U)
+#define RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK                 (IMG_UINT64_C(0XFFFFFF003FFFFFFF))
+/*
+Page Directory entry #
+*/
+#define RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT                  (21U)
+#define RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFC01FFFFF))
+/*
+Page Table entry #
+*/
+#define RGX_MMUCTRL_VADDR_PT_INDEX_SHIFT                  (12U)
+#define RGX_MMUCTRL_VADDR_PT_INDEX_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFFE00FFF))
+
+
+/*
+
+		Number of entries in a PC
+	
+*/
+#define RGX_MMUCTRL_ENTRIES_PC_VALUE                      (0x00000400U)
+
+
+/*
+
+		Number of entries in a PD
+	
+*/
+#define RGX_MMUCTRL_ENTRIES_PD_VALUE                      (0x00000200U)
+
+
+/*
+
+		Number of entries in a PT
+	
+*/
+#define RGX_MMUCTRL_ENTRIES_PT_VALUE                      (0x00000200U)
+
+
+/*
+
+		Size in bits of the PC entries in memory
+	
+*/
+#define RGX_MMUCTRL_ENTRY_SIZE_PC_VALUE                   (0x00000020U)
+
+
+/*
+
+		Size in bits of the PD entries in memory
+	
+*/
+#define RGX_MMUCTRL_ENTRY_SIZE_PD_VALUE                   (0x00000040U)
+
+
+/*
+
+		Size in bits of the PT entries in memory
+	
+*/
+#define RGX_MMUCTRL_ENTRY_SIZE_PT_VALUE                   (0x00000040U)
+
+
+/*
+
+		Encoding of page size field
+	
+*/
+#define RGX_MMUCTRL_PAGE_SIZE_MASK                        (0x00000007U)
+#define RGX_MMUCTRL_PAGE_SIZE_4KB                         (0x00000000U)
+#define RGX_MMUCTRL_PAGE_SIZE_16KB                        (0x00000001U)
+#define RGX_MMUCTRL_PAGE_SIZE_64KB                        (0x00000002U)
+#define RGX_MMUCTRL_PAGE_SIZE_256KB                       (0x00000003U)
+#define RGX_MMUCTRL_PAGE_SIZE_1MB                         (0x00000004U)
+#define RGX_MMUCTRL_PAGE_SIZE_2MB                         (0x00000005U)
+
+
+/*
+
+		Range of bits used for 4KB Physical Page
+	
+*/
+#define RGX_MMUCTRL_PAGE_4KB_RANGE_SHIFT                  (12U)
+#define RGX_MMUCTRL_PAGE_4KB_RANGE_CLRMSK                 (IMG_UINT64_C(0XFFFFFF0000000FFF))
+
+
+/*
+
+		Range of bits used for 16KB Physical Page
+	
+*/
+#define RGX_MMUCTRL_PAGE_16KB_RANGE_SHIFT                 (14U)
+#define RGX_MMUCTRL_PAGE_16KB_RANGE_CLRMSK                (IMG_UINT64_C(0XFFFFFF0000003FFF))
+
+
+/*
+
+		Range of bits used for 64KB Physical Page
+	
+*/
+#define RGX_MMUCTRL_PAGE_64KB_RANGE_SHIFT                 (16U)
+#define RGX_MMUCTRL_PAGE_64KB_RANGE_CLRMSK                (IMG_UINT64_C(0XFFFFFF000000FFFF))
+
+
+/*
+
+		Range of bits used for 256KB Physical Page
+	
+*/
+#define RGX_MMUCTRL_PAGE_256KB_RANGE_SHIFT                (18U)
+#define RGX_MMUCTRL_PAGE_256KB_RANGE_CLRMSK               (IMG_UINT64_C(0XFFFFFF000003FFFF))
+
+
+/*
+
+		Range of bits used for 1MB Physical Page
+	
+*/
+#define RGX_MMUCTRL_PAGE_1MB_RANGE_SHIFT                  (20U)
+#define RGX_MMUCTRL_PAGE_1MB_RANGE_CLRMSK                 (IMG_UINT64_C(0XFFFFFF00000FFFFF))
+
+
+/*
+
+		Range of bits used for 2MB Physical Page
+	
+*/
+#define RGX_MMUCTRL_PAGE_2MB_RANGE_SHIFT                  (21U)
+#define RGX_MMUCTRL_PAGE_2MB_RANGE_CLRMSK                 (IMG_UINT64_C(0XFFFFFF00001FFFFF))
+
+
+/*
+
+		Range of bits used for PT Base Address for 4KB Physical Page
+	
+*/
+#define RGX_MMUCTRL_PT_BASE_4KB_RANGE_SHIFT               (12U)
+#define RGX_MMUCTRL_PT_BASE_4KB_RANGE_CLRMSK              (IMG_UINT64_C(0XFFFFFF0000000FFF))
+
+
+/*
+
+		Range of bits used for PT Base Address for 16KB Physical Page
+	
+*/
+#define RGX_MMUCTRL_PT_BASE_16KB_RANGE_SHIFT              (10U)
+#define RGX_MMUCTRL_PT_BASE_16KB_RANGE_CLRMSK             (IMG_UINT64_C(0XFFFFFF00000003FF))
+
+
+/*
+
+		Range of bits used for PT Base Address for 64KB Physical Page
+	
+*/
+#define RGX_MMUCTRL_PT_BASE_64KB_RANGE_SHIFT              (8U)
+#define RGX_MMUCTRL_PT_BASE_64KB_RANGE_CLRMSK             (IMG_UINT64_C(0XFFFFFF00000000FF))
+
+
+/*
+
+		Range of bits used for PT Base Address for 256KB Physical Page
+	
+*/
+#define RGX_MMUCTRL_PT_BASE_256KB_RANGE_SHIFT             (6U)
+#define RGX_MMUCTRL_PT_BASE_256KB_RANGE_CLRMSK            (IMG_UINT64_C(0XFFFFFF000000003F))
+
+
+/*
+
+		Range of bits used for PT Base Address for 1MB Physical Page
+	
+*/
+#define RGX_MMUCTRL_PT_BASE_1MB_RANGE_SHIFT               (5U)
+#define RGX_MMUCTRL_PT_BASE_1MB_RANGE_CLRMSK              (IMG_UINT64_C(0XFFFFFF000000001F))
+
+
+/*
+
+		Range of bits used for PT Base Address for 2MB Physical Page
+	
+*/
+#define RGX_MMUCTRL_PT_BASE_2MB_RANGE_SHIFT               (5U)
+#define RGX_MMUCTRL_PT_BASE_2MB_RANGE_CLRMSK              (IMG_UINT64_C(0XFFFFFF000000001F))
+
+
+/*
+
+		Format of Page Table data
+	
+*/
+/*
+PM/Meta protect bit
+*/
+#define RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_SHIFT         (62U)
+#define RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_CLRMSK        (IMG_UINT64_C(0XBFFFFFFFFFFFFFFF))
+#define RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_EN            (IMG_UINT64_C(0X4000000000000000))
+/*
+Upper part of vp page field
+*/
+#define RGX_MMUCTRL_PT_DATA_VP_PAGE_HI_SHIFT              (40U)
+#define RGX_MMUCTRL_PT_DATA_VP_PAGE_HI_CLRMSK             (IMG_UINT64_C(0XC00000FFFFFFFFFF))
+/*
+Physical page address
+*/
+#define RGX_MMUCTRL_PT_DATA_PAGE_SHIFT                    (12U)
+#define RGX_MMUCTRL_PT_DATA_PAGE_CLRMSK                   (IMG_UINT64_C(0XFFFFFF0000000FFF))
+/*
+Lower part of vp page field
+*/
+#define RGX_MMUCTRL_PT_DATA_VP_PAGE_LO_SHIFT              (6U)
+#define RGX_MMUCTRL_PT_DATA_VP_PAGE_LO_CLRMSK             (IMG_UINT64_C(0XFFFFFFFFFFFFF03F))
+/*
+Entry pending
+*/
+#define RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_SHIFT           (5U)
+#define RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_CLRMSK          (IMG_UINT64_C(0XFFFFFFFFFFFFFFDF))
+#define RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_EN              (IMG_UINT64_C(0X0000000000000020))
+/*
+PM Src
+*/
+#define RGX_MMUCTRL_PT_DATA_PM_SRC_SHIFT                  (4U)
+#define RGX_MMUCTRL_PT_DATA_PM_SRC_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFFFFFFEF))
+#define RGX_MMUCTRL_PT_DATA_PM_SRC_EN                     (IMG_UINT64_C(0X0000000000000010))
+/*
+SLC Bypass Ctrl
+*/
+#define RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_SHIFT         (3U)
+#define RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_EN            (IMG_UINT64_C(0X0000000000000008))
+/*
+Cache Coherency bit
+*/
+#define RGX_MMUCTRL_PT_DATA_CC_SHIFT                      (2U)
+#define RGX_MMUCTRL_PT_DATA_CC_CLRMSK                     (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define RGX_MMUCTRL_PT_DATA_CC_EN                         (IMG_UINT64_C(0X0000000000000004))
+/*
+Read only
+*/
+#define RGX_MMUCTRL_PT_DATA_READ_ONLY_SHIFT               (1U)
+#define RGX_MMUCTRL_PT_DATA_READ_ONLY_CLRMSK              (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define RGX_MMUCTRL_PT_DATA_READ_ONLY_EN                  (IMG_UINT64_C(0X0000000000000002))
+/*
+Entry valid
+*/
+#define RGX_MMUCTRL_PT_DATA_VALID_SHIFT                   (0U)
+#define RGX_MMUCTRL_PT_DATA_VALID_CLRMSK                  (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_MMUCTRL_PT_DATA_VALID_EN                      (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+
+		Format of Page Directory data
+	
+*/
+/*
+Entry pending
+*/
+#define RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_SHIFT           (40U)
+#define RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_CLRMSK          (IMG_UINT64_C(0XFFFFFEFFFFFFFFFF))
+#define RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_EN              (IMG_UINT64_C(0X0000010000000000))
+/*
+Page Table base address
+*/
+#define RGX_MMUCTRL_PD_DATA_PT_BASE_SHIFT                 (5U)
+#define RGX_MMUCTRL_PD_DATA_PT_BASE_CLRMSK                (IMG_UINT64_C(0XFFFFFF000000001F))
+/*
+Page Size
+*/
+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_SHIFT               (1U)
+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_CLRMSK              (IMG_UINT64_C(0XFFFFFFFFFFFFFFF1))
+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_4KB                 (IMG_UINT64_C(0000000000000000))
+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_16KB                (IMG_UINT64_C(0x0000000000000002))
+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_64KB                (IMG_UINT64_C(0x0000000000000004))
+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_256KB               (IMG_UINT64_C(0x0000000000000006))
+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_1MB                 (IMG_UINT64_C(0x0000000000000008))
+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_2MB                 (IMG_UINT64_C(0x000000000000000a))
+/*
+Entry valid
+*/
+#define RGX_MMUCTRL_PD_DATA_VALID_SHIFT                   (0U)
+#define RGX_MMUCTRL_PD_DATA_VALID_CLRMSK                  (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_MMUCTRL_PD_DATA_VALID_EN                      (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+
+		Format of Page Catalogue data
+	
+*/
+/*
+Page Catalogue base address
+*/
+#define RGX_MMUCTRL_PC_DATA_PD_BASE_SHIFT                 (4U)
+#define RGX_MMUCTRL_PC_DATA_PD_BASE_CLRMSK                (0X0000000FU)
+#define RGX_MMUCTRL_PC_DATA_PD_BASE_ALIGNSHIFT            (12U)
+#define RGX_MMUCTRL_PC_DATA_PD_BASE_ALIGNSIZE             (4096U)
+/*
+Entry pending
+*/
+#define RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_SHIFT           (1U)
+#define RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_CLRMSK          (0XFFFFFFFDU)
+#define RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_EN              (0X00000002U)
+/*
+Entry valid
+*/
+#define RGX_MMUCTRL_PC_DATA_VALID_SHIFT                   (0U)
+#define RGX_MMUCTRL_PC_DATA_VALID_CLRMSK                  (0XFFFFFFFEU)
+#define RGX_MMUCTRL_PC_DATA_VALID_EN                      (0X00000001U)
+
+
+#endif /* _RGXMMUDEFS_KM_H_ */
+
+/*****************************************************************************
+ End of file (rgxmmudefs_km.h)
+*****************************************************************************/
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/include/adf/adf_ext.h b/drivers/external_drivers/intel_media/graphics/rgx/include/adf/adf_ext.h
new file mode 100644
index 0000000..d730fe0
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/include/adf/adf_ext.h
@@ -0,0 +1,108 @@
+/*************************************************************************/ /*!
+@File           adf_ext.h
+@Title          IMG extension ioctls and ioctl packages for ADF
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+/* vi: set ts=8: */
+
+#ifndef __ADF_EXT_H__
+#define __ADF_EXT_H__
+
+#include <drm/drm.h>
+
+#define ADF_BUFFER_TRANSFORM_NONE_EXT		(0 << 0)
+#define ADF_BUFFER_TRANSFORM_FLIP_H_EXT		(1 << 0)
+#define ADF_BUFFER_TRANSFORM_FLIP_V_EXT		(1 << 1)
+#define ADF_BUFFER_TRANSFORM_ROT_90_EXT		(1 << 2)
+#define ADF_BUFFER_TRANSFORM_ROT_180_EXT	((1 << 0) + (1 << 1))
+#define ADF_BUFFER_TRANSFORM_ROT_270_EXT	((1 << 0) + (1 << 1) + (1 << 2))
+
+#define ADF_BUFFER_BLENDING_NONE_EXT		0
+#define ADF_BUFFER_BLENDING_PREMULT_EXT		1
+#define ADF_BUFFER_BLENDING_COVERAGE_EXT	2
+
+struct adf_buffer_config_ext {
+	/* Crop applied to surface (BEFORE transformation) */
+	struct drm_clip_rect	crop;
+
+	/* Region of screen to display surface in (AFTER scaling) */
+	struct drm_clip_rect	display;
+
+	/* Surface rotation / flip / mirror */
+	__u32			transform;
+
+	/* Alpha blending mode e.g. none / premult / coverage */
+	__u32			blend_type;
+
+	/* Plane alpha */
+	__u8			plane_alpha;
+	__u8			reserved[3];
+} __attribute__((packed, aligned(8)));
+
+struct adf_post_ext {
+	__u32	post_id;
+	struct adf_buffer_config_ext bufs_ext[];
+} __attribute__((packed, aligned(8)));
+
+struct adf_validate_config_ext {
+	__u32 n_interfaces;
+	__u32 __user *interfaces;
+
+	__u32 n_bufs;
+
+	struct adf_buffer_config __user *bufs;
+	struct adf_post_ext __user *post_ext;
+} __attribute__((packed, aligned(8)));
+
+/* These shouldn't be stripped by the uapi process in the bionic headers,
+ * but currently are being. Redefine them so the custom ioctl interface is
+ * actually useful.
+ */
+#ifndef ADF_IOCTL_TYPE
+#define ADF_IOCTL_TYPE 'D'
+#endif
+
+#ifndef ADF_IOCTL_NR_CUSTOM
+#define ADF_IOCTL_NR_CUSTOM 128
+#endif
+
+#define ADF_VALIDATE_CONFIG_EXT \
+ _IOW(ADF_IOCTL_TYPE, ADF_IOCTL_NR_CUSTOM + 0, struct adf_validate_config_ext)
+
+#endif /* __ADF_EXT_H__ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/include/cache_external.h b/drivers/external_drivers/intel_media/graphics/rgx/include/cache_external.h
new file mode 100644
index 0000000..2967f1e
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/include/cache_external.h
@@ -0,0 +1,75 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services cache management header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines for cache management which are visible internally
+                and externally
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _CACHE_EXTERNAL_H_
+#define _CACHE_EXTERNAL_H_
+#include "img_types.h"
+
+typedef IMG_UINT32 PVRSRV_CACHE_OP;
+
+#define PVRSRV_CACHE_OP_NONE		0x0			/*!< No operation */
+#define PVRSRV_CACHE_OP_CLEAN		0x1			/*!< Flush w/o invalidate */
+#define PVRSRV_CACHE_OP_INVALIDATE	0x2			/*!< Invalidate w/o flush */
+#define PVRSRV_CACHE_OP_FLUSH		0x3			/*!< Flush w/ invalidate */
+
+/*
+	If we get multiple cache operations before the operation which will
+	trigger the operation to happen then we need to make sure we do
+	the right thing.
+
+	Note: PVRSRV_CACHE_OP_INVALIDATE should never be passed into here
+*/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(SetCacheOp)
+#endif
+static INLINE PVRSRV_CACHE_OP SetCacheOp(PVRSRV_CACHE_OP uiCurrent,
+										 PVRSRV_CACHE_OP uiNew)
+{
+	PVRSRV_CACHE_OP uiRet;
+
+	uiRet = uiCurrent | uiNew;
+	return uiRet;
+}
+
+#endif	/* _CACHE_EXTERNAL_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/include/dbgdrvif_srv5.h b/drivers/external_drivers/intel_media/graphics/rgx/include/dbgdrvif_srv5.h
new file mode 100644
index 0000000..4f61359
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/include/dbgdrvif_srv5.h
@@ -0,0 +1,264 @@
+/*************************************************************************/ /*!
+@File
+@Title          Debug driver for Services 5
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Debug Driver Interface
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _DBGDRVIF_SRV5_
+#define _DBGDRVIF_SRV5_
+
+#if defined(_MSC_VER) 
+#pragma  warning(disable:4200)
+#endif
+
+#if defined(__linux__)
+
+#define FILE_DEVICE_UNKNOWN             0
+#define METHOD_BUFFERED                 0
+#define FILE_ANY_ACCESS                 0
+
+#define CTL_CODE( DeviceType, Function, Method, Access ) (Function) 
+#define MAKEIOCTLINDEX(i)	((i) & 0xFFF)
+
+#else
+
+#include "ioctldef.h"
+
+#endif
+
+
+/*****************************************************************************
+ Stream mode stuff.
+*****************************************************************************/
+#define DEBUG_CAPMODE_FRAMED			0x00000001UL /* Default capture mode, set when streams created */
+#define DEBUG_CAPMODE_CONTINUOUS		0x00000002UL /* Only set in WDDM, streams created with it set to this mode */
+
+#define DEBUG_FLAGS_USE_NONPAGED_MEM	0x00000001UL /* Only set in WDDM */
+#define DEBUG_FLAGS_NO_BUF_EXPANDSION	0x00000002UL
+#define DEBUG_FLAGS_READONLY			0x00000008UL
+#define DEBUG_FLAGS_WRITEONLY			0x00000010UL
+#define DEBUG_FLAGS_CIRCULAR			0x00000020UL
+
+/*****************************************************************************
+ IOCTL values.
+*****************************************************************************/
+/* IOCTL values defined here so that the windows based OS layer of PDump
+   in the server can access the GetServiceTable method.
+ */
+#define DEBUG_SERVICE_IOCTL_BASE		0x800UL
+#define DEBUG_SERVICE_GETSERVICETABLE	CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x01, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define DEBUG_SERVICE_GETSTREAM			CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x02, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define DEBUG_SERVICE_READ				CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x03, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define DEBUG_SERVICE_SETMARKER			CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x04, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define DEBUG_SERVICE_GETMARKER			CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x05, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define DEBUG_SERVICE_WAITFOREVENT		CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x06, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define DEBUG_SERVICE_GETFRAME			CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x07, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#if defined(__QNXNTO__)
+#define DEBUG_SERVICE_CREATESTREAM		CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x08, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define DEBUG_SERVICE_MAX_API			8
+#else
+#define DEBUG_SERVICE_MAX_API			9
+#endif
+
+
+#if defined(_WIN32)
+/*****************************************************************************
+ Debug driver device name
+*****************************************************************************/
+#if defined (DBGDRV_MODULE_NAME)
+#define REGISTRY_PATH_TO_DEBUG_DRIVER \
+	L"\\Registry\\Machine\\System\\CurrentControlSet\\Services\\" DBGDRV_MODULE_NAME
+#define DBGDRV_NT_DEVICE_NAME				L"\\Device\\" DBGDRV_MODULE_NAME
+#define DBGDRV_NT_SYMLINK					L"\\DosDevices\\" DBGDRV_MODULE_NAME
+#else
+#error Debug driver name must be specified
+/*
+#define DBGDRV_NT_DEVICE_NAME				L"\\Device\\VLDbgDrv"
+#define DBGDRV_NT_SYMLINK					L"\\DosDevices\\VLDBGDRV"
+*/
+#endif
+
+/* symbolic link name */
+#define DBGDRV_WIN32_DEVICE_NAME			"\\\\.\\VLDBGDRV"
+
+#define DBGDRV_WINCE_DEVICE_NAME			L"DBD1:"
+#endif
+
+#ifdef __GNUC__
+#define DBG_ALIGN(n) __attribute__ ((aligned (n)))
+#else
+#define DBG_ALIGN(n)
+#endif
+
+/* A pointer type which is at least 64 bits wide. The fixed width ensures
+ * consistency in structures between 32 and 64-bit code.
+ * The UM code (be it 32 or 64 bit) can simply write to the native pointer type (pvPtr).
+ * 64-bit KM code must read ui32Ptr if in the case of a 32-bit client, otherwise it can
+ * just read pvPtr if the client is also 64-bit
+ *
+ * ui64Ptr ensures the union is 64-bits wide in a 32-bit client.
+ *
+ * The union is explicitly 64-bit aligned as it was found gcc on x32 only
+ * aligns it to 32-bit, as the ABI permits aligning 64-bit types to a 32-bit
+ * boundary.
+ */
+typedef union
+{
+	/* native pointer type for UM to write to */
+	IMG_VOID *pvPtr;
+	/* the pointer written by a 32-bit client */
+	IMG_UINT32 ui32Ptr;
+	/* force the union width */
+	IMG_UINT64 ui64Ptr;
+} DBG_WIDEPTR DBG_ALIGN(8);
+
+/* Helper macro for dbgdriv (KM) to get the pointer value from the WIDEPTR type,
+ * depending on whether the client is 32 or 64-bit.
+ *
+ * note: double cast is required to avoid
+ * 'cast to pointer from integer of different size' warning.
+ * this is solved by first casting to an integer type.
+ */
+
+#if defined(CONFIG_COMPAT)
+#define WIDEPTR_GET_PTR(p, bCompat) (bCompat ? \
+					(IMG_VOID *) (IMG_UINTPTR_T) (p).ui32Ptr : \
+					(p).pvPtr)
+#else
+#define WIDEPTR_GET_PTR(p, bCompat) (p).pvPtr
+#endif
+
+typedef enum _DBG_EVENT_
+{
+	DBG_EVENT_STREAM_DATA = 1
+} DBG_EVENT;
+
+
+/*****************************************************************************
+ In/Out Structures
+*****************************************************************************/
+#if defined(__QNXNTO__)
+typedef struct _DBG_IN_CREATESTREAM_
+{
+	union
+	{
+		IMG_CHAR *pszName;
+		IMG_UINT64 ui64Name;
+	} u;
+	IMG_UINT32 ui32Pages;
+	IMG_UINT32 ui32CapMode;
+	IMG_UINT32 ui32OutMode;
+}DBG_IN_CREATESTREAM, *PDBG_IN_CREATESTREAM;
+
+typedef struct _DBG_OUT_CREATESTREAM_
+{
+	IMG_HANDLE phInit;
+	IMG_HANDLE phMain;
+	IMG_HANDLE phDeinit;
+} DBG_OUT_CREATESTREAM, *PDBG_OUT_CREATESTREAM;
+#endif
+
+typedef struct _DBG_IN_FINDSTREAM_
+{
+	DBG_WIDEPTR pszName;
+	IMG_BOOL bResetStream;
+}DBG_IN_FINDSTREAM, *PDBG_IN_FINDSTREAM;
+
+#define DEBUG_READ_BUFID_MAIN			0
+#define DEBUG_READ_BUFID_INIT			1
+#define DEBUG_READ_BUFID_DEINIT			2
+
+typedef struct _DBG_IN_READ_
+{
+	DBG_WIDEPTR pui8OutBuffer;
+	IMG_SID hStream;
+	IMG_UINT32 ui32BufID;
+	IMG_UINT32 ui32OutBufferSize;
+} DBG_IN_READ, *PDBG_IN_READ;
+
+typedef struct _DBG_OUT_READ_
+{
+	IMG_UINT32 ui32DataRead;
+	IMG_UINT32 ui32SplitMarker;
+} DBG_OUT_READ, *PDBG_OUT_READ;
+
+typedef struct _DBG_IN_SETMARKER_
+{
+	IMG_SID hStream;
+	IMG_UINT32 ui32Marker;
+} DBG_IN_SETMARKER, *PDBG_IN_SETMARKER;
+
+/*
+	DBG STREAM abstract types
+*/
+
+typedef struct _DBG_STREAM_CONTROL_* PDBG_STREAM_CONTROL;
+typedef struct _DBG_STREAM_* PDBG_STREAM;
+
+/*
+	Lookup identifiers for the GetState method in the KM service table.
+ */
+#define DBG_GET_STATE_FLAG_IS_READONLY    0x03
+
+
+/*****************************************************************************
+ Kernel mode service table
+*****************************************************************************/
+typedef struct _DBGKM_SERVICE_TABLE_
+{
+	IMG_UINT32 ui32Size;
+	IMG_BOOL	(IMG_CALLCONV *pfnCreateStream)			(IMG_CHAR * pszName,IMG_UINT32 ui32Flags,IMG_UINT32 ui32Pages, IMG_HANDLE* phInit, IMG_HANDLE* phMain, IMG_HANDLE* phDeinit);
+	IMG_VOID 	(IMG_CALLCONV *pfnDestroyStream)		(IMG_HANDLE hInit, IMG_HANDLE hMain, IMG_HANDLE hDeinit);
+	IMG_UINT32	(IMG_CALLCONV *pfnDBGDrivWrite2)		(PDBG_STREAM psStream, IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize);
+	IMG_VOID 	(IMG_CALLCONV *pfnSetMarker)			(PDBG_STREAM psStream, IMG_UINT32 ui32Marker);
+	IMG_VOID 	(IMG_CALLCONV *pfnWaitForEvent)			(DBG_EVENT eEvent);
+	IMG_UINT32  (IMG_CALLCONV *pfnGetCtrlState)			(PDBG_STREAM psStream, IMG_UINT32 ui32StateID);
+	IMG_VOID 	(IMG_CALLCONV *pfnSetFrame)				(IMG_UINT32 ui32Frame);
+} DBGKM_SERVICE_TABLE, *PDBGKM_SERVICE_TABLE;
+
+#if defined(_MSC_VER) 
+#pragma  warning(default:4200)
+#endif
+
+#endif
+
+/*****************************************************************************
+ End of file
+*****************************************************************************/
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/include/dc_external.h b/drivers/external_drivers/intel_media/graphics/rgx/include/dc_external.h
new file mode 100644
index 0000000..dd64225
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/include/dc_external.h
@@ -0,0 +1,71 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device class external
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines DC specific structures which are externally visible
+                (i.e. visible to clients of services), but are also required
+                within services.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _DC_EXTERNAL_H_
+#define _DC_EXTERNAL_H_
+
+#include "img_types.h"
+
+#define DC_NAME_SIZE	50
+typedef struct _DC_DISPLAY_INFO_
+{
+	IMG_CHAR		szDisplayName[DC_NAME_SIZE];
+	IMG_UINT32		ui32MinDisplayPeriod;
+	IMG_UINT32		ui32MaxDisplayPeriod;
+	IMG_UINT32		ui32MaxPipes;
+	IMG_BOOL		bUnlatchedSupported;
+} DC_DISPLAY_INFO;
+
+typedef struct _DC_BUFFER_IMPORT_INFO_
+{
+	IMG_UINT32		ePixFormat;
+	IMG_UINT32		ui32BPP;
+	IMG_UINT32		ui32Width[3];
+	IMG_UINT32		ui32Height[3];
+	IMG_UINT32		ui32ByteStride[3];
+	IMG_UINT32		ui32PrivData[3];
+} DC_BUFFER_IMPORT_INFO;
+
+#endif /* _DC_EXTERNAL_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/include/devicemem_typedefs.h b/drivers/external_drivers/intel_media/graphics/rgx/include/devicemem_typedefs.h
new file mode 100644
index 0000000..561d75f
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/include/devicemem_typedefs.h
@@ -0,0 +1,105 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device Memory Management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Client side part of device memory management -- this file
+                is forked from new_devmem_allocation.h as this one has to
+                reside in the top level include so that client code is able
+                to make use of the typedefs.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef DEVICEMEM_TYPEDEFS_H
+#define DEVICEMEM_TYPEDEFS_H
+
+#include "img_types.h"
+#include "pvrsrv_memallocflags.h"
+
+typedef struct _DEVMEM_CONTEXT_ DEVMEM_CONTEXT;     /*!< Convenience typedef for struct _DEVMEM_CONTEXT_ */
+typedef struct _DEVMEM_HEAP_ DEVMEM_HEAP;           /*!< Convenience typedef for struct _DEVMEM_HEAP_ */
+typedef struct _DEVMEM_MEMDESC_ DEVMEM_MEMDESC;     /*!< Convenience typedef for struct _DEVMEM_MEMDESC_ */
+typedef struct _DEVMEM_PAGELIST_ DEVMEM_PAGELIST;	/*!< Convenience typedef for struct _DEVMEM_PAGELIST_ */
+typedef PVRSRV_MEMALLOCFLAGS_T DEVMEM_FLAGS_T;      /*!< Conveneince typedef for PVRSRV_MEMALLOCFLAGS_T */
+
+typedef IMG_HANDLE /* FIXME: should be a SID */ DEVMEM_EXPORTHANDLE; /*!< Typedef for DeviceMem Export Handle */
+typedef IMG_UINT64 DEVMEM_EXPORTKEY;                                /*!< Typedef for DeviceMem Export Key */
+typedef IMG_DEVMEM_SIZE_T DEVMEM_SIZE_T;                            /*!< Typedef for DeviceMem SIZE_T */
+typedef IMG_DEVMEM_LOG2ALIGN_T DEVMEM_LOG2ALIGN_T;                  /*!< Typdef for DeviceMem LOG2 Alignment */
+
+/*! calling code needs all the info in this struct, to be able to pass it around */
+typedef struct
+{
+    /*! A handle to the PMR.  Should be a SID.  FIXME: decide whether
+       this is right... as the PMR would have to be a cross-process
+       handle */
+    IMG_HANDLE hPMRExportHandle;
+    /*! The "key" to prove we have authorization to use this PMR */
+    IMG_UINT64 uiPMRExportPassword;
+    /*! Size and alignment properties for this PMR.  Note, these
+       numbers are not trusted in kernel, but we need to cache them
+       client-side in order to allocate from the VM arena.  The kernel
+       will know the actual alignment and size of the PMR and thus
+       would prevent client code from breaching security here.  Ditto
+       for physmem granularity (aka page size) if this is different
+       from alignment */
+    IMG_DEVMEM_SIZE_T uiSize;
+    /*! We call this "contiguity guarantee" to be more precise than
+       calling it "alignment" or "page size", terms which may seem
+       similar but have different emphasis.  The number reported here
+       is the minimum contiguity guarantee from the creator of the
+       PMR.  Now, there is no requirement to allocate that coarsely
+       from the RA.  The alignment given to the RA simply needs to be
+       at least as coarse as the device page size for the heap we
+       ultimately intend to map into.  What is important is that the
+       device MMU data page size is not greater than the minimum
+       contiguity guarantee from the PMR.  This value is reported to
+       the client in order that it can choose to make early checks and
+       perhaps decide which heap (in a variable page size scenario) it
+       would be safe to map this PMR into.  For convenience, the
+       client may choose to use this argument as the alignment of the
+       virtual range he chooses to allocate, but this is _not_
+       necessary and in many cases would be able to get away with a
+       finer alignment, should the heap into which this PMR will be
+       mapped support it. */
+    IMG_DEVMEM_LOG2ALIGN_T uiLog2ContiguityGuarantee;
+} DEVMEM_EXPORTCOOKIE;
+
+typedef IMG_HANDLE DEVMEM_SERVER_EXPORTCOOKIE; /*!< typedef for DeviceMem Server Export Cookie */
+
+#endif /* #ifndef SRVCLIENT_NEW_DEVMEM_ALLOCATION_TYPEDEFS_H */
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/include/fbc_types.h b/drivers/external_drivers/intel_media/graphics/rgx/include/fbc_types.h
new file mode 100644
index 0000000..28be347
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/include/fbc_types.h
@@ -0,0 +1,64 @@
+/*************************************************************************/ /*!
+@File
+@Title          Frame buffer compression definitions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _FBC_TYPES_H_
+#define	_FBC_TYPES_H_
+
+/**
+ * Types of framebuffer compression available.
+ */
+typedef enum _FB_COMPRESSION_
+{
+	FB_COMPRESSION_NONE,
+	FB_COMPRESSION_DIRECT_8x8,
+	FB_COMPRESSION_DIRECT_16x4,
+	FB_COMPRESSION_DIRECT_32x2,
+	FB_COMPRESSION_INDIRECT_8x8,
+	FB_COMPRESSION_INDIRECT_16x4,
+	FB_COMPRESSION_INDIRECT_4TILE_8x8,
+	FB_COMPRESSION_INDIRECT_4TILE_16x4
+} FB_COMPRESSION;
+
+#endif	/* _FBC_TYPES_H_ */
+
+/* EOF */
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/include/img_3dtypes.h b/drivers/external_drivers/intel_media/graphics/rgx/include/img_3dtypes.h
new file mode 100644
index 0000000..d7ad113
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/include/img_3dtypes.h
@@ -0,0 +1,236 @@
+/*************************************************************************/ /*!
+@File
+@Title          Global 3D types for use by IMG APIs
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines 3D types for use by IMG APIs
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __IMG_3DTYPES_H__
+#define __IMG_3DTYPES_H__
+
+/**
+ * Comparison functions
+ * This comparison function is defined as:
+ * A {CmpFunc} B
+ * A is a reference value, e.g., incoming depth etc.
+ * B is the sample value, e.g., value in depth buffer. 
+ */
+typedef enum _IMG_COMPFUNC_
+{
+	IMG_COMPFUNC_NEVER,			/**< The comparison never succeeds */
+	IMG_COMPFUNC_LESS,			/**< The comparison is a less-than operation */
+	IMG_COMPFUNC_EQUAL,			/**< The comparison is an equal-to operation */
+	IMG_COMPFUNC_LESS_EQUAL,	/**< The comparison is a less-than or equal-to 
+									 operation */
+	IMG_COMPFUNC_GREATER,		/**< The comparison is a greater-than operation 
+								*/
+	IMG_COMPFUNC_NOT_EQUAL,		/**< The comparison is a no-equal-to operation
+								*/
+	IMG_COMPFUNC_GREATER_EQUAL,	/**< The comparison is a greater-than or 
+									 equal-to operation */
+	IMG_COMPFUNC_ALWAYS,		/**< The comparison always succeeds */
+} IMG_COMPFUNC;
+
+/**
+ * Stencil op functions
+ */
+typedef enum _IMG_STENCILOP_
+{
+	IMG_STENCILOP_KEEP,		/**< Keep original value */
+	IMG_STENCILOP_ZERO,		/**< Set stencil to 0 */
+	IMG_STENCILOP_REPLACE,	/**< Replace stencil entry */
+	IMG_STENCILOP_INCR_SAT,	/**< Increment stencil entry, clamping to max */
+	IMG_STENCILOP_DECR_SAT,	/**< Decrement stencil entry, clamping to zero */
+	IMG_STENCILOP_INVERT,	/**< Invert bits in stencil entry */
+	IMG_STENCILOP_INCR,		/**< Increment stencil entry, 
+								 wrapping if necessary */
+	IMG_STENCILOP_DECR,		/**< Decrement stencil entry, 
+								 wrapping if necessary */
+} IMG_STENCILOP;
+
+/**
+ * Memory layout enumeration.
+ * Defines how pixels are layed out within a surface.
+ */
+typedef enum _IMG_MEMLAYOUT_
+{
+	IMG_MEMLAYOUT_STRIDED,			/**< Resource is strided, one row at a time */
+    IMG_MEMLAYOUT_TWIDDLED,			/**< Resource is 2D twiddled, classic style */
+    IMG_MEMLAYOUT_3DTWIDDLED,       /**< Resource is 3D twiddled, classic style */
+	IMG_MEMLAYOUT_TILED,			/**< Resource is tiled, tiling config specified elsewhere. */
+	IMG_MEMLAYOUT_PAGETILED,		/**< Resource is pagetiled */
+} IMG_MEMLAYOUT;
+
+/**
+ * Alpha blending allows colours and textures on one surface
+ * to be blended with transparancy onto another surface.
+ * These definitions apply to both source and destination blending
+ * states
+ */
+typedef enum _IMG_BLEND_
+{
+	IMG_BLEND_ZERO = 0,        /**< Blend factor is (0,0,0,0) */
+	IMG_BLEND_ONE,             /**< Blend factor is (1,1,1,1) */
+	IMG_BLEND_SRC_COLOUR,      /**< Blend factor is the source colour */
+	IMG_BLEND_INV_SRC_COLOUR,  /**< Blend factor is the inverted source colour
+									(i.e. 1-src_col) */
+	IMG_BLEND_SRC_ALPHA,       /**< Blend factor is the source alpha */
+	IMG_BLEND_INV_SRC_ALPHA,   /**< Blend factor is the inverted source alpha
+									(i.e. 1-src_alpha) */
+	IMG_BLEND_DEST_ALPHA,      /**< Blend factor is the destination alpha */
+	IMG_BLEND_INV_DEST_ALPHA,  /**< Blend factor is the inverted destination 
+									alpha */
+	IMG_BLEND_DEST_COLOUR,     /**< Blend factor is the destination colour */
+	IMG_BLEND_INV_DEST_COLOUR, /**< Blend factor is the inverted destination 
+									colour */
+	IMG_BLEND_SRC_ALPHASAT,    /**< Blend factor is the alpha saturation (the 
+									minimum of (Src alpha, 
+									1 - destination alpha)) */
+	IMG_BLEND_BLEND_FACTOR,    /**< Blend factor is a constant */
+	IMG_BLEND_INVBLEND_FACTOR, /**< Blend factor is a constant (inverted)*/
+	IMG_BLEND_SRC1_COLOUR,     /**< Blend factor is the colour outputted from 
+									the pixel shader */
+	IMG_BLEND_INV_SRC1_COLOUR, /**< Blend factor is the inverted colour 
+									outputted from the pixel shader */
+	IMG_BLEND_SRC1_ALPHA,      /**< Blend factor is the alpha outputted from 
+									the pixel shader */
+	IMG_BLEND_INV_SRC1_ALPHA   /**< Blend factor is the inverted alpha
+									outputted from the pixel shader */
+} IMG_BLEND;
+
+/**
+ * The arithmetic operation to perform when blending
+ */
+typedef enum _IMG_BLENDOP_
+{
+	IMG_BLENDOP_ADD = 0,      /**< Result = (Source + Destination) */
+	IMG_BLENDOP_SUBTRACT,     /**< Result = (Source - Destination) */
+	IMG_BLENDOP_REV_SUBTRACT, /**< Result = (Destination - Source) */
+	IMG_BLENDOP_MIN,          /**< Result = min (Source, Destination) */
+	IMG_BLENDOP_MAX           /**< Result = max (Source, Destination) */
+} IMG_BLENDOP;
+
+/**
+ * Logical operation to perform when logic ops are enabled
+ */
+typedef enum _IMG_LOGICOP_
+{
+	IMG_LOGICOP_CLEAR = 0,     /**< Result = 0 */
+	IMG_LOGICOP_SET,           /**< Result = -1 */
+	IMG_LOGICOP_COPY,          /**< Result = Source */
+	IMG_LOGICOP_COPY_INVERTED, /**< Result = ~Source */
+	IMG_LOGICOP_NOOP,          /**< Result = Destination */
+	IMG_LOGICOP_INVERT,        /**< Result = ~Destination */
+	IMG_LOGICOP_AND,           /**< Result = Source & Destination */
+	IMG_LOGICOP_NAND,          /**< Result = ~(Source & Destination) */
+	IMG_LOGICOP_OR,            /**< Result = Source | Destination */
+	IMG_LOGICOP_NOR,           /**< Result = ~(Source | Destination) */
+	IMG_LOGICOP_XOR,           /**< Result = Source ^ Destination */
+	IMG_LOGICOP_EQUIV,         /**< Result = ~(Source ^ Destination) */
+	IMG_LOGICOP_AND_REVERSE,   /**< Result = Source & ~Destination */
+	IMG_LOGICOP_AND_INVERTED,  /**< Result = ~Source & Destination */
+	IMG_LOGICOP_OR_REVERSE,    /**< Result = Source | ~Destination */
+	IMG_LOGICOP_OR_INVERTED    /**< Result = ~Source | Destination */
+} IMG_LOGICOP;
+
+/**
+ * Type of fog blending supported
+ */
+typedef enum _IMG_FOGMODE_
+{
+	IMG_FOGMODE_NONE, /**< No fog blending - fog calculations are
+					   *   based on the value output from the vertex phase */
+	IMG_FOGMODE_LINEAR, /**< Linear interpolation */
+	IMG_FOGMODE_EXP, /**< Exponential */
+	IMG_FOGMODE_EXP2, /**< Exponential squaring */
+} IMG_FOGMODE;
+
+/**
+ * Types of filtering
+ */
+typedef enum _IMG_FILTER_
+{
+	IMG_FILTER_DONTCARE,	/**< Any filtering mode is acceptable */
+	IMG_FILTER_POINT,		/**< Point filtering */
+	IMG_FILTER_LINEAR,		/**< Bi-linear filtering */
+	IMG_FILTER_BICUBIC,		/**< Bi-cubic filtering */
+} IMG_FILTER;
+
+/**
+ * Addressing modes for textures
+ */
+typedef enum _IMG_ADDRESSMODE_
+{
+	IMG_ADDRESSMODE_REPEAT,	/**< Texture repeats conintuously */
+	IMG_ADDRESSMODE_FLIP, /**< Texture flips on odd integer part */
+	IMG_ADDRESSMODE_CLAMP, /**< Texture clamped at 0 or 1 */
+	IMG_ADDRESSMODE_FLIPCLAMP, /**< Flipped once, then clampe */
+	IMG_ADDRESSMODE_CLAMPBORDER,
+	IMG_ADDRESSMODE_OGL_CLAMP,
+	IMG_ADDRESSMODE_OVG_TILEFILL,
+	IMG_ADDRESSMODE_DONTCARE,
+} IMG_ADDRESSMODE;
+
+/**
+ * Culling based on winding order of triangle.
+ */
+typedef enum _IMG_CULLMODE_
+{
+	IMG_CULLMODE_NONE,			/**< Don't cull */
+	IMG_CULLMODE_FRONTFACING,	/**< Front facing triangles */
+	IMG_CULLMODE_BACKFACING,	/**< Back facing triangles */
+} IMG_CULLMODE;
+
+/**
+ * Rotation clockwise
+ */
+typedef enum _IMG_ROTATION_
+{
+	IMG_ROTATION_0DEG = 0,
+	IMG_ROTATION_90DEG = 1,
+	IMG_ROTATION_180DEG = 2,
+	IMG_ROTATION_270DEG = 3,
+	IMG_ROTATION_FLIP_Y = 4
+} IMG_ROTATION;
+
+
+#endif /* __IMG_3DTYPES_H__ */
+/******************************************************************************
+ End of file (img_3dtypes.h)
+******************************************************************************/
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/include/img_defs.h b/drivers/external_drivers/intel_media/graphics/rgx/include/img_defs.h
new file mode 100644
index 0000000..a4a3c22
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/include/img_defs.h
@@ -0,0 +1,226 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common header containing type definitions for portability
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Contains variable and structure definitions. Any platform
+                specific types should be defined in this file.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__IMG_DEFS_H__)
+#define __IMG_DEFS_H__
+
+#include <stddef.h>
+
+#include "img_types.h"
+
+#if defined (NO_INLINE_FUNCS)
+	#define	INLINE
+	#define	FORCE_INLINE
+#else
+#if defined (__cplusplus)
+	#define INLINE					inline
+	#define	FORCE_INLINE			inline
+#else
+#if	!defined(INLINE)
+	#define	INLINE					__inline
+#endif
+#if defined(UNDER_WDDM) && defined(_X86_)
+	#define	FORCE_INLINE			__forceinline
+#else
+	#define	FORCE_INLINE			static __inline
+#endif
+#endif
+#endif
+
+
+/* Use this in any file, or use attributes under GCC - see below */
+#ifndef PVR_UNREFERENCED_PARAMETER
+#define	PVR_UNREFERENCED_PARAMETER(param) ((void)(param))
+#endif
+
+/*! Macro used to check structure size and alignment at compile time. */
+#define BLD_ASSERT(expr, file) _impl_ASSERT_LINE(expr,__LINE__,file)
+#define _impl_JOIN(a,b) a##b
+#define _impl_ASSERT_LINE(expr, line, file) \
+	typedef char _impl_JOIN(build_assertion_failed_##file##_,line)[2*!!(expr)-1];
+
+/*! Macro to calculate the n-byte aligned value from that supplied rounding up.
+ * n must be a power of two.
+ *
+ * Both arguments should be of a type with the same size otherwise the macro may
+ * cut off digits, e.g. imagine a 64 bit address in _x and a 32 bit value in _n.
+ */
+#define PVR_ALIGN(_x, _n)   (((_x)+((_n)-1)) & ~((_n)-1))
+
+
+/* The best way to supress unused parameter warnings using GCC is to use a
+ * variable attribute.  Place the unref__ between the type and name of an
+ * unused parameter in a function parameter list, eg `int unref__ var'. This
+ * should only be used in GCC build environments, for example, in files that
+ * compile only on Linux. Other files should use UNREFERENCED_PARAMETER */
+#ifdef __GNUC__
+#define unref__ __attribute__ ((unused))
+#else
+#define unref__
+#endif
+
+#if defined(_WIN32)
+	#define IMG_CALLCONV __stdcall
+	#define IMG_INTERNAL
+	#define	IMG_EXPORT	__declspec(dllexport)
+	#define IMG_RESTRICT __restrict
+	#define C_CALLCONV	__cdecl
+
+	/* IMG_IMPORT is defined as IMG_EXPORT so that headers and implementations match.
+	 * Some compilers require the header to be declared IMPORT, while the implementation is declared EXPORT 
+	 */
+	#define	IMG_IMPORT	IMG_EXPORT
+
+#if defined(UNDER_WDDM)
+	#ifndef	_INC_STDLIB
+			#if defined (UNDER_MSBUILD)
+			_CRTIMP __declspec(noreturn) void __cdecl abort(void);
+		#else
+			_CRTIMP void __cdecl abort(void);
+		#endif
+	#endif
+	#if defined(EXIT_ON_ABORT)
+		#define IMG_ABORT()	exit(1);
+	#else
+		#define IMG_ABORT()	abort();
+	#endif
+//	#define IMG_ABORT()	img_abort()
+#endif /* UNDER_WDDM */
+#else
+	#if defined(LINUX) || defined(__METAG) || defined(__QNXNTO__)
+
+		#define IMG_CALLCONV
+		#define C_CALLCONV
+		#if defined(__linux__) || defined(__QNXNTO__)
+			#define IMG_INTERNAL	__attribute__((visibility("hidden")))
+		#else
+			#define IMG_INTERNAL
+		#endif
+		#define IMG_EXPORT		__attribute__((visibility("default")))
+		#define IMG_IMPORT
+		#define IMG_RESTRICT	__restrict__
+
+	#else
+		#error("define an OS")
+	#endif
+#endif
+
+// Use default definition if not overridden
+#ifndef IMG_ABORT
+	#if defined(EXIT_ON_ABORT)
+		#define IMG_ABORT()	exit(1)
+	#else
+		#define IMG_ABORT()	abort()
+	#endif
+#endif
+
+#if defined(__GNUC__)
+#define IMG_FORMAT_PRINTF(x,y)		__attribute__((format(printf,x,y)))
+#else
+#define IMG_FORMAT_PRINTF(x,y)
+#endif
+
+#if defined(__GNUC__)
+#define IMG_WARN_UNUSED_RESULT		__attribute__((warn_unused_result))
+#else
+#define IMG_WARN_UNUSED_RESULT
+#endif
+
+#if defined(_MSC_VER) || defined(CC_ARM)
+	#define IMG_NORETURN __declspec(noreturn)
+#else
+	#if defined(__GNUC__)
+		#define IMG_NORETURN __attribute__((noreturn))
+	#else
+		#define IMG_NORETURN
+	#endif
+#endif
+
+#define MAX(a,b) 					(((a) > (b)) ? (a) : (b))
+#define MIN(a,b) 					(((a) < (b)) ? (a) : (b))
+
+/* Get a structures address from the address of a member */
+#define IMG_CONTAINER_OF(ptr, type, member) \
+	(type *) ((IMG_UINT8 *) (ptr) - offsetof(type, member))
+
+/* The number of elements in a fixed-sized array, IMGs ARRAY_SIZE macro */
+#define IMG_ARR_NUM_ELEMS(ARR) \
+	(sizeof(ARR) / sizeof((ARR)[0]))
+
+/* To guarantee that __func__ can be used, define it as a macro here if it
+   isn't already provided by the compiler. */
+#if defined(_MSC_VER)
+#define __func__ __FUNCTION__
+#endif
+
+#if defined(__cplusplus)
+/* C++ Specific:
+ * Disallow use of copy and assignment operator within a class.
+ * Should be placed under private. */
+#define IMG_DISALLOW_COPY_AND_ASSIGN(C) \
+	C(const C&); \
+	void operator=(const C&)
+#endif
+
+#if defined(SUPPORT_PVR_VALGRIND)
+	#if !defined(__METAG)
+		#include "/usr/include/valgrind/memcheck.h"
+
+		#define VALGRIND_HEADER_PRESENT
+
+		#define VG_MARK_INITIALIZED(pvData,ui32Size)  VALGRIND_MAKE_MEM_DEFINED(pvData,ui32Size)
+	#else
+		#define VG_MARK_INITIALIZED(pvData,ui32Size) do { } while(0)
+	#endif
+#else
+
+	#define VG_MARK_INITIALIZED(pvData,ui32Size) do { } while(0)
+#endif
+
+
+#endif /* #if !defined (__IMG_DEFS_H__) */
+/*****************************************************************************
+ End of file (IMG_DEFS.H)
+*****************************************************************************/
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/include/img_types.h b/drivers/external_drivers/intel_media/graphics/rgx/include/img_types.h
new file mode 100644
index 0000000..54a752c
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/include/img_types.h
@@ -0,0 +1,289 @@
+/*************************************************************************/ /*!
+@File
+@Title          Global types for use by IMG APIs
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines type aliases for use by IMG APIs.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __IMG_TYPES_H__
+#define __IMG_TYPES_H__
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/* To use C99 types and definitions, there are two special cases we need to
+ * cater for:
+ *
+ * - Visual Studio: in VS2010 or later, some standard headers are available,
+ *   and MSVC has its own built-in sized types. We can define the C99 types
+ *   in terms of these.
+ *
+ * - Linux kernel code: C99 sized types are defined in <linux/types.h>, but
+ *   some other features (like macros for constants or printf format
+ *   strings) are missing, so we need to fill in the gaps ourselves.
+ *
+ * For other cases (userspace code under Linux, Android or Neutrino, or
+ * firmware code), we can include the standard headers.
+ */
+#if defined(_MSC_VER)
+	#include "msvc_types.h"
+#elif defined(LINUX) && defined(__KERNEL__)
+	#include "kernel_types.h"
+#elif defined(LINUX) || defined(__METAG) || defined(__QNXNTO__)
+	#include <stddef.h>			/* NULL */
+	#include <inttypes.h>		/* intX_t/uintX_t, format specifiers */
+	#include <limits.h>			/* INT_MIN, etc */
+#else
+	#error C99 support not set up for this build
+#endif
+
+/* number of bits in the units returned by sizeof */
+#define IMG_CHAR_BIT CHAR_BIT
+
+typedef unsigned int	IMG_UINT,	*IMG_PUINT;
+typedef int				IMG_INT,	*IMG_PINT;
+
+typedef uint8_t			IMG_UINT8,	*IMG_PUINT8;
+typedef uint8_t			IMG_BYTE,	*IMG_PBYTE;
+typedef int8_t			IMG_INT8,	*IMG_PINT8;
+typedef char			IMG_CHAR,	*IMG_PCHAR;
+typedef IMG_CHAR const				*IMG_PCCHAR;
+
+typedef uint16_t		IMG_UINT16,	*IMG_PUINT16;
+typedef int16_t			IMG_INT16,	*IMG_PINT16;
+typedef uint32_t		IMG_UINT32,	*IMG_PUINT32;
+typedef int32_t			IMG_INT32,	*IMG_PINT32;
+
+typedef uint64_t		IMG_UINT64,	*IMG_PUINT64;
+typedef int64_t			IMG_INT64,	*IMG_PINT64;
+#define IMG_INT64_C(c)	INT64_C(c)
+#define IMG_UINT64_C(c)	UINT64_C(c)
+#define IMG_UINT64_FMTSPECX PRIX64
+#define IMG_UINT64_FMTSPEC PRIu64
+
+#define IMG_UINT16_MAX	UINT16_MAX
+#define IMG_UINT32_MAX	UINT32_MAX
+#define IMG_UINT64_MAX	UINT64_MAX
+
+typedef IMG_UINT16 const* IMG_PCUINT16;
+typedef IMG_INT16 const* IMG_PCINT16;
+typedef IMG_UINT32 const* IMG_PCUINT32;
+typedef IMG_INT32 const* IMG_PCINT32;
+
+/* Linux kernel mode does not use floating point */
+typedef float			IMG_FLOAT,	*IMG_PFLOAT;
+typedef double			IMG_DOUBLE, *IMG_PDOUBLE;
+
+typedef union _IMG_UINT32_FLOAT_
+{
+	IMG_UINT32 ui32;
+	IMG_FLOAT f;
+} IMG_UINT32_FLOAT;
+
+typedef int				IMG_SECURE_TYPE;
+
+typedef	enum tag_img_bool
+{
+	IMG_FALSE		= 0,
+	IMG_TRUE		= 1,
+	IMG_FORCE_ALIGN = 0x7FFFFFFF
+} IMG_BOOL, *IMG_PBOOL;
+typedef IMG_BOOL const* IMG_PCBOOL;
+
+typedef void            IMG_VOID, *IMG_PVOID;
+typedef IMG_VOID const* IMG_PCVOID;
+
+typedef uintptr_t		IMG_UINTPTR_T;
+typedef size_t			IMG_SIZE_T;
+
+#define IMG_SIZE_T_MAX	SIZE_MAX
+
+#if defined(_MSC_VER)
+#define IMG_SIZE_FMTSPEC  "%Iu"
+#define IMG_SIZE_FMTSPECX "%Ix"
+#else
+#define IMG_SIZE_FMTSPEC  "%zu"
+#define IMG_SIZE_FMTSPECX "%zx"
+#endif
+
+typedef IMG_PVOID       IMG_HANDLE;
+
+#define IMG_NULL        NULL
+
+#if defined(LINUX) && defined(__KERNEL__)
+/* prints the function name when used with printk */
+#define IMG_PFN_FMTSPEC "%pf"
+#else
+#define IMG_PFN_FMTSPEC "%p"
+#endif
+
+/* services/stream ID */
+typedef IMG_UINT64      IMG_SID;
+
+/* Process IDs */
+typedef IMG_UINT32      IMG_PID;
+
+
+/*
+ * Address types.
+ * All types used to refer to a block of memory are wrapped in structures
+ * to enforce some degree of type safety, i.e. a IMG_DEV_VIRTADDR cannot
+ * be assigned to a variable of type IMG_DEV_PHYADDR because they are not the
+ * same thing.
+ *
+ * There is an assumption that the system contains at most one non-cpu mmu,
+ * and a memory block is only mapped by the MMU once.
+ *
+ * Different devices could have offset views of the physical address space.
+ * 
+ */
+
+
+/*
+ *
+ * +------------+    +------------+      +------------+        +------------+
+ * |    CPU     |    |    DEV     |      |    DEV     |        |    DEV     |
+ * +------------+    +------------+      +------------+        +------------+
+ *       |                 |                   |                     |
+ *       | PVOID           |IMG_DEV_VIRTADDR   |IMG_DEV_VIRTADDR     |
+ *       |                 \-------------------/                     |
+ *       |                          |                                |
+ * +------------+             +------------+                         |     
+ * |    MMU     |             |    MMU     |                         |
+ * +------------+             +------------+                         | 
+ *       |                          |                                | 
+ *       |                          |                                |
+ *       |                          |                                |
+ *   +--------+                +---------+                      +--------+
+ *   | Offset |                | (Offset)|                      | Offset |
+ *   +--------+                +---------+                      +--------+    
+ *       |                          |                IMG_DEV_PHYADDR | 
+ *       |                          |                                |
+ *       |                          | IMG_DEV_PHYADDR                |
+ * +---------------------------------------------------------------------+ 
+ * |                         System Address bus                          |
+ * +---------------------------------------------------------------------+
+ *
+ */
+
+typedef IMG_PVOID IMG_CPU_VIRTADDR;
+
+/* device virtual address */
+typedef struct _IMG_DEV_VIRTADDR
+{
+	IMG_UINT64  uiAddr;
+#define IMG_CAST_TO_DEVVADDR_UINT(var)		(IMG_UINT64)(var)
+	
+} IMG_DEV_VIRTADDR;
+
+typedef IMG_UINT64 IMG_DEVMEM_SIZE_T;
+typedef IMG_UINT64 IMG_DEVMEM_ALIGN_T;
+typedef IMG_UINT64 IMG_DEVMEM_OFFSET_T;
+typedef IMG_UINT32 IMG_DEVMEM_LOG2ALIGN_T;
+
+#define IMG_DEV_VIRTADDR_FMTSPEC "0x%010" IMG_UINT64_FMTSPECX
+#define IMG_DEVMEM_SIZE_FMTSPEC "0x%010" IMG_UINT64_FMTSPECX
+#define IMG_DEVMEM_ALIGN_FMTSPEC "0x%010" IMG_UINT64_FMTSPECX
+#define IMG_DEVMEM_OFFSET_FMTSPEC "0x%010" IMG_UINT64_FMTSPECX
+
+/* cpu physical address */
+typedef struct _IMG_CPU_PHYADDR
+{
+#if defined(UNDER_WDDM)
+	IMG_UINTPTR_T uiAddr;
+#define IMG_CAST_TO_CPUPHYADDR_UINT(var)		(IMG_UINTPTR_T)(var)
+#else
+	IMG_UINT64 uiAddr;
+#define IMG_CAST_TO_CPUPHYADDR_UINT(var)		(IMG_UINT64)(var)
+#endif
+} IMG_CPU_PHYADDR;
+
+/* device physical address */
+typedef struct _IMG_DEV_PHYADDR
+{
+	IMG_UINT64 uiAddr;
+} IMG_DEV_PHYADDR;
+
+/* system physical address */
+typedef struct _IMG_SYS_PHYADDR
+{
+#if defined(UNDER_WDDM)
+	IMG_UINTPTR_T uiAddr;
+#else
+	IMG_UINT64 uiAddr;
+#endif
+} IMG_SYS_PHYADDR;
+
+/* 32-bit device virtual address (e.g. MSVDX) */
+typedef struct _IMG_DEV_VIRTADDR32
+{
+	IMG_UINT32 uiAddr;
+#define IMG_CAST_TO_DEVVADDR_UINT32(var) (IMG_UINT32)(var)
+} IMG_DEV_VIRTADDR32;
+
+/*
+	rectangle structure
+*/
+typedef struct _IMG_RECT_
+{
+	IMG_INT32	x0;
+	IMG_INT32	y0;
+	IMG_INT32	x1;
+	IMG_INT32	y1;
+}IMG_RECT;
+
+typedef struct _IMG_RECT_16_
+{
+	IMG_INT16	x0;
+	IMG_INT16	y0;
+	IMG_INT16	x1;
+	IMG_INT16	y1;
+}IMG_RECT_16;
+
+#if defined (__cplusplus)
+}
+#endif
+
+#include "img_defs.h"
+
+#endif	/* __IMG_TYPES_H__ */
+/******************************************************************************
+ End of file (img_types.h)
+******************************************************************************/
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/include/imgpixfmts_km.h b/drivers/external_drivers/intel_media/graphics/rgx/include/imgpixfmts_km.h
new file mode 100644
index 0000000..5834034
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/include/imgpixfmts_km.h
@@ -0,0 +1,81 @@
+/*************************************************************************/ /*!
+@File
+@Title          Pixel formats
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/****************************************************************************
+ **
+ ** WARNING: This file is autogenerated - DO NOT EDIT.
+ **
+ ** See fmts_systable.txt to add new formats.
+ ****************************************************************************/
+
+#include "imgyuv.h"
+#if !defined(_IMGPIXFMTS_KM_H_)
+#define _IMGPIXFMTS_KM_H_
+
+typedef enum _IMG_PIXFMT_
+{
+	IMG_PIXFMT_UNKNOWN = 0,
+	IMG_PIXFMT_R8G8B8A8_UNORM = 31,
+	IMG_PIXFMT_R8G8B8X8_UNORM = 36,
+	IMG_PIXFMT_R8_UNORM = 74,
+	IMG_PIXFMT_B5G6R5_UNORM = 82,
+	IMG_PIXFMT_B5G5R5A1_UNORM = 83,
+	IMG_PIXFMT_B5G5R5X1_UNORM = 84,
+	IMG_PIXFMT_B8G8R8A8_UNORM = 85,
+	IMG_PIXFMT_B8G8R8X8_UNORM = 86,
+	IMG_PIXFMT_B4G4R4A4_UNORM = 141,
+	IMG_PIXFMT_UYVY = 167,
+	IMG_PIXFMT_VYUY = 168,
+	IMG_PIXFMT_YUYV = 169,
+	IMG_PIXFMT_YVYU = 170,
+	IMG_PIXFMT_YVU420_2PLANE = 171,
+	IMG_PIXFMT_YUV420_2PLANE = 172,
+	IMG_PIXFMT_YVU420_2PLANE_MACRO_BLOCK = 173,
+	IMG_PIXFMT_YUV420_3PLANE = 174,
+	IMG_PIXFMT_YVU420_3PLANE = 175,
+	IMG_PIXFMT_V8U8Y8A8 = 180,
+	IMG_PIXFMT_UYVY10_422_1PLANE_PACK10_CUST1 = 248,
+} IMG_PIXFMT;
+
+
+
+#endif /* _IMGPIXFMTS_KM_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/include/imgyuv.h b/drivers/external_drivers/intel_media/graphics/rgx/include/imgyuv.h
new file mode 100644
index 0000000..9359c35
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/include/imgyuv.h
@@ -0,0 +1,63 @@
+/*************************************************************************/ /*!
+@File
+@Title          YUV defines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(_IMGYUV_H_)
+#define _IMGYUV_H_
+
+typedef enum
+{
+	IMG_COLORSPACE_BT601_CONFORMANT_RANGE = 1,
+	IMG_COLORSPACE_BT601_FULL_RANGE = 2,
+	IMG_COLORSPACE_BT709_CONFORMANT_RANGE = 3,
+	IMG_COLORSPACE_BT709_FULL_RANGE = 4
+}IMG_YUV_COLORSPACE;
+
+typedef enum
+{
+	IMG_CHROMA_INTERP_ZERO = 1,
+	IMG_CHROMA_INTERP_QUARTER = 2,
+	IMG_CHROMA_INTERP_HALF = 3,
+	IMG_CHROMA_INTERP_THREEQUARTERS = 4
+}IMG_YUV_CHROMA_INTERP;
+
+
+#endif /* _IMGYUV_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/include/kernel_types.h b/drivers/external_drivers/intel_media/graphics/rgx/include/kernel_types.h
new file mode 100644
index 0000000..b53da19
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/include/kernel_types.h
@@ -0,0 +1,136 @@
+/*************************************************************************/ /*!
+@Title          C99-compatible types and definitions for Linux kernel code
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/kernel.h>
+
+/* Limits of specified-width integer types */
+
+/* S8_MIN, etc were added in kernel version 3.14. The other versions are for
+ * earlier kernels. They can be removed once older kernels don't need to be
+ * supported.
+ */
+#ifdef S8_MIN
+	#define INT8_MIN	S8_MIN
+#else
+	#define INT8_MIN	(-128)
+#endif
+
+#ifdef S8_MAX
+	#define INT8_MAX	S8_MAX
+#else
+	#define INT8_MAX	127
+#endif
+
+#ifdef U8_MAX
+	#define UINT8_MAX	U8_MAX
+#else
+	#define UINT8_MAX	0xFF
+#endif
+
+#ifdef S16_MIN
+	#define INT16_MIN	S16_MIN
+#else
+	#define INT16_MIN	(-32768)
+#endif
+
+#ifdef S16_MAX
+	#define INT16_MAX	S16_MAX
+#else
+	#define INT16_MAX	32767
+#endif
+
+#ifdef U16_MAX
+	#define UINT16_MAX	U16_MAX
+#else
+	#define UINT16_MAX	0xFFFF
+#endif
+
+#ifdef S32_MIN
+	#define INT32_MIN	S32_MIN
+#else
+	#define INT32_MIN	(-2147483647 - 1)
+#endif
+
+#ifdef S32_MAX
+	#define INT32_MAX	S32_MAX
+#else
+	#define INT32_MAX	2147483647
+#endif
+
+#ifdef U32_MAX
+	#define UINT32_MAX	U32_MAX
+#else
+	#define UINT32_MAX	0xFFFFFFFF
+#endif
+
+#ifdef S64_MIN
+	#define INT64_MIN	S64_MIN
+#else
+	#define INT64_MIN	(-9223372036854775807LL)
+#endif
+
+#ifdef S64_MAX
+	#define INT64_MAX	S64_MAX
+#else
+	#define INT64_MAX	9223372036854775807LL
+#endif
+
+#ifdef U64_MAX
+	#define UINT64_MAX	U64_MAX
+#else
+	#define UINT64_MAX	0xFFFFFFFFFFFFFFFFULL
+#endif
+
+/* Macros for integer constants */
+#define INT8_C			S8_C
+#define UINT8_C			U8_C
+#define INT16_C			S16_C
+#define UINT16_C		U16_C
+#define INT32_C			S32_C
+#define UINT32_C		U32_C
+#define INT64_C			S64_C
+#define UINT64_C		U64_C
+
+/* Format conversion of integer types <inttypes.h> */
+/* Only define PRIX64 for the moment, as this is the only format macro that
+ * img_types.h needs.
+ */
+#define PRIX64		"llX"
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/include/lock_types.h b/drivers/external_drivers/intel_media/graphics/rgx/include/lock_types.h
new file mode 100644
index 0000000..8caf861
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/include/lock_types.h
@@ -0,0 +1,90 @@
+/*************************************************************************/ /*!
+@File           lock_types.h
+@Title          Locking types
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Locking specific enums, defines and structures
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _LOCK_TYPES_H_
+#define _LOCK_TYPES_H_
+
+/* In Linux kernel mode we are using the kernel mutex implementation directly
+ * with macros. This allows us to use the kernel lockdep feature for lock
+ * debugging. */
+#if defined(LINUX) && defined(__KERNEL__)
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+/* The mutex is defined as a pointer to be compatible with the other code. This
+ * isn't ideal and usually you wouldn't do that in kernel code. */
+typedef struct mutex *POS_LOCK;
+typedef atomic_t ATOMIC_T;
+
+#else /* defined(LINUX) && defined(__KERNEL__) */
+
+typedef struct _OS_LOCK_ *POS_LOCK;
+#if defined(LINUX)
+	typedef struct _OS_ATOMIC {IMG_INT counter;} ATOMIC_T;
+#elif defined(__QNXNTO__)
+	typedef struct _OS_ATOMIC {IMG_INT counter;} ATOMIC_T;
+#elif defined(_WIN32)
+	/*
+	 * Dummy definition. WDDM doesn't use Services, but some headers
+	 * still have to be shared. This is one such case.
+	 */
+	typedef struct _OS_ATOMIC {IMG_INT counter;} ATOMIC_T;
+#else
+	#error "Please type-define an atomic lock for this environment"
+#endif
+
+#endif /* defined(LINUX) && defined(__KERNEL__) */
+
+typedef enum
+{
+	LOCK_TYPE_NONE 			= 0x00,
+
+	LOCK_TYPE_MASK			= 0x0F,
+	LOCK_TYPE_PASSIVE		= 0x01,		/* Passive level lock e.g. mutex, system may promote to dispatch */
+	LOCK_TYPE_DISPATCH		= 0x02,		/* Dispatch level lock e.g. spin lock, may be used in ISR/MISR */
+
+	LOCK_TYPE_INSIST_FLAG	= 0x80,		/* When set caller can guarantee lock not used in ISR/MISR */
+	LOCK_TYPE_PASSIVE_ONLY	= LOCK_TYPE_INSIST_FLAG | LOCK_TYPE_PASSIVE
+
+} LOCK_TYPE;
+#endif	/* _LOCK_TYPES_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/include/pdumpdefs.h b/drivers/external_drivers/intel_media/graphics/rgx/include/pdumpdefs.h
new file mode 100644
index 0000000..e6c7f07
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/include/pdumpdefs.h
@@ -0,0 +1,202 @@
+/*************************************************************************/ /*!
+@File
+@Title          PDUMP definitions header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    PDUMP definitions header
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__PDUMPDEFS_H__)
+#define __PDUMPDEFS_H__
+
+/*! PDump Pixel Format Enumeration */
+typedef enum _PDUMP_PIXEL_FORMAT_
+{
+	PVRSRV_PDUMP_PIXEL_FORMAT_UNSUPPORTED = 0,
+	PVRSRV_PDUMP_PIXEL_FORMAT_RGB8 = 1,
+	PVRSRV_PDUMP_PIXEL_FORMAT_RGB332 = 2,
+	PVRSRV_PDUMP_PIXEL_FORMAT_KRGB555 = 3,
+	PVRSRV_PDUMP_PIXEL_FORMAT_RGB565 = 4,
+	PVRSRV_PDUMP_PIXEL_FORMAT_ARGB4444 = 5,
+	PVRSRV_PDUMP_PIXEL_FORMAT_ARGB1555 = 6,
+	PVRSRV_PDUMP_PIXEL_FORMAT_RGB888 = 7,
+	PVRSRV_PDUMP_PIXEL_FORMAT_ARGB8888 = 8,
+	PVRSRV_PDUMP_PIXEL_FORMAT_YUV8 = 9,
+//	PVRSRV_PDUMP_PIXEL_FORMAT_AYUV4444 = 10,
+	PVRSRV_PDUMP_PIXEL_FORMAT_VY0UY1_8888 = 11,
+	PVRSRV_PDUMP_PIXEL_FORMAT_UY0VY1_8888 = 12,
+	PVRSRV_PDUMP_PIXEL_FORMAT_Y0UY1V_8888 = 13,
+	PVRSRV_PDUMP_PIXEL_FORMAT_Y0VY1U_8888 = 14,
+	PVRSRV_PDUMP_PIXEL_FORMAT_YUV888 = 15,
+	PVRSRV_PDUMP_PIXEL_FORMAT_UYVY10101010 = 16,
+	PVRSRV_PDUMP_PIXEL_FORMAT_VYAUYA8888 = 17,
+	PVRSRV_PDUMP_PIXEL_FORMAT_AYUV8888 = 18,
+	PVRSRV_PDUMP_PIXEL_FORMAT_AYUV2101010 = 19,
+	PVRSRV_PDUMP_PIXEL_FORMAT_YUV101010 = 20,
+	PVRSRV_PDUMP_PIXEL_FORMAT_PL12Y8 = 21,
+	PVRSRV_PDUMP_PIXEL_FORMAT_YUV_IMC2 = 22,
+	PVRSRV_PDUMP_PIXEL_FORMAT_YUV_YV12 = 23,
+	PVRSRV_PDUMP_PIXEL_FORMAT_YUV_PL8 = 24,
+	PVRSRV_PDUMP_PIXEL_FORMAT_YUV_PL12 = 25,
+	PVRSRV_PDUMP_PIXEL_FORMAT_422PL12YUV8 = 26,
+	PVRSRV_PDUMP_PIXEL_FORMAT_420PL12YUV8 = 27,
+	PVRSRV_PDUMP_PIXEL_FORMAT_PL12Y10 = 28,
+	PVRSRV_PDUMP_PIXEL_FORMAT_422PL12YUV10 = 29,
+	PVRSRV_PDUMP_PIXEL_FORMAT_420PL12YUV10 = 30,
+	PVRSRV_PDUMP_PIXEL_FORMAT_ABGR8888 = 31,
+	PVRSRV_PDUMP_PIXEL_FORMAT_BGRA8888 = 32,
+	PVRSRV_PDUMP_PIXEL_FORMAT_ARGB8332 = 33,
+	PVRSRV_PDUMP_PIXEL_FORMAT_RGB555 = 34,
+	PVRSRV_PDUMP_PIXEL_FORMAT_F16 = 35,
+	PVRSRV_PDUMP_PIXEL_FORMAT_F32 = 36,
+	PVRSRV_PDUMP_PIXEL_FORMAT_L16 = 37,
+	PVRSRV_PDUMP_PIXEL_FORMAT_L32 = 38,
+	PVRSRV_PDUMP_PIXEL_FORMAT_RGBA8888 = 39,
+	PVRSRV_PDUMP_PIXEL_FORMAT_ABGR4444 = 40,
+	PVRSRV_PDUMP_PIXEL_FORMAT_RGBA4444 = 41,
+	PVRSRV_PDUMP_PIXEL_FORMAT_BGRA4444 = 42,
+	PVRSRV_PDUMP_PIXEL_FORMAT_ABGR1555 = 43,
+	PVRSRV_PDUMP_PIXEL_FORMAT_RGBA5551 = 44,
+	PVRSRV_PDUMP_PIXEL_FORMAT_BGRA5551 = 45,
+	PVRSRV_PDUMP_PIXEL_FORMAT_BGR565 = 46,
+	PVRSRV_PDUMP_PIXEL_FORMAT_A8 = 47,
+	PVRSRV_PDUMP_PIXEL_FORMAT_F16F16F16F16 = 49,
+	PVRSRV_PDUMP_PIXEL_FORMAT_A4 = 50,
+	PVRSRV_PDUMP_PIXEL_FORMAT_ARGB2101010 = 51,
+	PVRSRV_PDUMP_PIXEL_FORMAT_RSGSBS888 = 52,
+	PVRSRV_PDUMP_PIXEL_FORMAT_F32F32F32F32 = 53,
+	PVRSRV_PDUMP_PIXEL_FORMAT_F16F16 = 54,
+	PVRSRV_PDUMP_PIXEL_FORMAT_F32F32 = 55,
+	PVRSRV_PDUMP_PIXEL_FORMAT_F16F16F16 = 56,
+	PVRSRV_PDUMP_PIXEL_FORMAT_F32F32F32 = 57,
+	PVRSRV_PDUMP_PIXEL_FORMAT_U8 = 58,
+	PVRSRV_PDUMP_PIXEL_FORMAT_U8U8 = 59,
+	PVRSRV_PDUMP_PIXEL_FORMAT_U16 = 60,
+	PVRSRV_PDUMP_PIXEL_FORMAT_U16U16 = 61,
+	PVRSRV_PDUMP_PIXEL_FORMAT_U16U16U16U16 = 62,
+	PVRSRV_PDUMP_PIXEL_FORMAT_U32 = 63,
+	PVRSRV_PDUMP_PIXEL_FORMAT_U32U32 = 64,
+	PVRSRV_PDUMP_PIXEL_FORMAT_U32U32U32U32 = 65,
+	PVRSRV_PDUMP_PIXEL_FORMAT_YUV_YV32 = 66,
+	
+	PVRSRV_PDUMP_PIXEL_FORMAT_FORCE_I32 = 0x7fffffff
+
+} PDUMP_PIXEL_FORMAT;
+
+/*! PDump addrmode */
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT			0
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_MASK			0x000000FF 
+
+#define PVRSRV_PDUMP_ADDRMODE_STRIDESENSE_SHIFT			8
+#define PVRSRV_PDUMP_ADDRMODE_STRIDESENSE_NEGATIVE		(1 << PVRSRV_PDUMP_ADDRMODE_STRIDESENSE_SHIFT)
+
+#define PVRSRV_PDUMP_ADDRMODE_BIFTILE_MODE_SHIFT		12
+#define PVRSRV_PDUMP_ADDRMODE_BIFTILE_MODE_MASK			0x000FF000
+
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT				20
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_MASK				0x00F00000
+
+#define PVRSRV_PDUMP_ADDRMODE_FBCDECOR_SHIFT			24
+
+#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT			28
+#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_MASK			0xF0000000
+
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_STRIDE			(0 << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE1 (1 << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE2 (2 << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE3 (3 << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE4 (4 << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE5 (5 << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE6 (6 << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE7 (7 << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_TWIDDLED		(9 << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_PAGETILED		(11 << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_NONE				(0 << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_8X8_DIRECT		(1 << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_16X4_DIRECT		(2 << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_32X2_DIRECT		(3 << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_8X8_INDIRECT		(4 << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_16X4_INDIRECT		(5 << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_8X8_INDIRECT_4TILE	(6 << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_16X4_INDIRECT_4TILE	(7 << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
+
+#define PVRSRV_PDUMP_ADDRMODE_FBC_DECOR					(1 << PVRSRV_PDUMP_ADDRMODE_FBCDECOR_SHIFT)
+
+#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_BASE			(1 << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_ENHANCED		(2 << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V2				(3 << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT)
+
+/*! PDump Poll Operator */
+typedef enum _PDUMP_POLL_OPERATOR
+{
+	PDUMP_POLL_OPERATOR_EQUAL = 0,
+	PDUMP_POLL_OPERATOR_LESS = 1,
+	PDUMP_POLL_OPERATOR_LESSEQUAL = 2,
+	PDUMP_POLL_OPERATOR_GREATER = 3,
+	PDUMP_POLL_OPERATOR_GREATEREQUAL = 4,
+	PDUMP_POLL_OPERATOR_NOTEQUAL = 5,
+} PDUMP_POLL_OPERATOR;
+
+
+#define PVRSRV_PDUMP_MAX_FILENAME_SIZE			75  /*!< Max length of a pdump log file name */
+#define PVRSRV_PDUMP_MAX_COMMENT_SIZE			350 /*!< Max length of a pdump comment */
+
+/*!
+	PDump MMU type
+	(Maps to values listed in "PowerVR Tools.Pdump2 Script Functions.doc" Sec 2.13)
+*/
+typedef enum
+{
+	PDUMP_MMU_TYPE_4KPAGE_32BIT_STDTILE 	= 1,
+	PDUMP_MMU_TYPE_VARPAGE_32BIT_STDTILE 	= 2,
+	PDUMP_MMU_TYPE_4KPAGE_36BIT_EXTTILE 	= 3,
+	PDUMP_MMU_TYPE_4KPAGE_32BIT_EXTTILE 	= 4,
+	PDUMP_MMU_TYPE_4KPAGE_36BIT_STDTILE 	= 5,
+	PDUMP_MMU_TYPE_VARPAGE_40BIT 			= 6,
+	PDUMP_MMU_TYPE_VIDEO_40BIT_STDTILE		= 7,
+	PDUMP_MMU_TYPE_VIDEO_40BIT_EXTTILE		= 8,
+	PDUMP_MMU_TYPE_LAST
+} PDUMP_MMU_TYPE;
+
+#endif /* __PDUMPDEFS_H__ */
+
+/*****************************************************************************
+ End of file (pdumpdefs.h)
+*****************************************************************************/
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/include/pvr_debug.h b/drivers/external_drivers/intel_media/graphics/rgx/include/pvr_debug.h
new file mode 100644
index 0000000..7259a48
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/include/pvr_debug.h
@@ -0,0 +1,476 @@
+/*************************************************************************/ /*!
+@File
+@Title          PVR Debug Declarations
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Provides debug functionality
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __PVR_DEBUG_H__
+#define __PVR_DEBUG_H__
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+
+#if defined(_MSC_VER)
+#	define MSC_SUPPRESS_4127 __pragma(warning(suppress:4127))
+#else
+#	define MSC_SUPPRESS_4127
+#endif
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#define PVR_MAX_DEBUG_MESSAGE_LEN	(512)   /*!< Max length of a Debug Message */
+
+/* These are privately used by pvr_debug, use the PVR_DBG_ defines instead */
+#define DBGPRIV_FATAL			0x001UL  /*!< Debug-Fatal. Privately used by pvr_debug. */
+#define DBGPRIV_ERROR			0x002UL  /*!< Debug-Error. Privately used by pvr_debug. */
+#define DBGPRIV_WARNING			0x004UL  /*!< Debug-Warning. Privately used by pvr_debug. */
+#define DBGPRIV_MESSAGE			0x008UL  /*!< Debug-Message. Privately used by pvr_debug. */
+#define DBGPRIV_VERBOSE			0x010UL  /*!< Debug-Verbose. Privately used by pvr_debug. */
+#define DBGPRIV_CALLTRACE		0x020UL  /*!< Debug-CallTrace. Privately used by pvr_debug. */
+#define DBGPRIV_ALLOC			0x040UL  /*!< Debug-Alloc. Privately used by pvr_debug. */
+#define DBGPRIV_BUFFERED		0x080UL  /*!< Debug-Buffered. Privately used by pvr_debug. */
+#define DBGPRIV_DEBUG			0x100UL  /*!< Debug-AdHoc-Debug. Never submitted. Privately used by pvr_debug. */
+#define DBGPRIV_DBGDRV_MESSAGE	0x200UL  /*!< Debug-DbgDrivMessage. Privately used by pvr_debug. */
+#define DBGPRIV_LAST			0x200UL  /*!< Always set to highest mask value. Privately used by pvr_debug. */
+
+
+#if !defined(PVRSRV_NEED_PVR_ASSERT) && defined(DEBUG)
+#define PVRSRV_NEED_PVR_ASSERT
+#endif
+
+#if defined(PVRSRV_NEED_PVR_ASSERT) && !defined(PVRSRV_NEED_PVR_DPF)
+#define PVRSRV_NEED_PVR_DPF
+#endif
+
+#if !defined(PVRSRV_NEED_PVR_TRACE) && (defined(DEBUG) || defined(TIMING))
+#define PVRSRV_NEED_PVR_TRACE
+#endif
+
+#if defined(__KERNEL__)
+	IMG_IMPORT const IMG_CHAR *PVRSRVGetErrorStringKM(PVRSRV_ERROR eError);
+#	define PVRSRVGETERRORSTRING PVRSRVGetErrorStringKM
+#else
+	IMG_IMPORT const IMG_CHAR *PVRSRVGetErrorString(PVRSRV_ERROR eError);
+#	define PVRSRVGETERRORSTRING PVRSRVGetErrorString
+#endif
+
+/* PVR_ASSERT() and PVR_DBG_BREAK handling */
+
+#if defined(PVRSRV_NEED_PVR_ASSERT)
+
+/* Unfortunately the klocworks static analysis checker doesn't understand our
+ * ASSERT macros. Thus it reports lots of false positive. Defining our Assert
+ * macros in a special way when the code is analysed by klocworks avoids
+ * them. */
+#if defined(__KLOCWORK__) 
+  #define PVR_ASSERT(x) do { if (!(x)) abort(); } while (0)
+#else /* ! __KLOCWORKS__ */
+
+#if defined(_WIN32)
+#define PVR_ASSERT(expr) do 									\
+	{															\
+		MSC_SUPPRESS_4127										\
+		if (!(expr))											\
+		{														\
+			PVRSRVDebugPrintf(DBGPRIV_FATAL, __FILE__, __LINE__,\
+					  "*** Debug assertion failed!");			\
+			__debugbreak();										\
+		}														\
+	MSC_SUPPRESS_4127											\
+	} while (0)
+
+#else
+
+#if defined(LINUX) && defined(__KERNEL__)
+#include <linux/kernel.h>
+#include <linux/bug.h>
+
+/* In Linux kernel mode, use BUG() directly. This produces the correct
+   filename and line number in the panic message. */
+#define PVR_ASSERT(EXPR) do											\
+	{																\
+		if (!(EXPR))												\
+		{															\
+			PVRSRVDebugPrintf(DBGPRIV_FATAL, __FILE__, __LINE__,	\
+							  "Debug assertion failed!");			\
+			BUG();													\
+		}															\
+	} while (0)
+
+#else /* defined(LINUX) && defined(__KERNEL__) */
+
+IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVDebugAssertFail(const IMG_CHAR *pszFile,
+													   IMG_UINT32 ui32Line,
+													   const IMG_CHAR *pszAssertion)
+#if defined(__GNUC__)
+	__attribute__((noreturn))
+#endif
+	;
+
+#if defined(_MSC_VER)
+/* This alternate definition is for MSVC, which warns about do {} while (0) */
+#define PVR_ASSERT(EXPR)    MSC_SUPPRESS_4127										\
+							if (!(EXPR)) PVRSRVDebugAssertFail(__FILE__, __LINE__)
+#else
+#define PVR_ASSERT(EXPR) do										\
+	{															\
+		if (!(EXPR))											\
+			PVRSRVDebugAssertFail(__FILE__, __LINE__, #EXPR);	\
+	} while (0)
+#endif
+
+#endif /* defined(LINUX) && defined(__KERNEL__) */
+#endif /* __KLOCWORKS__ */
+#endif /* defined(PVRSRV_NEED_PVR_ASSERT)*/
+
+#if defined(__KLOCWORK__)
+	#define PVR_DBG_BREAK do { abort(); } while (0)
+#else
+	#if defined (WIN32)
+		#define PVR_DBG_BREAK __debugbreak();   /*!< Implementation of PVR_DBG_BREAK for (non-WinCE) Win32 */
+	#else
+		#if defined(PVR_DBG_BREAK_ASSERT_FAIL)
+		/*!< Implementation of PVR_DBG_BREAK that maps onto PVRSRVDebugAssertFail */
+			#if defined(_WIN32)
+				#define PVR_DBG_BREAK	DBG_BREAK
+			#else
+				#if defined(LINUX) && defined(__KERNEL__)
+					#define PVR_DBG_BREAK BUG()
+				#else
+					#define PVR_DBG_BREAK	PVRSRVDebugAssertFail(__FILE__, __LINE__, "PVR_DBG_BREAK")
+				#endif
+			#endif
+		#else
+			/*!< Null Implementation of PVR_DBG_BREAK (does nothing) */
+			#define PVR_DBG_BREAK
+		#endif
+	#endif
+#endif
+
+
+#else  /* defined(PVRSRV_NEED_PVR_ASSERT) */
+    /* Unfortunately the klocworks static analysis checker doesn't understand our
+     * ASSERT macros. Thus it reports lots of false positive. Defining our Assert
+     * macros in a special way when the code is analysed by klocworks avoids
+     * them. */
+    #if defined(__KLOCWORK__) 
+        #define PVR_ASSERT(EXPR) do { if (!(EXPR)) abort(); } while (0)
+    #else
+        #define PVR_ASSERT(EXPR) (IMG_VOID)(EXPR) /*!< Null Implementation of PVR_ASSERT (does nothing) */
+    #endif
+
+    #define PVR_DBG_BREAK    /*!< Null Implementation of PVR_DBG_BREAK (does nothing) */
+
+#endif /* defined(PVRSRV_NEED_PVR_ASSERT) */
+
+
+/* PVR_DPF() handling */
+
+#if defined(PVRSRV_NEED_PVR_DPF)
+
+	/* New logging mechanism */
+	#define PVR_DBG_FATAL		DBGPRIV_FATAL
+	#define PVR_DBG_ERROR		DBGPRIV_ERROR
+	#define PVR_DBG_WARNING		DBGPRIV_WARNING
+	#define PVR_DBG_MESSAGE		DBGPRIV_MESSAGE
+	#define PVR_DBG_VERBOSE		DBGPRIV_VERBOSE
+	#define PVR_DBG_CALLTRACE	DBGPRIV_CALLTRACE
+	#define PVR_DBG_ALLOC		DBGPRIV_ALLOC
+	#define PVR_DBG_BUFFERED	DBGPRIV_BUFFERED
+	#define PVR_DBG_DEBUG		DBGPRIV_DEBUG
+	#define PVR_DBGDRIV_MESSAGE	DBGPRIV_DBGDRV_MESSAGE
+
+	/* These levels are always on with PVRSRV_NEED_PVR_DPF */
+	#define __PVR_DPF_0x001UL(...) PVRSRVDebugPrintf(DBGPRIV_FATAL, __VA_ARGS__)
+	#define __PVR_DPF_0x002UL(...) PVRSRVDebugPrintf(DBGPRIV_ERROR, __VA_ARGS__)
+	#define __PVR_DPF_0x080UL(...) PVRSRVDebugPrintf(DBGPRIV_BUFFERED, __VA_ARGS__)
+
+	/*
+	  The AdHoc-Debug level is only supported when enabled in the local
+	  build environment and may need to be used in both debug and release
+	  builds. An error is generated in the formal build if it is checked in.
+	*/
+#if defined(PVR_DPF_ADHOC_DEBUG_ON)
+	#define __PVR_DPF_0x100UL(...) PVRSRVDebugPrintf(DBGPRIV_DEBUG, __VA_ARGS__)
+#else
+    /* Use an undefined token here to stop compilation dead in the offending module */
+	#define __PVR_DPF_0x100UL(...) __ERROR__PVR_DBG_DEBUG_is_in_use_but_has_not_been_enabled__Note_Debug_DPF_must_not_be_checked_in__Define_PVR_DPF_ADHOC_DEBUG_ON_for_testing
+#endif
+
+	/* Some are compiled out completely in release builds */
+#if defined(DEBUG)
+	#define __PVR_DPF_0x004UL(...) PVRSRVDebugPrintf(DBGPRIV_WARNING, __VA_ARGS__)
+	#define __PVR_DPF_0x008UL(...) PVRSRVDebugPrintf(DBGPRIV_MESSAGE, __VA_ARGS__)
+	#define __PVR_DPF_0x010UL(...) PVRSRVDebugPrintf(DBGPRIV_VERBOSE, __VA_ARGS__)
+	#define __PVR_DPF_0x020UL(...) PVRSRVDebugPrintf(DBGPRIV_CALLTRACE, __VA_ARGS__)
+	#define __PVR_DPF_0x040UL(...) PVRSRVDebugPrintf(DBGPRIV_ALLOC, __VA_ARGS__)
+	#define __PVR_DPF_0x200UL(...) PVRSRVDebugPrintf(DBGPRIV_DBGDRV_MESSAGE, __VA_ARGS__)
+#else
+	#define __PVR_DPF_0x004UL(...)
+	#define __PVR_DPF_0x008UL(...)
+	#define __PVR_DPF_0x010UL(...)
+	#define __PVR_DPF_0x020UL(...)
+	#define __PVR_DPF_0x040UL(...)
+	#define __PVR_DPF_0x200UL(...)
+#endif
+
+	/* Translate the different log levels to separate macros
+	 * so they can each be compiled out.
+	 */
+#if defined(DEBUG)
+	#define __PVR_DPF(lvl, ...) __PVR_DPF_ ## lvl (__FILE__, __LINE__, __VA_ARGS__)
+#else
+	#define __PVR_DPF(lvl, ...) __PVR_DPF_ ## lvl ("", 0, __VA_ARGS__)
+#endif
+
+	/* Get rid of the double bracketing */
+	#define PVR_DPF(x) __PVR_DPF x
+
+	#define PVR_LOG_ERROR(_rc, _call) \
+		PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__));
+
+	#define PVR_LOG_IF_ERROR(_rc, _call) do \
+		{ if (_rc != PVRSRV_OK) \
+			PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \
+		MSC_SUPPRESS_4127\
+		} while (0)
+
+
+	#define PVR_LOGR_IF_ERROR(_rc, _call) do \
+		{ if (_rc != PVRSRV_OK) { \
+			PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \
+			return (_rc); }\
+		MSC_SUPPRESS_4127\
+		} while (0)
+
+	#define PVR_LOGRN_IF_ERROR(_rc, _call) do \
+		{ if (_rc != PVRSRV_OK) { \
+			PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \
+			return; }\
+		MSC_SUPPRESS_4127\
+		} while (0)
+
+	#define PVR_LOGG_IF_ERROR(_rc, _call, _go) do \
+		{ if (_rc != PVRSRV_OK) { \
+			PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \
+			goto _go; }\
+		MSC_SUPPRESS_4127\
+		} while (0)
+
+	#define PVR_LOG_IF_FALSE(_expr, _msg) do \
+		{ if (!(_expr)) \
+			PVR_DPF((PVR_DBG_ERROR, "%s in %s()", _msg, __func__)); \
+		MSC_SUPPRESS_4127\
+		} while (0)
+
+	#define PVR_LOGR_IF_FALSE(_expr, _msg, _rc) do \
+		{ if (!(_expr)) { \
+			PVR_DPF((PVR_DBG_ERROR, "%s in %s()", _msg, __func__)); \
+			return (_rc); }\
+		MSC_SUPPRESS_4127\
+		} while (0)
+
+	#define PVR_LOGG_IF_FALSE(_expr, _msg, _go) do \
+		{ if (!(_expr)) { \
+			PVR_DPF((PVR_DBG_ERROR, "%s in %s()", _msg, __func__)); \
+			goto _go; }\
+		MSC_SUPPRESS_4127\
+		} while (0)
+
+IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVDebugPrintf(IMG_UINT32 ui32DebugLevel,
+												   const IMG_CHAR *pszFileName,
+												   IMG_UINT32 ui32Line,
+												   const IMG_CHAR *pszFormat,
+												   ...) IMG_FORMAT_PRINTF(4, 5);
+
+IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVDebugPrintfDumpCCB(void);
+
+#else  /* defined(PVRSRV_NEED_PVR_DPF) */
+
+	#define PVR_DPF(X)  /*!< Null Implementation of PowerVR Debug Printf (does nothing) */
+
+	#define PVR_LOG_ERROR(_rc, _call) (void)(_rc)
+	#define PVR_LOG_IF_ERROR(_rc, _call) (void)(_rc)
+	#define PVR_LOGR_IF_ERROR(_rc, _call) do { if (_rc != PVRSRV_OK) { return (_rc); } MSC_SUPPRESS_4127 } while(0)
+	#define PVR_LOGRN_IF_ERROR(_rc, _call) do { if (_rc != PVRSRV_OK) { return; } MSC_SUPPRESS_4127 } while(0)
+	#define PVR_LOGG_IF_ERROR(_rc, _call, _go) do { if (_rc != PVRSRV_OK) { goto _go; } MSC_SUPPRESS_4127 } while(0)
+	
+	#define PVR_LOG_IF_FALSE(_expr, _msg) (void)(_expr)
+	#define PVR_LOGR_IF_FALSE(_expr, _msg, _rc) do { if (!(_expr)) { return (_rc); } MSC_SUPPRESS_4127 } while(0)
+	#define PVR_LOGG_IF_FALSE(_expr, _msg, _go) do { if (!(_expr)) { goto _go; } MSC_SUPPRESS_4127 } while(0)
+
+	#undef PVR_DPF_FUNCTION_TRACE_ON
+
+#endif /* defined(PVRSRV_NEED_PVR_DPF) */
+
+
+#if defined(PVR_DPF_FUNCTION_TRACE_ON)
+
+	#define PVR_DPF_ENTERED \
+        PVR_DPF((PVR_DBG_CALLTRACE, "--> %s:%d entered", __func__, __LINE__))
+
+	#define PVR_DPF_ENTERED1(p1) \
+		PVR_DPF((PVR_DBG_CALLTRACE, "--> %s:%d entered (0x%lx)", __func__, __LINE__, ((unsigned long)p1)))
+
+	#define PVR_DPF_RETURN_RC(a) \
+        do { int _r = (a); PVR_DPF((PVR_DBG_CALLTRACE, "-< %s:%d returned %d", __func__, __LINE__, (_r))); return (_r); MSC_SUPPRESS_4127 } while (0)
+
+	#define PVR_DPF_RETURN_RC1(a,p1) \
+		do { int _r = (a); PVR_DPF((PVR_DBG_CALLTRACE, "-< %s:%d returned %d (0x%lx)", __func__, __LINE__, (_r), ((unsigned long)p1))); return (_r); MSC_SUPPRESS_4127 } while (0)
+
+	#define PVR_DPF_RETURN_VAL(a) \
+		do { PVR_DPF((PVR_DBG_CALLTRACE, "-< %s:%d returned with value", __func__, __LINE__ )); return (a); MSC_SUPPRESS_4127 } while (0)
+
+	#define PVR_DPF_RETURN_OK \
+		do { PVR_DPF((PVR_DBG_CALLTRACE, "-< %s:%d returned ok", __func__, __LINE__)); return PVRSRV_OK; MSC_SUPPRESS_4127 } while (0)
+
+	#define PVR_DPF_RETURN \
+		do { PVR_DPF((PVR_DBG_CALLTRACE, "-< %s:%d returned", __func__, __LINE__)); return; MSC_SUPPRESS_4127 } while (0)
+
+	#if !defined(DEBUG)
+	#error PVR DPF Function trace enabled in release build, rectify
+	#endif
+
+#else /* defined(PVR_DPF_FUNCTION_TRACE_ON) */
+
+	#define PVR_DPF_ENTERED
+	#define PVR_DPF_ENTERED1(p1)
+	#define PVR_DPF_RETURN_RC(a) 	 return (a)
+	#define PVR_DPF_RETURN_RC1(a,p1) return (a)
+	#define PVR_DPF_RETURN_VAL(a) 	 return (a)
+	#define PVR_DPF_RETURN_OK 		 return PVRSRV_OK
+	#define PVR_DPF_RETURN	 		 return
+
+#endif /* defined(PVR_DPF_FUNCTION_TRACE_ON) */
+
+
+/* PVR_TRACE() handling */
+
+#if defined(PVRSRV_NEED_PVR_TRACE)
+
+	#define PVR_TRACE(X)	PVRSRVTrace X    /*!< PowerVR Debug Trace Macro */
+	/* Empty string implementation that is -O0 build friendly */
+	#define PVR_TRACE_EMPTY_LINE()	PVR_TRACE(("%s", ""))
+
+IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVTrace(const IMG_CHAR* pszFormat, ... )
+	IMG_FORMAT_PRINTF(1, 2);
+
+#else /* defined(PVRSRV_NEED_PVR_TRACE) */
+    /*! Null Implementation of PowerVR Debug Trace Macro (does nothing) */
+	#define PVR_TRACE(X)    
+
+#endif /* defined(PVRSRV_NEED_PVR_TRACE) */
+
+
+#if defined(PVRSRV_NEED_PVR_ASSERT)
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(TRUNCATE_64BITS_TO_32BITS)
+#endif
+	INLINE static IMG_UINT32 TRUNCATE_64BITS_TO_32BITS(IMG_UINT64 uiInput)
+	{
+		 IMG_UINT32 uiTruncated;
+
+		 uiTruncated = (IMG_UINT32)uiInput;
+		 PVR_ASSERT(uiInput == uiTruncated);
+		 return uiTruncated;
+	}
+
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(TRUNCATE_64BITS_TO_SIZE_T)
+#endif
+	INLINE static IMG_SIZE_T TRUNCATE_64BITS_TO_SIZE_T(IMG_UINT64 uiInput)
+	{
+		 IMG_SIZE_T uiTruncated;
+
+		 uiTruncated = (IMG_SIZE_T)uiInput;
+		 PVR_ASSERT(uiInput == uiTruncated);
+		 return uiTruncated;
+	}
+
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(TRUNCATE_SIZE_T_TO_32BITS)
+#endif
+	INLINE static IMG_UINT32 TRUNCATE_SIZE_T_TO_32BITS(IMG_SIZE_T uiInput)
+	{
+		 IMG_UINT32 uiTruncated;
+
+		 uiTruncated = (IMG_UINT32)uiInput;
+		 PVR_ASSERT(uiInput == uiTruncated);
+		 return uiTruncated;
+	}
+
+
+#else /* defined(PVRSRV_NEED_PVR_ASSERT) */
+	#define TRUNCATE_64BITS_TO_32BITS(expr) ((IMG_UINT32)(expr))
+	#define TRUNCATE_64BITS_TO_SIZE_T(expr) ((IMG_SIZE_T)(expr))
+	#define TRUNCATE_SIZE_T_TO_32BITS(expr) ((IMG_UINT32)(expr))
+#endif /* defined(PVRSRV_NEED_PVR_ASSERT) */
+
+/* Macros used to trace calls */
+#if defined(DEBUG)
+	#define PVR_DBG_FILELINE , __FILE__, __LINE__
+	#define PVR_DBG_FILELINE_PARAM , const IMG_CHAR *pszaFile, IMG_UINT32 ui32Line
+	#define PVR_DBG_FILELINE_ARG , pszaFile, ui32Line
+	#define PVR_DBG_FILELINE_FMT " %s:%u"
+	#define PVR_DBG_FILELINE_UNREF() do { PVR_UNREFERENCED_PARAMETER(pszaFile); \
+				PVR_UNREFERENCED_PARAMETER(ui32Line); } while(0)
+#else
+	#define PVR_DBG_FILELINE
+	#define PVR_DBG_FILELINE_PARAM
+	#define PVR_DBG_FILELINE_ARG
+	#define PVR_DBG_FILELINE_FMT
+	#define PVR_DBG_FILELINE_UNREF()
+#endif
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif	/* __PVR_DEBUG_H__ */
+
+/******************************************************************************
+ End of file (pvr_debug.h)
+******************************************************************************/
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/include/pvr_drm_external.h b/drivers/external_drivers/intel_media/graphics/rgx/include/pvr_drm_external.h
new file mode 100644
index 0000000..484d621
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/include/pvr_drm_external.h
@@ -0,0 +1,57 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services external DRM interface header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Services DRM declarations and definitions that are visible
+                internally and externally
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(_PVR_DRM_EXTERNAL_)
+#define _PVR_DRM_EXTERNAL_
+
+typedef enum _PVRSRV_GEM_SYNC_TYPE_
+{
+	PVRSRV_GEM_SYNC_TYPE_WRITE = 0,
+	PVRSRV_GEM_SYNC_TYPE_READ_HW,
+	PVRSRV_GEM_SYNC_TYPE_READ_SW,
+	PVRSRV_GEM_SYNC_TYPE_READ_DISPLAY,
+	PVRSRV_GEM_SYNC_TYPE_COUNT
+} PVRSRV_GEM_SYNC_TYPE;
+
+#endif /* !defined(_PVR_DRM_EXTERNAL_) */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/include/pvr_fd_sync_kernel.h b/drivers/external_drivers/intel_media/graphics/rgx/include/pvr_fd_sync_kernel.h
new file mode 100644
index 0000000..4280641
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/include/pvr_fd_sync_kernel.h
@@ -0,0 +1,112 @@
+/*************************************************************************/ /*!
+@File           pvr_fd_sync_kernel.h
+@Title          Kernel/userspace interface definitions to use the kernel sync
+                driver
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+/* vi: set ts=8: */
+
+
+#ifndef _PVR_FD_SYNC_KERNEL_H_
+#define _PVR_FD_SYNC_KERNEL_H_
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define PVR_SYNC_MAX_QUERY_FENCE_POINTS 14
+
+#define PVR_SYNC_IOC_MAGIC 'W'
+
+#define PVR_SYNC_IOC_CREATE_FENCE \
+ _IOWR(PVR_SYNC_IOC_MAGIC, 0, struct pvr_sync_create_fence_ioctl_data)
+
+#define PVR_SYNC_IOC_ENABLE_FENCING \
+ _IOW(PVR_SYNC_IOC_MAGIC,  1, struct pvr_sync_enable_fencing_ioctl_data)
+
+#define PVR_SYNC_IOC_ALLOC_FENCE \
+ _IOWR(PVR_SYNC_IOC_MAGIC, 3, struct pvr_sync_alloc_fence_ioctl_data)
+
+#define PVR_SYNC_IOC_FORCE_SW_ONLY \
+ _IO(PVR_SYNC_IOC_MAGIC,   5)
+
+#define PVRSYNC_MODNAME "pvr_sync"
+
+struct pvr_sync_alloc_fence_ioctl_data
+{
+	/* Output */
+	int				iFenceFd;
+	int				bTimelineIdle;
+}
+__attribute__((packed, aligned(8)));
+
+struct pvr_sync_create_fence_ioctl_data
+{
+	/* Input */
+	int				iAllocFenceFd;
+	char				szName[32];
+
+	/* Output */
+	int				iFenceFd;
+}
+__attribute__((packed, aligned(8)));
+
+struct pvr_sync_enable_fencing_ioctl_data
+{
+	/* Input */
+	int				bFencingEnabled;
+}
+__attribute__((packed, aligned(8)));
+
+struct pvr_sync_pt_info {
+	/* Output */
+	__u8				ui8Foreign;
+	union
+	{
+		struct {
+			__u32		id;
+			__u32		ui32FWAddr;
+			__u32		ui32CurrOp;
+			__u32		ui32NextOp;
+			__u32		ui32TlTaken;
+		} s;
+		char			szForeignVal[16];
+	};
+} __attribute__((packed, aligned(8)));
+
+#endif /* _PVR_FD_SYNC_KERNEL_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/include/pvrmodule.h b/drivers/external_drivers/intel_media/graphics/rgx/include/pvrmodule.h
new file mode 100644
index 0000000..267c7b6
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/include/pvrmodule.h
@@ -0,0 +1,48 @@
+/*************************************************************************/ /*!
+@Title          Module Author and License.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef	_PVRMODULE_H_
+#define	_PVRMODULE_H_
+
+MODULE_AUTHOR("Imagination Technologies Ltd. <gpl-support@imgtec.com>");
+MODULE_LICENSE("Dual MIT/GPL");
+
+#endif	/* _PVRMODULE_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/include/pvrsrv_device_types.h b/drivers/external_drivers/intel_media/graphics/rgx/include/pvrsrv_device_types.h
new file mode 100644
index 0000000..de54d87
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/include/pvrsrv_device_types.h
@@ -0,0 +1,112 @@
+/*************************************************************************/ /*!
+@File
+@Title          PowerVR device type definitions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__PVRSRV_DEVICE_TYPES_H__)
+#define __PVRSRV_DEVICE_TYPES_H__
+
+#include "img_types.h"
+
+#define PVRSRV_MAX_DEVICES		16	/*!< Largest supported number of devices on the system */
+
+/*!
+ ******************************************************************************
+ * List of known device types.
+ *****************************************************************************/
+typedef enum PVRSRV_DEVICE_TYPE
+{
+	PVRSRV_DEVICE_TYPE_UNKNOWN			= 0,  /*!< Unknown device type */
+	PVRSRV_DEVICE_TYPE_MBX1				= 1,  /*!< MBX1 */
+	PVRSRV_DEVICE_TYPE_MBX1_LITE		= 2,  /*!< MBX1 Lite */
+	PVRSRV_DEVICE_TYPE_M24VA			= 3,  /*!< M24VA */
+	PVRSRV_DEVICE_TYPE_MVDA2			= 4,  /*!< MVDA2 */
+	PVRSRV_DEVICE_TYPE_MVED1			= 5,  /*!< MVED1 */
+	PVRSRV_DEVICE_TYPE_MSVDX			= 6,  /*!< MSVDX */
+	PVRSRV_DEVICE_TYPE_SGX				= 7,  /*!< SGX */
+	PVRSRV_DEVICE_TYPE_VGX				= 8,  /*!< VGX */
+	PVRSRV_DEVICE_TYPE_EXT				= 9,  /*!< 3rd party devices take ext type */
+	PVRSRV_DEVICE_TYPE_RGX				= 10, /*!< RGX */
+
+    PVRSRV_DEVICE_TYPE_LAST             = 10, /*!< Last device type */
+
+	PVRSRV_DEVICE_TYPE_FORCE_I32		= 0x7fffffff /*!< Force enum to be 32-bit width */
+
+} PVRSRV_DEVICE_TYPE;
+
+
+/*!
+ *****************************************************************************
+ * List of known device classes.
+ *****************************************************************************/
+typedef enum _PVRSRV_DEVICE_CLASS_
+{
+	PVRSRV_DEVICE_CLASS_3D				= 0 ,       /*!< 3D Device Class */
+	PVRSRV_DEVICE_CLASS_DISPLAY			= 1 ,       /*!< Display Device Class */
+	PVRSRV_DEVICE_CLASS_BUFFER			= 2 ,       /*!< Buffer Class */
+	PVRSRV_DEVICE_CLASS_VIDEO			= 3 ,       /*!< Video Device Class */
+
+	PVRSRV_DEVICE_CLASS_FORCE_I32 		= 0x7fffffff /* Force enum to be at least 32-bits wide */
+
+} PVRSRV_DEVICE_CLASS;
+
+
+/*!
+ ******************************************************************************
+ * Device identifier structure
+ *****************************************************************************/
+typedef struct _PVRSRV_DEVICE_IDENTIFIER_
+{
+	PVRSRV_DEVICE_TYPE		eDeviceType;		/*!< Identifies the type of the device */
+	PVRSRV_DEVICE_CLASS		eDeviceClass;		/*!< Identifies more general class of device - display/3d/mpeg etc */
+	IMG_UINT32				ui32DeviceIndex;	/*!< Index of the device within the system */
+	IMG_CHAR				*pszPDumpDevName;	/*!< Pdump memory bank name */
+	IMG_CHAR				*pszPDumpRegName;	/*!< Pdump register bank name */
+
+} PVRSRV_DEVICE_IDENTIFIER;
+
+
+#if defined(__KERNEL__) && defined(ANDROID) && !defined(__GENKSYMS__)
+#define __pvrsrv_defined_struct_enum__
+#include <services_kernel_client.h>
+#endif
+
+#endif /* __PVRSRV_DEVICE_TYPES_H__ */
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/include/pvrsrv_devmem.h b/drivers/external_drivers/intel_media/graphics/rgx/include/pvrsrv_devmem.h
new file mode 100644
index 0000000..f6967a7
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/include/pvrsrv_devmem.h
@@ -0,0 +1,577 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device Memory Management core
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Client side part of device memory management -- This
+                file defines the exposed Services API to core memory management
+                functions.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef PVRSRV_DEVMEM_H
+#define PVRSRV_DEVMEM_H
+
+#if defined __cplusplus
+extern "C" {
+#endif
+
+#include "img_types.h"
+#include "devicemem_typedefs.h"
+#include "pdumpdefs.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+#include "services.h"	/* For PVRSRV_DEV_DATA */
+#include "sync_external.h"
+
+/*
+  Device memory contexts, heaps and memory descriptors are passed
+  through to underlying memory APIs directly, but are to be regarded
+  as an opaque handle externally.
+*/
+typedef DEVMEM_CONTEXT *PVRSRV_DEVMEMCTX;       /*!< Device-Mem Client-Side Interface: Typedef for Context Ptr */
+typedef DEVMEM_HEAP *PVRSRV_HEAP;               /*!< Device-Mem Client-Side Interface: Typedef for Heap Ptr */
+typedef DEVMEM_MEMDESC *PVRSRV_MEMDESC;         /*!< Device-Mem Client-Side Interface: Typedef for Memory Descriptor Ptr */
+typedef DEVMEM_EXPORTCOOKIE PVRSRV_DEVMEM_EXPORTCOOKIE;     /*!< Device-Mem Client-Side Interface: Typedef for Export Cookie */
+typedef DEVMEM_FLAGS_T PVRSRV_MEMMAP_FLAGS_T;               /*!< Device-Mem Client-Side Interface: Typedef for Memory-Mapping Flags Enum */
+typedef DEVMEM_SERVER_EXPORTCOOKIE PVRSRV_DEVMEM_SERVER_EXPORTCOOKIE;   /*!< Device-Mem Client-Side Interface: Typedef for Server Export Cookie */
+
+/* N.B.  Flags are now defined in pvrsrv_memallocflags.h as they need
+         to be omnipresent. */
+
+/*
+ *
+ *  API functions
+ *
+ */
+
+/**************************************************************************/ /*!
+@Function       PVRSRVCreateDeviceMemContext
+@Description    Creates a device memory context.  There is a one-to-one
+                correspondence between this context data structure and the top
+                level MMU page table (known as the Page Catalogue, in the case of a
+                3-tier MMU).  It is intended that a process with its own virtual
+                space on the CPU will also have its own virtual space on the GPU.
+                Thus there is loosely a one-to-one correspondence between process
+                and device memory context, but this is not enforced at this API.
+ 
+                Every process must create the device memory context before any
+                memory allocations are made, and is responsible for freeing all
+                such allocations before destroying the context
+     
+                This is a wrapper function above the "bare-metal" device memory
+                context creation function which would create just a context and no
+                heaps.  This function will also create the heaps, according to the
+                heap config that the device specific initialization code has
+                nominated for use by this API.
+     
+                The number of heaps thus created is returned to the caller, such
+                that the caller can allocate an array and the call in to fetch
+                details of each heap, or look up the heap with the "Find Heap" API
+                described below.
+     
+                In order to derive the details of the MMU configuration for the
+                device, and for retrieving the "bridge handle" for communication
+                internally in services, is is necessary to pass in the
+                PVRSRV_DEV_DATA object as populated with a prior call to
+                PVRSRVAcquireDeviceData()
+@Input          psDev           dev data
+@Output         phCtxOut        On success, the returned DevMem Context. The
+                                caller is responsible for providing storage
+                                for this.
+@Return         PVRSRV_ERROR:   PVRSRV_OK on success. Otherwise, a PVRSRV_
+                                error code
+*/ /***************************************************************************/
+extern IMG_IMPORT PVRSRV_ERROR
+PVRSRVCreateDeviceMemContext(const PVRSRV_DEV_DATA *psDev,
+                              PVRSRV_DEVMEMCTX *phCtxOut);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVDestroyDeviceMemContext
+@Description    Destroy cannot fail.  Well.  It shouldn't, assuming the caller
+                has obeyed the protocol, i.e. has freed all his allocations 
+                beforehand.
+@Input          hCtx            Handle to a DevMem Context
+@Return         None
+*/ /***************************************************************************/
+extern IMG_IMPORT IMG_VOID
+PVRSRVDestroyDeviceMemContext(PVRSRV_DEVMEMCTX hCtx);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVFindHeapByName
+@Description    Returns the heap handle for the named heap which is assumed to
+                exist in this context. PVRSRV_HEAP *phHeapOut,  
+
+                N.B.  No need for acquire/release semantics here, as when using
+                this wrapper layer, the heaps are automatically instantiated at
+                context creation time and destroyed when the context is 
+                destroyed.
+
+                The caller is required to know the heap names already as these 
+                will vary from device to device and from purpose to purpose.
+@Input          hCtx            Handle to a DevMem Context
+@Input          pszHeapName     Name of the heap to look for
+@Output         phHeapOut       a handle to the heap, for use in future calls 
+                                to OpenAllocation / AllocDeviceMemory / Map 
+                                DeviceClassMemory, etc. (The PVRSRV_HEAP type
+                                to be regarded by caller as an opaque, but 
+                                strongly typed, handle)
+@Return         PVRSRV_ERROR:   PVRSRV_OK on success. Otherwise, a PVRSRV_
+                                error code
+*/ /***************************************************************************/
+extern PVRSRV_ERROR
+PVRSRVFindHeapByName(PVRSRV_DEVMEMCTX hCtx,
+                     const IMG_CHAR *pszHeapName,
+                     PVRSRV_HEAP *phHeapOut);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVDevmemGetHeapBaseDevVAddr
+@Description    returns the device virtual address of the base of the heap.
+@Input          hHeap           Handle to a Heap
+@Output         pDevVAddr       On success, the device virtual address of the
+                                base of the heap.
+@Return         PVRSRV_ERROR:   PVRSRV_OK on success. Otherwise, a PVRSRV_
+                                error code
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVDevmemGetHeapBaseDevVAddr(PVRSRV_HEAP hHeap,
+			        IMG_DEV_VIRTADDR *pDevVAddr);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVAllocDeviceMem
+@Description    Allocate memory from the specified heap, acquiring physical
+                memory from OS as we go and mapping this into
+                the GPU (mandatorily) and CPU (optionally)
+
+                Size must be a positive integer multiple of alignment, or, to
+                put it another way, the uiLog2Align LSBs should all be zero, but
+                at least one other bit should not be.
+
+                Caller to take charge of the PVRSRV_MEMDESC (the memory
+                descriptor) which is to be regarded as an opaque handle.
+@Input          hHeap               Handle to the heap from which memory will be
+                                    allocated
+@Input          uiSize              Amount of memory to be allocated.
+@Input          uiLog2Align         LOG2 of the required alignment
+@Input          uiMemAllocFlags     Allocation Flags
+@Input          pszText     		Text to describe the allocation
+@Output         phMemDescOut        On success, the resulting memory descriptor
+@Return         PVRSRV_OK on success. Otherwise, a PVRSRV_ error code
+*/ /***************************************************************************/
+extern PVRSRV_ERROR
+PVRSRVAllocDeviceMem(PVRSRV_HEAP hHeap,
+                     IMG_DEVMEM_SIZE_T uiSize,
+                     IMG_DEVMEM_LOG2ALIGN_T uiLog2Align,
+                     PVRSRV_MEMALLOCFLAGS_T uiMemAllocFlags,
+                     IMG_PCHAR pszText,
+                     PVRSRV_MEMDESC *phMemDescOut);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVFreeDeviceMem
+@Description    Free that allocated by PVRSRVAllocDeviceMem (Memory descriptor 
+                will be destroyed)
+@Input          hMemDesc            Handle to the descriptor of the memory to be
+                                    freed
+@Return         None
+*/ /***************************************************************************/
+extern IMG_VOID
+PVRSRVFreeDeviceMem(PVRSRV_MEMDESC hMemDesc);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVAcquireCPUMapping
+@Description    Causes the allocation referenced by this memory descriptor to be
+                mapped into cpu virtual memory, if it wasn't already, and the
+                CPU virtual address returned in the caller-provided location.
+
+                The caller must call PVRSRVReleaseCPUMapping to advise when he
+                has finished with the mapping.
+@Input          hMemDesc            Handle to the memory descriptor for which a
+                                    CPU mapping is required
+@Output         ppvCpuVirtAddrOut   On success, the caller's ptr is set to the
+                                    new CPU mapping
+@Return         PVRSRV_ERROR:       PVRSRV_OK on success. Otherwise, a PVRSRV_
+                                    error code
+*/ /***************************************************************************/
+extern PVRSRV_ERROR
+PVRSRVAcquireCPUMapping(PVRSRV_MEMDESC hMemDesc,
+                        IMG_VOID **ppvCpuVirtAddrOut);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVReleaseCPUMapping
+@Description    Relinquishes the cpu mapping acquired with 
+                PVRSRVAcquireCPUMapping()
+@Input          hMemDesc            Handle of the memory descriptor
+@Return         None
+*/ /***************************************************************************/
+extern IMG_VOID
+PVRSRVReleaseCPUMapping(PVRSRV_MEMDESC hMemDesc);
+
+
+/**************************************************************************/ /*!
+@Function       PVRSRVMapToDevice
+@Description    Map allocation into the device MMU. This function must only be
+                called once, any further calls will return
+                PVRSRV_ERROR_DEVICEMEM_ALREADY_MAPPED
+
+                The caller must call PVRSRVReleaseDeviceMapping when they
+                are finished with the mapping.
+
+@Input          hMemDesc            Handle of the memory descriptor
+@Input          hHeap               Device heap to map the allocation into
+@Output         psDevVirtAddrOut    Device virtual address
+@Return         PVRSRV_ERROR:       PVRSRV_OK on success. Otherwise, a PVRSRV_
+                                    error code
+*/ /***************************************************************************/
+extern PVRSRV_ERROR
+PVRSRVMapToDevice(PVRSRV_MEMDESC hMemDesc,
+				  PVRSRV_HEAP hHeap,
+				  IMG_DEV_VIRTADDR *psDevVirtAddrOut);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVAcquireDeviceMapping
+@Description    Acquire a reference on the device mapping the allocation.
+                If the allocation wasn't mapped into the device then 
+                and the device virtual address returned in the
+                PVRSRV_ERROR_DEVICEMEM_NO_MAPPING will be returned as
+                PVRSRVMapToDevice must be called first.
+
+                The caller must call PVRSRVReleaseDeviceMapping when they
+                are finished with the mapping.
+@Input          hMemDesc            Handle to the memory descriptor for which a
+                                    device mapping is required
+@Output         psDevVirtAddrOut    On success, the caller's ptr is set to the
+                                    new device mapping
+@Return         PVRSRV_ERROR:       PVRSRV_OK on success. Otherwise, a PVRSRV_
+                                    error code
+*/ /***************************************************************************/
+extern PVRSRV_ERROR
+PVRSRVAcquireDeviceMapping(PVRSRV_MEMDESC hMemDesc,
+						   IMG_DEV_VIRTADDR *psDevVirtAddrOut);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVReleaseDeviceMapping
+@Description    Relinquishes the device mapping acquired with
+                PVRSRVAcquireDeviceMapping or PVRSRVMapToDevice
+@Input          hMemDesc            Handle of the memory descriptor
+@Return         None
+*/ /***************************************************************************/
+extern IMG_VOID
+PVRSRVReleaseDeviceMapping(PVRSRV_MEMDESC hMemDesc);
+
+/*************************************************************************/ /*!
+@Function       PVRSRVDevmemLocalImport
+
+@Description    Import a PMR that was created with this connection to services.
+
+@Input          hExtHandle              External memory handle
+
+@Input          uiFlags                 Import flags
+
+@Output         phMemDescPtr            Created MemDesc
+
+@Output         puiSizePtr              Size of the created MemDesc
+
+@Return         PVRSRV_OK is succesful
+*/
+/*****************************************************************************/
+PVRSRV_ERROR PVRSRVDevmemLocalImport(const PVRSRV_CONNECTION *psConnection,
+									 IMG_HANDLE hExtHandle,
+									 PVRSRV_MEMMAP_FLAGS_T uiFlags,
+									 PVRSRV_MEMDESC *phMemDescPtr,
+									 IMG_DEVMEM_SIZE_T *puiSizePtr);
+
+/*************************************************************************/ /*!
+@Function       PVRSRVDevmemGetImportUID
+
+@Description    Get the UID of the import that backs this MemDesc
+
+@Input          hMemDesc                MemDesc
+
+@Return         UID of import
+*/
+/*****************************************************************************/
+PVRSRV_ERROR PVRSRVDevmemGetImportUID(PVRSRV_MEMDESC hMemDesc,
+									  IMG_UINT64 *pui64UID);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVAllocExportableDevMem
+@Description    Allocate memory without mapping into device memory context.  This
+                memory is exported and ready to be mapped into the device memory
+                context of other processes, or to CPU only with 
+                PVRSRVMapMemoryToCPUOnly(). The caller agrees to later call 
+                PVRSRVFreeUnmappedExportedMemory(). The caller must give the page
+                size of the heap into which this memory may be subsequently 
+                mapped, or the largest of such page sizes if it may be mapped 
+                into multiple places.  This information is to be communicated in
+                the Log2Align field.
+
+                Size must be a positive integer multiple of the page size
+@Input          uiLog2Align         Log2 of the alignment required
+@Input          uiSize              the amount of memory to be allocated
+@Input          uiFlags             Allocation flags
+@Input          pszText     		Text to describe the allocation
+@Output         hMemDesc
+@Return         PVRSRV_OK on success. Otherwise, a PVRSRV_ error code
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVAllocExportableDevMem(const PVRSRV_DEV_DATA *psDevData,
+							IMG_DEVMEM_SIZE_T uiSize,
+							IMG_DEVMEM_LOG2ALIGN_T uiLog2Align,
+							PVRSRV_MEMALLOCFLAGS_T uiFlags,
+							IMG_PCHAR pszText,
+							PVRSRV_MEMDESC *hMemDesc);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVAllocSparseDevMem
+@Description    Allocate sparse memory without mapping into device memory context.
+				Sparse memory is used where you have an allocation that has a
+				logical size (i.e. the amount of VM space it will need when
+				mapping it into a device) that is larger then the amount of
+				physical memory that allocation will use. An example of this
+				is a NPOT texture where the twiddling algorithm requires you
+				to round the width and height to next POT and so you know there
+				will be pages that are never accessed.
+
+				This memory is can to be exported and mapped into the device
+				memory context of other processes, or to CPU.
+
+                Size must be a positive integer multiple of the page size
+@Input          psDevData           Device to allocation the memory for
+@Input          uiSize              The logical size of allocation
+@Input          uiChunkSize         The size of the chunk
+@Input          ui32NumPhysChunks   The number of physical chunks required
+@Input          ui32NumVirtChunks   The number of virtual chunks required
+@Input			pabMappingTable		Mapping table
+@Input          uiLog2Align         Log2 of the required alignment
+@Input          uiFlags             Allocation flags
+@Input          pszText     		Text to describe the allocation
+@Output         hMemDesc
+@Return         PVRSRV_OK on success. Otherwise, a PVRSRV_ error code
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVAllocSparseDevMem(const PVRSRV_DEV_DATA *psDevData,
+						IMG_DEVMEM_SIZE_T uiSize,
+						IMG_DEVMEM_SIZE_T uiChunkSize,
+						IMG_UINT32 ui32NumPhysChunks,
+						IMG_UINT32 ui32NumVirtChunks,
+						IMG_BOOL *pabMappingTable,
+						IMG_DEVMEM_LOG2ALIGN_T uiLog2Align,
+						DEVMEM_FLAGS_T uiFlags,
+						IMG_PCHAR pszText,
+						PVRSRV_MEMDESC *hMemDesc);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVGetLog2PageSize
+@Description    Just call AFTER setting up the connection to the kernel module
+                otherwise it will run into an assert.
+                Gives the log2 of the page size that is currently utilised by
+                devmem.
+
+@Return         The page size
+*/ /***************************************************************************/
+
+IMG_UINT32 PVRSRVGetLog2PageSize(void);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVGetHeapLog2ImportAlignment
+@Description    Queries the import alignment of a passed heap.
+
+@Input          hHeap                   Heap that is queried
+@Output         puiLog2ImportAlignment  Log2 import alignment will be
+                                        returned in this
+
+@Return         PVRSRV_OK on success. Otherwise, a PVRSRV error code
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVGetHeapLog2ImportAlignment(PVRSRV_HEAP hHeap,
+                                 IMG_UINT32* puiLog2ImportAlignment);
+
+
+/**************************************************************************/ /*!
+@Function       PVRSRVExport
+@Description    Given a memory allocation allocated with Devmem_Allocate(),
+                create a "cookie" that can be passed intact by the caller's own
+                choice of secure IPC to another process and used as the argument
+                to "map" to map this memory into a heap in the target processes.
+                N.B.  This can also be used to map into multiple heaps in one 
+                process, though that's not the intention.
+
+                Note, the caller must later call Unexport before freeing the
+                memory.
+@Input          hMemDesc        handle to the descriptor of the memory to be
+                                exported
+@Output         phExportCookie  On success, a handle to the exported cookie
+@Return         PVRSRV_ERROR:   PVRSRV_OK on success. Otherwise, a PVRSRV_
+                                error code
+*/ /***************************************************************************/
+PVRSRV_ERROR PVRSRVExportDevMem(PVRSRV_MEMDESC hMemDesc,
+						  		PVRSRV_DEVMEM_EXPORTCOOKIE *phExportCookie);
+
+/**************************************************************************/ /*!
+@Function DevmemMakeServerExportClientExport
+@Description    This is a "special case" function for making a server export 
+                cookie which went through the direct bridge into an export 
+                cookie that can be passed through the client bridge.
+@Input          psConnection        Services connection
+@Input          hServerExportCookie server export cookie
+@Output         psExportCookie      ptr to export cookie
+@Return         PVRSRV_ERROR:       PVRSRV_OK on success. Otherwise, a PVRSRV_
+                                    error code
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVMakeServerExportClientExport(const PVRSRV_CONNECTION *psConnection,
+                                   PVRSRV_DEVMEM_SERVER_EXPORTCOOKIE hServerExportCookie,
+                                   PVRSRV_DEVMEM_EXPORTCOOKIE *psExportCookie);
+
+/**************************************************************************/ /*!
+@Function DevmemUnmakeServerExportClientExport
+@Description    Remove any associated resource from the Make operation
+@Input          psConnection        Services connection
+@Output         psExportCookie      ptr to export cookie
+@Return         PVRSRV_ERROR:       PVRSRV_OK on success. Otherwise, a PVRSRV_
+                                    error code
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVUnmakeServerExportClientExport(const PVRSRV_CONNECTION *psConnection,
+                                   PVRSRV_DEVMEM_EXPORTCOOKIE *psExportCookie);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVUnexport
+@Description    Undo the export caused by "PVRSRVExport" - note - it doesn't
+                actually tear down any mapping made by processes that received
+                the export cookie.  It will simply make the cookie null and void
+                and prevent further mappings.
+@Input          hMemDesc        handle to the descriptor of the memory which
+                                will no longer be exported
+@Output         phExportCookie  On success, the export cookie provided will be
+                                set to null
+@Return         PVRSRV_ERROR:   PVRSRV_OK on success. Otherwise, a PVRSRV_
+                                error code
+*/ /***************************************************************************/
+PVRSRV_ERROR PVRSRVUnexportDevMem(PVRSRV_MEMDESC hMemDesc,
+								  PVRSRV_DEVMEM_EXPORTCOOKIE *phExportCookie);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVImportDevMem
+@Description    Import memory that was previously exported with PVRSRVExport()
+                into the current process.
+
+                Note: This call only makes the memory accessible to this
+                process, it doesn't map it into the device or CPU.
+
+@Input          psConnection    Connection to services
+@Input          phExportCookie  Ptr to the handle of the export-cookie 
+                                identifying                          
+@Output         phMemDescOut    On Success, a handle to a new memory descriptor
+                                representing the memory as mapped into the
+                                local process address space.
+@Input          uiFlags         Device memory mapping flags                                
+@Input          pszText     	Text to describe the import
+@Return         PVRSRV_ERROR:   PVRSRV_OK on success. Otherwise, a PVRSRV_
+                                error code
+*/ /***************************************************************************/
+PVRSRV_ERROR PVRSRVImportDevMem(const PVRSRV_CONNECTION *psConnection,
+								PVRSRV_DEVMEM_EXPORTCOOKIE *phExportCookie,
+								PVRSRV_MEMMAP_FLAGS_T uiFlags,
+								PVRSRV_MEMDESC *phMemDescOut);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVIsDeviceMemAddrValid
+@Description    Checks if given device virtual memory address is valid
+                from the GPU's point of view.
+@Input          hContext handle to memory context
+@Input          sDevVAddr device 40bit virtual memory address
+@Return         PVRSRV_OK if address is valid or
+                PVRSRV_ERROR_INVALID_GPU_ADDR when address is invalid
+*/ /***************************************************************************/
+PVRSRV_ERROR PVRSRVIsDeviceMemAddrValid(PVRSRV_DEVMEMCTX hContext,
+                                        IMG_DEV_VIRTADDR sDevVAddr);
+
+#if defined (SUPPORT_EXPORTING_MEMORY_CONTEXT)
+/**************************************************************************/ /*!
+@Function       PVRSRVExportDevmemContext
+@Description    Export a device memory context to another process
+
+@Input          hCtx            Memory context to export                        
+@Output         phExport        On Success, a export handle that can be passed
+                                to another process and used with 
+                                PVRSRVImportDeviceMemContext to import the
+                                memory context                            
+@Return         PVRSRV_ERROR:   PVRSRV_OK on success. Otherwise, a PVRSRV_
+                                error code
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVExportDevmemContext(PVRSRV_DEVMEMCTX hCtx,
+						  IMG_HANDLE *phExport);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVUnexportDevmemContext
+@Description    Unexport an exported device memory context
+
+@Input          psConnection    Services connection
+@Input          hExport         Export handle created to be unexported
+
+@Return         None
+*/ /***************************************************************************/
+IMG_VOID
+PVRSRVUnexportDevmemContext(PVRSRV_CONNECTION *psConnection,
+							IMG_HANDLE hExport);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVImportDeviceMemContext
+@Description    Import an exported device memory context
+
+                Note: The memory context created with this function is not
+                complete and can only be used with debugger related functions
+
+@Input          psConnection    Services connection
+@Input          hExport         Export handle to import
+@Output         phCtxOut        Device memory context
+
+@Return         None
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVImportDeviceMemContext(PVRSRV_CONNECTION *psConnection,
+							 IMG_HANDLE hExport,
+							 PVRSRV_DEVMEMCTX *phCtxOut);
+
+#endif /* SUPPORT_EXPORTING_MEMORY_CONTEXT */
+#if defined __cplusplus
+};
+#endif
+#endif /* PVRSRV_DEVMEM_H */
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/include/pvrsrv_error.h b/drivers/external_drivers/intel_media/graphics/rgx/include/pvrsrv_error.h
new file mode 100644
index 0000000..82ef82a
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/include/pvrsrv_error.h
@@ -0,0 +1,61 @@
+/*************************************************************************/ /*!
+@File           pvrsrv_error.h
+@Title          services error enumerant
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines error codes used by any/all services modules
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__PVRSRV_ERROR_H__)
+#define __PVRSRV_ERROR_H__
+
+/*!
+ *****************************************************************************
+ * Error values
+ *****************************************************************************/
+typedef enum PVRSRV_ERROR
+{
+	PVRSRV_OK,
+#define PVRE(x) x,
+#include "pvrsrv_errors.h"
+#undef PVRE
+	PVRSRV_ERROR_FORCE_I32 = 0x7fffffff
+
+} PVRSRV_ERROR;
+
+#endif /* !defined (__PVRSRV_ERROR_H__) */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/include/pvrsrv_errors.h b/drivers/external_drivers/intel_media/graphics/rgx/include/pvrsrv_errors.h
new file mode 100644
index 0000000..084ce4a
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/include/pvrsrv_errors.h
@@ -0,0 +1,327 @@
+/*************************************************************************/ /*!
+@File           pvrsrv_errors.h
+@Title          services error codes
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines error codes used by any/all services modules
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* Don't add include guards to this file! */
+
+PVRE(PVRSRV_ERROR_OUT_OF_MEMORY)
+PVRE(PVRSRV_ERROR_TOO_FEW_BUFFERS)
+PVRE(PVRSRV_ERROR_INVALID_PARAMS)
+PVRE(PVRSRV_ERROR_INIT_FAILURE)
+PVRE(PVRSRV_ERROR_CANT_REGISTER_CALLBACK)
+PVRE(PVRSRV_ERROR_INVALID_DEVICE)
+PVRE(PVRSRV_ERROR_NOT_OWNER)
+PVRE(PVRSRV_ERROR_BAD_MAPPING)
+PVRE(PVRSRV_ERROR_TIMEOUT)
+PVRE(PVRSRV_ERROR_NOT_IMPLEMENTED)
+PVRE(PVRSRV_ERROR_FLIP_CHAIN_EXISTS)
+PVRE(PVRSRV_ERROR_INVALID_SWAPINTERVAL)
+PVRE(PVRSRV_ERROR_SCENE_INVALID)
+PVRE(PVRSRV_ERROR_STREAM_ERROR)
+PVRE(PVRSRV_ERROR_FAILED_DEPENDENCIES)
+PVRE(PVRSRV_ERROR_CMD_NOT_PROCESSED)
+PVRE(PVRSRV_ERROR_CMD_TOO_BIG)
+PVRE(PVRSRV_ERROR_DEVICE_REGISTER_FAILED)
+PVRE(PVRSRV_ERROR_TOOMANYBUFFERS)
+PVRE(PVRSRV_ERROR_NOT_SUPPORTED)
+PVRE(PVRSRV_ERROR_PROCESSING_BLOCKED)
+PVRE(PVRSRV_ERROR_CANNOT_FLUSH_QUEUE)
+PVRE(PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE)
+PVRE(PVRSRV_ERROR_CANNOT_GET_RENDERDETAILS)
+PVRE(PVRSRV_ERROR_RETRY)
+PVRE(PVRSRV_ERROR_DDK_VERSION_MISMATCH)
+PVRE(PVRSRV_ERROR_DDK_BUILD_MISMATCH)
+PVRE(PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH)
+PVRE(PVRSRV_ERROR_BVNC_MISMATCH)
+PVRE(PVRSRV_ERROR_META_MISMATCH)
+PVRE(PVRSRV_ERROR_UPLOAD_TOO_BIG)
+PVRE(PVRSRV_ERROR_INVALID_FLAGS)
+PVRE(PVRSRV_ERROR_FAILED_TO_REGISTER_PROCESS)
+PVRE(PVRSRV_ERROR_UNABLE_TO_LOAD_LIBRARY)
+PVRE(PVRSRV_ERROR_UNABLE_GET_FUNC_ADDR)
+PVRE(PVRSRV_ERROR_UNLOAD_LIBRARY_FAILED)
+PVRE(PVRSRV_ERROR_BRIDGE_CALL_FAILED)
+PVRE(PVRSRV_ERROR_IOCTL_CALL_FAILED)
+PVRE(PVRSRV_ERROR_MMU_API_PROTOCOL_ERROR)
+PVRE(PVRSRV_ERROR_MMU_CONTEXT_NOT_FOUND)
+PVRE(PVRSRV_ERROR_MMU_FAILED_TO_ALLOCATE_PAGETABLES)
+PVRE(PVRSRV_ERROR_MMU_FAILED_TO_CREATE_HEAP)
+PVRE(PVRSRV_ERROR_MMU_FAILED_TO_MAP_PAGE_TABLE)
+PVRE(PVRSRV_ERROR_MMU_FAILED_TO_UNMAP_PAGE_TABLE)
+PVRE(PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE)
+PVRE(PVRSRV_ERROR_MMU_LIVE_ALLOCATIONS_IN_HEAP)
+PVRE(PVRSRV_ERROR_MMU_RESERVATION_NOT_INSIDE_HEAP)
+PVRE(PVRSRV_ERROR_PMR_CLIENT_NOT_TRUSTED)
+PVRE(PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES)
+PVRE(PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY)
+PVRE(PVRSRV_ERROR_PMR_MISMATCHED_ATTRIBUTES)
+PVRE(PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE)
+PVRE(PVRSRV_ERROR_PMR_NOT_PERMITTED)
+PVRE(PVRSRV_ERROR_PMR_PMR_ALREADY_OCCUPIED)
+PVRE(PVRSRV_ERROR_PMR_UNRECOVERABLE_ERROR)
+PVRE(PVRSRV_ERROR_PMR_WRONG_PASSWORD_OR_STALE_PMR)
+PVRE(PVRSRV_ERROR_PMR_WRONG_PMR_TYPE)
+PVRE(PVRSRV_ERROR_PMR_HAS_BEEN_MAPPED)
+PVRE(PVRSRV_ERROR_PMR_BAD_MAPPINGTABLE_SIZE)
+PVRE(PVRSRV_ERROR_PMR_BAD_CHUNK_SIZE)
+PVRE(PVRSRV_ERROR_PMR_MAPPINGTABLE_MISMATCH)
+PVRE(PVRSRV_ERROR_PMR_INVALID_CHUNK)
+PVRE(PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING)
+PVRE(PVRSRV_ERROR_DEVICEMEM_ALLOCATIONS_REMAIN_IN_HEAP)
+PVRE(PVRSRV_ERROR_DEVICEMEM_BAD_IMPORT_SIZE)
+PVRE(PVRSRV_ERROR_DEVICEMEM_CANT_EXPORT_SUBALLOCATION)
+PVRE(PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_CONFIG_INDEX)
+PVRE(PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_INDEX)
+PVRE(PVRSRV_ERROR_DEVICEMEM_MAP_FAILED)
+PVRE(PVRSRV_ERROR_DEVICEMEM_NON_ZERO_USAGE_COUNT)
+PVRE(PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE)
+PVRE(PVRSRV_ERROR_DEVICEMEM_UNABLE_TO_CREATE_ARENA)
+PVRE(PVRSRV_ERROR_DEVICEMEM_OUT_OF_DEVICE_VM)
+PVRE(PVRSRV_ERROR_DEVICEMEM_ALREADY_MAPPED)
+PVRE(PVRSRV_ERROR_DEVICEMEM_NO_MAPPING)
+PVRE(PVRSRV_ERROR_DEVICEMEM_INVALID_PMR_FLAGS)
+PVRE(PVRSRV_ERROR_INVALID_MMU_TYPE)
+PVRE(PVRSRV_ERROR_BUFFER_DEVICE_NOT_FOUND)
+PVRE(PVRSRV_ERROR_BUFFER_DEVICE_ALREADY_PRESENT)
+PVRE(PVRSRV_ERROR_PCI_DEVICE_NOT_FOUND)
+PVRE(PVRSRV_ERROR_PCI_CALL_FAILED)
+PVRE(PVRSRV_ERROR_PCI_REGION_TOO_SMALL)
+PVRE(PVRSRV_ERROR_PCI_REGION_UNAVAILABLE)
+PVRE(PVRSRV_ERROR_BAD_REGION_SIZE_MISMATCH)
+PVRE(PVRSRV_ERROR_REGISTER_BASE_NOT_SET)
+PVRE(PVRSRV_ERROR_FAILED_TO_ALLOC_USER_MEM)
+PVRE(PVRSRV_ERROR_FAILED_TO_ALLOC_VP_MEMORY)
+PVRE(PVRSRV_ERROR_FAILED_TO_MAP_SHARED_PBDESC)
+PVRE(PVRSRV_ERROR_FAILED_TO_MAP_KERNELVIRTUAL)
+PVRE(PVRSRV_ERROR_FAILED_TO_GET_PHYS_ADDR)
+PVRE(PVRSRV_ERROR_FAILED_TO_ALLOC_VIRT_MEMORY)
+PVRE(PVRSRV_ERROR_FAILED_TO_COPY_VIRT_MEMORY)
+PVRE(PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES)
+PVRE(PVRSRV_ERROR_FAILED_TO_FREE_PAGES)
+PVRE(PVRSRV_ERROR_FAILED_TO_COPY_PAGES)
+PVRE(PVRSRV_ERROR_UNABLE_TO_LOCK_PAGES)
+PVRE(PVRSRV_ERROR_UNABLE_TO_UNLOCK_PAGES)
+PVRE(PVRSRV_ERROR_STILL_MAPPED)
+PVRE(PVRSRV_ERROR_MAPPING_NOT_FOUND)
+PVRE(PVRSRV_ERROR_PHYS_ADDRESS_EXCEEDS_32BIT)
+PVRE(PVRSRV_ERROR_FAILED_TO_MAP_PAGE_TABLE)
+PVRE(PVRSRV_ERROR_INVALID_SEGMENT_BLOCK)
+PVRE(PVRSRV_ERROR_INVALID_GFXDEVDEVDATA)
+PVRE(PVRSRV_ERROR_INVALID_DEVINFO)
+PVRE(PVRSRV_ERROR_INVALID_MEMINFO)
+PVRE(PVRSRV_ERROR_INVALID_MISCINFO)
+PVRE(PVRSRV_ERROR_UNKNOWN_IOCTL)
+PVRE(PVRSRV_ERROR_INVALID_CONTEXT)
+PVRE(PVRSRV_ERROR_UNABLE_TO_DESTROY_CONTEXT)
+PVRE(PVRSRV_ERROR_INVALID_HEAP)
+PVRE(PVRSRV_ERROR_INVALID_KERNELINFO)
+PVRE(PVRSRV_ERROR_UNKNOWN_POWER_STATE)
+PVRE(PVRSRV_ERROR_INVALID_HANDLE_TYPE)
+PVRE(PVRSRV_ERROR_INVALID_WRAP_TYPE)
+PVRE(PVRSRV_ERROR_INVALID_PHYS_ADDR)
+PVRE(PVRSRV_ERROR_INVALID_CPU_ADDR)
+PVRE(PVRSRV_ERROR_INVALID_HEAPINFO)
+PVRE(PVRSRV_ERROR_INVALID_PERPROC)
+PVRE(PVRSRV_ERROR_FAILED_TO_RETRIEVE_HEAPINFO)
+PVRE(PVRSRV_ERROR_INVALID_MAP_REQUEST)
+PVRE(PVRSRV_ERROR_INVALID_UNMAP_REQUEST)
+PVRE(PVRSRV_ERROR_UNABLE_TO_FIND_MAPPING_HEAP)
+PVRE(PVRSRV_ERROR_MAPPING_STILL_IN_USE)
+PVRE(PVRSRV_ERROR_EXCEEDED_HW_LIMITS)
+PVRE(PVRSRV_ERROR_NO_STAGING_BUFFER_ALLOCATED)
+PVRE(PVRSRV_ERROR_UNABLE_TO_CREATE_PERPROC_AREA)
+PVRE(PVRSRV_ERROR_UNABLE_TO_CREATE_EVENT)
+PVRE(PVRSRV_ERROR_UNABLE_TO_ENABLE_EVENT)
+PVRE(PVRSRV_ERROR_UNABLE_TO_REGISTER_EVENT)
+PVRE(PVRSRV_ERROR_UNABLE_TO_DESTROY_EVENT)
+PVRE(PVRSRV_ERROR_UNABLE_TO_CREATE_THREAD)
+PVRE(PVRSRV_ERROR_UNABLE_TO_CLOSE_THREAD)
+PVRE(PVRSRV_ERROR_THREAD_READ_ERROR)
+PVRE(PVRSRV_ERROR_UNABLE_TO_REGISTER_ISR_HANDLER)
+PVRE(PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR)
+PVRE(PVRSRV_ERROR_UNABLE_TO_UNINSTALL_ISR)
+PVRE(PVRSRV_ERROR_ISR_ALREADY_INSTALLED)
+PVRE(PVRSRV_ERROR_ISR_NOT_INSTALLED)
+PVRE(PVRSRV_ERROR_UNABLE_TO_INITIALISE_INTERRUPT)
+PVRE(PVRSRV_ERROR_UNABLE_TO_RETRIEVE_INFO)
+PVRE(PVRSRV_ERROR_UNABLE_TO_DO_BACKWARDS_BLIT)
+PVRE(PVRSRV_ERROR_UNABLE_TO_CLOSE_SERVICES)
+PVRE(PVRSRV_ERROR_UNABLE_TO_REGISTER_CONTEXT)
+PVRE(PVRSRV_ERROR_UNABLE_TO_REGISTER_RESOURCE)
+PVRE(PVRSRV_ERROR_INVALID_CCB_COMMAND)
+PVRE(PVRSRV_ERROR_KERNEL_CCB_FULL)
+PVRE(PVRSRV_ERROR_FLIP_FAILED)
+PVRE(PVRSRV_ERROR_UNBLANK_DISPLAY_FAILED)
+PVRE(PVRSRV_ERROR_TIMEOUT_POLLING_FOR_VALUE)
+PVRE(PVRSRV_ERROR_TIMEOUT_WAITING_FOR_CLIENT_CCB)
+PVRE(PVRSRV_ERROR_CREATE_RENDER_CONTEXT_FAILED)
+PVRE(PVRSRV_ERROR_UNKNOWN_PRIMARY_FRAG)
+PVRE(PVRSRV_ERROR_UNEXPECTED_SECONDARY_FRAG)
+PVRE(PVRSRV_ERROR_UNEXPECTED_PRIMARY_FRAG)
+PVRE(PVRSRV_ERROR_UNABLE_TO_INSERT_FENCE_ID)
+PVRE(PVRSRV_ERROR_BLIT_SETUP_FAILED)
+PVRE(PVRSRV_ERROR_SUBMIT_NEEDED)
+PVRE(PVRSRV_ERROR_PDUMP_NOT_AVAILABLE)
+PVRE(PVRSRV_ERROR_PDUMP_BUFFER_FULL)
+PVRE(PVRSRV_ERROR_PDUMP_BUF_OVERFLOW)
+PVRE(PVRSRV_ERROR_PDUMP_NOT_ACTIVE)
+PVRE(PVRSRV_ERROR_INCOMPLETE_LINE_OVERLAPS_PAGES)
+PVRE(PVRSRV_ERROR_MUTEX_DESTROY_FAILED)
+PVRE(PVRSRV_ERROR_MUTEX_INTERRUPTIBLE_ERROR)
+PVRE(PVRSRV_ERROR_INSUFFICIENT_SCRIPT_SPACE)
+PVRE(PVRSRV_ERROR_INSUFFICIENT_SPACE_FOR_COMMAND)
+PVRE(PVRSRV_ERROR_PROCESS_NOT_INITIALISED)
+PVRE(PVRSRV_ERROR_PROCESS_NOT_FOUND)
+PVRE(PVRSRV_ERROR_SRV_CONNECT_FAILED)
+PVRE(PVRSRV_ERROR_SRV_DISCONNECT_FAILED)
+PVRE(PVRSRV_ERROR_DEINT_PHASE_FAILED)
+PVRE(PVRSRV_ERROR_INIT2_PHASE_FAILED)
+PVRE(PVRSRV_ERROR_UNABLE_TO_FIND_RESOURCE)
+PVRE(PVRSRV_ERROR_NO_DC_DEVICES_FOUND)
+PVRE(PVRSRV_ERROR_DC_INVALID_MAXDEPTH)
+PVRE(PVRSRV_ERROR_UNABLE_TO_OPEN_DC_DEVICE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_UNREGISTER_DEVICE)
+PVRE(PVRSRV_ERROR_NO_DEVICEDATA_FOUND)
+PVRE(PVRSRV_ERROR_NO_DEVICENODE_FOUND)
+PVRE(PVRSRV_ERROR_NO_CLIENTNODE_FOUND)
+PVRE(PVRSRV_ERROR_FAILED_TO_PROCESS_QUEUE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_INIT_TASK)
+PVRE(PVRSRV_ERROR_UNABLE_TO_SCHEDULE_TASK)
+PVRE(PVRSRV_ERROR_UNABLE_TO_KILL_TASK)
+PVRE(PVRSRV_ERROR_UNABLE_TO_ENABLE_TIMER)
+PVRE(PVRSRV_ERROR_UNABLE_TO_DISABLE_TIMER)
+PVRE(PVRSRV_ERROR_UNABLE_TO_REMOVE_TIMER)
+PVRE(PVRSRV_ERROR_UNKNOWN_PIXEL_FORMAT)
+PVRE(PVRSRV_ERROR_UNKNOWN_SCRIPT_OPERATION)
+PVRE(PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE)
+PVRE(PVRSRV_ERROR_HANDLE_NOT_ALLOCATED)
+PVRE(PVRSRV_ERROR_HANDLE_TYPE_MISMATCH)
+PVRE(PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE)
+PVRE(PVRSRV_ERROR_HANDLE_NOT_SHAREABLE)
+PVRE(PVRSRV_ERROR_HANDLE_NOT_FOUND)
+PVRE(PVRSRV_ERROR_INVALID_SUBHANDLE)
+PVRE(PVRSRV_ERROR_HANDLE_BATCH_IN_USE)
+PVRE(PVRSRV_ERROR_HANDLE_BATCH_COMMIT_FAILURE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_CREATE_HASH_TABLE)
+PVRE(PVRSRV_ERROR_INSERT_HASH_TABLE_DATA_FAILED)
+PVRE(PVRSRV_ERROR_UNSUPPORTED_BACKING_STORE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_DESTROY_BM_HEAP)
+PVRE(PVRSRV_ERROR_UNKNOWN_INIT_SERVER_STATE)
+PVRE(PVRSRV_ERROR_NO_FREE_DEVICEIDS_AVAILABLE)
+PVRE(PVRSRV_ERROR_INVALID_DEVICEID)
+PVRE(PVRSRV_ERROR_DEVICEID_NOT_FOUND)
+PVRE(PVRSRV_ERROR_MEMORY_TEST_FAILED)
+PVRE(PVRSRV_ERROR_CPUPADDR_TEST_FAILED)
+PVRE(PVRSRV_ERROR_COPY_TEST_FAILED)
+PVRE(PVRSRV_ERROR_SEMAPHORE_NOT_INITIALISED)
+PVRE(PVRSRV_ERROR_UNABLE_TO_RELEASE_CLOCK)
+PVRE(PVRSRV_ERROR_CLOCK_REQUEST_FAILED)
+PVRE(PVRSRV_ERROR_DISABLE_CLOCK_FAILURE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_SET_CLOCK_RATE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_ROUND_CLOCK_RATE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_ENABLE_CLOCK)
+PVRE(PVRSRV_ERROR_UNABLE_TO_GET_CLOCK)
+PVRE(PVRSRV_ERROR_UNABLE_TO_GET_PARENT_CLOCK)
+PVRE(PVRSRV_ERROR_UNABLE_TO_GET_SYSTEM_CLOCK)
+PVRE(PVRSRV_ERROR_UNKNOWN_SGL_ERROR)
+PVRE(PVRSRV_ERROR_SYSTEM_POWER_CHANGE_FAILURE)
+PVRE(PVRSRV_ERROR_DEVICE_POWER_CHANGE_FAILURE)
+PVRE(PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED)
+PVRE(PVRSRV_ERROR_BAD_SYNC_STATE)
+PVRE(PVRSRV_ERROR_CACHEOP_FAILED)
+PVRE(PVRSRV_ERROR_UNABLE_TO_SET_CACHE_MODE)
+PVRE(PVRSRV_ERROR_FAILED_TO_ALLOC_MMUCONTEXT_ID)
+PVRE(PVRSRV_ERROR_PARAMETER_BUFFER_INVALID_ALIGNMENT)
+PVRE(PVRSRV_ERROR_UNABLE_TO_ACQUIRE_CONNECTION)
+PVRE(PVRSRV_ERROR_UNABLE_TO_RELEASE_CONNECTION)
+PVRE(PVRSRV_ERROR_PHYSHEAP_ID_IN_USE)
+PVRE(PVRSRV_ERROR_PHYSHEAP_ID_INVALID)
+PVRE(PVRSRV_ERROR_HP_REQUEST_TOO_LONG)
+PVRE(PVRSRV_ERROR_INVALID_SYNC_PRIM)
+PVRE(PVRSRV_ERROR_INVALID_SYNC_PRIM_OP)
+PVRE(PVRSRV_ERROR_INVALID_SYNC_CONTEXT)
+PVRE(PVRSRV_ERROR_BP_NOT_SET)
+PVRE(PVRSRV_ERROR_BP_ALREADY_SET)
+PVRE(PVRSRV_ERROR_FEATURE_DISABLED)
+PVRE(PVRSRV_ERROR_REG_CONFIG_ENABLED)
+PVRE(PVRSRV_ERROR_REG_CONFIG_FULL)
+PVRE(PVRSRV_ERROR_REG_CONFIG_INVALID_PI)
+PVRE(PVRSRV_ERROR_MEMORY_ACCESS)
+PVRE(PVRSRV_ERROR_NO_SYSTEM_BUFFER)
+PVRE(PVRSRV_ERROR_DC_INVALID_CONFIG)
+PVRE(PVRSRV_ERROR_DC_INVALID_CROP_RECT)
+PVRE(PVRSRV_ERROR_DC_INVALID_DISPLAY_RECT)
+PVRE(PVRSRV_ERROR_DC_INVALID_BUFFER_DIMS)
+PVRE(PVRSRV_ERROR_DC_INVALID_TRANSFORM)
+PVRE(PVRSRV_ERROR_DC_INVALID_SCALE)
+PVRE(PVRSRV_ERROR_DC_INVALID_CUSTOM)
+PVRE(PVRSRV_ERROR_DC_TOO_MANY_PIPES)
+PVRE(PVRSRV_ERROR_DC_INVALID_PLANE_ALPHA)
+PVRE(PVRSRV_ERROR_NOT_READY)
+PVRE(PVRSRV_ERROR_RESOURCE_UNAVAILABLE)
+PVRE(PVRSRV_ERROR_UNSUPPORTED_PIXEL_FORMAT)
+PVRE(PVRSRV_ERROR_UNSUPPORTED_DIMS)
+PVRE(PVRSRV_ERROR_UNSUPPORTED_CACHE_MODE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_ADD_TIMER)
+PVRE(PVRSRV_ERROR_NOT_FOUND)
+PVRE(PVRSRV_ERROR_ALREADY_OPEN)
+PVRE(PVRSRV_ERROR_STREAM_MISUSE)
+PVRE(PVRSRV_ERROR_STREAM_FULL)
+PVRE(PVRSRV_ERROR_PHYSMEM_NOT_ALLOCATED)
+PVRE(PVRSRV_ERROR_PBSIZE_ALREADY_MAX)
+PVRE(PVRSRV_ERROR_PBSIZE_ALREADY_MIN)
+PVRE(PVRSRV_ERROR_INVALID_PB_CONFIG)
+PVRE(PVRSRV_ERROR_META_THREAD0_NOT_ENABLED)
+PVRE(PVRSRV_ERROR_NOT_AUTHENTICATED)
+PVRE(PVRSRV_ERROR_REQUEST_TDMETACODE_PAGES_FAIL)
+PVRE(PVRSRV_ERROR_INIT_TDMETACODE_PAGES_FAIL)
+PVRE(PVRSRV_ERROR_REQUEST_TDSECUREBUF_PAGES_FAIL)
+PVRE(PVRSRV_ERROR_INIT_TDSECUREBUF_PAGES_FAIL)
+PVRE(PVRSRV_ERROR_MUTEX_ALREADY_CREATED)
+PVRE(PVRSRV_ERROR_DBGTABLE_ALREADY_REGISTERED)
+PVRE(PVRSRV_ERROR_ALREADY_EXISTS)
+PVRE(PVRSRV_ERROR_UNABLE_TO_SEND_PULSE)
+PVRE(PVRSRV_ERROR_TASK_FAILED)
+PVRE(PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED)
+PVRE(PVRSRV_ERROR_INVALID_GPU_ADDR)
+PVRE(PVRSRV_ERROR_INVALID_OFFSET)
+PVRE(PVRSRV_ERROR_CCCB_STALLED)
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/include/pvrsrv_memallocflags.h b/drivers/external_drivers/intel_media/graphics/rgx/include/pvrsrv_memallocflags.h
new file mode 100644
index 0000000..094f157
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/include/pvrsrv_memallocflags.h
@@ -0,0 +1,528 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device Memory Management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This file defines flags used on memory allocations and mappings
+                These flags are relevant throughout the memory management 
+                software stack and are specified by users of services and 
+                understood by all levels of the memory management in both 
+                client and server.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef PVRSRV_MEMALLOCFLAGS_H
+#define PVRSRV_MEMALLOCFLAGS_H
+
+#include "img_types.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_memallocflags.h"
+#endif
+typedef IMG_UINT32 PVRSRV_MEMALLOCFLAGS_T;
+
+/*!
+ *  **********************************************************
+ *  *                                                        *
+ *  *                       MAPPING FLAGS                    *
+ *  *                                                        *
+ *  **********************************************************
+ *
+ * PVRSRV_MEMALLOCFLAG_GPU_READABLE
+ *
+ * This flag affects the device MMU protection flags, and specifies
+ * that the memory may be read by the GPU (is this always true?)
+ *
+ * Typically all device memory allocations would specify this flag.
+ *
+ * At the moment, memory allocations without this flag are not supported
+ *
+ * This flag will live with the PMR, thus subsequent mappings would
+ * honour this flag.
+ *
+ * This is a dual purpose flag.  It specifies that memory is permitted
+ * to be read by the GPU, and also requests that the allocation is
+ * mapped into the GPU as a readable mapping
+ *
+ * To be clear:
+ * - When used as an argument on PMR creation; it specifies
+ *       that GPU readable mappings will be _permitted_
+ * - When used as an argument to a "map" function: it specifies
+ *       that a GPU readable mapping is _desired_
+ * - When used as an argument to "AllocDeviceMem": it specifies
+ *       that the PMR will be created with permission to be mapped
+ *       with a GPU readable mapping, _and_ that this PMR will be
+ *       mapped with a GPU readble mapping.
+ * This distinction becomes important when (a) we export allocations;
+ * and (b) when we separate the creation of the PMR from the mapping.
+ */
+#define PVRSRV_MEMALLOCFLAG_GPU_READABLE 		(1U<<0)
+#define PVRSRV_CHECK_GPU_READABLE(uiFlags) 		((uiFlags & PVRSRV_MEMALLOCFLAG_GPU_READABLE) != 0)
+
+/*!
+ * PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE
+ *
+ * This flag affects the device MMU protection flags, and specifies
+ * that the memory may be written by the GPU
+ *
+ * Using this flag on an allocation signifies that the allocation is
+ * intended to be written by the GPU.
+ *
+ * Omitting this flag causes a read-only mapping.
+ *
+ * This flag will live with the PMR, thus subsequent mappings would
+ * honour this flag.
+ *
+ * This is a dual purpose flag.  It specifies that memory is permitted
+ * to be written by the GPU, and also requests that the allocation is
+ * mapped into the GPU as a writeable mapping (see note above about
+ * permission vs. mapping mode, and why this flag causes permissions
+ * to be inferred from mapping mode on first allocation)
+ *
+ * N.B.  This flag has no relevance to the CPU's MMU mapping, if any,
+ * and would therefore not enforce read-only mapping on CPU.
+ */
+#define PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE       (1U<<1) /*!< mapped as writeable to the GPU */
+#define PVRSRV_CHECK_GPU_WRITEABLE(uiFlags)				((uiFlags & PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE) != 0)
+
+#define PVRSRV_MEMALLOCFLAG_GPU_READ_PERMITTED  (1U<<2) /*!< can be mapped is GPU readable in another GPU mem context */
+#define PVRSRV_CHECK_GPU_READ_PERMITTED(uiFlags) 		((uiFlags & PVRSRV_MEMALLOCFLAG_GPU_READ_PERMITTED) != 0)
+
+#define PVRSRV_MEMALLOCFLAG_GPU_WRITE_PERMITTED (1U<<3) /*!< can be mapped is GPU writable in another GPU mem context */
+#define PVRSRV_CHECK_GPU_WRITE_PERMITTED(uiFlags)		((uiFlags & PVRSRV_MEMALLOCFLAG_GPU_WRITE_PERMITTED) != 0)
+
+#define PVRSRV_MEMALLOCFLAG_CPU_READABLE        (1U<<4) /*!< mapped as readable to the CPU */
+#define PVRSRV_CHECK_CPU_READABLE(uiFlags) 				((uiFlags & PVRSRV_MEMALLOCFLAG_CPU_READABLE) != 0)
+
+#define PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE       (1U<<5) /*!< mapped as writeable to the CPU */
+#define PVRSRV_CHECK_CPU_WRITEABLE(uiFlags)				((uiFlags & PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE) != 0)
+
+#define PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED  (1U<<6) /*!< can be mapped is CPU readable in another CPU mem context */
+#define PVRSRV_CHECK_CPU_READ_PERMITTED(uiFlags)		((uiFlags & PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED) != 0)
+
+#define PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED (1U<<7) /*!< can be mapped is CPU writable in another CPU mem context */
+#define PVRSRV_CHECK_CPU_WRITE_PERMITTED(uiFlags)		((uiFlags & PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED) != 0)
+
+
+/*
+ *  **********************************************************
+ *  *                                                        *
+ *  *                    CACHE CONTROL FLAGS                 *
+ *  *                                                        *
+ *  **********************************************************
+ */
+
+/*
+	GPU domain
+	==========
+
+	The following defines are used to control the GPU cache bit field.
+	The defines are mutually exclusive.
+	
+	A helper macro, PVRSRV_GPU_CACHE_MODE, is provided to obtain just the GPU cache
+	bit field from the flags. This should be used whenever the GPU cache mode
+	needs to be determined.
+*/
+
+/*!
+   GPU domain. Request uncached memory. This means that any writes to memory
+  allocated with this flag are written straight to memory and thus are coherent
+  for any device in the system.
+*/
+#define PVRSRV_MEMALLOCFLAG_GPU_UNCACHED         		(0U<<8)
+#define PVRSRV_CHECK_GPU_UNCACHED(uiFlags)		 		((uiFlags & PVRSRV_MEMALLOCFLAG_GPU_UNCACHED) != 0)
+
+/*!
+   GPU domain. Use write combiner (if supported) to combine sequential writes 
+   together to reduce memory access by doing burst writes.
+*/
+#define PVRSRV_MEMALLOCFLAG_GPU_WRITE_COMBINE    		(1U<<8)
+#define PVRSRV_CHECK_GPU_WRITE_COMBINE(uiFlags)	 		((uiFlags & PVRSRV_MEMALLOCFLAG_GPU_WRITE_COMBINE) != 0)
+
+/*!
+    GPU domain. This flag affects the device MMU protection flags.
+ 
+    This flag ensures that the GPU and the CPU will always be coherent.
+    This is done by either by snooping each others caches or, if this is
+    not supported, by making the allocation uncached. Please note that
+    this will _not_ guaranty coherency with memory so if this memory
+    is accessed by another device (eg display controller) a flush will
+    be required.
+*/
+#define PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT   		(2U<<8)
+#define PVRSRV_CHECK_GPU_CACHE_COHERENT(uiFlags) 		((uiFlags & PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT) != 0)
+
+/*!
+   GPU domain. Request cached memory, but not coherent (i.e. no cache snooping).
+   This means that if the allocation needs to transition from one device
+   to another services has to be informed so it can flush/invalidate the 
+   appropriate caches.
+
+    Note: We reserve 3 bits in the CPU/GPU cache mode to allow for future
+    expansion.
+*/
+#define PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT 		(3U<<8)
+#define PVRSRV_CHECK_GPU_CACHE_INCOHERENT(uiFlags)		((uiFlags & PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT) != 0)
+
+/*!
+    GPU domain.
+ 
+	Request cached cached coherent memory. This is like 
+	PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT but doesn't fall back on
+	uncached memory if the system doesn't support cache-snooping
+	but rather returns an error.
+*/
+#define PVRSRV_MEMALLOCFLAG_GPU_CACHED_CACHE_COHERENT   (4U<<8)
+#define PVRSRV_CHECK_GPU_CACHED_CACHE_COHERENT(uiFlags)	((uiFlags & PVRSRV_MEMALLOCFLAG_GPU_CACHED_CACHE_COHERENT) != 0)
+
+/*!
+	GPU domain.
+
+	This flag is for internal use only and is used to indicate
+	that the underlying allocation should be cached on the GPU
+	after all the snooping and coherent checks have been done
+*/
+#define PVRSRV_MEMALLOCFLAG_GPU_CACHED					(7U<<8)
+#define PVRSRV_CHECK_GPU_CACHED(uiFlags)				((uiFlags & PVRSRV_MEMALLOCFLAG_GPU_CACHED) != 0)
+
+/*!
+	GPU domain.
+	
+	GPU cache mode mask
+*/
+#define PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK  		(7U<<8)
+#define PVRSRV_GPU_CACHE_MODE(uiFlags)					(uiFlags & PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK)
+
+
+/*
+	CPU domain
+	==========
+
+	The following defines are used to control the CPU cache bit field.
+	The defines are mutually exclusive.
+	
+	A helper macro, PVRSRV_CPU_CACHE_MODE, is provided to obtain just the CPU cache
+	bit field from the flags. This should be used whenever the CPU cache mode
+	needs to be determined.
+*/
+
+/*!
+   CPU domain. Request uncached memory. This means that any writes to memory
+  allocated with this flag are written straight to memory and thus are coherent
+  for any device in the system.
+*/
+#define PVRSRV_MEMALLOCFLAG_CPU_UNCACHED         		(0U<<11)
+#define PVRSRV_CHECK_CPU_UNCACHED(uiFlags)				((uiFlags & PVRSRV_MEMALLOCFLAG_CPU_UNCACHED) != 0)
+
+/*!
+   CPU domain. Use write combiner (if supported) to combine sequential writes 
+   together to reduce memory access by doing burst writes.
+*/
+#define PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE 		   	(1U<<11)
+#define PVRSRV_CHECK_CPU_WRITE_COMBINE(uiFlags)			((uiFlags & PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE) != 0)
+
+/*!
+    CPU domain. This flag affects the device MMU protection flags.
+ 
+    This flag ensures that the GPU and the CPU will always be coherent.
+    This is done by either by snooping each others caches or, if this is
+    not supported, by making the allocation uncached. Please note that
+    this will _not_ guaranty coherency with memory so if this memory
+    is accessed by another device (eg display controller) a flush will
+    be required.
+*/
+#define PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT   		(2U<<11)
+#define PVRSRV_CHECK_CPU_CACHE_COHERENT(uiFlags)		((uiFlags & PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT) != 0)
+
+/*!
+   CPU domain. Request cached memory, but not coherent (i.e. no cache snooping).
+   This means that if the allocation needs to transition from one device
+   to another services has to be informed so it can flush/invalidate the 
+   appropriate caches.
+
+    Note: We reserve 3 bits in the CPU/GPU cache mode to allow for future
+    expansion.
+*/
+#define PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT 		(3U<<11)
+#define PVRSRV_CHECK_CPU_CACHE_INCOHERENT(uiFlags)		((uiFlags & PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT) != 0)
+
+/*!
+    CPU domain.
+ 
+	Request cached cached coherent memory. This is like 
+	PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT but doesn't fall back on
+	uncached memory if the system doesn't support cache-snooping
+	but rather returns an error.
+*/
+#define PVRSRV_MEMALLOCFLAG_CPU_CACHED_CACHE_COHERENT   (4U<<11)
+#define PVRSRV_CHECK_CPU_CACHED_CACHE_COHERENT(uiFlags)	((uiFlags & PVRSRV_MEMALLOCFLAG_CPU_CACHED_CACHE_COHERENT) != 0)
+
+/*!
+	CPU domain.
+
+	This flag is for internal use only and is used to indicate
+	that the underlying allocation should be cached on the CPU
+	after all the snooping and coherent checks have been done
+*/
+#define PVRSRV_MEMALLOCFLAG_CPU_CACHED					(7U<<11)
+#define PVRSRV_CHECK_CPU_CACHED(uiFlags)				((uiFlags & PVRSRV_MEMALLOCFLAG_CPU_CACHED) != 0)
+
+/*!
+	CPU domain.
+	
+	CPU cache mode mask
+*/
+#define PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK  		(7U<<11)
+#define PVRSRV_CPU_CACHE_MODE(uiFlags)					(uiFlags & PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK)
+
+/* Helper flags for usual cases */
+#define PVRSRV_MEMALLOCFLAG_UNCACHED             		(PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | PVRSRV_MEMALLOCFLAG_CPU_UNCACHED) /*!< Memory will be uncached */
+#define PVRSRV_CHECK_UNCACHED(uiFlags)					((uiFlags & PVRSRV_MEMALLOCFLAG_UNCACHED) != 0)
+
+#define PVRSRV_MEMALLOCFLAG_WRITE_COMBINE        		(PVRSRV_MEMALLOCFLAG_GPU_WRITE_COMBINE | PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE)   /*!< Memory will be write-combined */
+#define PVRSRV_CHECK_WRITE_COMBINE(uiFlags)				((uiFlags & PVRSRV_MEMALLOCFLAG_WRITE_COMBINE) != 0)
+
+#define PVRSRV_MEMALLOCFLAG_CACHE_COHERENT       		(PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT | PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT)  /*!< Memory will be cache-coherent */
+#define PVRSRV_CHECK_CACHE_COHERENT(uiFlags)			((uiFlags & PVRSRV_MEMALLOCFLAG_CACHE_COHERENT) != 0)
+
+#define PVRSRV_MEMALLOCFLAG_CACHE_INCOHERENT     		(PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT) /*!< Memory will be cache-incoherent */
+#define PVRSRV_CHECK_CACHE_INCOHERENT(uiFlags)			((uiFlags & PVRSRV_MEMALLOCFLAG_CACHE_INCOHERENT) != 0)
+
+
+/*!
+   CPU MMU Flags mask -- intended for use internal to services only
+ */
+#define PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK  (PVRSRV_MEMALLOCFLAG_CPU_READABLE | \
+												PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \
+												PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK)
+
+/*!
+   MMU Flags mask -- intended for use internal to services only - used
+   for partitioning the flags bits and determining which flags to pass
+   down to mmu_common.c
+ */
+#define PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK  (PVRSRV_MEMALLOCFLAG_GPU_READABLE | \
+                                                PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \
+                                                PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK)
+
+/*!
+    PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE
+ 
+    Indicates that the PMR created due to this allocation will support
+    in-kernel CPU mappings.  Only privileged processes may use this
+    flag as it may cause wastage of precious kernel virtual memory on
+    some platforms.
+ */
+#define PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE 		(1U<<14)
+#define PVRSRV_CHECK_KERNEL_CPU_MAPPABLE(uiFlags)		((uiFlags & PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE) != 0)
+
+
+
+/*
+ *
+ *  **********************************************************
+ *  *                                                        *
+ *  *            ALLOC MEMORY FLAGS                          *
+ *  *                                                        *
+ *  **********************************************************
+ *
+ * (Bits 15)
+ *
+ */
+#define PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC			(1U<<15)
+#define PVRSRV_CHECK_ON_DEMAND(uiFlags)					((uiFlags & PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC) != 0)
+
+/*!
+    PVRSRV_MEMALLOCFLAG_CPU_LOCAL
+
+    Indicates that the allocation will primarily be accessed by
+    the CPU, so a UMA allocation (if available) is preferable.
+    If not set, the allocation will primarily be accessed by
+    the GPU, so LMA allocation (if available) is preferable.
+ */
+#define PVRSRV_MEMALLOCFLAG_CPU_LOCAL 					(1U<<16)
+#define PVRSRV_CHECK_CPU_LOCAL(uiFlags)					((uiFlags & PVRSRV_MEMALLOCFLAG_CPU_LOCAL) != 0)
+
+/*
+ *
+ *  **********************************************************
+ *  *                                                        *
+ *  *            MEMORY ZEROING AND POISONING FLAGS          *
+ *  *                                                        *
+ *  **********************************************************
+ *
+ * Zero / Poison, on alloc/free
+ *
+ * We think the following usecases are required:
+ *
+ *  don't poison or zero on alloc or free
+ *     (normal operation, also most efficient)
+ *  poison on alloc
+ *     (for helping to highlight bugs)
+ *  poison on alloc and free
+ *     (for helping to highlight bugs)
+ *  zero on alloc
+ *     (avoid highlighting security issues in other uses of memory)
+ *  zero on alloc and poison on free
+ *     (avoid highlighting security issues in other uses of memory,
+ *      while helping to highlight a subset of bugs e.g. memory
+ *      freed prematurely)
+ *
+ * Since there are more than 4, we can't encode this in just two bits,
+ * so we might as well have a separate flag for each of the three
+ * actions.
+ */
+
+/*!
+    PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC
+    Ensures that the memory allocated is initialized with zeroes.
+ */
+#define PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC 				(1U<<31)
+#define PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags)				((uiFlags & PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) != 0)
+
+/*!
+    VRSRV_MEMALLOCFLAG_POISON_ON_ALLOC
+
+    Scribbles over the allocated memory with a poison value
+
+    Not compatible with ZERO_ON_ALLOC
+
+    Poisoning is very deliberately _not_ reflected in PDump as we want
+    a simulation to cry loudly if the initialised data propogates to a
+    result.
+ */
+#define PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC 			(1U<<30)
+#define PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags) 			((uiFlags & PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC) != 0)
+
+/*!
+    PVRSRV_MEMALLOCFLAG_POISON_ON_FREE
+
+    Causes memory to be trashed when freed, as a lazy man's security
+    measure.
+ */
+#define PVRSRV_MEMALLOCFLAG_POISON_ON_FREE (1U<<29)
+#define PVRSRV_CHECK_POISON_ON_FREE(uiFlags)			((uiFlags & PVRSRV_MEMALLOCFLAG_POISON_ON_FREE) != 0)
+
+/*
+ *
+ *  **********************************************************
+ *  *                                                        *
+ *  *                Device specific MMU flags               *
+ *  *                                                        *
+ *  **********************************************************
+ *
+ * (Bits 24 to 27)
+ *
+ * Some services controled devices have device specific control
+ * bits in their page table entries, we need to allow these flags
+ * to be passed down the memory managament layers so the user
+ * can control these bits.
+ */
+
+#define PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_OFFSET		24
+#define PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK		0x0f000000UL
+#define PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(n)	\
+			(((n) << PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_OFFSET) & \
+			PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK)
+
+/*!
+  PMR flags mask -- for internal services use only.  This is the set
+  of flags that will be passed down and stored with the PMR, this also
+  includes the MMU flags which the PMR has to pass down to mm_common.c
+  at PMRMap time.
+*/
+#define PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK  (PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK | \
+											PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \
+                                            PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \
+                                            PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \
+                                            PVRSRV_MEMALLOCFLAG_POISON_ON_FREE | \
+                                            PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK | \
+                                            PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \
+                                            PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC | \
+                                            PVRSRV_MEMALLOCFLAG_CPU_LOCAL)
+
+#if ((~(PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK) & PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK) != 0)
+#error PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK is not a subset of PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK
+#endif
+
+/*!
+  RA differentiation mask
+
+  for use internal to services
+
+  this is the set of flags bits that are able to determine whether a
+  pair of allocations are permitted to live in the same page table.
+  Allocations whose flags differ in any of these places would be
+  allocated from separate RA Imports and therefore would never coexist
+  in the same page
+*/
+#define PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK (PVRSRV_MEMALLOCFLAG_GPU_READABLE | \
+                                                      PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \
+                                                      PVRSRV_MEMALLOCFLAG_CPU_READABLE | \
+                                                      PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \
+                                                      PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK | \
+                                                      PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK | \
+                                                      PVRSRV_MEMALLOCFLAG_POISON_ON_FREE | \
+                                                      PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK | \
+                                                      PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC)
+
+#if ((~(PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK) & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK) != 0)
+#error PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK is not a subset of PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK
+#endif
+
+/*!
+  Flags that affect _allocation_
+*/
+#define PVRSRV_MEMALLOCFLAGS_PERALLOCFLAGSMASK (0xFFFFFFFFU)
+
+/*!
+  Flags that affect _mapping_
+*/
+#define PVRSRV_MEMALLOCFLAGS_PERMAPPINGFLAGSMASK   (PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK | \
+													PVRSRV_MEMALLOCFLAG_GPU_READABLE | \
+                                                    PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \
+                                                    PVRSRV_MEMALLOCFLAG_CPU_READABLE | \
+                                                    PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \
+                                                    PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK | \
+                                                    PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK | \
+                                                    PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC)
+
+#if ((~(PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK) & PVRSRV_MEMALLOCFLAGS_PERMAPPINGFLAGSMASK) != 0)
+#error PVRSRV_MEMALLOCFLAGS_PERMAPPINGFLAGSMASK is not a subset of PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK
+#endif
+
+#endif /* #ifndef PVRSRV_MEMALLOCFLAGS_H */
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/include/pvrsrv_surface.h b/drivers/external_drivers/intel_media/graphics/rgx/include/pvrsrv_surface.h
new file mode 100644
index 0000000..d98d85e
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/include/pvrsrv_surface.h
@@ -0,0 +1,150 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device class external
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines DC specific structures which are externally visible
+                (i.e. visible to clients of services), but are also required
+                within services.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _PVRSRV_SURFACE_H_
+#define _PVRSRV_SURFACE_H_
+
+#include "img_types.h"
+#include "fbc_types.h"
+
+#define PVRSRV_SURFACE_TRANSFORM_NONE	   (0 << 0)
+#define PVRSRV_SURFACE_TRANSFORM_FLIP_H    (1 << 0)
+#define PVRSRV_SURFACE_TRANSFORM_FLIP_V    (1 << 1)
+#define PVRSRV_SURFACE_TRANSFORM_ROT_90    (1 << 2)
+#define PVRSRV_SURFACE_TRANSFORM_ROT_180   ((1 << 0) + (1 << 1))
+#define PVRSRV_SURFACE_TRANSFORM_ROT_270   ((1 << 0) + (1 << 1) + (1 << 2))
+
+#define PVRSRV_SURFACE_BLENDING_NONE	   0
+#define PVRSRV_SURFACE_BLENDING_PREMULT	   1
+#define PVRSRV_SURFACE_BLENDING_COVERAGE   2
+
+typedef enum _PVRSRV_SURFACE_MEMLAYOUT_  {
+	PVRSRV_SURFACE_MEMLAYOUT_STRIDED = 0,		/*!< Strided memory buffer */
+	PVRSRV_SURFACE_MEMLAYOUT_FBC,				/*!< Frame buffer compressed buffer */
+	PVRSRV_SURFACE_MEMLAYOUT_BIF_PAGE_TILED,	/*!< BIF page tiled buffer */
+} PVRSRV_SURFACE_MEMLAYOUT;
+
+typedef struct _PVRSRV_SURFACE_FBC_LAYOUT_ {
+	FB_COMPRESSION	eFBCompressionMode;
+} PVRSRV_SURFACE_FBC_LAYOUT;
+
+typedef struct _PVRSRV_SURFACE_FORMAT_
+{
+	IMG_UINT32					ePixFormat;
+	PVRSRV_SURFACE_MEMLAYOUT	eMemLayout;
+	union {
+		PVRSRV_SURFACE_FBC_LAYOUT	sFBCLayout;
+	} u;
+} PVRSRV_SURFACE_FORMAT;
+
+typedef struct _PVRSRV_SURFACE_DIMS_
+{
+	IMG_UINT32		ui32Width;
+	IMG_UINT32		ui32Height;
+} PVRSRV_SURFACE_DIMS;
+
+typedef struct _PVRSRV_SURFACE_INFO_
+{
+	PVRSRV_SURFACE_DIMS		sDims;
+	PVRSRV_SURFACE_FORMAT	sFormat;
+} PVRSRV_SURFACE_INFO;
+
+typedef struct _PVRSRV_SURFACE_RECT_
+{
+	IMG_INT32				i32XOffset;
+	IMG_INT32				i32YOffset;
+	PVRSRV_SURFACE_DIMS		sDims;
+} PVRSRV_SURFACE_RECT;
+
+typedef struct _PVRSRV_SURFACE_CONFIG_INFO_
+{
+	/*!< Crop applied to surface (BEFORE transformation) */
+	PVRSRV_SURFACE_RECT		sCrop;
+
+	/*!< Region of screen to display surface in (AFTER scaling) */
+	PVRSRV_SURFACE_RECT		sDisplay;
+
+	/*!< Surface rotation / flip / mirror */
+	IMG_UINT32				ui32Transform;
+
+	/*!< Alpha blending mode e.g. none / premult / coverage */
+	IMG_UINT32				eBlendType;
+
+	/*!< Custom data for the display engine */
+	IMG_UINT32				ui32Custom;
+
+	/*!< Plane alpha */
+	IMG_UINT8				ui8PlaneAlpha;
+	IMG_UINT8				ui8Reserved1[3];
+} PVRSRV_SURFACE_CONFIG_INFO;
+
+typedef struct _PVRSRV_PANEL_INFO_
+{
+	PVRSRV_SURFACE_INFO sSurfaceInfo;
+	IMG_UINT32			ui32RefreshRate;
+	IMG_UINT32			ui32XDpi;
+	IMG_UINT32			ui32YDpi;
+} PVRSRV_PANEL_INFO;
+
+/*
+	Helper function to create a Config Info based on a Surface Info
+	to do a flip with no scale, transformation etc.
+*/
+static INLINE IMG_VOID SurfaceConfigFromSurfInfo(PVRSRV_SURFACE_INFO *psSurfaceInfo,
+												 PVRSRV_SURFACE_CONFIG_INFO *psConfigInfo)
+{
+	psConfigInfo->sCrop.sDims = psSurfaceInfo->sDims;
+	psConfigInfo->sCrop.i32XOffset = 0;
+	psConfigInfo->sCrop.i32YOffset = 0;
+	psConfigInfo->sDisplay.sDims = psSurfaceInfo->sDims;
+	psConfigInfo->sDisplay.i32XOffset = 0;
+	psConfigInfo->sDisplay.i32YOffset = 0;
+	psConfigInfo->ui32Transform = PVRSRV_SURFACE_TRANSFORM_NONE;
+	psConfigInfo->eBlendType = PVRSRV_SURFACE_BLENDING_NONE;
+	psConfigInfo->ui32Custom = 0;
+	psConfigInfo->ui8PlaneAlpha = 0xff;
+}
+
+#endif /* _PVRSRV_SURFACE_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/include/pvrversion.h b/drivers/external_drivers/intel_media/graphics/rgx/include/pvrversion.h
new file mode 100644
index 0000000..4c083f0
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/include/pvrversion.h
@@ -0,0 +1,72 @@
+/*************************************************************************/ /*!
+@File
+@Title          Version numbers and strings.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Version numbers and strings for PVR Consumer services
+                components.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _PVRVERSION_H_
+#define _PVRVERSION_H_
+
+#define PVR_STR(X) #X
+#define PVR_STR2(X) PVR_STR(X)
+
+#define PVRVERSION_MAJ               1
+#define PVRVERSION_MIN               5
+
+#define PVRVERSION_FAMILY           "rogueddk"
+#define PVRVERSION_BRANCHNAME       "1.5"
+#define PVRVERSION_BUILD             3591622
+#define PVRVERSION_BSCONTROL        "Rogue_DDK_Android"
+
+#define PVRVERSION_STRING           "Rogue_DDK_Android rogueddk 1.5@" PVR_STR2(PVRVERSION_BUILD)
+#define PVRVERSION_STRING_SHORT     "1.5@" PVR_STR2(PVRVERSION_BUILD) ""
+
+#define COPYRIGHT_TXT               "Copyright (c) Imagination Technologies Ltd. All Rights Reserved."
+
+#define PVRVERSION_BUILD_HI          359
+#define PVRVERSION_BUILD_LO          1622
+#define PVRVERSION_STRING_NUMERIC    PVR_STR2(PVRVERSION_MAJ) "." PVR_STR2(PVRVERSION_MIN) "." PVR_STR2(PVRVERSION_BUILD_HI) "." PVR_STR2(PVRVERSION_BUILD_LO)
+
+#define PVRVERSION_PACK(MAJ,MIN) ((((MAJ)&0xFFFF) << 16) | (((MIN)&0xFFFF) << 0))
+#define PVRVERSION_UNPACK_MAJ(VERSION) (((VERSION) >> 16) & 0xFFFF)
+#define PVRVERSION_UNPACK_MIN(VERSION) (((VERSION) >> 0) & 0xFFFF)
+
+#endif /* _PVRVERSION_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/include/rgx_common.h b/drivers/external_drivers/intel_media/graphics/rgx/include/rgx_common.h
new file mode 100644
index 0000000..1ffd810
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/include/rgx_common.h
@@ -0,0 +1,146 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX Common Types and Defines Header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description	Common types and definitions for RGX software
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef RGX_COMMON_H_
+#define RGX_COMMON_H_
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "img_defs.h"
+
+/* Included to get the BVNC_KM_N defined and other feature defs */
+#include "km/rgxdefs_km.h"
+
+/*! This macro represents a mask of LSBs that must be zero on data structure
+ * sizes and offsets to ensure they are 8-byte granular on types shared between
+ * the FW and host driver */
+#define RGX_FW_ALIGNMENT_LSB (7)
+
+/*! Macro to test structure size alignment */
+#define RGX_FW_STRUCT_SIZE_ASSERT(_a)	\
+	BLD_ASSERT((sizeof(_a)&RGX_FW_ALIGNMENT_LSB)==0, _a##struct_size)
+
+/*! Macro to test structure member alignment */
+#define RGX_FW_STRUCT_OFFSET_ASSERT(_a, _b)	\
+	BLD_ASSERT((offsetof(_a, _b)&RGX_FW_ALIGNMENT_LSB)==0, _a##struct_offset)
+
+
+/*! The number of performance counters in each layout block */
+#if defined(RGX_FEATURE_CLUSTER_GROUPING)
+#define RGX_HWPERF_CNTRS_IN_BLK 6
+#define RGX_HWPERF_CNTRS_IN_BLK_MIN 4
+#else
+#define RGX_HWPERF_CNTRS_IN_BLK 4
+#define RGX_HWPERF_CNTRS_IN_BLK_MIN 4
+#endif
+
+
+/*! The master definition for data masters known to the firmware of RGX.
+ * The DM in a V1 HWPerf packet uses this definition. */
+typedef enum _RGXFWIF_DM_
+{
+	RGXFWIF_DM_GP			= 0,
+	RGXFWIF_DM_2D			= 1,
+	RGXFWIF_DM_TA			= 2,
+	RGXFWIF_DM_3D			= 3,
+	RGXFWIF_DM_CDM			= 4,
+#if defined(RGX_FEATURE_RAY_TRACING)
+	RGXFWIF_DM_RTU			= 5,
+	RGXFWIF_DM_SHG			= 6,
+#endif
+	RGXFWIF_DM_LAST,
+
+	RGXFWIF_DM_FORCE_I32  = 0x7fffffff   /*!< Force enum to be at least 32-bits wide */
+} RGXFWIF_DM;
+
+#if defined(RGX_FEATURE_RAY_TRACING)
+#define RGXFWIF_DM_MAX_MTS 8
+#else
+#define RGXFWIF_DM_MAX_MTS 6
+#endif
+
+#if defined(RGX_FEATURE_RAY_TRACING)
+/* Maximum number of DM in use: GP, 2D, TA, 3D, CDM, SHG, RTU */
+#define RGXFWIF_DM_MAX			(7)
+#else
+#define RGXFWIF_DM_MAX			(5)
+#endif
+
+/* Min/Max number of HW DMs (all but GP) */
+#if defined(RGX_FEATURE_TLA)
+#define RGXFWIF_HWDM_MIN		(1)
+#else
+#define RGXFWIF_HWDM_MIN		(2)
+#endif
+#define RGXFWIF_HWDM_MAX		(RGXFWIF_DM_MAX)
+
+/*!
+ ******************************************************************************
+ * RGXFW Compiler alignment definitions
+ *****************************************************************************/
+#if defined(__GNUC__)
+#define RGXFW_ALIGN			__attribute__ ((aligned (8)))
+#elif defined(_MSC_VER)
+#define RGXFW_ALIGN			__declspec(align(8))
+#pragma warning (disable : 4324)
+#else
+#error "Align MACROS need to be defined for this compiler"
+#endif
+
+/*!
+ ******************************************************************************
+ * Force 8-byte alignment for structures allocated uncached.
+ *****************************************************************************/
+#define UNCACHED_ALIGN      RGXFW_ALIGN
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* RGX_COMMON_H_ */
+
+/******************************************************************************
+ End of file
+******************************************************************************/
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/include/rgx_heaps.h b/drivers/external_drivers/intel_media/graphics/rgx/include/rgx_heaps.h
new file mode 100644
index 0000000..e476f94
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/include/rgx_heaps.h
@@ -0,0 +1,98 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX heap definitions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGX_HEAPS_H__)
+#define __RGX_HEAPS_H__
+
+#include "km/rgxdefs_km.h"
+
+/* RGX Heap IDs, note: not all heaps are available to clients */
+/* N.B.  Old heap identifiers are deprecated now that the old memory
+   management is. New heap identifiers should be suitably renamed */
+#define RGX_UNDEFINED_HEAP_ID					(~0LU)          /*!< RGX Undefined Heap ID */
+#define RGX_GENERAL_HEAP_ID						0               /*!< RGX General Heap ID */
+#define RGX_PDSCODEDATA_HEAP_ID					1               /*!< RGX PDS Code/Data Heap ID */
+//#define RGX_3DPARAMETERS_HEAP_ID				2               /*!< RGX 3D Parameters Heap ID */
+#define RGX_USCCODE_HEAP_ID						2               /*!< RGX USC Code Heap ID */
+#define RGX_FIRMWARE_HEAP_ID					3               /*!< RGX Firmware Heap ID */
+#define RGX_TQ3DPARAMETERS_HEAP_ID				4               /*!< RGX Firmware Heap ID */
+#define RGX_BIF_TILING_HEAP_1_ID				5 				/*!< RGX BIF Tiling Heap 1 ID */
+#define RGX_BIF_TILING_HEAP_2_ID				6 				/*!< RGX BIF Tiling Heap 2 ID */
+#define RGX_BIF_TILING_HEAP_3_ID				7 				/*!< RGX BIF Tiling Heap 3 ID */
+#define RGX_BIF_TILING_HEAP_4_ID				8 				/*!< RGX BIF Tiling Heap 4 ID */
+#define RGX_HWBRN37200_HEAP_ID					9				/*!< RGX HWBRN37200 */
+#define RGX_DOPPLER_HEAP_ID						10				/*!< Doppler Heap ID */
+#define RGX_DOPPLER_OVERFLOW_HEAP_ID			11				/*!< Doppler Overflow Heap ID */
+
+/* FIXME: work out what this ought to be.  In the old days it was
+   typically bigger than it needed to be.  Is the correct thing
+   "max + 1" ?? */
+#define RGX_MAX_HEAP_ID     	(RGX_DOPPLER_OVERFLOW_HEAP_ID + 1)		/*!< Max Valid Heap ID */
+
+/*
+  Identify heaps by their names
+*/
+#define RGX_GENERAL_HEAP_IDENT 			"General"               /*!< RGX General Heap Identifier */
+#define RGX_PDSCODEDATA_HEAP_IDENT 		"PDS Code and Data"     /*!< RGX PDS Code/Data Heap Identifier */
+#define RGX_USCCODE_HEAP_IDENT			"USC Code"              /*!< RGX USC Code Heap Identifier */
+#define RGX_TQ3DPARAMETERS_HEAP_IDENT	"TQ3DParameters"        /*!< RGX TQ 3D Parameters Heap Identifier */
+#define RGX_BIF_TILING_HEAP_1_IDENT	    "BIF Tiling Heap l"	    /*!< RGX BIF Tiling Heap 1 identifier */
+#define RGX_BIF_TILING_HEAP_2_IDENT	    "BIF Tiling Heap 2"	    /*!< RGX BIF Tiling Heap 2 identifier */
+#define RGX_BIF_TILING_HEAP_3_IDENT	    "BIF Tiling Heap 3"	    /*!< RGX BIF Tiling Heap 3 identifier */
+#define RGX_BIF_TILING_HEAP_4_IDENT	    "BIF Tiling Heap 4"	    /*!< RGX BIF Tiling Heap 4 identifier */
+#define RGX_DOPPLER_HEAP_IDENT			"Doppler"				/*!< Doppler Heap Identifier */
+#define RGX_DOPPLER_OVERFLOW_HEAP_IDENT	"Doppler Overflow"				/*!< Doppler Heap Identifier */
+
+/* BIF tiling heaps have specific buffer requirements based on their XStride
+ * configuration. This is detailed in the BIF tiling documentation and ensures
+ * that the bits swapped by the BIF tiling algorithm do not result in addresses
+ * outside the allocated buffer. The representation here reflects the diagram
+ * in the BIF tiling documentation.
+ * XStride is defined for a platform in sysconfig.h, but the resulting alignment
+ * can be queried through the PVRSRVGetHeapLog2ImportAlignment() API.
+ */
+#define RGX_BIF_TILING_HEAP_STRIDE_LOG2_FROM_XSTRIDE(X)        (X+1+8)
+#define RGX_BIF_TILING_HEAP_ALIGN_LOG2_FROM_XSTRIDE(X)       (4+X+1+8)
+#define RGX_BIF_TILING_HEAP_STRIDE_LOG2_FROM_ALIGN_LOG2(A)       (A-4)
+
+#endif /* __RGX_HEAPS_H__ */
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/include/rgx_hwperf_km.h b/drivers/external_drivers/intel_media/graphics/rgx/include/rgx_hwperf_km.h
new file mode 100644
index 0000000..86880b4
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/include/rgx_hwperf_km.h
@@ -0,0 +1,385 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX HWPerf Types and Defines Header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description	Common data types definitions for hardware performance API
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef RGX_HWPERF_KM_H_
+#define RGX_HWPERF_KM_H_
+
+/* 
+ * This header file holds the HWPerf related macros and types needed by the
+ * code in the Kernel Mode (KM) server/driver module and its content is
+ * intended to be suitable for distribution under a public software license.
+ * The definitions within are common and may be used in user-mode, kernel-mode
+ * and firmware compilation units.
+ */
+ 
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#define RGX_HWPERF_V2_FORMAT 2
+
+#include "rgx_common.h"
+
+
+/******************************************************************************
+ * 	Data Stream Common Types
+ *****************************************************************************/
+
+/* These structures are used on both GPU and CPU and must be a size that is a
+ * multiple of 64 bits, 8 bytes to allow the FW to write 8 byte quantities
+ * at 8 byte aligned addresses.  BLD_ASSERT() is used to check this.
+ */
+ 
+/*! Type used to encode the event that generated the HW performance packet.
+ * NOTE: When this type is updated the corresponding hwperfbin2json tool source
+ * needs to be updated as well. Also need to update the table in rgxhwperf.c.
+ * The RGX_HWPERF_EVENT_MASK_* macros will also need updating when adding new
+ * types.
+ */
+typedef enum
+{
+	RGX_HWPERF_INVALID				= 0x00,
+	/* FW types 0x01..0x07 */
+	RGX_HWPERF_FW_BGSTART			= 0x01,
+	RGX_HWPERF_FW_BGEND				= 0x02,
+	RGX_HWPERF_FW_IRQSTART			= 0x03,
+
+	RGX_HWPERF_FW_IRQEND			= 0x04,
+	RGX_HWPERF_FW_DBGSTART			= 0x05,
+	RGX_HWPERF_FW_DBGEND			= 0x06,
+
+	/* HW types 0x08..0x18 */
+	RGX_HWPERF_HW_TAKICK			= 0x08,
+	RGX_HWPERF_HW_TAFINISHED		= 0x09,
+	RGX_HWPERF_HW_3DTQKICK			= 0x0A,
+/*	RGX_HWPERF_HW_3DTQFINISHED		= 0x17, */
+/*	RGX_HWPERF_HW_3DSPMKICK			= 0x11, */
+/*	RGX_HWPERF_HW_3DSPMFINISHED		= 0x18, */
+	RGX_HWPERF_HW_3DKICK			= 0x0B,
+
+	RGX_HWPERF_HW_3DFINISHED		= 0x0C,
+	RGX_HWPERF_HW_CDMKICK			= 0x0D,
+	RGX_HWPERF_HW_CDMFINISHED		= 0x0E,
+	RGX_HWPERF_HW_TLAKICK			= 0x0F,
+
+	RGX_HWPERF_HW_TLAFINISHED		= 0x10,
+	RGX_HWPERF_HW_3DSPMKICK			= 0x11,
+	RGX_HWPERF_HW_PERIODIC			= 0x12,
+	RGX_HWPERF_HW_RTUKICK			= 0x13,
+	
+	RGX_HWPERF_HW_RTUFINISHED		= 0x14,
+	RGX_HWPERF_HW_SHGKICK			= 0x15,
+	RGX_HWPERF_HW_SHGFINISHED		= 0x16,
+	RGX_HWPERF_HW_3DTQFINISHED		= 0x17,
+
+	RGX_HWPERF_HW_3DSPMFINISHED		= 0x18,
+
+	/* other types 0x1A..0x1F */
+	RGX_HWPERF_CLKS_CHG				= 0x1A,
+	RGX_HWPERF_GPU_STATE_CHG		= 0x1B,
+
+	/* power types 0x20..0x27 */
+	RGX_HWPERF_PWR_EST_REQUEST		= 0x20,
+	RGX_HWPERF_PWR_EST_READY		= 0x21,
+	RGX_HWPERF_PWR_EST_RESULT		= 0x22,
+	RGX_HWPERF_PWR_CHG				= 0x23,
+
+	/* context switch types 0x30..0x31 */
+	RGX_HWPERF_CSW_START			= 0x30,
+	RGX_HWPERF_CSW_FINISHED			= 0x31,
+	
+	/* last */
+	RGX_HWPERF_LAST_TYPE,
+
+	/* This enumeration must have a value that is a power of two as it is
+	 * used in masks and a filter bit field (currently 64 bits long).
+	 */
+	RGX_HWPERF_MAX_TYPE				= 0x40
+} RGX_HWPERF_EVENT_TYPE;
+
+/* The event type values are incrementing integers for use as a shift ordinal
+ * in the event filtering process at the point events are generated.
+ * This scheme thus implies a limit of 63 event types.
+ */
+BLD_ASSERT((RGX_HWPERF_LAST_TYPE<RGX_HWPERF_MAX_TYPE), rgx_hwperf_h)
+
+/*! Type obsolete and will be removed in a later release, use RGXFWIF_DM */
+typedef RGXFWIF_DM RGX_HWPERF_DM;
+#define RGX_HWPERF_DM_GP	RGXFWIF_DM_GP
+#define RGX_HWPERF_DM_2D	RGXFWIF_DM_2D
+#define RGX_HWPERF_DM_TA	RGXFWIF_DM_TA
+#define RGX_HWPERF_DM_3D	RGXFWIF_DM_3D
+#define RGX_HWPERF_DM_CDM	RGXFWIF_DM_CDM
+#define RGX_HWPERF_DM_RTU	RGXFWIF_DM_RTU
+#define RGX_HWPERF_DM_SHG   RGXFWIF_DM_SHG
+#define RGX_HWPERF_DM_LAST	RGXFWIF_DM_LAST
+
+
+/******************************************************************************
+ * 	Packet Format Version 2 Types
+ *****************************************************************************/
+
+/*! Signature ASCII pattern 'HWP2' found in the first word of a HWPerfV2 packet
+ */
+#define HWPERF_PACKET_V2_SIG		0x48575032
+/*! Signature ASCII pattern 'HWPA' found in the first word of a HWPerfV2a packet
+ */
+#define HWPERF_PACKET_V2A_SIG		0x48575041
+
+/*! Signature ASCII pattern 'HWPB' found in the first word of a HWPerfV2b packet
+ */
+#define HWPERF_PACKET_V2B_SIG		0x48575042
+
+#define HWPERF_PACKET_ISVALID(_ptr) (((_ptr) == HWPERF_PACKET_V2_SIG) || ((_ptr) == HWPERF_PACKET_V2A_SIG)|| ((_ptr) == HWPERF_PACKET_V2B_SIG))
+
+/*! This structure defines version 2 of the packet format which is
+ * based around a header and a variable length data payload structure.
+ * The address of the next packet can be found by adding the ui16Size field
+ * in the header to the current packet address.
+ * Producers of packets must always ensure the size field is a multiple of 8
+ * as packets must start on an 8-byte granular address.
+ */
+typedef struct
+{
+	/* HEADER - packet header fields common to all packet types */
+	IMG_UINT32  ui32Sig;        /*!< Always the value HWPERF_PACKET_SIG */
+
+	IMG_UINT32  ui32Size;       /*!< Overall packet size in bytes, includes
+	                             * header and payload. Size is a 16-bit field
+	                             * stored in the 16 LSb. 16 MSb reserved.
+	                             * Use RGX_HWPERF_MAKE_SIZE_* and RGX_HWPERF_GET_SIZE
+	                             * macros to set/get, never write directly. */
+
+	IMG_UINT32  eTypeId;        /*!< Includes event type and META thread ID in
+	                             * the 16 LSb. 16 MSb reserved.
+	                             * Use RGX_HWPERF_MAKE_TYPEID and RGX_HWPERF_GET_*
+	                             * macros to set/get, never write directly. */
+
+	IMG_UINT32  ui32Ordinal;    /*!< Sequential number of the packet */
+	IMG_UINT64  ui64RGXTimer;   /*!< Value of RGX_CR_TIMER at event */
+
+	/* PAYLOAD - bytes from this point on in the buffer are from the
+	 * RGX_HWPERF_V2_PACKET_DATA union which encodes the payload data specific
+	 * to the event type set in the header. When the structure in the union
+	 * has a variable length member e.g. HW packets the payload length
+	 * varies.
+	 */
+} RGX_HWPERF_V2_PACKET_HDR, *RGX_PHWPERF_V2_PACKET_HDR;
+
+RGX_FW_STRUCT_OFFSET_ASSERT(RGX_HWPERF_V2_PACKET_HDR, ui64RGXTimer)
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_V2_PACKET_HDR)
+
+
+/*! Mask for use with the IMG_UINT32 ui32Size header field */
+#define RGX_HWPERF_SIZE_MASK			0xFFFFU
+
+/*! Macro which takes a structure name and provides the packet size for
+ * a fixed size payload packet for assignment to the ui16Size field. */
+#define RGX_HWPERF_MAKE_SIZE_FIXED(_struct)       ((IMG_UINT32)(RGX_HWPERF_SIZE_MASK&(sizeof(RGX_HWPERF_V2_PACKET_HDR)+sizeof(_struct))))
+
+/*! Macro which takes the number of bytes written in the data payload of a
+ * packet for a variable size payload packet, rounds it up to 8 bytes where
+ * it may be assigned to the ui16Size field. */
+#define RGX_HWPERF_MAKE_SIZE_VARIABLE(_size)       ((IMG_UINT32)(RGX_HWPERF_SIZE_MASK&(sizeof(RGX_HWPERF_V2_PACKET_HDR)+PVR_ALIGN(_size, 8))))
+
+/*! Macro to obtain the size of the packet */
+#define RGX_HWPERF_GET_SIZE(_packet_addr)    ((IMG_UINT16)(((_packet_addr)->ui32Size) & RGX_HWPERF_SIZE_MASK))
+
+/*! Macro to obtain the size of the packet data */
+#define RGX_HWPERF_GET_DATA_SIZE(_packet_addr)   (RGX_HWPERF_GET_SIZE(_packet_addr) - sizeof(RGX_HWPERF_V2_PACKET_HDR))
+
+
+/*! Masks for use with the IMG_UINT32 eTypeId header field */
+#define RGX_HWPERF_TYPEID_MASK			0xFFFFU
+#define RGX_HWPERF_TYPEID_THREAD_MASK	0x8000U
+#define RGX_HWPERF_TYPEID_EVENT_MASK	(RGX_HWPERF_MAX_TYPE-1)
+
+/*! Meta thread macros for encoding the ID into the type field of a packet */
+#define RGX_HWPERF_META_THREAD_SHIFT	15U
+#define RGX_HWPERF_META_THREAD_ID0		0x0U
+#define RGX_HWPERF_META_THREAD_ID1		0x1U
+/*! Obsolete, kept for source compatibility */
+#define RGX_HWPERF_META_THREAD_MASK		0x1U
+
+/*! Macros used to set the packet type and encode meta thread ID (0|1) within */
+#define RGX_HWPERF_MAKE_TYPEID(_type,_thread) ((IMG_UINT32) ((RGX_HWPERF_TYPEID_THREAD_MASK&((_thread)<<RGX_HWPERF_META_THREAD_SHIFT)) | (RGX_HWPERF_TYPEID_EVENT_MASK&(_type))))
+
+/*! Obtains the event type that generated the packet */
+#define RGX_HWPERF_GET_TYPE(_packet_addr)            (((_packet_addr)->eTypeId) & RGX_HWPERF_TYPEID_EVENT_MASK)
+
+/*! Obtains the META Thread number that generated the packet */
+#define RGX_HWPERF_GET_THREAD_ID(_packet_addr)       (((((_packet_addr)->eTypeId)&RGX_HWPERF_TYPEID_THREAD_MASK) >> RGX_HWPERF_META_THREAD_SHIFT))
+
+/*! Macros to obtain a typed pointer to a packet or data structure given a packet address */
+#define RGX_HWPERF_GET_PACKET(_buffer_addr)            ((RGX_HWPERF_V2_PACKET_HDR*)  (_buffer_addr))
+#define RGX_HWPERF_GET_PACKET_DATA_BYTES(_packet_addr) ((IMG_BYTE*) ( ((IMG_BYTE*)(_packet_addr)) +sizeof(RGX_HWPERF_V2_PACKET_HDR) ) )
+#define RGX_HWPERF_GET_NEXT_PACKET(_packet_addr)       ((RGX_HWPERF_V2_PACKET_HDR*)  ( ((IMG_BYTE*)(_packet_addr))+(RGX_HWPERF_SIZE_MASK&(_packet_addr)->ui32Size)) )
+
+/*! Obtains a typed pointer to a packet header given the packed data address */
+#define RGX_HWPERF_GET_PACKET_HEADER(_packet_addr)     ((RGX_HWPERF_V2_PACKET_HDR*)  ( ((IMG_BYTE*)(_packet_addr)) - sizeof(RGX_HWPERF_V2_PACKET_HDR) ))
+
+
+/*! Masks for use with the IMG_UINT32 ui32BlkInfo field */
+#define RGX_HWPERF_BLKINFO_BLKCOUNT_MASK	0xFFFF0000U
+#define RGX_HWPERF_BLKINFO_BLKOFFSET_MASK	0x0000FFFFU
+
+/*! Shift for the NumBlocks and counter block offset field in ui32BlkInfo */
+#define RGX_HWPERF_BLKINFO_BLKCOUNT_SHIFT	16U
+#define RGX_HWPERF_BLKINFO_BLKOFFSET_SHIFT 0U
+
+/*! Macro used to set the block info word as a combination of two 16-bit integers */
+#define RGX_HWPERF_MAKE_BLKINFO(_numblks,_blkoffset) ((IMG_UINT32) ((RGX_HWPERF_BLKINFO_BLKCOUNT_MASK&((_numblks) << RGX_HWPERF_BLKINFO_BLKCOUNT_SHIFT)) | (RGX_HWPERF_BLKINFO_BLKOFFSET_MASK&((_blkoffset) << RGX_HWPERF_BLKINFO_BLKOFFSET_SHIFT))))
+
+/*! Macro used to obtain get the number of counter blocks present in the packet */
+#define RGX_HWPERF_GET_BLKCOUNT(_blkinfo)            ((_blkinfo & RGX_HWPERF_BLKINFO_BLKCOUNT_MASK) >> RGX_HWPERF_BLKINFO_BLKCOUNT_SHIFT)
+
+/*! Obtains the offset of the counter block stream in the packet */
+#define RGX_HWPERF_GET_BLKOFFSET(_blkinfo)           ((_blkinfo & RGX_HWPERF_BLKINFO_BLKOFFSET_MASK) >> RGX_HWPERF_BLKINFO_BLKOFFSET_SHIFT)
+
+/* This is the maximum frame contexts that are supported in the driver at the moment */
+#define RGX_HWPERF_HW_MAX_WORK_CONTEXT               2
+/*! This structure holds the field data of a Hardware packet.
+ */
+#define RGX_HWPERF_HW_DATA_FIELDS_LIST \
+IMG_UINT32 ui32DMCyc;         /*!< DataMaster cycle count register, 0 if none */\
+IMG_UINT32 ui32FrameNum;      /*!< Frame number */\
+IMG_UINT32 ui32PID;           /*!< Process identifier */\
+IMG_UINT32 ui32DMContext;     /*!< RenderContext for a TA,3D, Compute context for CDM, etc. */\
+IMG_UINT32 ui32RenderTarget;  /*!< RenderTarget for a TA,3D, 0x0 otherwise */\
+IMG_UINT32 ui32ExtJobRef;     /*!< Externally provided job reference used to track work for debugging purposes */\
+IMG_UINT32 ui32IntJobRef;     /*!< Internally provided job reference used to track work for debugging purposes */\
+IMG_UINT32 ui32TimeCorrIndex; /*!< Index to the time correlation at the time the packet was generated */\
+IMG_UINT32 ui32BlkInfo;       /*!< <31..16> NumBlocks <15..0> Counterblock stream offset */\
+IMG_UINT32 ui32WorkContext;   /*!< Work context number. Frame number for RTU DM, 0x0 otherwise */
+
+typedef struct
+{
+	RGX_HWPERF_HW_DATA_FIELDS_LIST
+} RGX_HWPERF_HW_DATA_FIELDS;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_HW_DATA_FIELDS)
+
+
+/******************************************************************************
+ * 	API Types
+ *****************************************************************************/
+
+
+/*! Mask macros for use with RGXCtrlHWPerf() API.
+ * RGX_HWPERF_EVENT_ALL is obsolete, use RGX_HWPERF_EVENT_MASK_ALL
+ */
+#define RGX_HWPERF_EVENT_MASK_NONE          (IMG_UINT64_C(0x0000000000000000))
+#define RGX_HWPERF_EVENT_MASK_ALL           (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_HWPERF_EVENT_MASK_ALL_FW        (IMG_UINT64_C(0x000000000000007E))
+#define RGX_HWPERF_EVENT_MASK_HW_KICKFINISH (IMG_UINT64_C(0x0000000001FBFF00))
+#define RGX_HWPERF_EVENT_MASK_HW_PERIODIC   (IMG_UINT64_C(0x0000000000040000))
+#define RGX_HWPERF_EVENT_MASK_ALL_HW        (RGX_HWPERF_EVENT_MASK_HW_KICKFINISH \
+                                            | RGX_HWPERF_EVENT_MASK_HW_PERIODIC)
+#define RGX_HWPERF_EVENT_MASK_ALL_PWR_EST   (IMG_UINT64_C(0X0000000700000000))
+#define RGX_HWPERF_EVENT_MASK_ALL_PWR       (IMG_UINT64_C(0X0000000800000000))
+#define RGX_HWPERF_EVENT_MASK_VALUE(e)      (((IMG_UINT64)1)<<(e))
+
+/*! Type used in the RGX API RGXConfigureAndEnableHWPerfCounters()
+ * It is used to configure the performance counter module in a layout
+ * block and allows one or more counters in the block to be 
+ * configured in one operation based on the counter select mask. The bit
+ * shifts for this are the values in RGX_HWPERF_CNTBLK_COUNTER_ID. This mask
+ * also encodes which values in the arrays are valid, for example, if bit 1 set
+ * then aui8Mode[1], aui16GroupSelect[1], aui16BitSelect[1], aui32BatchMax[1],
+ * and aui32BatchMin[1] must be set. If these array elements are all set to 
+ * 0 then the counter will not count and will not be in the HW event, 
+ * effectively disabling the counter from the callers point of view. 
+ * If any are non zero then the counter will be included in the HW event.
+ *
+ * Each layout block has 4 or 6 counters that can be programmed independently to
+ * profile the performance of a HW block. Each counter can be configured to
+ * accumulate statistics from 1 of 32 counter groups defined for that block.
+ * Each counter group can have up to 16	signals/bits defined that can be
+ * selected. Each counter may accumulate in one of two modes.
+ * See hwdefs/regapiperf.h for block/group/signal definitions.
+ */
+ typedef struct _RGX_HWPERF_CONFIG_CNTBLK_
+{
+	/*! Counter block ID, see RGX_HWPERF_CNTBLK_ID */
+	IMG_UINT16 ui16BlockID;
+
+	/*! 4 or 6 LSBs are a mask of which counters to configure. Bit 0 is counter 0,
+	 * bit 1 is counter 1 on so on. */
+	IMG_UINT8   ui8CounterSelect;
+
+	/*! 4 or 6 LSBs 0 for counting 1's in the group, 1 for treating the group
+	 * signals as a number for unsigned addition. Bit 0 is counter 0, bit 1 is
+	 * counter 1 on so on. This member relates to the MODE field
+	 * in the RGX_CR_<N>_PERF_SELECTm register for each counter */
+	IMG_UINT8	ui8Mode;
+
+	/*! 5 or 6 LSBs used as the GROUP_SELECT field in the RGX_CR_<N>_PERF_SELECTm
+	 * register. Array index 0 is counter 0, index 1 is counter 1 and so on. */
+	IMG_UINT8	aui8GroupSelect[RGX_HWPERF_CNTRS_IN_BLK];
+
+	/*! 16 LSBs used as the BIT_SELECT field in the RGX_CR_<N>_PERF_SELECTm
+	 * register. Array indexes relate to counters as above. */
+	IMG_UINT16  aui16BitSelect[RGX_HWPERF_CNTRS_IN_BLK];
+
+	/*! 14 LSBs used as the BATCH_MAX field in the RGX_CR_<N>_PERF_SELECTm
+	 * register. Array indexes relate to counters as above. */
+	IMG_UINT32  aui32BatchMax[RGX_HWPERF_CNTRS_IN_BLK];
+
+	/*! 14 LSBs used as the BATCH_MIN field in the RGX_CR_<N>_PERF_SELECTm
+	 * register. Array indexes relate to counters as above. */
+	IMG_UINT32  aui32BatchMin[RGX_HWPERF_CNTRS_IN_BLK];
+} UNCACHED_ALIGN RGX_HWPERF_CONFIG_CNTBLK;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_CONFIG_CNTBLK)
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* RGX_HWPERF_KM_H_ */
+
+/******************************************************************************
+ End of file
+******************************************************************************/
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/include/rgx_memallocflags.h b/drivers/external_drivers/intel_media/graphics/rgx/include/rgx_memallocflags.h
new file mode 100644
index 0000000..5fa1909
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/include/rgx_memallocflags.h
@@ -0,0 +1,49 @@
+/**************************************************************************/ /*!
+@File
+@Title          RGX memory allocation flags
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGX_MEMALLOCFLAGS_H_
+#define _RGX_MEMALLOCFLAGS_H_
+
+#define PMMETA_PROTECT		(1 << 0)	/* Memory that only the PM and Meta can access */
+#define META_CACHED		    (1 << 1)	/* Memory that is cached in META */
+
+#endif
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/include/rgx_meta.h b/drivers/external_drivers/intel_media/graphics/rgx/include/rgx_meta.h
new file mode 100644
index 0000000..b1c0aaa
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/include/rgx_meta.h
@@ -0,0 +1,352 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX META definitions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX META helper definitions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__RGX_META_H__)
+#define __RGX_META_H__
+
+
+/***** The META HW register definitions in the file are updated manually *****/
+
+
+#include "img_defs.h"
+
+/************************************************************************
+* META registers and MACROS 
+************************************************************************/
+#define	META_CR_CTRLREG_BASE(T)					(0x04800000 + 0x1000*(T))
+
+#define META_CR_TXPRIVEXT						(0x048000E8)
+#define META_CR_TXPRIVEXT_MINIM_EN				(0x1<<7)
+
+#define META_CR_SYSC_JTAG_THREAD				(0x04830030)
+#define META_CR_SYSC_JTAG_THREAD_PRIV_EN		(0x00000004)
+
+#define META_CR_PERF_COUNT0						(0x0480FFE0)
+#define META_CR_PERF_COUNT1						(0x0480FFE8)
+#define META_CR_PERF_COUNT_CTRL_SHIFT			(28)
+#define META_CR_PERF_COUNT_CTRL_MASK			(0xF0000000)
+#define META_CR_PERF_COUNT_CTRL_DCACHEHITS		(0x8 << META_CR_PERF_COUNT_CTRL_SHIFT)
+#define META_CR_PERF_COUNT_CTRL_ICACHEHITS		(0x9 << META_CR_PERF_COUNT_CTRL_SHIFT)
+#define META_CR_PERF_COUNT_CTRL_ICACHEMISS		(0xA << META_CR_PERF_COUNT_CTRL_SHIFT)
+#define META_CR_PERF_COUNT_CTRL_ICORE			(0xD << META_CR_PERF_COUNT_CTRL_SHIFT)
+#define META_CR_PERF_COUNT_THR_SHIFT			(24)
+#define META_CR_PERF_COUNT_THR_MASK				(0x0F000000)
+#define META_CR_PERF_COUNT_THR_0				(0x1 << META_CR_PERF_COUNT_THR_SHIFT)
+#define META_CR_PERF_COUNT_THR_1				(0x2 << META_CR_PERF_COUNT_THR_1)
+
+#define META_CR_TxVECINT_BHALT					(0x04820500)
+#define META_CR_PERF_ICORE0						(0x0480FFD0)
+#define META_CR_PERF_ICORE1						(0x0480FFD8)
+#define META_CR_PERF_ICORE_DCACHEMISS			(0x8)
+
+typedef enum
+{
+	META_PERF_CONF_NONE = 0,
+	META_PERF_CONF_ICACHE = 1,
+	META_PERF_CONF_DCACHE = 2,
+	META_PERF_CONF_POLLS = 3,
+	META_PERF_CONF_CUSTOM_TIMER = 4
+} META_PERF_CONF;
+
+#define META_CR_PERF_COUNT(CTRL, THR)			((META_CR_PERF_COUNT_CTRL_##CTRL << META_CR_PERF_COUNT_CTRL_SHIFT) | \
+												 (THR << META_CR_PERF_COUNT_THR_SHIFT))
+
+#define	META_CR_TXUXXRXDT_OFFSET				(META_CR_CTRLREG_BASE(0) + 0x0000FFF0)
+#define	META_CR_TXUXXRXRQ_OFFSET				(META_CR_CTRLREG_BASE(0) + 0x0000FFF8)
+
+#define META_CR_TXUXXRXRQ_DREADY_BIT			(0x80000000)	/* Poll for done */
+#define META_CR_TXUXXRXRQ_RDnWR_BIT  			(0x00010000)	/* Set for read  */
+#define META_CR_TXUXXRXRQ_TX_S       			(12)
+#define META_CR_TXUXXRXRQ_RX_S       			(4)
+#define META_CR_TXUXXRXRQ_UXX_S      			(0)
+
+#define META_CR_TXUA0_ID						(0x3)			/* Address unit regs */
+#define META_CR_TXUPC_ID						(0x5)			/* PC registers */
+
+/* Macros to calculate register access values */
+#define META_CR_CORE_REG(Thr, RegNum, Unit)	(((Thr)			<< META_CR_TXUXXRXRQ_TX_S ) | \
+											 ((RegNum)		<< META_CR_TXUXXRXRQ_RX_S ) | \
+											 ((Unit)		<< META_CR_TXUXXRXRQ_UXX_S))
+
+#define META_CR_THR0_PC		META_CR_CORE_REG(0, 0, META_CR_TXUPC_ID)
+#define META_CR_THR0_PCX	META_CR_CORE_REG(0, 1, META_CR_TXUPC_ID)
+#define META_CR_THR0_SP		META_CR_CORE_REG(0, 0, META_CR_TXUA0_ID)
+
+#define META_CR_THR1_PC		META_CR_CORE_REG(1, 0, META_CR_TXUPC_ID)
+#define META_CR_THR1_PCX	META_CR_CORE_REG(1, 1, META_CR_TXUPC_ID)
+#define META_CR_THR1_SP		META_CR_CORE_REG(1, 0, META_CR_TXUA0_ID)
+
+#define SP_ACCESS(Thread)	META_CR_CORE_REG(Thread, 0, META_CR_TXUA0_ID)
+#define PC_ACCESS(Thread)	META_CR_CORE_REG(Thread, 0, META_CR_TXUPC_ID)
+
+#define	META_CR_COREREG_ENABLE			(0x0000000)
+#define	META_CR_COREREG_STATUS			(0x0000010)
+#define	META_CR_COREREG_DEFR			(0x00000A0)
+
+#define	META_CR_T0ENABLE_OFFSET			(META_CR_CTRLREG_BASE(0) + META_CR_COREREG_ENABLE)
+#define	META_CR_T0STATUS_OFFSET			(META_CR_CTRLREG_BASE(0) + META_CR_COREREG_STATUS)
+#define	META_CR_T0DEFR_OFFSET			(META_CR_CTRLREG_BASE(0) + META_CR_COREREG_DEFR)
+
+#define	META_CR_T1ENABLE_OFFSET			(META_CR_CTRLREG_BASE(1) + META_CR_COREREG_ENABLE)
+#define	META_CR_T1STATUS_OFFSET			(META_CR_CTRLREG_BASE(1) + META_CR_COREREG_STATUS)
+#define	META_CR_T1DEFR_OFFSET			(META_CR_CTRLREG_BASE(1) + META_CR_COREREG_DEFR)
+
+#define META_CR_TXENABLE_ENABLE_BIT		(0x00000001)   /* Set if running */
+#define META_CR_TXSTATUS_PRIV			(0x00020000)   
+
+#define META_MEM_GLOBAL_RANGE_BIT				(0x80000000)
+
+
+/************************************************************************
+* META LDR Format
+************************************************************************/
+/* Block header structure */
+typedef struct 
+{
+	IMG_UINT32	ui32DevID;
+	IMG_UINT32	ui32SLCode;
+	IMG_UINT32	ui32SLData;
+	IMG_UINT16	ui16PLCtrl;
+	IMG_UINT16	ui16CRC;
+
+} RGX_META_LDR_BLOCK_HDR;
+
+/* High level data stream block  structure */
+typedef struct 
+{
+	IMG_UINT16	ui16Cmd;
+	IMG_UINT16	ui16Length;
+	IMG_UINT32	ui32Next;
+	IMG_UINT32	aui32CmdData[4];
+
+} RGX_META_LDR_L1_DATA_BLK;
+
+/* High level data stream block  structure */
+typedef struct
+{
+	IMG_UINT16	ui16Tag;
+	IMG_UINT16	ui16Length;
+	IMG_UINT32	aui32BlockData[4];
+
+} RGX_META_LDR_L2_DATA_BLK;
+
+/* Config command structure */
+typedef struct
+{
+	IMG_UINT32	ui32Type;
+	IMG_UINT32	aui32BlockData[4];
+
+} RGX_META_LDR_CFG_BLK;
+
+/* Block type definitions */
+#define RGX_META_LDR_COMMENT_TYPE_MASK			(0x0010)
+#define RGX_META_LDR_BLK_IS_COMMENT(X)			((X & RGX_META_LDR_COMMENT_TYPE_MASK) != 0)
+
+/* Command definitions
+	Value	Name			Description
+	0		LoadMem			Load memory with binary data.
+	1		LoadCore		Load a set of core registers.
+	2		LoadMMReg		Load a set of memory mapped registers.
+	3		StartThreads	Set each thread PC and SP, then enable	threads.
+	4		ZeroMem			Zeros a memory region.
+	5		Config			Perform	a configuration command. */
+#define RGX_META_LDR_CMD_MASK				(0x000F)
+
+#define RGX_META_LDR_CMD_LOADMEM			(0x0000)
+#define RGX_META_LDR_CMD_LOADCORE			(0x0001)
+#define RGX_META_LDR_CMD_LOADMMREG			(0x0002)
+#define RGX_META_LDR_CMD_START_THREADS		(0x0003)
+#define RGX_META_LDR_CMD_ZEROMEM			(0x0004)
+#define RGX_META_LDR_CMD_CONFIG			(0x0005)
+
+/* Config Command definitions
+	Value	Name		Description
+	0		Pause		Pause for x times 100 instructions
+	1		Read		Read a value from register - No value return needed.
+						Utilises effects of issuing reads to certain registers
+	2		Write		Write to mem location
+	3		MemSet		Set mem to value
+	4		MemCheck	check mem for specific value.*/
+#define RGX_META_LDR_CFG_PAUSE			(0x0000)
+#define RGX_META_LDR_CFG_READ			(0x0001)
+#define RGX_META_LDR_CFG_WRITE			(0x0002)
+#define RGX_META_LDR_CFG_MEMSET			(0x0003)
+#define RGX_META_LDR_CFG_MEMCHECK		(0x0004)
+
+/************************************************************************
+* RGX FW segmented MMU definitions
+************************************************************************/
+/* All threads can access the segment */
+#define RGXFW_SEGMMU_ALLTHRS	(0xf << 8)
+/* Writeable */
+#define RGXFW_SEGMMU_WRITEABLE	(0x1 << 1)
+/* All threads can access and writeable */
+#define RGXFW_SEGMMU_ALLTHRS_WRITEABLE	(RGXFW_SEGMMU_ALLTHRS | RGXFW_SEGMMU_WRITEABLE)
+
+/* Direct map regions mapping (8-10) */
+#define RGXFW_SEGMMU_DMAP_ID_START			(8)
+#define RGXFW_SEGMMU_DMAP_ADDR_START		(0x06000000U)
+#define RGXFW_SEGMMU_DMAP_ADDR_META			(0x86000000U)
+#define RGXFW_SEGMMU_DMAP_SIZE				(8*1024*1024) /* 8 MB */
+
+/* Direct map region 11 used for mapping GPU memory */
+#define RGXFW_SEGMMU_DMAP_GPU_ID			(11)
+#define RGXFW_SEGMMU_DMAP_GPU_ADDR_START	(RGXFW_SEGMMU_DMAP_ADDR_START + 3*RGXFW_SEGMMU_DMAP_SIZE)
+
+/* Segment IDs */
+#define RGXFW_SEGMMU_TEXT_ID			(0)
+#define RGXFW_SEGMMU_SHARED_ID			(1)
+#define RGXFW_SEGMMU_BOOTLDR_ID			(2)
+#define RGXFW_SEGMMU_DATA_ID			(3)
+
+#define RGXFW_SEGMMU_META_DM_ID			(0x7)
+
+#if defined(HW_ERN_45914)
+/* SLC caching strategy is emitted through the segment MMU. All the segments configured 
+   through this macro are CACHED in the SLC. The interface has been kept the same to 
+   simplify the code changes. The bifdm argument is ignored (no longer relevant). */
+#if defined(HW_ERN_49144)
+#define RGXFW_SEGMMU_OUTADDR_TOP_S7(pers, coheren, mmu_ctx)     ( (((IMG_UINT64) ((pers)    & 0x3))  << 50) | \
+                                                                  (((IMG_UINT64) ((mmu_ctx) & 0xFF)) << 42) | \
+                                                                  (((IMG_UINT64) ((coheren) & 0x1))  << 40) )
+#define RGXFW_SEGMMU_OUTADDR_TOP_S7_SLC_CACHED(mmu_ctx)         RGXFW_SEGMMU_OUTADDR_TOP_S7(0x3, 0x0, mmu_ctx)
+#define RGXFW_SEGMMU_OUTADDR_TOP_S7_SLC_UNCACHED(mmu_ctx)       RGXFW_SEGMMU_OUTADDR_TOP_S7(0x0, 0x1, mmu_ctx)
+
+/* Set FW code/data cached in the SLC as default */
+#define RGXFW_SEGMMU_OUTADDR_TOP(mmu_ctx, bifdm)                RGXFW_SEGMMU_OUTADDR_TOP_S7_SLC_CACHED(mmu_ctx | (bifdm&0x0))
+#else
+#define RGXFW_SEGMMU_OUTADDR_TOP_S7(pers, coheren, mmu_ctx)     ( (((IMG_UINT64) ((pers)    & 0x3))  << 52) | \
+                                                                  (((IMG_UINT64) ((mmu_ctx) & 0xFF)) << 44) | \
+                                                                  (((IMG_UINT64) ((coheren) & 0x1))  << 40) )
+#define RGXFW_SEGMMU_OUTADDR_TOP_S7_SLC_CACHED(mmu_ctx)         RGXFW_SEGMMU_OUTADDR_TOP_S7(0x3, 0x0, mmu_ctx)
+#define RGXFW_SEGMMU_OUTADDR_TOP_S7_SLC_UNCACHED(mmu_ctx)       RGXFW_SEGMMU_OUTADDR_TOP_S7(0x0, 0x1, mmu_ctx)
+
+/* Set FW code/data cached in the SLC as default */
+#define RGXFW_SEGMMU_OUTADDR_TOP(mmu_ctx, bifdm)                RGXFW_SEGMMU_OUTADDR_TOP_S7_SLC_CACHED(mmu_ctx | (bifdm&0x0))
+#endif
+#else
+/* To configure the Page Catalog and BIF-DM fed into the BIF for Garten accesses through this segment */
+#define RGXFW_SEGMMU_OUTADDR_TOP(pc, bifdm)			            ( (((IMG_UINT64) ((pc)    & 0xF)) << 44) | \
+                                                                  (((IMG_UINT64) ((bifdm) & 0xF)) << 40) )
+#endif
+
+/* META segments have 4kB minimum size */
+#define RGXFW_SEGMMU_ALIGN			(0x1000) 
+
+/* Segmented MMU registers (n = segment id) */
+#define META_CR_MMCU_SEGMENTn_BASE(n)			(0x04850000 + (n)*0x10)
+#define META_CR_MMCU_SEGMENTn_LIMIT(n)			(0x04850004 + (n)*0x10)
+#define META_CR_MMCU_SEGMENTn_OUTA0(n)			(0x04850008 + (n)*0x10)
+#define META_CR_MMCU_SEGMENTn_OUTA1(n)			(0x0485000C + (n)*0x10)
+
+/* Win mode for data cache */
+#define RGXFW__SEGMMU_DMAP_DC_WIN (0x3)
+#define RGXFW__SEGMMU_DMAP_DC_SHIFT (0x6)
+
+/************************************************************************
+* RGX FW Bootloader defaults
+************************************************************************/
+#define RGXFW_BOOTLDR_META_ADDR		(0x40000000)
+#define RGXFW_BOOTLDR_DEVV_ADDR_0	(0xC0000000)
+#define RGXFW_BOOTLDR_DEVV_ADDR_1	(0x000000E1)
+#define RGXFW_BOOTLDR_DEVV_ADDR		((((IMG_UINT64) RGXFW_BOOTLDR_DEVV_ADDR_1) << 32) | RGXFW_BOOTLDR_DEVV_ADDR_0)
+#define RGXFW_BOOTLDR_LIMIT			(0x1FFFF000)
+
+/* Bootloader configuration offset is in dwords (512 bytes) */
+#define RGXFW_BOOTLDR_CONF_OFFSET	(0x80)
+
+/************************************************************************
+* RGX META Stack
+************************************************************************/
+#define RGX_META_STACK_SIZE  (0xC00)
+
+/************************************************************************
+* RGX META Core memory
+************************************************************************/
+#define RGX_META_COREMEM_BSS_SIZE    (0xA00)
+#define RGX_META_COREMEM_DATA_SIZE   (RGX_META_COREMEM_BSS_SIZE + RGX_META_STACK_SIZE)
+#define RGX_META_COREMEM_CODE_SIZE   (RGX_META_COREMEM_SIZE - RGX_META_COREMEM_DATA_SIZE)
+/* code and data both map to the same physical memory */
+#define RGX_META_COREMEM_CODE_ADDR   (0x80000000)
+#define RGX_META_COREMEM_DATA_ADDR   (0x82000000)
+#define RGX_META_COREMEM_STACK_ADDR  (RGX_META_COREMEM_DATA_ADDR)
+#define RGX_META_COREMEM_BSS_ADDR    (RGX_META_COREMEM_STACK_ADDR + RGX_META_STACK_SIZE)
+/* because data and code share the same memory, base address for code is offset by the data */
+#define RGX_META_COREMEM_CODE_BADDR  (RGX_META_COREMEM_CODE_ADDR + RGX_META_COREMEM_DATA_SIZE)
+
+#define RGX_META_IS_COREMEM_CODE(A)  (((A) >= RGX_META_COREMEM_CODE_BADDR) && ((A) < (RGX_META_COREMEM_CODE_ADDR + RGX_META_COREMEM_SIZE)))
+#define RGX_META_IS_COREMEM_DATA(A)  (((A) >= RGX_META_COREMEM_DATA_ADDR) && ((A) < (RGX_META_COREMEM_DATA_ADDR + RGX_META_COREMEM_DATA_SIZE)))
+
+/************************************************************************
+* 2nd thread
+************************************************************************/
+#define RGXFW_THR1_PC		(0x18930000)
+#define RGXFW_THR1_SP		(0x78890000)
+
+/************************************************************************
+* META compatibility
+************************************************************************/
+
+#define META_CR_CORE_ID			(0x04831000)
+#define META_CR_CORE_ID_VER_SHIFT	(16U)
+#define META_CR_CORE_ID_VER_CLRMSK	(0XFF00FFFFU)
+
+#if (RGX_FEATURE_META == MTP218)
+#define RGX_CR_META_CORE_ID_VALUE 0x19
+#elif (RGX_FEATURE_META == MTP219)
+#define RGX_CR_META_CORE_ID_VALUE 0x1E
+#elif (RGX_FEATURE_META == LTP218)
+#define RGX_CR_META_CORE_ID_VALUE 0x1C
+#elif (RGX_FEATURE_META == LTP217)
+#define RGX_CR_META_CORE_ID_VALUE 0x1F
+#else
+#error "Unknown META ID"
+#endif
+
+#endif /*  __RGX_META_H__ */
+
+/******************************************************************************
+ End of file (rgx_meta.h)
+******************************************************************************/
+
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/include/rgx_options_km.h b/drivers/external_drivers/intel_media/graphics/rgx/include/rgx_options_km.h
new file mode 100644
index 0000000..475ff51
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/include/rgx_options_km.h
@@ -0,0 +1,192 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX KM build options
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* Each build option listed here is packed into a dword which
+ * provides up to log2(RGX_BUILD_OPTIONS_MASK_KM + 1) flags.
+ * The corresponding bit is set if the build option 
+ * was enabled at compile time.
+ *
+ * In order to extract the enabled build flags the INTERNAL_TEST
+ * switch should be enabled in a client program which includes this
+ * header. Then the client can test specific build flags by reading
+ * the bit value at ##OPTIONNAME##_SET_OFFSET in RGX_BUILD_OPTIONS_KM.
+ *
+ * IMPORTANT: add new options to unused bits or define a new dword
+ * (e.g. RGX_BUILD_OPTIONS_KM2) so that the bitfield remains backwards
+ * compatible.
+ */
+
+#define RGX_BUILD_OPTIONS_MASK_KM 0x0000FFFFUL
+
+#if defined(NO_HARDWARE) || defined (INTERNAL_TEST)
+	#define NO_HARDWARE_SET_OFFSET	OPTIONS_BIT0
+	#define OPTIONS_BIT0		(0x1ul << 0)
+	#if OPTIONS_BIT0 > RGX_BUILD_OPTIONS_MASK_KM
+	#error "Bit exceeds reserved range"
+	#endif
+#else
+	#define OPTIONS_BIT0		0x0
+#endif /* NO_HARDWARE */
+
+
+#if defined(PDUMP) || defined (INTERNAL_TEST)
+	#define PDUMP_SET_OFFSET	OPTIONS_BIT1
+	#define OPTIONS_BIT1		(0x1ul << 1)
+	#if OPTIONS_BIT1 > RGX_BUILD_OPTIONS_MASK_KM
+	#error "Bit exceeds reserved range"
+	#endif
+#else
+	#define OPTIONS_BIT1		0x0
+#endif /* PDUMP */
+
+
+#if defined(SUPPORT_META_SLAVE_BOOT) || defined (INTERNAL_TEST)
+	#define SUPPORT_META_SLAVE_BOOT_SET_OFFSET	OPTIONS_BIT2
+	#define OPTIONS_BIT2		(0x1ul << 2)
+	#if OPTIONS_BIT2 > RGX_BUILD_OPTIONS_MASK_KM
+	#error "Bit exceeds reserved range"
+	#endif
+#else
+	#define OPTIONS_BIT2		0x0
+#endif /* SUPPORT_META_SLAVE_BOOT */
+
+
+#if defined(SUPPORT_MMU_FREELIST) || defined (INTERNAL_TEST)
+	#define SUPPORT_MMU_FREELIST_SET_OFFSET	OPTIONS_BIT3
+	#define OPTIONS_BIT3		(0x1ul << 3)
+	#if OPTIONS_BIT3 > RGX_BUILD_OPTIONS_MASK_KM
+	#error "Bit exceeds reserved range"
+	#endif
+#else
+	#define OPTIONS_BIT3		0x0
+#endif /* SUPPORT_MMU_FREELIST */
+
+
+#if defined(SUPPORT_RGX) || defined (INTERNAL_TEST)
+	#define SUPPORT_RGX_SET_OFFSET	OPTIONS_BIT4
+	#define OPTIONS_BIT4		(0x1ul << 4)
+	#if OPTIONS_BIT4 > RGX_BUILD_OPTIONS_MASK_KM
+	#error "Bit exceeds reserved range"
+	#endif
+#else
+	#define OPTIONS_BIT4		0x0
+#endif /* SUPPORT_RGX */
+
+
+#if defined(SUPPORT_SECURE_EXPORT) || defined (INTERNAL_TEST)
+	#define SUPPORT_SECURE_EXPORT_SET_OFFSET	OPTIONS_BIT5
+	#define OPTIONS_BIT5		(0x1ul << 5)
+	#if OPTIONS_BIT5 > RGX_BUILD_OPTIONS_MASK_KM
+	#error "Bit exceeds reserved range"
+	#endif
+#else
+	#define OPTIONS_BIT5		0x0
+#endif /* SUPPORT_SECURE_EXPORT */
+
+
+#if defined(SUPPORT_INSECURE_EXPORT) || defined (INTERNAL_TEST)
+	#define SUPPORT_INSECURE_EXPORT_SET_OFFSET	OPTIONS_BIT6
+	#define OPTIONS_BIT6		(0x1ul << 6)
+	#if OPTIONS_BIT6 > RGX_BUILD_OPTIONS_MASK_KM
+	#error "Bit exceeds reserved range"
+	#endif
+#else
+	#define OPTIONS_BIT6	0x0
+#endif /* SUPPORT_INSECURE_EXPORT */
+
+
+#if defined(SUPPORT_VFP) || defined (INTERNAL_TEST)
+	#define SUPPORT_VFP_SET_OFFSET	OPTIONS_BIT7
+	#define OPTIONS_BIT7		(0x1ul << 7)
+	#if OPTIONS_BIT7 > RGX_BUILD_OPTIONS_MASK_KM
+	#error "Bit exceeds reserved range"
+	#endif
+#else
+	#define OPTIONS_BIT7		0x0
+#endif /* SUPPORT_VFP */
+
+
+#if defined(SUPPORT_DRM) || defined (INTERNAL_TEST)
+	#define SUPPORT_DRM_SET_OFFSET	OPTIONS_BIT8
+	#define OPTIONS_BIT8		(0x1ul << 8)
+	#if OPTIONS_BIT8 > RGX_BUILD_OPTIONS_MASK_KM
+	#error "Bit exceeds reserved range"
+	#endif
+#else
+	#define OPTIONS_BIT8		0x0
+#endif /* SUPPORT_DRM */
+
+
+#if defined(SUPPORT_ION) || defined (INTERNAL_TEST)
+	#define SUPPORT_ION_SET_OFFSET	OPTIONS_BIT9
+	#define OPTIONS_BIT9		(0x1ul << 9)
+	#if OPTIONS_BIT9 > RGX_BUILD_OPTIONS_MASK_KM
+	#error "Bit exceeds reserved range"
+	#endif
+#else
+	#define OPTIONS_BIT9		0x0
+#endif /* SUPPORT_ION */
+
+#if defined(DEBUG) || defined (INTERNAL_TEST)
+	#define DEBUG_SET_OFFSET	OPTIONS_BIT10
+	#define OPTIONS_BIT10		(0x1ul << 10)
+	#if OPTIONS_BIT10 > RGX_BUILD_OPTIONS_MASK_KM
+	#error "Bit exceeds reserved range"
+	#endif
+#else
+	#define OPTIONS_BIT10		0x0
+#endif /* DEBUG */
+
+
+#define RGX_BUILD_OPTIONS_KM	\
+	OPTIONS_BIT0  |\
+	OPTIONS_BIT1  |\
+	OPTIONS_BIT2  |\
+	OPTIONS_BIT3  |\
+	OPTIONS_BIT4  |\
+	OPTIONS_BIT6  |\
+	OPTIONS_BIT7  |\
+	OPTIONS_BIT8  |\
+	OPTIONS_BIT9  |\
+	OPTIONS_BIT10
+
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/include/rgxscript.h b/drivers/external_drivers/intel_media/graphics/rgx/include/rgxscript.h
new file mode 100644
index 0000000..d6f21c0
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/include/rgxscript.h
@@ -0,0 +1,179 @@
+/*************************************************************************/ /*!
+@File
+@Title          rgx kernel services structues/functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX initialisation script definitions.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __RGXSCRIPT_H__
+#define __RGXSCRIPT_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#define	RGX_MAX_INIT_COMMANDS	(256)
+#define	RGX_MAX_DEBUG_COMMANDS	(320)
+#define	RGX_MAX_DBGBUS_COMMANDS	(4096)
+#define	RGX_MAX_DEINIT_COMMANDS	(32)
+#define RGX_DBG_CMD_NAME_SIZE	(40)
+
+typedef	enum _RGX_INIT_OPERATION
+{
+	RGX_INIT_OP_ILLEGAL = 0,
+	RGX_INIT_OP_WRITE_HW_REG,
+	RGX_INIT_OP_POLL_64_HW_REG,
+	RGX_INIT_OP_POLL_HW_REG,
+	RGX_INIT_OP_COND_POLL_HW_REG,
+	RGX_INIT_OP_LOOP_POINT,
+	RGX_INIT_OP_COND_BRANCH,
+	RGX_INIT_OP_HALT,
+	RGX_INIT_OP_DBG_READ32_HW_REG,
+	RGX_INIT_OP_DBG_READ64_HW_REG,
+	RGX_INIT_OP_DBG_CALC,
+	RGX_INIT_OP_DBG_WAIT,
+	RGX_INIT_OP_DBG_STRING,
+	RGX_INIT_OP_PDUMP_HW_REG,
+} RGX_INIT_OPERATION;
+
+typedef union _RGX_INIT_COMMAND_
+{
+	RGX_INIT_OPERATION eOp;
+	
+	struct {
+		RGX_INIT_OPERATION eOp;
+		IMG_UINT32 ui32Offset;
+		IMG_UINT32 ui32Value;
+	} sWriteHWReg;
+
+	struct {
+		RGX_INIT_OPERATION eOp;
+		IMG_UINT32 ui32Offset;
+		IMG_UINT32 ui32Value;
+	} sPDumpHWReg;
+	
+	struct 
+	{
+		RGX_INIT_OPERATION eOp;
+		IMG_UINT32 ui32Offset;
+		IMG_UINT64 ui64Value;
+		IMG_UINT64 ui64Mask;		
+	} sPoll64HWReg;
+
+	struct 
+	{
+		RGX_INIT_OPERATION eOp;
+		IMG_UINT32 ui32Offset;
+		IMG_UINT32 ui32Value;
+		IMG_UINT32 ui32Mask;		
+	} sPollHWReg;
+	
+	struct 
+	{
+		RGX_INIT_OPERATION eOp;
+		IMG_UINT32 ui32CondOffset;
+		IMG_UINT32 ui32CondValue;
+		IMG_UINT32 ui32CondMask;		
+		IMG_UINT32 ui32Offset;
+		IMG_UINT32 ui32Value;
+		IMG_UINT32 ui32Mask;		
+	} sCondPollHWReg;
+	
+	struct
+	{
+		RGX_INIT_OPERATION eOp;
+	} sLoopPoint;
+
+	struct
+	{
+		RGX_INIT_OPERATION eOp;
+		IMG_UINT32 ui32Offset;
+		IMG_UINT32 ui32Value;
+		IMG_UINT32 ui32Mask;
+
+	} sConditionalBranchPoint;
+
+	struct 
+	{
+		RGX_INIT_OPERATION eOp;
+		IMG_UINT32 ui32Offset;
+		IMG_CHAR aszName[RGX_DBG_CMD_NAME_SIZE];
+	} sDBGReadHWReg;
+
+	struct
+	{
+		RGX_INIT_OPERATION eOp;
+		IMG_UINT32 ui32Offset1;
+		IMG_UINT32 ui32Offset2;
+		IMG_UINT32 ui32Offset3;
+		IMG_CHAR aszName[RGX_DBG_CMD_NAME_SIZE];
+	} sDBGCalc;
+
+	struct
+	{
+		RGX_INIT_OPERATION eOp;
+		IMG_UINT32 ui32WaitInUs;
+	} sDBGWait;
+
+	struct
+	{
+		RGX_INIT_OPERATION eOp;
+		IMG_CHAR aszString[RGX_DBG_CMD_NAME_SIZE];
+	} sDBGString;
+
+} RGX_INIT_COMMAND;
+
+typedef struct _RGX_INIT_SCRIPTS_
+{
+	RGX_INIT_COMMAND asInitCommands[RGX_MAX_INIT_COMMANDS];
+	RGX_INIT_COMMAND asDbgCommands[RGX_MAX_DEBUG_COMMANDS];
+	RGX_INIT_COMMAND asDbgBusCommands[RGX_MAX_DBGBUS_COMMANDS];
+	RGX_INIT_COMMAND asDeinitCommands[RGX_MAX_DEINIT_COMMANDS];
+} RGX_SCRIPTS;
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* __RGXSCRIPT_H__ */
+
+/*****************************************************************************
+ End of file (rgxscript.h)
+*****************************************************************************/
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/include/ri_typedefs.h b/drivers/external_drivers/intel_media/graphics/rgx/include/ri_typedefs.h
new file mode 100644
index 0000000..c7e9a46
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/include/ri_typedefs.h
@@ -0,0 +1,55 @@
+/*************************************************************************/ /*!
+@File
+@Title          Resource Information (RI) Management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Client side part of RI management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RI_TYPEDEFS_H
+#define RI_TYPEDEFS_H
+
+#include "img_types.h"
+
+#define RI_MAX_TEXT_LEN 80
+
+typedef struct RI_SUBLIST_ENTRY RI_ENTRY;
+typedef RI_ENTRY* RI_HANDLE;
+
+#endif /* #ifndef RI_TYPEDEFS_H */
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/include/services.h b/drivers/external_drivers/intel_media/graphics/rgx/include/services.h
new file mode 100644
index 0000000..06e1ab5
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/include/services.h
@@ -0,0 +1,1337 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services API Header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Exported services API details
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __SERVICES_H__
+#define __SERVICES_H__
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+	#if !defined(OSID_BITS_FLAGS_OFFSET)
+		#define  OSID_BITS_FLAGS_OFFSET	20
+		#define  OSID_BITS_FLAGS_MASK	7
+	#endif
+
+	#if !defined(GPUVIRT_VALIDATION_MAX_STRING_LENGTH)
+		#define GPUVIRT_VALIDATION_MAX_STRING_LENGTH 100
+	#endif
+
+	#if !defined(GPUVIRT_VALIDATION_NUM_REGIONS)
+		#define GPUVIRT_VALIDATION_NUM_REGIONS	2
+	#endif
+
+#endif
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "img_defs.h"
+#include "servicesext.h"
+#include "sync_external.h"
+#include "pdumpdefs.h"
+#include "lock_types.h"
+#include "pvr_debug.h"
+
+/* FIXME: Can't do this as dc_client includes services.h
+#include "dc_client.h"
+*/
+
+#if defined(LDDM)
+/* LDDM build needs to include this for the allocation structure */
+#include "umallocation.h"
+#endif
+
+#include "pvrsrv_device_types.h"
+
+/* The comment below is the front page for code-generated doxygen documentation */
+/*!
+ ******************************************************************************
+ @mainpage
+ This document details the APIs and implementation of the Consumer Services.
+ It is intended to be used in conjunction with the Consumer Services
+ Software Architectural Specification and the Consumer Services Software
+ Functional Specification.
+ *****************************************************************************/
+
+/******************************************************************************
+ * 	#defines
+ *****************************************************************************/
+
+/*! 4k page size definition */
+#define PVRSRV_4K_PAGE_SIZE					4096UL      /*!< Size of a 4K Page */
+#define PVRSRV_4K_PAGE_SIZE_ALIGNSHIFT		12          /*!< Amount to shift an address by so that
+                                                          it is always page-aligned */
+/*! 16k page size definition */
+#define PVRSRV_16K_PAGE_SIZE					16384UL      /*!< Size of a 16K Page */
+#define PVRSRV_16K_PAGE_SIZE_ALIGNSHIFT		14          /*!< Amount to shift an address by so that
+                                                          it is always page-aligned */
+/*! 64k page size definition */
+#define PVRSRV_64K_PAGE_SIZE					65536UL      /*!< Size of a 64K Page */
+#define PVRSRV_64K_PAGE_SIZE_ALIGNSHIFT		16          /*!< Amount to shift an address by so that
+                                                          it is always page-aligned */
+/*! 256k page size definition */
+#define PVRSRV_256K_PAGE_SIZE					262144UL      /*!< Size of a 256K Page */
+#define PVRSRV_256K_PAGE_SIZE_ALIGNSHIFT		18          /*!< Amount to shift an address by so that
+                                                          it is always page-aligned */
+/*! 1MB page size definition */
+#define PVRSRV_1M_PAGE_SIZE					1048576UL      /*!< Size of a 1M Page */
+#define PVRSRV_1M_PAGE_SIZE_ALIGNSHIFT		20          /*!< Amount to shift an address by so that
+                                                          it is always page-aligned */
+/*! 2MB page size definition */
+#define PVRSRV_2M_PAGE_SIZE					2097152UL      /*!< Size of a 2M Page */
+#define PVRSRV_2M_PAGE_SIZE_ALIGNSHIFT		21          /*!< Amount to shift an address by so that
+                                                          it is always page-aligned */
+
+
+#define EVENTOBJNAME_MAXLENGTH (50) /*!< Max length of an event object name */
+
+
+/*!
+	Flags for Services connection.
+	Allows to define per-client policy for Services
+*/
+#define SRV_FLAGS_PERSIST		(1U << 0)  /*!< Persist client flag */
+#define SRV_FLAGS_INIT_PROCESS	(1U << 1)  /*!< Allows connect to succeed if SrvInit
+                                            * has not yet run (used by SrvInit itself) */
+#define SRV_FLAGS_PDUMPCTRL     (1U << 31) /*!< PDump Ctrl client flag */
+
+/*
+	Pdump flags which are accessible to Services clients
+*/
+/* FIXME: defined to be the same as
+ * #define PDUMP_FLAGS_CONTINUOUS		0x40000000UL
+ * (from services/include/pdump.h)
+ * The flags need to either be moved here, or e.g. all PDump functions need a bContinuous parameter
+ */
+#define PVRSRV_PDUMP_FLAGS_CONTINUOUS		0x40000000UL /*!< pdump continuous */
+
+#define PVRSRV_UNDEFINED_HEAP_ID			(~0LU)
+
+/*!
+ ******************************************************************************
+ * User Module type
+ *****************************************************************************/
+typedef enum
+{
+	IMG_EGL				= 0x00000001,       /*!< EGL Module */
+	IMG_OPENGLES1		= 0x00000002,       /*!< OGLES1 Module */
+	IMG_OPENGLES3		= 0x00000003,       /*!< OGLES3 Module */
+	IMG_D3DM			= 0x00000004,       /*!< D3DM Module */
+	IMG_SRV_UM			= 0x00000005,       /*!< Services User-Mode */
+	IMG_SRV_INIT		= 0x00000006,		/*!< Services initialisation */
+	IMG_SRVCLIENT		= 0x00000007,       /*!< Services Client */
+	IMG_OPENGL			= 0x00000008,       /*!< OpenGL */
+	IMG_D3D				= 0x00000009,       /*!< D3D */
+	IMG_OPENCL			= 0x0000000A,       /*!< OpenCL */
+	IMG_ANDROID_HAL		= 0x0000000B,       /*!< Graphics HAL */
+	IMG_WEC_GPE			= 0x0000000C,		/*!< WinEC-specific GPE */
+	IMG_PVRGPE			= 0x0000000D,		/*!< WinEC/WinCE GPE */
+	IMG_RSCOMPUTE       = 0x0000000E,       /*!< RenderScript Compute */
+	IMG_OPENRL          = 0x0000000F,       /*!< OpenRL Module */
+	IMG_PDUMPCTRL		= 0x00000010,       /*!< PDump control client */
+	IMG_USC2			= 0x00000011,       /*!< Uniflex compiler */
+
+} IMG_MODULE_ID;
+
+/*! Max length of an App-Hint string */
+#define APPHINT_MAX_STRING_SIZE	256
+
+/*!
+ ******************************************************************************
+ * IMG data types
+ *****************************************************************************/
+typedef enum
+{
+	IMG_STRING_TYPE		= 1,                    /*!< String type */
+	IMG_FLOAT_TYPE		,                       /*!< Float type */
+	IMG_UINT_TYPE		,                       /*!< Unsigned Int type */
+	IMG_INT_TYPE		,                       /*!< (Signed) Int type */
+	IMG_FLAG_TYPE                               /*!< Flag Type */
+}IMG_DATA_TYPE;
+
+
+/******************************************************************************
+ * Structure definitions.
+ *****************************************************************************/
+
+/*!
+ * Forward declaration
+ */
+typedef struct _PVRSRV_DEV_DATA_ *PPVRSRV_DEV_DATA;
+/*!
+ * Forward declaration (look on connection.h)
+ */
+typedef struct _PVRSRV_CONNECTION_ PVRSRV_CONNECTION;
+
+/*!
+ ******************************************************************************
+ * This structure allows the user mode glue code to have an OS independent
+ * set of prototypes.
+ *****************************************************************************/
+typedef struct _PVRSRV_DEV_DATA_
+{
+	PVRSRV_CONNECTION	 *psConnection;	/*!< Services connection info */
+	IMG_HANDLE			hDevCookie;				/*!< Dev cookie */
+
+} PVRSRV_DEV_DATA;
+
+/*************************************************************************/ /*! 
+    PVR Client Event handling in Services
+*/ /**************************************************************************/
+typedef enum _PVRSRV_CLIENT_EVENT_
+{
+	PVRSRV_CLIENT_EVENT_HWTIMEOUT = 0,              /*!< hw timeout event */
+} PVRSRV_CLIENT_EVENT;
+
+/**************************************************************************/ /*!
+@Function       PVRSRVClientEvent
+@Description    Handles timeouts occurring in client drivers
+@Input          eEvent          event type
+@Input          psDevData       pointer to the PVRSRV_DEV_DATA context
+@Input          pvData          client-specific data
+@Return                         PVRSRV_OK on success. Otherwise, a PVRSRV_ 
+                                error code
+ */ /**************************************************************************/
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVClientEvent(const PVRSRV_CLIENT_EVENT eEvent,
+											PVRSRV_DEV_DATA *psDevData,
+											IMG_PVOID pvData);
+
+/******************************************************************************
+ * PVR Services API prototypes.
+ *****************************************************************************/
+
+/**************************************************************************/ /*!
+@Function       PVRSRVConnect
+@Description    Creates a services connection from an application to the
+                services module.
+@Output         ppsConnection   on Success, *ppsConnection is set to the new 
+                                PVRSRV_CONNECTION instance.
+@Input          ui32SrvFlags    a bit-wise OR of the following:
+                                SRV_FLAGS_PERSIST
+@Return                         PVRSRV_OK on success. Otherwise, a PVRSRV_ 
+                                error code
+ */ /**************************************************************************/
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVConnect(PVRSRV_CONNECTION **ppsConnection,
+					IMG_UINT32 ui32SrvFlags);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVDisconnect 
+@Description    Disconnects from the services module
+@Input          psConnection    the connection to be disconnected
+@Return                         PVRSRV_OK on success. Otherwise, a PVRSRV_
+                                error code
+ */ /**************************************************************************/
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVDisconnect(PVRSRV_CONNECTION *psConnection);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVEnumerateDevices
+@Description    Enumerate all services managed devices in the 
+                system.
+
+                The function returns a list of the device IDs stored either
+                in the services (or constructed in the user mode glue 
+                component in certain environments). The number of devices 
+                in the list is also returned.
+
+                The user is required to provide a buffer large enough to 
+                receive an array of MAX_NUM_DEVICE_IDS *
+                PVRSRV_DEVICE_IDENTIFIER structures.
+
+                In a binary layered component which does not support dynamic
+                runtime selection, the glue code should compile to return 
+                the supported devices statically, e.g. multiple instances of
+                the same device if multiple devices are supported
+
+                In the case of an environment (for instance) where one 
+                services managed device may connect to two display devices
+                this code would enumerate all three devices and even
+                non-dynamic device selection code should retain the facility
+                to parse the list to find the index of a given device.}
+
+@Input          psConnection    Services connection
+@Output         puiNumDevices   Number of devices present in the system
+@Output         puiDevIDs       Pointer to called supplied array of
+                                PVRSRV_DEVICE_IDENTIFIER structures. The
+                                array is assumed to be at least
+                                PVRSRV_MAX_DEVICES long.
+@Return                         PVRSRV_OK on success. Otherwise, a PVRSRV_
+                                error code
+ */ /**************************************************************************/
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVEnumerateDevices(const PVRSRV_CONNECTION 	*psConnection,
+													IMG_UINT32 					*puiNumDevices,
+													PVRSRV_DEVICE_IDENTIFIER 	*puiDevIDs);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVAcquireDeviceData
+@Description    Returns device info structure pointer for the requested device.
+                This populates a PVRSRV_DEV_DATA structure with appropriate 
+                pointers to the DevInfo structure for the device requested.
+
+                In a non-plug-and-play the first call to GetDeviceInfo for a
+                device causes device initialisation
+
+                Calls to GetDeviceInfo are reference counted
+@Input          psConnection    Services connection
+@Input          uiDevIndex      Index to the required device obtained from the 
+                                PVRSRVEnumerateDevice function 
+@Output         psDevData       The returned Device Data
+@Input          eDeviceType     Required device type. If type is unknown use 
+                                uiDevIndex to locate device data
+@Return                         PVRSRV_OK on success. Otherwise, a PVRSRV_
+                                error code
+ */ /**************************************************************************/
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVAcquireDeviceData(PVRSRV_CONNECTION 	*psConnection,
+													IMG_UINT32			uiDevIndex,
+													PVRSRV_DEV_DATA		*psDevData,
+													PVRSRV_DEVICE_TYPE	eDeviceType);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVPollForValue
+@Description    Polls for a value to match a masked read of System Memory.
+                The function returns when either (1) the value read back
+                matches ui32Value, or (2) the maximum number of tries has
+                been reached.
+@Input          psConnection        Services connection
+@Input          hOSEvent            Handle to OS event to wait for
+@Input          pui32LinMemAddr     the address of the memory to poll
+@Input          ui32Value           the required value
+@Input          ui32Mask            the mask to use
+@Input          ui32Waitus          interval between tries (us)
+@Input          ui32Tries           number of tries to make before giving up
+@Return                             PVRSRV_OK on success. Otherwise, a 
+                                    PVRSRV_ error code
+ */ /**************************************************************************/
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVPollForValue(const PVRSRV_CONNECTION	*psConnection,
+								IMG_HANDLE				hOSEvent,
+								volatile IMG_UINT32		*pui32LinMemAddr,
+								IMG_UINT32				ui32Value,
+								IMG_UINT32				ui32Mask,
+								IMG_UINT32				ui32Waitus,
+								IMG_UINT32				ui32Tries);
+
+/* this function is almost the same as PVRSRVPollForValue. The only difference
+ * is that it now handles the interval between tries itself. Therefore it can
+ * correctly handles the differences between the different platforms.
+ */
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVWaitForValue(const PVRSRV_CONNECTION	*psConnection,
+                                IMG_HANDLE				hOSEvent,
+                                volatile IMG_UINT32		*pui32LinMemAddr,
+                                IMG_UINT32				ui32Value,
+                                IMG_UINT32				ui32Mask);
+
+
+/**************************************************************************/ /*!
+ @Function      PVRSRVConditionCheckCallback
+ @Description   Function prototype for use with the PVRSRVWaitForCondition()
+                API. Clients implement this callback to test if the condition
+                waited for has been met and become true.
+
+ @Input         pvUserData      Pointer to client user data needed for
+                                 the check
+ @Output        pbCondMet       Updated on exit with condition state
+
+ @Return        PVRSRV_OK  when condition tested without error
+                PVRSRV_*   other system error that will lead to the
+                           abnormal termination of the wait API.
+ */
+/******************************************************************************/
+typedef
+PVRSRV_ERROR (*PVRSRVConditionCheckCallback)(
+        IMG_PVOID  pvUserData,
+        IMG_BOOL*  pbCondMet);
+
+
+/**************************************************************************/ /*!
+@Function       PVRSRVWaitForCondition
+@Description    Wait using PVRSRVEventObjectWait() for a
+                condition (pfnCallback) to become true. It periodically
+                checks the condition state by employing a loop and
+                waiting on either the event supplied or sleeping for a brief
+                time (if hEvent is null) each time the condition is
+                checked and found not to be met. When the condition is true
+                the function returns. It will also return when the time
+                period has been exceeded or an error has occurred.
+
+@Input          psConnection    Services connection
+@Input          hEvent          Event to wait on or NULL not to use event
+                                 objects but OS wait for a short time.
+@Input          pfnCallback     Client condition check callback
+@Input          pvUserData      Client user data supplied to callback
+
+@Return         PVRSRV_OK	          When condition met
+                PVRSRV_ERROR_TIMEOUT  When condition not met and time is up
+                PVRSRV_*              Otherwise, some other error code
+ */ /**************************************************************************/
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVWaitForCondition(
+        const PVRSRV_CONNECTION*     psConnection,
+        IMG_HANDLE                   hEvent,
+        PVRSRVConditionCheckCallback pfnCallback,
+        IMG_PVOID                    pvUserData);
+
+
+/**************************************************************************/ /*!
+@Function       PVRSRVWaitUntilSyncPrimOpReady
+@Description    Wait using PVRSRVWaitForCondition for a sync operation to
+                become ready.
+
+@Input          psConnection    Services connection
+@Input          hEvent          Event to wait on or NULL not to use event
+                                 objects but OS wait for a short time.
+@Input          psOpCookie      Sync operation cookie to test
+
+@Return         PVRSRV_OK	          When condition met
+                PVRSRV_ERROR_TIMEOUT  When condition not met and time is up
+                PVRSRV_*              Otherwise, some other error code
+ */ /**************************************************************************/
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVWaitUntilSyncPrimOpReady(
+        const PVRSRV_CONNECTION* psConnection,
+        IMG_HANDLE               hEvent,
+        PSYNC_OP_COOKIE          psOpCookie);
+
+
+/******************************************************************************
+ * PDUMP Function prototypes...
+ *****************************************************************************/
+#if defined(PDUMP)
+/**************************************************************************/ /*!
+@Function       PVRSRVPDumpInit
+@Description    Pdump initialisation
+@Input          psConnection    Services connection
+@Return                         PVRSRV_OK on success. Otherwise, a PVRSRV_
+                                error code
+ */ /**************************************************************************/
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpInit(const PVRSRV_CONNECTION *psConnection);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVPDumpStartInitPhase
+@Description    Resume the pdump init phase state   
+@Input          psConnection    Services connection
+@Return                         PVRSRV_OK on success. Otherwise, a PVRSRV_
+                                error code
+ */ /**************************************************************************/
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpStartInitPhase(const PVRSRV_CONNECTION *psConnection);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVPDumpStopInitPhase
+@Description    Stop the pdump init phase state
+@Input          psConnection    Services connection
+@Input          eModuleID       Which module is requesting to stop the init phase
+@Return                         PVRSRV_OK on success. Otherwise, a PVRSRV_
+                                error code
+ */ /**************************************************************************/
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpStopInitPhase(const PVRSRV_CONNECTION *psConnection,
+												IMG_MODULE_ID eModuleID);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVPDumpSetFrame
+@Description    Sets the pdump frame
+@Input          psConnection    Services connection
+@Input          ui32Frame       frame id
+@Return                         PVRSRV_OK on success. Otherwise, a PVRSRV_
+                                error code
+*/ /**************************************************************************/
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpSetFrame(const PVRSRV_CONNECTION *psConnection,
+											  IMG_UINT32 ui32Frame);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVPDumpGetFrame
+@Description    Gets the current pdump frame
+@Input          psConnection    Services connection
+@Output         pui32Frame       frame id
+@Return         PVRSRV_OK on success. Otherwise, a PVRSRV_error code
+*/ /**************************************************************************/
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpGetFrame(const PVRSRV_CONNECTION *psConnection,
+											  IMG_UINT32 *pui32Frame);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVPDumpIsLastCaptureFrame
+@Description    Returns whether this is the last frame of the capture range
+@Input          psConnection    Services connection
+@Return                         IMG_TRUE if last frame,
+                                IMG_FALSE otherwise
+*/ /**************************************************************************/
+IMG_IMPORT
+IMG_BOOL IMG_CALLCONV PVRSRVPDumpIsLastCaptureFrame(const PVRSRV_CONNECTION *psConnection);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVPDumpAfterRender
+@Description    Executes TraceBuffer and SignatureBuffer commands
+@Input          psDevData       Device data
+*/ /**************************************************************************/
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpAfterRender(PVRSRV_DEV_DATA *psDevData);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVPDumpComment
+@Description    PDumps a comment
+@Input          psConnection        Services connection
+@Input          pszComment          Comment to be inserted
+@Input          bContinuous         pdump contiunous boolean
+@Return                             PVRSRV_OK on success. Otherwise, a PVRSRV_
+                                    error code
+ */ /**************************************************************************/
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpComment(const PVRSRV_CONNECTION *psConnection,
+											 const IMG_CHAR *pszComment,
+											 IMG_BOOL bContinuous);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVPDumpCommentf
+@Description    PDumps a formatted comment
+@Input          psConnection        Services connection
+@Input          bContinuous         pdump continuous boolean
+@Input          pszFormat           Format string
+@Input          ...                 vararg list
+@Return                             PVRSRV_OK on success. Otherwise, a PVRSRV_
+                                    error code
+ */ /**************************************************************************/
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpCommentf(const PVRSRV_CONNECTION *psConnection,
+											  IMG_BOOL bContinuous,
+											  const IMG_CHAR *pszFormat, ...)
+											  IMG_FORMAT_PRINTF(3, 4);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVPDumpCommentWithFlagsf
+@Description    PDumps a formatted comment, passing in flags
+@Input          psConnection        Services connection
+@Input          ui32Flags           Flags
+@Input          pszFormat           Format string
+@Input          ...                 vararg list
+@Return                             PVRSRV_OK on success. Otherwise, a PVRSRV_
+                                    error code
+ */ /**************************************************************************/
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpCommentWithFlagsf(const PVRSRV_CONNECTION *psConnection,
+													   IMG_UINT32 ui32Flags,
+													   const IMG_CHAR *pszFormat, ...)
+													   IMG_FORMAT_PRINTF(3, 4);
+
+
+/**************************************************************************/ /*!
+@Function       PVRSRVPDumpIsCapturing
+@Description    Reports whether PDump is currently capturing or not
+@Input          psConnection        Services connection
+@Output         pbIsCapturing       Indicates whether PDump is currently
+                                    capturing
+@Return                             PVRSRV_OK on success. Otherwise, a PVRSRV_
+                                    error code
+ */ /**************************************************************************/
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpIsCapturing(const PVRSRV_CONNECTION *psConnection,
+								 				IMG_BOOL *pbIsCapturing);
+
+
+/**************************************************************************/ /*!
+@Function       PVRSRVPDumpIsCapturingTest
+@Description    checks whether pdump is currently in frame capture range
+@Input          psConnection        Services connection
+@Return         IMG_BOOL
+ */ /**************************************************************************/
+IMG_IMPORT
+IMG_BOOL IMG_CALLCONV PVRSRVPDumpIsCapturingTest(const PVRSRV_CONNECTION *psConnection);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpSetDefaultCaptureParams(const PVRSRV_CONNECTION *psConnection,
+                                                             IMG_UINT32 ui32Mode,
+                                                             IMG_UINT32 ui32Start,
+                                                             IMG_UINT32 ui32End,
+                                                             IMG_UINT32 ui32Interval,
+                                                             IMG_UINT32 ui32MaxParamFileSize);
+
+#else	/* PDUMP */
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVPDumpInit)
+#endif
+static INLINE PVRSRV_ERROR 
+PVRSRVPDumpInit(const PVRSRV_CONNECTION *psConnection)
+{
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVPDumpStartInitPhase)
+#endif
+static INLINE PVRSRV_ERROR 
+PVRSRVPDumpStartInitPhase(const PVRSRV_CONNECTION *psConnection)
+{
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVPDumpStopInitPhase)
+#endif
+static INLINE PVRSRV_ERROR
+PVRSRVPDumpStopInitPhase(const PVRSRV_CONNECTION *psConnection)
+{
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVPDumpSetFrame)
+#endif
+static INLINE PVRSRV_ERROR
+PVRSRVPDumpSetFrame(const PVRSRV_CONNECTION *psConnection,
+					IMG_UINT32 ui32Frame)
+{
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	PVR_UNREFERENCED_PARAMETER(ui32Frame);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVPDumpGetFrame)
+#endif
+static INLINE PVRSRV_ERROR
+PVRSRVPDumpGetFrame(const PVRSRV_CONNECTION *psConnection,
+					IMG_UINT32 *pui32Frame)
+{
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	PVR_UNREFERENCED_PARAMETER(pui32Frame);
+	return PVRSRV_OK;
+}
+
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVPDumpIsLastCaptureFrame)
+#endif
+static INLINE IMG_BOOL
+PVRSRVPDumpIsLastCaptureFrame(const PVRSRV_CONNECTION *psConnection)
+{
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	return IMG_FALSE;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVPDumpAfterRender)
+#endif
+static INLINE PVRSRV_ERROR
+PVRSRVPDumpAfterRender(PVRSRV_DEV_DATA *psDevData)
+{
+	PVR_UNREFERENCED_PARAMETER(psDevData);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVPDumpComment)
+#endif
+static INLINE PVRSRV_ERROR
+PVRSRVPDumpComment(const PVRSRV_CONNECTION *psConnection,
+				   const IMG_CHAR *pszComment,
+				   IMG_BOOL bContinuous)
+{
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	PVR_UNREFERENCED_PARAMETER(pszComment);
+	PVR_UNREFERENCED_PARAMETER(bContinuous);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVPDumpCommentf)
+#endif
+static INLINE PVRSRV_ERROR
+PVRSRVPDumpCommentf(const PVRSRV_CONNECTION *psConnection,
+					IMG_BOOL bContinuous,
+					const IMG_CHAR *pszFormat, ...)
+{
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	PVR_UNREFERENCED_PARAMETER(bContinuous);
+	PVR_UNREFERENCED_PARAMETER(pszFormat);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVPDumpCommentWithFlagsf)
+#endif
+static INLINE PVRSRV_ERROR
+PVRSRVPDumpCommentWithFlagsf(const PVRSRV_CONNECTION *psConnection,
+							 IMG_UINT32 ui32Flags,
+							 const IMG_CHAR *pszFormat, ...)
+{
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	PVR_UNREFERENCED_PARAMETER(ui32Flags);
+	PVR_UNREFERENCED_PARAMETER(pszFormat);
+	return PVRSRV_OK;
+}
+
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVPDumpIsCapturing)
+#endif
+static INLINE PVRSRV_ERROR
+PVRSRVPDumpIsCapturing(const PVRSRV_CONNECTION *psConnection,
+					   IMG_BOOL *pbIsCapturing)
+{
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	*pbIsCapturing = IMG_FALSE;
+	return PVRSRV_OK;
+}								 				
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVPDumpIsCapturingTest)
+#endif
+static INLINE IMG_BOOL
+PVRSRVPDumpIsCapturingTest(const PVRSRV_CONNECTION *psConnection)
+{
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	return IMG_FALSE;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpSetPidCapRange)
+#endif
+static INLINE PVRSRV_ERROR
+PVRSRVPDumpSetDefaultCaptureParams(const PVRSRV_CONNECTION *psConnection,
+                                   IMG_UINT32 ui32Mode,
+                                   IMG_UINT32 ui32Start,
+                                   IMG_UINT32 ui32End,
+                                   IMG_UINT32 ui32Interval,
+                                   IMG_UINT32 ui32MaxParamFileSize)
+{
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	PVR_UNREFERENCED_PARAMETER(ui32Mode);
+	PVR_UNREFERENCED_PARAMETER(ui32Start);
+	PVR_UNREFERENCED_PARAMETER(ui32End);
+	PVR_UNREFERENCED_PARAMETER(ui32Interval);
+	PVR_UNREFERENCED_PARAMETER(ui32MaxParamFileSize);
+
+	return PVRSRV_OK;
+}
+
+
+#endif	/* PDUMP */
+
+/**************************************************************************/ /*!
+@Function       PVRSRVLoadLibrary
+@Description    Load the named Dynamic-Link (Shared) Library. This will perform
+				reference counting in association with PVRSRVUnloadLibrary,
+				so for example if the same library is loaded twice and unloaded once,
+				a reference to the library will remain.
+@Input          pszLibraryName      the name of the library to load
+@Return                             On success, the handle of the newly-loaded
+                                    library. Otherwise, zero.
+ */ /**************************************************************************/
+IMG_IMPORT IMG_HANDLE	PVRSRVLoadLibrary(const IMG_CHAR *pszLibraryName);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVUnloadLibrary
+@Description    Unload the Dynamic-Link (Shared) Library which had previously been
+				loaded using PVRSRVLoadLibrary(). See PVRSRVLoadLibrary() for
+				information regarding reference counting.
+@Input          hExtDrv             handle of the Dynamic-Link / Shared library
+                                    to unload, as returned by PVRSRVLoadLibrary().
+@Return                             PVRSRV_OK if successful. Otherwise,
+                                    PVRSRV_ERROR_UNLOAD_LIBRARY_FAILED.
+ */ /**************************************************************************/
+IMG_IMPORT PVRSRV_ERROR	PVRSRVUnloadLibrary(IMG_HANDLE hExtDrv);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVGetLibFuncAddr
+@Description    Returns the address of a function in a Dynamic-Link / Shared
+                Library.
+@Input          hExtDrv             handle of the Dynamic-Link / Shared Library
+                                    in which the function resides
+@Input          pszFunctionName     the name of the function
+@Output         ppvFuncAddr         on success, the address of the function
+                                    requested. Otherwise, NULL.
+@Return                             PVRSRV_OK if successful. Otherwise,
+                                    PVRSRV_ERROR_UNABLE_TO_GET_FUNC_ADDR.
+ */ /**************************************************************************/
+IMG_IMPORT PVRSRV_ERROR	PVRSRVGetLibFuncAddr(IMG_HANDLE hExtDrv, 
+                                            const IMG_CHAR *pszFunctionName, 
+                                            IMG_VOID **ppvFuncAddr);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVClockus
+@Description    Returns the current system clock time, in microseconds.  Note 
+                that this does not necessarily guarantee microsecond accuracy.
+@Return                             the curent system clock time, in 
+                                    microseconds
+ */ /**************************************************************************/
+IMG_IMPORT IMG_UINT32 PVRSRVClockus (void);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVWaitus
+@Description    Waits for the specified number of microseconds
+@Input          ui32Timeus          the time to wait for, in microseconds 
+@Return         None
+ */ /**************************************************************************/
+IMG_IMPORT IMG_VOID PVRSRVWaitus (IMG_UINT32 ui32Timeus);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVReleaseThreadQuanta
+@Description    Releases thread quanta
+@Return         None
+ */ /**************************************************************************/
+IMG_IMPORT IMG_VOID PVRSRVReleaseThreadQuanta (void);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVGetCurrentProcessID
+@Description    Returns handle for current process
+@Return         ID of current process
+ */ /**************************************************************************/
+IMG_IMPORT IMG_PID  IMG_CALLCONV PVRSRVGetCurrentProcessID(void);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVSetLocale
+@Description    Thin wrapper on posix setlocale
+@Input          pszLocale
+@Return         IMG_NULL (currently)
+ */ /**************************************************************************/
+IMG_IMPORT IMG_CHAR * IMG_CALLCONV PVRSRVSetLocale(const IMG_CHAR *pszLocale);
+
+
+/**************************************************************************/ /*!
+@Function       PVRSRVCreateAppHintState
+@Description    Create app hint state
+@Input          eModuleID       module id
+@Input          pszAppName      app name
+@Output         ppvState        state
+@Return         None
+ */ /**************************************************************************/
+IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVCreateAppHintState(IMG_MODULE_ID eModuleID,
+														const IMG_CHAR *pszAppName,
+														IMG_VOID **ppvState);
+/**************************************************************************/ /*!
+@Function       PVRSRVFreeAppHintState
+@Description    Free the app hint state, if it was created
+@Input          eModuleID       module id
+@Input          pvHintState     app hint state
+@Return         None
+ */ /**************************************************************************/
+IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVFreeAppHintState(IMG_MODULE_ID eModuleID,
+										 IMG_VOID *pvHintState);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVGetAppHint
+@Description    Return the value of this hint from state or use default
+@Input          pvHintState     hint state
+@Input          pszHintName     hint name
+@Input          eDataType       data type
+@Input          pvDefault       default value
+@Output         pvReturn        hint value
+@Return                         True if hint read, False if used default
+ */ /**************************************************************************/
+IMG_IMPORT IMG_BOOL IMG_CALLCONV PVRSRVGetAppHint(IMG_VOID			*pvHintState,
+												  const IMG_CHAR	*pszHintName,
+												  IMG_DATA_TYPE		eDataType,
+												  const IMG_VOID	*pvDefault,
+												  IMG_VOID			*pvReturn);
+
+/******************************************************************************
+ * Memory API(s)
+ *****************************************************************************/
+
+/* Exported APIs */
+/**************************************************************************/ /*!
+@Function       PVRSRVAllocUserModeMem
+@Description    Allocate a block of user-mode memory
+@Input          ui32Size    the amount of memory to allocate
+@Return                     On success, a pointer to the memory allocated.
+                            Otherwise, NULL.
+ */ /**************************************************************************/
+IMG_IMPORT IMG_PVOID IMG_CALLCONV PVRSRVAllocUserModeMem (IMG_SIZE_T ui32Size);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVCallocUserModeMem
+@Description    Allocate a block of user-mode memory
+@Input          ui32Size    the amount of memory to allocate
+@Return                     On success, a pointer to the memory allocated.
+                            Otherwise, NULL.
+ */ /**************************************************************************/
+IMG_IMPORT IMG_PVOID IMG_CALLCONV PVRSRVCallocUserModeMem (IMG_SIZE_T ui32Size);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVReallocUserModeMem
+@Description    Re-allocate a block of memory
+@Input          pvBase      the address of the existing memory, previously
+                            allocated with PVRSRVAllocUserModeMem
+@Input          uNewSize    the newly-desired size of the memory chunk
+@Return                     On success, a pointer to the memory block. If the
+                            size of the block could not be changed, the
+                            return value is NULL.
+ */ /**************************************************************************/
+IMG_IMPORT IMG_PVOID IMG_CALLCONV PVRSRVReallocUserModeMem (IMG_PVOID pvBase, IMG_SIZE_T uNewSize);
+/**************************************************************************/ /*!
+@Function       PVRSRVFreeUserModeMem
+@Description    Free a block of memory previously allocated with
+                PVRSRVAllocUserModeMem
+@Input          pvMem       pointer to the block of memory to be freed
+@Return         None
+ */ /**************************************************************************/
+IMG_IMPORT IMG_VOID  IMG_CALLCONV PVRSRVFreeUserModeMem (IMG_PVOID pvMem);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVMemCopy
+@Description    Copy a block of memory
+                Safe implementation of memset for use with device memory.
+@Input          pvDst       Pointer to the destination
+@Input          pvSrc       Pointer to the source location
+@Input          uiSize      The amount of memory to copy in bytes
+@Return         None
+ */ /**************************************************************************/
+IMG_IMPORT IMG_VOID PVRSRVMemCopy(IMG_VOID *pvDst, const IMG_VOID *pvSrc, IMG_SIZE_T uiSize);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVCachedMemCopy
+@Description    Copy a block of memory between two cached memory allocations.
+                For use only when source and destination are both cached memory allocations.
+@Input          pvDst       Pointer to the destination
+@Input          pvSrc       Pointer to the source location
+@Input          uiSize      The amount of memory to copy in bytes
+@Return         None
+ */ /**************************************************************************/
+IMG_IMPORT void PVRSRVCachedMemCopy(IMG_VOID *pvDst, const IMG_VOID *pvSrc, IMG_SIZE_T uiSize);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVDeviceMemCopy
+@Description    Copy a block of memory to/from a device memory allocation.
+                For use when one or both of the allocations is a device memory allocation.
+@Input          pvDst       Pointer to the destination
+@Input          pvSrc       Pointer to the source location
+@Input          uiSize      The amount of memory to copy in bytes
+@Return         None
+ */ /**************************************************************************/
+IMG_IMPORT IMG_VOID PVRSRVDeviceMemCopy(IMG_VOID *pvDst, const IMG_VOID *pvSrc, IMG_SIZE_T uiSize);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVMemSet
+@Description    Set all bytes in a region of memory to the specified value.
+                Safe implementation of memset for use with device memory.
+@Input          pvDest      Pointer to the start of the memory region
+@Input          ui8Value    The value to be written
+@Input          uiSize      The number of bytes to be set to ui8Value
+@Return         None
+ */ /**************************************************************************/
+IMG_IMPORT IMG_VOID PVRSRVMemSet(IMG_VOID *pvDest, IMG_UINT8 ui8Value, IMG_SIZE_T uiSize);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVCachedMemSet
+@Description    Set all bytes in a region of cached memory to the specified value.
+                For use only when the destination is a cached memory allocation.
+@Input          pvDest      Pointer to the start of the memory region
+@Input          ui8Value    The value to be written
+@Input          uiSize      The number of bytes to be set to ui8Value
+@Return         None
+ */ /**************************************************************************/
+IMG_IMPORT IMG_VOID PVRSRVCachedMemSet(IMG_VOID *pvDest, IMG_UINT8 ui8Value, IMG_SIZE_T uiSize);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVDeviceMemSet
+@Description    Set all bytes in a region of device memory to the specified value.
+                The destination pointer should be a device memory buffer.
+@Input          pvDest      Pointer to the start of the memory region
+@Input          ui8Value    The value to be written
+@Input          uiSize      The number of bytes to be set to ui8Value
+@Return         None
+ */ /**************************************************************************/
+IMG_IMPORT IMG_VOID PVRSRVDeviceMemSet(IMG_VOID *pvDest, IMG_UINT8 ui8Value, IMG_SIZE_T uiSize);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVLockProcessGlobalMutex
+@Description    Locking function for non-recursive coarse-grained mutex shared
+                between all threads in a proccess.
+@Return         None
+ */ /**************************************************************************/
+IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVLockProcessGlobalMutex(void);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVUnlockProcessGlobalMutex
+@Description    Unlocking function for non-recursive coarse-grained mutex shared
+                between all threads in a proccess.
+@Return         None
+ */ /**************************************************************************/
+IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVUnlockProcessGlobalMutex(void);
+
+
+typedef	struct _OS_MUTEX_ *PVRSRV_MUTEX_HANDLE;
+
+/**************************************************************************/ /*!
+@Function       PVRSRVCreateMutex
+@Description    creates a mutex
+@Output         phMutex             ptr to mutex handle
+@Return                             PVRSRV_OK on success. Otherwise, a PVRSRV_
+                                    error code
+ */ /**************************************************************************/
+#if !defined(PVR_DEBUG_MUTEXES)
+IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateMutex(PVRSRV_MUTEX_HANDLE *phMutex);
+#else
+IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateMutex(PVRSRV_MUTEX_HANDLE *phMutex,
+													   IMG_CHAR pszMutexName[],
+													   IMG_CHAR pszFilename[],
+													   IMG_INT iLine);
+#define PVRSRVCreateMutex(phMutex) \
+	PVRSRVCreateMutex(phMutex, #phMutex, __FILE__, __LINE__)
+#endif
+
+/**************************************************************************/ /*!
+@Function       PVRSRVDestroyMutex
+@Description    Create a mutex.
+@Input          hMutex              On success, filled with the new Mutex
+@Return                             PVRSRV_OK on success. Otherwise, a PVRSRV_
+                                    error code
+ */ /**********************************************************************/
+#if !defined(PVR_DEBUG_MUTEXES)
+IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyMutex(PVRSRV_MUTEX_HANDLE hMutex);
+#else
+IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyMutex(PVRSRV_MUTEX_HANDLE hMutex,
+														IMG_CHAR pszMutexName[],
+														IMG_CHAR pszFilename[],
+														IMG_INT iLine);
+#define PVRSRVDestroyMutex(hMutex) \
+	PVRSRVDestroyMutex(hMutex, #hMutex, __FILE__, __LINE__)
+#endif
+
+/**************************************************************************/ /*!
+@Function       PVRSRVLockMutex
+@Description    Lock the mutex passed
+@Input          hMutex              handle of the mutex to be locked
+@Return         None
+ */ /**********************************************************************/
+#if !defined(PVR_DEBUG_MUTEXES)
+IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVLockMutex(PVRSRV_MUTEX_HANDLE hMutex);
+#else
+IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVLockMutex(PVRSRV_MUTEX_HANDLE hMutex,
+												 IMG_CHAR pszMutexName[],
+												 IMG_CHAR pszFilename[],
+												 IMG_INT iLine);
+#define PVRSRVLockMutex(hMutex) \
+	PVRSRVLockMutex(hMutex, #hMutex, __FILE__, __LINE__)
+#endif
+
+/**************************************************************************/ /*!
+@Function       PVRSRVUnlockMutex
+@Description    Unlock the mutex passed
+@Input          hMutex              handle of the mutex to be unlocked
+@Return         None
+ */ /**********************************************************************/
+#if !defined(PVR_DEBUG_MUTEXES)
+IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVUnlockMutex(PVRSRV_MUTEX_HANDLE hMutex);
+#else
+IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVUnlockMutex(PVRSRV_MUTEX_HANDLE hMutex,
+												   IMG_CHAR pszMutexName[],
+												   IMG_CHAR pszFilename[],
+												   IMG_INT iLine);
+#define PVRSRVUnlockMutex(hMutex) \
+	PVRSRVUnlockMutex(hMutex, #hMutex, __FILE__, __LINE__)
+#endif
+
+struct _PVRSRV_SEMAPHORE_OPAQUE_STRUCT_;
+typedef	struct  _PVRSRV_SEMAPHORE_OPAQUE_STRUCT_ *PVRSRV_SEMAPHORE_HANDLE; /*!< Convenience typedef */
+
+
+#if defined(_MSC_VER)
+    /*! 
+      Used when waiting for a semaphore to become unlocked. Indicates that 
+      the caller is willing to wait forever.
+     */
+    #define IMG_SEMAPHORE_WAIT_INFINITE       ((IMG_UINT64)0xFFFFFFFFFFFFFFFF)
+#else
+    /*! 
+      Used when waiting for a semaphore to become unlocked. Indicates that 
+      the caller is willing to wait forever.
+     */
+  	#define IMG_SEMAPHORE_WAIT_INFINITE       ((IMG_UINT64)0xFFFFFFFFFFFFFFFFull)
+#endif
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVCreateSemaphore)
+#endif
+/**************************************************************************/ /*!
+@Function       PVRSRVCreateSemaphore
+@Description    Create a semaphore with an initial count
+@Output         phSemaphore         on success, ptr to the handle of the new 
+                                    semaphore. Otherwise, zero.
+@Input          iInitialCount       initial count
+@Return                             PVRSRV_OK on success. Otherwise, a PVRSRV_
+                                    error code
+ */ /**************************************************************************/
+static INLINE PVRSRV_ERROR PVRSRVCreateSemaphore(PVRSRV_SEMAPHORE_HANDLE *phSemaphore, 
+                                                IMG_INT iInitialCount)
+{
+	PVR_UNREFERENCED_PARAMETER(iInitialCount);
+	*phSemaphore = 0;
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVDestroySemaphore)
+#endif
+/**************************************************************************/ /*!
+@Function       PVRSRVDestroySemaphore
+@Description    destroy the semaphore passed
+@Input          hSemaphore          the semaphore to be destroyed
+@Return                             PVRSRV_OK on success. Otherwise, a PVRSRV_
+                                    error code
+ */ /**************************************************************************/
+static INLINE PVRSRV_ERROR PVRSRVDestroySemaphore(PVRSRV_SEMAPHORE_HANDLE hSemaphore)
+{
+	PVR_UNREFERENCED_PARAMETER(hSemaphore);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVWaitSemaphore)
+#endif
+/**************************************************************************/ /*!
+@Function       PVRSRVWaitSemaphore
+@Description    wait on the specified semaphore
+@Input          hSemaphore          the semephore on which to wait
+@Input          ui64TimeoutMicroSeconds the time to wait for the semaphore to
+                                    become unlocked, if locked when the function
+                                    is called.
+@Return                             PVRSRV_OK on success. Otherwise, a PVRSRV_
+                                    error code
+ */ /**************************************************************************/
+static INLINE PVRSRV_ERROR PVRSRVWaitSemaphore(PVRSRV_SEMAPHORE_HANDLE hSemaphore, 
+                                            IMG_UINT64 ui64TimeoutMicroSeconds)
+{
+	PVR_UNREFERENCED_PARAMETER(hSemaphore);
+	PVR_UNREFERENCED_PARAMETER(ui64TimeoutMicroSeconds);
+	return PVRSRV_ERROR_INVALID_PARAMS;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVPostSemaphore)
+#endif
+/**************************************************************************/ /*!
+@Function       PVRSRVPostSemaphore
+@Description    post semphore
+@Input          hSemaphore      handle to semaphore
+@Input          iPostCount      post count
+@Return         None
+ */ /**************************************************************************/
+static INLINE IMG_VOID PVRSRVPostSemaphore(PVRSRV_SEMAPHORE_HANDLE hSemaphore, IMG_INT iPostCount)
+{
+	PVR_UNREFERENCED_PARAMETER(hSemaphore);
+	PVR_UNREFERENCED_PARAMETER(iPostCount);
+}
+
+/* Non-exported APIs */
+#if defined(DEBUG) && (defined(__linux__) || defined(_WIN32) || defined(__QNXNTO__))
+/**************************************************************************/ /*!
+@Function       PVRSRVAllocUserModeMemTracking
+@Description    Wrapper function for malloc, used for memory-leak detection
+@Input          ui32Size            number of bytes to be allocated
+@Input          pszFileName         filename of the calling code
+@Input          ui32LineNumber      line number of the calling code
+@Return                             On success, a ptr to the newly-allocated
+                                    memory. Otherwise, NULL.
+ */ /**************************************************************************/
+IMG_IMPORT IMG_PVOID IMG_CALLCONV PVRSRVAllocUserModeMemTracking(IMG_SIZE_T ui32Size, 
+                                                                 IMG_CHAR *pszFileName, 
+                                                                 IMG_UINT32 ui32LineNumber);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVCallocUserModeMemTracking
+@Description    Wrapper function for calloc, used for memory-leak detection
+@Input          ui32Size            number of bytes to be allocated
+@Input          pszFileName         filename of the calling code
+@Input          ui32LineNumber      line number of the calling code
+@Return                             On success, a ptr to the newly-allocated
+                                    memory. Otherwise, NULL.
+ */ /**************************************************************************/
+IMG_IMPORT IMG_PVOID IMG_CALLCONV PVRSRVCallocUserModeMemTracking(IMG_SIZE_T ui32Size, 
+                                                                  IMG_CHAR *pszFileName, 
+                                                                  IMG_UINT32 ui32LineNumber);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVFreeUserModeMemTracking
+@Description    Wrapper for free - see PVRSRVAllocUserModeMemTracking
+@Input          pvMem               pointer to the memory to be freed
+@Return         None
+ */ /**************************************************************************/
+IMG_IMPORT IMG_VOID  IMG_CALLCONV PVRSRVFreeUserModeMemTracking(IMG_VOID *pvMem);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVReallocUserModeMemTracking
+@Description    Wrapper for realloc, used in memory-leak detection
+@Input          pvMem           pointer to the existing memory block
+@Input          ui32NewSize     the desired new size of the block
+@Input          pszFileName     the filename of the calling code
+@Input          ui32LineNumber  the line number of the calling code
+@Return                         on success, a pointer to the memory block.
+                                This may not necessarily be the same
+                                location as the block was at before the
+                                call. On failure, NULL is returned.
+ */ /**************************************************************************/
+IMG_IMPORT IMG_PVOID IMG_CALLCONV PVRSRVReallocUserModeMemTracking(IMG_VOID *pvMem, 
+                                                                IMG_SIZE_T ui32NewSize, 
+													            IMG_CHAR *pszFileName, 
+                                                                IMG_UINT32 ui32LineNumber);
+#endif /* defined(DEBUG) && (defined(__linux__) || defined(_WIN32)) */
+
+/**************************************************************************/ /*!
+@Function       PVRSRVDumpDebugInfo
+@Description    Dump debug information to kernel log
+@Input          psConnection    Services connection
+@Return         IMG_VOID
+ */ /**************************************************************************/
+IMG_IMPORT IMG_VOID
+PVRSRVDumpDebugInfo(const PVRSRV_CONNECTION *psConnection, IMG_UINT32 ui32VerbLevel);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVGetDevClockSpeed
+@Description    Gets the clock speed
+@Input          psConnection		Services connection
+@Input          psDevData			Pointer to the PVRSRV_DEV_DATA context
+@Output         pui32ClockSpeed     Variable for storing clock speed
+@Return         IMG_BOOL			True if the operation was successful
+ */ /**************************************************************************/
+IMG_IMPORT IMG_BOOL IMG_CALLCONV PVRSRVGetDevClockSpeed(const PVRSRV_CONNECTION *psConnection,
+														PVRSRV_DEV_DATA  *psDevData,
+														IMG_PUINT32 pui32ClockSpeed);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVResetHWRLogs
+@Description    Resets the HWR Logs buffer (the hardware recovery count is not reset)
+@Input          psConnection		Services connection
+@Input          psDevData			Pointer to the PVRSRV_DEV_DATA context
+@Return         PVRSRV_ERROR		PVRSRV_OK on success. Otherwise, a PVRSRV_
+                                	error code
+ */ /**************************************************************************/
+IMG_IMPORT PVRSRV_ERROR
+PVRSRVResetHWRLogs(const PVRSRV_CONNECTION *psConnection, PVRSRV_DEV_DATA  *psDevData);
+
+
+/******************************************************************************
+ * PVR Global Event Object - Event APIs
+ *****************************************************************************/
+
+/*****************************************************************************
+@Function       PVRSRVAcquireGlobalEventHandle
+@Description    Gets a handle to an event that is opened on the global
+                event object.
+@Input          psConnection    Services connection
+@Output         phEvent         Global event handle
+@Return                         PVRSRV_OK on success. Otherwise, a PVRSRV_
+                                error code
+ */ /**************************************************************************/
+IMG_IMPORT PVRSRV_ERROR
+PVRSRVAcquireGlobalEventHandle(const PVRSRV_CONNECTION *psConnection,
+                               IMG_HANDLE *phEvent);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVReleaseGlobalEventHandle
+@Description    Destroys the event handle previously acquired.
+@Input          psConnection    Services connection
+@Input          phEvent         Global event handle
+@Return                         PVRSRV_OK on success. Otherwise, a PVRSRV_
+                                error code
+ */ /**************************************************************************/
+IMG_IMPORT PVRSRV_ERROR
+PVRSRVReleaseGlobalEventHandle(const PVRSRV_CONNECTION *psConnection,
+                               IMG_HANDLE hEvent);
+
+/**************************************************************************/ /*!
+@Function       PVRSRVEventObjectWait
+@Description    Wait (block) on the OS-specific event object passed
+@Input          psConnection    Services connection
+@Input          hEvent          Global event handle to wait on
+@Return                         PVRSRV_OK on success. Otherwise, a PVRSRV_
+                                error code
+ */ /**************************************************************************/
+IMG_IMPORT PVRSRV_ERROR
+PVRSRVEventObjectWait(const PVRSRV_CONNECTION *psConnection,
+                      IMG_HANDLE hEvent);
+
+/******************************************************************************
+ * PVR Global Event Object - Event APIs End
+ *****************************************************************************/
+
+
+IMG_IMPORT PVRSRV_ERROR
+PVRSRVKickDevices(const PVRSRV_CONNECTION *psConnection);
+
+
+/**************************************************************************/ /*!
+@Function       PVRSRVSoftReset
+@Description    Resets some modules of the device
+@Input          psConnection    Services connection
+@Input          psDevData		Pointer to the PVRSRV_DEV_DATA context
+@Input          ui64ResetValue1 A mask for which each bit set corresponds
+                                to a module to reset (via the SOFT_RESET
+                                register).
+@Input          ui64ResetValue2 A mask for which each bit set corresponds
+                                to a module to reset (via the SOFT_RESET2
+                                register).
+@Return         PVRSRV_ERROR
+*/ /***************************************************************************/
+IMG_IMPORT PVRSRV_ERROR
+PVRSRVSoftReset(const PVRSRV_CONNECTION *psConnection,
+				PVRSRV_DEV_DATA  *psDevData,
+				IMG_UINT64 ui64ResetValue1,
+				IMG_UINT64 ui64ResetValue2);
+
+/*!
+ Time wrapping macro
+*/
+#define TIME_NOT_PASSED_UINT32(a,b,c)		(((a) - (b)) < (c))
+
+
+#if defined (__cplusplus)
+}
+#endif
+#endif /* __SERVICES_H__ */
+
+/******************************************************************************
+ End of file (services.h)
+******************************************************************************/
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/include/servicesext.h b/drivers/external_drivers/intel_media/graphics/rgx/include/servicesext.h
new file mode 100644
index 0000000..f6fc8c1
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/include/servicesext.h
@@ -0,0 +1,195 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services definitions required by external drivers
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Provides services data structures, defines and prototypes
+                required by external drivers
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__SERVICESEXT_H__)
+#define __SERVICESEXT_H__
+
+/* include/ */
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "img_3dtypes.h"
+#include "pvrsrv_device_types.h"
+
+
+/*
+ * Lock buffer read/write flags
+ */
+#define PVRSRV_LOCKFLG_READONLY     	(1)		/*!< The locking process will only read the locked surface */
+
+/*!
+ *****************************************************************************
+ *	Services State
+ *****************************************************************************/
+typedef enum _PVRSRV_SERVICES_STATE_
+{
+	PVRSRV_SERVICES_STATE_OK = 0,
+	PVRSRV_SERVICES_STATE_BAD,
+} PVRSRV_SERVICES_STATE;
+
+
+/*!
+ *****************************************************************************
+ *	States for power management
+ *****************************************************************************/
+/*!
+  System Power State Enum
+ */
+typedef enum _PVRSRV_SYS_POWER_STATE_
+{
+	PVRSRV_SYS_POWER_STATE_Unspecified		= -1,	/*!< Unspecified : Uninitialised */
+	PVRSRV_SYS_POWER_STATE_OFF				= 0,	/*!< Off */
+	PVRSRV_SYS_POWER_STATE_ON				= 1,	/*!< On */
+
+	PVRSRV_SYS_POWER_STATE_FORCE_I32 = 0x7fffffff   /*!< Force enum to be at least 32-bits wide */
+
+} PVRSRV_SYS_POWER_STATE, *PPVRSRV_SYS_POWER_STATE; /*!< Typedef for ptr to PVRSRV_SYS_POWER_STATE */
+
+/*!
+  Device Power State Enum
+ */
+typedef enum _PVRSRV_DEV_POWER_STATE_
+{
+	PVRSRV_DEV_POWER_STATE_DEFAULT	= -1,	/*!< Default state for the device */
+	PVRSRV_DEV_POWER_STATE_OFF		= 0,	/*!< Unpowered */
+	PVRSRV_DEV_POWER_STATE_ON		= 1,	/*!< Running */
+
+	PVRSRV_DEV_POWER_STATE_FORCE_I32 = 0x7fffffff   /*!< Force enum to be at least 32-bits wide */
+
+} PVRSRV_DEV_POWER_STATE, *PPVRSRV_DEV_POWER_STATE;	/*!< Typedef for ptr to PVRSRV_DEV_POWER_STATE */ /* PRQA S 3205 */
+
+
+/* Power transition handler prototypes */
+
+/*! 
+  Typedef for a pointer to a Function that will be called before a transition
+  from one power state to another. See also PFN_POST_POWER.
+ */
+typedef PVRSRV_ERROR (*PFN_PRE_POWER) (IMG_HANDLE				hDevHandle,
+									   PVRSRV_DEV_POWER_STATE	eNewPowerState,
+									   PVRSRV_DEV_POWER_STATE	eCurrentPowerState,
+									   IMG_BOOL					bForced);
+/*! 
+  Typedef for a pointer to a Function that will be called after a transition
+  from one power state to another. See also PFN_PRE_POWER.
+ */
+typedef PVRSRV_ERROR (*PFN_POST_POWER) (IMG_HANDLE				hDevHandle,
+										PVRSRV_DEV_POWER_STATE	eNewPowerState,
+										PVRSRV_DEV_POWER_STATE	eCurrentPowerState,
+										IMG_BOOL				bForced);
+
+/* Clock speed handler prototypes */
+
+/*!
+  Typedef for a pointer to a Function that will be caled before a transition
+  from one clockspeed to another. See also PFN_POST_CLOCKSPEED_CHANGE.
+ */
+typedef PVRSRV_ERROR (*PFN_PRE_CLOCKSPEED_CHANGE) (IMG_HANDLE				hDevHandle,
+												   PVRSRV_DEV_POWER_STATE	eCurrentPowerState);
+
+/*!
+  Typedef for a pointer to a Function that will be caled after a transition
+  from one clockspeed to another. See also PFN_PRE_CLOCKSPEED_CHANGE.
+ */
+typedef PVRSRV_ERROR (*PFN_POST_CLOCKSPEED_CHANGE) (IMG_HANDLE				hDevHandle,
+													PVRSRV_DEV_POWER_STATE	eCurrentPowerState);
+
+/*!
+  Typedef for a pointer to a function that will be called to transition the device
+  to a forced idle state. Used in unison with (forced) power requests, DVFS and cluster count changes.
+ */
+typedef PVRSRV_ERROR (*PFN_FORCED_IDLE_REQUEST) (IMG_HANDLE				hDevHandle,
+							IMG_BOOL			bDeviceOffPermitted);
+
+/*!
+  Typedef for a pointer to a function that will be called to cancel a forced idle state
+  and return the firmware back to a state where the hardware can be scheduled.
+ */
+typedef PVRSRV_ERROR (*PFN_FORCED_IDLE_CANCEL_REQUEST) (IMG_HANDLE			hDevHandle);
+
+typedef PVRSRV_ERROR (*PFN_DUST_COUNT_REQUEST) (IMG_HANDLE			hDevHandle,
+						IMG_UINT32			ui32DustCount);
+
+/*!
+ *****************************************************************************
+ * Enumeration of possible alpha types.
+ *****************************************************************************/
+typedef enum _PVRSRV_COLOURSPACE_FORMAT_ {
+	PVRSRV_COLOURSPACE_FORMAT_UNKNOWN		=  0x00000000,  /*!< Colourspace Format: Unknown */
+	PVRSRV_COLOURSPACE_FORMAT_LINEAR		=  0x00010000,  /*!< Colourspace Format: Linear */
+	PVRSRV_COLOURSPACE_FORMAT_NONLINEAR		=  0x00020000,  /*!< Colourspace Format: Non-Linear */
+	PVRSRV_COLOURSPACE_FORMAT_MASK			=  0x000F0000,  /*!< Colourspace Format Mask */
+} PVRSRV_COLOURSPACE_FORMAT;
+
+
+/*!
+ * Drawable orientation (in degrees clockwise).
+ */
+typedef enum _IMG_ROTATION_ PVRSRV_ROTATION;
+
+#define PVRSRV_ROTATE_0 IMG_ROTATION_0DEG
+#define PVRSRV_ROTATE_90 IMG_ROTATION_90DEG
+#define PVRSRV_ROTATE_180 IMG_ROTATION_180DEG
+#define PVRSRV_ROTATE_270 IMG_ROTATION_270DEG
+#define PVRSRV_FLIP_Y IMG_ROTATION_FLIP_Y
+
+
+/*!
+ *****************************************************************************
+ * This structure is used for OS independent registry (profile) access
+ *****************************************************************************/
+
+typedef struct _PVRSRV_REGISTRY_INFO
+{
+	IMG_UINT32			ui32DevCookie;
+	IMG_PCHAR			pszKey;
+	IMG_PCHAR			pszValue;
+	IMG_PCHAR			pszBuf;
+	IMG_UINT32			ui32BufSize;
+} PVRSRV_REGISTRY_INFO, *PPVRSRV_REGISTRY_INFO;
+
+#endif /* __SERVICESEXT_H__ */
+/*****************************************************************************
+ End of file (servicesext.h)
+*****************************************************************************/
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/include/sync_external.h b/drivers/external_drivers/intel_media/graphics/rgx/include/sync_external.h
new file mode 100644
index 0000000..7ee0945
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/include/sync_external.h
@@ -0,0 +1,79 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services external synchronisation interface header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines synchronisation structures that are visible internally
+                and externally
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_types.h"
+
+#ifndef _SYNC_EXTERNAL_
+#define _SYNC_EXTERNAL_
+
+#define SYNC_MAX_CLASS_NAME_LEN 32
+
+typedef IMG_HANDLE SYNC_BRIDGE_HANDLE;
+typedef struct SYNC_PRIM_CONTEXT *PSYNC_PRIM_CONTEXT;
+typedef struct _SYNC_OP_COOKIE_ *PSYNC_OP_COOKIE;
+
+typedef struct PVRSRV_CLIENT_SYNC_PRIM
+{
+	volatile IMG_UINT32	*pui32LinAddr;	/*!< User pointer to the primitive */
+} PVRSRV_CLIENT_SYNC_PRIM;
+
+typedef IMG_HANDLE PVRSRV_CLIENT_SYNC_PRIM_HANDLE;
+
+typedef struct PVRSRV_CLIENT_SYNC_PRIM_OP
+{
+	IMG_UINT32 					ui32Flags;				/*!< Operation flags */
+#define PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK	(1 << 0)
+#define PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE	(1 << 1)
+#define PVRSRV_CLIENT_SYNC_PRIM_OP_UNFENCED_UPDATE (PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE | (1<<2))
+	PVRSRV_CLIENT_SYNC_PRIM		*psSync;				/*!< Pointer to the client sync */
+	IMG_UINT32					ui32FenceValue;			/*!< The Fence value (only used if PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK is set) */
+	IMG_UINT32					ui32UpdateValue;		/*!< The Update value (only used if PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE is set) */
+} PVRSRV_CLIENT_SYNC_PRIM_OP;
+
+#if defined(__KERNEL__) && defined(ANDROID) && !defined(__GENKSYMS__)
+#define __pvrsrv_defined_struct_enum__
+#include <services_kernel_client.h>
+#endif
+
+#endif /* _SYNC_EXTERNAL_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/kernel/drivers/staging/imgtec/pvr_sync.c b/drivers/external_drivers/intel_media/graphics/rgx/kernel/drivers/staging/imgtec/pvr_sync.c
new file mode 100644
index 0000000..a925df7
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/kernel/drivers/staging/imgtec/pvr_sync.c
@@ -0,0 +1,2182 @@
+/*************************************************************************/ /*!
+@File           pvr_sync.c
+@Title          Kernel driver for Android's sync mechanism
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+/* vi: set ts=8: */
+
+#include "pvr_sync.h"
+#include "pvr_fd_sync_kernel.h"
+#include "services_kernel_client.h"
+
+#include <linux/slab.h>
+#include <linux/file.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/version.h>
+#include <linux/syscalls.h>
+#include <linux/miscdevice.h>
+#include <linux/anon_inodes.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0))
+#include <linux/sync.h>
+#ifndef CONFIG_SW_SYNC_USER
+#include <linux/sw_sync.h>
+#endif
+#else
+#include <../drivers/staging/android/sync.h>
+#ifndef CONFIG_SW_SYNC_USER
+#include <../drivers/staging/android/sw_sync.h>
+#endif
+#endif
+
+/* #define DEBUG_OUTPUT 1 */
+
+#ifdef DEBUG_OUTPUT
+#define DPF(fmt, ...) pr_err("pvr_sync: " fmt "\n", __VA_ARGS__)
+#else
+#define DPF(fmt, ...) do {} while (0)
+#endif
+
+#define PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, fmt, ...) \
+	do { \
+		if (pfnDumpDebugPrintf) { \
+			pfnDumpDebugPrintf(fmt, __VA_ARGS__); \
+		} else { \
+			pr_info("pvr_sync: " fmt, __VA_ARGS__); \
+		} \
+	} while (0)
+
+#define SYNC_MAX_POOL_SIZE 10
+
+enum {
+	SYNC_TL_TYPE = 0,
+	SYNC_PT_FENCE_TYPE = 1,
+	SYNC_PT_CLEANUP_TYPE = 2,
+	SYNC_PT_FOREIGN_FENCE_TYPE = 3,
+	SYNC_PT_FOREIGN_CLEANUP_TYPE = 4,
+};
+
+/* Services client sync prim wrapper. This is used to hold debug information
+ * and make it possible to cache unused syncs. */
+struct pvr_sync_native_sync_prim {
+	/* List for the sync pool support. */
+	struct list_head list;
+
+	/* Base services sync prim structure */
+	struct PVRSRV_CLIENT_SYNC_PRIM *client_sync;
+
+	/* The next queued value which should be used */
+	u32 next_value;
+
+	/* Every sync data will get some unique id */
+	u32 id;
+
+	/* FWAddr used by the client sync */
+	u32 vaddr;
+
+	/* The type this sync is used for in our driver. Used in
+	 * pvr_sync_debug_request. */
+	u8 type;
+
+	/* A debug class name also printed in pvr_sync_debug_request */
+	char class[32];
+};
+
+/* This is the IMG extension of a sync_timeline */
+struct pvr_sync_timeline {
+	/* Original timeline struct. Needs to come first. */
+	struct sync_timeline obj;
+
+	/* Global timeline list support */
+	struct list_head list;
+
+	/* Timeline sync */
+	struct pvr_sync_native_sync_prim *timeline_sync;
+
+	/* Should we do timeline idle detection when creating a new fence? */
+	bool fencing_enabled;
+};
+
+struct pvr_sync_tl_to_signal {
+	/* List entry support for the list of timelines which needs signaling */
+	struct list_head list;
+
+	/* The timeline to signal */
+	struct pvr_sync_timeline *timeline;
+};
+
+struct pvr_sync_kernel_pair {
+	/* Binary sync point representing the android native sync in hw. */
+	struct pvr_sync_native_sync_prim *fence_sync;
+
+	/* Cleanup sync structure.
+	 * If the base sync prim is used for "checking" only within a gl stream,
+	 * there is no way of knowing when this has happened. So use a second
+	 * sync prim which just gets updated and check the update count when
+	 * freeing this struct. */
+	struct pvr_sync_native_sync_prim *cleanup_sync;
+
+	/* Sync points can go away when there are deferred hardware operations
+	 * still outstanding. We must not free the SERVER_SYNC_PRIMITIVE until
+	 * the hardware is finished, so we add it to a defer list which is
+	 * processed periodically ("defer-free").
+	 *
+	 * Note that the defer-free list is global, not per-timeline.
+	 */
+	struct list_head list;
+};
+
+struct pvr_sync_data {
+	/* Every sync point has a services sync object. This object is used
+	 * by the hardware to enforce ordering -- it is attached as a source
+	 * dependency to various commands.
+	 */
+	struct pvr_sync_kernel_pair *kernel;
+
+	/* The timeline update value for this sync point. */
+	u32 timeline_update_value;
+
+	/* This refcount is incremented at create and dup time, and decremented
+	 * at free time. It ensures the object doesn't start the defer-free
+	 * process until it is no longer referenced.
+	 */
+	atomic_t refcount;
+};
+
+struct pvr_sync_alloc_data {
+	struct pvr_sync_data *sync_data;
+	struct file *file;
+	/* alloc syncs need a reference to the timeline for timeline sync
+	 * access during the operation scheduling. There is currently no way
+	 * to access the timeline's kref to take a reference count directly,
+	 * which means there is a possibility of the timeline still having a
+	 * reference after it has been free'd.
+	 *
+	 * We believe this is a non-issue, so long as the userspace application
+	 * holds a fd open to the corresponding pvr_sync node for the length
+	 * of time the alloc sync is alive. This holds the timeline open, and
+	 * as alloc syncs are short lived, this should not be harmful.
+	 *
+	 * If an application is closed, it is not determined if the timeline
+	 * fd will be closed (possibly destroying the timeline) before any
+	 * alloc syncs are closed. Due to this, the alloc sync release method
+	 * /must not/ assume this timeline pointer is valid */
+	struct pvr_sync_timeline *timeline;
+};
+
+/* This is the IMG extension of a sync_pt */
+struct pvr_sync_pt {
+	/* Original sync_pt structure. Needs to come first. */
+	struct sync_pt pt;
+
+	/* Private shared data */
+	struct pvr_sync_data *sync_data;
+};
+
+/* This is the IMG extension of a sync_fence */
+struct pvr_sync_fence {
+	/* Original sync_fence structure. Needs to come first. */
+	struct sync_fence *fence;
+
+	/* To ensure callbacks are always received for fences / sync_pts, even
+	 * after the fence has been 'put' (freed), we must take a reference to
+	 * the fence. We still need to 'put' the fence ourselves, but this might
+	 * happen in irq context, where fput() is not allowed (in kernels <3.6).
+	 * We must add the fence to a list which is processed in WQ context.
+	 */
+	struct list_head list;
+};
+
+/* Any sync point from a foreign (non-PVR) timeline needs to have a "shadow"
+ * sync prim. This is modelled as a software operation. The foreign driver
+ * completes the operation by calling a callback we registered with it. */
+struct pvr_sync_fence_waiter {
+	/* Base sync driver waiter structure */
+	struct sync_fence_waiter waiter;
+
+	/* "Shadow" sync prim backing the foreign driver's sync_pt */
+	struct pvr_sync_kernel_pair *kernel;
+
+	/* Optimizes lookup of fence for defer-put operation */
+	struct pvr_sync_fence *sync_fence;
+};
+
+/* Global data for the sync driver */
+static struct {
+	/* Services connection */
+	void *device_cookie;
+
+	/* Complete notify handle */
+	void *command_complete_handle;
+
+	/* defer_free workqueue. Syncs may still be in use by the HW when freed,
+	 * so we have to keep them around until the HW is done with them at
+	 * some later time. This workqueue iterates over the list of free'd
+	 * syncs, checks if they are in use, and frees the sync device memory
+	 * when done with. */
+	struct workqueue_struct *defer_free_wq;
+	struct work_struct defer_free_work;
+
+	/* check_status workqueue: When a foreign point is completed, a SW
+	 * operation marks the sync as completed to allow the operations to
+	 * continue. This completion may require the hardware to be notified,
+	 * which may be expensive/take locks, so we push that to a workqueue
+	 */
+	struct workqueue_struct *check_status_wq;
+	struct work_struct check_status_work;
+
+	/* Context used to create client sync prims. */
+	struct SYNC_PRIM_CONTEXT *sync_prim_context;
+
+	/* Debug notify handle */
+	void *debug_notify_handle;
+
+	/* Unique id counter for the sync prims */
+	atomic_t sync_id;
+
+	/* The global event object (used to wait between checks for deferred-
+	 * free sync status) */
+	void *event_object_handle;
+} pvr_sync_data;
+
+/* List of timelines created by this driver */
+static LIST_HEAD(timeline_list);
+static DEFINE_MUTEX(timeline_list_mutex);
+
+/* Sync pool support */
+static LIST_HEAD(sync_pool_free_list);
+static LIST_HEAD(sync_pool_active_list);
+static DEFINE_MUTEX(sync_pool_mutex);
+static s32 sync_pool_size;
+static u32 sync_pool_created;
+static u32 sync_pool_reused;
+
+/* The "defer-free" object list. Driver global. */
+static LIST_HEAD(sync_prim_free_list);
+static DEFINE_SPINLOCK(sync_prim_free_list_spinlock);
+
+/* The "defer-put" object list. Driver global. */
+static LIST_HEAD(sync_fence_put_list);
+static DEFINE_SPINLOCK(sync_fence_put_list_spinlock);
+
+static inline void set_sync_value(struct pvr_sync_native_sync_prim *sync,
+				  u32 value)
+{
+	*(sync->client_sync->pui32LinAddr) = value;
+}
+
+static inline u32 get_sync_value(struct pvr_sync_native_sync_prim *sync)
+{
+	return *(sync->client_sync->pui32LinAddr);
+}
+
+static inline void complete_sync(struct pvr_sync_native_sync_prim *sync)
+{
+	*(sync->client_sync->pui32LinAddr) = sync->next_value;
+}
+
+static inline int is_sync_met(struct pvr_sync_native_sync_prim *sync)
+{
+	return *(sync->client_sync->pui32LinAddr) == sync->next_value;
+}
+
+static struct pvr_sync_alloc_data *pvr_sync_alloc_fence_fdget(int fd);
+
+#ifdef DEBUG_OUTPUT
+
+static char *debug_info_timeline(struct sync_timeline *tl)
+{
+	struct pvr_sync_timeline *timeline = (struct pvr_sync_timeline *)tl;
+	static char info[256];
+
+	info[0] = '\0';
+
+	snprintf(info, sizeof(info),
+		 "n='%s' id=%u fw=0x%x tl_curr=%u tl_next=%u",
+		 tl->name, timeline->timeline_sync->id,
+		 timeline->timeline_sync->vaddr,
+		 get_sync_value(timeline->timeline_sync),
+		 timeline->timeline_sync->next_value);
+
+	return info;
+}
+
+static char *debug_info_sync_pt(struct sync_pt *pt)
+{
+	struct pvr_sync_pt *pvr_pt = (struct pvr_sync_pt *)pt;
+	struct pvr_sync_kernel_pair *kernel = pvr_pt->sync_data->kernel;
+	static char info[256], info1[256];
+
+	info[0] = '\0';
+	info1[0] = '\0';
+
+	if (kernel) {
+		struct pvr_sync_native_sync_prim *cleanup_sync =
+			kernel->cleanup_sync;
+
+		if (cleanup_sync) {
+			snprintf(info1, sizeof(info1),
+				 " # cleanup: id=%u fw=0x%x curr=%u next=%u",
+				 cleanup_sync->id,
+				 cleanup_sync->vaddr,
+				 get_sync_value(cleanup_sync),
+				 cleanup_sync->next_value);
+		}
+
+		snprintf(info, sizeof(info),
+			 "status=%d tl_taken=%u ref=%d # sync: id=%u fw=0x%x curr=%u next=%u%s # tl: %s",
+			 pt->status,
+			 pvr_pt->sync_data->timeline_update_value,
+			 atomic_read(&pvr_pt->sync_data->refcount),
+			 kernel->fence_sync->id,
+			 kernel->fence_sync->vaddr,
+			 get_sync_value(kernel->fence_sync),
+			 kernel->fence_sync->next_value,
+			 info1, debug_info_timeline(pt->parent));
+	} else {
+		snprintf(info, sizeof(info),
+			 "status=%d tl_taken=%u ref=%d # sync: idle # tl: %s",
+			 pt->status,
+			 pvr_pt->sync_data->timeline_update_value,
+			 atomic_read(&pvr_pt->sync_data->refcount),
+			 debug_info_timeline(pt->parent));
+	}
+
+	return info;
+}
+
+#endif /* DEBUG_OUTPUT */
+
+static enum PVRSRV_ERROR
+sync_pool_get(struct pvr_sync_native_sync_prim **_sync,
+	      const char *class_name, u8 type)
+{
+	struct pvr_sync_native_sync_prim *sync;
+	enum PVRSRV_ERROR error = PVRSRV_OK;
+
+	mutex_lock(&sync_pool_mutex);
+
+	if (list_empty(&sync_pool_free_list)) {
+		/* If there is nothing in the pool, create a new sync prim. */
+		sync = kmalloc(sizeof(struct pvr_sync_native_sync_prim),
+			       GFP_KERNEL);
+		if (!sync) {
+			pr_err("pvr_sync: %s: Failed to allocate sync data",
+			       __func__);
+			error = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto err_unlock;
+		}
+
+		error = SyncPrimAlloc(pvr_sync_data.sync_prim_context,
+				      &sync->client_sync, class_name);
+		if (error != PVRSRV_OK) {
+			pr_err("pvr_sync: %s: Failed to allocate sync prim (%s)",
+			       __func__, PVRSRVGetErrorStringKM(error));
+			goto err_free;
+		}
+
+		sync->vaddr = SyncPrimGetFirmwareAddr(sync->client_sync);
+
+		list_add_tail(&sync->list, &sync_pool_active_list);
+		++sync_pool_created;
+	} else {
+		sync = list_first_entry(&sync_pool_free_list,
+					struct pvr_sync_native_sync_prim, list);
+		list_move_tail(&sync->list, &sync_pool_active_list);
+		--sync_pool_size;
+		++sync_pool_reused;
+	}
+
+	sync->id = atomic_inc_return(&pvr_sync_data.sync_id);
+	sync->type = type;
+
+	strncpy(sync->class, class_name, sizeof(sync->class));
+	/* Its crucial to reset the sync to zero */
+	set_sync_value(sync, 0);
+	sync->next_value = 0;
+
+	*_sync = sync;
+err_unlock:
+	mutex_unlock(&sync_pool_mutex);
+	return error;
+
+err_free:
+	kfree(sync);
+	goto err_unlock;
+}
+
+static void sync_pool_put(struct pvr_sync_native_sync_prim *sync)
+{
+	mutex_lock(&sync_pool_mutex);
+
+	if (sync_pool_size < SYNC_MAX_POOL_SIZE) {
+		/* Mark it as unused */
+		set_sync_value(sync, 0xffffffff);
+
+		list_move(&sync->list, &sync_pool_free_list);
+		++sync_pool_size;
+	} else {
+		/* Mark it as invalid */
+		set_sync_value(sync, 0xdeadbeef);
+
+		list_del(&sync->list);
+		SyncPrimFree(sync->client_sync);
+		kfree(sync);
+	}
+
+	mutex_unlock(&sync_pool_mutex);
+}
+
+static void sync_pool_clear(void)
+{
+	struct pvr_sync_native_sync_prim *sync, *n;
+
+	mutex_lock(&sync_pool_mutex);
+
+	list_for_each_entry_safe(sync, n, &sync_pool_free_list, list) {
+		/* Mark it as invalid */
+		set_sync_value(sync, 0xdeadbeef);
+
+		list_del(&sync->list);
+		SyncPrimFree(sync->client_sync);
+		kfree(sync);
+		--sync_pool_size;
+	}
+
+	mutex_unlock(&sync_pool_mutex);
+}
+
+static void pvr_sync_debug_request(void *hDebugRequestHandle,
+				   u32 ui32VerbLevel)
+{
+	struct pvr_sync_native_sync_prim *sync;
+
+	static const char *const type_names[] = {
+		"Timeline", "Fence", "Cleanup",
+		"Foreign Fence", "Foreign Cleanup"
+	};
+
+	if (ui32VerbLevel == DEBUG_REQUEST_VERBOSITY_HIGH) {
+		mutex_lock(&sync_pool_mutex);
+
+		PVR_DUMPDEBUG_LOG(g_pfnDumpDebugPrintf,
+				  "Dumping all pending android native syncs (Pool usage: %d%% - %d %d)",
+				  sync_pool_reused ?
+				  (10000 /
+				   ((sync_pool_created + sync_pool_reused) *
+				    100 / sync_pool_reused)) : 0,
+				  sync_pool_created, sync_pool_reused);
+
+		list_for_each_entry(sync, &sync_pool_active_list, list) {
+			if (is_sync_met(sync))
+				continue;
+
+			BUG_ON(sync->type >= ARRAY_SIZE(type_names));
+
+			PVR_DUMPDEBUG_LOG(g_pfnDumpDebugPrintf,
+					  "\tID = %d, FWAddr = 0x%08x: Current = 0x%08x, Next = 0x%08x, %s (%s)",
+					  sync->id, sync->vaddr,
+					  get_sync_value(sync),
+					  sync->next_value,
+					  sync->class,
+					  type_names[sync->type]);
+		}
+#if 0
+		PVR_DUMPDEBUG_LOG(g_pfnDumpDebugPrintf,
+				  "Dumping all unused syncs");
+		list_for_each_entry(sync, &sync_pool_free_list, list) {
+			BUG_ON(sync->type >= ARRAY_SIZE(type_names));
+
+			PVR_DUMPDEBUG_LOG(g_pfnDumpDebugPrintf,
+					  "\tID = %d, FWAddr = 0x%08x: Current = 0x%08x, Next = 0x%08x, %s (%s)",
+					  sync->id, sync->vaddr,
+					  get_sync_value(sync),
+					  sync->next_value,
+					  sync->class,
+					  type_names[sync->type]);
+		}
+#endif
+		mutex_unlock(&sync_pool_mutex);
+	}
+}
+
+static struct sync_pt *pvr_sync_dup(struct sync_pt *sync_pt)
+{
+	struct pvr_sync_pt *pvr_pt_a = (struct pvr_sync_pt *)sync_pt;
+	struct pvr_sync_pt *pvr_pt_b = NULL;
+
+	DPF("%s: # %s", __func__, debug_info_sync_pt(sync_pt));
+
+	pvr_pt_b = (struct pvr_sync_pt *)
+		sync_pt_create(pvr_pt_a->pt.parent, sizeof(struct pvr_sync_pt));
+	if (!pvr_pt_b) {
+		pr_err("pvr_sync: %s: Failed to dup sync pt", __func__);
+		goto err_out;
+	}
+
+	atomic_inc(&pvr_pt_a->sync_data->refcount);
+
+	pvr_pt_b->sync_data = pvr_pt_a->sync_data;
+
+err_out:
+	return (struct sync_pt *)pvr_pt_b;
+}
+
+static int pvr_sync_has_signaled(struct sync_pt *sync_pt)
+{
+	struct pvr_sync_pt *pvr_pt = (struct pvr_sync_pt *)sync_pt;
+
+	DPF("%s: # %s", __func__, debug_info_sync_pt(sync_pt));
+
+	/* Idle syncs are always signaled */
+	if (!pvr_pt->sync_data->kernel)
+		return 1;
+
+	return is_sync_met(pvr_pt->sync_data->kernel->fence_sync);
+}
+
+static int pvr_sync_compare(struct sync_pt *a, struct sync_pt *b)
+{
+	u32 a1 = ((struct pvr_sync_pt *)a)->sync_data->timeline_update_value;
+	u32 b1 = ((struct pvr_sync_pt *)b)->sync_data->timeline_update_value;
+
+	DPF("%s: a # %s", __func__, debug_info_sync_pt(a));
+	DPF("%s: b # %s", __func__, debug_info_sync_pt(b));
+
+	if (a1 == b1)
+		return 0;
+
+	/* Take integer wrapping into account */
+	return ((s32)a1 - (s32)b1) < 0 ? -1 : 1;
+}
+
+static void wait_for_sync(struct pvr_sync_native_sync_prim *sync)
+{
+#ifndef NO_HARDWARE
+	void *event_object = NULL;
+	enum PVRSRV_ERROR error = PVRSRV_OK;
+
+	while (sync && !is_sync_met(sync)) {
+		if (!event_object) {
+			error = OSEventObjectOpen(
+				pvr_sync_data.event_object_handle,
+				&event_object);
+			if (error != PVRSRV_OK) {
+				pr_err("pvr_sync: %s: Error opening event object (%s)\n",
+					__func__,
+					PVRSRVGetErrorStringKM(error));
+				break;
+			}
+		}
+		error = OSEventObjectWait(event_object);
+		if (error != PVRSRV_OK && error != PVRSRV_ERROR_TIMEOUT) {
+			pr_err("pvr_sync: %s: Error waiting on event object (%s)\n",
+				__func__,
+				PVRSRVGetErrorStringKM(error));
+		}
+	}
+
+	if (event_object)
+		OSEventObjectClose(event_object);
+#endif
+}
+
+static void pvr_sync_release_timeline(struct sync_timeline *psObj)
+{
+	struct pvr_sync_timeline *timeline = (struct pvr_sync_timeline *)psObj;
+
+	DPF("%s: # %s", __func__, debug_info_timeline(psObj));
+
+	wait_for_sync(timeline->timeline_sync);
+
+	/*
+	 * If pvr_sync_open failed after calling sync_timeline_create, this
+	 * can be called with a timeline that has not got a timeline sync
+	 * or been added to our timeline list. Use a NULL timeline_sync
+	 * to detect and handle this condition
+	 */
+	if (timeline->timeline_sync) {
+
+		mutex_lock(&timeline_list_mutex);
+		list_del(&timeline->list);
+		mutex_unlock(&timeline_list_mutex);
+
+		OSAcquireBridgeLock();
+		sync_pool_put(timeline->timeline_sync);
+		OSReleaseBridgeLock();
+	}
+}
+
+static void pvr_sync_print_obj(struct seq_file *s,
+			       struct sync_timeline *sync_timeline)
+{
+	struct pvr_sync_timeline *timeline =
+	    (struct pvr_sync_timeline *)sync_timeline;
+
+	seq_printf(s, "id=%u fw=0x%x curr=%u next=%u",
+			   timeline->timeline_sync->id,
+			   timeline->timeline_sync->vaddr,
+			   get_sync_value(timeline->timeline_sync),
+			   timeline->timeline_sync->next_value);
+}
+
+static void pvr_sync_print_pt(struct seq_file *s, struct sync_pt *sync_pt)
+{
+	struct pvr_sync_pt *pvr_pt = (struct pvr_sync_pt *)sync_pt;
+	struct pvr_sync_kernel_pair *kernel;
+
+	if (!pvr_pt->sync_data)
+		return;
+
+	kernel = pvr_pt->sync_data->kernel;
+	if (kernel) {
+		if (!kernel->cleanup_sync) {
+			seq_printf(s, "tl_taken=%u ref=%d # sync: id=%u fw=0x%x curr=%u next=%u",
+				 pvr_pt->sync_data->timeline_update_value,
+				 atomic_read(&pvr_pt->sync_data->refcount),
+				 kernel->fence_sync->id,
+				 kernel->fence_sync->vaddr,
+				 get_sync_value(kernel->fence_sync),
+				 kernel->fence_sync->next_value);
+		} else {
+			seq_printf(s, "tl_taken=%u ref=%d # sync: id=%u fw=0x%x curr=%u next=%u\n   cleanup: id=%u fw=0x%x curr=%u next=%u",
+				 pvr_pt->sync_data->timeline_update_value,
+				 atomic_read(&pvr_pt->sync_data->refcount),
+				 kernel->fence_sync->id,
+				 kernel->fence_sync->vaddr,
+				 get_sync_value(kernel->fence_sync),
+				 kernel->fence_sync->next_value,
+				 kernel->cleanup_sync->id,
+				 kernel->cleanup_sync->vaddr,
+				 get_sync_value(kernel->cleanup_sync),
+				 kernel->cleanup_sync->next_value);
+		}
+	} else {
+		seq_printf(s, "tl_taken=%u ref=%d # sync: idle",
+			 pvr_pt->sync_data->timeline_update_value,
+			 atomic_read(&pvr_pt->sync_data->refcount));
+	}
+}
+
+static struct pvr_sync_data*
+pvr_sync_create_sync_data(struct pvr_sync_timeline *timeline)
+{
+	struct pvr_sync_data *sync_data = NULL;
+	enum PVRSRV_ERROR error;
+
+	sync_data = kzalloc(sizeof(struct pvr_sync_data), GFP_KERNEL);
+	if (!sync_data)
+		goto err_out;
+
+	atomic_set(&sync_data->refcount, 1);
+
+	sync_data->kernel =
+		kzalloc(sizeof(struct pvr_sync_kernel_pair),
+		GFP_KERNEL);
+
+	if (!sync_data->kernel)
+		goto err_free_data;
+
+	OSAcquireBridgeLock();
+	error = sync_pool_get(&sync_data->kernel->fence_sync,
+			      timeline->obj.name, SYNC_PT_FENCE_TYPE);
+	OSReleaseBridgeLock();
+
+	if (error != PVRSRV_OK) {
+		pr_err("pvr_sync: %s: Failed to allocate sync prim (%s)",
+		       __func__, PVRSRVGetErrorStringKM(error));
+		goto err_free_kernel;
+	}
+
+err_out:
+	return sync_data;
+
+err_free_kernel:
+	kfree(sync_data->kernel);
+err_free_data:
+	kfree(sync_data);
+	sync_data = NULL;
+	goto err_out;
+}
+
+static struct pvr_sync_pt *
+pvr_sync_create_sync(struct pvr_sync_timeline *timeline,
+	struct pvr_sync_data *sync_data)
+{
+	struct pvr_sync_pt *pvr_pt = NULL;
+
+	pvr_pt = (struct pvr_sync_pt *)
+		sync_pt_create(&timeline->obj, sizeof(struct pvr_sync_pt));
+	if (!pvr_pt) {
+		pr_err("pvr_sync: %s: Failed to create sync pt", __func__);
+		goto err_complete_sync;
+	}
+
+	/* Attach our sync data to the new sync point. */
+	pvr_pt->sync_data = sync_data;
+
+err_out:
+	return pvr_pt;
+
+err_complete_sync:
+	if (sync_data->kernel) {
+		/* Complete the sync taken on the TL sync and delete the
+		 * new fence sync. */
+		complete_sync(timeline->timeline_sync);
+		OSAcquireBridgeLock();
+		sync_pool_put(sync_data->kernel->fence_sync);
+		OSReleaseBridgeLock();
+	}
+	kfree(sync_data->kernel);
+	kfree(sync_data);
+	goto err_out;
+}
+
+static void pvr_sync_defer_free(struct pvr_sync_kernel_pair *kernel)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sync_prim_free_list_spinlock, flags);
+	list_add_tail(&kernel->list, &sync_prim_free_list);
+	spin_unlock_irqrestore(&sync_prim_free_list_spinlock, flags);
+
+	queue_work(pvr_sync_data.defer_free_wq, &pvr_sync_data.defer_free_work);
+}
+
+static void pvr_sync_free_sync(struct sync_pt *sync_pt)
+{
+	struct pvr_sync_pt *pvr_pt = (struct pvr_sync_pt *)sync_pt;
+
+	DPF("%s: # %s", __func__, debug_info_sync_pt(sync_pt));
+
+	/* Only free on the last reference */
+	if (atomic_dec_return(&pvr_pt->sync_data->refcount) != 0)
+		return;
+
+	if (pvr_pt->sync_data->kernel)
+		pvr_sync_defer_free(pvr_pt->sync_data->kernel);
+
+	kfree(pvr_pt->sync_data);
+}
+
+/* this function uses pvr_sync_timeline_ops defined below */
+static int pvr_sync_fill_driver_data(struct sync_pt *, void *, int);
+
+static struct sync_timeline_ops pvr_sync_timeline_ops = {
+	.driver_name        = PVRSYNC_MODNAME,
+	.dup                = pvr_sync_dup,
+	.has_signaled       = pvr_sync_has_signaled,
+	.compare            = pvr_sync_compare,
+	.free_pt            = pvr_sync_free_sync,
+	.release_obj        = pvr_sync_release_timeline,
+	.print_obj          = pvr_sync_print_obj,
+	.print_pt           = pvr_sync_print_pt,
+	.fill_driver_data   = pvr_sync_fill_driver_data,
+};
+
+static int
+pvr_sync_fill_driver_data(struct sync_pt *sync_pt, void *data, int size)
+{
+	struct pvr_sync_pt_info *inf = (struct pvr_sync_pt_info *)data;
+	struct pvr_sync_pt *pvr_pt = (struct pvr_sync_pt *)sync_pt;
+
+	if (size < sizeof(struct pvr_sync_pt_info))
+		return -ENOMEM;
+
+	if (sync_pt->parent->ops == &pvr_sync_timeline_ops) {
+		struct pvr_sync_data *sync_data = pvr_pt->sync_data;
+		struct pvr_sync_kernel_pair *kernel = sync_data->kernel;
+
+		inf->ui8Foreign  = 0;
+		inf->s.ui32TlTaken = sync_data->timeline_update_value;
+
+		if (kernel) {
+			inf->s.id         = kernel->fence_sync->id;
+			inf->s.ui32FWAddr = kernel->fence_sync->vaddr;
+			inf->s.ui32CurrOp = get_sync_value(kernel->fence_sync);
+			inf->s.ui32NextOp = kernel->fence_sync->next_value;
+		} else {
+			inf->s.id         = 0;
+			inf->s.ui32FWAddr = 0;
+			inf->s.ui32CurrOp = 0;
+			inf->s.ui32NextOp = 0;
+		}
+	} else {
+		inf->ui8Foreign = 1;
+
+		if (sync_pt->parent->ops->pt_value_str) {
+			sync_pt->parent->ops->pt_value_str(sync_pt,
+				inf->szForeignVal, sizeof(inf->szForeignVal));
+		} else {
+			inf->szForeignVal[0] = 0;
+		}
+	}
+
+	return sizeof(struct pvr_sync_pt_info);
+}
+
+
+/* foreign sync handling */
+
+static void pvr_sync_foreign_sync_pt_signaled(struct sync_fence *fence,
+					      struct sync_fence_waiter *_waiter)
+{
+	struct pvr_sync_fence_waiter *waiter =
+		(struct pvr_sync_fence_waiter *)_waiter;
+	unsigned long flags;
+
+	/* Complete the SW operation and free the sync if we can. If we can't,
+	 * it will be checked by a later workqueue kick. */
+	complete_sync(waiter->kernel->fence_sync);
+
+	/* We can 'put' the fence now, but this function might be called in
+	* irq context so we must defer to WQ.
+	* This WQ is triggered in pvr_sync_defer_free, so adding it to the
+	* put list before that should guarantee it's cleaned up on the next
+	* wq run */
+	spin_lock_irqsave(&sync_fence_put_list_spinlock, flags);
+	list_add_tail(&waiter->sync_fence->list, &sync_fence_put_list);
+	spin_unlock_irqrestore(&sync_fence_put_list_spinlock, flags);
+
+	pvr_sync_defer_free(waiter->kernel);
+
+	/* The completed sw-sync may allow other tasks to complete,
+	 * so we need to allow them to progress */
+	queue_work(pvr_sync_data.check_status_wq,
+		&pvr_sync_data.check_status_work);
+
+	kfree(waiter);
+}
+
+static struct pvr_sync_kernel_pair *
+pvr_sync_create_waiter_for_foreign_sync(int fd)
+{
+	struct pvr_sync_kernel_pair *kernel = NULL;
+	struct pvr_sync_fence_waiter *waiter;
+	struct pvr_sync_fence *sync_fence;
+	struct sync_fence *fence;
+	enum PVRSRV_ERROR error;
+	int err;
+
+	fence = sync_fence_fdget(fd);
+	if (!fence) {
+		pr_err("pvr_sync: %s: Failed to take reference on fence",
+		       __func__);
+		goto err_out;
+	}
+
+	kernel = kmalloc(sizeof(struct pvr_sync_kernel_pair), GFP_KERNEL);
+	if (!kernel) {
+		pr_err("pvr_sync: %s: Failed to allocate sync kernel",
+		       __func__);
+		goto err_put_fence;
+	}
+
+	sync_fence = kmalloc(sizeof(struct pvr_sync_fence), GFP_KERNEL);
+	if (!sync_fence) {
+		pr_err("pvr_sync: %s: Failed to allocate pvr sync fence",
+		       __func__);
+		goto err_free_kernel;
+	}
+
+	sync_fence->fence = fence;
+
+	error = sync_pool_get(&kernel->fence_sync,
+			      fence->name, SYNC_PT_FOREIGN_FENCE_TYPE);
+	if (error != PVRSRV_OK) {
+		pr_err("pvr_sync: %s: Failed to allocate sync prim (%s)",
+		       __func__, PVRSRVGetErrorStringKM(error));
+		goto err_free_sync_fence;
+	}
+
+	kernel->fence_sync->next_value++;
+
+	error = sync_pool_get(&kernel->cleanup_sync,
+			      fence->name, SYNC_PT_FOREIGN_CLEANUP_TYPE);
+	if (error != PVRSRV_OK) {
+		pr_err("pvr_sync: %s: Failed to allocate cleanup sync prim (%s)",
+		       __func__, PVRSRVGetErrorStringKM(error));
+		goto err_free_sync;
+	}
+
+	kernel->cleanup_sync->next_value++;
+
+	/* The custom waiter structure is freed in the waiter callback */
+	waiter = kmalloc(sizeof(struct pvr_sync_fence_waiter), GFP_KERNEL);
+	if (!waiter) {
+		pr_err("pvr_sync: %s: Failed to allocate waiter", __func__);
+		goto err_free_cleanup_sync;
+	}
+
+	waiter->kernel = kernel;
+	waiter->sync_fence = sync_fence;
+
+	sync_fence_waiter_init(&waiter->waiter,
+			       pvr_sync_foreign_sync_pt_signaled);
+
+	err = sync_fence_wait_async(fence, &waiter->waiter);
+	if (err) {
+		if (err < 0) {
+			pr_err("pvr_sync: %s: Fence was in error state (%d)",
+			       __func__, err);
+			/* Fall-thru */
+		}
+
+		/* -1 means the fence was broken, 1 means the fence already
+		 * signalled. In either case, roll back what we've done and
+		 * skip using this sync_pt for synchronization.
+		 */
+		goto err_free_waiter;
+	}
+
+err_out:
+	return kernel;
+err_free_waiter:
+	kfree(waiter);
+err_free_cleanup_sync:
+	sync_pool_put(kernel->cleanup_sync);
+err_free_sync:
+	sync_pool_put(kernel->fence_sync);
+err_free_sync_fence:
+	kfree(sync_fence);
+err_free_kernel:
+	kfree(kernel);
+	kernel = NULL;
+err_put_fence:
+	sync_fence_put(fence);
+	goto err_out;
+}
+
+enum PVRSRV_ERROR pvr_sync_append_fences(
+	const char                  *name,
+	const u32                   nr_check_fences,
+	const s32                   *check_fence_fds,
+	const s32                   update_fence_fd,
+	const u32                   nr_updates,
+	const PRGXFWIF_UFO_ADDR     *update_ufo_addresses,
+	const u32                   *update_values,
+	const u32                   nr_checks,
+	const PRGXFWIF_UFO_ADDR     *check_ufo_addresses,
+	const u32                   *check_values,
+	struct pvr_sync_append_data **append_sync_data)
+{
+	struct pvr_sync_append_data *sync_data;
+	enum PVRSRV_ERROR err = PVRSRV_OK;
+	struct pvr_sync_native_sync_prim **cleanup_sync_pos;
+	PRGXFWIF_UFO_ADDR *update_address_pos;
+	PRGXFWIF_UFO_ADDR *check_address_pos;
+	u32 *update_value_pos;
+	u32 *check_value_pos;
+	unsigned num_used_sync_checks;
+	unsigned num_used_sync_updates;
+	struct pvr_sync_alloc_data *alloc_sync_data = NULL;
+	unsigned i;
+
+	if ((nr_updates && (!update_ufo_addresses || !update_values)) ||
+		(nr_checks && (!check_ufo_addresses || !check_values)))
+		return PVRSRV_ERROR_INVALID_PARAMS;
+
+	sync_data =
+		kzalloc(sizeof(struct pvr_sync_append_data)
+			+ nr_check_fences * sizeof(struct sync_fence *),
+			GFP_KERNEL);
+	if (!sync_data) {
+		err = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto err_out;
+	}
+
+	sync_data->nr_checks = nr_checks;
+	sync_data->nr_updates = nr_updates;
+
+	sync_data->nr_fences = nr_check_fences;
+
+	/* Loop through once to get the fences and count the total number of
+	 * points */
+	for (i = 0; i < nr_check_fences; i++) {
+		unsigned points_on_fence = 0;
+		bool has_foreign_point = false;
+		struct sync_fence *fence = sync_fence_fdget(check_fence_fds[i]);
+		struct sync_pt *sync_pt;
+		struct pvr_sync_kernel_pair *sync_kernel;
+
+		if (!fence) {
+			pr_err("pvr_sync: %s: Failed to read sync private data for fd %d\n",
+				__func__, check_fence_fds[i]);
+			err = PVRSRV_ERROR_HANDLE_NOT_FOUND;
+			goto err_free_append_data;
+		}
+
+		sync_data->fences[i] = fence;
+
+		list_for_each_entry(sync_pt, &fence->pt_list_head, pt_list) {
+			struct pvr_sync_pt *pvr_pt;
+
+			if (sync_pt->parent->ops != &pvr_sync_timeline_ops) {
+				if (!sync_pt->status)
+					has_foreign_point = true;
+				continue;
+			}
+
+			pvr_pt = (struct pvr_sync_pt *)sync_pt;
+			sync_kernel = pvr_pt->sync_data->kernel;
+
+			if (!sync_kernel ||
+			    is_sync_met(sync_kernel->fence_sync)) {
+				continue;
+			}
+			/* We will use the above sync for "check" only. In this
+			* case also insert a "cleanup" update command into the
+			* opengl stream. This can later be used for checking if
+			* the sync prim could be freed. */
+			if (!sync_kernel->cleanup_sync) {
+				err = sync_pool_get(&sync_kernel->cleanup_sync,
+					pvr_pt->pt.parent->name,
+					SYNC_PT_CLEANUP_TYPE);
+				if (err != PVRSRV_OK) {
+					pr_err("pvr_sync: %s: Failed to allocate cleanup sync prim (%s)",
+					       __func__,
+					       PVRSRVGetErrorStringKM(err));
+					goto err_free_append_data;
+				}
+			}
+			points_on_fence++;
+		}
+
+		if (has_foreign_point)
+			points_on_fence++;
+
+		/* Each point has 1 check value, and 1 update value (for the
+		 * cleanup fence) */
+		sync_data->nr_checks += points_on_fence;
+		sync_data->nr_updates += points_on_fence;
+		sync_data->nr_cleaup_syncs += points_on_fence;
+	}
+
+	if (update_fence_fd >= 0) {
+		alloc_sync_data = pvr_sync_alloc_fence_fdget(update_fence_fd);
+		if (!alloc_sync_data) {
+			pr_err("pvr_sync: %s: Failed to read alloc sync private data for fd %d\n",
+				__func__, update_fence_fd);
+			err = PVRSRV_ERROR_HANDLE_NOT_FOUND;
+			goto err_free_append_data;
+		}
+		/* Store the alloc sync data now, so it's correctly fput()
+		 * even on error */
+		sync_data->update_sync_data = alloc_sync_data;
+		/* If an alloc-sync has already been appended to a kick that
+		 * is an error (and the sync_data will be NULL */
+		if (!alloc_sync_data->sync_data) {
+			pr_err("pvr_sync: %s: Failed to read alloc sync sync_data for fd %d\n",
+				__func__, update_fence_fd);
+			err = PVRSRV_ERROR_RESOURCE_UNAVAILABLE;
+			goto err_free_append_data;
+
+		}
+		/* A fence update requires 2 update values (fence and timeline)
+		 */
+		 sync_data->nr_updates += 2;
+	}
+
+	sync_data->update_ufo_addresses =
+		kzalloc(sizeof(PRGXFWIF_UFO_ADDR) * sync_data->nr_updates,
+			GFP_KERNEL);
+	if (!sync_data->update_ufo_addresses) {
+		pr_err("pvr_sync: %s: Failed to allocate update UFO address list\n",
+			__func__);
+		err = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto err_free_append_data;
+	}
+
+	sync_data->update_values =
+		kzalloc(sizeof(u32) * sync_data->nr_updates,
+			GFP_KERNEL);
+	if (!sync_data->update_values) {
+		pr_err("pvr_sync: %s: Failed to allocate update value list\n",
+			__func__);
+		err = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto err_free_append_data;
+	}
+
+	sync_data->check_ufo_addresses =
+		kzalloc(sizeof(PRGXFWIF_UFO_ADDR) * sync_data->nr_checks,
+			GFP_KERNEL);
+	if (!sync_data->check_ufo_addresses) {
+		pr_err("pvr_sync: %s: Failed to allocate check UFO address list\n",
+			__func__);
+		err = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto err_free_append_data;
+	}
+
+	sync_data->check_values =
+		kzalloc(sizeof(u32) * sync_data->nr_checks,
+			GFP_KERNEL);
+	if (!sync_data->check_values) {
+		pr_err("pvr_sync: %s: Failed to allocate check value list\n",
+			__func__);
+		err = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto err_free_append_data;
+	}
+
+	sync_data->cleanup_syncs =
+		kzalloc(sizeof(struct pvr_sync_native_sync_prim *) *
+			sync_data->nr_cleaup_syncs, GFP_KERNEL);
+	if (!sync_data->cleanup_syncs) {
+		pr_err("pvr_sync: %s: Failed to allocate cleanup rollback list\n",
+			__func__);
+		err = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto err_free_append_data;
+	}
+
+	update_address_pos = sync_data->update_ufo_addresses;
+	update_value_pos = sync_data->update_values;
+	check_address_pos = sync_data->check_ufo_addresses;
+	check_value_pos = sync_data->check_values;
+	cleanup_sync_pos = sync_data->cleanup_syncs;
+
+
+	/* Everything should be allocated/sanity checked. No errors are possible
+	 * after this point */
+
+	/* Append any check syncs */
+	for (i = 0; i < nr_check_fences; i++) {
+		struct sync_fence *fence = sync_data->fences[i];
+		struct sync_pt *sync_pt;
+		bool has_foreign_point = false;
+
+		list_for_each_entry(sync_pt, &fence->pt_list_head, pt_list) {
+			struct pvr_sync_pt *pvr_pt;
+			struct pvr_sync_kernel_pair *sync_kernel;
+
+			if (sync_pt->parent->ops != &pvr_sync_timeline_ops) {
+				if (!sync_pt->status)
+					has_foreign_point = true;
+				continue;
+			}
+			pvr_pt = (struct pvr_sync_pt *) sync_pt;
+			sync_kernel = pvr_pt->sync_data->kernel;
+
+			if (!sync_kernel ||
+			    is_sync_met(sync_kernel->fence_sync)) {
+				continue;
+			}
+
+			(*check_address_pos++).ui32Addr =
+				sync_kernel->fence_sync->vaddr;
+			*check_value_pos++ =
+				sync_kernel->fence_sync->next_value;
+
+			(*update_address_pos++).ui32Addr =
+				sync_kernel->cleanup_sync->vaddr;
+			*update_value_pos++ =
+				++sync_kernel->cleanup_sync->next_value;
+			*cleanup_sync_pos++ = sync_kernel->cleanup_sync;
+		}
+
+		if (has_foreign_point) {
+			struct pvr_sync_kernel_pair *foreign_sync_kernel =
+				pvr_sync_create_waiter_for_foreign_sync(
+					check_fence_fds[i]);
+
+			if (foreign_sync_kernel) {
+				struct pvr_sync_native_sync_prim *fence_sync =
+					foreign_sync_kernel->fence_sync;
+				struct pvr_sync_native_sync_prim *cleanup_sync =
+					foreign_sync_kernel->cleanup_sync;
+
+
+				(*check_address_pos++).ui32Addr =
+					fence_sync->vaddr;
+				*check_value_pos++ =
+					fence_sync->next_value;
+
+				(*update_address_pos++).ui32Addr =
+					cleanup_sync->vaddr;
+				*update_value_pos++ =
+					++cleanup_sync->next_value;
+				*cleanup_sync_pos++ = cleanup_sync;
+			}
+		}
+	}
+
+	/* Append the update sync (if supplied) */
+	if (sync_data->update_sync_data) {
+		struct pvr_sync_alloc_data *update_data =
+			sync_data->update_sync_data;
+		struct pvr_sync_timeline *timeline =
+			update_data->timeline;
+		struct pvr_sync_kernel_pair *sync_kernel =
+			update_data->sync_data->kernel;
+
+		(*update_address_pos++).ui32Addr =
+			sync_kernel->fence_sync->vaddr;
+		*update_value_pos++ =
+			++sync_kernel->fence_sync->next_value;
+
+		(*update_address_pos++).ui32Addr =
+			timeline->timeline_sync->vaddr;
+
+		/* Increment the timeline value... */
+		update_data->sync_data->timeline_update_value =
+			++timeline->timeline_sync->next_value;
+
+		/* ...and set that to be updated when this kick is completed */
+		*update_value_pos++ =
+			update_data->sync_data->timeline_update_value;
+
+
+		/* Reset the fencing enabled flag. If nobody sets this to 1
+		 * until the next fence point is inserted, we will do timeline
+		 * idle detection. */
+		timeline->fencing_enabled = false;
+	}
+	/* We count the total number of sync points we attach, as it's possible
+	* some have become complete since the first loop through, or a waiter
+	* for a foreign point skipped (But they can never become un-complete, so
+	* it will only ever be the same or less, so the allocated arrays should
+	* still be sufficiently sized) */
+	num_used_sync_updates =
+		update_address_pos - sync_data->update_ufo_addresses;
+	num_used_sync_checks =
+		check_address_pos - sync_data->check_ufo_addresses;
+
+
+	sync_data->nr_checks = nr_checks + num_used_sync_checks;
+	sync_data->nr_updates = nr_updates + num_used_sync_updates;
+	/* Append original check and update sync values/addresses */
+	if (update_ufo_addresses)
+		memcpy(update_address_pos, update_ufo_addresses,
+			   sizeof(PRGXFWIF_UFO_ADDR) * nr_updates);
+	if (update_values)
+		memcpy(update_value_pos, update_values,
+			   sizeof(u32) * nr_updates);
+
+	if (check_ufo_addresses)
+		memcpy(check_address_pos, check_ufo_addresses,
+			   sizeof(PRGXFWIF_UFO_ADDR) * nr_checks);
+	if (check_values)
+		memcpy(check_value_pos, check_values,
+			   sizeof(u32) * nr_checks);
+
+	*append_sync_data = sync_data;
+
+	return PVRSRV_OK;
+
+err_free_append_data:
+	pvr_sync_free_append_fences_data(sync_data);
+err_out:
+	return err;
+}
+
+void pvr_sync_rollback_append_fences(
+	struct pvr_sync_append_data *sync_append_data)
+{
+	unsigned i;
+
+	if (!sync_append_data)
+		return;
+
+	for (i = 0; i < sync_append_data->nr_cleaup_syncs; i++) {
+		struct pvr_sync_native_sync_prim *cleanup_sync =
+			sync_append_data->cleanup_syncs[i];
+		/* If this cleanup was called on a partially-created data set
+		 * it's possible to have NULL cleanup sync pointers */
+		if (!cleanup_sync)
+			continue;
+		cleanup_sync->next_value--;
+	}
+
+	if (sync_append_data->update_sync_data) {
+		struct pvr_sync_data *sync_data =
+			sync_append_data->update_sync_data->sync_data;
+		struct pvr_sync_timeline *timeline =
+			sync_append_data->update_sync_data->timeline;
+		/* We can get a NULL sync_data if the corresponding
+		 * append failed with a re-used alloc sync */
+		if (sync_data) {
+			sync_data->kernel->fence_sync->next_value--;
+			timeline->fencing_enabled = true;
+			timeline->timeline_sync->next_value--;
+		}
+	}
+}
+
+void pvr_sync_free_append_fences_data(
+	struct pvr_sync_append_data *sync_append_data)
+{
+	unsigned i;
+
+	if (!sync_append_data)
+		return;
+
+	for (i = 0; i < sync_append_data->nr_fences; i++) {
+		struct sync_fence *fence = sync_append_data->fences[i];
+		/* If this cleanup was called on a partially-created data set
+		 * it's possible to have NULL sync data pointers */
+		if (!fence)
+			continue;
+		sync_fence_put(fence);
+	}
+	if (sync_append_data->update_sync_data)
+		fput(sync_append_data->update_sync_data->file);
+
+	kfree(sync_append_data->update_ufo_addresses);
+	kfree(sync_append_data->update_values);
+	kfree(sync_append_data->check_ufo_addresses);
+	kfree(sync_append_data->check_values);
+	kfree(sync_append_data->cleanup_syncs);
+	kfree(sync_append_data);
+}
+
+void pvr_sync_nohw_complete_fences(
+	struct pvr_sync_append_data *sync_append_data)
+{
+	unsigned i;
+
+	if (!sync_append_data)
+		return;
+
+	for (i = 0; i < sync_append_data->nr_cleaup_syncs; i++) {
+		struct pvr_sync_native_sync_prim *cleanup_sync =
+			sync_append_data->cleanup_syncs[i];
+
+		if (!cleanup_sync)
+			continue;
+
+		complete_sync(cleanup_sync);
+	}
+	if (sync_append_data->update_sync_data) {
+		/* Skip any invalid update syncs (should only be hit on error */
+		if (sync_append_data->update_sync_data->sync_data) {
+			struct pvr_sync_data *sync_data =
+				sync_append_data->update_sync_data->sync_data;
+			struct pvr_sync_timeline *timeline =
+				sync_append_data->update_sync_data->timeline;
+			complete_sync(sync_data->kernel->fence_sync);
+			set_sync_value(timeline->timeline_sync,
+				sync_data->timeline_update_value);
+		}
+	}
+}
+
+/* ioctl and fops handling */
+
+static int pvr_sync_open(struct inode *inode, struct file *file)
+{
+	struct pvr_sync_timeline *timeline;
+	enum PVRSRV_ERROR error;
+	char name[32] = {};
+	int err = -ENOMEM;
+
+	task_lock(current);
+	rcu_read_lock();
+
+	if (strncmp(current->group_leader->comm,
+		current->comm, TASK_COMM_LEN) == 0) {
+		snprintf(name, sizeof(name), "%.26s-%d",
+			current->group_leader->comm, current->pid);
+	} else {
+		snprintf(name, sizeof(name), "%.15s-%.10s-%d",
+			current->group_leader->comm, current->comm,
+			current->pid);
+	}
+
+	rcu_read_unlock();
+	task_unlock(current);
+
+	timeline = (struct pvr_sync_timeline *)
+		sync_timeline_create(&pvr_sync_timeline_ops,
+			sizeof(struct pvr_sync_timeline), name);
+	if (!timeline) {
+		pr_err("pvr_sync: %s: sync_timeline_create failed", __func__);
+		goto err_out;
+	}
+
+	OSAcquireBridgeLock();
+
+	error = sync_pool_get(&timeline->timeline_sync, name, SYNC_TL_TYPE);
+	if (error != PVRSRV_OK) {
+		pr_err("pvr_sync: %s: Failed to allocate sync prim (%s)",
+			__func__, PVRSRVGetErrorStringKM(error));
+		OSReleaseBridgeLock();
+
+		/*
+		 * Use a NULL timeline_sync to detect this partially-setup
+		 * timeline in the timeline release function (called by
+		 * sync_timeline_destroy) and handle it appropriately
+		 */
+		timeline->timeline_sync = NULL;
+		goto err_free_tl;
+	}
+
+	OSReleaseBridgeLock();
+
+	timeline->fencing_enabled = true;
+
+	DPF("%s: # %s", __func__,
+	    debug_info_timeline((struct sync_timeline *)timeline));
+
+	mutex_lock(&timeline_list_mutex);
+	list_add_tail(&timeline->list, &timeline_list);
+	mutex_unlock(&timeline_list_mutex);
+
+	file->private_data = timeline;
+
+	err = 0;
+err_out:
+	return err;
+
+err_free_tl:
+	sync_timeline_destroy(&timeline->obj);
+	goto err_out;
+}
+
+static int pvr_sync_close(struct inode *inode, struct file *file)
+{
+	struct sync_timeline *timeline = file->private_data;
+
+	if (timeline->ops == &pvr_sync_timeline_ops)
+		DPF("%s: # %s", __func__, debug_info_timeline(timeline));
+
+	sync_timeline_destroy(timeline);
+	return 0;
+}
+
+static void pvr_sync_free_sync_data(struct pvr_sync_data *sync_data)
+{
+	if (sync_data && sync_data->kernel)
+		pvr_sync_defer_free(sync_data->kernel);
+	kfree(sync_data);
+}
+
+static int pvr_sync_alloc_release(struct inode *inode, struct file *file)
+{
+	struct pvr_sync_alloc_data *alloc_sync_data = file->private_data;
+	/* the sync_data may be null if a sync has been created using this
+	 * alloc_sync data */
+	pvr_sync_free_sync_data(alloc_sync_data->sync_data);
+	kfree(alloc_sync_data);
+	return 0;
+}
+
+static const struct file_operations pvr_alloc_sync_fops = {
+	.release = pvr_sync_alloc_release,
+};
+
+static struct pvr_sync_alloc_data *pvr_sync_alloc_fence_fdget(int fd)
+{
+	struct file *file = fget(fd);
+
+	if (!file)
+		return NULL;
+	if (file->f_op != &pvr_alloc_sync_fops)
+		goto err;
+	return file->private_data;
+err:
+	fput(file);
+	return NULL;
+}
+
+static long
+pvr_sync_ioctl_create_fence(struct pvr_sync_timeline *timeline,
+			    void __user *user_data)
+{
+	struct pvr_sync_create_fence_ioctl_data data;
+	struct pvr_sync_alloc_data *alloc_sync_data;
+	int err = -EFAULT, fd = get_unused_fd();
+	struct pvr_sync_data *sync_data;
+	struct sync_fence *fence;
+	struct sync_pt *sync_pt;
+
+	if (fd < 0) {
+		pr_err("pvr_sync: %s: Failed to find unused fd (%d)",
+		       __func__, fd);
+		goto err_out;
+	}
+
+	if (!access_ok(VERIFY_READ, user_data, sizeof(data)))
+		goto err_put_fd;
+
+	if (copy_from_user(&data, user_data, sizeof(data)))
+		goto err_put_fd;
+
+	alloc_sync_data = pvr_sync_alloc_fence_fdget(data.iAllocFenceFd);
+	if (!alloc_sync_data) {
+		pr_err("pvr_sync: %s: Invalid alloc sync fd (%d)\n",
+			__func__, data.iAllocFenceFd);
+		goto err_put_fd;
+	}
+
+	if (alloc_sync_data->timeline != timeline) {
+		pr_err("pvr_sync: %s: Trying to create sync from alloc of timeline %p in timeline %p\n",
+			__func__, alloc_sync_data->timeline, timeline);
+		fput(alloc_sync_data->file);
+		goto err_put_fd;
+	}
+
+	sync_data = alloc_sync_data->sync_data;
+	alloc_sync_data->sync_data = NULL;
+
+	fput(alloc_sync_data->file);
+
+	sync_pt = (struct sync_pt *)
+		pvr_sync_create_sync(timeline, sync_data);
+	if (!sync_pt) {
+		pr_err("pvr_sync: %s: Failed to create a sync point (%d)",
+		       __func__, fd);
+		err = -ENOMEM;
+		goto err_free_sync_data;
+	}
+
+	data.szName[sizeof(data.szName) - 1] = '\0';
+
+	DPF("%s: %d('%s') # %s", __func__,
+		fd, data.szName,
+		debug_info_timeline((struct sync_timeline *)timeline));
+
+	fence = sync_fence_create(data.szName, sync_pt);
+	if (!fence) {
+		pr_err("pvr_sync: %s: Failed to create a fence (%d)",
+		       __func__, fd);
+		sync_pt_free(sync_pt);
+		err = -ENOMEM;
+		goto err_free_sync_data;
+	}
+
+	data.iFenceFd = fd;
+
+	if (!access_ok(VERIFY_WRITE, user_data, sizeof(data)))
+		goto err_put_fence;
+
+	if (copy_to_user(user_data, &data, sizeof(data)))
+		goto err_put_fence;
+
+	sync_fence_install(fence, fd);
+
+	err = 0;
+err_out:
+	return err;
+
+err_put_fence:
+	sync_fence_put(fence);
+err_free_sync_data:
+	pvr_sync_free_sync_data(sync_data);
+err_put_fd:
+	put_unused_fd(fd);
+	goto err_out;
+}
+
+static long
+pvr_sync_ioctl_alloc_fence(struct pvr_sync_timeline *timeline,
+		void __user *user_data) {
+	struct pvr_sync_alloc_fence_ioctl_data data;
+	int err = -EFAULT, fd = get_unused_fd();
+	struct pvr_sync_data *sync_data;
+	struct pvr_sync_alloc_data *alloc_sync_data;
+	struct file *file;
+
+	if (fd < 0) {
+		pr_err("pvr_sync: %s: Failed to find unused fd (%d)",
+		       __func__, fd);
+		goto err_out;
+	}
+
+	if (!access_ok(VERIFY_READ, user_data, sizeof(data)))
+		goto err_put_fd;
+
+	if (!access_ok(VERIFY_WRITE, user_data, sizeof(data)))
+		goto err_put_fd;
+
+	alloc_sync_data =
+		kzalloc(sizeof(struct pvr_sync_alloc_data), GFP_KERNEL);
+	if (!alloc_sync_data) {
+		err = -ENOMEM;
+		pr_err("pvr_sync: %s: Failed to alloc sync data\n", __func__);
+		goto err_put_fd;
+	}
+
+	sync_data = pvr_sync_create_sync_data(timeline);
+	if (!sync_data) {
+		err = -ENOMEM;
+		pr_err("pvr_sync: %s: Failed to create sync data\n", __func__);
+		goto err_free_alloc_data;
+	}
+
+	file = anon_inode_getfile("pvr_sync_alloc", &pvr_alloc_sync_fops,
+		alloc_sync_data, 0);
+	if (!file) {
+		err = -ENOMEM;
+		pr_err("pvr_sync: %s: Failed to create alloc inode\n",
+			__func__);
+		goto err_free_data;
+	}
+
+	alloc_sync_data->file = file;
+	alloc_sync_data->sync_data = sync_data;
+	alloc_sync_data->timeline = timeline;
+
+	data.bTimelineIdle = is_sync_met(timeline->timeline_sync) &&
+		timeline->fencing_enabled == false;
+
+	data.iFenceFd = fd;
+
+	if (!access_ok(VERIFY_WRITE, user_data, sizeof(data)))
+		goto err_free_data;
+
+	if (copy_to_user(user_data, &data, sizeof(data)))
+		goto err_free_data;
+
+	fd_install(fd, file);
+	err = 0;
+
+err_out:
+	return err;
+err_free_data:
+	pvr_sync_free_sync_data(sync_data);
+err_free_alloc_data:
+	kfree(alloc_sync_data);
+err_put_fd:
+	put_unused_fd(fd);
+	goto err_out;
+}
+
+static long
+pvr_sync_ioctl_enable_fencing(struct pvr_sync_timeline *timeline,
+			      void __user *user_data)
+{
+	struct pvr_sync_enable_fencing_ioctl_data data;
+	int err = -EFAULT;
+
+	if (!access_ok(VERIFY_READ, user_data, sizeof(data)))
+		goto err_out;
+
+	if (copy_from_user(&data, user_data, sizeof(data)))
+		goto err_out;
+
+	timeline->fencing_enabled = data.bFencingEnabled;
+	err = 0;
+err_out:
+	return err;
+}
+
+#ifndef CONFIG_SW_SYNC_USER
+
+static long pvr_sync_ioctl_force_sw_only(struct pvr_sync_timeline *timeline,
+	void **private_data)
+{
+	struct sw_sync_timeline *sw_sync_timeline;
+
+	/* We can only convert an empty GPU timeline */
+	if (timeline->timeline_sync->next_value)
+		return -EFAULT;
+
+	/* Create a sw_sync timeline with the old GPU timeline's name */
+	sw_sync_timeline = sw_sync_timeline_create(timeline->obj.name);
+	if (!sw_sync_timeline)
+		return -ENOMEM;
+
+	/* Destroy the old GPU timeline and update the struct file */
+	DPF("%s: # %s", __func__,
+	    debug_info_timeline((struct sync_timeline *)timeline));
+
+	sync_timeline_destroy(&timeline->obj);
+	*private_data = sw_sync_timeline;
+	return 0;
+}
+
+static long pvr_sync_ioctl_sw_create_fence(struct sw_sync_timeline *timeline,
+	void __user *user_data)
+{
+	struct sw_sync_create_fence_data data;
+	struct sync_fence *fence;
+	int fd = get_unused_fd();
+	struct sync_pt *sync_pt;
+	int err = -EFAULT;
+
+	if (fd < 0) {
+		pr_err("pvr_sync: %s: Failed to find unused fd (%d)",
+		       __func__, fd);
+		goto err_out;
+	}
+
+	if (copy_from_user(&data, user_data, sizeof(data)))
+		goto err_put_fd;
+
+	sync_pt = sw_sync_pt_create(timeline, data.value);
+	if (!sync_pt) {
+		pr_err("pvr_sync: %s: Failed to create a sync point (%d)",
+		       __func__, fd);
+		err = -ENOMEM;
+		goto err_put_fd;
+	}
+
+	data.name[sizeof(data.name) - 1] = '\0';
+	fence = sync_fence_create(data.name, sync_pt);
+	if (!fence) {
+		pr_err("pvr_sync: %s: Failed to create a fence (%d)",
+		       __func__, fd);
+		sync_pt_free(sync_pt);
+		err = -ENOMEM;
+		goto err_put_fd;
+	}
+
+	data.fence = fd;
+
+	if (copy_to_user(user_data, &data, sizeof(data)))
+		goto err_put_fence;
+
+	sync_fence_install(fence, fd);
+	err = 0;
+err_out:
+	return err;
+err_put_fence:
+	sync_fence_put(fence);
+err_put_fd:
+	put_unused_fd(fd);
+	goto err_out;
+}
+
+static long pvr_sync_ioctl_sw_inc(struct sw_sync_timeline *timeline,
+	void __user *user_data)
+{
+	u32 value;
+
+	if (copy_from_user(&value, user_data, sizeof(value)))
+		return -EFAULT;
+
+	sw_sync_timeline_inc(timeline, value);
+	return 0;
+}
+
+#endif /* !CONFIG_SW_SYNC_USER */
+
+static long
+pvr_sync_ioctl(struct file *file, unsigned int cmd, unsigned long __user arg)
+{
+	struct sync_timeline *timeline = file->private_data;
+	void __user *user_data = (void __user *)arg;
+	long err = -ENOTTY;
+
+	if (timeline->ops == &pvr_sync_timeline_ops) {
+		struct pvr_sync_timeline *pvr = file->private_data;
+
+		switch (cmd) {
+		case PVR_SYNC_IOC_CREATE_FENCE:
+			err = pvr_sync_ioctl_create_fence(pvr, user_data);
+			break;
+		case PVR_SYNC_IOC_ENABLE_FENCING:
+			err = pvr_sync_ioctl_enable_fencing(pvr, user_data);
+			break;
+		case PVR_SYNC_IOC_ALLOC_FENCE:
+			err = pvr_sync_ioctl_alloc_fence(pvr, user_data);
+			break;
+#ifndef CONFIG_SW_SYNC_USER
+		case PVR_SYNC_IOC_FORCE_SW_ONLY:
+			err = pvr_sync_ioctl_force_sw_only(pvr,
+				&file->private_data);
+			break;
+#endif /* !CONFIG_SW_SYNC_USER */
+		default:
+			break;
+		}
+	} else {
+#ifndef CONFIG_SW_SYNC_USER
+		struct sw_sync_timeline *sw = file->private_data;
+
+		switch (cmd) {
+		case SW_SYNC_IOC_CREATE_FENCE:
+			err = pvr_sync_ioctl_sw_create_fence(sw, user_data);
+			break;
+		case SW_SYNC_IOC_INC:
+			err = pvr_sync_ioctl_sw_inc(sw, user_data);
+			break;
+		default:
+			break;
+		}
+#endif /* !CONFIG_SW_SYNC_USER */
+	}
+
+	return err;
+}
+
+static void
+pvr_sync_check_status_work_queue_function(struct work_struct *data)
+{
+	/* A completed SW operation may un-block the GPU */
+	PVRSRVCheckStatus(NULL);
+}
+
+/* Returns true if the freelist still has entries, else false if empty */
+static bool
+pvr_sync_clean_freelist(void)
+{
+	struct pvr_sync_kernel_pair *kernel, *k;
+	struct pvr_sync_fence *sync_fence, *f;
+	LIST_HEAD(unlocked_free_list);
+	unsigned long flags;
+	bool freelist_empty;
+
+	/* We can't call PVRSRVServerSyncFreeKM directly in this loop because
+	 * that will take the mmap mutex. We can't take mutexes while we have
+	 * this list locked with a spinlock. So move all the items we want to
+	 * free to another, local list (no locking required) and process it
+	 * in a second loop. */
+
+	spin_lock_irqsave(&sync_prim_free_list_spinlock, flags);
+	list_for_each_entry_safe(kernel, k, &sync_prim_free_list, list) {
+		/* Check if this sync is not used anymore. */
+		if (!is_sync_met(kernel->fence_sync) ||
+		    (kernel->cleanup_sync &&
+		     !is_sync_met(kernel->cleanup_sync))) {
+			continue;
+		}
+
+		/* Remove the entry from the free list. */
+		list_move_tail(&kernel->list, &unlocked_free_list);
+	}
+
+	/* Wait and loop if there are still syncs on the free list (IE
+	 * are still in use by the HW) */
+	freelist_empty = list_empty(&sync_prim_free_list);
+
+	spin_unlock_irqrestore(&sync_prim_free_list_spinlock, flags);
+
+	OSAcquireBridgeLock();
+
+	list_for_each_entry_safe(kernel, k, &unlocked_free_list, list) {
+		list_del(&kernel->list);
+
+		sync_pool_put(kernel->fence_sync);
+		if (kernel->cleanup_sync)
+			sync_pool_put(kernel->cleanup_sync);
+		kfree(kernel);
+	}
+
+	OSReleaseBridgeLock();
+
+	/* sync_fence_put() must be called from process/WQ context
+	 * because it uses fput(), which is not allowed to be called
+	 * from interrupt context in kernels <3.6.
+	 */
+	INIT_LIST_HEAD(&unlocked_free_list);
+
+	spin_lock_irqsave(&sync_fence_put_list_spinlock, flags);
+	list_for_each_entry_safe(sync_fence, f, &sync_fence_put_list, list) {
+		list_move_tail(&sync_fence->list, &unlocked_free_list);
+	}
+	spin_unlock_irqrestore(&sync_fence_put_list_spinlock, flags);
+
+	list_for_each_entry_safe(sync_fence, f, &unlocked_free_list, list) {
+		list_del(&sync_fence->list);
+		sync_fence_put(sync_fence->fence);
+		kfree(sync_fence);
+	}
+
+	return !freelist_empty;
+}
+
+static void
+pvr_sync_defer_free_work_queue_function(struct work_struct *data)
+{
+	enum PVRSRV_ERROR error = PVRSRV_OK;
+	void *event_object;
+
+	error = OSEventObjectOpen(pvr_sync_data.event_object_handle,
+		&event_object);
+	if (error != PVRSRV_OK) {
+		pr_err("pvr_sync: %s: Error opening event object (%s)\n",
+			__func__, PVRSRVGetErrorStringKM(error));
+		return;
+
+	}
+
+	while (pvr_sync_clean_freelist()) {
+
+		error = OSEventObjectWait(event_object);
+
+		switch (error) {
+
+		case PVRSRV_OK:
+		case PVRSRV_ERROR_TIMEOUT:
+			/* Timeout is normal behaviour */
+			continue;
+		default:
+			pr_err("pvr_sync: %s: Error waiting for event object (%s)\n",
+				__func__, PVRSRVGetErrorStringKM(error));
+			break;
+		}
+	}
+	error = OSEventObjectClose(event_object);
+	if (error != PVRSRV_OK) {
+		pr_err("pvr_sync: %s: Error closing event object (%s)\n",
+			__func__, PVRSRVGetErrorStringKM(error));
+	}
+}
+
+static const struct file_operations pvr_sync_fops = {
+	.owner          = THIS_MODULE,
+	.open           = pvr_sync_open,
+	.release        = pvr_sync_close,
+	.unlocked_ioctl = pvr_sync_ioctl,
+	.compat_ioctl   = pvr_sync_ioctl,
+};
+
+static struct miscdevice pvr_sync_device = {
+	.minor          = MISC_DYNAMIC_MINOR,
+	.name           = PVRSYNC_MODNAME,
+	.fops           = &pvr_sync_fops,
+};
+
+static
+void pvr_sync_update_all_timelines(void *command_complete_handle)
+{
+	struct pvr_sync_tl_to_signal *timeline_to_signal, *n;
+	struct pvr_sync_timeline *timeline;
+	LIST_HEAD(timeline_to_signal_list);
+	struct sync_pt *sync_pt;
+	unsigned long flags;
+	bool signal;
+
+	mutex_lock(&timeline_list_mutex);
+	list_for_each_entry(timeline, &timeline_list, list) {
+		signal = false;
+
+		spin_lock_irqsave(&timeline->obj.active_list_lock, flags);
+		list_for_each_entry(sync_pt, &timeline->obj.active_list_head,
+				active_list) {
+			if (sync_pt->parent->ops != &pvr_sync_timeline_ops)
+				continue;
+
+			DPF("%s: check # %s", __func__,
+			    debug_info_sync_pt(sync_pt));
+
+			/* Check for any points which weren't signaled before,
+			 * but are now. If so, mark it for signaling and stop
+			 * processing this timeline. */
+			if (sync_pt->status != 0)
+				continue;
+
+			DPF("%s: signal # %s", __func__,
+			    debug_info_sync_pt(sync_pt));
+
+			/* Create a new entry for the list of timelines which
+			 * needs to be signaled. There are two reasons for not
+			 * doing it right now: It is not possible to signal the
+			 * timeline while holding the spinlock or the mutex.
+			 * pvr_sync_release_timeline may be called by
+			 * timeline_signal which will acquire the mutex as well
+			 * and the spinlock itself is also used within
+			 * timeline_signal. */
+			signal = true;
+			break;
+		}
+		spin_unlock_irqrestore(&timeline->obj.active_list_lock, flags);
+
+		if (signal) {
+			timeline_to_signal =
+				kmalloc(sizeof(struct pvr_sync_tl_to_signal),
+					GFP_KERNEL);
+			if (!timeline_to_signal)
+				break;
+
+			timeline_to_signal->timeline = timeline;
+			list_add_tail(&timeline_to_signal->list,
+				      &timeline_to_signal_list);
+		}
+
+	}
+	mutex_unlock(&timeline_list_mutex);
+
+	/* It is safe to call timeline_signal at this point without holding the
+	 * timeline mutex. We know the timeline can't go away until we have
+	 * called timeline_signal cause the current active point still holds a
+	 * kref to the parent. However, when timeline_signal returns the actual
+	 * timeline structure may be invalid. */
+	list_for_each_entry_safe(timeline_to_signal, n,
+				 &timeline_to_signal_list, list) {
+		struct sync_timeline *timeline =
+			(struct sync_timeline *)timeline_to_signal->timeline;
+		sync_timeline_signal(timeline);
+		list_del(&timeline_to_signal->list);
+		kfree(timeline_to_signal);
+	}
+}
+
+enum PVRSRV_ERROR pvr_sync_init(void)
+{
+	enum PVRSRV_ERROR error;
+	int err;
+
+	DPF("%s", __func__);
+
+	atomic_set(&pvr_sync_data.sync_id, 0);
+
+	error = PVRSRVAcquireDeviceDataKM(0, PVRSRV_DEVICE_TYPE_RGX,
+					  &pvr_sync_data.device_cookie);
+	if (error != PVRSRV_OK) {
+		pr_err("pvr_sync: %s: Failed to initialise services (%s)",
+		       __func__, PVRSRVGetErrorStringKM(error));
+		goto err_out;
+	}
+
+	error = AcquireGlobalEventObjectServer(
+		&pvr_sync_data.event_object_handle);
+	if (error != PVRSRV_OK) {
+		pr_err("pvr_sync: %s: Failed to acquire global event object (%s)",
+			__func__, PVRSRVGetErrorStringKM(error));
+		goto err_release_device_data;
+	}
+
+	OSAcquireBridgeLock();
+
+	error = SyncPrimContextCreate(0,
+				      pvr_sync_data.device_cookie,
+				      &pvr_sync_data.sync_prim_context);
+	if (error != PVRSRV_OK) {
+		pr_err("pvr_sync: %s: Failed to create sync prim context (%s)",
+		       __func__, PVRSRVGetErrorStringKM(error));
+		OSReleaseBridgeLock();
+		goto err_release_event_object;
+	}
+
+	OSReleaseBridgeLock();
+
+	pvr_sync_data.defer_free_wq =
+		create_freezable_workqueue("pvr_sync_defer_free_workqueue");
+	if (!pvr_sync_data.defer_free_wq) {
+		pr_err("pvr_sync: %s: Failed to create pvr_sync defer_free workqueue",
+		       __func__);
+		goto err_free_sync_context;
+	}
+
+	INIT_WORK(&pvr_sync_data.defer_free_work,
+		pvr_sync_defer_free_work_queue_function);
+
+	pvr_sync_data.check_status_wq =
+		create_freezable_workqueue("pvr_sync_check_status_workqueue");
+	if (!pvr_sync_data.check_status_wq) {
+		pr_err("pvr_sync: %s: Failed to create pvr_sync check_status workqueue",
+		       __func__);
+		goto err_destroy_defer_free_wq;
+	}
+
+	INIT_WORK(&pvr_sync_data.check_status_work,
+		pvr_sync_check_status_work_queue_function);
+	error = PVRSRVRegisterCmdCompleteNotify(
+			&pvr_sync_data.command_complete_handle,
+			&pvr_sync_update_all_timelines,
+			&pvr_sync_data.device_cookie);
+	if (error != PVRSRV_OK) {
+		pr_err("pvr_sync: %s: Failed to register MISR notification (%s)",
+		       __func__, PVRSRVGetErrorStringKM(error));
+		goto err_destroy_status_wq;
+	}
+
+	error = PVRSRVRegisterDbgRequestNotify(
+			&pvr_sync_data.debug_notify_handle,
+			pvr_sync_debug_request,
+			DEBUG_REQUEST_ANDROIDSYNC,
+			NULL);
+	if (error != PVRSRV_OK) {
+		pr_err("pvr_sync: %s: Failed to register debug notifier (%s)",
+			__func__, PVRSRVGetErrorStringKM(error));
+		goto err_unregister_cmd_complete;
+	}
+
+	err = misc_register(&pvr_sync_device);
+	if (err) {
+		pr_err("pvr_sync: %s: Failed to register pvr_sync device (%d)",
+		       __func__, err);
+		error = PVRSRV_ERROR_RESOURCE_UNAVAILABLE;
+		goto err_unregister_dbg;
+	}
+
+	error = PVRSRV_OK;
+	return error;
+
+err_unregister_dbg:
+	PVRSRVUnregisterDbgRequestNotify(pvr_sync_data.debug_notify_handle);
+err_unregister_cmd_complete:
+	PVRSRVUnregisterCmdCompleteNotify(
+		pvr_sync_data.command_complete_handle);
+err_destroy_status_wq:
+	destroy_workqueue(pvr_sync_data.check_status_wq);
+err_destroy_defer_free_wq:
+	destroy_workqueue(pvr_sync_data.defer_free_wq);
+err_free_sync_context:
+	OSAcquireBridgeLock();
+	SyncPrimContextDestroy(pvr_sync_data.sync_prim_context);
+	OSReleaseBridgeLock();
+err_release_event_object:
+	ReleaseGlobalEventObjectServer(pvr_sync_data.event_object_handle);
+err_release_device_data:
+	PVRSRVReleaseDeviceDataKM(pvr_sync_data.device_cookie);
+err_out:
+
+	return error;
+}
+
+void pvr_sync_deinit(void)
+{
+	DPF("%s", __func__);
+
+	misc_deregister(&pvr_sync_device);
+
+	PVRSRVUnregisterDbgRequestNotify(pvr_sync_data.debug_notify_handle);
+
+	PVRSRVUnregisterCmdCompleteNotify(
+		pvr_sync_data.command_complete_handle);
+
+	/* This will drain the workqueue, so we guarantee that all deferred
+	 * syncs are free'd before returning */
+	destroy_workqueue(pvr_sync_data.defer_free_wq);
+	destroy_workqueue(pvr_sync_data.check_status_wq);
+
+	OSAcquireBridgeLock();
+
+	sync_pool_clear();
+
+	SyncPrimContextDestroy(pvr_sync_data.sync_prim_context);
+
+	OSReleaseBridgeLock();
+
+	ReleaseGlobalEventObjectServer(pvr_sync_data.event_object_handle);
+
+	PVRSRVReleaseDeviceDataKM(pvr_sync_data.device_cookie);
+}
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/kernel/drivers/staging/imgtec/pvr_sync.h b/drivers/external_drivers/intel_media/graphics/rgx/kernel/drivers/staging/imgtec/pvr_sync.h
new file mode 100644
index 0000000..faae666
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/kernel/drivers/staging/imgtec/pvr_sync.h
@@ -0,0 +1,91 @@
+/*************************************************************************/ /*!
+@File           pvr_sync.h
+@Title          Kernel driver for Android's sync mechanism
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+/* vi: set ts=8: */
+
+#ifndef _PVR_SYNC_H
+#define _PVR_SYNC_H
+
+#include "pvr_fd_sync_kernel.h"
+#include "rgx_fwif_shared.h"
+
+/* Services internal interface */
+enum PVRSRV_ERROR pvr_sync_init(void);
+void pvr_sync_deinit(void);
+
+struct pvr_sync_append_data
+{
+	u32				nr_updates;
+	PRGXFWIF_UFO_ADDR		*update_ufo_addresses;
+	u32				*update_values;
+	u32				nr_checks;
+	PRGXFWIF_UFO_ADDR		*check_ufo_addresses;
+	u32				*check_values;
+
+	/* The cleanup list is needed for rollback (as that's the only op taken) */
+	u32				nr_cleaup_syncs;
+	struct pvr_sync_native_sync_prim	**cleanup_syncs;
+
+	/* Keep the sync points around for fput and if rollback is needed */
+	struct pvr_sync_alloc_data      *update_sync_data;
+	u32				nr_fences;
+	struct sync_fence		*fences[];
+};
+
+enum PVRSRV_ERROR
+pvr_sync_append_fences(
+	const char			*name,
+	const u32			nr_check_fences,
+	const s32			*check_fence_fds,
+	const s32                       update_fence_fd,
+	const u32			nr_updates,
+	const PRGXFWIF_UFO_ADDR		*update_ufo_addresses,
+	const u32			*update_values,
+	const u32			nr_checks,
+	const PRGXFWIF_UFO_ADDR		*check_ufo_addresses,
+	const u32			*check_values,
+	struct pvr_sync_append_data	**append_sync_data);
+
+void pvr_sync_rollback_append_fences(struct pvr_sync_append_data		*sync_check_data);
+void pvr_sync_nohw_complete_fences(struct pvr_sync_append_data	*sync_check_data);
+void pvr_sync_free_append_fences_data(struct pvr_sync_append_data		*sync_check_data);
+
+#endif /* _PVR_SYNC_H */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/kernel/drivers/staging/imgtec/services_kernel_client.h b/drivers/external_drivers/intel_media/graphics/rgx/kernel/drivers/staging/imgtec/services_kernel_client.h
new file mode 100644
index 0000000..74211b5
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/kernel/drivers/staging/imgtec/services_kernel_client.h
@@ -0,0 +1,152 @@
+/*************************************************************************/ /*!
+@File           services_kernel_client.h
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+/* vi: set ts=8: */
+
+/* This file contains a partial redefinition of the PowerVR Services 5
+ * interface for use by components which are checkpatch clean. This
+ * header is included by the unrefined, non-checkpatch clean headers
+ * to ensure that prototype/typedef/macro changes break the build.
+ */
+
+#ifndef __SERVICES_KERNEL_CLIENT__
+#define __SERVICES_KERNEL_CLIENT__
+
+#include "debug_request_ids.h"
+#include "pvrsrv_error.h"
+
+#include <linux/types.h>
+
+#ifndef __pvrsrv_defined_struct_enum__
+
+/* pvrsrv_device_types.h */
+
+enum PVRSRV_DEVICE_TYPE {
+	PVRSRV_DEVICE_TYPE_RGX = 10,
+};
+
+/* sync_external.h */
+
+struct PVRSRV_CLIENT_SYNC_PRIM {
+	volatile __u32 *pui32LinAddr;
+};
+
+struct PVRSRV_CLIENT_SYNC_PRIM_OP {
+	__u32 ui32Flags;
+	struct pvrsrv_sync_prim *psSync;
+	__u32 ui32FenceValue;
+	__u32 ui32UpdateValue;
+};
+
+#else /* __pvrsrv_defined_struct_enum__ */
+
+enum PVRSRV_DEVICE_TYPE;
+struct PVRSRV_CLIENT_SYNC_PRIM;
+struct PVRSRV_CLIENT_SYNC_PRIM_OP;
+
+#endif /* __pvrsrv_defined_struct_enum__ */
+
+struct SYNC_PRIM_CONTEXT;
+
+/* pvrsrv.h */
+
+#define DEBUG_REQUEST_VERBOSITY_LOW    0
+#define DEBUG_REQUEST_VERBOSITY_MEDIUM 1
+#define DEBUG_REQUEST_VERBOSITY_HIGH   2
+#define DEBUG_REQUEST_VERBOSITY_MAX    (DEBUG_REQUEST_VERBOSITY_HIGH)
+
+typedef void (DUMPDEBUG_PRINTF_FUNC)(const char *fmt, ...) __printf(1, 2);
+
+extern DUMPDEBUG_PRINTF_FUNC *g_pfnDumpDebugPrintf;
+
+typedef void (*PFN_CMDCOMP_NOTIFY)(void *hCmdCompHandle);
+enum PVRSRV_ERROR PVRSRVRegisterCmdCompleteNotify(void **phNotify,
+	PFN_CMDCOMP_NOTIFY pfnCmdCompleteNotify, void *hPrivData);
+enum PVRSRV_ERROR PVRSRVUnregisterCmdCompleteNotify(void *hNotify);
+
+typedef void (*PFN_DBGREQ_NOTIFY) (void *hDebugRequestHandle,
+	__u32 ui32VerbLevel);
+enum PVRSRV_ERROR PVRSRVRegisterDbgRequestNotify(void **phNotify,
+	PFN_DBGREQ_NOTIFY pfnDbgRequestNotify,
+	__u32 ui32RequesterID, void *hDbgReqeustHandle);
+enum PVRSRV_ERROR PVRSRVUnregisterDbgRequestNotify(void *hNotify);
+
+enum PVRSRV_ERROR PVRSRVAcquireDeviceDataKM(__u32 ui32DevIndex,
+	enum PVRSRV_DEVICE_TYPE eDeviceType, void **phDevCookie);
+enum PVRSRV_ERROR PVRSRVReleaseDeviceDataKM(void *hDevCookie);
+void PVRSRVCheckStatus(void *hCmdCompCallerHandle);
+enum PVRSRV_ERROR AcquireGlobalEventObjectServer(void **phGlobalEventObject);
+enum PVRSRV_ERROR ReleaseGlobalEventObjectServer(void *hGlobalEventObject);
+
+/* sync.h */
+
+enum PVRSRV_ERROR SyncPrimContextCreate(void *hBridge, void *hDeviceNode,
+	struct SYNC_PRIM_CONTEXT **phSyncPrimContext);
+void SyncPrimContextDestroy(struct SYNC_PRIM_CONTEXT *hSyncPrimContext);
+
+enum PVRSRV_ERROR SyncPrimAlloc(struct SYNC_PRIM_CONTEXT *hSyncPrimContext,
+	struct PVRSRV_CLIENT_SYNC_PRIM **ppsSync, const char * pszClassName);
+void SyncPrimFree(struct PVRSRV_CLIENT_SYNC_PRIM *psSync);
+__u32 SyncPrimGetFirmwareAddr(struct PVRSRV_CLIENT_SYNC_PRIM *psSync);
+
+/* pdump_km.h */
+
+#ifdef PDUMP
+enum PVRSRV_ERROR __printf(1, 2) PDumpComment(char *fmt, ...);
+#else
+static inline enum PVRSRV_ERROR __printf(1, 2) PDumpComment(char *fmt, ...)
+{
+	return PVRSRV_OK;
+}
+#endif
+
+/* osfunc.h */
+
+void OSAcquireBridgeLock(void);
+void OSReleaseBridgeLock(void);
+enum PVRSRV_ERROR OSEventObjectWait(void *hOSEventKM);
+enum PVRSRV_ERROR OSEventObjectOpen(void *hEventObject, void **phOSEventKM);
+enum PVRSRV_ERROR OSEventObjectClose(void *hOSEventKM);
+
+/* srvkm.h */
+
+const char *PVRSRVGetErrorStringKM(enum PVRSRV_ERROR eError);
+
+#endif /* __SERVICES_KERNEL_CLIENT__ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/3rdparty/intel_devfreq/dfrgx_interface.c b/drivers/external_drivers/intel_media/graphics/rgx/services/3rdparty/intel_devfreq/dfrgx_interface.c
new file mode 100644
index 0000000..e041ed9
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/3rdparty/intel_devfreq/dfrgx_interface.c
@@ -0,0 +1,66 @@
+/**************************************************************************
+ * Copyright (c) 2012, Intel Corporation.
+ * All Rights Reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Dale B. Stimson <dale.b.stimson@intel.com>
+ *    Javier Torres Castillo <javier.torres.castillo@intel.com>
+ */
+
+
+#include <linux/module.h>
+
+#include "dfrgx_interface.h"
+
+static struct dfrgx_interface_s dfrgx_interface;
+
+
+/**
+ * dfrgx_interface_set_data() - Provide some gburst data for hooks
+ * inside the graphics driver.
+ * @dfrgx_interface_in: Data to allow callback to dfrgx burst module.
+ *
+ * Also, the symbol dependency will establish a load order dependency for
+ * the case where both the graphics driver  and the dfrgx driver are modules,
+ * ensuring that the graphics driver is loaded and initialized before gburst.
+ */
+void dfrgx_interface_set_data(struct dfrgx_interface_s *dfrgx_interface_in)
+{
+	dfrgx_interface = *dfrgx_interface_in;
+}
+
+
+/*  Leave this export in place, even if built-in, as it allows easy compilation
+    testing of gburst as a module. */
+EXPORT_SYMBOL(dfrgx_interface_set_data);
+
+
+/**
+ * gburst_interface_power_state_set() - gfx drv calls to indicate power state.
+ * @st_on: 1 if power coming on, 0 if power going off.
+ */
+void dfrgx_interface_power_state_set(int st_on)
+{
+	if (dfrgx_interface.dfrgx_power_state_set && dfrgx_interface.dfrgx_priv)
+		dfrgx_interface.dfrgx_power_state_set(dfrgx_interface.dfrgx_priv, st_on);
+}
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/3rdparty/intel_devfreq/dfrgx_interface.h b/drivers/external_drivers/intel_media/graphics/rgx/services/3rdparty/intel_devfreq/dfrgx_interface.h
new file mode 100644
index 0000000..11944d5
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/3rdparty/intel_devfreq/dfrgx_interface.h
@@ -0,0 +1,64 @@
+/**************************************************************************
+ * Copyright (c) 2012, Intel Corporation.
+ * All Rights Reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Dale B. Stimson <dale.b.stimson@intel.com>
+ *    Javier Torres Castillo <javier.torres.castillo@intel.com>
+ */
+
+#if !defined DFRGX_INTERFACE_H
+#define DFRGX_INTERFACE_H
+
+struct df_rgx_data_s;
+
+typedef void (*dfrgx_power_state_set_f)(struct df_rgx_data_s *dfrgx_data, int st_on);
+
+/**
+ * struct gburst_interface_s -
+ * @dfrgx_priv: Private data handle, opaque to the other driver.
+ * @dfrgx_power_state_set: Function to callback when dev power changes.
+ */
+struct dfrgx_interface_s {
+	struct df_rgx_data_s  *dfrgx_priv;
+	dfrgx_power_state_set_f dfrgx_power_state_set;
+};
+
+
+/**
+ * dfrgx_interface_set_data() - Provide some dfrgx data for hooks
+ * inside the graphics driver.
+ * Also, the symbol dependency will establish a load order dependency for
+ * the case where both the graphics driver and the dfrgx driver are modules,
+ * ensuring that the graphics driver is loaded and initialized before dfrgx.
+ */
+void dfrgx_interface_set_data(struct dfrgx_interface_s *gb_interface);
+
+
+/**
+ * dfrgx_interface_power_state_set() - Indicate that power is off (0) or on (1).
+ * This is a hook called from the low-level device driver.
+ */
+void dfrgx_interface_power_state_set(int st_on);
+
+
+#endif /* if !defined DFRGX_INTERFACE_H */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/3rdparty/intel_devfreq/dfrgx_utilstats.c b/drivers/external_drivers/intel_media/graphics/rgx/services/3rdparty/intel_devfreq/dfrgx_utilstats.c
new file mode 100644
index 0000000..7eda403
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/3rdparty/intel_devfreq/dfrgx_utilstats.c
@@ -0,0 +1,232 @@
+/**************************************************************************
+ * Copyright (c) 2013, Intel Corporation.
+ * All Rights Reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Javier Torres Castillo <javier.torres.castillo@intel.com>
+ */
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+#include <linux/delay.h>
+#include "device.h"
+#include "osfunc.h"
+#include "rgxdebug.h"
+#include "dfrgx_utilstats.h"
+#include "pvr_tlcommon.h"
+#include "img_types.h"
+#include "pvrsrv.h"
+#include "rgxdevice.h"
+#include "rgxinit.h"
+
+
+#define DFRGX_HWPERF_DEBUG 0
+
+#if (defined DFRGX_HWPERF_DEBUG) && DFRGX_HWPERF_DEBUG
+#define DFRGX_DEBUG_MSG(string)				printk(DFRGX_HWPERF_ALERT string, __func__)
+#define DFRGX_DEBUG_MSG_1(string, var1)			printk(DFRGX_HWPERF_ALERT string, __func__, var1)
+#define DFRGX_DEBUG_MSG_2(string, var1, var2)		printk(DFRGX_HWPERF_ALERT string, __func__, var1, var2)
+#define DFRGX_DEBUG_MSG_3(string, var1, var2, var3)	printk(DFRGX_HWPERF_ALERT string, __func__, var1, var2, var3)
+#else
+#define DFRGX_DEBUG_MSG(string)
+#define DFRGX_DEBUG_MSG_1(string, var1)
+#define DFRGX_DEBUG_MSG_2(string, var1, var2)
+#define DFRGX_DEBUG_MSG_3(string, var1, var2, var3)
+#endif
+
+typedef struct _DFRGX_HWPERF_OBJ_ {
+	PVRSRV_DEVICE_NODE *pdev_node;
+	PVRSRV_RGXDEV_INFO *prgx_dev_info;
+	IMG_HANDLE gpu_util_user;
+	unsigned int is_device_acquired;
+} DFRGX_HWPERF_OBJ;
+
+static DFRGX_HWPERF_OBJ *pDFRGX_Obj = NULL;
+
+/******************************************************************************
+ * Helper Functions(s)
+ *****************************************************************************/
+
+static unsigned int gpu_rgx_acquire_device(void){
+
+	PVRSRV_DEVICE_TYPE *peDeviceTypeInt = NULL;
+	PVRSRV_DEVICE_CLASS *peDeviceClassInt = NULL;
+	IMG_UINT32 *pui32DeviceIndexInt = NULL;
+	IMG_HANDLE h_dev_cookie = NULL;
+	IMG_UINT32 num_devices = 0;
+	unsigned int error = DFRGX_HWPERF_OK;
+	IMG_UINT32 rgx_index = IMG_UINT32_MAX;
+	int i = 0;
+
+	if (pDFRGX_Obj) {
+		peDeviceTypeInt = kzalloc(PVRSRV_MAX_DEVICES * sizeof(PVRSRV_DEVICE_TYPE), GFP_KERNEL);
+		if (!peDeviceTypeInt)
+		{
+			error = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto go_free;
+		}
+
+		peDeviceClassInt = kzalloc(PVRSRV_MAX_DEVICES * sizeof(PVRSRV_DEVICE_CLASS), GFP_KERNEL);
+		if (!peDeviceClassInt)
+		{
+			error = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto go_free;
+		}
+
+		pui32DeviceIndexInt = kzalloc(PVRSRV_MAX_DEVICES * sizeof(IMG_UINT32), GFP_KERNEL);
+		if (!pui32DeviceIndexInt)
+		{
+			error = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto go_free;
+		}
+
+		/* Enumerate active devices */
+		error = PVRSRVEnumerateDevicesKM(
+						&num_devices,
+						peDeviceTypeInt,
+						peDeviceClassInt,
+						pui32DeviceIndexInt);
+		if (error) {
+			DFRGX_DEBUG_MSG_1("%s: PVRSRVEnumarateDevicesKM failed %d\n", error);
+			goto go_free;
+		}
+
+		DFRGX_DEBUG_MSG_1("%s: Num Devices : %d \n", num_devices);
+
+		for (i = 0; i < num_devices; i++) {
+			DFRGX_DEBUG_MSG_2("%s: Index %d:  Device %d:\n",
+				i, peDeviceTypeInt[i]);
+
+			if (peDeviceTypeInt[i] == PVRSRV_DEVICE_TYPE_RGX) {
+				rgx_index = i;
+				break;
+			}
+		}
+
+		if (rgx_index == IMG_UINT32_MAX) {
+			error = PVRSRV_ERROR_INIT_FAILURE;
+			goto go_free;
+		}
+
+		/* Now we have to acquire the node to work with, RGX device required*/
+		error = PVRSRVAcquireDeviceDataKM(rgx_index, PVRSRV_DEVICE_TYPE_RGX, &h_dev_cookie);
+		if (error) {
+
+			DFRGX_DEBUG_MSG_1("%s: PVRSRVEnumarateDevicesKM failed %d \n", error);
+			goto go_free;
+		}
+
+		pDFRGX_Obj->pdev_node = (PVRSRV_DEVICE_NODE*)h_dev_cookie;
+		DFRGX_DEBUG_MSG_2("%s: Acquired Device node name: %s, Device type: %d \n",
+			pDFRGX_Obj->pdev_node->szRAName, pDFRGX_Obj->pdev_node->sDevId.eDeviceType);
+	} else {
+
+		DFRGX_DEBUG_MSG_2("%s: Device node already acquired: %s, Device type: %d \n",
+			pDFRGX_Obj->pdev_node->szRAName, pDFRGX_Obj->pdev_node->sDevId.eDeviceType);
+	}
+
+go_free:
+	if (peDeviceTypeInt)
+		kfree(peDeviceTypeInt);
+	if (peDeviceClassInt)
+		kfree(peDeviceClassInt);
+	if (pui32DeviceIndexInt)
+		kfree(pui32DeviceIndexInt);
+
+	return error;
+}
+
+unsigned int gpu_rgx_get_util_stats(void* pvData)
+{
+	RGXFWIF_GPU_UTIL_STATS*	putil_stats = (RGXFWIF_GPU_UTIL_STATS*)pvData;
+	RGXFWIF_GPU_UTIL_STATS utils;
+
+	if (!pDFRGX_Obj || !pDFRGX_Obj->prgx_dev_info ||
+		!pDFRGX_Obj->prgx_dev_info->pfnGetGpuUtilStats ||
+		!pDFRGX_Obj->pdev_node)
+		return 0;
+
+	pDFRGX_Obj->prgx_dev_info->pfnGetGpuUtilStats(pDFRGX_Obj->pdev_node,
+			pDFRGX_Obj->gpu_util_user,
+			&utils);
+
+	putil_stats->bValid = utils.bValid;
+	putil_stats->ui64GpuStatActiveHigh = utils.ui64GpuStatActiveHigh;
+	putil_stats->ui64GpuStatActiveLow = utils.ui64GpuStatActiveLow;
+	putil_stats->ui64GpuStatBlocked = utils.ui64GpuStatBlocked;
+	putil_stats->ui64GpuStatIdle = utils.ui64GpuStatIdle;
+	putil_stats->ui64GpuStatCumulative = utils.ui64GpuStatCumulative;
+
+	return putil_stats->bValid;
+
+}
+EXPORT_SYMBOL(gpu_rgx_get_util_stats);
+
+unsigned int gpu_rgx_utilstats_init_obj(void){
+
+	unsigned int error = DFRGX_HWPERF_OK;
+
+	if (pDFRGX_Obj) {
+		DFRGX_DEBUG_MSG("%s: pDFRGX Object already initialized! \n");
+		goto go_out;
+	}
+
+	pDFRGX_Obj = kzalloc(sizeof(DFRGX_HWPERF_OBJ), GFP_KERNEL);
+	if (!pDFRGX_Obj) {
+		error = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto go_out;
+	}
+
+	error = gpu_rgx_acquire_device();
+	if (error) {
+		DFRGX_DEBUG_MSG_1("%s: gpu_rgx_acquire_device failed %d \n", error);
+		goto go_free_obj;
+	}
+
+	pDFRGX_Obj->prgx_dev_info = (PVRSRV_RGXDEV_INFO*)pDFRGX_Obj->pdev_node->pvDevice;
+	RGXRegisterGpuUtilStats(&pDFRGX_Obj->gpu_util_user);
+go_out:
+	return error;
+go_free_obj:
+	kfree(pDFRGX_Obj);
+	pDFRGX_Obj = NULL;
+	return error;
+}
+EXPORT_SYMBOL(gpu_rgx_utilstats_init_obj);
+
+unsigned int gpu_rgx_utilstats_deinit_obj(void){
+
+	if (!pDFRGX_Obj) {
+		return 0;
+	}
+
+	RGXUnregisterGpuUtilStats(pDFRGX_Obj->gpu_util_user);
+
+	kfree(pDFRGX_Obj);
+	pDFRGX_Obj = NULL;
+	return 0;
+}
+EXPORT_SYMBOL(gpu_rgx_utilstats_deinit_obj);
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/3rdparty/intel_devfreq/dfrgx_utilstats.h b/drivers/external_drivers/intel_media/graphics/rgx/services/3rdparty/intel_devfreq/dfrgx_utilstats.h
new file mode 100644
index 0000000..b14fd74
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/3rdparty/intel_devfreq/dfrgx_utilstats.h
@@ -0,0 +1,58 @@
+/**************************************************************************
+ * Copyright (c) 2013, Intel Corporation.
+ * All Rights Reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Javier Torres Castillo <javier.torres.castillo@intel.com>
+ */
+
+#ifndef _DF_RGXHWPERF_H_
+#define _DF_RGXHWPERF_H_
+
+#define DF_RGX_HWPERF_DEV    "dfrgxhwperf"
+#define DFRGX_HWPERF_ALERT KERN_ALERT DF_RGX_HWPERF_DEV ": "
+
+ /*****************************************************************************
+ * Error values
+ *****************************************************************************/
+typedef enum _DFRGX_HWPERF_ERROR_ {
+	DFRGX_HWPERF_OK,
+	DFRGX_HWPERF_OBJ_NOT_CREATED,
+	DFRGX_HWPERF_ALREADY_INITIAIZED,
+	DFRGX_HWPERF_NODE_NOT_ACQUIRED,
+	DFRGX_HWPERF_EVENTS_NOT_ENABLED,
+	DFRGX_HWPERF_COUNTERS_NOT_CONFIGURED,
+	DFRGX_HWPERF_ERROR_FORCE_I32 = 0x7fffffff
+
+} DFRGX_HW_PERF_ERROR;
+
+/******************************************************************************
+ * RGX  Profiling Server API(s)
+ *****************************************************************************/
+unsigned int gpu_rgx_utilstats_init_obj(void);
+
+unsigned int gpu_rgx_utilstats_deinit_obj(void);
+
+unsigned int gpu_rgx_get_util_stats(void *pvData);
+
+
+#endif	/* _DF_RGXHWPERF_H_*/
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/3rdparty/intel_devfreq/rgxdf.c b/drivers/external_drivers/intel_media/graphics/rgx/services/3rdparty/intel_devfreq/rgxdf.c
new file mode 100644
index 0000000..d375fd4
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/3rdparty/intel_devfreq/rgxdf.c
@@ -0,0 +1,206 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device specific utility routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include <linux/module.h>
+#include <drm/drmP.h>
+#include "rgxdf.h"
+#include "device.h"
+#include "rgxdevice.h"
+#include "pvrsrv.h"
+#include "rgx_fwif_km.h"
+#include "pdump_km.h"
+#include "osfunc.h"
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "power.h"
+#include "sync_internal.h"
+#include "rgxfwutils.h"
+#include "img_types.h"
+
+extern struct drm_device *gpsPVRDRMDev;
+
+static PVRSRV_DEVICE_NODE* pDevNode = NULL;
+
+static PVRSRV_DEVICE_NODE* RGXGetDeviceNode(void)
+{
+	if(pDevNode == NULL)
+	{
+		PVRSRV_DEVICE_TYPE *peDeviceTypeInt = NULL;
+		PVRSRV_DEVICE_CLASS *peDeviceClassInt = NULL;
+		IMG_UINT32 *pui32DeviceIndexInt = NULL;
+		IMG_HANDLE hDevCookie = NULL;
+		IMG_UINT32 numDevices = 0;
+		IMG_UINT32 i = 0;
+		IMG_UINT32 rgxIndex = IMG_UINT32_MAX;
+		IMG_UINT32 error = 0;
+
+		peDeviceTypeInt = kzalloc(PVRSRV_MAX_DEVICES * sizeof(PVRSRV_DEVICE_TYPE), GFP_KERNEL);
+		if (!peDeviceTypeInt)
+		{
+			error = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto EnumerateDevices_exit;
+		}
+
+		peDeviceClassInt = kzalloc(PVRSRV_MAX_DEVICES * sizeof(PVRSRV_DEVICE_CLASS), GFP_KERNEL);
+		if (!peDeviceClassInt)
+		{
+			error = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto EnumerateDevices_exit;
+		}
+
+		pui32DeviceIndexInt = kzalloc(PVRSRV_MAX_DEVICES * sizeof(IMG_UINT32), GFP_KERNEL);
+		if (!pui32DeviceIndexInt)
+		{
+			error = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto EnumerateDevices_exit;
+		}
+
+		/* Enumerate active devices */
+		error = PVRSRVEnumerateDevicesKM(
+						&numDevices,
+						peDeviceTypeInt,
+						peDeviceClassInt,
+						pui32DeviceIndexInt);
+		if (error == 0){
+			for(i =0; i < numDevices; i++){
+				if (peDeviceTypeInt[i] == PVRSRV_DEVICE_TYPE_RGX){
+					rgxIndex = pui32DeviceIndexInt[i];
+				}
+			}
+
+			if(rgxIndex != IMG_UINT32_MAX){
+				/* Now we have to acquire the node to work with, RGX device required*/
+				error = PVRSRVAcquireDeviceDataKM (rgxIndex, PVRSRV_DEVICE_TYPE_RGX, &hDevCookie);
+
+				if (error == 0)
+					pDevNode = (PVRSRV_DEVICE_NODE*)hDevCookie;
+			}
+		}
+
+EnumerateDevices_exit:
+		if (peDeviceTypeInt)
+			kfree(peDeviceTypeInt);
+		if (peDeviceClassInt)
+			kfree(peDeviceClassInt);
+		if (pui32DeviceIndexInt)
+			kfree(pui32DeviceIndexInt);
+	}
+
+	return pDevNode;
+}
+
+unsigned int RGXGetDRMDeviceID(void)
+{
+	if (gpsPVRDRMDev != NULL)
+		return gpsPVRDRMDev->pci_device;
+
+	return 0;
+}
+EXPORT_SYMBOL(RGXGetDRMDeviceID);
+
+int rgx_is_device_powered(void)
+{
+
+	PVRSRV_DEVICE_NODE* psDeviceNode = RGXGetDeviceNode();
+
+	int isPowered = IMG_FALSE;
+
+	if(psDeviceNode)
+		isPowered = PVRSRVIsDevicePowered(psDeviceNode->sDevId.ui32DeviceIndex);
+
+	return isPowered;
+}
+EXPORT_SYMBOL(rgx_is_device_powered);
+
+unsigned int RGXUpdateClockSpeed(unsigned int ui32ClockSpeed)
+{
+	PVRSRV_ERROR	eError = PVRSRV_OK;
+	PVRSRV_DEVICE_NODE* psDeviceNode = RGXGetDeviceNode();
+	RGX_DATA	*psRGXData = NULL;
+
+	if(!psDeviceNode){
+		eError = PVRSRV_ERROR_INVALID_DEVICE;
+		goto out;
+	}
+
+	psRGXData = (RGX_DATA*)psDeviceNode->psDevConfig->hDevData;
+
+	psRGXData->psRGXTimingInfo->ui32CoreClockSpeed = ui32ClockSpeed;
+out:
+	return eError;
+}
+EXPORT_SYMBOL(RGXUpdateClockSpeed);
+
+unsigned int RGXPreClockSpeed(void){
+
+	PVRSRV_ERROR	eError = PVRSRV_OK;
+	PVRSRV_DEVICE_NODE* psDeviceNode = RGXGetDeviceNode();
+
+	if(!psDeviceNode){
+		eError = PVRSRV_ERROR_INVALID_DEVICE;
+		goto out;
+	}
+
+	eError = PVRSRVDevicePreClockSpeedChange(psDeviceNode->sDevId.ui32DeviceIndex, IMG_FALSE, NULL);
+out:
+	return eError;
+}
+EXPORT_SYMBOL(RGXPreClockSpeed);
+
+unsigned int RGXPostClockSpeed(void){
+
+	PVRSRV_ERROR	eError = PVRSRV_OK;
+	PVRSRV_DEVICE_NODE* psDeviceNode = RGXGetDeviceNode();
+
+	if(!psDeviceNode){
+		eError = PVRSRV_ERROR_INVALID_DEVICE;
+		goto out;
+	}
+
+
+	PVRSRVDevicePostClockSpeedChange(psDeviceNode->sDevId.ui32DeviceIndex, IMG_FALSE, NULL);
+
+out:
+	return eError;
+}
+EXPORT_SYMBOL(RGXPostClockSpeed);
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/3rdparty/intel_devfreq/rgxdf.h b/drivers/external_drivers/intel_media/graphics/rgx/services/3rdparty/intel_devfreq/rgxdf.h
new file mode 100644
index 0000000..cb3ad5e
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/3rdparty/intel_devfreq/rgxdf.h
@@ -0,0 +1,125 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device specific utility routines declarations
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Inline functions/structures specific to RGX
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXDF_H_
+#define _RGXDF_H_
+/*!
+******************************************************************************
+
+ @Function	RGXAcquireIsDevicePowered
+
+ @Description
+
+	Whether the device is powered, for the purposes of lockup detection.
+
+ @None
+
+ @Return   int  : 1 if device is powered on, 0 if not
+
+******************************************************************************/
+int rgx_is_device_powered(void);
+
+/*!
+******************************************************************************
+
+ @Function	RGXGetDRMDeviceID
+
+ @Description
+
+	Returns device ID of DMR device.
+
+ @None
+
+ @Return   unsigned int  : device ID
+
+******************************************************************************/
+unsigned int RGXGetDRMDeviceID(void);
+
+/*!
+******************************************************************************
+
+ @Function	RGXUpdateClockSpeed
+
+ @Description
+
+	Pre Clock Speed routine.
+
+ @None
+
+ @Return   None
+
+******************************************************************************/
+unsigned int RGXUpdateClockSpeed(unsigned int ui32ClockSpeed);
+
+/*!
+******************************************************************************
+
+ @Function	RGXPreClockSpeed
+
+ @Description
+
+	Pre Clock Speed routine.
+
+ @None
+
+ @Return   None
+
+******************************************************************************/
+unsigned int RGXPreClockSpeed(void);
+
+/*!
+******************************************************************************
+
+ @Function	RGXPostClockSpeed
+
+ @Description
+
+	Post Clock Speed routine.
+
+ @None
+
+ @Return   None
+
+******************************************************************************/
+unsigned int RGXPostClockSpeed(void);
+#endif /*#ifndef _RGXDF_H_*/
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/3rdparty/intel_drm/dc_mrfld.c b/drivers/external_drivers/intel_media/graphics/rgx/services/3rdparty/intel_drm/dc_mrfld.c
new file mode 100644
index 0000000..948390a
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/3rdparty/intel_drm/dc_mrfld.c
@@ -0,0 +1,2215 @@
+/**********************************************************************
+ *
+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+#include "img_defs.h"
+#include "servicesext.h"
+#include "allocmem.h"
+#include "kerneldisplay.h"
+#include "mm.h"
+#include "pvrsrv_error.h"
+#include "display_callbacks.h"
+#include "dc_server.h"
+#include "dc_mrfld.h"
+#include "pwr_mgmt.h"
+#include "psb_drv.h"
+#ifndef ENABLE_HW_REPEAT_FRAME
+#include "dc_maxfifo.h"
+#endif
+
+#if !defined(SUPPORT_DRM)
+#error "SUPPORT_DRM must be set"
+#endif
+
+#define KEEP_UNUSED_CODE 0
+
+static DC_MRFLD_DEVICE *gpsDevice;
+
+#define	 DRVNAME "Merrifield-DRM"
+
+/* GPU asks for 32 pixels of width alignment */
+#define DC_MRFLD_WIDTH_ALIGN 32
+#define DC_MRFLD_WIDTH_ALIGN_MASK (DC_MRFLD_WIDTH_ALIGN - 1)
+
+/*DC plane asks for 64 bytes alignment*/
+#define DC_MRFLD_STRIDE_ALIGN 64
+#define DC_MRFLD_STRIDE_ALIGN_MASK (DC_MRFLD_STRIDE_ALIGN - 1)
+
+/* Timeout for Flip Watchdog */
+#define FLIP_TIMEOUT (HZ/4)
+
+/* IED Clean-up Handling */
+extern uint32_t g_ied_ref;
+extern uint32_t g_ied_force_clean;
+extern struct mutex g_ied_mutex;
+
+struct power_off_req {
+	struct delayed_work work;
+	struct plane_state *pstate;
+};
+
+static IMG_PIXFMT DC_MRFLD_Supported_PixelFormats[] = {
+	/*supported RGB formats*/
+	IMG_PIXFMT_B8G8R8A8_UNORM,
+	IMG_PIXFMT_B5G6R5_UNORM,
+
+	/*supported YUV formats*/
+	IMG_PIXFMT_YUV420_2PLANE,
+};
+
+#if 0
+static uint32_t DC_MRFLD_PixelFormat_Mapping[] = {
+	[IMG_PIXFMT_B5G6R5_UNORM] = (0x5 << 26),
+	[IMG_PIXFMT_B8G8R8A8_UNORM] = (0x6 << 26),
+};
+#endif
+
+#ifdef CONFIG_SUPPORT_MIPI
+static uint32_t DC_ExtraPowerIslands[DC_PLANE_MAX][MAX_PLANE_INDEX] = {
+	{ 0,              0,              0},
+#ifdef CONFIG_MOOREFIELD
+	{ 0,              0,              0},
+#else
+	{ OSPM_DISPLAY_C, 0,              0},
+#endif
+	{ 0,              OSPM_DISPLAY_C, 0},
+	{ 0,              0,              0},
+};
+#else
+static uint32_t DC_ExtraPowerIslands[DC_PLANE_MAX][MAX_PLANE_INDEX] = {
+	{ 0,              0,              0},
+	{ OSPM_DISPLAY_A, 0,              0},
+	{ OSPM_DISPLAY_A, OSPM_DISPLAY_C, 0},
+	{ 0,              0,              0},
+};
+#endif
+
+static inline IMG_UINT32 _Align_To(IMG_UINT32 ulValue,
+				IMG_UINT32 ulAlignment)
+{
+	return (ulValue + ulAlignment - 1) & ~(ulAlignment - 1);
+}
+
+static IMG_BOOL _Is_Valid_PixelFormat(IMG_PIXFMT ePixelFormat)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(DC_MRFLD_Supported_PixelFormats); i++) {
+		if (ePixelFormat == DC_MRFLD_Supported_PixelFormats[i])
+			return IMG_TRUE;
+	}
+
+	return IMG_FALSE;
+}
+
+#if KEEP_UNUSED_CODE
+/*
+ * NOTE: only use the 1st plane now.
+ */
+static IMG_BOOL _Is_Valid_DC_Buffer(DC_BUFFER_IMPORT_INFO *psBufferInfo)
+{
+	if (!psBufferInfo)
+		return IMG_FALSE;
+
+	/*common check*/
+	if (!psBufferInfo->ui32BPP || !psBufferInfo->ui32ByteStride[0] ||
+		!psBufferInfo->ui32Height[0] || !psBufferInfo->ui32Width[0])
+		return IMG_FALSE;
+
+	/*check format*/
+	if (!_Is_Valid_PixelFormat(psBufferInfo->ePixFormat))
+		return IMG_FALSE;
+
+	/*check stride*/
+	if (psBufferInfo->ui32ByteStride[0] & DC_MRFLD_STRIDE_ALIGN_MASK)
+		return IMG_FALSE;
+
+	return IMG_TRUE;
+}
+#endif /* if KEEP_UNUSED_CODE */
+
+
+static IMG_BOOL _Is_Task_KThread(void)
+{
+	/* skip task from user space and work queue */
+	if (((current->flags & PF_NO_SETAFFINITY) == 0)
+	    && ((current->flags & PF_WQ_WORKER) == 0)
+	    && ((current->flags & PF_KTHREAD) != 0))
+		return IMG_TRUE;
+	else
+		return IMG_FALSE;
+}
+
+static void _Update_PlanePipeMapping(DC_MRFLD_DEVICE *psDevice,
+					IMG_UINT32 uiType,
+					IMG_UINT32 uiIndex,
+					IMG_INT32 iPipe)
+{
+	mutex_lock(&psDevice->sMappingLock);
+
+	psDevice->ui32PlanePipeMapping[uiType][uiIndex] = iPipe;
+
+	mutex_unlock(&psDevice->sMappingLock);
+}
+
+#if 0
+static IMG_BOOL _Enable_ExtraPowerIslands(DC_MRFLD_DEVICE *psDevice,
+					IMG_UINT32 ui32ExtraPowerIslands)
+{
+	IMG_UINT32 ui32PowerIslands = 0;
+
+	/*turn on extra power islands which were not turned on*/
+	ui32PowerIslands = psDevice->ui32ExtraPowerIslandsStatus &
+			ui32ExtraPowerIslands;
+
+	ui32ExtraPowerIslands &= ~ui32PowerIslands;
+
+	if (!ui32ExtraPowerIslands)
+		return IMG_TRUE;
+
+	if (!power_island_get(ui32ExtraPowerIslands)) {
+		DRM_ERROR("Failed to turn on islands %x\n",
+			ui32ExtraPowerIslands);
+		return IMG_FALSE;
+
+	}
+
+	psDevice->ui32ExtraPowerIslandsStatus |= ui32ExtraPowerIslands;
+	return IMG_TRUE;
+}
+
+static IMG_BOOL _Disable_ExtraPowerIslands(DC_MRFLD_DEVICE *psDevice,
+					IMG_UINT32 ui32ExtraPowerIslands)
+{
+	IMG_UINT32 ui32PowerIslands = 0;
+	IMG_UINT32 ui32ActivePlanes;
+	IMG_UINT32 ui32ActiveExtraIslands = 0;
+	int i, j;
+
+	/*turn off extra power islands which were turned on*/
+	ui32PowerIslands = psDevice->ui32ExtraPowerIslandsStatus &
+				ui32ExtraPowerIslands;
+
+	if (!ui32PowerIslands)
+		return IMG_TRUE;
+
+	/*don't turn off extra power islands used by other planes*/
+	for (i = 1; i < DC_PLANE_MAX; i++) {
+		for (j = 0; j < MAX_PLANE_INDEX; j++) {
+			ui32ActivePlanes = gpsDevice->ui32ActivePlanes[i];
+
+			/* don't need to power it off when it's active */
+			if (ui32ActivePlanes & (1 << j)) {
+				ui32ActiveExtraIslands =
+					DC_ExtraPowerIslands[i][j];
+				/*remove power islands needed by this plane*/
+				ui32PowerIslands &= ~ui32ActiveExtraIslands;
+			}
+		}
+	}
+
+	if (ui32PowerIslands)
+		power_island_put(ui32PowerIslands);
+
+	psDevice->ui32ExtraPowerIslandsStatus &= ~ui32PowerIslands;
+	return IMG_TRUE;
+}
+
+static void _Flip_To_Surface(DC_MRFLD_DEVICE *psDevice,
+				IMG_UINT32 ulSurfAddr,
+				IMG_PIXFMT eFormat,
+				IMG_UINT32 ulStride,
+				IMG_INT iPipe)
+{
+	struct drm_device *psDrmDev = psDevice->psDrmDevice;
+	uint32_t format = DC_MRFLD_PixelFormat_Mapping[eFormat];
+	DCCBFlipToSurface(psDrmDev, ulSurfAddr, format, ulStride, iPipe);
+}
+#endif
+
+static void _Flip_Overlay(DC_MRFLD_DEVICE *psDevice,
+			DC_MRFLD_OVERLAY_CONTEXT *psContext,
+			IMG_INT iPipe)
+{
+	if ((iPipe && psContext->pipe) || (!iPipe && !psContext->pipe))
+		DCCBFlipOverlay(psDevice->psDrmDevice, psContext);
+}
+
+static void _Flip_Sprite(DC_MRFLD_DEVICE *psDevice,
+			DC_MRFLD_SPRITE_CONTEXT *psContext,
+			IMG_INT iPipe)
+{
+	if ((iPipe && psContext->pipe) || (!iPipe && !psContext->pipe))
+		DCCBFlipSprite(psDevice->psDrmDevice, psContext);
+}
+
+static void _Flip_Primary(DC_MRFLD_DEVICE *psDevice,
+			DC_MRFLD_PRIMARY_CONTEXT *psContext,
+			IMG_INT iPipe)
+{
+	if ((iPipe && psContext->pipe) || (!iPipe && !psContext->pipe))
+		DCCBFlipPrimary(psDevice->psDrmDevice, psContext);
+}
+
+static void _Flip_Cursor(DC_MRFLD_DEVICE *psDevice,
+			DC_MRFLD_CURSOR_CONTEXT *psContext,
+			IMG_INT iPipe)
+{
+	if ((iPipe && psContext->pipe) || (!iPipe && !psContext->pipe))
+		DCCBFlipCursor(psDevice->psDrmDevice, psContext);
+}
+
+static void _Setup_ZOrder(DC_MRFLD_DEVICE *psDevice,
+			DC_MRFLD_DC_PLANE_ZORDER *psZorder,
+			IMG_INT iPipe)
+{
+	DCCBSetupZorder(psDevice->psDrmDevice, psZorder, iPipe);
+}
+
+void display_power_work(struct work_struct *work)
+{
+	struct power_off_req *req = container_of(work,
+				struct power_off_req, work.work);
+	struct plane_state *pstate = req->pstate;
+
+	mutex_lock(&gpsDevice->sFlipQueueLock);
+
+	if (!pstate->active) {
+		if (pstate->extra_power_island  && !pstate->powered_off) {
+			DRM_DEBUG("power off island %#x for plane (%d %d)\n",
+				 pstate->extra_power_island,
+				 pstate->type, pstate->index);
+			power_island_put(pstate->extra_power_island);
+			pstate->powered_off = true;
+		}
+		pstate->disabled = true;
+	}
+
+	mutex_unlock(&gpsDevice->sFlipQueueLock);
+
+	kfree(req);
+}
+
+static void free_flip(DC_MRFLD_FLIP *psFlip)
+{
+	int i;
+	struct flip_plane *tmp, *plane;
+	DC_MRFLD_BUFFER *psBuffer;
+
+	for (i = 0; i < MAX_PIPE_NUM; i++) {
+		list_for_each_entry_safe(plane, tmp,
+				&psFlip->asPipeInfo[i].flip_planes, list) {
+			list_del(&plane->list);
+			kfree(plane);
+		}
+	}
+
+	/* free buffers which allocated in configureCheck*/
+	for (i = 0; i < psFlip->uiNumBuffers; i++) {
+		psBuffer = psFlip->pasBuffers[i];
+		if (!psBuffer) {
+			continue;
+		}
+		OSFreeMem(psBuffer);
+	}
+
+	kfree(psFlip);
+}
+
+static void disable_plane(struct plane_state *pstate)
+{
+	int type = pstate->type;
+	int index = pstate->index;
+	int pipe = pstate->attached_pipe;
+	u32 ctx = 0;
+	struct drm_psb_private *dev_priv = gpsDevice->psDrmDevice->dev_private;
+
+	DRM_DEBUG("disable plane (%d %d)\n", type, index);
+
+	switch (type) {
+	case DC_SPRITE_PLANE:
+		DCCBSpriteEnable(gpsDevice->psDrmDevice, ctx, index, 0);
+		break;
+	case DC_OVERLAY_PLANE:
+		/* TODO: disable overlay, need to allocat backbuf in kernel? */
+		DCCBSetPipeToOvadd(&ctx, pipe);
+		DCCBOverlayEnable(gpsDevice->psDrmDevice, ctx, index, 0);
+		break;
+	case DC_PRIMARY_PLANE:
+		DCCBPrimaryEnable(gpsDevice->psDrmDevice, ctx, index, 0);
+		break;
+	case DC_CURSOR_PLANE:
+		DCCBCursorDisable(gpsDevice->psDrmDevice, index);
+		break;
+	default:
+		DRM_ERROR("unsupport plane type (%d %d)\n", type, index);
+		return;
+	}
+
+	{
+		struct power_off_req *req = kzalloc(sizeof(*req), GFP_KERNEL);
+
+		if (!req) {
+			DRM_ERROR("can't alloc power off req, plane (%d %d)\n",
+				  type, index);
+			return;
+		}
+
+		DRM_DEBUG("schedule power off island %#x for plane (%d %d)\n",
+			  pstate->extra_power_island, type, index);
+
+		INIT_DELAYED_WORK(&req->work, display_power_work);
+		req->pstate = pstate;
+
+		queue_delayed_work(dev_priv->power_wq, &req->work,
+				msecs_to_jiffies(50));
+	}
+
+	pstate->active = false;
+}
+
+static IMG_BOOL enable_plane(struct flip_plane *plane)
+{
+	int type = plane->type;
+	int index = plane->index;
+	struct plane_state *pstate = &gpsDevice->plane_states[type][index];
+
+	if (pstate->powered_off && pstate->extra_power_island) {
+		DRM_DEBUG("power on island %#x for plane (%d %d)\n",
+			  pstate->extra_power_island, type, index);
+
+		if (!power_island_get(pstate->extra_power_island)) {
+			DRM_ERROR("fail to power on island %#x"
+				  " for plane (%d %d)\n",
+				  pstate->extra_power_island, type, index);
+			return IMG_FALSE;
+		}
+		pstate->powered_off = false;
+	}
+
+	pstate->active = true;
+	pstate->disabled = false;
+	pstate->flip_active = true;
+	pstate->attached_pipe = plane->attached_pipe;
+	return IMG_TRUE;
+}
+
+static void clear_plane_flip_state(int pipe)
+{
+	int i, j;
+	struct plane_state *pstate;
+
+	for (i = 1; i < DC_PLANE_MAX; i++) {
+		for (j = 0; j < MAX_PLANE_INDEX; j++) {
+			pstate = &gpsDevice->plane_states[i][j];
+			if (pstate->attached_pipe != pipe)
+				continue;
+
+			pstate->flip_active = false;
+		}
+	}
+}
+
+static bool disable_unused_planes(int pipe)
+{
+	int i, j;
+	struct plane_state *pstate;
+	bool ret = false;
+
+	for (i = 1; i < DC_PLANE_MAX; i++) {
+		for (j = 0; j < MAX_PLANE_INDEX; j++) {
+			pstate = &gpsDevice->plane_states[i][j];
+
+			/* if already inactive or not on this pipe */
+			if (!pstate->active || pstate->attached_pipe != pipe)
+				continue;
+
+			DRM_DEBUG("plane (%d %d) active:%d, flip_active:%d\n",
+				  i, j, pstate->active, pstate->flip_active);
+
+			if (pstate->active && !pstate->flip_active) {
+				disable_plane(pstate);
+				ret = true;
+			}
+		}
+	}
+
+	return ret;
+}
+
+static void disable_ied_session(struct drm_device *dev)
+{
+	/* Make sure overlay planes are in-active prior to turning off IED */
+	if (g_ied_force_clean) {
+		struct plane_state *oa_state =
+			&gpsDevice->plane_states[DC_OVERLAY_PLANE][0];
+		struct plane_state *oc_state =
+			&gpsDevice->plane_states[DC_OVERLAY_PLANE][1];
+		uint32_t ret = 0;
+		if ((oa_state->active == false) &&
+			(oc_state->active == false)) {
+			DRM_INFO("%s: ied_ref: %d\n", __func__, g_ied_ref);
+			mutex_lock(&g_ied_mutex);
+			while (g_ied_ref) {
+				DRM_INFO("disable_ied_session - ied_ref: %d\n",
+						g_ied_ref);
+				ret = sepapp_drm_playback(false);
+				if (ret) {
+					DRM_ERROR("IED Clean-up \
+						Failed: 0x%x\n", ret);
+					break;
+				}
+				g_ied_ref--;
+			}
+			g_ied_force_clean = false;
+			mutex_unlock(&g_ied_mutex);
+		}
+	}
+}
+static void free_flip_states_on_pipe(struct drm_device *psDrmDev, int pipe)
+{
+	struct list_head *psFlipQueue;
+	DC_MRFLD_FLIP *psFlip, *psTmp;
+	IMG_UINT32 eFlipState;
+
+	if (pipe != DC_PIPE_A && pipe != DC_PIPE_B)
+		return;
+
+	psFlipQueue = &gpsDevice->sFlipQueues[pipe];
+
+	list_for_each_entry_safe(psFlip, psTmp, psFlipQueue, sFlips[pipe])
+	{
+		eFlipState = psFlip->eFlipStates[pipe];
+
+		if (eFlipState == DC_MRFLD_FLIP_DC_UPDATED) {
+			/* done with this flip item, disable vsync now*/
+			DCCBDisableVSyncInterrupt(psDrmDev, pipe);
+
+			if (pipe != DC_PIPE_B)
+				DCCBDsrAllow(psDrmDev, pipe);
+		}
+
+		/*remove this entry from flip queue, decrease refCount*/
+		list_del(&psFlip->sFlips[pipe]);
+
+		if ((psFlip->uiRefCount>0) && !(--psFlip->uiRefCount)) {
+			/*retire all buffers possessed by this flip*/
+			DCDisplayConfigurationRetired(
+					psFlip->hConfigData);
+			/* free it */
+			free_flip(psFlip);
+			psFlip = NULL;
+		}
+	}
+
+	return;
+}
+
+static void timer_flip_handler(struct work_struct *work)
+{
+	struct list_head *psFlipQueue;
+	DC_MRFLD_FLIP *psFlip, *psTmp;
+	IMG_UINT32 eFlipState;
+	int iPipe;
+	bool bHasUpdatedFlip[MAX_PIPE_NUM] = { false };
+	bool bHasDisplayedFlip[MAX_PIPE_NUM] = { false };
+	bool bHasPendingCommand[MAX_PIPE_NUM] = { false };
+	struct drm_device *dev;
+
+	if (!gpsDevice)
+		return;
+
+	/* acquire flip queue mutex */
+	mutex_lock(&gpsDevice->sFlipQueueLock);
+
+	/* check flip queue state */
+	for (iPipe=DC_PIPE_A; iPipe<=DC_PIPE_B; iPipe++) {
+		psFlipQueue = &gpsDevice->sFlipQueues[iPipe];
+
+		list_for_each_entry_safe(psFlip, psTmp, psFlipQueue, sFlips[iPipe])
+		{
+			eFlipState = psFlip->eFlipStates[iPipe];
+			if (eFlipState == DC_MRFLD_FLIP_DC_UPDATED) {
+				bHasUpdatedFlip[iPipe] = true;
+			} else if (eFlipState == DC_MRFLD_FLIP_DISPLAYED) {
+				bHasDisplayedFlip[iPipe] = true;
+			}
+		}
+
+		/* check if there are pending scp cmds */
+		psFlip = list_first_entry_or_null(psFlipQueue, DC_MRFLD_FLIP, sFlips[iPipe]);
+		if (psFlip && DCDisplayHasPendingCommand(psFlip->hConfigData))
+			bHasPendingCommand[iPipe] = true;
+
+		if (bHasUpdatedFlip[iPipe]) {
+			DRM_INFO("flip timer triggered, maybe vsync lost on pipe%d!\n", iPipe);
+			dev = gpsDevice->psDrmDevice;
+			if (dev != NULL)
+				DCCBDumpPipeStatus(dev, iPipe);
+		} else if (bHasDisplayedFlip[iPipe] && bHasPendingCommand[iPipe]) {
+			DRM_INFO("flip timer triggered, scp cmd pending on pipe%d!\n", iPipe);
+		}
+
+		/* free all flips whenever vysnc lost or scp cmd pending */
+		if ((bHasUpdatedFlip[iPipe]) ||
+		    (bHasDisplayedFlip[iPipe] && bHasPendingCommand[iPipe]))
+			free_flip_states_on_pipe(gpsDevice->psDrmDevice, iPipe);
+
+		if (list_empty_careful(psFlipQueue))
+			INIT_LIST_HEAD(&gpsDevice->sFlipQueues[iPipe]);
+	}
+
+	mutex_unlock(&gpsDevice->sFlipQueueLock);
+
+	return;
+}
+
+static void _Flip_Timer_Fn(unsigned long arg)
+{
+	DC_MRFLD_DEVICE *psDevice = (DC_MRFLD_DEVICE *)arg;
+
+        schedule_work(&psDevice->flip_retire_work);
+}
+
+static IMG_BOOL _Do_Flip(DC_MRFLD_FLIP *psFlip, int iPipe)
+{
+	struct intel_dc_plane_zorder *zorder = NULL;
+	DC_MRFLD_BUFFER **pasBuffers;
+	struct flip_plane *plane;
+	IMG_UINT32 uiNumBuffers;
+	IMG_BOOL bUpdated;
+	unsigned long flags;
+
+	if (!gpsDevice || !psFlip) {
+		DRM_ERROR("%s: Invalid Flip\n", __func__);
+		return IMG_FALSE;
+	}
+
+	if (iPipe != DC_PIPE_A && iPipe != DC_PIPE_B) {
+		DRM_ERROR("%s: Invalid pipe %d\n", __func__, iPipe);
+		return IMG_FALSE;
+	}
+
+	/* skip it if updated*/
+	if (psFlip->eFlipStates[iPipe] == DC_MRFLD_FLIP_DC_UPDATED)
+		return IMG_TRUE;
+
+	pasBuffers = psFlip->pasBuffers;
+	uiNumBuffers = psFlip->uiNumBuffers;
+
+	if (!pasBuffers || !uiNumBuffers) {
+		DRM_ERROR("%s: Invalid buffer list\n", __func__);
+		return IMG_FALSE;
+	}
+
+	/*turn on required power islands*/
+	if (!power_island_get(psFlip->uiPowerIslands))
+		return IMG_FALSE;
+
+	/* start update display controller hardware */
+	bUpdated = IMG_FALSE;
+
+	if (iPipe != DC_PIPE_B)
+		DCCBDsrForbid(gpsDevice->psDrmDevice, iPipe);
+
+	/*
+	 * make sure vsync interrupt of this pipe is active before kicking
+	 * off flip
+	 */
+	if (DCCBEnableVSyncInterrupt(gpsDevice->psDrmDevice, iPipe)) {
+		DRM_ERROR("%s: failed to enable vsync on pipe %d\n",
+				__func__, iPipe);
+		if (iPipe != DC_PIPE_B)
+			DCCBDsrAllow(gpsDevice->psDrmDevice, iPipe);
+		goto err_out;
+	}
+
+	clear_plane_flip_state(iPipe);
+
+	list_for_each_entry(plane,
+			    &(psFlip->asPipeInfo[iPipe].flip_planes), list) {
+		zorder = &plane->flip_ctx->zorder;
+
+		enable_plane(plane);
+	}
+
+	/* Delay the Flip if we are close the Vblank interval since we
+	 * do not want to update plane registers during the vblank period
+	 */
+	DCCBAvoidFlipInVblankInterval(gpsDevice->psDrmDevice, iPipe);
+	local_irq_save(flags);
+
+	list_for_each_entry(plane,
+			    &(psFlip->asPipeInfo[iPipe].flip_planes), list) {
+		int type = plane->type;
+		int index = plane->index;
+		struct plane_state *pstate =
+			&gpsDevice->plane_states[type][index];
+
+		if (!pstate->active)
+			continue;
+
+		switch (plane->type) {
+		case DC_SPRITE_PLANE:
+			if (!plane->flip_ctx->ctx.sp_ctx.surf)
+				plane->flip_ctx->ctx.sp_ctx.surf =
+					plane->flip_buf->sDevVAddr.uiAddr;
+			_Flip_Sprite(gpsDevice,
+					&plane->flip_ctx->ctx.sp_ctx, iPipe);
+			break;
+		case DC_PRIMARY_PLANE:
+			if (!plane->flip_ctx->ctx.prim_ctx.surf)
+				plane->flip_ctx->ctx.prim_ctx.surf =
+					plane->flip_buf->sDevVAddr.uiAddr;
+			_Flip_Primary(gpsDevice,
+					&plane->flip_ctx->ctx.prim_ctx, iPipe);
+			break;
+
+		case DC_OVERLAY_PLANE:
+			_Flip_Overlay(gpsDevice,
+					&plane->flip_ctx->ctx.ov_ctx, iPipe);
+			break;
+		case DC_CURSOR_PLANE:
+			_Flip_Cursor(gpsDevice,
+					&plane->flip_ctx->ctx.cs_ctx, iPipe);
+			break;
+		}
+	}
+
+	local_irq_restore(flags);
+
+	if (zorder)
+		_Setup_ZOrder(gpsDevice, zorder, iPipe);
+
+	disable_ied_session(gpsDevice->psDrmDevice);
+
+	disable_unused_planes(iPipe);
+
+	psFlip->eFlipStates[iPipe] = DC_MRFLD_FLIP_DC_UPDATED;
+	psFlip->uiVblankCounters[iPipe] =
+			drm_vblank_count(gpsDevice->psDrmDevice, iPipe);
+
+	/*start Flip watch dog*/
+	mod_timer(&gpsDevice->sFlipTimer, FLIP_TIMEOUT + jiffies);
+
+	bUpdated = IMG_TRUE;
+
+#ifndef ENABLE_HW_REPEAT_FRAME
+	/* maxfifo is only enabled in mipi only mode */
+	if (iPipe == DC_PIPE_A && !hdmi_state)
+		maxfifo_timer_start(gpsDevice->psDrmDevice);
+#endif
+err_out:
+	power_island_put(psFlip->uiPowerIslands);
+	return bUpdated;
+}
+
+static DC_MRFLD_FLIP *_Next_Queued_Flip(int iPipe)
+{
+	struct list_head *psFlipQueue;
+	DC_MRFLD_FLIP *psFlip, *psTmp;
+	DC_MRFLD_FLIP *psQueuedFlip = 0;
+
+	if (iPipe != DC_PIPE_A && iPipe != DC_PIPE_B) {
+		DRM_ERROR("%s: Invalid pipe %d\n", __func__, iPipe);
+		return NULL;
+	}
+
+	psFlipQueue = &gpsDevice->sFlipQueues[iPipe];
+
+	list_for_each_entry_safe(psFlip, psTmp, psFlipQueue, sFlips[iPipe])
+	{
+		if (psFlip->eFlipStates[iPipe] == DC_MRFLD_FLIP_QUEUED) {
+			psQueuedFlip = psFlip;
+			break;
+		}
+	}
+
+	return psQueuedFlip;
+}
+
+static bool _Can_Flip(int iPipe)
+{
+	struct list_head *psFlipQueue;
+	DC_MRFLD_FLIP *psFlip, *psTmp;
+	bool ret = true;
+	int num_queued = 0;
+	int num_updated = 0;
+
+	if (iPipe != DC_PIPE_A && iPipe != DC_PIPE_B) {
+		DRM_ERROR("%s: Invalid pipe %d\n", __func__, iPipe);
+		return false;
+	}
+
+	psFlipQueue = &gpsDevice->sFlipQueues[iPipe];
+	list_for_each_entry_safe(psFlip, psTmp, psFlipQueue, sFlips[iPipe])
+	{
+		if (psFlip->eFlipStates[iPipe] == DC_MRFLD_FLIP_QUEUED) {
+			num_queued++;
+			ret = false;
+		}
+		if (psFlip->eFlipStates[iPipe] == DC_MRFLD_FLIP_DC_UPDATED) {
+			num_updated++;
+			ret = false;
+		}
+	}
+
+	if (num_queued > 2) {
+		DRM_ERROR("num of queued buffers is %d\n", num_queued);
+	}
+
+	if (num_updated > 1) {
+		DRM_ERROR("num of updated buffers is %d\n", num_updated);
+	}
+	return ret;
+}
+
+static void _Dispatch_Flip(DC_MRFLD_FLIP *psFlip)
+{
+	DC_MRFLD_SURF_CUSTOM *psSurfCustom;
+	DC_MRFLD_BUFFER **pasBuffers;
+	IMG_UINT32 uiNumBuffers;
+	struct flip_plane *flip_plane;
+	int type, index, pipe;
+	int i, j;
+	bool send_wms = false;
+
+	if (!gpsDevice || !psFlip) {
+		DRM_ERROR("%s: Invalid Flip\n", __func__);
+		return;
+	}
+
+	pasBuffers = psFlip->pasBuffers;
+	uiNumBuffers = psFlip->uiNumBuffers;
+
+	if (!pasBuffers || !uiNumBuffers) {
+		DRM_ERROR("%s: Invalid buffer list\n", __func__);
+		return;
+	}
+
+	mutex_lock(&gpsDevice->sMappingLock);
+
+	for (i = 0; i < uiNumBuffers; i++) {
+		if (pasBuffers[i]->eFlipOp == DC_MRFLD_FLIP_SURFACE) {
+			/* assign to pipe 0 by default*/
+			psFlip->bActivePipes[DC_PIPE_A] = IMG_TRUE;
+			continue;
+		}
+
+		if (pasBuffers[i]->eFlipOp != DC_MRFLD_FLIP_CONTEXT) {
+			DRM_ERROR("%s: bad flip operation %d\n", __func__,
+					pasBuffers[i]->eFlipOp);
+			continue;
+		}
+
+		for (j = 0; j < pasBuffers[i]->ui32ContextCount; j++) {
+			psSurfCustom = &pasBuffers[i]->sContext[j];
+
+			type = psSurfCustom->type;
+			index = -1;
+			pipe = -1;
+
+			switch (type) {
+			case DC_SPRITE_PLANE:
+				/*Flip sprite context*/
+				index = psSurfCustom->ctx.sp_ctx.index;
+				pipe = psSurfCustom->ctx.sp_ctx.pipe;
+				break;
+			case DC_PRIMARY_PLANE:
+				index = psSurfCustom->ctx.prim_ctx.index;
+				pipe = psSurfCustom->ctx.prim_ctx.pipe;
+				break;
+			case DC_OVERLAY_PLANE:
+				index = psSurfCustom->ctx.ov_ctx.index;
+				pipe = psSurfCustom->ctx.ov_ctx.pipe;
+				break;
+			case DC_CURSOR_PLANE:
+				index = psSurfCustom->ctx.cs_ctx.index;
+				pipe = psSurfCustom->ctx.cs_ctx.pipe;
+				break;
+			default:
+				DRM_ERROR("Unknown plane type %d\n",
+						psSurfCustom->type);
+				break;
+			}
+
+			if (index < 0 || pipe < 0 || pipe >= MAX_PIPE_NUM) {
+				DRM_ERROR("Invalid index = %d, pipe = %d\n",
+						index, pipe);
+				continue;
+			}
+
+			flip_plane = kzalloc(sizeof(*flip_plane), GFP_KERNEL);
+			if (!flip_plane) {
+				DRM_ERROR("fail to alloc flip plane\n");
+				continue;
+			}
+
+			flip_plane->type = type;
+			flip_plane->index = index;
+			flip_plane->attached_pipe = pipe;
+			flip_plane->flip_buf = pasBuffers[i];
+			flip_plane->flip_ctx = psSurfCustom;
+
+			list_add_tail(&flip_plane->list,
+				      &psFlip->asPipeInfo[pipe].flip_planes);
+			DRM_DEBUG("flip plane (%d %d) context %p to pipe %d\n",
+					type, index, psSurfCustom, pipe);
+
+			/* update flip active pipes */
+			if (pipe)
+				psFlip->bActivePipes[DC_PIPE_B] = IMG_TRUE;
+			else
+				psFlip->bActivePipes[DC_PIPE_A] = IMG_TRUE;
+
+			/* check whether needs extra power island*/
+			psFlip->uiPowerIslands |=
+				DC_ExtraPowerIslands[type][index];
+
+			/* update plane - pipe mapping*/
+			gpsDevice->ui32PlanePipeMapping[type][index] = pipe;
+		}
+	}
+	mutex_unlock(&gpsDevice->sMappingLock);
+
+	mutex_lock(&gpsDevice->sFlipQueueLock);
+	/* dispatch this flip*/
+	for (i = 0; i < MAX_PIPE_NUM; i++) {
+		if (psFlip->bActivePipes[i] && gpsDevice->bFlipEnabled[i]) {
+			/* if pipe is not active */
+			if (!DCCBIsPipeActive(gpsDevice->psDrmDevice, i))
+				continue;
+
+			/*turn on pipe power island based on active pipes*/
+			if (i == 0)
+				psFlip->uiPowerIslands |= OSPM_DISPLAY_A;
+			else
+				psFlip->uiPowerIslands |= OSPM_DISPLAY_B;
+
+			psFlip->asPipeInfo[i].uiSwapInterval =
+				psFlip->uiSwapInterval;
+
+			/* if there's no pending queued flip, flip it*/
+			if (_Can_Flip(i)) {
+				/* don't queue it, if failed to update DC*/
+				if (!_Do_Flip(psFlip,i))
+					continue;
+				else if (i != DC_PIPE_B)
+					send_wms = true;
+			}
+
+			/*increase refCount*/
+			psFlip->uiRefCount++;
+
+			INIT_LIST_HEAD(&psFlip->sFlips[i]);
+			list_add_tail(&psFlip->sFlips[i],
+					&gpsDevice->sFlipQueues[i]);
+		} else if (!psFlip->bActivePipes[i] && gpsDevice->bFlipEnabled[i]) {
+			/* give a chance to disable planes on inactive pipe */
+			clear_plane_flip_state(i);
+			if (disable_unused_planes(i)) {
+				if (i != DC_PIPE_B) {
+					send_wms = true;
+				}
+			}
+
+			/*
+			 * NOTE:To inactive pipe, need to free the attached
+			 * flipStates on FlipQueue; otherwise, as all pipes
+			 * share same Flip item, other active pipes will be
+			 * blocked by this pipe and no chance to get buffer
+			 * retired.
+			 */
+			free_flip_states_on_pipe(gpsDevice->psDrmDevice, i);
+		}
+	}
+
+	/* if failed to dispatch, skip this flip*/
+	if (!psFlip->uiRefCount) {
+		DCDisplayConfigurationRetired(psFlip->hConfigData);
+		/* free it */
+		free_flip(psFlip);
+		psFlip = NULL;
+	}
+
+#ifdef CONFIG_SUPPORT_MIPI
+	if (send_wms) {
+
+		/* Ensure that *psFlip is not freed while lock is not held. */
+		if (psFlip)
+			psFlip->uiRefCount++;
+
+		mutex_unlock(&gpsDevice->sFlipQueueLock);
+		DCCBWaitForDbiFifoEmpty(gpsDevice->psDrmDevice, DC_PIPE_A);
+		mutex_lock(&gpsDevice->sFlipQueueLock);
+
+		if (psFlip != NULL && --psFlip->uiRefCount == 0) {
+			DCDisplayConfigurationRetired(psFlip->hConfigData);
+			/* free it */
+			free_flip(psFlip);
+			psFlip = NULL;
+		}
+
+		/* Issue "write_mem_start" for command mode panel. */
+		DCCBUpdateDbiPanel(gpsDevice->psDrmDevice, DC_PIPE_A);
+		if (psFlip)
+			psFlip->uiVblankCounters[DC_PIPE_A] =
+				drm_vblank_count(gpsDevice->psDrmDevice, DC_PIPE_A);
+	}
+#endif
+
+	mutex_unlock(&gpsDevice->sFlipQueueLock);
+}
+
+static void _Queue_Flip(IMG_HANDLE hConfigData, IMG_HANDLE *ahBuffers,
+			IMG_UINT32 uiNumBuffers, IMG_UINT32 ui32DisplayPeriod)
+{
+	IMG_UINT32 uiFlipSize;
+	DC_MRFLD_BUFFER *psBuffer;
+	DC_MRFLD_FLIP *psFlip;
+	int i;
+
+	uiFlipSize = sizeof(DC_MRFLD_FLIP);
+	uiFlipSize += uiNumBuffers * sizeof(DC_MRFLD_BUFFER*);
+
+	psFlip = kzalloc(uiFlipSize , GFP_KERNEL);
+	if (!gpsDevice || !psFlip) {
+		DRM_ERROR("Failed to allocate a flip\n");
+		/*force it to complete*/
+		DCDisplayConfigurationRetired(hConfigData);
+		return;
+	}
+
+	/*set flip state as queued*/
+	for (i = 0; i < MAX_PIPE_NUM; i++) {
+		psFlip->eFlipStates[i] = DC_MRFLD_FLIP_QUEUED;
+		psFlip->bActivePipes[i] = IMG_FALSE;
+		INIT_LIST_HEAD(&psFlip->asPipeInfo[i].flip_planes);
+	}
+
+	/*update buffer number*/
+	psFlip->uiNumBuffers = uiNumBuffers;
+
+	/*initialize buffers*/
+	for (i = 0; i < uiNumBuffers; i++) {
+		psBuffer = ahBuffers[i];
+		if (!psBuffer) {
+			DRM_DEBUG("%s: buffer %d is empty!\n", __func__, i);
+			continue;
+		}
+		psFlip->pasBuffers[i] = psBuffer;
+	}
+
+	psFlip->uiSwapInterval = ui32DisplayPeriod;
+
+	psFlip->hConfigData = hConfigData;
+
+	/*queue it to flip queue*/
+	_Dispatch_Flip(psFlip);
+}
+
+static int _Vsync_ISR(struct drm_device *psDrmDev, int iPipe)
+{
+	struct list_head *psFlipQueue;
+	DC_MRFLD_FLIP *psFlip, *psTmp;
+	DC_MRFLD_FLIP *psNextFlip;
+	IMG_UINT32 eFlipState;
+	IMG_UINT32 uiVblankCounter;
+	IMG_BOOL bNewFlipUpdated = IMG_FALSE;
+
+	if (!gpsDevice)
+		return IMG_TRUE;
+
+	if (iPipe != DC_PIPE_A && iPipe != DC_PIPE_B)
+		return IMG_FALSE;
+
+	/* acquire flip queue mutex */
+	mutex_lock(&gpsDevice->sFlipQueueLock);
+
+	psFlipQueue = &gpsDevice->sFlipQueues[iPipe];
+
+	/*
+	 * on vsync interrupt arrival:
+	 * 1) surface composer would be unblocked to switch to a new buffer,
+	 *    the displayed (old) buffer will be released at this point
+	 * 2) we release the displayed buffer here to align with the surface
+	 *    composer's buffer management mechanism.
+	 * 3) in MOST cases, surface composer would release the displayed buffer
+	 *    till it has been switched to consume a new buffer, so it is safe
+	 *    to release the displayed buffer here. However, since there is
+	 *    a work queue between surface composer and our kernel display class
+	 *    driver, if the flip work wasn't scheduled on time and the released
+	 *    displaying buffer was dequeued by a client and being rendered
+	 *    then tearing might happen on the screen.
+	 */
+	uiVblankCounter	= drm_vblank_count(psDrmDev, iPipe);
+
+	list_for_each_entry_safe(psFlip, psTmp, psFlipQueue, sFlips[iPipe])
+	{
+		/* Only when new flip updated and TE/VBLANK comes after
+		 * that flip indicates the new flip takes effects on screen.
+		 */
+		eFlipState = psFlip->eFlipStates[iPipe];
+
+		if ((eFlipState == DC_MRFLD_FLIP_DC_UPDATED) &&
+		    (uiVblankCounter > psFlip->uiVblankCounters[iPipe])) {
+			bNewFlipUpdated = IMG_TRUE;
+			break;
+		}
+	}
+
+	/* if new flip updated on pipe, give a chance to retire */
+	if (bNewFlipUpdated) {
+		list_for_each_entry_safe(psFlip, psTmp, psFlipQueue, sFlips[iPipe])
+		{
+			eFlipState = psFlip->eFlipStates[iPipe];
+			if (eFlipState == DC_MRFLD_FLIP_DISPLAYED) {
+				/*remove this entry from flip queue, decrease refCount*/
+				list_del(&psFlip->sFlips[iPipe]);
+
+				if (!(--psFlip->uiRefCount)) {
+					/*retire all buffers possessed by this flip*/
+					DCDisplayConfigurationRetired(
+							psFlip->hConfigData);
+					/* free it */
+					free_flip(psFlip);
+					psFlip = NULL;
+				}
+			} else if (eFlipState == DC_MRFLD_FLIP_DC_UPDATED) {
+
+				psFlip->eFlipStates[iPipe] = DC_MRFLD_FLIP_DISPLAYED;
+
+				/* done with this flip item, disable vsync now*/
+				DCCBDisableVSyncInterrupt(psDrmDev, iPipe);
+
+				if (iPipe != DC_PIPE_B)
+					DCCBDsrAllow(psDrmDev, iPipe);
+				break;
+			}
+		}
+	}
+
+	/*if flip queue isn't empty, flip the first queued flip*/
+	psNextFlip = _Next_Queued_Flip(iPipe);
+	if (psNextFlip) {
+		/* failed to update DC, release it*/
+		if (!_Do_Flip(psNextFlip, iPipe)) {
+
+			/*remove this entry from flip queue, decrease refCount*/
+			list_del(&psNextFlip->sFlips[iPipe]);
+
+			if (!(--psNextFlip->uiRefCount)) {
+				/*retire all buffers possessed by this flip*/
+				DCDisplayConfigurationRetired(
+						psNextFlip->hConfigData);
+				/* free it */
+				free_flip(psNextFlip);
+				psNextFlip = NULL;
+			}
+		} else if (iPipe == DC_PIPE_A) {
+			mutex_unlock(&gpsDevice->sFlipQueueLock);
+			DCCBWaitForDbiFifoEmpty(gpsDevice->psDrmDevice,
+						DC_PIPE_A);
+			mutex_lock(&gpsDevice->sFlipQueueLock);
+
+			DCCBUpdateDbiPanel(gpsDevice->psDrmDevice, iPipe);
+		}
+	}
+
+	if (bNewFlipUpdated) {
+		/*start Flip watch dog*/
+		mod_timer(&gpsDevice->sFlipTimer, FLIP_TIMEOUT + jiffies);
+	}
+
+	if (list_empty_careful(psFlipQueue))
+		INIT_LIST_HEAD(&gpsDevice->sFlipQueues[iPipe]);
+
+	mutex_unlock(&gpsDevice->sFlipQueueLock);
+	return IMG_TRUE;
+}
+
+/*----------------------------------------------------------------------------*/
+
+static void DC_MRFLD_GetInfo(IMG_HANDLE hDeviceData,
+				DC_DISPLAY_INFO *psDisplayInfo)
+{
+	DC_MRFLD_DEVICE *psDevice = (DC_MRFLD_DEVICE *)hDeviceData;
+
+	DRM_DEBUG("%s\n", __func__);
+
+	if (psDevice && psDisplayInfo)
+		*psDisplayInfo = psDevice->sDisplayInfo;
+}
+
+static PVRSRV_ERROR DC_MRFLD_PanelQueryCount(IMG_HANDLE hDeviceData,
+						IMG_UINT32 *ppui32NumPanels)
+{
+	DC_MRFLD_DEVICE *psDevice = (DC_MRFLD_DEVICE *)hDeviceData;
+	if (!psDevice || !ppui32NumPanels)
+		return PVRSRV_ERROR_INVALID_PARAMS;
+
+	DRM_DEBUG("%s\n", __func__);
+
+	*ppui32NumPanels = 1;
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR DC_MRFLD_PanelQuery(IMG_HANDLE hDeviceData,
+				IMG_UINT32 ui32PanelsArraySize,
+				IMG_UINT32 *pui32NumPanels,
+				PVRSRV_PANEL_INFO *pasPanelInfo)
+{
+	DC_MRFLD_DEVICE *psDevice = (DC_MRFLD_DEVICE *)hDeviceData;
+
+	if (!psDevice || !pui32NumPanels || !pasPanelInfo)
+		return PVRSRV_ERROR_INVALID_PARAMS;
+
+	DRM_DEBUG("%s\n", __func__);
+
+	*pui32NumPanels = 1;
+
+	pasPanelInfo[0].sSurfaceInfo = psDevice->sPrimInfo;
+	/*TODO: export real panel info*/
+	/*pasPanelInfo[0].ui32RefreshRate = 60;*/
+	/*pasPanelInfo[0].ui32PhysicalWidthmm = 0;*/
+	/*pasPanelInfo[0].ui32PhysicalHeightmm = 0;*/
+
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR DC_MRFLD_FormatQuery(IMG_HANDLE hDeviceData,
+					IMG_UINT32 ui32NumFormats,
+					PVRSRV_SURFACE_FORMAT *pasFormat,
+					IMG_UINT32 *pui32Supported)
+{
+	DC_MRFLD_DEVICE *psDevice = (DC_MRFLD_DEVICE *)hDeviceData;
+	int i;
+
+	if (!psDevice || !pasFormat || !pui32Supported)
+		return PVRSRV_ERROR_INVALID_PARAMS;
+
+	DRM_DEBUG("%s\n", __func__);
+
+	for (i = 0; i < ui32NumFormats; i++)
+		pui32Supported[i] =
+			_Is_Valid_PixelFormat(pasFormat[i].ePixFormat) ? 1 : 0;
+
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR DC_MRFLD_DimQuery(IMG_HANDLE hDeviceData,
+					IMG_UINT32 ui32NumDims,
+					PVRSRV_SURFACE_DIMS *psDim,
+					IMG_UINT32 *pui32Supported)
+{
+	DC_MRFLD_DEVICE *psDevice = (DC_MRFLD_DEVICE *)hDeviceData;
+	int i;
+
+	if (!psDevice || !psDim || !pui32Supported)
+		return PVRSRV_ERROR_INVALID_PARAMS;
+
+	DRM_DEBUG("%s\n", __func__);
+
+	for (i = 0; i < ui32NumDims; i++) {
+		pui32Supported[i] = 0;
+
+		if (psDim[i].ui32Width != psDevice->sPrimInfo.sDims.ui32Width)
+			continue;
+		if (psDim[i].ui32Height != psDevice->sPrimInfo.sDims.ui32Height)
+			continue;
+
+		pui32Supported[i] = 1;
+	}
+
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR DC_MRFLD_BufferSystemAcquire(IMG_HANDLE hDeviceData,
+					IMG_DEVMEM_LOG2ALIGN_T *puiLog2PageSize,
+					IMG_UINT32 *pui32PageCount,
+					IMG_UINT32 *pui32PhysHeapID,
+					IMG_UINT32 *pui32ByteStride,
+					IMG_HANDLE *phSystemBuffer)
+{
+	DC_MRFLD_DEVICE *psDevice = (DC_MRFLD_DEVICE *)hDeviceData;
+	IMG_UINT32 ulPagesNumber, ulBufferSize;
+
+	if (!psDevice || !puiLog2PageSize || !pui32PageCount ||
+		!pui32PhysHeapID || !pui32ByteStride || !phSystemBuffer)
+		return PVRSRV_ERROR_INVALID_PARAMS;
+
+	DRM_DEBUG("%s\n", __func__);
+
+	ulBufferSize = psDevice->psSystemBuffer->ui32BufferSize;
+	ulPagesNumber = (ulBufferSize + (PAGE_SIZE - 1))  >> PAGE_SHIFT;
+
+	*puiLog2PageSize = PAGE_SHIFT;
+	*pui32PageCount = ulPagesNumber;
+	*pui32PhysHeapID = 0;
+	*pui32ByteStride = psDevice->psSystemBuffer->ui32ByteStride;
+	*phSystemBuffer = (IMG_HANDLE)psDevice->psSystemBuffer;
+
+	return PVRSRV_OK;
+}
+
+static void DC_MRFLD_BufferSystemRelease(IMG_HANDLE hSystemBuffer)
+{
+	/*TODO: do something here*/
+}
+
+static PVRSRV_ERROR DC_MRFLD_ContextCreate(IMG_HANDLE hDeviceData,
+					IMG_HANDLE *hDisplayContext)
+{
+	DC_MRFLD_DISPLAY_CONTEXT *psDisplayContext = NULL;
+	DC_MRFLD_DEVICE *psDevice;
+	PVRSRV_ERROR eRes = PVRSRV_OK;
+
+	if (!hDisplayContext)
+		return PVRSRV_ERROR_INVALID_PARAMS;
+
+	DRM_DEBUG("%s\n", __func__);
+
+	psDevice = (DC_MRFLD_DEVICE *)hDeviceData;
+
+	/*Allocate a new context*/
+	psDisplayContext =
+		kzalloc(sizeof(DC_MRFLD_DISPLAY_CONTEXT), GFP_KERNEL);
+	if (!psDisplayContext) {
+		DRM_ERROR("Failed to create display context\n");
+		eRes = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto create_error;
+	}
+
+	psDisplayContext->psDevice = psDevice;
+	*hDisplayContext = (IMG_HANDLE)psDisplayContext;
+create_error:
+	return eRes;
+}
+
+/* TODO: refine function name: buffers will be copied in ahBuffers */
+static PVRSRV_ERROR DC_MRFLD_ContextConfigureCheck(
+				IMG_HANDLE hDisplayContext,
+				IMG_UINT32 ui32PipeCount,
+				PVRSRV_SURFACE_CONFIG_INFO *pasSurfAttrib,
+				IMG_HANDLE *ahBuffers)
+{
+	DC_MRFLD_DISPLAY_CONTEXT *psDisplayContext =
+		(DC_MRFLD_DISPLAY_CONTEXT *)hDisplayContext;
+	DC_MRFLD_DEVICE *psDevice;
+	DC_MRFLD_SURF_CUSTOM *psSurfCustom;
+	DC_MRFLD_BUFFER *psBuffer;
+	int err;
+	int i, j;
+
+	if (!psDisplayContext || !pasSurfAttrib || !ahBuffers)
+		return PVRSRV_ERROR_INVALID_PARAMS;
+
+	DRM_DEBUG("%s\n", __func__);
+
+	psDevice = psDisplayContext->psDevice;
+
+	/*TODO: handle ui32PipeCount = 0*/
+
+	/* reset buffer context count*/
+	for (i = 0; i < ui32PipeCount; i++) {
+		if (!ahBuffers[i]) {
+			continue;
+		}
+		psBuffer = OSAllocMem(sizeof(DC_MRFLD_BUFFER));
+		if (psBuffer == NULL) {
+			for (j = 0; j < i; j++) {
+				if (ahBuffers[j]) {
+					OSFreeMem(ahBuffers[j]);
+				}
+			}
+			return PVRSRV_ERROR_OUT_OF_MEMORY;
+		}
+		OSMemCopy(psBuffer, ahBuffers[i], sizeof(DC_MRFLD_BUFFER));
+		psBuffer->ui32ContextCount = 0;
+		ahBuffers[i] = psBuffer;
+	}
+
+	for (i = 0; i < ui32PipeCount; i++) {
+		psBuffer = (DC_MRFLD_BUFFER *)ahBuffers[i];
+		if (!psBuffer) {
+			DRM_ERROR("%s: no buffer for layer %d\n", __func__, i);
+			continue;
+		}
+
+		/*post flip*/
+		if (!pasSurfAttrib[i].ui32Custom) {
+			psBuffer->eFlipOp = DC_MRFLD_FLIP_SURFACE;
+			continue;
+		}
+
+		/*check context count*/
+		if (psBuffer->ui32ContextCount >= MAX_CONTEXT_COUNT) {
+			DRM_ERROR("%s: plane context overflow\n", __func__);
+			continue;
+		}
+
+		psSurfCustom =
+			&psBuffer->sContext[psBuffer->ui32ContextCount++];
+
+		/*copy the context from userspace*/
+		err = copy_from_user(psSurfCustom,
+				     (void *)(uintptr_t)
+				     pasSurfAttrib[i].ui32Custom,
+				     sizeof(DC_MRFLD_SURF_CUSTOM));
+		if (err) {
+			DRM_ERROR("Failed to copy plane context\n");
+			continue;
+		}
+
+		psBuffer->eFlipOp = DC_MRFLD_FLIP_CONTEXT;
+	}
+
+	return PVRSRV_OK;
+}
+
+static void DC_MRFLD_ContextConfigure(IMG_HANDLE hDisplayContext,
+				IMG_UINT32 ui32PipeCount,
+				PVRSRV_SURFACE_CONFIG_INFO *pasSurfAttrib,
+				IMG_HANDLE *ahBuffers,
+				IMG_UINT32 ui32DisplayPeriod,
+				IMG_HANDLE hConfigData)
+{
+	DRM_DEBUG("%s\n", __func__);
+
+	if (!ui32PipeCount) {
+		/* Called from DCDisplayContextDestroy()
+		 * Retire the current config  */
+		DCDisplayConfigurationRetired(hConfigData);
+		return;
+	}
+
+	/*queue this configure update*/
+	_Queue_Flip(hConfigData, ahBuffers, ui32PipeCount, ui32DisplayPeriod);
+}
+
+static void DC_MRFLD_ContextDestroy(IMG_HANDLE hDisplayContext)
+{
+	DC_MRFLD_DISPLAY_CONTEXT *psDisplayContext =
+		(DC_MRFLD_DISPLAY_CONTEXT *)hDisplayContext;
+	kfree(psDisplayContext);
+}
+
+/**
+ *
+ */
+static PVRSRV_ERROR DC_MRFLD_BufferAlloc(IMG_HANDLE hDisplayContext,
+					DC_BUFFER_CREATE_INFO *psCreateInfo,
+					IMG_DEVMEM_LOG2ALIGN_T *puiLog2PageSize,
+					IMG_UINT32 *pui32PageCount,
+					IMG_UINT32 *pui32PhysHeapID,
+					IMG_UINT32 *pui32ByteStride,
+					IMG_HANDLE *phBuffer)
+{
+	PVRSRV_ERROR eRes = PVRSRV_OK;
+	DC_MRFLD_BUFFER *psBuffer = NULL;
+	PVRSRV_SURFACE_INFO *psSurfInfo;
+	IMG_UINT32 ulPagesNumber;
+	IMG_UINT32 i, j;
+	DC_MRFLD_DEVICE *psDevice;
+	struct drm_device *psDrmDev;
+	DC_MRFLD_DISPLAY_CONTEXT *psDisplayContext =
+		(DC_MRFLD_DISPLAY_CONTEXT *)hDisplayContext;
+
+	if (!psDisplayContext || !psCreateInfo || !puiLog2PageSize ||
+		!pui32PageCount || !pui32PhysHeapID || !pui32ByteStride ||
+		!phBuffer)
+		return PVRSRV_ERROR_INVALID_PARAMS;
+
+	DRM_DEBUG("%s\n", __func__);
+
+	psBuffer = kzalloc(sizeof(DC_MRFLD_BUFFER), GFP_KERNEL);
+	if (!psBuffer) {
+		DRM_ERROR("Failed to create buffer\n");
+		eRes = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto create_error;
+	}
+
+	psSurfInfo = &psCreateInfo->sSurface;
+
+	/*
+	 * As we're been asked to allocate this buffer we decide what it's
+	 * stride should be.
+	 */
+	psBuffer->eSource = DCMrfldEX_BUFFER_SOURCE_ALLOC;
+	psBuffer->hDisplayContext = hDisplayContext;
+
+	/* Align 32 pixels of width alignment */
+	psBuffer->ui32Width =
+		_Align_To(psSurfInfo->sDims.ui32Width, DC_MRFLD_WIDTH_ALIGN_MASK);
+
+	psBuffer->ui32ByteStride =
+		psBuffer->ui32Width * psCreateInfo->ui32BPP;
+	/*align stride*/
+	psBuffer->ui32ByteStride =
+		_Align_To(psBuffer->ui32ByteStride, DC_MRFLD_STRIDE_ALIGN);
+
+	psBuffer->ui32Height = psSurfInfo->sDims.ui32Height;
+	psBuffer->ui32BufferSize =
+		psBuffer->ui32Height * psBuffer->ui32ByteStride;
+	psBuffer->ePixFormat = psSurfInfo->sFormat.ePixFormat;
+
+	/*
+	 * Allocate display adressable memory. We only need physcial addresses
+	 * at this stage.
+	 * Note: This could be defered till the 1st map or acquire call.
+	 * IMG uses pgprot_noncached(PAGE_KERNEL)
+	 */
+	psBuffer->sCPUVAddr = __vmalloc(psBuffer->ui32BufferSize,
+			GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
+			 __pgprot((pgprot_val(PAGE_KERNEL) & ~_PAGE_CACHE_MASK)
+			| _PAGE_CACHE_WC));
+	/*FIXME: */
+	//DCCBGetStolen(gpsDevice->psDrmDevice, &psBuffer->sCPUVAddr, &psBuffer->ui32BufferSize);
+	if (psBuffer->sCPUVAddr == NULL) {
+		DRM_ERROR("Failed to allocate buffer\n");
+		eRes = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto alloc_error;
+	}
+
+	ulPagesNumber =
+		(psBuffer->ui32BufferSize + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+
+	psBuffer->psSysAddr =
+		kzalloc(ulPagesNumber * sizeof(IMG_SYS_PHYADDR), GFP_KERNEL);
+	if (!psBuffer->psSysAddr) {
+		DRM_ERROR("Failed to allocate phy array\n");
+		eRes = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto phy_error;
+	}
+
+	i = 0; j = 0;
+	for (i = 0; i < psBuffer->ui32BufferSize; i += PAGE_SIZE) {
+#if defined(UNDER_WDDM)
+		psBuffer->psSysAddr[j++].uiAddr =
+			(IMG_UINTPTR_T)vmalloc_to_pfn(psBuffer->sCPUVAddr + i) << PAGE_SHIFT;
+#else
+		psBuffer->psSysAddr[j++].uiAddr =
+			(IMG_UINT64)vmalloc_to_pfn(psBuffer->sCPUVAddr + i) << PAGE_SHIFT;
+#endif
+	}
+
+	psBuffer->bIsAllocated = IMG_TRUE;
+	psBuffer->bIsContiguous = IMG_FALSE;
+	psBuffer->ui32OwnerTaskID = task_tgid_nr(current);
+
+	psDevice = psDisplayContext->psDevice;
+	psDrmDev = psDevice->psDrmDevice;
+
+	ulPagesNumber =
+		(psBuffer->ui32BufferSize + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+
+	/*map this buffer to gtt*/
+	DCCBgttMapMemory(psDrmDev,
+		(unsigned int)(uintptr_t)psBuffer,
+		psBuffer->ui32OwnerTaskID,
+		psBuffer->psSysAddr,
+		ulPagesNumber,
+		(unsigned int *)&psBuffer->sDevVAddr.uiAddr);
+
+	psBuffer->sDevVAddr.uiAddr <<= PAGE_SHIFT;
+
+	/*setup output params*/
+	*pui32ByteStride = psBuffer->ui32ByteStride;
+	*puiLog2PageSize = PAGE_SHIFT;
+	*pui32PageCount = ulPagesNumber;
+	*pui32PhysHeapID = 0;
+	*phBuffer = psBuffer;
+
+	DRM_DEBUG("%s: allocated buffer: %dx%d\n", __func__,
+		psBuffer->ui32Width, psBuffer->ui32Height);
+
+	return PVRSRV_OK;
+phy_error:
+	vfree(psBuffer->sCPUVAddr);
+alloc_error:
+	kfree(psBuffer);
+create_error:
+	return eRes;
+}
+
+static PVRSRV_ERROR DC_MRFLD_BufferImport(IMG_HANDLE hDisplayContext,
+					IMG_UINT32 ui32NumPlanes,
+					IMG_HANDLE **paphImport,
+					DC_BUFFER_IMPORT_INFO *psSurfAttrib,
+					IMG_HANDLE *phBuffer)
+{
+	DC_MRFLD_BUFFER *psBuffer;
+	DC_MRFLD_DISPLAY_CONTEXT *psDisplayContext =
+		(DC_MRFLD_DISPLAY_CONTEXT *)hDisplayContext;
+	DC_MRFLD_DEVICE *psDevice;
+
+	if (!psDisplayContext || !ui32NumPlanes || !paphImport ||
+		!psSurfAttrib || !phBuffer)
+		return PVRSRV_ERROR_INVALID_PARAMS;
+
+	DRM_DEBUG("%s\n", __func__);
+
+	psDevice = psDisplayContext->psDevice;
+
+	/*NOTE: we are only using the first plane(buffer)*/
+	DRM_DEBUG("%s: import surf format 0x%x, w %d, h %d, bpp %d," \
+		"stride %d\n",
+		__func__,
+		psSurfAttrib->ePixFormat,
+		psSurfAttrib->ui32Width[0],
+		psSurfAttrib->ui32Height[0],
+		psSurfAttrib->ui32BPP,
+		psSurfAttrib->ui32ByteStride[0]);
+
+	psBuffer = kzalloc(sizeof(DC_MRFLD_BUFFER), GFP_KERNEL);
+	if (!psBuffer) {
+		DRM_ERROR("Failed to create DC buffer\n");
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	/*initialize this buffer*/
+	psBuffer->eSource = DCMrfldEX_BUFFER_SOURCE_IMPORT;
+	psBuffer->hDisplayContext = hDisplayContext;
+	psBuffer->ui32Width = psSurfAttrib->ui32Width[0];
+	psBuffer->ePixFormat = psSurfAttrib->ePixFormat;
+	psBuffer->ui32ByteStride = psSurfAttrib->ui32ByteStride[0];
+	psBuffer->ui32Width = psSurfAttrib->ui32Width[0];
+	psBuffer->ui32Height = psSurfAttrib->ui32Height[0];
+	psBuffer->bIsAllocated = IMG_FALSE;
+	psBuffer->bIsContiguous = IMG_FALSE;
+	psBuffer->ui32OwnerTaskID = task_tgid_nr(current);
+
+	psBuffer->hImport = paphImport[0];
+	/*setup output param*/
+	*phBuffer = psBuffer;
+
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR DC_MRFLD_BufferAcquire(IMG_HANDLE hBuffer,
+					IMG_DEV_PHYADDR *pasDevPAddr,
+					void **ppvLinAddr)
+{
+	DC_MRFLD_BUFFER *psBuffer = (DC_MRFLD_BUFFER *)hBuffer;
+	IMG_UINT32 ulPages;
+	int i;
+
+	if (!psBuffer || !pasDevPAddr || !ppvLinAddr)
+		return PVRSRV_ERROR_INVALID_PARAMS;
+
+	DRM_DEBUG("%s\n", __func__);
+
+	ulPages = (psBuffer->ui32BufferSize + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+
+	/*allocate new buffer for import buffer*/
+	if (psBuffer->eSource == DCMrfldEX_BUFFER_SOURCE_ALLOC) {
+		for (i = 0; i < ulPages; i++)
+			pasDevPAddr[i].uiAddr = psBuffer->psSysAddr[i].uiAddr;
+		*ppvLinAddr = psBuffer->sCPUVAddr;
+	}
+	return PVRSRV_OK;
+}
+
+static void DC_MRFLD_BufferRelease(IMG_HANDLE hBuffer)
+{
+
+}
+
+static void DC_MRFLD_BufferFree(IMG_HANDLE hBuffer)
+{
+	DC_MRFLD_DISPLAY_CONTEXT *psDisplayContext;
+	DC_MRFLD_BUFFER *psBuffer = (DC_MRFLD_BUFFER *)hBuffer;
+	DC_MRFLD_DEVICE *psDevice;
+	struct drm_device *psDrmDev;
+
+	if (!psBuffer)
+		return;
+
+	DRM_DEBUG("%s\n", __func__);
+
+	psDisplayContext =
+		(DC_MRFLD_DISPLAY_CONTEXT *)psBuffer->hDisplayContext;
+	if (!psDisplayContext)
+		return;
+
+	psDevice = psDisplayContext->psDevice;
+	psDrmDev = psDevice->psDrmDevice;
+
+	if (psBuffer->eSource == DCMrfldEX_BUFFER_SOURCE_SYSTEM)
+		return;
+
+	/*
+	 * if it's buffer allocated by display device contineu to free
+	 * the buffer pages
+	 */
+	if (psBuffer->eSource == DCMrfldEX_BUFFER_SOURCE_ALLOC) {
+		/*make sure unmap this buffer from gtt*/
+		DCCBgttUnmapMemory(psDrmDev, (unsigned int)
+			(uintptr_t)psBuffer, psBuffer->ui32OwnerTaskID);
+		kfree(psBuffer->psSysAddr);
+		vfree(psBuffer->sCPUVAddr);
+	}
+
+	if (psBuffer->eSource == DCMrfldEX_BUFFER_SOURCE_IMPORT &&
+	    _Is_Task_KThread()) {
+		DRM_DEBUG("owner task id: %d\n", psBuffer->ui32OwnerTaskID);
+		/* KThread is triggered to clean up gtt */
+		DCCBgttCleanupMemoryOnTask(psDrmDev,
+					psBuffer->ui32OwnerTaskID);
+	}
+
+	kfree(psBuffer);
+}
+
+static PVRSRV_ERROR DC_MRFLD_BufferMap(IMG_HANDLE hBuffer)
+{
+	return PVRSRV_OK;
+}
+
+static void DC_MRFLD_BufferUnmap(IMG_HANDLE hBuffer)
+{
+
+}
+
+static DC_DEVICE_FUNCTIONS sDCFunctions = {
+	.pfnGetInfo			= DC_MRFLD_GetInfo,
+	.pfnPanelQueryCount		= DC_MRFLD_PanelQueryCount,
+	.pfnPanelQuery			= DC_MRFLD_PanelQuery,
+	.pfnFormatQuery			= DC_MRFLD_FormatQuery,
+	.pfnDimQuery			= DC_MRFLD_DimQuery,
+	.pfnSetBlank			= NULL,
+	.pfnSetVSyncReporting		= NULL,
+	.pfnLastVSyncQuery		= NULL,
+	.pfnContextCreate		= DC_MRFLD_ContextCreate,
+	.pfnContextDestroy		= DC_MRFLD_ContextDestroy,
+	.pfnContextConfigure		= DC_MRFLD_ContextConfigure,
+	.pfnContextConfigureCheck	= DC_MRFLD_ContextConfigureCheck,
+	.pfnBufferAlloc			= DC_MRFLD_BufferAlloc,
+	.pfnBufferAcquire		= DC_MRFLD_BufferAcquire,
+	.pfnBufferRelease		= DC_MRFLD_BufferRelease,
+	.pfnBufferFree			= DC_MRFLD_BufferFree,
+	.pfnBufferImport		= DC_MRFLD_BufferImport,
+	.pfnBufferMap			= DC_MRFLD_BufferMap,
+	.pfnBufferUnmap			= DC_MRFLD_BufferUnmap,
+	.pfnBufferSystemAcquire         = DC_MRFLD_BufferSystemAcquire,
+	.pfnBufferSystemRelease         = DC_MRFLD_BufferSystemRelease,
+
+};
+
+static PVRSRV_ERROR _SystemBuffer_Init(DC_MRFLD_DEVICE *psDevice)
+{
+	struct drm_device *psDrmDev;
+	struct psb_framebuffer *psPSBFb;
+	IMG_UINT32 ulPagesNumber;
+	int i;
+
+	/*get fbDev*/
+	psDrmDev = psDevice->psDrmDevice;
+	DCCBGetFramebuffer(psDrmDev, &psPSBFb);
+	if (!psPSBFb)
+		return PVRSRV_ERROR_INVALID_PARAMS;
+
+	/*allocate system buffer*/
+	psDevice->psSystemBuffer =
+		kzalloc(sizeof(DC_MRFLD_BUFFER), GFP_KERNEL);
+	if (!psDevice->psSystemBuffer) {
+		DRM_ERROR("Failed to allocate system buffer\n");
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	/*initilize system buffer*/
+	psDevice->psSystemBuffer->bIsAllocated = IMG_FALSE;
+	psDevice->psSystemBuffer->bIsContiguous = IMG_FALSE;
+	psDevice->psSystemBuffer->eSource = DCMrfldEX_BUFFER_SOURCE_SYSTEM;
+	psDevice->psSystemBuffer->hDisplayContext = 0;
+	psDevice->psSystemBuffer->hImport = 0;
+	psDevice->psSystemBuffer->sCPUVAddr = psPSBFb->vram_addr;
+	psDevice->psSystemBuffer->sDevVAddr.uiAddr = 0;
+	psDevice->psSystemBuffer->ui32BufferSize = psPSBFb->size;
+	psDevice->psSystemBuffer->ui32ByteStride = psPSBFb->base.pitches[0];
+	psDevice->psSystemBuffer->ui32Height = psPSBFb->base.height;
+	psDevice->psSystemBuffer->ui32Width = psPSBFb->base.width;
+	psDevice->psSystemBuffer->ui32OwnerTaskID = -1;
+	psDevice->psSystemBuffer->ui32RefCount = 0;
+
+	switch (psPSBFb->depth) {
+	case 32:
+	case 24:
+		psDevice->psSystemBuffer->ePixFormat =
+			IMG_PIXFMT_B8G8R8A8_UNORM;
+		break;
+	case 16:
+		psDevice->psSystemBuffer->ePixFormat =
+			IMG_PIXFMT_B5G6R5_UNORM;
+		break;
+	default:
+		DRM_ERROR("Unsupported system buffer format\n");
+	}
+
+	ulPagesNumber = (psPSBFb->size + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+	psDevice->psSystemBuffer->psSysAddr =
+		kzalloc(ulPagesNumber * sizeof(IMG_SYS_PHYADDR), GFP_KERNEL);
+	if (!psDevice->psSystemBuffer->psSysAddr) {
+		kfree(psDevice->psSystemBuffer);
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	for (i = 0; i < ulPagesNumber; i++) {
+		psDevice->psSystemBuffer->psSysAddr[i].uiAddr =
+			psPSBFb->stolen_base + i * PAGE_SIZE;
+	}
+
+	DRM_DEBUG("%s: allocated system buffer %dx%d, format %d\n",
+			__func__,
+			psDevice->psSystemBuffer->ui32Width,
+			psDevice->psSystemBuffer->ui32Height,
+			psDevice->psSystemBuffer->ePixFormat);
+
+	return PVRSRV_OK;
+}
+
+static void _SystemBuffer_Deinit(DC_MRFLD_DEVICE *psDevice)
+{
+	if (psDevice->psSystemBuffer) {
+		kfree(psDevice->psSystemBuffer->psSysAddr);
+		kfree(psDevice->psSystemBuffer);
+	}
+}
+
+static PVRSRV_ERROR DC_MRFLD_init(struct drm_device *psDrmDev)
+{
+	PVRSRV_ERROR eRes = PVRSRV_OK;
+	DC_MRFLD_DEVICE *psDevice;
+	int i, j;
+
+	if (!psDrmDev)
+		return PVRSRV_ERROR_INVALID_PARAMS;
+
+	/*create new display device*/
+	psDevice = kzalloc(sizeof(DC_MRFLD_DEVICE), GFP_KERNEL);
+	if (!psDevice) {
+		DRM_ERROR("Failed to create display device\n");
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	/*init display device*/
+	psDevice->psDrmDevice = psDrmDev;
+	/*init system frame buffer*/
+	eRes = _SystemBuffer_Init(psDevice);
+	if (eRes != PVRSRV_OK)
+		goto init_error;
+
+	/*init primary surface info*/
+	psDevice->sPrimInfo.sDims.ui32Width =
+		psDevice->psSystemBuffer->ui32Width;
+	psDevice->sPrimInfo.sDims.ui32Height =
+		psDevice->psSystemBuffer->ui32Height;
+	psDevice->sPrimInfo.sFormat.ePixFormat =
+		psDevice->psSystemBuffer->ePixFormat;
+
+	/*init display info*/
+	strncpy(psDevice->sDisplayInfo.szDisplayName, DRVNAME, DC_NAME_SIZE);
+	psDevice->sDisplayInfo.ui32MinDisplayPeriod = 0;
+	psDevice->sDisplayInfo.ui32MaxDisplayPeriod = 5;
+	psDevice->sDisplayInfo.ui32MaxPipes = DCCBGetPipeCount();
+
+	/*init flip queue lock*/
+	mutex_init(&psDevice->sFlipQueueLock);
+
+	/*init flip queues*/
+	for (i = 0; i < MAX_PIPE_NUM; i++) {
+		INIT_LIST_HEAD(&psDevice->sFlipQueues[i]);
+		psDevice->bFlipEnabled[i] = IMG_TRUE;
+	}
+
+	/*init plane pipe mapping lock */
+	mutex_init(&psDevice->sMappingLock);
+
+	/*init plane pipe mapping & plane state */
+	for (i = 1; i < DC_PLANE_MAX; i++) {
+		for (j = 0; j < MAX_PLANE_INDEX; j++) {
+			struct plane_state *pstate =
+				&psDevice->plane_states[i][j];
+
+			pstate->type = i;
+			pstate->index = j;
+			pstate->attached_pipe = -1;
+			pstate->active = false;
+			pstate->disabled = true;
+			pstate->extra_power_island =
+				DC_ExtraPowerIslands[i][j];
+			pstate->powered_off = true;
+
+			psDevice->ui32PlanePipeMapping[i][j] = -1;
+		}
+	}
+
+	/*unblank fbdev*/
+	DCCBUnblankDisplay(psDevice->psDrmDevice);
+
+	/*register display device*/
+	eRes = DCRegisterDevice(&sDCFunctions,
+				2,
+				psDevice,
+				&psDevice->hSrvHandle);
+	if (eRes != PVRSRV_OK) {
+		DRM_ERROR("Failed to register display device\n");
+		goto reg_error;
+	}
+
+	/*init ISR*/
+	DCCBInstallVSyncISR(psDrmDev, _Vsync_ISR);
+
+	/*init flip timer*/
+	psDevice->sFlipTimer.data = (unsigned long)psDevice;
+	psDevice->sFlipTimer.function = _Flip_Timer_Fn;
+	init_timer(&psDevice->sFlipTimer);
+	INIT_WORK(&psDevice->flip_retire_work, timer_flip_handler);
+
+	gpsDevice = psDevice;
+
+	return PVRSRV_OK;
+reg_error:
+	_SystemBuffer_Deinit(psDevice);
+init_error:
+	kfree(psDevice);
+	return eRes;
+}
+
+static PVRSRV_ERROR DC_MRFLD_exit(void)
+{
+	if (!gpsDevice)
+		return PVRSRV_ERROR_INVALID_PARAMS;
+
+	/*unregister display device*/
+	DCUnregisterDevice(gpsDevice->hSrvHandle);
+
+	/*destroy system frame buffer*/
+	_SystemBuffer_Deinit(gpsDevice);
+
+	/*free device*/
+	kfree(gpsDevice);
+	gpsDevice = 0;
+
+	return PVRSRV_OK;
+}
+
+void DCLockMutex()
+{
+	if (!gpsDevice)
+		return;
+
+	mutex_lock(&gpsDevice->sFlipQueueLock);
+}
+
+void DCUnLockMutex()
+{
+	if (!gpsDevice)
+		return;
+
+	mutex_unlock(&gpsDevice->sFlipQueueLock);
+}
+
+int DCUpdateCursorPos(uint32_t pipe, uint32_t pos)
+{
+	int ret = 0;
+
+	if (!gpsDevice || !gpsDevice->psDrmDevice)
+		return -1;
+
+	/* TODO: check flip queue state */
+	/* if queue is not empty pending flip should be updated directly */
+	DCLockMutex();
+	ret = DCCBUpdateCursorPos(gpsDevice->psDrmDevice, (int)pipe, pos);
+	DCUnLockMutex();
+
+	return ret;
+}
+
+void DCAttachPipe(uint32_t iPipe)
+{
+	if (!gpsDevice)
+		return;
+
+	DRM_DEBUG("%s: pipe %d\n", __func__, iPipe);
+
+	if (iPipe != DC_PIPE_A && iPipe != DC_PIPE_B)
+		return;
+
+	gpsDevice->bFlipEnabled[iPipe] = IMG_TRUE;
+}
+
+void DCUnAttachPipe(uint32_t iPipe)
+{
+	struct list_head *psFlipQueue;
+	DC_MRFLD_FLIP *psFlip, *psTmp;
+
+	if (!gpsDevice)
+		return;
+
+	DRM_DEBUG("%s: pipe %d\n", __func__, iPipe);
+
+	if (iPipe != DC_PIPE_A && iPipe != DC_PIPE_B)
+		return;
+
+	psFlipQueue = &gpsDevice->sFlipQueues[iPipe];
+	/* complete the flips*/
+	list_for_each_entry_safe(psFlip, psTmp, psFlipQueue, sFlips[iPipe]) {
+
+		if (psFlip->eFlipStates[iPipe] == DC_MRFLD_FLIP_DC_UPDATED) {
+			/* Put pipe's vsync which has been enabled. */
+			DCCBDisableVSyncInterrupt(gpsDevice->psDrmDevice, iPipe);
+
+			if (iPipe != DC_PIPE_B)
+				DCCBDsrAllow(gpsDevice->psDrmDevice, iPipe);
+		}
+
+		/*remove this entry from flip queue, decrease refCount*/
+		list_del(&psFlip->sFlips[iPipe]);
+
+		if (!(--psFlip->uiRefCount)) {
+			/*retire all buffers possessed by this flip*/
+			DCDisplayConfigurationRetired(
+					psFlip->hConfigData);
+			/* free it */
+			free_flip(psFlip);
+			psFlip = NULL;
+		}
+	}
+
+	if (list_empty_careful(psFlipQueue))
+		INIT_LIST_HEAD(&gpsDevice->sFlipQueues[iPipe]);
+
+	gpsDevice->bFlipEnabled[iPipe] = IMG_FALSE;
+}
+
+/*TODO: merge with DCUnAttachPipe*/
+void DC_MRFLD_onPowerOff(uint32_t iPipe)
+{
+
+	int i, j;
+	struct plane_state *pstate;
+	struct drm_psb_private *dev_priv;
+
+	if (!gpsDevice)
+		return;
+
+	dev_priv = gpsDevice->psDrmDevice->dev_private;
+	if (!dev_priv->um_start)
+		return;
+
+	for (i = 1; i < DC_PLANE_MAX; i++) {
+		for (j = 0; j < MAX_PLANE_INDEX; j++) {
+			pstate = &gpsDevice->plane_states[i][j];
+
+			/* if already inactive or not on this pipe */
+			if (!pstate->active || pstate->attached_pipe != iPipe)
+				continue;
+
+			disable_plane(pstate);
+
+			/* turn off extra power island here */
+			if (!pstate->powered_off &&
+			    pstate->extra_power_island) {
+				power_island_put(pstate->extra_power_island);
+				pstate->powered_off = true;
+			}
+
+			/* set plane state to be correct in power off */
+			pstate->disabled = true;
+		}
+	}
+}
+
+/*TODO: merge with DCAttachPipe*/
+void DC_MRFLD_onPowerOn(uint32_t iPipe)
+{
+	/* we do nothing on ExtraPowerIsland during power on.
+	 * It will be automatically turned on during flip.
+	 */
+	int j;
+	struct plane_state *pstate;
+	struct drm_psb_private *dev_priv;
+
+	if (!gpsDevice)
+		return;
+
+	dev_priv = gpsDevice->psDrmDevice->dev_private;
+	if (!dev_priv->um_start)
+		return;
+
+	/* keep primary on and flip to black screen */
+	for (j = 0; j < MAX_PLANE_INDEX; j++) {
+		pstate = &gpsDevice->plane_states[DC_PRIMARY_PLANE][j];
+
+		/* primary plane is fixed to pipe */
+		if (j != iPipe)
+			continue;
+
+		disable_plane(pstate);
+	}
+
+	for (j = 0; j < MAX_PLANE_INDEX; j++) {
+		pstate = &gpsDevice->plane_states[DC_CURSOR_PLANE][j];
+
+		/* primary plane is fixed to pipe */
+		if (j != iPipe)
+			continue;
+
+		disable_plane(pstate);
+	}
+}
+
+int DC_MRFLD_Enable_Plane(int type, int index, u32 ctx)
+{
+	int err = 0;
+	IMG_INT32 *ui32ActivePlanes;
+#if 0
+	IMG_UINT32 uiExtraPowerIslands = 0;
+#endif
+
+	if (type <= DC_UNKNOWN_PLANE || type >= DC_PLANE_MAX) {
+		DRM_ERROR("Invalid plane type %d\n", type);
+		return -EINVAL;
+	}
+
+	if (index < 0 || index >= MAX_PLANE_INDEX) {
+		DRM_ERROR("Invalid plane index %d\n", index);
+		return -EINVAL;
+	}
+
+	/*acquire lock*/
+	mutex_lock(&gpsDevice->sFlipQueueLock);
+
+	ui32ActivePlanes = &gpsDevice->ui32ActivePlanes[type];
+
+	/* add to active planes*/
+	if (!(*ui32ActivePlanes & (1 << index))) {
+		*ui32ActivePlanes |= (1 << index);
+
+#if 0
+		/* power on extra power islands if required */
+		uiExtraPowerIslands = DC_ExtraPowerIslands[type][index];
+		_Enable_ExtraPowerIslands(gpsDevice,
+					uiExtraPowerIslands);
+#endif
+	}
+
+	mutex_unlock(&gpsDevice->sFlipQueueLock);
+
+	return err;
+}
+
+bool DC_MRFLD_Is_Plane_Disabled(int type, int index, u32 ctx)
+{
+	bool bDisabled;
+	struct plane_state *pstate;
+
+	/*acquire lock*/
+	mutex_lock(&gpsDevice->sFlipQueueLock);
+
+	pstate = &gpsDevice->plane_states[type][index];
+	bDisabled = pstate->disabled;
+
+	mutex_unlock(&gpsDevice->sFlipQueueLock);
+
+	return bDisabled;
+}
+
+int DC_MRFLD_Disable_Plane(int type, int index, u32 ctx)
+{
+	int err = 0;
+	IMG_INT32 *ui32ActivePlanes;
+	IMG_UINT32 uiExtraPowerIslands = 0;
+
+	if (type <= DC_UNKNOWN_PLANE || type >= DC_PLANE_MAX) {
+		DRM_ERROR("Invalid plane type %d\n", type);
+		return -EINVAL;
+	}
+
+	if (index < 0 || index >= MAX_PLANE_INDEX) {
+		DRM_ERROR("Invalid plane index %d\n", index);
+		return -EINVAL;
+	}
+
+	/*acquire lock*/
+	mutex_lock(&gpsDevice->sFlipQueueLock);
+
+	/*disable sprite & overlay plane*/
+	switch (type) {
+	case DC_SPRITE_PLANE:
+		err = DCCBSpriteEnable(gpsDevice->psDrmDevice, ctx, index, 0);
+		break;
+	case DC_OVERLAY_PLANE:
+		err = DCCBOverlayEnable(gpsDevice->psDrmDevice, ctx, index, 0);
+		break;
+	}
+
+	ui32ActivePlanes = &gpsDevice->ui32ActivePlanes[type];
+
+	/* remove from active planes*/
+	if (!err && (*ui32ActivePlanes & (1 << index))) {
+		*ui32ActivePlanes &= ~(1 << index);
+
+		/* power off extra power islands if required */
+		uiExtraPowerIslands = DC_ExtraPowerIslands[type][index];
+#if 0
+		if (uiExtraPowerIslands) {
+			req = kzalloc(sizeof(*req), GFP_KERNEL);
+			if (!req) {
+				DRM_ERROR("fail to alloc power_off_req\n");
+				goto out_mapping;
+			}
+			INIT_DELAYED_WORK(&req->work, display_power_work);
+
+			req->power_off_islands = uiExtraPowerIslands;
+
+			queue_delayed_work(dev_priv->power_wq,
+					   &req->work, msecs_to_jiffies(32));
+		}
+	out_mapping:
+#endif
+		/* update plane pipe mapping */
+		_Update_PlanePipeMapping(gpsDevice, type, index, -1);
+	}
+
+	mutex_unlock(&gpsDevice->sFlipQueueLock);
+
+	if (type == DC_OVERLAY_PLANE && !err) {
+		/* avoid big lock as it is a blocking call */
+		// err = DCCBOverlayDisableAndWait(gpsDevice->psDrmDevice, ctx, index);
+	}
+	return err;
+}
+
+/*----------------------------------------------------------------------------*/
+PVRSRV_ERROR MerrifieldDCInit(struct drm_device *psDrmDev)
+{
+	return DC_MRFLD_init(psDrmDev);
+}
+
+PVRSRV_ERROR MerrifieldDCDeinit(void)
+{
+	return DC_MRFLD_exit();
+}
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/3rdparty/intel_drm/dc_mrfld.h b/drivers/external_drivers/intel_media/graphics/rgx/services/3rdparty/intel_drm/dc_mrfld.h
new file mode 100644
index 0000000..6f8cb41
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/3rdparty/intel_drm/dc_mrfld.h
@@ -0,0 +1,182 @@
+/**********************************************************************
+ *
+ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful but, except
+ * as otherwise stated in writing, without any warranty; without even the
+ * implied warranty of merchantability or fitness for a particular purpose.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+ *
+ ******************************************************************************/
+#ifndef __DC_MRFLD_H__
+#define __DC_MRFLD_H__
+
+#include <drm/drmP.h>
+#include <imgpixfmts_km.h>
+#include "kerneldisplay.h"
+#include "psb_drm.h"
+#include "displayclass_interface.h"
+
+
+enum {
+	DC_PIPE_A,
+	DC_PIPE_B,
+	DC_PIPE_C,
+	MAX_PIPE_NUM,
+};
+
+typedef enum {
+	DCMrfldEX_BUFFER_SOURCE_ALLOC,
+	DCMrfldEX_BUFFER_SOURCE_IMPORT,
+	DCMrfldEX_BUFFER_SOURCE_SYSTEM,
+} DCMrfldEX_BUFFER_SOURCE;
+
+typedef enum {
+	DC_MRFLD_FLIP_SURFACE,
+	DC_MRFLD_FLIP_CONTEXT,
+} DC_MRFLD_FLIP_OP;
+
+/* ied session clean-up */
+#define MAX_IED_SESSIONS    8
+extern int sepapp_drm_playback(bool ied_status);
+
+/* max count of plane contexts which share the same buffer*/
+#define MAX_CONTEXT_COUNT   3
+
+/* max index of a plane */
+#define MAX_PLANE_INDEX     3
+
+typedef struct {
+	IMG_HANDLE hDisplayContext;
+	IMG_PIXFMT ePixFormat;
+	IMG_UINT32 ui32BufferSize;
+	IMG_UINT32 ui32ByteStride;
+	IMG_UINT32 ui32Width;
+	IMG_UINT32 ui32Height;
+	/*physical sys page list*/
+	IMG_SYS_PHYADDR *psSysAddr;
+	/*physical device page list*/
+	IMG_DEV_PHYADDR *psDevAddr;
+	/*GTT offset*/
+	IMG_DEV_VIRTADDR sDevVAddr;
+	/*CPU virtual address*/
+	IMG_CPU_VIRTADDR sCPUVAddr;
+	IMG_BOOL bIsContiguous;
+	IMG_BOOL bIsAllocated;
+	IMG_UINT32 ui32OwnerTaskID;
+	IMG_HANDLE hImport;
+	IMG_UINT32 ui32RefCount;
+	DCMrfldEX_BUFFER_SOURCE eSource;
+	DC_MRFLD_FLIP_OP eFlipOp;
+	IMG_UINT32 ui32ContextCount;
+	/* This buffer */
+	DC_MRFLD_SURF_CUSTOM sContext[MAX_CONTEXT_COUNT];
+} DC_MRFLD_BUFFER;
+
+struct plane_state {
+	int type;
+	int index;
+	int attached_pipe;
+	bool active;
+	bool flip_active;
+	bool disabled;
+	u32 extra_power_island;
+	bool powered_off;
+};
+
+/*Display Controller Device*/
+typedef struct {
+	IMG_HANDLE hSrvHandle;
+	struct drm_device *psDrmDevice;
+	DC_MRFLD_BUFFER *psSystemBuffer;
+	PVRSRV_SURFACE_INFO sPrimInfo;
+	DC_DISPLAY_INFO	sDisplayInfo;
+
+	/*plane enabling*/
+	IMG_UINT32 ui32ActiveOverlays;
+	IMG_UINT32 ui32ActiveSprites;
+	IMG_UINT32 ui32ActivePrimarys;
+
+	IMG_UINT32 ui32ActivePlanes[DC_PLANE_MAX];
+	IMG_UINT32 ui32SavedActivePlanes[DC_PLANE_MAX];
+
+	/*mutex lock for flip queue*/
+	struct mutex sFlipQueueLock;
+	/*context configure queue*/
+	struct list_head sFlipQueues[MAX_PIPE_NUM];
+	IMG_BOOL bFlipEnabled[MAX_PIPE_NUM];
+
+	/* lock for plane pipe mapping */
+	struct mutex sMappingLock;
+	/* plane - pipe mapping */
+	IMG_UINT32 ui32PlanePipeMapping[DC_PLANE_MAX][MAX_PLANE_INDEX];
+	IMG_UINT32 ui32ExtraPowerIslandsStatus;
+
+	struct plane_state plane_states[DC_PLANE_MAX][MAX_PLANE_INDEX];
+
+	/* Timer to retire blocked flips */
+        struct timer_list sFlipTimer;
+        struct work_struct flip_retire_work;
+
+} DC_MRFLD_DEVICE;
+
+typedef struct {
+	DC_MRFLD_DEVICE *psDevice;
+} DC_MRFLD_DISPLAY_CONTEXT;
+
+struct flip_plane {
+	struct list_head list;
+	int type;
+	int index;
+	int attached_pipe;
+	DC_MRFLD_BUFFER *flip_buf;
+	DC_MRFLD_SURF_CUSTOM  *flip_ctx;
+};
+
+struct DC_MRFLD_PIPE_INFO {
+	IMG_UINT32 uiSwapInterval;
+	struct list_head flip_planes;
+};
+
+/*flip status*/
+enum DC_MRFLD_FLIP_STATUS {
+	DC_MRFLD_FLIP_ERROR = 0,
+	DC_MRFLD_FLIP_QUEUED,
+	DC_MRFLD_FLIP_DC_UPDATED,
+	DC_MRFLD_FLIP_DISPLAYED,
+};
+
+typedef struct {
+	struct list_head sFlips[MAX_PIPE_NUM];
+	IMG_UINT32 eFlipStates[MAX_PIPE_NUM];
+	IMG_UINT32 uiVblankCounters[MAX_PIPE_NUM];
+	struct DC_MRFLD_PIPE_INFO asPipeInfo[MAX_PIPE_NUM];
+	IMG_BOOL bActivePipes[MAX_PIPE_NUM];
+	IMG_UINT32 uiNumBuffers;
+	IMG_UINT32 uiRefCount;
+	IMG_HANDLE hConfigData;
+	IMG_UINT32 uiSwapInterval;
+	IMG_UINT32 uiPowerIslands;
+	DC_MRFLD_BUFFER *pasBuffers[0];
+} DC_MRFLD_FLIP;
+
+/*exported functions*/
+PVRSRV_ERROR MerrifieldDCInit(struct drm_device * dev);
+PVRSRV_ERROR MerrifieldDCDeinit(void);
+
+#endif /* __DC_MRFLD_H__ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/include/cache_defines.h b/drivers/external_drivers/intel_media/graphics/rgx/services/include/cache_defines.h
new file mode 100644
index 0000000..7779e51
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/include/cache_defines.h
@@ -0,0 +1,52 @@
+/*************************************************************************/ /*!
+@File
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _CACHE_DEFINES_H_
+
+#define CACHEFLUSH_GENERIC	1
+#define CACHEFLUSH_X86		2
+
+#if CACHEFLUSH_TYPE == 0
+#error Unknown cache flush type, please addd to cache_defines.h
+#endif
+
+#endif	/* _CACHE_DEFINES_H_ */
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/include/dc_common.h b/drivers/external_drivers/intel_media/graphics/rgx/services/include/dc_common.h
new file mode 100644
index 0000000..8b2b594
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/include/dc_common.h
@@ -0,0 +1,74 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common Display Class header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines DC specific structures which are shared within services
+                only
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include "img_types.h"
+#include "services.h"
+
+#ifndef _DC_COMMON_H_
+#define _DC_COMMON_H_
+
+typedef struct _DC_FBC_CREATE_INFO_
+{
+	IMG_UINT32		ui32FBCWidth;	/*!< Pixel width that the FBC module is working on */
+	IMG_UINT32		ui32FBCHeight;	/*!< Pixel height that the FBC module is working on */
+	IMG_UINT32		ui32FBCStride;	/*!< Pixel stride that the FBC module is working on */
+	IMG_UINT32		ui32Size;		/*!< Size of the buffer to create */
+} DC_FBC_CREATE_INFO;
+
+typedef struct _DC_CREATE_INFO_
+{
+	union {
+		DC_FBC_CREATE_INFO sFBC;
+	} u;
+} DC_CREATE_INFO;
+
+typedef struct _DC_BUFFER_CREATE_INFO_
+{
+	PVRSRV_SURFACE_INFO   	sSurface;	/*!< Surface properies, specificed by user */
+	IMG_UINT32            	ui32BPP;	/*!< Bits per pixel */
+	union {
+		DC_FBC_CREATE_INFO 	sFBC;		/*!< Frame buffer compressed specific data */
+	} u;
+} DC_BUFFER_CREATE_INFO;
+
+#endif /* _DC_COMMON_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/include/env/linux/ion_sys.h b/drivers/external_drivers/intel_media/graphics/rgx/services/include/env/linux/ion_sys.h
new file mode 100644
index 0000000..1e5a60d
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/include/env/linux/ion_sys.h
@@ -0,0 +1,76 @@
+/*************************************************************************/ /*!
+@File           ion_sys.c
+@Title          System level interface for Ion
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This file defined the API between services and system layer
+                required for Ion integration.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _ION_SYS_H_
+#define _ION_SYS_H_
+
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include PVR_ANDROID_ION_HEADER
+
+
+PVRSRV_ERROR IonInit(void *phPrivateData);
+
+struct ion_device *IonDevAcquire(IMG_VOID);
+
+IMG_VOID IonDevRelease(struct ion_device *psIonDev);
+
+IMG_UINT32 IonPhysHeapID(IMG_VOID);
+
+#if defined(LMA)
+IMG_DEV_PHYADDR IonCPUPhysToDevPhys(IMG_CPU_PHYADDR sCPUPhysAddr,
+									IMG_UINT32 ui32Offset);
+#else
+/* This is a no-op for UMA systems. */
+static inline
+IMG_DEV_PHYADDR IonCPUPhysToDevPhys(IMG_CPU_PHYADDR sCPUPhysAddr,
+									IMG_UINT32 ui32Offset)
+{
+	return (IMG_DEV_PHYADDR){ .uiAddr = sCPUPhysAddr.uiAddr + ui32Offset };
+}
+#endif
+
+IMG_VOID IonDeinit(IMG_VOID);
+
+#endif /* _ION_SYS_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/include/env/linux/pvr_bridge_io.h b/drivers/external_drivers/intel_media/graphics/rgx/services/include/env/linux/pvr_bridge_io.h
new file mode 100644
index 0000000..9ee0993
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/include/env/linux/pvr_bridge_io.h
@@ -0,0 +1,57 @@
+/*************************************************************************/ /*!
+@File
+@Title          PVR Bridge IO Functionality
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the PVR Bridge code
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __PVR_BRIDGE_IO_H__
+#define __PVR_BRIDGE_IO_H__
+
+#include <linux/ioctl.h>
+
+#define PVRSRV_IOC_GID      'g'
+#define PVRSRV_IOWR(INDEX)  _IOWR(PVRSRV_IOC_GID, INDEX, PVRSRV_BRIDGE_PACKAGE)
+#define PVRSRV_GET_BRIDGE_ID(X)	_IOC_NR(X)
+
+#endif /* __PVR_BRIDGE_IO_H__ */
+
+/******************************************************************************
+ End of file
+******************************************************************************/
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/include/kerneldisplay.h b/drivers/external_drivers/intel_media/graphics/rgx/services/include/kerneldisplay.h
new file mode 100644
index 0000000..4cabd39
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/include/kerneldisplay.h
@@ -0,0 +1,612 @@
+/*************************************************************************/ /*!
+@File
+@Title          Interface for 3rd party display class (DC) drivers
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    API between services and the 3rd party DC driver and vice versa
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__KERNELDISPLAY_H__)
+#define __KERNELDISPLAY_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "img_types.h"
+#include "pvrsrv_surface.h"
+#include "dc_external.h"
+#include "dc_common.h"
+
+/*************************************************************************/ /*!
+@Function       GetInfo
+
+@Description    Query the display controller for its information structure
+
+@Input          hDeviceData             Device private data
+
+@Output         psDisplayInfo           Display info structure
+
+@Return         PVRSRV_OK if the query was successful
+*/
+/*****************************************************************************/
+typedef IMG_VOID (*GetInfo)(IMG_HANDLE hDeviceData,
+							DC_DISPLAY_INFO *psDisplayInfo);
+
+/*************************************************************************/ /*!
+@Function       PanelQueryCount
+
+@Description    Query the display controller for how many panels are
+                contented to it.
+
+@Input          hDeviceData             Device private data
+
+@Output         pui32NumPanels         Number of panels
+
+@Return         PVRSRV_OK if the query was successful
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*PanelQueryCount)(IMG_HANDLE hDeviceData,
+										 IMG_UINT32 *ppui32NumPanels);
+
+/*************************************************************************/ /*!
+@Function       PanelQuery
+
+@Description    Query the display controller for information on what panel(s)
+                are connected to it and their properties.
+
+@Input          hDeviceData             Device private data
+
+@Input          ui32PanelsArraySize    Size of the format and dimension
+                                        array size (i.e. number of panels
+                                        that can be returned)
+
+@Output         pui32NumPanels         Number of panels returned
+
+@Output         pasFormat               Array of formats
+
+@Output         pasDims                 Array of dimensions
+
+@Return         PVRSRV_OK if the query was successful
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*PanelQuery)(IMG_HANDLE hDeviceData,
+									IMG_UINT32 ui32PanelsArraySize,
+									IMG_UINT32 *pui32NumPanels,
+									PVRSRV_PANEL_INFO *pasPanelInfo);
+
+/*************************************************************************/ /*!
+@Function       FormatQuery
+
+@Description    Query the display controller to see if it supports the specified
+                format(s).
+
+@Input          hDeviceData             Device private data
+
+@Input          ui32NumFormats          Number of formats to check
+
+@Input          pasFormat               Array of formats to check
+
+@Output			pui32Supported          For each format, the number of display
+                                        pipes that support that format
+
+@Return         PVRSRV_OK if the query was successful
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*FormatQuery)(IMG_HANDLE hDeviceData,
+									IMG_UINT32 ui32NumFormats,
+									PVRSRV_SURFACE_FORMAT *pasFormat,
+									IMG_UINT32 *pui32Supported);
+
+/*************************************************************************/ /*!
+@Function       DimQuery
+
+@Description    Query the specificed display plane for the display dimensions
+                it supports.
+
+@Input          hDeviceData             Device private data
+
+@Input          ui32NumDims             Number of dimensions to check
+
+@Input          pasDim                  Array of dimentations to check
+
+@Output         pui32Supported          For each dimension, the number of
+                                        display pipes that support that
+                                         dimension
+
+@Return         PVRSRV_OK if the query was successful
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*DimQuery)(IMG_HANDLE hDeviceData,
+								 IMG_UINT32 ui32NumDims,
+								 PVRSRV_SURFACE_DIMS *psDim,
+								 IMG_UINT32 *pui32Supported);
+
+
+/*************************************************************************/ /*!
+@Function       SetBlank
+
+@Description    Enable/disable blanking of the screen
+
+@Input          psConnection            Services connection
+
+@Input          hDevice                 3rd party display class device
+
+@Input          bEnabled                Enable/Disable the blanking
+
+@Return         PVRSRV_OK on success
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*SetBlank)(IMG_HANDLE hDeviceData,
+								 IMG_BOOL bEnabled);
+
+/*************************************************************************/ /*!
+@Function       SetVSyncReporting
+
+@Description    Enable VSync reporting by trigger the global event object on
+                every vsync happened.
+
+@Input          psConnection            Services connection
+
+@Input          hDevice                 3rd party display class device
+
+@Input          bEnabled                Enable/Disable the reporting
+
+@Return         PVRSRV_OK on success
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*SetVSyncReporting)(IMG_HANDLE hDeviceData,
+										  IMG_BOOL bEnabled);
+
+/*************************************************************************/ /*!
+@Function       PVRSRVDCLastVSyncQuery
+
+@Description    Query the time the last vsync happened.
+
+@Input          psConnection            Services connection
+
+@Input          hDevice                 3rd party display class device
+
+@Output         pi64Timestamp           the requested timestamp
+
+@Return         PVRSRV_OK if the query was successful
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*LastVSyncQuery)(IMG_HANDLE hDeviceData,
+									   IMG_INT64 *pi64Timestamp);
+
+typedef PVRSRV_ERROR (*BufferSystemAcquire)(IMG_HANDLE hDeviceData,
+											IMG_DEVMEM_LOG2ALIGN_T *puiLog2PageSize,
+											IMG_UINT32 *pui32PageCount,
+											IMG_UINT32 *pui32PhysHeapID,
+											IMG_UINT32 *pui32ByteStride,
+											IMG_HANDLE *phSystemBuffer);
+
+typedef	IMG_VOID (*BufferSystemRelease)(IMG_HANDLE hSystemBuffer);
+
+/*************************************************************************/ /*!
+@Function       ContextCreate
+
+@Description    Create display context.
+
+@Input          hDeviceData             Device private data
+
+@Output         hDisplayContext         Created display context
+
+@Return         PVRSRV_OK if the context was created
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*ContextCreate)(IMG_HANDLE hDeviceData,
+									  IMG_HANDLE *hDisplayContext);
+
+/*************************************************************************/ /*!
+@Function       ContextConfigureCheck
+
+@Description    Check to see if a configuration is valid for the display
+                controller.
+
+                Note: This function is optional
+
+@Input          hDisplayContext         Display context
+
+@Input          ui32PipeCount           Number of display pipes to configure
+
+@Input          pasSurfAttrib           Array of surface attributes (one for
+                                        each display plane)
+
+@Input          ahBuffers               Array of buffers (one for
+                                        each display plane)
+
+@Return         PVRSRV_OK if the configuration is valid
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*ContextConfigureCheck)(IMG_HANDLE hDisplayContext,
+											  IMG_UINT32 ui32PipeCount,
+											  PVRSRV_SURFACE_CONFIG_INFO *pasSurfAttrib,
+											  IMG_HANDLE *ahBuffers);
+
+/*************************************************************************/ /*!
+@Function       ContextConfigure
+
+@Description    Configuration the display pipeline.
+
+@Input          hDisplayContext         Display context
+
+@Input          ui32PipeCount           Number of display pipes to configure
+
+@Input          pasSurfAttrib           Array of surface attributes (one for
+                                        each display plane)
+
+@Input          ahBuffers               Array of buffers (one for
+                                        each display plane)
+
+@Input          ui32DisplayPeriod		The number of VSync periods this
+                                        configuration should be displayed for
+
+@Input          hConfigData             Config handle which gets passed to
+                                        DisplayConfigurationRetired when this
+                                        configuration is retired
+
+@Return         PVRSRV_OK if the configuration was successfully queued
+*/
+/*****************************************************************************/
+typedef IMG_VOID (*ContextConfigure)(IMG_HANDLE hDisplayContext,
+									 IMG_UINT32 ui32PipeCount,
+									 PVRSRV_SURFACE_CONFIG_INFO *pasSurfAttrib,
+									 IMG_HANDLE *ahBuffers,
+									 IMG_UINT32 ui32DisplayPeriod,
+									 IMG_HANDLE hConfigData);
+
+/*************************************************************************/ /*!
+@Function       ContextDestroy
+
+@Description    Destroy a display context.
+
+@Input          hDisplayContext         Display context to destroy
+
+@Return         None
+*/
+/*****************************************************************************/
+typedef IMG_VOID (*ContextDestroy)(IMG_HANDLE hDisplayContext);
+
+/*************************************************************************/ /*!
+@Function       BufferAlloc
+
+@Description    Allocate a display buffer. This is a request to the display
+                controller to allocate a buffer from memory that is addressable
+                by the display controller.
+
+                Note: The actual allocation of display memory can be deferred
+                until the first call to acquire, but the handle for the buffer
+                still needs to be created and returned to the caller as well
+                as some information about the buffer that's required upfront.
+
+@Input          hDisplayContext         Display context this buffer will be
+                                        used on
+
+@Input          psSurfInfo              Attributes of the buffer
+
+@Output         puiLog2PageSize         Log2 of the pagesize of the buffer
+
+@Output         pui32PageCount          Number of pages in the buffer
+
+@Output         pui32PhysHeapID         Physcial heap ID to use
+
+@Output         pui32ByteStride         Stride (in bytes) of allocated buffer
+
+@Output         phBuffer                Handle to allocated buffer
+
+@Return         PVRSRV_OK if the buffer was successfully allocated
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*BufferAlloc)(IMG_HANDLE hDisplayContext,
+									DC_BUFFER_CREATE_INFO *psSurfInfo,
+									IMG_DEVMEM_LOG2ALIGN_T *puiLog2PageSize,
+									IMG_UINT32 *pui32PageCount,
+									IMG_UINT32 *pui32PhysHeapID,
+									IMG_UINT32 *pui32ByteStride,
+									IMG_HANDLE *phBuffer);
+
+/*************************************************************************/ /*!
+@Function       BufferImport
+
+@Description    Import memory allocated from an external source to the display
+                controller. The DC checks to see if the import is compatible
+                and potentially sets up HW to map the imported buffer, although
+                this isn't require to happen until the first call to DCBufferMap
+
+                Note: This is optional
+
+@Input          hDisplayContext         Display context this buffer will be
+                                        used on
+
+@Input          ui32NumPlanes           Number of planes
+
+@Input          pahImport               Array of handles (one per colour channel)
+
+@Input          psSurfAttrib            Surface attributes of the buffer
+
+@Output         phBuffer                Handle to imported buffer
+
+@Return         PVRSRV_OK if the buffer was successfully imported
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*BufferImport)(IMG_HANDLE hDisplayContext,
+									 IMG_UINT32 ui32NumPlanes,
+									 IMG_HANDLE **paphImport,
+									 DC_BUFFER_IMPORT_INFO *psSurfAttrib,
+									 IMG_HANDLE *phBuffer);
+
+/*************************************************************************/ /*!
+@Function       BufferAcquire
+
+@Description    Acquire the buffer's physcial memory pages. If the buffer doesn't
+                have any memory backing it yet then this will trigger the 3rd
+                party driver to allocate it.
+
+                Note: The page count isn't passed back in this function as
+                services has already obtained it during BufferAlloc.
+
+@Input          hBuffer                 Handle to the buffer
+
+@Output         pasDevPAddr             Array of device physical page address
+                                        of this buffer
+
+@Output         pvLinAddr               CPU virtual address of buffer. This is
+                                        optionial but if you have one you must
+                                        return it otherwise return NULL.
+
+@Return         PVRSRV_OK if the buffer was successfully acquired
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*BufferAcquire)(IMG_HANDLE hBuffer,
+									  IMG_DEV_PHYADDR *pasDevPAddr,
+									  IMG_PVOID *ppvLinAddr);
+
+/*************************************************************************/ /*!
+@Function       BufferRelease
+
+@Description    Release the buffer's physcial memory pages.
+
+@Input          hBuffer                 Handle to the buffer
+
+@Return         None
+*/
+/*****************************************************************************/
+typedef IMG_VOID (*BufferRelease)(IMG_HANDLE hBuffer);
+
+/*************************************************************************/ /*!
+@Function       BufferFree
+
+@Description    Release a reference to the device buffer. If this was the last
+                reference the 3rd party driver is entitled to free the backing
+                memory.
+
+@Input          hBuffer                 Buffer handle we're releasing
+
+@Return         None
+*/
+/*****************************************************************************/
+typedef IMG_VOID (*BufferFree)(IMG_HANDLE hBuffer);
+
+/*************************************************************************/ /*!
+@Function       BufferMap
+
+@Description    Map the buffer into the display controller
+
+                Note: This function is optional
+
+@Input          hBuffer                 Buffer to map
+
+@Return         PVRSRV_OK if the buffer was successfully mapped
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*BufferMap)(IMG_HANDLE hBuffer);
+
+/*************************************************************************/ /*!
+@Function       BufferUnmap
+
+@Description    Unmap a buffer from the display controller
+
+                Note: This function is optional
+
+@Input          hBuffer                 Buffer to unmap
+
+@Return         None
+*/
+/*****************************************************************************/
+typedef IMG_VOID (*BufferUnmap)(IMG_HANDLE hBuffer);
+
+
+/*
+	Function table for server->display
+*/
+typedef struct _DC_DEVICE_FUNCTIONS_
+{
+	/*! Mandatory query functions */
+	GetInfo						pfnGetInfo;
+	PanelQueryCount				pfnPanelQueryCount;
+	PanelQuery					pfnPanelQuery;
+	FormatQuery					pfnFormatQuery;
+	DimQuery					pfnDimQuery;
+
+	/*! Optional blank/vsync function */
+	SetBlank		            pfnSetBlank;
+	SetVSyncReporting		    pfnSetVSyncReporting;
+	LastVSyncQuery				pfnLastVSyncQuery;
+
+	/*! Mandatory configure function */
+	ContextCreate				pfnContextCreate;
+	ContextDestroy				pfnContextDestroy;
+	ContextConfigure			pfnContextConfigure;
+
+	/*! Optional context function */
+	ContextConfigureCheck		pfnContextConfigureCheck;
+
+	/*! Mandatory buffer functions */
+	BufferAlloc					pfnBufferAlloc;
+	BufferAcquire				pfnBufferAcquire;
+	BufferRelease				pfnBufferRelease;
+	BufferFree					pfnBufferFree;
+
+	/*! Optional buffer functions, pfnBufferMap and pfnBufferUnmap are paired
+		functions, provide both or neither */
+	BufferImport				pfnBufferImport;
+	BufferMap					pfnBufferMap;
+	BufferUnmap					pfnBufferUnmap;
+	BufferSystemAcquire			pfnBufferSystemAcquire;
+	BufferSystemRelease			pfnBufferSystemRelease;
+} DC_DEVICE_FUNCTIONS;
+
+
+/*
+	functions exported by kernel services for use by 3rd party kernel display
+	class device driver
+*/
+
+/*************************************************************************/ /*!
+@Function       DCRegisterDevice
+
+@Description    Register a display class device
+
+@Input          psFuncTable             Callback function table
+
+@Input          ui32MaxConfigsInFlight  The maximum number of configs that this
+                                        display device can have in-flight.
+
+@Input          hDeviceData             3rd party device handle, passed into
+                                        DC callbacks
+
+@Output         phSrvHandle             Services handle to pass back into
+                                        UnregisterDCDevice
+
+@Return         PVRSRV_OK if the display class driver was successfully registered
+*/
+/*****************************************************************************/
+PVRSRV_ERROR DCRegisterDevice(DC_DEVICE_FUNCTIONS *psFuncTable,
+							  IMG_UINT32 ui32MaxConfigsInFlight,
+							  IMG_HANDLE hDeviceData,
+							  IMG_HANDLE *phSrvHandle);
+
+/*************************************************************************/ /*!
+@Function       DCUnregisterDevice
+
+@Description    Unregister a display class device
+
+@Input          hDevice                Services device handle
+
+@Return         None
+*/
+/*****************************************************************************/
+IMG_VOID DCUnregisterDevice(IMG_HANDLE hSrvHandle);
+
+/*************************************************************************/ /*!
+@Function       DCDisplayConfigurationRetired
+
+@Description    Called when a configuration as been retired due to a new
+                configuration now being active.
+
+@Input          hConfigData             ConfigData that is being retired
+
+@Return         None
+*/
+/*****************************************************************************/
+IMG_VOID DCDisplayConfigurationRetired(IMG_HANDLE hConfigData);
+
+/*************************************************************************/ /*!
+@Function       DCDisplayHasPendingCommand
+
+@Description    Called to check if there are still pending commands in
+                the Software Command Processor queue.
+
+@Input          hConfigData             ConfigData to check for pending
+                                        commands
+
+@Return         IMG_TRUE if there is at least one pending command
+*/
+/*****************************************************************************/
+IMG_BOOL DCDisplayHasPendingCommand(IMG_HANDLE hConfigData);
+
+/*************************************************************************/ /*!
+@Function       DCImportBufferAcquire
+
+@Description    Acquire information about a buffer that was imported with
+                BufferImport.
+
+@Input          hImport                 Import buffer
+
+@Input          uiLog2PageSize          Log 2 of the DC's page size
+
+@Output         pui32PageCount          Size of the buffer in pages
+
+@Output         ppasDevPAddr            Array of device physcial page address
+                                        of this buffer
+
+@Return         PVRSRV_OK if the import buffer was successfully acquired
+*/
+/*****************************************************************************/
+PVRSRV_ERROR DCImportBufferAcquire(IMG_HANDLE hImport,
+								   IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize,
+								   IMG_UINT32 *pui32PageCount,
+								   IMG_DEV_PHYADDR **ppasDevPAddr);
+
+/*************************************************************************/ /*!
+@Function       DCImportBufferRelease
+
+@Description    Release an imported buffer.
+
+@Input          hImport                 Import handle we're releasing
+
+@Input          pasDevPAddr             Import data was returned from
+                                        DCImportBufferAcquire
+
+@Return         None
+*/
+/*****************************************************************************/
+IMG_VOID DCImportBufferRelease(IMG_HANDLE hImport,
+							   IMG_DEV_PHYADDR *pasDevPAddr);
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif/* #if !defined (__KERNELDISPLAY_H__) */
+
+/******************************************************************************
+ End of file (kerneldisplay.h)
+******************************************************************************/
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/include/mm_common.h b/drivers/external_drivers/intel_media/graphics/rgx/services/include/mm_common.h
new file mode 100644
index 0000000..8478434
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/include/mm_common.h
@@ -0,0 +1,50 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common memory management definitions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Common memory management definitions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef MM_COMMON_H
+#define MM_COMMON_H
+
+#define DEVICEMEM_HISTORY_TEXT_BUFSZ 40
+
+#endif
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/include/pdump.h b/drivers/external_drivers/intel_media/graphics/rgx/services/include/pdump.h
new file mode 100644
index 0000000..9467c5d
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/include/pdump.h
@@ -0,0 +1,76 @@
+/*************************************************************************/ /*!
+@File
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef _SERVICES_PDUMP_H_
+#define _SERVICES_PDUMP_H_
+
+#include "img_types.h"
+
+typedef IMG_UINT32 PDUMP_FLAGS_T;
+
+
+#define PDUMP_FLAGS_DEINIT		    0x20000000UL /*<! Output this entry to the de-initialisation section */
+
+#define PDUMP_FLAGS_POWER		0x08000000UL /*<! Output this entry even when a power transition is ongoing */
+
+#define PDUMP_FLAGS_CONTINUOUS		0x40000000UL /*<! Output this entry always regardless of framed capture range,
+                                                      used by client applications being dumped. */
+#define PDUMP_FLAGS_PERSISTENT		0x80000000UL /*<! Output this entry always regardless of app and range,
+                                                      used by persistent processes e.g. compositor, window mgr etc/ */
+
+#define PDUMP_FLAGS_DEBUG			0x00010000U  /*<! For internal debugging use */
+
+#define PDUMP_FLAGS_NOHW			0x00000001U  /* For internal use: Skip sending instructions to the hardware */ 
+
+#define PDUMP_FILEOFFSET_FMTSPEC "0x%08X"
+typedef IMG_UINT32 PDUMP_FILEOFFSET_T;
+
+#define PDUMP_PARAM_CHANNEL_NAME  "ParamChannel2"
+#define PDUMP_SCRIPT_CHANNEL_NAME "ScriptChannel2"
+
+#define PDUMP_CHANNEL_PARAM		0
+#define PDUMP_CHANNEL_SCRIPT	1
+#define PDUMP_NUM_CHANNELS      2
+
+#define PDUMP_PARAM_0_FILE_NAME "%%0%%.prm"
+#define PDUMP_PARAM_N_FILE_NAME "%%0%%_%02u.prm"
+
+
+#endif /* _SERVICES_PDUMP_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/include/physheap.h b/drivers/external_drivers/intel_media/graphics/rgx/services/include/physheap.h
new file mode 100644
index 0000000..f600a5b
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/include/physheap.h
@@ -0,0 +1,124 @@
+/*************************************************************************/ /*!
+@File
+@Title          Physcial heap management header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines the interface for the physical heap management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#ifndef _PHYSHEAP_H_
+#define _PHYSHEAP_H_
+
+typedef struct _PHYS_HEAP_ PHYS_HEAP;
+
+typedef IMG_VOID (*CpuPAddrToDevPAddr)(IMG_HANDLE hPrivData,
+									   IMG_UINT32 ui32NumOfAddr,
+									   IMG_DEV_PHYADDR *psDevPAddr,
+									   IMG_CPU_PHYADDR *psCpuPAddr);
+
+typedef IMG_VOID (*DevPAddrToCpuPAddr)(IMG_HANDLE hPrivData,
+									   IMG_UINT32 ui32NumOfAddr,
+									   IMG_CPU_PHYADDR *psCpuPAddr,
+									   IMG_DEV_PHYADDR *psDevPAddr);
+
+typedef struct _PHYS_HEAP_FUNCTIONS_
+{
+	/*! Translate CPU physical address to device physical address */
+	CpuPAddrToDevPAddr	pfnCpuPAddrToDevPAddr;
+	/*! Translate device physical address to CPU physical address */
+	DevPAddrToCpuPAddr	pfnDevPAddrToCpuPAddr;
+} PHYS_HEAP_FUNCTIONS;
+
+typedef enum _PHYS_HEAP_TYPE_
+{
+	PHYS_HEAP_TYPE_UNKNOWN = 0,
+	PHYS_HEAP_TYPE_UMA,
+	PHYS_HEAP_TYPE_LMA,
+} PHYS_HEAP_TYPE;
+
+typedef struct _PHYS_HEAP_CONFIG_
+{
+	IMG_UINT32				ui32PhysHeapID;
+	PHYS_HEAP_TYPE			eType;
+	/*
+		Note:
+		sStartAddr and uiSize are only required for LMA heaps
+	*/
+	IMG_CPU_PHYADDR			sStartAddr;
+	IMG_UINT64				uiSize;
+	IMG_CHAR				*pszPDumpMemspaceName;
+	PHYS_HEAP_FUNCTIONS		*psMemFuncs;
+	IMG_HANDLE				hPrivData;
+} PHYS_HEAP_CONFIG;
+
+PVRSRV_ERROR PhysHeapRegister(PHYS_HEAP_CONFIG *psConfig,
+							  PHYS_HEAP **ppsPhysHeap);
+
+IMG_VOID PhysHeapUnregister(PHYS_HEAP *psPhysHeap);
+
+PVRSRV_ERROR PhysHeapAcquire(IMG_UINT32 ui32PhysHeapID,
+							 PHYS_HEAP **ppsPhysHeap);
+
+IMG_VOID PhysHeapRelease(PHYS_HEAP *psPhysHeap);
+
+PHYS_HEAP_TYPE PhysHeapGetType(PHYS_HEAP *psPhysHeap);
+
+PVRSRV_ERROR PhysHeapGetAddress(PHYS_HEAP *psPhysHeap,
+								IMG_CPU_PHYADDR *psCpuPAddr);
+
+PVRSRV_ERROR PhysHeapGetSize(PHYS_HEAP *psPhysHeap,
+						     IMG_UINT64 *puiSize);
+
+IMG_VOID PhysHeapCpuPAddrToDevPAddr(PHYS_HEAP *psPhysHeap,
+									IMG_UINT32 ui32NumOfAddr,
+									IMG_DEV_PHYADDR *psDevPAddr,
+									IMG_CPU_PHYADDR *psCpuPAddr);
+IMG_VOID PhysHeapDevPAddrToCpuPAddr(PHYS_HEAP *psPhysHeap,
+									IMG_UINT32 ui32NumOfAddr,
+									IMG_CPU_PHYADDR *psCpuPAddr,
+									IMG_DEV_PHYADDR *psDevPAddr);
+
+IMG_CHAR *PhysHeapPDumpMemspaceName(PHYS_HEAP *psPhysHeap);
+
+PVRSRV_ERROR PhysHeapInit(IMG_VOID);
+PVRSRV_ERROR PhysHeapDeinit(IMG_VOID);
+
+#endif /* _PHYSHEAP_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/include/pvr_bridge.h b/drivers/external_drivers/intel_media/graphics/rgx/services/include/pvr_bridge.h
new file mode 100644
index 0000000..4b704f3
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/include/pvr_bridge.h
@@ -0,0 +1,305 @@
+/*************************************************************************/ /*!
+@File
+@Title          PVR Bridge Functionality
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the PVR Bridge code
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __PVR_BRIDGE_H__
+#define __PVR_BRIDGE_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "pvrsrv_error.h"
+#include "cache_defines.h"
+
+#if defined(SUPPORT_DISPLAY_CLASS)
+#include "common_dc_bridge.h"
+#endif
+#include "common_mm_bridge.h"
+#if defined(SUPPORT_MMPLAT_BRIDGE)
+#include "common_mmplat_bridge.h"
+#endif
+#include "common_cmm_bridge.h"
+#include "common_pdumpmm_bridge.h"
+#include "common_pdump_bridge.h"
+#if defined(SUPPORT_ION)
+#include "common_dmabuf_bridge.h"
+#endif
+#include "common_pdumpctrl_bridge.h"
+#include "common_srvcore_bridge.h"
+#include "common_sync_bridge.h"
+#if defined(SUPPORT_INSECURE_EXPORT)
+#include "common_syncexport_bridge.h"
+#endif
+#if defined(SUPPORT_SECURE_EXPORT)
+#include "common_syncsexport_bridge.h"
+#endif
+#if (CACHEFLUSH_TYPE == CACHEFLUSH_GENERIC)
+#include "common_cachegeneric_bridge.h"
+#endif
+#if defined(SUPPORT_SECURE_EXPORT)
+#include "common_smm_bridge.h"
+#endif
+#include "common_pvrtl_bridge.h"
+#if defined(PVR_RI_DEBUG)
+#include "common_ri_bridge.h"
+#endif
+
+#if defined(SUPPORT_VALIDATION)
+#include "common_validation_bridge.h"
+#endif
+
+#if defined(PVR_TESTING_UTILS)
+#include "common_tutils_bridge.h"
+#endif
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+#include "common_devicememhistory_bridge.h"
+#endif
+
+/* 
+ * Bridge Cmd Ids
+ */
+
+
+/* Note: The pattern
+ *   #define PVRSRV_BRIDGE_FEATURE (PVRSRV_BRIDGE_PREVFEATURE + 1)
+ *   #if defined(SUPPORT_FEATURE)
+ *   #define PVRSRV_BRIDGE_FEATURE_DISPATCH_FIRST	(PVRSRV_BRIDGE_PREVFEATURE_DISPATCH_LAST + 1)
+ *   #define PVRSRV_BRIDGE_FEATURE_DISPATCH_LAST	(PVRSRV_BRIDGE_FEATURE_DISPATCH_FIRST + PVRSRV_BRIDGE_FEATURE_CMD_LAST)
+ *   #else
+ *   #define PVRSRV_BRIDGE_FEATURE_DISPATCH_FIRST	0
+ *   #define PVRSRV_BRIDGE_FEATURE_DISPATCH_LAST	(PVRSRV_BRIDGE_PREVFEATURE_DISPATCH_LAST)
+ *   #endif
+ * is used in the macro definitions below to make PVRSRV_BRIDGE_FEATURE_*
+ * take up no space in the dispatch table if SUPPORT_FEATURE is disabled.
+ *
+ * Note however that a bridge always defines PVRSRV_BRIDGE_FEATURE, even where 
+ * the feature is not enabled (each bridge group retains its own ioctl number).
+ */
+
+/*   0: CORE functions  */
+#define PVRSRV_BRIDGE_SRVCORE					0UL
+#define PVRSRV_BRIDGE_SRVCORE_DISPATCH_FIRST	0UL
+#define PVRSRV_BRIDGE_SRVCORE_DISPATCH_LAST		(PVRSRV_BRIDGE_SRVCORE_DISPATCH_FIRST + PVRSRV_BRIDGE_SRVCORE_CMD_LAST)
+
+/*   1: SYNC functions  */
+#define PVRSRV_BRIDGE_SYNC					1UL
+#define PVRSRV_BRIDGE_SYNC_DISPATCH_FIRST	(PVRSRV_BRIDGE_SRVCORE_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_SYNC_DISPATCH_LAST	(PVRSRV_BRIDGE_SYNC_DISPATCH_FIRST + PVRSRV_BRIDGE_SYNC_CMD_LAST)
+
+/*   2: SYNCEXPORT functions  */
+#define PVRSRV_BRIDGE_SYNCEXPORT			2UL
+#if defined(SUPPORT_INSECURE_EXPORT)
+#define PVRSRV_BRIDGE_SYNCEXPORT_DISPATCH_FIRST	(PVRSRV_BRIDGE_SYNC_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_SYNCEXPORT_DISPATCH_LAST	(PVRSRV_BRIDGE_SYNCEXPORT_DISPATCH_FIRST + PVRSRV_BRIDGE_SYNCEXPORT_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_SYNCEXPORT_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_SYNCEXPORT_DISPATCH_LAST	(PVRSRV_BRIDGE_SYNC_DISPATCH_LAST)
+#endif
+
+/*   3: SYNCSEXPORT functions  */
+#define PVRSRV_BRIDGE_SYNCSEXPORT		    3UL
+#if defined(SUPPORT_SECURE_EXPORT)
+#define PVRSRV_BRIDGE_SYNCSEXPORT_DISPATCH_FIRST (PVRSRV_BRIDGE_SYNCEXPORT_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_SYNCSEXPORT_DISPATCH_LAST	 (PVRSRV_BRIDGE_SYNCSEXPORT_DISPATCH_FIRST + PVRSRV_BRIDGE_SYNCSEXPORT_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_SYNCSEXPORT_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_SYNCSEXPORT_DISPATCH_LAST	 (PVRSRV_BRIDGE_SYNCEXPORT_DISPATCH_LAST)
+#endif
+
+/*   4: PDUMP CTRL layer functions*/
+#define PVRSRV_BRIDGE_PDUMPCTRL				4UL
+#define PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_FIRST	(PVRSRV_BRIDGE_SYNCSEXPORT_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_LAST	(PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_FIRST + PVRSRV_BRIDGE_PDUMPCTRL_CMD_LAST)
+
+/*   5: Memory Management functions */
+#define PVRSRV_BRIDGE_MM      				5UL
+#define PVRSRV_BRIDGE_MM_DISPATCH_FIRST		(PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_MM_DISPATCH_LAST		(PVRSRV_BRIDGE_MM_DISPATCH_FIRST + PVRSRV_BRIDGE_MM_CMD_LAST)
+
+/*   6: Non-Linux Memory Management functions */
+#define PVRSRV_BRIDGE_MMPLAT          		6UL
+#if defined(SUPPORT_MMPLAT_BRIDGE)
+#define PVRSRV_BRIDGE_MMPLAT_DISPATCH_FIRST	(PVRSRV_BRIDGE_MM_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_MMPLAT_DISPATCH_LAST	(PVRSRV_BRIDGE_MMPLAT_DISPATCH_FIRST + PVRSRV_BRIDGE_MMPLAT_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_MMPLAT_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_MMPLAT_DISPATCH_LAST	(PVRSRV_BRIDGE_MM_DISPATCH_LAST)
+#endif
+
+/*   7: Context Memory Management functions */
+#define PVRSRV_BRIDGE_CMM      				7UL
+#define PVRSRV_BRIDGE_CMM_DISPATCH_FIRST	(PVRSRV_BRIDGE_MMPLAT_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_CMM_DISPATCH_LAST		(PVRSRV_BRIDGE_CMM_DISPATCH_FIRST + PVRSRV_BRIDGE_CMM_CMD_LAST)
+
+/*   8: PDUMP Memory Management functions */
+#define PVRSRV_BRIDGE_PDUMPMM      			8UL
+#define PVRSRV_BRIDGE_PDUMPMM_DISPATCH_FIRST (PVRSRV_BRIDGE_CMM_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_PDUMPMM_DISPATCH_LAST	 (PVRSRV_BRIDGE_PDUMPMM_DISPATCH_FIRST + PVRSRV_BRIDGE_PDUMPMM_CMD_LAST)
+
+/*   9: PDUMP functions */
+#define PVRSRV_BRIDGE_PDUMP      			9UL
+#define PVRSRV_BRIDGE_PDUMP_DISPATCH_FIRST (PVRSRV_BRIDGE_PDUMPMM_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_PDUMP_DISPATCH_LAST	(PVRSRV_BRIDGE_PDUMP_DISPATCH_FIRST + PVRSRV_BRIDGE_PDUMP_CMD_LAST)
+
+/*  10: DMABUF functions */
+#define PVRSRV_BRIDGE_DMABUF					10UL
+#if defined(SUPPORT_ION)
+#define PVRSRV_BRIDGE_DMABUF_DISPATCH_FIRST (PVRSRV_BRIDGE_PDUMP_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_DMABUF_DISPATCH_LAST	(PVRSRV_BRIDGE_DMABUF_DISPATCH_FIRST + PVRSRV_BRIDGE_DMABUF_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_DMABUF_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_DMABUF_DISPATCH_LAST	(PVRSRV_BRIDGE_PDUMP_DISPATCH_LAST)
+#endif
+
+/*  11: Display Class functions */
+#define PVRSRV_BRIDGE_DC						11UL
+#if defined(SUPPORT_DISPLAY_CLASS)
+#define PVRSRV_BRIDGE_DC_DISPATCH_FIRST     (PVRSRV_BRIDGE_DMABUF_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_DC_DISPATCH_LAST		(PVRSRV_BRIDGE_DC_DISPATCH_FIRST + PVRSRV_BRIDGE_DC_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_DC_DISPATCH_FIRST     0
+#define PVRSRV_BRIDGE_DC_DISPATCH_LAST		(PVRSRV_BRIDGE_DMABUF_DISPATCH_LAST)
+#endif
+
+/*  12: Generic cache interface functions*/
+#define PVRSRV_BRIDGE_CACHEGENERIC			12UL
+#if (CACHEFLUSH_TYPE == CACHEFLUSH_GENERIC)
+#define PVRSRV_BRIDGE_CACHEGENERIC_DISPATCH_FIRST (PVRSRV_BRIDGE_DC_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_CACHEGENERIC_DISPATCH_LAST  (PVRSRV_BRIDGE_CACHEGENERIC_DISPATCH_FIRST + PVRSRV_BRIDGE_CACHEGENERIC_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_CACHEGENERIC_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_CACHEGENERIC_DISPATCH_LAST  (PVRSRV_BRIDGE_DC_DISPATCH_LAST)
+#endif
+
+/*  13: Secure Memory Management functions*/
+#define PVRSRV_BRIDGE_SMM					13UL
+#if defined(SUPPORT_SECURE_EXPORT)
+#define PVRSRV_BRIDGE_SMM_DISPATCH_FIRST   (PVRSRV_BRIDGE_CACHEGENERIC_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_SMM_DISPATCH_LAST  	(PVRSRV_BRIDGE_SMM_DISPATCH_FIRST + PVRSRV_BRIDGE_SMM_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_SMM_DISPATCH_FIRST   0
+#define PVRSRV_BRIDGE_SMM_DISPATCH_LAST  	(PVRSRV_BRIDGE_CACHEGENERIC_DISPATCH_LAST)
+#endif
+
+/*  14: Transport Layer interface functions */
+#define PVRSRV_BRIDGE_PVRTL					14UL
+#define PVRSRV_BRIDGE_PVRTL_DISPATCH_FIRST  (PVRSRV_BRIDGE_SMM_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_PVRTL_DISPATCH_LAST  	(PVRSRV_BRIDGE_PVRTL_DISPATCH_FIRST + PVRSRV_BRIDGE_PVRTL_CMD_LAST)
+
+/*  15: Resource Information (RI) interface functions */
+#define PVRSRV_BRIDGE_RI						15UL
+#if defined(PVR_RI_DEBUG)
+#define PVRSRV_BRIDGE_RI_DISPATCH_FIRST     (PVRSRV_BRIDGE_PVRTL_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_RI_DISPATCH_LAST  	(PVRSRV_BRIDGE_RI_DISPATCH_FIRST + PVRSRV_BRIDGE_RI_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_RI_DISPATCH_FIRST     0
+#define PVRSRV_BRIDGE_RI_DISPATCH_LAST  	(PVRSRV_BRIDGE_PVRTL_DISPATCH_LAST)
+#endif
+
+/*  16: Validation interface functions */
+#define PVRSRV_BRIDGE_VALIDATION				16UL
+#if defined(SUPPORT_VALIDATION)
+#define PVRSRV_BRIDGE_VALIDATION_DISPATCH_FIRST (PVRSRV_BRIDGE_RI_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST  (PVRSRV_BRIDGE_VALIDATION_DISPATCH_FIRST + PVRSRV_BRIDGE_VALIDATION_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_VALIDATION_DISPATCH_FIRST 0 
+#define PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST  (PVRSRV_BRIDGE_RI_DISPATCH_LAST)
+#endif
+
+/*  17: TUTILS interface functions */
+#define PVRSRV_BRIDGE_TUTILS					17UL
+#if defined(PVR_TESTING_UTILS)
+#define PVRSRV_BRIDGE_TUTILS_DISPATCH_FIRST (PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST  (PVRSRV_BRIDGE_TUTILS_DISPATCH_FIRST + PVRSRV_BRIDGE_TUTILS_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_TUTILS_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST  (PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST)
+#endif
+
+/*  18: DevMem history interface functions */
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY		18UL
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_FIRST (PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST  (PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_FIRST + PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST  (PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST)
+#endif
+
+/* NB PVRSRV_BRIDGE_LAST below must be the last bridge group defined above (PVRSRV_BRIDGE_FEATURE) */
+#define PVRSRV_BRIDGE_LAST       			(PVRSRV_BRIDGE_DEVICEMEMHISTORY)
+/* NB PVRSRV_BRIDGE_DISPATCH LAST below must be the last dispatch entry defined above (PVRSRV_BRIDGE_FEATURE_DISPATCH_LAST) */
+#define PVRSRV_BRIDGE_DISPATCH_LAST			(PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST)
+
+
+/******************************************************************************
+ * Generic bridge structures 
+ *****************************************************************************/
+
+
+/******************************************************************************
+ *	bridge packaging structure
+ *****************************************************************************/
+typedef struct PVRSRV_BRIDGE_PACKAGE_TAG
+{
+	IMG_UINT32				ui32BridgeID;			/*!< ioctl bridge group */
+	IMG_UINT32				ui32FunctionID;         /*!< ioctl function index */
+	IMG_UINT32				ui32Size;				/*!< size of structure */
+	IMG_VOID				*pvParamIn;				/*!< input data buffer */ 
+	IMG_UINT32				ui32InBufferSize;		/*!< size of input data buffer */
+	IMG_VOID				*pvParamOut;			/*!< output data buffer */
+	IMG_UINT32				ui32OutBufferSize;		/*!< size of output data buffer */
+}PVRSRV_BRIDGE_PACKAGE;
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* __PVR_BRIDGE_H__ */
+
+/******************************************************************************
+ End of file (pvr_bridge.h)
+******************************************************************************/
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/include/pvr_bridge_client.h b/drivers/external_drivers/intel_media/graphics/rgx/services/include/pvr_bridge_client.h
new file mode 100644
index 0000000..072b01d
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/include/pvr_bridge_client.h
@@ -0,0 +1,81 @@
+/*************************************************************************/ /*!
+@File
+@Title          PVR Bridge Functionality
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the PVR Bridge code
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __PVR_BRIDGE_U_H__
+#define __PVR_BRIDGE_U_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+
+/******************************************************************************
+ * Function prototypes 
+ *****************************************************************************/
+#if(__SIZEOF_POINTER__ == 4)
+IMG_EXPORT IMG_BOOL PVRSRVIsKernel64Bit(void);
+#endif
+
+PVRSRV_ERROR OpenServices(IMG_HANDLE *phServices, IMG_UINT32 ui32SrvFlags);
+PVRSRV_ERROR CloseServices(IMG_HANDLE hServices);
+PVRSRV_ERROR PVRSRVBridgeCall(IMG_HANDLE hServices,
+							  IMG_UINT8	ui8BridgeGroup,
+							  IMG_UINT32 ui32FunctionID,
+							  IMG_VOID *pvParamIn,
+							  IMG_UINT32 ui32InBufferSize,
+							  IMG_VOID *pvParamOut,
+							  IMG_UINT32 ui32OutBufferSize);
+
+#if defined (__cplusplus)
+}
+#endif
+#endif /* __PVR_BRIDGE_U_H__ */
+
+/******************************************************************************
+ End of file (pvr_bridge_u.h)
+******************************************************************************/
+
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/include/pvr_drm_display.h b/drivers/external_drivers/intel_media/graphics/rgx/services/include/pvr_drm_display.h
new file mode 100644
index 0000000..d122142
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/include/pvr_drm_display.h
@@ -0,0 +1,201 @@
+/*************************************************************************/ /*!
+@File
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__PVR_DRM_DISPLAY_H__)
+#define __PVR_DRM_DISPLAY_H__
+
+#include <linux/version.h>
+#include <drm/drmP.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,18,0))
+#include <drm/drm_gem.h>
+#endif
+
+#define	PVR_DRM_MAKENAME_HELPER(x, y)	x ## y
+#define	PVR_DRM_MAKENAME(x, y)		PVR_DRM_MAKENAME_HELPER(x, y)
+
+typedef irqreturn_t (*pvr_drm_irq_handler)(void *data);
+
+#if defined(SUPPORT_DRM_DC_MODULE)
+struct pvr_drm_display_buffer;
+struct pvr_drm_flip_data;
+
+typedef void (*pvr_drm_flip_func)(struct drm_gem_object *bo, void *data, struct pvr_drm_flip_data *flip_data);
+
+struct pvr_drm_device_funcs
+{
+	void *(*pvr_drm_get_display_device)(struct drm_device *dev);
+	int (*pvr_drm_display_irq_install)(struct drm_device *dev, unsigned int irq, pvr_drm_irq_handler handler, void **irq_handle_out);
+	int (*pvr_drm_display_irq_uninstall)(void *irq_handle);
+	int (*pvr_drm_gem_create)(struct drm_device *dev, size_t size, struct drm_gem_object **bo);
+	int (*pvr_drm_gem_map)(struct drm_gem_object *bo);
+	int (*pvr_drm_gem_unmap)(struct drm_gem_object *bo);
+	int (*pvr_drm_gem_cpu_addr)(struct drm_gem_object *bo, off_t offset, uint64_t *cpu_addr_out);
+	int (*pvr_drm_gem_dev_addr)(struct drm_gem_object *bo, off_t offset, uint64_t *dev_addr_out);
+	struct pvr_drm_display_buffer *(*pvr_drm_gem_buffer)(struct drm_gem_object *bo);
+	int (*pvr_drm_flip_schedule)(struct drm_gem_object *bo, pvr_drm_flip_func flip_cb, void *data);
+	int (*pvr_drm_flip_done)(struct pvr_drm_flip_data *flip_data);
+	int (*pvr_drm_heap_acquire)(uint32_t heap_id, void **heap_out);
+	void (*pvr_drm_heap_release)(void *heap);
+	int (*pvr_drm_heap_info)(void *heap, uint64_t *cpu_phys_base, uint64_t *dev_phys_base, size_t *size);
+};
+
+static inline void *pvr_drm_get_display_device(struct drm_device *dev)
+{
+	struct pvr_drm_device_funcs *funcs = (struct pvr_drm_device_funcs *)dev->dev_private;
+
+	return funcs->pvr_drm_get_display_device(dev);
+}
+
+static inline int pvr_drm_irq_install(struct drm_device *dev, unsigned int irq, pvr_drm_irq_handler handler, void **irq_handle_out)
+{
+	struct pvr_drm_device_funcs *funcs = (struct pvr_drm_device_funcs *)dev->dev_private;
+
+	return funcs->pvr_drm_display_irq_install(dev, irq, handler, irq_handle_out);
+}
+
+static inline int pvr_drm_irq_uninstall(struct drm_device *dev, void *irq_handle)
+{
+	struct pvr_drm_device_funcs *funcs = (struct pvr_drm_device_funcs *)dev->dev_private;
+
+	return funcs->pvr_drm_display_irq_uninstall(irq_handle);
+}
+
+static inline int pvr_drm_gem_create(struct drm_device *dev, size_t size, struct drm_gem_object **bo)
+{
+	struct pvr_drm_device_funcs *funcs = (struct pvr_drm_device_funcs *)dev->dev_private;
+
+	return funcs->pvr_drm_gem_create(dev, size, bo);
+}
+
+static inline int pvr_drm_gem_map(struct drm_gem_object *bo)
+{
+	struct pvr_drm_device_funcs *funcs = (struct pvr_drm_device_funcs *)bo->dev->dev_private;
+
+	return funcs->pvr_drm_gem_map(bo);
+}
+
+static inline int pvr_drm_gem_unmap(struct drm_gem_object *bo)
+{
+	struct pvr_drm_device_funcs *funcs = (struct pvr_drm_device_funcs *)bo->dev->dev_private;
+
+	return funcs->pvr_drm_gem_unmap(bo);	
+}
+
+static inline int pvr_drm_gem_cpu_addr(struct drm_gem_object *bo, off_t offset, uint64_t *cpu_addr_out)
+{
+	struct pvr_drm_device_funcs *funcs = (struct pvr_drm_device_funcs *)bo->dev->dev_private;
+
+	return funcs->pvr_drm_gem_cpu_addr(bo, offset, cpu_addr_out);
+}
+
+static inline int pvr_drm_gem_dev_addr(struct drm_gem_object *bo, off_t offset, uint64_t *dev_addr_out)
+{
+	struct pvr_drm_device_funcs *funcs = (struct pvr_drm_device_funcs *)bo->dev->dev_private;
+
+	return funcs->pvr_drm_gem_dev_addr(bo, offset, dev_addr_out);
+}
+
+static inline struct pvr_drm_display_buffer *pvr_drm_gem_buffer(struct drm_gem_object *bo)
+{
+	struct pvr_drm_device_funcs *funcs = (struct pvr_drm_device_funcs *)bo->dev->dev_private;
+
+	return funcs->pvr_drm_gem_buffer(bo);
+}
+
+static inline int pvr_drm_flip_schedule(struct drm_gem_object *bo, pvr_drm_flip_func flip_cb, void *data)
+{
+	struct pvr_drm_device_funcs *funcs = (struct pvr_drm_device_funcs *)bo->dev->dev_private;
+
+	return funcs->pvr_drm_flip_schedule(bo, flip_cb, data);
+}
+
+static inline int pvr_drm_flip_done(struct drm_device *dev, struct pvr_drm_flip_data *flip_data)
+{
+	struct pvr_drm_device_funcs *funcs = (struct pvr_drm_device_funcs *)dev->dev_private;
+
+	return funcs->pvr_drm_flip_done(flip_data);
+}
+
+static inline int pvr_drm_heap_acquire(struct drm_device *dev, uint32_t heap_id, void **heap_out)
+{
+	struct pvr_drm_device_funcs *funcs = (struct pvr_drm_device_funcs *)dev->dev_private;
+
+	return funcs->pvr_drm_heap_acquire(heap_id, heap_out);
+}
+
+static inline void pvr_drm_heap_release(struct drm_device *dev, void *heap)
+{
+	struct pvr_drm_device_funcs *funcs = (struct pvr_drm_device_funcs *)dev->dev_private;
+
+	funcs->pvr_drm_heap_release(heap);
+}
+
+static inline int pvr_drm_heap_info(struct drm_device *dev, void *heap, uint64_t *cpu_phys_base, uint64_t *dev_phys_base, size_t *size)
+{
+	struct pvr_drm_device_funcs *funcs = (struct pvr_drm_device_funcs *)dev->dev_private;
+
+	return funcs->pvr_drm_heap_info(heap, cpu_phys_base, dev_phys_base, size);
+}
+
+int PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _init)(struct drm_device *dev, void **display_priv_out);
+int PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _configure)(void *display_priv);
+void PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _cleanup)(void *display_priv);
+
+int PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _buffer_alloc)(void *display_priv, size_t size, struct pvr_drm_display_buffer **buffer_out);
+int PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _buffer_free)(struct pvr_drm_display_buffer *buffer);
+uint64_t *PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _buffer_acquire)(struct pvr_drm_display_buffer *buffer);
+int PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _buffer_release)(struct pvr_drm_display_buffer *buffer, uint64_t *dev_paddr_array);
+void *PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _buffer_vmap)(struct pvr_drm_display_buffer *buffer);
+void PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _buffer_vunmap)(struct pvr_drm_display_buffer *buffer, void *vaddr);
+
+u32 PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _get_vblank_counter)(void *display_priv, int crtc);
+int PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _enable_vblank)(void *display_priv, int crtc);
+int PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _disable_vblank)(void *display_priv, int crtc);
+
+#elif defined(SUPPORT_DISPLAY_CLASS)
+
+extern int PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _Init)(struct drm_device *dev);
+extern void PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _Cleanup)(struct drm_device *dev);
+
+#endif /* defined(SUPPORT_DRM_DC_MODULE) */
+
+#endif /* !defined(__PVR_DRM_DISPLAY_H__) */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/include/pvr_tl.h b/drivers/external_drivers/intel_media/graphics/rgx/services/include/pvr_tl.h
new file mode 100644
index 0000000..8e9e7d8
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/include/pvr_tl.h
@@ -0,0 +1,192 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services Transport Layer UM Client API
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    User mode Transport layer API for clients.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+ 
+#ifndef __PVR_TL_H__
+#define __PVR_TL_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+#include "services.h"
+
+#include "pvr_tlcommon.h"
+
+
+/**************************************************************************/ /*!
+ @Function		PVRSRVTLConnect
+ @Description	Initialise connection to Services kernel server transport layer
+ @Output		ppsConnection	Address of a pointer to a connection object
+ @Return        PVRSRV_ERROR:	for system error codes
+*/ /***************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVTLConnect(PVRSRV_CONNECTION **ppsConnection);
+
+
+/**************************************************************************/ /*!
+ @Function		PVRSRVTLDisconnect
+ @Description	Disconnect from the Services kernel server transport layer
+ @Input			psConnection	Pointer to connection object as returned from
+ 	 	 	 	 	 	 	 	PVRSRVTLConnect()
+ @Return        PVRSRV_ERROR:	for system error codes
+*/ /***************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVTLDisconnect(PVRSRV_CONNECTION* psConnection);
+
+
+/**************************************************************************/ /*!
+ @Function		PVRSRVTLOpenStream
+ @Description	Open a descriptor onto an existing PVR transport stream. If
+				the stream does not exist it returns a NOT_FOUND error unless
+				the OPEN_WAIT flag is supplied. In this case it will wait for
+				the stream to be created. If it is not created in the wait
+				period a TIMEOUT error is returned.
+ @Input			psConnection	Address of a pointer to a connection object
+ @Input			pszName			Address of the stream name string, no longer
+ 	 	 	 	 	 	 	 	than PRVSRVTL_MAX_STREAM_NAME_SIZE.
+ @Input			ui32Mode    Flags defined in pvr_tlcommon.h
+							ACQUIRE_NONBLOCKING: Results in non-blocking reads
+							    on stream. Reads are blocking by default
+                            OPEN_WAIT: Causes open to wait for a brief moment
+                                if the stream does not exist
+ @Output		phSD		Address of a pointer to an stream object
+ @Return 		PVRSRV_ERROR_NOT_FOUND:        when named stream not found
+ @Return		PVRSRV_ERROR_ALREADY_OPEN:     stream already open by another
+ @Return		PVRSRV_ERROR_STREAM_ERROR:     internal driver state error
+ @Return        PVRSRV_ERROR_TIMEOUT:          block timed out, stream not found
+ @Return		PVRSRV_ERROR:			       for other system codes
+*/ /***************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVTLOpenStream(PVRSRV_CONNECTION* psConnection,
+		IMG_PCHAR    pszName,
+		IMG_UINT32   ui32Mode,
+		PVRSRVTL_SD* phSD);
+
+
+/**************************************************************************/ /*!
+ @Function		PVRSRVTLCloseStream
+ @Description	Close and release the stream connection to Services kernel
+				server transport layer. Any outstanding Acquire will be
+				released.
+ @Input			psConnection	Address of a pointer to a connection object
+ @Input			hSD				Handle of the stream object to close
+ @Return		PVRSRV_ERROR_HANDLE_NOT_FOUND: when SD handle is not known
+ @Return		PVRSRV_ERROR_STREAM_ERROR: 	  internal driver state error
+ @Return		PVRSRV_ERROR:				  for system codes
+*/ /***************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVTLCloseStream(PVRSRV_CONNECTION* psConnection,
+		PVRSRVTL_SD hSD);
+
+
+
+/****************************************************************************
+ * Stream Buffer Data retrieval API(s)
+ * 
+ * The client must ensure their use of this acquire/release API for a single 
+ * connection/stream must not be shared with multiple execution contexts e.g.
+ * between a kernel thread and an ISR handler. It is the client’s
+ * responsibility to ensure this API is not interrupted by a high priority
+ * thread/ISR
+ ****************************************************************************/
+
+/**************************************************************************/ /*!
+ @Function		PVRSRVTLAcquireData
+ @Description	When there is data available in the stream buffer this call
+ 	 	 	 	returns with the address and length of the data buffer the
+ 	 	 	 	client can safely read. This buffer may contain one or more
+ 	 	 	 	packets of data.
+ 	 	 	 	If no data is available then this call blocks until it becomes
+ 	 	 	 	available. However if the stream has been destroyed while
+ 	 	 	 	waiting then a resource unavailable error will be returned
+ 	 	 	 	to the caller. Clients must pair this call with a
+ 	 	 	 	ReleaseData call.
+ @Input			psConnection	Address of a pointer to a connection object
+ @Input			hSD				Handle of the stream object to read
+ @Output		ppPacketBuf		Address of a pointer to an byte buffer. On exit
+								pointer contains address of buffer to read from
+ @Output		puiBufLen		Pointer to an integer. On exit it is the size
+								of the data to read from the packet buffer
+ @Return		PVRSRV_ERROR_RESOURCE_UNAVAILABLE: when stream no longer exists
+ @Return		PVRSRV_ERROR_HANDLE_NOT_FOUND:     when SD handle not known
+ @Return		PVRSRV_ERROR_STREAM_ERROR: 	       internal driver state error
+ @Return		PVRSRV_ERROR_RETRY:				   release not called beforehand
+ @Return        PVRSRV_ERROR_TIMEOUT:              block timed out, no data
+ @Return		PVRSRV_ERROR:					   for other system codes
+*/ /***************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVTLAcquireData(PVRSRV_CONNECTION* psConnection,
+		PVRSRVTL_SD hSD,
+		IMG_PBYTE*  ppPacketBuf,
+		IMG_UINT32* puiBufLen);
+
+
+/**************************************************************************/ /*!
+ @Function		PVRSRVTLReleaseData
+ @Description	Called after client has read the stream data out of the buffer
+ 	 	 	 	The data is subsequently flushed from the stream buffer to make
+ 	 	 	 	room for more data packets from the stream source.
+ @Input			psConnection	Address of a pointer to a connection object
+ @Input			hSD				Handle of the stream object to read
+ @Return		PVRSRV_ERROR_RESOURCE_UNAVAILABLE: when stream no longer exists
+ @Return		PVRSRV_ERROR_HANDLE_NOT_FOUND:   when SD handle not known to TL
+ @Return		PVRSRV_ERROR_STREAM_ERROR: 	     internal driver state error
+ @Return		PVRSRV_ERROR_RETRY:				 acquire not called beforehand
+ @Return		PVRSRV_ERROR:	                 for system codes
+*/ /***************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVTLReleaseData(PVRSRV_CONNECTION* psConnection,
+		PVRSRVTL_SD hSD);
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* __PVR_TL_H__ */
+
+/******************************************************************************
+ End of file (pvr_tl.h)
+******************************************************************************/
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/include/pvr_tlcommon.h b/drivers/external_drivers/intel_media/graphics/rgx/services/include/pvr_tlcommon.h
new file mode 100644
index 0000000..63c31b2
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/include/pvr_tlcommon.h
@@ -0,0 +1,193 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services Transport Layer common types and definitions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Transport layer common types and definitions included into
+                both user mode and kernel mode source.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef __PVR_TLCOMMON_H__
+#define __PVR_TLCOMMON_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "img_defs.h"
+
+
+/*! Handle type for stream descriptor objects as created by this API */
+typedef IMG_HANDLE PVRSRVTL_SD;
+
+
+/*! Maximum stream name length including the null byte */
+#define PRVSRVTL_MAX_STREAM_NAME_SIZE	20U
+
+/*! Packet lengths are always rounded up to a multiple of 4 bytes */
+#define PVRSRVTL_PACKET_ALIGNMENT		4U
+#define PVRSRVTL_ALIGN(x) 				((x+PVRSRVTL_PACKET_ALIGNMENT-1) & ~(PVRSRVTL_PACKET_ALIGNMENT-1))
+
+
+/*! A packet is made up of a header structure followed by the data bytes.
+ * There are 3 types of packet: normal (has data), data lost and padding,
+ * see packet flags. Header kept small to reduce data overhead.
+ *
+ * if the ORDER of the structure members is changed, please UPDATE the 
+ *   PVRSRVTL_PACKET_FLAG_OFFSET macro.
+ */
+typedef struct _PVRSRVTL_PACKETHDR_
+{
+	IMG_UINT16 uiDataLen;	/*!< Number of bytes following header */
+	IMG_UINT16 uiFlags;		/*!< Packet flag word */
+
+	/* First bytes of data ... */
+	//IMG_UINT32 ui32Data;	// ... variable length data array
+
+} PVRSRVTL_PACKETHDR, *PVRSRVTL_PPACKETHDR;
+
+/* Structure must always be a size multiple of 4 as stream buffer
+ * still an array of IMG_UINT32s.
+ */
+BLD_ASSERT((sizeof(PVRSRVTL_PACKETHDR)&3)==0, pvr_tlcommon_h)
+
+/*! Packet header mask used to extract the type from the uiFlags member.
+ * Do not use directly, \see TEST_PACKET_FLAG
+ */
+#define PVRSRVTL_PACKETHDR_TYPE_MASK			0x000f
+
+/*! Packet header mask used to extract the flags from the uiFlags member.
+ * Do not use directly, \see GET_PACKET_TYPE
+ */
+#define PVRSRVTL_PACKETHDR_FLAG_MASK			0xfff0
+
+/*! Packet type enumeration.
+ */
+typedef enum _PVRSRVTL_PACKETTYPE_
+{
+	/*! Undefined packet */
+	PVRSRVTL_PACKETTYPE_UNDEF = 0,
+
+	/*! Normal packet type. Indicates data follows the header.
+	 */
+	PVRSRVTL_PACKETTYPE_DATA = 1,
+
+	/*! When seen this packet type indicates that at this moment in the stream
+	 * packet(s) were not able to be accepted due to space constraints and that
+	 * recent data may be lost - depends on how the producer handles the
+	 * error. Such packets have no data, data length is 0.
+	 */
+	PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED = 2,
+
+	/*! Packets with this type set are padding packets that contain undefined
+	 * data and must be ignored/skipped by the client. They are used when the
+	 * circular stream buffer wraps around and there is not enough space for
+	 * the data at the end of the buffer. Such packets have a length of 0 or
+	 * more.
+	 */
+	PVRSRVTL_PACKETTYPE_PADDING = 3,
+
+	/*! This packet type conveys to the stream consumer that the stream producer
+	 * has reached the end of data for that data sequence. The TLDaemon
+	 * has several options for processing these packets that can be selected
+	 * on a per stream basis.
+	 */
+	PVRSRVTL_PACKETTYPE_MARKER_EOS = 4,
+
+	PVRSRVTL_PACKETTYPE_LAST = PVRSRVTL_PACKETTYPE_MARKER_EOS
+} PVRSRVTL_PACKETTYPE;
+
+/* The SET_PACKET_* macros rely on the order the PVRSRVTL_PACKETHDR members are declared:
+ * uiFlags is the upper half of a structure consisting of 2 uint16 quantities.
+ */
+#define PVRSRVTL_PACKET_FLAG_OFFSET		(8 * sizeof( ((PVRSRVTL_PACKETHDR *)NULL)->uiDataLen ))
+#define PVRSRVTL_SET_PACKET_DATA(len)   	(len) | (PVRSRVTL_PACKETTYPE_DATA                  <<PVRSRVTL_PACKET_FLAG_OFFSET)
+#define PVRSRVTL_SET_PACKET_PADDING(len)	(len) | (PVRSRVTL_PACKETTYPE_PADDING               <<PVRSRVTL_PACKET_FLAG_OFFSET)
+#define PVRSRVTL_SET_PACKET_HDR(len,type)	(len) | ((type)						               <<PVRSRVTL_PACKET_FLAG_OFFSET)
+#define PVRSRVTL_SET_PACKET_WRITE_FAILED           (PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED <<PVRSRVTL_PACKET_FLAG_OFFSET)
+
+/*! Returns the number of bytes of data in the packet. p may be any address type
+ * */
+#define GET_PACKET_DATA_LEN(p)	\
+	((IMG_UINT32) ((PVRSRVTL_PPACKETHDR)(p))->uiDataLen )
+
+/*! Returns a IMG_BYTE* pointer to the first byte of data in the packet */
+#define GET_PACKET_DATA_PTR(p)	\
+	((IMG_PBYTE) ( ((IMG_SIZE_T)p) + sizeof(PVRSRVTL_PACKETHDR)) )
+
+/*! Given a PVRSRVTL_PPACKETHDR address, return the address of the next pack
+ *  It is up to the caller to determine if the new address is within the packet
+ *  buffer.
+ */
+#define GET_NEXT_PACKET_ADDR(p) \
+	((PVRSRVTL_PPACKETHDR) ( ((IMG_UINT8 *)p) + sizeof(PVRSRVTL_PACKETHDR) + \
+	(((((PVRSRVTL_PPACKETHDR)p)->uiDataLen) + \
+	(PVRSRVTL_PACKET_ALIGNMENT-1)) & (~(PVRSRVTL_PACKET_ALIGNMENT-1)) ) ))
+
+/*! Turns the packet address p into a PVRSRVTL_PPACKETHDR pointer type
+ */
+#define GET_PACKET_HDR(p)		((PVRSRVTL_PPACKETHDR)(p))
+
+/*! Get the type of the packet. p is of type PVRSRVTL_PPACKETHDR
+ */
+#define GET_PACKET_TYPE(p)		(((p)->uiFlags & PVRSRVTL_PACKETHDR_TYPE_MASK))
+
+/*! Tests if a packet flag is set or not. p is of type PVRSRVTL_PPACKETHDR and
+ *  f is one of the flags
+ */
+#define TEST_PACKET_FLAG(p, f)	((p->uiFlags & (f)) ? IMG_TRUE : IMG_FALSE)
+
+
+/*! Flags for use with PVRSRVTLOpenStream
+ * 0x01 - Do not block in PVRSRVTLAcquireData() when no bytes are available
+ * 0x02 - When the stream does not exist wait for a bit (2s) in
+ *        PVRSRVTLOpenStream() and then exit with a timeout error if it still
+ *        does not exist.
+ */
+#define PVRSRV_STREAM_FLAG_NONE                 (1U<<0)
+#define PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING  (1U<<1)
+#define PVRSRV_STREAM_FLAG_OPEN_WAIT            (1U<<2)
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* __PVR_TLCOMMON_H__ */
+/******************************************************************************
+ End of file (pvr_tlcommon.h)
+******************************************************************************/
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/include/rgx_bridge.h b/drivers/external_drivers/intel_media/graphics/rgx/services/include/rgx_bridge.h
new file mode 100644
index 0000000..d26d4fd
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/include/rgx_bridge.h
@@ -0,0 +1,156 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX Bridge Functionality
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the rgx Bridge code
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGX_BRIDGE_H__)
+#define __RGX_BRIDGE_H__
+
+#include "pvr_bridge.h"
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "common_rgxinit_bridge.h"
+#include "common_rgxta3d_bridge.h"
+#include "common_rgxcmp_bridge.h"
+#include "common_rgxtq_bridge.h"
+#include "common_breakpoint_bridge.h"
+#include "common_debugmisc_bridge.h"
+#include "common_rgxpdump_bridge.h"
+#include "common_rgxhwperf_bridge.h"
+#if defined(RGX_FEATURE_RAY_TRACING)
+#include "common_rgxray_bridge.h"
+#endif
+#include "common_regconfig_bridge.h"
+#include "common_timerquery_bridge.h"
+
+/* 
+ * Bridge Cmd Ids
+ */
+
+/* *REMEMBER* to update PVRSRV_BRIDGE_RGX_LAST if you add/remove a bridge
+ * group! 
+ * Also you need to ensure all PVRSRV_BRIDGE_RGX_xxx_DISPATCH_FIRST
+ * offsets follow on from the previous bridge group's commands!
+ *
+ * If a bridge group is optional, ensure you *ALWAYS* define its index
+ * (e.g. PVRSRV_BRIDGE_RGXCMP is always 151, even is the feature is
+ * not defined). If an optional bridge group is not defined you must
+ * still define PVRSRV_BRIDGE_RGX_xxx_DISPATCH_FIRST for it with an
+ * assigned value of 0.
+ */
+
+/* The RGX bridge groups start at 128 rather than follow-on from the other
+ * non-device bridge groups (meaning that they then won't be displaced if
+ * other non-device bridge groups are added)
+ */
+
+/* 128: RGX TQ interface functions */
+#define PVRSRV_BRIDGE_RGXTQ                      128UL
+#define PVRSRV_BRIDGE_RGXTQ_DISPATCH_FIRST       (PVRSRV_BRIDGE_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_RGXTQ_DISPATCH_LAST        (PVRSRV_BRIDGE_RGXTQ_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXTQ_CMD_LAST)
+
+/* 129: RGX Compute interface functions */
+#define PVRSRV_BRIDGE_RGXCMP                     129UL
+#define PVRSRV_BRIDGE_RGXCMP_DISPATCH_FIRST   (PVRSRV_BRIDGE_RGXTQ_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_RGXCMP_DISPATCH_LAST    (PVRSRV_BRIDGE_RGXCMP_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXCMP_CMD_LAST)
+
+/* 130: RGX Initialisation interface functions */
+#define PVRSRV_BRIDGE_RGXINIT                    130UL
+#define PVRSRV_BRIDGE_RGXINIT_DISPATCH_FIRST     (PVRSRV_BRIDGE_RGXCMP_DISPATCH_LAST +1)
+#define PVRSRV_BRIDGE_RGXINIT_DISPATCH_LAST      (PVRSRV_BRIDGE_RGXINIT_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXINIT_CMD_LAST)
+
+/* 131: RGX TA/3D interface functions */
+#define PVRSRV_BRIDGE_RGXTA3D                    131UL
+#define PVRSRV_BRIDGE_RGXTA3D_DISPATCH_FIRST     (PVRSRV_BRIDGE_RGXINIT_DISPATCH_LAST +1)
+#define PVRSRV_BRIDGE_RGXTA3D_DISPATCH_LAST      (PVRSRV_BRIDGE_RGXTA3D_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXTA3D_CMD_LAST)
+
+/* 132: RGX Breakpoint interface functions */
+#define PVRSRV_BRIDGE_BREAKPOINT                 132UL
+#define PVRSRV_BRIDGE_BREAKPOINT_DISPATCH_FIRST  (PVRSRV_BRIDGE_RGXTA3D_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_BREAKPOINT_DISPATCH_LAST   (PVRSRV_BRIDGE_BREAKPOINT_DISPATCH_FIRST + PVRSRV_BRIDGE_BREAKPOINT_CMD_LAST)
+
+/* 133: RGX Debug/Misc interface functions */
+#define PVRSRV_BRIDGE_DEBUGMISC                  133UL
+#define PVRSRV_BRIDGE_DEBUGMISC_DISPATCH_FIRST   (PVRSRV_BRIDGE_BREAKPOINT_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_DEBUGMISC_DISPATCH_LAST    (PVRSRV_BRIDGE_DEBUGMISC_DISPATCH_FIRST + PVRSRV_BRIDGE_DEBUGMISC_CMD_LAST)
+
+/* 134: RGX PDump interface functions */
+#define PVRSRV_BRIDGE_RGXPDUMP                   134UL
+#define PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_FIRST    (PVRSRV_BRIDGE_DEBUGMISC_DISPATCH_LAST +1)
+#define PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_LAST     (PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXPDUMP_CMD_LAST)
+
+/* 135: RGX HWPerf interface functions */
+#define PVRSRV_BRIDGE_RGXHWPERF                  135UL
+#define PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_FIRST   (PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_LAST    (PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXHWPERF_CMD_LAST)
+
+/* 136: RGX Ray Tracing interface functions */
+#define PVRSRV_BRIDGE_RGXRAY                     136UL
+#if defined(RGX_FEATURE_RAY_TRACING)
+#define PVRSRV_BRIDGE_RGXRAY_DISPATCH_FIRST      (PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_RGXRAY_DISPATCH_LAST       (PVRSRV_BRIDGE_RGXRAY_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXRAY_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_RGXRAY_DISPATCH_FIRST      0
+#define PVRSRV_BRIDGE_RGXRAY_DISPATCH_LAST       (PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_LAST)
+#endif
+
+/* 137: RGX Register Configuration interface functions */
+#define PVRSRV_BRIDGE_REGCONFIG                  137UL
+#define PVRSRV_BRIDGE_REGCONFIG_DISPATCH_FIRST   (PVRSRV_BRIDGE_RGXRAY_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_REGCONFIG_DISPATCH_LAST    (PVRSRV_BRIDGE_REGCONFIG_DISPATCH_FIRST + PVRSRV_BRIDGE_REGCONFIG_CMD_LAST)
+
+/* 138: RGX Timer Query interface functions */
+#define PVRSRV_BRIDGE_TIMERQUERY                 138UL
+#define PVRSRV_BRIDGE_TIMERQUERY_DISPATCH_FIRST  (PVRSRV_BRIDGE_REGCONFIG_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_TIMERQUERY_DISPATCH_LAST   (PVRSRV_BRIDGE_TIMERQUERY_DISPATCH_FIRST + PVRSRV_BRIDGE_TIMERQUERY_CMD_LAST)
+
+#define PVRSRV_BRIDGE_RGX_LAST                   (PVRSRV_BRIDGE_TIMERQUERY)
+#define PVRSRV_BRIDGE_RGX_DISPATCH_LAST          (PVRSRV_BRIDGE_TIMERQUERY_DISPATCH_LAST)
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* __RGX_BRIDGE_H__ */
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/include/rgx_fwif.h b/drivers/external_drivers/intel_media/graphics/rgx/services/include/rgx_fwif.h
new file mode 100644
index 0000000..3e46d1a
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/include/rgx_fwif.h
@@ -0,0 +1,474 @@
+/*************************************************************************/ /*!
+@File			rgx_fwif.h
+@Title          RGX firmware interface structures
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX firmware interface structures used by srvinit and server
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__RGX_FWIF_H__)
+#define __RGX_FWIF_H__
+
+#include "rgx_meta.h"
+#include "rgx_fwif_shared.h"
+
+#include "pvr_tlcommon.h"
+
+/*************************************************************************/ /*!
+ Logging type
+*/ /**************************************************************************/
+#define RGXFWIF_LOG_TYPE_NONE			0x00000000
+#define RGXFWIF_LOG_TYPE_TRACE			0x00000001
+#define RGXFWIF_LOG_TYPE_GROUP_MAIN		0x00000002
+#define RGXFWIF_LOG_TYPE_GROUP_MTS		0x00000004
+#define RGXFWIF_LOG_TYPE_GROUP_CLEANUP	0x00000008
+#define RGXFWIF_LOG_TYPE_GROUP_CSW		0x00000010
+#define RGXFWIF_LOG_TYPE_GROUP_BIF		0x00000020
+#define RGXFWIF_LOG_TYPE_GROUP_PM		0x00000040
+#define RGXFWIF_LOG_TYPE_GROUP_RTD		0x00000080
+#define RGXFWIF_LOG_TYPE_GROUP_SPM		0x00000100
+#define RGXFWIF_LOG_TYPE_GROUP_POW		0x00000200
+#define RGXFWIF_LOG_TYPE_GROUP_HWR		0x00000400
+#define RGXFWIF_LOG_TYPE_GROUP_HWP		0x00000800
+#define RGXFWIF_LOG_TYPE_GROUP_RPM		0x00001000
+#define RGXFWIF_LOG_TYPE_GROUP_DMA		0x00002000
+#define RGXFWIF_LOG_TYPE_GROUP_DEBUG	0x80000000
+#define RGXFWIF_LOG_TYPE_GROUP_MASK		0x80003FFE
+#define RGXFWIF_LOG_TYPE_MASK			0x80003FFF
+
+/* String used in pvrdebug -h output */
+#define RGXFWIF_LOG_GROUPS_STRING_LIST   "main,mts,cleanup,csw,bif,pm,rtd,spm,pow,hwr,hwp"
+
+/* Table entry to map log group strings to log type value */
+typedef struct {
+	const IMG_CHAR* pszLogGroupName;
+	IMG_UINT32      ui32LogGroupType;
+} RGXFWIF_LOG_GROUP_MAP_ENTRY;
+
+/*
+  Macro for use with the RGXFWIF_LOG_GROUP_MAP_ENTRY type to create a lookup
+  table where needed. Keep log group names short, no more than 20 chars.
+*/
+#define RGXFWIF_LOG_GROUP_NAME_VALUE_MAP { "main",    RGXFWIF_LOG_TYPE_GROUP_MAIN }, \
+                                         { "mts",     RGXFWIF_LOG_TYPE_GROUP_MTS }, \
+                                         { "cleanup", RGXFWIF_LOG_TYPE_GROUP_CLEANUP }, \
+                                         { "csw",     RGXFWIF_LOG_TYPE_GROUP_CSW }, \
+                                         { "bif",     RGXFWIF_LOG_TYPE_GROUP_BIF }, \
+                                         { "pm",      RGXFWIF_LOG_TYPE_GROUP_PM }, \
+                                         { "rtd",     RGXFWIF_LOG_TYPE_GROUP_RTD }, \
+                                         { "spm",     RGXFWIF_LOG_TYPE_GROUP_SPM }, \
+                                         { "pow",     RGXFWIF_LOG_TYPE_GROUP_POW }, \
+                                         { "hwr",     RGXFWIF_LOG_TYPE_GROUP_HWR }, \
+                                         { "hwp",     RGXFWIF_LOG_TYPE_GROUP_HWP }, \
+                                         { "rpm",	  RGXFWIF_LOG_TYPE_GROUP_RPM }, \
+                                         { "dma",     RGXFWIF_LOG_TYPE_GROUP_DMA }, \
+                                         { "debug",   RGXFWIF_LOG_TYPE_GROUP_DEBUG }
+
+
+/* Used in print statements to display log group state, one %s per group defined */
+#define RGXFWIF_LOG_ENABLED_GROUPS_LIST_PFSPEC  "%s%s%s%s%s%s%s%s%s%s%s%s%s%s"
+
+/* Used in a print statement to display log group state, one per group */
+#define RGXFWIF_LOG_ENABLED_GROUPS_LIST(types)  (((types) & RGXFWIF_LOG_TYPE_GROUP_MAIN)	?("main ")		:("")),		\
+                                                (((types) & RGXFWIF_LOG_TYPE_GROUP_MTS)		?("mts ")		:("")),		\
+                                                (((types) & RGXFWIF_LOG_TYPE_GROUP_CLEANUP)	?("cleanup ")	:("")),		\
+                                                (((types) & RGXFWIF_LOG_TYPE_GROUP_CSW)		?("csw ")		:("")),		\
+                                                (((types) & RGXFWIF_LOG_TYPE_GROUP_BIF)		?("bif ")		:("")),		\
+                                                (((types) & RGXFWIF_LOG_TYPE_GROUP_PM)		?("pm ")		:("")),		\
+                                                (((types) & RGXFWIF_LOG_TYPE_GROUP_RTD)		?("rtd ")		:("")),		\
+                                                (((types) & RGXFWIF_LOG_TYPE_GROUP_SPM)		?("spm ")		:("")),		\
+                                                (((types) & RGXFWIF_LOG_TYPE_GROUP_POW)		?("pow ")		:("")),		\
+                                                (((types) & RGXFWIF_LOG_TYPE_GROUP_HWR)		?("hwr ")		:("")),		\
+                                                (((types) & RGXFWIF_LOG_TYPE_GROUP_HWP)		?("hwp ")		:("")),		\
+                                                (((types) & RGXFWIF_LOG_TYPE_GROUP_RPM)		?("rpm ")		:("")),		\
+                                                (((types) & RGXFWIF_LOG_TYPE_GROUP_DMA)		?("dma ")		:("")),		\
+                                                (((types) & RGXFWIF_LOG_TYPE_GROUP_DEBUG)	?("debug ")		:(""))
+
+
+/*! Logging function */
+typedef IMG_VOID (*PFN_RGXFW_LOG) (const IMG_CHAR* pszFmt, ...);
+
+
+/*!
+ ******************************************************************************
+ * HWPERF
+ *****************************************************************************/
+/* Size of the Firmware L1 HWPERF buffer in bytes (256KB). Accessed by the
+ * Firmware and host driver. */
+#define RGXFW_HWPERF_L1_SIZE_MIN		(0x004000)
+#define RGXFW_HWPERF_L1_SIZE_DEFAULT    (0x040000)
+#define RGXFW_HWPERF_L1_SIZE_MAX        (0xC00000)
+/* This padding value must always be greater than or equal to
+ * RGX_HWPERF_V2_MAX_PACKET_SIZE for all valid BVNCs. This is asserted in
+ * rgxsrvinit.c. This macro is defined with a constant to avoid a KM
+ * dependency */
+#define RGXFW_HWPERF_L1_PADDING_DEFAULT (0x800)
+
+
+/*!
+ ******************************************************************************
+ * Trace Buffer
+ *****************************************************************************/
+
+/*! Number of elements on each line when dumping the trace buffer */
+#define RGXFW_TRACE_BUFFER_LINESIZE	(30)
+
+/*! Total size of RGXFWIF_TRACEBUF dword (needs to be a multiple of RGXFW_TRACE_BUFFER_LINESIZE) */
+#define RGXFW_TRACE_BUFFER_SIZE		(400*RGXFW_TRACE_BUFFER_LINESIZE)
+#define RGXFW_TRACE_BUFFER_ASSERT_SIZE 200
+#define RGXFW_THREAD_NUM 1
+
+#define RGXFW_POLL_TYPE_SET 0x80000000
+
+typedef struct _RGXFWIF_ASSERTBUF_
+{
+	IMG_CHAR	szPath[RGXFW_TRACE_BUFFER_ASSERT_SIZE];
+	IMG_CHAR	szInfo[RGXFW_TRACE_BUFFER_ASSERT_SIZE];
+	IMG_UINT32	ui32LineNum;
+} UNCACHED_ALIGN RGXFWIF_ASSERTBUF;
+
+typedef struct _RGXFWIF_TRACEBUF_SPACE_
+{
+	IMG_UINT32			ui32TracePointer;
+	IMG_UINT32			aui32TraceBuffer[RGXFW_TRACE_BUFFER_SIZE];
+	RGXFWIF_ASSERTBUF	sAssertBuf;
+} UNCACHED_ALIGN RGXFWIF_TRACEBUF_SPACE;
+
+#define RGXFWIF_POW_STATES \
+  X(RGXFWIF_POW_OFF)			/* idle and handshaked with the host (ready to full power down) */ \
+  X(RGXFWIF_POW_ON)				/* running HW mds */ \
+  X(RGXFWIF_POW_FORCED_IDLE)	/* forced idle */ \
+  X(RGXFWIF_POW_IDLE)			/* idle waiting for host handshake */
+
+typedef enum _RGXFWIF_POW_STATE_
+{
+#define X(NAME) NAME,
+	RGXFWIF_POW_STATES
+#undef X
+} RGXFWIF_POW_STATE;
+
+/* Firmware HWR states */
+#define RGXFWIF_HWR_HARDWARE_OK		(0x1 << 0)	/*!< Tells if the HW state is ok or locked up */
+#define RGXFWIF_HWR_FREELIST_OK		(0x1 << 1)	/*!< Tells if the freelists are ok or being reconstructed */
+#define RGXFWIF_HWR_ANALYSIS_DONE	(0x1 << 2)	/*!< Tells if the analysis of a GPU lockup has already been performed */
+#define RGXFWIF_HWR_GENERAL_LOCKUP	(0x1 << 3)	/*!< Tells if a DM unrelated lockup has been detected */
+typedef IMG_UINT32 RGXFWIF_HWR_STATEFLAGS;
+
+/* Firmware per-DM HWR states */
+#define RGXFWIF_DM_STATE_WORKING 					(0x00)		/*!< DM is working if all flags are cleared */
+#define RGXFWIF_DM_STATE_READY_FOR_HWR 				(0x1 << 0)	/*!< DM is idle and ready for HWR */
+#define RGXFWIF_DM_STATE_NEEDS_FL_RECONSTRUCTION	(0x1 << 1)	/*!< DM need FL reconstruction before resuming processing */
+#define RGXFWIF_DM_STATE_NEEDS_SKIP					(0x1 << 2)	/*!< DM need to skip to next cmd before resuming processing */
+#define RGXFWIF_DM_STATE_NEEDS_PR_CLEANUP			(0x1 << 3)	/*!< DM need partial render cleanup before resuming processing */
+#define RGXFWIF_DM_STATE_NEEDS_TRACE_CLEAR			(0x1 << 4)	/*!< DM need to increment Recovery Count once fully recovered */
+#define RGXFWIF_DM_STATE_GUILTY_LOCKUP				(0x1 << 5)	/*!< DM was identified as locking up and causing HWR */
+#define RGXFWIF_DM_STATE_INNOCENT_LOCKUP			(0x1 << 6)	/*!< DM was innocently affected by another lockup which caused HWR */
+#define RGXFWIF_DM_STATE_GUILTY_OVERRUNING			(0x1 << 7)	/*!< DM was identified as over-running and causing HWR */
+#define RGXFWIF_DM_STATE_INNOCENT_OVERRUNING		(0x1 << 8)	/*!< DM was innocently affected by another DM over-running which caused HWR */
+typedef IMG_UINT32 RGXFWIF_HWR_RECOVERYFLAGS;
+
+typedef struct _RGXFWIF_TRACEBUF_
+{
+    IMG_UINT32				ui32LogType;
+	RGXFWIF_POW_STATE		ePowState;
+	RGXFWIF_TRACEBUF_SPACE	sTraceBuf[RGXFW_THREAD_NUM];
+
+	IMG_UINT16				aui16HwrDmLockedUpCount[RGXFWIF_DM_MAX];
+	IMG_UINT16				aui16HwrDmOverranCount[RGXFWIF_DM_MAX];
+	IMG_UINT16				aui16HwrDmRecoveredCount[RGXFWIF_DM_MAX];
+	IMG_UINT16				aui16HwrDmFalseDetectCount[RGXFWIF_DM_MAX];
+	IMG_UINT32				ui32HwrCounter;
+	RGXFWIF_DEV_VIRTADDR	apsHwrDmFWCommonContext[RGXFWIF_DM_MAX];
+
+	IMG_UINT32				aui32CrPollAddr[RGXFW_THREAD_NUM];
+	IMG_UINT32				aui32CrPollMask[RGXFW_THREAD_NUM];
+
+	RGXFWIF_HWR_STATEFLAGS		ui32HWRStateFlags;
+	RGXFWIF_HWR_RECOVERYFLAGS	aui32HWRRecoveryFlags[RGXFWIF_HWDM_MAX];
+
+	volatile IMG_UINT32		ui32HWPerfRIdx;
+	volatile IMG_UINT32		ui32HWPerfWIdx;
+	volatile IMG_UINT32		ui32HWPerfWrapCount;
+	IMG_UINT32				ui32HWPerfSize;      /* Constant after setup, needed in FW */
+	IMG_UINT32				ui32HWPerfDropCount; /* The number of times the FW drops a packet due to buffer full */
+	
+	/* These next three items are only valid at runtime when the FW is built
+	 * with RGX_HWPERF_UTILIZATION defined in rgxfw_hwperf.c */
+	IMG_UINT32				ui32HWPerfUt;        /* Buffer utilisation, high watermark of bytes in use */
+	IMG_UINT32				ui32FirstDropOrdinal;/* The ordinal of the first packet the FW dropped */
+	IMG_UINT32              ui32LastDropOrdinal; /* The ordinal of the last packet the FW dropped */
+
+	IMG_UINT32				ui32InterruptCount;
+	IMG_UINT32				ui32KCCBCmdsExecuted;
+	IMG_UINT64 RGXFW_ALIGN			ui64StartIdleTime;
+	IMG_UINT32				ui32PowMonEnergy;	/* Non-volatile power monitor energy count */
+} UNCACHED_ALIGN RGXFWIF_TRACEBUF;
+
+
+/*!
+ ******************************************************************************
+ * GPU Utilisation
+ *****************************************************************************/
+#define RGXFWIF_GPU_STATS_MAX_VALUE_OF_STATE  10000
+
+#define RGXFWIF_GPU_UTIL_STATE_ACTIVE_LOW     (0U)
+#define RGXFWIF_GPU_UTIL_STATE_IDLE           (1U)
+#define RGXFWIF_GPU_UTIL_STATE_ACTIVE_HIGH    (2U)
+#define RGXFWIF_GPU_UTIL_STATE_BLOCKED        (3U)
+#define RGXFWIF_GPU_UTIL_STATE_NUM            (4U)
+
+#define RGXFWIF_GPU_UTIL_TIME_MASK            IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)
+#define RGXFWIF_GPU_UTIL_STATE_MASK           IMG_UINT64_C(0x0000000000000003)
+
+#define RGXFWIF_GPU_UTIL_GET_TIME(word)       ((word) & RGXFWIF_GPU_UTIL_TIME_MASK)
+#define RGXFWIF_GPU_UTIL_GET_STATE(word)      ((word) & RGXFWIF_GPU_UTIL_STATE_MASK)
+
+/* The OS timestamps computed by the FW are approximations of the real time,
+ * which means they could be slightly behind or ahead the real timer on the Host.
+ * In some cases we can perform subtractions between FW approximated
+ * timestamps and real OS timestamps, so we need a form of protection against
+ * negative results if for instance the FW one is a bit ahead of time.
+ */
+#define RGXFWIF_GPU_UTIL_GET_PERIOD(newtime,oldtime) \
+	((newtime) > (oldtime) ? ((newtime) - (oldtime)) : 0)
+
+#define RGXFWIF_GPU_UTIL_MAKE_WORD(time,state) \
+	(RGXFWIF_GPU_UTIL_GET_TIME(time) | RGXFWIF_GPU_UTIL_GET_STATE(state))
+
+
+/* The timer correlation array must be big enough to ensure old entries won't be
+ * overwritten before all the HWPerf events linked to those entries are processed
+ * by the MISR. The update frequency of this array depends on how fast the system
+ * can change state (basically how small the APM latency is) and perform DVFS transitions.
+ *
+ * The minimum size is 2 (not 1) to avoid race conditions between the FW reading
+ * an entry while the Host is updating it. With 2 entries in the worst case the FW
+ * will read old data, which is still quite ok if the Host is updating the timer
+ * correlation at that time.
+ */
+#define RGXFWIF_TIME_CORR_ARRAY_SIZE            256
+#define RGXFWIF_TIME_CORR_CURR_INDEX(seqcount)  ((seqcount) % RGXFWIF_TIME_CORR_ARRAY_SIZE)
+
+/* Make sure the timer correlation array size is a power of 2 */
+BLD_ASSERT(((RGXFWIF_TIME_CORR_ARRAY_SIZE & (RGXFWIF_TIME_CORR_ARRAY_SIZE - 1)) == 0), rgx_fwif_h)
+
+typedef struct _RGXFWIF_GPU_UTIL_FWCB_
+{
+	RGXFWIF_TIME_CORR sTimeCorr[RGXFWIF_TIME_CORR_ARRAY_SIZE];
+	IMG_UINT32        ui32TimeCorrSeqCount;
+
+	/* Last GPU state + OS time of the last state update */
+	IMG_UINT64 RGXFW_ALIGN ui64LastWord;
+
+	/* Counters for the amount of time the GPU was active/idle/blocked */
+	IMG_UINT64 RGXFW_ALIGN aui64StatsCounters[RGXFWIF_GPU_UTIL_STATE_NUM];
+} UNCACHED_ALIGN RGXFWIF_GPU_UTIL_FWCB;
+
+
+/*!
+ ******************************************************************************
+ * HWR Data
+ *****************************************************************************/
+typedef enum _RGX_HWRTYPE_
+{
+	RGX_HWRTYPE_UNKNOWNFAILURE  = 0,
+	RGX_HWRTYPE_OVERRUN         = 1,
+	RGX_HWRTYPE_POLLFAILURE     = 2,
+#if !defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE)
+	RGX_HWRTYPE_BIF0FAULT       = 3,
+	RGX_HWRTYPE_BIF1FAULT       = 4,
+#if defined(RGX_FEATURE_CLUSTER_GROUPING)
+	RGX_HWRTYPE_TEXASBIF0FAULT	= 5,
+#endif
+#if defined(RGX_FEATURE_RAY_TRACING)
+	RGX_HWRTYPE_DPXMMUFAULT		= 6,
+#endif
+#else
+	RGX_HWRTYPE_MMUFAULT        = 7,
+	RGX_HWRTYPE_MMUMETAFAULT    = 8,
+#endif
+} RGX_HWRTYPE;
+
+#define RGXFWIF_BIFFAULTBIT_GET(ui32BIFMMUStatus) \
+		((ui32BIFMMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_CLRMSK) >> RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_SHIFT)
+#define RGXFWIF_MMUFAULTBIT_GET(ui32BIFMMUStatus) \
+		((ui32BIFMMUStatus & ~RGX_CR_MMU_FAULT_STATUS_FAULT_CLRMSK) >> RGX_CR_MMU_FAULT_STATUS_FAULT_SHIFT)
+
+#define RGXFWIF_HWRTYPE_BIF_BANK_GET(eHWRType) ((eHWRType == RGX_HWRTYPE_BIF0FAULT) ? 0 : 1 )
+
+typedef struct _RGX_BIFINFO_
+{
+	IMG_UINT64	RGXFW_ALIGN		ui64BIFReqStatus;
+	IMG_UINT64	RGXFW_ALIGN		ui64BIFMMUStatus;
+	IMG_UINT64	RGXFW_ALIGN		ui64PCAddress; /*!< phys address of the page catalogue */
+} RGX_BIFINFO;
+
+typedef struct _RGX_MMUINFO_
+{
+	IMG_UINT64	RGXFW_ALIGN		ui64MMUStatus;
+} RGX_MMUINFO;
+
+typedef struct _RGX_POLLINFO_
+{
+	IMG_UINT32	ui32ThreadNum;
+	IMG_UINT32 	ui32CrPollAddr;
+	IMG_UINT32 	ui32CrPollMask;
+} UNCACHED_ALIGN RGX_POLLINFO;
+
+typedef struct _RGX_HWRINFO_
+{
+	union
+	{
+		RGX_BIFINFO		sBIFInfo;
+		RGX_MMUINFO		sMMUInfo;
+		RGX_POLLINFO	sPollInfo;
+	} uHWRData;
+
+	IMG_UINT64	RGXFW_ALIGN		ui64CRTimer;
+	IMG_UINT32					ui32FrameNum;
+	IMG_UINT32					ui32PID;
+	IMG_UINT32					ui32ActiveHWRTData;
+	IMG_UINT32					ui32HWRNumber;
+	IMG_UINT32					ui32EventStatus;
+	IMG_UINT32					ui32HWRRecoveryFlags;
+	RGX_HWRTYPE 				eHWRType;
+	RGXFWIF_TIME_CORR			sTimeCorr;
+
+	RGXFWIF_DM					eDM;
+} UNCACHED_ALIGN RGX_HWRINFO;
+
+#define RGXFWIF_HWINFO_MAX_FIRST 8							/* Number of first HWR logs recorded (never overwritten by newer logs) */
+#define RGXFWIF_HWINFO_MAX_LAST 8							/* Number of latest HWR logs (older logs are overwritten by newer logs) */
+#define RGXFWIF_HWINFO_MAX (RGXFWIF_HWINFO_MAX_FIRST + RGXFWIF_HWINFO_MAX_LAST)	/* Total number of HWR logs stored in a buffer */
+#define RGXFWIF_HWINFO_LAST_INDEX (RGXFWIF_HWINFO_MAX - 1)	/* Index of the last log in the HWR log buffer */
+typedef struct _RGXFWIF_HWRINFOBUF_
+{
+	RGX_HWRINFO sHWRInfo[RGXFWIF_HWINFO_MAX];
+
+	IMG_UINT32	ui32FirstCrPollAddr[RGXFW_THREAD_NUM];
+	IMG_UINT32	ui32FirstCrPollMask[RGXFW_THREAD_NUM];
+	IMG_UINT32	ui32WriteIndex;
+	IMG_UINT32	ui32DDReqCount;
+} UNCACHED_ALIGN RGXFWIF_HWRINFOBUF;
+
+
+/*!
+ ******************************************************************************
+ * RGX firmware Init Config Data
+ *****************************************************************************/
+#define RGXFWIF_INICFG_CTXSWITCH_TA_EN		(0x1 << 0)
+#define RGXFWIF_INICFG_CTXSWITCH_3D_EN		(0x1 << 1)
+#define RGXFWIF_INICFG_CTXSWITCH_CDM_EN		(0x1 << 2)
+#define RGXFWIF_INICFG_CTXSWITCH_MODE_RAND	(0x1 << 3)
+#define RGXFWIF_INICFG_CTXSWITCH_SRESET_EN	(0x1 << 4)
+#define RGXFWIF_INICFG_RSVD					(0x1 << 5)
+#define RGXFWIF_INICFG_POW_RASCALDUST		(0x1 << 6)
+#define RGXFWIF_INICFG_HWPERF_EN			(0x1 << 7)
+#define RGXFWIF_INICFG_HWR_EN				(0x1 << 8)
+#define RGXFWIF_INICFG_CHECK_MLIST_EN		(0x1 << 9)
+#define RGXFWIF_INICFG_DISABLE_CLKGATING_EN (0x1 << 10)
+#define RGXFWIF_INICFG_POLL_COUNTERS_EN		(0x1 << 11)
+#define RGXFWIF_INICFG_VDM_CTX_STORE_MODE_INDEX		(RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_INDEX << 12)
+#define RGXFWIF_INICFG_VDM_CTX_STORE_MODE_INSTANCE	(RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_INSTANCE << 12)
+#define RGXFWIF_INICFG_VDM_CTX_STORE_MODE_LIST		(RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_LIST << 12)
+#define RGXFWIF_INICFG_VDM_CTX_STORE_MODE_CLRMSK	(0xFFFFCFFFU)
+#define RGXFWIF_INICFG_VDM_CTX_STORE_MODE_SHIFT		(12)
+#define RGXFWIF_INICFG_SHG_BYPASS_EN		(0x1 << 14)
+#define RGXFWIF_INICFG_RTU_BYPASS_EN		(0x1 << 15)
+#define RGXFWIF_INICFG_REGCONFIG_EN		(0x1 << 16)
+#define RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY	(0x1 << 17)
+#define RGXFWIF_INICFG_HWP_DISABLE_FILTER	(0x1 << 18)
+#define RGXFWIF_INICFG_CUSTOM_PERF_TIMER_EN	(0x1 << 19)
+#define RGXFWIF_INICFG_CDM_KILL_MODE_RAND_EN	(0x1 << 20)
+#define RGXFWIF_INICFG_DISABLE_DM_OVERLAP	(0x1 << 21)
+#define RGXFWIF_INICFG_ALL					(0x003FFFDFU)
+#define RGXFWIF_SRVCFG_DISABLE_PDP_EN 		(0x1 << 31)
+#define RGXFWIF_SRVCFG_ALL					(0x80000000U)
+#define RGXFWIF_FILTCFG_TRUNCATE_HALF		(0x1 << 3)
+#define RGXFWIF_FILTCFG_TRUNCATE_INT		(0x1 << 2)
+#define RGXFWIF_FILTCFG_NEW_FILTER_MODE		(0x1 << 1)
+
+#define RGXFWIF_INICFG_CTXSWITCH_DM_ALL		(RGXFWIF_INICFG_CTXSWITCH_TA_EN | \
+											 RGXFWIF_INICFG_CTXSWITCH_3D_EN | \
+											 RGXFWIF_INICFG_CTXSWITCH_CDM_EN)
+
+#define RGXFWIF_INICFG_CTXSWITCH_CLRMSK		~(RGXFWIF_INICFG_CTXSWITCH_DM_ALL | \
+											 RGXFWIF_INICFG_CTXSWITCH_MODE_RAND | \
+											 RGXFWIF_INICFG_CTXSWITCH_SRESET_EN)
+
+typedef enum
+{
+	RGX_ACTIVEPM_FORCE_OFF = 0,
+	RGX_ACTIVEPM_FORCE_ON = 1,
+	RGX_ACTIVEPM_DEFAULT = 2
+} RGX_ACTIVEPM_CONF;
+
+typedef enum
+{
+	RGX_RD_POWER_ISLAND_FORCE_OFF = 0,
+	RGX_RD_POWER_ISLAND_FORCE_ON = 1,
+	RGX_RD_POWER_ISLAND_DEFAULT = 2
+} RGX_RD_POWER_ISLAND_CONF;
+
+
+/*!
+ ******************************************************************************
+ * Querying DM state
+ *****************************************************************************/
+
+typedef enum _RGXFWIF_DM_STATE_
+{
+	RGXFWIF_DM_STATE_NORMAL			= 0,
+	RGXFWIF_DM_STATE_LOCKEDUP		= 1,
+
+} RGXFWIF_DM_STATE;
+
+typedef struct
+{
+	IMG_UINT16  ui16RegNum;				/*!< Register number */
+	IMG_UINT16  ui16IndirectRegNum;		/*!< Indirect register number (or 0 if not used) */
+	IMG_UINT16  ui16IndirectStartVal;	/*!< Start value for indirect register */
+	IMG_UINT16  ui16IndirectEndVal;		/*!< End value for indirect register */
+} RGXFW_REGISTER_LIST;
+
+#endif /*  __RGX_FWIF_H__ */
+
+/******************************************************************************
+ End of file (rgx_fwif.h)
+******************************************************************************/
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/include/rgx_fwif_alignchecks_km.h b/drivers/external_drivers/intel_media/graphics/rgx/services/include/rgx_fwif_alignchecks_km.h
new file mode 100644
index 0000000..158abba
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/include/rgx_fwif_alignchecks_km.h
@@ -0,0 +1,86 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX fw interface alignment checks
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Checks to avoid disalignment in RGX fw data structures shared with the host
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__RGX_FWIF_ALIGNCHECKS_KM_H__)
+#define __RGX_FWIF_ALIGNCHECKS_KM_H__
+
+/* for the offsetof macro */
+#include <stddef.h> 
+
+/*!
+ ******************************************************************************
+ * Alignment checks array
+ *****************************************************************************/
+
+#define RGXFW_ALIGN_CHECKS_INIT_KM							\
+		sizeof(RGXFWIF_INIT),								\
+		offsetof(RGXFWIF_INIT, sFaultPhysAddr),			\
+		offsetof(RGXFWIF_INIT, sPDSExecBase),				\
+		offsetof(RGXFWIF_INIT, sUSCExecBase),				\
+		offsetof(RGXFWIF_INIT, psKernelCCBCtl),				\
+		offsetof(RGXFWIF_INIT, psKernelCCB),				\
+		offsetof(RGXFWIF_INIT, psFirmwareCCBCtl),			\
+		offsetof(RGXFWIF_INIT, psFirmwareCCB),				\
+		offsetof(RGXFWIF_INIT, eDM),						\
+		offsetof(RGXFWIF_INIT, asSigBufCtl),				\
+		offsetof(RGXFWIF_INIT, psTraceBufCtl),				\
+		offsetof(RGXFWIF_INIT, sRGXCompChecks),				\
+															\
+		/* RGXFWIF_FWRENDERCONTEXT checks */				\
+		sizeof(RGXFWIF_FWRENDERCONTEXT),					\
+		offsetof(RGXFWIF_FWRENDERCONTEXT, sTAContext),		\
+		offsetof(RGXFWIF_FWRENDERCONTEXT, s3DContext),		\
+															\
+		sizeof(RGXFWIF_FWCOMMONCONTEXT),					\
+		offsetof(RGXFWIF_FWCOMMONCONTEXT, psFWMemContext),	\
+		offsetof(RGXFWIF_FWCOMMONCONTEXT, sRunNode),		\
+		offsetof(RGXFWIF_FWCOMMONCONTEXT, psCCB),			\
+		offsetof(RGXFWIF_FWCOMMONCONTEXT, ui64MCUFenceAddr)
+
+#endif /*  __RGX_FWIF_ALIGNCHECKS_KM_H__ */
+
+/******************************************************************************
+ End of file (rgx_fwif_alignchecks_km.h)
+******************************************************************************/
+
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/include/rgx_fwif_km.h b/drivers/external_drivers/intel_media/graphics/rgx/services/include/rgx_fwif_km.h
new file mode 100644
index 0000000..5557d07
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/include/rgx_fwif_km.h
@@ -0,0 +1,828 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX firmware interface structures used by pvrsrvkm
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX firmware interface structures used by pvrsrvkm
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__RGX_FWIF_KM_H__)
+#define __RGX_FWIF_KM_H__
+
+#include "img_types.h"
+#include "rgx_fwif_shared.h"
+#include "rgxdefs_km.h"
+#include "pvr_debug.h"
+#include "dllist.h"
+
+#if defined(RGX_FIRMWARE)
+/* Compiling the actual firmware - use a fully typed pointer */
+typedef struct _RGXFWIF_HOST_CTL_			*PRGXFWIF_HOST_CTL;
+typedef struct _RGXFWIF_CCB_CTL_			*PRGXFWIF_CCB_CTL;
+typedef IMG_UINT8							*PRGXFWIF_CCB;
+typedef struct _RGXFWIF_FWMEMCONTEXT_		*PRGXFWIF_FWMEMCONTEXT;
+typedef struct _RGXFWIF_FWRENDERCONTEXT_	*PRGXFWIF_FWRENDERCONTEXT;
+typedef struct _RGXFWIF_FWTQ2DCONTEXT_		*PRGXFWIF_FWTQ2DCONTEXT;
+typedef struct _RGXFWIF_FWTQ3DCONTEXT_		*PRGXFWIF_FWTQ3DCONTEXT;
+typedef struct _RGXFWIF_FWCOMPUTECONTEXT_	*PRGXFWIF_FWCOMPUTECONTEXT;
+typedef struct _RGXFWIF_FWCOMMONCONTEXT_	*PRGXFWIF_FWCOMMONCONTEXT;
+typedef struct _RGXFWIF_ZSBUFFER_			*PRGXFWIF_ZSBUFFER;
+typedef IMG_UINT32							*PRGXFWIF_SIGBUFFER;
+typedef struct _RGXFWIF_INIT_				*PRGXFWIF_INIT;
+typedef struct _RGXFWIF_RUNTIME_CFG			*PRGXFWIF_RUNTIME_CFG;
+typedef struct _RGXFW_UNITTESTS_			*PRGXFW_UNITTESTS;
+typedef struct _RGXFWIF_TRACEBUF_			*PRGXFWIF_TRACEBUF;
+typedef IMG_UINT8							*PRGXFWIF_HWPERFINFO;
+typedef struct _RGXFWIF_HWRINFOBUF_			*PRGXFWIF_HWRINFOBUF;
+typedef struct _RGXFWIF_GPU_UTIL_FWCB_		*PRGXFWIF_GPU_UTIL_FWCB;
+typedef struct _RGXFWIF_REG_CFG_		*PRGXFWIF_REG_CFG;
+typedef IMG_UINT8							*PRGXFWIF_COMMONCTX_STATE;
+typedef struct _RGXFWIF_TACTX_STATE_		*PRGXFWIF_TACTX_STATE;
+typedef struct _RGXFWIF_3DCTX_STATE_		*PRGXFWIF_3DCTX_STATE;
+typedef struct _RGXFWIF_COMPUTECTX_STATE_	*PRGXFWIF_COMPUTECTX_STATE;
+typedef struct _RGXFWIF_VRDMCTX_STATE_		*PRGXFWIF_VRDMCTX_STATE;
+typedef IMG_UINT8							*PRGXFWIF_RF_CMD;
+typedef struct _RGXFWIF_COMPCHECKS_			*PRGXFWIF_COMPCHECKS;
+typedef struct _RGX_HWPERF_CONFIG_CNTBLK_	*PRGX_HWPERF_CONFIG_CNTBLK;
+typedef IMG_UINT32                          *PRGX_HWPERF_SELECT_CUSTOM_CNTRS;
+typedef DLLIST_NODE							RGXFWIF_DLLIST_NODE;
+typedef struct _RGXFWIF_HWPERF_CTL_			*PRGXFWIF_HWPERF_CTL;
+#else
+/* Compiling the host driver - use a firmware device virtual pointer */
+typedef RGXFWIF_DEV_VIRTADDR				PRGXFWIF_HOST_CTL;
+typedef RGXFWIF_DEV_VIRTADDR				PRGXFWIF_CCB_CTL;
+typedef RGXFWIF_DEV_VIRTADDR				PRGXFWIF_CCB;
+typedef RGXFWIF_DEV_VIRTADDR				PRGXFWIF_FWMEMCONTEXT;
+typedef RGXFWIF_DEV_VIRTADDR				PRGXFWIF_FWRENDERCONTEXT;
+typedef RGXFWIF_DEV_VIRTADDR				PRGXFWIF_FWTQ2DCONTEXT;
+typedef RGXFWIF_DEV_VIRTADDR				PRGXFWIF_FWTQ3DCONTEXT;
+typedef RGXFWIF_DEV_VIRTADDR				PRGXFWIF_FWCOMPUTECONTEXT;
+typedef RGXFWIF_DEV_VIRTADDR				PRGXFWIF_FWCOMMONCONTEXT;
+typedef RGXFWIF_DEV_VIRTADDR				PRGXFWIF_ZSBUFFER;
+typedef RGXFWIF_DEV_VIRTADDR				PRGXFWIF_SIGBUFFER;
+typedef RGXFWIF_DEV_VIRTADDR				PRGXFWIF_INIT;
+typedef RGXFWIF_DEV_VIRTADDR				PRGXFWIF_RUNTIME_CFG;
+typedef RGXFWIF_DEV_VIRTADDR				PRGXFW_UNITTESTS;
+typedef RGXFWIF_DEV_VIRTADDR				PRGXFWIF_TRACEBUF;
+typedef RGXFWIF_DEV_VIRTADDR				PRGXFWIF_HWPERFINFO;
+typedef RGXFWIF_DEV_VIRTADDR				PRGXFWIF_HWRINFOBUF;
+typedef RGXFWIF_DEV_VIRTADDR				PRGXFWIF_GPU_UTIL_FWCB;
+typedef RGXFWIF_DEV_VIRTADDR				PRGXFWIF_REG_CFG;
+typedef RGXFWIF_DEV_VIRTADDR				PRGXFWIF_COMMONCTX_STATE;
+typedef RGXFWIF_DEV_VIRTADDR				PRGXFWIF_RF_CMD;
+typedef RGXFWIF_DEV_VIRTADDR				PRGXFWIF_COMPCHECKS;
+typedef RGXFWIF_DEV_VIRTADDR				PRGX_HWPERF_CONFIG_CNTBLK;
+typedef RGXFWIF_DEV_VIRTADDR                PRGX_HWPERF_SELECT_CUSTOM_CNTRS;
+typedef struct {RGXFWIF_DEV_VIRTADDR p;
+				  RGXFWIF_DEV_VIRTADDR n;}	RGXFWIF_DLLIST_NODE;
+typedef RGXFWIF_DEV_VIRTADDR				PRGXFWIF_HWPERF_CTL;
+#endif /* RGX_FIRMWARE */
+
+/*!
+ * This number is used to represent an invalid page catalogue physical address
+ */
+#define RGXFWIF_INVALID_PC_PHYADDR 0xFFFFFFFFFFFFFFFFLLU
+
+/*!
+	Firmware memory context.
+*/
+typedef struct _RGXFWIF_FWMEMCONTEXT_
+{
+	IMG_DEV_PHYADDR			RGXFW_ALIGN sPCDevPAddr;	/*!< device physical address of context's page catalogue */
+	IMG_INT32				uiPageCatBaseRegID;	/*!< associated page catalog base register (-1 == unallocated) */
+	IMG_UINT32				uiBreakpointAddr; /*!< breakpoint address */
+	IMG_UINT32				uiBPHandlerAddr;  /*!< breakpoint handler address */
+	IMG_UINT32				uiBreakpointCtl; /*!< DM and enable control for BP */
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+    IMG_UINT32              ui32OSid;
+#endif
+
+} UNCACHED_ALIGN RGXFWIF_FWMEMCONTEXT;
+
+
+/*!
+ * 	FW context state flags
+ */
+#define	RGXFWIF_CONTEXT_TAFLAGS_NEED_RESUME			(0x00000001)
+#define	RGXFWIF_CONTEXT_RENDERFLAGS_NEED_RESUME		(0x00000002)
+#define RGXFWIF_CONTEXT_CDMFLAGS_NEED_RESUME		(0x00000004)
+#define RGXFWIF_CONTEXT_SHGFLAGS_NEED_RESUME		(0x00000008)
+#define RGXFWIF_CONTEXT_ALLFLAGS_NEED_RESUME		(0x0000000F)
+
+
+typedef struct _RGXFWIF_TACTX_STATE_
+{
+	/* FW-accessible TA state which must be written out to memory on context store */
+	IMG_UINT64	RGXFW_ALIGN uTAReg_VDM_CALL_STACK_POINTER;		 /* To store in mid-TA */
+	IMG_UINT64	RGXFW_ALIGN uTAReg_VDM_CALL_STACK_POINTER_Init;	 /* Initial value (in case is 'lost' due to a lock-up */
+	IMG_UINT64	RGXFW_ALIGN uTAReg_VDM_BATCH;	
+	IMG_UINT64	RGXFW_ALIGN uTAReg_VBS_SO_PRIM0;
+	IMG_UINT64	RGXFW_ALIGN uTAReg_VBS_SO_PRIM1;
+	IMG_UINT64	RGXFW_ALIGN uTAReg_VBS_SO_PRIM2;
+	IMG_UINT64	RGXFW_ALIGN uTAReg_VBS_SO_PRIM3;
+} UNCACHED_ALIGN RGXFWIF_TACTX_STATE;
+
+
+typedef struct _RGXFWIF_3DCTX_STATE_
+{
+	/* FW-accessible ISP state which must be written out to memory on context store */
+#if !defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE)
+	IMG_UINT32	RGXFW_ALIGN au3DReg_ISP_STORE[8];
+#else
+	IMG_UINT32	RGXFW_ALIGN au3DReg_ISP_STORE[32];
+#endif
+	IMG_UINT64	RGXFW_ALIGN u3DReg_PM_DEALLOCATED_MASK_STATUS;
+	IMG_UINT64	RGXFW_ALIGN u3DReg_PM_PDS_MTILEFREE_STATUS;
+} UNCACHED_ALIGN RGXFWIF_3DCTX_STATE;
+
+
+
+typedef struct _RGXFWIF_COMPUTECTX_STATE_
+{
+	IMG_UINT64	RGXFW_ALIGN	ui64Padding;
+} RGXFWIF_COMPUTECTX_STATE;
+
+
+typedef struct _RGXFWIF_VRDMCTX_STATE_
+{
+	/* FW-accessible TA state which must be written out to memory on context store */
+	IMG_UINT64	RGXFW_ALIGN uVRDMReg_VRM_CALL_STACK_POINTER;
+	IMG_UINT64	RGXFW_ALIGN uVRDMReg_VRM_BATCH;
+	
+	/* Number of kicks on this context */
+	IMG_UINT32  ui32NumKicks;
+} UNCACHED_ALIGN RGXFWIF_VRDMCTX_STATE;
+
+
+typedef struct _RGXFWIF_FWCOMMONCONTEXT_
+{
+	/*
+		Used by bg and irq context
+	*/
+	/* CCB details for this firmware context */
+	PRGXFWIF_CCCB_CTL		psCCBCtl;				/*!< CCB control */
+	PRGXFWIF_CCCB			psCCB;					/*!< CCB base */
+
+	/*
+		Used by the bg context only
+	*/
+	RGXFWIF_DLLIST_NODE		RGXFW_ALIGN sWaitingNode;			/*!< List entry for the waiting list */
+
+	/*
+		Used by the irq context only
+	*/
+	RGXFWIF_DLLIST_NODE		sRunNode;				/*!< List entry for the run list */
+	
+	PRGXFWIF_FWMEMCONTEXT	psFWMemContext;			/*!< Memory context */
+
+	/* Context suspend state */
+	PRGXFWIF_COMMONCTX_STATE	RGXFW_ALIGN psContextState;		/*!< TA/3D context suspend state, read/written by FW */
+	
+	/* Framework state
+	 */
+	PRGXFWIF_RF_CMD		RGXFW_ALIGN psRFCmd;		/*!< Register updates for Framework */
+	
+	/*
+	 * 	Flags e.g. for context switching
+	 */
+	IMG_UINT32				ui32Flags;
+	IMG_UINT32				ui32Priority;
+	IMG_UINT32				ui32PrioritySeqNum;
+	IMG_UINT64		RGXFW_ALIGN 	ui64MCUFenceAddr;
+
+	/* References to the host side originators */
+	IMG_UINT32				ui32ServerCommonContextID;			/*!< the Server Common Context */
+	IMG_UINT32				ui32PID;							/*!< associated process ID */
+	
+	/* Statistic updates waiting to be passed back to the host... */
+	IMG_BOOL				bStatsPending;						/*!< True when some stats are pending */
+	IMG_INT32				i32StatsNumStores;					/*!< Number of stores on this context since last update */
+	IMG_INT32				i32StatsNumOutOfMemory;				/*!< Number of OOMs on this context since last update */
+	IMG_INT32				i32StatsNumPartialRenders;			/*!< Number of PRs on this context since last update */
+} UNCACHED_ALIGN RGXFWIF_FWCOMMONCONTEXT;
+
+/*!
+	Firmware render context.
+*/
+typedef struct _RGXFWIF_FWRENDERCONTEXT_
+{
+	RGXFWIF_FWCOMMONCONTEXT	sTAContext;				/*!< Firmware context for the TA */
+	RGXFWIF_FWCOMMONCONTEXT	s3DContext;				/*!< Firmware context for the 3D */
+
+	/*
+	 * Note: The following fields keep track of OOM and partial render statistics.
+	 * Because these data structures are allocated cache-incoherent,
+	 * and because these fields are updated by the firmware, 
+	 * the host will read valid values only after an SLC flush/inval.
+	 * This is only guaranteed to happen while destroying the render-context.
+	 */
+	IMG_UINT32			ui32TotalNumPartialRenders; /*!< Total number of partial renders */
+	IMG_UINT32			ui32TotalNumOutOfMemory;	/*!< Total number of OOMs */
+
+} UNCACHED_ALIGN RGXFWIF_FWRENDERCONTEXT;
+
+/*!
+	Firmware render context.
+*/
+typedef struct _RGXFWIF_FWRAYCONTEXT_
+{
+	RGXFWIF_FWCOMMONCONTEXT	sSHGContext;				/*!< Firmware context for the SHG */
+	RGXFWIF_FWCOMMONCONTEXT	sRTUContext;				/*!< Firmware context for the RTU */
+	PRGXFWIF_CCCB_CTL		psCCBCtl[DPX_MAX_RAY_CONTEXTS];
+	PRGXFWIF_CCCB			psCCB[DPX_MAX_RAY_CONTEXTS];
+	IMG_UINT32				ui32NextFC;
+	IMG_UINT32				ui32ActiveFCMask;
+} UNCACHED_ALIGN RGXFWIF_FWRAYCONTEXT;
+
+#define RGXFWIF_INVALID_FRAME_CONTEXT (0xFFFFFFFF)
+
+/*!
+    BIF requester selection
+*/
+typedef enum _RGXFWIF_BIFREQ_
+{
+	RGXFWIF_BIFREQ_TA		= 0,
+	RGXFWIF_BIFREQ_3D		= 1,
+	RGXFWIF_BIFREQ_CDM		= 2,
+	RGXFWIF_BIFREQ_2D		= 3,
+	RGXFWIF_BIFREQ_HOST		= 4,
+	RGXFWIF_BIFREQ_RTU		= 5,
+	RGXFWIF_BIFREQ_SHG		= 6,
+	RGXFWIF_BIFREQ_MAX		= 7
+} RGXFWIF_BIFREQ;
+
+typedef enum _RGXFWIF_PM_DM_
+{
+	RGXFWIF_PM_DM_TA	= 0,
+	RGXFWIF_PM_DM_3D	= 1,
+} RGXFWIF_PM_DM;
+
+typedef enum _RGXFWIF_RPM_DM_
+{
+	RGXFWIF_RPM_DM_SHF	= 0,
+	RGXFWIF_RPM_DM_SHG	= 1,
+	RGXFWIF_RPM_DM_MAX,
+} RGXFWIF_RPM_DM;
+
+/*!
+ ******************************************************************************
+ * Kernel CCB control for RGX
+ *****************************************************************************/
+typedef struct _RGXFWIF_CCB_CTL_
+{
+	volatile IMG_UINT32		ui32WriteOffset;		/*!< write offset into array of commands (MUST be aligned to 16 bytes!) */
+	volatile IMG_UINT32		ui32ReadOffset;			/*!< read offset into array of commands */
+	IMG_UINT32				ui32WrapMask;			/*!< Offset wrapping mask (Total capacity of the CCB - 1) */
+	IMG_UINT32				ui32CmdSize;			/*!< size of each command in bytes */
+} UNCACHED_ALIGN RGXFWIF_CCB_CTL;
+
+/*!
+ ******************************************************************************
+ * Kernel CCB command structure for RGX
+ *****************************************************************************/
+#if !defined(RGX_FEATURE_SLC_VIVT)
+
+#define RGXFWIF_MMUCACHEDATA_FLAGS_PT      (0x1) /* BIF_CTRL_INVAL_PT_EN */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_PD      (0x2) /* BIF_CTRL_INVAL_PD_EN */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_PC      (0x4) /* BIF_CTRL_INVAL_PC_EN */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB   (0x10) /* can't use PM_TLB0 bit from BIFPM_CTRL reg because it collides with PT bit from BIF_CTRL reg */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_TLB     (RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB | 0x8) /* BIF_CTRL_INVAL_TLB1_EN */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_CTX(C)  (0x0) /* not used */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_CTX_ALL (0x0) /* not used */
+
+#else /* RGX_FEATURE_SLC_VIVT */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_PT      (0x1) /* MMU_CTRL_INVAL_PT_EN */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_PD      (0x2) /* MMU_CTRL_INVAL_PD_EN */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_PC      (0x4) /* MMU_CTRL_INVAL_PC_EN */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB   (0x0) /* not used */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_TLB     (0x0) /* not used */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_CTX(C)  ((C) << 0x3) /* MMU_CTRL_INVAL_CONTEXT_SHIFT */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_CTX_ALL (0x800) /* MMU_CTRL_INVAL_ALL_CONTEXTS_EN */
+#endif
+
+typedef struct _RGXFWIF_MMUCACHEDATA_
+{
+	PRGXFWIF_FWMEMCONTEXT		psMemoryContext;
+	IMG_UINT32					ui32Flags;
+	IMG_UINT32					ui32CacheSequenceNum;
+} RGXFWIF_MMUCACHEDATA;
+
+typedef struct _RGXFWIF_SLCBPCTLDATA_
+{
+	IMG_BOOL               bSetBypassed;        /*!< Should SLC be/not be bypassed for indicated units? */
+	IMG_UINT32             uiFlags;             /*!< Units to enable/disable */
+} RGXFWIF_SLCBPCTLDATA;
+
+#define RGXFWIF_BPDATA_FLAGS_WRITE	(1 << 0)
+#define RGXFWIF_BPDATA_FLAGS_CTL	(1 << 1)
+#define RGXFWIF_BPDATA_FLAGS_REGS	(1 << 2)
+
+typedef struct _RGXFWIF_FWBPDATA_
+{
+	PRGXFWIF_FWMEMCONTEXT	psFWMemContext;			/*!< Memory context */
+	IMG_UINT32		ui32BPAddr;			/*!< Breakpoint address */
+	IMG_UINT32		ui32HandlerAddr;		/*!< Breakpoint handler */
+	IMG_UINT32		ui32BPDM;			/*!< Breakpoint control */
+	IMG_BOOL		bEnable;
+	IMG_UINT32		ui32Flags;
+	IMG_UINT32		ui32TempRegs;		/*!< Number of temporary registers to overallocate */
+	IMG_UINT32		ui32SharedRegs;		/*!< Number of shared registers to overallocate */
+} RGXFWIF_BPDATA;
+
+#define RGXFWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS 4
+
+typedef struct _RGXFWIF_KCCB_CMD_KICK_DATA_
+{
+	PRGXFWIF_FWCOMMONCONTEXT	psContext;			/*!< address of the firmware context */
+	IMG_UINT32					ui32CWoffUpdate;	/*!< Client CCB woff update */
+	IMG_UINT32		ui32NumCleanupCtl;		/*!< number of CleanupCtl pointers attached */
+	PRGXFWIF_CLEANUP_CTL	apsCleanupCtl[RGXFWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS]; /*!< CleanupCtl structures associated with command */
+} RGXFWIF_KCCB_CMD_KICK_DATA;
+
+typedef struct _RGXFWIF_KCCB_CMD_FENCE_DATA_
+{
+	IMG_UINT32 uiSyncObjDevVAddr;
+	IMG_UINT32 uiUpdateVal;
+} RGXFWIF_KCCB_CMD_SYNC_DATA;
+
+typedef enum _RGXFWIF_CLEANUP_TYPE_
+{
+	RGXFWIF_CLEANUP_FWCOMMONCONTEXT,		/*!< FW common context cleanup */
+	RGXFWIF_CLEANUP_HWRTDATA,				/*!< FW HW RT data cleanup */
+	RGXFWIF_CLEANUP_FREELIST,				/*!< FW freelist cleanup */
+	RGXFWIF_CLEANUP_ZSBUFFER,				/*!< FW ZS Buffer cleanup */
+	RGXFWIF_CLEANUP_HWFRAMEDATA,			/*!< FW RPM/RTU frame data */
+	RGXFWIF_CLEANUP_RPM_FREELIST,			/*!< FW RPM freelist */
+} RGXFWIF_CLEANUP_TYPE;
+
+#define RGXFWIF_CLEANUP_RUN		(1 << 0)	/*!< The requested cleanup command has run on the FW */
+#define RGXFWIF_CLEANUP_BUSY	(1 << 1)	/*!< The requested resource is busy */
+
+typedef struct _RGXFWIF_CLEANUP_REQUEST_
+{
+	RGXFWIF_CLEANUP_TYPE			eCleanupType;			/*!< Cleanup type */
+	union {
+		PRGXFWIF_FWCOMMONCONTEXT 	psContext;				/*!< FW common context to cleanup */
+		PRGXFWIF_HWRTDATA 			psHWRTData;				/*!< HW RT to cleanup */
+		PRGXFWIF_FREELIST 			psFreelist;				/*!< Freelist to cleanup */
+		PRGXFWIF_ZSBUFFER 			psZSBuffer;				/*!< ZS Buffer to cleanup */
+#if defined(RGX_FEATURE_RAY_TRACING)
+		PRGXFWIF_RAY_FRAME_DATA		psHWFrameData;			/*!< RPM/RTU frame data to cleanup */
+		PRGXFWIF_RPM_FREELIST 		psRPMFreelist;			/*!< RPM Freelist to cleanup */
+#endif
+	} uCleanupData;
+	IMG_UINT32						uiSyncObjDevVAddr;		/*!< sync primitive used to indicate state of the request */
+} RGXFWIF_CLEANUP_REQUEST;
+
+typedef enum _RGXFWIF_POWER_TYPE_
+{
+	RGXFWIF_POW_OFF_REQ = 1,
+	RGXFWIF_POW_FORCED_IDLE_REQ,
+	RGXFWIF_POW_NUMDUST_CHANGE,
+	RGXFWIF_POW_APM_LATENCY_CHANGE
+} RGXFWIF_POWER_TYPE;
+
+typedef struct _RGXFWIF_POWER_REQUEST_
+{
+	RGXFWIF_POWER_TYPE				ePowType;				/*!< Type of power request */
+	union
+	{
+		IMG_UINT32					ui32NumOfDusts;			/*!< Number of active Dusts */
+		IMG_BOOL					bForced;				/*!< If the operation is mandatory */
+		IMG_BOOL					bCancelForcedIdle;		/*!< If the operation is to cancel previously forced idle */
+		IMG_UINT32					ui32ActivePMLatencyms;		/*!< Number of milliseconds to set APM latency */
+	} uPoweReqData;
+} RGXFWIF_POWER_REQUEST;
+
+typedef struct _RGXFWIF_SLCFLUSHINVALDATA_
+{
+	PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< Context to fence on (only useful when bDMContext == TRUE) */
+	IMG_BOOL    bInval;                 /*!< Invalidate the cache as well as flushing */
+	IMG_BOOL    bDMContext;             /*!< The data to flush/invalidate belongs to a specific DM context */
+	RGXFWIF_DM  eDM;                    /*!< DM to flush entries for (only useful when bDMContext == TRUE) */
+} RGXFWIF_SLCFLUSHINVALDATA;
+
+typedef struct _RGXFWIF_HWPERF_CTRL_
+{
+	IMG_BOOL	 			bToggle; 	/*!< Toggle masked bits or apply full mask? */
+	IMG_UINT64	RGXFW_ALIGN	ui64Mask;   /*!< Mask of events to toggle */
+} RGXFWIF_HWPERF_CTRL;
+
+typedef struct _RGXFWIF_HWPERF_CONFIG_ENABLE_BLKS_
+{
+	IMG_UINT32				ui32NumBlocks; 	/*!< Number of RGX_HWPERF_CONFIG_CNTBLK in the array */
+	PRGX_HWPERF_CONFIG_CNTBLK pasBlockConfigs;	/*!< Address of the RGX_HWPERF_CONFIG_CNTBLK array */
+} RGXFWIF_HWPERF_CONFIG_ENABLE_BLKS;
+
+typedef struct _RGXFWIF_CORECLKSPEEDCHANGE_DATA_
+{
+	IMG_UINT32	ui32NewClockSpeed; 			/*!< New clock speed */
+} RGXFWIF_CORECLKSPEEDCHANGE_DATA;
+
+#define RGXFWIF_HWPERF_CTRL_BLKS_MAX	16
+
+typedef struct _RGXFWIF_HWPERF_CTRL_BLKS_
+{
+	IMG_BOOL	bEnable;
+	IMG_UINT32	ui32NumBlocks;                              /*!< Number of block IDs in the array */
+	IMG_UINT16	aeBlockIDs[RGXFWIF_HWPERF_CTRL_BLKS_MAX];   /*!< Array of RGX_HWPERF_CNTBLK_ID values */
+} RGXFWIF_HWPERF_CTRL_BLKS;
+
+
+typedef struct _RGXFWIF_HWPERF_SELECT_CUSTOM_CNTRS_
+{
+	IMG_UINT16 ui16CustomBlock;
+	IMG_UINT16 ui16NumCounters;
+	PRGX_HWPERF_SELECT_CUSTOM_CNTRS pui32CustomCounterIDs;
+} RGXFWIF_HWPERF_SELECT_CUSTOM_CNTRS;
+
+typedef struct _RGXFWIF_ZSBUFFER_BACKING_DATA_
+{
+	IMG_UINT32				psZSBufferFWDevVAddr; 				/*!< ZS-Buffer FW address */
+	IMG_UINT32				bDone;								/*!< action backing/unbacking succeeded */
+} RGXFWIF_ZSBUFFER_BACKING_DATA;
+
+/*
+ * Flags to pass in the unused bits of the page size grow request
+ */
+#define RGX_FREELIST_GSDATA_RPM_RESTART_EN		(1 << 31)		/*!< Restart RPM after freelist grow command */
+#define RGX_FREELIST_GSDATA_RPM_PAGECNT_MASK	(0x3FFFFFU)		/*!< Mask for page count. */
+
+typedef struct _RGXFWIF_FREELIST_GS_DATA_
+{
+	IMG_UINT32				psFreeListFWDevVAddr; 				/*!< Freelist FW address */
+	IMG_UINT32				ui32DeltaSize;						/*!< Amount of the Freelist change */
+	IMG_UINT32				ui32NewSize;						/*!< New amount of pages on the freelist */
+} RGXFWIF_FREELIST_GS_DATA;
+
+#define RGXFWIF_FREELISTS_RECONSTRUCTION_FAILED_FLAG 0x80000000
+
+typedef struct _RGXFWIF_FREELISTS_RECONSTRUCTION_DATA_
+{
+	IMG_UINT32			ui32FreelistsCount;
+	IMG_UINT32			aui32FreelistIDs[MAX_HW_TA3DCONTEXTS * RGXFW_MAX_FREELISTS];
+} RGXFWIF_FREELISTS_RECONSTRUCTION_DATA;
+
+/*!
+ ******************************************************************************
+ * Register configuration structures
+ *****************************************************************************/
+
+#define RGXFWIF_REG_CFG_MAX_SIZE 512
+
+typedef enum _RGXFWIF_REGDATA_CMD_TYPE_
+{
+	RGXFWIF_REGCFG_CMD_ADD 				= 101,
+	RGXFWIF_REGCFG_CMD_CLEAR 			= 102,
+	RGXFWIF_REGCFG_CMD_ENABLE 			= 103,
+	RGXFWIF_REGCFG_CMD_DISABLE 			= 104
+} RGXFWIF_REGDATA_CMD_TYPE;
+
+typedef struct _RGXFWIF_REGCONFIG_DATA_
+{
+	RGXFWIF_REGDATA_CMD_TYPE	eCmdType;
+	RGXFWIF_PWR_EVT			eRegConfigPI;
+	RGXFWIF_REG_CFG_REC RGXFW_ALIGN     	sRegConfig;
+
+} RGXFWIF_REGCONFIG_DATA;
+
+typedef struct _RGXFWIF_REG_CFG_
+{
+	IMG_UINT32			ui32NumRegsSidekick;
+	IMG_UINT32			ui32NumRegsRascalDust;
+	RGXFWIF_REG_CFG_REC	RGXFW_ALIGN 	asRegConfigs[RGXFWIF_REG_CFG_MAX_SIZE];
+} UNCACHED_ALIGN RGXFWIF_REG_CFG;
+
+typedef enum _RGXFWIF_KCCB_CMD_TYPE_
+{
+	RGXFWIF_KCCB_CMD_KICK						= 101,
+	RGXFWIF_KCCB_CMD_MMUCACHE					= 102,
+	RGXFWIF_KCCB_CMD_BP							= 104,
+	RGXFWIF_KCCB_CMD_SLCBPCTL   				= 106, /*!< slc bypass control. Requires sSLCBPCtlData. For validation */
+	RGXFWIF_KCCB_CMD_SYNC       				= 107, /*!< host sync command. Requires sSyncData. */
+	RGXFWIF_KCCB_CMD_SLCFLUSHINVAL				= 108, /*!< slc flush and invalidation request */
+	RGXFWIF_KCCB_CMD_CLEANUP					= 109, /*!< Requests cleanup of a FW resource (type specified in the command data) */
+	RGXFWIF_KCCB_CMD_POW						= 110, /*!< Power request */
+	RGXFWIF_KCCB_CMD_HWPERF_CTRL_EVENTS			= 111, /*!< Control the HWPerf event generation behaviour */
+	RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS	= 112, /*!< Configure, clear and enable multiple HWPerf blocks */
+	RGXFWIF_KCCB_CMD_HWPERF_CTRL_BLKS			= 113, /*!< Enable or disable multiple HWPerf blocks (reusing existing configuration) */
+	RGXFWIF_KCCB_CMD_CORECLKSPEEDCHANGE			= 114, /*!< CORE clock speed change event */
+	RGXFWIF_KCCB_CMD_ZSBUFFER_BACKING_UPDATE	= 115, /*!< Backing for on-demand ZS-Buffer done */
+	RGXFWIF_KCCB_CMD_ZSBUFFER_UNBACKING_UPDATE	= 116, /*!< Unbacking for on-demand ZS-Buffer done */
+	RGXFWIF_KCCB_CMD_FREELIST_GROW_UPDATE		= 117, /*!< Freelist Grow done */
+	RGXFWIF_KCCB_CMD_FREELIST_SHRINK_UPDATE		= 118, /*!< Freelist Shrink done */
+	RGXFWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE	= 119, /*!< Freelists Reconstruction done */
+	RGXFWIF_KCCB_CMD_HEALTH_CHECK               = 120, /*!< Health check request */
+	RGXFWIF_KCCB_CMD_REGCONFIG                  = 121,
+	RGXFWIF_KCCB_CMD_HWPERF_SELECT_CUSTOM_CNTRS = 122, /*!< Configure the custom counters for HWPerf */
+	RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS_DIRECT	= 123, /*!< Configure, clear and enable multiple HWPerf blocks during the init process*/
+
+#if defined(RGX_FEATURE_RAY_TRACING)
+	RGXFWIF_FWCCB_CMD_DOPPLER_MEMORY_GROW		= 130,
+#endif
+} RGXFWIF_KCCB_CMD_TYPE;
+
+/* Kernel CCB command packet */
+typedef struct _RGXFWIF_KCCB_CMD_
+{
+	RGXFWIF_KCCB_CMD_TYPE					eCmdType;			/*!< Command type */
+	union
+	{
+		RGXFWIF_KCCB_CMD_KICK_DATA			sCmdKickData;			/*!< Data for Kick command */
+		RGXFWIF_MMUCACHEDATA				sMMUCacheData;			/*!< Data for MMUCACHE command */
+		RGXFWIF_BPDATA						sBPData;				/*!< Data for Breakpoint Commands */
+		RGXFWIF_SLCBPCTLDATA       			sSLCBPCtlData;  		/*!< Data for SLC Bypass Control */
+		RGXFWIF_KCCB_CMD_SYNC_DATA 			sSyncData;          	/*!< Data for host sync commands */
+		RGXFWIF_SLCFLUSHINVALDATA			sSLCFlushInvalData;		/*!< Data for SLC Flush/Inval commands */
+		RGXFWIF_CLEANUP_REQUEST				sCleanupData; 			/*!< Data for cleanup commands */
+		RGXFWIF_POWER_REQUEST				sPowData;				/*!< Data for power request commands */
+		RGXFWIF_HWPERF_CTRL					sHWPerfCtrl;			/*!< Data for HWPerf control command */
+		RGXFWIF_HWPERF_CONFIG_ENABLE_BLKS	sHWPerfCfgEnableBlks;	/*!< Data for HWPerf configure, clear and enable performance counter block command */
+		RGXFWIF_HWPERF_CTRL_BLKS			sHWPerfCtrlBlks;		/*!< Data for HWPerf enable or disable performance counter block commands */
+		RGXFWIF_HWPERF_SELECT_CUSTOM_CNTRS  sHWPerfSelectCstmCntrs; /*!< Data for HWPerf configure the custom counters to read */
+		RGXFWIF_CORECLKSPEEDCHANGE_DATA		sCORECLKSPEEDCHANGEData;/*!< Data for CORE clock speed change */
+		RGXFWIF_ZSBUFFER_BACKING_DATA		sZSBufferBackingData;	/*!< Feedback for Z/S Buffer backing/unbacking */
+		RGXFWIF_FREELIST_GS_DATA			sFreeListGSData;		/*!< Feedback for Freelist grow/shrink */
+		RGXFWIF_FREELISTS_RECONSTRUCTION_DATA	sFreeListsReconstructionData;	/*!< Feedback for Freelists reconstruction */
+		RGXFWIF_REGCONFIG_DATA				sRegConfigData;			/*!< Data for custom register configuration */
+	} UNCACHED_ALIGN uCmdData;
+} UNCACHED_ALIGN RGXFWIF_KCCB_CMD;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGXFWIF_KCCB_CMD);
+
+/*!
+ ******************************************************************************
+ * Firmware CCB command structure for RGX
+ *****************************************************************************/
+
+typedef struct _RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING_DATA_
+{
+	IMG_UINT32				ui32ZSBufferID;
+	IMG_BOOL				bPopulate;
+} RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING_DATA;
+
+typedef struct _RGXFWIF_FWCCB_CMD_FREELIST_GS_DATA_
+{
+	IMG_UINT32				ui32FreelistID;
+} RGXFWIF_FWCCB_CMD_FREELIST_GS_DATA;
+
+typedef struct _RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION_DATA_
+{
+	IMG_UINT32			ui32FreelistsCount;
+	IMG_UINT32			ui32HwrCounter;
+	IMG_UINT32			aui32FreelistIDs[MAX_HW_TA3DCONTEXTS * RGXFW_MAX_FREELISTS];
+} RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION_DATA;
+
+typedef struct _RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA_
+{
+	IMG_UINT32						ui32ServerCommonContextID;	/*!< Context affected by the reset */
+	RGXFWIF_CONTEXT_RESET_REASON	eResetReason;				/*!< Reason for reset */
+} RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA;
+
+
+typedef enum _RGXFWIF_FWCCB_CMD_TYPE_
+{
+	RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING				= 101, 	/*!< Requests ZSBuffer to be backed with physical pages */
+	RGXFWIF_FWCCB_CMD_ZSBUFFER_UNBACKING			= 102, 	/*!< Requests ZSBuffer to be unbacked */
+	RGXFWIF_FWCCB_CMD_FREELIST_GROW					= 103, 	/*!< Requests an on-demand freelist grow/shrink */
+	RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION		= 104, 	/*!< Requests freelists reconstruction */
+	RGXFWIF_FWCCB_CMD_CONTEXT_RESET_NOTIFICATION	= 105,	/*!< Notifies host of a HWR event on a context */
+	RGXFWIF_FWCCB_CMD_DEBUG_DUMP					= 106,	/*!< Requests an on-demand debug dump */
+	RGXFWIF_FWCCB_CMD_UPDATE_STATS					= 107,	/*!< Requests an on-demand update on process stats */
+} RGXFWIF_FWCCB_CMD_TYPE;
+
+typedef enum
+{
+    RGXFWIF_FWCCB_CMD_UPDATE_NUM_PARTIAL_RENDERS=1,		/*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32TotalNumPartialRenders stat */
+    RGXFWIF_FWCCB_CMD_UPDATE_NUM_OUT_OF_MEMORY,			/*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32TotalNumOutOfMemory stat */
+    RGXFWIF_FWCCB_CMD_UPDATE_NUM_TA_STORES,				/*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32NumTAStores stat */
+    RGXFWIF_FWCCB_CMD_UPDATE_NUM_3D_STORES,				/*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32Num3DStores stat */
+    RGXFWIF_FWCCB_CMD_UPDATE_NUM_SH_STORES,				/*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32NumSHStores stat */
+    RGXFWIF_FWCCB_CMD_UPDATE_NUM_CDM_STORES				/*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32NumCDMStores stat */
+} RGXFWIF_FWCCB_CMD_UPDATE_STATS_TYPE;
+
+
+/* Firmware CCB command packet */
+
+typedef struct
+{
+    RGXFWIF_FWCCB_CMD_UPDATE_STATS_TYPE		eElementToUpdate;			/*!< Element to update */
+    IMG_PID									pidOwner;					/*!< The pid of the process whose stats are being updated */
+    IMG_INT32								i32AdjustmentValue;			/*!< Adjustment to be made to the statistic */
+} RGXFWIF_FWCCB_CMD_UPDATE_STATS_DATA;
+
+typedef struct _RGXFWIF_FWCCB_CMD_
+{
+	RGXFWIF_FWCCB_CMD_TYPE					eCmdType;	/*!< Command type */
+	union
+	{
+		RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING_DATA				sCmdZSBufferBacking;			/*!< Data for Z/S-Buffer on-demand (un)backing*/
+		RGXFWIF_FWCCB_CMD_FREELIST_GS_DATA					sCmdFreeListGS;					/*!< Data for on-demand freelist grow/shrink */
+		RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION_DATA		sCmdFreeListsReconstruction;	/*!< Data for freelists reconstruction */
+		RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA				sCmdContextResetNotification;	/*!< Data for context reset notification */
+        RGXFWIF_FWCCB_CMD_UPDATE_STATS_DATA                 sCmdUpdateStatsData;            /*!< Data for updating process stats */
+	} RGXFW_ALIGN uCmdData;
+} RGXFW_ALIGN RGXFWIF_FWCCB_CMD;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGXFWIF_FWCCB_CMD);
+
+/*!
+ ******************************************************************************
+ * Signature and Checksums Buffer
+ *****************************************************************************/
+typedef struct _RGXFWIF_SIGBUF_CTL_
+{
+	PRGXFWIF_SIGBUFFER		psBuffer;			/*!< Ptr to Signature Buffer memory */
+	IMG_UINT32				ui32LeftSizeInRegs;	/*!< Amount of space left for storing regs in the buffer */
+} UNCACHED_ALIGN RGXFWIF_SIGBUF_CTL;
+
+/*!
+ ******************************************************************************
+ * Updated configuration post FW data init.
+ *****************************************************************************/
+typedef struct _RGXFWIF_RUNTIME_CFG_
+{
+	IMG_UINT32				ui32ActivePMLatencyms;		/* APM latency in ms before signalling IDLE to the host */
+	IMG_BOOL				bActivePMLatencyPersistant;	/* If set, APM latency does not reset to system default each GPU power transition */
+	IMG_UINT32				ui32CoreClockSpeed;		/* Core clock speed, currently only used to calculate timer ticks */
+} RGXFWIF_RUNTIME_CFG;
+
+/*!
+ *****************************************************************************
+ * Control data for RGX
+ *****************************************************************************/
+
+#define RGXFWIF_HWR_DEBUG_DUMP_ALL (99999)
+
+typedef struct _RGXFWIF_INIT_
+{
+	IMG_DEV_PHYADDR 		RGXFW_ALIGN sFaultPhysAddr;
+
+	IMG_DEV_VIRTADDR		RGXFW_ALIGN sPDSExecBase;
+	IMG_DEV_VIRTADDR		RGXFW_ALIGN sUSCExecBase;
+	IMG_DEV_VIRTADDR		RGXFW_ALIGN sResultDumpBase;
+	IMG_DEV_VIRTADDR		RGXFW_ALIGN sDPXControlStreamBase;
+	IMG_DEV_VIRTADDR		RGXFW_ALIGN sRTUHeapBase;
+
+	IMG_BOOL				bFirstTA;
+	IMG_BOOL				bFirstRender;
+	IMG_BOOL				bFrameworkAfterInit;
+	IMG_BOOL				bEnableHWPerf;
+	IMG_BOOL                bDisableFilterHWPerfCustomCounter;
+	IMG_UINT32				uiPowerSync;
+	IMG_UINT32				ui32FilterFlags;
+
+	/* Kernel CCBs */
+	PRGXFWIF_CCB_CTL		psKernelCCBCtl[RGXFWIF_DM_MAX];
+	PRGXFWIF_CCB			psKernelCCB[RGXFWIF_DM_MAX];
+
+	/* Firmware CCBs */
+	PRGXFWIF_CCB_CTL		psFirmwareCCBCtl[RGXFWIF_DM_MAX];
+	PRGXFWIF_CCB			psFirmwareCCB[RGXFWIF_DM_MAX];
+
+	RGXFWIF_DM				eDM[RGXFWIF_DM_MAX];
+
+	RGXFWIF_SIGBUF_CTL		asSigBufCtl[RGXFWIF_DM_MAX];
+
+	IMG_BOOL				bEnableLogging;
+	IMG_UINT32				ui32ConfigFlags;	/*!< Configuration flags from host */
+	IMG_UINT32				ui32BreakpointTemps;
+	IMG_UINT32				ui32BreakpointShareds;
+	IMG_UINT32				ui32HWRDebugDumpLimit;
+	struct
+	{
+		IMG_UINT64 uiBase;
+		IMG_UINT64 uiLen;
+		IMG_UINT64 uiXStride;
+	}                       RGXFW_ALIGN sBifTilingCfg[RGXFWIF_NUM_BIF_TILING_CONFIGS];
+
+	PRGXFWIF_RUNTIME_CFG		psRuntimeCfg;
+
+	PRGXFWIF_TRACEBUF		psTraceBufCtl;
+	PRGXFWIF_HWPERFINFO		psHWPerfInfoCtl;
+	IMG_UINT64	RGXFW_ALIGN ui64HWPerfFilter;
+
+	PRGXFWIF_HWRINFOBUF		psRGXFWIfHWRInfoBufCtl;
+	PRGXFWIF_GPU_UTIL_FWCB	psGpuUtilFWCbCtl;
+	PRGXFWIF_REG_CFG		psRegCfg;
+	PRGXFWIF_HWPERF_CTL			psHWPerfCtl;
+
+#if defined(RGXFW_ALIGNCHECKS)
+#if defined(RGX_FIRMWARE)
+	IMG_UINT32*				paui32AlignChecks;
+#else
+	RGXFWIF_DEV_VIRTADDR	paui32AlignChecks;
+#endif
+#endif
+
+	/* Core clock speed at FW boot time */ 
+	IMG_UINT32              ui32InitialCoreClockSpeed;
+	
+	/* APM latency in ms before signalling IDLE to the host */
+	IMG_UINT32				ui32ActivePMLatencyms;
+
+	/* Flag to be set by the Firmware after successful start */
+	IMG_BOOL				bFirmwareStarted;
+
+	IMG_UINT32				ui32FirmwareStartedTimeStamp;
+
+	IMG_UINT32				ui32JonesDisableMask;
+
+	/* Compatibility checks to be populated by the Firmware */
+	RGXFWIF_COMPCHECKS		sRGXCompChecks;
+
+	RGXFWIF_DMA_ADDR		sCorememDataStore;
+
+#if defined(RGX_FEATURE_SLC_VIVT)
+	IMG_DEV_VIRTADDR		RGXFW_ALIGN sSLC3FenceDevVAddr;
+#endif
+
+} UNCACHED_ALIGN RGXFWIF_INIT;
+
+
+/*!
+ ******************************************************************************
+ * Client CCB commands which are only required by the kernel
+ *****************************************************************************/
+typedef struct _RGXFWIF_CMD_PRIORITY_
+{
+	IMG_UINT32				ui32Priority;
+} RGXFWIF_CMD_PRIORITY;
+
+/*!
+ ******************************************************************************
+ * RGXFW Unittests declarations
+ *****************************************************************************/
+typedef struct _RGXFW_UNITTEST2_
+{
+	/* Irq events */
+	IMG_UINT32	ui32IrqKicksDM[RGXFWIF_DM_MAX_MTS];
+	IMG_UINT32	ui32IrqKicksBg;
+	IMG_UINT32	ui32IrqKicksTimer;
+
+	/* Bg events */
+	IMG_UINT32	ui32BgKicksDM[RGXFWIF_DM_MAX_MTS];
+	IMG_UINT32	ui32BgKicksCounted;
+
+} RGXFW_UNITTEST2;
+
+/*!
+ ******************************************************************************
+ * RGXFW_UNITTESTS declaration
+ *****************************************************************************/
+#define RGXFW_UNITTEST_FWPING		(0x1)
+#define RGXFW_UNITTEST_FWPONG		(0x2)
+
+#define RGXFW_UNITTEST_IS_BGKICK(DM)	((DM) & 0x1)
+
+typedef struct _RGXFW_UNITTESTS_
+{
+	IMG_UINT32	ui32Status;
+
+	RGXFW_UNITTEST2 sUnitTest2;
+
+} RGXFW_UNITTESTS;
+
+#endif /*  __RGX_FWIF_KM_H__ */
+
+/******************************************************************************
+ End of file (rgx_fwif_km.h)
+******************************************************************************/
+
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/include/rgx_fwif_resetframework.h b/drivers/external_drivers/intel_media/graphics/rgx/services/include/rgx_fwif_resetframework.h
new file mode 100644
index 0000000..25ee674
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/include/rgx_fwif_resetframework.h
@@ -0,0 +1,68 @@
+/*************************************************************************/ /*!
+@File			rgx_fwif_resetframework.h
+@Title         	Post-reset work-around framework FW interface
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(_RGX_FWIF_RESETFRAMEWORK_H)
+#define _RGX_FWIF_RESETFRAMEWORK_H
+
+#include "img_types.h"
+#include "rgx_fwif_shared.h"
+
+typedef struct _RGXFWIF_RF_REGISTERS_
+{
+	IMG_UINT64  uCDMReg_CDM_CTRL_STREAM_BASE;
+} RGXFWIF_RF_REGISTERS;
+
+#define RGXFWIF_RF_FLAG_ENABLE 0x00000001 /*!< enables the reset framework in the firmware */
+
+typedef struct _RGXFWIF_RF_CMD_
+{
+	IMG_UINT32           ui32Flags;
+
+	/* THIS MUST BE THE LAST MEMBER OF THE CONTAINING STRUCTURE */
+	RGXFWIF_RF_REGISTERS RGXFW_ALIGN sFWRegisters;
+
+} RGXFWIF_RF_CMD;
+
+/* to opaquely allocate and copy in the kernel */
+#define RGXFWIF_RF_CMD_SIZE  sizeof(RGXFWIF_RF_CMD)
+
+#endif /* _RGX_FWIF_RESETFRAMEWORK_H */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/include/rgx_fwif_shared.h b/drivers/external_drivers/intel_media/graphics/rgx/services/include/rgx_fwif_shared.h
new file mode 100644
index 0000000..96a9688
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/include/rgx_fwif_shared.h
@@ -0,0 +1,528 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX firmware interface structures
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX firmware interface structures shared by both host client
+                and host server
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__RGX_FWIF_SHARED_H__)
+#define __RGX_FWIF_SHARED_H__
+
+#include "img_types.h"
+#include "rgx_common.h"
+#include "devicemem_typedefs.h"
+
+
+/*!
+ ******************************************************************************
+ * Device state flags
+ *****************************************************************************/
+#define RGXKMIF_DEVICE_STATE_ZERO_FREELIST			(0x1 << 0)		/*!< Zeroing the physical pages of reconstructed free lists */
+#define RGXKMIF_DEVICE_STATE_FTRACE_EN				(0x1 << 1)		/*!< Used to enable device FTrace thread to consume HWPerf data */
+#define RGXKMIF_DEVICE_STATE_DISABLE_DW_LOGGING_EN	(0x1 << 2)		/*!< Used to disable the Devices Watchdog logging */
+
+/* Required memory alignment for 64-bit variables accessible by Meta 
+  (the gcc meta aligns 64-bit vars to 64-bit; therefore, mem shared between
+   the host and meta that contains 64-bit vars has to maintain this aligment)*/
+#define RGXFWIF_FWALLOC_ALIGN	sizeof(IMG_UINT64)
+
+typedef struct _RGXFWIF_DEV_VIRTADDR_
+{
+	IMG_UINT32	ui32Addr;
+} RGXFWIF_DEV_VIRTADDR;
+
+typedef struct _RGXFWIF_DMA_ADDR_
+{
+	IMG_DEV_VIRTADDR        RGXFW_ALIGN psDevVirtAddr;
+
+#if defined(RGX_FIRMWARE)
+	IMG_PBYTE               pbyFWAddr;
+#else
+	RGXFWIF_DEV_VIRTADDR    pbyFWAddr;
+#endif
+} UNCACHED_ALIGN RGXFWIF_DMA_ADDR;
+
+typedef IMG_UINT8	RGXFWIF_CCCB;
+
+#if defined(RGX_FIRMWARE)
+/* Compiling the actual firmware - use a fully typed pointer */
+typedef RGXFWIF_CCCB					*PRGXFWIF_CCCB;
+typedef struct _RGXFWIF_CCCB_CTL_		*PRGXFWIF_CCCB_CTL;
+typedef struct _RGXFWIF_RENDER_TARGET_	*PRGXFWIF_RENDER_TARGET;
+typedef struct _RGXFWIF_HWRTDATA_		*PRGXFWIF_HWRTDATA;
+typedef struct _RGXFWIF_FREELIST_		*PRGXFWIF_FREELIST;
+typedef struct _RGXFWIF_RAY_FRAME_DATA_	*PRGXFWIF_RAY_FRAME_DATA;
+typedef struct _RGXFWIF_RPM_FREELIST_	*PRGXFWIF_RPM_FREELIST;
+typedef struct _RGXFWIF_RTA_CTL_		*PRGXFWIF_RTA_CTL;
+typedef IMG_UINT32						*PRGXFWIF_UFO_ADDR;
+typedef struct _RGXFWIF_CLEANUP_CTL_	*PRGXFWIF_CLEANUP_CTL;
+#else
+/* Compiling the host driver - use a firmware device virtual pointer */
+typedef RGXFWIF_DEV_VIRTADDR	PRGXFWIF_CCCB;
+typedef RGXFWIF_DEV_VIRTADDR	PRGXFWIF_CCCB_CTL;
+typedef RGXFWIF_DEV_VIRTADDR	PRGXFWIF_RENDER_TARGET;
+typedef RGXFWIF_DEV_VIRTADDR	PRGXFWIF_HWRTDATA;
+typedef RGXFWIF_DEV_VIRTADDR	PRGXFWIF_FREELIST;
+typedef RGXFWIF_DEV_VIRTADDR	PRGXFWIF_RAY_FRAME_DATA;
+typedef RGXFWIF_DEV_VIRTADDR	PRGXFWIF_RPM_FREELIST;
+typedef RGXFWIF_DEV_VIRTADDR	PRGXFWIF_RTA_CTL;
+typedef RGXFWIF_DEV_VIRTADDR	PRGXFWIF_UFO_ADDR;
+typedef RGXFWIF_DEV_VIRTADDR	PRGXFWIF_CLEANUP_CTL;
+#endif /* RGX_FIRMWARE */
+
+
+/* FIXME PRGXFWIF_UFO_ADDR and RGXFWIF_UFO should move back into rgx_fwif_client.h */
+typedef struct _RGXFWIF_UFO_
+{
+	PRGXFWIF_UFO_ADDR	puiAddrUFO;
+	IMG_UINT32			ui32Value;
+} RGXFWIF_UFO;
+
+
+/*!
+	Last reset reason for a context.
+*/
+typedef enum _RGXFWIF_CONTEXT_RESET_REASON_
+{
+	RGXFWIF_CONTEXT_RESET_REASON_NONE					= 0,	/*!< No reset reason recorded */
+	RGXFWIF_CONTEXT_RESET_REASON_GUILTY_LOCKUP			= 1,	/*!< Caused a reset due to locking up */
+	RGXFWIF_CONTEXT_RESET_REASON_INNOCENT_LOCKUP		= 2,	/*!< Affected by another context locking up */
+	RGXFWIF_CONTEXT_RESET_REASON_GUILTY_OVERRUNING		= 3,	/*!< Overran the global deadline */
+	RGXFWIF_CONTEXT_RESET_REASON_INNOCENT_OVERRUNING	= 4,	/*!< Affected by another context overrunning */
+} RGXFWIF_CONTEXT_RESET_REASON;
+
+
+/*!
+	HWRTData state the render is in
+*/
+typedef enum
+{
+	RGXFWIF_RTDATA_STATE_NONE = 0,
+	RGXFWIF_RTDATA_STATE_KICKTA,
+	RGXFWIF_RTDATA_STATE_KICKTAFIRST,
+	RGXFWIF_RTDATA_STATE_TAFINISHED,
+	RGXFWIF_RTDATA_STATE_KICK3D,
+	RGXFWIF_RTDATA_STATE_3DFINISHED,
+	RGXFWIF_RTDATA_STATE_TAOUTOFMEM,
+	RGXFWIF_RTDATA_STATE_PARTIALRENDERFINISHED,
+	RGXFWIF_RTDATA_STATE_HWR					/*!< In case of HWR, we can't set the RTDATA state to NONE,
+													 as this will cause any TA to become a first TA.
+													 To ensure all related TA's are skipped, we use the HWR state */
+} RGXFWIF_RTDATA_STATE;
+
+typedef struct _RGXFWIF_CLEANUP_CTL_
+{
+	IMG_UINT32				ui32SubmittedCommands;	/*!< Number of commands received by the FW */
+	IMG_UINT32				ui32ExecutedCommands;	/*!< Number of commands executed by the FW */
+} UNCACHED_ALIGN RGXFWIF_CLEANUP_CTL;
+
+
+/*!
+ ******************************************************************************
+ * Client CCB control for RGX
+ *****************************************************************************/
+typedef struct _RGXFWIF_CCCB_CTL_
+{
+	IMG_UINT32				ui32WriteOffset;	/*!< write offset into array of commands (MUST be aligned to 16 bytes!) */
+	IMG_UINT32				ui32ReadOffset;		/*!< read offset into array of commands */
+	IMG_UINT32				ui32DepOffset;		/*!< Dependency offset */
+	IMG_UINT32				ui32WrapMask;		/*!< Offset wrapping mask (Total capacity of the CCB - 1) */
+} UNCACHED_ALIGN RGXFWIF_CCCB_CTL;
+
+typedef enum 
+{
+	RGXFW_LOCAL_FREELIST = 0,
+	RGXFW_GLOBAL_FREELIST = 1,
+#if defined(SUPPORT_MMU_FREELIST)
+	RGXFW_MMU_FREELIST = 2,
+#endif
+	RGXFW_MAX_FREELISTS
+} RGXFW_FREELIST_TYPE;
+
+typedef struct _RGXFWIF_RTA_CTL_
+{
+	IMG_UINT32				ui32RenderTargetIndex;		//Render number
+	IMG_UINT32				ui32CurrentRenderTarget;	//index in RTA
+	IMG_UINT32				ui32ActiveRenderTargets;	//total active RTs
+	IMG_UINT32				ui32CumulActiveRenderTargets;   //total active RTs from the first TA kick, for OOM
+#if defined(RGX_FIRMWARE)
+	IMG_UINT32				*paui32ValidRenderTargets;	//Array of valid RT indices
+	IMG_UINT32              		*paui32NumRenders;  //Array of number of occurred partial renders per render target
+#else
+	RGXFWIF_DEV_VIRTADDR			paui32ValidRenderTargets;  //Array of valid RT indices
+	RGXFWIF_DEV_VIRTADDR    		paui32NumRenders;  //Array of number of occurred partial renders per render target
+#endif
+	IMG_UINT16              		ui16MaxRTs;   //Number of render targets in the array
+} UNCACHED_ALIGN RGXFWIF_RTA_CTL;
+
+typedef struct _RGXFWIF_FREELIST_
+{
+	IMG_DEV_VIRTADDR	RGXFW_ALIGN psFreeListDevVAddr;
+	IMG_UINT64			RGXFW_ALIGN ui64CurrentDevVAddr;
+	IMG_UINT32			ui32CurrentStackTop;
+	IMG_UINT32			ui32MaxPages;
+	IMG_UINT32			ui32GrowPages;
+	IMG_UINT32			ui32CurrentPages;
+	IMG_UINT32			ui32AllocatedPageCount;
+	IMG_UINT32			ui32AllocatedMMUPageCount;
+	IMG_UINT32			ui32HWRCounter;
+	IMG_UINT32			ui32FreeListID;
+	IMG_BOOL			bGrowPending;
+} UNCACHED_ALIGN RGXFWIF_FREELIST;
+
+#if defined(RGX_FEATURE_RAY_TRACING)
+typedef enum 
+{
+	RGXFW_RPM_SHF_FREELIST = 0,
+	RGXFW_RPM_SHG_FREELIST = 1,
+} RGXFW_RPM_FREELIST_TYPE;
+
+#define		RGXFW_MAX_RPM_FREELISTS		(2)
+
+typedef struct _RGXFWIF_RPM_FREELIST_
+{
+	IMG_DEV_VIRTADDR	RGXFW_ALIGN sFreeListDevVAddr;		/*!< device base address */
+	IMG_DEV_VIRTADDR	RGXFW_ALIGN sRPMPageListDevVAddr;	/*!< device base address for RPM pages in-use */
+	IMG_UINT32			sSyncAddr;				/*!< Free list sync object for OOM event */
+	IMG_UINT32			ui32MaxPages;			/*!< maximum size */
+	IMG_UINT32			ui32GrowPages;			/*!< grow size = maximum pages which may be added later */
+	IMG_UINT32			ui32CurrentPages;		/*!< number of pages */
+	IMG_UINT32			ui32ReadOffset;			/*!< head: where to read alloc'd pages */
+	IMG_UINT32			ui32WriteOffset;		/*!< tail: where to write de-alloc'd pages */
+	IMG_BOOL			bReadToggle;			/*!< toggle bit for circular buffer */
+	IMG_BOOL			bWriteToggle;
+	IMG_UINT32			ui32AllocatedPageCount; /*!< TODO: not sure yet if this is useful */
+	IMG_UINT32			ui32HWRCounter;
+	IMG_UINT32			ui32FreeListID;			/*!< unique ID per device, e.g. rolling counter */
+	IMG_BOOL			bGrowPending;			/*!< FW is waiting for host to grow the freelist */
+} UNCACHED_ALIGN RGXFWIF_RPM_FREELIST;
+
+typedef struct _RGXFWIF_RAY_FRAME_DATA_
+{
+	/* state manager for shared state between vertex and ray processing */
+	
+	/* TODO: not sure if this will be useful, link it here for now */
+	IMG_UINT32		sRPMFreeLists[RGXFW_MAX_RPM_FREELISTS];
+	
+	IMG_BOOL		bAbortOccurred;
+	
+	/* cleanup state.
+	 * Both the SHG and RTU must complete or discard any outstanding work
+	 * which references this frame data.
+	 */
+	RGXFWIF_CLEANUP_CTL		sCleanupStateSHG;
+	RGXFWIF_CLEANUP_CTL		sCleanupStateRTU;
+	IMG_UINT32				ui32CleanupStatus;
+#define HWFRAMEDATA_SHG_CLEAN	(1 << 0)
+#define HWFRAMEDATA_RTU_CLEAN	(1 << 1)
+
+} UNCACHED_ALIGN RGXFWIF_RAY_FRAME_DATA;
+#endif
+
+typedef struct _RGXFWIF_RENDER_TARGET_
+{
+	IMG_DEV_VIRTADDR	RGXFW_ALIGN psVHeapTableDevVAddr; /*!< VHeap Data Store */
+	IMG_BOOL			bTACachesNeedZeroing;			  /*!< Whether RTC and TPC caches (on mem) need to be zeroed on next first TA kick */
+
+} UNCACHED_ALIGN RGXFWIF_RENDER_TARGET;
+
+
+typedef struct _RGXFWIF_HWRTDATA_ 
+{
+	RGXFWIF_RTDATA_STATE	eState;
+
+	IMG_UINT32				ui32NumPartialRenders; /*!< Number of partial renders. Used to setup ZLS bits correctly */
+	IMG_DEV_VIRTADDR		RGXFW_ALIGN psPMMListDevVAddr; /*!< MList Data Store */
+
+#if defined(RGX_FEATURE_SCALABLE_TE_ARCH)
+	IMG_UINT64				RGXFW_ALIGN ui64VCECatBase[4];
+	IMG_UINT64				RGXFW_ALIGN ui64VCELastCatBase[4];
+	IMG_UINT64				RGXFW_ALIGN ui64TECatBase[4];
+	IMG_UINT64				RGXFW_ALIGN ui64TELastCatBase[4];
+#else
+	IMG_UINT64				RGXFW_ALIGN ui64VCECatBase;
+	IMG_UINT64				RGXFW_ALIGN ui64VCELastCatBase;
+	IMG_UINT64				RGXFW_ALIGN ui64TECatBase;
+	IMG_UINT64				RGXFW_ALIGN ui64TELastCatBase;
+#endif
+	IMG_UINT64				RGXFW_ALIGN ui64AlistCatBase;
+	IMG_UINT64				RGXFW_ALIGN ui64AlistLastCatBase;
+
+#if defined(SUPPORT_VFP)
+	IMG_DEV_VIRTADDR		RGXFW_ALIGN sVFPPageTableAddr;
+#endif
+	IMG_UINT64				RGXFW_ALIGN ui64PMAListStackPointer;
+	IMG_UINT32				ui32PMMListStackPointer;
+
+	PRGXFWIF_FREELIST 		RGXFW_ALIGN apsFreeLists[RGXFW_MAX_FREELISTS]; 
+	IMG_UINT32				aui32FreeListHWRSnapshot[RGXFW_MAX_FREELISTS];
+	
+	PRGXFWIF_RENDER_TARGET	psParentRenderTarget;
+
+	RGXFWIF_CLEANUP_CTL		sTACleanupState;
+	RGXFWIF_CLEANUP_CTL		s3DCleanupState;
+	IMG_UINT32				ui32CleanupStatus;
+#define HWRTDATA_TA_CLEAN	(1 << 0)
+#define HWRTDATA_3D_CLEAN	(1 << 1)
+
+	PRGXFWIF_RTA_CTL		psRTACtl;
+
+	IMG_UINT32				bHasLastTA;
+	IMG_BOOL				bPartialRendered;
+
+	IMG_UINT32				ui32PPPScreen;
+	IMG_UINT32				ui32PPPGridOffset;
+	IMG_UINT64				RGXFW_ALIGN ui64PPPMultiSampleCtl;
+	IMG_UINT32				ui32TPCStride;
+	IMG_DEV_VIRTADDR		RGXFW_ALIGN sTailPtrsDevVAddr;
+	IMG_UINT32				ui32TPCSize;
+	IMG_UINT32				ui32TEScreen;
+	IMG_UINT32				ui32MTileStride;
+	IMG_UINT32				ui32TEAA;
+	IMG_UINT32				ui32TEMTILE1;
+	IMG_UINT32				ui32TEMTILE2;
+	IMG_UINT32				ui32ISPMergeLowerX;
+	IMG_UINT32				ui32ISPMergeLowerY;
+	IMG_UINT32				ui32ISPMergeUpperX;
+	IMG_UINT32				ui32ISPMergeUpperY;
+	IMG_UINT32				ui32ISPMergeScaleX;
+	IMG_UINT32				ui32ISPMergeScaleY;
+} UNCACHED_ALIGN RGXFWIF_HWRTDATA;
+
+typedef enum
+{
+	RGXFWIF_ZSBUFFER_UNBACKED = 0,
+	RGXFWIF_ZSBUFFER_BACKED,
+	RGXFWIF_ZSBUFFER_BACKING_PENDING,
+	RGXFWIF_ZSBUFFER_UNBACKING_PENDING,
+}RGXFWIF_ZSBUFFER_STATE;
+
+typedef struct _RGXFWIF_ZSBUFFER_
+{
+	IMG_UINT32				ui32ZSBufferID;				/*!< Buffer ID*/
+	IMG_BOOL				bOnDemand;					/*!< Needs On-demand ZS Buffer allocation */
+	RGXFWIF_ZSBUFFER_STATE	eState;						/*!< Z/S-Buffer state */
+	RGXFWIF_CLEANUP_CTL		sCleanupState;				/*!< Cleanup state */
+} UNCACHED_ALIGN RGXFWIF_FWZSBUFFER;
+
+/* Number of BIF tiling configurations / heaps */
+#define RGXFWIF_NUM_BIF_TILING_CONFIGS 4
+
+/*!
+ *****************************************************************************
+ * RGX Compatibility checks
+ *****************************************************************************/
+/* WARNING: RGXFWIF_COMPCHECKS_BVNC_V_LEN_MAX can be increased only and
+		always equal to (N * sizeof(IMG_UINT32) - 1) */
+#define RGXFWIF_COMPCHECKS_BVNC_V_LEN_MAX 3 /* WARNING: Do not change this macro without changing 
+			accesses from dword to byte in function rgx_bvnc_packed() */
+
+/* WARNING: Whenever the layout of RGXFWIF_COMPCHECKS_BVNC is a subject of change,
+	following define should be increased by 1 to indicate to compatibility logic, 
+	that layout has changed */
+#define RGXFWIF_COMPCHECKS_LAYOUT_VERSION 1
+
+typedef struct _RGXFWIF_COMPCHECKS_BVNC_
+{
+	IMG_UINT32	ui32LayoutVersion; /* WARNING: This field must be defined as first one in this structure */
+	IMG_UINT32  ui32VLenMax;
+	IMG_UINT32	ui32BNC;
+	IMG_CHAR	aszV[RGXFWIF_COMPCHECKS_BVNC_V_LEN_MAX + 1];
+} UNCACHED_ALIGN RGXFWIF_COMPCHECKS_BVNC;
+
+#define RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(name) RGXFWIF_COMPCHECKS_BVNC name = { RGXFWIF_COMPCHECKS_LAYOUT_VERSION, RGXFWIF_COMPCHECKS_BVNC_V_LEN_MAX }
+#define RGXFWIF_COMPCHECKS_BVNC_INIT(name) do { (name).ui32LayoutVersion = RGXFWIF_COMPCHECKS_LAYOUT_VERSION; \
+												(name).ui32VLenMax = RGXFWIF_COMPCHECKS_BVNC_V_LEN_MAX; } while (0)
+
+typedef struct _RGXFWIF_COMPCHECKS_
+{
+	RGXFWIF_COMPCHECKS_BVNC		sHWBVNC;			/*!< hardware BNC (from the RGX registers) */
+	RGXFWIF_COMPCHECKS_BVNC		sFWBVNC;			/*!< firmware BNC */
+	IMG_UINT32					ui32METAVersion;
+	IMG_UINT32					ui32DDKVersion;		/*!< software DDK version */
+	IMG_UINT32					ui32DDKBuild;		/*!< software DDK build no. */
+	IMG_UINT32					ui32BuildOptions;	/*!< build options bit-field */
+	IMG_BOOL					bUpdated;			/*!< Information is valid */
+} UNCACHED_ALIGN RGXFWIF_COMPCHECKS;
+
+
+#define GET_CCB_SPACE(WOff, ROff, CCBSize) \
+	((((ROff) - (WOff)) + ((CCBSize) - 1)) & ((CCBSize) - 1))
+
+#define UPDATE_CCB_OFFSET(Off, PacketSize, CCBSize) \
+	(Off) = (((Off) + (PacketSize)) & ((CCBSize) - 1))
+
+#define RESERVED_CCB_SPACE 		(sizeof(IMG_UINT32))
+
+
+/* Defines relating to the per-context CCBs */
+#define RGX_CCB_SIZE_LOG2			(16) /* 64kB */
+#define RGX_CCB_ALLOCGRAN			(64)
+#define RGX_CCB_TYPE_TASK			(1 << 31)
+#define RGX_CCB_FWALLOC_ALIGN(size)	(((size) + (RGXFWIF_FWALLOC_ALIGN-1)) & ~(RGXFWIF_FWALLOC_ALIGN - 1))
+
+/*!
+ ******************************************************************************
+ * Client CCB commands for RGX
+ *****************************************************************************/
+typedef enum _RGXFWIF_CCB_CMD_TYPE_
+{
+	RGXFWIF_CCB_CMD_TYPE_TA			= 201 | RGX_CCB_TYPE_TASK,
+	RGXFWIF_CCB_CMD_TYPE_3D			= 202 | RGX_CCB_TYPE_TASK,
+	RGXFWIF_CCB_CMD_TYPE_CDM		= 203 | RGX_CCB_TYPE_TASK,
+	RGXFWIF_CCB_CMD_TYPE_TQ_3D		= 204 | RGX_CCB_TYPE_TASK,
+	RGXFWIF_CCB_CMD_TYPE_TQ_2D		= 205 | RGX_CCB_TYPE_TASK,
+	RGXFWIF_CCB_CMD_TYPE_3D_PR		= 206 | RGX_CCB_TYPE_TASK,
+	RGXFWIF_CCB_CMD_TYPE_NULL		= 207 | RGX_CCB_TYPE_TASK,
+	RGXFWIF_CCB_CMD_TYPE_SHG		= 208 | RGX_CCB_TYPE_TASK,
+	RGXFWIF_CCB_CMD_TYPE_RTU		= 209 | RGX_CCB_TYPE_TASK,
+	RGXFWIF_CCB_CMD_TYPE_RTU_FC		  = 210 | RGX_CCB_TYPE_TASK,
+	RGXFWIF_CCB_CMD_TYPE_PRE_TIMESTAMP = 211 | RGX_CCB_TYPE_TASK,
+
+/* Leave a gap between CCB specific commands and generic commands */
+	RGXFWIF_CCB_CMD_TYPE_FENCE          = 212,
+	RGXFWIF_CCB_CMD_TYPE_UPDATE         = 213,
+	RGXFWIF_CCB_CMD_TYPE_RMW_UPDATE     = 214,
+	RGXFWIF_CCB_CMD_TYPE_FENCE_PR       = 215,
+	RGXFWIF_CCB_CMD_TYPE_PRIORITY       = 216,
+/* Pre and Post timestamp commands are supposed to sandwich the DM cmd. The
+   padding code with the CCB wrap upsets the FW if we don't have the task type
+   bit cleared for POST_TIMESTAMPs. That's why we have 2 different cmd types.
+*/
+	RGXFWIF_CCB_CMD_TYPE_POST_TIMESTAMP = 217,
+	RGXFWIF_CCB_CMD_TYPE_UNFENCED_UPDATE = 218,
+	RGXFWIF_CCB_CMD_TYPE_UNFENCED_RMW_UPDATE = 219,
+	
+	RGXFWIF_CCB_CMD_TYPE_PADDING	= 220,
+} RGXFWIF_CCB_CMD_TYPE;
+
+typedef struct _RGXFWIF_CCB_CMD_HEADER_
+{
+	RGXFWIF_CCB_CMD_TYPE	eCmdType;
+	IMG_UINT32				ui32CmdSize;
+} RGXFWIF_CCB_CMD_HEADER;
+
+typedef enum _RGXFWIF_PWR_EVT_
+{
+	RGXFWIF_PWR_EVT_PWR_ON,			/* Sidekick power event */
+	RGXFWIF_PWR_EVT_DUST_CHANGE,		/* Rascal / dust power event */
+	RGXFWIF_PWR_EVT_ALL			/* Applies to all power events. Keep as last element */
+} RGXFWIF_PWR_EVT;
+
+typedef struct _RGXFWIF_REG_CFG_REC_
+{
+	IMG_UINT64		ui64Addr;
+	IMG_UINT64		ui64Value;
+} RGXFWIF_REG_CFG_REC;
+
+
+typedef struct _RGXFWIF_TIME_CORR_
+{
+	IMG_UINT64 RGXFW_ALIGN ui64OSTimeStamp;
+	IMG_UINT64 RGXFW_ALIGN ui64CRTimeStamp;
+	IMG_UINT32             ui32CoreClockSpeed;
+
+	/* Utility variable used to convert CR timer deltas to OS timer deltas (nS),
+	 * where the deltas are relative to the timestamps above:
+	 * deltaOS = (deltaCR * K) >> decimal_shift, see full explanation below */
+	IMG_UINT32             ui32CRDeltaToOSDeltaKNs;
+} UNCACHED_ALIGN RGXFWIF_TIME_CORR;
+
+typedef struct _RGXFWIF_TIMESTAMP_
+{
+	RGXFWIF_TIME_CORR      sTimeCorr;
+	IMG_UINT64 RGXFW_ALIGN ui64Timestamp;
+} UNCACHED_ALIGN RGXFWIF_TIMESTAMP;
+
+
+/* These macros are used to help converting FW timestamps to the Host time domain.
+ * On the FW the RGX_CR_TIMER counter is used to keep track of the time;
+ * it increments by 1 every 256 GPU clock ticks, so the general formula
+ * to perform the conversion is:
+ *
+ * [ GPU clock speed in Hz, if (scale == 10^9) then deltaOS is in nS,
+ *   otherwise if (scale == 10^6) then deltaOS is in uS ]
+ *
+ *             deltaCR * 256                                   256 * scale
+ *  deltaOS = --------------- * scale = deltaCR * K    [ K = --------------- ]
+ *             GPUclockspeed                                  GPUclockspeed
+ *
+ * The actual K is multiplied by 2^20 (and deltaCR * K is divided by 2^20)
+ * to get some better accuracy and to avoid returning 0 in the integer
+ * division 256000000/GPUfreq if GPUfreq is greater than 256MHz.
+ * This is the same as keeping K as a decimal number.
+ *
+ * The maximum deltaOS is slightly more than 5hrs for all GPU frequencies
+ * (deltaCR * K is more or less a costant), and it's relative to
+ * the base OS timestamp sampled as a part of the timer correlation data.
+ * This base is refreshed on GPU power-on, DVFS transition and
+ * periodic frequency calibration (executed every few seconds if the FW is
+ * doing some work), so as long as the GPU is doing something and one of these
+ * events is triggered then deltaCR * K will not overflow and deltaOS will be
+ * correct.
+ */
+
+#define RGXFWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT  (20)
+
+#define RGXFWIF_GET_CRDELTA_TO_OSDELTA_K_NS(clockfreq, remainder) \
+	OSDivide64((256000000ULL << RGXFWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT), \
+	           ((clockfreq) + 500) / 1000, \
+	           &(remainder))
+
+#define RGXFWIF_GET_DELTA_OSTIME_NS(deltaCR, K) \
+	( ((deltaCR) * (K)) >> RGXFWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT)
+
+#define RGXFWIF_GET_DELTA_OSTIME_US(deltacr, clockfreq, remainder) \
+	OSDivide64r64((deltacr) * 256000, ((clockfreq) + 500) / 1000, &(remainder))
+
+/* Use this macro to get a more realistic GPU core clock speed than
+ * the one given by the upper layers (used when doing GPU frequency
+ * calibration)
+ */
+#define RGXFWIF_GET_GPU_CLOCK_FREQUENCY_HZ(deltacr_us, deltaos_us, remainder) \
+	OSDivide64((deltacr_us) * 256000000, (deltaos_us), &(remainder))
+
+#endif /*  __RGX_FWIF_SHARED_H__ */
+
+/******************************************************************************
+ End of file (rgx_fwif_shared.h)
+******************************************************************************/
+
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/include/rgx_pdump_panics.h b/drivers/external_drivers/intel_media/graphics/rgx/services/include/rgx_pdump_panics.h
new file mode 100644
index 0000000..6bbe5b0
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/include/rgx_pdump_panics.h
@@ -0,0 +1,67 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX PDump panic definitions header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX PDump panic definitions header
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (RGX_PDUMP_PANICS_H_)
+#define RGX_PDUMP_PANICS_H_
+
+
+/*! Unique device specific IMG_UINT16 panic IDs to identify the cause of a
+ * RGX PDump panic in a PDump script. */
+typedef enum
+{
+	RGX_PDUMP_PANIC_UNDEFINED = 0,
+
+	/* These panics occur when test parameters and driver configuration
+	 * enable features that require the firmware and host driver to
+	 * communicate. Such features are not supported with off-line playback.
+	 */
+	RGX_PDUMP_PANIC_ZSBUFFER_BACKING         = 101, /*!< Requests ZSBuffer to be backed with physical pages */
+	RGX_PDUMP_PANIC_ZSBUFFER_UNBACKING       = 102, /*!< Requests ZSBuffer to be unbacked */
+	RGX_PDUMP_PANIC_FREELIST_GROW            = 103, /*!< Requests an on-demand freelist grow/shrink */
+	RGX_PDUMP_PANIC_FREELISTS_RECONSTRUCTION = 104, /*!< Requests freelists reconstruction */
+} RGX_PDUMP_PANIC;
+ 
+
+#endif /* RGX_PDUMP_PANICS_H_ */
+
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/include/rgx_tq_shared.h b/drivers/external_drivers/intel_media/graphics/rgx/services/include/rgx_tq_shared.h
new file mode 100644
index 0000000..bd3460c
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/include/rgx_tq_shared.h
@@ -0,0 +1,63 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX transfer queue shared
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Shared definitions between client and server
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __RGX_TQ_SHARED_H__
+#define __RGX_TQ_SHARED_H__
+
+#define TQ_MAX_PREPARES_PER_SUBMIT		16
+
+#define TQ_PREP_FLAGS_COMMAND_3D		0x0
+#define TQ_PREP_FLAGS_COMMAND_2D		0x1
+#define TQ_PREP_FLAGS_COMMAND_MASK		(0xf)
+#define TQ_PREP_FLAGS_COMMAND_SHIFT		0
+#define TQ_PREP_FLAGS_PDUMPCONTINUOUS	(1 << 4)
+#define TQ_PREP_FLAGS_START				(1 << 5)
+#define TQ_PREP_FLAGS_END				(1 << 6)
+
+#define TQ_PREP_FLAGS_COMMAND_SET(m) \
+	((TQ_PREP_FLAGS_COMMAND_##m << TQ_PREP_FLAGS_COMMAND_SHIFT) & TQ_PREP_FLAGS_COMMAND_MASK)
+
+#define TQ_PREP_FLAGS_COMMAND_IS(m,n) \
+	(((m & TQ_PREP_FLAGS_COMMAND_MASK) >> TQ_PREP_FLAGS_COMMAND_SHIFT)  == TQ_PREP_FLAGS_COMMAND_##n)
+
+#endif /* __RGX_TQ_SHARED_H__ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/include/rgxapi_km.h b/drivers/external_drivers/intel_media/graphics/rgx/services/include/rgxapi_km.h
new file mode 100644
index 0000000..3079b44
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/include/rgxapi_km.h
@@ -0,0 +1,192 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX API Header kernel mode
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Exported RGX API details
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __RGXAPI_KM_H__
+#define __RGXAPI_KM_H__
+
+#if defined(SUPPORT_SHARED_SLC)
+/*!
+******************************************************************************
+
+ @Function	RGXInitSLC
+
+ @Description Init the SLC after a power up. It is required to call this 
+              function if using SUPPORT_SHARED_SLC. Otherwise, it shouldn't
+			  be called.
+
+ @Input	   hDevHandle : RGX Device Node
+
+ @Return   PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXInitSLC(IMG_HANDLE hDevHandle);
+#endif
+
+#if defined(SUPPORT_KERNEL_HWPERF)
+
+#include "rgx_hwperf_km.h"
+
+
+/******************************************************************************
+ * RGX HW Performance Profiling Control API(s)
+ *****************************************************************************/
+
+/**************************************************************************/ /*!
+@Function      RGXHWPerfConnect
+@Description   Obtain a connection object to the HWPerf device
+@Output        phDevData      Address of a handle to a connection object
+@Return        PVRSRV_ERROR:  for system error codes
+*/ /***************************************************************************/
+PVRSRV_ERROR RGXHWPerfConnect(
+		IMG_HANDLE* phDevData);
+
+
+/**************************************************************************/ /*!
+@Function       RGXHWPerfDisconnect
+@Description    Disconnect from the HWPerf device
+@Input          hSrvHandle    Handle to connection object as returned from
+                                RGXHWPerfConnect()
+@Return         PVRSRV_ERROR: for system error codes
+*/ /***************************************************************************/
+PVRSRV_ERROR RGXHWPerfDisconnect(
+		IMG_HANDLE hDevData);
+
+
+/**************************************************************************/ /*!
+@Function       RGXHWPerfControl
+@Description    Enable or disable the generation of RGX HWPerf event packets.
+                 See RGXCtrlHWPerf().
+@Input          hDevData         Handle to connection object
+@Input          bToggle          Switch to toggle or apply mask.
+@Input          ui64Mask         Mask of events to control.
+@Return         PVRSRV_ERROR:    for system error codes
+*/ /***************************************************************************/
+PVRSRV_ERROR IMG_CALLCONV RGXHWPerfControl(
+		IMG_HANDLE  hDevData,
+		IMG_BOOL    bToggle,
+		IMG_UINT64  ui64Mask);
+
+
+/**************************************************************************/ /*!
+@Function       RGXHWPerfConfigureAndEnableCounters
+@Description    Enable and configure the performance counter block for
+                 one or more device layout modules.
+                 See RGXConfigureAndEnableHWPerfCounters().
+@Input          hDevData         Handle to connection object
+@Input          ui32NumBlocks    Number of elements in the array
+@Input          asBlockConfigs   Address of the array of configuration blocks
+@Return         PVRSRV_ERROR:    for system error codes
+*/ /***************************************************************************/
+PVRSRV_ERROR IMG_CALLCONV RGXHWPerfConfigureAndEnableCounters(
+		IMG_HANDLE                 hDevData,
+		IMG_UINT32                 ui32NumBlocks,
+		RGX_HWPERF_CONFIG_CNTBLK*  asBlockConfigs);
+
+
+/**************************************************************************/ /*!
+@Function       RGXDisableHWPerfCounters
+@Description    Disable the performance counter block for one or more
+                 device layout modules. See RGXDisableHWPerfCounters().
+@Input          hDevData        Handle to connection/device object
+@Input          ui32NumBlocks   Number of elements in the array
+@Input          aeBlockIDs      An array of bytes with values taken from
+                                 the RGX_HWPERF_CNTBLK_ID enumeration.
+@Return         PVRSRV_ERROR:   for system error codes
+*/ /***************************************************************************/
+PVRSRV_ERROR IMG_CALLCONV RGXHWPerfDisableCounters(
+		IMG_HANDLE   hDevData,
+		IMG_UINT32   ui32NumBlocks,
+		IMG_UINT16*   aeBlockIDs);
+
+
+/******************************************************************************
+ * RGX HW Performance Profiling Retrieval API(s)
+ *
+ * The client must ensure their use of this acquire/release API for a single 
+ * connection/stream must not be shared with multiple execution contexts e.g.
+ * between a kernel thread and an ISR handler. It is the client’s
+ * responsibility to ensure this API is not interrupted by a high priority
+ * thread/ISR
+ *****************************************************************************/
+
+/**************************************************************************/ /*!
+@Function       RGXHWPerfAcquireData
+@Description    When there is data available to read this call returns with
+                 the address and length of the data buffer the
+                 client can safely read. This buffer may contain one or more
+                 event packets. If no data is available then this call 
+				 returns OK and sets *puiBufLen to 0 on exit.
+				 Clients must pair this call with a ReleaseData call.
+@Input          hDevData        Handle to connection/device object
+@Output         ppBuf           Address of a pointer to a byte buffer. On exit
+                                 it contains the address of buffer to read from
+@Output         puiBufLen       Pointer to an integer. On exit it is the size
+                                 of the data to read from the buffer
+@Return         PVRSRV_ERROR:   for system error codes
+*/ /***************************************************************************/
+PVRSRV_ERROR RGXHWPerfAcquireData(
+		IMG_HANDLE  hDevData,
+		IMG_PBYTE*  ppBuf,
+		IMG_UINT32* pui32BufLen);
+
+
+/**************************************************************************/ /*!
+@Function       RGXHWPerfReleaseData
+@Description    Called after client has read the event data out of the buffer
+                 retrieved from the Acquire Data call to release resources.
+@Input          hDevData        Handle to connection/device object
+@Return         PVRSRV_ERROR:   for system error codes
+*/ /***************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR RGXHWPerfReleaseData(
+		IMG_HANDLE hDevData);
+
+
+#endif /* SUPPORT_KERNEL_HWPERF */
+
+
+#endif /* __RGXAPI_KM_H__ */
+
+/******************************************************************************
+ End of file (rgxapi_km.h)
+******************************************************************************/
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/include/shared/allocmem.h b/drivers/external_drivers/intel_media/graphics/rgx/services/include/shared/allocmem.h
new file mode 100644
index 0000000..1abd355
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/include/shared/allocmem.h
@@ -0,0 +1,81 @@
+/*************************************************************************/ /*!
+@File
+@Title          memory allocation header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Memory-Allocation API definitions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __ALLOCMEM_H__
+#define __ALLOCMEM_H__
+
+#include "img_types.h"
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+IMG_PVOID OSAllocMem(IMG_UINT32 ui32Size);
+
+IMG_PVOID OSAllocMemstatMem(IMG_UINT32 ui32Size);
+
+IMG_PVOID OSAllocZMem(IMG_UINT32 ui32Size);
+
+IMG_PVOID OSAllocMemstatZMem(IMG_UINT32 ui32Size);
+
+IMG_VOID OSFreeMem(IMG_PVOID pvCpuVAddr);
+
+IMG_VOID OSFreeMemstatMem(IMG_PVOID pvCpuVAddr);
+
+#define OSFREEMEM(_ptr) do \
+	{ OSFreeMem((_ptr)); \
+		(_ptr) = (IMG_VOID*)0; \
+		MSC_SUPPRESS_4127\
+	} while (0)
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* __ALLOCMEM_H__ */
+
+/******************************************************************************
+ End of file (allocmem.h)
+******************************************************************************/
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/include/shared/hash.h b/drivers/external_drivers/intel_media/graphics/rgx/services/include/shared/hash.h
new file mode 100644
index 0000000..abacd69
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/include/shared/hash.h
@@ -0,0 +1,229 @@
+/*************************************************************************/ /*!
+@File
+@Title          Self scaling hash tables
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements simple self scaling hash tables.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _HASH_H_
+#define _HASH_H_
+
+/* include5/ */
+#include "img_types.h"
+
+/* services/client/include/ or services/server/include/ */
+#include "osfunc.h"
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/*
+ * Keys passed to the comparsion function are only guaranteed to
+ * be aligned on an IMG_UINTPTR_T boundary. 
+ */
+typedef IMG_UINT32 HASH_FUNC(IMG_SIZE_T uKeySize, IMG_VOID *pKey, IMG_UINT32 uHashTabLen);
+typedef IMG_BOOL HASH_KEY_COMP(IMG_SIZE_T uKeySize, IMG_VOID *pKey1, IMG_VOID *pKey2);
+
+typedef struct _HASH_TABLE_ HASH_TABLE;
+
+typedef PVRSRV_ERROR (*HASH_pfnCallback) (
+	IMG_UINTPTR_T k,
+	IMG_UINTPTR_T v
+);
+
+/*************************************************************************/ /*!
+@Function       HASH_Func_Default
+@Description    Hash function intended for hashing keys composed of
+                IMG_UINTPTR_T arrays.
+@Input          uKeySize     The size of the hash key, in bytes.
+@Input          pKey         A pointer to the key to hash.
+@Input          uHashTabLen  The length of the hash table. 
+@Return         The hash value.
+*/ /**************************************************************************/
+IMG_UINT32 HASH_Func_Default (IMG_SIZE_T uKeySize, IMG_VOID *pKey, IMG_UINT32 uHashTabLen);
+
+/*************************************************************************/ /*!
+@Function       HASH_Key_Comp_Default
+@Description    Compares keys composed of IMG_UINTPTR_T arrays.
+@Input          uKeySize     The size of the hash key, in bytes.
+@Input          pKey1        Pointer to first hash key to compare.
+@Input          pKey2        Pointer to second hash key to compare.
+@Return         IMG_TRUE  - the keys match.
+                IMG_FALSE - the keys don't match.
+*/ /**************************************************************************/
+IMG_BOOL HASH_Key_Comp_Default (IMG_SIZE_T uKeySize, IMG_VOID *pKey1, IMG_VOID *pKey2);
+
+/*************************************************************************/ /*!
+@Function       HASH_Create_Extended
+@Description    Create a self scaling hash table, using the supplied
+                key size, and the supllied hash and key comparsion
+                functions.
+@Input          uInitialLen   Initial and minimum length of the
+                              hash table, where the length refers to the number
+                              of entries in the hash table, not its size in
+                              bytes.
+@Input          uKeySize      The size of the key, in bytes.
+@Input          pfnHashFunc   Pointer to hash function.
+@Input          pfnKeyComp    Pointer to key comparsion function.
+@Return         IMG_NULL or hash table handle.
+*/ /**************************************************************************/
+HASH_TABLE * HASH_Create_Extended (IMG_UINT32 uInitialLen, IMG_SIZE_T uKeySize, HASH_FUNC *pfnHashFunc, HASH_KEY_COMP *pfnKeyComp);
+
+/*************************************************************************/ /*!
+@Function       HASH_Create
+@Description    Create a self scaling hash table with a key
+                consisting of a single IMG_UINTPTR_T, and using
+                the default hash and key comparison functions.
+@Input          uInitialLen   Initial and minimum length of the
+                              hash table, where the length refers to the
+                              number of entries in the hash table, not its size
+                              in bytes.
+@Return         IMG_NULL or hash table handle.
+*/ /**************************************************************************/
+HASH_TABLE * HASH_Create (IMG_UINT32 uInitialLen);
+
+/*************************************************************************/ /*!
+@Function       HASH_Delete
+@Description    Delete a hash table created by HASH_Create_Extended or
+                HASH_Create.  All entries in the table must have been
+                removed before calling this function.
+@Input          pHash         Hash table
+*/ /**************************************************************************/
+IMG_VOID HASH_Delete (HASH_TABLE *pHash);
+
+/*************************************************************************/ /*!
+@Function       HASH_Insert_Extended
+@Description    Insert a key value pair into a hash table created
+                with HASH_Create_Extended.
+@Input          pHash         The hash table.
+@Input          pKey          Pointer to the key.
+@Input          v             The value associated with the key.
+@Return         IMG_TRUE  - success
+                IMG_FALSE  - failure
+*/ /**************************************************************************/
+IMG_BOOL HASH_Insert_Extended (HASH_TABLE *pHash, IMG_VOID *pKey, IMG_UINTPTR_T v);
+
+/*************************************************************************/ /*!
+@Function       HASH_Insert
+
+@Description    Insert a key value pair into a hash table created with
+                HASH_Create.
+@Input          pHash         The hash table.
+@Input          k             The key value.
+@Input          v             The value associated with the key.
+@Return         IMG_TRUE - success.
+                IMG_FALSE - failure.
+*/ /**************************************************************************/
+IMG_BOOL HASH_Insert (HASH_TABLE *pHash, IMG_UINTPTR_T k, IMG_UINTPTR_T v);
+
+/*************************************************************************/ /*!
+@Function       HASH_Remove_Extended
+@Description    Remove a key from a hash table created with
+                HASH_Create_Extended.
+@Input          pHash         The hash table.
+@Input          pKey          Pointer to key.
+@Return         0 if the key is missing, or the value associated
+                with the key.
+*/ /**************************************************************************/
+IMG_UINTPTR_T HASH_Remove_Extended(HASH_TABLE *pHash, IMG_VOID *pKey);
+
+/*************************************************************************/ /*!
+@Function       HASH_Remove
+@Description    Remove a key value pair from a hash table created
+                with HASH_Create.
+@Input          pHash         The hash table.
+@Input          pKey          Pointer to key.
+@Return         0 if the key is missing, or the value associated
+                with the key.
+*/ /**************************************************************************/
+IMG_UINTPTR_T HASH_Remove (HASH_TABLE *pHash, IMG_UINTPTR_T k);
+
+/*************************************************************************/ /*!
+@Function       HASH_Retrieve_Extended
+@Description    Retrieve a value from a hash table created with
+                HASH_Create_Extended.
+@Input          pHash         The hash table.
+@Input          pKey          Pointer to key.
+@Return         0 if the key is missing, or the value associated with
+                the key.
+*/ /**************************************************************************/
+IMG_UINTPTR_T HASH_Retrieve_Extended (HASH_TABLE *pHash, IMG_VOID *pKey);
+
+/*************************************************************************/ /*!
+@Function       HASH_Retrieve
+@Description    Retrieve a value from a hash table created with
+                HASH_Create.
+@Input          pHash         The hash table.
+@Input          pKey          Pointer to key.
+@Return         0 if the key is missing, or the value associated with
+                the key.
+*/ /**************************************************************************/
+IMG_UINTPTR_T HASH_Retrieve (HASH_TABLE *pHash, IMG_UINTPTR_T k);
+
+/*************************************************************************/ /*!
+@Function       HASH_Iterate
+@Description    Iterate over every entry in the hash table
+@Input          pHash			Hash table to iterate
+@Input          pfnCallback		Callback to call with the key and data for
+								each entry in the hash table
+@Return         Callback error if any, otherwise PVRSRV_OK
+*/ /**************************************************************************/
+PVRSRV_ERROR HASH_Iterate(HASH_TABLE *pHash, HASH_pfnCallback pfnCallback);
+
+#ifdef HASH_TRACE
+/*************************************************************************/ /*!
+@Function       HASH_Dump
+@Description    Dump out some information about a hash table.
+@Input          pHash         The hash table.
+*/ /**************************************************************************/
+IMG_VOID HASH_Dump (HASH_TABLE *pHash);
+#endif
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* _HASH_H_ */
+
+/******************************************************************************
+ End of file (hash.h)
+******************************************************************************/
+
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/include/shared/lock.h b/drivers/external_drivers/intel_media/graphics/rgx/services/include/shared/lock.h
new file mode 100644
index 0000000..6353c28
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/include/shared/lock.h
@@ -0,0 +1,169 @@
+/*************************************************************************/ /*!
+@File           lock.h
+@Title          Locking interface
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Services internal locking interface
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _LOCK_H_
+#define _LOCK_H_
+
+/* In Linux kernel mode we are using the kernel mutex implementation directly
+ * with macros. This allows us to use the kernel lockdep feature for lock
+ * debugging. */
+#include "lock_types.h"
+
+#if defined(LINUX) && defined(__KERNEL__)
+
+#include "allocmem.h"
+#include <asm/atomic.h>
+
+#define OSLockCreate(phLock, eLockType) ({ \
+	PVRSRV_ERROR e = PVRSRV_ERROR_OUT_OF_MEMORY; \
+	*(phLock) = OSAllocMem(sizeof(struct mutex)); \
+	if (*(phLock)) { mutex_init(*(phLock)); e = PVRSRV_OK; }; \
+	e;})
+#define OSLockDestroy(hLock) ({mutex_destroy((hLock)); OSFreeMem((hLock)); PVRSRV_OK;})
+
+#define OSLockAcquire(hLock) ({mutex_lock((hLock)); PVRSRV_OK;})
+#define OSLockAcquireNested(hLock, subclass) ({mutex_lock_nested((hLock), (subclass)); PVRSRV_OK;})
+#define OSLockRelease(hLock) ({mutex_unlock((hLock)); PVRSRV_OK;})
+
+#define OSLockIsLocked(hLock) ({IMG_BOOL b = ((mutex_is_locked((hLock)) == 1) ? IMG_TRUE : IMG_FALSE); b;})
+#define OSTryLockAcquire(hLock) ({IMG_BOOL b = ((mutex_trylock(hLock) == 1) ? IMG_TRUE : IMG_FALSE); b;})
+
+/* These _may_ be reordered or optimized away entirely by the compiler/hw */
+#define OSAtomicRead(pCounter)	({IMG_INT rv = atomic_read(pCounter); rv;})
+#define OSAtomicWrite(pCounter, i)	({ atomic_set(pCounter, i); })
+
+/* The following atomic operations, in addition to being SMP-safe, also
+   imply a memory barrier around the operation  */
+#define OSAtomicIncrement(pCounter)	({IMG_INT rv = atomic_inc_return(pCounter); rv;})
+#define OSAtomicDecrement(pCounter) ({IMG_INT rv = atomic_dec_return(pCounter); rv;})
+#define OSAtomicCompareExchange(pCounter, oldv, newv) ({IMG_INT rv = atomic_cmpxchg(pCounter,oldv,newv); rv;})
+
+#define OSAtomicAdd(pCounter, incr) ({IMG_INT rv = atomic_add_return(incr,pCounter); rv;})
+#define OSAtomicAddUnless(pCounter, incr, test) ({IMG_INT rv = __atomic_add_unless(pCounter,incr,test); rv;})
+
+#define OSAtomicSubtract(pCounter, incr) ({IMG_INT rv = atomic_add_return(-(incr),pCounter); rv;})
+#define OSAtomicSubtractUnless(pCounter, incr, test) OSAtomicAddUnless(pCounter, -(incr), test)
+
+#else /* defined(LINUX) && defined(__KERNEL__) */
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+IMG_INTERNAL
+PVRSRV_ERROR OSLockCreate(POS_LOCK *phLock, LOCK_TYPE eLockType);
+
+IMG_INTERNAL
+IMG_VOID OSLockDestroy(POS_LOCK hLock);
+
+IMG_INTERNAL
+IMG_VOID OSLockAcquire(POS_LOCK hLock);
+
+/* Nested notation isn't used in UM or other OS's */
+#define OSLockAcquireNested(hLock, subclass) OSLockAcquire((hLock))
+
+IMG_INTERNAL
+IMG_VOID OSLockRelease(POS_LOCK hLock);
+
+IMG_INTERNAL
+IMG_BOOL OSLockIsLocked(POS_LOCK hLock);
+
+#if defined(LINUX)
+
+/* Use GCC intrinsics (read/write semantics consistent with kernel-side implementation) */
+#define OSAtomicRead(pCounter) ({IMG_INT rv =  *(volatile int *)&(pCounter)->counter; rv;}) 
+#define OSAtomicWrite(pCounter, i) ({(pCounter)->counter = (IMG_INT) i;}) 
+#define OSAtomicIncrement(pCounter) ({IMG_INT rv = __sync_add_and_fetch((&(pCounter)->counter), 1); rv;}) 
+#define OSAtomicDecrement(pCounter) ({IMG_INT rv = __sync_sub_and_fetch((&(pCounter)->counter), 1); rv;}) 
+#define OSAtomicCompareExchange(pCounter, oldv, newv)  \
+	({IMG_INT rv = __sync_val_compare_and_swap((&(pCounter)->counter), oldv, newv); rv;})
+	
+#define OSAtomicAdd(pCounter, incr) ({IMG_INT rv = __sync_add_and_fetch((&(pCounter)->counter), incr); rv;}) 
+#define OSAtomicAddUnless(pCounter, incr, test) ({ \
+	int c; int old; \
+	c = OSAtomicRead(pCounter); \
+    while (1) { \
+		if (c == (test)) break; \
+		old = OSAtomicCompareExchange(pCounter, c, c+(incr)); \
+		if (old == c) break; \
+		c = old; \
+	} c; })
+
+#define OSAtomicSubtract(pCounter, incr) OSAtomicAdd(pCounter, -(incr))	
+#define OSAtomicSubtractUnless(pCounter, incr, test) OSAtomicAddUnless(pCounter, -(incr), test)
+
+#else
+
+/* These _may_ be reordered or optimized away entirely by the compiler/hw */
+IMG_INTERNAL
+IMG_INT OSAtomicRead(ATOMIC_T *pCounter);
+
+IMG_INTERNAL
+IMG_VOID OSAtomicWrite(ATOMIC_T *pCounter, IMG_INT v);
+
+/* For the following atomic operations, in addition to being SMP-safe, 
+   _should_ also  have a memory barrier around each operation  */
+IMG_INTERNAL
+IMG_INT OSAtomicIncrement(ATOMIC_T *pCounter);
+
+IMG_INTERNAL
+IMG_INT OSAtomicDecrement(ATOMIC_T *pCounter);
+
+IMG_INTERNAL
+IMG_INT OSAtomicAdd(ATOMIC_T *pCounter, IMG_INT v);
+
+IMG_INTERNAL
+IMG_INT OSAtomicAddUnless(ATOMIC_T *pCounter, IMG_INT v, IMG_INT t);
+
+IMG_INTERNAL
+IMG_INT OSAtomicSubtract(ATOMIC_T *pCounter, IMG_INT v);
+
+IMG_INTERNAL
+IMG_INT OSAtomicSubtractUnless(ATOMIC_T *pCounter, IMG_INT v, IMG_INT t);
+
+IMG_INTERNAL
+IMG_INT OSAtomicCompareExchange(ATOMIC_T *pCounter, IMG_INT oldv, IMG_INT newv);
+
+#endif /* defined(LINUX) */
+#endif /* defined(LINUX) && defined(__KERNEL__) */
+
+#endif	/* _LOCK_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/include/shared/ra.h b/drivers/external_drivers/intel_media/graphics/rgx/services/include/shared/ra.h
new file mode 100644
index 0000000..506926d
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/include/shared/ra.h
@@ -0,0 +1,203 @@
+/*************************************************************************/ /*!
+@File
+@Title          Resource Allocator API
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RA_H_
+#define _RA_H_
+
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+/** Resource arena.
+ *  struct _RA_ARENA_ deliberately opaque
+ */
+typedef struct _RA_ARENA_ RA_ARENA;			//PRQA S 3313
+
+/*
+ * Per-Arena handle - this is private data for the caller of the RA.
+ * The RA knows nothing about this data.  It is given it upon
+ * RA_Create, and promises to pass it to calls to the ImportAlloc and
+ * ImportFree callbacks
+ */
+typedef IMG_HANDLE RA_PERARENA_HANDLE;
+/*
+ * Per-Import handle - this is private data for the caller of the RA.
+ * The RA knows nothing about this data.  It is given it on a
+ * per-import basis, either the "initial" import at RA_Create time, or
+ * further imports via the ImportAlloc callback.  It sends it back via
+ * the ImportFree callback, and also provides it in answer to any
+ * RA_Alloc request to signify from which "import" the allocation came
+ */
+typedef IMG_HANDLE RA_PERISPAN_HANDLE;
+
+typedef IMG_UINT64 RA_BASE_T;
+typedef IMG_UINT32 RA_LOG2QUANTUM_T;
+typedef IMG_UINT64 RA_LENGTH_T;
+
+/* Lock classes: describes the level of nesting between different arenas. */
+#define RA_LOCKCLASS_0 0
+#define RA_LOCKCLASS_1 1
+#define RA_LOCKCLASS_2 2
+
+/*
+ * Flags in an "import" must much the flags for an allocation
+ */
+typedef IMG_UINT32 RA_FLAGS_T;
+
+struct _RA_SEGMENT_DETAILS_
+{
+	RA_LENGTH_T      uiSize;
+	IMG_CPU_PHYADDR sCpuPhyAddr;
+	IMG_HANDLE      hSegment;
+};
+typedef struct _RA_SEGMENT_DETAILS_ RA_SEGMENT_DETAILS;
+
+/**
+ *  @Function   RA_Create
+ *
+ *  @Description
+ *
+ *  To create a resource arena.
+ *
+ *  @Input name - the name of the arena for diagnostic purposes.
+ *  @Input uQuantum - the arena allocation quantum.
+ *  @Input ui32LockClass - the lock class level this arena uses.
+ *  @Input alloc - a resource allocation callback or 0.
+ *  @Input free - a resource de-allocation callback or 0.
+ *  @Input per_arena_handle - user private handle passed to alloc and free or 0.
+ *  @Return pointer to arena, or IMG_NULL.
+ */
+RA_ARENA *
+RA_Create (IMG_CHAR *name,
+           /* subsequent imports: */
+           RA_LOG2QUANTUM_T uLog2Quantum,
+		   IMG_UINT32 ui32LockClass,
+           IMG_BOOL (*imp_alloc)(RA_PERARENA_HANDLE _h,
+                                 RA_LENGTH_T uSize,
+                                 RA_FLAGS_T uFlags,
+                                 RA_BASE_T *pBase,
+                                 RA_LENGTH_T *pActualSize,
+                                 RA_PERISPAN_HANDLE *phPriv),
+           IMG_VOID (*imp_free) (RA_PERARENA_HANDLE,
+                                 RA_BASE_T,
+                                 RA_PERISPAN_HANDLE),
+           RA_PERARENA_HANDLE per_arena_handle);
+
+/**
+ *  @Function   RA_Delete
+ *
+ *  @Description
+ *
+ *  To delete a resource arena. All resources allocated from the arena
+ *  must be freed before deleting the arena.
+ *                  
+ *  @Input  pArena - the arena to delete.
+ *  @Return None
+ */
+IMG_VOID
+RA_Delete (RA_ARENA *pArena);
+
+/**
+ *  @Function   RA_Add
+ *
+ *  @Description
+ *
+ *  To add a resource span to an arena. The span must not overlap with
+ *  any span previously added to the arena.
+ *
+ *  @Input pArena - the arena to add a span into.
+ *  @Input base - the base of the span.
+ *  @Input uSize - the extent of the span.
+ *  @Input hPriv - handle associated to the span (reserved to user uses)
+ *  @Return IMG_TRUE - success, IMG_FALSE - failure
+ */
+IMG_BOOL
+RA_Add (RA_ARENA *pArena,
+		RA_BASE_T base,
+		RA_LENGTH_T uSize,
+		RA_FLAGS_T uFlags,
+		RA_PERISPAN_HANDLE hPriv);
+
+/**
+ *  @Function   RA_Alloc
+ *
+ *  @Description
+ *
+ *  To allocate resource from an arena.
+ *
+ *  @Input  pArena - the arena
+ *  @Input  uRequestSize - the size of resource segment requested.
+ *  @Output pActualSize - the actual_size of resource segment allocated,
+ *          typcially rounded up by quantum.
+ *  @Input  uFlags - flags influencing allocation policy.
+ *  @Input  uAlignment - the alignment constraint required for the
+ *          allocated segment, use 0 if alignment not required.
+ *  @Output pBase - allocated base resource
+ *  @Output phPriv - the user reference associated with allocated
+ *          resource span.
+ *  @Return IMG_TRUE - success, IMG_FALSE - failure
+ */
+IMG_BOOL
+RA_Alloc (RA_ARENA *pArena, 
+          RA_LENGTH_T uSize,
+          RA_FLAGS_T uFlags,
+          RA_LENGTH_T uAlignment,
+          RA_BASE_T *pBase,
+          RA_LENGTH_T *pActualSize,
+          RA_PERISPAN_HANDLE *phPriv);
+
+/**
+ *  @Function   RA_Free
+ *
+ *  @Description    To free a resource segment.
+ *  
+ *  @Input  pArena - the arena the segment was originally allocated from.
+ *  @Input  base - the base of the resource span to free.
+ *	@Input	bFreeBackingStore - Should backing store memory be freed?
+ *
+ *  @Return None
+ */
+IMG_VOID 
+RA_Free (RA_ARENA *pArena, RA_BASE_T base);
+
+#endif
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/cache_generic.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/cache_generic.c
new file mode 100644
index 0000000..25f2dba
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/cache_generic.c
@@ -0,0 +1,58 @@
+/*************************************************************************/ /*!
+@File
+@Title          CPU generic cache management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements server side code for CPU cache management in a
+                CPU agnostic manner.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include "cache_generic.h"
+#include "cache_internal.h"
+#include "device.h"
+#include "pvr_debug.h"
+#include "pvrsrv.h"
+#include "osfunc.h"
+#include "pmr.h"
+
+PVRSRV_ERROR CacheOpQueue(PVRSRV_CACHE_OP uiCacheOp)
+{
+	PVRSRV_DATA *psData = PVRSRVGetPVRSRVData();
+
+	psData->uiCacheOp = SetCacheOp(psData->uiCacheOp, uiCacheOp);
+	return PVRSRV_OK;
+}
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/connection_server.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/connection_server.c
new file mode 100644
index 0000000..6e99b67
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/connection_server.c
@@ -0,0 +1,291 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server side connection management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Handles connections coming from the client and the management
+                connection based information
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "handle.h"
+#include "pvrsrv.h"
+#include "connection_server.h"
+#include "osconnection_server.h"
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "sync_server.h"
+#include "process_stats.h"
+#include "pdump_km.h"
+#include "lists.h"
+
+/* PID associated with Connection currently being purged by Cleanup thread */
+static IMG_PID gCurrentPurgeConnectionPid = 0;
+
+static PVRSRV_ERROR ConnectionDataDestroy(CONNECTION_DATA *psConnection)
+{
+	PVRSRV_ERROR eError;
+
+	if (psConnection == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "ConnectionDestroy: Missing connection!"));
+		PVR_ASSERT(0);
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* Close the process statistics */
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+	if (psConnection->hProcessStats != IMG_NULL)
+	{
+		PVRSRVStatsDeregisterProcess(psConnection->hProcessStats);
+		psConnection->hProcessStats = IMG_NULL;
+	}
+#endif
+
+	/* Free handle base for this connection */
+	if (psConnection->psHandleBase != IMG_NULL)
+	{
+		PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+		IMG_UINT64 ui64MaxBridgeTime;
+
+		if(psPVRSRVData->bUnload)
+		{
+			/* driver is unloading so do not allow the bridge lock to be released */
+			ui64MaxBridgeTime = 0;
+		}
+		else
+		{
+			ui64MaxBridgeTime = CONNECTION_DEFERRED_CLEANUP_TIMESLICE_NS;
+		}
+
+		eError = PVRSRVFreeHandleBase(psConnection->psHandleBase, ui64MaxBridgeTime);
+		if (eError != PVRSRV_OK)
+		{
+			if (eError != PVRSRV_ERROR_RETRY)
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+					 "ConnectionDataDestroy: Couldn't free handle base for connection (%d)",
+					 eError));
+			}
+
+			return eError;
+		}
+
+		psConnection->psHandleBase = IMG_NULL;
+	}
+
+	if (psConnection->psSyncConnectionData != IMG_NULL)
+	{
+		SyncUnregisterConnection(psConnection->psSyncConnectionData);
+		psConnection->psSyncConnectionData = IMG_NULL;
+	}
+
+	if (psConnection->psPDumpConnectionData != IMG_NULL)
+	{
+		PDumpUnregisterConnection(psConnection->psPDumpConnectionData);
+		psConnection->psPDumpConnectionData = IMG_NULL;
+	}
+
+	/* Call environment specific connection data deinit function */
+	if (psConnection->hOsPrivateData != IMG_NULL)
+	{
+		eError = OSConnectionPrivateDataDeInit(psConnection->hOsPrivateData);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+				 "PVRSRVConnectionDataDestroy: OSConnectionPrivateDataDeInit failed (%d)",
+				 eError));
+
+			return eError;
+		}
+
+		psConnection->hOsPrivateData = IMG_NULL;
+	}
+
+	OSFreeMem(psConnection);
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PVRSRVConnectionConnect(IMG_PVOID *ppvPrivData, IMG_PVOID pvOSData)
+{
+	CONNECTION_DATA *psConnection;
+	PVRSRV_ERROR eError;
+
+	/* Allocate connection data area */
+	psConnection = OSAllocZMem(sizeof(*psConnection));
+	if (psConnection == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVConnectionConnect: Couldn't allocate connection data"));
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	/* Call environment specific connection data init function */
+	eError = OSConnectionPrivateDataInit(&psConnection->hOsPrivateData, pvOSData);
+	if (eError != PVRSRV_OK)
+	{
+		 PVR_DPF((PVR_DBG_ERROR,
+			  "PVRSRVConnectionConnect: OSConnectionPrivateDataInit failed (%d)",
+			  eError));
+		goto failure;
+	}
+
+	psConnection->pid = OSGetCurrentProcessID();
+
+	/* Register this connection with the sync core */
+	eError = SyncRegisterConnection(&psConnection->psSyncConnectionData);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVConnectionConnect: Couldn't register the sync data"));
+		goto failure;
+	}
+
+	/*
+	 * Register this connection with the pdump core. Pass in the sync connection data
+	 * as it will be needed later when we only get passed in the PDump connection data.
+	 */
+	eError = PDumpRegisterConnection(psConnection->psSyncConnectionData,
+					 &psConnection->psPDumpConnectionData);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVConnectionConnect: Couldn't register the PDump data"));
+		goto failure;
+	}
+
+	/* Allocate handle base for this connection */
+	eError = PVRSRVAllocHandleBase(&psConnection->psHandleBase);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVConnectionConnect: Couldn't allocate handle base for connection (%d)",
+			 eError));
+		goto failure;
+	}
+
+	/* Allocate process statistics */
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+	eError = PVRSRVStatsRegisterProcess(&psConnection->hProcessStats);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVConnectionConnect: Couldn't register process statistics (%d)",
+			 eError));
+		goto failure;
+	}
+#endif
+
+	*ppvPrivData = psConnection;
+
+	return eError;
+
+failure:
+	ConnectionDataDestroy(psConnection);
+
+	return eError;
+}
+
+static PVRSRV_ERROR _CleanupThreadPurgeConnectionData(void *pvConnectionData)
+{
+	PVRSRV_ERROR eErrorConnection, eErrorKernel;
+	CONNECTION_DATA *psConnectionData = pvConnectionData;
+
+	OSAcquireBridgeLock();
+
+	gCurrentPurgeConnectionPid = psConnectionData->pid;
+
+	eErrorConnection = ConnectionDataDestroy(psConnectionData);
+	if (eErrorConnection != PVRSRV_OK)
+	{
+		if (eErrorConnection == PVRSRV_ERROR_RETRY)
+		{
+			PVR_DPF((PVR_DBG_MESSAGE,
+				 "_CleanupThreadPurgeConnectionData: Failed to purge connection data %p "
+				 "(deferring destruction)",
+				 psConnectionData));
+		}
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_MESSAGE,
+			 "_CleanupThreadPurgeConnectionData: Connection data %p deferred destruction finished",
+			 psConnectionData));
+	}
+
+	/* Check if possible resize the global handle base */
+	eErrorKernel = PVRSRVPurgeHandles(KERNEL_HANDLE_BASE);
+	if (eErrorKernel != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "_CleanupThreadPurgeConnectionData: Purge of global handle pool failed (%d)",
+			 eErrorKernel));
+	}
+
+	gCurrentPurgeConnectionPid = 0;
+
+	OSReleaseBridgeLock();
+
+	return eErrorConnection;
+}
+
+void PVRSRVConnectionDisconnect(void *pvDataPtr)
+{
+	CONNECTION_DATA *psConnectionData = pvDataPtr;
+
+	/* Defer the release of the connection data */
+	psConnectionData->sCleanupThreadFn.pfnFree = _CleanupThreadPurgeConnectionData;
+	psConnectionData->sCleanupThreadFn.pvData = psConnectionData;
+	psConnectionData->sCleanupThreadFn.ui32RetryCount = CLEANUP_THREAD_RETRY_COUNT_DEFAULT;
+	PVRSRVCleanupThreadAddWork(&psConnectionData->sCleanupThreadFn);
+}
+
+PVRSRV_ERROR PVRSRVConnectionInit(void)
+{
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PVRSRVConnectionDeInit(void)
+{
+	return PVRSRV_OK;
+}
+
+IMG_PID PVRSRVGetPurgeConnectionPid(void)
+{
+	return gCurrentPurgeConnectionPid;
+}
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/dc_server.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/dc_server.c
new file mode 100644
index 0000000..e222e50
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/dc_server.c
@@ -0,0 +1,2323 @@
+/**************************************************************************/ /*!
+@File
+@Title          Server side Display Class functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side functions of the Display Class
+                interface.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#include "allocmem.h"
+#include "lock.h"
+#include "osfunc.h"
+#include "img_types.h"
+#include "scp.h"
+#include "dc_server.h"
+#include "kerneldisplay.h"
+#include "pvr_debug.h"
+#include "pmr.h"
+#include "pdump_physmem.h"
+#include "sync_server.h"
+#include "pvrsrv.h"
+#include "debug_request_ids.h"
+
+#if defined(PVR_RI_DEBUG)
+#include "ri_server.h"
+#endif
+
+struct _DC_DISPLAY_CONTEXT_
+{
+	DC_DEVICE		*psDevice;
+	SCP_CONTEXT		*psSCPContext;
+	IMG_HANDLE		hDisplayContext;
+	IMG_UINT32		ui32ConfigsInFlight;
+	IMG_UINT32		ui32RefCount;
+	POS_LOCK		hLock;
+	POS_LOCK		hConfigureLock;			// Guard against concurrent calls to pfnContextConfigure during DisplayContextFlush
+	IMG_UINT32		ui32TokenOut;
+	IMG_UINT32		ui32TokenIn;
+
+	IMG_HANDLE		hCmdCompNotify;
+
+	IMG_BOOL		bIssuedNullFlip;
+	IMG_HANDLE		hMISR;
+	IMG_HANDLE		hDebugNotify;
+	IMG_PVOID		hTimer;
+	
+	IMG_BOOL		bPauseMISR;
+	DLLIST_NODE		sListNode;
+};
+
+struct _DC_DEVICE_
+{
+	const DC_DEVICE_FUNCTIONS	*psFuncTable;
+	IMG_UINT32					ui32MaxConfigsInFlight;
+	IMG_HANDLE					hDeviceData;
+	IMG_UINT32					ui32RefCount;
+	POS_LOCK					hLock;
+	IMG_UINT32					ui32Index;
+	IMG_HANDLE					psEventList;
+	IMG_HANDLE					hSystemBuffer;
+	PMR							*psSystemBufferPMR;
+	DC_DISPLAY_CONTEXT			sSystemContext;
+	DC_DEVICE					*psNext;
+};
+
+typedef enum _DC_BUFFER_TYPE_
+{
+	DC_BUFFER_TYPE_UNKNOWN = 0,
+	DC_BUFFER_TYPE_ALLOC,
+	DC_BUFFER_TYPE_IMPORT,
+	DC_BUFFER_TYPE_SYSTEM,
+} DC_BUFFER_TYPE;
+
+typedef struct _DC_BUFFER_ALLOC_DATA_
+{
+	PMR	*psPMR;
+} DC_BUFFER_ALLOC_DATA;
+
+typedef struct _DC_BUFFER_IMPORT_DATA_
+{
+/*
+	Required as the DC doesn't need to map the PMR during the import call we
+	need to make sure that the PMR doesn't get freed before the DC maps it
+	by taking an ref on the PMR during the import and drop it on the unimport.
+*/
+	IMG_UINT32	ui32NumPlanes;
+	PMR			*apsImport[3];
+} DC_BUFFER_IMPORT_DATA;
+
+struct _DC_BUFFER_
+{
+	DC_DISPLAY_CONTEXT	*psDisplayContext;
+	DC_BUFFER_TYPE		eType;
+	union {
+		DC_BUFFER_ALLOC_DATA	sAllocData;
+		DC_BUFFER_IMPORT_DATA	sImportData;
+	} uBufferData;
+	IMG_HANDLE			hBuffer;
+	IMG_UINT32			ui32MapCount;
+	IMG_UINT32			ui32RefCount;
+	POS_LOCK			hLock;
+	POS_LOCK			hMapLock;
+};
+
+typedef struct _DC_CMD_RDY_DATA_
+{
+	DC_DISPLAY_CONTEXT			*psDisplayContext;
+	IMG_UINT32					ui32BufferCount;
+	PVRSRV_SURFACE_CONFIG_INFO	*pasSurfAttrib;
+	IMG_HANDLE					*pahBuffer;
+	IMG_UINT32					ui32DisplayPeriod;
+} DC_CMD_RDY_DATA;
+
+typedef struct _DC_CMD_COMP_DATA_
+{
+	DC_DISPLAY_CONTEXT	*psDisplayContext;
+	IMG_UINT32			ui32BufferCount;
+	DC_BUFFER			**apsBuffer;
+	IMG_UINT32			ui32Token;
+	IMG_BOOL			bDirectNullFlip;
+} DC_CMD_COMP_DATA;
+
+typedef struct _DC_BUFFER_PMR_DATA_
+{
+	DC_BUFFER				*psBuffer;			/*!< The buffer this PMR private data refers to */
+	IMG_DEVMEM_LOG2ALIGN_T	uiLog2PageSize;		/*!< Log 2 of the buffers pagesize */
+	IMG_UINT32				ui32PageCount;		/*!< Number of pages in this buffer */
+	PHYS_HEAP				*psPhysHeap;		/*!< The physical heap the memory resides on */
+	IMG_DEV_PHYADDR			*pasDevPAddr;		/*!< Pointer to an array of device physcial addresses */
+	IMG_PVOID				pvLinAddr;			/*!< CPU virtual pointer or NULL if the DC driver didn't have one */
+
+	IMG_HANDLE				hPDumpAllocInfo;	/*!< Handle to PDump alloc data */
+	IMG_BOOL				bPDumpMalloced;		/*!< Did we get as far as PDump alloc? */
+} DC_BUFFER_PMR_DATA;
+
+POS_LOCK g_hDCListLock;
+
+DC_DEVICE *g_psDCDeviceList;
+IMG_UINT32 g_ui32DCDeviceCount;
+IMG_UINT32 g_ui32DCNextIndex;
+static DLLIST_NODE g_sDisplayContextsList;
+
+
+#if defined(DC_DEBUG) && defined(REFCOUNT_DEBUG)
+#define DC_REFCOUNT_PRINT(fmt, ...)		\
+	PVRSRVDebugPrintf(PVR_DBG_WARNING,	\
+			  __FILE__,		\
+			  __LINE__,		\
+			  fmt,			\
+			  __VA_ARGS__)
+#else
+#define DC_REFCOUNT_PRINT(fmt, ...)
+#endif
+
+#if defined(DC_DEBUG)
+#define DC_DEBUG_PRINT(fmt, ...)			\
+	PVRSRVDebugPrintf(PVR_DBG_WARNING,		\
+			  __FILE__,			\
+			  __LINE__,			\
+			  fmt,				\
+			  __VA_ARGS__)
+#else
+#define DC_DEBUG_PRINT(fmt, ...)
+#endif
+
+/*****************************************************************************
+ *                             Private functions                             *
+ *****************************************************************************/
+
+static IMG_VOID _DCDeviceAcquireRef(DC_DEVICE *psDevice)
+{
+	OSLockAcquire(psDevice->hLock);
+	psDevice->ui32RefCount++;
+	DC_REFCOUNT_PRINT("%s: DC device %p, refcount = %d",
+					  __FUNCTION__, psDevice, psDevice->ui32RefCount);
+	OSLockRelease(psDevice->hLock);
+}
+
+static IMG_VOID _DCDeviceReleaseRef(DC_DEVICE *psDevice)
+{
+	IMG_UINT32 ui32RefCount;
+
+	OSLockAcquire(psDevice->hLock);
+	ui32RefCount = --psDevice->ui32RefCount;
+	OSLockRelease(psDevice->hLock);
+
+	if (ui32RefCount == 0)
+	{
+		OSLockAcquire(g_hDCListLock);
+		if (psDevice == g_psDCDeviceList)
+		{
+			g_psDCDeviceList = psDevice->psNext;
+		}
+		else
+		{
+			DC_DEVICE *psTmp = g_psDCDeviceList;
+	
+			while (psTmp->psNext != psDevice)
+			{
+				psTmp = psTmp->psNext;
+			}
+			psTmp->psNext = g_psDCDeviceList->psNext;
+		}
+	
+		g_ui32DCDeviceCount--;
+		OSLockRelease(g_hDCListLock);
+	}
+	else
+	{
+		/* Signal this devices event list as the unload might be blocked on it */
+		OSEventObjectSignal(psDevice->psEventList);
+	}
+	DC_REFCOUNT_PRINT("%s: DC device %p, refcount = %d",
+					  __FUNCTION__, psDevice, ui32RefCount);
+}
+
+static IMG_VOID _DCDisplayContextAcquireRef(DC_DISPLAY_CONTEXT *psDisplayContext)
+{
+	OSLockAcquire(psDisplayContext->hLock);
+	psDisplayContext->ui32RefCount++;
+	DC_REFCOUNT_PRINT("%s: DC display context %p, refcount = %d",
+					  __FUNCTION__, psDisplayContext, psDisplayContext->ui32RefCount);
+	OSLockRelease(psDisplayContext->hLock);
+}
+
+static IMG_VOID _DCDisplayContextReleaseRef(DC_DISPLAY_CONTEXT *psDisplayContext)
+{
+	IMG_UINT32 ui32RefCount;
+
+	OSLockAcquire(psDisplayContext->hLock);
+	ui32RefCount = --psDisplayContext->ui32RefCount;
+	OSLockRelease(psDisplayContext->hLock);
+
+	if (ui32RefCount == 0)
+	{
+		DC_DEVICE *psDevice = psDisplayContext->psDevice;
+
+		PVRSRVUnregisterDbgRequestNotify(psDisplayContext->hDebugNotify);
+
+		dllist_remove_node(&psDisplayContext->sListNode);
+
+		/* unregister the device from cmd complete notifications */
+		PVRSRVUnregisterCmdCompleteNotify(psDisplayContext->hCmdCompNotify);
+		psDisplayContext->hCmdCompNotify = IMG_NULL;
+
+		OSUninstallMISR(psDisplayContext->hMISR);
+		SCPDestroy(psDisplayContext->psSCPContext);
+		psDevice->psFuncTable->pfnContextDestroy(psDisplayContext->hDisplayContext);
+		_DCDeviceReleaseRef(psDevice);
+		OSLockDestroy(psDisplayContext->hConfigureLock);
+		OSLockDestroy(psDisplayContext->hLock);
+		OSFreeMem(psDisplayContext);
+	}
+
+	DC_REFCOUNT_PRINT("%s: DC display context %p, refcount = %d",
+					  __FUNCTION__, psDisplayContext, ui32RefCount);
+}
+
+static IMG_VOID _DCBufferAcquireRef(DC_BUFFER *psBuffer)
+{
+	OSLockAcquire(psBuffer->hLock);
+	psBuffer->ui32RefCount++;
+	DC_REFCOUNT_PRINT("%s: DC buffer %p, refcount = %d",
+					  __FUNCTION__, psBuffer, psBuffer->ui32RefCount);
+	OSLockRelease(psBuffer->hLock);
+}
+
+
+static IMG_VOID _DCFreeAllocedBuffer(DC_BUFFER *psBuffer)
+{
+	DC_DISPLAY_CONTEXT *psDisplayContext = psBuffer->psDisplayContext;
+	DC_DEVICE *psDevice = psDisplayContext->psDevice;
+
+	psDevice->psFuncTable->pfnBufferFree(psBuffer->hBuffer);
+	_DCDisplayContextReleaseRef(psDisplayContext);
+}
+
+static IMG_VOID _DCFreeImportedBuffer(DC_BUFFER *psBuffer)
+{
+	DC_DISPLAY_CONTEXT *psDisplayContext = psBuffer->psDisplayContext;
+	DC_DEVICE *psDevice = psDisplayContext->psDevice;
+	IMG_UINT32 i;
+
+	for (i=0;i<psBuffer->uBufferData.sImportData.ui32NumPlanes;i++)
+	{
+		PMRUnrefPMR(psBuffer->uBufferData.sImportData.apsImport[i]);
+	}
+	psDevice->psFuncTable->pfnBufferFree(psBuffer->hBuffer);
+	_DCDisplayContextReleaseRef(psDisplayContext);
+}
+
+static IMG_VOID _DCFreeSystemBuffer(DC_BUFFER *psBuffer)
+{
+	DC_DISPLAY_CONTEXT *psDisplayContext = psBuffer->psDisplayContext;
+	DC_DEVICE *psDevice = psDisplayContext->psDevice;
+
+	psDevice->psFuncTable->pfnBufferSystemRelease(psBuffer->hBuffer);
+	_DCDeviceReleaseRef(psDevice);
+}
+
+/*
+	Drop a reference on the buffer. Last person gets to free it
+*/
+static IMG_VOID _DCBufferReleaseRef(DC_BUFFER *psBuffer)
+{
+	IMG_UINT32 ui32RefCount;
+
+	OSLockAcquire(psBuffer->hLock);
+	ui32RefCount = --psBuffer->ui32RefCount;
+	OSLockRelease(psBuffer->hLock);
+
+	if (ui32RefCount == 0)
+	{
+		switch (psBuffer->eType)
+		{
+			case DC_BUFFER_TYPE_ALLOC:
+					_DCFreeAllocedBuffer(psBuffer);
+					break;
+			case DC_BUFFER_TYPE_IMPORT:
+					_DCFreeImportedBuffer(psBuffer);
+					break;
+			case DC_BUFFER_TYPE_SYSTEM:
+					_DCFreeSystemBuffer(psBuffer);
+					break;
+			default:
+					PVR_ASSERT(IMG_FALSE);
+		}
+		OSLockDestroy(psBuffer->hMapLock);
+		OSLockDestroy(psBuffer->hLock);
+		OSFreeMem(psBuffer);
+	}
+	DC_REFCOUNT_PRINT("%s: DC buffer %p, refcount = %d",
+					  __FUNCTION__, psBuffer, ui32RefCount);
+}
+
+static PVRSRV_ERROR _DCBufferMap(DC_BUFFER *psBuffer)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	OSLockAcquire(psBuffer->hMapLock);
+	if (psBuffer->ui32MapCount++ == 0)
+	{
+		DC_DEVICE *psDevice = psBuffer->psDisplayContext->psDevice;
+
+		if(psDevice->psFuncTable->pfnBufferMap)
+		{
+			eError = psDevice->psFuncTable->pfnBufferMap(psBuffer->hBuffer);
+			if (eError != PVRSRV_OK)
+			{
+				goto out_unlock;
+			}
+		}
+
+		_DCBufferAcquireRef(psBuffer);
+	}
+
+	DC_REFCOUNT_PRINT("%s: DC buffer %p, MapCount = %d",
+					  __FUNCTION__, psBuffer, psBuffer->ui32MapCount);
+
+out_unlock:
+	OSLockRelease(psBuffer->hMapLock);
+	return eError;
+}
+
+static IMG_VOID _DCBufferUnmap(DC_BUFFER *psBuffer)
+{
+	DC_DEVICE *psDevice = psBuffer->psDisplayContext->psDevice;
+	IMG_UINT32 ui32MapCount;
+
+	OSLockAcquire(psBuffer->hMapLock);
+	ui32MapCount = --psBuffer->ui32MapCount;
+	OSLockRelease(psBuffer->hMapLock);
+
+	if (ui32MapCount == 0)
+	{
+		if(psDevice->psFuncTable->pfnBufferUnmap)
+		{
+			psDevice->psFuncTable->pfnBufferUnmap(psBuffer->hBuffer);
+		}
+
+		_DCBufferReleaseRef(psBuffer);
+	}
+	DC_REFCOUNT_PRINT("%s: DC Buffer %p, MapCount = %d",
+					  __FUNCTION__, psBuffer, ui32MapCount);
+}
+
+static PVRSRV_ERROR _DCDeviceBufferArrayCreate(IMG_UINT32 ui32BufferCount,
+											   DC_BUFFER **papsBuffers,
+											   IMG_HANDLE **pahDeviceBuffers)
+{
+	IMG_HANDLE *ahDeviceBuffers;
+	IMG_UINT32 i;
+
+	/* Create an array of the DC's private Buffer handles */
+	ahDeviceBuffers = OSAllocMem(sizeof(IMG_HANDLE) * ui32BufferCount);
+	if (ahDeviceBuffers == IMG_NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+	OSMemSet(ahDeviceBuffers, 0, sizeof(IMG_HANDLE) * ui32BufferCount);
+
+	for (i=0;i<ui32BufferCount;i++)
+	{
+		ahDeviceBuffers[i] = papsBuffers[i]->hBuffer;
+	}	
+
+	*pahDeviceBuffers = ahDeviceBuffers;
+
+	return PVRSRV_OK;
+}
+
+static IMG_VOID _DCDeviceBufferArrayDestroy(IMG_HANDLE ahDeviceBuffers)
+{
+	OSFreeMem(ahDeviceBuffers);
+}
+
+static IMG_BOOL _DCDisplayContextReady(IMG_PVOID hReadyData)
+{
+	DC_CMD_RDY_DATA *psReadyData = (DC_CMD_RDY_DATA *) hReadyData;
+	DC_DISPLAY_CONTEXT *psDisplayContext = psReadyData->psDisplayContext;
+	DC_DEVICE *psDevice = psDisplayContext->psDevice;
+
+	if (psDisplayContext->ui32ConfigsInFlight >= psDevice->ui32MaxConfigsInFlight)
+	{
+		/*
+			We're at the DC's max commands in-flight so don't take this command
+			off the queue
+		*/
+		return IMG_FALSE;
+	}
+
+	return IMG_TRUE;
+}
+
+#if defined SUPPORT_DC_COMPLETE_TIMEOUT_DEBUG
+static IMG_VOID _RetireTimeout(IMG_PVOID pvData)
+{
+	DC_CMD_COMP_DATA *psCompleteData = pvData;
+	DC_DISPLAY_CONTEXT *psDisplayContext = psCompleteData->psDisplayContext;
+
+	PVR_DPF((PVR_DBG_ERROR, "Timeout fired for operation %d", psCompleteData->ui32Token));
+	SCPDumpStatus(psDisplayContext->psSCPContext);
+
+	OSDisableTimer(psDisplayContext->hTimer);
+	OSRemoveTimer(psDisplayContext->hTimer);
+	psDisplayContext->hTimer = IMG_NULL;
+}
+#endif	/* SUPPORT_DC_COMPLETE_TIMEOUT_DEBUG */
+
+static IMG_VOID _DCDisplayContextConfigure(IMG_PVOID hReadyData,
+										   IMG_PVOID hCompleteData)
+{
+	DC_CMD_RDY_DATA *psReadyData = (DC_CMD_RDY_DATA *) hReadyData;
+	DC_DISPLAY_CONTEXT *psDisplayContext = psReadyData->psDisplayContext;
+	DC_DEVICE *psDevice = psDisplayContext->psDevice;
+
+	OSLockAcquire(psDisplayContext->hLock);
+	psDisplayContext->ui32ConfigsInFlight++;
+
+#if defined SUPPORT_DC_COMPLETE_TIMEOUT_DEBUG
+	if (psDisplayContext->ui32ConfigsInFlight == psDevice->ui32MaxConfigsInFlight)
+	{
+		/*
+			We've just sent out a new config which has filled the DC's pipeline.
+			This means that we expect a retire within a VSync period, start
+			a timer that will print out a message if we haven't got a complete
+			within a reasonable period (200ms)
+		*/
+		PVR_ASSERT(psDisplayContext->hTimer == IMG_NULL);
+		psDisplayContext->hTimer = OSAddTimer(_RetireTimeout, hCompleteData, 200);
+		OSEnableTimer(psDisplayContext->hTimer);
+	}
+#endif
+
+	OSLockRelease(psDisplayContext->hLock);
+
+#if defined(DC_DEBUG)
+	{
+		DC_DEBUG_PRINT("_DCDisplayContextConfigure: Send command (%d) out", 
+				((DC_CMD_COMP_DATA*) hCompleteData)->ui32Token);
+	}
+#endif /* DC_DEBUG */
+
+	/* 
+	 * Note: A risk exists that _DCDisplayContextConfigure may be called simultaneously
+	 *       from both SCPRun (MISR context) and DCDisplayContextFlush.
+	 *       This lock ensures no concurrent calls are made to pfnContextConfigure.
+	 */
+	OSLockAcquire(psDisplayContext->hConfigureLock);
+	/*
+		Note: We've already done all the acquire refs at
+		      DCDisplayContextConfigure time.
+	*/
+	psDevice->psFuncTable->pfnContextConfigure(psDisplayContext->hDisplayContext,
+											   psReadyData->ui32BufferCount,
+											   psReadyData->pasSurfAttrib,
+											   psReadyData->pahBuffer,
+											   psReadyData->ui32DisplayPeriod,
+											   hCompleteData);
+	OSLockRelease(psDisplayContext->hConfigureLock);
+
+}
+
+/*
+	_DCDisplayContextRun
+
+	Kick the MISR which will check for any commands which can be processed
+*/
+static INLINE IMG_VOID _DCDisplayContextRun(DC_DISPLAY_CONTEXT *psDisplayContext)
+{
+	OSScheduleMISR(psDisplayContext->hMISR);
+}
+
+/*
+	_DCDisplayContextMISR
+
+	This gets called when this MISR is fired
+*/
+static IMG_VOID _DCDisplayContextMISR(IMG_VOID *pvData)
+{
+	DC_DISPLAY_CONTEXT *psDisplayContext = pvData;
+
+	if ( !psDisplayContext->bPauseMISR )
+	{
+		SCPRun(psDisplayContext->psSCPContext);
+	}
+}
+
+/*
+ * PMR related functions and structures
+ */
+
+/*
+	Callback function for locking the system physical page addresses.
+	As we acquire the display memory at PMR create time there is nothing
+	to do here.
+*/
+static PVRSRV_ERROR _DCPMRLockPhysAddresses(PMR_IMPL_PRIVDATA pvPriv,
+											IMG_UINT32 uiLog2DevPageSize)
+{
+	DC_BUFFER_PMR_DATA *psPMRPriv = pvPriv;
+	DC_BUFFER *psBuffer = psPMRPriv->psBuffer;
+	DC_DEVICE *psDevice = psBuffer->psDisplayContext->psDevice;
+	PVRSRV_ERROR eError;
+
+	if (uiLog2DevPageSize < psPMRPriv->uiLog2PageSize)
+	{
+		eError = PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY;
+		goto fail_contigcheck;
+	}
+
+	psPMRPriv->pasDevPAddr = OSAllocMem(sizeof(IMG_DEV_PHYADDR) *
+							 psPMRPriv->ui32PageCount);
+	if (psPMRPriv->pasDevPAddr == IMG_NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_alloc;
+	}
+
+	OSMemSet(psPMRPriv->pasDevPAddr,
+			 0,
+			 sizeof(IMG_DEV_PHYADDR) * psPMRPriv->ui32PageCount);
+
+	eError = psDevice->psFuncTable->pfnBufferAcquire(psBuffer->hBuffer,
+													 psPMRPriv->pasDevPAddr,
+													 &psPMRPriv->pvLinAddr);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_query;
+	}
+
+	return PVRSRV_OK;
+
+fail_query:
+	OSFreeMem(psPMRPriv->pasDevPAddr);
+fail_alloc:
+fail_contigcheck:
+	return eError;
+}
+
+static PVRSRV_ERROR _DCPMRUnlockPhysAddresses(PMR_IMPL_PRIVDATA pvPriv)
+{
+	DC_BUFFER_PMR_DATA *psPMRPriv = pvPriv;
+	DC_BUFFER *psBuffer = psPMRPriv->psBuffer;
+	DC_DEVICE *psDevice = psBuffer->psDisplayContext->psDevice;
+
+	psDevice->psFuncTable->pfnBufferRelease(psBuffer->hBuffer);
+	OSFreeMem(psPMRPriv->pasDevPAddr);
+
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR _DCPMRDevPhysAddr(PMR_IMPL_PRIVDATA pvPriv,
+									  IMG_UINT32 ui32NumOfPages,
+									  IMG_DEVMEM_OFFSET_T *puiOffset,
+									  IMG_BOOL *pbValid,
+									  IMG_DEV_PHYADDR *psDevAddrPtr)
+{
+	DC_BUFFER_PMR_DATA *psPMRPriv = pvPriv;
+    IMG_UINT32 uiNumPages = psPMRPriv->ui32PageCount;
+    IMG_UINT32 uiLog2PageSize = psPMRPriv->uiLog2PageSize;
+    IMG_UINT32 uiPageSize = 1ULL << uiLog2PageSize;
+    IMG_UINT32 uiPageIndex;
+    IMG_UINT32 uiInPageOffset;
+    IMG_DEV_PHYADDR sDevAddr;
+    IMG_UINT32 idx;
+
+	for (idx=0; idx < ui32NumOfPages; idx++)
+	{
+		if (pbValid[idx])
+		{
+			/* verify the cast
+			   N.B.  Strictly... this could be triggered by an illegal uiOffset arg too. */
+			uiPageIndex = (IMG_UINT32)(puiOffset[idx] >> uiLog2PageSize);
+			PVR_ASSERT((IMG_DEVMEM_OFFSET_T)uiPageIndex << uiLog2PageSize == puiOffset[idx]);
+		
+			uiInPageOffset = (IMG_UINT32)(puiOffset[idx] - ((IMG_DEVMEM_OFFSET_T)uiPageIndex << uiLog2PageSize));		
+			PVR_ASSERT(puiOffset[idx] == ((IMG_DEVMEM_OFFSET_T)uiPageIndex << uiLog2PageSize) + uiInPageOffset);
+			PVR_ASSERT(uiPageIndex < uiNumPages);
+			PVR_ASSERT(uiInPageOffset < uiPageSize);
+
+			sDevAddr.uiAddr = psPMRPriv->pasDevPAddr[uiPageIndex].uiAddr;
+			PVR_ASSERT((sDevAddr.uiAddr & (uiPageSize - 1)) == 0);
+
+			psDevAddrPtr[idx] = sDevAddr;
+			psDevAddrPtr[idx].uiAddr += uiInPageOffset;
+		}
+	}
+
+    return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR _DCPMRFinalize(PMR_IMPL_PRIVDATA pvPriv)
+{
+	DC_BUFFER_PMR_DATA *psPMRPriv = pvPriv;
+
+	/* Conditionally do the PDump free, because if CreatePMR failed we
+	   won't have done the PDump MALLOC.  */
+	if (psPMRPriv->bPDumpMalloced)
+	{
+		PDumpPMRFree(psPMRPriv->hPDumpAllocInfo);
+	}
+
+	PhysHeapRelease(psPMRPriv->psPhysHeap);
+	_DCBufferReleaseRef(psPMRPriv->psBuffer);
+	OSFreeMem(psPMRPriv);
+
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR _DCPMRReadBytes(PMR_IMPL_PRIVDATA pvPriv,
+									IMG_DEVMEM_OFFSET_T uiOffset,
+									IMG_UINT8 *pcBuffer,
+									IMG_SIZE_T uiBufSz,
+									IMG_SIZE_T *puiNumBytes)
+{
+    DC_BUFFER_PMR_DATA *psPMRPriv = pvPriv;
+    IMG_CPU_PHYADDR sCpuPAddr;
+    IMG_SIZE_T uiBytesCopied = 0;
+    IMG_SIZE_T uiBytesToCopy = uiBufSz;
+    IMG_SIZE_T uiBytesCopyableFromPage;
+    IMG_VOID *pvMapping;
+    IMG_UINT8 *pcKernelPointer;
+    IMG_SIZE_T uiBufferOffset = 0;
+    IMG_SIZE_T uiPageIndex;
+    IMG_SIZE_T uiInPageOffset;
+
+	/* If we already have a CPU mapping just us it */
+	if (psPMRPriv->pvLinAddr)
+	{
+		pcKernelPointer = psPMRPriv->pvLinAddr;
+		OSMemCopy(pcBuffer, &pcKernelPointer[uiOffset], uiBufSz);
+		*puiNumBytes = uiBufSz;
+		return PVRSRV_OK;
+	}
+
+	/* Copy the data page by page */
+    while (uiBytesToCopy > 0)
+    {
+        /* we have to kmap one page in at a time */
+        uiPageIndex = TRUNCATE_64BITS_TO_SIZE_T(uiOffset >> psPMRPriv->uiLog2PageSize);
+
+        uiInPageOffset = TRUNCATE_64BITS_TO_SIZE_T(uiOffset - ((IMG_DEVMEM_OFFSET_T)uiPageIndex << psPMRPriv->uiLog2PageSize));
+        uiBytesCopyableFromPage = uiBytesToCopy;
+        if (uiBytesCopyableFromPage + uiInPageOffset > (1U<<psPMRPriv->uiLog2PageSize))
+        {
+            uiBytesCopyableFromPage = (1 << psPMRPriv->uiLog2PageSize)-uiInPageOffset;
+        }
+
+		PhysHeapDevPAddrToCpuPAddr(psPMRPriv->psPhysHeap, 1, &sCpuPAddr, &psPMRPriv->pasDevPAddr[uiPageIndex]);
+
+        pvMapping = OSMapPhysToLin(sCpuPAddr,
+								   1 << psPMRPriv->uiLog2PageSize,
+								   0);
+        PVR_ASSERT(pvMapping != IMG_NULL);
+        pcKernelPointer = pvMapping;
+        OSMemCopy(&pcBuffer[uiBufferOffset], &pcKernelPointer[uiInPageOffset], uiBytesCopyableFromPage);
+        OSUnMapPhysToLin(pvMapping, 1 << psPMRPriv->uiLog2PageSize, 0);
+
+        uiBufferOffset += uiBytesCopyableFromPage;
+        uiBytesToCopy -= uiBytesCopyableFromPage;
+        uiOffset += uiBytesCopyableFromPage;
+        uiBytesCopied += uiBytesCopyableFromPage;
+    }
+
+    *puiNumBytes = uiBytesCopied;
+    return PVRSRV_OK;
+}
+
+static PMR_IMPL_FUNCTAB sDCPMRFuncTab = {
+	_DCPMRLockPhysAddresses,	/* .pfnLockPhysAddresses */
+	_DCPMRUnlockPhysAddresses,	/* .pfnUnlockPhysAddresses */
+	_DCPMRDevPhysAddr,			/* .pfnDevPhysAddr */
+	IMG_NULL,					/* .pfnPDumpSymbolicAddr	*/
+	IMG_NULL,					/* .pfnAcquireKernelMappingData	*/
+	IMG_NULL,					/* .pfnReleaseKernelMappingData */
+	_DCPMRReadBytes,			/* .pfnReadBytes */
+	IMG_NULL,					/* .pfnWriteBytes */
+	_DCPMRFinalize				/* .pfnFinalize */
+};
+
+static PVRSRV_ERROR _DCCreatePMR(IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize,
+								 IMG_UINT32 ui32PageCount,
+								 IMG_UINT32 ui32PhysHeapID,
+								 DC_BUFFER *psBuffer,
+								 PMR **ppsPMR)
+{
+	DC_BUFFER_PMR_DATA *psPMRPriv;
+	PHYS_HEAP *psPhysHeap;
+	IMG_DEVMEM_SIZE_T uiBufferSize;
+	IMG_HANDLE hPDumpAllocInfo;
+	PVRSRV_ERROR eError;
+	IMG_BOOL bMappingTable = IMG_TRUE;
+
+	/*
+		Create the PMR for this buffer.
+
+		Note: At this stage we don't need to know the physical pages just
+		the page size and the size of the PMR. The 1st call that needs the
+		physcial pages will cause a request into the DC driver (pfnBufferQuery)
+	*/
+	psPMRPriv = OSAllocMem(sizeof(DC_BUFFER_PMR_DATA));
+	if (psPMRPriv == IMG_NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_privalloc;
+	}
+
+	OSMemSet(psPMRPriv, 0, sizeof(DC_BUFFER_PMR_DATA));
+
+	/* Acquire the physical heap the memory is on */
+	eError = PhysHeapAcquire(ui32PhysHeapID, &psPhysHeap);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_physheap;
+	}
+
+	/* Take a reference on the buffer (for the copy in the PMR) */
+	_DCBufferAcquireRef(psBuffer);
+
+	/* Fill in the private data for the PMR */
+	psPMRPriv->uiLog2PageSize = uiLog2PageSize;
+	psPMRPriv->ui32PageCount = ui32PageCount;
+	psPMRPriv->psPhysHeap = psPhysHeap;
+	psPMRPriv->pasDevPAddr = IMG_NULL;
+	psPMRPriv->psBuffer = psBuffer;
+
+	uiBufferSize = (1 << uiLog2PageSize) * ui32PageCount;
+
+	/* Create the PMR for the MM layer */
+	eError = PMRCreatePMR(psPhysHeap,
+						  uiBufferSize,
+						  uiBufferSize,
+						  1,
+						  1,
+						  &bMappingTable,
+						  uiLog2PageSize,
+						  PVRSRV_MEMALLOCFLAG_WRITE_COMBINE,
+						  "DISPLAY",
+						  &sDCPMRFuncTab,
+						  psPMRPriv,
+						  ppsPMR,
+						  &hPDumpAllocInfo,
+						  IMG_TRUE);
+
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_pmrcreate;
+	}
+
+#if defined(PDUMP)
+	psPMRPriv->hPDumpAllocInfo = hPDumpAllocInfo;
+	psPMRPriv->bPDumpMalloced = IMG_TRUE;
+#endif
+	return PVRSRV_OK;
+
+fail_pmrcreate:
+	PhysHeapRelease(psPhysHeap);
+fail_physheap:
+	OSFreeMem(psPMRPriv);
+fail_privalloc:
+	return eError;
+}
+
+static IMG_VOID _DCDisplayContextNotify(PVRSRV_CMDCOMP_HANDLE hCmdCompHandle)
+{
+	DC_DISPLAY_CONTEXT	*psDisplayContext = (DC_DISPLAY_CONTEXT*) hCmdCompHandle;
+
+	_DCDisplayContextRun(psDisplayContext);
+}
+
+static IMG_VOID _DCDebugRequest(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle, IMG_UINT32 ui32VerbLevel)
+{
+	DC_DISPLAY_CONTEXT	*psDisplayContext = (DC_DISPLAY_CONTEXT*) hDebugRequestHandle;
+	DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf = IMG_NULL;
+
+	pfnDumpDebugPrintf = g_pfnDumpDebugPrintf;
+
+	switch(ui32VerbLevel)
+	{
+		case DEBUG_REQUEST_VERBOSITY_LOW:
+			PVR_DUMPDEBUG_LOG(("Configs in-flight = %d", psDisplayContext->ui32ConfigsInFlight));
+			break;
+
+		case DEBUG_REQUEST_VERBOSITY_MEDIUM:
+			PVR_DUMPDEBUG_LOG(("------[ Display context SCP status ]------"));
+			SCPDumpStatus(psDisplayContext->psSCPContext);
+			break;
+
+		default:
+			break;
+	}
+}
+
+/*****************************************************************************
+ * Public interface functions exposed through the bridge to services client  *
+ *****************************************************************************/
+
+PVRSRV_ERROR DCDevicesQueryCount(IMG_UINT32 *pui32DeviceCount)
+{
+	*pui32DeviceCount = g_ui32DCDeviceCount;
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR DCDevicesEnumerate(IMG_UINT32 ui32DeviceArraySize,
+								IMG_UINT32 *pui32DeviceCount,
+								IMG_UINT32 *paui32DeviceIndex)
+{
+	IMG_UINT32 i;
+	IMG_UINT32 ui32LoopCount;
+	DC_DEVICE *psTmp = g_psDCDeviceList;
+
+	OSLockAcquire(g_hDCListLock);
+
+	if (g_ui32DCDeviceCount > ui32DeviceArraySize)
+	{
+		ui32LoopCount = ui32DeviceArraySize;
+	}
+	else
+	{
+		ui32LoopCount = g_ui32DCDeviceCount;
+	}
+	
+	for (i=0;i<ui32LoopCount;i++)
+	{
+		PVR_ASSERT(psTmp != IMG_NULL);
+		paui32DeviceIndex[i] = psTmp->ui32Index;
+		psTmp = psTmp->psNext;
+	}
+
+	*pui32DeviceCount = ui32LoopCount;
+	OSLockRelease(g_hDCListLock);
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR DCDeviceAcquire(IMG_UINT32 ui32DeviceIndex,
+							 DC_DEVICE **ppsDevice)
+{
+	DC_DEVICE *psDevice = g_psDCDeviceList;
+
+	if (psDevice == IMG_NULL)
+	{
+		return PVRSRV_ERROR_NO_DC_DEVICES_FOUND;
+	}
+
+	while(psDevice->ui32Index != ui32DeviceIndex)
+	{
+		psDevice = psDevice->psNext;
+		if (psDevice == IMG_NULL)
+		{
+			return PVRSRV_ERROR_NO_DC_DEVICES_FOUND;
+		}
+	}
+
+	_DCDeviceAcquireRef(psDevice);
+	*ppsDevice = psDevice;
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR DCDeviceRelease(DC_DEVICE *psDevice)
+{
+	_DCDeviceReleaseRef(psDevice);
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR DCGetInfo(DC_DEVICE *psDevice,
+					   DC_DISPLAY_INFO *psDisplayInfo)
+{
+	psDevice->psFuncTable->pfnGetInfo(psDevice->hDeviceData,
+									  psDisplayInfo);
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR DCPanelQueryCount(DC_DEVICE *psDevice,
+								IMG_UINT32 *pui32NumPanels)
+{
+	psDevice->psFuncTable->pfnPanelQueryCount(psDevice->hDeviceData,
+											  pui32NumPanels);
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR DCPanelQuery(DC_DEVICE *psDevice,
+						   IMG_UINT32 ui32PanelsArraySize,
+						   IMG_UINT32 *pui32NumPanels,
+						   PVRSRV_PANEL_INFO *pasPanelInfo)
+{
+	psDevice->psFuncTable->pfnPanelQuery(psDevice->hDeviceData,
+										 ui32PanelsArraySize,
+										 pui32NumPanels,
+										 pasPanelInfo);
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR DCFormatQuery(DC_DEVICE *psDevice,
+						   IMG_UINT32 ui32FormatArraySize,
+						   PVRSRV_SURFACE_FORMAT *pasFormat,
+						   IMG_UINT32 *pui32Supported)
+{
+	psDevice->psFuncTable->pfnFormatQuery(psDevice->hDeviceData,
+									   ui32FormatArraySize,
+									   pasFormat,
+									   pui32Supported);
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR DCDimQuery(DC_DEVICE *psDevice,
+						IMG_UINT32 ui32DimSize,
+						PVRSRV_SURFACE_DIMS *pasDim,
+						IMG_UINT32 *pui32Supported)
+{
+	psDevice->psFuncTable->pfnDimQuery(psDevice->hDeviceData,
+										  ui32DimSize,
+										  pasDim,
+										  pui32Supported);
+
+	return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR DCSetBlank(DC_DEVICE *psDevice,
+						IMG_BOOL bEnabled)
+{
+	PVRSRV_ERROR eError = PVRSRV_ERROR_NOT_IMPLEMENTED;
+	if (psDevice->psFuncTable->pfnSetBlank)
+	{
+		eError = psDevice->psFuncTable->pfnSetBlank(psDevice->hDeviceData,
+													bEnabled);
+	}
+
+	return eError;
+}
+
+PVRSRV_ERROR DCSetVSyncReporting(DC_DEVICE *psDevice,
+								 IMG_BOOL bEnabled)
+{
+	PVRSRV_ERROR eError = PVRSRV_ERROR_NOT_IMPLEMENTED;
+	if (psDevice->psFuncTable->pfnSetVSyncReporting)
+	{
+		eError = psDevice->psFuncTable->pfnSetVSyncReporting(psDevice->hDeviceData,
+															 bEnabled);
+	}
+
+	return eError;
+}
+
+PVRSRV_ERROR DCLastVSyncQuery(DC_DEVICE *psDevice,
+							  IMG_INT64 *pi64Timestamp)
+{
+	PVRSRV_ERROR eError = PVRSRV_ERROR_NOT_IMPLEMENTED;
+	if (psDevice->psFuncTable->pfnLastVSyncQuery)
+	{
+		eError = psDevice->psFuncTable->pfnLastVSyncQuery(psDevice->hDeviceData,
+														  pi64Timestamp);
+	}
+
+	return eError;
+}
+
+/*
+	The system buffer breaks the rule of only calling DC callbacks on first
+	ref and last deref. For the pfnBufferSystemAcquire this is expected
+	as each call could get back a different buffer, but calls to
+	pfnBufferAcquire and pfnBufferRelease could happen multiple times
+	for the same buffer
+*/
+PVRSRV_ERROR DCSystemBufferAcquire(DC_DEVICE *psDevice,
+								   IMG_UINT32 *pui32ByteStride,
+								   DC_BUFFER **ppsBuffer)
+{
+	DC_BUFFER *psNew;
+	PMR *psPMR;
+	PVRSRV_ERROR eError;
+	IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize;
+	IMG_UINT32 ui32PageCount;
+	IMG_UINT32 ui32PhysHeapID;
+
+	if (psDevice->psFuncTable->pfnBufferSystemAcquire == IMG_NULL)
+	{
+		eError = PVRSRV_ERROR_NO_SYSTEM_BUFFER;
+		goto fail_nopfn;
+	}
+
+	psNew = OSAllocMem(sizeof(DC_BUFFER));
+	if (psNew == IMG_NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_alloc;
+	}
+
+	OSMemSet(psNew, 0, sizeof(DC_BUFFER));
+
+	eError = OSLockCreate(&psNew->hLock, LOCK_TYPE_NONE);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_lock;
+	}
+
+	eError = OSLockCreate(&psNew->hMapLock, LOCK_TYPE_NONE);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_maplock;
+	}
+
+	eError = psDevice->psFuncTable->pfnBufferSystemAcquire(psDevice->hDeviceData,
+														   &uiLog2PageSize,
+														   &ui32PageCount,
+														   &ui32PhysHeapID,
+														   pui32ByteStride,
+														   &psNew->hBuffer);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_bufferacquire;
+	}
+
+	psNew->psDisplayContext = &psDevice->sSystemContext;
+	psNew->eType = DC_BUFFER_TYPE_SYSTEM;
+	psNew->ui32MapCount = 0;
+	psNew->ui32RefCount = 1;
+
+	/*
+		Creating the PMR for the system buffer is a bit tricky as there is no
+		"create" call for it.
+		We should only ever have one PMR for the same buffer and so we can't
+		just create one every call to this function. We also have to deal with
+		the system buffer changing (mode change) so we can't just create the PMR
+		once at DC driver register time.
+		So what we do is cache the DC's handle to the system buffer and check if
+		this call the handle has changed (indicating a mode change) and create
+		a new PMR in this case.
+	*/
+	if (psNew->hBuffer != psDevice->hSystemBuffer)
+	{
+		if (psDevice->psSystemBufferPMR)
+		{
+			/*
+				Mode change:
+				We've already got a system buffer but the DC has given us a new
+				one so we need to drop the 2nd reference we took on it as a
+				different system buffer will be freed as DC unregister time
+			*/
+			PMRUnrefPMR(psDevice->psSystemBufferPMR);
+		}
+
+		eError = _DCCreatePMR(uiLog2PageSize,
+							  ui32PageCount,
+							  ui32PhysHeapID,
+							  psNew,
+							  &psPMR);
+		if (eError != PVRSRV_OK)
+		{
+			goto fail_createpmr;
+		}
+
+#if defined(PVR_RI_DEBUG)
+	{
+		/* Dummy handle - we don't need to store the reference to the PMR RI entry. Its deletion is handled internally. */
+		DC_DISPLAY_INFO	sDisplayInfo;
+		IMG_INT32 i32RITextSize;
+		IMG_CHAR pszRIText[RI_MAX_TEXT_LEN];
+
+		DCGetInfo(psDevice, &sDisplayInfo);
+		i32RITextSize = OSSNPrintf((IMG_CHAR *)pszRIText, RI_MAX_TEXT_LEN, "%s: DisplayContext 0x%p SystemBuffer", (IMG_CHAR *)sDisplayInfo.szDisplayName, &psDevice->sSystemContext);
+		if (i32RITextSize < 0) {
+			pszRIText[0] = '\0';
+			i32RITextSize = 0;
+		}
+		else
+		{
+			pszRIText[RI_MAX_TEXT_LEN-1] = '\0';
+		}
+		eError = RIWritePMREntryKM (psPMR,
+									(IMG_UINT32)i32RITextSize,
+									(IMG_CHAR *)pszRIText,
+									(uiLog2PageSize*ui32PageCount));
+	}
+#endif
+
+		psNew->uBufferData.sAllocData.psPMR = psPMR;
+		psDevice->hSystemBuffer = psNew->hBuffer;
+		psDevice->psSystemBufferPMR = psPMR;
+
+		/*
+			Take a 2nd reference on the PMR as we always drop a reference
+			in the release call but we don't want the PMR to be freed until
+			either a new system buffer as been acquired or the DC device gets
+			unregistered
+		*/
+		PMRRefPMR(psDevice->psSystemBufferPMR);
+	}
+	else
+	{
+		/*
+			A PMR for the system buffer as already been created so just
+			take a reference to the PMR to make sure it doesn't go away
+		*/
+		PMRRefPMR(psDevice->psSystemBufferPMR);
+		psNew->uBufferData.sAllocData.psPMR = psDevice->psSystemBufferPMR;
+	}
+
+	/*
+		The system buffer is tied to the device unlike all other buffers
+		which are tied to a display context.
+	*/
+	_DCDeviceAcquireRef(psDevice);
+
+	*ppsBuffer = psNew;
+
+	return PVRSRV_OK;
+
+fail_createpmr:
+fail_bufferacquire:
+	OSLockDestroy(psNew->hMapLock);
+fail_maplock:
+	OSLockDestroy(psNew->hLock);
+fail_lock:
+	OSFreeMem(psNew);
+fail_alloc:
+fail_nopfn:
+	return eError;
+}
+
+PVRSRV_ERROR DCSystemBufferRelease(DC_BUFFER *psBuffer)
+{
+	PMRUnrefPMR(psBuffer->uBufferData.sAllocData.psPMR);
+	_DCBufferReleaseRef(psBuffer);
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR DCDisplayContextCreate(DC_DEVICE *psDevice,
+									DC_DISPLAY_CONTEXT **ppsDisplayContext)
+{
+	DC_DISPLAY_CONTEXT *psDisplayContext;
+	PVRSRV_ERROR eError;
+
+	psDisplayContext = OSAllocMem(sizeof(DC_DISPLAY_CONTEXT));
+	if (psDisplayContext == IMG_NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+	psDisplayContext->psDevice = psDevice;
+	psDisplayContext->hDisplayContext = IMG_NULL;
+	psDisplayContext->ui32TokenOut = 0;
+	psDisplayContext->ui32TokenIn = 0;
+	psDisplayContext->ui32RefCount = 1;
+	psDisplayContext->ui32ConfigsInFlight = 0;
+	psDisplayContext->bIssuedNullFlip = IMG_FALSE;
+	psDisplayContext->hTimer = IMG_NULL;
+	psDisplayContext->bPauseMISR = IMG_FALSE;
+
+	eError = OSLockCreate(&psDisplayContext->hLock, LOCK_TYPE_NONE);
+	if (eError != PVRSRV_OK)
+	{
+		goto FailLock;
+	}
+	eError = OSLockCreate(&psDisplayContext->hConfigureLock, LOCK_TYPE_NONE);
+	if (eError != PVRSRV_OK)
+	{
+		goto FailLock2;
+	}
+
+	/* Create a Software Command Processor with 4K CCB size. 
+	 * With the HWC it might be possible to reach the limit off the buffer.
+	 * This could be bad when the buffers currently on the screen can't be
+	 * flipped to the new one, cause the command for them doesn't fit into the
+	 * queue (Deadlock). This situation should properly detected to make at
+	 * least the debugging easier. */
+	eError = SCPCreate(12, &psDisplayContext->psSCPContext);
+	if (eError != PVRSRV_OK)
+	{
+		goto FailSCP;
+	}
+
+	eError = psDevice->psFuncTable->pfnContextCreate(psDevice->hDeviceData,
+													 &psDisplayContext->hDisplayContext);
+
+	if (eError != PVRSRV_OK)
+	{
+		goto FailDCDeviceContext;
+	}
+
+	_DCDeviceAcquireRef(psDevice);
+
+	/* Create an MISR for our display context */
+	eError = OSInstallMISR(&psDisplayContext->hMISR,
+						   _DCDisplayContextMISR,
+						   psDisplayContext);
+	if (eError != PVRSRV_OK)
+	{
+		goto FailMISR;
+	}
+	/*
+		Register for the command complete callback.
+
+		Note:
+		After calling this function our MISR can be called at any point.
+	*/
+	eError = PVRSRVRegisterCmdCompleteNotify(&psDisplayContext->hCmdCompNotify, _DCDisplayContextNotify, psDisplayContext);
+	if (eError != PVRSRV_OK)
+	{
+		goto FailRegisterCmdComplete;
+	}
+
+	/* Register our debug request notify callback */
+	eError = PVRSRVRegisterDbgRequestNotify(&psDisplayContext->hDebugNotify,
+											_DCDebugRequest,
+											DEBUG_REQUEST_DC,
+											psDisplayContext);
+	if (eError != PVRSRV_OK)
+	{
+		goto FailRegisterDbgRequest;
+	}
+
+	*ppsDisplayContext = psDisplayContext;
+
+	/* store pointer to first/only display context, required for DCDisplayContextFlush */
+	dllist_add_to_tail(&g_sDisplayContextsList, &psDisplayContext->sListNode);
+
+	return PVRSRV_OK;
+
+FailRegisterDbgRequest:
+	PVRSRVUnregisterCmdCompleteNotify(psDisplayContext->hCmdCompNotify);
+FailRegisterCmdComplete:
+	OSUninstallMISR(psDisplayContext->hMISR);
+FailMISR:
+	_DCDeviceReleaseRef(psDevice);
+	psDevice->psFuncTable->pfnContextDestroy(psDisplayContext->hDisplayContext);
+FailDCDeviceContext:
+	SCPDestroy(psDisplayContext->psSCPContext);
+FailSCP:
+	OSLockDestroy(psDisplayContext->hConfigureLock);
+FailLock2:
+	OSLockDestroy(psDisplayContext->hLock);
+FailLock:
+	OSFreeMem(psDisplayContext);
+	return eError;
+}
+
+PVRSRV_ERROR DCDisplayContextConfigureCheck(DC_DISPLAY_CONTEXT *psDisplayContext,
+											IMG_UINT32 ui32PipeCount,
+											PVRSRV_SURFACE_CONFIG_INFO *pasSurfAttrib,
+											DC_BUFFER **papsBuffers)
+{
+	DC_DEVICE *psDevice = psDisplayContext->psDevice;
+	PVRSRV_ERROR eError;
+	IMG_HANDLE *ahBuffers;
+	
+	_DCDisplayContextAcquireRef(psDisplayContext);
+
+	/* Create an array of private device specific buffer handles */
+	eError = _DCDeviceBufferArrayCreate(ui32PipeCount,
+										papsBuffers,
+										&ahBuffers);
+	if (eError != PVRSRV_OK)
+	{
+		goto FailBufferArrayCreate;
+	}
+
+	/* Do we need to check if this is valid config? */
+	if (psDevice->psFuncTable->pfnContextConfigureCheck)
+	{
+
+		eError = psDevice->psFuncTable->pfnContextConfigureCheck(psDisplayContext->hDisplayContext,
+																ui32PipeCount,
+																pasSurfAttrib,
+																ahBuffers);
+		if (eError != PVRSRV_OK)
+		{
+			goto FailConfigCheck;
+		}
+	}
+
+	_DCDeviceBufferArrayDestroy(ahBuffers);
+	_DCDisplayContextReleaseRef(psDisplayContext);
+	return PVRSRV_OK;
+
+FailConfigCheck:
+	_DCDeviceBufferArrayDestroy(ahBuffers);
+FailBufferArrayCreate:
+	_DCDisplayContextReleaseRef(psDisplayContext);
+
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+
+static IMG_BOOL _DCDisplayContextFlush( PDLLIST_NODE psNode, IMG_PVOID pvCallbackData )
+{
+	DC_CMD_RDY_DATA sReadyData;
+	DC_CMD_COMP_DATA sCompleteData;
+
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PVRSRV_DATA *psData;
+	IMG_UINT32 ui32NumConfigsInSCP, ui32GoodRuns, ui32LoopCount;
+
+	DC_DISPLAY_CONTEXT * psDisplayContext = IMG_CONTAINER_OF(psNode, DC_DISPLAY_CONTEXT, sListNode);
+
+	PVR_UNREFERENCED_PARAMETER(pvCallbackData);
+
+	/* Make the NULL flip command data */
+	sReadyData.psDisplayContext = psDisplayContext;
+	sReadyData.ui32DisplayPeriod = 0;
+	sReadyData.ui32BufferCount = 0;
+	sReadyData.pasSurfAttrib = IMG_NULL;
+	sReadyData.pahBuffer = IMG_NULL;
+
+	sCompleteData.psDisplayContext = psDisplayContext;
+	sCompleteData.ui32BufferCount = 0;
+	sCompleteData.ui32Token = 0;
+	sCompleteData.bDirectNullFlip = IMG_TRUE;
+
+	/* Stop the MISR to stop the SCP from running outside of our control */
+	psDisplayContext->bPauseMISR = IMG_TRUE;
+
+	/*
+	 * Flush loop control:
+	 * take the total number of Configs owned by the SCP including those
+	 * "in-flight" with the DC, then multiply by 2 to account for any padding
+	 * commands in the SCP buffer
+	 */
+	ui32NumConfigsInSCP = psDisplayContext->ui32TokenOut - psDisplayContext->ui32TokenIn;
+	ui32NumConfigsInSCP *= 2;
+	ui32GoodRuns = 0;
+	ui32LoopCount = 0;
+
+	/*
+	 * Calling SCPRun first, ensures that any call to SCPRun from the MISR
+	 * context completes before we insert any NULL flush direct to the DC.
+	 * SCPRun returns PVRSRV_OK (0) if the run command (Configure) executes OR there
+	 * is no work to do OR it consumes a padding command.
+	 * By counting a "good" SCPRun for each of the ui32NumConfigsInSCP we ensure
+	 * that all Configs currently in the SCP are flushed to the DC.
+	 *
+	 * In the case where we fail dependencies (PVRSRV_ERROR_FAILED_DEPENDENCIES (15))
+	 * but there are outstanding ui32ConfigsInFlight that may satisfy them,
+	 * we just loop and try again.
+	 * In the case where there is still work to do but the DC is full
+	 * (PVRSRV_ERROR_NOT_READY (254)) we just loop and try again
+	 *
+	 * During a flush, NULL flips may be inserted if waiting for the 3D (not
+	 * actually deadlocked), but this should be benign
+	 */
+	while ( ui32GoodRuns < ui32NumConfigsInSCP && ui32LoopCount < 500 )
+	{
+		eError = SCPRun( psDisplayContext->psSCPContext );
+
+		if ( 0 == ui32LoopCount && PVRSRV_ERROR_FAILED_DEPENDENCIES != eError && 1 != psDisplayContext->ui32ConfigsInFlight )
+		{
+			PVR_DPF((PVR_DBG_ERROR, "DCDisplayContextFlush: called when not required"));
+			break;
+		}
+
+		if ( PVRSRV_OK == eError )
+		{
+			ui32GoodRuns++;
+		}
+		else if ( PVRSRV_ERROR_FAILED_DEPENDENCIES == eError && 1 == psDisplayContext->ui32ConfigsInFlight )
+		{
+			PVR_DPF((PVR_DBG_WARNING, "DCDisplayContextFlush: inserting NULL flip"));
+
+			/* Check if we need to do any CPU cache operations before sending the NULL flip */
+			psData = PVRSRVGetPVRSRVData();
+			OSCPUOperation(psData->uiCacheOp);
+			psData->uiCacheOp = PVRSRV_CACHE_OP_NONE;
+
+			/* The next Config may be dependent on the single Config currently in the DC */
+			/* Issue a NULL flip to free it */
+			_DCDisplayContextAcquireRef(psDisplayContext);
+			_DCDisplayContextConfigure( (IMG_PVOID)&sReadyData, (IMG_PVOID)&sCompleteData );
+		}
+
+		/* Give up the timeslice to let something happen */
+		OSSleepms(1);
+		ui32LoopCount++;
+	}
+
+	if ( ui32LoopCount >= 500 )
+	{
+		PVR_DPF((PVR_DBG_ERROR, "DCDisplayContextFlush: Failed to flush after > 500 milliseconds"));
+	}
+
+	PVR_DPF((PVR_DBG_WARNING, "DCDisplayContextFlush: inserting final NULL flip"));
+
+	/* Check if we need to do any CPU cache operations before sending the NULL flip */
+	psData = PVRSRVGetPVRSRVData();
+	OSCPUOperation(psData->uiCacheOp);
+	psData->uiCacheOp = PVRSRV_CACHE_OP_NONE;
+
+	/* The next Config may be dependent on the single Config currently in the DC */
+	/* Issue a NULL flip to free it */
+	_DCDisplayContextAcquireRef(psDisplayContext);
+	_DCDisplayContextConfigure( (IMG_PVOID)&sReadyData, (IMG_PVOID)&sCompleteData );
+
+	/* re-enable the MISR/SCP */
+	psDisplayContext->bPauseMISR = IMG_FALSE;
+
+	return IMG_TRUE;
+}
+
+
+PVRSRV_ERROR DCDisplayContextFlush( IMG_VOID )
+{	
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	
+	if ( !dllist_is_empty(&g_sDisplayContextsList) )
+	{
+		dllist_foreach_node(&g_sDisplayContextsList, _DCDisplayContextFlush, IMG_NULL);
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "DCDisplayContextFlush: No display contexts found"));
+		eError = PVRSRV_ERROR_INVALID_CONTEXT;
+	}
+		
+	return eError;
+}
+
+
+PVRSRV_ERROR DCDisplayContextConfigure(DC_DISPLAY_CONTEXT *psDisplayContext,
+									   IMG_UINT32 ui32PipeCount,
+									   PVRSRV_SURFACE_CONFIG_INFO *pasSurfAttrib,
+									   DC_BUFFER **papsBuffers,
+									   IMG_UINT32 ui32SyncOpCount,
+									   SERVER_SYNC_PRIMITIVE **papsSync,
+									   IMG_BOOL *pabUpdate,
+									   IMG_UINT32 ui32DisplayPeriod,
+									   IMG_UINT32 ui32MaxDepth,
+									   IMG_INT32 i32AcquireFenceFd,
+									   IMG_INT32 *pi32ReleaseFenceFd)
+{
+	DC_DEVICE *psDevice = psDisplayContext->psDevice;
+	PVRSRV_ERROR eError;
+	IMG_HANDLE *ahBuffers;
+	IMG_UINT32 ui32BuffersMapped = 0;
+	IMG_UINT32 i;
+	IMG_UINT32 ui32CmdRdySize;
+	IMG_UINT32 ui32CmdCompSize;
+	IMG_UINT32 ui32CopySize;
+	IMG_PUINT8 pui8ReadyData;
+	IMG_PVOID pvCompleteData;
+	DC_CMD_RDY_DATA *psReadyData;
+	DC_CMD_COMP_DATA *psCompleteData;
+	PVRSRV_DATA *psData = PVRSRVGetPVRSRVData();
+
+	_DCDisplayContextAcquireRef(psDisplayContext);
+
+	if (ui32MaxDepth == 1)
+	{
+		eError = PVRSRV_ERROR_DC_INVALID_MAXDEPTH;
+		goto FailMaxDepth;
+	}
+	else if (ui32MaxDepth > 0)
+	{
+		/* ui32TokenOut/In wrap-around case takes care of itself. */
+		if (psDisplayContext->ui32TokenOut - psDisplayContext->ui32TokenIn >= ui32MaxDepth)
+		{
+			eError = PVRSRV_ERROR_RETRY;
+			goto FailMaxDepth;
+		}
+	}
+
+	/* Reset the release fd */
+	if (pi32ReleaseFenceFd)
+		*pi32ReleaseFenceFd = -1;
+
+	/* If we get sent a NULL flip then we don't need to do the check or map */
+	if (ui32PipeCount != 0)
+	{
+		/* Create an array of private device specific buffer handles */
+		eError = _DCDeviceBufferArrayCreate(ui32PipeCount,
+											papsBuffers,
+											&ahBuffers);
+		if (eError != PVRSRV_OK)
+		{
+			goto FailBufferArrayCreate;
+		}
+	
+		/* Do we need to check if this is valid config? */
+		if (psDevice->psFuncTable->pfnContextConfigureCheck)
+		{
+	
+			eError = psDevice->psFuncTable->pfnContextConfigureCheck(psDisplayContext->hDisplayContext,
+																	ui32PipeCount,
+																	pasSurfAttrib,
+																	ahBuffers);
+			if (eError != PVRSRV_OK)
+			{
+				goto FailConfigCheck;
+			}
+		}
+	
+		/* Map all the buffers that are going to be used */
+		for (i=0;i<ui32PipeCount;i++)
+		{
+			eError = _DCBufferMap(papsBuffers[i]);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "DCDisplayContextConfigure: Failed to map buffer"));
+				goto FailMapBuffer;
+			}
+			ui32BuffersMapped++;
+		}
+	}
+
+	ui32CmdRdySize = sizeof(DC_CMD_RDY_DATA) +  
+					 ((sizeof(IMG_HANDLE) + sizeof(PVRSRV_SURFACE_CONFIG_INFO))
+					 * ui32PipeCount);
+	ui32CmdCompSize = sizeof(DC_CMD_COMP_DATA) + 
+					  (sizeof(DC_BUFFER *) * ui32PipeCount);
+
+	/* Allocate a command */
+	eError = SCPAllocCommand(psDisplayContext->psSCPContext,
+							 ui32SyncOpCount,
+							 papsSync,
+							 pabUpdate,
+							 i32AcquireFenceFd,
+							 _DCDisplayContextReady,
+							 _DCDisplayContextConfigure,
+							 ui32CmdRdySize,
+							 ui32CmdCompSize,
+							 (IMG_PVOID *)&pui8ReadyData,
+							 &pvCompleteData,
+							 pi32ReleaseFenceFd);
+
+	if (eError != PVRSRV_OK)
+	{
+		goto FailCommandAlloc;
+	}
+
+	/*
+		Set up command ready data
+	*/
+	psReadyData = (DC_CMD_RDY_DATA *)pui8ReadyData;
+	pui8ReadyData += sizeof(DC_CMD_RDY_DATA);
+
+	psReadyData->ui32DisplayPeriod = ui32DisplayPeriod;
+	psReadyData->psDisplayContext = psDisplayContext;
+	psReadyData->ui32BufferCount = ui32PipeCount;
+
+	/* Copy over surface atrribute array */
+	if (ui32PipeCount != 0)
+	{
+		psReadyData->pasSurfAttrib = (PVRSRV_SURFACE_CONFIG_INFO *)pui8ReadyData;
+		ui32CopySize = sizeof(PVRSRV_SURFACE_CONFIG_INFO) * ui32PipeCount;
+		OSMemCopy(psReadyData->pasSurfAttrib, pasSurfAttrib, ui32CopySize);
+		pui8ReadyData = pui8ReadyData + ui32CopySize;
+	}
+	else
+	{
+		psReadyData->pasSurfAttrib = IMG_NULL;
+	}
+
+	/* Copy over device buffer handle buffer array */
+	if (ui32PipeCount != 0)
+	{
+		psReadyData->pahBuffer = (IMG_HANDLE)pui8ReadyData;
+		ui32CopySize = sizeof(IMG_HANDLE) * ui32PipeCount;
+		OSMemCopy(psReadyData->pahBuffer, ahBuffers, ui32CopySize);
+	}
+	else
+	{
+		psReadyData->pahBuffer = IMG_NULL;
+	}
+
+	/*
+		Set up command complete data
+	*/
+	psCompleteData = pvCompleteData;
+	pvCompleteData = (IMG_PUINT8)pvCompleteData + sizeof(DC_CMD_COMP_DATA);
+
+	psCompleteData->psDisplayContext = psDisplayContext;
+	psCompleteData->ui32Token = psDisplayContext->ui32TokenOut++;
+	psCompleteData->ui32BufferCount = ui32PipeCount;
+	psCompleteData->bDirectNullFlip = IMG_FALSE;
+
+	if (ui32PipeCount != 0)
+	{
+		/* Copy the buffer pointers */
+		psCompleteData->apsBuffer = pvCompleteData;
+		for (i=0;i<ui32PipeCount;i++)
+		{
+			psCompleteData->apsBuffer[i] = papsBuffers[i];
+		}
+	}
+
+	/* Check if we need to do any CPU cache operations before sending the config */
+	OSCPUOperation(psData->uiCacheOp);
+	psData->uiCacheOp = PVRSRV_CACHE_OP_NONE;
+
+	/* Submit the command */
+	eError = SCPSubmitCommand(psDisplayContext->psSCPContext);
+
+	/* Check for new work on this display context */
+	_DCDisplayContextRun(psDisplayContext);
+
+	/* The only way this submit can fail is if there is a bug in this module */
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	if (ui32PipeCount != 0)
+	{
+		_DCDeviceBufferArrayDestroy(ahBuffers);
+	}
+
+	return PVRSRV_OK;
+
+FailCommandAlloc:
+FailMapBuffer:
+	if (ui32PipeCount != 0)
+	{
+		for (i=0;i<ui32BuffersMapped;i++)
+		{
+			_DCBufferUnmap(papsBuffers[i]);
+
+			/*
+			 * ahBuffers points to new buffers allocated in
+			 * pfnContextConfigureCheck, need to free them here.
+			 */
+			OSFreeMem(ahBuffers[i]);
+		}
+	}
+FailConfigCheck:
+	if (ui32PipeCount != 0)
+	{
+		_DCDeviceBufferArrayDestroy(ahBuffers);
+	}
+FailBufferArrayCreate:
+FailMaxDepth:
+	_DCDisplayContextReleaseRef(psDisplayContext);
+
+	return eError;
+}
+
+PVRSRV_ERROR DCDisplayContextDestroy(DC_DISPLAY_CONTEXT *psDisplayContext)
+{
+	PVRSRV_ERROR eError;
+
+	/*
+		On the first cleanup request try to issue the NULL flip.
+		If we fail then we should get retry which we pass back to
+		the caller who will try again later.
+	*/
+	if (!psDisplayContext->bIssuedNullFlip)
+	{
+		eError = DCDisplayContextConfigure(psDisplayContext,
+										   0,
+										   IMG_NULL,
+										   IMG_NULL,
+										   0,
+										   IMG_NULL,
+										   IMG_NULL,
+										   0,
+										   0,
+										   -1,
+										   IMG_NULL);
+
+		if (eError != PVRSRV_OK)
+		{
+			return eError;
+		}
+		psDisplayContext->bIssuedNullFlip = IMG_TRUE;
+	}
+
+	/*
+		Flush out everything from SCP
+		
+		This will ensure that the MISR isn't dropping the last reference
+		which would cause a deadlock during cleanup
+	*/
+	eError = SCPFlush(psDisplayContext->psSCPContext);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	_DCDisplayContextReleaseRef(psDisplayContext);
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR DCBufferAlloc(DC_DISPLAY_CONTEXT *psDisplayContext,
+						   DC_BUFFER_CREATE_INFO *psSurfInfo,
+						   IMG_UINT32 *pui32ByteStride,
+						   DC_BUFFER **ppsBuffer)
+{
+	DC_DEVICE *psDevice = psDisplayContext->psDevice;
+	DC_BUFFER *psNew;
+	PMR *psPMR;
+	PVRSRV_ERROR eError;
+	IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize;
+	IMG_UINT32 ui32PageCount;
+	IMG_UINT32 ui32PhysHeapID;
+
+	psNew = OSAllocMem(sizeof(DC_BUFFER));
+	if (psNew == IMG_NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+	OSMemSet(psNew, 0, sizeof(DC_BUFFER));
+
+	eError = OSLockCreate(&psNew->hLock, LOCK_TYPE_NONE);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_lock;
+	}
+
+	eError = OSLockCreate(&psNew->hMapLock, LOCK_TYPE_NONE);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_maplock;
+	}
+
+	eError = psDevice->psFuncTable->pfnBufferAlloc(psDisplayContext->hDisplayContext,
+												  psSurfInfo,
+												  &uiLog2PageSize,
+												  &ui32PageCount,
+												  &ui32PhysHeapID,
+												  pui32ByteStride,
+												  &psNew->hBuffer);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_bufferalloc;
+	}
+
+	/*
+		Fill in the basic info for our buffer
+		(must be before _DCCreatePMR)
+	*/
+	psNew->psDisplayContext = psDisplayContext;
+	psNew->eType = DC_BUFFER_TYPE_ALLOC;
+	psNew->ui32MapCount = 0;
+	psNew->ui32RefCount = 1;
+
+	eError = _DCCreatePMR(uiLog2PageSize,
+						  ui32PageCount,
+						  ui32PhysHeapID,
+						  psNew,
+						  &psPMR);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_createpmr;
+	}
+
+#if defined(PVR_RI_DEBUG)
+	{
+		/* Dummy handle - we don't need to store the reference to the PMR RI entry. Its deletion is handled internally. */
+		DC_DISPLAY_INFO	sDisplayInfo;
+		IMG_INT32 i32RITextSize;
+		IMG_CHAR pszRIText[RI_MAX_TEXT_LEN];
+
+		DCGetInfo(psDevice, &sDisplayInfo);
+		i32RITextSize = OSSNPrintf((IMG_CHAR *)pszRIText, RI_MAX_TEXT_LEN, "%s: DisplayContext 0x%p BufferAlloc", (IMG_CHAR *)sDisplayInfo.szDisplayName, &psDevice->sSystemContext);
+		if (i32RITextSize < 0)
+		{
+			pszRIText[0] = '\0';
+			i32RITextSize = 0;
+		}
+		else
+		{
+			pszRIText[RI_MAX_TEXT_LEN-1] = '\0';
+		}
+		eError = RIWritePMREntryKM (psPMR,
+									(IMG_UINT32)i32RITextSize,
+									(IMG_CHAR *)pszRIText,
+									(uiLog2PageSize*ui32PageCount));
+	}
+#endif
+
+	psNew->uBufferData.sAllocData.psPMR = psPMR;
+	_DCDisplayContextAcquireRef(psDisplayContext);
+
+	*ppsBuffer = psNew;
+
+	return PVRSRV_OK;
+
+fail_createpmr:
+	psDevice->psFuncTable->pfnBufferFree(psNew->hBuffer);
+fail_bufferalloc:
+	OSLockDestroy(psNew->hMapLock);
+fail_maplock:
+	OSLockDestroy(psNew->hLock);
+fail_lock:
+	OSFreeMem(psNew);
+	return eError;
+}
+
+PVRSRV_ERROR DCBufferFree(DC_BUFFER *psBuffer)
+{
+	/*
+		Only drop the reference on the PMR if this is a DC allocated
+		buffer. In the case of imported buffers the 3rd party DC
+		driver manages the PMR's "directly"
+	*/
+	if (psBuffer->eType == DC_BUFFER_TYPE_ALLOC)
+	{
+		PMRUnrefPMR(psBuffer->uBufferData.sAllocData.psPMR);
+	}
+	_DCBufferReleaseRef(psBuffer);
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR DCBufferImport(DC_DISPLAY_CONTEXT *psDisplayContext,
+							IMG_UINT32 ui32NumPlanes,
+							PMR **papsImport,
+						    DC_BUFFER_IMPORT_INFO *psSurfAttrib,
+						    DC_BUFFER **ppsBuffer)
+{
+	DC_DEVICE *psDevice = psDisplayContext->psDevice;
+	DC_BUFFER *psNew;
+	PVRSRV_ERROR eError;
+	IMG_UINT32 i;
+
+	if(psDevice->psFuncTable->pfnBufferImport == IMG_NULL)
+	{
+		eError = PVRSRV_ERROR_NOT_SUPPORTED;
+		goto FailEarlyError;
+	}
+
+	psNew = OSAllocMem(sizeof(DC_BUFFER));
+	if (psNew == IMG_NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto FailEarlyError;
+	}
+	OSMemSet(psNew, 0, sizeof(DC_BUFFER));
+
+	eError = OSLockCreate(&psNew->hLock, LOCK_TYPE_NONE);
+	if (eError != PVRSRV_OK)
+	{
+		goto FailLock;
+	}
+
+	eError = OSLockCreate(&psNew->hMapLock, LOCK_TYPE_NONE);
+	if (eError != PVRSRV_OK)
+	{
+		goto FailMapLock;
+	}
+
+	eError = psDevice->psFuncTable->pfnBufferImport(psDisplayContext->hDisplayContext,
+													ui32NumPlanes,
+													(IMG_HANDLE **)papsImport,
+													psSurfAttrib,
+													&psNew->hBuffer);
+	if (eError != PVRSRV_OK)
+	{
+		goto FailBufferImport;
+	}
+
+	/*
+		Take a reference on the PMR to make sure it can't be released before
+		we've finished with it
+	*/
+	for (i=0;i<ui32NumPlanes;i++)
+	{
+		PMRRefPMR(papsImport[i]);
+		psNew->uBufferData.sImportData.apsImport[i] = papsImport[i];
+	}
+
+	_DCDisplayContextAcquireRef(psDisplayContext);
+	psNew->psDisplayContext = psDisplayContext;
+	psNew->eType = DC_BUFFER_TYPE_IMPORT;
+	psNew->uBufferData.sImportData.ui32NumPlanes = ui32NumPlanes;
+	psNew->ui32MapCount = 0;
+	psNew->ui32RefCount = 1;
+
+	*ppsBuffer = psNew;
+
+	return PVRSRV_OK;
+
+FailBufferImport:
+	OSLockDestroy(psNew->hMapLock);
+FailMapLock:
+	OSLockDestroy(psNew->hLock);
+FailLock:
+	OSFreeMem(psNew);
+
+FailEarlyError:
+	return eError;
+}
+
+PVRSRV_ERROR DCBufferUnimport(DC_BUFFER *psBuffer)
+{
+	_DCBufferReleaseRef(psBuffer);
+	return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR DCBufferAcquire(DC_BUFFER *psBuffer, PMR **ppsPMR)
+{
+	PMR *psPMR = psBuffer->uBufferData.sAllocData.psPMR;
+	PVRSRV_ERROR eError;
+
+	if (psBuffer->eType == DC_BUFFER_TYPE_IMPORT)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "DCBufferAcquire: Invalid request, DC buffer is an import"));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto fail_typecheck;
+	}
+	PMRRefPMR(psPMR);
+
+	*ppsPMR = psPMR;
+	return PVRSRV_OK;
+	
+fail_typecheck:
+	return eError;
+}
+
+PVRSRV_ERROR DCBufferRelease(PMR *psPMR)
+{
+	/*
+		Drop our reference on the PMR. If we're the last one then the PMR
+		will be freed and are _DCPMRFinalize function will be called where
+		we drop our reference on the buffer
+	*/
+	PMRUnrefPMR(psPMR);
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR DCBufferPin(DC_BUFFER *psBuffer, DC_PIN_HANDLE *phPin)
+{
+	*phPin = psBuffer;
+	return _DCBufferMap(psBuffer);
+}
+
+PVRSRV_ERROR DCBufferUnpin(DC_PIN_HANDLE hPin)
+{
+	DC_BUFFER *psBuffer = hPin;
+
+	_DCBufferUnmap(psBuffer);
+	return PVRSRV_OK;
+}
+
+/*****************************************************************************
+ *     Public interface functions for 3rd party display class devices        *
+ *****************************************************************************/
+
+PVRSRV_ERROR DCRegisterDevice(DC_DEVICE_FUNCTIONS *psFuncTable,
+							  IMG_UINT32 ui32MaxConfigsInFlight,
+							  IMG_HANDLE hDeviceData,
+							  IMG_HANDLE *phSrvHandle)
+{
+	DC_DEVICE *psNew;
+	PVRSRV_ERROR eError;
+
+	psNew = OSAllocMem(sizeof(DC_DEVICE));
+	if (psNew == IMG_NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto FailAlloc;
+	}
+
+	eError = OSLockCreate(&psNew->hLock, LOCK_TYPE_NONE);
+	if (eError != PVRSRV_OK)
+	{
+		goto FailLockCreate;
+	}
+
+	psNew->psFuncTable = psFuncTable;
+	psNew->ui32MaxConfigsInFlight = ui32MaxConfigsInFlight;
+	psNew->hDeviceData = hDeviceData;
+	psNew->ui32RefCount = 1;
+	psNew->hSystemBuffer = IMG_NULL;
+	psNew->ui32Index = g_ui32DCNextIndex++;
+	eError = OSEventObjectCreate("DC_EVENT_OBJ", &psNew->psEventList);
+	if (eError != PVRSRV_OK)
+	{
+		goto FailEventObject;
+	}
+
+	/* Init state required for system surface */
+	psNew->hSystemBuffer = IMG_NULL;
+	psNew->psSystemBufferPMR = IMG_NULL;
+	psNew->sSystemContext.psDevice = psNew;
+	psNew->sSystemContext.hDisplayContext = hDeviceData;	/* FIXME: Is this the correct thing to do? */
+
+	OSLockAcquire(g_hDCListLock);
+	psNew->psNext = g_psDCDeviceList;
+	
+	g_psDCDeviceList = psNew;
+	g_ui32DCDeviceCount++;
+	OSLockRelease(g_hDCListLock);
+
+	*phSrvHandle = (IMG_HANDLE) psNew;
+
+	return PVRSRV_OK;
+
+FailEventObject:
+	OSLockDestroy(psNew->hLock);
+FailLockCreate:
+	OSFreeMem(psNew);
+FailAlloc:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+IMG_VOID DCUnregisterDevice(IMG_HANDLE hSrvHandle)
+{
+	DC_DEVICE *psDevice = (DC_DEVICE *) hSrvHandle;
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_ERROR eError;
+
+	/*
+		If the system buffer was acquired and a PMR created for it, release
+		it before releasing the device as the PMR will have a reference to
+		the device
+	*/
+	if (psDevice->psSystemBufferPMR)
+	{
+		PMRUnrefPMR(psDevice->psSystemBufferPMR);
+	}
+
+	/*
+	 * At this stage the DC driver wants to unload, if other things have
+	 * reference to the DC device we need to block here until they have
+	 * been release as when this function returns the DC driver code could
+	 * be unloaded.
+	 */
+
+	/* If the driver is in a bad state we just free resources regardless */
+	if (psPVRSRVData->eServicesState == PVRSRV_SERVICES_STATE_OK)
+	{
+		volatile IMG_UINT32 * ref_count_ptr = &(psDevice->ui32RefCount);
+
+	    /* Skip the wait if we're the last reference holder */
+		if (*ref_count_ptr != 1)
+		{
+			IMG_HANDLE hEvent;
+			
+			eError = OSEventObjectOpen(psDevice->psEventList, &hEvent);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						 "%s: Failed to open event object (%d), will busy wait",
+						 __FUNCTION__, eError));
+				hEvent = IMG_NULL;
+			}
+			
+			while(*ref_count_ptr != 1)
+			{
+				if (hEvent != IMG_NULL)
+				{
+					OSEventObjectWait(hEvent);
+				}
+			}
+			if (hEvent != IMG_NULL)
+			{
+				OSEventObjectClose(hEvent);
+			}
+		}
+	}
+	else
+	{
+		/* We're in a bad state, force the refcount */
+		psDevice->ui32RefCount = 1;
+	}
+
+	_DCDeviceReleaseRef(psDevice);
+
+	PVR_ASSERT(psDevice->ui32RefCount == 0);
+	OSEventObjectDestroy(psDevice->psEventList);
+	OSLockDestroy(psDevice->hLock);
+	OSFreeMem(psDevice);
+}
+
+IMG_VOID DCDisplayConfigurationRetired(IMG_HANDLE hConfigData)
+{
+	DC_CMD_COMP_DATA *psData = hConfigData;
+	DC_DISPLAY_CONTEXT *psDisplayContext = psData->psDisplayContext;
+	IMG_UINT32 i;
+
+	DC_DEBUG_PRINT("DCDisplayConfigurationRetired: Command (%d) received", psData->ui32Token);
+	/* Sanity check */
+	if (!psData->bDirectNullFlip && psData->ui32Token != psDisplayContext->ui32TokenIn)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"Display config retired in unexpected order (was %d, expecting %d)",
+				psData->ui32Token, psDisplayContext->ui32TokenIn));
+		PVR_ASSERT(IMG_FALSE);
+	}
+
+	OSLockAcquire(psDisplayContext->hLock);
+	if ( !psData->bDirectNullFlip )
+	{
+		psDisplayContext->ui32TokenIn++;
+	}
+
+#if defined SUPPORT_DC_COMPLETE_TIMEOUT_DEBUG
+	if (psDisplayContext->hTimer)
+	{
+		OSDisableTimer(psDisplayContext->hTimer);
+		OSRemoveTimer(psDisplayContext->hTimer);
+		psDisplayContext->hTimer = IMG_NULL;
+	}
+#endif	/* SUPPORT_DC_COMPLETE_TIMEOUT_DEBUG */
+
+	psDisplayContext->ui32ConfigsInFlight--;
+	OSLockRelease(psDisplayContext->hLock);
+
+	for (i = 0; i < psData->ui32BufferCount; i++)
+	{
+		_DCBufferUnmap(psData->apsBuffer[i]);
+	}
+
+	_DCDisplayContextReleaseRef(psDisplayContext);
+
+	/*
+		Note:
+
+		We must call SCPCommandComplete here and not before as we need
+		to ensure that we're not the last to hold the reference as
+		we can't destroy the display context from the MISR which we
+		can be called from.
+	*/
+	SCPCommandComplete(psDisplayContext->psSCPContext);
+
+	/* Notify devices (including ourself) in case some item has been unblocked */
+	PVRSRVCheckStatus(IMG_NULL);
+}
+
+IMG_BOOL DCDisplayHasPendingCommand(IMG_HANDLE hConfigData)
+{
+	DC_CMD_COMP_DATA *psData = hConfigData;
+	DC_DISPLAY_CONTEXT *psDisplayContext = psData->psDisplayContext;
+	IMG_BOOL bRet;
+
+	_DCDisplayContextAcquireRef(psDisplayContext);
+	bRet = SCPHasPendingCommand(psDisplayContext->psSCPContext);
+	_DCDisplayContextReleaseRef(psDisplayContext);
+
+	return bRet;
+}
+
+PVRSRV_ERROR DCImportBufferAcquire(IMG_HANDLE hImport,
+								   IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize,
+								   IMG_UINT32 *pui32PageCount,
+								   IMG_DEV_PHYADDR **ppasDevPAddr)
+{
+	PMR *psPMR = hImport;
+	IMG_DEV_PHYADDR *pasDevPAddr;
+	IMG_DEVMEM_SIZE_T uiLogicalSize;
+	IMG_SIZE_T uiPageCount;
+	IMG_BOOL *pbValid;
+	PVRSRV_ERROR eError;
+#if defined(DEBUG)
+	IMG_UINT32 i;
+#endif
+
+	eError = PMR_LogicalSize(psPMR, &uiLogicalSize);
+	if (eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+
+	uiPageCount = TRUNCATE_64BITS_TO_SIZE_T(uiLogicalSize >> uiLog2PageSize);
+
+	pasDevPAddr = OSAllocMem(sizeof(IMG_DEV_PHYADDR) * uiPageCount);
+	if (pasDevPAddr == IMG_NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e0;
+	}
+		
+	pbValid = OSAllocMem(uiPageCount * sizeof(IMG_BOOL));
+	if (pbValid == IMG_NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e1;
+	}
+
+	/* Lock the pages */
+	eError = PMRLockSysPhysAddresses(psPMR, uiLog2PageSize);
+	if (eError != PVRSRV_OK)
+	{
+		goto e2;
+	}
+
+	/* Get page physical addresses */
+	eError = PMR_DevPhysAddr(psPMR, uiLog2PageSize, uiPageCount, 0,
+							 pasDevPAddr, pbValid);
+	if (eError != PVRSRV_OK)
+	{
+		goto e3;
+	}
+
+#if defined(DEBUG)
+	/* The DC import function doesn't support 
+	   sparse allocations */
+	for (i=0; i<uiPageCount; i++)
+	{
+		PVR_ASSERT(pbValid[i]);
+	}
+#endif
+
+	OSFreeMem(pbValid);
+
+	*pui32PageCount = TRUNCATE_SIZE_T_TO_32BITS(uiPageCount);
+	*ppasDevPAddr = pasDevPAddr;
+	return PVRSRV_OK;
+
+e3:
+	PMRUnlockSysPhysAddresses(psPMR);
+e2:
+	OSFreeMem(pbValid);
+e1:
+	OSFreeMem(pasDevPAddr);
+e0:
+	return eError;
+}
+
+IMG_VOID DCImportBufferRelease(IMG_HANDLE hImport,
+							   IMG_DEV_PHYADDR *pasDevPAddr)
+{
+	PMR *psPMR = hImport;
+
+	/* Unlock the pages */
+	PMRUnlockSysPhysAddresses(psPMR);
+	OSFreeMem(pasDevPAddr);
+}
+
+/*****************************************************************************
+ *                Public interface functions for services                    *
+ *****************************************************************************/
+PVRSRV_ERROR DCInit()
+{
+	g_psDCDeviceList = IMG_NULL;
+	g_ui32DCNextIndex = 0;
+	dllist_init(&g_sDisplayContextsList);
+	return OSLockCreate(&g_hDCListLock, LOCK_TYPE_NONE);
+}
+
+PVRSRV_ERROR DCDeInit()
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+	if (psPVRSRVData->eServicesState == PVRSRV_SERVICES_STATE_OK)
+	{
+		PVR_ASSERT(g_psDCDeviceList == IMG_NULL);
+	}
+
+	OSLockDestroy(g_hDCListLock);
+
+	return PVRSRV_OK;
+}
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/devicemem_heapcfg.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/devicemem_heapcfg.c
new file mode 100644
index 0000000..6c86b8d
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/devicemem_heapcfg.c
@@ -0,0 +1,135 @@
+/*************************************************************************/ /*!
+@File           devicemem_heapcfg.c
+@Title          Temporary Device Memory 2 stuff
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device memory management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+/* our exported API */
+#include "devicemem_heapcfg.h"
+
+#include "device.h"
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "osfunc.h"
+
+
+PVRSRV_ERROR
+HeapCfgHeapConfigCount(
+    const PVRSRV_DEVICE_NODE *psDeviceNode,
+    IMG_UINT32 *puiNumHeapConfigsOut
+)
+{
+
+    *puiNumHeapConfigsOut = psDeviceNode->sDevMemoryInfo.uiNumHeapConfigs;
+
+    return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+HeapCfgHeapCount(
+    const PVRSRV_DEVICE_NODE *psDeviceNode,
+    IMG_UINT32 uiHeapConfigIndex,
+    IMG_UINT32 *puiNumHeapsOut
+)
+{
+    if (uiHeapConfigIndex >= psDeviceNode->sDevMemoryInfo.uiNumHeapConfigs)
+    {
+        return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_CONFIG_INDEX;
+    }
+
+    *puiNumHeapsOut = psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeapConfigArray[uiHeapConfigIndex].uiNumHeaps;
+
+    return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+HeapCfgHeapConfigName(
+    const PVRSRV_DEVICE_NODE *psDeviceNode,
+    IMG_UINT32 uiHeapConfigIndex,
+    IMG_UINT32 uiHeapConfigNameBufSz,
+    IMG_CHAR *pszHeapConfigNameOut
+)
+{
+    if (uiHeapConfigIndex >= psDeviceNode->sDevMemoryInfo.uiNumHeapConfigs)
+    {
+        return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_CONFIG_INDEX;
+    }
+
+    OSSNPrintf(pszHeapConfigNameOut, uiHeapConfigNameBufSz, "%s", psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeapConfigArray[uiHeapConfigIndex].pszName);
+
+    return PVRSRV_OK;    
+}
+
+PVRSRV_ERROR
+HeapCfgHeapDetails(
+    const PVRSRV_DEVICE_NODE *psDeviceNode,
+    IMG_UINT32 uiHeapConfigIndex,
+    IMG_UINT32 uiHeapIndex,
+    IMG_UINT32 uiHeapNameBufSz,
+    IMG_CHAR *pszHeapNameOut,
+    IMG_DEV_VIRTADDR *psDevVAddrBaseOut,
+    IMG_DEVMEM_SIZE_T *puiHeapLengthOut,
+    IMG_UINT32 *puiLog2DataPageSizeOut,
+    IMG_UINT32 *puiLog2ImportAlignmentOut
+)
+{
+    DEVMEM_HEAP_BLUEPRINT *psHeapBlueprint;
+
+    if (uiHeapConfigIndex >= psDeviceNode->sDevMemoryInfo.uiNumHeapConfigs)
+    {
+        return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_CONFIG_INDEX;
+    }
+
+    if (uiHeapIndex >= psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeapConfigArray[uiHeapConfigIndex].uiNumHeaps)
+    {
+        return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_INDEX;
+    }
+
+    psHeapBlueprint = &psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeapConfigArray[uiHeapConfigIndex].psHeapBlueprintArray[uiHeapIndex];
+
+    OSSNPrintf(pszHeapNameOut, uiHeapNameBufSz, "%s", psHeapBlueprint->pszName);
+    *psDevVAddrBaseOut = psHeapBlueprint->sHeapBaseAddr;
+    *puiHeapLengthOut = psHeapBlueprint->uiHeapLength;
+    *puiLog2DataPageSizeOut = psHeapBlueprint->uiLog2DataPageSize;
+    *puiLog2ImportAlignmentOut = psHeapBlueprint->uiLog2ImportAlignment;
+
+    return PVRSRV_OK;    
+}
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/devicemem_history_server.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/devicemem_history_server.c
new file mode 100644
index 0000000..0ac171d
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/devicemem_history_server.c
@@ -0,0 +1,225 @@
+/*************************************************************************/ /*!
+@File
+@Title          Devicemem history functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Devicemem history functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "allocmem.h"
+#include "pmr.h"
+#include "pvrsrv.h"
+#include "pvrsrv_device.h"
+#include "pvr_debug.h"
+#include "dllist.h"
+#include "syscommon.h"
+#include "devicemem_server.h"
+#include "lock.h"
+#include "devicemem_history_server.h"
+
+/* a device memory allocation */
+typedef struct _DEVICEMEM_HISTORY_ALLOCATION_
+{
+	IMG_DEV_VIRTADDR sDevVAddr;
+	IMG_DEVMEM_SIZE_T uiSize;
+	IMG_CHAR szString[DEVICEMEM_HISTORY_TEXT_BUFSZ];
+	IMG_UINT64 ui64Time;
+	/* FALSE if this allocation has been freed */
+	IMG_BOOL bAllocated;
+	IMG_PID uiPID;
+} DEVICEMEM_HISTORY_ALLOCATION;
+
+/* this number of entries makes the history buffer allocation just under 2MB */
+#define DEVICEMEM_HISTORY_ALLOCATION_HISTORY_LEN 29127
+
+typedef struct _DEVICEMEM_HISTORY_DATA_
+{
+	IMG_UINT32 ui32Head;
+	DEVICEMEM_HISTORY_ALLOCATION *psAllocations;
+	POS_LOCK hLock;
+} DEVICEMEM_HISTORY_DATA;
+
+static DEVICEMEM_HISTORY_DATA gsDevicememHistoryData = { 0 };
+
+static INLINE IMG_VOID DevicememHistoryLock(IMG_VOID)
+{
+	OSLockAcquire(gsDevicememHistoryData.hLock);
+}
+
+static INLINE IMG_VOID DevicememHistoryUnlock(IMG_VOID)
+{
+	OSLockRelease(gsDevicememHistoryData.hLock);
+}
+
+PVRSRV_ERROR DevicememHistoryInitKM(IMG_VOID)
+{
+	PVRSRV_ERROR eError;
+
+	eError = OSLockCreate(&gsDevicememHistoryData.hLock, LOCK_TYPE_PASSIVE);
+
+	if(eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "DevicememHistoryInitKM: Failed to create lock"));
+		goto err_lock;
+	}
+
+	gsDevicememHistoryData.psAllocations = OSAllocZMem(sizeof(DEVICEMEM_HISTORY_ALLOCATION) * DEVICEMEM_HISTORY_ALLOCATION_HISTORY_LEN);
+
+	if(gsDevicememHistoryData.psAllocations == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "DevicememHistoryInitKM: Failed to allocate space for allocations list"));
+		goto err_allocations;
+	}
+
+	return PVRSRV_OK;
+
+err_allocations:
+	OSLockDestroy(gsDevicememHistoryData.hLock);
+err_lock:
+	return eError;
+}
+
+IMG_VOID DevicememHistoryDeInitKM(IMG_VOID)
+{
+	OSFREEMEM(gsDevicememHistoryData.psAllocations);
+	OSLockDestroy(gsDevicememHistoryData.hLock);
+}
+
+static PVRSRV_ERROR DevicememHistoryWrite(IMG_DEV_VIRTADDR sDevVAddr, IMG_SIZE_T uiSize,
+						const char szString[DEVICEMEM_HISTORY_TEXT_BUFSZ],
+						IMG_BOOL bAlloc)
+{
+	DEVICEMEM_HISTORY_ALLOCATION *psAlloc;
+
+	PVR_ASSERT(gsDevicememHistoryData.psAllocations != IMG_NULL);
+
+	DevicememHistoryLock();
+
+	psAlloc = &gsDevicememHistoryData.psAllocations[gsDevicememHistoryData.ui32Head];
+	PVR_ASSERT(gsDevicememHistoryData.ui32Head < DEVICEMEM_HISTORY_ALLOCATION_HISTORY_LEN);
+
+	gsDevicememHistoryData.ui32Head = (gsDevicememHistoryData.ui32Head + 1) % DEVICEMEM_HISTORY_ALLOCATION_HISTORY_LEN;
+
+	psAlloc->sDevVAddr = sDevVAddr;
+	psAlloc->uiSize = uiSize;
+	psAlloc->uiPID = OSGetCurrentProcessID();
+	OSStringNCopy(psAlloc->szString, szString, sizeof(psAlloc->szString));
+	psAlloc->szString[sizeof(psAlloc->szString) - 1] = '\0';
+	psAlloc->bAllocated = bAlloc;
+	psAlloc->ui64Time = OSClockns64();
+
+	DevicememHistoryUnlock();
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR DevicememHistoryMapKM(IMG_DEV_VIRTADDR sDevVAddr, IMG_SIZE_T uiSize, const char szString[DEVICEMEM_HISTORY_TEXT_BUFSZ])
+{
+	return DevicememHistoryWrite(sDevVAddr, uiSize, szString, IMG_TRUE);
+}
+
+PVRSRV_ERROR DevicememHistoryUnmapKM(IMG_DEV_VIRTADDR sDevVAddr, IMG_SIZE_T uiSize, const char szString[DEVICEMEM_HISTORY_TEXT_BUFSZ])
+{
+	return DevicememHistoryWrite(sDevVAddr, uiSize, szString, IMG_FALSE);
+}
+
+/* given a time stamp, calculate the age in nanoseconds (relative to now) */
+static IMG_UINT64 _CalculateAge(IMG_UINT64 ui64Then)
+{
+	IMG_UINT64 ui64Now;
+
+	ui64Now = OSClockns64();
+
+	if(ui64Now >= ui64Then)
+	{
+		/* no clock wrap */
+		return ui64Now - ui64Then;
+	}
+	else
+	{
+		/* clock has wrapped */
+		return ((~(IMG_UINT64) 0) - ui64Then) + ui64Now + 1;
+	}
+}
+
+IMG_BOOL DevicememHistoryQuery(DEVICEMEM_HISTORY_QUERY_IN *psQueryIn, DEVICEMEM_HISTORY_QUERY_OUT *psQueryOut)
+{
+	IMG_UINT32 ui32Entry;
+
+	/* initialise the results count for the caller */
+	psQueryOut->ui32NumResults = 0;
+
+	DevicememHistoryLock();
+
+	/* search from newest to oldest */
+
+	ui32Entry = gsDevicememHistoryData.ui32Head;
+
+	do
+	{
+		DEVICEMEM_HISTORY_ALLOCATION *psAlloc;
+
+		/* searching backwards (from newest to oldest)
+		 * wrap around backwards when going past zero
+		 */
+		ui32Entry = (ui32Entry != 0) ? ui32Entry - 1 : DEVICEMEM_HISTORY_ALLOCATION_HISTORY_LEN - 1;
+		psAlloc = &gsDevicememHistoryData.psAllocations[ui32Entry];
+
+		if((psAlloc->uiPID == psQueryIn->uiPID) &&
+			(psQueryIn->sDevVAddr.uiAddr >= psAlloc->sDevVAddr.uiAddr) &&
+			(psQueryIn->sDevVAddr.uiAddr < psAlloc->sDevVAddr.uiAddr + psAlloc->uiSize))
+		{
+				DEVICEMEM_HISTORY_QUERY_OUT_RESULT *psResult = &psQueryOut->sResults[psQueryOut->ui32NumResults];
+
+				OSStringNCopy(psResult->szString, psAlloc->szString, sizeof(psResult->szString));
+				psResult->szString[DEVICEMEM_HISTORY_TEXT_BUFSZ - 1] = '\0';
+				psResult->sBaseDevVAddr = psAlloc->sDevVAddr;
+				psResult->uiSize = psAlloc->uiSize;
+				psResult->bAllocated = psAlloc->bAllocated;
+				psResult->ui64Age = _CalculateAge(psAlloc->ui64Time);
+				psResult->ui64When = psAlloc->ui64Time;
+
+				psQueryOut->ui32NumResults++;
+		}
+	} while((psQueryOut->ui32NumResults < DEVICEMEM_HISTORY_QUERY_OUT_MAX_RESULTS) &&
+						(ui32Entry != gsDevicememHistoryData.ui32Head));
+
+	DevicememHistoryUnlock();
+
+	return psQueryOut->ui32NumResults > 0;
+}
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/devicemem_server.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/devicemem_server.c
new file mode 100644
index 0000000..53ebd84
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/devicemem_server.c
@@ -0,0 +1,732 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device Memory Management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Server-side component of the Device Memory Management.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+/* our exported API */
+#include "devicemem_server.h"
+#include "devicemem_utils.h"
+#include "devicemem.h"
+
+#include "device.h" /* For device node */
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+
+#include "mmu_common.h"
+#include "pdump_km.h"
+#include "pmr.h"
+
+#include "allocmem.h"
+#include "osfunc.h"
+#include "lock.h"
+
+struct _DEVMEMINT_CTX_
+{
+    PVRSRV_DEVICE_NODE *psDevNode;
+
+    /* MMU common code needs to have a context.  There's a one-to-one
+       correspondence between device memory context and MMU context,
+       but we have the abstraction here so that we don't need to care
+       what the MMU does with its context, and the MMU code need not
+       know about us at all. */
+    MMU_CONTEXT *psMMUContext;
+
+    ATOMIC_T hRefCount;
+
+    /* This handle is for devices that require notification when a new
+       memory context is created and they need to store private data that
+       is associated with the context. */
+    IMG_HANDLE hPrivData;
+};
+
+struct _DEVMEMINT_CTX_EXPORT_ 
+{
+	DEVMEMINT_CTX *psDevmemCtx;
+};
+
+struct _DEVMEMINT_HEAP_
+{
+    struct _DEVMEMINT_CTX_ *psDevmemCtx;
+    ATOMIC_T hRefCount;
+};
+
+struct _DEVMEMINT_RESERVATION_
+{
+    struct _DEVMEMINT_HEAP_ *psDevmemHeap;
+    IMG_DEV_VIRTADDR sBase;
+    IMG_DEVMEM_SIZE_T uiLength;
+};
+
+struct _DEVMEMINT_MAPPING_
+{
+    struct _DEVMEMINT_RESERVATION_ *psReservation;
+    PMR *psPMR;
+    IMG_UINT32 uiNumPages;
+    IMG_UINT32 uiLog2PageSize;
+};
+
+/*************************************************************************/ /*!
+@Function       _DevmemIntCtxAcquire
+@Description    Acquire a reference to the provided device memory context.
+@Return         None
+*/ /**************************************************************************/
+static INLINE IMG_VOID _DevmemIntCtxAcquire(DEVMEMINT_CTX *psDevmemCtx)
+{
+	OSAtomicIncrement(&psDevmemCtx->hRefCount);
+}
+
+/*************************************************************************/ /*!
+@Function       _DevmemIntCtxRelease
+@Description    Release the reference to the provided device memory context.
+                If this is the last reference which was taken then the
+                memory context will be freed.
+@Return         None
+*/ /**************************************************************************/
+static INLINE IMG_VOID _DevmemIntCtxRelease(DEVMEMINT_CTX *psDevmemCtx)
+{
+	if (OSAtomicDecrement(&psDevmemCtx->hRefCount) == 0)
+	{
+		/* The last reference has gone, destroy the context */
+		PVRSRV_DEVICE_NODE *psDevNode = psDevmemCtx->psDevNode;
+	
+		if (psDevNode->pfnUnregisterMemoryContext)
+		{
+			psDevNode->pfnUnregisterMemoryContext(psDevmemCtx->hPrivData);
+		}
+	    MMU_ContextDestroy(psDevmemCtx->psMMUContext);
+	
+		PVR_DPF((PVR_DBG_MESSAGE, "%s: Freed memory context %p", __FUNCTION__, psDevmemCtx));
+		OSFreeMem(psDevmemCtx);
+	}
+}
+
+/*************************************************************************/ /*!
+@Function       _DevmemIntHeapAcquire
+@Description    Acquire a reference to the provided device memory heap.
+@Return         None
+*/ /**************************************************************************/
+static INLINE IMG_VOID _DevmemIntHeapAcquire(DEVMEMINT_HEAP *psDevmemHeap)
+{
+	OSAtomicIncrement(&psDevmemHeap->hRefCount);
+}
+
+/*************************************************************************/ /*!
+@Function       _DevmemIntHeapRelease
+@Description    Release the reference to the provided device memory heap.
+                If this is the last reference which was taken then the
+                memory context will be freed.
+@Return         None
+*/ /**************************************************************************/
+static INLINE IMG_VOID _DevmemIntHeapRelease(DEVMEMINT_HEAP *psDevmemHeap)
+{
+	OSAtomicDecrement(&psDevmemHeap->hRefCount);
+}
+
+/*************************************************************************/ /*!
+@Function       DevmemServerGetImportHandle
+@Description    For given exportable memory descriptor returns PMR handle.
+@Return         Memory is exportable - Success
+                PVRSRV_ERROR failure code
+*/ /**************************************************************************/
+PVRSRV_ERROR
+DevmemServerGetImportHandle(DEVMEM_MEMDESC *psMemDesc,
+						   IMG_HANDLE *phImport)
+{
+	PVRSRV_ERROR eError;
+
+	if (psMemDesc->psImport->bExportable == IMG_FALSE)
+	{
+        eError = PVRSRV_ERROR_DEVICEMEM_CANT_EXPORT_SUBALLOCATION;
+        goto e0;
+	}
+
+	*phImport = psMemDesc->psImport->hPMR;
+	return PVRSRV_OK;
+
+e0:
+	return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       DevmemServerGetHeapHandle
+@Description    For given reservation returns the Heap handle.
+@Return         PVRSRV_ERROR failure code
+*/ /**************************************************************************/
+PVRSRV_ERROR
+DevmemServerGetHeapHandle(DEVMEMINT_RESERVATION *psReservation,
+						   IMG_HANDLE *phHeap)
+{
+	*phHeap = psReservation->psDevmemHeap;
+	return PVRSRV_OK;
+}
+
+
+
+/*************************************************************************/ /*!
+@Function       DevmemIntCtxCreate
+@Description    Creates and initialises a device memory context.
+@Return         valid Device Memory context handle - Success
+                PVRSRV_ERROR failure code
+*/ /**************************************************************************/
+PVRSRV_ERROR
+DevmemIntCtxCreate(
+                   PVRSRV_DEVICE_NODE *psDeviceNode,
+                   DEVMEMINT_CTX **ppsDevmemCtxPtr,
+                   IMG_HANDLE *hPrivData
+                   )
+{
+    PVRSRV_ERROR eError;
+    DEVMEMINT_CTX *psDevmemCtx;
+    IMG_HANDLE hPrivDataInt = IMG_NULL;
+
+	PVR_DPF((PVR_DBG_MESSAGE, "%s", __FUNCTION__));
+
+	/* allocate a Devmem context */
+    psDevmemCtx = OSAllocMem(sizeof *psDevmemCtx);
+    if (psDevmemCtx == IMG_NULL)
+	{
+        eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		PVR_DPF ((PVR_DBG_ERROR, "%s: Alloc failed", __FUNCTION__));
+        goto fail_alloc;
+	}
+
+	OSAtomicWrite(&psDevmemCtx->hRefCount, 1);
+    psDevmemCtx->psDevNode = psDeviceNode;
+
+    /* Call down to MMU context creation */
+
+    eError = MMU_ContextCreate(psDeviceNode,
+                               &psDevmemCtx->psMMUContext);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: MMU_ContextCreate failed", __FUNCTION__));
+		goto fail_mmucontext;
+	}
+
+
+	if (psDeviceNode->pfnRegisterMemoryContext)
+	{
+		eError = psDeviceNode->pfnRegisterMemoryContext(psDeviceNode, psDevmemCtx->psMMUContext, &hPrivDataInt);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to register MMU context", __FUNCTION__));
+			goto fail_register;
+		}
+	}
+
+	/* Store the private data as it is required to unregister the memory context */
+	psDevmemCtx->hPrivData = hPrivDataInt;
+	*hPrivData = hPrivDataInt;
+    *ppsDevmemCtxPtr = psDevmemCtx;
+
+	return PVRSRV_OK;
+
+fail_register:
+    MMU_ContextDestroy(psDevmemCtx->psMMUContext);
+fail_mmucontext:
+	OSFREEMEM(psDevmemCtx);
+fail_alloc:
+    PVR_ASSERT(eError != PVRSRV_OK);
+    return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       DevmemIntHeapCreate
+@Description    Creates and initialises a device memory heap.
+@Return         valid Device Memory heap handle - Success
+                PVRSRV_ERROR failure code
+*/ /**************************************************************************/
+PVRSRV_ERROR
+DevmemIntHeapCreate(
+                    DEVMEMINT_CTX *psDevmemCtx,
+                    IMG_DEV_VIRTADDR sHeapBaseAddr,
+                    IMG_DEVMEM_SIZE_T uiHeapLength,
+                    IMG_UINT32 uiLog2DataPageSize,
+                    DEVMEMINT_HEAP **ppsDevmemHeapPtr
+                    )
+{
+    PVRSRV_ERROR eError;
+    DEVMEMINT_HEAP *psDevmemHeap;
+
+	PVR_DPF((PVR_DBG_MESSAGE, "%s: DevmemIntHeap_Create", __FUNCTION__));
+
+	/* allocate a Devmem context */
+	psDevmemHeap = OSAllocMem(sizeof *psDevmemHeap);
+    if (psDevmemHeap == IMG_NULL)
+	{
+        eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		PVR_DPF ((PVR_DBG_ERROR, "%s: Alloc failed", __FUNCTION__));
+        goto fail_alloc;
+	}
+
+    psDevmemHeap->psDevmemCtx = psDevmemCtx;
+
+	_DevmemIntCtxAcquire(psDevmemHeap->psDevmemCtx);
+
+	OSAtomicWrite(&psDevmemHeap->hRefCount, 1);
+
+    *ppsDevmemHeapPtr = psDevmemHeap;
+
+	return PVRSRV_OK;
+
+fail_alloc:
+    return eError;
+}
+
+PVRSRV_ERROR
+DevmemIntMapPMR(DEVMEMINT_HEAP *psDevmemHeap,
+                DEVMEMINT_RESERVATION *psReservation,
+                PMR *psPMR,
+                PVRSRV_MEMALLOCFLAGS_T uiMapFlags,
+                DEVMEMINT_MAPPING **ppsMappingPtr)
+{
+    PVRSRV_ERROR eError;
+    DEVMEMINT_MAPPING *psMapping;
+    /* number of pages (device pages) that allocation spans */
+    IMG_UINT32 ui32NumDevPages;
+    /* device virtual address of start of allocation */
+    IMG_DEV_VIRTADDR sAllocationDevVAddr;
+    /* and its length */
+    IMG_DEVMEM_SIZE_T uiAllocationSize;
+
+	/* allocate memory to record the mapping info */
+	psMapping = OSAllocMem(sizeof *psMapping);
+    if (psMapping == IMG_NULL)
+	{
+        eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		PVR_DPF ((PVR_DBG_ERROR, "DevmemIntMapPMR: Alloc failed"));
+        goto e0;
+	}
+
+    uiAllocationSize = psReservation->uiLength;
+
+
+    ui32NumDevPages = 0xffffffffU & (((uiAllocationSize - 1)
+                                      >> GET_LOG2_PAGESIZE()) + 1);
+    PVR_ASSERT(ui32NumDevPages << GET_LOG2_PAGESIZE() == uiAllocationSize);
+
+    eError = PMRLockSysPhysAddresses(psPMR,
+    		GET_LOG2_PAGESIZE());
+    if (eError != PVRSRV_OK)
+	{
+        goto e2;
+	}
+
+    sAllocationDevVAddr = psReservation->sBase;
+
+    /*  N.B.  We pass mapping permission flags to MMU_MapPMR and let
+       it reject the mapping if the permissions on the PMR are not compatible. */
+
+    eError = MMU_MapPMR (psDevmemHeap->psDevmemCtx->psMMUContext,
+                         sAllocationDevVAddr,
+                         psPMR,
+                         ui32NumDevPages << GET_LOG2_PAGESIZE(),
+                         uiMapFlags,
+                         GET_LOG2_PAGESIZE());
+    PVR_ASSERT(eError == PVRSRV_OK);
+
+    psMapping->psReservation = psReservation;
+    psMapping->uiNumPages = ui32NumDevPages;
+    psMapping->uiLog2PageSize = GET_LOG2_PAGESIZE();
+    psMapping->psPMR = psPMR;
+    /* Don't bother with refcount on reservation, as a reservation
+       only ever holds one mapping, so we directly increment the
+       refcount on the heap instead */
+    _DevmemIntHeapAcquire(psMapping->psReservation->psDevmemHeap);
+
+    *ppsMappingPtr = psMapping;
+
+    return PVRSRV_OK;
+
+ e2:
+	OSFreeMem(psMapping);
+
+ e0:
+    PVR_ASSERT (eError != PVRSRV_OK);
+    return eError;
+}
+
+
+PVRSRV_ERROR
+DevmemIntUnmapPMR(DEVMEMINT_MAPPING *psMapping)
+{
+    PVRSRV_ERROR eError;
+    DEVMEMINT_HEAP *psDevmemHeap;
+    /* device virtual address of start of allocation */
+    IMG_DEV_VIRTADDR sAllocationDevVAddr;
+    /* number of pages (device pages) that allocation spans */
+    IMG_UINT32 ui32NumDevPages;
+
+    psDevmemHeap = psMapping->psReservation->psDevmemHeap;
+
+    ui32NumDevPages = psMapping->uiNumPages;
+    sAllocationDevVAddr = psMapping->psReservation->sBase;
+
+
+    MMU_UnmapPages (psDevmemHeap->psDevmemCtx->psMMUContext,
+                    sAllocationDevVAddr,
+                    ui32NumDevPages,
+                    GET_LOG2_PAGESIZE());
+
+    eError = PMRUnlockSysPhysAddresses(psMapping->psPMR);
+    PVR_ASSERT(eError == PVRSRV_OK);
+
+    /* Don't bother with refcount on reservation, as a reservation
+       only ever holds one mapping, so we directly decrement the
+       refcount on the heap instead */
+    _DevmemIntHeapRelease(psDevmemHeap);
+
+	OSFreeMem(psMapping);
+
+    return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+DevmemIntReserveRange(DEVMEMINT_HEAP *psDevmemHeap,
+                      IMG_DEV_VIRTADDR sAllocationDevVAddr,
+                      IMG_DEVMEM_SIZE_T uiAllocationSize,
+                      DEVMEMINT_RESERVATION **ppsReservationPtr)
+{
+    PVRSRV_ERROR eError;
+    DEVMEMINT_RESERVATION *psReservation;
+
+	/* allocate memory to record the reservation info */
+	psReservation = OSAllocMem(sizeof *psReservation);
+    if (psReservation == IMG_NULL)
+	{
+        eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		PVR_DPF ((PVR_DBG_ERROR, "DevmemIntReserveRange: Alloc failed"));
+        goto e0;
+	}
+
+    psReservation->sBase = sAllocationDevVAddr;
+    psReservation->uiLength = uiAllocationSize;
+
+
+    eError = MMU_Alloc (psDevmemHeap->psDevmemCtx->psMMUContext,
+                        uiAllocationSize,
+                        &uiAllocationSize,
+                        0, /* IMG_UINT32 uiProtFlags */
+                        0, /* alignment is n/a since we supply devvaddr */
+                        &sAllocationDevVAddr,
+                        GET_LOG2_PAGESIZE());
+    if (eError != PVRSRV_OK)
+    {
+        goto e1;
+    }
+
+    /* since we supplied the virt addr, MMU_Alloc shouldn't have
+       chosen a new one for us */
+    PVR_ASSERT(sAllocationDevVAddr.uiAddr == psReservation->sBase.uiAddr);
+
+	_DevmemIntHeapAcquire(psDevmemHeap);
+
+    psReservation->psDevmemHeap = psDevmemHeap;
+    *ppsReservationPtr = psReservation;
+
+    return PVRSRV_OK;
+
+    /*
+      error exit paths follow
+    */
+
+ e1:
+	OSFreeMem(psReservation);
+
+ e0:
+    PVR_ASSERT(eError != PVRSRV_OK);
+    return eError;
+}
+
+PVRSRV_ERROR
+DevmemIntUnreserveRange(DEVMEMINT_RESERVATION *psReservation)
+{
+
+    MMU_Free (psReservation->psDevmemHeap->psDevmemCtx->psMMUContext,
+              psReservation->sBase,
+              psReservation->uiLength,
+              GET_LOG2_PAGESIZE());
+
+	_DevmemIntHeapRelease(psReservation->psDevmemHeap);
+	OSFreeMem(psReservation);
+
+    return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+DevmemIntHeapDestroy(
+                     DEVMEMINT_HEAP *psDevmemHeap
+                     )
+{
+    if (OSAtomicRead(&psDevmemHeap->hRefCount) != 1)
+    {
+        PVR_DPF((PVR_DBG_ERROR, "BUG!  %s called but has too many references (%d) "
+                 "which probably means allocations have been made from the heap and not freed",
+                 __FUNCTION__,
+                 OSAtomicRead(&psDevmemHeap->hRefCount)));
+
+        /*
+	 * Try again later when you've freed all the memory
+	 *
+	 * Note:
+	 * We don't expect the application to retry (after all this call would
+	 * succeed if the client had freed all the memory which it should have
+	 * done before calling this function). However, given there should be
+	 * an associated handle, when the handle base is destroyed it will free
+	 * any allocations leaked by the client and then it will retry this call,
+	 * which should then succeed.
+	 */
+        return PVRSRV_ERROR_RETRY;
+    }
+
+    PVR_ASSERT(OSAtomicRead(&psDevmemHeap->hRefCount) == 1);
+
+	_DevmemIntCtxRelease(psDevmemHeap->psDevmemCtx);
+
+	PVR_DPF((PVR_DBG_MESSAGE, "%s: Freed heap %p", __FUNCTION__, psDevmemHeap));
+	OSFreeMem(psDevmemHeap);
+
+	return PVRSRV_OK;
+}
+
+
+/*************************************************************************/ /*!
+@Function       DevmemIntCtxDestroy
+@Description    Destroy that created by DevmemIntCtxCreate
+@Input          psDevmemCtx   Device Memory context
+@Return         cannot fail.
+*/ /**************************************************************************/
+PVRSRV_ERROR
+DevmemIntCtxDestroy(
+                    DEVMEMINT_CTX *psDevmemCtx
+                    )
+{
+	/*
+		We can't determine if we should be freeing the context here
+		as it refcount!=1 could be due to either the fact that heap(s)
+		remain with allocations on them, or that this memory context
+		has been exported.
+		As the client couldn’t do anything useful with this information
+		anyway and the fact that the refcount will ensure we only
+		free the context when _all_ references have been released
+		don't bother checking and just return OK regardless.
+	*/
+	_DevmemIntCtxRelease(psDevmemCtx);
+	return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       DevmemIntCtxExport
+@Description    Exports a device memory context.
+@Return         valid Device Memory context handle - Success
+                PVRSRV_ERROR failure code
+*/ /**************************************************************************/
+PVRSRV_ERROR
+DevmemIntCtxExport(DEVMEMINT_CTX *psDevmemCtx,
+                   DEVMEMINT_CTX_EXPORT **ppsExport)
+{
+	DEVMEMINT_CTX_EXPORT *psExport;
+
+	psExport = OSAllocMem(sizeof(*psExport));
+	if (psExport == IMG_NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+	_DevmemIntCtxAcquire(psDevmemCtx);
+	psExport->psDevmemCtx = psDevmemCtx;
+	
+	*ppsExport = psExport;
+	return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       DevmemIntCtxUnexport
+@Description    Unexport an exported a device memory context.
+@Return         None
+*/ /**************************************************************************/
+PVRSRV_ERROR
+DevmemIntCtxUnexport(DEVMEMINT_CTX_EXPORT *psExport)
+{
+	_DevmemIntCtxRelease(psExport->psDevmemCtx);
+	OSFreeMem(psExport);
+	return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       DevmemIntCtxImport
+@Description    Import an exported a device memory context.
+@Return         valid Device Memory context handle - Success
+                PVRSRV_ERROR failure code
+*/ /**************************************************************************/
+PVRSRV_ERROR
+DevmemIntCtxImport(DEVMEMINT_CTX_EXPORT *psExport,
+				   DEVMEMINT_CTX **ppsDevmemCtxPtr,
+				   IMG_HANDLE *hPrivData)
+{
+	DEVMEMINT_CTX *psDevmemCtx = psExport->psDevmemCtx;
+
+	_DevmemIntCtxAcquire(psDevmemCtx);
+
+	*ppsDevmemCtxPtr = psDevmemCtx;
+	*hPrivData = psDevmemCtx->hPrivData;
+
+	return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       DevmemSLCFlushInvalRequest
+@Description    Requests a SLC Flush and Invalidate
+@Input          psDeviceNode    Device node
+@Input          psPmr           PMR
+@Return         PVRSRV_OK
+*/ /**************************************************************************/
+PVRSRV_ERROR
+DevmemSLCFlushInvalRequest(PVRSRV_DEVICE_NODE *psDeviceNode,
+							PMR *psPmr)
+{
+
+	/* invoke SLC flush and invalidate request */
+	psDeviceNode->pfnSLCCacheInvalidateRequest(psDeviceNode, psPmr);
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR DevmemIntIsVDevAddrValid(DEVMEMINT_CTX *psDevMemContext,
+                                      IMG_DEV_VIRTADDR sDevAddr)
+{
+    return MMU_IsVDevAddrValid(psDevMemContext->psMMUContext,
+                               GET_LOG2_PAGESIZE(),
+                               sDevAddr) ? PVRSRV_OK : PVRSRV_ERROR_INVALID_GPU_ADDR;
+}
+
+#if defined (PDUMP)
+IMG_UINT32 DevmemIntMMUContextID(DEVMEMINT_CTX *psDevMemContext)
+{
+	IMG_UINT32 ui32MMUContextID;
+	MMU_AcquirePDumpMMUContext(psDevMemContext->psMMUContext, &ui32MMUContextID);
+	return ui32MMUContextID;
+}
+
+PVRSRV_ERROR
+DevmemIntPDumpSaveToFileVirtual(DEVMEMINT_CTX *psDevmemCtx,
+                                IMG_DEV_VIRTADDR sDevAddrStart,
+                                IMG_DEVMEM_SIZE_T uiSize,
+                                IMG_UINT32 ui32ArraySize,
+                                const IMG_CHAR *pszFilename,
+								IMG_UINT32 ui32FileOffset,
+								IMG_UINT32 ui32PDumpFlags)
+{
+    PVRSRV_ERROR eError;
+    IMG_UINT32 uiPDumpMMUCtx;
+
+    PVR_UNREFERENCED_PARAMETER(ui32ArraySize);
+
+	eError = MMU_AcquirePDumpMMUContext(psDevmemCtx->psMMUContext,
+										&uiPDumpMMUCtx);
+
+    PVR_ASSERT(eError == PVRSRV_OK);
+
+    /*
+      The following SYSMEM refers to the 'MMU Context', hence it
+      should be the MMU context, not the PMR, that says what the PDump
+      MemSpace tag is?
+      From a PDump P.O.V. it doesn't matter which name space we use as long
+      as that MemSpace is used on the 'MMU Context' we're dumping from
+    */
+    eError = PDumpMMUSAB(psDevmemCtx->psDevNode->sDevId.pszPDumpDevName,
+                            uiPDumpMMUCtx,
+                            sDevAddrStart,
+                            uiSize,
+                            pszFilename,
+                            ui32FileOffset,
+							ui32PDumpFlags);
+    PVR_ASSERT(eError == PVRSRV_OK);
+
+	MMU_ReleasePDumpMMUContext(psDevmemCtx->psMMUContext);
+    return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+DevmemIntPDumpBitmap(PVRSRV_DEVICE_NODE *psDeviceNode,
+						IMG_CHAR *pszFileName,
+						IMG_UINT32 ui32FileOffset,
+						IMG_UINT32 ui32Width,
+						IMG_UINT32 ui32Height,
+						IMG_UINT32 ui32StrideInBytes,
+						IMG_DEV_VIRTADDR sDevBaseAddr,
+						DEVMEMINT_CTX *psDevMemContext,
+						IMG_UINT32 ui32Size,
+						PDUMP_PIXEL_FORMAT ePixelFormat,
+						IMG_UINT32 ui32AddrMode,
+						IMG_UINT32 ui32PDumpFlags)
+{
+	IMG_UINT32 ui32ContextID;
+	PVRSRV_ERROR eError;
+
+	eError = MMU_AcquirePDumpMMUContext(psDevMemContext->psMMUContext, &ui32ContextID);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "DevmemIntPDumpBitmap: Failed to acquire MMU context"));
+		return PVRSRV_ERROR_FAILED_TO_ALLOC_MMUCONTEXT_ID;
+	}
+
+	eError = PDumpBitmapKM(psDeviceNode,
+							pszFileName,
+							ui32FileOffset,
+							ui32Width,
+							ui32Height,
+							ui32StrideInBytes,
+							sDevBaseAddr,
+							ui32ContextID,
+							ui32Size,
+							ePixelFormat,
+							ui32AddrMode,
+							ui32PDumpFlags);
+
+	/* Don't care about return value */
+	MMU_ReleasePDumpMMUContext(psDevMemContext->psMMUContext);
+
+	return eError;
+}
+#endif
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/handle.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/handle.c
new file mode 100644
index 0000000..dfff220
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/handle.c
@@ -0,0 +1,2125 @@
+/*************************************************************************/ /*!
+@File
+@Title		Resource Handle Manager
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description	Provide resource handle management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+/* See handle.h for a description of the handle API. */
+
+/*
+ * The implmentation supports movable handle structures, allowing the address
+ * of a handle structure to change without having to fix up pointers in
+ * any of the handle structures.  For example, the linked list mechanism
+ * used to link subhandles together uses handle array indices rather than
+ * pointers to the structures themselves.
+ */
+
+#include <stddef.h>
+
+#include "handle.h"
+#include "handle_impl.h"
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+
+#define	HANDLE_HASH_TAB_INIT_SIZE		32
+
+#define	SET_FLAG(v, f)				((void)((v) |= (f)))
+#define	CLEAR_FLAG(v, f)			((void)((v) &= (IMG_UINT)~(f)))
+#define	TEST_FLAG(v, f)				((IMG_BOOL)(((v) & (f)) != 0))
+
+#define	TEST_ALLOC_FLAG(psHandleData, f)	TEST_FLAG((psHandleData)->eFlag, f)
+
+#if !defined(ARRAY_SIZE)
+#define ARRAY_SIZE(a)				(sizeof(a) / sizeof((a)[0]))
+#endif
+
+
+/* Linked list structure. Used for both the list head and list items */
+typedef struct _HANDLE_LIST_
+{
+	IMG_HANDLE hPrev;
+	IMG_HANDLE hNext;
+	IMG_HANDLE hParent;
+} HANDLE_LIST;
+
+typedef struct _HANDLE_DATA_
+{
+	/* The handle that represents this structure */
+	IMG_HANDLE hHandle;
+
+	/* Handle type */
+	PVRSRV_HANDLE_TYPE eType;
+
+	/* Flags specified when the handle was allocated */
+	PVRSRV_HANDLE_ALLOC_FLAG eFlag;
+
+	/* Pointer to the data that the handle represents */
+	void *pvData;
+
+	/*
+	 * Callback specified at handle allocation time to
+	 * release/destroy/free the data represented by the
+	 * handle when it's reference count reaches 0. This
+	 * should always be IMG_NULL for subhandles.
+	 */
+	PFN_HANDLE_RELEASE pfnReleaseData;
+
+	/* List head for subhandles of this handle */
+	HANDLE_LIST sChildren;
+
+	/* List entry for sibling subhandles */
+	HANDLE_LIST sSiblings;
+
+	/* Reference count, always 1 unless handle is shared */
+	IMG_UINT32 ui32Refs;
+} HANDLE_DATA;
+
+struct _HANDLE_BASE_
+{
+	/* Pointer to a handle implementations base structure */
+	HANDLE_IMPL_BASE *psImplBase;
+
+	/*
+	 * Pointer to handle hash table.
+	 * The hash table is used to do reverse lookups, converting data
+	 * pointers to handles.
+	 */
+	HASH_TABLE *psHashTab;
+};
+
+/*
+ * The key for the handle hash table is an array of three elements, the
+ * pointer to the resource, the resource type and the parent handle (or 
+ * IMG_NULL if there is no parent). The eHandKey enumeration gives the 
+ * array indices of the elements making up the key.
+ */
+enum eHandKey
+{
+	HAND_KEY_DATA = 0,
+	HAND_KEY_TYPE,
+	HAND_KEY_PARENT,
+	HAND_KEY_LEN		/* Must be last item in list */
+};
+
+/* HAND_KEY is the type of the hash table key */
+typedef IMG_UINTPTR_T HAND_KEY[HAND_KEY_LEN];
+
+/* Stores a pointer to the function table of the handle back-end in use */
+static HANDLE_IMPL_FUNCTAB const *gpsHandleFuncs = IMG_NULL;
+
+/* 
+ * Global lock added to avoid to call the handling functions
+ * only in a single threaded context.
+ */
+static POS_LOCK gHandleLock;
+static IMG_BOOL gbLockInitialised = IMG_FALSE;
+
+void LockHandle(void)
+{
+	OSLockAcquire(gHandleLock);
+}
+
+void UnlockHandle(void)
+{
+	OSLockRelease(gHandleLock);
+}
+
+/*
+ * Kernel handle base structure. This is used for handles that are not 
+ * allocated on behalf of a particular process.
+ */
+PVRSRV_HANDLE_BASE *gpsKernelHandleBase = IMG_NULL;
+
+/*!
+******************************************************************************
+
+ @Function	GetHandleData
+
+ @Description	Get the handle data structure for a given handle
+
+ @Input		psBase - pointer to handle base structure
+		ppsHandleData - location to return pointer to handle data structure
+		hHandle - handle from client
+		eType - handle type or PVRSRV_HANDLE_TYPE_NONE if the
+			handle type is not to be checked.
+
+ @Output	ppsHandleData - points to a pointer to the handle data structure
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(GetHandleData)
+#endif
+static INLINE
+PVRSRV_ERROR GetHandleData(PVRSRV_HANDLE_BASE *psBase,
+			   HANDLE_DATA **ppsHandleData,
+			   IMG_HANDLE hHandle,
+			   PVRSRV_HANDLE_TYPE eType)
+{
+	HANDLE_DATA *psHandleData;
+	PVRSRV_ERROR eError;
+
+	eError = gpsHandleFuncs->pfnGetHandleData(psBase->psImplBase, 
+						  hHandle, 
+						  (void **)&psHandleData);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	/*
+	 * Unless PVRSRV_HANDLE_TYPE_NONE was passed in to this function,
+	 * check handle is of the correct type.
+	 */
+	if (eType != PVRSRV_HANDLE_TYPE_NONE && eType != psHandleData->eType)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "GetHandleData: Handle type mismatch (%d != %d)",
+			 eType, psHandleData->eType));
+		return PVRSRV_ERROR_HANDLE_TYPE_MISMATCH;
+	}
+
+	/* Return the handle structure */
+	*ppsHandleData = psHandleData;
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	HandleListInit
+
+ @Description	Initialise a linked list structure embedded in a handle
+		structure.
+
+ @Input		hHandle - handle containing the linked list structure
+		psList - pointer to linked list structure
+		hParent - parent handle or IMG_NULL
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(HandleListInit)
+#endif
+static INLINE
+void HandleListInit(IMG_HANDLE hHandle, HANDLE_LIST *psList, IMG_HANDLE hParent)
+{
+	psList->hPrev = hHandle;
+	psList->hNext = hHandle;
+	psList->hParent = hParent;
+}
+
+/*!
+******************************************************************************
+
+ @Function	InitParentList
+
+ @Description	Initialise the children list head in a handle structure.
+		The children are the subhandles of this handle.
+
+ @Input		psHandleData - pointer to handle data structure
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(InitParentList)
+#endif
+static INLINE
+void InitParentList(HANDLE_DATA *psHandleData)
+{
+	IMG_HANDLE hParent = psHandleData->hHandle;
+
+	HandleListInit(hParent, &psHandleData->sChildren, hParent);
+}
+
+/*!
+******************************************************************************
+
+ @Function	InitChildEntry
+
+ @Description	Initialise the child list entry in a handle structure.
+		The list entry is used to link together subhandles of
+		a given handle.
+
+ @Input		psHandleData - pointer to handle data structure
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(InitChildEntry)
+#endif
+static INLINE
+void InitChildEntry(HANDLE_DATA *psHandleData)
+{
+	HandleListInit(psHandleData->hHandle, &psHandleData->sSiblings, IMG_NULL);
+}
+
+/*!
+******************************************************************************
+
+ @Function	HandleListIsEmpty
+
+ @Description	Determine whether a given linked list is empty.
+
+ @Input		hHandle - handle containing the list head
+		psList - pointer to the list head
+
+ @Return	IMG_TRUE if the list is empty, IMG_FALSE if it isn't.
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(HandleListIsEmpty)
+#endif
+static INLINE
+IMG_BOOL HandleListIsEmpty(IMG_HANDLE hHandle, HANDLE_LIST *psList) /* Instead of passing in the handle can we not just do (psList->hPrev == psList->hNext) ? IMG_TRUE : IMG_FALSE ??? */
+{
+	IMG_BOOL bIsEmpty;
+
+	bIsEmpty = (IMG_BOOL)(psList->hNext == hHandle);
+
+#ifdef	DEBUG
+	{
+		IMG_BOOL bIsEmpty2;
+
+		bIsEmpty2 = (IMG_BOOL)(psList->hPrev == hHandle);
+		PVR_ASSERT(bIsEmpty == bIsEmpty2);
+	}
+#endif
+
+	return bIsEmpty;
+}
+
+#ifdef DEBUG
+/*!
+******************************************************************************
+
+ @Function	NoChildren
+
+ @Description	Determine whether a handle has any subhandles
+
+ @Input		psHandleData - pointer to handle data structure
+
+ @Return	IMG_TRUE if the handle has no subhandles, IMG_FALSE if it does.
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(NoChildren)
+#endif
+static INLINE
+IMG_BOOL NoChildren(HANDLE_DATA *psHandleData)
+{
+	PVR_ASSERT(psHandleData->sChildren.hParent == psHandleData->hHandle);
+
+	return HandleListIsEmpty(psHandleData->hHandle, &psHandleData->sChildren);
+}
+
+/*!
+******************************************************************************
+
+ @Function	NoParent
+
+ @Description	Determine whether a handle is a subhandle
+
+ @Input		psHandleData - pointer to handle data structure
+
+ @Return	IMG_TRUE if the handle is not a subhandle, IMG_FALSE if it is.
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(NoParent)
+#endif
+static INLINE
+IMG_BOOL NoParent(HANDLE_DATA *psHandleData)
+{
+	if (HandleListIsEmpty(psHandleData->hHandle, &psHandleData->sSiblings))
+	{
+		PVR_ASSERT(psHandleData->sSiblings.hParent == IMG_NULL);
+
+		return IMG_TRUE;
+	}
+	else
+	{
+		PVR_ASSERT(psHandleData->sSiblings.hParent != IMG_NULL);
+	}
+	return IMG_FALSE;
+}
+#endif /*DEBUG*/
+
+/*!
+******************************************************************************
+
+ @Function	ParentHandle
+
+ @Description	Determine the parent of a handle
+
+ @Input		psHandleData - pointer to handle data structure
+
+ @Return	Parent handle, or IMG_NULL if the handle is not a subhandle.
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(ParentHandle)
+#endif
+static INLINE
+IMG_HANDLE ParentHandle(HANDLE_DATA *psHandleData)
+{
+	return psHandleData->sSiblings.hParent;
+}
+
+/*
+ * GetHandleListFromHandleAndOffset is used to generate either a
+ * pointer to the subhandle list head, or a pointer to the linked list
+ * structure of an item on a subhandle list.
+ * The list head is itself on the list, but is at a different offset
+ * in the handle structure to the linked list structure for items on
+ * the list.  The two linked list structures are differentiated by
+ * the third parameter, containing the parent handle.  The parent field
+ * in the list head structure references the handle structure that contains
+ * it.  For items on the list, the parent field in the linked list structure
+ * references the parent handle, which will be different from the handle
+ * containing the linked list structure.
+ */
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(GetHandleListFromHandleAndOffset)
+#endif
+static INLINE
+HANDLE_LIST *GetHandleListFromHandleAndOffset(PVRSRV_HANDLE_BASE *psBase, 
+					      IMG_HANDLE hEntry, 
+					      IMG_HANDLE hParent, 
+					      IMG_SIZE_T uiParentOffset, 
+					      IMG_SIZE_T uiEntryOffset)
+{
+	HANDLE_DATA *psHandleData = IMG_NULL;
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(psBase != IMG_NULL);
+
+	eError = GetHandleData(psBase, 
+			       &psHandleData, 
+			       hEntry, 
+			       PVRSRV_HANDLE_TYPE_NONE);
+	if (eError != PVRSRV_OK)
+	{
+		return IMG_NULL;
+	}
+
+	if (hEntry == hParent)
+	{
+		return (HANDLE_LIST *)((IMG_CHAR *)psHandleData + uiParentOffset);
+	}
+	else
+	{
+		return (HANDLE_LIST *)((IMG_CHAR *)psHandleData + uiEntryOffset);
+	}
+}
+
+/*!
+******************************************************************************
+
+ @Function	HandleListInsertBefore
+
+ @Description	Insert a handle before a handle currently on the list.
+
+ @Input		hEntry - handle to be inserted after
+		psEntry - pointer to handle structure to be inserted after
+		uiParentOffset - offset to list head struct in handle structure
+		hNewEntry - handle to be inserted
+		psNewEntry - pointer to handle structure of item to be inserted
+		uiEntryOffset - offset of list item struct in handle structure
+		hParent - parent handle of hNewEntry
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(HandleListInsertBefore)
+#endif
+static INLINE
+PVRSRV_ERROR HandleListInsertBefore(PVRSRV_HANDLE_BASE *psBase,
+				    IMG_HANDLE hEntry,
+				    HANDLE_LIST *psEntry,
+				    IMG_SIZE_T uiParentOffset,
+				    IMG_HANDLE hNewEntry,
+				    HANDLE_LIST *psNewEntry,
+				    IMG_SIZE_T uiEntryOffset,
+				    IMG_HANDLE hParent)
+{
+	HANDLE_LIST *psPrevEntry;
+
+	if (psBase == IMG_NULL || psEntry == IMG_NULL || psNewEntry == IMG_NULL)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psPrevEntry = GetHandleListFromHandleAndOffset(psBase, 
+						       psEntry->hPrev, 
+						       hParent, 
+						       uiParentOffset, 
+						       uiEntryOffset);
+	if (psPrevEntry == IMG_NULL)
+	{
+		return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE;
+	}
+
+	PVR_ASSERT(psNewEntry->hParent == IMG_NULL);
+	PVR_ASSERT(hEntry == psPrevEntry->hNext);
+
+#if defined(DEBUG)
+	{
+		HANDLE_LIST *psParentList;
+
+		psParentList = GetHandleListFromHandleAndOffset(psBase, 
+								hParent, 
+								hParent, 
+								uiParentOffset, 
+								uiParentOffset);
+		PVR_ASSERT(psParentList && psParentList->hParent == hParent);
+	}
+#endif /* defined(DEBUG) */
+
+	psNewEntry->hPrev = psEntry->hPrev;
+	psEntry->hPrev = hNewEntry;
+
+	psNewEntry->hNext = hEntry;
+	psPrevEntry->hNext = hNewEntry;
+
+	psNewEntry->hParent = hParent;
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	AdoptChild
+
+ @Description	Assign a subhandle to a handle
+
+ @Input		psParentData - pointer to handle structure of parent handle
+		psChildData - pointer to handle structure of child subhandle
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(AdoptChild)
+#endif
+static INLINE
+PVRSRV_ERROR AdoptChild(PVRSRV_HANDLE_BASE *psBase,
+			HANDLE_DATA *psParentData,
+			HANDLE_DATA *psChildData)
+{
+	IMG_HANDLE hParent = psParentData->sChildren.hParent;
+
+	PVR_ASSERT(hParent == psParentData->hHandle);
+
+	return HandleListInsertBefore(psBase, 
+				      hParent, 
+				      &psParentData->sChildren, 
+				      offsetof(HANDLE_DATA, sChildren), 
+				      psChildData->hHandle, 
+				      &psChildData->sSiblings, 
+				      offsetof(HANDLE_DATA, sSiblings), 
+				      hParent);
+}
+
+/*!
+******************************************************************************
+
+ @Function	HandleListRemove
+
+ @Description	Remove a handle from a list
+
+ @Input		hEntry - handle to be removed
+		psEntry - pointer to handle structure of item to be removed
+		uiEntryOffset - offset of list item struct in handle structure
+		uiParentOffset - offset to list head struct in handle structure
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(HandleListRemove)
+#endif
+static INLINE
+PVRSRV_ERROR HandleListRemove(PVRSRV_HANDLE_BASE *psBase,
+			      IMG_HANDLE hEntry,
+			      HANDLE_LIST *psEntry,
+			      IMG_SIZE_T uiEntryOffset,
+			      IMG_SIZE_T uiParentOffset)
+{
+	if (psBase == IMG_NULL || psEntry == IMG_NULL)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	if (!HandleListIsEmpty(hEntry, psEntry))
+	{
+		HANDLE_LIST *psPrev;
+		HANDLE_LIST *psNext;
+
+		psPrev = GetHandleListFromHandleAndOffset(psBase, 
+							  psEntry->hPrev, 
+							  psEntry->hParent, 
+							  uiParentOffset, 
+							  uiEntryOffset);
+		if (psPrev == IMG_NULL)
+		{
+			return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE;
+		}
+
+		psNext = GetHandleListFromHandleAndOffset(psBase, 
+							  psEntry->hNext, 
+							  psEntry->hParent, 
+							  uiParentOffset, 
+							  uiEntryOffset);
+		if (psNext == IMG_NULL)
+		{
+			return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE;
+		}
+
+		/*
+		 * The list head is on the list, and we don't want to
+		 * remove it.
+		 */
+		PVR_ASSERT(psEntry->hParent != IMG_NULL);
+
+		psPrev->hNext = psEntry->hNext;
+		psNext->hPrev = psEntry->hPrev;
+
+		HandleListInit(hEntry, psEntry, IMG_NULL);
+	}
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	UnlinkFromParent
+
+ @Description	Remove a subhandle from its parents list
+
+ @Input		psHandleData - pointer to handle data structure of child subhandle
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(UnlinkFromParent)
+#endif
+static INLINE
+PVRSRV_ERROR UnlinkFromParent(PVRSRV_HANDLE_BASE *psBase,
+			      HANDLE_DATA *psHandleData)
+{
+	return HandleListRemove(psBase, 
+				psHandleData->hHandle, 
+				&psHandleData->sSiblings, 
+				offsetof(HANDLE_DATA, sSiblings), 
+				offsetof(HANDLE_DATA, sChildren));
+}
+
+/*!
+******************************************************************************
+
+ @Function	HandleListIterate
+
+ @Description	Iterate over the items in a list
+
+ @Input		psHead - pointer to list head
+		uiParentOffset - offset to list head struct in handle structure
+		uiEntryOffset - offset of list item struct in handle structure
+		pfnIterFunc - function to be called for each handle in the list
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(HandleListIterate)
+#endif
+static INLINE
+PVRSRV_ERROR HandleListIterate(PVRSRV_HANDLE_BASE *psBase,
+			       HANDLE_LIST *psHead,
+			       IMG_SIZE_T uiParentOffset,
+			       IMG_SIZE_T uiEntryOffset,
+			       PVRSRV_ERROR (*pfnIterFunc)(PVRSRV_HANDLE_BASE *, IMG_HANDLE))
+{
+	IMG_HANDLE hHandle = psHead->hNext;
+	IMG_HANDLE hParent = psHead->hParent;
+	IMG_HANDLE hNext;
+
+	PVR_ASSERT(psHead->hParent != IMG_NULL);
+
+	/*
+ 	 * Follow the next chain from the list head until we reach
+ 	 * the list head again, which signifies the end of the list.
+ 	 */
+	while (hHandle != hParent)
+	{
+		HANDLE_LIST *psEntry;
+		PVRSRV_ERROR eError;
+
+		psEntry = GetHandleListFromHandleAndOffset(psBase, 
+							   hHandle, 
+							   hParent, 
+							   uiParentOffset, 
+							   uiEntryOffset);
+		if (psEntry == IMG_NULL)
+		{
+			return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE;
+		}
+
+		PVR_ASSERT(psEntry->hParent == psHead->hParent);
+
+		/*
+		 * Get the next index now, in case the list item is
+		 * modified by the iteration function.
+		 */
+		hNext = psEntry->hNext;
+
+		eError = (*pfnIterFunc)(psBase, hHandle);
+		if (eError != PVRSRV_OK)
+		{
+			return eError;
+		}
+
+		hHandle = hNext;
+	}
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	IterateOverChildren
+
+ @Description	Iterate over the subhandles of a parent handle
+
+ @Input		psParentData - pointer to parent handle structure
+		pfnIterFunc - function to be called for each subhandle
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(IterateOverChildren)
+#endif
+static INLINE
+PVRSRV_ERROR IterateOverChildren(PVRSRV_HANDLE_BASE *psBase,
+				 HANDLE_DATA *psParentData,
+				 PVRSRV_ERROR (*pfnIterFunc)(PVRSRV_HANDLE_BASE *, IMG_HANDLE))
+{
+	 return HandleListIterate(psBase,
+				  &psParentData->sChildren,
+				  offsetof(HANDLE_DATA, sChildren),
+				  offsetof(HANDLE_DATA, sSiblings),
+				  pfnIterFunc);
+}
+
+/*!
+******************************************************************************
+
+ @Function	ParentIfPrivate
+
+ @Description	Return the parent handle if the handle was allocated
+		with PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE, else return
+		IMG_NULL
+
+ @Input		psHandleData - pointer to handle data structure
+
+ @Return	Parent handle, or IMG_NULL
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(ParentIfPrivate)
+#endif
+static INLINE
+IMG_HANDLE ParentIfPrivate(HANDLE_DATA *psHandleData)
+{
+	return TEST_ALLOC_FLAG(psHandleData, PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE) ?
+			ParentHandle(psHandleData) : IMG_NULL;
+}
+
+/*!
+******************************************************************************
+
+ @Function	InitKey
+
+ @Description	Initialise a hash table key for the current process
+
+ @Input		psBase - pointer to handle base structure
+		aKey - pointer to key
+		pvData - pointer to the resource the handle represents
+		eType - type of resource
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(InitKey)
+#endif
+static INLINE
+void InitKey(HAND_KEY aKey,
+	     PVRSRV_HANDLE_BASE *psBase,
+	     void *pvData,
+	     PVRSRV_HANDLE_TYPE eType,
+	     IMG_HANDLE hParent)
+{
+	PVR_UNREFERENCED_PARAMETER(psBase);
+
+	aKey[HAND_KEY_DATA] = (IMG_UINTPTR_T)pvData;
+	aKey[HAND_KEY_TYPE] = (IMG_UINTPTR_T)eType;
+	aKey[HAND_KEY_PARENT] = (IMG_UINTPTR_T)hParent;
+}
+
+static PVRSRV_ERROR FreeHandleWrapper(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle);
+
+/*!
+******************************************************************************
+
+ @Function	FreeHandle
+
+ @Description	Free a handle data structure.
+
+ @Input		psBase - Pointer to handle base structure
+		hHandle - Handle to be freed
+		eType - Type of the handle to be freed
+		ppvData - Location for data associated with the freed handle
+
+ @Output 		ppvData - Points to data that was associated with the freed handle
+
+ @Return	PVRSRV_OK or PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR FreeHandle(PVRSRV_HANDLE_BASE *psBase,
+			       IMG_HANDLE hHandle,
+			       PVRSRV_HANDLE_TYPE eType,
+			       void **ppvData)
+{
+	HANDLE_DATA *psHandleData = IMG_NULL;
+	HANDLE_DATA *psReleasedHandleData;
+	PVRSRV_ERROR eError;
+
+	eError = GetHandleData(psBase, &psHandleData, hHandle, eType);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	PVR_ASSERT(psHandleData->ui32Refs > 0);
+
+	/* Call the release data callback for each reference on the handle */
+	if (psHandleData->pfnReleaseData != IMG_NULL)
+	{
+		eError = psHandleData->pfnReleaseData(psHandleData->pvData);
+		if (eError == PVRSRV_ERROR_RETRY)
+		{
+			PVR_DPF((PVR_DBG_MESSAGE,
+				 "FreeHandle: "
+				 "Got retry while calling release data callback for %p (type = %d)",
+				 hHandle,
+				 (IMG_UINT32)psHandleData->eType));
+
+			return eError;
+		}
+		else if (eError != PVRSRV_OK)
+		{
+			return eError;
+		}
+	}
+
+	psHandleData->ui32Refs--;
+	if (psHandleData->ui32Refs > 0)
+	{
+		/* Reference count still positive, only possible for shared handles */
+		PVR_ASSERT(TEST_ALLOC_FLAG(psHandleData, PVRSRV_HANDLE_ALLOC_FLAG_SHARED));
+		return PVRSRV_OK;
+	}
+
+	if (!TEST_ALLOC_FLAG(psHandleData, PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
+	{
+		HAND_KEY aKey;
+		IMG_HANDLE hRemovedHandle;
+
+		InitKey(aKey, psBase, psHandleData->pvData, psHandleData->eType, ParentIfPrivate(psHandleData));
+
+		hRemovedHandle = (IMG_HANDLE)HASH_Remove_Extended(psBase->psHashTab, aKey);
+
+		PVR_ASSERT(hRemovedHandle != IMG_NULL);
+		PVR_ASSERT(hRemovedHandle == psHandleData->hHandle);
+		PVR_UNREFERENCED_PARAMETER(hRemovedHandle);
+	}
+
+	eError = UnlinkFromParent(psBase, psHandleData);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "FreeHandle: Error whilst unlinking from parent handle (%s)", 
+			 PVRSRVGetErrorStringKM(eError)));
+		return eError;
+	}
+
+	/* Free children */
+	eError = IterateOverChildren(psBase, psHandleData, FreeHandleWrapper);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "FreeHandle: Error whilst freeing subhandles (%s)",
+			 PVRSRVGetErrorStringKM(eError)));
+		return eError;
+	}
+
+	eError = gpsHandleFuncs->pfnReleaseHandle(psBase->psImplBase,
+						  psHandleData->hHandle,
+						  (void **)&psReleasedHandleData);
+	if (eError == PVRSRV_OK)
+	{
+		PVR_ASSERT(psReleasedHandleData == psHandleData);
+	}
+
+	if (ppvData)
+	{
+		*ppvData = psHandleData->pvData;
+	}
+
+	OSFreeMem(psHandleData);
+
+	return eError;
+}
+
+static PVRSRV_ERROR FreeHandleWrapper(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle)
+{
+	return FreeHandle(psBase, hHandle, PVRSRV_HANDLE_TYPE_NONE, IMG_NULL);
+}
+
+/*!
+******************************************************************************
+
+ @Function	FindHandle
+
+ @Description	Find handle corresponding to a resource pointer
+
+ @Input		psBase - pointer to handle base structure
+		pvData - pointer to resource to be associated with the handle
+		eType - the type of resource
+
+ @Return	the handle, or IMG_NULL if not found
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(FindHandle)
+#endif
+static INLINE
+IMG_HANDLE FindHandle(PVRSRV_HANDLE_BASE *psBase,
+		      void *pvData,
+		      PVRSRV_HANDLE_TYPE eType,
+		      IMG_HANDLE hParent)
+{
+	HAND_KEY aKey;
+
+	PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+
+	InitKey(aKey, psBase, pvData, eType, hParent);
+
+	return (IMG_HANDLE) HASH_Retrieve_Extended(psBase->psHashTab, aKey);
+}
+
+/*!
+******************************************************************************
+
+ @Function	AllocHandle
+
+ @Description	Allocate a new handle
+
+ @Input		phHandle - location for new handle
+		pvData - pointer to resource to be associated with the handle
+		eType - the type of resource
+		hParent - parent handle or IMG_NULL
+		pfnReleaseData - Function to release resource at handle release
+		                 time
+
+ @Output	phHandle - points to new handle
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+static PVRSRV_ERROR AllocHandle(PVRSRV_HANDLE_BASE *psBase,
+				IMG_HANDLE *phHandle,
+				void *pvData,
+				PVRSRV_HANDLE_TYPE eType,
+				PVRSRV_HANDLE_ALLOC_FLAG eFlag,
+				IMG_HANDLE hParent,
+				PFN_HANDLE_RELEASE pfnReleaseData)
+{
+	HANDLE_DATA *psNewHandleData;
+	IMG_HANDLE hHandle;
+	PVRSRV_ERROR eError;
+
+	/* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */
+	PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+	PVR_ASSERT(psBase != IMG_NULL && psBase->psHashTab != IMG_NULL);
+	PVR_ASSERT(gpsHandleFuncs);
+
+	if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
+	{
+		/* Handle must not already exist */
+		PVR_ASSERT(FindHandle(psBase, pvData, eType, hParent) == IMG_NULL);
+	}
+
+	psNewHandleData = OSAllocZMem(sizeof(*psNewHandleData));
+	if (psNewHandleData == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "AllocHandle: Couldn't allocate handle data"));
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	eError = gpsHandleFuncs->pfnAcquireHandle(psBase->psImplBase, &hHandle, psNewHandleData);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "AllocHandle: Failed to acquire a handle"));
+		goto ErrorFreeHandleData;
+	}
+
+	/*
+	 * If a data pointer can be associated with multiple handles, we
+	 * don't put the handle in the hash table, as the data pointer
+	 * may not map to a unique handle
+	 */
+	if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
+	{
+		HAND_KEY aKey;
+
+		/* Initialise hash key */
+		InitKey(aKey, psBase, pvData, eType, hParent);
+
+		/* Put the new handle in the hash table */
+		if (!HASH_Insert_Extended(psBase->psHashTab, aKey, (IMG_UINTPTR_T)hHandle))
+		{
+			PVR_DPF((PVR_DBG_ERROR, "AllocHandle: Couldn't add handle to hash table"));
+			eError = PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE;
+			goto ErrorReleaseHandle;
+		}
+	}
+
+	psNewHandleData->hHandle = hHandle;
+	psNewHandleData->eType = eType;
+	psNewHandleData->eFlag = eFlag;
+	psNewHandleData->pvData = pvData;
+	psNewHandleData->pfnReleaseData = pfnReleaseData;
+	psNewHandleData->ui32Refs = 1;
+
+	InitParentList(psNewHandleData);
+#if defined(DEBUG)
+	PVR_ASSERT(NoChildren(psNewHandleData));
+#endif
+
+	InitChildEntry(psNewHandleData);
+#if defined(DEBUG)
+	PVR_ASSERT(NoParent(psNewHandleData));
+#endif
+
+	/* Return the new handle to the client */
+	*phHandle = psNewHandleData->hHandle;
+
+	return PVRSRV_OK;
+
+ErrorReleaseHandle:
+	(void)gpsHandleFuncs->pfnReleaseHandle(psBase->psImplBase, hHandle, IMG_NULL);
+
+ErrorFreeHandleData:
+	OSFreeMem(psNewHandleData);
+
+	return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVAllocHandle
+
+ @Description	Allocate a handle
+
+ @Input		phHandle - location for new handle
+		pvData - pointer to resource to be associated with the handle
+		eType - the type of resource
+		pfnReleaseData - Function to release resource at handle release
+		                 time
+
+ @Output	phHandle - points to new handle
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase,
+			       IMG_HANDLE *phHandle,
+			       void *pvData,
+			       PVRSRV_HANDLE_TYPE eType,
+			       PVRSRV_HANDLE_ALLOC_FLAG eFlag,
+			       PFN_HANDLE_RELEASE pfnReleaseData)
+{
+	IMG_HANDLE hHandle;
+	PVRSRV_ERROR eError;
+
+	*phHandle = IMG_NULL;
+
+	/* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */
+	PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+	PVR_ASSERT(gpsHandleFuncs);
+
+	LockHandle();
+	if (psBase == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocHandle: Missing handle base"));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto ExitUnlock;
+	}
+
+	if (pfnReleaseData == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocHandle: Missing release function"));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto ExitUnlock;
+	}
+
+	if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
+	{
+		/* See if there is already a handle for this data pointer */
+		hHandle = FindHandle(psBase, pvData, eType, IMG_NULL);
+		if (hHandle != IMG_NULL)
+		{
+			HANDLE_DATA *psHandleData = IMG_NULL;
+
+			eError = GetHandleData(psBase, &psHandleData, hHandle, eType);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+					 "PVRSRVAllocHandle: Lookup of existing handle failed (%s)",
+					 PVRSRVGetErrorStringKM(eError)));
+				goto ExitUnlock;
+			}
+
+			/*
+			 * If the client is willing to share a handle, and the
+			 * existing handle is marked as shareable, return the
+			 * existing handle.
+			 */
+			if (TEST_FLAG(psHandleData->eFlag & eFlag, PVRSRV_HANDLE_ALLOC_FLAG_SHARED))
+			{
+				/* The same release function should be used for shared handles */
+				PVR_ASSERT(psHandleData->pfnReleaseData == pfnReleaseData);
+
+				psHandleData->ui32Refs++;
+				*phHandle = hHandle;
+
+				eError = PVRSRV_OK;
+				goto ExitUnlock;
+			}
+
+			eError = PVRSRV_ERROR_HANDLE_NOT_SHAREABLE;
+			goto ExitUnlock;
+		}
+	}
+
+	eError = AllocHandle(psBase, phHandle, pvData, eType, eFlag, IMG_NULL, pfnReleaseData);
+
+ExitUnlock:
+	UnlockHandle();
+
+	return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVAllocSubHandle
+
+ @Description	Allocate a subhandle
+
+ @Input		phHandle - location for new subhandle
+		pvData - pointer to resource to be associated with the subhandle
+		eType - the type of resource
+		hParent - parent handle
+
+ @Output	phHandle - points to new subhandle
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase,
+				  IMG_HANDLE *phHandle,
+				  void *pvData,
+				  PVRSRV_HANDLE_TYPE eType,
+				  PVRSRV_HANDLE_ALLOC_FLAG eFlag,
+				  IMG_HANDLE hParent)
+{
+	HANDLE_DATA *psPHandleData = IMG_NULL;
+	HANDLE_DATA *psCHandleData = IMG_NULL;
+	IMG_HANDLE hParentKey;
+	IMG_HANDLE hHandle;
+	PVRSRV_ERROR eError;
+
+	*phHandle = IMG_NULL;
+
+	/* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */
+	PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+	PVR_ASSERT(gpsHandleFuncs);
+
+	LockHandle();
+
+	if (psBase == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocSubHandle: Missing handle base"));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto ExitUnlock;
+	}
+
+	hParentKey = TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE) ? hParent : IMG_NULL;
+
+	/* Lookup the parent handle */
+	eError = GetHandleData(psBase, &psPHandleData, hParent, PVRSRV_HANDLE_TYPE_NONE);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocSubHandle: Failed to get parent handle structure"));
+		goto ExitUnlock;
+	}
+
+	if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
+	{
+		/* See if there is already a handle for this data pointer */
+		hHandle = FindHandle(psBase, pvData, eType, hParentKey);
+		if (hHandle != IMG_NULL)
+		{
+			eError = GetHandleData(psBase, &psCHandleData, hHandle, eType);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocSubHandle: Lookup of existing handle failed"));
+				goto ExitUnlock;
+			}
+
+			PVR_ASSERT(hParentKey != IMG_NULL && ParentHandle(psCHandleData) == hParent);
+
+			/*
+			 * If the client is willing to share a handle, the
+			 * existing handle is marked as shareable, and the
+			 * existing handle has the same parent, return the
+			 * existing handle.
+			 */
+			if (TEST_FLAG(psCHandleData->eFlag & eFlag, PVRSRV_HANDLE_ALLOC_FLAG_SHARED) && 
+			    ParentHandle(psCHandleData) == hParent)
+			{
+				psCHandleData->ui32Refs++;
+				*phHandle = hHandle;
+
+				eError = PVRSRV_OK;
+				goto ExitUnlock;
+			}
+
+			eError = PVRSRV_ERROR_HANDLE_NOT_SHAREABLE;
+			goto ExitUnlock;
+		}
+	}
+
+	eError = AllocHandle(psBase, &hHandle, pvData, eType, eFlag, hParentKey, IMG_NULL);
+	if (eError != PVRSRV_OK)
+	{
+		goto ExitUnlock;
+	}
+
+	eError = GetHandleData(psBase, &psCHandleData, hHandle, PVRSRV_HANDLE_TYPE_NONE);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocSubHandle: Failed to get parent handle structure"));
+
+		/* If we were able to allocate the handle then there should be no reason why we 
+		   can't also get it's handle structure. Otherwise something has gone badly wrong. */
+		PVR_ASSERT(eError == PVRSRV_OK);
+
+		goto ExitUnlock;
+	}
+
+	/*
+	 * Get the parent handle structure again, in case the handle
+	 * structure has moved (depending on the implementation
+	 * of AllocHandle).
+	 */
+	eError = GetHandleData(psBase, &psPHandleData, hParent, PVRSRV_HANDLE_TYPE_NONE);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocSubHandle: Failed to get parent handle structure"));
+
+		(void)FreeHandle(psBase, hHandle, eType, IMG_NULL);
+		goto ExitUnlock;
+	}
+
+	eError = AdoptChild(psBase, psPHandleData, psCHandleData);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocSubHandle: Parent handle failed to adopt subhandle"));
+
+		(void)FreeHandle(psBase, hHandle, eType, IMG_NULL);
+		goto ExitUnlock;
+	}
+
+	*phHandle = hHandle;
+
+	eError = PVRSRV_OK;
+
+ExitUnlock:
+	UnlockHandle();
+
+	return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVFindHandle
+
+ @Description	Find handle corresponding to a resource pointer
+
+ @Input		phHandle - location for returned handle
+		pvData - pointer to resource to be associated with the handle
+		eType - the type of resource
+
+ @Output	phHandle - points to handle
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase,
+			      IMG_HANDLE *phHandle,
+			      void *pvData,
+			      PVRSRV_HANDLE_TYPE eType)
+{
+	IMG_HANDLE hHandle;
+	PVRSRV_ERROR eError;
+
+	/* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */
+	PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+	PVR_ASSERT(gpsHandleFuncs);
+
+	LockHandle();
+
+	if (psBase == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVFindHandle: Missing handle base"));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto ExitUnlock;
+	}
+
+	/* See if there is a handle for this data pointer */
+	hHandle = FindHandle(psBase, pvData, eType, IMG_NULL);
+	if (hHandle == IMG_NULL)
+	{
+		eError = PVRSRV_ERROR_HANDLE_NOT_FOUND;
+		goto ExitUnlock;
+	}
+
+	*phHandle = hHandle;
+
+	eError = PVRSRV_OK;
+
+ExitUnlock:
+	UnlockHandle();
+
+	return eError;
+
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVLookupHandle
+
+ @Description	Lookup the data pointer corresponding to a handle
+
+ @Input		ppvData - location to return data pointer
+		hHandle - handle from client
+		eType - handle type
+
+ @Output	ppvData - points to the data pointer
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase,
+				IMG_PVOID *ppvData,
+				IMG_HANDLE hHandle,
+				PVRSRV_HANDLE_TYPE eType)
+{
+	HANDLE_DATA *psHandleData = IMG_NULL;
+	PVRSRV_ERROR eError;
+
+	/* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */
+	PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+	PVR_ASSERT(gpsHandleFuncs);
+
+	LockHandle();
+
+	if (psBase == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVLookupHandle: Missing handle base"));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto ExitUnlock;
+	}
+
+	eError = GetHandleData(psBase, &psHandleData, hHandle, eType);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVLookupHandle: Error looking up handle (%s)",
+			 PVRSRVGetErrorStringKM(eError)));
+		OSDumpStack();
+		goto ExitUnlock;
+	}
+
+	*ppvData = psHandleData->pvData;
+
+	eError = PVRSRV_OK;
+
+ExitUnlock:
+	UnlockHandle();
+
+	return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVLookupSubHandle
+
+ @Description	Lookup the data pointer corresponding to a subhandle
+
+ @Input		ppvData - location to return data pointer
+		hHandle - handle from client
+		eType - handle type
+		hAncestor - ancestor handle
+
+ @Output	ppvData - points to the data pointer
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase,
+				   IMG_PVOID *ppvData,
+				   IMG_HANDLE hHandle,
+				   PVRSRV_HANDLE_TYPE eType,
+				   IMG_HANDLE hAncestor)
+{
+	HANDLE_DATA *psPHandleData = IMG_NULL;
+	HANDLE_DATA *psCHandleData = IMG_NULL;
+	PVRSRV_ERROR eError;
+
+	/* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */
+	PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+	PVR_ASSERT(gpsHandleFuncs);
+
+	LockHandle();
+
+	if (psBase == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVLookupSubHandle: Missing handle base"));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto ExitUnlock;
+	}
+
+	eError = GetHandleData(psBase, &psCHandleData, hHandle, eType);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVLookupSubHandle: Error looking up subhandle (%s)",
+			 PVRSRVGetErrorStringKM(eError)));
+		OSDumpStack();
+		goto ExitUnlock;
+	}
+
+	/* Look for hAncestor among the handle's ancestors */
+	for (psPHandleData = psCHandleData; ParentHandle(psPHandleData) != hAncestor; )
+	{
+		eError = GetHandleData(psBase, &psPHandleData, ParentHandle(psPHandleData), PVRSRV_HANDLE_TYPE_NONE);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"PVRSRVLookupSubHandle: Subhandle doesn't belong to given ancestor"));
+			eError = PVRSRV_ERROR_INVALID_SUBHANDLE;
+			goto ExitUnlock;
+		}
+	}
+
+	*ppvData = psCHandleData->pvData;
+
+	eError = PVRSRV_OK;
+
+ExitUnlock:
+	UnlockHandle();
+
+	return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVGetParentHandle
+
+ @Description	Lookup the parent of a handle
+
+ @Input		phParent - location for returning parent handle
+		hHandle - handle for which the parent handle is required
+		eType - handle type
+		hParent - parent handle
+
+ @Output	*phParent - parent handle, or IMG_NULL if there is no parent
+
+ @Return	Error code or PVRSRV_OK.  Note that not having a parent is
+		not regarded as an error.
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVGetParentHandle(PVRSRV_HANDLE_BASE *psBase,
+				   IMG_HANDLE *phParent,
+				   IMG_HANDLE hHandle,
+				   PVRSRV_HANDLE_TYPE eType)
+{
+	HANDLE_DATA *psHandleData = IMG_NULL;
+	PVRSRV_ERROR eError;
+
+	/* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */
+	PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+	PVR_ASSERT(gpsHandleFuncs);
+
+	LockHandle();
+
+	if (psBase == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVGetParentHandle: Missing handle base"));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto ExitUnlock;
+	}
+
+	eError = GetHandleData(psBase, &psHandleData, hHandle, eType);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVGetParentHandle: Error looking up subhandle (%s)",
+			 PVRSRVGetErrorStringKM(eError)));
+		OSDumpStack();
+		goto ExitUnlock;
+	}
+
+	*phParent = ParentHandle(psHandleData);
+
+	eError = PVRSRV_OK;
+
+ExitUnlock:
+	UnlockHandle();
+
+	return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVReleaseHandle
+
+ @Description	Release a handle that is no longer needed
+
+ @Input 	hHandle - handle from client
+		eType - handle type
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase,
+				 IMG_HANDLE hHandle,
+				 PVRSRV_HANDLE_TYPE eType)
+{
+	PVRSRV_ERROR eError;
+
+	/* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */
+	PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+	PVR_ASSERT(gpsHandleFuncs);
+
+	LockHandle();
+
+	if (psBase == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVReleaseHandle: Missing handle base"));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto ExitUnlock;
+	}
+
+	eError = FreeHandle(psBase, hHandle, eType, IMG_NULL);
+
+ExitUnlock:
+	UnlockHandle();
+
+	return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVPurgeHandles
+
+ @Description	Purge handles for a given handle base
+
+ @Input 	psBase - pointer to handle base structure
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVPurgeHandles(PVRSRV_HANDLE_BASE *psBase)
+{
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(gpsHandleFuncs);
+
+	LockHandle();
+
+	if (psBase == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVPurgeHandles: Missing handle base"));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto ExitUnlock;
+	}
+
+	eError = gpsHandleFuncs->pfnPurgeHandles(psBase->psImplBase);
+
+ExitUnlock:
+	UnlockHandle();
+
+	return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVAllocHandleBase
+
+ @Description	Allocate a handle base structure for a process
+
+ @Input 	ppsBase - pointer to handle base structure pointer
+
+ @Output	ppsBase - points to handle base structure pointer
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVAllocHandleBase(PVRSRV_HANDLE_BASE **ppsBase)
+{
+	PVRSRV_HANDLE_BASE *psBase;
+	PVRSRV_ERROR eError;
+
+	if (gpsHandleFuncs == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocHandleBase: Handle management not initialised"));
+		return PVRSRV_ERROR_NOT_READY;
+	}
+
+	LockHandle();
+
+	if (ppsBase == IMG_NULL)
+	{
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto ErrorUnlock;
+	}
+
+	psBase = OSAllocZMem(sizeof(*psBase));
+	if (psBase == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocHandleBase: Couldn't allocate handle base"));
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto ErrorUnlock;
+	}
+
+	eError = gpsHandleFuncs->pfnCreateHandleBase(&psBase->psImplBase);
+	if (eError != PVRSRV_OK)
+	{
+		goto ErrorFreeHandleBase;
+	}
+
+	psBase->psHashTab = HASH_Create_Extended(HANDLE_HASH_TAB_INIT_SIZE, 
+						 sizeof(HAND_KEY), 
+						 HASH_Func_Default, 
+						 HASH_Key_Comp_Default);
+	if (psBase->psHashTab == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocHandleBase: Couldn't create data pointer hash table"));
+		eError = PVRSRV_ERROR_UNABLE_TO_CREATE_HASH_TABLE;
+		goto ErrorDestroyHandleBase;
+	}
+
+	*ppsBase = psBase;
+
+	UnlockHandle();
+
+	return PVRSRV_OK;
+
+ErrorDestroyHandleBase:
+	(void)gpsHandleFuncs->pfnDestroyHandleBase(psBase->psImplBase);
+
+ErrorFreeHandleBase:
+	OSFreeMem(psBase);
+
+ErrorUnlock:
+	UnlockHandle();
+
+	return eError;
+}
+
+#if defined(DEBUG)
+typedef struct _COUNT_HANDLE_DATA_
+{
+	PVRSRV_HANDLE_BASE *psBase;
+	IMG_UINT32 uiHandleDataCount;
+} COUNT_HANDLE_DATA;
+
+/* Used to count the number of handles that have data associated with them */
+static PVRSRV_ERROR CountHandleDataWrapper(IMG_HANDLE hHandle, void *pvData)
+{
+	COUNT_HANDLE_DATA *psData = (COUNT_HANDLE_DATA *)pvData;
+	HANDLE_DATA *psHandleData = IMG_NULL;
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(gpsHandleFuncs);
+
+	if (psData == IMG_NULL ||
+	    psData->psBase == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "CountHandleDataWrapper: Missing free data"));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	eError = GetHandleData(psData->psBase, 
+			       &psHandleData, 
+			       hHandle, 
+			       PVRSRV_HANDLE_TYPE_NONE);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "CountHandleDataWrapper: Couldn't get handle data for handle"));
+		return eError;
+	}
+
+	if (psHandleData != IMG_NULL)
+	{
+		psData->uiHandleDataCount++;
+	}
+
+	return PVRSRV_OK;
+}
+#endif /* defined(DEBUG) */
+
+typedef struct FREE_HANDLE_DATA_TAG
+{
+	PVRSRV_HANDLE_BASE *psBase;
+	PVRSRV_HANDLE_TYPE eHandleFreeType;
+	/* timing data (ns) to release bridge lock upon the deadline */
+	IMG_UINT64 ui64TimeStart;
+	IMG_UINT64 ui64MaxBridgeTime;
+} FREE_HANDLE_DATA;
+
+static INLINE IMG_BOOL _CheckIfMaxTimeExpired(IMG_UINT64 ui64TimeStart, IMG_UINT64 ui64MaxBridgeTime)
+{
+	IMG_UINT64 ui64Diff;
+	IMG_UINT64 ui64Now = OSClockns64();
+
+	if(ui64Now >= ui64TimeStart)
+	{
+		ui64Diff = ui64Now - ui64TimeStart;
+	}
+	else
+	{
+		/* time has wrapped around */
+		ui64Diff = (0xFFFFFFFFFFFFFFFF - ui64TimeStart) + ui64Now;
+	}
+
+	return ui64Diff >= ui64MaxBridgeTime;
+}
+
+static PVRSRV_ERROR FreeHandleDataWrapper(IMG_HANDLE hHandle, void *pvData)
+{
+	FREE_HANDLE_DATA *psData = (FREE_HANDLE_DATA *)pvData;
+	HANDLE_DATA *psHandleData = IMG_NULL;
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(gpsHandleFuncs);
+
+	if (psData == IMG_NULL ||
+	    psData->psBase == IMG_NULL ||
+	    psData->eHandleFreeType == PVRSRV_HANDLE_TYPE_NONE)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "FreeHandleDataWrapper: Missing free data"));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	eError = GetHandleData(psData->psBase, 
+			       &psHandleData, 
+			       hHandle, 
+			       PVRSRV_HANDLE_TYPE_NONE);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "FreeHandleDataWrapper: Couldn't get handle data for handle"));
+		return eError;
+	}
+
+	if (psHandleData == IMG_NULL || psHandleData->eType != psData->eHandleFreeType)
+	{
+		return PVRSRV_OK;
+	}
+
+	PVR_ASSERT(psHandleData->ui32Refs > 0);
+
+	while (psHandleData->ui32Refs != 0)
+	{
+		if (psHandleData->pfnReleaseData != IMG_NULL)
+		{
+			eError = psHandleData->pfnReleaseData(psHandleData->pvData);
+			if (eError == PVRSRV_ERROR_RETRY)
+			{
+				PVR_DPF((PVR_DBG_MESSAGE,
+					 "FreeHandleDataWrapper: "
+					 "Got retry while calling release data callback for %p (type = %d)",
+					 hHandle,
+					 (IMG_UINT32)psHandleData->eType));
+
+				return eError;
+			}
+			else if (eError != PVRSRV_OK)
+			{
+				return eError;
+			}
+		}
+
+		psHandleData->ui32Refs--;
+	}
+
+	if (!TEST_ALLOC_FLAG(psHandleData, PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
+	{
+		HAND_KEY aKey;
+		IMG_HANDLE hRemovedHandle;
+
+		InitKey(aKey,
+			psData->psBase,
+			psHandleData->pvData,
+			psHandleData->eType,
+			ParentIfPrivate(psHandleData));
+
+		hRemovedHandle = (IMG_HANDLE)HASH_Remove_Extended(psData->psBase->psHashTab, aKey);
+
+		PVR_ASSERT(hRemovedHandle != IMG_NULL);
+		PVR_ASSERT(hRemovedHandle == psHandleData->hHandle);
+		PVR_UNREFERENCED_PARAMETER(hRemovedHandle);
+	}
+
+	eError = gpsHandleFuncs->pfnSetHandleData(psData->psBase->psImplBase, hHandle, IMG_NULL);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	OSFreeMem(psHandleData);
+
+	/* If we reach the end of the time slice release we can release the global
+	 * lock, invoke the scheduler and reacquire the lock */
+	if((psData->ui64MaxBridgeTime != 0) && _CheckIfMaxTimeExpired(psData->ui64TimeStart, psData->ui64MaxBridgeTime))
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "FreeResourceByCriteria: Lock timeout (timeout: %llu)",
+								            psData->ui64MaxBridgeTime));
+		UnlockHandle();
+		OSReleaseBridgeLock();
+		/* Invoke the scheduler to check if other processes are waiting for the lock */
+		OSReleaseThreadQuanta();
+		OSAcquireBridgeLock();
+		LockHandle();
+		/* Set again lock timeout and reset the counter */
+		psData->ui64TimeStart = OSClockns64();
+		PVR_DPF((PVR_DBG_MESSAGE, "FreeResourceByCriteria: Lock acquired again"));
+	}
+
+	return PVRSRV_OK;
+}
+
+static PVRSRV_HANDLE_TYPE g_aeOrderedFreeList[] =
+{
+	PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT,
+	PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT,
+	PVRSRV_HANDLE_TYPE_RGX_FW_MEMDESC,
+	PVRSRV_HANDLE_TYPE_RGX_RTDATA_CLEANUP,
+	PVRSRV_HANDLE_TYPE_RGX_FREELIST,
+	PVRSRV_HANDLE_TYPE_RGX_RPM_CONTEXT_CLEANUP,
+	PVRSRV_HANDLE_TYPE_RGX_RPM_FREELIST,
+	PVRSRV_HANDLE_TYPE_RGX_MEMORY_BLOCK,
+	PVRSRV_HANDLE_TYPE_RGX_POPULATION,
+	PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER,
+	PVRSRV_HANDLE_TYPE_RGX_FWIF_RENDERTARGET,
+	PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT,
+	PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT,
+	PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT,
+	PVRSRV_HANDLE_TYPE_RGX_SERVER_RAY_CONTEXT,
+	PVRSRV_HANDLE_TYPE_RI_HANDLE,
+	PVRSRV_HANDLE_TYPE_SYNC_RECORD_HANDLE,
+	PVRSRV_HANDLE_TYPE_SERVER_OP_COOKIE,
+	PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+	PVRSRV_HANDLE_TYPE_SERVER_SYNC_EXPORT,
+	PVRSRV_HANDLE_TYPE_SERVER_EXPORTCOOKIE,
+	PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+	PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING,
+	PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION,
+	PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP,
+	PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX_EXPORT,
+	PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+	PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX,
+	PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_PAGELIST,
+	PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_SECURE_EXPORT,
+	PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT,
+	PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+	PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT,
+	PVRSRV_HANDLE_TYPE_DC_PIN_HANDLE,
+	PVRSRV_HANDLE_TYPE_DC_BUFFER,
+	PVRSRV_HANDLE_TYPE_DC_DISPLAY_CONTEXT,
+	PVRSRV_HANDLE_TYPE_DC_DEVICE,
+	PVRSRV_HANDLE_TYPE_PVR_TL_SD,
+	PVRSRV_HANDLE_TYPE_DEV_NODE,
+	PVRSRV_HANDLE_TYPE_MM_PLAT_CLEANUP
+};
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVFreeHandleBase
+
+ @Description	Free a handle base structure
+
+ @Input 	psBase - pointer to handle base structure
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVFreeHandleBase(PVRSRV_HANDLE_BASE *psBase, IMG_UINT64 ui64MaxBridgeTime)
+{
+#if defined(DEBUG)
+	COUNT_HANDLE_DATA sCountData = { 0 };
+#endif
+	FREE_HANDLE_DATA sHandleData = { 0 };
+	IMG_UINT32 i;
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(gpsHandleFuncs);
+
+	LockHandle();
+
+	sHandleData.psBase = psBase;
+	sHandleData.ui64TimeStart = OSClockns64();
+	sHandleData.ui64MaxBridgeTime = ui64MaxBridgeTime;
+
+	for (i = 0; i < ARRAY_SIZE(g_aeOrderedFreeList); i++)
+	{
+		sHandleData.eHandleFreeType = g_aeOrderedFreeList[i];
+
+		/* Make sure all handles have been freed before destroying the handle base */
+		eError = gpsHandleFuncs->pfnIterateOverHandles(psBase->psImplBase,
+							       &FreeHandleDataWrapper,
+							       (void *)&sHandleData);
+		if (eError != PVRSRV_OK)
+		{
+			goto ExitUnlock;
+		}
+	}
+
+#if defined(DEBUG)
+	/*
+	 * As we're freeing handles based on type, make sure all
+	 * handles have actually had their data freed to avoid
+	 * resources being leaked
+	 */
+	sCountData.psBase = psBase;
+
+	eError = gpsHandleFuncs->pfnIterateOverHandles(psBase->psImplBase,
+						       &CountHandleDataWrapper,
+						       (void *)&sCountData);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVFreeHandleBase: Failed to perform handle count (%s)",
+			 PVRSRVGetErrorStringKM(eError)));
+		goto ExitUnlock;
+	}
+
+	if (sCountData.uiHandleDataCount != 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVFreeHandleBase: Found %u handles that need freeing for handle base %p",
+			 sCountData.uiHandleDataCount,
+			 psBase));
+		PVR_ASSERT(0);
+	}
+#endif /* defined(DEBUG) */
+
+	if (psBase->psHashTab != IMG_NULL)
+	{
+		HASH_Delete(psBase->psHashTab);
+	}
+
+	eError = gpsHandleFuncs->pfnDestroyHandleBase(psBase->psImplBase);
+	if (eError != PVRSRV_OK)
+	{
+		goto ExitUnlock;
+	}
+
+	OSFreeMem(psBase);
+
+	eError = PVRSRV_OK;
+
+ExitUnlock:
+	UnlockHandle();
+
+	return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVHandleInit
+
+ @Description	Initialise handle management
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVHandleInit(void)
+{
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(gpsKernelHandleBase == IMG_NULL);
+	PVR_ASSERT(gpsHandleFuncs == IMG_NULL);
+	PVR_ASSERT(!gbLockInitialised);
+
+	eError = OSLockCreate(&gHandleLock, LOCK_TYPE_PASSIVE);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVHandleInit: Creation of handle global lock failed (%s)",
+			 PVRSRVGetErrorStringKM(eError)));
+		return eError;
+	}
+	gbLockInitialised = IMG_TRUE;
+
+	eError = PVRSRVHandleGetFuncTable(&gpsHandleFuncs);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVHandleInit: PVRSRVHandleGetFuncTable failed (%s)",
+			 PVRSRVGetErrorStringKM(eError)));
+		goto ErrorHandleDeinit;
+	}
+
+	eError = PVRSRVAllocHandleBase(&gpsKernelHandleBase);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVHandleInit: PVRSRVAllocHandleBase failed (%s)",
+			 PVRSRVGetErrorStringKM(eError)));
+		goto ErrorHandleDeinit;
+	}
+
+	eError = gpsHandleFuncs->pfnEnableHandlePurging(gpsKernelHandleBase->psImplBase);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "PVRSRVHandleInit: PVRSRVEnableHandlePurging failed (%s)",
+			 PVRSRVGetErrorStringKM(eError)));
+		goto ErrorHandleDeinit;
+	}
+
+	return PVRSRV_OK;
+
+ErrorHandleDeinit:
+	(void) PVRSRVHandleDeInit();
+
+	return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVHandleDeInit
+
+ @Description	De-initialise handle management
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVHandleDeInit(void)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	if (gpsHandleFuncs != IMG_NULL)
+	{
+		if (gpsKernelHandleBase != IMG_NULL)
+		{
+			eError = PVRSRVFreeHandleBase(gpsKernelHandleBase, 0 /* do not release bridge lock */);
+			if (eError == PVRSRV_OK)
+			{
+				gpsKernelHandleBase = IMG_NULL;
+			}
+			else
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+					 "PVRSRVHandleDeInit: FreeHandleBase failed (%s)",
+					 PVRSRVGetErrorStringKM(eError)));
+			}
+		}
+
+		if (eError == PVRSRV_OK)
+		{
+			gpsHandleFuncs = IMG_NULL;
+		}
+	}
+	else
+	{
+		/* If we don't have a handle function table we shouldn't have a handle base either */
+		PVR_ASSERT(gpsKernelHandleBase == IMG_NULL);
+	}
+
+	if (gbLockInitialised)
+	{
+		OSLockDestroy(gHandleLock);
+		gbLockInitialised = IMG_FALSE;
+	}
+
+	return eError;
+}
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/lists.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/lists.c
new file mode 100644
index 0000000..b106d16
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/lists.c
@@ -0,0 +1,135 @@
+/*************************************************************************/ /*!
+@File
+@Title          Linked list shared functions implementation.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implementation of the list iterators for types shared among
+                more than one file in the services code.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "lists.h"
+
+/*===================================================================
+  LIST ITERATOR FUNCTIONS USED IN MORE THAN ONE FILE (those used just
+  once are implemented locally).
+  ===================================================================*/
+
+IMPLEMENT_LIST_ANY(PVRSRV_DEVICE_NODE)
+IMPLEMENT_LIST_ANY_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK)
+IMPLEMENT_LIST_ANY_VA(PVRSRV_DEVICE_NODE)
+IMPLEMENT_LIST_ANY_VA_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK)
+IMPLEMENT_LIST_FOR_EACH(PVRSRV_DEVICE_NODE)
+IMPLEMENT_LIST_FOR_EACH_VA(PVRSRV_DEVICE_NODE)
+IMPLEMENT_LIST_INSERT(PVRSRV_DEVICE_NODE)
+IMPLEMENT_LIST_REMOVE(PVRSRV_DEVICE_NODE)
+
+IMPLEMENT_LIST_ANY_VA(PVRSRV_POWER_DEV)
+IMPLEMENT_LIST_ANY_VA_2(PVRSRV_POWER_DEV, PVRSRV_ERROR, PVRSRV_OK)
+IMPLEMENT_LIST_INSERT(PVRSRV_POWER_DEV)
+IMPLEMENT_LIST_REMOVE(PVRSRV_POWER_DEV)
+
+
+/*===================================================================
+  BELOW ARE IMPLEMENTED SOME COMMON CALLBACKS USED IN DIFFERENT FILES
+  ===================================================================*/
+
+
+/*************************************************************************/ /*!
+@Function       MatchDeviceKM_AnyVaCb
+@Description    Matchs a device node with an id and optionally a class.
+@Input          psDeviceNode  Pointer to the device node.
+@Input          va            Variable argument list, with te following values:
+                                ui32DevIndex  Index of de device to match.
+                                bIgnoreClass  Flag indicating if there's
+                                              no need to check the device class.
+                                eDevClass     Device class, ONLY present if
+                                              bIgnoreClass was IMG_FALSE.
+@Return         The pointer to the device node if it matchs, IMG_NULL
+                otherwise.
+*/ /**************************************************************************/
+IMG_VOID* MatchDeviceKM_AnyVaCb(PVRSRV_DEVICE_NODE* psDeviceNode, va_list va)
+{
+	IMG_UINT32 ui32DevIndex;
+	IMG_BOOL bIgnoreClass;
+	PVRSRV_DEVICE_CLASS eDevClass;
+
+	ui32DevIndex = va_arg(va, IMG_UINT32);
+	bIgnoreClass = va_arg(va, IMG_BOOL);
+	if (!bIgnoreClass)
+	{
+		eDevClass = va_arg(va, PVRSRV_DEVICE_CLASS);
+	}
+	else
+	{
+		/*this value will never be used, since the short circuit evaluation
+		of the first clause will stop because bIgnoreClass is true, but the
+		compiler complains if it's not initialized.*/
+		eDevClass = PVRSRV_DEVICE_CLASS_FORCE_I32;
+	}
+
+	if ((bIgnoreClass || psDeviceNode->sDevId.eDeviceClass == eDevClass) &&
+		psDeviceNode->sDevId.ui32DeviceIndex == ui32DevIndex)
+	{
+		return psDeviceNode;
+	}
+	return IMG_NULL;
+}
+
+/*!
+******************************************************************************
+@Function	MatchPowerDeviceIndex_AnyVaCb
+@Description    Matches a power device with its device index.
+@Input          va               Variable argument list
+                ui32DeviceIndex  Device index
+@Return         The pointer to the device it matched, IMG_NULL otherwise.
+******************************************************************************/
+IMG_VOID* MatchPowerDeviceIndex_AnyVaCb(PVRSRV_POWER_DEV *psPowerDev, va_list va)
+{
+	IMG_UINT32 ui32DeviceIndex;
+
+	ui32DeviceIndex = va_arg(va, IMG_UINT32);
+
+	if (psPowerDev->ui32DeviceIndex == ui32DeviceIndex)
+	{
+		return psPowerDev;
+	}
+	else
+	{
+		return IMG_NULL;
+	}
+}
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/mmu_common.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/mmu_common.c
new file mode 100644
index 0000000..5c83e35
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/mmu_common.c
@@ -0,0 +1,3199 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common MMU Management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements basic low level control of MMU.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#include "devicemem_server_utils.h"
+
+/* Our own interface */
+#include "mmu_common.h"
+
+/*
+Interfaces to other modules:
+
+Let's keep this graph up-to-date:
+
+   +-----------+
+   | devicemem |
+   +-----------+
+         |
+   +============+
+   | mmu_common |
+   +============+
+         |
+         +-----------------+
+         |                 |
+    +---------+      +----------+
+    |   pmr   |      |  device  |
+    +---------+      +----------+
+*/
+
+#include "img_types.h"
+#include "osfunc.h"
+#include "allocmem.h"
+#if defined(PDUMP)
+#include "pdump_km.h"
+#endif
+#include "pmr.h"
+/* include/ */
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv.h"
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#include "physmem_lma.h"
+#endif
+
+// #define MMU_OBJECT_REFCOUNT_DEBUGING 1
+#if defined (MMU_OBJECT_REFCOUNT_DEBUGING)
+#define MMU_OBJ_DBG(x)	PVR_DPF(x);
+#else
+#define MMU_OBJ_DBG(x)
+#endif
+
+typedef IMG_UINT32 MMU_FLAGS_T;
+
+typedef enum _MMU_MOD_
+{
+	MMU_MOD_UNKNOWN = 0,
+	MMU_MOD_MAP,
+	MMU_MOD_UNMAP,
+} MMU_MOD;
+
+#if defined(SUPPORT_MMU_MODIFICATION_LOGGING)
+#define MMU_MODIFICATION_HISTORY 10
+#define MMU_MODIFICATION_MAX_ENTRIES 1024
+typedef struct _MMU_MOD_INFO_
+{
+	IMG_UINT32 ui32NextEntry;
+	MMU_MOD eModificationHistory[MMU_MODIFICATION_HISTORY];
+	IMG_UINT64 ui64Value[MMU_MODIFICATION_HISTORY];
+} MMU_MOD_INFO;
+#endif
+/*!
+	All physical allocations and frees are relative to this context, so
+	we would get all the allocations of PCs, PDs, and PTs from the same
+	RA.
+
+	We have one per MMU context in case we have mixed UMA/LMA devices
+	within the same system.
+*/
+typedef struct _MMU_PHYSMEM_CONTEXT_
+{
+	/*! Parent device node */
+	PVRSRV_DEVICE_NODE *psDevNode;
+
+	/*! Refcount so we know when to free up the arena */
+	IMG_UINT32 uiNumAllocations;
+
+	/*! Arena from which physical memory is derived */
+	RA_ARENA *psPhysMemRA;
+	/*! Arena name */
+	IMG_CHAR *pszPhysMemRAName;
+	/*! Size of arena name string */
+	IMG_SIZE_T uiPhysMemRANameAllocSize;
+
+} MMU_PHYSMEM_CONTEXT;
+
+/*!
+	Mapping structure for MMU memory allocation
+*/
+typedef struct _MMU_MEMORY_MAPPING_
+{
+	/*! Physmem context to allocate from */
+	MMU_PHYSMEM_CONTEXT		*psContext;
+	/*! OS/system Handle for this allocation */
+	Px_HANDLE				sMemHandle;
+	/*! CPU virtual address of this allocation */
+	IMG_VOID				*pvCpuVAddr;
+	/*! Device physical address of this allocation */
+	IMG_DEV_PHYADDR			sDevPAddr;
+	/*! Size of this allocation */
+	IMG_SIZE_T				uiSize;
+	/*! Number of current mappings of this allocation */
+	IMG_UINT32				uiCpuVAddrRefCount;
+} MMU_MEMORY_MAPPING;
+
+/*!
+	Memory descriptor for MMU objects. There can be more then one memory
+	descriptor per MMU memory allocation.
+*/
+typedef struct _MMU_MEMORY_DESC_
+{
+	/* NB: bValid is set if this descriptor describes physical
+	   memory.  This allows "empty" descriptors to exist, such that we
+	   can allocate them in batches.  */
+	/*! Does this MMU object have physical backing */
+	IMG_BOOL				bValid;
+	/*! Device Physical address of physical backing */
+	IMG_DEV_PHYADDR			sDevPAddr;
+	/*! CPU virtual address of physical backing */
+	IMG_VOID				*pvCpuVAddr;
+	/*! Mapping data for this MMU object */
+	MMU_MEMORY_MAPPING		*psMapping;
+} MMU_MEMORY_DESC;
+
+/*!
+	MMU levelx structure. This is generic and is used
+	for all levels (PC, PD, PT).
+*/
+typedef struct _MMU_Levelx_INFO_
+{
+	/*! The Number of entries in this level */
+	IMG_UINT32 ui32NumOfEntries;
+
+	/*! Number of times this level has been reference. Note: For Level1 (PTE)
+	    we still take/drop the reference when setting up the page tables rather
+	    then at map/unmap time as this simplifies things */
+	IMG_UINT32 ui32RefCount;
+
+	/*! MemDesc for this level */
+	MMU_MEMORY_DESC sMemDesc;
+
+#if defined(SUPPORT_MMU_MODIFICATION_LOGGING)
+	MMU_MOD_INFO asModifications[MMU_MODIFICATION_MAX_ENTRIES];
+#endif
+
+	/*! Array of infos for the next level. Must be last member in structure */
+	struct _MMU_Levelx_INFO_ *apsNextLevel[1];
+} MMU_Levelx_INFO;
+
+/*!
+	MMU context structure
+*/
+struct _MMU_CONTEXT_
+{
+	/*! Parent device node */
+	PVRSRV_DEVICE_NODE *psDevNode;
+
+	MMU_DEVICEATTRIBS *psDevAttrs;
+
+	/*! For allocation and deallocation of the physical memory where
+	    the pagetables live */
+	struct _MMU_PHYSMEM_CONTEXT_ *psPhysMemCtx;
+
+#if defined(PDUMP)
+	/*! PDump context ID (required for PDump commands with virtual addresses) */
+	IMG_UINT32 uiPDumpContextID;
+
+	/*! The refcount of the PDump context ID */
+	IMG_UINT32 ui32PDumpContextIDRefCount;
+#endif
+
+	/*! Data that is passed back during device specific callbacks */
+	IMG_HANDLE hDevData;
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+    IMG_UINT32  ui32OSid;
+	IMG_UINT32	ui32OSidReg;
+#endif
+
+	/*! Base level info structure. Must be last member in structure */
+	MMU_Levelx_INFO sBaseLevelInfo;
+};
+
+/* useful macros */
+/* units represented in a bitfield */
+#define UNITS_IN_BITFIELD(Mask, Shift)	((Mask >> Shift) + 1)
+
+#define MMU_BAD_PHYS_ADDR 0xbadbad00badULL
+static const IMG_DEV_PHYADDR gsBadDevPhyAddr = {MMU_BAD_PHYS_ADDR};
+
+#if defined(DEBUG)
+static IMG_UINT32 FloorLog2(IMG_UINT64 ui64Val)
+{
+	IMG_UINT32 ui32Ret = 0;
+
+	while (ui64Val >>= 1)
+	{
+		ui32Ret++;
+	}
+
+	return ui32Ret;
+}
+#endif
+
+/*****************************************************************************
+ *                          Utility functions                                *
+ *****************************************************************************/
+
+/*************************************************************************/ /*!
+@Function       _CalcPCEIdx
+
+@Description    Calculate the page catalogue index
+
+@Input          sDevVAddr           Device virtual address
+
+@Input          psDevVAddrConfig    Configuration of the virtual address
+
+@Input          bRoundUp            Round up the index
+
+@Return         The page catalogue index
+*/
+/*****************************************************************************/
+static IMG_UINT32 _CalcPCEIdx(IMG_DEV_VIRTADDR sDevVAddr,
+                              const MMU_DEVVADDR_CONFIG *psDevVAddrConfig,
+                              IMG_BOOL bRoundUp)
+{
+	IMG_DEV_VIRTADDR sTmpDevVAddr;
+    IMG_UINT32 ui32RetVal;
+
+    sTmpDevVAddr = sDevVAddr;
+
+	if (bRoundUp)
+	{
+        sTmpDevVAddr.uiAddr --;
+    }
+    ui32RetVal = (IMG_UINT32) ((sTmpDevVAddr.uiAddr & psDevVAddrConfig->uiPCIndexMask)
+        >> psDevVAddrConfig->uiPCIndexShift);
+
+    if (bRoundUp)
+    {
+        ui32RetVal ++;
+    }
+
+    return ui32RetVal;
+}
+
+
+/*************************************************************************/ /*!
+@Function       _CalcPCEIdx
+
+@Description    Calculate the page directory index
+
+@Input          sDevVAddr           Device virtual address
+
+@Input          psDevVAddrConfig    Configuration of the virtual address
+
+@Input          bRoundUp            Round up the index
+
+@Return         The page directory index
+*/
+/*****************************************************************************/
+static IMG_UINT32 _CalcPDEIdx(IMG_DEV_VIRTADDR sDevVAddr,
+                              const MMU_DEVVADDR_CONFIG *psDevVAddrConfig,
+                              IMG_BOOL bRoundUp)
+{
+	IMG_DEV_VIRTADDR sTmpDevVAddr;
+    IMG_UINT32 ui32RetVal;
+
+    sTmpDevVAddr = sDevVAddr;
+
+	if (bRoundUp)
+	{
+        sTmpDevVAddr.uiAddr --;
+    }
+    ui32RetVal = (IMG_UINT32) ((sTmpDevVAddr.uiAddr & psDevVAddrConfig->uiPDIndexMask)
+        >> psDevVAddrConfig->uiPDIndexShift);
+
+    if (bRoundUp)
+    {
+        ui32RetVal ++;
+    }
+        
+    return ui32RetVal;
+}
+
+
+/*************************************************************************/ /*!
+@Function       _CalcPCEIdx
+
+@Description    Calculate the page entry index
+
+@Input          sDevVAddr           Device virtual address
+
+@Input          psDevVAddrConfig    Configuration of the virtual address
+
+@Input          bRoundUp            Round up the index
+
+@Return         The page entry index
+*/
+/*****************************************************************************/
+static IMG_UINT32 _CalcPTEIdx(IMG_DEV_VIRTADDR sDevVAddr,
+                              const MMU_DEVVADDR_CONFIG *psDevVAddrConfig,
+                              IMG_BOOL bRoundUp)
+{
+	IMG_DEV_VIRTADDR sTmpDevVAddr;
+    IMG_UINT32 ui32RetVal;
+
+    sTmpDevVAddr = sDevVAddr;
+
+	if (bRoundUp)
+	{
+        sTmpDevVAddr.uiAddr --;
+    }
+    ui32RetVal = (IMG_UINT32) ((sTmpDevVAddr.uiAddr & psDevVAddrConfig->uiPTIndexMask)
+        >> psDevVAddrConfig->uiPTIndexShift);
+
+    if (bRoundUp)
+    {
+        ui32RetVal ++;
+    }
+        
+    return ui32RetVal;
+}
+
+/*****************************************************************************
+ *         MMU memory allocation/management functions (mem desc)             *
+ *****************************************************************************/
+
+/*************************************************************************/ /*!
+@Function       _MMU_PhysMem_RAImportAlloc
+
+@Description    Imports MMU Px memory into the RA. This is where the
+                actual allocation of physical memory happens.
+
+@Input          hArenaHandle    Handle that was passed in during the
+                                creation of the RA
+
+@Input          uiSize          Size of the memory to import
+
+@Input          uiFlags         Flags that where passed in the allocation.
+
+@Output         puiBase         The address of where to insert this import
+
+@Output         puiActualSize   The actual size of the import
+
+@Output         phPriv          Handle which will be passed back when
+                                this import is freed
+
+@Return         IMG_TRUE if import alloc was successful, otherwise IMG_FALSE
+*/
+/*****************************************************************************/
+static IMG_BOOL _MMU_PhysMem_RAImportAlloc(RA_PERARENA_HANDLE hArenaHandle,
+                                           RA_LENGTH_T uiSize,
+                                           RA_FLAGS_T uiFlags,
+                                           RA_BASE_T *puiBase,
+                                           RA_LENGTH_T *puiActualSize,
+                                           RA_PERISPAN_HANDLE *phPriv)
+{
+	MMU_PHYSMEM_CONTEXT *psCtx = (MMU_PHYSMEM_CONTEXT *) hArenaHandle;
+	PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *) psCtx->psDevNode;
+	MMU_MEMORY_MAPPING *psMapping;
+	PVRSRV_ERROR eError;
+
+	PVR_UNREFERENCED_PARAMETER(uiFlags);
+
+	psMapping = OSAllocMem(sizeof(MMU_MEMORY_MAPPING));
+	if (psMapping == IMG_NULL)
+	{
+		goto e0;
+	}
+
+	eError = psDevNode->pfnMMUPxAlloc(psDevNode, TRUNCATE_64BITS_TO_SIZE_T(uiSize), &psMapping->sMemHandle,
+										&psMapping->sDevPAddr);
+	if (eError != PVRSRV_OK)
+	{
+		goto e1;
+	}
+
+	psMapping->psContext = psCtx;
+	psMapping->uiSize = TRUNCATE_64BITS_TO_SIZE_T(uiSize);
+
+	psMapping->uiCpuVAddrRefCount = 0;
+
+	*phPriv = (RA_PERISPAN_HANDLE) psMapping;
+
+	/* Note: This assumes this memory never gets paged out */
+	*puiBase = (RA_BASE_T)psMapping->sDevPAddr.uiAddr;
+	*puiActualSize = uiSize;
+
+	return IMG_TRUE;
+
+e1:
+	OSFreeMem(psMapping);
+e0:
+	return IMG_FALSE;
+}
+
+/*************************************************************************/ /*!
+@Function       _MMU_PhysMem_RAImportFree
+
+@Description    Imports MMU Px memory into the RA. This is where the
+                actual free of physical memory happens.
+
+@Input          hArenaHandle    Handle that was passed in during the
+                                creation of the RA
+
+@Input          puiBase         The address of where to insert this import
+
+@Output         phPriv          Private data that the import alloc provided
+
+@Return         None
+*/
+/*****************************************************************************/
+static IMG_VOID _MMU_PhysMem_RAImportFree(RA_PERARENA_HANDLE hArenaHandle,
+                                          RA_BASE_T uiBase,
+                                          RA_PERISPAN_HANDLE hPriv)
+{
+	MMU_MEMORY_MAPPING *psMapping = (MMU_MEMORY_MAPPING *) hPriv;
+	MMU_PHYSMEM_CONTEXT *psCtx = (MMU_PHYSMEM_CONTEXT *) hArenaHandle;
+	PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *) psCtx->psDevNode;
+
+	PVR_UNREFERENCED_PARAMETER(uiBase);
+
+	/* Check we have dropped all CPU mappings */
+	PVR_ASSERT(psMapping->uiCpuVAddrRefCount == 0);
+
+	psDevNode->pfnMMUPxFree(psDevNode, &psMapping->sMemHandle);
+	OSFreeMem(psMapping);
+}
+
+/*************************************************************************/ /*!
+@Function       _MMU_PhysMemAlloc
+
+@Description    Allocates physical memory for MMU objects
+
+@Input          psCtx           Physmem context to do the allocation from
+
+@Output         psMemDesc       Allocation description
+
+@Input          uiBytes         Size of the allocation in bytes
+
+@Input          uiAlignment     Alignment requirement of this allocation
+
+@Return         PVRSRV_OK if allocation was successful
+*/
+/*****************************************************************************/
+
+static PVRSRV_ERROR _MMU_PhysMemAlloc(MMU_PHYSMEM_CONTEXT *psCtx,
+                                      MMU_MEMORY_DESC *psMemDesc,
+                                      IMG_SIZE_T uiBytes,
+                                      IMG_SIZE_T uiAlignment)
+{
+	RA_BASE_T uiPhysAddr;
+	IMG_BOOL bStatus;
+
+	if (!psMemDesc || psMemDesc->bValid)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	bStatus = RA_Alloc(psCtx->psPhysMemRA,
+					   uiBytes,
+					   0, // flags
+					   uiAlignment,
+					   &uiPhysAddr,
+					   IMG_NULL,
+					   (RA_PERISPAN_HANDLE *) &psMemDesc->psMapping);
+	if(!bStatus)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "_MMU_PhysMemAlloc: ERROR call to RA_Alloc() failed"));
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	psMemDesc->bValid = IMG_TRUE;
+	psMemDesc->pvCpuVAddr = IMG_NULL;
+	psMemDesc->sDevPAddr.uiAddr = (IMG_UINTPTR_T) uiPhysAddr;
+
+#if !defined(SUPPORT_MMU_PxE_MAP_ON_DEMAND)
+	{
+		PVRSRV_ERROR eError;
+
+		if (psMemDesc->psMapping->uiCpuVAddrRefCount == 0)
+		{
+			eError = psCtx->psDevNode->pfnMMUPxMap(psCtx->psDevNode,
+											&psMemDesc->psMapping->sMemHandle,
+											psMemDesc->psMapping->uiSize,
+											&psMemDesc->psMapping->sDevPAddr,
+											&psMemDesc->psMapping->pvCpuVAddr);
+			if (eError != PVRSRV_OK)
+			{
+				RA_Free(psCtx->psPhysMemRA, psMemDesc->sDevPAddr.uiAddr);
+				return eError;
+			}
+		}
+
+		psMemDesc->psMapping->uiCpuVAddrRefCount++;
+		PVR_ASSERT(psMemDesc->psMapping->pvCpuVAddr != IMG_NULL);
+	
+		/* Work out the address for this mem desc */
+		psMemDesc->pvCpuVAddr = ((IMG_UINT8 *) psMemDesc->psMapping->pvCpuVAddr) + 
+									(psMemDesc->sDevPAddr.uiAddr -
+									psMemDesc->psMapping->sDevPAddr.uiAddr);
+	}
+#endif
+	return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       _MMU_PhysMemFree
+
+@Description    Allocates physical memory for MMU objects
+
+@Input          psCtx           Physmem context to do the free on
+
+@Input          psMemDesc       Allocation description
+
+@Return         None
+*/
+/*****************************************************************************/
+static IMG_VOID _MMU_PhysMemFree(MMU_PHYSMEM_CONTEXT *psCtx,
+                                     MMU_MEMORY_DESC *psMemDesc)
+{
+	RA_BASE_T uiPhysAddr;
+
+	PVR_ASSERT(psMemDesc->bValid);
+
+#if !defined(SUPPORT_MMU_PxE_MAP_ON_DEMAND)
+
+	if(--psMemDesc->psMapping->uiCpuVAddrRefCount == 0)
+	{
+		psCtx->psDevNode->pfnMMUPxUnmap(psCtx->psDevNode, &psMemDesc->psMapping->sMemHandle,
+				psMemDesc->psMapping->pvCpuVAddr);
+
+		psMemDesc->psMapping->pvCpuVAddr = IMG_NULL;
+	}
+
+	psMemDesc->pvCpuVAddr = IMG_NULL;
+#endif
+
+	uiPhysAddr = psMemDesc->sDevPAddr.uiAddr;
+	RA_Free(psCtx->psPhysMemRA, uiPhysAddr);
+
+	psMemDesc->bValid = IMG_FALSE;
+}
+
+/*************************************************************************/ /*!
+@Function       _MMU_MapCPUVAddr
+
+@Description    Map an allocation of physical memory for MMU objects
+                into the CPU address space
+
+@Input          psMemDesc       Allocation description
+
+@Return         PVRSRV_OK if map was successful
+*/
+/*****************************************************************************/
+static PVRSRV_ERROR _MMU_MapCPUVAddr(MMU_MEMORY_DESC *psMMUMemDesc)
+{
+#if defined(SUPPORT_MMU_PxE_MAP_ON_DEMAND)
+	MMU_MEMORY_MAPPING *psMapping = psMMUMemDesc->psMapping;
+	MMU_PHYSMEM_CONTEXT *psCtx = psMapping->psContext;
+	PVRSRV_DEVICE_NODE *psDevNode = psCtx->psDevNode;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	/* There should only be one call to map */
+	PVR_ASSERT(psMMUMemDesc->pvCpuVAddr == IMG_NULL);
+
+	if (psMapping->uiCpuVAddrRefCount == 0)
+	{
+		
+		eError = psDevNode->pfnMMUPxMap(psDevNode,
+										&psMapping->sMemHandle,
+										psMapping->uiSize,
+										&psMapping->sDevPAddr,
+										&psMapping->pvCpuVAddr);
+	}
+	psMapping->uiCpuVAddrRefCount++;
+
+	PVR_ASSERT(psMapping->pvCpuVAddr != IMG_NULL);
+
+	/* Workout the address for this mem desc */
+	psMMUMemDesc->pvCpuVAddr = ((IMG_UINT8 *) psMapping->pvCpuVAddr) + 
+								(psMMUMemDesc->psMapping->sDevPAddr.uiAddr -
+								psMapping->sDevPAddr.uiAddr);
+
+	return eError;
+#else
+	return PVRSRV_OK;
+#endif
+}
+
+/*************************************************************************/ /*!
+@Function       _MMU_UnmapCPUVAddr
+
+@Description    Unmap an allocation of physical memory for MMU objects
+                from the CPU address space
+
+@Input          psMemDesc       Allocation description
+
+@Return         PVRSRV_OK if map was successful
+*/
+/*****************************************************************************/
+static PVRSRV_ERROR _MMU_UnmapCPUVAddr(MMU_MEMORY_DESC *psMMUMemDesc)
+{
+#if defined(SUPPORT_MMU_PxE_MAP_ON_DEMAND)
+	MMU_MEMORY_MAPPING *psMapping = psMMUMemDesc->psMapping;
+	MMU_PHYSMEM_CONTEXT *psCtx = psMapping->psContext;
+	PVRSRV_DEVICE_NODE *psDevNode = psCtx->psDevNode;
+
+	if (--psMapping->uiCpuVAddrRefCount == 0)
+	{
+		psDevNode->pfnMMUPxUnmap(psDevNode, &psMapping->sMemHandle,
+									psMMUMemDesc->pvCpuVAddr);
+	}
+
+	psMMUMemDesc->pvCpuVAddr = IMG_NULL;
+#endif
+	return PVRSRV_OK;
+}
+
+
+/*****************************************************************************
+ *              MMU object allocation/management functions                   *
+ *****************************************************************************/
+
+#if defined(SUPPORT_MMU_MODIFICATION_LOGGING)
+static IMG_CHAR *_MMU_LogPxEModificationToString(MMU_MOD eMMUMod)
+{
+	switch(eMMUMod)
+	{
+		case MMU_MOD_UNKNOWN:
+			return "Unknown";
+		case MMU_MOD_MAP:
+			return "Map";
+		case MMU_MOD_UNMAP:
+			return "Unmap";
+		default:
+			break;
+	}
+	return "Bad enum";
+}
+
+static INLINE IMG_VOID _MMU_LogPxEModification(MMU_Levelx_INFO *psLevel, IMG_UINT32 ui32Index, MMU_MOD eMMUMod, IMG_UINT64 ui64Value)
+{
+	IMG_UINT32 ui32NextEntry = psLevel->asModifications[ui32Index].ui32NextEntry;
+
+	PVR_ASSERT(ui32Index < MMU_MODIFICATION_MAX_ENTRIES);
+	if (ui32Index < MMU_MODIFICATION_MAX_ENTRIES)
+	{
+		psLevel->asModifications[ui32Index].eModificationHistory[ui32NextEntry] = eMMUMod;
+		psLevel->asModifications[ui32Index].ui64Value[ui32NextEntry] = ui64Value;
+		psLevel->asModifications[ui32Index].ui32NextEntry = (ui32NextEntry + 1) % MMU_MODIFICATION_HISTORY;
+	}
+}
+
+static INLINE IMG_VOID _MMU_LogPxEDump(MMU_Levelx_INFO *psLevel, IMG_UINT32 ui32Index)
+{
+	IMG_UINT32 i;
+
+	PVR_ASSERT(ui32Index < MMU_MODIFICATION_MAX_ENTRIES);
+	if (ui32Index < MMU_MODIFICATION_MAX_ENTRIES)
+	{
+		for (i=0;i<MMU_MODIFICATION_HISTORY;i++)
+		{
+			IMG_UINT32 ui32Tmp = (psLevel->asModifications[ui32Index].ui32NextEntry-1-i+MMU_MODIFICATION_HISTORY)%MMU_MODIFICATION_HISTORY;
+			PVR_LOG(("Mod last - %d (index %d): Op = %s, Value = 0x%016llx", 
+					i,
+					ui32Tmp,
+					_MMU_LogPxEModificationToString(psLevel->asModifications[ui32Index].eModificationHistory[ui32Tmp]),
+					psLevel->asModifications[ui32Index].ui64Value[ui32Tmp]));
+					
+		}
+	}
+}
+#else	// defined(SUPPORT_MMU_MODIFICATION_LOGGING)
+static INLINE IMG_VOID _MMU_LogPxEModification(MMU_Levelx_INFO *psLevel, IMG_UINT32 ui32Index, MMU_MOD eMMUMod, IMG_UINT64 ui64Value)
+{
+	PVR_UNREFERENCED_PARAMETER(psLevel);
+	PVR_UNREFERENCED_PARAMETER(ui32Index);
+	PVR_UNREFERENCED_PARAMETER(eMMUMod);
+}
+
+static INLINE IMG_VOID _MMU_LogPxEDump(MMU_Levelx_INFO *psLevel, IMG_UINT32 ui32Index)
+{
+	PVR_UNREFERENCED_PARAMETER(psLevel);
+}
+#endif	// defined(SUPPORT_MMU_MODIFICATION_LOGGING)
+
+
+static INLINE IMG_VOID _MMU_ConvertDevMemFlags(IMG_BOOL bInvalidate,
+                                               PVRSRV_MEMALLOCFLAGS_T uiMappingFlags,
+                                               MMU_PROTFLAGS_T *uiMMUProtFlags)
+{
+	/* Do flag conversion between devmem flags and MMU generic flags */
+
+	if (bInvalidate == IMG_FALSE)
+	{
+		*uiMMUProtFlags |= ( (uiMappingFlags & PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK)
+							>> PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_OFFSET)
+							<< MMU_PROTFLAGS_DEVICE_OFFSET;
+
+		if (uiMappingFlags & PVRSRV_MEMALLOCFLAG_GPU_READABLE)
+		{
+			*uiMMUProtFlags |= MMU_PROTFLAGS_READABLE;
+		}
+		if (uiMappingFlags & PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE)
+		{
+			*uiMMUProtFlags |= MMU_PROTFLAGS_WRITEABLE;
+		}
+
+		switch ( DevmemDeviceCacheMode(uiMappingFlags) )
+		{
+			case PVRSRV_MEMALLOCFLAG_GPU_UNCACHED:
+			case PVRSRV_MEMALLOCFLAG_GPU_WRITE_COMBINE:
+					break;
+			case PVRSRV_MEMALLOCFLAG_GPU_CACHED:
+					*uiMMUProtFlags |= MMU_PROTFLAGS_CACHED;
+					break;
+			default:
+					PVR_DPF((PVR_DBG_ERROR,"_MMU_DerivePTProtFlags: Wrong parameters"));
+					return;
+		}
+
+		if (DevmemDeviceCacheCoherency(uiMappingFlags))
+		{
+			*uiMMUProtFlags |= MMU_PROTFLAGS_CACHE_COHERENT;
+		}
+	}
+	else
+	{
+		*uiMMUProtFlags |= MMU_PROTFLAGS_INVALID;
+	}
+}
+
+/*************************************************************************/ /*!
+@Function       _PxMemAlloc
+
+@Description    Allocates physical memory for MMU objects, initialises
+                and PDumps it.
+
+@Input          psMMUContext    MMU context
+
+@Input          uiNumEntries    Number of entries to allocate
+
+@Input          psConfig        MMU Px config
+
+@Input          eMMULevel       MMU level that that allocation is for
+
+@Output         psMemDesc       Description of allocation
+
+@Return         PVRSRV_OK if allocation was successful
+*/
+/*****************************************************************************/
+static PVRSRV_ERROR _PxMemAlloc(MMU_CONTEXT *psMMUContext,
+								IMG_UINT32 uiNumEntries,
+								const MMU_PxE_CONFIG *psConfig,
+								MMU_LEVEL eMMULevel,
+								MMU_MEMORY_DESC *psMemDesc,
+								IMG_UINT8 uiLog2Align)
+{
+	PVRSRV_ERROR eError;
+	IMG_SIZE_T uiBytes;
+	IMG_SIZE_T uiAlign;
+#if defined(PDUMP)
+	PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psDevNode;
+#endif
+	PVR_ASSERT(psConfig->uiBytesPerEntry != 0);
+
+	uiBytes = uiNumEntries * psConfig->uiBytesPerEntry;
+	/* We need here the alignment of the previous level because that is the entry for we generate here */
+	uiAlign = 1 << uiLog2Align;
+
+	/*  allocate the object */
+	eError = _MMU_PhysMemAlloc(psMMUContext->psPhysMemCtx,
+								psMemDesc, uiBytes, uiAlign);
+	if(eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "_PxMemAlloc: failed to allocate memory for the  MMU object"));
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e0;
+	}
+
+	/*
+		some OS's can alloc memory without a CPU ptr.
+		Map the memory to the CPU (may be a no-op) 
+	*/
+	eError = _MMU_MapCPUVAddr(psMemDesc);
+	if(eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "_PxMemAlloc: failed to map MMU object to CPU"));
+		eError = PVRSRV_ERROR_FAILED_TO_MAP_PAGE_TABLE;
+		goto e1;
+	}
+
+	/*
+		Clear the object
+		Note: if any MMUs are cleared with non-zero values then will need a custom
+		clear function
+	*/
+	OSMemSet(psMemDesc->pvCpuVAddr, 0, uiBytes);
+
+#if defined(PDUMP)
+	PDUMPCOMMENT("Alloc MMU object");
+
+	PDumpMMUMalloc(psDevNode->pszMMUPxPDumpMemSpaceName,
+                   eMMULevel,
+                   &psMemDesc->sDevPAddr,
+                   uiBytes,
+                   uiAlign);
+	
+	PDumpMMUDumpPxEntries(eMMULevel,
+						  psDevNode->pszMMUPxPDumpMemSpaceName,
+						  psMemDesc->pvCpuVAddr,
+						  psMemDesc->sDevPAddr,
+						  0,
+						  uiNumEntries,
+						  IMG_NULL, IMG_NULL, 0, /* pdump symbolic info is irrelevant here */
+						  psConfig->uiBytesPerEntry,
+						  uiLog2Align,
+						  psConfig->uiAddrShift,
+						  psConfig->uiAddrMask,
+						  psConfig->uiProtMask,
+						  0);
+#endif
+
+	/* unmap the memory from the CPU (may be a no-op) */
+	_MMU_UnmapCPUVAddr(psMemDesc);
+
+	return PVRSRV_OK;
+
+e1:
+	_MMU_PhysMemFree(psMMUContext->psPhysMemCtx, psMemDesc);
+e0:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       _PxMemFree
+
+@Description    Frees physical memory for MMU objects, de-initialises
+                and PDumps it.
+
+@Input          psMemDesc       Description of allocation
+
+@Return         PVRSRV_OK if allocation was successful
+*/
+/*****************************************************************************/
+
+static IMG_VOID _PxMemFree(MMU_CONTEXT *psMMUContext,
+							MMU_MEMORY_DESC *psMemDesc, MMU_LEVEL eMMULevel)
+{
+#if defined(PDUMP)
+	PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psDevNode;
+#endif
+#if defined(MMU_CLEARMEM_ON_FREE)
+	PVRSRV_ERROR eError;
+	/*
+		some OS's can alloc memory without a CPU ptr.
+		Map the memory to the CPU (may be a no-op) 
+	*/
+	eError = _MMU_MapCPUVAddr(psMemDesc);
+	if(eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "_PxMemFree: failed to map MMU object to CPU"));
+		PVR_ASSERT(0);
+	}
+
+	/*
+		Clear the MMU object
+		Note: if any MMUs are cleared with non-zero values then will need a custom
+		clear function
+	*/
+	OSMemSet(psMemDesc->pvCpuVAddr, 0, psMemDesc->ui32Bytes);
+
+#if defined(PDUMP)
+	PDUMPCOMMENT("Clear MMU object before freeing it");
+#endif
+
+	/* unmap the memory from the CPU (may be a no-op) */
+	_MMU_UnmapCPUVAddr(psMemDesc);
+#endif/* MMU_CLEARMEM_ON_FREE */
+
+#if defined(PDUMP)
+	PDUMPCOMMENT("Free MMU object");
+	{
+		PDumpMMUFree(psDevNode->pszMMUPxPDumpMemSpaceName, eMMULevel, &psMemDesc->sDevPAddr);
+	}
+#else
+	PVR_UNREFERENCED_PARAMETER(eMMULevel);
+#endif
+	/*  free the PC */
+	_MMU_PhysMemFree(psMMUContext->psPhysMemCtx, psMemDesc);
+}
+
+static PVRSRV_ERROR _SetupPTE(MMU_CONTEXT *psMMUContext,
+                              MMU_Levelx_INFO *psLevel,
+                              IMG_UINT32 uiIndex,
+                              const MMU_PxE_CONFIG *psConfig,
+                              const IMG_DEV_PHYADDR *psDevPAddr,
+                              IMG_BOOL bUnmap,
+#if defined(PDUMP)
+                              const IMG_CHAR *pszMemspaceName,
+                              const IMG_CHAR *pszSymbolicAddr,
+                              IMG_DEVMEM_OFFSET_T uiSymbolicAddrOffset,
+#endif
+                              IMG_UINT64 uiProtFlags)
+{
+	MMU_MEMORY_DESC *psMemDesc = &psLevel->sMemDesc;
+	IMG_UINT64 ui64PxE64;
+
+	/* Calculate Entry */
+	ui64PxE64 =    psDevPAddr->uiAddr /* Calculate the offset to that base */
+	            >> psConfig->uiLog2Align /* Shift away the useless bits, because the alignment is very coarse and we address by alignment */
+	            << psConfig->uiAddrShift /* Shift back to fit address in the Px entry */
+	             & psConfig->uiAddrMask; /* Delete unused bits */
+	ui64PxE64 |= uiProtFlags;
+
+	/* Set the entry */
+	if (psConfig->uiBytesPerEntry == 8)
+	{
+		IMG_UINT64 *pui64Px = psMemDesc->pvCpuVAddr; /* Give the virtual base address of Px */
+
+		pui64Px[uiIndex] = ui64PxE64;
+	}
+	else if (psConfig->uiBytesPerEntry == 4)
+	{
+		IMG_UINT32 *pui32Px = psMemDesc->pvCpuVAddr; /* Give the virtual base address of Px */
+
+		/* assert that the result fits into 32 bits before writing
+		   it into the 32-bit array with a cast */
+		PVR_ASSERT(ui64PxE64 == (ui64PxE64 & 0xffffffffU));
+
+		pui32Px[uiIndex] = (IMG_UINT32) ui64PxE64;
+	}
+	else
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+
+	/* Log modification */
+	_MMU_LogPxEModification(psLevel,
+	                        uiIndex,
+	                        (bUnmap == IMG_TRUE)?MMU_MOD_UNMAP:MMU_MOD_MAP,
+	                        ui64PxE64);
+
+#if defined (PDUMP)
+	PDumpMMUDumpPxEntries(MMU_LEVEL_1,
+						  psMMUContext->psDevNode->pszMMUPxPDumpMemSpaceName,
+						  psMemDesc->pvCpuVAddr,
+						  psMemDesc->sDevPAddr,
+						  uiIndex,
+						  1,
+						  pszMemspaceName,
+						  pszSymbolicAddr,
+						  uiSymbolicAddrOffset,
+						  psConfig->uiBytesPerEntry,
+						  psConfig->uiLog2Align,
+						  psConfig->uiAddrShift,
+						  psConfig->uiAddrMask,
+						  psConfig->uiProtMask,
+						  0);
+#endif /*PDUMP*/
+
+	return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       _SetupPxE
+
+@Description    Sets up an entry of an MMU object to point to the
+                provided address
+
+@Input          psMMUContext    MMU context to operate on
+
+@Input          psLevel         Level info for MMU object
+
+@Input          uiIndex         Index into the MMU object to setup
+
+@Input          psConfig        MMU Px config
+
+@Input          eMMULevel       Level of MMU object
+
+@Input          psDevPAddr      Address to setup the MMU object to point to
+
+@Input          pszMemspaceName Name of the PDump memory space that the entry
+                                will point to
+
+@Input          pszSymbolicAddr PDump symbolic address that the entry will
+                                point to
+
+@Input          uiProtFlags     MMU protection flags
+
+@Return         PVRSRV_OK if the setup was successful
+*/
+/*****************************************************************************/
+static PVRSRV_ERROR _SetupPxE(MMU_CONTEXT *psMMUContext,
+								MMU_Levelx_INFO *psLevel,
+								IMG_UINT32 uiIndex,
+								const MMU_PxE_CONFIG *psConfig,
+								MMU_LEVEL eMMULevel,
+								const IMG_DEV_PHYADDR *psDevPAddr,
+#if defined(PDUMP)
+								const IMG_CHAR *pszMemspaceName,
+								const IMG_CHAR *pszSymbolicAddr,
+								IMG_DEVMEM_OFFSET_T uiSymbolicAddrOffset,
+#endif
+								MMU_FLAGS_T uiProtFlags,
+								IMG_UINT8 uiLog2PageSize)
+{
+	PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psDevNode;
+	MMU_MEMORY_DESC *psMemDesc = &psLevel->sMemDesc;
+	PVRSRV_ERROR eError;
+
+	IMG_UINT32 (*pfnDerivePxEProt4)(IMG_UINT32);
+	IMG_UINT64 (*pfnDerivePxEProt8)(IMG_UINT32, IMG_UINT8);
+
+	if (!psDevPAddr)
+	{
+		/* Invalidate entry */
+		if (~uiProtFlags & MMU_PROTFLAGS_INVALID)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "Error, no physical address specified, but not invalidating entry"));
+			uiProtFlags |= MMU_PROTFLAGS_INVALID;
+		}
+		psDevPAddr = &gsBadDevPhyAddr;
+	}
+	else
+	{
+		if (uiProtFlags & MMU_PROTFLAGS_INVALID)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "A physical address was specified when requesting invalidation of entry"));
+			uiProtFlags |= MMU_PROTFLAGS_INVALID;
+		}
+	}
+
+	switch(eMMULevel)
+	{
+		case MMU_LEVEL_3:
+				pfnDerivePxEProt4 = psMMUContext->psDevAttrs->pfnDerivePCEProt4;
+				pfnDerivePxEProt8 = psMMUContext->psDevAttrs->pfnDerivePCEProt8;
+				break;
+
+		case MMU_LEVEL_2:
+				pfnDerivePxEProt4 = psMMUContext->psDevAttrs->pfnDerivePDEProt4;
+				pfnDerivePxEProt8 = psMMUContext->psDevAttrs->pfnDerivePDEProt8;
+				break;
+
+		case MMU_LEVEL_1:
+				pfnDerivePxEProt4 = psMMUContext->psDevAttrs->pfnDerivePTEProt4;
+				pfnDerivePxEProt8 = psMMUContext->psDevAttrs->pfnDerivePTEProt8;
+				break;
+
+		default:
+				PVR_DPF((PVR_DBG_ERROR, "%s: invalid MMU level", __func__));
+				return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* Map the Page Catalogue into CPU virtual memory */
+	eError = _MMU_MapCPUVAddr(psMemDesc);
+	if(eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: failed to map Px to CPU", __func__));
+		return PVRSRV_ERROR_FAILED_TO_MAP_PAGE_TABLE;
+	}
+
+	/* How big is a PxE in bytes? */
+	/* Filling the actual Px entry with an address */
+	switch(psConfig->uiBytesPerEntry)
+	{
+		case 4:
+		{
+			IMG_UINT32 *pui32Px;
+			IMG_UINT64 ui64PxE64;
+
+			pui32Px = psMemDesc->pvCpuVAddr; /* Give the virtual base address of Px */
+
+			ui64PxE64 = psDevPAddr->uiAddr /* Calculate the offset to that base */
+							>> psConfig->uiLog2Align /* Shift away the useless bits, because the alignment is very coarse and we address by alignment */
+							<< psConfig->uiAddrShift /* Shift back to fit address in the Px entry */
+							& psConfig->uiAddrMask; /* Delete unused bits */
+
+			ui64PxE64 |= (IMG_UINT64)pfnDerivePxEProt4(uiProtFlags);
+			/* assert that the result fits into 32 bits before writing
+			   it into the 32-bit array with a cast */
+			PVR_ASSERT(ui64PxE64 == (ui64PxE64 & 0xffffffffU));
+
+			/* We should never invalidate an invalid page */
+			if (uiProtFlags & MMU_PROTFLAGS_INVALID)
+			{
+				PVR_ASSERT(pui32Px[uiIndex] != ui64PxE64);
+			}
+			pui32Px[uiIndex] = (IMG_UINT32) ui64PxE64;
+			_MMU_LogPxEModification(psLevel,
+									uiIndex,
+									(uiProtFlags & MMU_PROTFLAGS_INVALID)?MMU_MOD_UNMAP:MMU_MOD_MAP,
+									ui64PxE64);
+			break;	
+		}
+		case 8:
+		{
+			IMG_UINT64 *pui64Px = psMemDesc->pvCpuVAddr; /* Give the virtual base address of Px */
+			
+			pui64Px[uiIndex] = psDevPAddr->uiAddr  /* Calculate the offset to that base */
+								>> psConfig->uiLog2Align
+								<< psConfig->uiAddrShift
+								& psConfig->uiAddrMask;
+			pui64Px[uiIndex] |= pfnDerivePxEProt8(uiProtFlags, uiLog2PageSize);
+
+			_MMU_LogPxEModification(psLevel,
+									uiIndex,
+									(uiProtFlags & MMU_PROTFLAGS_INVALID)?MMU_MOD_UNMAP:MMU_MOD_MAP,
+									pui64Px[uiIndex]);
+			break;	
+		}
+		default:
+			PVR_DPF((PVR_DBG_ERROR, "%s: PxE size not supported (%d) for level %d",
+									__func__, psConfig->uiBytesPerEntry, eMMULevel));
+
+			_MMU_UnmapCPUVAddr(psMemDesc);
+
+			return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+#if defined (PDUMP)
+	PDumpMMUDumpPxEntries(eMMULevel,
+						  psDevNode->pszMMUPxPDumpMemSpaceName,
+						  psMemDesc->pvCpuVAddr,
+						  psMemDesc->sDevPAddr,
+						  uiIndex,
+						  1,
+						  pszMemspaceName,
+						  pszSymbolicAddr,
+						  uiSymbolicAddrOffset,
+						  psConfig->uiBytesPerEntry,
+						  psConfig->uiLog2Align,
+						  psConfig->uiAddrShift,
+						  psConfig->uiAddrMask,
+						  psConfig->uiProtMask,
+						  0);
+#endif
+
+	psDevNode->pfnMMUCacheInvalidate(psDevNode, psMMUContext->hDevData,
+									 eMMULevel,
+									 (uiProtFlags & MMU_PROTFLAGS_INVALID)?IMG_TRUE:IMG_FALSE);
+
+
+	/* unmap the memory from the CPU (may be a no-op) */
+	eError = _MMU_UnmapCPUVAddr(psMemDesc);
+	if(eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: failed to release the CPU mapping", __func__));
+		return PVRSRV_ERROR_MMU_FAILED_TO_UNMAP_PAGE_TABLE;
+	}
+	
+	return PVRSRV_OK;
+}
+
+/*****************************************************************************
+ *                   MMU host control functions (Level Info)                 *
+ *****************************************************************************/
+
+
+/*************************************************************************/ /*!
+@Function       _MMU_FreeLevel
+
+@Description    Recursively frees the specified range of Px entries. If any
+                level has its last reference dropped then the MMU object
+                memory and the MMU_Levelx_Info will be freed.
+
+				At each level we might be crossing a boundary from one Px to
+				another. The values for auiStartArray should be by used for
+				the first call into each level and the values in auiEndArray
+				should only be used in the last call for each level.
+				In order to determine if this is the first/last call we pass
+				in bFirst and bLast.
+				When one level calls down to the next only if bFirst/bLast is set
+				and it's the first/last iteration of the loop at its level will
+				bFirst/bLast set for the next recursion.
+				This means that each iteration has the knowledge of the previous
+				level which is required.
+
+@Input          psMMUContext    MMU context to operate on
+
+@Input          psLevel                 Level info on which to to free the
+                                        specified range
+
+@Input          auiStartArray           Array of start indexes (one for each level)
+
+@Input          auiEndArray             Array of end indexes (one for each level)
+
+@Input          auiEntriesPerPxArray    Array of number of entries for the Px
+                                        (one for each level)
+
+@Input          apsConfig               Array of PxE configs (one for each level)
+
+@Input          aeMMULevel              Array of MMU levels (one for each level)
+
+@Input          pui32CurrentLevel       Pointer to a variable which is set to our
+                                        current level 
+
+@Input          uiStartIndex            Start index of the range to free
+
+@Input          uiEndIndex              End index of the range to free
+
+@Input			bFirst                  This is the first call for this level
+
+@Input			bLast                   This is the last call for this level
+
+@Return         IMG_TRUE if the last reference to psLevel was dropped
+*/
+/*****************************************************************************/
+static IMG_BOOL _MMU_FreeLevel(MMU_CONTEXT *psMMUContext,
+								MMU_Levelx_INFO *psLevel,
+								IMG_UINT32 auiStartArray[],
+								IMG_UINT32 auiEndArray[],
+								IMG_UINT32 auiEntriesPerPxArray[],
+								const MMU_PxE_CONFIG *apsConfig[],
+								MMU_LEVEL aeMMULevel[],
+								IMG_UINT32 *pui32CurrentLevel,
+								IMG_UINT32 uiStartIndex,
+								IMG_UINT32 uiEndIndex,
+								IMG_BOOL bFirst,
+								IMG_BOOL bLast)
+{
+	IMG_UINT32 uiThisLevel = *pui32CurrentLevel;
+	const MMU_PxE_CONFIG *psConfig = apsConfig[uiThisLevel];
+	IMG_UINT32 i;
+	IMG_BOOL bFreed = IMG_FALSE;
+	IMG_UINT8 uiLog2PageSize;
+
+	/* Sanity check */
+	PVR_ASSERT(*pui32CurrentLevel < MMU_MAX_LEVEL);
+	PVR_ASSERT(psLevel != IMG_NULL);
+
+	MMU_OBJ_DBG((PVR_DBG_ERROR, "_MMU_FreeLevel: level = %d, range %d - %d, refcount = %d",
+				aeMMULevel[uiThisLevel], uiStartIndex,
+				uiEndIndex, psLevel->ui32RefCount));
+
+	for (i = uiStartIndex;(i < uiEndIndex) && (psLevel != IMG_NULL);i++)
+	{
+		if (aeMMULevel[uiThisLevel] != MMU_LEVEL_1)
+		{
+			MMU_Levelx_INFO *psNextLevel = psLevel->apsNextLevel[i];
+			IMG_UINT32 uiNextStartIndex;
+			IMG_UINT32 uiNextEndIndex;
+			IMG_BOOL bNextFirst;
+			IMG_BOOL bNextLast;
+
+			/* If we're crossing a Px then the start index changes */
+			if (bFirst && (i == uiStartIndex))
+			{
+				uiNextStartIndex = auiStartArray[uiThisLevel + 1];
+				bNextFirst = IMG_TRUE;
+			}
+			else
+			{
+				uiNextStartIndex = 0;
+				bNextFirst = IMG_FALSE;
+			}
+
+			/* If we're crossing a Px then the end index changes */
+			if (bLast && (i == (uiEndIndex - 1)))
+			{
+				uiNextEndIndex = auiEndArray[uiThisLevel + 1];
+				bNextLast = IMG_TRUE;
+			}
+			else
+			{
+				uiNextEndIndex = auiEntriesPerPxArray[uiThisLevel + 1];
+				bNextLast = IMG_FALSE;
+			}
+
+			/* Recurse into the next level */
+			(*pui32CurrentLevel)++;
+			if (_MMU_FreeLevel(psMMUContext, psNextLevel, auiStartArray,
+								auiEndArray, auiEntriesPerPxArray,
+								apsConfig, aeMMULevel, pui32CurrentLevel,
+								uiNextStartIndex, uiNextEndIndex,
+								bNextFirst, bNextLast))
+			{
+				/* The level below us is empty, drop the refcount and clear the pointer */
+				psLevel->ui32RefCount--;
+				psLevel->apsNextLevel[i] = IMG_NULL;
+
+				/* Level 1 PTE reprogramming is done in the unmap */
+				if (aeMMULevel[uiThisLevel] != MMU_LEVEL_1)
+				{
+					PVRSRV_ERROR eError;
+					/* Take the page size from the page table configs.
+					   Calculate array entry for PT config dependent on max MMU level */
+					uiLog2PageSize = apsConfig[MMU_MAX_LEVEL-1]->uiLog2Align;
+					/* Un-wire the entry */
+					eError = _SetupPxE(psMMUContext,
+									psLevel,
+									i,
+									psConfig,
+									aeMMULevel[uiThisLevel],
+									IMG_NULL,
+#if defined(PDUMP)
+									IMG_NULL,	/* Only required for data page */
+									IMG_NULL,	/* Only required for data page */
+									0,			/* Only required for data page */
+#endif
+									MMU_PROTFLAGS_INVALID,
+									uiLog2PageSize);		
+
+					PVR_ASSERT(eError == PVRSRV_OK);
+				}
+
+
+				/* Check we haven't wrapped around */
+				PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries);
+			}
+			(*pui32CurrentLevel)--;
+		}
+		else
+		{
+			psLevel->ui32RefCount--;
+		}
+
+		/*
+		   Free this level if it is no longer referenced, unless it's the base
+		   level in which case it's part of the MMU context and should be freed
+		   when the MMU context is freed
+		*/
+		if ((psLevel->ui32RefCount == 0) && (psLevel != &psMMUContext->sBaseLevelInfo))
+		{
+			_PxMemFree(psMMUContext, &psLevel->sMemDesc, aeMMULevel[uiThisLevel]);
+			OSFreeMem(psLevel);
+			psLevel = IMG_NULL;
+			bFreed = IMG_TRUE;
+		}
+	}
+	MMU_OBJ_DBG((PVR_DBG_ERROR, "_MMU_FreeLevel end: level = %d, refcount = %d",
+				aeMMULevel[uiThisLevel], bFreed?0:psLevel->ui32RefCount));
+
+	return bFreed;
+}
+
+/*************************************************************************/ /*!
+@Function       _MMU_AllocLevel
+
+@Description    Recursively allocates the specified range of Px entries. If any
+                level has its last reference dropped then the MMU object
+                memory and the MMU_Levelx_Info will be freed.
+
+				At each level we might be crossing a boundary from one Px to
+				another. The values for auiStartArray should be by used for
+				the first call into each level and the values in auiEndArray
+				should only be used in the last call for each level.
+				In order to determine if this is the first/last call we pass
+				in bFirst and bLast.
+				When one level calls down to the next only if bFirst/bLast is set
+				and it's the first/last iteration of the loop at its level will
+				bFirst/bLast set for the next recursion.
+				This means that each iteration has the knowledge of the previous
+				level which is required.
+
+@Input          psMMUContext    MMU context to operate on
+
+@Input          psLevel                 Level info on which to to free the
+                                        specified range
+
+@Input          auiStartArray           Array of start indexes (one for each level)
+
+@Input          auiEndArray             Array of end indexes (one for each level)
+
+@Input          auiEntriesPerPxArray    Array of number of entries for the Px
+                                        (one for each level)
+
+@Input          apsConfig               Array of PxE configs (one for each level)
+
+@Input          aeMMULevel              Array of MMU levels (one for each level)
+
+@Input          pui32CurrentLevel       Pointer to a variable which is set to our
+                                        current level 
+
+@Input          uiStartIndex            Start index of the range to free
+
+@Input          uiEndIndex              End index of the range to free
+
+@Input			bFirst                  This is the first call for this level
+
+@Input			bLast                   This is the last call for this level
+
+@Return         IMG_TRUE if the last reference to psLevel was dropped
+*/
+/*****************************************************************************/
+static PVRSRV_ERROR _MMU_AllocLevel(MMU_CONTEXT *psMMUContext,
+									MMU_Levelx_INFO *psLevel,
+									IMG_UINT32 auiStartArray[],
+									IMG_UINT32 auiEndArray[],
+									IMG_UINT32 auiEntriesPerPxArray[],
+									const MMU_PxE_CONFIG *apsConfig[],
+									MMU_LEVEL aeMMULevel[],
+									IMG_UINT32 *pui32CurrentLevel,
+									IMG_UINT32 uiStartIndex,
+									IMG_UINT32 uiEndIndex,
+									IMG_BOOL bFirst,
+									IMG_BOOL bLast)
+{
+	IMG_UINT32 uiThisLevel = *pui32CurrentLevel; /* Starting with 0 */
+	const MMU_PxE_CONFIG *psConfig = apsConfig[uiThisLevel]; /* The table config for the current level */
+	PVRSRV_ERROR eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	IMG_UINT32 uiAllocState = 99; /* Debug info to check what progress was made in the function. Updated during this function. */
+	IMG_UINT32 i;
+
+	/* Sanity check */
+	PVR_ASSERT(*pui32CurrentLevel < MMU_MAX_LEVEL);
+
+	MMU_OBJ_DBG((PVR_DBG_ERROR, "_MMU_AllocLevel: level = %d, range %d - %d, refcount = %d",
+				aeMMULevel[uiThisLevel], uiStartIndex,
+				uiEndIndex, psLevel->ui32RefCount));
+
+	/* Go from uiStartIndex to uiEndIndex through the Px */
+	for (i = uiStartIndex;i < uiEndIndex;i++) 
+	{
+		/* Only try an allocation if this is not the last level */
+		/*Because a PT allocation is already done while setting the entry in PD */
+		if (aeMMULevel[uiThisLevel] != MMU_LEVEL_1)
+		{
+			IMG_UINT32 uiNextStartIndex;
+			IMG_UINT32 uiNextEndIndex;
+			IMG_BOOL bNextFirst;
+			IMG_BOOL bNextLast;
+			IMG_UINT8 uiLog2PageSize;
+
+			/* If there is already a next Px level existing, do not allocate it */
+			if (!psLevel->apsNextLevel[i])
+			{
+				MMU_Levelx_INFO *psNextLevel;
+				IMG_UINT32 ui32AllocSize;
+				IMG_UINT32 uiNextEntries;
+
+				/* Allocate and setup the next level */
+				uiNextEntries = auiEntriesPerPxArray[uiThisLevel + 1];
+				ui32AllocSize = sizeof(MMU_Levelx_INFO);
+				if (aeMMULevel[uiThisLevel + 1] != MMU_LEVEL_1)
+				{
+					ui32AllocSize += sizeof(MMU_Levelx_INFO *) * (uiNextEntries - 1);
+				}
+				psNextLevel = OSAllocMem(ui32AllocSize);
+				if (psNextLevel == IMG_NULL)
+				{
+					uiAllocState = 0;
+					goto e0;
+				}
+				OSMemSet(psNextLevel, 0, ui32AllocSize);
+
+				/* Hook in this level for next time */
+				psLevel->apsNextLevel[i] = psNextLevel;
+
+				psNextLevel->ui32NumOfEntries = uiNextEntries;
+				psNextLevel->ui32RefCount = 0;
+				/* Allocate Px memory for a sub level*/
+				eError = _PxMemAlloc(psMMUContext, uiNextEntries, apsConfig[uiThisLevel + 1],
+										aeMMULevel[uiThisLevel + 1],
+										&psNextLevel->sMemDesc,
+										psConfig->uiLog2Align);
+				if (eError != PVRSRV_OK)
+				{
+					uiAllocState = 1;
+					goto e0;
+				}
+
+				uiLog2PageSize = apsConfig[MMU_MAX_LEVEL-1]->uiLog2Align;
+				/* Wire up the entry */
+				eError = _SetupPxE(psMMUContext,
+									psLevel,
+									i,
+									psConfig,
+									aeMMULevel[uiThisLevel],
+									&psNextLevel->sMemDesc.sDevPAddr,
+#if defined(PDUMP)
+									IMG_NULL,	/* Only required for data page */
+									IMG_NULL,	/* Only required for data page */
+									0,			/* Only required for data page */
+#endif
+									0,
+									uiLog2PageSize);
+
+				if (eError != PVRSRV_OK)
+				{
+					uiAllocState = 2;
+					goto e0;
+				}
+
+				psLevel->ui32RefCount++;
+			}
+
+			/* If we're crossing a Px then the start index changes */
+			if (bFirst && (i == uiStartIndex))
+			{
+				uiNextStartIndex = auiStartArray[uiThisLevel + 1];
+				bNextFirst = IMG_TRUE;
+			}
+			else
+			{
+				uiNextStartIndex = 0;
+				bNextFirst = IMG_FALSE;
+			}
+
+			/* If we're crossing a Px then the end index changes */
+			if (bLast && (i == (uiEndIndex - 1)))
+			{
+				uiNextEndIndex = auiEndArray[uiThisLevel + 1];
+				bNextLast = IMG_TRUE;
+			}
+			else
+			{
+				uiNextEndIndex = auiEntriesPerPxArray[uiThisLevel + 1];
+				bNextLast = IMG_FALSE;
+			}
+
+			/* Recurse into the next level */
+			(*pui32CurrentLevel)++;
+			eError = _MMU_AllocLevel(psMMUContext, psLevel->apsNextLevel[i],
+										auiStartArray,
+										auiEndArray,
+										auiEntriesPerPxArray,
+										apsConfig,
+										aeMMULevel,
+										pui32CurrentLevel,
+										uiNextStartIndex,
+										uiNextEndIndex,
+										bNextFirst,
+										bNextLast);
+			(*pui32CurrentLevel)--;
+			if (eError != PVRSRV_OK)
+			{
+				uiAllocState = 2;
+				goto e0;
+			}
+		}
+		else
+		{
+			/* All we need to do for level 1 is bump the refcount */
+			psLevel->ui32RefCount++;
+		}
+		PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries);
+	}
+	MMU_OBJ_DBG((PVR_DBG_ERROR, "_MMU_AllocLevel end: level = %d, refcount = %d",
+				aeMMULevel[uiThisLevel], psLevel->ui32RefCount));
+	return PVRSRV_OK;
+
+e0:
+	/* Sanity check that we've not come down this route unexpectedly */
+	PVR_ASSERT(uiAllocState!=99);
+	PVR_DPF((PVR_DBG_ERROR, "_MMU_AllocLevel: Error %d allocating Px for level %d in stage %d"
+							,eError, aeMMULevel[uiThisLevel], uiAllocState));
+
+	/* the start value of index variable i is nor initialized on purpose 
+	   indeed this for loop deinitialize what has already been initialized 
+	   just before failing in reverse order. So the i index has already the
+	   right value. */
+	for (/* i already set */ ; i>= uiStartIndex  &&  i< uiEndIndex; i--)
+	{
+		switch(uiAllocState)
+		{
+			IMG_UINT32 uiNextStartIndex;
+			IMG_UINT32 uiNextEndIndex;
+			IMG_BOOL bNextFirst;
+			IMG_BOOL bNextLast;
+
+			case 3:
+					/* If we're crossing a Px then the start index changes */
+					if (bFirst && (i == uiStartIndex))
+					{
+						uiNextStartIndex = auiStartArray[uiThisLevel + 1];
+						bNextFirst = IMG_TRUE;
+					}
+					else
+					{
+						uiNextStartIndex = 0;
+						bNextFirst = IMG_FALSE;
+					}
+
+					/* If we're crossing a Px then the end index changes */
+					if (bLast && (i == (uiEndIndex - 1)))
+					{
+						uiNextEndIndex = auiEndArray[uiThisLevel + 1];
+						bNextLast = IMG_TRUE;
+					}
+					else
+					{
+						uiNextEndIndex = auiEntriesPerPxArray[uiThisLevel + 1];
+						bNextLast = IMG_FALSE;
+					}
+
+					if (aeMMULevel[uiThisLevel] != MMU_LEVEL_1)
+					{
+						(*pui32CurrentLevel)++;
+						if (_MMU_FreeLevel(psMMUContext, psLevel->apsNextLevel[i],
+											auiStartArray, auiEndArray,
+											auiEntriesPerPxArray, apsConfig,
+											aeMMULevel, pui32CurrentLevel,
+											uiNextStartIndex, uiNextEndIndex,
+											bNextFirst, bNextLast))
+						{
+							psLevel->ui32RefCount--;
+							psLevel->apsNextLevel[i] = IMG_NULL;
+
+							/* Check we haven't wrapped around */
+							PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries);
+						}
+						(*pui32CurrentLevel)--;
+					}
+					else
+					{
+						/* We should never come down this path, but it's here
+						   for completeness */
+						psLevel->ui32RefCount--;
+
+						/* Check we haven't wrapped around */
+						PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries);
+					}
+			case 2:
+					if (psLevel->apsNextLevel[i] != IMG_NULL  &&
+					    psLevel->apsNextLevel[i]->ui32RefCount == 0)
+					{
+						_PxMemFree(psMMUContext, &psLevel->sMemDesc,
+									aeMMULevel[uiThisLevel]);
+					}
+			case 1:
+					if (psLevel->apsNextLevel[i] != IMG_NULL  &&
+					    psLevel->apsNextLevel[i]->ui32RefCount == 0)
+					{
+						OSFreeMem(psLevel->apsNextLevel[i]);
+						psLevel->apsNextLevel[i] = IMG_NULL;
+					}
+			case 0:
+					uiAllocState = 3;
+					break;
+		}
+	}
+	return eError;
+}
+
+/*****************************************************************************
+ *                   MMU page table functions                                *
+ *****************************************************************************/
+
+/*************************************************************************/ /*!
+@Function       _MMU_GetLevelData
+
+@Description    Get the all the level data and calculates the indexes for the
+                specified address range
+
+@Input          psMMUContext            MMU context to operate on
+
+@Input          sDevVAddrStart          Start device virtual address
+
+@Input          sDevVAddrEnd            End device virtual address
+
+@Input          uiLog2DataPageSize      Log2 of the page size to use
+
+@Input          auiStartArray           Array of start indexes (one for each level)
+
+@Input          auiEndArray             Array of end indexes (one for each level)
+
+@Input          uiEntriesPerPxArray     Array of number of entries for the Px
+                                        (one for each level)
+
+@Input          apsConfig               Array of PxE configs (one for each level)
+
+@Input          aeMMULevel              Array of MMU levels (one for each level)
+@Input          apsConfig               Array of PxE configs (one for each level)
+
+@Input          aeMMULevel              Array of MMU levels (one for each level)
+
+@Input          pui32CurrentLevel       Pointer to a variable which is set to our
+                                        current level 
+
+@Input          uiStartIndex            Start index of the range to free
+
+@Input          uiEndIndex              End index of the range to free
+
+@Return         IMG_TRUE if the last reference to psLevel was dropped
+*/
+/*****************************************************************************/
+static IMG_VOID _MMU_GetLevelData(MMU_CONTEXT *psMMUContext,
+									IMG_DEV_VIRTADDR sDevVAddrStart,
+									IMG_DEV_VIRTADDR sDevVAddrEnd,
+									IMG_UINT32 uiLog2DataPageSize,
+									IMG_UINT32 auiStartArray[],
+									IMG_UINT32 auiEndArray[],
+									IMG_UINT32 auiEntriesPerPx[],
+									const MMU_PxE_CONFIG *apsConfig[],
+									MMU_LEVEL aeMMULevel[],
+									const MMU_DEVVADDR_CONFIG **ppsMMUDevVAddrConfig,
+									IMG_HANDLE *phPriv)
+{
+	const MMU_PxE_CONFIG *psMMUPDEConfig;
+	const MMU_PxE_CONFIG *psMMUPTEConfig;
+	const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+	MMU_DEVICEATTRIBS *psDevAttrs = psMMUContext->psDevAttrs;
+	PVRSRV_ERROR eError;
+	IMG_UINT32 i = 0;
+
+	eError = psDevAttrs->pfnGetPageSizeConfiguration(uiLog2DataPageSize,
+														&psMMUPDEConfig,
+														&psMMUPTEConfig,
+														ppsMMUDevVAddrConfig,
+														phPriv);
+	PVR_ASSERT(eError == PVRSRV_OK);
+	
+	psDevVAddrConfig = *ppsMMUDevVAddrConfig;
+
+	if (psDevVAddrConfig->uiPCIndexMask != 0)
+	{
+		auiStartArray[i] = _CalcPCEIdx(sDevVAddrStart, psDevVAddrConfig, IMG_FALSE);
+		auiEndArray[i] = _CalcPCEIdx(sDevVAddrEnd, psDevVAddrConfig, IMG_TRUE);
+		auiEntriesPerPx[i] = (IMG_UINT32)UNITS_IN_BITFIELD(psDevVAddrConfig->uiPCIndexMask,
+													psDevVAddrConfig->uiPCIndexShift);
+		apsConfig[i] = psDevAttrs->psBaseConfig;
+		aeMMULevel[i] = MMU_LEVEL_3;
+		i++;
+	}
+
+	if (psDevVAddrConfig->uiPDIndexMask != 0)
+	{
+		auiStartArray[i] = _CalcPDEIdx(sDevVAddrStart, psDevVAddrConfig, IMG_FALSE);
+		auiEndArray[i] = _CalcPDEIdx(sDevVAddrEnd, psDevVAddrConfig, IMG_TRUE);
+		auiEntriesPerPx[i] = (IMG_UINT32)UNITS_IN_BITFIELD(psDevVAddrConfig->uiPDIndexMask,
+													psDevVAddrConfig->uiPDIndexShift);
+		if (i == 0)
+		{
+			apsConfig[i] = psDevAttrs->psBaseConfig;
+		}
+		else
+		{
+			apsConfig[i] = psMMUPDEConfig;
+		}
+		aeMMULevel[i] = MMU_LEVEL_2;
+		i++;
+	}
+
+	/*
+		There is always a PTE entry so we have a slightly different behaviour than above.
+		E.g. for 2 MB RGX pages the uiPTIndexMask is 0x0000000000 but still there
+		is a PT with one entry.
+
+	*/
+	auiStartArray[i] = _CalcPTEIdx(sDevVAddrStart, psDevVAddrConfig, IMG_FALSE);
+	if (psDevVAddrConfig->uiPTIndexMask !=0)
+	{
+		auiEndArray[i] = _CalcPTEIdx(sDevVAddrEnd, psDevVAddrConfig, IMG_TRUE);
+	}
+	else
+	{
+		/*
+			If the PTE mask is zero it means there is only 1 PTE and thus
+			the start and end array are one in the same
+		*/
+		auiEndArray[i] = auiStartArray[i];
+	}
+	auiEntriesPerPx[i] = (IMG_UINT32)UNITS_IN_BITFIELD(psDevVAddrConfig->uiPTIndexMask,
+													psDevVAddrConfig->uiPTIndexShift);
+	if (i == 0)
+	{
+		apsConfig[i] = psDevAttrs->psBaseConfig;
+	}
+	else
+	{
+		apsConfig[i] = psMMUPTEConfig;
+	}
+	aeMMULevel[i] = MMU_LEVEL_1;
+}
+
+static IMG_VOID _MMU_PutLevelData(MMU_CONTEXT *psMMUContext, IMG_HANDLE hPriv)
+{
+	MMU_DEVICEATTRIBS *psDevAttrs = psMMUContext->psDevAttrs;
+
+	psDevAttrs->pfnPutPageSizeConfiguration(hPriv);
+}
+
+/*************************************************************************/ /*!
+@Function       _AllocPageTables
+
+@Description    Allocate page tables and any higher level MMU objects required
+                for the specified virtual range
+
+@Input          psMMUContext            MMU context to operate on
+
+@Input          sDevVAddrStart          Start device virtual address
+
+@Input          sDevVAddrEnd            End device virtual address
+
+@Input          uiProtFlags             Generic MMU protection flags
+
+@Return         PVRSRV_OK if the allocation was successful
+*/
+/*****************************************************************************/
+static PVRSRV_ERROR
+_AllocPageTables(MMU_CONTEXT *psMMUContext,
+                 IMG_DEV_VIRTADDR sDevVAddrStart,
+                 IMG_DEV_VIRTADDR sDevVAddrEnd,
+                 MMU_FLAGS_T uiProtFlags,
+                 IMG_UINT8 uiLog2PageSize)
+{
+	PVRSRV_ERROR eError;
+	IMG_UINT32 auiStartArray[MMU_MAX_LEVEL];
+	IMG_UINT32 auiEndArray[MMU_MAX_LEVEL];
+	IMG_UINT32 auiEntriesPerPx[MMU_MAX_LEVEL];
+	MMU_LEVEL aeMMULevel[MMU_MAX_LEVEL];
+	const MMU_PxE_CONFIG *apsConfig[MMU_MAX_LEVEL];
+	const MMU_DEVVADDR_CONFIG	*psDevVAddrConfig;
+	IMG_HANDLE hPriv;
+	IMG_UINT32 ui32CurrentLevel = 0;
+
+
+	PVR_DPF((PVR_DBG_ALLOC,
+			 "_AllocPageTables: vaddr range: 0x%010llx:0x%010llx",
+			 sDevVAddrStart.uiAddr,
+			 sDevVAddrEnd.uiAddr
+			 ));
+
+#if defined(PDUMP)
+	PDUMPCOMMENT("Allocating page tables for %llu bytes virtual range: 0x%010llX to 0x%010llX",
+				(IMG_UINT64)sDevVAddrEnd.uiAddr - (IMG_UINT64)sDevVAddrStart.uiAddr,
+                 (IMG_UINT64)sDevVAddrStart.uiAddr,
+                 (IMG_UINT64)sDevVAddrEnd.uiAddr);
+#endif
+
+	_MMU_GetLevelData(psMMUContext, sDevVAddrStart, sDevVAddrEnd,
+						(IMG_UINT32) uiLog2PageSize, auiStartArray, auiEndArray,
+						auiEntriesPerPx, apsConfig, aeMMULevel,
+						&psDevVAddrConfig, &hPriv);
+
+	eError = _MMU_AllocLevel(psMMUContext, &psMMUContext->sBaseLevelInfo,
+								auiStartArray, auiEndArray, auiEntriesPerPx,
+								apsConfig, aeMMULevel, &ui32CurrentLevel,
+								auiStartArray[0], auiEndArray[0],
+								IMG_TRUE, IMG_TRUE);
+
+	_MMU_PutLevelData(psMMUContext, hPriv);
+
+	return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       _FreePageTables
+
+@Description    Free page tables and any higher level MMU objects at are no
+                longer referenced for the specified virtual range
+
+@Input          psMMUContext            MMU context to operate on
+
+@Input          sDevVAddrStart          Start device virtual address
+
+@Input          sDevVAddrEnd            End device virtual address
+
+@Return         None
+*/
+/*****************************************************************************/
+static IMG_VOID _FreePageTables(MMU_CONTEXT *psMMUContext,
+                                   IMG_DEV_VIRTADDR sDevVAddrStart,
+                                   IMG_DEV_VIRTADDR sDevVAddrEnd,
+                                   IMG_UINT8 uiLog2PageSize)
+{
+	IMG_UINT32 auiStartArray[MMU_MAX_LEVEL];
+	IMG_UINT32 auiEndArray[MMU_MAX_LEVEL];
+	IMG_UINT32 auiEntriesPerPx[MMU_MAX_LEVEL];
+	MMU_LEVEL aeMMULevel[MMU_MAX_LEVEL];
+	const MMU_PxE_CONFIG *apsConfig[MMU_MAX_LEVEL];
+	const MMU_DEVVADDR_CONFIG	*psDevVAddrConfig;
+	IMG_UINT32 ui32CurrentLevel = 0;
+	IMG_HANDLE hPriv;
+
+
+	PVR_DPF((PVR_DBG_ALLOC,
+			 "_FreePageTables: vaddr range: 0x%010llx:0x%010llx",
+			 sDevVAddrStart.uiAddr,
+			 sDevVAddrEnd.uiAddr
+			 ));
+
+	_MMU_GetLevelData(psMMUContext, sDevVAddrStart, sDevVAddrEnd,
+						uiLog2PageSize, auiStartArray, auiEndArray,
+						auiEntriesPerPx, apsConfig, aeMMULevel,
+						&psDevVAddrConfig, &hPriv);
+
+	_MMU_FreeLevel(psMMUContext, &psMMUContext->sBaseLevelInfo,
+					auiStartArray, auiEndArray, auiEntriesPerPx,
+					apsConfig, aeMMULevel, &ui32CurrentLevel,
+					auiStartArray[0], auiEndArray[0],
+					IMG_TRUE, IMG_TRUE);
+
+	_MMU_PutLevelData(psMMUContext, hPriv);
+}
+
+
+/*************************************************************************/ /*!
+@Function       _MMU_GetPTInfo
+
+@Description    Get the PT level information and PT entry index for the specified
+                virtual address
+
+@Input          psMMUContext            MMU context to operate on
+
+@Input          psDevVAddr              Device virtual address to get the PTE info
+                                        from.
+
+@Input          psDevVAddrConfig        The current virtual address config obtained
+                                        by another function call before.
+
+@Output         psLevel                 Level info of the PT
+
+@Output         pui32PTEIndex           Index into the PT the address corresponds to
+
+@Return         None
+*/
+/*****************************************************************************/
+static INLINE IMG_VOID _MMU_GetPTInfo(MMU_CONTEXT                *psMMUContext,
+                                      IMG_DEV_VIRTADDR            sDevVAddr,
+                                      const MMU_DEVVADDR_CONFIG  *psDevVAddrConfig,
+                                      MMU_Levelx_INFO           **psLevel,
+                                      IMG_UINT32                 *pui32PTEIndex)
+{
+	MMU_Levelx_INFO *psLocalLevel = IMG_NULL;
+
+	IMG_UINT32 uiPCEIndex;
+	IMG_UINT32 uiPDEIndex;
+
+	switch(psMMUContext->psDevAttrs->eTopLevel)
+	{
+		case MMU_LEVEL_3:
+			/* find the page directory containing the PCE */
+			uiPCEIndex = _CalcPCEIdx(sDevVAddr, psDevVAddrConfig, IMG_FALSE);
+			psLocalLevel = psMMUContext->sBaseLevelInfo.apsNextLevel[uiPCEIndex];
+
+		case MMU_LEVEL_2:
+			/* find the page table containing the PDE */
+			uiPDEIndex = _CalcPDEIdx(sDevVAddr, psDevVAddrConfig, IMG_FALSE);
+			if (psLocalLevel != IMG_NULL)
+			{
+				psLocalLevel = psLocalLevel->apsNextLevel[uiPDEIndex];
+			}
+			else
+			{
+				psLocalLevel = psMMUContext->sBaseLevelInfo.apsNextLevel[uiPDEIndex];
+			}
+
+		case MMU_LEVEL_1:
+			/* find PTE index into page table */
+			*pui32PTEIndex = _CalcPTEIdx(sDevVAddr, psDevVAddrConfig, IMG_FALSE);
+			if (psLocalLevel == IMG_NULL)
+			{
+				psLocalLevel = &psMMUContext->sBaseLevelInfo;
+			}
+			break;
+
+		default:
+			PVR_DPF((PVR_DBG_ERROR, "_MMU_GetPTEInfo: Invalid MMU level"));
+			return;
+	}
+
+	*psLevel = psLocalLevel;
+}
+
+/*************************************************************************/ /*!
+@Function       _MMU_GetPTConfig
+
+@Description    Get the level config. Call _MMU_PutPTConfig after use!
+
+@Input          psMMUContext            MMU context to operate on
+
+@Input          uiLog2DataPageSize      Log 2 of the page size
+
+@Output         ppsConfig               Config of the PTE
+
+@Output         phPriv                  Private data handle to be passed back
+                                        when the info is put
+
+@Output         ppsDevVAddrConfig       Config of the device virtual addresses
+
+@Return         None
+*/
+/*****************************************************************************/
+static INLINE IMG_VOID _MMU_GetPTConfig(MMU_CONTEXT               *psMMUContext,
+                                       IMG_UINT32                  uiLog2DataPageSize,
+                                       const MMU_PxE_CONFIG      **ppsConfig,
+                                       IMG_HANDLE                 *phPriv,
+                                       const MMU_DEVVADDR_CONFIG **ppsDevVAddrConfig)
+{
+	MMU_DEVICEATTRIBS *psDevAttrs = psMMUContext->psDevAttrs;
+	const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+	const MMU_PxE_CONFIG *psPDEConfig;
+	const MMU_PxE_CONFIG *psPTEConfig;
+
+	if (psDevAttrs->pfnGetPageSizeConfiguration(uiLog2DataPageSize,
+	                                            &psPDEConfig,
+	                                            &psPTEConfig,
+	                                            &psDevVAddrConfig,
+	                                            phPriv) != PVRSRV_OK)
+	{
+		/*
+		   There should be no way we got here unless uiLog2DataPageSize
+		   has changed after the MMU_Alloc call (in which case it's a bug in
+		   the MM code)
+		*/
+		PVR_DPF((PVR_DBG_ERROR, "_MMU_GetPTConfig: Could not get valid page size config"));
+		PVR_ASSERT(0);
+	}
+
+	*ppsConfig = psPTEConfig;
+	*ppsDevVAddrConfig = psDevVAddrConfig;
+}
+
+/*************************************************************************/ /*!
+@Function       _MMU_PutPTConfig
+
+@Description    Put the level info. Has to be called after _MMU_GetPTConfig to
+                ensure correct refcounting.
+
+@Input          psMMUContext            MMU context to operate on
+
+@Input          phPriv                  Private data handle created by
+                                        _MMU_GetPTConfig.
+
+@Return         None
+*/
+/*****************************************************************************/
+static INLINE IMG_VOID _MMU_PutPTConfig(MMU_CONTEXT *psMMUContext,
+                                 IMG_HANDLE hPriv)
+{
+	MMU_DEVICEATTRIBS *psDevAttrs = psMMUContext->psDevAttrs;
+
+	if( psDevAttrs->pfnPutPageSizeConfiguration(hPriv) != PVRSRV_OK )
+	{
+		PVR_DPF((PVR_DBG_ERROR, "_MMU_GetPTConfig: Could not put page size config"));
+		PVR_ASSERT(0);
+	}
+
+}
+
+/*****************************************************************************
+ *                     Public interface functions                            *
+ *****************************************************************************/
+
+/*
+	MMU_ContextCreate
+*/
+PVRSRV_ERROR
+MMU_ContextCreate(PVRSRV_DEVICE_NODE *psDevNode,
+                   MMU_CONTEXT **ppsMMUContext)
+{
+	MMU_CONTEXT *psMMUContext;
+	MMU_DEVICEATTRIBS *psDevAttrs;
+	const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+	const MMU_PxE_CONFIG *psConfig;
+	MMU_PHYSMEM_CONTEXT *psCtx;
+	IMG_UINT32 ui32BaseObjects;
+	IMG_UINT32 ui32Size;
+	IMG_CHAR sBuf[40];
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	psDevAttrs = psDevNode->psMMUDevAttrs;
+	psConfig = psDevAttrs->psBaseConfig;
+	psDevVAddrConfig = psDevAttrs->psTopLevelDevVAddrConfig;
+
+	switch(psDevAttrs->eTopLevel)
+	{
+		case MMU_LEVEL_3:	ui32BaseObjects = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(psDevVAddrConfig->uiPCIndexMask,
+													psDevVAddrConfig->uiPCIndexShift));
+								break;
+
+		case MMU_LEVEL_2:	ui32BaseObjects = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(psDevVAddrConfig->uiPDIndexMask,
+													psDevVAddrConfig->uiPDIndexShift));
+								break;
+
+		case MMU_LEVEL_1:	ui32BaseObjects = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(psDevVAddrConfig->uiPTIndexMask,
+													psDevVAddrConfig->uiPTIndexShift));
+								break;
+
+		default:
+								PVR_DPF((PVR_DBG_ERROR, "MMU_ContextCreate: Invalid MMU config"));
+								eError = PVRSRV_ERROR_INVALID_PARAMS;
+								goto e0;
+	}
+
+	/* Allocate the MMU context with the Level 1 Px info's */
+	ui32Size = sizeof(MMU_CONTEXT) + 
+						((ui32BaseObjects - 1) * sizeof(MMU_Levelx_INFO *));
+
+	psMMUContext = OSAllocMem(ui32Size);
+	if (psMMUContext == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "MMU_ContextCreate: ERROR call to OSAllocMem failed"));
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e0;
+	}
+
+	OSMemSet (psMMUContext, 0, ui32Size);
+
+#if defined(PDUMP)
+	/* Clear the refcount */
+	psMMUContext->ui32PDumpContextIDRefCount = 0;
+#endif
+	/* Record Device specific attributes in the context for subsequent use */
+	psMMUContext->psDevAttrs = psDevAttrs;
+	psMMUContext->psDevNode = psDevNode;
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+{
+	IMG_UINT32 ui32OSid, ui32OSidReg;
+	RetrieveOSidsfromPidList(OSGetCurrentProcessID(), &ui32OSid, &ui32OSidReg);
+	MMU_SetOSids(psMMUContext, ui32OSid, ui32OSidReg);
+}
+#endif
+
+	/* 
+	  Allocate physmem context and set it up
+	 */
+	psCtx = OSAllocMem(sizeof (MMU_PHYSMEM_CONTEXT));
+	if (psCtx == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "MMU_ContextCreate: ERROR call to OSAllocMem failed"));
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e1;
+	}
+	psMMUContext->psPhysMemCtx = psCtx;
+
+	OSMemSet (psCtx, 0, sizeof(MMU_PHYSMEM_CONTEXT));
+	psCtx->psDevNode = psDevNode;
+
+	OSSNPrintf(sBuf, sizeof(sBuf)-1, "pgtables %p", psCtx);
+	psCtx->uiPhysMemRANameAllocSize = OSStringLength(sBuf)+1;
+	psCtx->pszPhysMemRAName = OSAllocMem(psCtx->uiPhysMemRANameAllocSize);
+	if (psCtx->pszPhysMemRAName == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "MMU_ContextCreate: Out of memory"));
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e2;
+	}
+
+	OSStringCopy(psCtx->pszPhysMemRAName, sBuf);
+
+	psCtx->psPhysMemRA = RA_Create(psCtx->pszPhysMemRAName,
+									/* subsequent import */
+									psDevNode->uiMMUPxLog2AllocGran,
+									RA_LOCKCLASS_1,
+									_MMU_PhysMem_RAImportAlloc,
+									_MMU_PhysMem_RAImportFree,
+									psCtx /* priv */);
+	if (psCtx->psPhysMemRA == IMG_NULL)
+	{
+		OSFreeMem(psCtx->pszPhysMemRAName);
+		psCtx->pszPhysMemRAName = IMG_NULL;
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e3;
+	}
+
+	/* allocate the base level object */
+	/*
+	   Note: Although this is not required by the this file until
+	         the 1st allocation is made, a device specific callback
+	         might request the base object address so we allocate
+	         it up front.
+	*/
+	if (_PxMemAlloc(psMMUContext,
+							ui32BaseObjects,
+							psConfig,
+							psDevAttrs->eTopLevel,
+							&psMMUContext->sBaseLevelInfo.sMemDesc,
+							psConfig->uiLog2Align))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "MMU_ContextCreate: Failed to alloc level 1 object"));
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e4;
+	}
+
+	psMMUContext->sBaseLevelInfo.ui32NumOfEntries = ui32BaseObjects;
+	psMMUContext->sBaseLevelInfo.ui32RefCount = 0;
+
+	/* return context */
+	*ppsMMUContext = psMMUContext;
+
+	return PVRSRV_OK;
+
+e4:
+	RA_Delete(psCtx->psPhysMemRA);
+e3:
+	OSFreeMem(psCtx->pszPhysMemRAName);
+e2:
+	OSFreeMem(psCtx);
+e1:
+	OSFreeMem(psMMUContext);
+e0:
+	return eError;
+}
+
+/*
+	MMU_ContextDestroy
+*/
+IMG_VOID
+MMU_ContextDestroy (MMU_CONTEXT *psMMUContext)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+	PVR_DPF ((PVR_DBG_MESSAGE, "MMU_ContextDestroy: Enter"));
+
+	if (psPVRSRVData->eServicesState == PVRSRV_SERVICES_STATE_OK)
+	{
+		/* There should be no way to get here with live pages unless
+		   there is a bug in this module or the MM code */
+		PVR_ASSERT(psMMUContext->sBaseLevelInfo.ui32RefCount == 0);
+	}
+
+	/* Free the top level MMU object */
+	_PxMemFree(psMMUContext,
+				&psMMUContext->sBaseLevelInfo.sMemDesc,
+				psMMUContext->psDevAttrs->eTopLevel);
+
+	/* Free physmem context */
+	RA_Delete(psMMUContext->psPhysMemCtx->psPhysMemRA);
+	psMMUContext->psPhysMemCtx->psPhysMemRA = IMG_NULL;
+	OSFreeMem(psMMUContext->psPhysMemCtx->pszPhysMemRAName);
+	psMMUContext->psPhysMemCtx->pszPhysMemRAName = IMG_NULL;
+
+	OSFreeMem(psMMUContext->psPhysMemCtx);
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+	RemovePidOSidCoupling(OSGetCurrentProcessID());
+#endif
+
+	/* free the context itself. */
+	OSFreeMem(psMMUContext);
+	/*not nulling pointer, copy on stack*/
+
+	PVR_DPF ((PVR_DBG_MESSAGE, "MMU_ContextDestroy: Exit"));
+}
+
+/*
+	MMU_Alloc
+*/
+PVRSRV_ERROR
+MMU_Alloc (MMU_CONTEXT *psMMUContext,
+		   IMG_DEVMEM_SIZE_T uSize,
+		   IMG_DEVMEM_SIZE_T *puActualSize,
+           IMG_UINT32 uiProtFlags,
+		   IMG_DEVMEM_SIZE_T uDevVAddrAlignment,
+		   IMG_DEV_VIRTADDR *psDevVAddr,
+		   IMG_UINT8 uiLog2PageSize)
+{
+    PVRSRV_ERROR eError;
+    IMG_DEV_VIRTADDR sDevVAddrEnd;
+
+
+	const MMU_PxE_CONFIG *psPDEConfig;
+	const MMU_PxE_CONFIG *psPTEConfig;
+	const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+
+	MMU_DEVICEATTRIBS *psDevAttrs;
+	IMG_HANDLE hPriv;
+	
+#if !defined (DEBUG)
+	PVR_UNREFERENCED_PARAMETER(uDevVAddrAlignment);
+#endif
+
+	PVR_DPF ((PVR_DBG_MESSAGE, "MMU_Alloc: uSize=0x%010llx, uiProtFlags=0x%x, align=0x%010llx", uSize, uiProtFlags, uDevVAddrAlignment));
+
+	/* check params */
+	if (!psMMUContext || !psDevVAddr || !puActualSize)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"MMU_Alloc: invalid params"));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psDevAttrs = psMMUContext->psDevAttrs;
+
+	eError = psDevAttrs->pfnGetPageSizeConfiguration(uiLog2PageSize,
+													&psPDEConfig,
+													&psPTEConfig,
+													&psDevVAddrConfig,
+													&hPriv);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"MMU_Alloc: Failed to get config info (%d)", eError));
+		return eError;
+	}
+
+	/* size and alignment must be datapage granular */
+	if(((psDevVAddr->uiAddr & psDevVAddrConfig->uiPageOffsetMask) != 0)
+	|| ((uSize & psDevVAddrConfig->uiPageOffsetMask) != 0))
+	{
+		PVR_DPF((PVR_DBG_ERROR,"MMU_Alloc: invalid address or size granularity"));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	sDevVAddrEnd = *psDevVAddr;
+	sDevVAddrEnd.uiAddr += uSize;
+	eError = _AllocPageTables(psMMUContext, *psDevVAddr, sDevVAddrEnd, uiProtFlags, uiLog2PageSize);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"MMU_Alloc: _DeferredAllocPagetables failed"));
+        return PVRSRV_ERROR_MMU_FAILED_TO_ALLOCATE_PAGETABLES;
+	}
+
+	psDevAttrs->pfnPutPageSizeConfiguration(hPriv);
+
+	return PVRSRV_OK;
+}
+
+/*
+	MMU_Free
+*/
+IMG_VOID
+MMU_Free (MMU_CONTEXT *psMMUContext,
+          IMG_DEV_VIRTADDR sDevVAddr,
+          IMG_DEVMEM_SIZE_T uiSize,
+          IMG_UINT8 uiLog2PageSize)
+{
+	IMG_DEV_VIRTADDR sDevVAddrEnd;
+
+	if (psMMUContext == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "MMU_Free: invalid parameter"));
+		return;
+	}
+
+	PVR_DPF((PVR_DBG_MESSAGE, "MMU_Free: Freeing DevVAddr 0x%010llX",
+			 sDevVAddr.uiAddr));
+
+	/* ensure the address range to free is inside the heap */
+	sDevVAddrEnd = sDevVAddr;
+	sDevVAddrEnd.uiAddr += uiSize;
+
+	_FreePageTables(psMMUContext, sDevVAddr, sDevVAddrEnd, uiLog2PageSize);
+}
+
+/*
+	MMU_UnmapPages
+*/
+IMG_VOID
+MMU_UnmapPages (MMU_CONTEXT *psMMUContext,
+                IMG_DEV_VIRTADDR sDevVAddr,
+                IMG_UINT32 ui32PageCount,
+                IMG_UINT8 uiLog2PageSize)
+{
+	IMG_UINT32 uiPTEIndex = 0;
+	IMG_UINT32 uiPageSize = 1 << uiLog2PageSize;
+	MMU_Levelx_INFO *psLevel = IMG_NULL;
+	MMU_Levelx_INFO *psPrevLevel = IMG_NULL;
+	IMG_HANDLE hPriv;
+	const MMU_PxE_CONFIG *psConfig;
+	const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+	IMG_UINT64 uiProtFlags = 0;
+	MMU_PROTFLAGS_T uiMMUProtFlags = 0;
+
+#if defined PDUMP
+	PDUMPCOMMENT("Invalidate %d entries in page tables for virtual range: 0x%010llX to 0x%010llX",
+	             ui32PageCount,
+	             (IMG_UINT64)sDevVAddr.uiAddr,
+	             ((IMG_UINT64)sDevVAddr.uiAddr) + (uiPageSize*ui32PageCount)-1);
+#endif
+
+	/* Get PT and address configs */
+	_MMU_GetPTConfig(psMMUContext, (IMG_UINT32) uiLog2PageSize,
+	                 &psConfig, &hPriv, &psDevVAddrConfig);
+
+	_MMU_ConvertDevMemFlags(IMG_TRUE,
+	                        0,
+	                        &uiMMUProtFlags);
+
+	/* Callback to get device specific protection flags */
+	if (psConfig->uiBytesPerEntry == 4)
+	{
+		uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt4(uiMMUProtFlags);
+	}
+	else if (psConfig->uiBytesPerEntry == 8)
+	{
+		uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt8(uiMMUProtFlags , uiLog2PageSize);
+	}
+
+
+	/* Unmap page by page */
+	while (ui32PageCount != 0)
+	{
+		psPrevLevel = psLevel;
+
+		/* Get index for table and table descriptor */
+		_MMU_GetPTInfo(psMMUContext, sDevVAddr,
+		               psDevVAddrConfig, &psLevel, &uiPTEIndex);
+
+		/* Map table if not done in previous loop*/
+		if ( psLevel != psPrevLevel ) {
+
+			if ( psPrevLevel != IMG_NULL )
+			{
+				if (_MMU_UnmapCPUVAddr( &(psPrevLevel->sMemDesc) ) != PVRSRV_OK )
+				{
+					goto e0;
+				}
+			}
+
+			if ( _MMU_MapCPUVAddr( &(psLevel->sMemDesc) ) != PVRSRV_OK )
+			{
+				goto e0;
+			}
+		}
+
+		/* Set the PT entry to invalid and poison it with a bad address */
+		if (_SetupPTE(psMMUContext,
+		              psLevel,
+		              uiPTEIndex,
+		              psConfig,
+		              &gsBadDevPhyAddr,
+		              IMG_TRUE,
+#if defined(PDUMP)
+		              IMG_NULL, IMG_NULL, 0U,
+#endif
+		              uiProtFlags) != PVRSRV_OK )
+		{
+			goto e1;
+		}
+
+		/* Check we haven't wrapped around */
+		PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries);
+		ui32PageCount--;
+		sDevVAddr.uiAddr += uiPageSize;
+	}
+
+	/* Unmap the last used table */
+	if (psLevel != IMG_NULL)
+	{
+		if (_MMU_UnmapCPUVAddr( &(psLevel->sMemDesc) ) != PVRSRV_OK )
+		{
+			goto e0;
+		}
+	}
+
+	_MMU_PutPTConfig(psMMUContext, hPriv);
+
+	/* Flush TLB for PTs*/
+	psMMUContext->psDevNode->pfnMMUCacheInvalidate(psMMUContext->psDevNode,
+	                                               psMMUContext->hDevData,
+	                                               MMU_LEVEL_1,
+	                                               IMG_TRUE);
+
+	return;
+
+e1:
+	_MMU_UnmapCPUVAddr( &(psLevel->sMemDesc) );
+	_MMU_PutPTConfig(psMMUContext, hPriv);
+	PVR_DPF((PVR_DBG_ERROR, "MMU_UnmapPages: _SetupPTE failed"));
+	PVR_ASSERT(0);
+	return;
+e0:
+	_MMU_PutPTConfig(psMMUContext, hPriv);
+	PVR_DPF((PVR_DBG_ERROR, "MMU_UnmapPages: Failed to map/unmap page table"));
+	PVR_ASSERT(0);
+	return;
+}
+
+/*
+	MMU_MapPMR
+
+	N.B.  The caller _must_ have already called
+	"PMRLockSysPhysAddr()" on this PMR _before_ calling MMU_MapPMR.
+	Why?  (i) Because we really want this module to concentrate on
+	page table management, and interacts the absolute minimum with
+	the PMR; and (ii) because in the future we may map PMRs in
+	partially (e.g. demand-paging scenario) and it would not be
+	right to call locksysphysaddr on each individual mapping; and
+	(iii) we've already got "unmap pages" where we don't have the
+	PMR handle (we could change the API, but I can't justify this
+	just for this).  However, it may be worth re-thinking this,
+	because we'll eventually want to support mixed page sizes
+	within one allocation (rather than it being a heap attribute)
+	so we may have to move more logic into the mmu code.
+*/
+PVRSRV_ERROR
+MMU_MapPMR (MMU_CONTEXT *psMMUContext,
+            IMG_DEV_VIRTADDR sDevVAddr,
+            const PMR *psPMR,
+            IMG_DEVMEM_SIZE_T uiSizeBytes,
+            PVRSRV_MEMALLOCFLAGS_T uiMappingFlags,
+            IMG_UINT8 uiLog2PageSize)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	IMG_UINT32 uiCount, i;
+	IMG_UINT32 ui32MappedCount = 0;
+	IMG_UINT32 uiPageSize = 1 << uiLog2PageSize;
+	IMG_UINT32 uiPTEIndex = 0;
+	IMG_UINT64 uiProtFlags;
+	MMU_PROTFLAGS_T uiMMUProtFlags = 0;
+	MMU_Levelx_INFO *psLevel = IMG_NULL;
+	MMU_Levelx_INFO *psPrevLevel = IMG_NULL;
+	IMG_HANDLE hPriv;
+	const MMU_PxE_CONFIG *psConfig;
+	const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+	IMG_DEV_PHYADDR asDevPAddr[PMR_MAX_TRANSLATION_STACK_ALLOC];
+	IMG_BOOL abValid[PMR_MAX_TRANSLATION_STACK_ALLOC];
+	IMG_DEV_PHYADDR *psDevPAddr;
+	IMG_BOOL *pbValid;
+
+#if defined(PDUMP)
+	IMG_CHAR aszMemspaceName[PMR_MAX_MEMSPACE_NAME_LENGTH_DEFAULT];
+	IMG_CHAR aszSymbolicAddress[PMR_MAX_SYMBOLIC_ADDRESS_LENGTH_DEFAULT];
+	IMG_DEVMEM_OFFSET_T uiSymbolicAddrOffset;
+
+	PDUMPCOMMENT("Wire up Page Table entries to point to the Data Pages (%lld bytes)", uiSizeBytes);
+#endif /*PDUMP*/
+
+	/* We should verify the size and contiguity when supporting variable page size */
+
+	PVR_ASSERT (psMMUContext != IMG_NULL);
+	PVR_ASSERT (psPMR != IMG_NULL);
+
+	/* Get general PT and address configs */
+	_MMU_GetPTConfig(psMMUContext, (IMG_UINT32) uiLog2PageSize,
+	                 &psConfig, &hPriv, &psDevVAddrConfig);
+
+	_MMU_ConvertDevMemFlags(IMG_FALSE,
+	                        uiMappingFlags,
+	                        &uiMMUProtFlags);
+
+	/* Callback to get device specific protection flags */
+
+	if (psConfig->uiBytesPerEntry == 8)
+	{
+		uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt8(uiMMUProtFlags , uiLog2PageSize);
+	}
+	else if (psConfig->uiBytesPerEntry == 4)
+	{
+		uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt4(uiMMUProtFlags);
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR,"MMU_MapPMR: The page table entry byte length is not supported"));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto e0;
+	}
+
+	/* Allocate memory for page-frame-numbers and validity states,
+	   N.B. assert could be triggered by an illegal uiSizeBytes */
+	uiCount = uiSizeBytes >> uiLog2PageSize;
+	PVR_ASSERT((IMG_DEVMEM_OFFSET_T)uiCount << uiLog2PageSize == uiSizeBytes);
+    if (uiCount > PMR_MAX_TRANSLATION_STACK_ALLOC)
+    {
+		psDevPAddr = OSAllocMem(uiCount * sizeof(IMG_DEV_PHYADDR));
+		if (psDevPAddr == IMG_NULL)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "Failed to allocate PMR device PFN list"));
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto e0;
+		}
+
+		pbValid = OSAllocMem(uiCount * sizeof(IMG_BOOL));
+		if (pbValid == IMG_NULL)
+		{
+			/* Should allocation fail, clean-up here before exit */
+			PVR_DPF((PVR_DBG_ERROR, "Failed to allocate PMR device PFN state"));
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;			
+			OSFreeMem(psDevPAddr);			
+			goto e0;
+		}
+    }
+	else
+	{
+		psDevPAddr = asDevPAddr;
+		pbValid	= abValid;
+	}
+
+	/* "uiSize" is the amount of contiguity in the underlying
+	   page.  Normally this would be constant for the system, but,
+	   that constant needs to be communicated, in case it's ever
+	   different; caller guarantees that PMRLockSysPhysAddr() has
+	   already been called */
+	eError = PMR_DevPhysAddr(psPMR,
+							 uiLog2PageSize,
+							 uiCount,
+							 0,
+							 psDevPAddr,
+							 pbValid);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+
+	/* Map in all pages of that PMR page by page*/
+	for (i=0, uiCount=0; uiCount < uiSizeBytes; i++, uiCount += uiPageSize)
+	{
+#if defined(PDUMP)
+		IMG_DEVMEM_OFFSET_T uiNextSymName;
+#endif /*PDUMP*/
+
+		/*
+			The default value of the entry is invalid so we don't need to mark
+			it as such if the page wasn't valid, we just advance pass that address
+		*/
+		if (pbValid[i])
+		{
+			/* check the physical alignment of the memory to map */
+			PVR_ASSERT((psDevPAddr[i].uiAddr & (uiPageSize-1)) == 0);
+
+#if defined(DEBUG)
+{
+			IMG_UINT32 ui32BitLength = FloorLog2(psDevPAddr[i].uiAddr);
+			if (ui32BitLength > RGX_FEATURE_PHYS_BUS_WIDTH )
+			{
+				PVR_DPF((PVR_DBG_ERROR,"_MMU_MapPage Failed. The physical address bitlength (%d) "
+				         "is greater than what the chip can handle (%d).",
+				         ui32BitLength, RGX_FEATURE_PHYS_BUS_WIDTH));
+
+				PVR_ASSERT(ui32BitLength <= RGX_FEATURE_PHYS_BUS_WIDTH );
+				eError = PVRSRV_ERROR_INVALID_PARAMS;
+				goto e1;
+			}
+}
+#endif /*DEBUG*/
+
+#if defined(PDUMP)
+			eError = PMR_PDumpSymbolicAddr(psPMR, uiCount,
+			                               sizeof(aszMemspaceName), &aszMemspaceName[0],
+			                               sizeof(aszSymbolicAddress), &aszSymbolicAddress[0],
+			                               &uiSymbolicAddrOffset,
+			                               &uiNextSymName);
+			PVR_ASSERT(eError == PVRSRV_OK);
+#endif /*PDUMP*/
+
+			psPrevLevel = psLevel;
+
+			/* Calculate PT index and get table descriptor */
+			_MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig,
+			                &psLevel, &uiPTEIndex);
+
+			/* Map table if not done in previous loop*/
+			if ( psLevel != psPrevLevel ) {
+
+				eError = _MMU_MapCPUVAddr( &(psLevel->sMemDesc) );
+				PVR_ASSERT(eError == PVRSRV_OK);
+
+				if ( psPrevLevel != IMG_NULL )
+				{
+					eError = _MMU_UnmapCPUVAddr( &(psPrevLevel->sMemDesc) );
+					PVR_ASSERT(eError == PVRSRV_OK);
+				}
+			}
+
+			/* Set the PT entry with the specified address and protection flags */
+			eError = _SetupPTE(psMMUContext, psLevel, uiPTEIndex,
+			                   psConfig, &psDevPAddr[i], IMG_FALSE,
+#if defined(PDUMP)
+			                   aszMemspaceName,
+			                   aszSymbolicAddress,
+			                   uiSymbolicAddrOffset,
+#endif /*PDUMP*/
+			                   uiProtFlags);
+
+	
+			if(eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "MMU_MapPMR: Mapping failed"));
+				goto e1;
+			}
+
+			PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries);
+			PVR_DPF ((PVR_DBG_MESSAGE,
+			          "MMU_MapPMR: devVAddr=%10llX, size=0x%x/0x%010llx",
+			          sDevVAddr.uiAddr,
+			          uiCount,
+			          uiSizeBytes));
+
+			ui32MappedCount++;
+		}
+
+		sDevVAddr.uiAddr += uiPageSize;
+	}
+
+	if (psLevel != IMG_NULL)
+	{
+		eError = _MMU_UnmapCPUVAddr( &(psLevel->sMemDesc) );
+		PVR_ASSERT(eError == PVRSRV_OK);
+	}
+
+	_MMU_PutPTConfig(psMMUContext, hPriv);
+
+	/* Flush TLB for PTs*/
+	psMMUContext->psDevNode->pfnMMUCacheInvalidate(psMMUContext->psDevNode,
+	                                               psMMUContext->hDevData,
+	                                               MMU_LEVEL_1,
+	                                               IMG_FALSE);
+
+#if defined(PDUMP)
+	PDUMPCOMMENT("Wired up %d Page Table entries (out of %d)", ui32MappedCount, i);
+#endif /*PDUMP*/
+
+e1:
+	if (psDevPAddr != asDevPAddr)
+	{
+		/* uiCount modified, check against stack address */
+		OSFreeMem(pbValid);
+		OSFreeMem(psDevPAddr);
+	}
+e0:
+	PVR_ASSERT(eError == PVRSRV_OK);
+    return eError;
+}
+
+/*
+	MMU_AcquireBaseAddr
+*/
+PVRSRV_ERROR
+MMU_AcquireBaseAddr(MMU_CONTEXT *psMMUContext, IMG_DEV_PHYADDR *psPhysAddr)
+{
+	if (!psMMUContext)
+		return PVRSRV_ERROR_INVALID_PARAMS;
+
+	*psPhysAddr = psMMUContext->sBaseLevelInfo.sMemDesc.sDevPAddr;
+	return PVRSRV_OK;
+}
+
+/*
+	MMU_ReleaseBaseAddr
+*/
+IMG_VOID
+MMU_ReleaseBaseAddr(MMU_CONTEXT *psMMUContext)
+{
+	PVR_UNREFERENCED_PARAMETER(psMMUContext);
+}
+
+/*
+	MMU_SetDeviceData
+*/
+IMG_VOID MMU_SetDeviceData(MMU_CONTEXT *psMMUContext, IMG_HANDLE hDevData)
+{
+	psMMUContext->hDevData = hDevData;
+}
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+/*
+    MMU_SetOSid, MMU_GetOSid
+*/
+
+IMG_VOID MMU_SetOSids(MMU_CONTEXT *psMMUContext, IMG_UINT32 ui32OSid, IMG_UINT32 ui32OSidReg)
+{
+	psMMUContext->ui32OSid = ui32OSid;
+	psMMUContext->ui32OSidReg = ui32OSidReg;
+
+    return ;
+}
+
+IMG_VOID MMU_GetOSids(MMU_CONTEXT *psMMUContext, IMG_UINT32 *pui32OSid, IMG_UINT32 *pui32OSidReg)
+{
+	*pui32OSid = psMMUContext->ui32OSid;
+	*pui32OSidReg = psMMUContext->ui32OSidReg;
+
+	return ;
+}
+
+#endif
+
+/*
+	MMU_CheckFaultAddress
+*/
+IMG_VOID MMU_CheckFaultAddress(MMU_CONTEXT *psMMUContext, IMG_DEV_VIRTADDR *psDevVAddr)
+{
+	MMU_DEVICEATTRIBS *psDevAttrs = psMMUContext->psDevAttrs;
+	const MMU_PxE_CONFIG *psConfig;
+	const MMU_PxE_CONFIG *psMMUPDEConfig;
+	const MMU_PxE_CONFIG *psMMUPTEConfig;
+	const MMU_DEVVADDR_CONFIG *psMMUDevVAddrConfig;
+	IMG_HANDLE hPriv;
+	MMU_Levelx_INFO *psLevel;
+	PVRSRV_ERROR eError;
+	IMG_UINT64 uiIndex;
+	IMG_UINT32 ui32PCIndex;
+	IMG_UINT32 ui32PDIndex;
+	IMG_UINT32 ui32PTIndex;
+	IMG_UINT32 ui32Log2PageSize;
+
+	/*
+		At this point we don't know the page size so assume it's 4K.
+		When we get the the PD level (MMU_LEVEL_2) we can check to see
+		if this assumption is correct.
+	*/
+	eError = psDevAttrs->pfnGetPageSizeConfiguration(12,
+													 &psMMUPDEConfig,
+													 &psMMUPTEConfig,
+													 &psMMUDevVAddrConfig,
+													 &hPriv);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_LOG(("Failed to get the page size info for log2 page sizeof 12"));
+	}
+
+	psLevel = &psMMUContext->sBaseLevelInfo;
+	psConfig = psDevAttrs->psBaseConfig;
+
+	switch(psMMUContext->psDevAttrs->eTopLevel)
+	{
+		case MMU_LEVEL_3:
+			/* Determine the PC index */
+			uiIndex = psDevVAddr->uiAddr & psDevAttrs->psTopLevelDevVAddrConfig->uiPCIndexMask;
+			uiIndex = uiIndex >> psDevAttrs->psTopLevelDevVAddrConfig->uiPCIndexShift;
+			ui32PCIndex = (IMG_UINT32) uiIndex;
+			PVR_ASSERT(uiIndex == ((IMG_UINT64) ui32PCIndex));
+			
+			if (ui32PCIndex >= psLevel->ui32NumOfEntries)
+			{
+				PVR_LOG(("PC index (%d) out of bounds (%d)", ui32PCIndex, psLevel->ui32NumOfEntries));
+				break;
+			}
+
+			if (_MMU_MapCPUVAddr(&psLevel->sMemDesc) != PVRSRV_OK)
+			{
+				PVR_LOG(("Failed to map MMU MemDesc"));
+			}
+
+			if (psConfig->uiBytesPerEntry == 4)
+			{
+				IMG_UINT32 *pui32Ptr = psLevel->sMemDesc.pvCpuVAddr;
+
+				PVR_LOG(("PCE for index %d = 0x%08x and %s be valid",
+						 ui32PCIndex,
+						 pui32Ptr[ui32PCIndex],
+						 psLevel->apsNextLevel[ui32PCIndex]?"should":"should not"));
+			}
+			else
+			{
+				IMG_UINT64 *pui64Ptr = psLevel->sMemDesc.pvCpuVAddr;
+
+				PVR_LOG(("PCE for index %d = 0x%016llx and %s be valid",
+						 ui32PCIndex,
+						 pui64Ptr[ui32PCIndex],
+						 psLevel->apsNextLevel[ui32PCIndex]?"should":"should not"));
+			}
+
+			_MMU_UnmapCPUVAddr(&psLevel->sMemDesc);
+			_MMU_LogPxEDump(psLevel, ui32PCIndex);
+
+			psLevel = psLevel->apsNextLevel[ui32PCIndex];
+			if (!psLevel)
+			{
+				break;
+			}
+			psConfig = psMMUPDEConfig;
+			/* Fall through */
+
+		case MMU_LEVEL_2:
+			/* Determine the PD index */
+			uiIndex = psDevVAddr->uiAddr & psDevAttrs->psTopLevelDevVAddrConfig->uiPDIndexMask;
+			uiIndex = uiIndex >> psDevAttrs->psTopLevelDevVAddrConfig->uiPDIndexShift;
+			ui32PDIndex = (IMG_UINT32) uiIndex;
+			PVR_ASSERT(uiIndex == ((IMG_UINT64) ui32PDIndex));
+
+			if (ui32PDIndex >= psLevel->ui32NumOfEntries)
+			{
+				PVR_LOG(("PD index (%d) out of bounds (%d)", ui32PDIndex, psLevel->ui32NumOfEntries));
+				break;
+			}
+
+			if (_MMU_MapCPUVAddr(&psLevel->sMemDesc) != PVRSRV_OK)
+			{
+				PVR_LOG(("Failed to map MMU MemDesc"));
+			}
+
+			if (psConfig->uiBytesPerEntry == 4)
+			{
+				IMG_UINT32 *pui32Ptr = psLevel->sMemDesc.pvCpuVAddr;
+
+				PVR_LOG(("PDE for index %d = 0x%08x and %s be valid",
+						 ui32PDIndex,
+						 pui32Ptr[ui32PDIndex],
+						 psLevel->apsNextLevel[ui32PDIndex]?"should":"should not"));
+
+				if (psDevAttrs->pfnGetPageSizeFromPDE4(pui32Ptr[ui32PDIndex], &ui32Log2PageSize) != PVRSRV_OK)
+				{
+					PVR_LOG(("Failed to get the page size from the PDE"));
+				}
+			}
+			else
+			{
+				IMG_UINT64 *pui64Ptr = psLevel->sMemDesc.pvCpuVAddr;
+
+				PVR_LOG(("PDE for index %d = 0x%016llx and %s be valid",
+						 ui32PDIndex,
+						 pui64Ptr[ui32PDIndex],
+						 psLevel->apsNextLevel[ui32PDIndex]?"should":"should not"));
+
+				if (psDevAttrs->pfnGetPageSizeFromPDE8(pui64Ptr[ui32PDIndex], &ui32Log2PageSize) != PVRSRV_OK)
+				{
+					PVR_LOG(("Failed to get the page size from the PDE"));
+				}
+			}
+
+			_MMU_UnmapCPUVAddr(&psLevel->sMemDesc);
+			_MMU_LogPxEDump(psLevel, ui32PDIndex);
+
+			/*
+				We assumed the page size was 4K, now we have the actual size
+				from the PDE we can confirm if our assumption was correct.
+				Until now it hasn't mattered as the PC and PD are the same
+				regardless of the page size
+			*/
+			if (ui32Log2PageSize != 12)
+			{
+				/* Put the 4K page size data */
+				psDevAttrs->pfnPutPageSizeConfiguration(hPriv);
+
+				/* Get the correct size data */
+				eError = psDevAttrs->pfnGetPageSizeConfiguration(ui32Log2PageSize,
+																 &psMMUPDEConfig,
+																 &psMMUPTEConfig,
+																 &psMMUDevVAddrConfig,
+																 &hPriv);
+				if (eError != PVRSRV_OK)
+				{
+					PVR_LOG(("Failed to get the page size info for log2 page sizeof %d", ui32Log2PageSize));
+					break;
+				}
+			}
+			psLevel = psLevel->apsNextLevel[ui32PDIndex];
+			if (!psLevel)
+			{
+				break;
+			}
+			psConfig = psMMUPTEConfig;
+			/* Fall through */
+
+		case MMU_LEVEL_1:
+			/* Determine the PT index */
+			uiIndex = psDevVAddr->uiAddr & psMMUDevVAddrConfig->uiPTIndexMask;
+			uiIndex = uiIndex >> psMMUDevVAddrConfig->uiPTIndexShift;
+			ui32PTIndex = (IMG_UINT32) uiIndex;
+			PVR_ASSERT(uiIndex == ((IMG_UINT64) ui32PTIndex));
+
+			if (ui32PTIndex >= psLevel->ui32NumOfEntries)
+			{
+				PVR_LOG(("PT index (%d) out of bounds (%d)", ui32PTIndex, psLevel->ui32NumOfEntries));
+				break;
+			}
+
+			if (_MMU_MapCPUVAddr(&psLevel->sMemDesc) != PVRSRV_OK)
+			{
+				PVR_LOG(("Failed to map MMU MemDesc"));
+			}
+
+			if (psConfig->uiBytesPerEntry == 4)
+			{
+				IMG_UINT32 *pui32Ptr = psLevel->sMemDesc.pvCpuVAddr;
+
+				PVR_LOG(("PTE for index %d = 0x%08x",
+						 ui32PTIndex,
+						 pui32Ptr[ui32PTIndex]));
+			}
+			else
+			{
+				IMG_UINT64 *pui64Ptr = psLevel->sMemDesc.pvCpuVAddr;
+
+				PVR_LOG(("PTE for index %d = 0x%016llx",
+						 ui32PTIndex,
+						 pui64Ptr[ui32PTIndex]));
+			}
+
+			_MMU_UnmapCPUVAddr(&psLevel->sMemDesc);
+			_MMU_LogPxEDump(psLevel, ui32PTIndex);
+			
+			break;
+			default:
+				PVR_LOG(("Unsupported MMU setup"));
+				break;
+	}
+}
+
+IMG_BOOL MMU_IsVDevAddrValid(MMU_CONTEXT *psMMUContext,
+                             IMG_UINT32 uiLog2PageSize,
+                             IMG_DEV_VIRTADDR sDevVAddr)
+{
+    MMU_Levelx_INFO *psLevel = IMG_NULL;
+    const MMU_PxE_CONFIG *psConfig;
+    const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+    IMG_HANDLE hPriv;
+    IMG_UINT32 uiIndex = 0;
+    IMG_BOOL bStatus = IMG_FALSE;
+
+    _MMU_GetPTConfig(psMMUContext, uiLog2PageSize, &psConfig, &hPriv, &psDevVAddrConfig);
+
+    switch(psMMUContext->psDevAttrs->eTopLevel)
+    {
+        case MMU_LEVEL_3:
+            uiIndex = _CalcPCEIdx(sDevVAddr, psDevVAddrConfig, IMG_FALSE);
+            psLevel = psMMUContext->sBaseLevelInfo.apsNextLevel[uiIndex];
+            if (psLevel == IMG_NULL)
+                break;
+            /* fall through */
+        case MMU_LEVEL_2:
+            uiIndex = _CalcPDEIdx(sDevVAddr, psDevVAddrConfig, IMG_FALSE);
+
+            if (psLevel != IMG_NULL)
+                psLevel = psLevel->apsNextLevel[uiIndex];
+            else
+                psLevel = psMMUContext->sBaseLevelInfo.apsNextLevel[uiIndex];
+
+            if (psLevel == IMG_NULL)
+                break;
+            /* fall through */
+        case MMU_LEVEL_1:
+            uiIndex = _CalcPTEIdx(sDevVAddr, psDevVAddrConfig, IMG_FALSE);
+
+            if (psLevel == IMG_NULL)
+                psLevel = &psMMUContext->sBaseLevelInfo;
+
+            if (_MMU_MapCPUVAddr(&psLevel->sMemDesc) != PVRSRV_OK)
+            {
+                PVR_LOG(("MMU_IsVDevAddrValid: _MMU_MapCPUVAddr failed"));
+                break;
+            }
+
+            bStatus = ((IMG_UINT64 *) psLevel->sMemDesc.pvCpuVAddr)[uiIndex]
+                      & psConfig->uiValidEnMask;
+            _MMU_UnmapCPUVAddr(&psLevel->sMemDesc); /* ignore return status */
+            break;
+        default:
+            PVR_LOG(("MMU_IsVDevAddrValid: Unsupported MMU setup"));
+            break;
+    }
+
+    _MMU_PutPTConfig(psMMUContext, hPriv);
+
+    return bStatus;
+}
+
+#if defined(PDUMP)
+/*
+	MMU_ContextDerivePCPDumpSymAddr
+*/
+PVRSRV_ERROR MMU_ContextDerivePCPDumpSymAddr(MMU_CONTEXT *psMMUContext,
+                                             IMG_CHAR *pszPDumpSymbolicNameBuffer,
+                                             IMG_SIZE_T uiPDumpSymbolicNameBufferSize)
+{
+    IMG_SIZE_T uiCount;
+    IMG_UINT64 ui64PhysAddr;
+	PVRSRV_DEVICE_IDENTIFIER *psDevId = &psMMUContext->psDevNode->sDevId;
+
+    if (!psMMUContext->sBaseLevelInfo.sMemDesc.bValid)
+    {
+        /* We don't have any allocations.  You're not allowed to ask
+           for the page catalogue base address until you've made at
+           least one allocation */
+        return PVRSRV_ERROR_MMU_API_PROTOCOL_ERROR;
+    }
+
+    ui64PhysAddr = (IMG_UINT64)psMMUContext->sBaseLevelInfo.sMemDesc.sDevPAddr.uiAddr;
+
+    PVR_ASSERT(uiPDumpSymbolicNameBufferSize >= (IMG_UINT32)(21 + OSStringLength(psDevId->pszPDumpDevName)));
+
+    /* Page table Symbolic Name is formed from page table phys addr
+       prefixed with MMUPT_. */
+
+    uiCount = OSSNPrintf(pszPDumpSymbolicNameBuffer,
+                         uiPDumpSymbolicNameBufferSize,
+                         ":%s:%s%016llX",
+                         psDevId->pszPDumpDevName,
+                         psMMUContext->sBaseLevelInfo.sMemDesc.bValid?"MMUPC_":"XXX",
+                         ui64PhysAddr);
+    
+    if (uiCount + 1 > uiPDumpSymbolicNameBufferSize)
+    {
+        return PVRSRV_ERROR_INVALID_PARAMS;
+    }
+
+    return PVRSRV_OK;
+}
+
+/*
+	MMU_PDumpWritePageCatBase
+*/
+PVRSRV_ERROR
+MMU_PDumpWritePageCatBase(MMU_CONTEXT *psMMUContext,
+                          const IMG_CHAR *pszSpaceName,
+                          IMG_DEVMEM_OFFSET_T uiOffset,
+                          IMG_UINT32 ui32WordSize,
+                          IMG_UINT32 ui32AlignShift,
+                          IMG_UINT32 ui32Shift,
+						  PDUMP_FLAGS_T uiPdumpFlags)
+{
+    PVRSRV_ERROR eError;
+    IMG_CHAR aszPageCatBaseSymbolicAddr[100];
+	const IMG_CHAR *pszPDumpDevName = psMMUContext->psDevNode->pszMMUPxPDumpMemSpaceName;
+
+
+	eError = MMU_ContextDerivePCPDumpSymAddr(psMMUContext,
+                                             &aszPageCatBaseSymbolicAddr[0],
+                                             sizeof(aszPageCatBaseSymbolicAddr));
+    if (eError ==  PVRSRV_OK)
+    {
+		eError = PDumpWriteSymbAddress(pszSpaceName,
+										   uiOffset,
+										   aszPageCatBaseSymbolicAddr,
+										   0, /* offset -- Could be non-zero for var. pgsz */
+										   pszPDumpDevName,
+										   ui32WordSize,
+										   ui32AlignShift,
+										   ui32Shift,
+										   uiPdumpFlags | PDUMP_FLAGS_CONTINUOUS);
+    }
+
+    return eError;
+}
+
+/*
+	MMU_AcquirePDumpMMUContext
+*/
+PVRSRV_ERROR MMU_AcquirePDumpMMUContext(MMU_CONTEXT *psMMUContext,
+                                        IMG_UINT32 *pui32PDumpMMUContextID)
+{
+	PVRSRV_DEVICE_IDENTIFIER *psDevId = &psMMUContext->psDevNode->sDevId;
+
+	if (!psMMUContext->ui32PDumpContextIDRefCount)
+	{
+		PDUMP_MMU_ALLOC_MMUCONTEXT(psDevId->pszPDumpDevName,
+									psMMUContext->sBaseLevelInfo.sMemDesc.sDevPAddr,
+									psMMUContext->psDevAttrs->eMMUType,
+									&psMMUContext->uiPDumpContextID);
+	}
+
+	psMMUContext->ui32PDumpContextIDRefCount++;
+	*pui32PDumpMMUContextID = psMMUContext->uiPDumpContextID;
+
+	return PVRSRV_OK;
+}
+
+/*
+	MMU_ReleasePDumpMMUContext
+*/ 
+PVRSRV_ERROR MMU_ReleasePDumpMMUContext(MMU_CONTEXT *psMMUContext)
+{
+	PVRSRV_DEVICE_IDENTIFIER *psDevId = &psMMUContext->psDevNode->sDevId;
+
+	PVR_ASSERT(psMMUContext->ui32PDumpContextIDRefCount != 0);
+	psMMUContext->ui32PDumpContextIDRefCount--;
+
+	if (psMMUContext->ui32PDumpContextIDRefCount == 0)
+	{
+		PDUMP_MMU_FREE_MMUCONTEXT(psDevId->pszPDumpDevName,
+									psMMUContext->uiPDumpContextID);
+	}
+
+	return PVRSRV_OK;
+}
+#endif
+
+/******************************************************************************
+ End of file (mmu_common.c)
+******************************************************************************/
+
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/pdump_common.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/pdump_common.c
new file mode 100644
index 0000000..817dfd1
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/pdump_common.c
@@ -0,0 +1,2906 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common Server PDump functions layer
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+
+   
+#if defined(PDUMP)
+#include <stdarg.h>
+
+#include "pvrversion.h"
+#include "allocmem.h"
+#include "osfunc.h"
+#include "pvrsrv.h"
+#include "pvr_debug.h"
+#include "srvkm.h"
+#include "pdump_physmem.h"
+#include "hash.h"
+#include "connection_server.h"
+#include "sync_server.h"
+
+/* pdump headers */
+#include "dbgdrvif_srv5.h"
+#include "pdump_osfunc.h"
+#include "pdump_km.h"
+
+/* Allow temporary buffer size override */
+#if !defined(PDUMP_TEMP_BUFFER_SIZE)
+#define PDUMP_TEMP_BUFFER_SIZE (64 * 1024U)
+#endif
+
+/* DEBUG */
+#if 0
+#define PDUMP_DBG(a)   PDumpOSDebugPrintf (a)
+#else
+#define PDUMP_DBG(a)
+#endif
+
+
+#define	PTR_PLUS(t, p, x) ((t)(((IMG_CHAR *)(p)) + (x)))
+#define	VPTR_PLUS(p, x) PTR_PLUS(IMG_VOID *, p, x)
+#define	VPTR_INC(p, x) ((p) = VPTR_PLUS(p, x))
+#define MAX_PDUMP_MMU_CONTEXTS	(32)
+static IMG_VOID *gpvTempBuffer = IMG_NULL;
+
+#define PERSISTANT_MAGIC           ((IMG_UINTPTR_T) 0xe33ee33e)
+#define PDUMP_PERSISTENT_HASH_SIZE 10
+
+#define PDUMP_PRM_FILE_NAME_MAX	32         /*|< Size of parameter name used*/
+#define PDUMP_PRM_FILE_SIZE_MAX	0x7FDFFFFF /*!< Default maximum file size to split output files, 2GB-2MB as fwrite limits it to 2GB-1 on 32bit systems */
+
+
+static HASH_TABLE *g_psPersistentHash = IMG_NULL;
+
+static IMG_BOOL		g_PDumpInitialised = IMG_FALSE;
+static IMG_UINT32	g_ConnectionCount = 0;
+
+
+typedef struct
+{
+	PDUMP_CHANNEL sCh;         /*!< Channel handles */
+} PDUMP_SCRIPT;
+
+typedef struct
+{
+	IMG_UINT32    ui32Init;    /*|< Count of bytes written to the init phase stream */
+	IMG_UINT32    ui32Main;    /*!< Count of bytes written to the main stream */
+	IMG_UINT32    ui32Deinit;  /*!< Count of bytes written to the deinit stream */
+} PDUMP_CHANNEL_WOFFSETS;
+
+typedef struct
+{
+	PDUMP_CHANNEL          sCh;             /*!< Channel handles */
+	PDUMP_CHANNEL_WOFFSETS sWOff;           /*!< Channel file write offsets */
+	IMG_UINT32             ui32FileIdx;     /*!< File index used when file size limit reached and a new file is started, parameter channel only */
+	IMG_UINT32             ui32MaxFileSize; /*!< Maximum file size for parameter files */
+
+	PDUMP_FILEOFFSET_T     uiZeroPageOffset; /*!< Offset of the zero page in the parameter file */
+	IMG_SIZE_T             uiZeroPageSize; /*!< Size of the zero page in the parameter file */
+	IMG_CHAR               szZeroPageFilename[PDUMP_PRM_FILE_NAME_MAX]; /*< PRM file name where the zero page was pdumped */
+} PDUMP_PARAMETERS;
+
+static PDUMP_SCRIPT     g_PDumpScript    = { { 0, 0, 0} };
+static PDUMP_PARAMETERS g_PDumpParameters = { { 0, 0, 0}, {0, 0, 0}, 0, PDUMP_PRM_FILE_SIZE_MAX};
+
+
+#if defined(PDUMP_DEBUG_OUTFILES)
+/* counter increments each time debug write is called */
+IMG_UINT32 g_ui32EveryLineCounter = 1U;
+#endif
+
+#if defined(PDUMP_DEBUG) || defined(REFCOUNT_DEBUG)
+#define PDUMP_REFCOUNT_PRINT(fmt, ...) PVRSRVDebugPrintf(PVR_DBG_WARNING, __FILE__, __LINE__, fmt, __VA_ARGS__)
+#else
+#define PDUMP_REFCOUNT_PRINT(fmt, ...)
+#endif
+
+/* Prototype for the test/debug state dump rotuine used in debugging */
+IMG_VOID PDumpCommonDumpState(IMG_BOOL bDumpOSLayerState);
+#undef PDUMP_TRACE_STATE
+
+
+/*****************************************************************************/
+/*	PDump Control Module Definitions                                         */
+/*****************************************************************************/
+
+typedef struct _PDUMP_CAPTURE_RANGE_
+{
+	IMG_UINT32 ui32Start;       /*!< Start frame number of range */
+	IMG_UINT32 ui32End;         /*!< Send frame number of range */
+	IMG_UINT32 ui32Interval;    /*!< Frame sample rate interval */
+} PDUMP_CAPTURE_RANGE;
+
+/* No direct access to members from outside the control module - please */
+typedef struct _PDUMP_CTRL_STATE_
+{
+	IMG_BOOL            bInitPhaseActive;   /*!< State of driver initialisation phase */
+	IMG_UINT32          ui32Flags;          /*!< Unused */
+
+	IMG_UINT32          ui32DefaultCapMode; /*!< Capture mode of the dump */
+	PDUMP_CAPTURE_RANGE sDefaultRange;      /*|< The default capture range */
+	IMG_UINT32          ui32CurrentFrame;   /*!< Current frame number */
+
+	IMG_BOOL            bCaptureOn;         /*!< Current capture status, is current frame in range */
+	IMG_BOOL            bSuspended;         /*!< Suspend flag set on unrecoverable error */
+	IMG_BOOL            bInPowerTransition; /*!< Device power transition state */
+	POS_LOCK            hLock;              /*!< Exclusive lock to this structure */
+} PDUMP_CTRL_STATE;
+
+static PDUMP_CTRL_STATE g_PDumpCtrl =
+{
+	IMG_TRUE,
+	0,
+
+	0,              /*!< Value obtained from OS PDump layer during initialisation */
+	{
+		0xFFFFFFFF,
+		0xFFFFFFFF,
+		1
+	},
+	0,
+
+	IMG_FALSE,
+	IMG_FALSE,
+	IMG_FALSE,
+	IMG_NULL
+};
+
+static PVRSRV_ERROR PDumpCtrlInit(IMG_UINT32 ui32InitCapMode)
+{
+	g_PDumpCtrl.ui32DefaultCapMode = ui32InitCapMode;
+	PVR_ASSERT(g_PDumpCtrl.ui32DefaultCapMode != 0);
+
+	/* Create lock for PDUMP_CTRL_STATE struct, which is shared between pdump client
+	   and PDumping app. This lock will help us serialize calls from pdump client
+	   and PDumping app */
+	PVR_LOGR_IF_ERROR(OSLockCreate(&g_PDumpCtrl.hLock, LOCK_TYPE_PASSIVE), "OSLockCreate");
+	
+	return PVRSRV_OK;
+}
+
+static IMG_VOID PDumpCtrlDeInit(IMG_VOID)
+{
+	if (g_PDumpCtrl.hLock)
+	{
+		OSLockDestroy(g_PDumpCtrl.hLock);
+		g_PDumpCtrl.hLock = IMG_NULL;
+	}
+}
+
+static INLINE IMG_VOID PDumpCtrlLockAcquire(IMG_VOID)
+{
+	OSLockAcquire(g_PDumpCtrl.hLock);
+}
+
+static INLINE IMG_VOID PDumpCtrlLockRelease(IMG_VOID)
+{
+	OSLockRelease(g_PDumpCtrl.hLock);
+}
+
+/**********************************************************************************************************
+	NOTE:
+	The following PDumpCtrl*** functions require the PDUMP_CTRL_STATE lock be acquired BEFORE they are
+	called. This is because the PDUMP_CTRL_STATE data is shared between the PDumping App and the PDump
+	client, hence an exclusive access is required. The lock can be acquired and released by using the
+	PDumpCtrlLockAcquire & PDumpCtrlLockRelease functions respectively.
+**********************************************************************************************************/
+
+static IMG_VOID PDumpCtrlUpdateCaptureStatus(IMG_VOID)
+{
+	if (g_PDumpCtrl.ui32DefaultCapMode == DEBUG_CAPMODE_FRAMED)
+	{
+		if ((g_PDumpCtrl.ui32CurrentFrame >= g_PDumpCtrl.sDefaultRange.ui32Start) &&
+			(g_PDumpCtrl.ui32CurrentFrame <= g_PDumpCtrl.sDefaultRange.ui32End) &&
+			(((g_PDumpCtrl.ui32CurrentFrame - g_PDumpCtrl.sDefaultRange.ui32Start) % g_PDumpCtrl.sDefaultRange.ui32Interval) == 0))
+		{
+			g_PDumpCtrl.bCaptureOn = IMG_TRUE;
+		}
+		else
+		{
+			g_PDumpCtrl.bCaptureOn = IMG_FALSE;
+		}
+	}
+	else if (g_PDumpCtrl.ui32DefaultCapMode == DEBUG_CAPMODE_CONTINUOUS)
+	{
+		g_PDumpCtrl.bCaptureOn = IMG_TRUE;
+	}
+	else
+	{
+		g_PDumpCtrl.bCaptureOn = IMG_FALSE;
+		PVR_DPF((PVR_DBG_ERROR, "PDumpCtrlSetCurrentFrame: Unexpected capture mode (%x)", g_PDumpCtrl.ui32DefaultCapMode));
+	}
+
+}
+
+static IMG_VOID PDumpCtrlSetCurrentFrame(IMG_UINT32 ui32Frame)
+{
+	g_PDumpCtrl.ui32CurrentFrame = ui32Frame;
+	/* Mirror the value into the debug driver */
+	PDumpOSSetFrame(ui32Frame);
+
+	PDumpCtrlUpdateCaptureStatus();
+
+#if defined(PDUMP_TRACE_STATE)	
+	PDumpCommonDumpState(IMG_FALSE);
+#endif
+}
+
+static IMG_VOID PDumpCtrlSetDefaultCaptureParams(IMG_UINT32 ui32Mode, IMG_UINT32 ui32Start, IMG_UINT32 ui32End, IMG_UINT32 ui32Interval)
+{
+	PVR_ASSERT(ui32Interval > 0);
+	PVR_ASSERT(ui32End >= ui32Start);
+	PVR_ASSERT((ui32Mode == DEBUG_CAPMODE_FRAMED) || (ui32Mode == DEBUG_CAPMODE_CONTINUOUS));
+
+	/*
+		Set the default capture range to that supplied by the PDump client tool
+	 */
+	g_PDumpCtrl.ui32DefaultCapMode = ui32Mode;
+	g_PDumpCtrl.sDefaultRange.ui32Start = ui32Start;
+	g_PDumpCtrl.sDefaultRange.ui32End = ui32End;
+	g_PDumpCtrl.sDefaultRange.ui32Interval = ui32Interval;
+
+	/*
+		Reset the current frame on reset of the default capture range, helps
+		avoid inter-pdump start frame issues when the driver is not reloaded.
+	 */
+	PDumpCtrlSetCurrentFrame(0);
+}
+
+static INLINE IMG_BOOL PDumpCtrlCapModIsFramed(IMG_VOID)
+{
+	return g_PDumpCtrl.ui32DefaultCapMode == DEBUG_CAPMODE_FRAMED;
+}
+
+static INLINE IMG_BOOL PDumpCtrlCapModIsContinuous(IMG_VOID)
+{
+	return g_PDumpCtrl.ui32DefaultCapMode == DEBUG_CAPMODE_CONTINUOUS;
+}
+
+static IMG_UINT32 PDumpCtrlGetCurrentFrame(IMG_VOID)
+{
+	return g_PDumpCtrl.ui32CurrentFrame;
+}
+
+static INLINE IMG_BOOL PDumpCtrlCaptureOn(IMG_VOID)
+{
+	return !g_PDumpCtrl.bSuspended && g_PDumpCtrl.bCaptureOn;
+}
+
+static INLINE IMG_BOOL PDumpCtrlCaptureRangePast(IMG_VOID)
+{
+	return (g_PDumpCtrl.ui32CurrentFrame > g_PDumpCtrl.sDefaultRange.ui32End);
+}
+
+/* Used to imply if the PDump client is connected or not. */
+static INLINE IMG_BOOL PDumpCtrlCaptureRangeUnset(IMG_VOID)
+{
+	return ((g_PDumpCtrl.sDefaultRange.ui32Start == 0xFFFFFFFFU) &&
+			(g_PDumpCtrl.sDefaultRange.ui32End == 0xFFFFFFFFU));
+}
+
+static IMG_BOOL PDumpCtrlIsLastCaptureFrame(IMG_VOID)
+{
+	if (g_PDumpCtrl.ui32DefaultCapMode == DEBUG_CAPMODE_FRAMED)
+	{
+		/* Is the next capture frame within the range end limit? */
+		if ((g_PDumpCtrl.ui32CurrentFrame + g_PDumpCtrl.sDefaultRange.ui32Interval) > g_PDumpCtrl.sDefaultRange.ui32End)
+		{
+			return IMG_TRUE;
+		}
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PDumpCtrIsLastCaptureFrame: Unexpected capture mode (%x)", g_PDumpCtrl.ui32DefaultCapMode));
+	}
+
+	/* Return false for continuous capture mode or when in framed mode */
+	return IMG_FALSE;
+}
+
+static INLINE IMG_BOOL PDumpCtrlInitPhaseComplete(IMG_VOID)
+{
+	return !g_PDumpCtrl.bInitPhaseActive;
+}
+
+static INLINE IMG_VOID PDumpCtrlSetInitPhaseComplete(IMG_BOOL bIsComplete)
+{
+	if (bIsComplete)
+	{
+		g_PDumpCtrl.bInitPhaseActive = IMG_FALSE;
+		PDUMP_HEREA(102);
+	}
+	else
+	{
+		g_PDumpCtrl.bInitPhaseActive = IMG_TRUE;
+		PDUMP_HEREA(103);
+	}
+}
+
+static INLINE IMG_VOID PDumpCtrlSuspend(IMG_VOID)
+{
+	PDUMP_HEREA(104);
+	g_PDumpCtrl.bSuspended = IMG_TRUE;
+}
+
+static INLINE IMG_VOID PDumpCtrlResume(IMG_VOID)
+{
+	PDUMP_HEREA(105);
+	g_PDumpCtrl.bSuspended = IMG_FALSE;
+}
+
+static INLINE IMG_BOOL PDumpCtrlIsDumpSuspended(IMG_VOID)
+{
+	return g_PDumpCtrl.bSuspended;
+}
+
+static INLINE IMG_VOID PDumpCtrlPowerTransitionStart(IMG_VOID)
+{
+	g_PDumpCtrl.bInPowerTransition = IMG_TRUE;
+}
+
+static INLINE IMG_VOID PDumpCtrlPowerTransitionEnd(IMG_VOID)
+{
+	g_PDumpCtrl.bInPowerTransition = IMG_FALSE;
+}
+
+static INLINE IMG_BOOL PDumpCtrlInPowerTransition(IMG_VOID)
+{
+	return g_PDumpCtrl.bInPowerTransition;
+}
+
+static PVRSRV_ERROR PDumpCtrlIsCaptureFrame(IMG_BOOL *bIsCapturing)
+{
+	*bIsCapturing = PDumpCtrlCaptureOn();
+	return PVRSRV_OK;
+}
+
+/********************************************************************************
+	End of PDumpCtrl*** functions
+*********************************************************************************/
+
+/*
+	Wrapper functions which need to be exposed in pdump_km.h for use in other
+	pdump_*** modules safely. These functions call the specific PDumpCtrl layer
+	function after acquiring the PDUMP_CTRL_STATE lock, hence making the calls 
+	from other modules hassle free by avoiding the acquire/release CtrlLock
+	calls.
+*/
+
+IMG_VOID PDumpPowerTransitionStart(IMG_VOID)
+{
+	PDumpCtrlLockAcquire();
+	PDumpCtrlPowerTransitionStart();
+	PDumpCtrlLockRelease();
+}
+
+IMG_VOID PDumpPowerTransitionEnd(IMG_VOID)
+{
+	PDumpCtrlLockAcquire();
+	PDumpCtrlPowerTransitionEnd();
+	PDumpCtrlLockRelease();
+}
+
+IMG_BOOL PDumpInPowerTransition(IMG_VOID)
+{
+	IMG_BOOL bPDumpInPowerTransition = IMG_FALSE;
+	
+	PDumpCtrlLockAcquire();
+	bPDumpInPowerTransition = PDumpCtrlInPowerTransition();
+	PDumpCtrlLockRelease();
+
+	return bPDumpInPowerTransition;
+}
+
+IMG_BOOL PDumpIsDumpSuspended(IMG_VOID)
+{
+	IMG_BOOL bPDumpIsDumpSuspended;
+
+	PDumpCtrlLockAcquire();
+	bPDumpIsDumpSuspended = PDumpCtrlIsDumpSuspended();
+	PDumpCtrlLockRelease();
+
+	return bPDumpIsDumpSuspended;
+}
+
+/*****************************************************************************/
+/*	PDump Common Write Layer just above PDump OS Layer                       */
+/*****************************************************************************/
+
+
+/* 
+	Checks in this method were seeded from the original PDumpWriteILock()
+	and DBGDrivWriteCM() and have grown since to ensure PDump output
+	matches legacy output.
+	Note: the order of the checks in this method is important as some
+	writes have multiple pdump flags set!
+ */
+static IMG_BOOL PDumpWriteAllowed(IMG_UINT32 ui32Flags)
+{
+	/* Lock down the PDUMP_CTRL_STATE struct before calling the following
+	   PDumpCtrl*** functions. This is to avoid updates to the Control data
+	   while we are reading from it */
+	PDumpCtrlLockAcquire();
+
+	/* No writes if in framed mode and range pasted */
+	if (PDumpCtrlCaptureRangePast())
+	{
+		PDUMP_HERE(10);
+		goto unlockAndReturnFalse;
+	}
+
+	/* No writes while writing is suspended */
+	if (PDumpCtrlIsDumpSuspended())
+	{
+		PDUMP_HERE(11);
+		goto unlockAndReturnFalse;
+	}
+
+	/* Prevent PDumping during a power transition */
+	if (PDumpCtrlInPowerTransition())
+	{	/* except when it's flagged */
+		if (ui32Flags & PDUMP_FLAGS_POWER)
+		{
+			PDUMP_HERE(20);
+			goto unlockAndReturnTrue;
+		}
+		PDUMP_HERE(16);
+		goto unlockAndReturnFalse;
+	}
+
+	/* Always allow dumping in init phase and when persistent flagged */
+	if (ui32Flags & PDUMP_FLAGS_PERSISTENT)
+	{
+		PDUMP_HERE(12);
+		goto unlockAndReturnTrue;
+	}
+	if (!PDumpCtrlInitPhaseComplete())
+	{
+		PDUMP_HERE(15);
+		goto unlockAndReturnTrue;
+	}
+
+	/* The following checks are made when the driver has completed initialisation */
+
+	/* If PDump client connected allow continuous flagged writes */
+	if (ui32Flags & PDUMP_FLAGS_CONTINUOUS)
+	{
+		if (PDumpCtrlCaptureRangeUnset()) /* Is client connected? */
+		{
+			PDUMP_HERE(13);
+			goto unlockAndReturnFalse;
+		}
+		PDUMP_HERE(14);
+		goto unlockAndReturnTrue;
+	}
+
+	/* No last/deinit statements allowed when not in initialisation phase */
+	if (ui32Flags & PDUMP_FLAGS_DEINIT)
+	{
+		if (PDumpCtrlInitPhaseComplete())
+		{
+			PDUMP_HERE(17);
+			PVR_DPF((PVR_DBG_ERROR, "PDumpWriteAllowed: DEINIT flag used at the wrong time outside of initialisation!"));
+			goto unlockAndReturnFalse;
+		}
+	}
+
+	/* 
+		If no flags are provided then it is FRAMED output and the frame
+		range must be checked matching expected behaviour.
+	 */
+	if (PDumpCtrlCapModIsFramed() && !PDumpCtrlCaptureOn())
+	{
+		PDUMP_HERE(18);
+		goto unlockAndReturnFalse;
+	}
+
+	PDUMP_HERE(19);
+
+unlockAndReturnTrue:
+	/* Allow the write to take place */
+	PDumpCtrlLockRelease();
+	return IMG_TRUE;
+
+unlockAndReturnFalse:
+	PDumpCtrlLockRelease();
+	return IMG_FALSE;
+}
+
+#undef PDUMP_DEBUG_SCRIPT_LINES
+
+#if defined(PDUMP_DEBUG_SCRIPT_LINES)
+#define PDUMPOSDEBUGDRIVERWRITE(a,b,c,d) _PDumpOSDebugDriverWrite(a,b,c,d)
+static IMG_UINT32 _PDumpOSDebugDriverWrite( IMG_HANDLE psStream,
+									IMG_UINT8 *pui8Data,
+									IMG_UINT32 ui32BCount,
+									IMG_UINT32 ui32Flags)
+{
+	IMG_CHAR tmp1[80];
+	IMG_CHAR* streamName = "unkn";
+
+	if (g_PDumpScript.sCh.hDeinit == psStream)
+		streamName = "dein";
+	else if (g_PDumpScript.sCh.hInit == psStream)
+		streamName = "init";
+	else if (g_PDumpScript.sCh.hMain == psStream)
+		streamName = "main";
+
+	(void) PDumpOSSprintf(tmp1, 80, "-- %s, %x\n", streamName, ui32Flags);
+	(void) PDumpOSDebugDriverWrite(psStream, tmp1, OSStringLength(tmp1));
+
+	return PDumpOSDebugDriverWrite(psStream, pui8Data, ui32BCount);
+}
+#else
+#define PDUMPOSDEBUGDRIVERWRITE(a,b,c,d) PDumpOSDebugDriverWrite(a,b,c)
+#endif
+
+
+/**************************************************************************/ /*!
+ @Function		PDumpWriteToBuffer
+ @Description	Write the supplied data to the PDump stream buffer and attempt
+                to handle any buffer full conditions to ensure all the data
+                requested to be written, is.
+
+ @Input			psStream	The address of the PDump stream buffer to write to
+ @Input			pui8Data    Pointer to the data to be written
+ @Input			ui32BCount	Number of bytes to write
+ @Input			ui32Flags	PDump statement flags.
+
+ @Return 		IMG_UINT32  Actual number of bytes written, may be less than
+ 	 	 	 	 	 	 	ui32BCount when buffer full condition could not
+ 	 	 	 	 	 	 	be avoided.
+*/ /***************************************************************************/
+static IMG_UINT32 PDumpWriteToBuffer(IMG_HANDLE psStream, IMG_UINT8 *pui8Data,
+		IMG_UINT32 ui32BCount, IMG_UINT32 ui32Flags)
+{
+	IMG_UINT32	ui32BytesWritten = 0;
+	IMG_UINT32	ui32Off = 0;
+
+	while (ui32BCount > 0)
+	{
+		ui32BytesWritten = PDUMPOSDEBUGDRIVERWRITE(psStream, &pui8Data[ui32Off], ui32BCount, ui32Flags);
+
+		if (ui32BytesWritten == 0)
+		{
+			PVR_DPF((PVR_DBG_MESSAGE, "PDumpWriteToBuffer: Zero bytes written - release execution"));
+			PDumpOSReleaseExecution();
+		}
+
+		if (ui32BytesWritten != 0xFFFFFFFFU)
+		{
+			if (ui32BCount != ui32BytesWritten)
+			{
+				PVR_DPF((PVR_DBG_MESSAGE, "PDumpWriteToBuffer: partial write of %d bytes of %d bytes", ui32BytesWritten, ui32BCount));
+			}
+			ui32Off += ui32BytesWritten;
+			ui32BCount -= ui32BytesWritten;
+		}
+		else
+		{
+			PVR_DPF((PVR_DBG_ERROR, "PDumpWriteToBuffer: Unrecoverable error received from the debug driver"));
+			if( PDumpOSGetCtrlState(psStream, DBG_GET_STATE_FLAG_IS_READONLY) )
+			{
+				/* Fatal -suspend PDump to prevent flooding kernel log buffer */
+				PVR_LOG(("PDump suspended, debug driver out of memory"));
+				/*
+					Acquire the control lock before updating "suspended" state. This may not be required
+					because "this" is the context which checks the "suspended" state in PDumpWriteAllowed
+					before calling this function. So, this update is mainly for other contexts.
+					Also, all the other contexts which will/wish-to read the "suspended" state ought to be
+					waiting on the bridge lock first and then the PDUMP_OSLOCK (to pdump into script or 
+					parameter buffer). However, this acquire may be useful incase the PDump call is being
+					made from a direct bridge
+				*/
+				PDumpCtrlLockAcquire();
+				PDumpCtrlSuspend();
+				PDumpCtrlLockRelease();
+			}
+			return 0;
+		}
+	}
+
+	/* reset buffer counters */
+	ui32BCount = ui32Off; ui32Off = 0; ui32BytesWritten = 0;
+
+	return ui32BCount;
+}
+
+
+/**************************************************************************/ /*!
+ @Function		PDumpWriteToChannel
+ @Description	Write the supplied data to the PDump channel specified obeying
+ 	            flags to write to the necessary channel buffers.
+
+ @Input			psChannel	The address of the script or parameter channel object
+ @Input/Output	psWOff		The address of the channel write offsets object to
+                            update on successful writing
+ @Input			pui8Data    Pointer to the data to be written
+ @Input			ui32Size	Number of bytes to write
+ @Input			ui32Flags	PDump statement flags, they may be clear (no flags)
+                            which implies framed data, continuous flagged,
+                            persistent flagged, or continuous AND persistent
+                            flagged and they determine how the data is output.
+                            On the first test app run after driver load, the
+                            Display Controller dumps a resource that is both
+                            continuous and persistent and this needs writing to
+                            both the init (persistent) and main (continuous)
+                            channel buffers to ensure the data is dumped in
+                            subsequent test runs without reloading the driver.
+    						In subsequent runs the PDump client 'freezes' the
+    						init buffer so that only one dump of persistent data
+    						for the "extended init phase" is captured to the
+    						init buffer.
+
+ @Return 		IMG_BOOL    True when the data has been consumed, false otherwise
+*/ /***************************************************************************/
+static IMG_BOOL PDumpWriteToChannel(PDUMP_CHANNEL* psChannel, PDUMP_CHANNEL_WOFFSETS* psWOff,
+		IMG_UINT8* pui8Data, IMG_UINT32 ui32Size, IMG_UINT32 ui32Flags)
+{
+	IMG_UINT32   ui32BytesWritten = 0;
+
+	PDUMP_HERE(210);
+
+	/* Dump data to deinit buffer when flagged as deinit */
+	if (ui32Flags & PDUMP_FLAGS_DEINIT)
+	{
+		PDUMP_HERE(211);
+		ui32BytesWritten = PDumpWriteToBuffer(psChannel->hDeinit, pui8Data, ui32Size, ui32Flags);
+		if (ui32BytesWritten != ui32Size)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "PDumpWriteToChannel: DEINIT Written length (%d) does not match data length (%d), PDump incomplete!", ui32BytesWritten, ui32Size));
+			PDUMP_HERE(212);
+			return IMG_FALSE;
+		}
+
+		if (psWOff)
+		{
+			psWOff->ui32Deinit += ui32Size;
+		}
+
+	}
+	else
+	{
+		IMG_BOOL bDumpedToInitAlready = IMG_FALSE;
+		IMG_HANDLE*  phStream = IMG_NULL;
+		IMG_UINT32*  pui32Offset = IMG_NULL;
+
+		/* Always append persistent data to init phase so it's available on
+		 * subsequent app runs, but also to the main stream if client connected */
+		if (ui32Flags & PDUMP_FLAGS_PERSISTENT)
+		{
+			PDUMP_HERE(213);
+			ui32BytesWritten = PDumpWriteToBuffer(	psChannel->hInit, pui8Data, ui32Size, ui32Flags);
+			if (ui32BytesWritten != ui32Size)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "PDumpWriteToChannel: PERSIST Written length (%d) does not match data length (%d), PDump incomplete!", ui32BytesWritten, ui32Size));
+				PDUMP_HERE(214);
+				return IMG_FALSE;
+			}
+
+			bDumpedToInitAlready = IMG_TRUE;
+			if (psWOff)
+			{
+				psWOff->ui32Init += ui32Size;
+			}
+
+			/* Don't write continuous data if client not connected */
+			PDumpCtrlLockAcquire();
+			if ((ui32Flags & PDUMP_FLAGS_CONTINUOUS) && PDumpCtrlCaptureRangeUnset())
+			{
+				PDumpCtrlLockRelease();
+				return IMG_TRUE;
+			}
+			PDumpCtrlLockRelease();
+		}
+
+		/* Prepare to write the data to the main stream for
+		 * persistent, continuous or framed data. Override and use init
+		 * stream if driver still in init phase and we have not written 
+		 * to it yet.*/
+		PDumpCtrlLockAcquire();
+		if (!PDumpCtrlInitPhaseComplete() && !bDumpedToInitAlready)
+		{
+			PDUMP_HERE(215);
+			phStream = &psChannel->hInit;
+			if (psWOff)
+			{
+				pui32Offset = &psWOff->ui32Init;
+			}
+		}
+		else
+		{
+			PDUMP_HERE(216);
+			phStream = &psChannel->hMain;
+			if (psWOff)
+			{
+				pui32Offset = &psWOff->ui32Main;
+			}
+		}
+		PDumpCtrlLockRelease();
+
+		/* Write the data to the stream */
+		ui32BytesWritten = PDumpWriteToBuffer(*phStream, pui8Data, ui32Size, ui32Flags);
+		if (ui32BytesWritten != ui32Size)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "PDumpWriteToChannel: MAIN Written length (%d) does not match data length (%d), PDump incomplete!", ui32BytesWritten, ui32Size));
+			PDUMP_HERE(217);
+			return IMG_FALSE;
+		}
+
+		if (pui32Offset)
+		{
+			*pui32Offset += ui32BytesWritten;
+		}
+	}
+
+	return IMG_TRUE;
+}
+
+
+PVRSRV_ERROR PDumpWriteParameter(IMG_UINT8 *pui8Data, IMG_UINT32 ui32Size, IMG_UINT32 ui32Flags,
+		IMG_UINT32* pui32FileOffset, IMG_CHAR* aszFilenameStr)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	IMG_BOOL bPDumpCtrlInitPhaseComplete = IMG_FALSE;
+
+	PVR_ASSERT(pui8Data && (ui32Size!=0));
+	PVR_ASSERT(pui32FileOffset && aszFilenameStr);
+
+	PDUMP_HERE(1);
+
+	if (!PDumpWriteAllowed(ui32Flags))
+	{
+		/* Abort write for the above reasons but indicated it was OK to
+		 * caller to avoid disrupting the driver */
+		return PVRSRV_OK;
+	}
+
+	PDUMP_HERE(2);
+
+	PDumpCtrlLockAcquire();
+	bPDumpCtrlInitPhaseComplete = PDumpCtrlInitPhaseComplete();
+	PDumpCtrlLockRelease();
+
+	if (!bPDumpCtrlInitPhaseComplete || (ui32Flags & PDUMP_FLAGS_PERSISTENT))
+	{
+		PDUMP_HERE(3);
+
+		/* Init phase stream not expected to get above the file size max */
+		PVR_ASSERT(g_PDumpParameters.sWOff.ui32Init < g_PDumpParameters.ui32MaxFileSize);
+
+		/* Return the file write offset at which the parameter data was dumped */
+		*pui32FileOffset = g_PDumpParameters.sWOff.ui32Init;
+	}
+	else
+	{
+		PDUMP_HERE(4);
+
+		/* Do we need to signal the PDump client that a split is required? */
+		if (g_PDumpParameters.sWOff.ui32Main + ui32Size > g_PDumpParameters.ui32MaxFileSize)
+		{
+			PDUMP_HERE(5);
+			PDumpOSSetSplitMarker(g_PDumpParameters.sCh.hMain, g_PDumpParameters.sWOff.ui32Main);
+			g_PDumpParameters.ui32FileIdx++;
+			g_PDumpParameters.sWOff.ui32Main = 0;
+		}
+
+		/* Return the file write offset at which the parameter data was dumped */
+		*pui32FileOffset = g_PDumpParameters.sWOff.ui32Main;
+	}
+
+	/* Create the parameter file name, based on index, to be used in the script */
+	if (g_PDumpParameters.ui32FileIdx == 0)
+	{
+		eError = PDumpOSSprintf(aszFilenameStr, PDUMP_PRM_FILE_NAME_MAX, PDUMP_PARAM_0_FILE_NAME);
+	}
+	else
+	{
+		PDUMP_HERE(6);
+		eError = PDumpOSSprintf(aszFilenameStr, PDUMP_PRM_FILE_NAME_MAX, PDUMP_PARAM_N_FILE_NAME, g_PDumpParameters.ui32FileIdx);
+	}
+	PVR_LOGG_IF_ERROR(eError, "PDumpOSSprintf", errExit);
+
+	/* Write the parameter data to the parameter channel */
+	eError = PVRSRV_ERROR_PDUMP_BUFFER_FULL;
+	if (!PDumpWriteToChannel(&g_PDumpParameters.sCh, &g_PDumpParameters.sWOff, pui8Data, ui32Size, ui32Flags))
+	{
+		PDUMP_HERE(7);
+		PVR_LOGG_IF_ERROR(eError, "PDumpWrite", errExit);
+	}
+
+	return PVRSRV_OK;
+
+errExit:
+	return eError;
+}
+
+
+IMG_BOOL PDumpWriteScript(IMG_HANDLE hString, IMG_UINT32 ui32Flags)
+{
+	PVR_ASSERT(hString);
+
+	PDUMP_HERE(201);
+
+	if (!PDumpWriteAllowed(ui32Flags))
+	{
+		/* Abort write for the above reasons but indicated it was OK to
+		 * caller to avoid disrupting the driver */
+		return IMG_TRUE;
+	}
+
+	return PDumpWriteToChannel(&g_PDumpScript.sCh, IMG_NULL, (IMG_UINT8*) hString, (IMG_UINT32) OSStringLength((IMG_CHAR*) hString), ui32Flags);
+}
+
+
+/*****************************************************************************/
+
+
+
+
+
+
+struct _PDUMP_CONNECTION_DATA_ {
+	IMG_UINT32				ui32RefCount;
+	POS_LOCK				hLock;
+	DLLIST_NODE				sListHead;
+	IMG_BOOL				bLastInto;
+	IMG_UINT32				ui32LastSetFrameNumber;
+	IMG_BOOL				bWasInCaptureRange;
+	IMG_BOOL				bIsInCaptureRange;
+	IMG_BOOL				bLastTransitionFailed;
+	SYNC_CONNECTION_DATA	*psSyncConnectionData;
+};
+
+static PDUMP_CONNECTION_DATA * _PDumpConnectionAcquire(PDUMP_CONNECTION_DATA *psPDumpConnectionData)
+{
+	IMG_UINT32 ui32RefCount;
+
+	OSLockAcquire(psPDumpConnectionData->hLock);
+	ui32RefCount = ++psPDumpConnectionData->ui32RefCount;
+	OSLockRelease(psPDumpConnectionData->hLock);
+
+	PDUMP_REFCOUNT_PRINT("%s: PDump connection %p, refcount = %d",
+						 __FUNCTION__, psPDumpConnectionData, ui32RefCount);
+
+	return psPDumpConnectionData;
+}
+
+static IMG_VOID _PDumpConnectionRelease(PDUMP_CONNECTION_DATA *psPDumpConnectionData)
+{
+	IMG_UINT32 ui32RefCount;
+
+	OSLockAcquire(psPDumpConnectionData->hLock);
+	ui32RefCount = --psPDumpConnectionData->ui32RefCount;
+	OSLockRelease(psPDumpConnectionData->hLock);
+
+	if (ui32RefCount == 0)
+	{
+		OSLockDestroy(psPDumpConnectionData->hLock);
+		PVR_ASSERT(dllist_is_empty(&psPDumpConnectionData->sListHead));
+		OSFreeMem(psPDumpConnectionData);
+	}
+
+	PDUMP_REFCOUNT_PRINT("%s: PDump connection %p, refcount = %d",
+						 __FUNCTION__, psPDumpConnectionData, ui32RefCount);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpIsPersistent)
+#endif
+
+IMG_BOOL PDumpIsPersistent(IMG_VOID)
+{
+	IMG_PID uiPID = OSGetCurrentProcessID();
+	IMG_UINTPTR_T puiRetrieve;
+
+	puiRetrieve = HASH_Retrieve(g_psPersistentHash, uiPID);
+	if (puiRetrieve != 0)
+	{
+		PVR_ASSERT(puiRetrieve == PERSISTANT_MAGIC);
+		PDUMP_HEREA(110);
+		return IMG_TRUE;
+	}
+	return IMG_FALSE;
+}
+
+
+/**************************************************************************
+ * Function Name  : GetTempBuffer
+ * Inputs         : None
+ * Outputs        : None
+ * Returns        : Temporary buffer address, or IMG_NULL
+ * Description    : Get temporary buffer address.
+**************************************************************************/
+static IMG_VOID *GetTempBuffer(IMG_VOID)
+{
+	/*
+	 * Allocate the temporary buffer, it it hasn't been allocated already.
+	 * Return the address of the temporary buffer, or IMG_NULL if it
+	 * couldn't be allocated.
+	 * It is expected that the buffer will be allocated once, at driver
+	 * load time, and left in place until the driver unloads.
+	 */
+
+	if (gpvTempBuffer == IMG_NULL)
+	{
+		gpvTempBuffer = OSAllocMem(PDUMP_TEMP_BUFFER_SIZE);
+		if (gpvTempBuffer == IMG_NULL)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "GetTempBuffer: OSAllocMem failed"));
+		}
+	}
+
+	return gpvTempBuffer;
+}
+
+static IMG_VOID FreeTempBuffer(IMG_VOID)
+{
+
+	if (gpvTempBuffer != IMG_NULL)
+	{
+		OSFreeMem(gpvTempBuffer);
+		gpvTempBuffer = IMG_NULL;
+	}
+}
+
+/**************************************************************************
+ * Function Name  : PDumpParameterChannelZeroedPageBlock
+ * Inputs         : None
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : Set up the zero page block in the parameter stream
+**************************************************************************/
+static PVRSRV_ERROR PDumpParameterChannelZeroedPageBlock(IMG_VOID)
+{
+	IMG_UINT8 aui8Zero[32] = { 0 };
+	IMG_SIZE_T uiBytesToWrite;
+	PVRSRV_ERROR eError;
+
+	g_PDumpParameters.uiZeroPageSize = OSGetPageSize();
+
+	/* ensure the zero page size of a multiple of the zero source on the stack */
+	PVR_ASSERT(g_PDumpParameters.uiZeroPageSize % sizeof(aui8Zero) == 0);
+
+	/* the first write gets the parameter file name and stream offset,
+	 * then subsequent writes do not need to know this as the data is
+	 * contiguous in the stream
+	 */
+	PDUMP_LOCK();
+	eError = PDumpWriteParameter(aui8Zero,
+							sizeof(aui8Zero),
+							0,
+							&g_PDumpParameters.uiZeroPageOffset,
+							g_PDumpParameters.szZeroPageFilename);
+
+	if(eError != PVRSRV_OK)
+	{
+		goto err_write;
+	}
+
+	uiBytesToWrite = g_PDumpParameters.uiZeroPageSize - sizeof(aui8Zero);
+
+	while(uiBytesToWrite)
+	{
+		IMG_BOOL bOK;
+
+		bOK = PDumpWriteToChannel(&g_PDumpParameters.sCh, &g_PDumpParameters.sWOff,
+									aui8Zero,
+									sizeof(aui8Zero), 0);
+
+		if(!bOK)
+		{
+			eError = PVRSRV_ERROR_PDUMP_BUFFER_FULL;
+			goto err_write;
+		}
+
+		uiBytesToWrite -= sizeof(aui8Zero);
+	}
+
+err_write:
+	PDUMP_UNLOCK();
+
+	if(eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Failed to initialise parameter stream zero block"));
+	}
+
+	return eError;
+}
+
+/**************************************************************************
+ * Function Name  : PDumpGetParameterZeroPageInfo
+ * Inputs         : None
+ * Outputs        : puiZeroPageOffset: will be set to the offset of the zero page
+ *                : puiZeroPageSize: will be set to the size of the zero page
+ *                : ppszZeroPageFilename: will be set to a pointer to the PRM file name
+ *                :                       containing the zero page
+ * Returns        : None
+ * Description    : Get information about the zero page
+**************************************************************************/
+IMG_VOID PDumpGetParameterZeroPageInfo(PDUMP_FILEOFFSET_T *puiZeroPageOffset,
+					IMG_SIZE_T *puiZeroPageSize,
+					const IMG_CHAR **ppszZeroPageFilename)
+{
+		*puiZeroPageOffset = g_PDumpParameters.uiZeroPageOffset;
+		*puiZeroPageSize = g_PDumpParameters.uiZeroPageSize;
+		*ppszZeroPageFilename = g_PDumpParameters.szZeroPageFilename;
+}
+
+PVRSRV_ERROR PDumpInitCommon(IMG_VOID)
+{
+	PVRSRV_ERROR eError;
+	IMG_UINT32 ui32InitCapMode = 0;
+	IMG_CHAR* pszEnvComment = IMG_NULL;
+
+	PDUMP_HEREA(2010);
+
+	/* Allocate temporary buffer for copying from user space */
+	(IMG_VOID) GetTempBuffer();
+
+	eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	g_psPersistentHash = HASH_Create(PDUMP_PERSISTENT_HASH_SIZE);
+	PVR_LOGG_IF_FALSE((g_psPersistentHash != IMG_NULL), "Failed to create persistent process hash", errExit);
+
+	/* create the global PDump lock */
+	eError = PDumpCreateLockKM();
+	PVR_LOGG_IF_ERROR(eError, "PDumpCreateLockKM", errExit);
+
+	/* Call environment specific PDump initialisation */
+	eError = PDumpOSInit(&g_PDumpParameters.sCh, &g_PDumpScript.sCh, &ui32InitCapMode, &pszEnvComment);
+	PVR_LOGG_IF_ERROR(eError, "PDumpOSInit", errExitLock);
+
+	/* Initialise PDump control module in common layer */
+	eError = PDumpCtrlInit(ui32InitCapMode);
+	PVR_LOGG_IF_ERROR(eError, "PDumpCtrlInit", errExitOSDeInit);
+
+	/* Test PDump initialised and ready by logging driver details */
+	eError = PDumpComment("Driver Product Name: %s", PVRSRVGetSystemName());
+	PVR_LOGG_IF_ERROR(eError, "PDumpComment", errExitCtrl);
+	eError = PDumpComment("Driver Product Version: %s - %s (%s)", PVRVERSION_STRING, PVR_BUILD_DIR, PVR_BUILD_TYPE);
+	PVR_LOGG_IF_ERROR(eError, "PDumpComment", errExitCtrl);
+	if (pszEnvComment != IMG_NULL)
+	{
+		eError = PDumpComment("%s", pszEnvComment);
+		PVR_LOGG_IF_ERROR(eError, "PDumpComment", errExitCtrl);
+	}
+	eError = PDumpComment("Start of Init Phase");
+	PVR_LOGG_IF_ERROR(eError, "PDumpComment", errExitCtrl);
+
+	eError = PDumpParameterChannelZeroedPageBlock();
+	PVR_LOGG_IF_ERROR(eError, "PDumpParameterChannelZeroedPageBlock", errExitCtrl);
+
+	g_PDumpInitialised = IMG_TRUE;
+
+	PDUMP_HEREA(2011);
+
+	return PVRSRV_OK;
+
+errExitCtrl:
+	PDumpCtrlDeInit();
+errExitOSDeInit:
+	PDUMP_HEREA(2018);
+	PDumpOSDeInit(&g_PDumpParameters.sCh, &g_PDumpScript.sCh);
+errExitLock:
+	PDUMP_HEREA(2019);
+	PDumpDestroyLockKM();
+errExit:
+	return eError;
+}
+
+IMG_VOID PDumpDeInitCommon(IMG_VOID)
+{
+	PDUMP_HEREA(2020);
+
+	g_PDumpInitialised = IMG_FALSE;
+
+	/* Free temporary buffer */
+	FreeTempBuffer();
+
+	/* DeInit the PDUMP_CTRL_STATE data */
+	PDumpCtrlDeInit();
+
+	/* Call environment specific PDump Deinitialisation */
+	PDumpOSDeInit(&g_PDumpParameters.sCh, &g_PDumpScript.sCh);
+
+	/* take down the global PDump lock */
+	PDumpDestroyLockKM();
+}
+
+IMG_BOOL PDumpReady(IMG_VOID)
+{
+	return g_PDumpInitialised;
+}
+
+
+PVRSRV_ERROR PDumpAddPersistantProcess(IMG_VOID)
+{
+	IMG_PID uiPID = OSGetCurrentProcessID();
+	IMG_UINTPTR_T puiRetrieve;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	PDUMP_HEREA(121);
+
+	puiRetrieve = HASH_Retrieve(g_psPersistentHash, uiPID);
+	if (puiRetrieve == 0)
+	{
+		if (!HASH_Insert(g_psPersistentHash, uiPID, PERSISTANT_MAGIC))
+		{
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		}
+	}
+	else
+	{
+		PVR_ASSERT(puiRetrieve == PERSISTANT_MAGIC);
+	}
+	PDUMP_HEREA(122);
+
+	return eError;
+}
+
+PVRSRV_ERROR PDumpStartInitPhaseKM(IMG_VOID)
+{
+	PDUMPCOMMENT("Start Init Phase");
+	PDumpCtrlLockAcquire();
+	PDumpCtrlSetInitPhaseComplete(IMG_FALSE);
+	PDumpCtrlLockRelease();
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PDumpStopInitPhaseKM(IMG_MODULE_ID eModuleID)
+{
+	/* Check with the OS we a running on */
+	if (PDumpOSAllowInitPhaseToComplete(eModuleID))
+	{
+		PDUMPCOMMENT("Stop Init Phase");
+		PDumpCtrlLockAcquire();
+		PDumpCtrlSetInitPhaseComplete(IMG_TRUE);
+		PDumpCtrlLockRelease();
+	}
+
+	return PVRSRV_OK;
+}
+
+IMG_BOOL PDumpIsLastCaptureFrameKM(IMG_VOID)
+{
+	IMG_BOOL bIsLastCaptureFrame = IMG_FALSE;
+
+	PDumpCtrlLockAcquire();
+	bIsLastCaptureFrame = PDumpCtrlIsLastCaptureFrame();
+	PDumpCtrlLockRelease();
+
+	return bIsLastCaptureFrame;
+}
+
+
+
+typedef struct _PDUMP_Transition_DATA_ {
+	PFN_PDUMP_TRANSITION	pfnCallback;
+	IMG_PVOID				hPrivData;
+	PDUMP_CONNECTION_DATA	*psPDumpConnectionData;
+	DLLIST_NODE				sNode;
+} PDUMP_Transition_DATA;
+
+PVRSRV_ERROR PDumpRegisterTransitionCallback(PDUMP_CONNECTION_DATA *psPDumpConnectionData,
+											  PFN_PDUMP_TRANSITION pfnCallback,
+											  IMG_PVOID hPrivData,
+											  IMG_PVOID *ppvHandle)
+{
+	PDUMP_Transition_DATA *psData;
+	PVRSRV_ERROR eError;
+
+	psData = OSAllocMem(sizeof(*psData));
+	if (psData == IMG_NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_alloc;
+	}
+
+	/* Setup the callback and add it to the list for this process */
+	psData->pfnCallback = pfnCallback;
+	psData->hPrivData = hPrivData;
+	dllist_add_to_head(&psPDumpConnectionData->sListHead, &psData->sNode);
+
+	/* Take a reference on the connection so it doesn't get freed too early */
+	psData->psPDumpConnectionData =_PDumpConnectionAcquire(psPDumpConnectionData);
+	*ppvHandle = psData;
+
+	return PVRSRV_OK;
+
+fail_alloc:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+IMG_VOID PDumpUnregisterTransitionCallback(IMG_PVOID pvHandle)
+{
+	PDUMP_Transition_DATA *psData = pvHandle;
+
+	dllist_remove_node(&psData->sNode);
+	_PDumpConnectionRelease(psData->psPDumpConnectionData);
+	OSFreeMem(psData);
+}
+
+typedef struct _PTCB_DATA_ {
+	IMG_BOOL bInto;
+	IMG_BOOL bContinuous;
+	PVRSRV_ERROR eError;
+} PTCB_DATA;
+
+static IMG_BOOL _PDumpTransition(DLLIST_NODE *psNode, IMG_PVOID hData)
+{
+	PDUMP_Transition_DATA *psData = IMG_CONTAINER_OF(psNode, PDUMP_Transition_DATA, sNode);
+	PTCB_DATA *psPTCBData = (PTCB_DATA *) hData;
+
+	psPTCBData->eError = psData->pfnCallback(psData->hPrivData, psPTCBData->bInto, psPTCBData->bContinuous);
+	if (psPTCBData->eError != PVRSRV_OK)
+	{
+		/* Got an error, break out of the loop */
+		return IMG_FALSE;
+	}
+
+	return IMG_TRUE;
+}
+
+PVRSRV_ERROR PDumpTransition(PDUMP_CONNECTION_DATA *psPDumpConnectionData, IMG_BOOL bInto, IMG_BOOL bContinuous)
+{
+	PTCB_DATA sPTCBData;
+
+	/* Only call the callbacks if we've really done a Transition */
+	if (bInto != psPDumpConnectionData->bLastInto)
+	{
+		/* We're Transitioning either into or out of capture range */
+		sPTCBData.bInto = bInto;
+		sPTCBData.bContinuous = bContinuous;
+		sPTCBData.eError = PVRSRV_OK;
+		dllist_foreach_node(&psPDumpConnectionData->sListHead, _PDumpTransition, &sPTCBData);
+		if (sPTCBData.eError != PVRSRV_OK)
+		{
+			/* We failed so bail out leaving the state as it is ready for the retry */
+			return sPTCBData.eError;
+		}
+
+		if (bInto)
+		{
+			SyncConnectionPDumpSyncBlocks(psPDumpConnectionData->psSyncConnectionData);
+		}
+		psPDumpConnectionData->bLastInto = bInto;
+	}
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PDumpIsCaptureFrameKM(IMG_BOOL *bIsCapturing)
+{
+	PDumpCtrlLockAcquire();
+	PDumpCtrlIsCaptureFrame(bIsCapturing);
+	PDumpCtrlLockRelease();
+
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR _PDumpSetFrameKM(CONNECTION_DATA *psConnection, IMG_UINT32 ui32Frame)
+{
+	PDUMP_CONNECTION_DATA *psPDumpConnectionData = psConnection->psPDumpConnectionData;
+	IMG_BOOL bWasInCaptureRange = IMG_FALSE;
+	IMG_BOOL bIsInCaptureRange = IMG_FALSE;
+	PVRSRV_ERROR eError;
+
+	/*
+		Note:
+		As we can't test to see if the new frame will be in capture range
+		before we set the frame number and we don't want to roll back
+		the frame number if we fail then we have to save the "transient"
+		data which decides if we're entering or exiting capture range
+		along with a failure boolean so we know what to do on a retry
+	*/
+	if (psPDumpConnectionData->ui32LastSetFrameNumber != ui32Frame)
+	{
+		/*
+			The boolean values below decide if the PDump transition
+			should trigger because of the current context setting the
+			frame number, hence the functions below should execute
+			atomically and do not give a chance to some other context
+			to transition
+		*/
+		PDumpCtrlLockAcquire(); 
+		
+		PDumpCtrlIsCaptureFrame(&bWasInCaptureRange);
+		PDumpCtrlSetCurrentFrame(ui32Frame);
+		PDumpCtrlIsCaptureFrame(&bIsInCaptureRange);
+
+		PDumpCtrlLockRelease();
+
+		psPDumpConnectionData->ui32LastSetFrameNumber = ui32Frame;
+
+		/* Save the Transition data incase we fail the Transition */
+		psPDumpConnectionData->bWasInCaptureRange = bWasInCaptureRange;
+		psPDumpConnectionData->bIsInCaptureRange = bIsInCaptureRange;
+	}
+	else if (psPDumpConnectionData->bLastTransitionFailed)
+	{
+		/* Load the Transition data so we can try again */
+		bWasInCaptureRange = psPDumpConnectionData->bWasInCaptureRange;
+		bIsInCaptureRange = psPDumpConnectionData->bIsInCaptureRange;
+	}
+	else
+	{
+		/* New frame is the same as the last frame set and the last
+		 * transition succeeded, no need to perform another transition.
+		 */
+		return PVRSRV_OK;
+	}
+
+	if (!bWasInCaptureRange && bIsInCaptureRange)
+	{
+		eError = PDumpTransition(psPDumpConnectionData, IMG_TRUE, IMG_FALSE);
+		if (eError != PVRSRV_OK)
+		{
+			goto fail_Transition;
+		}
+	}
+	else if (bWasInCaptureRange && !bIsInCaptureRange)
+	{
+		eError = PDumpTransition(psPDumpConnectionData, IMG_FALSE, IMG_FALSE);
+		if (eError != PVRSRV_OK)
+		{
+			goto fail_Transition;
+		}
+	}
+	else
+	{
+		/* Here both previous and current frames are in or out of range */
+		/* Should never reach here due to the above goto success */
+	}
+
+	psPDumpConnectionData->bLastTransitionFailed = IMG_FALSE;
+	return PVRSRV_OK;
+
+fail_Transition:
+	psPDumpConnectionData->bLastTransitionFailed = IMG_TRUE;
+	return eError;
+}
+
+PVRSRV_ERROR PDumpSetFrameKM(CONNECTION_DATA *psConnection, IMG_UINT32 ui32Frame)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;	
+	
+
+#if defined(PDUMP_TRACE_STATE)
+	PVR_DPF((PVR_DBG_WARNING, "PDumpSetFrameKM: ui32Frame( %d )", ui32Frame));
+#endif
+
+	/* Ignore errors as it is not fatal if the comments do not appear */
+	(void) PDumpComment("Set pdump frame %u (pre)", ui32Frame);
+
+	eError = _PDumpSetFrameKM(psConnection, ui32Frame);
+	if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_LOG_ERROR(eError, "_PDumpSetFrameKM");
+	}
+
+	(void) PDumpComment("Set pdump frame %u (post)", ui32Frame);
+
+	return eError;
+}
+
+PVRSRV_ERROR PDumpGetFrameKM(CONNECTION_DATA *psConnection, IMG_UINT32* pui32Frame)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	/*
+		It may be safe to avoid acquiring this lock here as all the other calls
+		which read/modify current frame will wait on the PDump Control bridge
+		lock first. Also, in no way as of now, does the PDumping app modify the
+		current frame through a call which acquires the global bridge lock.
+		Still, as a legacy we acquire and then read.
+	*/	
+	PDumpCtrlLockAcquire();
+
+	*pui32Frame = PDumpCtrlGetCurrentFrame();
+
+	PDumpCtrlLockRelease();
+	return eError;
+}
+
+PVRSRV_ERROR PDumpSetDefaultCaptureParamsKM(IMG_UINT32 ui32Mode,
+                                           IMG_UINT32 ui32Start,
+                                           IMG_UINT32 ui32End,
+                                           IMG_UINT32 ui32Interval,
+                                           IMG_UINT32 ui32MaxParamFileSize)
+{
+	/*
+		Acquire PDUMP_CTRL_STATE struct lock before modifications as a 
+		PDumping app may be reading the state data for some checks
+	*/
+	PDumpCtrlLockAcquire();
+	PDumpCtrlSetDefaultCaptureParams(ui32Mode, ui32Start, ui32End, ui32Interval);
+	PDumpCtrlLockRelease();
+
+	if (ui32MaxParamFileSize == 0)
+	{
+		g_PDumpParameters.ui32MaxFileSize = PDUMP_PRM_FILE_SIZE_MAX;
+	}
+	else
+	{
+		g_PDumpParameters.ui32MaxFileSize = ui32MaxParamFileSize;
+	}
+	return PVRSRV_OK;
+}
+
+
+/**************************************************************************
+ * Function Name  : PDumpReg32
+ * Inputs         : pszPDumpDevName, Register offset, and value to write
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : Create a PDUMP string, which represents a register write
+**************************************************************************/
+PVRSRV_ERROR PDumpReg32(IMG_CHAR	*pszPDumpRegName,
+						IMG_UINT32	ui32Reg,
+						IMG_UINT32	ui32Data,
+						IMG_UINT32	ui32Flags)
+{
+	PVRSRV_ERROR eErr;
+	PDUMP_GET_SCRIPT_STRING()
+	PDUMP_DBG(("PDumpReg32"));
+
+	eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "WRW :%s:0x%08X 0x%08X", pszPDumpRegName, ui32Reg, ui32Data);
+
+	if (eErr != PVRSRV_OK)
+	{
+		return eErr;
+	}
+
+	PDUMP_LOCK();
+	PDumpWriteScript(hScript, ui32Flags);
+	PDUMP_UNLOCK();
+
+	return PVRSRV_OK;
+}
+
+
+/**************************************************************************
+ * Function Name  : PDumpReg64
+ * Inputs         : pszPDumpDevName, Register offset, and value to write
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : Create a PDUMP string, which represents a register write
+**************************************************************************/
+PVRSRV_ERROR PDumpReg64(IMG_CHAR	*pszPDumpRegName,
+						IMG_UINT32	ui32Reg,
+						IMG_UINT64	ui64Data,
+						IMG_UINT32	ui32Flags)
+{
+	PVRSRV_ERROR eErr;
+	PDUMP_GET_SCRIPT_STRING()
+	PDUMP_DBG(("PDumpRegKM"));
+
+	eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "WRW64 :%s:0x%08X 0x%010llX", pszPDumpRegName, ui32Reg, ui64Data);
+
+	if (eErr != PVRSRV_OK)
+	{
+		return eErr;
+	}
+
+	PDUMP_LOCK();
+	PDumpWriteScript(hScript, ui32Flags);
+	PDUMP_UNLOCK();
+
+	return PVRSRV_OK;
+}
+
+
+/**************************************************************************
+ * Function Name  : PDumpLDW
+ * Inputs         : pcBuffer -- buffer to send to register bank
+ *                  ui32NumLoadBytes -- number of bytes in pcBuffer
+ *                  pszDevSpaceName -- devspace for register bank
+ *                  ui32Offset -- value of offset control register
+ *                  ui32PDumpFlags -- flags to pass to PDumpOSWriteScript
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : Dumps the contents of pcBuffer to a .prm file and
+ *                  writes an LDW directive to the pdump output.
+ *                  NB: ui32NumLoadBytes must be divisible by 4
+**************************************************************************/
+PVRSRV_ERROR PDumpLDW(IMG_CHAR      *pcBuffer,
+                      IMG_CHAR      *pszDevSpaceName,
+                      IMG_UINT32    ui32OffsetBytes,
+                      IMG_UINT32    ui32NumLoadBytes,
+                      PDUMP_FLAGS_T uiPDumpFlags)
+{
+	PVRSRV_ERROR eError;
+	IMG_CHAR aszParamStreamFilename[PMR_MAX_PARAMSTREAM_FILENAME_LENGTH_DEFAULT];
+	IMG_UINT32 ui32ParamStreamFileOffset;
+
+	PDUMP_GET_SCRIPT_STRING()
+
+	eError = PDumpWriteBuffer((IMG_UINT8 *)pcBuffer,
+	                          ui32NumLoadBytes,
+	                          uiPDumpFlags,
+	                          &aszParamStreamFilename[0],
+	                          sizeof(aszParamStreamFilename),
+	                          &ui32ParamStreamFileOffset);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+
+	uiPDumpFlags |= (PDumpIsPersistent()) ? PDUMP_FLAGS_PERSISTENT : 0;
+
+	eError = PDumpOSBufprintf(hScript,
+	                          ui32MaxLen,
+	                          "LDW :%s:0x%x 0x%x 0x%x %s\n",
+	                          pszDevSpaceName,
+	                          ui32OffsetBytes,
+	                          ui32NumLoadBytes / (IMG_UINT32)sizeof(IMG_UINT32),
+	                          ui32ParamStreamFileOffset,
+	                          aszParamStreamFilename);
+
+	if(eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	PDUMP_LOCK();
+	PDumpWriteScript(hScript, uiPDumpFlags);
+	PDUMP_UNLOCK();
+
+	return PVRSRV_OK;
+}
+
+/**************************************************************************
+ * Function Name  : PDumpSAW
+ * Inputs         : pszDevSpaceName -- device space from which to output
+ *                  ui32Offset -- offset value from register base
+ *                  ui32NumSaveBytes -- number of bytes to output
+ *                  pszOutfileName -- name of file to output to
+ *                  ui32OutfileOffsetByte -- offset into output file to write
+ *                  uiPDumpFlags -- flags to pass to PDumpOSWriteScript
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : Dumps the contents of a register bank into a file
+ *                  NB: ui32NumSaveBytes must be divisible by 4
+**************************************************************************/
+PVRSRV_ERROR PDumpSAW(IMG_CHAR      *pszDevSpaceName,
+                      IMG_UINT32    ui32HPOffsetBytes,
+                      IMG_UINT32    ui32NumSaveBytes,
+                      IMG_CHAR      *pszOutfileName,
+                      IMG_UINT32    ui32OutfileOffsetByte,
+                      PDUMP_FLAGS_T uiPDumpFlags)
+{
+	PVRSRV_ERROR eError;
+
+	PDUMP_GET_SCRIPT_STRING()
+
+	PVR_DPF((PVR_DBG_ERROR, "PDumpSAW\n"));
+
+	uiPDumpFlags |= (PDumpIsPersistent()) ? PDUMP_FLAGS_PERSISTENT : 0;
+
+	eError = PDumpOSBufprintf(hScript,
+	                          ui32MaxLen,
+	                          "SAW :%s:0x%x 0x%x 0x%x %s\n",
+	                          pszDevSpaceName,
+	                          ui32HPOffsetBytes,
+	                          ui32NumSaveBytes / (IMG_UINT32)sizeof(IMG_UINT32),
+	                          ui32OutfileOffsetByte,
+	                          pszOutfileName);
+
+	if(eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PDumpSAW PDumpOSBufprintf failed: eError=%u\n", eError));
+		return eError;
+	}
+
+	PDUMP_LOCK();
+	if(! PDumpWriteScript(hScript, uiPDumpFlags))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PDumpSAW PDumpWriteScript failed!\n"));
+	}
+	PDUMP_UNLOCK();
+
+	return PVRSRV_OK;
+	
+}
+
+
+/**************************************************************************
+ * Function Name  : PDumpRegPolKM
+ * Inputs         : Description of what this register read is trying to do
+ *					pszPDumpDevName
+ *					Register offset
+ *					expected value
+ *					mask for that value
+ * Outputs        : None
+ * Returns        : None
+ * Description    : Create a PDUMP string which represents a register read
+ *					with the expected value
+**************************************************************************/
+PVRSRV_ERROR PDumpRegPolKM(IMG_CHAR				*pszPDumpRegName,
+						   IMG_UINT32			ui32RegAddr, 
+						   IMG_UINT32			ui32RegValue, 
+						   IMG_UINT32			ui32Mask,
+						   IMG_UINT32			ui32Flags,
+						   PDUMP_POLL_OPERATOR	eOperator)
+{
+	/* Timings correct for linux and XP */
+	/* Timings should be passed in */
+	#define POLL_DELAY			1000U
+	#define POLL_COUNT_LONG		(2000000000U / POLL_DELAY)
+	#define POLL_COUNT_SHORT	(1000000U / POLL_DELAY)
+
+	PVRSRV_ERROR eErr;
+	IMG_UINT32	ui32PollCount;
+
+	PDUMP_GET_SCRIPT_STRING();
+	PDUMP_DBG(("PDumpRegPolKM"));
+	if ( PDumpIsPersistent() )
+	{
+		/* Don't pdump-poll if the process is persistent */
+		return PVRSRV_OK;
+	}
+
+	ui32PollCount = POLL_COUNT_LONG;
+
+	eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "POL :%s:0x%08X 0x%08X 0x%08X %d %u %d",
+							pszPDumpRegName, ui32RegAddr, ui32RegValue,
+							ui32Mask, eOperator, ui32PollCount, POLL_DELAY);
+	if(eErr != PVRSRV_OK)
+	{
+		return eErr;
+	}
+
+	PDUMP_LOCK();
+	PDumpWriteScript(hScript, ui32Flags);
+	PDUMP_UNLOCK();
+
+	return PVRSRV_OK;
+}
+
+
+/**************************************************************************
+ * Function Name  : PDumpCommentKM
+ * Inputs         : pszComment, ui32Flags
+ * Outputs        : None
+ * Returns        : None
+ * Description    : Dumps a comment
+**************************************************************************/
+PVRSRV_ERROR PDumpCommentKM(IMG_CHAR *pszComment, IMG_UINT32 ui32Flags)
+{
+	PVRSRV_ERROR eErr;
+#if defined(PDUMP_DEBUG_OUTFILES)
+	IMG_CHAR pszTemp[256];
+#endif
+	PDUMP_GET_SCRIPT_STRING();
+	PDUMP_DBG(("PDumpCommentKM"));
+
+#if defined(PDUMP_DEBUG_OUTFILES)
+	/* include comments in the "extended" init phase.
+	 * default is to ignore them.
+	 */
+	ui32Flags |= ( PDumpIsPersistent() ) ? PDUMP_FLAGS_PERSISTENT : 0;
+#endif
+
+	if((pszComment == IMG_NULL) || (PDumpOSBuflen(pszComment, ui32MaxLen) == 0))
+	{
+		/* PDumpOSVerifyLineEnding silently fails if pszComment is too short to
+		   actually hold the line endings that it's trying to enforce, so
+		   short circuit it and force safety */
+		pszComment = "\n";
+	}
+	else
+	{
+		/* Put line ending sequence at the end if it isn't already there */
+		PDumpOSVerifyLineEnding(pszComment, ui32MaxLen);
+	}
+
+	PDUMP_LOCK();
+
+#if defined(PDUMP_DEBUG_OUTFILES)
+	/* Prefix comment with PID and line number */
+	eErr = PDumpOSSprintf(pszTemp, 256, "%u %u:%lu %s: %s",
+		g_ui32EveryLineCounter,
+		OSGetCurrentProcessID(),
+		(unsigned long)OSGetCurrentThreadID(),
+		OSGetCurrentProcessName(),
+		pszComment);
+
+	/* Append the comment to the script stream */
+	eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "-- %s",
+		pszTemp);
+#else
+	eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "-- %s",
+		pszComment);
+#endif
+	if( (eErr != PVRSRV_OK) &&
+		(eErr != PVRSRV_ERROR_PDUMP_BUF_OVERFLOW))
+	{
+		PVR_LOGG_IF_ERROR(eErr, "PDumpOSBufprintf", ErrUnlock);
+	}
+
+	if (!PDumpWriteScript(hScript, ui32Flags))
+	{
+		if(ui32Flags & PDUMP_FLAGS_CONTINUOUS)
+		{
+			eErr = PVRSRV_ERROR_PDUMP_BUFFER_FULL;
+			PVR_LOGG_IF_ERROR(eErr, "PDumpWriteScript", ErrUnlock);
+		}
+		else
+		{
+			eErr = PVRSRV_ERROR_CMD_NOT_PROCESSED;
+			PVR_LOGG_IF_ERROR(eErr, "PDumpWriteScript", ErrUnlock);
+		}
+	}
+
+ErrUnlock:
+	PDUMP_UNLOCK();
+	return eErr;
+}
+
+/**************************************************************************
+ * Function Name  : PDumpCommentWithFlags
+ * Inputs         : psPDev - PDev for PDump device
+ *				  : pszFormat - format string for comment
+ *				  : ... - args for format string
+ * Outputs        : None
+ * Returns        : None
+ * Description    : PDumps a comments
+**************************************************************************/
+PVRSRV_ERROR PDumpCommentWithFlags(IMG_UINT32 ui32Flags, IMG_CHAR * pszFormat, ...)
+{
+	PVRSRV_ERROR eErr;
+	PDUMP_va_list ap;
+	PDUMP_GET_MSG_STRING();
+
+	/* Construct the string */
+	PDUMP_va_start(ap, pszFormat);
+	eErr = PDumpOSVSprintf(pszMsg, ui32MaxLen, pszFormat, ap);
+	PDUMP_va_end(ap);
+
+	if(eErr != PVRSRV_OK)
+	{
+		return eErr;
+	}
+	return PDumpCommentKM(pszMsg, ui32Flags);
+}
+
+/**************************************************************************
+ * Function Name  : PDumpComment
+ * Inputs         : psPDev - PDev for PDump device
+ *				  : pszFormat - format string for comment
+ *				  : ... - args for format string
+ * Outputs        : None
+ * Returns        : None
+ * Description    : PDumps a comments
+**************************************************************************/
+PVRSRV_ERROR PDumpComment(IMG_CHAR *pszFormat, ...)
+{
+	PVRSRV_ERROR eErr;
+	PDUMP_va_list ap;
+	PDUMP_GET_MSG_STRING();
+
+	/* Construct the string */
+	PDUMP_va_start(ap, pszFormat);
+	eErr = PDumpOSVSprintf(pszMsg, ui32MaxLen, pszFormat, ap);
+	PDUMP_va_end(ap);
+	PVR_LOGR_IF_ERROR(eErr, "PDumpOSVSprintf");
+
+	return PDumpCommentKM(pszMsg, PDUMP_FLAGS_CONTINUOUS);
+}
+
+/*************************************************************************/ /*!
+ * Function Name  : PDumpPanic
+ * Inputs         : ui32PanicNo - Unique number for panic condition
+ *				  : pszPanicMsg - Panic reason message limited to ~90 chars
+ *				  : pszPPFunc   - Function name string where panic occurred
+ *				  : ui32PPline  - Source line number where panic occurred
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : PDumps a panic assertion. Used when the host driver
+ *                : detects a condition that will lead to an invalid PDump
+ *                : script that cannot be played back off-line.
+ */ /*************************************************************************/
+PVRSRV_ERROR PDumpPanic(IMG_UINT32      ui32PanicNo,
+						IMG_CHAR*       pszPanicMsg,
+						const IMG_CHAR* pszPPFunc,
+						IMG_UINT32      ui32PPline)
+{
+	PVRSRV_ERROR   eError = PVRSRV_OK;
+	PDUMP_FLAGS_T  uiPDumpFlags = PDUMP_FLAGS_CONTINUOUS;
+	IMG_CHAR       pszConsoleMsg[] =
+"COM ***************************************************************************\n"
+"COM Script invalid and not compatible with off-line playback. Check test \n"
+"COM parameters and driver configuration, stop imminent.\n"
+"COM ***************************************************************************\n";
+	PDUMP_GET_SCRIPT_STRING();
+
+	/* Log the panic condition to the live kern.log in both REL and DEB mode 
+	 * to aid user PDump trouble shooting. */
+	PVR_LOG(("PDUMP PANIC %08x: %s", ui32PanicNo, pszPanicMsg));
+	PVR_DPF((PVR_DBG_MESSAGE, "PDUMP PANIC start %s:%d", pszPPFunc, ui32PPline));
+
+	/* Check the supplied panic reason string is within length limits */
+	PVR_ASSERT(OSStringLength(pszPanicMsg)+sizeof("PANIC   ") < PVRSRV_PDUMP_MAX_COMMENT_SIZE-1);
+
+	/* Add persistent flag if required and obtain lock to keep the multi-line
+	 * panic statement together in a single atomic write */
+	uiPDumpFlags |= (PDumpIsPersistent()) ? PDUMP_FLAGS_PERSISTENT : 0;
+	PDUMP_LOCK();
+
+	/* Write -- Panic start (Function:line) */
+	eError = PDumpOSBufprintf(hScript, ui32MaxLen, "-- Panic start (%s:%d)", pszPPFunc, ui32PPline);
+	PVR_LOGG_IF_ERROR(eError, "PDumpOSBufprintf", e1);
+	(IMG_VOID)PDumpWriteScript(hScript, uiPDumpFlags);
+
+	/* Write COM <message> x4 */
+	eError = PDumpOSBufprintf(hScript, ui32MaxLen, pszConsoleMsg);
+	PVR_LOGG_IF_ERROR(eError, "PDumpOSBufprintf", e1);
+	(IMG_VOID)PDumpWriteScript(hScript, uiPDumpFlags);
+
+	/* Write PANIC no msg command */
+	eError = PDumpOSBufprintf(hScript, ui32MaxLen, "PANIC %08x %s", ui32PanicNo, pszPanicMsg);
+	PVR_LOGG_IF_ERROR(eError, "PDumpOSBufprintf", e1);
+	(IMG_VOID)PDumpWriteScript(hScript, uiPDumpFlags);
+
+	/* Write -- Panic end */
+	eError = PDumpOSBufprintf(hScript, ui32MaxLen, "-- Panic end");
+	PVR_LOGG_IF_ERROR(eError, "PDumpOSBufprintf", e1);
+	(IMG_VOID)PDumpWriteScript(hScript, uiPDumpFlags);
+
+e1:
+	PDUMP_UNLOCK();
+
+	return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PDumpBitmapKM
+
+ @Description
+
+ Dumps a bitmap from device memory to a file
+
+ @Input    psDevId
+ @Input    pszFileName
+ @Input    ui32FileOffset
+ @Input    ui32Width
+ @Input    ui32Height
+ @Input    ui32StrideInBytes
+ @Input    sDevBaseAddr
+ @Input    ui32Size
+ @Input    ePixelFormat
+ @Input    eMemFormat
+ @Input    ui32PDumpFlags
+
+ @Return   PVRSRV_ERROR			:
+
+******************************************************************************/
+PVRSRV_ERROR PDumpBitmapKM(	PVRSRV_DEVICE_NODE *psDeviceNode,
+							IMG_CHAR *pszFileName,
+							IMG_UINT32 ui32FileOffset,
+							IMG_UINT32 ui32Width,
+							IMG_UINT32 ui32Height,
+							IMG_UINT32 ui32StrideInBytes,
+							IMG_DEV_VIRTADDR sDevBaseAddr,
+							IMG_UINT32 ui32MMUContextID,
+							IMG_UINT32 ui32Size,
+							PDUMP_PIXEL_FORMAT ePixelFormat,
+							IMG_UINT32 ui32AddrMode,
+							IMG_UINT32 ui32PDumpFlags)
+{
+	PVRSRV_DEVICE_IDENTIFIER *psDevId = &psDeviceNode->sDevId;
+	PVRSRV_ERROR eErr=0;
+	PDUMP_GET_SCRIPT_STRING();
+
+	if ( PDumpIsPersistent() )
+	{
+		return PVRSRV_OK;
+	}
+	
+	PDumpCommentWithFlags(ui32PDumpFlags, "Dump bitmap of render.");
+	
+	switch (ePixelFormat)
+	{
+		case PVRSRV_PDUMP_PIXEL_FORMAT_YUV8:
+		{
+			PDumpCommentWithFlags(ui32PDumpFlags, "YUV data. Switching from SII to SAB. Width=0x%08X Height=0x%08X Stride=0x%08X",
+							 						ui32Width, ui32Height, ui32StrideInBytes);
+							 						
+			eErr = PDumpOSBufprintf(hScript,
+									ui32MaxLen,
+									"SAB :%s:v%x:0x%010llX 0x%08X 0x%08X %s.bin\n",
+									psDevId->pszPDumpDevName,
+									ui32MMUContextID,
+									sDevBaseAddr.uiAddr,
+									ui32Size,
+									ui32FileOffset,
+									pszFileName);
+			
+			if (eErr != PVRSRV_OK)
+			{
+				return eErr;
+			}
+			
+			PDUMP_LOCK();
+			PDumpWriteScript( hScript, ui32PDumpFlags);
+			PDUMP_UNLOCK();		
+			break;
+		}
+		case PVRSRV_PDUMP_PIXEL_FORMAT_420PL12YUV8: // YUV420 2 planes
+		{
+			const IMG_UINT32 ui32Plane0Size = ui32StrideInBytes*ui32Height;
+			const IMG_UINT32 ui32Plane1Size = ui32Plane0Size>>1; // YUV420
+			const IMG_UINT32 ui32Plane1FileOffset = ui32FileOffset + ui32Plane0Size;
+			const IMG_UINT32 ui32Plane1MemOffset = ui32Plane0Size;
+			
+			PDumpCommentWithFlags(ui32PDumpFlags, "YUV420 2-plane. Width=0x%08X Height=0x%08X Stride=0x%08X",
+							 						ui32Width, ui32Height, ui32StrideInBytes);
+			eErr = PDumpOSBufprintf(hScript,
+						ui32MaxLen,
+						"SII %s %s.bin :%s:v%x:0x%010llX 0x%08X 0x%08X :%s:v%x:0x%010llX 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X",
+						pszFileName,
+						pszFileName,
+						
+						// Plane 0 (Y)
+						psDevId->pszPDumpDevName,	// memsp
+						ui32MMUContextID,			// Context id
+						sDevBaseAddr.uiAddr,		// virtaddr
+						ui32Plane0Size,				// size
+						ui32FileOffset,				// fileoffset
+						
+						// Plane 1 (UV)
+						psDevId->pszPDumpDevName,	// memsp
+						ui32MMUContextID,			// Context id
+						sDevBaseAddr.uiAddr+ui32Plane1MemOffset,	// virtaddr
+						ui32Plane1Size,				// size
+						ui32Plane1FileOffset,		// fileoffset
+						
+						ePixelFormat,
+						ui32Width,
+						ui32Height,
+						ui32StrideInBytes,
+						ui32AddrMode);
+						
+			if (eErr != PVRSRV_OK)
+			{
+				return eErr;
+			}
+			
+			PDUMP_LOCK();
+			PDumpWriteScript( hScript, ui32PDumpFlags);
+			PDUMP_UNLOCK();
+			break;
+		}
+		
+		case PVRSRV_PDUMP_PIXEL_FORMAT_YUV_YV12: // YUV420 3 planes
+		{
+			const IMG_UINT32 ui32Plane0Size = ui32StrideInBytes*ui32Height;
+			const IMG_UINT32 ui32Plane1Size = ui32Plane0Size>>2; // YUV420
+			const IMG_UINT32 ui32Plane2Size = ui32Plane1Size;
+			const IMG_UINT32 ui32Plane1FileOffset = ui32FileOffset + ui32Plane0Size;
+			const IMG_UINT32 ui32Plane2FileOffset = ui32Plane1FileOffset + ui32Plane1Size;
+			const IMG_UINT32 ui32Plane1MemOffset = ui32Plane0Size;
+			const IMG_UINT32 ui32Plane2MemOffset = ui32Plane0Size+ui32Plane1Size;
+	
+			PDumpCommentWithFlags(ui32PDumpFlags, "YUV420 3-plane. Width=0x%08X Height=0x%08X Stride=0x%08X",
+							 						ui32Width, ui32Height, ui32StrideInBytes);
+			eErr = PDumpOSBufprintf(hScript,
+						ui32MaxLen,
+						"SII %s %s.bin :%s:v%x:0x%010llX 0x%08X 0x%08X :%s:v%x:0x%010llX 0x%08X 0x%08X :%s:v%x:0x%010llX 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X",
+						pszFileName,
+						pszFileName,
+						
+						// Plane 0 (Y)
+						psDevId->pszPDumpDevName,	// memsp
+						ui32MMUContextID,			// MMU context id
+						sDevBaseAddr.uiAddr,		// virtaddr
+						ui32Plane0Size,				// size
+						ui32FileOffset,				// fileoffset
+						
+						// Plane 1 (U)
+						psDevId->pszPDumpDevName,	// memsp
+						ui32MMUContextID,			// MMU context id
+						sDevBaseAddr.uiAddr+ui32Plane1MemOffset,	// virtaddr
+						ui32Plane1Size,				// size
+						ui32Plane1FileOffset,		// fileoffset
+						
+						// Plane 2 (V)
+						psDevId->pszPDumpDevName,	// memsp
+						ui32MMUContextID,			// MMU context id
+						sDevBaseAddr.uiAddr+ui32Plane2MemOffset,	// virtaddr
+						ui32Plane2Size,				// size
+						ui32Plane2FileOffset,		// fileoffset
+						
+						ePixelFormat,
+						ui32Width,
+						ui32Height,
+						ui32StrideInBytes,
+						ui32AddrMode);
+						
+			if (eErr != PVRSRV_OK)
+			{
+				return eErr;
+			}
+			
+			PDUMP_LOCK();
+			PDumpWriteScript( hScript, ui32PDumpFlags);
+			PDUMP_UNLOCK();
+			break;
+		}
+		
+		case PVRSRV_PDUMP_PIXEL_FORMAT_YUV_YV32: // YV32 - 4 contiguous planes in the order VUYA, stride can be > width.
+		{
+			const IMG_UINT32 ui32PlaneSize = ui32StrideInBytes*ui32Height; // All 4 planes are the same size
+			const IMG_UINT32 ui32Plane0FileOffset = ui32FileOffset + (ui32PlaneSize<<1);		// SII plane 0 is Y, which is YV32 plane 2
+			const IMG_UINT32 ui32Plane1FileOffset = ui32FileOffset + ui32PlaneSize;				// SII plane 1 is U, which is YV32 plane 1
+			const IMG_UINT32 ui32Plane2FileOffset = ui32FileOffset;								// SII plane 2 is V, which is YV32 plane 0
+			const IMG_UINT32 ui32Plane3FileOffset = ui32Plane0FileOffset + ui32PlaneSize;		// SII plane 3 is A, which is YV32 plane 3
+			const IMG_UINT32 ui32Plane0MemOffset = ui32PlaneSize<<1;
+			const IMG_UINT32 ui32Plane1MemOffset = ui32PlaneSize;
+			const IMG_UINT32 ui32Plane2MemOffset = 0;
+			const IMG_UINT32 ui32Plane3MemOffset = ui32Plane0MemOffset + ui32PlaneSize;
+							 						
+			PDumpCommentWithFlags(ui32PDumpFlags, "YV32 4 planes. Width=0x%08X Height=0x%08X Stride=0x%08X",
+							 						ui32Width, ui32Height, ui32StrideInBytes);
+			
+			PDumpCommentWithFlags(ui32PDumpFlags, "YV32 plane size is 0x%08X", ui32PlaneSize);
+			
+			PDumpCommentWithFlags(ui32PDumpFlags, "YV32 Plane 0 Mem Offset=0x%08X", ui32Plane0MemOffset);
+			PDumpCommentWithFlags(ui32PDumpFlags, "YV32 Plane 1 Mem Offset=0x%08X", ui32Plane1MemOffset);
+			PDumpCommentWithFlags(ui32PDumpFlags, "YV32 Plane 2 Mem Offset=0x%08X", ui32Plane2MemOffset);
+			PDumpCommentWithFlags(ui32PDumpFlags, "YV32 Plane 3 Mem Offset=0x%08X", ui32Plane3MemOffset);
+			
+			/*
+				SII <imageset> <filename>	:<memsp1>:v<id1>:<virtaddr1> <size1> <fileoffset1>		Y
+											:<memsp2>:v<id2>:<virtaddr2> <size2> <fileoffset2>		U
+											:<memsp3>:v<id3>:<virtaddr3> <size3> <fileoffset3>		V
+											:<memsp4>:v<id4>:<virtaddr4> <size4> <fileoffset4>		A
+											<pixfmt> <width> <height> <stride> <addrmode>
+			*/
+			eErr = PDumpOSBufprintf(hScript,
+						ui32MaxLen,
+						"SII %s %s.bin :%s:v%x:0x%010llX 0x%08X 0x%08X :%s:v%x:0x%010llX 0x%08X 0x%08X :%s:v%x:0x%010llX 0x%08X 0x%08X :%s:v%x:0x%010llX 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X",
+						pszFileName,
+						pszFileName,
+						
+						// Plane 0 (V)
+						psDevId->pszPDumpDevName,	// memsp
+						ui32MMUContextID,			// MMU context id
+						sDevBaseAddr.uiAddr+ui32Plane0MemOffset,	// virtaddr
+						ui32PlaneSize,				// size
+						ui32Plane0FileOffset,		// fileoffset
+						
+						// Plane 1 (U)
+						psDevId->pszPDumpDevName,	// memsp
+						ui32MMUContextID,			// MMU context id
+						sDevBaseAddr.uiAddr+ui32Plane1MemOffset,	// virtaddr
+						ui32PlaneSize,				// size
+						ui32Plane1FileOffset,		// fileoffset
+						
+						// Plane 2 (Y)
+						psDevId->pszPDumpDevName,	// memsp
+						ui32MMUContextID,			// MMU context id
+						sDevBaseAddr.uiAddr+ui32Plane2MemOffset,	// virtaddr
+						ui32PlaneSize,				// size
+						ui32Plane2FileOffset,		// fileoffset
+						
+						// Plane 3 (A)
+						psDevId->pszPDumpDevName,	// memsp
+						ui32MMUContextID,			// MMU context id
+						sDevBaseAddr.uiAddr+ui32Plane3MemOffset,	// virtaddr
+						ui32PlaneSize,				// size
+						ui32Plane3FileOffset,		// fileoffset
+						
+						ePixelFormat,
+						ui32Width,
+						ui32Height,
+						ui32StrideInBytes,
+						ui32AddrMode);
+						
+			if (eErr != PVRSRV_OK)
+			{
+				return eErr;
+			}
+			
+			PDUMP_LOCK();
+			PDumpWriteScript( hScript, ui32PDumpFlags);
+			PDUMP_UNLOCK();
+			break;
+		}
+				
+		default: // Single plane formats
+		{
+			eErr = PDumpOSBufprintf(hScript,
+						ui32MaxLen,
+						"SII %s %s.bin :%s:v%x:0x%010llX 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X",
+						pszFileName,
+						pszFileName,
+						psDevId->pszPDumpDevName,
+						ui32MMUContextID,
+						sDevBaseAddr.uiAddr,
+						ui32Size,
+						ui32FileOffset,
+						ePixelFormat,
+						ui32Width,
+						ui32Height,
+						ui32StrideInBytes,
+						ui32AddrMode);
+						
+			if (eErr != PVRSRV_OK)
+			{
+				return eErr;
+			}
+
+			PDUMP_LOCK();
+			PDumpWriteScript( hScript, ui32PDumpFlags);
+			PDUMP_UNLOCK();
+			break;
+		}
+	}
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PDumpReadRegKM
+
+ @Description
+
+ Dumps a read from a device register to a file
+
+ @Input    psConnection 		: connection info
+ @Input    pszFileName
+ @Input    ui32FileOffset
+ @Input    ui32Address
+ @Input    ui32Size
+ @Input    ui32PDumpFlags
+
+ @Return   PVRSRV_ERROR			:
+
+******************************************************************************/
+PVRSRV_ERROR PDumpReadRegKM		(	IMG_CHAR *pszPDumpRegName,
+									IMG_CHAR *pszFileName,
+									IMG_UINT32 ui32FileOffset,
+									IMG_UINT32 ui32Address,
+									IMG_UINT32 ui32Size,
+									IMG_UINT32 ui32PDumpFlags)
+{
+	PVRSRV_ERROR eErr;
+	PDUMP_GET_SCRIPT_STRING();
+
+	PVR_UNREFERENCED_PARAMETER(ui32Size);
+
+	eErr = PDumpOSBufprintf(hScript,
+			ui32MaxLen,
+			"SAB :%s:0x%08X 0x%08X %s",
+			pszPDumpRegName,
+			ui32Address,
+			ui32FileOffset,
+			pszFileName);
+	if(eErr != PVRSRV_OK)
+	{
+		return eErr;
+	}
+
+	PDUMP_LOCK();
+	PDumpWriteScript( hScript, ui32PDumpFlags);
+	PDUMP_UNLOCK();
+
+	return PVRSRV_OK;
+}
+
+/*****************************************************************************
+ @name		PDumpRegRead32
+ @brief		Dump 32-bit register read to script
+ @param		pszPDumpDevName - pdump device name
+ @param		ui32RegOffset - register offset
+ @param		ui32Flags - pdump flags
+ @return	Error
+*****************************************************************************/
+PVRSRV_ERROR PDumpRegRead32(IMG_CHAR *pszPDumpRegName,
+							const IMG_UINT32 ui32RegOffset,
+							IMG_UINT32 ui32Flags)
+{
+	PVRSRV_ERROR eErr;
+	PDUMP_GET_SCRIPT_STRING();
+
+	eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "RDW :%s:0x%X",
+							pszPDumpRegName, 
+							ui32RegOffset);
+	if(eErr != PVRSRV_OK)
+	{
+		return eErr;
+	}
+	PDUMP_LOCK();
+	PDumpWriteScript(hScript, ui32Flags);
+	PDUMP_UNLOCK();
+	return PVRSRV_OK;
+}
+
+/*****************************************************************************
+ @name		PDumpRegRead64
+ @brief		Dump 64-bit register read to script
+ @param		pszPDumpDevName - pdump device name
+ @param		ui32RegOffset - register offset
+ @param		ui32Flags - pdump flags
+ @return	Error
+*****************************************************************************/
+PVRSRV_ERROR PDumpRegRead64(IMG_CHAR *pszPDumpRegName,
+							const IMG_UINT32 ui32RegOffset,
+							IMG_UINT32 ui32Flags)
+{
+	PVRSRV_ERROR eErr;
+	PDUMP_GET_SCRIPT_STRING();
+
+	eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "RDW64 :%s:0x%X",
+							pszPDumpRegName, 
+							ui32RegOffset);
+	if(eErr != PVRSRV_OK)
+	{
+		return eErr;
+	}
+	PDUMP_LOCK();
+	PDumpWriteScript(hScript, ui32Flags);
+	PDUMP_UNLOCK();
+	return PVRSRV_OK;
+}
+
+
+/*****************************************************************************
+ FUNCTION	: PDumpWriteShiftedMaskedValue
+
+ PURPOSE	: Emits the PDump commands for writing a masked shifted address
+              into another location
+
+ PARAMETERS	: PDump symbolic name and offset of target word
+              PDump symbolic name and offset of source address
+              right shift amount
+              left shift amount
+              mask
+
+ RETURNS	: None
+*****************************************************************************/
+PVRSRV_ERROR
+PDumpWriteShiftedMaskedValue(const IMG_CHAR *pszDestRegspaceName,
+                             const IMG_CHAR *pszDestSymbolicName,
+                             IMG_DEVMEM_OFFSET_T uiDestOffset,
+                             const IMG_CHAR *pszRefRegspaceName,
+                             const IMG_CHAR *pszRefSymbolicName,
+                             IMG_DEVMEM_OFFSET_T uiRefOffset,
+                             IMG_UINT32 uiSHRAmount,
+                             IMG_UINT32 uiSHLAmount,
+                             IMG_UINT32 uiMask,
+                             IMG_DEVMEM_SIZE_T uiWordSize,
+                             IMG_UINT32 uiPDumpFlags)
+{
+	PVRSRV_ERROR         eError;
+
+    /* Suffix of WRW command in PDump (i.e. WRW or WRW64) */
+    const IMG_CHAR       *pszWrwSuffix;
+
+    /* Internal PDump register used for interim calculation */
+    const IMG_CHAR       *pszPDumpIntRegSpace;
+    IMG_UINT32           uiPDumpIntRegNum;
+
+	PDUMP_GET_SCRIPT_STRING();
+
+    if ((uiWordSize != 4) && (uiWordSize != 8))
+    {
+        return PVRSRV_ERROR_NOT_SUPPORTED;
+    }
+
+    pszWrwSuffix = (uiWordSize == 8) ? "64" : "";
+
+    /* Should really "Acquire" a pdump register here */
+    pszPDumpIntRegSpace = pszDestRegspaceName;
+    uiPDumpIntRegNum = 1;
+        
+    eError = PDumpOSBufprintf(hScript,
+                              ui32MaxLen,
+                              /* Should this be "MOV" instead? */
+                              "WRW :%s:$%d :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC "\n",
+                              /* dest */
+                              pszPDumpIntRegSpace,
+                              uiPDumpIntRegNum,
+                              /* src */
+                              pszRefRegspaceName,
+                              pszRefSymbolicName,
+                              uiRefOffset);
+    if (eError != PVRSRV_OK)
+    {
+        goto ErrOut;
+    }
+
+    PDUMP_LOCK();
+    PDumpWriteScript(hScript, uiPDumpFlags);
+
+    if (uiSHRAmount > 0)
+    {
+        eError = PDumpOSBufprintf(hScript,
+                                  ui32MaxLen,
+                                  "SHR :%s:$%d :%s:$%d 0x%X\n",
+                                  /* dest */
+                                  pszPDumpIntRegSpace,
+                                  uiPDumpIntRegNum,
+                                  /* src A */
+                                  pszPDumpIntRegSpace,
+                                  uiPDumpIntRegNum,
+                                  /* src B */
+                                  uiSHRAmount);
+        if (eError != PVRSRV_OK)
+        {
+            goto ErrUnlock;
+        }
+        PDumpWriteScript(hScript, uiPDumpFlags);
+    }
+    
+    if (uiSHLAmount > 0)
+    {
+        eError = PDumpOSBufprintf(hScript,
+                                  ui32MaxLen,
+                                  "SHL :%s:$%d :%s:$%d 0x%X\n",
+                                  /* dest */
+                                  pszPDumpIntRegSpace,
+                                  uiPDumpIntRegNum,
+                                  /* src A */
+                                  pszPDumpIntRegSpace,
+                                  uiPDumpIntRegNum,
+                                  /* src B */
+                                  uiSHLAmount);
+        if (eError != PVRSRV_OK)
+        {
+            goto ErrUnlock;
+        }
+        PDumpWriteScript(hScript, uiPDumpFlags);
+    }
+    
+    if (uiMask != (1ULL << (8*uiWordSize))-1)
+    {
+        eError = PDumpOSBufprintf(hScript,
+                                  ui32MaxLen,
+                                  "AND :%s:$%d :%s:$%d 0x%X\n",
+                                  /* dest */
+                                  pszPDumpIntRegSpace,
+                                  uiPDumpIntRegNum,
+                                  /* src A */
+                                  pszPDumpIntRegSpace,
+                                  uiPDumpIntRegNum,
+                                  /* src B */
+                                  uiMask);
+        if (eError != PVRSRV_OK)
+        {
+            goto ErrUnlock;
+        }
+        PDumpWriteScript(hScript, uiPDumpFlags);
+    }
+
+    eError = PDumpOSBufprintf(hScript,
+                              ui32MaxLen,
+                              "WRW%s :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC " :%s:$%d\n",
+                              pszWrwSuffix,
+                              /* dest */
+                              pszDestRegspaceName,
+                              pszDestSymbolicName,
+                              uiDestOffset,
+                              /* src */
+                              pszPDumpIntRegSpace,
+                              uiPDumpIntRegNum);
+    if(eError != PVRSRV_OK)
+    {
+        goto ErrUnlock;
+    }
+    PDumpWriteScript(hScript, uiPDumpFlags);
+
+ErrUnlock:
+	PDUMP_UNLOCK();
+ErrOut:
+	return eError;
+}
+
+
+PVRSRV_ERROR
+PDumpWriteSymbAddress(const IMG_CHAR *pszDestSpaceName,
+                      IMG_DEVMEM_OFFSET_T uiDestOffset,
+                      const IMG_CHAR *pszRefSymbolicName,
+                      IMG_DEVMEM_OFFSET_T uiRefOffset,
+                      const IMG_CHAR *pszPDumpDevName,
+                      IMG_UINT32 ui32WordSize,
+                      IMG_UINT32 ui32AlignShift,
+                      IMG_UINT32 ui32Shift,
+                      IMG_UINT32 uiPDumpFlags)
+{
+    const IMG_CHAR       *pszWrwSuffix = "";
+	PVRSRV_ERROR         eError = PVRSRV_OK;
+
+	PDUMP_GET_SCRIPT_STRING();
+
+    if (ui32WordSize == 8)
+    {
+        pszWrwSuffix = "64";
+    }
+
+    PDUMP_LOCK();
+
+    if (ui32AlignShift != ui32Shift)
+    {
+    	/* Write physical address into a variable */
+    	eError = PDumpOSBufprintf(hScript,
+    							ui32MaxLen,
+    							"WRW%s :%s:$1 %s:" IMG_DEVMEM_OFFSET_FMTSPEC "\n",
+    							pszWrwSuffix,
+    							/* dest */
+    							pszPDumpDevName,
+    							/* src */
+    							pszRefSymbolicName,
+    							uiRefOffset);
+		if (eError != PVRSRV_OK)
+		{
+			goto symbAddress_error;
+		}
+    	PDumpWriteScript(hScript, uiPDumpFlags);
+
+    	/* apply address alignment  */
+    	eError = PDumpOSBufprintf(hScript,
+    							ui32MaxLen,
+    							"SHR :%s:$1 :%s:$1 0x%X",
+    							/* dest */
+    							pszPDumpDevName,
+    							/* src A */
+    							pszPDumpDevName,
+    							/* src B */
+    							ui32AlignShift);
+		if (eError != PVRSRV_OK)
+		{
+			goto symbAddress_error;
+		}
+    	PDumpWriteScript(hScript, uiPDumpFlags);
+
+    	/* apply address shift  */
+    	eError = PDumpOSBufprintf(hScript,
+    							ui32MaxLen,
+    							"SHL :%s:$1 :%s:$1 0x%X",
+    							/* dest */
+    							pszPDumpDevName,
+    							/* src A */
+    							pszPDumpDevName,
+    							/* src B */
+    							ui32Shift);
+		if (eError != PVRSRV_OK)
+		{
+			goto symbAddress_error;
+		}
+    	PDumpWriteScript(hScript, uiPDumpFlags);
+
+
+    	/* write result to register */
+    	eError = PDumpOSBufprintf(hScript,
+    							ui32MaxLen,
+    							"WRW%s :%s:0x%08X :%s:$1",
+    							pszWrwSuffix,
+    							pszDestSpaceName,
+    							(IMG_UINT32)uiDestOffset,
+    							pszPDumpDevName);
+		if (eError != PVRSRV_OK)
+		{
+			goto symbAddress_error;
+		}
+    	PDumpWriteScript(hScript, uiPDumpFlags);
+    }
+    else
+    {
+		eError = PDumpOSBufprintf(hScript,
+								  ui32MaxLen,
+								  "WRW%s :%s:" IMG_DEVMEM_OFFSET_FMTSPEC " %s:" IMG_DEVMEM_OFFSET_FMTSPEC "\n",
+								  pszWrwSuffix,
+								  /* dest */
+								  pszDestSpaceName,
+								  uiDestOffset,
+								  /* src */
+								  pszRefSymbolicName,
+								  uiRefOffset);
+		if (eError != PVRSRV_OK)
+		{
+			goto symbAddress_error;
+		}
+	    PDumpWriteScript(hScript, uiPDumpFlags);
+    }
+
+symbAddress_error:
+
+    PDUMP_UNLOCK();
+
+	return eError;
+}
+
+/**************************************************************************
+ * Function Name  : PDumpIDLWithFlags
+ * Inputs         : Idle time in clocks
+ * Outputs        : None
+ * Returns        : Error
+ * Description    : Dump IDL command to script
+**************************************************************************/
+PVRSRV_ERROR PDumpIDLWithFlags(IMG_UINT32 ui32Clocks, IMG_UINT32 ui32Flags)
+{
+	PVRSRV_ERROR eErr;
+	PDUMP_GET_SCRIPT_STRING();
+	PDUMP_DBG(("PDumpIDLWithFlags"));
+
+	eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "IDL %u", ui32Clocks);
+	if(eErr != PVRSRV_OK)
+	{
+		return eErr;
+	}
+	PDUMP_LOCK();
+	PDumpWriteScript(hScript, ui32Flags);
+	PDUMP_UNLOCK();
+	return PVRSRV_OK;
+}
+
+
+/**************************************************************************
+ * Function Name  : PDumpIDL
+ * Inputs         : Idle time in clocks
+ * Outputs        : None
+ * Returns        : Error
+ * Description    : Dump IDL command to script
+**************************************************************************/
+PVRSRV_ERROR PDumpIDL(IMG_UINT32 ui32Clocks)
+{
+	return PDumpIDLWithFlags(ui32Clocks, PDUMP_FLAGS_CONTINUOUS);
+}
+
+/*****************************************************************************
+ FUNCTION	: PDumpRegBasedCBP
+    
+ PURPOSE	: Dump CBP command to script
+
+ PARAMETERS	:
+			  
+ RETURNS	: None
+*****************************************************************************/
+PVRSRV_ERROR PDumpRegBasedCBP(IMG_CHAR		*pszPDumpRegName,
+							  IMG_UINT32	ui32RegOffset,
+							  IMG_UINT32	ui32WPosVal,
+							  IMG_UINT32	ui32PacketSize,
+							  IMG_UINT32	ui32BufferSize,
+							  IMG_UINT32	ui32Flags)
+{
+	PDUMP_GET_SCRIPT_STRING();
+
+	PDumpOSBufprintf(hScript,
+			 ui32MaxLen,
+			 "CBP :%s:0x%08X 0x%08X 0x%08X 0x%08X",
+			 pszPDumpRegName,
+			 ui32RegOffset,
+			 ui32WPosVal,
+			 ui32PacketSize,
+			 ui32BufferSize);
+	PDUMP_LOCK();
+	PDumpWriteScript(hScript, ui32Flags);
+	PDUMP_UNLOCK();
+	
+	return PVRSRV_OK;		
+}
+
+PVRSRV_ERROR PDumpTRG(IMG_CHAR *pszMemSpace,
+                      IMG_UINT32 ui32MMUCtxID,
+                      IMG_UINT32 ui32RegionID,
+                      IMG_BOOL bEnable,
+                      IMG_UINT64 ui64VAddr,
+                      IMG_UINT64 ui64LenBytes,
+                      IMG_UINT32 ui32XStride,
+                      IMG_UINT32 ui32Flags)
+{
+	PDUMP_GET_SCRIPT_STRING();
+
+	if(bEnable)
+	{
+		PDumpOSBufprintf(hScript, ui32MaxLen,
+		                 "TRG :%s:v%u %u 0x%08llX 0x%08llX %u",
+		                 pszMemSpace, ui32MMUCtxID, ui32RegionID,
+		                 ui64VAddr, ui64LenBytes, ui32XStride);
+	}
+	else
+	{
+		PDumpOSBufprintf(hScript, ui32MaxLen,
+		                 "TRG :%s:v%u %u",
+		                 pszMemSpace, ui32MMUCtxID, ui32RegionID);
+
+	}
+
+	PDUMP_LOCK();
+	PDumpWriteScript(hScript, ui32Flags);
+	PDUMP_UNLOCK();
+
+	return PVRSRV_OK;
+}
+
+/**************************************************************************
+ * Function Name  : PDumpConnectionNotify
+ * Description    : Called by the srvcore to tell PDump core that the
+ *                  PDump capture and control client has connected
+ **************************************************************************/
+IMG_VOID PDumpConnectionNotify(IMG_VOID)
+{
+	PVRSRV_DATA			*psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_DEVICE_NODE	*psThis;
+
+	if (PDumpOSAllowInitPhaseToComplete(IMG_PDUMPCTRL))
+	{
+		/* No 'Stop Init Phase' comment here as in this case as the PDump
+		 * client can connect multiple times and we don't want the comment
+		 * appearing multiple times in out files.
+		 */
+		PDumpCtrlLockAcquire();
+		PDumpCtrlSetInitPhaseComplete(IMG_TRUE);
+		PDumpCtrlLockRelease();
+	}
+
+	g_ConnectionCount++;
+	PVR_LOG(("PDump has connected (%u)", g_ConnectionCount));
+	
+	/* Reset the parameter file attributes */
+	g_PDumpParameters.sWOff.ui32Main = g_PDumpParameters.sWOff.ui32Init;
+	g_PDumpParameters.ui32FileIdx = 0;
+
+	/* Loop over all known devices */
+	psThis = psPVRSRVData->psDeviceNodeList;
+	while (psThis)
+	{
+		if (psThis->pfnPDumpInitDevice)
+		{
+			/* Reset pdump according to connected device */
+			psThis->pfnPDumpInitDevice(psThis);
+		}
+		psThis = psThis->psNext;
+	}
+}
+
+/**************************************************************************
+ * Function Name  : PDumpIfKM
+ * Inputs         : pszPDumpCond - string for condition
+ * Outputs        : None
+ * Returns        : None
+ * Description    : Create a PDUMP string which represents IF command 
+					with condition.
+**************************************************************************/
+PVRSRV_ERROR PDumpIfKM(IMG_CHAR		*pszPDumpCond)
+{
+	PVRSRV_ERROR eErr;
+	PDUMP_GET_SCRIPT_STRING()
+	PDUMP_DBG(("PDumpIfKM"));
+
+	eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "IF %s\n", pszPDumpCond);
+
+	if (eErr != PVRSRV_OK)
+	{
+		return eErr;
+	}
+
+	PDUMP_LOCK();
+	PDumpWriteScript(hScript, PDUMP_FLAGS_CONTINUOUS);
+	PDUMP_UNLOCK();
+
+	return PVRSRV_OK;
+}
+
+/**************************************************************************
+ * Function Name  : PDumpElseKM
+ * Inputs         : pszPDumpCond - string for condition
+ * Outputs        : None
+ * Returns        : None
+ * Description    : Create a PDUMP string which represents ELSE command 
+					with condition.
+**************************************************************************/
+PVRSRV_ERROR PDumpElseKM(IMG_CHAR		*pszPDumpCond)
+{
+	PVRSRV_ERROR eErr;
+	PDUMP_GET_SCRIPT_STRING()
+	PDUMP_DBG(("PDumpElseKM"));
+
+	eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "ELSE %s\n", pszPDumpCond);
+
+	if (eErr != PVRSRV_OK)
+	{
+		return eErr;
+	}
+
+	PDUMP_LOCK();
+	PDumpWriteScript(hScript, PDUMP_FLAGS_CONTINUOUS);
+	PDUMP_UNLOCK();
+
+	return PVRSRV_OK;
+}
+
+/**************************************************************************
+ * Function Name  : PDumpFiKM
+ * Inputs         : pszPDumpCond - string for condition
+ * Outputs        : None
+ * Returns        : None
+ * Description    : Create a PDUMP string which represents FI command 
+					with condition.
+**************************************************************************/
+PVRSRV_ERROR PDumpFiKM(IMG_CHAR		*pszPDumpCond)
+{
+	PVRSRV_ERROR eErr;
+	PDUMP_GET_SCRIPT_STRING()
+	PDUMP_DBG(("PDumpFiKM"));
+
+	eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "FI %s\n", pszPDumpCond);
+
+	if (eErr != PVRSRV_OK)
+	{
+		return eErr;
+	}
+
+	PDUMP_LOCK();
+	PDumpWriteScript(hScript, PDUMP_FLAGS_CONTINUOUS);
+	PDUMP_UNLOCK();
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PDumpCreateLockKM(IMG_VOID)
+{
+	return PDumpOSCreateLock();
+}
+
+IMG_VOID PDumpDestroyLockKM(IMG_VOID)
+{
+	PDumpOSDestroyLock();
+}
+
+IMG_VOID PDumpLockKM(IMG_VOID)
+{
+	PDumpOSLock();
+}
+
+IMG_VOID PDumpUnlockKM(IMG_VOID)
+{
+	PDumpOSUnlock();
+}
+
+#if defined(PVR_TESTING_UTILS)
+extern IMG_VOID PDumpOSDumpState(IMG_VOID);
+
+#if !defined(LINUX)
+IMG_VOID PDumpOSDumpState(IMG_BOOL bDumpOSLayerState)
+{
+	PVR_UNREFERENCED_PARAMETER(bDumpOSLayerState);
+}
+#endif
+
+IMG_VOID PDumpCommonDumpState(IMG_BOOL bDumpOSLayerState)
+{
+	IMG_UINT32* ui32HashData = (IMG_UINT32*)g_psPersistentHash;
+
+	PVR_LOG(("--- PDUMP COMMON: g_PDumpInitialised( %d )",
+			g_PDumpInitialised) );
+	PVR_LOG(("--- PDUMP COMMON: g_psPersistentHash( %p ) uSize( %d ) uCount( %d )",
+			g_psPersistentHash, ui32HashData[0], ui32HashData[1]) );
+	PVR_LOG(("--- PDUMP COMMON: g_PDumpScript.sCh.hInit( %p ) g_PDumpScript.sCh.hMain( %p ) g_PDumpScript.sCh.hDeinit( %p )",
+			g_PDumpScript.sCh.hInit, g_PDumpScript.sCh.hMain, g_PDumpScript.sCh.hDeinit) );
+	PVR_LOG(("--- PDUMP COMMON: g_PDumpParameters.sCh.hInit( %p ) g_PDumpParameters.sCh.hMain( %p ) g_PDumpParameters.sCh.hDeinit( %p )",
+			g_PDumpParameters.sCh.hInit, g_PDumpParameters.sCh.hMain, g_PDumpParameters.sCh.hDeinit) );
+	PVR_LOG(("--- PDUMP COMMON: g_PDumpParameters.sWOff.ui32Init( %d ) g_PDumpParameters.sWOff.ui32Main( %d ) g_PDumpParameters.sWOff.ui32Deinit( %d )",
+			g_PDumpParameters.sWOff.ui32Init, g_PDumpParameters.sWOff.ui32Main, g_PDumpParameters.sWOff.ui32Deinit) );
+	PVR_LOG(("--- PDUMP COMMON: g_PDumpParameters.ui32FileIdx( %d )",
+			g_PDumpParameters.ui32FileIdx) );
+
+	PVR_LOG(("--- PDUMP COMMON: g_PDumpCtrl( %p ) bInitPhaseActive( %d ) ui32Flags( %x )",
+			&g_PDumpCtrl, g_PDumpCtrl.bInitPhaseActive, g_PDumpCtrl.ui32Flags) );
+	PVR_LOG(("--- PDUMP COMMON: ui32DefaultCapMode( %d ) ui32CurrentFrame( %d )",
+			g_PDumpCtrl.ui32DefaultCapMode, g_PDumpCtrl.ui32CurrentFrame) );
+	PVR_LOG(("--- PDUMP COMMON: sDefaultRange.ui32Start( %d ) sDefaultRange.ui32End( %d ) sDefaultRange.ui32Interval( %d )",
+			g_PDumpCtrl.sDefaultRange.ui32Start, g_PDumpCtrl.sDefaultRange.ui32End, g_PDumpCtrl.sDefaultRange.ui32Interval) );
+	PVR_LOG(("--- PDUMP COMMON: bCaptureOn( %d ) bSuspended( %d ) bInPowerTransition( %d )",
+			g_PDumpCtrl.bCaptureOn, g_PDumpCtrl.bSuspended, g_PDumpCtrl.bInPowerTransition) );
+
+	if (bDumpOSLayerState)
+	{
+		PDumpOSDumpState();
+	}
+}
+#endif
+
+
+PVRSRV_ERROR PDumpRegisterConnection(SYNC_CONNECTION_DATA *psSyncConnectionData,
+									 PDUMP_CONNECTION_DATA **ppsPDumpConnectionData)
+{
+	PDUMP_CONNECTION_DATA *psPDumpConnectionData;
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(ppsPDumpConnectionData != IMG_NULL);
+
+	psPDumpConnectionData = OSAllocMem(sizeof(*psPDumpConnectionData));
+	if (psPDumpConnectionData == IMG_NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_alloc;
+	}
+
+	eError = OSLockCreate(&psPDumpConnectionData->hLock, LOCK_TYPE_PASSIVE);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_lockcreate;
+	}
+
+	dllist_init(&psPDumpConnectionData->sListHead);
+	psPDumpConnectionData->ui32RefCount = 1;
+	psPDumpConnectionData->bLastInto = IMG_FALSE;
+	psPDumpConnectionData->ui32LastSetFrameNumber = 0xFFFFFFFFU;
+	psPDumpConnectionData->bLastTransitionFailed = IMG_FALSE;
+
+	/*
+	 * Although we don't take a ref count here, handle base destruction
+	 * will ensure that any resource that might trigger us to do a
+	 * Transition will have been freed before the sync blocks which
+	 * are keeping the sync connection data alive.
+	 */
+	psPDumpConnectionData->psSyncConnectionData = psSyncConnectionData;
+	*ppsPDumpConnectionData = psPDumpConnectionData;
+
+	return PVRSRV_OK;
+
+fail_lockcreate:
+	OSFreeMem(psPDumpConnectionData);
+fail_alloc:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+IMG_VOID PDumpUnregisterConnection(PDUMP_CONNECTION_DATA *psPDumpConnectionData)
+{
+	_PDumpConnectionRelease(psPDumpConnectionData);
+}
+
+#else	/* defined(PDUMP) */
+/* disable warning about empty module */
+#ifdef	_WIN32
+#pragma warning (disable:4206)
+#endif
+#endif	/* defined(PDUMP) */
+/*****************************************************************************
+ End of file (pdump_common.c)
+*****************************************************************************/
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/pdump_mmu.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/pdump_mmu.c
new file mode 100644
index 0000000..2efacb1
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/pdump_mmu.c
@@ -0,0 +1,1008 @@
+/*************************************************************************/ /*!
+@File
+@Title		MMU PDump functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description	Common PDump (MMU specific) functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#if defined (PDUMP)
+
+#include "img_types.h"
+#include "pdump_mmu.h"
+#include "pdump_osfunc.h"
+#include "pdump_km.h"
+#include "osfunc.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+
+#define MAX_PDUMP_MMU_CONTEXTS	(10)
+static IMG_UINT32 guiPDumpMMUContextAvailabilityMask = (1<<MAX_PDUMP_MMU_CONTEXTS)-1;
+
+/* arbitrary buffer length here. */
+#define MAX_SYMBOLIC_ADDRESS_LENGTH 40
+
+#define MMUPX_FMT(X) ((X<3) ? ((X<2) ?  "MMUPT_\0" : "MMUPD_\0") : "MMUPC_\0")
+
+
+/* Array used to look-up debug strings from MMU_LEVEL */
+static IMG_CHAR ai8MMULevelStringLookup[MMU_LEVEL_LAST][15] =
+		{
+				"MMU_LEVEL_0",
+				"PAGE_TABLE",
+				"PAGE_DIRECTORY",
+				"PAGE_CATALOGUE",
+		};
+
+static PVRSRV_ERROR 
+_ContiguousPDumpBytes(const IMG_CHAR *pszSymbolicName,
+                      IMG_UINT32 ui32SymAddrOffset,
+                      IMG_BOOL bFlush,
+                      IMG_UINT32 uiNumBytes,
+                      IMG_VOID *pvBytes,
+                      IMG_UINT32 ui32Flags)
+{
+    static const IMG_CHAR *pvBeyondLastPointer;
+    static const IMG_CHAR *pvBasePointer;
+    static IMG_UINT32 ui32BeyondLastOffset;
+    static IMG_UINT32 ui32BaseOffset;
+    static IMG_UINT32 uiAccumulatedBytes = 0;
+	IMG_UINT32 ui32ParamOutPos;
+    PVRSRV_ERROR eErr = PVRSRV_OK;
+
+	PDUMP_GET_SCRIPT_AND_FILE_STRING();
+	PVR_UNREFERENCED_PARAMETER(ui32MaxLenFileName);
+
+    if (!bFlush && uiAccumulatedBytes > 0)
+    {
+        /* do some tests for contiguity.  If it fails, we flush anyway */
+
+        if (pvBeyondLastPointer != pvBytes ||
+            ui32SymAddrOffset != ui32BeyondLastOffset
+            /* NB: ought to check that symbolic name agrees too, but
+               we know this always to be the case in the current use-case */
+            )
+        {
+            bFlush = IMG_TRUE;
+        }
+    }
+
+    /* Flush if necessary */
+    if (bFlush && uiAccumulatedBytes > 0)
+    {        
+        eErr = PDumpWriteParameter((IMG_UINT8 *)(IMG_UINTPTR_T)pvBasePointer,
+                               uiAccumulatedBytes, ui32Flags,
+                               &ui32ParamOutPos, pszFileName);
+    	PVR_LOGG_IF_ERROR(eErr, "PDumpWriteParameter", ErrOut);
+
+        eErr = PDumpOSBufprintf(hScript, ui32MaxLenScript,
+                                "LDB %s:0x%X 0x%X 0x%X %s",
+                                /* dest */
+                                pszSymbolicName,
+                                ui32BaseOffset,
+                                /* size */
+                                uiAccumulatedBytes,
+                                /* file offset */
+                                ui32ParamOutPos,
+                                /* filename */
+                                pszFileName);
+    	PVR_LOGG_IF_ERROR(eErr, "PDumpOSBufprintf", ErrOut);
+
+        PDumpWriteScript(hScript, ui32Flags);
+
+        uiAccumulatedBytes = 0;
+    }
+
+    /* Initialise offsets and pointers if necessary */
+    if (uiAccumulatedBytes == 0)
+    {
+        ui32BaseOffset = ui32BeyondLastOffset = ui32SymAddrOffset;
+        pvBeyondLastPointer = pvBasePointer = (const IMG_CHAR *)pvBytes;
+    }
+
+    /* Accumulate some bytes */
+    ui32BeyondLastOffset += uiNumBytes;
+    pvBeyondLastPointer += uiNumBytes;
+    uiAccumulatedBytes += uiNumBytes;
+
+ErrOut:
+    return eErr;
+}
+
+
+/**************************************************************************
+ * Function Name  : PDumpMMUMalloc
+ * Inputs         :
+ * Outputs        : 
+ * Returns        : PVRSRV_ERROR
+ * Description    : 
+**************************************************************************/
+PVRSRV_ERROR PDumpMMUMalloc(const IMG_CHAR			*pszPDumpDevName,
+							MMU_LEVEL 				eMMULevel,
+							IMG_DEV_PHYADDR			*psDevPAddr,
+							IMG_UINT32				ui32Size,
+							IMG_UINT32				ui32Align)
+{
+	PVRSRV_ERROR eErr;
+	IMG_UINT32 ui32Flags = PDUMP_FLAGS_CONTINUOUS;
+	IMG_UINT64 ui64SymbolicAddr;
+	IMG_CHAR *pszMMUPX;
+
+	PDUMP_GET_SCRIPT_STRING();
+
+	ui32Flags |= ( PDumpIsPersistent() ) ? PDUMP_FLAGS_PERSISTENT : 0;
+
+	if (eMMULevel >= MMU_LEVEL_LAST)
+	{
+		eErr = PVRSRV_ERROR_INVALID_PARAMS;
+		goto ErrOut;
+	}
+
+	/*
+		Write a comment to the PDump2 script streams indicating the memory allocation
+	*/
+	eErr = PDumpOSBufprintf(hScript,
+							ui32MaxLen,
+							"-- MALLOC :%s:%s Size=0x%08X Alignment=0x%08X DevPAddr=0x%08llX",
+							pszPDumpDevName,
+							ai8MMULevelStringLookup[eMMULevel],
+							ui32Size,
+							ui32Align,
+							psDevPAddr->uiAddr);
+	if(eErr != PVRSRV_OK)
+	{
+		goto ErrOut;
+	}
+
+	PDumpOSLock();
+	PDumpWriteScript(hScript, ui32Flags);
+
+	/*
+		construct the symbolic address
+	*/
+	ui64SymbolicAddr = (IMG_UINT64)psDevPAddr->uiAddr;
+
+	/*
+		Write to the MMU script stream indicating the memory allocation
+	*/
+	pszMMUPX = MMUPX_FMT(eMMULevel);
+	eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "MALLOC :%s:%s%016llX 0x%X 0x%X",
+											pszPDumpDevName,
+											pszMMUPX,
+											ui64SymbolicAddr,
+											ui32Size,
+											ui32Align
+											/* don't need this sDevPAddr.uiAddr*/);
+	if(eErr != PVRSRV_OK)
+	{
+		goto ErrUnlock;
+	}
+	PDumpWriteScript(hScript, ui32Flags);
+
+ErrUnlock:
+	PDumpOSUnlock();
+ErrOut:
+	return eErr;
+}
+
+
+/**************************************************************************
+ * Function Name  : PDumpMMUFree
+ * Inputs         :
+ * Outputs        : 
+ * Returns        : PVRSRV_ERROR
+ * Description    : 
+**************************************************************************/
+PVRSRV_ERROR PDumpMMUFree(const IMG_CHAR				*pszPDumpDevName,
+							MMU_LEVEL 					eMMULevel,
+							IMG_DEV_PHYADDR				*psDevPAddr)
+{
+	PVRSRV_ERROR eErr;
+	IMG_UINT64 ui64SymbolicAddr;
+	IMG_UINT32 ui32Flags = PDUMP_FLAGS_CONTINUOUS;
+	IMG_CHAR *pszMMUPX;
+
+	PDUMP_GET_SCRIPT_STRING();
+
+	ui32Flags |= ( PDumpIsPersistent() ) ? PDUMP_FLAGS_PERSISTENT : 0;
+
+	if (eMMULevel >= MMU_LEVEL_LAST)
+	{
+		eErr = PVRSRV_ERROR_INVALID_PARAMS;
+		goto ErrOut;
+	}
+
+	/*
+		Write a comment to the PDUMP2 script streams indicating the memory free
+	*/
+	eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "-- FREE :%s:%s", 
+							pszPDumpDevName, ai8MMULevelStringLookup[eMMULevel]);
+	if(eErr != PVRSRV_OK)
+	{
+		goto ErrOut;
+	}
+
+	PDumpOSLock();
+	PDumpWriteScript(hScript, ui32Flags);
+
+	/*
+		construct the symbolic address
+	*/
+	ui64SymbolicAddr = (IMG_UINT64)psDevPAddr->uiAddr;
+
+	/*
+		Write to the MMU script stream indicating the memory free
+	*/
+	pszMMUPX = MMUPX_FMT(eMMULevel);
+	eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "FREE :%s:%s%016llX",
+							pszPDumpDevName,
+							pszMMUPX,
+							ui64SymbolicAddr);
+	if(eErr != PVRSRV_OK)
+	{
+		goto ErrUnlock;
+	}
+	PDumpWriteScript(hScript, ui32Flags);
+
+ErrUnlock:
+	PDumpOSUnlock();
+ErrOut:
+	return eErr;
+}
+
+
+/**************************************************************************
+ * Function Name  : PDumpMMUMalloc2
+ * Inputs         :
+ * Outputs        : 
+ * Returns        : PVRSRV_ERROR
+ * Description    : 
+**************************************************************************/
+PVRSRV_ERROR PDumpMMUMalloc2(const IMG_CHAR			*pszPDumpDevName,
+							const IMG_CHAR			*pszTableType,/* PAGE_CATALOGUE, PAGE_DIRECTORY, PAGE_TABLE */
+                             const IMG_CHAR *pszSymbolicAddr,
+                             IMG_UINT32				ui32Size,
+                             IMG_UINT32				ui32Align)
+{
+	PVRSRV_ERROR eErr;
+	IMG_UINT32 ui32Flags = PDUMP_FLAGS_CONTINUOUS;
+
+	PDUMP_GET_SCRIPT_STRING();
+
+	ui32Flags |= ( PDumpIsPersistent() ) ? PDUMP_FLAGS_PERSISTENT : 0;
+
+	/*
+		Write a comment to the PDump2 script streams indicating the memory allocation
+	*/
+	eErr = PDumpOSBufprintf(hScript,
+							ui32MaxLen,
+							"-- MALLOC :%s:%s Size=0x%08X Alignment=0x%08X\n",
+							pszPDumpDevName,
+							pszTableType,
+							ui32Size,
+							ui32Align);
+	if(eErr != PVRSRV_OK)
+	{
+		goto ErrOut;
+	}
+
+	PDumpOSLock();
+	PDumpWriteScript(hScript, ui32Flags);
+
+	/*
+		Write to the MMU script stream indicating the memory allocation
+	*/
+	eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "MALLOC :%s:%s 0x%X 0x%X\n",
+											pszPDumpDevName,
+											pszSymbolicAddr,
+											ui32Size,
+											ui32Align
+											/* don't need this sDevPAddr.uiAddr*/);
+	if(eErr != PVRSRV_OK)
+	{
+		goto ErrUnlock;
+	}
+	PDumpWriteScript(hScript, ui32Flags);
+
+ErrUnlock:
+	PDumpOSUnlock();
+ErrOut:
+	return eErr;
+}
+
+
+/**************************************************************************
+ * Function Name  : PDumpMMUFree2
+ * Inputs         :
+ * Outputs        : 
+ * Returns        : PVRSRV_ERROR
+ * Description    : 
+**************************************************************************/
+PVRSRV_ERROR PDumpMMUFree2(const IMG_CHAR				*pszPDumpDevName,
+							const IMG_CHAR				*pszTableType,/* PAGE_CATALOGUE, PAGE_DIRECTORY, PAGE_TABLE */
+                           const IMG_CHAR *pszSymbolicAddr)
+{
+	PVRSRV_ERROR eErr;
+	IMG_UINT32 ui32Flags = PDUMP_FLAGS_CONTINUOUS;
+
+	PDUMP_GET_SCRIPT_STRING();
+
+	ui32Flags |= ( PDumpIsPersistent() ) ? PDUMP_FLAGS_PERSISTENT : 0;
+
+	/*
+		Write a comment to the PDUMP2 script streams indicating the memory free
+	*/
+	eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "-- FREE :%s:%s\n", 
+							pszPDumpDevName, pszTableType);
+	if(eErr != PVRSRV_OK)
+	{
+		goto ErrOut;
+	}
+
+	PDumpOSLock();
+	PDumpWriteScript(hScript, ui32Flags);
+
+	/*
+		Write to the MMU script stream indicating the memory free
+	*/
+	eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "FREE :%s:%s\n",
+                            pszPDumpDevName,
+							pszSymbolicAddr);
+	if(eErr != PVRSRV_OK)
+	{
+		goto ErrUnlock;
+	}
+	PDumpWriteScript(hScript, ui32Flags);
+
+ErrUnlock:
+	PDumpOSUnlock();
+ErrOut:
+	return eErr;
+}
+
+/**************************************************************************
+ * Function Name  : PDumpMMUDumpPxEntries
+ * Inputs         :
+ * Outputs        :
+ * Returns        : PVRSRV_ERROR
+ * Description    :
+**************************************************************************/
+PVRSRV_ERROR PDumpMMUDumpPxEntries(MMU_LEVEL eMMULevel,
+								   const IMG_CHAR *pszPDumpDevName,
+                                   IMG_VOID *pvPxMem,
+                                   IMG_DEV_PHYADDR sPxDevPAddr,
+                                   IMG_UINT32 uiFirstEntry,
+                                   IMG_UINT32 uiNumEntries,
+                                   const IMG_CHAR *pszMemspaceName,
+                                   const IMG_CHAR *pszSymbolicAddr,
+                                   IMG_UINT64 uiSymbolicAddrOffset,
+                                   IMG_UINT32 uiBytesPerEntry,
+                                   IMG_UINT32 uiLog2Align,
+                                   IMG_UINT32 uiAddrShift,
+                                   IMG_UINT64 uiAddrMask,
+                                   IMG_UINT64 uiPxEProtMask,
+                                   IMG_UINT32 ui32Flags)
+{
+	PVRSRV_ERROR eErr = PVRSRV_OK;
+    IMG_UINT64 ui64PxSymAddr;
+    IMG_UINT64 ui64PxEValueSymAddr;
+    IMG_UINT32 ui32SymAddrOffset = 0;
+    IMG_UINT32 *pui32PxMem;
+    IMG_UINT64 *pui64PxMem;
+    IMG_BOOL   bPxEValid;
+    IMG_UINT32 uiPxEIdx;
+    IMG_INT32  iShiftAmount;
+    IMG_CHAR   *pszWrwSuffix = 0;
+    IMG_VOID *pvRawBytes = 0;
+    IMG_CHAR aszPxSymbolicAddr[MAX_SYMBOLIC_ADDRESS_LENGTH];
+    IMG_UINT64 ui64PxE64;
+    IMG_UINT64 ui64Protflags64;
+    IMG_CHAR *pszMMUPX;
+
+	PDUMP_GET_SCRIPT_STRING();
+
+	ui32Flags |= ( PDumpIsPersistent() ) ? PDUMP_FLAGS_PERSISTENT : 0;
+
+	if (!PDumpReady())
+	{
+		eErr = PVRSRV_ERROR_PDUMP_NOT_AVAILABLE;
+		goto ErrOut;
+	}
+
+
+	if (PDumpIsDumpSuspended())
+	{
+		eErr = PVRSRV_OK;
+		goto ErrOut;
+	}
+
+    if (pvPxMem == IMG_NULL)
+    {
+        PVR_DPF((PVR_DBG_ERROR, "PDUMPMMUDUMPPxENTRIES: PxMem is Null"));
+        eErr = PVRSRV_ERROR_INVALID_PARAMS;
+        goto ErrOut;
+    }
+
+
+	/*
+		create the symbolic address of the Px
+	*/
+	ui64PxSymAddr = sPxDevPAddr.uiAddr;
+
+	pszMMUPX = MMUPX_FMT(eMMULevel);
+    OSSNPrintf(aszPxSymbolicAddr,
+               MAX_SYMBOLIC_ADDRESS_LENGTH,
+               ":%s:%s%016llX",
+               pszPDumpDevName,
+               pszMMUPX,
+               ui64PxSymAddr);
+
+    PDumpOSLock();
+
+	/*
+		traverse PxEs, dumping entries
+	*/
+	for(uiPxEIdx = uiFirstEntry;
+        uiPxEIdx < uiFirstEntry + uiNumEntries;
+        uiPxEIdx++)
+	{
+		/* Calc the symbolic address offset of the PxE location
+		   This is what we have to add to the table address to get to a certain entry */
+		ui32SymAddrOffset = (uiPxEIdx*uiBytesPerEntry);
+
+		/* Calc the symbolic address of the PxE value and HW protflags */
+		/* just read it here */
+		switch(uiBytesPerEntry)
+		{
+			case 4:
+			{
+			 	pui32PxMem = pvPxMem;
+                ui64PxE64 = pui32PxMem[uiPxEIdx];
+                pszWrwSuffix = "";
+                pvRawBytes = &pui32PxMem[uiPxEIdx];
+				break;
+			}
+			case 8:
+			{
+			 	pui64PxMem = pvPxMem;
+                ui64PxE64 = pui64PxMem[uiPxEIdx];
+                pszWrwSuffix = "64";
+                pvRawBytes = &pui64PxMem[uiPxEIdx];
+				break;
+			}
+			default:
+			{
+				PVR_DPF((PVR_DBG_ERROR, "PDumpMMUPxEntries: error"));
+				ui64PxE64 = 0;
+                //!!error
+				break;
+			}
+		}
+
+        ui64PxEValueSymAddr = (ui64PxE64 & uiAddrMask) >> uiAddrShift << uiLog2Align;
+        ui64Protflags64 = ui64PxE64 & uiPxEProtMask;
+
+        bPxEValid = (ui64Protflags64 & 1) ? IMG_TRUE : IMG_FALSE;
+
+        if(bPxEValid)
+        {
+            _ContiguousPDumpBytes(aszPxSymbolicAddr, ui32SymAddrOffset, IMG_TRUE,
+                                  0, 0,
+                                  ui32Flags | PDUMP_FLAGS_CONTINUOUS);
+
+            iShiftAmount = (IMG_INT32)(uiLog2Align - uiAddrShift);
+
+            /* First put the symbolic representation of the actual
+               address of the entry into a pdump internal register */
+            /* MOV seemed cleaner here, since (a) it's 64-bit; (b) the
+               target is not memory.  However, MOV cannot do the
+               "reference" of the symbolic address.  Apparently WRW is
+               correct. */
+
+			if (pszSymbolicAddr == IMG_NULL)
+			{
+				pszSymbolicAddr = "none";
+			}
+
+            if (eMMULevel == MMU_LEVEL_1)
+            {
+             	if (iShiftAmount == 0)
+			    {
+             		eErr = PDumpOSBufprintf(hScript,
+											ui32MaxLen,
+											"WRW%s :%s:%s%016llX:0x%08X :%s:%s:0x%llx | 0x%llX\n",
+										  	pszWrwSuffix,
+											/* dest */
+											pszPDumpDevName,
+											pszMMUPX,
+											ui64PxSymAddr,
+											ui32SymAddrOffset,
+											/* src */
+											pszMemspaceName,
+											pszSymbolicAddr,
+											uiSymbolicAddrOffset,
+											/* ORing prot flags */
+											ui64Protflags64);
+                }
+                else
+                {
+                	eErr = PDumpOSBufprintf(hScript,
+					                        ui32MaxLen,
+					                        "WRW :%s:$1 :%s:%s:0x%llx\n",
+					                        /* dest */
+					                        pszPDumpDevName,
+										    /* src */
+									        pszMemspaceName,
+											pszSymbolicAddr,
+											uiSymbolicAddrOffset);
+                }
+            }
+            else
+            {
+            	pszMMUPX = MMUPX_FMT(eMMULevel-1);
+            	eErr = PDumpOSBufprintf(hScript,
+                                    ui32MaxLen,
+                                    "WRW :%s:$1 :%s:%s%016llX:0x0",
+                                    /* dest */
+                                    pszPDumpDevName,
+                                    /* src */
+                                    pszPDumpDevName,
+                                    pszMMUPX,
+                                    ui64PxEValueSymAddr);
+            	pszMMUPX = MMUPX_FMT(eMMULevel);
+            }
+            if (eErr != PVRSRV_OK)
+            {
+                goto ErrUnlock;
+            }
+            PDumpWriteScript(hScript, ui32Flags | PDUMP_FLAGS_CONTINUOUS);
+
+            /* Now shift it to the right place, if necessary: */
+            /* Now shift that value down, by the "Align shift"
+               amount, to get it into units (ought to assert that
+               we get an integer - i.e. we don't shift any bits
+               off the bottom, don't know how to do PDUMP
+               assertions yet) and then back up by the right
+               amount to get it into the position of the field.
+               This is optimised into a single shift right by the
+               difference between the two. */
+            if (iShiftAmount > 0)
+            {
+                /* Page X Address is specified in units larger
+                   than the position in the PxE would suggest.  */
+                eErr = PDumpOSBufprintf(hScript,
+                                        ui32MaxLen,
+                                        "SHR :%s:$1 :%s:$1 0x%X",
+                                        /* dest */
+                                        pszPDumpDevName,
+                                        /* src A */
+                                        pszPDumpDevName,
+                                        /* src B */
+                                        iShiftAmount);
+                if (eErr != PVRSRV_OK)
+                {
+                    goto ErrUnlock;
+                }
+                PDumpWriteScript(hScript, ui32Flags | PDUMP_FLAGS_CONTINUOUS);
+            }
+            else if (iShiftAmount < 0)
+            {
+                /* Page X Address is specified in units smaller
+                   than the position in the PxE would suggest.  */
+                eErr = PDumpOSBufprintf(hScript,
+                                        ui32MaxLen,
+                                        "SHL :%s:$1 :%s:$1 0x%X",
+                                        /* dest */
+                                        pszPDumpDevName,
+                                        /* src A */
+                                        pszPDumpDevName,
+                                        /* src B */
+                                        -iShiftAmount);
+                if (eErr != PVRSRV_OK)
+                {
+                    goto ErrUnlock;
+                }
+                PDumpWriteScript(hScript, ui32Flags | PDUMP_FLAGS_CONTINUOUS);
+            }
+
+            if (eMMULevel == MMU_LEVEL_1)
+            {
+            	if( iShiftAmount != 0)
+            	{
+					eErr = PDumpOSBufprintf(hScript,
+											ui32MaxLen,
+											"WRW%s :%s:%s%016llX:0x%08X :%s:$1  | 0x%llX",
+											pszWrwSuffix,
+											/* dest */
+											pszPDumpDevName,
+											pszMMUPX,
+											ui64PxSymAddr,
+											ui32SymAddrOffset,
+											/* src */
+											pszPDumpDevName,
+											/* ORing prot flags */
+											ui64Protflags64);
+					if(eErr != PVRSRV_OK)
+					{
+						goto ErrUnlock;
+					}
+					PDumpWriteScript(hScript, ui32Flags | PDUMP_FLAGS_CONTINUOUS);
+            	}
+             }
+            else
+            {
+            	/* Now we can "or" in the protection flags */
+            	eErr = PDumpOSBufprintf(hScript,
+                                    	ui32MaxLen,
+                                    	"OR :%s:$1 :%s:$1 0x%llX",
+                                    	/* dest */
+                                    	pszPDumpDevName,
+                                    	/* src A */
+                                    	pszPDumpDevName,
+                                    	/* src B */
+                                        ui64Protflags64);
+            	if (eErr != PVRSRV_OK)
+            	{
+                	goto ErrUnlock;
+            	}
+                PDumpWriteScript(hScript, ui32Flags | PDUMP_FLAGS_CONTINUOUS);
+
+                /* Finally, we write the register into the actual PxE */
+            	eErr = PDumpOSBufprintf(hScript,
+                                        ui32MaxLen,
+                                        "WRW%s :%s:%s%016llX:0x%08X :%s:$1",
+                                        pszWrwSuffix,
+                                        /* dest */
+                                        pszPDumpDevName,
+                                        pszMMUPX,
+                                        ui64PxSymAddr,
+                                        ui32SymAddrOffset,
+                                        /* src */
+                                        pszPDumpDevName);
+				if(eErr != PVRSRV_OK)
+				{
+					goto ErrUnlock;
+				}
+				PDumpWriteScript(hScript, ui32Flags | PDUMP_FLAGS_CONTINUOUS);
+        	}
+        }
+        else
+        {
+            /* If the entry was "invalid", simply write the actual
+               value found to the memory location */
+            eErr = _ContiguousPDumpBytes(aszPxSymbolicAddr, ui32SymAddrOffset, IMG_FALSE,
+                                         uiBytesPerEntry, pvRawBytes,
+                                         ui32Flags | PDUMP_FLAGS_CONTINUOUS);
+            if (eErr != PVRSRV_OK)
+            {
+                goto ErrUnlock;
+            }
+        }
+	}
+
+    /* flush out any partly accumulated stuff for LDB */
+    _ContiguousPDumpBytes(aszPxSymbolicAddr, ui32SymAddrOffset, IMG_TRUE,
+                          0, 0,
+                          ui32Flags | PDUMP_FLAGS_CONTINUOUS);
+
+ErrUnlock:
+	PDumpOSUnlock();
+ErrOut:
+	return eErr;
+}
+
+
+/**************************************************************************
+ * Function Name  : _PdumpAllocMMUContext
+ * Inputs         : pui32MMUContextID
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : pdump util to allocate MMU contexts
+**************************************************************************/
+static PVRSRV_ERROR _PdumpAllocMMUContext(IMG_UINT32 *pui32MMUContextID)
+{
+	IMG_UINT32 i;
+
+	/* there are MAX_PDUMP_MMU_CONTEXTS contexts available, find one */
+	for(i=0; i<MAX_PDUMP_MMU_CONTEXTS; i++)
+	{
+		if((guiPDumpMMUContextAvailabilityMask & (1U << i)))
+		{
+			/* mark in use */
+			guiPDumpMMUContextAvailabilityMask &= ~(1U << i);
+			*pui32MMUContextID = i;
+			return PVRSRV_OK;
+		}
+	}
+
+	PVR_DPF((PVR_DBG_ERROR, "_PdumpAllocMMUContext: no free MMU context ids"));
+
+	return PVRSRV_ERROR_MMU_CONTEXT_NOT_FOUND;
+}
+
+
+/**************************************************************************
+ * Function Name  : _PdumpFreeMMUContext
+ * Inputs         : ui32MMUContextID
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : pdump util to free MMU contexts
+**************************************************************************/
+static PVRSRV_ERROR _PdumpFreeMMUContext(IMG_UINT32 ui32MMUContextID)
+{
+	if(ui32MMUContextID < MAX_PDUMP_MMU_CONTEXTS)
+	{
+		/* free the id */
+        PVR_ASSERT (!(guiPDumpMMUContextAvailabilityMask & (1U << ui32MMUContextID)));
+		guiPDumpMMUContextAvailabilityMask |= (1U << ui32MMUContextID);
+		return PVRSRV_OK;
+	}
+
+	PVR_DPF((PVR_DBG_ERROR, "_PdumpFreeMMUContext: MMU context ids invalid"));
+
+	return PVRSRV_ERROR_MMU_CONTEXT_NOT_FOUND;
+}
+
+
+/**************************************************************************
+ * Function Name  : PDumpSetMMUContext
+ * Inputs         :
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : Set MMU Context
+**************************************************************************/
+PVRSRV_ERROR PDumpMMUAllocMMUContext(const IMG_CHAR *pszPDumpMemSpaceName,
+                                     IMG_DEV_PHYADDR sPCDevPAddr,
+                                     PDUMP_MMU_TYPE eMMUType,
+                                     IMG_UINT32 *pui32MMUContextID)
+{
+    IMG_UINT64 ui64PCSymAddr;
+    IMG_CHAR *pszMMUPX;
+
+	IMG_UINT32 ui32MMUContextID;
+	PVRSRV_ERROR eErr;
+	PDUMP_GET_SCRIPT_STRING();
+
+	eErr = _PdumpAllocMMUContext(&ui32MMUContextID);
+	if(eErr != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PDumpSetMMUContext: _PdumpAllocMMUContext failed: %d", eErr));
+        PVR_DBG_BREAK;
+		goto ErrOut;
+	}
+
+	/*
+		create the symbolic address of the PC
+    */
+	ui64PCSymAddr = sPCDevPAddr.uiAddr;
+
+	pszMMUPX = MMUPX_FMT(3);
+	eErr = PDumpOSBufprintf(hScript,
+                            ui32MaxLen, 
+                            "MMU :%s:v%d %d :%s:%s%016llX",
+                            /* mmu context */
+                            pszPDumpMemSpaceName,
+                            ui32MMUContextID,
+                            /* mmu type */
+                            eMMUType,
+                            /* PC base address */
+                            pszPDumpMemSpaceName,
+                            pszMMUPX,
+                            ui64PCSymAddr);
+	if(eErr != PVRSRV_OK)
+	{
+        PVR_DBG_BREAK;
+		goto ErrOut;
+	}
+
+	PDumpOSLock();
+	PDumpWriteScript(hScript, PDUMP_FLAGS_CONTINUOUS);
+    PDumpOSUnlock();
+
+	/* return the MMU Context ID */
+	*pui32MMUContextID = ui32MMUContextID;
+
+ErrOut:
+	return eErr;
+}
+
+
+/**************************************************************************
+ * Function Name  : PDumpClearMMUContext
+ * Inputs         :
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : Clear MMU Context
+**************************************************************************/
+PVRSRV_ERROR PDumpMMUFreeMMUContext(const IMG_CHAR *pszPDumpMemSpaceName,
+                                    IMG_UINT32 ui32MMUContextID)
+{
+	PVRSRV_ERROR eErr;
+	PDUMP_GET_SCRIPT_STRING();
+
+	eErr = PDumpOSBufprintf(hScript,
+                            ui32MaxLen,
+                            "-- Clear MMU Context for memory space %s", pszPDumpMemSpaceName);
+	if(eErr != PVRSRV_OK)
+	{
+		goto ErrOut;
+	}
+
+	PDumpOSLock();
+	PDumpWriteScript(hScript, PDUMP_FLAGS_CONTINUOUS);
+
+	eErr = PDumpOSBufprintf(hScript,
+                            ui32MaxLen, 
+                            "MMU :%s:v%d",
+                            pszPDumpMemSpaceName,
+                            ui32MMUContextID);
+	if(eErr != PVRSRV_OK)
+	{
+		goto ErrUnlock;
+	}
+
+	PDumpWriteScript(hScript, PDUMP_FLAGS_CONTINUOUS);
+
+	eErr = _PdumpFreeMMUContext(ui32MMUContextID);
+	if(eErr != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PDumpClearMMUContext: _PdumpFreeMMUContext failed: %d", eErr));
+		goto ErrUnlock;
+	}
+
+ErrUnlock:
+	PDumpOSUnlock();
+ErrOut:
+	return eErr;
+}
+
+
+/**************************************************************************
+ * Function Name  : PDumpMMUActivateCatalog
+ * Inputs         :
+ * Outputs        : 
+ * Returns        : PVRSRV_ERROR
+ * Description    : 
+**************************************************************************/
+PVRSRV_ERROR PDumpMMUActivateCatalog(const IMG_CHAR *pszPDumpRegSpaceName,
+                                     const IMG_CHAR *pszPDumpRegName,
+                                     IMG_UINT32 uiRegAddr,
+                                     const IMG_CHAR *pszPDumpPCSymbolicName)
+{
+	IMG_UINT32 ui32Flags = PDUMP_FLAGS_CONTINUOUS;
+	PVRSRV_ERROR eErr;
+
+	PDUMP_GET_SCRIPT_STRING();
+
+	ui32Flags |= ( PDumpIsPersistent() ) ? PDUMP_FLAGS_PERSISTENT : 0;
+
+	if (!PDumpReady())
+	{
+		return PVRSRV_ERROR_PDUMP_NOT_AVAILABLE;
+	}
+
+
+	if (PDumpIsDumpSuspended())
+	{
+		return PVRSRV_OK;
+	}
+
+	eErr = PDumpOSBufprintf(hScript, ui32MaxLen,
+							"-- Write Page Catalogue Address to %s",
+							pszPDumpRegName);
+	if(eErr != PVRSRV_OK)
+	{
+		goto ErrOut;
+	}
+
+	PDumpOSLock();
+	PDumpWriteScript(hScript, ui32Flags);
+
+    eErr = PDumpOSBufprintf(hScript,
+                            ui32MaxLen,
+                            "WRW :%s:0x%04X %s:0",
+                            /* dest */
+                            pszPDumpRegSpaceName,
+                            uiRegAddr,
+                            /* src */
+                            pszPDumpPCSymbolicName);
+    if (eErr != PVRSRV_OK)
+    {
+        goto ErrUnlock;
+    }
+    PDumpWriteScript(hScript, ui32Flags | PDUMP_FLAGS_CONTINUOUS);
+
+ErrUnlock:
+	PDumpOSUnlock();
+ErrOut:
+	return eErr;
+}
+
+
+PVRSRV_ERROR
+PDumpMMUSAB(const IMG_CHAR *pszPDumpMemNamespace,
+               IMG_UINT32 uiPDumpMMUCtx,
+               IMG_DEV_VIRTADDR sDevAddrStart,
+               IMG_DEVMEM_SIZE_T uiSize,
+               const IMG_CHAR *pszFilename,
+               IMG_UINT32 uiFileOffset,
+			   IMG_UINT32 ui32PDumpFlags)
+{    
+    PVRSRV_ERROR eError;
+
+    //							"SAB :%s:v%x:0x%010llX 0x%08X 0x%08X %s.bin",
+
+	PDUMP_GET_SCRIPT_STRING();
+
+	ui32PDumpFlags |= ( PDumpIsPersistent() ) ? PDUMP_FLAGS_PERSISTENT : 0;
+
+	if (!PDumpReady())
+	{
+		eError = PVRSRV_ERROR_PDUMP_NOT_AVAILABLE;
+		goto ErrOut;
+	}
+
+
+	if (PDumpIsDumpSuspended())
+	{
+		eError = PVRSRV_OK;
+		goto ErrOut;
+	}
+
+    eError = PDumpOSBufprintf(hScript,
+                              ui32MaxLen,
+                              "SAB :%s:v%x:" IMG_DEV_VIRTADDR_FMTSPEC " "
+                              IMG_DEVMEM_SIZE_FMTSPEC " "
+                              "0x%x %s.bin\n",
+                              pszPDumpMemNamespace,
+                              uiPDumpMMUCtx,
+                              sDevAddrStart.uiAddr,
+                              uiSize,
+                              uiFileOffset,
+                              pszFilename);
+    PVR_ASSERT(eError == PVRSRV_OK);
+    PDumpOSLock();
+    PDumpWriteScript(hScript, ui32PDumpFlags);
+    PDumpOSUnlock();
+
+ErrOut:
+    return eError;
+}
+
+#endif /* #if defined (PDUMP) */
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/pdump_physmem.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/pdump_physmem.c
new file mode 100644
index 0000000..8628594
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/pdump_physmem.c
@@ -0,0 +1,479 @@
+/*************************************************************************/ /*!
+@File
+@Title		Physmem PDump functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description	Common PDump (PMR specific) functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#if defined(PDUMP)
+
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+
+#include "pdump_physmem.h"
+#include "pdump_osfunc.h"
+#include "pdump_km.h"
+
+#include "allocmem.h"
+#include "osfunc.h"
+
+/* #define MAX_PDUMP_MMU_CONTEXTS	(10) */
+/* static IMG_UINT32 guiPDumpMMUContextAvailabilityMask = (1<<MAX_PDUMP_MMU_CONTEXTS)-1; */
+
+
+/* arbitrary buffer length here. */
+#define MAX_SYMBOLIC_ADDRESS_LENGTH 40
+
+struct _PDUMP_PHYSMEM_INFO_T_
+{
+    IMG_CHAR aszSymbolicAddress[MAX_SYMBOLIC_ADDRESS_LENGTH];
+    IMG_UINT64 ui64Size;
+    IMG_UINT32 ui32Align;
+    IMG_UINT32 ui32SerialNum;
+};
+
+
+/**************************************************************************
+ * Function Name  : PDumpPMRMalloc
+ * Inputs         :
+ * Outputs        :
+ * Returns        : PVRSRV_ERROR
+ * Description    :
+**************************************************************************/
+PVRSRV_ERROR PDumpPMRMalloc(const IMG_CHAR *pszDevSpace,
+                            const IMG_CHAR *pszSymbolicAddress,
+                            IMG_UINT64 ui64Size,
+                            IMG_DEVMEM_ALIGN_T uiAlign,
+                            IMG_BOOL bForcePersistent,
+                            IMG_HANDLE *phHandlePtr)
+{
+	PVRSRV_ERROR eError;
+	IMG_UINT32 ui32Flags = PDUMP_FLAGS_CONTINUOUS;
+
+    PDUMP_PHYSMEM_INFO_T *psPDumpAllocationInfo;
+
+	PDUMP_GET_SCRIPT_STRING()
+
+    psPDumpAllocationInfo = OSAllocMem(sizeof*psPDumpAllocationInfo);
+    PVR_ASSERT(psPDumpAllocationInfo != IMG_NULL);
+
+	if (bForcePersistent)
+	{
+		ui32Flags |= PDUMP_FLAGS_PERSISTENT;
+	}
+	else
+	{
+		ui32Flags |= ( PDumpIsPersistent() ) ? PDUMP_FLAGS_PERSISTENT : 0;
+	}
+
+	/*
+		construct the symbolic address
+	*/
+
+    OSSNPrintf(psPDumpAllocationInfo->aszSymbolicAddress,
+               sizeof(psPDumpAllocationInfo->aszSymbolicAddress),
+               ":%s:%s",
+               pszDevSpace,
+               pszSymbolicAddress);
+
+	/*
+		Write to the MMU script stream indicating the memory allocation
+	*/
+	eError = PDumpOSBufprintf(hScript, ui32MaxLen, "MALLOC %s 0x%llX 0x%llX\n",
+                            psPDumpAllocationInfo->aszSymbolicAddress,
+                            ui64Size,
+                            uiAlign);
+	if(eError != PVRSRV_OK)
+	{
+		OSFreeMem(psPDumpAllocationInfo);
+		return eError;
+	}
+
+	PDumpOSLock();
+	PDumpWriteScript(hScript, ui32Flags);
+	PDumpOSUnlock();
+
+    psPDumpAllocationInfo->ui64Size = ui64Size;
+    psPDumpAllocationInfo->ui32Align = TRUNCATE_64BITS_TO_32BITS(uiAlign);
+
+    *phHandlePtr = (IMG_HANDLE)psPDumpAllocationInfo;
+
+	return PVRSRV_OK;
+}
+
+
+/**************************************************************************
+ * Function Name  : PDumpPMRFree
+ * Inputs         :
+ * Outputs        :
+ * Returns        : PVRSRV_ERROR
+ * Description    :
+**************************************************************************/
+PVRSRV_ERROR PDumpPMRFree(IMG_HANDLE hPDumpAllocationInfoHandle)
+{
+	PVRSRV_ERROR eError;
+	IMG_UINT32 ui32Flags = PDUMP_FLAGS_CONTINUOUS;
+
+    PDUMP_PHYSMEM_INFO_T *psPDumpAllocationInfo;
+
+	PDUMP_GET_SCRIPT_STRING()
+
+    psPDumpAllocationInfo = (PDUMP_PHYSMEM_INFO_T *)hPDumpAllocationInfoHandle;
+
+	ui32Flags |= ( PDumpIsPersistent() ) ? PDUMP_FLAGS_PERSISTENT : 0;
+
+	/*
+		Write to the MMU script stream indicating the memory free
+	*/
+	eError = PDumpOSBufprintf(hScript, ui32MaxLen, "FREE %s\n",
+                              psPDumpAllocationInfo->aszSymbolicAddress);
+	if(eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+	PDumpOSLock();
+	PDumpWriteScript(hScript, ui32Flags);
+	PDumpOSUnlock();
+
+    OSFreeMem(psPDumpAllocationInfo);
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PDumpPMRWRW32(const IMG_CHAR *pszDevSpace,
+            const IMG_CHAR *pszSymbolicName,
+            IMG_DEVMEM_OFFSET_T uiOffset,
+            IMG_UINT32 ui32Value,
+            PDUMP_FLAGS_T uiPDumpFlags)
+{
+	PVRSRV_ERROR eError;
+
+	PDUMP_GET_SCRIPT_STRING()
+
+	uiPDumpFlags |= (PDumpIsPersistent()) ? PDUMP_FLAGS_PERSISTENT : 0;
+
+	eError = PDumpOSBufprintf(hScript,
+                              ui32MaxLen,
+                              "WRW :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC " "
+                              PMR_VALUE32_FMTSPEC " ",
+                              pszDevSpace,
+                              pszSymbolicName,
+                              uiOffset,
+                              ui32Value);
+	if(eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	PDumpOSLock();
+	PDumpWriteScript(hScript, uiPDumpFlags);
+	PDumpOSUnlock();
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PDumpPMRWRW64(const IMG_CHAR *pszDevSpace,
+            const IMG_CHAR *pszSymbolicName,
+            IMG_DEVMEM_OFFSET_T uiOffset,
+            IMG_UINT64 ui64Value,
+            PDUMP_FLAGS_T uiPDumpFlags)
+{
+	PVRSRV_ERROR eError;
+
+	PDUMP_GET_SCRIPT_STRING()
+
+	uiPDumpFlags |= (PDumpIsPersistent()) ? PDUMP_FLAGS_PERSISTENT : 0;
+
+	eError = PDumpOSBufprintf(hScript,
+                              ui32MaxLen,
+                              "WRW64 :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC " "
+                              PMR_VALUE64_FMTSPEC " ",
+                              pszDevSpace,
+                              pszSymbolicName,
+                              uiOffset,
+                              ui64Value);
+	if(eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	PDumpOSLock();
+	PDumpWriteScript(hScript, uiPDumpFlags);
+	PDumpOSUnlock();
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PDumpPMRLDB(const IMG_CHAR *pszDevSpace,
+            const IMG_CHAR *pszSymbolicName,
+            IMG_DEVMEM_OFFSET_T uiOffset,
+            IMG_DEVMEM_SIZE_T uiSize,
+            const IMG_CHAR *pszFilename,
+            IMG_UINT32 uiFileOffset,
+            PDUMP_FLAGS_T uiPDumpFlags)
+{
+	PVRSRV_ERROR eError;
+
+	PDUMP_GET_SCRIPT_STRING()
+
+	uiPDumpFlags |= (PDumpIsPersistent()) ? PDUMP_FLAGS_PERSISTENT : 0;
+
+	eError = PDumpOSBufprintf(hScript,
+                              ui32MaxLen,
+                              "LDB :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC " "
+                              IMG_DEVMEM_SIZE_FMTSPEC " "
+                              PDUMP_FILEOFFSET_FMTSPEC " %s\n",
+                              pszDevSpace,
+                              pszSymbolicName,
+                              uiOffset,
+                              uiSize,
+                              uiFileOffset,
+                              pszFilename);
+	if(eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	PDumpOSLock();
+	PDumpWriteScript(hScript, uiPDumpFlags);
+	PDumpOSUnlock();
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PDumpPMRSAB(const IMG_CHAR *pszDevSpace,
+                         const IMG_CHAR *pszSymbolicName,
+                         IMG_DEVMEM_OFFSET_T uiOffset,
+                         IMG_DEVMEM_SIZE_T uiSize,
+                         const IMG_CHAR *pszFileName,
+                         IMG_UINT32 uiFileOffset)
+{
+	PVRSRV_ERROR eError;
+	IMG_UINT32 uiPDumpFlags;
+
+	PDUMP_GET_SCRIPT_STRING()
+
+    uiPDumpFlags = 0; //PDUMP_FLAGS_CONTINUOUS;
+	uiPDumpFlags |= ( PDumpIsPersistent() ) ? PDUMP_FLAGS_PERSISTENT : 0;
+
+	eError = PDumpOSBufprintf(hScript,
+                              ui32MaxLen,
+                              "SAB :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC " "
+                              IMG_DEVMEM_SIZE_FMTSPEC " "
+                              "0x%08X %s.bin\n",
+                              pszDevSpace,
+                              pszSymbolicName,
+                              uiOffset,
+                              uiSize,
+                              uiFileOffset,
+                              pszFileName);
+	if(eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	PDumpOSLock();
+	PDumpWriteScript(hScript, uiPDumpFlags);
+	PDumpOSUnlock();
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PDumpPMRPOL(const IMG_CHAR *pszMemspaceName,
+            const IMG_CHAR *pszSymbolicName,
+            IMG_DEVMEM_OFFSET_T uiOffset,
+            IMG_UINT32 ui32Value,
+            IMG_UINT32 ui32Mask,
+            PDUMP_POLL_OPERATOR eOperator,
+            IMG_UINT32 uiCount,
+            IMG_UINT32 uiDelay,
+            PDUMP_FLAGS_T uiPDumpFlags)
+{
+	PVRSRV_ERROR eError;
+
+	PDUMP_GET_SCRIPT_STRING()
+
+
+	uiPDumpFlags |= ( PDumpIsPersistent() ) ? PDUMP_FLAGS_PERSISTENT : 0;
+
+	eError = PDumpOSBufprintf(hScript,
+                              ui32MaxLen,
+                              "POL :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC " "
+                              "0x%08X 0x%08X %d %d %d\n",
+                              pszMemspaceName,
+                              pszSymbolicName,
+                              uiOffset,
+                              ui32Value,
+                              ui32Mask,
+                              eOperator,
+                              uiCount,
+                              uiDelay);
+	if(eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	PDumpOSLock();
+	PDumpWriteScript(hScript, uiPDumpFlags);
+	PDumpOSUnlock();
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PDumpPMRCBP(const IMG_CHAR *pszMemspaceName,
+            const IMG_CHAR *pszSymbolicName,
+            IMG_DEVMEM_OFFSET_T uiReadOffset,
+            IMG_DEVMEM_OFFSET_T uiWriteOffset,
+            IMG_DEVMEM_SIZE_T uiPacketSize,
+            IMG_DEVMEM_SIZE_T uiBufferSize)
+{
+	PVRSRV_ERROR eError;
+	PDUMP_FLAGS_T uiPDumpFlags = 0;
+
+	PDUMP_GET_SCRIPT_STRING()
+
+	uiPDumpFlags |= ( PDumpIsPersistent() ) ? PDUMP_FLAGS_PERSISTENT : 0;
+
+	eError = PDumpOSBufprintf(hScript,
+                              ui32MaxLen,
+                              "CBP :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC " "
+                              IMG_DEVMEM_OFFSET_FMTSPEC " " IMG_DEVMEM_SIZE_FMTSPEC " " IMG_DEVMEM_SIZE_FMTSPEC "\n",
+                              pszMemspaceName,
+                              pszSymbolicName,
+                              uiReadOffset,
+                              uiWriteOffset,
+                              uiPacketSize,
+                              uiBufferSize);
+
+	if(eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	PDumpOSLock();
+	PDumpWriteScript(hScript, uiPDumpFlags);
+	PDumpOSUnlock();
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PDumpWriteBuffer(IMG_UINT8 *pcBuffer,
+                 IMG_SIZE_T uiNumBytes,
+                 PDUMP_FLAGS_T uiPDumpFlags,
+                 IMG_CHAR *pszFilenameOut,
+                 IMG_SIZE_T uiFilenameBufSz,
+                 PDUMP_FILEOFFSET_T *puiOffsetOut)
+{
+	PVRSRV_ERROR eError;
+	PVR_UNREFERENCED_PARAMETER(uiFilenameBufSz);
+
+	if (!PDumpReady())
+	{
+		eError = PVRSRV_ERROR_PDUMP_NOT_AVAILABLE;
+        goto e0;
+	}
+
+    PVR_ASSERT(uiNumBytes > 0);
+
+	/* PRQA S 3415 1 */ /* side effects desired */
+	if (PDumpIsDumpSuspended())
+	{
+		return PVRSRV_OK;
+	}
+
+	PDumpOSLock();
+
+	eError = PDumpWriteParameter(pcBuffer, uiNumBytes, uiPDumpFlags, puiOffsetOut, pszFilenameOut);
+
+	PDumpOSUnlock();
+
+	PVR_LOGG_IF_ERROR(eError, "PDumpWriteParameter", e0);
+
+	return PVRSRV_OK;
+
+ e0:
+	/* Die on debug builds */
+	PVR_ASSERT(eError != PVRSRV_OK);
+    return eError;
+}
+
+IMG_INTERNAL IMG_VOID
+PDumpPMRMallocPMR(const PMR *psPMR,
+                  IMG_DEVMEM_SIZE_T uiSize,
+                  IMG_DEVMEM_ALIGN_T uiBlockSize,
+                  IMG_BOOL bForcePersistent,
+                  IMG_HANDLE *phPDumpAllocInfoPtr)
+{
+    PVRSRV_ERROR eError;
+    IMG_HANDLE hPDumpAllocInfo;
+    IMG_CHAR aszMemspaceName[30];
+    IMG_CHAR aszSymbolicName[30];
+    IMG_DEVMEM_OFFSET_T uiOffset;
+    IMG_DEVMEM_OFFSET_T uiNextSymName;
+
+    uiOffset = 0;
+    eError = PMR_PDumpSymbolicAddr(psPMR,
+                                   uiOffset,
+                                   sizeof(aszMemspaceName),
+                                   &aszMemspaceName[0],
+                                   sizeof(aszSymbolicName),
+                                   &aszSymbolicName[0],
+                                   &uiOffset,
+				   &uiNextSymName);
+    PVR_ASSERT(eError == PVRSRV_OK);
+    PVR_ASSERT(uiOffset == 0);
+    PVR_ASSERT((uiOffset + uiSize) <= uiNextSymName);
+
+	PDumpPMRMalloc(aszMemspaceName,
+				   aszSymbolicName,
+				   uiSize,
+				   uiBlockSize,
+				   bForcePersistent,
+				   &hPDumpAllocInfo);
+
+	*phPDumpAllocInfoPtr = hPDumpAllocInfo;
+}
+#endif /* PDUMP */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/physheap.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/physheap.c
new file mode 100644
index 0000000..d80c9da
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/physheap.c
@@ -0,0 +1,275 @@
+/*************************************************************************/ /*!
+@File           physheap.c
+@Title          Physical heap management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Management functions for the physical heap(s). A heap contains
+                all the information required by services when using memory from
+                that heap (such as CPU <> Device physical address translation).
+                A system must register one heap but can have more then one which
+                is why a heap must register with a (system) unique ID.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+#include "img_types.h"
+#include "physheap.h"
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "osfunc.h"
+
+struct _PHYS_HEAP_
+{
+	/*! ID of this physcial memory heap */
+	IMG_UINT32					ui32PhysHeapID;
+	/*! The type of this heap */
+	PHYS_HEAP_TYPE			eType;
+
+	/*! Start address of the physcial memory heap (LMA only) */
+	IMG_CPU_PHYADDR				sStartAddr;
+	/*! Size of the physcial memory heap (LMA only) */
+	IMG_UINT64					uiSize;
+
+	/*! PDump name of this physcial memory heap */
+	IMG_CHAR					*pszPDumpMemspaceName;
+	/*! Private data for the translate routines */
+	IMG_HANDLE					hPrivData;
+	/*! Function callbacks */
+	PHYS_HEAP_FUNCTIONS			*psMemFuncs;
+
+
+	/*! Refcount */
+	IMG_UINT32					ui32RefCount;
+	/*! Pointer to next physcial heap */
+	struct _PHYS_HEAP_		*psNext;
+};
+
+PHYS_HEAP *g_psPhysHeapList;
+
+#if defined(REFCOUNT_DEBUG)
+#define PHYSHEAP_REFCOUNT_PRINT(fmt, ...)	\
+	PVRSRVDebugPrintf(PVR_DBG_WARNING,	\
+			  __FILE__,		\
+			  __LINE__,		\
+			  fmt,			\
+			  __VA_ARGS__)
+#else
+#define PHYSHEAP_REFCOUNT_PRINT(fmt, ...)
+#endif
+
+
+PVRSRV_ERROR PhysHeapRegister(PHYS_HEAP_CONFIG *psConfig,
+							  PHYS_HEAP **ppsPhysHeap)
+{
+	PHYS_HEAP *psNew;
+	PHYS_HEAP *psTmp;
+
+	PVR_DPF_ENTERED;
+
+	if (psConfig->eType == PHYS_HEAP_TYPE_UNKNOWN)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* Check this heap ID isn't already in use */
+	psTmp = g_psPhysHeapList;
+	while (psTmp)
+	{
+		if (psTmp->ui32PhysHeapID == psConfig->ui32PhysHeapID)
+		{
+			return PVRSRV_ERROR_PHYSHEAP_ID_IN_USE;
+		}
+		psTmp = psTmp->psNext;
+	}
+
+	psNew = OSAllocMem(sizeof(PHYS_HEAP));
+	if (psNew == IMG_NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	psNew->ui32PhysHeapID = psConfig->ui32PhysHeapID;
+	psNew->eType = psConfig->eType;
+	psNew->sStartAddr = psConfig->sStartAddr;
+	psNew->uiSize = psConfig->uiSize;
+	psNew->psMemFuncs = psConfig->psMemFuncs;
+	psNew->hPrivData = psConfig->hPrivData;
+	psNew->ui32RefCount = 0;
+	psNew->pszPDumpMemspaceName = psConfig->pszPDumpMemspaceName;
+
+	psNew->psNext = g_psPhysHeapList;
+	g_psPhysHeapList = psNew;
+
+	*ppsPhysHeap = psNew;
+
+	PVR_DPF_RETURN_RC1(PVRSRV_OK, *ppsPhysHeap);
+}
+
+IMG_VOID PhysHeapUnregister(PHYS_HEAP *psPhysHeap)
+{
+	PVR_DPF_ENTERED1(psPhysHeap);
+
+	PVR_ASSERT(psPhysHeap->ui32RefCount == 0);
+
+	if (g_psPhysHeapList == psPhysHeap)
+	{
+		g_psPhysHeapList = psPhysHeap->psNext;
+	}
+	else
+	{
+		PHYS_HEAP *psTmp = g_psPhysHeapList;
+
+		while(psTmp->psNext != psPhysHeap)
+		{
+			psTmp = psTmp->psNext;
+		}
+		psTmp->psNext = psPhysHeap->psNext;
+	}
+
+	OSFreeMem(psPhysHeap);
+
+	PVR_DPF_RETURN;
+}
+
+PVRSRV_ERROR PhysHeapAcquire(IMG_UINT32 ui32PhysHeapID,
+							 PHYS_HEAP **ppsPhysHeap)
+{
+	PHYS_HEAP *psTmp = g_psPhysHeapList;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	PVR_DPF_ENTERED1(ui32PhysHeapID);
+
+	while (psTmp)
+	{
+		if (psTmp->ui32PhysHeapID == ui32PhysHeapID)
+		{
+			break;
+		}
+		psTmp = psTmp->psNext;
+	}
+	
+	if (psTmp == IMG_NULL)
+	{
+		eError = PVRSRV_ERROR_PHYSHEAP_ID_INVALID;
+	}
+	else
+	{
+		psTmp->ui32RefCount++;
+		PHYSHEAP_REFCOUNT_PRINT("%s: Heap %p, refcount = %d", __FUNCTION__, psTmp, psTmp->ui32RefCount);
+	}
+
+	*ppsPhysHeap = psTmp;
+	PVR_DPF_RETURN_RC1(eError, *ppsPhysHeap);
+}
+
+IMG_VOID PhysHeapRelease(PHYS_HEAP *psPhysHeap)
+{
+	PVR_DPF_ENTERED1(psPhysHeap);
+
+	psPhysHeap->ui32RefCount--;
+	PHYSHEAP_REFCOUNT_PRINT("%s: Heap %p, refcount = %d", __FUNCTION__, psPhysHeap, psPhysHeap->ui32RefCount);
+
+	PVR_DPF_RETURN;
+}
+
+PHYS_HEAP_TYPE PhysHeapGetType(PHYS_HEAP *psPhysHeap)
+{
+	return psPhysHeap->eType;
+}
+
+PVRSRV_ERROR PhysHeapGetAddress(PHYS_HEAP *psPhysHeap,
+								IMG_CPU_PHYADDR *psCpuPAddr)
+{
+	if (psPhysHeap->eType == PHYS_HEAP_TYPE_LMA)
+	{
+		*psCpuPAddr = psPhysHeap->sStartAddr;
+		return PVRSRV_OK;
+	}
+
+	return PVRSRV_ERROR_INVALID_PARAMS;
+}
+
+PVRSRV_ERROR PhysHeapGetSize(PHYS_HEAP *psPhysHeap,
+							   IMG_UINT64 *puiSize)
+{
+	if (psPhysHeap->eType == PHYS_HEAP_TYPE_LMA)
+	{
+		*puiSize = psPhysHeap->uiSize;
+		return PVRSRV_OK;
+	}
+
+	return PVRSRV_ERROR_INVALID_PARAMS;
+}
+
+IMG_VOID PhysHeapCpuPAddrToDevPAddr(PHYS_HEAP *psPhysHeap,
+									IMG_UINT32 ui32NumOfAddr,
+									IMG_DEV_PHYADDR *psDevPAddr,
+									IMG_CPU_PHYADDR *psCpuPAddr)
+{
+	psPhysHeap->psMemFuncs->pfnCpuPAddrToDevPAddr(psPhysHeap->hPrivData,
+												 ui32NumOfAddr,
+												 psDevPAddr,
+												 psCpuPAddr);
+}
+
+IMG_VOID PhysHeapDevPAddrToCpuPAddr(PHYS_HEAP *psPhysHeap,
+									IMG_UINT32 ui32NumOfAddr,
+									IMG_CPU_PHYADDR *psCpuPAddr,
+									IMG_DEV_PHYADDR *psDevPAddr)
+{
+	psPhysHeap->psMemFuncs->pfnDevPAddrToCpuPAddr(psPhysHeap->hPrivData,
+												 ui32NumOfAddr,
+												 psCpuPAddr,
+												 psDevPAddr);
+}
+
+IMG_CHAR *PhysHeapPDumpMemspaceName(PHYS_HEAP *psPhysHeap)
+{
+	return psPhysHeap->pszPDumpMemspaceName;
+}
+
+PVRSRV_ERROR PhysHeapInit(IMG_VOID)
+{
+	g_psPhysHeapList = IMG_NULL;
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PhysHeapDeinit(IMG_VOID)
+{
+	PVR_ASSERT(g_psPhysHeapList == IMG_NULL);
+
+	return PVRSRV_OK;
+}
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/physmem.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/physmem.c
new file mode 100644
index 0000000..61bbbe9
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/physmem.c
@@ -0,0 +1,130 @@
+/*************************************************************************/ /*!
+@File           physmem.c
+@Title          Physmem
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Common entry point for creation of RAM backed PMR's
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+#include "device.h"
+#include "physmem.h"
+#include "pvrsrv.h"
+
+#if defined(DEBUG)
+IMG_UINT32 gPMRAllocFail = 0;
+#endif /* defined(DEBUG) */
+
+PVRSRV_ERROR
+PhysmemNewRamBackedPMR(PVRSRV_DEVICE_NODE *psDevNode,
+						IMG_DEVMEM_SIZE_T uiSize,
+						PMR_SIZE_T uiChunkSize,
+						IMG_UINT32 ui32NumPhysChunks,
+						IMG_UINT32 ui32NumVirtChunks,
+						IMG_BOOL *pabMappingTable,
+						IMG_UINT32 uiLog2PageSize,
+						PVRSRV_MEMALLOCFLAGS_T uiFlags,
+						PMR **ppsPMRPtr)
+{
+	PVRSRV_DEVICE_PHYS_HEAP ePhysHeapIdx = (uiFlags & PVRSRV_MEMALLOCFLAG_CPU_LOCAL) ? 1: 0;
+	PFN_SYS_DEV_CHECK_MEM_ALLOC_SIZE pfnCheckMemAllocSize = \
+										psDevNode->psDevConfig->pfnCheckMemAllocSize;
+#if defined(DEBUG)
+	static IMG_UINT32 ui32AllocCount = 1;
+#endif /* defined(DEBUG) */
+	/********************************
+	 * Sanity check the cache flags *
+	 ********************************/
+	/* Check if we can honour cached cache-coherent allocations */
+	if ((PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_CACHED_CACHE_COHERENT) &&
+		(!PVRSRVSystemHasCacheSnooping()))
+	{
+		return PVRSRV_ERROR_UNSUPPORTED_CACHE_MODE;
+	}
+
+	/* Both or neither have to be cache-coherent */
+	if ((PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT) ^
+		(PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT))
+	{
+		return PVRSRV_ERROR_UNSUPPORTED_CACHE_MODE;
+	}
+
+	if ((PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_CACHED_CACHE_COHERENT) ^
+		(PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_CACHED_CACHE_COHERENT))
+	{
+		return PVRSRV_ERROR_UNSUPPORTED_CACHE_MODE;
+	}
+
+	/* Apply memory budgeting policy */
+	if (pfnCheckMemAllocSize)
+	{
+		PVRSRV_ERROR eError = \
+						pfnCheckMemAllocSize(psDevNode, (IMG_UINT64)uiChunkSize*ui32NumPhysChunks);
+		if (eError != PVRSRV_OK)
+		{
+			return eError;
+		}
+	}
+
+#if defined(DEBUG)
+	if (gPMRAllocFail > 0)
+	{
+		if (ui32AllocCount < gPMRAllocFail)
+		{
+			ui32AllocCount++;
+		}
+		else
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s failed on %d allocation.",
+			         __func__, ui32AllocCount));
+			return PVRSRV_ERROR_OUT_OF_MEMORY;
+		}
+	}
+#endif /* defined(DEBUG) */
+
+	return psDevNode->pfnCreateRamBackedPMR[ePhysHeapIdx](psDevNode,
+											uiSize,
+											uiChunkSize,
+											ui32NumPhysChunks,
+											ui32NumVirtChunks,
+											pabMappingTable,
+											uiLog2PageSize,
+											uiFlags,
+											ppsPMRPtr);
+}
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/physmem_lma.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/physmem_lma.c
new file mode 100644
index 0000000..2e64a8e
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/physmem_lma.c
@@ -0,0 +1,1104 @@
+/*************************************************************************/ /*!
+@File           physmem_lma.c
+@Title          Local card memory allocator
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Part of the memory management. This module is responsible for
+                implementing the function callbacks for local card memory.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+
+#include "allocmem.h"
+#include "osfunc.h"
+#include "pvrsrv.h"
+#include "physmem_lma.h"
+#include "pdump_physmem.h"
+#include "pdump_km.h"
+#include "pmr.h"
+#include "pmr_impl.h"
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#include "process_stats.h"
+#endif
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#include "rgxutils.h"
+#endif
+
+typedef struct _PMR_LMALLOCARRAY_DATA_ {
+	PVRSRV_DEVICE_NODE *psDevNode;
+	IMG_UINT32 uiNumAllocs;
+	IMG_UINT32 uiLog2AllocSize;
+	IMG_UINT32 uiAllocSize;
+	IMG_DEV_PHYADDR *pasDevPAddr;
+
+	IMG_BOOL bZeroOnAlloc;
+	IMG_BOOL bPoisonOnAlloc;
+
+	/* Tells if allocation is physically backed */
+	IMG_BOOL bHasLMPages;
+	IMG_BOOL bOnDemand;
+
+	/*
+	  for pdump...
+	*/
+	IMG_BOOL bPDumpMalloced;
+	IMG_HANDLE hPDumpAllocInfo;
+
+	/*
+	  record at alloc time whether poisoning will be required when the
+	  PMR is freed.
+	*/
+	IMG_BOOL bPoisonOnFree;
+} PMR_LMALLOCARRAY_DATA;
+
+static PVRSRV_ERROR _MapAlloc(PVRSRV_DEVICE_NODE *psDevNode, IMG_DEV_PHYADDR *psDevPAddr,
+								IMG_SIZE_T uiSize, IMG_VOID **pvPtr, PMR_FLAGS_T ulFlags)
+{
+	IMG_CPU_PHYADDR sCpuPAddr;
+
+	PhysHeapDevPAddrToCpuPAddr(psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL], 1, &sCpuPAddr, psDevPAddr);
+	*pvPtr = OSMapPhysToLin(sCpuPAddr,
+							uiSize,
+							ulFlags);
+
+	if (*pvPtr == IMG_NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+	else
+	{
+		return PVRSRV_OK;
+	}
+}
+
+static IMG_VOID _UnMapAlloc(PVRSRV_DEVICE_NODE *psDevNode, IMG_SIZE_T uiSize, IMG_VOID *pvPtr)
+{
+	OSUnMapPhysToLin(pvPtr, uiSize, 0);
+}
+
+static PVRSRV_ERROR
+_PoisonAlloc(PVRSRV_DEVICE_NODE *psDevNode,
+			 IMG_DEV_PHYADDR *psDevPAddr,
+			 IMG_UINT32 uiAllocSize,
+			 const IMG_CHAR *pacPoisonData,
+			 IMG_SIZE_T uiPoisonSize)
+{
+	IMG_UINT32 uiSrcByteIndex;
+	IMG_UINT32 uiDestByteIndex;
+	IMG_VOID *pvKernLin = IMG_NULL;
+	IMG_CHAR *pcDest = IMG_NULL;
+
+	PVRSRV_ERROR eError;
+
+	eError = _MapAlloc(psDevNode, psDevPAddr, uiAllocSize, &pvKernLin, 0);
+	if (eError != PVRSRV_OK)
+	{
+		goto map_failed;
+	}
+	pcDest = pvKernLin;
+
+	uiSrcByteIndex = 0;
+	for(uiDestByteIndex=0; uiDestByteIndex<uiAllocSize; uiDestByteIndex++)
+	{
+		pcDest[uiDestByteIndex] = pacPoisonData[uiSrcByteIndex];
+		uiSrcByteIndex++;
+		if (uiSrcByteIndex == uiPoisonSize)
+		{
+			uiSrcByteIndex = 0;
+		}
+	}
+
+	_UnMapAlloc(psDevNode, uiAllocSize, pvKernLin);
+	return PVRSRV_OK;
+
+map_failed:
+	PVR_DPF((PVR_DBG_ERROR, "Failed to poison allocation"));
+	return eError;
+}
+
+static PVRSRV_ERROR
+_ZeroAlloc(PVRSRV_DEVICE_NODE *psDevNode,
+		   IMG_DEV_PHYADDR *psDevPAddr,
+		   IMG_UINT32 uiAllocSize)
+{
+	IMG_VOID *pvKernLin = IMG_NULL;
+	PVRSRV_ERROR eError;
+
+	eError = _MapAlloc(psDevNode, psDevPAddr, uiAllocSize, &pvKernLin, 0);
+	if (eError != PVRSRV_OK)
+	{
+		goto map_failed;
+	}
+
+	OSMemSet(pvKernLin, 0, uiAllocSize);
+
+	_UnMapAlloc(psDevNode, uiAllocSize, pvKernLin);
+	return PVRSRV_OK;
+
+map_failed:
+	PVR_DPF((PVR_DBG_ERROR, "Failed to zero allocation"));
+	return eError;
+}
+
+static const IMG_CHAR _AllocPoison[] = "^PoIsOn";
+static const IMG_UINT32 _AllocPoisonSize = 7;
+static const IMG_CHAR _FreePoison[] = "<DEAD-BEEF>";
+static const IMG_UINT32 _FreePoisonSize = 11;
+
+static PVRSRV_ERROR
+_AllocLMPageArray(PVRSRV_DEVICE_NODE *psDevNode,
+			  PMR_SIZE_T uiSize,
+			  PMR_SIZE_T uiChunkSize,
+			  IMG_UINT32 ui32NumPhysChunks,
+			  IMG_UINT32 ui32NumVirtChunks,
+			  IMG_BOOL *pabMappingTable,
+			  IMG_UINT32 uiLog2PageSize,
+			  IMG_BOOL bZero,
+			  IMG_BOOL bPoisonOnAlloc,
+			  IMG_BOOL bPoisonOnFree,
+			  IMG_BOOL bContig,
+			  IMG_BOOL bOnDemand,
+			  PMR_LMALLOCARRAY_DATA **ppsPageArrayDataPtr
+			  )
+{
+	PMR_LMALLOCARRAY_DATA *psPageArrayData = IMG_NULL;
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(!bZero || !bPoisonOnAlloc);
+
+	if (uiSize >= 0x1000000000ULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "physmem_lma.c: Do you really want 64GB of physical memory in one go?  This is likely a bug"));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto errorOnParam;
+	}
+
+	PVR_ASSERT(OSGetPageShift() <= uiLog2PageSize);
+
+	if ((uiSize & ((1ULL << uiLog2PageSize) - 1)) != 0)
+	{
+		eError = PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE;
+		goto errorOnParam;
+	}
+
+	psPageArrayData = OSAllocMem(sizeof(PMR_LMALLOCARRAY_DATA));
+	if (psPageArrayData == IMG_NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto errorOnAllocArray;
+	}
+	OSMemSet(psPageArrayData, 0, sizeof(PMR_LMALLOCARRAY_DATA));
+
+	if (bContig)
+	{
+		/*
+			Some allocations require kernel mappings in which case in order
+			to be virtually contiguous we also have to be physically contiguous.
+		*/
+		psPageArrayData->uiNumAllocs = 1;
+		psPageArrayData->uiAllocSize = TRUNCATE_64BITS_TO_32BITS(uiSize);
+		psPageArrayData->uiLog2AllocSize = uiLog2PageSize;
+	}
+	else
+	{
+		IMG_UINT32 uiNumPages;
+
+		/* Use of cast below is justified by the assertion that follows to
+		prove that no significant bits have been truncated */
+		uiNumPages = (IMG_UINT32)(((uiSize-1)>>uiLog2PageSize) + 1);
+		PVR_ASSERT(((PMR_SIZE_T)uiNumPages << uiLog2PageSize) == uiSize);
+
+		psPageArrayData->uiNumAllocs = uiNumPages;
+		psPageArrayData->uiAllocSize = 1 << uiLog2PageSize;
+		psPageArrayData->uiLog2AllocSize = uiLog2PageSize;
+	}
+	psPageArrayData->psDevNode = psDevNode;
+	psPageArrayData->pasDevPAddr = OSAllocMem(sizeof(IMG_DEV_PHYADDR)*
+												psPageArrayData->uiNumAllocs);
+	if (psPageArrayData->pasDevPAddr == IMG_NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto errorOnAllocAddr;
+	}
+	OSMemSet(psPageArrayData->pasDevPAddr, 0, sizeof(IMG_DEV_PHYADDR)*
+												psPageArrayData->uiNumAllocs);
+
+	/* N.B.  We have a window of opportunity where a failure in
+	   createPMR the finalize function can be called before the PMR
+	   MALLOC and thus the hPDumpAllocInfo won't be set.  So we have
+	   to conditionally call the PDumpFree function. */
+	psPageArrayData->bPDumpMalloced = IMG_FALSE;
+
+	psPageArrayData->bZeroOnAlloc = bZero;
+	psPageArrayData->bPoisonOnAlloc = bPoisonOnAlloc;
+	psPageArrayData->bPoisonOnFree = bPoisonOnFree;
+ 	psPageArrayData->bHasLMPages = IMG_FALSE;
+ 	psPageArrayData->bOnDemand = bOnDemand;
+
+	*ppsPageArrayDataPtr = psPageArrayData;
+
+	return PVRSRV_OK;
+
+	/*
+	  error exit paths follow:
+	*/
+
+errorOnAllocAddr:
+	OSFreeMem(psPageArrayData);
+
+errorOnAllocArray:
+errorOnParam:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+
+static PVRSRV_ERROR
+_AllocLMPages(PMR_LMALLOCARRAY_DATA *psPageArrayDataPtr)
+{
+	IMG_UINT32 uiAllocSize = psPageArrayDataPtr->uiAllocSize;
+	IMG_UINT32 uiLog2AllocSize = psPageArrayDataPtr->uiLog2AllocSize;
+	PVRSRV_DEVICE_NODE *psDevNode = psPageArrayDataPtr->psDevNode;
+	IMG_BOOL bPoisonOnAlloc =  psPageArrayDataPtr->bPoisonOnAlloc;
+	IMG_BOOL bZeroOnAlloc =  psPageArrayDataPtr->bZeroOnAlloc;
+	PVRSRV_ERROR eError;
+	IMG_BOOL bAllocResult;
+	RA_BASE_T uiCardAddr;
+	RA_LENGTH_T uiActualSize;
+	IMG_UINT32 i;
+	RA_ARENA *pArena=psDevNode->psLocalDevMemArena;
+
+	PVR_ASSERT(!psPageArrayDataPtr->bHasLMPages);
+
+	for(i=0;i<psPageArrayDataPtr->uiNumAllocs;i++)
+	{
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+{
+		IMG_UINT32  ui32OSid=0, ui32OSidReg=0;
+		IMG_PID     pId;
+
+		pId=OSGetCurrentProcessID();
+		RetrieveOSidsfromPidList(pId, &ui32OSid, &ui32OSidReg);
+
+		pArena=psDevNode->psOSidSubArena[ui32OSid];
+		PVR_DPF((PVR_DBG_MESSAGE,"(GPU Virtualization Validation): Giving from OS slot %d",ui32OSid));
+}
+#endif
+
+		bAllocResult = RA_Alloc(pArena,
+								uiAllocSize,
+								0,                                      /* No flags */
+								1ULL << uiLog2AllocSize,
+								&uiCardAddr,
+								&uiActualSize,
+								IMG_NULL);                      /* No private handle */
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+{
+		PVR_DPF((PVR_DBG_MESSAGE,"(GPU Virtualization Validation): Address: %llu \n",uiCardAddr));
+}
+#endif
+
+		if (!bAllocResult)
+		{
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto errorOnRAAlloc;
+		}
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+		/* Allocation is done a page at a time */
+		PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, uiActualSize);
+#else
+		{
+			IMG_CPU_PHYADDR sLocalCpuPAddr;
+
+			sLocalCpuPAddr.uiAddr = (IMG_UINT64)uiCardAddr;
+			PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES,
+									 IMG_NULL,
+									 sLocalCpuPAddr,
+									 uiActualSize,
+									 IMG_NULL);
+		}
+#endif
+#endif
+
+		psPageArrayDataPtr->pasDevPAddr[i].uiAddr = uiCardAddr;
+
+		if (bPoisonOnAlloc)
+		{
+			eError = _PoisonAlloc(psDevNode,
+								  &psPageArrayDataPtr->pasDevPAddr[i],
+								  uiAllocSize,
+								  _AllocPoison,
+								  _AllocPoisonSize);
+			if (eError !=PVRSRV_OK)
+			{
+				goto errorOnPoison;
+			}
+		}
+
+		if (bZeroOnAlloc)
+		{
+			eError = _ZeroAlloc(psDevNode,
+								&psPageArrayDataPtr->pasDevPAddr[i],
+								uiAllocSize);
+			if (eError !=PVRSRV_OK)
+			{
+				goto errorOnZero;
+			}
+		}
+	}
+
+	psPageArrayDataPtr->bHasLMPages = IMG_TRUE;
+
+	return PVRSRV_OK;
+
+	/*
+	  error exit paths follow:
+	*/
+errorOnZero:
+errorOnPoison:
+errorOnRAAlloc:
+	while (i)
+	{
+		i--;
+		RA_Free(psDevNode->psLocalDevMemArena,
+				psPageArrayDataPtr->pasDevPAddr[i].uiAddr);
+	}
+	PVR_ASSERT(eError != PVRSRV_OK);
+
+	return eError;
+}
+
+static PVRSRV_ERROR
+_FreeLMPageArray(PMR_LMALLOCARRAY_DATA *psPageArrayData)
+{
+	OSFreeMem(psPageArrayData->pasDevPAddr);
+
+	PVR_DPF((PVR_DBG_MESSAGE, "physmem_lma.c: freed local memory array structure for PMR @0x%p", psPageArrayData));
+
+	OSFreeMem(psPageArrayData);
+
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR
+_FreeLMPages(PMR_LMALLOCARRAY_DATA *psPageArrayData)
+{
+	IMG_UINT32 uiAllocSize;
+	IMG_UINT32 i;
+
+	PVR_ASSERT(psPageArrayData->bHasLMPages);
+
+	uiAllocSize = psPageArrayData->uiAllocSize;
+
+	for (i = 0;i < psPageArrayData->uiNumAllocs;i++)
+	{
+		if (psPageArrayData->bPoisonOnFree)
+		{
+			_PoisonAlloc(psPageArrayData->psDevNode,
+						 &psPageArrayData->pasDevPAddr[i],
+						 uiAllocSize,
+						 _FreePoison,
+						 _FreePoisonSize);
+		}
+		RA_Free(psPageArrayData->psDevNode->psLocalDevMemArena,
+				psPageArrayData->pasDevPAddr[i].uiAddr);
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+		/* Allocation is done a page at a time */
+		PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, uiAllocSize);
+#else
+        {
+			PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, psPageArrayData->pasDevPAddr[i].uiAddr);
+        }
+#endif
+#endif
+	}
+
+	psPageArrayData->bHasLMPages = IMG_FALSE;
+
+	PVR_DPF((PVR_DBG_MESSAGE, "physmem_lma.c: freed local memory for PMR @0x%p", psPageArrayData));
+
+	return PVRSRV_OK;
+}
+
+/*
+ *
+ * Implementation of callback functions
+ *
+ */
+
+/* destructor func is called after last reference disappears, but
+   before PMR itself is freed. */
+static PVRSRV_ERROR
+PMRFinalizeLocalMem(PMR_IMPL_PRIVDATA pvPriv
+				 )
+{
+	PVRSRV_ERROR eError;
+	PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = IMG_NULL;
+
+	psLMAllocArrayData = pvPriv;
+
+	/* Conditionally do the PDump free, because if CreatePMR failed we
+	   won't have done the PDump MALLOC.  */
+	if (psLMAllocArrayData->bPDumpMalloced)
+	{
+		PDumpPMRFree(psLMAllocArrayData->hPDumpAllocInfo);
+	}
+
+	/*  We can't free pages until now. */
+	if (psLMAllocArrayData->bHasLMPages)
+	{
+		eError = _FreeLMPages(psLMAllocArrayData);
+		PVR_ASSERT (eError == PVRSRV_OK); /* can we do better? */
+	}
+
+	eError = _FreeLMPageArray(psLMAllocArrayData);
+	PVR_ASSERT (eError == PVRSRV_OK); /* can we do better? */
+
+	return PVRSRV_OK;
+}
+
+/* callback function for locking the system physical page addresses.
+   As we are LMA there is nothing to do as we control physical memory. */
+static PVRSRV_ERROR
+PMRLockSysPhysAddressesLocalMem(PMR_IMPL_PRIVDATA pvPriv,
+							 IMG_UINT32 uiLog2DevPageSize)
+{
+
+    PVRSRV_ERROR eError;
+    PMR_LMALLOCARRAY_DATA *psLMAllocArrayData;
+
+    psLMAllocArrayData = pvPriv;
+
+    if (psLMAllocArrayData->bOnDemand)
+    {
+		/* Allocate Memory for deferred allocation */
+    	eError = _AllocLMPages(psLMAllocArrayData);
+    	if (eError != PVRSRV_OK)
+    	{
+    		return eError;
+    	}
+    }
+
+	PVR_UNREFERENCED_PARAMETER(uiLog2DevPageSize);
+
+	return PVRSRV_OK;
+
+}
+
+static PVRSRV_ERROR
+PMRUnlockSysPhysAddressesLocalMem(PMR_IMPL_PRIVDATA pvPriv
+							   )
+{
+    PVRSRV_ERROR eError = PVRSRV_OK;
+    PMR_LMALLOCARRAY_DATA *psLMAllocArrayData;
+
+    psLMAllocArrayData = pvPriv;
+
+	if (psLMAllocArrayData->bOnDemand)
+    {
+		/* Free Memory for deferred allocation */
+    	eError = _FreeLMPages(psLMAllocArrayData);
+    	if (eError != PVRSRV_OK)
+    	{
+    		return eError;
+    	}
+    }
+
+	PVR_ASSERT(eError == PVRSRV_OK);
+	return eError;
+}
+
+/* N.B.  It is assumed that PMRLockSysPhysAddressesLocalMem() is called _before_ this function! */
+static PVRSRV_ERROR
+PMRSysPhysAddrLocalMem(PMR_IMPL_PRIVDATA pvPriv,
+					   IMG_UINT32 ui32NumOfPages,
+					   IMG_DEVMEM_OFFSET_T *puiOffset,
+					   IMG_BOOL *pbValid,
+					   IMG_DEV_PHYADDR *psDevPAddr)
+{
+	IMG_UINT32 idx;
+	IMG_UINT32 uiLog2AllocSize;
+	IMG_UINT32 uiNumAllocs;
+	IMG_UINT64 uiAllocIndex;
+	IMG_DEVMEM_OFFSET_T uiInAllocOffset;
+	PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = pvPriv;
+
+	uiNumAllocs = psLMAllocArrayData->uiNumAllocs;
+	if (uiNumAllocs > 1)
+	{
+		PVR_ASSERT(psLMAllocArrayData->uiLog2AllocSize != 0);
+		uiLog2AllocSize = psLMAllocArrayData->uiLog2AllocSize;
+
+		for (idx=0; idx < ui32NumOfPages; idx++)
+		{
+			if (pbValid[idx])
+			{
+				uiAllocIndex = puiOffset[idx] >> uiLog2AllocSize;
+				uiInAllocOffset = puiOffset[idx] - (uiAllocIndex << uiLog2AllocSize);
+
+				PVR_ASSERT(uiAllocIndex < uiNumAllocs);
+				PVR_ASSERT(uiInAllocOffset < (1ULL << uiLog2AllocSize));
+
+				psDevPAddr[idx].uiAddr = psLMAllocArrayData->pasDevPAddr[uiAllocIndex].uiAddr + uiInAllocOffset;
+			}
+		}
+	}
+	else
+	{
+		for (idx=0; idx < ui32NumOfPages; idx++)
+		{
+			if (pbValid[idx])
+			{
+				psDevPAddr[idx].uiAddr = psLMAllocArrayData->pasDevPAddr[0].uiAddr + puiOffset[idx];
+			}
+		}
+	}
+
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR
+PMRAcquireKernelMappingDataLocalMem(PMR_IMPL_PRIVDATA pvPriv,
+								 IMG_SIZE_T uiOffset,
+								 IMG_SIZE_T uiSize,
+								 IMG_VOID **ppvKernelAddressOut,
+								 IMG_HANDLE *phHandleOut,
+								 PMR_FLAGS_T ulFlags)
+{
+	PVRSRV_ERROR eError;
+	PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = IMG_NULL;
+	IMG_VOID *pvKernLinAddr = IMG_NULL;
+	IMG_UINT32 ui32PageIndex = 0;
+
+	PVR_UNREFERENCED_PARAMETER(ulFlags);
+
+	psLMAllocArrayData = pvPriv;
+
+	/* Check that we can map this in contiguously */
+	if (psLMAllocArrayData->uiNumAllocs != 1)
+	{
+		IMG_SIZE_T uiStart = uiOffset;
+		IMG_SIZE_T uiEnd = uiOffset + uiSize - 1;
+		IMG_SIZE_T uiPageMask = ~((1 << psLMAllocArrayData->uiLog2AllocSize) - 1);
+
+		/* We can still map if only one page is required */
+		if ((uiStart & uiPageMask) != (uiEnd & uiPageMask))
+		{
+			eError = PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY;
+			goto e0;
+		}
+
+		/* Locate the desired physical page to map in */
+		ui32PageIndex = uiOffset >> psLMAllocArrayData->uiLog2AllocSize;
+	}
+
+	PVR_ASSERT(ui32PageIndex < psLMAllocArrayData->uiNumAllocs);
+
+	eError = _MapAlloc(psLMAllocArrayData->psDevNode,
+						&psLMAllocArrayData->pasDevPAddr[ui32PageIndex],
+						psLMAllocArrayData->uiAllocSize,
+						&pvKernLinAddr, 
+						ulFlags);
+
+	*ppvKernelAddressOut = ((IMG_CHAR *) pvKernLinAddr) + (uiOffset & ((1U << psLMAllocArrayData->uiLog2AllocSize) - 1));
+	*phHandleOut = pvKernLinAddr;
+
+	return eError;
+
+	/*
+	  error exit paths follow
+	*/
+
+ e0:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+static IMG_VOID PMRReleaseKernelMappingDataLocalMem(PMR_IMPL_PRIVDATA pvPriv,
+												 IMG_HANDLE hHandle)
+{
+	PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = IMG_NULL;
+	IMG_VOID *pvKernLinAddr = IMG_NULL;
+
+	psLMAllocArrayData = (PMR_LMALLOCARRAY_DATA *) pvPriv;
+	pvKernLinAddr = (IMG_VOID *) hHandle;
+
+	_UnMapAlloc(psLMAllocArrayData->psDevNode,
+				psLMAllocArrayData->uiAllocSize, pvKernLinAddr);
+}
+
+
+static PVRSRV_ERROR
+CopyBytesLocalMem(PMR_IMPL_PRIVDATA pvPriv,
+				  IMG_DEVMEM_OFFSET_T uiOffset,
+				  IMG_UINT8 *pcBuffer,
+				  IMG_SIZE_T uiBufSz,
+				  IMG_SIZE_T *puiNumBytes,
+				  IMG_VOID (*pfnCopyBytes)(IMG_UINT8 *pcBuffer,
+										   IMG_UINT8 *pcPMR,
+										   IMG_SIZE_T uiSize))
+{
+	PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = IMG_NULL;
+	IMG_SIZE_T uiBytesCopied;
+	IMG_SIZE_T uiBytesToCopy;
+	IMG_SIZE_T uiBytesCopyableFromAlloc;
+	IMG_VOID *pvMapping = IMG_NULL;
+	IMG_UINT8 *pcKernelPointer = IMG_NULL;
+	IMG_SIZE_T uiBufferOffset;
+	IMG_UINT64 uiAllocIndex;
+	IMG_DEVMEM_OFFSET_T uiInAllocOffset;
+	PVRSRV_ERROR eError;
+
+	psLMAllocArrayData = pvPriv;
+
+	uiBytesCopied = 0;
+	uiBytesToCopy = uiBufSz;
+	uiBufferOffset = 0;
+
+	if (psLMAllocArrayData->uiNumAllocs > 1)
+	{
+		while (uiBytesToCopy > 0)
+		{
+			/* we have to map one alloc in at a time */
+			PVR_ASSERT(psLMAllocArrayData->uiLog2AllocSize != 0);
+			uiAllocIndex = uiOffset >> psLMAllocArrayData->uiLog2AllocSize;
+			uiInAllocOffset = uiOffset - (uiAllocIndex << psLMAllocArrayData->uiLog2AllocSize);
+			uiBytesCopyableFromAlloc = uiBytesToCopy;
+			if (uiBytesCopyableFromAlloc + uiInAllocOffset > (1ULL << psLMAllocArrayData->uiLog2AllocSize))
+			{
+				uiBytesCopyableFromAlloc = TRUNCATE_64BITS_TO_SIZE_T((1ULL << psLMAllocArrayData->uiLog2AllocSize)-uiInAllocOffset);
+			}
+
+			PVR_ASSERT(uiBytesCopyableFromAlloc != 0);
+			PVR_ASSERT(uiAllocIndex < psLMAllocArrayData->uiNumAllocs);
+			PVR_ASSERT(uiInAllocOffset < (1ULL << psLMAllocArrayData->uiLog2AllocSize));
+
+			eError = _MapAlloc(psLMAllocArrayData->psDevNode,
+								&psLMAllocArrayData->pasDevPAddr[uiAllocIndex],
+								psLMAllocArrayData->uiAllocSize,
+								&pvMapping, 0);
+			if (eError != PVRSRV_OK)
+			{
+				goto e0;
+			}
+			pcKernelPointer = pvMapping;
+			pfnCopyBytes(&pcBuffer[uiBufferOffset], &pcKernelPointer[uiInAllocOffset], uiBytesCopyableFromAlloc);
+			_UnMapAlloc(psLMAllocArrayData->psDevNode, psLMAllocArrayData->uiAllocSize, pvMapping);
+			uiBufferOffset += uiBytesCopyableFromAlloc;
+			uiBytesToCopy -= uiBytesCopyableFromAlloc;
+			uiOffset += uiBytesCopyableFromAlloc;
+			uiBytesCopied += uiBytesCopyableFromAlloc;
+		}
+	}
+	else
+	{
+			PVR_ASSERT((uiOffset + uiBufSz) <= psLMAllocArrayData->uiAllocSize);
+			PVR_ASSERT(psLMAllocArrayData->uiAllocSize != 0);
+			eError = _MapAlloc(psLMAllocArrayData->psDevNode,
+								&psLMAllocArrayData->pasDevPAddr[0],
+								psLMAllocArrayData->uiAllocSize,
+								&pvMapping, 0);
+			if (eError != PVRSRV_OK)
+			{
+				goto e0;
+			}
+			pcKernelPointer = pvMapping;
+			pfnCopyBytes(pcBuffer, &pcKernelPointer[uiOffset], uiBufSz);
+			_UnMapAlloc(psLMAllocArrayData->psDevNode, psLMAllocArrayData->uiAllocSize, pvMapping);
+			uiBytesCopied = uiBufSz;
+	}
+	*puiNumBytes = uiBytesCopied;
+	return PVRSRV_OK;
+e0:
+	*puiNumBytes = uiBytesCopied;
+	return eError;
+}
+
+static IMG_VOID ReadLocalMem(IMG_UINT8 *pcBuffer,
+							 IMG_UINT8 *pcPMR,
+							 IMG_SIZE_T uiSize)
+{
+	OSMemCopy(pcBuffer, pcPMR, uiSize);
+}
+
+static PVRSRV_ERROR
+PMRReadBytesLocalMem(PMR_IMPL_PRIVDATA pvPriv,
+				  IMG_DEVMEM_OFFSET_T uiOffset,
+				  IMG_UINT8 *pcBuffer,
+				  IMG_SIZE_T uiBufSz,
+				  IMG_SIZE_T *puiNumBytes)
+{
+	return CopyBytesLocalMem(pvPriv,
+							 uiOffset,
+							 pcBuffer,
+							 uiBufSz,
+							 puiNumBytes,
+							 ReadLocalMem);
+}
+
+static IMG_VOID WriteLocalMem(IMG_UINT8 *pcBuffer,
+							  IMG_UINT8 *pcPMR,
+							  IMG_SIZE_T uiSize)
+{
+	OSMemCopy(pcPMR, pcBuffer, uiSize);
+}
+
+static PVRSRV_ERROR
+PMRWriteBytesLocalMem(PMR_IMPL_PRIVDATA pvPriv,
+					  IMG_DEVMEM_OFFSET_T uiOffset,
+					  IMG_UINT8 *pcBuffer,
+					  IMG_SIZE_T uiBufSz,
+					  IMG_SIZE_T *puiNumBytes)
+{
+	return CopyBytesLocalMem(pvPriv,
+							 uiOffset,
+							 pcBuffer,
+							 uiBufSz,
+							 puiNumBytes,
+							 WriteLocalMem);
+}
+
+static PMR_IMPL_FUNCTAB _sPMRLMAFuncTab = {
+	/* pfnLockPhysAddresses */
+	&PMRLockSysPhysAddressesLocalMem,
+	/* pfnUnlockPhysAddresses */
+	&PMRUnlockSysPhysAddressesLocalMem,
+	/* pfnDevPhysAddr */
+	&PMRSysPhysAddrLocalMem,
+	/* pfnPDumpSymbolicAddr */
+	IMG_NULL,
+	/* pfnAcquireKernelMappingData */
+	&PMRAcquireKernelMappingDataLocalMem,
+	/* pfnReleaseKernelMappingData */
+	&PMRReleaseKernelMappingDataLocalMem,
+	/* pfnReadBytes */
+	&PMRReadBytesLocalMem,
+	/* pfnWriteBytes */
+	&PMRWriteBytesLocalMem,
+	/* pfnFinalize */
+	&PMRFinalizeLocalMem
+};
+
+PVRSRV_ERROR
+PhysmemNewLocalRamBackedPMR(PVRSRV_DEVICE_NODE *psDevNode,
+							IMG_DEVMEM_SIZE_T uiSize,
+							IMG_DEVMEM_SIZE_T uiChunkSize,
+							IMG_UINT32 ui32NumPhysChunks,
+							IMG_UINT32 ui32NumVirtChunks,
+							IMG_BOOL *pabMappingTable,
+							IMG_UINT32 uiLog2PageSize,
+							PVRSRV_MEMALLOCFLAGS_T uiFlags,
+							PMR **ppsPMRPtr)
+{
+	PVRSRV_ERROR eError;
+	PVRSRV_ERROR eError2;
+	PMR *psPMR = IMG_NULL;
+	PMR_LMALLOCARRAY_DATA *psPrivData = IMG_NULL;
+	IMG_HANDLE hPDumpAllocInfo = IMG_NULL;
+	PMR_FLAGS_T uiPMRFlags;
+	IMG_BOOL bZero;
+	IMG_BOOL bPoisonOnAlloc;
+	IMG_BOOL bPoisonOnFree;
+	IMG_BOOL bOnDemand = ((uiFlags & PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC) > 0);
+	IMG_BOOL bContig;
+
+	if (uiFlags & PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC)
+	{
+		bZero = IMG_TRUE;
+	}
+	else
+	{
+		bZero = IMG_FALSE;
+	}
+
+	if (uiFlags & PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC)
+	{
+		bPoisonOnAlloc = IMG_TRUE;
+	}
+	else
+	{
+		bPoisonOnAlloc = IMG_FALSE;
+	}
+
+	if (uiFlags & PVRSRV_MEMALLOCFLAG_POISON_ON_FREE)
+	{
+		bPoisonOnFree = IMG_TRUE;
+	}
+	else
+	{
+		bPoisonOnFree = IMG_FALSE;
+	}
+
+	if (uiFlags & PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE)
+	{
+		bContig = IMG_TRUE;
+	}
+	else
+	{
+		bContig = IMG_FALSE;
+	}
+
+	if ((uiFlags & PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) &&
+		(uiFlags & PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC))
+	{
+		/* Zero on Alloc and Poison on Alloc are mutually exclusive */
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto errorOnParam;
+	}
+
+	/* Silently round up alignment/pagesize if request was less that
+	   PAGE_SHIFT, because it would never be harmful for memory to be
+	   _more_ contiguous that was desired */
+
+	uiLog2PageSize = OSGetPageShift() > uiLog2PageSize
+		? OSGetPageShift()
+		: uiLog2PageSize;
+
+	/* Create Array structure that holds the physical pages */
+	eError = _AllocLMPageArray(psDevNode,
+						   uiChunkSize * ui32NumPhysChunks,
+						   uiChunkSize,
+                           ui32NumPhysChunks,
+                           ui32NumVirtChunks,
+                           pabMappingTable,
+						   uiLog2PageSize,
+						   bZero,
+						   bPoisonOnAlloc,
+						   bPoisonOnFree,
+						   bContig,
+						   bOnDemand,
+						   &psPrivData);
+	if (eError != PVRSRV_OK)
+	{
+		goto errorOnAllocPageArray;
+	}
+
+
+	if (!bOnDemand)
+	{
+		/* Allocate the physical pages */
+		eError = _AllocLMPages(psPrivData);
+		if (eError != PVRSRV_OK)
+		{
+			goto errorOnAllocPages;
+		}
+	}
+
+	/* In this instance, we simply pass flags straight through.
+
+	   Generically, uiFlags can include things that control the PMR
+	   factory, but we don't need any such thing (at the time of
+	   writing!), and our caller specifies all PMR flags so we don't
+	   need to meddle with what was given to us.
+	*/
+	uiPMRFlags = (PMR_FLAGS_T)(uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK);
+	/* check no significant bits were lost in cast due to different
+	   bit widths for flags */
+	PVR_ASSERT(uiPMRFlags == (uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK));
+
+    if (bOnDemand)
+    {
+    	PDUMPCOMMENT("Deferred Allocation PMR (LMA)");
+    }
+	eError = PMRCreatePMR(psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL],
+						  uiSize,
+                          uiChunkSize,
+                          ui32NumPhysChunks,
+                          ui32NumVirtChunks,
+                          pabMappingTable,
+						  uiLog2PageSize,
+						  uiPMRFlags,
+						  "PMRLMA",
+						  &_sPMRLMAFuncTab,
+						  psPrivData,
+						  &psPMR,
+						  &hPDumpAllocInfo,
+						  IMG_FALSE);
+	if (eError != PVRSRV_OK)
+	{
+		goto errorOnCreate;
+	}
+
+	psPrivData->hPDumpAllocInfo = hPDumpAllocInfo;
+	psPrivData->bPDumpMalloced = IMG_TRUE;
+
+	*ppsPMRPtr = psPMR;
+	return PVRSRV_OK;
+
+errorOnCreate:
+	if(!bOnDemand)
+	{
+		eError2 = _FreeLMPages(psPrivData);
+		PVR_ASSERT(eError2 == PVRSRV_OK);
+	}
+
+errorOnAllocPages:
+	eError2 = _FreeLMPageArray(psPrivData);
+	PVR_ASSERT(eError2 == PVRSRV_OK);
+
+errorOnAllocPageArray:
+errorOnParam:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+
+struct PidOSidCouplingList
+{
+	IMG_PID     pId;
+	IMG_UINT32  ui32OSid;
+	IMG_UINT32	ui32OSidReg;
+
+	struct PidOSidCouplingList *psNext;
+};
+typedef struct PidOSidCouplingList PidOSidCouplingList;
+
+static PidOSidCouplingList *psPidOSidHead=NULL;
+static PidOSidCouplingList *psPidOSidTail=NULL;
+
+IMG_VOID InsertPidOSidsCoupling(IMG_PID pId, IMG_UINT32 ui32OSid, IMG_UINT32 ui32OSidReg)
+{
+	PidOSidCouplingList *psTmp;
+
+	PVR_DPF((PVR_DBG_MESSAGE,"(GPU Virtualization Validation): Inserting (PID/ OSid/ OSidReg) (%d/ %d/ %d) into list",pId,ui32OSid, ui32OSidReg));
+
+	psTmp=OSAllocMem(sizeof(PidOSidCouplingList));
+
+	if (psTmp==IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"(GPU Virtualization Validation): Memory allocation failed. No list insertion => program will execute normally.\n"));
+		return ;
+	}
+
+	psTmp->pId=pId;
+	psTmp->ui32OSid=ui32OSid;
+	psTmp->ui32OSidReg=ui32OSidReg;
+
+	psTmp->psNext=NULL;
+	if (psPidOSidHead==NULL)
+	{
+		psPidOSidHead=psTmp;
+		psPidOSidTail=psTmp;
+	}
+	else
+	{
+		psPidOSidTail->psNext=psTmp;
+		psPidOSidTail=psTmp;
+	}
+
+	return ;
+}
+
+IMG_VOID RetrieveOSidsfromPidList(IMG_PID pId, IMG_UINT32 *pui32OSid, IMG_UINT32 * pui32OSidReg)
+{
+	PidOSidCouplingList *psTmp;
+
+	for (psTmp=psPidOSidHead;psTmp!=NULL;psTmp=psTmp->psNext)
+	{
+		if (psTmp->pId==pId)
+		{
+			(*pui32OSid) = psTmp->ui32OSid;
+			(*pui32OSidReg) = psTmp->ui32OSidReg;
+
+			return ;
+		}
+	}
+
+	(*pui32OSid)=0;
+	(*pui32OSidReg)=0;
+	return ;
+}
+
+IMG_VOID    RemovePidOSidCoupling(IMG_PID pId)
+{
+	PidOSidCouplingList *psTmp, *psPrev=NULL;
+
+	for (psTmp=psPidOSidHead; psTmp!=NULL; psTmp=psTmp->psNext)
+	{
+		if (psTmp->pId==pId) break;
+		psPrev=psTmp;
+	}
+
+	if (psTmp==NULL)
+	{
+		return ;
+	}
+
+	PVR_DPF((PVR_DBG_MESSAGE,"(GPU Virtualization Validation): Deleting Pairing %d / (%d - %d) from list",psTmp->pId, psTmp->ui32OSid, psTmp->ui32OSidReg));
+
+	if (psTmp==psPidOSidHead)
+	{
+		if (psPidOSidHead->psNext==NULL)
+		{
+			psPidOSidHead=NULL;
+			psPidOSidTail=NULL;
+			OSFreeMem(psTmp);
+
+			return ;
+		}
+
+		psPidOSidHead=psPidOSidHead->psNext;
+		OSFreeMem(psTmp);
+		return ;
+	}
+
+	if (psPrev==NULL) return ;
+
+	psPrev->psNext=psTmp->psNext;
+	if (psTmp==psPidOSidTail)
+	{
+		psPidOSidTail=psPrev;
+	}
+
+	OSFreeMem(psTmp);
+
+	return ;
+}
+
+#endif
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/pmr.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/pmr.c
new file mode 100644
index 0000000..ed310b2
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/pmr.c
@@ -0,0 +1,2786 @@
+/*************************************************************************/ /*!
+@File
+@Title          Physmem (PMR) abstraction
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Part of the memory management.  This module is responsible for
+                the "PMR" abstraction.  A PMR (Physical Memory Resource)
+                represents some unit of physical memory which is
+                allocated/freed/mapped/unmapped as an indivisible unit
+                (higher software levels provide an abstraction above that
+                to deal with dividing this down into smaller manageable units).
+                Importantly, this module knows nothing of virtual memory, or
+                of MMUs etc., with one excuseable exception.  We have the
+                concept of a "page size", which really means nothing in
+                physical memory, but represents a "contiguity quantum" such
+                that the higher level modules which map this memory are able
+                to verify that it matches the needs of the page size for the
+                virtual realm into which it is being mapped.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+
+#include "pdump.h"
+
+#include "osfunc.h"
+#include "pdump_km.h"
+#include "pdump_physmem.h"
+#include "pmr_impl.h"
+#include "pvrsrv.h"
+
+#include "allocmem.h"
+#if defined(PVRSRV_SPLIT_LARGE_OSMEM_ALLOC) && defined(LINUX)
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <asm/pgtable.h>
+#endif
+#include "lock.h"
+
+#if defined(SUPPORT_SECURE_EXPORT)
+#include "secure_export.h"
+#include "ossecure_export.h"
+#endif
+
+#if defined(PVR_RI_DEBUG)
+#include "ri_server.h"
+#endif 
+
+/* ourselves */
+#include "pmr.h"
+
+/* A "context" for the physical memory block resource allocator.
+
+   Context is probably the wrong word.
+
+   There is almost certainly only one of these, ever, in the system.
+   But, let's keep the notion of a context anyway, "just-in-case".
+*/
+struct _PMR_CTX_
+{
+    /* For debugging, and PDump, etc., let's issue a forever
+       incrementing serial number to each allocation. */
+    IMG_UINT64 uiNextSerialNum;
+
+    /* For security, we only allow a PMR to be mapped if the caller
+       knows its key.  We can pseudo-randomly generate keys */
+    IMG_UINT64 uiNextKey;
+
+    /* For debugging only, I guess:  Number of live PMRs */
+    IMG_UINT32 uiNumLivePMRs;
+
+	/* Lock for this structure */
+	POS_LOCK hLock;
+
+    /* In order to seed the uiNextKey, we enforce initialisation at
+       driver load time.  Also, we can debug check at driver unload
+       that the PMR count is zero. */
+  IMG_BOOL bModuleInitialised;
+} _gsSingletonPMRContext = { 1, 0, 0, IMG_NULL, IMG_FALSE };
+
+
+typedef struct _PMR_MAPPING_TABLE_
+{
+	PMR_SIZE_T	uiChunkSize;			/*!< Size of a "chunk" */
+	IMG_UINT32 	ui32NumPhysChunks;		/*!< Number of physical chunks that are valid */
+	IMG_UINT32 	ui32NumVirtChunks;		/*!< Number of virtual chunks in the mapping */
+	/* Must be last */
+#if defined(PVRSRV_SPLIT_LARGE_OSMEM_ALLOC)
+	IMG_UINT32 	*aui32Translation;      /*!< Translation mapping for "logical" to physical */
+#else
+	IMG_UINT32 	aui32Translation[1];    /*!< Translation mapping for "logical" to physical */
+#endif
+} PMR_MAPPING_TABLE;
+
+#define TRANSLATION_INVALID 0xFFFFFFFFL
+
+/* A PMR. One per physical allocation.  May be "shared".
+
+   "shared" is ambiguous.  We need to be careful with terminology.
+   There are two ways in which a PMR may be "shared" and we need to be
+   sure that we are clear which we mean.
+
+   i)   multiple small allocations living together inside one PMR;
+
+   ii)  one single allocation filling a PMR but mapped into multiple
+        memory contexts.
+
+   This is more important further up the stack - at this level, all we
+   care is that the PMR is being referenced multiple times.
+*/
+struct _PMR_
+{
+    /* This object is strictly refcounted.  References include:
+       - mapping
+       - live handles (to this object)
+       - live export handles
+       (thus it is normal for allocated and exported memory to have a refcount of 3)
+       The object is destroyed when and only when the refcount reaches 0
+    */
+    /*
+       Physical address translation (device <> cpu) is done on a per device
+       basis which means we need the physcial heap info
+    */
+    PHYS_HEAP *psPhysHeap;
+
+    IMG_UINT32 uiRefCount;
+
+    /* lock count - this is the number of times
+       PMRLockSysPhysAddresses() has been called, less the number of
+       PMRUnlockSysPhysAddresses() calls.  This is arguably here for
+       debug reasons only, as the refcount is already incremented as a
+       matter of course.  Really, this just allows us to trap protocol
+       errors: i.e. calling PMRSysPhysAddr(),
+       without a lock, or calling PMRUnlockSysPhysAddresses() too many
+       or too few times. */
+    IMG_UINT32 uiLockCount;
+
+	/* Lock for this structure */
+	POS_LOCK hLock;
+
+    /* Incrementing serial number to each allocation. */
+    IMG_UINT64 uiSerialNum;
+
+    /* For security, we only allow a PMR to be mapped if the caller
+       knows its key.  We can pseudo-randomly generate keys */
+    PMR_PASSWORD_T uiKey;
+
+    /* Callbacks for per-flavour functions */
+    const PMR_IMPL_FUNCTAB *psFuncTab;
+
+    /* Data associated with the "subtype" */
+    PMR_IMPL_PRIVDATA pvFlavourData;
+
+    /* And for pdump */
+    const IMG_CHAR *pszPDumpDefaultMemspaceName;
+    const IMG_CHAR *pszPDumpFlavour;
+
+    /* Logical size of allocation.  "logical", because a PMR can
+       represent memory that will never physically exist.  This is the
+       amount of virtual space that the PMR would consume when it's
+       mapped into a virtual allocation. */
+    PMR_SIZE_T uiLogicalSize;
+
+	/* Mapping table for the allocation.
+	   PMR's can be sparse in which case not all the "logic" addresses
+	   in it are valid. We need to know which addresses are and aren't
+	   valid when mapping or reading the PMR.
+	   The mapping table translates "logical" offsets into physical
+	   offsets which is what we always pass to the PMR factory
+	   (so it doesn't have to be concerned about sparseness issues) */
+    PMR_MAPPING_TABLE *psMappingTable;
+
+    /* Minimum Physical Contiguity Guarantee.  Might be called "page
+       size", but that would be incorrect, as page size is something
+       meaningful only in virtual realm.  This contiguity guarantee
+       provides an inequality that can be verified/asserted/whatever
+       to ensure that this PMR conforms to the page size requirement
+       of the place the PMR gets mapped.  (May be used to select an
+       appropriate heap in variable page size systems)
+
+       The absolutely necessary condition is this:
+
+       device MMU page size <= actual physical contiguity.
+
+       We go one step further in order to be able to provide an early warning / early compatibility check and say this:
+
+       device MMU page size <= 2**(uiLog2ContiguityGuarantee) <= actual physical contiguity.
+
+       In this way, it is possible to make the page table reservation
+       in the device MMU without even knowing the granularity of the
+       physical memory (i.e. useful for being able to allocate virtual
+       before physical)
+    */
+    PMR_LOG2ALIGN_T uiLog2ContiguityGuarantee;
+
+    /* Flags.  We store a copy of the "PMR flags" (usually a subset of
+       the flags given at allocation time) and return them to any
+       caller of PMR_Flags().  The intention of these flags is that
+       the ones stored here are used to represent permissions, such
+       that noone is able to map a PMR in a mode in which they are not
+       allowed, e.g. writeable for a read-only PMR, etc. */
+    PMR_FLAGS_T uiFlags;
+
+    /* Do we really need this? For now we'll keep it, until we know we don't. */
+    /* NB: this is not the "memory context" in client terms - this is
+       _purely_ the "PMR" context, of which there is almost certainly only
+       ever one per system as a whole, but we'll keep the concept
+       anyway, just-in-case. */
+    struct _PMR_CTX_ *psContext;
+
+#if defined(PVR_RI_DEBUG)
+    /*
+	 * Stored handle to PMR RI entry
+	 */
+	IMG_PVOID	hRIHandle;
+#endif
+
+	/* Whether PDumping of this PMR must be persistent
+	 * (i.e. it must be present in every future PDump stream as well)
+	 */
+	IMG_BOOL	bForcePersistent;
+};
+
+/* do we need a struct for the export handle?  I'll use one for now, but if nothing goes in it, we'll lose it */
+struct _PMR_EXPORT_
+{
+    struct _PMR_ *psPMR;
+};
+
+struct _PMR_PAGELIST_
+{
+	struct _PMR_ *psReferencePMR;
+};
+
+/*
+ * This Lock is used to protect the sequence of operation used in MMapPMR and in
+ * the memory management bridge. This should make possible avoid the use of the bridge
+ * lock in mmap.c avoiding regressions.
+ */
+
+/* this structure tracks the current owner of the PMR lock, avoiding use of
+ * the Linux (struct mutex).owner field which is not guaranteed to be up to date.
+ * there is Linux-specific code to provide an opimised approach for Linux,
+ * using the kernel (struct task_struct *) instead of a PID/TID combination.
+ */
+typedef struct _PMR_LOCK_OWNER_
+{
+#if defined(LINUX)
+	struct task_struct *task;
+#else
+	POS_LOCK hPIDTIDLock;
+	IMG_PID uiPID;
+	IMG_UINTPTR_T uiTID;
+#endif
+} PMR_LOCK_OWNER;
+
+POS_LOCK gGlobalLookupPMRLock;
+static PMR_LOCK_OWNER gsPMRLockOwner;
+
+static IMG_VOID _SetPMRLockOwner(IMG_VOID)
+{
+#if defined(LINUX)
+	gsPMRLockOwner.task = current;
+#else
+	OSLockAcquire(gsPMRLockOwner.hPIDTIDLock);
+	gsPMRLockOwner.uiPID = OSGetCurrentProcessID();
+	gsPMRLockOwner.uiTID = OSGetCurrentThreadID();
+	OSLockRelease(gsPMRLockOwner.hPIDTIDLock);
+#endif
+}
+
+/* Must only be called by the thread which owns the PMR lock */
+static IMG_VOID _ClearPMRLockOwner(IMG_VOID)
+{
+#if defined(LINUX)
+	gsPMRLockOwner.task = IMG_NULL;
+#else
+	OSLockAcquire(gsPMRLockOwner.hPIDTIDLock);
+	gsPMRLockOwner.uiPID = 0;
+	gsPMRLockOwner.uiTID = 0;
+	OSLockRelease(gsPMRLockOwner.hPIDTIDLock);
+#endif
+}
+
+static IMG_BOOL _ComparePMRLockOwner(IMG_VOID)
+{
+#if defined(LINUX)
+	return gsPMRLockOwner.task == current;
+#else
+	IMG_BOOL bRet;
+
+	OSLockAcquire(gsPMRLockOwner.hPIDTIDLock);
+	bRet = (gsPMRLockOwner.uiPID == OSGetCurrentProcessID()) &&
+			(gsPMRLockOwner.uiTID == OSGetCurrentThreadID());
+	OSLockRelease(gsPMRLockOwner.hPIDTIDLock);
+	return bRet;
+#endif
+}
+
+IMG_VOID PMRLock()
+{
+	OSLockAcquire(gGlobalLookupPMRLock);
+	_SetPMRLockOwner();
+}
+
+IMG_VOID PMRUnlock()
+{
+	_ClearPMRLockOwner();
+	OSLockRelease(gGlobalLookupPMRLock);
+}
+
+IMG_BOOL PMRIsLocked(void)
+{
+	return OSLockIsLocked(gGlobalLookupPMRLock);
+}
+
+
+IMG_BOOL PMRIsLockedByMe(void)
+{
+	return PMRIsLocked() && _ComparePMRLockOwner();
+}
+
+#define MIN3(a,b,c)	(((a) < (b)) ? (((a) < (c)) ? (a):(c)) : (((b) < (c)) ? (b):(c)))
+
+#if defined(PVRSRV_SPLIT_LARGE_OSMEM_ALLOC)
+#if defined(LINUX)
+static INLINE IMG_BOOL _IsVmallocAddr(const IMG_VOID *pvAddr)
+{
+	unsigned long lAddr = (unsigned long) pvAddr;
+	return (lAddr >= VMALLOC_START) && (lAddr < VMALLOC_END);
+}
+#endif
+
+static INLINE IMG_VOID *_AllocMem(const IMG_SIZE_T size)
+{
+#if defined(LINUX)
+	if (size > OSGetPageSize())
+		return vmalloc(size);
+	else
+		return OSAllocMem(size);
+#else
+	return OSAllocMem(size);
+#endif
+}
+
+static INLINE IMG_VOID _FreeMem(IMG_VOID *pvAddr)
+{
+#if defined(LINUX)
+	if (_IsVmallocAddr(pvAddr))
+		vfree(pvAddr);
+	else
+		OSFreeMem(pvAddr);
+#else
+	OSFreeMem(pvAddr);
+#endif
+}
+#endif
+
+static PVRSRV_ERROR
+_PMRCreate(PMR_SIZE_T uiLogicalSize,
+           PMR_SIZE_T uiChunkSize,
+           IMG_UINT32 ui32NumPhysChunks,
+           IMG_UINT32 ui32NumVirtChunks,
+           IMG_BOOL *pabMappingTable,
+           PMR_LOG2ALIGN_T uiLog2ContiguityGuarantee,
+           PMR_FLAGS_T uiFlags,
+           PMR **ppsPMR)
+{
+    IMG_VOID *pvPMRLinAddr;
+#if defined(PVRSRV_SPLIT_LARGE_OSMEM_ALLOC)
+    IMG_VOID *pvMapLinAddr;
+#endif
+    PMR *psPMR;
+    PMR_MAPPING_TABLE *psMappingTable;
+    struct _PMR_CTX_ *psContext;
+    IMG_UINT32 i;
+    IMG_UINT32 ui32ValidCount = 0;
+    IMG_UINT32 ui32Remainder;
+    PVRSRV_ERROR eError;
+    IMG_UINT32 ui32PhysIndex = 0;
+
+    psContext = &_gsSingletonPMRContext;
+
+
+	/* Extra checks required for sparse PMRs */
+	if (uiLogicalSize != uiChunkSize)
+	{
+		/* Check the logical size and chunk information agree with each other */
+		if (uiLogicalSize != (uiChunkSize * ui32NumVirtChunks))
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Bad mapping size (uiLogicalSize = 0x%llx, uiChunkSize = 0x%llx, ui32NumVirtChunks = %d)",
+					__FUNCTION__, (unsigned long long)uiLogicalSize, (unsigned long long)uiChunkSize, ui32NumVirtChunks));
+			return PVRSRV_ERROR_PMR_BAD_MAPPINGTABLE_SIZE;
+		}
+
+		/* Check that the chunk size is a multiple of the contiguity */
+		OSDivide64(uiChunkSize, (1<< uiLog2ContiguityGuarantee), &ui32Remainder);
+		if (ui32Remainder)
+		{
+			return PVRSRV_ERROR_PMR_BAD_CHUNK_SIZE;
+		}
+
+		/* Check the mapping table */
+		for (i = 0; i<ui32NumVirtChunks;i++)
+		{
+			if (pabMappingTable[i])
+			{
+				ui32ValidCount++;
+			}
+		}
+	
+		if (ui32ValidCount != ui32NumPhysChunks)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s: Mismatch in mapping table, expecting %d valid entries but found %d",
+					 __FUNCTION__,
+					 ui32NumPhysChunks,
+					 ui32ValidCount));
+			return PVRSRV_ERROR_PMR_MAPPINGTABLE_MISMATCH;
+		}
+	}
+
+#if defined(PVRSRV_SPLIT_LARGE_OSMEM_ALLOC)
+	pvPMRLinAddr = OSAllocMem(sizeof(*psPMR) + sizeof(*psMappingTable));
+#else
+	pvPMRLinAddr = OSAllocMem(sizeof(*psPMR) + sizeof(*psMappingTable) + sizeof(IMG_UINT32) * ui32NumVirtChunks);
+#endif
+	if (pvPMRLinAddr == IMG_NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+#if defined(PVRSRV_SPLIT_LARGE_OSMEM_ALLOC)
+	pvMapLinAddr = _AllocMem(sizeof(IMG_UINT32) * ui32NumVirtChunks);
+	if (pvMapLinAddr == IMG_NULL)
+	{
+		OSFreeMem(pvPMRLinAddr);
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	psPMR = (PMR *) pvPMRLinAddr;
+	psMappingTable = (PMR_MAPPING_TABLE *) (((IMG_CHAR *) pvPMRLinAddr) + sizeof(*psPMR));
+	psMappingTable->aui32Translation = (IMG_UINT32 *) pvMapLinAddr;
+#else
+	psPMR = (PMR *) pvPMRLinAddr;
+	psMappingTable = (PMR_MAPPING_TABLE *) (((IMG_CHAR *) pvPMRLinAddr) + sizeof(*psPMR));
+#endif
+
+	eError = OSLockCreate(&psPMR->hLock, LOCK_TYPE_PASSIVE);
+	if (eError != PVRSRV_OK)
+	{
+#if defined(PVRSRV_SPLIT_LARGE_OSMEM_ALLOC)
+		_FreeMem(psMappingTable->aui32Translation);
+#endif
+		OSFreeMem(psPMR);
+		return eError;
+	}
+
+	/* Setup the mapping table */
+	psMappingTable->uiChunkSize = uiChunkSize;
+	psMappingTable->ui32NumVirtChunks = ui32NumVirtChunks;
+	psMappingTable->ui32NumPhysChunks = ui32NumPhysChunks;
+	for (i=0;i<ui32NumVirtChunks;i++)
+	{
+		if (pabMappingTable[i])
+		{
+			psMappingTable->aui32Translation[i] = ui32PhysIndex++;
+		}
+		else
+		{
+			psMappingTable->aui32Translation[i] = TRANSLATION_INVALID;
+		}
+	}
+
+	/* Setup the PMR */
+	psPMR->uiRefCount = 0;
+	psPMR->uiLockCount = 0;
+	psPMR->psContext = psContext;
+	psPMR->uiLogicalSize = uiLogicalSize;
+	psPMR->uiLog2ContiguityGuarantee = uiLog2ContiguityGuarantee;
+	psPMR->uiFlags = uiFlags;
+	psPMR->psMappingTable = psMappingTable;
+	psPMR->uiKey = psContext->uiNextKey;
+	psPMR->uiSerialNum = psContext->uiNextSerialNum;
+
+#if defined(PVR_RI_DEBUG)
+	psPMR->hRIHandle = IMG_NULL;
+#endif
+
+	OSLockAcquire(psContext->hLock);
+	psContext->uiNextKey = (0x80200003 * psContext->uiNextKey)
+		^ (0xf00f0081 * (IMG_UINTPTR_T)pvPMRLinAddr);
+	psContext->uiNextSerialNum ++;
+	*ppsPMR = psPMR;
+	PVR_DPF((PVR_DBG_MESSAGE, "pmr.c: created PMR @0x%p", psPMR));
+	/* Increment live PMR count */
+	psContext->uiNumLivePMRs ++;
+	OSLockRelease(psContext->hLock);
+
+	return PVRSRV_OK;
+}
+
+static IMG_UINT32
+_RefNoLock(PMR *psPMR)
+{
+	psPMR->uiRefCount++;
+	return psPMR->uiRefCount;
+}
+
+static IMG_UINT32
+_UnrefNoLock(PMR *psPMR)
+{
+    PVR_ASSERT(psPMR->uiRefCount > 0);
+	psPMR->uiRefCount--;
+	return psPMR->uiRefCount;
+}
+
+static IMG_VOID
+_Ref(PMR *psPMR)
+{
+	OSLockAcquire(psPMR->hLock);
+	_RefNoLock(psPMR);
+	OSLockRelease(psPMR->hLock);
+}
+
+static IMG_VOID
+_UnrefAndMaybeDestroy(PMR *psPMR)
+{
+    PVRSRV_ERROR eError2;
+    struct _PMR_CTX_ *psCtx;
+    IMG_UINT32 uiRefCount;
+
+    PVR_ASSERT(psPMR != IMG_NULL);
+    PVR_ASSERT(psPMR->uiRefCount > 0);
+
+    OSLockAcquire(psPMR->hLock);
+	uiRefCount = _UnrefNoLock(psPMR);
+    OSLockRelease(psPMR->hLock);
+
+    if (uiRefCount == 0)
+    {
+        if (psPMR->psFuncTab->pfnFinalize != IMG_NULL)
+        {
+            eError2 = psPMR->psFuncTab->pfnFinalize(psPMR->pvFlavourData);
+            PVR_ASSERT (eError2 == PVRSRV_OK); /* can we do better? */
+        }
+
+#ifdef PVRSRV_NEED_PVR_ASSERT
+        OSLockAcquire(psPMR->hLock);
+        PVR_ASSERT(psPMR->uiLockCount == 0);
+        OSLockRelease(psPMR->hLock);
+#endif
+
+#if defined(PVR_RI_DEBUG)
+		{
+            PVRSRV_ERROR eError;
+
+			/* Delete RI entry */
+            if (psPMR->hRIHandle)
+            {
+            	eError = RIDeletePMREntryKM (psPMR->hRIHandle);
+            }
+		}
+#endif /* if defined(PVR_RI_DEBUG) */
+		psCtx = psPMR->psContext;
+
+		OSLockDestroy(psPMR->hLock);
+#if defined(PVRSRV_SPLIT_LARGE_OSMEM_ALLOC)
+		_FreeMem(psPMR->psMappingTable->aui32Translation);
+#endif
+        OSFreeMem(psPMR);
+
+        /* Decrement live PMR count.  Probably only of interest for debugging */
+        PVR_ASSERT(psCtx->uiNumLivePMRs > 0);
+
+        OSLockAcquire(psCtx->hLock);
+        psCtx->uiNumLivePMRs --;
+        OSLockRelease(psCtx->hLock);
+    }
+}
+
+static IMG_BOOL _PMRIsSparse(const PMR *psPMR)
+{
+	if (psPMR->psMappingTable->ui32NumVirtChunks == psPMR->psMappingTable->ui32NumPhysChunks)
+	{
+		return IMG_FALSE;
+	}
+
+	return IMG_TRUE;
+}
+
+PVRSRV_ERROR
+PMRCreatePMR(PHYS_HEAP *psPhysHeap,
+             PMR_SIZE_T uiLogicalSize,
+             PMR_SIZE_T uiChunkSize,
+             IMG_UINT32 ui32NumPhysChunks,
+             IMG_UINT32 ui32NumVirtChunks,
+             IMG_BOOL *pabMappingTable,
+             PMR_LOG2ALIGN_T uiLog2ContiguityGuarantee,
+             PMR_FLAGS_T uiFlags,
+             const IMG_CHAR *pszPDumpFlavour,
+             const PMR_IMPL_FUNCTAB *psFuncTab,
+             PMR_IMPL_PRIVDATA pvPrivData,
+             PMR **ppsPMRPtr,
+             IMG_HANDLE *phPDumpAllocInfo,
+             IMG_BOOL bForcePersistent)
+{
+    PMR *psPMR = IMG_NULL;
+    PVRSRV_ERROR eError;
+
+    eError = _PMRCreate(uiLogicalSize,
+						uiChunkSize,
+						ui32NumPhysChunks,
+						ui32NumVirtChunks,
+						pabMappingTable,
+						uiLog2ContiguityGuarantee,
+						uiFlags,
+                        &psPMR);
+    if (eError != PVRSRV_OK)
+    {
+        goto e0;
+    }
+
+    psPMR->psPhysHeap = psPhysHeap;
+    psPMR->psFuncTab = psFuncTab;
+    psPMR->pszPDumpDefaultMemspaceName = PhysHeapPDumpMemspaceName(psPhysHeap);
+    psPMR->pszPDumpFlavour = pszPDumpFlavour;
+    psPMR->pvFlavourData = pvPrivData;
+    psPMR->uiRefCount = 1;
+    psPMR->bForcePersistent = bForcePersistent;
+
+    *ppsPMRPtr = psPMR;
+
+
+	if (phPDumpAllocInfo)
+	{
+		PDumpPMRMallocPMR(psPMR,
+						  (uiChunkSize * ui32NumPhysChunks),
+						  1ULL<<uiLog2ContiguityGuarantee,
+						  bForcePersistent,
+						  phPDumpAllocInfo);
+	}
+
+    return PVRSRV_OK;
+
+    /*
+      error exit paths follow
+    */
+ e0:
+    PVR_ASSERT(eError != PVRSRV_OK);
+    return eError;
+}
+
+PVRSRV_ERROR PMRLockSysPhysAddressesNested(PMR *psPMR,
+                        IMG_UINT32 uiLog2RequiredContiguity,
+                        IMG_UINT32 ui32NestingLevel)
+{
+    PVRSRV_ERROR eError;
+
+    PVR_ASSERT(psPMR != IMG_NULL);
+
+    if (uiLog2RequiredContiguity > psPMR->uiLog2ContiguityGuarantee)
+    {
+        eError = PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY;
+        goto e0;
+    }
+
+	OSLockAcquireNested(psPMR->hLock, ui32NestingLevel);
+    /* We also count the locks as references, so that the PMR is not
+       freed while someone is using a physical address. */
+    /* "lock" here simply means incrementing the refcount.  It means
+       the refcount is multipurpose, but that's okay.  We only have to
+       promise that physical addresses are valid after this point, and
+       remain valid until the corresponding
+       PMRUnlockSysPhysAddressesOSMem() */
+    _RefNoLock(psPMR);
+
+    /* Also count locks separately from other types of references, to
+       allow for debug assertions */
+    psPMR->uiLockCount++;
+
+    /* Only call callback if lockcount transitions from 0 to 1 */
+    if (psPMR->uiLockCount == 1)
+    {
+        if (psPMR->psFuncTab->pfnLockPhysAddresses != IMG_NULL)
+        {
+            /* must always have lock and unlock in pairs! */
+            PVR_ASSERT(psPMR->psFuncTab->pfnUnlockPhysAddresses != IMG_NULL);
+
+            eError = psPMR->psFuncTab->pfnLockPhysAddresses(psPMR->pvFlavourData,
+                                                            uiLog2RequiredContiguity);
+
+            if (eError != PVRSRV_OK)
+            {
+                goto e1;
+            }
+        }
+    }
+	OSLockRelease(psPMR->hLock);
+
+    return PVRSRV_OK;
+
+ e1:
+    psPMR->uiLockCount--;
+    _UnrefNoLock(psPMR);
+    PVR_ASSERT(psPMR->uiRefCount != 0);
+    OSLockRelease(psPMR->hLock);
+ e0:
+    PVR_ASSERT(eError != PVRSRV_OK);
+    return eError;
+}
+
+PVRSRV_ERROR
+PMRLockSysPhysAddresses(PMR *psPMR,
+                        IMG_UINT32 uiLog2RequiredContiguity)
+{
+	return PMRLockSysPhysAddressesNested(psPMR, uiLog2RequiredContiguity, 0);
+}
+
+PVRSRV_ERROR
+PMRUnlockSysPhysAddresses(PMR *psPMR)
+{
+    PVRSRV_ERROR eError;
+
+    PVR_ASSERT(psPMR != IMG_NULL);
+
+	OSLockAcquire(psPMR->hLock);
+	PVR_ASSERT(psPMR->uiLockCount > 0);
+	psPMR->uiLockCount--;
+
+    if (psPMR->uiLockCount == 0)
+    {
+        if (psPMR->psFuncTab->pfnUnlockPhysAddresses != IMG_NULL)
+        {
+            PVR_ASSERT(psPMR->psFuncTab->pfnLockPhysAddresses != IMG_NULL);
+
+            eError = psPMR->psFuncTab->pfnUnlockPhysAddresses(psPMR->pvFlavourData);
+            /* must never fail */
+            PVR_ASSERT(eError == PVRSRV_OK);
+        }
+    }
+
+    OSLockRelease(psPMR->hLock);
+
+    /* We also count the locks as references, so that the PMR is not
+       freed while someone is using a physical address. */
+    _UnrefAndMaybeDestroy(psPMR);
+
+    return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PMRExportPMR(PMR *psPMR,
+             PMR_EXPORT **ppsPMRExportPtr,
+             PMR_SIZE_T *puiSize,
+             PMR_LOG2ALIGN_T *puiLog2Contig,
+             PMR_PASSWORD_T *puiPassword)
+{
+    IMG_UINT64 uiPassword;
+    PMR_EXPORT *psPMRExport;
+
+    uiPassword = psPMR->uiKey;
+
+    psPMRExport = OSAllocMem(sizeof(*psPMRExport));
+    if (psPMRExport == IMG_NULL)
+    {
+        return PVRSRV_ERROR_OUT_OF_MEMORY;
+    }
+
+    psPMRExport->psPMR = psPMR;
+    _Ref(psPMR);
+
+    *ppsPMRExportPtr = psPMRExport;
+    *puiSize = psPMR->uiLogicalSize;
+    *puiLog2Contig = psPMR->uiLog2ContiguityGuarantee;
+    *puiPassword = uiPassword;
+
+    return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PMRMakeServerExportClientExport(DEVMEM_EXPORTCOOKIE *psPMRExportIn,
+								PMR_EXPORT **ppsPMRExportPtr,
+								PMR_SIZE_T *puiSize,
+								PMR_LOG2ALIGN_T *puiLog2Contig,
+								PMR_PASSWORD_T *puiPassword)
+{
+	*ppsPMRExportPtr = (PMR_EXPORT *) psPMRExportIn->hPMRExportHandle;
+	*puiSize = psPMRExportIn->uiSize;
+	*puiLog2Contig = psPMRExportIn->uiLog2ContiguityGuarantee;
+	*puiPassword = psPMRExportIn->uiPMRExportPassword;
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PMRUnmakeServerExportClientExport(PMR_EXPORT *psPMRExport)
+{
+	PVR_UNREFERENCED_PARAMETER(psPMRExport);
+
+	/*
+	 * There is nothing to do here, the server will call unexport
+	 * regardless of the type of shutdown. In order to play ball
+	 * with the handle manager (where it's used) we need to pair
+	 * functions and this is PMRMakeServerExportClientExport
+	 * counterpart.
+	 */
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PMRUnexportPMR(PMR_EXPORT *psPMRExport)
+{
+    /* FIXME: probably shouldn't be assertions? */
+    PVR_ASSERT(psPMRExport != IMG_NULL);
+    PVR_ASSERT(psPMRExport->psPMR != IMG_NULL);
+    PVR_ASSERT(psPMRExport->psPMR->uiRefCount > 0);
+
+    _UnrefAndMaybeDestroy(psPMRExport->psPMR);
+
+    OSFreeMem(psPMRExport);
+
+    return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+PMRImportPMR(PMR_EXPORT *psPMRExport,
+             PMR_PASSWORD_T uiPassword,
+             PMR_SIZE_T uiSize,
+             PMR_LOG2ALIGN_T uiLog2Contig,
+             PMR **ppsPMR)
+{
+    PMR *psPMR;
+
+    /* FIXME: probably shouldn't be assertions? */
+    PVR_ASSERT(psPMRExport != IMG_NULL);
+    PVR_ASSERT(psPMRExport->psPMR != IMG_NULL);
+    PVR_ASSERT(psPMRExport->psPMR->uiRefCount > 0);
+
+    psPMR = psPMRExport->psPMR;
+
+    if (psPMR->uiKey != uiPassword)
+    {
+        PVR_DPF((PVR_DBG_ERROR,
+                 "PMRImport: password given = %016llx, expected = %016llx\n",
+                 uiPassword,
+                 psPMR->uiKey));
+        return PVRSRV_ERROR_PMR_WRONG_PASSWORD_OR_STALE_PMR;
+    }
+
+    if (psPMR->uiLogicalSize != uiSize || psPMR->uiLog2ContiguityGuarantee != uiLog2Contig)
+    {
+        return PVRSRV_ERROR_PMR_MISMATCHED_ATTRIBUTES;
+    }
+
+    _Ref(psPMR);
+
+    *ppsPMR = psPMR;
+
+    return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PMRUnimportPMR(PMR *psPMR)
+{
+    _UnrefAndMaybeDestroy(psPMR);
+
+    return PVRSRV_OK;
+}
+
+/*
+	Note:
+	We pass back the PMR as it was passed in as a different handle type
+	(DEVMEM_MEM_IMPORT) and it allows us to change the import structure
+	type if we should need to embed any meta data in it.
+*/
+PVRSRV_ERROR
+PMRLocalImportPMR(PMR *psPMR,
+				  PMR **ppsPMR,
+				  IMG_DEVMEM_SIZE_T *puiSize,
+				  IMG_DEVMEM_ALIGN_T *puiAlign)
+{
+	 _Ref(psPMR);
+
+	/* Return the PMR */
+	*ppsPMR = psPMR;
+	*puiSize = psPMR->uiLogicalSize;
+	*puiAlign = 1ULL << psPMR->uiLog2ContiguityGuarantee;
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PMRGetUID(PMR *psPMR,
+		  IMG_UINT64 *pui64UID)
+{
+	PVR_ASSERT(psPMR != IMG_NULL);
+
+	*pui64UID = psPMR->uiSerialNum;
+
+	return PVRSRV_OK;
+}
+
+#if defined(SUPPORT_SECURE_EXPORT)
+PVRSRV_ERROR PMRSecureExportPMR(CONNECTION_DATA *psConnection,
+								PMR *psPMR,
+								IMG_SECURE_TYPE *phSecure,
+								PMR **ppsPMR,
+								CONNECTION_DATA **ppsSecureConnection)
+{
+	PVRSRV_ERROR eError;
+
+	/* We are acquiring reference to PMR here because OSSecureExport
+	 * releases bridge lock and PMR lock for a moment and we don't want PMR
+	 * to be removed by other thread in the meantime. */
+	_Ref(psPMR);
+
+	eError = OSSecureExport(psConnection,
+							(IMG_PVOID) psPMR,
+							phSecure,
+							ppsSecureConnection);
+
+	if (eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+
+	*ppsPMR = psPMR;
+
+	return PVRSRV_OK;
+e0:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	_UnrefAndMaybeDestroy(psPMR);
+	return eError;
+}
+
+PVRSRV_ERROR PMRSecureUnexportPMR(PMR *psPMR)
+{
+	_UnrefAndMaybeDestroy(psPMR);
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PMRSecureImportPMR(IMG_SECURE_TYPE hSecure,
+								PMR **ppsPMR,
+								IMG_DEVMEM_SIZE_T *puiSize,
+								IMG_DEVMEM_ALIGN_T *puiAlign)
+{
+	PVRSRV_ERROR eError;
+	PMR *psPMR;
+
+	eError = OSSecureImport(hSecure, (IMG_PVOID *) &psPMR);
+	if (eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+
+	_Ref(psPMR);
+
+	/* Return the PMR */
+	*ppsPMR = psPMR;
+	*puiSize = psPMR->uiLogicalSize;
+	*puiAlign = 1 << psPMR->uiLog2ContiguityGuarantee;
+	return PVRSRV_OK;
+e0:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+PVRSRV_ERROR PMRSecureUnimportPMR(PMR *psPMR)
+{
+	_UnrefAndMaybeDestroy(psPMR);
+	return PVRSRV_OK;
+}
+#endif
+
+#if defined(PVR_RI_DEBUG)
+PVRSRV_ERROR
+PMRStoreRIHandle(PMR *psPMR,
+				 IMG_PVOID hRIHandle)
+{
+    PVR_ASSERT(psPMR != IMG_NULL);
+
+    psPMR->hRIHandle = hRIHandle;
+    return PVRSRV_OK;
+}
+#endif
+
+static PVRSRV_ERROR
+_PMRAcquireKernelMappingData(PMR *psPMR,
+                            IMG_SIZE_T uiLogicalOffset,
+                            IMG_SIZE_T uiSize,
+                            IMG_VOID **ppvKernelAddressOut,
+                            IMG_SIZE_T *puiLengthOut,
+                            IMG_HANDLE *phPrivOut,
+                            IMG_BOOL bMapSparse)
+{
+    PVRSRV_ERROR eError;
+    IMG_VOID *pvKernelAddress;
+    IMG_HANDLE hPriv;
+    PMR_FLAGS_T ulFlags;
+
+    PVR_ASSERT(psPMR != IMG_NULL);
+
+    if (_PMRIsSparse(psPMR) && !bMapSparse)
+    {
+        /* Generally we don't support mapping of sparse allocations but if there
+           is a justified need we can do that by passing IMG_TRUE in bMapSparse.
+           Although the callback is supported by the PMR it will always map
+           the physical 1:1 as sparseness issues are handled here in the core */
+        return PVRSRV_ERROR_PMR_NOT_PERMITTED;
+    }
+
+    /* Acquire/Release functions must be overridden in pairs */
+    if (psPMR->psFuncTab->pfnAcquireKernelMappingData == IMG_NULL)
+    {
+        PVR_ASSERT (psPMR->psFuncTab->pfnReleaseKernelMappingData == IMG_NULL);
+
+        /* If PMR implementation does not supply this pair of
+           functions, it means they do not permit the PMR to be mapped
+           into kernel memory at all */
+        eError = PVRSRV_ERROR_PMR_NOT_PERMITTED;
+        goto e0;
+    }
+    PVR_ASSERT (psPMR->psFuncTab->pfnReleaseKernelMappingData != IMG_NULL);
+
+    PMR_Flags(psPMR, &ulFlags);
+
+    eError = psPMR->psFuncTab->pfnAcquireKernelMappingData(psPMR->pvFlavourData,
+                                                           uiLogicalOffset,
+                                                           uiSize,
+                                                           &pvKernelAddress,
+                                                           &hPriv,
+                                                           ulFlags);
+    if (eError != PVRSRV_OK)
+    {
+        goto e0;
+    }
+
+    *ppvKernelAddressOut = pvKernelAddress;
+    if (uiSize == 0)
+    {
+        /* Zero size means map the whole PMR in ...*/
+        *puiLengthOut = (IMG_SIZE_T)psPMR->uiLogicalSize;
+    }
+    else if (uiSize > (1 << psPMR->uiLog2ContiguityGuarantee))
+    {
+    	/* ... map in the requested pages ...*/
+		*puiLengthOut = uiSize;
+    }
+    else
+    {
+        /* ... otherwise we just map in one page */
+        *puiLengthOut = 1 << psPMR->uiLog2ContiguityGuarantee;
+    }
+    *phPrivOut = hPriv;
+
+    return PVRSRV_OK;
+
+ e0:
+    PVR_ASSERT(eError != PVRSRV_OK);
+    return eError;
+}
+
+PVRSRV_ERROR
+PMRAcquireKernelMappingData(PMR *psPMR,
+                            IMG_SIZE_T uiLogicalOffset,
+                            IMG_SIZE_T uiSize,
+                            IMG_VOID **ppvKernelAddressOut,
+                            IMG_SIZE_T *puiLengthOut,
+                            IMG_HANDLE *phPrivOut)
+{
+    return _PMRAcquireKernelMappingData(psPMR,
+                                        uiLogicalOffset,
+                                        uiSize,
+                                        ppvKernelAddressOut,
+                                        puiLengthOut,
+                                        phPrivOut,
+                                        IMG_FALSE);
+}
+
+PVRSRV_ERROR
+PMRAcquireSparseKernelMappingData(PMR *psPMR,
+                                  IMG_SIZE_T uiLogicalOffset,
+                                  IMG_SIZE_T uiSize,
+                                  IMG_VOID **ppvKernelAddressOut,
+                                  IMG_SIZE_T *puiLengthOut,
+                                  IMG_HANDLE *phPrivOut)
+{
+    return _PMRAcquireKernelMappingData(psPMR,
+                                        uiLogicalOffset,
+                                        uiSize,
+                                        ppvKernelAddressOut,
+                                        puiLengthOut,
+                                        phPrivOut,
+                                        IMG_TRUE);
+}
+
+PVRSRV_ERROR
+PMRReleaseKernelMappingData(PMR *psPMR,
+                            IMG_HANDLE hPriv)
+{
+    PVR_ASSERT (psPMR->psFuncTab->pfnAcquireKernelMappingData != IMG_NULL);
+    PVR_ASSERT (psPMR->psFuncTab->pfnReleaseKernelMappingData != IMG_NULL);
+
+    psPMR->psFuncTab->pfnReleaseKernelMappingData(psPMR->pvFlavourData,
+                                                  hPriv);
+
+    return PVRSRV_OK;
+}
+
+/*
+	_PMRLogicalOffsetToPhysicalOffset
+
+	Translate between the "logical" offset which the upper levels
+	provide and the physical offset which is what the PMR
+	factories works on.
+
+	As well as returning the physical offset we return the number of
+	bytes remaining till the next chunk and if this chunk is valid.
+
+	For multi-page operations, upper layers communicate their 
+	Log2PageSize else argument is redundant (set to zero). 
+*/
+
+static IMG_VOID
+_PMRLogicalOffsetToPhysicalOffset(const PMR *psPMR,
+								  IMG_UINT32 ui32Log2PageSize,
+								  IMG_UINT32 ui32NumOfPages,
+								  IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+								  IMG_DEVMEM_OFFSET_T *puiPhysicalOffset,
+								  IMG_UINT32 *pui32BytesRemain,
+								  IMG_BOOL *bValid)
+{
+	PMR_MAPPING_TABLE *psMappingTable = psPMR->psMappingTable;
+	IMG_DEVMEM_OFFSET_T uiPageSize = 1ULL << ui32Log2PageSize;
+	IMG_DEVMEM_OFFSET_T uiOffset = uiLogicalOffset;
+	IMG_UINT64 ui64ChunkIndex;
+	IMG_UINT32 ui32Remain;
+	IMG_UINT32 idx;
+
+	/* Must be translating at least a page */
+	PVR_ASSERT(ui32NumOfPages);
+
+	if (psMappingTable->ui32NumPhysChunks == psMappingTable->ui32NumVirtChunks)
+	{
+		/* Fast path the common case, as logical and physical offsets are
+			equal we _assume_ the ui32NumOfPages span is also valid */
+		*pui32BytesRemain = TRUNCATE_64BITS_TO_32BITS(psPMR->uiLogicalSize - uiOffset);
+		puiPhysicalOffset[0] = uiOffset;
+		bValid[0] = IMG_TRUE;
+		
+		if (ui32NumOfPages > 1)
+		{
+			/* initial offset may not be page aligned, round down */
+			uiOffset &= ~(uiPageSize-1);
+			for (idx=1; idx < ui32NumOfPages; idx++)
+			{
+				uiOffset += uiPageSize;
+				puiPhysicalOffset[idx] = uiOffset;
+				bValid[idx] = IMG_TRUE;
+			}
+		}
+	}
+	else
+	{
+		for (idx=0; idx < ui32NumOfPages; idx++)
+		{
+			ui64ChunkIndex = OSDivide64r64(
+					uiOffset,
+					TRUNCATE_64BITS_TO_32BITS(psMappingTable->uiChunkSize),
+					&ui32Remain);
+
+			if (psMappingTable->aui32Translation[ui64ChunkIndex] == TRANSLATION_INVALID)
+			{
+				bValid[idx] = IMG_FALSE;
+			}
+			else
+			{
+				bValid[idx] = IMG_TRUE;
+			}
+
+			if (idx == 0)
+			{
+				if (ui32Remain == 0)
+				{
+					/* Start of chunk so return the chunk size */
+					*pui32BytesRemain = TRUNCATE_64BITS_TO_32BITS(psMappingTable->uiChunkSize);
+				}
+				else
+				{
+					*pui32BytesRemain = TRUNCATE_64BITS_TO_32BITS(psMappingTable->uiChunkSize - ui32Remain);
+				}
+
+				puiPhysicalOffset[idx] = (psMappingTable->aui32Translation[ui64ChunkIndex] * psMappingTable->uiChunkSize) +	 ui32Remain;
+				
+				/* initial offset may not be page aligned, round down */
+				uiOffset &= ~(uiPageSize-1);
+			}
+			else
+			{
+				puiPhysicalOffset[idx] = psMappingTable->aui32Translation[ui64ChunkIndex] * psMappingTable->uiChunkSize;
+			}
+			uiOffset += uiPageSize;
+		}
+	}
+}
+
+static PVRSRV_ERROR
+_PMR_ReadBytesPhysical(PMR *psPMR,
+                       IMG_DEVMEM_OFFSET_T uiPhysicalOffset,
+                       IMG_UINT8 *pcBuffer,
+                       IMG_SIZE_T uiBufSz,
+                       IMG_SIZE_T *puiNumBytes)
+{
+	PVRSRV_ERROR eError;
+
+    if (psPMR->psFuncTab->pfnReadBytes != IMG_NULL)
+    {
+        /* defer to callback if present */
+
+        eError = PMRLockSysPhysAddresses(psPMR,
+                                         psPMR->uiLog2ContiguityGuarantee);
+        if (eError != PVRSRV_OK)
+        {
+            goto e0;
+        }
+
+        eError = psPMR->psFuncTab->pfnReadBytes(psPMR->pvFlavourData,
+                                                uiPhysicalOffset,
+                                                pcBuffer,
+                                                uiBufSz,
+                                                puiNumBytes);
+        PMRUnlockSysPhysAddresses(psPMR);
+        if (eError != PVRSRV_OK)
+        {
+            goto e0;
+        }
+    }
+    else if (psPMR->psFuncTab->pfnAcquireKernelMappingData)
+    {
+        /* "default" handler for reading bytes */
+
+        IMG_HANDLE hKernelMappingHandle;
+        IMG_UINT8 *pcKernelAddress;
+        PMR_FLAGS_T ulFlags;
+
+        PMR_Flags(psPMR, &ulFlags);
+
+        eError = psPMR->psFuncTab->pfnAcquireKernelMappingData(psPMR->pvFlavourData,
+                                                               (IMG_SIZE_T) uiPhysicalOffset,
+                                                               uiBufSz,
+                                                               (IMG_VOID **)&pcKernelAddress,
+                                                               &hKernelMappingHandle,
+                                                               ulFlags);
+        if (eError != PVRSRV_OK)
+        {
+            goto e0;
+        }
+
+        OSMemCopy(&pcBuffer[0], pcKernelAddress, uiBufSz);
+        *puiNumBytes = uiBufSz;
+
+        psPMR->psFuncTab->pfnReleaseKernelMappingData(psPMR->pvFlavourData,
+                                                      hKernelMappingHandle);
+    }
+    else
+    {
+        PVR_DPF((PVR_DBG_ERROR, "PMR_ReadBytes: can't read from this PMR"));
+        eError = PVRSRV_ERROR_INVALID_PARAMS;
+        OSPanic();
+        goto e0;
+    }
+
+    return PVRSRV_OK;
+
+    /*
+      error exit paths follow
+    */
+
+ e0:
+    PVR_ASSERT(eError != PVRSRV_OK);
+    return eError;
+}
+
+PVRSRV_ERROR
+PMR_ReadBytes(PMR *psPMR,
+              IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+              IMG_UINT8 *pcBuffer,
+              IMG_SIZE_T uiBufSz,
+              IMG_SIZE_T *puiNumBytes)
+{
+    PVRSRV_ERROR eError = PVRSRV_OK;
+    IMG_DEVMEM_OFFSET_T uiPhysicalOffset;
+    IMG_SIZE_T uiBytesCopied = 0;
+
+    if (uiLogicalOffset + uiBufSz > psPMR->uiLogicalSize)
+    {
+		uiBufSz = TRUNCATE_64BITS_TO_32BITS(psPMR->uiLogicalSize - uiLogicalOffset);
+    }
+    PVR_ASSERT(uiBufSz > 0);
+    PVR_ASSERT(uiBufSz <= psPMR->uiLogicalSize);
+
+    /*
+      PMR implementations can override this.  If they don't, a
+      "default" handler uses kernel virtual mappings.  If the kernel
+      can't provide a kernel virtual mapping, this function fails
+    */
+    PVR_ASSERT(psPMR->psFuncTab->pfnAcquireKernelMappingData != IMG_NULL ||
+               psPMR->psFuncTab->pfnReadBytes != IMG_NULL);
+
+	while (uiBytesCopied != uiBufSz)
+	{
+		IMG_UINT32 ui32Remain;
+		IMG_SIZE_T uiBytesToCopy;
+		IMG_SIZE_T uiRead;
+		IMG_BOOL bValid;
+
+		_PMRLogicalOffsetToPhysicalOffset(psPMR,
+										  0,
+										  1,
+										  uiLogicalOffset,
+										  &uiPhysicalOffset,
+										  &ui32Remain,
+										  &bValid);
+		/* 
+			Copy till either then end of the
+			chunk or end end of the buffer
+		*/
+		uiBytesToCopy = MIN(uiBufSz - uiBytesCopied, ui32Remain);
+
+		if (bValid)
+		{
+			/* Read the data from the PMR */
+			eError = _PMR_ReadBytesPhysical(psPMR,
+											uiPhysicalOffset,
+											&pcBuffer[uiBytesCopied],
+											uiBytesToCopy,
+											&uiRead);
+			if ((eError != PVRSRV_OK) || (uiRead != uiBytesToCopy))
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						 "%s: Failed to read chunk (eError = %s, uiRead = "IMG_SIZE_FMTSPEC" uiBytesToCopy = "IMG_SIZE_FMTSPEC")",
+						 __FUNCTION__,
+						 PVRSRVGetErrorStringKM(eError),
+						 uiRead,
+						 uiBytesToCopy));
+				/* Bail out as soon as we hit an error */
+				break;
+			}
+		}
+		else
+		{
+			/* Fill invalid chunks with 0 */
+			OSMemSet(&pcBuffer[uiBytesCopied], 0, uiBytesToCopy);
+			uiRead = uiBytesToCopy;
+		}
+		uiLogicalOffset += uiRead;
+		uiBytesCopied += uiRead;
+	}
+
+	*puiNumBytes = uiBytesCopied;
+    return eError;
+}
+
+static PVRSRV_ERROR
+_PMR_WriteBytesPhysical(PMR *psPMR,
+						IMG_DEVMEM_OFFSET_T uiPhysicalOffset,
+						IMG_UINT8 *pcBuffer,
+						IMG_SIZE_T uiBufSz,
+						IMG_SIZE_T *puiNumBytes)
+{
+	PVRSRV_ERROR eError;
+
+    if (psPMR->psFuncTab->pfnWriteBytes != IMG_NULL)
+    {
+        /* defer to callback if present */
+
+        eError = PMRLockSysPhysAddresses(psPMR,
+                                         psPMR->uiLog2ContiguityGuarantee);
+        if (eError != PVRSRV_OK)
+        {
+            goto e0;
+        }
+
+        eError = psPMR->psFuncTab->pfnWriteBytes(psPMR->pvFlavourData,
+												 uiPhysicalOffset,
+                                                 pcBuffer,
+                                                 uiBufSz,
+                                                 puiNumBytes);
+        PMRUnlockSysPhysAddresses(psPMR);
+        if (eError != PVRSRV_OK)
+        {
+            goto e0;
+        }
+    }
+    else if (psPMR->psFuncTab->pfnAcquireKernelMappingData)
+    {
+        /* "default" handler for reading bytes */
+
+        IMG_HANDLE hKernelMappingHandle;
+        IMG_UINT8 *pcKernelAddress;
+        PMR_FLAGS_T ulFlags;
+
+        PMR_Flags(psPMR, &ulFlags);
+
+        eError = psPMR->psFuncTab->pfnAcquireKernelMappingData(psPMR->pvFlavourData,
+                                                               (IMG_SIZE_T) uiPhysicalOffset,
+                                                               uiBufSz,
+                                                               (IMG_VOID **)&pcKernelAddress,
+                                                               &hKernelMappingHandle,
+                                                               ulFlags);
+        if (eError != PVRSRV_OK)
+        {
+            goto e0;
+        }
+
+		OSMemCopy(pcKernelAddress, &pcBuffer[0], uiBufSz);
+        *puiNumBytes = uiBufSz;
+
+        psPMR->psFuncTab->pfnReleaseKernelMappingData(psPMR->pvFlavourData,
+                                                      hKernelMappingHandle);
+    }
+    else
+    {
+		/*
+			The write callback is optional as it's only required by the debug
+			tools
+		*/
+        PVR_DPF((PVR_DBG_ERROR, "_PMR_WriteBytesPhysical: can't write to this PMR"));
+        eError = PVRSRV_ERROR_PMR_NOT_PERMITTED;
+        OSPanic();
+        goto e0;
+    }
+
+    return PVRSRV_OK;
+
+    /*
+      error exit paths follow
+    */
+
+ e0:
+    PVR_ASSERT(eError != PVRSRV_OK);
+    return eError;
+}
+
+PVRSRV_ERROR
+PMR_WriteBytes(PMR *psPMR,
+			   IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+               IMG_UINT8 *pcBuffer,
+               IMG_SIZE_T uiBufSz,
+               IMG_SIZE_T *puiNumBytes)
+{
+    PVRSRV_ERROR eError = PVRSRV_OK;
+	#if 0
+    PMR_FLAGS_T uiFlags;
+	#endif
+    IMG_DEVMEM_OFFSET_T uiPhysicalOffset;
+    IMG_SIZE_T uiBytesCopied = 0;
+
+	/* FIXME: When we honour CPU mapping flags remove the #if 0*/
+	#if 0
+	/* Check that writes are allowed */
+	PMR_Flags(psPMR, &uiFlags);
+	if (!(uiFlags & PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE))
+	{
+		return PVRSRV_ERROR_PMR_NOT_PERMITTED;
+	}
+	#endif
+
+    if (uiLogicalOffset + uiBufSz > psPMR->uiLogicalSize)
+    {
+        uiBufSz = TRUNCATE_64BITS_TO_32BITS(psPMR->uiLogicalSize - uiLogicalOffset);
+    }
+    PVR_ASSERT(uiBufSz > 0);
+    PVR_ASSERT(uiBufSz <= psPMR->uiLogicalSize);
+
+    /*
+      PMR implementations can override this.  If they don't, a
+      "default" handler uses kernel virtual mappings.  If the kernel
+      can't provide a kernel virtual mapping, this function fails
+    */
+    PVR_ASSERT(psPMR->psFuncTab->pfnAcquireKernelMappingData != IMG_NULL ||
+               psPMR->psFuncTab->pfnWriteBytes != IMG_NULL);
+
+	while (uiBytesCopied != uiBufSz)
+	{
+		IMG_UINT32 ui32Remain;
+		IMG_SIZE_T uiBytesToCopy;
+		IMG_SIZE_T uiWrite;
+		IMG_BOOL bValid;
+
+		_PMRLogicalOffsetToPhysicalOffset(psPMR,
+										  0,
+										  1,
+										  uiLogicalOffset,
+										  &uiPhysicalOffset,
+										  &ui32Remain,
+										  &bValid);
+
+		/* 
+			Copy till either then end of the
+			chunk or end end of the buffer
+		*/
+		uiBytesToCopy = MIN(uiBufSz - uiBytesCopied, ui32Remain);
+
+		if (bValid)
+		{
+			/* Write the data to the PMR */
+			eError = _PMR_WriteBytesPhysical(psPMR,
+											 uiPhysicalOffset,
+											 &pcBuffer[uiBytesCopied],
+											 uiBytesToCopy,
+											 &uiWrite);
+			if ((eError != PVRSRV_OK) || (uiWrite != uiBytesToCopy))
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						 "%s: Failed to read chunk (eError = %s, uiWrite = "IMG_SIZE_FMTSPEC" uiBytesToCopy = "IMG_SIZE_FMTSPEC")",
+						 __FUNCTION__,
+						 PVRSRVGetErrorStringKM(eError),
+						 uiWrite,
+						 uiBytesToCopy));
+				/* Bail out as soon as we hit an error */
+				break;
+			}
+		}
+		else
+		{
+			/* Ignore writes to invalid pages */
+			uiWrite = uiBytesToCopy;
+		}
+		uiLogicalOffset += uiWrite;
+		uiBytesCopied += uiWrite;
+	}
+
+	*puiNumBytes = uiBytesCopied;
+    return eError;
+}
+
+IMG_VOID
+PMRRefPMR(PMR *psPMR)
+{
+	PVR_ASSERT(psPMR != IMG_NULL);
+	_Ref(psPMR);
+}
+
+PVRSRV_ERROR
+PMRUnrefPMR(PMR *psPMR)
+{
+    _UnrefAndMaybeDestroy(psPMR);
+    return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PMR_Flags(const PMR *psPMR,
+          PMR_FLAGS_T *puiPMRFlags)
+{
+    PVR_ASSERT(psPMR != IMG_NULL);
+
+    *puiPMRFlags = psPMR->uiFlags;
+    return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PMR_LogicalSize(const PMR *psPMR,
+				IMG_DEVMEM_SIZE_T *puiLogicalSize)
+{
+	PVR_ASSERT(psPMR != IMG_NULL);
+
+    *puiLogicalSize = psPMR->uiLogicalSize;
+    return PVRSRV_OK;
+}
+
+/* must have called PMRLockSysPhysAddresses() before calling this! */
+PVRSRV_ERROR
+PMR_DevPhysAddr(const PMR *psPMR,
+				IMG_UINT32 ui32Log2PageSize,
+				IMG_UINT32 ui32NumOfPages,
+                IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                IMG_DEV_PHYADDR *psDevAddrPtr,
+                IMG_BOOL *pbValid)
+{
+	IMG_UINT32 ui32Remain;	
+    PVRSRV_ERROR eError = PVRSRV_OK;
+    IMG_DEVMEM_OFFSET_T auiPhysicalOffset[PMR_MAX_TRANSLATION_STACK_ALLOC];
+    IMG_DEVMEM_OFFSET_T *puiPhysicalOffset = auiPhysicalOffset;
+
+    PVR_ASSERT(psPMR != IMG_NULL);
+    PVR_ASSERT(ui32NumOfPages > 0);
+    PVR_ASSERT(psPMR->psFuncTab->pfnDevPhysAddr != IMG_NULL);
+
+#ifdef PVRSRV_NEED_PVR_ASSERT
+    OSLockAcquire(psPMR->hLock);
+    PVR_ASSERT(psPMR->uiLockCount > 0);
+    OSLockRelease(psPMR->hLock);
+#endif
+
+    if (ui32NumOfPages > PMR_MAX_TRANSLATION_STACK_ALLOC)
+    {
+    	puiPhysicalOffset = OSAllocMem(ui32NumOfPages * sizeof(IMG_DEVMEM_OFFSET_T));
+    	if (puiPhysicalOffset == IMG_NULL)
+    	{
+    		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+    		goto e0;
+    	}
+    }
+
+	_PMRLogicalOffsetToPhysicalOffset(psPMR,
+									 ui32Log2PageSize,
+									 ui32NumOfPages,
+									 uiLogicalOffset,
+									 puiPhysicalOffset,
+									 &ui32Remain,
+									 pbValid);
+	if (*pbValid || _PMRIsSparse(psPMR))
+	{
+		/* Sparse PMR may not always have the first page valid */
+		eError = psPMR->psFuncTab->pfnDevPhysAddr(psPMR->pvFlavourData,
+												  ui32NumOfPages,
+												  puiPhysicalOffset,
+												  pbValid,
+												  psDevAddrPtr);
+	}
+
+	if (puiPhysicalOffset != auiPhysicalOffset)
+	{
+		OSFreeMem(puiPhysicalOffset);
+	}
+
+    if (eError != PVRSRV_OK)
+    {
+        goto e0;
+    }
+
+    return PVRSRV_OK;
+
+ e0:
+    PVR_ASSERT(eError != PVRSRV_OK);
+    return eError;
+}
+
+PVRSRV_ERROR
+PMR_CpuPhysAddr(const PMR *psPMR,
+                IMG_UINT32 ui32Log2PageSize,
+                IMG_UINT32 ui32NumOfPages,
+                IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                IMG_CPU_PHYADDR *psCpuAddrPtr,
+                IMG_BOOL *pbValid)
+{
+    PVRSRV_ERROR eError;
+	IMG_DEV_PHYADDR asDevPAddr[PMR_MAX_TRANSLATION_STACK_ALLOC];
+	IMG_DEV_PHYADDR *psDevPAddr = asDevPAddr;
+
+    if (ui32NumOfPages > PMR_MAX_TRANSLATION_STACK_ALLOC)
+    {
+    	psDevPAddr = OSAllocMem(ui32NumOfPages * sizeof(IMG_DEV_PHYADDR));
+    	if (psDevPAddr == IMG_NULL)
+    	{
+    		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+    		goto e0;
+    	}
+    }
+
+    eError = PMR_DevPhysAddr(psPMR, ui32Log2PageSize, ui32NumOfPages, 
+							 uiLogicalOffset, psDevPAddr, pbValid);
+    if (eError != PVRSRV_OK)
+    {
+        goto e1;
+    }
+	PhysHeapDevPAddrToCpuPAddr(psPMR->psPhysHeap, ui32NumOfPages, psCpuAddrPtr, psDevPAddr);
+	
+	if (ui32NumOfPages > PMR_MAX_TRANSLATION_STACK_ALLOC)
+	{
+		OSFreeMem(psDevPAddr);
+	}
+
+    return PVRSRV_OK;
+e1:
+	if (psDevPAddr != asDevPAddr)
+	{
+		OSFreeMem(psDevPAddr);
+	}
+e0:
+    PVR_ASSERT(eError != PVRSRV_OK);
+    return eError;
+}
+
+#if defined(PDUMP)
+
+static PVRSRV_ERROR
+_PMR_PDumpSymbolicAddrPhysical(const PMR *psPMR,
+                               IMG_DEVMEM_OFFSET_T uiPhysicalOffset,
+                               IMG_UINT32 ui32MemspaceNameLen,
+                               IMG_CHAR *pszMemspaceName,
+                               IMG_UINT32 ui32SymbolicAddrLen,
+                               IMG_CHAR *pszSymbolicAddr,
+                               IMG_DEVMEM_OFFSET_T *puiNewOffset,
+                               IMG_DEVMEM_OFFSET_T *puiNextSymName)
+{
+	const IMG_CHAR *pszPrefix;
+
+    if (psPMR->psFuncTab->pfnPDumpSymbolicAddr != IMG_NULL)
+    {
+        /* defer to callback if present */
+        return psPMR->psFuncTab->pfnPDumpSymbolicAddr(psPMR->pvFlavourData,
+                                                      uiPhysicalOffset,
+                                                      pszMemspaceName,
+                                                      ui32MemspaceNameLen,
+                                                      pszSymbolicAddr,
+                                                      ui32SymbolicAddrLen,
+                                                      puiNewOffset,
+                                                      puiNextSymName);
+    }
+    else
+    {
+        OSSNPrintf(pszMemspaceName, ui32MemspaceNameLen, "%s",
+                   psPMR->pszPDumpDefaultMemspaceName);
+
+        if (psPMR->pszPDumpFlavour != IMG_NULL)
+        {
+            pszPrefix = psPMR->pszPDumpFlavour;
+        }
+        else
+        {
+            pszPrefix = PMR_DEFAULT_PREFIX;
+        }
+        OSSNPrintf(pszSymbolicAddr, ui32SymbolicAddrLen, PMR_SYMBOLICADDR_FMTSPEC,
+                   pszPrefix, psPMR->uiSerialNum);
+        *puiNewOffset = uiPhysicalOffset;
+        *puiNextSymName = (IMG_DEVMEM_OFFSET_T) psPMR->uiLogicalSize;
+
+        return PVRSRV_OK;
+    }
+}
+
+
+PVRSRV_ERROR
+PMR_PDumpSymbolicAddr(const PMR *psPMR,
+                      IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                      IMG_UINT32 ui32MemspaceNameLen,
+                      IMG_CHAR *pszMemspaceName,
+                      IMG_UINT32 ui32SymbolicAddrLen,
+                      IMG_CHAR *pszSymbolicAddr,
+                      IMG_DEVMEM_OFFSET_T *puiNewOffset,
+                      IMG_DEVMEM_OFFSET_T *puiNextSymName
+                      )
+{
+    IMG_DEVMEM_OFFSET_T uiPhysicalOffset;
+    IMG_UINT32 ui32Remain;
+    IMG_BOOL bValid;
+
+    PVR_ASSERT(uiLogicalOffset < psPMR->uiLogicalSize);
+
+    _PMRLogicalOffsetToPhysicalOffset(psPMR,
+								      0,
+								      1,
+								      uiLogicalOffset,
+								      &uiPhysicalOffset,
+								      &ui32Remain,
+								      &bValid);
+
+	if (!bValid)
+	{
+		/* We should never be asked a symbolic address of an invalid chunk */
+		return PVRSRV_ERROR_PMR_INVALID_CHUNK;
+	}
+
+	return _PMR_PDumpSymbolicAddrPhysical(psPMR,
+										  uiPhysicalOffset,
+										  ui32MemspaceNameLen,
+										  pszMemspaceName,
+										  ui32SymbolicAddrLen,
+										  pszSymbolicAddr,
+										  puiNewOffset,
+										  puiNextSymName);
+}
+
+/*!
+ * @brief Writes a WRW command to the script2 buffer, representing a
+ * 		  dword write to a physical allocation. Size is always
+ * 		  sizeof(IMG_UINT32).
+ * @param psPMR - PMR object representing allocation
+ * @param uiLogicalOffset - offset
+ * @param ui32Value - value to write
+ * @param uiPDumpFlags - pdump flags
+ * @return PVRSRV_ERROR
+ */
+PVRSRV_ERROR
+PMRPDumpLoadMemValue32(PMR *psPMR,
+			         IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                     IMG_UINT32 ui32Value,
+                     PDUMP_FLAGS_T uiPDumpFlags)
+{
+    PVRSRV_ERROR eError;
+    IMG_CHAR aszMemspaceName[PMR_MAX_MEMSPACE_NAME_LENGTH_DEFAULT];
+    IMG_CHAR aszSymbolicName[PMR_MAX_SYMBOLIC_ADDRESS_LENGTH_DEFAULT];
+    IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffset;
+    IMG_DEVMEM_OFFSET_T uiNextSymName;
+
+    PVR_ASSERT(uiLogicalOffset + sizeof(IMG_UINT32) <= psPMR->uiLogicalSize);
+
+    eError = PMRLockSysPhysAddresses(psPMR,
+                                     psPMR->uiLog2ContiguityGuarantee);
+    PVR_ASSERT(eError == PVRSRV_OK);
+
+	/* Get the symbolic address of the PMR */
+	eError = PMR_PDumpSymbolicAddr(psPMR,
+								   uiLogicalOffset,
+								   sizeof(aszMemspaceName),
+								   &aszMemspaceName[0],
+								   sizeof(aszSymbolicName),
+								   &aszSymbolicName[0],
+								   &uiPDumpSymbolicOffset,
+				                   &uiNextSymName);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	/* Write the WRW script command */
+	eError = PDumpPMRWRW32(aszMemspaceName,
+						 aszSymbolicName,
+						 uiPDumpSymbolicOffset,
+						 ui32Value,
+						 uiPDumpFlags);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+    eError = PMRUnlockSysPhysAddresses(psPMR);
+    PVR_ASSERT(eError == PVRSRV_OK);
+
+    return PVRSRV_OK;
+}
+
+/*!
+ * @brief Writes a WRW64 command to the script2 buffer, representing a
+ * 		  dword write to a physical allocation. Size is always
+ * 		  sizeof(IMG_UINT64).
+ * @param psPMR - PMR object representing allocation
+ * @param uiLogicalOffset - offset
+ * @param ui64Value - value to write
+ * @param uiPDumpFlags - pdump flags
+ * @return PVRSRV_ERROR
+ */
+PVRSRV_ERROR
+PMRPDumpLoadMemValue64(PMR *psPMR,
+			         IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                     IMG_UINT64 ui64Value,
+                     PDUMP_FLAGS_T uiPDumpFlags)
+{
+    PVRSRV_ERROR eError;
+    IMG_CHAR aszMemspaceName[PMR_MAX_MEMSPACE_NAME_LENGTH_DEFAULT];
+    IMG_CHAR aszSymbolicName[PMR_MAX_SYMBOLIC_ADDRESS_LENGTH_DEFAULT];
+    IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffset;
+    IMG_DEVMEM_OFFSET_T uiNextSymName;
+
+    PVR_ASSERT(uiLogicalOffset + sizeof(IMG_UINT64) <= psPMR->uiLogicalSize);
+
+    eError = PMRLockSysPhysAddresses(psPMR,
+                                     psPMR->uiLog2ContiguityGuarantee);
+    PVR_ASSERT(eError == PVRSRV_OK);
+
+	/* Get the symbolic address of the PMR */
+	eError = PMR_PDumpSymbolicAddr(psPMR,
+								   uiLogicalOffset,
+								   sizeof(aszMemspaceName),
+								   &aszMemspaceName[0],
+								   sizeof(aszSymbolicName),
+								   &aszSymbolicName[0],
+								   &uiPDumpSymbolicOffset,
+				                   &uiNextSymName);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	/* Write the WRW script command */
+	eError = PDumpPMRWRW64(aszMemspaceName,
+						 aszSymbolicName,
+						 uiPDumpSymbolicOffset,
+						 ui64Value,
+						 uiPDumpFlags);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+    eError = PMRUnlockSysPhysAddresses(psPMR);
+    PVR_ASSERT(eError == PVRSRV_OK);
+
+    return PVRSRV_OK;
+}
+
+/*!
+ * @brief PDumps the contents of the given allocation.
+ * If bZero is IMG_TRUE then the zero page in the parameter stream is used
+ * as the source of data, rather than the allocation's actual backing.
+ * @param psPMR - PMR object representing allocation
+ * @param uiLogicalOffset - Offset to write at
+ * @param uiSize - Number of bytes to write
+ * @param uiPDumpFlags - PDump flags
+ * @param bZero - Use the PDump zero page as the source
+ * @return PVRSRV_ERROR
+ */
+PVRSRV_ERROR
+PMRPDumpLoadMem(PMR *psPMR,
+					IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+					IMG_DEVMEM_SIZE_T uiSize,
+					PDUMP_FLAGS_T uiPDumpFlags,
+					IMG_BOOL bZero)
+{
+	/* common variables */
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	IMG_CHAR aszMemspaceName[PMR_MAX_MEMSPACE_NAME_LENGTH_DEFAULT];
+	IMG_CHAR aszSymbolicName[PMR_MAX_SYMBOLIC_ADDRESS_LENGTH_DEFAULT];
+	IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffset;
+	PDUMP_FILEOFFSET_T uiParamStreamFileOffset;
+	IMG_SIZE_T uiBufSz;
+	IMG_SIZE_T uiNumBytes;
+	IMG_DEVMEM_OFFSET_T uiNextSymName;
+	const IMG_CHAR *pszParamStreamFileName;
+
+	/* required when !bZero */
+	#define PMR_MAX_PDUMP_BUFSZ 16384
+	IMG_CHAR aszParamStreamFilename[PMR_MAX_PARAMSTREAM_FILENAME_LENGTH_DEFAULT];
+	IMG_UINT8 *pcBuffer = IMG_NULL;
+
+	PVR_ASSERT(uiLogicalOffset + uiSize <= psPMR->uiLogicalSize);
+
+	if(bZero)
+	{
+		/* Check if this PMR needs to be persistent:
+		 * If the allocation is persistent then it will be present in every
+		 * pdump stream after its allocation. We must ensure the zeroing is also
+		 * persistent so that every PDump MALLOC is accompanied by the initialisation
+		 * to zero.
+		 */
+		if(psPMR->bForcePersistent)
+		{
+			uiPDumpFlags = PDUMP_FLAGS_PERSISTENT;
+		}
+
+		PDumpCommentWithFlags(uiPDumpFlags, "Zeroing allocation (%llu bytes)",
+										(unsigned long long) uiSize);
+
+		/* get the zero page information. it is constant for this function */
+		PDumpGetParameterZeroPageInfo(&uiParamStreamFileOffset, &uiBufSz, &pszParamStreamFileName);
+	}
+	else
+	{
+		uiBufSz = PMR_MAX_PDUMP_BUFSZ;
+		if (uiBufSz > uiSize)
+		{
+			uiBufSz = TRUNCATE_64BITS_TO_SIZE_T(uiSize);
+		}
+
+		pcBuffer = OSAllocMem(uiBufSz);
+		PVR_ASSERT(pcBuffer != IMG_NULL);
+
+		eError = PMRLockSysPhysAddresses(psPMR,
+								psPMR->uiLog2ContiguityGuarantee);
+		PVR_ASSERT(eError == PVRSRV_OK);
+
+		pszParamStreamFileName = aszParamStreamFilename;
+	}
+
+	while (uiSize > 0)
+	{
+		IMG_DEVMEM_OFFSET_T uiPhysicalOffset;
+		IMG_UINT32 ui32Remain;
+		IMG_BOOL bValid;
+
+		_PMRLogicalOffsetToPhysicalOffset(psPMR,
+ 										  0,
+ 										  1,
+										  uiLogicalOffset,
+										  &uiPhysicalOffset,
+										  &ui32Remain,
+										  &bValid);
+
+		if (bValid)
+		{
+			eError = _PMR_PDumpSymbolicAddrPhysical(psPMR,
+													uiPhysicalOffset,
+													sizeof(aszMemspaceName),
+													&aszMemspaceName[0],
+													sizeof(aszSymbolicName),
+													&aszSymbolicName[0],
+													&uiPDumpSymbolicOffset,
+													&uiNextSymName);
+			if(eError != PVRSRV_OK)
+			{
+				goto err_unlock_phys;
+			}
+
+			if(bZero)
+			{
+				uiNumBytes = TRUNCATE_64BITS_TO_SIZE_T(MIN(uiSize, uiBufSz));
+			}
+			else
+			{
+
+				/* Reads enough to fill buffer, or until next chunk,
+				or until end of PMR, whichever comes first */
+				eError = _PMR_ReadBytesPhysical(psPMR,
+												uiPhysicalOffset,
+												pcBuffer,
+												TRUNCATE_64BITS_TO_SIZE_T(MIN3(uiBufSz, uiSize, ui32Remain)),
+												&uiNumBytes);
+				if(eError != PVRSRV_OK)
+				{
+				    goto err_unlock_phys;
+				}
+				PVR_ASSERT(uiNumBytes > 0);
+
+				eError = PDumpWriteBuffer(pcBuffer,
+							  uiNumBytes,
+							  uiPDumpFlags,
+							  &aszParamStreamFilename[0],
+							  sizeof(aszParamStreamFilename),
+							  &uiParamStreamFileOffset);
+				if(eError != PVRSRV_OK)
+				{
+				    goto err_unlock_phys;
+				}
+			}
+
+			eError = PDumpPMRLDB(aszMemspaceName,
+									aszSymbolicName,
+									uiPDumpSymbolicOffset,
+									uiNumBytes,
+									pszParamStreamFileName,
+									uiParamStreamFileOffset,
+									uiPDumpFlags);
+
+			if(eError != PVRSRV_OK)
+			{
+				goto err_unlock_phys;
+			}
+		}
+		else
+		{
+			/* Skip over invalid chunks */
+			uiNumBytes = TRUNCATE_64BITS_TO_SIZE_T(MIN(ui32Remain, uiSize));
+		}
+
+		 uiLogicalOffset += uiNumBytes;
+		 PVR_ASSERT(uiNumBytes <= uiSize);
+		 uiSize -= uiNumBytes;
+	}
+
+err_unlock_phys:
+
+	if(!bZero)
+	{
+	    eError = PMRUnlockSysPhysAddresses(psPMR);
+	    PVR_ASSERT(eError == PVRSRV_OK);
+
+	    OSFreeMem(pcBuffer);
+	}
+    return eError;
+}
+
+
+
+PVRSRV_ERROR
+PMRPDumpSaveToFile(const PMR *psPMR,
+                   IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                   IMG_DEVMEM_SIZE_T uiSize,
+                   IMG_UINT32 uiArraySize,
+                   const IMG_CHAR *pszFilename)
+{
+    PVRSRV_ERROR eError;
+    IMG_CHAR aszMemspaceName[PMR_MAX_MEMSPACE_NAME_LENGTH_DEFAULT];
+    IMG_CHAR aszSymbolicName[PMR_MAX_SYMBOLIC_ADDRESS_LENGTH_DEFAULT];
+    IMG_DEVMEM_OFFSET_T uiOutOffset;
+    IMG_DEVMEM_OFFSET_T uiNextSymName;
+
+    PVR_UNREFERENCED_PARAMETER(uiArraySize);
+
+    PVR_ASSERT(uiLogicalOffset + uiSize <= psPMR->uiLogicalSize);
+
+    eError = PMR_PDumpSymbolicAddr(psPMR,
+                                   uiLogicalOffset,
+                                   sizeof(aszMemspaceName),
+                                   &aszMemspaceName[0],
+                                   sizeof(aszSymbolicName),
+                                   &aszSymbolicName[0],
+                                   &uiOutOffset,
+				   &uiNextSymName);
+    PVR_ASSERT(eError == PVRSRV_OK);
+    PVR_ASSERT(uiLogicalOffset + uiSize <= uiNextSymName);
+
+    eError = PDumpPMRSAB(aszMemspaceName,
+                         aszSymbolicName,
+                         uiOutOffset,
+                         uiSize,
+                         pszFilename,
+                         0);
+    PVR_ASSERT(eError == PVRSRV_OK);
+
+    return PVRSRV_OK;
+}
+#endif	/* PDUMP */
+
+/*
+   FIXME: Find a better way to do this
+ */
+
+IMG_VOID *PMRGetPrivateDataHack(const PMR *psPMR,
+                                const PMR_IMPL_FUNCTAB *psFuncTab)
+{
+    PVR_ASSERT(psFuncTab == psPMR->psFuncTab);
+
+    return psPMR->pvFlavourData;
+}
+
+PVRSRV_ERROR
+PMRWritePMPageList(/* Target PMR, offset, and length */
+                   PMR *psPageListPMR,
+                   IMG_DEVMEM_OFFSET_T uiTableOffset,
+                   IMG_DEVMEM_SIZE_T  uiTableLength,
+                   /* Referenced PMR, and "page" granularity */
+                   PMR *psReferencePMR,
+                   IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize,
+                   PMR_PAGELIST **ppsPageList,
+                   IMG_UINT64 *pui64CheckSum)
+{
+    PVRSRV_ERROR eError;
+    IMG_DEVMEM_SIZE_T uiWordSize;
+    IMG_UINT32 uiNumPages;
+    IMG_UINT32 uiPageIndex;
+    IMG_UINT32 ui32CheckSumXor = 0;
+    IMG_UINT32 ui32CheckSumAdd = 0;
+    PMR_FLAGS_T uiFlags;
+    PMR_PAGELIST *psPageList;
+#if defined(PDUMP)
+    IMG_CHAR aszTableEntryMemspaceName[100];
+    IMG_CHAR aszTableEntrySymbolicName[100];
+    IMG_DEVMEM_OFFSET_T uiTableEntryPDumpOffset;
+    IMG_CHAR aszPageMemspaceName[100];
+    IMG_CHAR aszPageSymbolicName[100];
+    IMG_DEVMEM_OFFSET_T uiPagePDumpOffset;
+    IMG_DEVMEM_OFFSET_T uiNextSymName;
+#endif
+#if !defined(NO_HARDWARE)
+    IMG_UINT32 uiPageListPageSize = 1 << psPageListPMR->uiLog2ContiguityGuarantee;
+    IMG_BOOL bPageIsMapped = IMG_FALSE;
+    IMG_UINT64 uiPageListPMRPage = 0;
+    IMG_UINT64 uiPrevPageListPMRPage = 0;
+    IMG_HANDLE hPrivData = IMG_NULL;
+    IMG_VOID *pvKernAddr = IMG_NULL;
+	IMG_DEV_PHYADDR asDevPAddr[PMR_MAX_TRANSLATION_STACK_ALLOC];
+	IMG_BOOL abValid[PMR_MAX_TRANSLATION_STACK_ALLOC];
+    IMG_DEV_PHYADDR *pasDevAddrPtr;
+    IMG_UINT32 *pui32DataPtr;
+    IMG_BOOL *pbPageIsValid;
+#endif
+    /* FIXME: should this be configurable? */
+    uiWordSize = 4;
+
+    /* check we're being asked to write the same number of 4-byte units as there are pages */
+    uiNumPages = (IMG_UINT32)(psReferencePMR->uiLogicalSize >> uiLog2PageSize);
+
+    if ((PMR_SIZE_T)uiNumPages << uiLog2PageSize != psReferencePMR->uiLogicalSize)
+    {
+		/* Strictly speaking, it's possible to provoke this error in two ways:
+			(i) if it's not a whole multiple of the page size; or
+			(ii) if there are more than 4 billion pages.
+           The latter is unlikely. :)  but the check is required in order to justify the cast.
+		*/
+        eError = PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE;
+        goto e0;
+    }
+    uiWordSize = (IMG_UINT32)uiTableLength / uiNumPages;
+    if (uiNumPages * uiWordSize != uiTableLength)
+    {
+        eError = PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE;
+        goto e0;
+    }
+
+    /* Check we're not being asked to write off the end of the PMR */
+    if (uiTableOffset + uiTableLength > psPageListPMR->uiLogicalSize)
+    {
+        /* table memory insufficient to store all the entries */
+        /* table insufficient to store addresses of whole block */
+        eError = PVRSRV_ERROR_INVALID_PARAMS;
+        goto e0;
+    }
+
+    /* the PMR into which we are writing must not be user CPU mappable: */
+    eError = PMR_Flags(psPageListPMR, &uiFlags);
+    if ((eError != PVRSRV_OK) ||
+		((uiFlags & (PVRSRV_MEMALLOCFLAG_CPU_READABLE | PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE)) != 0))
+    {
+		PVR_DPF((PVR_DBG_ERROR, "eError = %d", eError));
+		PVR_DPF((PVR_DBG_ERROR, "masked flags = 0x%08x", (uiFlags & (PVRSRV_MEMALLOCFLAG_CPU_READABLE | PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE))));
+		PVR_DPF((PVR_DBG_ERROR, "Page list PMR allows CPU mapping (0x%08x)", uiFlags));
+		eError = PVRSRV_ERROR_DEVICEMEM_INVALID_PMR_FLAGS;
+        goto e0;
+    }
+
+	if (_PMRIsSparse(psPageListPMR))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PageList PMR is sparse"));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto e0;
+	}
+
+	if (_PMRIsSparse(psReferencePMR))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Reference PMR is sparse"));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto e0;
+	}
+
+	psPageList = OSAllocMem(sizeof(PMR_PAGELIST));
+	if (psPageList == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Failed to allocate PMR page list"));
+		goto e0;
+	}
+	psPageList->psReferencePMR = psReferencePMR;
+
+    /* Need to lock down the physical addresses of the reference PMR */
+    /* N.B.  This also checks that the requested "contiguity" is achievable */
+    eError = PMRLockSysPhysAddresses(psReferencePMR,
+                                     uiLog2PageSize);
+    if(eError != PVRSRV_OK)
+    {
+        goto e1;
+    }
+
+#if !defined(NO_HARDWARE)
+    if (uiNumPages > PMR_MAX_TRANSLATION_STACK_ALLOC)
+	{
+	    pasDevAddrPtr = OSAllocMem(uiNumPages * sizeof(IMG_DEV_PHYADDR));
+		if (pasDevAddrPtr == IMG_NULL)
+		{
+			 PVR_DPF((PVR_DBG_ERROR, "Failed to allocate PMR page list"));
+			 eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			 goto e2;
+		}
+
+		pbPageIsValid = OSAllocMem(uiNumPages * sizeof(IMG_BOOL));
+		if (pbPageIsValid == IMG_NULL)
+		{
+			/* Clean-up before exit */
+			 OSFreeMem(pasDevAddrPtr);
+
+			 PVR_DPF((PVR_DBG_ERROR, "Failed to allocate PMR page state"));
+			 eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			 goto e2;
+		}
+	}
+	else
+	{
+		pasDevAddrPtr = asDevPAddr;
+		pbPageIsValid = abValid;
+	}
+	
+	
+	eError = PMR_DevPhysAddr(psReferencePMR, uiLog2PageSize, uiNumPages, 0,
+							 pasDevAddrPtr, pbPageIsValid);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Failed to map PMR pages into device physical addresses"));
+		goto e3;
+	}	
+#endif
+
+    for (uiPageIndex = 0; uiPageIndex < uiNumPages; uiPageIndex++)
+    {
+#if !defined(NO_HARDWARE)
+        IMG_DEV_PHYADDR sOldDevAddrPtr = {1}; //Set to non-aligned non-valid page
+#endif
+        IMG_DEVMEM_OFFSET_T uiPMROffset = uiTableOffset + (uiWordSize * uiPageIndex);
+#if defined(PDUMP)
+        eError = PMR_PDumpSymbolicAddr(psPageListPMR,
+                                       uiPMROffset,
+                                       sizeof(aszTableEntryMemspaceName),
+                                       &aszTableEntryMemspaceName[0],
+                                       sizeof(aszTableEntrySymbolicName),
+                                       &aszTableEntrySymbolicName[0],
+                                       &uiTableEntryPDumpOffset,
+                                       &uiNextSymName);
+        PVR_ASSERT(eError == PVRSRV_OK);
+
+        eError = PMR_PDumpSymbolicAddr(psReferencePMR,
+                                       (IMG_DEVMEM_OFFSET_T)uiPageIndex << uiLog2PageSize,
+                                       sizeof(aszPageMemspaceName),
+                                       &aszPageMemspaceName[0],
+                                       sizeof(aszPageSymbolicName),
+                                       &aszPageSymbolicName[0],
+                                       &uiPagePDumpOffset,
+                                       &uiNextSymName);
+        PVR_ASSERT(eError == PVRSRV_OK);
+
+        eError = PDumpWriteShiftedMaskedValue(/* destination */
+                                              aszTableEntryMemspaceName,
+                                              aszTableEntrySymbolicName,
+                                              uiTableEntryPDumpOffset,
+                                              /* source */
+                                              aszPageMemspaceName,
+                                              aszPageSymbolicName,
+                                              uiPagePDumpOffset,
+                                              /* shift right */
+                                              uiLog2PageSize,
+                                              /* shift left */
+                                              0,
+                                              /* mask */
+                                              0xffffffff,
+                                              /* word size */
+                                              uiWordSize,
+                                              /* flags */
+                                              PDUMP_FLAGS_CONTINUOUS);
+        PVR_ASSERT(eError == PVRSRV_OK);
+#else
+		PVR_UNREFERENCED_PARAMETER(uiPMROffset);
+#endif
+#if !defined(NO_HARDWARE)
+
+		/*
+			We check for sparse PMR's at function entry, but as we can,
+			check that every page is valid
+		*/
+		PVR_ASSERT(pbPageIsValid[uiPageIndex]);
+
+        uiPageListPMRPage = uiPMROffset >> psReferencePMR->uiLog2ContiguityGuarantee;
+
+        if ((bPageIsMapped == IMG_FALSE) || (uiPageListPMRPage != uiPrevPageListPMRPage))
+        {
+            IMG_SIZE_T uiMappingOffset = uiPMROffset & (~(uiPageListPageSize - 1));
+            IMG_SIZE_T uiMappedSize;
+
+            if (bPageIsMapped == IMG_TRUE)
+            {
+                PMRReleaseKernelMappingData(psPageListPMR, hPrivData);
+            }
+
+            eError = PMRAcquireKernelMappingData(psPageListPMR,
+                                                 uiMappingOffset,
+                                                 uiPageListPageSize,
+                                                 &pvKernAddr,
+                                                 &uiMappedSize,
+                                                 &hPrivData);
+            if (eError != PVRSRV_OK)
+            {
+                PVR_DPF((PVR_DBG_ERROR, "Error mapping page list PMR page (%llu) into kernel (%d)",
+                         uiPageListPMRPage, eError));
+                goto e3;
+            }
+
+            bPageIsMapped = IMG_TRUE;
+            uiPrevPageListPMRPage = uiPageListPMRPage;
+            PVR_ASSERT(uiMappedSize >= uiPageListPageSize);
+            PVR_ASSERT(pvKernAddr != IMG_NULL);
+        }
+
+        PVR_ASSERT(((pasDevAddrPtr[uiPageIndex].uiAddr >> uiLog2PageSize) & 0xFFFFFFFF00000000ll) == 0);
+
+        /* Write the physcial page index into the page list PMR */
+        pui32DataPtr = (IMG_UINT32 *) (((IMG_CHAR *) pvKernAddr) + (uiPMROffset & (uiPageListPageSize - 1)));
+        *pui32DataPtr = TRUNCATE_64BITS_TO_32BITS(pasDevAddrPtr[uiPageIndex].uiAddr >> uiLog2PageSize);
+        ui32CheckSumXor ^= TRUNCATE_64BITS_TO_32BITS(pasDevAddrPtr[uiPageIndex].uiAddr >> uiLog2PageSize);
+        ui32CheckSumAdd += TRUNCATE_64BITS_TO_32BITS(pasDevAddrPtr[uiPageIndex].uiAddr >> uiLog2PageSize);
+        PVR_ASSERT(pasDevAddrPtr[uiPageIndex].uiAddr != 0);
+        PVR_ASSERT(pasDevAddrPtr[uiPageIndex].uiAddr != sOldDevAddrPtr.uiAddr);
+        sOldDevAddrPtr.uiAddr = pasDevAddrPtr[uiPageIndex].uiAddr;
+        /* Last page so unmap */
+        if (uiPageIndex == (uiNumPages - 1))
+        {
+            PMRReleaseKernelMappingData(psPageListPMR, hPrivData);
+        }
+#endif
+    }
+
+#if !defined(NO_HARDWARE)
+    if (pasDevAddrPtr != asDevPAddr)
+	{
+		OSFreeMem(pbPageIsValid);
+		OSFreeMem(pasDevAddrPtr);
+	}
+#endif
+    *pui64CheckSum = ((IMG_UINT64)ui32CheckSumXor << 32) | ui32CheckSumAdd;
+    *ppsPageList = psPageList;
+    return PVRSRV_OK;
+
+    /*
+      error exit paths follow
+    */
+#if !defined(NO_HARDWARE)
+e3: 
+    if (pasDevAddrPtr != asDevPAddr)
+	{
+		OSFreeMem(pbPageIsValid);  
+		OSFreeMem(pasDevAddrPtr);
+	}
+ e2:
+   PMRUnlockSysPhysAddresses(psReferencePMR);
+#endif
+ e1:
+	OSFreeMem(psPageList);
+ e0:
+    PVR_ASSERT(eError != PVRSRV_OK);
+    return eError;
+}
+
+
+PVRSRV_ERROR /* FIXME: should be IMG_VOID */
+PMRUnwritePMPageList(PMR_PAGELIST *psPageList)
+{
+    PVRSRV_ERROR eError2;
+
+    eError2 = PMRUnlockSysPhysAddresses(psPageList->psReferencePMR);
+    PVR_ASSERT(eError2 == PVRSRV_OK);
+	OSFreeMem(psPageList);
+
+    return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PMRZeroingPMR(PMR *psPMR,
+				IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize)
+{
+    IMG_UINT32 uiNumPages;
+    IMG_UINT32 uiPageIndex;
+    IMG_UINT32 ui32PageSize = 1 << uiLog2PageSize;
+    IMG_HANDLE hPrivData = IMG_NULL;
+    IMG_VOID *pvKernAddr = IMG_NULL;
+    PVRSRV_ERROR eError = PVRSRV_OK;
+    IMG_SIZE_T uiMapedSize;
+
+    PVR_ASSERT(psPMR);
+
+    /* Calculate number of pages in this PMR */
+	uiNumPages = (IMG_UINT32)(psPMR->uiLogicalSize >> uiLog2PageSize);
+
+	/* Verify the logical Size is a multiple or the physical page size */
+    if ((PMR_SIZE_T)uiNumPages << uiLog2PageSize != psPMR->uiLogicalSize)
+    {
+		PVR_DPF((PVR_DBG_ERROR, "PMRZeroingPMR: PMR is not a multiple of %u",ui32PageSize));
+        eError = PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE;
+        goto MultiPage_Error;
+    }
+
+	if (_PMRIsSparse(psPMR))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PMRZeroingPMR: PMR is sparse"));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto Sparse_Error;
+	}
+
+	/* Scan through all pages of the PMR */
+    for (uiPageIndex = 0; uiPageIndex < uiNumPages; uiPageIndex++)
+    {
+        /* map the physical page (for a given PMR offset) into kernel space */
+        eError = PMRAcquireKernelMappingData(psPMR,
+                                             (IMG_SIZE_T)uiPageIndex << uiLog2PageSize,
+                                             ui32PageSize,
+                                             &pvKernAddr,
+                                             &uiMapedSize,
+                                             &hPrivData);
+        if (eError != PVRSRV_OK)
+        {
+    		PVR_DPF((PVR_DBG_ERROR, "PMRZeroingPMR: AcquireKernelMapping failed with error %u", eError));
+        	goto AcquireKernelMapping_Error;
+        }
+
+        /* ensure the mapped page size is the same as the physical page size */
+        if (uiMapedSize != ui32PageSize)
+        {
+    		PVR_DPF((PVR_DBG_ERROR, "PMRZeroingPMR: Physical Page size = 0x%08x, Size of Mapping = 0x%016llx",
+    								ui32PageSize,
+    								(IMG_UINT64)uiMapedSize));
+    		eError = PVRSRV_ERROR_INVALID_PARAMS;
+        	goto MappingSize_Error;
+        }
+
+        /* zeroing page content */
+        OSMemSet(pvKernAddr, 0, ui32PageSize);
+
+        /* release mapping */
+        PMRReleaseKernelMappingData(psPMR, hPrivData);
+
+    }
+
+    PVR_DPF((PVR_DBG_WARNING,"PMRZeroingPMR: Zeroing PMR %p done (num pages %u, page size %u)",
+    						psPMR,
+    						uiNumPages,
+    						ui32PageSize));
+
+    return PVRSRV_OK;
+
+
+    /* Error handling */
+
+MappingSize_Error:
+	PMRReleaseKernelMappingData(psPMR, hPrivData);
+
+AcquireKernelMapping_Error:
+Sparse_Error:
+MultiPage_Error:
+
+    PVR_ASSERT(eError != PVRSRV_OK);
+    return eError;
+}
+
+PVRSRV_ERROR
+PMRDumpPageList(PMR *psPMR,
+					IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize)
+{
+    IMG_DEV_PHYADDR sDevAddrPtr;
+    IMG_UINT32 uiNumPages;
+    IMG_UINT32 uiPageIndex;
+    IMG_BOOL bPageIsValid;
+    IMG_UINT32 ui32Col = 16;
+    IMG_UINT32 ui32SizePerCol = 11;
+    IMG_UINT32 ui32ByteCount = 0;
+    IMG_CHAR pszBuffer[16 /* ui32Col */ * 11 /* ui32SizePerCol */ + 1];
+    PVRSRV_ERROR eError = PVRSRV_OK;
+
+    /* Get number of pages */
+	uiNumPages = (IMG_UINT32)(psPMR->uiLogicalSize >> uiLog2PageSize);
+
+	/* Verify the logical Size is a multiple or the physical page size */
+    if ((PMR_SIZE_T)uiNumPages << uiLog2PageSize != psPMR->uiLogicalSize)
+    {
+		PVR_DPF((PVR_DBG_ERROR, "PMRPrintPageList: PMR is not a multiple of %u", 1 << uiLog2PageSize));
+        eError = PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE;
+        goto MultiPage_Error;
+    }
+
+	if (_PMRIsSparse(psPMR))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PMRPrintPageList: PMR is sparse"));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto Sparse_Error;
+	}
+
+	PVR_LOG(("    PMR %p, Number of pages %u, Log2PageSize %d", psPMR, uiNumPages, uiLog2PageSize));
+
+	/* Print the address of the physical pages */
+    for (uiPageIndex = 0; uiPageIndex < uiNumPages; uiPageIndex++)
+    {
+    	/* Get Device physical Address */
+        eError = PMR_DevPhysAddr(psPMR,
+                        uiLog2PageSize,
+                        1,
+                        (IMG_DEVMEM_OFFSET_T)uiPageIndex << uiLog2PageSize,
+                        &sDevAddrPtr,
+                        &bPageIsValid);
+        if (eError != PVRSRV_OK)
+        {
+    		PVR_DPF((PVR_DBG_ERROR, "PMRPrintPageList: PMR %p failed to get DevPhysAddr with error %u",
+    								psPMR,
+    								eError));
+        	goto DevPhysAddr_Error;
+        }
+
+        ui32ByteCount += OSSNPrintf(pszBuffer + ui32ByteCount, ui32SizePerCol + 1, "%08x ", (IMG_UINT32)(sDevAddrPtr.uiAddr >> uiLog2PageSize));
+        PVR_ASSERT(ui32ByteCount < ui32Col * ui32SizePerCol);
+
+		if (uiPageIndex % ui32Col == ui32Col -1)
+		{
+			PVR_LOG(("      Phys Page: %s", pszBuffer));
+			ui32ByteCount = 0;
+		}
+    }
+    if (ui32ByteCount > 0)
+    {
+		PVR_LOG(("      Phys Page: %s", pszBuffer));
+    }
+
+    return PVRSRV_OK;
+
+    /* Error handling */
+DevPhysAddr_Error:
+Sparse_Error:
+MultiPage_Error:
+    PVR_ASSERT(eError != PVRSRV_OK);
+    return eError;
+}
+
+#if defined(PDUMP)
+extern PVRSRV_ERROR
+PMRPDumpPol32(const PMR *psPMR,
+              IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+              IMG_UINT32 ui32Value,
+              IMG_UINT32 ui32Mask,
+              PDUMP_POLL_OPERATOR eOperator,
+              PDUMP_FLAGS_T uiPDumpFlags)
+{
+    PVRSRV_ERROR eError;
+    IMG_CHAR aszMemspaceName[100];
+    IMG_CHAR aszSymbolicName[100];
+    IMG_DEVMEM_OFFSET_T uiPDumpOffset;
+    IMG_DEVMEM_OFFSET_T uiNextSymName;
+
+    eError = PMR_PDumpSymbolicAddr(psPMR,
+                                   uiLogicalOffset,
+                                   sizeof(aszMemspaceName),
+                                   &aszMemspaceName[0],
+                                   sizeof(aszSymbolicName),
+                                   &aszSymbolicName[0],
+                                   &uiPDumpOffset,
+				   &uiNextSymName);
+    if (eError != PVRSRV_OK)
+    {
+        goto e0;
+    }
+
+#define _MEMPOLL_DELAY		(1000)
+#define _MEMPOLL_COUNT		(2000000000 / _MEMPOLL_DELAY)
+
+    eError = PDumpPMRPOL(aszMemspaceName,
+                         aszSymbolicName,
+                         uiPDumpOffset,
+                         ui32Value,
+                         ui32Mask,
+                         eOperator,
+                         _MEMPOLL_COUNT,
+                         _MEMPOLL_DELAY,
+                         uiPDumpFlags);
+    if (eError != PVRSRV_OK)
+    {
+        goto e0;
+    }
+
+    return PVRSRV_OK;
+
+    /*
+      error exit paths follow
+    */
+
+ e0:
+    PVR_ASSERT(eError != PVRSRV_OK);
+    return eError;
+}
+
+PVRSRV_ERROR
+PMRPDumpCBP(const PMR *psPMR,
+            IMG_DEVMEM_OFFSET_T uiReadOffset,
+            IMG_DEVMEM_OFFSET_T uiWriteOffset,
+            IMG_DEVMEM_SIZE_T uiPacketSize,
+            IMG_DEVMEM_SIZE_T uiBufferSize)
+{
+    PVRSRV_ERROR eError;
+    IMG_CHAR aszMemspaceName[100];
+    IMG_CHAR aszSymbolicName[100];
+    IMG_DEVMEM_OFFSET_T uiPDumpOffset;
+    IMG_DEVMEM_OFFSET_T uiNextSymName;
+
+    eError = PMR_PDumpSymbolicAddr(psPMR,
+                                   uiReadOffset,
+                                   sizeof(aszMemspaceName),
+                                   &aszMemspaceName[0],
+                                   sizeof(aszSymbolicName),
+                                   &aszSymbolicName[0],
+                                   &uiPDumpOffset,
+                                   &uiNextSymName);
+    if (eError != PVRSRV_OK)
+    {
+        goto e0;
+    }
+
+    eError = PDumpPMRCBP(aszMemspaceName,
+                         aszSymbolicName,
+                         uiPDumpOffset,
+                         uiWriteOffset,
+                         uiPacketSize,
+                         uiBufferSize);
+    if (eError != PVRSRV_OK)
+    {
+        goto e0;
+    }
+
+    return PVRSRV_OK;
+
+    /*
+      error exit paths follow
+    */
+
+ e0:
+    PVR_ASSERT(eError != PVRSRV_OK);
+    return eError;
+}
+#endif
+
+PVRSRV_ERROR
+PMRInit()
+{
+	PVRSRV_ERROR eError;
+
+    if (_gsSingletonPMRContext.bModuleInitialised)
+    {
+        PVR_DPF((PVR_DBG_ERROR, "pmr.c:  oops, already initialized"));
+        return PVRSRV_ERROR_PMR_UNRECOVERABLE_ERROR;
+    }
+
+	eError = OSLockCreate(&_gsSingletonPMRContext.hLock, LOCK_TYPE_PASSIVE);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	eError = OSLockCreate(&gGlobalLookupPMRLock, LOCK_TYPE_PASSIVE);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+    _gsSingletonPMRContext.uiNextSerialNum = 1;
+
+    _gsSingletonPMRContext.uiNextKey = 0x8300f001 * (IMG_UINTPTR_T)&_gsSingletonPMRContext;
+
+    _gsSingletonPMRContext.bModuleInitialised = IMG_TRUE;
+
+    _gsSingletonPMRContext.uiNumLivePMRs = 0;
+
+    return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PMRDeInit()
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+	if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK)
+	{
+		return PVRSRV_OK;
+	}
+
+    PVR_ASSERT(_gsSingletonPMRContext.bModuleInitialised);
+    if (!_gsSingletonPMRContext.bModuleInitialised)
+    {
+        PVR_DPF((PVR_DBG_ERROR, "pmr.c:  oops, not initialized"));
+        return PVRSRV_ERROR_PMR_UNRECOVERABLE_ERROR;
+    }
+
+    PVR_ASSERT(_gsSingletonPMRContext.uiNumLivePMRs == 0);
+    if (_gsSingletonPMRContext.uiNumLivePMRs != 0)
+    {
+        PVR_DPF((PVR_DBG_ERROR, "pmr.c:  %d live PMR(s) remain(s)", _gsSingletonPMRContext.uiNumLivePMRs));
+        PVR_DPF((PVR_DBG_ERROR, "pmr.c:  This is an unrecoverable error; a subsequent crash is inevitable"));
+        return PVRSRV_ERROR_PMR_UNRECOVERABLE_ERROR;
+    }
+
+	OSLockDestroy(_gsSingletonPMRContext.hLock);
+	OSLockDestroy(gGlobalLookupPMRLock);
+
+    _gsSingletonPMRContext.bModuleInitialised = IMG_FALSE;
+
+    /*
+      FIXME:
+
+      should deinitialise the mutex here
+    */
+
+    return PVRSRV_OK;
+}
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/power.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/power.c
new file mode 100644
index 0000000..d6e7fa4
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/power.c
@@ -0,0 +1,1290 @@
+/*************************************************************************/ /*!
+@File           power.c
+@Title          Power management functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Main APIs for power management functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pdump_km.h"
+#include "allocmem.h"
+#include "osfunc.h"
+
+#include "lists.h"
+#include "pvrsrv.h"
+#include "pvr_debug.h"
+#include "process_stats.h"
+
+static IMG_BOOL gbInitServerRunning = IMG_FALSE;
+static IMG_BOOL gbInitServerRan = IMG_FALSE;
+static IMG_BOOL gbInitSuccessful = IMG_FALSE;
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVSetInitServerState
+
+ @Description	Sets given services init state.
+
+ @Input		eInitServerState : a services init state
+ @Input		bState : a state to set
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_STATE eInitServerState, IMG_BOOL bState)
+{
+
+	switch(eInitServerState)
+	{
+		case PVRSRV_INIT_SERVER_RUNNING:
+			gbInitServerRunning	= bState;
+			break;
+		case PVRSRV_INIT_SERVER_RAN:
+			gbInitServerRan	= bState;
+			break;
+		case PVRSRV_INIT_SERVER_SUCCESSFUL:
+			gbInitSuccessful = bState;
+			break;
+		default:
+			PVR_DPF((PVR_DBG_ERROR,
+				"PVRSRVSetInitServerState : Unknown state %x", eInitServerState));
+			return PVRSRV_ERROR_UNKNOWN_INIT_SERVER_STATE;
+	}
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVGetInitServerState
+
+ @Description	Tests whether a given services init state was run.
+
+ @Input		eInitServerState : a services init state
+
+ @Return	IMG_BOOL
+
+******************************************************************************/
+IMG_EXPORT
+IMG_BOOL PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_STATE eInitServerState)
+{
+	IMG_BOOL	bReturnVal;
+
+	switch(eInitServerState)
+	{
+		case PVRSRV_INIT_SERVER_RUNNING:
+			bReturnVal = gbInitServerRunning;
+			break;
+		case PVRSRV_INIT_SERVER_RAN:
+			bReturnVal = gbInitServerRan;
+			break;
+		case PVRSRV_INIT_SERVER_SUCCESSFUL:
+			bReturnVal = gbInitSuccessful;
+			break;
+		default:
+			PVR_DPF((PVR_DBG_ERROR,
+				"PVRSRVGetInitServerState : Unknown state %x", eInitServerState));
+			bReturnVal = IMG_FALSE;
+	}
+
+	return bReturnVal;
+}
+
+/*!
+******************************************************************************
+
+ @Function	_IsSystemStatePowered
+
+ @Description	Tests whether a given system state represents powered-up.
+
+ @Input		eSystemPowerState : a system power state
+
+ @Return	IMG_BOOL
+
+******************************************************************************/
+static IMG_BOOL _IsSystemStatePowered(PVRSRV_SYS_POWER_STATE eSystemPowerState)
+{
+	return (eSystemPowerState == PVRSRV_SYS_POWER_STATE_ON);
+}
+
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVPowerLock
+
+ @Description	Obtain the mutex for power transitions. Only allowed when
+                system power is on.
+
+ @Return	PVRSRV_ERROR_RETRY or PVRSRV_OK
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVPowerLock()
+{
+	PVRSRV_ERROR	eError;
+	PVRSRV_DATA		*psPVRSRVData = PVRSRVGetPVRSRVData();
+
+	/* Only allow to take powerlock when the system power is on */
+	if (_IsSystemStatePowered(psPVRSRVData->eCurrentPowerState))
+	{
+		OSLockAcquire(psPVRSRVData->hPowerLock);
+		eError = PVRSRV_OK;
+	}
+	else
+	{
+		eError = PVRSRV_ERROR_RETRY;
+	}
+
+	return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVForcedPowerLock
+
+ @Description	Obtain the mutex for power transitions regardless of
+                system power state
+
+ @Return	PVRSRV_ERROR_RETRY or PVRSRV_OK
+
+******************************************************************************/
+IMG_EXPORT
+IMG_VOID PVRSRVForcedPowerLock()
+{
+	PVRSRV_DATA		*psPVRSRVData = PVRSRVGetPVRSRVData();
+	OSLockAcquire(psPVRSRVData->hPowerLock);
+}
+
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVPowerUnlock
+
+ @Description	Release the mutex for power transitions
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+IMG_EXPORT
+IMG_VOID PVRSRVPowerUnlock()
+{
+	PVRSRV_DATA	*psPVRSRVData = PVRSRVGetPVRSRVData();
+
+	OSLockRelease(psPVRSRVData->hPowerLock);
+}
+IMG_EXPORT
+IMG_BOOL PVRSRVDeviceIsDefaultStateOFF(PVRSRV_POWER_DEV *psPowerDevice)
+{
+	return (psPowerDevice->eDefaultPowerState == PVRSRV_DEV_POWER_STATE_OFF);
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVDevicePrePowerStateKM_AnyVaCb
+
+ @Description
+
+ Perform device-specific processing required before a power transition
+
+ @Input		psPowerDevice : the device
+ @Input		va : variable argument list with:
+ 				bAllDevices : IMG_TRUE - All devices
+ 						  	  IMG_FALSE - Use ui32DeviceIndex
+				ui32DeviceIndex : device index
+				eNewPowerState : New power state
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR PVRSRVDevicePrePowerStateKM_AnyVaCb(PVRSRV_POWER_DEV *psPowerDevice, va_list va)
+{
+	PVRSRV_DEV_POWER_STATE	eNewDevicePowerState;
+	PVRSRV_ERROR			eError;
+
+	/*Variable Argument variables*/
+	IMG_BOOL				bAllDevices;
+	IMG_UINT32				ui32DeviceIndex;
+	PVRSRV_DEV_POWER_STATE	eNewPowerState;
+	IMG_BOOL				bForced;
+	IMG_UINT64				ui32SysTimer1=0, ui32SysTimer2=0, ui32DevTimer1=0, ui32DevTimer2=0;
+
+	/*WARNING! if types were not aligned to 4 bytes, this could be dangerous!!!*/
+	bAllDevices = va_arg(va, IMG_BOOL);
+	ui32DeviceIndex = va_arg(va, IMG_UINT32);
+	eNewPowerState = va_arg(va, PVRSRV_DEV_POWER_STATE);
+	bForced = va_arg(va, IMG_BOOL);
+
+	if (bAllDevices || (ui32DeviceIndex == psPowerDevice->ui32DeviceIndex))
+	{
+		eNewDevicePowerState = (eNewPowerState == PVRSRV_DEV_POWER_STATE_DEFAULT) ?
+							psPowerDevice->eDefaultPowerState : eNewPowerState;
+
+		if (psPowerDevice->eCurrentPowerState != eNewDevicePowerState)
+		{
+			if (psPowerDevice->pfnDevicePrePower != IMG_NULL)
+			{
+				/* Call the device's power callback. */
+
+				ui32DevTimer1=OSClockns64();
+
+				eError = psPowerDevice->pfnDevicePrePower(psPowerDevice->hDevCookie,
+															eNewDevicePowerState,
+															psPowerDevice->eCurrentPowerState,
+															bForced);
+
+				ui32DevTimer2=OSClockns64();
+
+				if (eError != PVRSRV_OK)
+				{
+					return eError;
+				}
+			}
+
+			/* Do any required system-layer processing. */
+			if (psPowerDevice->pfnSystemPrePower != IMG_NULL)
+			{
+
+				ui32SysTimer1=OSClockus();
+
+				eError = psPowerDevice->pfnSystemPrePower(eNewDevicePowerState,
+														  psPowerDevice->eCurrentPowerState,
+														  bForced);
+
+				ui32SysTimer2=OSClockus();
+
+				if (eError != PVRSRV_OK)
+				{
+					return eError;
+				}
+			}
+		}
+	}
+
+
+    InsertPowerTimeStatistic(PVRSRV_POWER_ENTRY_TYPE_PRE,
+			psPowerDevice->eCurrentPowerState, eNewPowerState,
+            ui32SysTimer1,ui32SysTimer2,
+			ui32DevTimer1,ui32DevTimer2,
+			bForced);
+
+
+	return  PVRSRV_OK;
+}
+/*!
+******************************************************************************
+
+ @Function	PVRSRVDeviceIdleKM_AnyVaCb
+
+ @Description
+
+ Perform device-specific processing required to force the device idle.
+
+ @Input		psPowerDevice : the device
+ @Input		va : variable argument list with:
+				bAllDevices : 	IMG_TRUE - All devices
+						IMG_FALSE - Use ui32DeviceIndex
+				ui32DeviceIndex : device index
+				pfnCheckIdleReq : Filter function used to determine whether a forced idle is required for the device
+				bDeviceOffPermitted :	IMG_TRUE if the transition should not fail if device off
+							IMG_FALSE if the transition should fail if device off
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR PVRSRVDeviceIdleKM_AnyVaCb(PVRSRV_POWER_DEV *psPowerDevice, va_list va)
+{
+	PVRSRV_ERROR			eError = PVRSRV_OK;
+
+	/*Variable Argument variables*/
+	IMG_BOOL				bAllDevices;
+	IMG_UINT32				ui32DeviceIndex;
+	PFN_SYS_DEV_IS_DEFAULT_STATE_OFF	pfnIsDefaultStateOff;
+	IMG_BOOL				bDeviceOffPermitted;
+
+	/*WARNING! if types were not aligned to 4 bytes, this could be dangerous!!!*/
+	bAllDevices = va_arg(va, IMG_BOOL);
+	ui32DeviceIndex = va_arg(va, IMG_UINT32);
+	pfnIsDefaultStateOff = va_arg(va, PFN_SYS_DEV_IS_DEFAULT_STATE_OFF);
+	bDeviceOffPermitted = va_arg(va, IMG_BOOL);
+
+	if (bAllDevices || (ui32DeviceIndex == psPowerDevice->ui32DeviceIndex))
+	{
+		if (psPowerDevice->pfnForcedIdleRequest != IMG_NULL)
+		{
+			if ((pfnIsDefaultStateOff == IMG_NULL) || pfnIsDefaultStateOff(psPowerDevice))
+			{
+				eError = psPowerDevice->pfnForcedIdleRequest(psPowerDevice->hDevCookie, bDeviceOffPermitted);
+			}
+		}
+	}
+
+	return eError;
+}
+/*!
+******************************************************************************
+
+ @Function	PVRSRVDeviceIdleRequestKM
+
+ @Description
+
+ Perform device-specific processing required to force the device idle.
+
+ @Input		bAllDevices : 	IMG_TRUE - All devices
+				IMG_FALSE - Use ui32DeviceIndex
+ @Input		ui32DeviceIndex : device index
+ @Input		pfnCheckIdleReq : Filter function used to determine whether a forced idle is required for the device
+ @Input		bDeviceOffPermitted :	IMG_TRUE if the transition should not fail if device off
+					IMG_FALSE if the transition should fail if device off
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVDeviceIdleRequestKM(IMG_BOOL					bAllDevices,
+					IMG_UINT32				ui32DeviceIndex,
+					PFN_SYS_DEV_IS_DEFAULT_STATE_OFF	pfnIsDefaultStateOff,
+					IMG_BOOL				bDeviceOffPermitted)
+{
+	PVRSRV_ERROR		eError;
+	PVRSRV_DATA		*psPVRSRVData = PVRSRVGetPVRSRVData();
+
+	/* Loop through the power devices. */
+	eError = List_PVRSRV_POWER_DEV_PVRSRV_ERROR_Any_va(psPVRSRVData->psPowerDeviceList,
+								&PVRSRVDeviceIdleKM_AnyVaCb,
+								bAllDevices,
+								ui32DeviceIndex,
+								pfnIsDefaultStateOff,
+								bDeviceOffPermitted);
+
+	return eError;
+}
+/*!
+******************************************************************************
+
+ @Function	PVRSRVDeviceIdleCancelKM_AnyVaCb
+
+ @Description
+
+ Perform device-specific processing required before a power transition
+
+ @Input		psPowerDevice : the device
+ @Input		va : variable argument list with:
+				bAllDevices : 	IMG_TRUE - All devices
+						IMG_FALSE - Use ui32DeviceIndex
+				ui32DeviceIndex : device index
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR PVRSRVDeviceIdleCancelKM_AnyVaCb(PVRSRV_POWER_DEV *psPowerDevice, va_list va)
+{
+	/*Variable Argument variables*/
+	PVRSRV_ERROR		eError = PVRSRV_OK;
+	IMG_BOOL		bAllDevices;
+	IMG_UINT32		ui32DeviceIndex;
+
+	/*WARNING! if types were not aligned to 4 bytes, this could be dangerous!!!*/
+	bAllDevices = va_arg(va, IMG_BOOL);
+	ui32DeviceIndex = va_arg(va, IMG_UINT32);
+
+	if (bAllDevices || (ui32DeviceIndex == psPowerDevice->ui32DeviceIndex))
+	{
+		if (psPowerDevice->pfnForcedIdleCancelRequest != IMG_NULL)
+		{
+			eError = psPowerDevice->pfnForcedIdleCancelRequest(psPowerDevice->hDevCookie);
+		}
+	}
+
+	return eError;
+}
+/*!
+******************************************************************************
+
+ @Function	PVRSRVDeviceIdleCancelRequestKM
+
+ @Description
+
+ Perform device-specific processing required to cancel the forced idle state on the device, returning to normal operation.
+
+ @Input		bAllDevices : 	IMG_TRUE - All devices
+				IMG_FALSE - Use ui32DeviceIndex
+ @Input		ui32DeviceIndex : device index
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVDeviceIdleCancelRequestKM(IMG_BOOL			bAllDevices,
+						IMG_UINT32		ui32DeviceIndex)
+{
+	PVRSRV_ERROR		eError;
+	PVRSRV_DATA		*psPVRSRVData = PVRSRVGetPVRSRVData();
+
+	/* Loop through the power devices. */
+	eError = List_PVRSRV_POWER_DEV_PVRSRV_ERROR_Any_va(psPVRSRVData->psPowerDeviceList,
+					&PVRSRVDeviceIdleCancelKM_AnyVaCb,
+					bAllDevices,
+					ui32DeviceIndex);
+
+	return eError;
+}
+/*!
+******************************************************************************
+
+ @Function	PVRSRVDevicePrePowerStateKM
+
+ @Description
+
+ Perform device-specific processing required before a power transition
+
+ @Input		bAllDevices : IMG_TRUE - All devices
+ 						  IMG_FALSE - Use ui32DeviceIndex
+ @Input		ui32DeviceIndex : device index
+ @Input		eNewPowerState : New power state
+ @Input		bForced : TRUE if the transition should not fail (e.g. OS request)
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+static
+PVRSRV_ERROR PVRSRVDevicePrePowerStateKM(IMG_BOOL				bAllDevices,
+										 IMG_UINT32				ui32DeviceIndex,
+										 PVRSRV_DEV_POWER_STATE	eNewPowerState,
+										 IMG_BOOL				bForced)
+{
+	PVRSRV_ERROR		eError;
+	PVRSRV_DATA			*psPVRSRVData = PVRSRVGetPVRSRVData();
+
+	/* Loop through the power devices. */
+	eError = List_PVRSRV_POWER_DEV_PVRSRV_ERROR_Any_va(psPVRSRVData->psPowerDeviceList,
+														&PVRSRVDevicePrePowerStateKM_AnyVaCb,
+														bAllDevices,
+														ui32DeviceIndex,
+														eNewPowerState,
+														bForced);
+
+	return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVDevicePostPowerStateKM_AnyVaCb
+
+ @Description
+
+ Perform device-specific processing required after a power transition
+
+ @Input		psPowerDevice : the device
+ @Input		va : variable argument list with:
+ 				bAllDevices : IMG_TRUE - All devices
+ 						  	  IMG_FALSE - Use ui32DeviceIndex
+				ui32DeviceIndex : device index
+				eNewPowerState : New power state
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR PVRSRVDevicePostPowerStateKM_AnyVaCb(PVRSRV_POWER_DEV *psPowerDevice, va_list va)
+{
+	PVRSRV_DEV_POWER_STATE	eNewDevicePowerState;
+	PVRSRV_ERROR			eError;
+
+	/*Variable Argument variables*/
+	IMG_BOOL				bAllDevices;
+	IMG_UINT32				ui32DeviceIndex;
+	PVRSRV_DEV_POWER_STATE	eNewPowerState;
+	IMG_BOOL				bForced;
+	IMG_UINT64				ui32SysTimer1=0, ui32SysTimer2=0, ui32DevTimer1=0, ui32DevTimer2=0;
+
+	/*WARNING! if types were not aligned to 4 bytes, this could be dangerous!!!*/
+	bAllDevices = va_arg(va, IMG_BOOL);
+	ui32DeviceIndex = va_arg(va, IMG_UINT32);
+	eNewPowerState = va_arg(va, PVRSRV_DEV_POWER_STATE);
+	bForced = va_arg(va, IMG_BOOL);
+
+	if (bAllDevices || (ui32DeviceIndex == psPowerDevice->ui32DeviceIndex))
+	{
+		eNewDevicePowerState = (eNewPowerState == PVRSRV_DEV_POWER_STATE_DEFAULT) ?
+								psPowerDevice->eDefaultPowerState : eNewPowerState;
+
+		if (psPowerDevice->eCurrentPowerState != eNewDevicePowerState)
+		{
+			/* Do any required system-layer processing. */
+			if (psPowerDevice->pfnSystemPostPower != IMG_NULL)
+			{
+
+				ui32SysTimer1=OSClockns64();
+
+				eError = psPowerDevice->pfnSystemPostPower(eNewDevicePowerState,
+														   psPowerDevice->eCurrentPowerState,
+														   bForced);
+
+				ui32SysTimer2=OSClockns64();
+
+				if (eError != PVRSRV_OK)
+				{
+					return eError;
+				}
+			}
+
+			if (psPowerDevice->pfnDevicePostPower != IMG_NULL)
+			{
+				/* Call the device's power callback. */
+
+				ui32DevTimer1=OSClockus();
+
+				eError = psPowerDevice->pfnDevicePostPower(psPowerDevice->hDevCookie,
+														   eNewDevicePowerState,
+														   psPowerDevice->eCurrentPowerState,
+														   bForced);
+
+				ui32DevTimer2=OSClockus();
+
+				if (eError != PVRSRV_OK)
+				{
+					return eError;
+				}
+			}
+
+			psPowerDevice->eCurrentPowerState = eNewDevicePowerState;
+		}
+	}
+
+
+    InsertPowerTimeStatistic(PVRSRV_POWER_ENTRY_TYPE_POST,
+							psPowerDevice->eCurrentPowerState, eNewPowerState,
+                            ui32SysTimer1,ui32SysTimer2,
+							ui32DevTimer1,ui32DevTimer2,
+							bForced);
+
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVDevicePostPowerStateKM
+
+ @Description
+
+ Perform device-specific processing required after a power transition
+
+ @Input		bAllDevices : IMG_TRUE - All devices
+ 						  IMG_FALSE - Use ui32DeviceIndex
+ @Input		ui32DeviceIndex : device index
+ @Input		eNewPowerState : New power state
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+static
+PVRSRV_ERROR PVRSRVDevicePostPowerStateKM(IMG_BOOL					bAllDevices,
+										  IMG_UINT32				ui32DeviceIndex,
+										  PVRSRV_DEV_POWER_STATE	eNewPowerState,
+										  IMG_BOOL					bForced)
+{
+	PVRSRV_ERROR		eError;
+	PVRSRV_DATA			*psPVRSRVData = PVRSRVGetPVRSRVData();
+
+	/* Loop through the power devices. */
+	eError = List_PVRSRV_POWER_DEV_PVRSRV_ERROR_Any_va(psPVRSRVData->psPowerDeviceList,
+														&PVRSRVDevicePostPowerStateKM_AnyVaCb,
+														bAllDevices,
+														ui32DeviceIndex,
+														eNewPowerState,
+														bForced);
+
+	return eError;
+}
+
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVSetDevicePowerStateKM
+
+ @Description	Set the Device into a new state
+
+ @Input		ui32DeviceIndex : device index
+ @Input		eNewPowerState : New power state
+ @Input		bForced : TRUE if the transition should not fail (e.g. OS request)
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVSetDevicePowerStateKM(IMG_UINT32				ui32DeviceIndex,
+										 PVRSRV_DEV_POWER_STATE	eNewPowerState,
+										 IMG_BOOL				bForced)
+{
+	PVRSRV_ERROR	eError;
+	PVRSRV_DATA*    psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_DEV_POWER_STATE eOldPowerState;
+
+	eError = PVRSRVGetDevicePowerState(ui32DeviceIndex, &eOldPowerState);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "PVRSRVSetDevicePowerStateKM: Couldn't read power state."));
+		eOldPowerState = PVRSRV_DEV_POWER_STATE_DEFAULT;
+	}
+
+	eError = PVRSRVDevicePrePowerStateKM(IMG_FALSE, ui32DeviceIndex, eNewPowerState, bForced);
+	if(eError != PVRSRV_OK)
+	{
+		goto Exit;
+	}
+
+	eError = PVRSRVDevicePostPowerStateKM(IMG_FALSE, ui32DeviceIndex, eNewPowerState, bForced);
+
+	/* Signal Device Watchdog Thread about power mode change. */
+	if (eOldPowerState != eNewPowerState && eNewPowerState == PVRSRV_DEV_POWER_STATE_ON)
+	{
+		psPVRSRVData->ui32DevicesWatchdogPwrTrans++;
+
+		if (psPVRSRVData->ui32DevicesWatchdogTimeout == DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT)
+		{
+			if (psPVRSRVData->hDevicesWatchdogEvObj)
+			{
+				eError = OSEventObjectSignal(psPVRSRVData->hDevicesWatchdogEvObj);
+				PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+			}
+		}
+	}
+
+Exit:
+
+	if (eError == PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED)
+	{
+		PVR_DPF((PVR_DBG_MESSAGE,
+				"PVRSRVSetDevicePowerStateKM : Transition to %d was denied, Forced=%d", eNewPowerState, bForced));
+	}
+	else if(eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_WARNING,
+				"PVRSRVSetDevicePowerStateKM : Transition to %d FAILED (%s)", eNewPowerState, PVRSRVGetErrorStringKM(eError)));
+	}
+	
+	return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVSetPowerStateKM
+
+ @Description	Set the system into a new state
+
+ @Input		eNewPowerState :
+ @Input		bForced : TRUE if the transition should not fail (e.g. OS request)
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE eNewSysPowerState, IMG_BOOL bForced)
+{
+	PVRSRV_ERROR	eError;
+	PVRSRV_DATA		*psPVRSRVData = PVRSRVGetPVRSRVData();
+	IMG_UINT        uiStage = 0;
+
+	PVRSRV_DEV_POWER_STATE eNewDevicePowerState = 
+	  _IsSystemStatePowered(eNewSysPowerState)? PVRSRV_DEV_POWER_STATE_DEFAULT : PVRSRV_DEV_POWER_STATE_OFF;
+
+	/* require a proper power state */
+	if (eNewSysPowerState == PVRSRV_SYS_POWER_STATE_Unspecified)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* Prevent simultaneous SetPowerStateKM calls */
+	PVRSRVForcedPowerLock();
+
+	/* no power transition requested, so do nothing */
+	if (eNewSysPowerState == psPVRSRVData->eCurrentPowerState)
+	{
+		PVRSRVPowerUnlock();
+		return PVRSRV_OK;
+	}
+
+	/* For a forced power down, all devices must be forced idle before being powered off */
+	if (bForced && ((eNewDevicePowerState == PVRSRV_DEV_POWER_STATE_OFF) || (eNewDevicePowerState == PVRSRV_DEV_POWER_STATE_DEFAULT)))
+	{
+		/* If setting devices to default state, selectively force idle all devices whose default state is off */
+		 PFN_SYS_DEV_IS_DEFAULT_STATE_OFF pfnIsDefaultStateOff =
+			(eNewDevicePowerState == PVRSRV_DEV_POWER_STATE_DEFAULT) ? PVRSRVDeviceIsDefaultStateOFF : IMG_NULL;
+
+		LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+		{
+			eError = PVRSRVDeviceIdleRequestKM(IMG_TRUE, 0, pfnIsDefaultStateOff, IMG_TRUE);
+
+			if (eError == PVRSRV_OK)
+			{
+				break;
+			}
+			else if (eError == PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED)
+			{
+				PVRSRVPowerUnlock();
+				OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+				PVRSRVForcedPowerLock();
+			}
+			else
+			{
+				uiStage++;
+				goto ErrorExit;
+			}
+		} END_LOOP_UNTIL_TIMEOUT();
+	}
+
+	/* Perform pre transitions: first device and then sys layer */
+	eError = PVRSRVDevicePrePowerStateKM(IMG_TRUE, 0, eNewDevicePowerState, bForced);
+	if (eError != PVRSRV_OK)
+	{
+		uiStage++;
+		goto ErrorExit;
+	}
+	eError = PVRSRVSysPrePowerState(eNewSysPowerState, bForced);
+	if (eError != PVRSRV_OK)
+	{
+		uiStage++;
+		goto ErrorExit;
+	}
+
+	/* Perform system-specific post power transitions: first sys layer and then device */
+	eError = PVRSRVSysPostPowerState(eNewSysPowerState, bForced);
+	if (eError != PVRSRV_OK)
+	{
+		uiStage++;
+		goto ErrorExit;
+	}
+	eError = PVRSRVDevicePostPowerStateKM(IMG_TRUE, 0, eNewDevicePowerState, bForced);
+	if (eError != PVRSRV_OK)
+	{
+		uiStage++;
+		goto ErrorExit;
+	}
+
+	psPVRSRVData->eCurrentPowerState = eNewSysPowerState;
+	psPVRSRVData->eFailedPowerState = PVRSRV_SYS_POWER_STATE_Unspecified;
+
+	PVRSRVPowerUnlock();
+
+	/*
+		Reprocess the devices' queues in case commands were blocked during
+		the power transition.
+	*/
+	if (_IsSystemStatePowered(eNewSysPowerState) &&
+			PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_SUCCESSFUL))
+	{
+		PVRSRVCheckStatus(IMG_NULL);
+	}
+
+	return PVRSRV_OK;
+
+ErrorExit:
+	/* save the power state for the re-attempt */
+	psPVRSRVData->eFailedPowerState = eNewSysPowerState;
+
+	PVRSRVPowerUnlock();
+
+	PVR_DPF((PVR_DBG_ERROR,
+			"PVRSRVSetPowerStateKM: Transition from %d to %d FAILED (%s) at stage %d, forced: %d. Dumping debug info.",
+			psPVRSRVData->eCurrentPowerState, eNewSysPowerState, PVRSRVGetErrorStringKM(eError), uiStage, bForced));
+
+	PVRSRVDebugRequest(DEBUG_REQUEST_VERBOSITY_MAX, IMG_NULL);
+
+	return eError;
+}
+
+
+PVRSRV_ERROR PVRSRVRegisterPowerDevice(IMG_UINT32					ui32DeviceIndex,
+									   PFN_PRE_POWER				pfnDevicePrePower,
+									   PFN_POST_POWER				pfnDevicePostPower,
+									   PFN_SYS_DEV_PRE_POWER		pfnSystemPrePower,
+									   PFN_SYS_DEV_POST_POWER		pfnSystemPostPower,
+									   PFN_PRE_CLOCKSPEED_CHANGE	pfnPreClockSpeedChange,
+									   PFN_POST_CLOCKSPEED_CHANGE	pfnPostClockSpeedChange,
+									   PFN_FORCED_IDLE_REQUEST	pfnForcedIdleRequest,
+									   PFN_FORCED_IDLE_CANCEL_REQUEST	pfnForcedIdleCancelRequest,
+									   PFN_DUST_COUNT_REQUEST	pfnDustCountRequest,
+									   IMG_HANDLE					hDevCookie,
+									   PVRSRV_DEV_POWER_STATE		eCurrentPowerState,
+									   PVRSRV_DEV_POWER_STATE		eDefaultPowerState)
+{
+	PVRSRV_DATA			*psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_POWER_DEV	*psPowerDevice;
+
+	if (pfnDevicePrePower == IMG_NULL &&
+		pfnDevicePostPower == IMG_NULL)
+	{
+		return PVRSRVRemovePowerDevice(ui32DeviceIndex);
+	}
+
+	psPowerDevice = OSAllocMem(sizeof(PVRSRV_POWER_DEV));
+	if (psPowerDevice == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterPowerDevice: Failed to alloc PVRSRV_POWER_DEV"));
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	/* setup device for power manager */
+	psPowerDevice->pfnDevicePrePower = pfnDevicePrePower;
+	psPowerDevice->pfnDevicePostPower = pfnDevicePostPower;
+	psPowerDevice->pfnSystemPrePower = pfnSystemPrePower;
+	psPowerDevice->pfnSystemPostPower = pfnSystemPostPower;
+	psPowerDevice->pfnPreClockSpeedChange = pfnPreClockSpeedChange;
+	psPowerDevice->pfnPostClockSpeedChange = pfnPostClockSpeedChange;
+	psPowerDevice->pfnForcedIdleRequest = pfnForcedIdleRequest;
+	psPowerDevice->pfnForcedIdleCancelRequest = pfnForcedIdleCancelRequest;
+	psPowerDevice->pfnDustCountRequest = pfnDustCountRequest;
+	psPowerDevice->hDevCookie = hDevCookie;
+	psPowerDevice->ui32DeviceIndex = ui32DeviceIndex;
+	psPowerDevice->eCurrentPowerState = eCurrentPowerState;
+	psPowerDevice->eDefaultPowerState = eDefaultPowerState;
+
+	/* insert into power device list */
+	List_PVRSRV_POWER_DEV_Insert(&(psPVRSRVData->psPowerDeviceList), psPowerDevice);
+
+	return (PVRSRV_OK);
+}
+
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVRemovePowerDevice
+
+ @Description
+
+ Removes device from power management register. Device is located by Device Index
+
+ @Input		ui32DeviceIndex : device index
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRemovePowerDevice (IMG_UINT32 ui32DeviceIndex)
+{
+	PVRSRV_DATA			*psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_POWER_DEV	*psPowerDev;
+
+	/* find device in list and remove it */
+	psPowerDev = (PVRSRV_POWER_DEV*)
+					List_PVRSRV_POWER_DEV_Any_va(psPVRSRVData->psPowerDeviceList,
+												 &MatchPowerDeviceIndex_AnyVaCb,
+												 ui32DeviceIndex);
+
+	if (psPowerDev)
+	{
+		List_PVRSRV_POWER_DEV_Remove(psPowerDev);
+		OSFreeMem(psPowerDev);
+		/*not nulling pointer, copy on stack*/
+	}
+
+	return (PVRSRV_OK);
+}
+
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVGetDevicePowerState
+
+ @Description
+
+	Return the device power state
+
+ @Input		ui32DeviceIndex : device index
+ @Output	psPowerState : Current power state 
+
+ @Return	PVRSRV_ERROR_UNKNOWN_POWER_STATE if device could not be found. PVRSRV_OK otherwise.
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVGetDevicePowerState(IMG_UINT32 ui32DeviceIndex, PPVRSRV_DEV_POWER_STATE pePowerState)
+{
+	PVRSRV_DATA			*psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_POWER_DEV	*psPowerDevice;
+
+	psPowerDevice = (PVRSRV_POWER_DEV*)
+					List_PVRSRV_POWER_DEV_Any_va(psPVRSRVData->psPowerDeviceList,
+												 &MatchPowerDeviceIndex_AnyVaCb,
+												 ui32DeviceIndex);
+	if (psPowerDevice == IMG_NULL)
+	{
+		return PVRSRV_ERROR_UNKNOWN_POWER_STATE;
+	}
+
+	*pePowerState = psPowerDevice->eCurrentPowerState;
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVIsDevicePowered
+
+ @Description
+
+	Whether the device is powered, for the purposes of lockup detection.
+
+ @Input		ui32DeviceIndex : device index
+
+ @Return	IMG_BOOL
+
+******************************************************************************/
+IMG_EXPORT
+IMG_BOOL PVRSRVIsDevicePowered(IMG_UINT32 ui32DeviceIndex)
+{
+	PVRSRV_DATA            *psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_DEV_POWER_STATE ePowerState;
+
+	if (OSLockIsLocked(psPVRSRVData->hPowerLock))
+	{
+		return IMG_FALSE;
+	}
+
+	if (PVRSRVGetDevicePowerState(ui32DeviceIndex, &ePowerState) != PVRSRV_OK)
+	{
+		return IMG_FALSE;
+	}
+
+	return (ePowerState == PVRSRV_DEV_POWER_STATE_ON);
+}
+
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVDevicePreClockSpeedChange
+
+ @Description
+
+	Notification from system layer that a device clock speed change is about to happen.
+
+ @Input		ui32DeviceIndex : device index
+ @Input		bIdleDevice : whether the device should be idled
+ @Input		pvInfo
+
+ @Return	IMG_VOID
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVDevicePreClockSpeedChange(IMG_UINT32	ui32DeviceIndex,
+											 IMG_BOOL	bIdleDevice,
+											 IMG_VOID	*pvInfo)
+{
+	PVRSRV_ERROR		eError = PVRSRV_OK;
+	PVRSRV_DATA			*psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_POWER_DEV	*psPowerDevice;
+	IMG_UINT64			ui64StartTimer, ui64StopTimer;
+
+	PVR_UNREFERENCED_PARAMETER(pvInfo);
+
+	ui64StartTimer = OSClockus();
+
+	/* This mutex is released in PVRSRVDevicePostClockSpeedChange. */
+	eError = PVRSRVPowerLock();
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,	"PVRSRVDevicePreClockSpeedChange : failed to acquire lock, error:0x%x", eError));
+		return eError;
+	}
+
+	/*search the device and then do the pre clock speed change*/
+	psPowerDevice = (PVRSRV_POWER_DEV*)
+					List_PVRSRV_POWER_DEV_Any_va(psPVRSRVData->psPowerDeviceList,
+												 &MatchPowerDeviceIndex_AnyVaCb,
+												 ui32DeviceIndex);
+
+
+	if (psPowerDevice)
+	{
+		if ((psPowerDevice->eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON) && bIdleDevice)
+		{
+			LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+			{	/* We can change the clock speed if the device is either IDLE or OFF */
+				eError = PVRSRVDeviceIdleRequestKM(IMG_FALSE, ui32DeviceIndex, IMG_NULL, IMG_TRUE);
+
+				if (eError == PVRSRV_OK)
+				{
+					break;
+				}
+				else if (eError == PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED)
+				{
+					PVRSRV_ERROR	eError2;
+
+					PVRSRVPowerUnlock();
+					OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+					eError2 = PVRSRVPowerLock();
+
+					if (eError2 != PVRSRV_OK)
+					{
+						PVR_DPF((PVR_DBG_ERROR,	"PVRSRVDevicePreClockSpeedChange : failed to acquire lock, error:0x%x", eError));
+						return eError2;
+					}
+				}
+				else
+				{
+					PVRSRVPowerUnlock();
+					return eError;
+				}
+			} END_LOOP_UNTIL_TIMEOUT();
+		}
+
+		eError = psPowerDevice->pfnPreClockSpeedChange(psPowerDevice->hDevCookie,
+													   psPowerDevice->eCurrentPowerState);
+
+		if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED))
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"PVRSRVDevicePreClockSpeedChange : Device %u failed, error:0x%x",
+					ui32DeviceIndex, eError));
+		}
+	}
+
+	if (eError != PVRSRV_OK)
+	{
+		PVRSRVPowerUnlock();
+		return eError;
+	}
+
+	ui64StopTimer = OSClockus();
+
+	InsertPowerTimeStatisticExtraPre(ui64StartTimer, ui64StopTimer);
+
+	return eError;
+}
+
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVDevicePostClockSpeedChange
+
+ @Description
+
+	Notification from system layer that a device clock speed change has just happened.
+
+ @Input		ui32DeviceIndex : device index
+ @Input		bIdleDevice : whether the device had been idled
+ @Input		pvInfo
+
+ @Return	IMG_VOID
+
+******************************************************************************/
+IMG_VOID PVRSRVDevicePostClockSpeedChange(IMG_UINT32	ui32DeviceIndex,
+										  IMG_BOOL		bIdleDevice,
+										  IMG_VOID		*pvInfo)
+{
+	PVRSRV_ERROR		eError;
+	PVRSRV_DATA			*psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_POWER_DEV	*psPowerDevice;
+	IMG_UINT64			ui64StartTimer, ui64StopTimer;
+
+    PVR_UNREFERENCED_PARAMETER(pvInfo);
+
+	ui64StartTimer = OSClockus();
+
+	/*search the device and then do the post clock speed change*/
+	psPowerDevice = (PVRSRV_POWER_DEV*)
+					List_PVRSRV_POWER_DEV_Any_va(psPVRSRVData->psPowerDeviceList,
+												 &MatchPowerDeviceIndex_AnyVaCb,
+												 ui32DeviceIndex);
+
+	if (psPowerDevice)
+	{
+		eError = psPowerDevice->pfnPostClockSpeedChange(psPowerDevice->hDevCookie,
+														psPowerDevice->eCurrentPowerState);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					"PVRSRVDevicePostClockSpeedChange : Device %u failed, error:0x%x",
+					ui32DeviceIndex, eError));
+		}
+
+		if((psPowerDevice->eCurrentPowerState != PVRSRV_DEV_POWER_STATE_OFF) && bIdleDevice)
+		{
+			eError = PVRSRVDeviceIdleCancelRequestKM(IMG_FALSE, ui32DeviceIndex);
+
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "PVRSRVDevicePostClockSpeedChange : Failed to cancel forced IDLE."));
+			}
+		}
+	}
+
+	/* This mutex was acquired in PVRSRVDevicePreClockSpeedChange. */
+	PVRSRVPowerUnlock();
+
+	ui64StopTimer = OSClockus();
+
+	InsertPowerTimeStatisticExtraPost(ui64StartTimer, ui64StopTimer);
+
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVDeviceDustCountChange
+
+ @Description
+
+	Request from system layer that a dust count change is requested.
+
+ @Input		ui32DeviceIndex : device index
+ @Input		ui32DustCount : dust count to be set
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVDeviceDustCountChange(IMG_UINT32	ui32DeviceIndex,
+						IMG_UINT32	ui32DustCount)
+{
+	PVRSRV_ERROR		eError = PVRSRV_OK;
+	PVRSRV_DATA		*psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_POWER_DEV	*psPowerDevice;
+
+	/*search the device and then do the pre clock speed change*/
+	psPowerDevice = (PVRSRV_POWER_DEV*)
+					List_PVRSRV_POWER_DEV_Any_va(psPVRSRVData->psPowerDeviceList,
+												 &MatchPowerDeviceIndex_AnyVaCb,
+												 ui32DeviceIndex);
+
+	if (psPowerDevice)
+	{
+		eError = PVRSRVPowerLock();
+
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,	"PVRSRVDeviceDustCountChange : failed to acquire lock, error:0x%x", eError));
+			return eError;
+		}
+
+		/* Device must be idle to change dust count  */
+		LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+		{
+			PDUMPPOWCMDSTART();
+			eError = PVRSRVSetDevicePowerStateKM(ui32DeviceIndex,
+								PVRSRV_DEV_POWER_STATE_ON,
+								IMG_FALSE);
+			PDUMPPOWCMDEND();
+
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "PVRSRVDeviceDustCountChange: failed to transition Rogue to ON (%s)",
+							PVRSRVGetErrorStringKM(eError)));
+				goto ErrorExit;
+			}
+
+			eError = PVRSRVDeviceIdleRequestKM(IMG_FALSE, ui32DeviceIndex, IMG_NULL, IMG_FALSE);
+
+			if (eError == PVRSRV_OK)
+			{
+				break;
+			}
+			else if (eError == PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED)
+			{
+				PVRSRV_ERROR	eError2;
+
+				PVRSRVPowerUnlock();
+				OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+				eError2 = PVRSRVPowerLock();
+
+				if (eError2 != PVRSRV_OK)
+				{
+					PVR_DPF((PVR_DBG_ERROR,	"PVRSRVDeviceDustCountChange : failed to acquire lock, error:0x%x", eError));
+					return eError2;
+				}
+			}
+			else
+			{
+				PVR_DPF((PVR_DBG_ERROR,	"PVRSRVDeviceDustCountChange : error occurred whilst forcing idle, error:0x%x", eError));
+				goto ErrorExit;
+			}
+		} END_LOOP_UNTIL_TIMEOUT();
+
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,	"PVRSRVDeviceDustCountChange : timeout occurred attempting to force idle, error:0x%x", eError));
+			goto ErrorExit;
+		}
+
+		if (psPowerDevice->pfnDustCountRequest != IMG_NULL)
+		{
+			PVRSRV_ERROR	eError2 = psPowerDevice->pfnDustCountRequest(psPowerDevice->hDevCookie, ui32DustCount);
+
+			if (eError2 != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+						"PVRSRVDeviceDustCountChange : Device %u failed, error:0x%x",
+						ui32DeviceIndex, eError));
+			}
+		}
+
+		eError = PVRSRVDeviceIdleCancelRequestKM(IMG_FALSE, ui32DeviceIndex);
+
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "PVRSRVDevicePostClockSpeedChange : Failed to cancel forced IDLE."));
+			goto ErrorExit;
+		}
+
+		PVRSRVPowerUnlock();
+	}
+
+	return eError;
+
+ErrorExit:
+	PVRSRVPowerUnlock();
+	return eError;
+}
+
+
+/******************************************************************************
+ End of file (power.c)
+******************************************************************************/
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/process_stats.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/process_stats.c
new file mode 100644
index 0000000..765a6df
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/process_stats.c
@@ -0,0 +1,2426 @@
+/*************************************************************************/ /*!
+@File
+@Title          Process based statistics
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Manages a collection of statistics based around a process
+                and referenced via OS agnostic methods.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "lock.h"
+#include "allocmem.h"
+#include "osfunc.h"
+#include "lists.h"
+#include "process_stats.h"
+#include "ri_server.h"
+#include "hash.h"
+#include "connection_server.h"
+#include "pvrsrv.h"
+
+#define DBGTIMEDIFF(T0, T1)  ((IMG_UINT64) ( (T0) <= (T1) ? (T1) - (T0): IMG_UINT64_MAX - (T0) + (T1) ))
+#define MEAN_TIME(A, B)     ( ((3*(A))/4) + ((1 * (B))/4) )
+
+
+/*
+ *  Maximum history of process statistics that will be kept.
+ */
+#define MAX_DEAD_LIST_PROCESSES  (10)
+
+void *pvOSPowerStatsEntryData=NULL;
+
+
+/*
+ * Definition of all process based statistics and the strings used to
+ * format them.
+ */
+typedef enum
+{
+    /* Stats that are per process... */
+    PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS,
+    PVRSRV_PROCESS_STAT_TYPE_MAX_CONNECTIONS,
+
+    PVRSRV_PROCESS_STAT_TYPE_RC_OOMS,
+    PVRSRV_PROCESS_STAT_TYPE_RC_PRS,
+    PVRSRV_PROCESS_STAT_TYPE_RC_GROWS,
+    PVRSRV_PROCESS_STAT_TYPE_RC_PUSH_GROWS,
+    PVRSRV_PROCESS_STAT_TYPE_RC_TA_STORES,
+    PVRSRV_PROCESS_STAT_TYPE_RC_3D_STORES,
+    PVRSRV_PROCESS_STAT_TYPE_RC_SH_STORES,
+    PVRSRV_PROCESS_STAT_TYPE_RC_CDM_STORES,
+    PVRSRV_PROCESS_STAT_TYPE_ZSBUFFER_REQS_BY_APP,
+    PVRSRV_PROCESS_STAT_TYPE_ZSBUFFER_REQS_BY_FW,
+    PVRSRV_PROCESS_STAT_TYPE_FREELIST_GROW_REQS_BY_APP,
+    PVRSRV_PROCESS_STAT_TYPE_FREELIST_GROW_REQS_BY_FW,
+    PVRSRV_PROCESS_STAT_TYPE_FREELIST_PAGES_INIT,
+    PVRSRV_PROCESS_STAT_TYPE_FREELIST_MAX_PAGES,
+    PVRSRV_PROCESS_STAT_TYPE_KMALLOC,
+    PVRSRV_PROCESS_STAT_TYPE_KMALLOC_MAX,
+    PVRSRV_PROCESS_STAT_TYPE_VMALLOC,
+    PVRSRV_PROCESS_STAT_TYPE_VMALLOC_MAX,
+    PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA,
+    PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA_MAX,
+    PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA,
+    PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA_MAX,
+    PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA,
+    PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA_MAX,
+    PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA,
+    PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA_MAX,
+    PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES,
+    PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES_MAX,
+    PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES,
+    PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES_MAX,
+    PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES,
+    PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES_MAX,
+
+	/* Must be the last enum...*/
+	PVRSRV_PROCESS_STAT_TYPE_COUNT
+} PVRSRV_PROCESS_STAT_TYPE;
+
+
+typedef enum
+{
+    PVRSRV_POWER_TIMING_STAT_FORCED_POWER_TRANSITION=0,
+    PVRSRV_POWER_TIMING_STAT_PRE_DEVICE,
+    PVRSRV_POWER_TIMING_STAT_PRE_SYSTEM,
+    PVRSRV_POWER_TIMING_STAT_POST_DEVICE,
+    PVRSRV_POWER_TIMING_STAT_POST_SYSTEM,
+    PVRSRV_POWER_TIMING_STAT_NEWLINE1,
+    PVRSRV_POWER_TIMING_STAT_NOT_FORCED_POWER_TRANSITION,
+    PVRSRV_POWER_TIMING_STAT_NON_PRE_DEVICE,
+    PVRSRV_POWER_TIMING_STAT_NON_PRE_SYSTEM,
+    PVRSRV_POWER_TIMING_STAT_NON_POST_DEVICE,
+    PVRSRV_POWER_TIMING_STAT_NON_POST_SYSTEM,
+    PVRSRV_POWER_TIMING_STAT_NEWLINE2,
+    PVRSRV_POWER_TIMING_STAT_FW_BOOTUP_TIME,
+    PVRSRV_POWER_TIMING_STAT_HOST_ACK
+} PVR_SRV_OTHER_STAT_TYPE;
+
+
+static IMG_CHAR*  pszProcessStatFmt[PVRSRV_PROCESS_STAT_TYPE_COUNT] = {
+	"Connections                       %10d\n", /* PVRSRV_STAT_TYPE_CONNECTIONS */
+	"ConnectionsMax                    %10d\n", /* PVRSRV_STAT_TYPE_MAXCONNECTIONS */
+
+    "RenderContextOutOfMemoryEvents    %10d\n", /* PVRSRV_PROCESS_STAT_TYPE_RC_OOMS */
+    "RenderContextPartialRenders       %10d\n", /* PVRSRV_PROCESS_STAT_TYPE_RC_PRS */
+    "RenderContextGrows                %10d\n", /* PVRSRV_PROCESS_STAT_TYPE_RC_GROWS */
+    "RenderContextPushGrows            %10d\n", /* PVRSRV_PROCESS_STAT_TYPE_RC_PUSH_GROWS */
+    "RenderContextTAStores             %10d\n", /* PVRSRV_PROCESS_STAT_TYPE_RC_TA_STORES */
+    "RenderContext3DStores             %10d\n", /* PVRSRV_PROCESS_STAT_TYPE_RC_3D_STORES */
+    "RenderContextSHStores             %10d\n", /* PVRSRV_PROCESS_STAT_TYPE_RC_SH_STORES */
+    "RenderContextCDMStores            %10d\n", /* PVRSRV_PROCESS_STAT_TYPE_RC_CDM_STORES */
+    "ZSBufferRequestsByApp             %10d\n", /* PVRSRV_PROCESS_STAT_TYPE_ZSBUFFER_REQS_BY_APP */
+    "ZSBufferRequestsByFirmware        %10d\n", /* PVRSRV_PROCESS_STAT_TYPE_ZSBUFFER_REQS_BY_FW */
+    "FreeListGrowRequestsByApp         %10d\n", /* PVRSRV_PROCESS_STAT_TYPE_FREELIST_GROW_REQS_BY_APP */
+    "FreeListGrowRequestsByFirmware    %10d\n", /* PVRSRV_PROCESS_STAT_TYPE_FREELIST_GROW_REQS_BY_FW */
+    "FreeListInitialPages              %10d\n", /* PVRSRV_PROCESS_STAT_TYPE_FREELIST_PAGES_INIT */
+    "FreeListMaxPages                  %10d\n", /* PVRSRV_PROCESS_STAT_TYPE_FREELIST_MAX_PAGES */
+    "MemoryUsageKMalloc                %10d\n", /* PVRSRV_STAT_TYPE_KMALLOC */
+    "MemoryUsageKMallocMax             %10d\n", /* PVRSRV_STAT_TYPE_MAX_KMALLOC */
+    "MemoryUsageVMalloc                %10d\n", /* PVRSRV_STAT_TYPE_VMALLOC */
+    "MemoryUsageVMallocMax             %10d\n", /* PVRSRV_STAT_TYPE_MAX_VMALLOC */
+    "MemoryUsageAllocPTMemoryUMA       %10d\n", /* PVRSRV_STAT_TYPE_ALLOC_PAGES_PT_UMA */
+    "MemoryUsageAllocPTMemoryUMAMax    %10d\n", /* PVRSRV_STAT_TYPE_MAX_ALLOC_PAGES_PT_UMA */
+    "MemoryUsageVMapPTUMA              %10d\n", /* PVRSRV_STAT_TYPE_VMAP_PT_UMA */
+    "MemoryUsageVMapPTUMAMax           %10d\n", /* PVRSRV_STAT_TYPE_MAX_VMAP_PT_UMA */
+    "MemoryUsageAllocPTMemoryLMA       %10d\n", /* PVRSRV_STAT_TYPE_ALLOC_PAGES_PT_LMA */
+    "MemoryUsageAllocPTMemoryLMAMax    %10d\n", /* PVRSRV_STAT_TYPE_MAX_ALLOC_PAGES_PT_LMA */
+    "MemoryUsageIORemapPTLMA           %10d\n", /* PVRSRV_STAT_TYPE_IOREMAP_PT_LMA */
+    "MemoryUsageIORemapPTLMAMax        %10d\n", /* PVRSRV_STAT_TYPE_MAX_IOREMAP_PT_LMA */
+    "MemoryUsageAllocGPUMemLMA         %10d\n", /* PVRSRV_STAT_TYPE_ALLOC_LMA_PAGES */
+    "MemoryUsageAllocGPUMemLMAMax      %10d\n", /* PVRSRV_STAT_TYPE_MAX_ALLOC_LMA_PAGES */
+    "MemoryUsageAllocGPUMemUMA         %10d\n", /* PVRSRV_STAT_TYPE_ALLOC_UMA_PAGES */
+    "MemoryUsageAllocGPUMemUMAMax      %10d\n", /* PVRSRV_STAT_TYPE_MAX_ALLOC_UMA_PAGES */
+    "MemoryUsageMappedGPUMemUMA/LMA    %10d\n", /* PVRSRV_STAT_TYPE_MAP_UMA_LMA_PAGES */
+    "MemoryUsageMappedGPUMemUMA/LMAMax %10d\n", /* PVRSRV_STAT_TYPE_MAX_MAP_UMA_LMA_PAGES */
+};
+
+
+/*
+ *  Functions for printing the information stored...
+ */
+IMG_VOID  ProcessStatsPrintElements(IMG_PVOID pvFilePtr, IMG_PVOID pvStatPtr,
+                                    OS_STATS_PRINTF_FUNC* pfnOSGetStatsPrintf);
+
+IMG_VOID  MemStatsPrintElements(IMG_PVOID pvFilePtr, IMG_PVOID pvStatPtr,
+                                OS_STATS_PRINTF_FUNC* pfnOSGetStatsPrintf);
+
+IMG_VOID  RIMemStatsPrintElements(IMG_PVOID pvFilePtr, IMG_PVOID pvStatPtr,
+                                  OS_STATS_PRINTF_FUNC* pfnOSGetStatsPrintf);
+
+IMG_VOID  PowerStatsPrintElements(IMG_PVOID pvFilePtr, IMG_PVOID pvStatPtr,
+                                  OS_STATS_PRINTF_FUNC* pfnOSGetStatsPrintf);
+
+IMG_VOID  GlobalStatsPrintElements(IMG_PVOID pvFilePtr, IMG_PVOID pvStatPtr,
+								   OS_STATS_PRINTF_FUNC* pfnOSGetStatsPrintf);
+
+
+
+/*
+ *  Macros for updating stat values.
+ */
+#define UPDATE_MAX_VALUE(a,b)                  do { if ((b) > (a)) {(a) = (b);} } while(0)
+#define INCREASE_STAT_VALUE(ptr,var,val)       do { (ptr)->i32StatValue[(var)] += (val); if ((ptr)->i32StatValue[(var)] > (ptr)->i32StatValue[(var##_MAX)]) {(ptr)->i32StatValue[(var##_MAX)] = (ptr)->i32StatValue[(var)];} } while(0)
+#define DECREASE_STAT_VALUE(ptr,var,val)       do { if ((IMG_SIZE_T)(ptr)->i32StatValue[(var)] >= (val)) { (ptr)->i32StatValue[(var)] -= (val); } else { (ptr)->i32StatValue[(var)] = 0; } } while(0)
+#define INCREASE_GLOBAL_STAT_VALUE(var,val)    do { (var) += (val); if ((var) > (var##Max)) {(var##Max) = (var);} } while(0)
+#define DECREASE_GLOBAL_STAT_VALUE(var,val)    do { if ((var) >= (val)) { (var) -= (val); } else { (var) = 0; } } while(0)
+
+
+/*
+ * Structures for holding statistics...
+ */
+typedef enum
+{
+	PVRSRV_STAT_STRUCTURE_PROCESS = 1,
+	PVRSRV_STAT_STRUCTURE_RENDER_CONTEXT = 2,
+	PVRSRV_STAT_STRUCTURE_MEMORY = 3,
+	PVRSRV_STAT_STRUCTURE_RIMEMORY = 4
+} PVRSRV_STAT_STRUCTURE_TYPE;
+
+#define MAX_PROC_NAME_LENGTH   (32)
+
+typedef struct _PVRSRV_PROCESS_STATS_ {
+	/* Structure type (must be first!) */
+	PVRSRV_STAT_STRUCTURE_TYPE        eStructureType;
+
+	/* Linked list pointers */
+	struct _PVRSRV_PROCESS_STATS_*    psNext;
+	struct _PVRSRV_PROCESS_STATS_*    psPrev;
+
+	/* OS level process ID */
+	IMG_PID                           pid;
+	IMG_UINT32                        ui32RefCount;
+	IMG_UINT32                        ui32MemRefCount;
+
+	/* Folder name used to store the statistic */
+	IMG_CHAR				          szFolderName[MAX_PROC_NAME_LENGTH];
+
+	/* OS specific data */
+	IMG_PVOID                         pvOSPidFolderData;
+	IMG_PVOID                         pvOSPidEntryData;
+
+	/* Stats... */
+	IMG_INT32                         i32StatValue[PVRSRV_PROCESS_STAT_TYPE_COUNT];
+
+	/* Other statistics structures */
+	struct _PVRSRV_RENDER_STATS_*     psRenderLiveList;
+	struct _PVRSRV_RENDER_STATS_*     psRenderDeadList;
+
+	struct _PVRSRV_MEMORY_STATS_*     psMemoryStats;
+	struct _PVRSRV_RI_MEMORY_STATS_*  psRIMemoryStats;
+} PVRSRV_PROCESS_STATS;
+
+typedef struct _PVRSRV_RENDER_STATS_ {
+	/* Structure type (must be first!) */
+	PVRSRV_STAT_STRUCTURE_TYPE     eStructureType;
+
+	/* Linked list pointers */
+	struct _PVRSRV_RENDER_STATS_*  psNext;
+	struct _PVRSRV_RENDER_STATS_*  psPrev;
+
+	/* OS specific data */
+	IMG_PVOID                      pvOSData;
+
+	/* Stats... */
+	IMG_INT32                      i32StatValue[4];
+} PVRSRV_RENDER_STATS;
+
+typedef struct _PVRSRV_MEM_ALLOC_REC_
+{
+    PVRSRV_MEM_ALLOC_TYPE  eAllocType;
+    IMG_UINT64			ui64Key;
+    IMG_VOID               *pvCpuVAddr;
+    IMG_CPU_PHYADDR        sCpuPAddr;
+	IMG_SIZE_T			   uiBytes;
+    IMG_PVOID              pvPrivateData;
+
+    struct _PVRSRV_MEM_ALLOC_REC_  *psNext;
+	struct _PVRSRV_MEM_ALLOC_REC_  **ppsThis;
+} PVRSRV_MEM_ALLOC_REC;
+
+typedef struct _PVRSRV_MEMORY_STATS_ {
+	/* Structure type (must be first!) */
+	PVRSRV_STAT_STRUCTURE_TYPE  eStructureType;
+
+	/* OS specific data */
+	IMG_PVOID                   pvOSMemEntryData;
+
+	/* Stats... */
+	PVRSRV_MEM_ALLOC_REC        *psMemoryRecords;
+} PVRSRV_MEMORY_STATS;
+
+typedef struct _PVRSRV_RI_MEMORY_STATS_ {
+	/* Structure type (must be first!) */
+	PVRSRV_STAT_STRUCTURE_TYPE  eStructureType;
+
+	/* OS level process ID */
+	IMG_PID                   	pid;
+
+	/* OS specific data */
+	IMG_PVOID                   pvOSRIMemEntryData;
+} PVRSRV_RI_MEMORY_STATS;
+
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+static IMPLEMENT_LIST_INSERT(PVRSRV_MEM_ALLOC_REC)
+static IMPLEMENT_LIST_REMOVE(PVRSRV_MEM_ALLOC_REC)
+#endif
+
+
+/*
+ *  Global Boolean to flag when the statistics are ready to monitor
+ *  memory allocations.
+ */
+static  IMG_BOOL  bProcessStatsInitialised = IMG_FALSE;
+
+/*
+ * Linked lists for process stats. Live stats are for processes which are still running
+ * and the dead list holds those that have exited.
+ */
+static PVRSRV_PROCESS_STATS*  psLiveList = IMG_NULL;
+static PVRSRV_PROCESS_STATS*  psDeadList = IMG_NULL;
+
+POS_LOCK  psLinkedListLock = IMG_NULL;
+
+
+/*
+ * Pointer to OS folder to hold PID folders.
+ */
+IMG_CHAR*  pszOSLivePidFolderName = "pid";
+IMG_CHAR*  pszOSDeadPidFolderName = "pids_retired";
+IMG_PVOID  pvOSLivePidFolder      = IMG_NULL;
+IMG_PVOID  pvOSDeadPidFolder      = IMG_NULL;
+
+/* global driver-data folders */
+typedef struct _GLOBAL_STATS_
+{
+	IMG_UINT32 ui32MemoryUsageKMalloc;
+	IMG_UINT32 ui32MemoryUsageKMallocMax;
+	IMG_UINT32 ui32MemoryUsageVMalloc;
+	IMG_UINT32 ui32MemoryUsageVMallocMax;
+	IMG_UINT32 ui32MemoryUsageAllocPTMemoryUMA;
+	IMG_UINT32 ui32MemoryUsageAllocPTMemoryUMAMax;
+	IMG_UINT32 ui32MemoryUsageVMapPTUMA;
+	IMG_UINT32 ui32MemoryUsageVMapPTUMAMax;
+	IMG_UINT32 ui32MemoryUsageAllocPTMemoryLMA;
+	IMG_UINT32 ui32MemoryUsageAllocPTMemoryLMAMax;
+	IMG_UINT32 ui32MemoryUsageIORemapPTLMA;
+	IMG_UINT32 ui32MemoryUsageIORemapPTLMAMax;
+	IMG_UINT32 ui32MemoryUsageAllocGPUMemLMA;
+	IMG_UINT32 ui32MemoryUsageAllocGPUMemLMAMax;
+	IMG_UINT32 ui32MemoryUsageAllocGPUMemUMA;
+	IMG_UINT32 ui32MemoryUsageAllocGPUMemUMAMax;
+	IMG_UINT32 ui32MemoryUsageAllocGPUMemUMAPool;
+	IMG_UINT32 ui32MemoryUsageAllocGPUMemUMAPoolMax;
+	IMG_UINT32 ui32MemoryUsageMappedGPUMemUMA_LMA;
+	IMG_UINT32 ui32MemoryUsageMappedGPUMemUMA_LMAMax;
+} GLOBAL_STATS;
+
+static IMG_PVOID  pvOSGlobalMemEntryRef = IMG_NULL;
+static IMG_CHAR* const pszDriverStatFilename = "driver_stats";
+static GLOBAL_STATS gsGlobalStats;
+
+#define HASH_INITIAL_SIZE 5
+/* A hash table used to store the size of any vmalloc'd allocation
+ * against its address (not needed for kmallocs as we can use ksize()) */
+static HASH_TABLE* gpsVmallocSizeHashTable;
+static POS_LOCK	 gpsVmallocSizeHashTableLock;
+
+/*Power Statistics List */
+
+static IMG_UINT64 ui64TotalForcedEntries=0,ui64TotalNotForcedEntries=0;
+
+static IMG_UINT64 ui64ForcedPreDevice=0, ui64ForcedPreSystem=0, ui64ForcedPostDevice=0, ui64ForcedPostSystem=0;
+static IMG_UINT64 ui64NotForcedPreDevice=0, ui64NotForcedPreSystem=0, ui64NotForcedPostDevice=0, ui64NotForcedPostSystem=0;
+
+static IMG_UINT32 _PVRSRVIncrMemStatRefCount(IMG_PVOID pvStatPtr);
+static IMG_UINT32 _PVRSRVDecrMemStatRefCount(IMG_PVOID pvStatPtr);
+
+IMG_VOID InsertPowerTimeStatistic(PVRSRV_POWER_ENTRY_TYPE bType,
+		IMG_INT32 i32CurrentState, IMG_INT32 i32NextState,
+        IMG_UINT64 ui64SysStartTime, IMG_UINT64 ui64SysEndTime,
+		IMG_UINT64 ui64DevStartTime, IMG_UINT64 ui64DevEndTime,
+		IMG_BOOL bForced)
+{
+    IMG_UINT64 ui64Device;
+    IMG_UINT64 ui64System;
+
+	if (i32CurrentState==i32NextState) return ;
+
+    ui64Device=ui64DevEndTime-ui64DevStartTime;
+    ui64System=ui64SysEndTime-ui64SysStartTime;
+
+    if (bForced)
+    {
+        ui64TotalForcedEntries++;
+        if (bType==PVRSRV_POWER_ENTRY_TYPE_POST)
+        {
+            ui64ForcedPostDevice+=ui64Device;
+            ui64ForcedPostSystem+=ui64System;
+        }
+        else
+        {
+            ui64ForcedPreDevice+=ui64Device;
+            ui64ForcedPreSystem+=ui64System;
+        }
+    }
+    else
+    {
+        ui64TotalNotForcedEntries++;
+        if (bType==PVRSRV_POWER_ENTRY_TYPE_POST)
+        {
+            ui64NotForcedPostDevice+=ui64Device;
+            ui64NotForcedPostSystem+=ui64System;
+        }
+        else
+        {
+            ui64NotForcedPreDevice+=ui64Device;
+            ui64NotForcedPreSystem+=ui64System;
+        }
+    }
+
+	return;
+}
+
+typedef struct _EXTRA_POWER_STATS_
+{
+	IMG_UINT64	ui64PreClockSpeedChangeDuration;
+	IMG_UINT64	ui64BetweenPreEndingAndPostStartingDuration;
+	IMG_UINT64	ui64PostClockSpeedChangeDuration;
+} EXTRA_POWER_STATS;
+
+#define NUM_EXTRA_POWER_STATS	10
+
+static EXTRA_POWER_STATS asClockSpeedChanges[NUM_EXTRA_POWER_STATS];
+static IMG_UINT32	ui32ClockSpeedIndexStart = 0, ui32ClockSpeedIndexEnd = 0;
+
+static IMG_UINT64 ui64PreClockSpeedChangeMark = 0;
+
+IMG_VOID InsertPowerTimeStatisticExtraPre(IMG_UINT64 ui64StartTimer, IMG_UINT64 ui64Stoptimer)
+{
+	asClockSpeedChanges[ui32ClockSpeedIndexEnd].ui64PreClockSpeedChangeDuration = ui64Stoptimer - ui64StartTimer;
+
+	ui64PreClockSpeedChangeMark = OSClockus();
+
+	return ;
+}
+
+IMG_VOID InsertPowerTimeStatisticExtraPost(IMG_UINT64 ui64StartTimer, IMG_UINT64 ui64StopTimer)
+{
+	IMG_UINT64 ui64Duration = ui64StartTimer - ui64PreClockSpeedChangeMark;
+
+	PVR_ASSERT(ui64PreClockSpeedChangeMark > 0);
+
+	asClockSpeedChanges[ui32ClockSpeedIndexEnd].ui64BetweenPreEndingAndPostStartingDuration = ui64Duration;
+	asClockSpeedChanges[ui32ClockSpeedIndexEnd].ui64PostClockSpeedChangeDuration = ui64StopTimer - ui64StartTimer;
+
+	ui32ClockSpeedIndexEnd = (ui32ClockSpeedIndexEnd + 1) % NUM_EXTRA_POWER_STATS;
+
+	if (ui32ClockSpeedIndexEnd == ui32ClockSpeedIndexStart)
+	{
+		ui32ClockSpeedIndexStart = (ui32ClockSpeedIndexStart + 1) % NUM_EXTRA_POWER_STATS;
+	}
+
+	ui64PreClockSpeedChangeMark = 0;
+
+	return;
+}
+
+/*************************************************************************/ /*!
+@Function       _RemoveRenderStatsFromList
+@Description    Detaches a process from either the live or dead list.
+@Input          psProcessStats  Process to remove the stats from.
+@Input          psRenderStats   Render stats to remove.
+*/ /**************************************************************************/
+static IMG_VOID
+_RemoveRenderStatsFromList(PVRSRV_PROCESS_STATS* psProcessStats,
+                           PVRSRV_RENDER_STATS* psRenderStats)
+{
+	PVR_ASSERT(psProcessStats != IMG_NULL);
+	PVR_ASSERT(psRenderStats != IMG_NULL);
+
+	/* Remove the item from the linked lists... */
+	if (psProcessStats->psRenderLiveList == psRenderStats)
+	{
+		psProcessStats->psRenderLiveList = psRenderStats->psNext;
+
+		if (psProcessStats->psRenderLiveList != IMG_NULL)
+		{
+			psProcessStats->psRenderLiveList->psPrev = IMG_NULL;
+		}
+	}
+	else if (psProcessStats->psRenderDeadList == psRenderStats)
+	{
+		psProcessStats->psRenderDeadList = psRenderStats->psNext;
+
+		if (psProcessStats->psRenderDeadList != IMG_NULL)
+		{
+			psProcessStats->psRenderDeadList->psPrev = IMG_NULL;
+		}
+	}
+	else
+	{
+		PVRSRV_RENDER_STATS*  psNext = psRenderStats->psNext;
+		PVRSRV_RENDER_STATS*  psPrev = psRenderStats->psPrev;
+
+		if (psRenderStats->psNext != IMG_NULL)
+		{
+			psRenderStats->psNext->psPrev = psPrev;
+		}
+		if (psRenderStats->psPrev != IMG_NULL)
+		{
+			psRenderStats->psPrev->psNext = psNext;
+		}
+	}
+
+	/* Reset the pointers in this cell, as it is not attached to anything */
+	psRenderStats->psNext = IMG_NULL;
+	psRenderStats->psPrev = IMG_NULL;
+} /* _RemoveRenderStatsFromList */
+
+
+/*************************************************************************/ /*!
+@Function       _DestoryRenderStat
+@Description    Frees memory and resources held by a render statistic.
+@Input          psRenderStats  Render stats to destroy.
+*/ /**************************************************************************/
+static IMG_VOID
+_DestoryRenderStat(PVRSRV_RENDER_STATS* psRenderStats)
+{
+	PVR_ASSERT(psRenderStats != IMG_NULL);
+
+	/* Remove the statistic from the OS... */
+//	OSRemoveStatisticEntry(psRenderStats->pvOSData);
+
+	/* Free the memory... */
+	OSFreeMem(psRenderStats);
+} /* _DestoryRenderStat */
+
+
+/*************************************************************************/ /*!
+@Function       _FindProcessStatsInLiveList
+@Description    Searches the Live Process List for a statistics structure that
+                matches the PID given.
+@Input          pid  Process to search for.
+@Return         Pointer to stats structure for the process.
+*/ /**************************************************************************/
+static PVRSRV_PROCESS_STATS*
+_FindProcessStatsInLiveList(IMG_PID pid)
+{
+	PVRSRV_PROCESS_STATS*  psProcessStats = psLiveList;
+
+	while (psProcessStats != IMG_NULL)
+	{
+		if (psProcessStats->pid == pid)
+		{
+			return psProcessStats;
+		}
+
+		psProcessStats = psProcessStats->psNext;
+	}
+
+	return IMG_NULL;
+} /* _FindProcessStatsInLiveList */
+
+
+/*************************************************************************/ /*!
+@Function       _FindProcessStatsInDeadList
+@Description    Searches the Dead Process List for a statistics structure that
+                matches the PID given.
+@Input          pid  Process to search for.
+@Return         Pointer to stats structure for the process.
+*/ /**************************************************************************/
+static PVRSRV_PROCESS_STATS*
+_FindProcessStatsInDeadList(IMG_PID pid)
+{
+	PVRSRV_PROCESS_STATS*  psProcessStats = psDeadList;
+
+	while (psProcessStats != IMG_NULL)
+	{
+		if (psProcessStats->pid == pid)
+		{
+			return psProcessStats;
+		}
+
+		psProcessStats = psProcessStats->psNext;
+	}
+
+	return IMG_NULL;
+} /* _FindProcessStatsInDeadList */
+
+
+/*************************************************************************/ /*!
+@Function       _FindProcessStats
+@Description    Searches the Live and Dead Process Lists for a statistics
+                structure that matches the PID given.
+@Input          pid  Process to search for.
+@Return         Pointer to stats structure for the process.
+*/ /**************************************************************************/
+static PVRSRV_PROCESS_STATS*
+_FindProcessStats(IMG_PID pid)
+{
+	PVRSRV_PROCESS_STATS*  psProcessStats = _FindProcessStatsInLiveList(pid);
+
+	if (psProcessStats == IMG_NULL)
+	{
+		psProcessStats = _FindProcessStatsInDeadList(pid);
+	}
+
+	return psProcessStats;
+} /* _FindProcessStats */
+
+
+/*************************************************************************/ /*!
+@Function       _AddProcessStatsToFrontOfLiveList
+@Description    Add a statistic to the live list head.
+@Input          psProcessStats  Process stats to add.
+*/ /**************************************************************************/
+static IMG_VOID
+_AddProcessStatsToFrontOfLiveList(PVRSRV_PROCESS_STATS* psProcessStats)
+{
+	PVR_ASSERT(psProcessStats != IMG_NULL);
+
+	if (psLiveList != IMG_NULL)
+	{
+		psLiveList->psPrev     = psProcessStats;
+		psProcessStats->psNext = psLiveList;
+	}
+
+	psLiveList = psProcessStats;
+} /* _AddProcessStatsToFrontOfLiveList */
+
+
+/*************************************************************************/ /*!
+@Function       _AddProcessStatsToFrontOfDeadList
+@Description    Add a statistic to the dead list head.
+@Input          psProcessStats  Process stats to add.
+*/ /**************************************************************************/
+static IMG_VOID
+_AddProcessStatsToFrontOfDeadList(PVRSRV_PROCESS_STATS* psProcessStats)
+{
+	PVR_ASSERT(psProcessStats != IMG_NULL);
+
+	if (psDeadList != IMG_NULL)
+	{
+		psDeadList->psPrev     = psProcessStats;
+		psProcessStats->psNext = psDeadList;
+	}
+
+	psDeadList = psProcessStats;
+} /* _AddProcessStatsToFrontOfDeadList */
+
+
+/*************************************************************************/ /*!
+@Function       _RemoveProcessStatsFromList
+@Description    Detaches a process from either the live or dead list.
+@Input          psProcessStats  Process stats to remove.
+*/ /**************************************************************************/
+static IMG_VOID
+_RemoveProcessStatsFromList(PVRSRV_PROCESS_STATS* psProcessStats)
+{
+	PVR_ASSERT(psProcessStats != IMG_NULL);
+
+	/* Remove the item from the linked lists... */
+	if (psLiveList == psProcessStats)
+	{
+		psLiveList = psProcessStats->psNext;
+
+		if (psLiveList != IMG_NULL)
+		{
+			psLiveList->psPrev = IMG_NULL;
+		}
+	}
+	else if (psDeadList == psProcessStats)
+	{
+		psDeadList = psProcessStats->psNext;
+
+		if (psDeadList != IMG_NULL)
+		{
+			psDeadList->psPrev = IMG_NULL;
+		}
+	}
+	else
+	{
+		PVRSRV_PROCESS_STATS*  psNext = psProcessStats->psNext;
+		PVRSRV_PROCESS_STATS*  psPrev = psProcessStats->psPrev;
+
+		if (psProcessStats->psNext != IMG_NULL)
+		{
+			psProcessStats->psNext->psPrev = psPrev;
+		}
+		if (psProcessStats->psPrev != IMG_NULL)
+		{
+			psProcessStats->psPrev->psNext = psNext;
+		}
+	}
+
+	/* Reset the pointers in this cell, as it is not attached to anything */
+	psProcessStats->psNext = IMG_NULL;
+	psProcessStats->psPrev = IMG_NULL;
+} /* _RemoveProcessStatsFromList */
+
+
+/*************************************************************************/ /*!
+@Function       _CreateOSStatisticEntries
+@Description    Create all OS entries for this statistic.
+@Input          psProcessStats  Process stats to destroy.
+@Input          pvOSPidFolder   Pointer to OS folder to place the entrys in.
+*/ /**************************************************************************/
+static IMG_VOID
+_CreateOSStatisticEntries(PVRSRV_PROCESS_STATS* psProcessStats,
+                          IMG_PVOID pvOSPidFolder)
+{
+	PVR_ASSERT(psProcessStats != IMG_NULL);
+
+	psProcessStats->pvOSPidFolderData = OSCreateStatisticFolder(psProcessStats->szFolderName, pvOSPidFolder);
+	psProcessStats->pvOSPidEntryData  = OSCreateStatisticEntry("process_stats",
+	                                                           psProcessStats->pvOSPidFolderData,
+	                                                           ProcessStatsPrintElements,
+															   _PVRSRVIncrMemStatRefCount,
+															   _PVRSRVDecrMemStatRefCount,
+	                                                           (IMG_PVOID) psProcessStats);
+
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+	psProcessStats->psMemoryStats->pvOSMemEntryData = OSCreateStatisticEntry("mem_area",
+	                                                           psProcessStats->pvOSPidFolderData,
+	                                                           MemStatsPrintElements,
+															   IMG_NULL,
+															   IMG_NULL,
+	                                                           (IMG_PVOID) psProcessStats->psMemoryStats);
+#endif
+
+#if defined(PVR_RI_DEBUG)
+	psProcessStats->psRIMemoryStats->pvOSRIMemEntryData = OSCreateStatisticEntry("ri_mem_area",
+	                                                           psProcessStats->pvOSPidFolderData,
+	                                                           RIMemStatsPrintElements,
+															   IMG_NULL,
+															   IMG_NULL,
+	                                                           (IMG_PVOID) psProcessStats->psRIMemoryStats);
+#endif
+} /* _CreateOSStatisticEntries */
+
+
+/*************************************************************************/ /*!
+@Function       _RemoveOSStatisticEntries
+@Description    Removed all OS entries used by this statistic.
+@Input          psProcessStats  Process stats to destroy.
+*/ /**************************************************************************/
+static IMG_VOID
+_RemoveOSStatisticEntries(PVRSRV_PROCESS_STATS* psProcessStats)
+{
+	PVR_ASSERT(psProcessStats != IMG_NULL);
+
+#if defined(PVR_RI_DEBUG)
+	OSRemoveStatisticEntry(psProcessStats->psRIMemoryStats->pvOSRIMemEntryData);
+#endif
+
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+	OSRemoveStatisticEntry(psProcessStats->psMemoryStats->pvOSMemEntryData);
+#endif
+
+	if( psProcessStats->pvOSPidEntryData != IMG_NULL)
+	{
+		OSRemoveStatisticEntry(psProcessStats->pvOSPidEntryData);
+	}
+	if( psProcessStats->pvOSPidFolderData != IMG_NULL)
+	{
+		OSRemoveStatisticFolder(psProcessStats->pvOSPidFolderData);
+	}
+} /* _RemoveOSStatisticEntries */
+
+
+/*************************************************************************/ /*!
+@Function       _DestoryProcessStat
+@Description    Frees memory and resources held by a process statistic.
+@Input          psProcessStats  Process stats to destroy.
+*/ /**************************************************************************/
+static IMG_VOID
+_DestoryProcessStat(PVRSRV_PROCESS_STATS* psProcessStats)
+{
+	PVR_ASSERT(psProcessStats != IMG_NULL);
+
+	/* Remove this statistic from the OS... */
+	//_RemoveOSStatisticEntries(psProcessStats);
+
+	/* Free the live and dead render statistic lists... */
+	while (psProcessStats->psRenderLiveList != IMG_NULL)
+	{
+		PVRSRV_RENDER_STATS*  psRenderStats = psProcessStats->psRenderLiveList;
+
+		_RemoveRenderStatsFromList(psProcessStats, psRenderStats);
+		_DestoryRenderStat(psRenderStats);
+	}
+
+	while (psProcessStats->psRenderDeadList != IMG_NULL)
+	{
+		PVRSRV_RENDER_STATS*  psRenderStats = psProcessStats->psRenderDeadList;
+
+		_RemoveRenderStatsFromList(psProcessStats, psRenderStats);
+		_DestoryRenderStat(psRenderStats);
+	}
+
+	/* Free the memory statistics... */
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+	while (psProcessStats->psMemoryStats->psMemoryRecords)
+	{
+		List_PVRSRV_MEM_ALLOC_REC_Remove(psProcessStats->psMemoryStats->psMemoryRecords);
+	}
+	OSFreeMem(psProcessStats->psMemoryStats);
+#endif
+
+	/* Free the memory... */
+	OSFreeMem(psProcessStats);
+} /* _DestoryProcessStat */
+
+static IMG_UINT32 _PVRSRVIncrMemStatRefCount(IMG_PVOID pvStatPtr)
+{
+	PVRSRV_STAT_STRUCTURE_TYPE*  peStructureType = (PVRSRV_STAT_STRUCTURE_TYPE*) pvStatPtr;
+	PVRSRV_PROCESS_STATS*  psProcessStats = (PVRSRV_PROCESS_STATS*) pvStatPtr;
+	IMG_UINT32 ui32Res = 7777;
+
+    switch (*peStructureType)
+	{
+		case PVRSRV_STAT_STRUCTURE_PROCESS:
+		{
+			/* Increment stat memory refCount */
+			ui32Res = ++psProcessStats->ui32MemRefCount;
+			break;
+		}
+		default:
+		{
+			break;
+		}
+	}
+	return ui32Res;
+}
+
+static IMG_UINT32 _PVRSRVDecrMemStatRefCount(IMG_PVOID pvStatPtr)
+{
+	PVRSRV_STAT_STRUCTURE_TYPE*  peStructureType = (PVRSRV_STAT_STRUCTURE_TYPE*) pvStatPtr;
+	PVRSRV_PROCESS_STATS*  psProcessStats = (PVRSRV_PROCESS_STATS*) pvStatPtr;
+	IMG_UINT32 ui32Res = 7777;
+
+    switch (*peStructureType)
+	{
+		case PVRSRV_STAT_STRUCTURE_PROCESS:
+		{
+			/* Decrement stat memory refCount and free if now zero */
+			ui32Res = --psProcessStats->ui32MemRefCount;
+			if (ui32Res == 0)
+			{
+				_DestoryProcessStat(psProcessStats);
+			}
+			break;
+		}
+		default:
+		{
+			break;
+		}
+	}
+	return ui32Res;
+}
+
+/*************************************************************************/ /*!
+@Function       _CompressMemoryUsage
+@Description    Reduces memory usage by deleting old statistics data.
+                This function requires that the list lock is not held!
+*/ /**************************************************************************/
+static IMG_VOID
+_CompressMemoryUsage(IMG_VOID)
+{
+	PVRSRV_PROCESS_STATS*  psProcessStats;
+	PVRSRV_PROCESS_STATS*  psProcessStatsToBeFreed;
+	IMG_UINT32  ui32ItemsRemaining;
+
+	/*
+	 *  We hold the lock whilst checking the list, but we'll release it
+	 *  before freeing memory (as that will require the lock too)!
+	 */
+    OSLockAcquire(psLinkedListLock);
+
+	/* Check that the dead list is not bigger than the max size... */
+	psProcessStats          = psDeadList;
+	psProcessStatsToBeFreed = IMG_NULL;
+	ui32ItemsRemaining      = MAX_DEAD_LIST_PROCESSES;
+
+	while (psProcessStats != IMG_NULL  &&  ui32ItemsRemaining > 0)
+    {
+		ui32ItemsRemaining--;
+		if (ui32ItemsRemaining == 0)
+		{
+			/* This is the last allowed process, cut the linked list here! */
+			psProcessStatsToBeFreed = psProcessStats->psNext;
+			psProcessStats->psNext  = IMG_NULL;
+		}
+		else
+		{
+			psProcessStats = psProcessStats->psNext;
+		}
+	}
+
+	OSLockRelease(psLinkedListLock);
+
+	/* Any processes stats remaining will need to be destroyed... */
+	while (psProcessStatsToBeFreed != IMG_NULL)
+    {
+		PVRSRV_PROCESS_STATS*  psNextProcessStats = psProcessStatsToBeFreed->psNext;
+
+		psProcessStatsToBeFreed->psNext = IMG_NULL;
+		_RemoveOSStatisticEntries(psProcessStatsToBeFreed);
+		_PVRSRVDecrMemStatRefCount((void*)psProcessStatsToBeFreed);
+		//_DestoryProcessStat(psProcessStatsToBeFreed);
+
+		psProcessStatsToBeFreed = psNextProcessStats;
+	}
+} /* _CompressMemoryUsage */
+
+/* These functions move the process stats from the living to the dead list.
+ * _MoveProcessToDeadList moves the entry in the global lists and
+ * it needs to be protected by psLinkedListLock.
+ * _MoveProcessToDeadListDebugFS performs the OS calls and it
+ * shouldn't be used under psLinkedListLock because this could generate a
+ * lockdep warning. */
+static IMG_VOID
+_MoveProcessToDeadList(PVRSRV_PROCESS_STATS* psProcessStats)
+{
+	/* Take the element out of the live list and append to the dead list... */
+	_RemoveProcessStatsFromList(psProcessStats);
+	_AddProcessStatsToFrontOfDeadList(psProcessStats);
+} /* _MoveProcessToDeadList */
+
+static IMG_VOID
+_MoveProcessToDeadListDebugFS(PVRSRV_PROCESS_STATS* psProcessStats)
+{
+	/* Transfer the OS entries to the folder for dead processes... */
+	_RemoveOSStatisticEntries(psProcessStats);
+	_CreateOSStatisticEntries(psProcessStats, pvOSDeadPidFolder);
+} /* _MoveProcessToDeadListDebugFS */
+
+
+/*************************************************************************/ /*!
+@Function       PVRSRVStatsInitialise
+@Description    Entry point for initialising the statistics module.
+@Return         Standard PVRSRV_ERROR error code.
+*/ /**************************************************************************/
+PVRSRV_ERROR
+PVRSRVStatsInitialise(IMG_VOID)
+{
+    PVRSRV_ERROR error;
+
+    PVR_ASSERT(psLiveList == IMG_NULL);
+    PVR_ASSERT(psDeadList == IMG_NULL);
+    PVR_ASSERT(psLinkedListLock == IMG_NULL);
+	PVR_ASSERT(gpsVmallocSizeHashTable == NULL);
+	PVR_ASSERT(bProcessStatsInitialised == IMG_FALSE);
+
+	/* We need a lock to protect the linked lists... */
+	error = OSLockCreate(&psLinkedListLock, LOCK_TYPE_NONE);
+	if (error == PVRSRV_OK)
+	{
+		/* We also need a lock to protect the hash table used for vmalloc size tracking.. */
+		error = OSLockCreate(&gpsVmallocSizeHashTableLock, LOCK_TYPE_NONE);
+
+		if (error != PVRSRV_OK)
+		{
+			goto e0;
+		}
+		/* Create a pid folders for putting the PID files in... */
+		pvOSLivePidFolder = OSCreateStatisticFolder(pszOSLivePidFolderName, IMG_NULL);
+		pvOSDeadPidFolder = OSCreateStatisticFolder(pszOSDeadPidFolderName, IMG_NULL);
+
+		/* Create power stats entry... */
+		pvOSPowerStatsEntryData = OSCreateStatisticEntry("power_timing_stats",
+														 IMG_NULL,
+														 PowerStatsPrintElements,
+													     IMG_NULL,
+													     IMG_NULL,
+													     IMG_NULL);
+
+		pvOSGlobalMemEntryRef = OSCreateStatisticEntry(pszDriverStatFilename,
+													   IMG_NULL,
+													   GlobalStatsPrintElements,
+												       IMG_NULL,
+													   IMG_NULL,
+													   IMG_NULL);
+
+		/* Flag that we are ready to start monitoring memory allocations. */
+
+		gpsVmallocSizeHashTable = HASH_Create(HASH_INITIAL_SIZE);
+
+		OSMemSet(&gsGlobalStats, 0, sizeof(gsGlobalStats));
+
+		OSMemSet(asClockSpeedChanges, 0, sizeof(asClockSpeedChanges));
+	
+		bProcessStatsInitialised = IMG_TRUE;
+	}
+	return error;
+e0:
+	OSLockDestroy(psLinkedListLock);
+	psLinkedListLock = NULL;
+	return error;
+
+} /* PVRSRVStatsInitialise */
+
+
+/*************************************************************************/ /*!
+@Function       PVRSRVStatsDestroy
+@Description    Method for destroying the statistics module data.
+*/ /**************************************************************************/
+IMG_VOID
+PVRSRVStatsDestroy(IMG_VOID)
+{
+	PVR_ASSERT(bProcessStatsInitialised == IMG_TRUE);
+
+	/* Stop monitoring memory allocations... */
+	bProcessStatsInitialised = IMG_FALSE;
+
+	/* Destroy the power stats entry... */
+	if (pvOSPowerStatsEntryData!=NULL)
+	{
+		OSRemoveStatisticEntry(pvOSPowerStatsEntryData);
+		pvOSPowerStatsEntryData=NULL;
+	}
+
+	/* Destroy the global data entry */
+	if (pvOSGlobalMemEntryRef!=NULL)
+	{
+		OSRemoveStatisticEntry(pvOSGlobalMemEntryRef);
+		pvOSGlobalMemEntryRef=NULL;
+	}
+	
+	/* Destroy the lock... */
+	if (psLinkedListLock != IMG_NULL)
+	{
+		OSLockDestroy(psLinkedListLock);
+		psLinkedListLock = IMG_NULL;
+	}
+
+	/* Free the live and dead lists... */
+    while (psLiveList != IMG_NULL)
+    {
+		PVRSRV_PROCESS_STATS*  psProcessStats = psLiveList;
+
+		_RemoveProcessStatsFromList(psProcessStats);
+		_RemoveOSStatisticEntries(psProcessStats);
+	}
+
+    while (psDeadList != IMG_NULL)
+    {
+		PVRSRV_PROCESS_STATS*  psProcessStats = psDeadList;
+
+		_RemoveProcessStatsFromList(psProcessStats);
+		_RemoveOSStatisticEntries(psProcessStats);
+	}
+
+	/* Remove the OS folders used by the PID folders... */
+    OSRemoveStatisticFolder(pvOSLivePidFolder);
+    pvOSLivePidFolder = IMG_NULL;
+    OSRemoveStatisticFolder(pvOSDeadPidFolder);
+    pvOSDeadPidFolder = IMG_NULL;
+
+	if (gpsVmallocSizeHashTable != IMG_NULL)
+	{
+		HASH_Delete(gpsVmallocSizeHashTable);
+	}
+	if (gpsVmallocSizeHashTableLock != IMG_NULL)
+	{
+		OSLockDestroy(gpsVmallocSizeHashTableLock);
+		gpsVmallocSizeHashTableLock = IMG_NULL;
+	}
+
+} /* PVRSRVStatsDestroy */
+
+
+
+static void _decrease_global_stat(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+								  IMG_SIZE_T uiBytes)
+{
+	switch (eAllocType)
+	{
+		case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
+			DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats.ui32MemoryUsageKMalloc, uiBytes);
+			break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
+			DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats.ui32MemoryUsageVMalloc, uiBytes);
+			break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA:
+			DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats.ui32MemoryUsageAllocPTMemoryUMA, uiBytes);
+			break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA:
+			DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats.ui32MemoryUsageVMapPTUMA, uiBytes);
+			break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA:
+			DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats.ui32MemoryUsageAllocPTMemoryLMA, uiBytes);
+			break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA:
+			DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats.ui32MemoryUsageIORemapPTLMA, uiBytes);
+			break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES:
+			DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats.ui32MemoryUsageAllocGPUMemLMA, uiBytes);
+			break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES:	
+			DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats.ui32MemoryUsageAllocGPUMemUMA, uiBytes);
+			break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES:
+			DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats.ui32MemoryUsageMappedGPUMemUMA_LMA, uiBytes);
+			break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_UMA_POOL_PAGES:
+			DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats.ui32MemoryUsageAllocGPUMemUMAPool, uiBytes);
+			break;
+
+		default:
+			PVR_ASSERT(0);
+			break;
+	}
+}
+
+static void _increase_global_stat(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+								  IMG_SIZE_T uiBytes)
+{
+	switch (eAllocType)
+	{
+		case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
+			INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats.ui32MemoryUsageKMalloc, uiBytes);
+			break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
+			INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats.ui32MemoryUsageVMalloc, uiBytes);
+			break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA:
+			INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats.ui32MemoryUsageAllocPTMemoryUMA, uiBytes);
+			break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA:
+			INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats.ui32MemoryUsageVMapPTUMA, uiBytes);
+			break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA:
+			INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats.ui32MemoryUsageAllocPTMemoryLMA, uiBytes);
+			break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA:
+			INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats.ui32MemoryUsageIORemapPTLMA, uiBytes);
+			break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES:
+			INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats.ui32MemoryUsageAllocGPUMemLMA, uiBytes);
+			break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES:	
+			INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats.ui32MemoryUsageAllocGPUMemUMA, uiBytes);
+			break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES:
+			INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats.ui32MemoryUsageMappedGPUMemUMA_LMA, uiBytes);
+			break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_UMA_POOL_PAGES:
+			INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats.ui32MemoryUsageAllocGPUMemUMAPool, uiBytes);
+			break;
+
+		default:
+			PVR_ASSERT(0);
+			break;
+	}
+}
+
+
+/*************************************************************************/ /*!
+@Function       PVRSRVStatsRegisterProcess
+@Description    Register a process into the list statistics list.
+@Output         phProcessStats  Handle to the process to be used to deregister.
+@Return         Standard PVRSRV_ERROR error code.
+*/ /**************************************************************************/
+PVRSRV_ERROR
+PVRSRVStatsRegisterProcess(IMG_HANDLE* phProcessStats)
+{
+    PVRSRV_PROCESS_STATS*  psProcessStats;
+    IMG_PID                currentPid = OSGetCurrentProcessID();
+	IMG_BOOL               bMoveProcess = IMG_FALSE;
+
+    PVR_ASSERT(phProcessStats != IMG_NULL);
+
+    /* Check the PID has not already moved to the dead list... */
+	OSLockAcquire(psLinkedListLock);
+	psProcessStats = _FindProcessStatsInDeadList(currentPid);
+    if (psProcessStats != IMG_NULL)
+    {
+		/* Move it back onto the live list! */
+		_RemoveProcessStatsFromList(psProcessStats);
+		_AddProcessStatsToFrontOfLiveList(psProcessStats);
+
+		/* we can perform the OS operation out of lock */
+		bMoveProcess = IMG_TRUE;
+	}
+	else
+	{
+		/* Check the PID is not already registered in the live list... */
+		psProcessStats = _FindProcessStatsInLiveList(currentPid);
+	}
+
+	/* If the PID is on the live list then just increment the ref count and return... */
+    if (psProcessStats != IMG_NULL)
+    {
+		psProcessStats->ui32RefCount++;
+		psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS] = psProcessStats->ui32RefCount;
+		UPDATE_MAX_VALUE(psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_MAX_CONNECTIONS],
+		                 psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS]);
+		OSLockRelease(psLinkedListLock);
+
+		*phProcessStats = psProcessStats;
+
+		/* Check if we need to perform any OS operation */
+		if (bMoveProcess)
+		{
+			/* Transfer the OS entries back to the folder for live processes... */
+			_RemoveOSStatisticEntries(psProcessStats);
+			_CreateOSStatisticEntries(psProcessStats, pvOSLivePidFolder);
+		}
+
+		return PVRSRV_OK;
+	}
+	OSLockRelease(psLinkedListLock);
+
+	/* Allocate a new node structure and initialise it... */
+	psProcessStats = OSAllocMem(sizeof(PVRSRV_PROCESS_STATS));
+	if (psProcessStats == IMG_NULL)
+	{
+		*phProcessStats = 0;
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	OSMemSet(psProcessStats, 0, sizeof(PVRSRV_PROCESS_STATS));
+
+	psProcessStats->eStructureType  = PVRSRV_STAT_STRUCTURE_PROCESS;
+	psProcessStats->pid             = currentPid;
+	psProcessStats->ui32RefCount    = 1;
+	psProcessStats->ui32MemRefCount = 1;
+
+	psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS]     = 1;
+	psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_MAX_CONNECTIONS] = 1;
+
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+	psProcessStats->psMemoryStats = OSAllocMem(sizeof(PVRSRV_MEMORY_STATS));
+	if (psProcessStats->psMemoryStats == IMG_NULL)
+	{
+		OSFreeMem(psProcessStats);
+		*phProcessStats = 0;
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	OSMemSet(psProcessStats->psMemoryStats, 0, sizeof(PVRSRV_MEMORY_STATS));
+	psProcessStats->psMemoryStats->eStructureType = PVRSRV_STAT_STRUCTURE_MEMORY;
+#endif
+
+#if defined(PVR_RI_DEBUG)
+	psProcessStats->psRIMemoryStats = OSAllocMem(sizeof(PVRSRV_RI_MEMORY_STATS));
+	if (psProcessStats->psRIMemoryStats == IMG_NULL)
+	{
+		OSFreeMem(psProcessStats->psMemoryStats);
+		OSFreeMem(psProcessStats);
+		*phProcessStats = 0;
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	OSMemSet(psProcessStats->psRIMemoryStats, 0, sizeof(PVRSRV_RI_MEMORY_STATS));
+	psProcessStats->psRIMemoryStats->eStructureType = PVRSRV_STAT_STRUCTURE_RIMEMORY;
+	psProcessStats->psRIMemoryStats->pid            = currentPid;
+#endif
+
+	/* Add it to the live list... */
+    OSLockAcquire(psLinkedListLock);
+	_AddProcessStatsToFrontOfLiveList(psProcessStats);
+	OSLockRelease(psLinkedListLock);
+
+	/* Create the process stat in the OS... */
+	OSSNPrintf(psProcessStats->szFolderName, sizeof(psProcessStats->szFolderName),
+	           "%d", currentPid);
+	_CreateOSStatisticEntries(psProcessStats, pvOSLivePidFolder);
+
+	/* Done */
+	*phProcessStats = (IMG_HANDLE) psProcessStats;
+
+	return PVRSRV_OK;
+} /* PVRSRVStatsRegisterProcess */
+
+
+/*************************************************************************/ /*!
+@Function       PVRSRVStatsDeregisterProcess
+@Input          hProcessStats  Handle to the process returned when registered.
+@Description    Method for destroying the statistics module data.
+*/ /**************************************************************************/
+IMG_VOID
+PVRSRVStatsDeregisterProcess(IMG_HANDLE hProcessStats)
+{
+	IMG_BOOL    bMoveProcess = IMG_FALSE;
+
+	if (hProcessStats != 0)
+	{
+		PVRSRV_PROCESS_STATS*  psProcessStats = (PVRSRV_PROCESS_STATS*) hProcessStats;
+
+		/* Lower the reference count, if zero then move it to the dead list */
+		OSLockAcquire(psLinkedListLock);
+		if (psProcessStats->ui32RefCount > 0)
+		{
+			psProcessStats->ui32RefCount--;
+			psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS] = psProcessStats->ui32RefCount;
+
+			if (psProcessStats->ui32RefCount == 0)
+			{
+				_MoveProcessToDeadList(psProcessStats);
+				bMoveProcess = IMG_TRUE;
+			}
+		}
+		OSLockRelease(psLinkedListLock);
+
+		/* The OS calls need to be performed without psLinkedListLock */
+		if (bMoveProcess == IMG_TRUE)
+		{
+			_MoveProcessToDeadListDebugFS(psProcessStats);
+		}
+
+		/* Check if the dead list needs to be reduced */
+		_CompressMemoryUsage();
+	}
+} /* PVRSRVStatsDeregisterProcess */
+
+
+IMG_VOID
+PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+                             IMG_VOID *pvCpuVAddr,
+                             IMG_CPU_PHYADDR sCpuPAddr,
+                             IMG_SIZE_T uiBytes,
+                             IMG_PVOID pvPrivateData)
+{
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+	IMG_PID                currentPid = OSGetCurrentProcessID();
+	IMG_PID				   currentCleanupPid = PVRSRVGetPurgeConnectionPid();
+    PVRSRV_DATA*		   psPVRSRVData = PVRSRVGetPVRSRVData();
+    PVRSRV_MEM_ALLOC_REC*  psRecord   = IMG_NULL;
+    PVRSRV_PROCESS_STATS*  psProcessStats;
+    PVRSRV_MEMORY_STATS*   psMemoryStats;
+
+    /* Don't do anything if we are not initialised or we are shutting down! */
+    if (!bProcessStatsInitialised)
+    {
+		return;
+	}
+
+	/*
+	 *  To prevent a recursive loop, we make the memory allocations
+	 *  for our memstat records via OSAllocMemstatMem(), which does not try to
+	 *  create a memstat record entry..
+	 */
+
+    /* Allocate the memory record... */
+	psRecord = OSAllocMemstatMem(sizeof(PVRSRV_MEM_ALLOC_REC));
+	if (psRecord == IMG_NULL)
+	{
+		return;
+	}
+
+	OSMemSet(psRecord, 0, sizeof(PVRSRV_MEM_ALLOC_REC));
+	psRecord->eAllocType       = eAllocType;
+	psRecord->pvCpuVAddr       = pvCpuVAddr;
+	psRecord->sCpuPAddr.uiAddr = sCpuPAddr.uiAddr;
+	psRecord->uiBytes          = uiBytes;
+	psRecord->pvPrivateData    = pvPrivateData;
+
+	/* Lock while we find the correct process... */
+	OSLockAcquire(psLinkedListLock);
+
+	_increase_global_stat(eAllocType, uiBytes);
+	
+	if (psPVRSRVData)
+	{
+		if ( (currentPid == psPVRSRVData->cleanupThreadPid) &&
+			   (currentCleanupPid != 0))
+		{
+			psProcessStats = _FindProcessStats(currentCleanupPid);
+		}
+	    else
+	    {
+	    	psProcessStats = _FindProcessStats(currentPid);
+	    }
+	}
+    else
+    {
+    	psProcessStats = _FindProcessStats(currentPid);
+    }
+    if (psProcessStats == IMG_NULL)
+    {
+		OSLockRelease(psLinkedListLock);
+		if (psRecord != IMG_NULL)
+		{
+			OSFreeMemstatMem(psRecord);
+		}
+		return;
+	}
+	psMemoryStats = psProcessStats->psMemoryStats;
+
+	/* Insert the memory record... */
+	if (psRecord != IMG_NULL)
+	{
+		List_PVRSRV_MEM_ALLOC_REC_Insert(&psMemoryStats->psMemoryRecords, psRecord);
+	}
+
+	/* Update the memory watermarks... */
+	switch (eAllocType)
+	{
+		case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
+		{
+			if (psRecord != IMG_NULL)
+			{
+				if (pvCpuVAddr == IMG_NULL)
+				{
+					return;
+				}
+				psRecord->ui64Key = (IMG_UINT64)(IMG_UINTPTR_T)pvCpuVAddr;
+			}
+			INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_KMALLOC, uiBytes);
+		}
+		break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
+		{
+			if (psRecord != IMG_NULL)
+			{
+				if (pvCpuVAddr == IMG_NULL)
+				{
+					return;
+				}
+				psRecord->ui64Key = (IMG_UINT64)(IMG_UINTPTR_T)pvCpuVAddr;
+			}
+			INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMALLOC, uiBytes);
+		}
+		break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA:
+		{
+			if (psRecord != IMG_NULL)
+			{
+				if (pvCpuVAddr == IMG_NULL)
+				{
+					return;
+				}
+				psRecord->ui64Key = (IMG_UINT64)(IMG_UINTPTR_T)pvCpuVAddr;
+			}
+			INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA, uiBytes);
+		}
+		break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA:
+		{
+			if (psRecord != IMG_NULL)
+			{
+				if (pvCpuVAddr == IMG_NULL)
+				{
+					return;
+				}
+				psRecord->ui64Key = (IMG_UINT64)(IMG_UINTPTR_T)pvCpuVAddr;
+			}
+			INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA, uiBytes);
+		}
+		break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA:
+		{
+			if (psRecord != IMG_NULL)
+			{
+				psRecord->ui64Key = sCpuPAddr.uiAddr;
+			}
+			INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA, uiBytes);
+		}
+		break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA:
+		{
+			if (psRecord != IMG_NULL)
+			{
+				if (pvCpuVAddr == IMG_NULL)
+				{
+					return;
+				}
+				psRecord->ui64Key = (IMG_UINT64)(IMG_UINTPTR_T)pvCpuVAddr;
+			}
+			INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA, uiBytes);
+		}
+		break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES:
+		{
+			if (psRecord != IMG_NULL)
+			{
+				psRecord->ui64Key = sCpuPAddr.uiAddr;
+			}
+			INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES, uiBytes);
+		}
+		break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES:
+		{
+			if (psRecord != IMG_NULL)
+			{
+				psRecord->ui64Key = sCpuPAddr.uiAddr;
+			}
+			INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES, uiBytes);
+		}
+		break;
+
+		case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES:
+		{
+			if (psRecord != IMG_NULL)
+			{
+				if (pvCpuVAddr == IMG_NULL)
+				{
+					return;
+				}
+				psRecord->ui64Key = (IMG_UINT64)(IMG_UINTPTR_T)pvCpuVAddr;
+			}
+			INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES, uiBytes);
+		}
+		break;
+
+		default:
+		{
+			PVR_ASSERT(0);
+		}
+		break;
+	}
+
+	OSLockRelease(psLinkedListLock);
+#else
+PVR_UNREFERENCED_PARAMETER(eAllocType);
+PVR_UNREFERENCED_PARAMETER(pvCpuVAddr);
+PVR_UNREFERENCED_PARAMETER(sCpuPAddr);
+PVR_UNREFERENCED_PARAMETER(uiBytes);
+PVR_UNREFERENCED_PARAMETER(pvPrivateData);
+#endif
+} /* PVRSRVStatsAddMemAllocRecord */
+
+
+IMG_VOID
+PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+								IMG_UINT64 ui64Key)
+{
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+	IMG_PID                currentPid     = OSGetCurrentProcessID();
+	IMG_PID				   currentCleanupPid = PVRSRVGetPurgeConnectionPid();
+    PVRSRV_DATA*		   psPVRSRVData = PVRSRVGetPVRSRVData();
+    PVRSRV_PROCESS_STATS*  psProcessStats = IMG_NULL;
+	PVRSRV_MEMORY_STATS*   psMemoryStats  = IMG_NULL;
+	PVRSRV_MEM_ALLOC_REC*  psRecord       = IMG_NULL;
+    IMG_BOOL               bFound         = IMG_FALSE;
+
+    /* Don't do anything if we are not initialised or we are shutting down! */
+    if (!bProcessStatsInitialised)
+    {
+		return;
+	}
+
+	/* Lock while we find the correct process and remove this record... */
+	OSLockAcquire(psLinkedListLock);
+
+	if (psPVRSRVData)
+	{
+		if ( (currentPid == psPVRSRVData->cleanupThreadPid) &&
+			 (currentCleanupPid != 0))
+		{
+			psProcessStats = _FindProcessStats(currentCleanupPid);
+		}
+	    else
+	    {
+	    	psProcessStats = _FindProcessStats(currentPid);
+	    }
+	}
+    else
+    {
+    	psProcessStats = _FindProcessStats(currentPid);
+    }
+    if (psProcessStats != IMG_NULL)
+    {
+		psMemoryStats = psProcessStats->psMemoryStats;
+		psRecord      = psMemoryStats->psMemoryRecords;
+		while (psRecord != IMG_NULL)
+		{
+			if (psRecord->ui64Key == ui64Key  &&  psRecord->eAllocType == eAllocType)
+			{
+				bFound = IMG_TRUE;
+				break;
+			}
+
+			psRecord = psRecord->psNext;
+		}
+	}
+
+	/* If not found, we need to do a full search in case it was allocated to a different PID... */
+	if (!bFound)
+	{
+		PVRSRV_PROCESS_STATS*  psProcessStatsAlreadyChecked = psProcessStats;
+
+		/* Search all live lists first... */
+		psProcessStats = psLiveList;
+		while (psProcessStats != IMG_NULL)
+		{
+			if (psProcessStats != psProcessStatsAlreadyChecked)
+			{
+				psMemoryStats = psProcessStats->psMemoryStats;
+				psRecord      = psMemoryStats->psMemoryRecords;
+				while (psRecord != IMG_NULL)
+				{
+					if (psRecord->ui64Key == ui64Key  &&  psRecord->eAllocType == eAllocType)
+					{
+						bFound = IMG_TRUE;
+						break;
+					}
+
+					psRecord = psRecord->psNext;
+				}
+			}
+
+			if (bFound)
+			{
+				break;
+			}
+
+			psProcessStats = psProcessStats->psNext;
+		}
+
+		/* If not found, then search all dead lists next... */
+		if (!bFound)
+		{
+			psProcessStats = psDeadList;
+			while (psProcessStats != IMG_NULL)
+			{
+				if (psProcessStats != psProcessStatsAlreadyChecked)
+				{
+					psMemoryStats = psProcessStats->psMemoryStats;
+					psRecord      = psMemoryStats->psMemoryRecords;
+					while (psRecord != IMG_NULL)
+					{
+						if (psRecord->ui64Key == ui64Key  &&  psRecord->eAllocType == eAllocType)
+						{
+							bFound = IMG_TRUE;
+							break;
+						}
+
+						psRecord = psRecord->psNext;
+					}
+				}
+
+				if (bFound)
+				{
+					break;
+				}
+
+				psProcessStats = psProcessStats->psNext;
+			}
+		}
+	}
+
+	/* Update the watermark and remove this record...*/
+	if (bFound)
+	{
+		_decrease_global_stat(eAllocType, psRecord->uiBytes);
+	
+		switch (eAllocType)
+		{
+			case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
+			{
+				DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_KMALLOC, psRecord->uiBytes);
+			}
+			break;
+
+			case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
+			{
+				DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMALLOC, psRecord->uiBytes);
+			}
+			break;
+
+			case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA:
+			{
+				DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA, psRecord->uiBytes);
+			}
+			break;
+
+			case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA:
+			{
+				DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA, psRecord->uiBytes);
+			}
+			break;
+
+			case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA:
+			{
+				DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA, psRecord->uiBytes);
+			}
+			break;
+
+			case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA:
+			{
+				DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA, psRecord->uiBytes);
+			}
+			break;
+
+			case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES:
+			{
+				DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES, psRecord->uiBytes);
+			}
+			break;
+
+			case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES:
+			{
+				DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES, psRecord->uiBytes);
+			}
+			break;
+
+			case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES:
+			{
+				DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES, psRecord->uiBytes);
+			}
+			break;
+
+			default:
+			{
+				PVR_ASSERT(0);
+			}
+			break;
+		}
+
+		List_PVRSRV_MEM_ALLOC_REC_Remove(psRecord);
+	}
+
+	OSLockRelease(psLinkedListLock);
+
+	/*
+	 * Free the record outside the lock so we don't deadlock and so we
+	 * reduce the time the lock is held.
+	 */
+	if (psRecord != IMG_NULL)
+	{
+		OSFreeMemstatMem(psRecord);
+	}
+#else
+PVR_UNREFERENCED_PARAMETER(eAllocType);
+PVR_UNREFERENCED_PARAMETER(ui64Key);
+#endif
+} /* PVRSRVStatsRemoveMemAllocRecord */
+
+IMG_VOID
+PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+        							IMG_SIZE_T uiBytes,
+        							IMG_UINT64 uiCpuVAddr)
+{
+	IMG_BOOL bRes;
+
+	if (!bProcessStatsInitialised || (gpsVmallocSizeHashTable == NULL) )
+	{
+		return;
+	}
+
+	OSLockAcquire(gpsVmallocSizeHashTableLock);
+	bRes = HASH_Insert(gpsVmallocSizeHashTable, uiCpuVAddr, uiBytes);
+	OSLockRelease(gpsVmallocSizeHashTableLock);
+	if (bRes)
+	{
+		PVRSRVStatsIncrMemAllocStat(eAllocType, uiBytes);
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "*** %s : @ line %d HASH_Insert() failed!!", __FUNCTION__, __LINE__));
+	}
+}
+
+IMG_VOID
+PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+        							IMG_SIZE_T uiBytes)
+{
+	IMG_PID                currentPid = OSGetCurrentProcessID();
+	IMG_PID				   currentCleanupPid = PVRSRVGetPurgeConnectionPid();
+    PVRSRV_DATA* 		   psPVRSRVData = PVRSRVGetPVRSRVData();
+    PVRSRV_PROCESS_STATS*  psProcessStats;
+
+    /* Don't do anything if we are not initialised or we are shutting down! */
+    if (!bProcessStatsInitialised)
+    {
+		return;
+	}
+
+	_increase_global_stat(eAllocType, uiBytes);
+	
+	if (psPVRSRVData)
+	{
+		if ( (currentPid == psPVRSRVData->cleanupThreadPid) &&
+			 (currentCleanupPid != 0))
+		{
+			psProcessStats = _FindProcessStats(currentCleanupPid);
+		}
+	    else
+	    {
+	    	psProcessStats = _FindProcessStats(currentPid);
+	    }
+	}
+    else
+    {
+    	psProcessStats = _FindProcessStats(currentPid);
+    }
+
+    if (psProcessStats != IMG_NULL)
+    {
+		/* Update the memory watermarks... */
+		switch (eAllocType)
+		{
+			case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
+			{
+				INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_KMALLOC, uiBytes);
+			}
+			break;
+
+			case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
+			{
+				INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMALLOC, uiBytes);
+			}
+			break;
+
+			case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA:
+			{
+				INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA, uiBytes);
+			}
+			break;
+
+			case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA:
+			{
+				INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA, uiBytes);
+			}
+			break;
+
+			case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA:
+			{
+				INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA, uiBytes);
+			}
+			break;
+
+			case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA:
+			{
+				INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA, uiBytes);
+			}
+			break;
+
+			case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES:
+			{
+				INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES, uiBytes);
+			}
+			break;
+
+			case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES:
+			{
+				INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES, uiBytes);
+			}
+			break;
+
+			case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES:
+			{
+				INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES, uiBytes);
+			}
+			break;
+
+			default:
+			{
+				PVR_ASSERT(0);
+			}
+			break;
+		}
+    }
+}
+
+IMG_VOID
+PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+                                      IMG_UINT64 uiCpuVAddr)
+{
+	IMG_SIZE_T uiBytes;
+
+	if (!bProcessStatsInitialised || (gpsVmallocSizeHashTable == NULL) )
+	{
+		return;
+	}
+
+	OSLockAcquire(gpsVmallocSizeHashTableLock);
+	uiBytes = HASH_Remove(gpsVmallocSizeHashTable, uiCpuVAddr);
+	OSLockRelease(gpsVmallocSizeHashTableLock);
+
+	PVRSRVStatsDecrMemAllocStat(eAllocType, uiBytes);
+}
+
+IMG_VOID
+PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+                            IMG_SIZE_T uiBytes)
+{
+	IMG_PID                currentPid = OSGetCurrentProcessID();
+	IMG_PID				   currentCleanupPid = PVRSRVGetPurgeConnectionPid();
+    PVRSRV_DATA* 		   psPVRSRVData = PVRSRVGetPVRSRVData();
+    PVRSRV_PROCESS_STATS*  psProcessStats;
+
+    /* Don't do anything if we are not initialised or we are shutting down! */
+    if (!bProcessStatsInitialised)
+    {
+		return;
+	}
+
+	_decrease_global_stat(eAllocType, uiBytes);
+	
+	if (psPVRSRVData)
+	{
+		if ( (currentPid == psPVRSRVData->cleanupThreadPid) &&
+			 (currentCleanupPid != 0))
+		{
+			psProcessStats = _FindProcessStats(currentCleanupPid);
+		}
+	    else
+	    {
+	    	psProcessStats = _FindProcessStats(currentPid);
+		}
+	}
+    else
+    {
+    	psProcessStats = _FindProcessStats(currentPid);
+	}
+    if (psProcessStats != IMG_NULL)
+    {
+		/* Update the memory watermarks... */
+		switch (eAllocType)
+		{
+			case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
+			{
+				DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_KMALLOC, uiBytes);
+			}
+			break;
+
+			case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
+			{
+				DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMALLOC, uiBytes);
+			}
+			break;
+
+			case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA:
+			{
+				DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA, uiBytes);
+			}
+			break;
+
+			case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA:
+			{
+				DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA, uiBytes);
+			}
+			break;
+
+			case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA:
+			{
+				DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA, uiBytes);
+			}
+			break;
+
+			case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA:
+			{
+				DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA, uiBytes);
+			}
+			break;
+
+			case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES:
+			{
+				DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES, uiBytes);
+			}
+			break;
+
+			case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES:
+			{
+				DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES, uiBytes);
+			}
+			break;
+
+			case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES:
+			{
+				DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES, uiBytes);
+			}
+			break;
+
+			default:
+			{
+				PVR_ASSERT(0);
+			}
+			break;
+		}
+	}
+}
+
+/* For now we do not want to expose the global stats API
+ * so we wrap it into this specific function for pooled pages.
+ * As soon as we need to modify the global stats directly somewhere else
+ * we want to replace these functions with more general ones.
+ */
+IMG_VOID
+PVRSRVStatsIncrMemAllocPoolStat(IMG_SIZE_T uiBytes)
+{
+	_increase_global_stat(PVRSRV_MEM_ALLOC_TYPE_UMA_POOL_PAGES, uiBytes);
+}
+
+IMG_VOID
+PVRSRVStatsDecrMemAllocPoolStat(IMG_SIZE_T uiBytes)
+{
+	_decrease_global_stat(PVRSRV_MEM_ALLOC_TYPE_UMA_POOL_PAGES, uiBytes);
+}
+
+IMG_VOID
+PVRSRVStatsUpdateRenderContextStats(IMG_UINT32 ui32TotalNumPartialRenders,
+                                    IMG_UINT32 ui32TotalNumOutOfMemory,
+                                    IMG_UINT32 ui32NumTAStores,
+                                    IMG_UINT32 ui32Num3DStores,
+                                    IMG_UINT32 ui32NumSHStores,
+                                    IMG_UINT32 ui32NumCDMStores,
+                                    IMG_PID pidOwner)
+{
+	//IMG_PID                currentPid = OSGetCurrentProcessID();
+	IMG_PID	pidCurrent=pidOwner;
+
+    PVRSRV_PROCESS_STATS*  psProcessStats;
+
+    /* Don't do anything if we are not initialised or we are shutting down! */
+    if (!bProcessStatsInitialised)
+    {
+		return;
+	}
+
+	/* Lock while we find the correct process and update the record... */
+	OSLockAcquire(psLinkedListLock);
+
+    psProcessStats = _FindProcessStats(pidCurrent);
+    if (psProcessStats != IMG_NULL)
+    {
+		psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_PRS]       += ui32TotalNumPartialRenders;
+		psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_OOMS]      += ui32TotalNumOutOfMemory;
+		psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_TA_STORES] += ui32NumTAStores;
+		psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_3D_STORES] += ui32Num3DStores;
+		psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_SH_STORES] += ui32NumSHStores;
+		psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_CDM_STORES]+= ui32NumCDMStores;
+	}
+    else
+    {
+    	PVR_DPF((PVR_DBG_WARNING, "PVRSRVStatsUpdateRenderContextStats: Null process. Pid=%d", pidCurrent));
+    }
+
+	OSLockRelease(psLinkedListLock);
+} /* PVRSRVStatsUpdateRenderContextStats */
+
+
+IMG_VOID
+PVRSRVStatsUpdateZSBufferStats(IMG_UINT32 ui32NumReqByApp,
+                               IMG_UINT32 ui32NumReqByFW,
+                               IMG_PID owner)
+{
+	IMG_PID                currentPid = (owner==0)?OSGetCurrentProcessID():owner;
+    PVRSRV_PROCESS_STATS*  psProcessStats;
+
+
+    /* Don't do anything if we are not initialised or we are shutting down! */
+    if (!bProcessStatsInitialised)
+    {
+		return;
+	}
+
+	/* Lock while we find the correct process and update the record... */
+	OSLockAcquire(psLinkedListLock);
+
+    psProcessStats = _FindProcessStats(currentPid);
+    if (psProcessStats != IMG_NULL)
+    {
+		psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ZSBUFFER_REQS_BY_APP] += ui32NumReqByApp;
+		psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ZSBUFFER_REQS_BY_FW]  += ui32NumReqByFW;
+	}
+
+	OSLockRelease(psLinkedListLock);
+} /* PVRSRVStatsUpdateZSBufferStats */
+
+
+IMG_VOID
+PVRSRVStatsUpdateFreelistStats(IMG_UINT32 ui32NumGrowReqByApp,
+                               IMG_UINT32 ui32NumGrowReqByFW,
+                               IMG_UINT32 ui32InitFLPages,
+                               IMG_UINT32 ui32NumHighPages,
+                               IMG_PID ownerPid)
+{
+	IMG_PID                currentPid = (ownerPid!=0)?ownerPid:OSGetCurrentProcessID();
+    PVRSRV_PROCESS_STATS*  psProcessStats;
+
+    /* Don't do anything if we are not initialised or we are shutting down! */
+    if (!bProcessStatsInitialised)
+    {
+		return;
+	}
+
+	/* Lock while we find the correct process and update the record... */
+	OSLockAcquire(psLinkedListLock);
+
+	psProcessStats = _FindProcessStats(currentPid);
+
+    if (psProcessStats != IMG_NULL)
+    {
+		/* Avoid signed / unsigned mismatch which is flagged by some compilers */
+		IMG_INT32 a, b;
+
+		psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_FREELIST_GROW_REQS_BY_APP] += ui32NumGrowReqByApp;
+		psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_FREELIST_GROW_REQS_BY_FW]  += ui32NumGrowReqByFW;
+
+		a=psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_FREELIST_PAGES_INIT];
+		b=(IMG_INT32)(ui32InitFLPages);
+		UPDATE_MAX_VALUE(a, b);
+
+
+		psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_FREELIST_PAGES_INIT]=a;
+		ui32InitFLPages=(IMG_UINT32)b;
+
+		a=psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_FREELIST_MAX_PAGES];
+		b=(IMG_INT32)ui32NumHighPages;
+
+		UPDATE_MAX_VALUE(a, b);
+		psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_FREELIST_PAGES_INIT]=a;
+		ui32InitFLPages=(IMG_UINT32)b;
+
+	}
+
+	OSLockRelease(psLinkedListLock);
+} /* PVRSRVStatsUpdateFreelistStats */
+
+
+/*************************************************************************/ /*!
+@Function       ProcessStatsPrintElements
+@Description    Prints all elements for this process statistic record.
+@Input          pvFilePtr         Pointer to seq_file.
+@Input          pvStatPtr         Pointer to statistics structure.
+@Input          pfnOSStatsPrintf  Printf function to use for output.
+*/ /**************************************************************************/
+IMG_VOID
+ProcessStatsPrintElements(IMG_PVOID pvFilePtr,
+						  IMG_PVOID pvStatPtr,
+                          OS_STATS_PRINTF_FUNC* pfnOSStatsPrintf)
+{
+	PVRSRV_STAT_STRUCTURE_TYPE*  peStructureType = (PVRSRV_STAT_STRUCTURE_TYPE*) pvStatPtr;
+	PVRSRV_PROCESS_STATS*        psProcessStats  = (PVRSRV_PROCESS_STATS*) pvStatPtr;
+	IMG_UINT32                   ui32StatNumber = 0;
+
+	if (peStructureType == IMG_NULL  ||  *peStructureType != PVRSRV_STAT_STRUCTURE_PROCESS)
+	{
+		PVR_ASSERT(peStructureType != IMG_NULL  &&  *peStructureType == PVRSRV_STAT_STRUCTURE_PROCESS);
+		return;
+	}
+
+	if (pfnOSStatsPrintf == NULL)
+	{
+		return;
+	}
+
+	/* Loop through all the values and print them... */
+    while (ui32StatNumber < PVRSRV_PROCESS_STAT_TYPE_COUNT)
+    {
+        if (psProcessStats->ui32MemRefCount > 0)
+        {
+            pfnOSStatsPrintf(pvFilePtr, pszProcessStatFmt[ui32StatNumber], psProcessStats->i32StatValue[ui32StatNumber]);
+        }
+        else
+        {
+            PVR_DPF((PVR_DBG_ERROR, "%s: Called with psProcessStats->ui32MemRefCount=%d", __FUNCTION__, psProcessStats->ui32MemRefCount));
+        }
+        ui32StatNumber++;
+    }
+} /* ProcessStatsPrintElements */
+
+
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+/*************************************************************************/ /*!
+@Function       MemStatsPrintElements
+@Description    Prints all elements for the memory statistic record.
+@Input          pvFilePtr         Pointer to seq_file.
+@Input          pvStatPtr         Pointer to statistics structure.
+@Input          pfnOSStatsPrintf  Printf function to use for output.
+*/ /**************************************************************************/
+IMG_VOID
+MemStatsPrintElements(IMG_PVOID pvFilePtr,
+					  IMG_PVOID pvStatPtr,
+                      OS_STATS_PRINTF_FUNC* pfnOSStatsPrintf)
+{
+	PVRSRV_STAT_STRUCTURE_TYPE*  peStructureType = (PVRSRV_STAT_STRUCTURE_TYPE*) pvStatPtr;
+	PVRSRV_MEMORY_STATS*         psMemoryStats   = (PVRSRV_MEMORY_STATS*) pvStatPtr;
+	IMG_UINT32	ui32VAddrFields = sizeof(IMG_VOID*)/sizeof(IMG_UINT32);
+	IMG_UINT32	ui32PAddrFields = sizeof(IMG_CPU_PHYADDR)/sizeof(IMG_UINT32);
+	PVRSRV_MEM_ALLOC_REC  *psRecord;
+	IMG_UINT32  ui32ItemNumber;
+
+	if (peStructureType == IMG_NULL  ||  *peStructureType != PVRSRV_STAT_STRUCTURE_MEMORY)
+	{
+		PVR_ASSERT(peStructureType != IMG_NULL  &&  *peStructureType == PVRSRV_STAT_STRUCTURE_MEMORY);
+		return;
+	}
+
+	if (pfnOSStatsPrintf == NULL)
+	{
+		return;
+	}
+
+	/* Write the header... */
+    pfnOSStatsPrintf(pvFilePtr, "Type                VAddress");
+	for (ui32ItemNumber = 1;  ui32ItemNumber < ui32VAddrFields;  ui32ItemNumber++)
+	{
+        pfnOSStatsPrintf(pvFilePtr, "        ");
+	}
+
+    pfnOSStatsPrintf(pvFilePtr, "  PAddress");
+	for (ui32ItemNumber = 1;  ui32ItemNumber < ui32PAddrFields;  ui32ItemNumber++)
+	{
+        pfnOSStatsPrintf(pvFilePtr, "        ");
+	}
+
+    pfnOSStatsPrintf(pvFilePtr, "  Size(bytes)\n");
+
+	/* The lock has to be held whilst moving through the memory list... */
+	OSLockAcquire(psLinkedListLock);
+	psRecord = psMemoryStats->psMemoryRecords;
+
+	while (psRecord != IMG_NULL)
+	{
+		switch (psRecord->eAllocType)
+		{
+            case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:      		pfnOSStatsPrintf(pvFilePtr, "KMALLOC             "); break;
+            case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:      		pfnOSStatsPrintf(pvFilePtr, "VMALLOC             "); break;
+            case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA:  pfnOSStatsPrintf(pvFilePtr, "ALLOC_PAGES_PT_LMA  "); break;
+            case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA:  pfnOSStatsPrintf(pvFilePtr, "ALLOC_PAGES_PT_UMA  "); break;
+            case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA:      pfnOSStatsPrintf(pvFilePtr, "IOREMAP_PT_LMA      "); break;
+            case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA:         pfnOSStatsPrintf(pvFilePtr, "VMAP_PT_UMA         "); break;
+            case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES: 	pfnOSStatsPrintf(pvFilePtr, "ALLOC_LMA_PAGES     "); break;
+            case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES: 	pfnOSStatsPrintf(pvFilePtr, "ALLOC_UMA_PAGES     "); break;
+            case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES: 	pfnOSStatsPrintf(pvFilePtr, "MAP_UMA_LMA_PAGES   "); break;
+            default:                                 		pfnOSStatsPrintf(pvFilePtr, "INVALID             "); break;
+		}
+
+		for (ui32ItemNumber = 0;  ui32ItemNumber < ui32VAddrFields;  ui32ItemNumber++)
+		{
+            pfnOSStatsPrintf(pvFilePtr, "%08x", *(((IMG_UINT32*) &psRecord->pvCpuVAddr) + ui32VAddrFields - ui32ItemNumber - 1));
+		}
+        pfnOSStatsPrintf(pvFilePtr, "  ");
+
+		for (ui32ItemNumber = 0;  ui32ItemNumber < ui32PAddrFields;  ui32ItemNumber++)
+		{
+            pfnOSStatsPrintf(pvFilePtr, "%08x", *(((IMG_UINT32*) &psRecord->sCpuPAddr.uiAddr) + ui32PAddrFields - ui32ItemNumber - 1));
+		}
+
+        pfnOSStatsPrintf(pvFilePtr, "  %u\n", psRecord->uiBytes);
+
+		/* Move to next record... */
+		psRecord = psRecord->psNext;
+	}
+
+	OSLockRelease(psLinkedListLock);
+} /* MemStatsPrintElements */
+#endif
+
+
+#if defined(PVR_RI_DEBUG)
+/*************************************************************************/ /*!
+@Function       RIMemStatsPrintElements
+@Description    Prints all elements for the RI Memory record.
+@Input          pvFilePtr         Pointer to seq_file.
+@Input          pvStatPtr         Pointer to statistics structure.
+@Input          pfnOSStatsPrintf  Printf function to use for output.
+*/ /**************************************************************************/
+IMG_VOID
+RIMemStatsPrintElements(IMG_PVOID pvFilePtr,
+						IMG_PVOID pvStatPtr,
+                        OS_STATS_PRINTF_FUNC* pfnOSStatsPrintf)
+{
+	PVRSRV_STAT_STRUCTURE_TYPE  *peStructureType = (PVRSRV_STAT_STRUCTURE_TYPE*) pvStatPtr;
+	PVRSRV_RI_MEMORY_STATS      *psRIMemoryStats = (PVRSRV_RI_MEMORY_STATS*) pvStatPtr;
+	IMG_CHAR                    *pszStatFmtText  = IMG_NULL;
+	IMG_HANDLE                  *pRIHandle       = IMG_NULL;
+
+	if (peStructureType == IMG_NULL  ||  *peStructureType != PVRSRV_STAT_STRUCTURE_RIMEMORY)
+	{
+		PVR_ASSERT(peStructureType != IMG_NULL  &&  *peStructureType == PVRSRV_STAT_STRUCTURE_RIMEMORY);
+		return;
+	}
+
+	if (pfnOSStatsPrintf == NULL)
+	{
+		return;
+	}
+
+	/*
+	 *  Loop through the RI system to get each line of text.
+	 */
+	while (RIGetListEntryKM(psRIMemoryStats->pid,
+							&pRIHandle,
+							&pszStatFmtText))
+	{
+        pfnOSStatsPrintf(pvFilePtr, "%s", pszStatFmtText);
+	}
+} /* RIMemStatsPrintElements */
+#endif
+
+
+static IMG_UINT32	ui32FirmwareStartTimestamp=0;
+static IMG_UINT64	ui64FirmwareIdleDuration=0;
+
+/* Averaging each new value with the previous accumulated knowledge. There are many coefficients for that
+ * (e.g.) 50 / 50 but i chose 75 / 25 meaning that previous knowledge affects the weighted average more
+ * than any new knowledge. As time goes by though eventually the number converges to the most commonly used
+ */
+
+IMG_VOID SetFirmwareStartTime(IMG_UINT32 ui32Time)
+{
+    if (ui32FirmwareStartTimestamp > 0)
+    {
+        ui32FirmwareStartTimestamp = MEAN_TIME(ui32FirmwareStartTimestamp, ui32Time);
+    }
+    else
+    {
+        ui32FirmwareStartTimestamp = ui32Time;
+    }
+}
+
+IMG_VOID SetFirmwareHandshakeIdleTime(IMG_UINT64 ui64Duration)
+{
+    if (ui64FirmwareIdleDuration > 0)
+	{
+        ui64FirmwareIdleDuration = MEAN_TIME(ui64FirmwareIdleDuration, ui64Duration);
+	}
+	else
+	{
+		ui64FirmwareIdleDuration = ui64Duration;
+	}
+}
+
+
+IMG_VOID PowerStatsPrintElements(IMG_PVOID pvFilePtr,
+                                 IMG_PVOID pvStatPtr,
+                                 OS_STATS_PRINTF_FUNC* pfnOSStatsPrintf)
+{
+	IMG_UINT32			ui32Idx;
+
+	PVR_UNREFERENCED_PARAMETER(pvStatPtr);
+
+	if (pfnOSStatsPrintf == NULL)
+	{
+		return;
+	}
+
+	if (ui64TotalForcedEntries > 0)
+	{
+        pfnOSStatsPrintf(pvFilePtr, "Forced Power Transition (nanoseconds):\n");
+        pfnOSStatsPrintf(pvFilePtr, "Pre-Device:  %u\n", (IMG_UINT32)(ui64ForcedPreDevice)  / (IMG_UINT32)(ui64TotalForcedEntries));
+        pfnOSStatsPrintf(pvFilePtr, "Pre-System:  %u\n", (IMG_UINT32)(ui64ForcedPreSystem)  / (IMG_UINT32)(ui64TotalForcedEntries));
+        pfnOSStatsPrintf(pvFilePtr, "Post-Device: %u\n", (IMG_UINT32)(ui64ForcedPostDevice) / (IMG_UINT32)(ui64TotalForcedEntries));
+        pfnOSStatsPrintf(pvFilePtr, "Post-System: %u\n", (IMG_UINT32)(ui64ForcedPostSystem) / (IMG_UINT32)(ui64TotalForcedEntries));
+        pfnOSStatsPrintf(pvFilePtr, "\n");
+	}
+
+	if (ui64TotalNotForcedEntries > 0)
+	{
+        pfnOSStatsPrintf(pvFilePtr, "Not Forced Power Transition (nanoseconds):\n");
+        pfnOSStatsPrintf(pvFilePtr, "Pre-Device:  %u\n", (IMG_UINT32)(ui64NotForcedPreDevice)  / (IMG_UINT32)(ui64TotalNotForcedEntries));
+        pfnOSStatsPrintf(pvFilePtr, "Pre-System:  %u\n", (IMG_UINT32)(ui64NotForcedPreSystem)  / (IMG_UINT32)(ui64TotalNotForcedEntries));
+        pfnOSStatsPrintf(pvFilePtr, "Post-Device: %u\n", (IMG_UINT32)(ui64NotForcedPostDevice) / (IMG_UINT32)(ui64TotalNotForcedEntries));
+        pfnOSStatsPrintf(pvFilePtr, "Post-System: %u\n", (IMG_UINT32)(ui64NotForcedPostSystem) / (IMG_UINT32)(ui64TotalNotForcedEntries));
+        pfnOSStatsPrintf(pvFilePtr, "\n");
+	}
+
+    pfnOSStatsPrintf(pvFilePtr, "FW bootup time (timer ticks): %u\n", ui32FirmwareStartTimestamp);
+    pfnOSStatsPrintf(pvFilePtr, "Host Acknowledge Time for FW Idle Signal (timer ticks): %u\n", (IMG_UINT32)(ui64FirmwareIdleDuration));
+    pfnOSStatsPrintf(pvFilePtr, "\n");
+
+    pfnOSStatsPrintf(pvFilePtr, "Last %d Clock Speed Change Timers (nanoseconds):\n", NUM_EXTRA_POWER_STATS);
+    pfnOSStatsPrintf(pvFilePtr, "Prepare DVFS\tDVFS Change\tPost DVFS\n");
+
+	for (ui32Idx = ui32ClockSpeedIndexStart; ui32Idx !=ui32ClockSpeedIndexEnd; ui32Idx = (ui32Idx + 1) % NUM_EXTRA_POWER_STATS)
+	{
+        pfnOSStatsPrintf(pvFilePtr, "%12llu\t%11llu\t%9llu\n",asClockSpeedChanges[ui32Idx].ui64PreClockSpeedChangeDuration,
+												 asClockSpeedChanges[ui32Idx].ui64BetweenPreEndingAndPostStartingDuration,
+												 asClockSpeedChanges[ui32Idx].ui64PostClockSpeedChangeDuration);
+	}
+
+
+} /* PowerStatsPrintElements */
+
+
+IMG_VOID GlobalStatsPrintElements(IMG_PVOID pvFilePtr,
+                                  IMG_PVOID pvStatPtr,
+                                  OS_STATS_PRINTF_FUNC* pfnOSGetStatsPrintf)
+{
+	PVR_UNREFERENCED_PARAMETER(pvStatPtr);
+
+	if (pfnOSGetStatsPrintf != IMG_NULL)
+	{
+        pfnOSGetStatsPrintf(pvFilePtr, "MemoryUsageKMalloc                %10d\n", gsGlobalStats.ui32MemoryUsageKMalloc);
+        pfnOSGetStatsPrintf(pvFilePtr, "MemoryUsageKMallocMax             %10d\n", gsGlobalStats.ui32MemoryUsageKMallocMax);
+        pfnOSGetStatsPrintf(pvFilePtr, "MemoryUsageVMalloc                %10d\n", gsGlobalStats.ui32MemoryUsageVMalloc);
+        pfnOSGetStatsPrintf(pvFilePtr, "MemoryUsageVMallocMax             %10d\n", gsGlobalStats.ui32MemoryUsageVMallocMax);
+        pfnOSGetStatsPrintf(pvFilePtr, "MemoryUsageAllocPTMemoryUMA       %10d\n", gsGlobalStats.ui32MemoryUsageAllocPTMemoryUMA);
+        pfnOSGetStatsPrintf(pvFilePtr, "MemoryUsageAllocPTMemoryUMAMax    %10d\n", gsGlobalStats.ui32MemoryUsageAllocPTMemoryUMAMax);
+        pfnOSGetStatsPrintf(pvFilePtr, "MemoryUsageVMapPTUMA              %10d\n", gsGlobalStats.ui32MemoryUsageVMapPTUMA);
+        pfnOSGetStatsPrintf(pvFilePtr, "MemoryUsageVMapPTUMAMax           %10d\n", gsGlobalStats.ui32MemoryUsageVMapPTUMAMax);
+        pfnOSGetStatsPrintf(pvFilePtr, "MemoryUsageAllocPTMemoryLMA       %10d\n", gsGlobalStats.ui32MemoryUsageAllocPTMemoryLMA);
+        pfnOSGetStatsPrintf(pvFilePtr, "MemoryUsageAllocPTMemoryLMAMax    %10d\n", gsGlobalStats.ui32MemoryUsageAllocPTMemoryLMAMax);
+        pfnOSGetStatsPrintf(pvFilePtr, "MemoryUsageIORemapPTLMA           %10d\n", gsGlobalStats.ui32MemoryUsageIORemapPTLMA);
+        pfnOSGetStatsPrintf(pvFilePtr, "MemoryUsageIORemapPTLMAMax        %10d\n", gsGlobalStats.ui32MemoryUsageIORemapPTLMAMax);
+        pfnOSGetStatsPrintf(pvFilePtr, "MemoryUsageAllocGPUMemLMA         %10d\n", gsGlobalStats.ui32MemoryUsageAllocGPUMemLMA);
+        pfnOSGetStatsPrintf(pvFilePtr, "MemoryUsageAllocGPUMemLMAMax      %10d\n", gsGlobalStats.ui32MemoryUsageAllocGPUMemLMAMax);
+        pfnOSGetStatsPrintf(pvFilePtr, "MemoryUsageAllocGPUMemUMA         %10d\n", gsGlobalStats.ui32MemoryUsageAllocGPUMemUMA);
+        pfnOSGetStatsPrintf(pvFilePtr, "MemoryUsageAllocGPUMemUMAMax      %10d\n", gsGlobalStats.ui32MemoryUsageAllocGPUMemUMAMax);
+        pfnOSGetStatsPrintf(pvFilePtr, "MemoryUsageAllocGPUMemUMAPool     %10d\n", gsGlobalStats.ui32MemoryUsageAllocGPUMemUMAPool);
+        pfnOSGetStatsPrintf(pvFilePtr, "MemoryUsageAllocGPUMemUMAPoolMax  %10d\n", gsGlobalStats.ui32MemoryUsageAllocGPUMemUMAPoolMax);
+        pfnOSGetStatsPrintf(pvFilePtr, "MemoryUsageMappedGPUMemUMA/LMA    %10d\n", gsGlobalStats.ui32MemoryUsageMappedGPUMemUMA_LMA);
+        pfnOSGetStatsPrintf(pvFilePtr, "MemoryUsageMappedGPUMemUMA/LMAMax %10d\n", gsGlobalStats.ui32MemoryUsageMappedGPUMemUMA_LMAMax);
+	}
+}
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/pvrsrv.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/pvrsrv.c
new file mode 100644
index 0000000..6dd20ec
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/pvrsrv.c
@@ -0,0 +1,2848 @@
+/*************************************************************************/ /*!
+@File
+@Title          core services functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Main APIs for core services functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "rgxdebug.h"
+#include "handle.h"
+#include "connection_server.h"
+#include "pdump_km.h"
+#include "ra.h"
+#include "allocmem.h"
+#include "pmr.h"
+#include "pvrsrv.h"
+#include "pvrsrv_device.h"
+#include "pvr_debug.h"
+#include "sync.h"
+#include "sync_server.h"
+#include "devicemem.h"
+
+#include "pvrversion.h"
+
+#include "lists.h"
+#include "dllist.h"
+#include "syscommon.h"
+
+#include "physmem_lma.h"
+#include "physmem_osmem.h"
+
+#include "tlintern.h"
+
+#if defined (SUPPORT_RGX)
+#include "rgxinit.h"
+#include "rgxfwutils.h"
+#endif
+
+#include "debug_request_ids.h"
+#include "pvrsrv.h"
+
+#if defined(PVR_RI_DEBUG)
+#include "ri_server.h"
+#endif
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#include "process_stats.h"
+#endif
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+	#if !defined(GPUVIRT_SIZEOF_ARENA0)
+		#define GPUVIRT_SIZEOF_ARENA0	64 * 1024 * 1024 //Giving 64 megs of LMA memory to arena 0 for firmware and other allocations
+	#endif
+#endif
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+#include "devicemem_history_server.h"
+#endif
+
+#if defined(PVR_DVFS)
+#include "pvr_dvfs_device.h"
+#endif
+
+#if defined(SUPPORT_DISPLAY_CLASS)
+#include "dc_server.h"
+#endif
+
+/*! Wait 100ms before retrying deferred clean-up again */
+#define CLEANUP_THREAD_WAIT_RETRY_TIMEOUT 0x00000064
+
+/*! Wait 8hrs when no deferred clean-up required. Allows a poll several times
+ * a day to check for any missed clean-up. */
+#define CLEANUP_THREAD_WAIT_SLEEP_TIMEOUT 0x01B77400
+
+
+typedef struct DEBUG_REQUEST_ENTRY_TAG
+{
+	IMG_UINT32		ui32RequesterID;
+	DLLIST_NODE		sListHead;
+} DEBUG_REQUEST_ENTRY;
+
+typedef struct DEBUG_REQUEST_TABLE_TAG
+{
+	IMG_UINT32				ui32RequestCount;
+	DEBUG_REQUEST_ENTRY		asEntry[1];
+}DEBUG_REQUEST_TABLE;
+
+PVRSRV_DATA	*gpsPVRSRVData = IMG_NULL;
+static IMG_HANDLE   g_hDbgSysNotify;
+
+static PVRSRV_SYSTEM_CONFIG *gpsSysConfig = IMG_NULL;
+
+typedef PVRSRV_ERROR (*PFN_REGISTER_DEVICE)(PVRSRV_DEVICE_NODE *psDeviceNode);
+typedef PVRSRV_ERROR (*PFN_UNREGISTER_DEVICE)(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+static PFN_REGISTER_DEVICE sRegisterDevice[PVRSRV_DEVICE_TYPE_LAST + 1];
+static PFN_UNREGISTER_DEVICE sUnregisterDevice[PVRSRV_DEVICE_TYPE_LAST + 1];
+
+static PVRSRV_ERROR IMG_CALLCONV PVRSRVRegisterDevice(PVRSRV_DEVICE_CONFIG *psDevConfig);
+static PVRSRV_ERROR IMG_CALLCONV PVRSRVUnregisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+static PVRSRV_ERROR PVRSRVRegisterDbgTable(IMG_UINT32 *paui32Table, IMG_UINT32 ui32Length, IMG_PVOID *phTable);
+static IMG_VOID PVRSRVUnregisterDbgTable(IMG_PVOID hTable);
+
+static IMG_VOID _SysDebugRequestNotify(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle, IMG_UINT32 ui32VerbLevel);
+
+IMG_UINT32	g_ui32InitFlags;
+
+/* mark which parts of Services were initialised */
+#define		INIT_DATA_ENABLE_PDUMPINIT	0x1U
+
+/* Head of the list of callbacks called when Cmd complete happens */
+static DLLIST_NODE sCmdCompNotifyHead;
+static POSWR_LOCK hNotifyLock = IMG_NULL;
+
+/* Debug request table and lock */
+static POSWR_LOCK g_hDbgNotifyLock = IMG_NULL;
+static DEBUG_REQUEST_TABLE *g_psDebugTable;
+
+static IMG_PVOID g_hDebugTable = IMG_NULL;
+
+static IMG_UINT32 g_aui32DebugOrderTable[] = {
+	DEBUG_REQUEST_SYS,
+	DEBUG_REQUEST_RGX,
+	DEBUG_REQUEST_DC,
+	DEBUG_REQUEST_SERVERSYNC,
+	DEBUG_REQUEST_ANDROIDSYNC
+};
+
+DUMPDEBUG_PRINTF_FUNC *g_pfnDumpDebugPrintf = IMG_NULL;
+
+/*!
+******************************************************************************
+
+ @Function	AllocateDeviceID
+
+ @Description
+
+ allocates a device id from the pool of valid ids
+
+ @input psPVRSRVData :	Services private data
+
+ @input pui32DevID : device id to return
+
+ @Return device id
+
+******************************************************************************/
+static PVRSRV_ERROR AllocateDeviceID(PVRSRV_DATA *psPVRSRVData, IMG_UINT32 *pui32DevID)
+{
+	SYS_DEVICE_ID* psDeviceWalker;
+	SYS_DEVICE_ID* psDeviceEnd;
+
+	psDeviceWalker = &psPVRSRVData->sDeviceID[0];
+	psDeviceEnd = psDeviceWalker + SYS_DEVICE_COUNT;
+
+	/* find a free ID */
+	while (psDeviceWalker < psDeviceEnd)
+	{
+		if (!psDeviceWalker->bInUse)
+		{
+			psDeviceWalker->bInUse = IMG_TRUE;
+			*pui32DevID = psDeviceWalker->uiID;
+
+			return PVRSRV_OK;
+		}
+		psDeviceWalker++;
+	}
+
+	PVR_DPF((PVR_DBG_ERROR,"AllocateDeviceID: No free and valid device IDs available!"));
+
+	/* Should never get here: sDeviceID[] may have been setup too small */
+	PVR_ASSERT(psDeviceWalker < psDeviceEnd);
+
+	return PVRSRV_ERROR_NO_FREE_DEVICEIDS_AVAILABLE;
+}
+
+
+/*!
+******************************************************************************
+
+ @Function	FreeDeviceID
+
+ @Description
+
+ frees a device id from the pool of valid ids
+
+ @input psPVRSRVData :	Services private data
+
+ @input ui32DevID : device id to free
+
+ @Return device id
+
+******************************************************************************/
+static PVRSRV_ERROR FreeDeviceID(PVRSRV_DATA *psPVRSRVData, IMG_UINT32 ui32DevID)
+{
+	SYS_DEVICE_ID* psDeviceWalker;
+	SYS_DEVICE_ID* psDeviceEnd;
+
+	psDeviceWalker = &psPVRSRVData->sDeviceID[0];
+	psDeviceEnd = psDeviceWalker + SYS_DEVICE_COUNT;
+
+	/* find the ID to free */
+	while (psDeviceWalker < psDeviceEnd)
+	{
+		/* if matching id and in use, free */
+		if	(
+				(psDeviceWalker->uiID == ui32DevID) &&
+				(psDeviceWalker->bInUse)
+			)
+		{
+			psDeviceWalker->bInUse = IMG_FALSE;
+			return PVRSRV_OK;
+		}
+		psDeviceWalker++;
+	}
+
+	PVR_DPF((PVR_DBG_ERROR,"FreeDeviceID: no matching dev ID that is in use!"));
+
+	/* should never get here */
+	PVR_ASSERT(psDeviceWalker < psDeviceEnd);
+
+	return PVRSRV_ERROR_INVALID_DEVICEID;
+}
+
+
+/*!
+******************************************************************************
+ @Function	PVRSRVEnumerateDevicesKM_ForEachVaCb
+
+ @Description
+
+ Enumerates the device node (if is of the same class as given).
+
+ @Input psDeviceNode	- The device node to be enumerated
+ 		va				- variable arguments list, with:
+							pui32DevCount	- The device count pointer (to be increased)
+							ppeDeviceType     - The pointer to the device type pointer (to be updated and increased)
+							ppeDeviceClass    - The pointer to the device classes pointer (to be updated and increased)
+							ppui32DeviceIndex - The pointer to the device indexes pointer (to be updated and increased)
+******************************************************************************/
+static IMG_VOID PVRSRVEnumerateDevicesKM_ForEachVaCb(PVRSRV_DEVICE_NODE *psDeviceNode, va_list va)
+{
+	IMG_UINT *pui32DevCount;
+	PVRSRV_DEVICE_TYPE **ppeDeviceType;
+	PVRSRV_DEVICE_CLASS **ppeDeviceClass;
+	IMG_UINT32 **ppui32DeviceIndex;
+
+	pui32DevCount = va_arg(va, IMG_UINT*);
+	ppeDeviceType = va_arg(va, PVRSRV_DEVICE_TYPE**);
+	ppeDeviceClass = va_arg(va, PVRSRV_DEVICE_CLASS**);
+	ppui32DeviceIndex = va_arg(va, IMG_UINT32**);
+
+	if (psDeviceNode->sDevId.eDeviceType != PVRSRV_DEVICE_TYPE_EXT)
+	{
+		**ppeDeviceType = psDeviceNode->sDevId.eDeviceType;
+		**ppeDeviceClass = psDeviceNode->sDevId.eDeviceClass;
+		**ppui32DeviceIndex = psDeviceNode->sDevId.ui32DeviceIndex;
+
+		(*ppeDeviceType)++;
+		(*ppeDeviceClass)++;
+		(*ppui32DeviceIndex)++;
+
+		(*pui32DevCount)++;
+	}
+}
+
+
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVEnumerateDevicesKM
+
+ @Description
+ This function will enumerate all the devices supported by the
+ PowerVR services within the target system.
+ The function returns a list of the device ID structures stored either in
+ the services or constructed in the user mode glue component in certain
+ environments. The number of devices in the list is also returned.
+
+ In a binary layered component which does not support dynamic runtime selection,
+ the glue code should compile to return the supported devices statically,
+ e.g. multiple instances of the same device if multiple devices are supported,
+ or the target combination of Rogue and display device.
+
+ In the case of an environment (for instance) where one Rogue may connect to two
+ display devices this code would enumerate all three devices and even
+ non-dynamic Rogue selection code should retain the facility to parse the list
+ to find the index of the Rogue device
+
+ @output pui32NumDevices :	On success, contains the number of devices present
+ 							in the system
+
+ @output peDeviceType	 :	Pointer to called supplied buffer to receive the
+ 							list of PVRSRV_DEVICE_TYPE
+
+ @output peDeviceClass	 :	Pointer to called supplied buffer to receive the
+ 							list of PVRSRV_DEVICE_CLASS
+
+ @output pui32DeviceIndex:	Pointer to called supplied buffer to receive the
+ 							list of device indexes
+
+ @return PVRSRV_ERROR  :	
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVEnumerateDevicesKM(IMG_UINT32 *pui32NumDevices,
+                                                   PVRSRV_DEVICE_TYPE *peDeviceType,
+                                                   PVRSRV_DEVICE_CLASS *peDeviceClass,
+                                                   IMG_UINT32 *pui32DeviceIndex)
+{
+	PVRSRV_DATA			*psPVRSRVData = PVRSRVGetPVRSRVData();
+	IMG_UINT32 			i;
+
+	if (!pui32NumDevices || !peDeviceType || !peDeviceClass || !pui32DeviceIndex)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVEnumerateDevicesKM: Invalid params"));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/*
+		setup input buffer to be `empty'
+	*/
+	for (i=0; i<PVRSRV_MAX_DEVICES; i++)
+	{
+		peDeviceType[i] = PVRSRV_DEVICE_TYPE_UNKNOWN;
+	}
+
+	/* and zero device count */
+	*pui32NumDevices = 0;
+
+	/*
+		Search through the device list for services managed devices
+		return id info for each device and the number of devices
+		available
+	*/
+	List_PVRSRV_DEVICE_NODE_ForEach_va(psPVRSRVData->psDeviceNodeList,
+									   &PVRSRVEnumerateDevicesKM_ForEachVaCb,
+									   pui32NumDevices,
+									   &peDeviceType,
+									   &peDeviceClass,
+									   &pui32DeviceIndex);
+
+	return PVRSRV_OK;
+}
+
+/* Add work to the cleanup thread work list.
+ * The work item will be executed by the cleanup thread
+ */
+void PVRSRVCleanupThreadAddWork(PVRSRV_CLEANUP_THREAD_WORK *psData)
+{
+	PVRSRV_DATA *psPVRSRVData;
+	PVRSRV_ERROR eError;
+
+	psPVRSRVData = PVRSRVGetPVRSRVData();
+
+	PVR_ASSERT(psData != NULL);
+
+	if(psPVRSRVData->bUnload)
+	{
+		CLEANUP_THREAD_FN pfnFree = psData->pfnFree;
+
+		PVR_DPF((PVR_DBG_MESSAGE, "Cleanup thread has already quit: doing work immediately"));
+
+		eError = pfnFree(psData->pvData);
+
+		if(eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "Failed to free resource "
+						"(callback " IMG_PFN_FMTSPEC "). "
+						"Immediate free will not be retried.",
+						pfnFree));
+		}
+	}
+	else
+	{
+		/* add this work item to the list */
+		OSLockAcquire(psPVRSRVData->hCleanupThreadWorkListLock);
+		dllist_add_to_tail(&psPVRSRVData->sCleanupThreadWorkList, &psData->sNode);
+		OSLockRelease(psPVRSRVData->hCleanupThreadWorkListLock);
+
+		/* signal the cleanup thread to ensure this item gets processed */
+		eError = OSEventObjectSignal(psPVRSRVData->hCleanupEventObject);
+		PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+	}
+}
+
+/* Pop an item from the head of the cleanup thread work list */
+static INLINE DLLIST_NODE *_CleanupThreadWorkListPop(PVRSRV_DATA *psPVRSRVData)
+{
+	DLLIST_NODE *psNode;
+
+	OSLockAcquire(psPVRSRVData->hCleanupThreadWorkListLock);
+	psNode = dllist_get_next_node(&psPVRSRVData->sCleanupThreadWorkList);
+	if(psNode != NULL)
+	{
+		dllist_remove_node(psNode);
+	}
+	OSLockRelease(psPVRSRVData->hCleanupThreadWorkListLock);
+
+	return psNode;
+}
+
+/* Process the cleanup thread work list */
+static IMG_BOOL _CleanupThreadProcessWorkList(PVRSRV_DATA *psPVRSRVData)
+{
+	DLLIST_NODE *psNodeIter, *psNodeLast;
+	PVRSRV_ERROR eError;
+	IMG_BOOL bNeedRetry = IMG_FALSE;
+
+	/* any callback functions which return error will be
+	 * moved to the back of the list, and additional items can be added
+	 * to the list at any time so we ensure we only iterate from the
+	 * head of the list to the current tail (since the tail may always
+	 * be changing)
+	 */
+
+	OSLockAcquire(psPVRSRVData->hCleanupThreadWorkListLock);
+	psNodeLast = psPVRSRVData->sCleanupThreadWorkList.psPrevNode;
+	OSLockRelease(psPVRSRVData->hCleanupThreadWorkListLock);
+
+	do
+	{
+		PVRSRV_CLEANUP_THREAD_WORK *psData;
+
+		psNodeIter = _CleanupThreadWorkListPop(psPVRSRVData);
+
+		if(psNodeIter != NULL)
+		{
+			CLEANUP_THREAD_FN pfnFree;
+
+			psData = IMG_CONTAINER_OF(psNodeIter, PVRSRV_CLEANUP_THREAD_WORK, sNode);
+
+			/* get the function pointer address here so we have acess to it
+			 * in order to report the error in case of failure, without having
+			 * to depend on psData not having been freed
+			 */
+			pfnFree = psData->pfnFree;
+
+			eError = pfnFree(psData->pvData);
+
+			if(eError != PVRSRV_OK)
+			{
+				/* move to back of the list, if this item's
+				 * retry count hasn't hit zero.
+				 */
+				if(psData->ui32RetryCount-- > 0)
+				{
+					OSLockAcquire(psPVRSRVData->hCleanupThreadWorkListLock);
+					dllist_add_to_tail(&psPVRSRVData->sCleanupThreadWorkList, psNodeIter);
+					OSLockRelease(psPVRSRVData->hCleanupThreadWorkListLock);
+					bNeedRetry = IMG_TRUE;
+				}
+				else
+				{
+					PVR_DPF((PVR_DBG_ERROR, "Failed to free resource "
+								"(callback " IMG_PFN_FMTSPEC "). "
+								"Retry limit reached",
+								pfnFree));
+				}
+			}
+		}
+	} while((psNodeIter != NULL) && (psNodeIter != psNodeLast));
+
+	return bNeedRetry;
+}
+
+// #define CLEANUP_DPFL PVR_DBG_WARNING
+#define CLEANUP_DPFL    PVR_DBG_MESSAGE
+
+static IMG_VOID CleanupThread(IMG_PVOID pvData)
+{
+	PVRSRV_DATA *psPVRSRVData = pvData;
+	IMG_BOOL     bRetryWorkList = IMG_FALSE;
+	IMG_HANDLE	 hOSEvent;
+	PVRSRV_ERROR eRc;
+
+	/* Store the process id (pid) of the clean-up thread */
+	psPVRSRVData->cleanupThreadPid = OSGetCurrentProcessID();
+
+	PVR_DPF((CLEANUP_DPFL, "CleanupThread: thread starting... "));
+
+	/* Open an event on the clean up event object so we can listen on it,
+	 * abort the clean up thread and driver if this fails.
+	 */
+	eRc = OSEventObjectOpen(psPVRSRVData->hCleanupEventObject, &hOSEvent);
+	PVR_ASSERT(eRc == PVRSRV_OK);
+
+	eRc = OSLockCreate(&psPVRSRVData->hCleanupThreadWorkListLock, LOCK_TYPE_PASSIVE);
+	PVR_ASSERT(eRc == PVRSRV_OK);
+
+	dllist_init(&psPVRSRVData->sCleanupThreadWorkList);
+
+	/* While the driver is in a good state and is not being unloaded
+	 * try to free any deferred items when signalled
+	 */
+	while ((psPVRSRVData->eServicesState == PVRSRV_SERVICES_STATE_OK) && 
+			(!psPVRSRVData->bUnload))
+	{
+		/* Wait until signalled for deferred clean up OR wait for a
+		 * short period if the previous deferred clean up was not able
+		 * to release all the resources before trying again.
+		 * Bridge lock re-acquired on our behalf before the wait call returns.
+		 */
+		eRc = OSEventObjectWaitTimeout(hOSEvent,
+				bRetryWorkList ?
+				CLEANUP_THREAD_WAIT_RETRY_TIMEOUT :
+				CLEANUP_THREAD_WAIT_SLEEP_TIMEOUT);
+		if (eRc == PVRSRV_ERROR_TIMEOUT)
+		{
+			PVR_DPF((CLEANUP_DPFL, "CleanupThread: wait timeout"));
+		}
+		else if (eRc == PVRSRV_OK)
+		{
+			PVR_DPF((CLEANUP_DPFL, "CleanupThread: wait OK, signal received"));
+		}
+		else
+		{
+			PVR_DPF((PVR_DBG_ERROR, "CleanupThread: wait error %d", eRc));
+		}
+
+		bRetryWorkList = _CleanupThreadProcessWorkList(psPVRSRVData);
+	}
+
+	OSLockDestroy(psPVRSRVData->hCleanupThreadWorkListLock);
+
+	eRc = OSEventObjectClose(hOSEvent);
+	PVR_LOG_IF_ERROR(eRc, "OSEventObjectClose");
+
+	PVR_DPF((CLEANUP_DPFL, "CleanupThread: thread ending... "));
+}
+
+static IMG_VOID DevicesWatchdogThread(IMG_PVOID pvData)
+{
+	PVRSRV_DATA *psPVRSRVData = pvData;
+	PVRSRV_DEVICE_HEALTH_STATUS ePreviousHealthStatus = PVRSRV_DEVICE_HEALTH_STATUS_OK;
+	IMG_HANDLE hOSEvent;
+	PVRSRV_ERROR  eError;
+	IMG_UINT32 ui32Timeout = DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT;
+	PVRSRV_DEV_POWER_STATE ePowerState;
+
+	PVR_DPF((PVR_DBG_MESSAGE, "DevicesWatchdogThread: Power off sleep time: %d.",
+			DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT));
+
+	/* Open an event on the devices watchdog event object so we can listen on it
+	   and abort the devices watchdog thread. */
+	eError = OSEventObjectOpen(psPVRSRVData->hDevicesWatchdogEvObj, &hOSEvent);
+	PVR_LOGRN_IF_ERROR(eError, "OSEventObjectOpen");
+
+	/* Loop continuously checking the device status every few seconds. */
+	while (!psPVRSRVData->bUnload)
+	{
+		IMG_UINT32 i;
+		IMG_BOOL bPwrIsOn = IMG_FALSE;
+
+		/* Wait time between polls (done at the start of the loop to allow devices
+		   to initialise) or for the event signal (shutdown or power on). */
+		eError = OSEventObjectWaitTimeout(hOSEvent, ui32Timeout);
+
+#ifdef PVR_TESTING_UTILS
+		psPVRSRVData->ui32DevicesWdWakeupCounter++;
+#endif
+		if (eError == PVRSRV_OK)
+		{
+			if (psPVRSRVData->bUnload)
+			{
+				PVR_DPF((PVR_DBG_MESSAGE, "DevicesWatchdogThread: Shutdown event received."));
+				break;
+			}
+			else
+			{
+				PVR_DPF((PVR_DBG_MESSAGE, "DevicesWatchdogThread: Power state change event received."));
+			}
+		}
+		else if (eError != PVRSRV_ERROR_TIMEOUT)
+		{
+			/* If timeout do nothing otherwise print warning message. */
+			PVR_DPF((PVR_DBG_ERROR, "DevicesWatchdogThread: "
+					"Error (%d) when waiting for event!", eError));
+		}
+
+		eError = PVRSRVPowerLock();
+		if (eError == PVRSRV_ERROR_RETRY)
+		{
+			/* power lock cannot be acquired at this time (sys power is off) */
+			ui32Timeout = psPVRSRVData->ui32DevicesWatchdogTimeout = DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT;
+		}
+		else if (eError != PVRSRV_OK)
+		{
+			/* any other error is unexpected */
+			PVR_DPF((PVR_DBG_ERROR,"DevicesWatchdogThread: Failed to acquire power lock (%s)", PVRSRVGetErrorStringKM(eError)));
+		}
+		else
+		{
+			/* Check if at least one of the devices is on. */
+			for (i = 0; i < psPVRSRVData->ui32RegisteredDevices && !bPwrIsOn; i++)
+			{
+				if (PVRSRVGetDevicePowerState(i, &ePowerState) == PVRSRV_OK)
+				{
+					bPwrIsOn = ePowerState == PVRSRV_DEV_POWER_STATE_ON;
+					break;
+				}
+			}
+
+			if (bPwrIsOn || psPVRSRVData->ui32DevicesWatchdogPwrTrans)
+			{
+				psPVRSRVData->ui32DevicesWatchdogPwrTrans = 0;
+				ui32Timeout = psPVRSRVData->ui32DevicesWatchdogTimeout = DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT;
+			}
+			else
+			{
+				ui32Timeout = psPVRSRVData->ui32DevicesWatchdogTimeout = DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT;
+			}
+
+			PVRSRVPowerUnlock();
+		}
+
+		for (i = 0;  i < psPVRSRVData->ui32RegisteredDevices; i++)
+		{
+			PVRSRV_DEVICE_NODE* psDeviceNode = psPVRSRVData->apsRegisteredDevNodes[i];
+			PVRSRV_RGXDEV_INFO* psDevInfo = (PVRSRV_RGXDEV_INFO*) psDeviceNode->pvDevice;
+			
+			if (psDeviceNode->pfnUpdateHealthStatus != IMG_NULL)
+			{
+				eError = psDeviceNode->pfnUpdateHealthStatus(psDeviceNode, IMG_TRUE);
+				if (eError != PVRSRV_OK)
+				{
+					PVR_DPF((PVR_DBG_WARNING, "DevicesWatchdogThread: "
+							"Could not check for fatal error (%d)!",
+							eError));
+				}
+			}
+
+			if (psDeviceNode->eHealthStatus != PVRSRV_DEVICE_HEALTH_STATUS_OK)
+			{
+				if (psDeviceNode->eHealthStatus != ePreviousHealthStatus)
+				{
+					if (!(psDevInfo->ui32DeviceFlags & RGXKM_DEVICE_STATE_DISABLE_DW_LOGGING_EN))
+					{
+						PVR_DPF((PVR_DBG_ERROR, "DevicesWatchdogThread: Device not responding!!!"));
+						PVRSRVDebugRequest(DEBUG_REQUEST_VERBOSITY_MAX, IMG_NULL);
+					}
+				}
+			}
+			ePreviousHealthStatus = psDeviceNode->eHealthStatus;
+			
+			/* Attempt to service the HWPerf buffer to regularly transport 
+			 * idle / periodic packets to host buffer. */
+			if (psDeviceNode->pfnServiceHWPerf != IMG_NULL)
+			{
+				eError = psDeviceNode->pfnServiceHWPerf(psDeviceNode);
+				if (eError != PVRSRV_OK)
+				{
+					PVR_DPF((PVR_DBG_WARNING, "DevicesWatchdogThread: "
+							"Error occurred when servicing HWPerf buffer (%d)",
+							eError));
+				}
+			}
+		}
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+		{
+			#if defined(EMULATOR)
+			{
+				SysPrintAndResetFaultStatusRegister();
+			}
+			#endif
+		}
+#endif
+	}
+
+	eError = OSEventObjectClose(hOSEvent);
+	PVR_LOG_IF_ERROR(eError, "OSEventObjectClose");
+}
+
+
+PVRSRV_DATA *PVRSRVGetPVRSRVData()
+{
+	return gpsPVRSRVData;
+}
+
+
+PVRSRV_ERROR IMG_CALLCONV PVRSRVInit(void *hDevice)
+{
+	PVRSRV_ERROR	eError;
+	PVRSRV_SYSTEM_CONFIG *psSysConfig;
+	IMG_UINT32 i;
+
+#if defined (SUPPORT_RGX)
+	/* FIXME find a way to do this without device-specific code here */
+	sRegisterDevice[PVRSRV_DEVICE_TYPE_RGX] = RGXRegisterDevice;
+#endif
+
+	SET_LOG2_PAGESIZE(OSGetPageShift());
+
+	eError = PhysHeapInit();
+	if (eError != PVRSRV_OK)
+	{
+		goto Error;
+	}
+
+	/* Get the system config */
+	eError = SysCreateConfigData(&psSysConfig, hDevice);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	/* Save to global pointer for later */
+	gpsSysConfig = psSysConfig;
+
+    /*
+     * Allocate the device-independent data
+     */
+    gpsPVRSRVData = OSAllocMem(sizeof(*gpsPVRSRVData));
+    if (gpsPVRSRVData == IMG_NULL)
+    {
+        return PVRSRV_ERROR_OUT_OF_MEMORY;
+    }
+    OSMemSet(gpsPVRSRVData, 0, sizeof(*gpsPVRSRVData));
+	gpsPVRSRVData->ui32NumDevices = psSysConfig->uiDeviceCount;
+
+	for (i=0;i<SYS_DEVICE_COUNT;i++)
+	{
+		gpsPVRSRVData->sDeviceID[i].uiID = i;
+		gpsPVRSRVData->sDeviceID[i].bInUse = IMG_FALSE;
+	}
+
+	/*
+	 * Register the physical memory heaps
+	 */
+	PVR_ASSERT(psSysConfig->ui32PhysHeapCount <= SYS_PHYS_HEAP_COUNT);
+	for (i=0;i<psSysConfig->ui32PhysHeapCount;i++)
+	{
+		eError = PhysHeapRegister(&psSysConfig->pasPhysHeaps[i],
+								  &gpsPVRSRVData->apsRegisteredPhysHeaps[i]);
+		if (eError != PVRSRV_OK)
+		{
+			goto Error;
+		}
+		gpsPVRSRVData->ui32RegisteredPhysHeaps++;
+	}
+
+	/* Init any OS specific's */
+	eError = OSInitEnvData();
+	if (eError != PVRSRV_OK)
+	{
+		goto Error;
+	}
+
+#if defined(PVR_RI_DEBUG)
+	RIInitKM();
+#endif
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+	eError = DevicememHistoryInitKM();
+
+	if(eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Failed to initialise DevicememHistory"));
+		goto Error;
+	}
+#endif
+
+	eError = PMRInit();
+	if (eError != PVRSRV_OK)
+	{
+		goto Error;
+	}
+
+#if defined(SUPPORT_DISPLAY_CLASS)
+	eError = DCInit();
+	if (eError != PVRSRV_OK)
+	{
+		goto Error;
+	}
+#endif
+
+	/* Initialise Power Manager Lock */
+	eError = OSLockCreate(&gpsPVRSRVData->hPowerLock, LOCK_TYPE_PASSIVE);
+	if (eError != PVRSRV_OK)
+	{
+		goto Error;
+	}
+
+	/* Initialise system power state */
+	gpsPVRSRVData->eCurrentPowerState = PVRSRV_SYS_POWER_STATE_ON;
+	gpsPVRSRVData->eFailedPowerState = PVRSRV_SYS_POWER_STATE_Unspecified;
+
+	/* Initialise overall system state */
+	gpsPVRSRVData->eServicesState = PVRSRV_SERVICES_STATE_OK;
+
+	/* Create an event object */
+	eError = OSEventObjectCreate("PVRSRV_GLOBAL_EVENTOBJECT", &gpsPVRSRVData->hGlobalEventObject);
+	if (eError != PVRSRV_OK)
+	{
+		goto Error;
+	}
+	gpsPVRSRVData->ui32GEOConsecutiveTimeouts = 0;
+
+	/* initialise list of command complete notifiers */
+	dllist_init(&sCmdCompNotifyHead);
+
+	/* Create a lock of the list notifiers */
+	eError = OSWRLockCreate(&hNotifyLock);
+	if (eError != PVRSRV_OK)
+	{
+		goto Error;
+	}
+
+	/* Create a lock of the debug notifiers */
+	eError = OSWRLockCreate(&g_hDbgNotifyLock);
+	if (eError != PVRSRV_OK)
+	{
+		goto Error;
+	}
+
+	eError = PVRSRVRegisterDbgTable(g_aui32DebugOrderTable,
+									sizeof(g_aui32DebugOrderTable)/sizeof(g_aui32DebugOrderTable[0]),
+									&g_hDebugTable);
+	if (eError != PVRSRV_OK)
+	{
+		goto Error;
+	}
+
+	PVRSRVRegisterDbgRequestNotify(&g_hDbgSysNotify, &_SysDebugRequestNotify, DEBUG_REQUEST_SYS, gpsPVRSRVData);
+
+	eError = ServerSyncInit();
+	if (eError != PVRSRV_OK)
+	{
+		goto Error;
+	}
+
+	/* Initialise pdump */
+	eError = PDUMPINIT();
+	if(eError != PVRSRV_OK)
+	{
+		goto Error;
+	}
+
+	g_ui32InitFlags |= INIT_DATA_ENABLE_PDUMPINIT;
+
+	/* Register all the system devices */
+	for (i=0;i<psSysConfig->uiDeviceCount;i++)
+	{
+		if (PVRSRVRegisterDevice(&psSysConfig->pasDevices[i]) != PVRSRV_OK)
+		{
+			/* FIXME: We should unregister devices if we fail */
+			return eError;
+		}
+
+		/* Initialise the Transport Layer.
+		 * Need to remember the RGX device node for use in the Transport Layer
+		 * when allocating stream buffers that are shared with clients.
+		 * Note however when the device is an LMA device our buffers will not
+		 * be in host memory but card memory.
+		 */
+		if (gpsPVRSRVData->apsRegisteredDevNodes[gpsPVRSRVData->ui32RegisteredDevices-1]->psDevConfig->eDeviceType == PVRSRV_DEVICE_TYPE_RGX)
+		{
+			eError = TLInit(gpsPVRSRVData->apsRegisteredDevNodes[gpsPVRSRVData->ui32RegisteredDevices-1]);
+			PVR_LOGG_IF_ERROR(eError, "TLInit", Error);
+		}
+	}
+
+	eError = PVRSRVHandleInit();
+	if(eError != PVRSRV_OK)
+	{
+		goto Error;
+	}
+
+	eError = PVRSRVConnectionInit();
+	if(eError != PVRSRV_OK)
+	{
+		goto Error;
+	}
+
+	/* Create the clean up event object */
+	eError = OSEventObjectCreate("PVRSRV_CLEANUP_EVENTOBJECT", &gpsPVRSRVData->hCleanupEventObject);
+	PVR_LOGG_IF_ERROR(eError, "OSEventObjectCreate", Error);
+
+	/* Create a thread which is used to do the deferred cleanup */
+	eError = OSThreadCreatePriority(&gpsPVRSRVData->hCleanupThread,
+							"pvr_defer_free",
+							CleanupThread,
+							gpsPVRSRVData,
+							LOWEST_PRIORITY);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVInit: Failed to create deferred cleanup thread"));
+		goto Error;
+	}
+
+	/* Create the devices watchdog event object */
+	eError = OSEventObjectCreate("PVRSRV_DEVICESWATCHDOG_EVENTOBJECT", &gpsPVRSRVData->hDevicesWatchdogEvObj);
+	PVR_LOGG_IF_ERROR(eError, "OSEventObjectCreate", Error);
+
+	/* Create a thread which is used to detect fatal errors */
+	eError = OSThreadCreate(&gpsPVRSRVData->hDevicesWatchdogThread,
+							"pvr_devices_wd_thread",
+							DevicesWatchdogThread,
+							gpsPVRSRVData);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVInit: Failed to create devices watchdog thread"));
+		goto Error;
+	}
+
+#if defined(PVR_TESTING_UTILS)
+	TUtilsInit();
+#endif
+#if defined(PVR_DVFS)
+	eError = InitDVFS(gpsPVRSRVData, hDevice);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVInit: Failed to start DVFS"));
+		goto Error;
+	}
+#endif
+	return eError;
+
+Error:
+	PVRSRVDeInit(hDevice);
+	return eError;
+}
+
+
+IMG_VOID IMG_CALLCONV PVRSRVDeInit(IMG_VOID *hDevice)
+{
+	PVRSRV_DATA		*psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_ERROR	eError;
+	IMG_UINT32		i;
+
+	if (gpsPVRSRVData == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeInit failed - invalid gpsPVRSRVData"));
+		return;
+	}
+
+#if defined(PVR_TESTING_UTILS)
+	TUtilsDeinit();
+#endif
+
+#if defined (SUPPORT_RGX)
+	sUnregisterDevice[PVRSRV_DEVICE_TYPE_RGX] = DevDeInitRGX;
+#endif
+
+	psPVRSRVData->bUnload = IMG_TRUE;
+	if (psPVRSRVData->hGlobalEventObject)
+	{
+		OSEventObjectSignal(psPVRSRVData->hGlobalEventObject);
+	}
+
+	/* Stop and cleanup the devices watchdog thread */
+	if (psPVRSRVData->hDevicesWatchdogThread)
+	{
+		if (psPVRSRVData->hDevicesWatchdogEvObj)
+		{
+			eError = OSEventObjectSignal(psPVRSRVData->hDevicesWatchdogEvObj);
+			PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+		}
+		eError = OSThreadDestroy(gpsPVRSRVData->hDevicesWatchdogThread);
+		gpsPVRSRVData->hDevicesWatchdogThread = IMG_NULL;
+		PVR_LOG_IF_ERROR(eError, "OSThreadDestroy");
+	}
+
+	if (gpsPVRSRVData->hDevicesWatchdogEvObj)
+	{
+		eError = OSEventObjectDestroy(gpsPVRSRVData->hDevicesWatchdogEvObj);
+		gpsPVRSRVData->hDevicesWatchdogEvObj = IMG_NULL;
+		PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy");
+	}
+
+	/* Stop and cleanup the deferred clean up thread, event object and
+	 * deferred context list.
+	 */
+	if (psPVRSRVData->hCleanupThread)
+	{
+		if (psPVRSRVData->hCleanupEventObject)
+		{
+			eError = OSEventObjectSignal(psPVRSRVData->hCleanupEventObject);
+			PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+		}
+		eError = OSThreadDestroy(gpsPVRSRVData->hCleanupThread);
+		gpsPVRSRVData->hCleanupThread = IMG_NULL;
+		PVR_LOG_IF_ERROR(eError, "OSThreadDestroy");
+	}
+
+	if (gpsPVRSRVData->hCleanupEventObject)
+	{
+		eError = OSEventObjectDestroy(gpsPVRSRVData->hCleanupEventObject);
+		gpsPVRSRVData->hCleanupEventObject = IMG_NULL;
+		PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy");
+	}
+
+	eError = PVRSRVConnectionDeInit();
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeInit: PVRSRVConnectionDataDeInit failed"));
+	}
+
+	eError = PVRSRVHandleDeInit();
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeInit: PVRSRVHandleDeInit failed"));
+	}
+
+	/* Unregister all the system devices */
+	for (i=0;i<psPVRSRVData->ui32RegisteredDevices;i++)
+	{
+		PVRSRV_DEVICE_NODE *psDeviceNode = psPVRSRVData->apsRegisteredDevNodes[i];
+
+		/* set device state */
+		psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_DEINIT;
+
+		/* Counter part to what gets done in PVRSRVFinaliseSystem */
+		if (psDeviceNode->hSyncPrimContext != IMG_NULL)
+		{
+			if (psDeviceNode->psSyncPrim != IMG_NULL)
+			{
+				/* Free general pupose sync primitive */
+				SyncPrimFree(psDeviceNode->psSyncPrim);
+				psDeviceNode->psSyncPrim = IMG_NULL;
+			}
+
+			SyncPrimContextDestroy(psDeviceNode->hSyncPrimContext);
+			psDeviceNode->hSyncPrimContext = IMG_NULL;
+		}
+
+		PVRSRVUnregisterDevice(psDeviceNode);
+		psPVRSRVData->apsRegisteredDevNodes[i] = IMG_NULL;
+	}
+	SysDestroyConfigData(gpsSysConfig);
+
+#if defined(PVR_DVFS)
+	eError = DeinitDVFS(gpsPVRSRVData, hDevice);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVInit: Failed to suspend DVFS"));
+	}
+#endif
+
+	/* Clean up Transport Layer resources that remain. 
+	 * Done after RGX node clean up as HWPerf stream is destroyed during 
+	 * this
+	 */
+	TLDeInit();
+
+	ServerSyncDeinit();
+
+	if (g_hDbgSysNotify)
+	{
+		PVRSRVUnregisterDbgRequestNotify(g_hDbgSysNotify);
+	}
+
+	if (g_hDebugTable)
+	{
+		PVRSRVUnregisterDbgTable(g_hDebugTable);
+	}
+
+	if (g_hDbgNotifyLock)
+	{
+		OSWRLockDestroy(g_hDbgNotifyLock);
+	}
+
+	if (hNotifyLock)
+	{
+		OSWRLockDestroy(hNotifyLock);
+	}
+
+	/* deinitialise pdump */
+	if ((g_ui32InitFlags & INIT_DATA_ENABLE_PDUMPINIT) > 0)
+	{
+		PDUMPDEINIT();
+	}
+	
+	/* destroy event object */
+	if (gpsPVRSRVData->hGlobalEventObject)
+	{
+		OSEventObjectDestroy(gpsPVRSRVData->hGlobalEventObject);
+		gpsPVRSRVData->hGlobalEventObject = IMG_NULL;
+	}
+
+	/* Check there is no notify function */
+	if (!dllist_is_empty(&sCmdCompNotifyHead))
+	{
+		PDLLIST_NODE psNode = dllist_get_next_node(&sCmdCompNotifyHead);
+
+		/* some device did not unregistered properly */
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeInit: Notify list for cmd complete is not empty!!"));
+
+		/* clean the nodes anyway */
+		while (psNode != IMG_NULL)
+		{
+			PVRSRV_CMDCOMP_NOTIFY	*psNotify;
+
+			dllist_remove_node(psNode);
+			
+			psNotify = IMG_CONTAINER_OF(psNode, PVRSRV_CMDCOMP_NOTIFY, sListNode);
+			OSFreeMem(psNotify);
+
+			psNode = dllist_get_next_node(&sCmdCompNotifyHead);
+		}
+	}
+
+	OSLockDestroy(gpsPVRSRVData->hPowerLock);
+
+#if defined(SUPPORT_DISPLAY_CLASS)
+	eError = DCDeInit();
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeInit: DCInit() failed"));
+	}
+#endif
+
+
+    eError = PMRDeInit();
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeInit: PMRDeInit() failed"));
+	}
+
+#if defined(PVR_RI_DEBUG)
+	RIDeInitKM();
+#endif
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+	DevicememHistoryDeInitKM();
+#endif
+	
+	OSDeInitEnvData();
+
+	for (i=0;i<gpsPVRSRVData->ui32RegisteredPhysHeaps;i++)
+	{
+		PhysHeapUnregister(gpsPVRSRVData->apsRegisteredPhysHeaps[i]);
+	}
+	eError = PhysHeapDeinit();
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeInit: PhysHeapDeinit() failed"));
+	}
+
+	OSFreeMem(gpsPVRSRVData);
+	gpsPVRSRVData = IMG_NULL;
+}
+
+PVRSRV_ERROR LMA_MMUPxAlloc(PVRSRV_DEVICE_NODE *psDevNode, IMG_SIZE_T uiSize,
+							Px_HANDLE *psMemHandle, IMG_DEV_PHYADDR *psDevPAddr)
+{
+	IMG_BOOL bSuccess;
+	RA_BASE_T uiCardAddr;
+	RA_LENGTH_T uiActualSize;
+	RA_ARENA	*pArena=psDevNode->psLocalDevMemArena;
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+	IMG_UINT32  ui32OSid = 0;
+#endif
+
+	PVR_ASSERT((uiSize & OSGetPageMask()) == 0);
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+{
+	IMG_UINT32	ui32OSidReg = 0;
+
+	IMG_PID		pId = OSGetCurrentProcessID();
+
+	RetrieveOSidsfromPidList(pId, &ui32OSid, &ui32OSidReg);
+
+	pArena = psDevNode->psOSidSubArena[ui32OSid];
+}
+#endif
+
+	bSuccess = RA_Alloc(pArena,
+						uiSize,
+						0,							/* No flags */
+						OSGetPageSize(),
+						&uiCardAddr,
+						&uiActualSize,
+						IMG_NULL);					/* No private handle */
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+{
+	PVR_DPF((PVR_DBG_MESSAGE,"(GPU Virtualization Validation): LMA_MMUPxAlloc: Address:%llu, size:%llu", uiCardAddr,uiActualSize));
+}
+#endif
+
+	PVR_ASSERT(uiSize == uiActualSize);
+
+	psMemHandle->u.ui64Handle = uiCardAddr;
+	psDevPAddr->uiAddr = (IMG_UINT64) uiCardAddr;
+
+	if (bSuccess)
+	{
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+	/* Allocation is done a page at a time */
+	PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA, OSGetPageSize());
+#else
+		IMG_CPU_PHYADDR	sCpuPAddr;
+		sCpuPAddr.uiAddr = psDevPAddr->uiAddr;
+
+		PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA,
+															 IMG_NULL,
+															 sCpuPAddr,
+															 OSGetPageSize(),
+															 IMG_NULL);
+#endif
+#endif
+		return PVRSRV_OK;
+	}
+	else
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+}
+
+IMG_VOID LMA_MMUPxFree(PVRSRV_DEVICE_NODE *psDevNode, Px_HANDLE *psMemHandle)
+{
+	RA_BASE_T uiCardAddr = (RA_BASE_T) psMemHandle->u.ui64Handle;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+	/* Allocation is done a page at a time */
+	PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA, OSGetPageSize());
+#else
+		PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA, (IMG_UINT64)uiCardAddr);
+#endif
+#endif
+
+	RA_Free(psDevNode->psLocalDevMemArena, uiCardAddr);
+}
+
+PVRSRV_ERROR LMA_MMUPxMap(PVRSRV_DEVICE_NODE *psDevNode, Px_HANDLE *psMemHandle,
+							IMG_SIZE_T uiSize, IMG_DEV_PHYADDR *psDevPAddr,
+							IMG_VOID **pvPtr)
+{
+	IMG_CPU_PHYADDR sCpuPAddr;
+	PVR_UNREFERENCED_PARAMETER(psMemHandle);
+	PVR_UNREFERENCED_PARAMETER(uiSize);
+
+	PhysHeapDevPAddrToCpuPAddr(psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL], 1, &sCpuPAddr, psDevPAddr);
+	*pvPtr = OSMapPhysToLin(sCpuPAddr,
+							OSGetPageSize(),
+							0);
+	if (*pvPtr == IMG_NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+	else
+	{
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+		/* Mapping is done a page at a time */
+		PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA, OSGetPageSize());
+#else
+		{
+			PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA,
+										 *pvPtr,
+										 sCpuPAddr,
+										 uiSize,
+										 IMG_NULL);
+		}
+#endif
+#endif
+		return PVRSRV_OK;
+	}
+}
+
+IMG_VOID LMA_MMUPxUnmap(PVRSRV_DEVICE_NODE *psDevNode, Px_HANDLE *psMemHandle,
+						IMG_VOID *pvPtr)
+{
+	PVR_UNREFERENCED_PARAMETER(psMemHandle);
+	PVR_UNREFERENCED_PARAMETER(psDevNode);
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+		/* Mapping is done a page at a time */
+		PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA, OSGetPageSize());
+#else
+	PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA, (IMG_UINT64)(IMG_UINTPTR_T)pvPtr);
+#endif
+#endif
+
+	OSUnMapPhysToLin(pvPtr, OSGetPageSize(), 0);
+}
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+static PVRSRV_ERROR CreateLMASubArenas(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	IMG_UINT	uiCounter=0;
+
+	for (uiCounter = 0; uiCounter < GPUVIRT_VALIDATION_NUM_OS; uiCounter++)
+	{
+		psDeviceNode->psOSidSubArena[uiCounter] =
+			RA_Create(psDeviceNode->szRAName,
+					  OSGetPageShift(),			/* Use host page size, keeps things simple */
+					  RA_LOCKCLASS_0,			/* This arena doesn't use any other arenas. */
+					  IMG_NULL,					/* No Import */
+					  IMG_NULL,					/* No free import */
+					  IMG_NULL);				/* No import handle */
+
+		if (psDeviceNode->psOSidSubArena[uiCounter] == IMG_NULL)
+		{
+			return PVRSRV_ERROR_OUT_OF_MEMORY;
+		}
+	}
+
+	PVR_DPF((PVR_DBG_MESSAGE,"\n(GPU Virtualization Validation): Calling RA_Add with base %u and size %u \n",0, GPUVIRT_SIZEOF_ARENA0));
+
+	/* Arena creation takes place earlier than when the client side reads the apphints and transfers them over the bridge. Since we don't
+	 * know how the memory is going to be partitioned and since we already need some memory for all the initial allocations that take place,
+	 * we populate the first sub-arena (0) with a span of 64 megabytes. This has been shown to be enough even for cases where EWS is allocated
+	 * memory in this sub arena and then a multi app example is executed. This pre-allocation also means that consistency must be maintained
+	 * between apphints and reality. That's why in the Apphints, the OSid0 region must start from 0 and end at 3FFFFFF. */
+
+	if (!RA_Add(psDeviceNode->psOSidSubArena[0], 0, GPUVIRT_SIZEOF_ARENA0, 0 , IMG_NULL ))
+	{
+		RA_Delete(psDeviceNode->psOSidSubArena[0]);
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	psDeviceNode->psLocalDevMemArena = psDeviceNode->psOSidSubArena[0];
+
+	return PVRSRV_OK;
+}
+
+IMG_VOID PopulateLMASubArenas(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT32 aui32OSidMin[GPUVIRT_VALIDATION_NUM_OS][GPUVIRT_VALIDATION_NUM_REGIONS], IMG_UINT32 aui32OSidMax[GPUVIRT_VALIDATION_NUM_OS][GPUVIRT_VALIDATION_NUM_REGIONS])
+{
+	IMG_UINT	uiCounter;
+
+	/* Since Sub Arena[0] has been populated already, now we populate the rest starting from 1*/
+
+	for (uiCounter = 1; uiCounter < GPUVIRT_VALIDATION_NUM_OS; uiCounter++)
+	{
+		PVR_DPF((PVR_DBG_MESSAGE,"\n[GPU Virtualization Validation]: Calling RA_Add with base %u and size %u \n",aui32OSidMin[uiCounter][0], aui32OSidMax[uiCounter][0]-aui32OSidMin[uiCounter][0]+1));
+
+		if (!RA_Add(psDeviceNode->psOSidSubArena[uiCounter], aui32OSidMin[uiCounter][0], aui32OSidMax[uiCounter][0]-aui32OSidMin[uiCounter][0]+1, 0, IMG_NULL))
+		{
+			goto error;
+		}
+	}
+
+	#if defined(EMULATOR)
+	{
+		SysSetOSidRegisters(aui32OSidMin, aui32OSidMax);
+	}
+	#endif
+
+	return ;
+
+error:
+	for (uiCounter = 0; uiCounter < GPUVIRT_VALIDATION_NUM_OS; uiCounter++)
+	{
+		RA_Delete(psDeviceNode->psOSidSubArena[uiCounter]);
+	}
+
+	return ;
+}
+
+#endif
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVRegisterDevice
+
+ @Description
+
+ registers a device with the system
+
+ @Input	   psDevConfig			: Device configuration structure
+
+ @Return   PVRSRV_ERROR  :
+
+******************************************************************************/
+static PVRSRV_ERROR IMG_CALLCONV PVRSRVRegisterDevice(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	PVRSRV_DATA				*psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_ERROR			eError;
+	PVRSRV_DEVICE_NODE		*psDeviceNode;
+	PVRSRV_DEVICE_PHYS_HEAP	physHeapIndex;
+
+	/* Allocate device node */
+	psDeviceNode = OSAllocMem(sizeof(PVRSRV_DEVICE_NODE));
+	if (psDeviceNode == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterDevice : Failed to alloc memory for psDeviceNode"));
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e0;
+	}
+	OSMemSet(psDeviceNode, 0, sizeof(PVRSRV_DEVICE_NODE));
+
+	/* set device state */
+	psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_INIT;
+
+	eError = PhysHeapAcquire(psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL], &psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL]);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterDevice : Failed to acquire PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL physical memory heap"));
+		goto e1;
+	}
+	eError = PhysHeapAcquire(psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL], &psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL]);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterDevice : Failed to acquire PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL physical memory heap"));
+		goto e1;
+	}
+
+	/* Do we have card memory? If so create an RA to manage it */
+	if (PhysHeapGetType(psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL]) == PHYS_HEAP_TYPE_LMA)
+	{
+		RA_BASE_T uBase;
+		RA_LENGTH_T uSize;
+		IMG_CPU_PHYADDR sCpuPAddr;
+		IMG_UINT64 ui64Size;
+
+		eError = PhysHeapGetAddress(psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL], &sCpuPAddr);
+		if (eError != PVRSRV_OK)
+		{
+			/* We can only get here if there is a bug in this module */
+			PVR_ASSERT(IMG_FALSE);
+			return eError;
+		}
+
+		eError = PhysHeapGetSize(psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL], &ui64Size);
+		if (eError != PVRSRV_OK)
+		{
+			/* We can only get here if there is a bug in this module */
+			PVR_ASSERT(IMG_FALSE);
+			return eError;
+		}
+
+
+		PVR_DPF((PVR_DBG_MESSAGE, "Creating RA for card memory 0x%016llx-0x%016llx",
+				 (IMG_UINT64) sCpuPAddr.uiAddr, sCpuPAddr.uiAddr + ui64Size));
+
+		OSSNPrintf(psDeviceNode->szRAName, sizeof(psDeviceNode->szRAName),
+											"%s card mem",
+											psDevConfig->pszName);
+
+		uBase = 0;
+		if (psDevConfig->uiFlags & PVRSRV_DEVICE_CONFIG_LMA_USE_CPU_ADDR)
+		{
+			uBase = sCpuPAddr.uiAddr;
+		}
+
+		uSize = (RA_LENGTH_T) ui64Size;
+		PVR_ASSERT(uSize == ui64Size);
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+{
+		eError = CreateLMASubArenas(psDeviceNode);
+		if (eError != PVRSRV_OK)
+		{
+			goto e2;
+		}
+}
+#else
+		psDeviceNode->psLocalDevMemArena =
+			RA_Create(psDeviceNode->szRAName,
+						OSGetPageShift(),	/* Use host page size, keeps things simple */
+						RA_LOCKCLASS_0,     /* This arena doesn't use any other arenas. */
+						IMG_NULL,			/* No Import */
+						IMG_NULL,			/* No free import */
+						IMG_NULL);			/* No import handle */
+
+		if (psDeviceNode->psLocalDevMemArena == IMG_NULL)
+		{
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto e2;
+		}
+
+		if (!RA_Add(psDeviceNode->psLocalDevMemArena, uBase, uSize, 0 /* No flags */, IMG_NULL /* No private data */))
+		{
+			RA_Delete(psDeviceNode->psLocalDevMemArena);
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto e2;		
+		}
+#endif
+
+
+		psDeviceNode->pfnMMUPxAlloc = LMA_MMUPxAlloc;
+		psDeviceNode->pfnMMUPxFree = LMA_MMUPxFree;
+		psDeviceNode->pfnMMUPxMap = LMA_MMUPxMap;
+		psDeviceNode->pfnMMUPxUnmap = LMA_MMUPxUnmap;
+		psDeviceNode->uiMMUPxLog2AllocGran = OSGetPageShift();
+		psDeviceNode->pfnCreateRamBackedPMR[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL] = PhysmemNewLocalRamBackedPMR;
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "===== OS System memory only, no local card memory"));
+
+		/* else we only have OS system memory */
+		psDeviceNode->pfnMMUPxAlloc = OSMMUPxAlloc;
+		psDeviceNode->pfnMMUPxFree = OSMMUPxFree;
+		psDeviceNode->pfnMMUPxMap = OSMMUPxMap;
+		psDeviceNode->pfnMMUPxUnmap = OSMMUPxUnmap;
+		psDeviceNode->pfnCreateRamBackedPMR[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL] = PhysmemNewOSRamBackedPMR;
+	}
+
+	if (PhysHeapGetType(psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL]) == PHYS_HEAP_TYPE_LMA)
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "===== Local card memory only, no OS system memory"));
+		psDeviceNode->pfnCreateRamBackedPMR[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL] = PhysmemNewLocalRamBackedPMR;
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "===== OS System memory, 2nd phys heap"));
+		psDeviceNode->pfnCreateRamBackedPMR[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL] = PhysmemNewOSRamBackedPMR;
+	}
+
+	/*
+		FIXME: We might want PT memory to come from a different heap so it
+		would make sense to specify the HeapID for it, but need to think
+		if/how this would affect how we do the CPU <> Dev physical address
+		translation.
+	*/
+	psDeviceNode->pszMMUPxPDumpMemSpaceName = PhysHeapPDumpMemspaceName(psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL]);
+	psDeviceNode->uiMMUPxLog2AllocGran = OSGetPageShift();
+
+	/* Add the devnode to our list so we can unregister it later */
+	psPVRSRVData->apsRegisteredDevNodes[psPVRSRVData->ui32RegisteredDevices++] = psDeviceNode;
+
+	psDeviceNode->psDevConfig = psDevConfig;
+
+	/* all devices need a unique identifier */
+	AllocateDeviceID(psPVRSRVData, &psDeviceNode->sDevId.ui32DeviceIndex);
+
+	/* Device type and class will be setup during this callback */
+	eError = sRegisterDevice[psDevConfig->eDeviceType](psDeviceNode);
+	if (eError != PVRSRV_OK)
+	{
+		OSFreeMem(psDeviceNode);
+		/*not nulling pointer, out of scope*/
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterDevice : Failed to register device"));
+		eError = PVRSRV_ERROR_DEVICE_REGISTER_FAILED;
+		goto e3;
+	}
+
+
+	PVR_DPF((PVR_DBG_MESSAGE, "Registered device %d of type %d", psDeviceNode->sDevId.ui32DeviceIndex, psDeviceNode->sDevId.eDeviceType));
+	PVR_DPF((PVR_DBG_MESSAGE, "Register bank address = 0x%08lx", (unsigned long)psDevConfig->sRegsCpuPBase.uiAddr));
+	PVR_DPF((PVR_DBG_MESSAGE, "IRQ = %d", psDevConfig->ui32IRQ));
+	
+	/* and finally insert the device into the dev-list */
+	List_PVRSRV_DEVICE_NODE_Insert(&psPVRSRVData->psDeviceNodeList, psDeviceNode);
+
+	/* set device state */
+	psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_ACTIVE;
+
+	return PVRSRV_OK;
+e3:
+	if (psDeviceNode->psLocalDevMemArena)
+	{
+		RA_Delete(psDeviceNode->psLocalDevMemArena);
+	}
+e2:
+e1:
+	for(physHeapIndex=0; physHeapIndex < PVRSRV_DEVICE_PHYS_HEAP_LAST; physHeapIndex++)
+	{
+		if (psDeviceNode->apsPhysHeap[physHeapIndex])
+		{
+			PhysHeapRelease(psDeviceNode->apsPhysHeap[physHeapIndex]);
+		}
+	}
+	OSFreeMem(psDeviceNode);
+e0:
+	return eError;
+}
+
+PVRSRV_ERROR IMG_CALLCONV PVRSRVSysPrePowerState(PVRSRV_SYS_POWER_STATE eNewPowerState, IMG_BOOL bForced)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	PVR_UNREFERENCED_PARAMETER(bForced);
+
+	if (gpsSysConfig->pfnSysPrePowerState)
+	{
+		eError = gpsSysConfig->pfnSysPrePowerState(eNewPowerState);
+	}
+	return eError;
+}
+
+PVRSRV_ERROR IMG_CALLCONV PVRSRVSysPostPowerState(PVRSRV_SYS_POWER_STATE eNewPowerState, IMG_BOOL bForced)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	PVR_UNREFERENCED_PARAMETER(bForced);
+
+	if (gpsSysConfig->pfnSysPostPowerState)
+	{
+		eError = gpsSysConfig->pfnSysPostPowerState(eNewPowerState);
+	}
+	return eError;
+}
+
+PVRSRV_ERROR IMG_CALLCONV PVRSRVRegisterExtDevice(PVRSRV_DEVICE_NODE *psDeviceNode,
+													IMG_UINT32 *pui32DeviceIndex,
+													IMG_UINT32 ui32PhysHeapID)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	PHYS_HEAP   *psPhysHeapTmp;
+	PVRSRV_DEVICE_PHYS_HEAP eDevPhysHeap;
+	PVRSRV_ERROR eError;
+
+	eError = PhysHeapAcquire(ui32PhysHeapID, &psPhysHeapTmp);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterExtDevice: Failed to acquire physical memory heap"));
+		goto e0;
+	}
+	if (PhysHeapGetType(psPhysHeapTmp) == PHYS_HEAP_TYPE_LMA)
+	{
+		eDevPhysHeap = PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL;
+	}
+	else
+	{
+		eDevPhysHeap = PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL;
+	}
+	psDeviceNode->apsPhysHeap[eDevPhysHeap] = psPhysHeapTmp;
+
+	/* allocate a unique device id */
+	eError = AllocateDeviceID(psPVRSRVData, &psDeviceNode->sDevId.ui32DeviceIndex);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterExtDevice: Failed to allocate Device ID"));
+		goto e1;
+	}
+
+	if (pui32DeviceIndex)
+	{
+		*pui32DeviceIndex = psDeviceNode->sDevId.ui32DeviceIndex;
+	}
+
+	List_PVRSRV_DEVICE_NODE_Insert(&psPVRSRVData->psDeviceNodeList, psDeviceNode);
+
+	return PVRSRV_OK;
+e1:
+	PhysHeapRelease(psDeviceNode->apsPhysHeap[eDevPhysHeap]);
+e0:
+	return eError;
+}
+
+IMG_VOID IMG_CALLCONV PVRSRVUnregisterExtDevice(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_DEVICE_PHYS_HEAP eDevPhysHeap;
+
+	List_PVRSRV_DEVICE_NODE_Remove(psDeviceNode);
+	(IMG_VOID)FreeDeviceID(psPVRSRVData, psDeviceNode->sDevId.ui32DeviceIndex);
+	for (eDevPhysHeap = 0; eDevPhysHeap < PVRSRV_DEVICE_PHYS_HEAP_LAST; eDevPhysHeap++)
+	{
+		if (psDeviceNode->apsPhysHeap[eDevPhysHeap])
+		{
+			PhysHeapRelease(psDeviceNode->apsPhysHeap[eDevPhysHeap]);
+		}
+	}
+}
+
+static PVRSRV_ERROR PVRSRVFinaliseSystem_SetPowerState_AnyCb(PVRSRV_DEVICE_NODE *psDeviceNode, va_list va)
+{
+	PVRSRV_ERROR eError;
+	PVRSRV_DEV_POWER_STATE ePowState;
+
+	ePowState = va_arg(va, PVRSRV_DEV_POWER_STATE);
+
+	eError = PVRSRVSetDevicePowerStateKM(psDeviceNode->sDevId.ui32DeviceIndex,
+										 ePowState,
+										 IMG_TRUE);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVFinaliseSystem: Failed PVRSRVSetDevicePowerStateKM call (%s, device index: %d)", 
+						PVRSRVGetErrorStringKM(eError),
+						psDeviceNode->sDevId.ui32DeviceIndex));
+	}
+
+	return eError;
+}
+
+/*wraps the PVRSRVDevInitCompatCheck call and prints a debugging message if failed*/
+static PVRSRV_ERROR PVRSRVFinaliseSystem_CompatCheck_Any_va(PVRSRV_DEVICE_NODE *psDeviceNode, va_list va)
+{
+	PVRSRV_ERROR eError;
+	IMG_UINT32 *pui32ClientBuildOptions;
+
+	pui32ClientBuildOptions = va_arg(va, IMG_UINT32*);
+
+	eError = PVRSRVDevInitCompatCheck(psDeviceNode, *pui32ClientBuildOptions);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVFinaliseSystem: Failed PVRSRVDevInitCompatCheck call (device index: %d)", psDeviceNode->sDevId.ui32DeviceIndex));
+	}
+	return eError;
+}
+
+static PVRSRV_ERROR PVRSRVFinaliseSystem_SetIdleState_AnyCb(PVRSRV_DEVICE_NODE *psDeviceNode, va_list va)
+{
+	PVRSRV_ERROR eError;
+	PVRSRV_DEV_POWER_STATE ePowState;
+
+	ePowState = va_arg(va, PVRSRV_DEV_POWER_STATE);
+
+	eError = PVRSRVDeviceIdleRequestKM(IMG_FALSE, psDeviceNode->sDevId.ui32DeviceIndex, &PVRSRVDeviceIsDefaultStateOFF, IMG_TRUE);
+	if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED))
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVFinaliseSystem: Failed PVRSRVDeviceIdleRequestKM call (%s, device index: %d)",
+						PVRSRVGetErrorStringKM(eError),
+						psDeviceNode->sDevId.ui32DeviceIndex));
+	}
+
+	return eError;
+}
+/*!
+******************************************************************************
+
+ @Function	PVRSRVFinaliseSystem
+
+ @Description
+
+ Final part of system initialisation.
+
+ @Input	   ui32DevIndex : Index to the required device
+
+ @Return   PVRSRV_ERROR  :
+
+******************************************************************************/
+PVRSRV_ERROR IMG_CALLCONV PVRSRVFinaliseSystem(IMG_BOOL bInitSuccessful, IMG_UINT32 ui32ClientBuildOptions)
+{
+	PVRSRV_DATA			*psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_ERROR		eError;
+	IMG_UINT32			i;
+	PVRSRV_DEVICE_NODE *psRGXDeviceNode = IMG_NULL;
+	PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVFinaliseSystem"));
+
+	if (bInitSuccessful)
+	{
+		for (i=0;i<psPVRSRVData->ui32RegisteredDevices;i++)
+		{
+			PVRSRV_DEVICE_NODE *psDeviceNode = psPVRSRVData->apsRegisteredDevNodes[i];
+			if(psDeviceNode->sDevId.eDeviceType == PVRSRV_DEVICE_TYPE_RGX)
+			{
+				psRGXDeviceNode = psDeviceNode; 
+			}
+			eError = SyncPrimContextCreate(IMG_NULL,
+								  psDeviceNode,
+								  &psDeviceNode->hSyncPrimContext);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,"PVRSRVFinaliseSystem: Failed to create SyncPrimContext (%u)", eError));
+				return eError;
+			}
+
+			/* Allocate general purpose sync primitive */
+			eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext, &psDeviceNode->psSyncPrim, "pvrsrv dev general");
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,"PVRSRVFinaliseSystem: Failed to allocate sync primitive with error (%u)", eError));
+				return eError;
+			}
+		}
+
+		eError = PVRSRVPowerLock();
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"PVRSRVFinaliseSystem_SetPowerState_AnyCb: Failed to acquire power lock"));
+			return eError;
+		}
+
+		/* Always ensure a single power on command appears in the pdump.
+		 * This should be the only power related call outside of PDUMPPOWCMDSTART/END.
+		 * Place all devices into ON power state. */
+		eError = List_PVRSRV_DEVICE_NODE_PVRSRV_ERROR_Any_va(psPVRSRVData->psDeviceNodeList,
+														&PVRSRVFinaliseSystem_SetPowerState_AnyCb,
+														PVRSRV_DEV_POWER_STATE_ON);
+		if (eError != PVRSRV_OK)
+		{
+			PVRSRVPowerUnlock();
+			return eError;
+		}
+
+		/* Verify firmware compatibility for devices */
+		eError = List_PVRSRV_DEVICE_NODE_PVRSRV_ERROR_Any_va(psPVRSRVData->psDeviceNodeList,
+													&PVRSRVFinaliseSystem_CompatCheck_Any_va,
+													&ui32ClientBuildOptions);
+		if (eError != PVRSRV_OK)
+		{
+			PVRSRVPowerUnlock();
+			PVRSRVDebugRequest(DEBUG_REQUEST_VERBOSITY_MAX, IMG_NULL);
+			return eError;
+		}
+
+		PDUMPPOWCMDSTART();
+		LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+		{ /* Force idle all devices whose default power state is off*/
+			eError = List_PVRSRV_DEVICE_NODE_PVRSRV_ERROR_Any_va(psPVRSRVData->psDeviceNodeList,
+													&PVRSRVFinaliseSystem_SetIdleState_AnyCb,
+													&ui32ClientBuildOptions);
+
+			if (eError == PVRSRV_OK)
+			{
+				break;
+			}
+			else if (eError == PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED)
+			{
+				PVRSRV_ERROR		eError2;
+
+				PVRSRVPowerUnlock();
+				OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+				eError2 = PVRSRVPowerLock();
+
+				if (eError2 != PVRSRV_OK)
+				{
+					PVR_DPF((PVR_DBG_ERROR,"PVRSRVFinaliseSystem: Failed to acquire power lock"));
+					return eError2;
+				}
+			}
+			else
+			{
+				PVRSRVPowerUnlock();
+				return eError;
+			}
+
+		} END_LOOP_UNTIL_TIMEOUT();
+
+		/* Place all devices into their default power state. */
+		eError = List_PVRSRV_DEVICE_NODE_PVRSRV_ERROR_Any_va(psPVRSRVData->psDeviceNodeList,
+														&PVRSRVFinaliseSystem_SetPowerState_AnyCb,
+														PVRSRV_DEV_POWER_STATE_DEFAULT);
+		PDUMPPOWCMDEND();
+
+		if (eError != PVRSRV_OK)
+		{
+			PVRSRVPowerUnlock();
+			return eError;
+		}
+
+		PVRSRVPowerUnlock();
+
+/* If PDUMP is enabled and RGX device is supported, then initialise the performance counters that can be further modified in PDUMP.
+Then, before ending the init phase of the pdump, drain the commands put in the kCCBs during the init phase */
+#if defined(SUPPORT_RGX) && defined(PDUMP)
+		if(psRGXDeviceNode)
+		{
+			PVRSRV_RGXDEV_INFO 	*psDevInfo = (PVRSRV_RGXDEV_INFO *) (psRGXDeviceNode->pvDevice);
+			IMG_UINT32 ui32Idx;
+
+			eError = PVRSRVRGXInitHWPerfCountersKM(psRGXDeviceNode);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "PVSRVFinaliseSystem: failed to init hwperf counters(%d)", eError));
+				return eError;
+			}
+
+			for (ui32Idx = 0; ui32Idx < RGXFWIF_DM_LAST; ui32Idx++)
+			{
+				eError = RGXPdumpDrainKCCB(psDevInfo, psDevInfo->apsKernelCCBCtl[ui32Idx]->ui32WriteOffset, ui32Idx);
+				if (eError != PVRSRV_OK)
+				{
+					PVR_DPF((PVR_DBG_ERROR, "PVSRVFinaliseSystem: problem draining kCCB (%d)", eError));
+					return eError;
+				}
+			}
+		}
+#endif
+	}
+
+	eError = PDumpStopInitPhaseKM(IMG_SRV_INIT);
+
+	if(eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Failed to stop PDump init phase"));
+		return eError;
+	}
+
+	return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR IMG_CALLCONV PVRSRVDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode,IMG_UINT32 ui32ClientBuildOptions)
+{
+	/* Only check devices which specify a compatibility check callback */
+	if (psDeviceNode->pfnInitDeviceCompatCheck)
+		return psDeviceNode->pfnInitDeviceCompatCheck(psDeviceNode, ui32ClientBuildOptions);
+	else
+		return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVAcquireDeviceDataKM
+
+ @Description
+
+ Matchs a device given a device type and a device index.
+
+ @input	psDeviceNode :The device node to be matched.
+
+ @Input	   va : Variable argument list with:
+			eDeviceType : Required device type. If type is unknown use ui32DevIndex
+						 to locate device data
+
+ 			ui32DevIndex : Index to the required device obtained from the
+						PVRSRVEnumerateDevice function
+
+ @Return   PVRSRV_ERROR  :
+
+******************************************************************************/
+static IMG_VOID * PVRSRVAcquireDeviceDataKM_Match_AnyVaCb(PVRSRV_DEVICE_NODE *psDeviceNode, va_list va)
+{
+	PVRSRV_DEVICE_TYPE eDeviceType;
+	IMG_UINT32 ui32DevIndex;
+
+	eDeviceType = va_arg(va, PVRSRV_DEVICE_TYPE);
+	ui32DevIndex = va_arg(va, IMG_UINT32);
+
+	if ((eDeviceType != PVRSRV_DEVICE_TYPE_UNKNOWN &&
+		psDeviceNode->sDevId.eDeviceType == eDeviceType) ||
+		(eDeviceType == PVRSRV_DEVICE_TYPE_UNKNOWN &&
+		 psDeviceNode->sDevId.ui32DeviceIndex == ui32DevIndex))
+	{
+		return psDeviceNode;
+	}
+	else
+	{
+		return IMG_NULL;
+	}
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVAcquireDeviceDataKM
+
+ @Description
+
+ Returns device information
+
+ @Input	   ui32DevIndex : Index to the required device obtained from the
+						PVRSRVEnumerateDevice function
+
+ @Input	   eDeviceType : Required device type. If type is unknown use ui32DevIndex
+						 to locate device data
+
+ @Output  *phDevCookie : Dev Cookie
+
+
+ @Return   PVRSRV_ERROR  :
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVAcquireDeviceDataKM (IMG_UINT32			ui32DevIndex,
+													 PVRSRV_DEVICE_TYPE	eDeviceType,
+													 IMG_HANDLE			*phDevCookie)
+{
+	PVRSRV_DATA			*psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_DEVICE_NODE	*psDeviceNode;
+
+	PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVAcquireDeviceDataKM"));
+
+	/* Find device in the list */
+	psDeviceNode = List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList,
+												&PVRSRVAcquireDeviceDataKM_Match_AnyVaCb,
+												eDeviceType,
+												ui32DevIndex);
+
+
+	if (!psDeviceNode)
+	{
+		/* device can't be found in the list so it isn't in the system */
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVAcquireDeviceDataKM: requested device is not present"));
+		return PVRSRV_ERROR_INIT_FAILURE;
+	}
+
+/*FoundDevice:*/
+
+	/* return the dev cookie? */
+	if (phDevCookie)
+	{
+		*phDevCookie = (IMG_HANDLE)psDeviceNode;
+	}
+
+	return PVRSRV_OK;
+}
+
+IMG_EXPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVReleaseDeviceDataKM (IMG_HANDLE hDevCookie)
+{
+	PVR_UNREFERENCED_PARAMETER(hDevCookie);
+
+	/* 
+	  Empty release body as the lifetime of this resource accessed by 
+	  PVRSRVAcquireDeviceDataKM is linked to driver lifetime, not API allocation.
+	  This is one reason why this type crosses the bridge with a shared handle.
+	  Thus no server release action is required, just bridge action to ensure
+	  associated handle is freed.
+    */ 
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVUnregisterDevice
+
+ @Description
+
+ This De-inits device
+
+ @Input	   ui32DevIndex : Index to the required device
+
+ @Return   PVRSRV_ERROR  :
+
+******************************************************************************/
+static PVRSRV_ERROR IMG_CALLCONV PVRSRVUnregisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_DATA				*psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_DEVICE_PHYS_HEAP ePhysHeapIdx;
+	PVRSRV_ERROR			eError;
+
+	eError = PVRSRVPowerLock();
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVUnregisterDevice: Failed to acquire power lock"));
+		return eError;
+	}
+
+	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+	{
+		/* Force idle device*/
+		eError = PVRSRVDeviceIdleRequestKM(IMG_FALSE,
+							psDeviceNode->sDevId.ui32DeviceIndex,
+							IMG_NULL,
+							IMG_TRUE);
+
+		if (eError == PVRSRV_OK)
+		{
+			break;
+		}
+		else if (eError == PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED)
+		{
+			PVRSRV_ERROR	eError2;
+
+			PVRSRVPowerUnlock();
+			OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+			eError2 = PVRSRVPowerLock();
+			if (eError2 != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,"PVRSRVUnregisterDevice: Failed to acquire power lock"));
+				return eError2;
+			}
+		}
+		else
+		{
+			PVRSRVPowerUnlock();
+			return eError;
+		}
+	} END_LOOP_UNTIL_TIMEOUT();
+	/*
+		Power down the device if necessary.
+	 */
+	eError = PVRSRVSetDevicePowerStateKM(psDeviceNode->sDevId.ui32DeviceIndex,
+										 PVRSRV_DEV_POWER_STATE_OFF,
+										 IMG_TRUE);
+
+	PVRSRVPowerUnlock();
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVUnregisterDevice: Failed PVRSRVSetDevicePowerStateKM call (%s). Dump debug.", PVRSRVGetErrorStringKM(eError)));
+
+		PVRSRVDebugRequest(DEBUG_REQUEST_VERBOSITY_MAX, IMG_NULL);
+
+		/* If the driver is okay then return the error, otherwise we can ignore this error. */
+		if (PVRSRVGetPVRSRVData()->eServicesState == PVRSRV_SERVICES_STATE_OK)
+		{
+			return eError;
+		}
+		else
+		{
+			PVR_DPF((PVR_DBG_MESSAGE,"PVRSRVUnregisterDevice: Will continue to unregister as driver status is not OK"));
+		}
+	}
+
+	/*
+		De-init the device.
+	*/
+	sUnregisterDevice[psDeviceNode->sDevId.eDeviceType](psDeviceNode);
+
+	/* Remove RA for local card memory */
+	if (psDeviceNode->psLocalDevMemArena)
+	{
+		RA_Delete(psDeviceNode->psLocalDevMemArena);
+	}
+
+	/* remove node from list */
+	List_PVRSRV_DEVICE_NODE_Remove(psDeviceNode);
+
+	/* deallocate id and memory */
+	(IMG_VOID)FreeDeviceID(psPVRSRVData, psDeviceNode->sDevId.ui32DeviceIndex);
+
+	for (ePhysHeapIdx = 0; ePhysHeapIdx < PVRSRV_DEVICE_PHYS_HEAP_LAST; ePhysHeapIdx++)
+	{
+		if (psDeviceNode->apsPhysHeap[ePhysHeapIdx])
+		{
+			PhysHeapRelease(psDeviceNode->apsPhysHeap[ePhysHeapIdx]);
+		}
+	}
+
+	OSFreeMem(psDeviceNode);
+	/*not nulling pointer, out of scope*/
+
+	return (PVRSRV_OK);
+}
+
+
+/*
+	PollForValueKM
+*/
+static
+PVRSRV_ERROR IMG_CALLCONV PollForValueKM (volatile IMG_UINT32*	pui32LinMemAddr,
+										  IMG_UINT32			ui32Value,
+										  IMG_UINT32			ui32Mask,
+										  IMG_UINT32			ui32Timeoutus,
+										  IMG_UINT32			ui32PollPeriodus,
+										  IMG_BOOL				bAllowPreemption)
+{
+#if defined(NO_HARDWARE)
+	PVR_UNREFERENCED_PARAMETER(pui32LinMemAddr);
+	PVR_UNREFERENCED_PARAMETER(ui32Value);
+	PVR_UNREFERENCED_PARAMETER(ui32Mask);
+	PVR_UNREFERENCED_PARAMETER(ui32Timeoutus);
+	PVR_UNREFERENCED_PARAMETER(ui32PollPeriodus);
+	PVR_UNREFERENCED_PARAMETER(bAllowPreemption);
+	return PVRSRV_OK;
+#else
+	IMG_UINT32	ui32ActualValue = 0xFFFFFFFFU; /* Initialiser only required to prevent incorrect warning */
+
+	if (bAllowPreemption)
+	{
+		PVR_ASSERT(ui32PollPeriodus >= 1000);
+	}
+
+	LOOP_UNTIL_TIMEOUT(ui32Timeoutus)
+	{
+		ui32ActualValue = (*pui32LinMemAddr & ui32Mask);
+		if(ui32ActualValue == ui32Value)
+		{
+			return PVRSRV_OK;
+		}
+
+		if (gpsPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK)
+		{
+			return PVRSRV_ERROR_TIMEOUT;
+		}
+
+		if (bAllowPreemption)
+		{
+			OSSleepms(ui32PollPeriodus / 1000);
+		}
+		else
+		{
+			OSWaitus(ui32PollPeriodus);
+		}
+	} END_LOOP_UNTIL_TIMEOUT();
+
+	PVR_DPF((PVR_DBG_ERROR,"PollForValueKM: Timeout. Expected 0x%x but found 0x%x (mask 0x%x).",
+			ui32Value, ui32ActualValue, ui32Mask));
+	
+	return PVRSRV_ERROR_TIMEOUT;
+#endif /* NO_HARDWARE */
+}
+
+
+/*
+	PVRSRVPollForValueKM
+*/
+IMG_EXPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVPollForValueKM (volatile IMG_UINT32	*pui32LinMemAddr,
+												IMG_UINT32			ui32Value,
+												IMG_UINT32			ui32Mask)
+{
+	return PollForValueKM(pui32LinMemAddr, ui32Value, ui32Mask,
+						  MAX_HW_TIME_US,
+						  MAX_HW_TIME_US/WAIT_TRY_COUNT,
+						  IMG_FALSE);
+}
+
+static
+PVRSRV_ERROR IMG_CALLCONV WaitForValueKM(volatile IMG_UINT32  *pui32LinMemAddr,
+                                         IMG_UINT32           ui32Value,
+                                         IMG_UINT32           ui32Mask,
+                                         IMG_BOOL             bHoldBridgeLock)
+{
+#if defined(NO_HARDWARE)
+	PVR_UNREFERENCED_PARAMETER(pui32LinMemAddr);
+	PVR_UNREFERENCED_PARAMETER(ui32Value);
+	PVR_UNREFERENCED_PARAMETER(ui32Mask);
+	return PVRSRV_OK;
+#else
+
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	IMG_HANDLE hOSEvent;
+	PVRSRV_ERROR eError;
+	PVRSRV_ERROR eErrorWait;
+	IMG_UINT32 ui32ActualValue;
+
+	eError = OSEventObjectOpen(psPVRSRVData->hGlobalEventObject, &hOSEvent);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVWaitForValueKM: Failed to setup EventObject with error (%d)", eError));
+		goto EventObjectOpenError;
+	}
+
+	eError = PVRSRV_ERROR_TIMEOUT;
+	
+	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+	{
+		ui32ActualValue = (*pui32LinMemAddr & ui32Mask);
+
+		if (ui32ActualValue == ui32Value)
+		{
+			/* Expected value has been found */
+			eError = PVRSRV_OK;
+			break;
+		}
+		else if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK)
+		{
+			/* Services in bad state, don't wait any more */
+			eError = PVRSRV_ERROR_NOT_READY;
+			break;
+		}
+		else
+		{
+			/* wait for event and retry */
+			eErrorWait = bHoldBridgeLock ? OSEventObjectWaitAndHoldBridgeLock(hOSEvent) : OSEventObjectWait(hOSEvent);
+			if (eErrorWait != PVRSRV_OK  &&  eErrorWait != PVRSRV_ERROR_TIMEOUT)
+			{
+				PVR_DPF((PVR_DBG_WARNING,"PVRSRVWaitForValueKM: Waiting for value failed with error %d. Expected 0x%x but found 0x%x (Mask 0x%08x). Retrying",
+							eErrorWait,
+							ui32Value,
+							ui32ActualValue,
+							ui32Mask));
+			}
+		}
+	} END_LOOP_UNTIL_TIMEOUT();
+
+	OSEventObjectClose(hOSEvent);
+
+	/* One last check incase the object wait ended after the loop timeout... */
+	if (eError != PVRSRV_OK  &&  (*pui32LinMemAddr & ui32Mask) == ui32Value)
+	{
+		eError = PVRSRV_OK;
+	}
+
+	/* Provide event timeout information to aid the Device Watchdog Thread... */
+	if (eError == PVRSRV_OK)
+	{
+		psPVRSRVData->ui32GEOConsecutiveTimeouts = 0;
+	}
+	else if (eError == PVRSRV_ERROR_TIMEOUT)
+	{
+		psPVRSRVData->ui32GEOConsecutiveTimeouts++;
+	}
+
+EventObjectOpenError:
+
+	return eError;
+
+#endif /* NO_HARDWARE */
+}
+
+/*
+	PVRSRVWaitForValueKM
+*/
+IMG_EXPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVWaitForValueKM (volatile IMG_UINT32	*pui32LinMemAddr,
+												IMG_UINT32			ui32Value,
+												IMG_UINT32			ui32Mask)
+{
+	/* In this case we are NOT retaining bridge lock while waiting
+	   for bridge lock. */
+	return WaitForValueKM(pui32LinMemAddr, ui32Value, ui32Mask, IMG_FALSE);
+}
+
+/*
+	PVRSRVWaitForValueKMAndHoldBridgeLock
+*/
+PVRSRV_ERROR IMG_CALLCONV PVRSRVWaitForValueKMAndHoldBridgeLockKM(volatile IMG_UINT32 *pui32LinMemAddr,
+                                                                  IMG_UINT32          ui32Value,
+                                                                  IMG_UINT32          ui32Mask)
+{
+	return WaitForValueKM(pui32LinMemAddr, ui32Value, ui32Mask, IMG_TRUE);
+}
+
+#if !defined(NO_HARDWARE)
+static IMG_BOOL _CheckStatus(PDLLIST_NODE psNode, IMG_PVOID pvCallbackData)
+{
+	PVRSRV_CMDCOMP_HANDLE	hCmdCompCallerHandle = (PVRSRV_CMDCOMP_HANDLE) pvCallbackData;
+	PVRSRV_CMDCOMP_NOTIFY	*psNotify;
+
+	psNotify = IMG_CONTAINER_OF(psNode, PVRSRV_CMDCOMP_NOTIFY, sListNode);
+
+	/* A device has finished some processing, check if that unblocks other devices */
+	if (hCmdCompCallerHandle != psNotify->hCmdCompHandle)
+	{
+		psNotify->pfnCmdCompleteNotify(psNotify->hCmdCompHandle);
+	}
+
+	/* keep processing until the end of the list */
+	return IMG_TRUE;
+}
+#endif
+
+IMG_VOID IMG_CALLCONV PVRSRVCheckStatus(PVRSRV_CMDCOMP_HANDLE hCmdCompCallerHandle)
+{
+	PVRSRV_DATA			*psPVRSRVData = PVRSRVGetPVRSRVData();
+
+	/* notify any registered device to check if block work items can now proceed */
+#if !defined(NO_HARDWARE)
+	OSWRLockAcquireRead(hNotifyLock);
+	dllist_foreach_node(&sCmdCompNotifyHead, _CheckStatus, hCmdCompCallerHandle);
+	OSWRLockReleaseRead(hNotifyLock);
+#endif
+
+	/* signal global event object */
+	if (psPVRSRVData->hGlobalEventObject)
+	{
+		IMG_HANDLE hOSEventKM = psPVRSRVData->hGlobalEventObject;
+		if(hOSEventKM)
+		{
+			OSEventObjectSignal(hOSEventKM);
+		}
+	}
+}
+
+PVRSRV_ERROR IMG_CALLCONV PVRSRVKickDevicesKM(IMG_VOID)
+{
+	PVR_DPF((PVR_DBG_ERROR, "PVRSRVKickDevicesKM"));
+	PVRSRVCheckStatus(IMG_NULL);
+	return PVRSRV_OK;
+}
+
+/*!
+ ******************************************************************************
+
+ @Function		PVRSRVGetErrorStringKM
+
+ @Description	Returns a text string relating to the PVRSRV_ERROR enum.
+
+ @Note		case statement used rather than an indexed arrary to ensure text is
+ 			synchronised with the correct enum
+
+ @Input		eError : PVRSRV_ERROR enum
+
+ @Return	const IMG_CHAR * : Text string
+
+ @Note		Must be kept in sync with servicesext.h
+
+******************************************************************************/
+
+IMG_EXPORT
+const IMG_CHAR *PVRSRVGetErrorStringKM(PVRSRV_ERROR eError)
+{
+	switch(eError)
+	{
+		case PVRSRV_OK:
+			return "PVRSRV_OK";
+#define PVRE(x) \
+		case x: \
+			return #x;
+#include "pvrsrv_errors.h"
+#undef PVRE
+		default:
+			return "Unknown PVRSRV error number";
+	}
+}
+
+/*
+	PVRSRVSystemDebugInfo
+ */
+PVRSRV_ERROR PVRSRVSystemDebugInfo( DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf)
+{
+	return SysDebugInfo(gpsSysConfig, pfnDumpDebugPrintf);
+}
+
+/*
+	PVRSRVGetSystemName
+*/
+const IMG_CHAR *PVRSRVGetSystemName(IMG_VOID)
+{
+	return gpsSysConfig->pszSystemName;
+}
+
+/*
+	PVRSRVSystemHasCacheSnooping
+*/
+IMG_BOOL PVRSRVSystemHasCacheSnooping(IMG_VOID)
+{
+	if (gpsSysConfig->eCacheSnoopingMode != PVRSRV_SYSTEM_SNOOP_NONE)
+	{
+		return IMG_TRUE;
+	}
+	return IMG_FALSE;
+}
+
+IMG_BOOL PVRSRVSystemSnoopingOfCPUCache(IMG_VOID)
+{
+	if ((gpsSysConfig->eCacheSnoopingMode == PVRSRV_SYSTEM_SNOOP_CPU_ONLY) ||
+		(gpsSysConfig->eCacheSnoopingMode == PVRSRV_SYSTEM_SNOOP_CROSS))
+	{
+		return IMG_TRUE;
+	}
+	return IMG_FALSE;	
+}
+
+IMG_BOOL PVRSRVSystemSnoopingOfDeviceCache(IMG_VOID)
+{
+	if ((gpsSysConfig->eCacheSnoopingMode == PVRSRV_SYSTEM_SNOOP_DEVICE_ONLY) ||
+		(gpsSysConfig->eCacheSnoopingMode == PVRSRV_SYSTEM_SNOOP_CROSS))
+	{
+		return IMG_TRUE;
+	}
+	return IMG_FALSE;
+}
+
+/*
+	PVRSRVSystemWaitCycles
+*/
+IMG_VOID PVRSRVSystemWaitCycles(PVRSRV_DEVICE_CONFIG *psDevConfig, IMG_UINT32 ui32Cycles)
+{
+	/* Delay in us */
+	IMG_UINT32 ui32Delayus = 1;
+
+	/* obtain the device freq */
+	if (psDevConfig->pfnClockFreqGet != IMG_NULL)
+	{
+		IMG_UINT32 ui32DeviceFreq;
+
+		ui32DeviceFreq = psDevConfig->pfnClockFreqGet(psDevConfig->hSysData);
+
+		ui32Delayus = (ui32Cycles*1000000)/ui32DeviceFreq;
+
+		if (ui32Delayus == 0)
+		{
+			ui32Delayus = 1;
+		}
+	}
+
+	OSWaitus(ui32Delayus);
+}
+
+/*
+	PVRSRVRegisterCmdCompleteNotify
+*/
+PVRSRV_ERROR PVRSRVRegisterCmdCompleteNotify(IMG_HANDLE *phNotify, PFN_CMDCOMP_NOTIFY pfnCmdCompleteNotify, PVRSRV_CMDCOMP_HANDLE hCmdCompHandle)
+{
+	PVRSRV_CMDCOMP_NOTIFY *psNotify;
+
+	if ((phNotify == IMG_NULL) || (pfnCmdCompleteNotify == IMG_NULL) || (hCmdCompHandle == IMG_NULL))
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Bad arguments (%p, %p, %p)", __FUNCTION__, phNotify, pfnCmdCompleteNotify, hCmdCompHandle));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psNotify = OSAllocMem(sizeof(*psNotify));
+	if (psNotify == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Not enough memory to allocate CmdCompleteNotify function", __FUNCTION__));
+		return PVRSRV_ERROR_OUT_OF_MEMORY;		
+	}
+
+	/* Set-up the notify data */
+	psNotify->hCmdCompHandle = hCmdCompHandle;
+	psNotify->pfnCmdCompleteNotify = pfnCmdCompleteNotify;
+
+	/* Add it to the list of Notify functions */
+	OSWRLockAcquireWrite(hNotifyLock);
+	dllist_add_to_tail(&sCmdCompNotifyHead, &psNotify->sListNode);
+	OSWRLockReleaseWrite(hNotifyLock);
+
+	*phNotify = psNotify;
+
+	return PVRSRV_OK;
+}
+
+/*
+	PVRSRVUnregisterCmdCompleteNotify
+*/
+PVRSRV_ERROR PVRSRVUnregisterCmdCompleteNotify(IMG_HANDLE hNotify)
+{
+	PVRSRV_CMDCOMP_NOTIFY *psNotify = (PVRSRV_CMDCOMP_NOTIFY*) hNotify;
+
+	if (psNotify == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Bad arguments (%p)", __FUNCTION__, hNotify));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* remove the node from the list */
+	OSWRLockAcquireWrite(hNotifyLock);
+	dllist_remove_node(&psNotify->sListNode);
+	OSWRLockReleaseWrite(hNotifyLock);
+
+	/* free the notify structure that holds the node */
+	OSFreeMem(psNotify);
+
+	return PVRSRV_OK;
+
+}
+
+static IMG_VOID _SysDebugRequestNotify(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle, IMG_UINT32 ui32VerbLevel)
+{
+	PVRSRV_DATA *psPVRSRVData = (PVRSRV_DATA*) hDebugRequestHandle;
+	DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf = IMG_NULL;
+
+	pfnDumpDebugPrintf = g_pfnDumpDebugPrintf;	
+	/* only dump info on the lowest verbosity level */
+	if (ui32VerbLevel != DEBUG_REQUEST_VERBOSITY_LOW)
+	{
+		return;
+	}
+
+	PVR_DUMPDEBUG_LOG(("DDK info: %s (%s) %s", PVRVERSION_STRING, PVR_BUILD_TYPE, PVR_BUILD_DIR));
+	PVR_DUMPDEBUG_LOG(("Time now: %015llu", OSClockus64()));
+
+	/* Services state */
+	switch (psPVRSRVData->eServicesState)
+	{
+		case PVRSRV_SERVICES_STATE_OK:
+		{
+			PVR_DUMPDEBUG_LOG(("Services State: OK"));
+			break;
+		}
+		
+		case PVRSRV_SERVICES_STATE_BAD:
+		{
+			PVR_DUMPDEBUG_LOG(("Services State: BAD"));
+			break;
+		}
+		
+		default:
+		{
+			PVR_DUMPDEBUG_LOG(("Services State: UNKNOWN (%d)", psPVRSRVData->eServicesState));
+			break;
+		}
+	}
+
+	/* Power state */
+	switch (psPVRSRVData->eCurrentPowerState)
+	{
+		case PVRSRV_SYS_POWER_STATE_OFF:
+		{
+			PVR_DUMPDEBUG_LOG(("System Power State: OFF"));
+			break;
+		}
+		case PVRSRV_SYS_POWER_STATE_ON:
+		{
+			PVR_DUMPDEBUG_LOG(("System Power State: ON"));
+			break;
+		}
+		default:
+		{
+			PVR_DUMPDEBUG_LOG(("System Power State: UNKNOWN (%d)", psPVRSRVData->eCurrentPowerState));
+			break;
+		}
+	}
+
+	/* Dump system specific debug info */
+	PVRSRVSystemDebugInfo(pfnDumpDebugPrintf);
+
+}
+
+static IMG_BOOL _DebugRequest(PDLLIST_NODE psNode, IMG_PVOID hVerbLevel)
+{
+	IMG_UINT32 *pui32VerbLevel = (IMG_UINT32 *) hVerbLevel;
+	PVRSRV_DBGREQ_NOTIFY *psNotify;
+
+	psNotify = IMG_CONTAINER_OF(psNode, PVRSRV_DBGREQ_NOTIFY, sListNode);
+
+	psNotify->pfnDbgRequestNotify(psNotify->hDbgRequestHandle, *pui32VerbLevel);
+
+	/* keep processing until the end of the list */
+	return IMG_TRUE;
+}
+
+/*
+	PVRSRVDebugRequest
+*/
+IMG_VOID IMG_CALLCONV PVRSRVDebugRequest(IMG_UINT32 ui32VerbLevel, DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf)
+{
+	IMG_UINT32 i,j;
+
+	g_pfnDumpDebugPrintf = pfnDumpDebugPrintf;
+	if (pfnDumpDebugPrintf == IMG_NULL)
+	{
+		/* Only dump the call stack to the kernel log if the debug text is going there. */
+		OSDumpStack();
+	}
+
+	/* notify any registered device to check if block work items can now proceed */
+	/* Lock the lists */
+	OSWRLockAcquireRead(g_hDbgNotifyLock);
+
+	PVR_DUMPDEBUG_LOG(("------------[ PVR DBG: START ]------------"));
+
+	/* For each verbosity level */
+	for (j=0;j<(ui32VerbLevel+1);j++)
+	{
+		/* For each requester */
+		for (i=0;i<g_psDebugTable->ui32RequestCount;i++)
+		{
+			dllist_foreach_node(&g_psDebugTable->asEntry[i].sListHead, _DebugRequest, &j);
+		}
+	}
+	PVR_DUMPDEBUG_LOG(("------------[ PVR DBG: END ]------------"));
+
+	/* Unlock the lists */
+	OSWRLockReleaseRead(g_hDbgNotifyLock);
+}
+
+/*
+	PVRSRVRegisterDebugRequestNotify
+*/
+PVRSRV_ERROR PVRSRVRegisterDbgRequestNotify(IMG_HANDLE *phNotify, PFN_DBGREQ_NOTIFY pfnDbgRequestNotify, IMG_UINT32 ui32RequesterID, PVRSRV_DBGREQ_HANDLE hDbgRequestHandle)
+{
+	PVRSRV_DBGREQ_NOTIFY *psNotify;
+	PDLLIST_NODE psHead = IMG_NULL;
+	IMG_UINT32 i;
+	PVRSRV_ERROR eError;
+
+	if ((phNotify == IMG_NULL) || (pfnDbgRequestNotify == IMG_NULL))
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Bad arguments (%p, %p,)", __FUNCTION__, phNotify, pfnDbgRequestNotify));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto fail_params;
+	}
+
+	psNotify = OSAllocMem(sizeof(*psNotify));
+	if (psNotify == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Not enough memory to allocate DbgRequestNotify structure", __FUNCTION__));
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_alloc;
+	}
+
+	/* Set-up the notify data */
+	psNotify->hDbgRequestHandle = hDbgRequestHandle;
+	psNotify->pfnDbgRequestNotify = pfnDbgRequestNotify;
+	psNotify->ui32RequesterID = ui32RequesterID;
+
+	/* Lock down all the lists */
+	OSWRLockAcquireWrite(g_hDbgNotifyLock);
+
+	/* Find which list to add it to */
+	for (i=0;i<g_psDebugTable->ui32RequestCount;i++)
+	{
+		if (g_psDebugTable->asEntry[i].ui32RequesterID == ui32RequesterID)
+		{
+			psHead = &g_psDebugTable->asEntry[i].sListHead;
+		}
+	}
+
+	if (psHead == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Failed to find debug requester", __FUNCTION__));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto fail_add;
+	}
+
+	/* Add it to the list of Notify functions */
+	dllist_add_to_tail(psHead, &psNotify->sListNode);
+
+	/* Unlock the lists */
+	OSWRLockReleaseWrite(g_hDbgNotifyLock);
+
+	*phNotify = psNotify;
+
+	return PVRSRV_OK;
+
+fail_add:
+	OSWRLockReleaseWrite(g_hDbgNotifyLock);
+	OSFreeMem(psNotify);
+fail_alloc:
+fail_params:
+	return eError;
+}
+
+/*
+	PVRSRVUnregisterCmdCompleteNotify
+*/
+PVRSRV_ERROR PVRSRVUnregisterDbgRequestNotify(IMG_HANDLE hNotify)
+{
+	PVRSRV_DBGREQ_NOTIFY *psNotify = (PVRSRV_DBGREQ_NOTIFY*) hNotify;
+
+	if (psNotify == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Bad arguments (%p)", __FUNCTION__, hNotify));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* remove the node from the list */
+	OSWRLockAcquireWrite(g_hDbgNotifyLock);
+	dllist_remove_node(&psNotify->sListNode);
+	OSWRLockReleaseWrite(g_hDbgNotifyLock);
+
+	/* free the notify structure that holds the node */
+	OSFreeMem(psNotify);
+
+	return PVRSRV_OK;
+}
+
+
+static PVRSRV_ERROR PVRSRVRegisterDbgTable(IMG_UINT32 *paui32Table, IMG_UINT32 ui32Length, IMG_PVOID *phTable)
+{
+	IMG_UINT32 i;
+	if (g_psDebugTable != IMG_NULL)
+	{
+		return PVRSRV_ERROR_DBGTABLE_ALREADY_REGISTERED;
+	}
+
+	g_psDebugTable = OSAllocMem(sizeof(DEBUG_REQUEST_TABLE) + (sizeof(DEBUG_REQUEST_ENTRY) * (ui32Length-1)));
+	if (!g_psDebugTable)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	g_psDebugTable->ui32RequestCount = ui32Length;
+
+	/* Init the list heads */
+	for (i=0;i<ui32Length;i++)
+	{
+		g_psDebugTable->asEntry[i].ui32RequesterID = paui32Table[i];
+		dllist_init(&g_psDebugTable->asEntry[i].sListHead);
+	}
+
+	*phTable = g_psDebugTable;
+	return PVRSRV_OK;
+}
+
+static IMG_VOID PVRSRVUnregisterDbgTable(IMG_PVOID hTable)
+{
+	IMG_UINT32 i;
+
+	PVR_ASSERT(hTable == g_psDebugTable);
+
+	for (i=0;i<g_psDebugTable->ui32RequestCount;i++)
+	{
+		if (!dllist_is_empty(&g_psDebugTable->asEntry[i].sListHead))
+		{
+			PVR_DPF((PVR_DBG_ERROR, "PVRSRVUnregisterDbgTable: Found registered callback(s) on %d", i));
+		}
+	}
+	OSFREEMEM(g_psDebugTable);
+	g_psDebugTable = IMG_NULL;
+}
+
+PVRSRV_ERROR AcquireGlobalEventObjectServer(IMG_HANDLE *phGlobalEventObject)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+	*phGlobalEventObject = psPVRSRVData->hGlobalEventObject;
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR ReleaseGlobalEventObjectServer(IMG_HANDLE hGlobalEventObject)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+	PVR_ASSERT(psPVRSRVData->hGlobalEventObject == hGlobalEventObject);
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR GetBIFTilingHeapXStride(IMG_UINT32 uiHeapNum, IMG_UINT32 *puiXStride)
+{
+	IMG_UINT32 uiMaxHeaps;
+
+	PVR_ASSERT(puiXStride != IMG_NULL);
+
+	GetNumBifTilingHeapConfigs(&uiMaxHeaps);
+
+	if(uiHeapNum < 1 || uiHeapNum > uiMaxHeaps) {
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	*puiXStride = gpsSysConfig->pui32BIFTilingHeapConfigs[uiHeapNum - 1];
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR GetNumBifTilingHeapConfigs(IMG_UINT32 *puiNumHeaps)
+{
+	*puiNumHeaps = gpsSysConfig->ui32BIFTilingHeapCount;
+	return PVRSRV_OK;
+}
+
+/*
+	PVRSRVResetHWRLogsKM
+*/
+PVRSRV_ERROR PVRSRVResetHWRLogsKM(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVR_LOG(("User requested HWR logs reset"));
+
+	if(psDeviceNode && psDeviceNode->pfnResetHWRLogs)
+	{
+		return psDeviceNode->pfnResetHWRLogs(psDeviceNode);
+	}
+
+	return PVRSRV_ERROR_NO_DEVICENODE_FOUND;
+}
+
+/*****************************************************************************
+ End of file (pvrsrv.c)
+*****************************************************************************/
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/ri_server.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/ri_server.c
new file mode 100644
index 0000000..6b2f6a3
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/ri_server.c
@@ -0,0 +1,1298 @@
+/*************************************************************************/ /*!
+@File			ri_server.c
+@Title          Resource Information (RI) server implementation
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description	Resource Information (RI) server functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+
+#include <stdarg.h>
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "osfunc.h"
+
+#include "srvkm.h"
+#include "lock.h"
+/* services/server/include/ */
+#include "ri_server.h"
+
+/* services/include/shared/ */
+#include "hash.h"
+/* services/shared/include/ */
+#include "dllist.h"
+
+#include "pmr.h"
+
+#if defined(PVR_RI_DEBUG)
+
+#define USE_RI_LOCK 	1
+
+/*
+ * Initial size use for Hash table.
+ * (Used to index the RI list entries).
+ */
+#define _RI_INITIAL_HASH_TABLE_SIZE	64
+
+/*
+ * Values written to the 'valid' field of
+ * RI structures when created and cleared
+ * prior to being destroyed.
+ * The code can then check this value
+ * before accessing the provided pointer
+ * contents as a valid RI structure.
+ */
+#define _VALID_RI_LIST_ENTRY 	0x66bccb66
+#define _VALID_RI_SUBLIST_ENTRY	0x77cddc77
+#define _INVALID				0x00000000
+
+/*
+ * If this define is set to 1, details of
+ * the linked lists (addresses, prev/next
+ * ptrs, etc) are also output when function
+ * RIDumpList() is called
+ */
+#define _DUMP_LINKEDLIST_INFO		0
+
+
+typedef IMG_UINT64 _RI_BASE_T;
+
+/*
+ *  Length of string used for process name
+ */
+#define TASK_COMM_LEN 				16
+/*
+ *  Length of string used for process ID
+ */
+#define TASK_PID_LEN 				11
+/*
+ *  Length of string used for "[{PID}:_{process_name}]"
+ */
+#define RI_PROC_TAG_CHAR_LEN 		(1+TASK_PID_LEN+2+TASK_COMM_LEN+1)
+
+/*
+ *  Length of string used for address
+ */
+#define RI_ADDR_CHAR_LEN			12
+/*
+ *  Length of string used for size
+ */
+#define RI_SIZE_CHAR_LEN			12
+/*
+ *  Length of string used for "{Imported from PID nnnnnnnnnn}"
+ */
+#define RI_IMPORT_TAG_CHAR_LEN 		32
+/*
+ *  Total length of string returned to debugfs
+ *  {0xaddr}_{annotation_text}_{0xsize}_{import_tag}
+ */
+#define RI_MAX_DEBUGFS_ENTRY_LEN	(RI_ADDR_CHAR_LEN+1+RI_MAX_TEXT_LEN+1+RI_SIZE_CHAR_LEN+1+RI_IMPORT_TAG_CHAR_LEN+1)
+/*
+ *  Total length of string output to _RIOutput()
+ *  for MEMDESC RI sub-list entries
+ *  {0xaddr}_{annotation_text}_[{PID}:_{process_name}]_{0xsize}_bytes_{import_tag}
+ */
+#define RI_MAX_MEMDESC_RI_ENTRY_LEN	(RI_ADDR_CHAR_LEN+1+RI_MAX_TEXT_LEN+1+RI_PROC_TAG_CHAR_LEN+1+RI_SIZE_CHAR_LEN+7+RI_IMPORT_TAG_CHAR_LEN+1)
+/*
+ *  Total length of string output to _RIOutput()
+ *  for PMR RI list entries
+ *  {annotation_text}_{pmr_handle}_suballocs:{num_suballocs}_{0xsize}
+ */
+#define RI_MAX_PMR_RI_ENTRY_LEN		(RI_MAX_TEXT_LEN+1+RI_ADDR_CHAR_LEN+11+10+1+RI_SIZE_CHAR_LEN)
+
+
+/*
+ * Structure used to make linked sublist of
+ * memory allocations (MEMDESC)
+ */
+struct _RI_SUBLIST_ENTRY_
+{
+	DLLIST_NODE				sListNode;
+	struct _RI_LIST_ENTRY_	*psRI;
+	IMG_UINT32 				valid;
+	IMG_BOOL				bIsImport;
+	IMG_BOOL				bIsExportable;
+	IMG_PID					pid;
+	IMG_CHAR				ai8ProcName[TASK_COMM_LEN];
+	IMG_DEV_VIRTADDR 		sVAddr;
+	IMG_UINT64				ui64Offset;
+	IMG_UINT64				ui64Size;
+	IMG_CHAR				ai8TextB[RI_MAX_TEXT_LEN+1];
+	DLLIST_NODE				sProcListNode;
+};
+
+/*
+ * Structure used to make linked list of
+ * PMRs. Sublists of allocations (MEMDESCs) made
+ * from these PMRs are chained off these entries.
+ */
+struct _RI_LIST_ENTRY_
+{
+	DLLIST_NODE				sListNode;
+	DLLIST_NODE				sSubListFirst;
+	IMG_UINT32 				valid;
+	PMR						*hPMR;
+	IMG_UINT64 				ui64LogicalSize;
+	IMG_PID					pid;
+	IMG_CHAR				ai8ProcName[TASK_COMM_LEN];
+	IMG_CHAR				ai8TextA[RI_MAX_TEXT_LEN+1];
+	IMG_UINT16 				ui16SubListCount;
+	IMG_UINT16 				ui16MaxSubListCount;
+};
+
+typedef struct _RI_LIST_ENTRY_ RI_LIST_ENTRY;
+typedef struct _RI_SUBLIST_ENTRY_ RI_SUBLIST_ENTRY;
+
+static IMG_UINT16 	g_ui16RICount = 0;
+static HASH_TABLE 	*g_pRIHashTable = IMG_NULL;
+static IMG_UINT16 	g_ui16ProcCount = 0;
+static HASH_TABLE 	*g_pProcHashTable = IMG_NULL;
+
+static POS_LOCK		g_hRILock;
+/*
+ * Flag used to indicate if RILock should be destroyed when final PMR entry
+ * is deleted, i.e. if RIDeInitKM() has already been called before that point
+ * but the handle manager has deferred deletion of RI entries.
+ */
+static IMG_BOOL 	bRIDeInitDeferred = IMG_FALSE;
+
+/*
+ *  Used as head of linked-list of PMR RI entries -
+ *  this is useful when we wish to iterate all PMR
+ *  list entries (when we don't have a PMR ref)
+ */
+static DLLIST_NODE	sListFirst;
+
+/* Function used to produce string containing info for MEMDESC RI entries (used for both debugfs and kernel log output) */
+static IMG_VOID _GenerateMEMDESCEntryString(RI_SUBLIST_ENTRY *psRISubEntry, IMG_BOOL bDebugFs, IMG_UINT16 ui16MaxStrLen, IMG_CHAR *pszEntryString);
+
+static PVRSRV_ERROR _DumpAllEntries (IMG_UINTPTR_T k, IMG_UINTPTR_T v);
+static PVRSRV_ERROR _DeleteAllEntries (IMG_UINTPTR_T k, IMG_UINTPTR_T v);
+static PVRSRV_ERROR _DumpList(PMR *hPMR, IMG_PID pid);
+#define _RIOutput(x) PVR_LOG(x)
+
+IMG_INTERNAL IMG_UINT32
+_ProcHashFunc (IMG_SIZE_T uKeySize, IMG_VOID *pKey, IMG_UINT32 uHashTabLen);
+IMG_INTERNAL IMG_UINT32
+_ProcHashFunc (IMG_SIZE_T uKeySize, IMG_VOID *pKey, IMG_UINT32 uHashTabLen)
+{
+	IMG_UINT32 *p = (IMG_UINT32 *)pKey;
+	IMG_UINT32 uKeyLen = uKeySize / sizeof(IMG_UINT32);
+	IMG_UINT32 ui;
+	IMG_UINT32 uHashKey = 0;
+
+	PVR_UNREFERENCED_PARAMETER(uHashTabLen);
+
+	for (ui = 0; ui < uKeyLen; ui++)
+	{
+		IMG_UINT32 uHashPart = *p++;
+
+		uHashPart += (uHashPart << 12);
+		uHashPart ^= (uHashPart >> 22);
+		uHashPart += (uHashPart << 4);
+		uHashPart ^= (uHashPart >> 9);
+		uHashPart += (uHashPart << 10);
+		uHashPart ^= (uHashPart >> 2);
+		uHashPart += (uHashPart << 7);
+		uHashPart ^= (uHashPart >> 12);
+
+		uHashKey += uHashPart;
+	}
+
+	return uHashKey;
+}
+IMG_INTERNAL IMG_BOOL
+_ProcHashComp (IMG_SIZE_T uKeySize, IMG_VOID *pKey1, IMG_VOID *pKey2);
+IMG_INTERNAL IMG_BOOL
+_ProcHashComp (IMG_SIZE_T uKeySize, IMG_VOID *pKey1, IMG_VOID *pKey2)
+{
+	IMG_UINT32 *p1 = (IMG_UINT32 *)pKey1;
+	IMG_UINT32 *p2 = (IMG_UINT32 *)pKey2;
+	IMG_UINT32 uKeyLen = uKeySize / sizeof(IMG_UINT32);
+	IMG_UINT32 ui;
+
+	for (ui = 0; ui < uKeyLen; ui++)
+	{
+		if (*p1++ != *p2++)
+			return IMG_FALSE;
+	}
+
+	return IMG_TRUE;
+}
+
+static IMG_VOID _RILock(IMG_VOID)
+{
+#if (USE_RI_LOCK == 1)
+	OSLockAcquire(g_hRILock);
+#endif
+}
+
+static IMG_VOID _RIUnlock(IMG_VOID)
+{
+#if (USE_RI_LOCK == 1)
+	OSLockRelease(g_hRILock);
+#endif
+}
+
+PVRSRV_ERROR RIInitKM(IMG_VOID)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	bRIDeInitDeferred = IMG_FALSE;
+#if (USE_RI_LOCK == 1)
+	eError = OSLockCreate(&g_hRILock, LOCK_TYPE_PASSIVE);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: OSLockCreate failed (returned %d)",__func__,eError));
+	}
+#endif
+	return eError;
+}
+IMG_VOID RIDeInitKM(IMG_VOID)
+{
+#if (USE_RI_LOCK == 1)
+	if (g_ui16RICount > 0)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "%s: called with %d entries remaining - deferring OSLockDestroy()",__func__,g_ui16RICount));
+		bRIDeInitDeferred = IMG_TRUE;
+	}
+	else
+	{
+		OSLockDestroy(g_hRILock);
+	}
+#endif
+}
+
+/*!
+******************************************************************************
+
+ @Function	RIWritePMREntryKM
+
+ @Description
+            Writes a new Resource Information list entry.
+            The new entry will be inserted at the head of the list of
+            PMR RI entries and assigned the values provided.
+
+ @input     hPMR - Reference (handle) to the PMR to which this reference relates
+ @input     ai8TextA - String describing this PMR (may be null)
+ @input     uiLogicalSize - Size of PMR
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIWritePMREntryKM(PMR *hPMR,
+					   	       IMG_UINT32 ui32TextASize,
+					   	       const IMG_CHAR *psz8TextA,
+					   	       IMG_UINT64 ui64LogicalSize)
+{
+	IMG_UINTPTR_T hashData = 0;
+	PMR			*pPMRHashKey = hPMR;
+	IMG_PCHAR pszText = (IMG_PCHAR)psz8TextA;
+	RI_LIST_ENTRY *psRIEntry = IMG_NULL;
+
+
+	/* if Hash table has not been created, create it now */
+	if (!g_pRIHashTable)
+	{
+		g_pRIHashTable = HASH_Create_Extended(_RI_INITIAL_HASH_TABLE_SIZE, sizeof(PMR*), HASH_Func_Default, HASH_Key_Comp_Default);
+		g_pProcHashTable = HASH_Create_Extended(_RI_INITIAL_HASH_TABLE_SIZE, sizeof(IMG_PID), _ProcHashFunc, _ProcHashComp);
+	}
+	if (!g_pRIHashTable || !g_pProcHashTable)
+	{
+		/* Error - no memory to allocate for Hash table(s) */
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+	if (!hPMR)
+	{
+		/* NULL handle provided */
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+	else
+	{
+		/* Acquire RI Lock */
+		_RILock();
+
+		/* look-up hPMR in Hash Table */
+		hashData = HASH_Retrieve_Extended (g_pRIHashTable, (IMG_VOID *)&pPMRHashKey);
+		psRIEntry = (RI_LIST_ENTRY *)hashData;
+		if (!psRIEntry)
+		{
+			/*
+			 * If failed to find a matching existing entry, create a new one
+			 */
+			psRIEntry = (RI_LIST_ENTRY *)OSAllocZMem(sizeof(RI_LIST_ENTRY));
+			if (!psRIEntry)
+			{
+				/* Release RI Lock */
+				_RIUnlock();
+				/* Error - no memory to allocate for new RI entry */
+				return PVRSRV_ERROR_OUT_OF_MEMORY;
+			}
+			else
+			{
+				/*
+				 * Add new RI Entry
+				 */
+				if (g_ui16RICount == 0)
+				{
+					/* Initialise PMR entry linked-list head */
+					dllist_init(&sListFirst);
+				}
+				g_ui16RICount++;
+
+				dllist_init (&(psRIEntry->sSubListFirst));
+				psRIEntry->ui16SubListCount = 0;
+				psRIEntry->ui16MaxSubListCount = 0;
+				psRIEntry->valid = _VALID_RI_LIST_ENTRY;
+				psRIEntry->pid = OSGetCurrentProcessID();
+				OSSNPrintf((IMG_CHAR *)psRIEntry->ai8ProcName, TASK_COMM_LEN, "%s", OSGetCurrentProcessName());
+				/* Add PMR entry to linked-list of PMR entries */
+				dllist_init (&(psRIEntry->sListNode));
+				dllist_add_to_tail(&sListFirst,(PDLLIST_NODE)&(psRIEntry->sListNode));
+			}
+
+			if (pszText)
+			{
+				if (ui32TextASize > RI_MAX_TEXT_LEN)
+					ui32TextASize = RI_MAX_TEXT_LEN;
+
+				/* copy ai8TextA field data */
+				OSSNPrintf((IMG_CHAR *)psRIEntry->ai8TextA, ui32TextASize+1, "%s", pszText);
+
+				/* ensure string is NUL-terminated */
+				psRIEntry->ai8TextA[ui32TextASize] = '\0';
+			}
+			else
+			{
+				/* ensure string is NUL-terminated */
+				psRIEntry->ai8TextA[0] = '\0';
+			}
+			psRIEntry->hPMR = hPMR;
+			psRIEntry->ui64LogicalSize = ui64LogicalSize;
+
+			/* Create index entry in Hash Table */
+			HASH_Insert_Extended (g_pRIHashTable, (IMG_VOID *)&pPMRHashKey, (IMG_UINTPTR_T)psRIEntry);
+
+			/* Store phRIHandle in PMR structure, so it can delete the associated RI entry when it destroys the PMR */
+			PMRStoreRIHandle(hPMR, (IMG_PVOID)psRIEntry);
+		}
+		/* Release RI Lock */
+		_RIUnlock();
+	}
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	RIWriteMEMDESCEntryKM
+
+ @Description
+            Writes a new Resource Information sublist entry.
+            The new entry will be inserted at the head of the sublist of
+            the indicated PMR list entry, and assigned the values provided.
+
+ @input     hPMR - Reference (handle) to the PMR to which this MEMDESC RI entry relates
+ @input     ai8TextB - String describing this secondary reference (may be null)
+ @input     uiOffset - Offset from the start of the PMR at which this allocation begins
+ @input     uiSize - Size of this allocation
+ @input     bIsImport - Flag indicating if this is an allocation or an import
+ @input     bIsExportable - Flag indicating if this allocation is exportable
+ @output    phRIHandle - Handle to the created RI entry
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIWriteMEMDESCEntryKM(PMR *hPMR,
+					   	   	   	   IMG_UINT32 ui32TextBSize,
+					   	   	   	   const IMG_CHAR *psz8TextB,
+					   	   	   	   IMG_UINT64 ui64Offset,
+					   	   	   	   IMG_UINT64 ui64Size,
+					   	   	   	   IMG_BOOL bIsImport,
+					   	   	   	   IMG_BOOL bIsExportable,
+					   	   	   	   RI_HANDLE *phRIHandle)
+{
+	IMG_UINTPTR_T hashData = 0;
+	PMR 		*pPMRHashKey = hPMR;
+	IMG_PID		pid;
+	IMG_PCHAR pszText = (IMG_PCHAR)psz8TextB;
+	RI_LIST_ENTRY *psRIEntry = IMG_NULL;
+	RI_SUBLIST_ENTRY *psRISubEntry = IMG_NULL;
+
+
+	/* check Hash tables have been created (meaning at least one PMR has been defined) */
+	if (!g_pRIHashTable || !g_pProcHashTable)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+	if (!hPMR || !phRIHandle)
+	{
+		/* NULL handle provided */
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+	else
+	{
+		/* Acquire RI Lock */
+		_RILock();
+
+		*phRIHandle = IMG_NULL;
+
+		/* look-up hPMR in Hash Table */
+		hashData = HASH_Retrieve_Extended (g_pRIHashTable, (IMG_VOID *)&pPMRHashKey);
+		psRIEntry = (RI_LIST_ENTRY *)hashData;
+		if (!psRIEntry)
+		{
+			/* Release RI Lock */
+			_RIUnlock();
+			return PVRSRV_ERROR_INVALID_PARAMS;
+		}
+
+		psRISubEntry = (RI_SUBLIST_ENTRY *)OSAllocZMem(sizeof(RI_SUBLIST_ENTRY));
+		if (!psRISubEntry)
+		{
+			/* Release RI Lock */
+			_RIUnlock();
+			/* Error - no memory to allocate for new RI sublist entry */
+			return PVRSRV_ERROR_OUT_OF_MEMORY;
+		}
+		else
+		{
+			/*
+			 * Insert new entry in sublist
+			 */
+			PDLLIST_NODE currentNode = dllist_get_next_node(&(psRIEntry->sSubListFirst));
+
+			/*
+			 * Insert new entry before currentNode
+			 */
+			if (!currentNode)
+			{
+				currentNode = &(psRIEntry->sSubListFirst);
+			}
+			dllist_add_to_tail(currentNode, (PDLLIST_NODE)&(psRISubEntry->sListNode));
+
+			psRISubEntry->psRI = psRIEntry;
+
+			/* Increment number of entries in sublist */
+			psRIEntry->ui16SubListCount++;
+			if (psRIEntry->ui16SubListCount > psRIEntry->ui16MaxSubListCount)
+			{
+				psRIEntry->ui16MaxSubListCount = psRIEntry->ui16SubListCount;
+			}
+			psRISubEntry->valid = _VALID_RI_SUBLIST_ENTRY;
+		}
+
+		psRISubEntry->pid = OSGetCurrentProcessID();
+
+		if (ui32TextBSize > RI_MAX_TEXT_LEN)
+			ui32TextBSize = RI_MAX_TEXT_LEN;
+		/* copy ai8TextB field data */
+		OSSNPrintf((IMG_CHAR *)psRISubEntry->ai8TextB, ui32TextBSize+1, "%s", pszText);
+		/* ensure string is NUL-terminated */
+		psRISubEntry->ai8TextB[ui32TextBSize] = '\0';
+
+		psRISubEntry->ui64Offset = ui64Offset;
+		psRISubEntry->ui64Size = ui64Size;
+		psRISubEntry->bIsImport = bIsImport;
+		psRISubEntry->bIsExportable = bIsExportable;
+		OSSNPrintf((IMG_CHAR *)psRISubEntry->ai8ProcName, TASK_COMM_LEN, "%s", OSGetCurrentProcessName());
+		dllist_init (&(psRISubEntry->sProcListNode));
+
+		/*
+		 *	Now insert this MEMDESC into the proc list
+		 */
+		/* look-up pid in Hash Table */
+		pid = psRISubEntry->pid;
+		hashData = HASH_Retrieve_Extended (g_pProcHashTable, (IMG_VOID *)&pid);
+		if (!hashData)
+		{
+			/*
+			 * No allocations for this pid yet
+			 */
+			HASH_Insert_Extended (g_pProcHashTable, (IMG_VOID *)&pid, (IMG_UINTPTR_T)&(psRISubEntry->sProcListNode));
+			/* Increment number of entries in proc hash table */
+			g_ui16ProcCount++;
+		}
+		else
+		{
+			/*
+			 * Insert allocation into pid allocations linked list
+			 */
+			PDLLIST_NODE currentNode = (PDLLIST_NODE)hashData;
+
+			/*
+			 * Insert new entry
+			 */
+			dllist_add_to_tail(currentNode, (PDLLIST_NODE)&(psRISubEntry->sProcListNode));
+		}
+		*phRIHandle = (RI_HANDLE)psRISubEntry;
+		/* Release RI Lock */
+		_RIUnlock();
+	}
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	RIUpdateMEMDESCAddrKM
+
+ @Description
+            Update a Resource Information entry.
+
+ @input     hRIHandle - Handle of object whose reference info is to be updated
+ @input     uiAddr - New address for the RI entry
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIUpdateMEMDESCAddrKM(RI_HANDLE hRIHandle,
+								   IMG_DEV_VIRTADDR sVAddr)
+{
+	RI_SUBLIST_ENTRY *psRISubEntry = IMG_NULL;
+
+	if (!hRIHandle)
+	{
+		/* NULL handle provided */
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+	psRISubEntry = (RI_SUBLIST_ENTRY *)hRIHandle;
+	if (psRISubEntry->valid != _VALID_RI_SUBLIST_ENTRY)
+	{
+		/* Pointer does not point to valid structure */
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+    /* Acquire RI lock*/
+	_RILock();
+
+	psRISubEntry->sVAddr.uiAddr = sVAddr.uiAddr;
+
+	/* Release RI lock */
+	_RIUnlock();
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	RIDeletePMREntryKM
+
+ @Description
+            Delete a Resource Information entry.
+
+ @input     hRIHandle - Handle of object whose reference info is to be deleted
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIDeletePMREntryKM(RI_HANDLE hRIHandle)
+{
+	RI_LIST_ENTRY *psRIEntry = IMG_NULL;
+	PMR			*pPMRHashKey;
+	PVRSRV_ERROR eResult = PVRSRV_OK;
+
+
+	if (!hRIHandle)
+	{
+		/* NULL handle provided */
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+	else
+	{
+		psRIEntry = (RI_LIST_ENTRY *)hRIHandle;
+
+		if (psRIEntry->valid != _VALID_RI_LIST_ENTRY)
+		{
+			/* Pointer does not point to valid structure */
+			return PVRSRV_ERROR_INVALID_PARAMS;
+		}
+
+		if(psRIEntry->ui16SubListCount == 0)
+		{
+		    /* Acquire RI lock*/
+			_RILock();
+
+			/* Remove the HASH table index entry */
+			pPMRHashKey = psRIEntry->hPMR;
+			HASH_Remove_Extended(g_pRIHashTable, (IMG_VOID *)&pPMRHashKey);
+
+			psRIEntry->valid = _INVALID;
+
+			/* Remove PMR entry from linked-list of PMR entries */
+			dllist_remove_node((PDLLIST_NODE)&(psRIEntry->sListNode));
+
+			/* Now, free the memory used to store the RI entry */
+			OSFreeMem(psRIEntry);
+			psRIEntry = IMG_NULL;
+
+		    /* Release RI lock*/
+			_RIUnlock();
+
+			/*
+			 * Decrement number of RI entries - if this is now zero,
+			 * we can delete the RI hash table
+			 */
+			if(--g_ui16RICount == 0)
+			{
+				HASH_Delete(g_pRIHashTable);
+				g_pRIHashTable = IMG_NULL;
+				/* If deInit has been deferred, we can now destroy the RI Lock */
+				if (bRIDeInitDeferred)
+				{
+					OSLockDestroy(g_hRILock);
+				}
+			}
+			/*
+			 * Make the handle NULL once PMR RI entry is deleted
+			 */
+			hRIHandle = IMG_NULL;
+		}
+		else
+		{
+			eResult = PVRSRV_ERROR_DEVICEMEM_ALLOCATIONS_REMAIN_IN_HEAP;
+		}
+	}
+
+	return eResult;
+}
+
+/*!
+******************************************************************************
+
+ @Function	RIDeleteMEMDESCEntryKM
+
+ @Description
+            Delete a Resource Information entry.
+
+ @input     hRIHandle - Handle of object whose reference info is to be deleted
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIDeleteMEMDESCEntryKM(RI_HANDLE hRIHandle)
+{
+	RI_LIST_ENTRY *psRIEntry = IMG_NULL;
+	RI_SUBLIST_ENTRY *psRISubEntry = IMG_NULL;
+	IMG_UINTPTR_T hashData = 0;
+	IMG_PID     pid;
+	PVRSRV_ERROR eResult = PVRSRV_OK;
+
+
+	if (!hRIHandle)
+	{
+		/* NULL handle provided */
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psRISubEntry = (RI_SUBLIST_ENTRY *)hRIHandle;
+	if (psRISubEntry->valid != _VALID_RI_SUBLIST_ENTRY)
+	{
+		/* Pointer does not point to valid structure */
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+    /* Acquire RI lock*/
+	_RILock();
+
+	psRIEntry = (RI_LIST_ENTRY *)psRISubEntry->psRI;
+
+	/* Now, remove entry from the sublist */
+	dllist_remove_node(&(psRISubEntry->sListNode));
+
+	psRISubEntry->valid = _INVALID;
+
+	/* Remove the entry from the proc allocations linked list */
+	pid = psRISubEntry->pid;
+	/* If this is the only allocation for this pid, just remove it from the hash table */
+	if (dllist_get_next_node(&(psRISubEntry->sProcListNode)) == IMG_NULL)
+	{
+		HASH_Remove_Extended(g_pProcHashTable, (IMG_VOID *)&pid);
+		/* Decrement number of entries in proc hash table, and delete the hash table if there are now none */
+		if(--g_ui16ProcCount == 0)
+		{
+			HASH_Delete(g_pProcHashTable);
+			g_pProcHashTable = IMG_NULL;
+		}
+	}
+	else
+	{
+		hashData = HASH_Retrieve_Extended (g_pProcHashTable, (IMG_VOID *)&pid);
+		if ((PDLLIST_NODE)hashData == &(psRISubEntry->sProcListNode))
+		{
+			HASH_Remove_Extended(g_pProcHashTable, (IMG_VOID *)&pid);
+			HASH_Insert_Extended (g_pProcHashTable, (IMG_VOID *)&pid, (IMG_UINTPTR_T)dllist_get_next_node(&(psRISubEntry->sProcListNode)));
+		}
+	}
+	dllist_remove_node(&(psRISubEntry->sProcListNode));
+
+	/* Now, free the memory used to store the sublist entry */
+	OSFreeMem(psRISubEntry);
+	psRISubEntry = IMG_NULL;
+
+	/*
+	 * Decrement number of entries in sublist
+	 */
+	psRIEntry->ui16SubListCount--;
+
+    /* Release RI lock*/
+	_RIUnlock();
+
+	/*
+	 * Make the handle NULL once MEMDESC RI entry is deleted
+	 */
+	hRIHandle = IMG_NULL;
+
+	return eResult;
+}
+
+/*!
+******************************************************************************
+
+ @Function	RIDeleteListKM
+
+ @Description
+            Delete all Resource Information entries and free associated
+            memory.
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIDeleteListKM(IMG_VOID)
+{
+	PVRSRV_ERROR eResult = PVRSRV_OK;
+
+
+	if (g_pRIHashTable)
+	{
+		eResult = HASH_Iterate(g_pRIHashTable, (HASH_pfnCallback)_DeleteAllEntries);
+		if (eResult == PVRSRV_ERROR_RESOURCE_UNAVAILABLE)
+		{
+			/*
+			 * PVRSRV_ERROR_RESOURCE_UNAVAILABLE is used to stop the Hash iterator when
+			 * the hash table gets deleted as a result of deleting the final PMR entry,
+			 * so this is not a real error condition...
+			 */
+			eResult = PVRSRV_OK;
+		}
+	}
+	return eResult;
+}
+
+/*!
+******************************************************************************
+
+ @Function	RIDumpListKM
+
+ @Description
+            Dumps out the contents of the RI List entry for the
+            specified PMR, and all MEMDESC allocation entries
+            in the associated sub linked list.
+            At present, output is directed to Kernel log
+            via PVR_DPF.
+
+ @input     hPMR - PMR for which RI entry details are to be output
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIDumpListKM(PMR *hPMR)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	/* Acquire RI lock*/
+	_RILock();
+
+	eError = _DumpList(hPMR,0);
+
+    /* Release RI lock*/
+	_RIUnlock();
+
+	return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function	RIGetListEntryKM
+
+ @Description
+            Returns pointer to a formatted string with details of the specified
+            list entry. If no entry exists (e.g. it may have been deleted
+            since the previous call), IMG_NULL is returned.
+
+ @input     pid - pid for which RI entry details are to be output
+ @input     ppHandle - handle to the entry, if IMG_NULL, the first entry will be
+                     returned.
+ @output    pszEntryString - string to be output for the entry
+ @output    hEntry - hEntry will be returned pointing to the next entry
+                     (or IMG_NULL if there is no next entry)
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+IMG_BOOL RIGetListEntryKM(IMG_PID pid,
+						  IMG_HANDLE **ppHandle,
+						  IMG_CHAR **ppszEntryString)
+{
+	RI_SUBLIST_ENTRY  *psRISubEntry = IMG_NULL;
+	IMG_UINTPTR_T     hashData      = 0;
+	IMG_PID      	  hashKey  = pid;
+
+	static IMG_CHAR	  ai8DebugfsSummaryString[RI_MAX_DEBUGFS_ENTRY_LEN+1];
+	static IMG_UINT64 ui64TotalAlloc = 0;
+	static IMG_UINT64 ui64TotalImport = 0;
+	static IMG_BOOL bDisplaySummary = IMG_FALSE;
+	static IMG_BOOL bTerminateNextCall = IMG_FALSE;
+
+	if (bDisplaySummary)
+	{
+		OSSNPrintf((IMG_CHAR *)&ai8DebugfsSummaryString[0],
+		            RI_MAX_TEXT_LEN,
+		            "Alloc:0x%llx + Imports:0x%llx = Total:0x%llx\n",
+		            (unsigned long long)ui64TotalAlloc,
+		            (unsigned long long)ui64TotalImport,
+		            (unsigned long long)(ui64TotalAlloc+ui64TotalImport));
+		*ppszEntryString = &ai8DebugfsSummaryString[0];
+		ui64TotalAlloc = 0;
+		ui64TotalImport = 0;
+		bTerminateNextCall = IMG_TRUE;
+		bDisplaySummary = IMG_FALSE;
+		return IMG_TRUE;
+	}
+
+	if (bTerminateNextCall)
+	{
+		*ppszEntryString = IMG_NULL;
+		*ppHandle        = IMG_NULL;
+		bTerminateNextCall = IMG_FALSE;
+		return IMG_FALSE;
+	}
+
+    /* Acquire RI lock*/
+	_RILock();
+
+	/* look-up pid in Hash Table, to obtain first entry for pid */
+	hashData = HASH_Retrieve_Extended(g_pProcHashTable, (IMG_VOID *)&hashKey);
+	if (hashData)
+	{
+		if (*ppHandle)
+		{
+			psRISubEntry = (RI_SUBLIST_ENTRY *)*ppHandle;
+			if (psRISubEntry->valid != _VALID_RI_SUBLIST_ENTRY)
+			{
+				psRISubEntry = IMG_NULL;
+			}
+		}
+		else
+		{
+			psRISubEntry = IMG_CONTAINER_OF((PDLLIST_NODE)hashData, RI_SUBLIST_ENTRY, sProcListNode);
+			if (psRISubEntry->valid != _VALID_RI_SUBLIST_ENTRY)
+			{
+				psRISubEntry = IMG_NULL;
+			}
+		}
+	}
+
+	if (psRISubEntry)
+	{
+		PDLLIST_NODE  psNextProcListNode = dllist_get_next_node(&psRISubEntry->sProcListNode);
+
+		if (psNextProcListNode == IMG_NULL  ||
+		    psNextProcListNode == (PDLLIST_NODE)hashData)
+		{
+			bDisplaySummary = IMG_TRUE;
+		}
+
+
+		if (psRISubEntry->bIsImport)
+		{
+			ui64TotalImport += psRISubEntry->ui64Size;
+		}
+		else
+		{
+			ui64TotalAlloc += psRISubEntry->ui64Size;
+		}
+
+
+		_GenerateMEMDESCEntryString(psRISubEntry,
+		                            IMG_TRUE,
+		                            RI_MAX_DEBUGFS_ENTRY_LEN,
+		                            (IMG_CHAR *)&ai8DebugfsSummaryString);
+		ai8DebugfsSummaryString[RI_MAX_DEBUGFS_ENTRY_LEN] = '\0';
+
+		*ppszEntryString = (IMG_CHAR *)&ai8DebugfsSummaryString;
+		*ppHandle        = (IMG_HANDLE)IMG_CONTAINER_OF(psNextProcListNode, RI_SUBLIST_ENTRY, sProcListNode);
+
+	}
+	else
+	{
+		bDisplaySummary = IMG_TRUE;
+		if (ui64TotalAlloc == 0)
+		{
+			ai8DebugfsSummaryString[0] = '\0';
+			*ppszEntryString = (IMG_CHAR *)&ai8DebugfsSummaryString;
+		}
+	}
+
+    /* Release RI lock*/
+	_RIUnlock();
+
+	return IMG_TRUE;
+}
+
+/* Function used to produce string containing info for MEMDESC RI entries (used for both debugfs and kernel log output) */
+static IMG_VOID _GenerateMEMDESCEntryString(RI_SUBLIST_ENTRY *psRISubEntry,
+                                            IMG_BOOL bDebugFs,
+                                            IMG_UINT16 ui16MaxStrLen,
+                                            IMG_CHAR *pszEntryString)
+{
+	IMG_CHAR 	szProc[RI_PROC_TAG_CHAR_LEN];
+	IMG_CHAR 	szImport[RI_IMPORT_TAG_CHAR_LEN];
+	IMG_PCHAR   pszAnnotationText = IMG_NULL;
+
+	if (!bDebugFs)
+	{
+		/* we don't include process ID info for debugfs output */
+		OSSNPrintf( (IMG_CHAR *)&szProc,
+		            RI_PROC_TAG_CHAR_LEN,
+		            "[%d: %s]",
+		            psRISubEntry->pid,
+		            (IMG_CHAR *)psRISubEntry->ai8ProcName);
+	}
+	if (psRISubEntry->bIsImport)
+	{
+		OSSNPrintf( (IMG_CHAR *)&szImport,
+		            RI_IMPORT_TAG_CHAR_LEN,
+		            "{Import from PID %d}",
+		            psRISubEntry->psRI->pid);
+		/* Set pszAnnotationText to that of the 'parent' PMR RI entry */
+		pszAnnotationText = (IMG_PCHAR)psRISubEntry->psRI->ai8TextA;
+	}
+	else
+	{
+		if (psRISubEntry->bIsExportable)
+		{
+			/* Set pszAnnotationText to that of the 'parent' PMR RI entry */
+			pszAnnotationText = (IMG_PCHAR)psRISubEntry->psRI->ai8TextA;
+		}
+		else
+		{
+			/* Set pszAnnotationText to that of the MEMDESC RI entry */
+			pszAnnotationText = (IMG_PCHAR)psRISubEntry->ai8TextB;
+		}
+	}
+	OSSNPrintf(pszEntryString,
+	           ui16MaxStrLen,
+	           "%s 0x%llx %-80s %s 0x%llx %s%c",
+	           (bDebugFs ? "" : "  "),
+	           (unsigned long long)(psRISubEntry->sVAddr.uiAddr + psRISubEntry->ui64Offset),
+	           pszAnnotationText,
+	           (bDebugFs ? "" : (char *)szProc),
+	           (unsigned long long)psRISubEntry->ui64Size,
+	           (psRISubEntry->bIsImport ? (char *)&szImport : ""),
+	           (bDebugFs ? '\n' : ' '));
+}
+
+
+/*!
+******************************************************************************
+
+ @Function	_DumpList
+ @Description
+            Dumps out RI List entries according to parameters passed.
+
+ @input     hPMR - If not NULL, function will output the RI entries for
+                   the specified PMR only
+ @input     pid - If non-zero, the function will only output MEMDESC RI
+  	  	  	  	  entries made by the process with ID pid.
+                  If zero, all MEMDESC RI entries will be output.
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR _DumpList(PMR *hPMR, IMG_PID pid)
+{
+	RI_LIST_ENTRY *psRIEntry = IMG_NULL;
+	RI_SUBLIST_ENTRY *psRISubEntry = IMG_NULL;
+	IMG_UINT16 ui16SubEntriesParsed = 0;
+	IMG_UINTPTR_T hashData = 0;
+	IMG_PID		  hashKey;
+	PMR			*pPMRHashKey = hPMR;
+	IMG_BOOL 	bDisplayedThisPMR = IMG_FALSE;
+
+
+	if (!hPMR)
+	{
+		/* NULL handle provided */
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+	if (g_pRIHashTable && g_pProcHashTable)
+	{
+		if (pid != 0)
+		{
+			/* look-up pid in Hash Table */
+			hashKey = pid;
+			hashData = HASH_Retrieve_Extended (g_pProcHashTable, (IMG_VOID *)&hashKey);
+			if (hashData)
+			{
+				psRISubEntry = IMG_CONTAINER_OF((PDLLIST_NODE)hashData, RI_SUBLIST_ENTRY, sProcListNode);
+				if (psRISubEntry)
+				{
+					psRIEntry = psRISubEntry->psRI;
+				}
+			}
+		}
+		else
+		{
+			/* look-up hPMR in Hash Table */
+			hashData = HASH_Retrieve_Extended (g_pRIHashTable, (IMG_VOID *)&pPMRHashKey);
+			psRIEntry = (RI_LIST_ENTRY *)hashData;
+		}
+		if (!psRIEntry)
+		{
+			/* No entry found in hash table */
+			return PVRSRV_ERROR_NOT_FOUND;
+		}
+		while (psRIEntry)
+		{
+			bDisplayedThisPMR = IMG_FALSE;
+			/* Output details for RI entry */
+			if (!pid)
+			{
+				_RIOutput (("%s (0x%p) suballocs:%d size:0x%llx",
+				            psRIEntry->ai8TextA,
+				            psRIEntry->hPMR,
+				            (IMG_UINT)psRIEntry->ui16SubListCount,
+				            (unsigned long long)psRIEntry->ui64LogicalSize));
+				bDisplayedThisPMR = IMG_TRUE;
+			}
+			ui16SubEntriesParsed = 0;
+			if(psRIEntry->ui16SubListCount)
+			{
+#if _DUMP_LINKEDLIST_INFO
+				_RIOutput (("RI LIST: {sSubListFirst.psNextNode:0x%x}",
+				            (IMG_UINT)psRIEntry->sSubListFirst.psNextNode));
+#endif /* _DUMP_LINKEDLIST_INFO */
+				if (!pid)
+				{
+					psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRIEntry->sSubListFirst)),
+					                                RI_SUBLIST_ENTRY, sListNode);
+				}
+				/* Traverse RI sublist and output details for each entry */
+				while (psRISubEntry && (ui16SubEntriesParsed < psRIEntry->ui16SubListCount))
+				{
+					if (!bDisplayedThisPMR)
+					{
+						_RIOutput (("%s (0x%p) suballocs:%d size:0x%llx",
+						            psRIEntry->ai8TextA,
+						            psRIEntry->hPMR,
+						            (IMG_UINT)psRIEntry->ui16SubListCount,
+						            (unsigned long long)psRIEntry->ui64LogicalSize));
+						bDisplayedThisPMR = IMG_TRUE;
+					}
+#if _DUMP_LINKEDLIST_INFO
+					_RIOutput (("RI LIST:    [this subentry:0x%x]",(IMG_UINT)psRISubEntry));
+					_RIOutput (("RI LIST:     psRI:0x%x",(IMG_UINT32)psRISubEntry->psRI));
+#endif /* _DUMP_LINKEDLIST_INFO */
+
+					{
+						IMG_CHAR szEntryString[RI_MAX_MEMDESC_RI_ENTRY_LEN];
+
+						_GenerateMEMDESCEntryString(psRISubEntry,
+						                            IMG_FALSE,
+						                            RI_MAX_MEMDESC_RI_ENTRY_LEN,
+						                            (IMG_CHAR *)&szEntryString);
+						szEntryString[RI_MAX_MEMDESC_RI_ENTRY_LEN-1] = '\0';
+						_RIOutput (("%s",(IMG_CHAR *)&szEntryString));
+					}
+
+					if (pid)
+					{
+						if((dllist_get_next_node(&(psRISubEntry->sProcListNode)) == 0) ||
+						   (dllist_get_next_node(&(psRISubEntry->sProcListNode)) == (PDLLIST_NODE)hashData))
+						{
+							psRISubEntry = IMG_NULL;
+						}
+						else
+						{
+							psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRISubEntry->sProcListNode)),
+							                                RI_SUBLIST_ENTRY, sProcListNode);
+							if (psRISubEntry)
+							{
+								if (psRIEntry != psRISubEntry->psRI)
+								{
+									/*
+									 * The next MEMDESC in the process linked list is in a different PMR
+									 */
+									psRIEntry = psRISubEntry->psRI;
+									bDisplayedThisPMR = IMG_FALSE;
+								}
+							}
+						}
+					}
+					else
+					{
+						ui16SubEntriesParsed++;
+						psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRISubEntry->sListNode)),
+						                                RI_SUBLIST_ENTRY, sListNode);
+					}
+				}
+			}
+			if (!pid)
+			{
+				if (ui16SubEntriesParsed != psRIEntry->ui16SubListCount)
+				{
+					/*
+					 * Output error message as sublist does not contain the
+					 * number of entries indicated by sublist count
+					 */
+					_RIOutput (("RI ERROR: RI sublist contains %d entries, not %d entries",
+					            ui16SubEntriesParsed,psRIEntry->ui16SubListCount));
+				}
+				else if (psRIEntry->ui16SubListCount && !dllist_get_next_node(&(psRIEntry->sSubListFirst)))
+				{
+					/*
+					 * Output error message as sublist is empty but sublist count
+					 * is not zero
+					 */
+					_RIOutput (("RI ERROR: ui16SubListCount=%d for empty RI sublist",
+					            psRIEntry->ui16SubListCount));
+				}
+			}
+			psRIEntry = IMG_NULL;
+		}
+	}
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	RIDumpAllKM
+
+ @Description
+            Dumps out the contents of all RI List entries (i.e. for all
+            MEMDESC allocations for each PMR).
+            At present, output is directed to Kernel log
+            via PVR_DPF.
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIDumpAllKM(IMG_VOID)
+{
+	if (g_pRIHashTable)
+	{
+		return HASH_Iterate(g_pRIHashTable, (HASH_pfnCallback)_DumpAllEntries);
+	}
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	RIDumpProcessKM
+
+ @Description
+            Dumps out the contents of all MEMDESC RI List entries (for every
+            PMR) which have been allocate by the specified process only.
+            At present, output is directed to Kernel log
+            via PVR_DPF.
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIDumpProcessKM(IMG_PID pid)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	IMG_UINT32  dummyPMR;
+
+	if (g_pProcHashTable)
+	{
+		/* Acquire RI lock*/
+		_RILock();
+
+		eError = _DumpList((PMR *)&dummyPMR,pid);
+
+	    /* Release RI lock*/
+		_RIUnlock();
+	}
+	return eError;
+}
+
+static PVRSRV_ERROR _DumpAllEntries (IMG_UINTPTR_T k, IMG_UINTPTR_T v)
+{
+	RI_LIST_ENTRY *psRIEntry = (RI_LIST_ENTRY *)v;
+
+	PVR_UNREFERENCED_PARAMETER (k);
+
+	return RIDumpListKM(psRIEntry->hPMR);
+}
+
+static PVRSRV_ERROR _DeleteAllEntries (IMG_UINTPTR_T k, IMG_UINTPTR_T v)
+{
+	RI_LIST_ENTRY *psRIEntry = (RI_LIST_ENTRY *)v;
+	RI_SUBLIST_ENTRY *psRISubEntry;
+	PVRSRV_ERROR eResult = PVRSRV_OK;
+
+	PVR_UNREFERENCED_PARAMETER (k);
+
+	while ((eResult == PVRSRV_OK) && (psRIEntry->ui16SubListCount > 0))
+	{
+		psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRIEntry->sSubListFirst)), RI_SUBLIST_ENTRY, sListNode);
+		eResult = RIDeleteMEMDESCEntryKM((RI_HANDLE)psRISubEntry);
+	}
+	if (eResult == PVRSRV_OK)
+	{
+		eResult = RIDeletePMREntryKM((RI_HANDLE)psRIEntry);
+		/*
+		 * If we've deleted the Hash table, return
+		 * an error to stop the iterator...
+		 */
+		if (!g_pRIHashTable)
+		{
+			eResult = PVRSRV_ERROR_RESOURCE_UNAVAILABLE;
+		}
+	}
+	return eResult;
+}
+
+#endif /* if defined(PVR_RI_DEBUG) */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/scp.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/scp.c
new file mode 100644
index 0000000..93f76a3
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/scp.c
@@ -0,0 +1,951 @@
+/*************************************************************************/ /*!
+@File           scp.c
+@Title          Software Command Processor
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    The software command processor allows commands queued and
+                deferred until their synchronisation requirements have been meet.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "scp.h"
+#include "lists.h"
+#include "allocmem.h"
+#include "pvrsrv.h"
+#include "pvr_debug.h"
+#include "osfunc.h"
+#include "lock.h"
+#include "sync_server.h"
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+
+#include <linux/file.h>
+#include <linux/seq_file.h>
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0))
+#include <linux/sw_sync.h>
+#else
+#include <../drivers/staging/android/sw_sync.h>
+#endif
+
+static PVRSRV_ERROR AllocReleaseFence(struct sw_sync_timeline *psTimeline, const char *szName, IMG_UINT32 ui32FenceVal, int *piFenceFd)
+{
+	struct sync_fence *psFence = IMG_NULL;
+	struct sync_pt *psPt;
+	int iFd = get_unused_fd();
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	if (iFd < 0)
+	{
+		return PVRSRV_ERROR_RESOURCE_UNAVAILABLE;
+	}
+
+	psPt = sw_sync_pt_create(psTimeline, ui32FenceVal);
+	if(!psPt)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto ErrorPutFd;
+	}
+
+	psFence = sync_fence_create(szName, psPt);
+	if(!psFence)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto ErrorFreePoint;
+	}
+
+	sync_fence_install(psFence, iFd);
+
+	*piFenceFd = iFd;
+
+ErrorOut:
+	return eError;
+
+ErrorFreePoint:
+	sync_pt_free(psPt);
+
+ErrorPutFd:
+	put_unused_fd(iFd);
+
+	goto ErrorOut;
+}
+
+#endif /* defined(SUPPORT_NATIVE_FENCE_SYNC) */
+
+struct _SCP_CONTEXT_
+{
+	IMG_VOID			*pvCCB;	            /*!< Pointer to the command circler buffer*/
+	volatile IMG_UINT32	ui32DepOffset;      /*!< Dependency offset  */
+	volatile IMG_UINT32	ui32ReadOffset;     /*!< Read offset */
+	volatile IMG_UINT32	ui32WriteOffset;    /*!< Write offset */
+	IMG_UINT32			ui32CCBSize;        /*!< CCB size */
+	IMG_UINT32			psSyncRequesterID;	/*!< Sync requester ID, used when taking sync operations */
+	POS_LOCK			hLock;				/*!< Lock for this structure */
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+	IMG_VOID            *pvTimeline;
+	IMG_UINT32          ui32TimelineVal;
+#endif /* defined(SUPPORT_NATIVE_FENCE_SYNC) */
+};
+
+typedef struct _SCP_SYNC_DATA_
+{
+	SERVER_SYNC_PRIMITIVE	*psSync;		/*!< Server sync */
+	IMG_UINT32				ui32Fence;		/*!< Fence value to check for */
+	IMG_UINT32				ui32Update;		/*!< Fence update value */
+	IMG_UINT32				ui32Flags;		/*!< Flags for this sync data */
+#define SCP_SYNC_DATA_FENCE		(1<<0)		/*!< This sync has a fence */
+#define SCP_SYNC_DATA_UPDATE	(1<<1)		/*!< This sync has an update */
+} SCP_SYNC_DATA;
+
+
+#define SCP_COMMAND_INVALID     0   /*!< Invalid command */
+#define SCP_COMMAND_CALLBACK    1   /*!< Command with callbacks */
+#define SCP_COMMAND_PADDING     2   /*!< Padding */
+typedef struct _SCP_COMMAND_
+{
+	IMG_UINT32				ui32CmdType;        /*!< Command type */
+	IMG_UINT32				ui32CmdSize;		/*!< Total size of the command (i.e. includes header) */
+	IMG_UINT32				ui32SyncCount;      /*!< Total number of syncs in pasSync */
+	SCP_SYNC_DATA			*pasSCPSyncData;    /*!< Pointer to the array of sync data (allocated in the CCB) */
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+	struct sync_fence       *psAcquireFence;
+	struct sync_fence       *psReleaseFence;
+#endif /* defined(SUPPORT_NATIVE_FENCE_SYNC) */
+	SCPReady				pfnReady;           /*!< Pointer to the funtion to check if the command is ready */
+	SCPDo					pfnDo;           	/*!< Pointer to the funtion to call when the command is ready to go */
+	IMG_PVOID				pvReadyData;        /*!< Data to pass into pfnReady */
+	IMG_PVOID				pvCompleteData;     /*!< Data to pass into pfnComplete */
+} SCP_COMMAND;
+
+#define GET_CCB_SPACE(WOff, ROff, CCBSize) \
+	((((ROff) - (WOff)) + ((CCBSize) - 1)) & ((CCBSize) - 1))
+
+#define UPDATE_CCB_OFFSET(Off, PacketSize, CCBSize) \
+	(Off) = (((Off) + (PacketSize)) & ((CCBSize) - 1))
+
+#define PADDING_COMMAND_SIZE	(sizeof(SCP_COMMAND))
+
+#if defined(SCP_DEBUG)
+#define SCP_DEBUG_PRINT(fmt, ...) \
+	PVRSRVDebugPrintf(PVR_DBG_WARNING, \
+					  __FILE__, __LINE__, \
+					  fmt, \
+					  __VA_ARGS__)
+#else
+#define SCP_DEBUG_PRINT(fmt, ...)
+#endif
+
+/*****************************************************************************
+ *                          Internal functions                               *
+ *****************************************************************************/
+
+/*************************************************************************/ /*!
+@Function       __SCPAlloc
+
+@Description    Allocate space in the software command processor.
+
+@Input          psContext            Context to allocate from
+
+@Input          ui32Size                Size to allocate
+
+@Output         ppvBufferSpace          Pointer to space allocated
+
+@Return         PVRSRV_OK if the allocation was successful
+*/
+/*****************************************************************************/
+static
+PVRSRV_ERROR __SCPAlloc(SCP_CONTEXT *psContext,
+						IMG_UINT32 ui32Size,
+						IMG_PVOID *ppvBufferSpace)
+{
+	IMG_UINT32 ui32FreeSpace;
+
+	ui32FreeSpace = GET_CCB_SPACE(psContext->ui32WriteOffset,
+								  psContext->ui32ReadOffset,
+								  psContext->ui32CCBSize);
+	if (ui32FreeSpace >= ui32Size)
+	{
+		*ppvBufferSpace = (IMG_PVOID)((IMG_UINT8 *)psContext->pvCCB +
+		                  psContext->ui32WriteOffset);
+		return PVRSRV_OK;
+	}
+	else
+	{
+		return PVRSRV_ERROR_RETRY;
+	}
+}
+
+/*************************************************************************/ /*!
+@Function       _SCPAlloc
+
+@Description    Allocate space in the software command processor, handling the
+                case where we wrap around the CCB.
+
+@Input          psContext            Context to allocate from
+
+@Input          ui32Size                Size to allocate
+
+@Output         ppvBufferSpace          Pointer to space allocated
+
+@Return         PVRSRV_OK if the allocation was successful
+*/
+/*****************************************************************************/
+static
+PVRSRV_ERROR _SCPAlloc(SCP_CONTEXT *psContext,
+					   IMG_UINT32 ui32Size,
+					   IMG_PVOID *ppvBufferSpace)
+{
+	if ((ui32Size + PADDING_COMMAND_SIZE) > psContext->ui32CCBSize)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "Command size (%d) too big for CCB\n", ui32Size));
+		return PVRSRV_ERROR_CMD_TOO_BIG;
+	}
+
+	/*
+		Check we don't overflow the end of the buffer and make sure we have
+		enough for the padding command
+	*/
+	if ((psContext->ui32WriteOffset + ui32Size + PADDING_COMMAND_SIZE) > psContext->ui32CCBSize)
+	{
+		SCP_COMMAND *psCommand;
+		IMG_PVOID pvCommand;
+		PVRSRV_ERROR eError;
+		IMG_UINT32 ui32Remain = psContext->ui32CCBSize - psContext->ui32WriteOffset;
+
+		/* We're at the end of the buffer without enough contiguous space */
+		eError = __SCPAlloc(psContext, ui32Remain, &pvCommand);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_ASSERT(eError == PVRSRV_ERROR_RETRY);
+			return eError;
+		}
+		psCommand = pvCommand;
+		psCommand->ui32CmdType = SCP_COMMAND_PADDING;
+		psCommand->ui32CmdSize = ui32Remain;
+
+		UPDATE_CCB_OFFSET(psContext->ui32WriteOffset, ui32Remain, psContext->ui32CCBSize);
+	}
+
+	return __SCPAlloc(psContext, ui32Size, ppvBufferSpace);
+}
+
+/*************************************************************************/ /*!
+@Function       _SCPInsert
+
+@Description    Insert the a finished command that was written into the CCB
+                space allocated in a previous call to _SCPAlloc.
+                This makes the command ready to be processed.
+
+@Input          psContext               Context to allocate from
+
+@Input          ui32Size                Size to allocate
+
+@Return         None
+*/
+/*****************************************************************************/
+static
+IMG_VOID _SCPInsert(SCP_CONTEXT *psContext,
+					IMG_UINT32 ui32Size)
+{
+	/*
+	 * Update the write offset.
+	 */
+	UPDATE_CCB_OFFSET(psContext->ui32WriteOffset,
+					  ui32Size,
+					  psContext->ui32CCBSize);
+}
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+
+static void _SCPDumpFence(const char *psczName, struct sync_fence *psFence)
+{
+	struct list_head *psEntry;
+	char szTime[16]  = { '\0' };
+	char szVal1[64]  = { '\0' };
+	char szVal2[64]  = { '\0' };
+	char szVal3[132] = { '\0' };
+	DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf = IMG_NULL;
+
+	pfnDumpDebugPrintf = g_pfnDumpDebugPrintf;
+
+	PVR_DUMPDEBUG_LOG(("\t  %s: [%p] %s: %s", psczName, psFence, psFence->name,
+			 (psFence->status >  0 ? "signaled" :
+			  psFence->status == 0 ? "active" : "error")));
+	list_for_each(psEntry, &psFence->pt_list_head)
+	{
+		struct sync_pt *psPt = container_of(psEntry, struct sync_pt, pt_list);
+		struct timeval tv = ktime_to_timeval(psPt->timestamp);
+		snprintf(szTime, sizeof(szTime), "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
+		if (psPt->parent->ops->pt_value_str &&
+			psPt->parent->ops->timeline_value_str)
+		{
+			psPt->parent->ops->pt_value_str(psPt, szVal1, sizeof(szVal1));
+			psPt->parent->ops->timeline_value_str(psPt->parent, szVal2, sizeof(szVal2));
+			snprintf(szVal3, sizeof(szVal3), ": %s / %s", szVal1, szVal2);
+		}
+		PVR_DUMPDEBUG_LOG(("\t    %s %s%s%s", psPt->parent->name,
+				 (psPt->status >  0 ? "signaled" :
+				  psPt->status == 0 ? "active" : "error"),
+				 (psPt->status >  0 ? szTime : ""),
+				 szVal3));
+	}
+
+}
+
+#endif /* defined(SUPPORT_NATIVE_FENCE_SYNC) */
+
+/*************************************************************************/ /*!
+@Function       _SCPCommandReady
+
+@Description    Check if a command is ready. Checks to see if the command
+                has had it's fences meet and is ready to go.
+
+@Input          psCommand               Command to check
+
+@Return         PVRSRV_OK if the command is ready
+*/
+/*****************************************************************************/
+static
+PVRSRV_ERROR _SCPCommandReady(SCP_COMMAND *psCommand)
+{
+	IMG_UINT32 i;
+	
+	PVR_ASSERT(psCommand->ui32CmdType != SCP_COMMAND_INVALID);
+
+	if (psCommand->ui32CmdType == SCP_COMMAND_PADDING)
+	{
+		return PVRSRV_OK;
+	}
+
+	for (i = 0; i < psCommand->ui32SyncCount; i++)
+	{
+		SCP_SYNC_DATA *psSCPSyncData = &psCommand->pasSCPSyncData[i];
+
+		/*
+			If the same sync is used in concurrent command we can skip the check
+		*/
+		if (psSCPSyncData->ui32Flags & SCP_SYNC_DATA_FENCE)
+		{
+			if (!ServerSyncFenceIsMet(psSCPSyncData->psSync, psSCPSyncData->ui32Fence))
+			{
+				return PVRSRV_ERROR_FAILED_DEPENDENCIES;
+			}
+		}
+	}
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+	/* Check for the provided acquire fence */
+	if (psCommand->psAcquireFence != IMG_NULL)
+	{
+		int err = sync_fence_wait(psCommand->psAcquireFence, 0);
+		/* -ETIME means active. In this case we will retry later again. If the
+		 * return value is an error or zero we will close this fence and
+		 * proceed. This makes sure that we are not getting stuck here when a
+		 * fence changes into an error state for whatever reason. */
+		if (err == -ETIME)
+		{
+			return PVRSRV_ERROR_FAILED_DEPENDENCIES;
+		}
+		else
+		{
+			if (err)
+			{
+				PVR_LOG(("SCP: Fence wait failed with %d", err));
+				_SCPDumpFence("Acquire Fence", psCommand->psAcquireFence);
+			}
+			/* Put the fence. */
+			sync_fence_put(psCommand->psAcquireFence);
+			psCommand->psAcquireFence = IMG_NULL;
+		}
+	}
+#endif /* defined(SUPPORT_NATIVE_FENCE_SYNC) */
+
+	/* Command is ready */
+	if (psCommand->pfnReady(psCommand->pvReadyData))
+	{
+		return PVRSRV_OK;
+	}
+
+	/*
+		If we got here it means the command is ready to go, but the SCP client
+		isn't ready for the command
+	*/
+	return PVRSRV_ERROR_NOT_READY;
+}
+
+/*************************************************************************/ /*!
+@Function       _SCPCommandDo
+
+@Description    Run a command
+
+@Input          psCommand               Command to run
+
+@Return         PVRSRV_OK if the command is ready
+*/
+/*****************************************************************************/
+static
+IMG_VOID _SCPCommandDo(SCP_COMMAND *psCommand)
+{
+	if (psCommand->ui32CmdType == SCP_COMMAND_CALLBACK)
+	{
+		psCommand->pfnDo(psCommand->pvReadyData, psCommand->pvCompleteData);
+	}
+}
+
+/*************************************************************************/ /*!
+@Function       _SCPDumpCommand
+
+@Description    Dump a SCP command
+
+@Input          psCommand               Command to dump
+
+@Return         None
+*/
+/*****************************************************************************/
+static IMG_VOID _SCPDumpCommand(SCP_COMMAND *psCommand)
+{
+	IMG_UINT32 i;
+	DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf = IMG_NULL;
+
+	pfnDumpDebugPrintf = g_pfnDumpDebugPrintf;
+
+	PVR_DUMPDEBUG_LOG(("\tCommand type = %d (@%p)", psCommand->ui32CmdType, psCommand));
+
+	if (psCommand->ui32CmdType == SCP_COMMAND_CALLBACK)
+	{
+		for (i = 0; i < psCommand->ui32SyncCount; i++)
+		{
+			SCP_SYNC_DATA *psSCPSyncData = &psCommand->pasSCPSyncData[i];
+
+			PVR_ASSERT(psCommand->pasSCPSyncData != IMG_NULL);
+			PVR_ASSERT(psSCPSyncData != IMG_NULL);
+
+			/*
+				Only dump this sync if there is a fence operation on it
+			*/
+			if (psSCPSyncData->ui32Flags & SCP_SYNC_DATA_FENCE)
+			{
+				PVR_ASSERT(psSCPSyncData->psSync != IMG_NULL);
+				PVR_DUMPDEBUG_LOG(("\t\tFenced on 0x%08x = 0x%08x (?= 0x%08x)",
+						ServerSyncGetFWAddr(psSCPSyncData->psSync),
+						psSCPSyncData->ui32Fence,
+						ServerSyncGetValue(psSCPSyncData->psSync)));
+			}
+		}
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+		if (psCommand->psAcquireFence)
+		{
+			_SCPDumpFence("Acquire Fence", psCommand->psAcquireFence);
+		}
+		if (psCommand->psReleaseFence)
+		{
+			_SCPDumpFence("Release Fence", psCommand->psReleaseFence);
+		}
+#endif /* defined(SUPPORT_NATIVE_FENCE_SYNC) */
+	}
+}
+
+/*****************************************************************************
+ *                    Public interface functions                             *
+ *****************************************************************************/
+
+/*
+	SCPCreate
+*/
+IMG_EXPORT
+PVRSRV_ERROR IMG_CALLCONV SCPCreate(IMG_UINT32 ui32CCBSizeLog2,
+									SCP_CONTEXT **ppsContext)
+{
+	SCP_CONTEXT	*psContext;
+	IMG_UINT32 ui32Power2QueueSize = 1 << ui32CCBSizeLog2;
+	PVRSRV_ERROR eError;
+
+	/* allocate an internal queue info structure */
+	psContext = OSAllocMem(sizeof(SCP_CONTEXT));
+	if (psContext == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"SCPCreate: Failed to alloc queue struct"));
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto ErrorExit;
+	}
+	OSMemSet(psContext, 0, sizeof(SCP_CONTEXT));
+
+	/* allocate the command queue buffer - allow for overrun */
+	psContext->pvCCB = OSAllocMem(ui32Power2QueueSize);
+	if (psContext->pvCCB == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"SCPCreate: Failed to alloc queue buffer"));
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto ErrorExit;
+	}
+
+	/* Sanity check: Should be zeroed by OSMemSet */
+	PVR_ASSERT(psContext->ui32ReadOffset == 0);
+	PVR_ASSERT(psContext->ui32WriteOffset == 0);
+
+	psContext->ui32CCBSize = ui32Power2QueueSize;
+
+	eError = OSLockCreate(&psContext->hLock, LOCK_TYPE_NONE);
+	if (eError != PVRSRV_OK)
+	{
+		goto ErrorExit;
+	}
+
+	eError = PVRSRVServerSyncRequesterRegisterKM(&psContext->psSyncRequesterID);
+	if (eError != PVRSRV_OK)
+	{
+		goto ErrorExit;
+	}	
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+	psContext->pvTimeline = sw_sync_timeline_create("pvr_scp");
+	if(psContext->pvTimeline == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"SCPCreate: sw_sync_timeline_create() failed"));
+		goto ErrorExit;
+	}
+	psContext->ui32TimelineVal = 0;
+#endif /* defined(SUPPORT_NATIVE_FENCE_SYNC) */
+
+	SCP_DEBUG_PRINT("%s: New SCP %p of size %d", 
+			__FUNCTION__, psContext, ui32Power2QueueSize);
+
+	*ppsContext = psContext;
+
+	return PVRSRV_OK;
+
+ErrorExit:
+	if(psContext)
+	{
+		if(psContext->pvCCB)
+		{
+			OSFreeMem(psContext->pvCCB);
+			psContext->pvCCB = IMG_NULL;
+		}
+
+		OSFreeMem(psContext);
+	}
+
+	return eError;
+}
+
+/*
+	SCPAllocCommand
+*/
+IMG_EXPORT
+PVRSRV_ERROR IMG_CALLCONV SCPAllocCommand(SCP_CONTEXT *psContext,
+										  IMG_UINT32 ui32SyncPrimCount,
+										  SERVER_SYNC_PRIMITIVE **papsSync,
+										  IMG_BOOL *pabUpdate,
+										  IMG_INT32 i32AcquireFenceFd,
+										  SCPReady pfnCommandReady,
+										  SCPDo pfnCommandDo,
+										  IMG_SIZE_T ui32ReadyDataByteSize,
+										  IMG_SIZE_T ui32CompleteDataByteSize,
+										  IMG_PVOID *ppvReadyData,
+										  IMG_PVOID *ppvCompleteData,
+										  IMG_INT32 *pi32ReleaseFenceFd)
+{
+	PVRSRV_ERROR eError;
+	SCP_COMMAND *psCommand;
+	IMG_UINT32 ui32CommandSize;
+	IMG_UINT32 ui32SyncOpSize;
+	IMG_UINT32 i;
+
+	/* Round up the incoming data sizes to be pointer granular */
+	ui32ReadyDataByteSize = (ui32ReadyDataByteSize & (~(sizeof(IMG_PVOID)-1))) + sizeof(IMG_PVOID);
+	ui32CompleteDataByteSize = (ui32CompleteDataByteSize & (~(sizeof(IMG_PVOID)-1))) + sizeof(IMG_PVOID);
+
+	ui32SyncOpSize = (sizeof(PVRSRV_CLIENT_SYNC_PRIM_OP) * ui32SyncPrimCount);
+
+	/* Total command size */
+	ui32CommandSize = sizeof(SCP_COMMAND) +
+					  ui32SyncOpSize +
+					  ui32ReadyDataByteSize +
+					  ui32CompleteDataByteSize;
+
+	eError = _SCPAlloc(psContext, ui32CommandSize, (IMG_VOID **) &psCommand);
+	if(eError != PVRSRV_OK)
+	{
+		SCP_DEBUG_PRINT("%s: Failed to allocate command of size %d for ctx %p (%d)", __FUNCTION__, ui32CommandSize, psContext, eError);
+		return eError;
+	}
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+	if (pi32ReleaseFenceFd)
+	{
+		/* Create a release sync for the caller. */
+		eError = AllocReleaseFence(psContext->pvTimeline, "pvr_scp_retire",
+								   ++psContext->ui32TimelineVal,
+								   pi32ReleaseFenceFd);
+		if (eError != PVRSRV_OK)
+		{
+			return eError;
+		}
+	}
+#else /* defined(SUPPORT_NATIVE_FENCE_SYNC) */
+	PVR_UNREFERENCED_PARAMETER(i32AcquireFenceFd);
+	PVR_UNREFERENCED_PARAMETER(pi32ReleaseFenceFd);
+#endif /* defined(SUPPORT_NATIVE_FENCE_SYNC) */
+
+	SCP_DEBUG_PRINT("%s: New Command %p for ctx %p of size %d, syncCount: %d", 
+			__FUNCTION__, psCommand, psContext, ui32CommandSize, ui32SyncPrimCount);
+
+	/* setup the command */
+	psCommand->ui32CmdSize = ui32CommandSize;
+	psCommand->ui32CmdType = SCP_COMMAND_CALLBACK;
+	psCommand->ui32SyncCount = ui32SyncPrimCount;
+
+	/* Set up command pointers */
+	psCommand->pasSCPSyncData = (SCP_SYNC_DATA *) (((IMG_CHAR *) psCommand) + sizeof(SCP_COMMAND));
+
+	psCommand->pfnReady = pfnCommandReady;
+	psCommand->pfnDo = pfnCommandDo;
+
+	psCommand->pvReadyData = ((IMG_CHAR *) psCommand) +
+							 sizeof(SCP_COMMAND) + ui32SyncOpSize;
+	psCommand->pvCompleteData = ((IMG_CHAR *) psCommand) +
+								sizeof(SCP_COMMAND) + ui32SyncOpSize +
+								ui32ReadyDataByteSize;
+
+	/* Copy over the sync data */
+	for (i=0;i<ui32SyncPrimCount;i++)
+	{
+		SCP_SYNC_DATA *psSCPSyncData = &psCommand->pasSCPSyncData[i];
+		IMG_BOOL bFenceRequired;
+
+		psSCPSyncData->psSync = papsSync[i];
+
+		PVRSRVServerSyncQueueSWOpKM(papsSync[i],
+								  &psSCPSyncData->ui32Fence,
+								  &psSCPSyncData->ui32Update,
+								  psContext->psSyncRequesterID,
+								  pabUpdate[i],
+								  &bFenceRequired);
+		if (bFenceRequired)
+		{
+			psSCPSyncData->ui32Flags = SCP_SYNC_DATA_FENCE;
+		}
+		else
+		{
+			psSCPSyncData->ui32Flags = 0;
+		}
+
+		/* Only update if requested */
+		if (pabUpdate[i])
+		{
+			psSCPSyncData->ui32Flags |= SCP_SYNC_DATA_UPDATE;
+		}
+	}
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+	/* Copy over the fences */
+	if (i32AcquireFenceFd >= 0)
+	{
+		psCommand->psAcquireFence = sync_fence_fdget(i32AcquireFenceFd);
+	}
+	else
+	{
+		psCommand->psAcquireFence = IMG_NULL;
+	}
+
+	if (pi32ReleaseFenceFd)
+	{
+		psCommand->psReleaseFence = sync_fence_fdget(*pi32ReleaseFenceFd);
+	}
+	else
+	{
+		psCommand->psReleaseFence = IMG_NULL;
+	}
+#endif /* defined(SUPPORT_NATIVE_FENCE_SYNC) */
+
+	*ppvReadyData = psCommand->pvReadyData;
+	*ppvCompleteData = psCommand->pvCompleteData;
+
+	return PVRSRV_OK;
+}
+
+/*
+	SCPSubmitCommand
+*/
+IMG_EXPORT 
+PVRSRV_ERROR SCPSubmitCommand(SCP_CONTEXT *psContext)
+{
+	SCP_COMMAND *psCommand;
+
+	if (psContext == IMG_NULL)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psCommand = (SCP_COMMAND *) ((IMG_UINT8 *) psContext->pvCCB
+				+ psContext->ui32WriteOffset);
+
+	SCP_DEBUG_PRINT("%s: Submit command %p for ctx %p", 
+			__FUNCTION__, psCommand, psContext);
+
+	_SCPInsert(psContext, psCommand->ui32CmdSize);
+
+	return PVRSRV_OK;
+}
+
+/*
+	SCPRun
+*/
+IMG_EXPORT
+PVRSRV_ERROR SCPRun(SCP_CONTEXT *psContext)
+{
+	SCP_COMMAND *psCommand;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+
+	if (psContext == IMG_NULL)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	OSLockAcquire(psContext->hLock);
+	while (psContext->ui32DepOffset != psContext->ui32WriteOffset)
+	{
+		psCommand = (SCP_COMMAND *)((IMG_UINT8 *)psContext->pvCCB +
+		            psContext->ui32DepOffset);
+
+		/* See if the command is ready to go */
+		eError = _SCPCommandReady(psCommand);
+
+		SCP_DEBUG_PRINT("%s: Processes command %p for ctx %p (%d)", 
+				__FUNCTION__, psCommand, psContext, eError);
+
+		if (eError == PVRSRV_OK)
+		{
+			/* processed cmd so update queue */
+			UPDATE_CCB_OFFSET(psContext->ui32DepOffset,
+							  psCommand->ui32CmdSize,
+							  psContext->ui32CCBSize);
+		}
+		else
+		{
+			/* As soon as we hit a command that can't run break out */
+			break;
+		}
+
+		/* Run the command */
+		_SCPCommandDo(psCommand);
+	}
+	OSLockRelease(psContext->hLock);
+
+	return eError;
+}
+
+IMG_EXPORT
+PVRSRV_ERROR SCPFlush(SCP_CONTEXT *psContext)
+{
+	if (psContext->ui32ReadOffset != psContext->ui32WriteOffset)
+	{
+		return PVRSRV_ERROR_RETRY;
+	}
+
+	return PVRSRV_OK;
+}
+
+/*
+	SCPCommandComplete
+*/
+IMG_EXPORT
+IMG_VOID SCPCommandComplete(SCP_CONTEXT *psContext)
+{
+	SCP_COMMAND *psCommand;
+	IMG_UINT32 i;
+	IMG_BOOL bContinue = IMG_TRUE;
+
+	if (psContext == IMG_NULL)
+	{
+		return;
+	}
+
+	if (psContext->ui32ReadOffset == psContext->ui32DepOffset)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "SCPCommandComplete: Called with no work to do!"));
+		return;
+	}	
+
+	while(bContinue)
+	{
+		psCommand = (SCP_COMMAND *) ((IMG_UINT8 *) psContext->pvCCB + 
+					psContext->ui32ReadOffset);
+
+		if (psCommand->ui32CmdType == SCP_COMMAND_CALLBACK)
+		{
+			/* Do any fence updates */
+			for (i=0;i<psCommand->ui32SyncCount;i++)
+			{
+				SCP_SYNC_DATA *psSCPSyncData = &psCommand->pasSCPSyncData[i];
+				IMG_BOOL bUpdate = (psSCPSyncData->ui32Flags & SCP_SYNC_DATA_UPDATE);
+	
+				ServerSyncCompleteOp(psSCPSyncData->psSync, bUpdate, psSCPSyncData->ui32Update);
+
+				if (bUpdate)
+				{
+					psSCPSyncData->ui32Flags = 0; /* Stop future interaction with this sync prim. */
+					psSCPSyncData->psSync = NULL; /* Clear psSync as it is no longer referenced. */
+				}
+			}
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+			if (psCommand->psReleaseFence)
+			{
+				sw_sync_timeline_inc(psContext->pvTimeline, 1);
+				/* Decrease the ref to this fence */
+				sync_fence_put(psCommand->psReleaseFence);
+				psCommand->psReleaseFence = IMG_NULL;
+			}
+#endif /* defined(SUPPORT_NATIVE_FENCE_SYNC) */
+
+			bContinue = IMG_FALSE;
+		}
+
+		/* processed cmd so update queue */
+		UPDATE_CCB_OFFSET(psContext->ui32ReadOffset,
+						  psCommand->ui32CmdSize,
+						  psContext->ui32CCBSize);
+
+		SCP_DEBUG_PRINT("%s: Complete command %p for ctx %p (continue: %d)", 
+				__FUNCTION__, psCommand, psContext, bContinue);
+
+	}
+}
+
+IMG_EXPORT
+IMG_BOOL SCPHasPendingCommand(SCP_CONTEXT *psContext)
+{
+	return psContext->ui32DepOffset != psContext->ui32WriteOffset;
+}
+
+IMG_EXPORT
+IMG_VOID IMG_CALLCONV SCPDumpStatus(SCP_CONTEXT *psContext)
+{
+	DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf = IMG_NULL;
+
+	pfnDumpDebugPrintf = g_pfnDumpDebugPrintf;
+
+	PVR_ASSERT(psContext != IMG_NULL);
+
+	/*
+		Acquire the lock to ensure that the SCP isn't run while
+		while we're dumping info
+	*/
+	OSLockAcquire(psContext->hLock);
+
+	PVR_DUMPDEBUG_LOG(("Pending command:"));
+	if (psContext->ui32DepOffset == psContext->ui32WriteOffset)
+	{
+		PVR_DUMPDEBUG_LOG(("\tNone"));
+	}
+	else
+	{
+		SCP_COMMAND *psCommand;
+		IMG_UINT32 ui32DepOffset = psContext->ui32DepOffset;
+
+		while (ui32DepOffset != psContext->ui32WriteOffset)
+		{
+		        /* Dump the command we're pending on */
+		        psCommand = (SCP_COMMAND *)((IMG_UINT8 *)psContext->pvCCB +
+		                ui32DepOffset);
+
+		        _SCPDumpCommand(psCommand);
+
+		        /* processed cmd so update queue */
+		        UPDATE_CCB_OFFSET(ui32DepOffset,
+		                                          psCommand->ui32CmdSize,
+		                                          psContext->ui32CCBSize);
+
+		}
+	}
+
+	PVR_DUMPDEBUG_LOG(("Active command(s):"));
+	if (psContext->ui32DepOffset == psContext->ui32ReadOffset)
+	{
+		PVR_DUMPDEBUG_LOG(("\tNone"));
+	}
+	else
+	{
+		SCP_COMMAND *psCommand;
+		IMG_UINT32 ui32ReadOffset = psContext->ui32ReadOffset;
+
+		while (ui32ReadOffset != psContext->ui32DepOffset)
+		{
+			psCommand = (SCP_COMMAND *)((IMG_UINT8 *)psContext->pvCCB +
+			            ui32ReadOffset);
+
+			_SCPDumpCommand(psCommand);
+
+			/* processed cmd so update queue */
+			UPDATE_CCB_OFFSET(ui32ReadOffset,
+							  psCommand->ui32CmdSize,
+							  psContext->ui32CCBSize);
+		}
+	}
+
+	OSLockRelease(psContext->hLock);
+}
+
+
+/*
+	SCPDestroy
+*/
+IMG_EXPORT
+IMG_VOID IMG_CALLCONV SCPDestroy(SCP_CONTEXT *psContext)
+{
+	/*
+		The caller must ensure that they completed all queued operations
+		before calling this function
+	*/
+	
+	PVR_ASSERT(psContext->ui32ReadOffset == psContext->ui32WriteOffset);
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+	sync_timeline_destroy(psContext->pvTimeline);
+#endif
+
+	PVRSRVServerSyncRequesterUnregisterKM(psContext->psSyncRequesterID);
+	OSLockDestroy(psContext->hLock);
+	psContext->hLock = IMG_NULL;
+	OSFreeMem(psContext->pvCCB);
+	psContext->pvCCB = IMG_NULL;
+	OSFreeMem(psContext);
+}
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/srvcore.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/srvcore.c
new file mode 100644
index 0000000..dba4766
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/srvcore.c
@@ -0,0 +1,783 @@
+/*************************************************************************/ /*!
+@File
+@Title          PVR Common Bridge Module (kernel side)
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements core PVRSRV API, server side
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include "ra.h"
+#include "pvr_bridge.h"
+#include "connection_server.h"
+#include "device.h"
+
+#include "pdump_km.h"
+
+#include "srvkm.h"
+#include "allocmem.h"
+#include "devicemem.h"
+
+#include "srvcore.h"
+#include "pvrsrv.h"
+#include "power.h"
+#include "lists.h"
+
+#include "rgx_options_km.h"
+#include "pvrversion.h"
+#include "lock.h"
+#include "osfunc.h"
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#include "physmem_lma.h"
+#include "services.h"
+#endif
+
+/* For the purpose of maintainability, it is intended that this file should not
+ * contain any OS specific #ifdefs. Please find a way to add e.g.
+ * an osfunc.c abstraction or override the entire function in question within
+ * env,*,pvr_bridge_k.c
+ */
+
+PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY g_BridgeDispatchTable[BRIDGE_DISPATCH_TABLE_ENTRY_COUNT];
+
+static IMG_UINT16 g_BridgeDispatchTableStartOffsets[BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT] =
+{
+		[PVRSRV_BRIDGE_SRVCORE] = PVRSRV_BRIDGE_SRVCORE_DISPATCH_FIRST,
+		[PVRSRV_BRIDGE_SYNC] = PVRSRV_BRIDGE_SYNC_DISPATCH_FIRST,
+		[PVRSRV_BRIDGE_SYNCEXPORT] = PVRSRV_BRIDGE_SYNCEXPORT_DISPATCH_FIRST,
+		[PVRSRV_BRIDGE_SYNCSEXPORT] = PVRSRV_BRIDGE_SYNCSEXPORT_DISPATCH_FIRST,
+		[PVRSRV_BRIDGE_PDUMPCTRL] = PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_FIRST,
+		[PVRSRV_BRIDGE_MM] = PVRSRV_BRIDGE_MM_DISPATCH_FIRST,
+		[PVRSRV_BRIDGE_MMPLAT] = PVRSRV_BRIDGE_MMPLAT_DISPATCH_FIRST,
+		[PVRSRV_BRIDGE_CMM] = PVRSRV_BRIDGE_CMM_DISPATCH_FIRST,
+		[PVRSRV_BRIDGE_PDUMPMM] = PVRSRV_BRIDGE_PDUMPMM_DISPATCH_FIRST,
+		[PVRSRV_BRIDGE_PDUMP] = PVRSRV_BRIDGE_PDUMP_DISPATCH_FIRST,
+		[PVRSRV_BRIDGE_DMABUF] = PVRSRV_BRIDGE_DMABUF_DISPATCH_FIRST,
+		[PVRSRV_BRIDGE_DC] = PVRSRV_BRIDGE_DC_DISPATCH_FIRST,
+		[PVRSRV_BRIDGE_CACHEGENERIC] = PVRSRV_BRIDGE_CACHEGENERIC_DISPATCH_FIRST,
+		[PVRSRV_BRIDGE_SMM] = PVRSRV_BRIDGE_SMM_DISPATCH_FIRST,
+		[PVRSRV_BRIDGE_PVRTL] = PVRSRV_BRIDGE_PVRTL_DISPATCH_FIRST,
+		[PVRSRV_BRIDGE_RI] = PVRSRV_BRIDGE_RI_DISPATCH_FIRST,
+		[PVRSRV_BRIDGE_VALIDATION] = PVRSRV_BRIDGE_VALIDATION_DISPATCH_FIRST,
+		[PVRSRV_BRIDGE_TUTILS] = PVRSRV_BRIDGE_TUTILS_DISPATCH_FIRST,
+		[PVRSRV_BRIDGE_DEVICEMEMHISTORY] = PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_FIRST,
+#if defined(SUPPORT_RGX)
+		/* Need a gap here to start next entry at element 150 */
+		[PVRSRV_BRIDGE_RGXTQ] = PVRSRV_BRIDGE_RGXTQ_DISPATCH_FIRST,
+		[PVRSRV_BRIDGE_RGXCMP] = PVRSRV_BRIDGE_RGXCMP_DISPATCH_FIRST,
+		[PVRSRV_BRIDGE_RGXINIT] = PVRSRV_BRIDGE_RGXINIT_DISPATCH_FIRST,
+		[PVRSRV_BRIDGE_RGXTA3D] = PVRSRV_BRIDGE_RGXTA3D_DISPATCH_FIRST,
+		[PVRSRV_BRIDGE_BREAKPOINT] = PVRSRV_BRIDGE_BREAKPOINT_DISPATCH_FIRST,
+		[PVRSRV_BRIDGE_DEBUGMISC] = PVRSRV_BRIDGE_DEBUGMISC_DISPATCH_FIRST,
+		[PVRSRV_BRIDGE_RGXPDUMP] = PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_FIRST,
+		[PVRSRV_BRIDGE_RGXHWPERF] = PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_FIRST,
+		[PVRSRV_BRIDGE_RGXRAY] = PVRSRV_BRIDGE_RGXRAY_DISPATCH_FIRST,
+		[PVRSRV_BRIDGE_REGCONFIG] = PVRSRV_BRIDGE_REGCONFIG_DISPATCH_FIRST,
+		[PVRSRV_BRIDGE_TIMERQUERY] = PVRSRV_BRIDGE_TIMERQUERY_DISPATCH_FIRST,
+#endif
+};
+
+
+#if defined(DEBUG_BRIDGE_KM)
+PVRSRV_BRIDGE_GLOBAL_STATS g_BridgeGlobalStats;
+#endif
+
+
+#if defined(DEBUG_BRIDGE_KM)
+PVRSRV_ERROR
+CopyFromUserWrapper(CONNECTION_DATA *psConnection,
+					IMG_UINT32 ui32DispatchTableEntry,
+					IMG_VOID *pvDest,
+					IMG_VOID *pvSrc,
+					IMG_UINT32 ui32Size)
+{
+	g_BridgeDispatchTable[ui32DispatchTableEntry].ui32CopyFromUserTotalBytes+=ui32Size;
+	g_BridgeGlobalStats.ui32TotalCopyFromUserBytes+=ui32Size;
+	return OSBridgeCopyFromUser(psConnection, pvDest, pvSrc, ui32Size);
+}
+PVRSRV_ERROR
+CopyToUserWrapper(CONNECTION_DATA *psConnection,
+				  IMG_UINT32 ui32DispatchTableEntry,
+				  IMG_VOID *pvDest,
+				  IMG_VOID *pvSrc,
+				  IMG_UINT32 ui32Size)
+{
+	g_BridgeDispatchTable[ui32DispatchTableEntry].ui32CopyToUserTotalBytes+=ui32Size;
+	g_BridgeGlobalStats.ui32TotalCopyToUserBytes+=ui32Size;
+	return OSBridgeCopyToUser(psConnection, pvDest, pvSrc, ui32Size);
+}
+#else
+INLINE PVRSRV_ERROR
+CopyFromUserWrapper(CONNECTION_DATA *psConnection,
+					IMG_UINT32 ui32DispatchTableEntry,
+					IMG_VOID *pvDest,
+					IMG_VOID *pvSrc,
+					IMG_UINT32 ui32Size)
+{
+	PVR_UNREFERENCED_PARAMETER (ui32DispatchTableEntry);
+	return OSBridgeCopyFromUser(psConnection, pvDest, pvSrc, ui32Size);
+}
+INLINE PVRSRV_ERROR
+CopyToUserWrapper(CONNECTION_DATA *psConnection,
+				  IMG_UINT32 ui32DispatchTableEntry,
+				  IMG_VOID *pvDest,
+				  IMG_VOID *pvSrc,
+				  IMG_UINT32 ui32Size)
+{
+	PVR_UNREFERENCED_PARAMETER (ui32DispatchTableEntry);
+	return OSBridgeCopyToUser(psConnection, pvDest, pvSrc, ui32Size);
+}
+#endif
+
+PVRSRV_ERROR
+PVRSRVConnectKM(CONNECTION_DATA *psConnection,
+				IMG_UINT32 ui32Flags,
+				IMG_UINT32 ui32ClientBuildOptions,
+				IMG_UINT32 ui32ClientDDKVersion,
+				IMG_UINT32 ui32ClientDDKBuild,
+				IMG_UINT8  *pui8KernelArch,
+				IMG_UINT32 *ui32Log2PageSize)
+{
+	PVRSRV_ERROR		eError = PVRSRV_OK;
+	IMG_UINT32			ui32BuildOptions, ui32BuildOptionsMismatch;
+	IMG_UINT32			ui32DDKVersion, ui32DDKBuild;
+	
+	*ui32Log2PageSize = GET_LOG2_PAGESIZE();
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+{
+	IMG_UINT32	ui32OSid = 0, ui32OSidReg = 0;
+
+	IMG_PID pIDCurrent = OSGetCurrentProcessID();
+
+	ui32OSid    = (ui32Flags & (OSID_BITS_FLAGS_MASK<<(OSID_BITS_FLAGS_OFFSET  ))) >> (OSID_BITS_FLAGS_OFFSET);
+	ui32OSidReg = (ui32Flags & (OSID_BITS_FLAGS_MASK<<(OSID_BITS_FLAGS_OFFSET+3))) >> (OSID_BITS_FLAGS_OFFSET+3);
+
+	InsertPidOSidsCoupling(pIDCurrent, ui32OSid, ui32OSidReg);
+
+	PVR_DPF((PVR_DBG_MESSAGE,"[GPU Virtualization Validation]: OSIDs: %d, %d\n",ui32OSid, ui32OSidReg));
+}
+#endif
+
+	PVR_UNREFERENCED_PARAMETER(ui32Flags);
+	if(ui32Flags & SRV_FLAGS_INIT_PROCESS)
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "%s: Connecting as init process", __func__));
+		if ((OSProcHasPrivSrvInit() == IMG_FALSE) || PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_RUNNING) || PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_RAN))
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Rejecting init process", __func__));
+			eError = PVRSRV_ERROR_SRV_CONNECT_FAILED;
+			goto chk_exit;
+		}
+#if defined (__linux__)
+		PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_RUNNING, IMG_TRUE);
+#endif
+	}
+	else
+	{
+		if(PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_RAN))
+		{
+			if (!PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_SUCCESSFUL))
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s: Initialisation failed.  Driver unusable.",
+					__FUNCTION__));
+				eError = PVRSRV_ERROR_INIT_FAILURE;
+				goto chk_exit;
+			}
+		}
+		else
+		{
+			if(PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_RUNNING))
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s: Initialisation is in progress",
+						 __FUNCTION__));
+				eError = PVRSRV_ERROR_RETRY;
+				goto chk_exit;
+			}
+			else
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s: Driver initialisation not completed yet.",
+						 __FUNCTION__));
+				eError = PVRSRV_ERROR_RETRY;
+				goto chk_exit;
+			}
+		}
+	}
+	ui32ClientBuildOptions &= RGX_BUILD_OPTIONS_MASK_KM;
+	/*
+	 * Validate the build options
+	 */
+	ui32BuildOptions = (RGX_BUILD_OPTIONS_KM);
+	if (ui32BuildOptions != ui32ClientBuildOptions)
+	{
+		ui32BuildOptionsMismatch = ui32BuildOptions ^ ui32ClientBuildOptions;
+		if ( (ui32ClientBuildOptions & ui32BuildOptionsMismatch) != 0)
+		{
+			PVR_LOG(("(FAIL) %s: Mismatch in client-side and KM driver build options; "
+				"extra options present in client-side driver: (0x%x). Please check rgx_options.h",
+				__FUNCTION__,
+				ui32ClientBuildOptions & ui32BuildOptionsMismatch ));
+		}
+
+		if ( (ui32BuildOptions & ui32BuildOptionsMismatch) != 0)
+		{
+			PVR_LOG(("(FAIL) %s: Mismatch in client-side and KM driver build options; "
+				"extra options present in KM driver: (0x%x). Please check rgx_options.h",
+				__FUNCTION__,
+				ui32BuildOptions & ui32BuildOptionsMismatch ));
+		}
+		eError = PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH;
+		goto chk_exit;
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "%s: COMPAT_TEST: Client-side and KM driver build options match. [ OK ]", __FUNCTION__));
+	}
+
+	/*
+	 * Validate DDK version
+	 */
+	ui32DDKVersion = PVRVERSION_PACK(PVRVERSION_MAJ, PVRVERSION_MIN);
+	if (ui32ClientDDKVersion != ui32DDKVersion)
+	{
+		PVR_LOG(("(FAIL) %s: Incompatible driver DDK revision (%u.%u) / client DDK revision (%u.%u).",
+				__FUNCTION__,
+				PVRVERSION_MAJ, PVRVERSION_MIN,
+				PVRVERSION_UNPACK_MAJ(ui32ClientDDKVersion),
+				PVRVERSION_UNPACK_MIN(ui32ClientDDKVersion)));
+		eError = PVRSRV_ERROR_DDK_VERSION_MISMATCH;
+		PVR_DBG_BREAK;
+		goto chk_exit;
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "%s: COMPAT_TEST: driver DDK revision (%u.%u) and client DDK revision (%u.%u) match. [ OK ]",
+				__FUNCTION__,
+				PVRVERSION_MAJ, PVRVERSION_MIN, PVRVERSION_MAJ, PVRVERSION_MIN));
+	}
+	
+	/*
+	 * Validate DDK build
+	 */
+	ui32DDKBuild = PVRVERSION_BUILD;
+	if (ui32ClientDDKBuild != ui32DDKBuild)
+	{
+		PVR_LOG(("(FAIL) %s: Incompatible driver DDK build (%d) / client DDK build (%d).",
+				__FUNCTION__, ui32DDKBuild, ui32ClientDDKBuild));
+		eError = PVRSRV_ERROR_DDK_BUILD_MISMATCH;
+		PVR_DBG_BREAK;
+		goto chk_exit;
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "%s: COMPAT_TEST: driver DDK build (%d) and client DDK build (%d) match. [ OK ]",
+				__FUNCTION__, ui32DDKBuild, ui32ClientDDKBuild));
+	}
+
+	/* Success so far so is it the PDump client that is connecting? */
+	if (ui32Flags & SRV_FLAGS_PDUMPCTRL)
+	{
+		PDumpConnectionNotify();
+	}
+
+	PVR_ASSERT(pui8KernelArch != NULL);
+	/* Can't use __SIZEOF_POINTER__ here as it is not defined on Windows */
+	if (sizeof(IMG_PVOID) == 8)
+	{
+		*pui8KernelArch = 64;
+	}
+	else
+	{
+		*pui8KernelArch = 32;
+	}
+
+	if (ui32Flags & SRV_FLAGS_INIT_PROCESS)
+	{
+		psConnection->bInitProcess = IMG_TRUE;
+	}
+
+#if defined(DEBUG_BRIDGE_KM)
+	{
+		int ii;
+
+		/* dump dispatch table offset lookup table */
+		PVR_DPF((PVR_DBG_MESSAGE, "%s: g_BridgeDispatchTableStartOffsets[0-%lu] entries:", __FUNCTION__, BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT - 1));
+		for (ii=0; ii < BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT; ii++)
+		{
+			PVR_DPF((PVR_DBG_MESSAGE, "g_BridgeDispatchTableStartOffsets[%d]: %u", ii, g_BridgeDispatchTableStartOffsets[ii]));
+		}
+	}
+#endif
+
+chk_exit:
+	return eError;
+}
+
+PVRSRV_ERROR
+PVRSRVDisconnectKM(IMG_VOID)
+{
+	/* just return OK, per-process data is cleaned up by resmgr */
+
+	return PVRSRV_OK;
+}
+
+/*
+	PVRSRVDumpDebugInfoKM
+*/
+PVRSRV_ERROR
+PVRSRVDumpDebugInfoKM(IMG_UINT32 ui32VerbLevel)
+{
+	if (ui32VerbLevel > DEBUG_REQUEST_VERBOSITY_MAX)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+	PVR_LOG(("User requested PVR debug info"));
+
+	PVRSRVDebugRequest(ui32VerbLevel, IMG_NULL);
+									   
+	return PVRSRV_OK;
+}
+
+/*
+	PVRSRVGetDevClockSpeedKM
+*/
+PVRSRV_ERROR
+PVRSRVGetDevClockSpeedKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+						 IMG_PUINT32  pui32RGXClockSpeed)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PVR_ASSERT(psDeviceNode->pfnDeviceClockSpeed != IMG_NULL);
+
+	eError = psDeviceNode->pfnDeviceClockSpeed(psDeviceNode, pui32RGXClockSpeed);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "PVRSRVGetDevClockSpeedKM: "
+				"Could not get device clock speed (%d)!",
+				eError));
+	}
+
+	return eError;
+}
+
+
+/*
+	PVRSRVHWOpTimeoutKM
+*/
+PVRSRV_ERROR
+PVRSRVHWOpTimeoutKM(IMG_VOID)
+{
+#if defined(PVRSRV_RESET_ON_HWTIMEOUT)
+	PVR_LOG(("User requested OS reset"));
+	OSPanic();
+#endif
+	PVR_LOG(("HW operation timeout, dump server info"));
+	PVRSRVDebugRequest(DEBUG_REQUEST_VERBOSITY_LOW,IMG_NULL);
+	return PVRSRV_OK;
+}
+
+IMG_INT
+DummyBW(IMG_UINT32 ui32DispatchTableEntry,
+		IMG_VOID *psBridgeIn,
+		IMG_VOID *psBridgeOut,
+		CONNECTION_DATA *psConnection)
+{
+#if !defined(DEBUG)
+	PVR_UNREFERENCED_PARAMETER(ui32DispatchTableEntry);
+#endif
+	PVR_UNREFERENCED_PARAMETER(psBridgeIn);
+	PVR_UNREFERENCED_PARAMETER(psBridgeOut);
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+#if defined(DEBUG_BRIDGE_KM)
+	PVR_DPF((PVR_DBG_ERROR, "%s: BRIDGE ERROR: ui32DispatchTableEntry %u (%s) mapped to "
+			 "Dummy Wrapper (probably not what you want!)",
+			 __FUNCTION__, ui32DispatchTableEntry, g_BridgeDispatchTable[ui32DispatchTableEntry].pszIOCName));
+#else
+	PVR_DPF((PVR_DBG_ERROR, "%s: BRIDGE ERROR: ui32DispatchTableEntry %u mapped to "
+			 "Dummy Wrapper (probably not what you want!)",
+			 __FUNCTION__, ui32DispatchTableEntry));
+#endif
+	return -ENOTTY;
+}
+
+
+/*
+	PVRSRVSoftResetKM
+*/
+PVRSRV_ERROR
+PVRSRVSoftResetKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+                  IMG_UINT64 ui64ResetValue1,
+                  IMG_UINT64 ui64ResetValue2)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	if ((psDeviceNode == IMG_NULL) || (psDeviceNode->pfnSoftReset == IMG_NULL))
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	eError = psDeviceNode->pfnSoftReset(psDeviceNode, ui64ResetValue1, ui64ResetValue2);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "PVRSRVSoftResetKM: "
+				"Failed to soft reset (error %d)",
+				eError));
+	}
+
+	return eError;
+}
+
+
+/*!
+ * *****************************************************************************
+ * @brief A wrapper for filling in the g_BridgeDispatchTable array that does
+ * 		  error checking.
+ *
+ * @param ui32Index
+ * @param pszIOCName
+ * @param pfFunction
+ * @param pszFunctionName
+ *
+ * @return
+ ********************************************************************************/
+IMG_VOID
+_SetDispatchTableEntry(IMG_UINT32 ui32BridgeGroup,
+					   IMG_UINT32 ui32Index,
+					   const IMG_CHAR *pszIOCName,
+					   BridgeWrapperFunction pfFunction,
+					   const IMG_CHAR *pszFunctionName,
+					   POS_LOCK hBridgeLock,
+					   const IMG_CHAR *pszBridgeLockName,
+					   IMG_BYTE* pbyBridgeBuffer,
+					   IMG_UINT32 ui32BridgeInBufferSize,
+					   IMG_UINT32 ui32BridgeOutBufferSize)
+{
+	static IMG_UINT32 ui32PrevIndex = IMG_UINT32_MAX;		/* -1 */
+#if !defined(DEBUG)
+	PVR_UNREFERENCED_PARAMETER(pszIOCName);
+#endif
+#if !defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE) && !defined(DEBUG_BRIDGE_KM)
+	PVR_UNREFERENCED_PARAMETER(pszFunctionName);
+	PVR_UNREFERENCED_PARAMETER(pszBridgeLockName);
+#endif
+
+	ui32Index += g_BridgeDispatchTableStartOffsets[ui32BridgeGroup];
+
+#if defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE)
+	/* Enable this to dump out the dispatch table entries */
+	PVR_DPF((PVR_DBG_WARNING, "%s: g_BridgeDispatchTableStartOffsets[%d]=%d", __FUNCTION__, ui32BridgeGroup, g_BridgeDispatchTableStartOffsets[ui32BridgeGroup]));
+	PVR_DPF((PVR_DBG_WARNING, "%s: %d %s %s %s", __FUNCTION__, ui32Index, pszIOCName, pszFunctionName, pszBridgeLockName));
+#endif
+
+	/* Any gaps are sub-optimal in-terms of memory usage, but we are mainly
+	 * interested in spotting any large gap of wasted memory that could be
+	 * accidentally introduced.
+	 *
+	 * This will currently flag up any gaps > 5 entries.
+	 *
+	 * NOTE: This shouldn't be debug only since switching from debug->release
+	 * etc is likely to modify the available ioctls and thus be a point where
+	 * mistakes are exposed. This isn't run at at a performance critical time.
+	 */
+	if((ui32PrevIndex != IMG_UINT32_MAX) &&
+	   ((ui32Index >= ui32PrevIndex + DISPATCH_TABLE_GAP_THRESHOLD) ||
+		(ui32Index <= ui32PrevIndex)))
+	{
+#if defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE)
+		PVR_DPF((PVR_DBG_WARNING,
+				 "%s: There is a gap in the dispatch table between indices %u (%s) and %u (%s)",
+				 __FUNCTION__, ui32PrevIndex, g_BridgeDispatchTable[ui32PrevIndex].pszIOCName,
+				 ui32Index, pszIOCName));
+#else
+		PVR_DPF((PVR_DBG_MESSAGE,
+				 "%s: There is a gap in the dispatch table between indices %u and %u (%s)",
+				 __FUNCTION__, (IMG_UINT)ui32PrevIndex, (IMG_UINT)ui32Index, pszIOCName));
+#endif
+	}
+
+	if (ui32Index >= BRIDGE_DISPATCH_TABLE_ENTRY_COUNT)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Index %u (%s) out of range",
+				 __FUNCTION__, (IMG_UINT)ui32Index, pszIOCName));
+
+#if defined(DEBUG_BRIDGE_KM)
+		PVR_DPF((PVR_DBG_ERROR, "%s: BRIDGE_DISPATCH_TABLE_ENTRY_COUNT = %lu",
+				 __FUNCTION__, BRIDGE_DISPATCH_TABLE_ENTRY_COUNT));
+		PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_TIMERQUERY_DISPATCH_LAST = %lu",
+				 __FUNCTION__, PVRSRV_BRIDGE_TIMERQUERY_DISPATCH_LAST));
+		PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_REGCONFIG_DISPATCH_LAST = %lu",
+				 __FUNCTION__, PVRSRV_BRIDGE_REGCONFIG_DISPATCH_LAST));
+		PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXRAY_DISPATCH_LAST = %lu",
+				 __FUNCTION__, PVRSRV_BRIDGE_RGXRAY_DISPATCH_LAST));
+		PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_LAST = %lu",
+				 __FUNCTION__, PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_LAST));
+		PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_LAST = %lu",
+				 __FUNCTION__, PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_LAST));
+		PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_DEBUGMISC_DISPATCH_LAST = %lu",
+				 __FUNCTION__, PVRSRV_BRIDGE_DEBUGMISC_DISPATCH_LAST));
+		PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_BREAKPOINT_DISPATCH_LAST = %lu",
+				 __FUNCTION__, PVRSRV_BRIDGE_BREAKPOINT_DISPATCH_LAST));
+		PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXTA3D_DISPATCH_LAST = %lu",
+				 __FUNCTION__, PVRSRV_BRIDGE_RGXTA3D_DISPATCH_LAST));
+		PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXINIT_DISPATCH_LAST = %lu",
+				 __FUNCTION__, PVRSRV_BRIDGE_RGXINIT_DISPATCH_LAST));
+		PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXCMP_DISPATCH_LAST = %lu",
+				 __FUNCTION__, PVRSRV_BRIDGE_RGXCMP_DISPATCH_LAST));
+		PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXTQ_DISPATCH_LAST = %lu\n",
+				 __FUNCTION__, PVRSRV_BRIDGE_RGXTQ_DISPATCH_LAST));
+
+		PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGX_DISPATCH_LAST = %lu",
+				 __FUNCTION__, PVRSRV_BRIDGE_RGX_DISPATCH_LAST));
+		PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGX_LAST = %lu",
+				 __FUNCTION__, PVRSRV_BRIDGE_RGX_LAST));
+		PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_LAST = %lu",
+				 __FUNCTION__, PVRSRV_BRIDGE_LAST));
+		PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST = %lu",
+				 __FUNCTION__, PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST));
+		PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST = %lu",
+				 __FUNCTION__, PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST));
+		PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST = %lu",
+				 __FUNCTION__, PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST));
+#endif
+
+		OSPanic();
+	}
+
+	/* Panic if the previous entry has been overwritten as this is not allowed!
+	 * NOTE: This shouldn't be debug only since switching from debug->release
+	 * etc is likely to modify the available ioctls and thus be a point where
+	 * mistakes are exposed. This isn't run at at a performance critical time.
+	 */
+	if(g_BridgeDispatchTable[ui32Index].pfFunction)
+	{
+#if defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE)
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: BUG!: Adding dispatch table entry for %s clobbers an existing entry for %s",
+				 __FUNCTION__, pszIOCName, g_BridgeDispatchTable[ui32Index].pszIOCName));
+#else
+		PVR_DPF((PVR_DBG_ERROR,
+				 "%s: BUG!: Adding dispatch table entry for %s clobbers an existing entry (index=%u)",
+				 __FUNCTION__, pszIOCName, ui32Index));
+		PVR_DPF((PVR_DBG_ERROR, "NOTE: Enabling DEBUG_BRIDGE_KM_DISPATCH_TABLE may help debug this issue."));
+#endif
+		OSPanic();
+	}
+
+	g_BridgeDispatchTable[ui32Index].pfFunction = pfFunction;
+	g_BridgeDispatchTable[ui32Index].hBridgeLock = hBridgeLock;
+	g_BridgeDispatchTable[ui32Index].pvBridgeBuffer = (IMG_PVOID) pbyBridgeBuffer;
+	g_BridgeDispatchTable[ui32Index].ui32BridgeInBufferSize = ui32BridgeInBufferSize;
+	g_BridgeDispatchTable[ui32Index].ui32BridgeOutBufferSize = ui32BridgeOutBufferSize;
+#if defined(DEBUG_BRIDGE_KM)
+	g_BridgeDispatchTable[ui32Index].pszIOCName = pszIOCName;
+	g_BridgeDispatchTable[ui32Index].pszFunctionName = pszFunctionName;
+	g_BridgeDispatchTable[ui32Index].pszBridgeLockName = pszBridgeLockName;
+	g_BridgeDispatchTable[ui32Index].ui32CallCount = 0;
+	g_BridgeDispatchTable[ui32Index].ui32CopyFromUserTotalBytes = 0;
+#endif
+
+	ui32PrevIndex = ui32Index;
+}
+
+PVRSRV_ERROR
+PVRSRVInitSrvDisconnectKM(CONNECTION_DATA *psConnection,
+							IMG_BOOL bInitSuccesful,
+							IMG_UINT32 ui32ClientBuildOptions)
+{
+	PVRSRV_ERROR eError;
+
+	if(!psConnection->bInitProcess)
+	{
+		return PVRSRV_ERROR_SRV_DISCONNECT_FAILED;
+	}
+
+	psConnection->bInitProcess = IMG_FALSE;
+
+	PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_RUNNING, IMG_FALSE);
+	PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_RAN, IMG_TRUE);
+
+	eError = PVRSRVFinaliseSystem(bInitSuccesful, ui32ClientBuildOptions);
+
+	PVRSRVSetInitServerState( PVRSRV_INIT_SERVER_SUCCESSFUL ,
+				(eError == PVRSRV_OK) && bInitSuccesful);
+
+	return eError;
+}
+
+IMG_INT BridgedDispatchKM(CONNECTION_DATA * psConnection,
+					  PVRSRV_BRIDGE_PACKAGE   * psBridgePackageKM)
+{
+
+	IMG_VOID   * psBridgeIn;
+	IMG_VOID   * psBridgeOut;
+	BridgeWrapperFunction pfBridgeHandler;
+	IMG_UINT32   ui32DispatchTableEntry;
+	IMG_INT      err          = -EFAULT;
+	IMG_UINT32	ui32BridgeInBufferSize;
+	IMG_UINT32	ui32BridgeOutBufferSize;
+
+#if defined(DEBUG_BRIDGE_KM_STOP_AT_DISPATCH)
+	PVR_DBG_BREAK;
+#endif
+
+	ui32DispatchTableEntry = g_BridgeDispatchTableStartOffsets[psBridgePackageKM->ui32BridgeID] + psBridgePackageKM->ui32FunctionID;
+
+#if defined(DEBUG_BRIDGE_KM)
+	PVR_DPF((PVR_DBG_MESSAGE, "%s: Dispatch table entry=%d, (bridge module %d, function %d)",
+			__FUNCTION__, 
+			ui32DispatchTableEntry, psBridgePackageKM->ui32BridgeID, psBridgePackageKM->ui32FunctionID));
+	PVR_DPF((PVR_DBG_MESSAGE, "%s: %s",
+			 __FUNCTION__,
+			 g_BridgeDispatchTable[ui32DispatchTableEntry].pszIOCName));
+	g_BridgeDispatchTable[ui32DispatchTableEntry].ui32CallCount++;
+	g_BridgeGlobalStats.ui32IOCTLCount++;
+#endif
+
+	if (g_BridgeDispatchTable[ui32DispatchTableEntry].hBridgeLock)
+	{
+		/* Acquire module specific bridge lock */
+		OSLockAcquire(g_BridgeDispatchTable[ui32DispatchTableEntry].hBridgeLock);
+		
+		/* Use buffers which are allocated for this bridge module */
+		psBridgeIn = g_BridgeDispatchTable[ui32DispatchTableEntry].pvBridgeBuffer;
+		ui32BridgeInBufferSize = g_BridgeDispatchTable[ui32DispatchTableEntry].ui32BridgeInBufferSize;
+		psBridgeOut = (IMG_PVOID)((IMG_PBYTE)psBridgeIn + ui32BridgeInBufferSize);
+		ui32BridgeOutBufferSize = g_BridgeDispatchTable[ui32DispatchTableEntry].ui32BridgeOutBufferSize;
+	}
+	else
+	{
+		/* Acquire default global bridge lock if calling module has no independent lock */
+		OSAcquireBridgeLock();
+
+		/* Request for global bridge buffers */
+		OSGetGlobalBridgeBuffers(&psBridgeIn,
+					&ui32BridgeInBufferSize,
+					&psBridgeOut,
+					&ui32BridgeOutBufferSize);
+	}
+	
+	/* check we are not using a bigger bridge than allocated */
+#if defined(DEBUG)
+	PVR_ASSERT(psBridgePackageKM->ui32InBufferSize <= ui32BridgeInBufferSize);
+	PVR_ASSERT(psBridgePackageKM->ui32OutBufferSize <= ui32BridgeOutBufferSize);
+#endif
+
+	if((CopyFromUserWrapper (psConnection,
+					ui32DispatchTableEntry,
+					psBridgeIn,
+					psBridgePackageKM->pvParamIn,
+					psBridgePackageKM->ui32InBufferSize) != PVRSRV_OK)
+#if defined __QNXNTO__
+/* For Neutrino, the output bridge buffer acts as an input as well */
+					|| (CopyFromUserWrapper(psConnection,
+											ui32DispatchTableEntry,
+											psBridgeOut,
+											(IMG_PVOID)((IMG_UINT32)psBridgePackageKM->pvParamIn + psBridgePackageKM->ui32InBufferSize),
+											psBridgePackageKM->ui32OutBufferSize) != PVRSRV_OK)
+#endif
+		) /* end of if-condition */
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: CopyFromUserWrapper returned an error!", __FUNCTION__));
+		goto unlock_and_return_fault;
+	}
+
+	if(ui32DispatchTableEntry > (BRIDGE_DISPATCH_TABLE_ENTRY_COUNT))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: ui32DispatchTableEntry = %d is out if range!",
+				 __FUNCTION__, ui32DispatchTableEntry));
+		goto unlock_and_return_fault;
+	}
+	pfBridgeHandler =
+		(BridgeWrapperFunction)g_BridgeDispatchTable[ui32DispatchTableEntry].pfFunction;
+	
+	if (pfBridgeHandler == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: ui32DispatchTableEntry = %d is not a registered function!",
+				 __FUNCTION__, ui32DispatchTableEntry));
+		goto unlock_and_return_fault;
+	}
+	
+	err = pfBridgeHandler(ui32DispatchTableEntry,
+						  psBridgeIn,
+						  psBridgeOut,
+						  psConnection);
+	if(err < 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: ...done (err=%d)", __FUNCTION__, err));
+		goto unlock_and_return_fault;
+	}
+
+	/* 
+	   This should always be true as a.t.m. all bridge calls have to
+   	   return an error message, but this could change so we do this
+   	   check to be safe.
+   	*/
+	if (psBridgePackageKM->ui32OutBufferSize > 0)
+	{
+		err = -EFAULT;
+		if (CopyToUserWrapper (psConnection,
+						ui32DispatchTableEntry,
+						psBridgePackageKM->pvParamOut,
+						psBridgeOut,
+						psBridgePackageKM->ui32OutBufferSize) != PVRSRV_OK)
+		{
+			goto unlock_and_return_fault;
+		}
+	}
+
+	err = 0;
+
+unlock_and_return_fault:
+	if (g_BridgeDispatchTable[ui32DispatchTableEntry].hBridgeLock)
+	{
+		OSLockRelease(g_BridgeDispatchTable[ui32DispatchTableEntry].hBridgeLock);
+	}
+	else
+	{
+		OSReleaseBridgeLock();
+	}
+	if (err)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: returning (err = %d)", __FUNCTION__, err));
+	}
+	return err;
+}
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/sync_server.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/sync_server.c
new file mode 100644
index 0000000..d19aa49
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/sync_server.c
@@ -0,0 +1,1811 @@
+/*************************************************************************/ /*!
+@File           sync_server.c
+@Title          Server side synchronisation functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements the server side functions that for synchronisation
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include "img_types.h"
+#include "sync_server.h"
+#include "sync_server_internal.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "osfunc.h"
+#include "pdump.h"
+#include "pvr_debug.h"
+#include "pdump_km.h"
+#include "sync.h"
+#include "sync_internal.h"
+#include "pvrsrv.h"
+#include "debug_request_ids.h"
+#include "connection_server.h"
+
+#if defined(SUPPORT_SECURE_EXPORT)
+#include "ossecure_export.h"
+#endif
+
+#if defined(SUPPORT_EXTRA_METASP_DEBUG)
+#include "rgxdebug.h"
+#endif
+
+struct _SYNC_PRIMITIVE_BLOCK_
+{
+	PVRSRV_DEVICE_NODE	*psDevNode;
+	DEVMEM_MEMDESC		*psMemDesc;
+	DEVMEM_EXPORTCOOKIE	sExportCookie;
+	IMG_UINT32			*pui32LinAddr;
+	IMG_UINT32			ui32BlockSize;		/*!< Size of the Sync Primitive Block */
+	IMG_UINT32			ui32RefCount;
+	POS_LOCK			hLock;
+	DLLIST_NODE			sConnectionNode;
+	SYNC_CONNECTION_DATA *psSyncConnectionData;	/*!< Link back to the sync connection data if there is one */
+};
+
+struct _SERVER_SYNC_PRIMITIVE_
+{
+	PVRSRV_CLIENT_SYNC_PRIM *psSync;
+	IMG_UINT32				ui32NextOp;
+	IMG_UINT32				ui32RefCount;
+	IMG_UINT32				ui32UID;
+	IMG_UINT32				ui32LastSyncRequesterID;
+	DLLIST_NODE				sNode;
+	/* PDump only data */
+	IMG_BOOL				bSWOperation;
+	IMG_BOOL				bSWOpStartedInCaptRange;
+	IMG_UINT32				ui32LastHWUpdate;
+	IMG_BOOL				bPDumped;
+	POS_LOCK				hLock;
+	IMG_CHAR				szClassName[SYNC_MAX_CLASS_NAME_LEN];
+};
+
+struct _SERVER_SYNC_EXPORT_
+{
+	SERVER_SYNC_PRIMITIVE *psSync;
+};
+
+struct _SERVER_OP_COOKIE_
+{
+	IMG_BOOL				bActive;
+	/*
+		Client syncblock(s) info.
+		If this changes update the calculation of ui32BlockAllocSize
+	*/
+	IMG_UINT32				ui32SyncBlockCount;
+	SYNC_PRIMITIVE_BLOCK	**papsSyncPrimBlock;
+
+	/*
+		Client sync(s) info.
+		If this changes update the calculation of ui32ClientAllocSize
+	*/
+	IMG_UINT32				ui32ClientSyncCount;
+	IMG_UINT32				*paui32SyncBlockIndex;
+	IMG_UINT32				*paui32Index;
+	IMG_UINT32				*paui32Flags;
+	IMG_UINT32				*paui32FenceValue;
+	IMG_UINT32				*paui32UpdateValue;
+
+	/*
+		Server sync(s) info
+		If this changes update the calculation of ui32ServerAllocSize
+	*/
+	IMG_UINT32				ui32ServerSyncCount;
+	SERVER_SYNC_PRIMITIVE	**papsServerSync;
+	IMG_UINT32				*paui32ServerFenceValue;
+	IMG_UINT32				*paui32ServerUpdateValue;
+
+};
+
+struct _SYNC_CONNECTION_DATA_
+{
+	DLLIST_NODE	sListHead;
+	IMG_UINT32	ui32RefCount;
+	POS_LOCK	hLock;
+};
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+#define DECREMENT_WITH_WRAP(value, sz) ((value) ? ((value) - 1) : ((sz) - 1))
+
+enum SYNC_RECORD_TYPE
+{
+	SYNC_RECORD_TYPE_UNKNOWN = 0,
+	SYNC_RECORD_TYPE_CLIENT,
+	SYNC_RECORD_TYPE_SERVER,
+};
+
+struct SYNC_RECORD
+{
+	SYNC_PRIMITIVE_BLOCK	*psServerSyncPrimBlock;	/*!< handle to _SYNC_PRIMITIVE_BLOCK_ */
+	IMG_UINT32				ui32SyncOffset; 		/*!< offset to sync in block */
+	IMG_UINT32				ui32FwBlockAddr;
+	IMG_PID					uiPID;
+	IMG_UINT64				ui64OSTime;
+	enum SYNC_RECORD_TYPE	eRecordType;
+	DLLIST_NODE				sNode;
+	IMG_CHAR				szClassName[SYNC_MAX_CLASS_NAME_LEN];
+};
+
+static POS_LOCK g_hSyncRecordListLock;
+static DLLIST_NODE g_sSyncRecordList;
+static IMG_UINT g_uiFreedSyncRecordIdx = 0;
+static struct SYNC_RECORD * g_apsFreedSyncRecords[PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN] = {0};
+static IMG_HANDLE g_hSyncRecordNotify;
+#endif /* #if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING) */
+
+static IMG_UINT32 g_ServerSyncUID = 0;
+
+POS_LOCK g_hListLock;
+static DLLIST_NODE g_sAllServerSyncs;
+IMG_HANDLE g_hNotify;
+
+#define SYNC_REQUESTOR_UNKNOWN 0
+static IMG_UINT32 g_ui32NextSyncRequestorID = 1;
+
+#if defined(SYNC_DEBUG) || defined(REFCOUNT_DEBUG)
+#define SYNC_REFCOUNT_PRINT(fmt, ...) PVRSRVDebugPrintf(PVR_DBG_WARNING, __FILE__, __LINE__, fmt, __VA_ARGS__)
+#else
+#define SYNC_REFCOUNT_PRINT(fmt, ...)
+#endif
+
+#if defined(SYNC_DEBUG)
+#define SYNC_UPDATES_PRINT(fmt, ...) PVRSRVDebugPrintf(PVR_DBG_WARNING, __FILE__, __LINE__, fmt, __VA_ARGS__)
+#else
+#define SYNC_UPDATES_PRINT(fmt, ...)
+#endif
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+PVRSRV_ERROR
+PVRSRVSyncRecordAddKM(
+			SYNC_RECORD_HANDLE * phRecord,
+			SYNC_PRIMITIVE_BLOCK * hServerSyncPrimBlock,
+			IMG_UINT32 ui32FwBlockAddr,
+			IMG_UINT32 ui32SyncOffset,
+			IMG_BOOL bServerSync,
+			IMG_UINT32 ui32ClassNameSize,
+			const IMG_CHAR *pszClassName)
+{
+	struct SYNC_RECORD * psSyncRec;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	if (!phRecord)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+	*phRecord = IMG_NULL;
+
+	psSyncRec = OSAllocMem(sizeof(*psSyncRec));
+	if (!psSyncRec)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_alloc;
+	}
+
+	psSyncRec->psServerSyncPrimBlock = hServerSyncPrimBlock;
+	psSyncRec->ui32SyncOffset = ui32SyncOffset;
+	psSyncRec->ui32FwBlockAddr = ui32FwBlockAddr;
+	psSyncRec->ui64OSTime = OSClockns64();
+	psSyncRec->uiPID = OSGetCurrentProcessID();
+	psSyncRec->eRecordType = bServerSync? SYNC_RECORD_TYPE_SERVER: SYNC_RECORD_TYPE_CLIENT;
+
+	if(pszClassName)
+	{
+		if (ui32ClassNameSize >= SYNC_MAX_CLASS_NAME_LEN)
+			ui32ClassNameSize = SYNC_MAX_CLASS_NAME_LEN - 1;
+		/* Copy over the class name annotation */
+		OSStringNCopy(psSyncRec->szClassName, pszClassName, ui32ClassNameSize);
+		psSyncRec->szClassName[ui32ClassNameSize] = 0;
+	}
+	else
+	{
+		/* No class name annotation */
+		psSyncRec->szClassName[0] = 0;
+	}
+
+	OSLockAcquire(g_hSyncRecordListLock);
+	dllist_add_to_head(&g_sSyncRecordList, &psSyncRec->sNode);
+	OSLockRelease(g_hSyncRecordListLock);
+
+	*phRecord = (SYNC_RECORD_HANDLE)psSyncRec;
+
+fail_alloc:
+	return eError;
+}
+
+PVRSRV_ERROR
+PVRSRVSyncRecordRemoveByHandleKM(
+			SYNC_RECORD_HANDLE hRecord)
+{
+	struct SYNC_RECORD **ppFreedSync;
+	struct SYNC_RECORD *pSync = (struct SYNC_RECORD*)hRecord;
+
+	if (!hRecord)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	OSLockAcquire(g_hSyncRecordListLock);
+
+	dllist_remove_node(&pSync->sNode);
+
+	if (g_uiFreedSyncRecordIdx >= PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: g_uiFreedSyncRecordIdx out of range", __FUNCTION__));
+		g_uiFreedSyncRecordIdx = 0;
+	}
+	ppFreedSync = &g_apsFreedSyncRecords[g_uiFreedSyncRecordIdx];
+	g_uiFreedSyncRecordIdx = (g_uiFreedSyncRecordIdx + 1) % PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN;
+
+	if (*ppFreedSync)
+	{
+		OSFreeMem(*ppFreedSync);
+	}
+	pSync->psServerSyncPrimBlock = NULL;
+	pSync->ui64OSTime = OSClockns64();
+	*ppFreedSync = pSync;
+
+	OSLockRelease(g_hSyncRecordListLock);
+
+	return PVRSRV_OK;
+}
+#else
+PVRSRV_ERROR
+PVRSRVSyncRecordAddKM(
+			SYNC_RECORD_HANDLE * phRecord,
+			SYNC_PRIMITIVE_BLOCK * hServerSyncPrimBlock,
+			IMG_UINT32 ui32FwBlockAddr,
+			IMG_UINT32 ui32SyncOffset,
+			IMG_BOOL bServerSync,
+			IMG_UINT32 ui32ClassNameSize,
+			const IMG_CHAR *pszClassName)
+{
+	*phRecord = IMG_NULL;
+	PVR_UNREFERENCED_PARAMETER(phRecord);
+	PVR_UNREFERENCED_PARAMETER(hServerSyncPrimBlock);
+	PVR_UNREFERENCED_PARAMETER(ui32FwBlockAddr);
+	PVR_UNREFERENCED_PARAMETER(ui32SyncOffset);
+	PVR_UNREFERENCED_PARAMETER(bServerSync);
+	PVR_UNREFERENCED_PARAMETER(ui32ClassNameSize);
+	PVR_UNREFERENCED_PARAMETER(pszClassName);
+	return PVRSRV_OK;
+}
+PVRSRV_ERROR
+PVRSRVSyncRecordRemoveByHandleKM(
+			SYNC_RECORD_HANDLE hRecord)
+{
+	PVR_UNREFERENCED_PARAMETER(hRecord);
+	return PVRSRV_OK;
+}
+#endif /* #if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING) */
+
+static
+IMG_VOID _SyncConnectionRef(SYNC_CONNECTION_DATA *psSyncConnectionData)
+{
+	IMG_UINT32 ui32RefCount;
+
+	OSLockAcquire(psSyncConnectionData->hLock);
+	ui32RefCount = ++psSyncConnectionData->ui32RefCount;
+	OSLockRelease(psSyncConnectionData->hLock);	
+
+	SYNC_REFCOUNT_PRINT("%s: Sync connection %p, refcount = %d",
+						__FUNCTION__, psSyncConnectionData, ui32RefCount);
+}
+
+static
+IMG_VOID _SyncConnectionUnref(SYNC_CONNECTION_DATA *psSyncConnectionData)
+{
+	IMG_UINT32 ui32RefCount;
+
+	OSLockAcquire(psSyncConnectionData->hLock);
+	ui32RefCount = --psSyncConnectionData->ui32RefCount;
+	OSLockRelease(psSyncConnectionData->hLock);
+
+	if (ui32RefCount == 0)
+	{
+		SYNC_REFCOUNT_PRINT("%s: Sync connection %p, refcount = %d",
+							__FUNCTION__, psSyncConnectionData, ui32RefCount);
+
+		PVR_ASSERT(dllist_is_empty(&psSyncConnectionData->sListHead));
+		OSLockDestroy(psSyncConnectionData->hLock);
+		OSFreeMem(psSyncConnectionData);
+	}
+	else
+	{
+		SYNC_REFCOUNT_PRINT("%s: Sync connection %p, refcount = %d",
+							__FUNCTION__, psSyncConnectionData, ui32RefCount);
+	}
+}
+
+static
+IMG_VOID _SyncConnectionAddBlock(CONNECTION_DATA *psConnection, SYNC_PRIMITIVE_BLOCK *psBlock)
+{
+	if (psConnection)
+	{
+		SYNC_CONNECTION_DATA *psSyncConnectionData = psConnection->psSyncConnectionData;
+
+		/*
+			Make sure the connection doesn't go away. It doesn't matter that we will release
+			the lock between as the refcount and list don't have to be atomic w.r.t. to each other
+		*/
+		_SyncConnectionRef(psSyncConnectionData);
+	
+		OSLockAcquire(psSyncConnectionData->hLock);
+		if (psConnection != IMG_NULL)
+		{
+			dllist_add_to_head(&psSyncConnectionData->sListHead, &psBlock->sConnectionNode);
+		}
+		OSLockRelease(psSyncConnectionData->hLock);
+		psBlock->psSyncConnectionData = psSyncConnectionData;
+	}
+	else
+	{
+		psBlock->psSyncConnectionData = IMG_NULL;
+	}
+}
+
+static
+IMG_VOID _SyncConnectionRemoveBlock(SYNC_PRIMITIVE_BLOCK *psBlock)
+{
+	SYNC_CONNECTION_DATA *psSyncConnectionData = psBlock->psSyncConnectionData;
+
+	if (psBlock->psSyncConnectionData)
+	{
+		OSLockAcquire(psSyncConnectionData->hLock);
+		dllist_remove_node(&psBlock->sConnectionNode);
+		OSLockRelease(psSyncConnectionData->hLock);
+
+		_SyncConnectionUnref(psBlock->psSyncConnectionData);
+	}
+}
+
+static
+IMG_VOID _SyncPrimitiveBlockRef(SYNC_PRIMITIVE_BLOCK *psSyncBlk)
+{
+	IMG_UINT32 ui32RefCount;
+
+	OSLockAcquire(psSyncBlk->hLock);
+	ui32RefCount = ++psSyncBlk->ui32RefCount;
+	OSLockRelease(psSyncBlk->hLock);
+
+	SYNC_REFCOUNT_PRINT("%s: Sync block %p, refcount = %d",
+						__FUNCTION__, psSyncBlk, ui32RefCount);
+}
+
+static
+IMG_VOID _SyncPrimitiveBlockUnref(SYNC_PRIMITIVE_BLOCK *psSyncBlk)
+{
+	IMG_UINT32 ui32RefCount;
+
+	OSLockAcquire(psSyncBlk->hLock);
+	ui32RefCount = --psSyncBlk->ui32RefCount;
+	OSLockRelease(psSyncBlk->hLock);
+
+	if (ui32RefCount == 0)
+	{
+		PVRSRV_DEVICE_NODE *psDevNode = psSyncBlk->psDevNode;
+
+		SYNC_REFCOUNT_PRINT("%s: Sync block %p, refcount = %d (remove)",
+							__FUNCTION__, psSyncBlk, ui32RefCount);
+
+		_SyncConnectionRemoveBlock(psSyncBlk);
+		OSLockDestroy(psSyncBlk->hLock);
+		DevmemUnexport(psSyncBlk->psMemDesc, &psSyncBlk->sExportCookie);
+		DevmemReleaseCpuVirtAddr(psSyncBlk->psMemDesc);
+		psDevNode->pfnFreeUFOBlock(psDevNode, psSyncBlk->psMemDesc);
+		OSFreeMem(psSyncBlk);
+	}
+	else
+	{
+		SYNC_REFCOUNT_PRINT("%s: Sync block %p, refcount = %d",
+							__FUNCTION__, psSyncBlk, ui32RefCount);
+	}
+}
+
+PVRSRV_ERROR
+PVRSRVAllocSyncPrimitiveBlockKM(CONNECTION_DATA *psConnection,
+								PVRSRV_DEVICE_NODE *psDevNode,
+								SYNC_PRIMITIVE_BLOCK **ppsSyncBlk,
+								IMG_UINT32 *puiSyncPrimVAddr,
+								IMG_UINT32 *puiSyncPrimBlockSize,
+								DEVMEM_EXPORTCOOKIE **psExportCookie)
+{
+	SYNC_PRIMITIVE_BLOCK *psNewSyncBlk;
+	PVRSRV_ERROR eError;
+
+	psNewSyncBlk = OSAllocMem(sizeof(SYNC_PRIMITIVE_BLOCK));
+	if (psNewSyncBlk == IMG_NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e0;
+	}
+	psNewSyncBlk->psDevNode = psDevNode;
+
+	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Allocate UFO block");
+
+	eError = psDevNode->pfnAllocUFOBlock(psDevNode,
+										 &psNewSyncBlk->psMemDesc,
+										 puiSyncPrimVAddr,
+										 &psNewSyncBlk->ui32BlockSize);
+	if (eError != PVRSRV_OK)
+	{
+		goto e1;
+	}
+
+	eError = DevmemAcquireCpuVirtAddr(psNewSyncBlk->psMemDesc,
+									  (IMG_PVOID *) &psNewSyncBlk->pui32LinAddr);
+	if (eError != PVRSRV_OK)
+	{
+		goto e2;
+	}
+
+	eError = DevmemExport(psNewSyncBlk->psMemDesc, &psNewSyncBlk->sExportCookie);
+	if (eError != PVRSRV_OK)
+	{
+		goto e3;
+	}
+
+	eError = OSLockCreate(&psNewSyncBlk->hLock, LOCK_TYPE_NONE);
+	if (eError != PVRSRV_OK)
+	{
+		goto e4;
+	}
+
+	psNewSyncBlk->ui32RefCount = 1;
+
+	/* If there is a connection pointer then add the new block onto it's list */
+	_SyncConnectionAddBlock(psConnection, psNewSyncBlk);
+
+	*psExportCookie = &psNewSyncBlk->sExportCookie;
+	*ppsSyncBlk = psNewSyncBlk;
+	*puiSyncPrimBlockSize = psNewSyncBlk->ui32BlockSize;
+
+	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
+						  "Allocated UFO block (FirmwareVAddr = 0x%08x)",
+						  *puiSyncPrimVAddr);
+
+	return PVRSRV_OK;
+e4:
+	DevmemUnexport(psNewSyncBlk->psMemDesc, &psNewSyncBlk->sExportCookie);
+
+e3:
+	DevmemReleaseCpuVirtAddr(psNewSyncBlk->psMemDesc);
+e2:
+	psDevNode->pfnFreeUFOBlock(psDevNode, psNewSyncBlk->psMemDesc);
+e1:
+	OSFreeMem(psNewSyncBlk);
+e0:
+	return eError;
+}
+
+PVRSRV_ERROR
+PVRSRVFreeSyncPrimitiveBlockKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk)
+{
+	_SyncPrimitiveBlockUnref(psSyncBlk);
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimSetKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Index,
+					IMG_UINT32 ui32Value)
+{
+	if((ui32Index * sizeof(IMG_UINT32)) < psSyncBlk->ui32BlockSize)
+	{
+		psSyncBlk->pui32LinAddr[ui32Index] = ui32Value;
+		return PVRSRV_OK;
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVSyncPrimSetKM: Index %u out of range for "
+							"0x%08X byte sync block (value 0x%08X)",
+							ui32Index,
+							psSyncBlk->ui32BlockSize,
+							ui32Value));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+}
+
+PVRSRV_ERROR
+PVRSRVServerSyncPrimSetKM(SERVER_SYNC_PRIMITIVE *psServerSync, IMG_UINT32 ui32Value)
+{
+	*psServerSync->psSync->pui32LinAddr = ui32Value;
+
+	return PVRSRV_OK;
+}
+
+IMG_VOID
+ServerSyncRef(SERVER_SYNC_PRIMITIVE *psSync)
+{
+	IMG_UINT32 ui32RefCount;
+
+	OSLockAcquire(psSync->hLock);
+	ui32RefCount = ++psSync->ui32RefCount;
+	OSLockRelease(psSync->hLock);
+
+	SYNC_REFCOUNT_PRINT("%s: Server sync %p, refcount = %d",
+						__FUNCTION__, psSync, ui32RefCount);
+}
+
+IMG_VOID
+ServerSyncUnref(SERVER_SYNC_PRIMITIVE *psSync)
+{
+	IMG_UINT32 ui32RefCount;
+
+	OSLockAcquire(psSync->hLock);
+	ui32RefCount = --psSync->ui32RefCount;
+	OSLockRelease(psSync->hLock);
+
+	if (ui32RefCount == 0)
+	{
+		SYNC_REFCOUNT_PRINT("%s: Server sync %p, refcount = %d",
+							__FUNCTION__, psSync, ui32RefCount);
+
+		/* Remove the sync from the global list */
+		OSLockAcquire(g_hListLock);
+		dllist_remove_node(&psSync->sNode);
+		OSLockRelease(g_hListLock);
+
+		OSLockDestroy(psSync->hLock);
+		SyncPrimFree(psSync->psSync);
+		OSFreeMem(psSync);
+	}
+	else
+	{
+		SYNC_REFCOUNT_PRINT("%s: Server sync %p, refcount = %d",
+							__FUNCTION__, psSync, ui32RefCount);
+	}
+}
+
+PVRSRV_ERROR
+PVRSRVServerSyncAllocKM(PVRSRV_DEVICE_NODE *psDevNode,
+						SERVER_SYNC_PRIMITIVE **ppsSync,
+						IMG_UINT32 *pui32SyncPrimVAddr,
+						IMG_UINT32 ui32ClassNameSize,
+						const IMG_CHAR *pszClassName)
+{
+	SERVER_SYNC_PRIMITIVE *psNewSync;
+	PVRSRV_ERROR eError;
+
+	psNewSync = OSAllocMem(sizeof(SERVER_SYNC_PRIMITIVE));
+	if (psNewSync == IMG_NULL)
+	{
+			return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	/* szClassName must be setup now and used for the SyncPrimAlloc call because
+	 * pszClassName is allocated in the bridge code is not NULL terminated 
+	 */
+	if(pszClassName)
+	{
+		if (ui32ClassNameSize >= SYNC_MAX_CLASS_NAME_LEN)
+			ui32ClassNameSize = SYNC_MAX_CLASS_NAME_LEN - 1;
+		/* Copy over the class name annotation */
+		OSStringNCopy(psNewSync->szClassName, pszClassName, ui32ClassNameSize);
+		psNewSync->szClassName[ui32ClassNameSize] = 0;
+	}
+	else
+	{
+		/* No class name annotation */
+		psNewSync->szClassName[0] = 0;
+	}
+
+	eError = SyncPrimAllocForServerSync(psDevNode->hSyncPrimContext,
+						   &psNewSync->psSync,
+						   psNewSync->szClassName);
+
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_sync_alloc;
+	}
+
+	eError = OSLockCreate(&psNewSync->hLock, LOCK_TYPE_NONE);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_lock_create;
+	}
+
+	SyncPrimSet(psNewSync->psSync, 0);
+
+	psNewSync->ui32NextOp = 0;
+	psNewSync->ui32RefCount = 1;
+	psNewSync->ui32UID = g_ServerSyncUID++;
+	psNewSync->ui32LastSyncRequesterID = SYNC_REQUESTOR_UNKNOWN;
+	psNewSync->bSWOperation = IMG_FALSE;
+	psNewSync->ui32LastHWUpdate = 0x0bad592c;
+	psNewSync->bPDumped = IMG_FALSE;
+
+	/* Add the sync to the global list */
+	OSLockAcquire(g_hListLock);
+	dllist_add_to_head(&g_sAllServerSyncs, &psNewSync->sNode);
+	OSLockRelease(g_hListLock);
+
+	*pui32SyncPrimVAddr = SyncPrimGetFirmwareAddr(psNewSync->psSync);
+	SYNC_UPDATES_PRINT("%s: sync: %p, fwaddr: %8.8X", __FUNCTION__, psNewSync, *pui32SyncPrimVAddr);
+	*ppsSync = psNewSync;
+	return PVRSRV_OK;
+
+fail_lock_create:
+	SyncPrimFree(psNewSync->psSync);
+
+fail_sync_alloc:
+	OSFreeMem(psNewSync);
+	return eError;
+}
+
+PVRSRV_ERROR
+PVRSRVServerSyncFreeKM(SERVER_SYNC_PRIMITIVE *psSync)
+{
+	ServerSyncUnref(psSync);
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PVRSRVServerSyncGetStatusKM(IMG_UINT32 ui32SyncCount,
+							SERVER_SYNC_PRIMITIVE **papsSyncs,
+							IMG_UINT32 *pui32UID,
+							IMG_UINT32 *pui32FWAddr,
+							IMG_UINT32 *pui32CurrentOp,
+							IMG_UINT32 *pui32NextOp)
+{
+	IMG_UINT32 i;
+
+	for (i=0;i<ui32SyncCount;i++)
+	{
+		PVRSRV_CLIENT_SYNC_PRIM *psClientSync = papsSyncs[i]->psSync;
+
+		pui32UID[i] = papsSyncs[i]->ui32UID;
+		pui32FWAddr[i] = SyncPrimGetFirmwareAddr(psClientSync);
+		pui32CurrentOp[i] = *psClientSync->pui32LinAddr;
+		pui32NextOp[i] = papsSyncs[i]->ui32NextOp;
+	}
+	return PVRSRV_OK;
+}
+
+#if defined(SUPPORT_INSECURE_EXPORT) || defined(SUPPORT_SECURE_EXPORT)
+static PVRSRV_ERROR
+_PVRSRVSyncPrimServerExportKM(SERVER_SYNC_PRIMITIVE *psSync,
+							  SERVER_SYNC_EXPORT **ppsExport)
+{
+	SERVER_SYNC_EXPORT *psNewExport;
+	PVRSRV_ERROR eError;
+
+	psNewExport = OSAllocMem(sizeof(SERVER_SYNC_EXPORT));
+	if (!psNewExport)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e0;
+	}
+
+	ServerSyncRef(psSync);
+
+	psNewExport->psSync = psSync;
+	*ppsExport = psNewExport;
+
+	return PVRSRV_OK;
+e0:
+	return eError;
+}
+
+static PVRSRV_ERROR
+_PVRSRVSyncPrimServerUnexportKM(SERVER_SYNC_EXPORT *psExport)
+{
+	ServerSyncUnref(psExport->psSync);
+
+	OSFreeMem(psExport);
+
+	return PVRSRV_OK;
+}
+
+static IMG_VOID
+_PVRSRVSyncPrimServerImportKM(SERVER_SYNC_EXPORT *psExport,
+							  SERVER_SYNC_PRIMITIVE **ppsSync,
+							  IMG_UINT32 *pui32SyncPrimVAddr)
+{
+	ServerSyncRef(psExport->psSync);
+
+	*ppsSync = psExport->psSync;
+	*pui32SyncPrimVAddr = SyncPrimGetFirmwareAddr(psExport->psSync->psSync);
+}
+#endif /* defined(SUPPORT_INSECURE_EXPORT) || defined(SUPPORT_SECURE_EXPORT) */
+
+#if defined(SUPPORT_INSECURE_EXPORT)
+PVRSRV_ERROR
+PVRSRVSyncPrimServerExportKM(SERVER_SYNC_PRIMITIVE *psSync,
+							SERVER_SYNC_EXPORT **ppsExport)
+{
+	return _PVRSRVSyncPrimServerExportKM(psSync,
+										 ppsExport);
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimServerUnexportKM(SERVER_SYNC_EXPORT *psExport)
+{
+	return _PVRSRVSyncPrimServerUnexportKM(psExport);
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimServerImportKM(SERVER_SYNC_EXPORT *psExport,
+							SERVER_SYNC_PRIMITIVE **ppsSync,
+							IMG_UINT32 *pui32SyncPrimVAddr)
+{
+	_PVRSRVSyncPrimServerImportKM(psExport,
+								  ppsSync,
+								  pui32SyncPrimVAddr);
+
+	return PVRSRV_OK;
+}
+#endif /* defined(SUPPORT_INSECURE_EXPORT) */
+
+#if defined(SUPPORT_SECURE_EXPORT)
+PVRSRV_ERROR
+PVRSRVSyncPrimServerSecureExportKM(CONNECTION_DATA *psConnection,
+								   SERVER_SYNC_PRIMITIVE *psSync,
+								   IMG_SECURE_TYPE *phSecure,
+								   SERVER_SYNC_EXPORT **ppsExport,
+								   CONNECTION_DATA **ppsSecureConnection)
+{
+	SERVER_SYNC_EXPORT *psNewExport;
+	PVRSRV_ERROR eError;
+
+	/* Create an export server sync */
+	eError = _PVRSRVSyncPrimServerExportKM(psSync,
+										   &psNewExport);
+
+	if (eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+
+	/* Transform it into a secure export */
+	eError = OSSecureExport(psConnection,
+							(IMG_PVOID) psNewExport,
+							phSecure,
+							ppsSecureConnection);
+	if (eError != PVRSRV_OK)
+	{
+		goto e1;
+	}
+
+	*ppsExport = psNewExport;
+	return PVRSRV_OK;
+e1:
+	_PVRSRVSyncPrimServerUnexportKM(psNewExport);
+e0:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+/* FIXME: This is the same as the non-secure version. */
+PVRSRV_ERROR
+PVRSRVSyncPrimServerSecureUnexportKM(SERVER_SYNC_EXPORT *psExport)
+{
+	_PVRSRVSyncPrimServerUnexportKM(psExport);
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimServerSecureImportKM(IMG_SECURE_TYPE hSecure,
+								   SERVER_SYNC_PRIMITIVE **ppsSync,
+								   IMG_UINT32 *pui32SyncPrimVAddr)
+{
+	PVRSRV_ERROR eError;
+	SERVER_SYNC_EXPORT *psImport;
+
+	/* Retrieve the data from the secure import */
+	eError = OSSecureImport(hSecure, (IMG_PVOID *) &psImport);
+	if (eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+
+	_PVRSRVSyncPrimServerImportKM(psImport,
+								  ppsSync,
+								  pui32SyncPrimVAddr);
+	return PVRSRV_OK;
+
+e0:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+#endif /* defined(SUPPORT_SECURE_EXPORT) */
+
+IMG_UINT32 PVRSRVServerSyncRequesterRegisterKM(IMG_UINT32 *pui32SyncRequesterID)
+{
+	*pui32SyncRequesterID = g_ui32NextSyncRequestorID++;
+
+	return PVRSRV_OK;
+}
+
+IMG_VOID PVRSRVServerSyncRequesterUnregisterKM(IMG_UINT32 ui32SyncRequesterID)
+{
+	PVR_UNREFERENCED_PARAMETER(ui32SyncRequesterID);
+}
+
+static IMG_VOID
+_ServerSyncTakeOperation(SERVER_SYNC_PRIMITIVE *psSync,
+						  IMG_BOOL bUpdate,
+						  IMG_UINT32 *pui32FenceValue,
+						  IMG_UINT32 *pui32UpdateValue)
+{
+	IMG_BOOL bInCaptureRange;
+
+	/* Only advance the pending if the an update is required */
+	if (bUpdate)
+	{
+		*pui32FenceValue = psSync->ui32NextOp++;
+	}
+	else
+	{
+		*pui32FenceValue = psSync->ui32NextOp;
+	}
+
+	*pui32UpdateValue = psSync->ui32NextOp;
+
+	PDumpIsCaptureFrameKM(&bInCaptureRange);
+	/*
+		If this is the 1st operation (in this capture range) then PDump
+		this sync
+	*/
+	if (!psSync->bPDumped && bInCaptureRange)
+	{
+		IMG_CHAR azTmp[100];
+		OSSNPrintf(azTmp,
+				   sizeof(azTmp),
+				   "Dump initial sync state (0x%p, FW VAddr = 0x%08x) = 0x%08x\n",
+				   psSync,
+				   SyncPrimGetFirmwareAddr(psSync->psSync),
+				   *psSync->psSync->pui32LinAddr);
+		PDumpCommentKM(azTmp, 0);
+
+		SyncPrimPDump(psSync->psSync);
+		psSync->bPDumped = IMG_TRUE;
+	}
+
+	/*
+		When exiting capture range clear down bPDumped as we might re-enter
+		capture range and thus need to PDump this sync again
+	*/
+	if (!bInCaptureRange)
+	{
+		psSync->bPDumped = IMG_FALSE;
+	}
+}
+
+PVRSRV_ERROR
+PVRSRVServerSyncQueueSWOpKM(SERVER_SYNC_PRIMITIVE *psSync,
+						  IMG_UINT32 *pui32FenceValue,
+						  IMG_UINT32 *pui32UpdateValue,
+						  IMG_UINT32 ui32SyncRequesterID,
+						  IMG_BOOL bUpdate,
+						  IMG_BOOL *pbFenceRequired)
+{
+
+	ServerSyncRef(psSync);
+
+	/*
+		ServerSyncRef will acquire and release the lock but we need to
+		reacquire here to ensure the state that we're modifying below
+		will be consistent with itself. But it doesn't matter if another
+		thread acquires the lock in between as we've ensured the sync
+		wont go away
+	*/
+	OSLockAcquire(psSync->hLock);
+	_ServerSyncTakeOperation(psSync,
+							 bUpdate,
+							 pui32FenceValue,
+							 pui32UpdateValue);
+
+	/*
+		The caller want to know if a fence command is required
+		i.e. was the last operation done on this sync done by the
+		the same sync requestor
+	*/
+	if (pbFenceRequired)
+	{
+		if (ui32SyncRequesterID == psSync->ui32LastSyncRequesterID)
+		{
+			*pbFenceRequired = IMG_FALSE;
+		}
+		else
+		{
+			*pbFenceRequired = IMG_TRUE;
+		}
+	}
+	/*
+		If we're transitioning from a HW operation to a SW operation we
+		need to save the last update the HW will do so that when we PDump
+		we can issue a POL for it before the next HW operation and then
+		LDB in the last SW fence update
+	*/
+	if (psSync->bSWOperation == IMG_FALSE)
+	{
+		psSync->bSWOperation = IMG_TRUE;
+		psSync->ui32LastHWUpdate = *pui32FenceValue;
+		PDumpIsCaptureFrameKM(&psSync->bSWOpStartedInCaptRange);
+	}
+
+	if (pbFenceRequired)
+	{
+		if (*pbFenceRequired)
+		{
+			SYNC_UPDATES_PRINT("%s: sync: %p, fence: %d, value: %d", __FUNCTION__, psSync, *pui32FenceValue, *pui32UpdateValue);
+		}
+	}
+
+	/* Only update the last requester id if we are make changes to this sync
+	 * object. */
+	if (bUpdate)
+		psSync->ui32LastSyncRequesterID = ui32SyncRequesterID;
+
+	OSLockRelease(psSync->hLock);
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PVRSRVServerSyncQueueHWOpKM(SERVER_SYNC_PRIMITIVE *psSync,
+						       IMG_BOOL bUpdate,
+						       IMG_UINT32 *pui32FenceValue,
+						       IMG_UINT32 *pui32UpdateValue)
+{
+	/*
+		For HW operations the client is required to ensure the
+		operation has completed before freeing the sync as we
+		no way of dropping the refcount if we where to acquire it
+		here.
+
+		Take the lock to ensure the state that we're modifying below
+		will be consistent with itself.
+	*/
+	OSLockAcquire(psSync->hLock);
+	_ServerSyncTakeOperation(psSync,
+							 bUpdate,
+							 pui32FenceValue,
+							 pui32UpdateValue);
+
+	/*
+		Note:
+
+		We might want to consider optimising the fences that we write for
+		HW operations but for now just clear it back to unknown
+	*/
+	psSync->ui32LastSyncRequesterID = SYNC_REQUESTOR_UNKNOWN;
+
+	if (psSync->bSWOperation)
+	{
+#if defined(PDUMP)		
+		IMG_CHAR azTmp[256];
+		OSSNPrintf(azTmp,
+				   sizeof(azTmp),
+				   "Wait for HW ops and dummy update for SW ops (0x%p, FW VAddr = 0x%08x, value = 0x%08x)\n",
+				   psSync,
+				   SyncPrimGetFirmwareAddr(psSync->psSync),
+				   *pui32FenceValue);
+		PDumpCommentKM(azTmp, 0);
+#endif
+
+		if (psSync->bSWOpStartedInCaptRange)
+		{
+			/* Dump a POL for the previous HW operation */
+			SyncPrimPDumpPol(psSync->psSync,
+								psSync->ui32LastHWUpdate,
+								0xffffffff,
+								PDUMP_POLL_OPERATOR_EQUAL,
+								0);
+		}
+
+		/* Dump the expected value (i.e. the value after all the SW operations) */
+		SyncPrimPDumpValue(psSync->psSync, *pui32FenceValue);
+
+		/* Reset the state as we've just done a HW operation */
+		psSync->bSWOperation = IMG_FALSE;
+	}
+	OSLockRelease(psSync->hLock);
+
+	SYNC_UPDATES_PRINT("%s: sync: %p, fence: %d, value: %d", __FUNCTION__, psSync, *pui32FenceValue, *pui32UpdateValue);
+
+	return PVRSRV_OK;
+}
+
+IMG_BOOL ServerSyncFenceIsMet(SERVER_SYNC_PRIMITIVE *psSync,
+							   IMG_UINT32 ui32FenceValue)
+{
+	SYNC_UPDATES_PRINT("%s: sync: %p, value(%d) == fence(%d)?", __FUNCTION__, psSync, *psSync->psSync->pui32LinAddr, ui32FenceValue);
+	return (*psSync->psSync->pui32LinAddr == ui32FenceValue);
+}
+
+IMG_VOID
+ServerSyncCompleteOp(SERVER_SYNC_PRIMITIVE *psSync,
+					 IMG_BOOL bDoUpdate,
+					 IMG_UINT32 ui32UpdateValue)
+{
+	if (bDoUpdate)
+	{
+		SYNC_UPDATES_PRINT("%s: sync: %p (%d) = %d", __FUNCTION__, psSync, *psSync->psSync->pui32LinAddr, ui32UpdateValue);
+
+		*psSync->psSync->pui32LinAddr = ui32UpdateValue;
+	}
+
+	ServerSyncUnref(psSync);
+}
+
+IMG_UINT32 ServerSyncGetId(SERVER_SYNC_PRIMITIVE *psSync)
+{
+	return psSync->ui32UID;
+}
+
+IMG_UINT32 ServerSyncGetFWAddr(SERVER_SYNC_PRIMITIVE *psSync)
+{
+	return SyncPrimGetFirmwareAddr(psSync->psSync);
+}
+
+IMG_UINT32 ServerSyncGetValue(SERVER_SYNC_PRIMITIVE *psSync)
+{
+	return *psSync->psSync->pui32LinAddr;
+}
+
+IMG_UINT32 ServerSyncGetNextValue(SERVER_SYNC_PRIMITIVE *psSync)
+{
+	return psSync->ui32NextOp;
+}
+
+static IMG_BOOL _ServerSyncState(PDLLIST_NODE psNode, IMG_PVOID pvCallbackData)
+{
+	SERVER_SYNC_PRIMITIVE *psSync = IMG_CONTAINER_OF(psNode, SERVER_SYNC_PRIMITIVE, sNode);
+	DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf = IMG_NULL;
+
+	pfnDumpDebugPrintf = g_pfnDumpDebugPrintf;
+
+	if (*psSync->psSync->pui32LinAddr != psSync->ui32NextOp)
+	{
+#if !defined(SUPPORT_EXTRA_METASP_DEBUG)
+		PVR_DUMPDEBUG_LOG(("\tPending server sync (ID = %d, FWAddr = 0x%08x): Current = 0x%08x, NextOp = 0x%08x (%s)",
+		                   psSync->ui32UID,
+		                   ServerSyncGetFWAddr(psSync),
+		                   ServerSyncGetValue(psSync),
+		                   psSync->ui32NextOp,
+		                   psSync->szClassName));
+#else
+		PVR_DUMPDEBUG_LOG(("\tPending server sync (ID = %d, FWAddr = 0x%08x): Value (Host) = 0x%08x, Value (FW) = 0x%08x, NextOp = 0x%08x (%s)",
+		                   psSync->ui32UID,
+		                   ServerSyncGetFWAddr(psSync),
+		                   ServerSyncGetValue(psSync),
+		                   RGXReadWithSP(ServerSyncGetFWAddr(psSync)),
+		                   psSync->ui32NextOp,
+		                   psSync->szClassName));
+#endif
+	}
+	return IMG_TRUE;
+}
+
+static IMG_VOID _ServerSyncDebugRequest(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle, IMG_UINT32 ui32VerbLevel)
+{
+
+	DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf = IMG_NULL;
+
+	PVR_UNREFERENCED_PARAMETER(hDebugRequestHandle);
+	
+	pfnDumpDebugPrintf = g_pfnDumpDebugPrintf;
+	if (ui32VerbLevel == DEBUG_REQUEST_VERBOSITY_HIGH)
+	{
+		PVR_DUMPDEBUG_LOG(("Dumping all pending server syncs"));
+		OSLockAcquire(g_hListLock);
+		dllist_foreach_node(&g_sAllServerSyncs, _ServerSyncState, IMG_NULL);
+		OSLockRelease(g_hListLock);
+	}
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimOpCreateKM(IMG_UINT32 ui32SyncBlockCount,
+						 SYNC_PRIMITIVE_BLOCK **papsSyncPrimBlock,
+						 IMG_UINT32 ui32ClientSyncCount,
+						 IMG_UINT32 *paui32SyncBlockIndex,
+						 IMG_UINT32 *paui32Index,
+						 IMG_UINT32 ui32ServerSyncCount,
+						 SERVER_SYNC_PRIMITIVE **papsServerSync,
+						 SERVER_OP_COOKIE **ppsServerCookie)
+{
+	SERVER_OP_COOKIE *psNewCookie;
+	IMG_UINT32 ui32BlockAllocSize;
+	IMG_UINT32 ui32ServerAllocSize;
+	IMG_UINT32 ui32ClientAllocSize;
+	IMG_UINT32 ui32TotalAllocSize;
+	IMG_UINT32 i;
+	IMG_CHAR *pcPtr;
+	PVRSRV_ERROR eError;
+
+	/* Allocate space for all the sync block list */
+	ui32BlockAllocSize = ui32SyncBlockCount * (sizeof(SYNC_PRIMITIVE_BLOCK *));
+
+	/* Allocate space for all the client sync size elements */
+	ui32ClientAllocSize = ui32ClientSyncCount * (5 * sizeof(IMG_UINT32));
+
+	/* Allocate space for all the server sync size elements */
+	ui32ServerAllocSize = ui32ServerSyncCount * (sizeof(SERVER_SYNC_PRIMITIVE *)
+							+ (2 * sizeof(IMG_UINT32)));
+
+	ui32TotalAllocSize = sizeof(SERVER_OP_COOKIE) +
+							 ui32BlockAllocSize +
+							 ui32ServerAllocSize +
+							 ui32ClientAllocSize;
+
+	psNewCookie = OSAllocMem(ui32TotalAllocSize);
+	pcPtr = (IMG_CHAR *) psNewCookie;
+
+	if (!psNewCookie)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e0;
+	}
+	OSMemSet(psNewCookie, 0, ui32TotalAllocSize);
+
+	/* Setup the pointers */
+	pcPtr += sizeof(SERVER_OP_COOKIE);
+	psNewCookie->papsSyncPrimBlock = (SYNC_PRIMITIVE_BLOCK **) pcPtr;
+
+	pcPtr += sizeof(SYNC_PRIMITIVE_BLOCK *) * ui32SyncBlockCount;
+	psNewCookie->paui32SyncBlockIndex = (IMG_UINT32 *) pcPtr;
+
+	pcPtr += sizeof(IMG_UINT32) * ui32ClientSyncCount;
+	psNewCookie->paui32Index = (IMG_UINT32 *) pcPtr;
+
+	pcPtr += sizeof(IMG_UINT32) * ui32ClientSyncCount;
+	psNewCookie->paui32Flags = (IMG_UINT32 *) pcPtr;
+
+	pcPtr += sizeof(IMG_UINT32) * ui32ClientSyncCount;
+	psNewCookie->paui32FenceValue = (IMG_UINT32 *) pcPtr;
+
+	pcPtr += sizeof(IMG_UINT32) * ui32ClientSyncCount;
+	psNewCookie->paui32UpdateValue = (IMG_UINT32 *) pcPtr;
+
+	pcPtr += sizeof(IMG_UINT32) * ui32ClientSyncCount;
+	psNewCookie->papsServerSync =(SERVER_SYNC_PRIMITIVE **) pcPtr;
+
+	pcPtr += sizeof(SERVER_SYNC_PRIMITIVE *) * ui32ServerSyncCount;
+	psNewCookie->paui32ServerFenceValue = (IMG_UINT32 *) pcPtr;
+
+	pcPtr += sizeof(IMG_UINT32) * ui32ServerSyncCount;
+	psNewCookie->paui32ServerUpdateValue = (IMG_UINT32 *) pcPtr;
+
+	pcPtr += sizeof(IMG_UINT32) * ui32ServerSyncCount;
+
+	/* Check the pointer setup went ok */
+	PVR_ASSERT(pcPtr == (((IMG_CHAR *) psNewCookie) + ui32TotalAllocSize));
+
+	psNewCookie->ui32SyncBlockCount= ui32SyncBlockCount;
+	psNewCookie->ui32ServerSyncCount = ui32ServerSyncCount;
+	psNewCookie->ui32ClientSyncCount = ui32ClientSyncCount;
+	psNewCookie->bActive = IMG_FALSE;
+
+	/* Copy all the data into our server cookie */
+	OSMemCopy(psNewCookie->papsSyncPrimBlock,
+			  papsSyncPrimBlock,
+			  sizeof(SYNC_PRIMITIVE_BLOCK *) * ui32SyncBlockCount);
+
+	OSMemCopy(psNewCookie->paui32SyncBlockIndex,
+			  paui32SyncBlockIndex,
+			  sizeof(IMG_UINT32) * ui32ClientSyncCount);
+	OSMemCopy(psNewCookie->paui32Index,
+			  paui32Index,
+			  sizeof(IMG_UINT32) * ui32ClientSyncCount);
+
+	OSMemCopy(psNewCookie->papsServerSync,
+			  papsServerSync,
+			  sizeof(SERVER_SYNC_PRIMITIVE *) *ui32ServerSyncCount);
+
+	/*
+		Take a reference on all the sync blocks and server syncs so they can't
+		be freed while we're using them
+	*/
+	for (i=0;i<ui32SyncBlockCount;i++)
+	{
+		_SyncPrimitiveBlockRef(psNewCookie->papsSyncPrimBlock[i]);
+	}
+
+	for (i=0;i<ui32ServerSyncCount;i++)
+	{
+		ServerSyncRef(psNewCookie->papsServerSync[i]);
+	}
+
+	*ppsServerCookie = psNewCookie;
+	return PVRSRV_OK;
+
+e0:
+	return eError;
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimOpTakeKM(SERVER_OP_COOKIE *psServerCookie,
+					       IMG_UINT32 ui32ClientSyncCount,
+					       IMG_UINT32 *paui32Flags,
+					       IMG_UINT32 *paui32FenceValue,
+					       IMG_UINT32 *paui32UpdateValue,
+					       IMG_UINT32 ui32ServerSyncCount,
+						   IMG_UINT32 *paui32ServerFlags)
+{
+	IMG_UINT32 i;
+
+	if ((ui32ClientSyncCount != psServerCookie->ui32ClientSyncCount) ||
+		(ui32ServerSyncCount != psServerCookie->ui32ServerSyncCount))
+	{
+		/* The bridge layer should have stopped us getting here but check incase */
+		PVR_DPF((PVR_DBG_ERROR, "%s: Invalid sync counts", __FUNCTION__));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	for (i=0;i<ui32ServerSyncCount;i++)
+	{
+		/* Server syncs must fence */
+		if ((paui32ServerFlags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK) == 0)
+		{
+			return PVRSRV_ERROR_INVALID_SYNC_PRIM_OP;
+		}
+	}
+
+	/*
+		For client syncs all we need to do is save the values
+		that we've been passed
+	*/
+	OSMemCopy(psServerCookie->paui32Flags,
+			  paui32Flags,
+			  sizeof(IMG_UINT32) * ui32ClientSyncCount);
+	OSMemCopy(psServerCookie->paui32FenceValue,
+			  paui32FenceValue,
+			  sizeof(IMG_UINT32) * ui32ClientSyncCount);
+	OSMemCopy(psServerCookie->paui32UpdateValue,
+			  paui32UpdateValue,
+			  sizeof(IMG_UINT32) * ui32ClientSyncCount);
+
+	/*
+		For server syncs we just take an operation
+	*/
+	for (i=0;i<ui32ServerSyncCount;i++)
+	{
+		/*
+			Take op can only take one operation at a time so we can't
+			optimise away fences so just report the requestor as unknown
+		*/
+		PVRSRVServerSyncQueueSWOpKM(psServerCookie->papsServerSync[i],
+								  &psServerCookie->paui32ServerFenceValue[i],
+								  &psServerCookie->paui32ServerUpdateValue[i],
+								  SYNC_REQUESTOR_UNKNOWN,
+								  (paui32ServerFlags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE) ? IMG_TRUE:IMG_FALSE,
+								  IMG_NULL);
+	}
+
+	psServerCookie->bActive = IMG_TRUE;
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimOpReadyKM(SERVER_OP_COOKIE *psServerCookie,
+						IMG_BOOL *pbReady)
+{
+	IMG_UINT32 i;
+	IMG_BOOL bReady = IMG_TRUE;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	if (!psServerCookie->bActive)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Operation cookie not active (no take operation performed)", __FUNCTION__));
+
+		bReady = IMG_FALSE;
+		eError = PVRSRV_ERROR_BAD_SYNC_STATE;
+		goto e0;
+	}
+
+	/* Check the client syncs */
+	for (i=0;i<psServerCookie->ui32ClientSyncCount;i++)
+	{
+		if (psServerCookie->paui32Flags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK)
+		{
+			IMG_UINT32 ui32BlockIndex = psServerCookie->paui32SyncBlockIndex[i];
+			IMG_UINT32 ui32Index = psServerCookie->paui32Index[i];
+			SYNC_PRIMITIVE_BLOCK *psSyncBlock = psServerCookie->papsSyncPrimBlock[ui32BlockIndex];
+
+			if (psSyncBlock->pui32LinAddr[ui32Index] !=
+					psServerCookie->paui32FenceValue[i])
+			{
+				bReady = IMG_FALSE;
+				goto e0;
+			}
+		}
+	}
+
+	for (i=0;i<psServerCookie->ui32ServerSyncCount;i++)
+	{
+		bReady = ServerSyncFenceIsMet(psServerCookie->papsServerSync[i],
+									  psServerCookie->paui32ServerFenceValue[i]);
+		if (!bReady)
+		{
+			break;
+		}
+	}
+
+e0:
+	*pbReady = bReady;
+	return eError;
+}
+
+static
+PVRSRV_ERROR _SyncPrimOpComplete(SERVER_OP_COOKIE *psServerCookie)
+{
+	IMG_UINT32 i;
+
+	for (i=0;i<psServerCookie->ui32ClientSyncCount;i++)
+	{
+		if (psServerCookie->paui32Flags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE)
+		{
+			IMG_UINT32 ui32BlockIndex = psServerCookie->paui32SyncBlockIndex[i];
+			IMG_UINT32 ui32Index = psServerCookie->paui32Index[i];
+			SYNC_PRIMITIVE_BLOCK *psSyncBlock = psServerCookie->papsSyncPrimBlock[ui32BlockIndex];
+
+			psSyncBlock->pui32LinAddr[ui32Index] = psServerCookie->paui32UpdateValue[i];
+		}
+	}
+
+	for (i=0;i<psServerCookie->ui32ServerSyncCount;i++)
+	{
+		ServerSyncCompleteOp(psServerCookie->papsServerSync[i],
+							 (psServerCookie->paui32ServerFenceValue[i] != psServerCookie->paui32ServerUpdateValue[i]),
+							 psServerCookie->paui32ServerUpdateValue[i]);
+	}
+
+	psServerCookie->bActive = IMG_FALSE;
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimOpCompleteKM(SERVER_OP_COOKIE *psServerCookie)
+{
+	IMG_BOOL bReady;
+
+	PVRSRVSyncPrimOpReadyKM(psServerCookie, &bReady);
+
+	/* Check the client is playing ball */
+	if (!bReady)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: sync op still not ready", __FUNCTION__));
+
+		return PVRSRV_ERROR_BAD_SYNC_STATE;
+	}
+
+	return _SyncPrimOpComplete(psServerCookie);
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimOpDestroyKM(SERVER_OP_COOKIE *psServerCookie)
+{
+	IMG_UINT32 i;
+
+	/* If the operation is still active then check if it's finished yet */
+	if (psServerCookie->bActive)
+	{
+		if (PVRSRVSyncPrimOpCompleteKM(psServerCookie) == PVRSRV_ERROR_BAD_SYNC_STATE)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Not ready, ask for retry", __FUNCTION__));
+			return PVRSRV_ERROR_RETRY;
+		}
+	}
+
+	/* Drop our references on the sync blocks and server syncs*/
+	for (i = 0; i < psServerCookie->ui32SyncBlockCount; i++)
+	{
+		_SyncPrimitiveBlockUnref(psServerCookie->papsSyncPrimBlock[i]);
+	}
+
+	for (i = 0; i < psServerCookie->ui32ServerSyncCount; i++)
+	{
+		ServerSyncUnref(psServerCookie->papsServerSync[i]);
+	}
+
+	OSFreeMem(psServerCookie);
+	return PVRSRV_OK;
+}
+
+#if defined(PDUMP)
+PVRSRV_ERROR
+PVRSRVSyncPrimPDumpValueKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value)
+{
+	/*
+		We might be ask to PDump sync state outside of capture range
+		(e.g. texture uploads) so make this continuous.
+	*/
+	DevmemPDumpLoadMemValue32(psSyncBlk->psMemDesc,
+					   ui32Offset,
+					   ui32Value,
+					   PDUMP_FLAGS_CONTINUOUS);
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimPDumpKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset)
+{
+	/*
+		We might be ask to PDump sync state outside of capture range
+		(e.g. texture uploads) so make this continuous.
+	*/
+	DevmemPDumpLoadMem(psSyncBlk->psMemDesc,
+					   ui32Offset,
+					   sizeof(IMG_UINT32),
+					   PDUMP_FLAGS_CONTINUOUS);
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimPDumpPolKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset,
+						 IMG_UINT32 ui32Value, IMG_UINT32 ui32Mask,
+						 PDUMP_POLL_OPERATOR eOperator,
+						 PDUMP_FLAGS_T ui32PDumpFlags)
+{
+	DevmemPDumpDevmemPol32(psSyncBlk->psMemDesc,
+						   ui32Offset,
+						   ui32Value,
+						   ui32Mask,
+						   eOperator,
+						   ui32PDumpFlags);
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimOpPDumpPolKM(SERVER_OP_COOKIE *psServerCookie,
+						 PDUMP_POLL_OPERATOR eOperator,
+						 PDUMP_FLAGS_T ui32PDumpFlags)
+{
+	IMG_UINT32 i;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	if (!psServerCookie->bActive)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Operation cookie not active (no take operation performed)", __FUNCTION__));
+
+		eError = PVRSRV_ERROR_BAD_SYNC_STATE;
+		goto e0;
+	}
+
+	/* PDump POL on the client syncs */
+	for (i = 0; i < psServerCookie->ui32ClientSyncCount; i++)
+	{
+		if (psServerCookie->paui32Flags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK)
+		{
+			IMG_UINT32 ui32BlockIndex = psServerCookie->paui32SyncBlockIndex[i];
+			IMG_UINT32 ui32Index = psServerCookie->paui32Index[i];
+			SYNC_PRIMITIVE_BLOCK *psSyncBlock = psServerCookie->papsSyncPrimBlock[ui32BlockIndex];
+
+			PVRSRVSyncPrimPDumpPolKM(psSyncBlock,
+									ui32Index*sizeof(IMG_UINT32),
+									psServerCookie->paui32FenceValue[i],
+									0xFFFFFFFFU,
+									eOperator,
+									ui32PDumpFlags);
+		}
+	}
+
+	/* PDump POL on the server syncs */
+	for (i = 0; i < psServerCookie->ui32ServerSyncCount; i++)
+	{
+		SERVER_SYNC_PRIMITIVE *psServerSync = psServerCookie->papsServerSync[i];
+		IMG_UINT32 ui32FenceValue = psServerCookie->paui32ServerFenceValue[i];
+
+		SyncPrimPDumpPol(psServerSync->psSync,
+						ui32FenceValue,
+						0xFFFFFFFFU,
+						PDUMP_POLL_OPERATOR_EQUAL,
+						ui32PDumpFlags);
+	}
+
+e0:
+	return eError;
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimPDumpCBPKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT64 ui32Offset,
+						 IMG_UINT64 uiWriteOffset, IMG_UINT64 uiPacketSize,
+						 IMG_UINT64 uiBufferSize)
+{
+	DevmemPDumpCBP(psSyncBlk->psMemDesc,
+				   ui32Offset,
+				   uiWriteOffset,
+				   uiPacketSize,
+				   uiBufferSize);
+	return PVRSRV_OK;
+}
+#endif
+
+/* SyncRegisterConnection */
+PVRSRV_ERROR SyncRegisterConnection(SYNC_CONNECTION_DATA **ppsSyncConnectionData)
+{
+	SYNC_CONNECTION_DATA *psSyncConnectionData;
+	PVRSRV_ERROR eError;
+
+	psSyncConnectionData = OSAllocMem(sizeof(SYNC_CONNECTION_DATA));
+	if (psSyncConnectionData == IMG_NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_alloc;
+	}
+
+	eError = OSLockCreate(&psSyncConnectionData->hLock, LOCK_TYPE_PASSIVE);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_lockcreate;
+	}
+	dllist_init(&psSyncConnectionData->sListHead);
+	psSyncConnectionData->ui32RefCount = 1;
+
+	*ppsSyncConnectionData = psSyncConnectionData;
+	return PVRSRV_OK;
+
+fail_lockcreate:
+	OSFreeMem(psSyncConnectionData);
+fail_alloc:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+/* SyncUnregisterConnection */
+IMG_VOID SyncUnregisterConnection(SYNC_CONNECTION_DATA *psSyncConnectionData)
+{
+	_SyncConnectionUnref(psSyncConnectionData);
+}
+
+static
+IMG_BOOL _PDumpSyncBlock(PDLLIST_NODE psNode, IMG_PVOID pvCallbackData)
+{
+	SYNC_PRIMITIVE_BLOCK *psSyncBlock = IMG_CONTAINER_OF(psNode, SYNC_PRIMITIVE_BLOCK, sConnectionNode);
+	PVR_UNREFERENCED_PARAMETER(pvCallbackData);
+
+	DevmemPDumpLoadMem(psSyncBlock->psMemDesc,
+					   0,
+					   psSyncBlock->ui32BlockSize,
+					   PDUMP_FLAGS_CONTINUOUS);
+	return IMG_TRUE;
+}
+
+IMG_VOID SyncConnectionPDumpSyncBlocks(SYNC_CONNECTION_DATA *psSyncConnectionData)
+{
+	OSLockAcquire(psSyncConnectionData->hLock);
+
+	PDUMPCOMMENT("Dump client Sync Prim state");
+	dllist_foreach_node(&psSyncConnectionData->sListHead,
+						_PDumpSyncBlock,
+						IMG_NULL);
+
+	OSLockRelease(psSyncConnectionData->hLock);
+}
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+#define NS_IN_S (1000000000UL)
+static IMG_VOID _SyncRecordPrint(struct SYNC_RECORD * psSyncRec, IMG_UINT64 ui64TimeNow)
+{
+	SYNC_PRIMITIVE_BLOCK *psSyncBlock = psSyncRec->psServerSyncPrimBlock;
+	DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf = g_pfnDumpDebugPrintf;
+
+	if (SYNC_RECORD_TYPE_UNKNOWN != psSyncRec->eRecordType)
+	{
+		IMG_UINT64 ui64DeltaS;
+		IMG_UINT32 ui32DeltaF;
+		IMG_UINT64 ui64Delta = ui64TimeNow - psSyncRec->ui64OSTime;
+		ui64DeltaS = OSDivide64(ui64Delta, NS_IN_S, &ui32DeltaF);
+
+		if (psSyncBlock && psSyncBlock->pui32LinAddr)
+		{
+			IMG_VOID *pSyncAddr;
+			pSyncAddr = psSyncBlock->pui32LinAddr + psSyncRec->ui32SyncOffset;
+
+			PVR_DUMPDEBUG_LOG(("\t%s %05u %05llu.%09u FWAddr=0x%08x Val=0x%08x (%s)",
+				((SYNC_RECORD_TYPE_SERVER==psSyncRec->eRecordType)?"Server":"Client"),
+				psSyncRec->uiPID,
+				ui64DeltaS, ui32DeltaF,
+				(psSyncRec->ui32FwBlockAddr+psSyncRec->ui32SyncOffset),
+				*(IMG_UINT32*)pSyncAddr,
+				psSyncRec->szClassName
+				));
+		}
+		else
+		{
+			PVR_DUMPDEBUG_LOG(("\t%s %05u %05llu.%09u FWAddr=0x%08x Val=<null_ptr> (%s)",
+				((SYNC_RECORD_TYPE_SERVER==psSyncRec->eRecordType)?"Server":"Client"),
+				psSyncRec->uiPID,
+				ui64DeltaS, ui32DeltaF,
+				(psSyncRec->ui32FwBlockAddr+psSyncRec->ui32SyncOffset),
+				psSyncRec->szClassName
+				));
+		}
+	}
+}
+
+static IMG_BOOL _SyncRecordNodePrint(PDLLIST_NODE psNode, IMG_VOID *pvCallbackData)
+{
+	struct SYNC_RECORD *psSyncRec;
+	psSyncRec = IMG_CONTAINER_OF(psNode, struct SYNC_RECORD, sNode);
+	_SyncRecordPrint(psSyncRec, *((IMG_UINT64*)pvCallbackData));
+	return IMG_TRUE;
+}
+
+static IMG_VOID _SyncRecordRequest(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle, IMG_UINT32 ui32VerbLevel)
+{
+	IMG_UINT64 ui64TimeNowS;
+	IMG_UINT32 ui32TimeNowF;
+	DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf = g_pfnDumpDebugPrintf;
+	IMG_UINT64 ui64TimeNow = OSClockns64();
+	ui64TimeNowS = OSDivide64(ui64TimeNow, NS_IN_S, &ui32TimeNowF);
+
+	PVR_UNREFERENCED_PARAMETER(hDebugRequestHandle);
+
+	if (ui32VerbLevel == DEBUG_REQUEST_VERBOSITY_HIGH)
+	{
+		IMG_UINT i;
+		OSLockAcquire(g_hSyncRecordListLock);
+
+		PVR_DUMPDEBUG_LOG(("Dumping all allocated syncs @ %05llu.%09u", ui64TimeNowS, ui32TimeNowF));
+		PVR_DUMPDEBUG_LOG(("\t%-6s %-5s %-15s %-17s %-14s (%s)",
+					"Type", "PID", "Time Delta (s)", "Address", "Value", "Annotation"));
+		dllist_foreach_node(&g_sSyncRecordList, _SyncRecordNodePrint, &ui64TimeNow);
+
+		PVR_DUMPDEBUG_LOG(("Dumping all recently freed syncs @ %05llu.%09u", ui64TimeNowS, ui32TimeNowF));
+		PVR_DUMPDEBUG_LOG(("\t%-6s %-5s %-15s %-17s %-14s (%s)",
+					"Type", "PID", "Time Delta (s)", "Address", "Value", "Annotation"));
+		for(i = DECREMENT_WITH_WRAP(g_uiFreedSyncRecordIdx, PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN);
+				i != g_uiFreedSyncRecordIdx;
+				i = DECREMENT_WITH_WRAP(i, PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN))
+		{
+			if (g_apsFreedSyncRecords[i])
+			{
+				_SyncRecordPrint(g_apsFreedSyncRecords[i], ui64TimeNow);
+			}
+			else
+			{
+				break;
+			}
+		}
+
+		OSLockRelease(g_hSyncRecordListLock);
+	}
+}
+#undef NS_IN_S
+
+static PVRSRV_ERROR SyncRecordListInit(IMG_VOID)
+{
+	PVRSRV_ERROR eError;
+
+	eError = OSLockCreate(&g_hSyncRecordListLock, LOCK_TYPE_NONE);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_lock_create;
+	}
+	dllist_init(&g_sSyncRecordList);
+
+	eError = PVRSRVRegisterDbgRequestNotify(&g_hSyncRecordNotify,
+											_SyncRecordRequest,
+											DEBUG_REQUEST_SERVERSYNC,
+											IMG_NULL);
+
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_dbg_register;
+	}
+
+	return PVRSRV_OK;
+
+fail_dbg_register:
+	OSLockDestroy(g_hSyncRecordListLock);;
+fail_lock_create:
+	return eError;
+}
+
+static IMG_BOOL _SyncRecordListDestroy(PDLLIST_NODE psNode, IMG_PVOID pvCallbackData)
+{
+	struct SYNC_RECORD *pSyncRec;
+
+	PVR_UNREFERENCED_PARAMETER(pvCallbackData);
+
+	pSyncRec = IMG_CONTAINER_OF(psNode, struct SYNC_RECORD, sNode);
+
+	dllist_remove_node(psNode);
+	OSFreeMem(pSyncRec);
+
+	return IMG_TRUE;
+}
+#endif /* #if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING) */
+
+PVRSRV_ERROR ServerSyncInit(IMG_VOID)
+{
+	PVRSRV_ERROR eError;
+
+	eError = OSLockCreate(&g_hListLock, LOCK_TYPE_NONE);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_lock_create;
+	}
+	dllist_init(&g_sAllServerSyncs);
+
+	eError = PVRSRVRegisterDbgRequestNotify(&g_hNotify,
+											_ServerSyncDebugRequest,
+											DEBUG_REQUEST_SERVERSYNC,
+											IMG_NULL);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_dbg_register;
+	}
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+	eError = SyncRecordListInit();
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_record_list;
+	}
+#endif
+
+	return PVRSRV_OK;
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+fail_record_list:
+	PVRSRVUnregisterDbgRequestNotify(g_hNotify);
+#endif
+fail_dbg_register:
+	OSLockDestroy(g_hListLock);;
+fail_lock_create:
+	return eError;
+}
+
+IMG_VOID ServerSyncDeinit(IMG_VOID)
+{
+	PVRSRVUnregisterDbgRequestNotify(g_hNotify);
+	OSLockDestroy(g_hListLock);
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+	{
+		IMG_INT i;
+		OSLockAcquire(g_hSyncRecordListLock);
+		dllist_foreach_node(&g_sSyncRecordList, _SyncRecordListDestroy, NULL);
+		for (i=0; i < PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN; i++)
+		{
+			if (g_apsFreedSyncRecords[i])
+			{
+				OSFreeMem(g_apsFreedSyncRecords[i]);
+				g_apsFreedSyncRecords[i] = NULL;
+			}
+		}
+		OSLockRelease(g_hSyncRecordListLock);
+		PVRSRVUnregisterDbgRequestNotify(g_hSyncRecordNotify);
+		OSLockDestroy(g_hSyncRecordListLock);
+	}
+#endif
+}
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/tlintern.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/tlintern.c
new file mode 100644
index 0000000..5c985e8
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/tlintern.c
@@ -0,0 +1,329 @@
+/*************************************************************************/ /*!
+@File
+@Title          Transport Layer kernel side API implementation.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Transport Layer functions available to driver components in 
+                the driver.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+//#define PVR_DPF_FUNCTION_TRACE_ON 1
+#undef PVR_DPF_FUNCTION_TRACE_ON
+#include "pvr_debug.h"
+
+#include "allocmem.h"
+#include "pvrsrv_error.h"
+#include "osfunc.h"
+
+#include "pvr_tlcommon.h"
+#include "tlintern.h"
+
+/*
+ * Make functions
+ */
+PTL_STREAM_DESC
+TLMakeStreamDesc(PTL_SNODE f1, IMG_UINT32 f2, IMG_HANDLE f3)
+{
+	PTL_STREAM_DESC ps = OSAllocZMem(sizeof(TL_STREAM_DESC));
+	if (ps == IMG_NULL) 
+	{
+		return IMG_NULL;
+	}
+	ps->psNode = f1;
+	ps->ui32Flags = f2;
+	ps->hDataEvent = f3;
+	return ps;
+}
+
+PTL_SNODE
+TLMakeSNode(IMG_HANDLE f2, TL_STREAM *f3, TL_STREAM_DESC *f4)
+{
+	PTL_SNODE ps = OSAllocZMem(sizeof(TL_SNODE));
+	if (ps == IMG_NULL)
+	{
+		return IMG_NULL;
+	}
+	ps->hDataEventObj = f2;
+	ps->psStream = f3;
+	ps->psRDesc = f4;
+	f3->psNode = ps;
+	return ps;
+}
+
+/*
+ * Transport Layer Global top variables and functions
+ */
+static TL_GLOBAL_DATA  sTLGlobalData = { 0 };
+
+TL_GLOBAL_DATA *TLGGD(IMG_VOID)	// TLGetGlobalData()
+{
+	return &sTLGlobalData;
+}
+
+/* TLInit must only be called once at driver initialisation for one device.
+ * An assert is provided to check this condition on debug builds.
+ */
+PVRSRV_ERROR
+TLInit(PVRSRV_DEVICE_NODE *psDevNode)
+{
+	PVRSRV_ERROR eError;
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psDevNode);
+	PVR_ASSERT(sTLGlobalData.psRgxDevNode==0);
+
+	/* Store the RGX device node for later use in devmem buffer allocations */
+	sTLGlobalData.psRgxDevNode = (IMG_VOID*)psDevNode;
+
+	/* Allocate a lock for TL global data, to be used while updating the TL data.
+	 * This is for making TL global data muti-thread safe */
+	eError = OSLockCreate (&sTLGlobalData.hTLGDLock, LOCK_TYPE_PASSIVE);
+	if (eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+	
+	/* Allocate the event object used to signal global TL events such as
+	 * new stream created */
+	eError = OSEventObjectCreate("TLGlobalEventObj", &sTLGlobalData.hTLEventObj);
+	if (eError != PVRSRV_OK)
+	{
+		goto e1;
+	}
+	
+	PVR_DPF_RETURN_OK;
+
+/* Don't allow the driver to start up on error */
+e1:
+	OSLockDestroy (sTLGlobalData.hTLGDLock);
+	sTLGlobalData.hTLGDLock = NULL;
+e0:
+	PVR_DPF_RETURN_RC (eError);
+}
+
+static IMG_VOID RemoveAndFreeStreamNode(PTL_SNODE psRemove)
+{
+	TL_GLOBAL_DATA*  psGD = TLGGD();
+	PTL_SNODE* 		 last;
+	PTL_SNODE 		 psn;
+	PVRSRV_ERROR     eError;
+
+	PVR_DPF_ENTERED;
+
+	// Unlink the stream node from the master list
+	PVR_ASSERT(psGD->psHead);
+	last = &psGD->psHead;
+	for (psn = psGD->psHead; psn; psn=psn->psNext)
+	{
+		if (psn == psRemove)
+		{
+			/* Other calling code may have freed and zero'd the pointers */
+			if (psn->psRDesc)
+			{
+				OSFREEMEM(psn->psRDesc);
+			}
+			if (psn->psStream)
+			{
+				OSFREEMEM(psn->psStream);
+			}
+			*last = psn->psNext;
+			break;
+		}
+		last = &psn->psNext;
+	}
+
+	// Release the event list object owned by the stream node
+	if (psRemove->hDataEventObj)
+	{
+		eError = OSEventObjectDestroy(psRemove->hDataEventObj);
+		PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy");
+
+		psRemove->hDataEventObj = NULL;
+	}
+
+	// Release the memory of the stream node
+	OSFREEMEM(psRemove);
+
+	PVR_DPF_RETURN;
+}
+
+IMG_VOID
+TLDeInit(IMG_VOID)
+{
+	PVR_DPF_ENTERED;
+
+	if (sTLGlobalData.uiClientCnt)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "TLDeInit transport layer but %d client streams are still connected", sTLGlobalData.uiClientCnt));
+		sTLGlobalData.uiClientCnt = 0;
+	}
+
+	/* Clean up the SNODE list */
+	if (sTLGlobalData.psHead)
+	{
+		while (sTLGlobalData.psHead)
+		{
+			RemoveAndFreeStreamNode(sTLGlobalData.psHead);
+		}
+		/* Leave psHead NULL on loop exit */
+	}
+
+	/* Clean up the TL global event object */
+	if (sTLGlobalData.hTLEventObj)
+	{
+		OSEventObjectDestroy(sTLGlobalData.hTLEventObj);
+		sTLGlobalData.hTLEventObj = NULL;
+	}
+
+	/* Destroy the TL global data lock */
+	if (sTLGlobalData.hTLGDLock)
+	{
+		OSLockDestroy (sTLGlobalData.hTLGDLock);
+		sTLGlobalData.hTLGDLock = NULL;
+	}
+
+	sTLGlobalData.psRgxDevNode = NULL;
+
+	PVR_DPF_RETURN;
+}
+
+PVRSRV_DEVICE_NODE*
+TLGetGlobalRgxDevice(IMG_VOID)
+{
+	PVRSRV_DEVICE_NODE *p = (PVRSRV_DEVICE_NODE*)(TLGGD()->psRgxDevNode);
+	if (!p)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "TLGetGlobalRgxDevice() NULL node ptr, TL " \
+				"can not be used when no RGX device has been found"));
+		PVR_ASSERT(p);
+	}
+	return p;
+}
+
+IMG_VOID TLAddStreamNode(PTL_SNODE psAdd)
+{
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psAdd);
+	psAdd->psNext = TLGGD()->psHead;
+	TLGGD()->psHead = psAdd;
+	
+	PVR_DPF_RETURN;
+}
+
+PTL_SNODE TLFindStreamNodeByName(IMG_PCHAR pszName)
+{
+	TL_GLOBAL_DATA*  psGD = TLGGD();
+	PTL_SNODE 		 psn;
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(pszName);
+
+	for (psn = psGD->psHead; psn; psn=psn->psNext)
+	{
+		if (psn->psStream && OSStringCompare(psn->psStream->szName, pszName)==0)
+		{
+			PVR_DPF_RETURN_VAL(psn);
+		}
+	}
+
+	PVR_DPF_RETURN_VAL(IMG_NULL);
+}
+
+PTL_SNODE TLFindStreamNodeByDesc(PTL_STREAM_DESC psRDesc)
+{
+	TL_GLOBAL_DATA*  psGD = TLGGD();
+	PTL_SNODE 		 psn;
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psRDesc);
+
+	for (psn = psGD->psHead; psn; psn=psn->psNext)
+	{
+		if (psn->psRDesc == psRDesc)
+		{
+			PVR_DPF_RETURN_VAL(psn);
+		}
+	}
+	PVR_DPF_RETURN_VAL(IMG_NULL);
+}
+
+IMG_BOOL TLTryRemoveStreamAndFreeStreamNode(PTL_SNODE psRemove)
+{
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psRemove);
+
+	/* If there is a client connected to this stream, defer stream's deletion */
+	if (psRemove->psRDesc != IMG_NULL)
+	{
+		PVR_DPF_RETURN_VAL (IMG_FALSE);
+	}
+
+	/* Remove stream from TL_GLOBAL_DATA's list and free stream node */	
+	psRemove->psStream = IMG_NULL;
+	RemoveAndFreeStreamNode(psRemove);
+
+	PVR_DPF_RETURN_VAL (IMG_TRUE);
+}
+
+IMG_BOOL TLRemoveDescAndTryFreeStreamNode(PTL_SNODE psRemove)
+{
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psRemove);
+
+	/* Remove stream descriptor (i.e. stream reader context) */
+	psRemove->psRDesc = IMG_NULL;
+
+	/* Do not Free Stream Node if there is a write reference (a producer context) to the stream */
+	if (0 != psRemove->uiWRefCount)
+	{
+		PVR_DPF_RETURN_VAL (IMG_FALSE);
+	}
+
+	/* Make stream pointer NULL to prevent it from being destroyed in RemoveAndFreeStreamNode
+	 * Cleanup of stream should be done by the calling context */
+	psRemove->psStream = IMG_NULL;
+	RemoveAndFreeStreamNode(psRemove);
+	
+	PVR_DPF_RETURN_VAL (IMG_TRUE);
+}
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/tlserver.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/tlserver.c
new file mode 100644
index 0000000..9772e3e
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/tlserver.c
@@ -0,0 +1,389 @@
+/*************************************************************************/ /*!
+@File
+@Title          KM server Transport Layer implementation
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Main bridge APIs for Transport Layer client functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include <stddef.h>
+
+#include "img_defs.h"
+
+//#define PVR_DPF_FUNCTION_TRACE_ON 1
+#undef PVR_DPF_FUNCTION_TRACE_ON
+#include "pvr_debug.h"
+
+#include "connection_server.h"
+#include "allocmem.h"
+
+#include "tlintern.h"
+#include "tlserver.h"
+
+#define NO_STREAM_WAIT_PERIOD 2000
+#define NO_DATA_WAIT_PERIOD   1000
+#define NO_ACQUIRE            0xffffffffU
+
+/*
+ * Transport Layer Client API Kernel-Mode bridge implementation
+ */
+PVRSRV_ERROR
+TLServerConnectKM(CONNECTION_DATA *psConnection)
+{
+	PVR_DPF_ENTERED;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	PVR_DPF_RETURN_OK;
+}
+
+PVRSRV_ERROR
+TLServerDisconnectKM(CONNECTION_DATA *psConnection)
+{
+	PVR_DPF_ENTERED;
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	PVR_DPF_RETURN_OK;
+}
+
+PVRSRV_ERROR
+TLServerOpenStreamKM(IMG_PCHAR  	 	   pszName,
+			   	     IMG_UINT32 		   ui32Mode,
+			   	     PTL_STREAM_DESC* 	   ppsSD,
+			   	     DEVMEM_EXPORTCOOKIE** ppsBufCookie)
+{
+	PVRSRV_ERROR 	eError = PVRSRV_OK;
+	PVRSRV_ERROR 	eErrorEO = PVRSRV_OK;
+	PTL_SNODE		psNode = 0;
+	TL_STREAM_DESC* psNewSD = 0;
+	IMG_HANDLE 		hEvent;
+	PTL_GLOBAL_DATA psGD = TLGGD();
+
+#if defined(PVR_DPF_FUNCTION_TRACE_ON)
+    PVR_DPF((PVR_DBG_CALLTRACE, "--> %s:%d entered (%s, %x)", __func__, __LINE__, pszName, ui32Mode));
+#endif
+
+	PVR_ASSERT(pszName);
+
+	/* Acquire TL_GLOBAL_DATA lock here, as if the following TLFindStreamNodeByName
+	 * returns NON NULL PTL_SNODE, we try updating the global data client count and 
+	 * PTL_SNODE's psRDesc and we want to make sure the TL_SNODE is valid (eg. has 
+	 * not been deleted) while we are updating it
+	 */
+	OSLockAcquire (psGD->hTLGDLock);
+
+	psNode = TLFindStreamNodeByName(pszName);
+	if ((psNode == NULL) && (ui32Mode & PVRSRV_STREAM_FLAG_OPEN_WAIT))
+	{	/* Blocking code to wait for stream to be created if it does not exist */
+		eError = OSEventObjectOpen(psGD->hTLEventObj, &hEvent);
+		PVR_LOGG_IF_ERROR (eError, "OSEventObjectOpen", e0);
+
+		do
+		{
+			if ((psNode = TLFindStreamNodeByName(pszName)) == NULL)
+			{
+				PVR_DPF((PVR_DBG_MESSAGE, "Stream %s does not exist, waiting...", pszName));
+				
+				/* Release TL_GLOBAL_DATA lock before sleeping */
+				OSLockRelease (psGD->hTLGDLock);
+
+				/* Will exit OK or with timeout, both cases safe to ignore */
+				eErrorEO = OSEventObjectWaitTimeout(hEvent, NO_STREAM_WAIT_PERIOD);
+				
+				/* Acquire lock after waking up */
+				OSLockAcquire (psGD->hTLGDLock);
+			}
+		}
+		while ((psNode == NULL) && (eErrorEO == PVRSRV_OK));
+
+		eError = OSEventObjectClose(hEvent);
+		PVR_LOGG_IF_ERROR (eError, "OSEventObjectClose", e0);
+	}
+
+	/* Make sure we have found a stream node after wait/search */
+	if (psNode == NULL)
+	{
+		/* Did we exit the wait with timeout, inform caller */
+		if (eErrorEO == PVRSRV_ERROR_TIMEOUT)
+		{
+			eError = eErrorEO;
+		}
+		else
+		{
+			eError = PVRSRV_ERROR_NOT_FOUND;
+			PVR_DPF((PVR_DBG_ERROR, "Stream does not exist"));
+		}
+		goto e0;
+	}
+
+	// Only one client/descriptor per stream supported
+	if (psNode->psRDesc != NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Can not open stream, stream already opened"));
+		eError = PVRSRV_ERROR_ALREADY_OPEN;
+		goto e0;
+	}
+
+	// Create an event handle for this client to wait on when no data in stream
+	// buffer.
+	eError = OSEventObjectOpen(psNode->hDataEventObj, &hEvent);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Not possible to open node's event object"));
+		eError = PVRSRV_ERROR_UNABLE_TO_CREATE_EVENT;
+		goto e0;
+	}
+
+	psNewSD = TLMakeStreamDesc(psNode, ui32Mode, hEvent);
+	if (!psNewSD)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Not possible to make a new stream descriptor"));
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e1;
+	}
+
+	psGD->uiClientCnt++;
+	psNode->psRDesc = psNewSD;
+
+	/* Global data updated. Now release global lock */
+	OSLockRelease (psGD->hTLGDLock);
+
+	// Copy the export cookie back to the user mode API to enable access to
+	// the stream buffer from user-mode process.
+	*ppsBufCookie = TLStreamGetBufferCookie(psNode->psStream);
+
+	*ppsSD = psNewSD;
+
+	PVR_DPF((PVR_DBG_VERBOSE, 
+			 "TLServerOpenStreamKM evList=%p, evObj=%p", 
+			 psNode->hDataEventObj, 
+			 psNode->psRDesc->hDataEvent));
+
+	PVR_DPF_RETURN_OK;
+
+e1:
+	OSEventObjectClose (hEvent);
+e0:
+	OSLockRelease (psGD->hTLGDLock);
+	PVR_DPF_RETURN_RC (eError);
+}
+
+PVRSRV_ERROR
+TLServerCloseStreamKM(PTL_STREAM_DESC psSD)
+{
+	PVRSRV_ERROR    eError = PVRSRV_OK;
+	PTL_GLOBAL_DATA psGD = TLGGD();
+	PTL_SNODE		psNode = 0;
+	PTL_STREAM	psStream;
+	IMG_BOOL	bDestroyStream;
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psSD);
+
+	// Sanity check, quick exit if there are no streams
+	if (psGD->psHead == NULL)
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND);
+	}
+
+	// Check stream still valid
+	psNode = TLFindStreamNodeByDesc(psSD);
+	if ((psNode == NULL) || (psNode != psSD->psNode))
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND);
+	}
+
+	/* Since the descriptor is valid, the stream should not have been made NULL */
+	PVR_ASSERT (psNode->psStream);
+
+	/* Save the stream's reference in-case its destruction is required after this
+	 * client is removed */
+	psStream = psNode->psStream;
+	
+	/* Acquire TL_GLOBAL_DATA lock as the following TLRemoveDescAndTryFreeStreamNode
+	 * call will update the TL_SNODE's descriptor value */
+	OSLockAcquire (psGD->hTLGDLock);
+
+	// Remove descriptor from stream object/list
+	bDestroyStream = TLRemoveDescAndTryFreeStreamNode (psNode);
+
+	// Assert the counter is sane after input data validated.
+	PVR_ASSERT(psGD->uiClientCnt > 0);
+	psGD->uiClientCnt--;
+
+	OSLockRelease (psGD->hTLGDLock);	
+	
+	/* Destroy the stream if its TL_SNODE was removed from TL_GLOBAL_DATA */
+	if (bDestroyStream)
+	{
+		TLStreamDestroy (psStream);
+		psStream = IMG_NULL;
+	}
+	
+	/* Clean up the descriptor structure */
+
+	// Close and free the event handle resource used by this descriptor
+	eError = OSEventObjectClose(psSD->hDataEvent);
+	if (eError != PVRSRV_OK)
+	{
+		// Log error but continue as it seems best
+		PVR_DPF((PVR_DBG_ERROR, "OSEventObjectClose() failed error %d", eError));
+		eError = PVRSRV_ERROR_UNABLE_TO_DESTROY_EVENT;
+	}
+
+	// Free the stream descriptor object
+	OSFREEMEM(psSD);
+
+	PVR_DPF_RETURN_RC(eError);
+}
+
+PVRSRV_ERROR
+TLServerAcquireDataKM(PTL_STREAM_DESC psSD,
+		   	   		  IMG_UINT32*	  puiReadOffset,
+		   	   		  IMG_UINT32* 	  puiReadLen)
+{
+	PVRSRV_ERROR 		eError = PVRSRV_OK;
+	TL_GLOBAL_DATA*		psGD = TLGGD();
+	IMG_UINT32		    uiTmpOffset = NO_ACQUIRE;
+	IMG_UINT32  		uiTmpLen = 0;
+	PTL_SNODE			psNode = 0;
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psSD);
+
+	// Sanity check, quick exit if there are no streams
+	if (psGD->psHead == NULL)
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_ERROR);
+	}
+
+	// Check stream still valid
+	psNode = TLFindStreamNodeByDesc(psSD);
+	if ((psNode == NULL) || (psNode != psSD->psNode))
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND);
+	}
+
+	/* If we are here, the stream will never be made NULL until this context itself 
+	 * calls TLRemoveDescAndTryFreeStreamNode(). This is because the producer will
+	 * fail to make the stream NULL (by calling TLTryRemoveStreamAndFreeStreamNode)
+	 * when a valid stream descriptor is present (i.e. a client is connected).
+	 * Hence, no checks for stream being NON NULL are required after this. */
+	PVR_ASSERT (psNode->psStream);
+	
+	//PVR_DPF((PVR_DBG_VERBOSE, "TLServerAcquireDataKM evList=%p, evObj=%p", psSD->psNode->hDataEventObj, psSD->hDataEvent));
+
+	/* Check for data in the associated stream buffer, sleep/wait if none */
+	while (((uiTmpLen = TLStreamAcquireReadPos(psNode->psStream, &uiTmpOffset)) == 0) &&
+	       (!(psSD->ui32Flags&PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING)) )
+	{
+		PVR_DPF((PVR_DBG_VERBOSE, "TLAcquireDataKM sleeping..."));
+
+		// Loop around if EndOfStream (nothing to read) and wait times out,
+		// exit loop if not time out but data is ready for client
+		while (TLStreamEOS(psNode->psStream))
+		{
+			eError = OSEventObjectWaitTimeout(psSD->hDataEvent, NO_DATA_WAIT_PERIOD);
+			if (eError != PVRSRV_OK)
+			{
+				/* Return timeout or other error condition to the caller who
+				 * can choose to call again if desired. We don't block
+				 * Indefinitely as we want the user mode application to have a
+				 * chance to break out and end if it needs to, so we return the
+				 * time out error code. */
+				PVR_DPF_RETURN_RC(eError);
+			}
+		}
+	}
+
+	/* Data available now if we reach here in blocking more or we take the
+	 * values as is in non-blocking mode which might be all zeros. */
+	*puiReadOffset = uiTmpOffset;
+	*puiReadLen = uiTmpLen;
+
+	PVR_DPF((PVR_DBG_VERBOSE, "TLAcquireDataKM return offset=%d, len=%d bytes", *puiReadOffset, *puiReadLen));
+
+	PVR_DPF_RETURN_OK;
+}
+
+PVRSRV_ERROR
+TLServerReleaseDataKM(PTL_STREAM_DESC psSD,
+		 	 		  IMG_UINT32  	  uiReadOffset,
+		 	 		  IMG_UINT32  	  uiReadLen)
+{
+	TL_GLOBAL_DATA*		psGD = TLGGD();
+	PTL_SNODE			psNode = 0;
+
+	PVR_DPF_ENTERED;
+
+	/* Unreferenced in release builds */
+	PVR_UNREFERENCED_PARAMETER(uiReadOffset);
+
+	PVR_ASSERT(psSD);
+
+	// Sanity check, quick exit if there are no streams
+	if (psGD->psHead == NULL)
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_ERROR);
+	}
+
+	// Check stream still valid
+	psNode = TLFindStreamNodeByDesc(psSD);
+	if ((psNode == NULL) || (psNode != psSD->psNode))
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND);
+	}
+
+	/* Since we have a valid stream descriptor, the stream should not have been
+	 * made NULL by any producer context. */
+	PVR_ASSERT (psNode->psStream);
+
+	PVR_DPF((PVR_DBG_VERBOSE, "TLReleaseDataKM uiReadOffset=%d, uiReadLen=%d", uiReadOffset, uiReadLen));
+
+	// Move read position on to free up space in stream buffer
+	TLStreamAdvanceReadPos(psNode->psStream, uiReadLen);
+
+	PVR_DPF_RETURN_OK;
+}
+
+/*****************************************************************************
+ End of file (tlserver.c)
+*****************************************************************************/
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/tlstream.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/tlstream.c
new file mode 100644
index 0000000..3b7dc2a
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/common/tlstream.c
@@ -0,0 +1,929 @@
+/*************************************************************************/ /*!
+@File
+@Title          Transport Layer kernel side API implementation.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Transport Layer API implementation.
+                These functions are provided to driver components.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+//#define PVR_DPF_FUNCTION_TRACE_ON 1
+#undef PVR_DPF_FUNCTION_TRACE_ON
+#include "pvr_debug.h"
+
+#include "allocmem.h"
+#include "pvrsrv_error.h"
+#include "osfunc.h"
+
+#include "pvr_tlcommon.h"
+#include "tlintern.h"
+#include "tlstream.h"
+
+/* To debug buffer utilisation enable this macro here and
+ * define PVRSRV_NEED_PVR_TRACE in the server pvr_debug.c and in tutils.c
+ * before the inclusion of pvr_debug.h. Issue pvrtutils 6 on target to see
+ * stream buffer utilisation. */
+//#define TL_BUFFER_UTILIZATION 1
+
+#define EVENT_OBJECT_TIMEOUT_MS 1000
+
+/* Given the state of the buffer it returns a number of bytes that the client
+ * can use for a successful allocation. */
+static INLINE IMG_UINT32 suggestAllocSize(IMG_UINT32 ui32LRead,
+										IMG_UINT32 ui32LWrite, 
+										IMG_UINT32 ui32CBSize,
+						                IMG_UINT32 ui32ReqSizeMin)
+{
+	IMG_UINT32 ui32AvSpace = 0;
+	
+	/* This could be written in fewer lines using the ? operator but it  
+		would not be kind to potential readers of this source at all. */ 
+	if ( ui32LRead > ui32LWrite )                          /* Buffer WRAPPED */
+	{
+		if ( (ui32LRead - ui32LWrite) > (sizeof(PVRSRVTL_PACKETHDR) + ui32ReqSizeMin + (IMG_INT) BUFFER_RESERVED_SPACE) )
+		{
+			ui32AvSpace =  ui32LRead - ui32LWrite - sizeof(PVRSRVTL_PACKETHDR) - (IMG_INT) BUFFER_RESERVED_SPACE;
+		}
+	}
+	else                                                  /* Normal, no wrap */
+	{
+		if ( (ui32CBSize - ui32LWrite) > (sizeof(PVRSRVTL_PACKETHDR) + ui32ReqSizeMin + (IMG_INT) BUFFER_RESERVED_SPACE) )
+		{
+			ui32AvSpace =  ui32CBSize - ui32LWrite - sizeof(PVRSRVTL_PACKETHDR) - (IMG_INT) BUFFER_RESERVED_SPACE;
+		}
+		else if ( (ui32LRead - 0) > (sizeof(PVRSRVTL_PACKETHDR) + ui32ReqSizeMin + (IMG_INT) BUFFER_RESERVED_SPACE) )
+		{
+			ui32AvSpace =  ui32LRead - sizeof(PVRSRVTL_PACKETHDR) - (IMG_INT) BUFFER_RESERVED_SPACE;
+		}
+	}
+    /* The max size of a TL packet currently is UINT16. adjust accordingly */
+	return MIN(ui32AvSpace, IMG_UINT16_MAX);
+}
+
+/* Returns bytes left in the buffer. Negative if there is not any.
+ * two 4b aligned values are reserved, one for the write failed buffer flag
+ * and one to be able to distinguish the buffer full state to the buffer
+ * empty state.
+ * Always returns free space -8 even when the "write failed" packet may be
+ * already in the stream before this write. */
+static INLINE IMG_INT
+cbSpaceLeft(IMG_UINT32 ui32Read, IMG_UINT32 ui32Write, IMG_UINT32 ui32size)
+{
+	/* We need to reserve 4b (one packet) in the buffer to be able to tell empty 
+	 * buffers from full buffers and one more for packet write fail packet */
+	if ( ui32Read > ui32Write )
+	{
+		return (IMG_INT) ui32Read - (IMG_INT)ui32Write - (IMG_INT) BUFFER_RESERVED_SPACE;
+	}
+	else
+	{
+		return (IMG_INT)ui32size - ((IMG_INT)ui32Write - (IMG_INT)ui32Read) - (IMG_INT) BUFFER_RESERVED_SPACE;
+	}
+}   
+
+/******************************************************************************* 
+ * TL Server public API implementation.
+ ******************************************************************************/
+PVRSRV_ERROR
+TLStreamCreate(IMG_HANDLE *phStream,
+			   IMG_CHAR *szStreamName,
+			   IMG_UINT32 ui32Size,
+			   IMG_UINT32 ui32StreamFlags,
+               TL_STREAM_SOURCECB pfProducerCB,
+               IMG_PVOID pvProducerUD)
+{
+	PTL_STREAM     psTmp;
+	PVRSRV_ERROR   eError;
+	IMG_HANDLE     hEventList;
+	PTL_SNODE      psn = 0;
+	IMG_CHAR       pszBufferLabel[PRVSRVTL_MAX_STREAM_NAME_SIZE+20];
+
+	DEVMEM_FLAGS_T uiMemFlags =  PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+								 PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | 
+								 PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+								 PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+								 PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT | /* CPU only */
+								 PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+	PVR_DPF_ENTERED;
+	/* Sanity checks:  */
+	/* non NULL handler required */
+	if ( NULL == phStream ) 
+	{ 
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+	}
+	if (OSStringLength(szStreamName) >= PRVSRVTL_MAX_STREAM_NAME_SIZE) 
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+	}
+
+	/* Acquire TL_GLOBAL_DATA lock here because, if the following TLFindStreamNodeByName()
+	 * returns NULL, a new TL_SNODE will be added to TL_GLOBAL_DATA's TL_SNODE list */
+	OSLockAcquire (TLGGD()->hTLGDLock);
+	
+	/* Check if there already exists a stream with this name. */
+	psn = TLFindStreamNodeByName( szStreamName );
+	if ( IMG_NULL != psn )
+	{
+		eError = PVRSRV_ERROR_ALREADY_EXISTS;
+		goto e0;
+	}
+	
+	/* Allocate stream structure container (stream struct) for the new stream */
+	psTmp = OSAllocZMem(sizeof(TL_STREAM)) ;
+	if ( NULL == psTmp ) 
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e0;
+	}
+
+	OSStringCopy(psTmp->szName, szStreamName);
+
+	if ( ui32StreamFlags & TL_FLAG_FORCE_FLUSH )
+	{
+		psTmp->bWaitForEmptyOnDestroy = IMG_TRUE;
+	}
+
+	psTmp->bNoSignalOnCommit = (ui32StreamFlags&TL_FLAG_NO_SIGNAL_ON_COMMIT) ?  IMG_TRUE : IMG_FALSE;
+
+	if ( ui32StreamFlags & TL_FLAG_DROP_DATA ) 
+	{
+		if ( ui32StreamFlags & TL_FLAG_BLOCKING_RESERVE ) 
+		{
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+			goto e1;
+		}
+		psTmp->bDrop = IMG_TRUE;
+	}
+	else if ( ui32StreamFlags & TL_FLAG_BLOCKING_RESERVE ) 
+    {	/* Additional synchronization object required for this kind of stream */
+        psTmp->bBlock = IMG_TRUE;
+
+		eError = OSEventObjectCreate(NULL, &psTmp->hProducerEventObj);
+		if (eError != PVRSRV_OK)
+		{
+			goto e1;
+		}
+		/* Create an event handle for this kind of stream */
+		eError = OSEventObjectOpen(psTmp->hProducerEventObj, &psTmp->hProducerEvent);
+		if (eError != PVRSRV_OK)
+		{
+			goto e2;
+		}
+    }
+
+	/* Remember producer supplied CB and data for later */
+	psTmp->pfProducerCallback = (IMG_VOID(*)(IMG_VOID))pfProducerCB;
+	psTmp->pvProducerUserData = pvProducerUD;
+
+	/* Round the requested bytes to a multiple of array elements' size, eg round 3 to 4 */
+	psTmp->ui32Size = PVRSRVTL_ALIGN(ui32Size);
+	psTmp->ui32Read = 0;
+	psTmp->ui32Write = 0;
+	psTmp->ui32Pending = NOTHING_PENDING;
+
+	OSSNPrintf(pszBufferLabel, sizeof(pszBufferLabel), "TLStreamBuf-%s", szStreamName);
+
+	/* Allocate memory for the circular buffer and export it to user space. */
+	eError = DevmemAllocateExportable( IMG_NULL,
+									   (IMG_HANDLE) TLGetGlobalRgxDevice(),
+									   (IMG_DEVMEM_SIZE_T)psTmp->ui32Size,
+									   (IMG_DEVMEM_ALIGN_T) OSGetPageSize(),
+									   uiMemFlags | PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE,
+									   pszBufferLabel,
+									   &psTmp->psStreamMemDesc);
+	PVR_LOGG_IF_ERROR(eError, "DevmemAllocateExportable", e3);
+
+	eError = DevmemAcquireCpuVirtAddr( psTmp->psStreamMemDesc, (IMG_VOID**) &psTmp->pbyBuffer );
+	PVR_LOGG_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", e4);
+
+	eError = DevmemExport(psTmp->psStreamMemDesc, &(psTmp->sExportCookie));
+	PVR_LOGG_IF_ERROR(eError, "DevmemExport", e5);
+
+	/* Synchronization object to synchronize with user side data transfers. */
+	eError = OSEventObjectCreate(psTmp->szName, &hEventList);
+	if (eError != PVRSRV_OK)
+	{
+		goto e6;
+	}
+
+	eError = OSLockCreate (&psTmp->hStreamLock, LOCK_TYPE_PASSIVE);
+	if (eError != PVRSRV_OK)
+	{
+		goto e7;
+	}
+
+	/* Now remember the stream in the global TL structures */
+	psn = TLMakeSNode(hEventList, (TL_STREAM *)psTmp, 0);
+	if (psn == NULL)
+	{
+		eError=PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e8;
+	}
+
+	/* Stream node created, now reset the write reference count to 1
+	 * (i.e. this context's reference) */
+	psn->uiWRefCount = 1;
+
+	TLAddStreamNode(psn);
+
+	/* Release TL_GLOBAL_DATA lock as the new TL_SNODE is now added to the list */
+	OSLockRelease (TLGGD()->hTLGDLock);
+
+	/* Best effort signal, client wait timeout will ultimately let it find the
+	 * new stream if this fails, acceptable to avoid cleanup as it is tricky
+	 * at this point */
+	(void) OSEventObjectSignal(TLGGD()->hTLEventObj);
+
+	/* Pass the newly created stream handle back to caller */
+	*phStream = (IMG_HANDLE)psTmp;
+	PVR_DPF_RETURN_OK;
+
+e8:
+	OSLockDestroy(psTmp->hStreamLock);
+e7:
+	OSEventObjectDestroy(hEventList);
+e6:
+	DevmemUnexport(psTmp->psStreamMemDesc, &(psTmp->sExportCookie));
+e5:
+	DevmemReleaseCpuVirtAddr( psTmp->psStreamMemDesc );
+e4:
+	DevmemFree(psTmp->psStreamMemDesc);
+e3:
+	OSEventObjectClose(psTmp->hProducerEvent);
+e2:
+	OSEventObjectDestroy(psTmp->hProducerEventObj);
+e1:
+	OSFREEMEM(psTmp);
+e0:
+	OSLockRelease (TLGGD()->hTLGDLock);
+
+	PVR_DPF_RETURN_RC(eError);
+}
+
+PVRSRV_ERROR
+TLStreamOpen(IMG_HANDLE *phStream,
+             IMG_CHAR   *szStreamName)
+{
+ 	PTL_SNODE  psTmpSNode;
+
+	PVR_DPF_ENTERED;
+
+	if ( IMG_NULL == phStream || IMG_NULL == szStreamName )
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+	}
+
+	/* Acquire the TL_GLOBAL_DATA lock first to ensure,
+	 * the TL_STREAM while returned and being modified,
+	 * is not deleted by some other context */
+	OSLockAcquire (TLGGD()->hTLGDLock);
+	
+	/* Search for a stream node with a matching stream name */
+	psTmpSNode = TLFindStreamNodeByName(szStreamName);
+
+	if ( IMG_NULL == psTmpSNode )
+	{
+		OSLockRelease (TLGGD()->hTLGDLock);
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_NOT_FOUND);
+	}
+
+	/* The TL_SNODE->uiWRefCount governs the presence of this node in the
+	 * TL_GLOBAL_DATA list i.e. when uiWRefCount falls to zero we try removing
+	 * this node from the TL_GLOBAL_DATA list. Hence, is protected using the
+	 * TL_GLOBAL_DATA lock and not TL_STREAM lock */
+	psTmpSNode->uiWRefCount++;
+	
+	OSLockRelease (TLGGD()->hTLGDLock);
+
+	/* Return the stream handle to the caller */
+	*phStream = (IMG_HANDLE)psTmpSNode->psStream;
+
+	PVR_DPF_RETURN_VAL(PVRSRV_OK);
+}
+
+IMG_VOID 
+TLStreamClose(IMG_HANDLE hStream)
+{
+	PTL_STREAM	psTmp;
+	IMG_BOOL	bDestroyStream;
+
+	PVR_DPF_ENTERED;
+
+	if ( IMG_NULL == hStream )
+	{
+		PVR_DPF((PVR_DBG_WARNING,
+				 "TLStreamClose failed as NULL stream handler passed, nothing done.\n"));
+		PVR_DPF_RETURN;
+	}
+
+	psTmp = (PTL_STREAM)hStream;
+
+	/* Acquire TL_GLOBAL_DATA lock for updating the reference count as this will be required 
+	 * in-case this TL_STREAM node is to be deleted */
+	OSLockAcquire (TLGGD()->hTLGDLock);
+	
+	/* Decrement write reference counter of the stream */
+	psTmp->psNode->uiWRefCount--;
+
+	if ( 0 != psTmp->psNode->uiWRefCount )
+	{	/* The stream is still being used in other context(s) do not destroy anything */
+		OSLockRelease (TLGGD()->hTLGDLock);
+		PVR_DPF_RETURN;
+	}
+	else
+	{
+		/* Now we try removing this TL_STREAM from TL_GLOBAL_DATA */
+
+		if ( psTmp->bWaitForEmptyOnDestroy == IMG_TRUE )
+		{
+			/* We won't require the TL_STREAM lock to be acquired here for accessing its read
+			 * and write offsets. REASON: We are here because there is no producer context
+			 * referencing this TL_STREAM, hence its ui32Write offset won't be changed now.
+			 * Also, the updation of ui32Read offset is not protected by locks */
+			while (psTmp->ui32Read != psTmp->ui32Write)
+			{
+				/* Release lock before sleeping */
+				OSLockRelease (TLGGD()->hTLGDLock);
+				
+				OSEventObjectWaitTimeout(psTmp->hProducerEvent, EVENT_OBJECT_TIMEOUT_MS);
+				
+				OSLockAcquire (TLGGD()->hTLGDLock);
+
+				/* Ensure destruction of stream is still required */
+				if (0 != psTmp->psNode->uiWRefCount)
+				{
+					OSLockRelease (TLGGD()->hTLGDLock);
+					PVR_DPF_RETURN;
+				}
+			}
+		}
+
+		/* Try removing the stream from TL_GLOBAL_DATA */
+		bDestroyStream = TLTryRemoveStreamAndFreeStreamNode (psTmp->psNode);
+		
+		OSLockRelease (TLGGD()->hTLGDLock);
+		
+		if (bDestroyStream)
+		{
+			/* Destroy the stream if it was removed from TL_GLOBAL_DATA */
+			TLStreamDestroy (psTmp);
+			psTmp = IMG_NULL;
+		}
+		PVR_DPF_RETURN;
+	}
+}
+
+static PVRSRV_ERROR
+DoTLStreamReserve(IMG_HANDLE hStream,
+				IMG_UINT8 **ppui8Data, 
+				IMG_UINT32 ui32ReqSize,
+                IMG_UINT32 ui32ReqSizeMin,
+				PVRSRVTL_PACKETTYPE ePacketType,
+				IMG_UINT32* pui32AvSpace)
+{
+	PTL_STREAM psTmp;
+	IMG_UINT32 *pui32Buf, ui32LRead, ui32LWrite, ui32LPending, lReqSizeAligned, lReqSizeActual;
+	IMG_INT pad, iFreeSpace;
+
+	PVR_DPF_ENTERED;
+	if (pui32AvSpace) *pui32AvSpace = 0;
+
+	if (( IMG_NULL == hStream ))
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+	}
+	psTmp = (PTL_STREAM)hStream;
+
+	/* Assert used as the packet type parameter is currently only provided
+	 * by the TL APIs, not the calling client */
+	PVR_ASSERT((PVRSRVTL_PACKETTYPE_UNDEF < ePacketType) && (PVRSRVTL_PACKETTYPE_LAST >= ePacketType));
+
+	/* The buffer is only used in "rounded" (aligned) chunks */
+	lReqSizeAligned = PVRSRVTL_ALIGN(ui32ReqSize);
+
+	/* Lock the stream before reading it's pending value, because if pending is set
+	 * to NOTHING_PENDING, we update the pending value such that subsequent calls to
+	 * this function from other context(s) fail with PVRSRV_ERROR_NOT_READY */
+	OSLockAcquire (psTmp->hStreamLock);
+
+	/* Get a local copy of the stream buffer parameters */
+	ui32LRead  = psTmp->ui32Read ;
+	ui32LWrite = psTmp->ui32Write ;
+	ui32LPending = psTmp->ui32Pending ;
+
+	/*  Multiple pending reserves are not supported. */
+	if ( NOTHING_PENDING != ui32LPending )
+	{
+		OSLockRelease (psTmp->hStreamLock);
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_NOT_READY);
+	}
+
+	if ( IMG_UINT16_MAX < lReqSizeAligned )
+	{
+		psTmp->ui32Pending = NOTHING_PENDING;
+		if (pui32AvSpace)
+		{
+			*pui32AvSpace = suggestAllocSize(ui32LRead, ui32LWrite, psTmp->ui32Size, ui32ReqSizeMin);
+		}
+		OSLockRelease (psTmp->hStreamLock);
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_FULL);
+	}
+
+	/* Prevent other threads from entering this region before we are done updating
+	 * the pending value and write offset (incase of padding). This is not exactly 
+	 * a lock but a signal for other contexts that there is a TLStreamCommit operation
+	 * pending on this stream */
+	psTmp->ui32Pending = 0;
+
+	OSLockRelease (psTmp->hStreamLock);
+
+	/* If there is enough contiguous space following the current Write
+	 * position then no padding is required */
+	if (  psTmp->ui32Size
+		< ui32LWrite + lReqSizeAligned + sizeof(PVRSRVTL_PACKETHDR) )
+	{
+		pad = psTmp->ui32Size - ui32LWrite;
+	}
+	else
+	{
+		pad = 0 ;
+	}
+
+	lReqSizeActual = lReqSizeAligned + sizeof(PVRSRVTL_PACKETHDR) + pad ;
+	/* If this is a blocking reserve and there is not enough space then wait. */
+	if( psTmp->bBlock )
+	{
+		if( psTmp->ui32Size < lReqSizeActual )
+		{
+			/* Acquire stream lock for updating pending value */
+			OSLockAcquire (psTmp->hStreamLock);
+			psTmp->ui32Pending = NOTHING_PENDING;
+			OSLockRelease (psTmp->hStreamLock);
+			
+			PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_MISUSE);
+		}
+		while ( ( cbSpaceLeft(ui32LRead, ui32LWrite, psTmp->ui32Size)
+		         <(IMG_INT) lReqSizeActual ) )
+		{
+			OSEventObjectWait(psTmp->hProducerEvent);
+			// update local copies.
+			ui32LRead  = psTmp->ui32Read ;
+			ui32LWrite = psTmp->ui32Write ;
+		}
+	}
+
+	iFreeSpace = cbSpaceLeft(ui32LRead, ui32LWrite, psTmp->ui32Size);
+	
+	/* The easy case: buffer has enough space to hold the requested packet (data + header) */
+	if (  iFreeSpace >=(IMG_INT) lReqSizeActual )
+	{
+		if ( pad ) 
+		{ 
+			/* Inserting padding packet. */
+			pui32Buf = (IMG_UINT32*)&psTmp->pbyBuffer[ui32LWrite];
+			*pui32Buf = PVRSRVTL_SET_PACKET_PADDING(pad-sizeof(PVRSRVTL_PACKETHDR)) ;
+
+			/* CAUTION: the used pad value should always result in a properly 
+			 *          aligned ui32LWrite pointer, which in this case is 0 */
+			ui32LWrite = (ui32LWrite + pad) % psTmp->ui32Size;
+			/* Detect unaligned pad value */
+			PVR_ASSERT( ui32LWrite == 0);
+		}
+		/* Insert size-stamped packet header */
+		pui32Buf = (IMG_UINT32*) &psTmp->pbyBuffer[ui32LWrite];
+
+		*pui32Buf = PVRSRVTL_SET_PACKET_HDR(ui32ReqSize, ePacketType);
+
+		/* return the next position in the buffer to the user */
+		*ppui8Data =  &psTmp->pbyBuffer[ ui32LWrite+sizeof(PVRSRVTL_PACKETHDR) ] ;
+
+		/* update pending offset: size stamp + data  */
+		ui32LPending = lReqSizeAligned + sizeof(PVRSRVTL_PACKETHDR) ;
+	}
+	/* The not so easy case: not enough space, decide how to handle data */
+	else
+	{
+
+#if defined(DEBUG)
+		/* Sanity check that the user is not trying to add more data than the
+		 * buffer size. Conditionally compile it out to ensure this check has
+		 * no impact to release performance */
+		if ( lReqSizeAligned+sizeof(PVRSRVTL_PACKETHDR) > psTmp->ui32Size )
+		{
+			OSLockAcquire (psTmp->hStreamLock);
+			psTmp->ui32Pending = NOTHING_PENDING;
+			OSLockRelease (psTmp->hStreamLock);
+			PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_MISUSE);
+		}
+#endif
+
+		/* No data overwriting, insert write_failed flag and return */
+		if (psTmp->bDrop) 
+		{
+			/* Caller should not try to use ppui8Data,
+			 * NULLify to give user a chance of avoiding memory corruption */
+			ppui8Data = IMG_NULL;
+
+			/* This flag should not be inserted two consecutive times, so 
+			 * check the last ui32 in case it was a packet drop packet. */
+			pui32Buf =  ui32LWrite 
+					  ? 
+					    (IMG_UINT32*)&psTmp->pbyBuffer[ui32LWrite - sizeof(PVRSRVTL_PACKETHDR)]
+					   : // Previous four bytes are not guaranteed to be a packet header...
+					    (IMG_UINT32*)&psTmp->pbyBuffer[psTmp->ui32Size - PVRSRVTL_PACKET_ALIGNMENT];
+
+			if ( PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED
+				 != 
+				 GET_PACKET_TYPE( (PVRSRVTL_PACKETHDR*)pui32Buf ) )
+			{
+				/* Insert size-stamped packet header */
+				pui32Buf = (IMG_UINT32*)&psTmp->pbyBuffer[ui32LWrite];
+				*pui32Buf = PVRSRVTL_SET_PACKET_WRITE_FAILED ;
+				ui32LWrite += sizeof(PVRSRVTL_PACKETHDR);
+				iFreeSpace -= sizeof(PVRSRVTL_PACKETHDR);
+			}
+
+			OSLockAcquire (psTmp->hStreamLock);
+			psTmp->ui32Write = ui32LWrite;
+			psTmp->ui32Pending = NOTHING_PENDING;
+			OSLockRelease (psTmp->hStreamLock);
+			
+			if (pui32AvSpace)
+			{
+				*pui32AvSpace = suggestAllocSize(ui32LRead, ui32LWrite, psTmp->ui32Size, ui32ReqSizeMin);
+			}
+			PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_FULL);
+		} 
+	}
+
+	/* Acquire stream lock for updating stream parameters */
+	OSLockAcquire (psTmp->hStreamLock);
+	psTmp->ui32Write = ui32LWrite ;
+	psTmp->ui32Pending = ui32LPending ;
+	OSLockRelease (psTmp->hStreamLock);
+
+	PVR_DPF_RETURN_OK;
+}
+
+PVRSRV_ERROR
+TLStreamReserve(IMG_HANDLE hStream,
+				IMG_UINT8 **ppui8Data,
+				IMG_UINT32 ui32Size)
+{
+	return DoTLStreamReserve(hStream, ppui8Data, ui32Size, ui32Size, PVRSRVTL_PACKETTYPE_DATA, NULL);
+}
+
+PVRSRV_ERROR
+TLStreamReserve2(IMG_HANDLE hStream,
+                IMG_UINT8  **ppui8Data,
+                IMG_UINT32 ui32Size,
+                IMG_UINT32 ui32SizeMin,
+                IMG_UINT32* pui32Available)
+{
+	return DoTLStreamReserve(hStream, ppui8Data, ui32Size, ui32SizeMin, PVRSRVTL_PACKETTYPE_DATA, pui32Available);
+}
+
+PVRSRV_ERROR
+TLStreamCommit(IMG_HANDLE hStream, IMG_UINT32 ui32ReqSize)
+{
+	PTL_STREAM psTmp;
+	IMG_UINT32 ui32LRead, ui32OldWrite, ui32LWrite, ui32LPending;
+	PVRSRV_ERROR eError;
+
+	PVR_DPF_ENTERED;
+
+	if ( IMG_NULL == hStream )
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+	}
+	psTmp = (PTL_STREAM)hStream;
+
+	/* Get a local copy of the stream buffer parameters */
+	ui32LRead = psTmp->ui32Read ;
+	ui32LWrite = psTmp->ui32Write ;
+	ui32LPending = psTmp->ui32Pending ;
+
+	ui32OldWrite = ui32LWrite;
+
+	// Space in buffer is aligned
+	ui32ReqSize = PVRSRVTL_ALIGN(ui32ReqSize);
+
+	/* Sanity check. ReqSize + packet header size. */
+	if ( ui32LPending != ui32ReqSize + sizeof(PVRSRVTL_PACKETHDR) )
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_MISUSE);
+	}
+
+	/* Update pointer to written data. */
+	ui32LWrite = (ui32LWrite + ui32LPending) % psTmp->ui32Size;
+
+	/* and reset LPending to 0 since data are now submitted  */
+	ui32LPending = NOTHING_PENDING;
+
+	/* Calculate high water mark for debug purposes */
+#if defined(TL_BUFFER_UTILIZATION)
+	{
+		IMG_UINT32 tmp = 0;
+		if (ui32LWrite > ui32LRead)
+		{
+			tmp = (ui32LWrite-ui32LRead);
+		}
+		else if (ui32LWrite < ui32LRead)
+		{
+			tmp = (psTmp->ui32Size-ui32LRead+ui32LWrite);
+		} /* else equal, ignore */
+
+		if (tmp > psTmp->ui32BufferUt)
+		{
+			psTmp->ui32BufferUt = tmp;
+		}
+	}
+#endif
+
+	/* Acquire stream lock to ensure other context(s) (if any)
+	 * wait on the lock (in DoTLStreamReserve) for consistent values
+	 * of write offset and pending value */
+	OSLockAcquire (psTmp->hStreamLock);
+
+	/* Update stream buffer parameters to match local copies */
+	psTmp->ui32Write = ui32LWrite ;
+	psTmp->ui32Pending = ui32LPending ;
+
+	OSLockRelease (psTmp->hStreamLock);
+
+	/* If  we have transitioned from an empty buffer to a non-empty buffer,
+	 * signal any consumers that may be waiting */
+	if (ui32OldWrite == ui32LRead && !psTmp->bNoSignalOnCommit)
+	{
+		/* Signal consumers that may be waiting */
+		eError = OSEventObjectSignal(psTmp->psNode->hDataEventObj);
+		if ( eError != PVRSRV_OK)
+		{
+			PVR_DPF_RETURN_RC(eError);
+		}
+	}
+	
+	PVR_DPF_RETURN_OK;
+}
+
+PVRSRV_ERROR
+TLStreamWrite(IMG_HANDLE hStream, IMG_UINT8 *pui8Src, IMG_UINT32 ui32Size)
+{
+	IMG_BYTE *pbyDest = IMG_NULL;
+	PVRSRV_ERROR eError;
+
+	PVR_DPF_ENTERED;
+
+	if ( IMG_NULL == hStream )
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+	}
+
+	eError = TLStreamReserve(hStream, &pbyDest, ui32Size);
+	if ( PVRSRV_OK != eError ) 
+	{	
+		PVR_DPF_RETURN_RC(eError);
+	}
+	else
+	{
+		PVR_ASSERT ( pbyDest != NULL );
+		OSMemCopy((IMG_VOID*)pbyDest, (IMG_VOID*)pui8Src, ui32Size);
+		eError = TLStreamCommit(hStream, ui32Size);
+		if ( PVRSRV_OK != eError ) 
+		{	
+			PVR_DPF_RETURN_RC(eError);
+		}
+	}
+	PVR_DPF_RETURN_OK;
+}
+
+IMG_VOID TLStreamInfo(PTL_STREAM_INFO psInfo)
+{
+ 	IMG_DEVMEM_SIZE_T actual_req_size;
+	IMG_DEVMEM_ALIGN_T align = 4; /* Low dummy value so the real value can be obtained */
+
+ 	actual_req_size = 2; 
+	DevmemExportalignAdjustSizeAndAlign(IMG_NULL, &actual_req_size, &align);
+
+	psInfo->headerSize = sizeof(PVRSRVTL_PACKETHDR);
+	psInfo->minReservationSize = sizeof(IMG_UINT32);
+	psInfo->pageSize = (IMG_UINT32)(actual_req_size);
+	psInfo->pageAlign = (IMG_UINT32)(align);
+}
+
+PVRSRV_ERROR
+TLStreamMarkEOS(IMG_HANDLE psStream)
+{
+	PVRSRV_ERROR eError;
+	IMG_UINT8* pData;
+
+	PVR_DPF_ENTERED;
+
+	if ( IMG_NULL == psStream )
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+	}
+
+	eError = DoTLStreamReserve(psStream, &pData, 0, 0, PVRSRVTL_PACKETTYPE_MARKER_EOS, NULL);
+	if ( PVRSRV_OK !=  eError )
+	{
+		PVR_DPF_RETURN_RC(eError);
+	}
+
+	PVR_DPF_RETURN_RC(TLStreamCommit(psStream, 0));
+}
+
+PVRSRV_ERROR
+TLStreamSync(IMG_HANDLE psStream)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PTL_STREAM   psTmp;
+
+	PVR_DPF_ENTERED;
+
+	if ( IMG_NULL == psStream )
+	{
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+	}
+	psTmp = (PTL_STREAM)psStream;
+	
+	/* Signal clients only when data is available to read */
+	if (psTmp->ui32Read != psTmp->ui32Write)
+	{
+		eError = OSEventObjectSignal(psTmp->psNode->hDataEventObj);
+	}
+
+	PVR_DPF_RETURN_RC(eError);
+}
+
+/*
+ * Internal stream APIs to server part of Transport Layer, declared in
+ * header tlintern.h. Direct pointers to stream objects are used here as
+ * these functions are internal.
+ */
+IMG_UINT32
+TLStreamAcquireReadPos(PTL_STREAM psStream, IMG_UINT32* puiReadOffset)
+{
+	IMG_UINT32 uiReadLen = 0;
+	IMG_UINT32 ui32LRead, ui32LWrite;
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psStream);
+	PVR_ASSERT(puiReadOffset);
+
+	/* Grab a local copy */
+	ui32LRead = psStream->ui32Read;
+	ui32LWrite = psStream->ui32Write;
+
+	/* No data available and CB defined - try and get data */
+	if ((ui32LRead == ui32LWrite) && psStream->pfProducerCallback)
+	{
+		PVRSRV_ERROR eRc;
+		IMG_UINT32   ui32Resp = 0;
+
+		eRc = ((TL_STREAM_SOURCECB)psStream->pfProducerCallback)(psStream, TL_SOURCECB_OP_CLIENT_EOS,
+				&ui32Resp, psStream->pvProducerUserData);
+		PVR_LOG_IF_ERROR(eRc, "TLStream->pfProducerCallback");
+
+		ui32LWrite = psStream->ui32Write;
+	}
+
+	/* No data available... */
+	if (ui32LRead == ui32LWrite)
+	{
+		PVR_DPF_RETURN_VAL(0);
+	}
+
+	/* Data is available to read... */
+	*puiReadOffset = ui32LRead;
+
+	/*PVR_DPF((PVR_DBG_VERBOSE,
+	 *		"TLStreamAcquireReadPos Start before: Write:%d, Read:%d, size:%d",
+	 *		ui32LWrite, ui32LRead, psStream->ui32Size));
+	 */
+
+	if ( ui32LRead > ui32LWrite )
+	{	/* CB has wrapped around. 
+		 * Return the first contiguous piece of memory, ie [ReadLen,EndOfBuffer]
+		 * and let a subsequent AcquireReadPos read the rest of the Buffer */
+		/*PVR_DPF((PVR_DBG_VERBOSE, "TLStreamAcquireReadPos buffer has wrapped"));*/
+		uiReadLen = psStream->ui32Size - ui32LRead;
+	}
+	else
+	{	// CB has not wrapped
+		uiReadLen = ui32LWrite - ui32LRead;
+	}
+
+	PVR_DPF_RETURN_VAL(uiReadLen);
+}
+
+IMG_VOID
+TLStreamAdvanceReadPos(PTL_STREAM psStream, IMG_UINT32 uiReadLen)
+{
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psStream);
+
+	/* Update the read offset by the length provided in a circular manner.
+	 * Assuming the updation to be atomic hence, avoiding use of locks */
+	psStream->ui32Read = (psStream->ui32Read + uiReadLen) % psStream->ui32Size;
+
+	/* If this is a blocking reserve stream, 
+	 * notify reserves that may be pending */
+	if(psStream->bBlock)
+	{
+		PVRSRV_ERROR eError;
+		eError = OSEventObjectSignal(psStream->hProducerEventObj);
+		if ( eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_WARNING,
+					 "Error in TLStreamAdvanceReadPos: OSEventObjectSignal returned:%u",
+					 eError));
+		}
+	}
+
+	PVR_DPF((PVR_DBG_VERBOSE,
+			 "TLStreamAdvanceReadPos Read now at: %d",
+			psStream->ui32Read));
+	PVR_DPF_RETURN;
+}
+
+IMG_VOID
+TLStreamDestroy (PTL_STREAM psStream)
+{
+	PVR_ASSERT (psStream);
+	
+	OSLockDestroy (psStream->hStreamLock);
+
+	/* If block-while-reserve stream, the stream's hProducerEvent and hProducerEventObj
+	 * need to be cleaned as well */
+	if ( IMG_TRUE == psStream->bBlock ) 
+	{
+		OSEventObjectClose(psStream->hProducerEvent);
+		OSEventObjectDestroy(psStream->hProducerEventObj);
+	}
+
+	DevmemUnexport(psStream->psStreamMemDesc, &psStream->sExportCookie);
+	DevmemReleaseCpuVirtAddr(psStream->psStreamMemDesc);
+	DevmemFree(psStream->psStreamMemDesc);
+	OSFREEMEM(psStream);
+}
+
+DEVMEM_EXPORTCOOKIE*
+TLStreamGetBufferCookie(PTL_STREAM psStream)
+{
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psStream);
+
+	PVR_DPF_RETURN_VAL(&psStream->sExportCookie);
+}
+
+IMG_BOOL
+TLStreamEOS(PTL_STREAM psStream)
+{
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psStream);
+
+	/* If both pointers are equal then the buffer is empty */
+	PVR_DPF_RETURN_VAL( psStream->ui32Read == psStream->ui32Write );
+}
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/debugmisc_server.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/debugmisc_server.c
new file mode 100644
index 0000000..973b9bc
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/debugmisc_server.c
@@ -0,0 +1,141 @@
+/*************************************************************************/ /*!
+@File
+@Title          Debugging and miscellaneous functions server implementation
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Kernel services functions for debugging and other
+                miscellaneous functionality.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvrsrv.h"
+#include "pvr_debug.h"
+#include "debugmisc_server.h"
+#include "rgxfwutils.h"
+#include "rgxta3d.h"
+#include "pdump_km.h"
+#include "mmu_common.h"
+#include "devicemem_server.h"
+#include "osfunc.h"
+
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVDebugMiscSLCSetBypassStateKM(
+	PVRSRV_DEVICE_NODE *psDeviceNode,
+	IMG_UINT32  uiFlags,
+	IMG_BOOL bSetBypassed)
+{
+	RGXFWIF_KCCB_CMD  sSLCBPCtlCmd;
+	PVRSRV_ERROR  eError = PVRSRV_OK;
+
+	sSLCBPCtlCmd.eCmdType = RGXFWIF_KCCB_CMD_SLCBPCTL;
+	sSLCBPCtlCmd.uCmdData.sSLCBPCtlData.bSetBypassed = bSetBypassed;
+	sSLCBPCtlCmd.uCmdData.sSLCBPCtlData.uiFlags = uiFlags;
+
+	eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+	                            RGXFWIF_DM_GP,
+	                            &sSLCBPCtlCmd,
+	                            sizeof(sSLCBPCtlCmd),
+	                            IMG_TRUE);
+	if(eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVDebugMiscSLCSetEnableStateKM: RGXScheduleCommandfailed. Error:%u", eError));
+	}
+	else
+	{
+		/* Wait for the SLC flush to complete */
+		eError = RGXWaitForFWOp(psDeviceNode->pvDevice, RGXFWIF_DM_GP, psDeviceNode->psSyncPrim, IMG_TRUE);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"PVRSRVDebugMiscSLCSetEnableStateKM: Waiting for value aborted with error (%u)", eError));
+		}
+	}
+
+	return PVRSRV_OK;
+}
+
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVRGXDebugMiscSetFWLogKM(
+	PVRSRV_DEVICE_NODE *psDeviceNode,
+	IMG_UINT32  ui32RGXFWLogType)
+{
+	PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice;
+
+	/* check log type is valid */
+	if (ui32RGXFWLogType & ~RGXFWIF_LOG_TYPE_MASK)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* set the new log type */
+	psDevInfo->psRGXFWIfTraceBuf->ui32LogType = ui32RGXFWLogType;
+
+	return PVRSRV_OK;
+
+}
+
+static IMG_BOOL
+_RGXDumpFreeListPageList(PDLLIST_NODE psNode, IMG_PVOID pvCallbackData)
+{
+	RGX_FREELIST *psFreeList = IMG_CONTAINER_OF(psNode, RGX_FREELIST, sNode);
+
+	RGXDumpFreeListPageList(psFreeList);
+
+	return IMG_TRUE;
+}
+
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVRGXDebugMiscDumpFreelistPageListKM(
+	PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice;
+
+	if (dllist_is_empty(&psDevInfo->sFreeListHead))
+	{
+		return PVRSRV_OK;
+	}
+
+	PVR_LOG(("---------------[ Begin Freelist Page List Dump ]------------------"));
+
+	OSLockAcquire(psDevInfo->hLockFreeList);
+	dllist_foreach_node(&psDevInfo->sFreeListHead, _RGXDumpFreeListPageList, IMG_NULL);
+	OSLockRelease(psDevInfo->hLockFreeList);
+
+	PVR_LOG(("----------------[ End Freelist Page List Dump ]-------------------"));
+
+	return PVRSRV_OK;
+
+}
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/debugmisc_server.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/debugmisc_server.h
new file mode 100644
index 0000000..aa2caa1
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/debugmisc_server.h
@@ -0,0 +1,75 @@
+/*************************************************************************/ /*!
+@File
+@Title          Debugging and miscellaneous functions server interface
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Kernel services functions for debugging and other
+                miscellaneous functionality.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if ! defined(DEBUGMISC_SERVER_H)
+#define DEBUGMISC_SERVER_H
+
+#include <img_defs.h>
+#include <pvrsrv_error.h>
+#include <device.h>
+#include <pmr.h>
+
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVDebugMiscSLCSetBypassStateKM(
+	PVRSRV_DEVICE_NODE *psDeviceNode,
+	IMG_UINT32  uiFlags,
+	IMG_BOOL  bSetBypassed);
+
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVDebugMiscInitFWImageKM(
+	PMR *psFWImgDestPMR,
+	PMR *psFWImgSrcPMR,
+	IMG_UINT64 ui64FWImgLen,
+	PMR *psFWImgSigPMR,
+	IMG_UINT64 ui64FWSigLen);
+
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVRGXDebugMiscSetFWLogKM(
+	PVRSRV_DEVICE_NODE *psDeviceNode,
+	IMG_UINT32  ui32RGXFWLogType);
+
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVRGXDebugMiscDumpFreelistPageListKM(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+#endif
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxbreakpoint.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxbreakpoint.c
new file mode 100644
index 0000000..b4e3d7a
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxbreakpoint.c
@@ -0,0 +1,269 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX Breakpoint routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX Breakpoint routines
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "rgxbreakpoint.h"
+#include "pvr_debug.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgxmem.h"
+#include "device.h"
+#include "sync_internal.h"
+#include "pdump_km.h"
+#include "pvrsrv.h"
+
+PVRSRV_ERROR PVRSRVRGXSetBreakpointKM(PVRSRV_DEVICE_NODE	*psDeviceNode,
+					IMG_HANDLE		hMemCtxPrivData,
+					RGXFWIF_DM		eFWDataMaster,
+					IMG_UINT32		ui32BPAddr,
+					IMG_UINT32		ui32HandlerAddr,
+					IMG_UINT32		ui32DataMaster)
+{
+	DEVMEM_MEMDESC		*psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+	PVRSRV_ERROR 		eError = PVRSRV_OK;
+	RGXFWIF_KCCB_CMD 	sBPCmd;
+	
+	if (psDeviceNode->psDevConfig->bBPSet == IMG_TRUE)
+		return PVRSRV_ERROR_BP_ALREADY_SET;
+	
+	sBPCmd.eCmdType = RGXFWIF_KCCB_CMD_BP;
+	sBPCmd.uCmdData.sBPData.ui32BPAddr = ui32BPAddr;
+	sBPCmd.uCmdData.sBPData.ui32HandlerAddr = ui32HandlerAddr;
+	sBPCmd.uCmdData.sBPData.ui32BPDM = ui32DataMaster;
+	sBPCmd.uCmdData.sBPData.bEnable = IMG_TRUE;
+	sBPCmd.uCmdData.sBPData.ui32Flags = RGXFWIF_BPDATA_FLAGS_WRITE;
+
+	RGXSetFirmwareAddress(&sBPCmd.uCmdData.sBPData.psFWMemContext, 
+				psFWMemContextMemDesc, 
+				0 , 
+				RFW_FWADDR_NOREF_FLAG);
+		
+	eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+				eFWDataMaster,
+				&sBPCmd,
+				sizeof(sBPCmd),
+				IMG_TRUE);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXSetBreakpointKM: RGXScheduleCommand failed. Error:%u", eError));
+		return eError;
+	}
+
+	/* Wait for FW to complete */
+	eError = RGXWaitForFWOp(psDeviceNode->pvDevice, eFWDataMaster, psDeviceNode->psSyncPrim, IMG_TRUE);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXSetBreakpointKM: Wait for completion aborted with error (%u)", eError));
+		return eError;
+	}
+
+	psDeviceNode->psDevConfig->eBPDM = eFWDataMaster;
+	psDeviceNode->psDevConfig->bBPSet = IMG_TRUE;
+	
+	return eError;
+}
+
+PVRSRV_ERROR PVRSRVRGXClearBreakpointKM(PVRSRV_DEVICE_NODE	*psDeviceNode,
+					IMG_HANDLE		hMemCtxPrivData)
+{
+	DEVMEM_MEMDESC		*psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+	PVRSRV_ERROR 		eError = PVRSRV_OK;
+	RGXFWIF_KCCB_CMD 	sBPCmd;
+	RGXFWIF_DM			eDataMaster = psDeviceNode->psDevConfig->eBPDM;
+	
+	sBPCmd.eCmdType = RGXFWIF_KCCB_CMD_BP;
+	sBPCmd.uCmdData.sBPData.ui32BPAddr = 0;
+	sBPCmd.uCmdData.sBPData.ui32HandlerAddr = 0;
+	sBPCmd.uCmdData.sBPData.bEnable = IMG_FALSE;
+	sBPCmd.uCmdData.sBPData.ui32Flags = RGXFWIF_BPDATA_FLAGS_WRITE | RGXFWIF_BPDATA_FLAGS_CTL;
+	
+	RGXSetFirmwareAddress(&sBPCmd.uCmdData.sBPData.psFWMemContext, 
+				psFWMemContextMemDesc, 
+				0 , 
+				RFW_FWADDR_NOREF_FLAG);
+
+	eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+				eDataMaster,
+				&sBPCmd,
+				sizeof(sBPCmd),
+				IMG_TRUE);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXClearBreakpointKM: RGXScheduleCommand failed. Error:%u", eError));
+		return eError;
+	}
+
+	/* Wait for FW to complete */
+	eError = RGXWaitForFWOp(psDeviceNode->pvDevice, eDataMaster, psDeviceNode->psSyncPrim, IMG_TRUE);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXClearBreakpointKM: Wait for completion aborted with error (%u)", eError));
+		return eError;
+	}
+
+	psDeviceNode->psDevConfig->bBPSet = IMG_FALSE;
+	
+	return eError;
+}
+
+PVRSRV_ERROR PVRSRVRGXEnableBreakpointKM(PVRSRV_DEVICE_NODE	*psDeviceNode,
+					IMG_HANDLE		hMemCtxPrivData)
+{
+	DEVMEM_MEMDESC		*psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+	PVRSRV_ERROR 		eError = PVRSRV_OK;
+	RGXFWIF_KCCB_CMD 	sBPCmd;
+	RGXFWIF_DM			eDataMaster = psDeviceNode->psDevConfig->eBPDM;
+	
+	if (psDeviceNode->psDevConfig->bBPSet == IMG_FALSE)
+		return PVRSRV_ERROR_BP_NOT_SET;
+	
+	sBPCmd.eCmdType = RGXFWIF_KCCB_CMD_BP;
+	sBPCmd.uCmdData.sBPData.bEnable = IMG_TRUE;
+	sBPCmd.uCmdData.sBPData.ui32Flags = RGXFWIF_BPDATA_FLAGS_CTL;
+	
+	RGXSetFirmwareAddress(&sBPCmd.uCmdData.sBPData.psFWMemContext, 
+				psFWMemContextMemDesc, 
+				0 , 
+				RFW_FWADDR_NOREF_FLAG);
+
+	eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+				eDataMaster,
+				&sBPCmd,
+				sizeof(sBPCmd),
+				IMG_TRUE);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXEnableBreakpointKM: RGXScheduleCommand failed. Error:%u", eError));
+		return eError;
+	}
+
+	/* Wait for FW to complete */
+	eError = RGXWaitForFWOp(psDeviceNode->pvDevice, eDataMaster, psDeviceNode->psSyncPrim, IMG_TRUE);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXEnableBreakpointKM: Wait for completion aborted with error (%u)", eError));
+		return eError;
+	}
+	
+	return eError;
+}
+
+PVRSRV_ERROR PVRSRVRGXDisableBreakpointKM(PVRSRV_DEVICE_NODE	*psDeviceNode,
+					IMG_HANDLE		hMemCtxPrivData)
+{
+	DEVMEM_MEMDESC		*psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+	PVRSRV_ERROR 		eError = PVRSRV_OK;
+	RGXFWIF_KCCB_CMD 	sBPCmd;
+	RGXFWIF_DM			eDataMaster = psDeviceNode->psDevConfig->eBPDM;
+	
+	if (psDeviceNode->psDevConfig->bBPSet == IMG_FALSE)
+		return PVRSRV_ERROR_BP_NOT_SET;
+	
+	sBPCmd.eCmdType = RGXFWIF_KCCB_CMD_BP;
+	sBPCmd.uCmdData.sBPData.bEnable = IMG_FALSE;
+	sBPCmd.uCmdData.sBPData.ui32Flags = RGXFWIF_BPDATA_FLAGS_CTL;
+	
+	RGXSetFirmwareAddress(&sBPCmd.uCmdData.sBPData.psFWMemContext, 
+				psFWMemContextMemDesc, 
+				0 , 
+				RFW_FWADDR_NOREF_FLAG);
+	
+	eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+				eDataMaster,
+				&sBPCmd,
+				sizeof(sBPCmd),
+				IMG_TRUE);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXDisableBreakpointKM: RGXScheduleCommand failed. Error:%u", eError));
+		return eError;
+	}
+
+	/* Wait for FW to complete */
+	eError = RGXWaitForFWOp(psDeviceNode->pvDevice, eDataMaster, psDeviceNode->psSyncPrim, IMG_TRUE);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXDisableBreakpointKM: Wait for completion aborted with error (%u)", eError));
+		return eError;
+	}
+				
+	return eError;
+}
+
+PVRSRV_ERROR PVRSRVRGXOverallocateBPRegistersKM(PVRSRV_DEVICE_NODE	*psDeviceNode,
+					IMG_UINT32		ui32TempRegs,
+					IMG_UINT32		ui32SharedRegs)
+{
+	PVRSRV_ERROR 		eError = PVRSRV_OK;
+	RGXFWIF_KCCB_CMD 	sBPCmd;
+	
+	sBPCmd.eCmdType = RGXFWIF_KCCB_CMD_BP;
+	sBPCmd.uCmdData.sBPData.ui32Flags = RGXFWIF_BPDATA_FLAGS_REGS;
+	sBPCmd.uCmdData.sBPData.ui32TempRegs = ui32TempRegs;
+	sBPCmd.uCmdData.sBPData.ui32SharedRegs = ui32SharedRegs;
+
+	eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+				RGXFWIF_DM_GP,
+				&sBPCmd,
+				sizeof(sBPCmd),
+				IMG_TRUE);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXOverallocateBPRegistersKM: RGXScheduleCommand failed. Error:%u", eError));
+		return eError;
+	}
+
+	/* Wait for FW to complete */
+	eError = RGXWaitForFWOp(psDeviceNode->pvDevice, RGXFWIF_DM_GP, psDeviceNode->psSyncPrim, IMG_TRUE);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXOverallocateBPRegistersKM: Wait for completion aborted with error (%u)", eError));
+		return eError;
+	}
+
+	return eError;
+}
+
+
+/******************************************************************************
+ End of file (rgxbreakpoint.c)
+******************************************************************************/
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxbreakpoint.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxbreakpoint.h
new file mode 100644
index 0000000..44792d6
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxbreakpoint.h
@@ -0,0 +1,136 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX breakpoint functionality
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX breakpoint functionality
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXBREAKPOINT_H__)
+#define __RGXBREAKPOINT_H__
+
+#include "pvr_debug.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgx_fwif_km.h"
+
+/*!
+*******************************************************************************
+ @Function	PVRSRVRGXSetBreakpointKM
+
+ @Description
+	Server-side implementation of RGXSetBreakpoint
+
+ @Input psDeviceNode - RGX Device node
+ @Input eDataMaster - Data Master to schedule command for
+ @Input hMemCtxPrivData - memory context private data
+ @Input ui32BPAddr - Address of breakpoint
+ @Input ui32HandlerAddr - Address of breakpoint handler
+ @Input ui32BPCtl - Breakpoint controls
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXSetBreakpointKM(PVRSRV_DEVICE_NODE	*psDeviceNode,
+					IMG_HANDLE		hMemCtxPrivData,
+					RGXFWIF_DM		eFWDataMaster,
+					IMG_UINT32		ui32BPAddr,
+					IMG_UINT32		ui32HandlerAddr,
+					IMG_UINT32		ui32DataMaster);
+
+/*!
+*******************************************************************************
+ @Function	PVRSRVRGXClearBreakpointKM
+
+ @Description
+	Server-side implementation of RGXClearBreakpoint
+
+ @Input psDeviceNode - RGX Device node
+ @Input hMemCtxPrivData - memory context private data
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXClearBreakpointKM(PVRSRV_DEVICE_NODE	*psDeviceNode,
+					IMG_HANDLE		hMemCtxPrivData);
+
+/*!
+*******************************************************************************
+ @Function	PVRSRVRGXEnableBreakpointKM
+
+ @Description
+	Server-side implementation of RGXEnableBreakpoint
+
+ @Input psDeviceNode - RGX Device node
+ @Input hMemCtxPrivData - memory context private data
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXEnableBreakpointKM(PVRSRV_DEVICE_NODE	*psDeviceNode,
+					IMG_HANDLE		hMemCtxPrivData);
+
+/*!
+*******************************************************************************
+ @Function	PVRSRVRGXDisableBreakpointKM
+
+ @Description
+	Server-side implementation of RGXDisableBreakpoint
+
+ @Input psDeviceNode - RGX Device node
+ @Input hMemCtxPrivData - memory context private data
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXDisableBreakpointKM(PVRSRV_DEVICE_NODE	*psDeviceNode,
+					IMG_HANDLE		hMemCtxPrivData);
+
+/*!
+*******************************************************************************
+ @Function	PVRSRVRGXOverallocateBPRegistersKM
+
+ @Description
+	Server-side implementation of RGXOverallocateBPRegisters
+
+ @Input psDeviceNode - RGX Device node
+ @Input ui32TempRegs - Number of temporary registers to overallocate
+ @Input ui32SharedRegs - Number of shared registers to overallocate
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/					
+PVRSRV_ERROR PVRSRVRGXOverallocateBPRegistersKM(PVRSRV_DEVICE_NODE	*psDeviceNode,
+					IMG_UINT32		ui32TempRegs,
+					IMG_UINT32		ui32SharedRegs);
+#endif /* __RGXBREAKPOINT_H__ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxccb.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxccb.c
new file mode 100644
index 0000000..2fb0085
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxccb.c
@@ -0,0 +1,1560 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX CCB routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX CCB routines
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvr_debug.h"
+#include "rgxdevice.h"
+#include "pdump_km.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "rgxfwutils.h"
+#include "osfunc.h"
+#include "rgxccb.h"
+#include "rgx_memallocflags.h"
+#include "devicemem_pdump.h"
+#include "pvr_debug.h"
+#include "dllist.h"
+#include "rgx_fwif_shared.h"
+#include "rgxtimerquery.h"
+#if defined(LINUX)
+#include "trace_events.h"
+#endif
+
+/*
+ *  Defines the number of fence updates to record so that future fences in the CCB
+ *  can be checked to see if they are already known to be satisfied. The value has
+ *  implications for memory and host CPU usage and so should be tuned by using
+ *  firmware performance measurements to trade these off against performance gains.
+ *
+ *  Must be a power of 2!
+ */
+#define RGX_CCCB_FENCE_UPDATE_LIST_SIZE  (64)
+
+
+struct _RGX_CLIENT_CCB_ {
+	volatile RGXFWIF_CCCB_CTL	*psClientCCBCtrl;			/*!< CPU mapping of the CCB control structure used by the fw */
+	IMG_UINT8					*pui8ClientCCB;				/*!< CPU mapping of the CCB */
+	DEVMEM_MEMDESC 				*psClientCCBMemDesc;		/*!< MemDesc for the CCB */
+	DEVMEM_MEMDESC 				*psClientCCBCtrlMemDesc;		/*!< MemDesc for the CCB control */
+	IMG_UINT32					ui32HostWriteOffset;		/*!< CCB write offset from the driver side */
+	IMG_UINT32					ui32LastPDumpWriteOffset;			/*!< CCB write offset from the last time we submitted a command in capture range */
+	IMG_UINT32					ui32LastROff;				/*!< Last CCB Read offset to help detect any CCB wedge */
+	IMG_UINT32					ui32LastWOff;				/*!< Last CCB Write offset to help detect any CCB wedge */
+	IMG_UINT32					ui32ByteCount;				/*!< Count of the number of bytes written to CCCB */
+	IMG_UINT32					ui32LastByteCount;			/*!< Last value of ui32ByteCount to help detect any CCB wedge */
+	IMG_UINT32					ui32Size;					/*!< Size of the CCB */
+	DLLIST_NODE					sNode;						/*!< Node used to store this CCB on the per connection list */
+	PDUMP_CONNECTION_DATA		*psPDumpConnectionData;		/*!< Pointer to the per connection data in which we reside */
+	IMG_PVOID					hTransition;				/*!< Handle for Transition callback */
+	IMG_CHAR					szName[MAX_CLIENT_CCB_NAME];/*!< Name of this client CCB */
+	RGX_SERVER_COMMON_CONTEXT   *psServerCommonContext;     /*!< Parent server common context that this CCB belongs to */
+#if defined REDUNDANT_SYNCS_DEBUG
+	IMG_UINT32					ui32UpdateWriteIndex;		/*!< Next position to overwrite in Fence Update List */
+	RGXFWIF_UFO					asFenceUpdateList[RGX_CCCB_FENCE_UPDATE_LIST_SIZE];  /*!< Cache of recent updates written in this CCB */
+#endif
+};
+
+static PVRSRV_ERROR _RGXCCBPDumpTransition(IMG_PVOID *pvData, IMG_BOOL bInto, IMG_BOOL bContinuous)
+{
+	RGX_CLIENT_CCB *psClientCCB = (RGX_CLIENT_CCB *) pvData;
+	
+	IMG_UINT32 ui32PDumpFlags = bContinuous ? PDUMP_FLAGS_CONTINUOUS:0;
+
+	/*
+		We're about to Transition into capture range and we've submitted
+		new commands since the last time we entered capture range so drain
+		the CCB as required
+	*/
+	if (bInto)
+	{
+		volatile RGXFWIF_CCCB_CTL *psCCBCtl = psClientCCB->psClientCCBCtrl;
+		PVRSRV_ERROR eError;
+
+		/*
+			Wait for the FW to catch up (retry will get pushed back out services
+			client where we wait on the event object and try again later)
+		*/
+		if (psClientCCB->psClientCCBCtrl->ui32ReadOffset != psClientCCB->ui32HostWriteOffset)
+		{
+			return PVRSRV_ERROR_RETRY;
+		}
+
+		/*
+			We drain whenever capture range is entered. Even if no commands
+			have been issued while where out of capture range we have to wait for
+			operations that we might have issued in the last capture range
+			to finish so the sync prim update that will happen after all the
+			PDumpTransition callbacks have been called doesn't clobber syncs
+			which the FW is currently working on.
+			Although this is suboptimal, while out of capture range for every
+			persistent operation we serialise the PDump script processing and
+			the FW, there is no easy solution.
+			Not all modules that work on syncs register a PDumpTransition and
+			thus we have no way of knowing if we can skip drain and the sync
+			prim dump or not.
+		*/
+		PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags,
+							  "cCCB(%s@%p): Draining rgxfw_roff == woff (%d)",
+							  psClientCCB->szName,
+							  psClientCCB,
+							  psClientCCB->ui32LastPDumpWriteOffset);
+
+		eError = DevmemPDumpDevmemPol32(psClientCCB->psClientCCBCtrlMemDesc,
+										offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset),
+										psClientCCB->ui32LastPDumpWriteOffset,
+										0xffffffff,
+										PDUMP_POLL_OPERATOR_EQUAL,
+										ui32PDumpFlags);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_WARNING, "_RGXCCBPDumpTransition: problem pdumping POL for cCCBCtl (%d)", eError));
+		}
+		PVR_ASSERT(eError == PVRSRV_OK);
+
+		/*
+			If new command(s) have been written out of capture range then we
+			need to fast forward past uncaptured operations.
+		*/
+		if (psClientCCB->ui32LastPDumpWriteOffset != psClientCCB->ui32HostWriteOffset)
+		{
+			/*
+				There are commands that where not captured so after the
+				simulation drain (above) we also need to fast-forward pass
+				those commands so the FW can start with the 1st command
+				which is in the new capture range
+			 */
+			psCCBCtl->ui32ReadOffset = psClientCCB->ui32HostWriteOffset;
+			psCCBCtl->ui32DepOffset = psClientCCB->ui32HostWriteOffset;
+			psCCBCtl->ui32WriteOffset = psClientCCB->ui32HostWriteOffset;
+	
+			PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags,
+								  "cCCB(%s@%p): Fast-forward from %d to %d",
+								  psClientCCB->szName,
+								  psClientCCB,
+								  psClientCCB->ui32LastPDumpWriteOffset,
+								  psClientCCB->ui32HostWriteOffset);
+	
+			DevmemPDumpLoadMem(psClientCCB->psClientCCBCtrlMemDesc,
+							   0,
+							   sizeof(RGXFWIF_CCCB_CTL),
+							   ui32PDumpFlags);
+							   
+			/*
+				Although we've entered capture range we might not do any work
+				on this CCB so update the ui32LastPDumpWriteOffset to reflect
+				where we got to for next so we start the drain from where we
+				got to last time
+			*/
+			psClientCCB->ui32LastPDumpWriteOffset = psClientCCB->ui32HostWriteOffset;
+		}
+	}
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXCreateCCB(PVRSRV_DEVICE_NODE	*psDeviceNode,
+						  IMG_UINT32			ui32CCBSizeLog2,
+						  CONNECTION_DATA		*psConnectionData,
+						  const IMG_CHAR		*pszName,
+						  RGX_SERVER_COMMON_CONTEXT *psServerCommonContext,
+						  RGX_CLIENT_CCB		**ppsClientCCB,
+						  DEVMEM_MEMDESC 		**ppsClientCCBMemDesc,
+						  DEVMEM_MEMDESC 		**ppsClientCCBCtrlMemDesc)
+{
+	PVRSRV_ERROR	eError;
+	DEVMEM_FLAGS_T	uiClientCCBMemAllocFlags, uiClientCCBCtlMemAllocFlags;
+	IMG_UINT32		ui32AllocSize = (1U << ui32CCBSizeLog2);
+	RGX_CLIENT_CCB	*psClientCCB;
+
+	psClientCCB = OSAllocMem(sizeof(*psClientCCB));
+	if (psClientCCB == IMG_NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_alloc;
+	}
+	psClientCCB->psServerCommonContext = psServerCommonContext;
+
+	uiClientCCBMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+	                            PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(META_CACHED) |
+								PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+								PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+								PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+								PVRSRV_MEMALLOCFLAG_UNCACHED |
+								PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+								PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE;
+
+	uiClientCCBCtlMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+								PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+								PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+								PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+								/* FIXME: Client CCB Ctl should be read-only for the CPU 
+									(it is not because for now we initialize it from the host) */
+								PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | 
+								PVRSRV_MEMALLOCFLAG_UNCACHED |
+								PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+								PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE;
+
+	PDUMPCOMMENT("Allocate RGXFW cCCB");
+	eError = DevmemFwAllocateExportable(psDeviceNode,
+										ui32AllocSize,
+										uiClientCCBMemAllocFlags,
+										"FirmwareClientCCB",
+										&psClientCCB->psClientCCBMemDesc);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateCCBKM: Failed to allocate RGX client CCB (%s)",
+				PVRSRVGetErrorStringKM(eError)));
+		goto fail_alloc_ccb;
+	}
+
+	eError = DevmemAcquireCpuVirtAddr(psClientCCB->psClientCCBMemDesc,
+									  (IMG_VOID **) &psClientCCB->pui8ClientCCB);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateCCBKM: Failed to map RGX client CCB (%s)",
+				PVRSRVGetErrorStringKM(eError)));
+		goto fail_map_ccb;
+	}
+
+	PDUMPCOMMENT("Allocate RGXFW cCCB control");
+	eError = DevmemFwAllocateExportable(psDeviceNode,
+										sizeof(RGXFWIF_CCCB_CTL),
+										uiClientCCBCtlMemAllocFlags,
+										"FirmwareClientCCBControl",
+										&psClientCCB->psClientCCBCtrlMemDesc);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateCCBKM: Failed to allocate RGX client CCB control (%s)",
+				PVRSRVGetErrorStringKM(eError)));
+		goto fail_alloc_ccbctrl;
+	}
+
+	eError = DevmemAcquireCpuVirtAddr(psClientCCB->psClientCCBCtrlMemDesc,
+									  (IMG_VOID **) &psClientCCB->psClientCCBCtrl);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateCCBKM: Failed to map RGX client CCB (%s)",
+				PVRSRVGetErrorStringKM(eError)));
+		goto fail_map_ccbctrl;
+	}
+
+	psClientCCB->psClientCCBCtrl->ui32WriteOffset = 0;
+	psClientCCB->psClientCCBCtrl->ui32ReadOffset = 0;
+	psClientCCB->psClientCCBCtrl->ui32DepOffset = 0;
+	psClientCCB->psClientCCBCtrl->ui32WrapMask = ui32AllocSize - 1;
+	OSSNPrintf(psClientCCB->szName, MAX_CLIENT_CCB_NAME, "%s-P%lu-T%lu-%s",
+									pszName,
+									(unsigned long) OSGetCurrentProcessID(),
+									(unsigned long) OSGetCurrentThreadID(),
+									OSGetCurrentProcessName());
+
+	PDUMPCOMMENT("cCCB control");
+	DevmemPDumpLoadMem(psClientCCB->psClientCCBCtrlMemDesc,
+					   0,
+					   sizeof(RGXFWIF_CCCB_CTL),
+					   PDUMP_FLAGS_CONTINUOUS);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	psClientCCB->ui32HostWriteOffset = 0;
+	psClientCCB->ui32LastPDumpWriteOffset = 0;
+	psClientCCB->ui32Size = ui32AllocSize;
+	psClientCCB->ui32LastROff = ui32AllocSize - 1;
+	psClientCCB->ui32ByteCount = 0;
+	psClientCCB->ui32LastByteCount = 0;
+
+#if defined REDUNDANT_SYNCS_DEBUG
+	psClientCCB->ui32UpdateWriteIndex = 0;
+	OSMemSet(psClientCCB->asFenceUpdateList, 0, sizeof(psClientCCB->asFenceUpdateList));
+#endif
+
+	eError = PDumpRegisterTransitionCallback(psConnectionData->psPDumpConnectionData,
+											  _RGXCCBPDumpTransition,
+											  psClientCCB,
+											  &psClientCCB->hTransition);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_pdumpreg;
+	}
+
+	/*
+	 * Note:
+	 * Save the PDump specific structure, which is ref counted unlike
+	 * the connection data, to ensure it's not freed too early
+	 */
+	psClientCCB->psPDumpConnectionData = psConnectionData->psPDumpConnectionData;
+	PDUMPCOMMENT("New RGXFW cCCB(%s@%p) created",
+				 psClientCCB->szName,
+				 psClientCCB);
+
+	*ppsClientCCB = psClientCCB;
+	*ppsClientCCBMemDesc = psClientCCB->psClientCCBMemDesc;
+	*ppsClientCCBCtrlMemDesc = psClientCCB->psClientCCBCtrlMemDesc;
+	return PVRSRV_OK;
+
+fail_pdumpreg:
+	DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBCtrlMemDesc);
+fail_map_ccbctrl:
+	DevmemFwFree(psClientCCB->psClientCCBCtrlMemDesc);
+fail_alloc_ccbctrl:
+	DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBMemDesc);
+fail_map_ccb:
+	DevmemFwFree(psClientCCB->psClientCCBMemDesc);
+fail_alloc_ccb:
+	OSFreeMem(psClientCCB);
+fail_alloc:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+IMG_VOID RGXDestroyCCB(RGX_CLIENT_CCB *psClientCCB)
+{
+	PDumpUnregisterTransitionCallback(psClientCCB->hTransition);
+	DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBCtrlMemDesc);
+	DevmemFwFree(psClientCCB->psClientCCBCtrlMemDesc);
+	DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBMemDesc);
+	DevmemFwFree(psClientCCB->psClientCCBMemDesc);
+	OSFreeMem(psClientCCB);
+}
+
+
+static PVRSRV_ERROR _RGXAcquireCCB(RGX_CLIENT_CCB	*psClientCCB,
+								   IMG_UINT32		ui32CmdSize,
+								   IMG_PVOID		*ppvBufferSpace)
+{
+	IMG_UINT32 ui32FreeSpace;
+
+#if defined(PDUMP)
+	/* Wait for sufficient CCB space to become available */
+	PDUMPCOMMENTWITHFLAGS(0, "Wait for %u bytes to become available according cCCB Ctl (woff=%x) for %s",
+							ui32CmdSize, psClientCCB->ui32HostWriteOffset,
+							psClientCCB->szName);
+	DevmemPDumpCBP(psClientCCB->psClientCCBCtrlMemDesc,
+	               offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset),
+	               psClientCCB->ui32HostWriteOffset,
+	               ui32CmdSize,
+	               psClientCCB->ui32Size);
+#endif
+
+	ui32FreeSpace = GET_CCB_SPACE(psClientCCB->ui32HostWriteOffset,
+								  psClientCCB->psClientCCBCtrl->ui32ReadOffset,
+								  psClientCCB->ui32Size);
+
+	/* Don't allow all the space to be used */
+	if (ui32FreeSpace > ui32CmdSize)
+	{
+		*ppvBufferSpace = (IMG_PVOID) (psClientCCB->pui8ClientCCB +
+									   psClientCCB->ui32HostWriteOffset);
+		return PVRSRV_OK;
+	}
+
+	return PVRSRV_ERROR_RETRY;
+}
+
+/******************************************************************************
+ FUNCTION	: RGXAcquireCCB
+
+ PURPOSE	: Obtains access to write some commands to a CCB
+
+ PARAMETERS	: psClientCCB		- The client CCB
+			  ui32CmdSize		- How much space is required
+			  ppvBufferSpace	- Pointer to space in the buffer
+			  bPDumpContinuous  - Should this be PDump continuous?
+
+ RETURNS	: PVRSRV_ERROR
+******************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR RGXAcquireCCB(RGX_CLIENT_CCB *psClientCCB,
+										IMG_UINT32		ui32CmdSize,
+										IMG_PVOID		*ppvBufferSpace,
+										IMG_BOOL		bPDumpContinuous)
+{
+	PVRSRV_ERROR eError;
+	IMG_UINT32	ui32PDumpFlags	= bPDumpContinuous ? PDUMP_FLAGS_CONTINUOUS : 0;
+	IMG_BOOL	bInCaptureRange;
+	IMG_BOOL	bPdumpEnabled;
+
+	PDumpIsCaptureFrameKM(&bInCaptureRange);
+	bPdumpEnabled = (bInCaptureRange || bPDumpContinuous);
+
+	/*
+		PDumpSetFrame will detect as we Transition into capture range for
+		frame based data but if we are PDumping continuous data then we
+		need to inform the PDump layer ourselves
+	*/
+	if (bPDumpContinuous && !bInCaptureRange)
+	{
+		eError = PDumpTransition(psClientCCB->psPDumpConnectionData, IMG_TRUE, IMG_TRUE);
+		if (eError != PVRSRV_OK)
+		{
+			return eError;
+		}
+	}
+
+	/* Check that the CCB can hold this command + padding */
+	if ((ui32CmdSize + PADDING_COMMAND_SIZE + 1) > psClientCCB->ui32Size)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Command size (%d bytes) too big for CCB (%d bytes)\n",
+								ui32CmdSize, psClientCCB->ui32Size));
+		return PVRSRV_ERROR_CMD_TOO_BIG;
+	}
+
+	/*
+		Check we don't overflow the end of the buffer and make sure we have
+		enough for the padding command.
+	*/
+	if ((psClientCCB->ui32HostWriteOffset + ui32CmdSize + PADDING_COMMAND_SIZE) >
+		psClientCCB->ui32Size)
+	{
+		RGXFWIF_CCB_CMD_HEADER *psHeader;
+		IMG_VOID *pvHeader;
+		PVRSRV_ERROR eError;
+		IMG_UINT32 ui32Remain = psClientCCB->ui32Size - psClientCCB->ui32HostWriteOffset;
+
+		/* We're at the end of the buffer without enough contiguous space */
+		eError = _RGXAcquireCCB(psClientCCB,
+								ui32Remain,
+								&pvHeader);
+		if (eError != PVRSRV_OK)
+		{
+			/*
+				It's possible no commands have been processed in which case as we
+				can fail the padding allocation due to that fact we never allow
+				the client CCB to be full
+			*/
+			return eError;
+		}
+		psHeader = pvHeader;
+		psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_PADDING;
+		psHeader->ui32CmdSize = ui32Remain - sizeof(RGXFWIF_CCB_CMD_HEADER);
+
+		PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "cCCB(%p): Padding cmd %d", psClientCCB, psHeader->ui32CmdSize);
+		if (bPdumpEnabled)
+		{
+			DevmemPDumpLoadMem(psClientCCB->psClientCCBMemDesc,
+							   psClientCCB->ui32HostWriteOffset,
+							   ui32Remain,
+							   ui32PDumpFlags);
+		}
+				
+		UPDATE_CCB_OFFSET(psClientCCB->ui32HostWriteOffset,
+						  ui32Remain,
+						  psClientCCB->ui32Size);
+		psClientCCB->ui32ByteCount += ui32Remain;
+	}
+
+	return _RGXAcquireCCB(psClientCCB,
+						  ui32CmdSize,
+						  ppvBufferSpace);
+}
+
+/******************************************************************************
+ FUNCTION	: RGXReleaseCCB
+
+ PURPOSE	: Release a CCB that we have been writing to.
+
+ PARAMETERS	: psDevData			- device data
+  			  psCCB				- the CCB
+
+ RETURNS	: None
+******************************************************************************/
+IMG_INTERNAL IMG_VOID RGXReleaseCCB(RGX_CLIENT_CCB *psClientCCB,
+									IMG_UINT32		ui32CmdSize,
+									IMG_BOOL		bPDumpContinuous)
+{
+	IMG_UINT32	ui32PDumpFlags	= bPDumpContinuous ? PDUMP_FLAGS_CONTINUOUS : 0;
+	IMG_BOOL	bInCaptureRange;
+	IMG_BOOL	bPdumpEnabled;
+
+	PDumpIsCaptureFrameKM(&bInCaptureRange);
+	bPdumpEnabled = (bInCaptureRange || bPDumpContinuous);
+
+	/* Dump the CCB data */
+	if (bPdumpEnabled)
+	{
+		DevmemPDumpLoadMem(psClientCCB->psClientCCBMemDesc,
+						   psClientCCB->ui32HostWriteOffset,
+						   ui32CmdSize,
+						   ui32PDumpFlags);
+	}
+	
+	/*
+	 *  Check if there have been any fences written that will already be
+	 *  satistified by a previously written update in this CCB.
+	 */
+#if defined REDUNDANT_SYNCS_DEBUG
+	{
+		IMG_UINT8  *pui8BufferStart = (IMG_PVOID)((IMG_UINTPTR_T)psClientCCB->pui8ClientCCB + psClientCCB->ui32HostWriteOffset);
+		IMG_UINT8  *pui8BufferEnd   = (IMG_PVOID)((IMG_UINTPTR_T)psClientCCB->pui8ClientCCB + psClientCCB->ui32HostWriteOffset + ui32CmdSize);
+
+		/* Walk through the commands in this section of CCB being released... */
+		while (pui8BufferStart < pui8BufferEnd)
+		{
+			RGXFWIF_CCB_CMD_HEADER  *psCmdHeader = (RGXFWIF_CCB_CMD_HEADER *) pui8BufferStart;
+
+			if (psCmdHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_UPDATE) /* don't check for unfenced update. Following comment explain why */
+			{
+				/* If an UPDATE then record the value incase a later fence depends on it. */
+				IMG_UINT32  ui32NumUpdates = psCmdHeader->ui32CmdSize / sizeof(RGXFWIF_UFO);
+				IMG_UINT32  i;
+
+				for (i = 0;  i < ui32NumUpdates;  i++)
+				{
+					RGXFWIF_UFO  *psUFOPtr = ((RGXFWIF_UFO*)(pui8BufferStart + sizeof(RGXFWIF_CCB_CMD_HEADER))) + i;
+					
+					psClientCCB->asFenceUpdateList[psClientCCB->ui32UpdateWriteIndex++] = *psUFOPtr;
+					psClientCCB->ui32UpdateWriteIndex &= (RGX_CCCB_FENCE_UPDATE_LIST_SIZE-1);
+				}
+			}
+			else if (psCmdHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_FENCE)
+			{
+				IMG_UINT32  ui32NumFences = psCmdHeader->ui32CmdSize / sizeof(RGXFWIF_UFO);
+				IMG_UINT32  i;
+				
+				for (i = 0;  i < ui32NumFences;  i++)
+				{
+					RGXFWIF_UFO  *psUFOPtr = ((RGXFWIF_UFO*)(pui8BufferStart + sizeof(RGXFWIF_CCB_CMD_HEADER))) + i;
+					IMG_UINT32  ui32UpdateIndex;
+
+					/* Check recently queued updates to see if this fence will be satisfied by the time it is checked. */
+					for (ui32UpdateIndex = 0;  ui32UpdateIndex < RGX_CCCB_FENCE_UPDATE_LIST_SIZE;  ui32UpdateIndex++)
+					{
+						RGXFWIF_UFO  *psUpdatePtr = &psClientCCB->asFenceUpdateList[ui32UpdateIndex];
+							
+						if (psUFOPtr->puiAddrUFO.ui32Addr == psUpdatePtr->puiAddrUFO.ui32Addr  &&
+							psUFOPtr->ui32Value == psUpdatePtr->ui32Value)
+						{
+							PVR_DPF((PVR_DBG_WARNING, "Redundant fence found in cCCB(%p) - 0x%x -> 0x%x",
+									psClientCCB, psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value));
+							//psUFOPtr->puiAddrUFO.ui32Addr = 0;
+							break;
+						}
+					}
+				}
+			}
+			else if (psCmdHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_FENCE_PR)
+			{
+				IMG_UINT32  ui32NumFences = psCmdHeader->ui32CmdSize / sizeof(RGXFWIF_UFO);
+				IMG_UINT32  i;
+				
+				for (i = 0;  i < ui32NumFences;  i++)
+				{
+					RGXFWIF_UFO  *psUFOPtr = ((RGXFWIF_UFO*)(pui8BufferStart + sizeof(RGXFWIF_CCB_CMD_HEADER))) + i;
+					IMG_UINT32  ui32UpdateIndex;
+							
+					/* Check recently queued updates to see if this fence will be satisfied by the time it is checked. */
+					for (ui32UpdateIndex = 0;  ui32UpdateIndex < RGX_CCCB_FENCE_UPDATE_LIST_SIZE;  ui32UpdateIndex++)
+					{
+						RGXFWIF_UFO  *psUpdatePtr = &psClientCCB->asFenceUpdateList[ui32UpdateIndex];
+						
+						/*
+						 *  The PR-fence will be met if the update value is >= the required fence value. E.g.
+						 *  the difference between the update value and fence value is positive.
+						 */
+						if (psUFOPtr->puiAddrUFO.ui32Addr == psUpdatePtr->puiAddrUFO.ui32Addr  &&
+							((psUpdatePtr->ui32Value - psUFOPtr->ui32Value) & (1U << 31)) == 0)
+						{
+							PVR_DPF((PVR_DBG_WARNING, "Redundant PR fence found in cCCB(%p) - 0x%x -> 0x%x",
+									psClientCCB, psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value));
+							//psUFOPtr->puiAddrUFO.ui32Addr = 0;
+							break;
+						}
+					}
+				}
+			}
+
+			/* Move to the next command in this section of CCB being released... */
+			pui8BufferStart += sizeof(RGXFWIF_CCB_CMD_HEADER) + psCmdHeader->ui32CmdSize;
+		}
+	}
+#endif /* REDUNDANT_SYNCS_DEBUG */
+
+	/*
+	 * Update the CCB write offset.
+	 */
+	UPDATE_CCB_OFFSET(psClientCCB->ui32HostWriteOffset,
+					  ui32CmdSize,
+					  psClientCCB->ui32Size);
+	psClientCCB->ui32ByteCount += ui32CmdSize;
+
+	/*
+		PDumpSetFrame will detect as we Transition out of capture range for
+		frame based data but if we are PDumping continuous data then we
+		need to inform the PDump layer ourselves
+	*/
+	if (bPDumpContinuous && !bInCaptureRange)
+	{
+		PVRSRV_ERROR eError;
+
+		/* Only Transitioning into capture range can cause an error */
+		eError = PDumpTransition(psClientCCB->psPDumpConnectionData, IMG_FALSE, IMG_TRUE);
+		PVR_ASSERT(eError == PVRSRV_OK);
+	}
+
+	if (bPdumpEnabled)
+	{
+		/* Update the PDump write offset to show we PDumped this command */
+		psClientCCB->ui32LastPDumpWriteOffset = psClientCCB->ui32HostWriteOffset;
+	}
+
+#if defined(NO_HARDWARE)
+	/*
+		The firmware is not running, it cannot update these; we do here instead.
+	*/
+	psClientCCB->psClientCCBCtrl->ui32ReadOffset = psClientCCB->ui32HostWriteOffset;
+	psClientCCB->psClientCCBCtrl->ui32DepOffset = psClientCCB->ui32HostWriteOffset;
+#endif
+}
+
+IMG_UINT32 RGXGetHostWriteOffsetCCB(RGX_CLIENT_CCB *psClientCCB)
+{
+	return psClientCCB->ui32HostWriteOffset;
+}
+
+#define SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL PVR_DBG_ERROR
+#define CHECK_COMMAND(cmd, fenceupdate) \
+				case RGXFWIF_CCB_CMD_TYPE_##cmd: \
+						PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL, #cmd " command (%d bytes)", psHeader->ui32CmdSize)); \
+						bFenceUpdate = fenceupdate; \
+						break
+
+static IMG_VOID _RGXClientCCBDumpCommands(RGX_CLIENT_CCB *psClientCCB,
+										  IMG_UINT32 ui32Offset,
+										  IMG_UINT32 ui32ByteCount)
+{
+#if defined(SUPPORT_DUMP_CLIENT_CCB_COMMANDS)
+	IMG_UINT8 *pui8Ptr = psClientCCB->pui8ClientCCB + ui32Offset;
+	IMG_UINT32 ui32ConsumeSize = ui32ByteCount;
+
+	while (ui32ConsumeSize)
+	{
+		RGXFWIF_CCB_CMD_HEADER *psHeader = (RGXFWIF_CCB_CMD_HEADER *) pui8Ptr;
+		IMG_BOOL bFenceUpdate = IMG_FALSE;
+
+		PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL, "@offset 0x%08x", pui8Ptr - psClientCCB->pui8ClientCCB));
+		switch(psHeader->eCmdType)
+		{
+			CHECK_COMMAND(TA, IMG_FALSE);
+			CHECK_COMMAND(3D, IMG_FALSE);
+			CHECK_COMMAND(CDM, IMG_FALSE);
+			CHECK_COMMAND(TQ_3D, IMG_FALSE);
+			CHECK_COMMAND(TQ_2D, IMG_FALSE);
+			CHECK_COMMAND(3D_PR, IMG_FALSE);
+			CHECK_COMMAND(NULL, IMG_FALSE);
+			CHECK_COMMAND(SHG, IMG_FALSE);
+			CHECK_COMMAND(RTU, IMG_FALSE);
+			CHECK_COMMAND(RTU_FC, IMG_FALSE);
+			CHECK_COMMAND(PRE_TIMESTAMP, IMG_FALSE);
+			CHECK_COMMAND(POST_TIMESTAMP, IMG_FALSE);
+			CHECK_COMMAND(FENCE, IMG_TRUE);
+			CHECK_COMMAND(UPDATE, IMG_TRUE);
+			CHECK_COMMAND(UNFENCED_UPDATE, IMG_FALSE);
+			CHECK_COMMAND(RMW_UPDATE, IMG_TRUE);
+			CHECK_COMMAND(FENCE_PR, IMG_TRUE);
+			CHECK_COMMAND(UNFENCED_RMW_UPDATE, IMG_FALSE);
+			CHECK_COMMAND(PADDING, IMG_FALSE);
+			default:
+				PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL, "Unknown command!"));
+				break;
+		}
+		pui8Ptr += sizeof(*psHeader);
+		if (bFenceUpdate)
+		{
+			IMG_UINT32 j;
+			RGXFWIF_UFO *psUFOPtr = (RGXFWIF_UFO *) pui8Ptr;
+			for (j=0;j<psHeader->ui32CmdSize/sizeof(RGXFWIF_UFO);j++)
+			{
+				PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL, "Addr = 0x%08x, value = 0x%08x",
+							psUFOPtr[j].puiAddrUFO.ui32Addr, psUFOPtr[j].ui32Value));
+			}
+		}
+		else
+		{
+			IMG_UINT32 *pui32Ptr = (IMG_UINT32 *) pui8Ptr;
+			IMG_UINT32 ui32Remain = psHeader->ui32CmdSize/sizeof(IMG_UINT32);
+			while(ui32Remain)
+			{
+				if (ui32Remain >= 4)
+				{
+					PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL, "0x%08x 0x%08x 0x%08x 0x%08x",
+							pui32Ptr[0], pui32Ptr[1], pui32Ptr[2], pui32Ptr[3]));
+					pui32Ptr += 4;
+					ui32Remain -= 4;
+				}
+				if (ui32Remain == 3)
+				{
+					PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL, "0x%08x 0x%08x 0x%08x",
+							pui32Ptr[0], pui32Ptr[1], pui32Ptr[2]));
+					pui32Ptr += 3;
+					ui32Remain -= 3;
+				}
+				if (ui32Remain == 2)
+				{
+					PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL, "0x%08x 0x%08x",
+							pui32Ptr[0], pui32Ptr[1]));
+					pui32Ptr += 2;
+					ui32Remain -= 2;
+				}
+				if (ui32Remain == 1)
+				{
+					PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL, "0x%08x",
+							pui32Ptr[0]));
+					pui32Ptr += 1;
+					ui32Remain -= 1;
+				}
+			}
+		}
+		pui8Ptr += psHeader->ui32CmdSize;
+		ui32ConsumeSize -= sizeof(*psHeader) + psHeader->ui32CmdSize;
+	}
+#else
+	PVR_UNREFERENCED_PARAMETER(psClientCCB);
+	PVR_UNREFERENCED_PARAMETER(ui32Offset);
+	PVR_UNREFERENCED_PARAMETER(ui32ByteCount);
+#endif
+}
+
+/*
+	Workout how much space this command will require
+*/
+PVRSRV_ERROR RGXCmdHelperInitCmdCCB(RGX_CLIENT_CCB 			*psClientCCB,
+                                    IMG_UINT32				ui32ClientFenceCount,
+                                    PRGXFWIF_UFO_ADDR		*pauiFenceUFOAddress,
+                                    IMG_UINT32				*paui32FenceValue,
+                                    IMG_UINT32				ui32ClientUpdateCount,
+                                    PRGXFWIF_UFO_ADDR		*pauiUpdateUFOAddress,
+                                    IMG_UINT32				*paui32UpdateValue,
+                                    IMG_UINT32				ui32ServerSyncCount,
+                                    IMG_UINT32				*paui32ServerSyncFlags,
+                                    SERVER_SYNC_PRIMITIVE	**papsServerSyncs,
+                                    IMG_UINT32				ui32CmdSize,
+                                    IMG_PBYTE				pui8DMCmd,
+                                    RGXFWIF_DEV_VIRTADDR	* ppPreTimestamp,
+                                    RGXFWIF_DEV_VIRTADDR	* ppPostTimestamp,
+                                    PRGXFWIF_UFO_ADDR       * ppRMWUFOAddr,
+                                    RGXFWIF_CCB_CMD_TYPE	eType,
+                                    IMG_BOOL				bPDumpContinuous,
+                                    IMG_CHAR				*pszCommandName,
+                                    RGX_CCB_CMD_HELPER_DATA	*psCmdHelperData)
+{
+	IMG_UINT32 ui32FenceCount;
+	IMG_UINT32 ui32UpdateCount;
+	IMG_UINT32 i;
+
+	/* Save the data we require in the submit call */
+	psCmdHelperData->psClientCCB = psClientCCB;
+	psCmdHelperData->bPDumpContinuous = bPDumpContinuous;
+	psCmdHelperData->pszCommandName = pszCommandName;
+
+	/* Client sync data */
+	psCmdHelperData->ui32ClientFenceCount = ui32ClientFenceCount;
+	psCmdHelperData->pauiFenceUFOAddress = pauiFenceUFOAddress;
+	psCmdHelperData->paui32FenceValue = paui32FenceValue;
+	psCmdHelperData->ui32ClientUpdateCount = ui32ClientUpdateCount;
+	psCmdHelperData->pauiUpdateUFOAddress = pauiUpdateUFOAddress;
+	psCmdHelperData->paui32UpdateValue = paui32UpdateValue;
+
+	/* Server sync data */
+	psCmdHelperData->ui32ServerSyncCount = ui32ServerSyncCount;
+	psCmdHelperData->paui32ServerSyncFlags = paui32ServerSyncFlags;
+	psCmdHelperData->papsServerSyncs = papsServerSyncs;
+
+	/* Command data */
+	psCmdHelperData->ui32CmdSize = ui32CmdSize;
+	psCmdHelperData->pui8DMCmd = pui8DMCmd;
+	psCmdHelperData->eType = eType;
+
+	PDUMPCOMMENTWITHFLAGS((bPDumpContinuous) ? PDUMP_FLAGS_CONTINUOUS : 0,
+			"%s Command Server Init on FWCtx %08x", pszCommandName,
+			FWCommonContextGetFWAddress(psClientCCB->psServerCommonContext).ui32Addr);
+
+	/* Init the generated data members */
+	psCmdHelperData->ui32ServerFenceCount = 0;
+	psCmdHelperData->ui32ServerUpdateCount = 0;
+	psCmdHelperData->ui32ServerUnfencedUpdateCount = 0;
+	psCmdHelperData->ui32PreTimeStampCmdSize = 0;
+	psCmdHelperData->ui32PostTimeStampCmdSize = 0;
+	psCmdHelperData->ui32RMWUFOCmdSize = 0;
+
+
+	if (ppPreTimestamp && (ppPreTimestamp->ui32Addr != 0))
+	{
+
+		psCmdHelperData->pPreTimestamp           = * ppPreTimestamp;
+		psCmdHelperData->ui32PreTimeStampCmdSize = sizeof(RGXFWIF_CCB_CMD_HEADER)
+			+ ((sizeof(RGXFWIF_DEV_VIRTADDR) + RGXFWIF_FWALLOC_ALIGN - 1) & ~(RGXFWIF_FWALLOC_ALIGN  - 1));
+	}
+
+	if (ppPostTimestamp && (ppPostTimestamp->ui32Addr != 0))
+	{
+		psCmdHelperData->pPostTimestamp           = * ppPostTimestamp;
+		psCmdHelperData->ui32PostTimeStampCmdSize = sizeof(RGXFWIF_CCB_CMD_HEADER)
+			+ ((sizeof(RGXFWIF_DEV_VIRTADDR) + RGXFWIF_FWALLOC_ALIGN - 1) & ~(RGXFWIF_FWALLOC_ALIGN  - 1));
+	}
+
+	if (ppRMWUFOAddr && (ppRMWUFOAddr->ui32Addr != 0))
+	{
+		psCmdHelperData->pRMWUFOAddr       = * ppRMWUFOAddr;
+		psCmdHelperData->ui32RMWUFOCmdSize = sizeof(RGXFWIF_CCB_CMD_HEADER) + sizeof(RGXFWIF_UFO);
+	}
+
+
+	/* Workout how many fences and updates this command will have */
+	for (i = 0; i < ui32ServerSyncCount; i++)
+	{
+		if (paui32ServerSyncFlags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK)
+		{
+			/* Server syncs must fence */
+			psCmdHelperData->ui32ServerFenceCount++;
+		}
+
+		/* If it is an update */
+		if (paui32ServerSyncFlags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE)
+		{
+			/* is it a fenced update or a progresse update (a.k.a unfenced update) ?*/
+			if ((paui32ServerSyncFlags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_UNFENCED_UPDATE) == PVRSRV_CLIENT_SYNC_PRIM_OP_UNFENCED_UPDATE)
+			{
+				/* it is a progress update */
+				psCmdHelperData->ui32ServerUnfencedUpdateCount++;
+			}
+			else
+			{
+				/* it is a fenced update */
+				psCmdHelperData->ui32ServerUpdateCount++;
+			}
+		}
+	}
+
+
+	/* Total fence command size (header plus command data) */
+	ui32FenceCount = ui32ClientFenceCount + psCmdHelperData->ui32ServerFenceCount;
+	if (ui32FenceCount)
+	{
+		psCmdHelperData->ui32FenceCmdSize = RGX_CCB_FWALLOC_ALIGN((ui32FenceCount * sizeof(RGXFWIF_UFO)) +
+																  sizeof(RGXFWIF_CCB_CMD_HEADER));
+	}
+	else
+	{
+		psCmdHelperData->ui32FenceCmdSize = 0;
+	}
+
+	/* Total DM command size (header plus command data) */
+	psCmdHelperData->ui32DMCmdSize = RGX_CCB_FWALLOC_ALIGN(ui32CmdSize +
+														   sizeof(RGXFWIF_CCB_CMD_HEADER));
+
+	/* Total update command size (header plus command data) */
+	ui32UpdateCount = ui32ClientUpdateCount + psCmdHelperData->ui32ServerUpdateCount;
+	if (ui32UpdateCount)
+	{
+		psCmdHelperData->ui32UpdateCmdSize = RGX_CCB_FWALLOC_ALIGN((ui32UpdateCount * sizeof(RGXFWIF_UFO)) +
+																   sizeof(RGXFWIF_CCB_CMD_HEADER));
+	}
+	else
+	{
+		psCmdHelperData->ui32UpdateCmdSize = 0;
+	}
+
+	/* Total unfenced update commad size (header plus command data) */ 
+	if (psCmdHelperData->ui32ServerUnfencedUpdateCount != 0)
+	{
+		psCmdHelperData->ui32UnfencedUpdateCmdSize = RGX_CCB_FWALLOC_ALIGN((psCmdHelperData->ui32ServerUnfencedUpdateCount * sizeof(RGXFWIF_UFO)) +
+																		   sizeof(RGXFWIF_CCB_CMD_HEADER));
+	}
+	else
+	{
+		psCmdHelperData->ui32UnfencedUpdateCmdSize = 0;
+	}
+
+	return PVRSRV_OK;
+}
+
+
+/*
+	Reserve space in the CCB and fill in the command and client sync data
+*/
+PVRSRV_ERROR RGXCmdHelperAcquireCmdCCB(IMG_UINT32 ui32CmdCount,
+									   RGX_CCB_CMD_HELPER_DATA *asCmdHelperData,
+									   IMG_BOOL *pbKickRequired)
+{
+	IMG_UINT32 ui32BeforeWOff = asCmdHelperData[0].psClientCCB->ui32HostWriteOffset;
+	IMG_UINT32 ui32AllocSize = 0;
+	IMG_UINT32 i;
+	IMG_UINT8 *pui8StartPtr;
+	PVRSRV_ERROR eError;
+
+	*pbKickRequired = IMG_FALSE;
+
+	/*
+		Workout how much space we need for all the command(s)
+	*/
+	ui32AllocSize = RGXCmdHelperGetCommandSize(ui32CmdCount, asCmdHelperData);
+
+
+	for (i = 0; i < ui32CmdCount; i++)
+	{
+		if (asCmdHelperData[0].bPDumpContinuous != asCmdHelperData[i].bPDumpContinuous)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: PDump continuous is not consistent (%s != %s) for command %d",
+					 __FUNCTION__,
+					 asCmdHelperData[0].bPDumpContinuous?"IMG_TRUE":"IMG_FALSE",
+					 asCmdHelperData[i].bPDumpContinuous?"IMG_TRUE":"IMG_FALSE",
+					 ui32CmdCount));
+			return PVRSRV_ERROR_INVALID_PARAMS;
+		}
+	}
+
+	/*
+		Acquire space in the CCB for all the command(s).
+	*/
+	eError = RGXAcquireCCB(asCmdHelperData[0].psClientCCB,
+						   ui32AllocSize,
+						   (IMG_PVOID *)&pui8StartPtr,
+						   asCmdHelperData[0].bPDumpContinuous);	
+	if (eError != PVRSRV_OK)
+	{
+		/* Failed so bail out and allow the client side to retry */
+		if (asCmdHelperData[0].psClientCCB->ui32HostWriteOffset != ui32BeforeWOff)
+		{
+			*pbKickRequired = IMG_TRUE;
+		}
+		return eError;
+	}
+
+
+
+	/*
+		For each command fill in the fence, DM, and update command
+
+		Note:
+		We only fill in the client fences here, the server fences (and updates)
+		will be filled in together at the end. This is because we might fail the
+		kernel CCB alloc and would then have to rollback the server syncs if
+		we took the operation here
+	*/
+	for (i = 0; i < ui32CmdCount; i++)
+	{
+		RGX_CCB_CMD_HELPER_DATA *psCmdHelperData = & asCmdHelperData[i];
+		IMG_UINT8 *pui8CmdPtr;
+		IMG_UINT8 *pui8ServerFenceStart = 0;
+		IMG_UINT8 *pui8ServerUpdateStart = 0;
+#if defined(PDUMP)
+		IMG_UINT32 ui32CtxAddr = FWCommonContextGetFWAddress(asCmdHelperData->psClientCCB->psServerCommonContext).ui32Addr;
+		IMG_UINT32 ui32CcbWoff = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(asCmdHelperData->psClientCCB->psServerCommonContext));
+#endif
+
+		if (psCmdHelperData->ui32ClientFenceCount+psCmdHelperData->ui32ClientUpdateCount != 0)
+		{
+			PDUMPCOMMENT("Start of %s client syncs for cmd[%d] on FWCtx %08x Woff 0x%x bytes",
+					psCmdHelperData->psClientCCB->szName, i, ui32CtxAddr, ui32CcbWoff);
+		}
+
+
+
+		/*
+			Create the fence command.
+		*/
+		if (psCmdHelperData->ui32FenceCmdSize)
+		{
+			RGXFWIF_CCB_CMD_HEADER *psHeader;
+			IMG_UINT k;
+
+			/* Fences are at the start of the command */
+			pui8CmdPtr = pui8StartPtr;
+
+			psHeader = (RGXFWIF_CCB_CMD_HEADER *) pui8CmdPtr;
+			psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_FENCE;
+			psHeader->ui32CmdSize = psCmdHelperData->ui32FenceCmdSize - sizeof(RGXFWIF_CCB_CMD_HEADER);
+			pui8CmdPtr += sizeof(RGXFWIF_CCB_CMD_HEADER);
+
+			/* Fill in the client fences */
+			for (k = 0; k < psCmdHelperData->ui32ClientFenceCount; k++)
+			{
+				RGXFWIF_UFO *psUFOPtr = (RGXFWIF_UFO *) pui8CmdPtr;
+	
+				psUFOPtr->puiAddrUFO = psCmdHelperData->pauiFenceUFOAddress[k];
+				psUFOPtr->ui32Value = psCmdHelperData->paui32FenceValue[k];
+				pui8CmdPtr += sizeof(RGXFWIF_UFO);
+
+#if defined SYNC_COMMAND_DEBUG
+				PVR_DPF((PVR_DBG_ERROR, "%s client sync fence - 0x%x -> 0x%x",
+						psCmdHelperData->psClientCCB->szName, psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value));
+#endif
+				PDUMPCOMMENT(".. %s client sync fence - 0x%x -> 0x%x",
+						psCmdHelperData->psClientCCB->szName, psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value);
+
+
+			}
+			pui8ServerFenceStart = pui8CmdPtr;
+		}
+
+		/* jump over the Server fences */
+		pui8CmdPtr = pui8StartPtr + psCmdHelperData->ui32FenceCmdSize;
+
+
+		/*
+		  Create the pre DM timestamp commands. Pre and Post timestamp commands are supposed to
+		  sandwich the DM cmd. The padding code with the CCB wrap upsets the FW if we don't have
+		  the task type bit cleared for POST_TIMESTAMPs. That's why we have 2 different cmd types.
+		*/
+		if (psCmdHelperData->ui32PreTimeStampCmdSize != 0)
+		{
+			RGXWriteTimestampCommand(& pui8CmdPtr,
+			                         RGXFWIF_CCB_CMD_TYPE_PRE_TIMESTAMP,
+			                         psCmdHelperData->pPreTimestamp);
+		}
+
+		/*
+			Create the DM command
+		*/
+		if (psCmdHelperData->ui32DMCmdSize)
+		{
+			RGXFWIF_CCB_CMD_HEADER *psHeader;
+
+			psHeader = (RGXFWIF_CCB_CMD_HEADER *) pui8CmdPtr;
+			psHeader->eCmdType = psCmdHelperData->eType;
+			psHeader->ui32CmdSize = psCmdHelperData->ui32DMCmdSize - sizeof(RGXFWIF_CCB_CMD_HEADER);
+
+
+
+			pui8CmdPtr += sizeof(RGXFWIF_CCB_CMD_HEADER);
+
+
+			OSMemCopy(pui8CmdPtr, psCmdHelperData->pui8DMCmd, psCmdHelperData->ui32CmdSize);
+			pui8CmdPtr += psCmdHelperData->ui32CmdSize;
+		}
+
+
+
+		if (psCmdHelperData->ui32PostTimeStampCmdSize != 0)
+		{
+			RGXWriteTimestampCommand(& pui8CmdPtr,
+			                         RGXFWIF_CCB_CMD_TYPE_POST_TIMESTAMP,
+			                         psCmdHelperData->pPostTimestamp);
+		}
+
+
+		if (psCmdHelperData->ui32RMWUFOCmdSize != 0)
+		{
+			RGXFWIF_CCB_CMD_HEADER * psHeader;
+			RGXFWIF_UFO            * psUFO;
+
+			psHeader = (RGXFWIF_CCB_CMD_HEADER *) pui8CmdPtr;
+			psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_RMW_UPDATE;
+			psHeader->ui32CmdSize = psCmdHelperData->ui32RMWUFOCmdSize - sizeof(RGXFWIF_CCB_CMD_HEADER);
+			pui8CmdPtr += sizeof(RGXFWIF_CCB_CMD_HEADER);
+
+			psUFO = (RGXFWIF_UFO *) pui8CmdPtr;
+			psUFO->puiAddrUFO = psCmdHelperData->pRMWUFOAddr;
+			
+			pui8CmdPtr += sizeof(RGXFWIF_UFO);
+		}
+	
+
+		/*
+			Create the update command.
+			
+			Note:
+			We only fill in the client updates here, the server updates (and fences)
+			will be filled in together at the end
+		*/
+		if (psCmdHelperData->ui32UpdateCmdSize)
+		{
+			RGXFWIF_CCB_CMD_HEADER *psHeader;
+			IMG_UINT k;
+
+			psHeader = (RGXFWIF_CCB_CMD_HEADER *) pui8CmdPtr;
+			psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_UPDATE;
+			psHeader->ui32CmdSize = psCmdHelperData->ui32UpdateCmdSize - sizeof(RGXFWIF_CCB_CMD_HEADER);
+			pui8CmdPtr += sizeof(RGXFWIF_CCB_CMD_HEADER);
+
+			/* Fill in the client updates */
+			for (k = 0; k < psCmdHelperData->ui32ClientUpdateCount; k++)
+			{
+				RGXFWIF_UFO *psUFOPtr = (RGXFWIF_UFO *) pui8CmdPtr;
+	
+				psUFOPtr->puiAddrUFO = psCmdHelperData->pauiUpdateUFOAddress[k];
+				psUFOPtr->ui32Value = psCmdHelperData->paui32UpdateValue[k];
+				pui8CmdPtr += sizeof(RGXFWIF_UFO);
+
+#if defined SYNC_COMMAND_DEBUG
+				PVR_DPF((PVR_DBG_ERROR, "%s client sync update - 0x%x -> 0x%x",
+						psCmdHelperData->psClientCCB->szName, psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value));
+#endif
+				PDUMPCOMMENT(".. %s client sync update - 0x%x -> 0x%x",
+						psCmdHelperData->psClientCCB->szName, psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value);
+
+			}
+			pui8ServerUpdateStart = pui8CmdPtr;
+		}
+	
+		/* Save the server sync fence & update offsets for submit time */
+		psCmdHelperData->pui8ServerFenceStart  = pui8ServerFenceStart;
+		psCmdHelperData->pui8ServerUpdateStart = pui8ServerUpdateStart;
+
+		/* jump over the fenced update */
+		if (psCmdHelperData->ui32UnfencedUpdateCmdSize != 0)
+		{
+			RGXFWIF_CCB_CMD_HEADER * const psHeader = (RGXFWIF_CCB_CMD_HEADER * ) psCmdHelperData->pui8ServerUpdateStart + psCmdHelperData->ui32UpdateCmdSize;
+			/* set up the header for unfenced updates,  */
+			PVR_ASSERT(psHeader); /* Could be zero if ui32UpdateCmdSize is 0 which is never expected */
+			psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_UNFENCED_UPDATE;
+			psHeader->ui32CmdSize = psCmdHelperData->ui32UnfencedUpdateCmdSize - sizeof(RGXFWIF_CCB_CMD_HEADER);
+		
+			/* jump over the header */
+			psCmdHelperData->pui8ServerUnfencedUpdateStart = ((IMG_UINT8*) psHeader) + sizeof(RGXFWIF_CCB_CMD_HEADER);
+		}
+		else
+		{
+			psCmdHelperData->pui8ServerUnfencedUpdateStart = IMG_NULL;
+		}
+		
+		/* Save start for sanity checking at submit time */
+		psCmdHelperData->pui8StartPtr = pui8StartPtr;
+
+		/* Set the start pointer for the next iteration around the loop */
+		pui8StartPtr +=
+			psCmdHelperData->ui32FenceCmdSize         +
+			psCmdHelperData->ui32PreTimeStampCmdSize  +
+			psCmdHelperData->ui32DMCmdSize            +
+			psCmdHelperData->ui32PostTimeStampCmdSize +
+			psCmdHelperData->ui32RMWUFOCmdSize        + 
+			psCmdHelperData->ui32UpdateCmdSize        +
+			psCmdHelperData->ui32UnfencedUpdateCmdSize;
+
+		if (psCmdHelperData->ui32ClientFenceCount+psCmdHelperData->ui32ClientUpdateCount != 0)
+		{
+			PDUMPCOMMENT("End of %s client syncs for cmd[%d] on FWCtx %08x Woff 0x%x bytes",
+					psCmdHelperData->psClientCCB->szName, i, ui32CtxAddr, ui32CcbWoff);
+		}
+		else
+		{
+			PDUMPCOMMENT("No %s client syncs for cmd[%d] on FWCtx %08x Woff 0x%x bytes",
+					psCmdHelperData->psClientCCB->szName, i, ui32CtxAddr, ui32CcbWoff);
+		}
+	}
+
+	*pbKickRequired = IMG_TRUE;
+	return PVRSRV_OK;
+}
+
+/*
+	Fill in the server syncs data and release the CCB space
+*/
+IMG_VOID RGXCmdHelperReleaseCmdCCB(IMG_UINT32 ui32CmdCount,
+								   RGX_CCB_CMD_HELPER_DATA *asCmdHelperData,
+								   const IMG_CHAR *pcszDMName,
+								   IMG_UINT32 ui32CtxAddr)
+{
+	IMG_UINT32 ui32AllocSize = 0;
+	IMG_UINT32 i;
+
+	/*
+		Workout how much space we need for all the command(s)
+	*/
+	ui32AllocSize = RGXCmdHelperGetCommandSize(ui32CmdCount, asCmdHelperData);
+
+   /*
+		For each command fill in the server sync info
+	*/
+	for (i=0;i<ui32CmdCount;i++)
+	{
+		RGX_CCB_CMD_HELPER_DATA *psCmdHelperData = &asCmdHelperData[i];
+		IMG_UINT8 *pui8ServerFenceStart = psCmdHelperData->pui8ServerFenceStart;
+		IMG_UINT8 *pui8ServerUpdateStart = psCmdHelperData->pui8ServerUpdateStart;
+		IMG_UINT8 *pui8ServerUnfencedUpdateStart = psCmdHelperData->pui8ServerUnfencedUpdateStart;		
+		IMG_UINT32 j;
+
+		/* Now fill in the server fence and updates together */
+		for (j = 0; j < psCmdHelperData->ui32ServerSyncCount; j++)
+		{
+			RGXFWIF_UFO *psUFOPtr;
+			IMG_UINT32 ui32UpdateValue;
+			IMG_UINT32 ui32FenceValue;
+			PVRSRV_ERROR eError;
+			IMG_BOOL bFence = ((psCmdHelperData->paui32ServerSyncFlags[j] & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK)!=0)?IMG_TRUE:IMG_FALSE;
+			IMG_BOOL bUpdate = ((psCmdHelperData->paui32ServerSyncFlags[j] & PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE)!=0)?IMG_TRUE:IMG_FALSE;
+			const IMG_BOOL bUnfencedUpdate = ((psCmdHelperData->paui32ServerSyncFlags[j] & PVRSRV_CLIENT_SYNC_PRIM_OP_UNFENCED_UPDATE) == PVRSRV_CLIENT_SYNC_PRIM_OP_UNFENCED_UPDATE)
+				? IMG_TRUE
+				: IMG_FALSE;
+
+			eError = PVRSRVServerSyncQueueHWOpKM(psCmdHelperData->papsServerSyncs[j],
+												 bUpdate,
+												 &ui32FenceValue,
+												 &ui32UpdateValue);
+			/* This function can't fail */
+			PVR_ASSERT(eError == PVRSRV_OK);
+	
+			/*
+				As server syncs always fence (we have a check in RGXCmcdHelperInitCmdCCB
+				which ensures the client is playing ball) the filling in of the fence
+				is unconditional.
+			*/
+			if (bFence)
+			{
+				PVR_ASSERT(pui8ServerFenceStart != 0);
+
+				psUFOPtr = (RGXFWIF_UFO *) pui8ServerFenceStart;
+				psUFOPtr->puiAddrUFO.ui32Addr = ServerSyncGetFWAddr(psCmdHelperData->papsServerSyncs[j]);
+				psUFOPtr->ui32Value = ui32FenceValue;
+				pui8ServerFenceStart += sizeof(RGXFWIF_UFO);
+
+#if defined(LINUX)
+				trace_rogue_fence_checks(psCmdHelperData->pszCommandName,
+										 pcszDMName,
+										 ui32CtxAddr,
+										 psCmdHelperData->psClientCCB->ui32HostWriteOffset + ui32AllocSize,
+										 1,
+										 &psUFOPtr->puiAddrUFO,
+										 &psUFOPtr->ui32Value);
+#endif
+			}
+	
+			/* If there is an update then fill that in as well */
+			if (bUpdate)
+			{
+				if (bUnfencedUpdate)
+				{
+					PVR_ASSERT(pui8ServerUnfencedUpdateStart != 0);
+
+					psUFOPtr = (RGXFWIF_UFO *) pui8ServerUnfencedUpdateStart;
+					psUFOPtr->puiAddrUFO.ui32Addr = ServerSyncGetFWAddr(psCmdHelperData->papsServerSyncs[j]);
+					psUFOPtr->ui32Value = ui32UpdateValue;
+					pui8ServerUnfencedUpdateStart += sizeof(RGXFWIF_UFO);
+				}
+				else
+				{
+					/* fenced update */
+					PVR_ASSERT(pui8ServerUpdateStart != 0);
+
+					psUFOPtr = (RGXFWIF_UFO *) pui8ServerUpdateStart;
+					psUFOPtr->puiAddrUFO.ui32Addr = ServerSyncGetFWAddr(psCmdHelperData->papsServerSyncs[j]);
+					psUFOPtr->ui32Value = ui32UpdateValue;
+					pui8ServerUpdateStart += sizeof(RGXFWIF_UFO);
+				}
+#if defined(LINUX)
+				trace_rogue_fence_updates(psCmdHelperData->pszCommandName,
+										  pcszDMName,
+										  ui32CtxAddr,
+										  psCmdHelperData->psClientCCB->ui32HostWriteOffset + ui32AllocSize,
+										  1,
+										  &psUFOPtr->puiAddrUFO,
+										  &psUFOPtr->ui32Value);
+#endif
+				
+#if defined(NO_HARDWARE)
+				/*
+				  There is no FW so the host has to do any Sync updates
+				  (client sync updates are done in the client
+				*/
+				PVRSRVServerSyncPrimSetKM(psCmdHelperData->papsServerSyncs[j], ui32UpdateValue);
+#endif
+			}
+		}
+
+#if defined(LINUX)
+		trace_rogue_fence_checks(psCmdHelperData->pszCommandName,
+								 pcszDMName,
+								 ui32CtxAddr,
+								 psCmdHelperData->psClientCCB->ui32HostWriteOffset + ui32AllocSize,
+								 psCmdHelperData->ui32ClientFenceCount,
+								 psCmdHelperData->pauiFenceUFOAddress,
+								 psCmdHelperData->paui32FenceValue);
+		trace_rogue_fence_updates(psCmdHelperData->pszCommandName,
+								  pcszDMName,
+								  ui32CtxAddr,
+								  psCmdHelperData->psClientCCB->ui32HostWriteOffset + ui32AllocSize,
+								  psCmdHelperData->ui32ClientUpdateCount,
+								  psCmdHelperData->pauiUpdateUFOAddress,
+								  psCmdHelperData->paui32UpdateValue);
+#endif
+
+		if (psCmdHelperData->ui32ServerSyncCount)
+		{
+			/*
+				Do some sanity checks to ensure we did the point math right
+			*/
+			if (pui8ServerFenceStart != 0)
+			{
+				PVR_ASSERT(pui8ServerFenceStart ==
+						   (psCmdHelperData->pui8StartPtr +
+						   psCmdHelperData->ui32FenceCmdSize));
+			}
+
+			if (pui8ServerUpdateStart != 0)
+			{
+				PVR_ASSERT(pui8ServerUpdateStart ==
+				           psCmdHelperData->pui8StartPtr             +
+				           psCmdHelperData->ui32FenceCmdSize         +
+				           psCmdHelperData->ui32PreTimeStampCmdSize  +
+				           psCmdHelperData->ui32DMCmdSize            +
+				           psCmdHelperData->ui32RMWUFOCmdSize        +
+				           psCmdHelperData->ui32PostTimeStampCmdSize +
+				           psCmdHelperData->ui32UpdateCmdSize);
+			}
+
+			if (pui8ServerUnfencedUpdateStart != 0)
+			{
+				PVR_ASSERT(pui8ServerUnfencedUpdateStart ==
+				           psCmdHelperData->pui8StartPtr             +
+				           psCmdHelperData->ui32FenceCmdSize         +
+				           psCmdHelperData->ui32PreTimeStampCmdSize  +
+				           psCmdHelperData->ui32DMCmdSize            +
+				           psCmdHelperData->ui32RMWUFOCmdSize        +
+				           psCmdHelperData->ui32PostTimeStampCmdSize +
+				           psCmdHelperData->ui32UpdateCmdSize        +
+				           psCmdHelperData->ui32UnfencedUpdateCmdSize);
+			}			
+		}
+	
+		/*
+			All the commands have been filled in so release the CCB space.
+			The FW still won't run this command until we kick it
+		*/
+		PDUMPCOMMENTWITHFLAGS((psCmdHelperData->bPDumpContinuous) ? PDUMP_FLAGS_CONTINUOUS : 0,
+				"%s Command Server Release on FWCtx %08x",
+				psCmdHelperData->pszCommandName, ui32CtxAddr);
+	}
+
+	_RGXClientCCBDumpCommands(asCmdHelperData[0].psClientCCB,
+							  asCmdHelperData[0].psClientCCB->ui32HostWriteOffset,
+							  ui32AllocSize);
+
+	RGXReleaseCCB(asCmdHelperData[0].psClientCCB, 
+				  ui32AllocSize,
+				  asCmdHelperData[0].bPDumpContinuous);
+}
+
+
+IMG_UINT32 RGXCmdHelperGetCommandSize(IMG_UINT32              ui32CmdCount,
+                                      RGX_CCB_CMD_HELPER_DATA *asCmdHelperData)
+{
+	IMG_UINT32 ui32AllocSize = 0;
+	IMG_UINT32 i;
+
+	/*
+		Workout how much space we need for all the command(s)
+	*/
+	for (i = 0; i < ui32CmdCount; i++)
+	{
+		ui32AllocSize +=
+			asCmdHelperData[i].ui32FenceCmdSize          +
+			asCmdHelperData[i].ui32DMCmdSize             +
+			asCmdHelperData[i].ui32UpdateCmdSize         +
+			asCmdHelperData[i].ui32UnfencedUpdateCmdSize +			
+			asCmdHelperData[i].ui32PreTimeStampCmdSize   +
+			asCmdHelperData[i].ui32PostTimeStampCmdSize  +
+			asCmdHelperData[i].ui32RMWUFOCmdSize;
+	}
+
+	return ui32AllocSize;
+}
+
+
+static IMG_PCCHAR _CCBCmdTypename(RGXFWIF_CCB_CMD_TYPE cmdType)
+{
+	static const IMG_CHAR* aCCBCmdName[20] = { "TA", "3D", "CDM", "TQ_3D", "TQ_2D",
+	                                           "3D_PR", "NULL", "SHG", "RTU", "RTU_FC",
+	                                           "PRE_TIMESTAMP",
+	                                           "FENCE", "UPDATE", "RMW_UPDATE",
+	                                           "FENCE_PR", "PRIORITY",
+	                                           "POST_TIMESTAMP", "UNFENCED_UPDATE",
+	                                           "UNFENCED_RMW_UPDATE", "PADDING"};
+	IMG_UINT32	cmdStrIdx = 19;
+
+	PVR_ASSERT( (cmdType == RGXFWIF_CCB_CMD_TYPE_TA)
+	            || (cmdType == RGXFWIF_CCB_CMD_TYPE_3D)
+	            || (cmdType == RGXFWIF_CCB_CMD_TYPE_CDM)
+	            || (cmdType == RGXFWIF_CCB_CMD_TYPE_TQ_3D)
+	            || (cmdType == RGXFWIF_CCB_CMD_TYPE_TQ_2D)
+	            || (cmdType == RGXFWIF_CCB_CMD_TYPE_3D_PR)
+	            || (cmdType == RGXFWIF_CCB_CMD_TYPE_NULL)
+	            || (cmdType == RGXFWIF_CCB_CMD_TYPE_SHG)
+	            || (cmdType == RGXFWIF_CCB_CMD_TYPE_RTU)
+	            || (cmdType == RGXFWIF_CCB_CMD_TYPE_RTU_FC)
+	            || (cmdType == RGXFWIF_CCB_CMD_TYPE_PRE_TIMESTAMP)
+	            || (cmdType == RGXFWIF_CCB_CMD_TYPE_POST_TIMESTAMP)
+	            || (cmdType == RGXFWIF_CCB_CMD_TYPE_FENCE)
+	            || (cmdType == RGXFWIF_CCB_CMD_TYPE_UPDATE)
+	            || (cmdType == RGXFWIF_CCB_CMD_TYPE_RMW_UPDATE)
+	            || (cmdType == RGXFWIF_CCB_CMD_TYPE_FENCE_PR)
+	            || (cmdType == RGXFWIF_CCB_CMD_TYPE_PRIORITY)
+	            || (cmdType == RGXFWIF_CCB_CMD_TYPE_UNFENCED_UPDATE)
+	            || (cmdType == RGXFWIF_CCB_CMD_TYPE_UNFENCED_RMW_UPDATE)
+	            || (cmdType == RGXFWIF_CCB_CMD_TYPE_PADDING));
+
+	if( cmdType !=  RGXFWIF_CCB_CMD_TYPE_PADDING)
+	{
+		cmdStrIdx = ((IMG_UINT32)cmdType & ~RGX_CCB_TYPE_TASK) - (RGXFWIF_CCB_CMD_TYPE_TA & ~RGX_CCB_TYPE_TASK);
+	}
+
+	return aCCBCmdName[cmdStrIdx];
+}
+
+PVRSRV_ERROR CheckForStalledCCB(RGX_CLIENT_CCB  *psCurrentClientCCB)
+{
+	volatile RGXFWIF_CCCB_CTL	*psClientCCBCtrl = psCurrentClientCCB->psClientCCBCtrl;
+	IMG_UINT32 					ui32SampledRdOff = psClientCCBCtrl->ui32ReadOffset;
+	IMG_UINT32 					ui32SampledWrOff = psCurrentClientCCB->ui32HostWriteOffset;
+	PVRSRV_ERROR				eError = PVRSRV_OK;
+
+	if (ui32SampledRdOff > psClientCCBCtrl->ui32WrapMask  ||
+		ui32SampledWrOff > psClientCCBCtrl->ui32WrapMask)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "CheckForStalledCCB: CCCB has invalid offset (ROFF=%d WOFF=%d)",
+				ui32SampledRdOff, ui32SampledWrOff));
+		return  PVRSRV_ERROR_INVALID_OFFSET;
+	}
+
+	if (ui32SampledRdOff != ui32SampledWrOff &&
+				psCurrentClientCCB->ui32LastROff != psCurrentClientCCB->ui32LastWOff &&
+				ui32SampledRdOff == psCurrentClientCCB->ui32LastROff &&
+				(psCurrentClientCCB->ui32ByteCount - psCurrentClientCCB->ui32LastByteCount) < psCurrentClientCCB->ui32Size)
+	{
+		//RGXFWIF_DEV_VIRTADDR v = {0};
+		//DumpStalledCCBCommand(v,psCurrentClientCCB,IMG_NULL);
+
+		/* Don't log this by default unless debugging since a higher up
+		 * function will log the stalled condition. Helps avoid double
+		 *  messages in the log.
+		 */
+		PVR_DPF((PVR_DBG_MESSAGE, "CheckForStalledCCB: CCCB has not progressed (ROFF=%d WOFF=%d)",
+				ui32SampledRdOff, ui32SampledWrOff));
+		eError =  PVRSRV_ERROR_CCCB_STALLED;
+	}
+
+	psCurrentClientCCB->ui32LastROff = ui32SampledRdOff;
+	psCurrentClientCCB->ui32LastWOff = ui32SampledWrOff;
+	psCurrentClientCCB->ui32LastByteCount = psCurrentClientCCB->ui32ByteCount;
+
+	return eError;
+}
+
+IMG_VOID DumpStalledCCBCommand(PRGXFWIF_FWCOMMONCONTEXT sFWCommonContext,
+							   RGX_CLIENT_CCB  *psCurrentClientCCB,
+							   DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf)
+{
+	volatile RGXFWIF_CCCB_CTL	  *psClientCCBCtrl = psCurrentClientCCB->psClientCCBCtrl;
+	IMG_UINT8					  *pui8ClientCCBBuff = psCurrentClientCCB->pui8ClientCCB;
+	volatile IMG_UINT8		   	  *pui8Ptr;
+	IMG_UINT32 					  ui32SampledRdOff = psClientCCBCtrl->ui32ReadOffset;
+	IMG_UINT32 					  ui32SampledDepOff = psClientCCBCtrl->ui32DepOffset;
+	IMG_UINT32 					  ui32SampledWrOff = psCurrentClientCCB->ui32HostWriteOffset;
+
+	pui8Ptr = pui8ClientCCBBuff + ui32SampledRdOff;
+
+	if ((ui32SampledRdOff == ui32SampledDepOff) &&
+		(ui32SampledRdOff != ui32SampledWrOff))
+	{
+		volatile RGXFWIF_CCB_CMD_HEADER *psCommandHeader = (RGXFWIF_CCB_CMD_HEADER *)(pui8ClientCCBBuff + ui32SampledRdOff);
+		RGXFWIF_CCB_CMD_TYPE 	eCommandType = psCommandHeader->eCmdType;
+		volatile IMG_UINT8				*pui8Ptr = (IMG_UINT8 *)psCommandHeader;
+
+		/* CCB is stalled on a fence... */
+		if ((eCommandType == RGXFWIF_CCB_CMD_TYPE_FENCE) || (eCommandType == RGXFWIF_CCB_CMD_TYPE_FENCE_PR))
+		{
+			RGXFWIF_UFO *psUFOPtr = (RGXFWIF_UFO *)(pui8Ptr + sizeof(*psCommandHeader));
+			IMG_UINT32 jj;
+
+			/* Display details of the fence object on which the context is pending */
+			PVR_DUMPDEBUG_LOG(("FWCtx 0x%08X @ %d (%s) pending on %s:",
+							   sFWCommonContext.ui32Addr,
+							   ui32SampledRdOff,
+							   (IMG_PCHAR)&psCurrentClientCCB->szName,
+							   _CCBCmdTypename(eCommandType)));
+			for (jj=0; jj<psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO); jj++)
+			{
+#if !defined(SUPPORT_EXTRA_METASP_DEBUG)
+				PVR_DUMPDEBUG_LOG(("  Addr:0x%08x  Value=0x%08x",psUFOPtr[jj].puiAddrUFO.ui32Addr, psUFOPtr[jj].ui32Value));
+#else
+				PVR_DUMPDEBUG_LOG(("  Addr:0x%08x Value(Host)=0x%08x Value(FW)=0x%08x",
+				                   psUFOPtr[jj].puiAddrUFO.ui32Addr,
+				                   psUFOPtr[jj].ui32Value,
+				                   RGXReadWithSP(psUFOPtr[jj].puiAddrUFO.ui32Addr)));
+#endif
+			}
+
+			/* Advance psCommandHeader past the FENCE to the next command header (this will be the TA/3D command that is fenced) */
+			pui8Ptr = (IMG_UINT8 *)psUFOPtr + psCommandHeader->ui32CmdSize;
+			psCommandHeader = (RGXFWIF_CCB_CMD_HEADER *)pui8Ptr;
+			if( (IMG_UINTPTR_T)psCommandHeader != ((IMG_UINTPTR_T)pui8ClientCCBBuff + ui32SampledWrOff))
+			{
+				PVR_DUMPDEBUG_LOG((" FWCtx 0x%08X fenced command is of type %s",sFWCommonContext.ui32Addr, _CCBCmdTypename(psCommandHeader->eCmdType)));
+				/* Advance psCommandHeader past the TA/3D to the next command header (this will possibly be an UPDATE) */
+				pui8Ptr += sizeof(*psCommandHeader) + psCommandHeader->ui32CmdSize;
+				psCommandHeader = (RGXFWIF_CCB_CMD_HEADER *)pui8Ptr;
+				/* If the next command is an update, display details of that so we can see what would then become unblocked */
+				if( (IMG_UINTPTR_T)psCommandHeader != ((IMG_UINTPTR_T)pui8ClientCCBBuff + ui32SampledWrOff))
+				{
+					eCommandType = psCommandHeader->eCmdType;
+
+					if (eCommandType == RGXFWIF_CCB_CMD_TYPE_UPDATE)
+					{
+						psUFOPtr = (RGXFWIF_UFO *)((IMG_UINT8 *)psCommandHeader + sizeof(*psCommandHeader));
+						PVR_DUMPDEBUG_LOG((" preventing %s:",_CCBCmdTypename(eCommandType)));
+						for (jj=0; jj<psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO); jj++)
+						{
+#if !defined(SUPPORT_EXTRA_METASP_DEBUG)
+							PVR_DUMPDEBUG_LOG(("  Addr:0x%08x  Value=0x%08x",psUFOPtr[jj].puiAddrUFO.ui32Addr, psUFOPtr[jj].ui32Value));
+#else
+							PVR_DUMPDEBUG_LOG(("  Addr:0x%08x Value(Host)=0x%08x Value(FW)=0x%08x",
+							                   psUFOPtr[jj].puiAddrUFO.ui32Addr,
+							                   psUFOPtr[jj].ui32Value,
+							                   RGXReadWithSP(psUFOPtr[jj].puiAddrUFO.ui32Addr)));
+#endif
+						}
+					}
+				}
+				else
+				{
+					PVR_DUMPDEBUG_LOG((" FWCtx 0x%08X has no further commands",sFWCommonContext.ui32Addr));
+				}
+			}
+			else
+			{
+				PVR_DUMPDEBUG_LOG((" FWCtx 0x%08X has no further commands",sFWCommonContext.ui32Addr));
+			}
+		}
+	}
+}
+
+/******************************************************************************
+ End of file (rgxccb.c)
+******************************************************************************/
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxccb.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxccb.h
new file mode 100644
index 0000000..04e79a1
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxccb.h
@@ -0,0 +1,167 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX Circular Command Buffer functionality.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX Circular Command Buffer functionality.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXCCB_H__)
+#define __RGXCCB_H__
+
+#include "devicemem.h"
+#include "device.h"
+#include "rgxdevice.h"
+#include "sync_server.h"
+#include "connection_server.h"
+#include "rgx_fwif_shared.h"
+#include "rgxdebug.h"
+
+#define MAX_CLIENT_CCB_NAME	30
+
+typedef struct _RGX_CLIENT_CCB_ RGX_CLIENT_CCB;
+
+/*
+	This structure is declared here as it's allocated on the heap by
+	the callers
+*/
+
+typedef struct _RGX_CCB_CMD_HELPER_DATA_ {
+	/* Data setup at command init time */
+	RGX_CLIENT_CCB  		*psClientCCB;
+	IMG_CHAR 				*pszCommandName;
+	IMG_BOOL 				bPDumpContinuous;
+	
+	IMG_UINT32				ui32ClientFenceCount;
+	PRGXFWIF_UFO_ADDR		*pauiFenceUFOAddress;
+	IMG_UINT32				*paui32FenceValue;
+	IMG_UINT32				ui32ClientUpdateCount;
+	PRGXFWIF_UFO_ADDR		*pauiUpdateUFOAddress;
+	IMG_UINT32				*paui32UpdateValue;
+
+	IMG_UINT32				ui32ServerSyncCount;
+	IMG_UINT32				*paui32ServerSyncFlags;
+	SERVER_SYNC_PRIMITIVE	**papsServerSyncs;
+
+	RGXFWIF_CCB_CMD_TYPE	eType;
+	IMG_UINT32				ui32CmdSize;
+	IMG_UINT8				*pui8DMCmd;
+	IMG_UINT32				ui32FenceCmdSize;
+	IMG_UINT32				ui32DMCmdSize;
+	IMG_UINT32				ui32UpdateCmdSize;
+	IMG_UINT32				ui32UnfencedUpdateCmdSize;
+
+	/* timestamp commands */
+	RGXFWIF_DEV_VIRTADDR    pPreTimestamp;
+	IMG_UINT32              ui32PreTimeStampCmdSize;
+	RGXFWIF_DEV_VIRTADDR    pPostTimestamp;
+	IMG_UINT32              ui32PostTimeStampCmdSize;
+	PRGXFWIF_UFO_ADDR       pRMWUFOAddr;
+	IMG_UINT32              ui32RMWUFOCmdSize;
+
+	/* Data setup at command acquire time */
+	IMG_UINT8				*pui8StartPtr;
+	IMG_UINT8				*pui8ServerUpdateStart;
+	IMG_UINT8				*pui8ServerUnfencedUpdateStart;
+	IMG_UINT8				*pui8ServerFenceStart;
+	IMG_UINT32				ui32ServerFenceCount;
+	IMG_UINT32				ui32ServerUpdateCount;
+	IMG_UINT32				ui32ServerUnfencedUpdateCount;
+
+} RGX_CCB_CMD_HELPER_DATA;
+
+#define PADDING_COMMAND_SIZE	(sizeof(RGXFWIF_CCB_CMD_HEADER))
+
+PVRSRV_ERROR RGXCreateCCB(PVRSRV_DEVICE_NODE	*psDeviceNode,
+						  IMG_UINT32			ui32CCBSizeLog2,
+						  CONNECTION_DATA		*psConnectionData,
+						  const IMG_CHAR		*pszName,
+						  RGX_SERVER_COMMON_CONTEXT *psServerCommonContext,
+						  RGX_CLIENT_CCB		**ppsClientCCB,
+						  DEVMEM_MEMDESC 		**ppsClientCCBMemDesc,
+						  DEVMEM_MEMDESC 		**ppsClientCCBCtlMemDesc);
+
+IMG_VOID RGXDestroyCCB(RGX_CLIENT_CCB *psClientCCB);
+
+PVRSRV_ERROR RGXAcquireCCB(RGX_CLIENT_CCB *psClientCCB,
+										IMG_UINT32		ui32CmdSize,
+										IMG_PVOID		*ppvBufferSpace,
+										IMG_BOOL		bPDumpContinuous);
+
+IMG_INTERNAL IMG_VOID RGXReleaseCCB(RGX_CLIENT_CCB *psClientCCB,
+									IMG_UINT32		ui32CmdSize,
+									IMG_BOOL		bPDumpContinuous);
+
+IMG_UINT32 RGXGetHostWriteOffsetCCB(RGX_CLIENT_CCB *psClientCCB);
+
+PVRSRV_ERROR RGXCmdHelperInitCmdCCB(RGX_CLIENT_CCB          *psClientCCB,
+                                    IMG_UINT32              ui32ClientFenceCount,
+                                    PRGXFWIF_UFO_ADDR       *pauiFenceUFOAddress,
+                                    IMG_UINT32              *paui32FenceValue,
+                                    IMG_UINT32              ui32ClientUpdateCount,
+                                    PRGXFWIF_UFO_ADDR       *pauiUpdateUFOAddress,
+                                    IMG_UINT32              *paui32UpdateValue,
+                                    IMG_UINT32              ui32ServerSyncCount,
+                                    IMG_UINT32              *paui32ServerSyncFlags,
+                                    SERVER_SYNC_PRIMITIVE   **pasServerSyncs,
+                                    IMG_UINT32              ui32CmdSize,
+                                    IMG_UINT8               *pui8DMCmd,
+                                    RGXFWIF_DEV_VIRTADDR    *ppPreTimestamp,
+                                    RGXFWIF_DEV_VIRTADDR    *ppPostTimestamp,
+                                    RGXFWIF_DEV_VIRTADDR    *ppRMWUFOAddr,
+                                    RGXFWIF_CCB_CMD_TYPE    eType,
+                                    IMG_BOOL                bPDumpContinuous,
+                                    IMG_CHAR                *pszCommandName,
+                                    RGX_CCB_CMD_HELPER_DATA *psCmdHelperData);
+
+PVRSRV_ERROR RGXCmdHelperAcquireCmdCCB(IMG_UINT32 ui32CmdCount,
+									   RGX_CCB_CMD_HELPER_DATA *asCmdHelperData,
+									   IMG_BOOL *pbKickRequired);
+
+IMG_VOID RGXCmdHelperReleaseCmdCCB(IMG_UINT32 ui32CmdCount,
+								   RGX_CCB_CMD_HELPER_DATA *asCmdHelperData,
+								   const IMG_CHAR *pcszDMName,
+								   IMG_UINT32 ui32CtxAddr);
+
+IMG_UINT32 RGXCmdHelperGetCommandSize(IMG_UINT32 ui32CmdCount,
+								   RGX_CCB_CMD_HELPER_DATA *asCmdHelperData);
+
+IMG_VOID DumpStalledCCBCommand(PRGXFWIF_FWCOMMONCONTEXT sFWCommonContext, RGX_CLIENT_CCB  *psCurrentClientCCB, DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf);
+
+PVRSRV_ERROR CheckForStalledCCB(RGX_CLIENT_CCB  *psCurrentClientCCB);
+#endif /* __RGXCCB_H__ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxcompute.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxcompute.c
new file mode 100644
index 0000000..fe7e6b7
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxcompute.c
@@ -0,0 +1,534 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX Compute routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX Compute routines
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "srvkm.h"
+#include "pdump_km.h"
+#include "pvr_debug.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgxcompute.h"
+#include "rgxmem.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "osfunc.h"
+#include "rgxccb.h"
+#include "rgxhwperf.h"
+#include "rgxtimerquery.h"
+
+#include "sync_server.h"
+#include "sync_internal.h"
+#include "rgx_memallocflags.h"
+#include "rgxsync.h"
+
+struct _RGX_SERVER_COMPUTE_CONTEXT_ {
+	PVRSRV_DEVICE_NODE			*psDeviceNode;
+	RGX_SERVER_COMMON_CONTEXT	*psServerCommonContext;
+	DEVMEM_MEMDESC				*psFWFrameworkMemDesc;
+	DEVMEM_MEMDESC				*psFWComputeContextStateMemDesc;
+	PVRSRV_CLIENT_SYNC_PRIM		*psSync;
+	DLLIST_NODE					sListNode;
+};
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXCreateComputeContextKM(CONNECTION_DATA			*psConnection,
+											 PVRSRV_DEVICE_NODE			*psDeviceNode,
+											 IMG_UINT32					ui32Priority,
+											 IMG_DEV_VIRTADDR			sMCUFenceAddr,
+											 IMG_UINT32					ui32FrameworkCommandSize,
+											 IMG_PBYTE					pbyFrameworkCommand,
+											 IMG_HANDLE					hMemCtxPrivData,
+											 RGX_SERVER_COMPUTE_CONTEXT	**ppsComputeContext)
+{
+	PVRSRV_RGXDEV_INFO 			*psDevInfo = psDeviceNode->pvDevice;
+	DEVMEM_MEMDESC				*psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+	RGX_SERVER_COMPUTE_CONTEXT	*psComputeContext;
+	RGX_COMMON_CONTEXT_INFO		sInfo;
+	PVRSRV_ERROR				eError = PVRSRV_OK;
+
+	/* Prepare cleanup struct */
+	*ppsComputeContext = IMG_NULL;
+	psComputeContext = OSAllocMem(sizeof(*psComputeContext));
+	if (psComputeContext == IMG_NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	OSMemSet(psComputeContext, 0, sizeof(*psComputeContext));
+
+	psComputeContext->psDeviceNode = psDeviceNode;
+
+	/* Allocate cleanup sync */
+	eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+						   &psComputeContext->psSync,
+						   "compute cleanup");
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateComputeContextKM: Failed to allocate cleanup sync (0x%x)",
+				eError));
+		goto fail_syncalloc;
+	}
+
+	/*
+		Allocate device memory for the firmware GPU context suspend state.
+		Note: the FW reads/writes the state to memory by accessing the GPU register interface.
+	*/
+	PDUMPCOMMENT("Allocate RGX firmware compute context suspend state");
+
+	eError = DevmemFwAllocate(psDevInfo,
+							  sizeof(RGXFWIF_COMPUTECTX_STATE),
+							  RGX_FWCOMCTX_ALLOCFLAGS,
+							  "ComputeContextState",
+							  &psComputeContext->psFWComputeContextStateMemDesc);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateComputeContextKM: Failed to allocate firmware GPU context suspend state (%u)",
+				eError));
+		goto fail_contextsuspendalloc;
+	}
+
+	/* 
+	 * Create the FW framework buffer
+	 */
+	eError = PVRSRVRGXFrameworkCreateKM(psDeviceNode,
+										&psComputeContext->psFWFrameworkMemDesc,
+										ui32FrameworkCommandSize);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateComputeContextKM: Failed to allocate firmware GPU framework state (%u)",
+				eError));
+		goto fail_frameworkcreate;
+	}
+
+	/* Copy the Framework client data into the framework buffer */
+	eError = PVRSRVRGXFrameworkCopyCommand(psComputeContext->psFWFrameworkMemDesc,
+										   pbyFrameworkCommand,
+										   ui32FrameworkCommandSize);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateComputeContextKM: Failed to populate the framework buffer (%u)",
+				eError));
+		goto fail_frameworkcopy;
+	}
+	
+	sInfo.psFWFrameworkMemDesc = psComputeContext->psFWFrameworkMemDesc;
+	sInfo.psMCUFenceAddr = &sMCUFenceAddr;
+
+	eError = FWCommonContextAllocate(psConnection,
+									 psDeviceNode,
+									 "CDM",
+									 IMG_NULL,
+									 0,
+									 psFWMemContextMemDesc,
+									 psComputeContext->psFWComputeContextStateMemDesc,
+									 RGX_CCB_SIZE_LOG2,
+									 ui32Priority,
+									 &sInfo,
+									 &psComputeContext->psServerCommonContext);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_contextalloc;
+	}
+
+	{
+		PVRSRV_RGXDEV_INFO			*psDevInfo = psDeviceNode->pvDevice;
+
+		OSWRLockAcquireWrite(psDevInfo->hComputeCtxListLock);
+		dllist_add_to_tail(&(psDevInfo->sComputeCtxtListHead), &(psComputeContext->sListNode));
+		OSWRLockReleaseWrite(psDevInfo->hComputeCtxListLock);
+	}
+
+	*ppsComputeContext = psComputeContext;
+	return PVRSRV_OK;
+
+fail_contextalloc:
+fail_frameworkcopy:
+	DevmemFwFree(psComputeContext->psFWFrameworkMemDesc);
+fail_frameworkcreate:
+	DevmemFwFree(psComputeContext->psFWComputeContextStateMemDesc);
+fail_contextsuspendalloc:
+	SyncPrimFree(psComputeContext->psSync);
+fail_syncalloc:
+	OSFreeMem(psComputeContext);
+	return eError;
+}
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXDestroyComputeContextKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext)
+{
+	PVRSRV_ERROR				eError = PVRSRV_OK;
+	PVRSRV_RGXDEV_INFO *psDevInfo = psComputeContext->psDeviceNode->pvDevice;
+
+	/* Check if the FW has finished with this resource ... */
+	eError = RGXFWRequestCommonContextCleanUp(psComputeContext->psDeviceNode,
+											  FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext),
+											  psComputeContext->psSync,
+											  RGXFWIF_DM_CDM);
+
+	if (eError == PVRSRV_ERROR_RETRY)
+	{
+		return eError;
+	}
+	else if (eError != PVRSRV_OK)
+	{
+		PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
+				__FUNCTION__,
+				PVRSRVGetErrorStringKM(eError)));
+		return eError;
+	}
+
+	/* ... it has so we can free its resources */
+
+	OSWRLockAcquireWrite(psDevInfo->hComputeCtxListLock);
+	dllist_remove_node(&(psComputeContext->sListNode));
+	OSWRLockReleaseWrite(psDevInfo->hComputeCtxListLock);
+
+	FWCommonContextFree(psComputeContext->psServerCommonContext);
+	DevmemFwFree(psComputeContext->psFWFrameworkMemDesc);
+	DevmemFwFree(psComputeContext->psFWComputeContextStateMemDesc);
+	SyncPrimFree(psComputeContext->psSync);
+	OSFreeMem(psComputeContext);
+
+	return PVRSRV_OK;
+}
+
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXKickCDMKM(RGX_SERVER_COMPUTE_CONTEXT	*psComputeContext,
+								IMG_UINT32					ui32ClientFenceCount,
+								PRGXFWIF_UFO_ADDR			*pauiClientFenceUFOAddress,
+								IMG_UINT32					*paui32ClientFenceValue,
+								IMG_UINT32					ui32ClientUpdateCount,
+								PRGXFWIF_UFO_ADDR			*pauiClientUpdateUFOAddress,
+								IMG_UINT32					*paui32ClientUpdateValue,
+								IMG_UINT32					ui32ServerSyncPrims,
+								IMG_UINT32					*paui32ServerSyncFlags,
+								SERVER_SYNC_PRIMITIVE 		**pasServerSyncs,
+								IMG_UINT32					ui32CmdSize,
+								IMG_PBYTE					pui8DMCmd,
+								IMG_BOOL					bPDumpContinuous,
+							    IMG_UINT32					ui32ExtJobRef,
+								IMG_UINT32					ui32IntJobRef)
+{
+	RGXFWIF_KCCB_CMD		sCmpKCCBCmd;
+	RGX_CCB_CMD_HELPER_DATA	asCmdHelperData[1];
+	IMG_BOOL				bKickRequired;
+	PVRSRV_ERROR			eError;
+	PVRSRV_ERROR			eError2;
+	IMG_UINT32				i;
+
+	RGXFWIF_DEV_VIRTADDR	pPreTimestamp;
+	RGXFWIF_DEV_VIRTADDR	pPostTimestamp;
+	PRGXFWIF_UFO_ADDR		pRMWUFOAddr;
+
+
+	/* Sanity check the server fences */
+	for (i=0;i<ui32ServerSyncPrims;i++)
+	{
+		if (!(paui32ServerSyncFlags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK))
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Server fence (on CDM) must fence", __FUNCTION__));
+			return PVRSRV_ERROR_INVALID_SYNC_PRIM_OP;
+		}
+	}
+
+	RGX_GetTimestampCmdHelper((PVRSRV_RGXDEV_INFO*) psComputeContext->psDeviceNode->pvDevice,
+	                          & pPreTimestamp,
+	                          & pPostTimestamp,
+	                          & pRMWUFOAddr);
+
+	eError = RGXCmdHelperInitCmdCCB(FWCommonContextGetClientCCB(psComputeContext->psServerCommonContext),
+	                                ui32ClientFenceCount,
+	                                pauiClientFenceUFOAddress,
+	                                paui32ClientFenceValue,
+	                                ui32ClientUpdateCount,
+	                                pauiClientUpdateUFOAddress,
+	                                paui32ClientUpdateValue,
+	                                ui32ServerSyncPrims,
+	                                paui32ServerSyncFlags,
+	                                pasServerSyncs,
+	                                ui32CmdSize,
+	                                pui8DMCmd,
+	                                & pPreTimestamp,
+	                                & pPostTimestamp,
+	                                & pRMWUFOAddr,
+	                                RGXFWIF_CCB_CMD_TYPE_CDM,
+	                                bPDumpContinuous,
+	                                "Compute",
+	                                asCmdHelperData);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_cmdinit;
+	}
+
+	eError = RGXCmdHelperAcquireCmdCCB(IMG_ARR_NUM_ELEMS(asCmdHelperData), 
+	                                   asCmdHelperData, &bKickRequired);
+	if ((eError != PVRSRV_OK) && (!bKickRequired))
+	{
+		/*
+			Only bail if no new data was submitted into the client CCB, we might
+			have already submitted a padding packet which we should flush through
+			the FW.
+		*/
+		PVR_DPF((PVR_DBG_ERROR, "RGXKickCDM: Failed to create client CCB command"));
+		goto fail_cmdaquire;
+	}
+
+
+	/*
+		We should reserved space in the kernel CCB here and fill in the command
+		directly.
+		This is so if there isn't space in the kernel CCB we can return with
+		retry back to services client before we take any operations
+	*/
+
+	/*
+		We might only be kicking for flush out a padding packet so only submit
+		the command if the create was successful
+	*/
+	if (eError == PVRSRV_OK)
+	{
+		/*
+			All the required resources are ready at this point, we can't fail so
+			take the required server sync operations and commit all the resources
+		*/
+		RGXCmdHelperReleaseCmdCCB(1, asCmdHelperData, "CDM", FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext).ui32Addr);
+	}
+
+	/* Construct the kernel compute CCB command. */
+	sCmpKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+	sCmpKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext);
+	sCmpKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psComputeContext->psServerCommonContext));
+	sCmpKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+
+	/*
+	 * Submit the compute command to the firmware.
+	 */
+	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+	{
+		eError2 = RGXScheduleCommand(psComputeContext->psDeviceNode->pvDevice,
+									RGXFWIF_DM_CDM,
+									&sCmpKCCBCmd,
+									sizeof(sCmpKCCBCmd),
+									bPDumpContinuous);
+		if (eError2 != PVRSRV_ERROR_RETRY)
+		{
+			break;
+		}
+		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+	} END_LOOP_UNTIL_TIMEOUT();
+	
+	if (eError2 != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXKickCDMKM failed to schedule kernel CCB command. (0x%x)", eError));
+	}
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+	else
+	{
+		RGXHWPerfFTraceGPUEnqueueEvent(psComputeContext->psDeviceNode->pvDevice,
+				ui32ExtJobRef, ui32IntJobRef, "CDM");
+	}
+#endif
+	/*
+	 * Now check eError (which may have returned an error from our earlier call
+	 * to RGXCmdHelperAcquireCmdCCB) - we needed to process any flush command first
+	 * so we check it now...
+	 */
+	if (eError != PVRSRV_OK )
+	{
+		goto fail_cmdaquire;
+	}
+
+	return PVRSRV_OK;
+
+fail_cmdaquire:
+fail_cmdinit:
+	return eError;
+}
+
+IMG_EXPORT PVRSRV_ERROR PVRSRVRGXFlushComputeDataKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext)
+{
+	RGXFWIF_KCCB_CMD sFlushCmd;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+#if defined(PDUMP)
+	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Submit Compute flush");
+#endif
+	sFlushCmd.eCmdType = RGXFWIF_KCCB_CMD_SLCFLUSHINVAL;
+	sFlushCmd.uCmdData.sSLCFlushInvalData.bInval = IMG_FALSE;
+	sFlushCmd.uCmdData.sSLCFlushInvalData.bDMContext = IMG_TRUE;
+	sFlushCmd.uCmdData.sSLCFlushInvalData.eDM = RGXFWIF_DM_CDM;
+	sFlushCmd.uCmdData.sSLCFlushInvalData.psContext = FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext);
+
+	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+	{
+		eError = RGXScheduleCommand(psComputeContext->psDeviceNode->pvDevice,
+									RGXFWIF_DM_GP,
+									&sFlushCmd,
+									sizeof(sFlushCmd),
+									IMG_TRUE);
+		if (eError != PVRSRV_ERROR_RETRY)
+		{
+			break;
+		}
+		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+	} END_LOOP_UNTIL_TIMEOUT();
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXFlushComputeDataKM: Failed to schedule SLC flush command with error (%u)", eError));
+	}
+	else
+	{
+		/* Wait for the SLC flush to complete */
+		eError = RGXWaitForFWOp(psComputeContext->psDeviceNode->pvDevice,
+								RGXFWIF_DM_GP,
+								psComputeContext->psSync,
+								IMG_TRUE);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXFlushComputeDataKM: Compute flush aborted with error (%u)", eError));
+		}
+	}
+	return eError;
+}
+
+PVRSRV_ERROR PVRSRVRGXSetComputeContextPriorityKM(CONNECTION_DATA *psConnection,
+												  RGX_SERVER_COMPUTE_CONTEXT *psComputeContext,
+												  IMG_UINT32 ui32Priority)
+{
+	PVRSRV_ERROR eError;
+
+	eError = ContextSetPriority(psComputeContext->psServerCommonContext,
+								psConnection,
+								psComputeContext->psDeviceNode->pvDevice,
+								ui32Priority,
+								RGXFWIF_DM_CDM);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority of the compute context (%s)", __FUNCTION__, PVRSRVGetErrorStringKM(eError)));
+	}
+	return eError;
+}
+
+static IMG_BOOL CheckForStalledComputeCtxtCommand(PDLLIST_NODE psNode, IMG_PVOID pvCallbackData)
+{
+	RGX_SERVER_COMPUTE_CONTEXT 		*psCurrentServerComputeCtx = IMG_CONTAINER_OF(psNode, RGX_SERVER_COMPUTE_CONTEXT, sListNode);
+	RGX_SERVER_COMMON_CONTEXT		*psCurrentServerComputeCommonCtx = psCurrentServerComputeCtx->psServerCommonContext;
+	DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf = pvCallbackData;
+
+	DumpStalledFWCommonContext(psCurrentServerComputeCommonCtx, pfnDumpDebugPrintf);
+	return IMG_TRUE;
+}
+IMG_VOID CheckForStalledComputeCtxt(PVRSRV_RGXDEV_INFO *psDevInfo,
+									DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf)
+{
+	OSWRLockAcquireRead(psDevInfo->hComputeCtxListLock);
+	dllist_foreach_node(&(psDevInfo->sComputeCtxtListHead),
+						CheckForStalledComputeCtxtCommand, pfnDumpDebugPrintf);
+	OSWRLockReleaseRead(psDevInfo->hComputeCtxListLock);
+}
+
+static IMG_BOOL CheckForStalledClientComputeCtxtCommand(PDLLIST_NODE psNode, IMG_PVOID pvCallbackData)
+{
+	PVRSRV_ERROR *peError = (PVRSRV_ERROR*)pvCallbackData;
+	RGX_SERVER_COMPUTE_CONTEXT *psCurrentServerComputeCtx = IMG_CONTAINER_OF(psNode, RGX_SERVER_COMPUTE_CONTEXT, sListNode);
+	RGX_SERVER_COMMON_CONTEXT *psCurrentServerComputeCommonCtx = psCurrentServerComputeCtx->psServerCommonContext;
+
+	if (PVRSRV_ERROR_CCCB_STALLED == CheckStalledClientCommonContext(psCurrentServerComputeCommonCtx))
+	{
+		*peError = PVRSRV_ERROR_CCCB_STALLED;
+	}
+
+	return IMG_TRUE;
+}
+IMG_BOOL CheckForStalledClientComputeCtxt(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	OSWRLockAcquireRead(psDevInfo->hComputeCtxListLock);
+	dllist_foreach_node(&(psDevInfo->sComputeCtxtListHead), 
+						CheckForStalledClientComputeCtxtCommand, &eError);
+	OSWRLockReleaseRead(psDevInfo->hComputeCtxListLock);
+	return (PVRSRV_ERROR_CCCB_STALLED == eError)? IMG_TRUE: IMG_FALSE;
+}
+
+IMG_EXPORT PVRSRV_ERROR 
+PVRSRVRGXKickSyncCDMKM(RGX_SERVER_COMPUTE_CONTEXT  *psComputeContext,
+                       IMG_UINT32                  ui32ClientFenceCount,
+                       PRGXFWIF_UFO_ADDR           *pauiClientFenceUFOAddress,
+                       IMG_UINT32                  *paui32ClientFenceValue,
+                       IMG_UINT32                  ui32ClientUpdateCount,
+                       PRGXFWIF_UFO_ADDR           *pauiClientUpdateUFOAddress,
+                       IMG_UINT32                  *paui32ClientUpdateValue,
+                       IMG_UINT32                  ui32ServerSyncPrims,
+                       IMG_UINT32                  *paui32ServerSyncFlags,
+                       SERVER_SYNC_PRIMITIVE       **pasServerSyncs,
+					   IMG_UINT32				   ui32NumCheckFenceFDs,
+					   IMG_INT32				   *pai32CheckFenceFDs,
+					   IMG_INT32                   i32UpdateFenceFD,
+                       IMG_BOOL                    bPDumpContinuous)
+{
+	if (ui32NumCheckFenceFDs > 0 || i32UpdateFenceFD >= 0)
+	{
+		return PVRSRV_ERROR_NOT_IMPLEMENTED;
+	}
+
+	return RGXKickSyncKM(psComputeContext->psDeviceNode,
+	                     psComputeContext->psServerCommonContext,
+	                     RGXFWIF_DM_CDM,
+						 "SyncCDM",
+	                     ui32ClientFenceCount,
+	                     pauiClientFenceUFOAddress,
+	                     paui32ClientFenceValue,
+	                     ui32ClientUpdateCount,
+	                     pauiClientUpdateUFOAddress,
+	                     paui32ClientUpdateValue,
+	                     ui32ServerSyncPrims,
+	                     paui32ServerSyncFlags,
+	                     pasServerSyncs,
+	                     bPDumpContinuous);
+}
+/******************************************************************************
+ End of file (rgxcompute.c)
+******************************************************************************/
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxcompute.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxcompute.h
new file mode 100644
index 0000000..02d9035
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxcompute.h
@@ -0,0 +1,179 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX compute functionality
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX compute functionality
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXCOMPUTE_H__)
+#define __RGXCOMPUTE_H__
+
+#include "devicemem.h"
+#include "device.h"
+#include "rgxfwutils.h"
+#include "rgx_fwif_resetframework.h"
+#include "rgxdebug.h"
+
+#include "sync_server.h"
+#include "sync_internal.h"
+#include "connection_server.h"
+
+
+typedef struct _RGX_SERVER_COMPUTE_CONTEXT_ RGX_SERVER_COMPUTE_CONTEXT;
+
+/*!
+*******************************************************************************
+ @Function	PVRSRVRGXCreateComputeContextKM
+
+ @Description
+	
+
+ @Input pvDeviceNode 
+ @Input psCmpCCBMemDesc - 
+ @Input psCmpCCBCtlMemDesc - 
+ @Output ppsFWComputeContextMemDesc - 
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXCreateComputeContextKM(CONNECTION_DATA			*psConnection,
+											 PVRSRV_DEVICE_NODE			*psDeviceNode,
+											 IMG_UINT32					ui32Priority,
+											 IMG_DEV_VIRTADDR			sMCUFenceAddr,
+											 IMG_UINT32					ui32FrameworkRegisterSize,
+											 IMG_PBYTE					pbyFrameworkRegisters,
+											 IMG_HANDLE					hMemCtxPrivData,
+											 RGX_SERVER_COMPUTE_CONTEXT	**ppsComputeContext);
+
+/*! 
+*******************************************************************************
+ @Function	PVRSRVRGXDestroyComputeContextKM
+
+ @Description
+	Server-side implementation of RGXDestroyComputeContext
+
+ @Input psCleanupData - 
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXDestroyComputeContextKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext);
+
+
+/*!
+*******************************************************************************
+ @Function	PVRSRVRGXKickCDMKM
+
+ @Description
+	Server-side implementation of RGXKickCDM
+
+ @Input psDeviceNode - RGX Device node
+ @Input psFWComputeContextMemDesc - Mem desc for firmware compute context
+ @Input ui32cCCBWoffUpdate - New fw Woff for the client CDM CCB
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXKickCDMKM(RGX_SERVER_COMPUTE_CONTEXT	*psComputeContext,
+								IMG_UINT32					ui32ClientFenceCount,
+								PRGXFWIF_UFO_ADDR			*pauiClientFenceUFOAddress,
+								IMG_UINT32					*paui32ClientFenceValue,
+								IMG_UINT32					ui32ClientUpdateCount,
+								PRGXFWIF_UFO_ADDR			*pauiClientUpdateUFOAddress,
+								IMG_UINT32					*paui32ClientUpdateValue,
+								IMG_UINT32					ui32ServerSyncPrims,
+								IMG_UINT32					*paui32ServerSyncFlags,
+								SERVER_SYNC_PRIMITIVE 		**pasServerSyncs,
+								IMG_UINT32					ui32CmdSize,
+								IMG_PBYTE					pui8DMCmd,
+								IMG_BOOL					bPDumpContinuous,
+								IMG_UINT32					ui32ExtJobRef,
+								IMG_UINT32					ui32IntJobRef);
+
+/*!
+*******************************************************************************
+ @Function	PVRSRVRGXFlushComputeDataKM
+
+ @Description
+	Server-side implementation of RGXFlushComputeData
+
+ @Input psComputeContext - Compute context to flush
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXFlushComputeDataKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext);
+
+PVRSRV_ERROR PVRSRVRGXSetComputeContextPriorityKM(CONNECTION_DATA *psConnection,
+												  RGX_SERVER_COMPUTE_CONTEXT *psComputeContext,
+												  IMG_UINT32 ui32Priority);
+
+/* Debug - check if compute context is waiting on a fence */
+IMG_VOID CheckForStalledComputeCtxt(PVRSRV_RGXDEV_INFO *psDevInfo,
+									DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf);
+
+/* Debug/Watchdog - check if client compute contexts are stalled */
+IMG_BOOL CheckForStalledClientComputeCtxt(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+/*!
+*******************************************************************************
+ @Function	PVRSRVRGXKickSyncCDMKM
+
+ @Description
+	Sending a sync kick command though this CDM context
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+IMG_EXPORT PVRSRV_ERROR 
+PVRSRVRGXKickSyncCDMKM(RGX_SERVER_COMPUTE_CONTEXT  *psComputeContext,
+                       IMG_UINT32                  ui32ClientFenceCount,
+                       PRGXFWIF_UFO_ADDR           *pauiClientFenceUFOAddress,
+                       IMG_UINT32                  *paui32ClientFenceValue,
+                       IMG_UINT32                  ui32ClientUpdateCount,
+                       PRGXFWIF_UFO_ADDR           *pauiClientUpdateUFOAddress,
+                       IMG_UINT32                  *paui32ClientUpdateValue,
+                       IMG_UINT32                  ui32ServerSyncPrims,
+                       IMG_UINT32                  *paui32ServerSyncFlags,
+                       SERVER_SYNC_PRIMITIVE       **pasServerSyncs,
+					   IMG_UINT32				   ui32NumCheckFenceFDs,
+					   IMG_INT32				   *pai32CheckFenceFDs,
+					   IMG_INT32                   i32UpdateFenceFD,
+                       IMG_BOOL                    bPDumpContinuous);
+
+#endif /* __RGXCOMPUTE_H__ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxdebug.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxdebug.c
new file mode 100644
index 0000000..9063cba
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxdebug.c
@@ -0,0 +1,3109 @@
+/*************************************************************************/ /*!
+@File
+@Title          Rgx debug information
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX debugging functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+//#define PVR_DPF_FUNCTION_TRACE_ON 1
+#undef PVR_DPF_FUNCTION_TRACE_ON
+
+#include "rgxdefs_km.h"
+#include "rgxdevice.h"
+#include "rgxmem.h"
+#include "allocmem.h"
+#include "osfunc.h"
+
+#include "lists.h"
+
+#include "rgxdebug.h"
+#include "pvrversion.h"
+#include "pvr_debug.h"
+#include "srvkm.h"
+#include "rgxutils.h"
+#include "tlstream.h"
+#include "rgxfwutils.h"
+#include "pvrsrv.h"
+#include "services.h"
+
+#include "devicemem_pdump.h"
+
+#include "rgx_fwif.h"
+#include "pvrsrv.h"
+
+#if defined(PVRSRV_ENABLE_FW_TRACE_DEBUGFS)
+#include "rgx_fwif_sf.h"
+#include "rgxfw_log_helper.h"
+#endif
+
+#include "rgxta3d.h"
+#include "rgxcompute.h"
+#include "rgxtransfer.h"
+#if defined(RGX_FEATURE_RAY_TRACING)
+#include "rgxray.h"
+#endif
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+#include "devicemem_history_server.h"
+#endif
+
+
+#define RGX_DEBUG_STR_SIZE	(150)
+
+#define RGX_CR_BIF_CAT_BASE0                              (0x1200U)
+#define RGX_CR_BIF_CAT_BASE1                              (0x1208U)
+
+#define RGX_CR_BIF_CAT_BASEN(n) \
+	RGX_CR_BIF_CAT_BASE0 + \
+	((RGX_CR_BIF_CAT_BASE1 - RGX_CR_BIF_CAT_BASE0) * n)
+
+
+#if defined(RGX_FEATURE_RAY_TRACING)
+#define RGXDBG_BIF_IDS \
+	X(BIF0)\
+	X(BIF1)\
+	X(TEXAS_BIF)\
+	X(DPX_BIF)
+#else
+#define RGXDBG_BIF_IDS \
+	X(BIF0)\
+	X(BIF1)\
+	X(TEXAS_BIF)
+#endif
+
+#define RGXDBG_SIDEBAND_TYPES \
+	X(META)\
+	X(TLA)\
+	X(DMA)\
+	X(VDMM)\
+	X(CDM)\
+	X(IPP)\
+	X(PM)\
+	X(TILING)\
+	X(MCU)\
+	X(PDS)\
+	X(PBE)\
+	X(VDMS)\
+	X(IPF)\
+	X(ISP)\
+	X(TPF)\
+	X(USCS)\
+	X(PPP)\
+	X(VCE)\
+	X(TPF_CPF)\
+	X(IPF_CPF)\
+	X(FBCDC)
+
+typedef enum
+{
+#define X(NAME) RGXDBG_##NAME,
+	RGXDBG_BIF_IDS
+#undef X
+} RGXDBG_BIF_ID;
+
+typedef enum
+{
+#define X(NAME) RGXDBG_##NAME,
+	RGXDBG_SIDEBAND_TYPES
+#undef X
+} RGXDBG_SIDEBAND_TYPE;
+
+
+IMG_CHAR* pszPowStateName [] = {
+#define X(NAME)	#NAME,
+	RGXFWIF_POW_STATES
+#undef X
+};
+
+IMG_CHAR* pszBIFNames [] = {
+#define X(NAME)	#NAME,
+	RGXDBG_BIF_IDS
+#undef X
+};
+
+extern IMG_UINT32 g_ui32HostSampleIRQCount;
+
+
+IMG_UINT32 RGXReadWithSP(IMG_UINT32 ui32FWAddr)
+{
+	PVRSRV_DATA        *psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_DEVICE_NODE *psDeviceNode = psPVRSRVData->apsRegisteredDevNodes[0];
+	PVRSRV_RGXDEV_INFO *psDevInfo    = psDeviceNode->pvDevice;
+	IMG_UINT32         ui32Value     = 0;
+	PVRSRV_ERROR       eError;
+
+	eError = RGXReadMETAAddr(psDevInfo, ui32FWAddr, &ui32Value);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXReadWithSP error: %s", PVRSRVGetErrorStringKM(eError)));
+	}
+
+	return ui32Value;
+}
+
+
+#if !defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE)
+/*!
+*******************************************************************************
+
+ @Function	_RGXDecodePMPC
+
+ @Description
+
+ Return the name for the PM managed Page Catalogues
+
+ @Input ui32PC	 - Page Catalogue number
+
+ @Return   IMG_VOID
+
+******************************************************************************/
+static IMG_CHAR* _RGXDecodePMPC(IMG_UINT32 ui32PC)
+{
+	IMG_CHAR* pszPMPC = " (-)";
+
+	switch (ui32PC)
+	{
+		case 0x8: pszPMPC = " (PM-VCE0)"; break;
+		case 0x9: pszPMPC = " (PM-TE0)"; break;
+		case 0xA: pszPMPC = " (PM-ZLS0)"; break;
+		case 0xB: pszPMPC = " (PM-ALIST0)"; break;
+		case 0xC: pszPMPC = " (PM-VCE1)"; break;
+		case 0xD: pszPMPC = " (PM-TE1)"; break;
+		case 0xE: pszPMPC = " (PM-ZLS1)"; break;
+		case 0xF: pszPMPC = " (PM-ALIST1)"; break;
+	}
+
+	return pszPMPC;
+}
+
+#if defined(RGX_FEATURE_RAY_TRACING)
+/*!
+*******************************************************************************
+
+ @Function	_DPXDecodeBIFReqTags
+
+ @Description
+
+ Decode the BIF Tag ID and sideband data fields from DPX_CR_BIF_FAULT_BANK_REQ_STATUS regs
+
+ @Input eBankID	 			- BIF identifier
+ @Input ui32TagID           - Tag ID value
+ @Input ui32TagSB           - Tag Sideband data
+ @Output ppszTagID          - Decoded string from the Tag ID
+ @Output ppszTagSB          - Decoded string from the Tag SB
+ @Output pszScratchBuf      - Buffer provided to the function to generate the debug strings
+ @Input ui32ScratchBufSize  - Size of the provided buffer
+
+ @Return   IMG_VOID
+
+******************************************************************************/
+static IMG_VOID _DPXDecodeBIFReqTags(RGXDBG_BIF_ID	eBankID,
+									 IMG_UINT32		ui32TagID, 
+									 IMG_UINT32		ui32TagSB, 
+									 IMG_CHAR		**ppszTagID, 
+									 IMG_CHAR		**ppszTagSB,
+									 IMG_CHAR		*pszScratchBuf,
+									 IMG_UINT32		ui32ScratchBufSize)
+{
+	/* default to unknown */
+	IMG_CHAR *pszTagID = "-";
+	IMG_CHAR *pszTagSB = "-";
+
+	PVR_ASSERT(eBankID == RGXDBG_DPX_BIF);
+	PVR_ASSERT(ppszTagID != IMG_NULL);
+
+	PVR_UNREFERENCED_PARAMETER(ui32TagSB);
+	PVR_UNREFERENCED_PARAMETER(pszScratchBuf);
+	PVR_UNREFERENCED_PARAMETER(ui32ScratchBufSize);
+
+	switch (ui32TagID)
+	{
+		case 0x0:
+		{
+			pszTagID = "MMU";
+			break;
+		}
+		case 0x1:
+		{
+			pszTagID = "RS_READ";
+			break;
+		}
+		case 0x2:
+		{
+			pszTagID = "RS_WRITE";
+			break;
+		}
+		case 0x3:
+		{
+			pszTagID = "RQ";
+			break;
+		}
+		case 0x4:
+		{
+			pszTagID = "PU";
+			break;
+		}
+	} /* switch(TagID) */
+
+	*ppszTagID = pszTagID;
+	*ppszTagSB = pszTagSB;
+}
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function	_RGXDecodeBIFReqTags
+
+ @Description
+
+ Decode the BIF Tag ID and sideband data fields from BIF_FAULT_BANK_REQ_STATUS regs
+
+ @Input eBankID	 			- BIF identifier
+ @Input ui32TagID           - Tag ID value
+ @Input ui32TagSB           - Tag Sideband data
+ @Output ppszTagID          - Decoded string from the Tag ID
+ @Output ppszTagSB          - Decoded string from the Tag SB
+ @Output pszScratchBuf      - Buffer provided to the function to generate the debug strings
+ @Input ui32ScratchBufSize  - Size of the provided buffer
+
+ @Return   IMG_VOID
+
+******************************************************************************/
+static IMG_VOID _RGXDecodeBIFReqTags(RGXDBG_BIF_ID	eBankID,
+									 IMG_UINT32		ui32TagID, 
+									 IMG_UINT32		ui32TagSB, 
+									 IMG_CHAR		**ppszTagID, 
+									 IMG_CHAR		**ppszTagSB,
+									 IMG_CHAR		*pszScratchBuf,
+									 IMG_UINT32		ui32ScratchBufSize)
+{
+	/* default to unknown */
+	IMG_CHAR *pszTagID = "-";
+	IMG_CHAR *pszTagSB = "-";
+
+	PVR_ASSERT(ppszTagID != IMG_NULL);
+	PVR_ASSERT(ppszTagSB != IMG_NULL);
+
+#if defined(RGX_FEATURE_RAY_TRACING)
+	if (eBankID == RGXDBG_DPX_BIF)
+	{
+		_DPXDecodeBIFReqTags(eBankID, ui32TagID, ui32TagSB, ppszTagID, ppszTagSB, pszScratchBuf, ui32ScratchBufSize);
+		return;
+	}
+#endif
+	
+	switch (ui32TagID)
+	{
+		case 0x0:
+		{
+#if defined(RGX_FEATURE_RAY_TRACING)
+			if (eBankID == RGXDBG_BIF0)
+			{
+				pszTagID = "VRDM";				
+			}
+			else
+			{
+				pszTagID = "MMU";
+				switch (ui32TagSB)
+				{
+					case 0x0: pszTagSB = "Table"; break;
+					case 0x1: pszTagSB = "Directory"; break;
+					case 0x2: pszTagSB = "Catalogue"; break;
+				}
+			}
+#else
+			pszTagID = "MMU";
+			switch (ui32TagSB)
+			{
+				case 0x0: pszTagSB = "Table"; break;
+				case 0x1: pszTagSB = "Directory"; break;
+				case 0x2: pszTagSB = "Catalogue"; break;
+			}
+#endif
+			break;
+		}
+		case 0x1:
+		{
+			pszTagID = "TLA";
+			switch (ui32TagSB)
+			{
+				case 0x0: pszTagSB = "Pixel data"; break;
+				case 0x1: pszTagSB = "Command stream data"; break;
+				case 0x2: pszTagSB = "Fence or flush"; break;
+			}
+			break;
+		}
+		case 0x2:
+		{
+#if defined(RGX_FEATURE_RAY_TRACING)
+			if (eBankID == RGXDBG_BIF0)
+			{
+				pszTagID = "SHF";				
+			}
+			else
+			{
+				pszTagID = "HOST";
+			}
+#else
+			pszTagID = "HOST";
+#endif
+			break;
+		}
+		case 0x3:
+		{
+#if defined(RGX_FEATURE_RAY_TRACING)
+			if (eBankID == RGXDBG_BIF0)
+			{
+				pszTagID = "SHG";				
+			}
+			else
+			{
+				pszTagID = "META";
+				switch (ui32TagSB)
+				{
+					case 0x0: pszTagSB = "DCache - Thread 0"; break;
+					case 0x1: pszTagSB = "ICache - Thread 0"; break;
+					case 0x2: pszTagSB = "JTag - Thread 0"; break;
+					case 0x3: pszTagSB = "Slave bus - Thread 0"; break;
+					case 0x4: pszTagSB = "DCache - Thread "; break;
+					case 0x5: pszTagSB = "ICache - Thread 1"; break;
+					case 0x6: pszTagSB = "JTag - Thread 1"; break;
+					case 0x7: pszTagSB = "Slave bus - Thread 1"; break;
+				}
+			}
+#else
+			pszTagID = "META";
+			switch (ui32TagSB)
+			{
+				case 0x0: pszTagSB = "DCache - Thread 0"; break;
+				case 0x1: pszTagSB = "ICache - Thread 0"; break;
+				case 0x2: pszTagSB = "JTag - Thread 0"; break;
+				case 0x3: pszTagSB = "Slave bus - Thread 0"; break;
+				case 0x4: pszTagSB = "DCache - Thread "; break;
+				case 0x5: pszTagSB = "ICache - Thread 1"; break;
+				case 0x6: pszTagSB = "JTag - Thread 1"; break;
+				case 0x7: pszTagSB = "Slave bus - Thread 1"; break;
+			}
+#endif
+			break;
+		}
+		case 0x4:
+		{
+			pszTagID = "USC";
+			OSSNPrintf(pszScratchBuf, ui32ScratchBufSize,
+			           "Cache line %d", (ui32TagSB & 0x3f));
+			pszTagSB = pszScratchBuf;
+			break;
+		}
+#if defined(RGX_FEATURE_CLUSTER_GROUPING)
+		case 0x5:
+		{
+#if defined(RGX_FEATURE_RAY_TRACING)
+			if (eBankID == RGXDBG_TEXAS_BIF)
+			{
+				pszTagID = "PBE";
+			}
+			else
+			{
+				pszTagID = "RPM";
+			}
+#else
+			pszTagID = "PBE";
+#endif
+			break;
+		}
+		case 0x6:
+		{
+#if defined(RGX_FEATURE_RAY_TRACING)
+			if (eBankID == RGXDBG_TEXAS_BIF)
+			{
+				pszTagID = "ISP";
+				switch (ui32TagSB)
+				{
+					case 0x00: pszTagSB = "ZLS"; break;
+					case 0x20: pszTagSB = "Occlusion Query"; break;
+				}
+			}
+			else
+			{
+				pszTagID = "FBA";				
+			}
+#else
+			pszTagID = "ISP";
+			switch (ui32TagSB)
+			{
+				case 0x00: pszTagSB = "ZLS"; break;
+				case 0x20: pszTagSB = "Occlusion Query"; break;
+			}
+#endif
+			break;
+		}
+		case 0x7:
+		{
+			if (eBankID == RGXDBG_TEXAS_BIF)
+			{
+				pszTagID = "IPF";
+				switch (ui32TagSB)
+				{
+					case 0x0: pszTagSB = "CPF"; break;
+					case 0x1: pszTagSB = "DBSC"; break;
+					case 0x2:
+					case 0x4:
+					case 0x6:
+					case 0x8: pszTagSB = "Control Stream"; break;
+					case 0x3:
+					case 0x5:
+					case 0x7:
+					case 0x9: pszTagSB = "Primitive Block"; break;
+				}
+			}
+			else
+			{
+				pszTagID = "IPP";
+				switch (ui32TagSB)
+				{
+					case 0x0: pszTagSB = "Macrotile Header"; break;
+					case 0x1: pszTagSB = "Region Header"; break;
+				}
+			}
+			break;
+		}
+#else /* RGX_FEATURE_CLUSTER_GROUPING */
+		case 0x5:
+		{
+			pszTagID = "PBE";
+			break;
+		}
+		case 0x6:
+		{
+			pszTagID = "ISP";
+			switch (ui32TagSB)
+			{
+				case 0x00: pszTagSB = "ZLS"; break;
+				case 0x20: pszTagSB = "Occlusion Query"; break;
+			}
+			break;
+		}
+		case 0x7:
+		{
+			pszTagID = "IPF";
+			switch (ui32TagSB)
+			{
+				case 0x0: pszTagSB = "Macrotile Header"; break;
+				case 0x1: pszTagSB = "Region Header"; break;
+				case 0x2: pszTagSB = "DBSC"; break;
+				case 0x3: pszTagSB = "CPF"; break;
+				case 0x4: 
+				case 0x6:
+				case 0x8: pszTagSB = "Control Stream"; break;
+				case 0x5: 
+				case 0x7:
+				case 0x9: pszTagSB = "Primitive Block"; break;
+			}
+			break;
+		}
+#endif /* RGX_FEATURE_CLUSTER_GROUPING */
+		case 0x8:
+		{
+			pszTagID = "CDM";
+			switch (ui32TagSB)
+			{
+				case 0x0: pszTagSB = "Control Stream"; break;
+				case 0x1: pszTagSB = "Indirect Data"; break;
+				case 0x2: pszTagSB = "Event Write"; break;
+				case 0x3: pszTagSB = "Context State"; break;
+			}
+			break;
+		}
+		case 0x9:
+		{
+			pszTagID = "VDM";
+			switch (ui32TagSB)
+			{
+				case 0x0: pszTagSB = "Control Stream"; break;
+				case 0x1: pszTagSB = "PPP State"; break;
+				case 0x2: pszTagSB = "Index Data"; break;
+				case 0x4: pszTagSB = "Call Stack"; break;
+				case 0x8: pszTagSB = "Context State"; break;
+			}
+			break;
+		}
+		case 0xA:
+		{
+			pszTagID = "PM";
+			switch (ui32TagSB)
+			{
+				case 0x0: pszTagSB = "PMA_TAFSTACK"; break;
+				case 0x1: pszTagSB = "PMA_TAMLIST"; break;
+				case 0x2: pszTagSB = "PMA_3DFSTACK"; break;
+				case 0x3: pszTagSB = "PMA_3DMLIST"; break;
+				case 0x4: pszTagSB = "PMA_PMCTX0"; break;
+				case 0x5: pszTagSB = "PMA_PMCTX1"; break;
+				case 0x6: pszTagSB = "PMA_MAVP"; break;
+				case 0x7: pszTagSB = "PMA_UFSTACK"; break;
+				case 0x8: pszTagSB = "PMD_TAFSTACK"; break;
+				case 0x9: pszTagSB = "PMD_TAMLIST"; break;
+				case 0xA: pszTagSB = "PMD_3DFSTACK"; break;
+				case 0xB: pszTagSB = "PMD_3DMLIST"; break;
+				case 0xC: pszTagSB = "PMD_PMCTX0"; break;
+				case 0xD: pszTagSB = "PMD_PMCTX1"; break;
+				case 0xF: pszTagSB = "PMD_UFSTACK"; break;
+				case 0x10: pszTagSB = "PMA_TAMMUSTACK"; break;
+				case 0x11: pszTagSB = "PMA_3DMMUSTACK"; break;
+				case 0x12: pszTagSB = "PMD_TAMMUSTACK"; break;
+				case 0x13: pszTagSB = "PMD_3DMMUSTACK"; break;
+				case 0x14: pszTagSB = "PMA_TAUFSTACK"; break;
+				case 0x15: pszTagSB = "PMA_3DUFSTACK"; break;
+				case 0x16: pszTagSB = "PMD_TAUFSTACK"; break;
+				case 0x17: pszTagSB = "PMD_3DUFSTACK"; break;
+				case 0x18: pszTagSB = "PMA_TAVFP"; break;
+				case 0x19: pszTagSB = "PMD_3DVFP"; break;
+				case 0x1A: pszTagSB = "PMD_TAVFP"; break;
+			}
+			break;
+		}
+		case 0xB:
+		{
+			pszTagID = "TA";
+			switch (ui32TagSB)
+			{
+				case 0x1: pszTagSB = "VCE"; break;
+				case 0x2: pszTagSB = "TPC"; break;
+				case 0x3: pszTagSB = "TE Control Stream"; break;
+				case 0x4: pszTagSB = "TE Region Header"; break;
+				case 0x5: pszTagSB = "TE Render Target Cache"; break;
+				case 0x6: pszTagSB = "TEAC Render Target Cache"; break;
+				case 0x7: pszTagSB = "VCE Render Target Cache"; break;
+				case 0x8: pszTagSB = "PPP Context State"; break;
+			}
+			break;
+		}
+		case 0xC:
+		{
+			pszTagID = "TPF";
+			switch (ui32TagSB)
+			{
+				case 0x0: pszTagSB = "TPF0: Primitive Block"; break;
+				case 0x1: pszTagSB = "TPF0: Depth Bias"; break;
+				case 0x2: pszTagSB = "TPF0: Per Primitive IDs"; break;
+				case 0x3: pszTagSB = "CPF - Tables"; break;
+				case 0x4: pszTagSB = "TPF1: Primitive Block"; break;
+				case 0x5: pszTagSB = "TPF1: Depth Bias"; break;
+				case 0x6: pszTagSB = "TPF1: Per Primitive IDs"; break;
+				case 0x7: pszTagSB = "CPF - Data: Pipe 0"; break;
+				case 0x8: pszTagSB = "TPF2: Primitive Block"; break;
+				case 0x9: pszTagSB = "TPF2: Depth Bias"; break;
+				case 0xA: pszTagSB = "TPF2: Per Primitive IDs"; break;
+				case 0xB: pszTagSB = "CPF - Data: Pipe 1"; break;
+				case 0xC: pszTagSB = "TPF3: Primitive Block"; break;
+				case 0xD: pszTagSB = "TPF3: Depth Bias"; break;
+				case 0xE: pszTagSB = "TPF3: Per Primitive IDs"; break;
+				case 0xF: pszTagSB = "CPF - Data: Pipe 2"; break;
+			}
+			break;
+		}
+		case 0xD:
+		{
+			pszTagID = "PDS";
+			break;
+		}
+		case 0xE:
+		{
+			pszTagID = "MCU";
+			{
+				IMG_UINT32 ui32Burst = (ui32TagSB >> 5) & 0x7;
+				IMG_UINT32 ui32GroupEnc = (ui32TagSB >> 2) & 0x7;
+				IMG_UINT32 ui32Group = ui32TagSB & 0x3;
+
+				IMG_CHAR* pszBurst = "";
+				IMG_CHAR* pszGroupEnc = "";
+				IMG_CHAR* pszGroup = "";
+
+				switch (ui32Burst)
+				{
+					case 0x0:
+					case 0x1: pszBurst = "128bit word within the Lower 256bits"; break;
+					case 0x2:
+					case 0x3: pszBurst = "128bit word within the Upper 256bits"; break;
+					case 0x4: pszBurst = "Lower 256bits"; break;
+					case 0x5: pszBurst = "Upper 256bits"; break;
+					case 0x6: pszBurst = "512 bits"; break;
+				}
+				switch (ui32GroupEnc)
+				{
+#if defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE)
+					case 0x0: pszGroupEnc = "PDS_REQ"; break;
+					case 0x1: pszGroupEnc = "USC_REQ"; break;
+					case 0x2: pszGroupEnc = "MADD_REQ"; break;
+					case 0x3: pszGroupEnc = "USCB_USC"; break;
+#else
+					case 0x0: pszGroupEnc = "TPUA_USC"; break;
+					case 0x1: pszGroupEnc = "TPUB_USC"; break;
+					case 0x2: pszGroupEnc = "USCA_USC"; break;
+					case 0x3: pszGroupEnc = "USCB_USC"; break;
+					case 0x4: pszGroupEnc = "PDS_USC"; break;
+#if (RGX_FEATURE_NUM_CLUSTERS < 6)
+					case 0x5: pszGroupEnc = "PDSRW"; break;
+#elif (RGX_FEATURE_NUM_CLUSTERS == 6)
+					case 0x5: pszGroupEnc = "UPUC_USC"; break;
+					case 0x6: pszGroupEnc = "TPUC_USC"; break;
+					case 0x7: pszGroupEnc = "PDSRW"; break;
+#endif
+#endif
+				}
+				switch (ui32Group)
+				{
+					case 0x0: pszGroup = "Banks 0-3"; break;
+					case 0x1: pszGroup = "Banks 4-7"; break;
+					case 0x2: pszGroup = "Banks 8-11"; break;
+					case 0x3: pszGroup = "Banks 12-15"; break;
+				}
+
+				OSSNPrintf(pszScratchBuf, ui32ScratchBufSize,
+								"%s, %s, %s", pszBurst, pszGroupEnc, pszGroup);
+				pszTagSB = pszScratchBuf;
+			}
+			break;
+		}
+		case 0xF:
+		{
+			pszTagID = "FB_CDC";
+#if defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE)
+			{
+				IMG_UINT32 ui32Req   = (ui32TagSB >> 0) & 0xf;
+				IMG_UINT32 ui32MCUSB = (ui32TagSB >> 4) & 0x3;
+				IMG_CHAR* pszReqOrig = "";
+
+				switch (ui32Req)
+				{
+					case 0x0: pszReqOrig = "FBC Request, originator ZLS"; break;
+					case 0x1: pszReqOrig = "FBC Request, originator PBE"; break;
+					case 0x2: pszReqOrig = "FBC Request, originator Host"; break;
+					case 0x3: pszReqOrig = "FBC Request, originator TLA"; break;
+					case 0x4: pszReqOrig = "FBDC Request, originator ZLS"; break;
+					case 0x5: pszReqOrig = "FBDC Request, originator MCU"; break;
+					case 0x6: pszReqOrig = "FBDC Request, originator Host"; break;
+					case 0x7: pszReqOrig = "FBDC Request, originator TLA"; break;
+					case 0x8: pszReqOrig = "FBC Request, originator ZLS Requester Fence"; break;
+					case 0x9: pszReqOrig = "FBC Request, originator PBE Requester Fence"; break;
+					case 0xa: pszReqOrig = "FBC Request, originator Host Requester Fence"; break;
+					case 0xb: pszReqOrig = "FBC Request, originator TLA Requester Fence"; break;
+					case 0xc: pszReqOrig = "Reserved"; break;
+					case 0xd: pszReqOrig = "Reserved"; break;
+					case 0xe: pszReqOrig = "FBDC Request, originator FBCDC(Host) Memory Fence"; break;
+					case 0xf: pszReqOrig = "FBDC Request, originator FBCDC(TLA) Memory Fence"; break;
+				}
+				OSSNPrintf(pszScratchBuf, ui32ScratchBufSize,
+				           "%s, MCU sideband 0x%X", pszReqOrig, ui32MCUSB);
+				pszTagSB = pszScratchBuf;
+			}
+#else
+			{
+				IMG_UINT32 ui32Req   = (ui32TagSB >> 2) & 0x7;
+				IMG_UINT32 ui32MCUSB = (ui32TagSB >> 0) & 0x3;
+				IMG_CHAR* pszReqOrig = "";
+
+				switch (ui32Req)
+				{
+					case 0x0: pszReqOrig = "FBC Request, originator ZLS";   break;
+					case 0x1: pszReqOrig = "FBC Request, originator PBE";   break;
+					case 0x2: pszReqOrig = "FBC Request, originator Host";  break;
+					case 0x3: pszReqOrig = "FBC Request, originator TLA";   break;
+					case 0x4: pszReqOrig = "FBDC Request, originator ZLS";  break;
+					case 0x5: pszReqOrig = "FBDC Request, originator MCU";  break;
+					case 0x6: pszReqOrig = "FBDC Request, originator Host"; break;
+					case 0x7: pszReqOrig = "FBDC Request, originator TLA";  break;
+				}
+				OSSNPrintf(pszScratchBuf, ui32ScratchBufSize,
+				           "%s, MCU sideband 0x%X", pszReqOrig, ui32MCUSB);
+				pszTagSB = pszScratchBuf;
+			}
+#endif
+			break;
+		}
+	} /* switch(TagID) */
+
+	*ppszTagID = pszTagID;
+	*ppszTagSB = pszTagSB;
+}
+#endif
+
+
+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE)
+/*!
+*******************************************************************************
+
+ @Function	_RGXDecodeMMULevel
+
+ @Description
+
+ Return the name for the MMU level that faulted.
+
+ @Input ui32MMULevel	 - MMU level
+
+ @Return   IMG_CHAR* to the sting describing the MMU level that faulted.
+
+******************************************************************************/
+static IMG_CHAR* _RGXDecodeMMULevel(IMG_UINT32 ui32MMULevel)
+{
+	IMG_CHAR* pszMMULevel = "";
+
+	switch (ui32MMULevel)
+	{
+		case 0x0: pszMMULevel = " (Page Table)"; break;
+		case 0x1: pszMMULevel = " (Page Directory)"; break;
+		case 0x2: pszMMULevel = " (Page Catalog)"; break;
+		case 0x3: pszMMULevel = " (Cat Base)"; break;
+	}
+
+	return pszMMULevel;
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function	_RGXDecodeMMUReqTags
+
+ @Description
+
+ Decodes the MMU Tag ID and Sideband data fields from RGX_CR_MMU_FAULT_META_STATUS and
+ RGX_CR_MMU_FAULT_STATUS regs.
+
+ @Input ui32TagID           - Tag ID value
+ @Input ui32TagSB           - Tag Sideband data
+ @Output ppszTagID          - Decoded string from the Tag ID
+ @Output ppszTagSB          - Decoded string from the Tag SB
+ @Output pszScratchBuf      - Buffer provided to the function to generate the debug strings
+ @Input ui32ScratchBufSize  - Size of the provided buffer
+
+ @Return   IMG_VOID
+
+******************************************************************************/
+static IMG_VOID _RGXDecodeMMUReqTags(IMG_UINT32  ui32TagID, 
+									 IMG_UINT32  ui32TagSB, 
+                                     IMG_CHAR    **ppszTagID, 
+									 IMG_CHAR    **ppszTagSB,
+									 IMG_CHAR    *pszScratchBuf,
+									 IMG_UINT32  ui32ScratchBufSize)
+{
+	IMG_INT32  i32SideBandType = -1;
+	IMG_CHAR   *pszTagID = "-";
+	IMG_CHAR   *pszTagSB = "-";
+
+	PVR_ASSERT(ppszTagID != IMG_NULL);
+	PVR_ASSERT(ppszTagSB != IMG_NULL);
+
+	switch (ui32TagID)
+	{
+		case  0: pszTagID = "META (Jones)"; i32SideBandType = RGXDBG_META; break;
+		case  1: pszTagID = "TLA (Jones)"; i32SideBandType = RGXDBG_TLA; break;
+#if defined(RGX_FEATURE_META_DMA)
+		case  2: pszTagID = "DMA (Jones)"; i32SideBandType = RGXDBG_DMA; break;
+#endif
+		case  3: pszTagID = "VDMM (Jones)"; i32SideBandType = RGXDBG_VDMM; break;
+		case  4: pszTagID = "CDM (Jones)"; i32SideBandType = RGXDBG_CDM; break;
+		case  5: pszTagID = "IPP (Jones)"; i32SideBandType = RGXDBG_IPP; break;
+		case  6: pszTagID = "PM (Jones)"; i32SideBandType = RGXDBG_PM; break;
+		case  7: pszTagID = "Tiling (Jones)"; i32SideBandType = RGXDBG_TILING; break;
+		case  8: pszTagID = "MCU (Texas 0)"; i32SideBandType = RGXDBG_MCU; break;
+#if defined(HW_ERN_47229)
+		case  9: pszTagID = "PDS (Texas 0)"; i32SideBandType = RGXDBG_PDS; break;
+		case 10: pszTagID = "PBE (Texas 0)"; i32SideBandType = RGXDBG_PBE break;
+		case 11: pszTagID = "FBCDC (Texas 0)"; i32SideBandType = RGXDBG_FBCDC; break;
+#elif defined(FIX_HW_BRN_50539)
+		case  9: pszTagID = "PBE (Texas 0)"; i32SideBandType = RGXDBG_PBE break;
+		case 10: pszTagID = "PDS (Texas 0)"; i32SideBandType = RGXDBG_PDS; break;
+		case 11: pszTagID = "FBCDC (Texas 0)"; i32SideBandType = RGXDBG_FBCDC; break;
+#else
+		case  9: pszTagID = "PDS (Texas 0)"; i32SideBandType = RGXDBG_PDS; break;
+		case 10: pszTagID = "PBE0 (Texas 0)"; i32SideBandType = RGXDBG_PBE; break;
+		case 11: pszTagID = "PBE1 (Texas 0)"; i32SideBandType = RGXDBG_PBE; break;
+#endif
+		case 12: pszTagID = "VDMS (Black Pearl 0)"; i32SideBandType = RGXDBG_VDMS; break;
+		case 13: pszTagID = "IPF (Black Pearl 0)"; i32SideBandType = RGXDBG_IPF; break;
+		case 14: pszTagID = "ISP (Black Pearl 0)"; i32SideBandType = RGXDBG_ISP; break;
+		case 15: pszTagID = "TPF (Black Pearl 0)"; i32SideBandType = RGXDBG_TPF; break;
+		case 16: pszTagID = "USCS (Black Pearl 0)"; i32SideBandType = RGXDBG_USCS; break;
+		case 17: pszTagID = "PPP (Black Pearl 0)"; i32SideBandType = RGXDBG_PPP; break;
+#if defined(HW_ERN_47229)
+		case 18: pszTagID = "VCE (Black Pearl 0)"; i32SideBandType = RGXDBG_VCE; break;
+		case 19: pszTagID = "FBCDC (Black Pearl 0)"; i32SideBandType = RGXDBG_FBCDC; break;
+#else
+		case 18: pszTagID = "TPF_CPF (Black Pearl 0)"; i32SideBandType = RGXDBG_TPF_CPF; break;
+		case 19: pszTagID = "IPF_CPF (Black Pearl 0)"; i32SideBandType = RGXDBG_IPF_CPF; break;
+#endif
+		case 20: pszTagID = "MCU (Texas 1)"; i32SideBandType = RGXDBG_MCU; break;
+#if defined(HW_ERN_47229)
+		case 21: pszTagID = "PDS (Texas 1)"; i32SideBandType = RGXDBG_PDS; break;
+		case 22: pszTagID = "PBE (Texas 1)"; i32SideBandType = RGXDBG_PBE break;
+		case 23: pszTagID = "FBCDC (Texas 1)"; i32SideBandType = RGXDBG_FBCDC; break;
+#elif defined(FIX_HW_BRN_50539)
+		case 21: pszTagID = "PBE (Texas 1)"; i32SideBandType = RGXDBG_PBE break;
+		case 22: pszTagID = "PDS (Texas 1)"; i32SideBandType = RGXDBG_PDS; break;
+		case 23: pszTagID = "FBCDC (Texas 1)"; i32SideBandType = RGXDBG_FBCDC; break;
+#else
+		case 21: pszTagID = "PDS (Texas 1)"; i32SideBandType = RGXDBG_PDS; break;
+		case 22: pszTagID = "PBE0 (Texas 1)"; i32SideBandType = RGXDBG_PBE; break;
+		case 23: pszTagID = "PBE1 (Texas 1)"; i32SideBandType = RGXDBG_PBE; break;
+#endif
+		case 24: pszTagID = "MCU (Texas 2)"; i32SideBandType = RGXDBG_MCU; break;
+#if defined(HW_ERN_47229)
+		case 25: pszTagID = "PDS (Texas 2)"; i32SideBandType = RGXDBG_PDS; break;
+		case 26: pszTagID = "PBE (Texas 2)"; i32SideBandType = RGXDBG_PBE break;
+		case 27: pszTagID = "FBCDC (Texas 2)"; i32SideBandType = RGXDBG_FBCDC; break;
+#elif defined(FIX_HW_BRN_50539)
+		case 25: pszTagID = "PBE (Texas 2)"; i32SideBandType = RGXDBG_PBE break;
+		case 26: pszTagID = "PDS (Texas 2)"; i32SideBandType = RGXDBG_PDS; break;
+		case 27: pszTagID = "FBCDC (Texas 2)"; i32SideBandType = RGXDBG_FBCDC; break;
+#else
+		case 25: pszTagID = "PDS (Texas 2)"; i32SideBandType = RGXDBG_PDS; break;
+		case 26: pszTagID = "PBE0 (Texas 2)"; i32SideBandType = RGXDBG_PBE; break;
+		case 27: pszTagID = "PBE1 (Texas 2)"; i32SideBandType = RGXDBG_PBE; break;
+#endif
+		case 28: pszTagID = "VDMS (Black Pearl 1)"; i32SideBandType = RGXDBG_VDMS; break;
+		case 29: pszTagID = "IPF (Black Pearl 1)"; i32SideBandType = RGXDBG_IPF; break;
+		case 30: pszTagID = "ISP (Black Pearl 1)"; i32SideBandType = RGXDBG_ISP; break;
+		case 31: pszTagID = "TPF (Black Pearl 1)"; i32SideBandType = RGXDBG_TPF; break;
+		case 32: pszTagID = "USCS (Black Pearl 1)"; i32SideBandType = RGXDBG_USCS; break;
+		case 33: pszTagID = "PPP (Black Pearl 1)"; i32SideBandType = RGXDBG_PPP; break;
+#if defined(HW_ERN_47229)
+		case 34: pszTagID = "VCE (Black Pearl 1)"; i32SideBandType = RGXDBG_VCE; break;
+		case 35: pszTagID = "FBCDC (Black Pearl 1)"; i32SideBandType = RGXDBG_FBCDC; break;
+#else
+		case 34: pszTagID = "TPF_CPF (Black Pearl 1)"; i32SideBandType = RGXDBG_TPF_CPF; break;
+		case 35: pszTagID = "IPF_CPF (Black Pearl 1)"; i32SideBandType = RGXDBG_IPF_CPF; break;
+#endif
+		case 36: pszTagID = "MCU (Texas 3)"; i32SideBandType = RGXDBG_MCU; break;
+#if defined(HW_ERN_47229)
+		case 37: pszTagID = "PDS (Texas 3)"; i32SideBandType = RGXDBG_PDS; break;
+		case 38: pszTagID = "PBE (Texas 3)"; i32SideBandType = RGXDBG_PBE break;
+		case 39: pszTagID = "FBCDC (Texas 3)"; i32SideBandType = RGXDBG_FBCDC; break;
+#elif defined(FIX_HW_BRN_50539)
+		case 37: pszTagID = "PBE (Texas 3)"; i32SideBandType = RGXDBG_PBE break;
+		case 38: pszTagID = "PDS (Texas 3)"; i32SideBandType = RGXDBG_PDS; break;
+		case 39: pszTagID = "FBCDC (Texas 3)"; i32SideBandType = RGXDBG_FBCDC; break;
+#else
+		case 37: pszTagID = "PDS (Texas 3)"; i32SideBandType = RGXDBG_PDS; break;
+		case 38: pszTagID = "PBE0 (Texas 3)"; i32SideBandType = RGXDBG_PBE; break;
+		case 39: pszTagID = "PBE1 (Texas 3)"; i32SideBandType = RGXDBG_PBE; break;
+#endif
+		case 40: pszTagID = "MCU (Texas 4)"; i32SideBandType = RGXDBG_MCU; break;
+#if defined(HW_ERN_47229)
+		case 41: pszTagID = "PDS (Texas 4)"; i32SideBandType = RGXDBG_PDS; break;
+		case 42: pszTagID = "PBE (Texas 4)"; i32SideBandType = RGXDBG_PBE break;
+		case 43: pszTagID = "FBCDC (Texas 4)"; i32SideBandType = RGXDBG_FBCDC; break;
+#elif defined(FIX_HW_BRN_50539)
+		case 41: pszTagID = "PBE (Texas 4)"; i32SideBandType = RGXDBG_PBE break;
+		case 42: pszTagID = "PDS (Texas 4)"; i32SideBandType = RGXDBG_PDS; break;
+		case 43: pszTagID = "FBCDC (Texas 4)"; i32SideBandType = RGXDBG_FBCDC; break;
+#else
+		case 41: pszTagID = "PDS (Texas 4)"; i32SideBandType = RGXDBG_PDS; break;
+		case 42: pszTagID = "PBE0 (Texas 4)"; i32SideBandType = RGXDBG_PBE; break;
+		case 43: pszTagID = "PBE1 (Texas 4)"; i32SideBandType = RGXDBG_PBE; break;
+#endif
+		case 44: pszTagID = "VDMS (Black Pearl 2)"; i32SideBandType = RGXDBG_VDMS; break;
+		case 45: pszTagID = "IPF (Black Pearl 2)"; i32SideBandType = RGXDBG_IPF; break;
+		case 46: pszTagID = "ISP (Black Pearl 2)"; i32SideBandType = RGXDBG_ISP; break;
+		case 47: pszTagID = "TPF (Black Pearl 2)"; i32SideBandType = RGXDBG_TPF; break;
+		case 48: pszTagID = "USCS (Black Pearl 2)"; i32SideBandType = RGXDBG_USCS; break;
+		case 49: pszTagID = "PPP (Black Pearl 2)"; i32SideBandType = RGXDBG_PPP; break;
+#if defined(HW_ERN_47229)
+		case 50: pszTagID = "VCE (Black Pearl 2)"; i32SideBandType = RGXDBG_VCE; break;
+		case 51: pszTagID = "FBCDC (Black Pearl 2)"; i32SideBandType = RGXDBG_FBCDC; break;
+#else
+		case 50: pszTagID = "TPF_CPF (Black Pearl 2)"; i32SideBandType = RGXDBG_TPF_CPF; break;
+		case 51: pszTagID = "IPF_CPF (Black Pearl 2)"; i32SideBandType = RGXDBG_IPF_CPF; break;
+#endif
+		case 52: pszTagID = "MCU (Texas 5)"; i32SideBandType = RGXDBG_MCU; break;
+#if defined(HW_ERN_47229)
+		case 53: pszTagID = "PDS (Texas 5)"; i32SideBandType = RGXDBG_PDS; break;
+		case 54: pszTagID = "PBE (Texas 5)"; i32SideBandType = RGXDBG_PBE break;
+		case 55: pszTagID = "FBCDC (Texas 5)"; i32SideBandType = RGXDBG_FBCDC; break;
+#elif defined(FIX_HW_BRN_50539)
+		case 53: pszTagID = "PBE (Texas 5)"; i32SideBandType = RGXDBG_PBE break;
+		case 54: pszTagID = "PDS (Texas 5)"; i32SideBandType = RGXDBG_PDS; break;
+		case 55: pszTagID = "FBCDC (Texas 5)"; i32SideBandType = RGXDBG_FBCDC; break;
+#else
+		case 53: pszTagID = "PDS (Texas 5)"; i32SideBandType = RGXDBG_PDS; break;
+		case 54: pszTagID = "PBE0 (Texas 5)"; i32SideBandType = RGXDBG_PBE; break;
+		case 55: pszTagID = "PBE1 (Texas 5)"; i32SideBandType = RGXDBG_PBE; break;
+#endif
+		case 56: pszTagID = "MCU (Texas 6)"; i32SideBandType = RGXDBG_MCU; break;
+#if defined(HW_ERN_47229)
+		case 57: pszTagID = "PDS (Texas 6)"; i32SideBandType = RGXDBG_PDS; break;
+		case 58: pszTagID = "PBE (Texas 6)"; i32SideBandType = RGXDBG_PBE break;
+		case 59: pszTagID = "FBCDC (Texas 6)"; i32SideBandType = RGXDBG_FBCDC; break;
+#elif defined(FIX_HW_BRN_50539)
+		case 57: pszTagID = "PBE (Texas 6)"; i32SideBandType = RGXDBG_PBE break;
+		case 58: pszTagID = "PDS (Texas 6)"; i32SideBandType = RGXDBG_PDS; break;
+		case 59: pszTagID = "FBCDC (Texas 6)"; i32SideBandType = RGXDBG_FBCDC; break;
+#else
+		case 57: pszTagID = "PDS (Texas 6)"; i32SideBandType = RGXDBG_PDS; break;
+		case 58: pszTagID = "PBE0 (Texas 6)"; i32SideBandType = RGXDBG_PBE; break;
+		case 59: pszTagID = "PBE1 (Texas 6)"; i32SideBandType = RGXDBG_PBE; break;
+#endif
+		case 60: pszTagID = "VDMS (Black Pearl 3)"; i32SideBandType = RGXDBG_VDMS; break;
+		case 61: pszTagID = "IPF (Black Pearl 3)"; i32SideBandType = RGXDBG_IPF; break;
+		case 62: pszTagID = "ISP (Black Pearl 3)"; i32SideBandType = RGXDBG_ISP; break;
+		case 63: pszTagID = "TPF (Black Pearl 3)"; i32SideBandType = RGXDBG_TPF; break;
+		case 64: pszTagID = "USCS (Black Pearl 3)"; i32SideBandType = RGXDBG_USCS; break;
+		case 65: pszTagID = "PPP (Black Pearl 3)"; i32SideBandType = RGXDBG_PPP; break;
+#if defined(HW_ERN_47229)
+		case 66: pszTagID = "VCE (Black Pearl 3)"; i32SideBandType = RGXDBG_VCE; break;
+		case 67: pszTagID = "FBCDC (Black Pearl 3)"; i32SideBandType = RGXDBG_FBCDC; break;
+#else
+		case 66: pszTagID = "TPF_CPF (Black Pearl 3)"; i32SideBandType = RGXDBG_TPF_CPF; break;
+		case 67: pszTagID = "IPF_CPF (Black Pearl 3)"; i32SideBandType = RGXDBG_IPF_CPF; break;
+#endif
+		case 68: pszTagID = "MCU (Texas 7)"; i32SideBandType = RGXDBG_MCU; break;
+#if defined(HW_ERN_47229)
+		case 69: pszTagID = "PDS (Texas 7)"; i32SideBandType = RGXDBG_PDS; break;
+		case 70: pszTagID = "PBE (Texas 7)"; i32SideBandType = RGXDBG_PBE break;
+		case 71: pszTagID = "FBCDC (Texas 7)"; i32SideBandType = RGXDBG_FBCDC; break;
+#elif defined(FIX_HW_BRN_50539)
+		case 69: pszTagID = "PBE (Texas 7)"; i32SideBandType = RGXDBG_PBE break;
+		case 70: pszTagID = "PDS (Texas 7)"; i32SideBandType = RGXDBG_PDS; break;
+		case 71: pszTagID = "FBCDC (Texas 7)"; i32SideBandType = RGXDBG_FBCDC; break;
+#else
+		case 69: pszTagID = "PDS (Texas 7)"; i32SideBandType = RGXDBG_PDS; break;
+		case 70: pszTagID = "PBE0 (Texas 7)"; i32SideBandType = RGXDBG_PBE; break;
+		case 71: pszTagID = "PBE1 (Texas 7)"; i32SideBandType = RGXDBG_PBE; break;
+#endif
+	}
+	
+	switch (i32SideBandType)
+	{
+		case RGXDBG_META:
+		{
+			switch (ui32TagSB)
+			{
+				case 0x0: pszTagSB = "DCache - Thread 0"; break;
+				case 0x1: pszTagSB = "ICache - Thread 0"; break;
+				case 0x2: pszTagSB = "JTag - Thread 0"; break;
+				case 0x3: pszTagSB = "Slave bus - Thread 0"; break;
+				case 0x4: pszTagSB = "DCache - Thread 1"; break;
+				case 0x5: pszTagSB = "ICache - Thread 1"; break;
+				case 0x6: pszTagSB = "JTag - Thread 1"; break;
+				case 0x7: pszTagSB = "Slave bus - Thread 1"; break;
+			}
+			break;
+		}
+
+		case RGXDBG_TLA:
+		{
+			switch (ui32TagSB)
+			{
+				case 0x0: pszTagSB = "Pixel data"; break;
+				case 0x1: pszTagSB = "Command stream data"; break;
+				case 0x2: pszTagSB = "Fence or flush"; break;
+			}
+			break;
+		}
+
+		case RGXDBG_VDMM:
+		{
+			switch (ui32TagSB)
+			{
+				case 0x0: pszTagSB = "Control Stream - Read Only"; break;
+				case 0x1: pszTagSB = "PPP State - Read Only"; break;
+				case 0x2: pszTagSB = "Indices - Read Only"; break;
+				case 0x4: pszTagSB = "Call Stack - Read/Write"; break;
+				case 0x6: pszTagSB = "DrawIndirect - Read Only"; break;
+				case 0xA: pszTagSB = "Context State - Write Only"; break;
+			}
+			break;
+		}
+
+		case RGXDBG_CDM:
+		{
+			switch (ui32TagSB)
+			{
+				case 0x0: pszTagSB = "Control Stream"; break;
+				case 0x1: pszTagSB = "Indirect Data"; break;
+				case 0x2: pszTagSB = "Event Write"; break;
+				case 0x3: pszTagSB = "Context State"; break;
+			}
+			break;
+		}
+
+		case RGXDBG_IPP:
+		{
+			switch (ui32TagSB)
+			{
+				case 0x0: pszTagSB = "Macrotile Header"; break;
+				case 0x1: pszTagSB = "Region Header"; break;
+			}
+			break;
+		}
+
+		case RGXDBG_PM:
+		{
+			switch (ui32TagSB)
+			{
+				case 0x0: pszTagSB = "PMA_TAFSTACK"; break;
+				case 0x1: pszTagSB = "PMA_TAMLIST"; break;
+				case 0x2: pszTagSB = "PMA_3DFSTACK"; break;
+				case 0x3: pszTagSB = "PMA_3DMLIST"; break;
+				case 0x4: pszTagSB = "PMA_PMCTX0"; break;
+				case 0x5: pszTagSB = "PMA_PMCTX1"; break;
+				case 0x6: pszTagSB = "PMA_MAVP"; break;
+				case 0x7: pszTagSB = "PMA_UFSTACK"; break;
+				case 0x8: pszTagSB = "PMD_TAFSTACK"; break;
+				case 0x9: pszTagSB = "PMD_TAMLIST"; break;
+				case 0xA: pszTagSB = "PMD_3DFSTACK"; break;
+				case 0xB: pszTagSB = "PMD_3DMLIST"; break;
+				case 0xC: pszTagSB = "PMD_PMCTX0"; break;
+				case 0xD: pszTagSB = "PMD_PMCTX1"; break;
+				case 0xF: pszTagSB = "PMD_UFSTACK"; break;
+				case 0x10: pszTagSB = "PMA_TAMMUSTACK"; break;
+				case 0x11: pszTagSB = "PMA_3DMMUSTACK"; break;
+				case 0x12: pszTagSB = "PMD_TAMMUSTACK"; break;
+				case 0x13: pszTagSB = "PMD_3DMMUSTACK"; break;
+				case 0x14: pszTagSB = "PMA_TAUFSTACK"; break;
+				case 0x15: pszTagSB = "PMA_3DUFSTACK"; break;
+				case 0x16: pszTagSB = "PMD_TAUFSTACK"; break;
+				case 0x17: pszTagSB = "PMD_3DUFSTACK"; break;
+				case 0x18: pszTagSB = "PMA_TAVFP"; break;
+				case 0x19: pszTagSB = "PMD_3DVFP"; break;
+				case 0x1A: pszTagSB = "PMD_TAVFP"; break;
+			}
+			break;
+		}
+
+		case RGXDBG_TILING:
+		{
+			switch (ui32TagSB)
+			{
+				case 0x0: pszTagSB = "PSG Control Stream TP0"; break;
+				case 0x1: pszTagSB = "TPC TP0"; break;
+				case 0x2: pszTagSB = "VCE0"; break;
+				case 0x3: pszTagSB = "VCE1"; break;
+				case 0x4: pszTagSB = "PSG Control Stream TP1"; break;
+				case 0x5: pszTagSB = "TPC TP1"; break;
+				case 0x8: pszTagSB = "PSG Region Header TP0"; break;
+				case 0xC: pszTagSB = "PSG Region Header TP1"; break;
+			}
+			break;
+		}
+
+		case RGXDBG_VDMS:
+		{
+			switch (ui32TagSB)
+			{
+				case 0x0: pszTagSB = "Context State - Write Only"; break;
+			}
+			break;
+		}
+		
+		case RGXDBG_IPF:
+		{
+			switch (ui32TagSB)
+			{
+				case 0x00:
+				case 0x20: pszTagSB = "CPF"; break;
+				case 0x01: pszTagSB = "DBSC"; break;
+				case 0x02:
+				case 0x04:
+				case 0x06:
+				case 0x08:
+				case 0x0A:
+				case 0x0C:
+				case 0x0E:
+				case 0x10: pszTagSB = "Control Stream"; break;
+				case 0x03:
+				case 0x05:
+				case 0x07:
+				case 0x09:
+				case 0x0B:
+				case 0x0D:
+				case 0x0F:
+				case 0x11: pszTagSB = "Primitive Block"; break;
+			}
+			break;
+		}
+
+		case RGXDBG_ISP:
+		{
+			switch (ui32TagSB)
+			{
+				case 0x00: pszTagSB = "ZLS read/write"; break;
+				case 0x20: pszTagSB = "Occlusion query read/write"; break;
+			}
+			break;
+		}
+
+		case RGXDBG_TPF:
+		{
+			switch (ui32TagSB)
+			{
+				case 0x0: pszTagSB = "TPF0: Primitive Block"; break;
+				case 0x1: pszTagSB = "TPF0: Depth Bias"; break;
+				case 0x2: pszTagSB = "TPF0: Per Primitive IDs"; break;
+				case 0x3: pszTagSB = "CPF - Tables"; break;
+				case 0x4: pszTagSB = "TPF1: Primitive Block"; break;
+				case 0x5: pszTagSB = "TPF1: Depth Bias"; break;
+				case 0x6: pszTagSB = "TPF1: Per Primitive IDs"; break;
+				case 0x7: pszTagSB = "CPF - Data: Pipe 0"; break;
+				case 0x8: pszTagSB = "TPF2: Primitive Block"; break;
+				case 0x9: pszTagSB = "TPF2: Depth Bias"; break;
+				case 0xA: pszTagSB = "TPF2: Per Primitive IDs"; break;
+				case 0xB: pszTagSB = "CPF - Data: Pipe 1"; break;
+				case 0xC: pszTagSB = "TPF3: Primitive Block"; break;
+				case 0xD: pszTagSB = "TPF3: Depth Bias"; break;
+				case 0xE: pszTagSB = "TPF3: Per Primitive IDs"; break;
+				case 0xF: pszTagSB = "CPF - Data: Pipe 2"; break;
+			}
+			break;
+		}
+
+		case RGXDBG_FBCDC:
+		{
+			IMG_UINT32 ui32Req   = (ui32TagSB >> 0) & 0xf;
+			IMG_UINT32 ui32MCUSB = (ui32TagSB >> 4) & 0x3;
+			IMG_CHAR* pszReqOrig = "";
+
+			switch (ui32Req)
+			{
+				case 0x0: pszReqOrig = "FBC Request, originator ZLS";  break;
+				case 0x1: pszReqOrig = "FBC Request, originator PBE"; break;
+				case 0x2: pszReqOrig = "FBC Request, originator Host"; break;
+				case 0x3: pszReqOrig = "FBC Request, originator TLA"; break;
+				case 0x4: pszReqOrig = "FBDC Request, originator ZLS"; break;
+				case 0x5: pszReqOrig = "FBDC Request, originator MCU"; break;
+				case 0x6: pszReqOrig = "FBDC Request, originator Host"; break;
+				case 0x7: pszReqOrig = "FBDC Request, originator TLA"; break;
+				case 0x8: pszReqOrig = "FBC Request, originator ZLS Requester Fence"; break;
+				case 0x9: pszReqOrig = "FBC Request, originator PBE Requester Fence"; break;
+				case 0xa: pszReqOrig = "FBC Request, originator Host Requester Fence"; break;
+				case 0xb: pszReqOrig = "FBC Request, originator TLA Requester Fence"; break;
+				case 0xc: pszReqOrig = "Reserved"; break;
+				case 0xd: pszReqOrig = "Reserved"; break;
+				case 0xe: pszReqOrig = "FBDC Request, originator FBCDC(Host) Memory Fence"; break;
+				case 0xf: pszReqOrig = "FBDC Request, originator FBCDC(TLA) Memory Fence"; break;
+			}
+			OSSNPrintf(pszScratchBuf, ui32ScratchBufSize,
+			           "%s, MCU sideband 0x%X", pszReqOrig, ui32MCUSB);
+			pszTagSB = pszScratchBuf;
+			break;
+		}
+
+		case RGXDBG_MCU:
+		{
+			IMG_UINT32 ui32SetNumber = (ui32TagSB >> 5) & 0x7;
+			IMG_UINT32 ui32WayNumber = (ui32TagSB >> 2) & 0x7;
+			IMG_UINT32 ui32Group     = ui32TagSB & 0x3;
+
+			IMG_CHAR* pszGroup = "";
+
+			switch (ui32Group)
+			{
+				case 0x0: pszGroup = "Banks 0-1"; break;
+				case 0x1: pszGroup = "Banks 2-3"; break;
+				case 0x2: pszGroup = "Banks 4-5"; break;
+				case 0x3: pszGroup = "Banks 6-7"; break;
+			}
+
+			OSSNPrintf(pszScratchBuf, ui32ScratchBufSize,
+			           "Set=%d, Way=%d, %s", ui32SetNumber, ui32WayNumber, pszGroup);
+			pszTagSB = pszScratchBuf;
+			break;
+		}
+
+		default:
+		{
+			OSSNPrintf(pszScratchBuf, ui32ScratchBufSize, "SB=0x%02x", ui32TagSB);
+			pszTagSB = pszScratchBuf;
+			break;
+		}
+	}
+
+	*ppszTagID = pszTagID;
+	*ppszTagSB = pszTagSB;
+}
+#endif
+
+
+#if !defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE)
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+
+typedef enum _DEVICEMEM_HISTORY_QUERY_INDEX_
+{
+	DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING,
+	DEVICEMEM_HISTORY_QUERY_INDEX_FAULTED,
+	DEVICEMEM_HISTORY_QUERY_INDEX_NEXT,
+	DEVICEMEM_HISTORY_QUERY_INDEX_COUNT,
+} DEVICEMEM_HISTORY_QUERY_INDEX;
+
+/*!
+*******************************************************************************
+
+ @Function	_PrintDevicememHistoryQueryResult
+
+ @Description
+
+ Print details of a single result from a DevicememHistory query
+
+ @Input pfnDumpDebugPrintf       - Debug printf function
+ @Input psResult                 - The DevicememHistory result to be printed
+ @Input ui32Index                - The index of the result
+
+ @Return   IMG_VOID
+
+******************************************************************************/
+static IMG_VOID _PrintDevicememHistoryQueryResult(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+								DEVICEMEM_HISTORY_QUERY_OUT_RESULT *psResult,
+								IMG_UINT32 ui32Index)
+{
+	IMG_UINT32 ui32Remainder;
+	PVR_DUMPDEBUG_LOG(("  [%u] Name: %s Base address: " IMG_DEV_VIRTADDR_FMTSPEC
+				" Size: " IMG_DEVMEM_SIZE_FMTSPEC
+				" Allocated: %c Modified %llu us ago (abs time %llu us)",
+									ui32Index,
+									psResult->szString,
+					(unsigned long long) psResult->sBaseDevVAddr.uiAddr,
+					(unsigned long long) psResult->uiSize,
+					psResult->bAllocated ? 'Y' : 'N',
+					(unsigned long long) OSDivide64r64(psResult->ui64Age, 1000, &ui32Remainder),
+					(unsigned long long) OSDivide64r64(psResult->ui64When, 1000, &ui32Remainder)));
+}
+
+/*!
+*******************************************************************************
+
+ @Function	_PrintDevicememHistoryQueryOut
+
+ @Description
+
+ Print details of all the results from a DevicememHistory query
+
+ @Input pfnDumpDebugPrintf       - Debug printf function
+ @Input psResult                 - The DevicememHistory result to be printed
+
+ @Return   IMG_VOID
+
+******************************************************************************/
+static IMG_VOID _PrintDevicememHistoryQueryOut(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, DEVICEMEM_HISTORY_QUERY_OUT *psQueryOut)
+{
+	IMG_UINT32 i;
+
+	if(psQueryOut->ui32NumResults == 0)
+	{
+		PVR_DUMPDEBUG_LOG(("  No results"));
+	}
+	else
+	{
+		for(i = 0; i < psQueryOut->ui32NumResults; i++)
+		{
+			_PrintDevicememHistoryQueryResult(pfnDumpDebugPrintf, &psQueryOut->sResults[i], i);
+		}
+	}
+}
+
+/* table of HW page size values and the equivalent */
+static const unsigned int aui32HWPageSizeTable[][2] =
+{
+	{ 0, PVRSRV_4K_PAGE_SIZE },
+	{ 1, PVRSRV_16K_PAGE_SIZE },
+	{ 2, PVRSRV_64K_PAGE_SIZE },
+	{ 3, PVRSRV_256K_PAGE_SIZE },
+	{ 4, PVRSRV_1M_PAGE_SIZE },
+	{ 5, PVRSRV_2M_PAGE_SIZE }
+};
+
+/*!
+*******************************************************************************
+
+ @Function	_PageSizeHWToBytes
+
+ @Description
+
+ Convert a HW page size value to its size in bytes
+
+ @Input ui32PageSizeHW     - The HW page size value
+
+ @Return   IMG_UINT32      The page size in bytes
+
+******************************************************************************/
+static IMG_UINT32 _PageSizeHWToBytes(IMG_UINT32 ui32PageSizeHW)
+{
+	PVR_ASSERT(ui32PageSizeHW <= 5);
+
+	return aui32HWPageSizeTable[ui32PageSizeHW][1];
+}
+
+/*!
+*******************************************************************************
+
+ @Function	_GetDevicememHistoryData
+
+ @Description
+
+ Get the DevicememHistory results for the given PID and faulting device virtual address.
+ The function will query DevicememHistory for information about the faulting page, as well
+ as the page before and after.
+
+ @Input uiPID              - The process ID to search for allocations belonging to
+ @Input sFaultDevVAddr     - The device address to search for allocations at/before/after
+ @Input asQueryOut         - Storage for the query results
+ @Input ui32PageSizeBytes  - Faulted page size in bytes
+
+ @Return   IMG_VOID
+
+******************************************************************************/
+static IMG_VOID _GetDevicememHistoryData(IMG_PID uiPID, IMG_DEV_VIRTADDR sFaultDevVAddr,
+							DEVICEMEM_HISTORY_QUERY_OUT asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_COUNT],
+							IMG_UINT32 ui32PageSizeBytes)
+{
+	IMG_UINT32 i;
+	DEVICEMEM_HISTORY_QUERY_IN sQueryIn;
+
+	sQueryIn.uiPID = uiPID;
+
+	/* query the DevicememHistory about the preceding / faulting / next page */
+
+	for(i = DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING; i < DEVICEMEM_HISTORY_QUERY_INDEX_COUNT; i++)
+	{
+		switch(i)
+		{
+			case DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING:
+				sQueryIn.sDevVAddr.uiAddr = (sFaultDevVAddr.uiAddr & ~(IMG_UINT64)(ui32PageSizeBytes - 1)) - 1;
+				break;
+			case DEVICEMEM_HISTORY_QUERY_INDEX_FAULTED:
+				sQueryIn.sDevVAddr = sFaultDevVAddr;
+				break;
+			case DEVICEMEM_HISTORY_QUERY_INDEX_NEXT:
+				sQueryIn.sDevVAddr.uiAddr = (sFaultDevVAddr.uiAddr & ~(IMG_UINT64)(ui32PageSizeBytes - 1)) + ui32PageSizeBytes;
+				break;
+		}
+
+		/* return value ignored because we check each of the QUERY_OUT elements
+		 * later to see if they contain any hits
+		 */
+		(void) DevicememHistoryQuery(&sQueryIn, &asQueryOut[i]);
+	}
+}
+
+/* stored data about one page fault */
+typedef struct _FAULT_INFO_
+{
+	RGXMEM_PROCESS_INFO sProcessInfo;
+	IMG_DEV_VIRTADDR sFaultDevVAddr;
+	DEVICEMEM_HISTORY_QUERY_OUT asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_COUNT];
+	/* the CR timer value at the time of the fault, recorded by the FW.
+	 * used to differentiate different page faults
+	 */
+	IMG_UINT64 ui64CRTimer;
+	/* time when this FAULT_INFO entry was added. used for timing
+	 * reference against the map/unmap information
+	 */
+	IMG_UINT64 ui64When;
+} FAULT_INFO;
+
+/* history list of page faults.
+ * Keeps the fist `n` page faults and the last `n` page faults, like the FW
+ * HWR log
+ */
+typedef struct _FAULT_INFO_LOG_
+{
+	IMG_UINT32 ui32Head;
+	IMG_UINT32 ui32NumWrites;
+	/* the number of faults in this log need not correspond exactly to
+	 * the HWINFO number of the FW, as the FW HWINFO log may contain
+	 * non-page fault HWRs
+	 */
+	FAULT_INFO asFaults[RGXFWIF_HWINFO_MAX];
+} FAULT_INFO_LOG;
+
+static FAULT_INFO_LOG gsFaultInfoLog = { 0 };
+
+/*!
+*******************************************************************************
+
+ @Function	_QueryFaultInfo
+
+ @Description
+
+ Searches the local list of previously analysed page faults to see if the given
+ fault has already been analysed and if so, returns a pointer to the analysis
+ objbect (FAULT_INFO *), otherwise returns NULL.
+
+ @Input pfnDumpDebugPrintf       - The debug printf function
+ @Input sFaultDevVAddr           - The faulting device virtual address
+ @Input ui64CRTimer              - The CR timer value recorded by the FW at the time of the fault
+
+ @Return   FAULT_INFO* Pointer to an existing fault analysis structure if found, otherwise IMG_NULL
+
+******************************************************************************/
+static FAULT_INFO *_QueryFaultInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+								IMG_DEV_VIRTADDR sFaultDevVAddr,
+								IMG_UINT64 ui64CRTimer)
+{
+	IMG_UINT32 i;
+
+	for(i = 0; i < MIN(gsFaultInfoLog.ui32NumWrites, RGXFWIF_HWINFO_MAX); i++)
+	{
+		if((gsFaultInfoLog.asFaults[i].ui64CRTimer == ui64CRTimer) &&
+			(gsFaultInfoLog.asFaults[i].sFaultDevVAddr.uiAddr == sFaultDevVAddr.uiAddr))
+			{
+				return &gsFaultInfoLog.asFaults[i];
+			}
+	}
+
+	return IMG_NULL;
+}
+
+/*!
+*******************************************************************************
+
+ @Function	_AddFaultInfo
+
+ @Description
+
+ Add the given page fault information to the page fault log.
+
+
+ @Input psProcessInfo        - Information about the user process which caused the page fault
+ @Input sFaultDevVAddr       - The faulting device virtual address
+ @Input ui64CRTimer          - The CR timer value recorded by the FW at the time of the fault
+ @Input asQueryOut           - The DevicememHistory query information with the allocations relating to the fault
+
+ @Return   FAULT_INFO* Pointer to the newly added record
+
+******************************************************************************/
+static FAULT_INFO *_AddFaultInfo(RGXMEM_PROCESS_INFO *psProcessInfo,
+						IMG_DEV_VIRTADDR sFaultDevVAddr,
+						IMG_UINT64 ui64CRTimer,
+						DEVICEMEM_HISTORY_QUERY_OUT asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_COUNT])
+{
+	IMG_UINT32 ui32Head = gsFaultInfoLog.ui32Head;
+	FAULT_INFO *psInfo = &gsFaultInfoLog.asFaults[ui32Head];
+
+	if(gsFaultInfoLog.ui32Head < RGXFWIF_HWINFO_MAX - 1)
+	{
+		gsFaultInfoLog.ui32Head++;
+	}
+	else
+	{
+		/* wrap back to the first of the 'LAST' entries */
+		gsFaultInfoLog.ui32Head = RGXFWIF_HWINFO_MAX_FIRST;
+	}
+
+	gsFaultInfoLog.ui32NumWrites++;
+
+	psInfo->sProcessInfo = *psProcessInfo;
+	psInfo->sFaultDevVAddr = sFaultDevVAddr;
+	psInfo->ui64CRTimer = ui64CRTimer;
+	psInfo->ui64When = OSClockus64();
+	OSMemCopy(psInfo->asQueryOut, asQueryOut, sizeof(psInfo->asQueryOut));
+
+	return psInfo;
+}
+
+/*!
+*******************************************************************************
+
+ @Function	_PrintFaultInfo
+
+ @Description
+
+ Print all the details of a page fault from a FAULT_INFO structure
+
+ @Input pfnDumpDebugPrintf   - The debug printf function
+ @Input psInfo               - The page fault occurrence to print
+ @Input pui32Index           - (optional) index value to include in the print output
+
+ @Return   IMG_VOID
+
+******************************************************************************/
+static IMG_VOID _PrintFaultInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+							FAULT_INFO *psInfo,
+							const IMG_UINT32 *pui32Index)
+{
+	IMG_UINT32 i;
+
+	if(pui32Index)
+	{
+		PVR_DUMPDEBUG_LOG(("(%u) Device memory history for page fault address 0x%010llX, CRTimer: 0x%016llX, "
+							"PID: %u (%s, unregistered: %u) Abs Time: %llu us",
+					*pui32Index,
+					(unsigned long long) psInfo->sFaultDevVAddr.uiAddr,
+					psInfo->ui64CRTimer,
+					(unsigned int) psInfo->sProcessInfo.uiPID,
+					psInfo->sProcessInfo.szProcessName,
+					psInfo->sProcessInfo.bUnregistered,
+					(unsigned long long) psInfo->ui64When));
+	}
+	else
+	{
+		PVR_DUMPDEBUG_LOG(("Device memory history for page fault address 0x%010llX, PID: %u (%s, unregistered: %u) Abs Time: %llu us",
+					(unsigned long long) psInfo->sFaultDevVAddr.uiAddr,
+					(unsigned int) psInfo->sProcessInfo.uiPID,
+					psInfo->sProcessInfo.szProcessName,
+					psInfo->sProcessInfo.bUnregistered,
+					(unsigned long long) psInfo->ui64When));
+	}
+
+	for(i = DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING; i < DEVICEMEM_HISTORY_QUERY_INDEX_COUNT; i++)
+	{
+		const IMG_CHAR *pszWhich;
+
+		switch(i)
+		{
+			case DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING:
+				pszWhich = "Preceding page";
+				break;
+			case DEVICEMEM_HISTORY_QUERY_INDEX_FAULTED:
+				pszWhich = "Faulted page";
+				break;
+			case DEVICEMEM_HISTORY_QUERY_INDEX_NEXT:
+				pszWhich = "Next page";
+				break;
+		}
+
+		PVR_DUMPDEBUG_LOG(("%s:", pszWhich));
+		_PrintDevicememHistoryQueryOut(pfnDumpDebugPrintf, &psInfo->asQueryOut[i]);
+	}
+}
+
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function	_RGXDumpRGXBIFBank
+
+ @Description
+
+ Dump BIF Bank state in human readable form.
+
+ @Input psDevInfo				- RGX device info
+ @Input eBankID	 				- BIF identifier
+ @Input ui64MMUStatus			- MMU Status register value
+ @Input ui64ReqStatus			- BIF request Status register value
+ @Input ui64PCAddress                   - Page catalogue base address of faulting access
+ @Input ui64CRTimer                     - RGX CR timer value at time of page fault
+ @Input bBIFSummary				- Flag to check whether the function is called
+ 	 	 	 	 	 	 	 	  as a part of the debug dump summary or
+								  as a part of a HWR log
+ @Return   IMG_VOID
+
+******************************************************************************/
+static IMG_VOID _RGXDumpRGXBIFBank(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                   PVRSRV_RGXDEV_INFO	*psDevInfo,
+                                   RGXDBG_BIF_ID 		eBankID,
+                                   IMG_UINT64			ui64MMUStatus,
+                                   IMG_UINT64			ui64ReqStatus,
+                                   IMG_UINT64			ui64PCAddress,
+                                   IMG_UINT64			ui64CRTimer,
+                                   IMG_BOOL				bBIFSummary)
+{
+
+	if (ui64MMUStatus == 0x0)
+	{
+		PVR_DUMPDEBUG_LOG(("%s - OK", pszBIFNames[eBankID]));
+	}
+	else
+	{
+		IMG_DEV_VIRTADDR sFaultDevVAddr;
+		IMG_DEV_PHYADDR sPCDevPAddr = { 0 };
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+		IMG_BOOL bFound = IMG_FALSE;
+		RGXMEM_PROCESS_INFO sProcessInfo;
+		IMG_UINT32 ui32PageSizeBytes;
+		FAULT_INFO *psInfo;
+#endif
+		/* Bank 0 & 1 share the same fields */
+		PVR_DUMPDEBUG_LOG(("%s%s - FAULT:",
+						  (bBIFSummary)?"":"    ",
+						  pszBIFNames[eBankID]));
+
+		/* MMU Status */
+		{
+			IMG_UINT32 ui32PC = 
+				(ui64MMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK) >>
+					RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT;
+
+			IMG_UINT32 ui32PageSize = 
+				(ui64MMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_CLRMSK) >>
+					RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_SHIFT;
+
+			IMG_UINT32 ui32MMUDataType = 
+				(ui64MMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_CLRMSK) >>
+					RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_SHIFT;
+
+			IMG_BOOL bROFault = (ui64MMUStatus & RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_EN) != 0;
+			IMG_BOOL bProtFault = (ui64MMUStatus & RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_EN) != 0;
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+			ui32PageSizeBytes = _PageSizeHWToBytes(ui32PageSize);
+#endif
+
+			PVR_DUMPDEBUG_LOG(("%s  * MMU status (0x%016llX): PC = %d%s, Page Size = %d, MMU data type = %d%s%s.",
+			                  (bBIFSummary)?"":"    ",
+							  ui64MMUStatus,
+			                  ui32PC,
+			                  (ui32PC < 0x8)?"":_RGXDecodePMPC(ui32PC),
+			                  ui32PageSize,
+			                  ui32MMUDataType,
+			                  (bROFault)?", Read Only fault":"",
+			                  (bProtFault)?", PM/META protection fault":""));
+		}
+
+		/* Req Status */
+		{
+			IMG_CHAR *pszTagID;
+			IMG_CHAR *pszTagSB;
+			IMG_CHAR aszScratch[RGX_DEBUG_STR_SIZE];
+
+			IMG_BOOL bRead = (ui64ReqStatus & RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_RNW_EN) != 0;
+			IMG_UINT32 ui32TagSB = 
+				(ui64ReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_CLRMSK) >>
+					RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_SHIFT;
+			IMG_UINT32 ui32TagID = 
+				(ui64ReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_CLRMSK) >>
+							RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_SHIFT;
+			IMG_UINT64 ui64Addr = (ui64ReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK);
+
+#if defined(RGX_FEATURE_RAY_TRACING)
+			/* RNW bit offset is different. The TAG_SB, TAG_ID and address fields are the same. */
+			if (eBankID == RGXDBG_DPX_BIF)
+			{
+				bRead = (ui64ReqStatus & DPX_CR_BIF_FAULT_BANK_REQ_STATUS_RNW_EN) != 0;
+			}
+#endif
+			_RGXDecodeBIFReqTags(eBankID, ui32TagID, ui32TagSB, &pszTagID, &pszTagSB, &aszScratch[0], RGX_DEBUG_STR_SIZE);
+
+			PVR_DUMPDEBUG_LOG(("%s  * Request (0x%016llX): %s (%s), %s 0x%010llX.",
+							  (bBIFSummary)?"":"    ",
+							  ui64ReqStatus,
+			                  pszTagID,
+			                  pszTagSB,
+			                  (bRead)?"Reading from":"Writing to",
+			                  ui64Addr));
+		}
+
+		/* Check if the host thinks this fault is valid */
+
+		sFaultDevVAddr.uiAddr = (ui64ReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK);
+
+		if(bBIFSummary)
+		{
+			IMG_UINT32 ui32PC = 
+				(ui64MMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK) >>
+					RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT;
+				
+			/* Only the first 8 cat bases are application memory contexts which we can validate... */
+			if (ui32PC < 8)
+			{
+				sPCDevPAddr.uiAddr = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_CAT_BASEN(ui32PC));
+				PVR_DUMPDEBUG_LOG(("Acquired live PC address: 0x%016llX", sPCDevPAddr.uiAddr));
+			}
+			else
+			{
+				sPCDevPAddr.uiAddr = 0;
+			}
+		}
+		else
+		{
+			PVR_DUMPDEBUG_LOG(("FW logged fault using PC Address: 0x%016llX", ui64PCAddress));
+			sPCDevPAddr.uiAddr = ui64PCAddress;
+		}
+
+		if(bBIFSummary)
+		{
+			PVR_DUMPDEBUG_LOG(("Checking faulting address 0x%010llX", sFaultDevVAddr.uiAddr));
+			RGXCheckFaultAddress(psDevInfo, &sFaultDevVAddr, &sPCDevPAddr);
+		}
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+
+		 /* look to see if we have already processed this fault.
+		  * if so then use the previously acquired information.
+		  */
+		OSLockAcquire(psDevInfo->hDebugFaultInfoLock);
+		psInfo = _QueryFaultInfo(pfnDumpDebugPrintf, sFaultDevVAddr, ui64CRTimer);
+
+		if(psInfo == IMG_NULL)
+		{
+			if(sPCDevPAddr.uiAddr != RGXFWIF_INVALID_PC_PHYADDR)
+			{
+				/* look up the process details for the faulting page catalogue */
+				bFound = RGXPCAddrToProcessInfo(psDevInfo, sPCDevPAddr, &sProcessInfo);
+
+				if(bFound)
+				{
+					DEVICEMEM_HISTORY_QUERY_OUT asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_COUNT] = {{ 0 }};
+
+					/* get any DevicememHistory data for the faulting address */
+					_GetDevicememHistoryData(sProcessInfo.uiPID,
+										sFaultDevVAddr,
+										asQueryOut,
+										ui32PageSizeBytes);
+
+					psInfo = _AddFaultInfo(&sProcessInfo,
+										sFaultDevVAddr,
+										ui64CRTimer,
+										asQueryOut);
+				}
+				else
+				{
+					PVR_DUMPDEBUG_LOG(("Could not find PID for PC 0x%016llX", sPCDevPAddr.uiAddr));
+				}
+			}
+			else
+			{
+				PVR_DUMPDEBUG_LOG(("Page fault not applicable to Devmem History"));
+			}
+		}
+
+		/* psInfo should always be non-NULL if the process was found */
+		PVR_ASSERT((psInfo != IMG_NULL) || !bFound);
+
+		if(psInfo != IMG_NULL)
+		{
+			_PrintFaultInfo(pfnDumpDebugPrintf, psInfo, NULL);
+		}
+
+		OSLockRelease(psDevInfo->hDebugFaultInfoLock);
+#endif
+
+	}
+
+}
+#endif
+
+
+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE)
+/*!
+*******************************************************************************
+
+ @Function	_RGXDumpRGXMMUFaultStatus
+
+ @Description
+
+ Dump MMU Fault status in human readable form.
+
+ @Input psDevInfo				- RGX device info
+ @Input ui64MMUStatus			- MMU Status register value
+ @Input bSummary				- Flag to check whether the function is called
+ 	 	 	 	 	 	 	 	  as a part of the debug dump summary or
+								  as a part of a HWR log
+ @Return   IMG_VOID
+
+******************************************************************************/
+static IMG_VOID _RGXDumpRGXMMUFaultStatus(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                          PVRSRV_RGXDEV_INFO    *psDevInfo,
+                                          IMG_UINT64            ui64MMUStatus,
+                                          IMG_BOOL              bSummary)
+{
+	if (ui64MMUStatus == 0x0)
+	{
+		PVR_DUMPDEBUG_LOG(("MMU (Core) - OK"));
+	}
+	else
+	{
+		IMG_UINT32 ui32PC        = (ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_CONTEXT_CLRMSK) >>
+		                           RGX_CR_MMU_FAULT_STATUS_CONTEXT_SHIFT;
+		IMG_UINT64 ui64Addr      = (ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_ADDRESS_CLRMSK) >>
+		                           RGX_CR_MMU_FAULT_STATUS_ADDRESS_SHIFT;
+		IMG_UINT32 ui32Requester = (ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_REQ_ID_CLRMSK) >>
+		                           RGX_CR_MMU_FAULT_STATUS_REQ_ID_SHIFT;
+		IMG_UINT32 ui32SideBand  = (ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_TAG_SB_CLRMSK) >>
+		                           RGX_CR_MMU_FAULT_STATUS_TAG_SB_SHIFT;
+		IMG_UINT32 ui32MMULevel  = (ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_LEVEL_CLRMSK) >>
+		                           RGX_CR_MMU_FAULT_STATUS_LEVEL_SHIFT;
+		IMG_BOOL bRead           = (ui64MMUStatus & RGX_CR_MMU_FAULT_STATUS_RNW_EN) != 0;
+		IMG_BOOL bFault          = (ui64MMUStatus & RGX_CR_MMU_FAULT_STATUS_FAULT_EN) != 0;
+		IMG_BOOL bROFault        = ((ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_TYPE_CLRMSK) >>
+		                            RGX_CR_MMU_FAULT_STATUS_TYPE_SHIFT) == 0x2;
+		IMG_BOOL bProtFault      = ((ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_TYPE_CLRMSK) >>
+		                            RGX_CR_MMU_FAULT_STATUS_TYPE_SHIFT) == 0x3;
+		IMG_CHAR aszScratch[RGX_DEBUG_STR_SIZE];
+		IMG_CHAR *pszTagID;
+		IMG_CHAR *pszTagSB;
+
+		_RGXDecodeMMUReqTags(ui32Requester, ui32SideBand, &pszTagID, &pszTagSB, aszScratch, RGX_DEBUG_STR_SIZE);
+
+		PVR_DUMPDEBUG_LOG(("%sMMU (Core) - FAULT:",  (bSummary)?"":"    "));
+		PVR_DUMPDEBUG_LOG(("%s  * MMU status (0x%016llX): PC = %d, %s 0x%010llX, %s (%s)%s%s%s%s.",
+						  (bSummary)?"":"    ",
+						  ui64MMUStatus,
+						  ui32PC,
+		                  (bRead)?"Reading from":"Writing to",
+						  ui64Addr,
+						  pszTagID,
+						  pszTagSB,
+						  (bFault)?", Fault":"",
+						  (bROFault)?", Read Only fault":"",
+						  (bProtFault)?", PM/META protection fault":"",
+						  _RGXDecodeMMULevel(ui32MMULevel)));
+	}
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function	_RGXDumpRGXMMUMetaFaultStatus
+
+ @Description
+
+ Dump MMU Meta Fault state in human readable form.
+
+ @Input psDevInfo				- RGX device info
+ @Input ui64MMUStatus			- MMU Status register value
+ @Input bSummary				- Flag to check whether the function is called
+ 	 	 	 	 	 	 	 	  as a part of the debug dump summary or
+								  as a part of a HWR log
+ @Return   IMG_VOID
+
+******************************************************************************/
+static IMG_VOID _RGXDumpRGXMMUMetaFaultStatus(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                              PVRSRV_RGXDEV_INFO    *psDevInfo,
+                                              IMG_UINT64            ui64MMUStatus,
+                                              IMG_BOOL              bSummary)
+{
+	if (ui64MMUStatus == 0x0)
+	{
+		PVR_DUMPDEBUG_LOG(("MMU (Meta) - OK"));
+	}
+	else
+	{
+		IMG_UINT32 ui32PC        = (ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_CLRMSK) >>
+		                           RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_SHIFT;
+		IMG_UINT64 ui64Addr      = (ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_CLRMSK) >>
+		                           RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_SHIFT;
+		IMG_UINT32 ui32SideBand  = (ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_META_TAG_SB_CLRMSK) >>
+		                           RGX_CR_MMU_FAULT_STATUS_META_TAG_SB_SHIFT;
+		IMG_UINT32 ui32Requester = (ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_CLRMSK) >>
+		                           RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_SHIFT;
+		IMG_UINT32 ui32MMULevel  = (ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_META_LEVEL_CLRMSK) >>
+		                           RGX_CR_MMU_FAULT_STATUS_META_LEVEL_SHIFT;
+		IMG_BOOL bRead           = (ui64MMUStatus & RGX_CR_MMU_FAULT_STATUS_META_RNW_EN) != 0;
+		IMG_BOOL bFault          = (ui64MMUStatus & RGX_CR_MMU_FAULT_STATUS_META_FAULT_EN) != 0;
+		IMG_BOOL bROFault        = ((ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_META_TYPE_CLRMSK) >>
+		                            RGX_CR_MMU_FAULT_STATUS_META_TYPE_SHIFT) == 0x2;
+		IMG_BOOL bProtFault      = ((ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_META_TYPE_CLRMSK) >>
+		                            RGX_CR_MMU_FAULT_STATUS_META_TYPE_SHIFT) == 0x3;
+		IMG_CHAR aszScratch[RGX_DEBUG_STR_SIZE];
+		IMG_CHAR *pszTagID;
+		IMG_CHAR *pszTagSB;
+
+		_RGXDecodeMMUReqTags(ui32Requester, ui32SideBand, &pszTagID, &pszTagSB, aszScratch, RGX_DEBUG_STR_SIZE);
+
+		PVR_DUMPDEBUG_LOG(("%sMMU (Meta) - FAULT:",  (bSummary)?"":"    "));
+		PVR_DUMPDEBUG_LOG(("%s  * MMU status (0x%016llX): PC = %d, %s 0x%010llX, %s (%s)%s%s%s%s.",
+						  (bSummary)?"":"    ",
+						  ui64MMUStatus,
+						  ui32PC,
+		                  (bRead)?"Reading from":"Writing to",
+						  ui64Addr,
+						  pszTagID,
+						  pszTagSB,
+						  (bFault)?", Fault":"",
+						  (bROFault)?", Read Only fault":"",
+						  (bProtFault)?", PM/META protection fault":"",
+						  _RGXDecodeMMULevel(ui32MMULevel)));
+	}
+}
+#endif
+
+
+/*!
+*******************************************************************************
+
+ @Function	_RGXDumpFWAssert
+
+ @Description
+
+ Dump FW assert strings when a thread asserts.
+
+ @Input psRGXFWIfTraceBufCtl	- RGX FW trace buffer
+
+ @Return   IMG_VOID
+
+******************************************************************************/
+static IMG_VOID _RGXDumpFWAssert(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                 RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl)
+{
+	IMG_CHAR    *pszTraceAssertPath;
+	IMG_CHAR    *pszTraceAssertInfo;
+	IMG_INT32   ui32TraceAssertLine;
+	IMG_UINT32  i;
+
+	for (i = 0; i < RGXFW_THREAD_NUM; i++)
+	{
+		pszTraceAssertPath = psRGXFWIfTraceBufCtl->sTraceBuf[i].sAssertBuf.szPath;
+		pszTraceAssertInfo = psRGXFWIfTraceBufCtl->sTraceBuf[i].sAssertBuf.szInfo;
+		ui32TraceAssertLine = psRGXFWIfTraceBufCtl->sTraceBuf[i].sAssertBuf.ui32LineNum;
+
+		/* print non null assert strings */
+		if (*pszTraceAssertInfo)
+		{
+			PVR_DUMPDEBUG_LOG(("FW-T%d Assert: %s (%s:%d)", 
+			                  i, pszTraceAssertInfo, pszTraceAssertPath, ui32TraceAssertLine));
+		}
+	}
+}
+
+static IMG_VOID _RGXDumpFWPoll(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                               RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl)
+{
+	IMG_UINT32 i;
+	for (i = 0; i < RGXFW_THREAD_NUM; i++)
+	{
+		if (psRGXFWIfTraceBufCtl->aui32CrPollAddr[i])
+		{
+			PVR_DUMPDEBUG_LOG(("T%u polling %s (reg:0x%08X mask:0x%08X)",
+			                  i,
+			                  ((psRGXFWIfTraceBufCtl->aui32CrPollAddr[i] & RGXFW_POLL_TYPE_SET)?("set"):("unset")), 
+			                  psRGXFWIfTraceBufCtl->aui32CrPollAddr[i] & ~RGXFW_POLL_TYPE_SET, 
+			                  psRGXFWIfTraceBufCtl->aui32CrPollMask[i]));
+		}
+	}
+
+}
+
+static IMG_VOID _RGXDumpFWHWRInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                  RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl, PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	IMG_BOOL        	bAnyLocked = IMG_FALSE;
+	IMG_UINT32      	dm, i;
+	IMG_UINT32      	ui32LineSize;
+	IMG_CHAR	    	*pszLine, *pszTemp;
+	const IMG_CHAR 		*apszDmNames[RGXFWIF_DM_MAX + 1] = { "GP(", "2D(", "TA(", "3D(", "CDM(",
+#if defined(RGX_FEATURE_RAY_TRACING)
+								 "RTU(", "SHG(",
+#endif /* RGX_FEATURE_RAY_TRACING */
+								 NULL };
+
+	const IMG_CHAR 		*pszMsgHeader = "Number of HWR: ";
+	IMG_CHAR 			*pszLockupType = "";
+	RGXFWIF_HWRINFOBUF 	*psHWInfoBuf = psDevInfo->psRGXFWIfHWRInfoBuf;
+	RGX_HWRINFO 		*psHWRInfo;
+	IMG_UINT32      	ui32MsgHeaderSize = OSStringLength(pszMsgHeader);
+	IMG_UINT32			ui32HWRRecoveryFlags;
+	IMG_UINT32			ui32ReadIndex;
+
+	for (dm = 0; dm < RGXFWIF_DM_MAX; dm++)
+	{
+		if (psRGXFWIfTraceBufCtl->aui16HwrDmLockedUpCount[dm]  ||
+		    psRGXFWIfTraceBufCtl->aui16HwrDmOverranCount[dm])
+		{
+			bAnyLocked = IMG_TRUE;
+			break;					
+		}
+	}
+
+	if (!bAnyLocked && (psRGXFWIfTraceBufCtl->ui32HWRStateFlags & RGXFWIF_HWR_HARDWARE_OK))
+	{
+		/* No HWR situation, print nothing */
+		return;
+	}
+
+	ui32LineSize = sizeof(IMG_CHAR) * (	ui32MsgHeaderSize + 
+			(RGXFWIF_DM_MAX*(	4/*DM name + left parenthesis*/ + 
+								5/*UINT16 max num of digits*/ + 
+								1/*slash*/ + 
+								5/*UINT16 max num of digits*/ + 
+								3/*right parenthesis + comma + space*/)) + 
+			7 + (RGXFWIF_DM_MAX*6)/* FALSE() + (UINT16 max num + comma) per DM */ +
+			1/* \0 */);
+
+	pszLine = OSAllocMem(ui32LineSize);
+	if (pszLine == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"_RGXDumpRGXDebugSummary: Out of mem allocating line string (size: %d)", ui32LineSize));
+		return;
+	}
+
+	OSStringCopy(pszLine,pszMsgHeader);
+	pszTemp = pszLine + ui32MsgHeaderSize;
+
+	for (dm = 0; (dm < RGXFWIF_DM_MAX) && (apszDmNames[dm] != IMG_NULL); dm++)
+	{
+		OSStringCopy(pszTemp,apszDmNames[dm]);
+		pszTemp += OSStringLength(apszDmNames[dm]);
+		pszTemp += OSSNPrintf(pszTemp, 
+				5 + 1 + 5 + 1 + 5 + 1 + 1 + 1 + 1 /* UINT16 + slash + UINT16 + plus + UINT16 + right parenthesis + comma + space + \0 */,
+				"%u/%u+%u), ",
+				psRGXFWIfTraceBufCtl->aui16HwrDmRecoveredCount[dm],
+				psRGXFWIfTraceBufCtl->aui16HwrDmLockedUpCount[dm],
+				psRGXFWIfTraceBufCtl->aui16HwrDmOverranCount[dm]);
+	}
+
+	OSStringCopy(pszTemp, "FALSE(");
+	pszTemp += 6;
+
+	for (dm = 0; (dm < RGXFWIF_DM_MAX) && (apszDmNames[dm] != IMG_NULL); dm++)
+	{
+		pszTemp += OSSNPrintf(pszTemp, 
+				5 + 1 + 1 /* UINT16 max num + comma + \0 */,
+				(dm < RGXFWIF_DM_MAX-1 ? "%u," : "%u)"),
+				psRGXFWIfTraceBufCtl->aui16HwrDmFalseDetectCount[dm]);
+	}
+
+	PVR_DUMPDEBUG_LOG((pszLine));
+
+	OSFreeMem(pszLine);
+
+	/* Print out per HWR info */
+	for (dm = 0; (dm < RGXFWIF_DM_MAX) && (apszDmNames[dm] != IMG_NULL); dm++)
+	{
+		if (dm == RGXFWIF_DM_GP)
+		{
+			PVR_DUMPDEBUG_LOG(("DM %d (GP)", dm));
+		}
+		else
+		{
+			PVR_DUMPDEBUG_LOG(("DM %d (HWRflags 0x%08x)", dm, psRGXFWIfTraceBufCtl->aui32HWRRecoveryFlags[dm]));
+		}
+
+		ui32ReadIndex = 0;
+		for(i = 0 ; i < RGXFWIF_HWINFO_MAX ; i++)
+		{
+			psHWRInfo = &psHWInfoBuf->sHWRInfo[ui32ReadIndex];
+
+			if((psHWRInfo->eDM == dm) && (psHWRInfo->ui32HWRNumber != 0))
+			{
+				IMG_UINT64 ui64OSTimeStamp, ui64DeltaTime, ui64Seconds, ui64Nanoseconds;
+				IMG_UINT32 ui32CRDeltaToOSDeltaKNs = psHWRInfo->sTimeCorr.ui32CRDeltaToOSDeltaKNs;
+				IMG_UINT32 ui32Remainder;
+
+				/* Get delta CR, convert to delta nS and add the result to the correlated OS timestamp */
+				ui64DeltaTime = psHWRInfo->ui64CRTimer - psHWRInfo->sTimeCorr.ui64CRTimeStamp;
+				ui64DeltaTime = RGXFWIF_GET_DELTA_OSTIME_NS(ui64DeltaTime, ui32CRDeltaToOSDeltaKNs);
+				ui64OSTimeStamp = psHWRInfo->sTimeCorr.ui64OSTimeStamp + ui64DeltaTime;
+
+				/* Split timestamp in seconds and nanoseconds */
+				ui64Seconds = OSDivide64r64(ui64OSTimeStamp, 1000000000, &ui32Remainder);
+				ui64Nanoseconds = ui64OSTimeStamp - (ui64Seconds * 1000000000ULL);
+
+				ui32HWRRecoveryFlags = psHWRInfo->ui32HWRRecoveryFlags;
+				if(ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_GUILTY_LOCKUP) { pszLockupType = ", Guilty Lockup"; }
+				else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_INNOCENT_LOCKUP) { pszLockupType = ", Innocent Lockup"; }
+				else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_GUILTY_OVERRUNING) { pszLockupType = ", Guilty Overrun"; }
+				else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_GUILTY_LOCKUP) { pszLockupType = ", Innocent Overrun"; }
+
+				PVR_DUMPDEBUG_LOG(("  Recovery %d: PID = %d, frame = %d, HWRTData = 0x%08X,"
+				                   " EventStatus = 0x%08X, CRTimer = 0x%012llX, OSTimer = %llu.%09llu%s",
+				                   psHWRInfo->ui32HWRNumber,
+				                   psHWRInfo->ui32PID,
+				                   psHWRInfo->ui32FrameNum,
+				                   psHWRInfo->ui32ActiveHWRTData,
+				                   psHWRInfo->ui32EventStatus,
+				                   psHWRInfo->ui64CRTimer,
+				                   ui64Seconds,
+				                   ui64Nanoseconds,
+				                   pszLockupType));
+
+				switch(psHWRInfo->eHWRType)
+				{
+#if !defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE)
+					case RGX_HWRTYPE_BIF0FAULT:
+					case RGX_HWRTYPE_BIF1FAULT:
+					{
+						_RGXDumpRGXBIFBank(pfnDumpDebugPrintf, psDevInfo, RGXFWIF_HWRTYPE_BIF_BANK_GET(psHWRInfo->eHWRType),
+										psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus,
+										psHWRInfo->uHWRData.sBIFInfo.ui64BIFReqStatus,
+										psHWRInfo->uHWRData.sBIFInfo.ui64PCAddress,
+										psHWRInfo->ui64CRTimer,
+										IMG_FALSE);
+					}
+					break;
+#if defined(RGX_FEATURE_CLUSTER_GROUPING)
+					case RGX_HWRTYPE_TEXASBIF0FAULT:
+					{
+						_RGXDumpRGXBIFBank(pfnDumpDebugPrintf, psDevInfo, RGXDBG_TEXAS_BIF,
+										psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus,
+										psHWRInfo->uHWRData.sBIFInfo.ui64BIFReqStatus,
+										psHWRInfo->uHWRData.sBIFInfo.ui64PCAddress,
+										psHWRInfo->ui64CRTimer,
+										IMG_FALSE);
+					}
+					break;
+#endif
+#if defined(RGX_FEATURE_RAY_TRACING)
+					case RGX_HWRTYPE_DPXMMUFAULT:
+					{
+						_RGXDumpRGXBIFBank(pfnDumpDebugPrintf, psDevInfo, RGXDBG_DPX_BIF,
+										psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus,
+										psHWRInfo->uHWRData.sBIFInfo.ui64BIFReqStatus,
+										psHWRInfo->uHWRData.sBIFInfo.ui64PCAddress,
+										psHWRInfo->ui64CRTimer,
+										IMG_FALSE);
+					}
+					break;
+#endif
+#else
+					case RGX_HWRTYPE_MMUFAULT:
+					{
+						_RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, psDevInfo,
+						                          psHWRInfo->uHWRData.sMMUInfo.ui64MMUStatus,
+						                          IMG_FALSE);
+					}
+					break;
+
+					case RGX_HWRTYPE_MMUMETAFAULT:
+					{
+						_RGXDumpRGXMMUMetaFaultStatus(pfnDumpDebugPrintf, psDevInfo,
+						                              psHWRInfo->uHWRData.sMMUInfo.ui64MMUStatus,
+						                              IMG_FALSE);
+					}
+					break;
+#endif
+
+					case RGX_HWRTYPE_POLLFAILURE:
+					{
+						PVR_DUMPDEBUG_LOG(("    T%u polling %s (reg:0x%08X mask:0x%08X)",
+										  psHWRInfo->uHWRData.sPollInfo.ui32ThreadNum,
+										  ((psHWRInfo->uHWRData.sPollInfo.ui32CrPollAddr & RGXFW_POLL_TYPE_SET)?("set"):("unset")),
+										  psHWRInfo->uHWRData.sPollInfo.ui32CrPollAddr & ~RGXFW_POLL_TYPE_SET,
+										  psHWRInfo->uHWRData.sPollInfo.ui32CrPollMask));
+					}
+					break;
+
+					case RGX_HWRTYPE_OVERRUN:
+					case RGX_HWRTYPE_UNKNOWNFAILURE:
+					{
+						/* Nothing to dump */
+					}
+					break;
+
+					default:
+					{
+						PVR_ASSERT(IMG_FALSE);
+					}
+					break;
+				}
+			}
+
+			if(ui32ReadIndex == RGXFWIF_HWINFO_MAX_FIRST - 1)
+							ui32ReadIndex = psHWInfoBuf->ui32WriteIndex;
+			else
+				ui32ReadIndex = (ui32ReadIndex + 1) - (ui32ReadIndex / RGXFWIF_HWINFO_LAST_INDEX) * RGXFWIF_HWINFO_MAX_LAST;
+		}
+	}	
+}
+
+#if !defined(NO_HARDWARE)
+
+/*!
+*******************************************************************************
+
+ @Function	_CheckForPendingPage
+
+ @Description
+
+ Check if the MMU indicates it is blocked on a pending page
+
+ @Input psDevInfo	 - RGX device info
+
+ @Return   IMG_BOOL      - IMG_TRUE if there is a pending page
+
+******************************************************************************/
+static INLINE IMG_BOOL _CheckForPendingPage(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	IMG_UINT32 ui32BIFMMUEntry;
+
+	ui32BIFMMUEntry = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_MMU_ENTRY);
+
+	if(ui32BIFMMUEntry & RGX_CR_BIF_MMU_ENTRY_PENDING_EN)
+	{
+		return IMG_TRUE;
+	}
+	else
+	{
+		return IMG_FALSE;
+	}
+}
+
+/*!
+*******************************************************************************
+
+ @Function	_GetPendingPageInfo
+
+ @Description
+
+ Get information about the pending page from the MMU status registers
+
+ @Input psDevInfo	 - RGX device info
+ @Output psDevVAddr      - The device virtual address of the pending MMU address translation
+ @Output pui32CatBase    - The page catalog base
+ @Output pui32DataType   - The MMU entry data type
+
+ @Return   void
+
+******************************************************************************/
+static void _GetPendingPageInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_DEV_VIRTADDR *psDevVAddr,
+									IMG_UINT32 *pui32CatBase,
+									IMG_UINT32 *pui32DataType)
+{
+	IMG_UINT64 ui64BIFMMUEntryStatus;
+
+	ui64BIFMMUEntryStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_MMU_ENTRY_STATUS);
+
+	psDevVAddr->uiAddr = (ui64BIFMMUEntryStatus & ~RGX_CR_BIF_MMU_ENTRY_STATUS_ADDRESS_CLRMSK);
+
+	*pui32CatBase = (ui64BIFMMUEntryStatus & ~RGX_CR_BIF_MMU_ENTRY_STATUS_CAT_BASE_CLRMSK) >>
+								RGX_CR_BIF_MMU_ENTRY_STATUS_CAT_BASE_SHIFT;
+
+	*pui32DataType = (ui64BIFMMUEntryStatus & ~RGX_CR_BIF_MMU_ENTRY_STATUS_DATA_TYPE_CLRMSK) >>
+								RGX_CR_BIF_MMU_ENTRY_STATUS_DATA_TYPE_SHIFT;
+}
+
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function	_RGXDumpRGXDebugSummary
+
+ @Description
+
+ Dump a summary in human readable form with the RGX state
+
+ @Input psDevInfo	 - RGX device info
+
+ @Return   IMG_VOID
+
+******************************************************************************/
+static IMG_VOID _RGXDumpRGXDebugSummary(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                        PVRSRV_RGXDEV_INFO *psDevInfo, IMG_BOOL bRGXPoweredON)
+{
+	IMG_CHAR *pszState, *pszReason;
+	RGXFWIF_TRACEBUF *psRGXFWIfTraceBuf = psDevInfo->psRGXFWIfTraceBuf;
+
+#if defined(NO_HARDWARE)
+	PVR_UNREFERENCED_PARAMETER(bRGXPoweredON);
+#else
+	if (bRGXPoweredON)
+	{
+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE)
+		IMG_UINT64	ui64RegValMMUStatus;
+
+		ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_MMU_FAULT_STATUS);
+		_RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, psDevInfo, ui64RegValMMUStatus, IMG_TRUE);
+
+		ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_MMU_FAULT_STATUS_META);
+		_RGXDumpRGXMMUMetaFaultStatus(pfnDumpDebugPrintf, psDevInfo, ui64RegValMMUStatus, IMG_TRUE);
+#else
+		IMG_UINT64	ui64RegValMMUStatus, ui64RegValREQStatus;
+
+		ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_FAULT_BANK0_MMU_STATUS);
+		ui64RegValREQStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_FAULT_BANK0_REQ_STATUS);
+
+		_RGXDumpRGXBIFBank(pfnDumpDebugPrintf, psDevInfo, RGXDBG_BIF0, ui64RegValMMUStatus, ui64RegValREQStatus, 0, 0, IMG_TRUE);
+
+		ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_FAULT_BANK1_MMU_STATUS);
+		ui64RegValREQStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_FAULT_BANK1_REQ_STATUS);
+
+		_RGXDumpRGXBIFBank(pfnDumpDebugPrintf, psDevInfo, RGXDBG_BIF1, ui64RegValMMUStatus, ui64RegValREQStatus, 0, 0, IMG_TRUE);
+
+#if defined(RGX_FEATURE_CLUSTER_GROUPING)
+#if defined(RGX_NUM_PHANTOMS)
+		{
+			IMG_UINT32  ui32Phantom;
+			
+			for (ui32Phantom = 0;  ui32Phantom < RGX_NUM_PHANTOMS;  ui32Phantom++)
+			{
+				/* This can't be done as it may interfere with the FW... */
+				/*OSWriteHWReg64(RGX_CR_TEXAS_INDIRECT, ui32Phantom);*/
+				
+				ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS);
+				ui64RegValREQStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS);
+
+				_RGXDumpRGXBIFBank(pfnDumpDebugPrintf, psDevInfo, RGXDBG_TEXAS, ui64RegValMMUStatus, ui64RegValREQStatus, 0, 0, IMG_TRUE);
+			}
+		}
+#else
+		ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS);
+		ui64RegValREQStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS);
+
+		_RGXDumpRGXBIFBank(pfnDumpDebugPrintf, psDevInfo, RGXDBG_TEXAS_BIF, ui64RegValMMUStatus, ui64RegValREQStatus, 0, 0, IMG_TRUE);
+#endif
+#endif
+#endif
+#if defined(RGX_FEATURE_RAY_TRACING)
+		ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, DPX_CR_BIF_FAULT_BANK_MMU_STATUS);
+		ui64RegValREQStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, DPX_CR_BIF_FAULT_BANK_REQ_STATUS);
+
+		_RGXDumpRGXBIFBank(pfnDumpDebugPrintf, psDevInfo, RGXDBG_DPX_BIF, ui64RegValMMUStatus, ui64RegValREQStatus, 0, 0, IMG_TRUE);
+#endif
+
+		if(_CheckForPendingPage(psDevInfo))
+		{
+			IMG_UINT32 ui32CatBase;
+			IMG_UINT32 ui32DataType;
+			IMG_DEV_VIRTADDR sDevVAddr;
+
+			PVR_DUMPDEBUG_LOG(("MMU Pending page: Yes"));
+
+			_GetPendingPageInfo(psDevInfo, &sDevVAddr, &ui32CatBase, &ui32DataType);
+
+			if(ui32CatBase >= 8)
+			{
+				PVR_DUMPDEBUG_LOG(("Cannot check address on PM cat base %u", ui32CatBase));
+			}
+			else
+			{
+				IMG_DEV_PHYADDR sPCDevPAddr;
+
+				sPCDevPAddr.uiAddr = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_CAT_BASEN(ui32CatBase));
+
+				PVR_DUMPDEBUG_LOG(("Checking device virtual address " IMG_DEV_VIRTADDR_FMTSPEC
+							" on cat base %u. PC Addr = 0x%llX",
+								(unsigned long long) sDevVAddr.uiAddr,
+								ui32CatBase,
+								(unsigned long long) sPCDevPAddr.uiAddr));
+				RGXCheckFaultAddress(psDevInfo, &sDevVAddr, &sPCDevPAddr);
+			}
+		}
+	}
+#endif /* NO_HARDWARE */
+
+	/* Firmware state */
+	switch (psDevInfo->psDeviceNode->eHealthStatus)
+	{
+		case PVRSRV_DEVICE_HEALTH_STATUS_OK:  pszState = "OK";  break;
+		case PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING:  pszState = "NOT RESPONDING";  break;
+		case PVRSRV_DEVICE_HEALTH_STATUS_DEAD:  pszState = "DEAD";  break;
+		default:  pszState = "UNKNOWN";  break;
+	}
+
+	switch (psDevInfo->psDeviceNode->eHealthReason)
+	{
+		case PVRSRV_DEVICE_HEALTH_REASON_NONE:  pszReason = "";  break;
+		case PVRSRV_DEVICE_HEALTH_REASON_ASSERTED:  pszReason = " - FW Assert";  break;
+		case PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING:  pszReason = " - Poll failure";  break;
+		case PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS:  pszReason = " - Global Event Object timeouts rising";  break;
+		case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT:  pszReason = " - KCCB offset invalid";  break;
+		case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED:  pszReason = " - KCCB stalled";  break;
+		default:  pszReason = " - Unknown reason";  break;
+	}
+
+	if (psRGXFWIfTraceBuf == IMG_NULL)
+	{
+		PVR_DUMPDEBUG_LOG(("RGX FW State: %s%s", pszState, pszReason));
+
+		/* can't dump any more information */
+		return;
+	}
+	
+	PVR_DUMPDEBUG_LOG(("RGX FW State: %s%s (HWRState 0x%08x)", pszState, pszReason, psRGXFWIfTraceBuf->ui32HWRStateFlags));
+	PVR_DUMPDEBUG_LOG(("RGX FW Power State: %s (APM %s: %d ok, %d denied, %d other, %d total)", 
+	                  pszPowStateName[psRGXFWIfTraceBuf->ePowState],
+	                  (psDevInfo->pvAPMISRData)?"enabled":"disabled",
+	                  psDevInfo->ui32ActivePMReqOk,
+	                  psDevInfo->ui32ActivePMReqDenied,
+	                  psDevInfo->ui32ActivePMReqTotal - psDevInfo->ui32ActivePMReqOk - psDevInfo->ui32ActivePMReqDenied,
+	                  psDevInfo->ui32ActivePMReqTotal));
+
+
+	_RGXDumpFWAssert(pfnDumpDebugPrintf, psRGXFWIfTraceBuf);
+
+	_RGXDumpFWPoll(pfnDumpDebugPrintf, psRGXFWIfTraceBuf);
+
+	_RGXDumpFWHWRInfo(pfnDumpDebugPrintf, psRGXFWIfTraceBuf, psDevInfo);
+
+}
+
+static IMG_VOID _RGXDumpMetaSPExtraDebugInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                             PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+/* List of extra META Slave Port debug registers */
+#define RGX_META_SP_EXTRA_DEBUG \
+			X(RGX_CR_META_SP_MSLVCTRL0) \
+			X(RGX_CR_META_SP_MSLVCTRL1) \
+			X(RGX_CR_META_SP_MSLVIRQSTATUS) \
+			X(RGX_CR_META_SP_MSLVIRQENABLE) \
+			X(RGX_CR_META_SP_MSLVIRQLEVEL)
+
+	IMG_UINT32 ui32Idx, ui32RegIdx;
+	IMG_UINT32 ui32RegVal;
+	IMG_UINT32 ui32RegAddr;
+
+	const IMG_UINT32 aui32DebugRegAddr [] = {
+#define X(A) A,
+		RGX_META_SP_EXTRA_DEBUG
+#undef X
+		};
+
+	const IMG_CHAR* apszDebugRegName [] = {
+#define X(A) #A,
+	RGX_META_SP_EXTRA_DEBUG
+#undef X
+	};
+	
+	const IMG_UINT32 aui32Debug2RegAddr [] = {0xA28, 0x0A30, 0x0A38};
+
+	PVR_DUMPDEBUG_LOG(("META Slave Port extra debug:"));
+
+	/* dump first set of Slave Port debug registers */
+	for (ui32Idx = 0; ui32Idx < sizeof(aui32DebugRegAddr)/sizeof(IMG_UINT32); ui32Idx++)
+	{
+		const IMG_CHAR* pszRegName = apszDebugRegName[ui32Idx];
+
+		ui32RegAddr = aui32DebugRegAddr[ui32Idx];
+		ui32RegVal = OSReadHWReg32(psDevInfo->pvRegsBaseKM, ui32RegAddr);
+		PVR_DUMPDEBUG_LOG(("  * %s: 0x%8.8X", pszRegName, ui32RegVal));
+	}
+
+	/* dump second set of Slave Port debug registers */
+	for (ui32Idx = 0; ui32Idx < 4; ui32Idx++)
+	{
+		OSWriteHWReg32(psDevInfo->pvRegsBaseKM, 0xA20, ui32Idx);
+		ui32RegVal = OSReadHWReg32(psDevInfo->pvRegsBaseKM, 0xA20);
+		PVR_DUMPDEBUG_LOG(("  * 0xA20[%d]: 0x%8.8X", ui32Idx, ui32RegVal));
+
+	}
+
+	for (ui32RegIdx = 0; ui32RegIdx < sizeof(aui32Debug2RegAddr)/sizeof(IMG_UINT32); ui32RegIdx++)
+	{
+		ui32RegAddr = aui32Debug2RegAddr[ui32RegIdx];
+		for (ui32Idx = 0; ui32Idx < 2; ui32Idx++)
+		{
+			OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ui32RegAddr, ui32Idx);
+			ui32RegVal = OSReadHWReg32(psDevInfo->pvRegsBaseKM, ui32RegAddr);
+			PVR_DUMPDEBUG_LOG(("  * 0x%X[%d]: 0x%8.8X", ui32RegAddr, ui32Idx, ui32RegVal));
+		}
+	}
+
+}
+
+/*
+	RGXDumpDebugInfo
+*/
+IMG_VOID RGXDumpDebugInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                          PVRSRV_RGXDEV_INFO	*psDevInfo)
+{
+	IMG_UINT32 i;
+
+	for(i=0;i<=DEBUG_REQUEST_VERBOSITY_MAX;i++)
+	{
+		RGXDebugRequestProcess(pfnDumpDebugPrintf, psDevInfo, i);
+	}
+}
+
+
+#if defined(PVRSRV_ENABLE_FW_TRACE_DEBUGFS)
+/*
+ *  Array of all the Firmware Trace log IDs used to convert the trace data.
+ */
+typedef struct _TRACEBUF_LOG_ {
+	RGXFW_LOG_SFids	 eSFId;
+	IMG_CHAR		 *pszName;
+	IMG_CHAR		 *pszFmt;
+	IMG_UINT32		 ui32ArgNum;
+} TRACEBUF_LOG;
+
+TRACEBUF_LOG aLogDefinitions[] = {
+#define X(a, b, c, d, e) {RGXFW_LOG_CREATESFID(a,b,e), #c, d, e},
+	RGXFW_LOG_SFIDLIST 
+#undef X
+};
+
+#define NARGS_MASK ~(0xF<<16)
+static IMG_BOOL _FirmwareTraceIntegrityCheck(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf)
+{
+	TRACEBUF_LOG  *psLogDef    = &aLogDefinitions[0];
+	IMG_BOOL      bIntegrityOk = IMG_TRUE;
+
+	/*
+	 *  For every log ID, check the format string and number of arguments is valid.
+	 */
+	while (psLogDef->eSFId != RGXFW_SF_LAST)
+	{
+		IMG_UINT32    ui32Count;
+		IMG_CHAR      *pszString;
+		TRACEBUF_LOG  *psLogDef2;
+
+		/*
+		 * Check the number of arguments matches the number of '%' in the string and
+		 * check that no string uses %s which is not supported as it requires a
+		 * pointer to memory that is not going to be valid.
+		 */
+		pszString = psLogDef->pszFmt;
+		ui32Count = 0;
+		
+		while (*pszString != '\0')
+		{
+			if (*pszString++ == '%')
+			{
+				ui32Count++;
+				if (*pszString == 's')
+				{
+					bIntegrityOk = IMG_FALSE;
+					PVR_DUMPDEBUG_LOG(("Integrity Check FAIL: %s has an unsupported type not recognized (fmt: %%%c). Please fix.",
+									  psLogDef->pszName, *pszString));
+				}
+				else if (*pszString == '%')
+				{
+					/* Double % is a printable % sign and not a format string... */
+					ui32Count--;
+				}
+			}
+		}
+		
+		if (ui32Count != psLogDef->ui32ArgNum)
+		{
+			bIntegrityOk = IMG_FALSE;
+			PVR_DUMPDEBUG_LOG(("Integrity Check FAIL: %s has %d arguments but only %d are specified. Please fix.",
+			                  psLogDef->pszName, ui32Count, psLogDef->ui32ArgNum));
+		}
+
+		/* RGXDumpFirmwareTrace() has a hardcoded limit of supporting up to 20 arguments... */
+		if (ui32Count > 20)
+		{
+			bIntegrityOk = IMG_FALSE;
+			PVR_DUMPDEBUG_LOG(("Integrity Check FAIL: %s has %d arguments but a maximum of 20 are supported. Please fix.",
+			                  psLogDef->pszName, ui32Count));
+		}
+
+		/* Check the id number is unique (don't take into account the number of arguments) */
+		ui32Count = 0;
+		psLogDef2 = &aLogDefinitions[0];
+
+		while (psLogDef2->eSFId != RGXFW_SF_LAST)
+		{
+			if ((psLogDef->eSFId & NARGS_MASK) == (psLogDef2->eSFId & NARGS_MASK))
+			{
+				ui32Count++;
+			}
+			psLogDef2++;
+		}
+
+		if (ui32Count != 1)
+		{
+			bIntegrityOk = IMG_FALSE;
+			PVR_DUMPDEBUG_LOG(("Integrity Check FAIL: %s id %x is not unique, there are %d more. Please fix.",
+			                  psLogDef->pszName, psLogDef->eSFId, ui32Count - 1));
+		}
+
+		/* Move to the next log ID... */
+		psLogDef++;
+	}
+
+	return bIntegrityOk;
+}
+
+IMG_VOID RGXDumpFirmwareTrace(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                              PVRSRV_RGXDEV_INFO  *psDevInfo)
+{
+	RGXFWIF_TRACEBUF  *psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBuf;
+	static IMG_BOOL   bIntegrityCheckPassed = IMG_FALSE;
+
+	/* Check that the firmware trace is correctly defined... */
+	if (!bIntegrityCheckPassed)
+	{
+		bIntegrityCheckPassed = _FirmwareTraceIntegrityCheck(pfnDumpDebugPrintf);
+		if (!bIntegrityCheckPassed)
+		{
+			return;
+		}
+	}
+
+	/* Dump FW trace information... */
+	if (psRGXFWIfTraceBufCtl != IMG_NULL)
+	{
+		IMG_CHAR    szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN];
+		IMG_UINT32  tid;
+		
+		/* Print the log type settings... */
+		if (psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_GROUP_MASK)
+		{
+			PVR_DUMPDEBUG_LOG(("Debug log type: %s ( " RGXFWIF_LOG_ENABLED_GROUPS_LIST_PFSPEC ")",
+							  ((psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_TRACE)?("trace"):("tbi")),
+							  RGXFWIF_LOG_ENABLED_GROUPS_LIST(psRGXFWIfTraceBufCtl->ui32LogType)
+							  ));
+		}
+		else
+		{
+			PVR_DUMPDEBUG_LOG(("Debug log type: none"));
+		}
+
+		/* Print the decoded log for each thread... */
+		for (tid = 0;  tid < RGXFW_THREAD_NUM;  tid++) 
+		{
+			IMG_UINT32  *ui32TraceBuf = psRGXFWIfTraceBufCtl->sTraceBuf[tid].aui32TraceBuffer;
+			IMG_UINT32  ui32TracePtr  = psRGXFWIfTraceBufCtl->sTraceBuf[tid].ui32TracePointer;
+			IMG_UINT32  ui32Count     = 0;
+
+			while (ui32Count < RGXFW_TRACE_BUFFER_SIZE)
+			{
+				IMG_UINT32  ui32Data, ui32DataToId;
+				
+				/* Find the first valid log ID, skipping whitespace... */
+				do
+				{
+					ui32Data     = ui32TraceBuf[ui32TracePtr];
+					ui32DataToId = idToStringID(ui32Data);
+
+					/* If an unrecognized id is found check if it is valid, if it is tracebuf needs updating. */ 
+					if (ui32DataToId == RGXFW_SF_LAST  &&  RGXFW_LOG_VALIDID(ui32Data))
+					{
+						PVR_DUMPDEBUG_LOG(("ERROR: Unrecognized id (%x). From here on the trace might be wrong!", ui32Data));
+						return;
+					}
+
+					/* Update the trace pointer... */
+					ui32TracePtr = (ui32TracePtr + 1) % RGXFW_TRACE_BUFFER_SIZE;
+					ui32Count++;
+				} while ((RGXFW_SF_LAST == ui32DataToId  ||  ui32DataToId >= RGXFW_SF_FIRST)  &&
+				         ui32Count < RGXFW_TRACE_BUFFER_SIZE);
+
+				if (ui32Count < RGXFW_TRACE_BUFFER_SIZE)
+				{
+					IMG_UINT64  ui64RGXTimer;
+					
+					/* If we hit the ASSERT message then this is the end of the log... */
+					if (ui32Data == RGXFW_SF_MAIN_ASSERT_FAILED)
+					{
+						PVR_DUMPDEBUG_LOG(("ASSERTION %s failed at %s:%u",
+										  psRGXFWIfTraceBufCtl->sTraceBuf[tid].sAssertBuf.szInfo,
+										  psRGXFWIfTraceBufCtl->sTraceBuf[tid].sAssertBuf.szPath,
+										  psRGXFWIfTraceBufCtl->sTraceBuf[tid].sAssertBuf.ui32LineNum));
+						break;
+					}
+
+					/*
+					 *  Print the trace string and provide up to 20 arguments which
+					 *  printf function will be able to use. We have already checked
+					 *  that no string uses more than this.
+					 */
+					OSStringCopy(szBuffer, "%llu:T%u-%s> ");
+					OSStringCopy(&szBuffer[OSStringLength(szBuffer)], SFs[ui32DataToId].name);
+					szBuffer[OSStringLength(szBuffer)-1] = '\0';
+					ui64RGXTimer = (IMG_UINT64)(ui32TraceBuf[(ui32TracePtr +  0) % RGXFW_TRACE_BUFFER_SIZE]) << 32 |
+					               (IMG_UINT64)(ui32TraceBuf[(ui32TracePtr +  1) % RGXFW_TRACE_BUFFER_SIZE]);
+					PVR_DUMPDEBUG_LOG((szBuffer, ui64RGXTimer, tid, groups[RGXFW_SF_GID(ui32Data)],
+									  ui32TraceBuf[(ui32TracePtr +  2) % RGXFW_TRACE_BUFFER_SIZE],
+									  ui32TraceBuf[(ui32TracePtr +  3) % RGXFW_TRACE_BUFFER_SIZE],
+									  ui32TraceBuf[(ui32TracePtr +  4) % RGXFW_TRACE_BUFFER_SIZE],
+									  ui32TraceBuf[(ui32TracePtr +  5) % RGXFW_TRACE_BUFFER_SIZE],
+									  ui32TraceBuf[(ui32TracePtr +  6) % RGXFW_TRACE_BUFFER_SIZE],
+									  ui32TraceBuf[(ui32TracePtr +  7) % RGXFW_TRACE_BUFFER_SIZE],
+									  ui32TraceBuf[(ui32TracePtr +  8) % RGXFW_TRACE_BUFFER_SIZE],
+									  ui32TraceBuf[(ui32TracePtr +  9) % RGXFW_TRACE_BUFFER_SIZE],
+									  ui32TraceBuf[(ui32TracePtr + 10) % RGXFW_TRACE_BUFFER_SIZE],
+									  ui32TraceBuf[(ui32TracePtr + 11) % RGXFW_TRACE_BUFFER_SIZE],
+									  ui32TraceBuf[(ui32TracePtr + 12) % RGXFW_TRACE_BUFFER_SIZE],
+									  ui32TraceBuf[(ui32TracePtr + 13) % RGXFW_TRACE_BUFFER_SIZE],
+									  ui32TraceBuf[(ui32TracePtr + 14) % RGXFW_TRACE_BUFFER_SIZE],
+									  ui32TraceBuf[(ui32TracePtr + 15) % RGXFW_TRACE_BUFFER_SIZE],
+									  ui32TraceBuf[(ui32TracePtr + 16) % RGXFW_TRACE_BUFFER_SIZE],
+									  ui32TraceBuf[(ui32TracePtr + 17) % RGXFW_TRACE_BUFFER_SIZE],
+									  ui32TraceBuf[(ui32TracePtr + 18) % RGXFW_TRACE_BUFFER_SIZE],
+									  ui32TraceBuf[(ui32TracePtr + 19) % RGXFW_TRACE_BUFFER_SIZE],
+									  ui32TraceBuf[(ui32TracePtr + 20) % RGXFW_TRACE_BUFFER_SIZE],
+									  ui32TraceBuf[(ui32TracePtr + 21) % RGXFW_TRACE_BUFFER_SIZE]));
+
+					/* Update the trace pointer... */
+					ui32TracePtr = (ui32TracePtr + 2 + RGXFW_SF_PARAMNUM(ui32Data)) % RGXFW_TRACE_BUFFER_SIZE;
+					ui32Count    = (ui32Count    + 2 + RGXFW_SF_PARAMNUM(ui32Data));
+				}
+			}
+		}
+	}
+}
+#endif
+
+
+static IMG_CHAR* _RGXGetDebugDevPowerStateString(PVRSRV_DEV_POWER_STATE ePowerState)
+{
+	switch(ePowerState)
+	{
+		case PVRSRV_DEV_POWER_STATE_DEFAULT: return "DEFAULT";
+		case PVRSRV_DEV_POWER_STATE_OFF: return "OFF";
+		case PVRSRV_DEV_POWER_STATE_ON: return "ON";
+		default: return "UNKNOWN";
+	}
+}
+
+IMG_VOID RGXDebugRequestProcess(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                PVRSRV_RGXDEV_INFO	*psDevInfo,
+                                IMG_UINT32			ui32VerbLevel)
+{
+	PVRSRV_ERROR eError = PVRSRVPowerLock();
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,	"RGXDebugRequestProcess : failed to acquire lock, error:0x%x", eError));
+		return;
+	}
+
+	switch (ui32VerbLevel)
+	{
+		case DEBUG_REQUEST_VERBOSITY_LOW :
+		{
+			IMG_UINT32              ui32DeviceIndex;
+			PVRSRV_DEV_POWER_STATE  ePowerState;
+			IMG_BOOL                bRGXPoweredON;
+
+			ui32DeviceIndex = psDevInfo->psDeviceNode->sDevId.ui32DeviceIndex;
+
+			eError = PVRSRVGetDevicePowerState(ui32DeviceIndex, &ePowerState);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "RGXDebugRequestProcess: Error retrieving RGX power state. No debug info dumped."));
+				goto Exit;
+			}
+
+			bRGXPoweredON = (ePowerState == PVRSRV_DEV_POWER_STATE_ON);
+
+			PVR_DUMPDEBUG_LOG(("------[ RGX summary ]------"));
+			PVR_DUMPDEBUG_LOG(("RGX BVNC: %s", RGX_BVNC_KM));
+			PVR_DUMPDEBUG_LOG(("RGX Power State: %s", _RGXGetDebugDevPowerStateString(ePowerState)));
+
+			_RGXDumpRGXDebugSummary(pfnDumpDebugPrintf, psDevInfo, bRGXPoweredON);
+
+			if (bRGXPoweredON)
+			{
+
+				PVR_DUMPDEBUG_LOG(("------[ RGX registers ]------"));
+				PVR_DUMPDEBUG_LOG(("RGX Register Base Address (Linear):   0x%p", psDevInfo->pvRegsBaseKM));
+				PVR_DUMPDEBUG_LOG(("RGX Register Base Address (Physical): 0x%08lX", (unsigned long)psDevInfo->sRegsPhysBase.uiAddr));
+
+				/* Forcing bit 6 of MslvCtrl1 to 0 to avoid internal reg read going though the core */
+				OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVCTRL1, 0x0);
+
+				eError = RGXRunScript(psDevInfo, psDevInfo->psScripts->asDbgCommands, RGX_MAX_DEBUG_COMMANDS, PDUMP_FLAGS_CONTINUOUS, pfnDumpDebugPrintf);
+				if (eError != PVRSRV_OK)
+				{
+					PVR_DPF((PVR_DBG_WARNING,"RGXDebugRequestProcess: RGXRunScript failed (%d) - Retry", eError));
+
+					/* use thread1 for slave port accesses */
+					OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVCTRL1, 0x1 << RGX_CR_META_SP_MSLVCTRL1_THREAD_SHIFT);
+
+					eError = RGXRunScript(psDevInfo, psDevInfo->psScripts->asDbgCommands, RGX_MAX_DEBUG_COMMANDS, PDUMP_FLAGS_CONTINUOUS, pfnDumpDebugPrintf);
+					if (eError != PVRSRV_OK)
+					{
+						PVR_DPF((PVR_DBG_ERROR,"RGXDebugRequestProcess: RGXRunScript retry failed (%d) - Dump Slave Port debug information", eError));
+						_RGXDumpMetaSPExtraDebugInfo(pfnDumpDebugPrintf, psDevInfo);
+					}
+
+					/* use thread0 again */
+					OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVCTRL1, 0x0 << RGX_CR_META_SP_MSLVCTRL1_THREAD_SHIFT);
+				}
+			}
+			else
+			{
+				PVR_DUMPDEBUG_LOG((" (!) RGX power is down. No registers dumped"));
+			}
+
+			/* Dump out the kernel CCBs. */
+			{
+				RGXFWIF_DM	eKCCBType;
+				
+				for (eKCCBType = 0; eKCCBType < RGXFWIF_DM_MAX; eKCCBType++)
+				{
+					RGXFWIF_CCB_CTL	*psKCCBCtl = psDevInfo->apsKernelCCBCtl[eKCCBType];
+		
+					if (psKCCBCtl != IMG_NULL)
+					{
+						PVR_DUMPDEBUG_LOG(("RGX Kernel CCB %u WO:0x%X RO:0x%X",
+						                  eKCCBType, psKCCBCtl->ui32WriteOffset, psKCCBCtl->ui32ReadOffset));
+					}
+				}
+		 	}
+
+			/* Dump out the firmware CCBs. */
+			{
+				RGXFWIF_DM	eFCCBType;
+				
+				for (eFCCBType = 0; eFCCBType < RGXFWIF_DM_MAX; eFCCBType++)
+				{
+					RGXFWIF_CCB_CTL	*psFCCBCtl = psDevInfo->apsFirmwareCCBCtl[eFCCBType];
+		
+					if (psFCCBCtl != IMG_NULL)
+					{
+						PVR_DUMPDEBUG_LOG(("RGX Firmware CCB %u WO:0x%X RO:0x%X",
+						                  eFCCBType, psFCCBCtl->ui32WriteOffset, psFCCBCtl->ui32ReadOffset));
+					}
+				}
+		 	}
+
+		 	/* Dump the KCCB commands executed */
+			{
+				PVR_DUMPDEBUG_LOG(("RGX Kernel CCB commands executed = %d",
+				                  psDevInfo->psRGXFWIfTraceBuf->ui32KCCBCmdsExecuted));
+			}
+
+		 	/* Dump the IRQ info */
+			{
+				PVR_DUMPDEBUG_LOG(("RGX FW IRQ count = %d, last sampled in MISR = %d",
+				                  psDevInfo->psRGXFWIfTraceBuf->ui32InterruptCount,
+				                  g_ui32HostSampleIRQCount));
+			}
+
+			/* Dump the FW config flags */
+			{
+				RGXFWIF_INIT		*psRGXFWInit;
+
+				eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc,
+						(IMG_VOID **)&psRGXFWInit);
+
+				if (eError != PVRSRV_OK)
+				{
+					PVR_DPF((PVR_DBG_ERROR,"RGXDebugRequestProcess: Failed to acquire kernel fw if ctl (%u)",
+								eError));
+					goto Exit;
+				}
+
+				PVR_DUMPDEBUG_LOG(("RGX FW config flags = 0x%X", psRGXFWInit->ui32ConfigFlags));
+
+				DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc);
+			}
+
+			break;
+
+		}
+		case DEBUG_REQUEST_VERBOSITY_MEDIUM :
+		{
+			IMG_INT tid;
+			/* Dump FW trace information */
+			if (psDevInfo->psRGXFWIfTraceBuf != IMG_NULL)
+			{
+				RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBuf;
+		
+				for ( tid = 0 ; tid < RGXFW_THREAD_NUM ; tid++) 
+				{
+					IMG_UINT32	i;
+					IMG_BOOL	bPrevLineWasZero = IMG_FALSE;
+					IMG_BOOL	bLineIsAllZeros = IMG_FALSE;
+					IMG_UINT32	ui32CountLines = 0;
+					IMG_UINT32	*pui32TraceBuffer;
+					IMG_CHAR	*pszLine;
+		
+					pui32TraceBuffer = &psRGXFWIfTraceBufCtl->sTraceBuf[tid].aui32TraceBuffer[0];
+		
+					/* each element in the line is 8 characters plus a space.  The '+1' is because of the final trailing '\0'. */
+					pszLine = OSAllocMem(9*RGXFW_TRACE_BUFFER_LINESIZE+1);
+					if (pszLine == IMG_NULL)
+					{
+						PVR_DPF((PVR_DBG_ERROR,"RGXDebugRequestProcess: Out of mem allocating line string (size: %d)", 9*RGXFW_TRACE_BUFFER_LINESIZE));
+						goto Exit;
+					}
+		
+					/* Print the tracepointer */
+					if (psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_GROUP_MASK)
+					{
+						PVR_DUMPDEBUG_LOG(("Debug log type: %s ( " RGXFWIF_LOG_ENABLED_GROUPS_LIST_PFSPEC ")",
+						                  ((psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_TRACE)?("trace"):("tbi")),
+						                  RGXFWIF_LOG_ENABLED_GROUPS_LIST(psRGXFWIfTraceBufCtl->ui32LogType)
+						                  ));
+					}
+					else
+					{
+						PVR_DUMPDEBUG_LOG(("Debug log type: none"));
+					}
+					
+					PVR_DUMPDEBUG_LOG(("------[ RGX FW thread %d trace START ]------", tid));
+					PVR_DUMPDEBUG_LOG(("FWT[traceptr]: %X", psRGXFWIfTraceBufCtl->sTraceBuf[tid].ui32TracePointer));
+					PVR_DUMPDEBUG_LOG(("FWT[tracebufsize]: %X", RGXFW_TRACE_BUFFER_SIZE));
+		
+					for (i = 0; i < RGXFW_TRACE_BUFFER_SIZE; i += RGXFW_TRACE_BUFFER_LINESIZE)
+					{
+						IMG_UINT32 k = 0;
+						IMG_UINT32 ui32Line = 0x0;
+						IMG_UINT32 ui32LineOffset = i*sizeof(IMG_UINT32);
+						IMG_CHAR   *pszBuf = pszLine;
+		
+						for (k = 0; k < RGXFW_TRACE_BUFFER_LINESIZE; k++)
+						{
+							ui32Line |= pui32TraceBuffer[i + k];
+		
+							/* prepare the line to print it. The '+1' is because of the trailing '\0' added */
+							OSSNPrintf(pszBuf, 9 + 1, " %08x", pui32TraceBuffer[i + k]);
+							pszBuf += 9; /* write over the '\0' */
+						}
+		
+						bLineIsAllZeros = (ui32Line == 0x0);
+		
+						if (bLineIsAllZeros)
+						{
+							if (bPrevLineWasZero)
+							{
+								ui32CountLines++;
+							}
+							else
+							{
+								bPrevLineWasZero = IMG_TRUE;
+								ui32CountLines = 1;
+								PVR_DUMPDEBUG_LOG(("FWT[%08x]: 00000000 ... 00000000", ui32LineOffset));
+							}
+						}
+						else
+						{
+							if (bPrevLineWasZero  &&  ui32CountLines > 1)
+							{
+								PVR_DUMPDEBUG_LOG(("FWT[...]: %d lines were all zero", ui32CountLines));
+							}
+							bPrevLineWasZero = IMG_FALSE;
+
+							PVR_DUMPDEBUG_LOG(("FWT[%08x]:%s", ui32LineOffset, pszLine));
+						}
+		
+					}
+					if (bPrevLineWasZero)
+					{
+						PVR_DUMPDEBUG_LOG(("FWT[END]: %d lines were all zero", ui32CountLines));
+					}
+		
+					PVR_DUMPDEBUG_LOG(("------[ RGX FW thread %d trace END ]------", tid));
+		
+					OSFreeMem(pszLine);
+				}
+			}
+
+			{
+				PVR_DUMPDEBUG_LOG(("------[ Stalled FWCtxs ]------"));
+
+				CheckForStalledTransferCtxt(psDevInfo, pfnDumpDebugPrintf);
+				CheckForStalledRenderCtxt(psDevInfo, pfnDumpDebugPrintf);
+				CheckForStalledComputeCtxt(psDevInfo, pfnDumpDebugPrintf);
+#if defined(RGX_FEATURE_RAY_TRACING)
+				CheckForStalledRayCtxt(psDevInfo, pfnDumpDebugPrintf);
+#endif
+			}
+			break;
+		}
+		case DEBUG_REQUEST_VERBOSITY_HIGH:
+		{
+			PVRSRV_ERROR            eError;
+			IMG_UINT32              ui32DeviceIndex;
+			PVRSRV_DEV_POWER_STATE  ePowerState;
+			IMG_BOOL                bRGXPoweredON;
+
+			ui32DeviceIndex = psDevInfo->psDeviceNode->sDevId.ui32DeviceIndex;
+
+			eError = PVRSRVGetDevicePowerState(ui32DeviceIndex, &ePowerState);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "RGXDebugRequestProcess: Error retrieving RGX power state. No debug info dumped."));
+				return;
+			}
+
+			bRGXPoweredON = (ePowerState == PVRSRV_DEV_POWER_STATE_ON);
+
+			PVR_DUMPDEBUG_LOG(("------[ Debug bus ]------"));
+
+			_RGXDumpRGXDebugSummary(pfnDumpDebugPrintf, psDevInfo, bRGXPoweredON);
+
+			if (bRGXPoweredON)
+			{
+				eError = RGXRunScript(psDevInfo, psDevInfo->psScripts->asDbgBusCommands, RGX_MAX_DBGBUS_COMMANDS, PDUMP_FLAGS_CONTINUOUS, pfnDumpDebugPrintf);
+				if (eError != PVRSRV_OK)
+				{
+					PVR_DPF((PVR_DBG_WARNING,"RGXDebugRequestProcess: RGXRunScript failed (%s)", PVRSRVGetErrorStringKM(eError)));
+				}
+				break;
+			}
+		}
+		default:
+			break;
+	}
+
+Exit:
+	PVRSRVPowerUnlock();
+}
+
+/*
+	RGXPanic
+*/
+IMG_VOID RGXPanic(PVRSRV_RGXDEV_INFO	*psDevInfo)
+{
+	PVR_LOG(("RGX panic"));
+	PVRSRVDebugRequest(DEBUG_REQUEST_VERBOSITY_MAX, IMG_NULL);
+	OSPanic();
+}
+
+/*
+	RGXQueryDMState
+*/
+PVRSRV_ERROR RGXQueryDMState(PVRSRV_RGXDEV_INFO *psDevInfo, RGXFWIF_DM eDM, RGXFWIF_DM_STATE *peState, RGXFWIF_DEV_VIRTADDR *psCommonContextDevVAddr)
+{
+	PVRSRV_ERROR	eError = PVRSRV_OK;
+	RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBuf;
+
+	if (eDM >= RGXFWIF_DM_MAX)
+	{
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		PVR_DPF((PVR_DBG_ERROR,"RGXQueryDMState: eDM parameter is out of range (%u)",eError));
+		return eError;
+	}
+
+	if (peState == IMG_NULL)
+	{
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		PVR_DPF((PVR_DBG_ERROR,"RGXQueryDMState: peState is NULL (%u)",eError));
+		return eError;
+	}
+
+	if (psCommonContextDevVAddr == IMG_NULL)
+	{
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		PVR_DPF((PVR_DBG_ERROR,"RGXQueryDMState: psCommonContextDevVAddr is NULL (%u)",eError));
+		return eError;
+	}
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXQueryDMState: Failed (%d) to acquire address for trace buffer", eError));
+		return eError;
+	}
+
+	if (psRGXFWIfTraceBufCtl->apsHwrDmFWCommonContext[eDM].ui32Addr)
+	{
+		*peState = RGXFWIF_DM_STATE_LOCKEDUP;
+	}
+	else
+	{
+		*peState = RGXFWIF_DM_STATE_NORMAL;
+	}
+	
+	*psCommonContextDevVAddr = psRGXFWIfTraceBufCtl->apsHwrDmFWCommonContext[eDM];
+
+	return eError;
+}
+
+
+/******************************************************************************
+ End of file (rgxdebug.c)
+******************************************************************************/
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxdebug.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxdebug.h
new file mode 100644
index 0000000..498dc99
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxdebug.h
@@ -0,0 +1,166 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX debug header file
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX debugging functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXDEBUG_H__)
+#define __RGXDEBUG_H__
+
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "device.h"
+#include "pvrsrv.h"
+#include "rgxdevice.h"
+
+
+/*!
+*******************************************************************************
+
+ @Function	RGXPanic
+
+ @Description
+
+ Called when an unrecoverable situation is detected. Dumps RGX debug
+ information and tells the OS to panic.
+
+ @Input psDevInfo - RGX device info
+
+ @Return IMG_VOID
+
+******************************************************************************/
+IMG_VOID RGXPanic(PVRSRV_RGXDEV_INFO	*psDevInfo);
+
+/*!
+*******************************************************************************
+
+ @Function	RGXDumpDebugInfo
+
+ @Description
+
+ Dump useful debugging info. Dumps lesser information than PVRSRVDebugRequest.
+ Does not dump debugging information for all requester types.(SysDebug, ServerSync info)
+
+ @Input pfnDumpDebugPrintf  - Optional replacement print function
+ @Input psDevInfo	        - RGX device info
+
+ @Return   IMG_VOID
+
+******************************************************************************/
+IMG_VOID RGXDumpDebugInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                          PVRSRV_RGXDEV_INFO	*psDevInfo);
+
+/*!
+*******************************************************************************
+
+ @Function	RGXDebugRequestProcess
+
+ @Description
+
+ This function will print out the debug for the specificed level of
+ verbosity
+
+ @Input pfnDumpDebugPrintf  - Optional replacement print function
+ @Input psDevInfo	        - RGX device info
+ @Input ui32VerbLevel       - Verbosity level
+
+ @Return   IMG_VOID
+
+******************************************************************************/
+IMG_VOID RGXDebugRequestProcess(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                                PVRSRV_RGXDEV_INFO	*psDevInfo,
+                                IMG_UINT32			ui32VerbLevel);
+
+
+#if defined(PVRSRV_ENABLE_FW_TRACE_DEBUGFS)
+/*!
+*******************************************************************************
+
+ @Function	RGXDumpFirmwareTrace
+
+ @Description Dumps the decoded version of the firmware trace buffer.
+
+ Dump useful debugging info
+
+ @Input pfnDumpDebugPrintf  - Optional replacement print function
+ @Input psDevInfo	        - RGX device info
+
+ @Return   IMG_VOID
+
+******************************************************************************/
+IMG_VOID RGXDumpFirmwareTrace(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                              PVRSRV_RGXDEV_INFO	*psDevInfo);
+#endif
+
+
+/*!
+*******************************************************************************
+
+ @Function	RGXQueryDMState
+
+ @Description
+
+ Query DM state
+
+ @Input  psDevInfo        - RGX device info
+ @Input  eDM              - DM number for which to return status
+ @Output peState          - RGXFWIF_DM_STATE
+ @Output psComCtxDevVAddr - If DM is locked-up, Firmware address of Firmware Common Context, otherwise IMG_NULL
+
+ @Return PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXQueryDMState(PVRSRV_RGXDEV_INFO *psDevInfo, RGXFWIF_DM eDM, RGXFWIF_DM_STATE *peState, RGXFWIF_DEV_VIRTADDR *psComCtxDevVAddr);
+
+/*!
+*******************************************************************************
+
+ @Function	RGXReadWithSP
+
+ @Description
+
+ Reads data from a memory location (FW memory map) using the META Slave Port
+
+ @Input  ui32FWAddr - 32 bit FW address
+
+ @Return IMG_UINT32
+******************************************************************************/
+IMG_UINT32 RGXReadWithSP(IMG_UINT32 ui32FWAddr);
+
+#endif /* __RGXDEBUG_H__ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxdevice.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxdevice.h
new file mode 100644
index 0000000..00e2e50
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxdevice.h
@@ -0,0 +1,422 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX device node header file
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX device node
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXDEVICE_H__)
+#define __RGXDEVICE_H__
+
+#include "img_types.h"
+#include "pvrsrv_device_types.h"
+#include "mmu_common.h"
+#include "rgx_fwif_km.h"
+#include "rgx_fwif.h"
+#include "rgxscript.h"
+#include "cache_external.h"
+#include "device.h"
+#include "osfunc.h"
+
+typedef struct _RGX_SERVER_COMMON_CONTEXT_ RGX_SERVER_COMMON_CONTEXT;
+
+typedef struct {
+	DEVMEM_MEMDESC		*psFWFrameworkMemDesc;
+	IMG_DEV_VIRTADDR	*psMCUFenceAddr;
+} RGX_COMMON_CONTEXT_INFO;
+
+
+/*!
+ ******************************************************************************
+ * Device state flags
+ *****************************************************************************/
+#define RGXKM_DEVICE_STATE_ZERO_FREELIST			(0x1 << 0)		/*!< Zeroing the physical pages of reconstructed free lists */
+#define RGXKM_DEVICE_STATE_FTRACE_EN				(0x1 << 1)		/*!< Used to enable device FTrace thread to consume HWPerf data */
+#define RGXKM_DEVICE_STATE_DISABLE_DW_LOGGING_EN 	(0x1 << 2)		/*!< Used to disable the Devices Watchdog logging */
+
+
+/*!
+ ******************************************************************************
+ * GPU DVFS Table
+ *****************************************************************************/
+
+#define RGX_GPU_DVFS_TABLE_SIZE            100                      /* DVFS Table size */
+#define RGX_GPU_DVFS_GET_INDEX(clockfreq)  ((clockfreq) / 10000000) /* Assuming different GPU clocks are separated by at least 10MHz
+                                                                     * WARNING: this macro must be used only with nominal values of
+                                                                     * the GPU clock speed (the ones provided by the customer code) */
+#define RGX_GPU_DVFS_FIRST_CALIBRATION_TIME_US       25000          /* Time required to calibrate a clock frequency the first time */
+#define RGX_GPU_DVFS_TRANSITION_CALIBRATION_TIME_US  150000         /* Time required for a recalibration after a DVFS transition */
+#define RGX_GPU_DVFS_PERIODIC_CALIBRATION_TIME_US    10000000       /* Time before the next periodic calibration and correlation */
+
+typedef struct _RGX_GPU_DVFS_TABLE_
+{
+	IMG_UINT64 ui64CalibrationCRTimestamp;              /*!< CR timestamp used to calibrate GPU frequencies (beginning of a calibration period) */
+	IMG_UINT64 ui64CalibrationOSTimestamp;              /*!< OS timestamp used to calibrate GPU frequencies (beginning of a calibration period) */
+	IMG_UINT64 ui64CalibrationCRTimediff;               /*!< CR timediff used to calibrate GPU frequencies (calibration period) */
+	IMG_UINT64 ui64CalibrationOSTimediff;               /*!< OS timediff used to calibrate GPU frequencies (calibration period) */
+	IMG_UINT32 ui32CalibrationPeriod;                   /*!< Threshold used to determine whether the current GPU frequency should be calibrated */
+	IMG_UINT32 ui32CurrentDVFSId;                       /*!< Current table entry index */
+	IMG_BOOL   bAccumulatePeriod;                       /*!< Accumulate many consecutive periods to get a better calibration at the end */
+	IMG_UINT32 aui32DVFSClock[RGX_GPU_DVFS_TABLE_SIZE]; /*!< DVFS clocks table (clocks in Hz) */
+} RGX_GPU_DVFS_TABLE;
+
+
+/*!
+ ******************************************************************************
+ * GPU utilisation statistics
+ *****************************************************************************/
+
+typedef struct _RGXFWIF_GPU_UTIL_STATS_
+{
+	IMG_BOOL   bValid;                /* If TRUE, statistics are valid.
+	                                     FALSE if the driver couldn't get reliable stats. */
+	IMG_UINT64 ui64GpuStatActiveHigh; /* GPU active high statistic */
+	IMG_UINT64 ui64GpuStatActiveLow;  /* GPU active low (i.e. TLA active only) statistic */
+	IMG_UINT64 ui64GpuStatBlocked;    /* GPU blocked statistic */
+	IMG_UINT64 ui64GpuStatIdle;       /* GPU idle statistic */
+	IMG_UINT64 ui64GpuStatCumulative; /* Sum of active/blocked/idle stats */
+
+#if defined(GPU_UTIL_SLC_STALL_COUNTERS)
+	IMG_UINT32 ui32SLCStallsRatio;    /* SLC Read/Write stalls ratio expressed in 0,01% units */
+#endif
+#if defined(PVR_POWER_ACTOR) && defined (PVR_DVFS)
+	IMG_UINT32 ui32GpuEnergy;         /* GPU dynamic energy */
+#endif
+} RGXFWIF_GPU_UTIL_STATS;
+
+
+typedef struct _RGX_REG_CONFIG_
+{
+	IMG_BOOL			bEnabled;
+	RGXFWIF_PWR_EVT		ePowerIslandToPush;
+	IMG_UINT32      	ui32NumRegRecords;
+} RGX_REG_CONFIG;
+
+typedef struct _PVRSRV_STUB_PBDESC_ PVRSRV_STUB_PBDESC;
+
+/* there is a corresponding define in rgxapi.h */
+#define RGX_MAX_TIMER_QUERIES 16
+
+/*!
+ ******************************************************************************
+ * RGX Device info
+ *****************************************************************************/
+
+typedef struct _PVRSRV_RGXDEV_INFO_
+{
+	PVRSRV_DEVICE_TYPE		eDeviceType;
+	PVRSRV_DEVICE_CLASS		eDeviceClass;
+	PVRSRV_DEVICE_NODE		*psDeviceNode;
+
+	IMG_UINT8				ui8VersionMajor;
+	IMG_UINT8				ui8VersionMinor;
+	IMG_UINT32				ui32CoreConfig;
+	IMG_UINT32				ui32CoreFlags;
+
+	IMG_BOOL                bFirmwareInitialised;
+	IMG_BOOL				bPDPEnabled;
+
+	/* Kernel mode linear address of device registers */
+	IMG_PVOID				pvRegsBaseKM;
+
+	/* FIXME: The alloc for this should go through OSAllocMem in future */
+	IMG_HANDLE				hRegMapping;
+
+	/* System physical address of device registers*/
+	IMG_CPU_PHYADDR			sRegsPhysBase;
+	/*  Register region size in bytes */
+	IMG_UINT32				ui32RegSize;
+
+	PVRSRV_STUB_PBDESC		*psStubPBDescListKM;
+
+	/* Firmware memory context info */
+	DEVMEM_CONTEXT			*psKernelDevmemCtx;
+	DEVMEM_HEAP				*psFirmwareHeap;
+	MMU_CONTEXT				*psKernelMMUCtx;
+	IMG_UINT32				ui32KernelCatBaseIdReg;
+	IMG_UINT32				ui32KernelCatBaseId;
+	IMG_UINT32				ui32KernelCatBaseReg;
+	IMG_UINT32				ui32KernelCatBaseWordSize;
+	IMG_UINT32				ui32KernelCatBaseAlignShift;
+	IMG_UINT32				ui32KernelCatBaseShift;
+	IMG_UINT64				ui64KernelCatBaseMask;
+
+	IMG_VOID				*pvDeviceMemoryHeap;
+	
+	/* Kernel CCBs */
+	DEVMEM_MEMDESC			*apsKernelCCBCtlMemDesc[RGXFWIF_DM_MAX];	/*!< memdesc for kernel CCB control */
+	RGXFWIF_CCB_CTL			*apsKernelCCBCtl[RGXFWIF_DM_MAX];			/*!< kernel CCB control kernel mapping */
+	DEVMEM_MEMDESC			*apsKernelCCBMemDesc[RGXFWIF_DM_MAX];		/*!< memdesc for kernel CCB */
+	IMG_UINT8				*apsKernelCCB[RGXFWIF_DM_MAX];				/*!< kernel CCB kernel mapping */
+
+	/* Firmware CCBs */
+	DEVMEM_MEMDESC			*apsFirmwareCCBCtlMemDesc[RGXFWIF_DM_MAX];	/*!< memdesc for Firmware CCB control */
+	RGXFWIF_CCB_CTL			*apsFirmwareCCBCtl[RGXFWIF_DM_MAX];			/*!< kernel CCB control Firmware mapping */
+	DEVMEM_MEMDESC			*apsFirmwareCCBMemDesc[RGXFWIF_DM_MAX];		/*!< memdesc for Firmware CCB */
+	IMG_UINT8				*apsFirmwareCCB[RGXFWIF_DM_MAX];				/*!< kernel CCB Firmware mapping */
+
+	/*
+		if we don't preallocate the pagetables we must 
+		insert newly allocated page tables dynamically 
+	*/
+	IMG_VOID				*pvMMUContextList;
+
+	IMG_UINT32				ui32ClkGateStatusReg;
+	IMG_UINT32				ui32ClkGateStatusMask;
+	RGX_SCRIPTS				*psScripts;
+
+	DEVMEM_MEMDESC			*psRGXFWCodeMemDesc;
+	DEVMEM_EXPORTCOOKIE		sRGXFWCodeExportCookie;
+
+	DEVMEM_MEMDESC			*psRGXFWDataMemDesc;
+	DEVMEM_EXPORTCOOKIE		sRGXFWDataExportCookie;
+
+	DEVMEM_MEMDESC			*psRGXFWCorememMemDesc;
+	DEVMEM_EXPORTCOOKIE		sRGXFWCorememExportCookie;
+
+	DEVMEM_MEMDESC			*psRGXFWIfTraceBufCtlMemDesc;
+	RGXFWIF_TRACEBUF		*psRGXFWIfTraceBuf;
+
+	DEVMEM_MEMDESC			*psRGXFWIfHWRInfoBufCtlMemDesc;
+	RGXFWIF_HWRINFOBUF		*psRGXFWIfHWRInfoBuf;
+
+	DEVMEM_MEMDESC			*psRGXFWIfGpuUtilFWCbCtlMemDesc;
+	RGXFWIF_GPU_UTIL_FWCB	*psRGXFWIfGpuUtilFWCb;
+
+	DEVMEM_MEMDESC			*psRGXFWIfHWPerfBufMemDesc;
+	IMG_BYTE				*psRGXFWIfHWPerfBuf;
+	IMG_UINT32				ui32RGXFWIfHWPerfBufSize; /* in bytes */
+
+	DEVMEM_MEMDESC			*psRGXFWIfCorememDataStoreMemDesc;
+
+	DEVMEM_MEMDESC			*psRGXFWIfRegCfgMemDesc;
+
+	DEVMEM_MEMDESC			*psRGXFWIfHWPerfCountersMemDesc;
+	DEVMEM_EXPORTCOOKIE     sRGXFWHWPerfCountersExportCookie;
+	DEVMEM_MEMDESC			*psRGXFWIfInitMemDesc;
+
+	DEVMEM_MEMDESC			*psRGXFWIfRuntimeCfgMemDesc;
+	RGXFWIF_RUNTIME_CFG		*psRGXFWIfRuntimeCfg;
+
+#if defined(RGXFW_ALIGNCHECKS)
+	DEVMEM_MEMDESC			*psRGXFWAlignChecksMemDesc;
+#endif
+
+	DEVMEM_MEMDESC			*psRGXFWSigTAChecksMemDesc;
+	IMG_UINT32				ui32SigTAChecksSize;
+
+	DEVMEM_MEMDESC			*psRGXFWSig3DChecksMemDesc;
+	IMG_UINT32				ui32Sig3DChecksSize;
+
+#if defined(RGX_FEATURE_RAY_TRACING)
+	DEVMEM_MEMDESC			*psRGXFWSigRTChecksMemDesc;
+	IMG_UINT32				ui32SigRTChecksSize;
+	
+	DEVMEM_MEMDESC			*psRGXFWSigSHChecksMemDesc;
+	IMG_UINT32				ui32SigSHChecksSize;
+#endif
+
+	IMG_VOID				*pvLISRData;
+	IMG_VOID				*pvMISRData;
+	IMG_VOID				*pvAPMISRData;
+	
+	DEVMEM_MEMDESC			*psRGXFaultAddressMemDesc;
+
+#if defined(FIX_HW_BRN_37200)
+	DEVMEM_MEMDESC			*psRGXFWHWBRN37200MemDesc;
+#endif
+
+#if defined(RGX_FEATURE_SLC_VIVT)
+	DEVMEM_MEMDESC			*psSLC3FenceMemDesc;
+#endif
+
+#if defined (PDUMP)
+	IMG_BOOL				abDumpedKCCBCtlAlready[RGXFWIF_DM_MAX];
+	
+#endif	
+
+	/*! Handles to the lock and stream objects used to transport
+	 * HWPerf data to user side clients. See RGXHWPerfInit() RGXHWPerfDeinit().
+	 * Set during initialisation if the application hint turns bit 7
+	 * 'Enable HWPerf' on in the ConfigFlags sent to the FW. FW stores this
+	 * bit in the RGXFW_CTL.ui32StateFlags member. They may also get
+	 * set by the API RGXCtrlHWPerf(). Thus these members may be 0 if HWPerf is
+	 * not enabled as these members are created on demand and destroyed at
+	 * driver unload.
+	 */
+	POS_LOCK 				hLockHWPerfStream;
+	IMG_HANDLE				hHWPerfStream;
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+	IMG_HANDLE				hGPUTraceCmdCompleteHandle;
+	IMG_BOOL				bFTraceGPUEventsEnabled;
+	IMG_HANDLE				hGPUTraceTLConnection;
+	IMG_HANDLE				hGPUTraceTLStream;
+	IMG_UINT64				ui64LastSampledTimeCorrOSTimeStamp;
+#endif
+
+	/* If we do 10 deferred memory allocations per second, then the ID would warp around after 13 years */
+	IMG_UINT32				ui32ZSBufferCurrID;	/*!< ID assigned to the next deferred devmem allocation */
+	IMG_UINT32				ui32FreelistCurrID;	/*!< ID assigned to the next freelist */
+	IMG_UINT32				ui32RPMFreelistCurrID;	/*!< ID assigned to the next RPM freelist */
+
+	POS_LOCK 				hLockZSBuffer;		/*!< Lock to protect simultaneous access to ZSBuffers */
+	DLLIST_NODE				sZSBufferHead;		/*!< List of on-demand ZSBuffers */
+	POS_LOCK 				hLockFreeList;		/*!< Lock to protect simultaneous access to Freelists */
+	DLLIST_NODE				sFreeListHead;		/*!< List of growable Freelists */
+	POS_LOCK 				hLockRPMFreeList;	/*!< Lock to protect simultaneous access to RPM Freelists */
+	DLLIST_NODE				sRPMFreeListHead;	/*!< List of growable RPM Freelists */
+	PSYNC_PRIM_CONTEXT		hSyncPrimContext;
+	PVRSRV_CLIENT_SYNC_PRIM *psPowSyncPrim;
+
+	IMG_UINT32				ui32ActivePMReqOk;
+	IMG_UINT32				ui32ActivePMReqDenied;
+	IMG_UINT32				ui32ActivePMReqTotal;
+	
+	IMG_HANDLE				hProcessQueuesMISR;
+
+	IMG_UINT32 				ui32DeviceFlags;	/*!< Flags to track general device state  */
+
+	/* Poll data for detecting firmware fatal errors */
+	IMG_UINT32  aui32CrLastPollAddr[RGXFW_THREAD_NUM];
+	IMG_UINT32  ui32KCCBCmdsExecutedLastTime;
+	IMG_BOOL    bKCCBCmdsWaitingLastTime;
+	IMG_UINT32  ui32GEOTimeoutsLastTime;
+
+	/* Client stall detection */
+	IMG_BOOL	bStalledClient;
+
+	/* Timer Queries */
+	IMG_UINT32        ui32ActiveQueryId;       /*!< id of the active line */
+	IMG_BOOL          bSaveStart;              /*!< save the start time of the next kick on the device*/
+	IMG_BOOL          bSaveEnd;                /*!< save the end time of the next kick on the device*/
+
+	DEVMEM_MEMDESC    * psStartTimeMemDesc;    /*!< memdesc for Start Times */
+	RGXFWIF_TIMESTAMP * pasStartTimeById;      /*!< CPU mapping of the above */
+
+	DEVMEM_MEMDESC    * psEndTimeMemDesc;      /*!< memdesc for End Timer */
+	RGXFWIF_TIMESTAMP * pasEndTimeById;        /*!< CPU mapping of the above */
+
+	IMG_UINT32        aui32ScheduledOnId[RGX_MAX_TIMER_QUERIES];      /*!< kicks Scheduled on QueryId */
+	DEVMEM_MEMDESC    * psCompletedMemDesc;    /*!< kicks Completed on QueryId */
+	IMG_UINT32        * pui32CompletedById;    /*!< CPU mapping of the above */
+
+	/* GPU DVFS Table */
+	RGX_GPU_DVFS_TABLE  *psGpuDVFSTable;
+
+	/* Pointer to function returning the GPU utilisation statistics since the last
+	 * time the function was called. Supports different users at the same time.
+	 *
+	 * psReturnStats [out]: GPU utilisation statistics (active high/active low/idle/blocked)
+	 *                      in microseconds since the last time the function was called
+	 *                      by a specific user (identified by hGpuUtilUser)
+	 *
+	 * Returns PVRSRV_OK in case the call completed without errors,
+	 * some other value otherwise.
+	 */
+	PVRSRV_ERROR (*pfnGetGpuUtilStats) (PVRSRV_DEVICE_NODE *psDeviceNode,
+	                                    IMG_HANDLE hGpuUtilUser,
+	                                    RGXFWIF_GPU_UTIL_STATS *psReturnStats);
+
+	PVRSRV_ERROR (*pfnRegisterGpuUtilStats) (IMG_HANDLE *phGpuUtilUser);
+	PVRSRV_ERROR (*pfnUnregisterGpuUtilStats) (IMG_HANDLE hGpuUtilUser);
+
+	POS_LOCK    hGPUUtilLock;
+
+	/* Register configuration */
+	RGX_REG_CONFIG		sRegCongfig;
+
+	IMG_BOOL				bIgnoreFurtherIRQs;
+	DLLIST_NODE				sMemoryContextList;
+
+	POSWR_LOCK		hRenderCtxListLock;
+	POSWR_LOCK		hComputeCtxListLock;
+	POSWR_LOCK		hTransferCtxListLock;
+	POSWR_LOCK		hRaytraceCtxListLock;
+	POSWR_LOCK		hMemoryCtxListLock;
+
+	/* Linked lists of contexts on this device */
+	DLLIST_NODE 		sRenderCtxtListHead;
+	DLLIST_NODE 		sComputeCtxtListHead;
+	DLLIST_NODE 		sTransferCtxtListHead;
+	DLLIST_NODE 		sRaytraceCtxtListHead;
+
+	DLLIST_NODE 		sCommonCtxtListHead;
+	IMG_UINT32			ui32CommonCtxtCurrentID;			/*!< ID assigned to the next common context */
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+	POS_LOCK 				hDebugFaultInfoLock;		/*!< Lock to protect the debug fault info list */
+	POS_LOCK 				hMMUCtxUnregLock;	/*!< Lock to protect list of unregistered MMU contexts */
+#endif
+} PVRSRV_RGXDEV_INFO;
+
+
+
+typedef struct _RGX_TIMING_INFORMATION_
+{
+	/*! GPU default core clock speed in Hz */
+	IMG_UINT32			ui32CoreClockSpeed;
+
+	/*! Active Power Management: GPU actively requests the host driver to be powered off */
+	IMG_BOOL			bEnableActivePM;
+
+	/*! Enable the GPU to power off internal Power Islands independently from the host driver */
+	IMG_BOOL			bEnableRDPowIsland;
+	
+	/*! Active Power Management: Delay between the GPU idle and the request to the host */
+	IMG_UINT32			ui32ActivePMLatencyms;
+
+} RGX_TIMING_INFORMATION;
+
+typedef struct _RGX_DATA_
+{
+	/*! Timing information */
+	RGX_TIMING_INFORMATION	*psRGXTimingInfo;
+	IMG_BOOL bHasTDMetaCodePhysHeap;
+	IMG_UINT32 uiTDMetaCodePhysHeapID;
+	IMG_BOOL bHasTDSecureBufPhysHeap;
+	IMG_UINT32 uiTDSecureBufPhysHeapID;
+} RGX_DATA;
+
+
+/*
+	RGX PDUMP register bank name (prefix)
+*/
+#define RGX_PDUMPREG_NAME		"RGXREG"
+
+#endif /* __RGXDEVICE_H__ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxfwutils.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxfwutils.c
new file mode 100644
index 0000000..2e5785c
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxfwutils.c
@@ -0,0 +1,4021 @@
+ /*************************************************************************/ /*!
+@File
+@Title          Rogue firmware utility routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Rogue firmware utility routines
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+
+#include "lists.h"
+
+#include "rgxdefs_km.h"
+#include "rgx_fwif_km.h"
+#include "pdump_km.h"
+#include "osfunc.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "devicemem_server.h"
+#include "pvr_debug.h"
+#include "rgxfwutils.h"
+#include "rgx_fwif.h"
+#include "rgx_fwif_alignchecks_km.h"
+#include "rgx_fwif_resetframework.h"
+#include "rgx_pdump_panics.h"
+#include "rgxheapconfig.h"
+#include "pvrsrv.h"
+#include "rgxdebug.h"
+#include "rgxhwperf.h"
+#include "rgxccb.h"
+#include "rgxcompute.h"
+#include "rgxtransfer.h"
+#if defined(RGX_FEATURE_RAY_TRACING)
+#include "rgxray.h"
+#endif
+#if defined(SUPPORT_DISPLAY_CLASS)
+#include "dc_server.h"
+#endif
+#include "rgxmem.h"
+#include "rgxta3d.h"
+#include "rgxutils.h"
+#include "sync_internal.h"
+#include "tlstream.h"
+#include "devicemem_server_utils.h"
+
+#if defined(TDMETACODE)
+#include "physmem_osmem.h"
+#endif
+
+#ifdef __linux__
+#include <linux/kernel.h>	// sprintf
+#include <linux/string.h>	// strncpy, strlen
+#include "trace_events.h"
+#else
+#include <stdio.h>
+#endif
+
+#include "process_stats.h"
+/* Kernel CCB length */
+#define RGXFWIF_KCCB_TA_NUMCMDS_LOG2	(6)
+#define RGXFWIF_KCCB_3D_NUMCMDS_LOG2	(6)
+#define RGXFWIF_KCCB_2D_NUMCMDS_LOG2	(6)
+#define RGXFWIF_KCCB_CDM_NUMCMDS_LOG2	(6)
+#define RGXFWIF_KCCB_GP_NUMCMDS_LOG2	(6)
+#define RGXFWIF_KCCB_RTU_NUMCMDS_LOG2	(6)
+#define RGXFWIF_KCCB_SHG_NUMCMDS_LOG2	(6)
+
+/* Firmware CCB length */
+#define RGXFWIF_FWCCB_TA_NUMCMDS_LOG2	(4)
+#define RGXFWIF_FWCCB_3D_NUMCMDS_LOG2	(4)
+#define RGXFWIF_FWCCB_2D_NUMCMDS_LOG2	(4)
+#define RGXFWIF_FWCCB_CDM_NUMCMDS_LOG2	(4)
+#define RGXFWIF_FWCCB_GP_NUMCMDS_LOG2	(4)
+#define RGXFWIF_FWCCB_RTU_NUMCMDS_LOG2	(4)
+#define RGXFWIF_FWCCB_SHG_NUMCMDS_LOG2	(4)
+
+#if defined(RGX_FEATURE_SLC_VIVT)
+static PVRSRV_ERROR _AllocateSLC3Fence(PVRSRV_RGXDEV_INFO* psDevInfo, RGXFWIF_INIT* psRGXFWInit)
+{
+	PVRSRV_ERROR eError;
+	DEVMEM_MEMDESC** ppsSLC3FenceMemDesc = &psDevInfo->psSLC3FenceMemDesc;
+
+	PVR_DPF_ENTERED;
+
+	eError = DevmemAllocate(psDevInfo->psFirmwareHeap,
+							1,
+							ROGUE_CACHE_LINE_SIZE,
+							PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+                            PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | 
+							PVRSRV_MEMALLOCFLAG_UNCACHED,
+							"SLC3 Fence WA",
+							ppsSLC3FenceMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF_RETURN_RC(eError);
+	}
+
+	/*
+		We need to map it so the heap for this allocation
+		is set
+	*/
+	eError = DevmemMapToDevice(*ppsSLC3FenceMemDesc,
+							   psDevInfo->psFirmwareHeap,
+							   &psRGXFWInit->sSLC3FenceDevVAddr);
+	if (eError != PVRSRV_OK)
+	{
+		DevmemFwFree(*ppsSLC3FenceMemDesc);
+	}
+
+	PVR_DPF_RETURN_RC1(eError, *ppsSLC3FenceMemDesc);
+}
+
+static IMG_VOID _FreeSLC3Fence(PVRSRV_RGXDEV_INFO* psDevInfo)
+{
+	DEVMEM_MEMDESC* psSLC3FenceMemDesc = psDevInfo->psSLC3FenceMemDesc;
+
+	if (psSLC3FenceMemDesc)
+	{
+		DevmemReleaseDevVirtAddr(psSLC3FenceMemDesc);
+		DevmemFree(psSLC3FenceMemDesc);
+	}
+}
+#endif
+
+static IMG_VOID __MTSScheduleWrite(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Value)
+{
+	/* ensure memory is flushed before kicking MTS */
+	OSWriteMemoryBarrier();
+
+	OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MTS_SCHEDULE, ui32Value);
+
+	/* ensure the MTS kick goes through before continuing */
+	OSMemoryBarrier();
+}
+
+
+/*!
+*******************************************************************************
+ @Function		RGXFWSetupSignatureChecks
+ @Description	
+ @Input			psDevInfo
+ 
+ @Return		PVRSRV_ERROR
+******************************************************************************/
+static PVRSRV_ERROR RGXFWSetupSignatureChecks(PVRSRV_RGXDEV_INFO* psDevInfo,
+                                              DEVMEM_MEMDESC**    ppsSigChecksMemDesc, 
+                                              IMG_UINT32          ui32SigChecksBufSize,
+                                              RGXFWIF_SIGBUF_CTL* psSigBufCtl,
+                                              const IMG_CHAR*     pszBufferName)
+{
+	PVRSRV_ERROR	eError;
+	DEVMEM_FLAGS_T	uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+									  PVRSRV_MEMALLOCFLAG_GPU_READABLE | 
+					                  PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+									  PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+									  PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | 
+									  PVRSRV_MEMALLOCFLAG_UNCACHED |
+									  PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+	/* Allocate memory for the checks */
+	PDUMPCOMMENT("Allocate memory for %s signature checks", pszBufferName);
+	eError = DevmemFwAllocate(psDevInfo,
+							ui32SigChecksBufSize,
+							uiMemAllocFlags,
+							"SignatureChecks",
+							ppsSigChecksMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate %d bytes for signature checks (%u)",
+					ui32SigChecksBufSize,
+					eError));
+		return eError;
+	}
+
+	/* Prepare the pointer for the fw to access that memory */
+	RGXSetFirmwareAddress(&psSigBufCtl->psBuffer,
+						  *ppsSigChecksMemDesc,
+						  0, RFW_FWADDR_NOREF_FLAG);
+
+	DevmemPDumpLoadMem(	*ppsSigChecksMemDesc,
+						0,
+						ui32SigChecksBufSize,
+						PDUMP_FLAGS_CONTINUOUS);
+
+	psSigBufCtl->ui32LeftSizeInRegs = ui32SigChecksBufSize / sizeof(IMG_UINT32);
+
+	return PVRSRV_OK;
+}
+
+#if defined(RGXFW_ALIGNCHECKS)
+/*!
+*******************************************************************************
+ @Function		RGXFWSetupAlignChecks
+ @Description	
+ @Input			psDevInfo
+ 
+ @Return		PVRSRV_ERROR
+******************************************************************************/
+static PVRSRV_ERROR RGXFWSetupAlignChecks(PVRSRV_RGXDEV_INFO* psDevInfo, 
+								RGXFWIF_DEV_VIRTADDR	*psAlignChecksDevFW,
+								IMG_UINT32				*pui32RGXFWAlignChecks,
+								IMG_UINT32				ui32RGXFWAlignChecksSize)
+{
+	IMG_UINT32		aui32RGXFWAlignChecksKM[] = { RGXFW_ALIGN_CHECKS_INIT_KM };
+	IMG_UINT32		ui32RGXFWAlingChecksTotal = sizeof(aui32RGXFWAlignChecksKM) + ui32RGXFWAlignChecksSize;
+	IMG_UINT32*		paui32AlignChecks;
+	PVRSRV_ERROR	eError;
+
+	/* Allocate memory for the checks */
+	PDUMPCOMMENT("Allocate memory for alignment checks");
+	eError = DevmemFwAllocate(psDevInfo,
+							ui32RGXFWAlingChecksTotal,
+							PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+							PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+							PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+							PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+							PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | PVRSRV_MEMALLOCFLAG_UNCACHED,
+							"AlignmentChecks",
+							&psDevInfo->psRGXFWAlignChecksMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate %d bytes for alignment checks (%u)",
+					ui32RGXFWAlingChecksTotal,
+					eError));
+		goto failAlloc;
+	}
+
+	eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWAlignChecksMemDesc,
+									(IMG_VOID **)&paui32AlignChecks);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to acquire kernel addr for alignment checks (%u)",
+					eError));
+		goto failAqCpuAddr;
+	}
+
+	/* Copy the values */
+	OSMemCopy(paui32AlignChecks, &aui32RGXFWAlignChecksKM[0], sizeof(aui32RGXFWAlignChecksKM));
+	paui32AlignChecks += sizeof(aui32RGXFWAlignChecksKM)/sizeof(IMG_UINT32);
+
+	OSMemCopy(paui32AlignChecks, pui32RGXFWAlignChecks, ui32RGXFWAlignChecksSize);
+
+	DevmemPDumpLoadMem(	psDevInfo->psRGXFWAlignChecksMemDesc,
+						0,
+						ui32RGXFWAlingChecksTotal,
+						PDUMP_FLAGS_CONTINUOUS);
+
+	/* Prepare the pointer for the fw to access that memory */
+	RGXSetFirmwareAddress(psAlignChecksDevFW,
+						  psDevInfo->psRGXFWAlignChecksMemDesc,
+						  0, RFW_FWADDR_NOREF_FLAG);
+
+	return PVRSRV_OK;
+
+
+
+
+failAqCpuAddr:
+	DevmemFwFree(psDevInfo->psRGXFWAlignChecksMemDesc);
+failAlloc:
+
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+static IMG_VOID RGXFWFreeAlignChecks(PVRSRV_RGXDEV_INFO* psDevInfo)
+{
+	if (psDevInfo->psRGXFWAlignChecksMemDesc != IMG_NULL)
+	{
+		DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWAlignChecksMemDesc);
+		DevmemFwFree(psDevInfo->psRGXFWAlignChecksMemDesc);
+		psDevInfo->psRGXFWAlignChecksMemDesc = IMG_NULL;
+	}
+}
+#endif
+
+
+IMG_VOID RGXSetFirmwareAddress(RGXFWIF_DEV_VIRTADDR	*ppDest,
+							   DEVMEM_MEMDESC		*psSrc,
+							   IMG_UINT32			uiExtraOffset,
+							   IMG_UINT32			ui32Flags)
+{
+	PVRSRV_ERROR		eError;
+	IMG_DEV_VIRTADDR	psDevVirtAddr;
+	IMG_UINT64			ui64Offset;
+	IMG_BOOL            bCachedInMETA;
+	DEVMEM_FLAGS_T      uiDevFlags;
+
+	eError = DevmemAcquireDevVirtAddr(psSrc, &psDevVirtAddr);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	/* Convert to an address in META memmap */
+	ui64Offset = psDevVirtAddr.uiAddr + uiExtraOffset - RGX_FIRMWARE_HEAP_BASE;
+
+	/* The biggest offset for the Shared region that can be addressed */
+	PVR_ASSERT(ui64Offset < 3*RGXFW_SEGMMU_DMAP_SIZE);
+
+	/* Check in the devmem flags whether this memory is cached/uncached */
+	DevmemGetFlags(psSrc, &uiDevFlags);
+
+	/* Honour the META cache flags */	
+	bCachedInMETA = (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(META_CACHED) & uiDevFlags) != 0;
+	
+#if defined(HW_ERN_45914)
+	/* We only cache in META if it's also cached in the SLC */
+	{
+		IMG_BOOL bCachedInSLC = (DevmemDeviceCacheMode(uiDevFlags) == PVRSRV_MEMALLOCFLAG_GPU_CACHED);
+
+		bCachedInMETA = bCachedInMETA && bCachedInSLC;
+	}
+#endif
+
+	if (bCachedInMETA)
+	{
+		ppDest->ui32Addr = ((IMG_UINT32) ui64Offset) | RGXFW_BOOTLDR_META_ADDR;
+	}
+	else
+	{
+		ppDest->ui32Addr = ((IMG_UINT32) ui64Offset) | RGXFW_SEGMMU_DMAP_ADDR_START;
+	}
+
+	if (ui32Flags & RFW_FWADDR_NOREF_FLAG)
+	{
+		DevmemReleaseDevVirtAddr(psSrc);
+	}
+}
+
+#if defined(RGX_FEATURE_META_DMA)
+IMG_VOID RGXSetMetaDMAAddress(RGXFWIF_DMA_ADDR		*psDest,
+							  DEVMEM_MEMDESC		*psSrcMemDesc,
+							  RGXFWIF_DEV_VIRTADDR	*psSrcFWDevVAddr,
+							  IMG_UINT32			uiOffset)
+{
+	PVRSRV_ERROR		eError;
+	IMG_DEV_VIRTADDR	sDevVirtAddr;
+
+	eError = DevmemAcquireDevVirtAddr(psSrcMemDesc, &sDevVirtAddr);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	psDest->psDevVirtAddr.uiAddr = sDevVirtAddr.uiAddr;
+	psDest->psDevVirtAddr.uiAddr += uiOffset;
+	psDest->pbyFWAddr.ui32Addr = psSrcFWDevVAddr->ui32Addr;
+
+	DevmemReleaseDevVirtAddr(psSrcMemDesc);
+}
+#endif
+
+IMG_VOID RGXUnsetFirmwareAddress(DEVMEM_MEMDESC *psSrc)
+{
+	DevmemReleaseDevVirtAddr(psSrc);
+}
+
+struct _RGX_SERVER_COMMON_CONTEXT_ {
+	DEVMEM_MEMDESC *psFWCommonContextMemDesc;
+	PRGXFWIF_FWCOMMONCONTEXT sFWCommonContextFWAddr;
+	DEVMEM_MEMDESC *psFWMemContextMemDesc;
+	DEVMEM_MEMDESC *psFWFrameworkMemDesc;
+	DEVMEM_MEMDESC *psContextStateMemDesc;
+	RGX_CLIENT_CCB *psClientCCB;
+	DEVMEM_MEMDESC *psClientCCBMemDesc;
+	DEVMEM_MEMDESC *psClientCCBCtrlMemDesc;
+	IMG_BOOL bCommonContextMemProvided;
+	IMG_UINT32 ui32ContextID;
+	DLLIST_NODE sListNode;
+	RGXFWIF_CONTEXT_RESET_REASON eLastResetReason;
+};
+
+PVRSRV_ERROR FWCommonContextAllocate(CONNECTION_DATA *psConnection,
+									 PVRSRV_DEVICE_NODE *psDeviceNode,
+									 const IMG_CHAR *pszContextName,
+									 DEVMEM_MEMDESC *psAllocatedMemDesc,
+									 IMG_UINT32 ui32AllocatedOffset,
+									 DEVMEM_MEMDESC *psFWMemContextMemDesc,
+									 DEVMEM_MEMDESC *psContextStateMemDesc,
+									 IMG_UINT32 ui32CCBAllocSize,
+									 IMG_UINT32 ui32Priority,
+									 RGX_COMMON_CONTEXT_INFO *psInfo,
+									 RGX_SERVER_COMMON_CONTEXT **ppsServerCommonContext)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+	RGX_SERVER_COMMON_CONTEXT *psServerCommonContext;
+	RGXFWIF_FWCOMMONCONTEXT *psFWCommonContext;
+	IMG_UINT32 ui32FWCommonContextOffset;
+	IMG_UINT8 *pui8Ptr;
+	PVRSRV_ERROR eError;
+
+	/*
+		Allocate all the resources that are required
+	*/
+	psServerCommonContext = OSAllocMem(sizeof(*psServerCommonContext));
+	if (psServerCommonContext == IMG_NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_alloc;
+	}
+
+	if (psAllocatedMemDesc)
+	{
+		PDUMPCOMMENT("Using existing MemDesc for Rogue firmware %s context (offset = %d)",
+					 pszContextName,
+					 ui32AllocatedOffset);
+		ui32FWCommonContextOffset = ui32AllocatedOffset;
+		psServerCommonContext->psFWCommonContextMemDesc = psAllocatedMemDesc;
+		psServerCommonContext->bCommonContextMemProvided = IMG_TRUE;
+	}
+	else
+	{
+		/* Allocate device memory for the firmware context */
+		PDUMPCOMMENT("Allocate Rogue firmware %s context", pszContextName);
+		eError = DevmemFwAllocate(psDevInfo,
+								sizeof(*psFWCommonContext),
+								RGX_FWCOMCTX_ALLOCFLAGS,
+								"FirmwareContext",
+								&psServerCommonContext->psFWCommonContextMemDesc);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"%s : Failed to allocate firmware %s context (%s)",
+									__FUNCTION__,
+									pszContextName,
+									PVRSRVGetErrorStringKM(eError)));
+			goto fail_contextalloc;
+		}
+		ui32FWCommonContextOffset = 0;
+		psServerCommonContext->bCommonContextMemProvided = IMG_FALSE;
+	}
+
+	/* Record this context so we can refer to it if the FW needs to tell us it was reset. */
+	psServerCommonContext->eLastResetReason = RGXFWIF_CONTEXT_RESET_REASON_NONE;
+	psServerCommonContext->ui32ContextID    = psDevInfo->ui32CommonCtxtCurrentID++;
+	dllist_add_to_tail(&(psDevInfo->sCommonCtxtListHead), &(psServerCommonContext->sListNode));
+
+	/* Allocate the client CCB */
+	eError = RGXCreateCCB(psDeviceNode,
+						  ui32CCBAllocSize,
+						  psConnection,
+						  pszContextName,
+						  psServerCommonContext,
+						  &psServerCommonContext->psClientCCB,
+						  &psServerCommonContext->psClientCCBMemDesc,
+						  &psServerCommonContext->psClientCCBCtrlMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: failed to create CCB for %s context(%s)",
+								__FUNCTION__,
+								pszContextName,
+								PVRSRVGetErrorStringKM(eError)));
+		goto fail_allocateccb;
+	}
+
+	/*
+		Temporarily map the firmware context to the kernel and init it
+	*/
+	eError = DevmemAcquireCpuVirtAddr(psServerCommonContext->psFWCommonContextMemDesc,
+                                      (IMG_VOID **)&pui8Ptr);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Failed to map firmware %s context (%s)to CPU",
+								__FUNCTION__,
+								pszContextName,
+								PVRSRVGetErrorStringKM(eError)));
+		goto fail_cpuvirtacquire;
+	}
+
+	psFWCommonContext = (RGXFWIF_FWCOMMONCONTEXT *) (pui8Ptr + ui32FWCommonContextOffset);
+
+	/* Set the firmware CCB device addresses in the firmware common context */
+	RGXSetFirmwareAddress(&psFWCommonContext->psCCB,
+						  psServerCommonContext->psClientCCBMemDesc,
+						  0, RFW_FWADDR_FLAG_NONE);
+	RGXSetFirmwareAddress(&psFWCommonContext->psCCBCtl,
+						  psServerCommonContext->psClientCCBCtrlMemDesc,
+						  0, RFW_FWADDR_FLAG_NONE);
+
+	/* Set the memory context device address */
+	psServerCommonContext->psFWMemContextMemDesc = psFWMemContextMemDesc;
+	RGXSetFirmwareAddress(&psFWCommonContext->psFWMemContext,
+						  psFWMemContextMemDesc,
+						  0, RFW_FWADDR_FLAG_NONE);
+
+	/* Set the framework register updates address */
+	psServerCommonContext->psFWFrameworkMemDesc = psInfo->psFWFrameworkMemDesc;
+	RGXSetFirmwareAddress(&psFWCommonContext->psRFCmd,
+						  psInfo->psFWFrameworkMemDesc,
+						  0, RFW_FWADDR_FLAG_NONE);
+
+	psFWCommonContext->ui32Priority = ui32Priority;
+	psFWCommonContext->ui32PrioritySeqNum = 0;
+
+	if(psInfo->psMCUFenceAddr != IMG_NULL)
+	{
+		psFWCommonContext->ui64MCUFenceAddr = psInfo->psMCUFenceAddr->uiAddr;
+	}
+
+	/* Store a references to Server Common Context and PID for notifications back from the FW. */
+	psFWCommonContext->ui32ServerCommonContextID = psServerCommonContext->ui32ContextID;
+	psFWCommonContext->ui32PID                   = OSGetCurrentProcessID();
+
+	/* Set the firmware GPU context state buffer */
+	psServerCommonContext->psContextStateMemDesc = psContextStateMemDesc;
+	if (psContextStateMemDesc)
+	{
+		RGXSetFirmwareAddress(&psFWCommonContext->psContextState,
+							  psContextStateMemDesc,
+							  0,
+							  RFW_FWADDR_FLAG_NONE);
+	}
+
+	/*
+	 * Dump the created context
+	 */
+	PDUMPCOMMENT("Dump %s context", pszContextName);
+	DevmemPDumpLoadMem(psServerCommonContext->psFWCommonContextMemDesc,
+					   ui32FWCommonContextOffset,
+					   sizeof(*psFWCommonContext),
+					   PDUMP_FLAGS_CONTINUOUS);
+
+	/* We've finished the setup so release the CPU mapping */
+	DevmemReleaseCpuVirtAddr(psServerCommonContext->psFWCommonContextMemDesc);
+
+	/* Map this allocation into the FW */
+	RGXSetFirmwareAddress(&psServerCommonContext->sFWCommonContextFWAddr,
+						  psServerCommonContext->psFWCommonContextMemDesc,
+						  ui32FWCommonContextOffset,
+						  RFW_FWADDR_FLAG_NONE);
+
+#if defined(LINUX)
+	trace_rogue_create_fw_context(OSGetCurrentProcessName(),
+								  pszContextName,
+								  psServerCommonContext->sFWCommonContextFWAddr.ui32Addr);
+#endif
+
+	*ppsServerCommonContext = psServerCommonContext;
+	return PVRSRV_OK;
+
+fail_allocateccb:
+	DevmemReleaseCpuVirtAddr(psServerCommonContext->psFWCommonContextMemDesc);
+fail_cpuvirtacquire:
+	RGXUnsetFirmwareAddress(psServerCommonContext->psFWCommonContextMemDesc);
+	if (!psServerCommonContext->bCommonContextMemProvided)
+	{
+		DevmemFwFree(psServerCommonContext->psFWCommonContextMemDesc);
+	}
+fail_contextalloc:
+	OSFreeMem(psServerCommonContext);
+fail_alloc:
+	return eError;
+}
+
+IMG_VOID FWCommonContextFree(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext)
+{
+	/*
+		Unmap the context itself and then all it's resources
+	*/
+
+	/* Unmap the FW common context */
+	RGXUnsetFirmwareAddress(psServerCommonContext->psFWCommonContextMemDesc);
+	/* Umap context state buffer (if there was one) */
+	if (psServerCommonContext->psContextStateMemDesc)
+	{
+		RGXUnsetFirmwareAddress(psServerCommonContext->psContextStateMemDesc);
+	}
+	/* Unmap the framework buffer */
+	RGXUnsetFirmwareAddress(psServerCommonContext->psFWFrameworkMemDesc);
+	/* Unmap client CCB and CCB control */
+	RGXUnsetFirmwareAddress(psServerCommonContext->psClientCCBCtrlMemDesc);
+	RGXUnsetFirmwareAddress(psServerCommonContext->psClientCCBMemDesc);
+	/* Unmap the memory context */
+	RGXUnsetFirmwareAddress(psServerCommonContext->psFWMemContextMemDesc);
+
+	/* Destroy the client CCB */
+	RGXDestroyCCB(psServerCommonContext->psClientCCB);
+	
+	/* Remove the context from the list of all contexts. */
+	dllist_remove_node(&psServerCommonContext->sListNode);
+
+	/* Free the FW common context (if there was one) */
+	if (!psServerCommonContext->bCommonContextMemProvided)
+	{
+		DevmemFwFree(psServerCommonContext->psFWCommonContextMemDesc);
+	}
+	/* Free the hosts representation of the common context */
+	OSFreeMem(psServerCommonContext);
+}
+
+PRGXFWIF_FWCOMMONCONTEXT FWCommonContextGetFWAddress(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext)
+{
+	return psServerCommonContext->sFWCommonContextFWAddr;
+}
+
+RGX_CLIENT_CCB *FWCommonContextGetClientCCB(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext)
+{
+	return psServerCommonContext->psClientCCB;
+}
+
+RGXFWIF_CONTEXT_RESET_REASON FWCommonContextGetLastResetReason(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext)
+{
+	RGXFWIF_CONTEXT_RESET_REASON  eLastResetReason;
+	
+	PVR_ASSERT(psServerCommonContext != IMG_NULL);
+	
+	/* Take the most recent reason and reset for next time... */
+	eLastResetReason = psServerCommonContext->eLastResetReason;
+	psServerCommonContext->eLastResetReason = RGXFWIF_CONTEXT_RESET_REASON_NONE;
+
+	return eLastResetReason;
+}
+
+/*!
+*******************************************************************************
+ @Function		RGXFreeKernelCCB
+ @Description	Free a kernel CCB
+ @Input			psDevInfo
+ @Input			eKCCBType
+ 
+ @Return		PVRSRV_ERROR
+******************************************************************************/
+static IMG_VOID RGXFreeKernelCCB(PVRSRV_RGXDEV_INFO 	*psDevInfo,
+								 RGXFWIF_DM				eKCCBType)
+{
+	if (psDevInfo->apsKernelCCBMemDesc[eKCCBType] != IMG_NULL)
+	{
+		if (psDevInfo->apsKernelCCB[eKCCBType] != IMG_NULL)
+		{
+			DevmemReleaseCpuVirtAddr(psDevInfo->apsKernelCCBMemDesc[eKCCBType]);
+			psDevInfo->apsKernelCCB[eKCCBType] = IMG_NULL;
+		}
+		DevmemFwFree(psDevInfo->apsKernelCCBMemDesc[eKCCBType]);
+		psDevInfo->apsKernelCCBMemDesc[eKCCBType] = IMG_NULL;
+	}
+	if (psDevInfo->apsKernelCCBCtlMemDesc[eKCCBType] != IMG_NULL)
+	{
+		if (psDevInfo->apsKernelCCBCtl[eKCCBType] != IMG_NULL)
+		{
+			DevmemReleaseCpuVirtAddr(psDevInfo->apsKernelCCBCtlMemDesc[eKCCBType]);
+			psDevInfo->apsKernelCCBCtl[eKCCBType] = IMG_NULL;
+		}
+		DevmemFwFree(psDevInfo->apsKernelCCBCtlMemDesc[eKCCBType]);
+		psDevInfo->apsKernelCCBCtlMemDesc[eKCCBType] = IMG_NULL;
+	}
+}
+
+/*!
+*******************************************************************************
+ @Function		RGXSetupKernelCCB
+ @Description	Allocate and initialise a kernel CCB
+ @Input			psDevInfo
+ 
+ @Return		PVRSRV_ERROR
+******************************************************************************/
+static PVRSRV_ERROR RGXSetupKernelCCB(PVRSRV_RGXDEV_INFO 	*psDevInfo, 
+									  RGXFWIF_INIT			*psRGXFWInit,
+									  RGXFWIF_DM			eKCCBType,
+									  IMG_UINT32			ui32NumCmdsLog2,
+									  IMG_UINT32			ui32CmdSize)
+{
+	PVRSRV_ERROR		eError;
+	RGXFWIF_CCB_CTL		*psKCCBCtl;
+	DEVMEM_FLAGS_T		uiCCBCtlMemAllocFlags, uiCCBMemAllocFlags;
+	IMG_UINT32			ui32kCCBSize = (1U << ui32NumCmdsLog2);
+
+
+	/*
+	 * FIXME: the write offset need not be writeable by the firmware, indeed may
+	 * not even be needed for reading. Consider moving it to its own data
+	 * structure.
+	 */
+	uiCCBCtlMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+							PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+							PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+							PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+							PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+							PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+							PVRSRV_MEMALLOCFLAG_UNCACHED | 
+							 PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+	/* Allocation flags for Kernel CCB */
+	uiCCBMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+						 PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(META_CACHED) |
+						 PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+						 PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+						 PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+						 PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+						 PVRSRV_MEMALLOCFLAG_UNCACHED | 
+						 PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+	/*
+		Allocate memory for the kernel CCB control.
+	*/
+	PDUMPCOMMENT("Allocate memory for kernel CCB control %u", eKCCBType);
+	eError = DevmemFwAllocate(psDevInfo,
+							sizeof(RGXFWIF_CCB_CTL),
+							uiCCBCtlMemAllocFlags,
+							"KernelCCBControl",
+                            &psDevInfo->apsKernelCCBCtlMemDesc[eKCCBType]);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupKernelCCB: Failed to allocate kernel CCB ctl %u (%u)",
+				eKCCBType, eError));
+		goto fail;
+	}
+
+	/*
+		Allocate memory for the kernel CCB.
+		(this will reference further command data in non-shared CCBs)
+	*/
+	PDUMPCOMMENT("Allocate memory for kernel CCB %u", eKCCBType);
+	eError = DevmemFwAllocate(psDevInfo,
+							ui32kCCBSize * ui32CmdSize,
+							uiCCBMemAllocFlags,
+							"KernelCCB",
+                            &psDevInfo->apsKernelCCBMemDesc[eKCCBType]);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupKernelCCB: Failed to allocate kernel CCB %u (%u)",
+				eKCCBType, eError));
+		goto fail;
+	}
+
+	/*
+		Map the kernel CCB control to the kernel.
+	*/
+	eError = DevmemAcquireCpuVirtAddr(psDevInfo->apsKernelCCBCtlMemDesc[eKCCBType],
+                                      (IMG_VOID **)&psDevInfo->apsKernelCCBCtl[eKCCBType]);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupKernelCCB: Failed to acquire cpu kernel CCB Ctl %u (%u)",
+				eKCCBType, eError));
+		goto fail;
+	}
+
+	/*
+		Map the kernel CCB to the kernel.
+	*/
+	eError = DevmemAcquireCpuVirtAddr(psDevInfo->apsKernelCCBMemDesc[eKCCBType],
+                                      (IMG_VOID **)&psDevInfo->apsKernelCCB[eKCCBType]);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupKernelCCB: Failed to acquire cpu kernel CCB %u (%u)",
+				eKCCBType, eError));
+		goto fail;
+	}
+
+	/*
+	 * Initialise the kernel CCB control.
+	 */
+	psKCCBCtl = psDevInfo->apsKernelCCBCtl[eKCCBType];
+	psKCCBCtl->ui32WriteOffset = 0;
+	psKCCBCtl->ui32ReadOffset = 0;
+	psKCCBCtl->ui32WrapMask = ui32kCCBSize - 1;
+	psKCCBCtl->ui32CmdSize = ui32CmdSize;
+
+	/*
+	 * Set-up RGXFWIfCtl pointers to access the kCCBs
+	 */
+	RGXSetFirmwareAddress(&psRGXFWInit->psKernelCCBCtl[eKCCBType],
+						  psDevInfo->apsKernelCCBCtlMemDesc[eKCCBType],
+						  0, RFW_FWADDR_NOREF_FLAG);
+
+	RGXSetFirmwareAddress(&psRGXFWInit->psKernelCCB[eKCCBType],
+						  psDevInfo->apsKernelCCBMemDesc[eKCCBType],
+						  0, RFW_FWADDR_NOREF_FLAG);
+
+	psRGXFWInit->eDM[eKCCBType] = eKCCBType;
+
+	/*
+	 * Pdump the kernel CCB control.
+	 */
+	PDUMPCOMMENT("Initialise kernel CCB ctl %d", eKCCBType);
+	DevmemPDumpLoadMem(psDevInfo->apsKernelCCBCtlMemDesc[eKCCBType],
+					   0,
+					   sizeof(RGXFWIF_CCB_CTL),
+					   0);
+
+	return PVRSRV_OK;
+
+fail:
+	RGXFreeKernelCCB(psDevInfo, eKCCBType);
+
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+/*!
+*******************************************************************************
+ @Function		RGXFreeFirmwareCCB
+ @Description	Free a firmware CCB
+ @Input			psDevInfo
+ @Input			eFWCCBType
+
+ @Return		PVRSRV_ERROR
+******************************************************************************/
+static IMG_VOID RGXFreeFirmwareCCB(PVRSRV_RGXDEV_INFO 	*psDevInfo,
+								 RGXFWIF_DM				eFWCCBType)
+{
+	if (psDevInfo->apsFirmwareCCBMemDesc[eFWCCBType] != IMG_NULL)
+	{
+		if (psDevInfo->apsFirmwareCCB[eFWCCBType] != IMG_NULL)
+		{
+			DevmemReleaseCpuVirtAddr(psDevInfo->apsFirmwareCCBMemDesc[eFWCCBType]);
+			psDevInfo->apsFirmwareCCB[eFWCCBType] = IMG_NULL;
+		}
+		DevmemFwFree(psDevInfo->apsFirmwareCCBMemDesc[eFWCCBType]);
+		psDevInfo->apsFirmwareCCBMemDesc[eFWCCBType] = IMG_NULL;
+	}
+	if (psDevInfo->apsFirmwareCCBCtlMemDesc[eFWCCBType] != IMG_NULL)
+	{
+		if (psDevInfo->apsFirmwareCCBCtl[eFWCCBType] != IMG_NULL)
+		{
+			DevmemReleaseCpuVirtAddr(psDevInfo->apsFirmwareCCBCtlMemDesc[eFWCCBType]);
+			psDevInfo->apsFirmwareCCBCtl[eFWCCBType] = IMG_NULL;
+		}
+		DevmemFwFree(psDevInfo->apsFirmwareCCBCtlMemDesc[eFWCCBType]);
+		psDevInfo->apsFirmwareCCBCtlMemDesc[eFWCCBType] = IMG_NULL;
+	}
+}
+
+/*!
+*******************************************************************************
+ @Function		RGXSetupFirmwareCCB
+ @Description	Allocate and initialise a Firmware CCB
+ @Input			psDevInfo
+
+ @Return		PVRSRV_ERROR
+******************************************************************************/
+static PVRSRV_ERROR RGXSetupFirmwareCCB(PVRSRV_RGXDEV_INFO 	*psDevInfo,
+									  RGXFWIF_INIT			*psRGXFWInit,
+									  RGXFWIF_DM			eFWCCBType,
+									  IMG_UINT32			ui32NumCmdsLog2,
+									  IMG_UINT32			ui32CmdSize)
+{
+	PVRSRV_ERROR		eError;
+	RGXFWIF_CCB_CTL		*psFWCCBCtl;
+	DEVMEM_FLAGS_T		uiCCBCtlMemAllocFlags, uiCCBMemAllocFlags;
+	IMG_UINT32			ui32FWCCBSize = (1U << ui32NumCmdsLog2);
+
+	/*
+	 * FIXME: the write offset need not be writeable by the host, indeed may
+	 * not even be needed for reading. Consider moving it to its own data
+	 * structure.
+	 */
+	uiCCBCtlMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+							PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+							PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+							PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+							PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+							PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+							PVRSRV_MEMALLOCFLAG_UNCACHED |
+							 PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+	/* Allocation flags for Firmware CCB */
+	uiCCBMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+						 PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+						 PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+						 PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+						 PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+						 PVRSRV_MEMALLOCFLAG_UNCACHED |
+						 PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+	/*
+		Allocate memory for the Firmware CCB control.
+	*/
+	PDUMPCOMMENT("Allocate memory for firmware CCB control %u", eFWCCBType);
+	eError = DevmemFwAllocate(psDevInfo,
+							sizeof(RGXFWIF_CCB_CTL),
+							uiCCBCtlMemAllocFlags,
+							"FirmwareCCBControl",
+                            &psDevInfo->apsFirmwareCCBCtlMemDesc[eFWCCBType]);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmwareCCB: Failed to allocate Firmware CCB ctl %u (%u)",
+				eFWCCBType, eError));
+		goto fail;
+	}
+
+	/*
+		Allocate memory for the Firmware CCB.
+		(this will reference further command data in non-shared CCBs)
+	*/
+	PDUMPCOMMENT("Allocate memory for firmware CCB %u", eFWCCBType);
+	eError = DevmemFwAllocate(psDevInfo,
+							ui32FWCCBSize * ui32CmdSize,
+							uiCCBMemAllocFlags,
+							"FirmwareCCB",
+                            &psDevInfo->apsFirmwareCCBMemDesc[eFWCCBType]);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmwareCCB: Failed to allocate Firmware CCB %u (%u)",
+				eFWCCBType, eError));
+		goto fail;
+	}
+
+	/*
+		Map the Firmware CCB control to the kernel.
+	*/
+	eError = DevmemAcquireCpuVirtAddr(psDevInfo->apsFirmwareCCBCtlMemDesc[eFWCCBType],
+                                      (IMG_VOID **)&psDevInfo->apsFirmwareCCBCtl[eFWCCBType]);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmwareCCB: Failed to acquire cpu firmware CCB Ctl %u (%u)",
+				eFWCCBType, eError));
+		goto fail;
+	}
+
+	/*
+		Map the firmware CCB to the kernel.
+	*/
+	eError = DevmemAcquireCpuVirtAddr(psDevInfo->apsFirmwareCCBMemDesc[eFWCCBType],
+                                      (IMG_VOID **)&psDevInfo->apsFirmwareCCB[eFWCCBType]);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmwareCCB: Failed to acquire cpu firmware CCB %u (%u)",
+				eFWCCBType, eError));
+		goto fail;
+	}
+
+	/*
+	 * Initialise the firmware CCB control.
+	 */
+	psFWCCBCtl = psDevInfo->apsFirmwareCCBCtl[eFWCCBType];
+	psFWCCBCtl->ui32WriteOffset = 0;
+	psFWCCBCtl->ui32ReadOffset = 0;
+	psFWCCBCtl->ui32WrapMask = ui32FWCCBSize - 1;
+	psFWCCBCtl->ui32CmdSize = ui32CmdSize;
+
+	/*
+	 * Set-up RGXFWIfCtl pointers to access the kCCBs
+	 */
+	RGXSetFirmwareAddress(&psRGXFWInit->psFirmwareCCBCtl[eFWCCBType],
+						  psDevInfo->apsFirmwareCCBCtlMemDesc[eFWCCBType],
+						  0, RFW_FWADDR_NOREF_FLAG);
+
+	RGXSetFirmwareAddress(&psRGXFWInit->psFirmwareCCB[eFWCCBType],
+						  psDevInfo->apsFirmwareCCBMemDesc[eFWCCBType],
+						  0, RFW_FWADDR_NOREF_FLAG);
+
+	psRGXFWInit->eDM[eFWCCBType] = eFWCCBType;
+
+	/*
+	 * Pdump the kernel CCB control.
+	 */
+	PDUMPCOMMENT("Initialise firmware CCB ctl %d", eFWCCBType);
+	DevmemPDumpLoadMem(psDevInfo->apsFirmwareCCBCtlMemDesc[eFWCCBType],
+					   0,
+					   sizeof(RGXFWIF_CCB_CTL),
+					   0);
+
+	return PVRSRV_OK;
+
+fail:
+	RGXFreeFirmwareCCB(psDevInfo, eFWCCBType);
+
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+static IMG_VOID RGXSetupFaultReadRegisterRollback(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	PMR *psPMR;
+	
+	if (psDevInfo->psRGXFaultAddressMemDesc)
+	{
+		if (DevmemServerGetImportHandle(psDevInfo->psRGXFaultAddressMemDesc,(IMG_VOID **)&psPMR) == PVRSRV_OK)
+		{
+			PMRUnlockSysPhysAddresses(psPMR);
+		}
+		DevmemFwFree(psDevInfo->psRGXFaultAddressMemDesc);
+		psDevInfo->psRGXFaultAddressMemDesc = IMG_NULL;
+	}
+}
+
+static PVRSRV_ERROR RGXSetupFaultReadRegister(PVRSRV_DEVICE_NODE	*psDeviceNode, RGXFWIF_INIT *psRGXFWInit)
+{
+	PVRSRV_ERROR		eError = PVRSRV_OK;
+	IMG_UINT32			*pui32MemoryVirtAddr;
+	IMG_UINT32			i;
+	IMG_SIZE_T			ui32PageSize;
+	DEVMEM_FLAGS_T		uiMemAllocFlags;
+	PVRSRV_RGXDEV_INFO 	*psDevInfo = psDeviceNode->pvDevice;
+	PMR					*psPMR;
+
+	ui32PageSize = OSGetPageSize();
+
+	/* Allocate page of memory to use for page faults on non-blocking memory transactions */
+	uiMemAllocFlags =	PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+						PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+						PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+						PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+						PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+						PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+						PVRSRV_MEMALLOCFLAG_UNCACHED;
+	
+	psDevInfo->psRGXFaultAddressMemDesc = IMG_NULL;
+	eError = DevmemFwAllocateExportable(psDeviceNode,
+										ui32PageSize,
+										uiMemAllocFlags,
+										"FaultAddress",
+										&psDevInfo->psRGXFaultAddressMemDesc);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"Failed to allocate mem for fault address (%u)",
+				eError));
+		goto failFaultAddressDescAlloc;
+	}
+
+	eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFaultAddressMemDesc,
+									  (IMG_VOID **)&pui32MemoryVirtAddr);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to acquire mem for fault adress (%u)",
+				eError));
+		goto failFaultAddressDescAqCpuVirt;
+	}
+
+	for (i = 0; i < ui32PageSize/sizeof(IMG_UINT32); i++)
+	{
+		*(pui32MemoryVirtAddr + i) = 0xDEADBEEF;
+	}
+
+	eError = DevmemServerGetImportHandle(psDevInfo->psRGXFaultAddressMemDesc,(IMG_VOID **)&psPMR);
+		
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Error getting PMR for fault adress (%u)",
+				eError));
+		
+		goto failFaultAddressDescGetPMR;
+	}
+	else
+	{
+		IMG_BOOL bValid;
+		IMG_UINT32 ui32Log2PageSize = OSGetPageShift();
+		
+		eError = PMRLockSysPhysAddresses(psPMR,ui32Log2PageSize);
+			
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Error locking physical address for fault address MemDesc (%u)",
+					eError));
+			
+			goto failFaultAddressDescLockPhys;
+		}
+			
+		eError = PMR_DevPhysAddr(psPMR,ui32Log2PageSize,1,0,&(psRGXFWInit->sFaultPhysAddr),&bValid);
+
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Error getting physical address for fault address MemDesc (%u)",
+					eError));
+			
+			goto failFaultAddressDescGetPhys;
+		}
+
+		if (!bValid)
+		{
+			psRGXFWInit->sFaultPhysAddr.uiAddr = 0;
+			PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed getting physical address for fault address MemDesc - invalid page (0x%llX)",
+					psRGXFWInit->sFaultPhysAddr.uiAddr));
+
+			goto failFaultAddressDescGetPhys;
+		}
+	}
+
+	DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFaultAddressMemDesc);
+	
+	return PVRSRV_OK;
+
+failFaultAddressDescGetPhys:
+	PMRUnlockSysPhysAddresses(psPMR);
+
+failFaultAddressDescLockPhys:
+
+failFaultAddressDescGetPMR:
+	DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFaultAddressMemDesc);
+
+failFaultAddressDescAqCpuVirt:
+	DevmemFwFree(psDevInfo->psRGXFaultAddressMemDesc);
+	psDevInfo->psRGXFaultAddressMemDesc = IMG_NULL;
+
+failFaultAddressDescAlloc:
+
+	return eError;
+}
+
+static PVRSRV_ERROR RGXHwBrn37200(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	PVRSRV_ERROR			eError = PVRSRV_OK;
+
+#if defined(FIX_HW_BRN_37200)
+	struct _DEVMEM_HEAP_	*psBRNHeap;
+	DEVMEM_FLAGS_T			uiFlags;
+	IMG_DEV_VIRTADDR		sTmpDevVAddr;
+	IMG_SIZE_T				uiPageSize;
+
+	uiPageSize = OSGetPageSize();
+	
+	uiFlags =	PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+				PVRSRV_MEMALLOCFLAG_GPU_READABLE | 
+				PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+				PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+				PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+				PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | 
+				PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+				PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
+				PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+	eError = DevmemFindHeapByName(psDevInfo->psKernelDevmemCtx,
+							  "HWBRN37200", /* FIXME: We need to create an IDENT macro for this string.
+							                 Make sure the IDENT macro is not accessible to userland */
+							  &psBRNHeap);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXHwBrn37200: HWBRN37200 Failed DevmemFindHeapByName (%u)", eError));
+		goto failFWHWBRN37200FindHeapByName;
+	}
+
+	psDevInfo->psRGXFWHWBRN37200MemDesc = IMG_NULL;
+	eError = DevmemAllocate(psBRNHeap,
+						uiPageSize,
+						ROGUE_CACHE_LINE_SIZE,
+						uiFlags,
+						"HWBRN37200",
+						&psDevInfo->psRGXFWHWBRN37200MemDesc);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXHwBrn37200: Failed to allocate %u bytes for HWBRN37200 (%u)",
+				(IMG_UINT32)uiPageSize,
+				eError));
+		goto failFWHWBRN37200MemDescAlloc;
+	}
+		
+	/*
+		We need to map it so the heap for this allocation
+		is set
+	*/
+	eError = DevmemMapToDevice(psDevInfo->psRGXFWHWBRN37200MemDesc,
+						   psBRNHeap,
+						   &sTmpDevVAddr);
+		
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXHwBrn37200: Failed to allocate %u bytes for HWBRN37200 (%u)",
+				(IMG_UINT32)uiPageSize,
+				eError));
+		goto failFWHWBRN37200DevmemMapToDevice;
+	}
+
+	return PVRSRV_OK;
+
+failFWHWBRN37200DevmemMapToDevice:
+
+failFWHWBRN37200MemDescAlloc:
+	DevmemFwFree(psDevInfo->psRGXFWHWBRN37200MemDesc);
+	psDevInfo->psRGXFWHWBRN37200MemDesc = IMG_NULL;
+
+failFWHWBRN37200FindHeapByName:
+#endif
+
+	return eError;
+}
+
+/*!
+*******************************************************************************
+
+ @Function	RGXSetupFirmware
+
+ @Description
+
+ Setups all the firmware related data
+
+ @Input psDevInfo
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXSetupFirmware(PVRSRV_DEVICE_NODE	*psDeviceNode, 
+							     IMG_BOOL			bEnableSignatureChecks,
+							     IMG_UINT32			ui32SignatureChecksBufSize,
+							     IMG_UINT32			ui32HWPerfFWBufSizeKB,
+							     IMG_UINT64		 	ui64HWPerfFilter,
+							     IMG_UINT32			ui32RGXFWAlignChecksSize,
+							     IMG_UINT32			*pui32RGXFWAlignChecks,
+							     IMG_UINT32			ui32ConfigFlags,
+							     IMG_UINT32			ui32LogType,
+							     IMG_UINT32            ui32NumTilingCfgs,
+							     IMG_UINT32            *pui32BIFTilingXStrides,
+							     IMG_UINT32			ui32FilterFlags,
+							     IMG_UINT32			ui32JonesDisableMask,
+							     IMG_UINT32			ui32HWRDebugDumpLimit,
+								 IMG_UINT32			ui32HWPerfCountersDataSize,
+							     RGXFWIF_DEV_VIRTADDR	*psRGXFWInitFWAddr,
+							     RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf)
+
+{
+	PVRSRV_ERROR		eError;
+	DEVMEM_FLAGS_T		uiMemAllocFlags;
+	RGXFWIF_INIT		*psRGXFWInit;
+	PVRSRV_RGXDEV_INFO	*psDevInfo = psDeviceNode->pvDevice;
+	IMG_UINT32			dm;
+#if defined(RGX_FEATURE_META_DMA)
+	RGXFWIF_DEV_VIRTADDR sRGXTmpCorememDataStoreFWAddr;
+#endif
+
+	/* Fw init data */
+	uiMemAllocFlags =	PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+						PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(META_CACHED) |
+						PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+						PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+						PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+						PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+						PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+						PVRSRV_MEMALLOCFLAG_UNCACHED |
+						PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+						/* FIXME: Change to Cached */
+
+	PDUMPCOMMENT("Allocate RGXFWIF_INIT structure");
+	eError = DevmemFwAllocate(psDevInfo,
+							sizeof(RGXFWIF_INIT),
+							uiMemAllocFlags,
+							"FirmwareInitStructure",
+							&psDevInfo->psRGXFWIfInitMemDesc);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate %u bytes for fw if ctl (%u)",
+				(IMG_UINT32)sizeof(RGXFWIF_INIT),
+				eError));
+		goto fail;
+	}
+
+	eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc,
+									  (IMG_VOID **)&psRGXFWInit);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to acquire kernel fw if ctl (%u)",
+				eError));
+		goto fail;
+	}
+
+	RGXSetFirmwareAddress(psRGXFWInitFWAddr,
+						psDevInfo->psRGXFWIfInitMemDesc,
+						0, RFW_FWADDR_NOREF_FLAG);
+
+	/* FW Trace buffer */
+	uiMemAllocFlags =	PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+						PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+						PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+						PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+						PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+						PVRSRV_MEMALLOCFLAG_UNCACHED |
+						PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+	PDUMPCOMMENT("Allocate rgxfw trace structure");
+	eError = DevmemFwAllocate(psDevInfo,
+							sizeof(RGXFWIF_TRACEBUF),
+							uiMemAllocFlags,
+							"FirmwareTraceStructure",
+							&psDevInfo->psRGXFWIfTraceBufCtlMemDesc);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate %u bytes for fw trace (%u)",
+				(IMG_UINT32)sizeof(RGXFWIF_TRACEBUF),
+				eError));
+		goto fail;
+	}
+
+	RGXSetFirmwareAddress(&psRGXFWInit->psTraceBufCtl,
+						psDevInfo->psRGXFWIfTraceBufCtlMemDesc,
+						0, RFW_FWADDR_NOREF_FLAG);
+
+	eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfTraceBufCtlMemDesc,
+									  (IMG_VOID **)&psDevInfo->psRGXFWIfTraceBuf);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to acquire kernel tracebuf ctl (%u)",
+				eError));
+		goto fail;
+	}
+
+	/* Determine the size of the HWPerf FW buffer */
+	if (ui32HWPerfFWBufSizeKB > (RGXFW_HWPERF_L1_SIZE_MAX>>10))
+	{
+		/* Size specified as a AppHint but it is too big */
+		PVR_DPF((PVR_DBG_WARNING,"RGXSetupFirmware: HWPerfFWBufSizeInKB value (%u) too big, using maximum (%u)",
+				ui32HWPerfFWBufSizeKB, RGXFW_HWPERF_L1_SIZE_MAX>>10));
+		psDevInfo->ui32RGXFWIfHWPerfBufSize = RGXFW_HWPERF_L1_SIZE_MAX;
+		
+	}
+	else if (ui32HWPerfFWBufSizeKB > (RGXFW_HWPERF_L1_SIZE_MIN>>10))
+	{
+		/* Size specified as in AppHint HWPerfFWBufSizeInKB */
+		PVR_DPF((PVR_DBG_WARNING,"RGXSetupFirmware: Using HWPerf FW buffer size of %u KB",
+				ui32HWPerfFWBufSizeKB));
+		psDevInfo->ui32RGXFWIfHWPerfBufSize = ui32HWPerfFWBufSizeKB<<10;
+	}
+	else if (ui32HWPerfFWBufSizeKB > 0)
+	{
+		/* Size specified as a AppHint but it is too small */
+		PVR_DPF((PVR_DBG_WARNING,"RGXSetupFirmware: HWPerfFWBufSizeInKB value (%u) too small, using minimum (%u)",
+				ui32HWPerfFWBufSizeKB, RGXFW_HWPERF_L1_SIZE_MIN>>10));
+		psDevInfo->ui32RGXFWIfHWPerfBufSize = RGXFW_HWPERF_L1_SIZE_MIN;
+	}
+	else
+	{
+		/* 0 size implies AppHint not set or is set to zero,
+		 * use default size from driver constant. */
+		psDevInfo->ui32RGXFWIfHWPerfBufSize = RGXFW_HWPERF_L1_SIZE_DEFAULT;
+	}
+
+	/* Allocate HWPerf FW L1 buffer */
+	eError = DevmemFwAllocate(psDevInfo,
+							  psDevInfo->ui32RGXFWIfHWPerfBufSize+RGXFW_HWPERF_L1_PADDING_DEFAULT,
+							  uiMemAllocFlags,
+							  "FirmwareHWPerfBuffer",
+							  &psDevInfo->psRGXFWIfHWPerfBufMemDesc);
+	
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXSetupFirmware: Failed to allocate kernel fw hwperf buffer (%u)",
+				 eError));
+		goto fail;
+	}
+
+	/* Meta cached flag removed from this allocation as it was found
+	 * FW performance was better without it. */
+	RGXSetFirmwareAddress(&psRGXFWInit->psHWPerfInfoCtl,
+						  psDevInfo->psRGXFWIfHWPerfBufMemDesc,
+						  0, RFW_FWADDR_NOREF_FLAG);
+
+	eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfHWPerfBufMemDesc,
+									  (IMG_VOID**)&psDevInfo->psRGXFWIfHWPerfBuf);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXSetupFirmware: Failed to acquire kernel hwperf buffer (%u)",
+				 eError));
+		goto fail;
+	}
+
+
+	uiMemAllocFlags =	PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+						PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+						PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+						PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+						PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | 
+						PVRSRV_MEMALLOCFLAG_UNCACHED |
+						PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+	/* Allocate buffer to store FW data */
+	eError = DevmemFwAllocate(psDevInfo,
+							  RGX_META_COREMEM_DATA_SIZE,
+							  uiMemAllocFlags,
+							  "FirmwareCorememDataStore",
+							  &psDevInfo->psRGXFWIfCorememDataStoreMemDesc);
+	
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXSetupFirmware: Failed to allocate coremem data store (%u)",
+				 eError));
+		goto fail;
+	}
+
+#if defined(RGX_FEATURE_META_DMA)
+	RGXSetFirmwareAddress(&sRGXTmpCorememDataStoreFWAddr,
+						  psDevInfo->psRGXFWIfCorememDataStoreMemDesc,
+						  0, RFW_FWADDR_NOREF_FLAG);
+
+	RGXSetMetaDMAAddress(&psRGXFWInit->sCorememDataStore,
+						 psDevInfo->psRGXFWIfCorememDataStoreMemDesc,
+						 &sRGXTmpCorememDataStoreFWAddr,
+						 0);
+#else
+	RGXSetFirmwareAddress(&psRGXFWInit->sCorememDataStore.pbyFWAddr,
+						  psDevInfo->psRGXFWIfCorememDataStoreMemDesc,
+						  0, RFW_FWADDR_NOREF_FLAG);
+#endif
+
+	/* init HW frame info */
+	PDUMPCOMMENT("Allocate rgxfw HW info buffer");
+	eError = DevmemFwAllocate(psDevInfo,
+							sizeof(RGXFWIF_HWRINFOBUF),
+							uiMemAllocFlags,
+							"FirmwareHWInfoBuffer",
+							&psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate %d bytes for HW info (%u)",
+				(IMG_UINT32)sizeof(RGXFWIF_HWRINFOBUF),
+				eError));
+		goto fail;
+	}
+
+	RGXSetFirmwareAddress(&psRGXFWInit->psRGXFWIfHWRInfoBufCtl,
+						psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc,
+						0, RFW_FWADDR_NOREF_FLAG);
+
+	eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc,
+									  (IMG_VOID **)&psDevInfo->psRGXFWIfHWRInfoBuf);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to acquire kernel tracebuf ctl (%u)",
+				eError));
+		goto fail;
+	}
+	OSMemSet(psDevInfo->psRGXFWIfHWRInfoBuf, 0, sizeof(RGXFWIF_HWRINFOBUF));
+
+	/* init HWPERF data */
+	psDevInfo->psRGXFWIfTraceBuf->ui32HWPerfRIdx = 0;
+	psDevInfo->psRGXFWIfTraceBuf->ui32HWPerfWIdx = 0;
+	psDevInfo->psRGXFWIfTraceBuf->ui32HWPerfWrapCount = 0;
+	psDevInfo->psRGXFWIfTraceBuf->ui32HWPerfSize = psDevInfo->ui32RGXFWIfHWPerfBufSize;
+	psRGXFWInit->ui64HWPerfFilter = ui64HWPerfFilter;
+	psRGXFWInit->bDisableFilterHWPerfCustomCounter = (ui32ConfigFlags & RGXFWIF_INICFG_HWP_DISABLE_FILTER) ? IMG_TRUE : IMG_FALSE;
+	psDevInfo->psRGXFWIfTraceBuf->ui32HWPerfUt = 0;
+	psDevInfo->psRGXFWIfTraceBuf->ui32HWPerfDropCount = 0;
+	psDevInfo->psRGXFWIfTraceBuf->ui32FirstDropOrdinal = 0;
+	psDevInfo->psRGXFWIfTraceBuf->ui32LastDropOrdinal = 0;
+	psDevInfo->psRGXFWIfTraceBuf->ui32PowMonEnergy = 0;
+
+	
+	/* Initialise the HWPerf module in the Rogue device driver.
+	 * May allocate host buffer if HWPerf enabled at driver load time.
+	 */
+	eError = RGXHWPerfInit(psDeviceNode, (ui32ConfigFlags & RGXFWIF_INICFG_HWPERF_EN));
+	PVR_LOGG_IF_ERROR(eError, "RGXHWPerfInit", fail);
+
+	/* Set initial log type */
+	if (ui32LogType & ~RGXFWIF_LOG_TYPE_MASK)
+	{
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Invalid initial log type (0x%X)",ui32LogType));
+		goto fail;
+	}
+	psDevInfo->psRGXFWIfTraceBuf->ui32LogType = ui32LogType;
+
+	/* Allocate shared buffer for GPU utilisation */
+	uiMemAllocFlags =	PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+						PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+						PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+						PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+						PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+						PVRSRV_MEMALLOCFLAG_UNCACHED |
+						PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+	PDUMPCOMMENT("Allocate shared buffer for GPU utilisation");
+	eError = DevmemFwAllocate(psDevInfo,
+							sizeof(RGXFWIF_GPU_UTIL_FWCB),
+							uiMemAllocFlags,
+							"FirmwareGPUUtilisationBuffer",
+							&psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate %u bytes for GPU utilisation buffer ctl (%u)",
+				(IMG_UINT32)sizeof(RGXFWIF_GPU_UTIL_FWCB),
+				eError));
+		goto fail;
+	}
+
+	RGXSetFirmwareAddress(&psRGXFWInit->psGpuUtilFWCbCtl,
+						psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc,
+						0, RFW_FWADDR_NOREF_FLAG);
+
+	eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc,
+									  (IMG_VOID **)&psDevInfo->psRGXFWIfGpuUtilFWCb);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to acquire kernel GPU utilization FW CB ctl (%u)",
+				eError));
+		goto fail;
+	}
+
+	/* Initialise GPU utilisation buffer */
+	psDevInfo->psRGXFWIfGpuUtilFWCb->ui64LastWord =
+	    RGXFWIF_GPU_UTIL_MAKE_WORD(OSClockns64(),RGXFWIF_GPU_UTIL_STATE_IDLE);
+
+	uiMemAllocFlags =	PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+						PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+						PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+						PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+						PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+						PVRSRV_MEMALLOCFLAG_UNCACHED |
+						PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+	PDUMPCOMMENT("Allocate rgxfw FW runtime configuration (FW)");
+	eError = DevmemFwAllocate(psDevInfo,
+							sizeof(RGXFWIF_RUNTIME_CFG),
+							uiMemAllocFlags,
+							"FirmwareFWRuntimeCfg",
+							&psDevInfo->psRGXFWIfRuntimeCfgMemDesc);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate %u bytes for FW runtime configuration (%u)",
+				(IMG_UINT32)sizeof(RGXFWIF_RUNTIME_CFG),
+				eError));
+		goto fail;
+	}
+
+	RGXSetFirmwareAddress(&psRGXFWInit->psRuntimeCfg,
+						psDevInfo->psRGXFWIfRuntimeCfgMemDesc,
+						0, RFW_FWADDR_NOREF_FLAG);
+
+	eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfRuntimeCfgMemDesc,
+									(IMG_VOID **)&psDevInfo->psRGXFWIfRuntimeCfg);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to acquire kernel FW runtime configuration (%u)",
+				eError));
+		goto fail;
+	}
+
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+	PDUMPCOMMENT("Allocate rgxfw register configuration structure");
+	eError = DevmemFwAllocate(psDevInfo,
+							sizeof(RGXFWIF_REG_CFG),
+							uiMemAllocFlags,
+							"Firmware register configuration structure",
+							&psDevInfo->psRGXFWIfRegCfgMemDesc);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate %u bytes for fw register configurations (%u)",
+				(IMG_UINT32)sizeof(RGXFWIF_REG_CFG),
+				eError));
+		goto fail;
+	}
+
+	RGXSetFirmwareAddress(&psRGXFWInit->psRegCfg,
+						psDevInfo->psRGXFWIfRegCfgMemDesc,
+						0, RFW_FWADDR_NOREF_FLAG);
+#endif
+
+	uiMemAllocFlags =	PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+						PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+						PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+						PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+						PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+						PVRSRV_MEMALLOCFLAG_UNCACHED |
+						PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+	PDUMPCOMMENT("Allocate rgxfw hwperfctl structure");
+	eError = DevmemFwAllocateExportable(psDeviceNode,
+							ui32HWPerfCountersDataSize,
+							uiMemAllocFlags,
+							"Firmware hwperf control structure",
+							&psDevInfo->psRGXFWIfHWPerfCountersMemDesc);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXInitHWPerfCounters: Failed to allocate %u bytes for fw hwperf control (%u)",
+				ui32HWPerfCountersDataSize,
+				eError));
+		goto fail;
+	}
+
+	eError = DevmemExport(psDevInfo->psRGXFWIfHWPerfCountersMemDesc,
+	                      &psDevInfo->sRGXFWHWPerfCountersExportCookie);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"Failed to export fw hwperf ctl (%u)",
+				eError));
+		goto fail;
+	}
+
+	RGXSetFirmwareAddress(&psRGXFWInit->psHWPerfCtl,
+						psDevInfo->psRGXFWIfHWPerfCountersMemDesc,
+						0, 0);
+	
+	/* Allocate a sync for power management */
+	eError = SyncPrimContextCreate(IMG_NULL,
+									psDevInfo->psDeviceNode,
+						  			&psDevInfo->hSyncPrimContext);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate sync primitive context with error (%u)", eError));
+		goto fail;
+	}
+
+	eError = SyncPrimAlloc(psDevInfo->hSyncPrimContext, &psDevInfo->psPowSyncPrim, "fw power ack");
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate sync primitive with error (%u)", eError));
+		goto fail;
+	}
+
+	psRGXFWInit->uiPowerSync = SyncPrimGetFirmwareAddr(psDevInfo->psPowSyncPrim);
+
+	/* Required info by FW to calculate the ActivePM idle timer latency */
+	{
+		RGX_DATA *psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData;
+		RGXFWIF_RUNTIME_CFG *psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg;
+
+		psRGXFWInit->ui32InitialCoreClockSpeed = psRGXData->psRGXTimingInfo->ui32CoreClockSpeed;
+		psRGXFWInit->ui32ActivePMLatencyms = psRGXData->psRGXTimingInfo->ui32ActivePMLatencyms;
+
+		/* Initialise variable runtime configuration to the system defaults */
+		psRuntimeCfg->ui32CoreClockSpeed = psRGXFWInit->ui32InitialCoreClockSpeed;
+		psRuntimeCfg->ui32ActivePMLatencyms = psRGXFWInit->ui32ActivePMLatencyms;
+		psRuntimeCfg->bActivePMLatencyPersistant = IMG_TRUE;
+	}
+
+	/* Setup Fault read register */
+	eError = RGXSetupFaultReadRegister(psDeviceNode, psRGXFWInit);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to setup fault read register"));
+		goto fail;
+	}
+
+	/* Apply FIX_HW_BRN_37200 */
+	eError = RGXHwBrn37200(psDevInfo);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to apply HWBRN37200"));
+		goto fail;
+	}
+
+	/*
+	 * Set up kernel TA CCB.
+	 */
+	eError = RGXSetupKernelCCB(psDevInfo,
+							   psRGXFWInit,
+							   RGXFWIF_DM_TA, RGXFWIF_KCCB_TA_NUMCMDS_LOG2,
+							   sizeof(RGXFWIF_KCCB_CMD));
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate kernel TA CCB"));
+		goto fail;
+	}
+
+	/*
+	 * Set up firmware TA CCB.
+	 */
+	eError = RGXSetupFirmwareCCB(psDevInfo,
+							   psRGXFWInit,
+							   RGXFWIF_DM_TA, RGXFWIF_FWCCB_TA_NUMCMDS_LOG2,
+							   sizeof(RGXFWIF_FWCCB_CMD));
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate Firmware TA CCB"));
+		goto fail;
+	}
+
+	/*
+	 * Set up kernel 3D CCB.
+	 */
+	eError = RGXSetupKernelCCB(psDevInfo,
+							   psRGXFWInit,
+							   RGXFWIF_DM_3D, RGXFWIF_KCCB_3D_NUMCMDS_LOG2,
+							   sizeof(RGXFWIF_KCCB_CMD));
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate kernel 3D CCB"));
+		goto fail;
+	}
+
+	/*
+	 * Set up Firmware 3D CCB.
+	 */
+	eError = RGXSetupFirmwareCCB(psDevInfo,
+							   psRGXFWInit,
+							   RGXFWIF_DM_3D, RGXFWIF_FWCCB_3D_NUMCMDS_LOG2,
+							   sizeof(RGXFWIF_FWCCB_CMD));
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate Firmware 3D CCB"));
+		goto fail;
+	}
+
+	/*
+	 * Set up kernel 2D CCB.
+	 */
+	eError = RGXSetupKernelCCB(psDevInfo,
+							   psRGXFWInit,
+							   RGXFWIF_DM_2D, RGXFWIF_KCCB_2D_NUMCMDS_LOG2,
+							   sizeof(RGXFWIF_KCCB_CMD));
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate kernel 2D CCB"));
+		goto fail;
+	}
+	/*
+	 * Set up Firmware 2D CCB.
+	 */
+	eError = RGXSetupFirmwareCCB(psDevInfo,
+							   psRGXFWInit,
+							   RGXFWIF_DM_2D, RGXFWIF_FWCCB_2D_NUMCMDS_LOG2,
+							   sizeof(RGXFWIF_FWCCB_CMD));
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate Firmware 2D CCB"));
+		goto fail;
+	}
+
+	/*
+	 * Set up kernel compute CCB.
+	 */
+	eError = RGXSetupKernelCCB(psDevInfo,
+							   psRGXFWInit,
+							   RGXFWIF_DM_CDM, RGXFWIF_KCCB_CDM_NUMCMDS_LOG2,
+							   sizeof(RGXFWIF_KCCB_CMD));
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate kernel Compute CCB"));
+		goto fail;
+	}
+
+	/*
+	 * Set up Firmware Compute CCB.
+	 */
+	eError = RGXSetupFirmwareCCB(psDevInfo,
+							   psRGXFWInit,
+							   RGXFWIF_DM_CDM, RGXFWIF_FWCCB_CDM_NUMCMDS_LOG2,
+							   sizeof(RGXFWIF_FWCCB_CMD));
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate Firmware Compute CCB"));
+		goto fail;
+	}
+
+	/*
+	 * Set up kernel general purpose CCB.
+	 */
+	eError = RGXSetupKernelCCB(psDevInfo,
+							   psRGXFWInit,
+							   RGXFWIF_DM_GP, RGXFWIF_KCCB_GP_NUMCMDS_LOG2,
+							   sizeof(RGXFWIF_KCCB_CMD));
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate kernel General Purpose CCB"));
+		goto fail;
+	}
+	
+	/*
+	 * Set up Firmware general purpose CCB.
+	 */
+	eError = RGXSetupFirmwareCCB(psDevInfo,
+							   psRGXFWInit,
+							   RGXFWIF_DM_GP, RGXFWIF_FWCCB_GP_NUMCMDS_LOG2,
+							   sizeof(RGXFWIF_FWCCB_CMD));
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate Firmware General Purpose CCB"));
+		goto fail;
+	}
+#if defined(RGX_FEATURE_RAY_TRACING)	
+	/*
+	 * Set up kernel SHG CCB.
+	 */
+	eError = RGXSetupKernelCCB(psDevInfo,
+							   psRGXFWInit,
+							   RGXFWIF_DM_SHG, RGXFWIF_KCCB_SHG_NUMCMDS_LOG2,
+							   sizeof(RGXFWIF_KCCB_CMD));
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate kernel SHG CCB"));
+		goto fail;
+	}
+
+	/*
+	 * Set up Firmware SHG CCB.
+	 */
+	eError = RGXSetupFirmwareCCB(psDevInfo,
+							   psRGXFWInit,
+							   RGXFWIF_DM_SHG, RGXFWIF_FWCCB_SHG_NUMCMDS_LOG2,
+							   sizeof(RGXFWIF_FWCCB_CMD));
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate Firmware SHG CCB"));
+		goto fail;
+	}
+	
+	/*
+	 * Set up kernel RTU CCB.
+	 */
+	eError = RGXSetupKernelCCB(psDevInfo,
+							   psRGXFWInit,
+							   RGXFWIF_DM_RTU, RGXFWIF_KCCB_RTU_NUMCMDS_LOG2,
+							   sizeof(RGXFWIF_KCCB_CMD));
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate kernel RTU CCB"));
+		goto fail;
+	}
+
+	/*
+	 * Set up Firmware RTU CCB.
+	 */
+	eError = RGXSetupFirmwareCCB(psDevInfo,
+							   psRGXFWInit,
+							   RGXFWIF_DM_RTU, RGXFWIF_FWCCB_RTU_NUMCMDS_LOG2,
+							   sizeof(RGXFWIF_FWCCB_CMD));
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate Firmware SHG CCB"));
+		goto fail;
+	}
+#endif
+
+	/* Setup Signature and Checksum Buffers for TA and 3D */
+	eError = RGXFWSetupSignatureChecks(psDevInfo,
+	                                   &psDevInfo->psRGXFWSigTAChecksMemDesc, 
+	                                   ui32SignatureChecksBufSize,
+	                                   &psRGXFWInit->asSigBufCtl[RGXFWIF_DM_TA],
+	                                   "TA");
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to setup TA signature checks"));
+		goto fail;
+	}
+	psDevInfo->ui32SigTAChecksSize = ui32SignatureChecksBufSize;
+
+	eError = RGXFWSetupSignatureChecks(psDevInfo,
+	                                   &psDevInfo->psRGXFWSig3DChecksMemDesc, 
+	                                   ui32SignatureChecksBufSize,
+	                                   &psRGXFWInit->asSigBufCtl[RGXFWIF_DM_3D],
+	                                   "3D");
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to setup 3D signature checks"));
+		goto fail;
+	}
+	psDevInfo->ui32Sig3DChecksSize = ui32SignatureChecksBufSize;
+
+#if defined(RGX_FEATURE_RAY_TRACING)
+	eError = RGXFWSetupSignatureChecks(psDevInfo,
+	                                   &psDevInfo->psRGXFWSigRTChecksMemDesc, 
+	                                   ui32SignatureChecksBufSize,
+	                                   &psRGXFWInit->asSigBufCtl[RGXFWIF_DM_RTU],
+	                                   "RTU");
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to setup RTU signature checks"));
+		goto fail;
+	}
+	psDevInfo->ui32SigRTChecksSize = ui32SignatureChecksBufSize;
+	
+	eError = RGXFWSetupSignatureChecks(psDevInfo,
+	                                   &psDevInfo->psRGXFWSigSHChecksMemDesc, 
+	                                   ui32SignatureChecksBufSize,
+	                                   &psRGXFWInit->asSigBufCtl[RGXFWIF_DM_SHG],
+	                                   "SHG");
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to setup SHG signature checks"));
+		goto fail;
+	}
+	psDevInfo->ui32SigSHChecksSize = ui32SignatureChecksBufSize;
+#endif
+
+#if defined(RGXFW_ALIGNCHECKS)
+	eError = RGXFWSetupAlignChecks(psDevInfo, 
+								&psRGXFWInit->paui32AlignChecks, 
+								pui32RGXFWAlignChecks, 
+								ui32RGXFWAlignChecksSize);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to setup alignment checks"));
+		goto fail;
+	}
+#endif
+
+	/* Fill the remaining bits of fw the init data */
+	psRGXFWInit->sPDSExecBase.uiAddr = RGX_PDSCODEDATA_HEAP_BASE;
+	psRGXFWInit->sUSCExecBase.uiAddr = RGX_USCCODE_HEAP_BASE;
+	psRGXFWInit->sDPXControlStreamBase.uiAddr = RGX_DOPPLER_HEAP_BASE;
+	psRGXFWInit->sResultDumpBase.uiAddr = RGX_DOPPLER_OVERFLOW_HEAP_BASE;
+	psRGXFWInit->sRTUHeapBase.uiAddr = RGX_DOPPLER_HEAP_BASE;
+
+	/* RD Power Island */
+	{
+		RGX_DATA *psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData;
+		IMG_BOOL bSysEnableRDPowIsland = psRGXData->psRGXTimingInfo->bEnableRDPowIsland;
+		IMG_BOOL bEnableRDPowIsland = ((eRGXRDPowerIslandConf == RGX_RD_POWER_ISLAND_DEFAULT) && bSysEnableRDPowIsland) ||
+						(eRGXRDPowerIslandConf == RGX_RD_POWER_ISLAND_FORCE_ON);
+
+		ui32ConfigFlags |= bEnableRDPowIsland? RGXFWIF_INICFG_POW_RASCALDUST : 0;
+	}
+
+	psRGXFWInit->ui32ConfigFlags = ui32ConfigFlags & RGXFWIF_INICFG_ALL;
+	psRGXFWInit->ui32FilterFlags = ui32FilterFlags;
+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE)
+	psRGXFWInit->ui32JonesDisableMask = ui32JonesDisableMask;
+#endif
+	psDevInfo->bPDPEnabled = (ui32ConfigFlags & RGXFWIF_SRVCFG_DISABLE_PDP_EN)
+			? IMG_FALSE : IMG_TRUE;
+	psRGXFWInit->ui32HWRDebugDumpLimit = ui32HWRDebugDumpLimit;
+
+#if defined(RGX_FEATURE_SLC_VIVT)
+	eError = _AllocateSLC3Fence(psDevInfo, psRGXFWInit);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate memory for SLC3Fence"));
+		goto fail;
+	}
+#endif
+
+	/* Timestamps */
+	uiMemAllocFlags =
+		PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+		PVRSRV_MEMALLOCFLAG_GPU_READABLE | /* XXX ?? */
+		PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+		PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+		PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+		PVRSRV_MEMALLOCFLAG_UNCACHED |
+		PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+	
+	/*
+	  the timer query arrays
+	*/
+	PDUMPCOMMENT("Allocate timer query arrays (FW)");
+	eError = DevmemFwAllocate(psDevInfo,
+	                          sizeof(RGXFWIF_TIMESTAMP) * RGX_MAX_TIMER_QUERIES,
+	                          uiMemAllocFlags,
+	                          "Start times array",
+	                          & psDevInfo->psStartTimeMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to map start times array"));
+		goto fail;
+	}
+
+	eError = DevmemAcquireCpuVirtAddr(psDevInfo->psStartTimeMemDesc,
+	                                  (IMG_VOID **)& psDevInfo->pasStartTimeById);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to map start times array"));
+		goto fail;
+	}
+
+	eError = DevmemFwAllocate(psDevInfo,
+	                          sizeof(RGXFWIF_TIMESTAMP) * RGX_MAX_TIMER_QUERIES,
+	                          uiMemAllocFlags,
+	                          "End times array",
+	                          & psDevInfo->psEndTimeMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to map end times array"));
+		goto fail;
+	}
+
+	eError = DevmemAcquireCpuVirtAddr(psDevInfo->psEndTimeMemDesc,
+	                                  (IMG_VOID **)& psDevInfo->pasEndTimeById);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to map end times array"));
+		goto fail;
+	}
+
+	eError = DevmemFwAllocate(psDevInfo,
+	                          sizeof(IMG_UINT32) * RGX_MAX_TIMER_QUERIES,
+	                          uiMemAllocFlags,
+	                          "Completed ops array",
+	                          & psDevInfo->psCompletedMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to completed ops array"));
+		goto fail;
+	}
+
+	eError = DevmemAcquireCpuVirtAddr(psDevInfo->psCompletedMemDesc,
+	                                  (IMG_VOID **)& psDevInfo->pui32CompletedById);
+	
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to map completed ops array"));
+		goto fail;
+	}
+
+		/* Initialize FW started flag */
+	psRGXFWInit->bFirmwareStarted = IMG_FALSE;
+	
+	/* Initialise the compatibility check data */
+	RGXFWIF_COMPCHECKS_BVNC_INIT(psRGXFWInit->sRGXCompChecks.sFWBVNC);
+	RGXFWIF_COMPCHECKS_BVNC_INIT(psRGXFWInit->sRGXCompChecks.sHWBVNC);
+	
+	{
+		/* Below line is to make sure (compilation time check) that 
+				RGX_BVNC_KM_V_ST fits into RGXFWIF_COMPCHECKS_BVNC structure */
+		IMG_CHAR _tmp_[RGXFWIF_COMPCHECKS_BVNC_V_LEN_MAX] = RGX_BVNC_KM_V_ST;
+		_tmp_[0] = '\0';
+	}
+	
+	PDUMPCOMMENT("Dump RGXFW Init data");
+	if (!bEnableSignatureChecks)
+	{
+#if defined(PDUMP)
+		PDUMPCOMMENT("(to enable rgxfw signatures place the following line after the RTCONF line)");
+		DevmemPDumpLoadMem(	psDevInfo->psRGXFWIfInitMemDesc,
+							offsetof(RGXFWIF_INIT, asSigBufCtl),
+							sizeof(RGXFWIF_SIGBUF_CTL)*RGXFWIF_DM_MAX,
+							PDUMP_FLAGS_CONTINUOUS);
+#endif
+		psRGXFWInit->asSigBufCtl[RGXFWIF_DM_3D].psBuffer.ui32Addr = 0x0;
+		psRGXFWInit->asSigBufCtl[RGXFWIF_DM_TA].psBuffer.ui32Addr = 0x0;
+	}
+	
+	for (dm = 0; dm < RGXFWIF_DM_MAX; dm++)
+	{
+		psDevInfo->psRGXFWIfTraceBuf->aui16HwrDmLockedUpCount[dm] = 0;
+		psDevInfo->psRGXFWIfTraceBuf->aui16HwrDmOverranCount[dm] = 0;
+		psDevInfo->psRGXFWIfTraceBuf->aui16HwrDmRecoveredCount[dm] = 0;
+		psDevInfo->psRGXFWIfTraceBuf->aui16HwrDmFalseDetectCount[dm] = 0;
+		psDevInfo->psRGXFWIfTraceBuf->apsHwrDmFWCommonContext[dm].ui32Addr = 0;
+	}
+	
+	/*
+	 * BIF Tiling configuration
+	 */
+
+	psRGXFWInit->sBifTilingCfg[0].uiBase = RGX_BIF_TILING_HEAP_1_BASE;
+	psRGXFWInit->sBifTilingCfg[0].uiLen = RGX_BIF_TILING_HEAP_SIZE;
+	psRGXFWInit->sBifTilingCfg[0].uiXStride = pui32BIFTilingXStrides[0];
+	psRGXFWInit->sBifTilingCfg[1].uiBase = RGX_BIF_TILING_HEAP_2_BASE;
+	psRGXFWInit->sBifTilingCfg[1].uiLen = RGX_BIF_TILING_HEAP_SIZE;
+	psRGXFWInit->sBifTilingCfg[1].uiXStride = pui32BIFTilingXStrides[1];
+	psRGXFWInit->sBifTilingCfg[2].uiBase = RGX_BIF_TILING_HEAP_3_BASE;
+	psRGXFWInit->sBifTilingCfg[2].uiLen = RGX_BIF_TILING_HEAP_SIZE;
+	psRGXFWInit->sBifTilingCfg[2].uiXStride = pui32BIFTilingXStrides[2];
+	psRGXFWInit->sBifTilingCfg[3].uiBase = RGX_BIF_TILING_HEAP_4_BASE;
+	psRGXFWInit->sBifTilingCfg[3].uiLen = RGX_BIF_TILING_HEAP_SIZE;
+	psRGXFWInit->sBifTilingCfg[3].uiXStride = pui32BIFTilingXStrides[3];
+
+#if defined(PDUMP)
+	PDUMPCOMMENT("Dump rgxfw hwperfctl structure");
+	DevmemPDumpLoadZeroMem (psDevInfo->psRGXFWIfHWPerfCountersMemDesc,
+	                        0,
+							ui32HWPerfCountersDataSize,
+	                        PDUMP_FLAGS_CONTINUOUS);
+	PDUMPCOMMENT("Dump rgxfw HW Perf Info structure");
+	DevmemPDumpLoadMem (psDevInfo->psRGXFWIfHWPerfBufMemDesc,
+						0,
+						psDevInfo->ui32RGXFWIfHWPerfBufSize,
+						PDUMP_FLAGS_CONTINUOUS);
+	PDUMPCOMMENT("Dump rgxfw trace structure");
+	DevmemPDumpLoadMem(	psDevInfo->psRGXFWIfTraceBufCtlMemDesc,
+						0,
+						sizeof(RGXFWIF_TRACEBUF),
+						PDUMP_FLAGS_CONTINUOUS);
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+	PDUMPCOMMENT("Dump rgxfw register configuration buffer");
+	DevmemPDumpLoadMem(	psDevInfo->psRGXFWIfRegCfgMemDesc,
+						0,
+						sizeof(RGXFWIF_REG_CFG),
+						PDUMP_FLAGS_CONTINUOUS);
+#endif
+	PDUMPCOMMENT("Dump rgxfw init structure");
+	DevmemPDumpLoadMem(	psDevInfo->psRGXFWIfInitMemDesc,
+						0,
+						sizeof(RGXFWIF_INIT),
+						PDUMP_FLAGS_CONTINUOUS);
+
+	PDUMPCOMMENT("Dump rgxfw coremem data store");
+	DevmemPDumpLoadMem(	psDevInfo->psRGXFWIfCorememDataStoreMemDesc,
+						0,
+						RGX_META_COREMEM_DATA_SIZE,
+						PDUMP_FLAGS_CONTINUOUS);
+
+	PDUMPCOMMENT("RTCONF: run-time configuration");
+
+	
+	/* Dump the config options so they can be edited.
+	 * 
+	 * FIXME: Need new DevmemPDumpWRW API which writes a WRW to load ui32ConfigFlags
+	 */
+	PDUMPCOMMENT("(Set the FW config options here)");
+	PDUMPCOMMENT("( Ctx Switch TA Enable: 0x%08x)", RGXFWIF_INICFG_CTXSWITCH_TA_EN);
+	PDUMPCOMMENT("( Ctx Switch 3D Enable: 0x%08x)", RGXFWIF_INICFG_CTXSWITCH_3D_EN);
+	PDUMPCOMMENT("( Ctx Switch CDM Enable: 0x%08x)", RGXFWIF_INICFG_CTXSWITCH_CDM_EN);
+	PDUMPCOMMENT("( Ctx Switch Rand mode: 0x%08x)", RGXFWIF_INICFG_CTXSWITCH_MODE_RAND);
+	PDUMPCOMMENT("( Ctx Switch Soft Reset Enable: 0x%08x)", RGXFWIF_INICFG_CTXSWITCH_SRESET_EN);
+	PDUMPCOMMENT("( Reserved (do not set): 0x%08x)", RGXFWIF_INICFG_RSVD);
+	PDUMPCOMMENT("( Rascal+Dust Power Island: 0x%08x)", RGXFWIF_INICFG_POW_RASCALDUST);
+	PDUMPCOMMENT("( Enable HWPerf: 0x%08x)", RGXFWIF_INICFG_HWPERF_EN);
+	PDUMPCOMMENT("( Enable HWR: 0x%08x)", RGXFWIF_INICFG_HWR_EN);
+	PDUMPCOMMENT("( Check MList: 0x%08x)", RGXFWIF_INICFG_CHECK_MLIST_EN);
+	PDUMPCOMMENT("( Disable Auto Clock Gating: 0x%08x)", RGXFWIF_INICFG_DISABLE_CLKGATING_EN);
+	PDUMPCOMMENT("( Enable HWPerf Polling Perf Counter: 0x%08x)", RGXFWIF_INICFG_POLL_COUNTERS_EN);
+#if defined(RGX_FEATURE_VDM_OBJECT_LEVEL_LLS)
+	PDUMPCOMMENT("( Ctx Switch Object mode Index: 0x%08x)", RGXFWIF_INICFG_VDM_CTX_STORE_MODE_INDEX);
+	PDUMPCOMMENT("( Ctx Switch Object mode Instance: 0x%08x)", RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_INSTANCE);
+	PDUMPCOMMENT("( Ctx Switch Object mode List: 0x%08x)", RGXFWIF_INICFG_VDM_CTX_STORE_MODE_LIST);
+#endif
+	PDUMPCOMMENT("( Enable SHG Bypass mode: 0x%08x)", RGXFWIF_INICFG_SHG_BYPASS_EN);
+	PDUMPCOMMENT("( Enable RTU Bypass mode: 0x%08x)", RGXFWIF_INICFG_RTU_BYPASS_EN);
+	PDUMPCOMMENT("( Enable register configuration: 0x%08x)", RGXFWIF_INICFG_REGCONFIG_EN);
+	PDUMPCOMMENT("( Assert on TA Out-of-Memory: 0x%08x)", RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY);
+	PDUMPCOMMENT("( Disable HWPerf custom counter filter: 0x%08x)", RGXFWIF_INICFG_HWP_DISABLE_FILTER);
+	PDUMPCOMMENT("( Enable HWPerf custom performance timer: 0x%08x)", RGXFWIF_INICFG_CUSTOM_PERF_TIMER_EN);
+	PDUMPCOMMENT("( Enable CDM Killing Rand mode: 0x%08x)", RGXFWIF_INICFG_CDM_KILL_MODE_RAND_EN);
+	PDUMPCOMMENT("( Disable DM overlap (except TA during SPM): 0x%08x)", RGXFWIF_INICFG_DISABLE_DM_OVERLAP);
+
+	DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfInitMemDesc,
+							offsetof(RGXFWIF_INIT, ui32ConfigFlags),
+							psRGXFWInit->ui32ConfigFlags,
+							PDUMP_FLAGS_CONTINUOUS);
+
+	/* 
+	 * Dump the log config so it can be edited.
+	 */
+	PDUMPCOMMENT("(Set the log config here)");
+	PDUMPCOMMENT("( Log Type: set bit 0 for TRACE, reset for TBI)");
+	PDUMPCOMMENT("( MAIN Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_MAIN);
+	PDUMPCOMMENT("( MTS Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_MTS);
+	PDUMPCOMMENT("( CLEANUP Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_CLEANUP);
+	PDUMPCOMMENT("( CSW Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_CSW);
+	PDUMPCOMMENT("( BIF Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_BIF);
+	PDUMPCOMMENT("( PM Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_PM);
+	PDUMPCOMMENT("( RTD Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_RTD);
+	PDUMPCOMMENT("( SPM Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_SPM);
+	PDUMPCOMMENT("( POW Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_POW);
+	PDUMPCOMMENT("( HWR Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_HWR);
+	PDUMPCOMMENT("( HWP Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_HWP);
+#if defined(RGX_FEATURE_RAY_TRACING)
+	PDUMPCOMMENT("( RPM Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_RPM);
+#endif
+#if defined(RGX_FEATURE_META_DMA)
+	PDUMPCOMMENT("( DMA Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_DMA);
+#endif
+	PDUMPCOMMENT("( DEBUG Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_DEBUG);
+	DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfTraceBufCtlMemDesc,
+							offsetof(RGXFWIF_TRACEBUF, ui32LogType),
+							psDevInfo->psRGXFWIfTraceBuf->ui32LogType,
+							PDUMP_FLAGS_CONTINUOUS);
+
+	PDUMPCOMMENT("Set the HWPerf Filter config here");
+	DevmemPDumpLoadMemValue64(psDevInfo->psRGXFWIfInitMemDesc,
+						offsetof(RGXFWIF_INIT, ui64HWPerfFilter),
+						psRGXFWInit->ui64HWPerfFilter,
+						PDUMP_FLAGS_CONTINUOUS);						
+							
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+	PDUMPCOMMENT("(Number of registers configurations in sidekick)");
+	DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfRegCfgMemDesc,
+							offsetof(RGXFWIF_REG_CFG, ui32NumRegsSidekick),
+							0,
+							PDUMP_FLAGS_CONTINUOUS);
+	PDUMPCOMMENT("(Number of registers configurations in rascal/dust)");
+	DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfRegCfgMemDesc,
+							offsetof(RGXFWIF_REG_CFG, ui32NumRegsRascalDust),
+							0,
+							PDUMP_FLAGS_CONTINUOUS);
+	PDUMPCOMMENT("(Set registers here, address, value)");
+	DevmemPDumpLoadMemValue64(psDevInfo->psRGXFWIfRegCfgMemDesc,
+							offsetof(RGXFWIF_REG_CFG, asRegConfigs[0].ui64Addr),
+							0,
+							PDUMP_FLAGS_CONTINUOUS);
+	DevmemPDumpLoadMemValue64(psDevInfo->psRGXFWIfRegCfgMemDesc,
+							offsetof(RGXFWIF_REG_CFG, asRegConfigs[0].ui64Value),
+							0,
+							PDUMP_FLAGS_CONTINUOUS);
+#endif /* SUPPORT_USER_REGISTER_CONFIGURATION */
+#endif
+
+	/* We don't need access to the fw init data structure anymore */
+	DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc);
+	psRGXFWInit = IMG_NULL;
+
+	psDevInfo->bFirmwareInitialised = IMG_TRUE;
+
+	return PVRSRV_OK;
+
+fail:
+	if (psDevInfo->psRGXFWIfInitMemDesc != IMG_NULL && psRGXFWInit != IMG_NULL)
+	{
+		DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc);
+	}
+	RGXFreeFirmware(psDevInfo);
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+/*!
+*******************************************************************************
+
+ @Function	RGXFreeFirmware
+
+ @Description
+
+ Frees all the firmware-related allocations
+
+ @Input psDevInfo
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+IMG_VOID RGXFreeFirmware(PVRSRV_RGXDEV_INFO 	*psDevInfo)
+{
+	RGXFWIF_DM	eCCBType;
+	
+	psDevInfo->bFirmwareInitialised = IMG_FALSE;
+
+	for (eCCBType = 0; eCCBType < RGXFWIF_DM_MAX; eCCBType++)
+	{
+		RGXFreeKernelCCB(psDevInfo, eCCBType);
+		RGXFreeFirmwareCCB(psDevInfo, eCCBType);
+	}
+
+#if defined(RGXFW_ALIGNCHECKS)
+	if (psDevInfo->psRGXFWAlignChecksMemDesc)
+	{
+		RGXFWFreeAlignChecks(psDevInfo);
+	}
+#endif
+
+	if (psDevInfo->psRGXFWSigTAChecksMemDesc)
+	{
+		DevmemFwFree(psDevInfo->psRGXFWSigTAChecksMemDesc);
+		psDevInfo->psRGXFWSigTAChecksMemDesc = IMG_NULL;
+	}
+
+	if (psDevInfo->psRGXFWSig3DChecksMemDesc)
+	{
+		DevmemFwFree(psDevInfo->psRGXFWSig3DChecksMemDesc);
+		psDevInfo->psRGXFWSig3DChecksMemDesc = IMG_NULL;
+	}
+
+#if defined(RGX_FEATURE_RAY_TRACING)
+	if (psDevInfo->psRGXFWSigRTChecksMemDesc)
+	{
+		DevmemFwFree(psDevInfo->psRGXFWSigRTChecksMemDesc);
+		psDevInfo->psRGXFWSigRTChecksMemDesc = IMG_NULL;
+	}
+	
+	if (psDevInfo->psRGXFWSigSHChecksMemDesc)
+	{
+		DevmemFwFree(psDevInfo->psRGXFWSigSHChecksMemDesc);
+		psDevInfo->psRGXFWSigSHChecksMemDesc = IMG_NULL;
+	}
+#endif
+
+#if defined(FIX_HW_BRN_37200)
+	if (psDevInfo->psRGXFWHWBRN37200MemDesc)
+	{
+		DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWHWBRN37200MemDesc);
+		DevmemFree(psDevInfo->psRGXFWHWBRN37200MemDesc);
+		psDevInfo->psRGXFWHWBRN37200MemDesc = IMG_NULL;
+	}
+#endif
+
+	RGXSetupFaultReadRegisterRollback(psDevInfo);
+
+	if (psDevInfo->psPowSyncPrim != IMG_NULL)
+	{
+		SyncPrimFree(psDevInfo->psPowSyncPrim);
+		psDevInfo->psPowSyncPrim = IMG_NULL;
+	}
+	
+	if (psDevInfo->hSyncPrimContext != 0)
+	{
+		SyncPrimContextDestroy(psDevInfo->hSyncPrimContext);
+		psDevInfo->hSyncPrimContext = 0;
+	}
+
+	if (psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc)
+	{
+		if (psDevInfo->psRGXFWIfGpuUtilFWCb != IMG_NULL)
+		{
+			DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc);
+			psDevInfo->psRGXFWIfGpuUtilFWCb = IMG_NULL;
+		}
+		DevmemFwFree(psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc);
+		psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc = IMG_NULL;
+	}
+
+	if (psDevInfo->psRGXFWIfRuntimeCfgMemDesc)
+	{
+		if (psDevInfo->psRGXFWIfRuntimeCfg != IMG_NULL)
+		{
+			DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfRuntimeCfgMemDesc);
+			psDevInfo->psRGXFWIfRuntimeCfg = IMG_NULL;
+		}
+		DevmemFwFree(psDevInfo->psRGXFWIfRuntimeCfgMemDesc);
+		psDevInfo->psRGXFWIfRuntimeCfgMemDesc = IMG_NULL;
+	}
+
+	if (psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc)
+	{
+		if (psDevInfo->psRGXFWIfHWRInfoBuf != IMG_NULL)
+		{
+			DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc);
+			psDevInfo->psRGXFWIfHWRInfoBuf = IMG_NULL;
+		}
+		DevmemFwFree(psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc);
+		psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc = IMG_NULL;
+	}
+
+	RGXHWPerfDeinit();
+	
+	if (psDevInfo->psRGXFWIfHWPerfBufMemDesc)
+	{
+		if (psDevInfo->psRGXFWIfHWPerfBuf != IMG_NULL)
+		{
+			DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfHWPerfBufMemDesc);
+			psDevInfo->psRGXFWIfHWPerfBuf = IMG_NULL;
+		}
+		DevmemFwFree(psDevInfo->psRGXFWIfHWPerfBufMemDesc);
+		psDevInfo->psRGXFWIfHWPerfBufMemDesc = IMG_NULL;
+	}
+
+	if (psDevInfo->psRGXFWIfCorememDataStoreMemDesc)
+	{
+		DevmemFwFree(psDevInfo->psRGXFWIfCorememDataStoreMemDesc);
+		psDevInfo->psRGXFWIfCorememDataStoreMemDesc = IMG_NULL;
+	}
+
+	if (psDevInfo->psRGXFWIfTraceBufCtlMemDesc)
+	{
+		if (psDevInfo->psRGXFWIfTraceBuf != IMG_NULL)
+		{    
+			DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfTraceBufCtlMemDesc);
+			psDevInfo->psRGXFWIfTraceBuf = IMG_NULL;
+		}
+		DevmemFwFree(psDevInfo->psRGXFWIfTraceBufCtlMemDesc);
+		psDevInfo->psRGXFWIfTraceBufCtlMemDesc = IMG_NULL;
+	}
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+	if (psDevInfo->psRGXFWIfRegCfgMemDesc)
+	{
+		DevmemFwFree(psDevInfo->psRGXFWIfRegCfgMemDesc);
+		psDevInfo->psRGXFWIfRegCfgMemDesc = IMG_NULL;
+	}
+#endif
+	if (psDevInfo->psRGXFWIfHWPerfCountersMemDesc)
+	{
+		RGXUnsetFirmwareAddress(psDevInfo->psRGXFWIfHWPerfCountersMemDesc);
+		if (DevmemIsValidExportCookie(&psDevInfo->sRGXFWHWPerfCountersExportCookie))
+		{
+			/* if the export cookie is valid, the init sequence failed */
+			PVR_DPF((PVR_DBG_ERROR, "RGXFreeFirmware: FW HWPerf Export cookie"
+			         "still valid (should have been unexported at init time)"));
+			DevmemUnexport(psDevInfo->psRGXFWIfHWPerfCountersMemDesc,
+			               &psDevInfo->sRGXFWHWPerfCountersExportCookie);
+		}
+		DevmemFwFree(psDevInfo->psRGXFWIfHWPerfCountersMemDesc);
+		psDevInfo->psRGXFWIfHWPerfCountersMemDesc = IMG_NULL;
+	}
+#if defined(RGX_FEATURE_SLC_VIVT)
+	_FreeSLC3Fence(psDevInfo);
+#endif
+
+	if (psDevInfo->psRGXFWIfInitMemDesc)
+	{
+		DevmemFwFree(psDevInfo->psRGXFWIfInitMemDesc);
+		psDevInfo->psRGXFWIfInitMemDesc = IMG_NULL;
+	}
+
+	if (psDevInfo->psCompletedMemDesc)
+	{
+		if (psDevInfo->pui32CompletedById)
+		{
+			DevmemReleaseCpuVirtAddr(psDevInfo->psCompletedMemDesc);
+			psDevInfo->pui32CompletedById = IMG_NULL;
+		}
+		DevmemFwFree(psDevInfo->psCompletedMemDesc);
+		psDevInfo->psCompletedMemDesc = IMG_NULL;
+	}
+	if (psDevInfo->psEndTimeMemDesc)
+	{
+		if (psDevInfo->pasEndTimeById)
+		{
+			DevmemReleaseCpuVirtAddr(psDevInfo->psEndTimeMemDesc);
+			psDevInfo->pasEndTimeById = IMG_NULL;
+		}
+
+		DevmemFwFree(psDevInfo->psEndTimeMemDesc);
+		psDevInfo->psEndTimeMemDesc = IMG_NULL;
+	}
+	if (psDevInfo->psStartTimeMemDesc)
+	{
+		if (psDevInfo->pasStartTimeById)
+		{
+			DevmemReleaseCpuVirtAddr(psDevInfo->psStartTimeMemDesc);
+			psDevInfo->pasStartTimeById = IMG_NULL;
+		}
+
+		DevmemFwFree(psDevInfo->psStartTimeMemDesc);
+		psDevInfo->psStartTimeMemDesc = IMG_NULL;
+	}
+}
+
+
+/******************************************************************************
+ FUNCTION	: RGXStartFirmware
+
+ PURPOSE	: Attempts to obtain a slot in the Kernel CCB
+
+ PARAMETERS	: psDevInfo
+
+ RETURNS	: PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXStartFirmware(PVRSRV_RGXDEV_INFO 	*psDevInfo)
+{
+	PVRSRV_ERROR	eError = PVRSRV_OK;
+
+	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "RGXStart: Rogue Firmware Slave boot Start");
+	/*
+	 * Run init script.
+	 */
+	eError = RGXRunScript(psDevInfo, psDevInfo->psScripts->asInitCommands, RGX_MAX_INIT_COMMANDS, PDUMP_FLAGS_CONTINUOUS, IMG_NULL);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXStart: RGXRunScript failed (%d)", eError));
+		return eError;
+	}
+	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "RGXStart: Rogue Firmware startup complete\n");
+	
+	return eError;
+}
+
+
+/******************************************************************************
+ FUNCTION	: RGXAcquireKernelCCBSlot
+
+ PURPOSE	: Attempts to obtain a slot in the Kernel CCB
+
+ PARAMETERS	: psCCB - the CCB
+			: Address of space if available, IMG_NULL otherwise
+
+ RETURNS	: PVRSRV_ERROR
+******************************************************************************/
+static PVRSRV_ERROR RGXAcquireKernelCCBSlot(DEVMEM_MEMDESC *psKCCBCtrlMemDesc,
+											RGXFWIF_CCB_CTL	*psKCCBCtl,
+											IMG_UINT32			*pui32Offset)
+{
+	IMG_UINT32	ui32OldWriteOffset, ui32NextWriteOffset;
+
+	ui32OldWriteOffset = psKCCBCtl->ui32WriteOffset;
+	ui32NextWriteOffset = (ui32OldWriteOffset + 1) & psKCCBCtl->ui32WrapMask;
+
+	/* Note: The MTS can queue up to 255 kicks (254 pending kicks and 1 executing kick)
+	 * Hence the kernel CCB should not queue more 254 commands
+	 */
+	PVR_ASSERT(psKCCBCtl->ui32WrapMask < 255);
+	
+#if defined(PDUMP)
+	/* Wait for sufficient CCB space to become available */
+	PDUMPCOMMENTWITHFLAGS(0, "Wait for kCCB woff=%u", ui32NextWriteOffset);
+	DevmemPDumpCBP(psKCCBCtrlMemDesc,
+	               offsetof(RGXFWIF_CCB_CTL, ui32ReadOffset),
+	               ui32NextWriteOffset,
+	               1,
+	               (psKCCBCtl->ui32WrapMask + 1));
+#endif
+
+	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+	{
+
+		if (ui32NextWriteOffset != psKCCBCtl->ui32ReadOffset)
+		{
+			*pui32Offset = ui32NextWriteOffset;
+			return PVRSRV_OK;
+		}
+		{
+			/* 
+			 * The following sanity check doesn't impact performance,
+			 * since the CPU has to wait for the GPU anyway (full kernel CCB).
+			 */
+			if (PVRSRVGetPVRSRVData()->eServicesState != PVRSRV_SERVICES_STATE_OK)
+			{
+				return PVRSRV_ERROR_KERNEL_CCB_FULL;
+			}
+		}
+
+		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+	} END_LOOP_UNTIL_TIMEOUT();
+
+	/* Time out on waiting for CCB space */
+	return PVRSRV_ERROR_KERNEL_CCB_FULL;
+}
+
+
+PVRSRV_ERROR RGXSendCommandWithPowLock(PVRSRV_RGXDEV_INFO 	*psDevInfo,
+										 RGXFWIF_DM			eKCCBType,
+										 RGXFWIF_KCCB_CMD	*psKCCBCmd,
+										 IMG_UINT32			ui32CmdSize,
+										 IMG_BOOL			bPDumpContinuous)
+{
+	PVRSRV_ERROR		eError;
+	PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode;
+
+	/* Ensure Rogue is powered up before kicking MTS */
+	eError = PVRSRVPowerLock();
+
+	if (eError != PVRSRV_OK) 
+	{
+		PVR_DPF((PVR_DBG_WARNING, "RGXSendCommandWithPowLock: failed to acquire powerlock (%s)",
+					PVRSRVGetErrorStringKM(eError)));
+
+		goto _PVRSRVPowerLock_Exit;
+	}
+
+	PDUMPPOWCMDSTART();
+	eError = PVRSRVSetDevicePowerStateKM(psDeviceNode->sDevId.ui32DeviceIndex,
+										 PVRSRV_DEV_POWER_STATE_ON,
+										 IMG_FALSE);
+	PDUMPPOWCMDEND();
+
+	if (eError != PVRSRV_OK) 
+	{
+		PVR_DPF((PVR_DBG_WARNING, "RGXSendCommandWithPowLock: failed to transition Rogue to ON (%s)",
+					PVRSRVGetErrorStringKM(eError)));
+
+		goto _PVRSRVSetDevicePowerStateKM_Exit;
+	}
+
+	eError = RGXSendCommandRaw(psDevInfo, eKCCBType,  psKCCBCmd, ui32CmdSize, bPDumpContinuous?PDUMP_FLAGS_CONTINUOUS:0);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXSendCommandWithPowLock: failed to schedule command (%s)",
+					PVRSRVGetErrorStringKM(eError)));
+#if defined(DEBUG)
+		/* PVRSRVDebugRequest must be called without powerlock */
+		PVRSRVPowerUnlock();
+		PVRSRVDebugRequest(DEBUG_REQUEST_VERBOSITY_MAX, IMG_NULL);
+		goto _PVRSRVPowerLock_Exit;
+#endif
+	}
+
+_PVRSRVSetDevicePowerStateKM_Exit:
+	PVRSRVPowerUnlock();
+
+_PVRSRVPowerLock_Exit:
+	return eError;
+}
+
+PVRSRV_ERROR RGXSendCommandRaw(PVRSRV_RGXDEV_INFO 	*psDevInfo,
+								 RGXFWIF_DM			eKCCBType,
+								 RGXFWIF_KCCB_CMD	*psKCCBCmd,
+								 IMG_UINT32			ui32CmdSize,
+								 PDUMP_FLAGS_T		uiPdumpFlags)
+{
+	PVRSRV_ERROR		eError;
+	RGXFWIF_CCB_CTL		*psKCCBCtl = psDevInfo->apsKernelCCBCtl[eKCCBType];
+	IMG_UINT8			*pui8KCCB = psDevInfo->apsKernelCCB[eKCCBType];
+	IMG_UINT32			ui32NewWriteOffset;
+	IMG_UINT32			ui32OldWriteOffset = psKCCBCtl->ui32WriteOffset;
+#if !defined(PDUMP)
+	PVR_UNREFERENCED_PARAMETER(uiPdumpFlags);
+#endif
+#if defined(PDUMP)
+	IMG_BOOL bIsInCaptureRange;
+	IMG_BOOL bPdumpEnabled;
+	IMG_BOOL bPDumpContinuous = (uiPdumpFlags & PDUMP_FLAGS_CONTINUOUS) != 0;
+	IMG_BOOL bPDumpPowTrans = PDUMPPOWCMDINTRANS();
+
+	PDumpIsCaptureFrameKM(&bIsInCaptureRange);
+	bPdumpEnabled = (bIsInCaptureRange || bPDumpContinuous) && !bPDumpPowTrans;
+
+	/* in capture range */
+	if (bPdumpEnabled)
+	{
+		if (!psDevInfo->abDumpedKCCBCtlAlready[eKCCBType])
+		{
+			/* entering capture range */
+			psDevInfo->abDumpedKCCBCtlAlready[eKCCBType] = IMG_TRUE;
+
+			/* wait for firmware to catch up */
+			PVR_DPF((PVR_DBG_MESSAGE, "RGXSendCommandRaw: waiting on fw to catch-up. DM: %d, roff: %d, woff: %d",
+						eKCCBType, psKCCBCtl->ui32ReadOffset, ui32OldWriteOffset));
+			PVRSRVPollForValueKM(&psKCCBCtl->ui32ReadOffset, ui32OldWriteOffset, 0xFFFFFFFF);
+
+			/* Dump Init state of Kernel CCB control (read and write offset) */
+			PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Initial state of kernel CCB Control(%d), roff: %d, woff: %d", eKCCBType, psKCCBCtl->ui32ReadOffset, psKCCBCtl->ui32WriteOffset);
+			DevmemPDumpLoadMem(psDevInfo->apsKernelCCBCtlMemDesc[eKCCBType],
+					0,
+					sizeof(RGXFWIF_CCB_CTL),
+					PDUMP_FLAGS_CONTINUOUS);
+		}
+	}
+#endif
+
+	PVR_ASSERT(ui32CmdSize == psKCCBCtl->ui32CmdSize);
+	if (!OSLockIsLocked(PVRSRVGetPVRSRVData()->hPowerLock))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXSendCommandRaw called without power lock held!"));
+		PVR_ASSERT(OSLockIsLocked(PVRSRVGetPVRSRVData()->hPowerLock));
+	}
+
+	/*
+	 * Acquire a slot in the CCB.
+	 */ 
+	eError = RGXAcquireKernelCCBSlot(psDevInfo->apsKernelCCBCtlMemDesc[eKCCBType], psKCCBCtl, &ui32NewWriteOffset);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXSendCommandRaw failed to acquire CCB slot. Type:%u Error:%u",
+				eKCCBType, eError));
+		goto _RGXSendCommandRaw_Exit;
+	}
+	
+	/*
+	 * Copy the command into the CCB.
+	 */
+	OSMemCopy(&pui8KCCB[ui32OldWriteOffset * psKCCBCtl->ui32CmdSize],
+			  psKCCBCmd, psKCCBCtl->ui32CmdSize);
+
+	/* ensure kCCB data is written before the offsets */
+	OSWriteMemoryBarrier();
+
+	/* Move past the current command */
+	psKCCBCtl->ui32WriteOffset = ui32NewWriteOffset;
+
+
+#if defined(PDUMP)
+	/* in capture range */
+	if (bPdumpEnabled)
+	{
+		/* Dump new Kernel CCB content */
+		PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Dump kCCB(%d) cmd, woff = %d", eKCCBType, ui32OldWriteOffset);
+		DevmemPDumpLoadMem(psDevInfo->apsKernelCCBMemDesc[eKCCBType],
+				ui32OldWriteOffset * psKCCBCtl->ui32CmdSize,
+				psKCCBCtl->ui32CmdSize,
+				PDUMP_FLAGS_CONTINUOUS);
+
+		/* Dump new kernel CCB write offset */
+		PDUMPCOMMENTWITHFLAGS(uiPdumpFlags, "Dump kCCBCtl(%d) woff: %d", eKCCBType, ui32NewWriteOffset);
+		DevmemPDumpLoadMem(psDevInfo->apsKernelCCBCtlMemDesc[eKCCBType],
+							   offsetof(RGXFWIF_CCB_CTL, ui32WriteOffset),
+							   sizeof(IMG_UINT32),
+							   uiPdumpFlags);
+	}
+
+	/* out of capture range */
+	if (!bPdumpEnabled)
+	{
+		eError = RGXPdumpDrainKCCB(psDevInfo, ui32OldWriteOffset, eKCCBType);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_WARNING, "RGXSendCommandRaw: problem draining kCCB (%d)", eError));
+			goto _RGXSendCommandRaw_Exit;
+		}
+	}
+#endif
+
+
+	PDUMPCOMMENTWITHFLAGS(uiPdumpFlags, "MTS kick for kernel CCB %d", eKCCBType);
+	/*
+	 * Kick the MTS to schedule the firmware.
+	 */
+	{
+		IMG_UINT32	ui32MTSRegVal = (eKCCBType & ~RGX_CR_MTS_SCHEDULE_DM_CLRMSK) | RGX_CR_MTS_SCHEDULE_TASK_COUNTED;
+		
+		__MTSScheduleWrite(psDevInfo, ui32MTSRegVal);
+
+		PDUMPREG32(RGX_PDUMPREG_NAME, RGX_CR_MTS_SCHEDULE, ui32MTSRegVal, uiPdumpFlags);
+	}
+	
+#if defined (NO_HARDWARE)
+	/* keep the roff updated because fw isn't there to update it */
+	psKCCBCtl->ui32ReadOffset = psKCCBCtl->ui32WriteOffset;
+#endif
+
+_RGXSendCommandRaw_Exit:
+	return eError;
+}
+
+IMG_VOID RGXScheduleProcessQueuesKM(PVRSRV_CMDCOMP_HANDLE hCmdCompHandle)
+{
+	PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE*) hCmdCompHandle;
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+	OSScheduleMISR(psDevInfo->hProcessQueuesMISR);
+}
+
+/*!
+******************************************************************************
+
+ @Function	_RGXScheduleProcessQueuesMISR
+
+ @Description - Sends uncounted kick to all the DMs (the FW will process all
+				the queue for all the DMs)
+******************************************************************************/
+static IMG_VOID _RGXScheduleProcessQueuesMISR(IMG_VOID *pvData)
+{
+	PVRSRV_DEVICE_NODE     *psDeviceNode = pvData;
+	PVRSRV_RGXDEV_INFO     *psDevInfo = psDeviceNode->pvDevice;
+	RGXFWIF_DM			   eDM;
+	PVRSRV_ERROR		   eError;
+	PVRSRV_DEV_POWER_STATE ePowerState;
+
+	/* We don't need to acquire the BridgeLock as this power transition won't
+	   send a command to the FW */
+	eError = PVRSRVPowerLock();
+	if (eError != PVRSRV_OK) 
+	{
+		PVR_DPF((PVR_DBG_WARNING, "RGXScheduleProcessQueuesKM: failed to acquire powerlock (%s)",
+					PVRSRVGetErrorStringKM(eError)));
+
+		return;
+	}
+
+	/* Check whether it's worth waking up the GPU */
+	eError = PVRSRVGetDevicePowerState(psDeviceNode->sDevId.ui32DeviceIndex, &ePowerState);
+
+	if ((eError == PVRSRV_OK) && (ePowerState == PVRSRV_DEV_POWER_STATE_OFF))
+	{
+		RGXFWIF_GPU_UTIL_FWCB  *psUtilFWCb = psDevInfo->psRGXFWIfGpuUtilFWCb;
+		IMG_BOOL               bGPUHasWorkWaiting;
+
+		bGPUHasWorkWaiting =
+		    (RGXFWIF_GPU_UTIL_GET_STATE(psUtilFWCb->ui64LastWord) == RGXFWIF_GPU_UTIL_STATE_BLOCKED);
+
+		if (!bGPUHasWorkWaiting)
+		{
+			/* all queues are empty, don't wake up the GPU */
+			PVRSRVPowerUnlock();
+			return;
+		}
+	}
+
+	PDUMPPOWCMDSTART();
+	/* wake up the GPU */
+	eError = PVRSRVSetDevicePowerStateKM(psDeviceNode->sDevId.ui32DeviceIndex,
+										 PVRSRV_DEV_POWER_STATE_ON,
+										 IMG_FALSE);
+	PDUMPPOWCMDEND();
+
+	if (eError != PVRSRV_OK) 
+	{
+		PVR_DPF((PVR_DBG_WARNING, "RGXScheduleProcessQueuesKM: failed to transition Rogue to ON (%s)",
+					PVRSRVGetErrorStringKM(eError)));
+
+		PVRSRVPowerUnlock();
+		return;
+	}
+
+	/* uncounted kick for all DMs */
+	for (eDM = RGXFWIF_HWDM_MIN; eDM < RGXFWIF_HWDM_MAX; eDM++)
+	{
+		IMG_UINT32	ui32MTSRegVal = (eDM & ~RGX_CR_MTS_SCHEDULE_DM_CLRMSK) | RGX_CR_MTS_SCHEDULE_TASK_NON_COUNTED;
+
+		__MTSScheduleWrite(psDevInfo, ui32MTSRegVal);
+	}
+
+	PVRSRVPowerUnlock();
+}
+
+PVRSRV_ERROR RGXInstallProcessQueuesMISR(IMG_HANDLE *phMISR, PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	return OSInstallMISR(phMISR,
+	                     _RGXScheduleProcessQueuesMISR,
+	                     psDeviceNode);
+}
+
+typedef struct _DEVMEM_COMMON_CONTEXT_LOOKUP_
+{
+	IMG_UINT32                 ui32ContextID;
+	RGX_SERVER_COMMON_CONTEXT  *psServerCommonContext;
+} DEVMEM_COMMON_CONTEXT_LOOKUP;
+
+
+static IMG_BOOL _FindServerCommonContext(PDLLIST_NODE psNode, IMG_PVOID pvCallbackData)
+{
+	DEVMEM_COMMON_CONTEXT_LOOKUP  *psRefLookUp = (DEVMEM_COMMON_CONTEXT_LOOKUP *)pvCallbackData;
+	RGX_SERVER_COMMON_CONTEXT     *psServerCommonContext;
+
+	psServerCommonContext = IMG_CONTAINER_OF(psNode, RGX_SERVER_COMMON_CONTEXT, sListNode);
+
+	if (psServerCommonContext->ui32ContextID == psRefLookUp->ui32ContextID)
+	{
+		psRefLookUp->psServerCommonContext = psServerCommonContext;
+		return IMG_FALSE;
+	}
+	else
+	{
+		return IMG_TRUE;
+	}
+}
+
+
+/*!
+******************************************************************************
+
+ @Function	RGXScheduleCommand
+
+ @Description - Submits a CCB command and kicks the firmware but first schedules
+                any commands which have to happen before handle  
+
+ @Input psDevInfo - pointer to device info
+ @Input eKCCBType - see RGXFWIF_CMD_*
+ @Input pvKCCBCmd - kernel CCB command
+ @Input ui32CmdSize -
+ @Input bPDumpContinuous - TRUE if the pdump flags should be continuous
+
+
+ @Return ui32Error - success or failure
+
+******************************************************************************/
+PVRSRV_ERROR RGXScheduleCommand(PVRSRV_RGXDEV_INFO 	*psDevInfo,
+								RGXFWIF_DM			eKCCBType,
+								RGXFWIF_KCCB_CMD	*psKCCBCmd,
+								IMG_UINT32			ui32CmdSize,
+								IMG_BOOL			bPDumpContinuous)
+{
+	PVRSRV_DATA *psData = PVRSRVGetPVRSRVData();
+	PVRSRV_ERROR eError;
+
+	if ((eKCCBType == RGXFWIF_DM_3D) || (eKCCBType == RGXFWIF_DM_2D) || (eKCCBType == RGXFWIF_DM_CDM))
+	{
+		/* This handles the no operation case */
+		OSCPUOperation(psData->uiCacheOp);
+		psData->uiCacheOp = PVRSRV_CACHE_OP_NONE;
+	}
+
+	eError = RGXPreKickCacheCommand(psDevInfo);
+	if (eError != PVRSRV_OK) goto RGXScheduleCommand_exit;
+
+	eError = RGXSendCommandWithPowLock(psDevInfo, eKCCBType, psKCCBCmd, ui32CmdSize, bPDumpContinuous);
+	if (eError != PVRSRV_OK) goto RGXScheduleCommand_exit;
+
+
+RGXScheduleCommand_exit:
+	return eError;
+}
+
+/*
+ * RGXCheckFirmwareCCBs
+ */
+IMG_VOID RGXCheckFirmwareCCBs(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	RGXFWIF_FWCCB_CMD 	*psFwCCBCmd;
+	IMG_UINT32 			ui32DMCount;
+
+	for (ui32DMCount = 0; ui32DMCount < RGXFWIF_DM_MAX; ui32DMCount++)
+	{
+		RGXFWIF_CCB_CTL *psFWCCBCtl = psDevInfo->apsFirmwareCCBCtl[ui32DMCount];
+		IMG_UINT8 		*psFWCCB = psDevInfo->apsFirmwareCCB[ui32DMCount];
+
+		while (psFWCCBCtl->ui32ReadOffset != psFWCCBCtl->ui32WriteOffset)
+		{
+			/* Point to the next command */
+			psFwCCBCmd = ((RGXFWIF_FWCCB_CMD *)psFWCCB) + psFWCCBCtl->ui32ReadOffset;
+
+			switch(psFwCCBCmd->eCmdType)
+			{
+				case RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING:
+				{
+					if (psDevInfo->bPDPEnabled)
+					{
+						PDUMP_PANIC(RGX, ZSBUFFER_BACKING, "Request to add backing to ZSBuffer");
+					}
+					RGXProcessRequestZSBufferBacking(psDevInfo,
+													psFwCCBCmd->uCmdData.sCmdZSBufferBacking.ui32ZSBufferID);
+					break;
+				}
+
+				case RGXFWIF_FWCCB_CMD_ZSBUFFER_UNBACKING:
+				{
+					if (psDevInfo->bPDPEnabled)
+					{
+						PDUMP_PANIC(RGX, ZSBUFFER_UNBACKING, "Request to remove backing from ZSBuffer");
+					}
+					RGXProcessRequestZSBufferUnbacking(psDevInfo,
+													psFwCCBCmd->uCmdData.sCmdZSBufferBacking.ui32ZSBufferID);
+					break;
+				}
+
+				case RGXFWIF_FWCCB_CMD_FREELIST_GROW:
+				{
+					if (psDevInfo->bPDPEnabled)
+					{
+						PDUMP_PANIC(RGX, FREELIST_GROW, "Request to grow the free list");
+					}
+					RGXProcessRequestGrow(psDevInfo,
+										psFwCCBCmd->uCmdData.sCmdFreeListGS.ui32FreelistID);
+					break;
+				}
+
+				case RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION:
+				{
+					if (psDevInfo->bPDPEnabled)
+					{
+						PDUMP_PANIC(RGX, FREELISTS_RECONSTRUCTION, "Request to reconstruct free lists");
+					}
+
+					PVR_DPF((PVR_DBG_MESSAGE, "RGXCheckFirmwareCCBs: Freelist reconstruction request (%d/%d) for %d freelists",
+					        psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32HwrCounter+1,
+					        psDevInfo->psRGXFWIfTraceBuf->ui32HwrCounter+1,
+					        psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32FreelistsCount));
+
+					RGXProcessRequestFreelistsReconstruction(psDevInfo, ui32DMCount,
+										psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32FreelistsCount,
+										psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.aui32FreelistIDs);
+					break;
+				}
+
+				case RGXFWIF_FWCCB_CMD_CONTEXT_RESET_NOTIFICATION:
+				{
+					DEVMEM_COMMON_CONTEXT_LOOKUP  sLookUp;
+
+					sLookUp.ui32ContextID         = psFwCCBCmd->uCmdData.sCmdContextResetNotification.ui32ServerCommonContextID;
+					sLookUp.psServerCommonContext = IMG_NULL;
+					
+					dllist_foreach_node(&psDevInfo->sCommonCtxtListHead, _FindServerCommonContext, (IMG_PVOID)&sLookUp);
+
+					PVR_DPF((PVR_DBG_MESSAGE, "RGXCheckFirmwareCCBs: Context 0x%p reset (ID=0x%08x, Reason=%d)",
+					        sLookUp.psServerCommonContext,
+					        (IMG_UINT32)(psFwCCBCmd->uCmdData.sCmdContextResetNotification.ui32ServerCommonContextID),
+					        (IMG_UINT32)(psFwCCBCmd->uCmdData.sCmdContextResetNotification.eResetReason)));
+
+					if (sLookUp.psServerCommonContext != IMG_NULL)
+					{
+						sLookUp.psServerCommonContext->eLastResetReason = psFwCCBCmd->uCmdData.sCmdContextResetNotification.eResetReason;
+					}
+					break;
+				}
+
+				case RGXFWIF_FWCCB_CMD_DEBUG_DUMP:
+				{
+					RGXDumpDebugInfo(IMG_NULL,psDevInfo);
+					break;
+				}
+
+				case RGXFWIF_FWCCB_CMD_UPDATE_STATS:
+				{
+                    IMG_PID	   pidTmp = psFwCCBCmd->uCmdData.sCmdUpdateStatsData.pidOwner;
+                    IMG_INT32  i32AdjustmentValue = psFwCCBCmd->uCmdData.sCmdUpdateStatsData.i32AdjustmentValue;
+
+                    switch (psFwCCBCmd->uCmdData.sCmdUpdateStatsData.eElementToUpdate)
+					{
+						case RGXFWIF_FWCCB_CMD_UPDATE_NUM_PARTIAL_RENDERS:
+						{
+                            PVRSRVStatsUpdateRenderContextStats(i32AdjustmentValue,0,0,0,0,0,pidTmp);
+							break;
+						}
+						case RGXFWIF_FWCCB_CMD_UPDATE_NUM_OUT_OF_MEMORY:
+						{
+							PVRSRVStatsUpdateRenderContextStats(0,i32AdjustmentValue,0,0,0,0,pidTmp);
+							break;
+						}
+						case RGXFWIF_FWCCB_CMD_UPDATE_NUM_TA_STORES:
+						{
+							PVRSRVStatsUpdateRenderContextStats(0,0,i32AdjustmentValue,0,0,0,pidTmp);
+							break;
+						}
+						case RGXFWIF_FWCCB_CMD_UPDATE_NUM_3D_STORES:
+						{
+							PVRSRVStatsUpdateRenderContextStats(0,0,0,i32AdjustmentValue,0,0,pidTmp);
+							break;
+						}
+						case RGXFWIF_FWCCB_CMD_UPDATE_NUM_SH_STORES:
+						{
+							PVRSRVStatsUpdateRenderContextStats(0,0,0,0,i32AdjustmentValue,0,pidTmp);
+							break;
+						}
+						case RGXFWIF_FWCCB_CMD_UPDATE_NUM_CDM_STORES:
+						{
+							PVRSRVStatsUpdateRenderContextStats(0,0,0,0,0,i32AdjustmentValue,pidTmp);
+							break;
+						}
+					}
+					break;
+				}
+				default:
+					PVR_ASSERT(IMG_FALSE);
+			}
+
+			/* Update read offset */
+			psFWCCBCtl->ui32ReadOffset = (psFWCCBCtl->ui32ReadOffset + 1) & psFWCCBCtl->ui32WrapMask;
+		}
+	}
+}
+
+/*
+ * PVRSRVRGXFrameworkCopyCommand
+ */ 
+PVRSRV_ERROR PVRSRVRGXFrameworkCopyCommand(DEVMEM_MEMDESC	*psFWFrameworkMemDesc,
+										   IMG_PBYTE		pbyGPUFRegisterList,
+										   IMG_UINT32		ui32FrameworkRegisterSize)
+{
+	PVRSRV_ERROR	eError;
+	RGXFWIF_RF_REGISTERS	*psRFReg;
+
+	eError = DevmemAcquireCpuVirtAddr(psFWFrameworkMemDesc,
+                                      (IMG_VOID **)&psRFReg);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXFrameworkCopyCommand: Failed to map firmware render context state (%u)",
+				eError));
+		return eError;
+	}
+
+	OSMemCopy(psRFReg, pbyGPUFRegisterList, ui32FrameworkRegisterSize);
+	
+	/* Release the CPU mapping */
+	DevmemReleaseCpuVirtAddr(psFWFrameworkMemDesc);
+
+	/*
+	 * Dump the FW framework buffer
+	 */
+	PDUMPCOMMENT("Dump FWFramework buffer");
+	DevmemPDumpLoadMem(psFWFrameworkMemDesc, 0, ui32FrameworkRegisterSize, PDUMP_FLAGS_CONTINUOUS);
+
+	return PVRSRV_OK;
+}
+
+/*
+ * PVRSRVRGXFrameworkCreateKM
+ */
+PVRSRV_ERROR PVRSRVRGXFrameworkCreateKM(PVRSRV_DEVICE_NODE	*psDeviceNode,
+										DEVMEM_MEMDESC		**ppsFWFrameworkMemDesc,
+										IMG_UINT32			ui32FrameworkCommandSize)
+{
+	PVRSRV_ERROR			eError;
+	PVRSRV_RGXDEV_INFO 		*psDevInfo = psDeviceNode->pvDevice;
+	
+	/*
+		Allocate device memory for the firmware GPU framework state.
+		Sufficient info to kick one or more DMs should be contained in this buffer
+	*/
+	PDUMPCOMMENT("Allocate Rogue firmware framework state");
+
+	eError = DevmemFwAllocate(psDevInfo,
+							  ui32FrameworkCommandSize,
+							  RGX_FWCOMCTX_ALLOCFLAGS,
+							  "FirmwareGPUFrameworkState",
+							  ppsFWFrameworkMemDesc);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXFrameworkContextKM: Failed to allocate firmware framework state (%u)",
+				eError));
+		return eError;
+	}
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXWaitForFWOp(PVRSRV_RGXDEV_INFO	*psDevInfo,
+							RGXFWIF_DM eDM,
+							PVRSRV_CLIENT_SYNC_PRIM *psSyncPrim,
+							IMG_BOOL bPDumpContinuous)
+{
+	PVRSRV_ERROR		eError = PVRSRV_OK;
+	PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode;
+	RGXFWIF_KCCB_CMD	sCmdSyncPrim;
+
+	/* Ensure Rogue is powered up before kicking MTS */
+	eError = PVRSRVPowerLock();
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXWaitForFWOp: failed to acquire powerlock (%s)",
+					PVRSRVGetErrorStringKM(eError)));
+
+		goto _PVRSRVPowerLock_Exit;
+	}
+
+	PDUMPPOWCMDSTART();
+	eError = PVRSRVSetDevicePowerStateKM(psDeviceNode->sDevId.ui32DeviceIndex,
+										 PVRSRV_DEV_POWER_STATE_ON,
+										 IMG_FALSE);
+	PDUMPPOWCMDEND();
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXWaitForFWOp: failed to transition Rogue to ON (%s)",
+					PVRSRVGetErrorStringKM(eError)));
+
+		goto _PVRSRVSetDevicePowerStateKM_Exit;
+	}
+
+
+
+
+	/* Setup sync primitive */
+	SyncPrimSet(psSyncPrim, 0);
+
+	/* prepare a sync command */
+	sCmdSyncPrim.eCmdType = RGXFWIF_KCCB_CMD_SYNC;
+	sCmdSyncPrim.uCmdData.sSyncData.uiSyncObjDevVAddr = SyncPrimGetFirmwareAddr(psSyncPrim);
+	sCmdSyncPrim.uCmdData.sSyncData.uiUpdateVal = 1;
+
+	PDUMPCOMMENT("RGXWaitForFWOp: Submit Kernel SyncPrim [0x%08x] to DM %d ", sCmdSyncPrim.uCmdData.sSyncData.uiSyncObjDevVAddr, eDM);
+
+	/* submit the sync primitive to the kernel CCB */
+	eError = RGXSendCommandRaw(psDevInfo,
+								eDM,
+								&sCmdSyncPrim,
+								sizeof(RGXFWIF_KCCB_CMD),
+								bPDumpContinuous  ? PDUMP_FLAGS_CONTINUOUS:0);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXScheduleCommandAndWait: Failed to schedule Kernel SyncPrim with error (%u)", eError));
+		goto _RGXSendCommandRaw_Exit;
+	}
+
+	/* Wait for sync primitive to be updated */
+#if defined(PDUMP)
+	PDUMPCOMMENT("RGXScheduleCommandAndWait: Poll for Kernel SyncPrim [0x%08x] on DM %d ", sCmdSyncPrim.uCmdData.sSyncData.uiSyncObjDevVAddr, eDM);
+
+	SyncPrimPDumpPol(psSyncPrim,
+					1,
+					0xffffffff,
+					PDUMP_POLL_OPERATOR_EQUAL,
+					bPDumpContinuous ? PDUMP_FLAGS_CONTINUOUS:0);
+#endif
+
+	{
+		RGXFWIF_CCB_CTL  *psKCCBCtl = psDevInfo->apsKernelCCBCtl[eDM];
+		IMG_UINT32       ui32CurrentQueueLength = (psKCCBCtl->ui32WrapMask+1 +
+												   psKCCBCtl->ui32WriteOffset -
+												   psKCCBCtl->ui32ReadOffset) & psKCCBCtl->ui32WrapMask;
+		IMG_UINT32       ui32MaxRetries;
+
+		for (ui32MaxRetries = (ui32CurrentQueueLength + 1) * 3;
+			 ui32MaxRetries > 0;
+			 ui32MaxRetries--)
+		{
+			eError = PVRSRVWaitForValueKMAndHoldBridgeLockKM(psSyncPrim->pui32LinAddr, 1, 0xffffffff);
+
+			if (eError != PVRSRV_ERROR_TIMEOUT)
+			{
+				break;
+			}
+		}
+
+		if (eError == PVRSRV_ERROR_TIMEOUT)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"RGXScheduleCommandAndWait: PVRSRVWaitForValueKMAndHoldBridgeLock timed out. Dump debug information."));
+			PVRSRVPowerUnlock();
+
+			PVRSRVDebugRequest(DEBUG_REQUEST_VERBOSITY_MAX,IMG_NULL);
+			PVR_ASSERT(eError != PVRSRV_ERROR_TIMEOUT);
+			goto _PVRSRVDebugRequest_Exit;
+		}
+	}
+
+_RGXSendCommandRaw_Exit:
+_PVRSRVSetDevicePowerStateKM_Exit:
+
+	PVRSRVPowerUnlock();
+
+_PVRSRVDebugRequest_Exit:
+_PVRSRVPowerLock_Exit:
+	return eError;
+}
+
+static
+PVRSRV_ERROR RGXScheduleCleanupCommand(PVRSRV_RGXDEV_INFO 	*psDevInfo,
+									   RGXFWIF_DM			eDM,
+									   RGXFWIF_KCCB_CMD		*psKCCBCmd,
+									   IMG_UINT32			ui32CmdSize,
+									   RGXFWIF_CLEANUP_TYPE	eCleanupType,
+									   PVRSRV_CLIENT_SYNC_PRIM *psSyncPrim,
+									   IMG_BOOL				bPDumpContinuous)
+{
+	PVRSRV_ERROR eError;
+
+	psKCCBCmd->eCmdType = RGXFWIF_KCCB_CMD_CLEANUP;
+
+	psKCCBCmd->uCmdData.sCleanupData.eCleanupType = eCleanupType;
+	psKCCBCmd->uCmdData.sCleanupData.uiSyncObjDevVAddr = SyncPrimGetFirmwareAddr(psSyncPrim);
+
+	SyncPrimSet(psSyncPrim, 0);
+
+	/*
+		Send the cleanup request to the firmware. If the resource is still busy
+		the firmware will tell us and we'll drop out with a retry.
+	*/
+	eError = RGXScheduleCommand(psDevInfo,
+								eDM,
+								psKCCBCmd,
+								ui32CmdSize,
+								bPDumpContinuous);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_command;
+	}
+
+	/* Wait for sync primitive to be updated */
+#if defined(PDUMP)
+	PDUMPCOMMENT("Wait for the firmware to reply to the cleanup command");
+	SyncPrimPDumpPol(psSyncPrim,
+					RGXFWIF_CLEANUP_RUN,
+					RGXFWIF_CLEANUP_RUN,
+					PDUMP_POLL_OPERATOR_EQUAL,
+					bPDumpContinuous ? PDUMP_FLAGS_CONTINUOUS:0);
+
+	/*
+	 * The cleanup request to the firmware will tell us if a given resource is busy or not.
+	 * If the RGXFWIF_CLEANUP_BUSY flag is set, this means that the resource is still in use.
+	 * In this case we return a PVRSRV_ERROR_RETRY error to the client drivers and they will
+	 * re-issue the cleanup request until it succeed.
+	 *
+	 * Since this retry mechanism doesn't work for pdumps, client drivers should ensure
+	 * that cleanup requests are only submitted if the resource is unused.
+	 * If this is not the case, the following poll will block infinitely, making sure
+	 * the issue doesn't go unnoticed.
+	 */
+	PDUMPCOMMENT("Cleanup: If this poll fails, the following resource is still in use (DM=%u, type=%u, address=0x%08x), which is incorrect in pdumps",
+					eDM,
+					psKCCBCmd->uCmdData.sCleanupData.eCleanupType,
+					psKCCBCmd->uCmdData.sCleanupData.uCleanupData.psContext.ui32Addr);
+	SyncPrimPDumpPol(psSyncPrim,
+					0,
+					RGXFWIF_CLEANUP_BUSY,
+					PDUMP_POLL_OPERATOR_EQUAL,
+					bPDumpContinuous ? PDUMP_FLAGS_CONTINUOUS:0);
+#endif
+
+	{
+		RGXFWIF_CCB_CTL  *psKCCBCtl = psDevInfo->apsKernelCCBCtl[eDM];
+		IMG_UINT32       ui32CurrentQueueLength = (psKCCBCtl->ui32WrapMask+1 +
+		                                           psKCCBCtl->ui32WriteOffset -
+		                                           psKCCBCtl->ui32ReadOffset) & psKCCBCtl->ui32WrapMask;
+		IMG_UINT32       ui32MaxRetries;
+
+		for (ui32MaxRetries = ui32CurrentQueueLength + 1;
+			 ui32MaxRetries > 0;
+			 ui32MaxRetries--)
+		{
+			eError = PVRSRVWaitForValueKMAndHoldBridgeLockKM(psSyncPrim->pui32LinAddr, RGXFWIF_CLEANUP_RUN, RGXFWIF_CLEANUP_RUN);
+
+			if (eError != PVRSRV_ERROR_TIMEOUT)
+			{
+				break;
+			}
+		}
+
+		/*
+			If the firmware hasn't got back to us in a timely manner
+			then bail and let the caller retry the command.
+		*/
+		if (eError == PVRSRV_ERROR_TIMEOUT)
+		{
+			PVR_DPF((PVR_DBG_WARNING,"RGXScheduleCleanupCommand: PVRSRVWaitForValueKMAndHoldBridgeLock timed out. Dump debug information."));
+
+			eError = PVRSRV_ERROR_RETRY;
+#if defined(DEBUG)
+			    PVRSRVDebugRequest(DEBUG_REQUEST_VERBOSITY_MAX,IMG_NULL);
+#endif
+			goto fail_poll;
+		}
+		else if (eError != PVRSRV_OK)
+		{
+			goto fail_poll;
+		}
+	}
+
+	/*
+		If the command has was run but a resource was busy, then the request
+		will need to be retried.
+	*/
+	if (*psSyncPrim->pui32LinAddr & RGXFWIF_CLEANUP_BUSY)
+	{
+		eError = PVRSRV_ERROR_RETRY;
+		goto fail_requestbusy;
+	}
+
+	return PVRSRV_OK;
+
+fail_requestbusy:
+fail_poll:
+fail_command:
+	PVR_ASSERT(eError != PVRSRV_OK);
+
+	return eError;
+}
+
+/*
+	RGXRequestCommonContextCleanUp
+*/
+PVRSRV_ERROR RGXFWRequestCommonContextCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode,
+											  PRGXFWIF_FWCOMMONCONTEXT psFWCommonContextFWAddr,
+											  PVRSRV_CLIENT_SYNC_PRIM *psSyncPrim,
+											  RGXFWIF_DM eDM)
+{
+	RGXFWIF_KCCB_CMD			sRCCleanUpCmd = {0};
+	PVRSRV_ERROR				eError;
+
+	PDUMPCOMMENT("Common ctx cleanup Request DM%d [context = 0x%08x]", eDM, psFWCommonContextFWAddr.ui32Addr);
+
+	/* Setup our command data, the cleanup call will fill in the rest */
+	sRCCleanUpCmd.uCmdData.sCleanupData.uCleanupData.psContext = psFWCommonContextFWAddr;
+
+	/* Request cleanup of the firmware resource */
+	eError = RGXScheduleCleanupCommand(psDeviceNode->pvDevice,
+									   eDM,
+									   &sRCCleanUpCmd,
+									   sizeof(RGXFWIF_KCCB_CMD),
+									   RGXFWIF_CLEANUP_FWCOMMONCONTEXT,
+									   psSyncPrim,
+									   IMG_FALSE);
+
+	if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXRequestCommonContextCleanUp: Failed to schedule a memory context cleanup with error (%u)", eError));
+	}
+
+	return eError;
+}
+
+/*
+ * RGXRequestHWRTDataCleanUp
+ */
+
+PVRSRV_ERROR RGXFWRequestHWRTDataCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode,
+										 PRGXFWIF_HWRTDATA psHWRTData,
+										 PVRSRV_CLIENT_SYNC_PRIM *psSync,
+										 RGXFWIF_DM eDM)
+{
+	RGXFWIF_KCCB_CMD			sHWRTDataCleanUpCmd = {0};
+	PVRSRV_ERROR				eError;
+
+	PDUMPCOMMENT("HW RTData cleanup Request DM%d [HWRTData = 0x%08x]", eDM, psHWRTData.ui32Addr);
+
+	sHWRTDataCleanUpCmd.uCmdData.sCleanupData.uCleanupData.psHWRTData = psHWRTData;
+
+	eError = RGXScheduleCleanupCommand(psDeviceNode->pvDevice,
+									   eDM,
+									   &sHWRTDataCleanUpCmd,
+									   sizeof(sHWRTDataCleanUpCmd),
+									   RGXFWIF_CLEANUP_HWRTDATA,
+									   psSync,
+									   IMG_FALSE);
+
+	if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXRequestHWRTDataCleanUp: Failed to schedule a HWRTData cleanup with error (%u)", eError));
+	}
+
+	return eError;
+}
+
+/*
+	RGXFWRequestFreeListCleanUp
+*/
+PVRSRV_ERROR RGXFWRequestFreeListCleanUp(PVRSRV_RGXDEV_INFO *psDevInfo,
+										 PRGXFWIF_FREELIST psFWFreeList,
+										 PVRSRV_CLIENT_SYNC_PRIM *psSync)
+{
+	RGXFWIF_KCCB_CMD			sFLCleanUpCmd = {0};
+	PVRSRV_ERROR 				eError;
+
+	PDUMPCOMMENT("Free list cleanup Request [FreeList = 0x%08x]", psFWFreeList.ui32Addr);
+
+	/* Setup our command data, the cleanup call will fill in the rest */
+	sFLCleanUpCmd.uCmdData.sCleanupData.uCleanupData.psFreelist = psFWFreeList;
+
+	/* Request cleanup of the firmware resource */
+	eError = RGXScheduleCleanupCommand(psDevInfo,
+									   RGXFWIF_DM_GP,
+									   &sFLCleanUpCmd,
+									   sizeof(RGXFWIF_KCCB_CMD),
+									   RGXFWIF_CLEANUP_FREELIST,
+									   psSync,
+									   IMG_FALSE);
+
+	if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXFWRequestFreeListCleanUp: Failed to schedule a memory context cleanup with error (%u)", eError));
+	}
+
+	return eError;
+}
+
+/*
+	RGXFWRequestZSBufferCleanUp
+*/
+PVRSRV_ERROR RGXFWRequestZSBufferCleanUp(PVRSRV_RGXDEV_INFO *psDevInfo,
+										 PRGXFWIF_ZSBUFFER psFWZSBuffer,
+										 PVRSRV_CLIENT_SYNC_PRIM *psSync)
+{
+	RGXFWIF_KCCB_CMD			sZSBufferCleanUpCmd = {0};
+	PVRSRV_ERROR 				eError;
+
+	PDUMPCOMMENT("ZS Buffer cleanup Request [ZS Buffer = 0x%08x]", psFWZSBuffer.ui32Addr);
+
+	/* Setup our command data, the cleanup call will fill in the rest */
+	sZSBufferCleanUpCmd.uCmdData.sCleanupData.uCleanupData.psZSBuffer = psFWZSBuffer;
+
+	/* Request cleanup of the firmware resource */
+	eError = RGXScheduleCleanupCommand(psDevInfo,
+									   RGXFWIF_DM_3D,
+									   &sZSBufferCleanUpCmd,
+									   sizeof(RGXFWIF_KCCB_CMD),
+									   RGXFWIF_CLEANUP_ZSBUFFER,
+									   psSync,
+									   IMG_FALSE);
+
+	if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXFWRequestZSBufferCleanUp: Failed to schedule a memory context cleanup with error (%u)", eError));
+	}
+
+	return eError;
+}
+
+
+#if defined(RGX_FEATURE_RAY_TRACING)
+PVRSRV_ERROR RGXFWRequestRayFrameDataCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode,
+											 PRGXFWIF_RAY_FRAME_DATA psHWFrameData,
+											 PVRSRV_CLIENT_SYNC_PRIM *psSync,
+											 RGXFWIF_DM eDM)
+{
+	RGXFWIF_KCCB_CMD			sHWFrameDataCleanUpCmd = {0};
+	PVRSRV_ERROR				eError;
+
+	PDUMPCOMMENT("HW FrameData cleanup Request DM%d [HWFrameData = 0x%08x]", eDM, psHWFrameData.ui32Addr);
+
+	sHWFrameDataCleanUpCmd.uCmdData.sCleanupData.uCleanupData.psHWFrameData = psHWFrameData;
+
+	eError = RGXScheduleCleanupCommand(psDeviceNode->pvDevice,
+									   eDM,
+									   &sHWFrameDataCleanUpCmd,
+									   sizeof(sHWFrameDataCleanUpCmd),
+									   RGXFWIF_CLEANUP_HWFRAMEDATA,
+									   psSync,
+									   IMG_FALSE);
+
+	if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXFWRequestRayFrameDataCleanUp: Failed to schedule a HWFrameData cleanup with error (%u)", eError));
+	}
+
+	return eError;
+}
+
+/*
+	RGXFWRequestRPMFreeListCleanUp
+*/
+PVRSRV_ERROR RGXFWRequestRPMFreeListCleanUp(PVRSRV_RGXDEV_INFO *psDevInfo,
+											PRGXFWIF_RPM_FREELIST psFWRPMFreeList,
+											PVRSRV_CLIENT_SYNC_PRIM *psSync)
+{
+	RGXFWIF_KCCB_CMD			sFLCleanUpCmd = {0};
+	PVRSRV_ERROR 				eError;
+
+	PDUMPCOMMENT("RPM Free list cleanup Request [RPM FreeList = 0x%08x]", psFWRPMFreeList.ui32Addr);
+
+	/* Setup our command data, the cleanup call will fill in the rest */
+	sFLCleanUpCmd.uCmdData.sCleanupData.uCleanupData.psRPMFreelist = psFWRPMFreeList;
+
+	/* Request cleanup of the firmware resource */
+	eError = RGXScheduleCleanupCommand(psDevInfo,
+									   RGXFWIF_DM_GP,
+									   &sFLCleanUpCmd,
+									   sizeof(RGXFWIF_KCCB_CMD),
+									   RGXFWIF_CLEANUP_RPM_FREELIST,
+									   psSync,
+									   IMG_FALSE);
+
+	if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXFWRequestRPMFreeListCleanUp: Failed to schedule a memory context cleanup with error (%u)", eError));
+	}
+
+	return eError;
+}
+#endif
+
+PVRSRV_ERROR ContextSetPriority(RGX_SERVER_COMMON_CONTEXT *psContext,
+								CONNECTION_DATA *psConnection,
+								PVRSRV_RGXDEV_INFO *psDevInfo,
+								IMG_UINT32 ui32Priority,
+								RGXFWIF_DM eDM)
+{
+	IMG_UINT32				ui32CmdSize;
+	IMG_UINT8				*pui8CmdPtr;
+	RGXFWIF_KCCB_CMD		sPriorityCmd;
+	RGXFWIF_CCB_CMD_HEADER	*psCmdHeader;	
+	RGXFWIF_CMD_PRIORITY	*psCmd;
+	IMG_UINT32				ui32BeforeWOff = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psContext));
+	IMG_BOOL				bKickCMD = IMG_TRUE;
+	PVRSRV_ERROR			eError;
+
+	/*
+		Get space for command
+	*/
+	ui32CmdSize = RGX_CCB_FWALLOC_ALIGN(sizeof(RGXFWIF_CCB_CMD_HEADER) + sizeof(RGXFWIF_CMD_PRIORITY));
+
+	eError = RGXAcquireCCB(FWCommonContextGetClientCCB(psContext),
+						   ui32CmdSize,
+						   (IMG_PVOID *) &pui8CmdPtr,
+						   IMG_TRUE);
+	if (eError != PVRSRV_OK)
+	{
+		if (ui32BeforeWOff != RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psContext)))
+		{
+			bKickCMD = IMG_FALSE;
+		}
+		else
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to acquire client CCB", __FUNCTION__));
+			goto fail_ccbacquire;
+		}
+	}
+
+	if (bKickCMD)
+	{
+		/*
+			Write the command header and command
+		*/
+		psCmdHeader = (RGXFWIF_CCB_CMD_HEADER *) pui8CmdPtr;
+		psCmdHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_PRIORITY;
+		psCmdHeader->ui32CmdSize = RGX_CCB_FWALLOC_ALIGN(sizeof(RGXFWIF_CMD_PRIORITY));
+		pui8CmdPtr += sizeof(*psCmdHeader);
+		
+		psCmd = (RGXFWIF_CMD_PRIORITY *) pui8CmdPtr;
+		psCmd->ui32Priority = ui32Priority;
+		pui8CmdPtr += sizeof(*psCmd);
+	}
+
+	/*
+		We should reserved space in the kernel CCB here and fill in the command
+		directly.
+		This is so if there isn't space in the kernel CCB we can return with
+		retry back to services client before we take any operations
+	*/
+
+	if (bKickCMD)
+	{
+		/*
+			Submit the command
+		*/
+		RGXReleaseCCB(FWCommonContextGetClientCCB(psContext),
+					  ui32CmdSize,
+					  IMG_TRUE);
+	
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to release space in client CCB", __FUNCTION__));
+			return eError;
+		}
+	}
+
+	/* Construct the priority command. */
+	sPriorityCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+	sPriorityCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psContext);
+	sPriorityCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psContext));
+	sPriorityCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+
+	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+	{
+		eError = RGXScheduleCommand(psDevInfo,
+									eDM,
+									&sPriorityCmd,
+									sizeof(sPriorityCmd),
+									IMG_TRUE);
+		if (eError != PVRSRV_ERROR_RETRY)
+		{
+			break;
+		}
+		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+	} END_LOOP_UNTIL_TIMEOUT();
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXFlushComputeDataKM: Failed to schedule SLC flush command with error (%u)", eError));
+	}
+
+	return PVRSRV_OK;
+
+fail_ccbacquire:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+/*
+	RGXReadMETAAddr
+*/
+PVRSRV_ERROR RGXReadMETAAddr(PVRSRV_RGXDEV_INFO	*psDevInfo, IMG_UINT32 ui32METAAddr, IMG_UINT32 *pui32Value)
+{
+	IMG_UINT8 *pui8RegBase = (IMG_UINT8*)psDevInfo->pvRegsBaseKM;
+	IMG_UINT32 ui32Value;
+
+	/* Wait for Slave Port to be Ready */
+	if (PVRSRVPollForValueKM(
+	        (IMG_UINT32*) (pui8RegBase + RGX_CR_META_SP_MSLVCTRL1),
+	        RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+	        RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN) != PVRSRV_OK)
+	{
+		return PVRSRV_ERROR_TIMEOUT;
+	}
+
+	/* Issue the Read */
+	OSWriteHWReg32(
+	    psDevInfo->pvRegsBaseKM,
+	    RGX_CR_META_SP_MSLVCTRL0,
+	    ui32METAAddr | RGX_CR_META_SP_MSLVCTRL0_RD_EN);
+
+	/* Wait for Slave Port to be Ready: read complete */
+	if (PVRSRVPollForValueKM(
+	        (IMG_UINT32*) (pui8RegBase + RGX_CR_META_SP_MSLVCTRL1),
+	        RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+	        RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN) != PVRSRV_OK)
+	{
+		return PVRSRV_ERROR_TIMEOUT;
+	}
+
+	/* Read the value */
+	ui32Value = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVDATAX);
+
+	*pui32Value = ui32Value;
+
+	return PVRSRV_OK;
+}
+
+
+/*
+	RGXUpdateHealthStatus
+*/
+PVRSRV_ERROR RGXUpdateHealthStatus(PVRSRV_DEVICE_NODE* psDevNode,
+                                   IMG_BOOL bCheckAfterTimePassed)
+{
+	PVRSRV_DATA*                 psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_DEVICE_HEALTH_STATUS  eNewStatus   = PVRSRV_DEVICE_HEALTH_STATUS_OK;
+	PVRSRV_DEVICE_HEALTH_REASON  eNewReason   = PVRSRV_DEVICE_HEALTH_REASON_NONE;
+	PVRSRV_RGXDEV_INFO*  psDevInfo;
+	RGXFWIF_TRACEBUF*  psRGXFWIfTraceBufCtl;
+	IMG_UINT32  ui32DMCount, ui32ThreadCount;
+	IMG_BOOL  bKCCBCmdsWaiting;
+	
+	PVR_ASSERT(psDevNode != NULL);
+	psDevInfo = psDevNode->pvDevice;
+	psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBuf;
+	
+	/* If the firmware is not initialised, there is not much point continuing! */
+	if (!psDevInfo->bFirmwareInitialised  ||  psDevInfo->pvRegsBaseKM == IMG_NULL  ||
+	    psDevInfo->psDeviceNode == IMG_NULL)
+	{
+		return PVRSRV_OK;
+	}
+
+	/* If Rogue is not powered on, don't continue 
+	   (there is a race condition where PVRSRVIsDevicePowered returns TRUE when the GPU is actually powering down. 
+	   That's not a problem as this function does not touch the HW except for the RGXScheduleCommand function,
+	   which is already powerlock safe. The worst thing that could happen is that Rogue might power back up
+	   but the chances of that are very low */
+	if (!PVRSRVIsDevicePowered(psDevNode->sDevId.ui32DeviceIndex))
+	{
+		return PVRSRV_OK;
+	}
+	
+	/* If this is a quick update, then include the last current value... */
+	if (!bCheckAfterTimePassed)
+	{
+		eNewStatus = psDevNode->eHealthStatus;
+		eNewReason = psDevNode->eHealthReason;
+	}
+	
+	/*
+	   Firmware thread checks...
+	*/
+	for (ui32ThreadCount = 0;  ui32ThreadCount < RGXFW_THREAD_NUM;  ui32ThreadCount++)
+	{
+		if (psRGXFWIfTraceBufCtl != IMG_NULL)
+		{
+			IMG_CHAR*  pszTraceAssertInfo = psRGXFWIfTraceBufCtl->sTraceBuf[ui32ThreadCount].sAssertBuf.szInfo;
+
+			/*
+			Check if the FW has hit an assert...
+			*/
+			if (*pszTraceAssertInfo != '\0')
+			{
+				PVR_DPF((PVR_DBG_ERROR, "RGXGetDeviceHealthStatus: Firmware thread %d has asserted: %s (%s:%d)",
+				        ui32ThreadCount, pszTraceAssertInfo,
+						psRGXFWIfTraceBufCtl->sTraceBuf[ui32ThreadCount].sAssertBuf.szPath,
+						psRGXFWIfTraceBufCtl->sTraceBuf[ui32ThreadCount].sAssertBuf.ui32LineNum));
+				eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_DEAD;
+				eNewReason = PVRSRV_DEVICE_HEALTH_REASON_ASSERTED;
+				goto _RGXUpdateHealthStatus_Exit;
+			}
+
+			/*
+			   Check the threads to see if they are in the same poll locations as last time...
+			*/
+			if (bCheckAfterTimePassed)
+			{	
+				if (psRGXFWIfTraceBufCtl->aui32CrPollAddr[ui32ThreadCount] != 0  &&
+					psRGXFWIfTraceBufCtl->aui32CrPollAddr[ui32ThreadCount] == psDevInfo->aui32CrLastPollAddr[ui32ThreadCount])
+				{
+					PVR_DPF((PVR_DBG_ERROR, "RGXGetDeviceHealthStatus: Firmware stuck on CR poll: T%u polling %s (reg:0x%08X mask:0x%08X)",
+							ui32ThreadCount,
+							((psRGXFWIfTraceBufCtl->aui32CrPollAddr[ui32ThreadCount] & RGXFW_POLL_TYPE_SET)?("set"):("unset")), 
+							psRGXFWIfTraceBufCtl->aui32CrPollAddr[ui32ThreadCount] & ~RGXFW_POLL_TYPE_SET, 
+							psRGXFWIfTraceBufCtl->aui32CrPollMask[ui32ThreadCount]));
+					eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_DEAD;
+					eNewReason = PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING;
+					goto _RGXUpdateHealthStatus_Exit;
+				}
+				psDevInfo->aui32CrLastPollAddr[ui32ThreadCount] = psRGXFWIfTraceBufCtl->aui32CrPollAddr[ui32ThreadCount];
+			}
+		}
+	}
+
+	/*
+	   Event Object Timeouts check...
+	*/
+	if (psDevInfo->ui32GEOTimeoutsLastTime > 1  &&  psPVRSRVData->ui32GEOConsecutiveTimeouts > psDevInfo->ui32GEOTimeoutsLastTime)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXGetDeviceHealthStatus: Global Event Object Timeouts have risen (from %d to %d)",
+				psDevInfo->ui32GEOTimeoutsLastTime, psPVRSRVData->ui32GEOConsecutiveTimeouts));
+		eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING;
+		eNewReason = PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS;
+	}
+	psDevInfo->ui32GEOTimeoutsLastTime = psPVRSRVData->ui32GEOConsecutiveTimeouts;
+	
+	/*
+	   Check the Kernel CCB pointers are valid. If any commands were waiting last time, then check
+	   that some have executed since then.
+	*/
+	bKCCBCmdsWaiting = IMG_FALSE;
+	
+	for (ui32DMCount = 0; ui32DMCount < RGXFWIF_DM_MAX; ui32DMCount++)
+	{
+		RGXFWIF_CCB_CTL *psKCCBCtl = ((PVRSRV_RGXDEV_INFO*)psDevNode->pvDevice)->apsKernelCCBCtl[ui32DMCount];
+
+		if (psKCCBCtl != IMG_NULL)
+		{
+			if (psKCCBCtl->ui32ReadOffset > psKCCBCtl->ui32WrapMask  ||
+				psKCCBCtl->ui32WriteOffset > psKCCBCtl->ui32WrapMask)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "RGXGetDeviceHealthStatus: KCCB for DM%d has invalid offset (ROFF=%d WOFF=%d)",
+				        ui32DMCount, psKCCBCtl->ui32ReadOffset, psKCCBCtl->ui32WriteOffset));
+				eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_DEAD;
+				eNewReason = PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT;
+			}
+
+			if (psKCCBCtl->ui32ReadOffset != psKCCBCtl->ui32WriteOffset)
+			{
+				bKCCBCmdsWaiting = IMG_TRUE;
+			}
+		}
+	}
+
+	if (bCheckAfterTimePassed && psDevInfo->psRGXFWIfTraceBuf != IMG_NULL)
+	{
+		IMG_UINT32  ui32KCCBCmdsExecuted = psDevInfo->psRGXFWIfTraceBuf->ui32KCCBCmdsExecuted;
+		
+		if (psDevInfo->ui32KCCBCmdsExecutedLastTime == ui32KCCBCmdsExecuted)
+		{
+			/*
+			   If something was waiting last time then the Firmware has stopped processing commands.
+			*/
+			if (psDevInfo->bKCCBCmdsWaitingLastTime)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "RGXGetDeviceHealthStatus: No KCCB commands executed since check!"));
+				eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING;
+				eNewReason = PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED;
+			}
+		
+			/*
+			   If no commands are currently pending and nothing happened since the last poll, then
+			   schedule a dummy command to ping the firmware so we know it is alive and processing.
+			*/
+			if (!bKCCBCmdsWaiting)
+			{
+				RGXFWIF_KCCB_CMD  sCmpKCCBCmd;
+				PVRSRV_ERROR      eError;
+
+				sCmpKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_HEALTH_CHECK;
+
+				eError = RGXScheduleCommand(psDevNode->pvDevice,
+											RGXFWIF_DM_GP,
+											&sCmpKCCBCmd,
+											sizeof(sCmpKCCBCmd),
+											IMG_TRUE);
+				if (eError != PVRSRV_OK)
+				{
+					PVR_DPF((PVR_DBG_ERROR, "RGXGetDeviceHealthStatus: Cannot schedule Health Check command! (0x%x)", eError));
+				}
+				else
+				{
+					bKCCBCmdsWaiting = IMG_TRUE;
+				}
+			}
+		}
+
+		psDevInfo->bKCCBCmdsWaitingLastTime     = bKCCBCmdsWaiting;
+		psDevInfo->ui32KCCBCmdsExecutedLastTime = ui32KCCBCmdsExecuted;
+	}
+
+	if (bCheckAfterTimePassed && (PVRSRV_DEVICE_HEALTH_STATUS_OK==eNewStatus))
+	{
+		/* Attempt to detect and deal with any stalled client contexts */
+		IMG_BOOL bStalledClient = IMG_FALSE;
+		if (CheckForStalledClientTransferCtxt(psDevInfo))
+		{
+			PVR_DPF((PVR_DBG_WARNING, "RGXGetDeviceHealthStatus: Detected stalled client transfer context"));
+			bStalledClient = IMG_TRUE;
+		}
+		if (CheckForStalledClientRenderCtxt(psDevInfo))
+		{
+			PVR_DPF((PVR_DBG_WARNING, "RGXGetDeviceHealthStatus: Detected stalled client render context"));
+			bStalledClient = IMG_TRUE;
+		}
+#if !defined(UNDER_WDDM)
+		if (CheckForStalledClientComputeCtxt(psDevInfo))
+		{
+			PVR_DPF((PVR_DBG_WARNING, "RGXGetDeviceHealthStatus: Detected stalled client compute context"));
+			bStalledClient = IMG_TRUE;
+		}
+#endif
+#if defined(RGX_FEATURE_RAY_TRACING)
+		if (CheckForStalledClientRayCtxt(psDevInfo))
+		{
+			PVR_DPF((PVR_DBG_WARNING, "RGXGetDeviceHealthStatus: Detected stalled client raytrace context"));
+			bStalledClient = IMG_TRUE;
+		}
+#endif
+		/* try the unblock routines only on the transition from OK to stalled */
+		if (!psDevInfo->bStalledClient && bStalledClient)
+		{
+#if defined(SUPPORT_DISPLAY_CLASS)
+			//DCDisplayContextFlush();
+#endif
+		}
+		psDevInfo->bStalledClient = bStalledClient;
+	}
+
+	/*
+	   Finished, save the new status...
+	*/
+_RGXUpdateHealthStatus_Exit:
+	psDevNode->eHealthStatus = eNewStatus;
+	psDevNode->eHealthReason = eNewReason;
+
+	return PVRSRV_OK;
+} /* RGXUpdateHealthStatus */
+
+PVRSRV_ERROR CheckStalledClientCommonContext(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext)
+{
+	RGX_CLIENT_CCB 	*psCurrentClientCCB = psCurrentServerCommonContext->psClientCCB;
+
+	return CheckForStalledCCB(psCurrentClientCCB);
+}
+
+IMG_VOID DumpStalledFWCommonContext(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext,
+									DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf)
+{
+	RGX_CLIENT_CCB 	*psCurrentClientCCB = psCurrentServerCommonContext->psClientCCB;
+	PRGXFWIF_FWCOMMONCONTEXT sFWCommonContext = psCurrentServerCommonContext->sFWCommonContextFWAddr;
+
+	DumpStalledCCBCommand(sFWCommonContext, psCurrentClientCCB, pfnDumpDebugPrintf);
+}
+
+IMG_VOID AttachKickResourcesCleanupCtls(PRGXFWIF_CLEANUP_CTL *apsCleanupCtl,
+									IMG_UINT32 *pui32NumCleanupCtl,
+									RGXFWIF_DM eDM,
+									IMG_BOOL bKick,
+									RGX_RTDATA_CLEANUP_DATA        *psRTDataCleanup,
+									RGX_ZSBUFFER_DATA              *psZBuffer,
+									RGX_ZSBUFFER_DATA              *psSBuffer)
+{
+	PRGXFWIF_CLEANUP_CTL *psCleanupCtlWrite = apsCleanupCtl;
+
+	PVR_ASSERT((eDM == RGXFWIF_DM_TA) || (eDM == RGXFWIF_DM_3D));
+
+	if(bKick)
+	{
+		if(eDM == RGXFWIF_DM_TA)
+		{
+			if(psRTDataCleanup)
+			{
+				PRGXFWIF_CLEANUP_CTL psCleanupCtl;
+
+				RGXSetFirmwareAddress(&psCleanupCtl, psRTDataCleanup->psFWHWRTDataMemDesc,
+									offsetof(RGXFWIF_HWRTDATA, sTACleanupState),
+								RFW_FWADDR_NOREF_FLAG);
+
+				*(psCleanupCtlWrite++) = psCleanupCtl;
+			}
+		}
+		else
+		{
+			if(psRTDataCleanup)
+			{
+				PRGXFWIF_CLEANUP_CTL psCleanupCtl;
+
+				RGXSetFirmwareAddress(&psCleanupCtl, psRTDataCleanup->psFWHWRTDataMemDesc,
+									offsetof(RGXFWIF_HWRTDATA, s3DCleanupState),
+								RFW_FWADDR_NOREF_FLAG);
+
+				*(psCleanupCtlWrite++) = psCleanupCtl;
+			}
+
+			if(psZBuffer)
+			{
+				(psCleanupCtlWrite++)->ui32Addr = psZBuffer->sZSBufferFWDevVAddr.ui32Addr +
+								offsetof(RGXFWIF_FWZSBUFFER, sCleanupState);
+			}
+
+			if(psSBuffer)
+			{
+				(psCleanupCtlWrite++)->ui32Addr = psSBuffer->sZSBufferFWDevVAddr.ui32Addr +
+								offsetof(RGXFWIF_FWZSBUFFER, sCleanupState);
+			}
+		}
+	}
+
+	*pui32NumCleanupCtl = psCleanupCtlWrite - apsCleanupCtl;
+
+	PVR_ASSERT(*pui32NumCleanupCtl <= RGXFWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS);
+}
+
+PVRSRV_ERROR RGXResetHWRLogs(PVRSRV_DEVICE_NODE *psDevNode)
+{
+	PVRSRV_RGXDEV_INFO	*psDevInfo;
+	RGXFWIF_HWRINFOBUF	*psHWRInfoBuf;
+	RGXFWIF_TRACEBUF 	*psRGXFWIfTraceBufCtl;
+	IMG_UINT32 			i;
+
+	if(psDevNode->pvDevice == IMG_NULL)
+	{
+		return PVRSRV_ERROR_INVALID_DEVINFO;
+	}
+	psDevInfo = psDevNode->pvDevice;
+
+	psHWRInfoBuf = psDevInfo->psRGXFWIfHWRInfoBuf;
+	psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBuf;
+
+	for(i = 0 ; i < RGXFWIF_DM_MAX ; i++)
+	{
+		/* Reset the HWR numbers */
+		psRGXFWIfTraceBufCtl->aui16HwrDmLockedUpCount[i] = 0;
+		psRGXFWIfTraceBufCtl->aui16HwrDmFalseDetectCount[i] = 0;
+		psRGXFWIfTraceBufCtl->aui16HwrDmRecoveredCount[i] = 0;
+		psRGXFWIfTraceBufCtl->aui16HwrDmOverranCount[i] = 0;
+	}
+
+	for(i = 0 ; i < RGXFWIF_HWINFO_MAX ; i++)
+	{
+		psHWRInfoBuf->sHWRInfo[i].ui32HWRNumber = 0;
+	}
+
+	for(i = 0 ; i < RGXFW_THREAD_NUM ; i++)
+	{
+		psHWRInfoBuf->ui32FirstCrPollAddr[i] = 0;
+		psHWRInfoBuf->ui32FirstCrPollMask[i] = 0;
+	}
+
+	psHWRInfoBuf->ui32WriteIndex = 0;
+	psHWRInfoBuf->ui32DDReqCount = 0;
+
+	return PVRSRV_OK;
+}
+
+#if defined(PDUMP)
+PVRSRV_ERROR RGXPdumpDrainKCCB(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32WriteOffset, RGXFWIF_DM eKCCBType)
+{
+	RGXFWIF_CCB_CTL *psKCCBCtl = psDevInfo->apsKernelCCBCtl[eKCCBType];
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	if (psDevInfo->abDumpedKCCBCtlAlready[eKCCBType])
+	{
+		/* exiting capture range */
+		psDevInfo->abDumpedKCCBCtlAlready[eKCCBType] = IMG_FALSE;
+
+		/* make sure previous cmd is drained in pdump in case we will 'jump' over some future cmds */
+		PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_POWER,
+                                      "kCCB(%p): Draining rgxfw_roff (0x%x) == woff (0x%x)",
+                                      psKCCBCtl,
+                                      ui32WriteOffset,
+                                      ui32WriteOffset);
+		eError = DevmemPDumpDevmemPol32(psDevInfo->apsKernelCCBCtlMemDesc[eKCCBType],
+                                                offsetof(RGXFWIF_CCB_CTL, ui32ReadOffset),
+                                                ui32WriteOffset,
+                                                0xffffffff,
+                                                PDUMP_POLL_OPERATOR_EQUAL,
+                                                PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_POWER);
+
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "RGXPdumpDrainKCCB: problem pdumping POL for kCCBCtl (%d)", eError));
+		}
+	}
+	return eError;
+}
+#endif
+
+/******************************************************************************
+ End of file (rgxfwutils.c)
+******************************************************************************/
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxfwutils.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxfwutils.h
new file mode 100644
index 0000000..cd3ca75
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxfwutils.h
@@ -0,0 +1,720 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX firmware utility routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX firmware utility routines
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXFWUTILS_H__)
+#define __RGXFWUTILS_H__
+
+#include "rgxdevice.h"
+#include "rgxccb.h"
+#include "devicemem.h"
+#include "device.h"
+#include "pvrsrv.h"
+#include "connection_server.h"
+#include "rgxta3d.h"
+
+
+
+/*
+ * Firmware-only allocation (which are initialised by the host) must be aligned to the SLC cache line size.
+ * This is because firmware-only allocations are GPU_CACHE_INCOHERENT and this causes problems
+ * if two allocations share the same cache line; e.g. the initialisation of the second allocation won't
+ * make it into the SLC cache because it has been already loaded when accessing the content of the first allocation.
+ */
+static INLINE PVRSRV_ERROR DevmemFwAllocate(PVRSRV_RGXDEV_INFO *psDevInfo,
+											IMG_DEVMEM_SIZE_T uiSize,
+											DEVMEM_FLAGS_T uiFlags,
+						                    IMG_PCHAR pszText,
+											DEVMEM_MEMDESC **ppsMemDescPtr)
+{
+	IMG_DEV_VIRTADDR sTmpDevVAddr;
+	PVRSRV_ERROR eError;
+
+	PVR_DPF_ENTERED;
+
+	eError = DevmemAllocate(psDevInfo->psFirmwareHeap,
+							uiSize,
+							ROGUE_CACHE_LINE_SIZE,
+							uiFlags,
+							pszText,
+							ppsMemDescPtr);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF_RETURN_RC(eError);
+	}
+
+	/*
+		We need to map it so the heap for this allocation
+		is set
+	*/
+	eError = DevmemMapToDevice(*ppsMemDescPtr,
+							   psDevInfo->psFirmwareHeap,
+							   &sTmpDevVAddr);
+	PVR_DPF_RETURN_RC1(eError, *ppsMemDescPtr);
+}
+
+static INLINE PVRSRV_ERROR DevmemFwAllocateExportable(PVRSRV_DEVICE_NODE *psDeviceNode,
+													  IMG_DEVMEM_SIZE_T uiSize,
+													  DEVMEM_FLAGS_T uiFlags,
+									                  IMG_PCHAR pszText,
+													  DEVMEM_MEMDESC **ppsMemDescPtr)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice;
+	IMG_DEV_VIRTADDR sTmpDevVAddr;
+	PVRSRV_ERROR eError;
+
+	PVR_DPF_ENTERED;
+
+	eError = DevmemAllocateExportable(IMG_NULL,
+									  (IMG_HANDLE) psDeviceNode,
+									  uiSize,
+									  64,
+									  uiFlags,
+									  pszText,
+									  ppsMemDescPtr);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"FW DevmemAllocateExportable failed (%u)", eError));
+		PVR_DPF_RETURN_RC(eError);
+	}
+
+	/*
+		We need to map it so the heap for this allocation
+		is set
+	*/
+	eError = DevmemMapToDevice(*ppsMemDescPtr,
+							   psDevInfo->psFirmwareHeap,
+							   &sTmpDevVAddr);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"FW DevmemMapToDevice failed (%u)", eError));
+	}
+	PVR_DPF_RETURN_RC1(eError, *ppsMemDescPtr);
+}
+
+static INLINE IMG_VOID DevmemFwFree(DEVMEM_MEMDESC *psMemDesc)
+{
+	PVR_DPF_ENTERED1(psMemDesc);
+
+	DevmemReleaseDevVirtAddr(psMemDesc);
+	DevmemFree(psMemDesc);
+
+	PVR_DPF_RETURN;
+}
+
+/*
+ * This function returns the value of the hardware register RGX_CR_TIMER
+ * which is a timer counting in ticks.
+ */
+
+static INLINE IMG_UINT64 RGXReadHWTimerReg(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+    IMG_UINT64  ui64Time = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TIMER);
+
+    /*
+     *  In order to avoid having to issue three 32-bit reads to detect the
+     *  lower 32-bits wrapping, the MSB of the low 32-bit word is duplicated
+     *  in the MSB of the high 32-bit word. If the wrap happens, we just read
+     *  the register again (it will not wrap again so soon).
+     */
+    if ((ui64Time ^ (ui64Time << 32)) & ~RGX_CR_TIMER_BIT31_CLRMSK)
+    {
+        ui64Time = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TIMER);
+    }
+
+    return ((ui64Time & ~RGX_CR_TIMER_VALUE_CLRMSK)	>> RGX_CR_TIMER_VALUE_SHIFT);
+}
+
+/*
+ * This FW Common Context is only mapped into kernel for initialisation and cleanup purposes.
+ * Otherwise this allocation is only used by the FW.
+ * Therefore the GPU cache doesn't need coherency,
+ * and write-combine is suffice on the CPU side (WC buffer will be flushed at the first kick)
+ */
+#define RGX_FWCOMCTX_ALLOCFLAGS	(PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | \
+                                 PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(META_CACHED) | \
+								 PVRSRV_MEMALLOCFLAG_GPU_READABLE | \
+								 PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \
+								 PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | \
+								 PVRSRV_MEMALLOCFLAG_CPU_READABLE | \
+								 PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \
+								 PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE | \
+								 PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \
+								 PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC)
+
+/******************************************************************************
+ * RGXSetFirmwareAddress Flags
+ *****************************************************************************/
+#define RFW_FWADDR_FLAG_NONE		(0)			/*!< Void flag */
+#define RFW_FWADDR_NOREF_FLAG		(1U << 0)	/*!< It is safe to immediately release the reference to the pointer, 
+												  otherwise RGXUnsetFirmwareAddress() must be call when finished. */
+
+
+PVRSRV_ERROR RGXSetupFirmware(PVRSRV_DEVICE_NODE	*psDeviceNode, 
+							     IMG_BOOL				bEnableSignatureChecks,
+							     IMG_UINT32			ui32SignatureChecksBufSize,
+							     IMG_UINT32			ui32HWPerfFWBufSizeKB,
+							     IMG_UINT64			ui64HWPerfFilter,
+							     IMG_UINT32			ui32RGXFWAlignChecksSize,
+							     IMG_UINT32			*pui32RGXFWAlignChecks,
+							     IMG_UINT32			ui32ConfigFlags,
+							     IMG_UINT32			ui32LogType,
+							     IMG_UINT32            ui32NumTilingCfgs,
+							     IMG_UINT32            *pui32BIFTilingXStrides,
+							     IMG_UINT32			ui32FilterMode,
+							     IMG_UINT32			ui32JonesDisableMask,
+							     IMG_UINT32			ui32HWRDebugDumpLimit,
+								 IMG_UINT32			ui32HWPerfCountersDataSize,
+							     RGXFWIF_DEV_VIRTADDR	*psRGXFWInitFWAddr,
+							     RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandingConf);
+
+
+
+IMG_VOID RGXFreeFirmware(PVRSRV_RGXDEV_INFO 	*psDevInfo);
+
+/*************************************************************************/ /*!
+@Function       RGXSetFirmwareAddress
+
+@Description    Sets a pointer in a firmware data structure.
+
+@Input          ppDest		 Address of the pointer to set
+@Input          psSrc		 MemDesc describing the pointer
+@Input          ui32Flags	 Any combination of  RFW_FWADDR_*_FLAG
+
+@Return			IMG_VOID
+*/ /**************************************************************************/
+IMG_VOID RGXSetFirmwareAddress(RGXFWIF_DEV_VIRTADDR	*ppDest,
+							   DEVMEM_MEMDESC		*psSrc,
+							   IMG_UINT32			uiOffset,
+							   IMG_UINT32			ui32Flags);
+
+#if defined(RGX_FEATURE_META_DMA)
+/*************************************************************************/ /*!
+@Function       RGXSetMetaDMAAddress
+
+@Description    Fills a Firmware structure used to setup the Meta DMA with two
+                pointers to the same data, one on 40 bit and one on 32 bit
+                (pointer in the FW memory space).
+
+@Input          ppDest		 	Address of the structure to set
+@Input          psSrcMemDesc	MemDesc describing the pointer
+@Input			psSrcFWDevVAddr Firmware memory space pointer
+
+@Return			IMG_VOID
+*/ /**************************************************************************/
+IMG_VOID RGXSetMetaDMAAddress(RGXFWIF_DMA_ADDR		*psDest,
+							  DEVMEM_MEMDESC		*psSrcMemDesc,
+							  RGXFWIF_DEV_VIRTADDR	*psSrcFWDevVAddr,
+							  IMG_UINT32			uiOffset);
+#endif
+
+/*************************************************************************/ /*!
+@Function       RGXUnsetFirmwareAddress
+
+@Description    Unsets a pointer in a firmware data structure
+
+@Input          psSrc		 MemDesc describing the pointer
+
+@Return			IMG_VOID
+*/ /**************************************************************************/
+IMG_VOID RGXUnsetFirmwareAddress(DEVMEM_MEMDESC			*psSrc);
+
+/*************************************************************************/ /*!
+@Function       FWCommonContextAllocate
+
+@Description    Allocate a FW common coontext. This allocates the HW memory
+                for the context, the CCB and wires it all together.
+
+@Input          psConnection            Connection this context is being created on
+@Input          psDeviceNode		    Device node to create the FW context on
+                                        (must be RGX device node)
+@Input          pszContextName          Name of the context
+@Input          psAllocatedMemDesc      Pointer to pre-allocated MemDesc to use
+                                        as the FW context or NULL if this function
+                                        should allocate it
+@Input          ui32AllocatedOffset     Offset into pre-allocate MemDesc to use
+                                        as the FW context. If psAllocatedMemDesc
+                                        is NULL then this parameter is ignored
+@Input          psFWMemContextMemDesc   MemDesc of the FW memory context this
+                                        common context resides on
+@Input          psContextStateMemDesc   FW context state (context switch) MemDesc
+@Input          ui32CCBAllocSize        Size of the CCB for this context
+@Input          ui32Priority            Priority of the context
+@Input          psInfo                  Structure that contains extra info
+                                        required for the creation of the context
+                                        (elements might change from core to core)
+@Return			PVRSRV_OK if the context was successfully created
+*/ /**************************************************************************/
+PVRSRV_ERROR FWCommonContextAllocate(CONNECTION_DATA *psConnection,
+									 PVRSRV_DEVICE_NODE *psDeviceNode,
+									 const IMG_CHAR *pszContextName,
+									 DEVMEM_MEMDESC *psAllocatedMemDesc,
+									 IMG_UINT32 ui32AllocatedOffset,
+									 DEVMEM_MEMDESC *psFWMemContextMemDesc,
+									 DEVMEM_MEMDESC *psContextStateMemDesc,
+									 IMG_UINT32 ui32CCBAllocSize,
+									 IMG_UINT32 ui32Priority,
+									 RGX_COMMON_CONTEXT_INFO *psInfo,
+									 RGX_SERVER_COMMON_CONTEXT **ppsServerCommonContext);
+
+									 
+
+IMG_VOID FWCommonContextFree(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext);
+
+PRGXFWIF_FWCOMMONCONTEXT FWCommonContextGetFWAddress(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext);
+
+RGX_CLIENT_CCB *FWCommonContextGetClientCCB(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext);
+
+RGXFWIF_CONTEXT_RESET_REASON FWCommonContextGetLastResetReason(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext);
+
+PVRSRV_ERROR RGXStartFirmware(PVRSRV_RGXDEV_INFO 	*psDevInfo);
+
+/*!
+******************************************************************************
+
+ @Function	RGXScheduleProcessQueuesKM
+
+ @Description - Software command complete handler
+				(sends uncounted kicks for all the DMs through the MISR)
+
+ @Input hCmdCompHandle - RGX device node
+
+******************************************************************************/
+IMG_IMPORT
+IMG_VOID RGXScheduleProcessQueuesKM(PVRSRV_CMDCOMP_HANDLE hCmdCompHandle);
+
+/*!
+******************************************************************************
+
+ @Function	RGXInstallProcessQueuesMISR
+
+ @Description - Installs the MISR to handle Process Queues operations
+
+ @Input phMISR - Pointer to the MISR handler
+
+ @Input psDeviceNode - RGX Device node
+
+******************************************************************************/
+IMG_IMPORT
+PVRSRV_ERROR RGXInstallProcessQueuesMISR(IMG_HANDLE *phMISR, PVRSRV_DEVICE_NODE *psDeviceNode);
+
+/*************************************************************************/ /*!
+@Function       RGXSendCommandWithPowLock
+
+@Description    Sends a command to a particular DM without honouring
+				pending cache operations but taking the power lock.
+
+@Input          psDevInfo			Device Info
+@Input          eDM					To which DM the cmd is sent.
+@Input          psKCCBCmd			The cmd to send.
+@Input          ui32CmdSize			The cmd size.
+@Input          bPDumpContinuous
+
+@Return			PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXSendCommandWithPowLock(PVRSRV_RGXDEV_INFO 	*psDevInfo,
+										RGXFWIF_DM			eKCCBType,
+									 	RGXFWIF_KCCB_CMD	*psKCCBCmd,
+									 	IMG_UINT32			ui32CmdSize,
+									 	IMG_BOOL			bPDumpContinuous);
+
+/*************************************************************************/ /*!
+@Function       RGXSendCommandRaw
+
+@Description    Sends a command to a particular DM without honouring
+				pending cache operations or the power lock.
+
+@Input          psDevInfo			Device Info
+@Input          eDM					To which DM the cmd is sent.
+@Input          psKCCBCmd			The cmd to send.
+@Input          ui32CmdSize			The cmd size.
+@Input          bPDumpContinuous
+
+@Return			PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXSendCommandRaw(PVRSRV_RGXDEV_INFO 	*psDevInfo,
+								 RGXFWIF_DM			eKCCBType,
+								 RGXFWIF_KCCB_CMD	*psKCCBCmd,
+								 IMG_UINT32			ui32CmdSize,
+								 PDUMP_FLAGS_T		uiPdumpFlags);
+
+
+/*************************************************************************/ /*!
+@Function       RGXScheduleCommand
+
+@Description    Sends a command to a particular DM
+
+@Input          psDevInfo			Device Info
+@Input          eDM					To which DM the cmd is sent.
+@Input          psKCCBCmd			The cmd to send.
+@Input          ui32CmdSize			The cmd size.
+@Input          bPDumpContinuous
+
+@Return			PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXScheduleCommand(PVRSRV_RGXDEV_INFO 	*psDevInfo,
+								RGXFWIF_DM			eKCCBType,
+								RGXFWIF_KCCB_CMD	*psKCCBCmd,
+								IMG_UINT32			ui32CmdSize,
+								IMG_BOOL			bPDumpContinuous);
+
+/*************************************************************************/ /*!
+@Function       RGXScheduleCommandAndWait
+
+@Description    Schedules the command with RGXScheduleCommand and then waits 
+				for the FW to update a sync. The sync must be piggy backed on
+				the cmd, either by passing a sync cmd or a cmd that contains the
+				sync which the FW will eventually update. The sync is created in
+				the function, therefore the function provides a FWAddr and 
+				UpdateValue for that cmd.
+
+@Input          psDevInfo			Device Info
+@Input          eDM					To which DM the cmd is sent.
+@Input          psKCCBCmd			The cmd to send.
+@Input          ui32CmdSize			The cmd size.
+@Input          puiSyncObjFWAddr	Pointer to the location with the FWAddr of 
+									the sync.
+@Input          puiUpdateValue		Pointer to the location with the update 
+									value of the sync.
+@Input          bPDumpContinuous
+
+@Return			PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXScheduleCommandAndWait(PVRSRV_RGXDEV_INFO 	*psDevInfo,
+									   RGXFWIF_DM			eDM,
+									   RGXFWIF_KCCB_CMD		*psKCCBCmd,
+									   IMG_UINT32			ui32CmdSize,
+									   IMG_UINT32			*puiSyncObjDevVAddr,
+									   IMG_UINT32			*puiUpdateValue,
+									   PVRSRV_CLIENT_SYNC_PRIM *psSyncPrim,
+									   IMG_BOOL				bPDumpContinuous);
+
+PVRSRV_ERROR RGXFirmwareUnittests(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+
+/*! ***********************************************************************//**
+@brief          Copy framework command into FW addressable buffer
+
+@param          psFWFrameworkMemDesc
+@param          pbyGPUFRegisterList
+@param          ui32FrameworkRegisterSize
+
+@returns        PVRSRV_ERROR 
+*/ /**************************************************************************/
+PVRSRV_ERROR PVRSRVRGXFrameworkCopyCommand(DEVMEM_MEMDESC	*psFWFrameworkMemDesc,
+										   IMG_PBYTE		pbyGPUFRegisterList,
+										   IMG_UINT32		ui32FrameworkRegisterSize);
+
+
+/*! ***********************************************************************//**
+@brief          Create FW addressable buffer for framework
+
+@param          psDeviceNode
+@param          ppsFWFrameworkMemDesc
+@param          ui32FrameworkRegisterSize
+
+@returns        PVRSRV_ERROR 
+*/ /**************************************************************************/
+PVRSRV_ERROR PVRSRVRGXFrameworkCreateKM(PVRSRV_DEVICE_NODE * psDeviceNode,
+										DEVMEM_MEMDESC     ** ppsFWFrameworkMemDesc,
+										IMG_UINT32         ui32FrameworkRegisterSize);
+
+/*************************************************************************/ /*!
+@Function       RGXWaitForFWOp
+
+@Description    Send a sync command and wait to be signaled.
+
+@Input          psDevInfo			Device Info
+@Input          eDM					To which DM the cmd is sent.
+@Input          bPDumpContinuous	
+
+@Return			IMG_VOID
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXWaitForFWOp(PVRSRV_RGXDEV_INFO	*psDevInfo,
+									RGXFWIF_DM	eDM,
+									PVRSRV_CLIENT_SYNC_PRIM *psSyncPrim,
+									IMG_BOOL	bPDumpContinuous);
+
+/*!
+******************************************************************************
+
+ @Function	RGXFWRequestCommonContextCleanUp
+
+ @Description Schedules a FW common context cleanup. The firmware will doesn't
+              block waiting for the resource to become idle but rather notifies
+              the host that the resources is busy.
+
+ @Input psDeviceNode - pointer to device node
+
+ @Input psFWContext - firmware address of the context to be cleaned up
+
+ @Input eDM - Data master, to which the cleanup command should be send
+
+******************************************************************************/
+PVRSRV_ERROR RGXFWRequestCommonContextCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode,
+											  PRGXFWIF_FWCOMMONCONTEXT psFWContext,
+											  PVRSRV_CLIENT_SYNC_PRIM *psSyncPrim,
+											  RGXFWIF_DM eDM);
+
+/*!
+******************************************************************************
+
+ @Function	RGXFWRequestHWRTDataCleanUp
+
+ @Description Schedules a FW HWRTData memory cleanup. The firmware will doesn't
+              block waiting for the resource to become idle but rather notifies
+              the host that the resources is busy.
+
+ @Input psDeviceNode - pointer to device node
+
+ @Input psHWRTData - firmware address of the HWRTData to be cleaned up
+
+ @Input eDM - Data master, to which the cleanup command should be send
+
+ ******************************************************************************/
+PVRSRV_ERROR RGXFWRequestHWRTDataCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode,
+										 PRGXFWIF_HWRTDATA psHWRTData,
+										 PVRSRV_CLIENT_SYNC_PRIM *psSync,
+										 RGXFWIF_DM eDM);
+
+#if defined(RGX_FEATURE_RAY_TRACING)
+
+PVRSRV_ERROR RGXFWRequestRayFrameDataCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode,
+											 PRGXFWIF_RAY_FRAME_DATA psHWFrameData,
+											 PVRSRV_CLIENT_SYNC_PRIM *psSync,
+											 RGXFWIF_DM eDM);
+
+/*!
+******************************************************************************
+
+ @Function	RGXFWRequestRPMFreeListCleanUp
+
+ @Description Schedules a FW RPM FreeList cleanup. The firmware will doesn't block
+              waiting for the resource to become idle but rather notifies the
+              host that the resources is busy.
+
+ @Input psDeviceNode - pointer to device node
+
+ @Input psFWRPMFreeList - firmware address of the RPM freelist to be cleaned up
+
+ @Input psSync - Sync object associated with cleanup
+
+ ******************************************************************************/
+PVRSRV_ERROR RGXFWRequestRPMFreeListCleanUp(PVRSRV_RGXDEV_INFO *psDevInfo,
+											PRGXFWIF_RPM_FREELIST psFWRPMFreeList,
+											PVRSRV_CLIENT_SYNC_PRIM *psSync);
+#endif
+
+/*!
+******************************************************************************
+
+ @Function	RGXFWRequestFreeListCleanUp
+
+ @Description Schedules a FW FreeList cleanup. The firmware will doesn't block
+              waiting for the resource to become idle but rather notifies the
+              host that the resources is busy.
+
+ @Input psDeviceNode - pointer to device node
+
+ @Input psHWRTData - firmware address of the HWRTData to be cleaned up
+
+ @Input eDM - Data master, to which the cleanup command should be send
+
+ ******************************************************************************/
+PVRSRV_ERROR RGXFWRequestFreeListCleanUp(PVRSRV_RGXDEV_INFO *psDeviceNode,
+										 PRGXFWIF_FREELIST psFWFreeList,
+										 PVRSRV_CLIENT_SYNC_PRIM *psSync);
+
+/*!
+******************************************************************************
+
+ @Function	RGXFWRequestZSBufferCleanUp
+
+ @Description Schedules a FW ZS Buffer cleanup. The firmware will doesn't block
+              waiting for the resource to become idle but rather notifies the
+              host that the resources is busy.
+
+ @Input psDeviceNode - pointer to device node
+
+ @Input psFWZSBuffer - firmware address of the ZS Buffer to be cleaned up
+
+ @Input eDM - Data master, to which the cleanup command should be send
+
+ ******************************************************************************/
+
+PVRSRV_ERROR RGXFWRequestZSBufferCleanUp(PVRSRV_RGXDEV_INFO *psDevInfo,
+										 PRGXFWIF_ZSBUFFER psFWZSBuffer,
+										 PVRSRV_CLIENT_SYNC_PRIM *psSync);
+
+PVRSRV_ERROR ContextSetPriority(RGX_SERVER_COMMON_CONTEXT *psContext,
+								CONNECTION_DATA *psConnection,
+								PVRSRV_RGXDEV_INFO *psDevInfo,
+								IMG_UINT32 ui32Priority,
+								RGXFWIF_DM eDM);
+
+/*!
+******************************************************************************
+
+ @Function	RGXReadMETAAddr
+
+ @Description Reads a value at given address in META memory space
+              (it can be either a memory location or a META register)
+
+ @Input psDevInfo - pointer to device info
+
+ @Input ui32METAAddr - address in META memory space
+
+ @Output pui32Value - value
+
+ ******************************************************************************/
+
+PVRSRV_ERROR RGXReadMETAAddr(PVRSRV_RGXDEV_INFO	*psDevInfo,
+                             IMG_UINT32 ui32METAAddr,
+                             IMG_UINT32 *pui32Value);
+
+/*!
+******************************************************************************
+
+ @Function	RGXCheckFirmwareCCBs
+
+ @Description Processes all commands that are found in any firmware CCB.
+
+ @Input psDevInfo - pointer to device
+
+ ******************************************************************************/
+IMG_VOID RGXCheckFirmwareCCBs(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+/*!
+******************************************************************************
+
+ @Function	   RGXUpdateHealthStatus
+
+ @Description  Tests a number of conditions which might indicate a fatal error has
+               occurred in the firmware. The result is stored in the device node
+               eheathStatus.
+
+ @Input        psDevNode              Pointer to device node structure.
+ @Input        bCheckAfterTimePassed  When TRUE, the function will also test for
+                                      firmware queues and polls not changing
+                                      since the previous test.
+                                      
+                                      Note: if not enough time has passed since
+                                      the last call, false positives may occur.
+
+ @returns      PVRSRV_ERROR 
+ ******************************************************************************/
+PVRSRV_ERROR RGXUpdateHealthStatus(PVRSRV_DEVICE_NODE* psDevNode,
+                                   IMG_BOOL bCheckAfterTimePassed);
+
+
+PVRSRV_ERROR CheckStalledClientCommonContext(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext);
+
+IMG_VOID DumpStalledFWCommonContext(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext,
+									DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf);
+
+/*!
+******************************************************************************
+
+ @Function	   AttachKickResourcesCleanupCtls
+
+ @Description  Attaches the cleanup structures to a kick command so that
+               submission reference counting can be performed when the
+               firmware processes the command
+
+ @Output        apsCleanupCtl          Array of CleanupCtl structure pointers to populate.
+ @Output        pui32NumCleanupCtl     Number of CleanupCtl structure pointers written out.
+ @Input         eDM                    Which data master is the subject of the command.
+ @Input         bKick                  TRUE if the client originally wanted to kick this DM.
+ @Input         psRTDataCleanup        Optional RTData cleanup associated with the command.
+ @Input         psZBuffer              Optional ZBuffer associated with the command.
+ @Input         psSBuffer              Optional SBuffer associated with the command.
+ ******************************************************************************/
+IMG_VOID AttachKickResourcesCleanupCtls(PRGXFWIF_CLEANUP_CTL *apsCleanupCtl,
+									IMG_UINT32 *pui32NumCleanupCtl,
+									RGXFWIF_DM eDM,
+									IMG_BOOL bKick,
+									RGX_RTDATA_CLEANUP_DATA        *psRTDataCleanup,
+									RGX_ZSBUFFER_DATA              *psZBuffer,
+									RGX_ZSBUFFER_DATA              *psSBuffer);
+
+/*!
+******************************************************************************
+
+ @Function			RGXResetHWRLogs
+
+ @Description 		Resets the HWR Logs buffer (the hardware recovery count is not reset)
+
+ @Input 			psDevInfo	Pointer to the device
+
+ @Return			PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, a PVRSRV_
+                                	error code
+ ******************************************************************************/
+PVRSRV_ERROR RGXResetHWRLogs(PVRSRV_DEVICE_NODE *psDevNode);
+
+
+#if defined(PDUMP)
+/*!
+******************************************************************************
+
+ @Function                      RGXPdumpDrainKCCB
+
+ @Description                   Wait for the firmware to execute all the commands in the kCCB
+
+ @Input                         psDevInfo	Pointer to the device
+
+ @Input                         ui32WriteOffset	  Woff we have to POL for the Roff to be equal to
+
+ @Input                         eKCCBType	  Data Master of the KCCB
+
+ @Return                        PVRSRV_ERROR	PVRSRV_OK on success. Otherwise, an
+                                                error code
+ ******************************************************************************/
+PVRSRV_ERROR RGXPdumpDrainKCCB(PVRSRV_RGXDEV_INFO *psDevInfo,
+                               IMG_UINT32 ui32WriteOffset,
+                               RGXFWIF_DM eKCCBType);
+#endif /* PDUMP */
+
+
+#endif /* __RGXFWUTILS_H__ */
+/******************************************************************************
+ End of file (rgxfwutils.h)
+******************************************************************************/
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxheapconfig.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxheapconfig.h
new file mode 100644
index 0000000..f29f999
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxheapconfig.h
@@ -0,0 +1,181 @@
+/*************************************************************************/ /*!
+@File
+@Title          device configuration
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Memory heaps device specific configuration
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+//#warning FIXME:  add the MMU specialisation defines here (or in hwdefs, perhaps?)
+
+#ifndef __RGXHEAPCONFIG_H__
+#define __RGXHEAPCONFIG_H__
+
+#include "rgxdefs_km.h"
+
+#define DEV_DEVICE_TYPE			PVRSRV_DEVICE_TYPE_RGX
+#define DEV_DEVICE_CLASS		PVRSRV_DEVICE_CLASS_3D
+
+#define DEV_MAJOR_VERSION		1
+#define DEV_MINOR_VERSION		0
+
+/*      
+	RGX Device Virtual Address Space Definitions:
+
+	Notes:
+	Base addresses have to be a multiple of 4MiB
+	
+	RGX_PDSCODEDATA_HEAP_BASE and RGX_USCCODE_HEAP_BASE will be programmed, on a
+	global basis, into RGX_CR_PDS_EXEC_BASE and RGX_CR_USC_CODE_BASE_*
+	respectively.
+	Therefore if clients use multiple configs they must still be consistent with
+	their definitions for these heaps.
+*/
+
+#if RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS == 40
+
+	/* Start at 128 Kb. Size of 256 Mb */
+//	#define RGX_3DPARAMETERS_HEAP_BASE			IMG_UINT64_C(0x0000020000)
+//  #define RGX_3DPARAMETERS_HEAP_SIZE			IMG_UINT64_C(0x0010000000)
+
+	/* Start at 4GiB. Size of 512 GiB */
+	#define RGX_GENERAL_HEAP_BASE				IMG_UINT64_C(0x0100000000)
+    #define RGX_GENERAL_HEAP_SIZE				IMG_UINT64_C(0x8000000000)
+
+	/* start at 516 GiB. Size of 32 GiB */
+	#define RGX_BIF_TILING_NUM_HEAPS            4
+	#define RGX_BIF_TILING_HEAP_SIZE            IMG_UINT64_C(0x0200000000)
+	#define RGX_BIF_TILING_HEAP_1_BASE          IMG_UINT64_C(0x8100000000)
+	#define RGX_BIF_TILING_HEAP_2_BASE          (RGX_BIF_TILING_HEAP_1_BASE + RGX_BIF_TILING_HEAP_SIZE)
+	#define RGX_BIF_TILING_HEAP_3_BASE          (RGX_BIF_TILING_HEAP_2_BASE + RGX_BIF_TILING_HEAP_SIZE)
+	#define RGX_BIF_TILING_HEAP_4_BASE          (RGX_BIF_TILING_HEAP_3_BASE + RGX_BIF_TILING_HEAP_SIZE)
+
+	/* Start at 600GiB. Size of 4 GiB */
+	#define RGX_PDSCODEDATA_HEAP_BASE			IMG_UINT64_C(0x9600000000)
+    #define RGX_PDSCODEDATA_HEAP_SIZE			IMG_UINT64_C(0x0100000000)
+ 
+	/* Start at 800GiB. Size of 4 GiB */
+	#define RGX_USCCODE_HEAP_BASE				IMG_UINT64_C(0xC800000000)
+    #define RGX_USCCODE_HEAP_SIZE				IMG_UINT64_C(0x0100000000)
+ 
+	/* Start at 903GiB. Size of 4 GiB */
+	#define RGX_FIRMWARE_HEAP_BASE				IMG_UINT64_C(0xE1C0000000)
+    #define RGX_FIRMWARE_HEAP_SIZE				IMG_UINT64_C(0x0100000000)
+
+	/* Start at 912GiB. Size of 16 GiB. 16GB aligned to match RGX_CR_ISP_PIXEL_BASE */
+    #define RGX_TQ3DPARAMETERS_HEAP_BASE		IMG_UINT64_C(0xE400000000)
+    #define RGX_TQ3DPARAMETERS_HEAP_SIZE		IMG_UINT64_C(0x0400000000)
+
+	/* Size of 16 * 4 KB (think about large page systems .. */
+#if defined(FIX_HW_BRN_37200)
+    #define RGX_HWBRN37200_HEAP_BASE				IMG_UINT64_C(0xFFFFF00000)
+    #define RGX_HWBRN37200_HEAP_SIZE				IMG_UINT64_C(0x0000100000)
+#endif
+
+	/* Start at 928GiB. Size of 4 GiB */
+	#define RGX_DOPPLER_HEAP_BASE				IMG_UINT64_C(0xE800000000)
+	#define RGX_DOPPLER_HEAP_SIZE				IMG_UINT64_C(0x0100000000)
+
+	/* Start at 932GiB. Size of 4 GiB */
+	#define RGX_DOPPLER_OVERFLOW_HEAP_BASE		IMG_UINT64_C(0xE900000000)
+	#define RGX_DOPPLER_OVERFLOW_HEAP_SIZE		IMG_UINT64_C(0x0100000000)
+	
+	/* signal we've identified the core by the build */
+	#define RGX_CORE_IDENTIFIED
+#endif /* RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS == 40 */
+
+#if !defined(RGX_CORE_IDENTIFIED)
+	#error "rgxheapconfig.h: ERROR: unspecified RGX Core version"
+#endif
+
+/* /\********************************************************************************* */
+/*  * */
+/*  * Heap overlap check */
+/*  * */
+/*  ********************************************************************************\/ */
+/* #if defined(SUPPORT_RGX_GENERAL_MAPPING_HEAP) */
+/* 	#if ((RGX_GENERAL_MAPPING_HEAP_BASE + RGX_GENERAL_MAPPING_HEAP_SIZE) >= RGX_GENERAL_HEAP_BASE) */
+/* 		#error "rgxheapconfig.h: ERROR: RGX_GENERAL_MAPPING_HEAP overlaps RGX_GENERAL_HEAP" */
+/* 	#endif */
+/* #endif */
+
+/* #if ((RGX_GENERAL_HEAP_BASE + RGX_GENERAL_HEAP_SIZE) >= RGX_3DPARAMETERS_HEAP_BASE) */
+/* 	#error "rgxheapconfig.h: ERROR: RGX_GENERAL_HEAP overlaps RGX_3DPARAMETERS_HEAP" */
+/* #endif */
+
+/* #if ((RGX_3DPARAMETERS_HEAP_BASE + RGX_3DPARAMETERS_HEAP_SIZE) >= RGX_TADATA_HEAP_BASE) */
+/* 	#error "rgxheapconfig.h: ERROR: RGX_3DPARAMETERS_HEAP overlaps RGX_TADATA_HEAP" */
+/* #endif */
+
+/* #if ((RGX_TADATA_HEAP_BASE + RGX_TADATA_HEAP_SIZE) >= RGX_SYNCINFO_HEAP_BASE) */
+/* 	#error "rgxheapconfig.h: ERROR: RGX_TADATA_HEAP overlaps RGX_SYNCINFO_HEAP" */
+/* #endif */
+
+/* #if ((RGX_SYNCINFO_HEAP_BASE + RGX_SYNCINFO_HEAP_SIZE) >= RGX_PDSPIXEL_CODEDATA_HEAP_BASE) */
+/* 	#error "rgxheapconfig.h: ERROR: RGX_SYNCINFO_HEAP overlaps RGX_PDSPIXEL_CODEDATA_HEAP" */
+/* #endif */
+
+/* #if ((RGX_PDSPIXEL_CODEDATA_HEAP_BASE + RGX_PDSPIXEL_CODEDATA_HEAP_SIZE) >= RGX_KERNEL_CODE_HEAP_BASE) */
+/* 	#error "rgxheapconfig.h: ERROR: RGX_PDSPIXEL_CODEDATA_HEAP overlaps RGX_KERNEL_CODE_HEAP" */
+/* #endif */
+
+/* #if ((RGX_KERNEL_CODE_HEAP_BASE + RGX_KERNEL_CODE_HEAP_SIZE) >= RGX_PDSVERTEX_CODEDATA_HEAP_BASE) */
+/* 	#error "rgxheapconfig.h: ERROR: RGX_KERNEL_CODE_HEAP overlaps RGX_PDSVERTEX_CODEDATA_HEAP" */
+/* #endif */
+
+/* #if ((RGX_PDSVERTEX_CODEDATA_HEAP_BASE + RGX_PDSVERTEX_CODEDATA_HEAP_SIZE) >= RGX_KERNEL_DATA_HEAP_BASE) */
+/* 	#error "rgxheapconfig.h: ERROR: RGX_PDSVERTEX_CODEDATA_HEAP overlaps RGX_KERNEL_DATA_HEAP" */
+/* #endif */
+
+/* #if ((RGX_KERNEL_DATA_HEAP_BASE + RGX_KERNEL_DATA_HEAP_SIZE) >= RGX_PIXELSHADER_HEAP_BASE) */
+/* 	#error "rgxheapconfig.h: ERROR: RGX_KERNEL_DATA_HEAP overlaps RGX_PIXELSHADER_HEAP" */
+/* #endif */
+
+/* #if ((RGX_PIXELSHADER_HEAP_BASE + RGX_PIXELSHADER_HEAP_SIZE) >= RGX_VERTEXSHADER_HEAP_BASE) */
+/* 	#error "rgxheapconfig.h: ERROR: RGX_PIXELSHADER_HEAP overlaps RGX_VERTEXSHADER_HEAP" */
+/* #endif */
+
+/* #if ((RGX_VERTEXSHADER_HEAP_BASE + RGX_VERTEXSHADER_HEAP_SIZE) < RGX_VERTEXSHADER_HEAP_BASE) */
+/* 	#error "rgxheapconfig.h: ERROR: RGX_VERTEXSHADER_HEAP_BASE size cause wraparound" */
+/* #endif */
+
+#endif /* __RGXHEAPCONFIG_H__ */
+
+/*****************************************************************************
+ End of file (rgxheapconfig.h)
+*****************************************************************************/
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxhwperf.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxhwperf.c
new file mode 100644
index 0000000..b87122e
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxhwperf.c
@@ -0,0 +1,1760 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX HW Performance implementation
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX HW Performance implementation
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+//#define PVR_DPF_FUNCTION_TRACE_ON 1
+#undef PVR_DPF_FUNCTION_TRACE_ON
+
+#include "pvr_debug.h"
+#include "osfunc.h"
+#include "allocmem.h"
+
+#include "pvrsrv.h"
+#include "tlclient.h"
+#include "tlstream.h"
+
+#include "rgx_hwperf_km.h"
+#include "rgxhwperf.h"
+#include "rgxapi_km.h"
+#include "rgxfwutils.h"
+#include "devicemem_pdump.h"
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+#include "pvr_gputrace.h"
+#endif
+
+#define HWPERF_TL_STREAM_NAME  "hwperf"
+
+/* Defined to ensure HWPerf packets are not delayed */
+#define SUPPORT_TL_PROODUCER_CALLBACK 1
+
+
+/******************************************************************************
+ *
+ *****************************************************************************/
+
+
+/*
+	RGXHWPerfCopyDataL1toL2
+*/
+static IMG_UINT32 RGXHWPerfCopyDataL1toL2(IMG_HANDLE hHWPerfStream,
+										  IMG_BYTE   *pbFwBuffer, 
+										  IMG_UINT32 ui32BytesExp)
+{
+  	IMG_BYTE 	 *pbL2Buffer;
+	IMG_UINT32   ui32L2BufFree;
+	IMG_UINT32   ui32BytesCopied = 0;
+	IMG_UINT32   ui32BytesExpMin = RGX_HWPERF_GET_SIZE(RGX_HWPERF_GET_PACKET(pbFwBuffer));
+	PVRSRV_ERROR eError;
+
+/* HWPERF_MISR_FUNC_DEBUG enables debug code for investigating HWPerf issues */
+#ifdef HWPERF_MISR_FUNC_DEBUG
+	static IMG_UINT32 gui32Ordinal = IMG_UINT32_MAX;
+#endif
+
+	PVR_DPF_ENTERED;
+
+#ifdef HWPERF_MISR_FUNC_DEBUG
+	PVR_DPF((PVR_DBG_VERBOSE, "EVENTS to copy from 0x%p length:%05d",
+							  pbFwBuffer, ui32BytesExp));
+#endif
+
+#ifdef HWPERF_MISR_FUNC_DEBUG
+	{
+		/* Check the incoming buffer of data has not lost any packets */
+ 	 	IMG_BYTE *pbFwBufferIter = pbFwBuffer;
+ 	 	IMG_BYTE *pbFwBufferEnd = pbFwBuffer+ui32BytesExp;
+	 	do
+		{
+			RGX_HWPERF_V2_PACKET_HDR *asCurPos = RGX_HWPERF_GET_PACKET(pbFwBufferIter);
+			IMG_UINT32 ui32CurOrdinal = asCurPos->ui32Ordinal;
+			if (gui32Ordinal != IMG_UINT32_MAX)
+			{
+				if ((gui32Ordinal+1) != ui32CurOrdinal)
+				{
+					if (gui32Ordinal < ui32CurOrdinal)
+					{
+						PVR_DPF((PVR_DBG_WARNING,
+								 "HWPerf [%p] packets lost (%u packets) between ordinal %u...%u",
+								 pbFwBufferIter,
+								 ui32CurOrdinal - gui32Ordinal - 1,
+								 gui32Ordinal,
+								 ui32CurOrdinal));
+					}
+					else
+					{
+						PVR_DPF((PVR_DBG_WARNING,
+								 "HWPerf [%p] packet ordinal out of sequence last: %u, current: %u",
+								  pbFwBufferIter,
+								  gui32Ordinal,
+								  ui32CurOrdinal));
+					}
+				}
+			}
+			gui32Ordinal = asCurPos->ui32Ordinal;
+			pbFwBufferIter += RGX_HWPERF_GET_SIZE(asCurPos);
+		} while( pbFwBufferIter < pbFwBufferEnd );
+	}
+#endif
+
+	/* Try submitting all data in one TL packet. */
+	eError = TLStreamReserve2( hHWPerfStream, 
+							   &pbL2Buffer, 
+							   (IMG_SIZE_T)ui32BytesExp, ui32BytesExpMin,
+							   &ui32L2BufFree);
+	if ( eError == PVRSRV_OK )
+	{
+		OSMemCopy( pbL2Buffer, pbFwBuffer, (IMG_SIZE_T)ui32BytesExp );
+		eError = TLStreamCommit(hHWPerfStream, (IMG_SIZE_T)ui32BytesExp);
+		if ( eError != PVRSRV_OK )
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "TLStreamCommit() failed (%d) in %s(), unable to copy packet from L1 to L2 buffer",
+					 eError, __func__));
+			goto e0;
+		}
+		/* Data were successfully written */
+		ui32BytesCopied = ui32BytesExp;
+	}
+	else if (eError == PVRSRV_ERROR_STREAM_FULL)
+	{
+		/* There was not enough space for all data, copy as much as possible */
+		IMG_UINT32                sizeSum  = 0;
+		RGX_PHWPERF_V2_PACKET_HDR psCurPkt = RGX_HWPERF_GET_PACKET(pbFwBuffer);
+
+		PVR_DPF((PVR_DBG_MESSAGE, "Unable to reserve space (%d) in host buffer on first attempt, remaining free space: %d", ui32BytesExp, ui32L2BufFree));
+
+		/* Traverse the array to find how many packets will fit in the available space. */
+		while ( sizeSum < ui32BytesExp  &&
+				sizeSum + RGX_HWPERF_GET_SIZE(psCurPkt) < ui32L2BufFree )
+		{
+			sizeSum += RGX_HWPERF_GET_SIZE(psCurPkt);
+			psCurPkt = RGX_HWPERF_GET_NEXT_PACKET(psCurPkt);
+		}
+
+		if ( 0 != sizeSum )
+		{
+			eError = TLStreamReserve( hHWPerfStream, &pbL2Buffer, (IMG_SIZE_T)sizeSum);
+
+			if ( eError == PVRSRV_OK )
+			{
+				OSMemCopy( pbL2Buffer, pbFwBuffer, (IMG_SIZE_T)sizeSum );
+				eError = TLStreamCommit(hHWPerfStream, (IMG_SIZE_T)sizeSum);
+				if ( eError != PVRSRV_OK )
+				{
+					PVR_DPF((PVR_DBG_ERROR,
+							 "TLStreamCommit() failed (%d) in %s(), unable to copy packet from L1 to L2 buffer",
+							 eError, __func__));
+					goto e0;
+				}
+				/* sizeSum bytes of hwperf packets have been successfully written */
+				ui32BytesCopied = sizeSum;
+			}
+			else if ( PVRSRV_ERROR_STREAM_FULL == eError )
+			{
+				PVR_DPF((PVR_DBG_WARNING, "Can not write HWPerf packet into host buffer, check data in case of packet loss, remaining free space: %d", ui32L2BufFree));
+			}
+		}
+		else
+		{
+			PVR_DPF((PVR_DBG_MESSAGE, "Can not find space in host buffer, check data in case of packet loss, remaining free space: %d", ui32L2BufFree));
+		}
+	}
+	if ( PVRSRV_OK != eError && /*  Some other error occurred */
+	     PVRSRV_ERROR_STREAM_FULL != eError ) /* Full error handled by caller, we returning the copied bytes count to caller*/
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "HWPerf enabled: Unexpected Error ( %d ) while copying FW buffer to TL buffer.",
+				 eError));
+	}
+
+e0:
+	/* Return the remaining packets left to be transported. */
+	PVR_DPF_RETURN_VAL(ui32BytesCopied);
+}
+
+
+static INLINE IMG_UINT32 RGXHWPerfAdvanceRIdx(
+		const IMG_UINT32 ui32BufSize,
+		const IMG_UINT32 ui32Pos,
+		const IMG_UINT32 ui32Size)
+{
+	return (  ui32Pos + ui32Size < ui32BufSize ? ui32Pos + ui32Size : 0 );
+}
+
+
+/*
+	RGXHWPerfDataStore
+*/
+static IMG_UINT32 RGXHWPerfDataStore(PVRSRV_RGXDEV_INFO	*psDevInfo)
+{
+	RGXFWIF_TRACEBUF    *psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBuf;
+	IMG_BYTE*           psHwPerfInfo = psDevInfo->psRGXFWIfHWPerfBuf;
+	IMG_UINT32			ui32SrcRIdx, ui32SrcWIdx, ui32SrcWrapCount;
+	IMG_UINT32			ui32BytesExp = 0, ui32BytesCopied = 0, ui32BytesCopiedSum = 0;
+#ifdef HWPERF_MISR_FUNC_DEBUG
+	IMG_UINT32			ui32BytesExpSum = 0;
+#endif
+	
+	PVR_DPF_ENTERED;
+
+	/* Caller should check this member is valid before calling */
+	PVR_ASSERT(psDevInfo->hHWPerfStream);
+	
+ 	/* Get a copy of the current
+	 *   read (first packet to read) 
+	 *   write (empty location for the next write to be inserted) 
+	 *   WrapCount (size in bytes of the buffer at or past end)
+	 * indexes of the FW buffer */
+	ui32SrcRIdx = psRGXFWIfTraceBufCtl->ui32HWPerfRIdx;
+	ui32SrcWIdx = psRGXFWIfTraceBufCtl->ui32HWPerfWIdx;
+	OSMemoryBarrier();
+	ui32SrcWrapCount = psRGXFWIfTraceBufCtl->ui32HWPerfWrapCount;
+
+	/* Is there any data in the buffer not yet retrieved? */
+	if ( ui32SrcRIdx != ui32SrcWIdx )
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "RGXHWPerfDataStore EVENTS found srcRIdx:%d srcWIdx: %d ", ui32SrcRIdx, ui32SrcWIdx));
+
+		/* Is the write position higher than the read position? */
+		if ( ui32SrcWIdx > ui32SrcRIdx )
+		{
+			/* Yes, buffer has not wrapped */
+			ui32BytesExp  = ui32SrcWIdx - ui32SrcRIdx;
+#ifdef HWPERF_MISR_FUNC_DEBUG
+			ui32BytesExpSum += ui32BytesExp;
+#endif
+			ui32BytesCopied = RGXHWPerfCopyDataL1toL2(psDevInfo->hHWPerfStream,
+													  psHwPerfInfo + ui32SrcRIdx,
+													  ui32BytesExp);
+			ui32BytesCopiedSum += ui32BytesCopied;
+
+			/* Advance the read index and the free bytes counter by the number
+			 * of bytes transported. Items will be left in buffer if not all data
+			 * could be transported. Exit to allow buffer to drain. */
+			psRGXFWIfTraceBufCtl->ui32HWPerfRIdx = RGXHWPerfAdvanceRIdx(
+					psDevInfo->ui32RGXFWIfHWPerfBufSize, ui32SrcRIdx,
+					ui32BytesCopied);
+		}
+		/* No, buffer has wrapped and write position is behind read position */
+		else
+		{
+			/* Byte count equal to 
+			 *     number of bytes from read position to the end of the buffer, 
+			 *   + data in the extra space in the end of the buffer. */
+			ui32BytesExp = ui32SrcWrapCount - ui32SrcRIdx;
+
+#ifdef HWPERF_MISR_FUNC_DEBUG
+			ui32BytesExpSum += ui32BytesExp;
+#endif
+			/* Attempt to transfer the packets to the TL stream buffer */
+			ui32BytesCopied = RGXHWPerfCopyDataL1toL2(psDevInfo->hHWPerfStream,
+													  psHwPerfInfo + ui32SrcRIdx,
+													  ui32BytesExp);
+			ui32BytesCopiedSum += ui32BytesCopied;
+
+			/* Advance read index as before and Update the local copy of the
+			 * read index as it might be used in the last if branch*/
+			ui32SrcRIdx = RGXHWPerfAdvanceRIdx(
+					psDevInfo->ui32RGXFWIfHWPerfBufSize, ui32SrcRIdx,
+					ui32BytesCopied);
+
+			/* Update Wrap Count */
+			if ( ui32SrcRIdx == 0)
+			{
+				psRGXFWIfTraceBufCtl->ui32HWPerfWrapCount = psDevInfo->ui32RGXFWIfHWPerfBufSize;
+			}
+			psRGXFWIfTraceBufCtl->ui32HWPerfRIdx = ui32SrcRIdx;
+			
+			/* If all the data in the end of the array was copied, try copying
+			 * wrapped data in the beginning of the array, assuming there is
+			 * any and the RIdx was wrapped. */
+			if (   (ui32BytesCopied == ui32BytesExp)
+			    && (ui32SrcWIdx > 0) 
+				&& (ui32SrcRIdx == 0) )
+			{
+				ui32BytesExp = ui32SrcWIdx;
+#ifdef HWPERF_MISR_FUNC_DEBUG
+				ui32BytesExpSum += ui32BytesExp;
+#endif
+				ui32BytesCopied = RGXHWPerfCopyDataL1toL2(psDevInfo->hHWPerfStream,
+														  psHwPerfInfo,
+														  ui32BytesExp);
+				ui32BytesCopiedSum += ui32BytesCopied;
+				/* Advance the FW buffer read position. */
+				psRGXFWIfTraceBufCtl->ui32HWPerfRIdx = RGXHWPerfAdvanceRIdx(
+						psDevInfo->ui32RGXFWIfHWPerfBufSize, ui32SrcRIdx,
+						ui32BytesCopied);
+			}
+		}
+#ifdef HWPERF_MISR_FUNC_DEBUG
+		if (ui32BytesCopiedSum != ui32BytesExpSum)
+		{
+			PVR_DPF((PVR_DBG_WARNING, "RGXHWPerfDataStore: FW L1 RIdx:%u. Not all bytes copied to L2: %u bytes out of %u expected", psRGXFWIfTraceBufCtl->ui32HWPerfRIdx, ui32BytesCopiedSum, ui32BytesExpSum));
+		}
+#endif
+
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_VERBOSE, "RGXHWPerfDataStore NO EVENTS to transport"));
+	}
+
+	PVR_DPF_RETURN_VAL(ui32BytesCopiedSum);
+}
+
+
+PVRSRV_ERROR RGXHWPerfDataStoreCB(PVRSRV_DEVICE_NODE *psDevInfo)
+{
+	PVRSRV_ERROR		eError = PVRSRV_OK;
+	PVRSRV_RGXDEV_INFO* psRgxDevInfo;
+	IMG_UINT32          ui32BytesCopied;
+
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psDevInfo);
+	psRgxDevInfo = psDevInfo->pvDevice;
+
+	if (psRgxDevInfo->hHWPerfStream != 0)
+	{
+		OSLockAcquire(psRgxDevInfo->hLockHWPerfStream);
+		ui32BytesCopied = RGXHWPerfDataStore(psRgxDevInfo);
+		OSLockRelease(psRgxDevInfo->hLockHWPerfStream);
+
+		if ( ui32BytesCopied )
+		{	/* Signal consumers that packets may be available to read when
+		     * running from a HW kick, not when called by client APP thread
+			 * via the transport layer CB as this can lead to stream
+			 * corruption.*/
+			eError = TLStreamSync(psRgxDevInfo->hHWPerfStream);
+			PVR_ASSERT(eError == PVRSRV_OK);
+		}
+        else
+        {
+            PVR_DPF((PVR_DBG_VERBOSE, "RGXHWPerfDataStoreCB: Zero bytes copied from FW L1 to L2."));
+        }
+	}
+
+	PVR_DPF_RETURN_OK;
+}
+
+
+/* Not currently supported by default */
+#if defined(SUPPORT_TL_PROODUCER_CALLBACK)
+static PVRSRV_ERROR RGXHWPerfTLCB(IMG_HANDLE hStream,
+		IMG_UINT32 ui32ReqOp, IMG_UINT32* ui32Resp, IMG_VOID* pvUser)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PVRSRV_RGXDEV_INFO* psRgxDevInfo = (PVRSRV_RGXDEV_INFO*)pvUser;
+
+	PVR_UNREFERENCED_PARAMETER(hStream);
+	PVR_UNREFERENCED_PARAMETER(ui32Resp);
+
+	PVR_ASSERT(psRgxDevInfo);
+
+	switch (ui32ReqOp)
+	{
+	case TL_SOURCECB_OP_CLIENT_EOS:
+		if (psRgxDevInfo->hHWPerfStream != 0)
+		{
+			OSLockAcquire(psRgxDevInfo->hLockHWPerfStream);
+			(void) RGXHWPerfDataStore(psRgxDevInfo);
+			OSLockRelease(psRgxDevInfo->hLockHWPerfStream);
+		}
+		break;
+
+	default:
+		break;
+	}
+
+	return eError;
+}
+#endif
+
+
+/* References to key objects to allow kernel-side behaviour to function
+ * e.g. FTrace and KM interface to HWPerf.
+ */
+static PVRSRV_DEVICE_NODE* gpsRgxDevNode = IMG_NULL;
+static PVRSRV_RGXDEV_INFO* gpsRgxDevInfo = IMG_NULL;
+
+
+PVRSRV_ERROR RGXHWPerfInit(PVRSRV_DEVICE_NODE *psRgxDevNode, IMG_BOOL bEnable)
+{
+	PVRSRV_ERROR eError;
+	IMG_UINT32 ui32L2BufferSize;
+
+	PVR_DPF_ENTERED;
+
+	/* On first call at driver initialisation we get the RGX device,
+	 * in later on-demand calls this parameter is optional. */
+	if (psRgxDevNode)
+	{
+		gpsRgxDevNode = psRgxDevNode;
+		gpsRgxDevInfo = psRgxDevNode->pvDevice;
+	}
+
+	/* Before proper initialisation make sure we have a valid RGX device. */
+	if (!gpsRgxDevInfo)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "HWPerf module not initialised"));
+		PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_DEVICE);
+	}
+
+	/* Initialise first in case of an error condition or if it is not enabled
+	 */
+	gpsRgxDevInfo->hLockHWPerfStream = IMG_NULL;
+	gpsRgxDevInfo->hHWPerfStream = IMG_NULL;
+
+	/* Does the caller want to enable data collection resources? */
+	if (!bEnable)
+	{
+		PVR_DPF_RETURN_OK;
+	}
+
+	/* Create the HWPerf stream lock used for multiple stream writers when
+	 * configured e.g. TL producer callback
+	 */
+	eError = OSLockCreate(&gpsRgxDevInfo->hLockHWPerfStream, LOCK_TYPE_PASSIVE);
+	PVR_LOGR_IF_ERROR(eError, "OSLockCreate");
+
+	/* Host L2 HWPERF buffer size in bytes must be bigger than the L1 buffer
+	 * accessed by the FW. The MISR may try to write one packet the size of the L1
+	 * buffer in some scenarios. When logging is enabled in the MISR, it can be seen
+	 * if the L2 buffer hits a full condition. The closer in size the L2 and L1 buffers
+	 * are the more chance of this happening.
+	 * Size chosen to allow MISR to write an L1 sized packet and for the client
+	 * application/daemon to drain a L1 sized packet e.g. ~ 2xL1+64 working space.
+	 * 
+	 * However in the case of NO_HARDWARE the L2 buffer will not be used.
+	 * By reducing the size of the L2 buffer we can support a larger L1 buffer size
+	 * since on a 32-bit system, vmalloc memory is limited.
+	 */
+#if defined(NO_HARDWARE)
+	ui32L2BufferSize = 0;
+#else
+	ui32L2BufferSize = gpsRgxDevInfo->ui32RGXFWIfHWPerfBufSize<<1;
+#endif
+	eError = TLStreamCreate(&gpsRgxDevInfo->hHWPerfStream, HWPERF_TL_STREAM_NAME,
+					ui32L2BufferSize+RGXFW_HWPERF_L1_PADDING_DEFAULT,
+					TL_FLAG_DROP_DATA | TL_FLAG_NO_SIGNAL_ON_COMMIT,
+#if !defined(SUPPORT_TL_PROODUCER_CALLBACK)
+					IMG_NULL, IMG_NULL
+#else
+                    /* Not enabled  by default */
+					RGXHWPerfTLCB, gpsRgxDevInfo
+#endif
+					);
+
+	PVR_LOGG_IF_ERROR(eError, "TLStreamCreate", e1);
+
+	PVR_DPF_RETURN_OK;
+
+e1:
+	OSLockDestroy(gpsRgxDevInfo->hLockHWPerfStream);
+	gpsRgxDevInfo->hLockHWPerfStream = IMG_NULL;
+	gpsRgxDevInfo->hHWPerfStream = IMG_NULL;
+/* e0: */
+	PVR_DPF_RETURN_RC(eError);
+}
+
+
+IMG_VOID RGXHWPerfDeinit(void)
+{
+	PVR_DPF_ENTERED;
+
+	/* Clean up the stream and lock objects if allocated
+	 */
+	if (gpsRgxDevInfo && gpsRgxDevInfo->hHWPerfStream)
+	{
+		TLStreamClose(gpsRgxDevInfo->hHWPerfStream);
+		gpsRgxDevInfo->hHWPerfStream = IMG_NULL;
+	}
+	if (gpsRgxDevInfo && gpsRgxDevInfo->hLockHWPerfStream)
+	{
+		OSLockDestroy(gpsRgxDevInfo->hLockHWPerfStream);
+		gpsRgxDevInfo->hLockHWPerfStream = IMG_NULL;
+	}
+
+	/* Clear global RGX device reference
+	 */
+	gpsRgxDevInfo = IMG_NULL;
+	gpsRgxDevNode = IMG_NULL;
+
+	PVR_DPF_RETURN;
+}
+
+
+/******************************************************************************
+ * RGX HW Performance Profiling Server API(s)
+ *****************************************************************************/
+/*
+	PVRSRVRGXCtrlHWPerfKM
+*/
+PVRSRV_ERROR PVRSRVRGXCtrlHWPerfKM(
+		PVRSRV_DEVICE_NODE*	psDeviceNode,
+		IMG_BOOL			bToggle,
+		IMG_UINT64 			ui64Mask)
+{
+	PVRSRV_ERROR 		eError = PVRSRV_OK;
+	PVRSRV_RGXDEV_INFO* psDevice;
+	RGXFWIF_KCCB_CMD 	sKccbCmd;
+
+	PVR_DPF_ENTERED;
+	PVR_ASSERT(psDeviceNode);
+	psDevice = psDeviceNode->pvDevice;
+
+	/* If this method is being used whether to enable or disable
+	 * then the hwperf stream is likely to be needed eventually so create it,
+	 * also helps unit testing.
+	 * Stream allocated on demand to reduce RAM foot print on systems not
+	 * needing HWPerf resources.
+	 */
+	if (psDevice->hHWPerfStream == IMG_NULL)
+	{
+		eError = RGXHWPerfInit(psDeviceNode, IMG_TRUE);
+		PVR_LOGR_IF_ERROR(eError, "RGXHWPerfInit");
+	}
+
+	/* Prepare command parameters ...
+	 */
+	sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_CTRL_EVENTS;
+	sKccbCmd.uCmdData.sHWPerfCtrl.bToggle = bToggle;
+	sKccbCmd.uCmdData.sHWPerfCtrl.ui64Mask = ui64Mask;
+
+	/* PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXCtrlHWPerfKM parameters set, calling FW")); */
+
+	/* Ask the FW to carry out the HWPerf configuration command
+	 */
+	eError = RGXScheduleCommand(psDeviceNode->pvDevice,	RGXFWIF_DM_GP, 
+								&sKccbCmd, sizeof(sKccbCmd), IMG_TRUE);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_LOGR_IF_ERROR(eError, "RGXScheduleCommand");
+	}
+
+	/* PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXCtrlHWPerfKM command scheduled for FW")); */
+
+	/* Wait for FW to complete
+	 */
+	eError = RGXWaitForFWOp(psDeviceNode->pvDevice, RGXFWIF_DM_GP, psDeviceNode->psSyncPrim, IMG_TRUE);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_LOGR_IF_ERROR(eError, "RGXWaitForFWOp");
+	}
+
+	/* PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXCtrlHWPerfKM firmware completed")); */
+
+	/* If it was being asked to disable then don't delete the stream as the FW
+	 * will continue to generate events during the disabling phase. Clean up
+	 * will be done when the driver is unloaded.
+	 * The increase in extra memory used by the stream would only occur on a
+	 * developer system and not a production device as a user would never
+	 * enable HWPerf. If this is not the case then a deferred clean system will
+	 * need to be implemented.
+	 */
+	/*if ((!bEnable) && (psDevice->hHWPerfStream))
+	{
+		TLStreamDestroy(psDevice->hHWPerfStream);
+		psDevice->hHWPerfStream = 0;
+	}*/
+
+#if defined(DEBUG)
+	if (bToggle)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "HWPerf events (%llx) have been TOGGLED", ui64Mask));
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_WARNING, "HWPerf mask has been SET to (%llx)", ui64Mask));
+	}
+#endif
+
+	PVR_DPF_RETURN_OK;
+}
+
+
+/*
+	PVRSRVRGXEnableHWPerfCountersKM
+*/
+PVRSRV_ERROR PVRSRVRGXConfigEnableHWPerfCountersKM(
+		PVRSRV_DEVICE_NODE* 		psDeviceNode,
+		IMG_UINT32 					ui32ArrayLen,
+		RGX_HWPERF_CONFIG_CNTBLK* 	psBlockConfigs)
+{
+	PVRSRV_ERROR 		eError = PVRSRV_OK;
+	RGXFWIF_KCCB_CMD 	sKccbCmd;
+	DEVMEM_MEMDESC*		psFwBlkConfigsMemDesc;
+	RGX_HWPERF_CONFIG_CNTBLK* psFwArray;
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psDeviceNode);
+	PVR_ASSERT(ui32ArrayLen>0);
+	PVR_ASSERT(psBlockConfigs);
+
+	/* Fill in the command structure with the parameters needed
+	 */
+	sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS;
+	sKccbCmd.uCmdData.sHWPerfCfgEnableBlks.ui32NumBlocks = ui32ArrayLen;
+
+	eError = DevmemFwAllocate(psDeviceNode->pvDevice,
+			sizeof(RGX_HWPERF_CONFIG_CNTBLK)*ui32ArrayLen, 
+			PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+									  PVRSRV_MEMALLOCFLAG_GPU_READABLE | 
+					                  PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+									  PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+									  PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | 
+									  PVRSRV_MEMALLOCFLAG_UNCACHED |
+									  PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC,
+			"HWPerfCountersConfigBlock",
+			&psFwBlkConfigsMemDesc);
+	if (eError != PVRSRV_OK)
+		PVR_LOGR_IF_ERROR(eError, "DevmemFwAllocate");
+
+	RGXSetFirmwareAddress(&sKccbCmd.uCmdData.sHWPerfCfgEnableBlks.pasBlockConfigs,
+			psFwBlkConfigsMemDesc, 0, 0);
+
+	eError = DevmemAcquireCpuVirtAddr(psFwBlkConfigsMemDesc, (IMG_VOID **)&psFwArray);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_LOGG_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", fail1);
+	}
+
+	OSMemCopy(psFwArray, psBlockConfigs, sizeof(RGX_HWPERF_CONFIG_CNTBLK)*ui32ArrayLen);
+	DevmemPDumpLoadMem(psFwBlkConfigsMemDesc,
+						0,
+						sizeof(RGX_HWPERF_CONFIG_CNTBLK)*ui32ArrayLen,
+						0);
+
+	/* PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXConfigEnableHWPerfCountersKM parameters set, calling FW")); */
+
+	/* Ask the FW to carry out the HWPerf configuration command
+	 */
+	eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+			RGXFWIF_DM_GP, &sKccbCmd, sizeof(sKccbCmd), IMG_TRUE);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_LOGG_IF_ERROR(eError, "RGXScheduleCommand", fail2);
+	}
+
+	/* PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXConfigEnableHWPerfCountersKM command scheduled for FW")); */
+
+	/* Wait for FW to complete */
+	eError = RGXWaitForFWOp(psDeviceNode->pvDevice, RGXFWIF_DM_GP, psDeviceNode->psSyncPrim, IMG_TRUE);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_LOGG_IF_ERROR(eError, "RGXWaitForFWOp", fail2);
+	}
+
+	/* Release temporary memory used for block configuration
+	 */
+	RGXUnsetFirmwareAddress(psFwBlkConfigsMemDesc);
+	DevmemReleaseCpuVirtAddr(psFwBlkConfigsMemDesc);
+	DevmemFwFree(psFwBlkConfigsMemDesc);
+
+	/* PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXConfigEnableHWPerfCountersKM firmware completed")); */
+
+	PVR_DPF((PVR_DBG_WARNING, "HWPerf %d counter blocks configured and ENABLED",  ui32ArrayLen));
+
+	PVR_DPF_RETURN_OK;
+
+fail2:
+	DevmemReleaseCpuVirtAddr(psFwBlkConfigsMemDesc);
+fail1:
+	RGXUnsetFirmwareAddress(psFwBlkConfigsMemDesc);
+	DevmemFwFree(psFwBlkConfigsMemDesc);
+
+	PVR_DPF_RETURN_RC(eError);
+}
+
+
+/*
+	PVRSRVRGXConfigCustomCountersReadingHWPerfKM
+ */
+PVRSRV_ERROR PVRSRVRGXConfigCustomCountersKM(
+		PVRSRV_DEVICE_NODE*     psDeviceNode,
+		IMG_UINT16              ui16CustomBlockID,
+		IMG_UINT16              ui16NumCustomCounters,
+		IMG_UINT32*             pui32CustomCounterIDs)
+{
+	PVRSRV_ERROR        eError = PVRSRV_OK;
+	RGXFWIF_KCCB_CMD    sKccbCmd;
+	DEVMEM_MEMDESC*     psFwSelectCntrsMemDesc = IMG_NULL;
+	IMG_UINT32*         psFwArray;
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psDeviceNode);
+
+	PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVRGXSelectCustomCountersKM: configure block %u to read %u counters", ui16CustomBlockID, ui16NumCustomCounters));
+
+	/* Fill in the command structure with the parameters needed */
+	sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_SELECT_CUSTOM_CNTRS;
+	sKccbCmd.uCmdData.sHWPerfSelectCstmCntrs.ui16NumCounters = ui16NumCustomCounters;
+	sKccbCmd.uCmdData.sHWPerfSelectCstmCntrs.ui16CustomBlock = ui16CustomBlockID;
+
+	if (ui16NumCustomCounters > 0)
+	{
+		PVR_ASSERT(pui32CustomCounterIDs);
+
+		eError = DevmemFwAllocate(psDeviceNode->pvDevice,
+				sizeof(IMG_UINT32) * ui16NumCustomCounters,
+				PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+				PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+				PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+				PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+				PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+				PVRSRV_MEMALLOCFLAG_UNCACHED |
+				PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC,
+				"HWPerfConfigCustomCounters",
+				&psFwSelectCntrsMemDesc);
+		if (eError != PVRSRV_OK)
+			PVR_LOGR_IF_ERROR(eError, "DevmemFwAllocate");
+
+		RGXSetFirmwareAddress(&sKccbCmd.uCmdData.sHWPerfSelectCstmCntrs.pui32CustomCounterIDs,
+				psFwSelectCntrsMemDesc, 0, 0);
+
+		eError = DevmemAcquireCpuVirtAddr(psFwSelectCntrsMemDesc, (IMG_VOID **)&psFwArray);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_LOGG_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", fail1);
+		}
+
+		OSMemCopy(psFwArray, pui32CustomCounterIDs, sizeof(IMG_UINT32) * ui16NumCustomCounters);
+		DevmemPDumpLoadMem(psFwSelectCntrsMemDesc,
+				0,
+				sizeof(IMG_UINT32) * ui16NumCustomCounters,
+				0);
+	}
+
+	/* Push in the KCCB the command to configure the custom counters block */
+	eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+			RGXFWIF_DM_GP, &sKccbCmd, sizeof(sKccbCmd), IMG_TRUE);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_LOGG_IF_ERROR(eError, "RGXScheduleCommand", fail2);
+	}
+	PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXSelectCustomCountersKM: Command scheduled"));
+
+	/* Wait for FW to complete */
+	eError = RGXWaitForFWOp(psDeviceNode->pvDevice, RGXFWIF_DM_GP, psDeviceNode->psSyncPrim, IMG_TRUE);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_LOGG_IF_ERROR(eError, "RGXWaitForFWOp", fail2);
+	}
+	PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXSelectCustomCountersKM: FW operation completed"));
+
+	if (ui16NumCustomCounters > 0)
+	{
+		/* Release temporary memory used for block configuration */
+		RGXUnsetFirmwareAddress(psFwSelectCntrsMemDesc);
+		DevmemReleaseCpuVirtAddr(psFwSelectCntrsMemDesc);
+		DevmemFwFree(psFwSelectCntrsMemDesc);
+	}
+
+	PVR_DPF((PVR_DBG_MESSAGE, "HWPerf custom counters %u reading will be sent with the next HW events", ui16NumCustomCounters));
+
+	PVR_DPF_RETURN_OK;
+
+	fail2:
+	if (psFwSelectCntrsMemDesc) DevmemReleaseCpuVirtAddr(psFwSelectCntrsMemDesc);
+
+	fail1:
+	if (psFwSelectCntrsMemDesc) 
+	{
+		RGXUnsetFirmwareAddress(psFwSelectCntrsMemDesc);
+		DevmemFwFree(psFwSelectCntrsMemDesc);
+	}
+	
+	PVR_DPF_RETURN_RC(eError);
+}
+/*
+	PVRSRVRGXDisableHWPerfcountersKM
+*/
+PVRSRV_ERROR PVRSRVRGXCtrlHWPerfCountersKM(
+		PVRSRV_DEVICE_NODE*		psDeviceNode,
+		IMG_BOOL				bEnable,
+	    IMG_UINT32 				ui32ArrayLen,
+	    IMG_UINT16*				psBlockIDs)
+{
+	PVRSRV_ERROR 		eError = PVRSRV_OK;
+	RGXFWIF_KCCB_CMD 	sKccbCmd;
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psDeviceNode);
+	PVR_ASSERT(ui32ArrayLen>0);
+	PVR_ASSERT(ui32ArrayLen<=RGXFWIF_HWPERF_CTRL_BLKS_MAX);
+	PVR_ASSERT(psBlockIDs);
+
+	/* Fill in the command structure with the parameters needed
+	 */
+	sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_CTRL_BLKS;
+	sKccbCmd.uCmdData.sHWPerfCtrlBlks.bEnable = bEnable;
+	sKccbCmd.uCmdData.sHWPerfCtrlBlks.ui32NumBlocks = ui32ArrayLen;
+	OSMemCopy(sKccbCmd.uCmdData.sHWPerfCtrlBlks.aeBlockIDs, psBlockIDs, sizeof(IMG_UINT16)*ui32ArrayLen);
+
+	/* PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXCtrlHWPerfCountersKM parameters set, calling FW")); */
+
+	/* Ask the FW to carry out the HWPerf configuration command
+	 */
+	eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+			RGXFWIF_DM_GP, &sKccbCmd, sizeof(sKccbCmd), IMG_TRUE);
+	if (eError != PVRSRV_OK)
+		PVR_LOGR_IF_ERROR(eError, "RGXScheduleCommand");
+
+	/* PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXCtrlHWPerfCountersKM command scheduled for FW")); */
+
+	/* Wait for FW to complete */
+	eError = RGXWaitForFWOp(psDeviceNode->pvDevice, RGXFWIF_DM_GP, psDeviceNode->psSyncPrim, IMG_TRUE);
+	if (eError != PVRSRV_OK)
+		PVR_LOGR_IF_ERROR(eError, "RGXWaitForFWOp");
+
+	/* PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXCtrlHWPerfCountersKM firmware completed")); */
+
+#if defined(DEBUG)
+	if (bEnable)
+		PVR_DPF((PVR_DBG_WARNING, "HWPerf %d counter blocks have been ENABLED",  ui32ArrayLen));
+	else
+		PVR_DPF((PVR_DBG_WARNING, "HWPerf %d counter blocks have been DISABLED",  ui32ArrayLen));
+#endif
+
+	PVR_DPF_RETURN_OK;
+}
+
+
+/******************************************************************************
+ * SUPPORT_GPUTRACE_EVENTS
+ *
+ * Currently only implemented on Linux and Android. Feature can be enabled on
+ * Android builds but can also be enabled on Linux builds for testing
+ * but requires the gpu.h FTrace event header file to be present.
+ *****************************************************************************/
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+
+
+static POS_LOCK hFTraceLock;
+static IMG_VOID RGXHWPerfFTraceCmdCompleteNotify(PVRSRV_CMDCOMP_HANDLE);
+
+static PVRSRV_ERROR RGXHWPerfFTraceGPUEnable(void)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(gpsRgxDevNode && gpsRgxDevInfo);
+
+	/* In the case where the AppHint has not been set we need to
+	 * initialise the host driver HWPerf resources here. Allocated on
+	 * demand to reduce RAM foot print on systems not needing HWPerf.
+	 * Signal FW to enable event generation.
+	 */
+	if (gpsRgxDevNode->psSyncPrim)
+	{
+		eError = PVRSRVRGXCtrlHWPerfKM(gpsRgxDevNode, IMG_FALSE, RGX_HWPERF_EVENT_MASK_HW_KICKFINISH);
+		PVR_LOGG_IF_ERROR(eError, "PVRSRVRGXCtrlHWPerfKM", err_out);
+	}
+
+	/* Connect to the TL Stream for HWPerf data consumption */
+	eError = TLClientConnect(&gpsRgxDevInfo->hGPUTraceTLConnection);
+	PVR_LOGG_IF_ERROR(eError, "TLClientConnect", err_out);
+
+	eError = TLClientOpenStream(gpsRgxDevInfo->hGPUTraceTLConnection,
+								HWPERF_TL_STREAM_NAME,
+								PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING,
+								&gpsRgxDevInfo->hGPUTraceTLStream);
+	PVR_LOGG_IF_ERROR(eError, "TLClientOpenStream", err_disconnect);
+
+	/* Register a notifier to collect HWPerf data whenever the HW completes
+	 * an operation.
+	 */
+	eError = PVRSRVRegisterCmdCompleteNotify(
+		&gpsRgxDevInfo->hGPUTraceCmdCompleteHandle,
+		&RGXHWPerfFTraceCmdCompleteNotify,
+		gpsRgxDevInfo);
+	PVR_LOGG_IF_ERROR(eError, "PVRSRVRegisterCmdCompleteNotify", err_close_stream);
+
+	/* Reset the OS timestamp coming from the timer correlation data
+	 * associated with the latest HWPerf event we processed.
+	 */
+	gpsRgxDevInfo->ui64LastSampledTimeCorrOSTimeStamp = 0;
+
+	gpsRgxDevInfo->bFTraceGPUEventsEnabled = IMG_TRUE;
+
+err_out:
+    PVR_DPF_RETURN_RC(eError);
+
+err_close_stream:
+	TLClientCloseStream(gpsRgxDevInfo->hGPUTraceTLConnection,
+						gpsRgxDevInfo->hGPUTraceTLStream);
+err_disconnect:
+	TLClientDisconnect(gpsRgxDevInfo->hGPUTraceTLConnection);
+	goto err_out;
+}
+
+static PVRSRV_ERROR RGXHWPerfFTraceGPUDisable(IMG_BOOL bDeInit)
+{
+    PVRSRV_ERROR eError = PVRSRV_OK;
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(gpsRgxDevNode && gpsRgxDevInfo);
+
+	OSLockAcquire(hFTraceLock);
+
+	if (!bDeInit)
+	{
+		eError = PVRSRVRGXCtrlHWPerfKM(gpsRgxDevNode, IMG_FALSE, (RGX_HWPERF_EVENT_MASK_NONE));
+		PVR_LOG_IF_ERROR(eError, "PVRSRVRGXCtrlHWPerfKM");
+	}
+
+
+	if (gpsRgxDevInfo->hGPUTraceCmdCompleteHandle)
+	{
+		/* Tracing is being turned off. Unregister the notifier. */
+		eError = PVRSRVUnregisterCmdCompleteNotify(
+				gpsRgxDevInfo->hGPUTraceCmdCompleteHandle);
+		PVR_LOG_IF_ERROR(eError, "PVRSRVUnregisterCmdCompleteNotify");
+		gpsRgxDevInfo->hGPUTraceCmdCompleteHandle = IMG_NULL;
+	}
+
+	if (gpsRgxDevInfo->hGPUTraceTLStream)
+	{
+		IMG_PBYTE pbTmp = NULL;
+		IMG_UINT32 ui32Tmp = 0;
+
+		/* We have to flush both the L1 (FW) and L2 (Host) buffers in case there
+		 * are some events left unprocessed in this FTrace/systrace "session"
+		 * (note that even if we have just disabled HWPerf on the FW some packets
+		 * could have been generated and already copied to L2 by the MISR handler).
+		 *
+		 * With the following calls we will both copy new data to the Host buffer
+		 * (done by the producer callback in TLClientAcquireData) and advance
+		 * the read offset in the buffer to catch up with the latest events.
+		 */
+		eError = TLClientAcquireData(gpsRgxDevInfo->hGPUTraceTLConnection,
+		                             gpsRgxDevInfo->hGPUTraceTLStream,
+		                             &pbTmp, &ui32Tmp);
+		PVR_LOG_IF_ERROR(eError, "TLClientCloseStream");
+
+		/* Let close stream perform the release data on the outstanding acquired data */
+		eError = TLClientCloseStream(gpsRgxDevInfo->hGPUTraceTLConnection,
+		                             gpsRgxDevInfo->hGPUTraceTLStream);
+		PVR_LOG_IF_ERROR(eError, "TLClientCloseStream");
+
+		gpsRgxDevInfo->hGPUTraceTLStream = IMG_NULL;
+	}
+
+	if (gpsRgxDevInfo->hGPUTraceTLConnection)
+	{
+		eError = TLClientDisconnect(gpsRgxDevInfo->hGPUTraceTLConnection);
+		PVR_LOG_IF_ERROR(eError, "TLClientDisconnect");
+		gpsRgxDevInfo->hGPUTraceTLConnection = IMG_NULL;
+	}
+
+	gpsRgxDevInfo->bFTraceGPUEventsEnabled = IMG_FALSE;
+
+	OSLockRelease(hFTraceLock);
+
+    PVR_DPF_RETURN_RC(eError);
+}
+
+PVRSRV_ERROR RGXHWPerfFTraceGPUEventsEnabledSet(IMG_BOOL bNewValue)
+{
+	IMG_BOOL bOldValue;
+    PVRSRV_ERROR eError = PVRSRV_OK;
+
+	PVR_DPF_ENTERED;
+
+	if (!gpsRgxDevInfo)
+	{
+		/* RGXHWPerfFTraceGPUInit hasn't been called yet -- it's too early
+		 * to enable tracing.
+		 */
+        eError = PVRSRV_ERROR_NO_DEVICEDATA_FOUND;
+        PVR_DPF_RETURN_RC(eError);
+    }
+
+	bOldValue = gpsRgxDevInfo->bFTraceGPUEventsEnabled;
+
+	if (bOldValue != bNewValue)
+	{
+		if (bNewValue)
+		{
+            eError = RGXHWPerfFTraceGPUEnable();
+		}
+		else
+		{
+            eError = RGXHWPerfFTraceGPUDisable(IMG_FALSE);
+		}
+	}
+
+    PVR_DPF_RETURN_RC(eError);
+}
+
+PVRSRV_ERROR PVRGpuTraceEnabledSet(IMG_BOOL bNewValue)
+{
+    PVRSRV_ERROR eError = PVRSRV_OK;
+
+	/* Lock down because we need to protect
+	 * RGXHWPerfFTraceGPUDisable()/RGXHWPerfFTraceGPUEnable()
+	 */
+	OSAcquireBridgeLock();
+    eError = RGXHWPerfFTraceGPUEventsEnabledSet(bNewValue);
+	OSReleaseBridgeLock();
+
+    PVR_DPF_RETURN_RC(eError);
+}
+
+IMG_BOOL RGXHWPerfFTraceGPUEventsEnabled(IMG_VOID)
+{
+	return(gpsRgxDevInfo->bFTraceGPUEventsEnabled);
+}
+
+IMG_BOOL PVRGpuTraceEnabled(IMG_VOID)
+{
+	return (RGXHWPerfFTraceGPUEventsEnabled());
+}
+
+IMG_VOID RGXHWPerfFTraceGPUEnqueueEvent(PVRSRV_RGXDEV_INFO *psDevInfo,
+		IMG_UINT32 ui32ExternalJobRef, IMG_UINT32 ui32InternalJobRef,
+		const IMG_CHAR* pszJobType)
+{
+	IMG_UINT32   ui32PID = OSGetCurrentProcessID();
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(pszJobType);
+
+	PVR_DPF((PVR_DBG_VERBOSE, "RGXHWPerfFTraceGPUEnqueueEvent: PID %u, external jobRef %u, internal jobRef %u", ui32PID, ui32ExternalJobRef, ui32InternalJobRef));
+
+	PVRGpuTraceClientWork(ui32PID, ui32ExternalJobRef, ui32InternalJobRef, pszJobType);
+
+	PVR_DPF_RETURN;
+}
+
+
+static IMG_VOID RGXHWPerfFTraceGPUSwitchEvent(PVRSRV_RGXDEV_INFO *psDevInfo,
+		RGX_HWPERF_V2_PACKET_HDR* psHWPerfPkt, const IMG_CHAR* pszWorkName,
+		PVR_GPUTRACE_SWITCH_TYPE eSwType)
+{
+	IMG_UINT64 ui64Timestamp;
+	RGX_HWPERF_HW_DATA_FIELDS* psHWPerfPktData;
+	RGXFWIF_GPU_UTIL_FWCB *psGpuUtilFWCB = psDevInfo->psRGXFWIfGpuUtilFWCb;
+	RGXFWIF_TIME_CORR *psTimeCorr;
+	IMG_UINT32 ui32CRDeltaToOSDeltaKNs;
+	IMG_UINT64 ui64CRTimeStamp;
+	IMG_UINT64 ui64OSTimeStamp;
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psHWPerfPkt);
+	PVR_ASSERT(pszWorkName);
+
+	psHWPerfPktData = (RGX_HWPERF_HW_DATA_FIELDS*) RGX_HWPERF_GET_PACKET_DATA_BYTES(psHWPerfPkt);
+
+	/* Filter out 3DFINISH events for 3DTQKICKs which have already been
+	 * filtered by ValidFTraceEvent() */
+
+	/* Calculate the OS timestamp given an RGX timestamp in the HWPerf event */
+	psTimeCorr              = &psGpuUtilFWCB->sTimeCorr[psHWPerfPktData->ui32TimeCorrIndex];
+	ui64CRTimeStamp         = psTimeCorr->ui64CRTimeStamp;
+	ui64OSTimeStamp         = psTimeCorr->ui64OSTimeStamp;
+	ui32CRDeltaToOSDeltaKNs = psTimeCorr->ui32CRDeltaToOSDeltaKNs;
+
+	if(psDevInfo->ui64LastSampledTimeCorrOSTimeStamp > ui64OSTimeStamp)
+	{
+		/* The previous packet had a time reference (time correlation data) more recent
+		 * than the one in the current packet, it means the timer correlation array wrapped
+		 * too quickly (buffer too small) and in the previous call to RGXHWPerfFTraceGPUSwitchEvent
+		 * we read one of the newest timer correlations rather than one of the oldest ones.
+		 */
+		PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfFTraceGPUSwitchEvent: The timestamps computed so far could be wrong! "
+		                        "The time correlation array size should be increased to avoid this."));
+	}
+	psDevInfo->ui64LastSampledTimeCorrOSTimeStamp = ui64OSTimeStamp;
+
+	{
+		IMG_UINT64 deltaRgxTimer = psHWPerfPkt->ui64RGXTimer - ui64CRTimeStamp;  /* RGX CR timer ticks delta */
+		IMG_UINT64 delta_nS =
+		    RGXFWIF_GET_DELTA_OSTIME_NS(deltaRgxTimer, ui32CRDeltaToOSDeltaKNs); /* RGX time delta in nS */
+		ui64Timestamp = ui64OSTimeStamp + delta_nS;                              /* Calculate OS time of HWPerf event */
+
+		PVR_DPF((PVR_DBG_VERBOSE, "RGXHWPerfFTraceGPUSwitchEvent: psCurrentDvfs RGX %llu, OS %llu, DVFSCLK %u",
+		         ui64CRTimeStamp, ui64OSTimeStamp, psTimeCorr->ui32CoreClockSpeed ));
+	}
+
+	PVR_DPF((PVR_DBG_VERBOSE, "RGXHWPerfFTraceGPUSwitchEvent: %s ui32ExtJobRef=%d, ui32IntJobRef=%d, eSwType=%d",
+			pszWorkName, psHWPerfPktData->ui32ExtJobRef, psHWPerfPktData->ui32IntJobRef, eSwType));
+
+	PVRGpuTraceWorkSwitch(ui64Timestamp, psHWPerfPktData->ui32PID,
+			psHWPerfPktData->ui32ExtJobRef, psHWPerfPktData->ui32IntJobRef,
+			pszWorkName, eSwType);
+
+	PVR_DPF_RETURN;
+}
+
+
+static IMG_BOOL ValidFTraceEvent(RGX_HWPERF_V2_PACKET_HDR* psHWPerfPkt,
+		IMG_PCHAR* ppszWorkName, PVR_GPUTRACE_SWITCH_TYPE* peSwType)
+{
+	RGX_HWPERF_EVENT_TYPE eType;
+	static const struct {
+		IMG_CHAR* pszName;
+		PVR_GPUTRACE_SWITCH_TYPE eSwType;
+	} aszEventTypeMap[] = {
+			{ /* RGX_HWPERF_HW_TAKICK */       "TA",     PVR_GPUTRACE_SWITCH_TYPE_BEGIN },
+			{ /* RGX_HWPERF_HW_TAFINISHED */   "TA",     PVR_GPUTRACE_SWITCH_TYPE_END },
+			{ /* RGX_HWPERF_HW_3DTQKICK */     "TQ3D",   PVR_GPUTRACE_SWITCH_TYPE_BEGIN },
+			{ /* RGX_HWPERF_HW_3DKICK */       "3D",     PVR_GPUTRACE_SWITCH_TYPE_BEGIN },
+			{ /* RGX_HWPERF_HW_3DFINISHED */   "3D",     PVR_GPUTRACE_SWITCH_TYPE_END },
+			{ /* RGX_HWPERF_HW_CDMKICK */      "CDM",    PVR_GPUTRACE_SWITCH_TYPE_BEGIN },
+			{ /* RGX_HWPERF_HW_CDMFINISHED */  "CDM",    PVR_GPUTRACE_SWITCH_TYPE_END },
+			{ /* RGX_HWPERF_HW_TLAKICK */      "TQ2D",   PVR_GPUTRACE_SWITCH_TYPE_BEGIN },
+			{ /* RGX_HWPERF_HW_TLAFINISHED */  "TQ2D",   PVR_GPUTRACE_SWITCH_TYPE_END },
+			{ /* RGX_HWPERF_HW_3DSPMKICK */    "3DSPM",  PVR_GPUTRACE_SWITCH_TYPE_BEGIN },
+			{ /* RGX_HWPERF_HW_PERIODIC */     IMG_NULL, 0 }, /* PERIODIC not supported */
+			{ /* RGX_HWPERF_HW_RTUKICK */      "RTU",    PVR_GPUTRACE_SWITCH_TYPE_BEGIN },
+			{ /* RGX_HWPERF_HW_RTUFINISHED */  "RTU",    PVR_GPUTRACE_SWITCH_TYPE_END },
+			{ /* RGX_HWPERF_HW_SHGKICK */      "SHG",    PVR_GPUTRACE_SWITCH_TYPE_BEGIN },
+			{ /* RGX_HWPERF_HW_SHGFINISHED */  "SHG",    PVR_GPUTRACE_SWITCH_TYPE_END },
+			{ /* RGX_HWPERF_HW_3DTQFINISHED */ "TQ3D",   PVR_GPUTRACE_SWITCH_TYPE_END },
+			{ /* RGX_HWPERF_HW_3DSPMFINISHED */ "3DSPM", PVR_GPUTRACE_SWITCH_TYPE_END },
+	};
+
+	PVR_ASSERT(psHWPerfPkt);
+
+	eType = RGX_HWPERF_GET_TYPE(psHWPerfPkt);
+	if ((eType < RGX_HWPERF_HW_TAKICK) || (eType > RGX_HWPERF_HW_3DSPMFINISHED))
+	{
+		/* No map entry, ignore event */
+		PVR_DPF((PVR_DBG_VERBOSE, "ValidFTraceEvent: Unsupported event type %d %02d",
+			eType, eType+RGX_HWPERF_HW_TAKICK)); 
+		return IMG_FALSE;
+	}
+	eType-=RGX_HWPERF_HW_TAKICK;
+
+	if (aszEventTypeMap[eType].pszName == IMG_NULL)
+	{
+		/* Not supported map entry, ignore event */
+		PVR_DPF((PVR_DBG_VERBOSE, "ValidFTraceEventl: Unsupported event type %d %02d",
+			eType, eType+RGX_HWPERF_HW_TAKICK)); 
+		return IMG_FALSE;
+	}
+
+	*ppszWorkName = aszEventTypeMap[eType].pszName;
+	*peSwType = aszEventTypeMap[eType].eSwType;
+
+	return IMG_TRUE;
+}
+
+
+static IMG_VOID RGXHWPerfFTraceGPUThreadProcessPackets(PVRSRV_RGXDEV_INFO *psDevInfo,
+		IMG_PBYTE pBuffer, IMG_UINT32 ui32ReadLen)
+{
+	IMG_UINT32			ui32TlPackets = 0;
+	IMG_UINT32          ui32HWPerfPackets = 0;
+	IMG_UINT32          ui32HWPerfPacketsSent = 0;
+	IMG_PBYTE			pBufferEnd;
+	PVRSRVTL_PPACKETHDR psHDRptr;
+	PVRSRVTL_PACKETTYPE ui16TlType;
+
+	PVR_DPF_ENTERED;
+
+	PVR_ASSERT(psDevInfo);
+	PVR_ASSERT(pBuffer);
+	PVR_ASSERT(ui32ReadLen);
+
+	/* Process the TL Packets
+	 */
+	pBufferEnd = pBuffer+ui32ReadLen;
+	psHDRptr = GET_PACKET_HDR(pBuffer);
+	while ( psHDRptr < (PVRSRVTL_PPACKETHDR)pBufferEnd )
+	{
+		ui16TlType = GET_PACKET_TYPE(psHDRptr);
+		if (ui16TlType == PVRSRVTL_PACKETTYPE_DATA)
+		{
+			IMG_UINT16 ui16DataLen = GET_PACKET_DATA_LEN(psHDRptr);
+			if (0 == ui16DataLen)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfFTraceGPUThreadProcessPackets: ZERO Data in TL data packet: %p", psHDRptr));
+			}
+			else
+			{
+				RGX_HWPERF_V2_PACKET_HDR* psHWPerfPkt;
+				RGX_HWPERF_V2_PACKET_HDR* psHWPerfEnd;
+				IMG_CHAR* pszWorkName;
+				PVR_GPUTRACE_SWITCH_TYPE eSwType;
+
+				/* Check for lost hwperf data packets */
+				psHWPerfEnd = RGX_HWPERF_GET_PACKET(GET_PACKET_DATA_PTR(psHDRptr)+ui16DataLen);
+				psHWPerfPkt = RGX_HWPERF_GET_PACKET(GET_PACKET_DATA_PTR(psHDRptr));
+				do
+				{
+					if (ValidFTraceEvent(psHWPerfPkt, &pszWorkName, &eSwType))
+					{
+						RGXHWPerfFTraceGPUSwitchEvent(psDevInfo, psHWPerfPkt, pszWorkName, eSwType);
+						ui32HWPerfPacketsSent++;
+					}
+					ui32HWPerfPackets++;
+					psHWPerfPkt = RGX_HWPERF_GET_NEXT_PACKET(psHWPerfPkt);
+				}
+				while (psHWPerfPkt < psHWPerfEnd);
+			}
+		}
+		else if (ui16TlType == PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED)
+		{
+			PVR_DPF((PVR_DBG_MESSAGE, "RGXHWPerfFTraceGPUThreadProcessPackets: Indication that the transport buffer was full"));
+		}
+		else
+		{
+			/* else Ignore padding packet type and others */
+			PVR_DPF((PVR_DBG_MESSAGE, "RGXHWPerfFTraceGPUThreadProcessPackets: Ignoring TL packet, type %d", ui16TlType ));
+		}
+
+		psHDRptr = GET_NEXT_PACKET_ADDR(psHDRptr);
+		ui32TlPackets++;
+	}
+
+	PVR_DPF((PVR_DBG_VERBOSE, "RGXHWPerfFTraceGPUThreadProcessPackets: TL "
+	 		"Packets processed %03d, HWPerf packets %03d, sent %03d",
+	 		ui32TlPackets, ui32HWPerfPackets, ui32HWPerfPacketsSent));
+
+	PVR_DPF_RETURN;
+}
+
+
+static
+IMG_VOID RGXHWPerfFTraceCmdCompleteNotify(PVRSRV_CMDCOMP_HANDLE hCmdCompHandle)
+{
+	PVRSRV_DATA*        psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_RGXDEV_INFO* psDeviceInfo = hCmdCompHandle;
+	IMG_HANDLE			hUnusedByTL;
+	PVRSRV_ERROR        eError;
+	IMG_HANDLE          hStream;
+	IMG_PBYTE           pBuffer;
+	IMG_UINT32          ui32ReadLen;
+
+	PVR_DPF_ENTERED;
+
+	/* Command-complete notifiers can run concurrently. If this is
+	 * happening, just bail out and let the previous call finish.
+	 * This is ok because we can process the queued packets on the next call.
+	 */
+	if (!(OSTryLockAcquire(hFTraceLock)))
+	{
+		PVR_DPF_RETURN;
+	}
+
+	/* Exit if no HWPerf enabled device exits */
+	PVR_ASSERT(psDeviceInfo != IMG_NULL &&
+	           psPVRSRVData != IMG_NULL &&
+	           gpsRgxDevInfo != NULL);
+
+
+	hUnusedByTL = psDeviceInfo->hGPUTraceTLConnection;
+	hStream = psDeviceInfo->hGPUTraceTLStream;
+
+	if (hStream)
+	{
+		/* If we have a valid stream attempt to acquire some data */
+		eError = TLClientAcquireData(hUnusedByTL, hStream, &pBuffer, &ui32ReadLen);
+		if (eError == PVRSRV_OK)
+		{
+			/* Process the HWPerf packets and release the data */
+			if (ui32ReadLen > 0)
+			{
+				PVR_DPF((PVR_DBG_VERBOSE, "RGXHWPerfFTraceGPUThread: DATA AVAILABLE offset=%p, length=%d", pBuffer, ui32ReadLen));
+
+				/* Process the transport layer data for HWPerf packets... */
+				RGXHWPerfFTraceGPUThreadProcessPackets (psDeviceInfo, pBuffer, ui32ReadLen);
+
+				eError = TLClientReleaseData(hUnusedByTL, hStream);
+				if (eError != PVRSRV_OK)
+				{
+					PVR_LOG_ERROR(eError, "TLClientReleaseData");
+
+					/* Serious error, disable FTrace GPU events */
+
+					/* Release TraceLock so we always have the locking
+					 * order BridgeLock->TraceLock to prevent AB-BA deadlocks*/
+					OSLockRelease(hFTraceLock);
+					OSAcquireBridgeLock();
+					RGXHWPerfFTraceGPUDisable(IMG_FALSE);
+					OSReleaseBridgeLock();
+					goto out;
+
+				}
+			} /* else no data, ignore */
+		}
+		else if (eError != PVRSRV_ERROR_TIMEOUT)
+		{
+			PVR_LOG_ERROR(eError, "TLClientAcquireData");
+		}
+	}
+
+	OSLockRelease(hFTraceLock);
+out:
+	PVR_DPF_RETURN;
+}
+
+
+PVRSRV_ERROR RGXHWPerfFTraceGPUInit(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	PVR_DPF_ENTERED;
+
+    /* Must be setup already by the general HWPerf module initialisation.
+	 * DevInfo object needed by FTrace event generation code */
+	PVR_ASSERT(gpsRgxDevInfo);
+	gpsRgxDevInfo->bFTraceGPUEventsEnabled = IMG_FALSE;
+
+	eError = OSLockCreate(&hFTraceLock, LOCK_TYPE_DISPATCH);
+
+	PVR_DPF_RETURN_RC(eError);
+}
+
+
+IMG_VOID RGXHWPerfFTraceGPUDeInit(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	PVR_DPF_ENTERED;
+
+	if (gpsRgxDevInfo->bFTraceGPUEventsEnabled)
+	{
+		RGXHWPerfFTraceGPUDisable(IMG_TRUE);
+		gpsRgxDevInfo->bFTraceGPUEventsEnabled = IMG_FALSE;
+	}
+
+	OSLockDestroy(hFTraceLock);
+
+	PVR_DPF_RETURN;
+}
+
+
+#endif /* SUPPORT_GPUTRACE_EVENTS */
+
+
+/******************************************************************************
+ * SUPPORT_KERNEL_HWPERF
+ *
+ * Currently only implemented on Linux. Feature can be enabled on Linux builds
+ * to provide an interface to 3rd-party kernel modules that wish to access the
+ * HWPerf data. The API is documented in the rgxapi_km.h header and
+ * the rgx_hwperf* headers.
+ *****************************************************************************/
+#if defined(SUPPORT_KERNEL_HWPERF)
+
+/* Internal HWPerf kernel connection/device data object to track the state
+ * of a client session.
+ */
+typedef struct
+{
+	PVRSRV_DEVICE_NODE* psRgxDevNode;
+
+	/* TL Connect/disconnect state */
+	IMG_HANDLE          hTLConnection;
+
+	/* TL Open/close state */
+	IMG_HANDLE          hSD;
+
+	/* TL Acquire/release state */
+	IMG_PBYTE			pHwpBuf;
+	IMG_UINT32          ui32HwpBufLen;
+
+} RGX_KM_HWPERF_DEVDATA;
+
+
+PVRSRV_ERROR RGXHWPerfConnect(
+		IMG_HANDLE* phDevData)
+{
+	PVRSRV_ERROR           eError;
+	RGX_KM_HWPERF_DEVDATA* psDevData;
+
+	/* Valid input argument values supplied by the caller */
+	if (!phDevData)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* Clear the handle to aid error checking by caller */
+	*phDevData = IMG_NULL;
+
+	/* Check the HWPerf module is initialised before we allow a connection */
+	if (!gpsRgxDevNode || !gpsRgxDevInfo)
+	{
+		return PVRSRV_ERROR_INVALID_DEVICE;
+	}
+
+	/* In the case where the AppHint has not been set we need to
+	 * initialise the host driver HWPerf resources here. Allocated on
+	 * demand to reduce RAM foot print on systems not needing HWPerf.
+	 */
+	if (gpsRgxDevInfo->hHWPerfStream == IMG_NULL)
+	{
+		eError = RGXHWPerfInit(IMG_NULL, IMG_TRUE);
+		PVR_LOGR_IF_ERROR(eError, "RGXHWPerfInit");
+	}
+
+	/* Allocation the session object for this connection */
+	psDevData = OSAllocZMem(sizeof(*psDevData));
+	if (psDevData == IMG_NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+	psDevData->psRgxDevNode = gpsRgxDevNode;
+
+
+	/* Open a TL connection and store it in the session object */
+	eError = TLClientConnect(&psDevData->hTLConnection);
+	if (eError != PVRSRV_OK)
+	{
+		goto e1;
+	}
+
+	/* Open the 'hwperf' TL stream for reading in this session */
+	eError = TLClientOpenStream(psDevData->hTLConnection,
+			                    HWPERF_TL_STREAM_NAME,
+			                    PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING,
+			                    &psDevData->hSD);
+	if (eError != PVRSRV_OK)
+	{
+		goto e2;
+	}
+
+	*phDevData = psDevData;
+	return PVRSRV_OK;
+
+	/* Error path... */
+e2:
+	TLClientDisconnect(psDevData->hTLConnection);
+e1:
+	OSFREEMEM(psDevData);
+// e0:
+	return eError;
+}
+
+
+
+PVRSRV_ERROR RGXHWPerfControl(
+		IMG_HANDLE  hDevData,
+		IMG_BOOL    bToggle,
+		IMG_UINT64  ui64Mask)
+{
+	PVRSRV_ERROR           eError;
+	RGX_KM_HWPERF_DEVDATA* psDevData = (RGX_KM_HWPERF_DEVDATA*)hDevData;
+
+	/* Valid input argument values supplied by the caller */
+	if (!psDevData)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* Ensure we are initialised and have a valid device node */
+	if (!psDevData->psRgxDevNode)
+	{
+		return PVRSRV_ERROR_INVALID_DEVICE;
+	}
+
+	/* Call the internal server API */
+	eError = PVRSRVRGXCtrlHWPerfKM(psDevData->psRgxDevNode, bToggle, ui64Mask);
+	return eError;
+}
+
+
+PVRSRV_ERROR RGXHWPerfConfigureAndEnableCounters(
+		IMG_HANDLE                 hDevData,
+		IMG_UINT32                 ui32NumBlocks,
+		RGX_HWPERF_CONFIG_CNTBLK*  asBlockConfigs)
+{
+	PVRSRV_ERROR           eError;
+	RGX_KM_HWPERF_DEVDATA* psDevData = (RGX_KM_HWPERF_DEVDATA*)hDevData;
+
+	/* Valid input argument values supplied by the caller */
+	if (!psDevData || ui32NumBlocks==0 || !asBlockConfigs)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	if (ui32NumBlocks > RGXFWIF_HWPERF_CTRL_BLKS_MAX)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* Ensure we are initialised and have a valid device node */
+	if (!psDevData->psRgxDevNode)
+	{
+		return PVRSRV_ERROR_INVALID_DEVICE;
+	}
+
+	/* Call the internal server API */
+	eError = PVRSRVRGXConfigEnableHWPerfCountersKM(
+			psDevData->psRgxDevNode, ui32NumBlocks, asBlockConfigs);
+	return eError;
+}
+
+
+PVRSRV_ERROR RGXHWPerfDisableCounters(
+		IMG_HANDLE   hDevData,
+		IMG_UINT32   ui32NumBlocks,
+		IMG_UINT16*   aeBlockIDs)
+{
+	PVRSRV_ERROR           eError;
+	RGX_KM_HWPERF_DEVDATA* psDevData = (RGX_KM_HWPERF_DEVDATA*)hDevData;
+
+	/* Valid input argument values supplied by the caller */
+	if (!psDevData || ui32NumBlocks==0 || !aeBlockIDs)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	if (ui32NumBlocks > RGXFWIF_HWPERF_CTRL_BLKS_MAX)
+    {
+        return PVRSRV_ERROR_INVALID_PARAMS;
+    }
+
+	/* Ensure we are initialised and have a valid device node */
+	if (!psDevData->psRgxDevNode)
+	{
+		return PVRSRV_ERROR_INVALID_DEVICE;
+	}
+
+	/* Call the internal server API */
+	eError = PVRSRVRGXCtrlHWPerfCountersKM(
+			psDevData->psRgxDevNode, IMG_FALSE, ui32NumBlocks, aeBlockIDs);
+	return eError;
+}
+
+
+PVRSRV_ERROR RGXHWPerfAcquireData(
+		IMG_HANDLE  hDevData,
+		IMG_PBYTE*  ppBuf,
+		IMG_UINT32* pui32BufLen)
+{
+	PVRSRV_ERROR           eError;
+	RGX_KM_HWPERF_DEVDATA* psDevData = (RGX_KM_HWPERF_DEVDATA*)hDevData;
+	IMG_PBYTE              pTlBuf = IMG_NULL;
+	IMG_UINT32             ui32TlBufLen = 0;
+	IMG_PBYTE              pDataDest;
+	IMG_UINT32			ui32TlPackets = 0;
+	IMG_PBYTE			pBufferEnd;
+	PVRSRVTL_PPACKETHDR psHDRptr;
+	PVRSRVTL_PACKETTYPE ui16TlType;
+
+	/* Reset the output arguments in case we discover an error */
+	*ppBuf = IMG_NULL;
+	*pui32BufLen = 0;
+
+	/* Valid input argument values supplied by the caller */
+	if (!psDevData)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* Acquire some data to read from the HWPerf TL stream */
+	eError = TLClientAcquireData(psDevData->hTLConnection,
+	                             psDevData->hSD,
+	                             &pTlBuf,
+	                             &ui32TlBufLen);
+	PVR_LOGR_IF_ERROR(eError, "TLClientAcquireData");
+
+	/* TL indicates no data exists so return OK and zero. */
+	if ((pTlBuf == IMG_NULL) || (ui32TlBufLen == 0))
+	{
+		return PVRSRV_OK;
+	}
+
+	/* Is the client buffer allocated and too small? */
+	if (psDevData->pHwpBuf && (psDevData->ui32HwpBufLen < ui32TlBufLen))
+	{
+		OSFREEMEM(psDevData->pHwpBuf);
+	}
+
+	/* Do we need to allocate a new client buffer? */
+	if (!psDevData->pHwpBuf)
+	{
+		psDevData->pHwpBuf = OSAllocMem(ui32TlBufLen);
+		if (psDevData->pHwpBuf  == IMG_NULL)
+		{
+			(void) TLClientReleaseData(psDevData->hTLConnection, psDevData->hSD);
+			return PVRSRV_ERROR_OUT_OF_MEMORY;
+		}
+		psDevData->ui32HwpBufLen = ui32TlBufLen;
+	}
+
+	/* Process each TL packet in the data buffer we have acquired */
+	pBufferEnd = pTlBuf+ui32TlBufLen;
+	pDataDest = psDevData->pHwpBuf;
+	psHDRptr = GET_PACKET_HDR(pTlBuf);
+	while ( psHDRptr < (PVRSRVTL_PPACKETHDR)pBufferEnd )
+	{
+		ui16TlType = GET_PACKET_TYPE(psHDRptr);
+		if (ui16TlType == PVRSRVTL_PACKETTYPE_DATA)
+		{
+			IMG_UINT16 ui16DataLen = GET_PACKET_DATA_LEN(psHDRptr);
+			if (0 == ui16DataLen)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfAcquireData: ZERO Data in TL data packet: %p", psHDRptr));
+			}
+			else
+			{
+				/* For valid data copy it into the client buffer and move
+				 * the write position on */
+				OSMemCopy(pDataDest, GET_PACKET_DATA_PTR(psHDRptr), ui16DataLen);
+				pDataDest += ui16DataLen;
+			}
+		}
+		else if (ui16TlType == PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED)
+		{
+			PVR_DPF((PVR_DBG_MESSAGE, "RGXHWPerfAcquireData: Indication that the transport buffer was full"));
+		}
+		else
+		{
+			/* else Ignore padding packet type and others */
+			PVR_DPF((PVR_DBG_MESSAGE, "RGXHWPerfAcquireData: Ignoring TL packet, type %d", ui16TlType ));
+		}
+
+		/* Update loop variable to the next packet and increment counts */
+		psHDRptr = GET_NEXT_PACKET_ADDR(psHDRptr);
+		ui32TlPackets++;
+	}
+
+	PVR_DPF((PVR_DBG_VERBOSE, "RGXHWPerfAcquireData: TL Packets processed %03d", ui32TlPackets));
+
+	/* Update output arguments with client buffer details and true length */
+	*ppBuf = psDevData->pHwpBuf;
+	*pui32BufLen = pDataDest - psDevData->pHwpBuf;
+
+	return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR RGXHWPerfReleaseData(
+		IMG_HANDLE hDevData)
+{
+	PVRSRV_ERROR           eError;
+	RGX_KM_HWPERF_DEVDATA* psDevData = (RGX_KM_HWPERF_DEVDATA*)hDevData;
+
+	/* Valid input argument values supplied by the caller */
+	if (!psDevData)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* Free the client buffer if allocated and reset length */
+	if (psDevData->pHwpBuf)
+	{
+		OSFREEMEM(psDevData->pHwpBuf);
+	}
+	psDevData->ui32HwpBufLen = 0;
+
+	/* Inform the TL that we are done with reading the data. Could perform this
+	 * in the acquire call but felt it worth keeping it symmetrical */
+	eError = TLClientReleaseData(psDevData->hTLConnection, psDevData->hSD);
+	return eError;
+}
+
+
+PVRSRV_ERROR RGXHWPerfDisconnect(
+		IMG_HANDLE hDevData)
+{
+	PVRSRV_ERROR           eError = PVRSRV_OK;
+	RGX_KM_HWPERF_DEVDATA* psDevData = (RGX_KM_HWPERF_DEVDATA*)hDevData;
+
+	/* Check session handle is not zero */
+	if (!psDevData)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* If the client buffer exists they have not called ReleaseData
+	 * before disconnecting so clean it up */
+	if (psDevData->pHwpBuf)
+	{
+		eError = RGXHWPerfReleaseData(hDevData);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfDisconnect: Failed to release data (%d)", eError));
+		}
+		/* RGXHWPerfReleaseData call above will null out the buffer 
+		 * fields and length */
+	}
+
+	/* Close the TL stream, ignore the error if it occurs as we
+	 * are disconnecting */
+	if (psDevData->hSD)
+	{
+		eError = TLClientCloseStream(psDevData->hTLConnection,
+									 psDevData->hSD);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfDisconnect: Failed to close handle on HWPerf stream (%d)", eError));
+		}
+		psDevData->hSD = IMG_NULL;
+	}
+
+	/* End the TL connection as we don't require it anymore */
+	if (psDevData->hTLConnection)
+	{
+		eError = TLClientDisconnect(psDevData->hTLConnection);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfDisconnect: Failed to disconnect from the Transport (%d)", eError));
+		}
+		psDevData->hTLConnection = IMG_NULL;
+	}
+
+	/* Free the session memory */
+	psDevData->psRgxDevNode = IMG_NULL;
+	OSFREEMEM(psDevData);
+	return eError;
+}
+
+
+#endif /* SUPPORT_KERNEL_HWPERF */
+
+
+/******************************************************************************
+ End of file (rgxdebug.c)
+******************************************************************************/
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxhwperf.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxhwperf.h
new file mode 100644
index 0000000..5383988
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxhwperf.h
@@ -0,0 +1,114 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX HW Performance header file
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX HWPerf functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXHWPERF_H_
+#define RGXHWPERF_H_
+  
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#include "device.h"
+#include "rgxdevice.h"
+#include "rgx_hwperf_km.h"
+
+
+/******************************************************************************
+ * RGX HW Performance Data Transport Routines
+ *****************************************************************************/
+
+PVRSRV_ERROR RGXHWPerfDataStoreCB(PVRSRV_DEVICE_NODE* psDevInfo);
+
+PVRSRV_ERROR RGXHWPerfInit(PVRSRV_DEVICE_NODE *psRgxDevInfo, IMG_BOOL bEnable);
+IMG_VOID RGXHWPerfDeinit(void);
+
+
+/******************************************************************************
+ * RGX HW Performance Profiling API(s)
+ *****************************************************************************/
+
+PVRSRV_ERROR PVRSRVRGXCtrlHWPerfKM(
+		PVRSRV_DEVICE_NODE*	psDeviceNode,
+		IMG_BOOL			bToggle,
+		IMG_UINT64 			ui64Mask);
+
+
+PVRSRV_ERROR PVRSRVRGXConfigEnableHWPerfCountersKM(
+		PVRSRV_DEVICE_NODE* 		psDeviceNode,
+		IMG_UINT32 					ui32ArrayLen,
+		RGX_HWPERF_CONFIG_CNTBLK* 	psBlockConfigs);
+
+PVRSRV_ERROR PVRSRVRGXCtrlHWPerfCountersKM(
+		PVRSRV_DEVICE_NODE*		psDeviceNode,
+		IMG_BOOL			bEnable,
+	    IMG_UINT32 			ui32ArrayLen,
+	    IMG_UINT16*			psBlockIDs);
+
+PVRSRV_ERROR PVRSRVRGXConfigCustomCountersKM(
+		PVRSRV_DEVICE_NODE*     psDeviceNode,
+		IMG_UINT16              ui16CustomBlockID,
+		IMG_UINT16              ui16NumCustomCounters,
+		IMG_UINT32*             pui32CustomCounterIDs);
+
+/******************************************************************************
+ * RGX HW Performance To FTrace Profiling API(s)
+ *****************************************************************************/
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+
+PVRSRV_ERROR RGXHWPerfFTraceGPUInit(PVRSRV_RGXDEV_INFO *psDevInfo);
+IMG_VOID RGXHWPerfFTraceGPUDeInit(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+IMG_VOID RGXHWPerfFTraceGPUEnqueueEvent(PVRSRV_RGXDEV_INFO *psDevInfo,
+		IMG_UINT32 ui32ExternalJobRef, IMG_UINT32 ui32InternalJobRef,
+		const IMG_CHAR* pszJobType);
+
+PVRSRV_ERROR RGXHWPerfFTraceGPUEventsEnabledSet(IMG_BOOL bNewValue);
+IMG_BOOL RGXHWPerfFTraceGPUEventsEnabled(IMG_VOID);
+
+IMG_VOID RGXHWPerfFTraceGPUThread(IMG_PVOID pvData);
+
+#endif
+
+
+#endif /* RGXHWPERF_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxinit.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxinit.c
new file mode 100644
index 0000000..94cb3e4
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxinit.c
@@ -0,0 +1,3159 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device specific initialisation routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+
+#include "pvrsrv.h"
+#include "rgxheapconfig.h"
+#include "rgxpower.h"
+
+#include "rgxinit.h"
+
+#include "pdump_km.h"
+#include "handle.h"
+#include "allocmem.h"
+#include "devicemem_pdump.h"
+#include "rgxmem.h"
+#include "sync_internal.h"
+
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgx_fwif_km.h"
+
+#include "rgxmmuinit.h"
+#include "devicemem_utils.h"
+#include "devicemem_server.h"
+#include "physmem_osmem.h"
+
+#include "rgxdebug.h"
+#include "rgxhwperf.h"
+
+#include "rgx_options_km.h"
+#include "pvrversion.h"
+
+#include "rgx_compat_bvnc.h"
+
+#include "rgx_heaps.h"
+
+#include "rgxta3d.h"
+#include "debug_request_ids.h"
+#include "rgxtimecorr.h"
+
+static PVRSRV_ERROR RGXDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT32 ui32ClientBuildOptions);
+static PVRSRV_ERROR RGXDevVersionString(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_CHAR **ppszVersionString);
+static PVRSRV_ERROR RGXDevClockSpeed(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_PUINT32  pui32RGXClockSpeed);
+static PVRSRV_ERROR RGXSoftReset(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT64  ui64ResetValue1, IMG_UINT64  ui64ResetValue2);
+
+#define RGX_MMU_LOG2_PAGE_SIZE_4KB   (12)
+#define RGX_MMU_LOG2_PAGE_SIZE_16KB  (14)
+#define RGX_MMU_LOG2_PAGE_SIZE_64KB  (16)
+#define RGX_MMU_LOG2_PAGE_SIZE_256KB (18)
+#define RGX_MMU_LOG2_PAGE_SIZE_1MB   (20)
+#define RGX_MMU_LOG2_PAGE_SIZE_2MB   (21)
+
+#define RGX_MMU_PAGE_SIZE_4KB   (   4 * 1024)
+#define RGX_MMU_PAGE_SIZE_16KB  (  16 * 1024)
+#define RGX_MMU_PAGE_SIZE_64KB  (  64 * 1024)
+#define RGX_MMU_PAGE_SIZE_256KB ( 256 * 1024)
+#define RGX_MMU_PAGE_SIZE_1MB   (1024 * 1024)
+#define RGX_MMU_PAGE_SIZE_2MB   (2048 * 1024)
+#define RGX_MMU_PAGE_SIZE_MIN RGX_MMU_PAGE_SIZE_4KB
+#define RGX_MMU_PAGE_SIZE_MAX RGX_MMU_PAGE_SIZE_2MB
+
+#define VAR(x) #x
+
+/* FIXME: This is a workaround due to having 2 inits but only 1 deinit */
+static IMG_BOOL g_bDevInit2Done = IMG_FALSE;
+
+
+static IMG_VOID RGX_DeInitHeaps(DEVICE_MEMORY_INFO *psDevMemoryInfo);
+
+IMG_UINT32 g_ui32HostSampleIRQCount = 0;
+
+#if !defined(NO_HARDWARE)
+
+/*
+	RGX LISR Handler
+*/
+static IMG_BOOL RGX_LISRHandler (IMG_VOID *pvData)
+{
+	PVRSRV_DEVICE_NODE *psDeviceNode;
+	PVRSRV_DEVICE_CONFIG *psDevConfig;
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+	IMG_UINT32 ui32IRQStatus;
+	IMG_BOOL bInterruptProcessed = IMG_FALSE;
+
+	psDeviceNode = pvData;
+	psDevConfig = psDeviceNode->psDevConfig;
+	psDevInfo = psDeviceNode->pvDevice;
+
+	if (psDevInfo->bIgnoreFurtherIRQs)
+	{
+		return IMG_FALSE;
+	}
+
+	ui32IRQStatus = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVIRQSTATUS);
+
+	if (ui32IRQStatus & RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_EN)
+	{
+		OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVIRQSTATUS, RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_CLRMSK);
+		
+#if defined(RGX_FEATURE_OCPBUS)
+		OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_OCP_IRQSTATUS_2, RGX_CR_OCP_IRQSTATUS_2_RGX_IRQ_STATUS_EN);
+#endif
+
+		if (psDevConfig->pfnInterruptHandled)
+		{
+			psDevConfig->pfnInterruptHandled(psDevConfig);
+		}
+
+		bInterruptProcessed = IMG_TRUE;
+		
+		/* Sample the current count from the FW _after_ we've cleared the interrupt. */
+		g_ui32HostSampleIRQCount = psDevInfo->psRGXFWIfTraceBuf->ui32InterruptCount;
+
+		OSScheduleMISR(psDevInfo->pvMISRData);
+
+		if (psDevInfo->pvAPMISRData != IMG_NULL)
+		{
+			OSScheduleMISR(psDevInfo->pvAPMISRData);
+		}
+	}
+	return bInterruptProcessed;
+}
+
+static IMG_VOID RGXCheckFWActivePowerState(IMG_VOID *psDevice)
+{
+	PVRSRV_DEVICE_NODE	*psDeviceNode = psDevice;
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+	RGXFWIF_TRACEBUF *psFWTraceBuf = psDevInfo->psRGXFWIfTraceBuf;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	
+	if (psFWTraceBuf->ePowState == RGXFWIF_POW_IDLE)
+	{
+		/* The FW is IDLE and therefore could be shut down */
+		eError = RGXActivePowerRequest(psDeviceNode);
+
+		if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED))
+		{
+			PVR_DPF((PVR_DBG_WARNING,"RGXCheckFWActivePowerState: Failed RGXActivePowerRequest call (device index: %d) with %s", 
+						psDeviceNode->sDevId.ui32DeviceIndex,
+						PVRSRVGetErrorStringKM(eError)));
+			
+			PVRSRVDebugRequest(DEBUG_REQUEST_VERBOSITY_MAX, IMG_NULL);
+		}
+	}
+
+}
+
+
+PVRSRV_ERROR RGXRegisterGpuUtilStats(IMG_HANDLE *phGpuUtilUser)
+{
+	RGXFWIF_GPU_UTIL_STATS *psAggregateStats;
+
+	psAggregateStats = OSAllocMem(sizeof(RGXFWIF_GPU_UTIL_STATS));
+	if(psAggregateStats == NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	psAggregateStats->ui64GpuStatActiveLow  = 0;
+	psAggregateStats->ui64GpuStatIdle       = 0;
+	psAggregateStats->ui64GpuStatActiveHigh = 0;
+	psAggregateStats->ui64GpuStatBlocked    = 0;
+
+	/* Not used */
+	psAggregateStats->bValid = IMG_FALSE;
+	psAggregateStats->ui64GpuStatCumulative = 0;
+
+	*phGpuUtilUser = psAggregateStats;
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXUnregisterGpuUtilStats(IMG_HANDLE hGpuUtilUser)
+{
+	RGXFWIF_GPU_UTIL_STATS *psAggregateStats;
+
+	if(hGpuUtilUser == NULL)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psAggregateStats = hGpuUtilUser;
+	OSFreeMem(psAggregateStats);
+
+	return PVRSRV_OK;
+}
+
+/* Shorter defines to keep the code a bit shorter */
+#define GPU_ACTIVE_LOW   RGXFWIF_GPU_UTIL_STATE_ACTIVE_LOW
+#define GPU_IDLE         RGXFWIF_GPU_UTIL_STATE_IDLE
+#define GPU_ACTIVE_HIGH  RGXFWIF_GPU_UTIL_STATE_ACTIVE_HIGH
+#define GPU_BLOCKED      RGXFWIF_GPU_UTIL_STATE_BLOCKED
+#define MAX_ITERATIONS   64
+
+static PVRSRV_ERROR RGXGetGpuUtilStats(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                       IMG_HANDLE hGpuUtilUser,
+                                       RGXFWIF_GPU_UTIL_STATS *psReturnStats)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+	volatile RGXFWIF_GPU_UTIL_FWCB *psUtilFWCb = psDevInfo->psRGXFWIfGpuUtilFWCb;
+	RGXFWIF_GPU_UTIL_STATS *psAggregateStats;
+	IMG_UINT64 aui64TmpCounters[RGXFWIF_GPU_UTIL_STATE_NUM] = {0};
+	IMG_UINT64 ui64TimeNow;
+	IMG_UINT64 ui64LastPeriod;
+	IMG_UINT64 ui64LastWord = 0, ui64LastState = 0, ui64LastTime = 0;
+	IMG_UINT32 i = 0;
+
+
+	/***** (1) Initialise return stats *****/
+
+	psReturnStats->bValid = IMG_FALSE;
+	psReturnStats->ui64GpuStatActiveLow  = 0;
+	psReturnStats->ui64GpuStatIdle       = 0;
+	psReturnStats->ui64GpuStatActiveHigh = 0;
+	psReturnStats->ui64GpuStatBlocked    = 0;
+	psReturnStats->ui64GpuStatCumulative = 0;
+
+	if (hGpuUtilUser == NULL)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+	psAggregateStats = hGpuUtilUser;
+
+
+	/***** (2) Get latest data from shared area *****/
+
+	OSLockAcquire(psDevInfo->hGPUUtilLock);
+
+	/* Read the timer before reading the latest stats from the shared
+	 * area, discard it later in case of state updates after this point.
+	 */
+	ui64TimeNow = RGXFWIF_GPU_UTIL_GET_TIME(OSClockns64());
+	OSMemoryBarrier();
+
+	/* Keep reading the counters until the values stabilise as the FW
+	 * might be updating them at the same time.
+	 */
+	while(((ui64LastWord != psUtilFWCb->ui64LastWord) ||
+	       (aui64TmpCounters[ui64LastState] !=
+	        psUtilFWCb->aui64StatsCounters[ui64LastState])) &&
+	      (i < MAX_ITERATIONS))
+	{
+		ui64LastWord  = psUtilFWCb->ui64LastWord;
+		ui64LastState = RGXFWIF_GPU_UTIL_GET_STATE(ui64LastWord);
+		aui64TmpCounters[GPU_ACTIVE_LOW]  = psUtilFWCb->aui64StatsCounters[GPU_ACTIVE_LOW];
+		aui64TmpCounters[GPU_IDLE]        = psUtilFWCb->aui64StatsCounters[GPU_IDLE];
+		aui64TmpCounters[GPU_ACTIVE_HIGH] = psUtilFWCb->aui64StatsCounters[GPU_ACTIVE_HIGH];
+		aui64TmpCounters[GPU_BLOCKED]     = psUtilFWCb->aui64StatsCounters[GPU_BLOCKED];
+		i++;
+	}
+
+#if defined(PVR_POWER_ACTOR) && defined(PVR_DVFS)
+	/* Power actor enabled */
+	psReturnStats->ui32GpuEnergy = psDevInfo->psRGXFWIfTraceBuf->ui32PowMonEnergy;
+#endif
+
+	OSLockRelease(psDevInfo->hGPUUtilLock);
+
+	if (i == MAX_ITERATIONS)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "RGXGetGpuUtilStats could not get reliable data within a short time."));
+		return PVRSRV_ERROR_TIMEOUT;
+	}
+
+
+	/***** (3) Compute return stats and update aggregate stats *****/
+
+	/* Update temp counters to account for the time since the last update to the shared ones */
+	ui64LastTime   = RGXFWIF_GPU_UTIL_GET_TIME(ui64LastWord);
+	ui64LastPeriod = RGXFWIF_GPU_UTIL_GET_PERIOD(ui64TimeNow, ui64LastTime);
+	aui64TmpCounters[ui64LastState] += ui64LastPeriod;
+
+	/* Get statistics for a user since its last request */
+	psReturnStats->ui64GpuStatActiveLow = RGXFWIF_GPU_UTIL_GET_PERIOD(aui64TmpCounters[GPU_ACTIVE_LOW],
+	                                                                  psAggregateStats->ui64GpuStatActiveLow);
+	psReturnStats->ui64GpuStatIdle = RGXFWIF_GPU_UTIL_GET_PERIOD(aui64TmpCounters[GPU_IDLE],
+	                                                             psAggregateStats->ui64GpuStatIdle);
+	psReturnStats->ui64GpuStatActiveHigh = RGXFWIF_GPU_UTIL_GET_PERIOD(aui64TmpCounters[GPU_ACTIVE_HIGH],
+	                                                                   psAggregateStats->ui64GpuStatActiveHigh);
+	psReturnStats->ui64GpuStatBlocked = RGXFWIF_GPU_UTIL_GET_PERIOD(aui64TmpCounters[GPU_BLOCKED],
+	                                                                psAggregateStats->ui64GpuStatBlocked);
+	psReturnStats->ui64GpuStatCumulative = psReturnStats->ui64GpuStatActiveLow + psReturnStats->ui64GpuStatIdle +
+	                                       psReturnStats->ui64GpuStatActiveHigh + psReturnStats->ui64GpuStatBlocked;
+
+	/* Update aggregate stats for the current user */
+	psAggregateStats->ui64GpuStatActiveLow  += psReturnStats->ui64GpuStatActiveLow;
+	psAggregateStats->ui64GpuStatIdle       += psReturnStats->ui64GpuStatIdle;
+	psAggregateStats->ui64GpuStatActiveHigh += psReturnStats->ui64GpuStatActiveHigh;
+	psAggregateStats->ui64GpuStatBlocked    += psReturnStats->ui64GpuStatBlocked;
+
+	/* Check that the return stats make sense */
+	if(psReturnStats->ui64GpuStatCumulative == 0)
+	{
+		/* We can enter here only if all the RGXFWIF_GPU_UTIL_GET_PERIOD
+		 * returned 0, which means something has gone very wrong with the timers
+		 * or the aggregate stats...
+		 */
+		PVR_DPF((PVR_DBG_WARNING,"RGXGetGpuUtilStats could not get reliable data because"
+		                         "the timers or the aggregate stats are wrongly configured."));
+		return PVRSRV_ERROR_RESOURCE_UNAVAILABLE;
+	}
+
+
+	/***** (4) Convert return stats to microseconds *****/
+
+	psReturnStats->ui64GpuStatActiveLow  = OSDivide64(psReturnStats->ui64GpuStatActiveLow, 1000, &i);
+	psReturnStats->ui64GpuStatIdle       = OSDivide64(psReturnStats->ui64GpuStatIdle, 1000, &i);
+	psReturnStats->ui64GpuStatActiveHigh = OSDivide64(psReturnStats->ui64GpuStatActiveHigh, 1000, &i);
+	psReturnStats->ui64GpuStatBlocked    = OSDivide64(psReturnStats->ui64GpuStatBlocked, 1000, &i);
+	psReturnStats->ui64GpuStatCumulative = OSDivide64(psReturnStats->ui64GpuStatCumulative, 1000, &i);
+	psReturnStats->bValid                = IMG_TRUE;
+
+	return PVRSRV_OK;
+}
+
+
+/*
+	RGX MISR Handler
+*/
+static IMG_VOID RGX_MISRHandler (IMG_VOID *pvData)
+{
+	PVRSRV_DEVICE_NODE *psDeviceNode = pvData;
+
+	/* Give the HWPerf service a chance to transfer some data from the FW
+	 * buffer to the host driver transport layer buffer.
+	 */
+	RGXHWPerfDataStoreCB(psDeviceNode);
+
+	/* Inform other services devices that we have finished an operation */
+	PVRSRVCheckStatus(psDeviceNode);
+
+	/* Process all firmware CCBs for pending commands */
+	RGXCheckFirmwareCCBs(psDeviceNode->pvDevice);
+
+	/* Calibrate the GPU frequency and recorrelate Host and FW timers (done every few seconds) */
+	RGXGPUFreqCalibrateCorrelatePeriodic(psDeviceNode);
+}
+#endif
+
+
+PVRSRV_ERROR PVRSRVGPUVIRTPopulateLMASubArenasKM(PVRSRV_DEVICE_NODE	*psDeviceNode, IMG_UINT32 ui32NumElements, IMG_UINT32 aui32Elements[])
+{
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+{
+	IMG_UINT32	ui32OS, ui32Region, ui32Counter=0;
+	IMG_UINT32	aui32OSidMin[GPUVIRT_VALIDATION_NUM_OS][GPUVIRT_VALIDATION_NUM_REGIONS];
+	IMG_UINT32	aui32OSidMax[GPUVIRT_VALIDATION_NUM_OS][GPUVIRT_VALIDATION_NUM_REGIONS];
+
+	PVR_UNREFERENCED_PARAMETER(ui32NumElements);
+
+	for (ui32OS = 0; ui32OS < GPUVIRT_VALIDATION_NUM_OS; ui32OS++)
+	{
+		for (ui32Region = 0; ui32Region < GPUVIRT_VALIDATION_NUM_REGIONS; ui32Region++)
+		{
+			aui32OSidMin[ui32OS][ui32Region] = aui32Elements[ui32Counter++];
+			aui32OSidMax[ui32OS][ui32Region] = aui32Elements[ui32Counter++];
+
+			PVR_DPF((PVR_DBG_MESSAGE,"OS=%u, Region=%u, Min=%u, Max=%u", ui32OS, ui32Region, aui32OSidMin[ui32OS][ui32Region], aui32OSidMax[ui32OS][ui32Region]));
+		}
+	}
+
+	PopulateLMASubArenas(psDeviceNode, aui32OSidMin, aui32OSidMax);
+}
+#else
+{
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+	PVR_UNREFERENCED_PARAMETER(ui32NumElements);
+	PVR_UNREFERENCED_PARAMETER(aui32Elements);
+}
+#endif
+
+	return PVRSRV_OK;
+}
+
+/*
+ * PVRSRVRGXInitDevPart2KM
+ */ 
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXInitDevPart2KM (PVRSRV_DEVICE_NODE	*psDeviceNode,
+									  RGX_INIT_COMMAND		*psInitScript,
+									  RGX_INIT_COMMAND		*psDbgScript,
+									  RGX_INIT_COMMAND		*psDbgBusScript,
+									  RGX_INIT_COMMAND		*psDeinitScript,
+									  IMG_UINT32			ui32KernelCatBaseIdReg,
+									  IMG_UINT32			ui32KernelCatBaseId,
+									  IMG_UINT32			ui32KernelCatBaseReg,
+									  IMG_UINT32			ui32KernelCatBaseWordSize,
+									  IMG_UINT32			ui32KernelCatBaseAlignShift,
+									  IMG_UINT32			ui32KernelCatBaseShift,
+									  IMG_UINT64			ui64KernelCatBaseMask,
+									  IMG_UINT32			ui32DeviceFlags,
+									  RGX_ACTIVEPM_CONF		eActivePMConf,
+								 	  DEVMEM_EXPORTCOOKIE	*psFWCodeAllocServerExportCookie,
+								 	  DEVMEM_EXPORTCOOKIE	*psFWDataAllocServerExportCookie,
+								 	  DEVMEM_EXPORTCOOKIE	*psFWCorememAllocServerExportCookie,
+								 	  DEVMEM_EXPORTCOOKIE	*psHWPerfDataAllocServerExportCookie)
+{
+	PVRSRV_ERROR			eError;
+	PVRSRV_RGXDEV_INFO		*psDevInfo = psDeviceNode->pvDevice;
+	PVRSRV_DEV_POWER_STATE	eDefaultPowerState;
+	PVRSRV_DEVICE_CONFIG	*psDevConfig = psDeviceNode->psDevConfig;
+
+	PDUMPCOMMENT("RGX Initialisation Part 2");
+
+	psDevInfo->ui32KernelCatBaseIdReg = ui32KernelCatBaseIdReg;
+	psDevInfo->ui32KernelCatBaseId = ui32KernelCatBaseId;
+	psDevInfo->ui32KernelCatBaseReg = ui32KernelCatBaseReg;
+	psDevInfo->ui32KernelCatBaseAlignShift = ui32KernelCatBaseAlignShift;
+	psDevInfo->ui32KernelCatBaseShift = ui32KernelCatBaseShift;
+	psDevInfo->ui32KernelCatBaseWordSize = ui32KernelCatBaseWordSize;
+	psDevInfo->ui64KernelCatBaseMask = ui64KernelCatBaseMask;
+
+	/*
+	 * Map RGX Registers
+	 */
+#if !defined(NO_HARDWARE)
+	psDevInfo->pvRegsBaseKM = OSMapPhysToLin(psDevConfig->sRegsCpuPBase,
+										     psDevConfig->ui32RegsSize,
+										     0);
+
+	if (psDevInfo->pvRegsBaseKM == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXInitDevPart2KM: Failed to create RGX register mapping"));
+		return PVRSRV_ERROR_BAD_MAPPING;
+	}
+#else
+	psDevInfo->pvRegsBaseKM = IMG_NULL;
+#endif /* !NO_HARDWARE */
+
+	/* free the export cookies provided to srvinit */
+	DevmemUnexport(psDevInfo->psRGXFWCodeMemDesc, psFWCodeAllocServerExportCookie);
+	DevmemUnexport(psDevInfo->psRGXFWDataMemDesc, psFWDataAllocServerExportCookie);
+	if (DevmemIsValidExportCookie(psFWCorememAllocServerExportCookie))
+	{
+		DevmemUnexport(psDevInfo->psRGXFWCorememMemDesc, psFWCorememAllocServerExportCookie);
+	}
+	DevmemUnexport(psDevInfo->psRGXFWIfHWPerfCountersMemDesc, psHWPerfDataAllocServerExportCookie);
+	/*
+	 * Copy scripts
+	 */
+	OSMemCopy(psDevInfo->psScripts->asInitCommands, psInitScript,
+			  RGX_MAX_INIT_COMMANDS * sizeof(*psInitScript));
+
+	OSMemCopy(psDevInfo->psScripts->asDbgCommands, psDbgScript,
+			  RGX_MAX_DEBUG_COMMANDS * sizeof(*psDbgScript));
+
+	OSMemCopy(psDevInfo->psScripts->asDbgBusCommands, psDbgBusScript,
+			  RGX_MAX_DBGBUS_COMMANDS * sizeof(*psDbgBusScript));
+
+	OSMemCopy(psDevInfo->psScripts->asDeinitCommands, psDeinitScript,
+			  RGX_MAX_DEINIT_COMMANDS * sizeof(*psDeinitScript));
+
+#if defined(PDUMP)
+	/* Run the deinit script to feed the last-frame deinit buffer */
+	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_DEINIT, "RGX deinitialisation script");
+	RGXRunScript(psDevInfo, psDevInfo->psScripts->asDeinitCommands, RGX_MAX_DEINIT_COMMANDS, PDUMP_FLAGS_DEINIT | PDUMP_FLAGS_NOHW, IMG_NULL);
+#endif
+
+
+	psDevInfo->ui32RegSize = psDevConfig->ui32RegsSize;
+	psDevInfo->sRegsPhysBase = psDevConfig->sRegsCpuPBase;
+
+	/* Initialise Device Flags */
+	psDevInfo->ui32DeviceFlags = 0;
+	if (ui32DeviceFlags & RGXKMIF_DEVICE_STATE_ZERO_FREELIST)
+	{
+		psDevInfo->ui32DeviceFlags |= RGXKM_DEVICE_STATE_ZERO_FREELIST;
+	}
+
+	if (ui32DeviceFlags & RGXKMIF_DEVICE_STATE_DISABLE_DW_LOGGING_EN)
+	{
+		psDevInfo->ui32DeviceFlags |= RGXKM_DEVICE_STATE_DISABLE_DW_LOGGING_EN;
+	}
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+	/* If built, always setup FTrace consumer thread. */
+	RGXHWPerfFTraceGPUInit(psDeviceNode->pvDevice);
+
+	RGXHWPerfFTraceGPUEventsEnabledSet((ui32DeviceFlags & RGXKMIF_DEVICE_STATE_FTRACE_EN) ? IMG_TRUE: IMG_FALSE);
+#endif
+
+	/* Initialise lists of ZSBuffers */
+	eError = OSLockCreate(&psDevInfo->hLockZSBuffer,LOCK_TYPE_PASSIVE);
+	PVR_ASSERT(eError == PVRSRV_OK);
+	dllist_init(&psDevInfo->sZSBufferHead);
+	psDevInfo->ui32ZSBufferCurrID = 1;
+
+	/* Initialise lists of growable Freelists */
+	eError = OSLockCreate(&psDevInfo->hLockFreeList,LOCK_TYPE_PASSIVE);
+	PVR_ASSERT(eError == PVRSRV_OK);
+	dllist_init(&psDevInfo->sFreeListHead);
+	psDevInfo->ui32FreelistCurrID = 1;
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+	eError = OSLockCreate(&psDevInfo->hDebugFaultInfoLock, LOCK_TYPE_PASSIVE);
+
+	if(eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	eError = OSLockCreate(&psDevInfo->hMMUCtxUnregLock, LOCK_TYPE_PASSIVE);
+
+	if(eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#endif
+
+	/* Allocate DVFS Table */
+	psDevInfo->psGpuDVFSTable = OSAllocZMem(sizeof(*(psDevInfo->psGpuDVFSTable)));
+	if (psDevInfo->psGpuDVFSTable == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXInitDevPart2KM: failed to allocate gpu dvfs table storage"));
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	/* Reset DVFS Table */
+	psDevInfo->psGpuDVFSTable->ui32CurrentDVFSId = 0;
+	psDevInfo->psGpuDVFSTable->aui32DVFSClock[0] = 0;
+
+	/* Setup GPU utilisation stats update callback */
+#if !defined(NO_HARDWARE)
+	psDevInfo->pfnRegisterGpuUtilStats = RGXRegisterGpuUtilStats;
+	psDevInfo->pfnGetGpuUtilStats = RGXGetGpuUtilStats;
+	psDevInfo->pfnUnregisterGpuUtilStats = RGXUnregisterGpuUtilStats;
+#endif
+
+	eError = OSLockCreate(&psDevInfo->hGPUUtilLock, LOCK_TYPE_PASSIVE);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+
+	eDefaultPowerState = PVRSRV_DEV_POWER_STATE_ON;
+
+	/* set-up the Active Power Mgmt callback */
+#if !defined(NO_HARDWARE)
+	{
+		RGX_DATA *psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData;
+		IMG_BOOL bSysEnableAPM = psRGXData->psRGXTimingInfo->bEnableActivePM;
+		IMG_BOOL bEnableAPM = ((eActivePMConf == RGX_ACTIVEPM_DEFAULT) && bSysEnableAPM) ||
+							   (eActivePMConf == RGX_ACTIVEPM_FORCE_ON);
+
+		if (bEnableAPM)
+		{
+			eError = OSInstallMISR(&psDevInfo->pvAPMISRData, RGXCheckFWActivePowerState, psDeviceNode);
+			if (eError != PVRSRV_OK)
+			{
+				return eError;
+			}
+
+			/* Prevent the device being woken up before there is something to do. */
+			eDefaultPowerState = PVRSRV_DEV_POWER_STATE_OFF;
+		}
+	}
+#endif
+
+	/* Register the device with the power manager. */
+	eError = PVRSRVRegisterPowerDevice (psDeviceNode->sDevId.ui32DeviceIndex,
+										&RGXPrePowerState, &RGXPostPowerState,
+										psDevConfig->pfnPrePowerState, psDevConfig->pfnPostPowerState,
+										&RGXPreClockSpeedChange, &RGXPostClockSpeedChange,
+										&RGXForcedIdleRequest, &RGXCancelForcedIdleRequest,
+										&RGXDustCountChange,
+										(IMG_HANDLE)psDeviceNode,
+										PVRSRV_DEV_POWER_STATE_OFF,
+										eDefaultPowerState);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXInitDevPart2KM: failed to register device with power manager"));
+		return eError;
+	}
+
+#if !defined(NO_HARDWARE)
+	eError = RGXInstallProcessQueuesMISR(&psDevInfo->hProcessQueuesMISR, psDeviceNode);
+	if (eError != PVRSRV_OK)
+	{
+		if (psDevInfo->pvAPMISRData != IMG_NULL)
+		{
+			(IMG_VOID) OSUninstallMISR(psDevInfo->pvAPMISRData);
+		}
+		return eError;
+	}
+
+	/* Register the interrupt handlers */
+	eError = OSInstallMISR(&psDevInfo->pvMISRData,
+									RGX_MISRHandler, psDeviceNode);
+	if (eError != PVRSRV_OK)
+	{
+		if (psDevInfo->pvAPMISRData != IMG_NULL)
+		{
+			(IMG_VOID) OSUninstallMISR(psDevInfo->pvAPMISRData);
+		}
+		(IMG_VOID) OSUninstallMISR(psDevInfo->hProcessQueuesMISR);
+		return eError;
+	}
+
+	eError = OSInstallDeviceLISR(psDevConfig, &psDevInfo->pvLISRData,
+								 RGX_LISRHandler, psDeviceNode);
+	if (eError != PVRSRV_OK)
+	{
+		if (psDevInfo->pvAPMISRData != IMG_NULL)
+		{
+			(IMG_VOID) OSUninstallMISR(psDevInfo->pvAPMISRData);
+		}
+		(IMG_VOID) OSUninstallMISR(psDevInfo->hProcessQueuesMISR);
+		(IMG_VOID) OSUninstallMISR(psDevInfo->pvMISRData);
+		return eError;
+	}
+
+#endif
+	g_bDevInit2Done = IMG_TRUE;
+
+	return PVRSRV_OK;
+}
+ 
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXInitHWPerfCountersKM(PVRSRV_DEVICE_NODE	*psDeviceNode)
+{
+
+	PVRSRV_ERROR			eError;
+	RGXFWIF_KCCB_CMD		sKccbCmd;
+
+	/* Fill in the command structure with the parameters needed
+	 */
+	sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS_DIRECT;
+
+	eError = RGXSendCommandWithPowLock(psDeviceNode->pvDevice,
+											RGXFWIF_DM_GP,
+											&sKccbCmd,
+											sizeof(sKccbCmd),
+											IMG_TRUE);
+
+	return PVRSRV_OK;
+
+}
+
+static
+PVRSRV_ERROR RGXAllocateFWCodeRegion(PVRSRV_DEVICE_NODE	*psDeviceNode,
+                                     IMG_DEVMEM_SIZE_T ui32FWCodeAllocSize,
+                                     IMG_UINT32 uiMemAllocFlags)
+{
+ 	PVRSRV_ERROR eError;
+
+#if ! defined(TDMETACODE)
+	PVRSRV_RGXDEV_INFO 	*psDevInfo = psDeviceNode->pvDevice;
+
+	uiMemAllocFlags |= PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
+	                   PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+	PDUMPCOMMENT("Allocate and export code memory for fw");
+
+	eError = DevmemFwAllocateExportable(psDeviceNode,
+										ui32FWCodeAllocSize,
+										uiMemAllocFlags,
+										"FirmwareCodeRegion",
+	                                    &psDevInfo->psRGXFWCodeMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"DevmemFwAllocateExportable failed (%u)",
+				eError));
+	}
+
+	return eError;
+
+#else
+	PMR *psTDMetaCodePMR;
+	IMG_DEVMEM_SIZE_T uiMemDescSize;
+	IMG_DEV_VIRTADDR sTmpDevVAddr;
+	PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice;
+
+	PDUMPCOMMENT("Allocate TD META code memory for fw");
+
+	eError = PhysmemNewTDMetaCodePMR(psDeviceNode,
+	                                 ui32FWCodeAllocSize,
+	                                 12,
+	                                 uiMemAllocFlags,
+	                                 &psTDMetaCodePMR);
+	if(eError != PVRSRV_OK)
+	{
+		goto PMRCreateError;
+	}
+
+	PDUMPCOMMENT("Import TD META code memory for fw");
+
+	/* NB: psTDMetaCodePMR refcount: 1 -> 2 */
+	eError = DevmemLocalImport(IMG_NULL, /* bridge handle not applicable here */
+	                           psTDMetaCodePMR,
+	                           uiMemAllocFlags,
+	                           &psDevInfo->psRGXFWCodeMemDesc,
+	                           &uiMemDescSize);
+	if(eError != PVRSRV_OK)
+	{
+		goto ImportError;
+	}
+
+	eError = DevmemMapToDevice(psDevInfo->psRGXFWCodeMemDesc,
+							   psDevInfo->psFirmwareHeap,
+							   &sTmpDevVAddr);
+	if(eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"Failed to map TD META code PMR (%u)", eError));
+		goto MapError;
+	}
+
+	/* Caution, oddball code follows:
+	   When doing the DevmemLocalImport above, we wrap the PMR in a memdesc and increment
+	   the PMR's refcount. We would like to implicitly say now, that memdesc is our
+	   tracking mechanism for the PMR, and no longer the original pointer to it. The call
+	   to PMRUnimportPMR below does that. For reasons explained below, this is only done
+	   if this function will return successfully.
+
+	   NB: i.e., psTDMetaCodePMR refcount: 2 -> 1
+	*/
+	PMRUnimportPMR(psTDMetaCodePMR);
+
+	return eError;
+
+MapError:
+	DevmemFree(psDevInfo->psRGXFWCodeMemDesc);
+
+ImportError:
+	/* This is done even after the DevmemFree above because as a result of the PMRUnimportPMR
+	   at the end of the function never getting hit on an error condition, the PMR must be
+	   unreferenced "again" as part of the cleanup */
+	PMRUnimportPMR(psTDMetaCodePMR);
+
+PMRCreateError:
+
+	return eError;
+#endif
+}
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXInitAllocFWImgMemKM(PVRSRV_DEVICE_NODE	*psDeviceNode,
+										  IMG_DEVMEM_SIZE_T 	uiFWCodeLen,
+									 	  IMG_DEVMEM_SIZE_T 	uiFWDataLen,
+									 	  IMG_DEVMEM_SIZE_T 	uiFWCorememLen,
+									 	  DEVMEM_EXPORTCOOKIE	**ppsFWCodeAllocServerExportCookie,
+									 	  IMG_DEV_VIRTADDR		*psFWCodeDevVAddrBase,
+									 	  DEVMEM_EXPORTCOOKIE	**ppsFWDataAllocServerExportCookie,
+									 	  IMG_DEV_VIRTADDR		*psFWDataDevVAddrBase,
+									 	  DEVMEM_EXPORTCOOKIE	**ppsFWCorememAllocServerExportCookie,
+									 	  IMG_DEV_VIRTADDR		*psFWCorememDevVAddrBase,
+										  RGXFWIF_DEV_VIRTADDR	*psFWCorememMetaVAddrBase)
+{
+	DEVMEM_FLAGS_T		uiMemAllocFlags;
+	PVRSRV_RGXDEV_INFO 	*psDevInfo = psDeviceNode->pvDevice;
+	PVRSRV_ERROR        eError;
+
+	/* set up memory contexts */
+
+	/* Register callbacks for creation of device memory contexts */
+	psDeviceNode->pfnRegisterMemoryContext = RGXRegisterMemoryContext;
+	psDeviceNode->pfnUnregisterMemoryContext = RGXUnregisterMemoryContext;
+
+	/* Create the memory context for the firmware. */
+	eError = DevmemCreateContext(IMG_NULL, psDeviceNode,
+								 DEVMEM_HEAPCFG_META,
+								 &psDevInfo->psKernelDevmemCtx);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXInitAllocFWImgMemKM: Failed DevmemCreateContext (%u)", eError));
+		goto failed_to_create_ctx;
+	}
+	
+	eError = DevmemFindHeapByName(psDevInfo->psKernelDevmemCtx,
+								  "Firmware", /* FIXME: We need to create an IDENT macro for this string.
+								                 Make sure the IDENT macro is not accessible to userland */
+								  &psDevInfo->psFirmwareHeap);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXInitAllocFWImgMemKM: Failed DevmemFindHeapByName (%u)", eError));
+		goto failed_to_find_heap;
+	}
+
+	/* 
+	 * Set up Allocation for FW code section 
+	 */
+	uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+	                  PVRSRV_MEMALLOCFLAG_GPU_READABLE | 
+	                  PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+	                  PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+	                  PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+	                  PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+	                  PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE;
+
+
+	eError = RGXAllocateFWCodeRegion(psDeviceNode,
+                                     uiFWCodeLen,
+	                                 uiMemAllocFlags);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"Failed to allocate fw code mem (%u)",
+				eError));
+		goto failFWCodeMemDescAlloc;
+	}
+
+	eError = DevmemExport(psDevInfo->psRGXFWCodeMemDesc,
+	                      &psDevInfo->sRGXFWCodeExportCookie);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"Failed to export fw code mem (%u)",
+				eError));
+		goto failFWCodeMemDescExport;
+	}
+
+	eError = DevmemAcquireDevVirtAddr(psDevInfo->psRGXFWCodeMemDesc,
+	                                  psFWCodeDevVAddrBase);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"Failed to acquire devVAddr for fw code mem (%u)",
+				eError));
+		goto failFWCodeMemDescAqDevVirt;
+	}
+
+	/*
+	* The FW code must be the first allocation in the firmware heap, otherwise
+	* the bootloader will not work (META will not be able to find the bootloader).
+	*/
+	PVR_ASSERT(psFWCodeDevVAddrBase->uiAddr == RGX_FIRMWARE_HEAP_BASE);
+
+	/* 
+	 * Set up Allocation for FW data section 
+	 */
+	uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+	                  PVRSRV_MEMALLOCFLAG_GPU_READABLE | 
+	                  PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+	                  PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+	                  PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+	                  PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+	                  PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+	                  PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
+	                  PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+	PDUMPCOMMENT("Allocate and export data memory for fw");
+
+	eError = DevmemFwAllocateExportable(psDeviceNode,
+										uiFWDataLen,
+										uiMemAllocFlags,
+										"FirmwareDataRegion",
+	                                    &psDevInfo->psRGXFWDataMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"Failed to allocate fw data mem (%u)",
+				eError));
+		goto failFWDataMemDescAlloc;
+	}
+
+	eError = DevmemExport(psDevInfo->psRGXFWDataMemDesc,
+	                      &psDevInfo->sRGXFWDataExportCookie);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"Failed to export fw data mem (%u)",
+				eError));
+		goto failFWDataMemDescExport;
+	}
+
+	eError = DevmemAcquireDevVirtAddr(psDevInfo->psRGXFWDataMemDesc,
+	                                  psFWDataDevVAddrBase);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"Failed to acquire devVAddr for fw data mem (%u)",
+				eError));
+		goto failFWDataMemDescAqDevVirt;
+	}
+
+	if (uiFWCorememLen != 0)
+	{
+		/* 
+		 * Set up Allocation for FW coremem section 
+		 */
+		uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+		                  PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(META_CACHED) |
+			PVRSRV_MEMALLOCFLAG_GPU_READABLE | 
+			PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+			PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+			PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+			PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+			PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
+			PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+		PDUMPCOMMENT("Allocate and export coremem memory for fw");
+
+		eError = DevmemFwAllocateExportable(psDeviceNode,
+				uiFWCorememLen,
+				uiMemAllocFlags,
+				"FirmwareCorememRegion",
+				&psDevInfo->psRGXFWCorememMemDesc);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"Failed to allocate fw coremem mem, size: %lld, flags: %x (%u)",
+						uiFWCorememLen, uiMemAllocFlags, eError));
+			goto failFWCorememMemDescAlloc;
+		}
+
+		eError = DevmemExport(psDevInfo->psRGXFWCorememMemDesc,
+				&psDevInfo->sRGXFWCorememExportCookie);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"Failed to export fw coremem mem (%u)",
+						eError));
+			goto failFWCorememMemDescExport;
+		}
+
+		eError = DevmemAcquireDevVirtAddr(psDevInfo->psRGXFWCorememMemDesc,
+				psFWCorememDevVAddrBase);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"Failed to acquire devVAddr for fw coremem mem (%u)",
+						eError));
+			goto failFWCorememMemDescAqDevVirt;
+		}
+
+		RGXSetFirmwareAddress(psFWCorememMetaVAddrBase,
+				psDevInfo->psRGXFWCorememMemDesc,
+				0, RFW_FWADDR_NOREF_FLAG);
+
+#if defined(HW_ERN_45914)
+		/* temporarily make sure the coremem is init using the SLC */
+		psFWCorememMetaVAddrBase->ui32Addr &= ~RGXFW_SEGMMU_DMAP_ADDR_START;
+		psFWCorememMetaVAddrBase->ui32Addr |= RGXFW_BOOTLDR_META_ADDR;
+#endif
+
+	}
+
+	*ppsFWCodeAllocServerExportCookie = &psDevInfo->sRGXFWCodeExportCookie;
+	*ppsFWDataAllocServerExportCookie = &psDevInfo->sRGXFWDataExportCookie;
+	/* Set all output arguments to ensure safe use in Part2 initialisation */
+	*ppsFWCorememAllocServerExportCookie = &psDevInfo->sRGXFWCorememExportCookie;
+
+	return PVRSRV_OK;
+
+
+failFWCorememMemDescAqDevVirt:
+
+	if (uiFWCorememLen != 0)
+	{
+		DevmemUnexport(psDevInfo->psRGXFWCorememMemDesc, &psDevInfo->sRGXFWCorememExportCookie);
+	}
+failFWCorememMemDescExport:
+
+	if (uiFWCorememLen != 0)
+	{
+		DevmemFwFree(psDevInfo->psRGXFWCorememMemDesc);
+		psDevInfo->psRGXFWCorememMemDesc = IMG_NULL;
+	}
+failFWCorememMemDescAlloc:
+
+	DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWDataMemDesc);
+failFWDataMemDescAqDevVirt:
+
+	DevmemUnexport(psDevInfo->psRGXFWDataMemDesc, &psDevInfo->sRGXFWDataExportCookie);
+failFWDataMemDescExport:
+
+	DevmemFwFree(psDevInfo->psRGXFWDataMemDesc);
+	psDevInfo->psRGXFWDataMemDesc = IMG_NULL;
+failFWDataMemDescAlloc:
+
+	DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWCodeMemDesc);
+failFWCodeMemDescAqDevVirt:
+
+	DevmemUnexport(psDevInfo->psRGXFWCodeMemDesc, &psDevInfo->sRGXFWCodeExportCookie);
+failFWCodeMemDescExport:
+
+	DevmemFwFree(psDevInfo->psRGXFWCodeMemDesc);
+	psDevInfo->psRGXFWCodeMemDesc = IMG_NULL;
+failFWCodeMemDescAlloc:
+
+failed_to_find_heap:
+	/*
+	 * Clear the mem context create callbacks before destroying the RGX firmware
+	 * context to avoid a spurious callback.
+	 */
+	psDeviceNode->pfnRegisterMemoryContext = IMG_NULL;
+	psDeviceNode->pfnUnregisterMemoryContext = IMG_NULL;
+	DevmemDestroyContext(psDevInfo->psKernelDevmemCtx);
+	psDevInfo->psKernelDevmemCtx = IMG_NULL;
+failed_to_create_ctx:
+
+	return eError;
+}
+
+/*
+ * PVRSRVRGXInitFirmwareKM
+ */ 
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXInitFirmwareKM(PVRSRV_DEVICE_NODE			*psDeviceNode, 
+									    RGXFWIF_DEV_VIRTADDR		*psRGXFwInit,
+									    IMG_BOOL					bEnableSignatureChecks,
+									    IMG_UINT32					ui32SignatureChecksBufSize,
+									    IMG_UINT32					ui32HWPerfFWBufSizeKB,
+									    IMG_UINT64					ui64HWPerfFilter,
+									    IMG_UINT32					ui32RGXFWAlignChecksSize,
+									    IMG_UINT32					*pui32RGXFWAlignChecks,
+									    IMG_UINT32					ui32ConfigFlags,
+									    IMG_UINT32					ui32LogType,
+									    IMG_UINT32					ui32FilterFlags,
+									    IMG_UINT32					ui32JonesDisableMask,
+									    IMG_UINT32					ui32HWRDebugDumpLimit,
+									    RGXFWIF_COMPCHECKS_BVNC     *psClientBVNC,
+										IMG_UINT32					ui32HWPerfCountersDataSize,
+										DEVMEM_EXPORTCOOKIE	**ppsHWPerfDataAllocServerExportCookie,
+									    RGX_RD_POWER_ISLAND_CONF			eRGXRDPowerIslandingConf)
+{
+	PVRSRV_ERROR				eError;
+	RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(sBVNC);
+	IMG_BOOL bCompatibleAll, bCompatibleVersion, bCompatibleLenMax, bCompatibleBNC, bCompatibleV;
+	IMG_UINT32 ui32NumBIFTilingConfigs, *pui32BIFTilingXStrides, i;
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+
+	/* Check if BVNC numbers of client and driver are compatible */
+	rgx_bvnc_packed(&sBVNC.ui32BNC, sBVNC.aszV, sBVNC.ui32VLenMax, RGX_BVNC_KM_B, RGX_BVNC_KM_V_ST, RGX_BVNC_KM_N, RGX_BVNC_KM_C);
+
+	RGX_BVNC_EQUAL(sBVNC, *psClientBVNC, bCompatibleAll, bCompatibleVersion, bCompatibleLenMax, bCompatibleBNC, bCompatibleV);
+
+	if (!bCompatibleAll)
+	{
+		if (!bCompatibleVersion)
+		{
+			PVR_LOG(("(FAIL) %s: Incompatible compatibility struct version of driver (%d) and client (%d).",
+					__FUNCTION__, 
+					sBVNC.ui32LayoutVersion, 
+					psClientBVNC->ui32LayoutVersion));
+			eError = PVRSRV_ERROR_BVNC_MISMATCH;
+			PVR_DBG_BREAK;
+			goto failed_to_pass_compatibility_check;
+		}
+
+		if (!bCompatibleLenMax)
+		{
+			PVR_LOG(("(FAIL) %s: Incompatible V maxlen of driver (%d) and client (%d).",
+					__FUNCTION__, 
+					sBVNC.ui32VLenMax, 
+					psClientBVNC->ui32VLenMax));
+			eError = PVRSRV_ERROR_BVNC_MISMATCH;
+			PVR_DBG_BREAK;
+			goto failed_to_pass_compatibility_check;
+		}
+
+		if (!bCompatibleBNC)
+		{
+			PVR_LOG(("(FAIL) %s: Incompatible driver BNC (%d._.%d.%d) / client BNC (%d._.%d.%d).",
+					__FUNCTION__, 
+					RGX_BVNC_PACKED_EXTR_B(sBVNC), 
+					RGX_BVNC_PACKED_EXTR_N(sBVNC), 
+					RGX_BVNC_PACKED_EXTR_C(sBVNC), 
+					RGX_BVNC_PACKED_EXTR_B(*psClientBVNC), 
+					RGX_BVNC_PACKED_EXTR_N(*psClientBVNC), 
+					RGX_BVNC_PACKED_EXTR_C(*psClientBVNC)));
+			eError = PVRSRV_ERROR_BVNC_MISMATCH;
+			PVR_DBG_BREAK;
+			goto failed_to_pass_compatibility_check;
+		}
+		
+		if (!bCompatibleV)
+		{
+			PVR_LOG(("(FAIL) %s: Incompatible driver BVNC (%d.%s.%d.%d) / client BVNC (%d.%s.%d.%d).",
+					__FUNCTION__, 
+					RGX_BVNC_PACKED_EXTR_B(sBVNC), 
+					RGX_BVNC_PACKED_EXTR_V(sBVNC), 
+					RGX_BVNC_PACKED_EXTR_N(sBVNC), 
+					RGX_BVNC_PACKED_EXTR_C(sBVNC), 
+					RGX_BVNC_PACKED_EXTR_B(*psClientBVNC), 
+					RGX_BVNC_PACKED_EXTR_V(*psClientBVNC), 
+					RGX_BVNC_PACKED_EXTR_N(*psClientBVNC), 
+					RGX_BVNC_PACKED_EXTR_C(*psClientBVNC)));
+			eError = PVRSRV_ERROR_BVNC_MISMATCH;
+			PVR_DBG_BREAK;
+			goto failed_to_pass_compatibility_check;
+		}
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "%s: COMPAT_TEST: driver BVNC (%d.%s.%d.%d) and client BVNC (%d.%s.%d.%d) match. [ OK ]",
+				__FUNCTION__, 
+				RGX_BVNC_PACKED_EXTR_B(sBVNC), 
+				RGX_BVNC_PACKED_EXTR_V(sBVNC), 
+				RGX_BVNC_PACKED_EXTR_N(sBVNC), 
+				RGX_BVNC_PACKED_EXTR_C(sBVNC), 
+				RGX_BVNC_PACKED_EXTR_B(*psClientBVNC), 
+				RGX_BVNC_PACKED_EXTR_V(*psClientBVNC), 
+				RGX_BVNC_PACKED_EXTR_N(*psClientBVNC), 
+				RGX_BVNC_PACKED_EXTR_C(*psClientBVNC)));
+	}
+
+	GetNumBifTilingHeapConfigs(&ui32NumBIFTilingConfigs);
+	pui32BIFTilingXStrides = OSAllocMem(sizeof(IMG_UINT32) * ui32NumBIFTilingConfigs);
+	if(pui32BIFTilingXStrides == IMG_NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXInitFirmwareKM: OSAllocMem failed (%u)", eError));
+		goto failed_BIF_tiling_alloc;
+	}
+	for(i = 0; i < ui32NumBIFTilingConfigs; i++)
+	{
+		eError = GetBIFTilingHeapXStride(i+1, &pui32BIFTilingXStrides[i]);
+		if(eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXInitFirmwareKM: GetBIFTilingHeapXStride for heap %u failed (%u)",
+			         i + 1, eError));
+			goto failed_BIF_heap_init;
+		}
+	}
+
+	eError = RGXSetupFirmware(psDeviceNode, 
+							     bEnableSignatureChecks, 
+							     ui32SignatureChecksBufSize,
+							     ui32HWPerfFWBufSizeKB,
+							     ui64HWPerfFilter,
+							     ui32RGXFWAlignChecksSize,
+							     pui32RGXFWAlignChecks,
+							     ui32ConfigFlags,
+							     ui32LogType,
+							     ui32NumBIFTilingConfigs,
+							     pui32BIFTilingXStrides,
+							     ui32FilterFlags,
+							     ui32JonesDisableMask,
+							     ui32HWRDebugDumpLimit,
+							     ui32HWPerfCountersDataSize,
+							     psRGXFwInit,
+							     eRGXRDPowerIslandingConf);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXInitFirmwareKM: RGXSetupFirmware failed (%u)", eError));
+		goto failed_init_firmware;
+	}
+	*ppsHWPerfDataAllocServerExportCookie = &psDevInfo->sRGXFWHWPerfCountersExportCookie;
+	
+	OSFreeMem(pui32BIFTilingXStrides);
+	return PVRSRV_OK;
+
+failed_init_firmware:
+failed_BIF_heap_init:
+	OSFreeMem(pui32BIFTilingXStrides);
+failed_BIF_tiling_alloc:
+failed_to_pass_compatibility_check:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+
+
+
+/* See device.h for function declaration */
+static PVRSRV_ERROR RGXAllocUFOBlock(PVRSRV_DEVICE_NODE *psDeviceNode,
+									 DEVMEM_MEMDESC **psMemDesc,
+									 IMG_UINT32 *puiSyncPrimVAddr,
+									 IMG_UINT32 *puiSyncPrimBlockSize)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+	PVRSRV_ERROR eError;
+	RGXFWIF_DEV_VIRTADDR pFirmwareAddr;
+	IMG_DEVMEM_SIZE_T uiUFOBlockSize = sizeof(IMG_UINT32);
+	IMG_DEVMEM_ALIGN_T ui32UFOBlockAlign = sizeof(IMG_UINT32);
+
+	psDevInfo = psDeviceNode->pvDevice;
+
+	/* Size and align are 'expanded' because we request an Exportalign allocation */
+	DevmemExportalignAdjustSizeAndAlign(psDevInfo->psFirmwareHeap,
+										&uiUFOBlockSize,
+										&ui32UFOBlockAlign);
+
+	eError = DevmemFwAllocateExportable(psDeviceNode,
+										uiUFOBlockSize,
+										PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+										PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+										PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+										PVRSRV_MEMALLOCFLAG_CACHE_COHERENT | 
+										PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+										PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+										PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+										PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE,
+										"UFOBlock",
+										psMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+
+	DevmemPDumpLoadMem(*psMemDesc,
+					   0,
+					   uiUFOBlockSize,
+					   PDUMP_FLAGS_CONTINUOUS);
+
+	RGXSetFirmwareAddress(&pFirmwareAddr, *psMemDesc, 0, RFW_FWADDR_FLAG_NONE);
+	*puiSyncPrimVAddr = pFirmwareAddr.ui32Addr;
+	*puiSyncPrimBlockSize = TRUNCATE_64BITS_TO_32BITS(uiUFOBlockSize);
+
+	return PVRSRV_OK;
+
+	DevmemFwFree(*psMemDesc);
+e0:
+	return eError;
+}
+
+/* See device.h for function declaration */
+static IMG_VOID RGXFreeUFOBlock(PVRSRV_DEVICE_NODE *psDeviceNode,
+								DEVMEM_MEMDESC *psMemDesc)
+{
+	/*
+		If the system has snooping of the device cache then the UFO block
+		might be in the cache so we need to flush it out before freeing
+		the memory
+	*/
+	if (PVRSRVSystemSnoopingOfDeviceCache())
+	{
+		RGXFWIF_KCCB_CMD sFlushInvalCmd;
+		PVRSRV_ERROR eError;
+
+		/* Schedule the SLC flush command ... */
+#if defined(PDUMP)
+		PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Submit SLC flush and invalidate");
+#endif
+		sFlushInvalCmd.eCmdType = RGXFWIF_KCCB_CMD_SLCFLUSHINVAL;
+		sFlushInvalCmd.uCmdData.sSLCFlushInvalData.bInval = IMG_TRUE;
+		sFlushInvalCmd.uCmdData.sSLCFlushInvalData.bDMContext = IMG_FALSE;
+		sFlushInvalCmd.uCmdData.sSLCFlushInvalData.eDM = 0;
+		sFlushInvalCmd.uCmdData.sSLCFlushInvalData.psContext.ui32Addr = 0;
+
+		eError = RGXSendCommandWithPowLock(psDeviceNode->pvDevice,
+											RGXFWIF_DM_GP,
+											&sFlushInvalCmd,
+											sizeof(sFlushInvalCmd),
+											IMG_TRUE);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"RGXFreeUFOBlock: Failed to schedule SLC flush command with error (%u)", eError));
+		}
+		else
+		{
+			/* Wait for the SLC flush to complete */
+			eError = RGXWaitForFWOp(psDeviceNode->pvDevice, RGXFWIF_DM_GP, psDeviceNode->psSyncPrim, IMG_TRUE);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,"RGXFreeUFOBlock: SLC flush and invalidate aborted with error (%u)", eError));
+			}
+		}
+	}
+
+	RGXUnsetFirmwareAddress(psMemDesc);
+	DevmemFwFree(psMemDesc);
+}
+
+/*
+	DevDeInitRGX
+*/
+PVRSRV_ERROR DevDeInitRGX (PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_RGXDEV_INFO			*psDevInfo = (PVRSRV_RGXDEV_INFO*)psDeviceNode->pvDevice;
+	PVRSRV_ERROR				eError;
+	DEVICE_MEMORY_INFO		    *psDevMemoryInfo;
+
+	if (!psDevInfo)
+	{
+		/* Can happen if DevInitRGX failed */
+		PVR_DPF((PVR_DBG_ERROR,"DevDeInitRGX: Null DevInfo"));
+		return PVRSRV_OK;
+	}
+
+	/* Unregister debug request notifiers first as they could depend on anything. */
+	PVRSRVUnregisterDbgRequestNotify(psDeviceNode->hDbgReqNotify);
+
+	/* Cancel notifications to this device */
+	PVRSRVUnregisterCmdCompleteNotify(psDeviceNode->hCmdCompNotify);
+	psDeviceNode->hCmdCompNotify = IMG_NULL;
+
+	/*
+	 *  De-initialise in reverse order, so stage 2 init is undone first.
+	 */
+	if (g_bDevInit2Done)
+	{
+		g_bDevInit2Done = IMG_FALSE;
+
+#if !defined(NO_HARDWARE)
+		(IMG_VOID) OSUninstallDeviceLISR(psDevInfo->pvLISRData);
+		(IMG_VOID) OSUninstallMISR(psDevInfo->pvMISRData);
+		(IMG_VOID) OSUninstallMISR(psDevInfo->hProcessQueuesMISR);
+		if (psDevInfo->pvAPMISRData != IMG_NULL)
+		{
+			(IMG_VOID) OSUninstallMISR(psDevInfo->pvAPMISRData);
+		}
+#endif /* !NO_HARDWARE */
+
+		/* Remove the device from the power manager */
+		eError = PVRSRVRemovePowerDevice(psDeviceNode->sDevId.ui32DeviceIndex);
+		if (eError != PVRSRV_OK)
+		{
+			return eError;
+		}
+
+		OSLockDestroy(psDevInfo->hGPUUtilLock);
+
+		/* Free DVFS Table */
+		if (psDevInfo->psGpuDVFSTable != IMG_NULL)
+		{
+			OSFreeMem(psDevInfo->psGpuDVFSTable);
+			psDevInfo->psGpuDVFSTable = IMG_NULL;
+		}
+
+		/* De-init Freelists/ZBuffers... */
+		OSLockDestroy(psDevInfo->hLockFreeList);
+		OSLockDestroy(psDevInfo->hLockZSBuffer);
+
+		/* De-init HWPerf Ftrace thread resources for the RGX device */
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+		RGXHWPerfFTraceGPUDeInit(psDevInfo);
+#endif
+
+		/* Unregister MMU related stuff */
+		eError = RGXMMUInit_Unregister(psDeviceNode);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"DevDeInitRGX: Failed RGXMMUInit_Unregister (0x%x)", eError));
+			return eError;
+		}
+
+		/* UnMap Regs */
+		if (psDevInfo->pvRegsBaseKM != IMG_NULL)
+		{
+#if !defined(NO_HARDWARE)
+			OSUnMapPhysToLin(psDevInfo->pvRegsBaseKM,
+							 psDevInfo->ui32RegSize,
+							 0);
+#endif /* !NO_HARDWARE */
+			psDevInfo->pvRegsBaseKM = IMG_NULL;
+		}
+	}
+
+#if 0 /* not required at this time */
+	if (psDevInfo->hTimer)
+	{
+		eError = OSRemoveTimer(psDevInfo->hTimer);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"DevDeInitRGX: Failed to remove timer"));
+			return 	eError;
+		}
+		psDevInfo->hTimer = IMG_NULL;
+	}
+#endif
+
+    psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
+
+	RGX_DeInitHeaps(psDevMemoryInfo);
+
+	if (psDevInfo->psRGXFWCodeMemDesc)
+	{
+		/* Free fw code */
+		PDUMPCOMMENT("Freeing FW code memory");
+		if (DevmemIsValidExportCookie(&psDevInfo->sRGXFWCodeExportCookie))
+		{
+			/* if the export cookie is valid, the init sequence failed */
+			PVR_DPF((PVR_DBG_ERROR,"DevDeInitRGX: FW Code Export cookie still valid (should have been unexported at init time)"));
+			DevmemUnexport(psDevInfo->psRGXFWCodeMemDesc, &psDevInfo->sRGXFWCodeExportCookie);
+		}
+		DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWCodeMemDesc);
+		DevmemFwFree(psDevInfo->psRGXFWCodeMemDesc);
+		psDevInfo->psRGXFWCodeMemDesc = IMG_NULL;
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR,"No firmware code memory to free!"));
+	}
+
+	if (psDevInfo->psRGXFWDataMemDesc)
+	{
+		/* Free fw data */
+		PDUMPCOMMENT("Freeing FW data memory");
+		if (DevmemIsValidExportCookie(&psDevInfo->sRGXFWDataExportCookie))
+		{
+			/* if the export cookie is valid, the init sequence failed */
+			PVR_DPF((PVR_DBG_ERROR,"DevDeInitRGX: FW Data Export cookie still valid (should have been unexported at init time)"));
+			DevmemUnexport(psDevInfo->psRGXFWDataMemDesc, &psDevInfo->sRGXFWDataExportCookie);
+		}
+		DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWDataMemDesc);
+		DevmemFwFree(psDevInfo->psRGXFWDataMemDesc);
+		psDevInfo->psRGXFWDataMemDesc = IMG_NULL;
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR,"No firmware data memory to free!"));
+	}
+
+	if (psDevInfo->psRGXFWCorememMemDesc)
+	{
+		/* Free fw data */
+		PDUMPCOMMENT("Freeing FW coremem memory");
+		if (DevmemIsValidExportCookie(&psDevInfo->sRGXFWCorememExportCookie))
+		{
+			/* if the export cookie is valid, the init sequence failed */
+			PVR_DPF((PVR_DBG_ERROR,"DevDeInitRGX: FW Coremem Export cookie still valid (should have been unexported at init time)"));
+			DevmemUnexport(psDevInfo->psRGXFWCorememMemDesc, &psDevInfo->sRGXFWCorememExportCookie);
+		}
+		DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWCorememMemDesc);
+		DevmemFwFree(psDevInfo->psRGXFWCorememMemDesc);
+		psDevInfo->psRGXFWCorememMemDesc = IMG_NULL;
+	}
+
+	/*
+	   Free the firmware allocations.
+	 */
+	RGXFreeFirmware(psDevInfo);
+
+	/*
+	 * Clear the mem context create callbacks before destroying the RGX firmware
+	 * context to avoid a spurious callback.
+	 */
+	psDeviceNode->pfnRegisterMemoryContext = IMG_NULL;
+	psDeviceNode->pfnUnregisterMemoryContext = IMG_NULL;
+
+	if (psDevInfo->psKernelDevmemCtx)
+	{
+		eError = DevmemDestroyContext(psDevInfo->psKernelDevmemCtx);
+		/* FIXME - this should return void */
+		PVR_ASSERT(eError == PVRSRV_OK);
+	}
+
+	/* destroy the context list locks */
+	OSWRLockDestroy(psDevInfo->hRenderCtxListLock);
+	OSWRLockDestroy(psDevInfo->hComputeCtxListLock);
+	OSWRLockDestroy(psDevInfo->hTransferCtxListLock);
+	OSWRLockDestroy(psDevInfo->hRaytraceCtxListLock);
+	OSWRLockDestroy(psDevInfo->hMemoryCtxListLock);
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+	if (psDevInfo->hDebugFaultInfoLock != IMG_NULL)
+	{
+		OSLockDestroy(psDevInfo->hDebugFaultInfoLock);
+	}
+	if (psDevInfo->hMMUCtxUnregLock != IMG_NULL)
+	{
+		OSLockDestroy(psDevInfo->hMMUCtxUnregLock);
+	}
+#endif
+
+	/* Free the init scripts. */
+	OSFreeMem(psDevInfo->psScripts);
+
+	/* DeAllocate devinfo */
+	OSFreeMem(psDevInfo);
+
+	psDeviceNode->pvDevice = IMG_NULL;
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+ 
+ @Function	RGXDebugRequestNotify
+
+ @Description Dump the debug data for RGX
+  
+******************************************************************************/
+static IMG_VOID RGXDebugRequestNotify(PVRSRV_DBGREQ_HANDLE hDbgReqestHandle, IMG_UINT32 ui32VerbLevel)
+{
+	PVRSRV_DEVICE_NODE *psDeviceNode = hDbgReqestHandle;
+
+	/* Only action the request if we've fully init'ed */
+	if (g_bDevInit2Done)
+	{
+		RGXDebugRequestProcess(g_pfnDumpDebugPrintf, psDeviceNode->pvDevice, ui32VerbLevel);
+	}
+}
+
+#if defined(PDUMP)
+static
+PVRSRV_ERROR RGXResetPDump(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ 	PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)(psDeviceNode->pvDevice);
+	IMG_UINT32			ui32Idx;
+
+	for (ui32Idx = 0; ui32Idx < RGXFWIF_DM_MAX; ui32Idx++)
+	{
+		psDevInfo->abDumpedKCCBCtlAlready[ui32Idx] = IMG_FALSE;
+	}
+
+static INLINE DEVMEM_HEAP_BLUEPRINT _blueprint_init(IMG_CHAR *name,
+	IMG_UINT64 heap_base,
+	IMG_DEVMEM_SIZE_T heap_length,
+	IMG_UINT32 log2_import_alignment)
+{
+	DEVMEM_HEAP_BLUEPRINT b = {
+		.pszName = name,
+		.sHeapBaseAddr.uiAddr = heap_base,
+		.uiHeapLength = heap_length,
+		.uiLog2DataPageSize = GET_LOG2_PAGESIZE(),
+		.uiLog2ImportAlignment = log2_import_alignment
+	};
+
+	return b;
+}
+
+#define INIT_HEAP(NAME) \
+do { \
+	*psDeviceMemoryHeapCursor = _blueprint_init( \
+			RGX_ ## NAME ## _HEAP_IDENT, \
+			RGX_ ## NAME ## _HEAP_BASE, \
+			RGX_ ## NAME ## _HEAP_SIZE, \
+			0); \
+	psDeviceMemoryHeapCursor++; \
+} while (0)
+
+#define INIT_HEAP_NAME(STR, NAME) \
+do { \
+	*psDeviceMemoryHeapCursor = _blueprint_init( \
+			STR, \
+			RGX_ ## NAME ## _HEAP_BASE, \
+			RGX_ ## NAME ## _HEAP_SIZE, \
+			0); \
+	psDeviceMemoryHeapCursor++; \
+} while (0)
+
+#define INIT_TILING_HEAP(N) \
+do { \
+	IMG_UINT32 xstride; \
+	GetBIFTilingHeapXStride(N, &xstride); \
+	*psDeviceMemoryHeapCursor = _blueprint_init( \
+			RGX_BIF_TILING_HEAP_ ## N ## _IDENT, \
+			RGX_BIF_TILING_HEAP_ ## N ## _BASE, \
+			RGX_BIF_TILING_HEAP_SIZE, \
+			RGX_BIF_TILING_HEAP_ALIGN_LOG2_FROM_XSTRIDE(xstride)); \
+	psDeviceMemoryHeapCursor++; \
+} while (0)
+
+	return PVRSRV_OK;
+}
+#endif /* PDUMP */
+
+static INLINE DEVMEM_HEAP_BLUEPRINT _blueprint_init(IMG_CHAR *name,
+	IMG_UINT64 heap_base,
+	IMG_DEVMEM_SIZE_T heap_length,
+	IMG_UINT32 log2_import_alignment)
+{
+	DEVMEM_HEAP_BLUEPRINT b = {
+		.pszName = name,
+		.sHeapBaseAddr.uiAddr = heap_base,
+		.uiHeapLength = heap_length,
+		.uiLog2DataPageSize = GET_LOG2_PAGESIZE(),
+		.uiLog2ImportAlignment = log2_import_alignment
+	};
+
+	return b;
+}
+
+#define INIT_HEAP(NAME) \
+do { \
+	*psDeviceMemoryHeapCursor = _blueprint_init( \
+			RGX_ ## NAME ## _HEAP_IDENT, \
+			RGX_ ## NAME ## _HEAP_BASE, \
+			RGX_ ## NAME ## _HEAP_SIZE, \
+			0); \
+	psDeviceMemoryHeapCursor++; \
+} while (0)
+
+#define INIT_HEAP_NAME(STR, NAME) \
+do { \
+	*psDeviceMemoryHeapCursor = _blueprint_init( \
+			STR, \
+			RGX_ ## NAME ## _HEAP_BASE, \
+			RGX_ ## NAME ## _HEAP_SIZE, \
+			0); \
+	psDeviceMemoryHeapCursor++; \
+} while (0)
+
+#define INIT_TILING_HEAP(N) \
+do { \
+	IMG_UINT32 xstride; \
+	GetBIFTilingHeapXStride(N, &xstride); \
+	*psDeviceMemoryHeapCursor = _blueprint_init( \
+			RGX_BIF_TILING_HEAP_ ## N ## _IDENT, \
+			RGX_BIF_TILING_HEAP_ ## N ## _BASE, \
+			RGX_BIF_TILING_HEAP_SIZE, \
+			RGX_BIF_TILING_HEAP_ALIGN_LOG2_FROM_XSTRIDE(xstride)); \
+	psDeviceMemoryHeapCursor++; \
+} while (0)
+
+static PVRSRV_ERROR RGX_InitHeaps(DEVICE_MEMORY_INFO *psNewMemoryInfo)
+{
+    DEVMEM_HEAP_BLUEPRINT *psDeviceMemoryHeapCursor;
+
+    /* FIXME - consider whether this ought not to be on the device node itself */
+	psNewMemoryInfo->psDeviceMemoryHeap = OSAllocMem(sizeof(DEVMEM_HEAP_BLUEPRINT) * RGX_MAX_HEAP_ID);
+    if(psNewMemoryInfo->psDeviceMemoryHeap == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXRegisterDevice : Failed to alloc memory for DEVMEM_HEAP_BLUEPRINT"));
+		goto e0;
+	}
+
+	/* Initialise the heaps */
+	psDeviceMemoryHeapCursor = psNewMemoryInfo->psDeviceMemoryHeap;
+
+	INIT_HEAP(GENERAL);
+	INIT_HEAP(PDSCODEDATA);
+	INIT_HEAP(USCCODE);
+	INIT_HEAP(TQ3DPARAMETERS);
+	INIT_TILING_HEAP(1);
+	INIT_TILING_HEAP(2);
+	INIT_TILING_HEAP(3);
+	INIT_TILING_HEAP(4);
+	INIT_HEAP(DOPPLER);
+	INIT_HEAP(DOPPLER_OVERFLOW);
+#if defined(FIX_HW_BRN_37200)
+	INIT_HEAP_NAME("HWBRN37200", HWBRN37200);
+#endif
+	INIT_HEAP_NAME("Firmware", FIRMWARE);
+
+	/* set the heap count */
+	psNewMemoryInfo->ui32HeapCount = (IMG_UINT32)(psDeviceMemoryHeapCursor - psNewMemoryInfo->psDeviceMemoryHeap);
+
+	PVR_ASSERT(psNewMemoryInfo->ui32HeapCount <= RGX_MAX_HEAP_ID);
+
+    /* the new way: we'll set up 2 heap configs: one will be for Meta
+       only, and has only the firmware heap in it. 
+       The remaining one shall be for clients only, and shall have all
+       the other heaps in it */
+
+    psNewMemoryInfo->uiNumHeapConfigs = 2;
+	psNewMemoryInfo->psDeviceMemoryHeapConfigArray = OSAllocMem(sizeof(DEVMEM_HEAP_CONFIG) * psNewMemoryInfo->uiNumHeapConfigs);
+    if (psNewMemoryInfo->psDeviceMemoryHeapConfigArray == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXRegisterDevice : Failed to alloc memory for DEVMEM_HEAP_CONFIG"));
+		goto e1;
+	}
+    
+    psNewMemoryInfo->psDeviceMemoryHeapConfigArray[0].pszName = "Default Heap Configuration";
+    psNewMemoryInfo->psDeviceMemoryHeapConfigArray[0].uiNumHeaps = psNewMemoryInfo->ui32HeapCount-1;
+    psNewMemoryInfo->psDeviceMemoryHeapConfigArray[0].psHeapBlueprintArray = psNewMemoryInfo->psDeviceMemoryHeap;
+
+    psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].pszName = "Firmware Heap Configuration";
+#if defined(FIX_HW_BRN_37200)
+    psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].uiNumHeaps = 2;
+    psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].psHeapBlueprintArray = psDeviceMemoryHeapCursor-2;
+#else
+    psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].uiNumHeaps = 1;
+    psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].psHeapBlueprintArray = psDeviceMemoryHeapCursor-1;
+#endif
+
+	return PVRSRV_OK;
+e1:
+	OSFreeMem(psNewMemoryInfo->psDeviceMemoryHeap);
+e0:
+	return PVRSRV_ERROR_OUT_OF_MEMORY;
+}
+
+#undef INIT_HEAP
+#undef INIT_HEAP_NAME
+#undef INIT_TILING_HEAP
+
+#undef INIT_HEAP
+#undef INIT_HEAP_NAME
+#undef INIT_TILING_HEAP
+
+static IMG_VOID RGX_DeInitHeaps(DEVICE_MEMORY_INFO *psDevMemoryInfo)
+{
+	OSFreeMem(psDevMemoryInfo->psDeviceMemoryHeapConfigArray);
+	OSFreeMem(psDevMemoryInfo->psDeviceMemoryHeap);
+}
+
+
+/*
+	RGXRegisterDevice
+*/
+PVRSRV_ERROR RGXRegisterDevice (PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+    PVRSRV_ERROR eError;
+	DEVICE_MEMORY_INFO *psDevMemoryInfo;
+	PVRSRV_RGXDEV_INFO	*psDevInfo;
+
+	/* pdump info about the core */
+	PDUMPCOMMENT("RGX Version Information (KM): %s", RGX_BVNC_KM);
+	
+	#if defined(RGX_FEATURE_SYSTEM_CACHE)
+	PDUMPCOMMENT("RGX System Level Cache is present");
+	#endif /* RGX_FEATURE_SYSTEM_CACHE */
+
+	PDUMPCOMMENT("RGX Initialisation (Part 1)");
+
+	/*********************
+	 * Device node setup *
+	 *********************/
+	/* Setup static data and callbacks on the device agnostic device node */
+	psDeviceNode->sDevId.eDeviceType		= DEV_DEVICE_TYPE;
+	psDeviceNode->sDevId.eDeviceClass		= DEV_DEVICE_CLASS;
+#if defined(PDUMP)
+	psDeviceNode->sDevId.pszPDumpRegName	= RGX_PDUMPREG_NAME;
+	/*
+		FIXME: This should not be required as PMR's should give the memspace
+		name. However, due to limitations within PDump we need a memspace name
+		when dpumping with MMU context with virtual address in which case we
+		don't have a PMR to get the name from.
+		
+		There is also the issue obtaining a namespace name for the catbase which
+		is required when we PDump the write of the physical catbase into the FW
+		structure
+	*/
+	psDeviceNode->sDevId.pszPDumpDevName	= PhysHeapPDumpMemspaceName(psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL]);
+	psDeviceNode->pfnPDumpInitDevice = &RGXResetPDump;
+#endif /* PDUMP */
+
+	psDeviceNode->eHealthStatus = PVRSRV_DEVICE_HEALTH_STATUS_OK;
+	psDeviceNode->eHealthReason = PVRSRV_DEVICE_HEALTH_REASON_NONE;
+
+	/* Configure MMU specific stuff */
+	RGXMMUInit_Register(psDeviceNode);
+
+	psDeviceNode->pfnMMUCacheInvalidate = RGXMMUCacheInvalidate;
+
+	psDeviceNode->pfnSLCCacheInvalidateRequest = RGXSLCCacheInvalidateRequest;
+
+	/* Register RGX to receive notifies when other devices complete some work */
+	PVRSRVRegisterCmdCompleteNotify(&psDeviceNode->hCmdCompNotify, &RGXScheduleProcessQueuesKM, psDeviceNode);
+
+	psDeviceNode->pfnInitDeviceCompatCheck	= &RGXDevInitCompatCheck;
+
+	/* Register callbacks for creation of device memory contexts */
+	psDeviceNode->pfnRegisterMemoryContext = RGXRegisterMemoryContext;
+	psDeviceNode->pfnUnregisterMemoryContext = RGXUnregisterMemoryContext;
+
+	/* Register callbacks for Unified Fence Objects */
+	psDeviceNode->pfnAllocUFOBlock = RGXAllocUFOBlock;
+	psDeviceNode->pfnFreeUFOBlock = RGXFreeUFOBlock;
+
+	/* Register callback for dumping debug info */
+	PVRSRVRegisterDbgRequestNotify(&psDeviceNode->hDbgReqNotify, &RGXDebugRequestNotify, DEBUG_REQUEST_RGX, psDeviceNode);
+	
+	/* Register callback for checking the device's health */
+	psDeviceNode->pfnUpdateHealthStatus = RGXUpdateHealthStatus;
+
+	/* Register method to service the FW HWPerf buffer */
+	psDeviceNode->pfnServiceHWPerf = RGXHWPerfDataStoreCB;
+
+	/* Register callback for getting the device version information string */
+	psDeviceNode->pfnDeviceVersionString = RGXDevVersionString;
+
+	/* Register callback for getting the device clock speed */
+	psDeviceNode->pfnDeviceClockSpeed = RGXDevClockSpeed;
+
+	/* Register callback for soft resetting some device modules */
+	psDeviceNode->pfnSoftReset = RGXSoftReset;
+
+	/* Register callback for resetting the HWR logs */
+	psDeviceNode->pfnResetHWRLogs = RGXResetHWRLogs;
+
+
+	/*********************
+	 * Device info setup *
+	 *********************/
+	/* Allocate device control block */
+	psDevInfo = OSAllocMem(sizeof(*psDevInfo));
+	if (psDevInfo == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"DevInitRGXPart1 : Failed to alloc memory for DevInfo"));
+		return (PVRSRV_ERROR_OUT_OF_MEMORY);
+	}
+	OSMemSet (psDevInfo, 0, sizeof(*psDevInfo));
+
+	/* create locks for the context lists stored in the DevInfo structure.
+	 * these lists are modified on context create/destroy and read by the
+	 * watchdog thread
+	 */
+
+	eError = OSWRLockCreate(&(psDevInfo->hRenderCtxListLock));
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create render context list lock", __func__));
+		goto e0;
+	}
+
+	eError = OSWRLockCreate(&(psDevInfo->hComputeCtxListLock));
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create compute context list lock", __func__));
+		goto e1;
+	}
+
+	eError = OSWRLockCreate(&(psDevInfo->hTransferCtxListLock));
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create transfer context list lock", __func__));
+		goto e2;
+	}
+
+	eError = OSWRLockCreate(&(psDevInfo->hRaytraceCtxListLock));
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create raytrace context list lock", __func__));
+		goto e3;
+	}
+
+	eError = OSWRLockCreate(&(psDevInfo->hMemoryCtxListLock));
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create memory context list lock", __func__));
+		goto e4;
+	}
+
+	dllist_init(&(psDevInfo->sRenderCtxtListHead));
+	dllist_init(&(psDevInfo->sComputeCtxtListHead));
+	dllist_init(&(psDevInfo->sTransferCtxtListHead));
+	dllist_init(&(psDevInfo->sRaytraceCtxtListHead));
+
+	dllist_init(&(psDevInfo->sCommonCtxtListHead));
+	psDevInfo->ui32CommonCtxtCurrentID = 1;
+
+	dllist_init(&psDevInfo->sMemoryContextList);
+
+	/* Allocate space for scripts. */
+	psDevInfo->psScripts = OSAllocMem(sizeof(*psDevInfo->psScripts));
+	if (!psDevInfo->psScripts)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate memory for scripts", __func__));
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e5;
+	}
+
+	/* Setup static data and callbacks on the device specific device info */
+	psDevInfo->eDeviceType 		= DEV_DEVICE_TYPE;
+	psDevInfo->eDeviceClass 	= DEV_DEVICE_CLASS;
+	psDevInfo->psDeviceNode		= psDeviceNode;
+
+	psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
+	psDevMemoryInfo->ui32AddressSpaceSizeLog2 = RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS;
+	psDevInfo->pvDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap;
+
+	/* flags, backing store details to be specified by system */
+	psDevMemoryInfo->ui32Flags = 0;
+
+	eError = RGX_InitHeaps(psDevMemoryInfo);
+	if (eError != PVRSRV_OK)
+	{
+		goto e6;
+	}
+
+	psDeviceNode->pvDevice = psDevInfo;
+	return PVRSRV_OK;
+
+e6:
+	OSFreeMem(psDevInfo->psScripts);
+e5:
+	OSWRLockDestroy(psDevInfo->hMemoryCtxListLock);
+e4:
+	OSWRLockDestroy(psDevInfo->hRaytraceCtxListLock);
+e3:
+	OSWRLockDestroy(psDevInfo->hTransferCtxListLock);
+e2:
+	OSWRLockDestroy(psDevInfo->hComputeCtxListLock);
+e1:
+	OSWRLockDestroy(psDevInfo->hRenderCtxListLock);
+e0:
+	OSFreeMem(psDevInfo);
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+/*!
+*******************************************************************************
+
+ @Function	RGXDevInitCompatCheck_KMBuildOptions_FWAgainstDriver
+
+ @Description
+
+ Validate the FW build options against KM driver build options (KM build options only)
+
+ Following check is reduntant, because next check checks the same bits.
+ Redundancy occurs because if client-server are build-compatible and client-firmware are 
+ build-compatible then server-firmware are build-compatible as well.
+ 
+ This check is left for clarity in error messages if any incompatibility occurs.
+
+ @Input psRGXFWInit - FW init data
+
+ @Return   PVRSRV_ERROR - depending on mismatch found
+
+******************************************************************************/
+static PVRSRV_ERROR RGXDevInitCompatCheck_KMBuildOptions_FWAgainstDriver(RGXFWIF_INIT *psRGXFWInit)
+{
+#if !defined(NO_HARDWARE)
+	IMG_UINT32			ui32BuildOptions, ui32BuildOptionsFWKMPart, ui32BuildOptionsMismatch;
+
+	if (psRGXFWInit == IMG_NULL)
+		return PVRSRV_ERROR_INVALID_PARAMS;
+
+	ui32BuildOptions = (RGX_BUILD_OPTIONS_KM);
+
+	ui32BuildOptionsFWKMPart = psRGXFWInit->sRGXCompChecks.ui32BuildOptions & RGX_BUILD_OPTIONS_MASK_KM;
+	
+	if (ui32BuildOptions != ui32BuildOptionsFWKMPart)
+	{
+		ui32BuildOptionsMismatch = ui32BuildOptions ^ ui32BuildOptionsFWKMPart;
+		if ( (ui32BuildOptions & ui32BuildOptionsMismatch) != 0)
+		{
+			PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware and KM driver build options; "
+				"extra options present in the KM driver: (0x%x). Please check rgx_options_km.h",
+				ui32BuildOptions & ui32BuildOptionsMismatch ));
+		}
+
+		if ( (ui32BuildOptionsFWKMPart & ui32BuildOptionsMismatch) != 0)
+		{
+			PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware-side and KM driver build options; "
+				"extra options present in Firmware: (0x%x). Please check rgx_options_km.h",
+				ui32BuildOptionsFWKMPart & ui32BuildOptionsMismatch ));
+		}
+		return PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH;
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: Firmware and KM driver build options match. [ OK ]"));
+	}
+#endif
+
+	return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+
+ @Function	RGXDevInitCompatCheck_BuildOptions_FWAgainstClient
+
+ @Description
+
+ Validate the FW build options against client build options (KM and non-KM)
+
+ @Input psDevInfo - device info
+ @Input psRGXFWInit - FW init data
+ @Input ui32ClientBuildOptions - client build options flags
+
+ @Return   PVRSRV_ERROR - depending on mismatch found
+
+******************************************************************************/
+static PVRSRV_ERROR RGXDevInitCompatCheck_BuildOptions_FWAgainstClient(PVRSRV_RGXDEV_INFO 	*psDevInfo,
+																			RGXFWIF_INIT *psRGXFWInit,
+																			IMG_UINT32 ui32ClientBuildOptions)
+{
+#if !defined(NO_HARDWARE)
+	IMG_UINT32			ui32BuildOptionsMismatch;
+	IMG_UINT32			ui32BuildOptionsFW;
+#endif
+#if defined(PDUMP)
+	PVRSRV_ERROR		eError;
+#endif
+
+#if defined(PDUMP)
+	PDUMPCOMMENT("Compatibility check: client and FW build options");
+	eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+												offsetof(RGXFWIF_INIT, sRGXCompChecks) + 
+												offsetof(RGXFWIF_COMPCHECKS, ui32BuildOptions),
+												ui32ClientBuildOptions,
+												0xffffffff,
+												PDUMP_POLL_OPERATOR_EQUAL,
+												PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+		return eError;
+	}
+#endif
+
+#if !defined(NO_HARDWARE)
+	if (psRGXFWInit == IMG_NULL)
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	
+	ui32BuildOptionsFW = psRGXFWInit->sRGXCompChecks.ui32BuildOptions;
+	
+	if (ui32ClientBuildOptions != ui32BuildOptionsFW)
+	{
+		ui32BuildOptionsMismatch = ui32ClientBuildOptions ^ ui32BuildOptionsFW;
+		if ( (ui32ClientBuildOptions & ui32BuildOptionsMismatch) != 0)
+		{
+			PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware and client build options; "
+				"extra options present in client: (0x%x). Please check rgx_options.h",
+				ui32ClientBuildOptions & ui32BuildOptionsMismatch ));
+		}
+
+		if ( (ui32BuildOptionsFW & ui32BuildOptionsMismatch) != 0)
+		{
+			PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware and client build options; "
+				"extra options present in Firmware: (0x%x). Please check rgx_options.h",
+				ui32BuildOptionsFW & ui32BuildOptionsMismatch ));
+		}
+		return PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH;
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: Firmware and client build options match. [ OK ]"));
+	}
+#endif
+
+	return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+
+ @Function	RGXDevInitCompatCheck_DDKVersion_FWAgainstDriver
+
+ @Description
+
+ Validate FW DDK version against driver DDK version
+
+ @Input psDevInfo - device info
+ @Input psRGXFWInit - FW init data
+
+ @Return   PVRSRV_ERROR - depending on mismatch found
+
+******************************************************************************/
+static PVRSRV_ERROR RGXDevInitCompatCheck_DDKVersion_FWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo,
+																			RGXFWIF_INIT *psRGXFWInit)
+{
+#if defined(PDUMP)||(!defined(NO_HARDWARE))
+	IMG_UINT32			ui32DDKVersion;
+	PVRSRV_ERROR		eError;
+	
+	ui32DDKVersion = PVRVERSION_PACK(PVRVERSION_MAJ, PVRVERSION_MIN);
+#endif
+
+#if defined(PDUMP)
+	PDUMPCOMMENT("Compatibility check: KM driver and FW DDK version");
+	eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+												offsetof(RGXFWIF_INIT, sRGXCompChecks) + 
+												offsetof(RGXFWIF_COMPCHECKS, ui32DDKVersion),
+												ui32DDKVersion,
+												0xffffffff,
+												PDUMP_POLL_OPERATOR_EQUAL,
+												PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+		return eError;
+	}
+#endif
+
+#if !defined(NO_HARDWARE)
+	if (psRGXFWInit == IMG_NULL)
+		return PVRSRV_ERROR_INVALID_PARAMS;
+
+	if (psRGXFWInit->sRGXCompChecks.ui32DDKVersion != ui32DDKVersion)
+	{
+		PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Incompatible driver DDK revision (%u.%u) / Firmware DDK revision (%u.%u).",
+				PVRVERSION_MAJ, PVRVERSION_MIN, 
+				PVRVERSION_UNPACK_MAJ(psRGXFWInit->sRGXCompChecks.ui32DDKVersion),
+				PVRVERSION_UNPACK_MIN(psRGXFWInit->sRGXCompChecks.ui32DDKVersion)));
+		eError = PVRSRV_ERROR_DDK_VERSION_MISMATCH;
+		PVR_DBG_BREAK;
+		return eError;
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: driver DDK revision (%u.%u) and Firmware DDK revision (%u.%u) match. [ OK ]",
+				PVRVERSION_MAJ, PVRVERSION_MIN, 
+				PVRVERSION_MAJ, PVRVERSION_MIN));
+	}
+#endif	
+
+	return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+
+ @Function	RGXDevInitCompatCheck_DDKBuild_FWAgainstDriver
+
+ @Description
+
+ Validate FW DDK build against driver DDK build
+
+ @Input psDevInfo - device info
+ @Input psRGXFWInit - FW init data
+
+ @Return   PVRSRV_ERROR - depending on mismatch found
+
+******************************************************************************/
+static PVRSRV_ERROR RGXDevInitCompatCheck_DDKBuild_FWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo,
+																			RGXFWIF_INIT *psRGXFWInit)
+{
+#if defined(PDUMP)||(!defined(NO_HARDWARE))
+	IMG_UINT32			ui32DDKBuild;
+	PVRSRV_ERROR		eError;
+	
+	ui32DDKBuild = PVRVERSION_BUILD;
+#endif
+
+#if defined(PDUMP)
+	PDUMPCOMMENT("Compatibility check: KM driver and FW DDK build");
+	eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+												offsetof(RGXFWIF_INIT, sRGXCompChecks) + 
+												offsetof(RGXFWIF_COMPCHECKS, ui32DDKBuild),
+												ui32DDKBuild,
+												0xffffffff,
+												PDUMP_POLL_OPERATOR_EQUAL,
+												PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+		return eError;
+	}
+#endif
+
+#if !defined(NO_HARDWARE)
+	if (psRGXFWInit == IMG_NULL)
+		return PVRSRV_ERROR_INVALID_PARAMS;
+
+	if (psRGXFWInit->sRGXCompChecks.ui32DDKBuild != ui32DDKBuild)
+	{
+		PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Incompatible driver DDK build (%d) / Firmware DDK build (%d).",
+				ui32DDKBuild, psRGXFWInit->sRGXCompChecks.ui32DDKBuild));
+		eError = PVRSRV_ERROR_DDK_BUILD_MISMATCH;
+		PVR_DBG_BREAK;
+		return eError;
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: driver DDK build (%d) and Firmware DDK build (%d) match. [ OK ]",
+				ui32DDKBuild, psRGXFWInit->sRGXCompChecks.ui32DDKBuild));
+	}
+#endif
+	return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+
+ @Function	RGXDevInitCompatCheck_BVNC_FWAgainstDriver
+
+ @Description
+
+ Validate FW BVNC against driver BVNC
+
+ @Input psDevInfo - device info
+ @Input psRGXFWInit - FW init data
+
+ @Return   PVRSRV_ERROR - depending on mismatch found
+
+******************************************************************************/
+static PVRSRV_ERROR RGXDevInitCompatCheck_BVNC_FWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo,
+																			RGXFWIF_INIT *psRGXFWInit)
+{
+#if defined(PDUMP)
+	IMG_UINT32					i;
+#endif
+#if !defined(NO_HARDWARE)
+	IMG_BOOL bCompatibleAll, bCompatibleVersion, bCompatibleLenMax, bCompatibleBNC, bCompatibleV;
+#endif
+#if defined(PDUMP)||(!defined(NO_HARDWARE))
+	RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(sBVNC);
+	PVRSRV_ERROR				eError;
+	
+	rgx_bvnc_packed(&sBVNC.ui32BNC, sBVNC.aszV, sBVNC.ui32VLenMax, RGX_BVNC_KM_B, RGX_BVNC_KM_V_ST, RGX_BVNC_KM_N, RGX_BVNC_KM_C);
+#endif
+
+#if defined(PDUMP)
+	PDUMPCOMMENT("Compatibility check: KM driver and FW BVNC (struct version)");
+	eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+											offsetof(RGXFWIF_INIT, sRGXCompChecks) + 
+											offsetof(RGXFWIF_COMPCHECKS, sFWBVNC) +
+											offsetof(RGXFWIF_COMPCHECKS_BVNC, ui32LayoutVersion),
+											sBVNC.ui32LayoutVersion,
+											0xffffffff,
+											PDUMP_POLL_OPERATOR_EQUAL,
+											PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+	}
+
+	PDUMPCOMMENT("Compatibility check: KM driver and FW BVNC (maxlen)");
+	eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+											offsetof(RGXFWIF_INIT, sRGXCompChecks) + 
+											offsetof(RGXFWIF_COMPCHECKS, sFWBVNC) +
+											offsetof(RGXFWIF_COMPCHECKS_BVNC, ui32VLenMax),
+											sBVNC.ui32VLenMax,
+											0xffffffff,
+											PDUMP_POLL_OPERATOR_EQUAL,
+											PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+	}
+
+	PDUMPCOMMENT("Compatibility check: KM driver and FW BVNC (BNC part)");
+	eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+											offsetof(RGXFWIF_INIT, sRGXCompChecks) + 
+											offsetof(RGXFWIF_COMPCHECKS, sFWBVNC) +
+											offsetof(RGXFWIF_COMPCHECKS_BVNC, ui32BNC),
+											sBVNC.ui32BNC,
+											0xffffffff,
+											PDUMP_POLL_OPERATOR_EQUAL,
+											PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+	}
+
+	for (i = 0; i < sBVNC.ui32VLenMax; i += sizeof(IMG_UINT32))
+	{
+		PDUMPCOMMENT("Compatibility check: KM driver and FW BVNC (V part)");
+		eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+												offsetof(RGXFWIF_INIT, sRGXCompChecks) + 
+												offsetof(RGXFWIF_COMPCHECKS, sFWBVNC) +
+												offsetof(RGXFWIF_COMPCHECKS_BVNC, aszV) + 
+												i,
+												*((IMG_UINT32 *)(sBVNC.aszV + i)),
+												0xffffffff,
+												PDUMP_POLL_OPERATOR_EQUAL,
+												PDUMP_FLAGS_CONTINUOUS);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+		}
+	}
+#endif
+
+#if !defined(NO_HARDWARE)
+	if (psRGXFWInit == IMG_NULL)
+		return PVRSRV_ERROR_INVALID_PARAMS;
+
+	RGX_BVNC_EQUAL(sBVNC, psRGXFWInit->sRGXCompChecks.sFWBVNC, bCompatibleAll, bCompatibleVersion, bCompatibleLenMax, bCompatibleBNC, bCompatibleV);
+	
+	if (!bCompatibleAll)
+	{
+		if (!bCompatibleVersion)
+		{
+			PVR_LOG(("(FAIL) %s: Incompatible compatibility struct version of driver (%d) and firmware (%d).",
+					__FUNCTION__, 
+					sBVNC.ui32LayoutVersion, 
+					psRGXFWInit->sRGXCompChecks.sFWBVNC.ui32LayoutVersion));
+			eError = PVRSRV_ERROR_BVNC_MISMATCH;
+			return eError;
+		}
+
+		if (!bCompatibleLenMax)
+		{
+			PVR_LOG(("(FAIL) %s: Incompatible V maxlen of driver (%d) and firmware (%d).",
+					__FUNCTION__, 
+					sBVNC.ui32VLenMax, 
+					psRGXFWInit->sRGXCompChecks.sFWBVNC.ui32VLenMax));
+			eError = PVRSRV_ERROR_BVNC_MISMATCH;
+			return eError;
+		}
+
+		if (!bCompatibleBNC)
+		{
+			PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in KM driver BNC (%d._.%d.%d) and Firmware BNC (%d._.%d.%d)",
+					RGX_BVNC_PACKED_EXTR_B(sBVNC), 
+					RGX_BVNC_PACKED_EXTR_N(sBVNC), 
+					RGX_BVNC_PACKED_EXTR_C(sBVNC), 
+					RGX_BVNC_PACKED_EXTR_B(psRGXFWInit->sRGXCompChecks.sFWBVNC), 
+					RGX_BVNC_PACKED_EXTR_N(psRGXFWInit->sRGXCompChecks.sFWBVNC), 
+					RGX_BVNC_PACKED_EXTR_C(psRGXFWInit->sRGXCompChecks.sFWBVNC)));
+			eError = PVRSRV_ERROR_BVNC_MISMATCH;
+			return eError;
+		}
+		
+		if (!bCompatibleV)
+		{
+			PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in KM driver BVNC (%d.%s.%d.%d) and Firmware BVNC (%d.%s.%d.%d)",
+					RGX_BVNC_PACKED_EXTR_B(sBVNC), 
+					RGX_BVNC_PACKED_EXTR_V(sBVNC), 
+					RGX_BVNC_PACKED_EXTR_N(sBVNC), 
+					RGX_BVNC_PACKED_EXTR_C(sBVNC), 
+					RGX_BVNC_PACKED_EXTR_B(psRGXFWInit->sRGXCompChecks.sFWBVNC), 
+					RGX_BVNC_PACKED_EXTR_V(psRGXFWInit->sRGXCompChecks.sFWBVNC), 
+					RGX_BVNC_PACKED_EXTR_N(psRGXFWInit->sRGXCompChecks.sFWBVNC), 
+					RGX_BVNC_PACKED_EXTR_C(psRGXFWInit->sRGXCompChecks.sFWBVNC)));
+			eError = PVRSRV_ERROR_BVNC_MISMATCH;
+			return eError;
+		}
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: Firmware BVNC and KM driver BNVC match. [ OK ]"));
+	}
+#endif
+	return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+
+ @Function	RGXDevInitCompatCheck_BVNC_HWAgainstDriver
+
+ @Description
+
+ Validate HW BVNC against driver BVNC
+
+ @Input psDevInfo - device info
+ @Input psRGXFWInit - FW init data
+
+ @Return   PVRSRV_ERROR - depending on mismatch found
+
+******************************************************************************/
+#if ((!defined(NO_HARDWARE))&&(!defined(EMULATOR)))
+#define TARGET_SILICON  /* definition for everything that is not emu and not nohw configuration */
+#endif
+
+#if defined(FIX_HW_BRN_38835)
+#define COMPAT_BVNC_MASK_B
+#define COMPAT_BVNC_MASK_V
+#endif
+
+static PVRSRV_ERROR RGXDevInitCompatCheck_BVNC_HWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo,
+																	RGXFWIF_INIT *psRGXFWInit)
+{
+#if defined(PDUMP) || defined(TARGET_SILICON)
+	IMG_UINT32 ui32MaskBNC = RGX_BVNC_PACK_MASK_B |
+								RGX_BVNC_PACK_MASK_N |
+								RGX_BVNC_PACK_MASK_C;
+
+	IMG_UINT32 bMaskV = IMG_FALSE;
+
+	PVRSRV_ERROR				eError;
+	RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(sSWBVNC);
+#endif
+
+#if defined(TARGET_SILICON)
+	RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(sHWBVNC);
+	IMG_BOOL bCompatibleAll, bCompatibleVersion, bCompatibleLenMax, bCompatibleBNC, bCompatibleV;
+#endif
+
+#if defined(PDUMP) || defined(TARGET_SILICON)
+
+#if defined(COMPAT_BVNC_MASK_B)
+	ui32MaskBNC &= ~RGX_BVNC_PACK_MASK_B;
+#endif
+#if defined(COMPAT_BVNC_MASK_V)
+	bMaskV = IMG_TRUE;
+#endif
+#if defined(COMPAT_BVNC_MASK_N)
+	ui32MaskBNC &= ~RGX_BVNC_PACK_MASK_N;
+#endif
+#if defined(COMPAT_BVNC_MASK_C)
+	ui32MaskBNC &= ~RGX_BVNC_PACK_MASK_C;
+#endif
+	
+	rgx_bvnc_packed(&sSWBVNC.ui32BNC, sSWBVNC.aszV, sSWBVNC.ui32VLenMax, RGX_BVNC_KM_B, RGX_BVNC_KM_V_ST, RGX_BVNC_KM_N, RGX_BVNC_KM_C);
+
+#if defined(FIX_HW_BRN_38344)
+	if (RGX_BVNC_KM_C >= 10)
+	{
+		ui32MaskBNC &= ~RGX_BVNC_PACK_MASK_C;
+	}
+#endif
+
+	if ((ui32MaskBNC != (RGX_BVNC_PACK_MASK_B | RGX_BVNC_PACK_MASK_N | RGX_BVNC_PACK_MASK_C)) || bMaskV)
+	{
+		PVR_LOG(("Compatibility checks: Ignoring fields: '%s%s%s%s' of HW BVNC.",
+				((!(ui32MaskBNC & RGX_BVNC_PACK_MASK_B))?("B"):("")), 
+				((bMaskV)?("V"):("")), 
+				((!(ui32MaskBNC & RGX_BVNC_PACK_MASK_N))?("N"):("")), 
+				((!(ui32MaskBNC & RGX_BVNC_PACK_MASK_C))?("C"):(""))));
+	}
+#endif
+
+#if defined(EMULATOR)
+	PVR_LOG(("Compatibility checks for emu target: Ignoring HW BVNC checks."));
+#endif
+
+#if defined(PDUMP)
+	PDUMPCOMMENT("Compatibility check: Layout version of compchecks struct");
+	eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+											offsetof(RGXFWIF_INIT, sRGXCompChecks) + 
+											offsetof(RGXFWIF_COMPCHECKS, sHWBVNC) +
+											offsetof(RGXFWIF_COMPCHECKS_BVNC, ui32LayoutVersion),
+											sSWBVNC.ui32LayoutVersion,
+											0xffffffff,
+											PDUMP_POLL_OPERATOR_EQUAL,
+											PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+		return eError;
+	}
+
+	PDUMPCOMMENT("Compatibility check: HW V max len and FW V max len");
+	eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+											offsetof(RGXFWIF_INIT, sRGXCompChecks) + 
+											offsetof(RGXFWIF_COMPCHECKS, sHWBVNC) +
+											offsetof(RGXFWIF_COMPCHECKS_BVNC, ui32VLenMax),
+											sSWBVNC.ui32VLenMax,
+											0xffffffff,
+											PDUMP_POLL_OPERATOR_EQUAL,
+											PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+		return eError;
+	}
+
+	if (ui32MaskBNC != 0)
+	{
+		PDUMPIF("DISABLE_HWBNC_CHECK");
+		PDUMPELSE("DISABLE_HWBNC_CHECK");
+		PDUMPCOMMENT("Compatibility check: HW BNC and FW BNC");
+		eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+												offsetof(RGXFWIF_INIT, sRGXCompChecks) + 
+												offsetof(RGXFWIF_COMPCHECKS, sHWBVNC) +
+												offsetof(RGXFWIF_COMPCHECKS_BVNC, ui32BNC),
+												sSWBVNC.ui32BNC,
+												ui32MaskBNC,
+												PDUMP_POLL_OPERATOR_EQUAL,
+												PDUMP_FLAGS_CONTINUOUS);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+			return eError;
+		}
+		PDUMPFI("DISABLE_HWBNC_CHECK");
+	}
+	if (!bMaskV)
+	{
+		IMG_UINT32 i;
+		PDUMPIF("DISABLE_HWV_CHECK");
+		PDUMPELSE("DISABLE_HWV_CHECK");
+		for (i = 0; i < sSWBVNC.ui32VLenMax; i += sizeof(IMG_UINT32))
+		{
+			PDUMPCOMMENT("Compatibility check: HW V and FW V");
+			eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+												offsetof(RGXFWIF_INIT, sRGXCompChecks) + 
+												offsetof(RGXFWIF_COMPCHECKS, sHWBVNC) +
+												offsetof(RGXFWIF_COMPCHECKS_BVNC, aszV) + 
+												i,
+												*((IMG_UINT32 *)(sSWBVNC.aszV + i)),
+												0xffffffff,
+												PDUMP_POLL_OPERATOR_EQUAL,
+												PDUMP_FLAGS_CONTINUOUS);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+				return eError;
+			}
+		}
+		PDUMPFI("DISABLE_HWV_CHECK");
+	}
+#endif
+
+#if defined(TARGET_SILICON)
+	if (psRGXFWInit == IMG_NULL)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+	
+	sHWBVNC = psRGXFWInit->sRGXCompChecks.sHWBVNC;
+
+	sHWBVNC.ui32BNC &= ui32MaskBNC;
+	sSWBVNC.ui32BNC &= ui32MaskBNC;
+
+	if (bMaskV)
+	{
+		sHWBVNC.aszV[0] = '\0';
+		sSWBVNC.aszV[0] = '\0';
+	}
+
+	RGX_BVNC_EQUAL(sSWBVNC, sHWBVNC, bCompatibleAll, bCompatibleVersion, bCompatibleLenMax, bCompatibleBNC, bCompatibleV);
+
+#if defined(FIX_HW_BRN_42480)
+	if (!bCompatibleAll && bCompatibleVersion)
+	{
+		if ((RGX_BVNC_PACKED_EXTR_B(sSWBVNC) == 1) &&
+			!(OSStringCompare(RGX_BVNC_PACKED_EXTR_V(sSWBVNC),"76")) &&
+			(RGX_BVNC_PACKED_EXTR_N(sSWBVNC) == 4) &&
+			(RGX_BVNC_PACKED_EXTR_C(sSWBVNC) == 6))
+		{
+			if ((RGX_BVNC_PACKED_EXTR_B(sHWBVNC) == 1) &&
+				!(OSStringCompare(RGX_BVNC_PACKED_EXTR_V(sHWBVNC),"69")) &&
+				(RGX_BVNC_PACKED_EXTR_N(sHWBVNC) == 4) &&
+				(RGX_BVNC_PACKED_EXTR_C(sHWBVNC) == 4))
+			{
+				bCompatibleBNC = IMG_TRUE;
+				bCompatibleLenMax = IMG_TRUE;
+				bCompatibleV = IMG_TRUE;
+				bCompatibleAll = IMG_TRUE;
+			}
+		}
+	}
+#endif
+
+	if (!bCompatibleAll)
+	{
+		if (!bCompatibleVersion)
+		{
+			PVR_LOG(("(FAIL) %s: Incompatible compatibility struct version of HW (%d) and FW (%d).",
+					__FUNCTION__, 
+					sHWBVNC.ui32LayoutVersion, 
+					sSWBVNC.ui32LayoutVersion));
+			eError = PVRSRV_ERROR_BVNC_MISMATCH;
+			return eError;
+		}
+
+		if (!bCompatibleLenMax)
+		{
+			PVR_LOG(("(FAIL) %s: Incompatible V maxlen of HW (%d) and FW (%d).",
+					__FUNCTION__, 
+					sHWBVNC.ui32VLenMax, 
+					sSWBVNC.ui32VLenMax));
+			eError = PVRSRV_ERROR_BVNC_MISMATCH;
+			return eError;
+		}
+
+		if (!bCompatibleBNC)
+		{
+			PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Incompatible HW BNC (%d._.%d.%d) and FW BNC (%d._.%d.%d).",
+					RGX_BVNC_PACKED_EXTR_B(sHWBVNC), 
+					RGX_BVNC_PACKED_EXTR_N(sHWBVNC), 
+					RGX_BVNC_PACKED_EXTR_C(sHWBVNC), 
+					RGX_BVNC_PACKED_EXTR_B(sSWBVNC), 
+					RGX_BVNC_PACKED_EXTR_N(sSWBVNC), 
+					RGX_BVNC_PACKED_EXTR_C(sSWBVNC)));
+			eError = PVRSRV_ERROR_BVNC_MISMATCH;
+			return eError;
+		}
+		
+		if (!bCompatibleV)
+		{
+			PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Incompatible HW BVNC (%d.%s.%d.%d) and FW BVNC (%d.%s.%d.%d).",
+					RGX_BVNC_PACKED_EXTR_B(sHWBVNC), 
+					RGX_BVNC_PACKED_EXTR_V(sHWBVNC), 
+					RGX_BVNC_PACKED_EXTR_N(sHWBVNC), 
+					RGX_BVNC_PACKED_EXTR_C(sHWBVNC), 
+					RGX_BVNC_PACKED_EXTR_B(sSWBVNC), 
+					RGX_BVNC_PACKED_EXTR_V(sSWBVNC), 
+					RGX_BVNC_PACKED_EXTR_N(sSWBVNC), 
+					RGX_BVNC_PACKED_EXTR_C(sSWBVNC)));
+			eError = PVRSRV_ERROR_BVNC_MISMATCH;
+			return eError;
+		}
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: HW BVNC (%d.%s.%d.%d) and FW BVNC (%d.%s.%d.%d) match. [ OK ]", 
+				RGX_BVNC_PACKED_EXTR_B(sHWBVNC), 
+				RGX_BVNC_PACKED_EXTR_V(sHWBVNC), 
+				RGX_BVNC_PACKED_EXTR_N(sHWBVNC), 
+				RGX_BVNC_PACKED_EXTR_C(sHWBVNC), 
+				RGX_BVNC_PACKED_EXTR_B(sSWBVNC), 
+				RGX_BVNC_PACKED_EXTR_V(sSWBVNC), 
+				RGX_BVNC_PACKED_EXTR_N(sSWBVNC), 
+				RGX_BVNC_PACKED_EXTR_C(sSWBVNC)));
+	}
+#endif
+
+	return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+
+ @Function	RGXDevInitCompatCheck_METACoreVersion_AgainstDriver
+
+ @Description
+
+ Validate HW META version against driver META version
+
+ @Input psDevInfo - device info
+ @Input psRGXFWInit - FW init data
+
+ @Return   PVRSRV_ERROR - depending on mismatch found
+
+******************************************************************************/
+
+static PVRSRV_ERROR RGXDevInitCompatCheck_METACoreVersion_AgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo,
+									RGXFWIF_INIT *psRGXFWInit)
+{
+#if defined(PDUMP)||(!defined(NO_HARDWARE))
+	PVRSRV_ERROR		eError;
+#endif
+
+#if defined(PDUMP)
+	PDUMPIF("DISABLE_HWMETA_CHECK");
+	PDUMPELSE("DISABLE_HWMETA_CHECK");
+	PDUMPCOMMENT("Compatibility check: KM driver and HW META version");
+	eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+					offsetof(RGXFWIF_INIT, sRGXCompChecks) +
+					offsetof(RGXFWIF_COMPCHECKS, ui32METAVersion),
+					RGX_CR_META_CORE_ID_VALUE,
+					0xffffffff,
+					PDUMP_POLL_OPERATOR_EQUAL,
+					PDUMP_FLAGS_CONTINUOUS);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+		return eError;
+	}
+	PDUMPFI("DISABLE_HWMETA_CHECK");
+#endif
+
+#if !defined(NO_HARDWARE)
+	if (psRGXFWInit == IMG_NULL)
+		return PVRSRV_ERROR_INVALID_PARAMS;
+
+	if (psRGXFWInit->sRGXCompChecks.ui32METAVersion != RGX_CR_META_CORE_ID_VALUE)
+	{
+		PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Incompatible driver META version (%d) / HW META version (%d).",
+				RGX_CR_META_CORE_ID_VALUE, psRGXFWInit->sRGXCompChecks.ui32METAVersion));
+		eError = PVRSRV_ERROR_META_MISMATCH;
+		PVR_DBG_BREAK;
+		return eError;
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: driver META version (%d) and HW META version (%d) match. [ OK ]",
+				RGX_CR_META_CORE_ID_VALUE, psRGXFWInit->sRGXCompChecks.ui32METAVersion));
+	}
+#endif
+	return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+
+ @Function	RGXDevInitCompatCheck
+
+ @Description
+
+ Check compatibility of host driver and firmware (DDK and build options)
+ for RGX devices at services/device initialisation
+
+ @Input psDeviceNode - device node
+
+ @Return   PVRSRV_ERROR - depending on mismatch found
+
+******************************************************************************/
+static PVRSRV_ERROR RGXDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT32 ui32ClientBuildOptions)
+{
+	PVRSRV_ERROR		eError;
+	PVRSRV_RGXDEV_INFO 	*psDevInfo = psDeviceNode->pvDevice;
+	RGXFWIF_INIT		*psRGXFWInit = IMG_NULL;
+#if !defined(NO_HARDWARE)
+	IMG_UINT32			ui32RegValue;
+#endif
+
+	/* Ensure it's a RGX device */
+	if(psDeviceNode->sDevId.eDeviceType != PVRSRV_DEVICE_TYPE_RGX)
+	{
+		PVR_LOG(("(FAIL) %s: Device not of type RGX", __FUNCTION__));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto chk_exit;
+	}
+
+	/* 
+	 * Retrieve the FW information
+	 */
+	
+#if !defined(NO_HARDWARE)
+	eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc,
+												(IMG_VOID **)&psRGXFWInit);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"%s: Failed to acquire kernel fw compatibility check info (%u)",
+				__FUNCTION__, eError));
+		return eError;
+	}
+
+	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+	{
+		if(*((volatile IMG_BOOL *)&psRGXFWInit->sRGXCompChecks.bUpdated))
+		{
+			/* No need to wait if the FW has already updated the values */
+			break;
+		}
+		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+	} END_LOOP_UNTIL_TIMEOUT();
+
+	ui32RegValue = 0;
+	eError = RGXReadMETAAddr(psDevInfo, META_CR_T0ENABLE_OFFSET, &ui32RegValue);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_LOG(("%s: Reading RGX META register failed. Is the GPU correctly powered up? (%u)",
+				__FUNCTION__, eError));
+		goto chk_exit;
+	}
+
+	if (!(ui32RegValue & META_CR_TXENABLE_ENABLE_BIT))
+	{
+		eError = PVRSRV_ERROR_META_THREAD0_NOT_ENABLED;
+		PVR_DPF((PVR_DBG_ERROR,"%s: RGX META is not running. Is the GPU correctly powered up? %d (%u)",
+				__FUNCTION__, psRGXFWInit->sRGXCompChecks.bUpdated, eError));
+		goto chk_exit;
+	}
+	
+	if (!*((volatile IMG_BOOL *)&psRGXFWInit->sRGXCompChecks.bUpdated))
+	{
+		eError = PVRSRV_ERROR_TIMEOUT;
+		PVR_DPF((PVR_DBG_ERROR,"%s: Missing compatibility info from FW (%u)",
+				__FUNCTION__, eError));
+		goto chk_exit;
+	}
+#endif
+
+	eError = RGXDevInitCompatCheck_KMBuildOptions_FWAgainstDriver(psRGXFWInit);
+	if (eError != PVRSRV_OK)
+	{
+		goto chk_exit;
+	}
+
+	eError = RGXDevInitCompatCheck_BuildOptions_FWAgainstClient(psDevInfo, psRGXFWInit, ui32ClientBuildOptions);
+	if (eError != PVRSRV_OK)
+	{
+		goto chk_exit;
+	}
+	
+	eError = RGXDevInitCompatCheck_DDKVersion_FWAgainstDriver(psDevInfo, psRGXFWInit);
+	if (eError != PVRSRV_OK)
+	{
+		goto chk_exit;
+	}
+
+	eError = RGXDevInitCompatCheck_DDKBuild_FWAgainstDriver(psDevInfo, psRGXFWInit);
+	if (eError != PVRSRV_OK)
+	{
+		goto chk_exit;
+	}
+
+	eError = RGXDevInitCompatCheck_BVNC_FWAgainstDriver(psDevInfo, psRGXFWInit);
+	if (eError != PVRSRV_OK)
+	{
+		goto chk_exit;
+	}
+
+	eError = RGXDevInitCompatCheck_BVNC_HWAgainstDriver(psDevInfo, psRGXFWInit);
+	if (eError != PVRSRV_OK)
+	{
+		goto chk_exit;
+	}
+
+	eError = RGXDevInitCompatCheck_METACoreVersion_AgainstDriver(psDevInfo, psRGXFWInit);
+	if (eError != PVRSRV_OK)
+	{
+		goto chk_exit;
+	}
+
+	eError = PVRSRV_OK;
+chk_exit:
+#if !defined(NO_HARDWARE)
+	DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc);
+#endif
+	return eError;
+}
+
+#define	MAKESTRING(x) #x
+#define TOSTRING(x) MAKESTRING(x)
+
+
+#if defined(SUPPORT_EXTRA_METASP_DEBUG)
+static PVRSRV_ERROR ValidateFWImageWithSP(IMG_CHAR *pcFWImgAddr, IMG_SIZE_T uiFWImgLen)
+{
+	PVRSRV_ERROR         eError             = PVRSRV_OK;
+#if !defined(NO_HARDWARE) && defined(DEBUG)
+	PVRSRV_DATA          *psPVRSRVData      = PVRSRVGetPVRSRVData();
+	PVRSRV_DEVICE_NODE   *psDeviceNode      = psPVRSRVData->apsRegisteredDevNodes[0];
+	PVRSRV_DEVICE_CONFIG *psDevConfig       = psDeviceNode->psDevConfig;
+	PVRSRV_RGXDEV_INFO   *psDevInfo         = psDeviceNode->pvDevice;
+	IMG_UINT32           *pui32HostCodeAddr = (IMG_UINT32*)pcFWImgAddr;
+	IMG_UINT32           ui32FWCodeAddr     = RGXFW_BOOTLDR_META_ADDR;
+	IMG_UINT32           ui32FWImageLen     = uiFWImgLen/sizeof(IMG_UINT32);
+	IMG_UINT32           i;
+
+	/* ValidateFWImageWithSP is called by PVRSRVRGXInitLoadFWImageKM, but the RGX
+	 * registers are mapped later in PVRSRVRGXInitDevPart2KM, we do it now as we need them */
+	psDevInfo->pvRegsBaseKM = OSMapPhysToLin(psDevConfig->sRegsCpuPBase, psDevConfig->ui32RegsSize, 0);
+	if (psDevInfo->pvRegsBaseKM == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"ValidateFWImageWithSP: Failed to create RGX register mapping"));
+		return PVRSRV_ERROR_BAD_MAPPING;
+	}
+
+	for(i = 0 ; i < ui32FWImageLen ; i++)
+	{
+		if(RGXReadWithSP(ui32FWCodeAddr) != *pui32HostCodeAddr)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"ValidateFWImageWithSP: Mismatch between Host and Meta views of the firmware code"));
+			eError =  PVRSRV_ERROR_META_MISMATCH;
+			goto validatefwimage_cleanup;
+		}
+		ui32FWCodeAddr += 4;
+		pui32HostCodeAddr++;
+	}
+
+	PVR_DPF((PVR_DBG_MESSAGE,"ValidateFWImageWithSP: Match between Host and Meta views of the firmware code"));
+
+validatefwimage_cleanup:
+	OSUnMapPhysToLin(psDevInfo->pvRegsBaseKM, psDevInfo->ui32RegSize, 0);
+#else
+	PVR_UNREFERENCED_PARAMETER(pcFWImgAddr);
+	PVR_UNREFERENCED_PARAMETER(uiFWImgLen);
+#endif /* !defined(NO_HARDWARE) && defined(DEBUG) */
+
+	return eError;
+}
+#endif /* defined(SUPPORT_EXTRA_METASP_DEBUG) */
+
+static PVRSRV_ERROR
+ValidateFWImage(
+	IMG_CHAR *pcFWImgDestAddr,
+	IMG_CHAR *pcFWImgSrcAddr,
+	IMG_SIZE_T uiFWImgLen,
+	IMG_CHAR *pcFWImgSigAddr,
+	IMG_UINT64 ui64FWSigLen)
+{
+#if defined(DEBUG)
+	if(OSMemCmp(pcFWImgDestAddr, pcFWImgSrcAddr, uiFWImgLen) != 0)
+	{
+		return PVRSRV_ERROR_INIT_TDMETACODE_PAGES_FAIL;
+	}
+
+	PVR_UNREFERENCED_PARAMETER(pcFWImgSigAddr);
+	PVR_UNREFERENCED_PARAMETER(ui64FWSigLen);
+#else
+	PVR_UNREFERENCED_PARAMETER(pcFWImgDestAddr);
+	PVR_UNREFERENCED_PARAMETER(uiFWImgLen);
+	PVR_UNREFERENCED_PARAMETER(pcFWImgSigAddr);
+	PVR_UNREFERENCED_PARAMETER(ui64FWSigLen);
+#endif
+
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR
+PMRCopy(PMR *psDstPMR, PMR *psSrcPMR, IMG_SIZE_T uiMaxCopyLen)
+{
+	IMG_CHAR acBuf[512];
+	IMG_UINT64 uiBytesCopied;
+	PVRSRV_ERROR eStatus;
+	
+	uiBytesCopied = 0;
+	while(uiBytesCopied < uiMaxCopyLen)
+	{
+		IMG_SIZE_T uiRead, uiWritten;
+		IMG_SIZE_T uiCopyAmt;
+		uiCopyAmt = sizeof(acBuf) > uiMaxCopyLen ? uiMaxCopyLen : sizeof(acBuf);
+		eStatus = PMR_ReadBytes(psSrcPMR,
+		                        uiBytesCopied,
+		                        acBuf,
+		                        uiCopyAmt,
+		                        &uiRead);
+		if(eStatus != PVRSRV_OK)
+		{
+			return eStatus;
+		}
+		eStatus = PMR_WriteBytes(psDstPMR,
+		                         uiBytesCopied,
+		                         acBuf,
+		                         uiCopyAmt,
+		                         &uiWritten);
+		if(eStatus != PVRSRV_OK)
+		{
+			return eStatus;
+		}
+		PVR_ASSERT(uiRead == uiWritten);
+		PVR_ASSERT(uiRead == uiCopyAmt);
+		uiBytesCopied += uiCopyAmt;
+	}
+
+	return PVRSRV_OK;
+}
+
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVRGXInitLoadFWImageKM(
+	PMR *psFWImgDestPMR,
+	PMR *psFWImgSrcPMR,
+	IMG_UINT64 ui64FWImgLen,
+	PMR *psFWImgSigPMR,
+	IMG_UINT64 ui64FWSigLen)
+{
+	IMG_CHAR *pcFWImgSigAddr, *pcFWImgDestAddr, *pcFWImgSrcAddr;
+	IMG_HANDLE hFWImgSigHdl, hFWImgDestHdl, hFWImgSrcHdl;
+	IMG_SIZE_T uiLen;
+	PVRSRV_ERROR eStatus;
+
+	/* The purpose of this function is to do the following:
+	   - copy the data contained in psFWImgSrcPMR into psFWImgDestPMR
+	   - use the data contained in psFWImgSigPMR to validate the contents of psFWImgDestPMR
+
+	   This is a functional placeholder that is meant to be overridden when actually using
+	   the protected META code feature. As a result, normally, the memory backed by 
+	   psFWImgDestPMR will not be read/writeable from this layer. Thus the operation of
+	   actually doing the copy and verify must be handled in a mode with more privilege,
+	   typically a hypervisor.
+
+	   Because psFWImgSrcPMR and psFWImgSigPMR are normal OS-memory controlled PMR's, it
+	   should be sufficient to acquire their kernel mappings and pass the pointers to
+	   their mapped addressed into the hypervisor. However, since psFWImgDestPMR references
+	   a region of memory that would typically be allocated (and writeable) by a hypervisor,
+	   it will be necessary to pass the psFWImgDestPMR->pvFlavourData (or a field contained
+	   within it) to the hypervisor to identify the region of memory to copy to and validate.
+
+	   In the example function provided below, the following things happen:
+	   - kernel mappings are acquired for the destination and signature PMRs
+	   - a copy is done using the PMR_ReadBytes / PMR_WriteBytes callback functionality in
+	     the PMR layer
+	   - a validation is done by reading back the destination buffer and comparing it against
+	     the source buffer.
+
+	   c.f. a real implementation, where the following things would likely happen:
+	   - kernel mappings are acquired for the source and signature PMRs
+	   - the source/signature mapped addresses and lengths, and psFWImgDestPMR->pvFlavourData
+	     are passed into the hypervisor to do the copy/validate.
+	*/
+
+	eStatus = PMRAcquireKernelMappingData(psFWImgDestPMR,
+	                                      0,
+	                                      0,
+	                                      (IMG_VOID **) &pcFWImgDestAddr,
+	                                      &uiLen,
+                                          &hFWImgDestHdl);
+	if(eStatus != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVDebugMiscInitFWImageKM: Acquire mapping for dest failed (%u)", eStatus));
+		goto error;
+	}
+	if(ui64FWImgLen > uiLen)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVDebugMiscInitFWImageKM: PMR dst len (%llu) > mapped len (%llu)",
+		         ui64FWImgLen, (unsigned long long)uiLen));
+		goto error;
+	}
+
+	eStatus = PMRAcquireKernelMappingData(psFWImgSrcPMR,
+	                                      0,
+	                                      0,
+	                                      (IMG_VOID **) &pcFWImgSrcAddr,
+	                                      &uiLen,
+                                          &hFWImgSrcHdl);
+	if(eStatus != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVDebugMiscInitFWImageKM: Acquire mapping for src failed (%u)", eStatus));
+		goto error;
+	}
+	if(ui64FWImgLen > uiLen)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVDebugMiscInitFWImageKM: PMR dst len (%llu) > mapped len (%llu)",
+		         ui64FWImgLen, (unsigned long long)uiLen));
+		goto error;
+	}
+
+	eStatus = PMRAcquireKernelMappingData(psFWImgSigPMR,
+	                                      0,
+	                                      0,
+	                                      (IMG_VOID **) &pcFWImgSigAddr,
+	                                      &uiLen,
+                                          &hFWImgSigHdl);
+	if(eStatus != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVDebugMiscInitFWImageKM: Acquire mapping for sig failed (%u)", eStatus));
+		goto error;
+	}
+	if(ui64FWSigLen > uiLen)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVDebugMiscInitFWImageKM: sig len (%llu) > mapped len (%llu)",
+		         ui64FWSigLen, (unsigned long long)uiLen));
+		goto error;
+	}
+
+	/* Copy the firmware image from the intermediate buffer to the real firmware memory allocation. */
+	PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVDebugMiscInitFWImageKM: copying %llu bytes from PMR %p to PMR %p",
+	                        ui64FWImgLen, psFWImgSrcPMR, psFWImgDestPMR));
+	PMRCopy(psFWImgDestPMR, psFWImgSrcPMR, TRUNCATE_64BITS_TO_SIZE_T(ui64FWImgLen));
+
+	/* Validate the firmware image after it has been copied into place */
+	eStatus = ValidateFWImage(pcFWImgDestAddr, pcFWImgSrcAddr, TRUNCATE_64BITS_TO_SIZE_T(ui64FWImgLen), pcFWImgSigAddr, ui64FWSigLen);
+	if(eStatus != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVDebugMiscInitFWImageKM: Signature check failed"));
+		goto error;
+	}
+
+#if defined(SUPPORT_EXTRA_METASP_DEBUG)
+	/* Compare the firmware image as seen from the CPU point of view
+	 * against the same memory area as seen from the META point of view */
+	eStatus = ValidateFWImageWithSP(pcFWImgDestAddr, TRUNCATE_64BITS_TO_SIZE_T(ui64FWImgLen));
+	if(eStatus != PVRSRV_OK)
+	{
+		goto error;
+	}
+#endif
+
+	eStatus = PMRReleaseKernelMappingData(psFWImgDestPMR,
+	                                      hFWImgDestHdl);
+	if(eStatus != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVDebugMiscInitFWImageKM: Release mapping for dest failed (%u)", eStatus));
+		goto error;
+	}
+
+	eStatus = PMRReleaseKernelMappingData(psFWImgSrcPMR,
+	                                      hFWImgSrcHdl);
+	if(eStatus != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVDebugMiscInitFWImageKM: Release mapping for src failed (%u)", eStatus));
+		goto error;
+	}
+
+	eStatus = PMRReleaseKernelMappingData(psFWImgSigPMR,
+	                                      hFWImgSigHdl);
+	if(eStatus != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVDebugMiscInitFWImageKM: Release mapping for sig failed (%u)", eStatus));
+		goto error;
+	}
+
+	return PVRSRV_OK;
+
+error:
+	return PVRSRV_ERROR_INIT_TDMETACODE_PAGES_FAIL;
+}
+
+
+
+/*************************************************************************/ /*!
+@Function       RGXDevVersionString
+@Description    Gets the version string for the given device node and returns 
+                a pointer to it in ppszVersionString. It is then the 
+                responsibility of the caller to free this memory.
+@Input          psDeviceNode            Device node from which to obtain the 
+                                        version string
+@Output	        ppszVersionString	Contains the version string upon return
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+static PVRSRV_ERROR RGXDevVersionString(PVRSRV_DEVICE_NODE *psDeviceNode, 
+					IMG_CHAR **ppszVersionString)
+{
+#if defined(COMPAT_BVNC_MASK_B) || defined(COMPAT_BVNC_MASK_V) || defined(COMPAT_BVNC_MASK_N) || defined(COMPAT_BVNC_MASK_C) || defined(NO_HARDWARE) || defined(EMULATOR)
+	IMG_CHAR pszFormatString[] = "Rogue Version: %d.%s.%d.%d (SW)";
+#else
+	IMG_CHAR pszFormatString[] = "Rogue Version: %d.%s.%d.%d (HW)";
+#endif
+	IMG_SIZE_T uiStringLength;
+
+	if (psDeviceNode == NULL || ppszVersionString == NULL)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	uiStringLength = OSStringLength(pszFormatString);
+	uiStringLength += OSStringLength(TOSTRING(RGX_BVNC_KM_B));
+	uiStringLength += OSStringLength(TOSTRING(RGX_BVNC_KM_V));
+	uiStringLength += OSStringLength(TOSTRING(RGX_BVNC_KM_N));
+	uiStringLength += OSStringLength(TOSTRING(RGX_BVNC_KM_C));
+
+	*ppszVersionString = OSAllocZMem(uiStringLength * sizeof(IMG_CHAR));
+	if (*ppszVersionString == NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	OSSNPrintf(*ppszVersionString, uiStringLength, pszFormatString, 
+		   RGX_BVNC_KM_B, TOSTRING(RGX_BVNC_KM_V), RGX_BVNC_KM_N, RGX_BVNC_KM_C);
+
+	return PVRSRV_OK;
+}
+
+/**************************************************************************/ /*!
+@Function       RGXDevClockSpeed
+@Description    Gets the clock speed for the given device node and returns
+                it in pui32RGXClockSpeed.
+@Input          psDeviceNode		Device node
+@Output         pui32RGXClockSpeed  Variable for storing the clock speed
+@Return         PVRSRV_ERROR
+*/ /***************************************************************************/
+static PVRSRV_ERROR RGXDevClockSpeed(PVRSRV_DEVICE_NODE *psDeviceNode,
+					IMG_PUINT32  pui32RGXClockSpeed)
+{
+	RGX_DATA *psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData;
+
+	/* get clock speed */
+	*pui32RGXClockSpeed = psRGXData->psRGXTimingInfo->ui32CoreClockSpeed;
+
+	return PVRSRV_OK;
+}
+
+
+/**************************************************************************/ /*!
+@Function       RGXSoftReset
+@Description    Resets some modules of the RGX device
+@Input          psDeviceNode		Device node
+@Input          ui64ResetValue1 A mask for which each bit set corresponds
+                                to a module to reset (via the SOFT_RESET
+                                register).
+@Input          ui64ResetValue2 A mask for which each bit set corresponds
+                                to a module to reset (via the SOFT_RESET2
+                                register).
+@Return         PVRSRV_ERROR
+*/ /***************************************************************************/
+static PVRSRV_ERROR RGXSoftReset(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                 IMG_UINT64  ui64ResetValue1,
+                                 IMG_UINT64  ui64ResetValue2)
+{
+	PVRSRV_RGXDEV_INFO        *psDevInfo;
+
+	PVR_ASSERT(psDeviceNode != NULL);
+	PVR_ASSERT(psDeviceNode->pvDevice != NULL);
+
+	if ((ui64ResetValue1 & RGX_CR_SOFT_RESET_MASKFULL) != ui64ResetValue1  ||
+	    (ui64ResetValue2 & RGX_CR_SOFT_RESET2_MASKFULL) != ui64ResetValue2)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* the device info */
+	psDevInfo = psDeviceNode->pvDevice;
+
+	/* Set in soft-reset */
+	OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET, ui64ResetValue1);
+	OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET2, ui64ResetValue2);
+
+	/* Read soft-reset to fence previous write in order to clear the SOCIF pipeline */
+	(IMG_VOID) OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET);
+	(IMG_VOID) OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET2);
+
+	return PVRSRV_OK;
+}
+
+
+/******************************************************************************
+ End of file (rgxinit.c)
+******************************************************************************/
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxinit.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxinit.h
new file mode 100644
index 0000000..95e75ed
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxinit.h
@@ -0,0 +1,274 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX initialisation header file
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX initialisation
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXINIT_H__)
+#define __RGXINIT_H__
+
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "rgxscript.h"
+#include "device.h"
+#include "rgxdevice.h"
+
+/*!
+*******************************************************************************
+
+ @Function	PVRSRVRGXInitDevPart2KM
+
+ @Description
+
+ Second part of server-side RGX initialisation
+
+ @Input pvDeviceNode - device node
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVRGXInitDevPart2KM (PVRSRV_DEVICE_NODE	*psDeviceNode,
+									  RGX_INIT_COMMAND		*psInitScript,
+									  RGX_INIT_COMMAND		*psDbgScript,
+									  RGX_INIT_COMMAND		*psDbgBusScript,
+									  RGX_INIT_COMMAND		*psDeinitScript,
+									  IMG_UINT32			ui32KernelCatBaseIdReg,
+									  IMG_UINT32			ui32KernelCatBaseId,
+									  IMG_UINT32			ui32KernelCatBaseReg,
+									  IMG_UINT32			ui32KernelCatBaseWordSize,
+									  IMG_UINT32			ui32KernelCatBaseAlignShift,
+									  IMG_UINT32			ui32KernelCatBaseShift,
+									  IMG_UINT64			ui64KernelCatBaseMask,
+									  IMG_UINT32			ui32DeviceFlags,
+									  RGX_ACTIVEPM_CONF		eActivePMConf,
+								 	  DEVMEM_EXPORTCOOKIE	*psFWCodeAllocServerExportCookie,
+								 	  DEVMEM_EXPORTCOOKIE	*psFWDataAllocServerExportCookie,
+								 	  DEVMEM_EXPORTCOOKIE	*psFWCorememAllocServerExportCookie,
+									  DEVMEM_EXPORTCOOKIE	*psHWPerfDataAllocServerExportCookie);
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXInitAllocFWImgMemKM(PVRSRV_DEVICE_NODE    *psDeviceNode,
+										  IMG_DEVMEM_SIZE_T     ui32FWCodeLen,
+									 	  IMG_DEVMEM_SIZE_T     ui32FWDataLen,
+									 	  IMG_DEVMEM_SIZE_T     uiFWCorememLen,
+									 	  DEVMEM_EXPORTCOOKIE   **ppsFWCodeAllocServerExportCookie,
+									 	  IMG_DEV_VIRTADDR      *psFWCodeDevVAddrBase,
+									 	  DEVMEM_EXPORTCOOKIE   **ppsFWDataAllocServerExportCookie,
+									 	  IMG_DEV_VIRTADDR      *psFWDataDevVAddrBase,
+										  DEVMEM_EXPORTCOOKIE   **ppsFWCorememAllocServerExportCookie,
+										  IMG_DEV_VIRTADDR      *psFWCorememDevVAddrBase,
+										  RGXFWIF_DEV_VIRTADDR  *psFWCorememMetaVAddrBase);
+
+
+
+/*!
+*******************************************************************************
+
+ @Function	PVRSRVRGXInitFirmwareKM
+
+ @Description
+
+ Server-side RGX firmware initialisation
+
+ @Input pvDeviceNode - device node
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVRGXInitFirmwareKM(PVRSRV_DEVICE_NODE			*psDeviceNode, 
+									    RGXFWIF_DEV_VIRTADDR		*psRGXFwInit,
+									    IMG_BOOL					bEnableSignatureChecks,
+									    IMG_UINT32					ui32SignatureChecksBufSize,
+									    IMG_UINT32					ui32HWPerfFWBufSizeKB,
+									    IMG_UINT64					ui64HWPerfFilter,
+									    IMG_UINT32					ui32RGXFWAlignChecksSize,
+									    IMG_UINT32					*pui32RGXFWAlignChecks,
+									    IMG_UINT32					ui32ConfigFlags,
+									    IMG_UINT32					ui32LogType,
+										IMG_UINT32					ui32FilterMode,
+									    IMG_UINT32					ui32JonesDisableMask,
+									    IMG_UINT32					ui32HWRDebugDumpLimit,
+									    RGXFWIF_COMPCHECKS_BVNC     *psClientBVNC,
+										IMG_UINT32					ui32HWPerfCountersDataSize,
+										DEVMEM_EXPORTCOOKIE   **ppsHWPerfDataAllocServerExportCookie,
+									    RGX_RD_POWER_ISLAND_CONF			eRGXRDPowerIslandingConf);
+
+
+/*!
+*******************************************************************************
+
+ @Function	PVRSRVRGXInitLoadFWImageKM
+
+ @Description
+
+ Load the firmware image into place.
+
+ @Input psFWImgDestPMR - PMR holding destination memory buffer for firmware
+
+ @input psFWImgSrcPMR - PMR holding firmware image data to load
+
+ @input ui64FWImgLen - number of bytes in Src/Dst memory buffers
+
+ @input psFWImgSigPMR - a buffer holding a signature for Src, which is used for validation
+
+ @input ui64FWSigLen - number of bytes contained in the signature buffer.
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXInitLoadFWImageKM(PMR *psFWImgDestPMR,
+                                        PMR *psFWImgSrcPMR,
+                                        IMG_UINT64 ui64FWImgLen,
+										PMR *psFWImgSigPMR,
+                                        IMG_UINT64 ui64FWSigLen);
+
+/*!
+*******************************************************************************
+
+ @Function	PVRSRVRGXInitHWPerfCountersKM
+
+ @Description
+
+ Initialisation of the performance counters
+
+ @Input pvDeviceNode - device node
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVRGXInitHWPerfCountersKM (PVRSRV_DEVICE_NODE	*psDeviceNode);
+
+/*!
+*******************************************************************************
+
+ @Function	RGXRegisterDevice
+
+ @Description
+
+ Registers the device with the system
+
+ @Input: 	psDeviceNode - device node
+
+ @Return   PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+
+/*!
+*******************************************************************************
+
+ @Function	DevDeInitRGX
+
+ @Description
+
+ Reset and deinitialise Chip
+
+ @Input pvDeviceNode - device info. structure
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR DevDeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+
+/*!
+*******************************************************************************
+
+ @Function     RGXRegisterGpuUtilStats
+
+ @Description  Initialise data used to compute GPU utilisation statistics
+               for a particular user (identified by the handle passed as
+               argument). This function must be called only once for each
+               different user/handle.
+
+ @Input        phGpuUtilUser - Pointer to handle used to identify a user of
+                               RGXGetGpuUtilStats
+
+ @Return       PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXRegisterGpuUtilStats(IMG_HANDLE *phGpuUtilUser);
+
+
+/*!
+*******************************************************************************
+
+ @Function     RGXUnregisterGpuUtilStats
+
+ @Description  Free data previously used to compute GPU utilisation statistics
+               for a particular user (identified by the handle passed as
+               argument).
+
+ @Input        hGpuUtilUser - Handle used to identify a user of
+                              RGXGetGpuUtilStats
+
+ @Return       PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXUnregisterGpuUtilStats(IMG_HANDLE hGpuUtilUser);
+
+
+/*!
+*******************************************************************************
+
+ @Function		PVRSRVGPUVIRTPopulateLMASubArenasKM
+
+ @Description	Populates the LMA arenas based on the min max values passed by
+				the client during initialization. GPU Virtualization Validation
+				only.
+
+ @Input			pvDeviceNode	: Pointer to a device info structure.
+				ui32NumElements	: Total number of min / max values passed by
+								  the client
+				pui32Elements	: The array containing all the min / max values
+								  passed by the client, all bundled together
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVGPUVIRTPopulateLMASubArenasKM(PVRSRV_DEVICE_NODE	*psDeviceNode, IMG_UINT32 ui32NumElements, IMG_UINT32 aui32Elements[]);
+
+#endif /* __RGXINIT_H__ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxmem.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxmem.c
new file mode 100644
index 0000000..ad235d6c
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxmem.c
@@ -0,0 +1,669 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX memory context management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX memory context management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvr_debug.h"
+#include "rgxmem.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "devicemem_server_utils.h"
+#include "devicemem_pdump.h"
+#include "rgxdevice.h"
+#include "rgx_fwif_km.h"
+#include "rgxfwutils.h"
+#include "pdump_km.h"
+#include "pvrsrv.h"
+#include "sync_internal.h"
+#include "rgx_memallocflags.h"
+
+/*
+	FIXME:
+	For now just get global state, but what we really want is to do
+	this per memory context
+*/
+static IMG_UINT32 ui32CacheOpps = 0;
+static IMG_UINT32 ui32CacheOpSequence = 0;
+/* FIXME: End */
+
+typedef struct _SERVER_MMU_CONTEXT_ {
+	DEVMEM_MEMDESC *psFWMemContextMemDesc;
+	MMU_CONTEXT *psMMUContext;
+	IMG_PID uiPID;
+	IMG_CHAR szProcessName[RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME];
+	DLLIST_NODE sNode;
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+} SERVER_MMU_CONTEXT;
+
+IMG_VOID RGXMMUCacheInvalidate(PVRSRV_DEVICE_NODE *psDeviceNode,
+							   IMG_HANDLE hDeviceData,
+							   MMU_LEVEL eMMULevel,
+							   IMG_BOOL bUnmap)
+{
+	PVR_UNREFERENCED_PARAMETER(bUnmap);
+
+	switch (eMMULevel)
+	{
+		case MMU_LEVEL_3:	ui32CacheOpps |= RGXFWIF_MMUCACHEDATA_FLAGS_PC;
+							break;
+		case MMU_LEVEL_2:	ui32CacheOpps |= RGXFWIF_MMUCACHEDATA_FLAGS_PD;
+							break;
+		case MMU_LEVEL_1:	ui32CacheOpps |= RGXFWIF_MMUCACHEDATA_FLAGS_PT;
+							ui32CacheOpps |= RGXFWIF_MMUCACHEDATA_FLAGS_TLB;
+							break;
+		default:
+							PVR_ASSERT(0);
+							break;
+	}
+}
+
+PVRSRV_ERROR RGXSLCCacheInvalidateRequest(PVRSRV_DEVICE_NODE *psDeviceNode,
+									PMR *psPmr)
+{
+	RGXFWIF_KCCB_CMD sFlushInvalCmd;
+	IMG_UINT32 ulPMRFlags;
+	IMG_UINT32 ui32DeviceCacheFlags;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	PVR_ASSERT(psDeviceNode);
+
+	/* In DEINIT state, we stop scheduling SLC flush commands, because we don't know in what state the firmware is.
+	 * Anyway, if we are in DEINIT state, we don't care anymore about FW memory consistency
+	 */
+	if (psDeviceNode->eDevState != PVRSRV_DEVICE_STATE_DEINIT)
+	{
+
+		/* get the PMR's caching flags */
+		eError = PMR_Flags(psPmr, &ulPMRFlags);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_WARNING, "RGXSLCCacheInvalidateRequest: Unable to get the caching attributes of PMR %p",psPmr));
+		}
+
+		ui32DeviceCacheFlags = DevmemDeviceCacheMode(ulPMRFlags);
+
+		/* Schedule a SLC flush and invalidate if
+		 * - the memory is cached.
+		 * - we can't get the caching attributes (by precaution).
+		 */
+		if ((ui32DeviceCacheFlags == PVRSRV_MEMALLOCFLAG_GPU_CACHED) || (eError != PVRSRV_OK))
+		{
+			/* Schedule the SLC flush command ... */
+#if defined(PDUMP)
+			PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Submit SLC flush and invalidate");
+#endif
+			sFlushInvalCmd.eCmdType = RGXFWIF_KCCB_CMD_SLCFLUSHINVAL;
+			sFlushInvalCmd.uCmdData.sSLCFlushInvalData.bInval = IMG_TRUE;
+			sFlushInvalCmd.uCmdData.sSLCFlushInvalData.bDMContext = IMG_FALSE;
+			sFlushInvalCmd.uCmdData.sSLCFlushInvalData.eDM = 0;
+			sFlushInvalCmd.uCmdData.sSLCFlushInvalData.psContext.ui32Addr = 0;
+
+			eError = RGXSendCommandWithPowLock(psDeviceNode->pvDevice,
+												RGXFWIF_DM_GP,
+												&sFlushInvalCmd,
+												sizeof(sFlushInvalCmd),
+												IMG_TRUE);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,"RGXSLCCacheInvalidateRequest: Failed to schedule SLC flush command with error (%u)", eError));
+			}
+			else
+			{
+				/* Wait for the SLC flush to complete */
+				eError = RGXWaitForFWOp(psDeviceNode->pvDevice, RGXFWIF_DM_GP, psDeviceNode->psSyncPrim, IMG_TRUE);
+				if (eError != PVRSRV_OK)
+				{
+					PVR_DPF((PVR_DBG_ERROR,"RGXSLCCacheInvalidateRequest: SLC flush and invalidate aborted with error (%u)", eError));
+				}
+			}
+		}
+	}
+
+	return eError;
+}
+
+
+PVRSRV_ERROR RGXPreKickCacheCommand(PVRSRV_RGXDEV_INFO 	*psDevInfo)
+{
+	PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode;
+	RGXFWIF_KCCB_CMD sFlushCmd;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	RGXFWIF_DM eDMcount = RGXFWIF_DM_MAX;
+
+	if (!ui32CacheOpps)
+	{
+		goto _PVRSRVPowerLock_Exit;
+	}
+
+	sFlushCmd.eCmdType = RGXFWIF_KCCB_CMD_MMUCACHE;
+	/* Set which memory context this command is for (all ctxs for now) */
+	ui32CacheOpps |= RGXFWIF_MMUCACHEDATA_FLAGS_CTX_ALL;
+#if 0
+	sFlushCmd.uCmdData.sMMUCacheData.psMemoryContext = ???
+#endif
+
+	/* PVRSRVPowerLock guarantees atomicity between commands and global variables consistency.
+	 * This is helpful in a scenario with several applications allocating resources. */
+	eError = PVRSRVPowerLock();
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "RGXPreKickCacheCommand: failed to acquire powerlock (%s)",
+					PVRSRVGetErrorStringKM(eError)));
+		goto _PVRSRVPowerLock_Exit;
+	}
+
+	PDUMPPOWCMDSTART();
+	eError = PVRSRVSetDevicePowerStateKM(psDeviceNode->sDevId.ui32DeviceIndex,
+										 PVRSRV_DEV_POWER_STATE_ON,
+										 IMG_FALSE);
+	PDUMPPOWCMDEND();
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "RGXPreKickCacheCommand: failed to transition RGX to ON (%s)",
+					PVRSRVGetErrorStringKM(eError)));
+
+		goto _PVRSRVSetDevicePowerStateKM_Exit;
+	}
+
+	sFlushCmd.uCmdData.sMMUCacheData.ui32Flags = ui32CacheOpps;
+	sFlushCmd.uCmdData.sMMUCacheData.ui32CacheSequenceNum = ++ui32CacheOpSequence;
+
+#if defined(PDUMP)
+	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
+							"Submit MMU flush and invalidate (flags = 0x%08x, cache operation sequence = %u)",
+							ui32CacheOpps, ui32CacheOpSequence);
+#endif
+
+	ui32CacheOpps = 0;
+
+	/* Schedule MMU cache command */
+	do
+	{
+		eDMcount--;
+		eError = RGXSendCommandRaw(psDevInfo, eDMcount, &sFlushCmd, sizeof(RGXFWIF_KCCB_CMD), PDUMP_FLAGS_CONTINUOUS);
+
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"RGXPreKickCacheCommand: Failed to schedule MMU cache command \
+									to DM=%d with error (%u)", eDMcount, eError));
+			break;
+		}
+	}
+	while(eDMcount > 0);
+
+_PVRSRVSetDevicePowerStateKM_Exit:
+	PVRSRVPowerUnlock();
+
+_PVRSRVPowerLock_Exit:
+	return eError;
+}
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+/* page fault debug is the only current use case for needing to find process info
+ * after that process device memory context has been destroyed
+ */
+
+typedef struct _UNREGISTERED_MEMORY_CONTEXT_
+{
+	IMG_PID uiPID;
+	IMG_CHAR szProcessName[RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME];
+	IMG_DEV_PHYADDR sPCDevPAddr;
+} UNREGISTERED_MEMORY_CONTEXT;
+
+/* must be a power of two */
+#define UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE (1 << 3)
+
+static UNREGISTERED_MEMORY_CONTEXT gasUnregisteredMemCtxs[UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE];
+static IMG_UINT32 gui32UnregisteredMemCtxsHead = 0;
+
+/* record a device memory context being unregistered.
+ * the list of unregistered contexts can be used to find the PID and process name
+ * belonging to a memory context which has been destroyed
+ */
+static IMG_VOID _RecordUnregisteredMemoryContext(PVRSRV_RGXDEV_INFO *psDevInfo, SERVER_MMU_CONTEXT *psServerMMUContext)
+{
+	UNREGISTERED_MEMORY_CONTEXT *psRecord;
+
+	OSLockAcquire(psDevInfo->hMMUCtxUnregLock);
+
+	psRecord = &gasUnregisteredMemCtxs[gui32UnregisteredMemCtxsHead];
+
+	gui32UnregisteredMemCtxsHead = (gui32UnregisteredMemCtxsHead + 1)
+					& (UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE - 1);
+
+	OSLockRelease(psDevInfo->hMMUCtxUnregLock);
+
+	psRecord->uiPID = psServerMMUContext->uiPID;
+	MMU_AcquireBaseAddr(psServerMMUContext->psMMUContext, &psRecord->sPCDevPAddr);
+	OSStringNCopy(psRecord->szProcessName, psServerMMUContext->szProcessName, sizeof(psRecord->szProcessName));
+	psRecord->szProcessName[sizeof(psRecord->szProcessName) - 1] = '\0';
+}
+
+#endif
+
+IMG_VOID RGXUnregisterMemoryContext(IMG_HANDLE hPrivData)
+{
+	SERVER_MMU_CONTEXT *psServerMMUContext = hPrivData;
+	PVRSRV_RGXDEV_INFO *psDevInfo = psServerMMUContext->psDevInfo;
+
+	OSWRLockAcquireWrite(psDevInfo->hMemoryCtxListLock);
+	dllist_remove_node(&psServerMMUContext->sNode);
+	OSWRLockReleaseWrite(psDevInfo->hMemoryCtxListLock);
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+	_RecordUnregisteredMemoryContext(psDevInfo, psServerMMUContext);
+#endif
+
+	/*
+	 * Release the page catalogue address acquired in RGXRegisterMemoryContext().
+	 */
+	MMU_ReleaseBaseAddr(IMG_NULL /* FIXME */);
+	
+	/*
+	 * Free the firmware memory context.
+	 */
+	DevmemFwFree(psServerMMUContext->psFWMemContextMemDesc);
+
+	OSFreeMem(psServerMMUContext);
+}
+
+
+/*
+ * RGXRegisterMemoryContext
+ */ 
+PVRSRV_ERROR RGXRegisterMemoryContext(PVRSRV_DEVICE_NODE	*psDeviceNode,
+									  MMU_CONTEXT			*psMMUContext,
+									  IMG_HANDLE			*hPrivData)
+{
+	PVRSRV_ERROR			eError;
+	PVRSRV_RGXDEV_INFO 		*psDevInfo = psDeviceNode->pvDevice;
+	DEVMEM_FLAGS_T			uiFWMemContextMemAllocFlags;
+	RGXFWIF_FWMEMCONTEXT	*psFWMemContext;
+	DEVMEM_MEMDESC			*psFWMemContextMemDesc;
+	SERVER_MMU_CONTEXT *psServerMMUContext;
+
+	if (psDevInfo->psKernelMMUCtx == IMG_NULL)
+	{
+		/*
+		 * This must be the creation of the Kernel memory context. Take a copy
+		 * of the MMU context for use when programming the BIF.
+		 */ 
+		psDevInfo->psKernelMMUCtx = psMMUContext;
+	}
+	else
+	{
+		psServerMMUContext = OSAllocMem(sizeof(*psServerMMUContext));
+		if (psServerMMUContext == IMG_NULL)
+		{
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto fail_alloc_server_ctx;
+		}
+
+		psServerMMUContext->psDevInfo = psDevInfo;
+
+		/*
+		 * This FW MemContext is only mapped into kernel for initialisation purposes.
+		 * Otherwise this allocation is only used by the FW.
+		 * Therefore the GPU cache doesn't need coherency,
+		 * and write-combine is suffice on the CPU side (WC buffer will be flushed at any kick)
+		 */
+		uiFWMemContextMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+										PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(META_CACHED) |
+										PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+										PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+										PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+										PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+										PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+										PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
+										PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE;
+
+		/*
+			Allocate device memory for the firmware memory context for the new
+			application.
+		*/
+		PDUMPCOMMENT("Allocate RGX firmware memory context");
+		/* FIXME: why cache-consistent? */
+		eError = DevmemFwAllocate(psDevInfo,
+								sizeof(*psFWMemContext),
+								uiFWMemContextMemAllocFlags,
+								"FirmwareMemoryContext",
+								&psFWMemContextMemDesc);
+
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"RGXRegisterMemoryContext: Failed to allocate firmware memory context (%u)",
+					eError));
+			goto fail_alloc_fw_ctx;
+		}
+		
+		/*
+			Temporarily map the firmware memory context to the kernel.
+		*/
+		eError = DevmemAcquireCpuVirtAddr(psFWMemContextMemDesc,
+										  (IMG_VOID **)&psFWMemContext);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"RGXRegisterMemoryContext: Failed to map firmware memory context (%u)",
+					eError));
+			goto fail_acquire_cpu_addr;
+		}
+		
+		/*
+		 * Write the new memory context's page catalogue into the firmware memory
+		 * context for the client.
+		 */
+		eError = MMU_AcquireBaseAddr(psMMUContext, &psFWMemContext->sPCDevPAddr);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"RGXRegisterMemoryContext: Failed to acquire Page Catalogue address (%u)",
+					eError));
+			DevmemReleaseCpuVirtAddr(psFWMemContextMemDesc);
+			goto fail_acquire_base_addr;
+		}
+
+		/*
+		 * Set default values for the rest of the structure.
+		 */
+		psFWMemContext->uiPageCatBaseRegID = -1;
+		psFWMemContext->uiBreakpointAddr = 0;
+		psFWMemContext->uiBPHandlerAddr = 0;
+		psFWMemContext->uiBreakpointCtl = 0;
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+{
+		IMG_UINT32 ui32OSid = 0, ui32OSidReg = 0;
+
+		MMU_GetOSids(psMMUContext, &ui32OSid, &ui32OSidReg);
+
+		psFWMemContext->ui32OSid = ui32OSidReg;
+}
+#endif
+
+#if defined(PDUMP)
+		{
+			IMG_CHAR			aszName[PMR_MAX_MEMSPNAME_SYMB_ADDR_LENGTH_DEFAULT];
+			IMG_DEVMEM_OFFSET_T uiOffset = 0;
+
+			/*
+			 * Dump the Mem context allocation
+			 */
+			DevmemPDumpLoadMem(psFWMemContextMemDesc, 0, sizeof(*psFWMemContext), PDUMP_FLAGS_CONTINUOUS);
+			
+
+			/*
+			 * Obtain a symbolic addr of the mem context structure
+			 */
+			eError = DevmemPDumpPageCatBaseToSAddr(psFWMemContextMemDesc, 
+												   &uiOffset, 
+												   aszName, 
+												   PMR_MAX_MEMSPNAME_SYMB_ADDR_LENGTH_DEFAULT);
+
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,"RGXRegisterMemoryContext: Failed to generate a Dump Page Catalogue address (%u)",
+						eError));
+				DevmemReleaseCpuVirtAddr(psFWMemContextMemDesc);
+				goto fail_pdump_cat_base_addr;
+			}
+
+			/*
+			 * Dump the Page Cat tag in the mem context (symbolic address)
+			 */
+			eError = MMU_PDumpWritePageCatBase(psMMUContext,
+												aszName,
+												uiOffset,
+												8, /* 64-bit register write */
+												0,
+												0,
+												0);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,"RGXRegisterMemoryContext: Failed to acquire Page Catalogue address (%u)",
+						eError));
+				DevmemReleaseCpuVirtAddr(psFWMemContextMemDesc);
+				goto fail_pdump_cat_base;
+			}
+		}
+#endif
+
+		/*
+		 * Release kernel address acquired above.
+		 */
+		DevmemReleaseCpuVirtAddr(psFWMemContextMemDesc);
+
+		/*
+		 * Store the process information for this device memory context
+		 * for use with the host page-fault analysis.
+		 */
+		psServerMMUContext->uiPID = OSGetCurrentProcessID();
+		psServerMMUContext->psMMUContext = psMMUContext;
+		psServerMMUContext->psFWMemContextMemDesc = psFWMemContextMemDesc;
+		if (OSSNPrintf(psServerMMUContext->szProcessName,
+						RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME,
+						"%s",
+						OSGetCurrentProcessName()) == RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME)
+		{
+			psServerMMUContext->szProcessName[RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME-1] = '\0';
+		}
+
+		OSWRLockAcquireWrite(psDevInfo->hMemoryCtxListLock);
+		dllist_add_to_tail(&psDevInfo->sMemoryContextList, &psServerMMUContext->sNode);
+		OSWRLockReleaseWrite(psDevInfo->hMemoryCtxListLock);
+
+		MMU_SetDeviceData(psMMUContext, psFWMemContextMemDesc);
+		*hPrivData = psServerMMUContext;
+	}
+			
+	return PVRSRV_OK;
+
+#if defined(PDUMP)
+fail_pdump_cat_base:
+fail_pdump_cat_base_addr:
+	MMU_ReleaseBaseAddr(IMG_NULL);
+#endif
+fail_acquire_base_addr:
+	/* Done before jumping to the fail point as the release is done before exit */
+fail_acquire_cpu_addr:
+	DevmemFwFree(psServerMMUContext->psFWMemContextMemDesc);
+fail_alloc_fw_ctx:
+	OSFreeMem(psServerMMUContext);
+fail_alloc_server_ctx:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+DEVMEM_MEMDESC *RGXGetFWMemDescFromMemoryContextHandle(IMG_HANDLE hPriv)
+{
+	SERVER_MMU_CONTEXT *psMMUContext = (SERVER_MMU_CONTEXT *) hPriv;
+
+	return psMMUContext->psFWMemContextMemDesc;
+}
+
+typedef struct _RGX_FAULT_DATA_ {
+	IMG_DEV_VIRTADDR *psDevVAddr;
+	IMG_DEV_PHYADDR *psDevPAddr;
+} RGX_FAULT_DATA;
+
+static IMG_BOOL _RGXCheckFaultAddress(PDLLIST_NODE psNode, IMG_PVOID pvCallbackData)
+{
+	SERVER_MMU_CONTEXT *psServerMMUContext = IMG_CONTAINER_OF(psNode, SERVER_MMU_CONTEXT, sNode);
+	RGX_FAULT_DATA *psFaultData = (RGX_FAULT_DATA *) pvCallbackData;
+	IMG_DEV_PHYADDR sPCDevPAddr;
+	
+	if (MMU_AcquireBaseAddr(psServerMMUContext->psMMUContext, &sPCDevPAddr) != PVRSRV_OK)
+	{
+		PVR_LOG(("Failed to get PC address for memory context"));
+		return IMG_TRUE;
+	}
+
+	if (psFaultData->psDevPAddr->uiAddr == sPCDevPAddr.uiAddr)
+	{
+		PVR_LOG(("Found memory context (PID = %d, %s)",
+				 psServerMMUContext->uiPID,
+				 psServerMMUContext->szProcessName));
+
+		MMU_CheckFaultAddress(psServerMMUContext->psMMUContext, psFaultData->psDevVAddr);
+		return IMG_FALSE;
+	}
+	return IMG_TRUE;
+}
+
+IMG_VOID RGXCheckFaultAddress(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_DEV_VIRTADDR *psDevVAddr, IMG_DEV_PHYADDR *psDevPAddr)
+{
+	RGX_FAULT_DATA sFaultData;
+	IMG_DEV_PHYADDR sPCDevPAddr;
+
+	sFaultData.psDevVAddr = psDevVAddr;
+	sFaultData.psDevPAddr = psDevPAddr;
+
+	OSWRLockAcquireRead(psDevInfo->hMemoryCtxListLock);
+
+	dllist_foreach_node(&psDevInfo->sMemoryContextList,
+						_RGXCheckFaultAddress,
+						&sFaultData);
+
+	/* Lastly check for fault in the kernel allocated memory */
+	if (MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx, &sPCDevPAddr) != PVRSRV_OK)
+	{
+		PVR_LOG(("Failed to get PC address for kernel memory context"));
+	}
+
+	if (sFaultData.psDevPAddr->uiAddr == sPCDevPAddr.uiAddr)
+	{
+		MMU_CheckFaultAddress(psDevInfo->psKernelMMUCtx, psDevVAddr);
+	}
+
+	OSWRLockReleaseRead(psDevInfo->hMemoryCtxListLock);
+}
+
+/* input for query to find the MMU context corresponding to a
+ * page catalogue address
+ */
+typedef struct _RGX_FIND_MMU_CONTEXT_
+{
+	IMG_DEV_PHYADDR sPCAddress;
+	SERVER_MMU_CONTEXT *psServerMMUContext;
+	MMU_CONTEXT *psMMUContext;
+} RGX_FIND_MMU_CONTEXT;
+
+static IMG_BOOL _RGXFindMMUContext(PDLLIST_NODE psNode, IMG_PVOID pvCallbackData)
+{
+	SERVER_MMU_CONTEXT *psServerMMUContext = IMG_CONTAINER_OF(psNode, SERVER_MMU_CONTEXT, sNode);
+	RGX_FIND_MMU_CONTEXT *psData = pvCallbackData;
+	IMG_DEV_PHYADDR sPCDevPAddr;
+
+	if (MMU_AcquireBaseAddr(psServerMMUContext->psMMUContext, &sPCDevPAddr) != PVRSRV_OK)
+	{
+		PVR_LOG(("Failed to get PC address for memory context"));
+		return IMG_TRUE;
+	}
+
+	if (psData->sPCAddress.uiAddr == sPCDevPAddr.uiAddr)
+	{
+		psData->psServerMMUContext = psServerMMUContext;
+
+		return IMG_FALSE;
+	}
+	return IMG_TRUE;
+}
+
+/* given the physical address of a page catalogue, searches for a corresponding
+ * MMU context and if found, provides the caller details of the process.
+ * Returns IMG_TRUE if a process is found.
+ */
+IMG_BOOL RGXPCAddrToProcessInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_DEV_PHYADDR sPCAddress,
+								RGXMEM_PROCESS_INFO *psInfo)
+{
+	RGX_FIND_MMU_CONTEXT sData;
+	IMG_BOOL bRet = IMG_FALSE;
+
+	sData.sPCAddress = sPCAddress;
+	sData.psServerMMUContext = IMG_NULL;
+
+	dllist_foreach_node(&psDevInfo->sMemoryContextList, _RGXFindMMUContext, &sData);
+
+	if(sData.psServerMMUContext != IMG_NULL)
+	{
+		psInfo->uiPID = sData.psServerMMUContext->uiPID;
+		OSStringNCopy(psInfo->szProcessName, sData.psServerMMUContext->szProcessName, sizeof(psInfo->szProcessName));
+		psInfo->szProcessName[sizeof(psInfo->szProcessName) - 1] = '\0';
+		psInfo->bUnregistered = IMG_FALSE;
+		bRet = IMG_TRUE;
+	}
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+	else
+	{
+		/* no active memory context found with the given PC address.
+		 * Check the list of most recently freed memory contexts.
+		 */
+		 IMG_UINT32 i;
+
+		 OSLockAcquire(psDevInfo->hMMUCtxUnregLock);
+
+		 for(i = (gui32UnregisteredMemCtxsHead > 0) ? (gui32UnregisteredMemCtxsHead - 1) :
+		 					UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE;
+							i != gui32UnregisteredMemCtxsHead; i--)
+		{
+			UNREGISTERED_MEMORY_CONTEXT *psRecord = &gasUnregisteredMemCtxs[i];
+
+			if(psRecord->sPCDevPAddr.uiAddr == sPCAddress.uiAddr)
+			{
+				psInfo->uiPID = psRecord->uiPID;
+				OSStringNCopy(psInfo->szProcessName, psRecord->szProcessName, sizeof(psInfo->szProcessName)-1);
+				psInfo->szProcessName[sizeof(psInfo->szProcessName) - 1] = '\0';
+				psInfo->bUnregistered = IMG_TRUE;
+				bRet = IMG_TRUE;
+				break;
+			}
+		}
+
+		OSLockRelease(psDevInfo->hMMUCtxUnregLock);
+
+	}
+#endif
+	return bRet;
+}
+
+/******************************************************************************
+ End of file (rgxmem.c)
+******************************************************************************/
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxmem.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxmem.h
new file mode 100644
index 0000000..4565abb
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxmem.h
@@ -0,0 +1,89 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX memory context management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for RGX memory context management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXMEM_H__)
+#define __RGXMEM_H__
+
+#include "pvrsrv_error.h"
+#include "device.h"
+#include "mmu_common.h"
+#include "rgxdevice.h"
+
+#define RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME 40
+
+typedef struct _RGXMEM_PROCESS_INFO_
+{
+	IMG_PID uiPID;
+	IMG_CHAR szProcessName[RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME];
+	IMG_BOOL bUnregistered;
+} RGXMEM_PROCESS_INFO;
+
+/* FIXME: SyncPrim should be stored on the memory context */
+IMG_VOID RGXMMUSyncPrimAlloc(PVRSRV_DEVICE_NODE *psDeviceNode);
+IMG_VOID RGXMMUSyncPrimFree(IMG_VOID);
+
+IMG_VOID RGXMMUCacheInvalidate(PVRSRV_DEVICE_NODE *psDeviceNode,
+							   IMG_HANDLE hDeviceData,
+							   MMU_LEVEL eMMULevel,
+							   IMG_BOOL bUnmap);
+
+PVRSRV_ERROR RGXSLCCacheInvalidateRequest(PVRSRV_DEVICE_NODE	*psDeviceNode,
+									PMR *psPmr);
+
+PVRSRV_ERROR RGXPreKickCacheCommand(PVRSRV_RGXDEV_INFO 	*psDevInfo);
+
+IMG_VOID RGXUnregisterMemoryContext(IMG_HANDLE hPrivData);
+PVRSRV_ERROR RGXRegisterMemoryContext(PVRSRV_DEVICE_NODE	*psDeviceNode,
+									  MMU_CONTEXT			*psMMUContext,
+									  IMG_HANDLE			*hPrivData);
+
+DEVMEM_MEMDESC *RGXGetFWMemDescFromMemoryContextHandle(IMG_HANDLE hPriv);
+
+IMG_VOID RGXCheckFaultAddress(PVRSRV_RGXDEV_INFO *psDevInfo,
+							  IMG_DEV_VIRTADDR *psDevVAddr,
+							  IMG_DEV_PHYADDR *psDevPAddr);
+
+IMG_BOOL RGXPCAddrToProcessInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_DEV_PHYADDR sPCAddress,
+								RGXMEM_PROCESS_INFO *psInfo);
+
+#endif /* __RGXMEM_H__ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxmmuinit.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxmmuinit.c
new file mode 100644
index 0000000..f235e31
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxmmuinit.c
@@ -0,0 +1,990 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device specific initialisation routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific MMU initialisation
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include "rgxmmuinit.h"
+#include "rgxmmudefs_km.h"
+
+#include "device.h"
+#include "img_types.h"
+#include "mmu_common.h"
+#include "pdump_mmu.h"
+
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "rgx_memallocflags.h"
+#include "pdump_km.h"
+
+/*
+ * Bits of PT, PD and PC not involving addresses 
+ */
+
+#define RGX_MMUCTRL_PTE_PROTMASK	(RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_EN | \
+									 RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_EN | \
+									 RGX_MMUCTRL_PT_DATA_PM_SRC_EN | \
+									 RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_EN | \
+									 RGX_MMUCTRL_PT_DATA_CC_EN | \
+									 RGX_MMUCTRL_PT_DATA_READ_ONLY_EN | \
+									 RGX_MMUCTRL_PT_DATA_VALID_EN)
+
+#define RGX_MMUCTRL_PDE_PROTMASK	(RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_EN | \
+									 ~RGX_MMUCTRL_PD_DATA_PAGE_SIZE_CLRMSK | \
+									 RGX_MMUCTRL_PD_DATA_VALID_EN)
+
+#define RGX_MMUCTRL_PCE_PROTMASK	(RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_EN | \
+									 RGX_MMUCTRL_PC_DATA_VALID_EN)
+
+
+
+
+
+static MMU_PxE_CONFIG sRGXMMUPCEConfig;
+static MMU_DEVVADDR_CONFIG sRGXMMUTopLevelDevVAddrConfig;
+
+
+typedef struct _RGX_PAGESIZECONFIG_
+{
+	const MMU_PxE_CONFIG *psPDEConfig;
+	const MMU_PxE_CONFIG *psPTEConfig;
+	const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+	IMG_UINT32 uiRefCount;
+	IMG_UINT32 uiMaxRefCount;
+} RGX_PAGESIZECONFIG;
+
+/*
+ *
+ *  Configuration for heaps with 4kB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_4KBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_4KBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_4KBDP;
+static RGX_PAGESIZECONFIG gsPageSizeConfig4KB;
+
+
+/*
+ *
+ *  Configuration for heaps with 16kB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_16KBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_16KBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_16KBDP;
+static RGX_PAGESIZECONFIG gsPageSizeConfig16KB;
+
+
+/*
+ *
+ *  Configuration for heaps with 64kB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_64KBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_64KBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_64KBDP;
+static RGX_PAGESIZECONFIG gsPageSizeConfig64KB;
+
+
+/*
+ *
+ *  Configuration for heaps with 256kB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_256KBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_256KBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_256KBDP;
+static RGX_PAGESIZECONFIG gsPageSizeConfig256KB;
+
+
+/*
+ *
+ *  Configuration for heaps with 1MB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_1MBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_1MBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_1MBDP;
+static RGX_PAGESIZECONFIG gsPageSizeConfig1MB;
+
+
+/*
+ *
+ *  Configuration for heaps with 2MB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_2MBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_2MBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_2MBDP;
+static RGX_PAGESIZECONFIG gsPageSizeConfig2MB;
+
+
+/* Forward declaration of protection bits derivation functions, for
+   the following structure */
+static IMG_UINT64 RGXDerivePCEProt8(IMG_UINT32 uiProtFlags, IMG_UINT8 ui8Log2PageSize);
+static IMG_UINT32 RGXDerivePCEProt4(IMG_UINT32 uiProtFlags);
+static IMG_UINT64 RGXDerivePDEProt8(IMG_UINT32 uiProtFlags, IMG_UINT8 ui8Log2PageSize);
+static IMG_UINT32 RGXDerivePDEProt4(IMG_UINT32 uiProtFlags);
+static IMG_UINT64 RGXDerivePTEProt8(IMG_UINT32 uiProtFlags, IMG_UINT8 ui8Log2PageSize);
+static IMG_UINT32 RGXDerivePTEProt4(IMG_UINT32 uiProtFlags);
+
+static PVRSRV_ERROR RGXGetPageSizeConfigCB(IMG_UINT32 uiLog2DataPageSize,
+                                           const MMU_PxE_CONFIG **ppsMMUPDEConfig,
+                                           const MMU_PxE_CONFIG **ppsMMUPTEConfig,
+                                           const MMU_DEVVADDR_CONFIG **ppsMMUDevVAddrConfig,
+                                           IMG_HANDLE *phPriv);
+
+static PVRSRV_ERROR RGXPutPageSizeConfigCB(IMG_HANDLE hPriv);
+
+static PVRSRV_ERROR RGXGetPageSizeFromPDE4(IMG_UINT32 ui32PDE, IMG_UINT32 *pui32Log2PageSize);
+static PVRSRV_ERROR RGXGetPageSizeFromPDE8(IMG_UINT64 ui64PDE, IMG_UINT32 *pui32Log2PageSize);
+
+static MMU_DEVICEATTRIBS sRGXMMUDeviceAttributes;
+
+PVRSRV_ERROR RGXMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	/*
+	 * Setup sRGXMMUDeviceAttributes
+	 */
+	sRGXMMUDeviceAttributes.eMMUType = PDUMP_MMU_TYPE_VARPAGE_40BIT;
+	sRGXMMUDeviceAttributes.eTopLevel = MMU_LEVEL_3;
+	sRGXMMUDeviceAttributes.ui32BaseAlign = RGX_MMUCTRL_PC_DATA_PD_BASE_ALIGNSHIFT;
+	sRGXMMUDeviceAttributes.psBaseConfig = &sRGXMMUPCEConfig;
+	sRGXMMUDeviceAttributes.psTopLevelDevVAddrConfig = &sRGXMMUTopLevelDevVAddrConfig;
+
+	/* Functions for deriving page table/dir/cat protection bits */
+	sRGXMMUDeviceAttributes.pfnDerivePCEProt8 = RGXDerivePCEProt8;
+	sRGXMMUDeviceAttributes.pfnDerivePCEProt4 = RGXDerivePCEProt4;
+	sRGXMMUDeviceAttributes.pfnDerivePDEProt8 = RGXDerivePDEProt8;
+	sRGXMMUDeviceAttributes.pfnDerivePDEProt4 = RGXDerivePDEProt4;
+	sRGXMMUDeviceAttributes.pfnDerivePTEProt8 = RGXDerivePTEProt8;
+	sRGXMMUDeviceAttributes.pfnDerivePTEProt4 = RGXDerivePTEProt4;
+
+	/* Functions for establishing configurations for PDE/PTE/DEVVADDR
+	   on per-heap basis */
+	sRGXMMUDeviceAttributes.pfnGetPageSizeConfiguration = RGXGetPageSizeConfigCB;
+	sRGXMMUDeviceAttributes.pfnPutPageSizeConfiguration = RGXPutPageSizeConfigCB;
+
+	sRGXMMUDeviceAttributes.pfnGetPageSizeFromPDE4 = RGXGetPageSizeFromPDE4;
+	sRGXMMUDeviceAttributes.pfnGetPageSizeFromPDE8 = RGXGetPageSizeFromPDE8;
+
+	/*
+	 * Setup sRGXMMUPCEConfig
+	 */
+	sRGXMMUPCEConfig.uiBytesPerEntry = 4; /* 32 bit entries */
+	sRGXMMUPCEConfig.uiAddrMask = 0xfffffff0; /* Mask to get significant address bits of PC entry */
+
+	sRGXMMUPCEConfig.uiAddrShift = 4; /* Shift this many bits to get PD address in PC entry */
+	sRGXMMUPCEConfig.uiLog2Align = 12; /* Alignment of PD AND PC */
+
+	sRGXMMUPCEConfig.uiProtMask = RGX_MMUCTRL_PCE_PROTMASK; //Mask to get the status bits of the PC */
+	sRGXMMUPCEConfig.uiProtShift = 0; /* Shift this many bits to have status bits starting with bit 0 */
+
+	sRGXMMUPCEConfig.uiValidEnMask = RGX_MMUCTRL_PC_DATA_VALID_EN; /* Mask to get entry valid bit of the PC */
+	sRGXMMUPCEConfig.uiValidEnShift = RGX_MMUCTRL_PC_DATA_VALID_SHIFT; /* Shift this many bits to have entry valid bit starting with bit 0 */
+
+	/*
+	 *  Setup sRGXMMUTopLevelDevVAddrConfig
+	 */
+	sRGXMMUTopLevelDevVAddrConfig.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK; /* Get the PC address bits from a 40 bit virt. address (in a 64bit UINT) */
+	sRGXMMUTopLevelDevVAddrConfig.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT;
+
+	sRGXMMUTopLevelDevVAddrConfig.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK; /* Get the PD address bits from a 40 bit virt. address (in a 64bit UINT) */
+	sRGXMMUTopLevelDevVAddrConfig.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT;
+
+/*
+ *
+ *  Configuration for heaps with 4kB Data-Page size
+ *
+ */
+
+	/*
+	 * Setup sRGXMMUPDEConfig_4KBDP
+	 */
+	sRGXMMUPDEConfig_4KBDP.uiBytesPerEntry = 8;
+
+	sRGXMMUPDEConfig_4KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0);
+	sRGXMMUPDEConfig_4KBDP.uiAddrShift = 12;
+	sRGXMMUPDEConfig_4KBDP.uiLog2Align = 12;
+
+	sRGXMMUPDEConfig_4KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e);
+	sRGXMMUPDEConfig_4KBDP.uiVarCtrlShift = 1;
+
+	sRGXMMUPDEConfig_4KBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK;
+	sRGXMMUPDEConfig_4KBDP.uiProtShift = 0;
+
+	sRGXMMUPDEConfig_4KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN;
+	sRGXMMUPDEConfig_4KBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT;
+
+	/*
+	 * Setup sRGXMMUPTEConfig_4KBDP
+	 */
+	sRGXMMUPTEConfig_4KBDP.uiBytesPerEntry = 8;
+
+	sRGXMMUPTEConfig_4KBDP.uiAddrMask = IMG_UINT64_C(0xfffffff000);
+	sRGXMMUPTEConfig_4KBDP.uiAddrShift = 12;
+	sRGXMMUPTEConfig_4KBDP.uiLog2Align = 12;
+
+	sRGXMMUPTEConfig_4KBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK;
+	sRGXMMUPTEConfig_4KBDP.uiProtShift = 0;
+
+	sRGXMMUPTEConfig_4KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN;
+	sRGXMMUPTEConfig_4KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT;
+
+	/*
+	 * Setup sRGXMMUDevVAddrConfig_4KBDP
+	 */
+	sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK;
+	sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT;
+
+	sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK;
+	sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT;
+
+	sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexMask = ~RGX_MMUCTRL_VADDR_PT_INDEX_CLRMSK;
+	sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexShift = RGX_MMUCTRL_VADDR_PT_INDEX_SHIFT;
+
+	sRGXMMUDevVAddrConfig_4KBDP.uiPageOffsetMask = IMG_UINT64_C(0x0000000fff);
+	sRGXMMUDevVAddrConfig_4KBDP.uiPageOffsetShift = 0;
+
+	/*
+	 * Setup gsPageSizeConfig4KB
+	 */
+	gsPageSizeConfig4KB.psPDEConfig = &sRGXMMUPDEConfig_4KBDP;
+	gsPageSizeConfig4KB.psPTEConfig = &sRGXMMUPTEConfig_4KBDP;
+	gsPageSizeConfig4KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_4KBDP;
+	gsPageSizeConfig4KB.uiRefCount = 0;
+ 	gsPageSizeConfig4KB.uiMaxRefCount = 0;
+
+
+/*
+ *
+ *  Configuration for heaps with 16kB Data-Page size
+ *
+ */
+
+	/*
+	 * Setup sRGXMMUPDEConfig_16KBDP
+	 */
+	sRGXMMUPDEConfig_16KBDP.uiBytesPerEntry = 8;
+
+	sRGXMMUPDEConfig_16KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0);
+	sRGXMMUPDEConfig_16KBDP.uiAddrShift = 10; /* These are for a page directory ENTRY, meaning the address of a PT cropped to suit the PD */
+	sRGXMMUPDEConfig_16KBDP.uiLog2Align = 10; /* Alignment of the page tables NOT directories */
+
+	sRGXMMUPDEConfig_16KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e);
+	sRGXMMUPDEConfig_16KBDP.uiVarCtrlShift = 1;
+
+	sRGXMMUPDEConfig_16KBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK;
+	sRGXMMUPDEConfig_16KBDP.uiProtShift = 0;
+
+	sRGXMMUPDEConfig_16KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN;
+	sRGXMMUPDEConfig_16KBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT;
+
+	/*
+	 * Setup sRGXMMUPTEConfig_16KBDP
+	 */
+	sRGXMMUPTEConfig_16KBDP.uiBytesPerEntry = 8;
+
+	sRGXMMUPTEConfig_16KBDP.uiAddrMask = IMG_UINT64_C(0xffffffc000);
+	sRGXMMUPTEConfig_16KBDP.uiAddrShift = 14; /* These are for a page table ENTRY, meaning the address of a PAGE cropped to suit the PD */
+	sRGXMMUPTEConfig_16KBDP.uiLog2Align = 14; /* Alignment of the pages NOT tables */
+
+	sRGXMMUPTEConfig_16KBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK;
+	sRGXMMUPTEConfig_16KBDP.uiProtShift = 0;
+
+	sRGXMMUPTEConfig_16KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN;
+	sRGXMMUPTEConfig_16KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT;
+
+	/*
+	 * Setup sRGXMMUDevVAddrConfig_16KBDP
+	 */
+	sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK;
+	sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT;
+
+	sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK;
+	sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT;
+
+	sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexMask = IMG_UINT64_C(0x00001fc000);
+	sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexShift = 14;
+
+	sRGXMMUDevVAddrConfig_16KBDP.uiPageOffsetMask = IMG_UINT64_C(0x0000003fff);
+	sRGXMMUDevVAddrConfig_16KBDP.uiPageOffsetShift = 0;
+
+	/*
+	 * Setup gsPageSizeConfig16KB
+	 */
+	gsPageSizeConfig16KB.psPDEConfig = &sRGXMMUPDEConfig_16KBDP;
+	gsPageSizeConfig16KB.psPTEConfig = &sRGXMMUPTEConfig_16KBDP;
+	gsPageSizeConfig16KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_16KBDP;
+	gsPageSizeConfig16KB.uiRefCount = 0;
+	gsPageSizeConfig16KB.uiMaxRefCount = 0;
+
+
+/*
+ *
+ *  Configuration for heaps with 64kB Data-Page size
+ *
+ */
+
+	/*
+	 * Setup sRGXMMUPDEConfig_64KBDP
+	 */
+	sRGXMMUPDEConfig_64KBDP.uiBytesPerEntry = 8;
+
+	sRGXMMUPDEConfig_64KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0);
+	sRGXMMUPDEConfig_64KBDP.uiAddrShift = 8;
+	sRGXMMUPDEConfig_64KBDP.uiLog2Align = 8;
+
+	sRGXMMUPDEConfig_64KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e);
+	sRGXMMUPDEConfig_64KBDP.uiVarCtrlShift = 1;
+
+	sRGXMMUPDEConfig_64KBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK;
+	sRGXMMUPDEConfig_64KBDP.uiProtShift = 0;
+
+	sRGXMMUPDEConfig_64KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN;
+	sRGXMMUPDEConfig_64KBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT;
+
+	/*
+	 * Setup sRGXMMUPTEConfig_64KBDP
+	 */
+	sRGXMMUPTEConfig_64KBDP.uiBytesPerEntry = 8;
+
+	sRGXMMUPTEConfig_64KBDP.uiAddrMask = IMG_UINT64_C(0xffffff0000);
+	sRGXMMUPTEConfig_64KBDP.uiAddrShift =16;
+	sRGXMMUPTEConfig_64KBDP.uiLog2Align = 16;
+
+	sRGXMMUPTEConfig_64KBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK;
+	sRGXMMUPTEConfig_64KBDP.uiProtShift = 0;
+
+	sRGXMMUPTEConfig_64KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN;
+	sRGXMMUPTEConfig_64KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT;
+
+	/*
+	 * Setup sRGXMMUDevVAddrConfig_64KBDP
+	 */
+	sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK;
+	sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT;
+
+	sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK;
+	sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT;
+
+	sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexMask = IMG_UINT64_C(0x00001f0000);
+	sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexShift = 16;
+
+	sRGXMMUDevVAddrConfig_64KBDP.uiPageOffsetMask = IMG_UINT64_C(0x000000ffff);
+	sRGXMMUDevVAddrConfig_64KBDP.uiPageOffsetShift = 0;
+
+	/*
+	 * Setup gsPageSizeConfig64KB
+	 */
+	gsPageSizeConfig64KB.psPDEConfig = &sRGXMMUPDEConfig_64KBDP;
+	gsPageSizeConfig64KB.psPTEConfig = &sRGXMMUPTEConfig_64KBDP;
+	gsPageSizeConfig64KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_64KBDP;
+	gsPageSizeConfig64KB.uiRefCount = 0;
+	gsPageSizeConfig64KB.uiMaxRefCount = 0;
+
+
+/*
+ *
+ *  Configuration for heaps with 256kB Data-Page size
+ *
+ */
+
+	/*
+	 * Setup sRGXMMUPDEConfig_256KBDP
+	 */
+	sRGXMMUPDEConfig_256KBDP.uiBytesPerEntry = 8;
+
+	sRGXMMUPDEConfig_256KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0);
+	sRGXMMUPDEConfig_256KBDP.uiAddrShift = 6;
+	sRGXMMUPDEConfig_256KBDP.uiLog2Align = 6;
+
+	sRGXMMUPDEConfig_256KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e);
+	sRGXMMUPDEConfig_256KBDP.uiVarCtrlShift = 1;
+
+	sRGXMMUPDEConfig_256KBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK;
+	sRGXMMUPDEConfig_256KBDP.uiProtShift = 0;
+
+	sRGXMMUPDEConfig_256KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN;
+	sRGXMMUPDEConfig_256KBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT;
+
+	/*
+	 * Setup MMU_PxE_CONFIG sRGXMMUPTEConfig_256KBDP
+	 */
+	sRGXMMUPTEConfig_256KBDP.uiBytesPerEntry = 8;
+
+	sRGXMMUPTEConfig_256KBDP.uiAddrMask = IMG_UINT64_C(0xfffffc0000);
+	sRGXMMUPTEConfig_256KBDP.uiAddrShift = 18;
+	sRGXMMUPTEConfig_256KBDP.uiLog2Align = 18;
+
+	sRGXMMUPTEConfig_256KBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK;
+	sRGXMMUPTEConfig_256KBDP.uiProtShift = 0;
+
+	sRGXMMUPTEConfig_256KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN;
+	sRGXMMUPTEConfig_256KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT;
+
+	/*
+	 * Setup sRGXMMUDevVAddrConfig_256KBDP
+	 */
+	sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK;
+	sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT;
+
+	sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK;
+	sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT;
+
+	sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexMask = IMG_UINT64_C(0x00001c0000);
+	sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexShift = 18;
+
+	sRGXMMUDevVAddrConfig_256KBDP.uiPageOffsetMask = IMG_UINT64_C(0x000003ffff);
+	sRGXMMUDevVAddrConfig_256KBDP.uiPageOffsetShift = 0;
+
+	/*
+	 * Setup gsPageSizeConfig256KB
+	 */
+	gsPageSizeConfig256KB.psPDEConfig = &sRGXMMUPDEConfig_256KBDP;
+	gsPageSizeConfig256KB.psPTEConfig = &sRGXMMUPTEConfig_256KBDP;
+	gsPageSizeConfig256KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_256KBDP;
+	gsPageSizeConfig256KB.uiRefCount = 0;
+	gsPageSizeConfig256KB.uiMaxRefCount = 0;
+
+	/*
+	 * Setup sRGXMMUPDEConfig_1MBDP
+	 */
+	sRGXMMUPDEConfig_1MBDP.uiBytesPerEntry = 8;
+
+	sRGXMMUPDEConfig_1MBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0);
+	sRGXMMUPDEConfig_1MBDP.uiAddrShift = 4;
+	sRGXMMUPDEConfig_1MBDP.uiLog2Align = 4;
+
+	sRGXMMUPDEConfig_1MBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e);
+	sRGXMMUPDEConfig_1MBDP.uiVarCtrlShift = 1;
+
+	sRGXMMUPDEConfig_1MBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK;
+	sRGXMMUPDEConfig_1MBDP.uiProtShift = 0;
+
+	sRGXMMUPDEConfig_1MBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN;
+	sRGXMMUPDEConfig_1MBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT;
+
+	/*
+	 * Setup sRGXMMUPTEConfig_1MBDP
+	 */
+	sRGXMMUPTEConfig_1MBDP.uiBytesPerEntry = 8;
+
+	sRGXMMUPTEConfig_1MBDP.uiAddrMask = IMG_UINT64_C(0xfffff00000);
+	sRGXMMUPTEConfig_1MBDP.uiAddrShift = 20;
+	sRGXMMUPTEConfig_1MBDP.uiLog2Align = 20;
+
+	sRGXMMUPTEConfig_1MBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK;
+	sRGXMMUPTEConfig_1MBDP.uiProtShift = 0;
+
+	sRGXMMUPTEConfig_1MBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN;
+	sRGXMMUPTEConfig_1MBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT;
+
+	/*
+	 * Setup sRGXMMUDevVAddrConfig_1MBDP
+	 */
+	sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK;
+	sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT;
+
+	sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK;
+	sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT;
+
+	sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexMask = IMG_UINT64_C(0x0000100000);
+	sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexShift = 20;
+
+	sRGXMMUDevVAddrConfig_1MBDP.uiPageOffsetMask = IMG_UINT64_C(0x00000fffff);
+	sRGXMMUDevVAddrConfig_1MBDP.uiPageOffsetShift = 0;
+
+	/*
+	 * Setup gsPageSizeConfig1MB
+	 */
+	gsPageSizeConfig1MB.psPDEConfig = &sRGXMMUPDEConfig_1MBDP;
+	gsPageSizeConfig1MB.psPTEConfig = &sRGXMMUPTEConfig_1MBDP;
+	gsPageSizeConfig1MB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_1MBDP;
+	gsPageSizeConfig1MB.uiRefCount = 0;
+	gsPageSizeConfig1MB.uiMaxRefCount = 0;
+
+	/*
+	 * Setup sRGXMMUPDEConfig_2MBDP
+	 */
+	sRGXMMUPDEConfig_2MBDP.uiBytesPerEntry = 8;
+
+	sRGXMMUPDEConfig_2MBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0);
+	sRGXMMUPDEConfig_2MBDP.uiAddrShift = 4;
+	sRGXMMUPDEConfig_2MBDP.uiLog2Align = 4;
+
+	sRGXMMUPDEConfig_2MBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e);
+	sRGXMMUPDEConfig_2MBDP.uiVarCtrlShift = 1;
+
+	sRGXMMUPDEConfig_2MBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK;
+	sRGXMMUPDEConfig_2MBDP.uiProtShift = 0;
+
+	sRGXMMUPDEConfig_2MBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN;
+	sRGXMMUPDEConfig_2MBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT;
+
+	/*
+	 * Setup sRGXMMUPTEConfig_2MBDP
+	 */
+	sRGXMMUPTEConfig_2MBDP.uiBytesPerEntry = 8;
+
+	sRGXMMUPTEConfig_2MBDP.uiAddrMask = IMG_UINT64_C(0xffffe00000);
+	sRGXMMUPTEConfig_2MBDP.uiAddrShift = 21;
+	sRGXMMUPTEConfig_2MBDP.uiLog2Align = 21;
+
+	sRGXMMUPTEConfig_2MBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK;
+	sRGXMMUPTEConfig_2MBDP.uiProtShift = 0;
+
+	sRGXMMUPTEConfig_2MBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN;
+	sRGXMMUPTEConfig_2MBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT;
+
+	/*
+	 * Setup sRGXMMUDevVAddrConfig_2MBDP
+	 */
+	sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK;
+	sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT;
+
+	sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK;
+	sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT;
+
+	sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexMask = IMG_UINT64_C(0x0000000000);
+	sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexShift = 21;
+
+	sRGXMMUDevVAddrConfig_2MBDP.uiPageOffsetMask = IMG_UINT64_C(0x00001fffff);
+	sRGXMMUDevVAddrConfig_2MBDP.uiPageOffsetShift = 0;
+
+	/*
+	 * Setup gsPageSizeConfig2MB
+	 */
+	gsPageSizeConfig2MB.psPDEConfig = &sRGXMMUPDEConfig_2MBDP;
+	gsPageSizeConfig2MB.psPTEConfig = &sRGXMMUPTEConfig_2MBDP;
+	gsPageSizeConfig2MB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_2MBDP;
+	gsPageSizeConfig2MB.uiRefCount = 0;
+	gsPageSizeConfig2MB.uiMaxRefCount = 0;
+
+	/*
+	 * Setup sRGXMMUDeviceAttributes
+	 */
+	sRGXMMUDeviceAttributes.eTopLevel = MMU_LEVEL_3;
+	sRGXMMUDeviceAttributes.ui32BaseAlign = RGX_MMUCTRL_PC_DATA_PD_BASE_ALIGNSHIFT;
+	sRGXMMUDeviceAttributes.psBaseConfig = &sRGXMMUPCEConfig;
+	sRGXMMUDeviceAttributes.psTopLevelDevVAddrConfig = &sRGXMMUTopLevelDevVAddrConfig;
+
+	/* Functions for deriving page table/dir/cat protection bits */
+	sRGXMMUDeviceAttributes.pfnDerivePCEProt8 = RGXDerivePCEProt8;
+	sRGXMMUDeviceAttributes.pfnDerivePCEProt4 = RGXDerivePCEProt4;
+	sRGXMMUDeviceAttributes.pfnDerivePDEProt8 = RGXDerivePDEProt8;
+	sRGXMMUDeviceAttributes.pfnDerivePDEProt4 = RGXDerivePDEProt4;
+	sRGXMMUDeviceAttributes.pfnDerivePTEProt8 = RGXDerivePTEProt8;
+	sRGXMMUDeviceAttributes.pfnDerivePTEProt4 = RGXDerivePTEProt4;
+
+	/* Functions for establishing configurations for PDE/PTE/DEVVADDR
+	   on per-heap basis */
+	sRGXMMUDeviceAttributes.pfnGetPageSizeConfiguration = RGXGetPageSizeConfigCB;
+	sRGXMMUDeviceAttributes.pfnPutPageSizeConfiguration = RGXPutPageSizeConfigCB;
+
+	psDeviceNode->psMMUDevAttrs = &sRGXMMUDeviceAttributes;
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXMMUInit_Unregister(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+    PVRSRV_ERROR eError;
+
+    eError = PVRSRV_OK;
+
+#if defined(PDUMP)
+    psDeviceNode->pfnMMUGetContextID = IMG_NULL;
+#endif
+
+    psDeviceNode->psMMUDevAttrs = IMG_NULL;
+
+#if defined(DEBUG)
+    PVR_DPF((PVR_DBG_MESSAGE, "Variable Page Size Heap Stats:"));
+    PVR_DPF((PVR_DBG_MESSAGE, "Max 4K page heaps: %d",
+             gsPageSizeConfig4KB.uiMaxRefCount));
+    PVR_DPF((PVR_DBG_VERBOSE, "Current 4K page heaps (should be 0): %d",
+             gsPageSizeConfig4KB.uiRefCount));
+    PVR_DPF((PVR_DBG_MESSAGE, "Max 16K page heaps: %d",
+             gsPageSizeConfig16KB.uiMaxRefCount));
+    PVR_DPF((PVR_DBG_VERBOSE, "Current 16K page heaps (should be 0): %d",
+             gsPageSizeConfig16KB.uiRefCount));
+    PVR_DPF((PVR_DBG_MESSAGE, "Max 64K page heaps: %d",
+             gsPageSizeConfig64KB.uiMaxRefCount));
+    PVR_DPF((PVR_DBG_VERBOSE, "Current 64K page heaps (should be 0): %d",
+             gsPageSizeConfig64KB.uiRefCount));
+    PVR_DPF((PVR_DBG_MESSAGE, "Max 256K page heaps: %d",
+             gsPageSizeConfig256KB.uiMaxRefCount));
+    PVR_DPF((PVR_DBG_VERBOSE, "Current 256K page heaps (should be 0): %d",
+             gsPageSizeConfig256KB.uiRefCount));
+    PVR_DPF((PVR_DBG_MESSAGE, "Max 1M page heaps: %d",
+             gsPageSizeConfig1MB.uiMaxRefCount));
+    PVR_DPF((PVR_DBG_VERBOSE, "Current 1M page heaps (should be 0): %d",
+             gsPageSizeConfig1MB.uiRefCount));
+    PVR_DPF((PVR_DBG_MESSAGE, "Max 2M page heaps: %d",
+             gsPageSizeConfig2MB.uiMaxRefCount));
+    PVR_DPF((PVR_DBG_VERBOSE, "Current 2M page heaps (should be 0): %d",
+             gsPageSizeConfig2MB.uiRefCount));
+#endif
+    if (gsPageSizeConfig4KB.uiRefCount > 0 ||
+        gsPageSizeConfig16KB.uiRefCount > 0 ||
+        gsPageSizeConfig64KB.uiRefCount > 0 ||
+        gsPageSizeConfig256KB.uiRefCount > 0 ||
+        gsPageSizeConfig1MB.uiRefCount > 0 ||
+        gsPageSizeConfig2MB.uiRefCount > 0
+        )
+    {
+        PVR_DPF((PVR_DBG_ERROR, "RGXMMUInit_Unregister: Unbalanced MMU API Usage (Internal error)"));
+    }
+
+    return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       RGXDerivePCEProt4
+@Description    calculate the PCE protection flags based on a 4 byte entry
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+static IMG_UINT32 RGXDerivePCEProt4(IMG_UINT32 uiProtFlags)
+{
+    return (uiProtFlags & MMU_PROTFLAGS_INVALID)?0:RGX_MMUCTRL_PC_DATA_VALID_EN;
+}
+
+
+/*************************************************************************/ /*!
+@Function       RGXDerivePCEProt8
+@Description    calculate the PCE protection flags based on an 8 byte entry
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+static IMG_UINT64 RGXDerivePCEProt8(IMG_UINT32 uiProtFlags, IMG_UINT8 ui8Log2PageSize)
+{
+	PVR_UNREFERENCED_PARAMETER(uiProtFlags);
+	PVR_UNREFERENCED_PARAMETER(ui8Log2PageSize);
+
+	PVR_DPF((PVR_DBG_ERROR, "8-byte PCE not supported on this device"));
+	return 0;	
+}
+
+
+/*************************************************************************/ /*!
+@Function       RGXDerivePDEProt4
+@Description    derive the PDE protection flags based on a 4 byte entry
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+static IMG_UINT32 RGXDerivePDEProt4(IMG_UINT32 uiProtFlags)
+{
+    PVR_UNREFERENCED_PARAMETER(uiProtFlags);
+	PVR_DPF((PVR_DBG_ERROR, "4-byte PDE not supported on this device"));
+	return 0;
+}
+
+
+/*************************************************************************/ /*!
+@Function       RGXDerivePDEProt8
+@Description    derive the PDE protection flags based on an 8 byte entry
+
+@Input          ui8Log2PageSize The log2 of the required page size.
+                E.g, for 4KiB pages, this parameter must be 12.
+                For 2MiB pages, it must be set to 21.
+
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+static IMG_UINT64 RGXDerivePDEProt8(IMG_UINT32 uiProtFlags, IMG_UINT8 ui8Log2PageSize)
+{
+	IMG_UINT64 ret_value = 0; // 0 means invalid
+
+    if (! (uiProtFlags & MMU_PROTFLAGS_INVALID)) // if not invalid
+	{
+		switch (ui8Log2PageSize)
+		{
+			case 12:
+				ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_4KB;
+				break;
+			case 14:
+				ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_16KB;
+				break;
+			case 16:
+				ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_64KB;
+				break;
+			case 18:
+				ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_256KB;
+				break;
+			case 20:
+				ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_1MB;
+				break;
+			case 21:
+				ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_2MB;
+				break;
+			default:
+				PVR_DPF((PVR_DBG_ERROR,
+						 "%s:%d: in function<%s>: Invalid parameter log2_page_size. Expected {12, 14, 16, 18, 20, 21}. Got [%u]",
+						 __FILE__, __LINE__, __FUNCTION__, ui8Log2PageSize));
+		}
+	}
+	return ret_value;
+}
+
+
+/*************************************************************************/ /*!
+@Function       RGXDerivePTEProt4
+@Description    calculate the PTE protection flags based on a 4 byte entry
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+static IMG_UINT32 RGXDerivePTEProt4(IMG_UINT32 uiProtFlags)
+{
+    PVR_UNREFERENCED_PARAMETER(uiProtFlags);
+	PVR_DPF((PVR_DBG_ERROR, "4-byte PTE not supported on this device"));
+
+	return 0;
+}
+
+/*************************************************************************/ /*!
+@Function       RGXDerivePTEProt8
+@Description    calculate the PTE protection flags based on an 8 byte entry
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+static IMG_UINT64 RGXDerivePTEProt8(IMG_UINT32 uiProtFlags, IMG_UINT8 ui8Log2PageSize)
+{
+	IMG_UINT64 ui64MMUFlags=0;
+
+	PVR_UNREFERENCED_PARAMETER(ui8Log2PageSize);
+
+	if(((MMU_PROTFLAGS_READABLE|MMU_PROTFLAGS_WRITEABLE) & uiProtFlags) == (MMU_PROTFLAGS_READABLE|MMU_PROTFLAGS_WRITEABLE))
+	{
+		/* read/write */
+	}
+	else if(MMU_PROTFLAGS_READABLE & uiProtFlags)
+	{
+		/* read only */
+		ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_READ_ONLY_EN;
+	}
+	else if(MMU_PROTFLAGS_WRITEABLE & uiProtFlags)
+	{
+		/* write only */
+        PVR_DPF((PVR_DBG_ERROR, "RGXDerivePTEProt8: write-only is not possible on this device"));
+	}
+    else if ((MMU_PROTFLAGS_INVALID & uiProtFlags) == 0)
+    {
+        PVR_DPF((PVR_DBG_ERROR, "RGXDerivePTEProt8: neither read nor write specified..."));
+    }
+
+	/* cache coherency */
+	if(MMU_PROTFLAGS_CACHE_COHERENT & uiProtFlags)
+	{
+		ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_CC_EN;
+	}
+
+	/* cache setup */
+	if ((MMU_PROTFLAGS_CACHED & uiProtFlags) == 0)
+	{
+		ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_EN;
+	}
+
+    if ((uiProtFlags & MMU_PROTFLAGS_INVALID) == 0)
+    {
+        ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_VALID_EN;
+    }
+
+	if (MMU_PROTFLAGS_DEVICE(PMMETA_PROTECT) & uiProtFlags)
+	{
+		ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_EN;
+	}
+
+	return ui64MMUFlags;
+}
+
+
+/*************************************************************************/ /*!
+@Function       RGXGetPageSizeConfig
+@Description    Set up configuration for variable sized data pages.
+                RGXPutPageSizeConfigCB has to be called to ensure correct
+                refcounting.
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+static PVRSRV_ERROR RGXGetPageSizeConfigCB(IMG_UINT32 uiLog2DataPageSize,
+                                           const MMU_PxE_CONFIG **ppsMMUPDEConfig,
+                                           const MMU_PxE_CONFIG **ppsMMUPTEConfig,
+                                           const MMU_DEVVADDR_CONFIG **ppsMMUDevVAddrConfig,
+                                           IMG_HANDLE *phPriv)
+{
+    RGX_PAGESIZECONFIG *psPageSizeConfig;
+
+    switch (uiLog2DataPageSize)
+    {
+    case 12:
+        psPageSizeConfig = &gsPageSizeConfig4KB;
+        break;
+    case 14:
+        psPageSizeConfig = &gsPageSizeConfig16KB;
+        break;
+    case 16:
+        psPageSizeConfig = &gsPageSizeConfig64KB;
+        break;
+    case 18:
+        psPageSizeConfig = &gsPageSizeConfig256KB;
+        break;
+    case 20:
+        psPageSizeConfig = &gsPageSizeConfig1MB;
+        break;
+    case 21:
+        psPageSizeConfig = &gsPageSizeConfig2MB;
+        break;
+    default:
+        PVR_DPF((PVR_DBG_ERROR,
+                 "RGXGetPageSizeConfigCB: Invalid Data Page Size 1<<0x%x",
+                 uiLog2DataPageSize));
+        return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
+    }
+    
+    /* Refer caller's pointers to the data */
+    *ppsMMUPDEConfig = psPageSizeConfig->psPDEConfig;
+    *ppsMMUPTEConfig = psPageSizeConfig->psPTEConfig;
+    *ppsMMUDevVAddrConfig = psPageSizeConfig->psDevVAddrConfig;
+
+#if defined(SUPPORT_MMU_PAGESIZECONFIG_REFCOUNT)
+    /* Increment ref-count - not that we're allocating anything here
+       (I'm using static structs), but one day we might, so we want
+       the Get/Put code to be balanced properly */
+    psPageSizeConfig->uiRefCount ++;
+
+    /* This is purely for debug statistics */
+    psPageSizeConfig->uiMaxRefCount = MAX(psPageSizeConfig->uiMaxRefCount,
+                                          psPageSizeConfig->uiRefCount);
+#endif
+
+    *phPriv = (IMG_HANDLE)(IMG_UINTPTR_T)uiLog2DataPageSize;
+	PVR_ASSERT (uiLog2DataPageSize == (IMG_UINT32)(IMG_UINTPTR_T)*phPriv);
+
+    return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       RGXPutPageSizeConfig
+@Description    Tells this code that the mmu module is done with the
+                configurations set in RGXGetPageSizeConfig.  This can
+                be a no-op.
+                Called after RGXGetPageSizeConfigCB.
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+static PVRSRV_ERROR RGXPutPageSizeConfigCB(IMG_HANDLE hPriv)
+{
+#if defined(SUPPORT_MMU_PAGESIZECONFIG_REFCOUNT)
+    RGX_PAGESIZECONFIG *psPageSizeConfig;
+    IMG_UINT32 uiLog2DataPageSize;
+
+    uiLog2DataPageSize = (IMG_UINT32)(IMG_UINTPTR_T) hPriv;
+
+    switch (uiLog2DataPageSize)
+    {
+    case 12:
+        psPageSizeConfig = &gsPageSizeConfig4KB;
+        break;
+    case 14:
+        psPageSizeConfig = &gsPageSizeConfig16KB;
+        break;
+    case 16:
+        psPageSizeConfig = &gsPageSizeConfig64KB;
+        break;
+    case 18:
+        psPageSizeConfig = &gsPageSizeConfig256KB;
+        break;
+    case 20:
+        psPageSizeConfig = &gsPageSizeConfig1MB;
+        break;
+    case 21:
+        psPageSizeConfig = &gsPageSizeConfig2MB;
+        break;
+    default:
+        PVR_DPF((PVR_DBG_ERROR,
+                 "RGXPutPageSizeConfigCB: Invalid Data Page Size 1<<0x%x",
+                 uiLog2DataPageSize));
+        return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
+    }
+
+    /* Ref-count here is not especially useful, but it's an extra
+       check that the API is being used correctly */
+    psPageSizeConfig->uiRefCount --;
+#else
+    PVR_UNREFERENCED_PARAMETER(hPriv);
+#endif
+    return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR RGXGetPageSizeFromPDE4(IMG_UINT32 ui32PDE, IMG_UINT32 *pui32Log2PageSize)
+{
+    PVR_UNREFERENCED_PARAMETER(ui32PDE);
+    PVR_UNREFERENCED_PARAMETER(pui32Log2PageSize);
+	PVR_DPF((PVR_DBG_ERROR, "4-byte PDE not supported on this device"));
+	return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
+}
+
+static PVRSRV_ERROR RGXGetPageSizeFromPDE8(IMG_UINT64 ui64PDE, IMG_UINT32 *pui32Log2PageSize)
+{
+	switch (ui64PDE & (~RGX_MMUCTRL_PD_DATA_PAGE_SIZE_CLRMSK))
+	{
+		case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_4KB:
+			*pui32Log2PageSize = 12;
+			break;
+		case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_16KB:
+			*pui32Log2PageSize = 14;
+			break;
+		case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_64KB:
+			*pui32Log2PageSize = 16;
+			break;
+		case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_256KB:
+			*pui32Log2PageSize = 18;
+			break;
+		case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_1MB:
+			*pui32Log2PageSize = 20;
+			break;
+		case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_2MB:
+			*pui32Log2PageSize = 21;
+			break;
+		default:
+			return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
+			break;
+	}
+	return PVRSRV_OK;
+}
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxmmuinit.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxmmuinit.h
new file mode 100644
index 0000000..c79a72e
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxmmuinit.h
@@ -0,0 +1,60 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device specific initialisation routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific MMU initialisation
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* NB: this file is not to be included arbitrarily.  It exists solely
+   for the linkage between rgxinit.c and rgxmmuinit.c, the former
+   being otherwise cluttered by the contents of the latter */
+
+#ifndef _SRVKM_RGXMMUINIT_H_
+#define _SRVKM_RGXMMUINIT_H_
+
+#include "device.h"
+#include "img_types.h"
+#include "mmu_common.h"
+#include "img_defs.h"
+
+IMG_EXPORT PVRSRV_ERROR RGXMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode);
+IMG_EXPORT PVRSRV_ERROR RGXMMUInit_Unregister(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+
+#endif /* #ifndef _SRVKM_RGXMMUINIT_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxpdump.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxpdump.c
new file mode 100644
index 0000000..0f245c0
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxpdump.c
@@ -0,0 +1,133 @@
+/*************************************************************************/ /*!
+@File			rgxpdump.c
+@Title          Device specific pdump routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific pdump functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if defined(PDUMP)
+
+#include "devicemem_pdump.h"
+#include "rgxpdump.h"
+
+/*
+ * PVRSRVPDumpSignatureBufferKM
+ */
+PVRSRV_ERROR PVRSRVPDumpSignatureBufferKM(PVRSRV_DEVICE_NODE	*psDeviceNode,
+										  IMG_UINT32			ui32PDumpFlags)
+{	
+	PVRSRV_RGXDEV_INFO	*psDevInfo = psDeviceNode->pvDevice;
+
+	/* TA signatures */
+	PDumpCommentWithFlags(ui32PDumpFlags, "** Dump TA signatures and checksums Buffer");
+	 
+	DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWSigTAChecksMemDesc,
+								 0,
+								 psDevInfo->ui32SigTAChecksSize,
+								 "out.tasig",
+								 0,
+								 ui32PDumpFlags);
+
+	/* 3D signatures */
+	PDumpCommentWithFlags(ui32PDumpFlags, "** Dump 3D signatures and checksums Buffer");
+	DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWSig3DChecksMemDesc,
+								 0,
+								 psDevInfo->ui32Sig3DChecksSize,
+								 "out.3dsig",
+								 0,
+								 ui32PDumpFlags);
+
+#if defined(RGX_FEATURE_RAY_TRACING)
+	/* RT signatures */
+	PDumpCommentWithFlags(ui32PDumpFlags, "** Dump RTU signatures and checksums Buffer");
+	DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWSigRTChecksMemDesc,
+								 0,
+								 psDevInfo->ui32SigRTChecksSize,
+								 "out.rtsig",
+								 0,
+								 ui32PDumpFlags);
+								 
+	/* SH signatures */
+	PDumpCommentWithFlags(ui32PDumpFlags, "** Dump SHG signatures and checksums Buffer");
+	DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWSigSHChecksMemDesc,
+								 0,
+								 psDevInfo->ui32SigSHChecksSize,
+								 "out.shsig",
+								 0,
+								 ui32PDumpFlags);
+#endif
+
+	return PVRSRV_OK;
+}
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVPDumpTraceBufferKM(PVRSRV_DEVICE_NODE	*psDeviceNode,
+									  IMG_UINT32			ui32PDumpFlags)
+{	
+	PVRSRV_RGXDEV_INFO	*psDevInfo = psDeviceNode->pvDevice;
+
+	/* Dump trace buffers */
+	PDumpCommentWithFlags(ui32PDumpFlags, "** Dump trace buffers");
+	DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWIfTraceBufCtlMemDesc,
+								offsetof(RGXFWIF_TRACEBUF, sTraceBuf),
+								RGXFW_THREAD_NUM * 
+								   ( 1 * sizeof(IMG_UINT32) 
+								    +RGXFW_TRACE_BUFFER_SIZE * sizeof(IMG_UINT32) 
+								    +RGXFW_TRACE_BUFFER_ASSERT_SIZE * sizeof(IMG_CHAR)),
+								"out.trace",
+								0,
+								ui32PDumpFlags);
+
+	/* Dump hwperf buffer */
+	PDumpCommentWithFlags(ui32PDumpFlags, "** Dump HWPerf Buffer");
+	DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWIfHWPerfBufMemDesc,
+								 0,
+								 psDevInfo->ui32RGXFWIfHWPerfBufSize,
+								 "out.hwperf",
+								 0,
+								 ui32PDumpFlags);
+	return PVRSRV_OK;
+}
+
+#endif /* PDUMP */
+
+/******************************************************************************
+ End of file (rgxpdump.c)
+******************************************************************************/
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxpdump.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxpdump.h
new file mode 100644
index 0000000..4cdf9cb
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxpdump.h
@@ -0,0 +1,112 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX pdump Functionality
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX pdump functionality
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+#include "rgxdevice.h"
+#include "device.h"
+#include "devicemem.h"
+#include "pdump_km.h"
+#include "pvr_debug.h"
+
+#if defined(PDUMP)
+/*!
+******************************************************************************
+
+ @Function	PVRSRVPDumpSignatureBufferKM
+
+ @Description
+
+ Dumps TA and 3D signature and checksum buffers
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVPDumpSignatureBufferKM(PVRSRV_DEVICE_NODE	*psDeviceNode,
+										IMG_UINT32			ui32PDumpFlags);
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVPDumpTraceBufferKM
+
+ @Description
+
+ Dumps TA and 3D signature and checksum buffers
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVPDumpTraceBufferKM(PVRSRV_DEVICE_NODE	*psDeviceNode,
+										IMG_UINT32			ui32PDumpFlags);
+#else  	/* PDUMP */
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVPDumpSignatureBufferKM)
+#endif
+static INLINE PVRSRV_ERROR
+PVRSRVPDumpSignatureBufferKM(PVRSRV_DEVICE_NODE	*psDeviceNode,
+							 IMG_UINT32			ui32PDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+	PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVPDumpTraceBufferKM)
+#endif
+static INLINE PVRSRV_ERROR
+PVRSRVPDumpTraceBufferKM(PVRSRV_DEVICE_NODE	*psDeviceNode,
+						 IMG_UINT32			ui32PDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+	PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+	return PVRSRV_OK;
+}
+#endif	/* PDUMP */
+/******************************************************************************
+ End of file (rgxpdump.h)
+******************************************************************************/
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxpower.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxpower.c
new file mode 100644
index 0000000..01914ec
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxpower.c
@@ -0,0 +1,1398 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device specific power routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+
+#include "rgxpower.h"
+#include "rgx_fwif_km.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "pdump_km.h"
+#include "rgxdefs_km.h"
+#include "pvrsrv.h"
+#include "pvr_debug.h"
+#include "osfunc.h"
+#include "rgxdebug.h"
+#include "rgx_meta.h"
+#include "devicemem_pdump.h"
+#include "rgxapi_km.h"
+#include "rgxtimecorr.h"
+
+#include "process_stats.h"
+
+extern IMG_UINT32 g_ui32HostSampleIRQCount;
+
+#if ! defined(FIX_HW_BRN_37453)
+/*!
+*******************************************************************************
+
+ @Function	RGXEnableClocks
+
+ @Description Enable RGX Clocks
+
+ @Input psDevInfo - device info structure
+
+ @Return   IMG_VOID
+
+******************************************************************************/
+static IMG_VOID RGXEnableClocks(PVRSRV_RGXDEV_INFO	*psDevInfo)
+{
+	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "RGX clock: use default (automatic clock gating)");
+}
+#endif
+#if defined(PVR_DVFS)
+#include "pvr_dvfs_device.h"
+#endif
+
+
+/*!
+*******************************************************************************
+
+ @Function	_RGXInitSLC
+
+ @Description Initialise RGX SLC
+
+ @Input psDevInfo - device info structure
+
+ @Return   IMG_VOID
+
+******************************************************************************/
+#if !defined(RGX_FEATURE_S7_CACHE_HIERARCHY)
+
+#define RGX_INIT_SLC _RGXInitSLC
+
+static IMG_VOID _RGXInitSLC(PVRSRV_RGXDEV_INFO	*psDevInfo)
+{
+	IMG_UINT32	ui32Reg;
+	IMG_UINT32	ui32RegVal;
+
+#if defined(FIX_HW_BRN_36492)
+	/* Because the WA for this BRN forbids using SLC reset, need to inval it instead */
+	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Invalidate the SLC");
+	OSWriteHWReg32(psDevInfo->pvRegsBaseKM, 
+			RGX_CR_SLC_CTRL_FLUSH_INVAL, RGX_CR_SLC_CTRL_FLUSH_INVAL_ALL_EN);
+	PDUMPREG32(RGX_PDUMPREG_NAME, 
+			RGX_CR_SLC_CTRL_FLUSH_INVAL, RGX_CR_SLC_CTRL_FLUSH_INVAL_ALL_EN, 
+			PDUMP_FLAGS_CONTINUOUS);
+
+	/* poll for completion */
+	PVRSRVPollForValueKM((IMG_UINT32 *)((IMG_UINT8*)psDevInfo->pvRegsBaseKM + RGX_CR_SLC_STATUS0),
+							 0x0,
+							 RGX_CR_SLC_STATUS0_INVAL_PENDING_EN);
+
+	PDUMPREGPOL(RGX_PDUMPREG_NAME,
+				RGX_CR_SLC_STATUS0,
+				0x0,
+				RGX_CR_SLC_STATUS0_INVAL_PENDING_EN,
+				PDUMP_FLAGS_CONTINUOUS,
+				PDUMP_POLL_OPERATOR_EQUAL);
+#endif
+	 
+	if (!PVRSRVSystemSnoopingOfCPUCache() && !PVRSRVSystemSnoopingOfDeviceCache())
+	{
+		PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "System has NO cache snooping");
+	}
+	else
+	{
+		if (PVRSRVSystemSnoopingOfCPUCache())
+		{
+			PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "System has CPU cache snooping");
+		}
+		if (PVRSRVSystemSnoopingOfDeviceCache())
+		{
+			PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "System has DEVICE cache snooping");
+		}
+	}
+
+#if (RGX_FEATURE_SLC_SIZE_IN_BYTES < (128*1024))
+	/*
+	 * SLC Bypass control
+	 */
+	ui32Reg = RGX_CR_SLC_CTRL_BYPASS;
+
+	/* Bypass SLC for textures if the SLC size is less than 128kB */
+	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Bypass SLC for TPU");
+	ui32RegVal = RGX_CR_SLC_CTRL_BYPASS_REQ_TPU_EN;
+
+	OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ui32Reg, ui32RegVal);
+	PDUMPREG32(RGX_PDUMPREG_NAME, ui32Reg, ui32RegVal, PDUMP_FLAGS_CONTINUOUS);
+#endif
+
+	/*
+	 * SLC Bypass control
+	 */
+	ui32Reg = RGX_CR_SLC_CTRL_MISC;
+	ui32RegVal = RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_PVR_HASH1;
+
+	/* Bypass burst combiner if SLC line size is smaller than 1024 bits */
+#if (RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS < 1024)
+	ui32RegVal |= RGX_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_EN;
+#endif
+	OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ui32Reg, ui32RegVal);
+	PDUMPREG32(RGX_PDUMPREG_NAME, ui32Reg, ui32RegVal, PDUMP_FLAGS_CONTINUOUS);
+
+}
+#endif /* RGX_FEATURE_S7_CACHE_HIERARCHY */
+
+
+/*!
+*******************************************************************************
+
+ @Function	RGXInitSLC3
+
+ @Description Initialise RGX SLC3
+
+ @Input psDevInfo - device info structure
+
+ @Return   IMG_VOID
+
+******************************************************************************/
+#if defined(RGX_FEATURE_S7_CACHE_HIERARCHY)
+
+#define RGX_INIT_SLC _RGXInitSLC3
+
+static IMG_VOID _RGXInitSLC3(PVRSRV_RGXDEV_INFO	*psDevInfo)
+{
+	IMG_UINT32	ui32Reg;
+	IMG_UINT32	ui32RegVal;
+
+
+#if defined(HW_ERN_51468)
+    /*
+     * SLC control
+     */
+	ui32Reg = RGX_CR_SLC3_CTRL_MISC;
+	ui32RegVal = RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_WEAVED_HASH;
+	OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ui32Reg, ui32RegVal);
+	PDUMPREG32(RGX_PDUMPREG_NAME, ui32Reg, ui32RegVal, PDUMP_FLAGS_CONTINUOUS);
+
+#else
+
+    /*
+     * SLC control
+     */
+	ui32Reg = RGX_CR_SLC3_CTRL_MISC;
+	ui32RegVal = RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_SCRAMBLE_PVR_HASH;
+	OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ui32Reg, ui32RegVal);
+	PDUMPREG32(RGX_PDUMPREG_NAME, ui32Reg, ui32RegVal, PDUMP_FLAGS_CONTINUOUS);
+
+	/*
+	 * SLC scramble bits
+	 */
+	{
+		IMG_UINT32 i;
+		IMG_UINT32 aui32ScrambleRegs[] = {
+		    RGX_CR_SLC3_SCRAMBLE, 
+		    RGX_CR_SLC3_SCRAMBLE2,
+		    RGX_CR_SLC3_SCRAMBLE3,
+		    RGX_CR_SLC3_SCRAMBLE4};
+
+		IMG_UINT64 aui64ScrambleValues[] = {
+#if (RGX_FEATURE_SLC_BANKS == 2)
+		   IMG_UINT64_C(0x6965a99a55696a6a),
+		   IMG_UINT64_C(0x6aa9aa66959aaa9a),
+		   IMG_UINT64_C(0x9a5665965a99a566),
+		   IMG_UINT64_C(0x5aa69596aa66669a)
+#elif (RGX_FEATURE_SLC_BANKS == 4)
+		   IMG_UINT64_C(0xc6788d722dd29ce4),
+		   IMG_UINT64_C(0x7272e4e11b279372),
+		   IMG_UINT64_C(0x87d872d26c6c4be1),
+		   IMG_UINT64_C(0xe1b4878d4b36e478)
+#elif (RGX_FEATURE_SLC_BANKS == 8)
+		   IMG_UINT64_C(0x859d6569e8fac688),
+		   IMG_UINT64_C(0xf285e1eae4299d33),
+		   IMG_UINT64_C(0x1e1af2be3c0aa447)
+#endif        
+		};
+
+		for (i = 0;
+		     i < sizeof(aui64ScrambleValues)/sizeof(IMG_UINT64);
+			 i++)
+		{
+			IMG_UINT32 ui32Reg = aui32ScrambleRegs[i];
+			IMG_UINT64 ui64Value = aui64ScrambleValues[i];
+
+			OSWriteHWReg64(psDevInfo->pvRegsBaseKM, ui32Reg, ui64Value);
+			PDUMPREG64(RGX_PDUMPREG_NAME, ui32Reg, ui64Value, PDUMP_FLAGS_CONTINUOUS);
+		}
+	}
+#endif
+
+#if defined(HW_ERN_45914)
+	/* Disable the forced SLC coherency which the hardware enables for compatibility with older pdumps */
+	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "RGXStart: disable forced SLC coherency");
+	OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_GARTEN_SLC, 0);
+	PDUMPREG64(RGX_PDUMPREG_NAME, RGX_CR_GARTEN_SLC, 0, PDUMP_FLAGS_CONTINUOUS);
+#endif
+
+}
+#endif
+
+
+/*!
+*******************************************************************************
+
+ @Function	RGXInitBIF
+
+ @Description Initialise RGX BIF
+
+ @Input psDevInfo - device info structure
+
+ @Return   IMG_VOID
+
+******************************************************************************/
+static IMG_VOID RGXInitBIF(PVRSRV_RGXDEV_INFO	*psDevInfo)
+{
+	PVRSRV_ERROR	eError;
+	IMG_DEV_PHYADDR sPCAddr;
+
+	/*
+		Acquire the address of the Kernel Page Catalogue.
+	*/
+	eError = MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx, &sPCAddr);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	/* Sanity check Cat-Base address */
+	PVR_ASSERT((((sPCAddr.uiAddr
+			>> psDevInfo->ui32KernelCatBaseAlignShift)
+			<< psDevInfo->ui32KernelCatBaseShift)
+			& ~psDevInfo->ui64KernelCatBaseMask) == 0x0UL);
+
+	/*
+		Write the kernel catalogue base.
+	*/
+	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "RGX firmware MMU Page Catalogue");
+
+	if (psDevInfo->ui32KernelCatBaseIdReg != -1)
+	{
+		/* Set the mapping index */
+		OSWriteHWReg32(psDevInfo->pvRegsBaseKM,
+						psDevInfo->ui32KernelCatBaseIdReg,
+						psDevInfo->ui32KernelCatBaseId);
+
+		/* pdump mapping context */
+		PDUMPREG32(RGX_PDUMPREG_NAME,
+							psDevInfo->ui32KernelCatBaseIdReg,
+							psDevInfo->ui32KernelCatBaseId,
+							PDUMP_FLAGS_CONTINUOUS);
+	}
+
+	if (psDevInfo->ui32KernelCatBaseWordSize == 8)
+	{
+		/* Write the cat-base address */
+		OSWriteHWReg64(psDevInfo->pvRegsBaseKM,
+						psDevInfo->ui32KernelCatBaseReg,
+						((sPCAddr.uiAddr
+							>> psDevInfo->ui32KernelCatBaseAlignShift)
+							<< psDevInfo->ui32KernelCatBaseShift)
+							& psDevInfo->ui64KernelCatBaseMask);
+	}
+	else
+	{
+		/* Write the cat-base address */
+		OSWriteHWReg32(psDevInfo->pvRegsBaseKM,
+						psDevInfo->ui32KernelCatBaseReg,
+						(IMG_UINT32)(((sPCAddr.uiAddr
+							>> psDevInfo->ui32KernelCatBaseAlignShift)
+							<< psDevInfo->ui32KernelCatBaseShift)
+							& psDevInfo->ui64KernelCatBaseMask));
+	}
+
+	/* pdump catbase address */
+	MMU_PDumpWritePageCatBase(psDevInfo->psKernelMMUCtx,
+							  RGX_PDUMPREG_NAME,
+							  psDevInfo->ui32KernelCatBaseReg,
+							  psDevInfo->ui32KernelCatBaseWordSize,
+							  psDevInfo->ui32KernelCatBaseAlignShift,
+							  psDevInfo->ui32KernelCatBaseShift,
+							  PDUMP_FLAGS_CONTINUOUS);
+
+	/*
+	 * Trusted META boot
+	 */
+#if defined(SUPPORT_TRUSTED_DEVICE)
+	#if defined(TRUSTED_DEVICE_DEFAULT_ENABLED)
+	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "RGXInitBIF: Trusted Device enabled");
+	OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_TRUST, RGX_CR_BIF_TRUST_ENABLE_EN);
+	PDUMPREG32(RGX_PDUMPREG_NAME, RGX_CR_BIF_TRUST, RGX_CR_BIF_TRUST_ENABLE_EN, PDUMP_FLAGS_CONTINUOUS);
+	OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_SYS_BUS_SECURE, RGX_CR_SYS_BUS_SECURE_ENABLE_EN);
+	PDUMPREG32(RGX_PDUMPREG_NAME, RGX_CR_SYS_BUS_SECURE, RGX_CR_SYS_BUS_SECURE_ENABLE_EN, PDUMP_FLAGS_CONTINUOUS);
+	#else /* ! defined(TRUSTED_DEVICE_DEFAULT_ENABLED) */
+	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "RGXInitBIF: Trusted Device disabled");
+	OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_TRUST, 0);
+	PDUMPREG32(RGX_PDUMPREG_NAME, RGX_CR_BIF_TRUST, 0, PDUMP_FLAGS_CONTINUOUS);
+	OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_SYS_BUS_SECURE, 0);
+	PDUMPREG32(RGX_PDUMPREG_NAME, RGX_CR_SYS_BUS_SECURE, 0, PDUMP_FLAGS_CONTINUOUS);
+	#endif /* TRUSTED_DEVICE_DEFAULT_ENABLED */
+#endif
+
+}
+
+#if defined(RGX_FEATURE_AXI_ACELITE)
+/*!
+*******************************************************************************
+
+ @Function	RGXAXIACELiteInit
+
+ @Description Initialise AXI-ACE Lite interface
+
+ @Input psDevInfo - device info structure
+
+ @Return   IMG_VOID
+
+******************************************************************************/
+static IMG_VOID RGXAXIACELiteInit(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	IMG_UINT32 ui32RegAddr;
+	IMG_UINT64 ui64RegVal;
+
+	ui32RegAddr = RGX_CR_AXI_ACE_LITE_CONFIGURATION;
+
+	/* Setup AXI-ACE config. Set everything to outer cache */
+	ui64RegVal =   (3U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_NON_SNOOPING_SHIFT) |
+				   (3U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_NON_SNOOPING_SHIFT) |
+				   (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_CACHE_MAINTENANCE_SHIFT)  |
+				   (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_COHERENT_SHIFT) |
+				   (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_COHERENT_SHIFT) |
+				   (((IMG_UINT64) 1) << RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITELINEUNIQUE_SHIFT) |
+				   (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_COHERENT_SHIFT) |
+				   (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_COHERENT_SHIFT) |
+				   (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_CACHE_MAINTENANCE_SHIFT);
+
+	OSWriteHWReg64(psDevInfo->pvRegsBaseKM,
+				   ui32RegAddr,
+				   ui64RegVal);
+	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Init AXI-ACE interface");
+	PDUMPREG64(RGX_PDUMPREG_NAME, ui32RegAddr, ui64RegVal, PDUMP_FLAGS_CONTINUOUS);
+}
+#endif
+
+
+/*!
+*******************************************************************************
+
+ @Function	RGXStart
+
+ @Description
+
+ (client invoked) chip-reset and initialisation
+
+ @Input psDevInfo - device info structure
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR RGXStart(PVRSRV_RGXDEV_INFO	*psDevInfo, PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	PVRSRV_ERROR	eError = PVRSRV_OK;
+	RGXFWIF_INIT	*psRGXFWInit;
+
+#if defined(FIX_HW_BRN_37453)
+	/* Force all clocks on*/
+	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "RGXStart: force all clocks on");
+	OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_CLK_CTRL, RGX_CR_CLK_CTRL_ALL_ON);
+	PDUMPREG64(RGX_PDUMPREG_NAME, RGX_CR_CLK_CTRL, RGX_CR_CLK_CTRL_ALL_ON, PDUMP_FLAGS_CONTINUOUS);
+#endif
+
+#if defined(SUPPORT_SHARED_SLC)	&& !defined(FIX_HW_BRN_36492)
+	/* When the SLC is shared, the SLC reset is performed by the System layer when calling
+	 * RGXInitSLC (before any device uses it), therefore mask out the SLC bit to avoid
+	 * soft_resetting it here. If HW_BRN_36492, the bit is already masked out. 
+	 */
+#define	RGX_CR_SOFT_RESET_ALL	(RGX_CR_SOFT_RESET_MASKFULL ^ RGX_CR_SOFT_RESET_SLC_EN)
+	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "RGXStart: Shared SLC (don't reset SLC as part of RGX reset)");
+#else
+#define	RGX_CR_SOFT_RESET_ALL	(RGX_CR_SOFT_RESET_MASKFULL)
+#endif
+
+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE)
+	/* Set RGX in soft-reset */
+	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "RGXStart: soft reset assert step 1");
+	OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET, RGX_S7_SOFT_RESET_DUSTS);
+	PDUMPREG64(RGX_PDUMPREG_NAME, RGX_CR_SOFT_RESET, RGX_S7_SOFT_RESET_DUSTS, PDUMP_FLAGS_CONTINUOUS);
+
+	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "RGXStart: soft reset assert step 2");
+	OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET, RGX_S7_SOFT_RESET_JONES_ALL | RGX_S7_SOFT_RESET_DUSTS);
+	PDUMPREG64(RGX_PDUMPREG_NAME, RGX_CR_SOFT_RESET, RGX_S7_SOFT_RESET_JONES_ALL | RGX_S7_SOFT_RESET_DUSTS, PDUMP_FLAGS_CONTINUOUS);
+
+	OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET2, RGX_S7_SOFT_RESET2);
+	PDUMPREG64(RGX_PDUMPREG_NAME, RGX_CR_SOFT_RESET2, RGX_S7_SOFT_RESET2, PDUMP_FLAGS_CONTINUOUS);
+
+	/* Read soft-reset to fence previous write in order to clear the SOCIF pipeline */
+	(IMG_VOID) OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET);
+	PDUMPREGREAD64(RGX_PDUMPREG_NAME, RGX_CR_SOFT_RESET, PDUMP_FLAGS_CONTINUOUS);
+
+	/* Take everything out of reset but META */
+	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "RGXStart: soft reset de-assert step 1 excluding META");
+	OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET, RGX_S7_SOFT_RESET_DUSTS | RGX_CR_SOFT_RESET_GARTEN_EN);
+	PDUMPREG64(RGX_PDUMPREG_NAME, RGX_CR_SOFT_RESET, RGX_S7_SOFT_RESET_DUSTS | RGX_CR_SOFT_RESET_GARTEN_EN, PDUMP_FLAGS_CONTINUOUS);
+
+	OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET2, 0x0);
+	PDUMPREG64(RGX_PDUMPREG_NAME, RGX_CR_SOFT_RESET2, 0x0, PDUMP_FLAGS_CONTINUOUS);
+	(IMG_VOID) OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET);
+	PDUMPREGREAD64(RGX_PDUMPREG_NAME, RGX_CR_SOFT_RESET, PDUMP_FLAGS_CONTINUOUS);
+
+	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "RGXStart: soft reset de-assert step 2 excluding META");
+	OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_GARTEN_EN);
+	PDUMPREG64(RGX_PDUMPREG_NAME, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_GARTEN_EN, PDUMP_FLAGS_CONTINUOUS);
+	(IMG_VOID) OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET);
+	PDUMPREGREAD64(RGX_PDUMPREG_NAME, RGX_CR_SOFT_RESET, PDUMP_FLAGS_CONTINUOUS);
+#else
+	/* Set RGX in soft-reset */
+	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "RGXStart: soft reset everything");
+	OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_ALL);
+	PDUMPREG64(RGX_PDUMPREG_NAME, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_ALL, PDUMP_FLAGS_CONTINUOUS);
+
+	/* Take Rascal and Dust out of reset */
+	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "RGXStart: Rascal and Dust out of reset");
+	OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_ALL ^ RGX_CR_SOFT_RESET_RASCALDUSTS_EN);
+	PDUMPREG64(RGX_PDUMPREG_NAME, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_ALL ^ RGX_CR_SOFT_RESET_RASCALDUSTS_EN, PDUMP_FLAGS_CONTINUOUS);
+
+	/* Read soft-reset to fence previous write in order to clear the SOCIF pipeline */
+	(IMG_VOID) OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET);
+	PDUMPREGREAD64(RGX_PDUMPREG_NAME, RGX_CR_SOFT_RESET, PDUMP_FLAGS_CONTINUOUS);
+
+	/* Take everything out of reset but META */
+	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "RGXStart: Take everything out of reset but META");
+	OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_GARTEN_EN);
+	PDUMPREG64(RGX_PDUMPREG_NAME, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_GARTEN_EN, PDUMP_FLAGS_CONTINUOUS);
+#endif
+
+#if ! defined(FIX_HW_BRN_37453)
+	/*
+	 * Enable clocks.
+	 */
+	RGXEnableClocks(psDevInfo);
+#endif
+
+	/*
+	 * Initialise SLC.
+	 */
+#if !defined(SUPPORT_SHARED_SLC)	
+	RGX_INIT_SLC(psDevInfo);
+#endif
+
+#if !defined(SUPPORT_META_SLAVE_BOOT)
+	/* Configure META to Master boot */
+	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "RGXStart: META Master boot");
+	OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_BOOT, RGX_CR_META_BOOT_MODE_EN);
+	PDUMPREG32(RGX_PDUMPREG_NAME, RGX_CR_META_BOOT, RGX_CR_META_BOOT_MODE_EN, PDUMP_FLAGS_CONTINUOUS);
+#endif
+
+	/* Set Garten IDLE to META idle and Set the Garten Wrapper BIF Fence address */
+	{
+		IMG_UINT64 ui64GartenConfig;
+
+		/* Garten IDLE bit controlled by META */
+		ui64GartenConfig = RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META;
+
+		/* Set fence addr to the bootloader */
+		ui64GartenConfig |= (RGXFW_BOOTLDR_DEVV_ADDR & ~RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_ADDR_CLRMSK);
+
+		/* Set PC = 0 for fences */
+		ui64GartenConfig &= RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_CLRMSK;
+
+#if defined(RGX_FEATURE_SLC_VIVT)
+#if !defined(FIX_HW_BRN_51281)
+		/* Ensure the META fences go all the way to external memory */
+		ui64GartenConfig |= RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_SLC_COHERENT_EN;    /* SLC Coherent 1 */
+		ui64GartenConfig &= RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PERSISTENCE_CLRMSK; /* SLC Persistence 0 */
+#endif
+
+#else 
+		/* Set SLC DM=META */
+		ui64GartenConfig |= ((IMG_UINT64) RGXFW_SEGMMU_META_DM_ID) << RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_DM_SHIFT;
+
+#endif
+
+		PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "RGXStart: Configure META wrapper");
+		OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_MTS_GARTEN_WRAPPER_CONFIG, ui64GartenConfig);
+		PDUMPREG64(RGX_PDUMPREG_NAME, RGX_CR_MTS_GARTEN_WRAPPER_CONFIG, ui64GartenConfig, PDUMP_FLAGS_CONTINUOUS);
+	}
+
+#if defined(RGX_FEATURE_AXI_ACELITE)
+	/*
+		We must init the AXI-ACE interface before 1st BIF transaction
+	*/
+	RGXAXIACELiteInit(psDevInfo);
+#endif
+
+	/*
+	 * Initialise BIF.
+	 */
+	RGXInitBIF(psDevInfo);
+
+	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "RGXStart: Take META out of reset");
+	/* need to wait for at least 16 cycles before taking meta out of reset ... */
+	PVRSRVSystemWaitCycles(psDevConfig, 32);
+	PDUMPIDLWITHFLAGS(32, PDUMP_FLAGS_CONTINUOUS);
+	
+	OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET, 0x0);
+	PDUMPREG64(RGX_PDUMPREG_NAME, RGX_CR_SOFT_RESET, 0x0, PDUMP_FLAGS_CONTINUOUS);
+
+	(IMG_VOID) OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET);
+	PDUMPREGREAD64(RGX_PDUMPREG_NAME, RGX_CR_SOFT_RESET, PDUMP_FLAGS_CONTINUOUS);
+	
+	/* ... and afterwards */
+	PVRSRVSystemWaitCycles(psDevConfig, 32);
+	PDUMPIDLWITHFLAGS(32, PDUMP_FLAGS_CONTINUOUS);
+#if defined(FIX_HW_BRN_37453)
+	/* we rely on the 32 clk sleep from above */
+
+	/* switch clocks back to auto */
+	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "RGXStart: set clocks back to auto");
+	OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_CLK_CTRL, RGX_CR_CLK_CTRL_ALL_AUTO);
+	PDUMPREG64(RGX_PDUMPREG_NAME, RGX_CR_CLK_CTRL, RGX_CR_CLK_CTRL_ALL_AUTO, PDUMP_FLAGS_CONTINUOUS);
+#endif
+
+	/*
+	 * Start the firmware.
+	 */
+#if defined(SUPPORT_META_SLAVE_BOOT)
+	RGXStartFirmware(psDevInfo);
+#else
+	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "RGXStart: RGX Firmware Master boot Start");
+#endif
+	
+	OSMemoryBarrier();
+
+	/* Check whether the FW has started by polling on bFirmwareStarted flag */
+	eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc,
+									  (IMG_VOID **)&psRGXFWInit);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXStart: Failed to acquire kernel fw if ctl (%u)", eError));
+		return eError;
+	}
+
+	if (PVRSRVPollForValueKM((IMG_UINT32 *)&psRGXFWInit->bFirmwareStarted,
+							 IMG_TRUE,
+							 0xFFFFFFFF) != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXStart: Polling for 'FW started' flag failed."));
+		eError = PVRSRV_ERROR_TIMEOUT;
+		DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc);
+		return eError;
+	}
+
+#if defined(PDUMP)
+	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Wait for the Firmware to start.");
+	eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+											offsetof(RGXFWIF_INIT, bFirmwareStarted),
+											IMG_TRUE,
+											0xFFFFFFFFU,
+											PDUMP_POLL_OPERATOR_EQUAL,
+											PDUMP_FLAGS_CONTINUOUS);
+	
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXStart: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+		DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc);
+		return eError;
+	}
+#endif
+
+	SetFirmwareStartTime(psRGXFWInit->ui32FirmwareStartedTimeStamp);
+
+	DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc);
+
+	return eError;
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function	RGXStop
+
+ @Description Stop RGX in preparation for power down
+
+ @Input psDevInfo - RGX device info
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR RGXStop(PVRSRV_RGXDEV_INFO	*psDevInfo)
+
+{
+	PVRSRV_ERROR		eError; 
+
+
+	eError = RGXRunScript(psDevInfo, psDevInfo->psScripts->asDeinitCommands, RGX_MAX_DEINIT_COMMANDS, PDUMP_FLAGS_CONTINUOUS, IMG_NULL);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXStop: RGXRunScript failed (%d)", eError));
+		return eError;
+	}
+
+
+	return PVRSRV_OK;
+}
+
+/*
+	RGXInitSLC
+*/
+#if defined(SUPPORT_SHARED_SLC)
+PVRSRV_ERROR RGXInitSLC(IMG_HANDLE hDevHandle)
+{
+
+	PVRSRV_DEVICE_NODE	*psDeviceNode = hDevHandle;
+	PVRSRV_RGXDEV_INFO	*psDevInfo;
+
+	if (psDeviceNode == IMG_NULL)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psDevInfo = psDeviceNode->pvDevice;
+
+#if !defined(FIX_HW_BRN_36492)
+
+	/* reset the SLC */
+	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "RGXInitSLC: soft reset SLC");
+	OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_SLC_EN);
+	PDUMPREG64(RGX_PDUMPREG_NAME, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_SLC_EN, PDUMP_FLAGS_CONTINUOUS);
+
+	/* Read soft-reset to fence previous write in order to clear the SOCIF pipeline */
+	OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET);
+	PDUMPREGREAD64(RGX_PDUMPREG_NAME, RGX_CR_SOFT_RESET, PDUMP_FLAGS_CONTINUOUS);
+
+	/* Take everything out of reset */
+	OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET, 0);
+	PDUMPREG64(RGX_PDUMPREG_NAME, RGX_CR_SOFT_RESET, 0, PDUMP_FLAGS_CONTINUOUS);
+#endif
+
+	RGX_INIT_SLC(psDevInfo);
+
+	return PVRSRV_OK;
+}
+#endif
+
+
+static IMG_VOID _RGXUpdateGPUUtilStats(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	RGXFWIF_GPU_UTIL_FWCB *psUtilFWCb;
+	IMG_UINT64 *paui64StatsCounters;
+	IMG_UINT64 ui64LastPeriod;
+	IMG_UINT64 ui64LastState;
+	IMG_UINT64 ui64LastTime;
+	IMG_UINT64 ui64TimeNow;
+
+	psUtilFWCb = psDevInfo->psRGXFWIfGpuUtilFWCb;
+	paui64StatsCounters = &psUtilFWCb->aui64StatsCounters[0];
+
+	OSLockAcquire(psDevInfo->hGPUUtilLock);
+
+	ui64TimeNow = RGXFWIF_GPU_UTIL_GET_TIME(OSClockns64());
+
+	/* Update counters to account for the time since the last update */
+	ui64LastState  = RGXFWIF_GPU_UTIL_GET_STATE(psUtilFWCb->ui64LastWord);
+	ui64LastTime   = RGXFWIF_GPU_UTIL_GET_TIME(psUtilFWCb->ui64LastWord);
+	ui64LastPeriod = RGXFWIF_GPU_UTIL_GET_PERIOD(ui64TimeNow, ui64LastTime);
+	paui64StatsCounters[ui64LastState] += ui64LastPeriod;
+
+	/* Update state and time of the latest update */
+	psUtilFWCb->ui64LastWord = RGXFWIF_GPU_UTIL_MAKE_WORD(ui64TimeNow, ui64LastState);
+
+	OSLockRelease(psDevInfo->hGPUUtilLock);
+}
+
+extern PVRSRV_DATA	*gpsPVRSRVData;
+
+static
+PVRSRV_ERROR IMG_CALLCONV PollForValueKM2 (PVRSRV_RGXDEV_INFO	*psDevInfo,
+										  IMG_UINT32			ui32Mask,
+										  IMG_UINT32			ui32Timeoutus,
+										  IMG_UINT32			ui32PollPeriodus,
+										  IMG_BOOL				bAllowPreemption)
+{
+	IMG_UINT32	ui32ActualValue = 0xFFFFFFFFU; /* Initialiser only required to prevent incorrect warning */
+
+	if (bAllowPreemption)
+	{
+		PVR_ASSERT(ui32PollPeriodus >= 1000);
+	}
+
+	LOOP_UNTIL_TIMEOUT(ui32Timeoutus)
+	{
+		ui32ActualValue = (g_ui32HostSampleIRQCount & ui32Mask);
+		if(ui32ActualValue == psDevInfo->psRGXFWIfTraceBuf->ui32InterruptCount)
+		{
+			return PVRSRV_OK;
+		}
+
+		if (gpsPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK)
+		{
+			return PVRSRV_ERROR_TIMEOUT;
+		}
+
+		if (bAllowPreemption)
+		{
+			OSSleepms(ui32PollPeriodus / 1000);
+		}
+		else
+		{
+			OSWaitus(ui32PollPeriodus);
+		}
+	} END_LOOP_UNTIL_TIMEOUT();
+
+	PVR_DPF((PVR_DBG_ERROR,"PollForValueKM: Timeout. Expected 0x%x but found 0x%x (mask 0x%x).",
+			psDevInfo->psRGXFWIfTraceBuf->ui32InterruptCount, ui32ActualValue, ui32Mask));
+
+	return PVRSRV_ERROR_TIMEOUT;
+}
+
+/*
+	RGXPrePowerState
+*/
+PVRSRV_ERROR RGXPrePowerState (IMG_HANDLE				hDevHandle,
+							   PVRSRV_DEV_POWER_STATE	eNewPowerState,
+							   PVRSRV_DEV_POWER_STATE	eCurrentPowerState,
+							   IMG_BOOL					bForced)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	if ((eNewPowerState != eCurrentPowerState) &&
+		(eNewPowerState != PVRSRV_DEV_POWER_STATE_ON))
+	{
+		PVRSRV_DEVICE_NODE	*psDeviceNode = hDevHandle;
+		PVRSRV_RGXDEV_INFO	*psDevInfo = psDeviceNode->pvDevice;
+		RGXFWIF_KCCB_CMD	sPowCmd;
+		RGXFWIF_TRACEBUF	*psFWTraceBuf = psDevInfo->psRGXFWIfTraceBuf;
+		IMG_UINT32			ui32DM;
+
+		/* Send the Power off request to the FW */
+		sPowCmd.eCmdType = RGXFWIF_KCCB_CMD_POW;
+		sPowCmd.uCmdData.sPowData.ePowType = RGXFWIF_POW_OFF_REQ;
+		sPowCmd.uCmdData.sPowData.uPoweReqData.bForced = bForced;
+
+		SyncPrimSet(psDevInfo->psPowSyncPrim, 0);
+
+		/* Send one pow command to each DM to make sure we flush all the DMs pipelines */
+		for (ui32DM = 0; ui32DM < RGXFWIF_DM_MAX; ui32DM++)
+		{
+			eError = RGXSendCommandRaw(psDevInfo,
+					ui32DM,
+					&sPowCmd,
+					sizeof(sPowCmd),
+					0);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,"RGXPrePowerState: Failed to send Power off request for DM%d", ui32DM));
+				return eError;
+			}
+		}
+
+		/* Wait for the firmware to complete processing. It cannot use PVRSRVWaitForValueKM as it relies 
+		   on the EventObject which is signalled in this MISR */
+		eError = PVRSRVPollForValueKM(psDevInfo->psPowSyncPrim->pui32LinAddr, 0x1, 0xFFFFFFFF);
+
+		/* Check the Power state after the answer */
+		if (eError == PVRSRV_OK)	
+		{
+			/* Finally, de-initialise some registers. */
+			if (psFWTraceBuf->ePowState == RGXFWIF_POW_OFF)
+			{
+#if !defined(NO_HARDWARE)
+
+				eError = PollForValueKM2(psDevInfo,
+									          0xffffffff,
+									          MAX_HW_TIME_US,
+									          MAX_HW_TIME_US/WAIT_TRY_COUNT,
+									          IMG_TRUE);
+#endif /* NO_HARDWARE */
+
+				if (eError != PVRSRV_OK)
+				{
+					PVR_DPF((PVR_DBG_ERROR,"RGXPrePowerState: Wait for pending interrupts failed. Host:%d, FW: %d",
+					g_ui32HostSampleIRQCount,
+					psDevInfo->psRGXFWIfTraceBuf->ui32InterruptCount));
+				}
+				else
+				{
+					/* Update GPU frequency and timer correlation related data */
+					RGXGPUFreqCalibratePrePowerState(psDeviceNode);
+
+					/* Update GPU state counters */
+					_RGXUpdateGPUUtilStats(psDevInfo);
+
+#if defined(PVR_DVFS)
+					eError = SuspendDVFS();
+					if (eError != PVRSRV_OK)
+					{
+						PVR_DPF((PVR_DBG_ERROR,"RGXPostPowerState: Failed to suspend DVFS"));
+						return eError;
+					}
+#endif
+					eError = RGXStop(psDevInfo);
+					if (eError != PVRSRV_OK)
+					{
+						PVR_DPF((PVR_DBG_ERROR,"RGXPrePowerState: RGXStop failed (%s)", PVRSRVGetErrorStringKM(eError)));
+						eError = PVRSRV_ERROR_DEVICE_POWER_CHANGE_FAILURE;
+					}
+					psDevInfo->bIgnoreFurtherIRQs = IMG_TRUE;
+				}
+			}
+			else
+			{
+				/* the sync was updated bu the pow state isn't off -> the FW denied the transition */
+				eError = PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED;
+
+				if (bForced)
+				{	/* It is an error for a forced request to be denied */
+					PVR_DPF((PVR_DBG_ERROR,"RGXPrePowerState: Failure to power off during a forced power off. FW: %d", psFWTraceBuf->ePowState));
+				}
+			}
+		}
+		else if (eError == PVRSRV_ERROR_TIMEOUT)
+		{
+			/* timeout waiting for the FW to ack the request: return timeout */
+			PVR_DPF((PVR_DBG_WARNING,"RGXPrePowerState: Timeout waiting for powoff ack from the FW"));
+		}
+		else
+		{
+			PVR_DPF((PVR_DBG_ERROR,"RGXPrePowerState: Error waiting for powoff ack from the FW (%s)", PVRSRVGetErrorStringKM(eError)));
+			eError = PVRSRV_ERROR_DEVICE_POWER_CHANGE_FAILURE;
+		}
+
+	}
+
+	return eError;
+}
+
+
+/*
+	RGXPostPowerState
+*/
+PVRSRV_ERROR RGXPostPowerState (IMG_HANDLE				hDevHandle,
+								PVRSRV_DEV_POWER_STATE	eNewPowerState,
+								PVRSRV_DEV_POWER_STATE	eCurrentPowerState,
+								IMG_BOOL				bForced)
+{
+	if ((eNewPowerState != eCurrentPowerState) &&
+		(eCurrentPowerState != PVRSRV_DEV_POWER_STATE_ON))
+	{
+		PVRSRV_ERROR		 eError;
+		PVRSRV_DEVICE_NODE	 *psDeviceNode = hDevHandle;
+		PVRSRV_RGXDEV_INFO	 *psDevInfo = psDeviceNode->pvDevice;
+		PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig;
+
+		if (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_OFF)
+		{
+			/* Update GPU frequency and timer correlation related data */
+			RGXGPUFreqCalibratePostPowerState(psDeviceNode);
+
+			/* Update GPU state counters */
+			_RGXUpdateGPUUtilStats(psDevInfo);
+
+			/*
+				Run the RGX init script.
+			*/
+			eError = RGXStart(psDevInfo, psDevConfig);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,"RGXPostPowerState: RGXStart failed"));
+				return eError;
+			}
+
+			/* Coming up from off, re-allow RGX interrupts.  */
+			psDevInfo->bIgnoreFurtherIRQs = IMG_FALSE;
+
+#if defined(PVR_DVFS)
+			eError = ResumeDVFS();
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,"RGXPostPowerState: Failed to resume DVFS"));
+				return eError;
+			}
+#endif
+		}
+	}
+
+	PDUMPCOMMENT("RGXPostPowerState: Current state: %d, New state: %d", eCurrentPowerState, eNewPowerState);
+
+	return PVRSRV_OK;
+}
+
+
+/*
+	RGXPreClockSpeedChange
+*/
+PVRSRV_ERROR RGXPreClockSpeedChange (IMG_HANDLE				hDevHandle,
+									 PVRSRV_DEV_POWER_STATE	eCurrentPowerState)
+{
+	PVRSRV_ERROR		eError = PVRSRV_OK;
+	PVRSRV_DEVICE_NODE	*psDeviceNode = hDevHandle;
+	PVRSRV_RGXDEV_INFO	*psDevInfo = psDeviceNode->pvDevice;
+	RGX_DATA			*psRGXData = (RGX_DATA*)psDeviceNode->psDevConfig->hDevData;
+	RGXFWIF_TRACEBUF	*psFWTraceBuf = psDevInfo->psRGXFWIfTraceBuf;
+
+	PVR_UNREFERENCED_PARAMETER(psRGXData);
+
+	PVR_DPF((PVR_DBG_MESSAGE,"RGXPreClockSpeedChange: RGX clock speed was %uHz",
+			psRGXData->psRGXTimingInfo->ui32CoreClockSpeed));
+
+    if ((eCurrentPowerState != PVRSRV_DEV_POWER_STATE_OFF)
+		&& (psFWTraceBuf->ePowState != RGXFWIF_POW_OFF))
+	{
+		/* Update GPU frequency and timer correlation related data */
+		RGXGPUFreqCalibratePreClockSpeedChange(psDeviceNode);
+	}
+
+	return eError;
+}
+
+
+/*
+	RGXPostClockSpeedChange
+*/
+PVRSRV_ERROR RGXPostClockSpeedChange (IMG_HANDLE				hDevHandle,
+									  PVRSRV_DEV_POWER_STATE	eCurrentPowerState)
+{
+	PVRSRV_DEVICE_NODE	*psDeviceNode = hDevHandle;
+	PVRSRV_RGXDEV_INFO	*psDevInfo = psDeviceNode->pvDevice;
+	RGX_DATA			*psRGXData = (RGX_DATA*)psDeviceNode->psDevConfig->hDevData;
+	PVRSRV_ERROR		eError = PVRSRV_OK;
+	RGXFWIF_TRACEBUF	*psFWTraceBuf = psDevInfo->psRGXFWIfTraceBuf;
+	IMG_UINT32 		ui32NewClockSpeed = psRGXData->psRGXTimingInfo->ui32CoreClockSpeed;
+
+	/* Update runtime configuration with the new value */
+	psDevInfo->psRGXFWIfRuntimeCfg->ui32CoreClockSpeed = ui32NewClockSpeed;
+
+    if ((eCurrentPowerState != PVRSRV_DEV_POWER_STATE_OFF) 
+		&& (psFWTraceBuf->ePowState != RGXFWIF_POW_OFF))
+	{
+		RGXFWIF_KCCB_CMD	sCOREClkSpeedChangeCmd;
+
+		RGXGPUFreqCalibratePostClockSpeedChange(psDeviceNode, ui32NewClockSpeed);
+
+		sCOREClkSpeedChangeCmd.eCmdType = RGXFWIF_KCCB_CMD_CORECLKSPEEDCHANGE;
+		sCOREClkSpeedChangeCmd.uCmdData.sCORECLKSPEEDCHANGEData.ui32NewClockSpeed = ui32NewClockSpeed;
+
+		/* Ensure the new clock speed is written to memory before requesting the FW to read it */
+		OSMemoryBarrier();
+
+		PDUMPCOMMENT("Scheduling CORE clock speed change command");
+
+		PDUMPPOWCMDSTART();
+		eError = RGXSendCommandRaw(psDeviceNode->pvDevice,
+		                           RGXFWIF_DM_GP,
+		                           &sCOREClkSpeedChangeCmd,
+		                           sizeof(sCOREClkSpeedChangeCmd),
+		                           0);
+		PDUMPPOWCMDEND();
+
+		if (eError != PVRSRV_OK)
+		{
+			PDUMPCOMMENT("Scheduling CORE clock speed change command failed");
+			PVR_DPF((PVR_DBG_ERROR, "RGXPostClockSpeedChange: Scheduling KCCB command failed. Error:%u", eError));
+			return eError;
+		}
+ 
+		PVR_DPF((PVR_DBG_MESSAGE,"RGXPostClockSpeedChange: RGX clock speed changed to %uHz",
+				psRGXData->psRGXTimingInfo->ui32CoreClockSpeed));
+	}
+
+	return eError;
+}
+
+
+/*!
+******************************************************************************
+
+ @Function	RGXDustCountChange
+
+ @Description
+
+	Does change of number of DUSTs
+
+ @Input	   hDevHandle : RGX Device Node
+ @Input	   ui32NumberOfDusts : Number of DUSTs to make transition to
+
+ @Return   PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXDustCountChange(IMG_HANDLE				hDevHandle,
+								IMG_UINT32				ui32NumberOfDusts)
+{
+
+	PVRSRV_DEVICE_NODE	*psDeviceNode = hDevHandle;
+	PVRSRV_RGXDEV_INFO	*psDevInfo = psDeviceNode->pvDevice;
+	PVRSRV_ERROR		eError;
+	RGXFWIF_KCCB_CMD 	sDustCountChange;
+	IMG_UINT32			ui32MaxAvailableDusts = RGX_FEATURE_NUM_CLUSTERS / 2;
+
+#if !defined(NO_HARDWARE)
+	RGXFWIF_TRACEBUF 	*psFWTraceBuf = psDevInfo->psRGXFWIfTraceBuf;
+
+	if (psFWTraceBuf->ePowState != RGXFWIF_POW_FORCED_IDLE)
+	{
+		eError = PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED;
+		PVR_DPF((PVR_DBG_ERROR,"RGXDustCountChange: Attempt to change dust count when not IDLE"));
+		return eError;
+	}
+#endif
+
+	PVR_ASSERT(ui32MaxAvailableDusts > 1);
+
+	if (ui32NumberOfDusts > ui32MaxAvailableDusts)
+	{
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		PVR_DPF((PVR_DBG_ERROR, 
+				"RGXDustCountChange: Invalid number of DUSTs (%u) while expecting value within <0,%u>. Error:%u",
+				ui32NumberOfDusts,
+				ui32MaxAvailableDusts,
+				eError));
+		return eError;
+	}
+
+	SyncPrimSet(psDevInfo->psPowSyncPrim, 0);
+
+	sDustCountChange.eCmdType = RGXFWIF_KCCB_CMD_POW;
+	sDustCountChange.uCmdData.sPowData.ePowType = RGXFWIF_POW_NUMDUST_CHANGE;
+	sDustCountChange.uCmdData.sPowData.uPoweReqData.ui32NumOfDusts = ui32NumberOfDusts;
+
+	PDUMPCOMMENT("Scheduling command to change Dust Count to %u", ui32NumberOfDusts);
+	eError = RGXSendCommandRaw(psDeviceNode->pvDevice,
+				RGXFWIF_DM_GP,
+				&sDustCountChange,
+				sizeof(sDustCountChange),
+				0);
+
+	if (eError != PVRSRV_OK)
+	{
+		PDUMPCOMMENT("Scheduling command to change Dust Count failed. Error:%u", eError);
+		PVR_DPF((PVR_DBG_ERROR, "RGXDustCountChange: Scheduling KCCB to change Dust Count failed. Error:%u", eError));
+		return eError;
+	}
+
+	/* Wait for the firmware to answer. */
+	eError = PVRSRVPollForValueKM(psDevInfo->psPowSyncPrim->pui32LinAddr, 0x1, 0xFFFFFFFF);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXDustCountChange: Timeout waiting for idle request"));
+		return eError;
+	}
+
+#if defined(PDUMP)
+	PDUMPCOMMENT("RGXDustCountChange: Poll for Kernel SyncPrim [0x%p] on DM %d ", psDevInfo->psPowSyncPrim->pui32LinAddr, RGXFWIF_DM_GP);
+
+	SyncPrimPDumpPol(psDevInfo->psPowSyncPrim,
+					1,
+					0xffffffff,
+					PDUMP_POLL_OPERATOR_EQUAL,
+					0);
+#endif
+
+	return PVRSRV_OK;
+}
+/*
+ @Function	RGXAPMLatencyChange
+*/
+PVRSRV_ERROR RGXAPMLatencyChange(IMG_HANDLE				hDevHandle,
+				IMG_UINT32				ui32ActivePMLatencyms,
+				IMG_BOOL				bActivePMLatencyPersistant)
+{
+
+	PVRSRV_DEVICE_NODE	*psDeviceNode = hDevHandle;
+	PVRSRV_RGXDEV_INFO	*psDevInfo = psDeviceNode->pvDevice;
+	PVRSRV_ERROR		eError;
+	RGXFWIF_RUNTIME_CFG	*psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg;
+	PVRSRV_DEV_POWER_STATE	ePowerState;
+
+	eError = PVRSRVPowerLock();
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXAPMLatencyChange: Failed to acquire power lock"));
+		return eError;
+	}
+
+	/* Update runtime configuration with the new values */
+	psRuntimeCfg->ui32ActivePMLatencyms = ui32ActivePMLatencyms;
+	psRuntimeCfg->bActivePMLatencyPersistant = bActivePMLatencyPersistant;
+
+	eError = PVRSRVGetDevicePowerState(psDeviceNode->sDevId.ui32DeviceIndex, &ePowerState);
+
+	if ((eError == PVRSRV_OK) && (ePowerState != PVRSRV_DEV_POWER_STATE_OFF))
+	{
+		RGXFWIF_KCCB_CMD	sActivePMLatencyChange;
+		sActivePMLatencyChange.eCmdType = RGXFWIF_KCCB_CMD_POW;
+		sActivePMLatencyChange.uCmdData.sPowData.ePowType = RGXFWIF_POW_APM_LATENCY_CHANGE;
+		sActivePMLatencyChange.uCmdData.sPowData.uPoweReqData.ui32ActivePMLatencyms = ui32ActivePMLatencyms;
+
+		/* Ensure the new APM latency is written to memory before requesting the FW to read it */
+		OSMemoryBarrier();
+
+		PDUMPCOMMENT("Scheduling command to change APM latency to %u", ui32ActivePMLatencyms);
+		eError = RGXSendCommandRaw(psDeviceNode->pvDevice,
+					RGXFWIF_DM_GP,
+					&sActivePMLatencyChange,
+					sizeof(sActivePMLatencyChange),
+					0);
+
+		if (eError != PVRSRV_OK)
+		{
+			PDUMPCOMMENT("Scheduling command to change APM latency failed. Error:%u", eError);
+			PVR_DPF((PVR_DBG_ERROR, "RGXAPMLatencyChange: Scheduling KCCB to change APM latency failed. Error:%u", eError));
+			return eError;
+		}
+	}
+
+	PVRSRVPowerUnlock();
+
+	return PVRSRV_OK;
+}
+
+/*
+	RGXActivePowerRequest
+*/
+PVRSRV_ERROR RGXActivePowerRequest(IMG_HANDLE hDevHandle)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PVRSRV_DEVICE_NODE	*psDeviceNode = hDevHandle;
+
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+	RGXFWIF_TRACEBUF *psFWTraceBuf = psDevInfo->psRGXFWIfTraceBuf;
+
+	OSAcquireBridgeLock();
+	/* NOTE: If this function were to wait for event object attempt should be
+	   made to prevent releasing bridge lock during sleep. Bridge lock should
+	   be held during sleep. */
+
+	/* Powerlock to avoid further requests from racing with the FW hand-shake from now on
+	   (previous kicks to this point are detected by the FW) */
+	eError = PVRSRVPowerLock();
+	if(eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXActivePowerRequest: Failed to acquire PowerLock (device index: %d, error: %s)", 
+					psDeviceNode->sDevId.ui32DeviceIndex,
+					PVRSRVGetErrorStringKM(eError)));
+		goto _RGXActivePowerRequest_PowerLock_failed;
+	}
+
+	/* Check again for IDLE once we have the power lock */
+	if (psFWTraceBuf->ePowState == RGXFWIF_POW_IDLE)
+	{
+
+		psDevInfo->ui32ActivePMReqTotal++;
+
+        SetFirmwareHandshakeIdleTime(RGXReadHWTimerReg(psDevInfo)-psFWTraceBuf->ui64StartIdleTime);
+
+		PDUMPPOWCMDSTART();
+		eError = 
+			PVRSRVSetDevicePowerStateKM(psDeviceNode->sDevId.ui32DeviceIndex,
+					PVRSRV_DEV_POWER_STATE_OFF,
+					IMG_FALSE); /* forced */
+		PDUMPPOWCMDEND();
+
+		if (eError == PVRSRV_OK)
+		{
+			psDevInfo->ui32ActivePMReqOk++;
+		}
+		else if (eError == PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED)
+		{
+			psDevInfo->ui32ActivePMReqDenied++;
+		}
+
+	}
+
+	PVRSRVPowerUnlock();
+
+_RGXActivePowerRequest_PowerLock_failed:
+	OSReleaseBridgeLock();
+
+	return eError;
+
+}
+/*
+	RGXForcedIdleRequest
+*/
+
+#define RGX_FORCED_IDLE_RETRY_COUNT 10
+
+PVRSRV_ERROR RGXForcedIdleRequest(IMG_HANDLE hDevHandle, IMG_BOOL bDeviceOffPermitted)
+{
+	PVRSRV_DEVICE_NODE	*psDeviceNode = hDevHandle;
+	PVRSRV_RGXDEV_INFO	*psDevInfo = psDeviceNode->pvDevice;
+	RGXFWIF_KCCB_CMD	sPowCmd;
+	PVRSRV_ERROR		eError;
+	IMG_UINT32		ui32RetryCount = 0;
+
+#if !defined(NO_HARDWARE)
+	RGXFWIF_TRACEBUF	*psFWTraceBuf = psDevInfo->psRGXFWIfTraceBuf;
+
+	/* Firmware already forced idle */
+	if (psFWTraceBuf->ePowState == RGXFWIF_POW_FORCED_IDLE)
+	{
+		return PVRSRV_OK;
+	}
+
+	/* Firmware is not powered. Sometimes this is permitted, for instance we were forcing idle to power down. */
+	if (psFWTraceBuf->ePowState == RGXFWIF_POW_OFF)
+	{
+		return (bDeviceOffPermitted) ? PVRSRV_OK : PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED;
+	}
+#endif
+
+	SyncPrimSet(psDevInfo->psPowSyncPrim, 0);
+	sPowCmd.eCmdType = RGXFWIF_KCCB_CMD_POW;
+	sPowCmd.uCmdData.sPowData.ePowType = RGXFWIF_POW_FORCED_IDLE_REQ;
+	sPowCmd.uCmdData.sPowData.uPoweReqData.bCancelForcedIdle = IMG_FALSE;
+
+	PDUMPCOMMENT("RGXForcedIdleRequest: Sending forced idle command");
+
+	/* Send one forced IDLE command to GP */
+	eError = RGXSendCommandRaw(psDevInfo,
+			RGXFWIF_DM_GP,
+			&sPowCmd,
+			sizeof(sPowCmd),
+			0);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXForcedIdleRequest: Failed to send idle request"));
+		return eError;
+	}
+
+	/* Wait for GPU to finish current workload */
+	do {
+		eError = PVRSRVPollForValueKM(psDevInfo->psPowSyncPrim->pui32LinAddr, 0x1, 0xFFFFFFFF);
+		if ((eError == PVRSRV_OK) || (ui32RetryCount == RGX_FORCED_IDLE_RETRY_COUNT))
+		{
+			break;
+		}
+		ui32RetryCount++;
+		PVR_DPF((PVR_DBG_WARNING,"RGXForcedIdleRequest: Request timeout. Retry %d of %d", ui32RetryCount, RGX_FORCED_IDLE_RETRY_COUNT));
+	} while (IMG_TRUE);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXForcedIdleRequest: Idle request failed. Firmware potentially left in forced idle state"));
+		return eError;
+	}
+
+#if defined(PDUMP)
+	PDUMPCOMMENT("RGXForcedIdleRequest: Poll for Kernel SyncPrim [0x%p] on DM %d ", psDevInfo->psPowSyncPrim->pui32LinAddr, RGXFWIF_DM_GP);
+
+	SyncPrimPDumpPol(psDevInfo->psPowSyncPrim,
+					1,
+					0xffffffff,
+					PDUMP_POLL_OPERATOR_EQUAL,
+					0);
+#endif
+
+#if !defined(NO_HARDWARE)
+	/* Check the firmware state for idleness */
+	if (psFWTraceBuf->ePowState != RGXFWIF_POW_FORCED_IDLE)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXForcedIdleRequest: Failed to force IDLE"));
+
+		return PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED;
+	}
+#endif
+
+	return PVRSRV_OK;
+}
+
+/*
+	RGXCancelForcedIdleRequest
+*/
+PVRSRV_ERROR RGXCancelForcedIdleRequest(IMG_HANDLE hDevHandle)
+{
+	PVRSRV_DEVICE_NODE	*psDeviceNode = hDevHandle;
+	PVRSRV_RGXDEV_INFO	*psDevInfo = psDeviceNode->pvDevice;
+	RGXFWIF_KCCB_CMD	sPowCmd;
+	PVRSRV_ERROR		eError = PVRSRV_OK;
+
+	SyncPrimSet(psDevInfo->psPowSyncPrim, 0);
+
+	/* Send the IDLE request to the FW */
+	sPowCmd.eCmdType = RGXFWIF_KCCB_CMD_POW;
+	sPowCmd.uCmdData.sPowData.ePowType = RGXFWIF_POW_FORCED_IDLE_REQ;
+	sPowCmd.uCmdData.sPowData.uPoweReqData.bCancelForcedIdle = IMG_TRUE;
+
+	PDUMPCOMMENT("RGXForcedIdleRequest: Sending cancel forced idle command");
+
+	/* Send cancel forced IDLE command to GP */
+	eError = RGXSendCommandRaw(psDevInfo,
+			RGXFWIF_DM_GP,
+			&sPowCmd,
+			sizeof(sPowCmd),
+			0);
+
+	if (eError != PVRSRV_OK)
+	{
+		PDUMPCOMMENT("RGXCancelForcedIdleRequest: Failed to send cancel IDLE request for DM%d", RGXFWIF_DM_GP);
+		goto ErrorExit;
+	}
+
+	/* Wait for the firmware to answer. */
+	eError = PVRSRVPollForValueKM(psDevInfo->psPowSyncPrim->pui32LinAddr, 1, 0xFFFFFFFF);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXCancelForcedIdleRequest: Timeout waiting for cancel idle request"));
+		goto ErrorExit;
+	}
+
+#if defined(PDUMP)
+	PDUMPCOMMENT("RGXCancelForcedIdleRequest: Poll for Kernel SyncPrim [0x%p] on DM %d ", psDevInfo->psPowSyncPrim->pui32LinAddr, RGXFWIF_DM_GP);
+
+	SyncPrimPDumpPol(psDevInfo->psPowSyncPrim,
+					1,
+					0xffffffff,
+					PDUMP_POLL_OPERATOR_EQUAL,
+					0);
+#endif
+
+	return eError;
+
+ErrorExit:
+	PVR_DPF((PVR_DBG_ERROR,"RGXCancelForcedIdleRequest: Firmware potentially left in forced idle state"));
+	return eError;
+}
+
+/******************************************************************************
+ End of file (rgxpower.c)
+******************************************************************************/
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxpower.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxpower.h
new file mode 100644
index 0000000..82f45e6
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxpower.h
@@ -0,0 +1,216 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX power header file
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX power
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXPOWER_H__)
+#define __RGXPOWER_H__
+
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "servicesext.h"
+
+
+/*!
+******************************************************************************
+
+ @Function	RGXPrePowerState
+
+ @Description
+
+ does necessary preparation before power state transition
+
+ @Input	   hDevHandle : RGX Device Node
+ @Input	   eNewPowerState : New power state
+ @Input	   eCurrentPowerState : Current power state
+
+ @Return   PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXPrePowerState(IMG_HANDLE				hDevHandle, 
+							  PVRSRV_DEV_POWER_STATE	eNewPowerState, 
+							  PVRSRV_DEV_POWER_STATE	eCurrentPowerState,
+							  IMG_BOOL					bForced);
+
+/*!
+******************************************************************************
+
+ @Function	RGXPostPowerState
+
+ @Description
+
+ does necessary preparation after power state transition
+
+ @Input	   hDevHandle : RGX Device Node
+ @Input	   eNewPowerState : New power state
+ @Input	   eCurrentPowerState : Current power state
+
+ @Return   PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXPostPowerState(IMG_HANDLE				hDevHandle, 
+							   PVRSRV_DEV_POWER_STATE	eNewPowerState, 
+							   PVRSRV_DEV_POWER_STATE	eCurrentPowerState,
+							  IMG_BOOL					bForced);
+
+
+/*!
+******************************************************************************
+
+ @Function	RGXPreClockSpeedChange
+
+ @Description
+
+	Does processing required before an RGX clock speed change.
+
+ @Input	   hDevHandle : RGX Device Node
+ @Input	   bIdleDevice : Whether the firmware needs to be idled
+ @Input	   eCurrentPowerState : Power state of the device
+
+ @Return   PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXPreClockSpeedChange(IMG_HANDLE				hDevHandle,
+									PVRSRV_DEV_POWER_STATE	eCurrentPowerState);
+
+/*!
+******************************************************************************
+
+ @Function	RGXPostClockSpeedChange
+
+ @Description
+
+	Does processing required after an RGX clock speed change.
+
+ @Input	   hDevHandle : RGX Device Node
+ @Input	   bIdleDevice : Whether the firmware had been idled previously
+ @Input	   eCurrentPowerState : Power state of the device
+
+ @Return   PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXPostClockSpeedChange(IMG_HANDLE				hDevHandle,
+									 PVRSRV_DEV_POWER_STATE	eCurrentPowerState);
+
+
+/*!
+******************************************************************************
+
+ @Function	RGXDustCountChange
+
+ @Description Change of number of DUSTs
+
+ @Input	   hDevHandle : RGX Device Node
+ @Input	   ui32NumberOfDusts : Number of DUSTs to make transition to
+
+ @Return   PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXDustCountChange(IMG_HANDLE				hDevHandle,
+								IMG_UINT32				ui32NumberOfDusts);
+
+/*!
+******************************************************************************
+
+ @Function	RGXAPMLatencyChange
+
+ @Description
+
+	Changes the wait duration used before firmware indicates IDLE.
+	Reducing this value will cause the firmware to shut off faster and
+	more often but may increase bubbles in GPU scheduling due to the added
+	power management activity. If bPersistent is NOT set, APM latency will
+	return back to system default on power up.
+
+ @Input	   hDevHandle : RGX Device Node
+ @Input	   ui32ActivePMLatencyms : Number of milliseconds to wait
+ @Input	   bPersistent : Set to ensure new value is not reset
+
+ @Return   PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXAPMLatencyChange(IMG_HANDLE				hDevHandle,
+				IMG_UINT32				ui32ActivePMLatencyms,
+				IMG_BOOL				bActivePMLatencyPersistant);
+
+/*!
+******************************************************************************
+
+ @Function	RGXActivePowerRequest
+
+ @Description Initiate a handshake with the FW to power off the GPU
+
+ @Input	   hDevHandle : RGX Device Node
+
+ @Return   PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXActivePowerRequest(IMG_HANDLE hDevHandle);
+
+/*!
+******************************************************************************
+
+ @Function	RGXForcedIdleRequest
+
+ @Description Initiate a handshake with the FW to idle the GPU
+
+ @Input	   hDevHandle : RGX Device Node
+
+ @Return   PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXForcedIdleRequest(IMG_HANDLE hDevHandle, IMG_BOOL bDeviceOffPermitted);
+
+/*!
+******************************************************************************
+
+ @Function	RGXCancelForcedIdleRequest
+
+ @Description Send a request to cancel idle to the firmware.
+
+ @Input	   hDevHandle : RGX Device Node
+
+ @Return   PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXCancelForcedIdleRequest(IMG_HANDLE hDevHandle);
+
+#endif /* __RGXPOWER_H__ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxregconfig.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxregconfig.c
new file mode 100644
index 0000000..0c74a55
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxregconfig.c
@@ -0,0 +1,227 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX Register configuration
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX Regconfig routines
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "rgxregconfig.h"
+#include "pvr_debug.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "device.h"
+#include "sync_internal.h"
+#include "pdump_km.h"
+#include "pvrsrv.h"
+PVRSRV_ERROR PVRSRVRGXSetRegConfigPIKM(PVRSRV_DEVICE_NODE	*psDeviceNode,
+					IMG_UINT8              ui8RegPowerIsland)
+{
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+	PVRSRV_ERROR 		eError = PVRSRV_OK;
+	PVRSRV_RGXDEV_INFO 	*psDevInfo = psDeviceNode->pvDevice;
+	RGX_REG_CONFIG          *psRegCfg = &psDevInfo->sRegCongfig;
+	RGXFWIF_PWR_EVT		ePowerIsland = (RGXFWIF_PWR_EVT) ui8RegPowerIsland;
+
+
+	if (ePowerIsland < psRegCfg->ePowerIslandToPush)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXSetRegConfigPIKM: Register configuration must be in power island order."));
+		return PVRSRV_ERROR_REG_CONFIG_INVALID_PI;
+	}
+
+	psRegCfg->ePowerIslandToPush = ePowerIsland;
+
+	return eError;
+#else
+	PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXSetRegConfigPIKM: Feature disabled. Compile with SUPPORT_USER_REGISTER_CONFIGURATION"));
+	return PVRSRV_ERROR_FEATURE_DISABLED;
+#endif
+}
+
+PVRSRV_ERROR PVRSRVRGXAddRegConfigKM(PVRSRV_DEVICE_NODE	*psDeviceNode,
+					IMG_UINT32		ui32RegAddr,
+					IMG_UINT64		ui64RegValue)
+{
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+	PVRSRV_ERROR 		eError = PVRSRV_OK;
+	RGXFWIF_KCCB_CMD 	sRegCfgCmd;
+	PVRSRV_RGXDEV_INFO 	*psDevInfo = psDeviceNode->pvDevice;
+	RGX_REG_CONFIG          *psRegCfg = &psDevInfo->sRegCongfig;
+
+	if (psRegCfg->bEnabled)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXSetRegConfigPIKM: Cannot add record whilst register configuration active."));
+		return PVRSRV_ERROR_REG_CONFIG_ENABLED;
+	}
+	if (psRegCfg->ui32NumRegRecords == RGXFWIF_REG_CFG_MAX_SIZE)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXSetRegConfigPIKM: Register configuration full."));
+		return PVRSRV_ERROR_REG_CONFIG_FULL;
+	}
+
+	sRegCfgCmd.eCmdType = RGXFWIF_KCCB_CMD_REGCONFIG;
+	sRegCfgCmd.uCmdData.sRegConfigData.sRegConfig.ui64Addr = (IMG_UINT64) ui32RegAddr;
+	sRegCfgCmd.uCmdData.sRegConfigData.sRegConfig.ui64Value = ui64RegValue;
+	sRegCfgCmd.uCmdData.sRegConfigData.eRegConfigPI = psRegCfg->ePowerIslandToPush;
+	sRegCfgCmd.uCmdData.sRegConfigData.eCmdType = RGXFWIF_REGCFG_CMD_ADD;
+
+	eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+				RGXFWIF_DM_GP,
+				&sRegCfgCmd,
+				sizeof(sRegCfgCmd),
+				IMG_TRUE);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXAddRegConfigKM: RGXScheduleCommand failed. Error:%u", eError));
+		return eError;
+	}
+
+	psRegCfg->ui32NumRegRecords++;
+
+	return eError;
+#else
+	PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXSetRegConfigPIKM: Feature disabled. Compile with SUPPORT_USER_REGISTER_CONFIGURATION"));
+	return PVRSRV_ERROR_FEATURE_DISABLED;
+#endif
+}
+
+PVRSRV_ERROR PVRSRVRGXClearRegConfigKM(PVRSRV_DEVICE_NODE	*psDeviceNode)
+{
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+	PVRSRV_ERROR 		eError = PVRSRV_OK;
+	RGXFWIF_KCCB_CMD 	sRegCfgCmd;
+	PVRSRV_RGXDEV_INFO 	*psDevInfo = psDeviceNode->pvDevice;
+	RGX_REG_CONFIG          *psRegCfg = &psDevInfo->sRegCongfig;
+
+	if (psRegCfg->bEnabled)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXSetRegConfigPIKM: Attempt to clear register configuration whilst active."));
+		return PVRSRV_ERROR_REG_CONFIG_ENABLED;
+	}
+
+	sRegCfgCmd.eCmdType = RGXFWIF_KCCB_CMD_REGCONFIG;
+	sRegCfgCmd.uCmdData.sRegConfigData.eCmdType = RGXFWIF_REGCFG_CMD_CLEAR;
+
+	eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+				RGXFWIF_DM_GP,
+				&sRegCfgCmd,
+				sizeof(sRegCfgCmd),
+				IMG_TRUE);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXClearRegConfigKM: RGXScheduleCommand failed. Error:%u", eError));
+		return eError;
+	}
+
+	psRegCfg->ui32NumRegRecords = 0;
+	psRegCfg->ePowerIslandToPush = RGXFWIF_PWR_EVT_PWR_ON; /* Default first PI */
+
+	return eError;
+#else
+	PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXClearRegConfigKM: Feature disabled. Compile with SUPPORT_USER_REGISTER_CONFIGURATION"));
+	return PVRSRV_ERROR_FEATURE_DISABLED;
+#endif
+}
+
+PVRSRV_ERROR PVRSRVRGXEnableRegConfigKM(PVRSRV_DEVICE_NODE	*psDeviceNode)
+{
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+	PVRSRV_ERROR 		eError = PVRSRV_OK;
+	RGXFWIF_KCCB_CMD 	sRegCfgCmd;
+	PVRSRV_RGXDEV_INFO 	*psDevInfo = psDeviceNode->pvDevice;
+	RGX_REG_CONFIG          *psRegCfg = &psDevInfo->sRegCongfig;
+
+	sRegCfgCmd.eCmdType = RGXFWIF_KCCB_CMD_REGCONFIG;
+	sRegCfgCmd.uCmdData.sRegConfigData.eCmdType = RGXFWIF_REGCFG_CMD_ENABLE;
+
+	eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+				RGXFWIF_DM_GP,
+				&sRegCfgCmd,
+				sizeof(sRegCfgCmd),
+				IMG_TRUE);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXEnableRegConfigKM: RGXScheduleCommand failed. Error:%u", eError));
+		return eError;
+	}
+
+	psRegCfg->bEnabled = IMG_TRUE;
+
+	return eError;
+#else
+	PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXEnableRegConfigKM: Feature disabled. Compile with SUPPORT_USER_REGISTER_CONFIGURATION"));
+	return PVRSRV_ERROR_FEATURE_DISABLED;
+#endif
+}
+
+PVRSRV_ERROR PVRSRVRGXDisableRegConfigKM(PVRSRV_DEVICE_NODE	*psDeviceNode)
+{
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+	PVRSRV_ERROR 		eError = PVRSRV_OK;
+	RGXFWIF_KCCB_CMD 	sRegCfgCmd;
+	PVRSRV_RGXDEV_INFO 	*psDevInfo = psDeviceNode->pvDevice;
+	RGX_REG_CONFIG          *psRegCfg = &psDevInfo->sRegCongfig;
+
+	sRegCfgCmd.eCmdType = RGXFWIF_KCCB_CMD_REGCONFIG;
+	sRegCfgCmd.uCmdData.sRegConfigData.eCmdType = RGXFWIF_REGCFG_CMD_DISABLE;
+
+	eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+				RGXFWIF_DM_GP,
+				&sRegCfgCmd,
+				sizeof(sRegCfgCmd),
+				IMG_TRUE);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXDisableRegConfigKM: RGXScheduleCommand failed. Error:%u", eError));
+		return eError;
+	}
+
+	psRegCfg->bEnabled = IMG_FALSE;
+
+	return eError;
+#else
+	PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXDisableRegConfigKM: Feature disabled. Compile with SUPPORT_USER_REGISTER_CONFIGURATION"));
+	return PVRSRV_ERROR_FEATURE_DISABLED;
+#endif
+}
+
+
+/******************************************************************************
+ End of file (rgxregconfig.c)
+******************************************************************************/
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxregconfig.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxregconfig.h
new file mode 100644
index 0000000..bbe1586
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxregconfig.h
@@ -0,0 +1,123 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX register configuration functionality
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX register configuration functionality
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXREGCONFIG_H__)
+#define __RGXREGCONFIG_H__
+
+#include "pvr_debug.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgx_fwif_km.h"
+
+/*!
+*******************************************************************************
+ @Function	PVRSRVRGXSetRegConfigPIKM
+
+ @Description
+	Server-side implementation of RGXSetRegConfig
+
+ @Input psDeviceNode - RGX Device node
+ @Input ui8RegPowerIsland - Reg configuration
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXSetRegConfigPIKM(PVRSRV_DEVICE_NODE	*psDeviceNode,
+					IMG_UINT8       ui8RegPowerIsland);
+/*!
+*******************************************************************************
+ @Function	PVRSRVRGXSetRegConfigKM
+
+ @Description
+	Server-side implementation of RGXSetRegConfig
+
+ @Input psDeviceNode - RGX Device node
+ @Input ui64RegAddr - Register address
+ @Input ui64RegValue - Reg value
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+
+PVRSRV_ERROR PVRSRVRGXAddRegConfigKM(PVRSRV_DEVICE_NODE	*psDeviceNode,
+					IMG_UINT32	ui64RegAddr,
+					IMG_UINT64	ui64RegValue);
+
+/*!
+*******************************************************************************
+ @Function	PVRSRVRGXClearRegConfigKM
+
+ @Description
+	Server-side implementation of RGXClearRegConfig
+
+ @Input psDeviceNode - RGX Device node
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXClearRegConfigKM(PVRSRV_DEVICE_NODE	*psDeviceNode);
+
+/*!
+*******************************************************************************
+ @Function	PVRSRVRGXEnableRegConfigKM
+
+ @Description
+	Server-side implementation of RGXEnableRegConfig
+
+ @Input psDeviceNode - RGX Device node
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXEnableRegConfigKM(PVRSRV_DEVICE_NODE	*psDeviceNode);
+
+/*!
+*******************************************************************************
+ @Function	PVRSRVRGXDisableRegConfigKM
+
+ @Description
+	Server-side implementation of RGXDisableRegConfig
+
+ @Input psDeviceNode - RGX Device node
+
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXDisableRegConfigKM(PVRSRV_DEVICE_NODE	*psDeviceNode);
+
+#endif /* __RGXREGCONFIG_H__ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxsync.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxsync.c
new file mode 100644
index 0000000..d1e8130
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxsync.c
@@ -0,0 +1,198 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX sync kick routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX sync kick routines
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "srvkm.h"
+#include "pdump_km.h"
+#include "pvr_debug.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgxmem.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "osfunc.h"
+#include "rgxccb.h"
+
+#include "sync_server.h"
+#include "sync_internal.h"
+
+#include "rgxsync.h"
+
+PVRSRV_ERROR RGXKickSyncKM(PVRSRV_DEVICE_NODE        *psDeviceNode,
+                           RGX_SERVER_COMMON_CONTEXT *psServerCommonContext,
+						   RGXFWIF_DM                eDM,
+						   IMG_CHAR                  *pszCommandName,
+                           IMG_UINT32                ui32ClientFenceCount,
+                           PRGXFWIF_UFO_ADDR         *pauiClientFenceUFOAddress,
+                           IMG_UINT32                *paui32ClientFenceValue,
+                           IMG_UINT32                ui32ClientUpdateCount,
+                           PRGXFWIF_UFO_ADDR         *pauiClientUpdateUFOAddress,
+                           IMG_UINT32                *paui32ClientUpdateValue,
+                           IMG_UINT32                ui32ServerSyncPrims,
+                           IMG_UINT32                *paui32ServerSyncFlags,
+                           SERVER_SYNC_PRIMITIVE     **pasServerSyncs,
+                           IMG_BOOL                  bPDumpContinuous)
+{
+	RGXFWIF_KCCB_CMD		sCmpKCCBCmd;
+	RGX_CCB_CMD_HELPER_DATA	asCmdHelperData[1];
+	IMG_BOOL				bKickRequired;
+	PVRSRV_ERROR			eError;
+	PVRSRV_ERROR			eError2;
+	IMG_UINT32				i;
+
+	/* Sanity check the server fences */
+	for (i=0;i<ui32ServerSyncPrims;i++)
+	{
+		if (!(paui32ServerSyncFlags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK))
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Server fence (on %s) must fence", __FUNCTION__, pszCommandName));
+			return PVRSRV_ERROR_INVALID_SYNC_PRIM_OP;
+		}
+	}
+
+	eError = RGXCmdHelperInitCmdCCB(FWCommonContextGetClientCCB(psServerCommonContext),
+									  ui32ClientFenceCount,
+									  pauiClientFenceUFOAddress,
+									  paui32ClientFenceValue,
+									  ui32ClientUpdateCount,
+									  pauiClientUpdateUFOAddress,
+									  paui32ClientUpdateValue,
+									  ui32ServerSyncPrims,
+									  paui32ServerSyncFlags,
+									  pasServerSyncs,
+									  0,         /* ui32CmdSize */
+									  IMG_NULL,  /* pui8DMCmd */
+									  IMG_NULL,  /* ppPreAddr */
+									  IMG_NULL,  /* ppPostAddr */
+									  IMG_NULL,  /* ppRMWUFOAddr */
+									  RGXFWIF_CCB_CMD_TYPE_NULL,
+									  bPDumpContinuous,
+									  pszCommandName,
+									  asCmdHelperData);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_cmdinit;
+	}
+
+	eError = RGXCmdHelperAcquireCmdCCB(IMG_ARR_NUM_ELEMS(asCmdHelperData),
+	                                   asCmdHelperData, &bKickRequired);
+	if ((eError != PVRSRV_OK) && (!bKickRequired))
+	{
+		/*
+			Only bail if no new data was submitted into the client CCB, we might
+			have already submitted a padding packet which we should flush through
+			the FW.
+		*/
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create client CCB command", __FUNCTION__));
+		goto fail_cmdaquire;
+	}
+
+
+	/*
+		We should reserved space in the kernel CCB here and fill in the command
+		directly.
+		This is so if there isn't space in the kernel CCB we can return with
+		retry back to services client before we take any operations
+	*/
+
+	/*
+		We might only be kicking for flush out a padding packet so only submit
+		the command if the create was successful
+	*/
+	if (eError == PVRSRV_OK)
+	{
+		/*
+			All the required resources are ready at this point, we can't fail so
+			take the required server sync operations and commit all the resources
+		*/
+		RGXCmdHelperReleaseCmdCCB(1, asCmdHelperData, pszCommandName, FWCommonContextGetFWAddress(psServerCommonContext).ui32Addr);
+	}
+
+	/* Construct the kernel compute CCB command. */
+	sCmpKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+	sCmpKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psServerCommonContext);
+	sCmpKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psServerCommonContext));
+	sCmpKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+
+	/*
+	 * Submit the compute command to the firmware.
+	 */
+	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+	{
+		eError2 = RGXScheduleCommand(psDeviceNode->pvDevice,
+									eDM,
+									&sCmpKCCBCmd,
+									sizeof(sCmpKCCBCmd),
+									bPDumpContinuous);
+		if (eError2 != PVRSRV_ERROR_RETRY)
+		{
+			break;
+		}
+		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+	} END_LOOP_UNTIL_TIMEOUT();
+	
+	if (eError2 != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s failed to schedule kernel CCB command. (0x%x)", __FUNCTION__, eError));
+	}
+	/*
+	 * Now check eError (which may have returned an error from our earlier call
+	 * to RGXCmdHelperAcquireCmdCCB) - we needed to process any flush command first
+	 * so we check it now...
+	 */
+	if (eError != PVRSRV_OK )
+	{
+		goto fail_cmdaquire;
+	}
+
+	return PVRSRV_OK;
+
+fail_cmdaquire:
+fail_cmdinit:
+
+	return eError;
+}
+
+/******************************************************************************
+ End of file (rgxsync.c)
+******************************************************************************/
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxsync.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxsync.h
new file mode 100644
index 0000000..fa77134
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxsync.h
@@ -0,0 +1,80 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX sync kick functionality
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX sync kick functionality
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXSYNC__)
+#define __RGXSYNC_H__
+
+#include "devicemem.h"
+#include "device.h"
+#include "rgxfwutils.h"
+#include "rgx_fwif_resetframework.h"
+#include "rgxdebug.h"
+
+#include "sync_server.h"
+#include "sync_internal.h"
+#include "connection_server.h"
+
+/*!
+*******************************************************************************
+ @Function	RGXKickSyncKM
+
+ @Description Send a sync kick command to the FW
+	
+ @Return   PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXKickSyncKM(PVRSRV_DEVICE_NODE        *psDeviceNode,
+						   RGX_SERVER_COMMON_CONTEXT *psServerCommonContext,
+						   RGXFWIF_DM                eDM,
+						   IMG_CHAR                  *pszCommandName,
+						   IMG_UINT32                ui32ClientFenceCount,
+						   PRGXFWIF_UFO_ADDR         *pauiClientFenceUFOAddress,
+						   IMG_UINT32                *paui32ClientFenceValue,
+						   IMG_UINT32                ui32ClientUpdateCount,
+						   PRGXFWIF_UFO_ADDR         *pauiClientUpdateUFOAddress,
+						   IMG_UINT32                *paui32ClientUpdateValue,
+						   IMG_UINT32                ui32ServerSyncPrims,
+						   IMG_UINT32                *paui32ServerSyncFlags,
+						   SERVER_SYNC_PRIMITIVE     **pasServerSyncs,
+						   IMG_BOOL                  bPDumpContinuous);
+
+#endif /* __RGXSYNC_H__ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxta3d.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxta3d.c
new file mode 100644
index 0000000..3515f9f
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxta3d.c
@@ -0,0 +1,3442 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX TA/3D routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX TA/3D routines
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+/* for the offsetof macro */
+#include <stddef.h> 
+
+#include "pdump_km.h"
+#include "pvr_debug.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgxta3d.h"
+#include "rgxmem.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "osfunc.h"
+#include "pvrsrv.h"
+#include "rgx_memallocflags.h"
+#include "rgxccb.h"
+#include "rgxhwperf.h"
+#include "rgxtimerquery.h"
+#include "rgxsync.h"
+
+#include "rgxdefs_km.h"
+#include "rgx_fwif_km.h"
+#include "physmem.h"
+#include "sync_server.h"
+#include "sync_internal.h"
+#include "process_stats.h"
+#include "osfunc.h"
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+#include "pvr_sync.h"
+#endif
+
+typedef struct _DEVMEM_REF_LOOKUP_
+{
+	IMG_UINT32 ui32ZSBufferID;
+	RGX_ZSBUFFER_DATA *psZSBuffer;
+} DEVMEM_REF_LOOKUP;
+
+typedef struct _DEVMEM_FREELIST_LOOKUP_
+{
+	IMG_UINT32 ui32FreeListID;
+	RGX_FREELIST *psFreeList;
+} DEVMEM_FREELIST_LOOKUP;
+
+typedef struct {
+	DEVMEM_MEMDESC				*psContextStateMemDesc;
+	RGX_SERVER_COMMON_CONTEXT	*psServerCommonContext;
+	IMG_UINT32					ui32Priority;
+} RGX_SERVER_RC_TA_DATA;
+
+typedef struct {
+	DEVMEM_MEMDESC				*psContextStateMemDesc;
+	RGX_SERVER_COMMON_CONTEXT	*psServerCommonContext;
+	IMG_UINT32					ui32Priority;
+} RGX_SERVER_RC_3D_DATA;
+
+struct _RGX_SERVER_RENDER_CONTEXT_ {
+	PVRSRV_DEVICE_NODE			*psDeviceNode;
+	DEVMEM_MEMDESC				*psFWRenderContextMemDesc;
+	DEVMEM_MEMDESC				*psFWFrameworkMemDesc;
+	RGX_SERVER_RC_TA_DATA		sTAData;
+	RGX_SERVER_RC_3D_DATA		s3DData;
+	IMG_UINT32					ui32CleanupStatus;
+#define RC_CLEANUP_TA_COMPLETE		(1 << 0)
+#define RC_CLEANUP_3D_COMPLETE		(1 << 1)
+	PVRSRV_CLIENT_SYNC_PRIM		*psCleanupSync;
+	DLLIST_NODE					sListNode;
+};
+
+
+static
+#ifdef __GNUC__
+	__attribute__((noreturn))
+#endif
+void sleep_for_ever(void)
+{
+#if defined(__KLOCWORK__) // klocworks would report an infinite loop because of while(1).
+	PVR_ASSERT(0); 
+#else
+	while(1)
+	{
+		OSSleepms(~0); // sleep the maximum amount of time possible
+	}
+#endif
+}
+
+
+/*
+	Static functions used by render context code
+*/
+
+static
+PVRSRV_ERROR _DestroyTAContext(RGX_SERVER_RC_TA_DATA *psTAData,
+							   PVRSRV_DEVICE_NODE *psDeviceNode,
+							   PVRSRV_CLIENT_SYNC_PRIM *psCleanupSync)
+{
+	PVRSRV_ERROR eError;
+
+	/* Check if the FW has finished with this resource ... */
+	eError = RGXFWRequestCommonContextCleanUp(psDeviceNode,
+											  FWCommonContextGetFWAddress(psTAData->psServerCommonContext),
+											  psCleanupSync,
+											  RGXFWIF_DM_TA);
+	if (eError == PVRSRV_ERROR_RETRY)
+	{
+		return eError;
+	}
+	else if (eError != PVRSRV_OK)
+	{
+		PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
+				__FUNCTION__,
+				PVRSRVGetErrorStringKM(eError)));
+		return eError;
+	}
+
+	/* ... it has so we can free it's resources */
+#if defined(DEBUG)
+	/* Log the number of TA context stores which occurred */
+	{
+		RGXFWIF_TACTX_STATE	*psFWTAState;
+
+		eError = DevmemAcquireCpuVirtAddr(psTAData->psContextStateMemDesc,
+										  (IMG_VOID**)&psFWTAState);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"%s: Failed to map firmware render context state (%u)",
+					__FUNCTION__, eError));
+		}
+		else
+		{
+			/* Release the CPU virt addr */
+			DevmemReleaseCpuVirtAddr(psTAData->psContextStateMemDesc);
+		}
+	}
+#endif
+	FWCommonContextFree(psTAData->psServerCommonContext);
+	DevmemFwFree(psTAData->psContextStateMemDesc);
+	return PVRSRV_OK;
+}
+
+static
+PVRSRV_ERROR _Destroy3DContext(RGX_SERVER_RC_3D_DATA *ps3DData,
+							   PVRSRV_DEVICE_NODE *psDeviceNode,
+							   PVRSRV_CLIENT_SYNC_PRIM *psCleanupSync)
+{
+	PVRSRV_ERROR eError;
+
+	/* Check if the FW has finished with this resource ... */
+	eError = RGXFWRequestCommonContextCleanUp(psDeviceNode,
+											  FWCommonContextGetFWAddress(ps3DData->psServerCommonContext),
+											  psCleanupSync,
+											  RGXFWIF_DM_3D);
+	if (eError == PVRSRV_ERROR_RETRY)
+	{
+		return eError;
+	}
+	else if (eError != PVRSRV_OK)
+	{
+		PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
+				 __FUNCTION__,
+				 PVRSRVGetErrorStringKM(eError)));
+		return eError;
+	}
+
+	/* ... it has so we can free it's resources */
+#if defined(DEBUG)
+	/* Log the number of 3D context stores which occurred */
+	{
+		RGXFWIF_3DCTX_STATE	*psFW3DState;
+
+		eError = DevmemAcquireCpuVirtAddr(ps3DData->psContextStateMemDesc,
+										  (IMG_VOID**)&psFW3DState);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"%s: Failed to map firmware render context state (%u)",
+					__FUNCTION__, eError));
+		}
+		else
+		{
+			/* Release the CPU virt addr */
+			DevmemReleaseCpuVirtAddr(ps3DData->psContextStateMemDesc);
+		}
+	}
+#endif
+
+	FWCommonContextFree(ps3DData->psServerCommonContext);
+	DevmemFwFree(ps3DData->psContextStateMemDesc);
+	return PVRSRV_OK;
+}
+
+static IMG_BOOL _RGXDumpPMRPageList(PDLLIST_NODE psNode, IMG_PVOID pvCallbackData)
+{
+	RGX_PMR_NODE *psPMRNode = IMG_CONTAINER_OF(psNode, RGX_PMR_NODE, sMemoryBlock);
+	PVRSRV_ERROR			eError;
+
+	eError = PMRDumpPageList(psPMRNode->psPMR,
+							RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"Error (%u) printing pmr %p", eError, psPMRNode->psPMR));
+	}
+
+	return IMG_TRUE;
+}
+
+IMG_BOOL RGXDumpFreeListPageList(RGX_FREELIST *psFreeList)
+{
+	PVR_LOG(("Freelist FWAddr 0x%08x, ID = %d, CheckSum 0x%016llx",
+				psFreeList->sFreeListFWDevVAddr.ui32Addr,
+				psFreeList->ui32FreelistID,
+				psFreeList->ui64FreelistChecksum));
+
+	/* Dump Init FreeList page list */
+	PVR_LOG(("  Initial Memory block"));
+	dllist_foreach_node(&psFreeList->sMemoryBlockInitHead,
+					_RGXDumpPMRPageList,
+					IMG_NULL);
+
+	/* Dump Grow FreeList page list */
+	PVR_LOG(("  Grow Memory blocks"));
+	dllist_foreach_node(&psFreeList->sMemoryBlockHead,
+					_RGXDumpPMRPageList,
+					IMG_NULL);
+
+	return IMG_TRUE;
+}
+
+static PVRSRV_ERROR _UpdateFwFreelistSize(RGX_FREELIST *psFreeList,
+										IMG_BOOL bGrow,
+										IMG_UINT32 ui32DeltaSize)
+{
+	PVRSRV_ERROR			eError;
+	RGXFWIF_KCCB_CMD		sGPCCBCmd;
+
+	sGPCCBCmd.eCmdType = (bGrow) ? RGXFWIF_KCCB_CMD_FREELIST_GROW_UPDATE : RGXFWIF_KCCB_CMD_FREELIST_SHRINK_UPDATE;
+	sGPCCBCmd.uCmdData.sFreeListGSData.psFreeListFWDevVAddr = psFreeList->sFreeListFWDevVAddr.ui32Addr;
+	sGPCCBCmd.uCmdData.sFreeListGSData.ui32DeltaSize = ui32DeltaSize;
+	sGPCCBCmd.uCmdData.sFreeListGSData.ui32NewSize = psFreeList->ui32CurrentFLPages;
+
+	PVR_DPF((PVR_DBG_MESSAGE, "Send FW update: freelist [FWAddr=0x%08x] has 0x%08x pages",
+								psFreeList->sFreeListFWDevVAddr.ui32Addr,
+								psFreeList->ui32CurrentFLPages));
+
+	/* Submit command to the firmware.  */
+	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+	{
+		eError = RGXScheduleCommand(psFreeList->psDevInfo,
+									RGXFWIF_DM_GP,
+									&sGPCCBCmd,
+									sizeof(sGPCCBCmd),
+									IMG_TRUE);
+		if (eError != PVRSRV_ERROR_RETRY)
+		{
+			break;
+		}
+		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+	} END_LOOP_UNTIL_TIMEOUT();
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "_UpdateFwFreelistSize: failed to update FW freelist size. (error = %u)", eError));
+		return eError;
+	}
+
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR _FreeListCheckSum(RGX_FREELIST *psFreeList,
+                   	   	   	   	   	   IMG_UINT64 *pui64CheckSum)
+{
+#if defined(NO_HARDWARE)
+	/* No checksum needed as we have all information in the pdumps */
+	PVR_UNREFERENCED_PARAMETER(psFreeList);
+	*pui64CheckSum = 0;
+	return PVRSRV_OK;
+#else
+	PVRSRV_ERROR eError;
+	IMG_SIZE_T uiNumBytes;
+    IMG_UINT8* pui8Buffer;
+    IMG_UINT32* pui32Buffer;
+    IMG_UINT32 ui32CheckSumAdd = 0;
+    IMG_UINT32 ui32CheckSumXor = 0;
+    IMG_UINT32 ui32Entry;
+    IMG_UINT32 ui32Entry2;
+
+	/* Allocate Buffer of the size of the freelist */
+	pui8Buffer = OSAllocMem(psFreeList->ui32CurrentFLPages * sizeof(IMG_UINT32));
+    if (pui8Buffer == IMG_NULL)
+    {
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+        goto _OSAllocMem_Exit;
+    }
+
+    /* Copy freelist content into Buffer */
+    eError = PMR_ReadBytes(psFreeList->psFreeListPMR,
+    				psFreeList->uiFreeListPMROffset + (psFreeList->ui32MaxFLPages - psFreeList->ui32CurrentFLPages) * sizeof(IMG_UINT32),
+    				pui8Buffer,
+    				psFreeList->ui32CurrentFLPages * sizeof(IMG_UINT32),
+            		&uiNumBytes);
+    if (eError != PVRSRV_OK)
+    {
+    	goto _PMR_ReadBytes_Exit;
+    }
+
+    PVR_ASSERT(uiNumBytes == psFreeList->ui32CurrentFLPages * sizeof(IMG_UINT32));
+
+    /* Generate checksum */
+    pui32Buffer = (IMG_UINT32 *)pui8Buffer;
+    for(ui32Entry = 0; ui32Entry < psFreeList->ui32CurrentFLPages; ui32Entry++)
+    {
+    	ui32CheckSumAdd += pui32Buffer[ui32Entry];
+    	ui32CheckSumXor ^= pui32Buffer[ui32Entry];
+
+    	/* Check for double entries */
+    	for (ui32Entry2 = 0; ui32Entry2 < psFreeList->ui32CurrentFLPages; ui32Entry2++)
+    	{
+			if ((ui32Entry != ui32Entry2) &&
+				(pui32Buffer[ui32Entry] == pui32Buffer[ui32Entry2]))
+			{
+				PVR_DPF((PVR_DBG_ERROR, "Freelist consistency failure: FW addr: 0x%08X, Double entry found 0x%08x on idx: %d and %d",
+											psFreeList->sFreeListFWDevVAddr.ui32Addr,
+											pui32Buffer[ui32Entry2],
+											ui32Entry,
+											ui32Entry2));
+				sleep_for_ever();
+//				PVR_ASSERT(0);
+			}
+    	}
+    }
+
+    OSFreeMem(pui8Buffer);
+
+    /* Set return value */
+    *pui64CheckSum = ((IMG_UINT64)ui32CheckSumXor << 32) | ui32CheckSumAdd;
+    PVR_ASSERT(eError == PVRSRV_OK);
+    return PVRSRV_OK;
+
+    /*
+      error exit paths follow
+    */
+
+_PMR_ReadBytes_Exit:
+	OSFreeMem(pui8Buffer);
+
+_OSAllocMem_Exit:
+    PVR_ASSERT(eError != PVRSRV_OK);
+    return eError;
+#endif
+}
+
+PVRSRV_ERROR RGXGrowFreeList(RGX_FREELIST *psFreeList,
+							IMG_UINT32 ui32NumPages,
+							PDLLIST_NODE pListHeader)
+{
+	RGX_PMR_NODE	*psPMRNode;
+	IMG_DEVMEM_SIZE_T uiSize;
+	IMG_BOOL bMappingTable = IMG_TRUE;
+	IMG_DEVMEM_OFFSET_T uiOffset;
+	IMG_DEVMEM_SIZE_T uiLength;
+	IMG_DEVMEM_SIZE_T uistartPage;
+	PVRSRV_ERROR eError;
+	IMG_UINT64 ui64CheckSum;
+	IMG_UINT32 ui32CheckSumXor;
+	IMG_UINT32 ui32CheckSumAdd;
+
+	/* Are we allowed to grow ? */
+	if ((psFreeList->ui32MaxFLPages - psFreeList->ui32CurrentFLPages) < ui32NumPages)
+	{
+		PVR_DPF((PVR_DBG_WARNING,"Freelist [0x%p]: grow by %u pages denied. Max PB size reached (current pages %u/%u)",
+				psFreeList,
+				ui32NumPages,
+				psFreeList->ui32CurrentFLPages,
+				psFreeList->ui32MaxFLPages));
+		return PVRSRV_ERROR_PBSIZE_ALREADY_MAX;
+	}
+
+	/* Allocate kernel memory block structure */
+	psPMRNode = OSAllocMem(sizeof(*psPMRNode));
+	if (psPMRNode == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXGrowFreeList: failed to allocate host data structure"));
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto ErrorAllocHost;
+	}
+
+	/*
+	 * Lock protects simultaneous manipulation of:
+	 * - the memory block list
+	 * - the freelist's ui32CurrentFLPages
+	 */
+	OSLockAcquire(psFreeList->psDevInfo->hLockFreeList);
+
+
+	psPMRNode->ui32NumPages = ui32NumPages;
+	psPMRNode->psFreeList = psFreeList;
+
+	/* Allocate Memory Block */
+	PDUMPCOMMENT("Allocate PB Block (Pages %08X)", ui32NumPages);
+	uiSize = (IMG_DEVMEM_SIZE_T)ui32NumPages * RGX_BIF_PM_PHYSICAL_PAGE_SIZE;
+	eError = PhysmemNewRamBackedPMR(psFreeList->psDevInfo->psDeviceNode,
+									uiSize,
+									uiSize,
+									1,
+									1,
+									&bMappingTable,
+									RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT,
+									PVRSRV_MEMALLOCFLAG_GPU_READABLE,
+									&psPMRNode->psPMR);
+	if(eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "RGXGrowFreeList: Failed to allocate PB block of size: 0x%016llX",
+				 (IMG_UINT64)uiSize));
+		goto ErrorBlockAlloc;
+	}
+
+	/* Zeroing physical pages pointed by the PMR */
+	if (psFreeList->psDevInfo->ui32DeviceFlags & RGXKM_DEVICE_STATE_ZERO_FREELIST)
+	{
+		eError = PMRZeroingPMR(psPMRNode->psPMR, RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"RGXGrowFreeList: Failed to zero PMR %p of freelist %p with Error %d",
+									psPMRNode->psPMR,
+									psFreeList,
+									eError));
+			PVR_ASSERT(0);
+		}
+	}
+
+	uiLength = psPMRNode->ui32NumPages * sizeof(IMG_UINT32);
+	uistartPage = (psFreeList->ui32MaxFLPages - psFreeList->ui32CurrentFLPages - psPMRNode->ui32NumPages);
+	uiOffset = psFreeList->uiFreeListPMROffset + (uistartPage * sizeof(IMG_UINT32));
+
+	/* write Freelist with Memory Block physical addresses */
+	eError = PMRWritePMPageList(
+						/* Target PMR, offset, and length */
+						psFreeList->psFreeListPMR,
+						uiOffset,
+						uiLength,
+						/* Referenced PMR, and "page" granularity */
+						psPMRNode->psPMR,
+						RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT,
+						&psPMRNode->psPageList,
+						&ui64CheckSum);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				 "RGXGrowFreeList: Failed to write pages of Node %p",
+				 psPMRNode));
+		goto ErrorPopulateFreelist;
+	}
+
+	/* We add It must be added to the tail, otherwise the freelist population won't work */
+	dllist_add_to_head(pListHeader, &psPMRNode->sMemoryBlock);
+
+	/* Update number of available pages */
+	psFreeList->ui32CurrentFLPages += ui32NumPages;
+
+	/* Update statistics */
+	if (psFreeList->ui32NumHighPages < psFreeList->ui32CurrentFLPages)
+	{
+		psFreeList->ui32NumHighPages = psFreeList->ui32CurrentFLPages;
+	}
+
+	if (psFreeList->bCheckFreelist)
+	{
+		/* Update checksum */
+		ui32CheckSumAdd = (IMG_UINT32)(psFreeList->ui64FreelistChecksum + ui64CheckSum);
+		ui32CheckSumXor = (IMG_UINT32)((psFreeList->ui64FreelistChecksum  ^ ui64CheckSum) >> 32);
+		psFreeList->ui64FreelistChecksum = ((IMG_UINT64)ui32CheckSumXor << 32) | ui32CheckSumAdd;
+		/* Note: We can't do a freelist check here, because the freelist is probably empty (OOM) */
+	}
+
+	OSLockRelease(psFreeList->psDevInfo->hLockFreeList);
+
+	PVR_DPF((PVR_DBG_MESSAGE,"Freelist [%p]: grow by %u pages (current pages %u/%u)",
+			psFreeList,
+			ui32NumPages,
+			psFreeList->ui32CurrentFLPages,
+			psFreeList->ui32MaxFLPages));
+
+	return PVRSRV_OK;
+
+	/* Error handling */
+ErrorPopulateFreelist:
+	PMRUnrefPMR(psPMRNode->psPMR);
+
+ErrorBlockAlloc:
+	OSFreeMem(psPMRNode);
+	OSLockRelease(psFreeList->psDevInfo->hLockFreeList);
+
+ErrorAllocHost:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+
+}
+
+static PVRSRV_ERROR RGXShrinkFreeList(PDLLIST_NODE pListHeader,
+										RGX_FREELIST *psFreeList)
+{
+	DLLIST_NODE *psNode;
+	RGX_PMR_NODE *psPMRNode;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	IMG_UINT32 ui32OldValue;
+
+	/*
+	 * Lock protects simultaneous manipulation of:
+	 * - the memory block list
+	 * - the freelist's ui32CurrentFLPages value
+	 */
+	PVR_ASSERT(pListHeader);
+	PVR_ASSERT(psFreeList);
+	PVR_ASSERT(psFreeList->psDevInfo);
+	PVR_ASSERT(psFreeList->psDevInfo->hLockFreeList);
+
+	OSLockAcquire(psFreeList->psDevInfo->hLockFreeList);
+
+	/* Get node from head of list and remove it */
+	psNode = dllist_get_next_node(pListHeader);
+	if (psNode)
+	{
+		dllist_remove_node(psNode);
+
+		psPMRNode = IMG_CONTAINER_OF(psNode, RGX_PMR_NODE, sMemoryBlock);
+		PVR_ASSERT(psPMRNode);
+		PVR_ASSERT(psPMRNode->psPMR);
+		PVR_ASSERT(psPMRNode->psFreeList);
+
+		/* remove block from freelist list */
+
+		/* Unwrite Freelist with Memory Block physical addresses */
+		eError = PMRUnwritePMPageList(psPMRNode->psPageList);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "RGXRemoveBlockFromFreeListKM: Failed to unwrite pages of Node %p",
+					 psPMRNode));
+			PVR_ASSERT(IMG_FALSE);
+		}
+
+		/* Free PMR (We should be the only one that holds a ref on the PMR) */
+		eError = PMRUnrefPMR(psPMRNode->psPMR);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "RGXRemoveBlockFromFreeListKM: Failed to free PB block %p (error %u)",
+					 psPMRNode->psPMR,
+					 eError));
+			PVR_ASSERT(IMG_FALSE);
+		}
+
+		/* update available pages in freelist */
+		ui32OldValue = psFreeList->ui32CurrentFLPages;
+		psFreeList->ui32CurrentFLPages -= psPMRNode->ui32NumPages;
+
+		/* check underflow */
+		PVR_ASSERT(ui32OldValue > psFreeList->ui32CurrentFLPages);
+
+		PVR_DPF((PVR_DBG_MESSAGE, "Freelist [%p]: shrink by %u pages (current pages %u/%u)",
+								psFreeList,
+								psPMRNode->ui32NumPages,
+								psFreeList->ui32CurrentFLPages,
+								psFreeList->ui32MaxFLPages));
+
+		OSFreeMem(psPMRNode);
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_WARNING,"Freelist [0x%p]: shrink denied. PB already at initial PB size (%u pages)",
+								psFreeList,
+								psFreeList->ui32InitFLPages));
+		eError = PVRSRV_ERROR_PBSIZE_ALREADY_MIN;
+	}
+
+	OSLockRelease(psFreeList->psDevInfo->hLockFreeList);
+
+	return eError;
+}
+
+static IMG_BOOL _FindFreeList(PDLLIST_NODE psNode, IMG_PVOID pvCallbackData)
+{
+	DEVMEM_FREELIST_LOOKUP *psRefLookUp = (DEVMEM_FREELIST_LOOKUP *)pvCallbackData;
+	RGX_FREELIST *psFreeList;
+
+	psFreeList = IMG_CONTAINER_OF(psNode, RGX_FREELIST, sNode);
+
+	if (psFreeList->ui32FreelistID == psRefLookUp->ui32FreeListID)
+	{
+		psRefLookUp->psFreeList = psFreeList;
+		return IMG_FALSE;
+	}
+	else
+	{
+		return IMG_TRUE;
+	}
+}
+
+IMG_VOID RGXProcessRequestGrow(PVRSRV_RGXDEV_INFO *psDevInfo,
+								IMG_UINT32 ui32FreelistID)
+{
+	DEVMEM_FREELIST_LOOKUP sLookUp;
+	RGXFWIF_KCCB_CMD s3DCCBCmd;
+	IMG_UINT32 ui32GrowValue;
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(psDevInfo);
+
+	/* find the freelist with the corresponding ID */
+	sLookUp.ui32FreeListID = ui32FreelistID;
+	sLookUp.psFreeList = IMG_NULL;
+
+	OSLockAcquire(psDevInfo->hLockFreeList);
+	dllist_foreach_node(&psDevInfo->sFreeListHead, _FindFreeList, (IMG_PVOID)&sLookUp);
+	OSLockRelease(psDevInfo->hLockFreeList);
+
+	if (sLookUp.psFreeList)
+	{
+		RGX_FREELIST *psFreeList = sLookUp.psFreeList;
+
+		/* Try to grow the freelist */
+		eError = RGXGrowFreeList(psFreeList,
+								psFreeList->ui32GrowFLPages,
+								&psFreeList->sMemoryBlockHead);
+		if (eError == PVRSRV_OK)
+		{
+			/* Grow successful, return size of grow size */
+			ui32GrowValue = psFreeList->ui32GrowFLPages;
+
+			psFreeList->ui32NumGrowReqByFW++;
+
+ #if defined(PVRSRV_ENABLE_PROCESS_STATS)
+			/* Update Stats */
+			PVRSRVStatsUpdateFreelistStats(0,
+	                               1, /* Add 1 to the appropriate counter (Requests by FW) */
+	                               psFreeList->ui32InitFLPages,
+	                               psFreeList->ui32NumHighPages,
+	                               psFreeList->ownerPid);
+
+ #endif
+
+		}
+		else
+		{
+			/* Grow failed */
+			ui32GrowValue = 0;
+			PVR_DPF((PVR_DBG_ERROR,"Grow for FreeList %p failed (error %u)",
+									psFreeList,
+									eError));
+		}
+
+		/* send feedback */
+		s3DCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_FREELIST_GROW_UPDATE;
+		s3DCCBCmd.uCmdData.sFreeListGSData.psFreeListFWDevVAddr = sLookUp.psFreeList->sFreeListFWDevVAddr.ui32Addr;
+		s3DCCBCmd.uCmdData.sFreeListGSData.ui32DeltaSize = ui32GrowValue;
+		s3DCCBCmd.uCmdData.sFreeListGSData.ui32NewSize = psFreeList->ui32CurrentFLPages;
+
+		LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+		{
+			eError = RGXScheduleCommand(psDevInfo,
+												RGXFWIF_DM_3D,
+												&s3DCCBCmd,
+												sizeof(s3DCCBCmd),
+												IMG_FALSE);
+			if (eError != PVRSRV_ERROR_RETRY)
+			{
+				break;
+			}
+			OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+		} END_LOOP_UNTIL_TIMEOUT();
+		/* Kernel CCB should never fill up, as the FW is processing them right away  */
+
+		PVR_ASSERT(eError == PVRSRV_OK);
+	}
+	else
+	{
+		/* Should never happen */
+		PVR_DPF((PVR_DBG_ERROR,"FreeList Lookup for FreeList ID 0x%08x failed (Populate)", sLookUp.ui32FreeListID));
+		PVR_ASSERT(IMG_FALSE);
+	}
+}
+
+static IMG_BOOL _RGXCheckFreeListReconstruction(PDLLIST_NODE psNode, IMG_PVOID pvCallbackData)
+{
+
+	PVRSRV_RGXDEV_INFO 		*psDevInfo;
+	RGX_FREELIST			*psFreeList;
+	RGX_PMR_NODE			*psPMRNode;
+	PVRSRV_ERROR			eError;
+	IMG_DEVMEM_OFFSET_T		uiOffset;
+	IMG_DEVMEM_SIZE_T		uiLength;
+	IMG_UINT32				ui32StartPage;
+	IMG_UINT64				ui64CheckSum;
+
+	psPMRNode = IMG_CONTAINER_OF(psNode, RGX_PMR_NODE, sMemoryBlock);
+	psFreeList = psPMRNode->psFreeList;
+	PVR_ASSERT(psFreeList);
+	psDevInfo = psFreeList->psDevInfo;
+	PVR_ASSERT(psDevInfo);
+
+	uiLength = psPMRNode->ui32NumPages * sizeof(IMG_UINT32);
+	ui32StartPage = (psFreeList->ui32MaxFLPages - psFreeList->ui32CurrentFLPages - psPMRNode->ui32NumPages);
+	uiOffset = psFreeList->uiFreeListPMROffset + (ui32StartPage * sizeof(IMG_UINT32));
+
+	PMRUnwritePMPageList(psPMRNode->psPageList);
+	psPMRNode->psPageList = IMG_NULL;
+	eError = PMRWritePMPageList(
+						/* Target PMR, offset, and length */
+						psFreeList->psFreeListPMR,
+						uiOffset,
+						uiLength,
+						/* Referenced PMR, and "page" granularity */
+						psPMRNode->psPMR,
+						RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT,
+						&psPMRNode->psPageList,
+						&ui64CheckSum);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"Error (%u) writing FL 0x%08x", eError, (IMG_UINT32)psFreeList->ui32FreelistID));
+	}
+
+	/* Zeroing physical pages pointed by the reconstructed freelist */
+	if (psDevInfo->ui32DeviceFlags & RGXKM_DEVICE_STATE_ZERO_FREELIST)
+	{
+		eError = PMRZeroingPMR(psPMRNode->psPMR, RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"_RGXCheckFreeListReconstruction: Failed to zero PMR %p of freelist %p with Error %d",
+									psPMRNode->psPMR,
+									psFreeList,
+									eError));
+			PVR_ASSERT(0);
+		}
+	}
+
+	psFreeList->ui32CurrentFLPages += psPMRNode->ui32NumPages;
+
+	return IMG_TRUE;
+}
+
+IMG_VOID RGXProcessRequestFreelistsReconstruction(PVRSRV_RGXDEV_INFO *psDevInfo,
+								RGXFWIF_DM eDM,
+								IMG_UINT32 ui32FreelistsCount,
+								IMG_UINT32 *paui32Freelists)
+{
+	PVRSRV_ERROR eError;
+	DEVMEM_FREELIST_LOOKUP sLookUp;
+	IMG_UINT32 ui32Loop, ui32Loop2;
+	RGXFWIF_KCCB_CMD s3DCCBCmd;
+	IMG_UINT64 ui64CheckSum;
+	
+	PVR_ASSERT(psDevInfo);
+
+	//PVR_DPF((PVR_DBG_ERROR,"FreeList RECONSTRUCTION: Reconstructing %u freelist(s)", ui32FreelistsCount));
+	
+	for (ui32Loop = 0; ui32Loop < ui32FreelistsCount; ui32Loop++)
+	{
+		/* check if there is more than one occurrence of FL on the list */	
+		for (ui32Loop2 = ui32Loop + 1; ui32Loop2 < ui32FreelistsCount; ui32Loop2++)
+		{
+			if (paui32Freelists[ui32Loop] == paui32Freelists[ui32Loop2])
+			{
+				/* There is a duplicate on a list, skip current Freelist */
+				break;
+			}
+		}
+
+		if (ui32Loop2 < ui32FreelistsCount)
+		{
+			/* There is a duplicate on the list, skip current Freelist */
+			continue;
+		}
+
+		/* find the freelist with the corresponding ID */
+		sLookUp.ui32FreeListID = paui32Freelists[ui32Loop];
+		sLookUp.psFreeList = IMG_NULL;
+	
+		//PVR_DPF((PVR_DBG_ERROR,"FreeList RECONSTRUCTION: Looking for freelist %08X", (IMG_UINT32)sLookUp.ui32FreeListID));
+		OSLockAcquire(psDevInfo->hLockFreeList);
+		//PVR_DPF((PVR_DBG_ERROR,"FreeList RECONSTRUCTION: Freelist head %08X", (IMG_UINT32)&psDevInfo->sFreeListHead));
+		dllist_foreach_node(&psDevInfo->sFreeListHead, _FindFreeList, (IMG_PVOID)&sLookUp);
+		OSLockRelease(psDevInfo->hLockFreeList);
+
+		if (sLookUp.psFreeList)
+		{
+			RGX_FREELIST *psFreeList = sLookUp.psFreeList;
+
+			//PVR_DPF((PVR_DBG_ERROR,"FreeList RECONSTRUCTION: Reconstructing freelist %08X", (IMG_UINT32)psFreeList));
+		
+			/* Do the FreeList Reconstruction */
+				
+			psFreeList->ui32CurrentFLPages = 0;
+
+			/* Reconstructing Init FreeList pages */
+			dllist_foreach_node(&psFreeList->sMemoryBlockInitHead,
+							_RGXCheckFreeListReconstruction, 
+							IMG_NULL);
+
+			/* Reconstructing Grow FreeList pages */
+			dllist_foreach_node(&psFreeList->sMemoryBlockHead,
+							_RGXCheckFreeListReconstruction, 
+							IMG_NULL);
+
+			if (psFreeList->bCheckFreelist)
+			{
+				/* Get Freelist checksum (as the list is fully populated) */
+				eError = _FreeListCheckSum(psFreeList,
+											&ui64CheckSum);
+				if (eError != PVRSRV_OK)
+				{
+					PVR_DPF((PVR_DBG_ERROR,
+							 "RGXProcessRequestFreelistsReconstruction: Failed to get freelist checksum Node %p",
+							 psFreeList));
+					sleep_for_ever();
+//					PVR_ASSERT(0);
+				}
+
+				/* Verify checksum with previous value */
+				if (psFreeList->ui64FreelistChecksum != ui64CheckSum)
+				{
+					PVR_DPF((PVR_DBG_ERROR, "RGXProcessRequestFreelistsReconstruction: Freelist [%p] checksum failed: before reconstruction = 0x%016llx, after reconstruction = 0x%016llx",
+											psFreeList,
+											psFreeList->ui64FreelistChecksum,
+											ui64CheckSum));
+					sleep_for_ever();
+					//PVR_ASSERT(0);
+				}
+			}
+
+			eError = PVRSRV_OK;
+
+			if (eError == PVRSRV_OK)
+			{
+				/* Freelist reconstruction successful */
+				s3DCCBCmd.uCmdData.sFreeListsReconstructionData.aui32FreelistIDs[ui32Loop] = 
+													paui32Freelists[ui32Loop];
+			}
+			else
+			{
+				/* Freelist reconstruction failed */
+				s3DCCBCmd.uCmdData.sFreeListsReconstructionData.aui32FreelistIDs[ui32Loop] = 
+													paui32Freelists[ui32Loop] | RGXFWIF_FREELISTS_RECONSTRUCTION_FAILED_FLAG;
+				
+				PVR_DPF((PVR_DBG_ERROR,"Reconstructing of FreeList %p failed (error %u)",
+										psFreeList,
+										eError));
+			}
+		}
+		else
+		{
+			/* Should never happen */
+			PVR_DPF((PVR_DBG_ERROR,"FreeList Lookup for FreeList ID 0x%08x failed (Freelist reconstruction)", sLookUp.ui32FreeListID));
+			PVR_ASSERT(IMG_FALSE);
+		}
+	}
+
+	/* send feedback */
+	s3DCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE;
+	s3DCCBCmd.uCmdData.sFreeListsReconstructionData.ui32FreelistsCount = ui32FreelistsCount;
+
+	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+	{
+		eError = RGXScheduleCommand(psDevInfo,
+											eDM,
+											&s3DCCBCmd,
+											sizeof(s3DCCBCmd),
+											IMG_FALSE);
+		if (eError != PVRSRV_ERROR_RETRY)
+		{
+			break;
+		}
+		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+	} END_LOOP_UNTIL_TIMEOUT();
+
+	/* Kernel CCB should never fill up, as the FW is processing them right away  */
+	PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+/* Create HWRTDataSet */
+IMG_EXPORT
+PVRSRV_ERROR RGXCreateHWRTData(PVRSRV_DEVICE_NODE	*psDeviceNode,
+							   IMG_UINT32			psRenderTarget, /* FIXME this should not be IMG_UINT32 */
+							   IMG_DEV_VIRTADDR		psPMMListDevVAddr,
+							   IMG_DEV_VIRTADDR		psVFPPageTableAddr,
+							   RGX_FREELIST			*apsFreeLists[RGXFW_MAX_FREELISTS],
+							   RGX_RTDATA_CLEANUP_DATA	**ppsCleanupData,
+							   DEVMEM_MEMDESC		**ppsRTACtlMemDesc,
+							   IMG_UINT32           ui32PPPScreen,
+							   IMG_UINT32           ui32PPPGridOffset,
+							   IMG_UINT64           ui64PPPMultiSampleCtl,
+							   IMG_UINT32           ui32TPCStride,
+							   IMG_DEV_VIRTADDR		sTailPtrsDevVAddr,
+							   IMG_UINT32           ui32TPCSize,
+							   IMG_UINT32           ui32TEScreen,
+							   IMG_UINT32           ui32TEAA,
+							   IMG_UINT32           ui32TEMTILE1,
+							   IMG_UINT32           ui32TEMTILE2,
+							   IMG_UINT32           ui32MTileStride,
+							   IMG_UINT32                 ui32ISPMergeLowerX,
+							   IMG_UINT32                 ui32ISPMergeLowerY,
+							   IMG_UINT32                 ui32ISPMergeUpperX,
+							   IMG_UINT32                 ui32ISPMergeUpperY,
+							   IMG_UINT32                 ui32ISPMergeScaleX,
+							   IMG_UINT32                 ui32ISPMergeScaleY,
+							   IMG_UINT16			ui16MaxRTs,
+							   DEVMEM_MEMDESC		**ppsMemDesc,
+							   IMG_UINT32			*puiHWRTData)
+{
+	PVRSRV_ERROR eError;
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+	RGXFWIF_DEV_VIRTADDR pFirmwareAddr;
+	RGXFWIF_HWRTDATA *psHWRTData;
+	RGXFWIF_RTA_CTL *psRTACtl;
+	IMG_UINT32 ui32Loop;
+	RGX_RTDATA_CLEANUP_DATA *psTmpCleanup;
+
+	/* Prepare cleanup struct */
+	psTmpCleanup = OSAllocMem(sizeof(*psTmpCleanup));
+	if (psTmpCleanup == IMG_NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto AllocError;
+	}
+
+	OSMemSet(psTmpCleanup, 0, sizeof(*psTmpCleanup));
+	*ppsCleanupData = psTmpCleanup;
+
+	/* Allocate cleanup sync */
+	eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+						   &psTmpCleanup->psCleanupSync,
+						   "HWRTData cleanup");
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXCreateHWRTData: Failed to allocate cleanup sync (0x%x)",
+				eError));
+		goto SyncAlloc;
+	}
+
+	psDevInfo = psDeviceNode->pvDevice;
+
+	/*
+	 * This FW RT-Data is only mapped into kernel for initialisation.
+	 * Otherwise this allocation is only used by the FW.
+	 * Therefore the GPU cache doesn't need coherency,
+	 * and write-combine is suffice on the CPU side (WC buffer will be flushed at the first TA-kick)
+	 */
+	eError = DevmemFwAllocate(psDevInfo,
+							sizeof(RGXFWIF_HWRTDATA),
+							PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+							PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(META_CACHED) |
+							PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+							PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+							PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+							PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+							PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+							PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+							PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
+							PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE,
+							"FirmwareHWRTData",
+							ppsMemDesc);
+	if (eError != PVRSRV_OK) 
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXCreateHWRTData: DevmemAllocate for RGX_FWIF_HWRTDATA failed"));
+		goto FWRTDataAllocateError;
+	}
+
+	psTmpCleanup->psDeviceNode = psDeviceNode;
+	psTmpCleanup->psFWHWRTDataMemDesc = *ppsMemDesc;
+
+	RGXSetFirmwareAddress(&pFirmwareAddr, *ppsMemDesc, 0, RFW_FWADDR_FLAG_NONE);
+
+	*puiHWRTData = pFirmwareAddr.ui32Addr;
+
+	eError = DevmemAcquireCpuVirtAddr(*ppsMemDesc, (IMG_VOID **)&psHWRTData);
+	PVR_LOGG_IF_ERROR(eError, "Devmem AcquireCpuVirtAddr", FWRTDataCpuMapError);
+
+	/* FIXME: MList is something that that PM writes physical addresses to,
+	 * so ideally its best allocated in kernel */
+	psHWRTData->psPMMListDevVAddr = psPMMListDevVAddr;
+	psHWRTData->psParentRenderTarget.ui32Addr = psRenderTarget;
+	#if defined(SUPPORT_VFP)
+	psHWRTData->sVFPPageTableAddr = psVFPPageTableAddr;
+	#endif
+
+	psHWRTData->ui32PPPScreen         = ui32PPPScreen;
+	psHWRTData->ui32PPPGridOffset     = ui32PPPGridOffset;
+	psHWRTData->ui64PPPMultiSampleCtl = ui64PPPMultiSampleCtl;
+	psHWRTData->ui32TPCStride         = ui32TPCStride;
+	psHWRTData->sTailPtrsDevVAddr     = sTailPtrsDevVAddr;
+	psHWRTData->ui32TPCSize           = ui32TPCSize;
+	psHWRTData->ui32TEScreen          = ui32TEScreen;
+	psHWRTData->ui32TEAA              = ui32TEAA;
+	psHWRTData->ui32TEMTILE1          = ui32TEMTILE1;
+	psHWRTData->ui32TEMTILE2          = ui32TEMTILE2;
+	psHWRTData->ui32MTileStride       = ui32MTileStride;
+	psHWRTData->ui32ISPMergeLowerX = ui32ISPMergeLowerX;
+	psHWRTData->ui32ISPMergeLowerY = ui32ISPMergeLowerY;
+	psHWRTData->ui32ISPMergeUpperX = ui32ISPMergeUpperX;
+	psHWRTData->ui32ISPMergeUpperY = ui32ISPMergeUpperY;
+	psHWRTData->ui32ISPMergeScaleX = ui32ISPMergeScaleX;
+	psHWRTData->ui32ISPMergeScaleY = ui32ISPMergeScaleY;
+
+	OSLockAcquire(psDevInfo->hLockFreeList);
+	for (ui32Loop = 0; ui32Loop < RGXFW_MAX_FREELISTS; ui32Loop++)
+	{
+		psTmpCleanup->apsFreeLists[ui32Loop] = apsFreeLists[ui32Loop];
+		psTmpCleanup->apsFreeLists[ui32Loop]->ui32RefCount++;
+		psHWRTData->apsFreeLists[ui32Loop] = *((PRGXFWIF_FREELIST *)&(psTmpCleanup->apsFreeLists[ui32Loop]->sFreeListFWDevVAddr.ui32Addr)); /* FIXME: Fix pointer type casting */
+		/* invalid initial snapshot value, the snapshot is always taken during first kick
+		 * and hence the value get replaced during the first kick anyway. So its safe to set it 0.
+		*/
+		psHWRTData->aui32FreeListHWRSnapshot[ui32Loop] = 0;
+	}
+	OSLockRelease(psDevInfo->hLockFreeList);
+	
+	PDUMPCOMMENT("Allocate RGXFW RTA control");
+	eError = DevmemFwAllocate(psDevInfo,
+										sizeof(RGXFWIF_RTA_CTL),
+										PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+										PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(META_CACHED) |
+										PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+										PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+										PVRSRV_MEMALLOCFLAG_UNCACHED |
+										PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC,
+										"FirmwareRTAControl",
+										ppsRTACtlMemDesc);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXCreateHWRTData: Failed to allocate RGX RTA control (%u)",
+				eError));
+		goto FWRTAAllocateError;
+	}
+	psTmpCleanup->psRTACtlMemDesc = *ppsRTACtlMemDesc;
+	RGXSetFirmwareAddress(&psHWRTData->psRTACtl,
+								   *ppsRTACtlMemDesc,
+								   0, RFW_FWADDR_FLAG_NONE);
+	
+	eError = DevmemAcquireCpuVirtAddr(*ppsRTACtlMemDesc, (IMG_VOID **)&psRTACtl);
+	PVR_LOGG_IF_ERROR(eError, "Devmem AcquireCpuVirtAddr", FWRTACpuMapError);
+	psRTACtl->ui32RenderTargetIndex = 0;
+	psRTACtl->ui32ActiveRenderTargets = 0;
+
+	if (ui16MaxRTs > 1)
+	{
+		/* Allocate memory for the checks */
+		PDUMPCOMMENT("Allocate memory for shadow render target cache");
+		eError = DevmemFwAllocate(psDevInfo,
+								ui16MaxRTs * sizeof(IMG_UINT32),
+								PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+								PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(META_CACHED) |
+								PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+								PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+								PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+								PVRSRV_MEMALLOCFLAG_UNCACHED|
+								PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC,
+								"FirmwareShadowRTCache",
+								&psTmpCleanup->psRTArrayMemDesc);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"RGXCreateHWRTData: Failed to allocate %d bytes for render target array (%u)",
+				ui16MaxRTs, eError));
+			goto FWAllocateRTArryError;
+		}
+
+		RGXSetFirmwareAddress(&psRTACtl->paui32ValidRenderTargets,
+										psTmpCleanup->psRTArrayMemDesc,
+										0, RFW_FWADDR_FLAG_NONE);
+
+		/* Allocate memory for the checks */
+		PDUMPCOMMENT("Allocate memory for tracking renders accumulation");
+		eError = DevmemFwAllocate(psDevInfo,
+                                                        ui16MaxRTs * sizeof(IMG_UINT32),
+                                                        PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+                                                        PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(META_CACHED) |
+                                                        PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+                                                        PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+                                                        PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+                                                        PVRSRV_MEMALLOCFLAG_UNCACHED|
+                                                        PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC,
+                                                        "FirmwareRendersAccumulation",
+                                                        &psTmpCleanup->psRendersAccArrayMemDesc);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"RGXCreateHWRTData: Failed to allocate %d bytes for render target array (%u) (renders accumulation)",
+						  ui16MaxRTs, eError));
+			goto FWAllocateRTAccArryError;
+		}
+
+		RGXSetFirmwareAddress(&psRTACtl->paui32NumRenders,
+                                                psTmpCleanup->psRendersAccArrayMemDesc,
+                                                0, RFW_FWADDR_FLAG_NONE);
+		psRTACtl->ui16MaxRTs = ui16MaxRTs;
+	}
+	else
+	{
+		psRTACtl->paui32ValidRenderTargets.ui32Addr = 0;
+		psRTACtl->paui32NumRenders.ui32Addr = 0;
+		psRTACtl->ui16MaxRTs = 1;
+	}
+		
+	PDUMPCOMMENT("Dump HWRTData 0x%08X", *puiHWRTData);
+	DevmemPDumpLoadMem(*ppsMemDesc, 0, sizeof(*psHWRTData), PDUMP_FLAGS_CONTINUOUS);
+	PDUMPCOMMENT("Dump RTACtl");
+	DevmemPDumpLoadMem(*ppsRTACtlMemDesc, 0, sizeof(*psRTACtl), PDUMP_FLAGS_CONTINUOUS);
+
+	DevmemReleaseCpuVirtAddr(*ppsMemDesc);
+	DevmemReleaseCpuVirtAddr(*ppsRTACtlMemDesc);
+	return PVRSRV_OK;
+
+	DevmemFwFree(psTmpCleanup->psRendersAccArrayMemDesc);
+FWAllocateRTAccArryError:
+	DevmemFwFree(psTmpCleanup->psRTArrayMemDesc);
+FWAllocateRTArryError:
+	DevmemReleaseCpuVirtAddr(*ppsRTACtlMemDesc);
+FWRTACpuMapError:
+	RGXUnsetFirmwareAddress(*ppsRTACtlMemDesc);
+	DevmemFwFree(*ppsRTACtlMemDesc);
+FWRTAAllocateError:
+	OSLockAcquire(psDevInfo->hLockFreeList);
+	for (ui32Loop = 0; ui32Loop < RGXFW_MAX_FREELISTS; ui32Loop++)
+	{
+		PVR_ASSERT(psTmpCleanup->apsFreeLists[ui32Loop]->ui32RefCount > 0);
+		psTmpCleanup->apsFreeLists[ui32Loop]->ui32RefCount--;
+	}
+	OSLockRelease(psDevInfo->hLockFreeList);
+	DevmemReleaseCpuVirtAddr(*ppsMemDesc);
+FWRTDataCpuMapError:
+	RGXUnsetFirmwareAddress(*ppsMemDesc);
+	DevmemFwFree(*ppsMemDesc);
+FWRTDataAllocateError:
+	SyncPrimFree(psTmpCleanup->psCleanupSync);
+SyncAlloc:
+	OSFreeMem(psTmpCleanup);
+
+AllocError:
+	return eError;
+}
+
+/* Destroy HWRTDataSet */
+IMG_EXPORT
+PVRSRV_ERROR RGXDestroyHWRTData(RGX_RTDATA_CLEANUP_DATA *psCleanupData)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+	PVRSRV_ERROR eError;
+	PRGXFWIF_HWRTDATA psHWRTData;
+	IMG_UINT32 ui32Loop;
+
+	PVR_ASSERT(psCleanupData);
+
+	RGXSetFirmwareAddress(&psHWRTData, psCleanupData->psFWHWRTDataMemDesc, 0, RFW_FWADDR_NOREF_FLAG);
+
+	/* Cleanup HWRTData in TA */
+	eError = RGXFWRequestHWRTDataCleanUp(psCleanupData->psDeviceNode,
+										 psHWRTData,
+										 psCleanupData->psCleanupSync,
+										 RGXFWIF_DM_TA);
+	if (eError == PVRSRV_ERROR_RETRY)
+	{
+		return eError;
+	}
+
+	psDevInfo = psCleanupData->psDeviceNode->pvDevice;
+
+	/* Cleanup HWRTData in 3D */
+	eError = RGXFWRequestHWRTDataCleanUp(psCleanupData->psDeviceNode,
+										 psHWRTData,
+										 psCleanupData->psCleanupSync,
+										 RGXFWIF_DM_3D);
+	if (eError == PVRSRV_ERROR_RETRY)
+	{
+		return eError;
+	}
+
+	/* If we got here then TA and 3D operations on this RTData have finished */
+	if(psCleanupData->psRTACtlMemDesc)
+	{
+		RGXUnsetFirmwareAddress(psCleanupData->psRTACtlMemDesc);
+		DevmemFwFree(psCleanupData->psRTACtlMemDesc);
+	}
+	
+	RGXUnsetFirmwareAddress(psCleanupData->psFWHWRTDataMemDesc);
+	DevmemFwFree(psCleanupData->psFWHWRTDataMemDesc);
+	
+	if(psCleanupData->psRTArrayMemDesc)
+	{
+		RGXUnsetFirmwareAddress(psCleanupData->psRTArrayMemDesc);
+		DevmemFwFree(psCleanupData->psRTArrayMemDesc);
+	}
+	if(psCleanupData->psRendersAccArrayMemDesc)
+	{
+		RGXUnsetFirmwareAddress(psCleanupData->psRendersAccArrayMemDesc);
+		DevmemFwFree(psCleanupData->psRendersAccArrayMemDesc);
+	}
+
+
+	SyncPrimFree(psCleanupData->psCleanupSync);
+
+	/* decrease freelist refcount */
+	OSLockAcquire(psDevInfo->hLockFreeList);
+	for (ui32Loop = 0; ui32Loop < RGXFW_MAX_FREELISTS; ui32Loop++)
+	{
+		PVR_ASSERT(psCleanupData->apsFreeLists[ui32Loop]->ui32RefCount > 0);
+		psCleanupData->apsFreeLists[ui32Loop]->ui32RefCount--;
+	}
+	OSLockRelease(psDevInfo->hLockFreeList);
+
+	OSFreeMem(psCleanupData);
+
+	return PVRSRV_OK;
+}
+
+IMG_EXPORT
+PVRSRV_ERROR RGXCreateFreeList(PVRSRV_DEVICE_NODE	*psDeviceNode, 
+							   IMG_UINT32			ui32MaxFLPages,
+							   IMG_UINT32			ui32InitFLPages,
+							   IMG_UINT32			ui32GrowFLPages,
+							   IMG_BOOL				bCheckFreelist,
+							   IMG_DEV_VIRTADDR		sFreeListDevVAddr,
+							   PMR					*psFreeListPMR,
+							   IMG_DEVMEM_OFFSET_T	uiFreeListPMROffset,
+							   RGX_FREELIST			**ppsFreeList)
+{
+	PVRSRV_ERROR				eError;
+	RGXFWIF_FREELIST			*psFWFreeList;
+	DEVMEM_MEMDESC				*psFWFreelistMemDesc;
+	RGX_FREELIST				*psFreeList;
+	PVRSRV_RGXDEV_INFO			*psDevInfo = psDeviceNode->pvDevice;
+
+	/* Allocate kernel freelist struct */
+	psFreeList = OSAllocMem(sizeof(*psFreeList));
+	if (psFreeList == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXCreateFreeList: failed to allocate host data structure"));
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto ErrorAllocHost;
+	}
+	OSMemSet(psFreeList, 0, sizeof(*psFreeList));
+
+	/* Allocate cleanup sync */
+	eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+						   &psFreeList->psCleanupSync,
+						   "ta3d free list cleanup");
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXCreateFreeList: Failed to allocate cleanup sync (0x%x)",
+				eError));
+		goto SyncAlloc;
+	}
+
+	/*
+	 * This FW FreeList context is only mapped into kernel for initialisation.
+	 * Otherwise this allocation is only used by the FW.
+	 * Therefore the GPU cache doesn't need coherency,
+	 * and write-combine is suffice on the CPU side (WC buffer will be flushed at the first TA-kick)
+	 */
+	eError = DevmemFwAllocate(psDevInfo,
+							sizeof(*psFWFreeList),
+							PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+							PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+							PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+							PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+							PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+							PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+							PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+							PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
+							PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE,
+							"FirmwareFreeList",
+							&psFWFreelistMemDesc);
+	if (eError != PVRSRV_OK) 
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXCreateFreeList: DevmemAllocate for RGXFWIF_FREELIST failed"));
+		goto FWFreeListAlloc;
+	}
+
+	/* Initialise host data structures */
+	psFreeList->psDevInfo = psDevInfo;
+	psFreeList->psFreeListPMR = psFreeListPMR;
+	psFreeList->uiFreeListPMROffset = uiFreeListPMROffset;
+	psFreeList->psFWFreelistMemDesc = psFWFreelistMemDesc;
+	RGXSetFirmwareAddress(&psFreeList->sFreeListFWDevVAddr, psFWFreelistMemDesc, 0, RFW_FWADDR_FLAG_NONE);
+	psFreeList->ui32FreelistID = psDevInfo->ui32FreelistCurrID++;
+	psFreeList->ui32MaxFLPages = ui32MaxFLPages;
+	psFreeList->ui32InitFLPages = ui32InitFLPages;
+	psFreeList->ui32GrowFLPages = ui32GrowFLPages;
+	psFreeList->ui32CurrentFLPages = 0;
+	psFreeList->ui64FreelistChecksum = 0;
+	psFreeList->ui32RefCount = 0;
+	psFreeList->bCheckFreelist = bCheckFreelist;
+	dllist_init(&psFreeList->sMemoryBlockHead);
+	dllist_init(&psFreeList->sMemoryBlockInitHead);
+
+
+	/* Add to list of freelists */
+	OSLockAcquire(psDevInfo->hLockFreeList);
+	dllist_add_to_tail(&psDevInfo->sFreeListHead, &psFreeList->sNode);
+	OSLockRelease(psDevInfo->hLockFreeList);
+
+
+	/* Initialise FW data structure */
+	eError = DevmemAcquireCpuVirtAddr(psFreeList->psFWFreelistMemDesc, (IMG_VOID **)&psFWFreeList);
+	PVR_LOGG_IF_ERROR(eError, "Devmem AcquireCpuVirtAddr", FWFreeListCpuMap);
+
+	psFWFreeList->ui32MaxPages = ui32MaxFLPages;
+	psFWFreeList->ui32CurrentPages = ui32InitFLPages;
+	psFWFreeList->ui32GrowPages = ui32GrowFLPages;
+	psFWFreeList->ui32CurrentStackTop = ui32InitFLPages - 1;
+	psFWFreeList->psFreeListDevVAddr = sFreeListDevVAddr;
+	psFWFreeList->ui64CurrentDevVAddr = sFreeListDevVAddr.uiAddr + ((ui32MaxFLPages - ui32InitFLPages) * sizeof(IMG_UINT32));
+	psFWFreeList->ui32FreeListID = psFreeList->ui32FreelistID;
+	psFWFreeList->bGrowPending = IMG_FALSE;
+
+	PVR_DPF((PVR_DBG_MESSAGE,"Freelist %p created: Max pages 0x%08x, Init pages 0x%08x, Max FL base address 0x%016llx, Init FL base address 0x%016llx",
+			psFreeList,
+			ui32MaxFLPages,
+			ui32InitFLPages,
+			sFreeListDevVAddr.uiAddr,
+			psFWFreeList->psFreeListDevVAddr.uiAddr));
+
+	PDUMPCOMMENT("Dump FW FreeList");
+	DevmemPDumpLoadMem(psFreeList->psFWFreelistMemDesc, 0, sizeof(*psFWFreeList), PDUMP_FLAGS_CONTINUOUS);
+
+	/*
+	 * Separate dump of the Freelist's number of Pages and stack pointer.
+	 * This allows to easily modify the PB size in the out2.txt files.
+	 */
+	PDUMPCOMMENT("FreeList TotalPages");
+	DevmemPDumpLoadMemValue32(psFreeList->psFWFreelistMemDesc,
+							offsetof(RGXFWIF_FREELIST, ui32CurrentPages),
+							psFWFreeList->ui32CurrentPages,
+							PDUMP_FLAGS_CONTINUOUS);
+	PDUMPCOMMENT("FreeList StackPointer");
+	DevmemPDumpLoadMemValue32(psFreeList->psFWFreelistMemDesc,
+							offsetof(RGXFWIF_FREELIST, ui32CurrentStackTop),
+							psFWFreeList->ui32CurrentStackTop,
+							PDUMP_FLAGS_CONTINUOUS);
+
+	DevmemReleaseCpuVirtAddr(psFreeList->psFWFreelistMemDesc);
+
+
+	/* Add initial PB block */
+	eError = RGXGrowFreeList(psFreeList,
+								ui32InitFLPages,
+								&psFreeList->sMemoryBlockInitHead);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"RGXCreateFreeList: failed to allocate initial memory block for free list 0x%016llx (error = %u)",
+				sFreeListDevVAddr.uiAddr,
+				eError));
+		goto FWFreeListCpuMap;
+	}
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+			/* Update Stats */
+			PVRSRVStatsUpdateFreelistStats(1, /* Add 1 to the appropriate counter (Requests by App)*/
+	                               0,
+	                               psFreeList->ui32InitFLPages,
+	                               psFreeList->ui32NumHighPages,
+	                               psFreeList->ownerPid);
+
+#endif
+
+	psFreeList->ownerPid = OSGetCurrentProcessID();
+	/* return values */
+	*ppsFreeList = psFreeList;
+
+	return PVRSRV_OK;
+
+	/* Error handling */
+
+FWFreeListCpuMap:
+	/* Remove freelists from list  */
+	OSLockAcquire(psDevInfo->hLockFreeList);
+	dllist_remove_node(&psFreeList->sNode);
+	OSLockRelease(psDevInfo->hLockFreeList);
+
+	RGXUnsetFirmwareAddress(psFWFreelistMemDesc);
+	DevmemFwFree(psFWFreelistMemDesc);
+
+FWFreeListAlloc:
+	SyncPrimFree(psFreeList->psCleanupSync);
+
+SyncAlloc:
+	OSFreeMem(psFreeList);
+
+ErrorAllocHost:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+
+/*
+	RGXDestroyFreeList
+*/
+IMG_EXPORT
+PVRSRV_ERROR RGXDestroyFreeList(RGX_FREELIST *psFreeList)
+{
+	PVRSRV_ERROR eError;
+	IMG_UINT64 ui64CheckSum;
+
+	PVR_ASSERT(psFreeList);
+
+	if (psFreeList->ui32RefCount != 0)
+	{
+		/* Freelist still busy */
+		return PVRSRV_ERROR_RETRY;
+	}
+
+	/* Freelist is not in use => start firmware cleanup */
+	eError = RGXFWRequestFreeListCleanUp(psFreeList->psDevInfo,
+										 psFreeList->sFreeListFWDevVAddr,
+										 psFreeList->psCleanupSync);
+	if(eError != PVRSRV_OK)
+	{
+		/* Can happen if the firmware took too long to handle the cleanup request,
+		 * or if SLC-flushes didn't went through (due to some GPU lockup) */
+		return eError;
+	}
+
+	if (psFreeList->bCheckFreelist)
+	{
+		/* Do consistency tests (as the list is fully populated) */
+		eError = _FreeListCheckSum(psFreeList,
+									&ui64CheckSum);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "RGXDestroyFreeList: Failed to get freelist checksum Node %p",
+					 psFreeList));
+			sleep_for_ever();
+//				PVR_ASSERT(0);
+		}
+
+		if (psFreeList->ui64FreelistChecksum != ui64CheckSum)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "RGXDestroyFreeList: Checksum mismatch [%p]! stored 0x%016llx, verified 0x%016llx %p",
+					 psFreeList,
+					 psFreeList->ui64FreelistChecksum,
+					 ui64CheckSum,
+					 psFreeList));
+			sleep_for_ever();
+//			PVR_ASSERT(0);
+		}
+	}
+
+	/* Destroy FW structures */
+	RGXUnsetFirmwareAddress(psFreeList->psFWFreelistMemDesc);
+	DevmemFwFree(psFreeList->psFWFreelistMemDesc);
+
+	/* Remove grow shrink blocks */
+	while (!dllist_is_empty(&psFreeList->sMemoryBlockHead))
+	{
+		eError = RGXShrinkFreeList(&psFreeList->sMemoryBlockHead, psFreeList);
+		PVR_ASSERT(eError == PVRSRV_OK);
+	}
+
+	/* Remove initial PB block */
+	eError = RGXShrinkFreeList(&psFreeList->sMemoryBlockInitHead, psFreeList);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	/* consistency checks */
+	PVR_ASSERT(dllist_is_empty(&psFreeList->sMemoryBlockInitHead));
+	PVR_ASSERT(psFreeList->ui32CurrentFLPages == 0);
+
+	/* Remove FreeList from list */
+	OSLockAcquire(psFreeList->psDevInfo->hLockFreeList);
+	dllist_remove_node(&psFreeList->sNode);
+	OSLockRelease(psFreeList->psDevInfo->hLockFreeList);
+
+	SyncPrimFree(psFreeList->psCleanupSync);
+
+	/* free Freelist */
+	OSFreeMem(psFreeList);
+
+	return eError;
+}
+
+
+
+/*
+	RGXAddBlockToFreeListKM
+*/
+
+IMG_EXPORT
+PVRSRV_ERROR RGXAddBlockToFreeListKM(RGX_FREELIST *psFreeList,
+										IMG_UINT32 ui32NumPages)
+{
+	PVRSRV_ERROR eError;
+
+	/* Check if we have reference to freelist's PMR */
+	if (psFreeList->psFreeListPMR == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,	"Freelist is not configured for grow"));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* grow freelist */
+	eError = RGXGrowFreeList(psFreeList,
+							ui32NumPages,
+							&psFreeList->sMemoryBlockHead);
+	if(eError == PVRSRV_OK)
+	{
+		/* update freelist data in firmware */
+		_UpdateFwFreelistSize(psFreeList, IMG_TRUE, ui32NumPages);
+
+		psFreeList->ui32NumGrowReqByApp++;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+			/* Update Stats */
+			PVRSRVStatsUpdateFreelistStats(1, /* Add 1 to the appropriate counter (Requests by App)*/
+	                               0,
+	                               psFreeList->ui32InitFLPages,
+	                               psFreeList->ui32NumHighPages,
+	                               psFreeList->ownerPid);
+
+#endif
+	}
+
+	return eError;
+}
+
+/*
+	RGXRemoveBlockFromFreeListKM
+*/
+
+IMG_EXPORT
+PVRSRV_ERROR RGXRemoveBlockFromFreeListKM(RGX_FREELIST *psFreeList)
+{
+	PVRSRV_ERROR eError;
+
+	/*
+	 * Make sure the pages part of the memory block are not in use anymore.
+	 * Instruct the firmware to update the freelist pointers accordingly.
+	 */
+
+	eError = RGXShrinkFreeList(&psFreeList->sMemoryBlockHead,
+								psFreeList);
+
+	return eError;
+}
+
+
+/*
+	RGXCreateRenderTarget
+*/
+IMG_EXPORT
+PVRSRV_ERROR RGXCreateRenderTarget(PVRSRV_DEVICE_NODE	*psDeviceNode, 
+								   IMG_DEV_VIRTADDR		psVHeapTableDevVAddr,
+								   RGX_RT_CLEANUP_DATA 	**ppsCleanupData,
+								   IMG_UINT32			*sRenderTargetFWDevVAddr)
+{
+	PVRSRV_ERROR			eError = PVRSRV_OK;
+	RGXFWIF_RENDER_TARGET	*psRenderTarget;
+	RGXFWIF_DEV_VIRTADDR	pFirmwareAddr;
+	PVRSRV_RGXDEV_INFO 		*psDevInfo = psDeviceNode->pvDevice;
+	RGX_RT_CLEANUP_DATA		*psCleanupData;
+
+	psCleanupData = OSAllocMem(sizeof(*psCleanupData));
+	if (psCleanupData == IMG_NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto err_out;
+	}
+
+	OSMemSet(psCleanupData, 0, sizeof(*psCleanupData));
+	psCleanupData->psDeviceNode = psDeviceNode;
+	/*
+	 * This FW render target context is only mapped into kernel for initialisation.
+	 * Otherwise this allocation is only used by the FW.
+	 * Therefore the GPU cache doesn't need coherency,
+	 * and write-combine is suffice on the CPU side (WC buffer will be flushed at the first TA-kick)
+	 */
+	eError = DevmemFwAllocate(psDevInfo,
+							sizeof(*psRenderTarget),
+							PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+							PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+							PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+							PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+							PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+							PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+							PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+							PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
+							PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE,
+							"FirmwareRenderTarget",
+							&psCleanupData->psRenderTargetMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXCreateRenderTarget: DevmemAllocate for Render Target failed"));
+		goto err_free;
+	}
+	RGXSetFirmwareAddress(&pFirmwareAddr, psCleanupData->psRenderTargetMemDesc, 0, RFW_FWADDR_FLAG_NONE);
+	*sRenderTargetFWDevVAddr = pFirmwareAddr.ui32Addr;
+
+	eError = DevmemAcquireCpuVirtAddr(psCleanupData->psRenderTargetMemDesc, (IMG_VOID **)&psRenderTarget);
+	PVR_LOGG_IF_ERROR(eError, "Devmem AcquireCpuVirtAddr", err_fwalloc);
+
+	psRenderTarget->psVHeapTableDevVAddr = psVHeapTableDevVAddr;
+	psRenderTarget->bTACachesNeedZeroing = IMG_FALSE;
+	PDUMPCOMMENT("Dump RenderTarget");
+	DevmemPDumpLoadMem(psCleanupData->psRenderTargetMemDesc, 0, sizeof(*psRenderTarget), PDUMP_FLAGS_CONTINUOUS);
+	DevmemReleaseCpuVirtAddr(psCleanupData->psRenderTargetMemDesc);
+
+	*ppsCleanupData = psCleanupData;
+
+err_out:
+	return eError;
+
+err_free:
+	OSFreeMem(psCleanupData);
+	goto err_out;
+
+err_fwalloc:
+	DevmemFwFree(psCleanupData->psRenderTargetMemDesc);
+	goto err_free;
+
+}
+
+
+/*
+	RGXDestroyRenderTarget
+*/
+IMG_EXPORT
+PVRSRV_ERROR RGXDestroyRenderTarget(RGX_RT_CLEANUP_DATA *psCleanupData)
+{
+	RGXUnsetFirmwareAddress(psCleanupData->psRenderTargetMemDesc);
+
+	/*
+		Note:
+		When we get RT cleanup in the FW call that instead
+	*/
+	/* Flush the the SLC before freeing */
+	{
+		RGXFWIF_KCCB_CMD sFlushInvalCmd;
+		PVRSRV_ERROR eError;
+		PVRSRV_DEVICE_NODE *psDeviceNode = psCleanupData->psDeviceNode;
+
+		/* Schedule the SLC flush command ... */
+#if defined(PDUMP)
+		PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Submit SLC flush and invalidate");
+#endif
+		sFlushInvalCmd.eCmdType = RGXFWIF_KCCB_CMD_SLCFLUSHINVAL;
+		sFlushInvalCmd.uCmdData.sSLCFlushInvalData.bInval = IMG_TRUE;
+		sFlushInvalCmd.uCmdData.sSLCFlushInvalData.bDMContext = IMG_FALSE;
+		sFlushInvalCmd.uCmdData.sSLCFlushInvalData.eDM = 0;
+		sFlushInvalCmd.uCmdData.sSLCFlushInvalData.psContext.ui32Addr = 0;
+
+		eError = RGXSendCommandWithPowLock(psDeviceNode->pvDevice,
+											RGXFWIF_DM_GP,
+											&sFlushInvalCmd,
+											sizeof(sFlushInvalCmd),
+											IMG_TRUE);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"RGXDestroyRenderTarget: Failed to schedule SLC flush command with error (%u)", eError));
+		}
+		else
+		{
+			/* Wait for the SLC flush to complete */
+			eError = RGXWaitForFWOp(psDeviceNode->pvDevice, RGXFWIF_DM_GP, psDeviceNode->psSyncPrim, IMG_TRUE);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,"RGXDestroyRenderTarget: SLC flush and invalidate aborted with error (%u)", eError));
+			}
+		}
+	}
+
+	DevmemFwFree(psCleanupData->psRenderTargetMemDesc);
+	OSFreeMem(psCleanupData);
+	return PVRSRV_OK;
+}
+
+/*
+	RGXCreateZSBuffer
+*/
+IMG_EXPORT
+PVRSRV_ERROR RGXCreateZSBufferKM(PVRSRV_DEVICE_NODE	*psDeviceNode,
+								DEVMEMINT_RESERVATION 	*psReservation,
+								PMR 					*psPMR,
+								PVRSRV_MEMALLOCFLAGS_T 	uiMapFlags,
+								RGX_ZSBUFFER_DATA **ppsZSBuffer,
+								IMG_UINT32 *pui32ZSBufferFWDevVAddr)
+{
+	PVRSRV_ERROR				eError;
+	PVRSRV_RGXDEV_INFO 			*psDevInfo = psDeviceNode->pvDevice;
+	RGXFWIF_FWZSBUFFER			*psFWZSBuffer;
+	RGX_ZSBUFFER_DATA			*psZSBuffer;
+	DEVMEM_MEMDESC				*psFWZSBufferMemDesc;
+	IMG_BOOL					bOnDemand = ((uiMapFlags & PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC) > 0);
+
+	/* Allocate host data structure */
+	psZSBuffer = OSAllocMem(sizeof(*psZSBuffer));
+	if (psZSBuffer == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXCreateZSBufferKM: Failed to allocate cleanup data structure for ZS-Buffer"));
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto ErrorAllocCleanup;
+	}
+	OSMemSet(psZSBuffer, 0, sizeof(*psZSBuffer));
+
+	eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+						   &psZSBuffer->psCleanupSync,
+						   "ta3d zs buffer cleanup");
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXCreateZSBufferKM: Failed to allocate cleanup sync (0x%x)",
+				eError));
+		goto ErrorSyncAlloc;
+	}
+
+	/* Populate Host data */
+	psZSBuffer->psDevInfo = psDevInfo;
+	psZSBuffer->psReservation = psReservation;
+	psZSBuffer->psPMR = psPMR;
+	psZSBuffer->uiMapFlags = uiMapFlags;
+	psZSBuffer->ui32RefCount = 0;
+	psZSBuffer->bOnDemand = bOnDemand;
+    if (bOnDemand)
+    {
+    	psZSBuffer->ui32ZSBufferID = psDevInfo->ui32ZSBufferCurrID++;
+    	psZSBuffer->psMapping = IMG_NULL;
+
+		OSLockAcquire(psDevInfo->hLockZSBuffer);
+    	dllist_add_to_tail(&psDevInfo->sZSBufferHead, &psZSBuffer->sNode);
+		OSLockRelease(psDevInfo->hLockZSBuffer);
+    }
+
+	/* Allocate firmware memory for ZS-Buffer. */
+	PDUMPCOMMENT("Allocate firmware ZS-Buffer data structure");
+	eError = DevmemFwAllocate(psDevInfo,
+							sizeof(*psFWZSBuffer),
+							PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+							PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+							PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+							PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+							PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+							PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+							PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+							PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
+							PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE,
+							"FirmwareZSBuffer",
+							&psFWZSBufferMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXCreateZSBufferKM: Failed to allocate firmware ZS-Buffer (%u)", eError));
+		goto ErrorAllocFWZSBuffer;
+	}
+	psZSBuffer->psZSBufferMemDesc = psFWZSBufferMemDesc;
+
+	/* Temporarily map the firmware render context to the kernel. */
+	eError = DevmemAcquireCpuVirtAddr(psFWZSBufferMemDesc,
+                                      (IMG_VOID **)&psFWZSBuffer);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"RGXCreateZSBufferKM: Failed to map firmware ZS-Buffer (%u)", eError));
+		goto ErrorAcquireFWZSBuffer;
+	}
+
+	/* Populate FW ZS-Buffer data structure */
+	psFWZSBuffer->bOnDemand = bOnDemand;
+	psFWZSBuffer->eState = (bOnDemand) ? RGXFWIF_ZSBUFFER_UNBACKED : RGXFWIF_ZSBUFFER_BACKED;
+	psFWZSBuffer->ui32ZSBufferID = psZSBuffer->ui32ZSBufferID;
+
+	/* Get firmware address of ZS-Buffer. */
+	RGXSetFirmwareAddress(&psZSBuffer->sZSBufferFWDevVAddr, psFWZSBufferMemDesc, 0, RFW_FWADDR_FLAG_NONE);
+
+	/* Dump the ZS-Buffer and the memory content */
+	PDUMPCOMMENT("Dump firmware ZS-Buffer");
+	DevmemPDumpLoadMem(psFWZSBufferMemDesc, 0, sizeof(*psFWZSBuffer), PDUMP_FLAGS_CONTINUOUS);
+
+	/* Release address acquired above. */
+	DevmemReleaseCpuVirtAddr(psFWZSBufferMemDesc);
+
+
+	/* define return value */
+	*ppsZSBuffer = psZSBuffer;
+	*pui32ZSBufferFWDevVAddr = psZSBuffer->sZSBufferFWDevVAddr.ui32Addr;
+
+	PVR_DPF((PVR_DBG_MESSAGE, "ZS-Buffer [%p] created (%s)",
+							psZSBuffer,
+							(bOnDemand) ? "On-Demand": "Up-front"));
+
+	psZSBuffer->owner=OSGetCurrentProcessID();
+
+	return PVRSRV_OK;
+
+	/* error handling */
+
+ErrorAcquireFWZSBuffer:
+	DevmemFwFree(psFWZSBufferMemDesc);
+
+ErrorAllocFWZSBuffer:
+	SyncPrimFree(psZSBuffer->psCleanupSync);
+
+ErrorSyncAlloc:
+	OSFreeMem(psZSBuffer);
+
+ErrorAllocCleanup:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+
+/*
+	RGXDestroyZSBuffer
+*/
+IMG_EXPORT
+PVRSRV_ERROR RGXDestroyZSBufferKM(RGX_ZSBUFFER_DATA *psZSBuffer)
+{
+	POS_LOCK hLockZSBuffer;
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(psZSBuffer);
+	hLockZSBuffer = psZSBuffer->psDevInfo->hLockZSBuffer;
+
+	/* Request ZS Buffer cleanup */
+	eError = RGXFWRequestZSBufferCleanUp(psZSBuffer->psDevInfo,
+										psZSBuffer->sZSBufferFWDevVAddr,
+										psZSBuffer->psCleanupSync);
+	if (eError != PVRSRV_ERROR_RETRY)
+	{
+		/* Free the firmware render context. */
+    	RGXUnsetFirmwareAddress(psZSBuffer->psZSBufferMemDesc);
+		DevmemFwFree(psZSBuffer->psZSBufferMemDesc);
+
+	    /* Remove Deferred Allocation from list */
+		if (psZSBuffer->bOnDemand)
+		{
+			OSLockAcquire(hLockZSBuffer);
+			PVR_ASSERT(dllist_node_is_in_list(&psZSBuffer->sNode));
+			dllist_remove_node(&psZSBuffer->sNode);
+			OSLockRelease(hLockZSBuffer);
+		}
+
+		SyncPrimFree(psZSBuffer->psCleanupSync);
+
+		PVR_ASSERT(psZSBuffer->ui32RefCount == 0);
+
+		PVR_DPF((PVR_DBG_MESSAGE,"ZS-Buffer [%p] destroyed",psZSBuffer));
+
+		/* Free ZS-Buffer host data structure */
+		OSFreeMem(psZSBuffer);
+
+	}
+
+	return eError;
+}
+
+PVRSRV_ERROR
+RGXBackingZSBuffer(RGX_ZSBUFFER_DATA *psZSBuffer)
+{
+	POS_LOCK hLockZSBuffer;
+	PVRSRV_ERROR eError;
+
+	if (!psZSBuffer)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	if ((psZSBuffer->uiMapFlags & PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC) == 0)
+	{
+		/* Only deferred allocations can be populated */
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	PVR_DPF((PVR_DBG_MESSAGE,"ZS Buffer [%p, ID=0x%08x]: Physical backing requested",
+								psZSBuffer,
+								psZSBuffer->ui32ZSBufferID));
+	hLockZSBuffer = psZSBuffer->psDevInfo->hLockZSBuffer;
+
+	OSLockAcquire(hLockZSBuffer);
+
+	if (psZSBuffer->ui32RefCount == 0)
+	{
+		if (psZSBuffer->bOnDemand)
+		{
+			IMG_HANDLE hDevmemHeap;
+
+			PVR_ASSERT(psZSBuffer->psMapping == IMG_NULL);
+
+			/* Get Heap */
+			eError = DevmemServerGetHeapHandle(psZSBuffer->psReservation, &hDevmemHeap);
+			PVR_ASSERT(psZSBuffer->psMapping == IMG_NULL);
+
+			eError = DevmemIntMapPMR(hDevmemHeap,
+									psZSBuffer->psReservation,
+									psZSBuffer->psPMR,
+									psZSBuffer->uiMapFlags,
+									&psZSBuffer->psMapping);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,"Unable populate ZS Buffer [%p, ID=0x%08x] with error %u",
+										psZSBuffer,
+										psZSBuffer->ui32ZSBufferID,
+										eError));
+				OSLockRelease(hLockZSBuffer);
+				return eError;
+
+			}
+			PVR_DPF((PVR_DBG_MESSAGE, "ZS Buffer [%p, ID=0x%08x]: Physical backing acquired",
+										psZSBuffer,
+										psZSBuffer->ui32ZSBufferID));
+		}
+	}
+
+	/* Increase refcount*/
+	psZSBuffer->ui32RefCount++;
+
+	OSLockRelease(hLockZSBuffer);
+
+	return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+RGXPopulateZSBufferKM(RGX_ZSBUFFER_DATA *psZSBuffer,
+					RGX_POPULATION **ppsPopulation)
+{
+	RGX_POPULATION *psPopulation;
+	PVRSRV_ERROR eError;
+
+	psZSBuffer->ui32NumReqByApp++;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+	PVRSRVStatsUpdateZSBufferStats(1,0,psZSBuffer->owner);
+#endif
+
+	/* Do the backing */
+	eError = RGXBackingZSBuffer(psZSBuffer);
+	if (eError != PVRSRV_OK)
+	{
+		goto OnErrorBacking;
+	}
+
+	/* Create the handle to the backing */
+	psPopulation = OSAllocMem(sizeof(*psPopulation));
+	if (psPopulation == IMG_NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto OnErrorAlloc;
+	}
+
+	psPopulation->psZSBuffer = psZSBuffer;
+
+	/* return value */
+	*ppsPopulation = psPopulation;
+
+	return PVRSRV_OK;
+
+OnErrorAlloc:
+	RGXUnbackingZSBuffer(psZSBuffer);
+
+OnErrorBacking:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+PVRSRV_ERROR
+RGXUnbackingZSBuffer(RGX_ZSBUFFER_DATA *psZSBuffer)
+{
+	POS_LOCK hLockZSBuffer;
+	PVRSRV_ERROR eError;
+
+	if (!psZSBuffer)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	PVR_ASSERT(psZSBuffer->ui32RefCount);
+
+	PVR_DPF((PVR_DBG_MESSAGE,"ZS Buffer [%p, ID=0x%08x]: Physical backing removal requested",
+								psZSBuffer,
+								psZSBuffer->ui32ZSBufferID));
+
+	hLockZSBuffer = psZSBuffer->psDevInfo->hLockZSBuffer;
+
+	OSLockAcquire(hLockZSBuffer);
+
+	if (psZSBuffer->bOnDemand)
+	{
+		if (psZSBuffer->ui32RefCount == 1)
+		{
+			PVR_ASSERT(psZSBuffer->psMapping);
+
+			eError = DevmemIntUnmapPMR(psZSBuffer->psMapping);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR,"Unable to unpopulate ZS Buffer [%p, ID=0x%08x] with error %u",
+										psZSBuffer,
+										psZSBuffer->ui32ZSBufferID,
+										eError));
+				OSLockRelease(hLockZSBuffer);
+				return eError;
+			}
+
+			PVR_DPF((PVR_DBG_MESSAGE, "ZS Buffer [%p, ID=0x%08x]: Physical backing removed",
+										psZSBuffer,
+										psZSBuffer->ui32ZSBufferID));
+		}
+	}
+
+	/* Decrease refcount*/
+	psZSBuffer->ui32RefCount--;
+
+	OSLockRelease(hLockZSBuffer);
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+RGXUnpopulateZSBufferKM(RGX_POPULATION *psPopulation)
+{
+	PVRSRV_ERROR eError;
+
+	if (!psPopulation)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	eError = RGXUnbackingZSBuffer(psPopulation->psZSBuffer);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	OSFreeMem(psPopulation);
+
+	return PVRSRV_OK;
+}
+
+static IMG_BOOL _FindZSBuffer(PDLLIST_NODE psNode, IMG_PVOID pvCallbackData)
+{
+	DEVMEM_REF_LOOKUP *psRefLookUp = (DEVMEM_REF_LOOKUP *)pvCallbackData;
+	RGX_ZSBUFFER_DATA *psZSBuffer;
+
+	psZSBuffer = IMG_CONTAINER_OF(psNode, RGX_ZSBUFFER_DATA, sNode);
+
+	if (psZSBuffer->ui32ZSBufferID == psRefLookUp->ui32ZSBufferID)
+	{
+		psRefLookUp->psZSBuffer = psZSBuffer;
+		return IMG_FALSE;
+	}
+	else
+	{
+		return IMG_TRUE;
+	}
+}
+
+IMG_VOID RGXProcessRequestZSBufferBacking(PVRSRV_RGXDEV_INFO *psDevInfo,
+											IMG_UINT32 ui32ZSBufferID)
+{
+	DEVMEM_REF_LOOKUP sLookUp;
+	RGXFWIF_KCCB_CMD sTACCBCmd;
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(psDevInfo);
+
+	/* scan all deferred allocations */
+	sLookUp.ui32ZSBufferID = ui32ZSBufferID;
+	sLookUp.psZSBuffer = IMG_NULL;
+
+	OSLockAcquire(psDevInfo->hLockZSBuffer);
+	dllist_foreach_node(&psDevInfo->sZSBufferHead, _FindZSBuffer, (IMG_PVOID)&sLookUp);
+	OSLockRelease(psDevInfo->hLockZSBuffer);
+
+	if (sLookUp.psZSBuffer)
+	{
+		IMG_BOOL bBackingDone = IMG_TRUE;
+
+		/* Populate ZLS */
+		eError = RGXBackingZSBuffer(sLookUp.psZSBuffer);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"Populating ZS-Buffer failed failed with error %u (ID = 0x%08x)", eError, ui32ZSBufferID));
+			bBackingDone = IMG_FALSE;
+		}
+
+		/* send confirmation */
+		sTACCBCmd.eCmdType = RGXFWIF_KCCB_CMD_ZSBUFFER_BACKING_UPDATE;
+		sTACCBCmd.uCmdData.sZSBufferBackingData.psZSBufferFWDevVAddr = sLookUp.psZSBuffer->sZSBufferFWDevVAddr.ui32Addr;
+		sTACCBCmd.uCmdData.sZSBufferBackingData.bDone = bBackingDone;
+
+		LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+		{
+			eError = RGXScheduleCommand(psDevInfo,
+												RGXFWIF_DM_TA,
+												&sTACCBCmd,
+												sizeof(sTACCBCmd),
+												IMG_FALSE);
+			if (eError != PVRSRV_ERROR_RETRY)
+			{
+				break;
+			}
+			OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+		} END_LOOP_UNTIL_TIMEOUT();
+
+		/* Kernel CCB should never fill up, as the FW is processing them right away  */
+		PVR_ASSERT(eError == PVRSRV_OK);
+
+		sLookUp.psZSBuffer->ui32NumReqByFW++;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+		PVRSRVStatsUpdateZSBufferStats(0,1,sLookUp.psZSBuffer->owner);
+#endif
+
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR,"ZS Buffer Lookup for ZS Buffer ID 0x%08x failed (Populate)", sLookUp.ui32ZSBufferID));
+	}
+}
+
+IMG_VOID RGXProcessRequestZSBufferUnbacking(PVRSRV_RGXDEV_INFO *psDevInfo,
+											IMG_UINT32 ui32ZSBufferID)
+{
+	DEVMEM_REF_LOOKUP sLookUp;
+	RGXFWIF_KCCB_CMD sTACCBCmd;
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(psDevInfo);
+
+	/* scan all deferred allocations */
+	sLookUp.ui32ZSBufferID = ui32ZSBufferID;
+	sLookUp.psZSBuffer = IMG_NULL;
+
+	OSLockAcquire(psDevInfo->hLockZSBuffer);
+	dllist_foreach_node(&psDevInfo->sZSBufferHead, _FindZSBuffer, (IMG_PVOID)&sLookUp);
+	OSLockRelease(psDevInfo->hLockZSBuffer);
+
+	if (sLookUp.psZSBuffer)
+	{
+		/* Unpopulate ZLS */
+		eError = RGXUnbackingZSBuffer(sLookUp.psZSBuffer);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"UnPopulating ZS-Buffer failed failed with error %u (ID = 0x%08x)", eError, ui32ZSBufferID));
+			PVR_ASSERT(IMG_FALSE);
+		}
+
+		/* send confirmation */
+		sTACCBCmd.eCmdType = RGXFWIF_KCCB_CMD_ZSBUFFER_UNBACKING_UPDATE;
+		sTACCBCmd.uCmdData.sZSBufferBackingData.psZSBufferFWDevVAddr = sLookUp.psZSBuffer->sZSBufferFWDevVAddr.ui32Addr;
+		sTACCBCmd.uCmdData.sZSBufferBackingData.bDone = IMG_TRUE;
+
+		LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+		{
+			eError = RGXScheduleCommand(psDevInfo,
+												RGXFWIF_DM_TA,
+												&sTACCBCmd,
+												sizeof(sTACCBCmd),
+												IMG_FALSE);
+			if (eError != PVRSRV_ERROR_RETRY)
+			{
+				break;
+			}
+			OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+		} END_LOOP_UNTIL_TIMEOUT();
+
+		/* Kernel CCB should never fill up, as the FW is processing them right away  */
+		PVR_ASSERT(eError == PVRSRV_OK);
+
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR,"ZS Buffer Lookup for ZS Buffer ID 0x%08x failed (UnPopulate)", sLookUp.ui32ZSBufferID));
+	}
+}
+
+static
+PVRSRV_ERROR _CreateTAContext(CONNECTION_DATA *psConnection,
+							  PVRSRV_DEVICE_NODE *psDeviceNode,
+							  DEVMEM_MEMDESC *psAllocatedMemDesc,
+							  IMG_UINT32 ui32AllocatedOffset,
+							  DEVMEM_MEMDESC *psFWMemContextMemDesc,
+							  IMG_DEV_VIRTADDR sVDMCallStackAddr,
+							  IMG_UINT32 ui32Priority,
+							  RGX_COMMON_CONTEXT_INFO *psInfo,
+							  RGX_SERVER_RC_TA_DATA *psTAData)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+	RGXFWIF_TACTX_STATE *psContextState;
+	PVRSRV_ERROR eError;
+	/*
+		Allocate device memory for the firmware GPU context suspend state.
+		Note: the FW reads/writes the state to memory by accessing the GPU register interface.
+	*/
+	PDUMPCOMMENT("Allocate RGX firmware TA context suspend state");
+
+	eError = DevmemFwAllocate(psDevInfo,
+							  sizeof(RGXFWIF_TACTX_STATE),
+							  RGX_FWCOMCTX_ALLOCFLAGS,
+							  "FirmwareTAContextState",
+							  &psTAData->psContextStateMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateRenderContextKM: Failed to allocate firmware GPU context suspend state (%u)",
+				eError));
+		goto fail_tacontextsuspendalloc;
+	}
+
+	eError = DevmemAcquireCpuVirtAddr(psTAData->psContextStateMemDesc,
+                                      (IMG_VOID **)&psContextState);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateRenderContextKM: Failed to map firmware render context state (%u)",
+				eError));
+		goto fail_suspendcpuvirtacquire;
+	}
+	psContextState->uTAReg_VDM_CALL_STACK_POINTER_Init = sVDMCallStackAddr.uiAddr;
+	DevmemReleaseCpuVirtAddr(psTAData->psContextStateMemDesc);
+
+	eError = FWCommonContextAllocate(psConnection,
+									 psDeviceNode,
+									 "TA",
+									 psAllocatedMemDesc,
+									 ui32AllocatedOffset,
+									 psFWMemContextMemDesc,
+									 psTAData->psContextStateMemDesc,
+									 RGX_CCB_SIZE_LOG2,
+									 ui32Priority,
+									 psInfo,
+									 &psTAData->psServerCommonContext);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateRenderContextKM: Failed to init TA fw common context (%u)",
+				eError));
+		goto fail_tacommoncontext;
+	}
+	
+	/*
+	 * Dump the FW 3D context suspend state buffer
+	 */
+	PDUMPCOMMENT("Dump the TA context suspend state buffer");
+	DevmemPDumpLoadMem(psTAData->psContextStateMemDesc,
+					   0,
+					   sizeof(RGXFWIF_TACTX_STATE),
+					   PDUMP_FLAGS_CONTINUOUS);
+
+	psTAData->ui32Priority = ui32Priority;
+	return PVRSRV_OK;
+
+fail_tacommoncontext:
+fail_suspendcpuvirtacquire:
+	DevmemFwFree(psTAData->psContextStateMemDesc);
+fail_tacontextsuspendalloc:
+	PVR_ASSERT(eError != PVRSRV_OK);
+
+	return eError;
+}
+
+static
+PVRSRV_ERROR _Create3DContext(CONNECTION_DATA *psConnection,
+							  PVRSRV_DEVICE_NODE *psDeviceNode,
+							  DEVMEM_MEMDESC *psAllocatedMemDesc,
+							  IMG_UINT32 ui32AllocatedOffset,
+							  DEVMEM_MEMDESC *psFWMemContextMemDesc,
+							  IMG_UINT32 ui32Priority,
+							  RGX_COMMON_CONTEXT_INFO *psInfo,
+							  RGX_SERVER_RC_3D_DATA *ps3DData)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+	PVRSRV_ERROR eError;
+
+	/*
+		Allocate device memory for the firmware GPU context suspend state.
+		Note: the FW reads/writes the state to memory by accessing the GPU register interface.
+	*/
+	PDUMPCOMMENT("Allocate RGX firmware 3D context suspend state");
+
+	eError = DevmemFwAllocate(psDevInfo,
+							  sizeof(RGXFWIF_3DCTX_STATE),
+							  RGX_FWCOMCTX_ALLOCFLAGS,
+							  "Firmware3DContextState",
+							  &ps3DData->psContextStateMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateRenderContextKM: Failed to allocate firmware GPU context suspend state (%u)",
+				eError));
+		goto fail_3dcontextsuspendalloc;
+	}
+
+	eError = FWCommonContextAllocate(psConnection,
+									 psDeviceNode,
+									 "3D",
+									 psAllocatedMemDesc,
+									 ui32AllocatedOffset,
+									 psFWMemContextMemDesc,
+									 ps3DData->psContextStateMemDesc,
+									 RGX_CCB_SIZE_LOG2,
+									 ui32Priority,
+									 psInfo,
+									 &ps3DData->psServerCommonContext);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateRenderContextKM: Failed to init 3D fw common context (%u)",
+				eError));
+		goto fail_3dcommoncontext;
+	}
+
+	/*
+	 * Dump the FW 3D context suspend state buffer
+	 */
+	PDUMPCOMMENT("Dump the 3D context suspend state buffer");
+	DevmemPDumpLoadMem(ps3DData->psContextStateMemDesc,
+					   0,
+					   sizeof(RGXFWIF_3DCTX_STATE),
+					   PDUMP_FLAGS_CONTINUOUS);
+
+	ps3DData->ui32Priority = ui32Priority;
+	return PVRSRV_OK;
+
+fail_3dcommoncontext:
+	DevmemFwFree(ps3DData->psContextStateMemDesc);
+fail_3dcontextsuspendalloc:
+	PVR_ASSERT(eError != PVRSRV_OK);
+
+	return eError;
+}
+
+
+/*
+ * PVRSRVRGXCreateRenderContextKM
+ */
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXCreateRenderContextKM(CONNECTION_DATA				*psConnection,
+											PVRSRV_DEVICE_NODE			*psDeviceNode,
+											IMG_UINT32					ui32Priority,
+											IMG_DEV_VIRTADDR			sMCUFenceAddr,
+											IMG_DEV_VIRTADDR			sVDMCallStackAddr,
+											IMG_UINT32					ui32FrameworkRegisterSize,
+											IMG_PBYTE					pabyFrameworkRegisters,
+											IMG_HANDLE					hMemCtxPrivData,
+											RGX_SERVER_RENDER_CONTEXT	**ppsRenderContext)
+{
+	PVRSRV_ERROR				eError;
+	PVRSRV_RGXDEV_INFO 			*psDevInfo = psDeviceNode->pvDevice;
+	RGX_SERVER_RENDER_CONTEXT	*psRenderContext;
+	DEVMEM_MEMDESC				*psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+	RGX_COMMON_CONTEXT_INFO		sInfo;
+
+	/* Prepare cleanup structure */
+	*ppsRenderContext = IMG_NULL;
+	psRenderContext = OSAllocMem(sizeof(*psRenderContext));
+	if (psRenderContext == IMG_NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	OSMemSet(psRenderContext, 0, sizeof(*psRenderContext));
+	psRenderContext->psDeviceNode = psDeviceNode;
+
+	/*
+		Create the FW render context, this has the TA and 3D FW common
+		contexts embedded within it
+	*/
+	eError = DevmemFwAllocate(psDevInfo,
+							  sizeof(RGXFWIF_FWRENDERCONTEXT),
+							  RGX_FWCOMCTX_ALLOCFLAGS,
+							  "FirmwareRenderContext",
+							  &psRenderContext->psFWRenderContextMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_fwrendercontext;
+	}
+
+	/*
+		As the common context alloc will dump the TA and 3D common contexts
+		after the've been setup we skip of the 2 common contexts and dump the
+		rest of the structure
+	*/
+	PDUMPCOMMENT("Dump shared part of render context context");
+	DevmemPDumpLoadMem(psRenderContext->psFWRenderContextMemDesc,
+					   (sizeof(RGXFWIF_FWCOMMONCONTEXT) * 2),
+					   sizeof(RGXFWIF_FWRENDERCONTEXT) - (sizeof(RGXFWIF_FWCOMMONCONTEXT) * 2),
+					   PDUMP_FLAGS_CONTINUOUS);
+
+	/* Allocate cleanup sync */
+	eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+						   &psRenderContext->psCleanupSync,
+						   "ta3d render context cleanup");
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateRenderContextKM: Failed to allocate cleanup sync (0x%x)",
+				eError));
+		goto fail_syncalloc;
+	}
+
+	/* 
+	 * Create the FW framework buffer
+	 */
+	eError = PVRSRVRGXFrameworkCreateKM(psDeviceNode,
+										&psRenderContext->psFWFrameworkMemDesc,
+										ui32FrameworkRegisterSize);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateRenderContextKM: Failed to allocate firmware GPU framework state (%u)",
+				eError));
+		goto fail_frameworkcreate;
+	}
+
+	/* Copy the Framework client data into the framework buffer */
+	eError = PVRSRVRGXFrameworkCopyCommand(psRenderContext->psFWFrameworkMemDesc,
+										   pabyFrameworkRegisters,
+										   ui32FrameworkRegisterSize);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateRenderContextKM: Failed to populate the framework buffer (%u)",
+				eError));
+		goto fail_frameworkcopy;
+	}
+
+	sInfo.psFWFrameworkMemDesc = psRenderContext->psFWFrameworkMemDesc;
+	sInfo.psMCUFenceAddr = &sMCUFenceAddr;
+
+	eError = _CreateTAContext(psConnection,
+							  psDeviceNode,
+							  psRenderContext->psFWRenderContextMemDesc,
+							  offsetof(RGXFWIF_FWRENDERCONTEXT, sTAContext),
+							  psFWMemContextMemDesc,
+							  sVDMCallStackAddr,
+							  ui32Priority,
+							  &sInfo,
+							  &psRenderContext->sTAData);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_tacontext;
+	}
+
+	eError = _Create3DContext(psConnection,
+							  psDeviceNode,
+							  psRenderContext->psFWRenderContextMemDesc,
+							  offsetof(RGXFWIF_FWRENDERCONTEXT, s3DContext),
+							  psFWMemContextMemDesc,
+							  ui32Priority,
+							  &sInfo,
+							  &psRenderContext->s3DData);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_3dcontext;
+	}
+
+	{
+		PVRSRV_RGXDEV_INFO			*psDevInfo = psDeviceNode->pvDevice;
+
+		OSWRLockAcquireWrite(psDevInfo->hRenderCtxListLock);
+		dllist_add_to_tail(&(psDevInfo->sRenderCtxtListHead), &(psRenderContext->sListNode));
+		OSWRLockReleaseWrite(psDevInfo->hRenderCtxListLock);
+	}
+
+	*ppsRenderContext= psRenderContext;
+	return PVRSRV_OK;
+
+fail_3dcontext:
+	_DestroyTAContext(&psRenderContext->sTAData,
+					  psDeviceNode,
+					  psRenderContext->psCleanupSync);
+fail_tacontext:
+fail_frameworkcopy:
+	DevmemFwFree(psRenderContext->psFWFrameworkMemDesc);
+fail_frameworkcreate:
+	SyncPrimFree(psRenderContext->psCleanupSync);
+fail_syncalloc:
+	DevmemFwFree(psRenderContext->psFWRenderContextMemDesc);
+fail_fwrendercontext:
+	OSFreeMem(psRenderContext);
+	PVR_ASSERT(eError != PVRSRV_OK);
+
+	return eError;
+}
+
+/*
+ * PVRSRVRGXDestroyRenderContextKM
+ */
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXDestroyRenderContextKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext)
+{
+	PVRSRV_ERROR				eError;
+	PVRSRV_RGXDEV_INFO 	*psDevInfo = psRenderContext->psDeviceNode->pvDevice;
+
+	/* remove node from list before calling destroy - as destroy, if successful
+	 * will invalidate the node
+	 * must be re-added if destroy fails
+	 */
+	OSWRLockAcquireWrite(psDevInfo->hRenderCtxListLock);
+	dllist_remove_node(&(psRenderContext->sListNode));
+	OSWRLockReleaseWrite(psDevInfo->hRenderCtxListLock);
+
+	/* Cleanup the TA if we haven't already */
+	if ((psRenderContext->ui32CleanupStatus & RC_CLEANUP_TA_COMPLETE) == 0)
+	{
+		eError = _DestroyTAContext(&psRenderContext->sTAData,
+								   psRenderContext->psDeviceNode,
+								   psRenderContext->psCleanupSync);
+		if (eError != PVRSRV_ERROR_RETRY)
+		{
+			psRenderContext->ui32CleanupStatus |= RC_CLEANUP_TA_COMPLETE;
+		}
+		else
+		{
+			goto e0;
+		}
+	}
+
+	/* Cleanup the 3D if we haven't already */
+	if ((psRenderContext->ui32CleanupStatus & RC_CLEANUP_3D_COMPLETE) == 0)
+	{
+		eError = _Destroy3DContext(&psRenderContext->s3DData,
+								   psRenderContext->psDeviceNode,
+								   psRenderContext->psCleanupSync);
+		if (eError != PVRSRV_ERROR_RETRY)
+		{
+			psRenderContext->ui32CleanupStatus |= RC_CLEANUP_3D_COMPLETE;
+		}
+		else
+		{
+			goto e0;
+		}
+	}
+
+	/*
+		Only if both TA and 3D contexts have been cleaned up can we
+		free the shared resources
+	*/
+	if (psRenderContext->ui32CleanupStatus == (RC_CLEANUP_3D_COMPLETE | RC_CLEANUP_TA_COMPLETE))
+	{
+		RGXFWIF_FWRENDERCONTEXT	*psFWRenderContext;
+
+		/* Update SPM statistics */
+		eError = DevmemAcquireCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc,
+	                                      (IMG_VOID **)&psFWRenderContext);
+		if (eError == PVRSRV_OK)
+		{
+			DevmemReleaseCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc);
+		}
+		else
+		{
+			PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXDestroyRenderContextKM: Failed to map firmware render context (%u)",
+					eError));
+		}
+
+		/* Free the framework buffer */
+		DevmemFwFree(psRenderContext->psFWFrameworkMemDesc);
+	
+		/* Free the firmware render context */
+		DevmemFwFree(psRenderContext->psFWRenderContextMemDesc);
+
+		/* Free the cleanup sync */
+		SyncPrimFree(psRenderContext->psCleanupSync);
+
+		OSFreeMem(psRenderContext);
+	}
+
+	return PVRSRV_OK;
+
+e0:
+	OSWRLockAcquireWrite(psDevInfo->hRenderCtxListLock);
+	dllist_add_to_tail(&(psDevInfo->sRenderCtxtListHead), &(psRenderContext->sListNode));
+	OSWRLockReleaseWrite(psDevInfo->hRenderCtxListLock);
+	return eError;
+}
+
+
+/* TODO !!! this was local on the stack, and we managed to blow the stack for the kernel. 
+ * THIS - 46 argument function needs to be sorted out.
+ */
+/* 1 command for the TA */
+static RGX_CCB_CMD_HELPER_DATA asTACmdHelperData[1];
+/* Up to 3 commands for the 3D (partial render fence, partial reader, and render) */
+static RGX_CCB_CMD_HELPER_DATA as3DCmdHelperData[3];
+
+/*
+ * PVRSRVRGXKickTA3DKM
+ */
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT	*psRenderContext,
+								 IMG_UINT32					ui32ClientTAFenceCount,
+								 PRGXFWIF_UFO_ADDR			*pauiClientTAFenceUFOAddress,
+								 IMG_UINT32					*paui32ClientTAFenceValue,
+								 IMG_UINT32					ui32ClientTAUpdateCount,
+								 PRGXFWIF_UFO_ADDR			*pauiClientTAUpdateUFOAddress,
+								 IMG_UINT32					*paui32ClientTAUpdateValue,
+								 IMG_UINT32					ui32ServerTASyncPrims,
+								 IMG_UINT32					*paui32ServerTASyncFlags,
+								 SERVER_SYNC_PRIMITIVE 		**pasServerTASyncs,
+								 IMG_UINT32					ui32Client3DFenceCount,
+								 PRGXFWIF_UFO_ADDR			*pauiClient3DFenceUFOAddress,
+								 IMG_UINT32					*paui32Client3DFenceValue,
+								 IMG_UINT32					ui32Client3DUpdateCount,
+								 PRGXFWIF_UFO_ADDR			*pauiClient3DUpdateUFOAddress,
+								 IMG_UINT32					*paui32Client3DUpdateValue,
+								 IMG_UINT32					ui32Server3DSyncPrims,
+								 IMG_UINT32					*paui32Server3DSyncFlags,
+								 SERVER_SYNC_PRIMITIVE 		**pasServer3DSyncs,
+								 PRGXFWIF_UFO_ADDR			uiPRFenceUFOAddress,
+								 IMG_UINT32					ui32PRFenceValue,
+								 IMG_UINT32					ui32NumCheckFenceFDs,
+								 IMG_INT32					*ai32CheckFenceFDs,
+								 IMG_INT32                  i32UpdateFenceFD,
+								 IMG_UINT32					ui32TACmdSize,
+								 IMG_PBYTE					pui8TADMCmd,
+								 IMG_UINT32					ui323DPRCmdSize,
+								 IMG_PBYTE					pui83DPRDMCmd,
+								 IMG_UINT32					ui323DCmdSize,
+								 IMG_PBYTE					pui83DDMCmd,
+								 IMG_UINT32					ui32ExtJobRef,
+								 IMG_UINT32					ui32IntJobRef,
+								 IMG_BOOL					bLastTAInScene,
+								 IMG_BOOL					bKickTA,
+								 IMG_BOOL					bKickPR,
+								 IMG_BOOL					bKick3D,
+								 IMG_BOOL					bAbort,
+								 IMG_BOOL					bPDumpContinuous,
+								 RGX_RTDATA_CLEANUP_DATA	*psRTDataCleanup,
+								 RGX_ZSBUFFER_DATA		*psZBuffer,
+								 RGX_ZSBUFFER_DATA		*psSBuffer,
+								 IMG_BOOL			bCommitRefCountsTA,
+								 IMG_BOOL			bCommitRefCounts3D,
+								 IMG_BOOL			*pbCommittedRefCountsTA,
+								 IMG_BOOL			*pbCommittedRefCounts3D)
+{
+
+	IMG_UINT32				ui32TACmdCount=0;
+	IMG_UINT32				ui323DCmdCount=0;
+	IMG_BOOL				bKickTADM = IMG_FALSE;
+	IMG_BOOL				bKick3DDM = IMG_FALSE;
+	RGXFWIF_UFO				sPRUFO;
+	IMG_UINT32				*paui32Server3DSyncFlagsPR = IMG_NULL;
+	IMG_UINT32				*paui32Server3DSyncFlags3D = IMG_NULL;
+	IMG_UINT32				i;
+	PVRSRV_ERROR			eError = PVRSRV_OK;
+	PVRSRV_ERROR			eError2;
+
+	/* Internal client sync info, used to help with merging of Android fd syncs */
+	IMG_UINT32				ui32IntClientTAFenceCount = 0;
+	PRGXFWIF_UFO_ADDR		*pauiIntClientTAFenceUFOAddress = IMG_NULL;
+	IMG_UINT32				*paui32IntClientTAFenceValue = IMG_NULL;
+
+	IMG_UINT32 				ui32NumUpdateSyncs = 0;
+	PRGXFWIF_UFO_ADDR 		*puiUpdateFWAddrs = IMG_NULL;
+	IMG_UINT32 				*pui32UpdateValues = IMG_NULL;
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+	/* Android fd sync update info */
+	struct pvr_sync_append_data *psFDData = NULL;
+#endif
+
+	RGXFWIF_DEV_VIRTADDR pPreTimestamp;
+	RGXFWIF_DEV_VIRTADDR pPostTimestamp;
+	PRGXFWIF_UFO_ADDR    pRMWUFOAddr;
+
+	*pbCommittedRefCountsTA = IMG_FALSE;
+	*pbCommittedRefCounts3D = IMG_FALSE;
+
+	/* Sanity check the server fences */
+	for (i=0;i<ui32ServerTASyncPrims;i++)
+	{
+		if (!(paui32ServerTASyncFlags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK))
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Server fence (on TA) must fence", __FUNCTION__));
+			return PVRSRV_ERROR_INVALID_SYNC_PRIM_OP;
+		}
+	}
+
+	for (i=0;i<ui32Server3DSyncPrims;i++)
+	{
+		if (!(paui32Server3DSyncFlags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK))
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Server fence (on 3D) must fence", __FUNCTION__));
+			return PVRSRV_ERROR_INVALID_SYNC_PRIM_OP;
+		}
+	}
+
+	RGX_GetTimestampCmdHelper((PVRSRV_RGXDEV_INFO*) psRenderContext->psDeviceNode->pvDevice,
+	                          & pPreTimestamp,
+	                          & pPostTimestamp,
+	                          & pRMWUFOAddr);
+
+	/*
+		Sanity check we have a PR kick if there are client or server fences
+	*/
+	if (!bKickPR & ((ui32Client3DFenceCount != 0) || (ui32Server3DSyncPrims != 0)))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: 3D fence (client or server) passed without a PR kick", __FUNCTION__));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* Init and acquire to TA command if required */
+	if(bKickTA)
+	{
+		RGX_SERVER_RC_TA_DATA *psTAData = &psRenderContext->sTAData;
+
+		ui32IntClientTAFenceCount = ui32ClientTAFenceCount;
+		pauiIntClientTAFenceUFOAddress = pauiClientTAFenceUFOAddress;
+		paui32IntClientTAFenceValue = paui32ClientTAFenceValue;
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+		if (ui32NumCheckFenceFDs || i32UpdateFenceFD >= 0)
+		{
+			/*
+				This call is only using the Android fd sync to fence the
+				TA command. There is an update but this is used to
+				indicate that the fence has been finished with and so it
+				can happen after the PR as by then we've finished using
+				the fd sync
+			*/
+			eError =
+			  pvr_sync_append_fences("TA",
+			                         ui32NumCheckFenceFDs,
+			                         ai32CheckFenceFDs,
+									 i32UpdateFenceFD,
+			                         ui32NumUpdateSyncs,
+			                         puiUpdateFWAddrs,
+			                         pui32UpdateValues,
+			                         ui32IntClientTAFenceCount,
+			                         pauiIntClientTAFenceUFOAddress,
+			                         paui32IntClientTAFenceValue,
+			                         &psFDData);
+			if (eError != PVRSRV_OK)
+			{
+			    goto fail_fdsync;
+			}
+			ui32NumUpdateSyncs = psFDData->nr_updates;
+			puiUpdateFWAddrs = psFDData->update_ufo_addresses;
+			pui32UpdateValues = psFDData->update_values;
+			ui32IntClientTAFenceCount = psFDData->nr_checks;
+			pauiIntClientTAFenceUFOAddress = psFDData->check_ufo_addresses;
+			paui32IntClientTAFenceValue = psFDData->check_values;
+		}
+#endif /* defined(SUPPORT_NATIVE_FENCE_SYNC) */
+
+		/* Init the TA command helper */
+		eError = RGXCmdHelperInitCmdCCB(FWCommonContextGetClientCCB(psTAData->psServerCommonContext),
+		                                ui32IntClientTAFenceCount,
+		                                pauiIntClientTAFenceUFOAddress,
+		                                paui32IntClientTAFenceValue,
+		                                ui32ClientTAUpdateCount,
+		                                pauiClientTAUpdateUFOAddress,
+		                                paui32ClientTAUpdateValue,
+		                                ui32ServerTASyncPrims,
+		                                paui32ServerTASyncFlags,
+		                                pasServerTASyncs,
+		                                ui32TACmdSize,
+		                                pui8TADMCmd,
+		                                & pPreTimestamp,
+		                                (bKick3D ? IMG_NULL : & pPostTimestamp),
+		                                (bKick3D ? IMG_NULL : & pRMWUFOAddr),
+		                                RGXFWIF_CCB_CMD_TYPE_TA,
+		                                bPDumpContinuous,
+		                                "TA",
+		                                asTACmdHelperData);
+		if (eError != PVRSRV_OK)
+		{
+			goto fail_tacmdinit;
+		}
+
+		eError = RGXCmdHelperAcquireCmdCCB(IMG_ARR_NUM_ELEMS(asTACmdHelperData),
+		                                   asTACmdHelperData,
+		                                   &bKickTADM);
+		if (eError != PVRSRV_OK)
+		{
+			if (!bKickTADM)
+			{
+				goto fail_taacquirecmd;
+			}
+			else
+			{
+				/* commit the TA ref counting next time, when the CCB space is successfully
+				 * acquired
+				 */
+				bCommitRefCountsTA = IMG_FALSE;
+			}
+		}
+		else
+		{
+			ui32TACmdCount++;
+		}
+	}
+
+	/* Only kick the 3D if required */
+	if (eError == PVRSRV_OK)
+	{
+	if (bKickPR)
+	{
+		RGX_SERVER_RC_3D_DATA *ps3DData = &psRenderContext->s3DData;
+
+		if (ui32Server3DSyncPrims)
+		{
+			/*
+				The fence (and possible update) straddle multiple commands so
+				we have to modify the flags to do the right things at the right
+				time.
+				At this stage we should only fence, any updates will happen with
+				the normal 3D command.
+			*/
+			paui32Server3DSyncFlagsPR = OSAllocMem(sizeof(IMG_UINT32) * ui32Server3DSyncPrims);
+			if (paui32Server3DSyncFlagsPR == IMG_NULL)
+			{
+				eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto fail_prserversyncflagsallocpr;
+			}
+
+			/* Copy only the fence flag across */
+			for (i=0;i<ui32Server3DSyncPrims;i++)
+			{
+				paui32Server3DSyncFlagsPR[i] = paui32Server3DSyncFlags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK;
+			}
+		}
+
+		/*
+			The command helper doesn't know about the PR fence so create
+			the command with all the fences against it and later create
+			the PR command itself which _must_ come after the PR fence.
+		*/
+		sPRUFO.puiAddrUFO = uiPRFenceUFOAddress;
+		sPRUFO.ui32Value = ui32PRFenceValue;
+
+		/* Init the PR fence command helper */
+		eError = RGXCmdHelperInitCmdCCB(FWCommonContextGetClientCCB(ps3DData->psServerCommonContext),
+										ui32Client3DFenceCount,
+										pauiClient3DFenceUFOAddress,
+										paui32Client3DFenceValue,
+										0,
+										IMG_NULL,
+										IMG_NULL,
+										(bKick3D ? ui32Server3DSyncPrims : 0),
+										paui32Server3DSyncFlagsPR,
+										pasServer3DSyncs,
+										sizeof(sPRUFO),
+										(IMG_UINT8*) &sPRUFO,
+										IMG_NULL,
+										IMG_NULL,
+										IMG_NULL,
+										RGXFWIF_CCB_CMD_TYPE_FENCE_PR,
+										bPDumpContinuous,
+										"3D-PR-Fence",
+										&as3DCmdHelperData[ui323DCmdCount++]);
+		if (eError != PVRSRV_OK)
+		{
+			goto fail_prfencecmdinit;
+		}
+
+		/* Init the 3D PR command helper */
+		/*
+			See note above PVRFDSyncQueryFencesKM as to why updates for android
+			syncs are passed in with the PR
+		*/
+		eError = RGXCmdHelperInitCmdCCB(FWCommonContextGetClientCCB(ps3DData->psServerCommonContext),
+										0,
+										IMG_NULL,
+										IMG_NULL,
+										ui32NumUpdateSyncs,
+										puiUpdateFWAddrs,
+										pui32UpdateValues,
+										0,
+										IMG_NULL,
+										IMG_NULL,
+										ui323DPRCmdSize,
+										pui83DPRDMCmd,
+										IMG_NULL,
+										IMG_NULL,
+										IMG_NULL,
+										RGXFWIF_CCB_CMD_TYPE_3D_PR,
+										bPDumpContinuous,
+										"3D-PR",
+										&as3DCmdHelperData[ui323DCmdCount++]);
+		if (eError != PVRSRV_OK)
+		{
+			goto fail_prcmdinit;
+		}
+	}
+
+	if (bKick3D || bAbort)
+	{
+		RGX_SERVER_RC_3D_DATA *ps3DData = &psRenderContext->s3DData;
+
+		if (ui32Server3DSyncPrims)
+		{
+			/*
+				Copy only the update flags for the 3D as the fences will be in
+				the PR command created above
+			*/
+			paui32Server3DSyncFlags3D = OSAllocMem(sizeof(IMG_UINT32) * ui32Server3DSyncPrims);
+			if (paui32Server3DSyncFlags3D == IMG_NULL)
+			{
+				eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto fail_prserversyncflagsalloc3d;
+			}
+
+			/* Copy only the update flag across */
+			for (i=0;i<ui32Server3DSyncPrims;i++)
+			{
+				paui32Server3DSyncFlags3D[i] = paui32Server3DSyncFlags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE;
+			}
+		}
+
+		/* Init the 3D command helper */
+		eError = RGXCmdHelperInitCmdCCB(FWCommonContextGetClientCCB(ps3DData->psServerCommonContext),
+		                                0,
+		                                IMG_NULL,
+		                                IMG_NULL,
+		                                ui32Client3DUpdateCount,
+		                                pauiClient3DUpdateUFOAddress,
+		                                paui32Client3DUpdateValue,
+		                                ui32Server3DSyncPrims,
+		                                paui32Server3DSyncFlags3D,
+		                                pasServer3DSyncs,
+		                                ui323DCmdSize,
+		                                pui83DDMCmd,
+		                                (bKickTA ? IMG_NULL : & pPreTimestamp),
+		                                & pPostTimestamp,
+		                                & pRMWUFOAddr,
+		                                RGXFWIF_CCB_CMD_TYPE_3D,
+		                                bPDumpContinuous,
+		                                "3D",
+		                                &as3DCmdHelperData[ui323DCmdCount++]);
+		if (eError != PVRSRV_OK)
+		{
+			goto fail_3dcmdinit;
+		}
+	}
+
+	/* Protect against array overflow in RGXCmdHelperAcquireCmdCCB() */
+	if (ui323DCmdCount > IMG_ARR_NUM_ELEMS(as3DCmdHelperData))
+	{
+		goto fail_3dcmdinit;
+	}
+
+	if (ui323DCmdCount)
+	{
+		PVR_ASSERT(bKickPR || bKick3D);
+
+		/* Acquire space for all the 3D command(s) */
+		eError = RGXCmdHelperAcquireCmdCCB(ui323DCmdCount,
+										   as3DCmdHelperData,
+										   &bKick3DDM);
+		if (eError != PVRSRV_OK)
+		{
+			/* If RGXCmdHelperAcquireCmdCCB fails we skip the scheduling
+			 * of a new TA command with the same Write offset in Kernel CCB.
+			 */
+			goto fail_3dacquirecmd;
+		}
+	}
+	}
+
+	/*
+		We should acquire the space in the kernel CCB here as after this point
+		we release the commands which will take operations on server syncs
+		which can't be undone
+	*/
+
+	/*
+		Everything is ready to go now, release the commands
+	*/
+	if (ui32TACmdCount)
+	{
+		RGXCmdHelperReleaseCmdCCB(ui32TACmdCount,
+								  asTACmdHelperData,
+								  "TA",
+								  FWCommonContextGetFWAddress(psRenderContext->sTAData.psServerCommonContext).ui32Addr);
+	}
+
+	if (ui323DCmdCount)
+	{
+		RGXCmdHelperReleaseCmdCCB(ui323DCmdCount,
+								  as3DCmdHelperData,
+								  "3D",
+								  FWCommonContextGetFWAddress(psRenderContext->s3DData.psServerCommonContext).ui32Addr);
+	}
+
+	if (bKickTADM)
+	{
+		RGXFWIF_KCCB_CMD sTAKCCBCmd;
+
+		/* Construct the kernel TA CCB command. */
+		sTAKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+		sTAKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psRenderContext->sTAData.psServerCommonContext);
+		sTAKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->sTAData.psServerCommonContext));
+
+		if(bCommitRefCountsTA)
+		{
+			AttachKickResourcesCleanupCtls((PRGXFWIF_CLEANUP_CTL *) &sTAKCCBCmd.uCmdData.sCmdKickData.apsCleanupCtl,
+										&sTAKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl,
+										RGXFWIF_DM_TA,
+										bKickTA,
+										psRTDataCleanup,
+										psZBuffer,
+										psSBuffer);
+			*pbCommittedRefCountsTA = IMG_TRUE;
+		}
+		else
+		{
+			sTAKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+		}
+
+		LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+		{
+			eError2 = RGXScheduleCommand(psRenderContext->psDeviceNode->pvDevice,
+										RGXFWIF_DM_TA,
+										&sTAKCCBCmd,
+										sizeof(sTAKCCBCmd),
+										bPDumpContinuous);
+			if (eError2 != PVRSRV_ERROR_RETRY)
+			{
+				break;
+			}
+			OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+		} END_LOOP_UNTIL_TIMEOUT();
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+		RGXHWPerfFTraceGPUEnqueueEvent(psRenderContext->psDeviceNode->pvDevice,
+    				ui32ExtJobRef, ui32IntJobRef, "TA3D");
+#endif
+
+	}
+	
+	if (bKick3DDM)
+	{
+		RGXFWIF_KCCB_CMD s3DKCCBCmd;
+
+		/* Construct the kernel 3D CCB command. */
+		s3DKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+		s3DKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psRenderContext->s3DData.psServerCommonContext);
+		s3DKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->s3DData.psServerCommonContext));
+
+		if(bCommitRefCounts3D)
+		{
+			AttachKickResourcesCleanupCtls((PRGXFWIF_CLEANUP_CTL *) &s3DKCCBCmd.uCmdData.sCmdKickData.apsCleanupCtl,
+											&s3DKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl,
+											RGXFWIF_DM_3D,
+											bKick3D,
+											psRTDataCleanup,
+											psZBuffer,
+											psSBuffer);
+			*pbCommittedRefCounts3D = IMG_TRUE;
+		}
+		else
+		{
+			s3DKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+		}
+
+
+		LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+		{
+			eError2 = RGXScheduleCommand(psRenderContext->psDeviceNode->pvDevice,
+										RGXFWIF_DM_3D,
+										&s3DKCCBCmd,
+										sizeof(s3DKCCBCmd),
+										bPDumpContinuous);
+			if (eError2 != PVRSRV_ERROR_RETRY)
+			{
+				break;
+			}
+			OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+		} END_LOOP_UNTIL_TIMEOUT();
+	}
+
+	/*
+	 * Now check eError (which may have returned an error from our earlier calls
+	 * to RGXCmdHelperAcquireCmdCCB) - we needed to process any flush command first
+	 * so we check it now...
+	 */
+	if (eError != PVRSRV_OK )
+	{
+		goto fail_3dacquirecmd;
+	}
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+#if defined(NO_HARDWARE)
+	pvr_sync_nohw_complete_fences(psFDData);
+#endif
+	pvr_sync_free_append_fences_data(psFDData);
+
+#endif
+	if(paui32Server3DSyncFlags3D)
+	{
+		OSFreeMem(paui32Server3DSyncFlags3D);
+	}
+
+	if(paui32Server3DSyncFlagsPR)
+	{
+		OSFreeMem(paui32Server3DSyncFlagsPR);
+	}
+
+	return PVRSRV_OK;
+
+fail_3dacquirecmd:
+fail_3dcmdinit:
+	if (paui32Server3DSyncFlags3D)
+	{
+		OSFreeMem(paui32Server3DSyncFlags3D);
+	}
+fail_prserversyncflagsalloc3d:
+fail_prcmdinit:
+fail_prfencecmdinit:
+	if (paui32Server3DSyncFlagsPR)
+	{
+		OSFreeMem(paui32Server3DSyncFlagsPR);
+	}
+fail_prserversyncflagsallocpr:
+fail_taacquirecmd:
+fail_tacmdinit:
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+	pvr_sync_rollback_append_fences(psFDData);
+	pvr_sync_free_append_fences_data(psFDData);
+fail_fdsync:
+#endif
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+PVRSRV_ERROR PVRSRVRGXSetRenderContextPriorityKM(CONNECTION_DATA *psConnection,
+												 RGX_SERVER_RENDER_CONTEXT *psRenderContext,
+												 IMG_UINT32 ui32Priority)
+{
+	PVRSRV_ERROR eError;
+
+	if (psRenderContext->sTAData.ui32Priority != ui32Priority)
+	{
+		eError = ContextSetPriority(psRenderContext->sTAData.psServerCommonContext,
+									psConnection,
+									psRenderContext->psDeviceNode->pvDevice,
+									ui32Priority,
+									RGXFWIF_DM_TA);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority of the TA part of the rendercontext (%s)", __FUNCTION__, PVRSRVGetErrorStringKM(eError)));
+			goto fail_tacontext;
+		}
+		psRenderContext->sTAData.ui32Priority = ui32Priority;
+	}
+
+	if (psRenderContext->s3DData.ui32Priority != ui32Priority)
+	{
+		eError = ContextSetPriority(psRenderContext->s3DData.psServerCommonContext,
+									psConnection,
+									psRenderContext->psDeviceNode->pvDevice,
+									ui32Priority,
+									RGXFWIF_DM_3D);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority of the 3D part of the rendercontext (%s)", __FUNCTION__, PVRSRVGetErrorStringKM(eError)));
+			goto fail_3dcontext;
+		}
+		psRenderContext->s3DData.ui32Priority = ui32Priority;
+	}
+	return PVRSRV_OK;
+
+fail_3dcontext:
+fail_tacontext:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+
+/*
+ * PVRSRVRGXGetLastResetReason
+ */
+PVRSRV_ERROR PVRSRVRGXGetLastRenderContextResetReasonKM(RGX_SERVER_RENDER_CONTEXT	*psRenderContext,
+                                                        IMG_UINT32 *peLastResetReason)
+{
+	RGX_SERVER_RC_TA_DATA         *psRenderCtxTAData = &(psRenderContext->sTAData);
+	RGX_SERVER_COMMON_CONTEXT     *psCurrentServerTACommonCtx = psRenderCtxTAData->psServerCommonContext;
+	RGX_SERVER_RC_3D_DATA         *psRenderCtx3DData = &(psRenderContext->s3DData);
+	RGX_SERVER_COMMON_CONTEXT     *psCurrentServer3DCommonCtx = psRenderCtx3DData->psServerCommonContext;
+	RGXFWIF_CONTEXT_RESET_REASON  eLastTAResetReason, eLast3DResetReason;
+	
+	PVR_ASSERT(psRenderContext != IMG_NULL);
+	PVR_ASSERT(peLastResetReason != IMG_NULL);
+	
+	/* Get the last reset reasons from both the TA and 3D so they are reset... */
+	eLastTAResetReason = FWCommonContextGetLastResetReason(psCurrentServerTACommonCtx);
+	eLast3DResetReason = FWCommonContextGetLastResetReason(psCurrentServer3DCommonCtx);
+
+	/* Combine the reset reason from TA and 3D into one... */
+	*peLastResetReason = (IMG_UINT32) eLast3DResetReason;
+	if (eLast3DResetReason == RGXFWIF_CONTEXT_RESET_REASON_NONE  ||
+	    ((eLast3DResetReason == RGXFWIF_CONTEXT_RESET_REASON_INNOCENT_LOCKUP  ||
+	      eLast3DResetReason == RGXFWIF_CONTEXT_RESET_REASON_INNOCENT_OVERRUNING)  &&
+	     (eLastTAResetReason == RGXFWIF_CONTEXT_RESET_REASON_GUILTY_LOCKUP  ||
+	      eLastTAResetReason == RGXFWIF_CONTEXT_RESET_REASON_GUILTY_OVERRUNING)))
+	{
+		*peLastResetReason = (IMG_UINT32) eLastTAResetReason;
+	}
+
+	return PVRSRV_OK;
+}
+
+
+/*
+ * PVRSRVRGXGetPartialRenderCountKM
+ */
+PVRSRV_ERROR PVRSRVRGXGetPartialRenderCountKM(DEVMEM_MEMDESC *psHWRTDataMemDesc,
+											  IMG_UINT32 *pui32NumPartialRenders)
+{
+	RGXFWIF_HWRTDATA *psHWRTData;
+	PVRSRV_ERROR eError;
+
+	eError = DevmemAcquireCpuVirtAddr(psHWRTDataMemDesc, (IMG_VOID **)&psHWRTData);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXGetPartialRenderCountKM: Failed to map Firmware Render Target Data (%u)", eError));
+		return eError;
+	}
+
+	*pui32NumPartialRenders = psHWRTData->ui32NumPartialRenders;
+
+	DevmemReleaseCpuVirtAddr(psHWRTDataMemDesc);
+
+	return PVRSRV_OK;
+}
+
+
+static IMG_BOOL CheckForStalledRenderCtxtCommand(PDLLIST_NODE psNode, IMG_PVOID pvCallbackData)
+{
+	RGX_SERVER_RENDER_CONTEXT 		*psCurrentServerRenderCtx = IMG_CONTAINER_OF(psNode, RGX_SERVER_RENDER_CONTEXT, sListNode);
+	RGX_SERVER_RC_TA_DATA			*psRenderCtxTAData = &(psCurrentServerRenderCtx->sTAData);
+	RGX_SERVER_COMMON_CONTEXT		*psCurrentServerTACommonCtx = psRenderCtxTAData->psServerCommonContext;
+	RGX_SERVER_RC_3D_DATA			*psRenderCtx3DData = &(psCurrentServerRenderCtx->s3DData);
+	RGX_SERVER_COMMON_CONTEXT		*psCurrentServer3DCommonCtx = psRenderCtx3DData->psServerCommonContext;
+	DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf = pvCallbackData;
+
+
+	DumpStalledFWCommonContext(psCurrentServerTACommonCtx, pfnDumpDebugPrintf);
+	DumpStalledFWCommonContext(psCurrentServer3DCommonCtx, pfnDumpDebugPrintf);
+
+	return IMG_TRUE;
+}
+IMG_VOID CheckForStalledRenderCtxt(PVRSRV_RGXDEV_INFO *psDevInfo,
+								   DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf)
+{
+	OSWRLockAcquireRead(psDevInfo->hRenderCtxListLock);
+	dllist_foreach_node(&(psDevInfo->sRenderCtxtListHead),
+						CheckForStalledRenderCtxtCommand, pfnDumpDebugPrintf);
+	OSWRLockReleaseRead(psDevInfo->hRenderCtxListLock);
+}
+
+static IMG_BOOL CheckForStalledClientRenderCtxtCommand(PDLLIST_NODE psNode, IMG_PVOID pvCallbackData)
+{
+	PVRSRV_ERROR *peError = (PVRSRV_ERROR*)pvCallbackData;
+	RGX_SERVER_RENDER_CONTEXT 		*psCurrentServerRenderCtx = IMG_CONTAINER_OF(psNode, RGX_SERVER_RENDER_CONTEXT, sListNode);
+	RGX_SERVER_RC_TA_DATA			*psRenderCtxTAData = &(psCurrentServerRenderCtx->sTAData);
+	RGX_SERVER_COMMON_CONTEXT		*psCurrentServerTACommonCtx = psRenderCtxTAData->psServerCommonContext;
+	RGX_SERVER_RC_3D_DATA			*psRenderCtx3DData = &(psCurrentServerRenderCtx->s3DData);
+	RGX_SERVER_COMMON_CONTEXT		*psCurrentServer3DCommonCtx = psRenderCtx3DData->psServerCommonContext;
+
+	if (PVRSRV_ERROR_CCCB_STALLED == CheckStalledClientCommonContext(psCurrentServerTACommonCtx))
+	{
+		*peError = PVRSRV_ERROR_CCCB_STALLED;
+	}
+	if (PVRSRV_ERROR_CCCB_STALLED == CheckStalledClientCommonContext(psCurrentServer3DCommonCtx))
+	{
+		*peError = PVRSRV_ERROR_CCCB_STALLED;
+	}
+
+	return IMG_TRUE;
+}
+IMG_BOOL CheckForStalledClientRenderCtxt(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	OSWRLockAcquireRead(psDevInfo->hRenderCtxListLock);
+	dllist_foreach_node(&(psDevInfo->sRenderCtxtListHead),
+						CheckForStalledClientRenderCtxtCommand, &eError);
+	OSWRLockReleaseRead(psDevInfo->hRenderCtxListLock);
+	return (PVRSRV_ERROR_CCCB_STALLED == eError)? IMG_TRUE: IMG_FALSE;
+}
+
+IMG_EXPORT PVRSRV_ERROR 
+PVRSRVRGXKickSyncTAKM(RGX_SERVER_RENDER_CONTEXT  *psRenderContext,
+                       IMG_UINT32                  ui32TAClientFenceCount,
+                       PRGXFWIF_UFO_ADDR           *pauiTAClientFenceUFOAddress,
+                       IMG_UINT32                  *paui32TAClientFenceValue,
+                       IMG_UINT32                  ui32TAClientUpdateCount,
+                       PRGXFWIF_UFO_ADDR           *pauiTAClientUpdateUFOAddress,
+                       IMG_UINT32                  *paui32TAClientUpdateValue,
+                       IMG_UINT32                  ui32TAServerSyncPrimsCount,
+                       IMG_UINT32                  *paui32TAServerSyncFlags,
+                       SERVER_SYNC_PRIMITIVE       **pasTAServerSyncs,
+					   IMG_UINT32                  ui323DClientFenceCount,
+					   PRGXFWIF_UFO_ADDR           *paui3DClientFenceUFOAddress,
+					   IMG_UINT32                  *paui323DClientFenceValue,
+					   IMG_UINT32                  ui323DClientUpdateCount,
+					   PRGXFWIF_UFO_ADDR           *paui3DClientUpdateUFOAddress,
+					   IMG_UINT32                  *paui323DClientUpdateValue,
+					   IMG_UINT32                  ui323DServerSyncPrimsCount,
+					   IMG_UINT32                  *paui323DServerSyncFlags,
+					   SERVER_SYNC_PRIMITIVE       **pas3DServerSyncs,
+					   IMG_UINT32				   ui32NumFenceFDs,
+					   IMG_INT32				   *pai32FenceFDs,
+					   IMG_INT32                   i32UpdateFenceFD,
+                       IMG_BOOL                    bPDumpContinuous)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+	/* Android fd sync update info */
+	struct pvr_sync_append_data *psFDData = NULL;
+
+	/* Android FD fences are hardcoded to updates (IMG_TRUE below), Fences go to the TA and updates to the 3D */
+	if (ui32NumFenceFDs || i32UpdateFenceFD >= 0)
+	{
+		eError =
+		pvr_sync_append_fences("TA",
+									  ui32NumFenceFDs,
+									  pai32FenceFDs,
+									  i32UpdateFenceFD,
+									  ui323DClientUpdateCount,
+									  paui3DClientUpdateUFOAddress,
+									  paui323DClientUpdateValue,
+									  ui323DClientFenceCount,
+									  paui3DClientFenceUFOAddress,
+									  paui323DClientFenceValue,
+									  &psFDData);
+		if (eError != PVRSRV_OK)
+		{
+		    goto fail_fdsync;
+		}
+		ui323DClientUpdateCount = psFDData->nr_updates;
+		paui3DClientUpdateUFOAddress = psFDData->update_ufo_addresses;
+		paui323DClientUpdateValue = psFDData->update_values;
+		ui323DClientFenceCount = psFDData->nr_checks;
+		paui3DClientFenceUFOAddress = psFDData->check_ufo_addresses;
+		paui323DClientFenceValue = psFDData->check_values;
+	}
+#endif
+
+	/* send one command through the TA */
+	if ((ui32TAClientFenceCount + ui32TAClientUpdateCount + ui32TAServerSyncPrimsCount) > 0)
+	{
+		eError = RGXKickSyncKM(psRenderContext->psDeviceNode,
+		                     psRenderContext->sTAData.psServerCommonContext,
+		                     RGXFWIF_DM_TA,
+							 "SyncTA",
+		                     ui32TAClientFenceCount,
+		                     pauiTAClientFenceUFOAddress,
+		                     paui32TAClientFenceValue,
+		                     ui32TAClientUpdateCount,
+		                     pauiTAClientUpdateUFOAddress,
+		                     paui32TAClientUpdateValue,
+		                     ui32TAServerSyncPrimsCount,
+		                     paui32TAServerSyncFlags,
+		                     pasTAServerSyncs,
+		                     bPDumpContinuous);
+
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to send TA sync command (%s)", __FUNCTION__, PVRSRVGetErrorStringKM(eError)));
+			goto fail_kickTA;
+		}
+	}
+
+	/* and another one through the 3D */
+	if ((ui323DClientFenceCount + ui323DClientUpdateCount + ui323DServerSyncPrimsCount) > 0)
+	{
+		eError = RGXKickSyncKM(psRenderContext->psDeviceNode,
+		                     psRenderContext->s3DData.psServerCommonContext,
+		                     RGXFWIF_DM_3D,
+							 "Sync3D",
+		                     ui323DClientFenceCount,
+		                     paui3DClientFenceUFOAddress,
+		                     paui323DClientFenceValue,
+		                     ui323DClientUpdateCount,
+		                     paui3DClientUpdateUFOAddress,
+		                     paui323DClientUpdateValue,
+		                     ui323DServerSyncPrimsCount,
+		                     paui323DServerSyncFlags,
+		                     pas3DServerSyncs,
+		                     bPDumpContinuous);
+
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to send 3D sync command (%s)", __FUNCTION__, PVRSRVGetErrorStringKM(eError)));
+			goto fail_kick3D;
+		}
+	}
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+#if defined(NO_HARDWARE)
+	pvr_sync_nohw_complete_fences(psFDData);
+#endif
+	pvr_sync_free_append_fences_data(psFDData);
+#endif
+
+	return eError;
+
+fail_kick3D:
+fail_kickTA:
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+	pvr_sync_rollback_append_fences(psFDData);
+	pvr_sync_free_append_fences_data(psFDData);
+fail_fdsync:
+#endif
+
+	return eError;
+}
+/******************************************************************************
+ End of file (rgxta3d.c)
+******************************************************************************/
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxta3d.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxta3d.h
new file mode 100644
index 0000000..6cf4af4
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxta3d.h
@@ -0,0 +1,470 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX TA and 3D Functionality
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX TA and 3D Functionality
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXTA3D_H__)
+#define __RGXTA3D_H__
+
+#include "devicemem.h"
+#include "devicemem_server.h"
+#include "device.h"
+#include "rgxdevice.h"
+#include "rgx_fwif_shared.h"
+#include "rgx_fwif_resetframework.h"
+#include "sync_server.h"
+#include "connection_server.h"
+#include "rgxdebug.h"
+
+typedef struct _RGX_SERVER_RENDER_CONTEXT_ RGX_SERVER_RENDER_CONTEXT;
+typedef struct _RGX_FREELIST_ RGX_FREELIST;
+typedef struct _RGX_PMR_NODE_ RGX_PMR_NODE;
+
+typedef struct {
+	PVRSRV_DEVICE_NODE		*psDeviceNode;
+	DEVMEM_MEMDESC			*psFWHWRTDataMemDesc;
+	DEVMEM_MEMDESC			*psRTACtlMemDesc;
+	DEVMEM_MEMDESC			*psRTArrayMemDesc;
+	DEVMEM_MEMDESC          	*psRendersAccArrayMemDesc;
+	RGX_FREELIST 			*apsFreeLists[RGXFW_MAX_FREELISTS];
+	PVRSRV_CLIENT_SYNC_PRIM	*psCleanupSync;
+} RGX_RTDATA_CLEANUP_DATA;
+
+struct _RGX_FREELIST_ {
+	PVRSRV_RGXDEV_INFO 		*psDevInfo;
+
+	/* Free list PMR */
+	PMR						*psFreeListPMR;
+	IMG_DEVMEM_OFFSET_T		uiFreeListPMROffset;
+
+	/* Freelist config */
+	IMG_UINT32				ui32MaxFLPages;
+	IMG_UINT32				ui32InitFLPages;
+	IMG_UINT32				ui32CurrentFLPages;
+	IMG_UINT32				ui32GrowFLPages;
+	IMG_UINT32				ui32FreelistID;
+	IMG_UINT64				ui64FreelistChecksum;	/* checksum over freelist content */
+	IMG_BOOL				bCheckFreelist;			/* freelist check enabled */
+	IMG_UINT32				ui32RefCount;			/* freelist reference counting */
+
+	IMG_UINT32				ui32NumGrowReqByApp;	/* Total number of grow requests by Application*/
+	IMG_UINT32				ui32NumGrowReqByFW;		/* Total Number of grow requests by Firmware */
+	IMG_UINT32				ui32NumHighPages;		/* High Mark of pages in the freelist */
+
+	IMG_PID					ownerPid;			/* Pid of the owner of the list */
+
+	/* Memory Blocks */
+	DLLIST_NODE				sMemoryBlockHead;
+	DLLIST_NODE				sMemoryBlockInitHead;
+	DLLIST_NODE				sNode;
+
+	/* FW data structures */
+	DEVMEM_MEMDESC			*psFWFreelistMemDesc;
+	RGXFWIF_DEV_VIRTADDR	sFreeListFWDevVAddr;
+
+	PVRSRV_CLIENT_SYNC_PRIM	*psCleanupSync;
+} ;
+
+struct _RGX_PMR_NODE_ {
+	RGX_FREELIST			*psFreeList;
+	PMR						*psPMR;
+	PMR_PAGELIST 			*psPageList;
+	DLLIST_NODE				sMemoryBlock;
+	IMG_UINT32				ui32NumPages;
+	IMG_BOOL				bInternal;
+} ;
+
+typedef struct {
+	PVRSRV_DEVICE_NODE		*psDeviceNode;
+	DEVMEM_MEMDESC			*psRenderTargetMemDesc;
+} RGX_RT_CLEANUP_DATA;
+
+typedef struct {
+	PVRSRV_RGXDEV_INFO		*psDevInfo;
+	DEVMEM_MEMDESC			*psZSBufferMemDesc;
+	RGXFWIF_DEV_VIRTADDR	sZSBufferFWDevVAddr;
+
+	DEVMEMINT_RESERVATION 	*psReservation;
+	PMR 					*psPMR;
+	DEVMEMINT_MAPPING 		*psMapping;
+	PVRSRV_MEMALLOCFLAGS_T 	uiMapFlags;
+	IMG_UINT32 				ui32ZSBufferID;
+	IMG_UINT32 				ui32RefCount;
+	IMG_BOOL				bOnDemand;
+
+	IMG_BOOL				ui32NumReqByApp;		/* Number of Backing Requests from  Application */
+	IMG_BOOL				ui32NumReqByFW;			/* Number of Backing Requests from Firmware */
+
+	IMG_PID					owner;
+
+	DLLIST_NODE	sNode;
+
+	PVRSRV_CLIENT_SYNC_PRIM	*psCleanupSync;
+}RGX_ZSBUFFER_DATA;
+
+typedef struct {
+	RGX_ZSBUFFER_DATA		*psZSBuffer;
+} RGX_POPULATION;
+
+/* Dump the physical pages of a freelist */
+IMG_BOOL RGXDumpFreeListPageList(RGX_FREELIST *psFreeList);
+
+
+/* Create HWRTDataSet */
+IMG_EXPORT
+PVRSRV_ERROR RGXCreateHWRTData(PVRSRV_DEVICE_NODE	*psDeviceNode, 
+							   IMG_UINT32			psRenderTarget,
+							   IMG_DEV_VIRTADDR		psPMMListDevVAddr,
+							   IMG_DEV_VIRTADDR		psVFPPageTableAddr,
+							   RGX_FREELIST			*apsFreeLists[RGXFW_MAX_FREELISTS],
+							   RGX_RTDATA_CLEANUP_DATA	**ppsCleanupData,
+							   DEVMEM_MEMDESC			**ppsRTACtlMemDesc,
+							   IMG_UINT32           ui32PPPScreen,
+							   IMG_UINT32           ui32PPPGridOffset,
+							   IMG_UINT64           ui64PPPMultiSampleCtl,
+							   IMG_UINT32           ui32TPCStride,
+							   IMG_DEV_VIRTADDR		sTailPtrsDevVAddr,
+							   IMG_UINT32           ui32TPCSize,
+							   IMG_UINT32           ui32TEScreen,
+							   IMG_UINT32           ui32TEAA,
+							   IMG_UINT32           ui32TEMTILE1,
+							   IMG_UINT32           ui32TEMTILE2,
+							   IMG_UINT32           ui32MTileStride,
+							   IMG_UINT32                 ui32ISPMergeLowerX,
+							   IMG_UINT32                 ui32ISPMergeLowerY,
+							   IMG_UINT32                 ui32ISPMergeUpperX,
+							   IMG_UINT32                 ui32ISPMergeUpperY,
+							   IMG_UINT32                 ui32ISPMergeScaleX,
+							   IMG_UINT32                 ui32ISPMergeScaleY,
+							   IMG_UINT16			ui16MaxRTs,
+							   DEVMEM_MEMDESC		**psMemDesc,
+							   IMG_UINT32			*puiHWRTData);
+
+/* Destroy HWRTData */
+IMG_EXPORT
+PVRSRV_ERROR RGXDestroyHWRTData(RGX_RTDATA_CLEANUP_DATA *psCleanupData);
+
+/* Create Render Target */
+IMG_EXPORT
+PVRSRV_ERROR RGXCreateRenderTarget(PVRSRV_DEVICE_NODE	*psDeviceNode,
+								   IMG_DEV_VIRTADDR		psVHeapTableDevVAddr,
+								   RGX_RT_CLEANUP_DATA	**ppsCleanupData,
+								   IMG_UINT32			*sRenderTargetFWDevVAddr);
+
+/* Destroy render target */
+IMG_EXPORT
+PVRSRV_ERROR RGXDestroyRenderTarget(RGX_RT_CLEANUP_DATA *psCleanupData);
+
+
+/*
+	RGXCreateZSBuffer
+*/
+IMG_EXPORT
+PVRSRV_ERROR RGXCreateZSBufferKM(PVRSRV_DEVICE_NODE				*psDeviceNode,
+								DEVMEMINT_RESERVATION 	*psReservation,
+								PMR 					*psPMR,
+								PVRSRV_MEMALLOCFLAGS_T 	uiMapFlags,
+								RGX_ZSBUFFER_DATA		 	**ppsZSBuffer,
+								IMG_UINT32					*sRenderTargetFWDevVAddr);
+
+/*
+	RGXDestroyZSBuffer
+*/
+IMG_EXPORT
+PVRSRV_ERROR RGXDestroyZSBufferKM(RGX_ZSBUFFER_DATA *psZSBuffer);
+
+
+/*
+ * RGXBackingZSBuffer()
+ *
+ * Backs ZS-Buffer with physical pages
+ */
+PVRSRV_ERROR
+RGXBackingZSBuffer(RGX_ZSBUFFER_DATA *psZSBuffer);
+
+/*
+ * RGXPopulateZSBufferKM()
+ *
+ * Backs ZS-Buffer with physical pages (called by Bridge calls)
+ */
+IMG_EXPORT
+PVRSRV_ERROR RGXPopulateZSBufferKM(RGX_ZSBUFFER_DATA *psZSBuffer,
+									RGX_POPULATION **ppsPopulation);
+
+/*
+ * RGXUnbackingZSBuffer()
+ *
+ * Frees ZS-Buffer's physical pages
+ */
+IMG_EXPORT
+PVRSRV_ERROR RGXUnbackingZSBuffer(RGX_ZSBUFFER_DATA *psZSBuffer);
+
+/*
+ * RGXUnpopulateZSBufferKM()
+ *
+ * Frees ZS-Buffer's physical pages (called by Bridge calls )
+ */
+IMG_EXPORT
+PVRSRV_ERROR RGXUnpopulateZSBufferKM(RGX_POPULATION *psPopulation);
+
+/*
+	RGXProcessRequestZSBufferBacking
+*/
+IMG_EXPORT
+IMG_VOID RGXProcessRequestZSBufferBacking(PVRSRV_RGXDEV_INFO *psDevInfo,
+										IMG_UINT32 ui32ZSBufferID);
+
+/*
+	RGXProcessRequestZSBufferUnbacking
+*/
+IMG_EXPORT
+IMG_VOID RGXProcessRequestZSBufferUnbacking(PVRSRV_RGXDEV_INFO *psDevInfo,
+										IMG_UINT32 ui32ZSBufferID);
+
+/*
+	RGXGrowFreeList
+*/
+IMG_INTERNAL
+PVRSRV_ERROR RGXGrowFreeList(RGX_FREELIST *psFreeList,
+									IMG_UINT32 ui32NumPages,
+									PDLLIST_NODE pListHeader);
+
+/* Create free list */
+IMG_EXPORT
+PVRSRV_ERROR RGXCreateFreeList(PVRSRV_DEVICE_NODE	*psDeviceNode, 
+							   IMG_UINT32			ui32MaxFLPages,
+							   IMG_UINT32			ui32InitFLPages,
+							   IMG_UINT32			ui32GrowFLPages,
+							   IMG_BOOL				bCheckFreelist,
+							   IMG_DEV_VIRTADDR		sFreeListDevVAddr,
+							   PMR					*psFreeListPMR,
+							   IMG_DEVMEM_OFFSET_T	uiFreeListPMROffset,
+							   RGX_FREELIST			**ppsFreeList);
+
+/* Destroy free list */
+IMG_EXPORT
+PVRSRV_ERROR RGXDestroyFreeList(RGX_FREELIST *psFreeList);
+
+/*
+	RGXProcessRequestGrow
+*/
+IMG_EXPORT
+IMG_VOID RGXProcessRequestGrow(PVRSRV_RGXDEV_INFO *psDevInfo,
+								IMG_UINT32 ui32FreelistID);
+
+
+/* Grow free list */
+IMG_EXPORT
+PVRSRV_ERROR RGXAddBlockToFreeListKM(RGX_FREELIST *psFreeList,
+										IMG_UINT32 ui32NumPages);
+
+/* Shrink free list */
+IMG_EXPORT
+PVRSRV_ERROR RGXRemoveBlockFromFreeListKM(RGX_FREELIST *psFreeList);
+
+
+/* Reconstruct free list after Hardware Recovery */
+IMG_VOID RGXProcessRequestFreelistsReconstruction(PVRSRV_RGXDEV_INFO *psDevInfo,
+								RGXFWIF_DM eDM,
+								IMG_UINT32 ui32FreelistsCount,
+								IMG_UINT32 *paui32Freelists);
+
+/*!
+*******************************************************************************
+
+ @Function	PVRSRVRGXCreateRenderContextKM
+
+ @Description
+	Server-side implementation of RGXCreateRenderContext
+
+ @Input pvDeviceNode - device node
+ @Input psTACCBMemDesc - TA CCB Memory descriptor
+ @Input psTACCBCtlMemDesc - TA CCB Ctrl Memory descriptor
+ @Input ps3DCCBMemDesc - 3D CCB Memory descriptor
+ @Input ps3DCCBCtlMemDesc - 3D CCB Ctrl Memory descriptor
+ @Input ui32Priority - context priority
+ @Input sMCUFenceAddr - MCU Fence device virtual address
+ @Input psVDMStackPointer - VDM call stack device virtual address
+ @Input ui32FrameworkRegisterSize - framework register size
+ @Input pbyFrameworkRegisters - ptr to framework register
+ @Input hMemCtxPrivData - memory context private data
+ @Output ppsCleanupData - clean up data
+ @Output ppsFWRenderContextMemDesc - firmware render context memory descriptor
+ @Output ppsFWContextStateMemDesc - firmware context state memory descriptor
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXCreateRenderContextKM(CONNECTION_DATA				*psConnection,
+											PVRSRV_DEVICE_NODE			*psDeviceNode,
+											IMG_UINT32					ui32Priority,
+											IMG_DEV_VIRTADDR			sMCUFenceAddr,
+											IMG_DEV_VIRTADDR			sVDMCallStackAddr,
+											IMG_UINT32					ui32FrameworkCommandSize,
+											IMG_PBYTE					pabyFrameworkCommand,
+											IMG_HANDLE					hMemCtxPrivData,
+											RGX_SERVER_RENDER_CONTEXT	**ppsRenderContext);
+
+
+/*!
+*******************************************************************************
+
+ @Function	PVRSRVRGXDestroyRenderContextKM
+
+ @Description
+	Server-side implementation of RGXDestroyRenderContext
+
+ @Input psCleanupData - clean up data
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXDestroyRenderContextKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext);
+
+
+/*!
+*******************************************************************************
+
+ @Function	PVRSRVRGXKickTA3DKM
+
+ @Description
+	Server-side implementation of RGXKickTA3D
+
+ @Input psRTDataCleanup - RT data associated with the kick (or NULL)
+ @Input psZBuffer - Z-buffer associated with the kick (or NULL)
+ @Input psSBuffer - S-buffer associated with the kick (or NULL)
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT	*psRenderContext,
+								 IMG_UINT32					ui32ClientTAFenceCount,
+								 PRGXFWIF_UFO_ADDR			*pauiClientTAFenceUFOAddress,
+								 IMG_UINT32					*paui32ClientTAFenceValue,
+								 IMG_UINT32					ui32ClientTAUpdateCount,
+								 PRGXFWIF_UFO_ADDR			*pauiClientUpdateTAUFOAddress,
+								 IMG_UINT32					*paui32ClientTAUpdateValue,
+								 IMG_UINT32					ui32ServerTASyncPrims,
+								 IMG_UINT32					*paui32ServerTASyncFlags,
+								 SERVER_SYNC_PRIMITIVE 		**pasServerTASyncs,
+								 IMG_UINT32					ui32Client3DFenceCount,
+								 PRGXFWIF_UFO_ADDR			*pauiClient3DFenceUFOAddress,
+								 IMG_UINT32					*paui32Client3DFenceValue,
+								 IMG_UINT32					ui32Client3DUpdateCount,
+								 PRGXFWIF_UFO_ADDR			*pauiClientUpdate3DUFOAddress,
+								 IMG_UINT32					*paui32Client3DUpdateValue,
+								 IMG_UINT32					ui32Server3DSyncPrims,
+								 IMG_UINT32					*paui32Server3DSyncFlags,
+								 SERVER_SYNC_PRIMITIVE 		**pasServer3DSyncs,
+								 PRGXFWIF_UFO_ADDR			uiPRFenceUFOAddress,
+								 IMG_UINT32					ui32PRFenceValue,
+								 IMG_UINT32					ui32NumCheckFenceFDs,
+								 IMG_INT32					*pai32CheckFenceFDs,
+								 IMG_INT32                  i32UpdateFenceFD,
+								 IMG_UINT32					ui32TACmdSize,
+								 IMG_PBYTE					pui8TADMCmd,
+								 IMG_UINT32					ui323DPRCmdSize,
+								 IMG_PBYTE					pui83DPRDMCmd,
+								 IMG_UINT32					ui323DCmdSize,
+								 IMG_PBYTE					pui83DDMCmd,
+								 IMG_UINT32					TAFrameNum,
+								 IMG_UINT32					TARTData,
+								 IMG_BOOL					bLastTAInScene,
+								 IMG_BOOL					bKickTA,
+								 IMG_BOOL					bKickPR,
+								 IMG_BOOL					bKick3D,
+								 IMG_BOOL					bAbort,
+								 IMG_BOOL					bPDumpContinuous,
+								 RGX_RTDATA_CLEANUP_DATA        *psRTDataCleanup,
+								 RGX_ZSBUFFER_DATA              *psZBuffer,
+								 RGX_ZSBUFFER_DATA               *psSBuffer,
+								 IMG_BOOL						bCommitRefCountsTA,
+								 IMG_BOOL						bCommitRefCounts3D,
+								 IMG_BOOL						*pbCommittedRefCountsTA,
+								 IMG_BOOL						*pbCommittedRefCounts3D);
+
+
+PVRSRV_ERROR PVRSRVRGXSetRenderContextPriorityKM(CONNECTION_DATA *psConnection,
+												 RGX_SERVER_RENDER_CONTEXT *psRenderContext,
+												 IMG_UINT32 ui32Priority);
+
+PVRSRV_ERROR PVRSRVRGXGetLastRenderContextResetReasonKM(RGX_SERVER_RENDER_CONTEXT	*psRenderContext,
+                                                        IMG_UINT32 *peLastResetReason);
+
+PVRSRV_ERROR PVRSRVRGXGetPartialRenderCountKM(DEVMEM_MEMDESC *psHWRTDataMemDesc,
+											  IMG_UINT32 *pui32NumPartialRenders);
+
+/* Debug - check if render context is waiting on a fence */
+IMG_VOID CheckForStalledRenderCtxt(PVRSRV_RGXDEV_INFO *psDevInfo,
+								   DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf);
+
+/* Debug/Watchdog - check if client contexts are stalled */
+IMG_BOOL CheckForStalledClientRenderCtxt(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+IMG_EXPORT PVRSRV_ERROR 
+PVRSRVRGXKickSyncTAKM(RGX_SERVER_RENDER_CONTEXT  *psRenderContext,
+                       IMG_UINT32                  ui32TAClientFenceCount,
+                       PRGXFWIF_UFO_ADDR           *pauiTAClientFenceUFOAddress,
+                       IMG_UINT32                  *paui32TAClientFenceValue,
+                       IMG_UINT32                  ui32TAClientUpdateCount,
+                       PRGXFWIF_UFO_ADDR           *pauiTAClientUpdateUFOAddress,
+                       IMG_UINT32                  *paui32TAClientUpdateValue,
+                       IMG_UINT32                  ui32TAServerSyncPrims,
+                       IMG_UINT32                  *paui32TAServerSyncFlags,
+                       SERVER_SYNC_PRIMITIVE       **pasTAServerSyncs,
+					   IMG_UINT32                  ui323DClientFenceCount,
+					   PRGXFWIF_UFO_ADDR           *paui3DClientFenceUFOAddress,
+					   IMG_UINT32                  *paui323DClientFenceValue,
+					   IMG_UINT32                  ui323DClientUpdateCount,
+					   PRGXFWIF_UFO_ADDR           *paui3DClientUpdateUFOAddress,
+					   IMG_UINT32                  *paui323DClientUpdateValue,
+					   IMG_UINT32                  ui323DServerSyncPrims,
+					   IMG_UINT32                  *paui323DServerSyncFlags,
+					   SERVER_SYNC_PRIMITIVE       **pas3DServerSyncs,
+					   IMG_UINT32				   ui32NumFenceFDs,
+					   IMG_INT32				   *pai32FenceFDs,
+					   IMG_INT32                   i32UpdateFenceFD,
+                       IMG_BOOL                    bPDumpContinuous);
+
+#endif /* __RGXTA3D_H__ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxtimecorr.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxtimecorr.c
new file mode 100644
index 0000000..567f2f6
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxtimecorr.c
@@ -0,0 +1,306 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device specific time correlation and calibration routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific time correlation and calibration routines
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "rgxtimecorr.h"
+#include "rgxfwutils.h"
+
+/******************************************************************************
+ *
+ * - A calibration period is started on power-on and after a DVFS transition,
+ *   and it's closed before a power-off and before a DVFS transition
+ *   (so power-on -> dfvs -> dvfs -> power-off , power on -> dvfs -> dvfs...,
+ *   where each arrow is a calibration period)
+ *
+ * - The timers on the Host and on the FW are correlated at the beginning of
+ *   each period together with the (possibly calibrated) current GPU frequency
+ *
+ * - If the frequency has not changed since the last power-off/on sequence or
+ *   before/after a DVFS transition (-> the transition didn't really happen)
+ *   then multiple consecutive periods are merged (the higher the numbers the
+ *   better the accuracy in the computed clock speed)
+ *
+ * - Correlation and calibration are also done more or less periodically
+ *   (using a best effort approach)
+ *
+ *****************************************************************************/
+
+static IMG_VOID _RGXMakeTimeCorrData(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_RGXDEV_INFO    *psDevInfo     = psDeviceNode->pvDevice;
+	RGXFWIF_GPU_UTIL_FWCB *psGpuUtilFWCB = psDevInfo->psRGXFWIfGpuUtilFWCb;
+	RGX_GPU_DVFS_TABLE    *psGpuDVFSTable = psDevInfo->psGpuDVFSTable;
+	RGXFWIF_TIME_CORR     *psTimeCorr;
+	IMG_UINT32            ui32NewSeqCount;
+	IMG_UINT32            ui32CoreClockSpeed;
+	IMG_UINT32            ui32Remainder;
+
+	ui32CoreClockSpeed = psGpuDVFSTable->aui32DVFSClock[psGpuDVFSTable->ui32CurrentDVFSId];
+
+	ui32NewSeqCount = psGpuUtilFWCB->ui32TimeCorrSeqCount + 1;
+	psTimeCorr = &psGpuUtilFWCB->sTimeCorr[RGXFWIF_TIME_CORR_CURR_INDEX(ui32NewSeqCount)];
+
+	psTimeCorr->ui64CRTimeStamp    = RGXReadHWTimerReg(psDevInfo);
+	psTimeCorr->ui64OSTimeStamp    = OSClockns64();
+	psTimeCorr->ui32CoreClockSpeed = ui32CoreClockSpeed;
+	psTimeCorr->ui32CRDeltaToOSDeltaKNs =
+	    RGXFWIF_GET_CRDELTA_TO_OSDELTA_K_NS(ui32CoreClockSpeed, ui32Remainder);
+
+	/* Make sure the values are written to memory before updating the index of the current entry */
+	OSWriteMemoryBarrier();
+
+	/* Update the index of the current entry in the timer correlation array */
+	psGpuUtilFWCB->ui32TimeCorrSeqCount = ui32NewSeqCount;
+
+	PVR_DPF((PVR_DBG_MESSAGE,"RGXMakeTimeCorrData: Correlated OS timestamp %llu (ns) with CR timestamp %llu, GPU clock speed %uHz",
+	         psTimeCorr->ui64OSTimeStamp, psTimeCorr->ui64CRTimeStamp, psTimeCorr->ui32CoreClockSpeed));
+}
+
+
+static IMG_VOID _RGXGPUFreqCalibrationPeriodStart(PVRSRV_DEVICE_NODE *psDeviceNode, RGX_GPU_DVFS_TABLE *psGpuDVFSTable)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo         = psDeviceNode->pvDevice;
+	RGX_DATA           *psRGXData         = (RGX_DATA*)psDeviceNode->psDevConfig->hDevData;
+	IMG_UINT32         ui32CoreClockSpeed = psRGXData->psRGXTimingInfo->ui32CoreClockSpeed;
+	IMG_UINT32         ui32Index          = RGX_GPU_DVFS_GET_INDEX(ui32CoreClockSpeed);
+
+	IMG_UINT64 ui64CRTimestamp = RGXReadHWTimerReg(psDevInfo);
+	IMG_UINT64 ui64OSTimestamp = OSClockus64();
+
+	psGpuDVFSTable->ui64CalibrationCRTimestamp = ui64CRTimestamp;
+	psGpuDVFSTable->ui64CalibrationOSTimestamp = ui64OSTimestamp;
+
+	/* Set the time needed to (re)calibrate the GPU frequency */
+	if((psGpuDVFSTable->aui32DVFSClock[ui32Index] == 0) ||                /* We never met this frequency */
+	   (psGpuDVFSTable->aui32DVFSClock[ui32Index] == ui32CoreClockSpeed)) /* We weren't able to calibrate this frequency previously */
+	{
+		psGpuDVFSTable->aui32DVFSClock[ui32Index] = ui32CoreClockSpeed;
+		psGpuDVFSTable->ui32CalibrationPeriod     = RGX_GPU_DVFS_FIRST_CALIBRATION_TIME_US;
+
+		PVR_DPF((PVR_DBG_MESSAGE, "RGXGPUFreqCalibrationStart: using uncalibrated GPU frequency %u", ui32CoreClockSpeed));
+	}
+	else if(psGpuDVFSTable->ui32CalibrationPeriod == RGX_GPU_DVFS_FIRST_CALIBRATION_TIME_US)
+	{
+		psGpuDVFSTable->ui32CalibrationPeriod = RGX_GPU_DVFS_TRANSITION_CALIBRATION_TIME_US;
+	}
+	else
+	{
+		psGpuDVFSTable->ui32CalibrationPeriod = RGX_GPU_DVFS_PERIODIC_CALIBRATION_TIME_US;
+	}
+
+	/* Update the index to the DVFS table */
+	psGpuDVFSTable->ui32CurrentDVFSId = ui32Index;
+}
+
+
+static IMG_VOID _RGXGPUFreqCalibrationPeriodStop(PVRSRV_DEVICE_NODE *psDeviceNode, RGX_GPU_DVFS_TABLE *psGpuDVFSTable)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+	IMG_UINT64 ui64CRTimestamp = RGXReadHWTimerReg(psDevInfo);
+	IMG_UINT64 ui64OSTimestamp = OSClockus64();
+
+	if(!psGpuDVFSTable->bAccumulatePeriod)
+	{
+		psGpuDVFSTable->ui64CalibrationCRTimediff = 0;
+		psGpuDVFSTable->ui64CalibrationOSTimediff = 0;
+	}
+
+	psGpuDVFSTable->ui64CalibrationCRTimediff += ui64CRTimestamp - psGpuDVFSTable->ui64CalibrationCRTimestamp;
+	psGpuDVFSTable->ui64CalibrationOSTimediff += ui64OSTimestamp - psGpuDVFSTable->ui64CalibrationOSTimestamp;
+}
+
+
+static IMG_UINT32 _RGXGPUFreqCalibrationCalculate(PVRSRV_DEVICE_NODE *psDeviceNode, RGX_GPU_DVFS_TABLE *psGpuDVFSTable)
+{
+	IMG_UINT32 ui32CalibratedClockSpeed;
+	IMG_UINT32 ui32Remainder;
+
+	ui32CalibratedClockSpeed =
+	    RGXFWIF_GET_GPU_CLOCK_FREQUENCY_HZ(psGpuDVFSTable->ui64CalibrationCRTimediff, psGpuDVFSTable->ui64CalibrationOSTimediff, ui32Remainder);
+
+	PVR_DPF((PVR_DBG_MESSAGE, "GPU frequency calibration: %u -> %u done over %llu us",
+	         psGpuDVFSTable->aui32DVFSClock[psGpuDVFSTable->ui32CurrentDVFSId],
+	         ui32CalibratedClockSpeed,
+	         psGpuDVFSTable->ui64CalibrationOSTimediff));
+
+	psGpuDVFSTable->aui32DVFSClock[psGpuDVFSTable->ui32CurrentDVFSId] = ui32CalibratedClockSpeed;
+
+	return ui32CalibratedClockSpeed;
+}
+
+
+/*
+	RGXGPUFreqCalibratePrePowerState
+*/
+IMG_VOID RGXGPUFreqCalibratePrePowerState(IMG_HANDLE hDevHandle)
+{
+	PVRSRV_DEVICE_NODE  *psDeviceNode   = hDevHandle;
+	PVRSRV_RGXDEV_INFO  *psDevInfo      = psDeviceNode->pvDevice;
+	RGX_GPU_DVFS_TABLE  *psGpuDVFSTable = psDevInfo->psGpuDVFSTable;
+
+	_RGXGPUFreqCalibrationPeriodStop(psDeviceNode, psGpuDVFSTable);
+
+	if(psGpuDVFSTable->ui64CalibrationOSTimediff >= psGpuDVFSTable->ui32CalibrationPeriod)
+	{
+		_RGXGPUFreqCalibrationCalculate(psDeviceNode, psGpuDVFSTable);
+	}
+}
+
+
+/*
+	RGXGPUFreqCalibratePostPowerState
+*/
+IMG_VOID RGXGPUFreqCalibratePostPowerState(IMG_HANDLE hDevHandle)
+{
+	PVRSRV_DEVICE_NODE  *psDeviceNode      = hDevHandle;
+	PVRSRV_RGXDEV_INFO  *psDevInfo         = psDeviceNode->pvDevice;
+	RGX_GPU_DVFS_TABLE  *psGpuDVFSTable    = psDevInfo->psGpuDVFSTable;
+	RGX_DATA            *psRGXData         = (RGX_DATA*)psDeviceNode->psDevConfig->hDevData;
+	IMG_UINT32          ui32CoreClockSpeed = psRGXData->psRGXTimingInfo->ui32CoreClockSpeed;
+
+	/* If the frequency hasn't changed then accumulate the time diffs to get a better result */
+	psGpuDVFSTable->bAccumulatePeriod = (RGX_GPU_DVFS_GET_INDEX(ui32CoreClockSpeed) == psGpuDVFSTable->ui32CurrentDVFSId);
+
+	_RGXGPUFreqCalibrationPeriodStart(psDeviceNode, psGpuDVFSTable);
+
+	/* Update the timer correlation data */
+	_RGXMakeTimeCorrData(psDeviceNode);
+}
+
+
+/*
+	RGXGPUFreqCalibratePreClockSpeedChange
+*/
+IMG_VOID RGXGPUFreqCalibratePreClockSpeedChange(IMG_HANDLE hDevHandle)
+{
+	PVRSRV_DEVICE_NODE  *psDeviceNode   = hDevHandle;
+	PVRSRV_RGXDEV_INFO  *psDevInfo      = psDeviceNode->pvDevice;
+	RGX_GPU_DVFS_TABLE  *psGpuDVFSTable = psDevInfo->psGpuDVFSTable;
+
+	_RGXGPUFreqCalibrationPeriodStop(psDeviceNode, psGpuDVFSTable);
+
+	/* Wait until RGXPostClockSpeedChange() to do anything as the GPU frequency may be left
+	 * unchanged (in that case we delay calibration/correlation to get a better result later) */
+}
+
+
+/*
+	RGXGPUFreqCalibratePostClockSpeedChange
+*/
+IMG_UINT32 RGXGPUFreqCalibratePostClockSpeedChange(IMG_HANDLE hDevHandle, IMG_UINT32 ui32NewClockSpeed)
+{
+	PVRSRV_DEVICE_NODE  *psDeviceNode          = hDevHandle;
+	PVRSRV_RGXDEV_INFO  *psDevInfo             = psDeviceNode->pvDevice;
+	RGX_GPU_DVFS_TABLE  *psGpuDVFSTable        = psDevInfo->psGpuDVFSTable;
+	IMG_UINT32          ui32ReturnedClockSpeed = ui32NewClockSpeed;
+
+	if(RGX_GPU_DVFS_GET_INDEX(ui32NewClockSpeed) != psGpuDVFSTable->ui32CurrentDVFSId)
+	{
+		/* Only calibrate if the last period was long enough */
+		if(psGpuDVFSTable->ui64CalibrationOSTimediff >= RGX_GPU_DVFS_TRANSITION_CALIBRATION_TIME_US)
+		{
+			ui32ReturnedClockSpeed = _RGXGPUFreqCalibrationCalculate(psDeviceNode, psGpuDVFSTable);
+		}
+
+		_RGXGPUFreqCalibrationPeriodStart(psDeviceNode, psGpuDVFSTable);
+
+		/* Update the timer correlation data */
+		_RGXMakeTimeCorrData(psDeviceNode);
+
+		psGpuDVFSTable->bAccumulatePeriod = IMG_FALSE;
+	}
+	else
+	{
+		psGpuDVFSTable->bAccumulatePeriod = IMG_TRUE;
+	}
+
+	return ui32ReturnedClockSpeed;
+}
+
+
+/*
+	RGXGPUFreqCalibrateCorrelatePeriodic
+*/
+IMG_VOID RGXGPUFreqCalibrateCorrelatePeriodic(IMG_HANDLE hDevHandle)
+{
+	PVRSRV_DEVICE_NODE     *psDeviceNode   = hDevHandle;
+	PVRSRV_RGXDEV_INFO     *psDevInfo      = psDeviceNode->pvDevice;
+	RGX_GPU_DVFS_TABLE     *psGpuDVFSTable = psDevInfo->psGpuDVFSTable;
+	PVRSRV_DATA            *psPVRSRVData;
+	IMG_UINT64             ui64TimeNow     = OSClockus64();
+	PVRSRV_DEV_POWER_STATE ePowerState;
+
+	/* Check if it's the right time to recalibrate the GPU clock frequency */
+	if((ui64TimeNow - psGpuDVFSTable->ui64CalibrationOSTimestamp) < psGpuDVFSTable->ui32CalibrationPeriod) return;
+
+	/* Try to acquire the powerlock, if not possible then don't wait */
+	psPVRSRVData     = PVRSRVGetPVRSRVData();
+	if (OSLockIsLocked(psPVRSRVData->hPowerLock)) return; /* Better to not wait here if possible */
+	/* There's a chance that the powerlock could be taken here, it's not that bad even if not desirable
+	   (TODO use OSTryLockAcquire, currently implemented under Linux only) */
+	if (PVRSRVPowerLock() != PVRSRV_OK) return;
+
+	/* If the GPU is off then we can't do anything */
+	PVRSRVGetDevicePowerState(psDeviceNode->sDevId.ui32DeviceIndex, &ePowerState);
+	if (ePowerState != PVRSRV_DEV_POWER_STATE_ON)
+	{
+		PVRSRVPowerUnlock();
+		return;
+	}
+
+	/* All checks passed, we can calibrate and correlate */
+	_RGXGPUFreqCalibrationPeriodStop(psDeviceNode, psGpuDVFSTable);
+	_RGXGPUFreqCalibrationCalculate(psDeviceNode, psGpuDVFSTable);
+	_RGXGPUFreqCalibrationPeriodStart(psDeviceNode, psGpuDVFSTable);
+	_RGXMakeTimeCorrData(psDeviceNode);
+
+	/* Force Accumulate Period to false to not trigger a periodic calibration over and over again */
+	psGpuDVFSTable->bAccumulatePeriod = IMG_FALSE;
+
+	PVRSRVPowerUnlock();
+}
+
+
+/******************************************************************************
+ End of file (rgxtimecorr.c)
+******************************************************************************/
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxtimecorr.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxtimecorr.h
new file mode 100644
index 0000000..da40189
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxtimecorr.h
@@ -0,0 +1,125 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX time correlation and calibration header file
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX time correlation and calibration routines
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXTIMECORR_H__)
+#define __RGXTIMECORR_H__
+
+#include "img_types.h"
+
+/*!
+******************************************************************************
+
+ @Function    RGXGPUFreqCalibratePrePowerState
+
+ @Description Manage GPU frequency and timer correlation data
+              before a power off.
+
+ @Input       hDevHandle : RGX Device Node
+
+ @Return      IMG_VOID
+
+******************************************************************************/
+IMG_VOID RGXGPUFreqCalibratePrePowerState(IMG_HANDLE hDevHandle);
+
+/*!
+******************************************************************************
+
+ @Function    RGXGPUFreqCalibratePostPowerState
+
+ @Description Manage GPU frequency and timer correlation data
+              after a power on.
+
+ @Input       hDevHandle : RGX Device Node
+
+ @Return      IMG_VOID
+
+******************************************************************************/
+IMG_VOID RGXGPUFreqCalibratePostPowerState(IMG_HANDLE hDevHandle);
+
+/*!
+******************************************************************************
+
+ @Function    RGXGPUFreqCalibratePreClockSpeedChange
+
+ @Description Manage GPU frequency and timer correlation data
+              before a DVFS transition.
+
+ @Input       hDevHandle : RGX Device Node
+
+ @Return      IMG_VOID
+
+******************************************************************************/
+IMG_VOID RGXGPUFreqCalibratePreClockSpeedChange(IMG_HANDLE hDevHandle);
+
+/*!
+******************************************************************************
+
+ @Function    RGXGPUFreqCalibratePostClockSpeedChange
+
+ @Description Manage GPU frequency and timer correlation data
+              after a DVFS transition.
+
+ @Input       hDevHandle        : RGX Device Node
+ @Input       ui32NewClockSpeed : GPU clock speed after the DVFS transition
+
+ @Return      IMG_UINT32 : Calibrated GPU clock speed after the DVFS transition
+
+******************************************************************************/
+IMG_UINT32 RGXGPUFreqCalibratePostClockSpeedChange(IMG_HANDLE hDevHandle, IMG_UINT32 ui32NewClockSpeed);
+
+/*!
+******************************************************************************
+
+ @Function    RGXGPUFreqCalibratePeriodic
+
+ @Description Calibrate the GPU clock speed and correlate the timers
+              at regular intervals.
+
+ @Input       hDevHandle : RGX Device Node
+
+ @Return      IMG_VOID
+
+******************************************************************************/
+IMG_VOID RGXGPUFreqCalibrateCorrelatePeriodic(IMG_HANDLE hDevHandle);
+
+#endif /* __RGXTIMECORR_H__ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxtimerquery.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxtimerquery.c
new file mode 100644
index 0000000..2d822cc
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxtimerquery.c
@@ -0,0 +1,243 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX Timer queries
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    RGX Timer queries
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "rgxtimerquery.h"
+#include "rgxdevice.h"
+
+#include "rgxfwutils.h"
+#include "pdump_km.h"
+
+PVRSRV_ERROR
+PVRSRVRGXBeginTimerQueryKM(PVRSRV_DEVICE_NODE * psDeviceNode,
+                           IMG_UINT32         ui32QueryId)
+{
+	PVRSRV_RGXDEV_INFO * psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+
+	if (ui32QueryId >= RGX_MAX_TIMER_QUERIES)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psDevInfo->bSaveStart = IMG_TRUE;
+	psDevInfo->bSaveEnd   = IMG_TRUE;
+
+	/* clear the stamps, in case there is no Kick */
+	psDevInfo->pasStartTimeById[ui32QueryId].ui64Timestamp = 0UL;
+	psDevInfo->pasEndTimeById[ui32QueryId].ui64Timestamp   = 0UL;
+
+	/* save of the active query index */
+	psDevInfo->ui32ActiveQueryId = ui32QueryId;
+
+	return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+PVRSRVRGXEndTimerQueryKM(PVRSRV_DEVICE_NODE * psDeviceNode)
+{
+	PVRSRV_RGXDEV_INFO * psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+
+	/* clear off the flags set by Begin(). Note that _START_TIME is
+	 * probably already cleared by Kick()
+	 */
+	psDevInfo->bSaveStart = IMG_FALSE;
+	psDevInfo->bSaveEnd   = IMG_FALSE;
+
+	return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+PVRSRVRGXQueryTimerKM(PVRSRV_DEVICE_NODE * psDeviceNode,
+                      IMG_UINT32         ui32QueryId,
+                      IMG_UINT64         * pui64StartTime,
+                      IMG_UINT64         * pui64EndTime)
+{
+	PVRSRV_RGXDEV_INFO * psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+	IMG_UINT32         ui32Scheduled;
+	IMG_UINT32         ui32Completed;
+
+	if (ui32QueryId >= RGX_MAX_TIMER_QUERIES)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	ui32Scheduled = psDevInfo->aui32ScheduledOnId[ui32QueryId];
+	ui32Completed = psDevInfo->pui32CompletedById[ui32QueryId];
+
+	/* if there was no kick since the Begin() on this id we return 0-s as Begin cleared
+	 * the stamps. If there was no begin the returned data is undefined - but still
+	 * safe from services pov
+	 */
+	if (ui32Completed >= ui32Scheduled)
+	{
+		RGXFWIF_TIMESTAMP * psTimestamp;
+		RGXFWIF_TIME_CORR * psTimeCorr;
+		IMG_UINT64        ui64CRTimeDiff;
+
+		psTimestamp = &psDevInfo->pasStartTimeById[ui32QueryId];
+
+		/* If the start time is 0 then don't attempt to compute the absolute
+		 * timestamp, it could end up with a division by zero.
+		 * Not necessary to repeat the check on the end time, when we enter
+		 * this case the time has been updated by the Firmware.
+		 */
+		if(psTimestamp->ui64Timestamp == 0)
+		{
+			* pui64StartTime = 0;
+			* pui64EndTime = 0;
+			return PVRSRV_OK;
+		}
+
+		psTimeCorr       = &psTimestamp->sTimeCorr;
+		ui64CRTimeDiff   = psTimestamp->ui64Timestamp - psTimeCorr->ui64CRTimeStamp;
+		* pui64StartTime = psTimeCorr->ui64OSTimeStamp +
+		                   RGXFWIF_GET_DELTA_OSTIME_NS(ui64CRTimeDiff, psTimeCorr->ui32CRDeltaToOSDeltaKNs);
+
+		psTimestamp      = &psDevInfo->pasEndTimeById[ui32QueryId];
+		psTimeCorr       = &psTimestamp->sTimeCorr;
+		ui64CRTimeDiff   = psTimestamp->ui64Timestamp - psTimeCorr->ui64CRTimeStamp;
+		* pui64EndTime   = psTimeCorr->ui64OSTimeStamp +
+		                   RGXFWIF_GET_DELTA_OSTIME_NS(ui64CRTimeDiff, psTimeCorr->ui32CRDeltaToOSDeltaKNs);
+
+		return PVRSRV_OK;
+	}
+	else
+	{
+		return PVRSRV_ERROR_RESOURCE_UNAVAILABLE;
+	}
+}
+
+
+PVRSRV_ERROR
+PVRSRVRGXCurrentTime(PVRSRV_DEVICE_NODE * psDeviceNode,
+                     IMG_UINT64         * pui64Time)
+{
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+
+	*pui64Time = OSClockns64();
+
+	return PVRSRV_OK;
+}
+
+
+
+/******************************************************************************
+ NOT BRIDGED/EXPORTED FUNCS
+******************************************************************************/
+/* writes a time stamp command in the client CCB */
+IMG_VOID
+RGXWriteTimestampCommand(IMG_PBYTE            * ppbyPtr,
+                         RGXFWIF_CCB_CMD_TYPE eCmdType,
+                         RGXFWIF_DEV_VIRTADDR pTimestamp)
+{
+	RGXFWIF_CCB_CMD_HEADER * psHeader;
+
+	psHeader = (RGXFWIF_CCB_CMD_HEADER *) (*ppbyPtr);
+
+	PVR_ASSERT(eCmdType == RGXFWIF_CCB_CMD_TYPE_PRE_TIMESTAMP
+	           || eCmdType == RGXFWIF_CCB_CMD_TYPE_POST_TIMESTAMP);
+
+	psHeader->eCmdType    = eCmdType;
+	psHeader->ui32CmdSize = (sizeof(RGXFWIF_DEV_VIRTADDR) + RGXFWIF_FWALLOC_ALIGN - 1) & ~(RGXFWIF_FWALLOC_ALIGN  - 1);
+
+	(*ppbyPtr) += sizeof(RGXFWIF_CCB_CMD_HEADER);
+
+	(*(RGXFWIF_DEV_VIRTADDR*)*ppbyPtr) = pTimestamp;
+
+	(*ppbyPtr) += psHeader->ui32CmdSize;
+}
+
+
+IMG_VOID
+RGX_GetTimestampCmdHelper(PVRSRV_RGXDEV_INFO   * psDevInfo,
+                          RGXFWIF_DEV_VIRTADDR * ppPreTimestamp,
+                          RGXFWIF_DEV_VIRTADDR * ppPostTimestamp,
+                          PRGXFWIF_UFO_ADDR    * ppUpdate)
+{
+	if (ppPreTimestamp != IMG_NULL)
+	{
+		if (psDevInfo->bSaveStart)
+		{
+			/* drop the SaveStart on the first Kick */
+			psDevInfo->bSaveStart = IMG_FALSE;
+
+			RGXSetFirmwareAddress(ppPreTimestamp,
+			                      psDevInfo->psStartTimeMemDesc,
+			                      sizeof(RGXFWIF_TIMESTAMP) * psDevInfo->ui32ActiveQueryId,
+			                      RFW_FWADDR_NOREF_FLAG);
+		}
+		else
+		{
+			ppPreTimestamp->ui32Addr = 0;
+		}
+	}
+
+	if (ppPostTimestamp != IMG_NULL && ppUpdate != IMG_NULL)
+	{
+		if (psDevInfo->bSaveEnd)
+		{
+			RGXSetFirmwareAddress(ppPostTimestamp,
+			                      psDevInfo->psEndTimeMemDesc,
+			                      sizeof(RGXFWIF_TIMESTAMP) * psDevInfo->ui32ActiveQueryId,
+			                      RFW_FWADDR_NOREF_FLAG);
+
+			psDevInfo->aui32ScheduledOnId[psDevInfo->ui32ActiveQueryId]++;
+
+			RGXSetFirmwareAddress(ppUpdate,
+			                      psDevInfo->psCompletedMemDesc,
+			                      sizeof(IMG_UINT32) * psDevInfo->ui32ActiveQueryId,
+			                      RFW_FWADDR_NOREF_FLAG);
+		}
+		else
+		{
+			ppUpdate->ui32Addr        = 0;
+			ppPostTimestamp->ui32Addr = 0;
+		}
+	}
+}
+
+
+/******************************************************************************
+ End of file (rgxtimerquery.c)
+******************************************************************************/
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxtimerquery.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxtimerquery.h
new file mode 100644
index 0000000..14d8118
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxtimerquery.h
@@ -0,0 +1,129 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX Timer queries
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX Timer queries functionality
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if ! defined (_RGX_TIMERQUERIES_H_)
+#define _RGX_TIMERQUERIES_H_
+
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "device.h"
+#include "rgxdevice.h"
+
+/**************************************************************************/ /*!
+@Function       PVRSRVRGXBeginTimerQuery
+@Description    Opens a new timer query.
+
+@Input          ui32QueryId an identifier between [ 0 and RGX_MAX_TIMER_QUERIES - 1 ]
+@Return         PVRSRV_OK on success.
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVRGXBeginTimerQueryKM(PVRSRV_DEVICE_NODE * psDeviceNode,
+                           IMG_UINT32         ui32QueryId);
+
+
+/**************************************************************************/ /*!
+@Function       PVRSRVRGXEndTimerQuery
+@Description    Closes a timer query
+
+                The lack of ui32QueryId argument expresses the fact that there can't
+                be overlapping queries open.
+@Return         PVRSRV_OK on success.
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVRGXEndTimerQueryKM(PVRSRV_DEVICE_NODE * psDeviceNode);
+
+
+
+/**************************************************************************/ /*!
+@Function       PVRSRVRGXQueryTimer
+@Description    Queries the state of the specified timer
+
+@Input          ui32QueryId an identifier between [ 0 and RGX_MAX_TIMER_QUERIES - 1 ]
+@Out            pui64StartTime
+@Out            pui64EndTime
+@Return         PVRSRV_OK                         on success.
+                PVRSRV_ERROR_RESOURCE_UNAVAILABLE if the device is still busy with
+                                                  operations from the queried period
+                other error code                  otherwise
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVRGXQueryTimerKM(PVRSRV_DEVICE_NODE * psDeviceNode,
+                      IMG_UINT32         ui32QueryId,
+                      IMG_UINT64         * pui64StartTime,
+                      IMG_UINT64         * pui64EndTime);
+
+
+/**************************************************************************/ /*!
+@Function       PVRSRVRGXCurrentTime
+@Description    Returns the current state of the timer used in timer queries
+@Input          psDevData  Device data.
+@Out            pui64Time
+@Return         PVRSRV_OK on success.
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVRGXCurrentTime(PVRSRV_DEVICE_NODE * psDeviceNode,
+                     IMG_UINT64         * pui64Time);
+
+
+/******************************************************************************
+ NON BRIDGED/EXPORTED interface
+******************************************************************************/
+
+/* write the timestamp cmd from the helper*/
+IMG_VOID
+RGXWriteTimestampCommand(IMG_PBYTE            * ppui8CmdPtr,
+                         RGXFWIF_CCB_CMD_TYPE eCmdType,
+                         RGXFWIF_DEV_VIRTADDR pTimestamp);
+
+/* get the relevant data from the Kick to the helper*/
+IMG_VOID
+RGX_GetTimestampCmdHelper(PVRSRV_RGXDEV_INFO   * psDevInfo,
+                          RGXFWIF_DEV_VIRTADDR * ppPreTimestamp,
+                          RGXFWIF_DEV_VIRTADDR * ppPostTimestamp,
+                          PRGXFWIF_UFO_ADDR    * ppUpdate);
+
+#endif /* _RGX_TIMERQUERIES_H_ */
+
+/******************************************************************************
+ End of file (rgxtimerquery.h)
+******************************************************************************/
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxtransfer.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxtransfer.c
new file mode 100644
index 0000000..e84606c
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxtransfer.c
@@ -0,0 +1,1075 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device specific transfer queue routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pdump_km.h"
+#include "rgxdevice.h"
+#include "rgxccb.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgxtransfer.h"
+#include "rgx_tq_shared.h"
+#include "rgxmem.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "osfunc.h"
+#include "pvr_debug.h"
+#include "pvrsrv.h"
+#include "rgx_fwif_resetframework.h"
+#include "rgx_memallocflags.h"
+#include "rgxccb.h"
+#include "rgxtimerquery.h"
+#include "rgxhwperf.h"
+#include "rgxsync.h"
+
+#include "sync_server.h"
+#include "sync_internal.h"
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+#include "pvr_sync.h"
+#endif
+
+typedef struct {
+	DEVMEM_MEMDESC				*psFWContextStateMemDesc;
+	RGX_SERVER_COMMON_CONTEXT	*psServerCommonContext;
+	IMG_UINT32					ui32Priority;
+} RGX_SERVER_TQ_3D_DATA;
+
+typedef struct {
+	RGX_SERVER_COMMON_CONTEXT	*psServerCommonContext;
+	IMG_UINT32					ui32Priority;
+} RGX_SERVER_TQ_2D_DATA;
+
+struct _RGX_SERVER_TQ_CONTEXT_ {
+	PVRSRV_DEVICE_NODE			*psDeviceNode;
+	DEVMEM_MEMDESC				*psFWFrameworkMemDesc;
+	IMG_UINT32					ui32Flags;
+#define RGX_SERVER_TQ_CONTEXT_FLAGS_2D		(1<<0)
+#define RGX_SERVER_TQ_CONTEXT_FLAGS_3D		(1<<1)
+	RGX_SERVER_TQ_3D_DATA		s3DData;
+	RGX_SERVER_TQ_2D_DATA		s2DData;
+	PVRSRV_CLIENT_SYNC_PRIM		*psCleanupSync;
+	DLLIST_NODE					sListNode;
+};
+
+/*
+	Static functions used by transfer context code
+*/
+
+static PVRSRV_ERROR _Create3DTransferContext(CONNECTION_DATA *psConnection,
+											 PVRSRV_DEVICE_NODE *psDeviceNode,
+											 DEVMEM_MEMDESC *psFWMemContextMemDesc,
+											 IMG_UINT32 ui32Priority,
+											 RGX_COMMON_CONTEXT_INFO *psInfo,
+											 RGX_SERVER_TQ_3D_DATA *ps3DData)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+	PVRSRV_ERROR eError;
+
+	/*
+		Allocate device memory for the firmware GPU context suspend state.
+		Note: the FW reads/writes the state to memory by accessing the GPU register interface.
+	*/
+	PDUMPCOMMENT("Allocate RGX firmware TQ/3D context suspend state");
+
+	eError = DevmemFwAllocate(psDevInfo,
+							sizeof(RGXFWIF_3DCTX_STATE),
+							RGX_FWCOMCTX_ALLOCFLAGS,
+							"FirmwareTQ3DContext",
+							&ps3DData->psFWContextStateMemDesc);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_contextswitchstate;
+	}
+
+	eError = FWCommonContextAllocate(psConnection,
+									 psDeviceNode,
+									 "TQ_3D",
+									 IMG_NULL,
+									 0,
+									 psFWMemContextMemDesc,
+									 ps3DData->psFWContextStateMemDesc,
+									 RGX_CCB_SIZE_LOG2,
+									 ui32Priority,
+									 psInfo,
+									 &ps3DData->psServerCommonContext);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_contextalloc;
+	}
+
+
+	PDUMPCOMMENT("Dump 3D context suspend state buffer");
+	DevmemPDumpLoadMem(ps3DData->psFWContextStateMemDesc, 0, sizeof(RGXFWIF_3DCTX_STATE), PDUMP_FLAGS_CONTINUOUS);
+
+	ps3DData->ui32Priority = ui32Priority;
+	return PVRSRV_OK;
+
+fail_contextalloc:
+	DevmemFwFree(ps3DData->psFWContextStateMemDesc);
+fail_contextswitchstate:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+static PVRSRV_ERROR _Create2DTransferContext(CONNECTION_DATA *psConnection,
+											 PVRSRV_DEVICE_NODE *psDeviceNode,
+											 DEVMEM_MEMDESC *psFWMemContextMemDesc,
+											 IMG_UINT32 ui32Priority,
+											 RGX_COMMON_CONTEXT_INFO *psInfo,
+											 RGX_SERVER_TQ_2D_DATA *ps2DData)
+{
+	PVRSRV_ERROR eError;
+
+	eError = FWCommonContextAllocate(psConnection,
+									 psDeviceNode,
+									 "TQ_2D",
+									 IMG_NULL,
+									 0,
+									 psFWMemContextMemDesc,
+									 IMG_NULL,
+									 RGX_CCB_SIZE_LOG2,
+									 ui32Priority,
+									 psInfo,
+									 &ps2DData->psServerCommonContext);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_contextalloc;
+	}
+
+	ps2DData->ui32Priority = ui32Priority;
+	return PVRSRV_OK;
+
+fail_contextalloc:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+
+static PVRSRV_ERROR _Destroy2DTransferContext(RGX_SERVER_TQ_2D_DATA *ps2DData,
+											  PVRSRV_DEVICE_NODE *psDeviceNode,
+											  PVRSRV_CLIENT_SYNC_PRIM *psCleanupSync)
+{
+	PVRSRV_ERROR eError;
+
+	/* Check if the FW has finished with this resource ... */
+	eError = RGXFWRequestCommonContextCleanUp(psDeviceNode,
+											  FWCommonContextGetFWAddress(ps2DData->psServerCommonContext),
+											  psCleanupSync,
+											  RGXFWIF_DM_2D);
+	if (eError == PVRSRV_ERROR_RETRY)
+	{
+		return eError;
+	}
+	else if (eError != PVRSRV_OK)
+	{
+		PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
+				 __FUNCTION__,
+				 PVRSRVGetErrorStringKM(eError)));
+	}
+
+	/* ... it has so we can free it's resources */
+	FWCommonContextFree(ps2DData->psServerCommonContext);
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR _Destroy3DTransferContext(RGX_SERVER_TQ_3D_DATA *ps3DData,
+											  PVRSRV_DEVICE_NODE *psDeviceNode,
+											  PVRSRV_CLIENT_SYNC_PRIM *psCleanupSync)
+{
+	PVRSRV_ERROR eError;
+
+	/* Check if the FW has finished with this resource ... */
+	eError = RGXFWRequestCommonContextCleanUp(psDeviceNode,
+											  FWCommonContextGetFWAddress(ps3DData->psServerCommonContext),
+											  psCleanupSync,
+											  RGXFWIF_DM_3D);
+	if (eError == PVRSRV_ERROR_RETRY)
+	{
+		return eError;
+	}
+	else if (eError != PVRSRV_OK)
+	{
+		PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
+				 __FUNCTION__,
+				 PVRSRVGetErrorStringKM(eError)));
+	}
+
+	/* ... it has so we can free it's resources */
+	DevmemFwFree(ps3DData->psFWContextStateMemDesc);
+	FWCommonContextFree(ps3DData->psServerCommonContext);
+
+	return PVRSRV_OK;
+}
+
+/*
+ * PVRSRVCreateTransferContextKM
+ */
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXCreateTransferContextKM(CONNECTION_DATA		*psConnection,
+										   PVRSRV_DEVICE_NODE		*psDeviceNode,
+										   IMG_UINT32				ui32Priority,
+										   IMG_DEV_VIRTADDR			sMCUFenceAddr,
+										   IMG_UINT32				ui32FrameworkCommandSize,
+										   IMG_PBYTE				pabyFrameworkCommand,
+										   IMG_HANDLE				hMemCtxPrivData,
+										   RGX_SERVER_TQ_CONTEXT	**ppsTransferContext)
+{
+	RGX_SERVER_TQ_CONTEXT	*psTransferContext;
+	DEVMEM_MEMDESC			*psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+	RGX_COMMON_CONTEXT_INFO	sInfo;
+	PVRSRV_ERROR			eError = PVRSRV_OK;
+
+	/* Allocate the server side structure */
+	*ppsTransferContext = IMG_NULL;
+	psTransferContext = OSAllocMem(sizeof(*psTransferContext));
+	if (psTransferContext == IMG_NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	OSMemSet(psTransferContext, 0, sizeof(*psTransferContext));
+
+	psTransferContext->psDeviceNode = psDeviceNode;
+
+	/* Allocate cleanup sync */
+	eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+						   &psTransferContext->psCleanupSync,
+						   "transfer context cleanup");
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateTransferContextKM: Failed to allocate cleanup sync (0x%x)",
+				eError));
+		goto fail_syncalloc;
+	}
+
+	/* 
+	 * Create the FW framework buffer
+	 */
+	eError = PVRSRVRGXFrameworkCreateKM(psDeviceNode,
+										&psTransferContext->psFWFrameworkMemDesc,
+										ui32FrameworkCommandSize);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateTransferContextKM: Failed to allocate firmware GPU framework state (%u)",
+				eError));
+		goto fail_frameworkcreate;
+	}
+
+	/* Copy the Framework client data into the framework buffer */
+	eError = PVRSRVRGXFrameworkCopyCommand(psTransferContext->psFWFrameworkMemDesc,
+										   pabyFrameworkCommand,
+										   ui32FrameworkCommandSize);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateTransferContextKM: Failed to populate the framework buffer (%u)",
+				eError));
+		goto fail_frameworkcopy;
+	}
+
+	sInfo.psFWFrameworkMemDesc = psTransferContext->psFWFrameworkMemDesc;
+	sInfo.psMCUFenceAddr = &sMCUFenceAddr;
+
+	eError = _Create3DTransferContext(psConnection,
+									  psDeviceNode,
+									  psFWMemContextMemDesc,
+									  ui32Priority,
+									  &sInfo,
+									  &psTransferContext->s3DData);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_3dtransfercontext;
+	}
+	psTransferContext->ui32Flags |= RGX_SERVER_TQ_CONTEXT_FLAGS_3D;
+
+	eError = _Create2DTransferContext(psConnection,
+									  psDeviceNode,
+									  psFWMemContextMemDesc,
+									  ui32Priority,
+									  &sInfo,
+									  &psTransferContext->s2DData);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_2dtransfercontext;
+	}
+	psTransferContext->ui32Flags |= RGX_SERVER_TQ_CONTEXT_FLAGS_2D;
+
+	{
+		PVRSRV_RGXDEV_INFO			*psDevInfo = psDeviceNode->pvDevice;
+
+		OSWRLockAcquireWrite(psDevInfo->hTransferCtxListLock);
+		dllist_add_to_tail(&(psDevInfo->sTransferCtxtListHead), &(psTransferContext->sListNode));
+		OSWRLockReleaseWrite(psDevInfo->hTransferCtxListLock);
+		*ppsTransferContext = psTransferContext;
+	}
+
+	*ppsTransferContext = psTransferContext;
+	return PVRSRV_OK;
+
+fail_2dtransfercontext:
+	_Destroy3DTransferContext(&psTransferContext->s3DData,
+							  psTransferContext->psDeviceNode,
+							  psTransferContext->psCleanupSync);
+fail_3dtransfercontext:
+fail_frameworkcopy:
+	DevmemFwFree(psTransferContext->psFWFrameworkMemDesc);
+fail_frameworkcreate:
+	SyncPrimFree(psTransferContext->psCleanupSync);
+fail_syncalloc:
+	OSFreeMem(psTransferContext);
+	PVR_ASSERT(eError != PVRSRV_OK);
+	*ppsTransferContext = IMG_NULL;
+	return eError;
+}
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXDestroyTransferContextKM(RGX_SERVER_TQ_CONTEXT *psTransferContext)
+{
+	PVRSRV_ERROR eError;
+	PVRSRV_RGXDEV_INFO *psDevInfo = psTransferContext->psDeviceNode->pvDevice;
+
+	/* remove node from list before calling destroy - as destroy, if successful
+	 * will invalidate the node
+	 * must be re-added if destroy fails
+	 */
+	OSWRLockAcquireWrite(psDevInfo->hTransferCtxListLock);
+	dllist_remove_node(&(psTransferContext->sListNode));
+	OSWRLockReleaseWrite(psDevInfo->hTransferCtxListLock);
+
+	if (psTransferContext->ui32Flags & RGX_SERVER_TQ_CONTEXT_FLAGS_2D)
+	{
+		eError = _Destroy2DTransferContext(&psTransferContext->s2DData,
+										   psTransferContext->psDeviceNode,
+										   psTransferContext->psCleanupSync);
+		if (eError != PVRSRV_OK)
+		{
+			goto fail_destroy2d;
+		}
+		/* We've freed the 2D context, don't try to free it again */
+		psTransferContext->ui32Flags &= ~RGX_SERVER_TQ_CONTEXT_FLAGS_2D;
+	}
+
+	if (psTransferContext->ui32Flags & RGX_SERVER_TQ_CONTEXT_FLAGS_3D)
+	{
+		eError = _Destroy3DTransferContext(&psTransferContext->s3DData,
+										   psTransferContext->psDeviceNode,
+										   psTransferContext->psCleanupSync);
+		if (eError != PVRSRV_OK)
+		{
+			goto fail_destroy3d;
+		}
+		/* We've freed the 3D context, don't try to free it again */
+		psTransferContext->ui32Flags &= ~RGX_SERVER_TQ_CONTEXT_FLAGS_3D;
+	}
+
+	DevmemFwFree(psTransferContext->psFWFrameworkMemDesc);
+	SyncPrimFree(psTransferContext->psCleanupSync);
+
+	OSFreeMem(psTransferContext);
+
+	return PVRSRV_OK;
+
+fail_destroy2d:
+fail_destroy3d:
+	OSWRLockAcquireWrite(psDevInfo->hTransferCtxListLock);
+	dllist_add_to_tail(&(psDevInfo->sTransferCtxtListHead), &(psTransferContext->sListNode));
+	OSWRLockReleaseWrite(psDevInfo->hTransferCtxListLock);
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+/*
+ * PVRSRVSubmitTQ3DKickKM
+ */
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXSubmitTransferKM(RGX_SERVER_TQ_CONTEXT	*psTransferContext,
+									   IMG_UINT32				ui32PrepareCount,
+									   IMG_UINT32				*paui32ClientFenceCount,
+									   PRGXFWIF_UFO_ADDR		**papauiClientFenceUFOAddress,
+									   IMG_UINT32				**papaui32ClientFenceValue,
+									   IMG_UINT32				*paui32ClientUpdateCount,
+									   PRGXFWIF_UFO_ADDR		**papauiClientUpdateUFOAddress,
+									   IMG_UINT32				**papaui32ClientUpdateValue,
+									   IMG_UINT32				*paui32ServerSyncCount,
+									   IMG_UINT32				**papaui32ServerSyncFlags,
+									   SERVER_SYNC_PRIMITIVE	***papapsServerSyncs,
+									   IMG_UINT32				ui32NumCheckFenceFDs,
+									   IMG_INT32				*pai32CheckFenceFDs,
+									   IMG_INT32				i32UpdateFenceFD,
+									   IMG_UINT32				*paui32FWCommandSize,
+									   IMG_UINT8				**papaui8FWCommand,
+									   IMG_UINT32				*pui32TQPrepareFlags,
+									   IMG_UINT32				ui32ExtJobRef,
+									   IMG_UINT32				ui32IntJobRef)
+{
+	PVRSRV_DEVICE_NODE *psDeviceNode = psTransferContext->psDeviceNode;
+	RGX_CCB_CMD_HELPER_DATA *pas3DCmdHelper;
+	RGX_CCB_CMD_HELPER_DATA *pas2DCmdHelper;
+	IMG_UINT32 ui323DCmdCount = 0;
+	IMG_UINT32 ui322DCmdCount = 0;
+	IMG_BOOL bKick2D = IMG_FALSE;
+	IMG_BOOL bKick3D = IMG_FALSE;
+	IMG_BOOL bPDumpContinuous = IMG_FALSE;
+	IMG_UINT32 i;
+	IMG_UINT32 ui32IntClientFenceCount = 0;
+	PRGXFWIF_UFO_ADDR *pauiIntFenceUFOAddress = IMG_NULL;
+	IMG_UINT32 *paui32IntFenceValue = IMG_NULL;
+	IMG_UINT32 ui32IntClientUpdateCount = 0;
+	PRGXFWIF_UFO_ADDR *pauiIntUpdateUFOAddress = IMG_NULL;
+	IMG_UINT32 *paui32IntUpdateValue = IMG_NULL;
+	PVRSRV_ERROR eError;
+	PVRSRV_ERROR eError2;
+
+	RGXFWIF_DEV_VIRTADDR pPreTimestamp;
+	RGXFWIF_DEV_VIRTADDR pPostTimestamp;
+	PRGXFWIF_UFO_ADDR    pRMWUFOAddr;
+
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+	struct pvr_sync_append_data *psFDFenceData = NULL;
+#endif
+
+	if (ui32PrepareCount == 0)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	if (ui32NumCheckFenceFDs != 0 || i32UpdateFenceFD >= 0)
+	{
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+		/* Fence FD's are only valid in the 3D case with no batching */
+		if ((ui32PrepareCount !=1) && (!TQ_PREP_FLAGS_COMMAND_IS(pui32TQPrepareFlags[0], 3D)))
+		{
+			return PVRSRV_ERROR_INVALID_PARAMS;
+		}
+
+#else
+		/* We only support Fence FD's if built with SUPPORT_NATIVE_FENCE_SYNC */
+		return PVRSRV_ERROR_INVALID_PARAMS;
+#endif
+	}
+
+	/* We can't allocate the required amount of stack space on all consumer architectures */
+	pas3DCmdHelper = OSAllocMem(sizeof(*pas3DCmdHelper) * ui32PrepareCount);
+	if (pas3DCmdHelper == IMG_NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_alloc3dhelper;
+	}
+	pas2DCmdHelper = OSAllocMem(sizeof(*pas2DCmdHelper) * ui32PrepareCount);
+	if (pas2DCmdHelper == IMG_NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_alloc2dhelper;
+	}
+
+	/*
+		Ensure we do the right thing for server syncs which cross call bounderies
+	*/
+	for (i=0;i<ui32PrepareCount;i++)
+	{
+		IMG_BOOL bHaveStartPrepare = pui32TQPrepareFlags[i] & TQ_PREP_FLAGS_START;
+		IMG_BOOL bHaveEndPrepare = IMG_FALSE;
+
+		if (bHaveStartPrepare)
+		{
+			IMG_UINT32 k;
+			/*
+				We've at the start of a transfer operation (which might be made
+				up of multiple HW operations) so check if we also have then
+				end of the transfer operation in the batch
+			*/
+			for (k=i;k<ui32PrepareCount;k++)
+			{
+				if (pui32TQPrepareFlags[k] & TQ_PREP_FLAGS_END)
+				{
+					bHaveEndPrepare = IMG_TRUE;
+					break;
+				}
+			}
+
+			if (!bHaveEndPrepare)
+			{
+				/*
+					We don't have the complete command passed in this call
+					so drop the update request. When we get called again with
+					the last HW command in this transfer operation we'll do
+					the update at that point.
+				*/
+				for (k=0;k<paui32ServerSyncCount[i];k++)
+				{
+					papaui32ServerSyncFlags[i][k] &= ~PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE;
+				}
+			}
+		}
+	}
+
+
+	/*
+		Init the command helper commands for all the prepares
+	*/
+	for (i=0;i<ui32PrepareCount;i++)
+	{
+		RGX_CLIENT_CCB *psClientCCB;
+		RGX_SERVER_COMMON_CONTEXT *psServerCommonCtx;
+		IMG_CHAR *pszCommandName;
+		RGX_CCB_CMD_HELPER_DATA *psCmdHelper;
+		RGXFWIF_CCB_CMD_TYPE eType;
+
+		if (TQ_PREP_FLAGS_COMMAND_IS(pui32TQPrepareFlags[i], 3D))
+		{
+			psServerCommonCtx = psTransferContext->s3DData.psServerCommonContext;
+			psClientCCB = FWCommonContextGetClientCCB(psServerCommonCtx);
+			pszCommandName = "TQ-3D";
+			psCmdHelper = &pas3DCmdHelper[ui323DCmdCount++];
+			eType = RGXFWIF_CCB_CMD_TYPE_TQ_3D;
+		}
+		else if (TQ_PREP_FLAGS_COMMAND_IS(pui32TQPrepareFlags[i], 2D))
+		{
+			psServerCommonCtx = psTransferContext->s2DData.psServerCommonContext;
+			psClientCCB = FWCommonContextGetClientCCB(psServerCommonCtx);
+			pszCommandName = "TQ-2D";
+			psCmdHelper = &pas2DCmdHelper[ui322DCmdCount++];
+			eType = RGXFWIF_CCB_CMD_TYPE_TQ_2D;
+		}
+		else
+		{
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+			goto fail_cmdtype;
+		}
+
+		if (i == 0)
+		{
+			bPDumpContinuous = ((pui32TQPrepareFlags[i] & TQ_PREP_FLAGS_PDUMPCONTINUOUS) == TQ_PREP_FLAGS_PDUMPCONTINUOUS);
+			PDUMPCOMMENTWITHFLAGS((bPDumpContinuous) ? PDUMP_FLAGS_CONTINUOUS : 0,
+					"%s Command Server Submit on FWCtx %08x", pszCommandName, FWCommonContextGetFWAddress(psServerCommonCtx).ui32Addr);
+		}
+		else
+		{
+			IMG_BOOL bNewPDumpContinuous = ((pui32TQPrepareFlags[i] & TQ_PREP_FLAGS_PDUMPCONTINUOUS) == TQ_PREP_FLAGS_PDUMPCONTINUOUS);
+
+			if (bNewPDumpContinuous != bPDumpContinuous)
+			{
+				eError = PVRSRV_ERROR_INVALID_PARAMS;
+				PVR_DPF((PVR_DBG_ERROR, "%s: Mixing of continuous and non-continuous command in a batch is not permitted", __FUNCTION__));
+				goto fail_pdumpcheck;
+			}
+		}
+
+		ui32IntClientFenceCount  = paui32ClientFenceCount[i];
+		pauiIntFenceUFOAddress   = papauiClientFenceUFOAddress[i];
+		paui32IntFenceValue      = papaui32ClientFenceValue[i];
+		ui32IntClientUpdateCount = paui32ClientUpdateCount[i];
+		pauiIntUpdateUFOAddress  = papauiClientUpdateUFOAddress[i];
+		paui32IntUpdateValue     = papaui32ClientUpdateValue[i];
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+	if (ui32NumCheckFenceFDs || i32UpdateFenceFD >= 0)
+	{
+		eError =
+		  pvr_sync_append_fences("TQ",
+		                               ui32NumCheckFenceFDs,
+		                               pai32CheckFenceFDs,
+		                               i32UpdateFenceFD,
+		                               ui32IntClientUpdateCount,
+		                               pauiIntUpdateUFOAddress,
+		                               paui32IntUpdateValue,
+		                               ui32IntClientFenceCount,
+		                               pauiIntFenceUFOAddress,
+		                               paui32IntFenceValue,
+		                               &psFDFenceData);
+		if (eError != PVRSRV_OK)
+		{
+			goto fail_syncinit;
+		}
+		ui32IntClientUpdateCount = psFDFenceData->nr_updates;
+		pauiIntUpdateUFOAddress = psFDFenceData->update_ufo_addresses;
+		paui32IntUpdateValue = psFDFenceData->update_values;
+		ui32IntClientFenceCount = psFDFenceData->nr_checks;
+		pauiIntFenceUFOAddress = psFDFenceData->check_ufo_addresses;
+		paui32IntFenceValue = psFDFenceData->check_values;
+	}
+#endif
+
+		RGX_GetTimestampCmdHelper((PVRSRV_RGXDEV_INFO*) psTransferContext->psDeviceNode->pvDevice,
+		                          & pPreTimestamp,
+		                          & pPostTimestamp,
+		                          & pRMWUFOAddr);
+
+		/*
+			Create the command helper data for this command
+		*/
+		eError = RGXCmdHelperInitCmdCCB(psClientCCB,
+		                                ui32IntClientFenceCount,
+		                                pauiIntFenceUFOAddress,
+		                                paui32IntFenceValue,
+		                                ui32IntClientUpdateCount,
+		                                pauiIntUpdateUFOAddress,
+		                                paui32IntUpdateValue,
+		                                paui32ServerSyncCount[i],
+		                                papaui32ServerSyncFlags[i],
+		                                papapsServerSyncs[i],
+		                                paui32FWCommandSize[i],
+		                                papaui8FWCommand[i],
+		                                & pPreTimestamp,
+		                                & pPostTimestamp,
+		                                & pRMWUFOAddr,
+		                                eType,
+		                                bPDumpContinuous,
+		                                pszCommandName,
+		                                psCmdHelper);
+		if (eError != PVRSRV_OK)
+		{
+			goto fail_initcmd;
+		}
+	}
+
+	/*
+		Acquire space for all the commands in one go
+	*/
+	if (ui323DCmdCount)
+	{
+		
+		eError = RGXCmdHelperAcquireCmdCCB(ui323DCmdCount,
+										   &pas3DCmdHelper[0],
+										   &bKick3D);
+		if (eError != PVRSRV_OK)
+		{
+			if (bKick3D)
+			{
+				ui323DCmdCount = 0;
+				ui322DCmdCount = 0;
+			}
+			else
+			{
+				goto fail_3dcmdacquire;
+			}
+		}
+	}
+
+	if (ui322DCmdCount)
+	{
+		eError = RGXCmdHelperAcquireCmdCCB(ui322DCmdCount,
+										   &pas2DCmdHelper[0],
+										   &bKick2D);
+	
+		if (eError != PVRSRV_OK)
+		{
+			if (bKick2D || bKick3D)
+			{
+				ui323DCmdCount = 0;
+				ui322DCmdCount = 0;
+			}
+			else
+			{
+				goto fail_2dcmdacquire;
+			}
+		}
+	}
+
+	/*
+		We should acquire the kernel CCB(s) space here as the schedule could fail
+		and we would have to roll back all the syncs
+	*/
+
+	/*
+		Only do the command helper release (which takes the server sync
+		operations if the acquire succeeded
+	*/
+	if (ui323DCmdCount)
+	{
+		RGXCmdHelperReleaseCmdCCB(ui323DCmdCount,
+								  &pas3DCmdHelper[0],
+								  "TQ_3D",
+								  FWCommonContextGetFWAddress(psTransferContext->s3DData.psServerCommonContext).ui32Addr);
+		
+	}
+
+	if (ui322DCmdCount)
+	{
+		RGXCmdHelperReleaseCmdCCB(ui322DCmdCount,
+								  &pas2DCmdHelper[0],
+								  "TQ_2D",
+								  FWCommonContextGetFWAddress(psTransferContext->s2DData.psServerCommonContext).ui32Addr);
+	}
+
+	/*
+		Even if we failed to acquire the client CCB space we might still need
+		to kick the HW to process a padding packet to release space for us next
+		time round
+	*/
+	if (bKick3D)
+	{
+		RGXFWIF_KCCB_CMD s3DKCCBCmd;
+
+		/* Construct the kernel 3D CCB command. */
+		s3DKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+		s3DKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psTransferContext->s3DData.psServerCommonContext);
+		s3DKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->s3DData.psServerCommonContext));
+		s3DKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+
+		LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+		{
+			eError2 = RGXScheduleCommand(psDeviceNode->pvDevice,
+										RGXFWIF_DM_3D,
+										&s3DKCCBCmd,
+										sizeof(s3DKCCBCmd),
+										bPDumpContinuous);
+			if (eError2 != PVRSRV_ERROR_RETRY)
+			{
+				break;
+			}
+			OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+		} END_LOOP_UNTIL_TIMEOUT();
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+        	RGXHWPerfFTraceGPUEnqueueEvent(psDeviceNode->pvDevice,
+        			ui32ExtJobRef, ui32IntJobRef, "TQ3D");
+#endif
+	}
+
+	if (bKick2D)
+	{
+		RGXFWIF_KCCB_CMD s2DKCCBCmd;
+
+		/* Construct the kernel 3D CCB command. */
+		s2DKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+		s2DKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psTransferContext->s2DData.psServerCommonContext);
+		s2DKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->s2DData.psServerCommonContext));
+		s2DKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+
+		LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+		{
+			eError2 = RGXScheduleCommand(psDeviceNode->pvDevice,
+										RGXFWIF_DM_2D,
+										&s2DKCCBCmd,
+										sizeof(s2DKCCBCmd),
+										bPDumpContinuous);
+			if (eError2 != PVRSRV_ERROR_RETRY)
+			{
+				break;
+			}
+			OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+		} END_LOOP_UNTIL_TIMEOUT();
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+        	RGXHWPerfFTraceGPUEnqueueEvent(psDeviceNode->pvDevice,
+        			ui32ExtJobRef, ui32IntJobRef, "TQ2D");
+#endif
+	}
+
+	/*
+	 * Now check eError (which may have returned an error from our earlier calls
+	 * to RGXCmdHelperAcquireCmdCCB) - we needed to process any flush command first
+	 * so we check it now...
+	 */
+	if (eError != PVRSRV_OK )
+	{
+		goto fail_2dcmdacquire;
+	}
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+#if defined(NO_HARDWARE)
+	pvr_sync_nohw_complete_fences(psFDFenceData);
+#endif
+	/*
+		Free the merged sync memory if required
+	*/
+	pvr_sync_free_append_fences_data(psFDFenceData);
+#endif
+
+	OSFreeMem(pas2DCmdHelper);
+	OSFreeMem(pas3DCmdHelper);
+
+	return PVRSRV_OK;
+
+/*
+	No resources are created in this function so there is nothing to free
+	unless we had to merge syncs.
+	If we fail after the client CCB acquire there is still nothing to do
+	as only the client CCB release will modify the client CCB
+*/
+fail_2dcmdacquire:
+fail_3dcmdacquire:
+
+fail_initcmd:
+
+fail_pdumpcheck:
+fail_cmdtype:
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+fail_syncinit:
+	/* Relocated cleanup here as the loop could fail after the first iteration
+	 * at the above goto tags at which point the psFDCheckData memory would
+	 * have been allocated.
+	 */
+	if (psFDFenceData)
+	{
+		pvr_sync_rollback_append_fences(psFDFenceData);
+		pvr_sync_free_append_fences_data(psFDFenceData);
+		psFDFenceData = NULL;
+	}
+#endif
+	PVR_ASSERT(eError != PVRSRV_OK);
+	OSFreeMem(pas2DCmdHelper);
+fail_alloc2dhelper:
+	OSFreeMem(pas3DCmdHelper);
+fail_alloc3dhelper:
+	return eError;
+}
+
+PVRSRV_ERROR PVRSRVRGXSetTransferContextPriorityKM(CONNECTION_DATA *psConnection,
+												   RGX_SERVER_TQ_CONTEXT *psTransferContext,
+												   IMG_UINT32 ui32Priority)
+{
+	PVRSRV_ERROR eError;
+
+	if (psTransferContext->s2DData.ui32Priority != ui32Priority)
+	{
+		eError = ContextSetPriority(psTransferContext->s2DData.psServerCommonContext,
+									psConnection,
+									psTransferContext->psDeviceNode->pvDevice,
+									ui32Priority,
+									RGXFWIF_DM_2D);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority of the 2D part of the transfercontext (%s)", __FUNCTION__, PVRSRVGetErrorStringKM(eError)));
+			goto fail_2dcontext;
+		}
+		psTransferContext->s2DData.ui32Priority = ui32Priority;
+	}
+
+	if (psTransferContext->s3DData.ui32Priority != ui32Priority)
+	{
+		eError = ContextSetPriority(psTransferContext->s3DData.psServerCommonContext,
+									psConnection,
+									psTransferContext->psDeviceNode->pvDevice,
+									ui32Priority,
+									RGXFWIF_DM_3D);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority of the 3D part of the transfercontext (%s)", __FUNCTION__, PVRSRVGetErrorStringKM(eError)));
+			goto fail_3dcontext;
+		}
+		psTransferContext->s3DData.ui32Priority = ui32Priority;
+	}
+	return PVRSRV_OK;
+
+fail_3dcontext:
+fail_2dcontext:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+static IMG_BOOL CheckForStalledTransferCtxtCommand(PDLLIST_NODE psNode, IMG_PVOID pvCallbackData)
+{
+	RGX_SERVER_TQ_CONTEXT 		*psCurrentServerTransferCtx = IMG_CONTAINER_OF(psNode, RGX_SERVER_TQ_CONTEXT, sListNode);
+	RGX_SERVER_TQ_2D_DATA		*psTransferCtx2DData = &(psCurrentServerTransferCtx->s2DData);
+	RGX_SERVER_COMMON_CONTEXT	*psCurrentServerTQ2DCommonCtx = psTransferCtx2DData->psServerCommonContext;
+	RGX_SERVER_TQ_3D_DATA		*psTransferCtx3DData = &(psCurrentServerTransferCtx->s3DData);
+	RGX_SERVER_COMMON_CONTEXT	*psCurrentServerTQ3DCommonCtx = psTransferCtx3DData->psServerCommonContext;
+	DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf = pvCallbackData;
+
+
+	DumpStalledFWCommonContext(psCurrentServerTQ2DCommonCtx, pfnDumpDebugPrintf);
+	DumpStalledFWCommonContext(psCurrentServerTQ3DCommonCtx, pfnDumpDebugPrintf);
+
+	return IMG_TRUE;
+}
+IMG_VOID CheckForStalledTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo,
+									 DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf)
+{
+	OSWRLockAcquireRead(psDevInfo->hTransferCtxListLock);
+	dllist_foreach_node(&(psDevInfo->sTransferCtxtListHead),
+						CheckForStalledTransferCtxtCommand, pfnDumpDebugPrintf);
+	OSWRLockReleaseRead(psDevInfo->hTransferCtxListLock);
+}
+
+static IMG_BOOL CheckForStalledClientTransferCtxtCommand(PDLLIST_NODE psNode, IMG_PVOID pvCallbackData)
+{
+	PVRSRV_ERROR *peError = (PVRSRV_ERROR*)pvCallbackData;
+	RGX_SERVER_TQ_CONTEXT 		*psCurrentServerTransferCtx = IMG_CONTAINER_OF(psNode, RGX_SERVER_TQ_CONTEXT, sListNode);
+	RGX_SERVER_TQ_2D_DATA		*psTransferCtx2DData = &(psCurrentServerTransferCtx->s2DData);
+	RGX_SERVER_COMMON_CONTEXT	*psCurrentServerTQ2DCommonCtx = psTransferCtx2DData->psServerCommonContext;
+	RGX_SERVER_TQ_3D_DATA		*psTransferCtx3DData = &(psCurrentServerTransferCtx->s3DData);
+	RGX_SERVER_COMMON_CONTEXT	*psCurrentServerTQ3DCommonCtx = psTransferCtx3DData->psServerCommonContext;
+
+	if (PVRSRV_ERROR_CCCB_STALLED == CheckStalledClientCommonContext(psCurrentServerTQ2DCommonCtx))
+	{
+		*peError = PVRSRV_ERROR_CCCB_STALLED;
+	}
+	if (PVRSRV_ERROR_CCCB_STALLED == CheckStalledClientCommonContext(psCurrentServerTQ3DCommonCtx))
+	{
+		*peError = PVRSRV_ERROR_CCCB_STALLED;
+	}
+
+	return IMG_TRUE;
+}
+IMG_BOOL CheckForStalledClientTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	OSWRLockAcquireRead(psDevInfo->hTransferCtxListLock);
+	dllist_foreach_node(&(psDevInfo->sTransferCtxtListHead), 
+						CheckForStalledClientTransferCtxtCommand, &eError);
+	OSWRLockReleaseRead(psDevInfo->hTransferCtxListLock);
+	return (PVRSRV_ERROR_CCCB_STALLED == eError)? IMG_TRUE: IMG_FALSE;
+}
+
+PVRSRV_ERROR PVRSRVRGXKickSyncTransferKM(RGX_SERVER_TQ_CONTEXT	*psTransferContext,
+									   IMG_UINT32				ui32ClientFenceCount,
+									   PRGXFWIF_UFO_ADDR		*pauiClientFenceUFOAddress,
+									   IMG_UINT32				*paui32ClientFenceValue,
+									   IMG_UINT32				ui32ClientUpdateCount,
+									   PRGXFWIF_UFO_ADDR		*pauiClientUpdateUFOAddress,
+									   IMG_UINT32				*paui32ClientUpdateValue,
+									   IMG_UINT32				ui32ServerSyncCount,
+									   IMG_UINT32				*pui32ServerSyncFlags,
+									   SERVER_SYNC_PRIMITIVE	**pasServerSyncs,
+									   IMG_UINT32				ui32NumCheckFenceFDs,
+									   IMG_INT32				*pai32CheckFenceFDs,
+									   IMG_INT32				i32UpdateFenceFD,
+									   IMG_UINT32				ui32TQPrepareFlags)
+{
+	PVRSRV_ERROR                eError;
+	RGX_SERVER_COMMON_CONTEXT   *psServerCommonCtx;
+	IMG_CHAR                    *pszCommandName;
+	RGXFWIF_DM                  eDM;
+	IMG_BOOL                    bPDumpContinuous;
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+	/* Android fd sync update info */
+	struct pvr_sync_append_data *psFDFenceData = NULL;
+#endif
+
+	bPDumpContinuous = ((ui32TQPrepareFlags & TQ_PREP_FLAGS_PDUMPCONTINUOUS) == TQ_PREP_FLAGS_PDUMPCONTINUOUS);
+
+	if (TQ_PREP_FLAGS_COMMAND_IS(ui32TQPrepareFlags, 3D))
+	{
+		psServerCommonCtx = psTransferContext->s3DData.psServerCommonContext;
+		pszCommandName = "SyncTQ-3D";
+		eDM = RGXFWIF_DM_3D;
+	}
+	else if (TQ_PREP_FLAGS_COMMAND_IS(ui32TQPrepareFlags, 2D))
+	{
+		psServerCommonCtx = psTransferContext->s2DData.psServerCommonContext;
+		pszCommandName = "SyncTQ-2D";
+		eDM = RGXFWIF_DM_2D;
+	}
+	else
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+	/* Android FD fences are hardcoded to updates (IMG_TRUE below), Fences go to the TA and updates to the 3D */
+	if (ui32NumCheckFenceFDs || i32UpdateFenceFD >= 0)
+	{
+		eError =
+		  pvr_sync_append_fences("TQ",
+		                         ui32NumCheckFenceFDs,
+		                         pai32CheckFenceFDs,
+		                         i32UpdateFenceFD,
+		                         ui32ClientUpdateCount,
+		                         pauiClientUpdateUFOAddress,
+		                         paui32ClientUpdateValue,
+		                         ui32ClientFenceCount,
+		                         pauiClientFenceUFOAddress,
+		                         paui32ClientFenceValue,
+		                         &psFDFenceData);
+		if (eError != PVRSRV_OK)
+		{
+			goto fail_fdsync;
+		}
+		ui32ClientUpdateCount = psFDFenceData->nr_updates;
+		pauiClientUpdateUFOAddress = psFDFenceData->update_ufo_addresses;
+		paui32ClientUpdateValue = psFDFenceData->update_values;
+		ui32ClientFenceCount = psFDFenceData->nr_checks;
+		pauiClientFenceUFOAddress = psFDFenceData->check_ufo_addresses;
+		paui32ClientFenceValue = psFDFenceData->check_values;
+	}
+#endif /* defined(SUPPORT_NATIVE_FENCE_SYNC) */
+
+	eError = 
+		RGXKickSyncKM(psTransferContext->psDeviceNode,
+				      psServerCommonCtx,
+				      eDM,
+				      pszCommandName,
+				      ui32ClientFenceCount,
+				      pauiClientFenceUFOAddress,
+				      paui32ClientFenceValue,
+				      ui32ClientUpdateCount,
+				      pauiClientUpdateUFOAddress,
+				      paui32ClientUpdateValue,
+				      ui32ServerSyncCount,
+				      pui32ServerSyncFlags,
+				      pasServerSyncs,
+				      bPDumpContinuous);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Error calling RGXKickSyncKM (%s)", __FUNCTION__, PVRSRVGetErrorStringKM(eError)));
+		goto fail_kicksync;
+	}
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+#if defined(NO_HARDWARE)
+	pvr_sync_nohw_complete_fences(psFDFenceData);
+#endif
+	pvr_sync_free_append_fences_data(psFDFenceData);
+#endif
+
+	return eError;
+
+fail_kicksync:
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+	pvr_sync_rollback_append_fences(psFDFenceData);
+	pvr_sync_free_append_fences_data(psFDFenceData);
+fail_fdsync:
+#endif
+
+	return eError;
+}
+
+/**************************************************************************//**
+ End of file (rgxtransfer.c)
+******************************************************************************/
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxtransfer.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxtransfer.h
new file mode 100644
index 0000000..f8d897d
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxtransfer.h
@@ -0,0 +1,160 @@
+/*************************************************************************/ /*!
+@File
+@Title          RGX Transfer queue Functionality
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the RGX Transfer queue Functionality
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXTRANSFER_H__)
+#define __RGXTRANSFER_H__
+
+#include "devicemem.h"
+#include "device.h"
+#include "rgxdevice.h"
+#include "rgxfwutils.h"
+#include "rgx_fwif_resetframework.h"
+#include "rgxdebug.h"
+
+#include "sync_server.h"
+#include "connection_server.h"
+
+typedef struct _RGX_SERVER_TQ_CONTEXT_ RGX_SERVER_TQ_CONTEXT;
+
+/*!
+*******************************************************************************
+
+ @Function	PVRSRVRGXCreateTransferContextKM
+
+ @Description
+	Server-side implementation of RGXCreateTransferContext
+
+ @Input pvDeviceNode - device node
+ 
+FIXME fill this in
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXCreateTransferContextKM(CONNECTION_DATA			*psConnection,
+										   PVRSRV_DEVICE_NODE		*psDeviceNode,
+										   IMG_UINT32				ui32Priority,
+										   IMG_DEV_VIRTADDR			sMCUFenceAddr,
+										   IMG_UINT32				ui32FrameworkCommandSize,
+										   IMG_PBYTE				pabyFrameworkCommand,
+										   IMG_HANDLE				hMemCtxPrivData,
+										   RGX_SERVER_TQ_CONTEXT	**ppsTransferContext);
+
+
+/*!
+*******************************************************************************
+
+ @Function	PVRSRVRGXDestroyTransferContextKM
+
+ @Description
+	Server-side implementation of RGXDestroyTransferContext
+
+ @Input psTransferContext - Transfer context
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXDestroyTransferContextKM(RGX_SERVER_TQ_CONTEXT *psTransferContext);
+
+/*!
+*******************************************************************************
+
+ @Function	PVRSRVSubmitTransferKM
+
+ @Description
+	Schedules one or more 2D or 3D HW commands on the firmware
+
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXSubmitTransferKM(RGX_SERVER_TQ_CONTEXT	*psTransferContext,
+									IMG_UINT32				ui32PrepareCount,
+									IMG_UINT32				*paui32ClientFenceCount,
+									PRGXFWIF_UFO_ADDR		**papauiClientFenceUFOAddress,
+									IMG_UINT32				**papaui32ClientFenceValue,
+									IMG_UINT32				*paui32ClientUpdateCount,
+									PRGXFWIF_UFO_ADDR		**papauiClientUpdateUFOAddress,
+									IMG_UINT32				**papaui32ClientUpdateValue,
+									IMG_UINT32				*paui32ServerSyncCount,
+									IMG_UINT32				**papaui32ServerSyncFlags,
+									SERVER_SYNC_PRIMITIVE	***papapsServerSyncs,
+									IMG_UINT32				ui32NumCheckFenceFDs,
+									IMG_INT32				*paui32CheckFenceFDs,
+									IMG_INT32				i32UpdateFenceFD,
+									IMG_UINT32				*paui32FWCommandSize,
+									IMG_UINT8				**papaui8FWCommand,
+									IMG_UINT32				*pui32TQPrepareFlags,
+									IMG_UINT32				ui32ExtJobRef,
+									IMG_UINT32				ui32IntJobRef);
+
+IMG_EXPORT
+PVRSRV_ERROR PVRSRVRGXSetTransferContextPriorityKM(CONNECTION_DATA *psConnection,
+												   RGX_SERVER_TQ_CONTEXT *psTransferContext,
+												   IMG_UINT32 ui32Priority);
+
+/* Debug - check if transfer context is waiting on a fence */
+IMG_VOID CheckForStalledTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo,
+									 DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf);
+
+/* Debug/Watchdog - check if client transfer contexts are stalled */
+IMG_BOOL CheckForStalledClientTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+PVRSRV_ERROR PVRSRVRGXKickSyncTransferKM(RGX_SERVER_TQ_CONTEXT	*psTransferContext,
+									   IMG_UINT32				ui32ClientFenceCount,
+									   PRGXFWIF_UFO_ADDR		*pauiClientFenceUFOAddress,
+									   IMG_UINT32				*paui32ClientFenceValue,
+									   IMG_UINT32				ui32ClientUpdateCount,
+									   PRGXFWIF_UFO_ADDR		*pauiClientUpdateUFOAddress,
+									   IMG_UINT32				*paui32ClientUpdateValue,
+									   IMG_UINT32				ui32ServerSyncCount,
+									   IMG_UINT32				*pui32ServerSyncFlags,
+									   SERVER_SYNC_PRIMITIVE	**pasServerSyncs,
+									   IMG_UINT32				ui32NumCheckFenceFDs,
+									   IMG_INT32				*paui32CheckFenceFDs,
+									   IMG_INT32				i32UpdateFenceFD,
+									   IMG_UINT32				ui32TQPrepareFlags);
+#endif /* __RGXTRANSFER_H__ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxutils.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxutils.c
new file mode 100644
index 0000000..e3d9f2a
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxutils.c
@@ -0,0 +1,287 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device specific utility routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device specific functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stddef.h>
+
+#include "rgx_fwif_km.h"
+#include "pdump_km.h"
+#include "osfunc.h"
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "rgxutils.h"
+#include "power.h"
+#include "pvrsrv.h"
+#include "sync_internal.h"
+#include "rgxfwutils.h"
+
+/*
+ * RGXRunScript
+ */
+PVRSRV_ERROR RGXRunScript(PVRSRV_RGXDEV_INFO	*psDevInfo,
+						 RGX_INIT_COMMAND		*psScript,
+						 IMG_UINT32				ui32NumCommands,
+						 IMG_UINT32				ui32PdumpFlags,
+						 DUMPDEBUG_PRINTF_FUNC  *pfnDumpDebugPrintf)
+{
+	IMG_UINT32 ui32PC;
+#if !defined(NO_HARDWARE)
+	IMG_UINT32 ui32LastLoopPoint = 0xFFFFFFFF;
+#endif /* NO_HARDWARE */
+
+	for (ui32PC = 0;  ui32PC < ui32NumCommands;  ui32PC++)
+	{
+		RGX_INIT_COMMAND *psComm = &psScript[ui32PC];
+
+		switch (psComm->eOp)
+		{
+			case RGX_INIT_OP_DBG_READ32_HW_REG:
+			{
+				IMG_UINT32	ui32RegVal;
+				ui32RegVal = OSReadHWReg32(psDevInfo->pvRegsBaseKM,  psComm->sDBGReadHWReg.ui32Offset);
+				PVR_DUMPDEBUG_LOG(("%s: 0x%08X", psComm->sDBGReadHWReg.aszName, ui32RegVal));
+				break;
+			}
+			case RGX_INIT_OP_DBG_READ64_HW_REG:
+			{
+				IMG_UINT64	ui64RegVal;
+				ui64RegVal = OSReadHWReg64(psDevInfo->pvRegsBaseKM, psComm->sDBGReadHWReg.ui32Offset);
+				PVR_DUMPDEBUG_LOG(("%s: 0x%016llX", psComm->sDBGReadHWReg.aszName, ui64RegVal));
+				break;
+			}
+			case RGX_INIT_OP_WRITE_HW_REG:
+			{
+				if( !(ui32PdumpFlags & PDUMP_FLAGS_NOHW) )
+				{
+					OSWriteHWReg32(psDevInfo->pvRegsBaseKM, psComm->sWriteHWReg.ui32Offset, psComm->sWriteHWReg.ui32Value);
+				}
+				PDUMPCOMMENT("RGXRunScript: Write HW reg operation");
+				PDUMPREG32(RGX_PDUMPREG_NAME,
+						psComm->sWriteHWReg.ui32Offset,
+						psComm->sWriteHWReg.ui32Value,
+						ui32PdumpFlags);
+				break;
+			}
+			case RGX_INIT_OP_PDUMP_HW_REG:
+			{
+				PDUMPCOMMENT("RGXRunScript: Dump HW reg operation");
+				PDUMPREG32(RGX_PDUMPREG_NAME, psComm->sPDumpHWReg.ui32Offset,
+						psComm->sPDumpHWReg.ui32Value, ui32PdumpFlags);
+				break;
+			}
+			case RGX_INIT_OP_COND_POLL_HW_REG:
+			{
+#if !defined(NO_HARDWARE)
+				IMG_UINT32	ui32RegVal;
+
+				if( !(ui32PdumpFlags & PDUMP_FLAGS_NOHW) )
+				{
+					/* read the register used as condition */
+					ui32RegVal = OSReadHWReg32(psDevInfo->pvRegsBaseKM,  psComm->sCondPollHWReg.ui32CondOffset);
+
+					/* if the conditions succeeds, poll the register */
+					if ((ui32RegVal & psComm->sCondPollHWReg.ui32CondMask) == psComm->sCondPollHWReg.ui32CondValue)
+					{
+						if (PVRSRVPollForValueKM((IMG_UINT32 *)((IMG_UINT8*)psDevInfo->pvRegsBaseKM + psComm->sCondPollHWReg.ui32Offset),
+								psComm->sCondPollHWReg.ui32Value,
+								psComm->sCondPollHWReg.ui32Mask) != PVRSRV_OK)
+						{
+							PVR_DPF((PVR_DBG_ERROR, "RGXRunScript: Cond Poll for Reg (0x%x) failed -> Cancel script.", psComm->sCondPollHWReg.ui32Offset));
+							return PVRSRV_ERROR_TIMEOUT;
+						}
+
+					}
+					else
+					{
+						PVR_DPF((PVR_DBG_WARNING, 
+						"RGXRunScript: Skipping Poll for Reg (0x%x) because the condition is not met (Reg 0x%x ANDed with mask 0x%x equal to 0x%x but value 0x%x found instead).",
+						psComm->sCondPollHWReg.ui32Offset,
+						psComm->sCondPollHWReg.ui32CondOffset,
+						psComm->sCondPollHWReg.ui32CondMask,
+						psComm->sCondPollHWReg.ui32CondValue,
+						ui32RegVal));
+					}
+				}
+#endif
+				break;
+			}
+			case RGX_INIT_OP_POLL_64_HW_REG:
+			{
+				/* Split lower and upper words */
+				IMG_UINT32 ui32UpperValue = (IMG_UINT32) (psComm->sPoll64HWReg.ui64Value >> 32);
+				IMG_UINT32 ui32LowerValue = (IMG_UINT32) (psComm->sPoll64HWReg.ui64Value);
+
+				IMG_UINT32 ui32UpperMask = (IMG_UINT32) (psComm->sPoll64HWReg.ui64Mask >> 32);
+				IMG_UINT32 ui32LowerMask = (IMG_UINT32) (psComm->sPoll64HWReg.ui64Mask);
+
+				PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "RGXRunScript: 64 bit HW offset: %x", psComm->sPoll64HWReg.ui32Offset);
+
+				if( !(ui32PdumpFlags & PDUMP_FLAGS_NOHW) )
+				{
+					if (PVRSRVPollForValueKM((IMG_UINT32 *)(((IMG_UINT8*)psDevInfo->pvRegsBaseKM) + psComm->sPoll64HWReg.ui32Offset + 4),
+										 ui32UpperValue,
+										 ui32UpperMask) != PVRSRV_OK)
+					{
+						PVR_DPF((PVR_DBG_ERROR, "RGXRunScript: Poll for upper part of Reg (0x%x) failed -> Cancel script.", psComm->sPoll64HWReg.ui32Offset));
+						return PVRSRV_ERROR_TIMEOUT;
+					}
+				}
+				PDUMPREGPOL(RGX_PDUMPREG_NAME,
+							psComm->sPoll64HWReg.ui32Offset + 4,
+							ui32UpperValue,
+							ui32UpperMask,
+							ui32PdumpFlags,
+							PDUMP_POLL_OPERATOR_EQUAL);
+
+				if( !(ui32PdumpFlags & PDUMP_FLAGS_NOHW) )
+				{
+					if (PVRSRVPollForValueKM((IMG_UINT32 *)((IMG_UINT8*)psDevInfo->pvRegsBaseKM + psComm->sPoll64HWReg.ui32Offset),
+										 ui32LowerValue,
+										 ui32LowerMask) != PVRSRV_OK)
+					{
+						PVR_DPF((PVR_DBG_ERROR, "RGXRunScript: Poll for lower part of Reg (0x%x) failed -> Cancel script.", psComm->sPoll64HWReg.ui32Offset));
+						return PVRSRV_ERROR_TIMEOUT;
+					}
+				}
+				PDUMPREGPOL(RGX_PDUMPREG_NAME,
+							psComm->sPoll64HWReg.ui32Offset,
+							ui32LowerValue,
+							ui32LowerMask,
+							ui32PdumpFlags,
+							PDUMP_POLL_OPERATOR_EQUAL);
+
+				break;
+			}
+			case RGX_INIT_OP_POLL_HW_REG:
+			{
+				if( !(ui32PdumpFlags & PDUMP_FLAGS_NOHW) )
+				{
+					if (PVRSRVPollForValueKM((IMG_UINT32 *)((IMG_UINT8*)psDevInfo->pvRegsBaseKM + psComm->sPollHWReg.ui32Offset),
+										 psComm->sPollHWReg.ui32Value,
+										 psComm->sPollHWReg.ui32Mask) != PVRSRV_OK)
+					{
+						PVR_DPF((PVR_DBG_ERROR, "RGXRunScript: Poll for Reg (0x%x) failed -> Cancel script.", psComm->sPollHWReg.ui32Offset));
+						return PVRSRV_ERROR_TIMEOUT;
+					}
+				}
+				PDUMPREGPOL(RGX_PDUMPREG_NAME,
+							psComm->sPollHWReg.ui32Offset,
+							psComm->sPollHWReg.ui32Value,
+							psComm->sPollHWReg.ui32Mask,
+							ui32PdumpFlags,
+							PDUMP_POLL_OPERATOR_EQUAL);
+
+				break;
+			}
+
+			case RGX_INIT_OP_LOOP_POINT:
+			{
+#if !defined(NO_HARDWARE)
+				ui32LastLoopPoint = ui32PC;
+#endif /* NO_HARDWARE */
+				break;
+			}
+
+			case RGX_INIT_OP_COND_BRANCH:
+			{
+#if !defined(NO_HARDWARE)
+				IMG_UINT32 ui32RegVal = OSReadHWReg32(psDevInfo->pvRegsBaseKM,
+													  psComm->sConditionalBranchPoint.ui32Offset);
+
+				if((ui32RegVal & psComm->sConditionalBranchPoint.ui32Mask) != psComm->sConditionalBranchPoint.ui32Value)
+				{
+					ui32PC = ui32LastLoopPoint - 1;
+				}
+#endif /* NO_HARDWARE */
+
+				PDUMPIDLWITHFLAGS(30, ui32PdumpFlags);
+				break;
+			}
+			case RGX_INIT_OP_DBG_CALC:
+			{
+				IMG_UINT32 ui32RegVal1;
+				IMG_UINT32 ui32RegVal2;
+				IMG_UINT32 ui32RegVal3;
+				ui32RegVal1 = OSReadHWReg32(psDevInfo->pvRegsBaseKM,  psComm->sDBGCalc.ui32Offset1);
+				ui32RegVal2 = OSReadHWReg32(psDevInfo->pvRegsBaseKM,  psComm->sDBGCalc.ui32Offset2);
+				ui32RegVal3 = OSReadHWReg32(psDevInfo->pvRegsBaseKM,  psComm->sDBGCalc.ui32Offset3);
+				if (ui32RegVal1 + ui32RegVal2 > ui32RegVal3)
+				{
+					PVR_DUMPDEBUG_LOG(("%s: 0x%08X", psComm->sDBGCalc.aszName, ui32RegVal1 + ui32RegVal2 - ui32RegVal3));
+				}
+				else
+				{
+					PVR_DUMPDEBUG_LOG(("%s: 0x%08X", psComm->sDBGCalc.aszName, 0));
+				}
+				break;
+			}
+			case RGX_INIT_OP_DBG_WAIT:
+			{
+				OSWaitus(psComm->sDBGWait.ui32WaitInUs);
+				break;
+			}
+			case RGX_INIT_OP_DBG_STRING:
+			{
+				PVR_DUMPDEBUG_LOG(("%s", psComm->sDBGString.aszString));
+				break;
+			}
+			case RGX_INIT_OP_HALT:
+			{
+				return PVRSRV_OK;
+			}
+			case RGX_INIT_OP_ILLEGAL:
+			/* FALLTHROUGH */
+			default:
+			{
+				PVR_DPF((PVR_DBG_ERROR,"RGXRunScript: PC %d: Illegal command: %d", ui32PC, psComm->eOp));
+				return PVRSRV_ERROR_UNKNOWN_SCRIPT_OPERATION;
+			}
+		}
+
+	}
+
+	return PVRSRV_ERROR_UNKNOWN_SCRIPT_OPERATION;
+}
+
+/******************************************************************************
+ End of file (rgxutils.c)
+******************************************************************************/
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxutils.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxutils.h
new file mode 100644
index 0000000..f075f43
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/devices/rgx/rgxutils.h
@@ -0,0 +1,69 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device specific utility routines declarations
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Inline functions/structures specific to RGX
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "device.h"
+#include "rgxdevice.h"
+#include "rgxdebug.h"
+#include "pvrsrv.h"
+
+/*!
+******************************************************************************
+
+ @Function	RGXRunScript
+
+ @Description Execute the commands in the script
+
+ @Input 
+
+ @Return   PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXRunScript(PVRSRV_RGXDEV_INFO	*psDevInfo,
+						 RGX_INIT_COMMAND	*psScript,
+						 IMG_UINT32			ui32NumCommands,
+						 IMG_UINT32				ui32PdumpFlags,
+						 DUMPDEBUG_PRINTF_FUNC  *pfnDumpDebugPrintf);
+
+/******************************************************************************
+ End of file (rgxutils.h)
+******************************************************************************/
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/allocmem.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/allocmem.c
new file mode 100644
index 0000000..e3e359d
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/allocmem.c
@@ -0,0 +1,253 @@
+/*************************************************************************/ /*!
+@File
+@Title          Host memory management implementation for Linux
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+
+#include "img_defs.h"
+#include "allocmem.h"
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#include "process_stats.h"
+#endif
+
+IMG_INTERNAL IMG_PVOID OSAllocMem(IMG_UINT32 ui32Size)
+{
+	IMG_PVOID pvRet = IMG_NULL;
+
+	if (ui32Size > PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD)
+	{
+		pvRet = vmalloc(ui32Size);
+	}
+	if (pvRet == IMG_NULL)
+	{
+		pvRet = kmalloc(ui32Size, GFP_KERNEL);
+	}
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+
+	if (pvRet != IMG_NULL)
+	{
+
+		if (!is_vmalloc_addr(pvRet))
+		{
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+			PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_KMALLOC, ksize(pvRet));
+#else
+			{
+				IMG_CPU_PHYADDR sCpuPAddr;
+				sCpuPAddr.uiAddr = 0;
+
+				PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_KMALLOC,
+				                             pvRet,
+				                             sCpuPAddr,
+				                             ksize(pvRet),
+				                             IMG_NULL);
+			}
+#endif
+		}
+		else
+		{
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+			PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE_VMALLOC,
+											   ((ui32Size + PAGE_SIZE -1) & ~(PAGE_SIZE-1)),
+											   (IMG_UINT64)(IMG_UINTPTR_T) pvRet);
+#else
+			{
+				IMG_CPU_PHYADDR sCpuPAddr;
+				sCpuPAddr.uiAddr = 0;
+
+				PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_VMALLOC,
+											 pvRet,
+											 sCpuPAddr,
+											 ((ui32Size + PAGE_SIZE -1) & ~(PAGE_SIZE-1)),
+											 IMG_NULL);
+			}
+#endif
+		}
+
+	}
+#endif
+	return pvRet;
+}
+
+
+IMG_INTERNAL IMG_PVOID OSAllocMemstatMem(IMG_UINT32 ui32Size)
+{
+	IMG_PVOID pvRet = IMG_NULL;
+
+	if (ui32Size > PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD)
+	{
+		pvRet = vmalloc(ui32Size);
+	}
+	if (pvRet == IMG_NULL)
+	{
+		pvRet = kmalloc(ui32Size, GFP_KERNEL);
+	}
+
+	return pvRet;
+}
+
+IMG_INTERNAL IMG_PVOID OSAllocZMem(IMG_UINT32 ui32Size)
+{
+	IMG_PVOID pvRet = IMG_NULL;
+
+	if (ui32Size > PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD)
+	{
+		pvRet = vzalloc(ui32Size);
+	}
+	if (pvRet == IMG_NULL)
+	{
+		pvRet = kzalloc(ui32Size, GFP_KERNEL);
+	}
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+
+	if (pvRet != IMG_NULL)
+	{
+
+		if (!is_vmalloc_addr(pvRet))
+		{
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+			PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_KMALLOC, ksize(pvRet));
+#else
+			{
+				IMG_CPU_PHYADDR sCpuPAddr;
+				sCpuPAddr.uiAddr = 0;
+
+				PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_KMALLOC,
+				                             pvRet,
+				                             sCpuPAddr,
+				                             ksize(pvRet),
+				                             IMG_NULL);
+			}
+#endif
+		}
+		else
+		{
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+			PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE_VMALLOC,
+											   ((ui32Size + PAGE_SIZE -1) & ~(PAGE_SIZE-1)),
+											   (IMG_UINT64)(IMG_UINTPTR_T) pvRet);
+#else
+			{
+				IMG_CPU_PHYADDR sCpuPAddr;
+				sCpuPAddr.uiAddr = 0;
+
+				PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_VMALLOC,
+											 pvRet,
+											 sCpuPAddr,
+											 ((ui32Size + PAGE_SIZE -1) & ~(PAGE_SIZE-1)),
+											 IMG_NULL);
+			}
+#endif
+		}
+
+	}
+#endif
+	return pvRet;
+}
+
+IMG_INTERNAL IMG_PVOID OSAllocMemstatZMem(IMG_UINT32 ui32Size)
+{
+	IMG_PVOID pvRet = IMG_NULL;
+
+	if (ui32Size > PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD)
+	{
+		pvRet = vzalloc(ui32Size);
+	}
+	if (pvRet == IMG_NULL)
+	{
+		pvRet = kzalloc(ui32Size, GFP_KERNEL);
+	}
+
+	return pvRet;
+}
+
+IMG_INTERNAL void OSFreeMem(IMG_PVOID pvMem)
+{
+
+	if ( !is_vmalloc_addr(pvMem) )
+	{
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+		if (pvMem != IMG_NULL)
+		{
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+			PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_KMALLOC, ksize(pvMem));
+#else
+			PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_KMALLOC,
+			                               (IMG_UINT64)(IMG_UINTPTR_T) pvMem);
+#endif
+		}
+#endif
+		kfree(pvMem);
+	}
+	else
+	{
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+		if (pvMem != IMG_NULL)
+		{
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+			PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE_VMALLOC,
+			                                     (IMG_UINT64)(IMG_UINTPTR_T) pvMem);
+#else
+			PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_VMALLOC,
+			                               (IMG_UINT64)(IMG_UINTPTR_T) pvMem);
+#endif
+		}
+#endif
+		vfree(pvMem);
+	}
+}
+
+IMG_INTERNAL void OSFreeMemstatMem(IMG_PVOID pvMem)
+{
+	if ( !is_vmalloc_addr(pvMem) )
+	{
+		kfree(pvMem);
+	}
+	else
+	{
+		vfree(pvMem);
+	}
+}
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/devicemem_mmap_stub.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/devicemem_mmap_stub.c
new file mode 100644
index 0000000..cbf30e7
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/devicemem_mmap_stub.c
@@ -0,0 +1,135 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device Memory Management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    OS abstraction for the mmap2 interface for mapping PMRs into
+                User Mode memory
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* our exported API */
+#include "devicemem_mmap.h"
+
+/* include/ */
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+
+/* services/include/ */
+
+/* services/include/srvhelper/ */
+#include "ra.h"
+
+/* autogenerated bridge */
+#include "client_mm_bridge.h"
+
+#include "pmr.h"
+
+IMG_INTERNAL PVRSRV_ERROR
+OSMMapPMR(IMG_HANDLE hBridge,
+          IMG_HANDLE hPMR,
+          IMG_DEVMEM_SIZE_T uiPMRSize,
+          IMG_UINT32 uiFlags,
+          IMG_HANDLE *phOSMMapPrivDataOut,
+          void **ppvMappingAddressOut,
+          IMG_SIZE_T *puiMappingLengthOut)
+{
+    PVRSRV_ERROR eError;
+    PMR *psPMR;
+    void *pvKernelAddress;
+    IMG_SIZE_T uiLength;
+    IMG_HANDLE hPriv;
+
+    PVR_UNREFERENCED_PARAMETER(hBridge);
+    PVR_UNREFERENCED_PARAMETER(uiFlags);
+
+    /*
+      Normally this function would mmap a PMR into the memory space of
+      user process, but in this case we're taking a PMR and mapping it
+      into kernel virtual space.  We keep the same function name for
+      symmetry as this allows the higher layers of the software stack
+      to not care whether they are user mode or kernel
+    */
+
+    psPMR = hPMR;
+
+    eError = PMRAcquireKernelMappingData(psPMR,
+                                         0,
+                                         0,
+                                         &pvKernelAddress,
+                                         &uiLength,
+                                         &hPriv);
+    if (eError != PVRSRV_OK)
+    {
+        goto e0;
+    }
+    
+    *phOSMMapPrivDataOut = hPriv;
+    *ppvMappingAddressOut = pvKernelAddress;
+    *puiMappingLengthOut = uiLength;
+
+    PVR_ASSERT(*puiMappingLengthOut == uiPMRSize);
+
+    return PVRSRV_OK;
+
+    /*
+      error exit paths follow
+    */
+
+ e0:
+    PVR_ASSERT(eError != PVRSRV_OK);
+    return eError;
+}
+
+IMG_INTERNAL void
+OSMUnmapPMR(IMG_HANDLE hBridge,
+            IMG_HANDLE hPMR,
+            IMG_HANDLE hOSMMapPrivData,
+            void *pvMappingAddress,
+            IMG_SIZE_T uiMappingLength)
+{
+    PMR *psPMR;
+
+    PVR_UNREFERENCED_PARAMETER(hBridge);
+    PVR_UNREFERENCED_PARAMETER(pvMappingAddress);
+    PVR_UNREFERENCED_PARAMETER(uiMappingLength);
+
+    psPMR = hPMR;
+    PMRReleaseKernelMappingData(psPMR,
+                                hOSMMapPrivData);
+}
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/driverlock.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/driverlock.h
new file mode 100644
index 0000000..bb44265
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/driverlock.h
@@ -0,0 +1,57 @@
+/*************************************************************************/ /*!
+@File           driverlock.h
+@Title          Main driver lock
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    The main driver lock, held in most places in
+                the driver.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef __DRIVERLOCK_H__
+#define __DRIVERLOCK_H__
+
+/*
+ * Main driver lock, used to ensure driver code is single threaded.
+ * There are some places where this lock must not be taken, such as
+ * in the mmap related deriver entry points.
+ */
+extern struct mutex gPVRSRVLock;
+
+#endif /* __DRIVERLOCK_H__ */
+/*****************************************************************************
+ End of file (driverlock.h)
+*****************************************************************************/
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/env_connection.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/env_connection.h
new file mode 100644
index 0000000..2b8622e
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/env_connection.h
@@ -0,0 +1,107 @@
+/*************************************************************************/ /*!
+@File
+@Title          Server side connection management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Linux specific server side connection management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(_ENV_CONNECTION_H_)
+#define _ENV_CONNECTION_H_
+
+#include <linux/list.h>
+
+#include "handle.h"
+#include "pvr_debug.h"
+
+#if defined(SUPPORT_ION)
+#include PVR_ANDROID_ION_HEADER
+#include "ion_sys.h"
+#include "allocmem.h"
+#endif
+
+#if defined(SUPPORT_ION)
+#define ION_CLIENT_NAME_SIZE	50
+
+typedef struct _ENV_ION_CONNECTION_DATA_
+{
+	IMG_CHAR azIonClientName[ION_CLIENT_NAME_SIZE];
+	struct ion_device *psIonDev;
+	struct ion_client *psIonClient;
+	IMG_UINT32 ui32IonClientRefCount;
+} ENV_ION_CONNECTION_DATA;
+#endif
+
+typedef struct _ENV_CONNECTION_DATA_
+{
+	struct file *psFile;
+
+#if defined(SUPPORT_ION)
+	ENV_ION_CONNECTION_DATA *psIonData;
+#endif
+#if defined(SUPPORT_DRM_EXT)
+	IMG_VOID *pPriv;
+#endif
+} ENV_CONNECTION_DATA;
+
+#if defined(SUPPORT_ION)
+static inline struct ion_client *EnvDataIonClientAcquire(ENV_CONNECTION_DATA *psEnvData)
+{
+	PVR_ASSERT(psEnvData->psIonData != IMG_NULL);
+	PVR_ASSERT(psEnvData->psIonData->psIonClient != IMG_NULL);
+	PVR_ASSERT(psEnvData->psIonData->ui32IonClientRefCount > 0);
+	psEnvData->psIonData->ui32IonClientRefCount++;
+	return psEnvData->psIonData->psIonClient;
+}
+
+static inline void EnvDataIonClientRelease(ENV_ION_CONNECTION_DATA *psIonData)
+{
+	PVR_ASSERT(psIonData != IMG_NULL);
+	PVR_ASSERT(psIonData->psIonClient != IMG_NULL);
+	PVR_ASSERT(psIonData->ui32IonClientRefCount > 0);
+	if (--psIonData->ui32IonClientRefCount == 0)
+	{
+		ion_client_destroy(psIonData->psIonClient);
+		IonDevRelease(psIonData->psIonDev);
+		OSFreeMem(psIonData);
+		psIonData = IMG_NULL;
+	}
+}
+#endif /* defined(SUPPORT_ION) */
+
+#endif /* !defined(_ENV_CONNECTION_H_) */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/env_data.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/env_data.h
new file mode 100644
index 0000000..7e5eb03
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/env_data.h
@@ -0,0 +1,56 @@
+/*************************************************************************/ /*!
+@File
+@Title          Environmental Data header file
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Linux-specific part of system data.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef _ENV_DATA_
+#define _ENV_DATA_
+
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+
+#if defined(PVR_LINUX_MISR_USING_WORKQUEUE) || defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE)
+#include <linux/workqueue.h>
+#endif
+
+#endif /* _ENV_DATA_ */
+/*****************************************************************************
+ End of file (env_data.h)
+*****************************************************************************/
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/event.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/event.c
new file mode 100644
index 0000000..c54ca21
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/event.c
@@ -0,0 +1,361 @@
+/*************************************************************************/ /*!
+@File
+@Title          Event Object
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/version.h>
+#include <asm/io.h>
+#include <asm/page.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22) && (LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0)) )
+#include <asm/system.h>
+#endif
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <asm/hardirq.h>
+#include <linux/timer.h>
+#include <linux/capability.h>
+#include <linux/sched.h>
+#include <asm/uaccess.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "allocmem.h"
+#include "mm.h"
+#include "env_data.h"
+#include "driverlock.h"
+#include "event.h"
+#include "pvr_debug.h"
+#include "pvrsrv.h"
+
+#include "osfunc.h"
+
+/* Returns pointer to task_struct that belongs to thread which acquired
+ * bridge lock. */
+extern struct task_struct *OSGetBridgeLockOwner(void);
+
+typedef struct PVRSRV_LINUX_EVENT_OBJECT_LIST_TAG
+{
+	rwlock_t sLock;
+	struct list_head sList;
+
+} PVRSRV_LINUX_EVENT_OBJECT_LIST;
+
+
+typedef struct PVRSRV_LINUX_EVENT_OBJECT_TAG
+{
+	atomic_t sTimeStamp;
+	IMG_UINT32 ui32TimeStampPrevious;
+#if defined(DEBUG)
+	IMG_UINT ui32Stats;
+#endif
+	wait_queue_head_t sWait;
+	struct list_head sList;
+	PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList;
+} PVRSRV_LINUX_EVENT_OBJECT;
+
+/*!
+******************************************************************************
+
+ @Function	LinuxEventObjectListCreate
+
+ @Description
+
+ Linux wait object list creation
+
+ @Output    hOSEventKM : Pointer to the event object list handle
+
+ @Return   PVRSRV_ERROR  :  Error code
+
+******************************************************************************/
+PVRSRV_ERROR LinuxEventObjectListCreate(IMG_HANDLE *phEventObjectList)
+{
+	PVRSRV_LINUX_EVENT_OBJECT_LIST *psEvenObjectList;
+
+	psEvenObjectList = OSAllocMem(sizeof(PVRSRV_LINUX_EVENT_OBJECT_LIST));
+	if (psEvenObjectList == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectCreate: failed to allocate memory for event list"));
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	INIT_LIST_HEAD(&psEvenObjectList->sList);
+
+	rwlock_init(&psEvenObjectList->sLock);
+
+	*phEventObjectList = (IMG_HANDLE *) psEvenObjectList;
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	LinuxEventObjectListDestroy
+
+ @Description
+
+ Linux wait object list destruction
+
+ @Input    hOSEventKM : Event object list handle
+
+ @Return   PVRSRV_ERROR  :  Error code
+
+******************************************************************************/
+PVRSRV_ERROR LinuxEventObjectListDestroy(IMG_HANDLE hEventObjectList)
+{
+
+	PVRSRV_LINUX_EVENT_OBJECT_LIST *psEvenObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST *) hEventObjectList ;
+
+	if(psEvenObjectList)
+	{
+		if (!list_empty(&psEvenObjectList->sList))
+		{
+			 PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectListDestroy: Event List is not empty"));
+			 return PVRSRV_ERROR_UNABLE_TO_DESTROY_EVENT;
+		}
+		OSFreeMem(psEvenObjectList);
+		/*not nulling pointer, copy on stack*/
+	}
+	return PVRSRV_OK;
+}
+
+
+/*!
+******************************************************************************
+
+ @Function	LinuxEventObjectDelete
+
+ @Description
+
+ Linux wait object removal
+
+ @Input    hOSEventObject : Event object handle
+
+ @Return   PVRSRV_ERROR  :  Error code
+
+******************************************************************************/
+PVRSRV_ERROR LinuxEventObjectDelete(IMG_HANDLE hOSEventObject)
+{
+	if(hOSEventObject)
+	{
+		PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *)hOSEventObject;
+		PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = psLinuxEventObject->psLinuxEventObjectList;
+
+		write_lock_bh(&psLinuxEventObjectList->sLock);
+		list_del(&psLinuxEventObject->sList);
+		write_unlock_bh(&psLinuxEventObjectList->sLock);
+
+#if defined(DEBUG)
+//		PVR_DPF((PVR_DBG_MESSAGE, "LinuxEventObjectDelete: Event object waits: %u", psLinuxEventObject->ui32Stats));
+#endif
+
+		OSFreeMem(psLinuxEventObject);
+		/*not nulling pointer, copy on stack*/
+
+		return PVRSRV_OK;
+	}
+	return PVRSRV_ERROR_UNABLE_TO_DESTROY_EVENT;
+}
+
+/*!
+******************************************************************************
+
+ @Function	LinuxEventObjectAdd
+
+ @Description
+
+ Linux wait object addition
+
+ @Input    hOSEventObjectList : Event object list handle
+ @Output   phOSEventObject : Pointer to the event object handle
+
+ @Return   PVRSRV_ERROR  :  Error code
+
+******************************************************************************/
+PVRSRV_ERROR LinuxEventObjectAdd(IMG_HANDLE hOSEventObjectList, IMG_HANDLE *phOSEventObject)
+ {
+	PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject;
+	PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST*)hOSEventObjectList;
+
+	/* allocate completion variable */
+	psLinuxEventObject = OSAllocMem(sizeof(PVRSRV_LINUX_EVENT_OBJECT));
+	if (psLinuxEventObject == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectAdd: failed to allocate memory "));
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	INIT_LIST_HEAD(&psLinuxEventObject->sList);
+
+	atomic_set(&psLinuxEventObject->sTimeStamp, 0);
+	psLinuxEventObject->ui32TimeStampPrevious = 0;
+
+#if defined(DEBUG)
+	psLinuxEventObject->ui32Stats = 0;
+#endif
+	init_waitqueue_head(&psLinuxEventObject->sWait);
+
+	psLinuxEventObject->psLinuxEventObjectList = psLinuxEventObjectList;
+
+	write_lock_bh(&psLinuxEventObjectList->sLock);
+	list_add(&psLinuxEventObject->sList, &psLinuxEventObjectList->sList);
+	write_unlock_bh(&psLinuxEventObjectList->sLock);
+
+	*phOSEventObject = psLinuxEventObject;
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	LinuxEventObjectSignal
+
+ @Description
+
+ Linux wait object signaling function
+
+ @Input    hOSEventObjectList : Event object list handle
+
+ @Return   PVRSRV_ERROR  :  Error code
+
+******************************************************************************/
+PVRSRV_ERROR LinuxEventObjectSignal(IMG_HANDLE hOSEventObjectList)
+{
+	PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject;
+	PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST*)hOSEventObjectList;
+	struct list_head *psListEntry, *psListEntryTemp, *psList;
+	psList = &psLinuxEventObjectList->sList;
+
+	read_lock_bh(&psLinuxEventObjectList->sLock);
+	list_for_each_safe(psListEntry, psListEntryTemp, psList)
+	{
+
+		psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *)list_entry(psListEntry, PVRSRV_LINUX_EVENT_OBJECT, sList);
+
+		atomic_inc(&psLinuxEventObject->sTimeStamp);
+		wake_up_interruptible(&psLinuxEventObject->sWait);
+	}
+	read_unlock_bh(&psLinuxEventObjectList->sLock);
+
+	return 	PVRSRV_OK;
+
+}
+
+/*!
+******************************************************************************
+
+ @Function	LinuxEventObjectWait
+
+ @Description
+
+ Linux wait object routine
+
+ @Input    hOSEventObject : Event object handle
+
+ @Input   ui32MSTimeout : Time out value in msec
+
+ @Return   PVRSRV_ERROR  :  Error code
+
+******************************************************************************/
+PVRSRV_ERROR LinuxEventObjectWait(IMG_HANDLE hOSEventObject, IMG_UINT32 ui32MSTimeout, IMG_BOOL bHoldBridgeLock)
+{
+	IMG_UINT32 ui32TimeStamp;
+	IMG_BOOL bReleasePVRLock;
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	DEFINE_WAIT(sWait);
+
+	PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *) hOSEventObject;
+
+	IMG_UINT32 ui32TimeOutJiffies = msecs_to_jiffies(ui32MSTimeout);
+
+	/* Check if the driver is good shape */
+	if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK)
+	{
+		return PVRSRV_ERROR_TIMEOUT;
+	}
+
+	do
+	{
+		prepare_to_wait(&psLinuxEventObject->sWait, &sWait, TASK_INTERRUPTIBLE);
+		ui32TimeStamp = (IMG_UINT32)atomic_read(&psLinuxEventObject->sTimeStamp);
+
+		if(psLinuxEventObject->ui32TimeStampPrevious != ui32TimeStamp)
+		{
+			break;
+		}
+
+		/* Check thread holds the current PVR/bridge lock before obeying the
+		 * 'release before deschedule' behaviour. Some threads choose not to
+		 * hold the bridge lock in their implementation.
+		 */
+		bReleasePVRLock = (!bHoldBridgeLock && mutex_is_locked(&gPVRSRVLock) && current == OSGetBridgeLockOwner());
+		if (bReleasePVRLock == IMG_TRUE)
+		{
+			OSReleaseBridgeLock();
+		}
+
+		ui32TimeOutJiffies = (IMG_UINT32)schedule_timeout((IMG_INT32)ui32TimeOutJiffies);
+
+		if (bReleasePVRLock == IMG_TRUE)
+		{
+			OSAcquireBridgeLock();
+		}
+
+#if defined(DEBUG)
+		psLinuxEventObject->ui32Stats++;
+#endif
+
+
+	} while (ui32TimeOutJiffies);
+
+	finish_wait(&psLinuxEventObject->sWait, &sWait);
+
+	psLinuxEventObject->ui32TimeStampPrevious = ui32TimeStamp;
+
+	return ui32TimeOutJiffies ? PVRSRV_OK : PVRSRV_ERROR_TIMEOUT;
+
+}
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/event.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/event.h
new file mode 100644
index 0000000..9063f88
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/event.h
@@ -0,0 +1,48 @@
+/*************************************************************************/ /*!
+@File
+@Title          Event Object 
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+PVRSRV_ERROR LinuxEventObjectListCreate(IMG_HANDLE *phEventObjectList);
+PVRSRV_ERROR LinuxEventObjectListDestroy(IMG_HANDLE hEventObjectList);
+PVRSRV_ERROR LinuxEventObjectAdd(IMG_HANDLE hOSEventObjectList, IMG_HANDLE *phOSEventObject);
+PVRSRV_ERROR LinuxEventObjectDelete(IMG_HANDLE hOSEventObject);
+PVRSRV_ERROR LinuxEventObjectSignal(IMG_HANDLE hOSEventObjectList);
+PVRSRV_ERROR LinuxEventObjectWait(IMG_HANDLE hOSEventObject, IMG_UINT32 ui32MSTimeout, IMG_BOOL bHoldBridgeLock);
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/handle_idr.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/handle_idr.c
new file mode 100644
index 0000000..5b5ecf6
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/handle_idr.c
@@ -0,0 +1,445 @@
+/*************************************************************************/ /*!
+@File
+@Title		Resource Handle Manager - IDR Back-end
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description	Provide IDR based resource handle management back-end
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/gfp.h>
+#include <linux/idr.h>
+
+#include "handle_impl.h"
+#include "allocmem.h"
+#include "osfunc.h"
+#include "pvr_debug.h"
+
+#define ID_VALUE_MIN	1
+#define ID_VALUE_MAX	INT_MAX
+
+#define	ID_TO_HANDLE(i) ((IMG_HANDLE)(IMG_UINTPTR_T)(i))
+#define	HANDLE_TO_ID(h) ((IMG_INT)(IMG_UINTPTR_T)(h))
+
+struct _HANDLE_IMPL_BASE_
+{
+	struct idr sIdr;
+
+	IMG_UINT32 ui32MaxHandleValue;
+
+	IMG_UINT32 ui32TotalHandCount;
+};
+
+typedef struct _HANDLE_ITER_DATA_WRAPPER_
+{
+	PFN_HANDLE_ITER pfnHandleIter;
+	void *pvHandleIterData;
+} HANDLE_ITER_DATA_WRAPPER;
+
+
+static int HandleIterFuncWrapper(int id, void *data, void *iter_data)
+{
+	HANDLE_ITER_DATA_WRAPPER *psIterData = (HANDLE_ITER_DATA_WRAPPER *)iter_data;
+
+	PVR_UNREFERENCED_PARAMETER(data);
+
+	return (int)psIterData->pfnHandleIter(ID_TO_HANDLE(id), psIterData->pvHandleIterData);
+}
+
+/*!
+******************************************************************************
+
+ @Function	AcquireHandle
+
+ @Description	Acquire a new handle
+
+ @Input		psBase - Pointer to handle base structure
+		phHandle - Points to a handle pointer
+		pvData - Pointer to resource to be associated with the handle
+
+ @Output	phHandle - Points to a handle pointer
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+static PVRSRV_ERROR AcquireHandle(HANDLE_IMPL_BASE *psBase, 
+				  IMG_HANDLE *phHandle, 
+				  void *pvData)
+{
+	int id;
+	int result;
+
+	PVR_ASSERT(psBase != IMG_NULL);
+	PVR_ASSERT(phHandle != IMG_NULL);
+	PVR_ASSERT(pvData != IMG_NULL);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
+	idr_preload(GFP_KERNEL);
+	id = idr_alloc(&psBase->sIdr, pvData, ID_VALUE_MIN, psBase->ui32MaxHandleValue + 1, 0);
+	idr_preload_end();
+
+	result = id;
+#else
+	do
+	{
+		if (idr_pre_get(&psBase->sIdr, GFP_KERNEL) == 0)
+		{
+			return PVRSRV_ERROR_OUT_OF_MEMORY;
+		}
+
+		result = idr_get_new_above(&psBase->sIdr, pvData, ID_VALUE_MIN, &id);
+	} while (result == -EAGAIN);
+
+	if ((IMG_UINT32)id > psBase->ui32MaxHandleValue)
+	{
+		idr_remove(&psBase->sIdr, id);
+		result = -ENOSPC;
+	}
+#endif
+
+	if (result < 0)
+	{
+		if (result == -ENOSPC)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Limit of %u handles reached", 
+				 __FUNCTION__, psBase->ui32MaxHandleValue));
+
+			return PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE;
+		}
+
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	psBase->ui32TotalHandCount++;
+
+	*phHandle = ID_TO_HANDLE(id);
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	ReleaseHandle
+
+ @Description	Release a handle that is no longer needed.
+
+ @Input		psBase - Pointer to handle base structure
+		hHandle - Handle to release
+		ppvData - Points to a void data pointer
+
+ @Output	ppvData - Points to a void data pointer
+
+ @Return	PVRSRV_OK or PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR ReleaseHandle(HANDLE_IMPL_BASE *psBase, 
+				  IMG_HANDLE hHandle, 
+				  void **ppvData)
+{
+	int id = HANDLE_TO_ID(hHandle);
+	void *pvData;
+
+	PVR_ASSERT(psBase);
+
+	/* Get the data associated with the handle. If we get back NULL then 
+	   it's an invalid handle */
+
+	pvData = idr_find(&psBase->sIdr, id);
+	if (pvData)
+	{
+		idr_remove(&psBase->sIdr, id);
+		psBase->ui32TotalHandCount--;
+	}
+
+	if (pvData == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Handle out of range (%u > %u)", 
+			 __FUNCTION__, id, psBase->ui32TotalHandCount));
+		return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE;
+	}
+
+	if (ppvData)
+	{
+		*ppvData = pvData;
+	}
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	GetHandleData
+
+ @Description	Get the data associated with the given handle
+
+ @Input		psBase - Pointer to handle base structure
+		hHandle - Handle from which data should be retrieved
+                ppvData - Points to a void data pointer
+
+ @Output	ppvData - Points to a void data pointer
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+static PVRSRV_ERROR GetHandleData(HANDLE_IMPL_BASE *psBase, 
+				  IMG_HANDLE hHandle, 
+				  void **ppvData)
+{
+	int id = HANDLE_TO_ID(hHandle);
+	void *pvData;
+
+	PVR_ASSERT(psBase);
+	PVR_ASSERT(ppvData);
+
+	pvData = idr_find(&psBase->sIdr, id);
+	if (pvData)
+	{
+		*ppvData = pvData;
+
+		return PVRSRV_OK;
+	}
+	else
+	{
+		return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE;
+	}
+}
+
+/*!
+******************************************************************************
+
+ @Function	SetHandleData
+
+ @Description	Set the data associated with the given handle
+
+ @Input		psBase - Pointer to handle base structure
+		hHandle - Handle for which data should be changed
+		pvData - Pointer to new data to be associated with the handle
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+static PVRSRV_ERROR SetHandleData(HANDLE_IMPL_BASE *psBase, 
+				  IMG_HANDLE hHandle, 
+				  void *pvData)
+{
+	int id = HANDLE_TO_ID(hHandle);
+	void *pvOldData;
+
+	PVR_ASSERT(psBase);
+
+	pvOldData = idr_replace(&psBase->sIdr, pvData, id);
+	if (IS_ERR(pvOldData))
+	{
+		if (PTR_ERR(pvOldData) == -ENOENT)
+		{
+			return PVRSRV_ERROR_HANDLE_NOT_ALLOCATED;
+		}
+		else
+		{
+			return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE;
+		}
+	}
+
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR IterateOverHandles(HANDLE_IMPL_BASE *psBase, PFN_HANDLE_ITER pfnHandleIter, void *pvHandleIterData)
+{
+	HANDLE_ITER_DATA_WRAPPER sIterData;
+
+	PVR_ASSERT(psBase);
+	PVR_ASSERT(pfnHandleIter);
+
+	sIterData.pfnHandleIter = pfnHandleIter;
+	sIterData.pvHandleIterData = pvHandleIterData;
+
+	return (PVRSRV_ERROR)idr_for_each(&psBase->sIdr, HandleIterFuncWrapper, &sIterData);
+}
+
+/*!
+******************************************************************************
+
+ @Function	EnableHandlePurging
+
+ @Description	Enable purging for a given handle base
+
+ @Input 	psBase - pointer to handle base structure
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+static PVRSRV_ERROR EnableHandlePurging(HANDLE_IMPL_BASE *psBase)
+{
+	PVR_UNREFERENCED_PARAMETER(psBase);
+	PVR_ASSERT(psBase);
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PurgeHandles
+
+ @Description	Purge handles for a given handle base
+
+ @Input 	psBase - Pointer to handle base structure
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+static PVRSRV_ERROR PurgeHandles(HANDLE_IMPL_BASE *psBase)
+{
+	PVR_UNREFERENCED_PARAMETER(psBase);
+	PVR_ASSERT(psBase);
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	CreateHandleBase
+
+ @Description	Create a handle base structure
+
+ @Input 	ppsBase - pointer to handle base structure pointer
+
+ @Output	ppsBase - points to handle base structure pointer
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+static PVRSRV_ERROR CreateHandleBase(HANDLE_IMPL_BASE **ppsBase)
+{
+	HANDLE_IMPL_BASE *psBase;
+
+	PVR_ASSERT(ppsBase);
+
+	psBase = OSAllocZMem(sizeof(*psBase));
+	if (psBase == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Couldn't allocate generic handle base", __FUNCTION__));
+
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	idr_init(&psBase->sIdr);
+
+	psBase->ui32MaxHandleValue = ID_VALUE_MAX;
+	psBase->ui32TotalHandCount = 0;
+
+	*ppsBase = psBase;
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	DestroyHandleBase
+
+ @Description	Destroy a handle base structure
+
+ @Input 	psBase - pointer to handle base structure
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+static PVRSRV_ERROR DestroyHandleBase(HANDLE_IMPL_BASE *psBase)
+{
+	PVR_ASSERT(psBase);
+
+	if (psBase->ui32TotalHandCount != 0)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "%s: Handles still exist (%u found)", 
+			 __FUNCTION__, psBase->ui32TotalHandCount));
+	}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0))
+	idr_remove_all(&psBase->sIdr);
+#endif
+
+	/* Finally destroy the idr */
+	idr_destroy(&psBase->sIdr);
+
+	OSFreeMem(psBase);
+
+	return PVRSRV_OK;
+}
+
+
+static const HANDLE_IMPL_FUNCTAB g_sHandleFuncTab = 
+{
+	.pfnAcquireHandle = AcquireHandle,
+	.pfnReleaseHandle = ReleaseHandle,
+	.pfnGetHandleData = GetHandleData,
+	.pfnSetHandleData = SetHandleData,
+	.pfnIterateOverHandles = IterateOverHandles,
+	.pfnEnableHandlePurging = EnableHandlePurging,
+	.pfnPurgeHandles = PurgeHandles,
+	.pfnCreateHandleBase = CreateHandleBase,
+	.pfnDestroyHandleBase = DestroyHandleBase
+};
+
+PVRSRV_ERROR PVRSRVHandleGetFuncTable(HANDLE_IMPL_FUNCTAB const **ppsFuncs)
+{
+	static IMG_BOOL bAcquired = IMG_FALSE;
+
+	if (bAcquired)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Function table already acquired", 
+			 __FUNCTION__));
+		return PVRSRV_ERROR_RESOURCE_UNAVAILABLE;
+	}
+
+	if (ppsFuncs == IMG_NULL)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	*ppsFuncs = &g_sHandleFuncTab;
+
+	bAcquired = IMG_TRUE;
+
+	return PVRSRV_OK;
+}
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/linkage.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/linkage.h
new file mode 100644
index 0000000..ddbb255
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/linkage.h
@@ -0,0 +1,72 @@
+/*************************************************************************/ /*!
+@File
+@Title          Linux specific Services code internal interfaces
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Interfaces between various parts of the Linux specific
+                Services code, that don't have any other obvious
+                header file to go into.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__LINKAGE_H__)
+#define __LINKAGE_H__
+
+/*
+ * FIXME: This is declared here to save creating a new header which should be
+ * removed soon anyway as bridge gen should be providing this interface.
+ */
+PVRSRV_ERROR LinuxBridgeInit(void);
+PVRSRV_ERROR LinuxBridgeDeInit(void);
+
+#if !defined(SUPPORT_DRM)
+long PVRSRV_BridgeDispatchKM(struct file *file, unsigned int cmd, unsigned long arg);
+
+#if defined(CONFIG_COMPAT)
+long PVRSRV_BridgeCompatDispatchKM(struct file *file, unsigned int cmd, unsigned long arg);
+#endif
+#endif
+
+PVRSRV_ERROR PVROSFuncInit(void);
+void PVROSFuncDeInit(void);
+
+int PVRDebugCreateDebugFSEntries(void);
+void PVRDebugRemoveDebugFSEntries(void);
+
+int MMapPMR(struct file *file, struct vm_area_struct *ps_vma);
+
+#endif /* !defined(__LINKAGE_H__) */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/mm.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/mm.c
new file mode 100644
index 0000000..6bb2d29
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/mm.c
@@ -0,0 +1,107 @@
+/*************************************************************************/ /*!
+@File
+@Title          Misc memory management utility functions for Linux
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <asm/io.h>
+
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include "mm.h"
+#include "pvrsrv_memallocflags.h"
+#include "devicemem_server_utils.h"
+
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0))
+ #if defined(CONFIG_ARM)
+  #define ioremap_cache(x,y) ioremap_cached(x,y)
+ #endif
+#endif
+
+void *
+_IORemapWrapper(IMG_CPU_PHYADDR BasePAddr,
+               IMG_UINT32 ui32Bytes,
+               IMG_UINT32 ui32MappingFlags,
+               IMG_CHAR *pszFileName,
+               IMG_UINT32 ui32Line)
+{
+	void *pvIORemapCookie;
+	IMG_UINT32 ui32CPUCacheMode = DevmemCPUCacheMode(ui32MappingFlags);
+
+	switch (ui32CPUCacheMode)
+	{
+		case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
+				pvIORemapCookie = (void *)ioremap_nocache(BasePAddr.uiAddr, ui32Bytes);
+				break;
+		case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE:
+#if defined(CONFIG_X86) || defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+				pvIORemapCookie = (void *)ioremap_wc(BasePAddr.uiAddr, ui32Bytes);
+#else
+				pvIORemapCookie = (void *)ioremap_nocache(BasePAddr.uiAddr, ui32Bytes);
+#endif
+				break;
+		case PVRSRV_MEMALLOCFLAG_CPU_CACHED:
+#if defined(CONFIG_X86) || defined(CONFIG_ARM)
+				pvIORemapCookie = (void *)ioremap_cache(BasePAddr.uiAddr, ui32Bytes);
+#else
+				pvIORemapCookie = (void *)ioremap(BasePAddr.uiAddr, ui32Bytes);
+#endif
+				break;
+		default:
+				return IMG_NULL;
+				break;
+	}
+
+    PVR_UNREFERENCED_PARAMETER(pszFileName);
+    PVR_UNREFERENCED_PARAMETER(ui32Line);
+
+    return pvIORemapCookie;
+}
+
+
+void
+_IOUnmapWrapper(void *pvIORemapCookie, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line)
+{
+    PVR_UNREFERENCED_PARAMETER(pszFileName);
+    PVR_UNREFERENCED_PARAMETER(ui32Line);
+
+    iounmap(pvIORemapCookie);
+}
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/mm.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/mm.h
new file mode 100644
index 0000000..e8a4447
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/mm.h
@@ -0,0 +1,86 @@
+/*************************************************************************/ /*!
+@File
+@Title          Declares various memory management utility functions for Linux.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef __IMG_LINUX_MM_H__
+#define __IMG_LINUX_MM_H__
+
+#include <asm/io.h>
+/*!
+ *******************************************************************************
+ * @brief Reserve physical IO memory and create a CPU virtual mapping for it
+ *
+ * @param BasePAddr 
+ * @param ui32Bytes  
+ * @param ui32MappingFlags  
+ *
+ * @return 
+ ******************************************************************************/
+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
+#define IORemapWrapper(BasePAddr, ui32Bytes, ui32MappingFlags) \
+	_IORemapWrapper(BasePAddr, ui32Bytes, ui32MappingFlags, __FILE__, __LINE__)
+#else
+#define IORemapWrapper(BasePAddr, ui32Bytes, ui32MappingFlags) \
+	_IORemapWrapper(BasePAddr, ui32Bytes, ui32MappingFlags, NULL, 0)
+#endif
+void *_IORemapWrapper(IMG_CPU_PHYADDR BasePAddr,
+					  IMG_UINT32 ui32Bytes,
+					  IMG_UINT32 ui32MappingFlags,
+					  IMG_CHAR *pszFileName,
+					  IMG_UINT32 ui32Line);
+
+/*!
+ ******************************************************************************
+ * @brief Unmaps an IO memory mapping created using IORemap
+ *
+ * @param pvIORemapCookie  
+ *
+ * @return 
+ ******************************************************************************/
+#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
+#define IOUnmapWrapper(pvIORemapCookie) \
+	_IOUnmapWrapper(pvIORemapCookie, __FILE__, __LINE__)
+#else
+#define IOUnmapWrapper(pvIORemapCookie) \
+	_IOUnmapWrapper(pvIORemapCookie, NULL, 0)
+#endif
+void _IOUnmapWrapper(void *pvIORemapCookie, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line);
+
+#endif /* __IMG_LINUX_MM_H__ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/mmap.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/mmap.c
new file mode 100644
index 0000000..2c3b3d2
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/mmap.c
@@ -0,0 +1,482 @@
+/*************************************************************************/ /*!
+@File
+@Title          Linux mmap interface
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include <linux/version.h>
+#include <asm/io.h>
+#include <linux/mm.h>
+#include <asm/page.h>
+
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include "linkage.h"
+#include "handle.h"
+#include "pvrsrv.h"
+#include "connection_server.h"
+#include "devicemem_server_utils.h"
+#include "allocmem.h"
+
+#include "private_data.h"
+#include "driverlock.h"
+
+#if defined(SUPPORT_DRM)
+#include "pvr_drm.h"
+#endif
+
+/* WARNING!
+ * The mmap code has its own mutex, to prevent a possible deadlock,
+ * when using gPVRSRVLock.
+ * The Linux kernel takes the mm->mmap_sem before calling the mmap
+ * entry points (PVRMMap, MMapVOpen, MMapVClose), but the ioctl
+ * entry point may take mm->mmap_sem during fault handling, or 
+ * before calling get_user_pages.  If gPVRSRVLock was used in the
+ * mmap entry points, a deadlock could result, due to the ioctl
+ * and mmap code taking the two locks in different orders.
+ * As a corollary to this, the mmap entry points must not call
+ * any driver code that relies on gPVRSRVLock is held.
+ */
+static DEFINE_MUTEX(g_sMMapMutex);
+
+#include "pmr.h"
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#include "process_stats.h"
+#endif
+
+static void MMapPMROpen(struct vm_area_struct* ps_vma)
+{
+	/* Our VM flags should ensure this function never gets called */
+	PVR_ASSERT(0);
+}
+
+static void MMapPMRClose(struct vm_area_struct *ps_vma)
+{
+    PMR *psPMR;
+    IMG_UINTPTR_T  vAddr = ps_vma->vm_start;
+    IMG_SIZE_T pageSize = OSGetPageSize();
+
+    psPMR = ps_vma->vm_private_data;
+
+    while (vAddr < ps_vma->vm_end)
+    {
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+    /* USER MAPPING */
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+    PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES, PAGE_SIZE);
+#else
+	PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES, (IMG_UINT64)vAddr);
+#endif
+#endif
+    	vAddr += pageSize;
+    }
+
+    PMRUnlockSysPhysAddresses(psPMR);
+    PMRUnrefPMR(psPMR);
+}
+
+/*
+ * This vma operation is used to read data from mmap regions. It is called
+ * by access_process_vm, which is called to handle PTRACE_PEEKDATA ptrace
+ * requests and reads from /proc/<pid>/mem.
+ */
+static int MMapVAccess(struct vm_area_struct *ps_vma, unsigned long addr,
+					   void *buf, int len, int write)
+{
+    PMR *psPMR;
+    unsigned long ulOffset;
+    IMG_SIZE_T uiBytesCopied;
+    PVRSRV_ERROR eError;
+    int iRetVal = -EINVAL;
+
+    psPMR = ps_vma->vm_private_data;
+
+    ulOffset = addr - ps_vma->vm_start;
+
+	if (write)
+	{
+		eError = PMR_WriteBytes(psPMR,
+								(IMG_DEVMEM_OFFSET_T) ulOffset,
+								buf,
+								len,
+								&uiBytesCopied);
+	}
+	else
+	{
+		eError = PMR_ReadBytes(psPMR,
+							   (IMG_DEVMEM_OFFSET_T) ulOffset,
+							   buf,
+							   len,
+							   &uiBytesCopied);
+	}
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Error from %s (%d)",
+				 __FUNCTION__,
+				 write?"PMR_WriteBytes":"PMR_ReadBytes",
+				 eError));
+	}
+	else
+	{
+		iRetVal = uiBytesCopied;
+	}
+
+	return iRetVal;
+}
+
+static struct vm_operations_struct gsMMapOps =
+{
+	.open=&MMapPMROpen,
+	.close=&MMapPMRClose,
+	.access=MMapVAccess,
+};
+
+int MMapPMR(struct file *pFile, struct vm_area_struct *ps_vma)
+{
+	PVRSRV_ERROR eError;
+	IMG_HANDLE hSecurePMRHandle;
+	IMG_SIZE_T uiLength;
+	IMG_DEVMEM_OFFSET_T uiOffset;
+	unsigned long uiPFN;
+	PMR *psPMR;
+	PMR_FLAGS_T ulPMRFlags;
+	IMG_UINT32 ui32CPUCacheFlags;
+	unsigned long ulNewFlags = 0;
+	pgprot_t sPageProt;
+	CONNECTION_DATA *psConnection = LinuxConnectionFromFile(pFile);
+	IMG_BOOL bMixedMap = IMG_FALSE;
+    IMG_CPU_PHYADDR asCpuPAddr[PMR_MAX_TRANSLATION_STACK_ALLOC];
+    IMG_BOOL abValid[PMR_MAX_TRANSLATION_STACK_ALLOC];
+	IMG_UINT32 uiOffsetIdx, uiNumOfPFNs;
+	IMG_CPU_PHYADDR *psCpuPAddr;
+	IMG_BOOL *pbValid;
+
+	if(psConnection == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Invalid connection data"));
+		goto em0;
+	}
+
+	/*
+	 * The bridge lock used here to protect Both PVRSRVLookupHandle and ResManFindPrivateDataByPtr
+	 * is replaced by a specific lock considering that the handle functions have now their own lock
+	 * and ResManFindPrivateDataByPtr is going to be removed.
+	 *  This change was necessary to solve the lockdep issues related with the MMapPMR
+	 */
+	mutex_lock(&g_sMMapMutex);
+	PMRLock();
+
+#if defined(SUPPORT_DRM_DC_MODULE)
+	psPMR = PVRSRVGEMMMapLookupPMR(pFile, ps_vma);
+	if (!psPMR)
+#endif
+	{
+		hSecurePMRHandle = (IMG_HANDLE)((IMG_UINTPTR_T)ps_vma->vm_pgoff);
+
+		eError = PVRSRVLookupHandle(psConnection->psHandleBase,
+					    (void **)&psPMR,
+					    hSecurePMRHandle,
+					    PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+		if (eError != PVRSRV_OK)
+		{
+			goto e0;
+		}
+	}
+
+	/*
+	 * Take a reference on the PMR, make's sure that it can't be freed
+	 * while it's mapped into the user process
+	 */
+	PMRRefPMR(psPMR);
+
+	PMRUnlock();
+
+	eError = PMRLockSysPhysAddresses(psPMR, PAGE_SHIFT);
+	if (eError != PVRSRV_OK)
+	{
+		goto e1;
+	}
+
+	if (((ps_vma->vm_flags & VM_WRITE) != 0) &&
+	    ((ps_vma->vm_flags & VM_SHARED) == 0))
+	{
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto e1;
+	}
+
+	/*
+	 * We ought to call PMR_Flags() here to check the permissions
+	 * against the requested mode, and possibly to set up the cache
+	 * control protflags
+	 */
+	eError = PMR_Flags(psPMR, &ulPMRFlags);
+	if (eError != PVRSRV_OK)
+	{
+		goto e1;
+	}
+
+	ulNewFlags = ps_vma->vm_flags;
+#if 0
+	/* Discard user read/write request, we will pull these flags from the PMR */
+	ulNewFlags &= ~(VM_READ | VM_WRITE);
+
+	if (ulPMRFlags & PVRSRV_MEMALLOCFLAG_CPU_READABLE)
+	{
+		ulNewFlags |= VM_READ;
+	}
+	if (ulPMRFlags & PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE)
+	{
+		ulNewFlags |= VM_WRITE;
+	}
+#endif
+
+	ps_vma->vm_flags = ulNewFlags;
+
+#if defined (CONFIG_ARM64)
+	sPageProt = __pgprot_modify(ps_vma->vm_page_prot, 0, vm_get_page_prot(ulNewFlags));
+#elif defined(CONFIG_ARM)
+	sPageProt = __pgprot_modify(ps_vma->vm_page_prot, L_PTE_MT_MASK, vm_get_page_prot(ulNewFlags));
+#elif defined(CONFIG_X86)
+	sPageProt = pgprot_modify(ps_vma->vm_page_prot, vm_get_page_prot(ulNewFlags));
+#elif defined(CONFIG_METAG) || defined(CONFIG_MIPS)
+	sPageProt = vm_get_page_prot(ulNewFlags);
+#else
+#error Please add pgprot_modify equivalent for your system
+#endif
+	ui32CPUCacheFlags = DevmemCPUCacheMode(ulPMRFlags);
+	switch (ui32CPUCacheFlags)
+	{
+		case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
+				sPageProt = pgprot_noncached(sPageProt);
+				break;
+
+		case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE:
+				sPageProt = pgprot_writecombine(sPageProt);
+				break;
+
+		case PVRSRV_MEMALLOCFLAG_CPU_CACHED:
+				break;
+
+		default:
+				eError = PVRSRV_ERROR_INVALID_PARAMS;
+				goto e1;
+	}
+	ps_vma->vm_page_prot = sPageProt;
+
+    uiLength = ps_vma->vm_end - ps_vma->vm_start;
+
+    ps_vma->vm_flags |= VM_IO;
+
+/* Don't include the mapping in core dumps */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
+    ps_vma->vm_flags |= VM_DONTDUMP;
+#else
+    ps_vma->vm_flags |= VM_RESERVED;
+#endif
+
+    /*
+     * Disable mremap because our nopage handler assumes all
+     * page requests have already been validated.
+     */
+    ps_vma->vm_flags |= VM_DONTEXPAND;
+    
+    /* Don't allow mapping to be inherited across a process fork */
+    ps_vma->vm_flags |= VM_DONTCOPY;
+
+    /* Can we use stack allocations */
+    uiNumOfPFNs = uiLength >> PAGE_SHIFT;
+    if (uiNumOfPFNs > PMR_MAX_TRANSLATION_STACK_ALLOC)
+    {
+    	psCpuPAddr = OSAllocMem(uiNumOfPFNs * sizeof(IMG_CPU_PHYADDR));
+    	if (psCpuPAddr == IMG_NULL)
+    	{
+    		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+    		goto e2;
+    	}
+    	
+    	/* Should allocation fail, clean-up here before exiting */
+    	pbValid = OSAllocMem(uiNumOfPFNs * sizeof(IMG_BOOL));
+    	if (pbValid == IMG_NULL)
+    	{
+    		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+    		OSFreeMem(psCpuPAddr);
+    		goto e2;
+    	}
+    }
+    else
+    {
+		psCpuPAddr = asCpuPAddr;
+		pbValid = abValid;
+    }
+    
+    /* Obtain map range pfns */
+	eError = PMR_CpuPhysAddr(psPMR,
+							 PAGE_SHIFT,
+							 uiNumOfPFNs,
+							 0,
+							 psCpuPAddr,
+							 pbValid);
+	if (eError)
+	{
+		goto e3;
+	}
+
+	/* Scan the map range for pfns without struct page* handling. If we find
+	   one, this is a mixed map, and we can't use vm_insert_page() */
+	for (uiOffsetIdx = 0; uiOffsetIdx < uiNumOfPFNs; ++uiOffsetIdx)
+	{
+		if (pbValid[uiOffsetIdx])
+		{
+			uiPFN = psCpuPAddr[uiOffsetIdx].uiAddr >> PAGE_SHIFT;
+			PVR_ASSERT(((IMG_UINT64)uiPFN << PAGE_SHIFT) == psCpuPAddr[uiOffsetIdx].uiAddr);
+
+			if (!pfn_valid(uiPFN) || page_count(pfn_to_page(uiPFN)) == 0)
+			{
+				bMixedMap = IMG_TRUE;
+			}
+		}
+	}
+
+	if (bMixedMap)
+	{
+	    ps_vma->vm_flags |= VM_MIXEDMAP;
+	}
+
+    for (uiOffset = 0; uiOffset < uiLength; uiOffset += 1ULL<<PAGE_SHIFT)
+    {
+        IMG_SIZE_T uiNumContiguousBytes;
+        IMG_INT32 iStatus;
+
+        uiNumContiguousBytes = 1ULL<<PAGE_SHIFT;
+        uiOffsetIdx = uiOffset >> PAGE_SHIFT;
+
+		/*
+			Only map in pages that are valid, any that aren't will be picked up
+			by the nopage handler which will return a zeroed page for us
+		*/
+		if (pbValid[uiOffsetIdx])
+		{
+	        uiPFN = psCpuPAddr[uiOffsetIdx].uiAddr >> PAGE_SHIFT;
+	        PVR_ASSERT(((IMG_UINT64)uiPFN << PAGE_SHIFT) == psCpuPAddr[uiOffsetIdx].uiAddr);
+
+			if (bMixedMap)
+			{
+				/* This path is just for debugging. It should be equivalent
+				 * to the remap_pfn_range() path.
+				 */
+				iStatus = vm_insert_mixed(ps_vma,
+										  ps_vma->vm_start + uiOffset,
+										  uiPFN);
+			}
+			else
+			{
+				/* Since kernel 3.7 this sets VM_MIXEDMAP internally */
+				iStatus = vm_insert_page(ps_vma,
+										 ps_vma->vm_start + uiOffset,
+										 pfn_to_page(uiPFN));
+			}
+
+	        PVR_ASSERT(iStatus == 0);
+	        if(iStatus)
+	        {
+	            // N.B. not the right error code, but, it doesn't get propagated anyway... :(
+	            eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+	
+	            goto e3;
+	        }
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+    /* USER MAPPING*/
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+	    PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES, PAGE_SIZE);
+#else
+    	PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES,
+    			 	 	 	 	 (IMG_VOID*)(IMG_UINTPTR_T)(ps_vma->vm_start + uiOffset),
+    			 	 	 	 	 psCpuPAddr[uiOffsetIdx],
+								 PAGE_SIZE,
+								 IMG_NULL);
+#endif
+#endif
+
+		}
+        (void)pFile;
+    }
+    
+    if (psCpuPAddr != asCpuPAddr)
+    {
+    	OSFreeMem(psCpuPAddr);
+    	OSFreeMem(pbValid);
+    }
+
+    /* let us see the PMR so we can unlock it later */
+    ps_vma->vm_private_data = psPMR;
+
+    /* Install open and close handlers for ref-counting */
+    ps_vma->vm_ops = &gsMMapOps;
+
+	mutex_unlock(&g_sMMapMutex);
+
+    return 0;
+
+    /*
+      error exit paths follow
+    */
+ e3:
+	if (psCpuPAddr != asCpuPAddr)
+	{
+		OSFreeMem(psCpuPAddr);
+		OSFreeMem(pbValid);
+	}
+ e2:
+    PVR_DPF((PVR_DBG_ERROR, "don't know how to handle this error.  Abort!"));
+    PMRUnlockSysPhysAddresses(psPMR);
+ e1:
+	PMRUnrefPMR(psPMR);
+	goto em1;
+ e0:
+    PVR_DPF((PVR_DBG_ERROR, "Error in MMapPMR critical section"));
+	PMRUnlock();
+ em1:
+    PVR_ASSERT(eError != PVRSRV_OK);
+    PVR_DPF((PVR_DBG_ERROR, "unable to translate error %d", eError));
+	mutex_unlock(&g_sMMapMutex);
+ em0:
+    return -ENOENT; // -EAGAIN // or what?
+}
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/module.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/module.c
new file mode 100644
index 0000000..8c3b5b8
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/module.c
@@ -0,0 +1,1085 @@
+/*************************************************************************/ /*!
+@File
+@Title          Linux module setup
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/version.h>
+
+#if (!defined(LDM_PLATFORM) && !defined(LDM_PCI)) || \
+	(defined(LDM_PLATFORM) && defined(LDM_PCI))
+	#error "LDM_PLATFORM or LDM_PCI must be defined"
+#endif
+
+#if defined(SUPPORT_DRM)
+#define	PVR_MOD_STATIC
+#else
+#define	PVR_MOD_STATIC	static
+#endif
+
+#if defined(PVR_LDM_PLATFORM_PRE_REGISTERED)
+#define PVR_USE_PRE_REGISTERED_PLATFORM_DEV
+#endif
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+
+#if defined(SUPPORT_DRM)
+#include <drm/drmP.h>
+#endif
+
+#if defined(LDM_PLATFORM)
+#include <linux/platform_device.h>
+#endif
+
+#if defined(LDM_PCI)
+#include <linux/pci.h>
+#endif
+
+#include <linux/device.h>
+
+#include "img_defs.h"
+#include "mm.h"
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "srvkm.h"
+#include "connection_server.h"
+#include "handle.h"
+#include "pvr_debugfs.h"
+#include "pvrmodule.h"
+#include "private_data.h"
+#include "driverlock.h"
+#include "linkage.h"
+#include "power.h"
+#include "env_connection.h"
+#include "sysinfo.h"
+#include "pvrsrv.h"
+#include "process_stats.h"
+
+#if defined(SUPPORT_SYSTEM_INTERRUPT_HANDLING) || defined(SUPPORT_DRM)
+#include "syscommon.h"
+#endif
+
+#if defined(SUPPORT_DRM_EXT)
+#include "pvr_drm_ext.h"
+#endif
+#if defined(SUPPORT_DRM)
+#include "pvr_drm.h"
+#endif
+#if defined(SUPPORT_AUTH)
+#include "osauth.h"
+#endif
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+#include "pvr_sync.h"
+#endif
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+#include "pvr_gputrace.h"
+#endif
+
+#if defined(SUPPORT_KERNEL_HWPERF) || defined(SUPPORT_SHARED_SLC)
+#include "rgxapi_km.h"
+#endif
+
+#if defined(SUPPORT_KERNEL_SRVINIT)
+#include "srvinit.h"
+#endif
+
+#if defined(SUPPORT_DISPLAY_CLASS)
+#include "kerneldisplay.h"
+#endif
+
+/*
+ * DRVNAME is the name we use to register our driver.
+ * DEVNAME is the name we use to register actual device nodes.
+ */
+#define	DRVNAME		PVR_LDM_DRIVER_REGISTRATION_NAME
+#define DEVNAME		PVRSRV_MODNAME
+
+#if defined(SUPPORT_DRM)
+#define PRIVATE_DATA(pFile) (PVR_DRM_FILE_FROM_FILE(pFile)->driver_priv)
+#else
+#define PRIVATE_DATA(pFile) ((pFile)->private_data)
+#endif
+
+/*
+ * This is all module configuration stuff required by the linux kernel.
+ */
+MODULE_SUPPORTED_DEVICE(DEVNAME);
+
+#if defined(PVRSRV_NEED_PVR_DPF) || defined(DEBUG)
+#include <linux/moduleparam.h>
+#endif /* defined(PVRSRV_NEED_PVR_DPF) || defined(DEBUG) */
+
+#if defined(PVRSRV_NEED_PVR_DPF)
+extern IMG_UINT32 gPVRDebugLevel;
+module_param(gPVRDebugLevel, uint, 0644);
+MODULE_PARM_DESC(gPVRDebugLevel, "Sets the level of debug output (default 0x7)");
+#endif /* defined(PVRSRV_NEED_PVR_DPF) */
+
+#if defined(DEBUG)
+extern IMG_UINT32 gPMRAllocFail;
+module_param(gPMRAllocFail, uint, 0644);
+MODULE_PARM_DESC(gPMRAllocFail, "When number of PMR allocs reaches"
+        " this value, it will fail (default value is 0 which"
+        "means that alloc function will behave normally).");
+#endif /* defined(DEBUG) */
+
+/*
+ * Newer kernels no longer support __devinitdata, __devinit, __devexit, or
+ * __devexit_p.
+ */
+#if !defined(__devinitdata)
+#define __devinitdata
+#endif
+#if !defined(__devinit)
+#define __devinit
+#endif
+#if !defined(__devexit)
+#define __devexit
+#endif
+#if !defined(__devexit_p)
+#define __devexit_p(x) (&(x))
+#endif
+
+#if defined(SUPPORT_DISPLAY_CLASS)
+/* Display class interface */
+EXPORT_SYMBOL(DCRegisterDevice);
+EXPORT_SYMBOL(DCUnregisterDevice);
+EXPORT_SYMBOL(DCDisplayConfigurationRetired);
+EXPORT_SYMBOL(DCDisplayHasPendingCommand);
+EXPORT_SYMBOL(DCImportBufferAcquire);
+EXPORT_SYMBOL(DCImportBufferRelease);
+#endif
+
+/* Physmem interface (required by LMA DC drivers) */
+EXPORT_SYMBOL(PhysHeapAcquire);
+EXPORT_SYMBOL(PhysHeapRelease);
+EXPORT_SYMBOL(PhysHeapGetType);
+EXPORT_SYMBOL(PhysHeapGetAddress);
+EXPORT_SYMBOL(PhysHeapGetSize);
+EXPORT_SYMBOL(PhysHeapCpuPAddrToDevPAddr);
+
+/* System interface (required by DC drivers) */
+#if defined(SUPPORT_SYSTEM_INTERRUPT_HANDLING) && !defined(SUPPORT_DRM)
+EXPORT_SYMBOL(SysInstallDeviceLISR);
+EXPORT_SYMBOL(SysUninstallDeviceLISR);
+#endif
+
+EXPORT_SYMBOL(PVRSRVCheckStatus);
+EXPORT_SYMBOL(PVRSRVGetErrorStringKM);
+
+#if defined(SUPPORT_KERNEL_HWPERF)
+EXPORT_SYMBOL(RGXHWPerfConnect);
+EXPORT_SYMBOL(RGXHWPerfDisconnect);
+EXPORT_SYMBOL(RGXHWPerfControl);
+EXPORT_SYMBOL(RGXHWPerfConfigureAndEnableCounters);
+EXPORT_SYMBOL(RGXHWPerfDisableCounters);
+EXPORT_SYMBOL(RGXHWPerfAcquireData);
+EXPORT_SYMBOL(RGXHWPerfReleaseData);
+#endif
+
+#if defined(SUPPORT_SHARED_SLC)
+EXPORT_SYMBOL(RGXInitSLC);
+#endif
+
+#if !defined(SUPPORT_DRM)
+struct device *psDev;
+
+/*
+ * Device class used for /sys entries (and udev device node creation)
+ */
+static struct class *psPvrClass;
+
+/*
+ * This is the major number we use for all nodes in /dev.
+ */
+static int AssignedMajorNumber;
+
+/*
+ * These are the operations that will be associated with the device node
+ * we create.
+ *
+ * With gcc -W, specifying only the non-null members produces "missing
+ * initializer" warnings.
+*/
+static int PVRSRVOpen(struct inode* pInode, struct file* pFile);
+static int PVRSRVRelease(struct inode* pInode, struct file* pFile);
+
+static struct file_operations pvrsrv_fops =
+{
+	.owner		= THIS_MODULE,
+	.unlocked_ioctl	= PVRSRV_BridgeDispatchKM,
+#if defined(CONFIG_COMPAT)
+	.compat_ioctl	= PVRSRV_BridgeCompatDispatchKM,
+#endif
+	.open		= PVRSRVOpen,
+	.release	= PVRSRVRelease,
+	.mmap		= MMapPMR,
+};
+#endif	/* !defined(SUPPORT_DRM) */
+
+DEFINE_MUTEX(gPVRSRVLock);
+
+#if defined(LDM_PLATFORM)
+#define	LDM_DEV	struct platform_device
+#define	LDM_DRV	struct platform_driver
+#define TO_LDM_DEV(d) to_platform_device(d)
+#endif /*LDM_PLATFORM */
+
+#if defined(LDM_PCI)
+#define	LDM_DEV	struct pci_dev
+#define	LDM_DRV	struct pci_driver
+#define TO_LDM_DEV(d) to_pci_device(d)
+#endif /* LDM_PCI */
+
+#if defined(LDM_PLATFORM)
+static int PVRSRVDriverRemove(LDM_DEV *device);
+static int PVRSRVDriverProbe(LDM_DEV *device);
+#endif
+
+#if defined(LDM_PCI)
+static void PVRSRVDriverRemove(LDM_DEV *device);
+static int PVRSRVDriverProbe(LDM_DEV *device, const struct pci_device_id *id);
+#endif
+
+static void PVRSRVDriverShutdown(LDM_DEV *device);
+static int PVRSRVDriverSuspend(struct device *device);
+static int PVRSRVDriverResume(struct device *device);
+
+#if defined(LDM_PCI)
+/* This structure is used by the Linux module code */
+struct pci_device_id powervr_id_table[] __devinitdata = {
+	{PCI_DEVICE(SYS_RGX_DEV_VENDOR_ID, SYS_RGX_DEV_DEVICE_ID)},
+#if defined (SYS_RGX_DEV1_DEVICE_ID)
+	{PCI_DEVICE(SYS_RGX_DEV_VENDOR_ID, SYS_RGX_DEV1_DEVICE_ID)},
+#endif
+	{0}
+};
+#if !defined(SUPPORT_DRM_EXT)
+MODULE_DEVICE_TABLE(pci, powervr_id_table);
+#endif
+#endif /*defined(LDM_PCI) */
+
+#if defined(PVR_USE_PRE_REGISTERED_PLATFORM_DEV)
+static struct platform_device_id powervr_id_table[] __devinitdata = {
+	{SYS_RGX_DEV_NAME, 0},
+	{}
+};
+#endif
+
+static struct dev_pm_ops powervr_dev_pm_ops = {
+	.suspend	= PVRSRVDriverSuspend,
+	.resume		= PVRSRVDriverResume,
+};
+
+static LDM_DRV powervr_driver = {
+#if defined(LDM_PLATFORM)
+	.driver = {
+		.name	= DRVNAME,
+		.pm	= &powervr_dev_pm_ops,
+	},
+#endif
+#if defined(LDM_PCI)
+	.name		= DRVNAME,
+	.driver.pm	= &powervr_dev_pm_ops,
+#endif
+#if defined(LDM_PCI) || defined(PVR_USE_PRE_REGISTERED_PLATFORM_DEV)
+	.id_table	= powervr_id_table,
+#endif
+	.probe		= PVRSRVDriverProbe,
+#if defined(LDM_PLATFORM)
+	.remove		= PVRSRVDriverRemove,
+#endif
+#if defined(LDM_PCI)
+	.remove		= __devexit_p(PVRSRVDriverRemove),
+#endif
+	.shutdown	= PVRSRVDriverShutdown,
+};
+
+#if defined(SUPPORT_DRM_EXT)
+extern LDM_DEV *gpsPVRLDMDev;
+#else
+LDM_DEV *gpsPVRLDMDev;
+#endif
+
+#if defined(LDM_PLATFORM)
+#if defined(MODULE) && !defined(PVR_USE_PRE_REGISTERED_PLATFORM_DEV)
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0))
+static void PVRSRVDeviceRelease(struct device unref__ *pDevice)
+{
+}
+
+static struct platform_device powervr_device =
+{
+	.name			= DEVNAME,
+	.id			= -1,
+	.dev 			= {
+		.release	= PVRSRVDeviceRelease
+	}
+};
+#else
+static struct platform_device_info powervr_device_info =
+{
+	.name			= DEVNAME,
+	.id			= -1,
+	.dma_mask		= DMA_BIT_MASK(32),
+};
+#endif	/* (LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0)) */
+#endif	/* defined(MODULE) && !defined(PVR_USE_PRE_REGISTERED_PLATFORM_DEV) */
+#endif	/* defined(LDM_PLATFORM) */
+
+static IMG_BOOL bCalledSysInit = IMG_FALSE;
+static IMG_BOOL	bDriverProbeSucceeded = IMG_FALSE;
+
+/*!
+******************************************************************************
+
+ @Function		PVRSRVSystemInit
+
+ @Description
+
+ Wrapper for PVRSRVInit.
+
+ @input pDevice - the device for which a probe is requested
+
+ @Return 0 for success or <0 for an error.
+
+*****************************************************************************/
+#if defined(SUPPORT_DRM)
+int PVRSRVSystemInit(struct drm_device *pDrmDevice)
+#else
+static int PVRSRVSystemInit(LDM_DEV *pDevice)
+#endif
+{
+#if defined(SUPPORT_DRM)
+#if defined(LDM_PLATFORM)
+	LDM_DEV *pDevice = pDrmDevice->platformdev;
+#elif defined(LDM_PCI)
+	LDM_DEV *pDevice = pDrmDevice->pdev;
+#endif
+#endif
+
+	PVR_TRACE(("PVRSRVSystemInit (pDevice=%p)", pDevice));
+
+#if defined(SUPPORT_DRM)
+	/* PVRSRVInit is only designed to be called once */
+	if (bCalledSysInit == IMG_FALSE)
+#endif
+	{
+		gpsPVRLDMDev = pDevice;
+		bCalledSysInit = IMG_TRUE;
+
+		if (PVRSRVInit(pDevice) != PVRSRV_OK)
+		{
+			return -ENODEV;
+		}
+	}
+
+	return 0;
+}
+
+/*!
+******************************************************************************
+
+ @Function		PVRSRVSystemDeInit
+
+ @Description
+
+ Wrapper for PVRSRVDeInit.
+
+ @input pDevice - the device for which a probe is requested
+ @Return nothing.
+
+*****************************************************************************/
+PVR_MOD_STATIC void PVRSRVSystemDeInit(LDM_DEV *pDevice)
+{
+	PVR_TRACE(("PVRSRVSystemDeInit"));
+
+	PVRSRVDeInit(pDevice);
+
+#if !defined(LDM_PLATFORM) || (LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0))
+	gpsPVRLDMDev = IMG_NULL;
+#endif
+}
+
+/*!
+******************************************************************************
+
+ @Function		PVRSRVDriverProbe
+
+ @Description
+
+ See whether a given device is really one we can drive.
+
+ @input pDevice - the device for which a probe is requested
+
+ @Return 0 for success or <0 for an error.
+
+*****************************************************************************/
+#if defined(LDM_PLATFORM)
+static int PVRSRVDriverProbe(LDM_DEV *pDevice)
+#endif
+#if defined(LDM_PCI)
+static int __devinit PVRSRVDriverProbe(LDM_DEV *pDevice, const struct pci_device_id *pID)
+#endif
+{
+	int result = 0;
+
+	PVR_TRACE(("PVRSRVDriverProbe (pDevice=%p)", pDevice));
+
+#if defined(SUPPORT_DRM)
+#if defined(LDM_PLATFORM) && !defined(SUPPORT_DRM_EXT)
+	result = drm_platform_init(&sPVRDRMDriver, pDevice);
+#endif
+#if defined(LDM_PCI) && !defined(SUPPORT_DRM_EXT)
+	result = drm_get_pci_dev(pDevice, pID, &sPVRDRMDriver);
+#endif
+#else	/* defined(SUPPORT_DRM) */
+	result = PVRSRVSystemInit(pDevice);
+#endif	/* defined(SUPPORT_DRM) */
+	bDriverProbeSucceeded = (result == 0);
+	return result;
+}
+
+
+/*!
+******************************************************************************
+
+ @Function		PVRSRVDriverRemove
+
+ @Description
+
+ This call is the opposite of the probe call; it is called when the device is
+ being removed from the driver's control.
+
+ @input pDevice - the device for which driver detachment is happening
+
+ @Return 0, or no return value at all, depending on the device type.
+
+*****************************************************************************/
+#if defined (LDM_PLATFORM)
+static int PVRSRVDriverRemove(LDM_DEV *pDevice)
+#endif
+#if defined(LDM_PCI)
+static void __devexit PVRSRVDriverRemove(LDM_DEV *pDevice)
+#endif
+{
+	PVR_TRACE(("PVRSRVDriverRemove (pDevice=%p)", pDevice));
+
+#if defined(SUPPORT_DRM) && !defined(SUPPORT_DRM_EXT)
+#if defined(LDM_PLATFORM)
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0))
+	drm_platform_exit(&sPVRDRMDriver, pDevice);
+#else
+	drm_put_dev(platform_get_drvdata(pDevice));
+#endif
+#endif	/* defined(LDM_PLATFORM) */
+#if defined(LDM_PCI)
+	drm_put_dev(pci_get_drvdata(pDevice));
+#endif
+#else	/* defined(SUPPORT_DRM) */
+	PVRSRVSystemDeInit(pDevice);
+#endif	/* defined(SUPPORT_DRM) */
+#if defined(LDM_PLATFORM)
+	return 0;
+#endif
+}
+
+static DEFINE_MUTEX(gsPMMutex);
+static IMG_BOOL bDriverIsSuspended;
+static IMG_BOOL bDriverIsShutdown;
+
+/*!
+******************************************************************************
+
+ @Function		PVRSRVDriverShutdown
+
+ @Description
+
+ Suspend device operation for system shutdown.  This is called as part of the
+ system halt/reboot process.  The driver is put into a quiescent state by 
+ setting the power state to D3.
+
+ @input pDevice - the device for which shutdown is requested
+
+ @Return nothing
+
+*****************************************************************************/
+static void PVRSRVDriverShutdown(LDM_DEV *pDevice)
+{
+	PVR_TRACE(("PVRSRVDriverShutdown (pDevice=%p)", pDevice));
+
+	mutex_lock(&gsPMMutex);
+
+	if (!bDriverIsShutdown && !bDriverIsSuspended)
+	{
+		/*
+		 * Take the bridge mutex, and never release it, to stop
+		 * processes trying to use the driver after it has been
+		 * shutdown.
+		 */
+		OSAcquireBridgeLock();
+
+		(void) PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_OFF, IMG_TRUE);
+	}
+
+	bDriverIsShutdown = IMG_TRUE;
+
+	/* The bridge mutex is held on exit */
+	mutex_unlock(&gsPMMutex);
+}
+
+/*!
+******************************************************************************
+
+ @Function		PVRSRVDriverSuspend
+
+ @Description
+
+ Suspend device operation.
+
+ @input pDevice - the device for which resume is requested
+
+ @Return 0 for success or <0 for an error.
+
+*****************************************************************************/
+static int PVRSRVDriverSuspend(struct device *pDevice)
+{
+	int res = 0;
+
+	PVR_TRACE(( "PVRSRVDriverSuspend (pDevice=%p)", pDevice));
+
+	mutex_lock(&gsPMMutex);
+
+	if (!bDriverIsSuspended && !bDriverIsShutdown)
+	{
+		OSAcquireBridgeLock();
+
+		if (PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_OFF, IMG_TRUE) == PVRSRV_OK)
+		{
+			/* The bridge mutex will be held until we resume */
+			bDriverIsSuspended = IMG_TRUE;
+		}
+		else
+		{
+			OSReleaseBridgeLock();
+			res = -EINVAL;
+		}
+	}
+
+	mutex_unlock(&gsPMMutex);
+
+	return res;
+}
+
+
+/*!
+******************************************************************************
+
+ @Function		PVRSRVDriverResume
+
+ @Description
+
+ Resume device operation.
+
+ @input pDevice - the device for which resume is requested
+
+ @Return 0 for success or <0 for an error.
+
+*****************************************************************************/
+static int PVRSRVDriverResume(struct device *pDevice)
+{
+	int res = 0;
+
+	PVR_TRACE(("PVRSRVDriverResume (pDevice=%p)", pDevice));
+
+	mutex_lock(&gsPMMutex);
+
+	if (bDriverIsSuspended && !bDriverIsShutdown)
+	{
+		if (PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_ON, IMG_TRUE) == PVRSRV_OK)
+		{
+			bDriverIsSuspended = IMG_FALSE;
+			OSReleaseBridgeLock();
+		}
+		else
+		{
+			/* The bridge mutex is not released on failure */
+			res = -EINVAL;
+		}
+	}
+
+	mutex_unlock(&gsPMMutex);
+
+	return res;
+}
+
+/*!
+******************************************************************************
+
+ @Function		PVRSRVOpen
+
+ @Description
+
+ Open the PVR services node.
+
+ @input pInode - the inode for the file being openeded.
+ @input dev    - the DRM device corresponding to this driver.
+
+ @input pFile - the file handle data for the actual file being opened
+
+ @Return 0 for success or <0 for an error.
+
+*****************************************************************************/
+#if defined(SUPPORT_DRM)
+int PVRSRVOpen(struct drm_device unref__ *dev, struct drm_file *pDRMFile)
+#else
+static int PVRSRVOpen(struct inode unref__ *pInode, struct file *pFile)
+#endif
+{
+#if defined(SUPPORT_DRM)
+	struct file *pFile = PVR_FILE_FROM_DRM_FILE(pDRMFile);
+#endif
+	void *pvConnectionData;
+	PVRSRV_ERROR eError;
+
+	if (!try_module_get(THIS_MODULE))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Failed to get module"));
+		return -ENOENT;
+	}
+
+	OSAcquireBridgeLock();
+
+	/*
+	 * Here we pass the file pointer which will passed through to our
+	 * OSConnectionPrivateDataInit function where we can save it so
+	 * we can back reference the file structure from it's connection
+	 */
+	eError = PVRSRVConnectionConnect(&pvConnectionData, (IMG_PVOID) pFile);
+	if (eError != PVRSRV_OK)
+	{
+		OSReleaseBridgeLock();
+		module_put(THIS_MODULE);
+
+		return -ENOMEM;
+	}
+
+	PRIVATE_DATA(pFile) = pvConnectionData;
+	OSReleaseBridgeLock();
+
+	return 0;
+}
+
+
+/*!
+******************************************************************************
+
+ @Function		PVRSRVRelease
+
+ @Description
+
+ Release access the PVR services node - called when a file is closed, whether
+ at exit or using close(2) system call.
+
+ @input pInode - the inode for the file being released
+ @input pvPrivData - driver private data
+
+ @input pFile - the file handle data for the actual file being released
+
+ @Return 0 for success or <0 for an error.
+
+*****************************************************************************/
+#if defined(SUPPORT_DRM)
+void PVRSRVRelease(struct drm_device unref__ *dev, struct drm_file *pDRMFile)
+#else
+static int PVRSRVRelease(struct inode unref__ *pInode, struct file *pFile)
+#endif
+{
+#if defined(SUPPORT_DRM)
+	struct file *pFile = PVR_FILE_FROM_DRM_FILE(pDRMFile);
+#endif
+	void *pvConnectionData;
+
+	OSAcquireBridgeLock();
+
+	pvConnectionData = PRIVATE_DATA(pFile);
+	if (pvConnectionData)
+	{
+		PVRSRVConnectionDisconnect(pvConnectionData);
+		PRIVATE_DATA(pFile) = NULL;
+	}
+
+	OSReleaseBridgeLock();
+	module_put(THIS_MODULE);
+
+#if !defined(SUPPORT_DRM)
+	return 0;
+#endif
+}
+
+CONNECTION_DATA *LinuxConnectionFromFile(struct file *pFile)
+{
+	return (pFile)? PRIVATE_DATA(pFile): IMG_NULL;
+}
+
+struct file *LinuxFileFromConnection(CONNECTION_DATA *psConnection)
+{
+	ENV_CONNECTION_DATA *psEnvConnection;
+
+	psEnvConnection = PVRSRVConnectionPrivateData(psConnection);
+	PVR_ASSERT(psEnvConnection != NULL);
+	
+	return psEnvConnection->psFile;
+}
+
+#if defined(SUPPORT_AUTH)
+PVRSRV_ERROR OSCheckAuthentication(CONNECTION_DATA *psConnection, IMG_UINT32 ui32Level)
+{
+	if (ui32Level != 0)
+	{
+		ENV_CONNECTION_DATA *psEnvConnection;
+
+		psEnvConnection = PVRSRVConnectionPrivateData(psConnection);
+		if (psEnvConnection == IMG_NULL)
+		{
+			return PVRSRV_ERROR_RESOURCE_UNAVAILABLE;
+		}
+
+		if (!PVR_DRM_FILE_FROM_FILE(psEnvConnection->psFile)->authenticated)
+		{
+			PVR_DPF((PVR_DBG_WARNING, "%s: PVR Services Connection not authenticated", __FUNCTION__));
+			return PVRSRV_ERROR_NOT_AUTHENTICATED;
+		}
+	}
+
+	return PVRSRV_OK;
+}
+#endif /* defined(SUPPORT_AUTH) */
+
+/*!
+******************************************************************************
+
+ @Function		PVRCore_Init
+
+ @Description
+
+ Insert the driver into the kernel.
+
+ Readable and/or writable debugfs entries under /sys/kernel/debug/pvr are
+ created with PVRDebugFSCreateEntry().  These can be read at runtime to get
+ information about the device (eg. 'cat /sys/kernel/debug/pvr/nodes')
+
+ __init places the function in a special memory section that the kernel frees
+ once the function has been run.  Refer also to module_init() macro call below.
+
+ @input none
+
+ @Return none
+
+*****************************************************************************/
+#if defined(SUPPORT_DRM_EXT)
+int PVRCore_Init(void)
+#else
+static int __init PVRCore_Init(void)
+#endif
+{
+	PVRSRV_ERROR eError;
+	int error = 0;
+
+	PVR_TRACE(("PVRCore_Init"));
+
+#if defined(SUPPORT_DRM) && defined(PDUMP)
+	error = dbgdrv_init();
+	if (error != 0)
+	{
+		return error;
+	}
+#endif
+
+	error = PVRDebugFSInit();
+	if (error != 0)
+	{
+		return error;
+	}
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+	eError = PVRSRVStatsInitialise();
+	if (eError != PVRSRV_OK)
+	{
+		return -ENOMEM;
+	}
+#endif
+
+	eError = PVROSFuncInit();
+	if (eError != PVRSRV_OK)
+	{
+		return -ENOMEM;
+	}
+
+	LinuxBridgeInit();
+
+#if defined(LDM_PLATFORM)
+	error = platform_driver_register(&powervr_driver);
+	if (error != 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to register platform driver (%d)", error));
+		return error;
+	}
+
+#if defined(MODULE) && !defined(PVR_USE_PRE_REGISTERED_PLATFORM_DEV)
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0))
+	error = platform_device_register(&powervr_device);
+#else
+	gpsPVRLDMDev = platform_device_register_full(&powervr_device_info);
+	error = IS_ERR(gpsPVRLDMDev) ? PTR_ERR(gpsPVRLDMDev) : 0;
+#endif
+	if (error != 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to register platform device (%d)", error));
+		return error;
+	}
+#endif /* defined(MODULE) && !defined(PVR_USE_PRE_REGISTERED_PLATFORM_DEV) */
+#endif /* defined(LDM_PLATFORM) */ 
+
+#if defined(LDM_PCI) && !defined(SUPPORT_DRM_EXT)
+#if defined(SUPPORT_DRM)
+	error = drm_pci_init(&sPVRDRMDriver, &powervr_driver);
+#else
+	error = pci_register_driver(&powervr_driver);
+#endif
+	if (error != 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to register PCI driver (%d)", error));
+		return error;
+	}
+#endif /* defined(LDM_PCI) && !defined(SUPPORT_DRM_EXT) */
+
+	/* Check that the driver probe function was called */
+#if defined(LDM_PCI) && defined(SUPPORT_DRM_EXT)
+	if (!bDriverProbeSucceeded)
+	{
+		error = PVRSRVInit(gpsPVRLDMDev);
+		if (error != 0)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "PVRSRVSystemInit: unable to init PVR service (%d)", error));
+			return error;
+		}
+		bDriverProbeSucceeded = IMG_TRUE;
+	}
+#endif /* defined(SUPPORT_DRM) */
+
+	if (!bDriverProbeSucceeded)
+	{
+		PVR_TRACE(("PVRCore_Init: PVRSRVDriverProbe has not been called or did not succeed - check that hardware is detected"));
+		return error;
+	}
+
+#if !defined(SUPPORT_DRM)
+	AssignedMajorNumber = register_chrdev(0, DEVNAME, &pvrsrv_fops);
+	if (AssignedMajorNumber <= 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to get major number"));
+		return -EBUSY;
+	}
+
+	PVR_TRACE(("PVRCore_Init: major device %d", AssignedMajorNumber));
+
+	/*
+	 * This code facilitates automatic device node creation on platforms
+	 * with udev (or similar).
+	 */
+	psPvrClass = class_create(THIS_MODULE, "pvr");
+	if (IS_ERR(psPvrClass))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to create class (%ld)", PTR_ERR(psPvrClass)));
+		return -EBUSY;
+	}
+
+	psDev = device_create(psPvrClass, NULL, MKDEV(AssignedMajorNumber, 0),
+				  NULL, DEVNAME);
+	if (IS_ERR(psDev))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to create device (%ld)", PTR_ERR(psDev)));
+		return -EBUSY;
+	}
+#endif /* !defined(SUPPORT_DRM) */
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+	eError = pvr_sync_init();
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to create sync (%d)", eError));
+		return -EBUSY;
+	}
+#endif
+
+	error = PVRDebugCreateDebugFSEntries();
+	if (error != 0)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "PVRCore_Init: failed to create default debugfs entries (%d)", error));
+	}
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+	error = PVRGpuTraceInit();
+	if (error != 0)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "PVRCore_Init: failed to initialise PVR GPU Tracing (%d)", error));
+	}
+#endif
+
+#if defined(SUPPORT_KERNEL_SRVINIT)
+	eError = SrvInit();
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: SrvInit failed (%d)", eError));
+		return -ENODEV;
+	}
+#endif
+	return 0;
+}
+
+
+/*!
+*****************************************************************************
+
+ @Function		PVRCore_Cleanup
+
+ @Description	
+
+ Remove the driver from the kernel.
+
+ There's no way we can get out of being unloaded other than panicking; we
+ just do everything and plough on regardless of error.
+
+ __exit places the function in a special memory section that the kernel frees
+ once the function has been run.  Refer also to module_exit() macro call below.
+
+ @input none
+
+ @Return none
+
+*****************************************************************************/
+#if defined(SUPPORT_DRM_EXT)
+void PVRCore_Cleanup(void)
+#else
+static void __exit PVRCore_Cleanup(void)
+#endif
+{
+	PVR_TRACE(("PVRCore_Cleanup"));
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+	PVRGpuTraceDeInit();
+#endif
+
+	PVRDebugRemoveDebugFSEntries();
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+	pvr_sync_deinit();
+#endif
+
+#if !defined(SUPPORT_DRM)
+	if (psDev)
+	{
+		device_destroy(psPvrClass, MKDEV(AssignedMajorNumber, 0));
+	}
+
+	if (psPvrClass)
+	{
+		class_destroy(psPvrClass);
+	}
+
+	if (AssignedMajorNumber > 0)
+	{
+		unregister_chrdev((IMG_UINT)AssignedMajorNumber, DEVNAME);
+	}
+#endif
+
+#if defined(LDM_PCI)
+#if defined(SUPPORT_DRM) && !defined(SUPPORT_DRM_EXT)
+	drm_pci_exit(&sPVRDRMDriver, &powervr_driver);
+#else
+	pci_unregister_driver(&powervr_driver);
+#endif
+#endif /* defined(LDM_PCI) */
+
+#if defined (LDM_PLATFORM)
+#if defined(MODULE) && !defined(PVR_USE_PRE_REGISTERED_PLATFORM_DEV)
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0))
+	platform_device_unregister(&powervr_device);
+#else
+	PVR_ASSERT(gpsPVRLDMDev != NULL);
+	platform_device_unregister(gpsPVRLDMDev);
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0)) */
+#endif /* defined(MODULE) && !defined(PVR_USE_PRE_REGISTERED_PLATFORM_DEV) */
+	platform_driver_unregister(&powervr_driver);
+#endif /* defined (LDM_PLATFORM) */
+
+	LinuxBridgeDeInit();
+
+	PVROSFuncDeInit();
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+	PVRSRVStatsDestroy();
+#endif
+	PVRDebugFSDeInit();
+
+#if defined(SUPPORT_DRM) && defined(PDUMP)
+	dbgdrv_cleanup();
+#endif
+	PVR_TRACE(("PVRCore_Cleanup: unloading"));
+}
+
+/*
+ * These macro calls define the initialisation and removal functions of the
+ * driver.  Although they are prefixed `module_', they apply when compiling
+ * statically as well; in both cases they define the function the kernel will
+ * run to start/stop the driver.
+*/
+#if !defined(SUPPORT_DRM_EXT)
+module_init(PVRCore_Init);
+module_exit(PVRCore_Cleanup);
+#endif
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/osconnection_server.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/osconnection_server.c
new file mode 100644
index 0000000..0842735
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/osconnection_server.c
@@ -0,0 +1,132 @@
+/*************************************************************************/ /*!
+@File
+@Title          Linux specific per process data functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "connection_server.h"
+#include "osconnection_server.h"
+
+#include "env_connection.h"
+#include "allocmem.h"
+#include "pvr_debug.h"
+
+#if defined (SUPPORT_ION)
+#include <linux/err.h>
+#include PVR_ANDROID_ION_HEADER
+
+/*
+	The ion device (the base object for all requests)
+	gets created by the system and we acquire it via
+	linux specific functions provided by the system layer
+*/
+#include "ion_sys.h"
+#endif
+
+PVRSRV_ERROR OSConnectionPrivateDataInit(IMG_HANDLE *phOsPrivateData, IMG_PVOID pvOSData)
+{
+	ENV_CONNECTION_DATA *psEnvConnection;
+#if defined(SUPPORT_ION)
+	ENV_ION_CONNECTION_DATA *psIonConnection;
+#endif
+
+	*phOsPrivateData = OSAllocMem(sizeof(ENV_CONNECTION_DATA));
+
+	if (*phOsPrivateData == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: OSAllocMem failed", __FUNCTION__));
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	psEnvConnection = (ENV_CONNECTION_DATA *)*phOsPrivateData;
+	OSMemSet(psEnvConnection, 0, sizeof(*psEnvConnection));
+
+	/* Save the pointer to our struct file */
+	psEnvConnection->psFile = pvOSData;
+
+#if defined(SUPPORT_ION)
+	psIonConnection = (ENV_ION_CONNECTION_DATA *)OSAllocMem(sizeof(ENV_ION_CONNECTION_DATA));
+	if (psIonConnection == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: OSAllocMem failed", __FUNCTION__));
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+	OSMemSet(psIonConnection, 0, sizeof(*psIonConnection));
+	psEnvConnection->psIonData = psIonConnection;
+	/*
+		We can have more then one connection per process so we need more then
+		the PID to have a unique name
+	*/
+	psEnvConnection->psIonData->psIonDev = IonDevAcquire();
+	OSSNPrintf(psEnvConnection->psIonData->azIonClientName, ION_CLIENT_NAME_SIZE, "pvr_ion_client-%p-%d", *phOsPrivateData, OSGetCurrentProcessID());
+	psEnvConnection->psIonData->psIonClient =
+		ion_client_create(psEnvConnection->psIonData->psIonDev,
+						  psEnvConnection->psIonData->azIonClientName);
+ 
+	if (IS_ERR_OR_NULL(psEnvConnection->psIonData->psIonClient))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "OSConnectionPrivateDataInit: Couldn't create "
+								"ion client for per connection data"));
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+	psEnvConnection->psIonData->ui32IonClientRefCount = 1;
+#endif /* SUPPORT_ION */
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR OSConnectionPrivateDataDeInit(IMG_HANDLE hOsPrivateData)
+{
+	ENV_CONNECTION_DATA *psEnvConnection; 
+
+	if (hOsPrivateData == IMG_NULL)
+	{
+		return PVRSRV_OK;
+	}
+
+	psEnvConnection = hOsPrivateData;
+
+#if defined(SUPPORT_ION)
+	EnvDataIonClientRelease(psEnvConnection->psIonData);
+#endif
+
+	OSFreeMem(hOsPrivateData);
+	/*not nulling pointer, copy on stack*/
+
+	return PVRSRV_OK;
+}
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/osfunc.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/osfunc.c
new file mode 100644
index 0000000..db09ae8
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/osfunc.c
@@ -0,0 +1,2089 @@
+/*************************************************************************/ /*!
+@File
+@Title          Environment related functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/version.h>
+#include <asm/io.h>
+#include <asm/page.h>
+#include <asm/div64.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/hugetlb.h> 
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/delay.h>
+#include <linux/genalloc.h>
+
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <asm/hardirq.h>
+#include <asm/tlbflush.h>
+#include <asm/page.h>
+#include <linux/timer.h>
+#include <linux/capability.h>
+#include <asm/uaccess.h>
+#include <linux/spinlock.h>
+#if defined(PVR_LINUX_MISR_USING_WORKQUEUE) || \
+	defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE) || \
+	defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || \
+	defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE) || \
+	defined(PVR_LINUX_USING_WORKQUEUES)
+#include <linux/workqueue.h>
+#endif
+#include <linux/kthread.h>
+#include <asm/atomic.h>
+
+#include "osfunc.h"
+#include "img_types.h"
+#include "mm.h"
+#include "allocmem.h"
+#include "env_data.h"
+#include "pvr_debugfs.h"
+#include "event.h"
+#include "linkage.h"
+#include "pvr_uaccess.h"
+#include "pvr_debug.h"
+#include "driverlock.h"
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#include "process_stats.h"
+#endif
+#if defined(SUPPORT_SYSTEM_INTERRUPT_HANDLING)
+#include "syscommon.h"
+#endif
+#include "physmem_osmem_linux.h"
+
+#if defined(VIRTUAL_PLATFORM)
+#define EVENT_OBJECT_TIMEOUT_MS         (120000)
+#else
+#if defined(EMULATOR)
+#define EVENT_OBJECT_TIMEOUT_MS         (2000)
+#else
+#define EVENT_OBJECT_TIMEOUT_MS         (100)
+#endif /* EMULATOR */
+#endif
+
+/* Fairly arbitrary sizes - hopefully enough for all bridge calls */
+#define PVRSRV_MAX_BRIDGE_IN_SIZE	0x2000
+#define PVRSRV_MAX_BRIDGE_OUT_SIZE	0x1000
+
+static void *g_pvBridgeBuffers = IMG_NULL;
+
+struct task_struct *OSGetBridgeLockOwner(void);
+
+/*
+	Create a 4MB pool which should be more then enough in most cases,
+	if it becomes full then the calling code will fall back to
+	vm_map_ram.
+*/
+
+#if defined(CONFIG_GENERIC_ALLOCATOR) && defined(CONFIG_X86) && (LINUX_VERSION_CODE > KERNEL_VERSION(3,0,0))
+#define POOL_SIZE	(4*1024*1024)
+static struct gen_pool *pvrsrv_pool_writecombine = NULL;
+static char *pool_start;
+
+static void deinit_pvr_pool(void)
+{
+	gen_pool_destroy(pvrsrv_pool_writecombine);
+	pvrsrv_pool_writecombine = NULL;
+	vfree(pool_start);
+	pool_start = NULL;
+}
+
+static void init_pvr_pool(void)
+{
+	struct vm_struct *tmp_area;
+	int ret = -1;
+
+	/* Create the pool to allocate vm space from */
+	pvrsrv_pool_writecombine = gen_pool_create(PAGE_SHIFT, -1);
+	if (!pvrsrv_pool_writecombine) {
+		printk(KERN_ERR "%s: create pvrsrv_pool failed\n", __func__);
+		return;
+	}
+
+	/* Reserve space in the vmalloc vm range */
+	tmp_area = __get_vm_area(POOL_SIZE, VM_ALLOC,
+			VMALLOC_START, VMALLOC_END);
+	if (!tmp_area) {
+		printk(KERN_ERR "%s: __get_vm_area failed\n", __func__);
+		gen_pool_destroy(pvrsrv_pool_writecombine);
+		pvrsrv_pool_writecombine = NULL;
+		return;
+	}
+
+	pool_start = tmp_area->addr;
+
+	if (!pool_start) {
+		printk(KERN_ERR "%s:No vm space to create POOL\n",
+				__func__);
+		gen_pool_destroy(pvrsrv_pool_writecombine);
+		pvrsrv_pool_writecombine = NULL;
+		return;
+	} else {
+		/* Add our reserved space into the pool */
+		ret = gen_pool_add(pvrsrv_pool_writecombine,
+			(unsigned long) pool_start, POOL_SIZE, -1);
+		if (ret) {
+			printk(KERN_ERR "%s:could not remainder pool\n",
+					__func__);
+			deinit_pvr_pool();
+			return;
+		}
+	}
+	return;
+}
+
+static inline IMG_BOOL vmap_from_pool(void *pvCPUVAddr)
+{
+	IMG_CHAR *pcTmp = pvCPUVAddr;
+	if ((pcTmp >= pool_start) && (pcTmp <= (pool_start + POOL_SIZE)))
+	{
+		return IMG_TRUE;
+	}
+	return IMG_FALSE;
+}
+#endif	/* defined(CONFIG_GENERIC_ALLOCATOR) && defined(CONFIG_X86) && (LINUX_VERSION_CODE > KERNEL_VERSION(3,0,0))*/
+
+PVRSRV_ERROR OSMMUPxAlloc(PVRSRV_DEVICE_NODE *psDevNode, IMG_SIZE_T uiSize,
+							Px_HANDLE *psMemHandle, IMG_DEV_PHYADDR *psDevPAddr)
+{
+	IMG_CPU_PHYADDR sCpuPAddr;
+	struct page *psPage;
+
+	/*
+		Check that we're not doing multiple pages worth of
+		import as it's not supported a.t.m.
+	*/
+	PVR_ASSERT(uiSize == PAGE_SIZE);
+
+	psPage = alloc_page(GFP_KERNEL);
+	if (psPage == NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+#if defined (CONFIG_X86)
+	{
+		IMG_PVOID pvPageVAddr = page_address(psPage);
+		int ret;
+		ret = set_memory_wc((unsigned long)pvPageVAddr, 1);
+
+		if (ret)
+		{
+			__free_page(psPage);
+			return PVRSRV_ERROR_UNABLE_TO_SET_CACHE_MODE;
+		}
+	}
+#endif
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64) || defined (CONFIG_METAG)
+	{
+		IMG_CPU_PHYADDR sCPUPhysAddrStart, sCPUPhysAddrEnd;
+		IMG_PVOID pvPageVAddr = kmap(psPage);
+
+		sCPUPhysAddrStart.uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(page_to_phys(psPage));
+		sCPUPhysAddrEnd.uiAddr = sCPUPhysAddrStart.uiAddr + PAGE_SIZE;
+
+		OSInvalidateCPUCacheRangeKM(pvPageVAddr,
+									pvPageVAddr + PAGE_SIZE,
+									sCPUPhysAddrStart,
+									sCPUPhysAddrEnd);
+	}
+#endif
+
+	psMemHandle->u.pvHandle = psPage;
+	sCpuPAddr.uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(page_to_phys(psPage));
+
+	PhysHeapCpuPAddrToDevPAddr(psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL], 1, psDevPAddr, &sCpuPAddr);
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+	    PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA, PAGE_SIZE);
+#else
+	PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA,
+								 psPage,
+								 sCpuPAddr,
+								 PAGE_SIZE,
+								 IMG_NULL);
+#endif
+#endif
+
+	return PVRSRV_OK;
+}
+
+void OSMMUPxFree(PVRSRV_DEVICE_NODE *psDevNode, Px_HANDLE *psMemHandle)
+{
+	struct page *psPage = (struct page*) psMemHandle->u.pvHandle;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+	    PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA, PAGE_SIZE);
+#else
+	PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA, (IMG_UINT64)(IMG_UINTPTR_T)psPage);
+#endif
+#endif
+
+#if defined (CONFIG_X86)
+	{
+		IMG_PVOID pvPageVAddr;
+		int ret;
+
+		pvPageVAddr = page_address(psPage);
+		ret = set_memory_wb((unsigned long) pvPageVAddr, 1);
+		if (ret)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to reset page attribute", __FUNCTION__));
+		}
+	}
+#endif
+	__free_page(psPage);
+}
+
+PVRSRV_ERROR OSMMUPxMap(PVRSRV_DEVICE_NODE *psDevNode, Px_HANDLE *psMemHandle,
+						IMG_SIZE_T uiSize, IMG_DEV_PHYADDR *psDevPAddr,
+						void **pvPtr)
+{
+	struct page **ppsPage = (struct page **) &psMemHandle->u.pvHandle;
+	IMG_UINTPTR_T uiCPUVAddr;
+	pgprot_t prot = PAGE_KERNEL;
+	PVR_UNREFERENCED_PARAMETER(psDevNode);
+
+	prot = pgprot_writecombine(prot);
+
+#if defined(CONFIG_GENERIC_ALLOCATOR) && defined(CONFIG_X86) && (LINUX_VERSION_CODE > KERNEL_VERSION(3,0,0))
+	uiCPUVAddr = gen_pool_alloc(pvrsrv_pool_writecombine, PAGE_SIZE);
+
+	if (uiCPUVAddr) {
+		int ret = 0;
+		struct vm_struct tmp_area;
+
+		/* vmalloc and friends expect a guard page so we need to take that into account */
+		tmp_area.addr = (void *)uiCPUVAddr;
+		tmp_area.size =  2 * PAGE_SIZE;
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3,17,0))
+		ret = map_vm_area(&tmp_area, prot, ppsPage);
+#else
+		ret = map_vm_area(&tmp_area, prot, & ppsPage);
+#endif
+		if (ret) {
+			gen_pool_free(pvrsrv_pool_writecombine, uiCPUVAddr, PAGE_SIZE);
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s: Cannot map page to pool",
+					 __func__));
+			/* Failed the pool alloc so fall back to the vm_map path */
+			uiCPUVAddr = 0;
+		}
+	}
+
+	/* Not else as if the poll alloc fails it resets uiCPUVAddr to 0 */
+	if (uiCPUVAddr == 0)
+#endif	/* defined(CONFIG_GENERIC_ALLOCATOR) && defined(CONFIG_X86) && (LINUX_VERSION_CODE > KERNEL_VERSION(3,0,0)) */
+	{
+		uiCPUVAddr = (IMG_UINTPTR_T) vm_map_ram(ppsPage,
+												1,
+												-1,
+												prot);
+	}
+
+	/* Check that one of the above methods got us an address */
+	if (((void *)uiCPUVAddr) == IMG_NULL)
+	{
+		return PVRSRV_ERROR_FAILED_TO_MAP_KERNELVIRTUAL;
+	}
+
+	*pvPtr = (void *) ((uiCPUVAddr & (~OSGetPageMask())) |
+							((IMG_UINTPTR_T) (psDevPAddr->uiAddr & OSGetPageMask())));
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+	/* Mapping is done a page at a time */
+	PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA, PAGE_SIZE);
+#else
+	{
+		IMG_CPU_PHYADDR sCpuPAddr;
+		sCpuPAddr.uiAddr = 0;
+
+		PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA,
+									 (void *)uiCPUVAddr,
+									 sCpuPAddr,
+									 uiSize,
+									 IMG_NULL);
+	}
+#endif
+#endif
+
+	return PVRSRV_OK;
+}
+
+void OSMMUPxUnmap(PVRSRV_DEVICE_NODE *psDevNode, Px_HANDLE *psMemHandle, void *pvPtr)
+{
+	PVR_UNREFERENCED_PARAMETER(psDevNode);
+	PVR_UNREFERENCED_PARAMETER(psMemHandle);
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+	/* Mapping is done a page at a time */
+	PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA, PAGE_SIZE);
+#else
+	PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA, (IMG_UINT64)(IMG_UINTPTR_T)pvPtr);
+#endif
+#endif
+
+#if defined(CONFIG_GENERIC_ALLOCATOR) && defined(CONFIG_X86) && (LINUX_VERSION_CODE > KERNEL_VERSION(3,0,0))
+	if (vmap_from_pool(pvPtr))
+	{
+		unsigned long addr = (unsigned long)pvPtr;
+
+		/* Flush the data cache */
+		flush_cache_vunmap(addr, addr + PAGE_SIZE);
+		/* Unmap the page */
+		unmap_kernel_range_noflush(addr, PAGE_SIZE);
+		/* Flush the TLB */
+		__flush_tlb_single(addr);
+		/* Free the page back to the pool */
+		gen_pool_free(pvrsrv_pool_writecombine, addr, PAGE_SIZE);
+	}
+	else
+#endif	/* defined(CONFIG_GENERIC_ALLOCATOR) && defined(CONFIG_X86) && (LINUX_VERSION_CODE > KERNEL_VERSION(3,0,0)) */
+	{
+		vm_unmap_ram(pvPtr, 1);
+	}
+}
+
+/*************************************************************************/ /*!
+@Function       OSMemCopy
+@Description    Copies memory around
+@Input          pvDst    Pointer to dst
+@Output         pvSrc    Pointer to src
+@Input          ui32Size Bytes to copy
+*/ /**************************************************************************/
+void OSMemCopy(void *pvDst, const void *pvSrc, IMG_SIZE_T ui32Size)
+{
+	memcpy(pvDst, pvSrc, ui32Size);
+}
+
+
+/*************************************************************************/ /*!
+@Function       OSMemSet
+@Description    Function that does the same as the C memset() functions
+@Modified      *pvDest     Pointer to start of buffer to be set
+@Input          ui8Value   Value to set each byte to
+@Input          ui32Size   Number of bytes to set
+*/ /**************************************************************************/
+void OSMemSet(void *pvDest, IMG_UINT8 ui8Value, IMG_SIZE_T ui32Size)
+{
+	memset(pvDest, (char) ui8Value, (size_t) ui32Size);
+}
+
+IMG_INT OSMemCmp(void *pvBufA, void *pvBufB, IMG_SIZE_T uiLen)
+{
+	return (IMG_INT) memcmp(pvBufA, pvBufB, uiLen);
+}
+
+/*************************************************************************/ /*!
+@Function       OSStringNCopy
+@Description    strcpy
+*/ /**************************************************************************/
+IMG_CHAR *OSStringNCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc, IMG_SIZE_T uSize)
+{
+	return strncpy(pszDest, pszSrc, uSize);
+}
+
+/*************************************************************************/ /*!
+@Function       OSSNPrintf
+@Description    snprintf
+@Return         the chars written or -1 on error
+*/ /**************************************************************************/
+IMG_INT32 OSSNPrintf(IMG_CHAR *pStr, IMG_SIZE_T ui32Size, const IMG_CHAR *pszFormat, ...)
+{
+	va_list argList;
+	IMG_INT32 iCount;
+
+	va_start(argList, pszFormat);
+	iCount = vsnprintf(pStr, (size_t)ui32Size, pszFormat, argList);
+	va_end(argList);
+
+	return iCount;
+}
+
+IMG_SIZE_T OSStringLength(const IMG_CHAR *pStr)
+{
+	return strlen(pStr);
+}
+
+IMG_SIZE_T OSStringNLength(const IMG_CHAR *pStr, IMG_SIZE_T uiCount)
+{
+	return strnlen(pStr, uiCount);
+}
+
+IMG_INT32 OSStringCompare(const IMG_CHAR *pStr1, const IMG_CHAR *pStr2)
+{
+	return strcmp(pStr1, pStr2);
+}
+
+/*************************************************************************/ /*!
+@Function       OSInitEnvData
+@Description    Allocates space for env specific data
+@Input          ppvEnvSpecificData   Pointer to pointer in which to return
+                                     allocated data.
+@Input          ui32MMUMode          MMU mode.
+@Return         PVRSRV_OK
+*/ /**************************************************************************/
+PVRSRV_ERROR OSInitEnvData(void)
+{
+	/* allocate memory for the bridge buffers to be used during an ioctl */
+	g_pvBridgeBuffers = OSAllocMem(PVRSRV_MAX_BRIDGE_IN_SIZE + PVRSRV_MAX_BRIDGE_OUT_SIZE);
+	if (g_pvBridgeBuffers == IMG_NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+#if defined(CONFIG_GENERIC_ALLOCATOR) && defined(CONFIG_X86) && (LINUX_VERSION_CODE > KERNEL_VERSION(3,0,0))
+	/*
+		vm_ram_ram works with 2MB blocks to avoid excessive
+		TLB flushing but our allocations are always small and have
+		a long lifetime which then leads to fragmentation of vmalloc space.
+		To workaround this we create a virtual address pool in the vmap range
+		for mapping our page tables into so we don't fragment vmalloc space.
+	*/
+	if (!pvrsrv_pool_writecombine)
+	{
+		init_pvr_pool();
+	}
+#endif	/* defined(CONFIG_GENERIC_ALLOCATOR) && defined(CONFIG_X86) && (LINUX_VERSION_CODE > KERNEL_VERSION(3,0,0)) */
+
+	LinuxInitPagePool();
+
+	return PVRSRV_OK;
+}
+
+
+/*************************************************************************/ /*!
+@Function       OSDeInitEnvData
+@Description    frees env specific data memory
+@Input          pvEnvSpecificData   Pointer to private structure
+@Return         PVRSRV_OK on success else PVRSRV_ERROR_OUT_OF_MEMORY
+*/ /**************************************************************************/
+void OSDeInitEnvData(void)
+{
+
+	LinuxDeinitPagePool();
+
+#if defined(CONFIG_GENERIC_ALLOCATOR) && defined(CONFIG_X86) && (LINUX_VERSION_CODE > KERNEL_VERSION(3,0,0))
+	if (pvrsrv_pool_writecombine)
+	{
+		deinit_pvr_pool();
+	}
+#endif
+	if (g_pvBridgeBuffers)
+	{
+		/* free-up the memory allocated for bridge buffers */
+		OSFreeMem(g_pvBridgeBuffers);
+		g_pvBridgeBuffers = IMG_NULL;
+	}
+}
+
+PVRSRV_ERROR OSGetGlobalBridgeBuffers(IMG_VOID **ppvBridgeInBuffer,
+							IMG_UINT32 *pui32BridgeInBufferSize,
+							IMG_VOID **ppvBridgeOutBuffer,
+							IMG_UINT32 *pui32BridgeOutBufferSize)
+{
+	PVR_ASSERT (ppvBridgeInBuffer && ppvBridgeOutBuffer);
+	PVR_ASSERT (pui32BridgeInBufferSize && pui32BridgeOutBufferSize);
+
+	*ppvBridgeInBuffer = g_pvBridgeBuffers;
+	*pui32BridgeInBufferSize = PVRSRV_MAX_BRIDGE_IN_SIZE;
+
+	*ppvBridgeOutBuffer = *ppvBridgeInBuffer + *pui32BridgeInBufferSize;
+	*pui32BridgeOutBufferSize = PVRSRV_MAX_BRIDGE_OUT_SIZE;
+
+	return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       OSReleaseThreadQuanta
+@Description    Releases thread quanta
+*/ /**************************************************************************/ 
+void OSReleaseThreadQuanta(void)
+{
+	schedule();
+}
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35))
+static inline IMG_UINT32 Clockus(void)
+{
+	return (jiffies * (1000000 / HZ));
+}
+#else
+/* Not matching/aligning this API to the Clockus() API above to avoid necessary
+ * multiplication/division operations in calling code.
+ */
+static inline IMG_UINT64 Clockns64(void)
+{
+	IMG_UINT64 timenow;
+
+	/* Kernel thread preempt protection. Some architecture implementations 
+	 * (ARM) of sched_clock are not preempt safe when the kernel is configured 
+	 * as such e.g. CONFIG_PREEMPT and others.
+	 */
+	preempt_disable();
+
+	/* Using sched_clock instead of ktime_get since we need a time stamp that
+	 * correlates with that shown in kernel logs and trace data not one that
+	 * is a bit behind. */
+	timenow = sched_clock();
+
+	preempt_enable();
+
+	return timenow;
+}
+#endif
+
+/*************************************************************************/ /*!
+ @Function OSClockns64
+ @Description
+        This function returns the clock in nanoseconds. Unlike OSClockus,
+        OSClockus64 has a near 64-bit range
+*/ /**************************************************************************/
+IMG_UINT64 OSClockns64(void)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
+	return Clockns64();	
+#else
+	return ((IMG_UINT64)Clockus()) * 1000ULL;
+#endif
+}
+
+/*************************************************************************/ /*!
+ @Function OSClockus64
+ @Description
+        This function returns the clock in microseconds. Unlike OSClockus,
+        OSClockus64 has a near 64-bit range
+*/ /**************************************************************************/
+IMG_UINT64 OSClockus64(void)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
+	IMG_UINT64 timenow = Clockns64();
+	IMG_UINT32 remainder;
+	return OSDivide64r64(timenow, 1000, &remainder);
+#else
+	return ((IMG_UINT64)Clockus());
+#endif
+}
+
+
+/*************************************************************************/ /*!
+@Function       OSClockus
+@Description    This function returns the clock in microseconds
+@Return         clock (us)
+*/ /**************************************************************************/ 
+IMG_UINT32 OSClockus(void)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
+	return (IMG_UINT32) OSClockus64();
+#else
+	return Clockus();
+#endif
+}
+
+
+/*************************************************************************/ /*!
+@Function       OSClockms
+@Description    This function returns the clock in milliseconds
+@Return         clock (ms)
+*/ /**************************************************************************/ 
+IMG_UINT32 OSClockms(void)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
+	IMG_UINT64 timenow = Clockns64();
+	IMG_UINT32 remainder;
+
+	return OSDivide64(timenow, 1000000, &remainder);
+#else
+	IMG_UINT64 time, j = (IMG_UINT32)jiffies;
+
+	time = j * (((1 << 16) * 1000) / HZ);
+	time >>= 16;
+
+	return (IMG_UINT32)time;
+#endif
+}
+
+
+/*
+	OSWaitus
+*/
+void OSWaitus(IMG_UINT32 ui32Timeus)
+{
+	udelay(ui32Timeus);
+}
+
+
+/*
+	OSSleepms
+*/
+void OSSleepms(IMG_UINT32 ui32Timems)
+{
+	msleep(ui32Timems);
+}
+
+
+/*************************************************************************/ /*!
+@Function       OSGetCurrentProcessID
+@Description    Returns ID of current process (thread group)
+@Return         ID of current process
+*****************************************************************************/
+IMG_PID OSGetCurrentProcessID(void)
+{
+	if (in_interrupt())
+	{
+		return KERNEL_ID;
+	}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
+	return (IMG_PID)current->pgrp;
+#else
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
+	return (IMG_PID)task_tgid_nr(current);
+#else
+	return (IMG_PID)current->tgid;
+#endif
+#endif
+}
+
+/*************************************************************************/ /*!
+@Function       OSGetCurrentProcessName
+@Description    gets name of current process
+@Return         process name
+*****************************************************************************/
+IMG_CHAR *OSGetCurrentProcessName(void)
+{
+	return current->comm;
+}
+
+/*************************************************************************/ /*!
+@Function       OSGetCurrentThreadID
+@Description    Returns ID for current thread
+@Return         ID of current thread
+*****************************************************************************/
+IMG_UINTPTR_T OSGetCurrentThreadID(void)
+{
+	if (in_interrupt())
+	{
+		return KERNEL_ID;
+	}
+
+	return current->pid;
+}
+
+/*************************************************************************/ /*!
+@Function       OSGetPageSize
+@Description    gets page size
+@Return         page size
+*/ /**************************************************************************/
+IMG_SIZE_T OSGetPageSize(void)
+{
+	return PAGE_SIZE;
+}
+
+/*************************************************************************/ /*!
+@Function       OSGetPageShift
+@Description    gets page size
+@Return         page size
+*/ /**************************************************************************/
+IMG_SIZE_T OSGetPageShift(void)
+{
+	return PAGE_SHIFT;
+}
+
+/*************************************************************************/ /*!
+@Function       OSGetPageMask
+@Description    gets page mask
+@Return         page size
+*/ /**************************************************************************/
+IMG_SIZE_T OSGetPageMask(void)
+{
+	return (OSGetPageSize()-1);
+}
+
+#if !defined (SUPPORT_SYSTEM_INTERRUPT_HANDLING)
+typedef struct _LISR_DATA_ {
+	PFN_LISR pfnLISR;
+	void *pvData;
+	IMG_UINT32 ui32IRQ;
+} LISR_DATA;
+
+/*
+	DeviceISRWrapper
+*/
+static irqreturn_t DeviceISRWrapper(int irq, void *dev_id)
+{
+	LISR_DATA *psLISRData = (LISR_DATA *) dev_id;
+	IMG_BOOL bStatus = IMG_FALSE;
+
+	PVR_UNREFERENCED_PARAMETER(irq);
+
+	bStatus = psLISRData->pfnLISR(psLISRData->pvData);
+
+	return bStatus ? IRQ_HANDLED : IRQ_NONE;
+}
+#endif
+
+/*
+	OSInstallDeviceLISR
+*/
+PVRSRV_ERROR OSInstallDeviceLISR(PVRSRV_DEVICE_CONFIG *psDevConfig,
+				 IMG_HANDLE *hLISRData, 
+				 PFN_LISR pfnLISR,
+				 void *pvData)
+{
+#if defined(SUPPORT_SYSTEM_INTERRUPT_HANDLING)
+	return SysInstallDeviceLISR(psDevConfig->ui32IRQ,
+					psDevConfig->pszName,
+					pfnLISR,
+					pvData,
+					hLISRData);
+#else
+	LISR_DATA *psLISRData;
+	unsigned long flags = 0;
+
+	psLISRData = OSAllocMem(sizeof(LISR_DATA));
+
+	psLISRData->pfnLISR = pfnLISR;
+	psLISRData->pvData = pvData;
+	psLISRData->ui32IRQ = psDevConfig->ui32IRQ;
+
+	if (psDevConfig->bIRQIsShared)
+	{
+		flags |= IRQF_SHARED;
+	}
+
+	if (psDevConfig->eIRQActiveLevel == PVRSRV_DEVICE_IRQ_ACTIVE_HIGH)
+	{
+		flags |= IRQF_TRIGGER_HIGH;
+	}
+	else if (psDevConfig->eIRQActiveLevel == PVRSRV_DEVICE_IRQ_ACTIVE_LOW)
+	{
+		flags |= IRQF_TRIGGER_LOW;
+	}
+
+	PVR_TRACE(("Installing device LISR %s on IRQ %d with cookie %p", psDevConfig->pszName, psDevConfig->ui32IRQ, pvData));
+
+	if(request_irq(psDevConfig->ui32IRQ, DeviceISRWrapper,
+		flags, psDevConfig->pszName, psLISRData))
+	{
+		PVR_DPF((PVR_DBG_ERROR,"OSInstallDeviceLISR: Couldn't install device LISR on IRQ %d", psDevConfig->ui32IRQ));
+
+		return PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR;
+	}
+
+	*hLISRData = (IMG_HANDLE) psLISRData;
+
+	return PVRSRV_OK;
+#endif
+}
+
+/*
+	OSUninstallDeviceLISR
+*/
+PVRSRV_ERROR OSUninstallDeviceLISR(IMG_HANDLE hLISRData)
+{
+#if defined (SUPPORT_SYSTEM_INTERRUPT_HANDLING)
+	return SysUninstallDeviceLISR(hLISRData);
+#else
+	LISR_DATA *psLISRData = (LISR_DATA *) hLISRData;
+
+	PVR_TRACE(("Uninstalling device LISR on IRQ %d with cookie %p", psLISRData->ui32IRQ,  psLISRData->pvData));
+
+	free_irq(psLISRData->ui32IRQ, psLISRData);
+	OSFreeMem(psLISRData);
+
+	return PVRSRV_OK;
+#endif
+}
+
+#if defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE)
+typedef struct  _MISR_DATA_ {
+	struct workqueue_struct *psWorkQueue;
+	struct work_struct sMISRWork;
+	PFN_MISR pfnMISR;
+	void *hData;
+} MISR_DATA;
+
+/*
+	MISRWrapper
+*/
+static void MISRWrapper(struct work_struct *data)
+{
+	MISR_DATA *psMISRData = container_of(data, MISR_DATA, sMISRWork);
+
+	psMISRData->pfnMISR(psMISRData->hData);
+}
+
+/*
+	OSInstallMISR
+*/
+PVRSRV_ERROR OSInstallMISR(IMG_HANDLE *hMISRData, PFN_MISR pfnMISR,
+							void *hData)
+{
+	MISR_DATA *psMISRData;
+
+	psMISRData = OSAllocMem(sizeof(MISR_DATA));
+	if (psMISRData == NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	psMISRData->hData = hData;
+	psMISRData->pfnMISR = pfnMISR;
+
+	PVR_TRACE(("Installing MISR with cookie %p", psMISRData));
+
+	psMISRData->psWorkQueue = create_singlethread_workqueue("pvr_workqueue");
+
+	if (psMISRData->psWorkQueue == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "OSInstallMISR: create_singlethreaded_workqueue failed"));
+		OSFreeMem(psMISRData);
+		return PVRSRV_ERROR_UNABLE_TO_CREATE_THREAD;
+	}
+
+	INIT_WORK(&psMISRData->sMISRWork, MISRWrapper);
+
+	*hMISRData = (IMG_HANDLE) psMISRData;
+
+	return PVRSRV_OK;
+}
+
+/*
+	OSUninstallMISR
+*/
+PVRSRV_ERROR OSUninstallMISR(IMG_HANDLE hMISRData)
+{
+	MISR_DATA *psMISRData = (MISR_DATA *) hMISRData;
+
+	PVR_TRACE(("Uninstalling MISR"));
+
+	destroy_workqueue(psMISRData->psWorkQueue);
+	OSFreeMem(psMISRData);
+
+	return PVRSRV_OK;
+}
+
+/*
+	OSScheduleMISR
+*/
+PVRSRV_ERROR OSScheduleMISR(IMG_HANDLE hMISRData)
+{
+	MISR_DATA *psMISRData = (MISR_DATA *) hMISRData;
+
+	/*
+		Note:
+
+		In the case of NO_HARDWARE we want the driver to be synchronous so
+		that we don't have to worry about waiting for previous operations
+		to complete
+	*/
+#if defined(NO_HARDWARE)
+	psMISRData->pfnMISR(psMISRData->hData);
+#else
+	queue_work(psMISRData->psWorkQueue, &psMISRData->sMISRWork);
+#endif
+	return PVRSRV_OK;
+}
+#else	/* defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE) */
+#if defined(PVR_LINUX_MISR_USING_WORKQUEUE)
+typedef struct  _MISR_DATA_ {
+	struct work_struct sMISRWork;
+	PFN_MISR pfnMISR;
+	void *hData;
+} MISR_DATA;
+
+/*
+	MISRWrapper
+*/
+static void MISRWrapper(struct work_struct *data)
+{
+	MISR_DATA *psMISRData = container_of(data, MISR_DATA, sMISRWork);
+
+	psMISRData->pfnMISR(psMISRData->hData);
+}
+
+/*
+	OSInstallMISR
+*/
+PVRSRV_ERROR OSInstallMISR(IMG_HANDLE *hMISRData, PFN_MISR pfnMISR, void *hData)
+{
+	MISR_DATA *psMISRData;
+
+	psMISRData = OSAllocMem(sizeof(MISR_DATA));
+	if (psMISRData == NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	psMISRData->hData = hData;
+	psMISRData->pfnMISR = pfnMISR;
+
+	PVR_TRACE(("Installing MISR with cookie %p", psMISRData));
+
+	INIT_WORK(&psMISRData->sMISRWork, MISRWrapper);
+
+	*hMISRData = (IMG_HANDLE) psMISRData;
+
+	return PVRSRV_OK;
+}
+
+
+/*
+	OSUninstallMISR
+*/
+PVRSRV_ERROR OSUninstallMISR(IMG_HANDLE hMISRData)
+{
+	PVR_TRACE(("Uninstalling MISR"));
+
+	flush_scheduled_work();
+
+	OSFreeMem(hMISRData);
+
+	return PVRSRV_OK;
+}
+
+/*
+	OSScheduleMISR
+*/
+PVRSRV_ERROR OSScheduleMISR(IMG_HANDLE hMISRData)
+{
+	MISR_DATA *psMISRData = hMISRData;
+#if defined(NO_HARDWARE)
+	psMISRData->pfnMISR(psMISRData->hData);
+#else
+	schedule_work(&psMISRData->sMISRWork);
+#endif
+	return PVRSRV_OK;
+}
+
+#else	/* #if defined(PVR_LINUX_MISR_USING_WORKQUEUE) */
+typedef struct _MISR_DATA_ {
+	struct tasklet_struct sMISRTasklet;
+	PFN_MISR pfnMISR;
+	void *hData;
+} MISR_DATA;
+
+/*
+	MISRWrapper
+*/
+static void MISRWrapper(unsigned long data)
+{
+	MISR_DATA *psMISRData = (MISR_DATA *) data;
+
+	psMISRData->pfnMISR(psMISRData->hData);
+}
+
+/*
+	OSInstallMISR
+*/
+PVRSRV_ERROR OSInstallMISR(IMG_HANDLE *hMISRData, PFN_MISR pfnMISR, void *hData)
+{
+	MISR_DATA *psMISRData;
+
+	psMISRData = OSAllocMem(sizeof(MISR_DATA));
+	if (psMISRData == NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	psMISRData->hData = hData;
+	psMISRData->pfnMISR = pfnMISR;
+
+	PVR_TRACE(("Installing MISR with cookie %p", psMISRData));
+
+	tasklet_init(&psMISRData->sMISRTasklet, MISRWrapper, (unsigned long)psMISRData);
+
+	*hMISRData = (IMG_HANDLE) psMISRData;
+
+	return PVRSRV_OK;
+}
+
+/*
+	OSUninstallMISR
+*/
+PVRSRV_ERROR OSUninstallMISR(IMG_HANDLE hMISRData)
+{
+	MISR_DATA *psMISRData = (MISR_DATA *) hMISRData;
+
+	PVR_TRACE(("Uninstalling MISR"));
+
+	tasklet_kill(&psMISRData->sMISRTasklet);
+
+	return PVRSRV_OK;
+}
+
+/*
+	OSScheduleMISR
+*/
+PVRSRV_ERROR OSScheduleMISR(IMG_HANDLE hMISRData)
+{
+	MISR_DATA *psMISRData = (MISR_DATA *) hMISRData;
+
+#if defined(NO_HARDWARE)
+	psMISRData->pfnMISR(psMISRData->hData);
+#else
+	tasklet_schedule(&psMISRData->sMISRTasklet);
+#endif
+	return PVRSRV_OK;
+}
+
+#endif /* #if defined(PVR_LINUX_MISR_USING_WORKQUEUE) */
+#endif /* #if defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE) */
+
+/* OS specific values for thread priority */
+const IMG_INT32 ai32OSPriorityValues[LAST_PRIORITY] = { -20, /* HIGHEST_PRIORITY */
+                                                        -10, /* HIGH_PRIRIOTY */
+                                                          0, /* NORMAL_PRIORITY */
+                                                          9, /* LOW_PRIORITY */
+                                                         19, /* LOWEST_PRIORITY */
+                                                        -22};/* NOSET_PRIORITY */
+
+typedef struct {
+	struct task_struct *kthread;
+	PFN_THREAD pfnThread;
+	void *hData;
+	OS_THREAD_LEVEL eThreadPriority;
+} OSThreadData;
+
+static int OSThreadRun(void *data)
+{
+	OSThreadData *psOSThreadData = data;
+
+	/* If i32NiceValue is acceptable, set the nice value for the new thread */
+	if (psOSThreadData->eThreadPriority != NOSET_PRIORITY &&
+	         psOSThreadData->eThreadPriority < LAST_PRIORITY)
+		set_user_nice(current, ai32OSPriorityValues[psOSThreadData->eThreadPriority]);
+
+	/* Call the client's kernel thread with the client's data pointer */
+	psOSThreadData->pfnThread(psOSThreadData->hData);
+
+	/* Wait for OSThreadDestroy() to call kthread_stop() */
+	while (!kthread_should_stop())
+	{
+		 schedule();
+	}
+
+	return 0;
+}
+
+PVRSRV_ERROR OSThreadCreate(IMG_HANDLE *phThread,
+							IMG_CHAR *pszThreadName,
+							PFN_THREAD pfnThread,
+							void *hData)
+{
+	return OSThreadCreatePriority(phThread, pszThreadName, pfnThread, hData, NOSET_PRIORITY);
+}
+
+PVRSRV_ERROR OSThreadCreatePriority(IMG_HANDLE *phThread,
+                                    IMG_CHAR *pszThreadName,
+                                    PFN_THREAD pfnThread,
+                                    void *hData,
+                                    OS_THREAD_LEVEL eThreadPriority)
+{
+	OSThreadData *psOSThreadData;
+	PVRSRV_ERROR eError;
+
+	psOSThreadData = OSAllocMem(sizeof(OSThreadData));
+	if (psOSThreadData == IMG_NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_alloc;
+	}
+
+	psOSThreadData->pfnThread = pfnThread;
+	psOSThreadData->hData = hData;
+	psOSThreadData->eThreadPriority= eThreadPriority;
+	psOSThreadData->kthread = kthread_run(OSThreadRun, psOSThreadData, pszThreadName);
+
+	if (IS_ERR(psOSThreadData->kthread))
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_kthread;
+	}
+
+	*phThread = psOSThreadData;
+
+	return PVRSRV_OK;
+
+fail_kthread:
+	OSFreeMem(psOSThreadData);
+fail_alloc:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+PVRSRV_ERROR OSThreadDestroy(IMG_HANDLE hThread)
+{
+	OSThreadData *psOSThreadData = hThread;
+	int ret;
+
+	/* Let the thread know we are ready for it to end and wait for it. */
+	ret = kthread_stop(psOSThreadData->kthread);
+	PVR_ASSERT(ret == 0);
+	OSFreeMem(psOSThreadData);
+
+	return PVRSRV_OK;
+}
+
+void OSPanic(void)
+{
+	BUG();
+
+#if defined(__KLOCWORK__)
+	/* Klocworks does not understand that BUG is terminal... */
+	abort();
+#endif
+}
+
+/*************************************************************************/ /*!
+@Function       OSMapPhysToLin
+@Description    Maps the physical memory into linear addr range
+@Input          BasePAddr       Physical cpu address
+@Input          ui32Bytes       Bytes to map
+@Input          ui32CacheType   Cache type
+@Return         Linear addr of mapping on success, else NULL
+ */ /**************************************************************************/
+void *
+OSMapPhysToLin(IMG_CPU_PHYADDR BasePAddr,
+               IMG_SIZE_T ui32Bytes,
+               IMG_UINT32 ui32MappingFlags)
+{
+	void *pvIORemapCookie;
+
+	pvIORemapCookie = IORemapWrapper(BasePAddr, ui32Bytes, ui32MappingFlags);
+	if(pvIORemapCookie == IMG_NULL)
+	{
+		PVR_ASSERT(0);
+		return IMG_NULL;
+	}
+
+	return pvIORemapCookie;
+}
+
+
+/*************************************************************************/ /*!
+@Function       OSUnMapPhysToLin
+@Description    Unmaps memory that was mapped with OSMapPhysToLin
+@Input          pvLinAddr
+@Input          ui32Bytes
+@Return         TRUE on success, else FALSE
+*/ /**************************************************************************/
+IMG_BOOL
+OSUnMapPhysToLin(void *pvLinAddr, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32MappingFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(ui32Bytes);
+
+	IOUnmapWrapper(pvLinAddr);
+
+	return IMG_TRUE;
+}
+
+/*
+	OSReadHWReg8
+*/
+IMG_UINT8 OSReadHWReg8(IMG_PVOID	pvLinRegBaseAddr,
+						IMG_UINT32	ui32Offset)
+{
+#if !defined(NO_HARDWARE)
+	return (IMG_UINT8) readb((IMG_PBYTE)pvLinRegBaseAddr+ui32Offset);
+#else
+	return 0x4e;	/* FIXME: OSReadHWReg should not exist in no hardware builds */
+#endif
+}
+
+/*
+	OSReadHWReg16
+*/
+IMG_UINT16 OSReadHWReg16(IMG_PVOID	pvLinRegBaseAddr,
+						 IMG_UINT32	ui32Offset)
+{
+#if !defined(NO_HARDWARE)
+	return (IMG_UINT16) readw((IMG_PBYTE)pvLinRegBaseAddr+ui32Offset);
+#else
+	return 0x3a4e;	/* FIXME: OSReadHWReg should not exist in no hardware builds */
+#endif
+}
+
+/*
+	OSReadHWReg32
+*/
+IMG_UINT32 OSReadHWReg32(IMG_PVOID	pvLinRegBaseAddr,
+						 IMG_UINT32	ui32Offset)
+{
+#if !defined(NO_HARDWARE)
+	return (IMG_UINT32) readl((IMG_PBYTE)pvLinRegBaseAddr+ui32Offset);
+#else
+	return 0x30f73a4e;	/* FIXME: OSReadHWReg should not exist in no hardware builds */
+#endif
+}
+
+
+/*
+	OSReadHWReg64
+*/
+IMG_UINT64 OSReadHWReg64(IMG_PVOID	pvLinRegBaseAddr,
+						 IMG_UINT32	ui32Offset)
+{
+	IMG_UINT64	ui64Result;
+
+	ui64Result = OSReadHWReg32(pvLinRegBaseAddr, ui32Offset + 4);
+	ui64Result <<= 32;
+	ui64Result |= (IMG_UINT64)OSReadHWReg32(pvLinRegBaseAddr, ui32Offset);
+
+	return ui64Result;
+}
+
+/*
+	OSReadHWRegBank
+*/
+IMG_DEVMEM_SIZE_T OSReadHWRegBank(IMG_PVOID pvLinRegBaseAddr,
+                                  IMG_UINT32 ui32Offset,
+                                  IMG_UINT8 *pui8DstBuf,
+                                  IMG_DEVMEM_SIZE_T uiDstBufLen)
+{
+#if !defined(NO_HARDWARE)
+	IMG_DEVMEM_SIZE_T uiCounter;
+
+	/* FIXME: optimize this */
+
+	for(uiCounter = 0; uiCounter < uiDstBufLen; uiCounter++) {
+		*(pui8DstBuf + uiCounter) =
+		  readb(pvLinRegBaseAddr + ui32Offset + uiCounter);
+	}
+
+	return uiCounter;
+#else
+	return uiDstBufLen;
+#endif
+}
+
+/*
+	OSWriteHWReg8
+*/
+void OSWriteHWReg8(void			*pvLinRegBaseAddr,
+				   IMG_UINT32	ui32Offset,
+				   IMG_UINT8	ui8Value)
+{
+#if !defined(NO_HARDWARE)
+	writeb(ui8Value, (IMG_PBYTE)pvLinRegBaseAddr+ui32Offset);
+#endif
+}
+
+/*
+	OSWriteHWReg16
+*/
+void OSWriteHWReg16(void		*pvLinRegBaseAddr,
+					IMG_UINT32	ui32Offset,
+					IMG_UINT16	ui16Value)
+{
+#if !defined(NO_HARDWARE)
+	writew(ui16Value, (IMG_PBYTE)pvLinRegBaseAddr+ui32Offset);
+#endif
+}
+
+/*
+	OSWriteHWReg32
+*/
+void OSWriteHWReg32(void		*pvLinRegBaseAddr,
+					IMG_UINT32	ui32Offset,
+					IMG_UINT32	ui32Value)
+{
+#if !defined(NO_HARDWARE)
+	writel(ui32Value, (IMG_PBYTE)pvLinRegBaseAddr+ui32Offset);
+#endif
+}
+
+
+/*
+	OSWriteHWReg64
+*/
+void OSWriteHWReg64(void		*pvLinRegBaseAddr,
+					IMG_UINT32	ui32Offset,
+					IMG_UINT64	ui64Value)
+{
+#if !defined(NO_HARDWARE)
+	IMG_UINT32 ui32ValueLow, ui32ValueHigh;
+
+	ui32ValueLow = ui64Value & 0xffffffff;
+	ui32ValueHigh = ((IMG_UINT64) (ui64Value >> 32)) & 0xffffffff;
+
+	writel(ui32ValueLow, pvLinRegBaseAddr + ui32Offset);
+	writel(ui32ValueHigh, pvLinRegBaseAddr + ui32Offset + 4);
+#endif
+}
+
+IMG_DEVMEM_SIZE_T OSWriteHWRegBank(void *pvLinRegBaseAddr,
+								   IMG_UINT32 ui32Offset,
+								   IMG_UINT8 *pui8SrcBuf,
+								   IMG_DEVMEM_SIZE_T uiSrcBufLen)
+{
+#if !defined(NO_HARDWARE)
+	IMG_DEVMEM_SIZE_T uiCounter;
+
+	/* FIXME: optimize this */
+
+	for(uiCounter = 0; uiCounter < uiSrcBufLen; uiCounter++) {
+		writeb(*(pui8SrcBuf + uiCounter),
+		       pvLinRegBaseAddr + ui32Offset + uiCounter);
+	}
+
+	return uiCounter;
+#else
+	return uiSrcBufLen;
+#endif
+}
+
+#define	OS_MAX_TIMERS	8
+
+/* Timer callback strucure used by OSAddTimer */
+typedef struct TIMER_CALLBACK_DATA_TAG
+{
+	IMG_BOOL			bInUse;
+	PFN_TIMER_FUNC		pfnTimerFunc;
+	void				*pvData;
+	struct timer_list	sTimer;
+	IMG_UINT32			ui32Delay;
+	IMG_BOOL			bActive;
+#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE)
+	struct work_struct	sWork;
+#endif
+}TIMER_CALLBACK_DATA;
+
+#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
+static struct workqueue_struct	*psTimerWorkQueue;
+#endif
+
+static TIMER_CALLBACK_DATA sTimers[OS_MAX_TIMERS];
+
+#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE)
+DEFINE_MUTEX(sTimerStructLock);
+#else
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39))
+/* The lock is used to control access to sTimers */
+static spinlock_t sTimerStructLock = SPIN_LOCK_UNLOCKED;
+#else
+static DEFINE_SPINLOCK(sTimerStructLock);
+#endif
+#endif
+
+static void OSTimerCallbackBody(TIMER_CALLBACK_DATA *psTimerCBData)
+{
+	if (!psTimerCBData->bActive)
+		return;
+
+	/* call timer callback */
+	psTimerCBData->pfnTimerFunc(psTimerCBData->pvData);
+
+	/* reset timer */
+	mod_timer(&psTimerCBData->sTimer, psTimerCBData->ui32Delay + jiffies);
+}
+
+
+/*************************************************************************/ /*!
+@Function       OSTimerCallbackWrapper
+@Description    OS specific timer callback wrapper function
+@Input          uData    Timer callback data
+*/ /**************************************************************************/
+static void OSTimerCallbackWrapper(IMG_UINTPTR_T uData)
+{
+	TIMER_CALLBACK_DATA	*psTimerCBData = (TIMER_CALLBACK_DATA*)uData;
+
+#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE)
+	int res;
+
+#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
+	res = queue_work(psTimerWorkQueue, &psTimerCBData->sWork);
+#else
+	res = schedule_work(&psTimerCBData->sWork);
+#endif
+	if (res == 0)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "OSTimerCallbackWrapper: work already queued"));
+	}
+#else
+	OSTimerCallbackBody(psTimerCBData);
+#endif
+}
+
+
+#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE)
+static void OSTimerWorkQueueCallBack(struct work_struct *psWork)
+{
+	TIMER_CALLBACK_DATA *psTimerCBData = container_of(psWork, TIMER_CALLBACK_DATA, sWork);
+
+	OSTimerCallbackBody(psTimerCBData);
+}
+#endif
+
+/*************************************************************************/ /*!
+@Function       OSAddTimer
+@Description    OS specific function to install a timer callback
+@Input          pfnTimerFunc    Timer callback
+@Input         *pvData          Callback data
+@Input          ui32MsTimeout   Callback period
+@Return         Valid handle success, NULL failure
+*/ /**************************************************************************/
+IMG_HANDLE OSAddTimer(PFN_TIMER_FUNC pfnTimerFunc, void *pvData, IMG_UINT32 ui32MsTimeout)
+{
+	TIMER_CALLBACK_DATA	*psTimerCBData;
+	IMG_UINT32		ui32i;
+#if !(defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE))
+	unsigned long		ulLockFlags;
+#endif
+
+	/* check callback */
+	if(!pfnTimerFunc)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "OSAddTimer: passed invalid callback"));
+		return IMG_NULL;
+	}
+
+	/* Allocate timer callback data structure */
+#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE)
+	mutex_lock(&sTimerStructLock);
+#else
+	spin_lock_irqsave(&sTimerStructLock, ulLockFlags);
+#endif
+	for (ui32i = 0; ui32i < OS_MAX_TIMERS; ui32i++)
+	{
+		psTimerCBData = &sTimers[ui32i];
+		if (!psTimerCBData->bInUse)
+		{
+			psTimerCBData->bInUse = IMG_TRUE;
+			break;
+		}
+	}
+#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE)
+	mutex_unlock(&sTimerStructLock);
+#else
+	spin_unlock_irqrestore(&sTimerStructLock, ulLockFlags);
+#endif
+	if (ui32i >= OS_MAX_TIMERS)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "OSAddTimer: all timers are in use"));
+		return IMG_NULL;
+	}
+
+	psTimerCBData->pfnTimerFunc = pfnTimerFunc;
+	psTimerCBData->pvData = pvData;
+	psTimerCBData->bActive = IMG_FALSE;
+
+	/*
+		HZ = ticks per second
+		ui32MsTimeout = required ms delay
+		ticks = (Hz * ui32MsTimeout) / 1000
+	*/
+	psTimerCBData->ui32Delay = ((HZ * ui32MsTimeout) < 1000)
+								?	1
+								:	((HZ * ui32MsTimeout) / 1000);
+	/* initialise object */
+	init_timer(&psTimerCBData->sTimer);
+
+	/* setup timer object */
+	psTimerCBData->sTimer.function = (void *)OSTimerCallbackWrapper;
+	psTimerCBData->sTimer.data = (IMG_UINTPTR_T)psTimerCBData;
+
+	return (IMG_HANDLE)(IMG_UINTPTR_T)(ui32i + 1);
+}
+
+
+static inline TIMER_CALLBACK_DATA *GetTimerStructure(IMG_HANDLE hTimer)
+{
+	IMG_UINT32 ui32i = (IMG_UINT32)((IMG_UINTPTR_T)hTimer) - 1;
+
+	PVR_ASSERT(ui32i < OS_MAX_TIMERS);
+
+	return &sTimers[ui32i];
+}
+
+/*************************************************************************/ /*!
+@Function       OSRemoveTimer
+@Description    OS specific function to remove a timer callback
+@Input          hTimer : timer handle
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR OSRemoveTimer (IMG_HANDLE hTimer)
+{
+	TIMER_CALLBACK_DATA *psTimerCBData = GetTimerStructure(hTimer);
+
+	PVR_ASSERT(psTimerCBData->bInUse);
+	PVR_ASSERT(!psTimerCBData->bActive);
+
+	/* free timer callback data struct */
+	psTimerCBData->bInUse = IMG_FALSE;
+
+	return PVRSRV_OK;
+}
+
+
+/*************************************************************************/ /*!
+@Function       OSEnableTimer
+@Description    OS specific function to enable a timer callback
+@Input          hTimer    Timer handle
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEnableTimer (IMG_HANDLE hTimer)
+{
+	TIMER_CALLBACK_DATA *psTimerCBData = GetTimerStructure(hTimer);
+
+	PVR_ASSERT(psTimerCBData->bInUse);
+	PVR_ASSERT(!psTimerCBData->bActive);
+
+	/* Start timer arming */
+	psTimerCBData->bActive = IMG_TRUE;
+
+	/* set the expire time */
+	psTimerCBData->sTimer.expires = psTimerCBData->ui32Delay + jiffies;
+
+	/* Add the timer to the list */
+	add_timer(&psTimerCBData->sTimer);
+
+	return PVRSRV_OK;
+}
+
+
+/*************************************************************************/ /*!
+@Function       OSDisableTimer
+@Description    OS specific function to disable a timer callback
+@Input          hTimer    Timer handle
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR OSDisableTimer (IMG_HANDLE hTimer)
+{
+	TIMER_CALLBACK_DATA *psTimerCBData = GetTimerStructure(hTimer);
+
+	PVR_ASSERT(psTimerCBData->bInUse);
+	PVR_ASSERT(psTimerCBData->bActive);
+
+	/* Stop timer from arming */
+	psTimerCBData->bActive = IMG_FALSE;
+	smp_mb();
+
+#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
+	flush_workqueue(psTimerWorkQueue);
+#endif
+#if defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE)
+	flush_scheduled_work();
+#endif
+
+	/* remove timer */
+	del_timer_sync(&psTimerCBData->sTimer);
+
+#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
+	/*
+	 * This second flush is to catch the case where the timer ran
+	 * before we managed to delete it, in which case, it will have
+	 * queued more work for the workqueue.	Since the bActive flag
+	 * has been cleared, this second flush won't result in the
+	 * timer being rearmed.
+	 */
+	flush_workqueue(psTimerWorkQueue);
+#endif
+#if defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE)
+	flush_scheduled_work();
+#endif
+
+	return PVRSRV_OK;
+}
+
+
+/*************************************************************************/ /*!
+@Function       OSEventObjectCreate
+@Description    OS specific function to create an event object
+@Input          pszName      Globally unique event object name (if null name must be autogenerated)
+@Output         hEventObject OS event object info structure
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEventObjectCreate(const IMG_CHAR *pszName, IMG_HANDLE *hEventObject)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PVR_UNREFERENCED_PARAMETER(pszName);
+
+	if(hEventObject)
+	{
+		if(LinuxEventObjectListCreate(hEventObject) != PVRSRV_OK)
+		{
+			 eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		}
+
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "OSEventObjectCreate: hEventObject is not a valid pointer"));
+		eError = PVRSRV_ERROR_UNABLE_TO_CREATE_EVENT;
+	}
+
+	return eError;
+}
+
+
+/*************************************************************************/ /*!
+@Function       OSEventObjectDestroy
+@Description    OS specific function to destroy an event object
+@Input          hEventObject   OS event object info structure
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEventObjectDestroy(IMG_HANDLE hEventObject)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	if(hEventObject)
+	{
+		LinuxEventObjectListDestroy(hEventObject);
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "OSEventObjectDestroy: hEventObject is not a valid pointer"));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	return eError;
+}
+
+/*
+ * EventObjectWaitTimeout()
+ */
+static PVRSRV_ERROR EventObjectWaitTimeout(IMG_HANDLE hOSEventKM,
+                                           IMG_UINT32 uiTimeoutMs,
+                                           IMG_BOOL bHoldBridgeLock)
+{
+    PVRSRV_ERROR eError;
+
+	if(hOSEventKM && uiTimeoutMs > 0)
+	{
+		eError = LinuxEventObjectWait(hOSEventKM, uiTimeoutMs, bHoldBridgeLock);
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "OSEventObjectWait: invalid arguments %p, %d", hOSEventKM, uiTimeoutMs ));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       OSEventObjectWaitTimeout
+@Description    Wait for an event with timeout as supplied. Called from client
+@Input          hOSEventKM    OS and kernel specific handle to event object
+@Input          uiTimeoutMs   Non zero time period in milliseconds to wait
+@Return         PVRSRV_ERROR_TIMEOUT : Wait reached wait limit and timed out
+@Return         PVRSRV_ERROR         : any other system error code
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEventObjectWaitTimeout(IMG_HANDLE hOSEventKM, IMG_UINT32 uiTimeoutMs)
+{
+    return EventObjectWaitTimeout(hOSEventKM, uiTimeoutMs, IMG_FALSE);
+}
+
+/*************************************************************************/ /*!
+@Function       OSEventObjectWait
+@Description    OS specific function to wait for an event object. Called
+				from client. Uses a default wait with 100ms timeout.
+@Input          hOSEventKM    OS and kernel specific handle to event object
+@Return         PVRSRV_ERROR_TIMEOUT  : Reached wait limit and timed out
+@Return         PVRSRV_ERROR  : any other system error code
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEventObjectWait(IMG_HANDLE hOSEventKM)
+{
+	return OSEventObjectWaitTimeout(hOSEventKM, EVENT_OBJECT_TIMEOUT_MS);
+}
+
+/*************************************************************************/ /*!
+@Function       OSEventObjectWaitTimeoutAndHoldBridgeLock
+@Description    Wait for an event with timeout as supplied. Called from client
+                NOTE: Holds bridge lock during wait.
+@Input          hOSEventKM    OS and kernel specific handle to event object
+@Input          uiTimeoutMs   Non zero time period in milliseconds to wait
+@Return         PVRSRV_ERROR_TIMEOUT : Wait reached wait limit and timed out
+@Return         PVRSRV_ERROR         : any other system error code
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEventObjectWaitTimeoutAndHoldBridgeLock(IMG_HANDLE hOSEventKM, IMG_UINT32 uiTimeoutMs)
+{
+	return EventObjectWaitTimeout(hOSEventKM, uiTimeoutMs, IMG_TRUE);
+}
+
+/*************************************************************************/ /*!
+@Function       OSEventObjectWaitAndHoldBridgeLock
+@Description    OS specific function to wait for an event object. Called
+				from client. Uses a default wait with 100ms timeout.
+                NOTE: Holds bridge lock during wait.
+@Input          hOSEventKM    OS and kernel specific handle to event object
+@Return         PVRSRV_ERROR_TIMEOUT  : Reached wait limit and timed out
+@Return         PVRSRV_ERROR  : any other system error code
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEventObjectWaitAndHoldBridgeLock(IMG_HANDLE hOSEventKM)
+{
+	return OSEventObjectWaitTimeoutAndHoldBridgeLock(hOSEventKM, EVENT_OBJECT_TIMEOUT_MS);
+}
+
+/*************************************************************************/ /*!
+@Function       OSEventObjectOpen
+@Description    OS specific function to open an event object.  Called from client
+@Input          hEventObject  Pointer to an event object
+@Output         phOSEvent     OS and kernel specific handle to event object
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEventObjectOpen(IMG_HANDLE hEventObject,
+											IMG_HANDLE *phOSEvent)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	if(hEventObject)
+	{
+		if(LinuxEventObjectAdd(hEventObject, phOSEvent) != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectAdd: failed"));
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+		}
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "OSEventObjectOpen: hEventObject is not a valid pointer"));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       OSEventObjectClose
+@Description    OS specific function to close an event object.  Called from client
+@Input          hOSEventKM    OS and kernel specific handle to event object
+@Return         PVRSRV_ERROR  :
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEventObjectClose(IMG_HANDLE hOSEventKM)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	if(hOSEventKM)
+	{
+		if(LinuxEventObjectDelete(hOSEventKM) != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectDelete: failed"));
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+		}
+
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "OSEventObjectDestroy: hEventObject is not a valid pointer"));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       OSEventObjectSignal
+@Description    OS specific function to 'signal' an event object.  Called from L/MISR
+@Input          hOSEventKM   OS and kernel specific handle to event object
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEventObjectSignal(IMG_HANDLE hEventObject)
+{
+	PVRSRV_ERROR eError;
+
+	if(hEventObject)
+	{
+		eError = LinuxEventObjectSignal(hEventObject);
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "OSEventObjectSignal: hOSEventKM is not a valid handle"));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       OSProcHasPrivSrvInit
+@Description    Does the process have sufficient privileges to initialise services?
+@Return         IMG_BOOL
+*/ /**************************************************************************/
+IMG_BOOL OSProcHasPrivSrvInit(void)
+{
+	return capable(CAP_SYS_ADMIN) != 0;
+}
+
+/*************************************************************************/ /*!
+@Function       OSCopyToUser
+@Description    Copy a block of data into user space
+@Input          pvSrc
+@Output         pvDest
+@Input          ui32Bytes
+@Return   PVRSRV_ERROR  :
+*/ /**************************************************************************/
+PVRSRV_ERROR OSCopyToUser(IMG_PVOID pvProcess,
+                          void *pvDest,
+                          const void *pvSrc,
+                          IMG_SIZE_T ui32Bytes)
+{
+	PVR_UNREFERENCED_PARAMETER(pvProcess);
+
+	if(pvr_copy_to_user(pvDest, pvSrc, ui32Bytes)==0)
+		return PVRSRV_OK;
+	else
+		return PVRSRV_ERROR_FAILED_TO_COPY_VIRT_MEMORY;
+}
+
+/*************************************************************************/ /*!
+@Function       OSCopyFromUser
+@Description    Copy a block of data from the user space
+@Output         pvDest
+@Input          pvSrc
+@Input          ui32Bytes
+@Return         PVRSRV_ERROR  :
+*/ /**************************************************************************/
+PVRSRV_ERROR OSCopyFromUser(IMG_PVOID pvProcess,
+                            void *pvDest,
+                            const void *pvSrc,
+                            IMG_SIZE_T ui32Bytes)
+{
+	PVR_UNREFERENCED_PARAMETER(pvProcess);
+
+	if(pvr_copy_from_user(pvDest, pvSrc, ui32Bytes)==0)
+		return PVRSRV_OK;
+	else
+		return PVRSRV_ERROR_FAILED_TO_COPY_VIRT_MEMORY;
+}
+
+/*************************************************************************/ /*!
+@Function       OSAccessOK
+@Description    Checks if a user space pointer is valide
+@Input          eVerification
+@Input          pvUserPtr
+@Input          ui32Bytes
+@Return         IMG_BOOL :
+*/ /**************************************************************************/
+IMG_BOOL OSAccessOK(IMG_VERIFY_TEST eVerification, void *pvUserPtr, IMG_SIZE_T ui32Bytes)
+{
+	IMG_INT linuxType;
+
+	if (eVerification == PVR_VERIFY_READ)
+	{
+		linuxType = VERIFY_READ;
+	}
+	else
+	{
+		PVR_ASSERT(eVerification == PVR_VERIFY_WRITE);
+		linuxType = VERIFY_WRITE;
+	}
+
+	return access_ok(linuxType, pvUserPtr, ui32Bytes);
+}
+
+
+void OSWriteMemoryBarrier(void)
+{
+	wmb();
+}
+
+
+void OSMemoryBarrier(void)
+{
+	mb();
+}
+
+IMG_UINT64 OSDivide64r64(IMG_UINT64 ui64Divident, IMG_UINT32 ui32Divisor, IMG_UINT32 *pui32Remainder)
+{
+	*pui32Remainder = do_div(ui64Divident, ui32Divisor);
+
+	return ui64Divident;
+}
+
+IMG_UINT32 OSDivide64(IMG_UINT64 ui64Divident, IMG_UINT32 ui32Divisor, IMG_UINT32 *pui32Remainder)
+{
+	*pui32Remainder = do_div(ui64Divident, ui32Divisor);
+
+	return (IMG_UINT32) ui64Divident;
+}
+
+/* One time osfunc initialisation */
+PVRSRV_ERROR PVROSFuncInit(void)
+{
+#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
+	{
+		PVR_ASSERT(!psTimerWorkQueue);
+
+		psTimerWorkQueue = create_workqueue("pvr_timer");
+		if (psTimerWorkQueue == NULL)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: couldn't create timer workqueue", __FUNCTION__));
+			return PVRSRV_ERROR_UNABLE_TO_CREATE_THREAD;
+		}
+	}
+#endif
+
+#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE)
+	{
+		IMG_UINT32 ui32i;
+
+		for (ui32i = 0; ui32i < OS_MAX_TIMERS; ui32i++)
+		{
+			TIMER_CALLBACK_DATA *psTimerCBData = &sTimers[ui32i];
+
+			INIT_WORK(&psTimerCBData->sWork, OSTimerWorkQueueCallBack);
+		}
+	}
+#endif
+	return PVRSRV_OK;
+}
+
+/*
+ * Osfunc deinitialisation.
+ * Note that PVROSFuncInit may not have been called
+ */
+void PVROSFuncDeInit(void)
+{
+#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
+	if (psTimerWorkQueue != NULL)
+	{
+		destroy_workqueue(psTimerWorkQueue);
+		psTimerWorkQueue = NULL;
+	}
+#endif
+}
+
+void OSDumpStack(void)
+{
+	dump_stack();
+}
+
+static struct task_struct *gsOwner;
+
+void OSAcquireBridgeLock(void)
+{
+	mutex_lock(&gPVRSRVLock);
+	gsOwner = current;
+}
+
+void OSReleaseBridgeLock(void)
+{
+	gsOwner = NULL;
+	mutex_unlock(&gPVRSRVLock);
+}
+
+struct task_struct *OSGetBridgeLockOwner(void)
+{
+	return gsOwner;
+}
+
+
+/*************************************************************************/ /*!
+@Function       OSCreateStatisticEntry
+@Description    Create a statistic entry in the specified folder.
+@Input          pszName        String containing the name for the entry.
+@Input          pvFolder       Reference from OSCreateStatisticFolder() of the
+                               folder to create the entry in, or IMG_NULL for the
+                               root.
+@Input          pfnStatsPrint  Pointer to function that can be used to print the
+                               values of all the statistics.
+@Input          pfnIncMemRefCt Pointer to function that can be used to take a
+                               reference on the memory backing the statistic
+							   entry.
+@Input          pfnDecMemRefCt Pointer to function that can be used to drop a
+                               reference on the memory backing the statistic
+							   entry.
+@Input          pvData         OS specific reference that can be used by
+                               pfnGetElement.
+@Return         Pointer void reference to the entry created, which can be
+                passed to OSRemoveStatisticEntry() to remove the entry.
+*/ /**************************************************************************/
+IMG_PVOID OSCreateStatisticEntry(IMG_CHAR* pszName, IMG_PVOID pvFolder,
+                                 OS_STATS_PRINT_FUNC* pfnStatsPrint,
+							     OS_INC_STATS_MEM_REFCOUNT_FUNC* pfnIncMemRefCt,
+							     OS_DEC_STATS_MEM_REFCOUNT_FUNC* pfnDecMemRefCt,
+                                 IMG_PVOID pvData)
+{
+	return (IMG_PVOID)PVRDebugFSCreateStatisticEntry(pszName, (PVR_DEBUGFS_DIR_DATA *)pvFolder, pfnStatsPrint, pfnIncMemRefCt, pfnDecMemRefCt, pvData);
+} /* OSCreateStatisticEntry */
+
+
+/*************************************************************************/ /*!
+@Function       OSRemoveStatisticEntry
+@Description    Removes a statistic entry.
+@Input          pvEntry  Pointer void reference to the entry created by
+                         OSCreateStatisticEntry().
+*/ /**************************************************************************/
+void OSRemoveStatisticEntry(IMG_PVOID pvEntry)
+{
+	PVRDebugFSRemoveStatisticEntry((PVR_DEBUGFS_DRIVER_STAT *)pvEntry);
+} /* OSRemoveStatisticEntry */
+
+
+/*************************************************************************/ /*!
+@Function       OSCreateStatisticFolder
+@Description    Create a statistic folder to hold statistic entries.
+@Input          pszName   String containing the name for the folder.
+@Input          pvFolder  Reference from OSCreateStatisticFolder() of the folder
+                          to create the folder in, or IMG_NULL for the root.
+@Return         Pointer void reference to the folder created, which can be
+                passed to OSRemoveStatisticFolder() to remove the folder.
+*/ /**************************************************************************/
+IMG_PVOID OSCreateStatisticFolder(IMG_CHAR *pszName, IMG_PVOID pvFolder)
+{
+	PVR_DEBUGFS_DIR_DATA *psNewStatFolder = IMG_NULL;
+	int iResult;
+
+	iResult = PVRDebugFSCreateEntryDir(pszName, (PVR_DEBUGFS_DIR_DATA *)pvFolder, &psNewStatFolder);
+	return (iResult == 0) ? (void *)psNewStatFolder : IMG_NULL;
+} /* OSCreateStatisticFolder */
+
+
+/*************************************************************************/ /*!
+@Function       OSRemoveStatisticFolder
+@Description    Removes a statistic folder.
+@Input          pvFolder  Reference from OSCreateStatisticFolder() of the
+                          folder that should be removed.
+*/ /**************************************************************************/
+void OSRemoveStatisticFolder(IMG_PVOID pvFolder)
+{
+	PVRDebugFSRemoveEntryDir((PVR_DEBUGFS_DIR_DATA *)pvFolder);
+} /* OSRemoveStatisticFolder */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/osfunc_arm.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/osfunc_arm.c
new file mode 100644
index 0000000..3fc0f99
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/osfunc_arm.c
@@ -0,0 +1,173 @@
+/*************************************************************************/ /*!
+@File
+@Title          arm specific OS functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    OS functions who's implementation are processor specific
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include <linux/version.h>
+#include <linux/dma-mapping.h>
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0))
+ #include <asm/system.h>
+#endif
+#include <asm/cacheflush.h>
+
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "osfunc.h"
+#include "pvr_debug.h"
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
+#define ON_EACH_CPU(func, info, wait) on_each_cpu(func, info, wait)
+#else
+#define ON_EACH_CPU(func, info, wait) on_each_cpu(func, info, 0, wait)
+#endif
+
+static void per_cpu_cache_flush(void *arg)
+{
+	PVR_UNREFERENCED_PARAMETER(arg);
+	flush_cache_all();
+}
+
+void OSCPUOperation(PVRSRV_CACHE_OP uiCacheOp)
+{
+	switch(uiCacheOp)
+	{
+		/* Fall-through */
+		case PVRSRV_CACHE_OP_CLEAN:
+					/* No full (inner) cache clean op */
+					ON_EACH_CPU(per_cpu_cache_flush, NULL, 1);
+#if defined(CONFIG_OUTER_CACHE)
+					outer_clean_range(0, ULONG_MAX);
+#endif
+					break;
+
+		case PVRSRV_CACHE_OP_FLUSH:
+					ON_EACH_CPU(per_cpu_cache_flush, NULL, 1);
+#if defined(CONFIG_OUTER_CACHE) && \
+	(LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+					/* To use the "deferred flush" (not clean) DDK feature you need a kernel
+					 * implementation of outer_flush_all() for ARM CPUs with an outer cache
+					 * controller (e.g. PL310, common with Cortex A9 and later).
+					 *
+					 * Reference DDKs don't require this functionality, as they will only
+					 * clean the cache, never flush (clean+invalidate) it.
+					 */
+					outer_flush_all();
+#endif
+					break;
+
+		case PVRSRV_CACHE_OP_NONE:
+					break;
+
+		default:
+					PVR_DPF((PVR_DBG_ERROR,
+					"%s: Invalid cache operation type %d",
+					__FUNCTION__, uiCacheOp));
+					PVR_ASSERT(0);
+					break;
+	}
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34))
+static inline size_t pvr_dmac_range_len(const void *pvStart, const void *pvEnd)
+{
+	return (size_t)((char *)pvEnd - (char *)pvStart);
+}
+#endif
+
+void OSFlushCPUCacheRangeKM(IMG_PVOID pvVirtStart,
+							IMG_PVOID pvVirtEnd,
+							IMG_CPU_PHYADDR sCPUPhysStart,
+							IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
+	arm_dma_ops.sync_single_for_device(NULL, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_TO_DEVICE);
+	arm_dma_ops.sync_single_for_cpu(NULL, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_FROM_DEVICE);
+#else	/* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) */
+	/* Inner cache */
+	dmac_flush_range(pvVirtStart, pvVirtEnd);
+
+	/* Outer cache */
+	outer_flush_range(sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr);
+#endif	/* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) */
+}
+
+void OSCleanCPUCacheRangeKM(IMG_PVOID pvVirtStart,
+							IMG_PVOID pvVirtEnd,
+							IMG_CPU_PHYADDR sCPUPhysStart,
+							IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
+	arm_dma_ops.sync_single_for_device(NULL, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_TO_DEVICE);
+#else	/* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) */
+	/* Inner cache */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34))
+	dmac_clean_range(pvVirtStart, pvVirtEnd);
+#else
+	dmac_map_area(pvVirtStart, pvr_dmac_range_len(pvVirtStart, pvVirtEnd), DMA_TO_DEVICE);
+#endif
+
+	/* Outer cache */
+	outer_clean_range(sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr);
+#endif	/* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) */
+}
+
+void OSInvalidateCPUCacheRangeKM(IMG_PVOID pvVirtStart,
+								 IMG_PVOID pvVirtEnd,
+								 IMG_CPU_PHYADDR sCPUPhysStart,
+								 IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
+	arm_dma_ops.sync_single_for_cpu(NULL, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_FROM_DEVICE);
+#else	/* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) */
+#if defined(PVR_LINUX_DONT_USE_RANGE_BASED_INVALIDATE)
+	OSCleanCPUCacheRangeKM(pvVirtStart, pvVirtEnd, sCPUPhysStart, sCPUPhysEnd);
+#else
+	/* Inner cache */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34))
+	dmac_inv_range(pvVirtStart, pvVirtEnd);
+#else
+	dmac_map_area(pvVirtStart, pvr_dmac_range_len(pvVirtStart, pvVirtEnd), DMA_FROM_DEVICE);
+#endif
+
+	/* Outer cache */
+	outer_inv_range(sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr);
+#endif
+#endif	/* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) */
+}
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/osfunc_arm64.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/osfunc_arm64.c
new file mode 100644
index 0000000..7d1fd3d
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/osfunc_arm64.c
@@ -0,0 +1,116 @@
+/*************************************************************************/ /*!
+@File
+@Title          arm specific OS functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    OS functions who's implementation are processor specific
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include <linux/version.h>
+#include <linux/dma-mapping.h>
+#include <asm/cacheflush.h>
+
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "osfunc.h"
+#include "pvr_debug.h"
+
+#if defined(CONFIG_OUTER_CACHE)
+  /* If you encounter a 64-bit ARM system with an outer cache, you'll need
+   * to add the necessary code to manage that cache.  See osfunc_arm.c	
+   * for an example of how to do so.
+   */
+	#error "CONFIG_OUTER_CACHE not supported on arm64."
+#endif
+
+
+static void per_cpu_cache_flush(void *arg)
+{
+	PVR_UNREFERENCED_PARAMETER(arg);
+	flush_cache_all();
+}
+
+void OSCPUOperation(PVRSRV_CACHE_OP uiCacheOp)
+{
+	switch(uiCacheOp)
+	{
+		/* Fall-through */
+		case PVRSRV_CACHE_OP_CLEAN:
+					/* No full (inner) cache clean op */
+					on_each_cpu(per_cpu_cache_flush, NULL, 1);
+					break;
+
+		case PVRSRV_CACHE_OP_FLUSH:
+					on_each_cpu(per_cpu_cache_flush, NULL, 1);
+					break;
+
+		case PVRSRV_CACHE_OP_NONE:
+					break;
+
+		default:
+					PVR_DPF((PVR_DBG_ERROR,
+					"%s: Invalid cache operation type %d",
+					__FUNCTION__, uiCacheOp));
+					PVR_ASSERT(0);
+					break;
+	}
+}
+
+void OSFlushCPUCacheRangeKM(IMG_PVOID pvVirtStart,
+							IMG_PVOID pvVirtEnd,
+							IMG_CPU_PHYADDR sCPUPhysStart,
+							IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+	dma_ops->sync_single_for_device(NULL, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_TO_DEVICE);
+	dma_ops->sync_single_for_cpu(NULL, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_FROM_DEVICE);
+}
+
+void OSCleanCPUCacheRangeKM(IMG_PVOID pvVirtStart,
+							IMG_PVOID pvVirtEnd,
+							IMG_CPU_PHYADDR sCPUPhysStart,
+							IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+	dma_ops->sync_single_for_device(NULL, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_TO_DEVICE);
+}
+
+void OSInvalidateCPUCacheRangeKM(IMG_PVOID pvVirtStart,
+								 IMG_PVOID pvVirtEnd,
+								 IMG_CPU_PHYADDR sCPUPhysStart,
+								 IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+	dma_ops->sync_single_for_cpu(NULL, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_FROM_DEVICE);
+}
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/osfunc_x86.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/osfunc_x86.c
new file mode 100644
index 0000000..346a712
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/osfunc_x86.c
@@ -0,0 +1,141 @@
+/*************************************************************************/ /*!
+@File
+@Title          x86 specific OS functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    OS functions who's implementation are processor specific
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include <linux/version.h>
+#include <linux/smp.h>
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0))
+#include <asm/system.h>
+#endif
+
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "img_defs.h"
+#include "osfunc.h"
+#include "pvr_debug.h"
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
+#define ON_EACH_CPU(func, info, wait) on_each_cpu(func, info, wait)
+#else
+#define ON_EACH_CPU(func, info, wait) on_each_cpu(func, info, 0, wait)
+#endif
+
+static void per_cpu_cache_flush(void *arg)
+{
+    PVR_UNREFERENCED_PARAMETER(arg);
+    wbinvd();
+}
+
+void OSCPUOperation(PVRSRV_CACHE_OP uiCacheOp)
+{
+	switch(uiCacheOp)
+	{
+		/* Fall-through */
+		case PVRSRV_CACHE_OP_CLEAN:
+		case PVRSRV_CACHE_OP_FLUSH:
+		case PVRSRV_CACHE_OP_INVALIDATE:
+					on_each_cpu(per_cpu_cache_flush, NULL, 1);
+					break;
+
+		case PVRSRV_CACHE_OP_NONE:
+					break;
+
+		default:
+					PVR_DPF((PVR_DBG_ERROR,
+					"%s: Invalid cache operation type %d",
+					__FUNCTION__, uiCacheOp));
+					PVR_ASSERT(0);
+					break;
+	}
+}
+
+static void x86_flush_cache_range(const void *pvStart, const void *pvEnd)
+{
+	IMG_BYTE *pbStart = (IMG_BYTE *)pvStart;
+	IMG_BYTE *pbEnd = (IMG_BYTE *)pvEnd;
+	IMG_BYTE *pbBase;
+
+	pbEnd = (IMG_BYTE *)PVR_ALIGN((IMG_UINTPTR_T)pbEnd,
+	                              (IMG_UINTPTR_T)boot_cpu_data.x86_clflush_size);
+
+	mb();
+	for(pbBase = pbStart; pbBase < pbEnd; pbBase += boot_cpu_data.x86_clflush_size)
+	{
+		clflush(pbBase);
+	}
+	mb();
+}
+
+void OSFlushCPUCacheRangeKM(IMG_PVOID pvVirtStart,
+							IMG_PVOID pvVirtEnd,
+							IMG_CPU_PHYADDR sCPUPhysStart,
+							IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+	PVR_UNREFERENCED_PARAMETER(sCPUPhysStart);
+	PVR_UNREFERENCED_PARAMETER(sCPUPhysEnd);
+
+	x86_flush_cache_range(pvVirtStart, pvVirtEnd);
+}
+
+
+void OSCleanCPUCacheRangeKM(IMG_PVOID pvVirtStart,
+							IMG_PVOID pvVirtEnd,
+							IMG_CPU_PHYADDR sCPUPhysStart,
+							IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+	PVR_UNREFERENCED_PARAMETER(sCPUPhysStart);
+	PVR_UNREFERENCED_PARAMETER(sCPUPhysEnd);
+
+	/* No clean feature on x86 */
+	x86_flush_cache_range(pvVirtStart, pvVirtEnd);
+}
+
+void OSInvalidateCPUCacheRangeKM(IMG_PVOID pvVirtStart,
+								 IMG_PVOID pvVirtEnd,
+								 IMG_CPU_PHYADDR sCPUPhysStart,
+								 IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+	PVR_UNREFERENCED_PARAMETER(sCPUPhysStart);
+	PVR_UNREFERENCED_PARAMETER(sCPUPhysEnd);
+
+	/* No invalidate-only support */
+	x86_flush_cache_range(pvVirtStart, pvVirtEnd);
+}
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/ossecure_export.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/ossecure_export.c
new file mode 100644
index 0000000..e1c086426a
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/ossecure_export.c
@@ -0,0 +1,189 @@
+/*************************************************************************/ /*!
+@File
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/version.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/dcache.h>
+#include <linux/mount.h>
+#include <linux/sched.h>
+#include <linux/cred.h>
+
+#include "img_types.h"
+#include "ossecure_export.h"
+#include "private_data.h"
+#include "pvr_debug.h"
+#include "driverlock.h"
+
+#if defined(SUPPORT_DRM)
+#include "pvr_drm.h"
+#endif
+
+PVRSRV_ERROR OSSecureExport(CONNECTION_DATA *psConnection,
+							IMG_PVOID pvData,
+							IMG_SECURE_TYPE *phSecure,
+							CONNECTION_DATA **ppsSecureConnection)
+{
+	CONNECTION_DATA *psSecureConnection;
+	struct file *connection_file;
+	struct file *secure_file;
+	struct dentry *secure_dentry;
+	struct vfsmount *secure_mnt;
+	int secure_fd;
+	IMG_BOOL bPmrUnlocked = IMG_FALSE;
+	PVRSRV_ERROR eError;
+
+	/* Obtain the current connections struct file */
+	connection_file = LinuxFileFromConnection(psConnection);
+
+	/* Allocate a fd number */
+	secure_fd = get_unused_fd();
+	if (secure_fd < 0)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e0;
+	}
+
+	/*
+		Get a reference to the dentry so when close is called we don't
+		drop the last reference too early and delete the file
+	*/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
+	secure_dentry = dget(connection_file->f_path.dentry);
+	secure_mnt = mntget(connection_file->f_path.mnt);
+#else
+	secure_dentry = dget(connection_file->f_dentry);
+	secure_mnt = mntget(connection_file->f_vfsmnt);
+#endif
+
+	/* PMR lock needs to be released before bridge lock to keep lock hierarchy
+	 * and avoid deadlock situation.
+	 * OSSecureExport() can be called from functions that are not acquiring
+	 * PMR lock (e.g. by PVRSRVSyncPrimServerSecureExportKM()) so we have to
+	 * check if PMR lock is locked. */
+	if (PMRIsLockedByMe())
+	{
+		PMRUnlock();
+		bPmrUnlocked = IMG_TRUE;
+	}
+	OSReleaseBridgeLock();
+
+	/* Open our device (using the file information from our current connection) */
+	secure_file = dentry_open(
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
+					  &connection_file->f_path,
+#else
+					  connection_file->f_dentry,
+					  connection_file->f_vfsmnt,
+#endif
+					  connection_file->f_flags,
+					  current_cred());
+
+	OSAcquireBridgeLock();
+	if (bPmrUnlocked)
+		PMRLock();
+
+	/* Bail if the open failed */
+	if (IS_ERR(secure_file))
+	{
+		put_unused_fd(secure_fd);
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e0;
+	}
+
+	/* Return the new services connection our secure data created */
+	psSecureConnection = LinuxConnectionFromFile(secure_file);
+
+	if(psSecureConnection == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Invalid connection data"));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto e0;
+	}
+
+	/* Bind our struct file with it's fd number */
+	fd_install(secure_fd, secure_file);
+
+	/* Save the private data */
+	PVR_ASSERT(psSecureConnection->hSecureData == IMG_NULL);
+	psSecureConnection->hSecureData = pvData;
+
+	*phSecure = secure_fd;
+	*ppsSecureConnection = psSecureConnection;
+	return PVRSRV_OK;
+
+e0:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+PVRSRV_ERROR OSSecureImport(IMG_SECURE_TYPE hSecure, IMG_PVOID *ppvData)
+{
+	struct file *secure_file;
+	CONNECTION_DATA *psSecureConnection;
+	PVRSRV_ERROR eError;
+
+	secure_file = fget(hSecure);
+
+	if (!secure_file)
+	{
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto err_out;
+	}
+
+	psSecureConnection = LinuxConnectionFromFile(secure_file);
+	if ((psSecureConnection == IMG_NULL) || (psSecureConnection->hSecureData == IMG_NULL))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Invalid connection data"));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto err_fput;
+	}
+
+	*ppvData = psSecureConnection->hSecureData;
+	fput(secure_file);
+	return PVRSRV_OK;
+
+err_fput:
+	fput(secure_file);
+err_out:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/pdump.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/pdump.c
new file mode 100644
index 0000000..e1fbf28
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/pdump.c
@@ -0,0 +1,462 @@
+/*************************************************************************/ /*!
+@File
+@Title          Parameter dump macro target routines
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#if defined (PDUMP)
+
+#include <asm/atomic.h>
+#include <stdarg.h>
+
+#include "pvrversion.h"
+#include "pvr_debug.h"
+#include "pvrsrv.h"
+#include "osfunc.h"
+
+#include "dbgdrvif_srv5.h"
+#include "mm.h"
+#include "allocmem.h"
+#include "pdump_km.h"
+#include "pdump_osfunc.h"
+
+#include <linux/kernel.h> // sprintf
+#include <linux/string.h> // strncpy, strlen
+#include <linux/mutex.h>
+
+#define PDUMP_DATAMASTER_PIXEL		(1)
+#define PDUMP_DATAMASTER_EDM		(3)
+
+static PDBGKM_SERVICE_TABLE gpfnDbgDrv = IMG_NULL;
+
+
+typedef struct PDBG_PDUMP_STATE_TAG
+{
+	PDBG_STREAM psStream[PDUMP_NUM_CHANNELS];
+
+	IMG_CHAR *pszMsg;
+	IMG_CHAR *pszScript;
+	IMG_CHAR *pszFile;
+
+} PDBG_PDUMP_STATE;
+
+static PDBG_PDUMP_STATE gsDBGPdumpState = {{IMG_NULL}, IMG_NULL, IMG_NULL, IMG_NULL};
+
+#define SZ_MSG_SIZE_MAX			PVRSRV_PDUMP_MAX_COMMENT_SIZE-1
+#define SZ_SCRIPT_SIZE_MAX		PVRSRV_PDUMP_MAX_COMMENT_SIZE-1
+#define SZ_FILENAME_SIZE_MAX	PVRSRV_PDUMP_MAX_COMMENT_SIZE-1
+
+static struct mutex gsPDumpMutex;
+
+void DBGDrvGetServiceTable(void **fn_table);
+
+
+/*!
+ * \name	PDumpOSGetScriptString
+ */
+PVRSRV_ERROR PDumpOSGetScriptString(IMG_HANDLE *phScript,
+									IMG_UINT32 *pui32MaxLen)
+{
+	*phScript = (IMG_HANDLE)gsDBGPdumpState.pszScript;
+	*pui32MaxLen = SZ_SCRIPT_SIZE_MAX;
+	if (!*phScript)
+	{
+		return PVRSRV_ERROR_PDUMP_NOT_ACTIVE;
+	}
+	return PVRSRV_OK;
+}
+
+/*!
+ * \name	PDumpOSGetMessageString
+ */
+PVRSRV_ERROR PDumpOSGetMessageString(IMG_CHAR **ppszMsg,
+									 IMG_UINT32 *pui32MaxLen)
+{
+	*ppszMsg = gsDBGPdumpState.pszMsg;
+	*pui32MaxLen = SZ_MSG_SIZE_MAX;
+	if (!*ppszMsg)
+	{
+		return PVRSRV_ERROR_PDUMP_NOT_ACTIVE;
+	}
+	return PVRSRV_OK;
+}
+
+/*!
+ * \name	PDumpOSGetFilenameString
+ */
+PVRSRV_ERROR PDumpOSGetFilenameString(IMG_CHAR **ppszFile,
+									 IMG_UINT32 *pui32MaxLen)
+{
+	*ppszFile = gsDBGPdumpState.pszFile;
+	*pui32MaxLen = SZ_FILENAME_SIZE_MAX;
+	if (!*ppszFile)
+	{
+		return PVRSRV_ERROR_PDUMP_NOT_ACTIVE;
+	}
+	return PVRSRV_OK;
+}
+
+/*!
+ * \name	PDumpOSBufprintf
+ */
+PVRSRV_ERROR PDumpOSBufprintf(IMG_HANDLE hBuf, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR* pszFormat, ...)
+{
+	IMG_CHAR* pszBuf = hBuf;
+	IMG_INT32 n;
+	va_list	vaArgs;
+
+	va_start(vaArgs, pszFormat);
+
+	n = vsnprintf(pszBuf, ui32ScriptSizeMax, pszFormat, vaArgs);
+
+	va_end(vaArgs);
+
+	if (n>=(IMG_INT32)ui32ScriptSizeMax || n==-1)	/* glibc >= 2.1 or glibc 2.0 */
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Buffer overflow detected, pdump output may be incomplete."));
+
+		return PVRSRV_ERROR_PDUMP_BUF_OVERFLOW;
+	}
+
+#if defined(PDUMP_DEBUG_OUTFILES)
+	g_ui32EveryLineCounter++;
+#endif
+
+	/* Put line ending sequence at the end if it isn't already there */
+	PDumpOSVerifyLineEnding(pszBuf, ui32ScriptSizeMax);
+
+	return PVRSRV_OK;
+}
+
+/*!
+ * \name	PDumpOSVSprintf
+ */
+PVRSRV_ERROR PDumpOSVSprintf(IMG_CHAR *pszComment, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR* pszFormat, PDUMP_va_list vaArgs)
+{
+	IMG_INT32 n;
+
+	n = vsnprintf(pszComment, ui32ScriptSizeMax, pszFormat, vaArgs);
+
+	if (n>=(IMG_INT32)ui32ScriptSizeMax || n==-1)	/* glibc >= 2.1 or glibc 2.0 */
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Buffer overflow detected, pdump output may be incomplete."));
+
+		return PVRSRV_ERROR_PDUMP_BUF_OVERFLOW;
+	}
+
+	return PVRSRV_OK;
+}
+
+/*!
+ * \name	PDumpOSDebugPrintf
+ */
+void PDumpOSDebugPrintf(IMG_CHAR* pszFormat, ...)
+{
+	PVR_UNREFERENCED_PARAMETER(pszFormat);
+
+	/* FIXME: Implement using services PVR_DBG or otherwise with kprintf */
+}
+
+/*!
+ * \name	PDumpOSSprintf
+ */
+PVRSRV_ERROR PDumpOSSprintf(IMG_CHAR *pszComment, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR *pszFormat, ...)
+{
+	IMG_INT32 n;
+	va_list	vaArgs;
+
+	va_start(vaArgs, pszFormat);
+
+	n = vsnprintf(pszComment, ui32ScriptSizeMax, pszFormat, vaArgs);
+
+	va_end(vaArgs);
+
+	if (n>=(IMG_INT32)ui32ScriptSizeMax || n==-1)	/* glibc >= 2.1 or glibc 2.0 */
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Buffer overflow detected, pdump output may be incomplete."));
+
+		return PVRSRV_ERROR_PDUMP_BUF_OVERFLOW;
+	}
+
+	return PVRSRV_OK;
+}
+
+/*!
+ * \name	PDumpOSBuflen
+ */
+IMG_UINT32 PDumpOSBuflen(IMG_HANDLE hBuffer, IMG_UINT32 ui32BufferSizeMax)
+{
+	IMG_CHAR* pszBuf = hBuffer;
+	IMG_UINT32 ui32Count = 0;
+
+	while ((pszBuf[ui32Count]!=0) && (ui32Count<ui32BufferSizeMax) )
+	{
+		ui32Count++;
+	}
+	return(ui32Count);
+}
+
+/*!
+ * \name	PDumpOSVerifyLineEnding
+ */
+void PDumpOSVerifyLineEnding(IMG_HANDLE hBuffer, IMG_UINT32 ui32BufferSizeMax)
+{
+	IMG_UINT32 ui32Count;
+	IMG_CHAR* pszBuf = hBuffer;
+
+	/* strlen */
+	ui32Count = PDumpOSBuflen(hBuffer, ui32BufferSizeMax);
+
+	/* Put \n sequence at the end if it isn't already there */
+	if ((ui32Count >= 1) && (pszBuf[ui32Count-1] != '\n') && (ui32Count<ui32BufferSizeMax))
+	{
+		pszBuf[ui32Count] = '\n';
+		ui32Count++;
+		pszBuf[ui32Count] = '\0';
+	}
+}
+
+
+
+/*!
+ * \name	PDumpOSGetStreamOffset
+ */
+IMG_BOOL PDumpOSSetSplitMarker(IMG_HANDLE hStream, IMG_UINT32 ui32Marker)
+{
+	PDBG_STREAM psStream = (PDBG_STREAM) hStream;
+
+	PVR_ASSERT(gpfnDbgDrv);
+	gpfnDbgDrv->pfnSetMarker(psStream, ui32Marker);
+	return IMG_TRUE;
+}
+
+/*!
+ *	\name	PDumpOSDebugDriverWrite
+ */
+IMG_UINT32 PDumpOSDebugDriverWrite( IMG_HANDLE psStream,
+									IMG_UINT8 *pui8Data,
+									IMG_UINT32 ui32BCount)
+{
+	PVR_ASSERT(gpfnDbgDrv != IMG_NULL);
+
+	return gpfnDbgDrv->pfnDBGDrivWrite2(psStream, pui8Data, ui32BCount);
+}
+
+/*!
+ *	\name	PDumpOSReleaseExecution
+ */
+void PDumpOSReleaseExecution(void)
+{
+	OSReleaseThreadQuanta();
+}
+
+/**************************************************************************
+ * Function Name  : PDumpOSInit
+ * Outputs        : None
+ * Returns        :
+ * Description    : Reset connection to vldbgdrv
+ *					Then try to connect to PDUMP streams
+**************************************************************************/
+PVRSRV_ERROR PDumpOSInit(PDUMP_CHANNEL* psParam, PDUMP_CHANNEL* psScript,
+		IMG_UINT32* pui32InitCapMode, IMG_CHAR** ppszEnvComment)
+{
+	PVRSRV_ERROR     eError;
+
+	*pui32InitCapMode = DEBUG_CAPMODE_FRAMED;
+	*ppszEnvComment = IMG_NULL;
+
+	/* If we tried this earlier, then we might have connected to the driver
+	 * But if pdump.exe was running then the stream connected would fail
+	 */
+	if (!gpfnDbgDrv)
+	{
+		DBGDrvGetServiceTable((void **)&gpfnDbgDrv);
+
+		// If something failed then no point in trying to connect streams
+		if (gpfnDbgDrv == IMG_NULL)
+		{
+			return PVRSRV_ERROR_PDUMP_NOT_AVAILABLE;
+		}
+		
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		if(!gsDBGPdumpState.pszFile)
+		{
+			gsDBGPdumpState.pszFile = OSAllocMem(SZ_FILENAME_SIZE_MAX);
+			if (gsDBGPdumpState.pszFile == IMG_NULL)
+			{
+				goto init_failed;
+			}
+		}
+
+		if(!gsDBGPdumpState.pszMsg)
+		{
+			gsDBGPdumpState.pszMsg = OSAllocMem(SZ_MSG_SIZE_MAX);
+			if (gsDBGPdumpState.pszMsg == IMG_NULL)
+			{
+				goto init_failed;
+			}
+		}
+
+		if(!gsDBGPdumpState.pszScript)
+		{
+			gsDBGPdumpState.pszScript = OSAllocMem(SZ_SCRIPT_SIZE_MAX);
+			if (gsDBGPdumpState.pszScript == IMG_NULL)
+			{
+				goto init_failed;
+			}
+		}
+
+		eError = PVRSRV_ERROR_RESOURCE_UNAVAILABLE;
+		if (!gpfnDbgDrv->pfnCreateStream(PDUMP_PARAM_CHANNEL_NAME, 0, 10, &psParam->hInit, &psParam->hMain, &psParam->hDeinit))
+		{
+			goto init_failed;
+		}
+		gsDBGPdumpState.psStream[PDUMP_CHANNEL_PARAM] = psParam->hMain;
+
+
+		if (!gpfnDbgDrv->pfnCreateStream(PDUMP_SCRIPT_CHANNEL_NAME, 0, 10, &psScript->hInit, &psScript->hMain, &psScript->hDeinit))
+		{
+			goto init_failed;
+		}
+		gsDBGPdumpState.psStream[PDUMP_CHANNEL_SCRIPT] = psScript->hMain;
+	}
+
+	return PVRSRV_OK;
+
+init_failed:
+	PDumpOSDeInit(psParam, psScript);
+	return eError;
+}
+
+
+void PDumpOSDeInit(PDUMP_CHANNEL* psParam, PDUMP_CHANNEL* psScript)
+{
+	gpfnDbgDrv->pfnDestroyStream(psScript->hInit, psScript->hMain, psScript->hDeinit);
+	gpfnDbgDrv->pfnDestroyStream(psParam->hInit, psParam->hMain, psParam->hDeinit);
+
+	if(gsDBGPdumpState.pszFile)
+	{
+		OSFreeMem(gsDBGPdumpState.pszFile);
+		gsDBGPdumpState.pszFile = IMG_NULL;
+	}
+
+	if(gsDBGPdumpState.pszScript)
+	{
+		OSFreeMem(gsDBGPdumpState.pszScript);
+		gsDBGPdumpState.pszScript = IMG_NULL;
+	}
+
+	if(gsDBGPdumpState.pszMsg)
+	{
+		OSFreeMem(gsDBGPdumpState.pszMsg);
+		gsDBGPdumpState.pszMsg = IMG_NULL;
+	}
+
+	gpfnDbgDrv = IMG_NULL;
+}
+
+PVRSRV_ERROR PDumpOSCreateLock(void)
+{
+	mutex_init(&gsPDumpMutex);
+	return PVRSRV_OK;
+}
+
+void PDumpOSDestroyLock(void)
+{
+	/* no destruction work to do, just assert
+	 * the lock is not held */
+	PVR_ASSERT(mutex_is_locked(&gsPDumpMutex) == 0);
+}
+
+void PDumpOSLock(void)
+{
+	mutex_lock(&gsPDumpMutex);
+}
+
+void PDumpOSUnlock(void)
+{
+	mutex_unlock(&gsPDumpMutex);
+}
+
+IMG_UINT32 PDumpOSGetCtrlState(IMG_HANDLE hDbgStream,
+		IMG_UINT32 ui32StateID)
+{
+	return (gpfnDbgDrv->pfnGetCtrlState((PDBG_STREAM)hDbgStream, ui32StateID));
+}
+
+void PDumpOSSetFrame(IMG_UINT32 ui32Frame)
+{
+	gpfnDbgDrv->pfnSetFrame(ui32Frame);
+	return;
+}
+
+IMG_BOOL PDumpOSAllowInitPhaseToComplete(IMG_UINT32 eModuleID)
+{
+ 	return (eModuleID != IMG_PDUMPCTRL);
+}
+
+#if defined(PVR_TESTING_UTILS)
+void PDumpOSDumpState(void);
+
+void PDumpOSDumpState(void)
+{
+	PVR_LOG(("---- PDUMP LINUX: gpfnDbgDrv( %p )  gpfnDbgDrv.ui32Size( %d )",
+			gpfnDbgDrv, gpfnDbgDrv->ui32Size));
+
+	PVR_LOG(("---- PDUMP LINUX: gsDBGPdumpState( %p )",
+			&gsDBGPdumpState));
+
+	PVR_LOG(("---- PDUMP LINUX: gsDBGPdumpState.psStream[0]( %p )",
+			gsDBGPdumpState.psStream[0]));
+
+	(void) gpfnDbgDrv->pfnGetCtrlState(gsDBGPdumpState.psStream[0], 0xFE);
+
+	PVR_LOG(("---- PDUMP LINUX: gsDBGPdumpState.psStream[1]( %p )",
+			gsDBGPdumpState.psStream[1]));
+
+	(void) gpfnDbgDrv->pfnGetCtrlState(gsDBGPdumpState.psStream[1], 0xFE);
+
+	/* Now dump non-stream specific info */
+	(void) gpfnDbgDrv->pfnGetCtrlState(gsDBGPdumpState.psStream[1], 0xFF);
+}
+#endif
+
+#endif /* #if defined (PDUMP) */
+/*****************************************************************************
+ End of file (PDUMP.C)
+*****************************************************************************/
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/physmem_dmabuf.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/physmem_dmabuf.c
new file mode 100644
index 0000000..bcc9882
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/physmem_dmabuf.c
@@ -0,0 +1,702 @@
+/*************************************************************************/ /*!
+@File           physmem_dmabuf.c
+@Title          dmabuf memory allocator
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Part of the memory management. This module is responsible for
+                implementing the function callbacks for dmabuf memory.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if defined(SUPPORT_DRM)
+#include "pvr_drm.h"
+#endif
+
+#if !defined(SUPPORT_DRM) || defined(PVR_DRM_USE_PRIME)
+
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+
+#include "allocmem.h"
+#include "osfunc.h"
+#include "pvrsrv.h"
+#include "pdump_physmem.h"
+#include "pmr.h"
+#include "pmr_impl.h"
+#include "physmem_dmabuf.h"
+
+#if defined(SUPPORT_ION)
+#include "hash.h"
+#include "ion_sys.h"
+#endif
+
+#if defined(PVR_RI_DEBUG)
+#include "ri_server.h"
+#endif
+
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/dma-buf.h>
+#include <linux/scatterlist.h>
+
+typedef struct _PMR_DMA_BUF_DATA_
+{
+	/* Filled in at PMR create time */
+	PHYS_HEAP *psPhysHeap;
+	struct dma_buf_attachment *psAttachment;
+	PFN_DESTROY_DMABUF_PMR pfnDestroy;
+	IMG_BOOL bPoisonOnFree;
+	IMG_HANDLE hPDumpAllocInfo;
+
+	/* Modified by PMR lock/unlock */
+	struct sg_table *psSgTable;
+	IMG_DEV_PHYADDR *pasDevPhysAddr;
+	IMG_UINT32 ui32PageCount;
+} PMR_DMA_BUF_DATA;
+
+/* Start size of the g_psDmaBufHash hash table */
+#define DMA_BUF_HASH_SIZE 20
+
+#if defined(SUPPORT_ION)
+static HASH_TABLE *g_psDmaBufHash = IMG_NULL;
+static IMG_UINT32 g_ui32HashRefCount = 0;
+#endif
+
+#if defined(PVR_ANDROID_ION_USE_SG_LENGTH)
+#define pvr_sg_length(sg) ((sg)->length)
+#else
+#define pvr_sg_length(sg) sg_dma_len(sg)
+#endif
+
+static const IMG_CHAR _AllocPoison[] = "^PoIsOn";
+static const IMG_UINT32 _AllocPoisonSize = 7;
+static const IMG_CHAR _FreePoison[] = "<DEAD-BEEF>";
+static const IMG_UINT32 _FreePoisonSize = 11;
+
+static void _Poison(IMG_PVOID pvKernAddr,
+		    IMG_DEVMEM_SIZE_T uiBufferSize,
+		    const IMG_CHAR *pacPoisonData,
+		    IMG_SIZE_T uiPoisonSize)
+{
+	IMG_DEVMEM_SIZE_T uiDestByteIndex;
+	IMG_CHAR *pcDest = pvKernAddr;
+	IMG_UINT32 uiSrcByteIndex = 0;
+
+	for (uiDestByteIndex = 0; uiDestByteIndex < uiBufferSize; uiDestByteIndex++)
+	{
+		pcDest[uiDestByteIndex] = pacPoisonData[uiSrcByteIndex];
+		uiSrcByteIndex++;
+		if (uiSrcByteIndex == uiPoisonSize)
+		{
+			uiSrcByteIndex = 0;
+		}
+	}
+}
+
+
+/*****************************************************************************
+ *                       PMR callback functions                              *
+ *****************************************************************************/
+
+static PVRSRV_ERROR PMRFinalizeDmaBuf(PMR_IMPL_PRIVDATA pvPriv)
+{
+	PMR_DMA_BUF_DATA *psPrivData = pvPriv;
+	struct dma_buf *psDmaBuf = psPrivData->psAttachment->dmabuf;
+	PVRSRV_ERROR eError;
+
+	if (psPrivData->hPDumpAllocInfo)
+	{
+		PDumpPMRFree(psPrivData->hPDumpAllocInfo);
+		psPrivData->hPDumpAllocInfo = NULL;
+	}
+
+	if (psPrivData->bPoisonOnFree)
+	{
+		IMG_PVOID pvKernAddr;
+		int i, err;
+
+		err = dma_buf_begin_cpu_access(psDmaBuf, 0, psDmaBuf->size, DMA_FROM_DEVICE);
+		if (err)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to begin cpu access for free poisoning", __func__));
+			PVR_ASSERT(IMG_FALSE);
+			goto exit;
+		}
+
+		for (i = 0; i < psDmaBuf->size / PAGE_SIZE; i++)
+		{
+			pvKernAddr = dma_buf_kmap(psDmaBuf, i);
+			if (IS_ERR_OR_NULL(pvKernAddr))
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s: Failed to poison allocation before free", __func__));
+				PVR_ASSERT(IMG_FALSE);
+				goto exit_end_access;
+			}
+
+			_Poison(pvKernAddr, PAGE_SIZE, _FreePoison, _FreePoisonSize);
+
+			dma_buf_kunmap(psDmaBuf, i, pvKernAddr);
+		}
+
+exit_end_access:
+		dma_buf_end_cpu_access(psDmaBuf, 0, psDmaBuf->size, DMA_TO_DEVICE);
+	}
+
+exit:
+	if (psPrivData->pfnDestroy)
+	{
+		eError = psPrivData->pfnDestroy(psPrivData->psPhysHeap, psPrivData->psAttachment);
+		if (eError != PVRSRV_OK)
+		{
+			return eError;
+		}
+	}
+
+	OSFreeMem(psPrivData);
+
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR PMRLockPhysAddressesDmaBuf(PMR_IMPL_PRIVDATA pvPriv,
+					       IMG_UINT32 uiLog2DevPageSize)
+{
+	PMR_DMA_BUF_DATA *psPrivData = pvPriv;
+	struct dma_buf_attachment *psAttachment = psPrivData->psAttachment;
+	IMG_DEV_PHYADDR *pasDevPhysAddr = NULL;
+	IMG_CPU_PHYADDR sCpuPhysAddr;
+	IMG_UINT32 ui32PageCount = 0;
+	struct scatterlist *sg;
+	struct sg_table *table;
+	PVRSRV_ERROR eError;
+	IMG_UINT32 i;
+
+	table = dma_buf_map_attachment(psAttachment, DMA_BIDIRECTIONAL);
+	if (!table)
+	{
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto fail_map;
+	}
+
+	/*
+	 * We do a two pass process, 1st workout how many pages there
+	 * are, 2nd fill in the data.
+	 */
+	for_each_sg(table->sgl, sg, table->nents, i)
+	{
+		ui32PageCount += PAGE_ALIGN(pvr_sg_length(sg)) / PAGE_SIZE;
+	}
+
+	if (WARN_ON(!ui32PageCount))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to lock dmabuf with no pages",
+				 __func__));
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto fail_page_count;
+	}
+
+	pasDevPhysAddr = OSAllocMem(sizeof(*pasDevPhysAddr) * ui32PageCount);
+	if (!pasDevPhysAddr)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_alloc;
+	}
+
+	ui32PageCount = 0;
+
+	for_each_sg(table->sgl, sg, table->nents, i)
+	{
+		IMG_UINT32 j;
+
+		for (j = 0; j < pvr_sg_length(sg); j += PAGE_SIZE)
+		{
+			/* Pass 2: Get the page data */
+			sCpuPhysAddr.uiAddr = sg_phys(sg) + j;
+
+			PhysHeapCpuPAddrToDevPAddr(psPrivData->psPhysHeap, 
+						   1,
+						   &pasDevPhysAddr[ui32PageCount],
+						   &sCpuPhysAddr);
+			ui32PageCount++;
+		}
+	}
+
+	psPrivData->pasDevPhysAddr = pasDevPhysAddr;
+	psPrivData->ui32PageCount = ui32PageCount;
+	psPrivData->psSgTable = table;
+
+	return PVRSRV_OK;
+
+fail_alloc:
+fail_page_count:
+	dma_buf_unmap_attachment(psAttachment, table, DMA_BIDIRECTIONAL);
+
+fail_map:
+	PVR_ASSERT(eError!= PVRSRV_OK);
+	return eError;
+}
+
+static PVRSRV_ERROR PMRUnlockPhysAddressesDmaBuf(PMR_IMPL_PRIVDATA pvPriv)
+{
+	PMR_DMA_BUF_DATA *psPrivData = pvPriv;
+	struct dma_buf_attachment *psAttachment = psPrivData->psAttachment;
+	struct sg_table *psSgTable = psPrivData->psSgTable;
+
+	OSFreeMem(psPrivData->pasDevPhysAddr);
+
+	psPrivData->pasDevPhysAddr = NULL;
+	psPrivData->ui32PageCount = 0;
+
+	dma_buf_unmap_attachment(psAttachment, psSgTable, DMA_BIDIRECTIONAL);
+
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR PMRDevPhysAddrDmaBuf(PMR_IMPL_PRIVDATA pvPriv,
+					 IMG_UINT32 ui32NumOfPages,
+					 IMG_DEVMEM_OFFSET_T *puiOffset,
+					 IMG_BOOL *pbValid,
+					 IMG_DEV_PHYADDR *psDevPAddr)
+{
+	PMR_DMA_BUF_DATA *psPrivData = pvPriv;
+	IMG_UINT32 ui32PageIndex;
+	IMG_UINT32 idx;
+
+	for (idx=0; idx < ui32NumOfPages; idx++)
+	{
+		if (pbValid[idx])
+		{
+			IMG_UINT32 ui32InPageOffset;
+
+			ui32PageIndex = puiOffset[idx] >> PAGE_SHIFT;
+			ui32InPageOffset = puiOffset[idx] - ((IMG_DEVMEM_OFFSET_T)ui32PageIndex << PAGE_SHIFT);
+
+			PVR_ASSERT(ui32PageIndex < psPrivData->ui32PageCount);
+			PVR_ASSERT(ui32InPageOffset < PAGE_SIZE);
+
+			psDevPAddr[idx].uiAddr = psPrivData->pasDevPhysAddr[ui32PageIndex].uiAddr + ui32InPageOffset;
+		}
+	}
+
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR
+PMRAcquireKernelMappingDataDmaBuf(PMR_IMPL_PRIVDATA pvPriv,
+				  IMG_SIZE_T uiOffset,
+				  IMG_SIZE_T uiSize,
+				  void **ppvKernelAddressOut,
+				  IMG_HANDLE *phHandleOut,
+				  PMR_FLAGS_T ulFlags)
+{
+	PMR_DMA_BUF_DATA *psPrivData = pvPriv;
+	struct dma_buf *psDmaBuf = psPrivData->psAttachment->dmabuf;
+	IMG_PVOID pvKernAddr;
+	PVRSRV_ERROR eError;
+	int err;
+
+	err = dma_buf_begin_cpu_access(psDmaBuf, 0, psDmaBuf->size, DMA_BIDIRECTIONAL);
+	if (err)
+	{
+		eError = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING;
+		goto fail;
+	}
+
+	pvKernAddr = dma_buf_vmap(psDmaBuf);
+	if (IS_ERR_OR_NULL(pvKernAddr))
+	{
+		eError = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING;
+		goto fail_kmap;
+	}
+
+	*ppvKernelAddressOut = pvKernAddr + uiOffset;
+	*phHandleOut = pvKernAddr;
+
+	return PVRSRV_OK;
+
+fail_kmap:
+	dma_buf_end_cpu_access(psDmaBuf, 0, psDmaBuf->size, DMA_BIDIRECTIONAL);
+
+fail:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+static void PMRReleaseKernelMappingDataDmaBuf(PMR_IMPL_PRIVDATA pvPriv,
+					      IMG_HANDLE hHandle)
+{
+	PMR_DMA_BUF_DATA *psPrivData = pvPriv;
+	struct dma_buf *psDmaBuf = psPrivData->psAttachment->dmabuf;
+	IMG_PVOID pvKernAddr = hHandle;
+
+	dma_buf_vunmap(psDmaBuf, pvKernAddr);
+
+	dma_buf_end_cpu_access(psDmaBuf, 0, psDmaBuf->size, DMA_BIDIRECTIONAL);
+}
+
+static PMR_IMPL_FUNCTAB _sPMRDmaBufFuncTab =
+{
+	.pfnLockPhysAddresses		= PMRLockPhysAddressesDmaBuf,
+	.pfnUnlockPhysAddresses		= PMRUnlockPhysAddressesDmaBuf,
+	.pfnDevPhysAddr			= PMRDevPhysAddrDmaBuf,
+	.pfnAcquireKernelMappingData	= PMRAcquireKernelMappingDataDmaBuf,
+	.pfnReleaseKernelMappingData	= PMRReleaseKernelMappingDataDmaBuf,
+	.pfnFinalize			= PMRFinalizeDmaBuf,
+};
+
+/*****************************************************************************
+ *                       Public facing interface                             *
+ *****************************************************************************/
+
+PVRSRV_ERROR
+PhysmemCreateNewDmaBufBackedPMR(PHYS_HEAP *psHeap,
+				struct dma_buf_attachment *psAttachment,
+				PFN_DESTROY_DMABUF_PMR pfnDestroy,
+				PVRSRV_MEMALLOCFLAGS_T uiFlags,
+				PMR **ppsPMRPtr)
+{
+	struct dma_buf *psDmaBuf = psAttachment->dmabuf;
+	PMR_DMA_BUF_DATA *psPrivData;
+	IMG_BOOL bMappingTable = IMG_TRUE;
+	PMR_FLAGS_T uiPMRFlags;
+	IMG_BOOL bZeroOnAlloc;
+	IMG_BOOL bPoisonOnAlloc;
+	IMG_BOOL bPoisonOnFree;
+	PVRSRV_ERROR eError;
+
+	if (uiFlags & PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC)
+	{
+		bZeroOnAlloc = IMG_TRUE;
+	}
+	else
+	{
+		bZeroOnAlloc = IMG_FALSE;
+	}
+
+	if (uiFlags & PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC)
+	{
+		bPoisonOnAlloc = IMG_TRUE;
+	}
+	else
+	{
+		bPoisonOnAlloc = IMG_FALSE;
+	}
+
+	if (uiFlags & PVRSRV_MEMALLOCFLAG_POISON_ON_FREE)
+	{
+		bPoisonOnFree = IMG_TRUE;
+	}
+	else
+	{
+		bPoisonOnFree = IMG_FALSE;
+	}
+
+	if (bZeroOnAlloc && bPoisonOnFree)
+	{
+		/* Zero on Alloc and Poison on Alloc are mutually exclusive */
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto fail_params;
+	}
+
+	psPrivData = OSAllocZMem(sizeof(*psPrivData));
+	if (psPrivData == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_priv_alloc;
+	}
+
+	psPrivData->psPhysHeap = psHeap;
+	psPrivData->psAttachment = psAttachment;
+	psPrivData->pfnDestroy = pfnDestroy;
+	psPrivData->bPoisonOnFree = bPoisonOnFree;
+
+	if (bZeroOnAlloc || bPoisonOnAlloc)
+	{
+		IMG_PVOID pvKernAddr;
+		int i, err;
+
+		err = dma_buf_begin_cpu_access(psDmaBuf,
+					       0,
+					       psDmaBuf->size,
+					       DMA_FROM_DEVICE);
+		if (err)
+		{
+			eError = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING;
+			goto fail_begin;
+		}
+
+		for (i = 0; i < psDmaBuf->size / PAGE_SIZE; i++)
+		{
+			pvKernAddr = dma_buf_kmap(psDmaBuf, i);
+			if (IS_ERR_OR_NULL(pvKernAddr))
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+					 "%s: Failed to map page for %s",
+					 __func__,
+					 bZeroOnAlloc ? "zeroing" : "poisoning"));
+				eError = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING;
+
+				dma_buf_end_cpu_access(psDmaBuf,
+						       0,
+						       psDmaBuf->size,
+						       DMA_TO_DEVICE);
+
+				goto fail_kmap;
+			}
+
+			if (bZeroOnAlloc)
+			{
+				memset(pvKernAddr, 0, PAGE_SIZE);
+			}
+			else
+			{
+				_Poison(pvKernAddr, PAGE_SIZE, _AllocPoison, _AllocPoisonSize);
+			}
+
+			dma_buf_kunmap(psDmaBuf, i, pvKernAddr);
+		}
+
+		dma_buf_end_cpu_access(psDmaBuf,
+				       0,
+				       psDmaBuf->size,
+				       DMA_TO_DEVICE);
+	}
+
+	uiPMRFlags = (PMR_FLAGS_T)(uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK);
+
+	/*
+	 * Check no significant bits were lost in cast due to different
+	 * bit widths for flags
+	 */
+	PVR_ASSERT(uiPMRFlags == (uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK));
+
+	eError = PMRCreatePMR(psHeap,
+			      psDmaBuf->size,
+			      psDmaBuf->size,
+			      1,
+			      1,
+			      &bMappingTable,
+			      PAGE_SHIFT,
+			      uiPMRFlags,
+			      "PMRDMABUF",
+			      &_sPMRDmaBufFuncTab,
+			      psPrivData,
+			      ppsPMRPtr,
+			      &psPrivData->hPDumpAllocInfo,
+			      IMG_FALSE);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create PMR", __func__));
+		goto fail_create_pmr;
+	}
+
+#if defined(PVR_RI_DEBUG)
+	eError = RIWritePMREntryKM(*ppsPMRPtr,
+				   sizeof("DMABUF"),
+				   "DMABUF",
+				   psDmaBuf->size);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_WARNING,
+			 "%s: Failed to write PMR entry (%s)",
+			 __func__, PVRSRVGetErrorStringKM(eError)));
+	}
+#endif
+
+	return PVRSRV_OK;
+
+fail_create_pmr:
+fail_kmap:
+fail_begin:
+	OSFreeMem(psPrivData);
+
+fail_priv_alloc:
+fail_params:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+#if defined(SUPPORT_ION)
+static PVRSRV_ERROR PhysmemDestroyDmaBuf(PHYS_HEAP *psHeap,
+					 struct dma_buf_attachment *psAttachment)
+{
+	struct dma_buf *psDmaBuf = psAttachment->dmabuf;
+
+	HASH_Remove(g_psDmaBufHash, (IMG_UINTPTR_T) psDmaBuf);
+	g_ui32HashRefCount--;
+
+	if (g_ui32HashRefCount == 0)
+	{
+		HASH_Delete(g_psDmaBufHash);
+		g_psDmaBufHash = IMG_NULL;
+	}
+
+	PhysHeapRelease(psHeap);
+
+	dma_buf_detach(psDmaBuf, psAttachment);
+	dma_buf_put(psDmaBuf);
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PhysmemImportDmaBuf(CONNECTION_DATA *psConnection,
+		    IMG_INT fd,
+		    PVRSRV_MEMALLOCFLAGS_T uiFlags,
+		    PMR **ppsPMRPtr,
+		    IMG_DEVMEM_SIZE_T *puiSize,
+		    IMG_DEVMEM_ALIGN_T *puiAlign)
+{
+	PMR *psPMR;
+	struct dma_buf_attachment *psAttachment;
+	struct dma_buf *psDmaBuf;
+	PHYS_HEAP *psHeap;
+	PVRSRV_ERROR eError;
+
+	if (!psConnection)
+	{
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto fail_params;
+	}
+
+	/* Get the buffer handle */
+	psDmaBuf = dma_buf_get(fd);
+	if (IS_ERR_OR_NULL(psDmaBuf))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: dma_buf_get failed", __func__));
+		eError = PVRSRV_ERROR_BAD_MAPPING;
+		goto fail_dma_buf_get;
+	}
+
+	if (g_psDmaBufHash)
+	{
+		/* We have a hash table so check if we've seen this dmabuf before */
+		psPMR = (PMR *) HASH_Retrieve(g_psDmaBufHash, (IMG_UINTPTR_T) psDmaBuf);
+		if (psPMR)
+		{
+			/* Reuse the PMR we already created */
+			PMRRefPMR(psPMR);
+
+			*ppsPMRPtr = psPMR;
+			*puiSize = psDmaBuf->size;
+			*puiAlign = PAGE_SIZE;
+
+			dma_buf_put(psDmaBuf);
+
+			return PVRSRV_OK;
+		}
+	}
+
+	/* Attach a fake device to to the dmabuf */
+	psAttachment = dma_buf_attach(psDmaBuf, (void *)0x1);
+	if (IS_ERR_OR_NULL(psAttachment))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: dma_buf_get failed", __func__));
+		eError = PVRSRV_ERROR_BAD_MAPPING;
+		goto fail_dma_buf_attach;
+	}
+
+	/*
+	 * Get the physical heap for this PMR
+	 *	
+	 * Note:
+	 * While we have no way to determine the type of the buffer
+	 * we just assume that all dmabufs are from the same
+	 * physical heap.
+	 */
+	eError = PhysHeapAcquire(IonPhysHeapID(), &psHeap);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed PhysHeapAcquire", __func__));
+		goto fail_physheap;
+	}
+
+	eError = PhysmemCreateNewDmaBufBackedPMR(psHeap,
+						 psAttachment,
+						 PhysmemDestroyDmaBuf,
+						 uiFlags,
+						 &psPMR);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_create_new_pmr;
+	}
+
+	if (!g_psDmaBufHash)
+	{
+		/*
+		 * As different processes may import the same dmabuf we need to
+		 * create a hash table so we don't generate a duplicate PMR but
+		 * rather just take a reference on an existing one.
+		 */
+		g_psDmaBufHash = HASH_Create(DMA_BUF_HASH_SIZE);
+		if (!g_psDmaBufHash)
+		{
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto fail_hash_create;
+		}
+	}
+
+	/* First time we've seen this dmabuf so store it in the hash table */
+	HASH_Insert(g_psDmaBufHash, (IMG_UINTPTR_T) psDmaBuf, (IMG_UINTPTR_T) psPMR);
+	g_ui32HashRefCount++;
+
+	*ppsPMRPtr = psPMR;
+	*puiSize = psDmaBuf->size;
+	*puiAlign = PAGE_SIZE;
+
+	return PVRSRV_OK;
+
+fail_hash_create:
+	PMRUnrefPMR(psPMR);
+
+fail_create_new_pmr:
+	PhysHeapRelease(psHeap);
+
+fail_physheap:
+	dma_buf_detach(psDmaBuf, psAttachment);
+
+fail_dma_buf_attach:
+	dma_buf_put(psDmaBuf);
+
+fail_dma_buf_get:
+fail_params:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+#endif /* defined(SUPPORT_ION) */
+#endif /* !defined(SUPPORT_DRM) || defined(PVR_DRM_USE_PRIME) */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/physmem_osmem_linux.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/physmem_osmem_linux.c
new file mode 100644
index 0000000..b820f53
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/physmem_osmem_linux.c
@@ -0,0 +1,2016 @@
+/*************************************************************************/ /*!
+@File
+@Title          Implementation of PMR functions for OS managed memory
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Part of the memory management.  This module is responsible for
+                implementing the function callbacks for physical memory borrowed
+                from that normally managed by the operating system.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* include5/ */
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+
+/* services/server/include/ */
+#include "allocmem.h"
+#include "osfunc.h"
+#include "pdump_physmem.h"
+#include "pdump_km.h"
+#include "pmr.h"
+#include "pmr_impl.h"
+#include "devicemem_server_utils.h"
+
+/* ourselves */
+#include "physmem_osmem.h"
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#include "process_stats.h"
+#endif
+
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3,0,0))
+#include <linux/mm.h>
+#define PHYSMEM_SUPPORTS_SHRINKER
+#endif
+
+#include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/mm_types.h>
+#include <linux/vmalloc.h>
+#include <linux/gfp.h>
+#include <linux/sched.h>
+#include <linux/atomic.h>
+#include <asm/io.h>
+#if defined(CONFIG_X86)
+#include <asm/cacheflush.h>
+#endif
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64) || defined (CONFIG_METAG)
+#include "osfunc.h"
+#endif
+
+#if defined(CONFIG_X86)
+#define PMR_UNSET_PAGES_STACK_ALLOC 64
+#endif
+
+/* Provide SHRINK_STOP definition for kernel older than 3.12 */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0))
+#define SHRINK_STOP (~0UL)
+#endif
+
+#include "physmem_osmem_linux.h"
+
+#if  (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+/* split_pages() not available on older-kernels */
+#if (PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER > 0)
+/* This includes bin (i.e. bucket) for order-0 */
+#define ALLOC_ORDER_ARRAY_SIZE (PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER+1)
+#define PHYSMEM_USING_HIGH_ALLOC_ORDER
+
+static IMG_UINT32 g_uiCutOffOrder = PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER;
+#endif
+#endif
+
+/* guard against too big a backlog of deferred freeing of pages */
+#define MAX_OUTSTANDING_DEFERRED_FREE_PAGES 32768
+static atomic_t g_uiNumDeferredFreePages = ATOMIC_INIT(0);
+
+struct _PMR_OSPAGEARRAY_DATA_ {
+    /*
+      uiNumPages:
+
+      number of "pages" (a.k.a. macro pages, compound pages, higher
+      order pages, etc...)
+    */
+    IMG_UINT32 uiNumPages;
+
+    /*
+      uiLog2PageSize;
+
+      size of each "page" -- this would normally be the same as
+      PAGE_SHIFT, but we support the idea that we may allocate pages
+      in larger chunks for better contiguity, using order>0 in the
+      call to alloc_pages()
+    */
+    IMG_UINT32 uiLog2PageSize;
+
+    /*
+      the pages thusly allocated...  N.B.. One entry per compound page,
+      where compound pages are used.
+    */
+    struct page **pagearray;
+
+    /*
+      for pdump...
+    */
+    IMG_BOOL bPDumpMalloced;
+    IMG_HANDLE hPDumpAllocInfo;
+
+    /*
+      record at alloc time whether poisoning will be required when the
+      PMR is freed.
+    */
+    IMG_BOOL bZero;
+    IMG_BOOL bPoisonOnFree;
+    IMG_BOOL bPoisonOnAlloc;
+    IMG_BOOL bHasOSPages;
+    IMG_BOOL bOnDemand;
+    /*
+	 The cache mode of the PMR (required at free time)
+	 Boolean used to track if we need to revert the cache attributes
+	 of the pages used in this allocation. Depends on OS/architecture.
+	*/
+    IMG_UINT32 ui32CPUCacheFlags;
+	IMG_BOOL bUnsetMemoryType;
+
+	/* Structure which is hooked into the cleanup thread work list */
+	PVRSRV_CLEANUP_THREAD_WORK sCleanupThreadFn;
+};
+
+/***********************************
+ * Page pooling for uncached pages *
+ ***********************************/
+ 
+static void
+_FreeOSPage(IMG_UINT32 ui32CPUCacheFlags,
+			IMG_UINT32 uiOrder,
+			IMG_BOOL bUnsetMemoryType,
+			IMG_BOOL bFreeToOS,
+			struct page *psPage);
+ 
+typedef	struct
+{
+	/* Linkage for page pool LRU list */
+	struct list_head sPagePoolItem;
+
+	struct page *psPage;
+} LinuxPagePoolEntry;
+
+static IMG_UINT32 g_ui32PagePoolEntryCount = 0;
+
+#if defined(PVR_LINUX_PHYSMEM_MAX_POOL_PAGES)
+static IMG_UINT32 g_ui32PagePoolMaxEntries = PVR_LINUX_PHYSMEM_MAX_POOL_PAGES;
+#else
+static IMG_UINT32 g_ui32PagePoolMaxEntries = 0;
+#endif
+
+/* Global structures we use to manage the page pool */
+static DEFINE_MUTEX(g_sPagePoolMutex);
+
+static struct kmem_cache *g_psLinuxPagePoolCache = IMG_NULL;
+
+static LIST_HEAD(g_sPagePoolList);
+static LIST_HEAD(g_sUncachedPagePoolList);
+
+static inline void
+_PagePoolLock(void)
+{
+	mutex_lock(&g_sPagePoolMutex);
+}
+
+static inline int
+_PagePoolTrylock(void)
+{
+	return mutex_trylock(&g_sPagePoolMutex);
+}
+
+static inline void
+_PagePoolUnlock(void)
+{
+	mutex_unlock(&g_sPagePoolMutex);
+}
+
+static LinuxPagePoolEntry *
+_LinuxPagePoolEntryAlloc(void)
+{
+    return kmem_cache_zalloc(g_psLinuxPagePoolCache, GFP_KERNEL);
+}
+
+static inline IMG_BOOL _GetPoolListHead(IMG_UINT32 ui32CPUCacheFlags, struct list_head **ppsPoolHead)
+{
+	switch(ui32CPUCacheFlags)
+	{
+		case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
+/*
+	For x86 we need to keep different lists for uncached
+	and write-combined as we must always honour the PAT
+	setting which cares about this difference.
+*/
+#if defined(CONFIG_X86)
+			*ppsPoolHead = &g_sUncachedPagePoolList;
+			break;
+#else
+			/* Fall-through */
+#endif
+		case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE:
+			*ppsPoolHead = &g_sPagePoolList;
+			break;
+		default:
+			return IMG_FALSE;
+	}
+	return IMG_TRUE;
+}
+
+static void
+_LinuxPagePoolEntryFree(LinuxPagePoolEntry *psPagePoolEntry)
+{
+	kmem_cache_free(g_psLinuxPagePoolCache, psPagePoolEntry);
+}
+
+static inline IMG_BOOL
+_AddEntryToPool(struct page *psPage, IMG_UINT32 ui32CPUCacheFlags)
+{
+	LinuxPagePoolEntry *psEntry;
+	struct list_head *psPoolHead = IMG_NULL;
+
+	if (!_GetPoolListHead(ui32CPUCacheFlags, &psPoolHead))
+	{
+		return IMG_FALSE;
+	}
+
+	psEntry = _LinuxPagePoolEntryAlloc();
+	if (psEntry == NULL)
+	{
+		return IMG_FALSE;
+	}
+
+	psEntry->psPage = psPage;
+	_PagePoolLock();
+	list_add_tail(&psEntry->sPagePoolItem, psPoolHead);
+	g_ui32PagePoolEntryCount++;
+	_PagePoolUnlock();
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+	/* MemStats usually relies on having the bridge lock held, however
+	 * the page pool code may call PVRSRVStatsIncrMemAllocPoolStat and
+	 * PVRSRVStatsDecrMemAllocPoolStat without the bridge lock held, so
+	 * the page pool lock is used to ensure these calls are mutually
+	 * exclusive
+	 */
+	_PagePoolLock();
+	PVRSRVStatsIncrMemAllocPoolStat(PAGE_SIZE);
+	_PagePoolUnlock();
+#endif
+
+	return IMG_TRUE;
+}
+
+static inline void
+_RemoveEntryFromPoolUnlocked(LinuxPagePoolEntry *psPagePoolEntry)
+{
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+	/* MemStats usually relies on having the bridge lock held, however
+	 * the page pool code may call PVRSRVStatsIncrMemAllocPoolStat and
+	 * PVRSRVStatsDecrMemAllocPoolStat without the bridge lock held, so
+	 * the page pool lock is used to ensure these calls are mutually
+	 * exclusive
+	 */
+	PVRSRVStatsDecrMemAllocPoolStat(PAGE_SIZE);
+#endif
+
+	list_del(&psPagePoolEntry->sPagePoolItem);
+	g_ui32PagePoolEntryCount--;
+}
+
+static inline struct page *
+_RemoveFirstEntryFromPool(IMG_UINT32 ui32CPUCacheFlags)
+{
+	LinuxPagePoolEntry *psPagePoolEntry;
+	struct page *psPage;
+	struct list_head *psPoolHead = IMG_NULL;
+
+	if (!_GetPoolListHead(ui32CPUCacheFlags, &psPoolHead))
+	{
+		return NULL;
+	}
+
+	_PagePoolLock();
+	if (list_empty(psPoolHead))
+	{
+		_PagePoolUnlock();
+		return NULL;
+	}
+
+	PVR_ASSERT(g_ui32PagePoolEntryCount > 0);
+	psPagePoolEntry = list_first_entry(psPoolHead, LinuxPagePoolEntry, sPagePoolItem);
+	_RemoveEntryFromPoolUnlocked(psPagePoolEntry);
+
+	psPage = psPagePoolEntry->psPage;
+	_LinuxPagePoolEntryFree(psPagePoolEntry);
+	_PagePoolUnlock();
+
+	return psPage;
+}
+
+#if defined(PHYSMEM_SUPPORTS_SHRINKER)
+static struct shrinker g_sShrinker;
+
+static unsigned long
+_CountObjectsInPagePool(struct shrinker *psShrinker, struct shrink_control *psShrinkControl)
+{
+	int remain;
+
+	PVR_ASSERT(psShrinker == &g_sShrinker);
+	(void)psShrinker;
+	(void)psShrinkControl;
+
+	/* In order to avoid possible deadlock use mutex_trylock in place of mutex_lock */
+	if (_PagePoolTrylock() == 0)
+		return 0;
+	remain = g_ui32PagePoolEntryCount;
+	_PagePoolUnlock();
+
+	return remain;
+}
+
+static unsigned long
+_ScanObjectsInPagePool(struct shrinker *psShrinker, struct shrink_control *psShrinkControl)
+{
+	unsigned long uNumToScan = psShrinkControl->nr_to_scan;
+	LinuxPagePoolEntry *psPagePoolEntry, *psTempPoolEntry;
+	int remain;
+
+	PVR_ASSERT(psShrinker == &g_sShrinker);
+	(void)psShrinker;
+
+	/* In order to avoid possible deadlock use mutex_trylock in place of mutex_lock */
+	if (_PagePoolTrylock() == 0)
+		return SHRINK_STOP;
+	list_for_each_entry_safe(psPagePoolEntry,
+	                         psTempPoolEntry,
+	                         &g_sPagePoolList,
+	                         sPagePoolItem)
+	{
+		_RemoveEntryFromPoolUnlocked(psPagePoolEntry);
+
+		/*
+		  We don't want to save the cache type and is we need to unset the
+		  memory type as it would double the page pool structure and the
+		  values are always going to be the same anyway which is why the
+		  page is in the pool (well the page could be UNCACHED or
+		  WRITE_COMBINE but we don't even need the cache type for freeing
+		  back to the OS).
+		*/
+		_FreeOSPage(PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE,
+			    0,
+			    IMG_TRUE,
+			    IMG_TRUE,
+			    psPagePoolEntry->psPage);
+		_LinuxPagePoolEntryFree(psPagePoolEntry);
+
+		if (--uNumToScan == 0)
+		{
+			break;
+		}
+	}
+
+	/*
+	  Note:
+	  For anything other then x86 this list will be empty but we want to
+	  keep differences between compiled code to a minimum and so
+	  this isn't wrapped in #if defined(CONFIG_X86)
+	*/
+	list_for_each_entry_safe(psPagePoolEntry,
+	                         psTempPoolEntry,
+	                         &g_sUncachedPagePoolList,
+	                         sPagePoolItem)
+	{
+		_RemoveEntryFromPoolUnlocked(psPagePoolEntry);
+
+		/*
+		  We don't want to save the cache type and is we need to unset the
+		  memory type as it would double the page pool structure and the
+		  values are always going to be the same anyway which is why the
+		  page is in the pool (well the page could be UNCACHED or
+		  WRITE_COMBINE but we don't even need the cache type for freeing
+		  back to the OS).
+		*/
+		_FreeOSPage(PVRSRV_MEMALLOCFLAG_CPU_UNCACHED,
+			    0,
+			    IMG_TRUE,
+			    IMG_TRUE,
+			    psPagePoolEntry->psPage);
+		_LinuxPagePoolEntryFree(psPagePoolEntry);
+
+		if (--uNumToScan == 0)
+		{
+			break;
+		}
+	}
+
+	if (list_empty(&g_sPagePoolList) && list_empty(&g_sUncachedPagePoolList))
+	{
+		PVR_ASSERT(g_ui32PagePoolEntryCount == 0);
+	}
+	remain = g_ui32PagePoolEntryCount;
+	_PagePoolUnlock();
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0))
+	return remain;
+#else
+	return psShrinkControl->nr_to_scan - uNumToScan;
+#endif
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0))
+static int
+_ShrinkPagePool(struct shrinker *psShrinker, struct shrink_control *psShrinkControl)
+{
+	if (psShrinkControl->nr_to_scan != 0)
+	{
+		return _ScanObjectsInPagePool(psShrinker, psShrinkControl);
+	}
+	else
+	{
+		/* No pages are being reclaimed so just return the page count */
+		return _CountObjectsInPagePool(psShrinker, psShrinkControl);
+	}
+}
+
+static struct shrinker g_sShrinker =
+{
+	.shrink = _ShrinkPagePool,
+	.seeks = DEFAULT_SEEKS
+};
+#else
+static struct shrinker g_sShrinker =
+{
+	.count_objects = _CountObjectsInPagePool,
+	.scan_objects = _ScanObjectsInPagePool,
+	.seeks = DEFAULT_SEEKS
+};
+#endif
+#endif /* defined(PHYSMEM_SUPPORTS_SHRINKER) */
+
+static void DisableOOMKiller(void)
+{
+	/* PF_DUMPCORE is treated by the VM as if the OOM killer was disabled.
+	 *
+	 * As oom_killer_disable() is an inline, non-exported function, we
+	 * can't use it from a modular driver. Furthermore, the OOM killer
+	 * API doesn't look thread safe, which `current' is.
+	 */
+	WARN_ON(current->flags & PF_DUMPCORE);
+	current->flags |= PF_DUMPCORE;
+}
+
+void LinuxInitPagePool(void)
+{
+	IMG_UINT32 ui32Flags = 0;
+
+	_PagePoolLock();
+#if defined(DEBUG_LINUX_SLAB_ALLOCATIONS)
+	ui32Flags |= SLAB_POISON|SLAB_RED_ZONE;
+#endif
+	g_psLinuxPagePoolCache = kmem_cache_create("img-pp", sizeof(LinuxPagePoolEntry), 0, ui32Flags, NULL);
+
+#if defined(PHYSMEM_SUPPORTS_SHRINKER)
+	/* Only create the shrinker if we created the cache OK */
+	if (g_psLinuxPagePoolCache)
+	{
+		register_shrinker(&g_sShrinker);
+	}
+#endif
+	_PagePoolUnlock();
+}
+
+void LinuxDeinitPagePool(void)
+{
+	LinuxPagePoolEntry *psPagePoolEntry, *psTempPPEntry;
+
+	_PagePoolLock();
+	/* Evict all the pages from the pool */
+	list_for_each_entry_safe(psPagePoolEntry,
+	                         psTempPPEntry,
+	                         &g_sPagePoolList,
+	                         sPagePoolItem)
+	{
+		_RemoveEntryFromPoolUnlocked(psPagePoolEntry);
+
+		/*
+			We don't want to save the cache type and is we need to unset the
+			memory type as it would double the page pool structure and the
+			values are always going to be the same anyway which is why the
+			page is in the pool (well the page could be UNCACHED or
+			WRITE_COMBINE but we don't even need the cache type for freeing
+			back to the OS).
+		*/
+		_FreeOSPage(PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE,
+					0,
+					IMG_TRUE,
+					IMG_TRUE,
+					psPagePoolEntry->psPage);
+		_LinuxPagePoolEntryFree(psPagePoolEntry);
+	}
+	
+	/*
+		Note:
+		For anything other then x86 this will be a no-op but we want to
+		keep differences between compiled code to a minimum and so
+		this isn't wrapped in #if defined(CONFIG_X86)
+	*/
+	list_for_each_entry_safe(psPagePoolEntry,
+	                         psTempPPEntry,
+	                         &g_sUncachedPagePoolList,
+	                         sPagePoolItem)
+	{
+		_RemoveEntryFromPoolUnlocked(psPagePoolEntry);
+
+		_FreeOSPage(PVRSRV_MEMALLOCFLAG_CPU_UNCACHED,
+					0,
+					IMG_TRUE,
+					IMG_TRUE,
+					psPagePoolEntry->psPage);
+		_LinuxPagePoolEntryFree(psPagePoolEntry);
+	}
+
+	PVR_ASSERT(g_ui32PagePoolEntryCount == 0);
+
+	/* Free the page cache */
+	kmem_cache_destroy(g_psLinuxPagePoolCache);
+
+#if defined(PHYSMEM_SUPPORTS_SHRINKER)
+	unregister_shrinker(&g_sShrinker);
+#endif
+	_PagePoolUnlock();
+}
+
+static void EnableOOMKiller(void)
+{
+	current->flags &= ~PF_DUMPCORE;
+}
+
+static void
+_PoisonPages(struct page *page,
+             IMG_UINT32 uiOrder,
+             const IMG_CHAR *pacPoisonData,
+             IMG_SIZE_T uiPoisonSize)
+{
+    void *kvaddr;
+    IMG_UINT32 uiSrcByteIndex;
+    IMG_UINT32 uiDestByteIndex;
+    IMG_UINT32 uiSubPageIndex;
+    IMG_CHAR *pcDest;
+
+    uiSrcByteIndex = 0;
+    for (uiSubPageIndex = 0; uiSubPageIndex < (1U << uiOrder); uiSubPageIndex++)
+    {
+        kvaddr = kmap(page + uiSubPageIndex);
+
+        pcDest = kvaddr;
+
+        for(uiDestByteIndex=0; uiDestByteIndex<PAGE_SIZE; uiDestByteIndex++)
+        {
+            pcDest[uiDestByteIndex] = pacPoisonData[uiSrcByteIndex];
+            uiSrcByteIndex++;
+            if (uiSrcByteIndex == uiPoisonSize)
+            {
+                uiSrcByteIndex = 0;
+            }
+        }
+        kunmap(page + uiSubPageIndex);
+    }
+}
+
+static const IMG_CHAR _AllocPoison[] = "^PoIsOn";
+static const IMG_UINT32 _AllocPoisonSize = 7;
+static const IMG_CHAR _FreePoison[] = "<DEAD-BEEF>";
+static const IMG_UINT32 _FreePoisonSize = 11;
+
+static PVRSRV_ERROR
+_AllocOSPageArray(PMR_SIZE_T uiSize,
+        IMG_UINT32 uiLog2PageSize,
+        IMG_BOOL bZero,
+        IMG_BOOL bPoisonOnAlloc,
+        IMG_BOOL bPoisonOnFree,
+        IMG_BOOL bOnDemand,
+        IMG_UINT32 ui32CPUCacheFlags,
+		struct _PMR_OSPAGEARRAY_DATA_ **ppsPageArrayDataPtr)
+{
+    PVRSRV_ERROR eError;
+    void *pvData;
+    IMG_UINT32 uiNumPages;
+
+    struct page **ppsPageArray;
+    struct _PMR_OSPAGEARRAY_DATA_ *psPageArrayData;
+
+    if (uiSize >= 0x1000000000ULL)
+    {
+        PVR_DPF((PVR_DBG_ERROR,
+                 "physmem_osmem_linux.c: Do you really want 64GB of physical memory in one go?  This is likely a bug"));
+        eError = PVRSRV_ERROR_INVALID_PARAMS;
+        goto e_freed_pvdata;
+    }
+
+    PVR_ASSERT(PAGE_SHIFT <= uiLog2PageSize);
+    if ((uiSize & ((1ULL << uiLog2PageSize) - 1)) != 0)
+    {
+    	PVR_DPF((PVR_DBG_ERROR,
+    			"Allocation size "PMR_SIZE_FMTSPEC" is not multiple of page size 2^%u !",
+    			uiSize,
+    			uiLog2PageSize));
+
+        eError = PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE;
+        goto e_freed_pvdata;
+    }
+
+    /* Use of cast below is justified by the assertion that follows to
+       prove that no significant bits have been truncated */
+    uiNumPages = (IMG_UINT32)(((uiSize-1)>>uiLog2PageSize) + 1);
+    PVR_ASSERT(((PMR_SIZE_T)uiNumPages << uiLog2PageSize) == uiSize);
+
+    pvData = OSAllocMem(sizeof(struct _PMR_OSPAGEARRAY_DATA_) +
+                        sizeof(struct page *) * uiNumPages);
+    if (pvData == IMG_NULL)
+    {
+        PVR_DPF((PVR_DBG_ERROR,
+                 "physmem_osmem_linux.c: OS refused the memory allocation for the table of pages.  Did you ask for too much?"));
+        eError = PVRSRV_ERROR_INVALID_PARAMS;
+        goto e_freed_pvdata;
+    }
+    PVR_ASSERT(pvData != IMG_NULL);
+
+    psPageArrayData = pvData;
+    ppsPageArray = pvData + sizeof(struct _PMR_OSPAGEARRAY_DATA_);
+    psPageArrayData->pagearray = ppsPageArray;
+    psPageArrayData->uiLog2PageSize = uiLog2PageSize;
+    psPageArrayData->uiNumPages = uiNumPages;
+    psPageArrayData->bZero = bZero;
+    psPageArrayData->ui32CPUCacheFlags = ui32CPUCacheFlags;
+ 	psPageArrayData->bPoisonOnAlloc = bPoisonOnAlloc;
+ 	psPageArrayData->bPoisonOnFree = bPoisonOnFree;
+ 	psPageArrayData->bHasOSPages = IMG_FALSE;
+ 	psPageArrayData->bOnDemand = bOnDemand;
+
+    psPageArrayData->bPDumpMalloced = IMG_FALSE;
+
+	psPageArrayData->bUnsetMemoryType = IMG_FALSE;
+
+	*ppsPageArrayDataPtr = psPageArrayData;
+
+	return PVRSRV_OK;
+
+e_freed_pvdata:
+   PVR_ASSERT(eError != PVRSRV_OK);
+   return eError;
+
+}
+
+static inline PVRSRV_ERROR
+_ApplyOSPagesAttribute(struct page **ppsPage, IMG_UINT32 uiNumPages, IMG_BOOL bFlush,
+#if defined (CONFIG_X86)
+					   struct page **ppsUnsetPages, IMG_UINT32 uiUnsetPagesIndex,
+#endif
+					   IMG_UINT32 ui32CPUCacheFlags, unsigned int gfp_flags)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	
+	if (ppsPage != IMG_NULL)
+	{
+#if defined (CONFIG_ARM) || defined(CONFIG_ARM64) || defined (CONFIG_METAG)
+		/*  On ARM kernels we can be given pages which still remain in the cache.
+			In order to make sure that the data we write through our mappings
+			doesn't get over written by later cache evictions we invalidate the
+			pages that get given to us.
+	
+			Note:
+			This still seems to be true if we request cold pages, it's just less
+			likely to be in the cache. */
+
+		if (ui32CPUCacheFlags != PVRSRV_MEMALLOCFLAG_CPU_CACHED)
+		{
+			IMG_UINT32 ui32Idx;
+
+			if (uiNumPages < PVR_LINUX_ARM_PAGEALLOC_FLUSH_THRESHOLD)
+			{
+				for (ui32Idx = 0; ui32Idx < uiNumPages;  ++ui32Idx)
+				{
+					IMG_CPU_PHYADDR sCPUPhysAddrStart, sCPUPhysAddrEnd;
+					IMG_PVOID pvPageVAddr;
+
+					pvPageVAddr = kmap(ppsPage[ui32Idx]);
+					sCPUPhysAddrStart.uiAddr = page_to_phys(ppsPage[ui32Idx]);
+					sCPUPhysAddrEnd.uiAddr = sCPUPhysAddrStart.uiAddr + PAGE_SIZE;
+
+					/* If we're zeroing, we need to make sure the cleared memory is pushed out
+					   of the cache before the cache lines are invalidated */
+					if (bFlush)
+					{
+						OSFlushCPUCacheRangeKM(pvPageVAddr,
+											   pvPageVAddr + PAGE_SIZE,
+											   sCPUPhysAddrStart,
+											   sCPUPhysAddrEnd);
+					}
+					else
+					{
+						OSInvalidateCPUCacheRangeKM(pvPageVAddr,
+													pvPageVAddr + PAGE_SIZE,
+													sCPUPhysAddrStart,
+													sCPUPhysAddrEnd);
+					}
+
+					kunmap(ppsPage[ui32Idx]);
+				}
+			}
+			else
+			{
+				OSCPUOperation(PVRSRV_CACHE_OP_FLUSH);
+			}
+		}
+#endif
+
+#if defined (CONFIG_X86)
+		/*  On X86 if we already have a mapping we need to change the mode of
+			current mapping before we map it ourselves	*/
+		int ret = IMG_FALSE;
+		IMG_UINT32 uiPageIndex;
+		PVR_UNREFERENCED_PARAMETER(bFlush);
+
+		switch (ui32CPUCacheFlags)
+		{
+			case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
+				ret = set_pages_array_uc(ppsUnsetPages, uiUnsetPagesIndex);
+				if (ret)
+				{
+					eError = PVRSRV_ERROR_UNABLE_TO_SET_CACHE_MODE;
+					PVR_DPF((PVR_DBG_ERROR, "Setting Linux page caching mode to UC failed, returned %d", ret));
+				}
+				break;
+
+			case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE:
+				ret = set_pages_array_wc(ppsUnsetPages, uiUnsetPagesIndex);
+				if (ret)
+				{
+					eError = PVRSRV_ERROR_UNABLE_TO_SET_CACHE_MODE;
+					PVR_DPF((PVR_DBG_ERROR, "Setting Linux page caching mode to WC failed, returned %d", ret));
+				}
+				break;
+
+			case PVRSRV_MEMALLOCFLAG_CPU_CACHED:
+				break;
+
+			default:
+				break;
+		}
+
+		if (ret)
+		{
+			for(uiPageIndex = 0; uiPageIndex < uiNumPages; uiPageIndex++)
+			{
+				_FreeOSPage(ui32CPUCacheFlags,
+							0,
+							IMG_FALSE,
+							IMG_FALSE,
+							ppsPage[uiPageIndex]);
+			}
+		}
+#endif
+	}
+	
+	return eError;
+}
+
+static PVRSRV_ERROR
+_AllocOSPage(IMG_UINT32 ui32CPUCacheFlags,
+             unsigned int gfp_flags,
+             IMG_UINT32 uiOrder,
+             struct page **ppsPage,
+             IMG_BOOL *pbPageFromPool)
+{
+	struct page *psPage = IMG_NULL;
+	*pbPageFromPool = IMG_FALSE;
+
+	/* Does the requested page contiguity match the CPU page size? */
+	if (uiOrder == 0)
+	{
+		psPage = _RemoveFirstEntryFromPool(ui32CPUCacheFlags);
+		if (psPage != IMG_NULL)
+		{
+			*pbPageFromPool = IMG_TRUE;
+			if (gfp_flags & __GFP_ZERO)
+			{
+				/* The kernel will zero the page for us when we allocate it,
+				   but if it comes from the pool then we must do this 
+				   ourselves. */
+				IMG_PVOID pvPageVAddr = kmap(psPage);
+				memset(pvPageVAddr, 0, PAGE_SIZE);
+				kunmap(psPage);
+			}
+		}
+	}
+
+	/*  Did we check the page pool and/or was it a page pool miss,
+		either the pool was empty or it was for a cached page so we
+		must ask the OS  */
+	if (!*pbPageFromPool)
+	{
+		DisableOOMKiller();
+		psPage = alloc_pages(gfp_flags, uiOrder);
+		EnableOOMKiller();
+	}
+
+	if(IMG_NULL == (*ppsPage = psPage))
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY; 
+	}
+	
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR
+_AllocOSZeroOrderPages(struct _PMR_OSPAGEARRAY_DATA_ *psPageArrayData,
+					   IMG_UINT32 ui32CPUCacheFlags,
+					   unsigned int gfp_flags)
+{
+	PVRSRV_ERROR eError;
+	IMG_UINT32 uiPageIndex;
+	IMG_BOOL bPageFromPool = IMG_FALSE;	
+	struct page **ppsPageArray = psPageArrayData->pagearray;
+#if defined(CONFIG_X86)
+	/* On x86 we batch applying cache attributes by storing references to all
+	   pages that are not from the page pool */
+	struct page *apsUnsetPages[PMR_UNSET_PAGES_STACK_ALLOC];
+	struct page **ppsUnsetPages = apsUnsetPages;
+	IMG_UINT32 uiUnsetPagesIndex = 0;
+	
+	if (psPageArrayData->uiNumPages > PMR_UNSET_PAGES_STACK_ALLOC)
+	{
+		ppsUnsetPages = OSAllocMem(sizeof(struct page*) * psPageArrayData->uiNumPages);
+		if (ppsUnsetPages == NULL)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed alloc_pages metadata allocation", __FUNCTION__));
+			return PVRSRV_ERROR_OUT_OF_MEMORY;
+		}
+	}
+#endif
+
+	/* Allocate pages one at a time.  Note that the _device_ memory
+	   page size may be different from the _host_ cpu page size - we
+	   have a concept of a minimum contiguity requirement, which must
+	   be sufficient to meet the requirement of both device and host
+	   page size (and possibly other devices or other external
+	   constraints).  We are allocating ONE "minimum contiguity unit"
+	   (in practice, generally a _device_ page, but not necessarily)
+	   at a time, by asking the OS for 2**uiOrder _host_ pages at a
+	   time. */
+	for (uiPageIndex = 0;
+		 uiPageIndex < psPageArrayData->uiNumPages;
+		 uiPageIndex++)
+	{
+		eError = _AllocOSPage(ui32CPUCacheFlags, gfp_flags, 0,
+							  &ppsPageArray[uiPageIndex], &bPageFromPool);
+#if defined(CONFIG_X86)
+		if (!bPageFromPool)
+		{
+			ppsUnsetPages[uiUnsetPagesIndex] = ppsPageArray[uiPageIndex];
+			uiUnsetPagesIndex += 1;
+		}
+#endif
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s: alloc_pages failed to honour request at %d of %d (%s)",
+					 __FUNCTION__,
+					 uiPageIndex,
+					 psPageArrayData->uiNumPages,
+					 PVRSRVGetErrorStringKM(eError)));
+			for(--uiPageIndex;uiPageIndex < psPageArrayData->uiNumPages;--uiPageIndex)
+			{
+				_FreeOSPage(ui32CPUCacheFlags,
+							0,
+							IMG_TRUE,
+							IMG_TRUE,
+							ppsPageArray[uiPageIndex]);
+			}
+			eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES;
+			goto e_freed_pages;
+		}
+    }
+
+	/* Do the cache management as required */
+	eError = _ApplyOSPagesAttribute (ppsPageArray,
+									 psPageArrayData->uiNumPages,
+									 psPageArrayData->bZero,
+#if defined(CONFIG_X86)
+									 ppsUnsetPages,
+									 uiUnsetPagesIndex,
+#endif
+									 ui32CPUCacheFlags,
+									 gfp_flags);
+	
+e_freed_pages:
+#if defined(CONFIG_X86)
+	if (psPageArrayData->uiNumPages > PMR_UNSET_PAGES_STACK_ALLOC)
+	{
+		OSFreeMem(ppsUnsetPages);
+	}
+#endif
+	return eError;
+}
+
+#if defined(PHYSMEM_USING_HIGH_ALLOC_ORDER)
+static PVRSRV_ERROR
+_AllocOSHigherOrderPages(struct _PMR_OSPAGEARRAY_DATA_ *psPageArrayData,
+						 IMG_UINT32 ui32CPUCacheFlags,
+						 unsigned int gfp_flags)
+{
+	PVRSRV_ERROR eError;
+	IMG_UINT32 uiOrder;
+	IMG_UINT32 uiPageIndex;	
+	IMG_UINT32 uiMsbNumPages;
+	IMG_BOOL bPageFromPool = IMG_FALSE;
+	IMG_INT32 aiOrderCount[ALLOC_ORDER_ARRAY_SIZE];	
+	struct page **ppsPageArray = psPageArrayData->pagearray;
+	unsigned int gfp_flags_zero_order;
+	unsigned int gfp_flags_other_order;
+#if defined(CONFIG_X86)
+	/* On x86 we batch applying cache attributes by storing references
+	   to all pages that are not from the page pool */
+	struct page *apsUnsetPages[PMR_UNSET_PAGES_STACK_ALLOC];
+	struct page **ppsUnsetPages = apsUnsetPages;
+	IMG_UINT32 uiUnsetPagesIndex = 0;
+	
+	if (psPageArrayData->uiNumPages > PMR_UNSET_PAGES_STACK_ALLOC)
+	{
+		ppsUnsetPages = OSAllocMem(sizeof(struct page*) * psPageArrayData->uiNumPages);
+		if (ppsUnsetPages == NULL)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed alloc_pages metadata allocation", __FUNCTION__));
+			return PVRSRV_ERROR_OUT_OF_MEMORY;
+		}
+	}
+#endif
+
+	gfp_flags_zero_order = gfp_flags;
+	/* Disable retry/wait  */
+	gfp_flags_other_order = (gfp_flags | __GFP_NORETRY) & ~__GFP_WAIT;
+
+
+	/* Re-express uiNumPages in multi-order up to cut-off order */
+	for (uiOrder = 0; uiOrder <= g_uiCutOffOrder; ++uiOrder)
+	{
+		aiOrderCount[uiOrder] = psPageArrayData->uiNumPages & (1<<uiOrder) ? 1 : 0;
+	}
+	
+	/* Accumulate top order bits into cut-off order bin */
+	uiMsbNumPages =  psPageArrayData->uiNumPages >> (g_uiCutOffOrder+1);
+	aiOrderCount[g_uiCutOffOrder] += uiMsbNumPages ? uiMsbNumPages << 1 : 0;
+
+	/* Allocate variable higher-order pages as per order-array specification.
+	   There's currently no support for compound pages, the "minimum contiguity unit" 
+	   that is supported is equal to the _host_ cpu page size (i.e. PAGE_SHIFT) */
+	for (uiOrder=g_uiCutOffOrder, uiPageIndex=0; uiPageIndex < psPageArrayData->uiNumPages; )
+	{
+		/* Has this order bucket been exhausted */
+		for  ( ; ! aiOrderCount[uiOrder]; --uiOrder)
+			;
+
+		/* Alloc uiOrder pages at uiPageIndex */
+		eError = _AllocOSPage(ui32CPUCacheFlags,
+							uiOrder ? gfp_flags_other_order : gfp_flags_zero_order,
+							uiOrder,
+							  &ppsPageArray[uiPageIndex], &bPageFromPool);
+		if (eError == PVRSRV_OK)
+		{
+			IMG_UINT32 uiIdx;
+
+			if (uiOrder) 
+			{
+				split_page(ppsPageArray[uiPageIndex], uiOrder); 
+			}
+
+			for (uiIdx=0; uiIdx < (1 << uiOrder); ++uiIdx)
+			{
+				/* For higher order allocations, we need to return not just the 1st
+				   pointer but all pages in the order */
+				ppsPageArray[uiPageIndex+uiIdx] = &ppsPageArray[uiPageIndex][uiIdx];
+#if defined(CONFIG_X86)
+				if (!bPageFromPool)
+				{
+					ppsUnsetPages[uiUnsetPagesIndex+uiIdx] = ppsPageArray[uiPageIndex+uiIdx];
+				}
+			}
+
+			if (!bPageFromPool)
+			{
+				uiUnsetPagesIndex += (1 << uiOrder);
+			}
+#else
+			}
+#endif
+			/* Less one order allocation */
+			uiPageIndex += (1 << uiOrder);
+			aiOrderCount[uiOrder] -= 1;
+		}
+		else
+		{
+			if (uiOrder > 0)
+			{
+				/*
+				  The strategy employed to cope with memory fragmentation is two fold:
+				   - By speculating that it is _better_ to move any remaining failed order
+				     allocations to a much lower order (i.e. failed-order/2) _most_ likely
+				     to succeed (quick-failure-recovery).
+
+				   - By ensuring that other allocations benefit from this knowledge (failure-rate)
+				     by adjusting the global cut-off order variable used for such future 
+				     allocations (throttling up/down).
+				 */
+				IMG_INT32 uiFailedOrder = uiOrder;
+				IMG_INT32 uiLowOrder = uiFailedOrder >> 1;
+				g_uiCutOffOrder = uiFailedOrder - 1;
+
+				/* Accumulate remaining failed order into lower order */
+				for  ( ; aiOrderCount[uiFailedOrder]; --aiOrderCount[uiFailedOrder])
+				{
+					aiOrderCount[uiLowOrder] += 1 << (uiFailedOrder - uiLowOrder);
+				}
+			}
+			else
+			{
+				/* At order zero, there's nothing else left to do, so we must
+					unwind the series of order allocations hitherto */
+				PVR_DPF((PVR_DBG_ERROR,
+					 "physmem_osmem_linux.c: alloc_pages failed to honour request at %d (order: %d) of %d (%s)",
+					 uiPageIndex, uiOrder,
+					 psPageArrayData->uiNumPages,
+					 PVRSRVGetErrorStringKM(eError)));
+				
+				for(--uiPageIndex;uiPageIndex < psPageArrayData->uiNumPages;--uiPageIndex)
+				{
+					_FreeOSPage(ui32CPUCacheFlags,
+								0,
+								IMG_TRUE,
+								IMG_TRUE,
+								ppsPageArray[uiPageIndex]);
+				}
+				
+				eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES;
+				goto e_freed_pages;
+			}
+		}
+	}
+
+	/* Do the cache management as required */
+	eError = _ApplyOSPagesAttribute (ppsPageArray,
+									 psPageArrayData->uiNumPages,
+									 psPageArrayData->bZero,
+#if defined(CONFIG_X86)
+									 ppsUnsetPages,
+									 uiUnsetPagesIndex,
+#endif
+									 ui32CPUCacheFlags,
+									 gfp_flags);
+
+e_freed_pages:
+#if defined(CONFIG_X86)
+	if (psPageArrayData->uiNumPages > PMR_UNSET_PAGES_STACK_ALLOC)
+	{
+		OSFreeMem(ppsUnsetPages);
+	}
+#endif
+	return eError;
+}
+#endif
+
+static PVRSRV_ERROR
+_AllocOSPages(struct _PMR_OSPAGEARRAY_DATA_ **ppsPageArrayDataPtr)
+{
+	PVRSRV_ERROR eError;
+	IMG_UINT32 uiOrder;
+	IMG_UINT32 uiPageIndex;
+	IMG_UINT32 ui32CPUCacheFlags;
+#if defined(PHYSMEM_USING_HIGH_ALLOC_ORDER)
+	IMG_UINT32 uiUseHighOrderFlag;
+#endif
+	struct _PMR_OSPAGEARRAY_DATA_ *psPageArrayData = *ppsPageArrayDataPtr;
+	struct page **ppsPageArray = psPageArrayData->pagearray;
+	unsigned int gfp_flags;
+
+	PVR_ASSERT(!psPageArrayData->bHasOSPages);
+
+	/* For now we don't support compound pages */
+	uiOrder = psPageArrayData->uiLog2PageSize - PAGE_SHIFT;
+	ui32CPUCacheFlags = psPageArrayData->ui32CPUCacheFlags;
+	PVR_ASSERT(uiOrder == 0);
+
+	gfp_flags = GFP_USER | __GFP_NOWARN | __GFP_NOMEMALLOC;
+
+#if !defined(CONFIG_X86_64)
+	gfp_flags |= __GFP_HIGHMEM;
+#endif
+
+	if (psPageArrayData->bZero)
+	{
+		gfp_flags |= __GFP_ZERO;
+	}
+
+	/*
+		Unset memory type is set to true as although in the "normal" case
+		(where we free the page back to the pool) we don't want to unset
+		it, we _must_ unset it in the case where the page pool was full
+		and thus we have to give the page back to the OS.
+	*/
+	if (ui32CPUCacheFlags == PVRSRV_MEMALLOCFLAG_CPU_UNCACHED
+	    ||ui32CPUCacheFlags == PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE)
+	{
+		psPageArrayData->bUnsetMemoryType = IMG_TRUE;
+	}
+	else
+	{
+		psPageArrayData->bUnsetMemoryType = IMG_FALSE;
+	}
+
+#if defined(PHYSMEM_USING_HIGH_ALLOC_ORDER)
+	/* Multi-order allocations should be considered when size request >= threshold */
+	uiUseHighOrderFlag = psPageArrayData->uiNumPages >= PVR_LINUX_PHYSMEM_MIN_NUM_PAGES;
+	if (uiUseHighOrderFlag && g_uiCutOffOrder)
+	{
+		eError = _AllocOSHigherOrderPages(psPageArrayData, ui32CPUCacheFlags, gfp_flags);
+	}
+	else 
+	{
+		/* When to re-enable high-order is a trade-off (accuracy/ouija-board vs. simplicity) */
+		eError = _AllocOSZeroOrderPages(psPageArrayData, ui32CPUCacheFlags, gfp_flags);
+		g_uiCutOffOrder += uiUseHighOrderFlag;
+	}
+#else
+	eError = _AllocOSZeroOrderPages(psPageArrayData, ui32CPUCacheFlags, gfp_flags);
+#endif
+
+	if (eError == PVRSRV_OK)
+	{
+		for (uiPageIndex = 0; uiPageIndex < psPageArrayData->uiNumPages; uiPageIndex++)
+		{
+			/* Can't ask us to zero it and poison it */
+			PVR_ASSERT(!psPageArrayData->bZero || !psPageArrayData->bPoisonOnAlloc);
+
+			if (psPageArrayData->bPoisonOnAlloc)
+			{
+				_PoisonPages(ppsPageArray[uiPageIndex],
+							 uiOrder,
+							 _AllocPoison,
+							 _AllocPoisonSize);
+			}
+			
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+			/* Allocation is done a page at a time */
+			PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES, PAGE_SIZE);
+#else
+			{
+				IMG_CPU_PHYADDR sCPUPhysAddr;
+	
+				sCPUPhysAddr.uiAddr = page_to_phys(ppsPageArray[uiPageIndex]);
+				PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES,
+											 IMG_NULL,
+											 sCPUPhysAddr,
+											 PAGE_SIZE,
+											 IMG_NULL);
+			}
+#endif
+#endif
+		}
+
+		
+		/* OS Pages have been allocated */
+		psPageArrayData->bHasOSPages = IMG_TRUE;
+
+		PVR_DPF((PVR_DBG_MESSAGE, "physmem_osmem_linux.c: allocated OS memory for PMR @0x%p", psPageArrayData));
+	}
+	return eError;
+}
+
+
+/*
+	Note:
+	We must _only_ check bUnsetMemoryType in the case where we need to free
+	the page back to the OS since we may have to revert the cache properties
+	of the page to the default as given by the OS when it was allocated.
+*/
+static void
+_FreeOSPage(IMG_UINT32 ui32CPUCacheFlags,
+            IMG_UINT32 uiOrder,
+            IMG_BOOL bUnsetMemoryType,
+            IMG_BOOL bFreeToOS,
+            struct page *psPage)
+{
+	IMG_BOOL bAddedToPool = IMG_FALSE;
+#if defined (CONFIG_X86)
+	IMG_PVOID pvPageVAddr;
+#else
+	PVR_UNREFERENCED_PARAMETER(bUnsetMemoryType);
+#endif
+
+	/* Only zero order pages can be managed in the pool */
+	if ((uiOrder == 0) && (!bFreeToOS))
+	{
+		_PagePoolLock();
+		bAddedToPool = g_ui32PagePoolEntryCount < g_ui32PagePoolMaxEntries;
+		_PagePoolUnlock();
+
+		if (bAddedToPool)
+		{
+			if (!_AddEntryToPool(psPage, ui32CPUCacheFlags))
+			{
+				bAddedToPool = IMG_FALSE;
+			}
+		}
+	}
+
+	if (!bAddedToPool)
+	{
+#if defined(CONFIG_X86)
+		pvPageVAddr = page_address(psPage);
+		if (pvPageVAddr && bUnsetMemoryType == IMG_TRUE)
+		{
+			int ret;
+
+			ret = set_memory_wb((unsigned long)pvPageVAddr, 1);
+			if (ret)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s: Failed to reset page attribute", __FUNCTION__));
+			}
+		}
+#endif
+		__free_pages(psPage, uiOrder);
+	}
+}
+
+static PVRSRV_ERROR
+_FreeOSPagesArray(struct _PMR_OSPAGEARRAY_DATA_ *psPageArrayData)
+{
+	PVR_DPF((PVR_DBG_MESSAGE, "physmem_osmem_linux.c: freed OS memory for PMR @0x%p", psPageArrayData));
+
+	OSFreeMem(psPageArrayData);
+
+	return PVRSRV_OK;
+}
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+/* _FreeOSPages_MemStats: Depends on the bridge lock already being held */
+static void
+_FreeOSPages_MemStats(struct _PMR_OSPAGEARRAY_DATA_ *psPageArrayData)
+{
+	struct page **ppsPageArray;
+	#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+	IMG_UINT32 ui32PageIndex;
+	#endif
+
+	PVR_ASSERT(psPageArrayData->bHasOSPages);
+
+	ppsPageArray = psPageArrayData->pagearray;
+
+	#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+	#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+		PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES, psPageArrayData->uiNumPages * PAGE_SIZE);
+	#else
+		for(ui32PageIndex = 0; ui32PageIndex < psPageArrayData->uiNumPages; ui32PageIndex++)
+		{
+			IMG_CPU_PHYADDR sCPUPhysAddr;
+
+			sCPUPhysAddr.uiAddr = page_to_phys(ppsPageArray[ui32PageIndex]);
+			PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES, sCPUPhysAddr.uiAddr);
+		}
+	#endif
+	#endif
+}
+#endif /* PVRSRV_ENABLE_PROCESS_STATS */
+
+static PVRSRV_ERROR
+_FreeOSPages_FreePages(struct _PMR_OSPAGEARRAY_DATA_ *psPageArrayData)
+{
+	PVRSRV_ERROR eError;
+	IMG_UINT32 uiOrder;
+	IMG_UINT32 uiPageIndex;
+	struct page **ppsPageArray;
+	IMG_BOOL bAddedToPool = IMG_FALSE;
+
+#if defined (CONFIG_X86)
+	struct page *apsUnsetPages[PMR_UNSET_PAGES_STACK_ALLOC];
+	struct page **ppsUnsetPages = apsUnsetPages;
+	IMG_UINT32 uiUnsetPagesIndex = 0;
+
+	if (psPageArrayData->uiNumPages > PMR_UNSET_PAGES_STACK_ALLOC)
+	{
+		/* OSAllocMemstatMem required because this code may be run without the bridge lock held */
+		ppsUnsetPages = OSAllocMemstatMem(sizeof(struct page*) * psPageArrayData->uiNumPages);
+		if (ppsUnsetPages == NULL)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed free_pages metadata allocation", __FUNCTION__));
+			return PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto e_exit;
+		}
+	}
+#endif
+
+	PVR_ASSERT(psPageArrayData->bHasOSPages);
+
+	ppsPageArray = psPageArrayData->pagearray;
+
+	 /* For now we don't support compound pages */
+	uiOrder = psPageArrayData->uiLog2PageSize - PAGE_SHIFT;
+	PVR_ASSERT(uiOrder == 0);
+
+	for (uiPageIndex = 0;
+	     uiPageIndex < psPageArrayData->uiNumPages;
+	     uiPageIndex++)
+	{
+		if (psPageArrayData->bPoisonOnFree)
+		{
+			_PoisonPages(ppsPageArray[uiPageIndex],
+			             uiOrder,
+			             _FreePoison,
+			             _FreePoisonSize);
+		}
+
+		/* Only zero order pages can be managed in the pool */
+		if (uiOrder == 0)
+		{
+			_PagePoolLock();
+			bAddedToPool = g_ui32PagePoolEntryCount < g_ui32PagePoolMaxEntries;
+			_PagePoolUnlock();
+
+			if (bAddedToPool)
+			{
+				if (!_AddEntryToPool(ppsPageArray[uiPageIndex], psPageArrayData->ui32CPUCacheFlags))
+				{
+					bAddedToPool = IMG_FALSE;
+				}
+			}
+		}
+
+		if (!bAddedToPool)
+		{
+#if defined(CONFIG_X86)
+			if (psPageArrayData->bUnsetMemoryType == IMG_TRUE)
+			{
+				/* Keeping track of the pages for which the caching needs to change */
+				ppsUnsetPages[uiUnsetPagesIndex] = ppsPageArray[uiPageIndex];
+				uiUnsetPagesIndex++;
+			}
+			else
+#endif
+			{
+				_FreeOSPage(0,
+				            uiOrder,
+				            IMG_FALSE,
+				            IMG_TRUE,
+				            ppsPageArray[uiPageIndex]);
+			}
+		}
+	}
+
+#if defined(CONFIG_X86)
+	if (uiUnsetPagesIndex != 0)
+	{
+		int ret;
+		ret = set_pages_array_wb(ppsUnsetPages, uiUnsetPagesIndex);
+
+		if (ret)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to reset page attributes", __FUNCTION__));
+		}
+
+		for (uiPageIndex = 0;
+		     uiPageIndex < uiUnsetPagesIndex;
+		     uiPageIndex++)
+		{
+			_FreeOSPage(0,
+			            uiOrder,
+			            IMG_FALSE,
+			            IMG_TRUE,
+			            ppsUnsetPages[uiPageIndex]);
+		}
+	}
+
+	if (psPageArrayData->uiNumPages > PMR_UNSET_PAGES_STACK_ALLOC)
+	{
+		OSFreeMemstatMem(ppsUnsetPages);
+	}
+#endif
+
+	eError = PVRSRV_OK;
+
+	psPageArrayData->bHasOSPages = IMG_FALSE;
+
+#if defined(CONFIG_X86)
+e_exit:
+#endif
+	return eError;
+}
+
+static PVRSRV_ERROR
+_CleanupThread_FreePagesAndPageArrayData(void *pvData)
+{
+	struct _PMR_OSPAGEARRAY_DATA_ *psPageArrayData = pvData;
+	PVRSRV_ERROR eError;
+
+	atomic_sub((int) psPageArrayData->uiNumPages, &g_uiNumDeferredFreePages);
+
+	eError = _FreeOSPages_FreePages(psPageArrayData);
+
+	if(eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "_FreeOSPages_FreePages failed"));
+		goto err_out;
+	}
+
+	eError = _FreeOSPagesArray(psPageArrayData);
+
+err_out:
+	return eError;
+}
+
+/* clone a PMR_OSPAGEARRAY_DATA structure, including making a copy of
+ * the the list of physical pages it owns.
+ * returns a pointer to the newly allocated PMR_OSPAGEARRAY_DATA structure.
+ */
+static struct _PMR_OSPAGEARRAY_DATA_ *_CloneOSPageArrayData(struct _PMR_OSPAGEARRAY_DATA_ *psPageArrayDataIn)
+{
+	struct _PMR_OSPAGEARRAY_DATA_ *psPageArrayDataOut;
+	size_t uiStructureSize;
+
+	uiStructureSize = sizeof(struct _PMR_OSPAGEARRAY_DATA_) +
+				sizeof(struct page *) * psPageArrayDataIn->uiNumPages;
+
+	psPageArrayDataOut = OSAllocMemstatMem(uiStructureSize);
+
+	if(psPageArrayDataOut == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "_CloneOSPageArrayData: Failed to clone PMR_OSPAGEARRAY_DATA"));
+		return NULL;
+	}
+
+	memcpy(psPageArrayDataOut, psPageArrayDataIn, uiStructureSize);
+
+	psPageArrayDataOut->pagearray = (void *) ((char *) psPageArrayDataOut) +
+	sizeof(struct _PMR_OSPAGEARRAY_DATA_);
+
+	return psPageArrayDataOut;
+}
+
+static PVRSRV_ERROR
+_FreeOSPages(struct _PMR_OSPAGEARRAY_DATA_ *psPageArrayData, IMG_BOOL bFreePageArray)
+{
+	PVRSRV_ERROR eError;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+ 	_FreeOSPages_MemStats(psPageArrayData);
+#endif
+
+	if((atomic_read(&g_uiNumDeferredFreePages) + psPageArrayData->uiNumPages) <= MAX_OUTSTANDING_DEFERRED_FREE_PAGES)
+	{
+		struct _PMR_OSPAGEARRAY_DATA_ *psPageArrayToFree;
+
+		if(bFreePageArray)
+		{
+			/* the PMR_OSPAGEARRAY_DATA structure is also being
+			 * freed, so the whole structure as-is can be pushed to
+			 * the cleanup thread, which can free the PMR_OSPAGEARRAY_DATA
+			 * structure as well as the phys pages
+			 */
+			psPageArrayToFree = psPageArrayData;
+		}
+		else
+		{
+			/* the caller does not want the PMR_OSPAGEARRAY_DATA
+			 * structure to be freed, only the phys pages.
+			 * to acommodate this, the PMR_OSPAGEARRAY_DATA structure
+			 * is cloned and ownership of the pages is moved to the
+			 * clone, then the clone is pushed onto the cleanup thread
+			 * work list. Now psPageArrayData owns no pages, as the caller
+			 * expects.
+			 */
+
+			 struct _PMR_OSPAGEARRAY_DATA_ *psClone;
+
+			 psClone = _CloneOSPageArrayData(psPageArrayData);
+
+			 if(psClone == NULL)
+			 {
+			 	/* cloning failed so go ahead and free the
+				 * pages immediately
+				 */
+				 goto nodefer;
+			 }
+
+			 /* the clone takes ownership of the pages, so mark the
+			  * original structure as not having any pages
+			  */
+			  psPageArrayData->bHasOSPages = IMG_FALSE;
+
+			  psPageArrayToFree = psClone;
+		}
+
+		atomic_add((int) psPageArrayData->uiNumPages, &g_uiNumDeferredFreePages);
+		psPageArrayToFree->sCleanupThreadFn.pfnFree = _CleanupThread_FreePagesAndPageArrayData;
+		psPageArrayToFree->sCleanupThreadFn.pvData = psPageArrayToFree;
+		psPageArrayToFree->sCleanupThreadFn.ui32RetryCount = CLEANUP_THREAD_RETRY_COUNT_DEFAULT;
+		PVRSRVCleanupThreadAddWork(&psPageArrayToFree->sCleanupThreadFn);
+
+		return PVRSRV_OK;
+	}
+nodefer:
+	/* we are not deferring, so free the pages immediately */
+
+ 	eError = _FreeOSPages_FreePages(psPageArrayData);
+
+	if(eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "_FreeOSPages_FreePages failed"));
+		goto err_out;
+	}
+
+	if(bFreePageArray)
+	{
+		eError = _FreeOSPagesArray(psPageArrayData);
+	}
+
+err_out:
+    return eError;
+}
+
+/*
+ *
+ * Implementation of callback functions
+ *
+ */
+
+/* destructor func is called after last reference disappears, but
+   before PMR itself is freed. */
+static PVRSRV_ERROR
+PMRFinalizeOSMem(PMR_IMPL_PRIVDATA pvPriv
+                 //struct _PMR_OSPAGEARRAY_DATA_ *psOSPageArrayData
+                 )
+{
+    PVRSRV_ERROR eError;
+    struct _PMR_OSPAGEARRAY_DATA_ *psOSPageArrayData;
+
+    psOSPageArrayData = pvPriv;
+
+    /* Conditionally do the PDump free, because if CreatePMR failed we
+       won't have done the PDump MALLOC.  */
+    if (psOSPageArrayData->bPDumpMalloced)
+    {
+        PDumpPMRFree(psOSPageArrayData->hPDumpAllocInfo);
+    }
+
+	/*  We can't free pages until now. */
+	if (psOSPageArrayData->bHasOSPages)
+	{
+		eError = _FreeOSPages(psOSPageArrayData, IMG_TRUE);
+		PVR_ASSERT (eError == PVRSRV_OK); /* can we do better? */
+	}
+	else
+	{
+	    eError = _FreeOSPagesArray(psOSPageArrayData);
+	    PVR_ASSERT (eError == PVRSRV_OK); /* can we do better? */
+	}
+
+    return PVRSRV_OK;
+}
+
+/* callback function for locking the system physical page addresses.
+   This function must be called before the lookup address func. */
+static PVRSRV_ERROR
+PMRLockSysPhysAddressesOSMem(PMR_IMPL_PRIVDATA pvPriv,
+                             // struct _PMR_OSPAGEARRAY_DATA_ *psOSPageArrayData,
+                             IMG_UINT32 uiLog2DevPageSize)
+{
+    PVRSRV_ERROR eError;
+    struct _PMR_OSPAGEARRAY_DATA_ *psOSPageArrayData;
+
+    psOSPageArrayData = pvPriv;
+
+    if (psOSPageArrayData->bOnDemand)
+    {
+		/* Allocate Memory for deferred allocation */
+    	eError = _AllocOSPages(&psOSPageArrayData);
+    	if (eError != PVRSRV_OK)
+    	{
+    		return eError;
+    	}
+    }
+
+    /* Physical page addresses are already locked down in this
+       implementation, so there is no need to acquire physical
+       addresses.  We do need to verify that the physical contiguity
+       requested by the caller (i.e. page size of the device they
+       intend to map this memory into) is compatible with (i.e. not of
+       coarser granularity than) our already known physicial
+       contiguity of the pages */
+    if (uiLog2DevPageSize > psOSPageArrayData->uiLog2PageSize)
+    {
+        /* or NOT_MAPPABLE_TO_THIS_PAGE_SIZE ? */
+        eError = PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY;
+        return eError;
+    }
+
+    eError = PVRSRV_OK;
+    return eError;
+
+}
+
+static PVRSRV_ERROR
+PMRUnlockSysPhysAddressesOSMem(PMR_IMPL_PRIVDATA pvPriv
+                               //struct _PMR_OSPAGEARRAY_DATA_ *psOSPageArrayData
+                               )
+{
+    /* Just drops the refcount. */
+
+    PVRSRV_ERROR eError = PVRSRV_OK;
+    struct _PMR_OSPAGEARRAY_DATA_ *psOSPageArrayData;
+
+    psOSPageArrayData = pvPriv;
+    if (psOSPageArrayData->bOnDemand)
+    {
+		/* Free Memory for deferred allocation */
+    	eError = _FreeOSPages(psOSPageArrayData, IMG_FALSE);
+    	if (eError != PVRSRV_OK)
+    	{
+    		return eError;
+    	}
+    }
+
+    PVR_ASSERT (eError == PVRSRV_OK);
+    return eError;
+}
+
+/* N.B.  It is assumed that PMRLockSysPhysAddressesOSMem() is called _before_ this function! */
+static PVRSRV_ERROR
+PMRSysPhysAddrOSMem(PMR_IMPL_PRIVDATA pvPriv,
+                    IMG_UINT32 ui32NumOfPages,
+                    IMG_DEVMEM_OFFSET_T *puiOffset,
+					IMG_BOOL *pbValid,
+                    IMG_DEV_PHYADDR *psDevPAddr)
+{
+    const struct _PMR_OSPAGEARRAY_DATA_ *psOSPageArrayData = pvPriv;
+    struct page **ppsPageArray = psOSPageArrayData->pagearray;
+   	IMG_UINT32 uiPageSize = 1U << psOSPageArrayData->uiLog2PageSize;
+	IMG_UINT32 uiInPageOffset;
+    IMG_UINT32 uiPageIndex;
+    IMG_UINT32 idx;
+
+    for (idx=0; idx < ui32NumOfPages; idx++)
+    {
+		if (pbValid[idx])
+		{	
+			uiPageIndex = puiOffset[idx] >> psOSPageArrayData->uiLog2PageSize;
+	    	uiInPageOffset = puiOffset[idx] - ((IMG_DEVMEM_OFFSET_T)uiPageIndex << psOSPageArrayData->uiLog2PageSize);
+
+			PVR_ASSERT(uiPageIndex < psOSPageArrayData->uiNumPages);
+			PVR_ASSERT(uiInPageOffset < uiPageSize);
+
+			psDevPAddr[idx].uiAddr = page_to_phys(ppsPageArray[uiPageIndex]) + uiInPageOffset;
+		}
+    }
+
+    return PVRSRV_OK;
+}
+
+typedef struct _PMR_OSPAGEARRAY_KERNMAP_DATA_ {
+	void *pvBase;
+	IMG_UINT32 ui32PageCount;
+} PMR_OSPAGEARRAY_KERNMAP_DATA;
+
+static PVRSRV_ERROR
+PMRAcquireKernelMappingDataOSMem(PMR_IMPL_PRIVDATA pvPriv,
+                                 IMG_SIZE_T uiOffset,
+                                 IMG_SIZE_T uiSize,
+                                 void **ppvKernelAddressOut,
+                                 IMG_HANDLE *phHandleOut,
+                                 PMR_FLAGS_T ulFlags)
+{
+    PVRSRV_ERROR eError;
+    struct _PMR_OSPAGEARRAY_DATA_ *psOSPageArrayData;
+    void *pvAddress;
+    pgprot_t prot = PAGE_KERNEL;
+    IMG_UINT32 ui32CPUCacheFlags;
+    IMG_UINT32 ui32PageOffset;
+    IMG_SIZE_T uiMapOffset;
+    IMG_UINT32 ui32PageCount;
+    PMR_OSPAGEARRAY_KERNMAP_DATA *psData;
+
+    psOSPageArrayData = pvPriv;
+	ui32CPUCacheFlags = DevmemCPUCacheMode(ulFlags);
+
+	/*
+		Zero offset and size as a special meaning which means map in the
+		whole of the PMR, this is due to fact that the places that call
+		this callback might not have access to be able to determine the
+		physical size
+	*/
+	if ((uiOffset == 0) && (uiSize == 0))
+	{
+		ui32PageOffset = 0;
+		uiMapOffset = 0;
+		ui32PageCount = psOSPageArrayData->uiNumPages;
+	}
+	else
+	{
+		IMG_SIZE_T uiEndoffset;
+
+		ui32PageOffset = uiOffset >> psOSPageArrayData->uiLog2PageSize;
+		uiMapOffset = uiOffset - (ui32PageOffset << psOSPageArrayData->uiLog2PageSize);
+		uiEndoffset = uiOffset + uiSize - 1;
+		// Add one as we want the count, not the offset
+		ui32PageCount = (uiEndoffset >> psOSPageArrayData->uiLog2PageSize) + 1;
+		ui32PageCount -= ui32PageOffset;
+	}
+
+    if (psOSPageArrayData->uiLog2PageSize != PAGE_SHIFT)
+    {
+        /* we only know how to use vmap on allocations comprising
+           individual pages.  Higher-order "pages" are not supported
+           with this. */
+        eError = PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY;
+        goto e0;
+    }
+
+	switch (ui32CPUCacheFlags)
+	{
+		case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
+				prot = pgprot_noncached(prot);
+				break;
+
+		case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE:
+				prot = pgprot_writecombine(prot);
+				break;
+
+		case PVRSRV_MEMALLOCFLAG_CPU_CACHED:
+				break;
+
+		default:
+				eError = PVRSRV_ERROR_INVALID_PARAMS;
+				goto e0;
+	}
+	
+	psData = OSAllocMem(sizeof(PMR_OSPAGEARRAY_KERNMAP_DATA));
+	if (psData == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e0;
+	}
+	
+	pvAddress = vm_map_ram(&psOSPageArrayData->pagearray[ui32PageOffset],
+						   ui32PageCount,
+						   -1,
+						   prot);
+	if (pvAddress == IMG_NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e1;
+	}
+
+    *ppvKernelAddressOut = pvAddress + uiMapOffset;
+    psData->pvBase = pvAddress;
+    psData->ui32PageCount = ui32PageCount;
+    *phHandleOut = psData;
+
+    return PVRSRV_OK;
+
+    /*
+      error exit paths follow
+    */
+ e1:
+    OSFreeMem(psData);
+ e0:
+    PVR_ASSERT(eError != PVRSRV_OK);
+    return eError;
+}
+static void PMRReleaseKernelMappingDataOSMem(PMR_IMPL_PRIVDATA pvPriv,
+                                                 IMG_HANDLE hHandle)
+{
+    struct _PMR_OSPAGEARRAY_DATA_ *psOSPageArrayData;
+    PMR_OSPAGEARRAY_KERNMAP_DATA *psData;
+
+    psOSPageArrayData = pvPriv;
+    psData = hHandle;
+    vm_unmap_ram(psData->pvBase, psData->ui32PageCount);
+    OSFreeMem(psData);
+}
+
+static PMR_IMPL_FUNCTAB _sPMROSPFuncTab = {
+    .pfnLockPhysAddresses = &PMRLockSysPhysAddressesOSMem,
+    .pfnUnlockPhysAddresses = &PMRUnlockSysPhysAddressesOSMem,
+    .pfnDevPhysAddr = &PMRSysPhysAddrOSMem,
+    .pfnAcquireKernelMappingData = &PMRAcquireKernelMappingDataOSMem,
+    .pfnReleaseKernelMappingData = &PMRReleaseKernelMappingDataOSMem,
+    .pfnReadBytes = IMG_NULL,
+    .pfnWriteBytes = IMG_NULL,
+    .pfnFinalize = &PMRFinalizeOSMem
+};
+
+static PVRSRV_ERROR
+_NewOSAllocPagesPMR(PVRSRV_DEVICE_NODE *psDevNode,
+                    IMG_DEVMEM_SIZE_T uiSize,
+					IMG_DEVMEM_SIZE_T uiChunkSize,
+					IMG_UINT32 ui32NumPhysChunks,
+					IMG_UINT32 ui32NumVirtChunks,
+					IMG_BOOL *pabMappingTable,
+                    IMG_UINT32 uiLog2PageSize,
+                    PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                    PMR **ppsPMRPtr)
+{
+    PVRSRV_ERROR eError;
+    PVRSRV_ERROR eError2;
+    PMR *psPMR;
+    struct _PMR_OSPAGEARRAY_DATA_ *psPrivData;
+    IMG_HANDLE hPDumpAllocInfo = IMG_NULL;
+    PMR_FLAGS_T uiPMRFlags;
+    IMG_BOOL bZero;
+    IMG_BOOL bPoisonOnAlloc;
+    IMG_BOOL bPoisonOnFree;
+    IMG_BOOL bOnDemand = ((uiFlags & PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC) > 0);
+	IMG_BOOL bCpuLocal = ((uiFlags & PVRSRV_MEMALLOCFLAG_CPU_LOCAL) > 0);
+	IMG_UINT32 ui32CPUCacheFlags = (IMG_UINT32) DevmemCPUCacheMode(uiFlags);
+
+
+    if (uiFlags & PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC)
+    {
+        bZero = IMG_TRUE;
+    }
+    else
+    {
+        bZero = IMG_FALSE;
+    }
+
+    if (uiFlags & PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC)
+    {
+        bPoisonOnAlloc = IMG_TRUE;
+    }
+    else
+    {
+        bPoisonOnAlloc = IMG_FALSE;
+    }
+
+    if (uiFlags & PVRSRV_MEMALLOCFLAG_POISON_ON_FREE)
+    {
+        bPoisonOnFree = IMG_TRUE;
+    }
+    else
+    {
+        bPoisonOnFree = IMG_FALSE;
+    }
+
+    if ((uiFlags & PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) &&
+        (uiFlags & PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC))
+    {
+        /* Zero on Alloc and Poison on Alloc are mutually exclusive */
+        eError = PVRSRV_ERROR_INVALID_PARAMS;
+        goto errorOnParam;
+    }
+
+	/* Silently round up alignment/pagesize if request was less that
+	   PAGE_SHIFT, because it would never be harmful for memory to be
+	   _more_ contiguous that was desired */
+	uiLog2PageSize = PAGE_SHIFT > uiLog2PageSize
+		? PAGE_SHIFT
+		: uiLog2PageSize;
+
+	/* Create Array structure that hold the physical pages */
+	eError = _AllocOSPageArray(uiChunkSize * ui32NumPhysChunks,
+						   uiLog2PageSize,
+						   bZero,
+						   bPoisonOnAlloc,
+						   bPoisonOnFree,
+						   bOnDemand,
+						   ui32CPUCacheFlags,
+						   &psPrivData);
+	if (eError != PVRSRV_OK)
+	{
+		goto errorOnAllocPageArray;
+	}
+
+	if (!bOnDemand)
+	{
+		/* Allocate the physical pages */
+		eError = _AllocOSPages(&psPrivData);
+		if (eError != PVRSRV_OK)
+		{
+			goto errorOnAllocPages;
+		}
+	}
+
+    /* In this instance, we simply pass flags straight through.
+
+       Generically, uiFlags can include things that control the PMR
+       factory, but we don't need any such thing (at the time of
+       writing!), and our caller specifies all PMR flags so we don't
+       need to meddle with what was given to us.
+    */
+    uiPMRFlags = (PMR_FLAGS_T)(uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK);
+    /* check no significant bits were lost in cast due to different
+       bit widths for flags */
+    PVR_ASSERT(uiPMRFlags == (uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK));
+
+    if (bOnDemand)
+    {
+    	PDUMPCOMMENT("Deferred Allocation PMR (UMA)");
+    }
+    if (bCpuLocal)
+    {
+    	PDUMPCOMMENT("CPU_LOCAL allocation requested");
+    }
+    eError = PMRCreatePMR(psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL],
+                          uiSize,
+                          uiChunkSize,
+                          ui32NumPhysChunks,
+                          ui32NumVirtChunks,
+                          pabMappingTable,
+                          uiLog2PageSize,
+                          uiPMRFlags,
+                          "PMROSAP",
+                          &_sPMROSPFuncTab,
+                          psPrivData,
+                          &psPMR,
+                          &hPDumpAllocInfo,
+    					  IMG_FALSE);
+    if (eError != PVRSRV_OK)
+    {
+        goto errorOnCreate;
+    }
+
+	psPrivData->hPDumpAllocInfo = hPDumpAllocInfo;
+	psPrivData->bPDumpMalloced = IMG_TRUE;
+
+    *ppsPMRPtr = psPMR;
+    return PVRSRV_OK;
+
+errorOnCreate:
+	if (!bOnDemand)
+	{
+		eError2 = _FreeOSPages(psPrivData, IMG_FALSE);
+		PVR_ASSERT(eError2 == PVRSRV_OK);
+	}
+
+errorOnAllocPages:
+	eError2 = _FreeOSPagesArray(psPrivData);
+	PVR_ASSERT(eError2 == PVRSRV_OK);
+
+errorOnAllocPageArray:
+errorOnParam:
+    PVR_ASSERT(eError != PVRSRV_OK);
+    return eError;
+}
+
+PVRSRV_ERROR
+PhysmemNewOSRamBackedPMR(PVRSRV_DEVICE_NODE *psDevNode,
+                         IMG_DEVMEM_SIZE_T uiSize,
+						 IMG_DEVMEM_SIZE_T uiChunkSize,
+						 IMG_UINT32 ui32NumPhysChunks,
+						 IMG_UINT32 ui32NumVirtChunks,
+						 IMG_BOOL *pabMappingTable,
+                         IMG_UINT32 uiLog2PageSize,
+                         PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                         PMR **ppsPMRPtr)
+{
+    return _NewOSAllocPagesPMR(psDevNode,
+                               uiSize,
+                               uiChunkSize,
+                               ui32NumPhysChunks,
+                               ui32NumVirtChunks,
+                               pabMappingTable,
+                               uiLog2PageSize,
+                               uiFlags,
+                               ppsPMRPtr);
+}
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/physmem_osmem_linux.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/physmem_osmem_linux.h
new file mode 100644
index 0000000..f30f964
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/physmem_osmem_linux.h
@@ -0,0 +1,49 @@
+/*************************************************************************/ /*!
+@File
+@Title          Linux OS physmem implementation
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __PHYSMEM_OSMEM_LINUX_H__
+#define __PHYSMEM_OSMEM_LINUX_H__
+
+void LinuxInitPagePool(void);
+void LinuxDeinitPagePool(void);
+
+#endif /* __PHYSMEM_OSMEM_LINUX_H__ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/physmem_tdmetacode_linux.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/physmem_tdmetacode_linux.c
new file mode 100644
index 0000000..789ed63
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/physmem_tdmetacode_linux.c
@@ -0,0 +1,386 @@
+/*************************************************************************/ /*!
+@File
+@Title          Implementation of PMR functions for Trusted Device firmware code memory
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Part of the memory management.  This module is responsible for
+                implementing the function callbacks for physical memory borrowed
+                from that normally managed by the operating system.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* include5/ */
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+
+/* services/server/include/ */
+#include "allocmem.h"
+#include "osfunc.h"
+#include "pdump_physmem.h"
+#include "pdump_km.h"
+#include "pmr.h"
+#include "pmr_impl.h"
+
+/* ourselves */
+#include "physmem_osmem.h"
+
+#if defined(PVR_RI_DEBUG)
+#include "ri_server.h"
+#endif
+
+#include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/mm_types.h>
+#include <linux/vmalloc.h>
+#include <linux/gfp.h>
+#include <linux/sched.h>
+#include <asm/io.h>
+#if defined(CONFIG_X86)
+#include <asm/cacheflush.h>
+#endif
+
+#include "rgxdevice.h"
+
+/* This is a placeholder implementation of a PMR factory to wrap allocations into
+   the protected META code regions. It has been consciously heavily inspired by the
+   standard osmem PMR factory to supply dummy functionality. Most things here will
+   change in a real implementation.
+
+   Your starting point for re-implementing this module should be by inspecting the 
+   sTDMETACodePMRFuncTab structure below and determining which callbacks you need
+   to implement for your system.
+*/
+
+typedef struct {
+	void *token;
+	IMG_UINT32 ui32Log2PageSizeBytes;
+	struct page **apsPageArray;
+	IMG_UINT64 ui64NumPages;
+
+	PHYS_HEAP *psTDMetaCodePhysHeap;
+    IMG_HANDLE hPDumpAllocInfo;
+} sTDMetaCodePageList;
+
+static void
+_FreeTDMetaCodePageContainer(void *pvPagecontainer)
+{
+	sTDMetaCodePageList *psPageContainer = (sTDMetaCodePageList *) pvPagecontainer;
+
+	if(! psPageContainer)
+	{
+		return;
+	}
+
+	if(psPageContainer->apsPageArray)
+	{
+		IMG_UINT64 i;
+		for(i = 0; i < psPageContainer->ui64NumPages; i++)
+		{
+			if(psPageContainer->apsPageArray[i])
+			{
+				__free_page(psPageContainer->apsPageArray[i]);
+			}
+		}
+		OSFreeMem(psPageContainer->apsPageArray);
+	}
+
+	PhysHeapRelease(psPageContainer->psTDMetaCodePhysHeap);
+
+    PDumpPMRFree(psPageContainer->hPDumpAllocInfo);
+
+    OSFreeMem(psPageContainer);
+}
+
+static PVRSRV_ERROR
+PMRSysPhysAddrTDMetaCode(PMR_IMPL_PRIVDATA pvPriv,
+                         IMG_UINT32 ui32NumOfPages,
+                         IMG_DEVMEM_OFFSET_T *puiOffset,
+						 IMG_BOOL *pbValid,
+                         IMG_DEV_PHYADDR *psDevPAddr)
+{
+	sTDMetaCodePageList *psPageContainer = (sTDMetaCodePageList *) pvPriv;
+	IMG_UINT64 ui64PageNum;
+	IMG_UINT32 idx;
+
+	for (idx=0; idx < ui32NumOfPages; idx++)
+	{
+		if (pbValid[idx])
+		{
+			IMG_UINT32 ui32PageOffset;
+
+			ui64PageNum = puiOffset[idx] >> psPageContainer->ui32Log2PageSizeBytes;
+			ui32PageOffset = puiOffset[idx] - (ui64PageNum << psPageContainer->ui32Log2PageSizeBytes);
+			PVR_ASSERT(ui64PageNum < psPageContainer->ui64NumPages);
+
+			psDevPAddr[idx].uiAddr = page_to_phys(psPageContainer->apsPageArray[ui64PageNum]) + ui32PageOffset;
+		}
+	}
+	
+	return PVRSRV_OK;
+}
+
+
+static PVRSRV_ERROR
+PMRFinalizeTDMetaCode(PMR_IMPL_PRIVDATA pvPriv)
+{
+	_FreeTDMetaCodePageContainer((void *) pvPriv);
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR
+PMRReadBytesTDMetaCode(PMR_IMPL_PRIVDATA pvPriv,
+                       IMG_DEVMEM_OFFSET_T uiOffset,
+                       IMG_UINT8 *pcBuffer,
+                       IMG_SIZE_T uiBufSz,
+                       IMG_SIZE_T *puiNumBytes)
+{
+	sTDMetaCodePageList *psPageContainer = (sTDMetaCodePageList *) pvPriv;
+    IMG_UINT8 *pvMapping;
+	IMG_UINT32 uiPageSize = 1 << psPageContainer->ui32Log2PageSizeBytes;
+   	IMG_UINT32 uiPageIndex;
+	IMG_UINT32 uiReadOffset;
+	IMG_UINT32 uiReadBytes;
+
+	*puiNumBytes = 0;
+	
+	while(uiBufSz)
+	{
+    	uiPageIndex = uiOffset >> psPageContainer->ui32Log2PageSizeBytes;
+		uiReadOffset = uiOffset - uiPageIndex * uiPageSize;
+		uiReadBytes = uiPageSize - uiReadOffset;
+
+		if(uiReadBytes > uiBufSz)
+		{
+			uiReadBytes = uiBufSz;
+		}
+		
+        pvMapping = kmap(psPageContainer->apsPageArray[uiPageIndex]);
+        PVR_ASSERT(pvMapping);
+        memcpy(pcBuffer, pvMapping + uiReadOffset, uiReadBytes);
+        kunmap(psPageContainer->apsPageArray[uiPageIndex]);
+		
+		uiBufSz -= uiReadBytes;
+		pcBuffer += uiReadBytes;
+		*puiNumBytes += uiReadBytes;
+
+		uiOffset += uiReadBytes;
+	}
+    return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR
+PMRKernelMapTDMetaCode(PMR_IMPL_PRIVDATA pvPriv,
+                       IMG_SIZE_T uiOffset,
+                       IMG_SIZE_T uiSize,
+                       void **ppvKernelAddressOut,
+                       IMG_HANDLE *phHandleOut,
+                       PMR_FLAGS_T ulFlags)
+{
+    sTDMetaCodePageList *psPageContainer;
+    void *pvAddress;
+
+    psPageContainer = pvPriv;
+
+	pvAddress = vm_map_ram(psPageContainer->apsPageArray,
+						   psPageContainer->ui64NumPages,
+						   -1,
+						   PAGE_KERNEL);
+
+	if(! pvAddress)
+	{
+		return PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING;
+	}
+
+    *ppvKernelAddressOut = pvAddress + uiOffset;
+    *phHandleOut = pvAddress;
+
+    return PVRSRV_OK;
+}
+
+static void
+PMRKernelUnmapTDMetaCode(PMR_IMPL_PRIVDATA pvPriv,
+                         IMG_HANDLE hHandle)
+{
+    sTDMetaCodePageList *psPageContainer;
+    psPageContainer = pvPriv;
+    vm_unmap_ram(hHandle, psPageContainer->ui64NumPages);
+}
+
+static PMR_IMPL_FUNCTAB sTDMETACodePMRFuncTab = {
+	.pfnLockPhysAddresses = IMG_NULL,           /* pages are always available in these PMRs */
+	.pfnUnlockPhysAddresses = IMG_NULL,         /* as above */
+	.pfnDevPhysAddr = PMRSysPhysAddrTDMetaCode,
+	.pfnPDumpSymbolicAddr = IMG_NULL,           /* nothing special needed */
+	.pfnAcquireKernelMappingData = PMRKernelMapTDMetaCode,
+	.pfnReleaseKernelMappingData = PMRKernelUnmapTDMetaCode,
+	.pfnReadBytes = PMRReadBytesTDMetaCode,
+	.pfnFinalize = PMRFinalizeTDMetaCode
+};
+
+
+static PVRSRV_ERROR
+_AllocTDMetaCodePageContainer(IMG_UINT64 ui64NumPages,
+                              IMG_UINT32 uiLog2PageSize,
+                              PHYS_HEAP *psTDMetaCodePhysHeap,
+                              void **ppvPageContainer)
+{
+	IMG_UINT64 i;
+	PVRSRV_ERROR eStatus = PVRSRV_OK;
+	sTDMetaCodePageList *psPageContainer;
+
+	psPageContainer = OSAllocMem(sizeof(sTDMetaCodePageList));
+	if(!psPageContainer)
+	{
+		eStatus = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail;
+	}
+	psPageContainer->ui32Log2PageSizeBytes = uiLog2PageSize;
+	psPageContainer->ui64NumPages = ui64NumPages;
+	psPageContainer->psTDMetaCodePhysHeap = psTDMetaCodePhysHeap;
+	psPageContainer->apsPageArray = OSAllocMem(ui64NumPages * sizeof(psPageContainer->apsPageArray[0]));
+	if(!psPageContainer->apsPageArray)
+	{
+		eStatus = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail;
+	}
+	for(i = 0; i < ui64NumPages; i++)
+	{
+		psPageContainer->apsPageArray[i] = IMG_NULL;
+	}
+
+	for(i = 0; i < ui64NumPages; i++)
+	{
+		psPageContainer->apsPageArray[i] = alloc_page(GFP_KERNEL);
+		if(! psPageContainer->apsPageArray[i])
+		{
+			eStatus = PVRSRV_ERROR_REQUEST_TDMETACODE_PAGES_FAIL;
+			goto fail;
+		}
+	}
+
+	*ppvPageContainer = psPageContainer;
+	return eStatus;
+
+fail:
+	_FreeTDMetaCodePageContainer((void *) psPageContainer);
+	*ppvPageContainer = IMG_NULL;
+	return eStatus;
+}
+
+PVRSRV_ERROR
+PhysmemNewTDMetaCodePMR(PVRSRV_DEVICE_NODE *psDevNode,
+                        IMG_DEVMEM_SIZE_T uiSize,
+                        IMG_UINT32 uiLog2PageSize,
+                        PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                        PMR **ppsPMRPtr)
+{
+	sTDMetaCodePageList *psPageContainer = IMG_NULL;
+	IMG_UINT32 uiPageSize = 1 << uiLog2PageSize;
+	IMG_UINT64 ui64NumPages = (uiSize >> uiLog2PageSize) + ((uiSize & (uiPageSize-1)) != 0);
+	PVRSRV_ERROR eStatus;
+	PHYS_HEAP *psTDMetaCodePhysHeap;
+	IMG_BOOL bMappingTable = IMG_TRUE;
+    IMG_HANDLE hPDumpAllocInfo = IMG_NULL;
+	RGX_DATA *psRGXData;
+
+    IMG_UINT32 uiPMRFlags = (PMR_FLAGS_T)(uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK);
+    /* check no significant bits were lost in cast due to different
+       bit widths for flags */
+    PVR_ASSERT(uiPMRFlags == (uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK));
+	
+	/* get the physical heap for TD Meta Code */
+	psRGXData = (RGX_DATA *)(psDevNode->psDevConfig->hDevData);
+	if(! psRGXData->bHasTDMetaCodePhysHeap)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"Failed allocation from non-existent Trusted Device physical heap!"));
+		eStatus = PVRSRV_ERROR_REQUEST_TDMETACODE_PAGES_FAIL;
+		goto fail;
+	}
+	eStatus = PhysHeapAcquire(psRGXData->uiTDMetaCodePhysHeapID,
+	                          &psTDMetaCodePhysHeap);
+	if(eStatus)
+	{
+		goto fail;
+	}
+
+	/* allocate and initialize the page container structure */
+	eStatus = _AllocTDMetaCodePageContainer(ui64NumPages,
+	                                        uiLog2PageSize,
+	                                        psTDMetaCodePhysHeap,
+	                                        (void *)&psPageContainer);
+	if(eStatus)
+	{
+		goto fail;
+	}
+
+	/* wrap the container in a PMR */
+    eStatus = PMRCreatePMR(psTDMetaCodePhysHeap,
+                           ui64NumPages * uiPageSize,
+                           ui64NumPages * uiPageSize,
+                           1,
+                           1,
+                           &bMappingTable,
+                           uiLog2PageSize,
+                           uiPMRFlags,
+                           "PMRTDMETACODE",
+                           &sTDMETACodePMRFuncTab,
+                           (void *)psPageContainer,
+                           ppsPMRPtr,
+                           &hPDumpAllocInfo,
+                           IMG_FALSE);
+	
+#if defined(PVR_RI_DEBUG)
+	{
+		RIWritePMREntryKM (*ppsPMRPtr,
+						   sizeof("TD META Code"),
+					   	   "TD META Code",
+						   (ui64NumPages * uiPageSize));
+	}
+#endif
+
+	/* this is needed when the allocation is finalized and we need to free it. */
+	psPageContainer->hPDumpAllocInfo = hPDumpAllocInfo;
+
+	return eStatus;
+
+	/* error cleanup */
+
+fail:
+	return eStatus;
+}
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/physmem_tdsecbuf_linux.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/physmem_tdsecbuf_linux.c
new file mode 100644
index 0000000..531973f
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/physmem_tdsecbuf_linux.c
@@ -0,0 +1,385 @@
+/*************************************************************************/ /*!
+@File
+@Title          Implementation of PMR functions for Trusted Device secure buffers
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Part of the memory management.  This module is responsible for
+                implementing the function callbacks for physical memory borrowed
+                from that normally managed by the operating system.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* include5/ */
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+
+/* services/server/include/ */
+#include "allocmem.h"
+#include "osfunc.h"
+#include "pdump_physmem.h"
+#include "pdump_km.h"
+#include "pmr.h"
+#include "pmr_impl.h"
+
+/* ourselves */
+#include "physmem_osmem.h"
+
+#if defined(PVR_RI_DEBUG)
+#include "ri_server.h"
+#endif
+
+#include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/mm_types.h>
+#include <linux/vmalloc.h>
+#include <linux/gfp.h>
+#include <linux/sched.h>
+#include <asm/io.h>
+#if defined(CONFIG_X86)
+#include <asm/cacheflush.h>
+#endif
+
+#include "rgxdevice.h"
+
+/* This is a placeholder implementation of a PMR factory to wrap allocations into
+   the secure buffer regions. It has been consciously heavily inspired by the
+   standard osmem PMR factory to supply dummy functionality. Most things here will
+   change in a real implementation.
+
+   Your starting point for re-implementing this module should be by inspecting the 
+   sTDSecureBufPMRFuncTab structure below and determining which callbacks you need
+   to implement for your system.
+*/
+
+typedef struct {
+	void *token;
+	IMG_UINT32 ui32Log2PageSizeBytes;
+	struct page **apsPageArray;
+	IMG_UINT64 ui64NumPages;
+
+	PHYS_HEAP *psTDSecureBufPhysHeap;
+    IMG_HANDLE hPDumpAllocInfo;
+} sTDSecureBufPageList;
+
+static void
+_FreeTDSecureBufPageContainer(void *pvPagecontainer)
+{
+	sTDSecureBufPageList *psPageContainer = (sTDSecureBufPageList *) pvPagecontainer;
+
+	if(! psPageContainer)
+	{
+		return;
+	}
+
+	if(psPageContainer->apsPageArray)
+	{
+		IMG_UINT64 i;
+		for(i = 0; i < psPageContainer->ui64NumPages; i++)
+		{
+			if(psPageContainer->apsPageArray[i])
+			{
+				__free_page(psPageContainer->apsPageArray[i]);
+			}
+		}
+		OSFreeMem(psPageContainer->apsPageArray);
+	}
+
+	PhysHeapRelease(psPageContainer->psTDSecureBufPhysHeap);
+
+    PDumpPMRFree(psPageContainer->hPDumpAllocInfo);
+
+	OSFreeMem(psPageContainer);
+}
+
+static PVRSRV_ERROR
+PMRSysPhysAddrTDSecureBuf(PMR_IMPL_PRIVDATA pvPriv,
+                         IMG_UINT32 ui32NumOfPages,
+                         IMG_DEVMEM_OFFSET_T *puiOffset,
+ 						 IMG_BOOL *pbValid,
+                         IMG_DEV_PHYADDR *psDevPAddr)
+{
+	sTDSecureBufPageList *psPageContainer = (sTDSecureBufPageList *) pvPriv;
+	IMG_UINT32 ui32PageOffset;
+	IMG_UINT64 ui64PageNum;
+	IMG_UINT32 idx;
+
+	for (idx=0; idx < ui32NumOfPages; idx++)
+	{
+		if (pbValid[idx])
+		{
+			ui64PageNum = puiOffset[idx] >> psPageContainer->ui32Log2PageSizeBytes;
+			ui32PageOffset = puiOffset[idx] - (ui64PageNum << psPageContainer->ui32Log2PageSizeBytes);
+			PVR_ASSERT(ui64PageNum < psPageContainer->ui64NumPages);
+
+			psDevPAddr[idx].uiAddr = page_to_phys(psPageContainer->apsPageArray[ui64PageNum]) + ui32PageOffset;
+		}
+	}
+
+	return PVRSRV_OK;
+}
+
+
+static PVRSRV_ERROR
+PMRFinalizeTDSecureBuf(PMR_IMPL_PRIVDATA pvPriv)
+{
+	_FreeTDSecureBufPageContainer((void *) pvPriv);
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR
+PMRReadBytesTDSecureBuf(PMR_IMPL_PRIVDATA pvPriv,
+                       IMG_DEVMEM_OFFSET_T uiOffset,
+                       IMG_UINT8 *pcBuffer,
+                       IMG_SIZE_T uiBufSz,
+                       IMG_SIZE_T *puiNumBytes)
+{
+	sTDSecureBufPageList *psPageContainer = (sTDSecureBufPageList *) pvPriv;
+    IMG_UINT8 *pvMapping;
+	IMG_UINT32 uiPageSize = 1 << psPageContainer->ui32Log2PageSizeBytes;
+   	IMG_UINT32 uiPageIndex;
+	IMG_UINT32 uiReadOffset;
+	IMG_UINT32 uiReadBytes;
+
+	*puiNumBytes = 0;
+	
+	while(uiBufSz)
+	{
+    	uiPageIndex = uiOffset >> psPageContainer->ui32Log2PageSizeBytes;
+		uiReadOffset = uiOffset - uiPageIndex * uiPageSize;
+		uiReadBytes = uiPageSize - uiReadOffset;
+
+		if(uiReadBytes > uiBufSz)
+		{
+			uiReadBytes = uiBufSz;
+		}
+		
+        pvMapping = kmap(psPageContainer->apsPageArray[uiPageIndex]);
+        PVR_ASSERT(pvMapping);
+        memcpy(pcBuffer, pvMapping + uiReadOffset, uiReadBytes);
+        kunmap(psPageContainer->apsPageArray[uiPageIndex]);
+		
+		uiBufSz -= uiReadBytes;
+		pcBuffer += uiReadBytes;
+		*puiNumBytes += uiReadBytes;
+
+		uiOffset += uiReadBytes;
+	}
+    return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR
+PMRKernelMapTDSecureBuf(PMR_IMPL_PRIVDATA pvPriv,
+                       IMG_SIZE_T uiOffset,
+                       IMG_SIZE_T uiSize,
+                       void **ppvKernelAddressOut,
+                       IMG_HANDLE *phHandleOut,
+                       PMR_FLAGS_T ulFlags)
+{
+    sTDSecureBufPageList *psPageContainer;
+    void *pvAddress;
+
+    psPageContainer = pvPriv;
+
+	pvAddress = vm_map_ram(psPageContainer->apsPageArray,
+						   psPageContainer->ui64NumPages,
+						   -1,
+						   PAGE_KERNEL);
+
+	if(! pvAddress)
+	{
+		return PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING;
+	}
+
+    *ppvKernelAddressOut = pvAddress + uiOffset;
+    *phHandleOut = pvAddress;
+
+    return PVRSRV_OK;
+}
+
+static void
+PMRKernelUnmapTDSecureBuf(PMR_IMPL_PRIVDATA pvPriv,
+                         IMG_HANDLE hHandle)
+{
+    sTDSecureBufPageList *psPageContainer;
+    psPageContainer = pvPriv;
+    vm_unmap_ram(hHandle, psPageContainer->ui64NumPages);
+}
+
+static PMR_IMPL_FUNCTAB sTDSecureBufPMRFuncTab = {
+	.pfnLockPhysAddresses = IMG_NULL,           /* pages are always available in these PMRs */
+	.pfnUnlockPhysAddresses = IMG_NULL,         /* as above */
+	.pfnDevPhysAddr = PMRSysPhysAddrTDSecureBuf,
+	.pfnPDumpSymbolicAddr = IMG_NULL,           /* nothing special needed */
+	.pfnAcquireKernelMappingData = PMRKernelMapTDSecureBuf,
+	.pfnReleaseKernelMappingData = PMRKernelUnmapTDSecureBuf,
+	.pfnReadBytes = PMRReadBytesTDSecureBuf,
+	.pfnFinalize = PMRFinalizeTDSecureBuf
+};
+
+
+static PVRSRV_ERROR
+_AllocTDSecureBufPageContainer(IMG_UINT64 ui64NumPages,
+                              IMG_UINT32 uiLog2PageSize,
+                              PHYS_HEAP *psTDSecureBufPhysHeap,
+                              void **ppvPageContainer)
+{
+	IMG_UINT64 i;
+	PVRSRV_ERROR eStatus = PVRSRV_OK;
+	sTDSecureBufPageList *psPageContainer;
+
+	psPageContainer = OSAllocMem(sizeof(sTDSecureBufPageList));
+	if(!psPageContainer)
+	{
+		eStatus = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail;
+	}
+	psPageContainer->ui32Log2PageSizeBytes = uiLog2PageSize;
+	psPageContainer->ui64NumPages = ui64NumPages;
+	psPageContainer->psTDSecureBufPhysHeap = psTDSecureBufPhysHeap;
+	psPageContainer->apsPageArray = OSAllocMem(ui64NumPages * sizeof(psPageContainer->apsPageArray[0]));
+	if(!psPageContainer->apsPageArray)
+	{
+		eStatus = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail;
+	}
+	for(i = 0; i < ui64NumPages; i++)
+	{
+		psPageContainer->apsPageArray[i] = IMG_NULL;
+	}
+
+	for(i = 0; i < ui64NumPages; i++)
+	{
+		psPageContainer->apsPageArray[i] = alloc_page(GFP_KERNEL);
+		if(! psPageContainer->apsPageArray[i])
+		{
+			eStatus = PVRSRV_ERROR_REQUEST_TDSECUREBUF_PAGES_FAIL;
+			goto fail;
+		}
+	}
+
+	*ppvPageContainer = psPageContainer;
+	return eStatus;
+
+fail:
+	_FreeTDSecureBufPageContainer((void *) psPageContainer);
+	*ppvPageContainer = IMG_NULL;
+	return eStatus;
+}
+
+PVRSRV_ERROR
+PhysmemNewTDSecureBufPMR(PVRSRV_DEVICE_NODE *psDevNode,
+                        IMG_DEVMEM_SIZE_T uiSize,
+                        IMG_UINT32 uiLog2PageSize,
+                        PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                        PMR **ppsPMRPtr)
+{
+	sTDSecureBufPageList *psPageContainer = IMG_NULL;
+	IMG_UINT32 uiPageSize = 1 << uiLog2PageSize;
+	IMG_UINT64 ui64NumPages = (uiSize >> uiLog2PageSize) + ((uiSize & (uiPageSize-1)) != 0);
+	PVRSRV_ERROR eStatus;
+	PHYS_HEAP *psTDSecureBufPhysHeap;
+	IMG_BOOL bMappingTable = IMG_TRUE;
+	IMG_HANDLE hPDumpAllocInfo = IMG_NULL;
+	RGX_DATA *psRGXData;
+
+    IMG_UINT32 uiPMRFlags = (PMR_FLAGS_T)(uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK);
+    /* check no significant bits were lost in cast due to different
+       bit widths for flags */
+    PVR_ASSERT(uiPMRFlags == (uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK));
+	
+	/* get the physical heap for TD secure buffers */
+	psRGXData = (RGX_DATA *)(psDevNode->psDevConfig->hDevData);
+	if(! psRGXData->bHasTDSecureBufPhysHeap)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"Failed allocation from non-existent Trusted Device physical heap!"));
+		eStatus = PVRSRV_ERROR_REQUEST_TDSECUREBUF_PAGES_FAIL;
+		goto fail;
+	}
+	eStatus = PhysHeapAcquire(psRGXData->uiTDSecureBufPhysHeapID,
+	                          &psTDSecureBufPhysHeap);
+	if(eStatus)
+	{
+		goto fail;
+	}
+
+	/* allocate and initialize the page container structure */
+	eStatus = _AllocTDSecureBufPageContainer(ui64NumPages,
+	                                        uiLog2PageSize,
+	                                        psTDSecureBufPhysHeap,
+	                                        (void *)&psPageContainer);
+	if(eStatus)
+	{
+		goto fail;
+	}
+
+	/* wrap the container in a PMR */
+    eStatus = PMRCreatePMR(psTDSecureBufPhysHeap,
+                           ui64NumPages * uiPageSize,
+                           ui64NumPages * uiPageSize,
+                           1,
+                           1,
+                           &bMappingTable,
+                           uiLog2PageSize,
+                           uiPMRFlags,
+                           "PMRTDSECUREBUF",
+                           &sTDSecureBufPMRFuncTab,
+                           (void *)psPageContainer,
+                           ppsPMRPtr,
+                           &hPDumpAllocInfo,
+                           IMG_FALSE);
+	
+#if defined(PVR_RI_DEBUG)
+	{
+		RIWritePMREntryKM (*ppsPMRPtr,
+						   sizeof("TD Secure Buffer"),
+						   "TD Secure Buffer",
+						   (ui64NumPages * uiPageSize));
+	}
+#endif
+
+	/* this is needed when the allocation is finalized and we need to free it. */
+	psPageContainer->hPDumpAllocInfo = hPDumpAllocInfo;
+
+	return eStatus;
+
+	/* error cleanup */
+
+fail:
+	return eStatus;
+}
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/private_data.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/private_data.h
new file mode 100644
index 0000000..6d63f15
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/private_data.h
@@ -0,0 +1,53 @@
+/*************************************************************************/ /*!
+@File
+@Title          Linux private data structure
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__INCLUDED_PRIVATE_DATA_H_)
+#define __INCLUDED_PRIVATE_DATA_H_
+
+#include <linux/fs.h>
+
+#include "connection_server.h"
+
+CONNECTION_DATA *LinuxConnectionFromFile(struct file *pFile);
+struct file *LinuxFileFromConnection(CONNECTION_DATA *psConnection);
+
+#endif /* !defined(__INCLUDED_PRIVATE_DATA_H_) */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/pvr_bridge_k.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/pvr_bridge_k.c
new file mode 100644
index 0000000..7f5cf59
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/pvr_bridge_k.c
@@ -0,0 +1,811 @@
+/*************************************************************************/ /*!
+@File
+@Title          PVR Bridge Module (kernel side)
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Receives calls from the user portion of services and
+                despatches them to functions in the kernel portion.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include "img_defs.h"
+#include "pvr_bridge.h"
+#include "connection_server.h"
+#include "syscommon.h"
+#include "pvr_debug.h"
+#include "pvr_debugfs.h"
+#include "private_data.h"
+#include "linkage.h"
+
+#if defined(SUPPORT_DRM)
+#include <drm/drmP.h>
+#include "pvr_drm.h"
+#endif /* defined(SUPPORT_DRM) */
+
+/* RGX: */
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+
+#include "srvcore.h"
+#include "common_srvcore_bridge.h"
+#include "cache_defines.h"
+
+#if defined(MODULE_TEST)
+/************************************************************************/
+// additional includes for services testing
+/************************************************************************/
+#include "pvr_test_bridge.h"
+#include "kern_test.h"
+/************************************************************************/
+// end of additional includes
+/************************************************************************/
+#endif
+
+#if defined(DEBUG_BRIDGE_KM)
+static PVR_DEBUGFS_ENTRY_DATA *gpsPVRDebugFSBridgeStatsEntry = NULL;
+static struct seq_operations gsBridgeStatsReadOps;
+#endif
+
+/* These will go when full bridge gen comes in */
+PVRSRV_ERROR InitPDUMPCTRLBridge(void);
+PVRSRV_ERROR DeinitPDUMPCTRLBridge(void);
+#if defined(SUPPORT_DISPLAY_CLASS)
+PVRSRV_ERROR InitDCBridge(void);
+PVRSRV_ERROR DeinitDCBridge(void);
+#endif
+PVRSRV_ERROR InitMMBridge(void);
+PVRSRV_ERROR DeinitMMBridge(void);
+PVRSRV_ERROR InitCMMBridge(void);
+PVRSRV_ERROR DeinitCMMBridge(void);
+PVRSRV_ERROR InitPDUMPMMBridge(void);
+PVRSRV_ERROR DeinitPDUMPMMBridge(void);
+PVRSRV_ERROR InitPDUMPBridge(void);
+PVRSRV_ERROR DeinitPDUMPBridge(void);
+PVRSRV_ERROR InitSRVCOREBridge(void);
+PVRSRV_ERROR DeinitSRVCOREBridge(void);
+PVRSRV_ERROR InitSYNCBridge(void);
+PVRSRV_ERROR DeinitSYNCBridge(void);
+#if defined(SUPPORT_INSECURE_EXPORT)
+PVRSRV_ERROR InitSYNCEXPORTBridge(void);
+PVRSRV_ERROR DeinitSYNCEXPORTBridge(void);
+#endif
+#if defined(SUPPORT_SECURE_EXPORT)
+PVRSRV_ERROR InitSYNCSEXPORTBridge(void);
+PVRSRV_ERROR DeinitSYNCSEXPORTBridge(void);
+#endif
+#if defined (SUPPORT_RGX)
+PVRSRV_ERROR InitRGXINITBridge(void);
+PVRSRV_ERROR DeinitRGXINITBridge(void);
+PVRSRV_ERROR InitRGXTA3DBridge(void);
+PVRSRV_ERROR DeinitRGXTA3DBridge(void);
+PVRSRV_ERROR InitRGXTQBridge(void);
+PVRSRV_ERROR DeinitRGXTQBridge(void);
+PVRSRV_ERROR InitRGXCMPBridge(void);
+PVRSRV_ERROR DeinitRGXCMPBridge(void);
+PVRSRV_ERROR InitBREAKPOINTBridge(void);
+PVRSRV_ERROR DeinitBREAKPOINTBridge(void);
+PVRSRV_ERROR InitDEBUGMISCBridge(void);
+PVRSRV_ERROR DeinitDEBUGMISCBridge(void);
+PVRSRV_ERROR InitRGXPDUMPBridge(void);
+PVRSRV_ERROR DeinitRGXPDUMPBridge(void);
+PVRSRV_ERROR InitRGXHWPERFBridge(void);
+PVRSRV_ERROR DeinitRGXHWPERFBridge(void);
+#if defined(RGX_FEATURE_RAY_TRACING)
+PVRSRV_ERROR InitRGXRAYBridge(void);
+PVRSRV_ERROR DeinitRGXRAYBridge(void);
+#endif /* RGX_FEATURE_RAY_TRACING */
+PVRSRV_ERROR InitREGCONFIGBridge(void);
+PVRSRV_ERROR DeinitREGCONFIGBridge(void);
+PVRSRV_ERROR InitTIMERQUERYBridge(void);
+PVRSRV_ERROR DeinitTIMERQUERYBridge(void);
+#endif /* SUPPORT_RGX */
+#if (CACHEFLUSH_TYPE == CACHEFLUSH_GENERIC)
+PVRSRV_ERROR InitCACHEGENERICBridge(void);
+PVRSRV_ERROR DeinitCACHEGENERICBridge(void);
+#endif
+#if defined(SUPPORT_SECURE_EXPORT)
+PVRSRV_ERROR InitSMMBridge(void);
+PVRSRV_ERROR DeinitSMMBridge(void);
+#endif
+PVRSRV_ERROR InitPVRTLBridge(void);
+PVRSRV_ERROR DeinitPVRTLBridge(void);
+#if defined(PVR_RI_DEBUG)
+PVRSRV_ERROR InitRIBridge(void);
+PVRSRV_ERROR DeinitRIBridge(void);
+#endif
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+PVRSRV_ERROR InitDEVICEMEMHISTORYBridge(IMG_VOID);
+PVRSRV_ERROR DeinitDEVICEMEMHISTORYBridge(IMG_VOID);
+#endif
+#if defined(SUPPORT_ION)
+PVRSRV_ERROR InitDMABUFBridge(void);
+PVRSRV_ERROR DeinitDMABUFBridge(void);
+#endif
+#if defined(SUPPORT_VALIDATION)
+PVRSRV_ERROR InitVALIDATIONBridge(void);
+#endif
+#if defined(PVR_TESTING_UTILS)
+PVRSRV_ERROR InitTUTILSBridge(void);
+PVRSRV_ERROR DeinitTUTILSBridge(void);
+#endif
+
+PVRSRV_ERROR
+LinuxBridgeInit(void)
+{
+	PVRSRV_ERROR eError;
+#if defined(DEBUG_BRIDGE_KM)
+	IMG_INT iResult;
+
+	iResult = PVRDebugFSCreateEntry("bridge_stats",
+					NULL,
+					&gsBridgeStatsReadOps,
+					NULL,
+					&g_BridgeDispatchTable[0],
+					&gpsPVRDebugFSBridgeStatsEntry);
+	if (iResult != 0)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+#endif
+
+	eError = InitSRVCOREBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	eError = InitSYNCBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+#if defined(SUPPORT_INSECURE_EXPORT)
+	eError = InitSYNCEXPORTBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#endif
+#if defined(SUPPORT_SECURE_EXPORT)
+	eError = InitSYNCSEXPORTBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#endif
+
+	eError = InitPDUMPCTRLBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+	eError = InitMMBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+	eError = InitCMMBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+	eError = InitPDUMPMMBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+	eError = InitPDUMPBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+#if defined(SUPPORT_ION)
+	eError = InitDMABUFBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#endif
+
+#if defined(SUPPORT_DISPLAY_CLASS)
+	eError = InitDCBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#endif
+
+#if (CACHEFLUSH_TYPE == CACHEFLUSH_GENERIC)
+	eError = InitCACHEGENERICBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#endif
+
+#if defined(SUPPORT_SECURE_EXPORT)
+	eError = InitSMMBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#endif
+
+	eError = InitPVRTLBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	#if defined(PVR_RI_DEBUG)
+	eError = InitRIBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+	#endif
+
+#if defined(SUPPORT_VALIDATION)
+	eError = InitVALIDATIONBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#endif
+
+#if defined(PVR_TESTING_UTILS)
+	eError = InitTUTILSBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#endif
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+	eError = InitDEVICEMEMHISTORYBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#endif
+
+	#if defined (SUPPORT_RGX)
+	eError = InitRGXTQBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	eError = InitRGXCMPBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	eError = InitRGXINITBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	eError = InitRGXTA3DBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	eError = InitBREAKPOINTBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	eError = InitDEBUGMISCBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+	
+	eError = InitRGXPDUMPBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	eError = InitRGXHWPERFBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+#if defined(RGX_FEATURE_RAY_TRACING)
+	eError = InitRGXRAYBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#endif /* RGX_FEATURE_RAY_TRACING */
+
+	eError = InitREGCONFIGBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	eError = InitTIMERQUERYBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+#endif /* SUPPORT_RGX */
+
+	return eError;
+}
+
+PVRSRV_ERROR
+LinuxBridgeDeInit(void)
+{
+	PVRSRV_ERROR eError;
+#if defined(DEBUG_BRIDGE_KM)
+	if (gpsPVRDebugFSBridgeStatsEntry != NULL)
+	{
+		PVRDebugFSRemoveEntry(gpsPVRDebugFSBridgeStatsEntry);
+		gpsPVRDebugFSBridgeStatsEntry = NULL;
+	}
+#endif
+
+	eError = DeinitSRVCOREBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	eError = DeinitSYNCBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+#if defined(SUPPORT_INSECURE_EXPORT)
+	eError = DeinitSYNCEXPORTBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#endif
+#if defined(SUPPORT_SECURE_EXPORT)
+	eError = DeinitSYNCSEXPORTBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#endif
+
+	eError = DeinitPDUMPCTRLBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+	eError = DeinitMMBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+	eError = DeinitCMMBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+	eError = DeinitPDUMPMMBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+	eError = DeinitPDUMPBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+#if defined(SUPPORT_ION)
+	eError = DeinitDMABUFBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#endif
+
+#if defined(PVR_TESTING_UTILS)
+	eError = DeinitTUTILSBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#endif
+
+#if defined(SUPPORT_DISPLAY_CLASS)
+	eError = DeinitDCBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#endif
+
+#if (CACHEFLUSH_TYPE == CACHEFLUSH_GENERIC)
+	eError = DeinitCACHEGENERICBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#endif
+
+#if defined(SUPPORT_SECURE_EXPORT)
+	eError = DeinitSMMBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#endif
+
+	eError = DeinitPVRTLBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	#if defined(PVR_RI_DEBUG)
+	eError = DeinitRIBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+	#endif
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+	eError = DeinitDEVICEMEMHISTORYBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#endif
+
+	#if defined (SUPPORT_RGX)
+	eError = DeinitRGXTQBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	eError = DeinitRGXCMPBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	eError = DeinitRGXINITBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	eError = DeinitRGXTA3DBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	eError = DeinitBREAKPOINTBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	eError = DeinitDEBUGMISCBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+	
+	eError = DeinitRGXPDUMPBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	eError = DeinitRGXHWPERFBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+#if defined(RGX_FEATURE_RAY_TRACING)
+	eError = DeinitRGXRAYBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+#endif /* RGX_FEATURE_RAY_TRACING */
+
+	eError = DeinitREGCONFIGBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	eError = DeinitTIMERQUERYBridge();
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+#endif /* SUPPORT_RGX */
+
+	return eError;
+}
+
+#if defined(DEBUG_BRIDGE_KM)
+static void *BridgeStatsSeqStart(struct seq_file *psSeqFile, loff_t *puiPosition)
+{
+	PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *psDispatchTable = (PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *)psSeqFile->private;
+
+	OSAcquireBridgeLock();
+
+	if (psDispatchTable == NULL || (*puiPosition) > BRIDGE_DISPATCH_TABLE_ENTRY_COUNT)
+	{
+		return NULL;
+	}
+
+	if ((*puiPosition) == 0) 
+	{
+		return SEQ_START_TOKEN;
+	}
+
+	return &(psDispatchTable[(*puiPosition) - 1]);
+}
+
+static void BridgeStatsSeqStop(struct seq_file *psSeqFile, void *pvData)
+{
+	PVR_UNREFERENCED_PARAMETER(psSeqFile);
+	PVR_UNREFERENCED_PARAMETER(pvData);
+
+	OSReleaseBridgeLock();
+}
+
+static void *BridgeStatsSeqNext(struct seq_file *psSeqFile,
+			       void *pvData,
+			       loff_t *puiPosition)
+{
+	PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *psDispatchTable = (PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *)psSeqFile->private;
+	loff_t uiItemAskedFor = *puiPosition; /* puiPosition on entry is the index to return */
+
+	PVR_UNREFERENCED_PARAMETER(pvData);
+
+	/* Is the item asked for (starts at 0) a valid table index? */
+	if (uiItemAskedFor < BRIDGE_DISPATCH_TABLE_ENTRY_COUNT)
+	{
+		(*puiPosition)++; /* on exit it is the next seq index to ask for */
+		return &(psDispatchTable[uiItemAskedFor]);
+	}
+
+	/* Now passed the end of the table to indicate stop */
+	return IMG_NULL;
+}
+
+static int BridgeStatsSeqShow(struct seq_file *psSeqFile, void *pvData)
+{
+	if (pvData == SEQ_START_TOKEN)
+	{
+		seq_printf(psSeqFile,
+			   "Total ioctl call count = %u\n"
+			   "Total number of bytes copied via copy_from_user = %u\n"
+			   "Total number of bytes copied via copy_to_user = %u\n"
+			   "Total number of bytes copied via copy_*_user = %u\n\n"
+			   "%-60s | %-48s | %10s | %20s | %10s\n",
+			   g_BridgeGlobalStats.ui32IOCTLCount,
+			   g_BridgeGlobalStats.ui32TotalCopyFromUserBytes,
+			   g_BridgeGlobalStats.ui32TotalCopyToUserBytes,
+			   g_BridgeGlobalStats.ui32TotalCopyFromUserBytes + g_BridgeGlobalStats.ui32TotalCopyToUserBytes,
+			   "  #: Bridge Name",
+			   "Wrapper Function",
+			   "Call Count",
+			   "copy_from_user Bytes",
+			   "copy_to_user Bytes");
+	}
+	else if (pvData != NULL)
+	{
+		PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *psEntry = (	PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *)pvData;
+
+		seq_printf(psSeqFile,
+			   "%3d: %-60s   %-48s   %-10u   %-20u   %-10u\n",
+			   (IMG_UINT32)(((IMG_SIZE_T)psEntry-(IMG_SIZE_T)g_BridgeDispatchTable)/sizeof(PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY)),
+			   psEntry->pszIOCName,
+			   psEntry->pszFunctionName,
+			   psEntry->ui32CallCount,
+			   psEntry->ui32CopyFromUserTotalBytes,
+			   psEntry->ui32CopyToUserTotalBytes);
+	}
+
+	return 0;
+}
+
+static struct seq_operations gsBridgeStatsReadOps =
+{
+	.start = BridgeStatsSeqStart,
+	.stop = BridgeStatsSeqStop,
+	.next = BridgeStatsSeqNext,
+	.show = BridgeStatsSeqShow,
+};
+#endif /* defined(DEBUG_BRIDGE_KM) */
+
+
+#if defined(SUPPORT_DRM)
+int
+PVRSRV_BridgeDispatchKM(struct drm_device unref__ *dev, void *arg, struct drm_file *pDRMFile)
+#else
+long
+PVRSRV_BridgeDispatchKM(struct file *pFile, unsigned int unref__ ioctlCmd, unsigned long arg)
+#endif
+{
+#if defined(SUPPORT_DRM)
+	struct file *pFile = PVR_FILE_FROM_DRM_FILE(pDRMFile);
+#else
+	PVRSRV_BRIDGE_PACKAGE *psBridgePackageUM = (PVRSRV_BRIDGE_PACKAGE *)arg;
+	PVRSRV_BRIDGE_PACKAGE sBridgePackageKM;
+#endif
+	PVRSRV_BRIDGE_PACKAGE *psBridgePackageKM;
+	CONNECTION_DATA *psConnection = LinuxConnectionFromFile(pFile);
+
+	if(psConnection == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Connection is closed", __FUNCTION__));
+		return -EFAULT;
+	}
+
+#if defined(SUPPORT_DRM)
+	psBridgePackageKM = (PVRSRV_BRIDGE_PACKAGE *)arg;
+	PVR_ASSERT(psBridgePackageKM != IMG_NULL);
+#else
+
+	psBridgePackageKM = &sBridgePackageKM;
+
+	if (!OSAccessOK(PVR_VERIFY_WRITE,
+				   psBridgePackageUM,
+				   sizeof(PVRSRV_BRIDGE_PACKAGE)))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Received invalid pointer to function arguments",
+				 __FUNCTION__));
+
+		return -EFAULT;
+	}
+	
+	/* FIXME - Currently the CopyFromUserWrapper which collects stats about
+	 * how much data is shifted to/from userspace isn't available to us
+	 * here. */
+	if (OSCopyFromUser(IMG_NULL,
+					  psBridgePackageKM,
+					  psBridgePackageUM,
+					  sizeof(PVRSRV_BRIDGE_PACKAGE))
+	  != PVRSRV_OK)
+	{
+		return -EFAULT;
+	}
+#endif
+
+#if defined(DEBUG_BRIDGE_CALLS)
+	{
+		IMG_UINT32 mangledID;
+		mangledID = psBridgePackageKM->ui32BridgeID;
+
+		psBridgePackageKM->ui32BridgeID = PVRSRV_GET_BRIDGE_ID(psBridgePackageKM->ui32BridgeID);
+
+		PVR_DPF((PVR_DBG_WARNING, "%s: Bridge ID (x%8x) %8u (mangled: x%8x) ", __FUNCTION__, psBridgePackageKM->ui32BridgeID, psBridgePackageKM->ui32BridgeID, mangledID));
+	}
+#else
+		psBridgePackageKM->ui32BridgeID = PVRSRV_GET_BRIDGE_ID(psBridgePackageKM->ui32BridgeID);
+#endif
+
+	return BridgedDispatchKM(psConnection, psBridgePackageKM);
+}
+
+
+#if defined(CONFIG_COMPAT)
+#if defined(SUPPORT_DRM)
+int
+#else
+long
+#endif
+PVRSRV_BridgeCompatDispatchKM(struct file *pFile,
+			      unsigned int unref__ ioctlCmd,
+			      unsigned long arg)
+{
+	struct bridge_package_from_32
+	{
+		IMG_UINT32				bridge_id;			/*!< ioctl bridge group */
+		IMG_UINT32				function_id;        /*!< ioctl function index */
+		IMG_UINT32				size;				/*!< size of structure */
+		IMG_UINT32				addr_param_in;		/*!< input data buffer */ 
+		IMG_UINT32				in_buffer_size;		/*!< size of input data buffer */
+		IMG_UINT32				addr_param_out;		/*!< output data buffer */
+		IMG_UINT32				out_buffer_size;	/*!< size of output data buffer */
+	};
+
+	PVRSRV_BRIDGE_PACKAGE params_for_64;
+	struct bridge_package_from_32 params;
+ 	struct bridge_package_from_32 * const params_addr = &params;
+	CONNECTION_DATA *psConnection = LinuxConnectionFromFile(pFile);
+
+	if(psConnection == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Connection is closed", __FUNCTION__));
+		return -EFAULT;
+	}
+
+	/* make sure there is no padding inserted by compiler */
+	PVR_ASSERT(sizeof(struct bridge_package_from_32) == 7 * sizeof(IMG_UINT32));
+
+	if(!OSAccessOK(PVR_VERIFY_READ, (void *) arg,
+				   sizeof(struct bridge_package_from_32)))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Received invalid pointer to function arguments",
+				 __FUNCTION__));
+
+		return -EFAULT;
+	}
+	
+	if(OSCopyFromUser(NULL, params_addr, (void*) arg,
+					  sizeof(struct bridge_package_from_32))
+	   != PVRSRV_OK)
+	{
+		return -EFAULT;
+	}
+
+	PVR_ASSERT(params_addr->size == sizeof(struct bridge_package_from_32));
+
+	params_for_64.ui32BridgeID = PVRSRV_GET_BRIDGE_ID(params_addr->bridge_id);
+	params_for_64.ui32FunctionID = params_addr->function_id;
+	params_for_64.ui32Size = sizeof(params_for_64);
+	params_for_64.pvParamIn = (void*) ((size_t) params_addr->addr_param_in);
+	params_for_64.pvParamOut = (void*) ((size_t) params_addr->addr_param_out);
+	params_for_64.ui32InBufferSize = params_addr->in_buffer_size;
+	params_for_64.ui32OutBufferSize = params_addr->out_buffer_size;
+
+	return BridgedDispatchKM(psConnection, &params_for_64);
+}
+#endif /* defined(CONFIG_COMPAT) */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/pvr_debug.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/pvr_debug.c
new file mode 100644
index 0000000..f3ed71e
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/pvr_debug.c
@@ -0,0 +1,1305 @@
+/*************************************************************************/ /*!
+@File
+@Title          Debug Functionality
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description	Provides kernel side Debug Functionality.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/hardirq.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <stdarg.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33))
+#include <generated/compile.h>
+#include <generated/utsrelease.h>
+#else
+#include <linux/compile.h>
+#include <linux/utsrelease.h>
+#endif
+
+#include "allocmem.h"
+#include "pvrversion.h"
+#include "img_types.h"
+#include "servicesext.h"
+#include "pvr_debug.h"
+#include "srvkm.h"
+#include "pvr_debugfs.h"
+#include "linkage.h"
+#include "pvr_uaccess.h"
+#include "pvrsrv.h"
+#include "rgxdevice.h"
+#include "rgxdebug.h"
+#include "lists.h"
+#include "osfunc.h"
+
+/* Handle used by DebugFS to get GPU utilisation stats */
+static IMG_HANDLE ghGpuUtilUserDebugFS = NULL;
+
+#if defined(PVRSRV_NEED_PVR_DPF)
+
+/******** BUFFERED LOG MESSAGES ********/
+
+/* Because we don't want to have to handle CCB wrapping, each buffered
+ * message is rounded up to PVRSRV_DEBUG_CCB_MESG_MAX bytes. This means
+ * there is the same fixed number of messages that can be stored,
+ * regardless of message length.
+ */
+
+#if defined(PVRSRV_DEBUG_CCB_MAX)
+
+#define PVRSRV_DEBUG_CCB_MESG_MAX	PVR_MAX_DEBUG_MESSAGE_LEN
+
+#include <linux/syscalls.h>
+#include <linux/time.h>
+
+typedef struct
+{
+	const IMG_CHAR *pszFile;
+	IMG_INT iLine;
+	IMG_UINT32 ui32TID;
+	IMG_CHAR pcMesg[PVRSRV_DEBUG_CCB_MESG_MAX];
+	struct timeval sTimeVal;
+}
+PVRSRV_DEBUG_CCB;
+
+static PVRSRV_DEBUG_CCB gsDebugCCB[PVRSRV_DEBUG_CCB_MAX] = { { 0 } };
+
+static IMG_UINT giOffset = 0;
+
+static DEFINE_MUTEX(gsDebugCCBMutex);
+
+static void
+AddToBufferCCB(const IMG_CHAR *pszFileName, IMG_UINT32 ui32Line,
+			   const IMG_CHAR *szBuffer)
+{
+	mutex_lock(&gsDebugCCBMutex);
+
+	gsDebugCCB[giOffset].pszFile = pszFileName;
+	gsDebugCCB[giOffset].iLine   = ui32Line;
+	gsDebugCCB[giOffset].ui32TID = current->tgid;
+
+	do_gettimeofday(&gsDebugCCB[giOffset].sTimeVal);
+
+	strncpy(gsDebugCCB[giOffset].pcMesg, szBuffer, PVRSRV_DEBUG_CCB_MESG_MAX - 1);
+	gsDebugCCB[giOffset].pcMesg[PVRSRV_DEBUG_CCB_MESG_MAX - 1] = 0;
+
+	giOffset = (giOffset + 1) % PVRSRV_DEBUG_CCB_MAX;
+
+	mutex_unlock(&gsDebugCCBMutex);
+}
+
+IMG_EXPORT void PVRSRVDebugPrintfDumpCCB(void)
+{
+	int i;
+
+	mutex_lock(&gsDebugCCBMutex);
+	
+	for (i = 0; i < PVRSRV_DEBUG_CCB_MAX; i++)
+	{
+		PVRSRV_DEBUG_CCB *psDebugCCBEntry =
+			&gsDebugCCB[(giOffset + i) % PVRSRV_DEBUG_CCB_MAX];
+
+		/* Early on, we won't have PVRSRV_DEBUG_CCB_MAX messages */
+		if (!psDebugCCBEntry->pszFile)
+		{
+			continue;
+		}
+
+		printk(KERN_ERR "%s:%d: (%ld.%ld,tid=%u) %s\n",
+			   psDebugCCBEntry->pszFile,
+			   psDebugCCBEntry->iLine,
+			   (long)psDebugCCBEntry->sTimeVal.tv_sec,
+			   (long)psDebugCCBEntry->sTimeVal.tv_usec,
+			   psDebugCCBEntry->ui32TID,
+			   psDebugCCBEntry->pcMesg);
+
+		/* Clear this entry so it doesn't get printed the next time again. */
+		psDebugCCBEntry->pszFile = IMG_NULL;
+	}
+
+	mutex_unlock(&gsDebugCCBMutex);
+}
+
+#else /* defined(PVRSRV_DEBUG_CCB_MAX) */
+static INLINE void
+AddToBufferCCB(const IMG_CHAR *pszFileName, IMG_UINT32 ui32Line,
+               const IMG_CHAR *szBuffer)
+{
+	(void)pszFileName;
+	(void)szBuffer;
+	(void)ui32Line;
+}
+
+IMG_EXPORT void PVRSRVDebugPrintfDumpCCB(void)
+{
+	/* Not available */
+}
+
+#endif /* defined(PVRSRV_DEBUG_CCB_MAX) */
+
+#endif /* defined(PVRSRV_NEED_PVR_DPF) */
+
+static IMG_BOOL VBAppend(IMG_CHAR *pszBuf, IMG_UINT32 ui32BufSiz,
+						 const IMG_CHAR *pszFormat, va_list VArgs)
+						 IMG_FORMAT_PRINTF(3, 0);
+
+
+#if defined(PVRSRV_NEED_PVR_DPF)
+
+#define PVR_MAX_FILEPATH_LEN 256
+
+static IMG_BOOL BAppend(IMG_CHAR *pszBuf, IMG_UINT32 ui32BufSiz,
+						const IMG_CHAR *pszFormat, ...)
+						IMG_FORMAT_PRINTF(3, 4);
+
+/* NOTE: Must NOT be static! Used in module.c.. */
+IMG_UINT32 gPVRDebugLevel =
+	(
+	 DBGPRIV_FATAL | DBGPRIV_ERROR | DBGPRIV_WARNING
+
+#if defined(PVRSRV_DEBUG_CCB_MAX)
+	 | DBGPRIV_BUFFERED
+#endif /* defined(PVRSRV_DEBUG_CCB_MAX) */
+
+#if defined(PVR_DPF_ADHOC_DEBUG_ON)
+	 | DBGPRIV_DEBUG
+#endif /* defined(PVR_DPF_ADHOC_DEBUG_ON) */
+	);
+
+#endif /* defined(PVRSRV_NEED_PVR_DPF) || defined(PVRSRV_NEED_PVR_TRACE) */
+
+#define	PVR_MAX_MSG_LEN PVR_MAX_DEBUG_MESSAGE_LEN
+
+/* Message buffer for non-IRQ messages */
+static IMG_CHAR gszBufferNonIRQ[PVR_MAX_MSG_LEN + 1];
+
+/* Message buffer for IRQ messages */
+static IMG_CHAR gszBufferIRQ[PVR_MAX_MSG_LEN + 1];
+
+/* The lock is used to control access to gszBufferNonIRQ */
+static DEFINE_MUTEX(gsDebugMutexNonIRQ);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39))
+/* The lock is used to control access to gszBufferIRQ */
+static spinlock_t gsDebugLockIRQ = SPIN_LOCK_UNLOCKED;
+#else
+static DEFINE_SPINLOCK(gsDebugLockIRQ);
+#endif
+
+#define	USE_SPIN_LOCK (in_interrupt() || !preemptible())
+
+static inline void GetBufferLock(unsigned long *pulLockFlags)
+{
+	if (USE_SPIN_LOCK)
+	{
+		spin_lock_irqsave(&gsDebugLockIRQ, *pulLockFlags);
+	}
+	else
+	{
+		mutex_lock(&gsDebugMutexNonIRQ);
+	}
+}
+
+static inline void ReleaseBufferLock(unsigned long ulLockFlags)
+{
+	if (USE_SPIN_LOCK)
+	{
+		spin_unlock_irqrestore(&gsDebugLockIRQ, ulLockFlags);
+	}
+	else
+	{
+		mutex_unlock(&gsDebugMutexNonIRQ);
+	}
+}
+
+static inline void SelectBuffer(IMG_CHAR **ppszBuf, IMG_UINT32 *pui32BufSiz)
+{
+	if (USE_SPIN_LOCK)
+	{
+		*ppszBuf = gszBufferIRQ;
+		*pui32BufSiz = sizeof(gszBufferIRQ);
+	}
+	else
+	{
+		*ppszBuf = gszBufferNonIRQ;
+		*pui32BufSiz = sizeof(gszBufferNonIRQ);
+	}
+}
+
+/*
+ * Append a string to a buffer using formatted conversion.
+ * The function takes a variable number of arguments, pointed
+ * to by the var args list.
+ */
+static IMG_BOOL VBAppend(IMG_CHAR *pszBuf, IMG_UINT32 ui32BufSiz, const IMG_CHAR *pszFormat, va_list VArgs)
+{
+	IMG_UINT32 ui32Used;
+	IMG_UINT32 ui32Space;
+	IMG_INT32 i32Len;
+
+	ui32Used = strlen(pszBuf);
+	BUG_ON(ui32Used >= ui32BufSiz);
+	ui32Space = ui32BufSiz - ui32Used;
+
+	i32Len = vsnprintf(&pszBuf[ui32Used], ui32Space, pszFormat, VArgs);
+	pszBuf[ui32BufSiz - 1] = 0;
+
+	/* Return true if string was truncated */
+	return i32Len < 0 || i32Len >= (IMG_INT32)ui32Space;
+}
+
+/*************************************************************************/ /*!
+@Function       PVRSRVReleasePrintf
+@Description    To output an important message to the user in release builds
+@Input          pszFormat   The message format string
+@Input          ...         Zero or more arguments for use by the format string
+*/ /**************************************************************************/
+void PVRSRVReleasePrintf(const IMG_CHAR *pszFormat, ...)
+{
+	va_list vaArgs;
+	unsigned long ulLockFlags = 0;
+	IMG_CHAR *pszBuf;
+	IMG_UINT32 ui32BufSiz;
+
+	SelectBuffer(&pszBuf, &ui32BufSiz);
+
+	va_start(vaArgs, pszFormat);
+
+	GetBufferLock(&ulLockFlags);
+	strncpy(pszBuf, "PVR_K: ", (ui32BufSiz - 2));
+	pszBuf[ui32BufSiz - 1] = '\0';
+
+	if (VBAppend(pszBuf, ui32BufSiz, pszFormat, vaArgs))
+	{
+		printk(KERN_ERR "PVR_K:(Message Truncated): %s\n", pszBuf);
+	}
+	else
+	{
+		printk(KERN_ERR "%s\n", pszBuf);
+	}
+
+	ReleaseBufferLock(ulLockFlags);
+	va_end(vaArgs);
+}
+
+#if defined(PVRSRV_NEED_PVR_TRACE)
+
+/*************************************************************************/ /*!
+@Function       PVRTrace
+@Description    To output a debug message to the user
+@Input          pszFormat   The message format string
+@Input          ...         Zero or more arguments for use by the format string
+*/ /**************************************************************************/
+void PVRSRVTrace(const IMG_CHAR *pszFormat, ...)
+{
+	va_list VArgs;
+	unsigned long ulLockFlags = 0;
+	IMG_CHAR *pszBuf;
+	IMG_UINT32 ui32BufSiz;
+
+	SelectBuffer(&pszBuf, &ui32BufSiz);
+
+	va_start(VArgs, pszFormat);
+
+	GetBufferLock(&ulLockFlags);
+
+	strncpy(pszBuf, "PVR: ", (ui32BufSiz - 2));
+	pszBuf[ui32BufSiz - 1] = '\0';
+
+	if (VBAppend(pszBuf, ui32BufSiz, pszFormat, VArgs))
+	{
+		printk(KERN_ERR "PVR_K:(Message Truncated): %s\n", pszBuf);
+	}
+	else
+	{
+		printk(KERN_ERR "%s\n", pszBuf);
+	}
+
+	ReleaseBufferLock(ulLockFlags);
+
+	va_end(VArgs);
+}
+
+#endif /* defined(PVRSRV_NEED_PVR_TRACE) */
+
+#if defined(PVRSRV_NEED_PVR_DPF)
+
+/*
+ * Append a string to a buffer using formatted conversion.
+ * The function takes a variable number of arguments, calling
+ * VBAppend to do the actual work.
+ */
+static IMG_BOOL BAppend(IMG_CHAR *pszBuf, IMG_UINT32 ui32BufSiz, const IMG_CHAR *pszFormat, ...)
+{
+	va_list VArgs;
+	IMG_BOOL bTrunc;
+
+	va_start (VArgs, pszFormat);
+
+	bTrunc = VBAppend(pszBuf, ui32BufSiz, pszFormat, VArgs);
+
+	va_end (VArgs);
+
+	return bTrunc;
+}
+
+/*************************************************************************/ /*!
+@Function       PVRSRVDebugPrintf
+@Description    To output a debug message to the user
+@Input          uDebugLevel The current debug level
+@Input          pszFile     The source file generating the message
+@Input          uLine       The line of the source file
+@Input          pszFormat   The message format string
+@Input          ...         Zero or more arguments for use by the format string
+*/ /**************************************************************************/
+void PVRSRVDebugPrintf(IMG_UINT32 ui32DebugLevel,
+			   const IMG_CHAR *pszFullFileName,
+			   IMG_UINT32 ui32Line,
+			   const IMG_CHAR *pszFormat,
+			   ...)
+{
+	IMG_BOOL bNoLoc;
+	const IMG_CHAR *pszFileName = pszFullFileName;
+	IMG_CHAR *pszLeafName;
+
+	bNoLoc = (IMG_BOOL)((ui32DebugLevel & DBGPRIV_CALLTRACE) |
+						(ui32DebugLevel & DBGPRIV_BUFFERED)) ? IMG_TRUE : IMG_FALSE;
+
+	if (gPVRDebugLevel & ui32DebugLevel)
+	{
+		va_list vaArgs;
+		unsigned long ulLockFlags = 0;
+		IMG_CHAR *pszBuf;
+		IMG_UINT32 ui32BufSiz;
+
+		SelectBuffer(&pszBuf, &ui32BufSiz);
+
+		va_start(vaArgs, pszFormat);
+
+		GetBufferLock(&ulLockFlags);
+
+		switch (ui32DebugLevel)
+		{
+			case DBGPRIV_FATAL:
+			{
+				strncpy(pszBuf, "PVR_K:(Fatal): ", (ui32BufSiz - 2));
+				break;
+			}
+			case DBGPRIV_ERROR:
+			{
+				strncpy(pszBuf, "PVR_K:(Error): ", (ui32BufSiz - 2));
+				break;
+			}
+			case DBGPRIV_WARNING:
+			{
+				strncpy(pszBuf, "PVR_K:(Warn):  ", (ui32BufSiz - 2));
+				break;
+			}
+			case DBGPRIV_MESSAGE:
+			{
+				strncpy(pszBuf, "PVR_K:(Mesg):  ", (ui32BufSiz - 2));
+				break;
+			}
+			case DBGPRIV_VERBOSE:
+			{
+				strncpy(pszBuf, "PVR_K:(Verb):  ", (ui32BufSiz - 2));
+				break;
+			}
+			case DBGPRIV_DEBUG:
+			{
+				strncpy(pszBuf, "PVR_K:(Debug): ", (ui32BufSiz - 2));
+				break;
+			}
+			case DBGPRIV_CALLTRACE:
+			case DBGPRIV_ALLOC:
+			case DBGPRIV_BUFFERED:
+			default:
+			{
+				strncpy(pszBuf, "PVR_K:  ", (ui32BufSiz - 2));
+				break;
+			}
+		}
+		pszBuf[ui32BufSiz - 1] = '\0';
+
+		(void) BAppend(pszBuf, ui32BufSiz, "%u: ", current->pid);
+
+
+		if (VBAppend(pszBuf, ui32BufSiz, pszFormat, vaArgs))
+		{
+			printk(KERN_ERR "PVR_K:(Message Truncated): %s\n", pszBuf);
+		}
+		else
+		{
+#if !defined(__sh__)
+			pszLeafName = (IMG_CHAR *)strrchr (pszFileName, '/');
+
+			if (pszLeafName)
+			{
+				pszFileName = pszLeafName+1;
+			}
+#endif /* __sh__ */
+
+			if (BAppend(pszBuf, ui32BufSiz, " [%u, %s]", ui32Line, pszFileName))
+			{
+				printk(KERN_ERR "PVR_K:(Message Truncated): %s\n", pszBuf);
+			}
+			else
+			{
+				if (ui32DebugLevel & DBGPRIV_BUFFERED)
+				{
+					AddToBufferCCB(pszFileName, ui32Line, pszBuf);
+				}
+				else
+				{
+					printk(KERN_ERR "%s\n", pszBuf);
+				}
+			}
+		}
+
+		ReleaseBufferLock(ulLockFlags);
+
+		va_end (vaArgs);
+	}
+}
+
+#endif /* PVRSRV_NEED_PVR_DPF */
+
+
+/*************************************************************************/ /*!
+ Version DebugFS entry
+*/ /**************************************************************************/
+
+static void *_DebugVersionCompare_AnyVaCb(PVRSRV_DEVICE_NODE *psDevNode,
+					  va_list va)
+{
+	loff_t *puiCurrentPosition = va_arg(va, loff_t *);
+	loff_t uiPosition = va_arg(va, loff_t);
+	loff_t uiCurrentPosition = *puiCurrentPosition;
+
+	(*puiCurrentPosition)++;
+
+	return (uiCurrentPosition == uiPosition) ? psDevNode : NULL;
+}
+
+static void *_DebugVersionSeqStart(struct seq_file *psSeqFile,
+				   loff_t *puiPosition)
+{
+	PVRSRV_DATA *psPVRSRVData = (PVRSRV_DATA *)psSeqFile->private;
+	loff_t uiCurrentPosition = 1;
+
+	if (*puiPosition == 0)
+	{
+		return SEQ_START_TOKEN;
+	}
+
+	return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList,
+					      _DebugVersionCompare_AnyVaCb,
+					      &uiCurrentPosition,
+					      *puiPosition);
+}
+
+static void _DebugVersionSeqStop(struct seq_file *psSeqFile, void *pvData)
+{
+	PVR_UNREFERENCED_PARAMETER(psSeqFile);
+	PVR_UNREFERENCED_PARAMETER(pvData);
+}
+
+static void *_DebugVersionSeqNext(struct seq_file *psSeqFile,
+				  void *pvData,
+				  loff_t *puiPosition)
+{
+	PVRSRV_DATA *psPVRSRVData = (PVRSRV_DATA *)psSeqFile->private;
+	loff_t uiCurrentPosition = 1;
+
+	PVR_UNREFERENCED_PARAMETER(pvData);
+
+	(*puiPosition)++;
+
+	return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList,
+					      _DebugVersionCompare_AnyVaCb,
+					      &uiCurrentPosition,
+					      *puiPosition);
+}
+
+static int _DebugVersionSeqShow(struct seq_file *psSeqFile, void *pvData)
+{
+	if (pvData == SEQ_START_TOKEN)
+	{
+		const IMG_CHAR *pszSystemVersionString = PVRSRVGetSystemName();
+
+		seq_printf(psSeqFile, "Version: %s (%s) %s\n",
+			   PVRVERSION_STRING,
+			   PVR_BUILD_TYPE, PVR_BUILD_DIR);
+
+		seq_printf(psSeqFile, "System Version String: %s\n", pszSystemVersionString);
+
+		seq_printf(psSeqFile, "Kernel Version: " UTS_RELEASE " (" UTS_MACHINE ")\n");
+	}
+	else if (pvData != NULL)
+	{
+		PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)pvData;
+
+		if (psDevNode->pfnDeviceVersionString)
+		{
+			IMG_CHAR *pszDeviceVersionString;
+			
+			if (psDevNode->pfnDeviceVersionString(psDevNode, &pszDeviceVersionString) == PVRSRV_OK)
+			{
+				seq_printf(psSeqFile, "%s\n", pszDeviceVersionString);
+				
+				OSFreeMem(pszDeviceVersionString);
+			}
+		}
+	}
+
+	return 0;
+}
+
+static struct seq_operations gsDebugVersionReadOps =
+{
+	.start = _DebugVersionSeqStart,
+	.stop = _DebugVersionSeqStop,
+	.next = _DebugVersionSeqNext,
+	.show = _DebugVersionSeqShow,
+};
+
+/*************************************************************************/ /*!
+ Status DebugFS entry
+*/ /**************************************************************************/
+
+static void *_DebugStatusCompare_AnyVaCb(PVRSRV_DEVICE_NODE *psDevNode,
+					 va_list va)
+{
+	loff_t *puiCurrentPosition = va_arg(va, loff_t *);
+	loff_t uiPosition = va_arg(va, loff_t);
+	loff_t uiCurrentPosition = *puiCurrentPosition;
+
+	(*puiCurrentPosition)++;
+
+	return (uiCurrentPosition == uiPosition) ? psDevNode : NULL;
+}
+
+static void *_DebugStatusSeqStart(struct seq_file *psSeqFile,
+				  loff_t *puiPosition)
+{
+	PVRSRV_DATA *psPVRSRVData = (PVRSRV_DATA *)psSeqFile->private;
+	loff_t uiCurrentPosition = 1;
+
+	if (*puiPosition == 0)
+	{
+		return SEQ_START_TOKEN;
+	}
+
+	return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList,
+					      _DebugStatusCompare_AnyVaCb,
+					      &uiCurrentPosition,
+					      *puiPosition);
+}
+
+static void _DebugStatusSeqStop(struct seq_file *psSeqFile, void *pvData)
+{
+	PVR_UNREFERENCED_PARAMETER(psSeqFile);
+	PVR_UNREFERENCED_PARAMETER(pvData);
+}
+
+static void *_DebugStatusSeqNext(struct seq_file *psSeqFile,
+				    void *pvData,
+				    loff_t *puiPosition)
+{
+	PVRSRV_DATA *psPVRSRVData = (PVRSRV_DATA *)psSeqFile->private;
+	loff_t uiCurrentPosition = 1;
+
+	PVR_UNREFERENCED_PARAMETER(pvData);
+
+	(*puiPosition)++;
+
+	return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList,
+					      _DebugVersionCompare_AnyVaCb,
+					      &uiCurrentPosition,
+					      *puiPosition);
+}
+
+static int _DebugStatusSeqShow(struct seq_file *psSeqFile, void *pvData)
+{
+	if (pvData == SEQ_START_TOKEN)
+	{
+		PVRSRV_DATA *psPVRSRVData = (PVRSRV_DATA *)psSeqFile->private;
+
+		if (psPVRSRVData != NULL)
+		{
+			switch (psPVRSRVData->eServicesState)
+			{
+				case PVRSRV_SERVICES_STATE_OK:
+					seq_printf(psSeqFile, "Driver Status:   OK\n");
+					break;
+				case PVRSRV_SERVICES_STATE_BAD:
+					seq_printf(psSeqFile, "Driver Status:   BAD\n");
+					break;
+				default:
+					seq_printf(psSeqFile, "Driver Status:   %d\n", psPVRSRVData->eServicesState);
+					break;
+			}
+		}
+	}
+	else if (pvData != NULL)
+	{
+		PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData;
+		IMG_CHAR           *pszStatus = "";
+		IMG_CHAR           *pszReason = "";
+		
+		/* Update the health status now if possible... */
+		if (psDeviceNode->pfnUpdateHealthStatus)
+		{
+			psDeviceNode->pfnUpdateHealthStatus(psDeviceNode, IMG_FALSE);
+		}
+		
+		switch (psDeviceNode->eHealthStatus)
+		{
+			case PVRSRV_DEVICE_HEALTH_STATUS_OK:  pszStatus = "OK";  break;
+			case PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING:  pszStatus = "NOT RESPONDING";  break;
+			case PVRSRV_DEVICE_HEALTH_STATUS_DEAD:  pszStatus = "DEAD";  break;
+			default:  pszStatus = "UNKNOWN";  break;
+		}
+
+		/* Write the device status to the sequence file... */
+		if (psDeviceNode->sDevId.eDeviceType == PVRSRV_DEVICE_TYPE_RGX)
+		{
+			switch (psDeviceNode->eHealthReason)
+			{
+				case PVRSRV_DEVICE_HEALTH_REASON_NONE:  pszReason = "";  break;
+				case PVRSRV_DEVICE_HEALTH_REASON_ASSERTED:  pszReason = " (FW Assert)";  break;
+				case PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING:  pszReason = " (Poll failure)";  break;
+				case PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS:  pszReason = " (Global Event Object timeouts rising)";  break;
+				case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT:  pszReason = " (KCCB offset invalid)";  break;
+				case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED:  pszReason = " (KCCB stalled)";  break;
+				default:  pszReason = " (Unknown reason)";  break;
+			}
+
+			seq_printf(psSeqFile, "Firmware Status: %s%s\n", pszStatus, pszReason);
+
+			/* Write other useful stats to aid the test cycle... */
+			if (psDeviceNode->pvDevice != NULL)
+			{
+				PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+				RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBuf;
+
+				/* Calculate the number of HWR events in total across all the DMs... */
+				if (psRGXFWIfTraceBufCtl != NULL)
+				{
+					IMG_UINT32 ui32HWREventCount = 0;
+					IMG_UINT32 ui32CRREventCount = 0;
+					IMG_UINT32 ui32DMIndex;
+
+					for (ui32DMIndex = 0; ui32DMIndex < RGXFWIF_DM_MAX; ui32DMIndex++)
+					{
+						ui32HWREventCount += psRGXFWIfTraceBufCtl->aui16HwrDmLockedUpCount[ui32DMIndex];
+						ui32CRREventCount += psRGXFWIfTraceBufCtl->aui16HwrDmOverranCount[ui32DMIndex];
+					}
+
+					seq_printf(psSeqFile, "HWR Event Count: %d\n", ui32HWREventCount);
+					seq_printf(psSeqFile, "CRR Event Count: %d\n", ui32CRREventCount);
+				}
+				
+				/* Write the number of APM events... */
+				seq_printf(psSeqFile, "APM Event Count: %d\n", psDevInfo->ui32ActivePMReqTotal);
+				
+				/* Write the current GPU Utilisation values... */
+				if (psDevInfo->pfnRegisterGpuUtilStats && psDevInfo->pfnGetGpuUtilStats &&
+				    psDeviceNode->eHealthStatus == PVRSRV_DEVICE_HEALTH_STATUS_OK)
+				{
+					RGXFWIF_GPU_UTIL_STATS sGpuUtilStats;
+					PVRSRV_ERROR eError = PVRSRV_OK;
+
+					if (ghGpuUtilUserDebugFS == NULL)
+					{
+						eError = psDevInfo->pfnRegisterGpuUtilStats(&ghGpuUtilUserDebugFS);
+					}
+
+					if (eError == PVRSRV_OK)
+					{
+						eError = psDevInfo->pfnGetGpuUtilStats(psDeviceNode,
+						                                       ghGpuUtilUserDebugFS,
+						                                       &sGpuUtilStats);
+					}
+
+					if ((eError == PVRSRV_OK) &&
+					    ((IMG_UINT32)sGpuUtilStats.ui64GpuStatCumulative))
+					{
+						IMG_UINT64 util;
+						IMG_UINT32 rem;
+
+						util = 100 * (sGpuUtilStats.ui64GpuStatActiveHigh +
+						              sGpuUtilStats.ui64GpuStatActiveLow);
+						util = OSDivide64(util, (IMG_UINT32)sGpuUtilStats.ui64GpuStatCumulative, &rem);
+
+						seq_printf(psSeqFile, "GPU Utilisation: %u%%\n", (IMG_UINT32)util);
+					}
+					else
+					{
+						seq_printf(psSeqFile, "GPU Utilisation: -\n");
+					}
+				}
+			}
+		}
+		else
+		{
+			switch (psDeviceNode->eHealthReason)
+			{
+				case PVRSRV_DEVICE_HEALTH_REASON_NONE:  pszReason = "";  break;
+				case PVRSRV_DEVICE_HEALTH_REASON_ASSERTED:  pszReason = " (ASSERTED)";  break;
+				case PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING:  pszReason = " (POLL FAILING)";  break;
+				case PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS:  pszReason = " (TIMEOUTS)";  break;
+				case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT:  pszReason = " (QUEUE CORRUPT)";  break;
+				case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED:  pszReason = " (QUEUE STALLED)";  break;
+				default:  pszReason = " (UNKNOWN)";  break;
+			}
+
+			seq_printf(psSeqFile, "Device %d Status: %s%s\n",
+					   psDeviceNode->sDevId.ui32DeviceIndex, pszStatus, pszReason);
+		}
+	}
+
+	return 0;
+}
+
+static IMG_INT DebugStatusSet(const char __user *pcBuffer,
+			     size_t uiCount,
+			     loff_t uiPosition,
+			     void *pvData)
+{
+	IMG_CHAR acDataBuffer[6];
+
+	if (uiPosition != 0)
+	{
+		return -EIO;
+	}
+
+	if (uiCount > (sizeof(acDataBuffer) / sizeof(acDataBuffer[0])))
+	{
+		return -EINVAL;
+	}
+
+	if (pvr_copy_from_user(acDataBuffer, pcBuffer, uiCount))
+	{
+		return -EINVAL;
+	}
+
+	if (acDataBuffer[uiCount - 1] != '\n')
+	{
+		return -EINVAL;
+	}
+
+	if (((acDataBuffer[0] == 'k') || ((acDataBuffer[0] == 'K'))) && uiCount == 2)
+	{
+		PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+		psPVRSRVData->eServicesState = PVRSRV_SERVICES_STATE_BAD;
+	}
+	else
+	{
+		return -EINVAL;
+	}
+
+	return uiCount;
+}
+
+static struct seq_operations gsDebugStatusReadOps =
+{
+	.start = _DebugStatusSeqStart,
+	.stop = _DebugStatusSeqStop,
+	.next = _DebugStatusSeqNext,
+	.show = _DebugStatusSeqShow,
+};
+
+/*************************************************************************/ /*!
+ Dump Debug DebugFS entry
+*/ /**************************************************************************/
+
+static void *_DebugDumpDebugCompare_AnyVaCb(PVRSRV_DEVICE_NODE *psDevNode, va_list va)
+{
+	loff_t *puiCurrentPosition = va_arg(va, loff_t *);
+	loff_t uiPosition = va_arg(va, loff_t);
+	loff_t uiCurrentPosition = *puiCurrentPosition;
+
+	(*puiCurrentPosition)++;
+
+	return (uiCurrentPosition == uiPosition) ? psDevNode : NULL;
+}
+
+static void *_DebugDumpDebugSeqStart(struct seq_file *psSeqFile, loff_t *puiPosition)
+{
+	PVRSRV_DATA *psPVRSRVData = (PVRSRV_DATA *)psSeqFile->private;
+	loff_t uiCurrentPosition = 1;
+
+	if (*puiPosition == 0)
+	{
+		return SEQ_START_TOKEN;
+	}
+
+	return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList,
+					      _DebugDumpDebugCompare_AnyVaCb,
+					      &uiCurrentPosition,
+					      *puiPosition);
+}
+
+static void _DebugDumpDebugSeqStop(struct seq_file *psSeqFile, void *pvData)
+{
+	PVR_UNREFERENCED_PARAMETER(psSeqFile);
+	PVR_UNREFERENCED_PARAMETER(pvData);
+}
+
+static void *_DebugDumpDebugSeqNext(struct seq_file *psSeqFile,
+				    void *pvData,
+				    loff_t *puiPosition)
+{
+	PVRSRV_DATA *psPVRSRVData = (PVRSRV_DATA *)psSeqFile->private;
+	loff_t uiCurrentPosition = 1;
+
+	PVR_UNREFERENCED_PARAMETER(pvData);
+
+	(*puiPosition)++;
+
+	return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList,
+					      _DebugDumpDebugCompare_AnyVaCb,
+					      &uiCurrentPosition,
+					      *puiPosition);
+}
+
+static struct seq_file *gpsDumpDebugPrintfSeqFile = IMG_NULL;
+
+static void _DumpDebugSeqPrintf(const IMG_CHAR *pszFormat, ...)
+{
+	if (gpsDumpDebugPrintfSeqFile)
+	{
+		IMG_CHAR  szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN];
+		va_list  ArgList;
+
+		va_start(ArgList, pszFormat);
+		vsnprintf(szBuffer, PVR_MAX_DEBUG_MESSAGE_LEN, pszFormat, ArgList);
+		seq_printf(gpsDumpDebugPrintfSeqFile, "%s\n", szBuffer);
+		va_end(ArgList);
+	}
+}
+
+static int _DebugDumpDebugSeqShow(struct seq_file *psSeqFile, void *pvData)
+{
+	if (pvData != NULL  &&  pvData != SEQ_START_TOKEN)
+	{
+		PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData;
+		
+		if (psDeviceNode->pvDevice != NULL)
+		{
+			gpsDumpDebugPrintfSeqFile = psSeqFile;
+			PVRSRVDebugRequest(DEBUG_REQUEST_VERBOSITY_MAX, _DumpDebugSeqPrintf);
+			gpsDumpDebugPrintfSeqFile = IMG_NULL;
+			
+		}
+	}
+
+	return 0;
+}
+
+static struct seq_operations gsDumpDebugReadOps = 
+{
+	.start = _DebugDumpDebugSeqStart,
+	.stop  = _DebugDumpDebugSeqStop,
+	.next  = _DebugDumpDebugSeqNext,
+	.show  = _DebugDumpDebugSeqShow,
+};
+/*************************************************************************/ /*!
+ Firmware Trace DebugFS entry
+*/ /**************************************************************************/
+
+#if defined(PVRSRV_ENABLE_FW_TRACE_DEBUGFS)
+static void *_DebugFWTraceCompare_AnyVaCb(PVRSRV_DEVICE_NODE *psDevNode, va_list va)
+{
+	loff_t *puiCurrentPosition = va_arg(va, loff_t *);
+	loff_t uiPosition = va_arg(va, loff_t);
+	loff_t uiCurrentPosition = *puiCurrentPosition;
+
+	(*puiCurrentPosition)++;
+
+	return (uiCurrentPosition == uiPosition) ? psDevNode : NULL;
+}
+
+static void *_DebugFWTraceSeqStart(struct seq_file *psSeqFile, loff_t *puiPosition)
+{
+	PVRSRV_DATA *psPVRSRVData = (PVRSRV_DATA *)psSeqFile->private;
+	loff_t uiCurrentPosition = 1;
+
+	if (*puiPosition == 0)
+	{
+		return SEQ_START_TOKEN;
+	}
+
+	return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList,
+					      _DebugFWTraceCompare_AnyVaCb,
+					      &uiCurrentPosition,
+					      *puiPosition);
+}
+
+static void _DebugFWTraceSeqStop(struct seq_file *psSeqFile, void *pvData)
+{
+	PVR_UNREFERENCED_PARAMETER(psSeqFile);
+	PVR_UNREFERENCED_PARAMETER(pvData);
+}
+
+static void *_DebugFWTraceSeqNext(struct seq_file *psSeqFile,
+				    void *pvData,
+				    loff_t *puiPosition)
+{
+	PVRSRV_DATA *psPVRSRVData = (PVRSRV_DATA *)psSeqFile->private;
+	loff_t uiCurrentPosition = 1;
+
+	PVR_UNREFERENCED_PARAMETER(pvData);
+
+	(*puiPosition)++;
+
+	return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList,
+					      _DebugFWTraceCompare_AnyVaCb,
+					      &uiCurrentPosition,
+					      *puiPosition);
+}
+
+static struct seq_file *gpsFWTracePrintfSeqFile = IMG_NULL;
+
+static void _FWTraceSeqPrintf(const IMG_CHAR *pszFormat, ...)
+{
+	if (gpsFWTracePrintfSeqFile)
+	{
+		IMG_CHAR  szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN];
+		va_list  ArgList;
+
+		va_start(ArgList, pszFormat);
+		vsnprintf(szBuffer, PVR_MAX_DEBUG_MESSAGE_LEN, pszFormat, ArgList);
+		seq_printf(gpsFWTracePrintfSeqFile, "%s\n", szBuffer);
+		va_end(ArgList);
+	}
+}
+
+static int _DebugFWTraceSeqShow(struct seq_file *psSeqFile, void *pvData)
+{
+	if (pvData != NULL  &&  pvData != SEQ_START_TOKEN)
+	{
+		PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData;
+		
+		if (psDeviceNode->pvDevice != NULL)
+		{
+			PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+			gpsFWTracePrintfSeqFile = psSeqFile;
+			RGXDumpFirmwareTrace(_FWTraceSeqPrintf, psDevInfo);
+			gpsFWTracePrintfSeqFile = IMG_NULL;
+		}
+	}
+
+	return 0;
+}
+
+static struct seq_operations gsFWTraceReadOps = 
+{
+	.start = _DebugFWTraceSeqStart,
+	.stop  = _DebugFWTraceSeqStop,
+	.next  = _DebugFWTraceSeqNext,
+	.show  = _DebugFWTraceSeqShow,
+};
+#endif
+
+
+/*************************************************************************/ /*!
+ Debug level DebugFS entry
+*/ /**************************************************************************/
+
+#if defined(DEBUG)
+static void *DebugLevelSeqStart(struct seq_file *psSeqFile, loff_t *puiPosition)
+{
+	if (*puiPosition == 0)
+	{
+		return psSeqFile->private;
+	}
+
+	return NULL;
+}
+
+static void DebugLevelSeqStop(struct seq_file *psSeqFile, void *pvData)
+{
+	PVR_UNREFERENCED_PARAMETER(psSeqFile);
+	PVR_UNREFERENCED_PARAMETER(pvData);
+}
+
+static void *DebugLevelSeqNext(struct seq_file *psSeqFile,
+			       void *pvData,
+			       loff_t *puiPosition)
+{
+	PVR_UNREFERENCED_PARAMETER(psSeqFile);
+	PVR_UNREFERENCED_PARAMETER(pvData);
+	PVR_UNREFERENCED_PARAMETER(puiPosition);
+
+	return NULL;
+}
+
+static int DebugLevelSeqShow(struct seq_file *psSeqFile, void *pvData)
+{
+	if (pvData != NULL)
+	{
+		IMG_UINT32 uiDebugLevel = *((IMG_UINT32 *)pvData);
+
+		seq_printf(psSeqFile, "%u\n", uiDebugLevel);
+
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static struct seq_operations gsDebugLevelReadOps =
+{
+	.start = DebugLevelSeqStart,
+	.stop = DebugLevelSeqStop,
+	.next = DebugLevelSeqNext,
+	.show = DebugLevelSeqShow,
+};
+
+
+static IMG_INT DebugLevelSet(const char __user *pcBuffer,
+			     size_t uiCount,
+			     loff_t uiPosition,
+			     void *pvData)
+{
+	IMG_UINT32 *uiDebugLevel = (IMG_UINT32 *)pvData;
+	IMG_CHAR acDataBuffer[6];
+
+	if (uiPosition != 0)
+	{
+		return -EIO;
+	}
+
+	if (uiCount > (sizeof(acDataBuffer) / sizeof(acDataBuffer[0])))
+	{
+		return -EINVAL;
+	}
+
+	if (pvr_copy_from_user(acDataBuffer, pcBuffer, uiCount))
+	{
+		return -EINVAL;
+	}
+
+	if (acDataBuffer[uiCount - 1] != '\n')
+	{
+		return -EINVAL;
+	}
+
+	if (sscanf(acDataBuffer, "%u", &gPVRDebugLevel) == 0)
+	{
+		return -EINVAL;
+	}
+
+	/* As this is Linux the next line uses a GCC builtin function */
+	(*uiDebugLevel) &= (1 << __builtin_ffsl(DBGPRIV_LAST)) - 1;
+
+	return uiCount;
+}
+#endif /* defined(DEBUG) */
+
+static PVR_DEBUGFS_ENTRY_DATA *gpsVersionDebugFSEntry;
+
+static PVR_DEBUGFS_ENTRY_DATA *gpsStatusDebugFSEntry;
+static PVR_DEBUGFS_ENTRY_DATA *gpsDumpDebugDebugFSEntry;
+
+#if defined(PVRSRV_ENABLE_FW_TRACE_DEBUGFS)
+static PVR_DEBUGFS_ENTRY_DATA *gpsFWTraceDebugFSEntry;
+#endif
+
+#if defined(DEBUG)
+static PVR_DEBUGFS_ENTRY_DATA *gpsDebugLevelDebugFSEntry;
+#endif
+
+int PVRDebugCreateDebugFSEntries(void)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	int iResult;
+
+	PVR_ASSERT(psPVRSRVData != NULL);
+	PVR_ASSERT(gpsVersionDebugFSEntry == NULL);
+
+	iResult = PVRDebugFSCreateEntry("version",
+					NULL,
+					&gsDebugVersionReadOps,
+					NULL,
+					psPVRSRVData,
+					&gpsVersionDebugFSEntry);
+	if (iResult != 0)
+	{
+		return iResult;
+	}
+
+	iResult = PVRDebugFSCreateEntry("status",
+					NULL,
+					&gsDebugStatusReadOps,
+					(PVRSRV_ENTRY_WRITE_FUNC *)DebugStatusSet,
+					psPVRSRVData,
+					&gpsStatusDebugFSEntry);
+	if (iResult != 0)
+	{
+		goto ErrorRemoveVersionEntry;
+	}
+
+	iResult = PVRDebugFSCreateEntry("debug_dump",
+					NULL,
+					&gsDumpDebugReadOps,
+					NULL,
+					psPVRSRVData,
+					&gpsDumpDebugDebugFSEntry);
+	if (iResult != 0)
+	{
+		goto ErrorRemoveStatusEntry;
+	}
+
+#if defined(PVRSRV_ENABLE_FW_TRACE_DEBUGFS)
+	iResult = PVRDebugFSCreateEntry("firmware_trace",
+					NULL,
+					&gsFWTraceReadOps,
+					NULL,
+					psPVRSRVData,
+					&gpsFWTraceDebugFSEntry);
+	if (iResult != 0)
+	{
+		goto ErrorRemoveDumpDebugEntry;
+	}
+#endif
+
+#if defined(DEBUG)
+	iResult = PVRDebugFSCreateEntry("debug_level",
+					NULL,
+					&gsDebugLevelReadOps,
+					(PVRSRV_ENTRY_WRITE_FUNC *)DebugLevelSet,
+					&gPVRDebugLevel,
+					&gpsDebugLevelDebugFSEntry);
+	if (iResult != 0)
+	{
+#if defined(PVRSRV_ENABLE_FW_TRACE_DEBUGFS)
+		goto ErrorRemoveFWTraceLogEntry;
+#else
+		goto ErrorRemoveDumpDebugEntry;
+#endif
+	}
+#endif
+
+	return 0;
+
+#if (defined(DEBUG) && defined(PVRSRV_ENABLE_FW_TRACE_DEBUGFS))
+ErrorRemoveFWTraceLogEntry:
+	PVRDebugFSRemoveEntry(gpsFWTraceDebugFSEntry);
+	gpsFWTraceDebugFSEntry = NULL;
+#endif
+
+#if (defined(DEBUG) || defined(PVRSRV_ENABLE_FW_TRACE_DEBUGFS))
+ErrorRemoveDumpDebugEntry:
+	PVRDebugFSRemoveEntry(gpsDumpDebugDebugFSEntry);
+	gpsDumpDebugDebugFSEntry = NULL;
+#endif
+
+ErrorRemoveStatusEntry:
+	PVRDebugFSRemoveEntry(gpsStatusDebugFSEntry);
+	gpsStatusDebugFSEntry = NULL;
+
+ErrorRemoveVersionEntry:
+	PVRDebugFSRemoveEntry(gpsVersionDebugFSEntry);
+	gpsVersionDebugFSEntry = NULL;
+
+	return iResult;
+}
+
+void PVRDebugRemoveDebugFSEntries(void)
+{
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_DEVICE_NODE *psDeviceNode;
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+
+	psDeviceNode = psPVRSRVData->apsRegisteredDevNodes[0];
+	if (psDeviceNode)
+	{
+		psDevInfo = psDeviceNode->pvDevice;
+		if (psDevInfo && psDevInfo->pfnUnregisterGpuUtilStats)
+		{
+			psDevInfo->pfnUnregisterGpuUtilStats(ghGpuUtilUserDebugFS);
+		}
+	}
+
+#if defined(DEBUG)
+	if (gpsDebugLevelDebugFSEntry != NULL)
+	{
+		PVRDebugFSRemoveEntry(gpsDebugLevelDebugFSEntry);
+		gpsDebugLevelDebugFSEntry = NULL;
+	}
+#endif
+
+#if defined(PVRSRV_ENABLE_FW_TRACE_DEBUGFS)
+	if (gpsFWTraceDebugFSEntry != NULL)
+	{
+		PVRDebugFSRemoveEntry(gpsFWTraceDebugFSEntry);
+		gpsFWTraceDebugFSEntry = NULL;
+	}
+#endif
+
+	if (gpsDumpDebugDebugFSEntry != NULL)
+	{
+		PVRDebugFSRemoveEntry(gpsDumpDebugDebugFSEntry);
+		gpsDumpDebugDebugFSEntry = NULL;
+	}
+
+	if (gpsStatusDebugFSEntry != NULL)
+	{
+		PVRDebugFSRemoveEntry(gpsStatusDebugFSEntry);
+		gpsStatusDebugFSEntry = NULL;
+	}
+
+	if (gpsVersionDebugFSEntry != NULL)
+	{
+		PVRDebugFSRemoveEntry(gpsVersionDebugFSEntry);
+		gpsVersionDebugFSEntry = NULL;
+	}
+}
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/pvr_debugfs.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/pvr_debugfs.c
new file mode 100644
index 0000000..e71ac45
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/pvr_debugfs.c
@@ -0,0 +1,857 @@
+/*************************************************************************/ /*!
+@File
+@Title          Functions for creating debugfs directories and entries.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "pvr_debug.h"
+#include "pvr_debugfs.h"
+#include "allocmem.h"
+
+#define PVR_DEBUGFS_DIR_NAME "pvr"
+
+/* Define to set the PVR_DPF debug output level for pvr_debugfs.
+ * Normally, leave this set to PVR_DBGDRIV_MESSAGE, but when debugging
+ * you can temporarily change this to PVR_DBG_ERROR.
+ */
+#if defined(PVRSRV_NEED_PVR_DPF)
+#define PVR_DEBUGFS_PVR_DPF_LEVEL      PVR_DBGDRIV_MESSAGE
+#else
+#define PVR_DEBUGFS_PVR_DPF_LEVEL      0
+#endif
+
+static struct dentry *gpsPVRDebugFSEntryDir = NULL;
+
+/* Lock used when adjusting refCounts and deleting entries */
+static struct mutex gDebugFSLock;
+
+/*************************************************************************/ /*!
+ Statistic entry read functions
+*/ /**************************************************************************/
+
+typedef struct _PVR_DEBUGFS_DRIVER_STAT_
+{
+	void				 *pvData;
+	OS_STATS_PRINT_FUNC  *pfnStatsPrint;
+	PVRSRV_INC_STAT_MEM_REFCOUNT_FUNC	*pfnIncStatMemRefCount;
+	PVRSRV_DEC_STAT_MEM_REFCOUNT_FUNC	*pfnDecStatMemRefCount;
+	IMG_UINT32				ui32RefCount;
+	PVR_DEBUGFS_ENTRY_DATA	*pvDebugFSEntry;
+} PVR_DEBUGFS_DRIVER_STAT;
+
+typedef struct _PVR_DEBUGFS_DIR_DATA_
+{
+	struct dentry *psDir;
+	PVR_DEBUGFS_DIR_DATA *psParentDir;
+	IMG_UINT32	ui32RefCount;
+} PVR_DEBUGFS_DIR_DATA;
+
+typedef struct _PVR_DEBUGFS_ENTRY_DATA_
+{
+	struct dentry *psEntry;
+	PVR_DEBUGFS_DIR_DATA *psParentDir;
+	IMG_UINT32	ui32RefCount;
+	PVR_DEBUGFS_DRIVER_STAT *psStatData;
+} PVR_DEBUGFS_ENTRY_DATA;
+
+typedef struct _PVR_DEBUGFS_PRIV_DATA_
+{
+	struct seq_operations	*psReadOps;
+	PVRSRV_ENTRY_WRITE_FUNC	*pfnWrite;
+	void			*pvData;
+	IMG_BOOL		bValid;
+	PVR_DEBUGFS_ENTRY_DATA *psDebugFSEntry;
+} PVR_DEBUGFS_PRIV_DATA;
+
+static void _RefDirEntry(PVR_DEBUGFS_DIR_DATA *psDirEntry);
+static void _UnrefAndMaybeDestroyDirEntry(PVR_DEBUGFS_DIR_DATA *psDirEntry);
+static void _UnrefAndMaybeDestroyDirEntryWhileLocked(PVR_DEBUGFS_DIR_DATA *psDirEntry);
+static IMG_BOOL _RefDebugFSEntry(PVR_DEBUGFS_ENTRY_DATA *psDebugFSEntry);
+static void _UnrefAndMaybeDestroyDebugFSEntry(PVR_DEBUGFS_ENTRY_DATA *psDebugFSEntry);
+static IMG_BOOL _RefStatEntry(PVR_DEBUGFS_DRIVER_STAT *psStatEntry);
+static IMG_BOOL _UnrefAndMaybeDestroyStatEntry(PVR_DEBUGFS_DRIVER_STAT *psStatEntry);
+
+static void _StatsSeqPrintf(void *pvFile, const IMG_CHAR *pszFormat, ...)
+{
+	IMG_CHAR  szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN];
+	va_list  ArgList;
+
+	va_start(ArgList, pszFormat);
+	vsnprintf(szBuffer, PVR_MAX_DEBUG_MESSAGE_LEN, pszFormat, ArgList);
+	seq_printf((struct seq_file *)pvFile, "%s", szBuffer);
+	va_end(ArgList);
+}
+
+static void *_DebugFSStatisticSeqStart(struct seq_file *psSeqFile, loff_t *puiPosition)
+{
+	PVR_DEBUGFS_DRIVER_STAT *psStatData = (PVR_DEBUGFS_DRIVER_STAT *)psSeqFile->private;
+
+	if (psStatData)
+	{
+		if (psStatData->pvData)
+		{
+			/* take reference on psStatData (for duration of stat iteration) */
+			if (!_RefStatEntry((void*)psStatData))
+			{
+				PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called for '%s' but failed to take ref on stat entry, returning -EIO(%d)", __FUNCTION__, psStatData->pvDebugFSEntry->psEntry->d_iname, -EIO));
+				return NULL;
+			}
+		}
+		else
+		{
+			/* NB This is valid if the stat has no structure associated with it (eg. driver_stats, which prints totals stored in a number of global vars) */
+		}
+
+		if (*puiPosition == 0)
+		{
+			return psStatData;
+		}
+	}
+	else
+	{
+		PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called when psStatData is NULL", __FUNCTION__));
+	}
+
+	return NULL;
+}
+
+static void _DebugFSStatisticSeqStop(struct seq_file *psSeqFile, void *pvData)
+{
+	PVR_DEBUGFS_DRIVER_STAT *psStatData = (PVR_DEBUGFS_DRIVER_STAT *)psSeqFile->private;
+	PVR_UNREFERENCED_PARAMETER(pvData);
+
+	if (psStatData)
+	{
+		/* drop ref taken on stat memory, and if it is now zero, be sure we don't try to read it again */
+		if ((psStatData->ui32RefCount > 0) && (psStatData->pvData))
+		{
+			/* drop reference on psStatData (held for duration of stat iteration) */
+			_UnrefAndMaybeDestroyStatEntry((void*)psStatData);
+		}
+		else
+		{
+			if (psStatData->ui32RefCount > 0)
+			{
+				/* psStatData->pvData is NULL */
+				/* NB This is valid if the stat has no structure associated with it (eg. driver_stats, which prints totals stored in a number of global vars) */
+			}
+			if (psStatData->pvData)
+			{
+				/* psStatData->ui32RefCount is zero */
+				PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called when psStatData->ui32RefCount is %d", __FUNCTION__, psStatData->ui32RefCount));
+			}
+		}
+	}
+	else
+	{
+		PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called when psStatData is NULL", __FUNCTION__));
+	}
+}
+
+static void *_DebugFSStatisticSeqNext(struct seq_file *psSeqFile,
+				      void *pvData,
+				      loff_t *puiPosition)
+{
+	PVR_DEBUGFS_DRIVER_STAT *psStatData = (PVR_DEBUGFS_DRIVER_STAT *)psSeqFile->private;
+	PVR_UNREFERENCED_PARAMETER(pvData);
+
+	if (psStatData)
+	{
+		if (psStatData->pvData)
+		{
+			if (puiPosition)
+			{
+				(*puiPosition)++;
+			}
+			else
+			{
+				PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called with puiPosition NULL", __FUNCTION__));
+			}
+		}
+		else
+		{
+			/* psStatData->pvData is NULL */
+			/* NB This is valid if the stat has no structure associated with it (eg. driver_stats, which prints totals stored in a number of global vars) */
+		}
+	}
+	else
+	{
+		PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called when psStatData is NULL", __FUNCTION__));
+	}
+
+	return NULL;
+}
+
+static int _DebugFSStatisticSeqShow(struct seq_file *psSeqFile, void *pvData)
+{
+	PVR_DEBUGFS_DRIVER_STAT *psStatData = (PVR_DEBUGFS_DRIVER_STAT *)pvData;
+
+	if (psStatData != NULL)
+	{
+		psStatData->pfnStatsPrint((void*)psSeqFile, psStatData->pvData, _StatsSeqPrintf);
+		return 0;
+	}
+	else
+	{
+		PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called when psStatData is NULL, returning -ENODATA(%d)", __FUNCTION__, -ENODATA));
+	}
+
+	return -ENODATA;
+}
+
+static struct seq_operations gsDebugFSStatisticReadOps =
+{
+	.start = _DebugFSStatisticSeqStart,
+	.stop  = _DebugFSStatisticSeqStop,
+	.next  = _DebugFSStatisticSeqNext,
+	.show  = _DebugFSStatisticSeqShow,
+};
+
+
+/*************************************************************************/ /*!
+ Common internal API
+*/ /**************************************************************************/
+
+static int _DebugFSFileOpen(struct inode *psINode, struct file *psFile)
+{
+	PVR_DEBUGFS_PRIV_DATA *psPrivData;
+	int iResult = -EIO;
+	IMG_BOOL bRefRet = IMG_FALSE;
+	PVR_DEBUGFS_ENTRY_DATA *psDebugFSEntry = NULL;
+
+	PVR_ASSERT(psINode);
+	psPrivData = (PVR_DEBUGFS_PRIV_DATA *)psINode->i_private;
+
+	if (psPrivData)
+	{
+		/* Check that psPrivData is still valid to use */
+		if (psPrivData->bValid)
+		{
+			psDebugFSEntry = psPrivData->psDebugFSEntry;
+
+			/* Take ref on stat entry before opening seq file - this ref will be dropped if we
+			 * fail to open the seq file or when we close it
+			 */
+			if (psDebugFSEntry)
+			{
+				bRefRet = _RefDebugFSEntry(psDebugFSEntry);
+				if (bRefRet)
+				{
+					iResult = seq_open(psFile, psPrivData->psReadOps);
+					if (iResult == 0)
+					{
+						struct seq_file *psSeqFile = psFile->private_data;
+
+						psSeqFile->private = psPrivData->pvData;
+					}
+					else
+					{
+						/* Drop ref if we failed to open seq file */
+						_UnrefAndMaybeDestroyDebugFSEntry(psDebugFSEntry);
+						PVR_DPF((PVR_DBG_ERROR, "%s: Failed to seq_open psFile, returning %d", __FUNCTION__, iResult));
+					}
+				}
+			}
+		}
+	}
+
+	return iResult;
+}
+
+static int _DebugFSFileClose(struct inode *psINode, struct file *psFile)
+{
+	int iResult;
+	PVR_DEBUGFS_PRIV_DATA *psPrivData = (PVR_DEBUGFS_PRIV_DATA *)psINode->i_private;
+	PVR_DEBUGFS_ENTRY_DATA *psDebugFSEntry = NULL;
+
+	if (psPrivData)
+	{
+		psDebugFSEntry = psPrivData->psDebugFSEntry;
+	}
+	iResult = seq_release(psINode, psFile);
+	if (psDebugFSEntry)
+	{
+		_UnrefAndMaybeDestroyDebugFSEntry(psDebugFSEntry);
+	}
+	return iResult;
+}
+
+static ssize_t _DebugFSFileWrite(struct file *psFile,
+				 const char __user *pszBuffer,
+				 size_t uiCount,
+				 loff_t *puiPosition)
+{
+	struct inode *psINode = psFile->f_path.dentry->d_inode;
+	PVR_DEBUGFS_PRIV_DATA *psPrivData = (PVR_DEBUGFS_PRIV_DATA *)psINode->i_private;
+
+	if (psPrivData->pfnWrite == NULL)
+	{
+		PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called for file '%s', which does not have pfnWrite defined, returning -EIO(%d)", __FUNCTION__, psFile->f_path.dentry->d_iname, -EIO));
+		return -EIO;
+	}
+
+	return psPrivData->pfnWrite(pszBuffer, uiCount, *puiPosition, psPrivData->pvData);
+}
+
+static const struct file_operations gsPVRDebugFSFileOps =
+{
+	.owner = THIS_MODULE,
+	.open = _DebugFSFileOpen,
+	.read = seq_read,
+	.write = _DebugFSFileWrite,
+	.llseek = seq_lseek,
+	.release = _DebugFSFileClose,
+};
+
+
+/*************************************************************************/ /*!
+ Public API
+*/ /**************************************************************************/
+
+/*************************************************************************/ /*!
+@Function       PVRDebugFSInit
+@Description    Initialise PVR debugfs support. This should be called before
+                using any PVRDebugFS functions.
+@Return         int      On success, returns 0. Otherwise, returns an
+                         error code.
+*/ /**************************************************************************/
+int PVRDebugFSInit(void)
+{
+	PVR_ASSERT(gpsPVRDebugFSEntryDir == NULL);
+
+	mutex_init(&gDebugFSLock);
+
+	gpsPVRDebugFSEntryDir = debugfs_create_dir(PVR_DEBUGFS_DIR_NAME, NULL);
+	if (gpsPVRDebugFSEntryDir == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: Cannot create '%s' debugfs root directory",
+			 __FUNCTION__, PVR_DEBUGFS_DIR_NAME));
+
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/*************************************************************************/ /*!
+@Function       PVRDebugFSDeInit
+@Description    Deinitialise PVR debugfs support. This should be called only
+                if PVRDebugFSInit() has already been called. All debugfs
+                directories and entries should be removed otherwise this
+                function will fail.
+@Return         void
+*/ /**************************************************************************/
+void PVRDebugFSDeInit(void)
+{
+	debugfs_remove(gpsPVRDebugFSEntryDir);
+	gpsPVRDebugFSEntryDir = NULL;
+	mutex_destroy(&gDebugFSLock);
+}
+
+/*************************************************************************/ /*!
+@Function       PVRDebugFSCreateEntryDir
+@Description    Create a directory for debugfs entries that will be located
+                under the root directory, as created by
+                PVRDebugFSCreateEntries().
+@Input          pszName      String containing the name for the directory.
+@Input          psParentDir  The parent directory in which to create the new
+                             directory. This should either be NULL, meaning it
+                             should be created in the root directory, or a
+                             pointer to a directory as returned by this
+                             function.
+@Output         ppsNewDir    On success, points to the newly created
+                             directory.
+@Return         int          On success, returns 0. Otherwise, returns an
+                             error code.
+*/ /**************************************************************************/
+int PVRDebugFSCreateEntryDir(IMG_CHAR *pszName,
+				 PVR_DEBUGFS_DIR_DATA *psParentDir,
+				 PVR_DEBUGFS_DIR_DATA **ppsNewDir)
+{
+	PVR_DEBUGFS_DIR_DATA *psNewDir;
+
+	PVR_ASSERT(gpsPVRDebugFSEntryDir != NULL);
+
+	if (pszName == NULL || ppsNewDir == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s:   Invalid param", __FUNCTION__));
+		return -EINVAL;
+	}
+
+	psNewDir = OSAllocMemstatMem(sizeof(*psNewDir));
+
+	if (psNewDir == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: Cannot allocate memory for '%s' pvr_debugfs structure",
+			 __FUNCTION__, pszName));
+		return -ENOMEM;
+	}
+
+	psNewDir->psParentDir = psParentDir;
+	psNewDir->psDir = debugfs_create_dir(pszName, (psNewDir->psParentDir) ? psNewDir->psParentDir->psDir : gpsPVRDebugFSEntryDir);
+
+	if (psNewDir->psDir == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: Cannot create '%s' debugfs directory",
+			 __FUNCTION__, pszName));
+
+		OSFreeMemstatMem(psNewDir);
+		return -ENOMEM;
+	}
+
+	*ppsNewDir = psNewDir;
+	psNewDir->ui32RefCount = 1;
+
+	/* if parent directory is not gpsPVRDebugFSEntryDir, increment its refCount */
+	if (psNewDir->psParentDir)
+	{
+		_RefDirEntry(psNewDir->psParentDir);
+	}
+	return 0;
+}
+
+/*************************************************************************/ /*!
+@Function       PVRDebugFSRemoveEntryDir
+@Description    Remove a directory that was created by
+                PVRDebugFSCreateEntryDir(). Any directories or files created
+                under the directory being removed should be removed first.
+@Input          psDir        Pointer representing the directory to be removed.
+@Return         void
+*/ /**************************************************************************/
+void PVRDebugFSRemoveEntryDir(PVR_DEBUGFS_DIR_DATA *psDir)
+{
+	_UnrefAndMaybeDestroyDirEntry(psDir);
+}
+
+/*************************************************************************/ /*!
+@Function       PVRDebugFSCreateEntry
+@Description    Create an entry in the specified directory.
+@Input          pszName         String containing the name for the entry.
+@Input          psParentDir     Pointer from PVRDebugFSCreateEntryDir()
+                                representing the directory in which to create
+                                the entry or NULL for the root directory.
+@Input          psReadOps       Pointer to structure containing the necessary
+                                functions to read from the entry.
+@Input          pfnWrite        Callback function used to write to the entry.
+@Input          pvData          Private data to be passed to the read
+                                functions, in the seq_file private member, and
+                                the write function callback.
+@Output         ppsNewEntry     On success, points to the newly created entry.
+@Return         int             On success, returns 0. Otherwise, returns an
+                                error code.
+*/ /**************************************************************************/
+int PVRDebugFSCreateEntry(const char *pszName,
+			  PVR_DEBUGFS_DIR_DATA *psParentDir,
+			  struct seq_operations *psReadOps,
+			  PVRSRV_ENTRY_WRITE_FUNC *pfnWrite,
+			  void *pvData,
+			  PVR_DEBUGFS_ENTRY_DATA **ppsNewEntry)
+{
+	PVR_DEBUGFS_PRIV_DATA *psPrivData;
+	PVR_DEBUGFS_ENTRY_DATA *psDebugFSEntry;
+	struct dentry *psEntry;
+	umode_t uiMode;
+
+	PVR_ASSERT(gpsPVRDebugFSEntryDir != NULL);
+
+	psPrivData = OSAllocMemstatMem(sizeof(*psPrivData));
+	if (psPrivData == NULL)
+	{
+		return -ENOMEM;
+	}
+	psDebugFSEntry = OSAllocMemstatMem(sizeof(*psDebugFSEntry));
+	if (psDebugFSEntry == NULL)
+	{
+		OSFreeMemstatMem(psPrivData);
+		return -ENOMEM;
+	}
+
+	psPrivData->psReadOps = psReadOps;
+	psPrivData->pfnWrite = pfnWrite;
+	psPrivData->pvData = (void*)pvData;
+	psPrivData->bValid = IMG_TRUE;
+	/* Store ptr to debugFSEntry in psPrivData, so a ref can be taken on it
+	 * when the client opens a file */
+	psPrivData->psDebugFSEntry = psDebugFSEntry;
+
+	uiMode = S_IFREG;
+
+	if (psReadOps != NULL)
+	{
+		uiMode |= S_IRUGO;
+	}
+
+	if (pfnWrite != NULL)
+	{
+		uiMode |= S_IWUSR;
+	}
+
+	psDebugFSEntry->psParentDir = psParentDir;
+	psDebugFSEntry->ui32RefCount = 1;
+	psDebugFSEntry->psStatData = (PVR_DEBUGFS_DRIVER_STAT*)pvData;
+
+	if (psDebugFSEntry->psParentDir)
+	{
+		/* increment refCount of parent directory */
+		_RefDirEntry(psDebugFSEntry->psParentDir);
+	}
+
+	psEntry = debugfs_create_file(pszName,
+					  uiMode,
+					  (psParentDir != NULL) ? psParentDir->psDir : gpsPVRDebugFSEntryDir,
+					  psPrivData,
+					  &gsPVRDebugFSFileOps);
+	if (IS_ERR(psEntry))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: Cannot create debugfs '%s' file",
+			 __FUNCTION__, pszName));
+
+		return PTR_ERR(psEntry);
+	}
+
+	psDebugFSEntry->psEntry = psEntry;
+	*ppsNewEntry = (void*)psDebugFSEntry;
+
+	return 0;
+}
+
+/*************************************************************************/ /*!
+@Function       PVRDebugFSRemoveEntry
+@Description    Removes an entry that was created by PVRDebugFSCreateEntry().
+@Input          psDebugFSEntry  Pointer representing the entry to be removed.
+@Return         void
+*/ /**************************************************************************/
+void PVRDebugFSRemoveEntry(PVR_DEBUGFS_ENTRY_DATA *psDebugFSEntry)
+{
+	_UnrefAndMaybeDestroyDebugFSEntry(psDebugFSEntry);
+}
+
+/*************************************************************************/ /*!
+@Function       PVRDebugFSCreateStatisticEntry
+@Description    Create a statistic entry in the specified directory.
+@Input          pszName         String containing the name for the entry.
+@Input          psDir           Pointer from PVRDebugFSCreateEntryDir()
+                                representing the directory in which to create
+                                the entry or NULL for the root directory.
+@Input          pfnStatsPrint   A callback function used to print all the
+                                statistics when reading from the statistic
+                                entry.
+@Input          pfnIncStatMemRefCount   A callback function used take a
+										reference on the memory backing the
+                                		statistic.
+@Input          pfnDecStatMemRefCount   A callback function used drop a
+										reference on the memory backing the
+                                		statistic.
+@Input          pvData          Private data to be passed to the provided
+                                callback function.
+
+@Return         PVR_DEBUGFS_DRIVER_STAT*   On success, a pointer representing
+										   the newly created statistic entry.
+										   Otherwise, NULL.
+*/ /**************************************************************************/
+PVR_DEBUGFS_DRIVER_STAT *PVRDebugFSCreateStatisticEntry(const char *pszName,
+					 PVR_DEBUGFS_DIR_DATA *psDir,
+				     OS_STATS_PRINT_FUNC *pfnStatsPrint,
+					 PVRSRV_INC_STAT_MEM_REFCOUNT_FUNC	*pfnIncStatMemRefCount,
+					 PVRSRV_INC_STAT_MEM_REFCOUNT_FUNC	*pfnDecStatMemRefCount,
+					 void *pvData)
+{
+	PVR_DEBUGFS_DRIVER_STAT *psStatData;
+	PVR_DEBUGFS_ENTRY_DATA * psDebugFSEntry;
+
+	int iResult;
+
+	if (pszName == NULL || pfnStatsPrint == NULL)
+	{
+		return NULL;
+	}
+	if ((pfnIncStatMemRefCount != NULL || pfnDecStatMemRefCount != NULL) && pvData == NULL)
+	{
+		return NULL;
+	}
+
+	psStatData = OSAllocMemstatZMem(sizeof(*psStatData));
+	if (psStatData == NULL)
+	{
+		return NULL;
+	}
+
+	psStatData->pvData = pvData;
+	psStatData->pfnStatsPrint = pfnStatsPrint;
+	psStatData->pfnIncStatMemRefCount = pfnIncStatMemRefCount;
+	psStatData->pfnDecStatMemRefCount = pfnDecStatMemRefCount;
+	psStatData->ui32RefCount = 1;
+
+	iResult = PVRDebugFSCreateEntry(pszName,
+					psDir,
+					&gsDebugFSStatisticReadOps,
+					NULL,
+					psStatData,
+					&psDebugFSEntry);
+	if (iResult != 0)
+	{
+		OSFreeMemstatMem(psStatData);
+		return NULL;
+	}
+	psStatData->pvDebugFSEntry = (void*)psDebugFSEntry;
+
+	if (pfnIncStatMemRefCount)
+	{
+		/* call function to take reference on the memory holding the stat */
+		psStatData->pfnIncStatMemRefCount((void*)psStatData->pvData);
+	}
+
+	psDebugFSEntry->ui32RefCount = 1;
+
+	return psStatData;
+}
+
+/*************************************************************************/ /*!
+@Function       PVRDebugFSRemoveStatisticEntry
+@Description    Removes a statistic entry that was created by
+                PVRDebugFSCreateStatisticEntry().
+@Input          psStatEntry  Pointer representing the statistic entry to be
+                         	 removed.
+@Return         void
+*/ /**************************************************************************/
+void PVRDebugFSRemoveStatisticEntry(PVR_DEBUGFS_DRIVER_STAT *psStatEntry)
+{
+	/* drop reference on pvStatEntry*/
+	_UnrefAndMaybeDestroyStatEntry(psStatEntry);
+}
+
+static void _RefDirEntry(PVR_DEBUGFS_DIR_DATA *psDirEntry)
+{
+	mutex_lock(&gDebugFSLock);
+
+	if (psDirEntry->ui32RefCount > 0)
+	{
+		/* Increment refCount */
+		psDirEntry->ui32RefCount++;
+	}
+	else
+	{
+		PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called to ref psDirEntry '%s' when ui32RefCount is zero", __FUNCTION__, psDirEntry->psDir->d_iname));
+	}
+
+	mutex_unlock(&gDebugFSLock);
+}
+
+static void _UnrefAndMaybeDestroyDirEntryWhileLocked(PVR_DEBUGFS_DIR_DATA *psDirEntry)
+{
+	if (psDirEntry->ui32RefCount > 0)
+	{
+		/* Decrement refCount and free if now zero */
+		if (--psDirEntry->ui32RefCount == 0)
+		{
+			/* if parent directory is not gpsPVRDebugFSEntryDir, decrement its refCount */
+			debugfs_remove(psDirEntry->psDir);
+			if (psDirEntry->psParentDir)
+			{
+				_UnrefAndMaybeDestroyDirEntryWhileLocked(psDirEntry->psParentDir);
+			}
+			OSFreeMemstatMem(psDirEntry);
+		}
+	}
+	else
+	{
+		PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called to unref psDirEntry '%s' when ui32RefCount is zero", __FUNCTION__, psDirEntry->psDir->d_iname));
+	}
+}
+
+static void _UnrefAndMaybeDestroyDirEntry(PVR_DEBUGFS_DIR_DATA *psDirEntry)
+{
+	mutex_lock(&gDebugFSLock);
+
+	if (psDirEntry->ui32RefCount > 0)
+	{
+		/* Decrement refCount and free if now zero */
+		if (--psDirEntry->ui32RefCount == 0)
+		{
+			/* if parent directory is not gpsPVRDebugFSEntryDir, decrement its refCount */
+			debugfs_remove(psDirEntry->psDir);
+			if (psDirEntry->psParentDir)
+			{
+				_UnrefAndMaybeDestroyDirEntryWhileLocked(psDirEntry->psParentDir);
+			}
+			OSFreeMemstatMem(psDirEntry);
+		}
+	}
+	else
+	{
+		PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called to unref psDirEntry '%s' when ui32RefCount is zero", __FUNCTION__, psDirEntry->psDir->d_iname));
+	}
+
+	mutex_unlock(&gDebugFSLock);
+}
+
+static IMG_BOOL _RefDebugFSEntry(PVR_DEBUGFS_ENTRY_DATA *psDebugFSEntry)
+{
+	IMG_BOOL bResult = IMG_FALSE;
+
+	PVR_ASSERT(psDebugFSEntry != NULL);
+
+	mutex_lock(&gDebugFSLock);
+
+	bResult = (psDebugFSEntry->ui32RefCount > 0);
+	if (bResult)
+	{
+		/* Increment refCount of psDebugFSEntry */
+		psDebugFSEntry->ui32RefCount++;
+	}
+
+	mutex_unlock(&gDebugFSLock);
+
+	return bResult;
+}
+
+static void _UnrefAndMaybeDestroyDebugFSEntry(PVR_DEBUGFS_ENTRY_DATA *psDebugFSEntry)
+{
+	mutex_lock(&gDebugFSLock);
+	/* Decrement refCount of psDebugFSEntry, and free if now zero */
+	PVR_ASSERT(psDebugFSEntry != IMG_NULL);
+
+	if (psDebugFSEntry->ui32RefCount > 0)
+	{
+		if (--psDebugFSEntry->ui32RefCount == 0)
+		{
+			struct dentry *psEntry = psDebugFSEntry->psEntry;
+
+			if (psEntry)
+			{
+				/* Free any private data that was provided to debugfs_create_file() */
+				if (psEntry->d_inode->i_private != NULL)
+				{
+					PVR_DEBUGFS_PRIV_DATA *psPrivData = (PVR_DEBUGFS_PRIV_DATA*)psDebugFSEntry->psEntry->d_inode->i_private;
+
+					psPrivData->bValid = IMG_FALSE;
+					psPrivData->psDebugFSEntry = NULL;
+					OSFreeMemstatMem(psEntry->d_inode->i_private);
+				}
+				debugfs_remove(psEntry);
+			}
+			/* decrement refcount of parent directory */
+			if (psDebugFSEntry->psParentDir)
+			{
+				_UnrefAndMaybeDestroyDirEntryWhileLocked(psDebugFSEntry->psParentDir);
+			}
+
+			/* now free the memory allocated for psDebugFSEntry */
+			OSFreeMemstatMem(psDebugFSEntry);
+		}
+	}
+	else
+	{
+		PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called to unref psDebugFSEntry '%s' when ui32RefCount is zero", __FUNCTION__, psDebugFSEntry->psEntry->d_iname));
+	}
+
+	mutex_unlock(&gDebugFSLock);
+}
+
+static IMG_BOOL _RefStatEntry(PVR_DEBUGFS_DRIVER_STAT *psStatEntry)
+{
+	IMG_BOOL bResult = IMG_FALSE;
+
+	PVR_ASSERT(psStatEntry != NULL);
+
+	mutex_lock(&gDebugFSLock);
+
+	bResult = (psStatEntry->ui32RefCount > 0);
+	if (bResult)
+	{
+		/* Increment refCount of psStatEntry */
+		psStatEntry->ui32RefCount++;
+	}
+	else
+	{
+		PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called to ref psStatEntry '%s' when ui32RefCount is zero", __FUNCTION__, psStatEntry->pvDebugFSEntry->psEntry->d_iname));
+	}
+
+	mutex_unlock(&gDebugFSLock);
+
+	return bResult;
+}
+
+static IMG_BOOL _UnrefAndMaybeDestroyStatEntry(PVR_DEBUGFS_DRIVER_STAT *psStatEntry)
+{
+	IMG_BOOL bResult;
+
+	PVR_ASSERT(psStatEntry != IMG_NULL);
+
+	mutex_lock(&gDebugFSLock);
+
+	bResult = (psStatEntry->ui32RefCount > 0);
+
+	if (bResult)
+	{
+		/* Decrement refCount of psStatData, and free if now zero */
+		if (--psStatEntry->ui32RefCount == 0)
+		{
+			mutex_unlock(&gDebugFSLock);
+
+			if (psStatEntry->pvDebugFSEntry)
+			{
+				_UnrefAndMaybeDestroyDebugFSEntry((PVR_DEBUGFS_ENTRY_DATA*)psStatEntry->pvDebugFSEntry);
+			}
+			if (psStatEntry->pfnDecStatMemRefCount)
+			{
+				/* call function to drop reference on the memory holding the stat */
+				psStatEntry->pfnDecStatMemRefCount((void*)psStatEntry->pvData);
+			}
+		}
+		else
+		{
+			mutex_unlock(&gDebugFSLock);
+		}
+	}
+	else
+	{
+		PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called to unref psStatEntry '%s' when ui32RefCount is zero", __FUNCTION__, psStatEntry->pvDebugFSEntry->psEntry->d_iname));
+		mutex_unlock(&gDebugFSLock);
+	}
+
+	return bResult;
+}
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/pvr_debugfs.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/pvr_debugfs.h
new file mode 100644
index 0000000..8895e03
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/pvr_debugfs.h
@@ -0,0 +1,91 @@
+/*************************************************************************/ /*!
+@File
+@Title          Functions for creating debugfs directories and entries.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__PVR_DEBUGFS_H__)
+#define __PVR_DEBUGFS_H__
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include "img_types.h"
+#include "osfunc.h"
+
+typedef ssize_t (PVRSRV_ENTRY_WRITE_FUNC)(const char __user *pszBuffer,
+					  size_t uiCount,
+					  loff_t uiPosition,
+					  void *pvData);
+
+
+typedef IMG_UINT32 (PVRSRV_INC_STAT_MEM_REFCOUNT_FUNC)(void *pvStatPtr);
+typedef IMG_UINT32 (PVRSRV_DEC_STAT_MEM_REFCOUNT_FUNC)(void *pvStatPtr);
+
+typedef struct _PVR_DEBUGFS_DIR_DATA_ PVR_DEBUGFS_DIR_DATA;
+typedef struct _PVR_DEBUGFS_ENTRY_DATA_ PVR_DEBUGFS_ENTRY_DATA;
+typedef struct _PVR_DEBUGFS_DRIVER_STAT_ PVR_DEBUGFS_DRIVER_STAT;
+
+int PVRDebugFSInit(void);
+void PVRDebugFSDeInit(void);
+
+int PVRDebugFSCreateEntryDir(IMG_CHAR *pszName,
+				 	 	 	 PVR_DEBUGFS_DIR_DATA *psParentDir,
+							 PVR_DEBUGFS_DIR_DATA **ppsNewDir);
+
+void PVRDebugFSRemoveEntryDir(PVR_DEBUGFS_DIR_DATA *psDir);
+
+int PVRDebugFSCreateEntry(const char *pszName,
+			  	  	  	  PVR_DEBUGFS_DIR_DATA *psParentDir,
+						  struct seq_operations *psReadOps,
+						  PVRSRV_ENTRY_WRITE_FUNC *pfnWrite,
+						  void *pvData,
+						  PVR_DEBUGFS_ENTRY_DATA **ppsNewEntry);
+
+void PVRDebugFSRemoveEntry(PVR_DEBUGFS_ENTRY_DATA *psDebugFSEntry);
+
+PVR_DEBUGFS_DRIVER_STAT *PVRDebugFSCreateStatisticEntry(const char *pszName,
+					 	 	 	 	 	 	 	 	 	PVR_DEBUGFS_DIR_DATA *psDir,
+														OS_STATS_PRINT_FUNC *pfnStatsPrint,
+														PVRSRV_INC_STAT_MEM_REFCOUNT_FUNC *pfnIncStatMemRefCount,
+														PVRSRV_INC_STAT_MEM_REFCOUNT_FUNC *pfnDecStatMemRefCount,
+														void *pvData);
+void PVRDebugFSRemoveStatisticEntry(PVR_DEBUGFS_DRIVER_STAT *psStatEntry);
+
+#endif /* !defined(__PVR_DEBUGFS_H__) */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/pvr_drm.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/pvr_drm.h
new file mode 100644
index 0000000..be1b021
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/pvr_drm.h
@@ -0,0 +1,236 @@
+/*************************************************************************/ /*!
+@File
+@Title          PowerVR drm driver
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    drm module
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__PVR_DRM_H__)
+#define __PVR_DRM_H__
+
+#include <linux/version.h>
+#include <drm/drmP.h>
+
+/*
+ * Check for a kernel patched by IMG, with DMA-BUF and PRIME components
+ * from a later version of the kernel.
+ */
+#if !defined(DRM_PRIME_LINUX_VERSION_CODE)
+#define	DRM_PRIME_LINUX_VERSION_CODE LINUX_VERSION_CODE
+#endif
+
+#if (DRM_PRIME_LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,3))
+#define	PVR_DRM_USE_PRIME
+#endif
+
+#if defined(LMA)
+#define PVR_DRM_PHYS_HEAP	PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL
+#else
+#define PVR_DRM_PHYS_HEAP	PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL
+#endif
+
+#if defined(LDM_PLATFORM)
+#define	LDM_DEV	struct platform_device
+#endif /*LDM_PLATFORM */
+
+#if defined(LDM_PCI)
+#define	LDM_DEV	struct pci_dev
+#endif /* LDM_PCI */
+
+#include "connection_server.h"
+#include "pvr_drm_external.h"
+#include "pvr_drm_shared.h"
+#include "pvr_drm_display.h"
+#include "sync_server.h"
+#include "pmr.h"
+
+#if defined(PVR_DRM_USE_PRIME)
+#include <linux/dma-buf.h>
+#endif
+
+#if defined(SUPPORT_DRM_DC_MODULE)
+#include <linux/spinlock.h>
+#include "scp.h"
+#endif
+
+#if defined(PDUMP)
+#include "linuxsrv.h"
+#endif
+
+#if defined(SUPPORT_DRM)
+#if (!defined(LDM_PLATFORM) && !defined(LDM_PCI)) || \
+	(defined(LDM_PLATFORM) && defined(LDM_PCI))
+	#error "LDM_PLATFORM or LDM_PCI must be defined"
+#endif
+
+#define	MAKESTRING(x)	#x
+#define TOSTRING(x)	MAKESTRING(x)
+
+#define	PVR_DRM_FILE_FROM_FILE(pFile)		((struct drm_file *)((pFile)->private_data))
+#define	PVR_FILE_FROM_DRM_FILE(pDRMFile)	((pDRMFile)->filp)
+
+struct pvr_drm_dev_priv
+{
+#if defined(SUPPORT_DRM_DC_MODULE)
+	/* The DRM device funcs *MUST* be the first field in the structure
+	   as pvr_drm_display.h relies on this being the case. */
+	struct pvr_drm_device_funcs funcs;
+	void *display_priv;
+
+	spinlock_t flip_done_lock;
+	struct list_head flip_done_head;
+	SCP_CONTEXT *display_flip_context;
+
+	IMG_HANDLE display_misr;
+	IMG_HANDLE display_notify;
+#endif
+
+	PVRSRV_DEVICE_NODE *dev_node;
+
+#if defined(SUPPORT_SYSTEM_INTERRUPT_HANDLING)
+	IMG_HANDLE *hSysData;
+#else
+	unsigned int irq;
+	pvr_drm_irq_handler irq_handler;
+#endif
+};
+
+enum pvr_drm_gem_object_type
+{
+	PVR_DRM_GEM_UNDEFINED = 0,
+	PVR_DRM_GEM_PMR,
+	PVR_DRM_GEM_DISPLAY_PMR,
+	PVR_DRM_GEM_IMPORT_PMR,
+};
+
+struct pvr_drm_gem_object
+{
+	enum pvr_drm_gem_object_type type;
+	struct drm_gem_object base;
+	PMR *pmr;
+	void *obj;
+	SERVER_SYNC_PRIMITIVE *apsSyncPrim[PVRSRV_GEM_SYNC_TYPE_COUNT];
+	IMG_UINT32 auiSyncPrimVAddr[PVRSRV_GEM_SYNC_TYPE_COUNT];
+};
+
+#define to_pvr_drm_gem_object(obj) container_of(obj, struct pvr_drm_gem_object, base)
+
+extern struct drm_driver sPVRDRMDriver;
+
+int PVRSRVSystemInit(struct drm_device *pDrmDevice);
+void PVRSRVSystemDeInit(LDM_DEV *pDevice);
+
+int PVRSRVOpen(struct drm_device *dev, struct drm_file *file);
+void PVRSRVRelease(struct drm_device *dev, struct drm_file *file);
+
+#if defined(PDUMP)
+int dbgdrv_init(void);
+void dbgdrv_cleanup(void);
+int dbgdrv_ioctl(struct drm_device *dev, void *arg, struct drm_file *file);
+int dbgdrv_ioctl_compat(struct drm_device *dev, void *arg, struct drm_file *file);
+#endif
+
+int PVRSRV_BridgeDispatchKM(struct drm_device *dev, void *arg, struct drm_file *file);
+
+#if defined(CONFIG_COMPAT)
+int PVRSRV_BridgeCompatDispatchKM(struct file *file, unsigned int cmd, unsigned long arg);
+#endif
+
+int PVRDRMGEMCreate(struct drm_device *dev, void *arg, struct drm_file *file);
+int PVRDRMGEMToIMGHandle(struct drm_device *dev, void *arg, struct drm_file *file);
+int PVRDRMIMGToGEMHandle(struct drm_device *dev, void *arg, struct drm_file *file);
+int PVRDRMGEMSyncGet(struct drm_device *dev, void *arg, struct drm_file *file);
+
+int PVRSRVGEMInitObject(struct drm_gem_object *obj,
+			enum pvr_drm_gem_object_type type,
+			PVRSRV_MEMALLOCFLAGS_T alloc_flags);
+
+struct drm_gem_object *PVRSRVGEMObjectCreate(struct drm_device *dev,
+					     enum pvr_drm_gem_object_type type,
+					     size_t size,
+					     PVRSRV_MEMALLOCFLAGS_T alloc_flags);
+void PVRSRVGEMFreeObject(struct drm_gem_object *obj);
+
+PVRSRV_ERROR PVRSRVGEMCreatePMR(PVRSRV_DEVICE_NODE *psDevNode,
+				struct drm_gem_object *psObj,
+				PVRSRV_MEMALLOCFLAGS_T uiFlags,
+				PMR **ppsPMR);
+struct drm_gem_object *PVRSRVGEMGetObject(PMR *psPMR);
+PMR *PVRSRVGEMMMapLookupPMR(struct file *psFile, struct vm_area_struct *psVMA);
+
+#if defined(PVR_DRM_USE_PRIME)
+struct drm_gem_object *PVRSRVPrimeImport(struct drm_device *dev, struct dma_buf *dma_buf);
+struct dma_buf *PVRSRVPrimeExport(struct drm_device unref__ *dev, struct drm_gem_object *obj, int flags);
+#endif
+
+#if defined(SUPPORT_DRM_DC_MODULE)
+int PVRSRVDRMDisplayInit(struct drm_device *dev);
+int PVRSRVDRMDisplayDeinit(struct drm_device *dev);
+
+PVRSRV_ERROR PVRSRVDRMDisplayCreatePMR(PVRSRV_DEVICE_NODE *psDevNode,
+				       struct drm_device *dev,
+				       size_t size,
+				       PVRSRV_MEMALLOCFLAGS_T uiFlags,
+				       PMR **ppsPMR,
+				       void **buffer);
+u32 PVRSRVDRMDisplayGetVBlankCounter(struct drm_device *dev, int crtc);
+int PVRSRVDRMDisplayEnableVBlank(struct drm_device *dev, int crtc);
+void PVRSRVDRMDisplayDisableVBlank(struct drm_device *dev, int crtc);
+
+int PVRSRVGEMDumbCreate(struct drm_file *file, struct drm_device *dev, struct drm_mode_create_dumb *args);
+int PVRSRVGEMDumbDestroy(struct drm_file *file, struct drm_device *dev, uint32_t handle);
+int PVRSRVGEMDumbMapOffset(struct drm_file *file, struct drm_device *dev, uint32_t handle, uint64_t *offset);
+#endif
+
+
+/* These defines must be prefixed with "DRM_IOCTL_". */
+#define	DRM_IOCTL_PVR_SRVKM_CMD			_IOWR(0, DRM_PVR_SRVKM_CMD, PVRSRV_BRIDGE_PACKAGE)
+#define	DRM_IOCTL_PVR_UNPRIV_CMD		_IOWR(0, DRM_PVR_UNPRIV_CMD, drm_pvr_unpriv_cmd)
+
+#if defined(PDUMP)
+#define	DRM_IOCTL_PVR_DBGDRV_CMD		_IOWR(0, DRM_PVR_DBGDRV_CMD, IOCTL_PACKAGE)
+#endif
+
+#define	DRM_IOCTL_PVR_GEM_CREATE		_IOWR(0, DRM_PVR_GEM_CREATE, drm_pvr_gem_create)
+#define	DRM_IOCTL_PVR_GEM_TO_IMG_HANDLE		_IOWR(0, DRM_PVR_GEM_TO_IMG_HANDLE, drm_pvr_gem_to_img_handle)
+#define	DRM_IOCTL_PVR_IMG_TO_GEM_HANDLE		_IOWR(0, DRM_PVR_IMG_TO_GEM_HANDLE, drm_pvr_img_to_gem_handle)
+#define DRM_IOCTL_PVR_GEM_SYNC_GET		_IOWR(0, DRM_PVR_GEM_SYNC_GET, drm_pvr_gem_sync_get)
+
+#endif	/* defined(SUPPORT_DRM) */
+#endif /* !defined(__PVR_DRM_H__) */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/pvr_drm_gem.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/pvr_drm_gem.c
new file mode 100644
index 0000000..9d28385
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/pvr_drm_gem.c
@@ -0,0 +1,980 @@
+/*************************************************************************/ /*!
+@File
+@Title          PowerVR DRM GEM interface
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Interface for managing GEM memory.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if defined(SUPPORT_DRM)
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+
+#include "private_data.h"
+#include "driverlock.h"
+#include "pmr.h"
+#include "physmem.h"
+#include "pvr_drm.h"
+#include "pvr_drm_display.h"
+#include "sync_server_internal.h"
+#include "allocmem.h"
+
+#if defined(PVR_DRM_USE_PRIME)
+#include "physmem_dmabuf.h"
+#endif
+
+#if defined(PVR_RI_DEBUG)
+#include "ri_server.h"
+#endif
+
+static PVRSRV_ERROR GEMSyncHandleDestroy(IMG_PVOID pvParam)
+{
+	SERVER_SYNC_PRIMITIVE *psSync = (SERVER_SYNC_PRIMITIVE *)pvParam;
+
+	ServerSyncUnref(psSync);
+
+	return PVRSRV_OK;
+}
+
+static int GEMSyncHandleCreate(CONNECTION_DATA *psConnection, SERVER_SYNC_PRIMITIVE *psSync, IMG_HANDLE *phSync)
+{
+	PVRSRV_ERROR eError;
+	int iErr;
+
+	ServerSyncRef(psSync);
+
+	eError = PVRSRVAllocHandle(psConnection->psHandleBase,
+				   phSync,
+				   (void *)psSync,
+				   PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+				   PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+				   (PFN_HANDLE_RELEASE)GEMSyncHandleDestroy);
+	if (eError != PVRSRV_OK)
+	{
+		switch (eError)
+		{
+			case PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE:
+			case PVRSRV_ERROR_OUT_OF_MEMORY:
+				iErr = -ENOMEM;
+				break;
+			case PVRSRV_ERROR_INVALID_PARAMS:
+			default:
+				iErr = -EINVAL;
+		}
+
+		goto ErrorSyncUnreference;
+	}
+
+	return 0;
+
+ErrorSyncUnreference:
+	ServerSyncUnref(psSync);
+
+	return iErr;
+}
+
+
+/*************************************************************************/ /*!
+* DRM GEM PMR factory
+*/ /**************************************************************************/
+
+typedef struct PMR_GEM_PRIV_TAG
+{
+	struct drm_gem_object	*psObj;
+	PMR			*psBackingPMR;
+} PMR_GEM_PRIV;
+
+
+static PVRSRV_ERROR PMRGEMLockPhysAddress(PMR_IMPL_PRIVDATA pvPriv,
+					  IMG_UINT32 uiLog2DevPageSize)
+{
+	PMR_GEM_PRIV *psGEMPriv = pvPriv;
+
+	/* Call  PMRLockSysPhysAddresses using the proper lock class to avoid a Lockdep issue */
+	return PMRLockSysPhysAddressesNested(psGEMPriv->psBackingPMR, uiLog2DevPageSize, 1);
+}
+
+static PVRSRV_ERROR PMRGEMUnlockPhysAddress(PMR_IMPL_PRIVDATA pvPriv)
+{
+	PMR_GEM_PRIV *psGEMPriv = pvPriv;
+
+	return PMRUnlockSysPhysAddresses(psGEMPriv->psBackingPMR);
+}
+
+static PVRSRV_ERROR PMRGEMDevPhysAddr(PMR_IMPL_PRIVDATA pvPriv,
+				      IMG_UINT32 ui32NumOfPages,
+				      IMG_DEVMEM_OFFSET_T *uiOffset,
+					  IMG_BOOL *pbValid,
+				      IMG_DEV_PHYADDR *psDevAddrPtr)
+{
+	PMR_GEM_PRIV *psGEMPriv = pvPriv;
+	
+	/* This use-case requires special treatment; as it is not a core
+	   PMR factory (i.e. it's a PMR adapter), only uiOffset[0] is valid.
+	   This is the initial look-up offset specified by higher level code.
+	   This inconsistency is due to function type-info sharing between
+	   the two. Normally core PMR is responsible for re-expressing an
+	   offset/uiNumOfPages pair into offset[array] for PMR factories
+	   to translate, i.e. PMR adapter -> PMR core -> PMR factories */
+	return PMR_DevPhysAddr(psGEMPriv->psBackingPMR,
+						   PAGE_SHIFT, 
+						   ui32NumOfPages,
+						   uiOffset[0], 
+						   psDevAddrPtr,
+						   pbValid);
+}
+
+#if defined(PDUMP)
+static PVRSRV_ERROR PMRGEMPDumpSymbolicAddr(PMR_IMPL_PRIVDATA pvPriv,
+					      IMG_DEVMEM_OFFSET_T uiOffset,
+					      IMG_CHAR *pszMemspaceName,
+					      IMG_UINT32 ui32MemspaceNameLen,
+					      IMG_CHAR *pszSymbolicAddr,
+					      IMG_UINT32 ui32SymbolicAddrLen,
+					      IMG_DEVMEM_OFFSET_T *puiNewOffset,
+					      IMG_DEVMEM_OFFSET_T *puiNextSymName)
+{
+	PMR_GEM_PRIV *psGEMPriv = pvPriv;
+
+	return PMR_PDumpSymbolicAddr(psGEMPriv->psBackingPMR,
+				     uiOffset,
+				     ui32MemspaceNameLen,
+				     pszMemspaceName,
+				     ui32SymbolicAddrLen,
+				     pszSymbolicAddr,
+				     puiNewOffset,
+				     puiNextSymName);
+}
+#endif
+
+static PVRSRV_ERROR PMRGEMAcquireKernelMappingData(PMR_IMPL_PRIVDATA pvPriv,
+						   IMG_SIZE_T uiOffset,
+						   IMG_SIZE_T uiSize,
+						   void **ppvKernelAddressOut,
+						   IMG_HANDLE *phHandleOut,
+						   PMR_FLAGS_T unref__ ulFlags)
+{
+	PMR_GEM_PRIV *psGEMPriv = pvPriv;
+	IMG_SIZE_T uiLength;
+
+	return PMRAcquireKernelMappingData(psGEMPriv->psBackingPMR,
+					   uiOffset,
+					   uiSize,
+					   ppvKernelAddressOut,
+					   &uiLength,
+					   phHandleOut);
+}
+
+static void PMRGEMReleaseKernelMappingData(PMR_IMPL_PRIVDATA pvPriv,
+					       IMG_HANDLE hHandle)
+{
+	PMR_GEM_PRIV *psGEMPriv = pvPriv;
+
+	PMRReleaseKernelMappingData(psGEMPriv->psBackingPMR, hHandle);
+}
+
+static PVRSRV_ERROR PMRGEMReadBytes(PMR_IMPL_PRIVDATA pvPriv,
+				    IMG_DEVMEM_OFFSET_T uiOffset,
+				    IMG_UINT8 *pcBuffer,
+				    IMG_SIZE_T uiBufferSize,
+				    IMG_SIZE_T *puiNumBytes)
+{
+	PMR_GEM_PRIV *psGEMPriv = pvPriv;
+
+	return PMR_ReadBytes(psGEMPriv->psBackingPMR,
+			     uiOffset,
+			     pcBuffer,
+			     uiBufferSize,
+			     puiNumBytes);
+}
+
+static PVRSRV_ERROR PMRGEMWriteBytes(PMR_IMPL_PRIVDATA pvPriv,
+				     IMG_DEVMEM_OFFSET_T uiOffset,
+				     IMG_UINT8 *pcBuffer,
+				     IMG_SIZE_T uiBufferSize,
+				     IMG_SIZE_T *puiNumBytes)
+{
+	PMR_GEM_PRIV *psGEMPriv = pvPriv;
+
+	return PMR_WriteBytes(psGEMPriv->psBackingPMR,
+			      uiOffset,
+			      pcBuffer,
+			      uiBufferSize,
+			      puiNumBytes);
+}
+
+static PVRSRV_ERROR PMRGEMFinalize(PMR_IMPL_PRIVDATA pvPriv)
+{
+	PMR_GEM_PRIV *psGEMPriv = pvPriv;
+
+	PMRUnrefPMR(psGEMPriv->psBackingPMR);
+
+	OSFreeMem(psGEMPriv);
+
+	return PVRSRV_OK;
+}
+
+static const PMR_IMPL_FUNCTAB gsPMRGEMFuncTab = 
+{
+	.pfnLockPhysAddresses		= PMRGEMLockPhysAddress,
+	.pfnUnlockPhysAddresses		= PMRGEMUnlockPhysAddress,
+	.pfnDevPhysAddr			= PMRGEMDevPhysAddr,
+#if defined(PDUMP)
+	.pfnPDumpSymbolicAddr		= PMRGEMPDumpSymbolicAddr,
+#endif
+	.pfnAcquireKernelMappingData	= PMRGEMAcquireKernelMappingData,
+	.pfnReleaseKernelMappingData	= PMRGEMReleaseKernelMappingData,
+	.pfnReadBytes			= PMRGEMReadBytes,
+	.pfnWriteBytes			= PMRGEMWriteBytes,
+	.pfnFinalize			= PMRGEMFinalize,
+};
+
+PVRSRV_ERROR PVRSRVGEMCreatePMR(PVRSRV_DEVICE_NODE *psDevNode,
+				struct drm_gem_object *psObj,
+				PVRSRV_MEMALLOCFLAGS_T uiFlags,
+				PMR **ppsPMR)
+{
+	struct pvr_drm_gem_object *psPVRObj = to_pvr_drm_gem_object(psObj);
+	IMG_BOOL bMappingTable = IMG_TRUE;
+	PMR_GEM_PRIV *psGEMPriv;
+	PVRSRV_ERROR eError;
+
+	/* Create the private data structure for the PMR */
+	psGEMPriv = OSAllocZMem(sizeof *psGEMPriv);
+	if (psGEMPriv == NULL)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	psGEMPriv->psObj = psObj;
+
+	switch (psPVRObj->type)
+	{
+		case PVR_DRM_GEM_PMR:
+			eError = PhysmemNewRamBackedPMR(psDevNode,
+							psObj->size,
+							psObj->size,
+							1,
+							1,
+							&bMappingTable,
+							PAGE_SHIFT,
+							uiFlags,
+							&psGEMPriv->psBackingPMR);
+			break;
+#if defined(SUPPORT_DRM_DC_MODULE)
+		case PVR_DRM_GEM_DISPLAY_PMR:
+			eError = PVRSRVDRMDisplayCreatePMR(psDevNode,
+							   psObj->dev,
+							   psObj->size,
+							   uiFlags,
+							   &psGEMPriv->psBackingPMR,
+							   &psPVRObj->obj);
+			break;
+#endif
+#if defined(PVR_DRM_USE_PRIME)
+		case PVR_DRM_GEM_IMPORT_PMR:
+			eError = PhysmemCreateNewDmaBufBackedPMR(psDevNode->apsPhysHeap[PVR_DRM_PHYS_HEAP],
+								 psObj->import_attach,
+								 NULL,
+								 uiFlags,
+								 &psGEMPriv->psBackingPMR);
+			break;
+#endif
+		case PVR_DRM_GEM_UNDEFINED:
+		default:
+			eError = PVRSRV_ERROR_NOT_SUPPORTED;
+			break;
+	}
+
+	if (eError != PVRSRV_OK)
+	{
+		goto ErrorFreePMRPriv;
+	}
+
+	eError = PMRCreatePMR(psDevNode->apsPhysHeap[PVR_DRM_PHYS_HEAP],
+			      psObj->size,
+			      psObj->size,
+			      1,
+			      1,
+			      &bMappingTable,
+			      PAGE_SHIFT,
+			      uiFlags,
+			      "PMRGEM",
+			      &gsPMRGEMFuncTab,
+			      psGEMPriv,
+			      ppsPMR,
+			      IMG_NULL,
+			      IMG_FALSE);
+	if (eError != PVRSRV_OK)
+	{
+		goto ErrorUnrefBackingPMR;
+	}
+
+#if defined(PVR_RI_DEBUG)
+	eError = RIWritePMREntryKM(*ppsPMR,
+				   sizeof("GEM"),
+				   "GEM",
+				   psObj->size);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_WARNING,
+			 "%s: Failed to write PMR entry (%s)",
+			 __FUNCTION__, PVRSRVGetErrorStringKM(eError)));
+	}
+#endif
+
+	return PVRSRV_OK;
+
+ErrorUnrefBackingPMR:
+	PMRUnrefPMR(psGEMPriv->psBackingPMR);
+
+ErrorFreePMRPriv:
+	OSFreeMem(psGEMPriv);
+
+	return eError;	
+}
+
+
+/*************************************************************************/ /*!
+* PVR DRM IOCTL functions
+*/ /**************************************************************************/
+
+int PVRDRMGEMCreate(struct drm_device *dev, void *arg, struct drm_file *file)
+{
+	drm_pvr_gem_create *psGEMCreate = (drm_pvr_gem_create *)arg;
+	struct drm_gem_object *psObj;
+	enum pvr_drm_gem_object_type eType;
+	int iRet;
+
+	if ((psGEMCreate->usage_flags & PVR_GEM_USE_SCANOUT) &&
+	    (psGEMCreate->usage_flags & PVR_GEM_USE_CURSOR))
+	{
+		return -EINVAL;
+	}
+
+	if ((psGEMCreate->usage_flags & PVR_GEM_USE_SCANOUT) ||
+	    (psGEMCreate->usage_flags & PVR_GEM_USE_CURSOR))
+	{
+#if defined(SUPPORT_DRM_DC_MODULE)
+		eType = PVR_DRM_GEM_DISPLAY_PMR;
+#else
+		return -EPERM;
+#endif
+	}
+	else
+	{
+		eType = PVR_DRM_GEM_PMR;
+	}
+
+	psObj = PVRSRVGEMObjectCreate(dev,
+				      eType,
+				      psGEMCreate->size,
+				      (PVRSRV_MEMALLOCFLAGS_T)psGEMCreate->alloc_flags);
+	if (IS_ERR(psObj))
+	{
+		return PTR_ERR(psObj);
+	}
+
+	iRet = drm_gem_handle_create(file, psObj, &psGEMCreate->handle);
+
+	drm_gem_object_unreference_unlocked(psObj);
+
+	return iRet;
+}
+
+static PVRSRV_ERROR GEMDestroyPMRHandle(IMG_PVOID pvParam)
+{
+	struct drm_gem_object *psObj = PVRSRVGEMGetObject((PMR *)pvParam);
+
+	drm_gem_object_unreference_unlocked(psObj);
+
+	return PVRSRV_OK;
+}
+
+static int GEMCreatePMRHandle(CONNECTION_DATA *psConnection, PMR *psPMR, IMG_HANDLE *phPMR)
+{
+	struct drm_gem_object *psObj = PVRSRVGEMGetObject(psPMR);
+	PVRSRV_ERROR eError;
+	int iErr;
+
+	/*
+	 * Hold a reference to the GEM object so that it can't be destroyed
+	 * until there are no more IMG handles for the PMR.
+	 */
+	drm_gem_object_reference(psObj);
+
+	eError = PVRSRVAllocHandle(psConnection->psHandleBase,
+				   phPMR,
+				   (void *)psPMR,
+				   PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+				   PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+				   (PFN_HANDLE_RELEASE)GEMDestroyPMRHandle);
+	if (eError != PVRSRV_OK)
+	{
+		switch (eError)
+		{
+			case PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE:
+			case PVRSRV_ERROR_OUT_OF_MEMORY:
+				iErr = -ENOMEM;
+				break;
+			case PVRSRV_ERROR_INVALID_PARAMS:
+			default:
+				iErr = -EINVAL;
+		}
+
+		goto ErrorObjectUnreference;
+	}
+
+	return 0;
+
+ErrorObjectUnreference:
+	drm_gem_object_unreference_unlocked(psObj);
+
+	return iErr;
+}
+
+int PVRDRMGEMToIMGHandle(struct drm_device *dev, void *arg, struct drm_file *file)
+{
+	drm_pvr_gem_to_img_handle *psGEMToIMGHandle = (drm_pvr_gem_to_img_handle *)arg;
+	struct pvr_drm_gem_object *psPVRObj;
+	struct drm_gem_object *psObj;
+	int iRet;
+
+	OSAcquireBridgeLock();
+
+	psObj = drm_gem_object_lookup(dev, file, psGEMToIMGHandle->gem_handle);
+	if (psObj == NULL)
+	{
+		iRet = -ENOENT;
+		goto ExitUnlock;
+	}
+	psPVRObj = to_pvr_drm_gem_object(psObj);
+
+	switch (psPVRObj->type)
+	{
+		case PVR_DRM_GEM_PMR:
+#if defined(SUPPORT_DRM_DC_MODULE)
+		case PVR_DRM_GEM_DISPLAY_PMR:
+#endif
+#if defined(PVR_DRM_USE_PRIME)
+		case PVR_DRM_GEM_IMPORT_PMR:
+#endif
+		{
+			IMG_HANDLE hPMR;
+
+			iRet = GEMCreatePMRHandle(LinuxConnectionFromFile(PVR_FILE_FROM_DRM_FILE(file)),
+						  psPVRObj->pmr,
+						  &hPMR);
+			if (iRet == 0)
+			{
+				psGEMToIMGHandle->img_handle = (uint64_t)(uintptr_t)hPMR;
+			}
+			break;
+		}
+		default:
+			iRet = -EINVAL;
+			break;
+	}
+
+	drm_gem_object_unreference_unlocked(psObj);
+
+ExitUnlock:
+	OSReleaseBridgeLock();
+
+	return iRet;
+}
+
+int PVRDRMIMGToGEMHandle(struct drm_device *dev, void *arg, struct drm_file *file)
+{
+	drm_pvr_img_to_gem_handle *psIMGToGEMHandle = (drm_pvr_img_to_gem_handle *)arg;
+	CONNECTION_DATA *psConnection = LinuxConnectionFromFile(PVR_FILE_FROM_DRM_FILE(file));
+	struct drm_gem_object *psObj;
+	PMR *psPMR;
+	PVRSRV_ERROR eError;
+	int iRet;
+
+	OSAcquireBridgeLock();
+
+	eError = PVRSRVLookupHandle(psConnection->psHandleBase,
+				    (void **)&psPMR,
+				    (IMG_HANDLE)(IMG_UINTPTR_T)psIMGToGEMHandle->img_handle,
+				    PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+	if (eError != PVRSRV_OK)
+	{
+		iRet = -EINVAL;
+		goto ExitUnlock;
+	}
+
+	psObj = PVRSRVGEMGetObject(psPMR);
+	if (psObj == NULL)
+	{
+		iRet = -EINVAL;
+		goto ExitUnlock;
+	}
+
+	iRet = drm_gem_handle_create(file, psObj, &psIMGToGEMHandle->gem_handle);
+
+ExitUnlock:
+	OSReleaseBridgeLock();
+
+	return iRet;
+}
+
+int PVRDRMGEMSyncGet(struct drm_device *dev, void *arg, struct drm_file *file)
+{
+	drm_pvr_gem_sync_get *psGEMSyncGet = (drm_pvr_gem_sync_get *)arg;
+	struct pvr_drm_gem_object *psPVRObj;
+	struct drm_gem_object *psObj;
+	int iRet;
+
+	OSAcquireBridgeLock();
+
+	psObj = drm_gem_object_lookup(dev, file, psGEMSyncGet->gem_handle);
+	if (psObj == NULL)
+	{
+		iRet = -ENOENT;
+		goto ExitUnlock;
+	}
+	psPVRObj = to_pvr_drm_gem_object(psObj);
+
+	switch (psPVRObj->type)
+	{
+		case PVR_DRM_GEM_PMR:
+#if defined(PVR_DRM_USE_PRIME)
+		case PVR_DRM_GEM_IMPORT_PMR:
+#endif
+#if defined(SUPPORT_DRM_DC_MODULE)
+		case PVR_DRM_GEM_DISPLAY_PMR:
+#endif
+		{
+			SERVER_SYNC_PRIMITIVE *psSync;
+			IMG_UINT32 uiSyncVAddr;
+			IMG_HANDLE hSync = NULL;
+
+			switch (psGEMSyncGet->type)
+			{
+				case PVRSRV_GEM_SYNC_TYPE_WRITE:
+				case PVRSRV_GEM_SYNC_TYPE_READ_HW:
+				case PVRSRV_GEM_SYNC_TYPE_READ_SW:
+				case PVRSRV_GEM_SYNC_TYPE_READ_DISPLAY:
+					psSync = psPVRObj->apsSyncPrim[psGEMSyncGet->type];
+					uiSyncVAddr = psPVRObj->auiSyncPrimVAddr[psGEMSyncGet->type];
+					break;
+				default:
+					iRet = -EINVAL;
+					goto ExitUnref;
+			}
+
+			if (psSync != NULL)
+			{
+				iRet = GEMSyncHandleCreate(LinuxConnectionFromFile(PVR_FILE_FROM_DRM_FILE(file)),
+							   psSync,
+							   &hSync);
+				if (iRet != 0)
+				{
+					goto ExitUnref;
+				}
+			}
+			
+			psGEMSyncGet->sync_handle = (uint64_t)(uintptr_t)hSync;
+			psGEMSyncGet->firmware_addr = uiSyncVAddr;
+			
+			iRet = 0;
+			break;
+		}
+		default:
+			iRet = -EINVAL;
+			break;
+	}
+
+ExitUnref:
+	drm_gem_object_unreference_unlocked(psObj);
+
+ExitUnlock:
+	OSReleaseBridgeLock();
+
+	return iRet;
+}
+
+
+/*************************************************************************/ /*!
+* DRM GEM helper callbacks
+*/ /**************************************************************************/
+
+#if defined(SUPPORT_DRM_DC_MODULE)
+int PVRSRVGEMDumbCreate(struct drm_file *file,
+			struct drm_device *dev,
+			struct drm_mode_create_dumb *args)
+{
+	struct drm_gem_object *psObj;
+	uint32_t uiPitch;
+	size_t uiSize;
+	int iRet;
+
+	uiPitch = args->width * (ALIGN(args->bpp, 8) / 8);
+	uiSize = uiPitch * args->height;
+
+	psObj = PVRSRVGEMObjectCreate(dev,
+				      PVR_DRM_GEM_DISPLAY_PMR,
+				      uiSize,
+				      PVRSRV_MEMALLOCFLAG_WRITE_COMBINE);
+	if (IS_ERR(psObj))
+	{
+		return PTR_ERR(psObj);
+	}
+
+	iRet = drm_gem_handle_create(file, psObj, &args->handle);
+	if (iRet == 0)
+	{
+		args->pitch = uiPitch;
+		args->size = uiSize;
+	}
+
+	drm_gem_object_unreference_unlocked(psObj);
+
+	return iRet;
+}
+
+int PVRSRVGEMDumbDestroy(struct drm_file *file,
+			 struct drm_device unref__ *dev,
+			 uint32_t handle)
+{
+	return drm_gem_handle_delete(file, handle);
+}
+
+int PVRSRVGEMDumbMapOffset(struct drm_file *file,
+			   struct drm_device *dev,
+			   uint32_t handle,
+			   uint64_t *offset)
+{
+	struct drm_gem_object *psObj;
+	int iRet = 0;
+
+	mutex_lock(&dev->struct_mutex);
+
+	psObj = drm_gem_object_lookup(dev, file, handle);
+	if (!psObj)
+	{
+		iRet = -ENOENT;
+		goto ExitUnlock;
+	}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) || defined(CHROMIUMOS_WORKAROUNDS_KERNEL310)
+	iRet = drm_gem_create_mmap_offset(psObj);
+	if (iRet)
+	{
+		goto ExitUnref;
+	}
+
+	*offset = drm_vma_node_offset_addr(&psObj->vma_node);
+#else
+	if (!psObj->map_list.map)
+	{
+		iRet = drm_gem_create_mmap_offset(psObj);
+		if (iRet)
+		{
+			goto ExitUnref;
+		}
+	}
+
+	*offset = (uint64_t)psObj->map_list.hash.key << PAGE_SHIFT;
+#endif
+
+ExitUnref:
+	drm_gem_object_unreference(psObj);
+
+ExitUnlock:
+	mutex_unlock(&dev->struct_mutex);
+
+	return iRet;
+}
+#endif
+
+void PVRSRVGEMFreeObject(struct drm_gem_object *obj)
+{
+	struct pvr_drm_gem_object *psPVRObj = to_pvr_drm_gem_object(obj);
+	int iSyncIndex;
+
+	for (iSyncIndex = 0; iSyncIndex < ARRAY_SIZE(psPVRObj->apsSyncPrim); iSyncIndex++)
+	{
+		if (psPVRObj->apsSyncPrim[iSyncIndex] != NULL)
+		{
+			PVRSRVServerSyncFreeKM(psPVRObj->apsSyncPrim[iSyncIndex]);
+		}
+	}
+
+	switch (psPVRObj->type)
+	{
+		case PVR_DRM_GEM_PMR:
+#if defined(SUPPORT_DRM_DC_MODULE)
+		case PVR_DRM_GEM_DISPLAY_PMR:
+#endif
+			if (psPVRObj->pmr != NULL)
+			{
+				PMRUnrefPMR(psPVRObj->pmr);
+			}
+			break;
+#if defined(PVR_DRM_USE_PRIME)
+		case PVR_DRM_GEM_IMPORT_PMR:
+			if (psPVRObj->pmr != NULL)
+			{
+				PMRUnrefPMR(psPVRObj->pmr);
+
+				drm_prime_gem_destroy(obj, NULL);
+			}
+			break;
+#endif
+		default:
+			break;
+	}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) || defined(CHROMIUMOS_WORKAROUNDS_KERNEL310)
+	drm_gem_free_mmap_offset(obj);
+#else
+	if (obj->map_list.map)
+	{
+		drm_gem_free_mmap_offset(obj);
+	}
+#endif
+
+	drm_gem_object_release(obj);
+	OSFreeMem(psPVRObj);
+}
+
+
+/*************************************************************************/ /*!
+* GEM interface
+*/ /**************************************************************************/
+
+int PVRSRVGEMInitObject(struct drm_gem_object *obj,
+			enum pvr_drm_gem_object_type type,
+			PVRSRV_MEMALLOCFLAGS_T alloc_flags)
+{
+	struct pvr_drm_dev_priv *psDevPriv = (struct pvr_drm_dev_priv *)obj->dev->dev_private;
+	struct pvr_drm_gem_object *psPVRObj = to_pvr_drm_gem_object(obj);
+	int iSyncIndex;
+	PVRSRV_ERROR eError;
+	int iRet;
+
+	psPVRObj->type = type;
+
+	eError = PVRSRVGEMCreatePMR(psDevPriv->dev_node,
+				    obj,
+				    alloc_flags,
+				    &psPVRObj->pmr);
+	if (eError != PVRSRV_OK)
+	{
+		return -ENOSPC;
+	}
+
+	if (psDevPriv->dev_node->hSyncPrimContext)
+	{
+		char *pszSyncName;
+
+		BUG_ON(ARRAY_SIZE(psPVRObj->apsSyncPrim) != ARRAY_SIZE(psPVRObj->auiSyncPrimVAddr));
+
+		for (iSyncIndex = 0; iSyncIndex < ARRAY_SIZE(psPVRObj->apsSyncPrim); iSyncIndex++)
+		{
+			if (iSyncIndex == PVRSRV_GEM_SYNC_TYPE_READ_DISPLAY)
+			{
+#if defined(SUPPORT_DRM_DC_MODULE)
+				if (type != PVR_DRM_GEM_DISPLAY_PMR)
+#endif
+				{
+					break;
+				}
+			}
+
+			switch (iSyncIndex)
+			{
+				case PVRSRV_GEM_SYNC_TYPE_WRITE:
+					pszSyncName = "pvr_drm_gem_write";
+					break;
+				case PVRSRV_GEM_SYNC_TYPE_READ_HW:
+					pszSyncName = "pvr_drm_gem_read_hw";
+					break;
+				case PVRSRV_GEM_SYNC_TYPE_READ_SW:
+					pszSyncName = "pvr_drm_gem_read_sw";
+					break;
+				case PVRSRV_GEM_SYNC_TYPE_READ_DISPLAY:
+					pszSyncName = "pvr_drm_gem_read_display";
+					break;
+				default:
+					PVR_ASSERT(0);
+					pszSyncName = "pvr_drm_gem_unknown";
+					break;
+			}
+
+			eError = PVRSRVServerSyncAllocKM(psDevPriv->dev_node,
+							 &psPVRObj->apsSyncPrim[iSyncIndex],
+							 &psPVRObj->auiSyncPrimVAddr[iSyncIndex],
+							 strlen(pszSyncName),
+							 pszSyncName);
+			if (eError != PVRSRV_OK)
+			{
+				iRet = -ENOMEM;
+				goto ErrorServerSyncFree;
+			}
+		}
+	}
+
+	return 0;
+
+ErrorServerSyncFree:
+	while (iSyncIndex--)
+	{
+		PVRSRVServerSyncFreeKM(psPVRObj->apsSyncPrim[iSyncIndex]);
+		psPVRObj->apsSyncPrim[iSyncIndex] = NULL;
+	}
+
+	PMRUnrefPMR(psPVRObj->pmr);
+	psPVRObj->pmr = NULL;
+
+	psPVRObj->type = PVR_DRM_GEM_UNDEFINED;
+
+	return iRet;
+}
+
+struct drm_gem_object *PVRSRVGEMObjectCreate(struct drm_device *dev,
+					     enum pvr_drm_gem_object_type type,
+					     size_t size,
+					     PVRSRV_MEMALLOCFLAGS_T flags)
+{
+	struct drm_gem_object *psObj;
+	struct pvr_drm_gem_object *psPVRObj;
+	int iRet;
+
+	psPVRObj = OSAllocZMem(sizeof *psPVRObj);
+	if (psPVRObj == NULL)
+	{
+		return ERR_PTR(-ENOMEM);
+	}
+	psObj = &psPVRObj->base;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) || defined(CHROMIUMOS_WORKAROUNDS_KERNEL310)
+	drm_gem_private_object_init(dev, psObj, ALIGN(size, PAGE_SIZE));
+#else
+	iRet = drm_gem_private_object_init(dev, psObj, ALIGN(size, PAGE_SIZE));
+	if (iRet)
+	{
+		OSFreeMem(psPVRObj);
+		return ERR_PTR(iRet);
+	}
+#endif
+	iRet = PVRSRVGEMInitObject(psObj,
+				   type,
+				   flags);
+	if (iRet)
+	{
+		drm_gem_object_unreference_unlocked(psObj);
+		return ERR_PTR(iRet);
+	}
+
+	return psObj;
+}
+
+struct drm_gem_object *PVRSRVGEMGetObject(PMR *psPMR)
+{
+	PMR_GEM_PRIV *psGEMPriv;
+
+	psGEMPriv = PMRGetPrivateDataHack(psPMR, &gsPMRGEMFuncTab);
+	if (psGEMPriv != NULL)
+	{
+		return psGEMPriv->psObj;
+	}
+
+	return NULL;
+}
+
+PMR *PVRSRVGEMMMapLookupPMR(struct file *psFile, struct vm_area_struct *psVMA)
+{
+	struct drm_file *psDrmFile = PVR_DRM_FILE_FROM_FILE(psFile);
+	struct drm_device *psDev = psDrmFile->minor->dev;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0))
+	struct drm_gem_mm *psMM = psDev->mm_private;
+#endif
+	struct drm_gem_object *psObj;
+	struct pvr_drm_gem_object *psPVRObj;
+	PMR *psPMR = NULL;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) || defined(CHROMIUMOS_WORKAROUNDS_KERNEL310)
+	struct drm_vma_offset_node *psNode;
+#else
+	struct drm_hash_item *psHash;
+	struct drm_local_map *psMap;
+#endif
+
+	mutex_lock(&psDev->struct_mutex);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) || defined(CHROMIUMOS_WORKAROUNDS_KERNEL310)
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0))
+	psNode = drm_vma_offset_exact_lookup(&psMM->vma_manager, psVMA->vm_pgoff, vma_pages(psVMA));
+#else
+	psNode = drm_vma_offset_exact_lookup(psDev->vma_offset_manager, psVMA->vm_pgoff, vma_pages(psVMA));
+#endif
+	if (!psNode)
+	{
+		goto ExitUnlock;
+	}
+
+	psObj = container_of(psNode, struct drm_gem_object, vma_node);
+#else
+	if (drm_ht_find_item(&psMM->offset_hash, psVMA->vm_pgoff, &psHash) != 0)
+	{
+		goto ExitUnlock;
+	}
+
+	psMap = container_of(psHash, struct drm_map_list, hash)->map;
+	if (!psMap)
+	{
+		goto ExitUnlock;
+	}
+
+	psObj = psMap->handle;
+#endif
+
+	psPVRObj = to_pvr_drm_gem_object(psObj);
+	psPMR = psPVRObj->pmr;
+
+ExitUnlock:
+	mutex_unlock(&psDev->struct_mutex);
+
+	return psPMR;
+}
+
+#endif /* defined(SUPPORT_DRM) */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/pvr_drm_prime.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/pvr_drm_prime.c
new file mode 100644
index 0000000..7cef9ef
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/pvr_drm_prime.c
@@ -0,0 +1,390 @@
+/*************************************************************************/ /*!
+@File
+@Title          PowerVR DRM GEM Prime interface
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Interface for managing prime memory.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if defined(SUPPORT_DRM)
+
+#include "pvr_drm.h"
+#include "allocmem.h"
+
+#if defined(PVR_DRM_USE_PRIME)
+#include "pmr.h"
+
+#if defined(PVR_RI_DEBUG)
+#include "ri_server.h"
+#endif
+
+
+/*************************************************************************/ /*!
+* DRM Prime helper callbacks
+*/ /**************************************************************************/
+
+static struct sg_table *PrimeMapDmaBuf(struct dma_buf_attachment *psAttachment,
+				       enum dma_data_direction eDir)
+{
+	struct drm_gem_object *psObj = psAttachment->dmabuf->priv;
+	struct pvr_drm_gem_object *psPVRObj = to_pvr_drm_gem_object(psObj);
+#if !defined(LMA)
+	IMG_DEVMEM_SIZE_T uiSize = 0;
+#endif
+	struct sg_table *psSGTable;
+	IMG_CPU_PHYADDR sCpuPAddr;
+	IMG_BOOL bCpuPAddrValid;
+	struct scatterlist *psScatterList;
+	IMG_UINT uiPageCount;
+	IMG_INT iPageNum;
+	PVRSRV_ERROR eError;
+	IMG_INT iErr;
+
+	psSGTable = OSAllocMem(sizeof(*psSGTable));
+	if (!psSGTable)
+	{
+		return ERR_PTR(-ENOMEM);
+	}
+
+#if defined(LMA)
+	uiPageCount = 1;
+#else
+	eError = PMR_LogicalSize(psPVRObj->pmr, &uiSize);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	uiPageCount = (IMG_SIZE_T)uiSize >> PAGE_SHIFT;
+#endif
+
+	iErr = sg_alloc_table(psSGTable, uiPageCount, GFP_KERNEL);
+	if (iErr)
+	{
+		goto ErrorFreeSGTable;
+	}
+
+	eError = PMRLockSysPhysAddresses(psPVRObj->pmr, PAGE_SHIFT);
+	if (eError != PVRSRV_OK)
+	{
+		iErr = -EPERM;
+		goto ErrorSGFreeTable;
+	}
+
+	for_each_sg (psSGTable->sgl, psScatterList, uiPageCount, iPageNum)
+	{
+		eError = PMR_CpuPhysAddr(psPVRObj->pmr,
+					 PAGE_SHIFT,
+					 1,
+					 iPageNum << PAGE_SHIFT,
+					 &sCpuPAddr,
+					 &bCpuPAddrValid);
+		if (eError != PVRSRV_OK || !bCpuPAddrValid)
+		{
+			iErr = -ENOMEM;
+			goto ErrorUnlockPMR;
+		}
+
+		sg_set_page(psScatterList, pfn_to_page(PFN_DOWN(sCpuPAddr.uiAddr)), PAGE_SIZE, 0);
+	}
+
+	(void)dma_map_sg(psAttachment->dev, psSGTable->sgl, psSGTable->nents, eDir);
+
+	return psSGTable;
+
+ErrorUnlockPMR:
+	PMRUnlockSysPhysAddresses(psPVRObj->pmr);
+
+ErrorSGFreeTable:
+	sg_free_table(psSGTable);
+
+ErrorFreeSGTable:
+	OSFreeMem(psSGTable);
+
+	return ERR_PTR(iErr);
+}
+
+static void PrimeUnmapDmaBuf(struct dma_buf_attachment *psAttachment,
+			     struct sg_table *psSGTable,
+			     enum dma_data_direction eDir)
+{
+	struct drm_gem_object *psObj = psAttachment->dmabuf->priv;
+	struct pvr_drm_gem_object *psPVRObj = to_pvr_drm_gem_object(psObj);
+	PVRSRV_ERROR eError;
+
+	dma_unmap_sg(psAttachment->dev, psSGTable->sgl, psSGTable->nents, eDir);
+
+	sg_free_table(psSGTable);
+	OSFreeMem(psSGTable);
+
+	eError = PMRUnlockSysPhysAddresses(psPVRObj->pmr);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_WARNING, "%s: failed to unlock PMR (%d)\n",
+			 __FUNCTION__, eError));
+	}
+}
+
+static void PrimeRelease(struct dma_buf *psDmaBuf)
+{
+	struct drm_gem_object *psObj = psDmaBuf->priv;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0)) && ! defined(CHROMIUMOS_WORKAROUNDS_KERNEL310)
+	if (psObj->export_dma_buf == psDmaBuf)
+	{
+		psObj->export_dma_buf = NULL;
+
+		drm_gem_object_unreference_unlocked(psObj);
+	}
+#else
+	drm_gem_object_unreference_unlocked(psObj);
+#endif
+}
+
+static int PrimeBeginCpuAccess(struct dma_buf *psDmaBuf,
+			       size_t uiStart,
+			       size_t uiLen,
+			       enum dma_data_direction unref__ eDir)
+{
+	struct drm_gem_object *psObj = psDmaBuf->priv;
+	struct pvr_drm_gem_object *psPVRObj = to_pvr_drm_gem_object(psObj);
+	PVRSRV_ERROR eError;
+
+	if (uiStart + uiLen > psDmaBuf->size)
+	{
+		return -EINVAL;
+	}
+
+	eError = PMRLockSysPhysAddresses(psPVRObj->pmr, PAGE_SHIFT);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: failed to lock PMR (%d)\n",
+			 __FUNCTION__, eError));
+
+		return (eError == PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES) ? -ENOMEM : -EINVAL;
+	}
+
+	return 0;
+}
+
+static void PrimeEndCpuAccess(struct dma_buf *psDmaBuf,
+			      size_t unref__ uiStart,
+			      size_t unref__ uiLen,
+			      enum dma_data_direction unref__ eDir)
+{
+	struct drm_gem_object *psObj = psDmaBuf->priv;
+	struct pvr_drm_gem_object *psPVRObj = to_pvr_drm_gem_object(psObj);
+	PVRSRV_ERROR eError;
+
+	eError = PMRUnlockSysPhysAddresses(psPVRObj->pmr);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_WARNING,
+			 "%s: failed to unlock PMR (%d)\n",
+			 __FUNCTION__, eError));
+	}
+}
+
+static void *PrimeKMap(struct dma_buf unref__ *psDmaBuf,
+		       unsigned long unref__ ulPageNum)
+{
+	return NULL;
+}
+
+static void *PrimeKMapAtomic(struct dma_buf unref__ *psDmaBuf,
+			     unsigned long unref__ ulPageNum)
+{
+	return NULL;
+}
+
+static int PrimeMMap(struct dma_buf unref__ *psDmaBuf,
+		     struct vm_area_struct unref__ *psVMA)
+{
+	return -EINVAL;
+}
+
+static void *PrimeVMap(struct dma_buf *psDmaBuf)
+{
+	struct drm_gem_object *psObj = psDmaBuf->priv;
+	struct pvr_drm_gem_object *psPVRObj = to_pvr_drm_gem_object(psObj);
+	void *pvCpuVAddr;
+	IMG_SIZE_T uiMappedLength;
+	IMG_HANDLE hMappingHandle;
+	PVRSRV_ERROR eError;
+
+	eError = PMRAcquireKernelMappingData(psPVRObj->pmr,
+					     0,
+					     0,
+					     &pvCpuVAddr,
+					     &uiMappedLength,
+					     &hMappingHandle);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: failed to acquire kernel mapping data (%d)\n",
+			 __FUNCTION__, eError));
+
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* We don't have anywhere to store the mapping handle but because we're 
+	   mapping the entire buffer we assume that the handle is actually the 
+	   same as the virtual address we get back */
+	PVR_ASSERT((void *)hMappingHandle == pvCpuVAddr);
+
+	return pvCpuVAddr;
+}
+
+static void PrimeVUnmap(struct dma_buf *psDmaBuf, void *pvVAddr)
+{
+	struct drm_gem_object *psObj = psDmaBuf->priv;
+	struct pvr_drm_gem_object *psPVRObj = to_pvr_drm_gem_object(psObj);
+	PVRSRV_ERROR eError;
+
+	eError = PMRReleaseKernelMappingData(psPVRObj->pmr, (IMG_HANDLE)pvVAddr);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_WARNING,
+			 "%s: failed to release kernel mapping data (%d)\n",
+			 __FUNCTION__, eError));
+	}
+}
+
+static const struct dma_buf_ops gsPrimeOps =
+{
+	.map_dma_buf		= PrimeMapDmaBuf,
+	.unmap_dma_buf		= PrimeUnmapDmaBuf,
+	.release		= PrimeRelease,
+	.begin_cpu_access	= PrimeBeginCpuAccess,
+	.end_cpu_access		= PrimeEndCpuAccess,
+	.kmap			= PrimeKMap,
+	.kmap_atomic		= PrimeKMapAtomic,
+	.mmap			= PrimeMMap,
+	.vmap			= PrimeVMap,
+	.vunmap			= PrimeVUnmap,
+};
+
+struct dma_buf *PVRSRVPrimeExport(struct drm_device unref__ *dev,
+				  struct drm_gem_object *obj,
+				  int flags)
+{
+	struct pvr_drm_gem_object *psPVRObj = to_pvr_drm_gem_object(obj);
+
+	switch (psPVRObj->type)
+	{
+		case PVR_DRM_GEM_PMR:
+		case PVR_DRM_GEM_DISPLAY_PMR:
+			break;
+		default:
+			PVR_ASSERT(0);
+			return ERR_PTR(-EINVAL);
+	}
+
+	return dma_buf_export(obj,
+			      &gsPrimeOps,
+			      obj->size,
+			      flags
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0))
+			      , NULL
+#endif
+			     );
+}
+
+struct drm_gem_object *PVRSRVPrimeImport(struct drm_device *dev,
+					 struct dma_buf *dma_buf)
+{
+	struct drm_gem_object *psObj;
+	struct pvr_drm_gem_object *psPVRObj;
+	int iRet;
+
+	if (dma_buf->ops == &gsPrimeOps)
+	{
+		psObj = dma_buf->priv;
+
+		if (psObj->dev == dev)
+		{
+			drm_gem_object_reference(psObj);
+			return psObj;
+		}
+	}
+
+	psPVRObj = OSAllocZMem(sizeof *psPVRObj);
+	if (psPVRObj == NULL)
+	{
+		return ERR_PTR(-ENOMEM);
+	}
+	psObj = &psPVRObj->base;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) || defined(CHROMIUMOS_WORKAROUNDS_KERNEL310)
+	drm_gem_private_object_init(dev, psObj, dma_buf->size);
+#else
+	iRet = drm_gem_private_object_init(dev, psObj, dma_buf->size);
+	if (iRet)
+	{
+		OSFreeMem(psPVRObj);
+		return ERR_PTR(iRet);
+	}
+#endif
+	psObj->import_attach = dma_buf_attach(dma_buf, dev->dev);
+	if (IS_ERR(psObj->import_attach))
+	{
+		iRet = PTR_ERR(psObj->import_attach);
+		goto ErrorGemUnref;
+	}
+
+	get_dma_buf(dma_buf);
+
+	iRet = PVRSRVGEMInitObject(psObj,
+				   PVR_DRM_GEM_IMPORT_PMR,
+				   PVRSRV_MEMALLOCFLAG_UNCACHED);
+	if (iRet)
+	{
+		goto ErrorDmaBufDetach;
+	}
+
+	return psObj;
+
+ErrorDmaBufDetach:
+	dma_buf_detach(dma_buf, psObj->import_attach);
+
+ErrorGemUnref:
+	drm_gem_object_unreference_unlocked(psObj);
+
+	return ERR_PTR(iRet);
+}
+
+#endif /* defined(PVR_DRM_USE_PRIME) */
+#endif /* defined(SUPPORT_DRM) */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/pvr_gputrace.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/pvr_gputrace.c
new file mode 100644
index 0000000..3f14769
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/pvr_gputrace.c
@@ -0,0 +1,414 @@
+/*************************************************************************/ /*!
+@File           pvr_gputrace.c
+@Title          PVR GPU Trace module Linux implementation
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvrsrv_error.h"
+#include "srvkm.h"
+#include "pvr_debug.h"
+#include "pvr_debugfs.h"
+#include "pvr_uaccess.h"
+
+#include "pvr_gputrace.h"
+
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/gpu.h>
+
+#define KM_FTRACE_NO_PRIORITY (0)
+
+
+/******************************************************************************
+ Module internal implementation
+******************************************************************************/
+
+/* Circular buffer sizes, must be a power of two */
+#define PVRSRV_KM_FTRACE_JOB_MAX       (512)
+#define PVRSRV_KM_FTRACE_CTX_MAX        (16)
+
+#define PVRSRV_FTRACE_JOB_FLAG_MASK     (0xFF000000)
+#define PVRSRV_FTRACE_JOB_ID_MASK       (0x00FFFFFF)
+#define PVRSRV_FTRACE_JOB_FLAG_ENQUEUED (0x80000000)
+
+#define PVRSRV_FTRACE_JOB_GET_ID(pa)               ((pa)->ui32FlagsAndID & PVRSRV_FTRACE_JOB_ID_MASK)
+#define PVRSRV_FTRACE_JOB_SET_ID_CLR_FLAGS(pa, id) ((pa)->ui32FlagsAndID = PVRSRV_FTRACE_JOB_ID_MASK & (id))
+
+#define PVRSRV_FTRACE_JOB_GET_FLAGS(pa)     ((pa)->ui32FlagsAndID & PVRSRV_FTRACE_JOB_FLAG_MASK)
+#define PVRSRV_FTRACE_JOB_SET_FLAGS(pa, fl) ((pa)->ui32FlagsAndID |= PVRSRV_FTRACE_JOB_FLAG_MASK & (fl))
+
+typedef struct _PVRSRV_FTRACE_JOB_
+{
+	/* Job ID calculated, no need to store it. */
+	IMG_UINT32 ui32FlagsAndID;
+	IMG_UINT32 ui32ExtJobRef;
+	IMG_UINT32 ui32IntJobRef;
+} PVRSRV_FTRACE_GPU_JOB;
+
+
+typedef struct _PVRSRV_FTRACE_GPU_CTX_
+{
+	/* Context ID is calculated, no need to store it IMG_UINT32 ui32CtxID; */
+	IMG_UINT32            ui32PID;
+
+	/* Every context has a circular buffer of jobs */
+	IMG_UINT16            ui16JobWrite;		/*!< Next position to write to */
+	PVRSRV_FTRACE_GPU_JOB asJobs[PVRSRV_KM_FTRACE_JOB_MAX];
+} PVRSRV_FTRACE_GPU_CTX;
+
+
+typedef struct _PVRSRV_FTRACE_GPU_DATA_
+{
+	IMG_UINT16 ui16CtxWrite;				/*!< Next position to write to */
+	PVRSRV_FTRACE_GPU_CTX asFTraceContext[PVRSRV_KM_FTRACE_CTX_MAX];
+} PVRSRV_FTRACE_GPU_DATA;
+
+PVRSRV_FTRACE_GPU_DATA gsFTraceGPUData;
+
+static void CreateJob(IMG_UINT32 ui32PID, IMG_UINT32 ui32ExtJobRef,
+		IMG_UINT32 ui32IntJobRef)
+{
+	PVRSRV_FTRACE_GPU_CTX* psContext = IMG_NULL;
+	PVRSRV_FTRACE_GPU_JOB* psJob = IMG_NULL;
+	IMG_UINT32 i;
+
+	/* Search for a previously created CTX object */
+	for (i = 0; i < PVRSRV_KM_FTRACE_CTX_MAX; ++i)
+	{
+		if(gsFTraceGPUData.asFTraceContext[i].ui32PID == ui32PID)
+		{
+			psContext = &(gsFTraceGPUData.asFTraceContext[i]);
+			break;
+		} /* */
+	}
+
+	/* If not present in the CB history, create it */
+	if (psContext == NULL)
+	{
+		/*
+		  We overwrite old contexts as we don't get a "finished" indication
+		  so we assume PVRSRV_KM_FTRACE_CTX_MAX is a sufficient number of
+		  process contexts in use at any one time.
+		*/
+		i = gsFTraceGPUData.ui16CtxWrite;
+
+		gsFTraceGPUData.asFTraceContext[i].ui32PID = ui32PID;
+		gsFTraceGPUData.asFTraceContext[i].ui16JobWrite = 0;
+		psContext = &(gsFTraceGPUData.asFTraceContext[i]);
+
+		/* Advance the write position of the context CB. */
+		gsFTraceGPUData.ui16CtxWrite = (i+1) & (PVRSRV_KM_FTRACE_CTX_MAX-1);
+	}
+
+	/*
+	  This is just done during the first kick so it is assumed the job is not
+	  in the CB of jobs yet so we create it. Clear flags.
+	*/
+	psJob = &(psContext->asJobs[psContext->ui16JobWrite]);
+	PVRSRV_FTRACE_JOB_SET_ID_CLR_FLAGS(psJob, 1001+psContext->ui16JobWrite);
+	psJob->ui32ExtJobRef = ui32ExtJobRef;
+	psJob->ui32IntJobRef = ui32IntJobRef;
+
+	/*
+	  Advance the write position of the job CB. Overwrite oldest job
+	  when buffer overflows
+	*/
+	psContext->ui16JobWrite = (psContext->ui16JobWrite + 1) & (PVRSRV_KM_FTRACE_JOB_MAX-1);
+}
+
+
+static PVRSRV_ERROR GetCtxAndJobID(IMG_UINT32 ui32PID,
+	IMG_UINT32 ui32ExtJobRef, IMG_UINT32 ui32IntJobRef,
+	IMG_UINT32 *pui32CtxID, PVRSRV_FTRACE_GPU_JOB** ppsJob)
+{
+	PVRSRV_FTRACE_GPU_CTX* psContext = IMG_NULL;
+	IMG_UINT32 i;
+
+	/* Search for the process context object in the CB */
+	for (i = 0; i < PVRSRV_KM_FTRACE_CTX_MAX; ++i)
+	{
+		if(gsFTraceGPUData.asFTraceContext[i].ui32PID == ui32PID)
+		{
+			psContext = &(gsFTraceGPUData.asFTraceContext[i]);
+			/* Derive context ID from CB index: 101..101+PVRSRV_KM_FTRACE_CTX_MAX */
+			*pui32CtxID = 101+i;
+			break;
+		}
+	}
+
+	/* If not found, return an error, let caller trace the error */
+	if (psContext == NULL)
+	{
+		PVR_DPF((PVR_DBG_MESSAGE,"GetCtxAndJobID: Failed to find context ID for PID %d", ui32PID));
+		return PVRSRV_ERROR_PROCESS_NOT_FOUND;
+	}
+
+	/* Look for the JobID in the jobs CB */
+	for(i = 0; i < PVRSRV_KM_FTRACE_JOB_MAX; ++i)
+	{
+		if((psContext->asJobs[i].ui32ExtJobRef == ui32ExtJobRef) &&
+			(psContext->asJobs[i].ui32IntJobRef == ui32IntJobRef))
+		{
+			/* Derive job ID from CB index: 1001..1001+PVRSRV_KM_FTRACE_JOB_MAX */
+			*ppsJob = &psContext->asJobs[i];
+			return PVRSRV_OK;
+		}
+	}
+
+	PVR_DPF((PVR_DBG_MESSAGE,"GetCtxAndJobID: Failed to find job ID for extJobRef %d, intJobRef %x", ui32ExtJobRef, ui32IntJobRef));
+	return PVRSRV_ERROR_NOT_FOUND;
+}
+
+
+/* DebugFS entry for the feature's on/off file */
+static PVR_DEBUGFS_ENTRY_DATA *gpsPVRDebugFSGpuTracingOnEntry = NULL;
+
+
+/*
+  If SUPPORT_GPUTRACE_EVENTS is defined the drive is built with support
+  to route RGX HWPerf packets to the Linux FTrace mechanism. To allow
+  this routing feature to be switched on and off at run-time the following
+  debugfs entry is created:
+  	/sys/kernel/debug/pvr/gpu_tracing_on
+  To enable GPU events in the FTrace log type the following on the target:
+ 	echo Y > /sys/kernel/debug/pvr/gpu_tracing_on
+  To disable, type:
+  	echo N > /sys/kernel/debug/pvr/gpu_tracing_on
+
+  It is also possible to enable this feature at driver load by setting the
+  default application hint "EnableFTraceGPU=1" in /etc/powervr.ini.
+*/
+
+static void *GpuTracingSeqStart(struct seq_file *psSeqFile, loff_t *puiPosition)
+{
+	if (*puiPosition == 0)
+	{
+		/* We want only one entry in the sequence, one call to show() */
+		return (void*)1;
+	}
+
+	return NULL;
+}
+
+
+static void GpuTracingSeqStop(struct seq_file *psSeqFile, void *pvData)
+{
+	PVR_UNREFERENCED_PARAMETER(psSeqFile);
+}
+
+
+static void *GpuTracingSeqNext(struct seq_file *psSeqFile, void *pvData, loff_t *puiPosition)
+{
+	PVR_UNREFERENCED_PARAMETER(psSeqFile);
+	return NULL;
+}
+
+
+static int GpuTracingSeqShow(struct seq_file *psSeqFile, void *pvData)
+{
+	IMG_BOOL bValue = PVRGpuTraceEnabled();
+
+	PVR_UNREFERENCED_PARAMETER(pvData);
+
+	seq_puts(psSeqFile, (bValue ? "Y\n" : "N\n"));
+	return 0;
+}
+
+
+static struct seq_operations gsGpuTracingReadOps =
+{
+	.start = GpuTracingSeqStart,
+	.stop  = GpuTracingSeqStop,
+	.next  = GpuTracingSeqNext,
+	.show  = GpuTracingSeqShow,
+};
+
+
+static IMG_INT GpuTracingSet(const IMG_CHAR *buffer, size_t count, loff_t uiPosition, void *data)
+{
+	IMG_CHAR cFirstChar;
+
+	PVR_UNREFERENCED_PARAMETER(uiPosition);
+	PVR_UNREFERENCED_PARAMETER(data);
+
+	if (!count)
+	{
+		return -EINVAL;
+	}
+
+	if (pvr_copy_from_user(&cFirstChar, buffer, 1))
+	{
+		return -EFAULT;
+	}
+
+	switch (cFirstChar)
+	{
+		case '0':
+		case 'n':
+		case 'N':
+		{
+			PVRGpuTraceEnabledSet(IMG_FALSE);
+			PVR_TRACE(("DISABLED GPU FTrace"));
+			break;
+		}
+		case '1':
+		case 'y':
+		case 'Y':
+		{
+            if (PVRGpuTraceEnabledSet(IMG_TRUE) == PVRSRV_OK)
+            {
+                PVR_TRACE(("ENABLED GPU FTrace"));
+            }
+            else
+            {
+                PVR_TRACE(("FAILED to enable GPU FTrace"));
+            }
+			break;
+		}
+	}
+
+	return count;
+}
+
+
+/******************************************************************************
+ Module In-bound API
+******************************************************************************/
+
+
+void PVRGpuTraceClientWork(
+		const IMG_UINT32 ui32Pid,
+		const IMG_UINT32 ui32ExtJobRef,
+		const IMG_UINT32 ui32IntJobRef,
+		const IMG_CHAR* pszKickType)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PVRSRV_FTRACE_GPU_JOB* psJob;
+	IMG_UINT32   ui32CtxId = 0;
+
+	PVR_ASSERT(pszKickType);
+
+	PVR_DPF((PVR_DBG_VERBOSE, "PVRGpuTraceClientKick(%s): PID %u, extJobRef %u, intJobRef %u", pszKickType, ui32Pid, ui32ExtJobRef, ui32IntJobRef));
+
+	CreateJob(ui32Pid, ui32ExtJobRef, ui32IntJobRef);
+
+	/*
+	  Always create jobs for client work above but only emit the enqueue
+	  trace if the feature is enabled.
+	  This keeps the lookup tables up to date when the gpu_tracing_on is
+	  disabled so that when it is re-enabled the packets that might be in
+	  the HWPerf buffer can be decoded in the switch event processing below.
+	*/
+	if (PVRGpuTraceEnabled())
+	{
+		eError = GetCtxAndJobID(ui32Pid, ui32ExtJobRef, ui32IntJobRef, &ui32CtxId,  &psJob);
+		PVR_LOGRN_IF_ERROR(eError, "GetCtxAndJobID");
+
+		trace_gpu_job_enqueue(ui32CtxId, PVRSRV_FTRACE_JOB_GET_ID(psJob), pszKickType);
+
+		PVRSRV_FTRACE_JOB_SET_FLAGS(psJob, PVRSRV_FTRACE_JOB_FLAG_ENQUEUED);
+	}
+}
+
+
+void PVRGpuTraceWorkSwitch(
+		IMG_UINT64 ui64HWTimestampInOSTime,
+		const IMG_UINT32 ui32Pid,
+		const IMG_UINT32 ui32ExtJobRef,
+		const IMG_UINT32 ui32IntJobRef,
+		const IMG_CHAR* pszWorkType,
+		PVR_GPUTRACE_SWITCH_TYPE eSwType)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+    PVRSRV_FTRACE_GPU_JOB* psJob = IMG_NULL;
+	IMG_UINT32 ui32CtxId;
+
+	PVR_ASSERT(pszWorkType);
+
+	eError = GetCtxAndJobID(ui32Pid, ui32ExtJobRef, ui32IntJobRef,	&ui32CtxId,  &psJob);
+	PVR_LOGRN_IF_ERROR(eError, "GetCtxAndJobID");
+
+	PVR_ASSERT(psJob);
+
+    /*
+      Only trace switch event if the job's enqueue event was traced. Necessary
+	  for when the GPU tracing is disabled, apps run and re-enabled to avoid
+	  orphan switch events from appearing in the trace file.
+	*/
+	if (PVRSRV_FTRACE_JOB_GET_FLAGS(psJob) & PVRSRV_FTRACE_JOB_FLAG_ENQUEUED)
+	{
+		if (eSwType == PVR_GPUTRACE_SWITCH_TYPE_END)
+		{
+			/* When the GPU goes idle, we need to trace a switch with a context
+			 * ID of 0.
+			 */
+			ui32CtxId = 0;
+		}
+
+		trace_gpu_sched_switch(pszWorkType, ui64HWTimestampInOSTime,
+				ui32CtxId, KM_FTRACE_NO_PRIORITY, PVRSRV_FTRACE_JOB_GET_ID(psJob));
+	}
+}
+
+
+PVRSRV_ERROR PVRGpuTraceInit(void)
+{
+	return PVRDebugFSCreateEntry("gpu_tracing_on",
+				      NULL,
+				      &gsGpuTracingReadOps,
+				      (PVRSRV_ENTRY_WRITE_FUNC *)GpuTracingSet,
+				      NULL,
+				      &gpsPVRDebugFSGpuTracingOnEntry);
+}
+
+
+void PVRGpuTraceDeInit(void)
+{
+	/* Can be NULL if driver startup failed */
+	if (gpsPVRDebugFSGpuTracingOnEntry)
+	{
+		PVRDebugFSRemoveEntry(gpsPVRDebugFSGpuTracingOnEntry);
+		gpsPVRDebugFSGpuTracingOnEntry = NULL;
+	}
+}
+
+
+/******************************************************************************
+ End of file (pvr_gputrace.c)
+******************************************************************************/
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/pvr_gputrace.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/pvr_gputrace.h
new file mode 100644
index 0000000..e4f21ee
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/pvr_gputrace.h
@@ -0,0 +1,96 @@
+/*************************************************************************/ /*!
+@File           pvr_gputrace.h
+@Title          PVR GPU Trace module common environment interface
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef PVR_GPUTRACE_H_
+#define PVR_GPUTRACE_H_
+
+#include "img_types.h"
+
+
+/******************************************************************************
+ Module out-bound API
+******************************************************************************/
+
+/*
+  The device layer of the KM driver defines these two APIs to allow a
+  platform module to set and retrieve the feature's on/off state.
+*/
+extern PVRSRV_ERROR PVRGpuTraceEnabledSet(IMG_BOOL bNewValue);
+extern IMG_BOOL PVRGpuTraceEnabled(void);
+
+
+/******************************************************************************
+ Module In-bound API
+******************************************************************************/
+
+typedef enum {
+	PVR_GPUTRACE_SWITCH_TYPE_UNDEF = 0,
+
+	PVR_GPUTRACE_SWITCH_TYPE_BEGIN = 1,
+	PVR_GPUTRACE_SWITCH_TYPE_END = 2
+
+} PVR_GPUTRACE_SWITCH_TYPE;
+
+
+void PVRGpuTraceClientWork(
+		const IMG_UINT32 ui32Pid,
+		const IMG_UINT32 ui32ExtJobRef,
+		const IMG_UINT32 ui32IntJobRef,
+		const IMG_CHAR* pszKickType);
+
+
+void PVRGpuTraceWorkSwitch(
+		IMG_UINT64 ui64OSTimestamp,
+		const IMG_UINT32 ui32Pid,
+		const IMG_UINT32 ui32ExtJobRef,
+		const IMG_UINT32 ui32IntJobRef,
+		const IMG_CHAR* pszWorkType,
+		PVR_GPUTRACE_SWITCH_TYPE eSwType);
+
+
+PVRSRV_ERROR PVRGpuTraceInit(void);
+
+
+void PVRGpuTraceDeInit(void);
+
+
+#endif /* PVR_GPUTRACE_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/pvr_uaccess.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/pvr_uaccess.h
new file mode 100644
index 0000000..53c8f0a
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/pvr_uaccess.h
@@ -0,0 +1,106 @@
+/*************************************************************************/ /*!
+@File
+@Title          Utility functions for user space access
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef __PVR_UACCESS_H__
+#define __PVR_UACCESS_H__
+
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38))
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#endif
+
+#include <asm/uaccess.h>
+
+static inline unsigned long pvr_copy_to_user(void __user *pvTo, const void *pvFrom, unsigned long ulBytes)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33))
+    if (access_ok(VERIFY_WRITE, pvTo, ulBytes))
+    {
+	return __copy_to_user(pvTo, pvFrom, ulBytes);
+    }
+    return ulBytes;
+#else
+    return copy_to_user(pvTo, pvFrom, ulBytes);
+#endif
+}
+
+
+#if defined(__KLOCWORK__)
+	/* this part is only to tell Klocwork not to report false positive because
+	   it doesn't understand that pvr_copy_from_user will initialise the memory
+	   pointed to by pvTo */
+#include <linux/string.h> /* get the memset prototype */
+static inline unsigned long pvr_copy_from_user(void *pvTo, const void __user *pvFrom, unsigned long ulBytes)
+{
+	if (pvTo != NULL)
+	{
+		memset(pvTo, 0xAA, ulBytes);
+		return 0;
+	}
+	return 1;
+}
+	
+#else /* real implementation */
+
+static inline unsigned long pvr_copy_from_user(void *pvTo, const void __user *pvFrom, unsigned long ulBytes)
+{
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33))
+    /*
+     * The compile time correctness checking introduced for copy_from_user in
+     * Linux 2.6.33 isn't fully compatible with our usage of the function.
+     */
+    if (access_ok(VERIFY_READ, pvFrom, ulBytes))
+    {
+	return __copy_from_user(pvTo, pvFrom, ulBytes);
+    }
+    return ulBytes;
+#else
+    return copy_from_user(pvTo, pvFrom, ulBytes);
+#endif
+}
+#endif /* klocworks */ 
+
+#endif /* __PVR_UACCESS_H__ */
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/rogue_trace_events.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/rogue_trace_events.h
new file mode 100644
index 0000000..be3a9fa
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/rogue_trace_events.h
@@ -0,0 +1,158 @@
+/*************************************************************************/ /*!
+@File
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rogue
+
+#if !defined(_ROGUE_TRACE_EVENTS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _ROGUE_TRACE_EVENTS_H
+
+#include <linux/tracepoint.h>
+#include <linux/time.h>
+
+TRACE_EVENT(rogue_fence_update,
+
+	TP_PROTO(const char *comm, const char *cmd, const char *dm, u32 fw_ctx, u32 offset,
+		u32 sync_fwaddr, u32 sync_value),
+
+	TP_ARGS(comm, cmd, dm, fw_ctx, offset, sync_fwaddr, sync_value),
+
+	TP_STRUCT__entry(
+		__string(       comm,           comm            )
+		__string(       cmd,            cmd             )
+		__string(       dm,             dm              )
+		__field(        u32,            fw_ctx          )
+		__field(        u32,            offset          )
+		__field(        u32,            sync_fwaddr     )
+		__field(        u32,            sync_value      )
+	),
+
+	TP_fast_assign(
+		__assign_str(comm, comm);
+		__assign_str(cmd, cmd);
+		__assign_str(dm, dm);
+		__entry->fw_ctx = fw_ctx;
+		__entry->offset = offset;
+		__entry->sync_fwaddr = sync_fwaddr;
+		__entry->sync_value = sync_value;
+	),
+
+	TP_printk("comm=%s cmd=%s dm=%s fw_ctx=%lx offset=%lu sync_fwaddr=%lx sync_value=%lx",
+		__get_str(comm),
+		__get_str(cmd),
+		__get_str(dm),
+		(unsigned long)__entry->fw_ctx,
+		(unsigned long)__entry->offset,
+		(unsigned long)__entry->sync_fwaddr,
+		(unsigned long)__entry->sync_value)
+);
+
+TRACE_EVENT(rogue_fence_check,
+
+	TP_PROTO(const char *comm, const char *cmd, const char *dm, u32 fw_ctx, u32 offset,
+		u32 sync_fwaddr, u32 sync_value),
+
+	TP_ARGS(comm, cmd, dm, fw_ctx, offset, sync_fwaddr, sync_value),
+
+	TP_STRUCT__entry(
+		__string(       comm,           comm            )
+		__string(       cmd,            cmd             )
+		__string(       dm,             dm              )
+		__field(        u32,            fw_ctx          )
+		__field(        u32,            offset          )
+		__field(        u32,            sync_fwaddr     )
+		__field(        u32,            sync_value      )
+	),
+
+	TP_fast_assign(
+		__assign_str(comm, comm);
+		__assign_str(cmd, cmd);
+		__assign_str(dm, dm);
+		__entry->fw_ctx = fw_ctx;
+		__entry->offset = offset;
+		__entry->sync_fwaddr = sync_fwaddr;
+		__entry->sync_value = sync_value;
+	),
+
+	TP_printk("comm=%s cmd=%s dm=%s fw_ctx=%lx offset=%lu sync_fwaddr=%lx sync_value=%lx",
+		__get_str(comm),
+		__get_str(cmd),
+		__get_str(dm),
+		(unsigned long)__entry->fw_ctx,
+		(unsigned long)__entry->offset,
+		(unsigned long)__entry->sync_fwaddr,
+		(unsigned long)__entry->sync_value)
+);
+
+TRACE_EVENT(rogue_create_fw_context,
+
+	TP_PROTO(const char *comm, const char *dm, u32 fw_ctx),
+
+	TP_ARGS(comm, dm, fw_ctx),
+
+	TP_STRUCT__entry(
+		__string(       comm,           comm            )
+		__string(       dm,             dm              )
+		__field(        u32,            fw_ctx          )
+	),
+
+	TP_fast_assign(
+		__assign_str(comm, comm);
+		__assign_str(dm, dm);
+		__entry->fw_ctx = fw_ctx;
+	),
+
+	TP_printk("comm=%s dm=%s fw_ctx=%lx",
+		__get_str(comm),
+		__get_str(dm),
+		(unsigned long)__entry->fw_ctx)
+);
+
+#endif /* _ROGUE_TRACE_EVENTS_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+
+/* This is needed because the name of this file doesn't match TRACE_SYSTEM. */
+#define TRACE_INCLUDE_FILE rogue_trace_events
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/trace_events.c b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/trace_events.c
new file mode 100644
index 0000000..ca4152b
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/trace_events.c
@@ -0,0 +1,79 @@
+/*************************************************************************/ /*!
+@Title          Linux trace event helper functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/sched.h>
+
+#include "img_types.h"
+#include "rgx_fwif_km.h"
+
+#define CREATE_TRACE_POINTS
+#include "trace_events.h"
+
+/* This is a helper that calls trace_rogue_fence_update for each fence in an
+ * array.
+ */
+void trace_rogue_fence_updates(const char *cmd, const char *dm, IMG_UINT32 ui32FWContext,
+							   IMG_UINT32 ui32Offset,
+							   IMG_UINT uCount,
+							   PRGXFWIF_UFO_ADDR *pauiAddresses,
+							   IMG_UINT32 *paui32Values)
+{
+	IMG_UINT i;
+	for (i = 0; i < uCount; i++)
+	{
+		trace_rogue_fence_update(current->comm, cmd, dm, ui32FWContext, ui32Offset,
+								 pauiAddresses[i].ui32Addr, paui32Values[i]);
+	}
+}
+
+void trace_rogue_fence_checks(const char *cmd, const char *dm, IMG_UINT32 ui32FWContext,
+							  IMG_UINT32 ui32Offset,
+							  IMG_UINT uCount,
+							  PRGXFWIF_UFO_ADDR *pauiAddresses,
+							  IMG_UINT32 *paui32Values)
+{
+	IMG_UINT i;
+	for (i = 0; i < uCount; i++)
+	{
+		trace_rogue_fence_check(current->comm, cmd, dm, ui32FWContext, ui32Offset,
+							  pauiAddresses[i].ui32Addr, paui32Values[i]);
+	}
+}
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/trace_events.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/trace_events.h
new file mode 100644
index 0000000..7fe1a2b
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/env/linux/trace_events.h
@@ -0,0 +1,87 @@
+/*************************************************************************/ /*!
+@Title          Linux trace events and event helper functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(TRACE_EVENTS_H)
+#define TRACE_EVENTS_H
+
+#include "rogue_trace_events.h"
+
+/* We need to make these functions do nothing if CONFIG_EVENT_TRACING isn't
+ * enabled, just like the actual trace event functions that the kernel
+ * defines for us.
+ */
+#ifdef CONFIG_EVENT_TRACING
+void trace_rogue_fence_updates(const char *cmd, const char *dm,
+							   IMG_UINT32 ui32FWContext,
+							   IMG_UINT32 ui32Offset,
+							   IMG_UINT uCount,
+							   PRGXFWIF_UFO_ADDR *pauiAddresses,
+							   IMG_UINT32 *paui32Values);
+
+void trace_rogue_fence_checks(const char *cmd, const char *dm,
+							  IMG_UINT32 ui32FWContext,
+							  IMG_UINT32 ui32Offset,
+							  IMG_UINT uCount,
+							  PRGXFWIF_UFO_ADDR *pauiAddresses,
+							  IMG_UINT32 *paui32Values);
+#else  /* CONFIG_TRACE_EVENTS */
+static inline
+void trace_rogue_fence_updates(const char *cmd, const char *dm,
+							   IMG_UINT32 ui32FWContext,
+							   IMG_UINT32 ui32Offset,
+							   IMG_UINT uCount,
+							   PRGXFWIF_UFO_ADDR *pauiAddresses,
+							   IMG_UINT32 *paui32Values)
+{
+}
+
+static inline
+void trace_rogue_fence_checks(const char *cmd, const char *dm,
+							  IMG_UINT32 ui32FWContext,
+							  IMG_UINT32 ui32Offset,
+							  IMG_UINT uCount,
+							  PRGXFWIF_UFO_ADDR *pauiAddresses,
+							  IMG_UINT32 *paui32Values)
+{
+}
+#endif /* CONFIG_TRACE_EVENTS */
+
+#endif /* TRACE_EVENTS_H */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/cache_generic.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/cache_generic.h
new file mode 100644
index 0000000..07ec973
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/cache_generic.h
@@ -0,0 +1,52 @@
+/*************************************************************************/ /*!
+@File
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _CACHE_GENERIC_H_
+#define _CACHE_GENERIC_H_
+
+#include "img_types.h"
+#include "cache_external.h"
+#include "device.h"
+#include "pvrsrv_error.h"
+
+PVRSRV_ERROR CacheOpQueue(PVRSRV_CACHE_OP uiCacheOp);
+
+#endif	/* _CACHE_GENERIC_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/connection_server.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/connection_server.h
new file mode 100644
index 0000000..d58d256
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/connection_server.h
@@ -0,0 +1,107 @@
+/**************************************************************************/ /*!
+@File
+@Title          Server side connection management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    API for server side connection management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#if !defined(_CONNECTION_SERVER_H_)
+#define _CONNECTION_SERVER_H_
+
+
+#include "img_types.h"
+#include "handle.h"
+#include "pvrsrv_cleanup.h"
+
+/* Variable used to hold in memory the timeout for the current time slice*/
+extern IMG_UINT64 gui64TimesliceLimit;
+/* Counter number of handle data freed during the current time slice */
+extern IMG_UINT32 gui32HandleDataFreeCounter;
+/* Set the maximum time the freeing of the resources can keep the lock */
+#define CONNECTION_DEFERRED_CLEANUP_TIMESLICE_NS 3000 * 1000 /* 3ms */
+
+typedef struct _CONNECTION_DATA_
+{
+	PVRSRV_HANDLE_BASE		*psHandleBase;
+	struct _SYNC_CONNECTION_DATA_	*psSyncConnectionData;
+	struct _PDUMP_CONNECTION_DATA_	*psPDumpConnectionData;
+
+	/* True if the process is the initialisation server. */
+	IMG_BOOL			bInitProcess;
+
+	/*
+	 * OS specific data can be stored via this handle.
+	 * See osconnection_server.h for a generic mechanism
+	 * for initialising this field.
+	 */
+	IMG_HANDLE			hOsPrivateData;
+
+	IMG_PID				pid;
+
+	IMG_PVOID			hSecureData;
+
+	IMG_HANDLE			hProcessStats;
+
+	/* Structure which is hooked into the cleanup thread work list */
+	PVRSRV_CLEANUP_THREAD_WORK sCleanupThreadFn;
+
+	/* List navigation for deferred freeing of connection data */
+	struct _CONNECTION_DATA_	**ppsThis;
+	struct _CONNECTION_DATA_	*psNext;
+} CONNECTION_DATA;
+
+PVRSRV_ERROR PVRSRVConnectionConnect(IMG_PVOID *ppvPrivData, IMG_PVOID pvOSData);
+void PVRSRVConnectionDisconnect(IMG_PVOID pvPrivData);
+
+PVRSRV_ERROR PVRSRVConnectionInit(void);
+PVRSRV_ERROR PVRSRVConnectionDeInit(void);
+
+IMG_PID PVRSRVGetPurgeConnectionPid(void);
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVConnectionPrivateData)
+#endif
+static INLINE
+IMG_HANDLE PVRSRVConnectionPrivateData(CONNECTION_DATA *psConnection)
+{
+	return (psConnection != IMG_NULL) ? psConnection->hOsPrivateData : IMG_NULL;
+}
+
+
+#endif /* !defined(_CONNECTION_SERVER_H_) */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/dc_server.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/dc_server.h
new file mode 100644
index 0000000..c039b06
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/dc_server.h
@@ -0,0 +1,156 @@
+/**************************************************************************/ /*!
+@File
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef _DC_SERVER_H_
+#define _DC_SERVER_H_
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "sync_external.h"
+#include "pvrsrv_surface.h"
+#include "pmr.h"
+#include "kerneldisplay.h"
+#include "sync_server.h"
+
+typedef struct _DC_DEVICE_ DC_DEVICE;
+typedef struct _DC_DISPLAY_CONTEXT_ DC_DISPLAY_CONTEXT;
+typedef struct _DC_BUFFER_ DC_BUFFER;
+typedef DC_BUFFER* DC_PIN_HANDLE;
+
+PVRSRV_ERROR DCDevicesQueryCount(IMG_UINT32 *pui32DeviceCount);
+
+PVRSRV_ERROR DCDevicesEnumerate(IMG_UINT32 ui32DeviceArraySize,
+								IMG_UINT32 *pui32DeviceCount,
+								IMG_UINT32 *paui32DeviceIndex);
+
+PVRSRV_ERROR DCDeviceAcquire(IMG_UINT32 ui32DeviceIndex,
+							 DC_DEVICE **ppsDevice);
+
+PVRSRV_ERROR DCDeviceRelease(DC_DEVICE *psDevice);
+
+PVRSRV_ERROR DCGetInfo(DC_DEVICE *psDevice,
+					   DC_DISPLAY_INFO *psDisplayInfo);
+
+PVRSRV_ERROR DCPanelQueryCount(DC_DEVICE *psDevice,
+								IMG_UINT32 *pui32NumPanels);
+
+PVRSRV_ERROR DCPanelQuery(DC_DEVICE *psDevice,
+						   IMG_UINT32 ui32PanelsArraySize,
+						   IMG_UINT32 *pui32NumPanels,
+						   PVRSRV_PANEL_INFO *pasPanelInfo);
+
+PVRSRV_ERROR DCFormatQuery(DC_DEVICE *psDevice,
+							 IMG_UINT32 ui32FormatArraySize,
+							 PVRSRV_SURFACE_FORMAT *pasFormat,
+							 IMG_UINT32 *pui32Supported);
+
+PVRSRV_ERROR DCDimQuery(DC_DEVICE *psDevice,
+						  IMG_UINT32 ui32DimSize,
+						  PVRSRV_SURFACE_DIMS *pasDim,
+						  IMG_UINT32 *pui32Supported);
+
+PVRSRV_ERROR DCSetBlank(DC_DEVICE *psDevice,
+						IMG_BOOL bEnabled);
+
+PVRSRV_ERROR DCSetVSyncReporting(DC_DEVICE *psDevice,
+								 IMG_BOOL bEnabled);
+
+PVRSRV_ERROR DCLastVSyncQuery(DC_DEVICE *psDevice,
+							  IMG_INT64 *pi64Timestamp);
+
+PVRSRV_ERROR DCSystemBufferAcquire(DC_DEVICE *psDevice,
+								   IMG_UINT32 *pui32ByteStride,
+								   DC_BUFFER **ppsBuffer);
+
+PVRSRV_ERROR DCSystemBufferRelease(DC_BUFFER *psBuffer);
+
+PVRSRV_ERROR DCDisplayContextCreate(DC_DEVICE *psDevice,
+									DC_DISPLAY_CONTEXT **ppsDisplayContext);
+
+PVRSRV_ERROR DCDisplayContextFlush(IMG_VOID);
+
+PVRSRV_ERROR DCDisplayContextConfigureCheck(DC_DISPLAY_CONTEXT *psDisplayContext,
+											IMG_UINT32 ui32PipeCount,
+											PVRSRV_SURFACE_CONFIG_INFO *pasSurfAttrib,
+											DC_BUFFER **papsBuffers);
+
+PVRSRV_ERROR DCDisplayContextConfigure(DC_DISPLAY_CONTEXT *psDisplayContext,
+									   IMG_UINT32 ui32PipeCount,
+									   PVRSRV_SURFACE_CONFIG_INFO *pasSurfAttrib,
+									   DC_BUFFER **papsBuffers,
+									   IMG_UINT32 ui32SyncOpCount,
+									   SERVER_SYNC_PRIMITIVE **papsSync,
+									   IMG_BOOL *pabUpdate,
+									   IMG_UINT32 ui32DisplayPeriod,
+									   IMG_UINT32 ui32MaxDepth,
+									   IMG_INT32 i32AcquireFenceFd,
+									   IMG_INT32 *pi32ReleaseFenceFd);
+
+PVRSRV_ERROR DCDisplayContextDestroy(DC_DISPLAY_CONTEXT *psDisplayContext);
+
+PVRSRV_ERROR DCBufferAlloc(DC_DISPLAY_CONTEXT *psDisplayContext,
+						   DC_BUFFER_CREATE_INFO *psSurfInfo,
+						   IMG_UINT32 *pui32ByteStride,
+						   DC_BUFFER **ppsBuffer);
+
+PVRSRV_ERROR DCBufferFree(DC_BUFFER *psBuffer);
+
+PVRSRV_ERROR DCBufferImport(DC_DISPLAY_CONTEXT *psDisplayContext,
+							IMG_UINT32 ui32NumPlanes,
+							PMR **papsImport,
+						    DC_BUFFER_IMPORT_INFO *psSurfAttrib,
+						    DC_BUFFER **ppsBuffer);
+
+PVRSRV_ERROR DCBufferUnimport(DC_BUFFER *psBuffer);
+
+PVRSRV_ERROR DCBufferAcquire(DC_BUFFER *psBuffer,
+							 PMR **psPMR);
+
+PVRSRV_ERROR DCBufferRelease(PMR *psPMR);
+
+PVRSRV_ERROR DCBufferPin(DC_BUFFER *psBuffer, DC_PIN_HANDLE *phPin);
+
+PVRSRV_ERROR DCBufferUnpin(DC_PIN_HANDLE hPin);
+
+PVRSRV_ERROR DCInit(IMG_VOID);
+PVRSRV_ERROR DCDeInit(IMG_VOID);
+
+#endif /*_DC_SERVER_H_  */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/debug_request_ids.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/debug_request_ids.h
new file mode 100644
index 0000000..6258c56
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/debug_request_ids.h
@@ -0,0 +1,55 @@
+/*************************************************************************/ /*!
+@File
+@Title          Debug requester ID's
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This header contains the defines for the debug ID's for all the
+				services components
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+ 
+#ifndef __DEBUG_REQUEST_IDS__
+#define __DEBUG_REQUEST_IDS__
+
+/* Services controlled devices should be 1st */
+#define DEBUG_REQUEST_RGX			(0)
+#define DEBUG_REQUEST_DC			(1)
+#define DEBUG_REQUEST_SERVERSYNC	(2)
+#define DEBUG_REQUEST_SYS           (3)
+#define DEBUG_REQUEST_ANDROIDSYNC   (4)
+
+#endif /* __DEBUG_REQUEST_IDS__ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/device.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/device.h
new file mode 100644
index 0000000..cf437b3
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/device.h
@@ -0,0 +1,301 @@
+/**************************************************************************/ /*!
+@File
+@Title          Common Device header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device related function templates and defines
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef __DEVICE_H__
+#define __DEVICE_H__
+
+
+#include "devicemem_heapcfg.h"
+#include "mmu_common.h"	
+#include "ra.h"  		/* RA_ARENA */
+#include "pvrsrv_device.h"
+#include "srvkm.h"
+#include "devicemem.h"
+#include "physheap.h"
+#include "sync.h"
+#include "dllist.h"
+#include "cache_external.h"
+
+#include "lock.h"
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#include "services.h"
+#endif
+
+/* BM context forward reference */
+typedef struct _BM_CONTEXT_ BM_CONTEXT;
+
+/*********************************************************************/ /*!
+ @Function      AllocUFOCallback
+ @Description   Device specific callback for allocation of an UFO block
+
+ @Input         psDeviceNode          Pointer to device node to allocate
+                                      the UFO for.
+ @Output        ppsMemDesc            Pointer to pointer for the memdesc of
+                                      the allocation
+ @Output        pui32SyncAddr         FW Base address of the UFO block
+ @Output        puiSyncPrimBlockSize  Size of the UFO block
+
+ @Return        PVRSRV_OK if allocation was successful
+ */
+/*********************************************************************/
+typedef PVRSRV_ERROR (*AllocUFOBlockCallback)(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode,
+														DEVMEM_MEMDESC **ppsMemDesc,
+														IMG_UINT32 *pui32SyncAddr,
+														IMG_UINT32 *puiSyncPrimBlockSize);
+
+/*********************************************************************/ /*!
+ @Function      FreeUFOCallback
+ @Description   Device specific callback for freeing of an UFO
+
+ @Input         psDeviceNode    Pointer to device node that the UFO block was
+                                allocated from.
+ @Input         psMemDesc       Pointer to pointer for the memdesc of
+                                the UFO block to free.
+ */
+/*********************************************************************/
+typedef IMG_VOID (*FreeUFOBlockCallback)(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode,
+										 DEVMEM_MEMDESC *psMemDesc);
+
+
+typedef struct _DEVICE_MEMORY_INFO_
+{
+	/* size of address space, as log2 */
+	IMG_UINT32				ui32AddressSpaceSizeLog2;
+
+	/* 
+		flags, includes physical memory resource types available to the system.  
+		Allows for validation at heap creation, define PVRSRV_BACKINGSTORE_XXX 
+	*/
+	IMG_UINT32				ui32Flags;
+
+	/* heap count.  Doesn't include additional heaps from PVRSRVCreateDeviceMemHeap */
+	IMG_UINT32				ui32HeapCount;
+
+	/* BM kernel context for the device */
+    BM_CONTEXT				*pBMKernelContext;
+
+	/* BM context list for the device*/
+    BM_CONTEXT				*pBMContext;
+
+    /* Blueprints for creating new device memory contexts */
+    IMG_UINT32              uiNumHeapConfigs;
+    DEVMEM_HEAP_CONFIG      *psDeviceMemoryHeapConfigArray;
+    DEVMEM_HEAP_BLUEPRINT   *psDeviceMemoryHeap;
+} DEVICE_MEMORY_INFO;
+
+
+typedef struct _Px_HANDLE_
+{
+	union
+	{
+		IMG_VOID *pvHandle;
+		IMG_UINT64 ui64Handle;
+	}u;
+} Px_HANDLE;
+
+typedef enum _PVRSRV_DEVICE_STATE_
+{
+	PVRSRV_DEVICE_STATE_UNDEFINED = 0,
+	PVRSRV_DEVICE_STATE_INIT,
+	PVRSRV_DEVICE_STATE_ACTIVE,
+	PVRSRV_DEVICE_STATE_DEINIT,
+} PVRSRV_DEVICE_STATE;
+
+typedef enum _PVRSRV_DEVICE_HEALTH_STATUS_
+{
+	PVRSRV_DEVICE_HEALTH_STATUS_OK = 0,
+	PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING,
+	PVRSRV_DEVICE_HEALTH_STATUS_DEAD
+} PVRSRV_DEVICE_HEALTH_STATUS;
+
+typedef enum _PVRSRV_DEVICE_HEALTH_REASON_
+{
+	PVRSRV_DEVICE_HEALTH_REASON_NONE = 0,
+	PVRSRV_DEVICE_HEALTH_REASON_ASSERTED,
+	PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING,
+	PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS,
+	PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT,
+	PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED
+} PVRSRV_DEVICE_HEALTH_REASON;
+
+typedef PVRSRV_ERROR (*FN_CREATERAMBACKEDPMR)(struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+										IMG_DEVMEM_SIZE_T uiSize,
+										IMG_DEVMEM_SIZE_T uiChunkSize,
+										IMG_UINT32 ui32NumPhysChunks,
+										IMG_UINT32 ui32NumVirtChunks,
+										IMG_BOOL *pabMappingTable,
+										IMG_UINT32 uiLog2PageSize,
+										PVRSRV_MEMALLOCFLAGS_T uiFlags,
+										PMR **ppsPMRPtr);
+typedef struct _PVRSRV_DEVICE_NODE_
+{
+	PVRSRV_DEVICE_IDENTIFIER	sDevId;
+
+	PVRSRV_DEVICE_STATE			eDevState;
+	PVRSRV_DEVICE_HEALTH_STATUS eHealthStatus;
+	PVRSRV_DEVICE_HEALTH_REASON eHealthReason;
+
+	/* device specific MMU attributes */
+    MMU_DEVICEATTRIBS      *psMMUDevAttrs;
+
+	/*
+		callbacks the device must support:
+	*/
+
+    FN_CREATERAMBACKEDPMR pfnCreateRamBackedPMR[PVRSRV_DEVICE_PHYS_HEAP_LAST];
+
+    PVRSRV_ERROR (*pfnMMUPxAlloc)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_SIZE_T uiSize,
+									Px_HANDLE *psMemHandle, IMG_DEV_PHYADDR *psDevPAddr);
+
+    IMG_VOID (*pfnMMUPxFree)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, Px_HANDLE *psMemHandle);
+
+	PVRSRV_ERROR (*pfnMMUPxMap)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, Px_HANDLE *pshMemHandle,
+								IMG_SIZE_T uiSize, IMG_DEV_PHYADDR *psDevPAddr,
+								IMG_VOID **pvPtr);
+
+	IMG_VOID (*pfnMMUPxUnmap)(struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+								Px_HANDLE *psMemHandle, IMG_VOID *pvPtr);
+
+	IMG_UINT32 uiMMUPxLog2AllocGran;
+	IMG_CHAR				*pszMMUPxPDumpMemSpaceName;
+
+	IMG_VOID (*pfnMMUCacheInvalidate)(struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+										IMG_HANDLE hDeviceData,
+										MMU_LEVEL eLevel,
+										IMG_BOOL bUnmap);
+
+	PVRSRV_ERROR (*pfnSLCCacheInvalidateRequest)(struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+										PMR *psPmr);
+
+	IMG_VOID (*pfnDumpDebugInfo)(struct _PVRSRV_DEVICE_NODE_ *psDevNode);
+
+	PVRSRV_ERROR (*pfnUpdateHealthStatus)(struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+	                                      IMG_BOOL bIsTimerPoll);
+
+	PVRSRV_ERROR (*pfnResetHWRLogs)(struct _PVRSRV_DEVICE_NODE_ *psDevNode);
+
+	/* Method to drain device HWPerf packets from firmware buffer to host buffer */
+	PVRSRV_ERROR (*pfnServiceHWPerf)(struct _PVRSRV_DEVICE_NODE_ *psDevNode);
+
+	PVRSRV_ERROR (*pfnDeviceVersionString)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_CHAR **ppszVersionString);
+
+	PVRSRV_ERROR (*pfnDeviceClockSpeed)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_PUINT32 pui32RGXClockSpeed);
+
+	PVRSRV_ERROR (*pfnSoftReset)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_UINT64 ui64ResetValue1, IMG_UINT64 ui64ResetValue2);
+
+	PVRSRV_DEVICE_CONFIG	*psDevConfig;
+
+	/* device post-finalise compatibility check */
+	PVRSRV_ERROR			(*pfnInitDeviceCompatCheck) (struct _PVRSRV_DEVICE_NODE_*,IMG_UINT32 ui32ClientBuildOptions);
+
+	/* information about the device's address space and heaps */
+	DEVICE_MEMORY_INFO		sDevMemoryInfo;
+
+	/* private device information */
+	IMG_VOID				*pvDevice;
+
+	IMG_CHAR				szRAName[50];
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+	RA_ARENA                *psOSidSubArena[GPUVIRT_VALIDATION_NUM_OS];
+#endif
+
+	RA_ARENA				*psLocalDevMemArena;
+
+	/*
+	 * Pointers to the device's physical memory heap(s)
+	 * The first entry (apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL]) will be used for allocations
+	 *  where the PVRSRV_MEMALLOCFLAG_CPU_LOCAL flag is not set. Normally this will be an LMA heap
+	 *  (but the device configuration could specify a UMA heap here, if desired)
+	 * The second entry (apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL]) will be used for allocations
+	 *  where the PVRSRV_MEMALLOCFLAG_CPU_LOCAL flag is set. Normally this will be a UMA heap
+	 *  (but the configuration could specify an LMA heap here, if desired)
+	 * The device configuration will always specify two physical heap IDs - in the event of the device
+	 *  only using one physical heap, both of these IDs will be the same, and hence both pointers below
+	 *  will also be the same
+	 */
+	PHYS_HEAP				*apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_LAST];
+
+	struct _PVRSRV_DEVICE_NODE_	*psNext;
+	struct _PVRSRV_DEVICE_NODE_	**ppsThis;
+	
+	/* Functions for notification about memory contexts */
+	PVRSRV_ERROR			(*pfnRegisterMemoryContext)(struct _PVRSRV_DEVICE_NODE_	*psDeviceNode,
+														MMU_CONTEXT					*psMMUContext,
+														IMG_HANDLE					*hPrivData);
+	IMG_VOID				(*pfnUnregisterMemoryContext)(IMG_HANDLE hPrivData);
+
+	/* Funtions for allocation/freeing of UFOs */
+	AllocUFOBlockCallback	pfnAllocUFOBlock;	/*!< Callback for allocation of a block of UFO memory */
+	FreeUFOBlockCallback	pfnFreeUFOBlock;	/*!< Callback for freeing of a block of UFO memory */
+
+	PSYNC_PRIM_CONTEXT		hSyncPrimContext;
+
+	PVRSRV_CLIENT_SYNC_PRIM *psSyncPrim;
+
+	IMG_HANDLE				hCmdCompNotify;
+	IMG_HANDLE				hDbgReqNotify;
+
+#if defined(PDUMP)
+	/* 	device-level callback which is called when pdump.exe starts.
+	 *	Should be implemented in device-specific init code, e.g. rgxinit.c
+	 */
+	PVRSRV_ERROR			(*pfnPDumpInitDevice)(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
+	/* device-level callback to return pdump ID associated to a memory context */
+	IMG_UINT32				(*pfnMMUGetContextID)(IMG_HANDLE hDevMemContext);
+#endif
+} PVRSRV_DEVICE_NODE;
+
+PVRSRV_ERROR IMG_CALLCONV PVRSRVFinaliseSystem(IMG_BOOL bInitSuccesful,
+														IMG_UINT32 ui32ClientBuildOptions);
+
+PVRSRV_ERROR IMG_CALLCONV PVRSRVDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode,
+														IMG_UINT32 ui32ClientBuildOptions);
+
+	
+#endif /* __DEVICE_H__ */
+
+/******************************************************************************
+ End of file (device.h)
+******************************************************************************/
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/devicemem_heapcfg.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/devicemem_heapcfg.h
new file mode 100644
index 0000000..0dbd40a
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/devicemem_heapcfg.h
@@ -0,0 +1,150 @@
+/**************************************************************************/ /*!
+@File
+@Title          Temporary Device Memory 2 stuff
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Device memory management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef __DEVICEMEMHEAPCFG_H__
+#define __DEVICEMEMHEAPCFG_H__
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+/* FIXME: Find a better way of defining _PVRSRV_DEVICE_NODE_ */
+struct _PVRSRV_DEVICE_NODE_;
+
+/*
+  A "heap config" is a blueprint to be used for initial setting up of
+  heaps when a device memory context is created.
+
+  We define a data structure to define this, but it's really down to
+  the caller to populate it.  This is all expected to be in-kernel.
+  We provide an API that client code can use to enquire about the
+  blueprint, such that it may do the heap setup during the context
+  creation call on behalf of the user */
+
+/* blueprint for a single heap */
+typedef struct _DEVMEM_HEAP_BLUEPRINT_
+{
+    /* Name of this heap - for debug purposes, and perhaps for lookup
+       by name? */
+    const IMG_CHAR *pszName;
+
+    /* Virtual address of the beginning of the heap.  This _must_ be a
+       multiple of the data page size for the heap.  It is
+       _recommended_ that it be coarser than that - especially, it
+       should begin on a boundary appropriate to the MMU for the
+       device.  For Rogue, this is a Page Directory boundary, or 1GB
+       (virtual address a multiple of 0x0040000000). */
+    IMG_DEV_VIRTADDR sHeapBaseAddr;
+
+    /* Length of the heap.  Given that the END address of the heap has
+       a similar restriction to that of the _beginning_ of the heap.
+       That is the heap length _must_ be a whole number of data pages.
+       Again, the recommendation is that it ends on a 1GB boundary.
+       Again, this is not essential, but we do know that (at the time
+       of writing) the current implementation of mmu_common.c is such
+       that no two heaps may share a page directory, thus the
+       remaining virtual space would be wasted if the length were not
+       a multiple of 1GB */
+    IMG_DEVMEM_SIZE_T uiHeapLength;
+
+    /* Data page size.  This is the page size that is going to get
+       programmed into the MMU, so it needs to be a valid one for the
+       device.  Importantly, the start address and length _must_ be
+       multiples of this page size.  Note that the page size is
+       specified as the log 2 relative to 1 byte (e.g. 12 indicates
+       4kB) */
+    IMG_UINT32 uiLog2DataPageSize;
+
+    /* Import alignment.  Force imports to this heap to be
+       aligned to at least this value */
+    IMG_UINT32 uiLog2ImportAlignment;
+} DEVMEM_HEAP_BLUEPRINT;
+
+/* entire named heap config */
+typedef struct _DEVMEM_HEAP_CONFIG_
+{
+    /* Name of this heap config - for debug and maybe lookup */
+    const IMG_CHAR *pszName;
+
+    /* Number of heaps in this config */
+    IMG_UINT32 uiNumHeaps;
+
+    /* Array of individual heap blueprints as defined above */
+    DEVMEM_HEAP_BLUEPRINT *psHeapBlueprintArray;
+} DEVMEM_HEAP_CONFIG;
+
+
+extern PVRSRV_ERROR
+HeapCfgHeapConfigCount(
+    const struct _PVRSRV_DEVICE_NODE_ *psDeviceNode,
+    IMG_UINT32 *puiNumHeapConfigsOut
+);
+
+extern PVRSRV_ERROR
+HeapCfgHeapCount(
+    const struct _PVRSRV_DEVICE_NODE_ *psDeviceNode,
+    IMG_UINT32 uiHeapConfigIndex,
+    IMG_UINT32 *puiNumHeapsOut
+);
+
+extern PVRSRV_ERROR
+HeapCfgHeapConfigName(
+    const struct _PVRSRV_DEVICE_NODE_ *psDeviceNode,
+    IMG_UINT32 uiHeapConfigIndex,
+    IMG_UINT32 uiHeapConfigNameBufSz,
+    IMG_CHAR *pszHeapConfigNameOut
+);
+
+extern PVRSRV_ERROR
+HeapCfgHeapDetails(
+    const struct _PVRSRV_DEVICE_NODE_ *psDeviceNode,
+    IMG_UINT32 uiHeapConfigIndex,
+    IMG_UINT32 uiHeapIndex,
+    IMG_UINT32 uiHeapNameBufSz,
+    IMG_CHAR *pszHeapNameOut,
+    IMG_DEV_VIRTADDR *psDevVAddrBaseOut,
+    IMG_DEVMEM_SIZE_T *puiHeapLengthOut,
+    IMG_UINT32 *puiLog2DataPageSizeOut,
+    IMG_UINT32 *puiLog2ImportAlignmentOut
+);
+
+#endif
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/devicemem_history_server.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/devicemem_history_server.h
new file mode 100644
index 0000000..bd90516
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/devicemem_history_server.h
@@ -0,0 +1,95 @@
+/*************************************************************************/ /*!
+@File			devicemem_history_server.h
+@Title          Resource Information abstraction
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description	Devicemem History functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _DEVICEMEM_HISTORY_SERVER_H_
+#define _DEVICEMEM_HISTORY_SERVER_H_
+
+#include "img_defs.h"
+#include "mm_common.h"
+#include "pvrsrv_error.h"
+
+extern PVRSRV_ERROR
+DevicememHistoryInitKM(IMG_VOID);
+
+extern IMG_VOID
+DevicememHistoryDeInitKM(IMG_VOID);
+
+extern PVRSRV_ERROR
+DevicememHistoryMapKM(IMG_DEV_VIRTADDR sDevVAddr, IMG_SIZE_T uiSize, const char szText[DEVICEMEM_HISTORY_TEXT_BUFSZ]);
+
+extern PVRSRV_ERROR
+DevicememHistoryUnmapKM(IMG_DEV_VIRTADDR sDevVAddr, IMG_SIZE_T uiSize, const char szText[DEVICEMEM_HISTORY_TEXT_BUFSZ]);
+
+typedef struct _DEVICEMEM_HISTORY_QUERY_IN_
+{
+	IMG_PID uiPID;
+	IMG_DEV_VIRTADDR sDevVAddr;
+} DEVICEMEM_HISTORY_QUERY_IN;
+
+/* store up to 2 results for a lookup. in the case of the faulting page being
+ * re-mapped between the page fault occurring on HW and the page fault analysis
+ * being done, the second result entry will show the allocation being unmapped
+ */
+#define DEVICEMEM_HISTORY_QUERY_OUT_MAX_RESULTS 2
+
+typedef struct _DEVICEMEM_HISTORY_QUERY_OUT_RESULT_
+{
+	IMG_CHAR szString[DEVICEMEM_HISTORY_TEXT_BUFSZ];
+	IMG_DEV_VIRTADDR sBaseDevVAddr;
+	IMG_SIZE_T uiSize;
+	IMG_BOOL bAllocated;
+	IMG_UINT64 ui64When;
+	IMG_UINT64 ui64Age;
+} DEVICEMEM_HISTORY_QUERY_OUT_RESULT;
+
+typedef struct _DEVICEMEM_HISTORY_QUERY_OUT_
+{
+	IMG_UINT32 ui32NumResults;
+	/* result 0 is the newest */
+	DEVICEMEM_HISTORY_QUERY_OUT_RESULT sResults[DEVICEMEM_HISTORY_QUERY_OUT_MAX_RESULTS];
+} DEVICEMEM_HISTORY_QUERY_OUT;
+
+extern IMG_BOOL
+DevicememHistoryQuery(DEVICEMEM_HISTORY_QUERY_IN *psQueryIn, DEVICEMEM_HISTORY_QUERY_OUT *psQueryOut);
+
+#endif
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/devicemem_server.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/devicemem_server.h
new file mode 100644
index 0000000..4fa596a
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/devicemem_server.h
@@ -0,0 +1,362 @@
+/**************************************************************************/ /*!
+@File
+@Title          Device Memory Management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header file for server side component of device memory management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef __DEVICEMEM_SERVER_H__
+#define __DEVICEMEM_SERVER_H__
+
+#include "device.h" /* For device node */
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+
+#include "pmr.h"
+
+typedef struct _DEVMEMINT_CTX_ DEVMEMINT_CTX;
+typedef struct _DEVMEMINT_CTX_EXPORT_ DEVMEMINT_CTX_EXPORT;
+typedef struct _DEVMEMINT_HEAP_ DEVMEMINT_HEAP;
+/* FIXME: can we unify RESERVATION and MAPPING to save data structures? */
+typedef struct _DEVMEMINT_RESERVATION_ DEVMEMINT_RESERVATION;
+typedef struct _DEVMEMINT_MAPPING_ DEVMEMINT_MAPPING;
+
+/*
+ * DevmemServerGetImportHandle()
+ *
+ * For given exportable memory descriptor returns PMR handle
+ *
+ */
+PVRSRV_ERROR
+DevmemServerGetImportHandle(DEVMEM_MEMDESC *psMemDesc,
+						   IMG_HANDLE *phImport);
+
+/*
+ * DevmemServerGetHeapHandle()
+ *
+ * For given reservation returns the Heap handle
+ *
+ */
+PVRSRV_ERROR
+DevmemServerGetHeapHandle(DEVMEMINT_RESERVATION *psReservation,
+						   IMG_HANDLE *phHeap);
+
+/*
+ * DevmemIntCtxCreate()
+ *
+ * Create a Server-side Device Memory Context.  This is usually the
+ * counterpart of the client side memory context, and indeed is
+ * usually created at the same time.
+ *
+ * You must have one of these before creating any heaps.
+ *
+ * All heaps must have been destroyed before calling
+ * DevmemIntCtxDestroy()
+ *
+ * If you call DevmemIntCtxCreate() (and it succeeds) you are promising
+ * to later call DevmemIntCtxDestroy()
+ *
+ * Note that this call will cause the device MMU code to do some work
+ * for creating the device memory context, but it does not guarantee
+ * that a page catalogue will have been created, as this may be
+ * deferred until first allocation.
+ *
+ * Caller to provide storage for a pointer to the DEVMEM_CTX object
+ * that will be created by this call.
+ */
+extern PVRSRV_ERROR
+DevmemIntCtxCreate(
+                 PVRSRV_DEVICE_NODE *psDeviceNode,
+                 /* devnode / perproc etc */
+
+                 DEVMEMINT_CTX **ppsDevmemCtxPtr,
+                 IMG_HANDLE *hPrivData
+                 );
+/*
+ * DevmemIntCtxDestroy()
+ *
+ * Undoes a prior DevmemIntCtxCreate or DevmemIntCtxImport.
+ */
+extern PVRSRV_ERROR
+DevmemIntCtxDestroy(
+                  DEVMEMINT_CTX *psDevmemCtx
+                  );
+
+/*
+ * DevmemIntCtxExport()
+ *
+ * Export a device memory context created with DevmemIntCtxCreate to another
+ * process
+ */
+ 
+extern PVRSRV_ERROR
+DevmemIntCtxExport(DEVMEMINT_CTX *psDevmemCtx,
+                   DEVMEMINT_CTX_EXPORT **ppsExport);
+
+/*
+ * DevmemIntCtxUnexport
+ *
+ * Unexport an exported a device memory context.
+ */
+extern PVRSRV_ERROR
+DevmemIntCtxUnexport(DEVMEMINT_CTX_EXPORT *psExport);
+
+/*
+ * DevmemIntCtxImport
+ *
+ * Import an exported a device memory context.
+ */
+extern PVRSRV_ERROR
+DevmemIntCtxImport(DEVMEMINT_CTX_EXPORT *psExport,
+				   DEVMEMINT_CTX **ppsDevmemCtxPtr,
+				   IMG_HANDLE *hPrivData);
+
+/*
+ * DevmemIntHeapCreate()
+ *
+ * Creates a new heap in this device memory context.  This will cause
+ * a call into the MMU code to allocate various data structures for
+ * managing this heap.  It will not necessarily cause any page tables
+ * to be set up, as this can be deferred until first allocation.
+ * (i.e. we shouldn't care - it's up to the MMU code)
+ *
+ * Note that the data page size must be specified (as log 2).  The
+ * data page size as specified here will be communicated to the mmu
+ * module, and thus may determine the page size configured in page
+ * directory entries for subsequent allocations from this heap.  It is
+ * essential that the page size here is less than or equal to the
+ * "minimum contiguity guarantee" of any PMR that you subsequently
+ * attempt to map to this heap.
+ *
+ * If you call DevmemIntHeapCreate() (and the call succeeds) you are
+ * promising that you shall subsequently call DevmemIntHeapDestroy()
+ *
+ * Caller to provide storage for a pointer to the DEVMEM_HEAP object
+ * that will be created by this call.
+ */
+extern PVRSRV_ERROR
+DevmemIntHeapCreate(
+                   DEVMEMINT_CTX *psDevmemCtx,
+                   IMG_DEV_VIRTADDR sHeapBaseAddr,
+                   IMG_DEVMEM_SIZE_T uiHeapLength,
+                   IMG_UINT32 uiLog2DataPageSize,
+                   DEVMEMINT_HEAP **ppsDevmemHeapPtr
+                   );
+/*
+ * DevmemIntHeapDestroy()
+ *
+ * Destroys a heap previously created with DevmemIntHeapCreate()
+ *
+ * All allocations from his heap must have been freed before this
+ * call.
+ */
+extern PVRSRV_ERROR
+DevmemIntHeapDestroy(
+                     DEVMEMINT_HEAP *psDevmemHeap
+                    );
+
+/*
+ * DevmemIntMapPMR()
+ *
+ * Maps the given PMR to the virtual range previously allocated with
+ * DevmemIntReserveRange()
+ *
+ * If appropriate, the PMR must have had its physical backing
+ * committed, as this call will call into the MMU code to set up the
+ * page tables for this allocation, which shall in turn request the
+ * physical addresses from the PMR.  Alternatively, the PMR
+ * implementation can choose to do so off the back of the "lock"
+ * callback, which it will receive as a result (indirectly) of this
+ * call.
+ *
+ * This function makes no promise w.r.t. the circumstances that it can
+ * be called, and these would be "inherited" from the implementation
+ * of the PMR.  For example if the PMR "lock" callback causes pages to
+ * be pinned at that time (which may cause scheduling or disk I/O
+ * etc.) then it would not be legal to "Map" the PMR in a context
+ * where scheduling events are disallowed.
+ *
+ * If you call DevmemIntMapPMR() (and the call succeeds) then you are
+ * promising that you shall later call DevmemIntUnmapPMR()
+ */
+extern PVRSRV_ERROR
+DevmemIntMapPMR(DEVMEMINT_HEAP *psDevmemHeap,
+                DEVMEMINT_RESERVATION *psReservation,
+                PMR *psPMR,
+                PVRSRV_MEMALLOCFLAGS_T uiMapFlags,
+                DEVMEMINT_MAPPING **ppsMappingPtr);
+/*
+ * DevmemIntUnmapPMR()
+ *
+ * Reverses the mapping caused by DevmemIntMapPMR()
+ */
+extern PVRSRV_ERROR
+DevmemIntUnmapPMR(DEVMEMINT_MAPPING *psMapping);
+
+/*
+ * DevmemIntReserveRange()
+ *
+ * Indicates that the specified range should be reserved from the
+ * given heap.
+ *
+ * In turn causes the page tables to be allocated to cover the
+ * specified range.
+ *
+ * If you call DevmemIntReserveRange() (and the call succeeds) then you
+ * are promising that you shall later call DevmemIntUnreserveRange()
+ */
+extern PVRSRV_ERROR
+DevmemIntReserveRange(DEVMEMINT_HEAP *psDevmemHeap,
+                      IMG_DEV_VIRTADDR sAllocationDevVAddr,
+                      IMG_DEVMEM_SIZE_T uiAllocationSize,
+                      DEVMEMINT_RESERVATION **ppsReservationPtr);
+/*
+ * DevmemIntUnreserveRange()
+ *
+ * Undoes the state change caused by DevmemIntReserveRage()
+ */
+extern PVRSRV_ERROR
+DevmemIntUnreserveRange(DEVMEMINT_RESERVATION *psDevmemReservation);
+
+/*
+ * SLCFlushInvalRequest()
+ *
+ * Schedules an SLC Flush & Invalidate on the firmware if required.
+ * If the request is performed depends on the caching attributes
+ * of the allocation and hence depends on the underlying PMR
+ */
+extern PVRSRV_ERROR
+DevmemSLCFlushInvalRequest(PVRSRV_DEVICE_NODE *psDeviceNode, PMR *psPmr);
+
+extern PVRSRV_ERROR
+DevmemIntIsVDevAddrValid(DEVMEMINT_CTX *psDevMemContext,
+                         IMG_DEV_VIRTADDR sDevAddr);
+
+#if defined(PDUMP)
+/*
+ * DevmemIntPDumpSaveToFileVirtual()
+ *
+ * Writes out PDump "SAB" commands with the data found in memory at
+ * the given virtual address.
+ */
+/* FIXME: uiArraySize shouldn't be here, and is an
+   artefact of the bridging */
+extern PVRSRV_ERROR
+DevmemIntPDumpSaveToFileVirtual(DEVMEMINT_CTX *psDevmemCtx,
+                                IMG_DEV_VIRTADDR sDevAddrStart,
+                                IMG_DEVMEM_SIZE_T uiSize,
+                                IMG_UINT32 uiArraySize,
+                                const IMG_CHAR *pszFilename,
+								IMG_UINT32 ui32FileOffset,
+								IMG_UINT32 ui32PDumpFlags);
+
+extern IMG_UINT32
+DevmemIntMMUContextID(DEVMEMINT_CTX *psDevMemContext);
+
+extern PVRSRV_ERROR
+DevmemIntPDumpBitmap(PVRSRV_DEVICE_NODE *psDeviceNode,
+						IMG_CHAR *pszFileName,
+						IMG_UINT32 ui32FileOffset,
+						IMG_UINT32 ui32Width,
+						IMG_UINT32 ui32Height,
+						IMG_UINT32 ui32StrideInBytes,
+						IMG_DEV_VIRTADDR sDevBaseAddr,
+						DEVMEMINT_CTX *psDevMemContext,
+						IMG_UINT32 ui32Size,
+						PDUMP_PIXEL_FORMAT ePixelFormat,
+						IMG_UINT32 ui32AddrMode,
+						IMG_UINT32 ui32PDumpFlags);
+#else	/* PDUMP */
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVSyncPrimPDumpPolKM)
+#endif
+static INLINE PVRSRV_ERROR
+DevmemIntPDumpSaveToFileVirtual(DEVMEMINT_CTX *psDevmemCtx,
+                                IMG_DEV_VIRTADDR sDevAddrStart,
+                                IMG_DEVMEM_SIZE_T uiSize,
+                                IMG_UINT32 uiArraySize,
+                                const IMG_CHAR *pszFilename,
+								IMG_UINT32 ui32FileOffset,
+								IMG_UINT32 ui32PDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psDevmemCtx);
+	PVR_UNREFERENCED_PARAMETER(sDevAddrStart);
+	PVR_UNREFERENCED_PARAMETER(uiSize);
+	PVR_UNREFERENCED_PARAMETER(uiArraySize);
+	PVR_UNREFERENCED_PARAMETER(pszFilename);
+	PVR_UNREFERENCED_PARAMETER(ui32FileOffset);
+	PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVSyncPrimPDumpPolKM)
+#endif
+static INLINE PVRSRV_ERROR
+DevmemIntPDumpBitmap(PVRSRV_DEVICE_NODE *psDeviceNode,
+						IMG_CHAR *pszFileName,
+						IMG_UINT32 ui32FileOffset,
+						IMG_UINT32 ui32Width,
+						IMG_UINT32 ui32Height,
+						IMG_UINT32 ui32StrideInBytes,
+						IMG_DEV_VIRTADDR sDevBaseAddr,
+						DEVMEMINT_CTX *psDevMemContext,
+						IMG_UINT32 ui32Size,
+						PDUMP_PIXEL_FORMAT ePixelFormat,
+						IMG_UINT32 ui32AddrMode,
+						IMG_UINT32 ui32PDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+	PVR_UNREFERENCED_PARAMETER(pszFileName);
+	PVR_UNREFERENCED_PARAMETER(ui32FileOffset);
+	PVR_UNREFERENCED_PARAMETER(ui32Width);
+	PVR_UNREFERENCED_PARAMETER(ui32Height);
+	PVR_UNREFERENCED_PARAMETER(ui32StrideInBytes);
+	PVR_UNREFERENCED_PARAMETER(sDevBaseAddr);
+	PVR_UNREFERENCED_PARAMETER(psDevMemContext);
+	PVR_UNREFERENCED_PARAMETER(ui32Size);
+	PVR_UNREFERENCED_PARAMETER(ePixelFormat);
+	PVR_UNREFERENCED_PARAMETER(ui32AddrMode);
+	PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+	return PVRSRV_OK;
+}
+#endif	/* PDUMP */
+#endif /* ifndef __DEVICEMEM_SERVER_H__ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/devicemem_server_utils.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/devicemem_server_utils.h
new file mode 100644
index 0000000..f492040
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/devicemem_server_utils.h
@@ -0,0 +1,191 @@
+/**************************************************************************/ /*!
+@File
+@Title          Device Memory Management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header file utilities that are specific to device memory functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_memallocflags.h"
+#include "pvrsrv.h"
+
+static INLINE IMG_UINT32 DevmemCPUCacheMode(PVRSRV_MEMALLOCFLAGS_T ulFlags)
+{
+	IMG_UINT32 ui32CPUCacheMode = ulFlags & PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK;
+	IMG_UINT32 ui32Ret;
+
+	PVR_ASSERT(ui32CPUCacheMode == (ulFlags & PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK));
+
+	switch (ui32CPUCacheMode)
+	{
+		case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
+			ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_UNCACHED;
+			break;
+
+		case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE:
+			ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE;
+			break;
+
+		case PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT:
+			ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_CACHED;
+			break;
+
+		case PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT:
+			/* Fall through */
+		case PVRSRV_MEMALLOCFLAG_CPU_CACHED_CACHE_COHERENT:
+			/*
+				If the allocation needs to be coherent what we end up doing
+				depends on the snooping features of the system
+			*/
+			if (PVRSRVSystemSnoopingOfCPUCache())
+			{
+				/*
+					If the system has CPU cache snooping (tested above)
+					then the allocation should be cached ...
+				*/
+				ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_CACHED;
+			}
+			else
+			{
+				/* ... otherwise it should be uncached */
+				ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_UNCACHED;
+			}
+			break;
+
+		default:
+			PVR_LOG(("DevmemCPUCacheMode: Unknown CPU cache mode 0x%08x", ui32CPUCacheMode));
+			PVR_ASSERT(0);
+			/*
+				We should never get here, but if we do then setting the mode
+				to uncached is the safest thing to do.
+			*/
+			ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_UNCACHED;
+			break;
+	}
+
+	return ui32Ret;
+}
+
+static INLINE IMG_UINT32 DevmemDeviceCacheMode(PVRSRV_MEMALLOCFLAGS_T ulFlags)
+{
+	IMG_UINT32 ui32DeviceCacheMode = ulFlags & PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK;
+	IMG_UINT32 ui32Ret;
+
+	PVR_ASSERT(ui32DeviceCacheMode == (ulFlags & PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK));
+
+	switch (ui32DeviceCacheMode)
+	{
+		case PVRSRV_MEMALLOCFLAG_GPU_UNCACHED:
+			ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_UNCACHED;
+			break;
+
+		case PVRSRV_MEMALLOCFLAG_GPU_WRITE_COMBINE:
+			ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_WRITE_COMBINE;
+			break;
+
+		case PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT:
+			ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_CACHED;
+			break;
+
+		case PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT:
+			/* Fall through */
+		case PVRSRV_MEMALLOCFLAG_GPU_CACHED_CACHE_COHERENT:
+			/*
+				If the allocation needs to be coherent what we end up doing
+				depends on the snooping features of the system
+			*/
+			if (PVRSRVSystemSnoopingOfDeviceCache())
+			{
+				/*
+					If the system has GPU cache snooping (tested above)
+					then the allocation should be cached ...
+				*/
+				ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_CACHED;
+			}
+			else
+			{
+				/* ... otherwise it should be uncached */
+				ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_UNCACHED;
+			}
+			break;
+
+		default:
+			PVR_LOG(("DevmemDeviceCacheMode: Unknown device cache mode 0x%08x", ui32DeviceCacheMode));
+			PVR_ASSERT(0);
+			/*
+				We should never get here, but if we do then setting the mode
+				to uncached is the safest thing to do.
+			*/
+			ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_UNCACHED;
+			break;
+	}
+
+	return ui32Ret;
+}
+
+static INLINE IMG_BOOL DevmemCPUCacheCoherency(PVRSRV_MEMALLOCFLAGS_T ulFlags)
+{
+	IMG_UINT32 ui32CPUCacheMode = ulFlags & PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK;
+	IMG_BOOL bRet = IMG_FALSE;
+
+	PVR_ASSERT(ui32CPUCacheMode == (ulFlags & PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK));
+
+	if ((ui32CPUCacheMode == PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT) ||
+		(ui32CPUCacheMode == PVRSRV_MEMALLOCFLAG_CPU_CACHED_CACHE_COHERENT))
+	{
+		bRet = PVRSRVSystemSnoopingOfDeviceCache();
+	}
+	return bRet;
+}
+
+static INLINE IMG_BOOL DevmemDeviceCacheCoherency(PVRSRV_MEMALLOCFLAGS_T ulFlags)
+{
+	IMG_UINT32 ui32DeviceCacheMode = ulFlags & PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK;
+	IMG_BOOL bRet = IMG_FALSE;
+
+	PVR_ASSERT(ui32DeviceCacheMode == (ulFlags & PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK));
+
+	if ((ui32DeviceCacheMode == PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT) ||
+		(ui32DeviceCacheMode == PVRSRV_MEMALLOCFLAG_GPU_CACHED_CACHE_COHERENT))
+	{
+		bRet = PVRSRVSystemSnoopingOfCPUCache();
+	}
+	return bRet;
+}
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/handle.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/handle.h
new file mode 100644
index 0000000..2714c6d
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/handle.h
@@ -0,0 +1,220 @@
+/**************************************************************************/ /*!
+@File
+@Title          Handle Manager API
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Provide handle management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#if !defined(__HANDLE_H__)
+#define __HANDLE_H__
+
+/*
+ * Handle API
+ * ----------
+ * The handle API is intended to provide handles for kernel resources,
+ * which can then be passed back to user space processes.
+ *
+ * The following functions comprise the API.  Each function takes a
+ * pointer to a PVRSRV_HANDLE_BASE strcture, one of which is allocated
+ * for each process, and stored in the per-process data area.  Use
+ * KERNEL_HANDLE_BASE for handles not allocated for a particular process,
+ * or for handles that need to be allocated before the PVRSRV_HANDLE_BASE
+ * structure for the process is available.
+ *
+ * PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase,
+ * 	IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType,
+ * 	PVRSRV_HANDLE_ALLOC_FLAG eFlag);
+ *
+ * Allocate a handle phHandle, for the resource of type eType pointed to by
+ * pvData.
+ *
+ * For handles that have a definite lifetime, where the corresponding
+ * resource is explicitly created and destroyed, eFlag should be zero.
+ *
+ * If the resource is not explicitly created and destroyed, eFlag should be
+ * set to PVRSRV_HANDLE_ALLOC_FLAG_SHARED.  For a given process, the same
+ * handle will be returned each time a handle for the resource is allocated
+ * with the PVRSRV_HANDLE_ALLOC_FLAG_SHARED flag.
+ *
+ * If a particular resource may be referenced multiple times by a
+ * given process, setting eFlag to PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+ * will allow multiple handles to be allocated for the resource.
+ * Such handles cannot be found with PVRSRVFindHandle.
+ *
+ * PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase,
+ * 	IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType,
+ * 	PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent);
+ *
+ * This function is similar to PVRSRVAllocHandle, except that the allocated
+ * handles are associated with a parent handle, hParent, that has been
+ * allocated previously.  Subhandles are automatically deallocated when their
+ * parent handle is deallocated.
+ * Subhandles can be treated as ordinary handles.  For example, they may
+ * have subhandles of their own, and may be explicity deallocated using
+ * PVRSRVReleaseHandle (see below).
+ *
+ * PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase,
+ * 	IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType);
+ *
+ * Find the handle previously allocated for the resource pointed to by
+ * pvData, of type eType.  Handles allocated with the flag
+ * PVRSRV_HANDLE_ALLOC_FLAG_MULTI cannot be found using this
+ * function.
+ *
+ * PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase,
+ * 	IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType);
+ *
+ * Given a handle for a resource of type eType, return the pointer to the
+ * resource.
+ *
+ * PVRSRV_ERROR PVRSRVLookuSubHandle(PVRSRV_HANDLE_BASE *psBase,
+ * 	IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType,
+ * 	IMH_HANDLE hAncestor);
+ *
+ * Similar to PVRSRVLookupHandle, but checks the handle is a descendant
+ * of hAncestor.
+ *
+ * PVRSRV_ERROR PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase,
+ * 	IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType);
+ *
+ * Deallocate a handle of given type.
+ *
+ * PVRSRV_ERROR PVRSRVGetParentHandle(PVRSRV_HANDLE_BASE *psBase,
+ * 	IMG_PVOID *phParent, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType);
+ *
+ * Return the parent of a handle in *phParent, or IMG_NULL if the handle has
+ * no parent.
+ */
+
+#include "img_types.h"
+#include "hash.h"
+
+typedef enum
+{
+	PVRSRV_HANDLE_TYPE_NONE = 0,
+	PVRSRV_HANDLE_TYPE_DEV_NODE,
+	PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT,
+	PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT,
+	PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+	PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT,
+	PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_SECURE_EXPORT,
+	PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX,
+	PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX_EXPORT,
+	PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP,
+	PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION,
+	PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING,
+	PVRSRV_HANDLE_TYPE_RGX_FW_MEMDESC,
+	PVRSRV_HANDLE_TYPE_RGX_RTDATA_CLEANUP,
+	PVRSRV_HANDLE_TYPE_RGX_FREELIST,
+	PVRSRV_HANDLE_TYPE_RGX_RPM_CONTEXT_CLEANUP,
+	PVRSRV_HANDLE_TYPE_RGX_RPM_FREELIST,
+	PVRSRV_HANDLE_TYPE_RGX_MEMORY_BLOCK,
+	PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT,
+	PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT,
+	PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT,
+	PVRSRV_HANDLE_TYPE_RGX_SERVER_RAY_CONTEXT,
+	PVRSRV_HANDLE_TYPE_SERVER_EXPORTCOOKIE,
+	PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+	PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+	PVRSRV_HANDLE_TYPE_SERVER_SYNC_EXPORT,
+	PVRSRV_HANDLE_TYPE_SERVER_OP_COOKIE,
+	PVRSRV_HANDLE_TYPE_SYNC_RECORD_HANDLE,
+	PVRSRV_HANDLE_TYPE_RGX_FWIF_RENDERTARGET,
+	PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER,
+	PVRSRV_HANDLE_TYPE_RGX_POPULATION,
+	PVRSRV_HANDLE_TYPE_DC_DEVICE,
+	PVRSRV_HANDLE_TYPE_DC_DISPLAY_CONTEXT,
+	PVRSRV_HANDLE_TYPE_DC_BUFFER,
+	PVRSRV_HANDLE_TYPE_DC_PIN_HANDLE,
+	PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT,
+	PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_PAGELIST,
+	PVRSRV_HANDLE_TYPE_PVR_TL_SD,
+	PVRSRV_HANDLE_TYPE_RI_HANDLE,
+	PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+	PVRSRV_HANDLE_TYPE_MM_PLAT_CLEANUP
+} PVRSRV_HANDLE_TYPE;
+
+typedef enum
+{
+	/* No flags */
+	PVRSRV_HANDLE_ALLOC_FLAG_NONE = 		0,
+	/* Share a handle that already exists for a given data pointer */
+	PVRSRV_HANDLE_ALLOC_FLAG_SHARED = 		0x01,
+	/* Muliple handles can point at the given data pointer */
+	PVRSRV_HANDLE_ALLOC_FLAG_MULTI = 		0x02,
+	/* Subhandles are allocated in a private handle space */
+	PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE = 		0x04
+} PVRSRV_HANDLE_ALLOC_FLAG;
+
+typedef struct _HANDLE_BASE_ PVRSRV_HANDLE_BASE;
+
+extern PVRSRV_HANDLE_BASE *gpsKernelHandleBase;
+#define	KERNEL_HANDLE_BASE (gpsKernelHandleBase)
+
+typedef PVRSRV_ERROR (*PFN_HANDLE_RELEASE)(void *pvData);
+
+PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, PFN_HANDLE_RELEASE pfnReleaseData);
+
+PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent);
+
+PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType);
+
+PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType);
+
+PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, IMG_HANDLE hAncestor);
+
+PVRSRV_ERROR PVRSRVGetParentHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phParent, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType);
+
+PVRSRV_ERROR PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType);
+
+PVRSRV_ERROR PVRSRVPurgeHandles(PVRSRV_HANDLE_BASE *psBase);
+
+PVRSRV_ERROR PVRSRVAllocHandleBase(PVRSRV_HANDLE_BASE **ppsBase);
+
+PVRSRV_ERROR PVRSRVFreeHandleBase(PVRSRV_HANDLE_BASE *psBase, IMG_UINT64 ui64MaxBridgeTime);
+
+PVRSRV_ERROR PVRSRVHandleInit(void);
+
+PVRSRV_ERROR PVRSRVHandleDeInit(void);
+
+void LockHandle(void);
+void UnlockHandle(void);
+
+
+#endif /* !defined(__HANDLE_H__) */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/handle_impl.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/handle_impl.h
new file mode 100644
index 0000000..95043d7
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/handle_impl.h
@@ -0,0 +1,89 @@
+/**************************************************************************/ /*!
+@File
+@Title          Implementation Callbacks for Handle Manager API
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Part of the handle manager API. This file is for declarations 
+                and definitions that are private/internal to the handle manager 
+                API but need to be shared between the generic handle manager 
+                code and the various handle manager backends, i.e. the code that 
+                implements the various callbacks.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#if !defined(__HANDLE_IMPL_H__)
+#define __HANDLE_IMPL_H__
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+typedef struct _HANDLE_IMPL_BASE_ HANDLE_IMPL_BASE;
+
+typedef PVRSRV_ERROR (*PFN_HANDLE_ITER)(IMG_HANDLE hHandle, void *pvData);
+
+typedef struct _HANDLE_IMPL_FUNCTAB_
+{
+	/* Acquire a new handle which is associated with the given data */
+	PVRSRV_ERROR (*pfnAcquireHandle)(HANDLE_IMPL_BASE *psHandleBase, IMG_HANDLE *phHandle, void *pvData);
+
+	/* Release the given handle (optionally returning the data associated with it) */
+	PVRSRV_ERROR (*pfnReleaseHandle)(HANDLE_IMPL_BASE *psHandleBase, IMG_HANDLE hHandle, void **ppvData);
+
+	/* Get the data associated with the given handle */
+	PVRSRV_ERROR (*pfnGetHandleData)(HANDLE_IMPL_BASE *psHandleBase, IMG_HANDLE hHandle, void **ppvData);
+
+	/* Set the data associated with the given handle */
+	PVRSRV_ERROR (*pfnSetHandleData)(HANDLE_IMPL_BASE *psHandleBase, IMG_HANDLE hHandle, void *pvData);
+
+	PVRSRV_ERROR (*pfnIterateOverHandles)(HANDLE_IMPL_BASE *psHandleBase, PFN_HANDLE_ITER pfnHandleIter, void *pvHandleIterData);
+
+	/* Enable handle purging on the given handle base */
+	PVRSRV_ERROR (*pfnEnableHandlePurging)(HANDLE_IMPL_BASE *psHandleBase);
+
+	/* Purge handles on the given handle base */
+	PVRSRV_ERROR (*pfnPurgeHandles)(HANDLE_IMPL_BASE *psHandleBase);
+
+	/* Create handle base */
+	PVRSRV_ERROR (*pfnCreateHandleBase)(HANDLE_IMPL_BASE **psHandleBase);
+
+	/* Destroy handle base */
+	PVRSRV_ERROR (*pfnDestroyHandleBase)(HANDLE_IMPL_BASE *psHandleBase);
+} HANDLE_IMPL_FUNCTAB;
+
+PVRSRV_ERROR PVRSRVHandleGetFuncTable(HANDLE_IMPL_FUNCTAB const **ppsFuncs);
+
+#endif /* !defined(__HANDLE_IMPL_H__) */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/lists.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/lists.h
new file mode 100644
index 0000000..a0e7cc4
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/lists.h
@@ -0,0 +1,336 @@
+/*************************************************************************/ /*!
+@File
+@Title          Linked list shared functions templates.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Definition of the linked list function templates.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __LISTS_UTILS__
+#define __LISTS_UTILS__
+
+/* instruct QAC to ignore warnings about the following custom formatted macros */
+/* PRQA S 0881,3410 ++ */
+#include <stdarg.h>
+#include "img_types.h"
+#include "device.h"
+#include "power.h"
+
+/*
+ - USAGE -
+
+ The list functions work with any structure that provides the fields psNext and
+ ppsThis. In order to make a function available for a given type, it is required
+ to use the funcion template macro that creates the actual code.
+
+ There are 4 main types of functions:
+ - INSERT	: given a pointer to the head pointer of the list and a pointer to
+ 			  the node, inserts it as the new head.
+ - REMOVE	: given a pointer to a node, removes it from its list.
+ - FOR EACH	: apply a function over all the elements of a list.
+ - ANY		: apply a function over the elements of a list, until one of them
+ 			  return a non null value, and then returns it.
+
+ The two last functions can have a variable argument form, with allows to pass
+ additional parameters to the callback function. In order to do this, the
+ callback function must take two arguments, the first is the current node and
+ the second is a list of variable arguments (va_list).
+
+ The ANY functions have also another for wich specifies the return type of the
+ callback function and the default value returned by the callback function.
+
+*/
+
+/*************************************************************************/ /*!
+@Function       List_##TYPE##_ForEach
+@Description    Apply a callback function to all the elements of a list.
+@Input          psHead        The head of the list to be processed.
+@Input          pfnCallBack   The function to be applied to each element of the list.
+*/ /**************************************************************************/
+#define DECLARE_LIST_FOR_EACH(TYPE) \
+IMG_VOID List_##TYPE##_ForEach(TYPE *psHead, IMG_VOID(*pfnCallBack)(TYPE* psNode))
+
+#define IMPLEMENT_LIST_FOR_EACH(TYPE) \
+IMG_VOID List_##TYPE##_ForEach(TYPE *psHead, IMG_VOID(*pfnCallBack)(TYPE* psNode))\
+{\
+	while(psHead)\
+	{\
+		pfnCallBack(psHead);\
+		psHead = psHead->psNext;\
+	}\
+}
+
+/*************************************************************************/ /*!
+@Function       List_##TYPE##_ForEachSafe
+@Description    Apply a callback function to all the elements of a list. Do it
+                in a safe way that handles the fact that a node might remove itself
+                from the list during the iteration.
+@Input          psHead        The head of the list to be processed.
+@Input          pfnCallBack   The function to be applied to each element of the list.
+*/ /**************************************************************************/
+#define DECLARE_LIST_FOR_EACH_SAFE(TYPE) \
+IMG_VOID List_##TYPE##_ForEachSafe(TYPE *psHead, IMG_VOID(*pfnCallBack)(TYPE* psNode))
+
+#define IMPLEMENT_LIST_FOR_EACH_SAFE(TYPE) \
+IMG_VOID List_##TYPE##_ForEachSafe(TYPE *psHead, IMG_VOID(*pfnCallBack)(TYPE* psNode))\
+{\
+	TYPE *psNext;\
+\
+	while(psHead)\
+	{\
+		psNext = psHead->psNext; \
+		pfnCallBack(psHead);\
+		psHead = psNext;\
+	}\
+}
+
+
+#define DECLARE_LIST_FOR_EACH_VA(TYPE) \
+IMG_VOID List_##TYPE##_ForEach_va(TYPE *psHead, IMG_VOID(*pfnCallBack)(TYPE* psNode, va_list va), ...)
+
+#define IMPLEMENT_LIST_FOR_EACH_VA(TYPE) \
+IMG_VOID List_##TYPE##_ForEach_va(TYPE *psHead, IMG_VOID(*pfnCallBack)(TYPE* psNode, va_list va), ...) \
+{\
+	va_list ap;\
+	while(psHead)\
+	{\
+		va_start(ap, pfnCallBack);\
+		pfnCallBack(psHead, ap);\
+		psHead = psHead->psNext;\
+		va_end(ap);\
+	}\
+}
+
+
+/*************************************************************************/ /*!
+@Function       List_##TYPE##_Any
+@Description    Applies a callback function to the elements of a list until the function
+                returns a non null value, then returns it.
+@Input          psHead        The head of the list to be processed.
+@Input          pfnCallBack   The function to be applied to each element of the list.
+@Return         The first non null value returned by the callback function.
+*/ /**************************************************************************/
+#define DECLARE_LIST_ANY(TYPE) \
+IMG_VOID* List_##TYPE##_Any(TYPE *psHead, IMG_VOID* (*pfnCallBack)(TYPE* psNode))
+
+#define IMPLEMENT_LIST_ANY(TYPE) \
+IMG_VOID* List_##TYPE##_Any(TYPE *psHead, IMG_VOID* (*pfnCallBack)(TYPE* psNode))\
+{ \
+	IMG_VOID *pResult;\
+	TYPE *psNextNode;\
+	pResult = IMG_NULL;\
+	psNextNode = psHead;\
+	while(psHead && !pResult)\
+	{\
+		psNextNode = psNextNode->psNext;\
+		pResult = pfnCallBack(psHead);\
+		psHead = psNextNode;\
+	}\
+	return pResult;\
+}
+
+
+/*with variable arguments, that will be passed as a va_list to the callback function*/
+
+#define DECLARE_LIST_ANY_VA(TYPE) \
+IMG_VOID* List_##TYPE##_Any_va(TYPE *psHead, IMG_VOID*(*pfnCallBack)(TYPE* psNode, va_list va), ...)
+
+#define IMPLEMENT_LIST_ANY_VA(TYPE) \
+IMG_VOID* List_##TYPE##_Any_va(TYPE *psHead, IMG_VOID*(*pfnCallBack)(TYPE* psNode, va_list va), ...)\
+{\
+	va_list ap;\
+	TYPE *psNextNode;\
+	IMG_VOID* pResult = IMG_NULL;\
+	while(psHead && !pResult)\
+	{\
+		psNextNode = psHead->psNext;\
+		va_start(ap, pfnCallBack);\
+		pResult = pfnCallBack(psHead, ap);\
+		va_end(ap);\
+		psHead = psNextNode;\
+	}\
+	return pResult;\
+}
+
+/*those ones are for extra type safety, so there's no need to use castings for the results*/
+
+#define DECLARE_LIST_ANY_2(TYPE, RTYPE, CONTINUE) \
+RTYPE List_##TYPE##_##RTYPE##_Any(TYPE *psHead, RTYPE (*pfnCallBack)(TYPE* psNode))
+
+#define IMPLEMENT_LIST_ANY_2(TYPE, RTYPE, CONTINUE) \
+RTYPE List_##TYPE##_##RTYPE##_Any(TYPE *psHead, RTYPE (*pfnCallBack)(TYPE* psNode))\
+{ \
+	RTYPE result;\
+	TYPE *psNextNode;\
+	result = CONTINUE;\
+	psNextNode = psHead;\
+	while(psHead && result == CONTINUE)\
+	{\
+		psNextNode = psNextNode->psNext;\
+		result = pfnCallBack(psHead);\
+		psHead = psNextNode;\
+	}\
+	return result;\
+}
+
+
+#define DECLARE_LIST_ANY_VA_2(TYPE, RTYPE, CONTINUE) \
+RTYPE List_##TYPE##_##RTYPE##_Any_va(TYPE *psHead, RTYPE(*pfnCallBack)(TYPE* psNode, va_list va), ...)
+
+#define IMPLEMENT_LIST_ANY_VA_2(TYPE, RTYPE, CONTINUE) \
+RTYPE List_##TYPE##_##RTYPE##_Any_va(TYPE *psHead, RTYPE(*pfnCallBack)(TYPE* psNode, va_list va), ...)\
+{\
+	va_list ap;\
+	TYPE *psNextNode;\
+	RTYPE result = CONTINUE;\
+	while(psHead && result == CONTINUE)\
+	{\
+		psNextNode = psHead->psNext;\
+		va_start(ap, pfnCallBack);\
+		result = pfnCallBack(psHead, ap);\
+		va_end(ap);\
+		psHead = psNextNode;\
+	}\
+	return result;\
+}
+
+
+/*************************************************************************/ /*!
+@Function       List_##TYPE##_Remove
+@Description    Removes a given node from the list.
+@Input          psNode      The pointer to the node to be removed.
+*/ /**************************************************************************/
+#define DECLARE_LIST_REMOVE(TYPE) \
+IMG_VOID List_##TYPE##_Remove(TYPE *psNode)
+
+#define IMPLEMENT_LIST_REMOVE(TYPE) \
+IMG_VOID List_##TYPE##_Remove(TYPE *psNode)\
+{\
+	(*psNode->ppsThis)=psNode->psNext;\
+	if(psNode->psNext)\
+	{\
+		psNode->psNext->ppsThis = psNode->ppsThis;\
+	}\
+}
+
+/*************************************************************************/ /*!
+@Function       List_##TYPE##_Insert
+@Description    Inserts a given node at the beginnning of the list.
+@Input          psHead   The pointer to the pointer to the head node.
+@Input          psNode   The pointer to the node to be inserted.
+*/ /**************************************************************************/
+#define DECLARE_LIST_INSERT(TYPE) \
+IMG_VOID List_##TYPE##_Insert(TYPE **ppsHead, TYPE *psNewNode)
+
+#define IMPLEMENT_LIST_INSERT(TYPE) \
+IMG_VOID List_##TYPE##_Insert(TYPE **ppsHead, TYPE *psNewNode)\
+{\
+	psNewNode->ppsThis = ppsHead;\
+	psNewNode->psNext = *ppsHead;\
+	*ppsHead = psNewNode;\
+	if(psNewNode->psNext)\
+	{\
+		psNewNode->psNext->ppsThis = &(psNewNode->psNext);\
+	}\
+}
+
+/*************************************************************************/ /*!
+@Function       List_##TYPE##_Reverse
+@Description    Reverse a list in place
+@Input          ppsHead    The pointer to the pointer to the head node.
+*/ /**************************************************************************/
+#define DECLARE_LIST_REVERSE(TYPE) \
+IMG_VOID List_##TYPE##_Reverse(TYPE **ppsHead)
+
+#define IMPLEMENT_LIST_REVERSE(TYPE) \
+IMG_VOID List_##TYPE##_Reverse(TYPE **ppsHead)\
+{\
+    TYPE *psTmpNode1; \
+    TYPE *psTmpNode2; \
+    TYPE *psCurNode; \
+	psTmpNode1 = IMG_NULL; \
+	psCurNode = *ppsHead; \
+	while(psCurNode) { \
+    	psTmpNode2 = psCurNode->psNext; \
+        psCurNode->psNext = psTmpNode1; \
+		psTmpNode1 = psCurNode; \
+		psCurNode = psTmpNode2; \
+		if(psCurNode) \
+		{ \
+			psTmpNode1->ppsThis = &(psCurNode->psNext); \
+		} \
+		else \
+		{ \
+			psTmpNode1->ppsThis = ppsHead;		\
+		} \
+	} \
+	*ppsHead = psTmpNode1; \
+}
+
+#define IS_LAST_ELEMENT(x) ((x)->psNext == IMG_NULL)
+
+
+DECLARE_LIST_ANY(PVRSRV_DEVICE_NODE);
+DECLARE_LIST_ANY_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK);
+DECLARE_LIST_ANY_VA(PVRSRV_DEVICE_NODE);
+DECLARE_LIST_ANY_VA_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK);
+DECLARE_LIST_FOR_EACH(PVRSRV_DEVICE_NODE);
+DECLARE_LIST_FOR_EACH_VA(PVRSRV_DEVICE_NODE);
+DECLARE_LIST_INSERT(PVRSRV_DEVICE_NODE);
+DECLARE_LIST_REMOVE(PVRSRV_DEVICE_NODE);
+
+DECLARE_LIST_ANY_VA(PVRSRV_POWER_DEV);
+DECLARE_LIST_ANY_VA_2(PVRSRV_POWER_DEV, PVRSRV_ERROR, PVRSRV_OK);
+DECLARE_LIST_INSERT(PVRSRV_POWER_DEV);
+DECLARE_LIST_REMOVE(PVRSRV_POWER_DEV);
+
+#undef DECLARE_LIST_ANY_2
+#undef DECLARE_LIST_ANY_VA
+#undef DECLARE_LIST_ANY_VA_2
+#undef DECLARE_LIST_FOR_EACH
+#undef DECLARE_LIST_FOR_EACH_VA
+#undef DECLARE_LIST_INSERT
+#undef DECLARE_LIST_REMOVE
+
+IMG_VOID* MatchDeviceKM_AnyVaCb(PVRSRV_DEVICE_NODE* psDeviceNode, va_list va);
+IMG_VOID* MatchPowerDeviceIndex_AnyVaCb(PVRSRV_POWER_DEV *psPowerDev, va_list va);
+
+#endif
+
+/* re-enable warnings */
+/* PRQA S 0881,3410 -- */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/mmu_common.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/mmu_common.h
new file mode 100644
index 0000000..e382eaf
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/mmu_common.h
@@ -0,0 +1,587 @@
+/**************************************************************************/ /*!
+@File
+@Title          Common MMU Management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements basic low level control of MMU.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef MMU_COMMON_H
+#define MMU_COMMON_H
+
+/*
+	The Memory Management Unit (MMU) performs device virtual to physical translation.
+
+	Terminology:
+	 - page catalogue, PC	(optional, 3 tier MMU)
+	 - page directory, PD
+	 - page table, PT (can be variable sized)
+	 - data page, DP (can be variable sized)
+    Note: PD and PC are fixed size and can't be larger than 
+           the native physical (CPU) page size
+	Shifts and AlignShift variables:
+	 - 'xxxShift' represent the number of bits a bitfield is shifted left from bit0 
+	 - 'xxxAlignShift' is used to convert a bitfield (based at bit0) into byte units 
+	 	by applying a bit shift left by 'xxxAlignShift' bits
+*/
+
+/*
+	Device Virtual Address Config:
+
+	Incoming Device Virtual Address is deconstructed into up to 4
+	fields, where the virtual address is up to 64bits:
+	MSB-----------------------------------------------LSB
+	| PC Index:   | PD Index:  | PT Index: | DP offset: |
+	| d bits      | c bits     | b-v bits  |  a+v bits  |
+	-----------------------------------------------------
+	where v is the variable page table modifier, e.g.
+			v == 0 -> 4KB DP
+			v == 2 -> 16KB DP
+			v == 4 -> 64KB DP
+			v == 6 -> 256KB DP
+			v == 8 -> 1MB DP
+			v == 10 -> 4MB DP
+*/
+
+/* services/server/include/ */
+#include "pmr.h"
+
+/* include/ */
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "servicesext.h"
+
+/*!
+	The level of the MMU
+*/
+typedef enum
+{
+	MMU_LEVEL_0 = 0,	/* Level 0 = Page */
+
+	MMU_LEVEL_1,
+	MMU_LEVEL_2,
+	MMU_LEVEL_3,
+	MMU_LEVEL_LAST
+} MMU_LEVEL;
+
+/* moved after declaration of MMU_LEVEL, as pdump_mmu.h references it */
+#include "pdump_mmu.h"
+
+#define MMU_MAX_LEVEL 3
+
+struct _MMU_DEVVADDR_CONFIG_;
+
+/*!
+	MMU device attributes. This structure is the interface between the generic
+	MMU code and the device specific MMU code.
+*/
+typedef struct _MMU_DEVICEATTRIBS_
+{
+	PDUMP_MMU_TYPE eMMUType;
+
+	/*! The type of the top level object */
+	MMU_LEVEL eTopLevel;
+
+	/*! Alignment requirement of the base object */
+	IMG_UINT32 ui32BaseAlign;
+
+	/*! HW config of the base object */
+	struct _MMU_PxE_CONFIG_ *psBaseConfig;
+
+	/*! Address split for the base object */
+	const struct _MMU_DEVVADDR_CONFIG_ *psTopLevelDevVAddrConfig;
+
+	/*! Callback for creating protection bits for the page catalogue entry with 8 byte entry */
+	IMG_UINT64 (*pfnDerivePCEProt8)(IMG_UINT32, IMG_UINT8);
+	/*! Callback for creating protection bits for the page catalogue entry with 4 byte entry */
+	IMG_UINT32 (*pfnDerivePCEProt4)(IMG_UINT32);
+	/*! Callback for creating protection bits for the page directory entry with 8 byte entry */
+	IMG_UINT64 (*pfnDerivePDEProt8)(IMG_UINT32, IMG_UINT8);
+	/*! Callback for creating protection bits for the page directory entry with 4 byte entry */
+	IMG_UINT32 (*pfnDerivePDEProt4)(IMG_UINT32);
+	/*! Callback for creating protection bits for the page table entry with 8 byte entry */
+	IMG_UINT64 (*pfnDerivePTEProt8)(IMG_UINT32, IMG_UINT8);
+	/*! Callback for creating protection bits for the page table entry with 4 byte entry */
+	IMG_UINT32 (*pfnDerivePTEProt4)(IMG_UINT32);
+
+	/*! Callback for getting the MMU configuration based on the specified page size */
+	PVRSRV_ERROR (*pfnGetPageSizeConfiguration)(IMG_UINT32 ui32DataPageSize,
+												const struct _MMU_PxE_CONFIG_ **ppsMMUPDEConfig,
+												const struct _MMU_PxE_CONFIG_ **ppsMMUPTEConfig,
+												const struct _MMU_DEVVADDR_CONFIG_ **ppsMMUDevVAddrConfig,
+												IMG_HANDLE *phPriv2);
+	/*! Callback for putting the MMU configuration obtained from pfnGetPageSizeConfiguration */
+	PVRSRV_ERROR (*pfnPutPageSizeConfiguration)(IMG_HANDLE hPriv);
+
+	/*! Callback for getting the page size from the PDE for the page table entry with 4 byte entry */
+	PVRSRV_ERROR (*pfnGetPageSizeFromPDE4)(IMG_UINT32, IMG_UINT32 *);
+	/*! Callback for getting the page size from the PDE for the page table entry with 8 byte entry */
+	PVRSRV_ERROR (*pfnGetPageSizeFromPDE8)(IMG_UINT64, IMG_UINT32 *);
+
+	/*! Private data handle */
+	IMG_HANDLE hGetPageSizeFnPriv;
+} MMU_DEVICEATTRIBS;
+
+/*!
+	MMU virtual address split
+*/
+typedef struct _MMU_DEVVADDR_CONFIG_
+{
+	/*! Page catalogue index mask */
+	IMG_UINT64	uiPCIndexMask;
+	/*! Page catalogue index shift */
+	IMG_UINT8	uiPCIndexShift;
+	/*! Page directory mask */
+	IMG_UINT64	uiPDIndexMask;
+	/*! Page directory shift */
+	IMG_UINT8	uiPDIndexShift;
+	/*! Page table mask */
+	IMG_UINT64	uiPTIndexMask;
+	/*! Page index shift */
+	IMG_UINT8	uiPTIndexShift;
+	/*! Page offset mask */
+	IMG_UINT64	uiPageOffsetMask;
+	/*! Page offset shift */
+	IMG_UINT8	uiPageOffsetShift;
+} MMU_DEVVADDR_CONFIG;
+
+/*
+	P(C/D/T) Entry Config:
+
+	MSB-----------------------------------------------LSB
+	| PT Addr:   | variable PT ctrl | protection flags: |
+	| bits c+v   | b bits           | a bits            |
+	-----------------------------------------------------
+	where v is the variable page table modifier and is optional
+*/
+/*!
+	Generic MMU page * entry description. This is used to describe PC, PD and PT
+*/
+typedef struct _MMU_PxE_CONFIG_
+{
+	/*! Size of an entry in bytes */
+	IMG_UINT8	uiBytesPerEntry;
+
+	/*! Physical address mask */
+	IMG_UINT64	 uiAddrMask;
+	/*! Physical address shift */
+	IMG_UINT8	 uiAddrShift;
+	/*! Log 2 alignment */
+	IMG_UINT8	 uiLog2Align;
+
+	/*! Variable control mask */
+	IMG_UINT64	 uiVarCtrlMask;
+	/*! Variable control shift */
+	IMG_UINT8	 uiVarCtrlShift;
+
+	/*! Protection flags mask */
+	IMG_UINT64	 uiProtMask;
+	/*! Protection flags shift */
+	IMG_UINT8	 uiProtShift;
+
+	/*! Entry valid bit mask */
+	IMG_UINT64   uiValidEnMask;
+	/*! Entry valid bit shift */
+	IMG_UINT8    uiValidEnShift;
+} MMU_PxE_CONFIG;
+
+/* MMU Protection flags */
+
+
+/* These are specified generically and in a h/w independent way, and
+   are interpreted at each level (PC/PD/PT) separately. */
+
+/* The following flags are for internal use only, and should not
+   traverse the API */
+#define MMU_PROTFLAGS_INVALID 0x80000000U
+
+typedef IMG_UINT32 MMU_PROTFLAGS_T;
+
+/* The following flags should be supplied by the caller: */
+#define MMU_PROTFLAGS_READABLE	   				(1U<<0)
+#define MMU_PROTFLAGS_WRITEABLE		   		    (1U<<1)
+#define MMU_PROTFLAGS_CACHE_COHERENT			(1U<<2)
+#define MMU_PROTFLAGS_CACHED					(1U<<3)
+
+/* Device specific flags*/
+#define MMU_PROTFLAGS_DEVICE_OFFSET		16
+#define MMU_PROTFLAGS_DEVICE_MASK		0x000f0000UL
+#define MMU_PROTFLAGS_DEVICE(n)	\
+			(((n) << MMU_PROTFLAGS_DEVICE_OFFSET) & \
+			MMU_PROTFLAGS_DEVICE_MASK)
+
+typedef struct _MMU_CONTEXT_ MMU_CONTEXT;
+
+struct _PVRSRV_DEVICE_NODE_; 
+
+/*************************************************************************/ /*!
+@Function       MMU_ContextCreate
+
+@Description    Create a new MMU context
+
+@Input          psDevNode               Device node of the device to create the
+                                        MMU context for
+
+@Output         ppsMMUContext           The created MMU context
+
+@Return         PVRSRV_OK if the MMU context was successfully created
+*/
+/*****************************************************************************/
+extern PVRSRV_ERROR
+MMU_ContextCreate (struct _PVRSRV_DEVICE_NODE_ *psDevNode, MMU_CONTEXT **ppsMMUContext);
+
+
+/*************************************************************************/ /*!
+@Function       MMU_ContextDestroy
+
+@Description    Destroy a MMU context
+
+@Input          ppsMMUContext           MMU context to destroy
+
+@Return         None
+*/
+/*****************************************************************************/
+extern IMG_VOID
+MMU_ContextDestroy (MMU_CONTEXT *psMMUContext);
+
+/*************************************************************************/ /*!
+@Function       MMU_Alloc
+
+@Description    Allocate the page tables required for the specified virtual range
+
+@Input          psMMUContext            MMU context to operate on
+
+@Input          uSize                   The size of the allocation
+
+@Output         puActualSize            Actual size of allocation
+
+@Input          uiProtFlags             Generic MMU protection flags
+
+@Input          uDevVAddrAlignment      Alignment requirement of the virtual
+                                        allocation
+
+@Input          psDevVAddr              Virtual address to start the allocation
+                                        from
+
+@Return         PVRSRV_OK if the allocation of the page tables was successful
+*/
+/*****************************************************************************/
+extern PVRSRV_ERROR
+MMU_Alloc (MMU_CONTEXT *psMMUContext,
+           IMG_DEVMEM_SIZE_T uSize,
+           IMG_DEVMEM_SIZE_T *puActualSize,
+           IMG_UINT32 uiProtFlags,
+           IMG_DEVMEM_SIZE_T uDevVAddrAlignment,
+           IMG_DEV_VIRTADDR *psDevVAddr,
+           IMG_UINT8 uiLog2PageSize);
+
+
+/*************************************************************************/ /*!
+@Function       MMU_Free
+
+@Description    Free the page tables of the specified virtual range
+
+@Input          psMMUContext            MMU context to operate on
+
+@Input          psDevVAddr              Virtual address to start the free
+                                        from
+
+@Input          uSize                   The size of the allocation
+
+@Return         None
+*/
+/*****************************************************************************/
+extern IMG_VOID
+MMU_Free (MMU_CONTEXT *psMMUContext,
+          IMG_DEV_VIRTADDR sDevVAddr,
+          IMG_DEVMEM_SIZE_T uiSize,
+          IMG_UINT8 uiLog2PageSize);
+
+/*************************************************************************/ /*!
+@Function       MMU_UnmapPages
+
+@Description    Unmap pages from the MMU.
+
+@Input          psMMUContext            MMU context to operate on
+
+@Input          psDevVAddr              Device virtual address of the 1st page
+
+@Input          ui32PageCount           Number of pages to unmap
+
+@Return         None
+*/
+/*****************************************************************************/
+extern IMG_VOID
+MMU_UnmapPages (MMU_CONTEXT *psMMUContext,
+                IMG_DEV_VIRTADDR sDevVAddr,
+                IMG_UINT32 ui32PageCount,
+                IMG_UINT8 uiLog2PageSize);
+
+/*************************************************************************/ /*!
+@Function       MMU_MapPMR
+
+@Description    Map a PMR into the MMU.
+
+@Input          psMMUContext            MMU context to operate on
+
+@Input          sDevVAddr               Device virtual address to map the PMR
+                                        into
+
+@Input          psPMR                   PMR to map
+
+@Input          uiSizeBytes             Size in bytes to map
+
+@Input          uiMappingFlags          Memalloc flags for the mapping
+
+@Return         PVRSRV_OK if the PMR was successfully mapped
+*/
+/*****************************************************************************/
+extern PVRSRV_ERROR
+MMU_MapPMR (MMU_CONTEXT *psMMUContext,
+            IMG_DEV_VIRTADDR sDevVAddr,
+            const PMR *psPMR,
+            IMG_DEVMEM_SIZE_T uiSizeBytes,
+            PVRSRV_MEMALLOCFLAGS_T uiMappingFlags,
+            IMG_UINT8 uiLog2PageSize);
+
+/*************************************************************************/ /*!
+@Function       MMU_AcquireBaseAddr
+
+@Description    Acquire the device physical address of the base level MMU object
+
+@Input          psMMUContext            MMU context to operate on
+
+@Output         psPhysAddr              Device physical address of the base level
+                                        MMU object
+
+@Return         PVRSRV_OK if successful
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+MMU_AcquireBaseAddr(MMU_CONTEXT *psMMUContext, IMG_DEV_PHYADDR *psPhysAddr);
+
+/*************************************************************************/ /*!
+@Function       MMU_ReleaseBaseAddr
+
+@Description    Release the device physical address of the base level MMU object
+
+@Input          psMMUContext            MMU context to operate on
+
+@Return         PVRSRV_OK if successful
+*/
+/*****************************************************************************/
+IMG_VOID
+MMU_ReleaseBaseAddr(MMU_CONTEXT *psMMUContext);
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+/***********************************************************************************/ /*!
+@Function       MMU_SetOSid
+
+@Description    Set the OSid associated with the application (and the MMU Context)
+
+@Input          psMMUContext            MMU context to store the OSid on
+
+@Input          ui32OSid                the OSid in question
+
+@Input			ui32OSidReg				The value that the firmware will assign to the
+										registers.
+@Return None
+*/
+/***********************************************************************************/
+
+IMG_VOID MMU_SetOSids(MMU_CONTEXT *psMMUContext, IMG_UINT32 ui32OSid, IMG_UINT32 ui32OSidReg);
+
+/***********************************************************************************/ /*!
+@Function       MMU_GetOSid
+
+@Description    Retrieve the OSid associated with the MMU context.
+
+@Input          psMMUContext            MMU context to store the OSid on
+
+@Output			ui32OSid                the OSid in question
+
+@Output			ui32OSidReg				The OSid that the firmware will assign to the
+										registers
+@Return None
+*/
+/***********************************************************************************/
+
+IMG_VOID MMU_GetOSids(MMU_CONTEXT *psMMUContext, IMG_UINT32 * pui32OSid, IMG_UINT32 * pui32OSidReg);
+#endif
+
+/*************************************************************************/ /*!
+@Function       MMU_SetDeviceData
+
+@Description    Set the device specific callback data
+
+@Input          psMMUContext            MMU context to store the data on
+
+@Input          hDevData                Device data
+
+@Return         None
+*/
+/*****************************************************************************/
+IMG_VOID MMU_SetDeviceData(MMU_CONTEXT *psMMUContext, IMG_HANDLE hDevData);
+
+/*************************************************************************/ /*!
+@Function       MMU_CheckFaultAddress
+
+@Description    Check the specified MMU context to see if the provided address
+                should be valid
+
+@Input          psMMUContext            MMU context to store the data on
+
+@Input          psDevVAddr              Address to check
+
+@Return         None
+*/
+/*****************************************************************************/
+IMG_VOID MMU_CheckFaultAddress(MMU_CONTEXT *psMMUContext, IMG_DEV_VIRTADDR *psDevVAddr);
+
+/*************************************************************************/ /*!
+@Function       MMUI_IsVDevAddrValid
+@Description    Checks if given address is valid.
+@Input          psMMUContext MMU context to store the data on
+@Input          uiLog2PageSize page size
+@Input          psDevVAddr Address to check
+@Return         IMG_TRUE of address is valid
+*/ /**************************************************************************/
+IMG_BOOL MMU_IsVDevAddrValid(MMU_CONTEXT *psMMUContext,
+                             IMG_UINT32 uiLog2PageSize,
+                             IMG_DEV_VIRTADDR sDevVAddr);
+
+#if defined(PDUMP)
+/*************************************************************************/ /*!
+@Function       MMU_ContextDerivePCPDumpSymAddr
+
+@Description    Derives a PDump Symbolic address for the top level MMU object
+
+@Input          psMMUContext                    MMU context to operate on
+
+@Input          pszPDumpSymbolicNameBuffer      Buffer to write the PDump symbolic
+                                                address to
+
+@Input          uiPDumpSymbolicNameBufferSize   Size of the buffer
+
+@Return         PVRSRV_OK if successful
+*/
+/*****************************************************************************/
+extern PVRSRV_ERROR MMU_ContextDerivePCPDumpSymAddr(MMU_CONTEXT *psMMUContext,
+                                                    IMG_CHAR *pszPDumpSymbolicNameBuffer,
+                                                    IMG_SIZE_T uiPDumpSymbolicNameBufferSize);
+
+/*************************************************************************/ /*!
+@Function       MMU_PDumpWritePageCatBase
+
+@Description    PDump write of the top level MMU object to a device register
+
+@Input          psMMUContext        MMU context to operate on
+
+@Input          pszSpaceName		PDump name of the mem/reg space
+
+@Input          uiOffset			Offset to write the address to
+
+@Return         PVRSRV_OK if successful
+*/
+/*****************************************************************************/
+PVRSRV_ERROR MMU_PDumpWritePageCatBase(MMU_CONTEXT *psMMUContext,
+        								const IMG_CHAR *pszSpaceName,
+        								IMG_DEVMEM_OFFSET_T uiOffset,
+        								IMG_UINT32 ui32WordSize,
+        								IMG_UINT32 ui32AlignShift,
+        								IMG_UINT32 ui32Shift,
+        								PDUMP_FLAGS_T uiPdumpFlags);
+
+/*************************************************************************/ /*!
+@Function       MMU_AcquirePDumpMMUContext
+
+@Description    Acquire a reference to the PDump MMU context for this MMU
+                context
+
+@Input          psMMUContext            MMU context to operate on
+
+@Input          pszRegSpaceName         PDump name of the register space
+
+@Output         pui32PDumpMMUContextID  PDump MMU context ID
+
+@Return         PVRSRV_OK if successful
+*/
+/*****************************************************************************/
+PVRSRV_ERROR MMU_AcquirePDumpMMUContext(MMU_CONTEXT *psMMUContext, IMG_UINT32 *pui32PDumpMMUContextID);
+
+/*************************************************************************/ /*!
+@Function       MMU_ReleasePDumpMMUContext
+
+@Description    Release a reference to the PDump MMU context for this MMU context
+
+@Input          psMMUContext            MMU context to operate on
+
+@Input          pszRegSpaceName         PDump name of the register space
+
+@Output         pui32PDumpMMUContextID  PDump MMU context ID
+
+@Return         PVRSRV_OK if successful
+*/
+/*****************************************************************************/
+PVRSRV_ERROR MMU_ReleasePDumpMMUContext(MMU_CONTEXT *psMMUContext);
+#else	/* PDUMP */
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(MMU_PDumpWritePageCatBase)
+#endif
+static INLINE IMG_VOID
+MMU_PDumpWritePageCatBase(MMU_CONTEXT *psMMUContext,
+        						const IMG_CHAR *pszSpaceName,
+        						IMG_DEVMEM_OFFSET_T uiOffset,
+        						IMG_UINT32 ui32WordSize,
+        						IMG_UINT32 ui32AlignShift,
+        						IMG_UINT32 ui32Shift,
+        						PDUMP_FLAGS_T uiPdumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psMMUContext);
+	PVR_UNREFERENCED_PARAMETER(pszSpaceName);
+	PVR_UNREFERENCED_PARAMETER(uiOffset);
+	PVR_UNREFERENCED_PARAMETER(ui32WordSize);
+	PVR_UNREFERENCED_PARAMETER(ui32AlignShift);
+	PVR_UNREFERENCED_PARAMETER(ui32Shift);
+	PVR_UNREFERENCED_PARAMETER(uiPdumpFlags);
+}
+#endif /* PDUMP */
+
+
+#endif /* #ifdef MMU_COMMON_H */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/osauth.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/osauth.h
new file mode 100644
index 0000000..c0d9191
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/osauth.h
@@ -0,0 +1,54 @@
+/**************************************************************************/ /*!
+@File
+@Title          OS Authentication header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines the interface between the OS and the bridge to
+                authenticate a function called from the client
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef __OSAUTH_H__
+#define __OSAUTH_H__
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "connection_server.h"
+
+PVRSRV_ERROR OSCheckAuthentication(CONNECTION_DATA *psConnectionData, IMG_UINT32 ui32Level);
+
+#endif
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/osconnection_server.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/osconnection_server.h
new file mode 100644
index 0000000..dbd2f66
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/osconnection_server.h
@@ -0,0 +1,89 @@
+/**************************************************************************/ /*!
+@File
+@Title          Server side connection management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    API for OS specific callbacks from server side connection
+                management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+#ifndef _OSCONNECTION_SERVER_H_
+#define _OSCONNECTION_SERVER_H_
+
+#include "handle.h"
+
+
+#if defined(__linux__) || defined(__QNXNTO__)
+PVRSRV_ERROR OSConnectionPrivateDataInit(IMG_HANDLE *phOsPrivateData, IMG_PVOID pvOSData);
+PVRSRV_ERROR OSConnectionPrivateDataDeInit(IMG_HANDLE hOsPrivateData);
+
+PVRSRV_ERROR OSConnectionSetHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase);
+#else	/* defined(__linux__) */
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(OSConnectionPrivateDataInit)
+#endif
+static INLINE PVRSRV_ERROR OSConnectionPrivateDataInit(IMG_HANDLE *phOsPrivateData, IMG_PVOID pvOSData)
+{
+	PVR_UNREFERENCED_PARAMETER(phOsPrivateData);
+	PVR_UNREFERENCED_PARAMETER(pvOSData);
+
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(OSConnectionPrivateDataDeInit)
+#endif
+static INLINE PVRSRV_ERROR OSConnectionPrivateDataDeInit(IMG_HANDLE hOsPrivateData)
+{
+	PVR_UNREFERENCED_PARAMETER(hOsPrivateData);
+
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(OSPerProcessSetHandleOptions)
+#endif
+static INLINE PVRSRV_ERROR OSConnectionSetHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase)
+{
+	PVR_UNREFERENCED_PARAMETER(psHandleBase);
+
+	return PVRSRV_OK;
+}
+#endif	/* defined(__linux__) */
+
+
+#endif /* _OSCONNECTION_SERVER_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/osfunc.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/osfunc.h
new file mode 100644
index 0000000..1918a59
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/osfunc.h
@@ -0,0 +1,443 @@
+/**************************************************************************/ /*!
+@File
+@Title          OS functions header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    OS specific API definitions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifdef DEBUG_RELEASE_BUILD
+#pragma optimize( "", off )
+#define DEBUG		1
+#endif
+
+#ifndef __OSFUNC_H__
+#define __OSFUNC_H__
+
+
+#if defined(__KERNEL__) && defined(ANDROID) && !defined(__GENKSYMS__)
+#define __pvrsrv_defined_struct_enum__
+#include <services_kernel_client.h>
+#endif
+
+#include "img_types.h"
+#include "pvrsrv_device.h"
+#include "device.h"
+
+/******************************************************************************
+ * Static defines
+ *****************************************************************************/
+#define KERNEL_ID			0xffffffffL
+#define ISR_ID				0xfffffffdL
+
+IMG_UINT64 OSClockns64(IMG_VOID);
+IMG_UINT64 OSClockus64(IMG_VOID);
+IMG_UINT32 OSClockus(IMG_VOID);
+IMG_UINT32 OSClockms(IMG_VOID);
+
+IMG_SIZE_T OSGetPageSize(IMG_VOID);
+IMG_SIZE_T OSGetPageShift(IMG_VOID);
+IMG_SIZE_T OSGetPageMask(IMG_VOID);
+
+typedef IMG_VOID (*PFN_THREAD)(IMG_PVOID pvData);
+
+PVRSRV_ERROR OSInstallDeviceLISR(PVRSRV_DEVICE_CONFIG *psDevConfig,
+								 IMG_HANDLE *hLISRData,
+								 PFN_LISR pfnLISR,
+								 IMG_VOID *hData);
+PVRSRV_ERROR OSUninstallDeviceLISR(IMG_HANDLE hLISRData);
+
+PVRSRV_ERROR OSInstallMISR(IMG_HANDLE *hMISRData,
+						   PFN_MISR pfnMISR,
+						   IMG_VOID *hData);
+PVRSRV_ERROR OSUninstallMISR(IMG_HANDLE hMISRData);
+PVRSRV_ERROR OSScheduleMISR(IMG_HANDLE hMISRData);
+
+
+
+
+/*************************************************************************/ /*!
+@Function       OSThreadCreate
+@Description    Creates a kernel thread and starts it running. The caller
+                is responsible for informing the thread that it must finish
+                and return from the pfnThread function. It is not possible
+                to kill or terminate it.The new thread runs with the default
+                priority provided by the Operating System.
+@Output         phThread       Returned handle to the thread.
+@Input          pszThreadName  Name to assign to the thread.
+@Input          pfnThread      Thread entry point function.
+@Input          hData          Thread specific data pointer for pfnThread().
+@Return         Standard PVRSRV_ERROR error code.
+*/ /**************************************************************************/
+
+PVRSRV_ERROR OSThreadCreate(IMG_HANDLE *phThread,
+							IMG_CHAR *pszThreadName,
+							PFN_THREAD pfnThread,
+							IMG_VOID *hData);
+
+/*! Available priority levels for the creation of a new Kernel Thread. */
+typedef enum priority_levels
+{
+	HIGHEST_PRIORITY = 0,
+	HIGH_PRIRIOTY,
+	NORMAL_PRIORITY,
+	LOW_PRIORITY,
+	LOWEST_PRIORITY,
+	NOSET_PRIORITY,   /* With this option the priority level is is the default for the given OS */
+	LAST_PRIORITY     /* This must be always the last entry */
+} OS_THREAD_LEVEL;
+
+/*************************************************************************/ /*!
+@Function       OSThreadCreatePriority
+@Description    As OSThreadCreate, this function creates a kernel thread and
+                starts it running. The difference is that with this function
+                is possible to specify the priority used to schedule the new
+                thread.
+
+@Output         phThread        Returned handle to the thread.
+@Input          pszThreadName   Name to assign to the thread.
+@Input          pfnThread       Thread entry point function.
+@Input          hData           Thread specific data pointer for pfnThread().
+@Input          eThreadPriority Priority level to assign to the new thread.
+@Return         Standard PVRSRV_ERROR error code.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSThreadCreatePriority(IMG_HANDLE *phThread,
+                                    IMG_CHAR *pszThreadName,
+                                    PFN_THREAD pfnThread,
+                                    IMG_VOID *hData,
+                                    OS_THREAD_LEVEL eThreadPriority);
+
+/*************************************************************************/ /*!
+@Function       OSThreadDestroy
+@Description    Waits for the thread to end and then destroys the thread
+                handle memory. This function will block and wait for the
+                thread to finish successfully, thereby providing a sync point
+                for the thread completing its work. No attempt is made to kill
+                or otherwise terminate the thread.
+@Input          phThread  The thread handle returned by OSThreadCreate().
+@Return         Standard PVRSRV_ERROR error code.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSThreadDestroy(IMG_HANDLE hThread);
+
+IMG_VOID OSMemCopy(IMG_VOID *pvDst, const IMG_VOID *pvSrc, IMG_SIZE_T ui32Size);
+#define OSCachedMemCopy OSMemCopy
+#define OSDeviceMemCopy OSMemCopy
+IMG_VOID *OSMapPhysToLin(IMG_CPU_PHYADDR BasePAddr, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32Flags);
+IMG_BOOL OSUnMapPhysToLin(IMG_VOID *pvLinAddr, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32Flags);
+
+
+IMG_VOID OSCPUOperation(PVRSRV_CACHE_OP eCacheOp);
+
+IMG_VOID OSFlushCPUCacheRangeKM(IMG_PVOID pvVirtStart,
+								IMG_PVOID pvVirtEnd,
+								IMG_CPU_PHYADDR sCPUPhysStart,
+								IMG_CPU_PHYADDR sCPUPhysEnd);
+
+
+IMG_VOID OSCleanCPUCacheRangeKM(IMG_PVOID pvVirtStart,
+								IMG_PVOID pvVirtEnd,
+								IMG_CPU_PHYADDR sCPUPhysStart,
+								IMG_CPU_PHYADDR sCPUPhysEnd);
+
+IMG_VOID OSInvalidateCPUCacheRangeKM(IMG_PVOID pvVirtStart,
+									 IMG_PVOID pvVirtEnd,
+									 IMG_CPU_PHYADDR sCPUPhysStart,
+									 IMG_CPU_PHYADDR sCPUPhysEnd);
+
+
+IMG_PID OSGetCurrentProcessID(IMG_VOID);
+IMG_CHAR *OSGetCurrentProcessName(IMG_VOID);
+IMG_UINTPTR_T OSGetCurrentThreadID(IMG_VOID);
+IMG_VOID OSMemSet(IMG_VOID *pvDest, IMG_UINT8 ui8Value, IMG_SIZE_T ui32Size);
+#define OSCachedMemSet OSMemSet
+#define OSDeviceMemSet OSMemSet
+IMG_INT OSMemCmp(IMG_VOID *pvBufA, IMG_VOID *pvBufB, IMG_SIZE_T uiLen);
+
+PVRSRV_ERROR OSMMUPxAlloc(PVRSRV_DEVICE_NODE *psDevNode, IMG_SIZE_T uiSize,
+							Px_HANDLE *psMemHandle, IMG_DEV_PHYADDR *psDevPAddr);
+
+IMG_VOID OSMMUPxFree(PVRSRV_DEVICE_NODE *psDevNode, Px_HANDLE *psMemHandle);
+
+PVRSRV_ERROR OSMMUPxMap(PVRSRV_DEVICE_NODE *psDevNode, Px_HANDLE *psMemHandle,
+						IMG_SIZE_T uiSize, IMG_DEV_PHYADDR *psDevPAddr,
+						IMG_VOID **pvPtr);
+
+IMG_VOID OSMMUPxUnmap(PVRSRV_DEVICE_NODE *psDevNode, Px_HANDLE *psMemHandle, IMG_VOID *pvPtr);
+
+
+PVRSRV_ERROR OSInitEnvData(IMG_VOID);
+IMG_VOID OSDeInitEnvData(IMG_VOID);
+
+IMG_CHAR* OSStringNCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc, IMG_SIZE_T uSize);
+IMG_INT32 OSSNPrintf(IMG_CHAR *pStr, IMG_SIZE_T ui32Size, const IMG_CHAR *pszFormat, ...) IMG_FORMAT_PRINTF(3, 4);
+IMG_SIZE_T OSStringLength(const IMG_CHAR *pStr);
+IMG_SIZE_T OSStringNLength(const IMG_CHAR *pStr, IMG_SIZE_T uiCount);
+IMG_INT32 OSStringCompare(const IMG_CHAR *pStr1, const IMG_CHAR *pStr2);
+
+PVRSRV_ERROR OSEventObjectCreate(const IMG_CHAR *pszName,
+								 IMG_HANDLE *EventObject);
+PVRSRV_ERROR OSEventObjectDestroy(IMG_HANDLE hEventObject);
+PVRSRV_ERROR OSEventObjectSignal(IMG_HANDLE hEventObject);
+PVRSRV_ERROR OSEventObjectWait(IMG_HANDLE hOSEventKM);
+PVRSRV_ERROR OSEventObjectWaitTimeout(IMG_HANDLE hOSEventKM, IMG_UINT32 uiTimeoutMs);
+PVRSRV_ERROR OSEventObjectWaitAndHoldBridgeLock(IMG_HANDLE hOSEventKM);
+PVRSRV_ERROR OSEventObjectWaitTimeoutAndHoldBridgeLock(IMG_HANDLE hOSEventKM, IMG_UINT32 uiTimeoutMs);
+PVRSRV_ERROR OSEventObjectOpen(IMG_HANDLE hEventObject,
+											IMG_HANDLE *phOSEvent);
+PVRSRV_ERROR OSEventObjectClose(IMG_HANDLE hOSEventKM);
+
+/* Avoid macros so we don't evaluate pszSrc twice */
+static INLINE IMG_CHAR *OSStringCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc)
+{
+	IMG_CHAR  *result; /* Helps with klocwork, will be optimised away */
+
+	result = OSStringNCopy(pszDest, pszSrc, OSStringLength(pszSrc)+1);
+#if defined(__KLOCWORK__)
+	pszDest[OSStringLength(pszSrc)] = '\0';
+#endif
+	return result;
+}
+
+/*!
+******************************************************************************
+
+ @Function OSWaitus
+ 
+ @Description 
+    This function implements a busy wait of the specified microseconds
+    This function does NOT release thread quanta
+ 
+ @Input ui32Timeus - (us)
+
+ @Return IMG_VOID
+
+******************************************************************************/ 
+IMG_VOID OSWaitus(IMG_UINT32 ui32Timeus);
+
+
+/*!
+******************************************************************************
+
+ @Function OSSleepms
+ 
+ @Description 
+    This function implements a sleep of the specified milliseconds
+    This function may allow pre-emption if implemented
+ 
+ @Input ui32Timems - (ms)
+
+ @Return IMG_VOID
+
+******************************************************************************/ 
+IMG_VOID OSSleepms(IMG_UINT32 ui32Timems);
+
+IMG_VOID OSReleaseThreadQuanta(IMG_VOID);
+
+IMG_UINT8 OSReadHWReg8(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset);
+IMG_UINT16 OSReadHWReg16(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset);
+IMG_UINT32 OSReadHWReg32(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset);
+IMG_UINT64 OSReadHWReg64(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset);
+IMG_UINT64 OSReadHWRegBank(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT8 *pui8DstBuf, IMG_UINT64 ui64DstBufLen);
+
+IMG_VOID OSWriteHWReg8(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT8 ui32Value);
+IMG_VOID OSWriteHWReg16(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT16 ui32Value);
+IMG_VOID OSWriteHWReg32(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value);
+IMG_VOID OSWriteHWReg64(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT64 ui64Value);
+IMG_UINT64 OSWriteHWRegBank(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT8 *pui8SrcBuf, IMG_UINT64 ui64rcBufLen);
+
+typedef IMG_VOID (*PFN_TIMER_FUNC)(IMG_VOID*);
+IMG_HANDLE OSAddTimer(PFN_TIMER_FUNC pfnTimerFunc, IMG_VOID *pvData, IMG_UINT32 ui32MsTimeout);
+PVRSRV_ERROR OSRemoveTimer(IMG_HANDLE hTimer);
+PVRSRV_ERROR OSEnableTimer(IMG_HANDLE hTimer);
+PVRSRV_ERROR OSDisableTimer(IMG_HANDLE hTimer);
+
+
+/******************************************************************************
+
+ @Function		OSPanic
+
+ @Description	Take action in response to an unrecoverable driver error
+
+ @Input    IMG_VOID
+
+ @Return   IMG_VOID
+
+******************************************************************************/
+IMG_VOID OSPanic(IMG_VOID);
+
+IMG_BOOL OSProcHasPrivSrvInit(IMG_VOID);
+
+typedef enum _img_verify_test
+{
+	PVR_VERIFY_WRITE = 0,
+	PVR_VERIFY_READ
+} IMG_VERIFY_TEST;
+
+IMG_BOOL OSAccessOK(IMG_VERIFY_TEST eVerification, IMG_VOID *pvUserPtr, IMG_SIZE_T ui32Bytes);
+
+PVRSRV_ERROR OSCopyToUser(IMG_PVOID pvProcess, IMG_VOID *pvDest, const IMG_VOID *pvSrc, IMG_SIZE_T ui32Bytes);
+PVRSRV_ERROR OSCopyFromUser(IMG_PVOID pvProcess, IMG_VOID *pvDest, const IMG_VOID *pvSrc, IMG_SIZE_T ui32Bytes);
+
+#if defined (__linux__)
+#define OSBridgeCopyFromUser OSCopyFromUser
+#define OSBridgeCopyToUser OSCopyToUser
+#else
+PVRSRV_ERROR OSBridgeCopyFromUser (IMG_PVOID pvProcess,
+						IMG_PVOID pvDest,
+						const IMG_PVOID pvSrc,
+						IMG_SIZE_T ui32Bytes);
+PVRSRV_ERROR OSBridgeCopyToUser (IMG_PVOID pvProcess,
+						IMG_VOID *pvDest,
+						const IMG_VOID *pvSrc,
+						IMG_SIZE_T ui32Bytes);
+#endif
+
+PVRSRV_ERROR OSGetGlobalBridgeBuffers (IMG_VOID **ppvBridgeInBuffer,
+							IMG_UINT32 *pui32BridgeInBufferSize,
+							IMG_VOID **ppvBridgeOutBuffer,
+							IMG_UINT32 *pui32BridgeOutBufferSize);
+
+
+IMG_VOID OSWriteMemoryBarrier(IMG_VOID);
+IMG_VOID OSMemoryBarrier(IMG_VOID);
+
+#if defined(LINUX) && defined(__KERNEL__)
+
+/* Provide LockDep friendly definitions for Services RW locks */
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include "allocmem.h"
+
+typedef struct rw_semaphore *POSWR_LOCK;
+
+#define OSWRLockCreate(ppsLock) ({ \
+	PVRSRV_ERROR e = PVRSRV_ERROR_OUT_OF_MEMORY; \
+	*(ppsLock) = OSAllocMem(sizeof(struct rw_semaphore)); \
+	if (*(ppsLock)) { init_rwsem(*(ppsLock)); e = PVRSRV_OK; }; \
+	e;})
+#define OSWRLockDestroy(psLock) ({OSFreeMem(psLock); PVRSRV_OK;})
+
+#define OSWRLockAcquireRead(psLock) ({down_read(psLock); PVRSRV_OK;})
+#define OSWRLockReleaseRead(psLock) ({up_read(psLock); PVRSRV_OK;})
+#define OSWRLockAcquireWrite(psLock) ({down_write(psLock); PVRSRV_OK;})
+#define OSWRLockReleaseWrite(psLock) ({up_write(psLock); PVRSRV_OK;})
+
+#elif defined(LINUX) || defined(__QNXNTO__)
+/* User-mode unit tests use these definitions on Linux */
+
+typedef struct _OSWR_LOCK_ *POSWR_LOCK;
+
+PVRSRV_ERROR OSWRLockCreate(POSWR_LOCK *ppsLock);
+IMG_VOID OSWRLockDestroy(POSWR_LOCK psLock);
+IMG_VOID OSWRLockAcquireRead(POSWR_LOCK psLock);
+IMG_VOID OSWRLockReleaseRead(POSWR_LOCK psLock);
+IMG_VOID OSWRLockAcquireWrite(POSWR_LOCK psLock);
+IMG_VOID OSWRLockReleaseWrite(POSWR_LOCK psLock);
+
+#else
+struct _OSWR_LOCK_ {
+	IMG_UINT32 ui32Dummy;
+};
+static INLINE PVRSRV_ERROR OSWRLockCreate(POSWR_LOCK *ppsLock)
+{
+	PVR_UNREFERENCED_PARAMETER(ppsLock);
+	return PVRSRV_OK;
+}
+
+static INLINE IMG_VOID OSWRLockDestroy(POSWR_LOCK psLock)
+{
+	PVR_UNREFERENCED_PARAMETER(psLock);
+}
+
+static INLINE IMG_VOID OSWRLockAcquireRead(POSWR_LOCK psLock)
+{
+	PVR_UNREFERENCED_PARAMETER(psLock);
+}
+
+static INLINE IMG_VOID OSWRLockReleaseRead(POSWR_LOCK psLock)
+{
+	PVR_UNREFERENCED_PARAMETER(psLock);
+}
+
+static INLINE IMG_VOID OSWRLockAcquireWrite(POSWR_LOCK psLock)
+{
+	PVR_UNREFERENCED_PARAMETER(psLock);
+}
+
+static INLINE IMG_VOID OSWRLockReleaseWrite(POSWR_LOCK psLock)
+{
+	PVR_UNREFERENCED_PARAMETER(psLock);
+}
+#endif
+
+IMG_UINT64 OSDivide64r64(IMG_UINT64 ui64Divident, IMG_UINT32 ui32Divisor, IMG_UINT32 *pui32Remainder);
+IMG_UINT32 OSDivide64(IMG_UINT64 ui64Divident, IMG_UINT32 ui32Divisor, IMG_UINT32 *pui32Remainder);
+
+IMG_VOID OSDumpStack(IMG_VOID);
+
+IMG_VOID OSAcquireBridgeLock(IMG_VOID);
+IMG_VOID OSReleaseBridgeLock(IMG_VOID);
+
+
+/*
+ *  Functions for providing support for PID statistics.
+ */
+typedef void (OS_STATS_PRINTF_FUNC)(IMG_PVOID pvFilePtr, const IMG_CHAR *pszFormat, ...);
+ 
+typedef void (OS_STATS_PRINT_FUNC)(IMG_PVOID pvFilePtr,
+								   IMG_PVOID pvStatPtr,
+                                   OS_STATS_PRINTF_FUNC* pfnOSGetStatsPrintf);
+
+typedef IMG_UINT32 (OS_INC_STATS_MEM_REFCOUNT_FUNC)(IMG_PVOID pvStatPtr);
+typedef IMG_UINT32 (OS_DEC_STATS_MEM_REFCOUNT_FUNC)(IMG_PVOID pvStatPtr);
+IMG_PVOID OSCreateStatisticEntry(IMG_CHAR* pszName, IMG_PVOID pvFolder,
+                                 OS_STATS_PRINT_FUNC* pfnStatsPrint,
+							 	 OS_INC_STATS_MEM_REFCOUNT_FUNC* pfnIncMemRefCt,
+							 	 OS_DEC_STATS_MEM_REFCOUNT_FUNC* pfnDecMemRefCt,
+                                 IMG_PVOID pvData);
+IMG_VOID OSRemoveStatisticEntry(IMG_PVOID pvEntry);
+IMG_PVOID OSCreateStatisticFolder(IMG_CHAR *pszName, IMG_PVOID pvFolder);
+IMG_VOID OSRemoveStatisticFolder(IMG_PVOID pvFolder);
+
+
+#endif /* __OSFUNC_H__ */
+
+/******************************************************************************
+ End of file (osfunc.h)
+******************************************************************************/
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/ossecure_export.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/ossecure_export.h
new file mode 100644
index 0000000..c1c43e1
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/ossecure_export.h
@@ -0,0 +1,52 @@
+/**************************************************************************/ /*!
+@File
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "connection_server.h"
+
+PVRSRV_ERROR OSSecureExport(CONNECTION_DATA *psConnection,
+							IMG_PVOID pvData,
+							IMG_SECURE_TYPE *phSecure,
+							CONNECTION_DATA **ppsSecureConnection);
+							
+PVRSRV_ERROR OSSecureImport(IMG_SECURE_TYPE hSecure, IMG_PVOID *ppvData);
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/pdump_km.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/pdump_km.h
new file mode 100644
index 0000000..a93e369
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/pdump_km.h
@@ -0,0 +1,722 @@
+/*************************************************************************/ /*!
+@File
+@Title          pdump functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Main APIs for pdump functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _PDUMP_KM_H_
+#define _PDUMP_KM_H_
+
+/* services/srvkm/include/ */
+#include "device.h"
+
+/* include/ */
+#include "pvrsrv_error.h"
+#include "services.h"
+
+
+#if defined(__KERNEL__) && defined(ANDROID) && !defined(__GENKSYMS__)
+#define __pvrsrv_defined_struct_enum__
+#include <services_kernel_client.h>
+#endif
+
+#include "connection_server.h"
+#include "sync_server.h"
+/*
+ *	Pull in pdump flags from services include
+ */
+#include "pdump.h"
+#include "pdumpdefs.h"
+
+/* Define this to enable the PDUMP_HERE trace in the server */
+#undef PDUMP_TRACE
+
+#if defined(PDUMP_TRACE)
+#define PDUMP_HERE(a)	if (ui32Flags & PDUMP_FLAGS_DEBUG) PVR_DPF((PVR_DBG_WARNING, "HERE %d", (a)))
+#define PDUMP_HEREA(a)	PVR_DPF((PVR_DBG_WARNING, "HERE ALWAYS %d", (a)))
+#else
+#define PDUMP_HERE(a)	(void)(a);
+#define PDUMP_HEREA(a)	(void)(a);
+#endif
+
+#define PDUMP_PD_UNIQUETAG	(IMG_HANDLE)0
+#define PDUMP_PT_UNIQUETAG	(IMG_HANDLE)0
+
+
+#if defined(PDUMP_DEBUG_OUTFILES)
+/* counter increments each time debug write is called */
+extern IMG_UINT32 g_ui32EveryLineCounter;
+#endif
+
+typedef struct _PDUMP_CONNECTION_DATA_ PDUMP_CONNECTION_DATA;
+typedef PVRSRV_ERROR (*PFN_PDUMP_TRANSITION)(IMG_PVOID *pvData, IMG_BOOL bInto, IMG_BOOL bContinuous);
+
+/*! Macro used to record a panic in the PDump script stream */
+#define PDUMP_PANIC(_type, _id, _msg) do \
+		{ PVRSRV_ERROR _eE;\
+			_eE = PDumpPanic((PVRSRV_DEVICE_TYPE_ ## _type)<<16 | ((_type ## _PDUMP_PANIC_ ## _id)&0xFFFF), _msg, __FUNCTION__, __LINE__);\
+			PVR_LOG_IF_ERROR(_eE, "PDumpPanic");\
+		MSC_SUPPRESS_4127\
+		} while (0)
+
+#ifdef PDUMP
+	/* Shared across pdump_x files */
+	PVRSRV_ERROR PDumpInitCommon(IMG_VOID);
+	IMG_VOID PDumpDeInitCommon(IMG_VOID);
+	IMG_BOOL PDumpReady(IMG_VOID);
+	IMG_VOID PDumpGetParameterZeroPageInfo(PDUMP_FILEOFFSET_T *puiZeroPageOffset,
+									IMG_SIZE_T *puiZeroPageSize,
+									const IMG_CHAR **ppszZeroPageFilename);
+
+	IMG_VOID PDumpConnectionNotify(IMG_VOID);
+
+	PVRSRV_ERROR PDumpStartInitPhaseKM(IMG_VOID);
+	PVRSRV_ERROR PDumpStopInitPhaseKM(IMG_MODULE_ID eModuleID);
+	PVRSRV_ERROR PDumpSetFrameKM(CONNECTION_DATA *psConnection, IMG_UINT32 ui32Frame);
+	PVRSRV_ERROR PDumpGetFrameKM(CONNECTION_DATA *psConnection, IMG_UINT32* pui32Frame);
+	PVRSRV_ERROR PDumpCommentKM(IMG_CHAR *pszComment, IMG_UINT32 ui32Flags);
+
+	PVRSRV_ERROR PDumpSetDefaultCaptureParamsKM(IMG_UINT32 ui32Mode,
+	                                           IMG_UINT32 ui32Start,
+	                                           IMG_UINT32 ui32End,
+	                                           IMG_UINT32 ui32Interval,
+	                                           IMG_UINT32 ui32MaxParamFileSize);
+
+
+	PVRSRV_ERROR PDumpReg32(IMG_CHAR	*pszPDumpRegName,
+							IMG_UINT32	ui32RegAddr,
+							IMG_UINT32	ui32RegValue,
+							IMG_UINT32	ui32Flags);
+
+	PVRSRV_ERROR PDumpReg64(IMG_CHAR	*pszPDumpRegName,
+							IMG_UINT32	ui32RegAddr,
+							IMG_UINT64	ui64RegValue,
+							IMG_UINT32	ui32Flags);
+
+	PVRSRV_ERROR PDumpLDW(IMG_CHAR      *pcBuffer,
+	                      IMG_CHAR      *pszDevSpaceName,
+	                      IMG_UINT32    ui32OffsetBytes,
+	                      IMG_UINT32    ui32NumLoadBytes,
+	                      PDUMP_FLAGS_T uiPDumpFlags);
+
+	PVRSRV_ERROR PDumpSAW(IMG_CHAR      *pszDevSpaceName,
+	                      IMG_UINT32    ui32HPOffsetBytes,
+	                      IMG_UINT32    ui32NumSaveBytes,
+	                      IMG_CHAR      *pszOutfileName,
+	                      IMG_UINT32    ui32OutfileOffsetByte,
+	                      PDUMP_FLAGS_T uiPDumpFlags);
+
+	PVRSRV_ERROR PDumpRegPolKM(IMG_CHAR				*pszPDumpRegName,
+							   IMG_UINT32			ui32RegAddr,
+							   IMG_UINT32			ui32RegValue,
+							   IMG_UINT32			ui32Mask,
+							   IMG_UINT32			ui32Flags,
+							   PDUMP_POLL_OPERATOR	eOperator);
+
+	IMG_IMPORT PVRSRV_ERROR PDumpBitmapKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+										  IMG_CHAR *pszFileName,
+										  IMG_UINT32 ui32FileOffset,
+										  IMG_UINT32 ui32Width,
+										  IMG_UINT32 ui32Height,
+										  IMG_UINT32 ui32StrideInBytes,
+										  IMG_DEV_VIRTADDR sDevBaseAddr,
+										  IMG_UINT32 ui32MMUContextID,
+										  IMG_UINT32 ui32Size,
+										  PDUMP_PIXEL_FORMAT ePixelFormat,
+										  IMG_UINT32 ui32AddrMode,
+										  IMG_UINT32 ui32PDumpFlags);
+
+	IMG_IMPORT PVRSRV_ERROR PDumpReadRegKM(IMG_CHAR *pszPDumpRegName,
+										   IMG_CHAR *pszFileName,
+										   IMG_UINT32 ui32FileOffset,
+										   IMG_UINT32 ui32Address,
+										   IMG_UINT32 ui32Size,
+										   IMG_UINT32 ui32PDumpFlags);
+
+	PVRSRV_ERROR PDumpComment(IMG_CHAR* pszFormat, ...) IMG_FORMAT_PRINTF(1, 2);
+	PVRSRV_ERROR PDumpCommentWithFlags(IMG_UINT32	ui32Flags,
+									   IMG_CHAR*	pszFormat,
+									   ...) IMG_FORMAT_PRINTF(2, 3);
+
+	PVRSRV_ERROR PDumpPanic(IMG_UINT32      ui32PanicNo,
+							IMG_CHAR*       pszPanicMsg,
+							const IMG_CHAR* pszPPFunc,
+							IMG_UINT32      ui32PPline);
+
+	PVRSRV_ERROR PDumpPDReg(PDUMP_MMU_ATTRIB *psMMUAttrib,
+							IMG_UINT32	ui32Reg,
+							IMG_UINT32	ui32dwData,
+							IMG_HANDLE	hUniqueTag);
+	PVRSRV_ERROR PDumpPDRegWithFlags(PDUMP_MMU_ATTRIB *psMMUAttrib,
+									 IMG_UINT32		ui32Reg,
+									 IMG_UINT32		ui32Data,
+									 IMG_UINT32		ui32Flags,
+									 IMG_HANDLE		hUniqueTag);
+
+	IMG_BOOL PDumpIsLastCaptureFrameKM(IMG_VOID);
+
+	PVRSRV_ERROR PDumpIsCaptureFrameKM(IMG_BOOL *bIsCapturing);
+
+	IMG_VOID PDumpMallocPagesPhys(PVRSRV_DEVICE_IDENTIFIER	*psDevID,
+								  IMG_UINT64			ui64DevVAddr,
+								  IMG_PUINT32			pui32PhysPages,
+								  IMG_UINT32			ui32NumPages,
+								  IMG_HANDLE			hUniqueTag);
+	PVRSRV_ERROR PDumpSetMMUContext(PVRSRV_DEVICE_TYPE eDeviceType,
+									IMG_CHAR *pszMemSpace,
+									IMG_UINT32 *pui32MMUContextID,
+									IMG_UINT32 ui32MMUType,
+									IMG_HANDLE hUniqueTag1,
+									IMG_HANDLE hOSMemHandle,
+									IMG_VOID *pvPDCPUAddr);
+	PVRSRV_ERROR PDumpClearMMUContext(PVRSRV_DEVICE_TYPE eDeviceType,
+									IMG_CHAR *pszMemSpace,
+									IMG_UINT32 ui32MMUContextID,
+									IMG_UINT32 ui32MMUType);
+
+	PVRSRV_ERROR PDumpRegRead32(IMG_CHAR *pszPDumpRegName,
+								const IMG_UINT32 dwRegOffset,
+								IMG_UINT32	ui32Flags);
+	PVRSRV_ERROR PDumpRegRead64(IMG_CHAR *pszPDumpRegName,
+								const IMG_UINT32 dwRegOffset,
+								IMG_UINT32	ui32Flags);
+
+	PVRSRV_ERROR PDumpIDLWithFlags(IMG_UINT32 ui32Clocks, IMG_UINT32 ui32Flags);
+	PVRSRV_ERROR PDumpIDL(IMG_UINT32 ui32Clocks);
+
+	IMG_IMPORT PVRSRV_ERROR PDumpHWPerfCBKM(PVRSRV_DEVICE_IDENTIFIER *psDevId,
+										IMG_CHAR			*pszFileName,
+										IMG_UINT32			ui32FileOffset,
+										IMG_DEV_VIRTADDR	sDevBaseAddr,
+										IMG_UINT32 			ui32Size,
+										IMG_UINT32			ui32MMUContextID,
+										IMG_UINT32 			ui32PDumpFlags);
+
+	PVRSRV_ERROR PDumpRegBasedCBP(IMG_CHAR		*pszPDumpRegName,
+								  IMG_UINT32	ui32RegOffset,
+								  IMG_UINT32	ui32WPosVal,
+								  IMG_UINT32	ui32PacketSize,
+								  IMG_UINT32	ui32BufferSize,
+								  IMG_UINT32	ui32Flags);
+
+	PVRSRV_ERROR PDumpTRG(IMG_CHAR *pszMemSpace,
+	                      IMG_UINT32 ui32MMUCtxID,
+	                      IMG_UINT32 ui32RegionID,
+	                      IMG_BOOL bEnable,
+	                      IMG_UINT64 ui64VAddr,
+	                      IMG_UINT64 ui64LenBytes,
+	                      IMG_UINT32 ui32XStride,
+	                      IMG_UINT32 ui32Flags);
+
+	PVRSRV_ERROR PDumpCreateLockKM(IMG_VOID);
+	IMG_VOID PDumpDestroyLockKM(IMG_VOID);
+	IMG_VOID PDumpLockKM(IMG_VOID);
+	IMG_VOID PDumpUnlockKM(IMG_VOID);
+
+	/*
+	    Process persistence common API for use by common
+	    clients e.g. mmu and physmem.
+	 */
+	IMG_BOOL PDumpIsPersistent(IMG_VOID);
+	PVRSRV_ERROR PDumpAddPersistantProcess(IMG_VOID);
+
+	PVRSRV_ERROR PDumpIfKM(IMG_CHAR		*pszPDumpCond);
+	PVRSRV_ERROR PDumpElseKM(IMG_CHAR	*pszPDumpCond);
+	PVRSRV_ERROR PDumpFiKM(IMG_CHAR		*pszPDumpCond);
+
+	IMG_VOID PDumpPowerTransitionStart(IMG_VOID);
+	IMG_VOID PDumpPowerTransitionEnd(IMG_VOID);
+	IMG_BOOL PDumpInPowerTransition(IMG_VOID);
+	IMG_BOOL PDumpIsDumpSuspended(IMG_VOID);
+
+	/*!
+	 * @name	PDumpWriteParameter
+	 * @brief	General function for writing to PDump stream. Used
+	 *          mainly for memory dumps to parameter stream.
+	 * 			Usually more convenient to use PDumpWriteScript below
+	 * 			for the script stream.
+	 * @param	psui8Data - data to write
+	 * @param	ui32Size - size of write
+	 * @param	ui32Flags - PDump flags
+	 * @param   pui32FileOffset - on return contains the file offset to
+	 *                            the start of the parameter data
+	 * @param   aszFilenameStr - pointer to at least a 20 char buffer to
+	 *                           return the parameter filename
+	 * @return	error
+	 */
+	PVRSRV_ERROR PDumpWriteParameter(IMG_UINT8 *psui8Data, IMG_UINT32 ui32Size,
+			IMG_UINT32 ui32Flags, IMG_UINT32* pui32FileOffset,
+			IMG_CHAR* aszFilenameStr);
+
+	/*!
+	 * @name	PDumpWriteScript
+	 * @brief	Write an PDumpOS created string to the "script" output stream
+	 * @param	hString - PDump OS layer handle of string buffer to write
+	 * @param	ui32Flags - PDump flags
+	 * @return	IMG_TRUE on success.
+	 */
+	IMG_BOOL PDumpWriteScript(IMG_HANDLE hString, IMG_UINT32 ui32Flags);
+
+    /*
+      PDumpWriteShiftedMaskedValue():
+
+      loads the "reference" address into an internal PDump register,
+      optionally shifts it right,
+      optionally shifts it left,
+      optionally masks it
+      then finally writes the computed value to the given destination address
+
+      i.e. it emits pdump language equivalent to this expression:
+
+      dest = ((&ref) >> SHRamount << SHLamount) & MASK
+    */
+extern PVRSRV_ERROR
+PDumpWriteShiftedMaskedValue(const IMG_CHAR *pszDestRegspaceName,
+                             const IMG_CHAR *pszDestSymbolicName,
+                             IMG_DEVMEM_OFFSET_T uiDestOffset,
+                             const IMG_CHAR *pszRefRegspaceName,
+                             const IMG_CHAR *pszRefSymbolicName,
+                             IMG_DEVMEM_OFFSET_T uiRefOffset,
+                             IMG_UINT32 uiSHRAmount,
+                             IMG_UINT32 uiSHLAmount,
+                             IMG_UINT32 uiMask,
+                             IMG_DEVMEM_SIZE_T uiWordSize,
+                             IMG_UINT32 uiPDumpFlags);
+
+    /*
+      PDumpWriteSymbAddress():
+
+      writes the address of the "reference" to the offset given
+    */
+extern PVRSRV_ERROR
+PDumpWriteSymbAddress(const IMG_CHAR *pszDestSpaceName,
+                      IMG_DEVMEM_OFFSET_T uiDestOffset,
+                      const IMG_CHAR *pszRefSymbolicName,
+                      IMG_DEVMEM_OFFSET_T uiRefOffset,
+                      const IMG_CHAR *pszPDumpDevName,
+                      IMG_UINT32 ui32WordSize,
+                      IMG_UINT32 ui32AlignShift,
+                      IMG_UINT32 ui32Shift,
+                      IMG_UINT32 uiPDumpFlags);
+
+/* Register the connection with the PDump subsystem */
+extern PVRSRV_ERROR PDumpRegisterConnection(SYNC_CONNECTION_DATA *psSyncConnectionData,
+											PDUMP_CONNECTION_DATA **ppsPDumpConnectionData);
+
+/* Unregister the connection with the PDump subsystem */
+extern IMG_VOID PDumpUnregisterConnection(PDUMP_CONNECTION_DATA *psPDumpConnectionData);
+
+/* Register for notification of PDump Transition into/out of capture range */
+extern PVRSRV_ERROR PDumpRegisterTransitionCallback(PDUMP_CONNECTION_DATA *psPDumpConnectionData,
+													 PFN_PDUMP_TRANSITION pfnCallback,
+													 IMG_PVOID hPrivData,
+													 IMG_PVOID *ppvHandle);
+
+/* Unregister notification of PDump Transition */
+extern IMG_VOID PDumpUnregisterTransitionCallback(IMG_PVOID pvHandle);
+
+/* Notify PDump of a Transition into/out of capture range */
+extern PVRSRV_ERROR PDumpTransition(PDUMP_CONNECTION_DATA *psPDumpConnectionData, IMG_BOOL bInto, IMG_BOOL bContinuous);
+   
+	#define PDUMP_LOCK				PDumpLockKM
+	#define PDUMP_UNLOCK			PDumpUnlockKM
+
+	#define PDUMPINIT				PDumpInitCommon
+	#define PDUMPDEINIT				PDumpDeInitCommon
+	#define PDUMPREG32				PDumpReg32
+	#define PDUMPREG64				PDumpReg64
+	#define PDUMPREGREAD32			PDumpRegRead32
+	#define PDUMPREGREAD64			PDumpRegRead64
+	#define PDUMPCOMMENT			PDumpComment
+	#define PDUMPCOMMENTWITHFLAGS	PDumpCommentWithFlags
+	#define PDUMPREGPOL				PDumpRegPolKM
+	#define PDUMPSETMMUCONTEXT		PDumpSetMMUContext
+	#define PDUMPCLEARMMUCONTEXT	PDumpClearMMUContext
+	#define PDUMPPDREG				PDumpPDReg
+	#define PDUMPPDREGWITHFLAGS		PDumpPDRegWithFlags
+	#define PDUMPREGBASEDCBP		PDumpRegBasedCBP
+	#define PDUMPENDINITPHASE		PDumpStopInitPhaseKM
+	#define PDUMPIDLWITHFLAGS		PDumpIDLWithFlags
+	#define PDUMPIDL				PDumpIDL
+	#define PDUMPPOWCMDSTART		PDumpPowerTransitionStart
+	#define PDUMPPOWCMDEND			PDumpPowerTransitionEnd
+	#define PDUMPPOWCMDINTRANS		PDumpInPowerTransition
+	#define PDUMPIF					PDumpIfKM
+	#define PDUMPELSE				PDumpElseKM
+	#define PDUMPFI					PDumpFiKM
+#else
+	/*
+		We should be clearer about which functions can be called
+		across the bridge as this looks rather unblanced
+	*/
+
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpInitCommon)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpInitCommon(IMG_VOID)
+{
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpConnectionNotify)
+#endif
+static INLINE IMG_VOID
+PDumpConnectionNotify(IMG_VOID)
+{
+	return;
+}
+
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpCreateLockKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpCreateLockKM(IMG_VOID)
+{
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpDestroyLockKM)
+#endif
+static INLINE IMG_VOID
+PDumpDestroyLockKM(IMG_VOID)
+{
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpLockKM)
+#endif
+static INLINE IMG_VOID
+PDumpLockKM(IMG_VOID)
+{
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpUnlockKM)
+#endif
+static INLINE IMG_VOID
+PDumpUnlockKM(IMG_VOID)
+{
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpAddPersistantProcess)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpAddPersistantProcess(IMG_VOID)
+{
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpStartInitPhaseKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpStartInitPhaseKM(IMG_VOID)
+{
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpStopInitPhaseKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpStopInitPhaseKM(IMG_MODULE_ID eModuleID)
+{
+	PVR_UNREFERENCED_PARAMETER(eModuleID);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpSetFrameKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpSetFrameKM(CONNECTION_DATA *psConnection, IMG_UINT32 ui32Frame)
+{
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	PVR_UNREFERENCED_PARAMETER(ui32Frame);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpGetFrameKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpGetFrameKM(CONNECTION_DATA *psConnection, IMG_UINT32* pui32Frame)
+{
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	PVR_UNREFERENCED_PARAMETER(pui32Frame);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpCommentKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpCommentKM(IMG_CHAR *pszComment, IMG_UINT32 ui32Flags)
+{
+	PVR_UNREFERENCED_PARAMETER(pszComment);
+	PVR_UNREFERENCED_PARAMETER(ui32Flags);
+	return PVRSRV_OK;
+}
+
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpCommentKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpSetDefaultCaptureParamsKM(IMG_UINT32 ui32Mode,
+                              IMG_UINT32 ui32Start,
+                              IMG_UINT32 ui32End,
+                              IMG_UINT32 ui32Interval,
+                              IMG_UINT32 ui32MaxParamFileSize)
+{
+	PVR_UNREFERENCED_PARAMETER(ui32Mode);
+	PVR_UNREFERENCED_PARAMETER(ui32Start);
+	PVR_UNREFERENCED_PARAMETER(ui32End);
+	PVR_UNREFERENCED_PARAMETER(ui32Interval);
+	PVR_UNREFERENCED_PARAMETER(ui32MaxParamFileSize);
+
+	return PVRSRV_OK;
+}
+
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpPanic)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpPanic(IMG_UINT32      ui32PanicNo,
+		   IMG_CHAR*       pszPanicMsg,
+		   const IMG_CHAR* pszPPFunc,
+		   IMG_UINT32      ui32PPline)
+{
+	PVR_UNREFERENCED_PARAMETER(ui32PanicNo);
+	PVR_UNREFERENCED_PARAMETER(pszPanicMsg);
+	PVR_UNREFERENCED_PARAMETER(pszPPFunc);
+	PVR_UNREFERENCED_PARAMETER(ui32PPline);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpIsLastCaptureFrameKM)
+#endif
+static INLINE IMG_BOOL
+PDumpIsLastCaptureFrameKM(IMG_VOID)
+{
+	return IMG_FALSE;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpIsCaptureFrameKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpIsCaptureFrameKM(IMG_BOOL *bIsCapturing)
+{
+	*bIsCapturing = IMG_FALSE;
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpBitmapKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpBitmapKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+										  IMG_CHAR *pszFileName,
+										  IMG_UINT32 ui32FileOffset,
+										  IMG_UINT32 ui32Width,
+										  IMG_UINT32 ui32Height,
+										  IMG_UINT32 ui32StrideInBytes,
+										  IMG_DEV_VIRTADDR sDevBaseAddr,
+										  IMG_UINT32 ui32MMUContextID,
+										  IMG_UINT32 ui32Size,
+										  PDUMP_PIXEL_FORMAT ePixelFormat,
+										  IMG_UINT32 ui32AddrMode,
+										  IMG_UINT32 ui32PDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+	PVR_UNREFERENCED_PARAMETER(pszFileName);
+	PVR_UNREFERENCED_PARAMETER(ui32FileOffset);
+	PVR_UNREFERENCED_PARAMETER(ui32Width);
+	PVR_UNREFERENCED_PARAMETER(ui32Height);
+	PVR_UNREFERENCED_PARAMETER(ui32StrideInBytes);
+	PVR_UNREFERENCED_PARAMETER(sDevBaseAddr);
+	PVR_UNREFERENCED_PARAMETER(ui32MMUContextID);
+	PVR_UNREFERENCED_PARAMETER(ui32Size);
+	PVR_UNREFERENCED_PARAMETER(ePixelFormat);
+	PVR_UNREFERENCED_PARAMETER(ui32AddrMode);
+	PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpRegisterConnection)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpRegisterConnection(SYNC_CONNECTION_DATA *psSyncConnectionData,
+						PDUMP_CONNECTION_DATA **ppsPDumpConnectionData)
+{
+	PVR_UNREFERENCED_PARAMETER(psSyncConnectionData);
+	PVR_UNREFERENCED_PARAMETER(ppsPDumpConnectionData);
+
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpUnregisterConnection)
+#endif
+static INLINE
+IMG_VOID PDumpUnregisterConnection(PDUMP_CONNECTION_DATA *psPDumpConnectionData)
+{
+	PVR_UNREFERENCED_PARAMETER(psPDumpConnectionData);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpRegisterTransitionCallback)
+#endif
+static INLINE
+PVRSRV_ERROR PDumpRegisterTransitionCallback(PDUMP_CONNECTION_DATA *psPDumpConnectionData,
+											  PFN_PDUMP_TRANSITION pfnCallback,
+											  IMG_PVOID hPrivData,
+											  IMG_PVOID *ppvHandle)
+{
+	PVR_UNREFERENCED_PARAMETER(psPDumpConnectionData);
+	PVR_UNREFERENCED_PARAMETER(pfnCallback);
+	PVR_UNREFERENCED_PARAMETER(hPrivData);
+	PVR_UNREFERENCED_PARAMETER(ppvHandle);
+
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpUnregisterTransitionCallback)
+#endif
+static INLINE
+IMG_VOID PDumpUnregisterTransitionCallback(IMG_PVOID pvHandle)
+{
+	PVR_UNREFERENCED_PARAMETER(pvHandle);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpTransition)
+#endif
+static INLINE
+PVRSRV_ERROR PDumpTransition(PDUMP_CONNECTION_DATA *psPDumpConnectionData, IMG_BOOL bInto, IMG_BOOL bContinuous)
+{
+	PVR_UNREFERENCED_PARAMETER(psPDumpConnectionData);
+	PVR_UNREFERENCED_PARAMETER(bInto);
+	PVR_UNREFERENCED_PARAMETER(bContinuous);
+	return PVRSRV_OK;
+}
+
+	#if defined WIN32
+		#define PDUMPINIT			PDumpInitCommon
+		#define PDUMPDEINIT(...)		/ ## * PDUMPDEINIT(__VA_ARGS__) * ## /
+		#define PDUMPREG32(...)			/ ## * PDUMPREG32(__VA_ARGS__) * ## /
+		#define PDUMPREG64(...)			/ ## * PDUMPREG64(__VA_ARGS__) * ## /
+		#define PDUMPREGREAD32(...)			/ ## * PDUMPREGREAD32(__VA_ARGS__) * ## /
+		#define PDUMPREGREAD64(...)			/ ## * PDUMPREGREAD64(__VA_ARGS__) * ## /
+		#define PDUMPCOMMENT(...)		/ ## * PDUMPCOMMENT(__VA_ARGS__) * ## /
+		#define PDUMPREGPOL(...)		/ ## * PDUMPREGPOL(__VA_ARGS__) * ## /
+		#define PDUMPSETMMUCONTEXT(...)		/ ## * PDUMPSETMMUCONTEXT(__VA_ARGS__) * ## /
+		#define PDUMPCLEARMMUCONTEXT(...)	/ ## * PDUMPCLEARMMUCONTEXT(__VA_ARGS__) * ## /
+		#define PDUMPPDREG(...)			/ ## * PDUMPPDREG(__VA_ARGS__) * ## /
+		#define PDUMPPDREGWITHFLAGS(...)	/ ## * PDUMPPDREGWITHFLAGS(__VA_ARGS__) * ## /
+		#define PDUMPSYNC(...)			/ ## * PDUMPSYNC(__VA_ARGS__) * ## /
+		#define PDUMPCOPYTOMEM(...)		/ ## * PDUMPCOPYTOMEM(__VA_ARGS__) * ## /
+		#define PDUMPWRITE(...)			/ ## * PDUMPWRITE(__VA_ARGS__) * ## /
+		#define PDUMPCBP(...)			/ ## * PDUMPCBP(__VA_ARGS__) * ## /
+		#define	PDUMPREGBASEDCBP(...)		/ ## * PDUMPREGBASEDCBP(__VA_ARGS__) * ## /
+		#define PDUMPCOMMENTWITHFLAGS(...)	/ ## * PDUMPCOMMENTWITHFLAGS(__VA_ARGS__) * ## /
+		#define PDUMPMALLOCPAGESPHYS(...)	/ ## * PDUMPMALLOCPAGESPHYS(__VA_ARGS__) * ## /
+		#define PDUMPENDINITPHASE(...)		/ ## * PDUMPENDINITPHASE(__VA_ARGS__) * ## /
+		#define PDUMPMSVDXREG(...)		/ ## * PDUMPMSVDXREG(__VA_ARGS__) * ## /
+		#define PDUMPMSVDXREGWRITE(...)		/ ## * PDUMPMSVDXREGWRITE(__VA_ARGS__) * ## /
+		#define PDUMPMSVDXREGREAD(...)		/ ## * PDUMPMSVDXREGREAD(__VA_ARGS__) * ## /
+		#define PDUMPMSVDXPOLEQ(...)		/ ## * PDUMPMSVDXPOLEQ(__VA_ARGS__) * ## /
+		#define PDUMPMSVDXPOL(...)		/ ## * PDUMPMSVDXPOL(__VA_ARGS__) * ## /
+		#define PDUMPIDLWITHFLAGS(...)		/ ## * PDUMPIDLWITHFLAGS(__VA_ARGS__) * ## /
+		#define PDUMPIDL(...)			/ ## * PDUMPIDL(__VA_ARGS__) * ## /
+		#define PDUMPPOWCMDSTART(...)		/ ## * PDUMPPOWCMDSTART(__VA_ARGS__) * ## /
+		#define PDUMPPOWCMDEND(...)		/ ## * PDUMPPOWCMDEND(__VA_ARGS__) * ## /
+		#define PDUMP_LOCK			/ ## * PDUMP_LOCK(__VA_ARGS__) * ## /
+		#define PDUMP_UNLOCK			/ ## * PDUMP_UNLOCK(__VA_ARGS__) * ## /
+	#else
+		#if defined LINUX || defined GCC_IA32 || defined GCC_ARM || defined __QNXNTO__
+			#define PDUMPINIT	PDumpInitCommon
+			#define PDUMPDEINIT(args...)
+			#define PDUMPREG32(args...)
+			#define PDUMPREG64(args...)
+			#define PDUMPREGREAD32(args...)
+			#define PDUMPREGREAD64(args...)
+			#define PDUMPCOMMENT(args...)
+			#define PDUMPREGPOL(args...)
+			#define PDUMPSETMMUCONTEXT(args...)
+			#define PDUMPCLEARMMUCONTEXT(args...)
+			#define PDUMPPDREG(args...)
+			#define PDUMPPDREGWITHFLAGS(args...)
+			#define PDUMPSYNC(args...)
+			#define PDUMPCOPYTOMEM(args...)
+			#define PDUMPWRITE(args...)
+			#define PDUMPREGBASEDCBP(args...)
+			#define PDUMPCOMMENTWITHFLAGS(args...)
+			#define PDUMPENDINITPHASE(args...)
+			#define PDUMPIDLWITHFLAGS(args...)
+			#define PDUMPIDL(args...)
+			#define PDUMPPOWCMDSTART(args...)
+			#define PDUMPPOWCMDEND(args...)
+			#define PDUMP_LOCK(args...)
+			#define PDUMP_UNLOCK(args...)
+
+		#else
+			#error Compiler not specified
+		#endif
+	#endif
+#endif
+
+
+#endif /* _PDUMP_KM_H_ */
+
+/******************************************************************************
+ End of file (pdump_km.h)
+******************************************************************************/
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/pdump_mmu.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/pdump_mmu.h
new file mode 100644
index 0000000..477c27f
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/pdump_mmu.h
@@ -0,0 +1,175 @@
+/**************************************************************************/ /*!
+@File
+@Title          Common MMU Management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements basic low level control of MMU.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef SRVKM_PDUMP_MMU_H
+#define SRVKM_PDUMP_MMU_H
+
+/* services/server/include/ */
+#include "pdump_symbolicaddr.h"
+/* include/ */
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "mmu_common.h"
+
+/*
+	PDUMP MMU attributes
+*/
+typedef struct _PDUMP_MMU_ATTRIB_DEVICE_
+{
+    /* Per-Device Pdump attribs */
+
+	/*!< Pdump memory bank name */
+	IMG_CHAR				*pszPDumpMemDevName;
+
+	/*!< Pdump register bank name */
+	IMG_CHAR				*pszPDumpRegDevName;
+
+} PDUMP_MMU_ATTRIB_DEVICE;
+
+typedef struct _PDUMP_MMU_ATTRIB_CONTEXT_
+{
+	IMG_UINT32 ui32Dummy;
+} PDUMP_MMU_ATTRIB_CONTEXT;
+
+typedef struct _PDUMP_MMU_ATTRIB_HEAP_
+{
+	/* data page info */
+	IMG_UINT32 ui32DataPageMask;
+} PDUMP_MMU_ATTRIB_HEAP;
+
+typedef struct _PDUMP_MMU_ATTRIB_
+{
+    /* FIXME: would these be better as pointers rather than copies? */
+    struct _PDUMP_MMU_ATTRIB_DEVICE_ sDevice;
+    struct _PDUMP_MMU_ATTRIB_CONTEXT_ sContext;
+    struct _PDUMP_MMU_ATTRIB_HEAP_ sHeap;
+} PDUMP_MMU_ATTRIB;
+
+#if defined(PDUMP)
+    extern PVRSRV_ERROR PDumpMMUMalloc(const IMG_CHAR			*pszPDumpDevName,
+                                       MMU_LEVEL				eMMULevel,
+                                       IMG_DEV_PHYADDR			*psDevPAddr,
+                                       IMG_UINT32				ui32Size,
+                                       IMG_UINT32				ui32Align);
+
+    extern PVRSRV_ERROR PDumpMMUFree(const IMG_CHAR				*pszPDumpDevName,
+                                     MMU_LEVEL					eMMULevel,
+                                     IMG_DEV_PHYADDR			*psDevPAddr);
+
+    extern PVRSRV_ERROR PDumpMMUMalloc2(const IMG_CHAR			*pszPDumpDevName,
+                                        const IMG_CHAR			*pszTableType,/* PAGE_CATALOGUE, PAGE_DIRECTORY, PAGE_TABLE */
+                                        const IMG_CHAR 			*pszSymbolicAddr,
+                                        IMG_UINT32				ui32Size,
+                                        IMG_UINT32				ui32Align);
+
+    extern PVRSRV_ERROR PDumpMMUFree2(const IMG_CHAR			*pszPDumpDevName,
+                                      const IMG_CHAR			*pszTableType,/* PAGE_CATALOGUE, PAGE_DIRECTORY, PAGE_TABLE */
+                                      const IMG_CHAR 			*pszSymbolicAddr);
+
+    extern PVRSRV_ERROR PDumpMMUDumpPxEntries(MMU_LEVEL eMMULevel,
+    								   const IMG_CHAR *pszPDumpDevName,
+                                       IMG_VOID *pvPxMem,
+                                       IMG_DEV_PHYADDR sPxDevPAddr,
+                                       IMG_UINT32 uiFirstEntry,
+                                       IMG_UINT32 uiNumEntries,
+                                       const IMG_CHAR *pszMemspaceName,
+                                       const IMG_CHAR *pszSymbolicAddr,
+                                       IMG_UINT64 uiSymbolicAddrOffset,
+                                       IMG_UINT32 uiBytesPerEntry,
+                                       IMG_UINT32 uiLog2Align,
+                                       IMG_UINT32 uiAddrShift,
+                                       IMG_UINT64 uiAddrMask,
+                                       IMG_UINT64 uiPxEProtMask,
+                                       IMG_UINT32 ui32Flags);
+
+    extern PVRSRV_ERROR PDumpMMUAllocMMUContext(const IMG_CHAR *pszPDumpMemSpaceName,
+                                                IMG_DEV_PHYADDR sPCDevPAddr,
+                                                PDUMP_MMU_TYPE eMMUType,
+                                                IMG_UINT32 *pui32MMUContextID);
+
+    extern PVRSRV_ERROR PDumpMMUFreeMMUContext(const IMG_CHAR *pszPDumpMemSpaceName,
+                                               IMG_UINT32 ui32MMUContextID);
+
+	extern PVRSRV_ERROR PDumpMMUActivateCatalog(const IMG_CHAR *pszPDumpRegSpaceName,
+												const IMG_CHAR *pszPDumpRegName,
+												IMG_UINT32 uiRegAddr,
+												const IMG_CHAR *pszPDumpPCSymbolicName);
+
+	/* FIXME: split to separate file... (debatable whether this is anything to do with MMU) */
+extern PVRSRV_ERROR
+PDumpMMUSAB(const IMG_CHAR *pszPDumpMemNamespace,
+               IMG_UINT32 uiPDumpMMUCtx,
+               IMG_DEV_VIRTADDR sDevAddrStart,
+               IMG_DEVMEM_SIZE_T uiSize,
+               const IMG_CHAR *pszFilename,
+               IMG_UINT32 uiFileOffset,
+			   IMG_UINT32 ui32PDumpFlags);
+
+	#define PDUMP_MMU_MALLOC_DP(pszPDumpMemDevName, aszSymbolicAddr, ui32Size, ui32Align) \
+        PDumpMMUMalloc2(pszPDumpMemDevName, "DATA_PAGE", aszSymbolicAddr, ui32Size, ui32Align)
+    #define PDUMP_MMU_FREE_DP(pszPDumpMemDevName, aszSymbolicAddr) \
+        PDumpMMUFree2(pszPDumpMemDevName, "DATA_PAGE", aszSymbolicAddr)
+
+    #define PDUMP_MMU_ALLOC_MMUCONTEXT(pszPDumpMemDevName, sPCDevPAddr, eMMUType, puiPDumpCtxID) \
+        PDumpMMUAllocMMUContext(pszPDumpMemDevName,                     \
+                                sPCDevPAddr,                            \
+                                eMMUType,								\
+                                puiPDumpCtxID)
+
+    #define PDUMP_MMU_FREE_MMUCONTEXT(pszPDumpMemDevName, uiPDumpCtxID) \
+        PDumpMMUFreeMMUContext(pszPDumpMemDevName, uiPDumpCtxID)
+#else
+
+	#define PDUMP_MMU_MALLOC_DP(pszPDumpMemDevName, pszDevPAddr, ui32Size, ui32Align) \
+        ((IMG_VOID)0)
+    #define PDUMP_MMU_FREE_DP(pszPDumpMemDevName, psDevPAddr) \
+        ((IMG_VOID)0)
+    #define PDUMP_MMU_ALLOC_MMUCONTEXT(pszPDumpMemDevName, sPCDevPAddr, puiPDumpCtxID) \
+        ((IMG_VOID)0)
+    #define PDUMP_MMU_FREE_MMUCONTEXT(pszPDumpMemDevName, uiPDumpCtxID) \
+        ((IMG_VOID)0)
+
+#endif // defined(PDUMP)
+
+#endif
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/pdump_osfunc.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/pdump_osfunc.h
new file mode 100644
index 0000000..bd99121
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/pdump_osfunc.h
@@ -0,0 +1,352 @@
+/*************************************************************************/ /*!
+@File
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description	OS-independent interface to helper functions for pdump
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stdarg.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_device_types.h"
+
+
+/* FIXME
+ * Some OSes (WinXP,CE) allocate the string on the stack, but some
+ * (Linux) use a global variable/lock instead.
+ * Would be good to use the same across all OSes.
+ *
+ * A handle is returned which represents IMG_CHAR* type on all OSes.
+ *
+ * The allocated buffer length is also returned on OSes where it's
+ * supported (e.g. Linux).
+ */
+#define MAX_PDUMP_STRING_LENGTH (256)
+#if defined(WIN32)
+#define PDUMP_GET_SCRIPT_STRING()	\
+	IMG_CHAR pszScript[MAX_PDUMP_STRING_LENGTH];		\
+	IMG_UINT32	ui32MaxLen = MAX_PDUMP_STRING_LENGTH-1;	\
+	IMG_HANDLE	hScript = (IMG_HANDLE)pszScript;
+
+#define PDUMP_GET_MSG_STRING()		\
+	IMG_CHAR pszMsg[MAX_PDUMP_STRING_LENGTH];			\
+	IMG_UINT32	ui32MaxLen = MAX_PDUMP_STRING_LENGTH-1;
+
+#define PDUMP_GET_FILE_STRING()		\
+	IMG_CHAR	pszFileName[MAX_PDUMP_STRING_LENGTH];	\
+	IMG_UINT32	ui32MaxLen = MAX_PDUMP_STRING_LENGTH-1;
+
+#define PDUMP_GET_SCRIPT_AND_FILE_STRING()		\
+	IMG_CHAR 	pszScript[MAX_PDUMP_STRING_LENGTH];		\
+	IMG_CHAR	pszFileName[MAX_PDUMP_STRING_LENGTH];	\
+	IMG_UINT32	ui32MaxLenScript = MAX_PDUMP_STRING_LENGTH-1;	\
+	IMG_UINT32	ui32MaxLenFileName = MAX_PDUMP_STRING_LENGTH-1;	\
+	IMG_HANDLE	hScript = (IMG_HANDLE)pszScript;
+
+#else	/* WIN32 */
+
+#if defined(__QNXNTO__)
+
+#define PDUMP_GET_SCRIPT_STRING()	\
+	IMG_CHAR pszScript[MAX_PDUMP_STRING_LENGTH];		\
+	IMG_UINT32	ui32MaxLen = MAX_PDUMP_STRING_LENGTH-1;	\
+	IMG_HANDLE	hScript = (IMG_HANDLE)pszScript;
+
+#define PDUMP_GET_MSG_STRING()		\
+	IMG_CHAR pszMsg[MAX_PDUMP_STRING_LENGTH];			\
+	IMG_UINT32	ui32MaxLen = MAX_PDUMP_STRING_LENGTH-1;
+
+#define PDUMP_GET_FILE_STRING()		\
+	IMG_CHAR	pszFileName[MAX_PDUMP_STRING_LENGTH];	\
+	IMG_UINT32	ui32MaxLen = MAX_PDUMP_STRING_LENGTH-1;
+
+#define PDUMP_GET_SCRIPT_AND_FILE_STRING()		\
+	IMG_CHAR 	pszScript[MAX_PDUMP_STRING_LENGTH];		\
+	IMG_CHAR	pszFileName[MAX_PDUMP_STRING_LENGTH];	\
+	IMG_UINT32	ui32MaxLenScript = MAX_PDUMP_STRING_LENGTH-1;	\
+	IMG_UINT32	ui32MaxLenFileName = MAX_PDUMP_STRING_LENGTH-1;	\
+	IMG_HANDLE	hScript = (IMG_HANDLE)pszScript;
+
+#else  /* __QNXNTO__ */
+
+	/*
+	 * Linux
+	 */
+#define PDUMP_GET_SCRIPT_STRING()				\
+	IMG_HANDLE hScript;							\
+	IMG_UINT32	ui32MaxLen;						\
+	PVRSRV_ERROR eErrorPDump;						\
+	eErrorPDump = PDumpOSGetScriptString(&hScript, &ui32MaxLen);\
+	PVR_LOGR_IF_ERROR(eErrorPDump, "PDumpOSGetScriptString");
+
+#define PDUMP_GET_MSG_STRING()					\
+	IMG_CHAR *pszMsg;							\
+	IMG_UINT32	ui32MaxLen;						\
+	PVRSRV_ERROR eErrorPDump;						\
+	eErrorPDump = PDumpOSGetMessageString(&pszMsg, &ui32MaxLen);\
+	PVR_LOGR_IF_ERROR(eErrorPDump, "PDumpOSGetMessageString");
+
+#define PDUMP_GET_FILE_STRING()				\
+	IMG_CHAR *pszFileName;					\
+	IMG_UINT32	ui32MaxLen;					\
+	PVRSRV_ERROR eErrorPDump;				\
+	eErrorPDump = PDumpOSGetFilenameString(&pszFileName, &ui32MaxLen);\
+	PVR_LOGR_IF_ERROR(eErrorPDump, "PDumpOSGetFilenameString");
+
+#define PDUMP_GET_SCRIPT_AND_FILE_STRING()		\
+	IMG_HANDLE hScript;							\
+	IMG_CHAR *pszFileName;						\
+	IMG_UINT32	ui32MaxLenScript;				\
+	IMG_UINT32	ui32MaxLenFileName;				\
+	PVRSRV_ERROR eErrorPDump;						\
+	eErrorPDump = PDumpOSGetScriptString(&hScript, &ui32MaxLenScript);\
+	PVR_LOGR_IF_ERROR(eErrorPDump, "PDumpOSGetScriptString");\
+	eErrorPDump = PDumpOSGetFilenameString(&pszFileName, &ui32MaxLenFileName);\
+	PVR_LOGR_IF_ERROR(eErrorPDump, "PDumpOSGetFilenameString");
+
+	/*!
+	 * @name	PDumpOSGetScriptString
+	 * @brief	Get the "script" buffer
+	 * @param	phScript - buffer handle for pdump script
+	 * @param	pui32MaxLen - max length of the script buffer
+	 * 			FIXME: the max length should be internal to the OS-specific code
+	 * @return	error (always PVRSRV_OK on some OSes)
+	 */
+	PVRSRV_ERROR PDumpOSGetScriptString(IMG_HANDLE *phScript, IMG_UINT32 *pui32MaxLen);
+
+	/*!
+	 * @name	PDumpOSGetMessageString
+	 * @brief	Get the "message" buffer
+	 * @param	pszMsg - buffer pointer for pdump messages
+	 * @param	pui32MaxLen - max length of the message buffer
+	 * 			FIXME: the max length should be internal to the OS-specific code
+	 * @return	error (always PVRSRV_OK on some OSes)
+	 */
+	PVRSRV_ERROR PDumpOSGetMessageString(IMG_CHAR **ppszMsg, IMG_UINT32 *pui32MaxLen);
+
+	/*!
+	 * @name	PDumpOSGetFilenameString
+	 * @brief	Get the "filename" buffer
+	 * @param	ppszFile - buffer pointer for filename
+	 * @param	pui32MaxLen - max length of the filename buffer
+	 * 			FIXME: the max length should be internal to the OS-specific code
+	 * @return	error (always PVRSRV_OK on some OSes)
+	 */
+	PVRSRV_ERROR PDumpOSGetFilenameString(IMG_CHAR **ppszFile, IMG_UINT32 *pui32MaxLen);
+
+#endif /* __QNXNTO__ */
+#endif /* WIN32 */
+
+
+/*
+ * PDump streams, channels, init and deinit routines (common to all OSes)
+ */
+
+typedef struct
+{
+	IMG_HANDLE hInit;        /*!< Driver initialisation PDump stream */
+	IMG_HANDLE hMain;        /*!< App framed PDump stream */
+	IMG_HANDLE hDeinit;      /*!< Driver/HW de-initialisation PDump stream */
+} PDUMP_CHANNEL;
+
+PVRSRV_ERROR PDumpOSInit(PDUMP_CHANNEL* psParam, PDUMP_CHANNEL* psScript,
+		IMG_UINT32* pui32InitCapMode, IMG_CHAR** ppszEnvComment);
+
+IMG_VOID PDumpOSDeInit(PDUMP_CHANNEL* psParam, PDUMP_CHANNEL* psScript);
+
+/*!
+ * @name	PDumpOSSetSplitMarker
+ * @brief	Inform the PDump client to start a new file at the given marker.
+ * @param	hStream - stream
+ * @param   ui32Marker - byte file position
+ */
+IMG_BOOL PDumpOSSetSplitMarker(IMG_HANDLE hStream, IMG_UINT32 ui32Marker);
+
+/*
+	PDumpOSDebugDriverWrite - ENV layer write entry point from COMMON layer
+	                          A call back down the PDump software layer
+ */
+IMG_UINT32 PDumpOSDebugDriverWrite(IMG_HANDLE psStream,
+                                   IMG_UINT8 *pui8Data,
+                                   IMG_UINT32 ui32BCount);
+
+/*
+ * Define macro for processing variable args list in OS-independent
+ * manner. See e.g. PDumpComment().
+ */
+#define PDUMP_va_list	va_list
+#define PDUMP_va_start	va_start
+#define PDUMP_va_end	va_end
+
+
+/*!
+ * @name	PDumpOSBufprintf
+ * @brief	Printf to OS-specific pdump state buffer
+ * @param	hBuf - buffer handle to write into
+ * @param	ui32ScriptSizeMax - maximum size of data to write (not supported on all OSes)
+ * @param	pszFormat - format string
+ */
+PVRSRV_ERROR PDumpOSBufprintf(IMG_HANDLE hBuf, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR* pszFormat, ...) IMG_FORMAT_PRINTF(3, 4);
+
+/*!
+ * @name	PDumpOSDebugPrintf
+ * @brief	Debug message during pdumping
+ * @param	pszFormat - format string
+ */
+IMG_VOID PDumpOSDebugPrintf(IMG_CHAR* pszFormat, ...) IMG_FORMAT_PRINTF(1, 2);
+
+/*
+ * Write into a IMG_CHAR* on all OSes. Can be allocated on the stack or heap.
+ */
+/*!
+ * @name	PDumpOSSprintf
+ * @brief	Printf to IMG char array
+ * @param	pszComment - char array to print into
+ * @param	pszFormat - format string
+ */
+PVRSRV_ERROR PDumpOSSprintf(IMG_CHAR *pszComment, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR *pszFormat, ...) IMG_FORMAT_PRINTF(3, 4);
+
+/*!
+ * @name	PDumpOSVSprintf
+ * @brief	Printf to IMG string using variable args (see stdarg.h). This is necessary
+ * 			because the ... notation does not support nested function calls.
+ * @param	pszMsg - char array to print into
+ * @param	ui32ScriptSizeMax - maximum size of data to write (not supported on all OSes)
+ * @param	pszFormat - format string
+ * @param	vaArgs - variable args structure (from stdarg.h)
+ */
+PVRSRV_ERROR PDumpOSVSprintf(IMG_CHAR *pszMsg, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR* pszFormat, PDUMP_va_list vaArgs) IMG_FORMAT_PRINTF(3, 0);
+
+/*!
+ * @name	PDumpOSBuflen
+ * @param	hBuffer - handle to buffer
+ * @param	ui32BuffeRSizeMax - max size of buffer (chars)
+ * @return	length of buffer, will always be <= ui32BufferSizeMax
+ */
+IMG_UINT32 PDumpOSBuflen(IMG_HANDLE hBuffer, IMG_UINT32 ui32BufferSizeMax);
+
+/*!
+ * @name	PDumpOSVerifyLineEnding
+ * @brief	Put line ending sequence at the end if it isn't already there
+ * @param	hBuffer - handle to buffer
+ * @param	ui32BufferSizeMax - max size of buffer (chars)
+ */
+IMG_VOID PDumpOSVerifyLineEnding(IMG_HANDLE hBuffer, IMG_UINT32 ui32BufferSizeMax);
+
+/*!
+ * @name	PDumpOSCPUVAddrToDevPAddr
+ * @brief	OS function to convert CPU virtual to device physical for dumping pages
+ * @param	hOSMemHandle	mem allocation handle (used if kernel virtual mem space is limited, e.g. linux)
+ * @param	ui32Offset		dword offset into allocation (for use with mem handle, e.g. linux)
+ * @param	pui8LinAddr		CPU linear addr (usually a kernel virtual address)
+ * @param	ui32PageSize	page size, used for assertion check
+ * @return	psDevPAddr		device physical addr
+ */
+IMG_VOID PDumpOSCPUVAddrToDevPAddr(PVRSRV_DEVICE_TYPE eDeviceType,
+        IMG_HANDLE hOSMemHandle,
+		IMG_UINT32 ui32Offset,
+		IMG_UINT8 *pui8LinAddr,
+		IMG_UINT32 ui32PageSize,
+		IMG_DEV_PHYADDR *psDevPAddr);
+
+/*!
+ * @name	PDumpOSCPUVAddrToPhysPages
+ * @brief	OS function to convert CPU virtual to backing physical pages
+ * @param	hOSMemHandle	mem allocation handle (used if kernel virtual mem space is limited, e.g. linux)
+ * @param	ui32Offset		offset within mem allocation block
+ * @param	pui8LinAddr		CPU linear addr
+ * @param	ui32DataPageMask	mask for data page (= data page size -1)
+ * @return	pui32PageOffset	CPU page offset (same as device page offset if page sizes equal)
+ */
+IMG_VOID PDumpOSCPUVAddrToPhysPages(IMG_HANDLE hOSMemHandle,
+		IMG_UINT32 ui32Offset,
+		IMG_PUINT8 pui8LinAddr,
+		IMG_UINT32 ui32DataPageMask,
+		IMG_UINT32 *pui32PageOffset);
+
+/*!
+ * @name	PDumpOSReleaseExecution
+ * @brief	OS function to switch to another process, to clear pdump buffers
+ */
+IMG_VOID PDumpOSReleaseExecution(IMG_VOID);
+
+/*!
+ * @name	PDumpOSCreateLock
+ * @brief	Create the global pdump lock
+ */
+PVRSRV_ERROR PDumpOSCreateLock(IMG_VOID);
+
+/*!
+ * @name	PDumpOSDestroyLock
+ * @brief	Destroy the global pdump lock
+ */
+IMG_VOID PDumpOSDestroyLock(IMG_VOID);
+
+/*!
+ * @name	PDumpOSLock
+ * @brief	Acquire the global pdump lock
+ */
+IMG_VOID PDumpOSLock(IMG_VOID);
+
+/*!
+ * @name	PDumpOSUnlock
+ * @brief	Release the global pdump lock
+ */
+IMG_VOID PDumpOSUnlock(IMG_VOID);
+
+/*!
+ * @name	PDumpOSGetCtrlState
+ * @brief	Retrieve some state from the debug driver or debug driver stream
+ */
+IMG_UINT32 PDumpOSGetCtrlState(IMG_HANDLE hDbgStream, IMG_UINT32 ui32StateID);
+
+/*!
+ * @name	PDumpOSSetFrame
+ * @brief	Set the current frame value mirrored in the debug driver
+ */
+IMG_VOID PDumpOSSetFrame(IMG_UINT32 ui32Frame);
+
+/*!
+ * @name	PDumpOSAllowInitPhaseToComplete
+ * @brief	Some platforms wish to control when the init phase is marked as
+ *          complete depending on who is instructing it so.
+ */
+IMG_BOOL PDumpOSAllowInitPhaseToComplete(IMG_UINT32 eModuleID);
+
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/pdump_physmem.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/pdump_physmem.h
new file mode 100644
index 0000000..c75202b
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/pdump_physmem.h
@@ -0,0 +1,212 @@
+/**************************************************************************/ /*!
+@File
+@Title          pdump functions to assist with physmem allocations
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements basic low level control of MMU.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef SRVSRV_PDUMP_PHYSMEM_H
+#define SRVSRV_PDUMP_PHYSMEM_H
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pmr.h"
+
+
+typedef struct _PDUMP_PHYSMEM_INFO_T_ PDUMP_PHYSMEM_INFO_T;
+
+#if defined(PDUMP)
+extern PVRSRV_ERROR
+PDumpPMRMalloc(const IMG_CHAR *pszDevSpace,
+               const IMG_CHAR *pszSymbolicAddress,
+               IMG_UINT64 ui64Size,
+               /* alignment is alignment of start of buffer _and_
+                  minimum contiguity - i.e. smallest allowable
+                  page-size. */
+               IMG_DEVMEM_ALIGN_T uiAlign,
+               IMG_BOOL bForcePersistent,
+               IMG_HANDLE *phHandlePtr);
+
+IMG_INTERNAL IMG_VOID
+PDumpPMRMallocPMR(const PMR *psPMR,
+                  IMG_DEVMEM_SIZE_T uiSize,
+                  IMG_DEVMEM_ALIGN_T uiBlockSize,
+                  IMG_BOOL bForcePersistent,
+                  IMG_HANDLE *phPDumpAllocInfoPtr);
+
+extern
+PVRSRV_ERROR PDumpPMRFree(IMG_HANDLE hPDumpAllocationInfoHandle);
+#else	/* PDUMP */
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVSyncPrimPDumpPolKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpPMRMalloc(const IMG_CHAR *pszDevSpace,
+               const IMG_CHAR *pszSymbolicAddress,
+               IMG_UINT64 ui64Size,
+               IMG_DEVMEM_ALIGN_T uiAlign,
+               IMG_BOOL bForcePersistent,
+               IMG_HANDLE *phHandlePtr)
+{
+	PVR_UNREFERENCED_PARAMETER(pszDevSpace);
+	PVR_UNREFERENCED_PARAMETER(pszSymbolicAddress);
+	PVR_UNREFERENCED_PARAMETER(ui64Size);
+	PVR_UNREFERENCED_PARAMETER(uiAlign);
+	PVR_UNREFERENCED_PARAMETER(bForcePersistent);
+	PVR_UNREFERENCED_PARAMETER(phHandlePtr);
+	PVR_UNREFERENCED_PARAMETER(bForcePersistent);
+	return PVRSRV_OK;
+}
+
+static INLINE IMG_VOID
+PDumpPMRMallocPMR(const PMR *psPMR,
+                  IMG_DEVMEM_SIZE_T uiSize,
+                  IMG_DEVMEM_ALIGN_T uiBlockSize,
+                  IMG_BOOL bForcePersistent,
+                  IMG_HANDLE *phPDumpAllocInfoPtr)
+{
+	PVR_UNREFERENCED_PARAMETER(psPMR);
+	PVR_UNREFERENCED_PARAMETER(uiSize);
+	PVR_UNREFERENCED_PARAMETER(uiBlockSize);
+	PVR_UNREFERENCED_PARAMETER(bForcePersistent);
+	PVR_UNREFERENCED_PARAMETER(phPDumpAllocInfoPtr);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVSyncPrimPDumpPolKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpPMRFree(IMG_HANDLE hPDumpAllocationInfoHandle)
+{
+	PVR_UNREFERENCED_PARAMETER(hPDumpAllocationInfoHandle);
+	return PVRSRV_OK;
+}
+#endif	/* PDUMP */
+
+#define PMR_DEFAULT_PREFIX "PMR"
+#define PMR_SYMBOLICADDR_FMTSPEC "%s%llu"
+
+#if defined(PDUMP)
+#define PDUMP_PHYSMEM_MALLOC_OSPAGES(pszPDumpMemDevName, ui32SerialNum, ui32Size, ui32Align, phHandlePtr) \
+    PDumpPMRMalloc(pszPDumpMemDevName, PMR_OSALLOCPAGES_PREFIX, ui32SerialNum, ui32Size, ui32Align, phHandlePtr)
+#define PDUMP_PHYSMEM_FREE_OSPAGES(hHandle) \
+    PDumpPMRFree(hHandle)
+#else
+#define PDUMP_PHYSMEM_MALLOC_OSPAGES(pszPDumpMemDevName, ui32SerialNum, ui32Size, ui32Align, phHandlePtr) \
+    ((IMG_VOID)(*phHandlePtr=IMG_NULL))
+#define PDUMP_PHYSMEM_FREE_OSPAGES(hHandle) \
+    ((IMG_VOID)(0))
+#endif // defined(PDUMP)
+
+extern PVRSRV_ERROR
+PDumpPMRWRW32(const IMG_CHAR *pszDevSpace,
+            const IMG_CHAR *pszSymbolicName,
+            IMG_DEVMEM_OFFSET_T uiOffset,
+            IMG_UINT32 ui32Value,
+            PDUMP_FLAGS_T uiPDumpFlags);
+
+extern PVRSRV_ERROR
+PDumpPMRWRW64(const IMG_CHAR *pszDevSpace,
+            const IMG_CHAR *pszSymbolicName,
+            IMG_DEVMEM_OFFSET_T uiOffset,
+            IMG_UINT64 ui64Value,
+            PDUMP_FLAGS_T uiPDumpFlags);
+
+extern PVRSRV_ERROR
+PDumpPMRLDB(const IMG_CHAR *pszDevSpace,
+            const IMG_CHAR *pszSymbolicName,
+            IMG_DEVMEM_OFFSET_T uiOffset,
+            IMG_DEVMEM_SIZE_T uiSize,
+            const IMG_CHAR *pszFilename,
+            IMG_UINT32 uiFileOffset,
+            PDUMP_FLAGS_T uiPDumpFlags);
+
+extern PVRSRV_ERROR
+PDumpPMRSAB(const IMG_CHAR *pszDevSpace,
+            const IMG_CHAR *pszSymbolicName,
+            IMG_DEVMEM_OFFSET_T uiOffset,
+            IMG_DEVMEM_SIZE_T uiSize,
+            const IMG_CHAR *pszFileName,
+            IMG_UINT32 uiFileOffset);
+
+/*
+  PDumpPMRPOL()
+
+  emits a POL to the PDUMP.
+*/
+extern PVRSRV_ERROR
+PDumpPMRPOL(const IMG_CHAR *pszMempaceName,
+            const IMG_CHAR *pszSymbolicName,
+            IMG_DEVMEM_OFFSET_T uiOffset,
+            IMG_UINT32 ui32Value,
+            IMG_UINT32 ui32Mask,
+            PDUMP_POLL_OPERATOR eOperator,
+            IMG_UINT32 uiCount,
+            IMG_UINT32 uiDelay,
+            PDUMP_FLAGS_T uiPDumpFlags);
+
+extern PVRSRV_ERROR
+PDumpPMRCBP(const IMG_CHAR *pszMemspaceName,
+            const IMG_CHAR *pszSymbolicName,
+            IMG_DEVMEM_OFFSET_T uiReadOffset,
+            IMG_DEVMEM_OFFSET_T uiWriteOffset,
+            IMG_DEVMEM_SIZE_T uiPacketSize,
+            IMG_DEVMEM_SIZE_T uiBufferSize);
+
+/*
+ * PDumpWriteBuffer()
+ *
+ * writes a binary blob to the pdump param stream containing the
+ * current contents of the memory, and returns the filename and offset
+ * of where that blob is located (for use in a subsequent LDB, for
+ * example)
+ *
+ * Caller to provide buffer to receive filename, and declare the size
+ * of that buffer
+ */
+extern PVRSRV_ERROR
+PDumpWriteBuffer(IMG_UINT8 *pcBuffer,
+                 IMG_SIZE_T uiNumBytes,
+                 PDUMP_FLAGS_T uiPDumpFlags,
+                 IMG_CHAR *pszFilenameOut,
+                 IMG_SIZE_T uiFilenameBufSz,
+                 PDUMP_FILEOFFSET_T *puiOffsetOut);
+
+#endif /* #ifndef SRVSRV_PDUMP_PHYSMEM_H */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/pdump_symbolicaddr.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/pdump_symbolicaddr.h
new file mode 100644
index 0000000..ed912a5
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/pdump_symbolicaddr.h
@@ -0,0 +1,55 @@
+/**************************************************************************/ /*!
+@File
+@Title          Abstraction of PDUMP symbolic address derivation
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Allows pdump functions to derive symbolic addresses on-the-fly
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef SRVKM_PDUMP_SYMBOLICADDR_H
+#define SRVKM_PDUMP_SYMBOLICADDR_H
+
+#include "img_types.h"
+
+#include "pvrsrv_error.h"
+
+/* pdump symbolic addresses are generated on-the-fly with a callback */
+
+typedef PVRSRV_ERROR (*PVRSRV_SYMADDRFUNCPTR)(IMG_HANDLE hPriv, IMG_UINT32 uiOffset, IMG_CHAR *pszSymbolicAddr, IMG_UINT32 ui32SymbolicAddrLen, IMG_UINT32 *pui32NewOffset);
+
+#endif /* #ifndef SRVKM_PDUMP_SYMBOLICADDR_H */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/physmem.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/physmem.h
new file mode 100644
index 0000000..c7389c9
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/physmem.h
@@ -0,0 +1,102 @@
+/*************************************************************************/ /*!
+@File
+@Title          Physmem header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for common entry point for creation of RAM backed PMR's
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _SRVSRV_PHYSMEM_H_
+#define _SRVSRV_PHYSMEM_H_
+
+/* include/ */
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+
+/* services/server/include/ */
+#include "pmr.h"
+#include "pmr_impl.h"
+
+/*
+ * PhysmemNewRamBackedPMR
+ *
+ * This function will create a RAM backed PMR using the device specific
+ * callback, this allows control at a per-devicenode level to select the
+ * memory source thus supporting mixed UMA/LMA systems.
+ *
+ * The size must be a multiple of page size.  The page size is
+ * specified in log2.  It should be regarded as a minimum contiguity
+ * of which the that the resulting memory must be a multiple.  It may
+ * be that this should be a fixed number.  It may be that the
+ * allocation size needs to be a multiple of some coarser "page size"
+ * than that specified in the page size argument.  For example, take
+ * an OS whose page granularity is a fixed 16kB, but the caller
+ * requests memory in page sizes of 4kB.  The request can be satisfied
+ * if and only if the SIZE requested is a multiple of 16kB.  If the
+ * arguments supplied are such that this OS cannot grant the request,
+ * PVRSRV_ERROR_INVALID_PARAMS will be returned.
+ *
+ * The caller should supply storage of a pointer.  Upon successful
+ * return a PMR object will have been created and a pointer to it
+ * returned in the PMROut argument.
+ *
+ * A PMR thusly created should be destroyed with PhysmemUnrefPMR.
+ *
+ * Note that this function may cause memory allocations and on some
+ * OSes this may cause scheduling events, so it is important that this
+ * function be called with interrupts enabled and in a context where
+ * scheduling events and memory allocations are permitted.
+ *
+ * The flags may be used by the implementation to change its behaviour
+ * if required.  The flags will also be stored in the PMR as immutable
+ * metadata and returned to mmu_common when it asks for it.
+ *
+ */
+extern PVRSRV_ERROR
+PhysmemNewRamBackedPMR(PVRSRV_DEVICE_NODE *psDevNode,
+                       IMG_DEVMEM_SIZE_T uiSize,
+                       IMG_DEVMEM_SIZE_T uiChunkSize,
+                       IMG_UINT32 ui32NumPhysChunks,
+                       IMG_UINT32 ui32NumVirtChunks,
+                       IMG_BOOL *pabMappingTable,
+                       IMG_UINT32 uiLog2PageSize,
+                       PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                       PMR **ppsPMROut);
+
+#endif /* _SRVSRV_PHYSMEM_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/physmem_dmabuf.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/physmem_dmabuf.h
new file mode 100644
index 0000000..a22fe4d
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/physmem_dmabuf.h
@@ -0,0 +1,77 @@
+/**************************************************************************/ /*!
+@File           physmem_dmabuf.h
+@Title          Header for dmabuf PMR factory
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Part of the memory management. This module is responsible for
+                implementing the function callbacks importing Ion allocations
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#if !defined(_PHYSMEM_DMABUF_H_)
+#define _PHYSMEM_DMABUF_H_
+
+#include <linux/dma-buf.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+#include "connection_server.h"
+
+#include "pmr.h"
+
+typedef PVRSRV_ERROR (*PFN_DESTROY_DMABUF_PMR)(PHYS_HEAP *psHeap,
+					       struct dma_buf_attachment *psAttachment);
+
+PVRSRV_ERROR
+PhysmemCreateNewDmaBufBackedPMR(PHYS_HEAP *psHeap,
+				struct dma_buf_attachment *psAttachment,
+				PFN_DESTROY_DMABUF_PMR pfnDestroy,
+				PVRSRV_MEMALLOCFLAGS_T uiFlags,
+				PMR **ppsPMRPtr);
+
+#if defined(SUPPORT_ION)
+PVRSRV_ERROR
+PhysmemImportDmaBuf(CONNECTION_DATA *psConnection,
+					IMG_INT fd,
+					PVRSRV_MEMALLOCFLAGS_T uiFlags,
+					PMR **ppsPMRPtr,
+					IMG_DEVMEM_SIZE_T *puiSize,
+					IMG_DEVMEM_ALIGN_T *puiAlign);
+#endif
+
+#endif /* !defined(_PHYSMEM_DMABUF_H_) */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/physmem_lma.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/physmem_lma.h
new file mode 100644
index 0000000..552c1e0
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/physmem_lma.h
@@ -0,0 +1,84 @@
+/**************************************************************************/ /*!
+@File
+@Title          Header for local card memory allocator
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Part of the memory management. This module is responsible for
+                implementing the function callbacks for local card memory.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef _SRVSRV_PHYSMEM_LMA_H_
+#define _SRVSRV_PHYSMEM_LMA_H_
+
+/* include/ */
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+
+/* services/server/include/ */
+#include "pmr.h"
+#include "pmr_impl.h"
+
+/*
+ * PhysmemNewLocalRamBackedPMR
+ *
+ * This function will create a PMR using the local card memory and is OS
+ * agnostic.
+ */
+PVRSRV_ERROR
+PhysmemNewLocalRamBackedPMR(PVRSRV_DEVICE_NODE *psDevNode,
+							IMG_DEVMEM_SIZE_T uiSize,
+							IMG_DEVMEM_SIZE_T uiChunkSize,
+							IMG_UINT32 ui32NumPhysChunks,
+							IMG_UINT32 ui32NumVirtChunks,
+							IMG_BOOL *pabMappingTable,
+							IMG_UINT32 uiLog2PageSize,
+							PVRSRV_MEMALLOCFLAGS_T uiFlags,
+							PMR **ppsPMRPtr);
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+/*
+ * Define some helper list functions for the virtualization validation code
+ */
+
+IMG_VOID	InsertPidOSidsCoupling(IMG_PID pId, IMG_UINT32 ui32OSid, IMG_UINT32 ui32OSidReg);
+IMG_VOID	RetrieveOSidsfromPidList(IMG_PID pId, IMG_UINT32 *pui32OSid, IMG_UINT32 *pui32OSidReg);
+IMG_VOID	RemovePidOSidCoupling(IMG_PID pId);
+#endif
+
+#endif /* #ifndef _SRVSRV_PHYSMEM_LMA_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/physmem_osmem.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/physmem_osmem.h
new file mode 100644
index 0000000..706c859
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/physmem_osmem.h
@@ -0,0 +1,114 @@
+/**************************************************************************/ /*!
+@File
+@Title		PMR implementation of OS derived physical memory
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description	Part of the memory management.  This module is
+                responsible for the an implementation of the "PMR"
+                abstraction.  This interface is for the
+                PhysmemNewOSRamBackedPMR() "PMR Factory" which is
+                responsible for claiming chunks of memory (in
+                particular physically contiguous quanta) from the
+                Operating System.
+
+                As such, this interface will be implemented on a
+                Per-OS basis, in the "env" directory for that system.
+                A dummy implementation is available in
+                physmem_osmem_dummy.c for operating systems that
+                cannot, or do not wish to, offer this functionality.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+#ifndef _SRVSRV_PHYSMEM_OSMEM_H_
+#define _SRVSRV_PHYSMEM_OSMEM_H_
+
+/* include/ */
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+
+/* services/server/include/ */
+#include "pmr.h"
+#include "pmr_impl.h"
+
+/*
+ * PhysmemNewOSRamBackedPMR
+ *
+ * To be overridden on a per-OS basis.
+ *
+ * This function will create a PMR using the default "OS supplied" physical pages
+ * method, assuming such is available on a particular operating system.  (If not,
+ * PVRSRV_ERROR_NOT_SUPPORTED should be returned)
+ */
+extern PVRSRV_ERROR
+PhysmemNewOSRamBackedPMR(PVRSRV_DEVICE_NODE *psDevNode,
+                         IMG_DEVMEM_SIZE_T uiSize,
+						 IMG_DEVMEM_SIZE_T uiChunkSize,
+						 IMG_UINT32 ui32NumPhysChunks,
+						 IMG_UINT32 ui32NumVirtChunks,
+						 IMG_BOOL *pabMappingTable,
+                         IMG_UINT32 uiLog2PageSize,
+                         PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                         PMR **ppsPMROut);
+
+/*
+ * PhysmemNewTDMetaCodePMR
+ *
+ * This function is used as part of the facility to provide secure META firmware
+ * memory. A default implementation is provided which must be replaced by the SoC
+ * implementor.
+ *
+ * Calling this function will return a PMR for a memory allocation made in "secure
+ * META code memory". It will only be writable by a hypervisor, and when the feature
+ * is enabled on the SoC, the META will only be able to perform instruction reads from
+ * memory that is secured that way.
+ */
+PVRSRV_ERROR
+PhysmemNewTDMetaCodePMR(PVRSRV_DEVICE_NODE *psDevNode,
+                        IMG_DEVMEM_SIZE_T uiSize,
+                        IMG_UINT32 uiLog2PageSize,
+                        PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                        PMR **ppsPMRPtr);
+
+PVRSRV_ERROR
+PhysmemNewTDSecureBufPMR(PVRSRV_DEVICE_NODE *psDevNode,
+                         IMG_DEVMEM_SIZE_T uiSize,
+                         IMG_UINT32 uiLog2PageSize,
+                         PVRSRV_MEMALLOCFLAGS_T uiFlags,
+                         PMR **ppsPMRPtr);
+
+
+#endif /* #ifndef _SRVSRV_PHYSMEM_OSMEM_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/pmr.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/pmr.h
new file mode 100644
index 0000000..26bda9b
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/pmr.h
@@ -0,0 +1,823 @@
+/**************************************************************************/ /*!
+@File
+@Title		Physmem (PMR) abstraction
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description	Part of the memory management.  This module is responsible for
+                the "PMR" abstraction.  A PMR (Physical Memory Resource)
+                represents some unit of physical memory which is
+                allocated/freed/mapped/unmapped as an indivisible unit
+                (higher software levels provide an abstraction above that
+                to deal with dividing this down into smaller manageable units).
+                Importantly, this module knows nothing of virtual memory, or
+                of MMUs etc., with one excuseable exception.  We have the
+                concept of a "page size", which really means nothing in
+                physical memory, but represents a "contiguity quantum" such
+                that the higher level modules which map this memory are able
+                to verify that it matches the needs of the page size for the
+                virtual realm into which it is being mapped.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef _SRVSRV_PMR_H_
+#define _SRVSRV_PMR_H_
+
+/* include/ */
+#include "img_types.h"
+#include "pdumpdefs.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+#include "devicemem_typedefs.h"			/* Required for export DEVMEM_EXPORTCOOKIE */
+
+/* services/include */
+#include "pdump.h"
+
+/* services/server/include/ */
+#include "pmr_impl.h"
+#include "physheap.h"
+/* A typical symbolic address for physical memory may look like:
+   :MEMORYSPACE:SUBSYS_NNNNNN_0X1234567890_XYZ.  That example is quite
+   extreme, they are likely shorter than that.  We'll make the define
+   here plentiful, however, note that this is _advisory_ not
+   _mandatory_ - in other words, it's the allocator's responsibility
+   to choose the amount of memory to set aside, and it's up to us to
+   honour the size passed in by the caller.  i.e. this define is for
+   GUIDANCE ONLY.
+*/
+#define PMR_MAX_SYMBOLIC_ADDRESS_LENGTH_DEFAULT		(60)
+#define PMR_MAX_MEMSPACE_NAME_LENGTH_DEFAULT		(20)
+#define PMR_MAX_MEMSPNAME_SYMB_ADDR_LENGTH_DEFAULT	(PMR_MAX_SYMBOLIC_ADDRESS_LENGTH_DEFAULT + PMR_MAX_MEMSPACE_NAME_LENGTH_DEFAULT)
+#define PMR_MAX_PARAMSTREAM_FILENAME_LENGTH_DEFAULT (100)
+#define PMR_MAX_TRANSLATION_STACK_ALLOC				(32)
+
+typedef IMG_UINT64 PMR_BASE_T;
+typedef IMG_UINT64 PMR_SIZE_T;
+#define PMR_SIZE_FMTSPEC "0x%010llX"
+#define PMR_VALUE32_FMTSPEC "0x%08X"
+#define PMR_VALUE64_FMTSPEC "0x%016llX"
+typedef IMG_UINT32 PMR_LOG2ALIGN_T;
+typedef IMG_UINT64 PMR_PASSWORD_T;
+
+typedef struct _PMR_ PMR;
+typedef struct _PMR_EXPORT_ PMR_EXPORT;
+
+typedef struct _PMR_PAGELIST_ PMR_PAGELIST;
+
+struct _PVRSRV_DEVICE_NODE_;
+
+/*
+ * PMRCreatePMR
+ *
+ * Not to be called directly, only via implementations of PMR
+ * factories, e.g. in physmem_osmem.c, deviceclass.c, etc.
+ *
+ * Creates a PMR object, with callbacks and private data as per the
+ * FuncTab/PrivData args.
+ *
+ * Note that at creation time the PMR must set in stone the "logical
+ * size" and the "contiguity guarantee"
+ *
+ * Flags are also set at this time.  (T.B.D.  flags also immutable for
+ * the life of the PMR?)
+ *
+ * Logical size is the amount of Virtual space this allocation would
+ * take up when mapped.  Note that this does not have to be the same
+ * as the actual physical size of the memory.  For example, consider
+ * the sparsely allocated non-power-of-2 texture case.  In this
+ * instance, the "logical size" would be the virtual size of the
+ * rounded-up power-of-2 texture.  That some pages of physical memory
+ * may not exist does not affect the logical size calculation.
+ *
+ * The PMR must also supply the "contiguity guarantee" which is the
+ * finest granularity of alignment and size of physical pages that the
+ * PMR will provide after LockSysPhysAddresses is called.  Note that
+ * the calling code may choose to call PMRSysPhysAddr with a finer
+ * granularity than this, for example if it were to map into a device
+ * MMU with a smaller page size, and it's also OK for the PMR to
+ * supply physical memory in larger chunks than this.  But
+ * importantly, never the other way around.
+ *
+ * More precisely, the following inequality must be maintained
+ * whenever mappings and/or physical addresses exist:
+ *
+ *       (device MMU page size) <= 2**(uiLog2ContiguityGuarantee) <= (actual contiguity of physical memory)
+ *
+ *
+ * Note also that the implementation may supply pszPDumpFlavour and
+ * pszPDumpDefaultMemspaceName, which are irrelevant where the PMR
+ * implementation overrides the default symbolic name construction
+ * routine.  Where the function pointer for PDump symbolic name
+ * derivation is not overridden (i.e. IMG_NULL appears in the relevant
+ * entry of the functab) and default implementation shall be used
+ * which will copy the PDumpDefaultMemspaceName into the namespace
+ * argument, and create the symbolic name by concatenating the
+ * "PDumpFlavour" and a numeric representation of the PMR's serial
+ * number.
+ *
+ * The implementation must guarantee that the storage for these two
+ * strings, and the function table, are maintained, as copies are not
+ * made, the pointer is simply stored.
+ *
+ * The function table will contain the following callbacks which may
+ * be overridden by the PMR implementation:
+ *
+ * pfnLockPhysAddresses
+ *
+ *      Called when someone locks requests that Physical pages are to
+ *      be locked down via the PMRLockSysPhysAddresses() API.  Note
+ *      that if physical pages are prefaulted at PMR creation time and
+ *      therefore static, it would not be necessary to override this
+ *      function, in which case IMG_NULL may be supplied.
+ *
+ * pfnUnlockPhysAddresses
+ *
+ *      The reverse of pfnLockPhysAddresses.  Note that this should be
+ *      IMG_NULL if and only if pfnLockPhysAddresses is IMG_NULL
+ *
+ * pfnSysPhysAddr
+ *
+ *      This function is mandatory.  This is the one which returns the
+ *      system physical address for a given offset into this PMR.  The
+ *      "lock" function will have been called, if overridden, before
+ *      this function, thus the implementation should not increase any
+ *      refcount when answering this call.  Refcounting, if necessary,
+ *      should be done in the lock/unlock calls.  Refcounting would
+ *      not be necessary in the prefaulted/static scenario, as the
+ *      pmr.c abstraction will handle the refcounting for the whole
+ *      PMR.
+ *
+ * pfnPDumpSymbolicAddr
+ *
+ *      Derives the PDump symbolic address for the given offset.  The
+ *      default implementation will copy the PDumpDefaultMemspaceName
+ *      into the namespace argument (or use SYSMEM if none was
+ *      supplied), and create the symbolic name by concatenating the
+ *      "PDumpFlavour" and a numeric representation of the PMR's
+ *      serial number.
+ *
+ * pfnFinalize
+ *
+ *      Called when the PMR's refcount reaches zero and it gets
+ *      destroyed.  This allows the implementation to free up any
+ *      resource acquired during creation time.
+ *
+ */
+extern PVRSRV_ERROR
+PMRCreatePMR(PHYS_HEAP *psPhysHeap,
+             PMR_SIZE_T uiLogicalSize,
+             PMR_SIZE_T uiChunkSize,
+             IMG_UINT32 ui32NumPhysChunks,
+             IMG_UINT32 ui32NumVirtChunks,
+             IMG_BOOL *pabMappingTable,
+             PMR_LOG2ALIGN_T uiLog2ContiguityGuarantee,
+             PMR_FLAGS_T uiFlags,
+             const IMG_CHAR *pszPDumpFlavour,
+             const PMR_IMPL_FUNCTAB *psFuncTab,
+             PMR_IMPL_PRIVDATA pvPrivData,
+             PMR **ppsPMRPtr,
+             IMG_HANDLE *phPDumpAllocInfo,
+             IMG_BOOL bForcePersistent);
+
+/*
+ * PMRLockSysPhysAddresses()
+ *
+ * Calls the relevant callback to lock down the system physical addresses of the memory that makes up the whole PMR.
+ *
+ * Before this call, it is not valid to use any of the information
+ * getting APIs: PMR_Flags(), PMR_SysPhysAddr(),
+ * PMR_PDumpSymbolicAddr() [ see note below about lock/unlock
+ * semantics ]
+ *
+ * The caller of this function does not have to care about how the PMR
+ * is implemented.  He only has to know that he is allowed access to
+ * the physical addresses _after_ calling this function and _until_
+ * calling PMRUnlockSysPhysAddresses().
+ *
+ *
+ * Notes to callback implementers (authors of PMR Factories):
+ *
+ * Some PMR implementations will be such that the physical memory
+ * exists for the lifetime of the PMR, with a static address, (and
+ * normally flags and symbolic address are static too) and so it is
+ * legal for a PMR implementation to not provide an implementation for
+ * the lock callback.
+ *
+ * Some PMR implementation may wish to page memory in from secondary
+ * storage on demand.  The lock/unlock callbacks _may_ be the place to
+ * do this.  (more likely, there would be a separate API for doing
+ * this, but this API provides a useful place to assert that it has
+ * been done)
+ */
+
+extern PVRSRV_ERROR
+PMRLockSysPhysAddresses(PMR *psPMR,
+                        IMG_UINT32 uiLog2DevPageSize);
+
+extern PVRSRV_ERROR
+PMRLockSysPhysAddressesNested(PMR *psPMR,
+                        IMG_UINT32 uiLog2DevPageSize,
+                        IMG_UINT32 ui32NestingLevel);
+
+/*
+ * PMRUnlockSysPhysAddresses()
+ *
+ * the reverse of PMRLockSysPhysAddresses()
+ */
+extern PVRSRV_ERROR
+PMRUnlockSysPhysAddresses(PMR *psPMR);
+
+IMG_VOID PMRLock(void);
+IMG_VOID PMRUnlock(void);
+IMG_BOOL PMRIsLocked(void);
+IMG_BOOL PMRIsLockedByMe(void);
+
+/*
+ * PhysmemPMRExport()
+ *
+ * Given a PMR, creates a PMR "Export", which is a handle that
+ * provides sufficient data to be able to "import" this PMR elsewhere.
+ * The PMR Export is an object in its own right, whose existance
+ * implies a reference on the PMR, thus the PMR cannot be destroyed
+ * while the PMR Export exists.  The intention is that the PMR Export
+ * will be wrapped in the devicemem layer by a cross process handle,
+ * and some IPC by which to communicate the handle value and password
+ * to other processes.  The receiving process is able to unwrap this
+ * to gain access to the same PMR Export in this layer, and, via
+ * PhysmemPMRImport(), obtain a reference to the original PMR.
+ *
+ * The caller receives, along with the PMR Export object, information
+ * about the size and contiguity guarantee for the PMR, and also the
+ * PMRs secret password, in order to authenticate the subsequent
+ * import.
+ *
+ * N.B.  If you call PMRExportPMR() (and it succeeds), you are
+ * promising to later call PMRUnexportPMR()
+ */
+extern PVRSRV_ERROR
+PMRExportPMR(PMR *psPMR,
+             PMR_EXPORT **ppsPMRExport,
+             PMR_SIZE_T *puiSize,
+             PMR_LOG2ALIGN_T *puiLog2Contig,
+             PMR_PASSWORD_T *puiPassword);
+
+/*
+ * PMRMakeServerExportClientExport()
+ * 
+ * This is a "special case" function for making a server export cookie
+ * which went through the direct bridge into an export cookie that can
+ * be passed through the client bridge.
+ */
+PVRSRV_ERROR
+PMRMakeServerExportClientExport(DEVMEM_EXPORTCOOKIE *psPMRExportIn,
+								PMR_EXPORT **ppsPMRExportPtr,
+								PMR_SIZE_T *puiSize,
+								PMR_LOG2ALIGN_T *puiLog2Contig,
+								PMR_PASSWORD_T *puiPassword);
+
+PVRSRV_ERROR
+PMRUnmakeServerExportClientExport(PMR_EXPORT *psPMRExport);
+
+/*
+ * PMRUnexporPMRt()
+ *
+ * The reverse of PMRExportPMR().  This causes the PMR to no
+ * longer be exported.  If the PMR has already been imported, the
+ * imported PMR reference will still be valid, but no further imports
+ * will be possible.
+ */
+extern PVRSRV_ERROR
+PMRUnexportPMR(PMR_EXPORT *psPMRExport);
+
+/*
+ * PMRImportPMR()
+ *
+ * Takes a PMR Export object, as obtained by PMRExportPMR(), and
+ * obtains a reference to the original PMR.
+ *
+ * The password must match, and is assumed to have been (by whatever
+ * means, IPC etc.) preserved intact from the former call to
+ * PMRExportPMR()
+ *
+ * The size and contiguity arguments are entirely irrelevant for the
+ * import, however they are verified in order to trap bugs.
+ *
+ * N.B.  If you call PhysmemPMRImport() (and it succeeds), you are
+ * promising to later call PhysmemPMRUnimport()
+ */
+extern PVRSRV_ERROR
+PMRImportPMR(PMR_EXPORT *psPMRExport,
+             PMR_PASSWORD_T uiPassword,
+             PMR_SIZE_T uiSize,
+             PMR_LOG2ALIGN_T uiLog2Contig,
+             PMR **ppsPMR);
+
+/*
+ * PMRUnimportPMR()
+ *
+ * releases the reference on the PMR as obtained by PMRImportPMR()
+ */
+extern PVRSRV_ERROR
+PMRUnimportPMR(PMR *psPMR);
+
+PVRSRV_ERROR
+PMRLocalImportPMR(PMR *psPMR,
+				  PMR **ppsPMR,
+				  IMG_DEVMEM_SIZE_T *puiSize,
+				  IMG_DEVMEM_ALIGN_T *puiAlign);
+
+/*
+ * Equivalent mapping functions when in kernel mode - TOOD: should
+ * unify this and the PMRAcquireMMapArgs API with a suitable
+ * abstraction
+ */
+extern PVRSRV_ERROR
+PMRAcquireKernelMappingData(PMR *psPMR,
+                            IMG_SIZE_T uiLogicalOffset,
+                            IMG_SIZE_T uiSize,
+                            IMG_VOID **ppvKernelAddressOut,
+                            IMG_SIZE_T *puiLengthOut,
+                            IMG_HANDLE *phPrivOut);
+
+extern PVRSRV_ERROR
+PMRAcquireSparseKernelMappingData(PMR *psPMR,
+                                  IMG_SIZE_T uiLogicalOffset,
+                                  IMG_SIZE_T uiSize,
+                                  IMG_VOID **ppvKernelAddressOut,
+                                  IMG_SIZE_T *puiLengthOut,
+                                  IMG_HANDLE *phPrivOut);
+
+extern PVRSRV_ERROR
+PMRReleaseKernelMappingData(PMR *psPMR,
+                            IMG_HANDLE hPriv);
+
+
+/*
+ * PMR_ReadBytes()
+ *
+ * calls into the PMR implementation to read up to uiBufSz bytes,
+ * returning the actual number read in *puiNumBytes
+ *
+ * this will read up to the end of the PMR, or the next symbolic name
+ * boundary, or until the requested number of bytes is read, whichever
+ * comes first
+ *
+ * In the case of sparse PMR's the caller doesn't know what offsets are
+ * valid and which ones aren't so we will just write 0 to invalid offsets
+ */
+extern PVRSRV_ERROR
+PMR_ReadBytes(PMR *psPMR,
+              IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+              IMG_UINT8 *pcBuffer,
+              IMG_SIZE_T uiBufSz,
+              IMG_SIZE_T *puiNumBytes);
+
+/*
+ * PMR_WriteBytes()
+ *
+ * calls into the PMR implementation to write up to uiBufSz bytes,
+ * returning the actual number read in *puiNumBytes
+ *
+ * this will write up to the end of the PMR, or the next symbolic name
+ * boundary, or until the requested number of bytes is written, whichever
+ * comes first
+ *
+ * In the case of sparse PMR's the caller doesn't know what offsets are
+ * valid and which ones aren't so we will just ignore data at invalid offsets
+ */
+extern PVRSRV_ERROR
+PMR_WriteBytes(PMR *psPMR,
+			   IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+               IMG_UINT8 *pcBuffer,
+               IMG_SIZE_T uiBufSz,
+               IMG_SIZE_T *puiNumBytes);
+
+/*
+ * PMRRefPMR()
+ *
+ * Take a reference on the passed in PMR
+ */
+extern IMG_VOID
+PMRRefPMR(PMR *psPMR);
+
+/*
+ * PMRUnrefPMR()
+ *
+ * This undoes a call to any of the PhysmemNew* family of APIs
+ * (i.e. any PMR factory "constructor")
+ *
+ * This relinquishes a reference to the PMR, and, where the refcount
+ * reaches 0, causes the PMR to be destroyed (calling the finalizer
+ * callback on the PMR, if there is one)
+ */
+extern PVRSRV_ERROR
+PMRUnrefPMR(PMR *psPMR);
+
+
+/*
+ * PMR_Flags()
+ *
+ * Flags are static and guaranteed for the life of the PMR.  Thus this
+ * function is idempotent and acquire/release semantics is not
+ * required.
+ *
+ * Returns the flags as specified on the PMR.  The flags are to be
+ * interpreted as mapping permissions
+ */
+extern PVRSRV_ERROR
+PMR_Flags(const PMR *psPMR,
+          PMR_FLAGS_T *puiMappingFlags);
+
+
+extern PVRSRV_ERROR
+PMR_LogicalSize(const PMR *psPMR,
+				IMG_DEVMEM_SIZE_T *puiLogicalSize);
+
+/*
+ * PMR_SysPhysAddr()
+ *
+ * A note regarding Lock/Unlock semantics
+ * ======================================
+ *
+ * PMR_SysPhysAddr may only be called after PMRLockSysPhysAddresses()
+ * has been called.  The data returned may be used only until
+ * PMRUnlockSysPhysAddresses() is called after which time the licence
+ * to use the data is revoked and the information may be invalid.
+ *
+ * Given an offset, this function returns the device physical address of the
+ * corresponding page in the PMR.  It may be called multiple times
+ * until the address of all relevant pages has been determined.
+ *
+ * If caller only wants one physical address it is sufficient to pass in:
+ * ui32Log2PageSize==0 and ui32NumOfPages==1
+ */
+extern PVRSRV_ERROR
+PMR_DevPhysAddr(const PMR *psPMR,
+                IMG_UINT32 ui32Log2PageSize,
+                IMG_UINT32 ui32NumOfPages,
+                IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                IMG_DEV_PHYADDR *psDevAddr,
+                IMG_BOOL *pbValid);
+
+/*
+ * PMR_CpuPhysAddr()
+ *
+ * See note above about Lock/Unlock semantics.
+ *
+ * Given an offset, this function returns the CPU physical address of the
+ * corresponding page in the PMR.  It may be called multiple times
+ * until the address of all relevant pages has been determined.
+ *
+ */
+extern PVRSRV_ERROR
+PMR_CpuPhysAddr(const PMR *psPMR,
+                IMG_UINT32 ui32Log2PageSize,
+                IMG_UINT32 ui32NumOfPages,
+                IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                IMG_CPU_PHYADDR *psCpuAddrPtr,
+                IMG_BOOL *pbValid);
+
+PVRSRV_ERROR
+PMRGetUID(PMR *psPMR,
+		  IMG_UINT64 *pui64UID);
+
+#if defined(PDUMP)
+/*
+ * PMR_PDumpSymbolicAddr()
+ *
+ * Given an offset, returns the pdump memspace name and symbolic
+ * address of the corresponding page in the PMR.
+ *
+ * Note that PDump memspace names and symbolic addresses are static
+ * and valid for the lifetime of the PMR, therefore we don't require
+ * acquire/release semantics here.
+ *
+ * Note that it is expected that the pdump "mapping" code will call
+ * this function multiple times as each page is mapped in turn
+ *
+ * Note that NextSymName is the offset from the base of the PMR to the
+ * next pdump symbolic address (or the end of the PMR if the PMR only
+ * had one PDUMPMALLOC
+ */
+extern PVRSRV_ERROR
+PMR_PDumpSymbolicAddr(const PMR *psPMR,
+                      IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                      IMG_UINT32 ui32NamespaceNameLen,
+                      IMG_CHAR *pszNamespaceName,
+                      IMG_UINT32 ui32SymbolicAddrLen,
+                      IMG_CHAR *pszSymbolicAddr,
+                      IMG_DEVMEM_OFFSET_T *puiNewOffset,
+		      IMG_DEVMEM_OFFSET_T *puiNextSymName
+                      );
+
+/*
+ * PMRPDumpLoadMemValue32()
+ *
+ * writes the current contents of a dword in PMR memory to the pdump
+ * script stream. Useful for patching a buffer by simply editing the
+ * script output file in ASCII plain text.
+ *
+ */
+extern PVRSRV_ERROR
+PMRPDumpLoadMemValue32(PMR *psPMR,
+			         IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                     IMG_UINT32 ui32Value,
+                     PDUMP_FLAGS_T uiPDumpFlags);
+
+/*
+ * PMRPDumpLoadMemValue64()
+ *
+ * writes the current contents of a dword in PMR memory to the pdump
+ * script stream. Useful for patching a buffer by simply editing the
+ * script output file in ASCII plain text.
+ *
+ */
+extern PVRSRV_ERROR
+PMRPDumpLoadMemValue64(PMR *psPMR,
+			         IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                     IMG_UINT64 ui64Value,
+                     PDUMP_FLAGS_T uiPDumpFlags);
+
+/*
+ * PMRPDumpLoadMem()
+ *
+ * writes the current contents of the PMR memory to the pdump PRM
+ * stream, and emits some PDump code to the script stream to LDB said
+ * bytes from said file. If bZero is IMG_TRUE then the PDump zero page
+ * is used as the source for the LDB.
+ *
+ */
+extern PVRSRV_ERROR
+PMRPDumpLoadMem(PMR *psPMR,
+                IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                IMG_DEVMEM_SIZE_T uiSize,
+                PDUMP_FLAGS_T uiPDumpFlags,
+                IMG_BOOL bZero);
+
+/*
+ * PMRPDumpSaveToFile()
+ *
+ * emits some PDump that does an SAB (save bytes) using the PDump
+ * symbolic address of the PMR.  Note that this is generally not the
+ * preferred way to dump the buffer contents.  There is an equivalent
+ * function in devicemem_server.h which also emits SAB but using the
+ * virtual address, which is the "right" way to dump the buffer
+ * contents to a file.  This function exists just to aid testing by
+ * providing a means to dump the PMR directly by symbolic address
+ * also.
+ */
+extern PVRSRV_ERROR
+PMRPDumpSaveToFile(const PMR *psPMR,
+                   IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                   IMG_DEVMEM_SIZE_T uiSize,
+                   IMG_UINT32 uiArraySize,
+                   const IMG_CHAR *pszFilename);
+#else	/* PDUMP */
+
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PMR_PDumpSymbolicAddr)
+#endif
+static INLINE PVRSRV_ERROR
+PMR_PDumpSymbolicAddr(const PMR *psPMR,
+                      IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                      IMG_UINT32 ui32NamespaceNameLen,
+                      IMG_CHAR *pszNamespaceName,
+                      IMG_UINT32 ui32SymbolicAddrLen,
+                      IMG_CHAR *pszSymbolicAddr,
+                      IMG_DEVMEM_OFFSET_T *puiNewOffset,
+                      IMG_DEVMEM_OFFSET_T *puiNextSymName)
+{
+	PVR_UNREFERENCED_PARAMETER(psPMR);
+	PVR_UNREFERENCED_PARAMETER(uiLogicalOffset);
+	PVR_UNREFERENCED_PARAMETER(ui32NamespaceNameLen);
+	PVR_UNREFERENCED_PARAMETER(pszNamespaceName);
+	PVR_UNREFERENCED_PARAMETER(ui32SymbolicAddrLen);
+	PVR_UNREFERENCED_PARAMETER(pszSymbolicAddr);
+	PVR_UNREFERENCED_PARAMETER(puiNewOffset);
+	PVR_UNREFERENCED_PARAMETER(puiNextSymName);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PMRPDumpLoadMemValue)
+#endif
+static INLINE PVRSRV_ERROR
+PMRPDumpLoadMemValue32(PMR *psPMR,
+			         IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                     IMG_UINT32 ui32Value,
+                     PDUMP_FLAGS_T uiPDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psPMR);
+	PVR_UNREFERENCED_PARAMETER(uiLogicalOffset);
+	PVR_UNREFERENCED_PARAMETER(ui32Value);
+	PVR_UNREFERENCED_PARAMETER(uiPDumpFlags);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PMRPDumpLoadMemValue)
+#endif
+static INLINE PVRSRV_ERROR
+PMRPDumpLoadMemValue64(PMR *psPMR,
+			         IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                     IMG_UINT64 ui64Value,
+                     PDUMP_FLAGS_T uiPDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psPMR);
+	PVR_UNREFERENCED_PARAMETER(uiLogicalOffset);
+	PVR_UNREFERENCED_PARAMETER(ui64Value);
+	PVR_UNREFERENCED_PARAMETER(uiPDumpFlags);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PMRPDumpLoadMem)
+#endif
+static INLINE PVRSRV_ERROR
+PMRPDumpLoadMem(PMR *psPMR,
+                IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                IMG_DEVMEM_SIZE_T uiSize,
+                PDUMP_FLAGS_T uiPDumpFlags,
+                IMG_BOOL bZero)
+{
+	PVR_UNREFERENCED_PARAMETER(psPMR);
+	PVR_UNREFERENCED_PARAMETER(uiLogicalOffset);
+	PVR_UNREFERENCED_PARAMETER(uiSize);
+	PVR_UNREFERENCED_PARAMETER(uiPDumpFlags);
+	PVR_UNREFERENCED_PARAMETER(bZero);
+	return PVRSRV_OK;
+}
+
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PMRPDumpSaveToFile)
+#endif
+static INLINE PVRSRV_ERROR
+PMRPDumpSaveToFile(const PMR *psPMR,
+                   IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+                   IMG_DEVMEM_SIZE_T uiSize,
+                   IMG_UINT32 uiArraySize,
+                   const IMG_CHAR *pszFilename)
+{
+	PVR_UNREFERENCED_PARAMETER(psPMR);
+	PVR_UNREFERENCED_PARAMETER(uiLogicalOffset);
+	PVR_UNREFERENCED_PARAMETER(uiSize);
+	PVR_UNREFERENCED_PARAMETER(uiArraySize);
+	PVR_UNREFERENCED_PARAMETER(pszFilename);
+	return PVRSRV_OK;
+}
+
+#endif	/* PDUMP */
+
+/* This function returns the private data that a pmr subtype
+   squirrelled in here. We use the function table pointer as
+   "authorization" that this function is being called by the pmr
+   subtype implementation.  We can assume (assert) that.  It would be
+   a bug in the implementation of the pmr subtype if this assertion
+   ever fails. */
+extern IMG_VOID *
+PMRGetPrivateDataHack(const PMR *psPMR,
+                      const PMR_IMPL_FUNCTAB *psFuncTab);
+
+extern PVRSRV_ERROR
+PMRZeroingPMR(PMR *psPMR,
+				IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize);
+
+PVRSRV_ERROR
+PMRDumpPageList(PMR *psReferencePMR,
+					IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize);
+
+extern PVRSRV_ERROR
+PMRWritePMPageList(/* Target PMR, offset, and length */
+                   PMR *psPageListPMR,
+                   IMG_DEVMEM_OFFSET_T uiTableOffset,
+                   IMG_DEVMEM_SIZE_T  uiTableLength,
+                   /* Referenced PMR, and "page" granularity */
+                   PMR *psReferencePMR,
+                   IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize,
+                   PMR_PAGELIST **ppsPageList,
+                   IMG_UINT64 *pui64CheckSum);
+
+/* Doesn't actually erase the page list - just releases the appropriate refcounts */
+extern PVRSRV_ERROR // should be IMG_VOID, surely
+PMRUnwritePMPageList(PMR_PAGELIST *psPageList);
+
+#if defined(PDUMP)
+extern PVRSRV_ERROR
+PMRPDumpPol32(const PMR *psPMR,
+              IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+              IMG_UINT32 ui32Value,
+              IMG_UINT32 ui32Mask,
+              PDUMP_POLL_OPERATOR eOperator,
+              PDUMP_FLAGS_T uiFlags);
+
+extern PVRSRV_ERROR
+PMRPDumpCBP(const PMR *psPMR,
+            IMG_DEVMEM_OFFSET_T uiReadOffset,
+            IMG_DEVMEM_OFFSET_T uiWriteOffset,
+            IMG_DEVMEM_SIZE_T uiPacketSize,
+            IMG_DEVMEM_SIZE_T uiBufferSize);
+#else
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PMRPDumpPol32)
+#endif
+static INLINE PVRSRV_ERROR
+PMRPDumpPol32(const PMR *psPMR,
+              IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+              IMG_UINT32 ui32Value,
+              IMG_UINT32 ui32Mask,
+              PDUMP_POLL_OPERATOR eOperator,
+              PDUMP_FLAGS_T uiFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psPMR);
+	PVR_UNREFERENCED_PARAMETER(uiLogicalOffset);
+	PVR_UNREFERENCED_PARAMETER(ui32Value);
+	PVR_UNREFERENCED_PARAMETER(ui32Mask);
+	PVR_UNREFERENCED_PARAMETER(eOperator);
+	PVR_UNREFERENCED_PARAMETER(uiFlags);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PMRPDumpCBP)
+#endif
+static INLINE PVRSRV_ERROR
+PMRPDumpCBP(const PMR *psPMR,
+            IMG_DEVMEM_OFFSET_T uiReadOffset,
+            IMG_DEVMEM_OFFSET_T uiWriteOffset,
+            IMG_DEVMEM_SIZE_T uiPacketSize,
+            IMG_DEVMEM_SIZE_T uiBufferSize)
+{
+	PVR_UNREFERENCED_PARAMETER(psPMR);
+	PVR_UNREFERENCED_PARAMETER(uiReadOffset);
+	PVR_UNREFERENCED_PARAMETER(uiWriteOffset);
+	PVR_UNREFERENCED_PARAMETER(uiPacketSize);
+	PVR_UNREFERENCED_PARAMETER(uiBufferSize);
+	return PVRSRV_OK;
+}
+#endif
+/*
+ * PMRInit()
+ *
+ * To be called once and only once to initialise the internal data in
+ * the PMR module (mutexes and such)
+ *
+ * Not for general use.  Only PVRSRVInit(); should be calling this.
+ */
+extern PVRSRV_ERROR
+PMRInit(IMG_VOID);
+
+/*
+ * PMRDeInit()
+ *
+ * To be called once and only once to deinitialise the internal data in
+ * the PMR module (mutexes and such) and for debug checks
+ *
+ * Not for general use.  Only PVRSRVDeInit(); should be calling this.
+ */
+extern PVRSRV_ERROR
+PMRDeInit(IMG_VOID);
+
+#if defined(PVR_RI_DEBUG)
+extern PVRSRV_ERROR
+PMRStoreRIHandle(PMR *psPMR,
+				 IMG_PVOID hRIHandle);
+#endif
+
+#endif /* #ifdef _SRVSRV_PMR_H_ */
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/pmr_impl.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/pmr_impl.h
new file mode 100644
index 0000000..ff45b24
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/pmr_impl.h
@@ -0,0 +1,180 @@
+/**************************************************************************/ /*!
+@File
+@Title		Implementation Callbacks for Physmem (PMR) abstraction
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description	Part of the memory management.  This file is for definitions that
+                are private to the world of PMRs, but that needs to be shared between
+                pmr.c itself and the modules that implement the callbacks for the
+                PMR.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef _SRVSRV_PMR_IMPL_H_
+#define _SRVSRV_PMR_IMPL_H_
+
+/* include/ */
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+/* stuff that per-flavour callbacks need to share with pmr.c */
+
+typedef IMG_VOID *PMR_IMPL_PRIVDATA;
+
+typedef PVRSRV_MEMALLOCFLAGS_T PMR_FLAGS_T;
+
+typedef struct {
+    /*
+     * LockPhysAddresses() and UnlockPhysAddresses()
+     *
+     * locks down the physical addresses for the whole PMR.  If memory
+     * is "prefaulted", i.e. exists phsycally at PMR creation time,
+     * then there is no need to override this callback.  The default
+     * implementation is to simply increment a lock-count for
+     * debugging purposes.
+     *
+     * If overridden, this function will be called when someone first
+     * requires a physical addresses, and the UnlockPhysAddresses()
+     * counterpart will be called when the last such reference is
+     * released.
+     *
+     * The PMR implementation may assume that physical addresses will
+     * have been "locked" in this manner before any call is made to
+     * the SysPhysAddr() callback
+     */
+    PVRSRV_ERROR (*pfnLockPhysAddresses)(PMR_IMPL_PRIVDATA pvPriv,
+                                         IMG_UINT32 uiLog2DevPageSize);
+    PVRSRV_ERROR (*pfnUnlockPhysAddresses)(PMR_IMPL_PRIVDATA pvPriv);
+    /*
+     * called iteratively or once to obtain page(s) physical address
+     * ("page" might be device mmu page, or host cpu mmu page, or
+     * something else entirely... the PMR implementation should
+     * make no assumption, and honour the request for a physical
+     * address of any byte in the PMR)
+     *
+     * [ it's the callers responsibility to ensure that no addresses
+     * are missed, by calling this at least as often as once per
+     * "1<<contiguityguarantee" ]
+     *
+     * the LockPhysAddresses callback (if overridden) is guaranteed to
+     * have been called prior to calling this one, and the caller
+     * promises not to rely on the physical address thus obtained
+     * after the UnlockPhysAddresses callback is called.
+     *
+     * Overriding this callback is mandatory in all PMR
+     * implementations.
+     */
+    PVRSRV_ERROR (*pfnDevPhysAddr)(PMR_IMPL_PRIVDATA pvPriv,
+                                   IMG_UINT32 ui32NumOfAddr,
+                                   IMG_DEVMEM_OFFSET_T *puiOffset,
+								   IMG_BOOL *pbValid,
+                                   IMG_DEV_PHYADDR *psDevAddrPtr);
+    /*
+     * called iteratively to obtain PDump symbolic addresses.  Behaves
+     * just like SysPhysAddr callback, except for returning Symbolic
+     * Addresses.
+     *
+     * It is optional to override this callback.  The default
+     * implementation will construct an address from the PMR type and
+     * serial number
+     */
+    PVRSRV_ERROR (*pfnPDumpSymbolicAddr)(PMR_IMPL_PRIVDATA pvPriv,
+                                         IMG_DEVMEM_OFFSET_T uiOffset,
+                                         IMG_CHAR *pszMemspaceName,
+                                         IMG_UINT32 ui32MemspaceNameLen,
+                                         IMG_CHAR *pszSymbolicAddr,
+                                         IMG_UINT32 ui32SymbolicAddrLen,
+                                         IMG_DEVMEM_OFFSET_T *puiNewOffset,
+					 IMG_DEVMEM_OFFSET_T *puiNextSymName);
+    /*
+     * AcquireKernelMappingData()/ReleaseKernelMappingData()
+     *
+     * called to obtain a kernel virtual address for the PMR for use
+     * internally in services.
+     *
+     * It is not necessary to override this callback, but failure to
+     * do so will mean that kernel mappings will not be possible
+     */
+    PVRSRV_ERROR (*pfnAcquireKernelMappingData)(PMR_IMPL_PRIVDATA pvPriv,
+                                                IMG_SIZE_T uiOffset,
+                                                IMG_SIZE_T uiSize,
+                                                IMG_VOID **ppvKernelAddressOut,
+                                                IMG_HANDLE *phHandleOut,
+                                                PMR_FLAGS_T ulFlags);
+    IMG_VOID (*pfnReleaseKernelMappingData)(PMR_IMPL_PRIVDATA pvPriv,
+                                            IMG_HANDLE hHandle);
+    /*
+     * Read up to uiBufSz bytes from the PMR.
+     * The pmr will be already locked.
+     *
+     * Overriding this is optional.  The default implementation will
+     * acquire a kernel virtual address with
+     * pfnAcquireKernelMappingData and OSMemCopy the data directly
+     */
+    PVRSRV_ERROR (*pfnReadBytes)(PMR_IMPL_PRIVDATA pvPriv,
+                                 IMG_DEVMEM_OFFSET_T uiOffset,
+                                 IMG_UINT8 *pcBuffer,
+                                 IMG_SIZE_T uiBufSz,
+                                 IMG_SIZE_T *puiNumBytes);
+
+    /*
+     * Write up to uiBufSz bytes into the PMR.
+     * The pmr will be already locked.
+     *
+     * Overriding this is optional.  The default implementation will
+     * acquire a kernel virtual address with
+     * pfnAcquireKernelMappingData and OSMemCopy the data directly
+     *
+     * Note:
+     * This function callback is optional and unlike pfnReadBytes
+     * isn't required if pfnAcquireKernelMappingData isn't provided
+     */
+    PVRSRV_ERROR (*pfnWriteBytes)(PMR_IMPL_PRIVDATA pvPriv,
+                                  IMG_DEVMEM_OFFSET_T uiOffset,
+                                  IMG_UINT8 *pcBuffer,
+                                  IMG_SIZE_T uiBufSz,
+                                  IMG_SIZE_T *puiNumBytes);
+    /*
+     * Finalize()
+     *
+     * This callback will be called once when the last reference to
+     * the PMR has disappeared.
+     */
+    PVRSRV_ERROR (*pfnFinalize)(PMR_IMPL_PRIVDATA pvPriv);
+} PMR_IMPL_FUNCTAB;
+
+#endif /* of #ifndef _SRVSRV_PHYSMEM_PRIV_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/power.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/power.h
new file mode 100644
index 0000000..ccbd899
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/power.h
@@ -0,0 +1,160 @@
+/*************************************************************************/ /*!
+@File
+@Title          Power Management Functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Main APIs for power management functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef POWER_H
+#define POWER_H
+
+
+#include "pvrsrv_device.h"
+
+/*!
+ *****************************************************************************
+ *	Power management
+ *****************************************************************************/
+ 
+typedef struct _PVRSRV_POWER_DEV_TAG_
+{
+	PFN_PRE_POWER					pfnDevicePrePower;
+	PFN_POST_POWER					pfnDevicePostPower;
+	PFN_SYS_DEV_PRE_POWER			pfnSystemPrePower;
+	PFN_SYS_DEV_POST_POWER			pfnSystemPostPower;
+	PFN_PRE_CLOCKSPEED_CHANGE		pfnPreClockSpeedChange;
+	PFN_POST_CLOCKSPEED_CHANGE		pfnPostClockSpeedChange;
+	PFN_FORCED_IDLE_REQUEST			pfnForcedIdleRequest;
+	PFN_FORCED_IDLE_CANCEL_REQUEST		pfnForcedIdleCancelRequest;
+	PFN_DUST_COUNT_REQUEST			pfnDustCountRequest;
+	IMG_HANDLE						hDevCookie;
+	IMG_UINT32						ui32DeviceIndex;
+	PVRSRV_DEV_POWER_STATE 			eDefaultPowerState;
+	PVRSRV_DEV_POWER_STATE 			eCurrentPowerState;
+	struct _PVRSRV_POWER_DEV_TAG_	*psNext;
+	struct _PVRSRV_POWER_DEV_TAG_	**ppsThis;
+
+} PVRSRV_POWER_DEV;
+
+typedef enum _PVRSRV_INIT_SERVER_STATE_
+{
+	PVRSRV_INIT_SERVER_Unspecified		= -1,	
+	PVRSRV_INIT_SERVER_RUNNING			= 0,	
+	PVRSRV_INIT_SERVER_RAN				= 1,	
+	PVRSRV_INIT_SERVER_SUCCESSFUL		= 2,	
+	PVRSRV_INIT_SERVER_NUM				= 3,	
+	PVRSRV_INIT_SERVER_FORCE_I32 = 0x7fffffff
+
+} PVRSRV_INIT_SERVER_STATE, *PPVRSRV_INIT_SERVER_STATE;
+
+typedef IMG_BOOL (*PFN_SYS_DEV_IS_DEFAULT_STATE_OFF)(PVRSRV_POWER_DEV *psPowerDevice);
+
+IMG_IMPORT
+IMG_BOOL PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_STATE	eInitServerState);
+
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_STATE	eInitServerState, IMG_BOOL bState);
+
+
+
+IMG_IMPORT PVRSRV_ERROR PVRSRVPowerLock(IMG_VOID);
+IMG_IMPORT IMG_VOID PVRSRVForcedPowerLock(IMG_VOID);
+IMG_IMPORT IMG_VOID PVRSRVPowerUnlock(IMG_VOID);
+
+IMG_BOOL PVRSRVDeviceIsDefaultStateOFF(PVRSRV_POWER_DEV *psPowerDevice);
+
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVSetDevicePowerStateKM(IMG_UINT32				ui32DeviceIndex,
+										 PVRSRV_DEV_POWER_STATE	eNewPowerState,
+										 IMG_BOOL				bForced);
+
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVSetPowerStateKM (PVRSRV_SYS_POWER_STATE ePVRState, IMG_BOOL bForced);
+
+/* Type PFN_DC_REGISTER_POWER */
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVRegisterPowerDevice(IMG_UINT32					ui32DeviceIndex,
+									   PFN_PRE_POWER				pfnDevicePrePower,
+									   PFN_POST_POWER				pfnDevicePostPower,
+									   PFN_SYS_DEV_PRE_POWER		pfnSystemPrePower,
+									   PFN_SYS_DEV_POST_POWER		pfnSystemPostPower,
+									   PFN_PRE_CLOCKSPEED_CHANGE	pfnPreClockSpeedChange,
+									   PFN_POST_CLOCKSPEED_CHANGE	pfnPostClockSpeedChange,
+									   PFN_FORCED_IDLE_REQUEST		pfnForcedIdleRequest,
+									   PFN_FORCED_IDLE_CANCEL_REQUEST	pfnForcedIdleCancelRequest,
+									   PFN_DUST_COUNT_REQUEST	pfnDustCountRequest,
+									   IMG_HANDLE					hDevCookie,
+									   PVRSRV_DEV_POWER_STATE		eCurrentPowerState,
+									   PVRSRV_DEV_POWER_STATE		eDefaultPowerState);
+
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVRemovePowerDevice (IMG_UINT32 ui32DeviceIndex);
+
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVGetDevicePowerState(IMG_UINT32 ui32DeviceIndex, PPVRSRV_DEV_POWER_STATE pePowerState);
+
+IMG_IMPORT
+IMG_BOOL PVRSRVIsDevicePowered(IMG_UINT32 ui32DeviceIndex);
+
+IMG_IMPORT
+PVRSRV_ERROR PVRSRVDevicePreClockSpeedChange(IMG_UINT32	ui32DeviceIndex,
+											 IMG_BOOL	bIdleDevice,
+											 IMG_VOID	*pvInfo);
+
+IMG_IMPORT
+IMG_VOID PVRSRVDevicePostClockSpeedChange(IMG_UINT32	ui32DeviceIndex,
+										  IMG_BOOL		bIdleDevice,
+										  IMG_VOID		*pvInfo);
+
+PVRSRV_ERROR PVRSRVDeviceIdleRequestKM(IMG_BOOL					bAllDevices,
+					IMG_UINT32				ui32DeviceIndex,
+					PFN_SYS_DEV_IS_DEFAULT_STATE_OFF	pfnCheckIdleReq,
+					IMG_BOOL				bDeviceOffPermitted);
+
+PVRSRV_ERROR PVRSRVDeviceIdleCancelRequestKM(IMG_BOOL			bAllDevices,
+						IMG_UINT32		ui32DeviceIndex);
+
+PVRSRV_ERROR PVRSRVDeviceDustCountChange(IMG_UINT32	ui32DeviceIndex,
+						IMG_UINT32	ui32DustCount);
+
+
+#endif /* POWER_H */
+
+/******************************************************************************
+ End of file (power.h)
+******************************************************************************/
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/process_stats.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/process_stats.h
new file mode 100644
index 0000000..39b1534
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/process_stats.h
@@ -0,0 +1,168 @@
+/*************************************************************************/ /*!
+@File
+@Title          Functions for creating and reading proc filesystem entries.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __PROCESS_STATS_H__
+#define __PROCESS_STATS_H__
+
+#include "pvrsrv_error.h"
+
+/*
+ *  The publishing of Process Stats is controlled by the
+ *  PVRSRV_ENABLE_PROCESS_STATS build option. The recording of all Memory
+ *  allocations is controlled by the PVRSRV_ENABLE_MEMORY_STATS build option.
+ * 
+ *  Note: There will be a performance degradation with memory allocation
+ *        recording enabled!
+ */
+
+
+/*
+ *  Memory types which can be tracked...
+ */
+typedef enum {
+    PVRSRV_MEM_ALLOC_TYPE_KMALLOC,				/* memory allocated by kmalloc() */
+    PVRSRV_MEM_ALLOC_TYPE_VMALLOC,				/* memory allocated by kmalloc() */
+    PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA,	/* pages allocated from UMA to hold page table information */
+    PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA,			/* ALLOC_PAGES_PT_UMA mapped to kernel address space */
+    PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA,	/* pages allocated from LMA to hold page table information */
+    PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA,		/* ALLOC_PAGES_PT_LMA mapped to kernel address space */
+    PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES,		/* pages allocated from LMA */
+    PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES,		/* pages allocated from UMA */
+    PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES,	/* mapped UMA/LMA pages  */
+    PVRSRV_MEM_ALLOC_TYPE_UMA_POOL_PAGES,		/* pages in the page pool */
+
+	/* Must be the last enum...*/
+    PVRSRV_MEM_ALLOC_TYPE_COUNT
+} PVRSRV_MEM_ALLOC_TYPE;
+
+
+/*
+ * Functions for managing the processes recorded...
+ */
+PVRSRV_ERROR  PVRSRVStatsInitialise(IMG_VOID);
+
+IMG_VOID  PVRSRVStatsDestroy(IMG_VOID);
+
+PVRSRV_ERROR  PVRSRVStatsRegisterProcess(IMG_HANDLE* phProcessStats);
+
+IMG_VOID  PVRSRVStatsDeregisterProcess(IMG_HANDLE hProcessStats);
+
+#define MAX_POWER_STAT_ENTRIES		51
+
+/*
+ * Functions for recording the statistics...
+ */
+IMG_VOID  PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+                                       IMG_VOID *pvCpuVAddr,
+                                       IMG_CPU_PHYADDR sCpuPAddr,
+                                       IMG_SIZE_T uiBytes,
+                                       IMG_PVOID pvPrivateData);
+
+IMG_VOID  PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+										  IMG_UINT64 ui64Key);
+
+IMG_VOID PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+        							IMG_SIZE_T uiBytes);
+/*
+ * Increases the memory stat for eAllocType. Tracks the allocation size value
+ * by inserting a value into a hash table with uiCpuVAddr as key.
+ * Pair with PVRSRVStatsDecrMemAllocStatAndUntrack().
+ */
+IMG_VOID PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+        							IMG_SIZE_T uiBytes,
+        							IMG_UINT64 uiCpuVAddr);
+
+IMG_VOID PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+        							IMG_SIZE_T uiBytes);
+
+/*
+ * Decrease the memory stat for eAllocType. Takes the allocation size value from the
+ * hash table with uiCpuVAddr as key. Pair with PVRSRVStatsIncrMemAllocStatAndTrack().
+ */
+IMG_VOID PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+        							IMG_UINT64 uiCpuVAddr);
+
+IMG_VOID
+PVRSRVStatsIncrMemAllocPoolStat(IMG_SIZE_T uiBytes);
+
+IMG_VOID
+PVRSRVStatsDecrMemAllocPoolStat(IMG_SIZE_T uiBytes);
+
+IMG_VOID  PVRSRVStatsUpdateRenderContextStats(IMG_UINT32 ui32TotalNumPartialRenders,
+                                              IMG_UINT32 ui32TotalNumOutOfMemory,
+                                              IMG_UINT32 ui32TotalTAStores,
+                                              IMG_UINT32 ui32Total3DStores,
+                                              IMG_UINT32 ui32TotalSHStores,
+                                              IMG_UINT32 ui32TotalCDMStores,
+                                              IMG_PID owner);
+
+IMG_VOID  PVRSRVStatsUpdateZSBufferStats(IMG_UINT32 ui32NumReqByApp,
+                                         IMG_UINT32 ui32NumReqByFW,
+                                         IMG_PID owner);
+
+IMG_VOID  PVRSRVStatsUpdateFreelistStats(IMG_UINT32 ui32NumGrowReqByApp,
+                                         IMG_UINT32 ui32NumGrowReqByFW,
+                                         IMG_UINT32 ui32InitFLPages,
+                                         IMG_UINT32 ui32NumHighPages,
+                                         IMG_PID	ownerPid);
+
+
+typedef enum
+{
+    PVRSRV_POWER_ENTRY_TYPE_PRE,
+    PVRSRV_POWER_ENTRY_TYPE_POST
+} PVRSRV_POWER_ENTRY_TYPE;
+
+IMG_VOID InsertPowerTimeStatistic(PVRSRV_POWER_ENTRY_TYPE bType,
+								IMG_INT32 ui32CurrentState, IMG_INT32 ui32NextState,
+                                IMG_UINT64 ui64SysStartTime, IMG_UINT64 ui64SysEndTime,
+								IMG_UINT64 ui64DevStartTime, IMG_UINT64 ui64DevEndTime,
+								IMG_BOOL bForced);
+
+IMG_VOID InsertPowerTimeStatisticExtraPre(IMG_UINT64 ui64StartTimer, IMG_UINT64 ui64Stoptimer);
+IMG_VOID InsertPowerTimeStatisticExtraPost(IMG_UINT64 ui64StartTimer, IMG_UINT64 ui64StopTimer);
+
+IMG_VOID SetFirmwareStartTime(IMG_UINT32 ui32TimeStamp);
+
+IMG_VOID SetFirmwareHandshakeIdleTime(IMG_UINT64 ui64Duration);
+
+#endif /* __PROCESS_STATS_H__ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/pvr_dvfs.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/pvr_dvfs.h
new file mode 100644
index 0000000..c3bd53f
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/pvr_dvfs.h
@@ -0,0 +1,141 @@
+/*************************************************************************/ /*!
+@File           pvr_dvfs.h
+@Title          System level interface for DVFS
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This file defined the API between services and system layer
+                required for Ion integration.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _PVR_DVFS_H_
+#define _PVR_DVFS_H_
+
+#include "pvrsrv_error.h"
+#include "img_types.h"
+
+typedef IMG_VOID (*PFN_SYS_DEV_DVFS_SET_FREQUENCY)(IMG_UINT32 ui64Freq);
+typedef IMG_VOID (*PFN_SYS_DEV_DVFS_SET_VOLTAGE)(IMG_UINT32 ui64Volt);
+
+typedef struct _IMG_OPP_
+{
+	IMG_UINT32			ui32Volt;
+	IMG_UINT32			ui32Freq;
+} IMG_OPP;
+
+typedef const IMG_OPP* IMG_OPP_TABLE;
+
+typedef struct _IMG_DVFS_GOVERNOR_CFG_
+{
+	IMG_UINT32			ui32UpThreshold;
+	IMG_UINT32			ui32DownDifferential;
+} IMG_DVFS_GOVERNOR_CFG;
+
+typedef struct _IMG_DVFS_DEVICE_CFG_
+{
+	IMG_OPP_TABLE			pasOPPTable;
+	IMG_UINT32			ui32OPPTableSize;
+
+	IMG_UINT32			ui32FreqMin;
+	IMG_UINT32			ui32FreqMax;
+	IMG_UINT32			ui32PollMs;
+	IMG_BOOL			bIdleReq;
+
+	PFN_SYS_DEV_DVFS_SET_FREQUENCY	pfnSetFrequency;
+	PFN_SYS_DEV_DVFS_SET_VOLTAGE	pfnSetVoltage;
+} IMG_DVFS_DEVICE_CFG;
+
+typedef struct _IMG_DVFS_GOVERNOR_
+{
+	IMG_BOOL			bEnabled;
+} IMG_DVFS_GOVERNOR;
+
+#if defined(__linux__)
+typedef struct _IMG_DVFS_DEVICE_
+{
+	POS_LOCK			hDVFSLock;
+	struct dev_pm_opp		*psOPP;
+	struct devfreq			*psDevFreq;
+	IMG_BOOL			bEnabled;
+	IMG_HANDLE			hGpuUtilUserDVFS;
+} IMG_DVFS_DEVICE;
+
+typedef struct _IMG_POWER_AVG_
+{
+	IMG_UINT32			ui32Power;
+	IMG_UINT32			ui32Samples;
+} IMG_POWER_AVG;
+
+typedef struct _IMG_DVFS_PA_
+{
+	IMG_UINT32			ui32AllocatedPower;
+	IMG_UINT32			*aui32ConversionTable;
+	IMG_OPP				sOPPCurrent;
+	IMG_INT32			i32Temp;
+	IMG_UINT64			ui64StartTime;
+	IMG_UINT32			ui32Energy;
+	POS_LOCK			hDVFSLock;
+	struct power_actor		*psPowerActor;
+	IMG_POWER_AVG			sPowerAvg;
+} IMG_DVFS_PA;
+
+typedef struct _IMG_DVFS_PA_CFG_
+{
+	/* Coefficients for a curve defining power leakage due to temperature */
+	IMG_INT32			i32Ta;		/* t^3 */
+	IMG_INT32			i32Tb;		/* t^2 */
+	IMG_INT32			i32Tc;		/* t^1 */
+	IMG_INT32			i32Td;		/* const */
+
+	IMG_UINT32			ui32Other;	/* Static losses unrelated to GPU */
+	IMG_UINT32			ui32Weight;	/* Power actor weight */
+} IMG_DVFS_PA_CFG;
+
+typedef struct _IMG_DVFS_
+{
+	IMG_DVFS_DEVICE			sDVFSDevice;
+	IMG_DVFS_GOVERNOR		sDVFSGovernor;
+	IMG_DVFS_DEVICE_CFG		sDVFSDeviceCfg;
+	IMG_DVFS_GOVERNOR_CFG		sDVFSGovernorCfg;
+#if defined(PVR_POWER_ACTOR)
+	IMG_DVFS_PA			sDVFSPA;
+	IMG_DVFS_PA_CFG			sDVFSPACfg;
+#endif
+} PVRSRV_DVFS;
+#endif/* (__linux__) */
+
+#endif /* _PVR_DVFS_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/pvrsrv.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/pvrsrv.h
new file mode 100644
index 0000000..0ad41c0
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/pvrsrv.h
@@ -0,0 +1,515 @@
+/**************************************************************************/ /*!
+@File
+@Title          PowerVR services server header file
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef PVRSRV_H
+#define PVRSRV_H
+
+
+#if defined(__KERNEL__) && defined(ANDROID) && !defined(__GENKSYMS__)
+#define __pvrsrv_defined_struct_enum__
+#include <services_kernel_client.h>
+#endif
+
+#include "device.h"
+#include "power.h"
+#include "sysinfo.h"
+#include "physheap.h"
+
+typedef struct _SYS_DEVICE_ID_TAG
+{
+	IMG_UINT32	uiID;
+	IMG_BOOL	bInUse;
+
+} SYS_DEVICE_ID;
+
+
+typedef struct PVRSRV_DATA_TAG
+{
+    IMG_UINT32                  ui32NumDevices;      	   	/*!< number of devices in system */
+	SYS_DEVICE_ID				sDeviceID[SYS_DEVICE_COUNT];
+	PVRSRV_DEVICE_NODE			*apsRegisteredDevNodes[SYS_DEVICE_COUNT];
+	IMG_UINT32					ui32RegisteredDevices;
+	IMG_UINT32		 			ui32CurrentOSPowerState;	/*!< current OS specific power state */
+	PVRSRV_DEVICE_NODE			*psDeviceNodeList;			/*!< List head of device nodes */
+	struct _DEVICE_COMMAND_DATA_ *apsDeviceCommandData[SYS_DEVICE_COUNT];
+
+	IMG_UINT32					ui32RegisteredPhysHeaps;
+	PHYS_HEAP					*apsRegisteredPhysHeaps[SYS_PHYS_HEAP_COUNT];
+
+    PVRSRV_POWER_DEV			*psPowerDeviceList;			/*!< list of devices registered with the power manager */
+	POS_LOCK					hPowerLock;					/*!< lock for power state transitions */
+   	PVRSRV_SYS_POWER_STATE		eCurrentPowerState;			/*!< current Kernel services power state */
+   	PVRSRV_SYS_POWER_STATE		eFailedPowerState;			/*!< Kernel services power state (Failed to transition to) */
+
+   	PVRSRV_SERVICES_STATE		eServicesState;				/*!< global driver state */
+
+	IMG_HANDLE					hGlobalEventObject;			/*!< OS Global Event Object */
+	IMG_UINT32					ui32GEOConsecutiveTimeouts;	/*!< OS Global Event Object Timeouts */
+	
+	PVRSRV_CACHE_OP				uiCacheOp;					/*!< Pending cache operations in the system */
+
+	IMG_HANDLE					hCleanupThread;				/*!< Cleanup thread */
+	IMG_HANDLE					hCleanupEventObject;		/*!< Event object to drive cleanup thread */
+	POS_LOCK					hCleanupThreadWorkListLock;	/*!< Lock protecting the cleanup thread work list */
+	DLLIST_NODE					sCleanupThreadWorkList;		/*!< List of work to do by the cleanup thread */
+	IMG_PID						cleanupThreadPid;			/*!< Cleanup thread process id */
+
+	IMG_HANDLE					hDevicesWatchdogThread;		/*!< Devices Watchdog thread */
+	IMG_HANDLE					hDevicesWatchdogEvObj;		/*! Event object to drive devices watchdog thread */
+	volatile IMG_UINT32			ui32DevicesWatchdogPwrTrans;/*! Number of off -> on power state transitions */
+	volatile IMG_UINT32			ui32DevicesWatchdogTimeout; /*! Timeout for the Devices Watchdog Thread */
+#ifdef PVR_TESTING_UTILS
+	volatile IMG_UINT32			ui32DevicesWdWakeupCounter;	/* Need this for the unit tests. */
+#endif
+
+	IMG_BOOL					bUnload;					/*!< Driver unload is in progress */
+} PVRSRV_DATA;
+
+
+typedef IMG_HANDLE PVRSRV_CMDCOMP_HANDLE;
+typedef IMG_VOID (*PFN_CMDCOMP_NOTIFY) (PVRSRV_CMDCOMP_HANDLE hCmdCompHandle);
+
+typedef struct PVRSRV_CMDCOMP_NOTIFY_TAG
+{
+	PVRSRV_CMDCOMP_HANDLE	hCmdCompHandle;
+	PFN_CMDCOMP_NOTIFY		pfnCmdCompleteNotify;
+
+	DLLIST_NODE					sListNode;
+} PVRSRV_CMDCOMP_NOTIFY;
+
+#define DEBUG_REQUEST_VERBOSITY_LOW		0
+#define DEBUG_REQUEST_VERBOSITY_MEDIUM	1
+#define DEBUG_REQUEST_VERBOSITY_HIGH	2
+
+#define DEBUG_REQUEST_VERBOSITY_MAX	(DEBUG_REQUEST_VERBOSITY_HIGH)
+
+typedef IMG_HANDLE PVRSRV_DBGREQ_HANDLE;
+typedef IMG_VOID (*PFN_DBGREQ_NOTIFY) (PVRSRV_DBGREQ_HANDLE hDebugRequestHandle, IMG_UINT32 ui32VerbLevel);
+
+typedef struct PVRSRV_DBGREQ_NOTIFY_TAG
+{
+	PVRSRV_DBGREQ_HANDLE	hDbgRequestHandle;
+	PFN_DBGREQ_NOTIFY		pfnDbgRequestNotify;
+	IMG_UINT32				ui32RequesterID;
+
+	DLLIST_NODE					sListNode;
+} PVRSRV_DBGREQ_NOTIFY;
+
+/*!
+*******************************************************************************
+
+ @Description
+
+ Macro used within debug dump functions to send output either to PVR_LOG or
+ a custom function.
+
+******************************************************************************/
+#define PVR_DUMPDEBUG_LOG(x)					\
+	do											\
+	{											\
+		if (pfnDumpDebugPrintf)					\
+		{										\
+			pfnDumpDebugPrintf x;				\
+		}										\
+		else									\
+		{										\
+			PVR_LOG(x);							\
+		}										\
+	} while(0)
+
+/*!
+*******************************************************************************
+
+ @Description
+
+ Typedef for custom debug dump output functions.
+
+******************************************************************************/
+typedef void (DUMPDEBUG_PRINTF_FUNC)(const IMG_CHAR *pszFormat, ...);
+
+extern DUMPDEBUG_PRINTF_FUNC *g_pfnDumpDebugPrintf;
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVGetPVRSRVData
+
+ @Description	Get a pointer to the global data
+
+ @Return   PVRSRV_DATA *
+
+******************************************************************************/
+PVRSRV_DATA *PVRSRVGetPVRSRVData(IMG_VOID);
+
+IMG_EXPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVEnumerateDevicesKM(IMG_UINT32 *pui32NumDevices,
+                                                   PVRSRV_DEVICE_TYPE *peDeviceType,
+                                                   PVRSRV_DEVICE_CLASS *peDeviceClass,
+                                                   IMG_UINT32 *pui32DeviceIndex);
+
+IMG_EXPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVAcquireDeviceDataKM (IMG_UINT32			ui32DevIndex,
+													 PVRSRV_DEVICE_TYPE	eDeviceType,
+													 IMG_HANDLE			*phDevCookie);
+
+IMG_EXPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVReleaseDeviceDataKM (IMG_HANDLE hDevCookie);
+
+PVRSRV_ERROR IMG_CALLCONV PVRSRVRegisterExtDevice(PVRSRV_DEVICE_NODE *psDeviceNode,
+													IMG_UINT32 *pui32DeviceIndex,
+													IMG_UINT32 ui32PhysHeapID);
+
+IMG_VOID IMG_CALLCONV PVRSRVUnregisterExtDevice(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+PVRSRV_ERROR IMG_CALLCONV PVRSRVSysPrePowerState(PVRSRV_SYS_POWER_STATE eNewPowerState, IMG_BOOL bForced);
+
+PVRSRV_ERROR IMG_CALLCONV PVRSRVSysPostPowerState(PVRSRV_SYS_POWER_STATE eNewPowerState, IMG_BOOL bForced);
+
+PVRSRV_ERROR LMA_MMUPxAlloc(PVRSRV_DEVICE_NODE *psDevNode, IMG_SIZE_T uiSize,
+							Px_HANDLE *psMemHandle, IMG_DEV_PHYADDR *psDevPAddr);
+
+IMG_VOID LMA_MMUPxFree(PVRSRV_DEVICE_NODE *psDevNode, Px_HANDLE *psMemHandle);
+
+PVRSRV_ERROR LMA_MMUPxMap(PVRSRV_DEVICE_NODE *psDevNode, Px_HANDLE *psMemHandle,
+							IMG_SIZE_T uiSize, IMG_DEV_PHYADDR *psDevPAddr,
+							IMG_VOID **pvPtr);
+
+IMG_VOID LMA_MMUPxUnmap(PVRSRV_DEVICE_NODE *psDevNode, Px_HANDLE *psMemHandle,
+						IMG_VOID *pvPtr);
+										
+
+/*!
+******************************************************************************
+ @Function	PVRSRVPollForValueKM
+
+ @Description
+ Polls for a value to match a masked read
+
+ @Input pui32LinMemAddr : CPU linear address to poll
+ @Input ui32Value : required value
+ @Input ui32Mask : Mask
+
+ @Return   PVRSRV_ERROR :
+******************************************************************************/
+IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV PVRSRVPollForValueKM(volatile IMG_UINT32	*pui32LinMemAddr,
+														  IMG_UINT32			ui32Value,
+														  IMG_UINT32			ui32Mask);
+
+/*!
+******************************************************************************
+ @Function	PVRSRVWaitForValueKM
+
+ @Description
+ Waits (using EventObjects) for a value to match a masked read
+
+ @Input pui32LinMemAddr			: CPU linear address to poll
+ @Input ui32Value				: required value
+ @Input ui32Mask				: Mask
+
+ @Return   PVRSRV_ERROR :
+******************************************************************************/
+IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV PVRSRVWaitForValueKM(volatile IMG_UINT32	*pui32LinMemAddr,
+														IMG_UINT32			ui32Value,
+														IMG_UINT32			ui32Mask);
+
+/*!
+******************************************************************************
+ @Function	PVRSRVWaitForValueKMAndHoldBridgeLockKM
+
+ @Description
+ Waits without releasing bridge lock (using EventObjects) for a value
+ to match a masked read
+
+ @Input pui32LinMemAddr			: CPU linear address to poll
+ @Input ui32Value				: required value
+ @Input ui32Mask				: Mask
+
+ @Return   PVRSRV_ERROR :
+******************************************************************************/
+PVRSRV_ERROR IMG_CALLCONV PVRSRVWaitForValueKMAndHoldBridgeLockKM(volatile IMG_UINT32 *pui32LinMemAddr,
+                                                                  IMG_UINT32          ui32Value,
+                                                                  IMG_UINT32          ui32Mask);
+
+/*!
+*****************************************************************************
+ @Function	: PVRSRVSystemDebugInfo
+
+ @Description	: Dump the system debug info
+
+@Input pfnDumpDebugPrintf : Used to specify the appropriate printf function.
+			     If this argument is IMG_NULL, then PVR_LOG() will
+			     be used as the default printing function.
+
+*****************************************************************************/
+PVRSRV_ERROR PVRSRVSystemDebugInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf);
+
+/*!
+*****************************************************************************
+ @Function	: PVRSRVGetSystemName
+
+ @Description	: Gets the system name string
+
+ @Return : The system name
+*****************************************************************************/
+const IMG_CHAR *PVRSRVGetSystemName(IMG_VOID);
+
+/*!
+*****************************************************************************
+ @Function	: PVRSRVSystemHasCacheSnooping
+
+ @Description	: Returns whether the system has cache snooping
+
+ @Return : IMG_TRUE if the system has cache snooping
+*****************************************************************************/
+IMG_BOOL PVRSRVSystemHasCacheSnooping(IMG_VOID);
+
+/*!
+*****************************************************************************
+ @Function	: PVRSRVSystemSnoopingOfCPUCache
+
+ @Description	: Returns whether the system supports snooping of the CPU cache
+
+ @Return : IMG_TRUE if the system has CPU cache snooping
+*****************************************************************************/
+IMG_BOOL PVRSRVSystemSnoopingOfCPUCache(IMG_VOID);
+
+/*!
+*****************************************************************************
+ @Function	: PVRSRVSystemSnoopingOfDeviceCache
+
+ @Description	: Returns whether the system supports snooping of the device cache
+
+ @Return : IMG_TRUE if the system has device cache snooping
+*****************************************************************************/
+IMG_BOOL PVRSRVSystemSnoopingOfDeviceCache(IMG_VOID);
+
+/*!
+*****************************************************************************
+ @Function	: PVRSRVSystemWaitCycles
+
+ @Description	: Waits for at least ui32Cycles of the Device clk.
+
+*****************************************************************************/
+IMG_VOID PVRSRVSystemWaitCycles(PVRSRV_DEVICE_CONFIG *psDevConfig, IMG_UINT32 ui32Cycles);
+
+
+
+/*!
+*****************************************************************************
+ @Function	: PVRSRVCheckStatus
+
+ @Description	: Notify any registered cmd complete function (except if its
+				  hPrivData matches the hCmdCompHandle handler) and raise the global 
+				  event object. 
+
+ @Input hCmdCompHandle	: Identify the caller by the handler used when 
+						  registering for cmd complete. IMG_NULL calls all
+						  the notify functions.
+
+*****************************************************************************/
+IMG_VOID IMG_CALLCONV PVRSRVCheckStatus(PVRSRV_CMDCOMP_HANDLE hCmdCompCallerHandle);
+
+PVRSRV_ERROR IMG_CALLCONV PVRSRVKickDevicesKM(IMG_VOID);
+
+/*!
+*****************************************************************************
+ @Function	: PVRSRVResetHWRLogsKM
+
+ @Description	: Resets the HWR Logs buffer (the hardware recovery count is not reset)
+
+ @Input psDeviceNode	: Pointer to the device
+
+ @Return   PVRSRV_ERROR : PVRSRV_OK on success. Otherwise, a PVRSRV_ error code
+*****************************************************************************
+ */
+PVRSRV_ERROR PVRSRVResetHWRLogsKM(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+/*!
+*****************************************************************************
+ @Function	: PVRSRVRegisterCmdCompleteNotify
+
+ @Description	: Register a notify function which is called when some device
+				  finishes some work (that is, when someone calls to PVRSRVCheckStatus).
+
+ @Input phNotify : Pointer to the Cmd complete notify handler
+
+ @Input pfnCmdCompleteNotify : Notify function
+
+ @Input hPrivData : Handler to data passed to the Notify function when called
+
+*****************************************************************************/
+PVRSRV_ERROR PVRSRVRegisterCmdCompleteNotify(IMG_HANDLE *phNotify, PFN_CMDCOMP_NOTIFY pfnCmdCompleteNotify, PVRSRV_CMDCOMP_HANDLE hPrivData);
+
+/*!
+*****************************************************************************
+ @Function	: PVRSRVUnregisterCmdCompleteNotify
+
+ @Description	: Unregister a previously registered notify func.
+
+ @Input hNotify : Cmd complete notify handler registered previously
+
+*****************************************************************************/
+PVRSRV_ERROR PVRSRVUnregisterCmdCompleteNotify(IMG_HANDLE hNotify);
+
+
+/*!
+*****************************************************************************
+ @Function	: PVRSRVDebugRequest
+
+ @Description	: Notify any registered debug request handler that a debug
+                  request has been made and at what level. It dumps information 
+		  for all debug handlers unlike RGXDumpDebugInfo
+
+ @Input ui32VerbLevel	: The maximum verbosity level to dump
+
+ @Input pfnDumpDebugPrintf : Used to specify the appropriate printf function.
+			     If this argument is IMG_NULL, then PVR_LOG() will
+			     be used as the default printing function.
+
+*****************************************************************************/
+IMG_VOID IMG_CALLCONV PVRSRVDebugRequest(IMG_UINT32 ui32VerbLevel, DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf);
+
+/*!
+*****************************************************************************
+ @Function	: PVRSRVRegisterDebugRequestNotify
+
+ @Description	: Register a notify function which is called when a debug
+				  request is made into the driver (that is, when someone
+				  calls to PVRSRVDebugRequest). There are a number of levels
+				  of verbosity, starting at 0 and going to
+				  DEBUG_REQUEST_VERBOSITY_MAX. For each level that's required
+				  a new call to the notify function will be made.
+
+ @Input phNotify : Pointer to the debug request notify handler
+
+ @Input pfnDbgRequestNotify : Notify function
+
+ @Input ui32RequesterID : Used to determine the order debug request callbacks get
+                          called in with the table passed into 
+
+ @Input hDbgReqeustHandle : Handler to data passed to the Notify function when called
+
+*****************************************************************************/
+PVRSRV_ERROR PVRSRVRegisterDbgRequestNotify(IMG_HANDLE *phNotify, PFN_DBGREQ_NOTIFY pfnDbgRequestNotify, IMG_UINT32 ui32RequesterID, PVRSRV_DBGREQ_HANDLE hDbgReqeustHandle);
+
+/*!
+*****************************************************************************
+ @Function	: PVRSRVUnregisterDebugRequestNotify
+
+ @Description	: Unregister a previously registered notify func.
+
+ @Input hNotify : Debug request notify handler registered previously
+
+*****************************************************************************/
+PVRSRV_ERROR PVRSRVUnregisterDbgRequestNotify(IMG_HANDLE hNotify);
+
+/*!
+*****************************************************************************
+ @Function	: AcquireGlobalEventObjectServer
+
+ @Description	: Acquire the global event object.
+
+ @Output phGlobalEventObject : Handle to the global event object
+
+*****************************************************************************/
+PVRSRV_ERROR AcquireGlobalEventObjectServer(IMG_HANDLE *phGlobalEventObject);
+
+/*!
+*****************************************************************************
+ @Function	: ReleaseGlobalEventObjectServer
+
+ @Description	: Release the global event object.
+
+ @Input hGlobalEventObject : Handle to the global event object
+
+*****************************************************************************/
+PVRSRV_ERROR ReleaseGlobalEventObjectServer(IMG_HANDLE hGlobalEventObject);
+
+
+/*!
+*****************************************************************************
+ @Function	: GetBIFTilingHeapXStride
+
+ @Description	: return the default x-stride configuration for the given
+                  BIF tiling heap number
+
+ @Input uiHeapNum: BIF tiling heap number, starting from 1
+
+ @Output puiXStride: pointer to x-stride output of the requested heap
+
+*****************************************************************************/
+PVRSRV_ERROR GetBIFTilingHeapXStride(IMG_UINT32 uiHeapNum, IMG_UINT32 *puiXStride);
+
+/*!
+*****************************************************************************
+ @Function	: GetNumBIFTilingHeaps
+
+ @Description	: return the number of BIF tiling heaps on this system
+
+ @Output puiNumHeaps: pointer to uint to hold number of heaps
+
+*****************************************************************************/
+PVRSRV_ERROR GetNumBifTilingHeapConfigs(IMG_UINT32 *puiNumHeaps);
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+/*!
+***********************************************************************************
+ @Function				: PopulateLMASubArenas
+
+ @Description			: Uses the Apphints passed by the client at initialization
+						  time to add bases and sizes in the various arenas in the
+						  LMA memory
+
+ @Input psDeviceNode	: Pointer to the device node struct containing all the
+						  arena information
+
+ @Input ui32OSidMin		: Single dimensional array containing the minimum values
+						  for each OSid area
+
+ @Input ui32OSidMax		: Single dimensional array containing the maximum values
+						  for each OSid area
+***********************************************************************************/
+
+IMG_VOID PopulateLMASubArenas(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT32 aui32OSidMin[GPUVIRT_VALIDATION_NUM_OS][GPUVIRT_VALIDATION_NUM_REGIONS], IMG_UINT32 aui32OSidMax[GPUVIRT_VALIDATION_NUM_OS][GPUVIRT_VALIDATION_NUM_REGIONS]);
+#endif
+
+#endif /* PVRSRV_H */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/pvrsrv_cleanup.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/pvrsrv_cleanup.h
new file mode 100644
index 0000000..fba346e
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/pvrsrv_cleanup.h
@@ -0,0 +1,76 @@
+/**************************************************************************/ /*!
+@File
+@Title          PowerVR SrvKM cleanup thread deferred work interface
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef PVRSRV_CLEANUP_H
+#define PVRSRV_CLEANUP_H
+
+typedef PVRSRV_ERROR (*CLEANUP_THREAD_FN)(void *pvParam);
+
+/* typical number of times a caller should want the work to be retried in case
+ * of the callback function (pfnFree) returning an error.
+ * Callers to PVRSRVCleanupThreadAddWork should provide this value as the retry
+ * count (ui32RetryCount) unless there are special requirements.
+ * A value of 6000 corresponds to around 10 minutes.
+ */
+#define CLEANUP_THREAD_RETRY_COUNT_DEFAULT 6000
+
+typedef struct _PVRSRV_CLEANUP_THREAD_WORK_
+{
+	DLLIST_NODE sNode; /*!< list node to attach to the cleanup thread work list */
+	CLEANUP_THREAD_FN pfnFree; /*!< function to be called */
+	void *pvData; /*!< private data for pfnFree */
+	IMG_UINT32 ui32RetryCount; /*!< number of times the callback should be re-tried when it returns error */
+} PVRSRV_CLEANUP_THREAD_WORK;
+
+/*!
+******************************************************************************
+ @Function                PVRSRVCleanupThreadAddWork
+
+ @Description             Add a work item to be called from the cleanup thread
+
+ @Input psData          : The function pointer and private data for the callback
+
+ @Return                  None
+******************************************************************************/
+void PVRSRVCleanupThreadAddWork(PVRSRV_CLEANUP_THREAD_WORK *psData);
+
+#endif /* PVRSRV_CLEANUP_H */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/pvrsrv_device.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/pvrsrv_device.h
new file mode 100644
index 0000000..0453e71
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/pvrsrv_device.h
@@ -0,0 +1,193 @@
+/**************************************************************************/ /*!
+@File
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef __PVRSRV_DEVICE_H__
+#define __PVRSRV_DEVICE_H__
+
+#include "servicesext.h"
+#include "pvrsrv_device_types.h"
+#include "img_types.h"
+#include "ra.h"
+#include "physheap.h"
+#include "rgx_fwif_km.h"
+#include "pmr.h"
+#include "lock.h"
+#include "pvr_dvfs.h"
+
+typedef struct _PVRSRV_DEVICE_CONFIG_ PVRSRV_DEVICE_CONFIG;
+
+/*! The CPU physical base of the LMA physical heap is used as the base for
+ *  device memory physical heap allocations */
+#define PVRSRV_DEVICE_CONFIG_LMA_USE_CPU_ADDR	(1<<0)
+
+/*
+ *  The maximum number of physical heaps associated
+ *  with a device
+ */
+typedef enum
+{
+	PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL = 0,
+	PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL = 1,
+	PVRSRV_DEVICE_PHYS_HEAP_LAST
+}PVRSRV_DEVICE_PHYS_HEAP;
+
+typedef enum
+{
+	PVRSRV_DEVICE_IRQ_ACTIVE_SYSDEFAULT = 0,
+	PVRSRV_DEVICE_IRQ_ACTIVE_LOW,
+	PVRSRV_DEVICE_IRQ_ACTIVE_HIGH
+}PVRSRV_DEVICE_IRQ_ACTIVE_LEVEL;
+
+typedef IMG_VOID (*PFN_MISR)(IMG_VOID *pvData);
+
+typedef IMG_BOOL (*PFN_LISR)(IMG_VOID *pvData);
+
+typedef IMG_UINT32 (*PFN_SYS_DEV_CLK_FREQ_GET)(IMG_HANDLE hSysData);
+
+typedef PVRSRV_ERROR (*PFN_SYS_DEV_PRE_POWER)(PVRSRV_DEV_POWER_STATE eNewPowerState,
+                                              PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+											  IMG_BOOL bForced);
+
+
+typedef PVRSRV_ERROR (*PFN_SYS_DEV_POST_POWER)(PVRSRV_DEV_POWER_STATE eNewPowerState,
+                                               PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+											   IMG_BOOL bForced);
+
+typedef IMG_VOID (*PFN_SYS_DEV_INTERRUPT_HANDLED)(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+typedef PVRSRV_ERROR (*PFN_SYS_DEV_CHECK_MEM_ALLOC_SIZE)(struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+														 IMG_UINT64 ui64MemSize);
+
+struct _PVRSRV_DEVICE_CONFIG_
+{
+	/*! Configuration flags */
+	IMG_UINT32			uiFlags;
+
+	/*! Name of the device (used when registering the IRQ) */
+	IMG_CHAR			*pszName;
+
+	/*! Type of device this is */
+	PVRSRV_DEVICE_TYPE		eDeviceType;
+
+	/*! Register bank address */
+	IMG_CPU_PHYADDR			sRegsCpuPBase;
+	/*! Register bank size */
+	IMG_UINT32			ui32RegsSize;
+	/*! Device interrupt number */
+	IMG_UINT32			ui32IRQ;
+
+	/*! The device interrupt is shared */
+	IMG_BOOL			bIRQIsShared;
+
+	/*! IRQ polarity */
+	PVRSRV_DEVICE_IRQ_ACTIVE_LEVEL	eIRQActiveLevel;
+
+	/*! Device specific data handle */
+	IMG_HANDLE			hDevData;
+
+	/*! System specific data. This gets passed into system callback functions */
+	IMG_HANDLE			hSysData;
+
+	/*! ID of the Physical memory heap to use
+	 *! The first entry (aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL])  will be used for allocations
+	 *!  where the PVRSRV_MEMALLOCFLAG_CPU_LOCAL flag is not set. Normally this will be the PhysHeapID
+	 *!  of an LMA heap (but the configuration could specify a UMA heap here, if desired)
+	 *! The second entry (aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL]) will be used for allocations
+	 *!  where the PVRSRV_MEMALLOCFLAG_CPU_LOCAL flag is set. Normally this will be the PhysHeapID
+	 *!  of a UMA heap (but the configuration could specify an LMA heap here, if desired)
+	 *! In the event of there being only one Physical Heap, the configuration should specify the
+	 *!  same heap details in both entries */
+	IMG_UINT32			aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_LAST];
+
+	/*! Callback to inform the device we about to change power state */
+	PFN_SYS_DEV_PRE_POWER		pfnPrePowerState;
+
+	/*! Callback to inform the device we have finished the power state change */
+	PFN_SYS_DEV_POST_POWER		pfnPostPowerState;
+
+	/*! Callback to obtain the clock frequency from the device */
+	PFN_SYS_DEV_CLK_FREQ_GET	pfnClockFreqGet;
+
+	/*! Callback to inform the device that an interrupt has been handled */
+	PFN_SYS_DEV_INTERRUPT_HANDLED	pfnInterruptHandled;
+
+	/*! Callback to handle memory budgeting */
+	PFN_SYS_DEV_CHECK_MEM_ALLOC_SIZE	pfnCheckMemAllocSize;
+
+	/*! Current breakpoint data master */
+	RGXFWIF_DM			eBPDM;
+	/*! A Breakpoint has been set */
+	IMG_BOOL			bBPSet;	
+
+#if defined(PVR_DVFS)
+	PVRSRV_DVFS			sDVFS;
+#endif
+};
+
+typedef PVRSRV_ERROR (*PFN_SYSTEM_PRE_POWER_STATE)(PVRSRV_SYS_POWER_STATE eNewPowerState);
+typedef PVRSRV_ERROR (*PFN_SYSTEM_POST_POWER_STATE)(PVRSRV_SYS_POWER_STATE eNewPowerState);
+
+typedef enum _PVRSRV_SYSTEM_SNOOP_MODE_ {
+	PVRSRV_SYSTEM_SNOOP_NONE = 0,
+	PVRSRV_SYSTEM_SNOOP_CPU_ONLY,
+	PVRSRV_SYSTEM_SNOOP_DEVICE_ONLY,
+	PVRSRV_SYSTEM_SNOOP_CROSS,
+} PVRSRV_SYSTEM_SNOOP_MODE;
+
+typedef struct _PVRSRV_SYSTEM_CONFIG_
+{
+	IMG_UINT32				uiSysFlags;
+	IMG_CHAR				*pszSystemName;
+	IMG_UINT32				uiDeviceCount;
+	PVRSRV_DEVICE_CONFIG	*pasDevices;
+	PFN_SYSTEM_PRE_POWER_STATE pfnSysPrePowerState;
+	PFN_SYSTEM_POST_POWER_STATE pfnSysPostPowerState;
+	PVRSRV_SYSTEM_SNOOP_MODE eCacheSnoopingMode;
+
+	PHYS_HEAP_CONFIG		*pasPhysHeaps;
+	IMG_UINT32				ui32PhysHeapCount;
+
+	IMG_UINT32              *pui32BIFTilingHeapConfigs;
+	IMG_UINT32              ui32BIFTilingHeapCount;
+} PVRSRV_SYSTEM_CONFIG;
+
+
+#endif /* __PVRSRV_DEVICE_H__*/
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/ri_server.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/ri_server.h
new file mode 100644
index 0000000..9a98a30
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/ri_server.h
@@ -0,0 +1,87 @@
+/*************************************************************************/ /*!
+@File			ri_server.h
+@Title          Resource Information abstraction
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description	Resource Information (RI) functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RI_SERVER_H_
+#define _RI_SERVER_H_
+
+#include <img_defs.h>
+#include <ri_typedefs.h>
+#include <pmr.h>
+#include <pvrsrv_error.h>
+
+PVRSRV_ERROR RIInitKM(IMG_VOID);
+IMG_VOID RIDeInitKM(IMG_VOID);
+
+PVRSRV_ERROR RIWritePMREntryKM(PMR *hPMR,
+					   	   	   IMG_UINT32 ui32TextASize,
+					   	   	   const IMG_CHAR ai8TextA[RI_MAX_TEXT_LEN+1],
+					   	   	   IMG_UINT64 uiLogicalSize);
+
+PVRSRV_ERROR RIWriteMEMDESCEntryKM(PMR *hPMR,
+					   	   	   	   IMG_UINT32 ui32TextBSize,
+					   	   	   	   const IMG_CHAR ai8TextB[RI_MAX_TEXT_LEN+1],
+					   	   	   	   IMG_UINT64 uiOffset,
+					   	   	   	   IMG_UINT64 uiSize,
+					   	   	   	   IMG_BOOL bIsImport,
+					   	   	   	   IMG_BOOL bIsExportable,
+					   	   	   	   RI_HANDLE *phRIHandle);
+
+PVRSRV_ERROR RIUpdateMEMDESCAddrKM(RI_HANDLE hRIHandle,
+								   IMG_DEV_VIRTADDR sVAddr);
+
+PVRSRV_ERROR RIDeletePMREntryKM(RI_HANDLE hRIHandle);
+PVRSRV_ERROR RIDeleteMEMDESCEntryKM(RI_HANDLE hRIHandle);
+
+PVRSRV_ERROR RIDeleteListKM(IMG_VOID);
+
+PVRSRV_ERROR RIDumpListKM(PMR *hPMR);
+
+PVRSRV_ERROR RIDumpAllKM(IMG_VOID);
+
+PVRSRV_ERROR RIDumpProcessKM(IMG_PID pid);
+
+IMG_BOOL RIGetListEntryKM(IMG_PID pid,
+						  IMG_HANDLE **ppHandle,
+						  IMG_CHAR **ppszEntryString);
+
+#endif /* #ifndef _RI_SERVER_H _*/
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/scp.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/scp.h
new file mode 100644
index 0000000..39ee506
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/scp.h
@@ -0,0 +1,232 @@
+/**************************************************************************/ /*!
+@File
+@Title          Software Command Processor header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines the interface for the software command processor
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef SCP_H
+#define SCP_H
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "sync_server.h"
+
+
+typedef struct _SCP_CONTEXT_ SCP_CONTEXT;	/*!< Opaque handle to a software command processor context */
+
+typedef IMG_BOOL (*SCPReady)(IMG_PVOID pvReadyData);
+typedef IMG_VOID (*SCPDo)(IMG_PVOID pvReadyData, IMG_PVOID pvCompleteData);
+
+/*************************************************************************/ /*!
+@Function       SCPCreate
+
+@Description    Create a software command processor
+
+@Input          ui32CCBSizeLog2         Log2 of the CCB size
+
+@Output         ppvBufferSpace          Pointer to space allocated
+
+@Return         PVRSRV_OK if the software command processor was created
+*/
+/*****************************************************************************/
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV SCPCreate(IMG_UINT32 ui32CCBSizeLog2,
+									SCP_CONTEXT **ppsContext);
+
+/*************************************************************************/ /*!
+@Function       SCPAllocCommand
+
+@Description    Allocate space in the software command processor and return
+                the data pointers for the callback data.
+
+                Once any command ready data and command complete have been setup
+                the command can be submitted for processing by calling
+                SCPSubmitCommand.
+
+                When any fences the command has have been meet then the command
+                ready callback will be called with the command ready data.
+                Once the command has completed the command complete callback will
+                be called with the command complete data.
+
+@Input          psSCPContext            Context to allocate from
+
+@Input          ui32SyncPrimCount       Number of Sync Prim operations
+
+@Input          papsSync                Pointer to array of pointers to server syncs
+
+@Input          pfnCommandReady         Callback to call if the command is ready
+
+@Input          pfnCommandDo            Callback to the function to run
+
+@Input          ui32ReadyDataSize       Size of command ready data to allocate in bytes
+
+@Input          pfnCommandComplete      Callback to call when the command has completed
+
+@Input          ui32CompleteDataSize    Size of command complete data to allocate
+
+@Output         ppvReadyData            Pointer to memory allocated for command
+                                        ready callback data
+
+@Output         ppvCompleteData         Pointer to memory allocated for command
+                                        complete callback data
+
+@Return         PVRSRV_OK if the allocate was successfull
+*/
+/*****************************************************************************/
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV SCPAllocCommand(SCP_CONTEXT *psSCPContext,
+										  IMG_UINT32 ui32SyncPrimCount,
+										  SERVER_SYNC_PRIMITIVE **papsSync,
+										  IMG_BOOL *pabUpdate,
+										  IMG_INT32 i32AcquireFenceFd,
+										  SCPReady pfnCommandReady,
+										  SCPDo pfnCommandDo,
+										  IMG_SIZE_T ui32ReadyDataByteSize,
+										  IMG_SIZE_T ui32CompleteDataByteSize,
+										  IMG_PVOID *ppvReadyData,
+										  IMG_PVOID *ppvCompleteData,
+										  IMG_INT32 *pi32ReleaseFenceFd);
+
+/*************************************************************************/ /*!
+@Function       SCPSubmitCommand
+
+@Description    Submit a command for processing. We don't actually try to
+                run the command in this call as it might not be valid to do
+                from the same thread that this function is being called from
+
+@Input          psSCPContext            Context to allocate on which to submit
+                                        the command
+
+@Return         PVRSRV_OK if the command was submitted
+*/
+/*****************************************************************************/
+IMG_IMPORT
+PVRSRV_ERROR SCPSubmitCommand(SCP_CONTEXT *psContext);
+
+
+/*************************************************************************/ /*!
+@Function       SCPRun
+
+@Description    Run the software command processor to see if any commands are
+                now ready.
+
+@Input          psSCPContext            Context to process
+
+@Return         PVRSRV_OK if the software command processor was run
+*/
+/*****************************************************************************/
+IMG_IMPORT
+PVRSRV_ERROR SCPRun(SCP_CONTEXT *psContext);
+
+/*************************************************************************/ /*!
+@Function       SCPCommandComplete
+
+@Description    Complete a command which the software command processor
+                has previously issued.
+                Note: Commands _MUST_ be completed in order
+
+@Input          psSCPContext            Context to process
+
+@Return         PVRSRV_OK if the software command processor was run
+*/
+/*****************************************************************************/
+IMG_IMPORT
+IMG_VOID SCPCommandComplete(SCP_CONTEXT *psContext);
+
+/*************************************************************************/ /*!
+@Function       SCPFlush
+
+@Description    Flush the software command processor.
+
+@Input          psSCPContext            Context to process
+
+@Return         PVRSRV_OK if all commands have been completed, otherwise
+				PVRSRV_ERROR_RETRY
+*/
+/*****************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR SCPFlush(SCP_CONTEXT *psContext);
+
+/*************************************************************************/ /*!
+@Function       SCPHasPendingCommand
+
+@Description    Check the software command processor for pending commands.
+
+@Input          psContext               Context to process
+
+@Return         IMG_TRUE if there is at least one pending command
+				IMG_FALSE if there are no pending commands
+*/
+/*****************************************************************************/
+IMG_EXPORT
+IMG_BOOL SCPHasPendingCommand(SCP_CONTEXT *psContext);
+
+/*************************************************************************/ /*!
+@Function       SCPDumpStatus
+
+@Description    Dump the status of the provided software command processor.
+
+@Input          psSCPContext            Context to dump
+
+@Return         None
+*/
+/*****************************************************************************/
+IMG_EXPORT
+IMG_VOID IMG_CALLCONV SCPDumpStatus(SCP_CONTEXT *psContext);
+
+/*************************************************************************/ /*!
+@Function       SCPDestroy
+
+@Description    Destroy a software command processor.
+
+@Input          psSCPContext            Context to destroy
+
+@Return         None
+*/
+/*****************************************************************************/
+IMG_IMPORT
+IMG_VOID IMG_CALLCONV SCPDestroy(SCP_CONTEXT *psContext);
+
+
+#endif /* SCP_H */
+
+/******************************************************************************
+ End of file (queue.h)
+******************************************************************************/
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/secure_export.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/secure_export.h
new file mode 100644
index 0000000..55aac8c
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/secure_export.h
@@ -0,0 +1,65 @@
+/**************************************************************************/ /*!
+@File
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pmr.h"
+#include "connection_server.h"
+
+typedef struct _SECURE_CLEANUP_DATA_ {
+	PMR *psPMR;
+} SECURE_CLEANUP_DATA;
+
+PVRSRV_ERROR PMRSecureExportPMR(CONNECTION_DATA *psConnection,
+								PMR *psPMR,
+								IMG_SECURE_TYPE *phSecure,
+								PMR **ppsPMR,
+								CONNECTION_DATA **ppsSecureConnection);
+
+PVRSRV_ERROR PMRSecureUnexportPMR(PMR *psPMR);
+
+PVRSRV_ERROR PMRSecureImportPMR(IMG_SECURE_TYPE hSecure,
+								PMR **ppsPMR,
+								IMG_DEVMEM_SIZE_T *puiSize,
+								IMG_DEVMEM_ALIGN_T *puiAlign);
+
+PVRSRV_ERROR PMRSecureUnimportPMR(PMR *psPMR);
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/srvcore.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/srvcore.h
new file mode 100644
index 0000000..038a1c9
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/srvcore.h
@@ -0,0 +1,203 @@
+/**************************************************************************/ /*!
+@File
+@Title          PVR Bridge Functionality
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header for the PVR Bridge code
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef __BRIDGED_PVR_BRIDGE_H__
+#define __BRIDGED_PVR_BRIDGE_H__
+
+#include "lock_types.h"
+#include "connection_server.h"
+#include "pvr_debug.h"
+
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "pvr_bridge_io.h"
+
+#ifndef ENOMEM
+#define ENOMEM	12
+#endif
+#ifndef EFAULT
+#define EFAULT	14
+#endif
+#ifndef ENOTTY
+#define ENOTTY	25
+#endif
+
+PVRSRV_ERROR
+CopyFromUserWrapper(CONNECTION_DATA *psConnection,
+					IMG_UINT32 ui32DispatchTableEntry,
+					IMG_VOID *pvDest,
+					IMG_VOID *pvSrc,
+					IMG_UINT32 ui32Size);
+PVRSRV_ERROR
+CopyToUserWrapper(CONNECTION_DATA *psConnection, 
+				  IMG_UINT32 ui32DispatchTableEntry,
+				  IMG_VOID *pvDest,
+				  IMG_VOID *pvSrc,
+				  IMG_UINT32 ui32Size);
+
+IMG_INT
+DummyBW(IMG_UINT32 ui32DispatchTableEntry,
+		IMG_VOID *psBridgeIn,
+		IMG_VOID *psBridgeOut,
+		CONNECTION_DATA *psConnection);
+
+typedef IMG_INT (*BridgeWrapperFunction)(IMG_UINT32 ui32DispatchTableEntry,
+									 IMG_VOID *psBridgeIn,
+									 IMG_VOID *psBridgeOut,
+									 CONNECTION_DATA *psConnection);
+
+typedef struct _PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY
+{
+	BridgeWrapperFunction pfFunction; /*!< The wrapper function that validates the ioctl
+										arguments before calling into srvkm proper */
+	POS_LOCK	hBridgeLock;	/*!< The bridge lock which needs to be acquired 
+						before calling the above wrapper */
+	IMG_PVOID	pvBridgeBuffer;	/*!< The buffer that will be used for bridgeIn and bridgeOut structs during this bridge call */
+	IMG_UINT32	ui32BridgeInBufferSize;	/*!< Available bridge input buffer size */
+	IMG_UINT32	ui32BridgeOutBufferSize;	/*!< Available bridge output buffer size */
+#if defined(DEBUG_BRIDGE_KM)
+	const IMG_CHAR *pszIOCName; /*!< Name of the ioctl: e.g. "PVRSRV_BRIDGE_CONNECT_SERVICES" */
+	const IMG_CHAR *pszFunctionName; /*!< Name of the wrapper function: e.g. "PVRSRVConnectBW" */
+	const IMG_CHAR *pszBridgeLockName;	/*!< Name of bridge lock which will be acquired */
+	IMG_UINT32 ui32CallCount; /*!< The total number of times the ioctl has been called */
+	IMG_UINT32 ui32CopyFromUserTotalBytes; /*!< The total number of bytes copied from
+											 userspace within this ioctl */
+	IMG_UINT32 ui32CopyToUserTotalBytes; /*!< The total number of bytes copied from
+										   userspace within this ioctl */
+#endif
+}PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY;
+
+#if defined(SUPPORT_RGX)
+	#define BRIDGE_DISPATCH_TABLE_ENTRY_COUNT  (PVRSRV_BRIDGE_RGX_DISPATCH_LAST+1)
+	#define BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT  (PVRSRV_BRIDGE_RGX_LAST+1)
+#else
+	#define BRIDGE_DISPATCH_TABLE_ENTRY_COUNT  (PVRSRV_BRIDGE_DISPATCH_LAST+1)
+	#define BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT  (PVRSRV_BRIDGE_LAST+1)
+#endif
+
+extern PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY g_BridgeDispatchTable[BRIDGE_DISPATCH_TABLE_ENTRY_COUNT];
+
+IMG_VOID
+_SetDispatchTableEntry(IMG_UINT32 ui32BridgeGroup,
+					   IMG_UINT32 ui32Index,
+					   const IMG_CHAR *pszIOCName,
+					   BridgeWrapperFunction pfFunction,
+					   const IMG_CHAR *pszFunctionName,
+					   POS_LOCK hBridgeLock,
+					   const IMG_CHAR* pszBridgeLockName,
+					   IMG_BYTE* pbyBridgeBuffer,
+					   IMG_UINT32 ui32BridgeInBufferSize,
+					   IMG_UINT32 ui32BridgeOutBufferSize );
+
+
+/* PRQA S 0884,3410 2*/ /* macro relies on the lack of brackets */
+#define SetDispatchTableEntry(ui32BridgeGroup, ui32Index, pfFunction,\
+					hBridgeLock, pbyBridgeBuffer,\
+					ui32BridgeInBufferSize, ui32BridgeOutBufferSize) \
+	_SetDispatchTableEntry(PVRSRV_GET_BRIDGE_ID(ui32BridgeGroup), ui32Index, #ui32Index, (BridgeWrapperFunction)pfFunction, #pfFunction,\
+							(POS_LOCK)hBridgeLock, #hBridgeLock,\
+							pbyBridgeBuffer, ui32BridgeInBufferSize, ui32BridgeOutBufferSize )
+
+#define DISPATCH_TABLE_GAP_THRESHOLD 5
+
+
+#if defined(DEBUG_BRIDGE_KM)
+typedef struct _PVRSRV_BRIDGE_GLOBAL_STATS
+{
+	IMG_UINT32 ui32IOCTLCount;
+	IMG_UINT32 ui32TotalCopyFromUserBytes;
+	IMG_UINT32 ui32TotalCopyToUserBytes;
+} PVRSRV_BRIDGE_GLOBAL_STATS;
+
+/* OS specific code may want to report the stats held here and within the
+ * BRIDGE_DISPATCH_TABLE_ENTRYs (E.g. on Linux we report these via a
+ * debugfs entry /sys/kernel/debug/pvr/bridge_stats) */
+extern PVRSRV_BRIDGE_GLOBAL_STATS g_BridgeGlobalStats;
+#endif
+
+
+IMG_INT BridgedDispatchKM(CONNECTION_DATA * psConnection,
+					  PVRSRV_BRIDGE_PACKAGE   * psBridgePackageKM);
+
+
+PVRSRV_ERROR
+PVRSRVConnectKM(CONNECTION_DATA *psConnection,
+				IMG_UINT32 ui32Flags,
+				IMG_UINT32 ui32ClientBuildOptions,
+				IMG_UINT32 ui32ClientDDKVersion,
+				IMG_UINT32 ui32ClientDDKBuild,
+				IMG_UINT8  *pui8KernelArch,
+				IMG_UINT32 *ui32Log2PageSize);
+
+PVRSRV_ERROR
+PVRSRVDisconnectKM(IMG_VOID);
+
+PVRSRV_ERROR
+PVRSRVInitSrvDisconnectKM(CONNECTION_DATA *psConnection,
+							IMG_BOOL bInitSuccesful,
+							IMG_UINT32 ui32ClientBuildOptions);
+
+PVRSRV_ERROR
+PVRSRVDumpDebugInfoKM(IMG_UINT32 ui32VerbLevel);
+
+PVRSRV_ERROR
+PVRSRVGetDevClockSpeedKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+							IMG_PUINT32  pui32RGXClockSpeed);
+
+PVRSRV_ERROR
+PVRSRVHWOpTimeoutKM(IMG_VOID);
+
+/* performs a SOFT_RESET on the given device node */
+PVRSRV_ERROR
+PVRSRVSoftResetKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+                  IMG_UINT64 ui64ResetValue1,
+                  IMG_UINT64 ui64ResetValue2);
+
+
+#endif /* __BRIDGED_PVR_BRIDGE_H__ */
+
+/******************************************************************************
+ End of file (bridged_pvr_bridge.h)
+******************************************************************************/
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/srvkm.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/srvkm.h
new file mode 100644
index 0000000..40c6dcb
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/srvkm.h
@@ -0,0 +1,193 @@
+/**************************************************************************/ /*!
+@File
+@Title          Services kernel module internal header file
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef SRVKM_H
+#define SRVKM_H
+
+#include "servicesext.h"
+
+
+#if defined(__KERNEL__) && defined(ANDROID) && !defined(__GENKSYMS__)
+#define __pvrsrv_defined_struct_enum__
+#include <services_kernel_client.h>
+#endif
+
+/**	Use PVR_DPF() unless message is necessary in release build
+ */
+#ifdef PVR_DISABLE_LOGGING
+#define PVR_LOG(X)
+#else
+/* PRQA S 3410 1 */ /* this macro requires no brackets in order to work */
+#define PVR_LOG(X)			PVRSRVReleasePrintf X;
+#endif
+
+IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVReleasePrintf(const IMG_CHAR *pszFormat, ...) IMG_FORMAT_PRINTF(1, 2);
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVInit
+
+ @Description	Initialise services
+
+ @Input	   psSysData	: sysdata structure
+
+ @Return   PVRSRV_ERROR	:
+
+******************************************************************************/
+PVRSRV_ERROR IMG_CALLCONV PVRSRVInit(void *);
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVDeInit
+
+ @Description	De-Initialise services
+
+ @Input	   psSysData	: sysdata structure
+
+ @Return   PVRSRV_ERROR	:
+
+******************************************************************************/
+IMG_VOID IMG_CALLCONV PVRSRVDeInit(IMG_VOID *);
+
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVScheduleDevicesKM
+
+ @Description	Schedules all Services-Managed Devices to check their pending
+ 				command queues. The intention is that ScheduleDevices be called by the
+				3rd party BC driver after it has finished writing new data to its output
+				texture.
+
+ @Input		bInLISR
+
+ @Return	IMG_VOID
+
+******************************************************************************/
+IMG_IMPORT IMG_VOID PVRSRVScheduleDevicesKM(IMG_BOOL bInLISR);
+
+IMG_VOID IMG_CALLCONV PVRSRVSetDCState(IMG_UINT32 ui32State);
+
+PVRSRV_ERROR IMG_CALLCONV PVRSRVSaveRestoreLiveSegments(IMG_HANDLE hArena, IMG_PBYTE pbyBuffer, IMG_SIZE_T *puiBufSize, IMG_BOOL bSave);
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVScheduleDeviceCallbacks
+
+ @Description	Schedule all device callbacks
+
+ @Input		ui32CallerID
+
+ @Return	IMG_VOID
+
+******************************************************************************/
+IMG_VOID PVRSRVScheduleDeviceCallbacks(IMG_UINT32 ui32CallerID);
+
+
+
+/******************
+HIGHER LEVEL MACROS
+*******************/
+
+/*----------------------------------------------------------------------------
+Repeats the body of the loop for a certain minimum time, or until the body
+exits by its own means (break, return, goto, etc.)
+
+Example of usage:
+
+LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+{
+	if(psQueueInfo->ui32ReadOffset == psQueueInfo->ui32WriteOffset)
+	{
+		bTimeout = IMG_FALSE;
+		break;
+	}
+	
+	OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+} END_LOOP_UNTIL_TIMEOUT();
+
+-----------------------------------------------------------------------------*/
+
+/*	uiNotLastLoop will remain at 1 until the timeout has expired, at which time		
+ * 	it will be decremented and the loop executed one final time. This is necessary
+ *	when preemption is enabled. 
+ */
+/* PRQA S 3411,3431 12 */ /* critical format, leave alone */
+#define LOOP_UNTIL_TIMEOUT(TIMEOUT) \
+{\
+	IMG_UINT32 uiOffset, uiStart, uiCurrent; \
+	IMG_INT32 iNotLastLoop;					 \
+	for(uiOffset = 0, uiStart = OSClockus(), uiCurrent = uiStart + 1, iNotLastLoop = 1;\
+		((uiCurrent - uiStart + uiOffset) < (TIMEOUT)) || iNotLastLoop--;				\
+		uiCurrent = OSClockus(),													\
+		uiOffset = uiCurrent < uiStart ? IMG_UINT32_MAX - uiStart : uiOffset,		\
+		uiStart = uiCurrent < uiStart ? 0 : uiStart)
+
+#define END_LOOP_UNTIL_TIMEOUT() \
+}
+
+/*!
+ ******************************************************************************
+
+ @Function		PVRSRVGetErrorStringKM
+
+ @Description	Returns a text string relating to the PVRSRV_ERROR enum.
+
+ ******************************************************************************/
+IMG_IMPORT
+const IMG_CHAR *PVRSRVGetErrorStringKM(PVRSRV_ERROR eError);
+
+/*
+	FIXME: This should be defined elsewhere when server sync are implemented
+*/
+typedef struct _SERVER_SYNC_PRIM_
+{
+	/* Placeholder until structure is properly implemented */
+	IMG_UINT32 ui32Placeholder;
+} SERVER_SYNC_PRIM;
+
+
+#endif /* SRVKM_H */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/sync_server.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/sync_server.h
new file mode 100644
index 0000000..2344acd
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/sync_server.h
@@ -0,0 +1,318 @@
+/**************************************************************************/ /*!
+@File
+@Title          Server side synchronisation interface
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Describes the server side synchronisation functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#include "img_types.h"
+#include "device.h"
+#include "devicemem.h"
+#include "pdump.h"
+#include "pvrsrv_error.h"
+#include "connection_server.h"
+
+#ifndef _SYNC_SERVER_H_
+#define _SYNC_SERVER_H_
+
+typedef struct _SERVER_OP_COOKIE_ SERVER_OP_COOKIE;
+typedef struct _SERVER_SYNC_PRIMITIVE_ SERVER_SYNC_PRIMITIVE;
+typedef struct _SYNC_PRIMITIVE_BLOCK_ SYNC_PRIMITIVE_BLOCK;
+typedef struct _SERVER_SYNC_EXPORT_ SERVER_SYNC_EXPORT;
+typedef struct _SYNC_CONNECTION_DATA_ SYNC_CONNECTION_DATA;
+typedef struct SYNC_RECORD* SYNC_RECORD_HANDLE;
+
+PVRSRV_ERROR
+PVRSRVAllocSyncPrimitiveBlockKM(CONNECTION_DATA *psConnection,
+								PVRSRV_DEVICE_NODE *psDevNode,
+								SYNC_PRIMITIVE_BLOCK **ppsSyncBlk,
+								IMG_UINT32 *puiSyncPrimVAddr,
+								IMG_UINT32 *puiSyncPrimBlockSize,
+								DEVMEM_EXPORTCOOKIE **psExportCookie);
+
+PVRSRV_ERROR
+PVRSRVExportSyncPrimitiveBlockKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk,
+								 DEVMEM_EXPORTCOOKIE **psExportCookie);
+
+PVRSRV_ERROR
+PVRSRVUnexportSyncPrimitiveBlockKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk);
+
+PVRSRV_ERROR
+PVRSRVFreeSyncPrimitiveBlockKM(SYNC_PRIMITIVE_BLOCK *ppsSyncBlk);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimSetKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Index,
+					IMG_UINT32 ui32Value);
+
+PVRSRV_ERROR
+PVRSRVServerSyncPrimSetKM(SERVER_SYNC_PRIMITIVE *psServerSync, IMG_UINT32 ui32Value);
+
+#if defined(SUPPORT_INSECURE_EXPORT)
+PVRSRV_ERROR
+PVRSRVSyncPrimServerExportKM(SERVER_SYNC_PRIMITIVE *psSync,
+							SERVER_SYNC_EXPORT **ppsExport);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimServerUnexportKM(SERVER_SYNC_EXPORT *psExport);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimServerImportKM(SERVER_SYNC_EXPORT *psExport,
+							 SERVER_SYNC_PRIMITIVE **ppsSync,
+							 IMG_UINT32 *pui32SyncPrimVAddr);
+#endif
+
+#if defined(SUPPORT_SECURE_EXPORT)
+PVRSRV_ERROR
+PVRSRVSyncPrimServerSecureExportKM(CONNECTION_DATA *psConnection,
+								   SERVER_SYNC_PRIMITIVE *psSync,
+								   IMG_SECURE_TYPE *phSecure,
+								   SERVER_SYNC_EXPORT **ppsExport,
+								   CONNECTION_DATA **ppsSecureConnection);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimServerSecureUnexportKM(SERVER_SYNC_EXPORT *psExport);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimServerSecureImportKM(IMG_SECURE_TYPE hSecure,
+								   SERVER_SYNC_PRIMITIVE **ppsSync,
+								   IMG_UINT32 *pui32SyncPrimVAddr);
+#endif
+
+IMG_UINT32 PVRSRVServerSyncRequesterRegisterKM(IMG_UINT32 *pui32SyncRequesterID);
+IMG_VOID PVRSRVServerSyncRequesterUnregisterKM(IMG_UINT32 ui32SyncRequesterID);
+
+PVRSRV_ERROR
+PVRSRVSyncRecordAddKM(
+			SYNC_RECORD_HANDLE * phRecord,
+			SYNC_PRIMITIVE_BLOCK * hServerSyncPrimBlock,
+			IMG_UINT32 ui32FwBlockAddr,
+			IMG_UINT32 ui32SyncOffset,
+			IMG_BOOL bServerSync,
+			IMG_UINT32 ui32ClassNameSize,
+			const IMG_CHAR *pszClassName);
+PVRSRV_ERROR
+PVRSRVSyncRecordRemoveByHandleKM(
+			SYNC_RECORD_HANDLE hRecord);
+
+PVRSRV_ERROR
+PVRSRVServerSyncAllocKM(PVRSRV_DEVICE_NODE *psDevNode,
+						SERVER_SYNC_PRIMITIVE **ppsSync,
+						IMG_UINT32 *pui32SyncPrimVAddr,
+						IMG_UINT32 ui32ClassNameSize,
+						const IMG_CHAR *szClassName);
+PVRSRV_ERROR
+PVRSRVServerSyncFreeKM(SERVER_SYNC_PRIMITIVE *psSync);
+
+PVRSRV_ERROR
+PVRSRVServerSyncGetStatusKM(IMG_UINT32 ui32SyncCount,
+							SERVER_SYNC_PRIMITIVE **papsSyncs,
+							IMG_UINT32 *pui32UID,
+							IMG_UINT32 *pui32FWAddr,
+							IMG_UINT32 *pui32CurrentOp,
+							IMG_UINT32 *pui32NextOp);
+
+PVRSRV_ERROR
+PVRSRVServerSyncQueueSWOpKM(SERVER_SYNC_PRIMITIVE *psSync,
+						  IMG_UINT32 *pui32FenceValue,
+						  IMG_UINT32 *pui32UpdateValue,
+						  IMG_UINT32 ui32SyncRequesterID,
+						  IMG_BOOL bUpdate,
+						  IMG_BOOL *pbFenceRequired);
+
+PVRSRV_ERROR
+PVRSRVServerSyncQueueHWOpKM(SERVER_SYNC_PRIMITIVE *psSync,
+							   IMG_BOOL bUpdate,
+						       IMG_UINT32 *pui32FenceValue,
+						       IMG_UINT32 *pui32UpdateValue);
+
+IMG_BOOL
+ServerSyncFenceIsMet(SERVER_SYNC_PRIMITIVE *psSync,
+					 IMG_UINT32 ui32FenceValue);
+
+IMG_VOID
+ServerSyncCompleteOp(SERVER_SYNC_PRIMITIVE *psSync,
+					 IMG_BOOL bDoUpdate,
+					 IMG_UINT32 ui32UpdateValue);
+
+
+PVRSRV_ERROR
+PVRSRVSyncPrimOpCreateKM(IMG_UINT32 ui32SyncBlockCount,
+						 SYNC_PRIMITIVE_BLOCK **papsSyncPrimBlock,
+						 IMG_UINT32 ui32ClientSyncCount,
+						 IMG_UINT32 *paui32SyncBlockIndex,
+						 IMG_UINT32 *paui32Index,
+						 IMG_UINT32 ui32ServerSyncCount,
+						 SERVER_SYNC_PRIMITIVE **papsServerSync,
+						 SERVER_OP_COOKIE **ppsServerCookie);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimOpTakeKM(SERVER_OP_COOKIE *psServerCookie,
+					       IMG_UINT32 ui32ClientSyncCount,
+					       IMG_UINT32 *paui32Flags,
+					       IMG_UINT32 *paui32FenceValue,
+					       IMG_UINT32 *paui32UpdateValue,
+					       IMG_UINT32 ui32ServerSyncCount,
+						   IMG_UINT32 *paui32ServerFlags);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimOpReadyKM(SERVER_OP_COOKIE *psServerCookie,
+						IMG_BOOL *pbReady);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimOpCompleteKM(SERVER_OP_COOKIE *psServerCookie);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimOpDestroyKM(SERVER_OP_COOKIE *psServerCookie);
+
+IMG_UINT32 ServerSyncGetId(SERVER_SYNC_PRIMITIVE *psSync);
+
+IMG_UINT32 ServerSyncGetFWAddr(SERVER_SYNC_PRIMITIVE *psSync);
+
+IMG_UINT32 ServerSyncGetValue(SERVER_SYNC_PRIMITIVE *psSync);
+
+IMG_UINT32 ServerSyncGetNextValue(SERVER_SYNC_PRIMITIVE *psSync);
+
+IMG_VOID ServerSyncDumpPending(IMG_VOID);
+
+PVRSRV_ERROR SyncRegisterConnection(SYNC_CONNECTION_DATA **ppsSyncConnectionData);
+IMG_VOID SyncUnregisterConnection(SYNC_CONNECTION_DATA *ppsSyncConnectionData);
+IMG_VOID SyncConnectionPDumpSyncBlocks(SYNC_CONNECTION_DATA *ppsSyncConnectionData);
+
+PVRSRV_ERROR ServerSyncInit(IMG_VOID);
+IMG_VOID ServerSyncDeinit(IMG_VOID);
+
+#if defined(PDUMP)
+PVRSRV_ERROR
+PVRSRVSyncPrimPDumpKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimPDumpValueKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset, 
+							IMG_UINT32 ui32Value);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimPDumpPolKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset,
+						 IMG_UINT32 ui32Value, IMG_UINT32 ui32Mask,
+						 PDUMP_POLL_OPERATOR eOperator,
+						 PDUMP_FLAGS_T uiDumpFlags);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimOpPDumpPolKM(SERVER_OP_COOKIE *psServerCookie,
+						 PDUMP_POLL_OPERATOR eOperator,
+						 PDUMP_FLAGS_T ui32PDumpFlags);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimPDumpCBPKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT64 ui32Offset,
+						 IMG_UINT64 uiWriteOffset, IMG_UINT64 uiPacketSize,
+						 IMG_UINT64 uiBufferSize);
+
+#else	/* PDUMP */
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVSyncPrimPDumpKM)
+#endif
+static INLINE PVRSRV_ERROR
+PVRSRVSyncPrimPDumpKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset)
+{
+	PVR_UNREFERENCED_PARAMETER(psSyncBlk);
+	PVR_UNREFERENCED_PARAMETER(ui32Offset);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVSyncPrimPDumpValueKM)
+#endif
+static INLINE PVRSRV_ERROR
+PVRSRVSyncPrimPDumpValueKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset, 
+							IMG_UINT32 ui32Value)
+{
+	PVR_UNREFERENCED_PARAMETER(psSyncBlk);
+	PVR_UNREFERENCED_PARAMETER(ui32Offset);
+	PVR_UNREFERENCED_PARAMETER(ui32Value);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVSyncPrimPDumpPolKM)
+#endif
+static INLINE PVRSRV_ERROR
+PVRSRVSyncPrimPDumpPolKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset,
+						 IMG_UINT32 ui32Value, IMG_UINT32 ui32Mask,
+						 PDUMP_POLL_OPERATOR eOperator,
+						 PDUMP_FLAGS_T uiDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psSyncBlk);
+	PVR_UNREFERENCED_PARAMETER(ui32Offset);
+	PVR_UNREFERENCED_PARAMETER(ui32Value);
+	PVR_UNREFERENCED_PARAMETER(ui32Mask);
+	PVR_UNREFERENCED_PARAMETER(eOperator);
+	PVR_UNREFERENCED_PARAMETER(uiDumpFlags);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVSyncPrimOpPDumpPolKM)
+#endif
+static INLINE PVRSRV_ERROR
+PVRSRVSyncPrimOpPDumpPolKM(SERVER_OP_COOKIE *psServerCookie,
+						 PDUMP_POLL_OPERATOR eOperator,
+						 PDUMP_FLAGS_T uiDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psServerCookie);
+	PVR_UNREFERENCED_PARAMETER(eOperator);
+	PVR_UNREFERENCED_PARAMETER(uiDumpFlags);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVSyncPrimPDumpCBPKM)
+#endif
+static INLINE PVRSRV_ERROR
+PVRSRVSyncPrimPDumpCBPKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT64 ui32Offset,
+						 IMG_UINT64 uiWriteOffset, IMG_UINT64 uiPacketSize,
+						 IMG_UINT64 uiBufferSize)
+{
+	PVR_UNREFERENCED_PARAMETER(psSyncBlk);
+	PVR_UNREFERENCED_PARAMETER(ui32Offset);
+	PVR_UNREFERENCED_PARAMETER(uiWriteOffset);
+	PVR_UNREFERENCED_PARAMETER(uiPacketSize);
+	PVR_UNREFERENCED_PARAMETER(uiBufferSize);
+	return PVRSRV_OK;
+}
+#endif	/* PDUMP */
+#endif	/*_SYNC_SERVER_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/sync_server_internal.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/sync_server_internal.h
new file mode 100644
index 0000000..3a913df
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/sync_server_internal.h
@@ -0,0 +1,55 @@
+/**************************************************************************/ /*!
+@File
+@Title          Server side internal synchronisation interface
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Describes the server side internal synchronisation functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef _SYNC_SERVER_INTERNAL_H_
+#define _SYNC_SERVER_INTERNAL_H_
+
+#include "img_types.h"
+
+IMG_VOID
+ServerSyncRef(SERVER_SYNC_PRIMITIVE *psSync);
+
+IMG_VOID
+ServerSyncUnref(SERVER_SYNC_PRIMITIVE *psSync);
+
+#endif	/*_SYNC_SERVER_INTERNAL_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/tlintern.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/tlintern.h
new file mode 100644
index 0000000..2aaa776
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/tlintern.h
@@ -0,0 +1,273 @@
+/*************************************************************************/ /*!
+@File
+@Title          Transport Layer internals
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Transport Layer header used by TL internally
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef __TLINTERN_H__
+#define __TLINTERN_H__
+
+
+#include "devicemem_typedefs.h"
+#include "pvr_tlcommon.h"
+#include "device.h"
+#include "lock.h"
+
+/* Forward declarations */
+typedef struct _TL_SNODE_* PTL_SNODE;
+
+/*! TL stream structure container.
+ *    pbyBuffer   holds the circular buffer.
+ *    ui32Read    points to the beginning of the buffer, ie to where data to
+ *                  Read begin.
+ *    ui32Write   points to the end of data that have been committed, ie this is
+ *                  where new data will be written.
+ *    ui32Pending number of bytes reserved in last reserve call which have not
+ *                  yet been submitted. Therefore these data are not ready to
+ *                  be transported.
+ *    hStreamLock - provides atomic protection for the ui32Pending & ui32Write
+ *                  members of the structure for when they are checked and/or
+ *                  updated in the context of a stream writer (producer)
+ *                  calling DoTLStreamReserve() & TLStreamCommit().
+ *                - Reader context is not multi-threaded, only one client per
+ *                  stream is allowed. Also note the read context may be in an
+ *                  ISR which prevents a design where locks can be held in the
+ *                  AcquireData/ReleaseData() calls. Thus this lock only
+ *                  protects the stream members from simultaneous writers.
+ *
+ *      ui32Read < ui32Write <= ui32Pending 
+ *        where < and <= operators are overloaded to make sense in a circular way.
+ */
+typedef struct _TL_STREAM_ 
+{
+	IMG_CHAR 			szName[PRVSRVTL_MAX_STREAM_NAME_SIZE];	/*!< String name identifier */
+	IMG_BOOL 			bDrop; 					/*!< Flag: When buffer is full drop new data instead of 
+														   overwriting older data */
+	IMG_BOOL 			bBlock;					/*!< Flag: When buffer is full reserve will block until there is
+														   enough free space in the buffer to fullfil the request. */
+	IMG_BOOL 			bWaitForEmptyOnDestroy; /*!< Flag: On destroying a non empty stream block until 
+														   stream is drained. */
+	IMG_BOOL            bNoSignalOnCommit;      /*!< Flag: Used to avoid the TL signalling waiting consumers
+                                                           that new data is available on every commit. Producers
+                                                           using this flag will need to manually signal when
+                                                           appropriate using the TLStreamSync() API */
+
+	IMG_VOID			(*pfProducerCallback)(IMG_VOID); /*!< Optional producer callback of type TL_STREAM_SOURCECB */
+	IMG_PVOID			pvProducerUserData;	             /*!< Producer callback user data */
+
+	volatile IMG_UINT32 ui32Read; 				/*!< Pointer to the beginning of available data */
+	volatile IMG_UINT32 ui32Write;				/*!< Pointer to already committed data which are ready to be
+													 copied to user space*/
+	IMG_UINT32			ui32BufferUt;			/*!< Buffer utilisation high watermark, see
+	 	 	 	 	 	 	 	 	 	 	 	 * TL_BUFFER_UTILIZATION in tlstream.c */
+	IMG_UINT32 			ui32Pending;			/*!< Count pending bytes reserved in buffer */
+	IMG_UINT32 			ui32Size; 				/*!< Buffer size */
+	IMG_BYTE 			*pbyBuffer;				/*!< Actual data buffer */
+
+	PTL_SNODE 			psNode;					/*!< Ptr to parent stream node */
+	DEVMEM_MEMDESC 		*psStreamMemDesc;		/*!< MemDescriptor used to allocate buffer space through PMR */
+	DEVMEM_EXPORTCOOKIE sExportCookie; 			/*!< Export cookie for stream DEVMEM */
+
+	IMG_HANDLE			hProducerEvent;			/*!< Handle to wait on if there is not enough space */
+	IMG_HANDLE			hProducerEventObj;		/*!< Handle to signal blocked reserve calls */
+
+	POS_LOCK 			hStreamLock;			/*!< Lock for ui32Pending & ui32Write*/
+} TL_STREAM, *PTL_STREAM;
+
+/* there need to be enough space reserved in the buffer for 2 minimal packets
+ * and it needs to be aligned the same way the buffer is or there will be a
+ * compile error.*/
+#define BUFFER_RESERVED_SPACE 2*PVRSRVTL_PACKET_ALIGNMENT
+
+/* ensure the space reserved follows the buffer's alignment */
+BLD_ASSERT(!(BUFFER_RESERVED_SPACE&(PVRSRVTL_PACKET_ALIGNMENT-1)), tlintern_h);
+
+/* Define the largest value that a uint that matches the 
+ * PVRSRVTL_PACKET_ALIGNMENT size can hold */
+#define MAX_UINT 0xffffFFFF
+
+/*! Defines the value used for TL_STREAM.ui32Pending when no reserve is
+ * outstanding on the stream. */
+#define NOTHING_PENDING IMG_UINT32_MAX
+
+
+/*
+ * Transport Layer Stream Descriptor types/defs
+ */
+typedef struct _TL_STREAM_DESC_
+{
+	PTL_SNODE	psNode;			/*!< Ptr to parent stream node */
+	IMG_UINT32	ui32Flags;
+	IMG_HANDLE	hDataEvent; 	/*!< For wait call */
+} TL_STREAM_DESC, *PTL_STREAM_DESC;
+
+PTL_STREAM_DESC TLMakeStreamDesc(PTL_SNODE f1, IMG_UINT32 f2, IMG_HANDLE f3);
+
+#define TL_STREAM_KM_FLAG_MASK	0xFFFF0000
+#define TL_STREAM_FLAG_TEST		0x10000000
+#define TL_STREAM_FLAG_WRAPREAD	0x00010000
+
+#define TL_STREAM_UM_FLAG_MASK	0x0000FFFF
+
+/*
+ * Transport Layer stream list node
+ */
+typedef struct _TL_SNODE_
+{
+	struct _TL_SNODE_*  psNext;				/*!< Linked list next element */
+	IMG_HANDLE			hDataEventObj;		/*!< Readers 'wait for data' event */
+	PTL_STREAM 			psStream;			/*!< TL Stream object */
+	IMG_INT				uiWRefCount;		/*!< Stream writer reference count */
+	PTL_STREAM_DESC 	psRDesc;			/*!< Stream reader 0 or ptr only */
+} TL_SNODE;
+
+PTL_SNODE TLMakeSNode(IMG_HANDLE f2, TL_STREAM *f3, TL_STREAM_DESC *f4);
+
+/*
+ * Transport Layer global top types and variables
+ * Use access function to obtain pointer.
+ *
+ * hTLGDLock - provides atomicity over read/check/write operations and
+ *             sequence of operations on uiClientCnt, psHead list of SNODEs and
+ *             the immediate members in a list element SNODE structure.
+ *           - This larger scope of responsibility for this lock helps avoid
+ *             the need for a lock in the SNODE structure.
+ *           - Lock held in the client (reader) context when streams are
+ *             opened/closed and in the server (writer) context when streams
+ *             are created/open/closed.
+ */
+typedef struct _TL_GDATA_
+{
+	IMG_PVOID  psRgxDevNode;        /* Device node to use for buffer allocations */
+	IMG_HANDLE hTLEventObj;         /* Global TL signal object, new streams, etc */
+
+	IMG_UINT   uiClientCnt;         /* Counter to track the number of client stream connections. */
+	PTL_SNODE  psHead;              /* List of TL streams and associated client handle */
+
+	POS_LOCK	hTLGDLock;          /* Lock for structure AND psHead SNODE list */
+} TL_GLOBAL_DATA, *PTL_GLOBAL_DATA;
+
+/*
+ * Transport Layer Internal Kernel-Mode Server API
+ */
+TL_GLOBAL_DATA* TLGGD(IMG_VOID);		/* TLGetGlobalData() */
+
+PVRSRV_ERROR TLInit(PVRSRV_DEVICE_NODE *psDevNode);
+IMG_VOID TLDeInit(IMG_VOID);
+
+PVRSRV_DEVICE_NODE* TLGetGlobalRgxDevice(IMG_VOID);
+
+IMG_VOID  TLAddStreamNode(PTL_SNODE psAdd);
+PTL_SNODE TLFindStreamNodeByName(IMG_PCHAR pszName);
+PTL_SNODE TLFindStreamNodeByDesc(PTL_STREAM_DESC psDesc);
+
+/****************************************************************************************
+ Function Name	: TLTryRemoveStreamAndFreeStreamNode
+ 
+ Inputs		: PTL_SNODE	Pointer to the TL_SNODE whose stream is requested
+ 			to be removed from TL_GLOBAL_DATA's list
+ 
+ Return Value	: IMG_TRUE	-	If the stream was made NULL and this
+ 					TL_SNODE was removed from the
+					TL_GLOBAL_DATA's list
+
+ 		  IMG_FALSE	-	If the stream wasn't made NULL as there
+		  			is a client connected to this stream
+
+ Description	: If there is no client currently connected to this stream then,
+ 		  	This function removes this TL_SNODE from the
+			TL_GLOBAL_DATA's list. The caller is responsible for the
+			cleanup of the TL_STREAM whose TL_SNODE may be removed
+		  
+		  Otherwise, this function does nothing
+*****************************************************************************************/
+IMG_BOOL  TLTryRemoveStreamAndFreeStreamNode(PTL_SNODE psRemove);
+
+/*****************************************************************************************
+ Function Name	: TLRemoveDescAndTryFreeStreamNode
+ 
+ Inputs		: PTL_SNODE	Pointer to the TL_SNODE whose descriptor is
+ 			requested to be removed 
+ 
+ Return Value	: IMG_TRUE	-	If this	TL_SNODE was removed from the
+					TL_GLOBAL_DATA's list
+
+ 		  IMG_FALSE	-	Otherwise
+
+ Description	: This function removes the stream descriptor from this TL_SNODE
+ 		  and,
+		  If there is no writer (producer context) currently bound to this stream,
+ 		  	This function removes this TL_SNODE from the
+			TL_GLOBAL_DATA's list. The caller is responsible for the
+			cleanup of the TL_STREAM whose TL_SNODE may be removed
+******************************************************************************************/
+IMG_BOOL  TLRemoveDescAndTryFreeStreamNode(PTL_SNODE psRemove);
+
+/*
+ * Transport Layer stream interface to server part declared here to avoid
+ * circular dependency.
+ */
+IMG_UINT32 TLStreamAcquireReadPos(PTL_STREAM psStream, IMG_UINT32* puiReadOffset);
+IMG_VOID TLStreamAdvanceReadPos(PTL_STREAM psStream, IMG_UINT32 uiReadLen);
+
+DEVMEM_EXPORTCOOKIE* TLStreamGetBufferCookie(PTL_STREAM psStream);
+IMG_BOOL TLStreamEOS(PTL_STREAM psStream);
+
+/****************************************************************************************
+ Function Name	: TLStreamDestroy
+  
+ Inputs		: PTL_STREAM	Pointer to the TL_STREAM to be destroyed
+ 
+ Description	: This function performs all the clean-up operations required for
+ 			destruction of this stream
+*****************************************************************************************/
+IMG_VOID TLStreamDestroy (PTL_STREAM);
+
+/*
+ * Test related functions
+ */
+PVRSRV_ERROR TUtilsInit (IMG_VOID);
+PVRSRV_ERROR TUtilsDeinit (IMG_VOID);
+
+
+#endif /* __TLINTERN_H__ */
+/******************************************************************************
+ End of file (tlintern.h)
+******************************************************************************/
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/tlserver.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/tlserver.h
new file mode 100644
index 0000000..4dda604
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/tlserver.h
@@ -0,0 +1,83 @@
+/*************************************************************************/ /*!
+@File
+@Title          KM server Transport Layer implementation
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Main bridge APIs for Transport Layer client functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __TLSERVER_H_
+#define __TLSERVER_H_
+
+#include <stddef.h>
+
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+
+#include "tlintern.h"
+
+/*
+ * Transport Layer Client API Kernel-Mode bridge implementation
+ */
+
+PVRSRV_ERROR TLServerConnectKM(CONNECTION_DATA *psConnection);
+PVRSRV_ERROR TLServerDisconnectKM(CONNECTION_DATA *psConnection);
+
+PVRSRV_ERROR TLServerOpenStreamKM(IMG_PCHAR pszName,
+			   IMG_UINT32 ui32Mode,
+			   PTL_STREAM_DESC* ppsSD,
+			   DEVMEM_EXPORTCOOKIE** ppsBufCookie);
+
+PVRSRV_ERROR TLServerCloseStreamKM(PTL_STREAM_DESC psSD);
+
+PVRSRV_ERROR TLServerAcquireDataKM(PTL_STREAM_DESC psSD,
+			   IMG_UINT32* puiReadOffset,
+			   IMG_UINT32* puiReadLen);
+
+PVRSRV_ERROR TLServerReleaseDataKM(PTL_STREAM_DESC psSD,
+				 IMG_UINT32 uiReadOffset,
+				 IMG_UINT32 uiReadLen);
+
+
+#endif /* __TLSERVER_H_ */
+
+/*****************************************************************************
+ End of file (tlserver.h)
+*****************************************************************************/
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/tlstream.h b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/tlstream.h
new file mode 100644
index 0000000..dbca4a9
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/server/include/tlstream.h
@@ -0,0 +1,331 @@
+/*************************************************************************/ /*!
+@File
+@Title          Transport Layer kernel side API.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    TL provides driver components with a way to copy data from kernel
+                space to user space (e.g. screen/file).
+
+                Data can be passed to the Transport Layer through the 
+                TL Stream (kernel space) API interface.
+
+                The buffer provided to every stream is a modified version of a 
+                circular buffer. Which CB version is created is specified by
+                relevant flags when creating a stream. Currently two types
+                of buffer are available:
+                - TL_FLAG_DROP_DATA:
+                  When the buffer is full, incoming data are dropped 
+                  (instead of overwriting older data) and a marker is set 
+                  to let the user know that data have been lost.
+                - TL_FLAG_BLOCKING_RESERVE:
+                  When the circular buffer is full, reserve/write calls block
+                  until enough space is freed.
+
+                All size/space requests are in bytes. However, the actual
+                implementation uses native word sizes (i.e. 4 byte aligned).
+
+                The user does not need to provide space for the stream buffer 
+                as the TL handles memory allocations and usage.
+
+                Inserting data to a stream's buffer can be done either:
+                - by using TLReserve/TLCommit: User is provided with a buffer
+                                                 to write data to.
+                - or by using TLWrite:         User provides a buffer with 
+                                                 data to be committed. The TL 
+                                                 copies the data from the 
+                                                 buffer into the stream buffer 
+                                                 and returns.
+                Users should be aware that there are implementation overheads 
+                associated with every stream buffer. If you find that less 
+                data are captured than expected then try increasing the
+                stream buffer size or use TLInfo to obtain buffer parameters
+                and calculate optimum required values at run time.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef __TLSTREAM_H__
+#define __TLSTREAM_H__
+
+
+#include "img_types.h"
+
+/*! Flags specifying stream and circular buffer behaviour */
+/*! Discard new data if the buffer is full */
+#define TL_FLAG_DROP_DATA              (1U<<0)
+/*! Block Reserve (subsequently Write) calls if there is not enough space 
+ *    until some space is freed. */
+#define TL_FLAG_BLOCKING_RESERVE	    (1U<<1)
+/*! Do not destroy stream if there still are data that have not been 
+ *     copied in user space. BLock until the stream is emptied. */
+#define TL_FLAG_FORCE_FLUSH            (1U<<2)
+/*! Do not signal consumers on commit automatically when the stream buffer
+ * transitions from empty to non-empty. Producer responsible for signal when
+ * it chooses. */
+#define TL_FLAG_NO_SIGNAL_ON_COMMIT    (1U<<3)
+
+/*! Structure used to pass internal TL stream sizes information to users.*/
+typedef struct _TL_STREAM_INFO_
+{
+    IMG_UINT32 headerSize;          /*!< Packet header size in bytes */
+    IMG_UINT32 minReservationSize;  /*!< Minimum data size reserved in bytes */
+    IMG_UINT32 pageSize;            /*!< Page size in bytes */
+    IMG_UINT32 pageAlign;           /*!< Page alignment in bytes */
+} TL_STREAM_INFO, *PTL_STREAM_INFO;
+
+/*! Callback operations or notifications that a stream producer may handle
+ * when requested by the Transport Layer.
+ */
+#define TL_SOURCECB_OP_CLIENT_EOS 0x01  /*!< Client has reached end of stream,
+                                         * can anymore data be supplied?
+                                         * ui32Resp ignored in this operation */
+
+/*! Function pointer type for the callback handler into the "producer" code
+ * that writes data to the TL stream.  Producer should handle the notification
+ * or operation supplied in ui32ReqOp on stream hStream. The
+ * Operations and notifications are defined above in TL_SOURCECB_OP */
+typedef PVRSRV_ERROR (*TL_STREAM_SOURCECB)(IMG_HANDLE hStream,
+		IMG_UINT32 ui32ReqOp, IMG_UINT32* ui32Resp, IMG_VOID* pvUser);
+
+/*************************************************************************/ /*!
+ @Function      TLStreamCreate
+ @Description   Request the creation of a new stream and open a handle.
+ 				If creating a stream which should continue to exist after the
+				current context is finished, then TLStreamCreate must be 
+				followed by a TLStreamOpen call. On any case, the number of 
+				create/open calls must balance with the number of close calls
+				used. This ensures the resources of a stream are released when
+				it is no longer required.
+ @Output        phStream        Pointer to handle to store the new stream.
+ @Input         szStreamName    Name of stream, maximum length:
+                                  PRVSRVTL_MAX_STREAM_NAME_SIZE.
+                                  If a longer string is provided,creation fails.
+ @Input         ui32Size        Desired buffer size in bytes.
+ @Input         ui32StreamFlags Flags that configure buffer behaviour.See above.
+ @Input         pfProducerDB    Optional callback, may be null.
+ @Input         pvProducerData  Optional user data for callback, may be null.
+ @Return        PVRSRV_ERROR_INVALID_PARAMS  NULL stream handle or string name 
+                                               exceeded MAX_STREAM_NAME_SIZE
+ @Return        PVRSRV_ERROR_OUT_OF_MEMORY   Failed to allocate space for stream
+                                               handle.
+ @Return        PVRSRV_ERROR_DUPLICATE_VALUE There already exists a stream with
+ 											   the same stream name string.
+ @Return        eError                       Internal services call returned
+                                               eError error number.
+ @Return        PVRSRV_OK
+*/ /**************************************************************************/
+PVRSRV_ERROR 
+TLStreamCreate(IMG_HANDLE *phStream,
+               IMG_CHAR	  *szStreamName,
+               IMG_UINT32 ui32Size,
+               IMG_UINT32 ui32StreamFlags,
+               TL_STREAM_SOURCECB pfProducerCB,
+               IMG_PVOID pvProducerUD);
+
+/*************************************************************************/ /*!
+ @Function      TLStreamOpen
+ @Description   Attach to existing stream that has already been created by a
+                  TLStreamCreate call. A handle is returned to the stream.
+ @Output        phStream        Pointer to handle to store the stream.
+ @Input         szStreamName    Name of stream, should match an already
+                                  existing stream name
+ @Return        PVRSRV_ERROR_NOT_FOUND        None of the streams matched the
+                                                 requested stream name.
+				PVRSRV_ERROR_INVALID_PARAMS	   non NULL pointer to stream 
+											     handler is required.
+ @Return        PVRSRV_OK                      Success.
+*/ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamOpen(IMG_HANDLE *phStream,
+             IMG_CHAR   *szStreamName);
+
+/*************************************************************************/ /*!
+ @Function      TLStreamClose
+ @Description   Detach from the stream associated with the given handle. If
+                  the current handle is the last one accessing the stream 
+				  (i.e. the number of TLStreamCreate+TLStreamOpen calls matches
+				  the number of TLStreamClose calls) then the stream is also
+				  deleted.
+				On return the handle is no longer valid.
+ @Input         hStream     Handle to stream that will be closed.
+ @Return        None.
+*/ /**************************************************************************/
+IMG_VOID
+TLStreamClose(IMG_HANDLE hStream);
+
+/*************************************************************************/ /*!
+ @Function      TLStreamReserve
+ @Description   Reserve space in stream buffer. When successful every
+                  TLStreamReserve call must be followed by a matching
+                  TLStreamCommit call. While a TLStreamCommit call is pending
+                  for a stream, subsequent TLStreamReserve calls for this
+                  stream will fail.
+ @Input         hStream         Stream handle.
+ @Output        ppui8Data       Pointer to a pointer to a location in the 
+                                  buffer. The caller can then use this address
+                                  in writing data into the stream. 
+ @Input         ui32Size        Number of bytes to reserve in buffer.
+ @Return        PVRSRV_INVALID_PARAMS       NULL stream handler.
+ @Return        PVRSRV_ERROR_NOT_READY      There are data previously reserved
+                                              that are pending to be committed.
+ @Return        PVRSRV_ERROR_STREAM_MISUSE  Misusing the stream by trying to 
+                                              reserve more space than the 
+                                              buffer size.
+                PVRSRV_ERROR_STREAM_FULL    Stream buffer full, data not written
+ @Return        PVRSRV_OK                   Success, output arguments valid.
+*/ /**************************************************************************/
+PVRSRV_ERROR 
+TLStreamReserve(IMG_HANDLE hStream, 
+                IMG_UINT8  **ppui8Data,
+                IMG_UINT32 ui32Size);
+
+/*************************************************************************/ /*!
+ @Function      TLStreamReserve2
+ @Description   Reserve space in stream buffer. When successful every
+                  TLStreamReserve call must be followed by a matching
+                  TLStreamCommit call. While a TLStreamCommit call is pending
+                  for a stream, subsequent TLStreamReserve calls for this
+                  stream will fail.
+ @Input         hStream         Stream handle.
+ @Output        ppui8Data       Pointer to a pointer to a location in the
+                                  buffer. The caller can then use this address
+                                  in writing data into the stream.
+ @Input         ui32Size        Ideal number of bytes to reserve in buffer.
+ @Input         ui32SizeMin     Minimum number of bytes to reserve in buffer.
+ @Input         pui32Available  Optional, but when present and the FULL error
+                                  is returned, a size suggestion is returned
+								  in this argument which the caller can attempt
+                                  to reserve again for a successful allocation.
+ @Return        PVRSRV_INVALID_PARAMS       NULL stream handler.
+ @Return        PVRSRV_ERROR_NOT_READY      There are data previously reserved
+                                              that are pending to be committed.
+ @Return        PVRSRV_ERROR_STREAM_MISUSE  Misusing the stream by trying to
+                                              reserve more space than the
+                                              buffer size.
+                PVRSRV_ERROR_STREAM_FULL    Stream buffer full, data not written
+ @Return        PVRSRV_OK                   Success, output arguments valid.
+*/ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamReserve2(IMG_HANDLE hStream,
+                IMG_UINT8  **ppui8Data,
+                IMG_UINT32 ui32Size,
+                IMG_UINT32 ui32SizeMin,
+                IMG_UINT32* pui32Available);
+
+/*************************************************************************/ /*!
+ @Function      TLStreamCommit
+ @Description   Notify TL that data have been written in the stream buffer.
+                  Should always follow and match TLStreamReserve call.
+ @Input         hStream         Stream handle.
+ @Input         ui32Size        Number of bytes that have been added to the
+                                  stream.
+ @Return        PVRSRV_ERROR_INVALID_PARAMS  NULL stream handle.
+ @Return        PVRSRV_ERROR_STREAM_MISUSE   Commit results in more data 
+                                               committed than the buffer size,
+                                               the stream is misused.
+ @Return        eError                       Commit was successful but 
+                                               internal services call returned
+                                               eError error number.
+ @Return        PVRSRV_OK
+*/ /**************************************************************************/
+PVRSRV_ERROR 
+TLStreamCommit(IMG_HANDLE hStream,
+               IMG_UINT32 ui32Size);
+
+/*************************************************************************/ /*!
+ @Function      TLStreamWrite
+ @Description   Combined Reserve/Commit call. This function Reserves space in 
+                  the specified stream buffer, copies ui32Size bytes of data
+                  from the array pui8Src points to and Commits in an "atomic"
+                  style operation.
+ @Input         hStream         Stream handle.
+ @Input         pui8Src         Source to read data from.
+ @Input         ui32Size        Number of bytes to copy and commit.
+ @Return        PVRSRV_ERROR_INVALID_PARAMS  NULL stream handler.
+ @Return        eError                       Error codes returned by either 
+                                               Reserve or Commit.
+ @Return        PVRSRV_OK
+ */ /**************************************************************************/
+PVRSRV_ERROR 
+TLStreamWrite(IMG_HANDLE hStream, 
+              IMG_UINT8  *pui8Src,
+              IMG_UINT32 ui32Size);
+
+/*************************************************************************/ /*!
+ @Function      TLStreamSync
+ @Description   Signal the consumer to start acquiring data from the stream
+                buffer. Called by producers that use the TL_FLAG_NO_SIGNAL_ON_COMMIT
+                flag to manually control when consumers starting reading the
+                stream. Used when multiple small writes need to be batched.
+ @Input         hStream         Stream handle.
+ @Return        PVRSRV_ERROR_INVALID_PARAMS  NULL stream handle.
+ @Return        eError                       Error codes returned by either
+                                               Reserve or Commit.
+ @Return        PVRSRV_OK
+ */ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamSync(IMG_HANDLE hStream);
+
+
+/*************************************************************************/ /*!
+ @Function      TLStreamMarkEOS
+ @Description   Insert a EOS marker packet in the given stream.
+ @Input         hStream         Stream handle.
+ @Return        PVRSRV_ERROR_INVALID_PARAMS	NULL stream handler.
+ @Return        eError                     	Error codes returned by either
+                                              Reserve or Commit.
+ @Return        PVRSRV_OK       			Success.
+*/ /**************************************************************************/
+PVRSRV_ERROR 
+TLStreamMarkEOS(IMG_HANDLE hStream);
+
+/*************************************************************************/ /*!
+ @Function      TLStreamInfo
+ @Description   Run time information about buffer elemental sizes.
+                It sets psInfo members accordingly. Users can use those values
+                to calculate the parameters they use in TLStreamCreate and 
+                TLStreamReserve.
+ @Output        psInfo          pointer to stream info structure.
+ @Return        None.
+*/ /**************************************************************************/
+IMG_VOID 
+TLStreamInfo(PTL_STREAM_INFO psInfo);
+
+
+#endif /* __TLSTREAM_H__ */
+/*****************************************************************************
+ End of file (tlstream.h)
+*****************************************************************************/
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/shared/common/devicemem.c b/drivers/external_drivers/intel_media/graphics/rgx/services/shared/common/devicemem.c
new file mode 100644
index 0000000..d759681
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/shared/common/devicemem.c
@@ -0,0 +1,2026 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device Memory Management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Front End (nominally Client side part, but now invokable
+                from server too) of device memory management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "devicemem.h"
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "allocmem.h"
+#include "ra.h"
+#include "osfunc.h"
+#include "devicemem_mmap.h"
+#include "devicemem_utils.h"
+#include "client_mm_bridge.h"
+#if defined(PDUMP)
+#include "devicemem_pdump.h"
+#endif
+#if defined(PVR_RI_DEBUG)
+#include "client_ri_bridge.h"
+#endif 
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+#include "client_devicememhistory_bridge.h"
+#endif
+
+#if defined(__KERNEL__)
+#include "pvrsrv.h"
+#if defined(LINUX)
+#include "linux/kernel.h"
+#endif
+#endif
+
+/** Page size.
+ *  Should be initialised to the correct value at driver init time.
+ *  Use macros from devicemem.h to access from outside this module.
+ */
+IMG_UINT32 g_uiLog2PageSize = 0;
+
+static PVRSRV_ERROR
+_Mapping_Export(DEVMEM_IMPORT *psImport,
+                DEVMEM_EXPORTHANDLE *phPMRExportHandlePtr,
+                DEVMEM_EXPORTKEY *puiExportKeyPtr,
+                DEVMEM_SIZE_T *puiSize,
+                DEVMEM_LOG2ALIGN_T *puiLog2Contig)
+{
+    /* Gets an export handle and key for the PMR used for this mapping */
+    /* Can only be done if there are no suballocations for this mapping */
+
+    PVRSRV_ERROR eError;
+    DEVMEM_EXPORTHANDLE hPMRExportHandle;
+    DEVMEM_EXPORTKEY uiExportKey;
+    IMG_DEVMEM_SIZE_T uiSize;
+    IMG_DEVMEM_LOG2ALIGN_T uiLog2Contig;
+
+    if (psImport == IMG_NULL)
+    {
+        eError = PVRSRV_ERROR_INVALID_PARAMS;
+        goto failParams;
+    }
+
+    if (!psImport->bExportable)
+    {
+		eError = PVRSRV_ERROR_DEVICEMEM_CANT_EXPORT_SUBALLOCATION;
+        goto failParams;
+    }
+
+    eError = BridgePMRExportPMR(psImport->hBridge,
+                                psImport->hPMR,
+                                &hPMRExportHandle,
+                                &uiSize,
+                                &uiLog2Contig,
+                                &uiExportKey);
+    if (eError != PVRSRV_OK)
+    {
+        goto failExport;
+    }
+
+    PVR_ASSERT(uiSize == psImport->uiSize);
+
+    *phPMRExportHandlePtr = hPMRExportHandle;
+    *puiExportKeyPtr = uiExportKey;
+    *puiSize = uiSize;
+    *puiLog2Contig = uiLog2Contig;
+
+    return PVRSRV_OK;
+
+    /*
+      error exit paths follow
+    */
+
+failExport:
+failParams:
+
+    PVR_ASSERT(eError != PVRSRV_OK);
+    return eError;
+
+}
+
+static IMG_VOID
+_Mapping_Unexport(DEVMEM_IMPORT *psImport,
+                  DEVMEM_EXPORTHANDLE hPMRExportHandle)
+{
+    PVRSRV_ERROR eError;
+
+    PVR_ASSERT (psImport != IMG_NULL);
+
+    eError = BridgePMRUnexportPMR(psImport->hBridge,
+                                  hPMRExportHandle);
+    PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+/*****************************************************************************
+ *                    Sub allocation internals                               *
+ *****************************************************************************/
+
+static PVRSRV_ERROR
+_AllocateDeviceMemory(IMG_HANDLE hBridge,
+					  IMG_HANDLE hDeviceNode,
+					  IMG_UINT32 uiLog2Quantum,
+					  IMG_DEVMEM_SIZE_T uiSize,
+					  IMG_DEVMEM_SIZE_T uiChunkSize,
+					  IMG_UINT32 ui32NumPhysChunks,
+					  IMG_UINT32 ui32NumVirtChunks,
+					  IMG_BOOL *pabMappingTable,
+					  IMG_DEVMEM_ALIGN_T uiAlign,
+					  DEVMEM_FLAGS_T uiFlags,
+					  IMG_BOOL bExportable,
+					  DEVMEM_IMPORT **ppsImport)
+{
+	DEVMEM_IMPORT *psImport;
+	DEVMEM_FLAGS_T uiPMRFlags;
+	IMG_HANDLE hPMR;
+	PVRSRV_ERROR eError;
+
+	eError = _DevmemImportStructAlloc(hBridge,
+									  bExportable,
+									  &psImport);
+	if (eError != PVRSRV_OK)
+	{
+		goto failAlloc;
+	}
+
+    /* Check the size is a multiple of the quantum */
+    PVR_ASSERT((uiSize & ((1ULL<<uiLog2Quantum)-1)) == 0);
+
+	/* Pass only the PMR flags down */
+	uiPMRFlags = uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK;
+
+    eError = BridgePhysmemNewRamBackedPMR(hBridge,
+                                          hDeviceNode,
+                                          uiSize,
+                                          uiChunkSize,
+                                          ui32NumPhysChunks,
+                                          ui32NumVirtChunks,
+                                          pabMappingTable,
+                                          uiLog2Quantum,
+                                          uiPMRFlags,
+                                          &hPMR);
+    if (eError != PVRSRV_OK)
+    {
+        /* Our check above should have ensured this the "not page
+           multiple" error never happens */
+        PVR_ASSERT(eError != PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE);
+
+        goto failPMR;
+    }
+
+	_DevmemImportStructInit(psImport,
+							uiSize,
+							uiAlign,
+							uiFlags,
+							hPMR);
+
+	*ppsImport = psImport;
+	return PVRSRV_OK;
+
+failPMR:
+	_DevmemImportDiscard(psImport);
+failAlloc:
+	PVR_ASSERT(eError != PVRSRV_OK);
+
+	return eError;
+}
+
+static IMG_VOID
+_FreeDeviceMemory(DEVMEM_IMPORT *psImport)
+{
+	_DevmemImportStructRelease(psImport);
+}
+
+static IMG_BOOL
+_SubAllocImportAlloc(RA_PERARENA_HANDLE hArena,
+                     RA_LENGTH_T uiSize,
+                     RA_FLAGS_T _flags,
+                     /* returned data */
+                     RA_BASE_T *puiBase,
+                     RA_LENGTH_T *puiActualSize,
+                     RA_PERISPAN_HANDLE *phImport)
+{
+    /* When suballocations need a new lump of memory, the RA calls
+       back here.  Later, in the kernel, we must construct a new PMR
+       and a pairing between the new lump of virtual memory and the
+       PMR (whether or not such PMR is backed by physical memory) */
+    DEVMEM_HEAP *psHeap;
+    DEVMEM_IMPORT *psImport;
+    IMG_DEVMEM_ALIGN_T uiAlign;
+    DEVMEM_FLAGS_T uiFlags;
+    PVRSRV_ERROR eError;
+    IMG_BOOL bMappingTable = IMG_TRUE;
+
+    uiFlags = (DEVMEM_FLAGS_T) _flags;
+
+    /* Per-arena private handle is, for us, the heap */
+    psHeap = hArena;
+
+    /* align to the l.s.b. of the size...  e.g. 96kiB aligned to
+       32kiB. NB: There is an argument to say that the RA should never
+       ask us for Non-power-of-2 size anyway, but I don't want to make
+       that restriction arbitrarily now */
+    uiAlign = uiSize & ~(uiSize-1);
+
+    /* The RA should not have invoked us with a size that is not a
+       multiple of the quantum anyway */
+    PVR_ASSERT((uiSize & ((1ULL<<psHeap->uiLog2Quantum)-1)) == 0);
+
+	eError = _AllocateDeviceMemory(psHeap->psCtx->hBridge,
+								   psHeap->psCtx->hDeviceNode,
+								   psHeap->uiLog2Quantum,
+								   uiSize,
+								   uiSize,
+								   1,
+								   1,
+								   &bMappingTable,
+								   uiAlign,
+								   uiFlags,
+								   IMG_FALSE,
+								   &psImport);
+	if (eError != PVRSRV_OK)
+	{
+		goto failAlloc;
+	}
+
+#if defined(PVR_RI_DEBUG)
+	{
+		eError = BridgeRIWritePMREntry (psImport->hBridge,
+										psImport->hPMR,
+										sizeof("PMR sub-allocated"),
+										"PMR sub-allocated",
+										psImport->uiSize);
+		if( eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIWritePMREntry failed (eError=%d)", __func__, eError));
+		}
+	}
+#endif
+	/*
+		Suballocations always get mapped into the device was we need to
+		key the RA off something and as we can't export suballocations
+		there is no valid reason to request an allocation an not map it
+	*/
+	eError = _DevmemImportStructDevMap(psHeap,
+									   IMG_TRUE,
+									   psImport);
+	if (eError != PVRSRV_OK)
+	{
+		goto failMap;
+	}
+
+	*puiBase = psImport->sDeviceImport.sDevVAddr.uiAddr;
+	*puiActualSize = uiSize;
+	*phImport = psImport;
+
+    return IMG_TRUE;
+
+    /*
+      error exit paths follow
+    */
+failMap:
+    _FreeDeviceMemory(psImport);
+failAlloc:
+
+    return IMG_FALSE;
+}
+
+static IMG_VOID
+_SubAllocImportFree(RA_PERARENA_HANDLE hArena,
+                    RA_BASE_T uiBase,
+                    RA_PERISPAN_HANDLE hImport)
+{
+    DEVMEM_IMPORT *psImport = hImport;
+
+    PVR_ASSERT(psImport != IMG_NULL);
+    PVR_ASSERT(hArena == psImport->sDeviceImport.psHeap);
+    PVR_ASSERT(uiBase == psImport->sDeviceImport.sDevVAddr.uiAddr);
+
+    _DevmemImportStructDevUnmap(psImport);  
+	_DevmemImportStructRelease(psImport);
+}
+
+/*****************************************************************************
+ *                    Devmem context internals                               *
+ *****************************************************************************/
+
+static PVRSRV_ERROR
+_PopulateContextFromBlueprint(struct _DEVMEM_CONTEXT_ *psCtx,
+                              IMG_HANDLE hDeviceNode,
+                              DEVMEM_HEAPCFGID uiHeapBlueprintID)
+{
+    PVRSRV_ERROR eError;
+    PVRSRV_ERROR eError2;
+    struct _DEVMEM_HEAP_ **ppsHeapArray;
+    IMG_UINT32 uiNumHeaps;
+    IMG_UINT32 uiHeapsToUnwindOnError;
+    IMG_UINT32 uiHeapIndex;
+    IMG_DEV_VIRTADDR sDevVAddrBase;
+    IMG_CHAR aszHeapName[DEVMEM_HEAPNAME_MAXLENGTH];
+    IMG_DEVMEM_SIZE_T uiHeapLength;
+    IMG_DEVMEM_LOG2ALIGN_T uiLog2DataPageSize;
+    IMG_DEVMEM_LOG2ALIGN_T uiLog2ImportAlignment;
+
+    eError = DevmemHeapCount(psCtx->hBridge,
+                             hDeviceNode,
+                             uiHeapBlueprintID,
+                             &uiNumHeaps);
+    if (eError != PVRSRV_OK)
+    {
+        goto e0;
+    }
+
+    if (uiNumHeaps == 0)
+    {
+        ppsHeapArray = IMG_NULL;
+    }
+    else
+    {
+        ppsHeapArray = OSAllocMem(sizeof(*ppsHeapArray) * uiNumHeaps);
+        if (ppsHeapArray == IMG_NULL)
+        {
+            eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+            goto e0;
+        }
+    }
+
+    uiHeapsToUnwindOnError = 0;
+
+    for (uiHeapIndex = 0; uiHeapIndex < uiNumHeaps; uiHeapIndex++)
+    {
+        eError = DevmemHeapDetails(psCtx->hBridge,
+                                   hDeviceNode,
+                                   uiHeapBlueprintID,
+                                   uiHeapIndex,
+                                   &aszHeapName[0],
+                                   sizeof(aszHeapName),
+                                   &sDevVAddrBase,
+                                   &uiHeapLength,
+                                   &uiLog2DataPageSize,
+                                   &uiLog2ImportAlignment);
+        if (eError != PVRSRV_OK)
+        {
+            goto e1;
+        }
+
+        eError = DevmemCreateHeap(psCtx,
+                                  sDevVAddrBase,
+                                  uiHeapLength,
+                                  uiLog2DataPageSize,
+                                  uiLog2ImportAlignment,
+                                  aszHeapName,
+                                  &ppsHeapArray[uiHeapIndex]);
+        if (eError != PVRSRV_OK)
+        {
+            goto e1;
+        }
+
+        uiHeapsToUnwindOnError = uiHeapIndex + 1;
+    }
+
+    psCtx->uiAutoHeapCount = uiNumHeaps;
+    psCtx->ppsAutoHeapArray = ppsHeapArray;
+
+    PVR_ASSERT(psCtx->uiNumHeaps >= psCtx->uiAutoHeapCount);
+    PVR_ASSERT(psCtx->uiAutoHeapCount == uiNumHeaps);
+
+    return PVRSRV_OK;
+
+    /*
+      error exit paths
+    */
+ e1:
+    for (uiHeapIndex = 0; uiHeapIndex < uiHeapsToUnwindOnError; uiHeapIndex++)
+    {
+        eError2 = DevmemDestroyHeap(ppsHeapArray[uiHeapIndex]);
+        PVR_ASSERT(eError2 == PVRSRV_OK);
+    }
+
+    if (uiNumHeaps != 0)
+    {
+        OSFreeMem(ppsHeapArray);
+    }
+
+ e0:
+    PVR_ASSERT(eError != PVRSRV_OK);
+    return eError;
+}
+
+static IMG_VOID
+_UnpopulateContextFromBlueprint(struct _DEVMEM_CONTEXT_ *psCtx)
+{
+    PVRSRV_ERROR eError2;
+    IMG_UINT32 uiHeapIndex;
+    IMG_BOOL bDoCheck = IMG_TRUE;
+#if defined(__KERNEL__)
+    PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+    if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK)
+    {
+    	bDoCheck = IMG_FALSE;
+    }
+#endif
+
+    PVR_ASSERT(psCtx->uiNumHeaps >= psCtx->uiAutoHeapCount);
+
+    for (uiHeapIndex = 0; uiHeapIndex < psCtx->uiAutoHeapCount; uiHeapIndex++)
+    {
+        eError2 = DevmemDestroyHeap(psCtx->ppsAutoHeapArray[uiHeapIndex]);
+        if (bDoCheck)
+        {
+        	PVR_ASSERT(eError2 == PVRSRV_OK);
+        }
+    }
+
+    if (psCtx->uiAutoHeapCount != 0)
+    {
+        OSFreeMem(psCtx->ppsAutoHeapArray);
+        psCtx->ppsAutoHeapArray = IMG_NULL;
+    }
+    psCtx->uiAutoHeapCount = 0;
+
+    PVR_ASSERT(psCtx->uiAutoHeapCount == 0);
+    PVR_ASSERT(psCtx->ppsAutoHeapArray == IMG_NULL);
+}
+
+
+/*****************************************************************************
+ *                    Devmem context functions                               *
+ *****************************************************************************/
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemCreateContext(DEVMEM_BRIDGE_HANDLE hBridge,
+                    IMG_HANDLE hDeviceNode,
+                    DEVMEM_HEAPCFGID uiHeapBlueprintID,
+                    DEVMEM_CONTEXT **ppsCtxPtr)
+{
+    PVRSRV_ERROR eError;
+    DEVMEM_CONTEXT *psCtx;
+    /* handle to the server-side counterpart of the device memory
+       context (specifically, for handling mapping to device MMU) */
+    IMG_HANDLE hDevMemServerContext;
+    IMG_HANDLE hPrivData;
+
+
+    if (ppsCtxPtr == IMG_NULL)
+    {
+        eError = PVRSRV_ERROR_INVALID_PARAMS;
+        goto e0;
+    }
+
+    psCtx = OSAllocMem(sizeof *psCtx);
+    if (psCtx == IMG_NULL)
+    {
+        eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+        goto e0;
+    }
+
+    psCtx->uiNumHeaps = 0;
+
+    psCtx->hBridge = hBridge;
+
+    /* Create (server-side) Device Memory context */
+    eError = BridgeDevmemIntCtxCreate(psCtx->hBridge,
+                                   hDeviceNode,
+                                   &hDevMemServerContext,
+                                   &hPrivData);
+    if (eError != PVRSRV_OK)
+    {
+        goto e1;
+    }
+
+    psCtx->hDeviceNode = hDeviceNode;
+    psCtx->hDevMemServerContext = hDevMemServerContext;
+    psCtx->hPrivData = hPrivData;
+
+    /* automagic heap creation */
+    psCtx->uiAutoHeapCount = 0;
+
+    eError = _PopulateContextFromBlueprint(psCtx, hDeviceNode, uiHeapBlueprintID);
+    if (eError != PVRSRV_OK)
+    {
+        goto e2;
+    }
+
+
+    *ppsCtxPtr = psCtx;
+
+
+    PVR_ASSERT(psCtx->uiNumHeaps == psCtx->uiAutoHeapCount);
+    return PVRSRV_OK;
+
+    /*
+      error exit paths follow
+    */
+
+ e2:
+    PVR_ASSERT(psCtx->uiAutoHeapCount == 0);
+    PVR_ASSERT(psCtx->uiNumHeaps == 0);
+    BridgeDevmemIntCtxDestroy(psCtx->hBridge, hDevMemServerContext);
+
+ e1:
+    OSFreeMem(psCtx);
+
+ e0:
+    PVR_ASSERT(eError != PVRSRV_OK);
+    return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemAcquireDevPrivData(DEVMEM_CONTEXT *psCtx,
+                         IMG_HANDLE *hPrivData)
+{
+	PVRSRV_ERROR eError;
+
+	if ((psCtx == IMG_NULL) || (hPrivData == IMG_NULL))
+	{
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto e0;
+	}
+
+	*hPrivData = psCtx->hPrivData;
+	return PVRSRV_OK;
+
+e0:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemReleaseDevPrivData(DEVMEM_CONTEXT *psCtx)
+{
+	PVRSRV_ERROR eError;
+
+	if (psCtx == IMG_NULL)
+	{
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto e0;
+	}
+	return PVRSRV_OK;
+
+e0:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemFindHeapByName(const struct _DEVMEM_CONTEXT_ *psCtx,
+                     const IMG_CHAR *pszHeapName,
+                     struct _DEVMEM_HEAP_ **ppsHeapRet)
+{
+    IMG_UINT32 uiHeapIndex;
+
+    /* N.B.  This func is only useful for finding "automagic" heaps by name */
+    for (uiHeapIndex = 0;
+         uiHeapIndex < psCtx->uiAutoHeapCount;
+         uiHeapIndex++)
+    {
+        if (!OSStringCompare(psCtx->ppsAutoHeapArray[uiHeapIndex]->pszName, pszHeapName))
+        {
+            *ppsHeapRet = psCtx->ppsAutoHeapArray[uiHeapIndex];
+            return PVRSRV_OK;
+        }
+    }
+
+    return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_INDEX;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemDestroyContext(DEVMEM_CONTEXT *psCtx)
+{
+    PVRSRV_ERROR eError;
+    IMG_BOOL bDoCheck = IMG_TRUE;
+
+#if defined(__KERNEL__)
+    PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+    if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK)
+    {
+    	bDoCheck = IMG_FALSE;
+    }
+#endif
+
+    if (psCtx == IMG_NULL)
+    {
+        return PVRSRV_ERROR_INVALID_PARAMS;
+    }
+
+    /* should be only the automagically instantiated heaps left */
+    if (psCtx->uiNumHeaps != psCtx->uiAutoHeapCount)
+    {
+        return PVRSRV_ERROR_DEVICEMEM_ALLOCATIONS_REMAIN_IN_HEAP;
+    }
+
+    _UnpopulateContextFromBlueprint(psCtx);
+
+    if (bDoCheck)
+    {
+		PVR_ASSERT(psCtx->uiAutoHeapCount == 0);
+		PVR_ASSERT(psCtx->uiNumHeaps == 0);
+    }
+    eError = BridgeDevmemIntCtxDestroy(psCtx->hBridge,
+                                      psCtx->hDevMemServerContext);
+    if (bDoCheck)
+    {
+    	PVR_ASSERT (eError == PVRSRV_OK);
+    }
+
+    OSFreeMem(psCtx);
+
+    return PVRSRV_OK;
+}
+
+/*****************************************************************************
+ *                 Devmem heap query functions                               *
+ *****************************************************************************/
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemHeapConfigCount(DEVMEM_BRIDGE_HANDLE hBridge,
+                      IMG_HANDLE hDeviceNode,
+                      IMG_UINT32 *puiNumHeapConfigsOut)
+{
+    PVRSRV_ERROR eError;
+
+    eError = BridgeHeapCfgHeapConfigCount(hBridge,
+                                          hDeviceNode,
+                                          puiNumHeapConfigsOut);
+    return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemHeapCount(DEVMEM_BRIDGE_HANDLE hBridge,
+                IMG_HANDLE hDeviceNode,
+                IMG_UINT32 uiHeapConfigIndex,
+                IMG_UINT32 *puiNumHeapsOut)
+{
+    PVRSRV_ERROR eError;
+
+    eError = BridgeHeapCfgHeapCount(hBridge,
+                                    hDeviceNode,
+                                    uiHeapConfigIndex,
+                                    puiNumHeapsOut);
+    return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemHeapConfigName(DEVMEM_BRIDGE_HANDLE hBridge,
+                     IMG_HANDLE hDeviceNode,
+                     IMG_UINT32 uiHeapConfigIndex,
+                     IMG_CHAR *pszConfigNameOut,
+                     IMG_UINT32 uiConfigNameBufSz)
+{
+    PVRSRV_ERROR eError;
+
+    eError = BridgeHeapCfgHeapConfigName(hBridge,
+                                         hDeviceNode,
+                                         uiHeapConfigIndex,
+                                         uiConfigNameBufSz,
+                                         pszConfigNameOut);
+    return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemHeapDetails(DEVMEM_BRIDGE_HANDLE hBridge,
+                  IMG_HANDLE hDeviceNode,
+                  IMG_UINT32 uiHeapConfigIndex,
+                  IMG_UINT32 uiHeapIndex,
+                  IMG_CHAR *pszHeapNameOut,
+                  IMG_UINT32 uiHeapNameBufSz,
+                  IMG_DEV_VIRTADDR *psDevVAddrBaseOut,
+                  IMG_DEVMEM_SIZE_T *puiHeapLengthOut,
+                  IMG_UINT32 *puiLog2DataPageSizeOut,
+                  IMG_UINT32 *puiLog2ImportAlignmentOut)
+{
+    PVRSRV_ERROR eError;
+
+    eError = BridgeHeapCfgHeapDetails(hBridge,
+                                      hDeviceNode,
+                                      uiHeapConfigIndex,
+                                      uiHeapIndex,
+                                      uiHeapNameBufSz,
+                                      pszHeapNameOut,
+                                      psDevVAddrBaseOut,
+                                      puiHeapLengthOut,
+                                      puiLog2DataPageSizeOut,
+                                      puiLog2ImportAlignmentOut);
+
+    VG_MARK_INITIALIZED(pszHeapNameOut,uiHeapNameBufSz);
+
+    return eError;
+}
+
+/*****************************************************************************
+ *                    Devmem heap functions                                  *
+ *****************************************************************************/
+ 
+/* See devicemem.h for important notes regarding the arguments
+   to this function */
+IMG_INTERNAL PVRSRV_ERROR
+DevmemCreateHeap(DEVMEM_CONTEXT *psCtx,
+                 IMG_DEV_VIRTADDR sBaseAddress,
+                 IMG_DEVMEM_SIZE_T uiLength,
+                 IMG_UINT32 ui32Log2Quantum,
+                 IMG_UINT32 ui32Log2ImportAlignment,
+                 const IMG_CHAR *pszName,
+                 DEVMEM_HEAP **ppsHeapPtr)
+{
+    PVRSRV_ERROR eError = PVRSRV_OK;
+    PVRSRV_ERROR eError2;
+    DEVMEM_HEAP *psHeap;
+    /* handle to the server-side counterpart of the device memory
+       heap (specifically, for handling mapping to device MMU */
+    IMG_HANDLE hDevMemServerHeap;
+
+    IMG_CHAR aszBuf[100];
+    IMG_CHAR *pszStr;
+
+    if (ppsHeapPtr == IMG_NULL)
+    {
+        eError = PVRSRV_ERROR_INVALID_PARAMS;
+        goto e0;
+    }
+
+    psHeap = OSAllocMem(sizeof *psHeap);
+    if (psHeap == IMG_NULL)
+    {
+        eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+        goto e0;
+    }
+
+    /* Need to keep local copy of heap name, so caller may free
+       theirs */
+    pszStr = OSAllocMem(OSStringLength(pszName)+1);
+    if (pszStr == IMG_NULL)
+    {
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+        goto e1;
+    }
+    OSStringCopy(pszStr, pszName);
+    psHeap->pszName = pszStr;
+
+    psHeap->sBaseAddress = sBaseAddress;
+    OSAtomicWrite(&psHeap->hImportCount,0);
+
+    OSSNPrintf(aszBuf, sizeof(aszBuf),
+               "NDM heap '%s' (suballocs) ctx:%p",
+               pszName, psCtx);
+    pszStr = OSAllocMem(OSStringLength(aszBuf)+1);
+    if (pszStr == IMG_NULL)
+    {
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+        goto e2;
+    }
+    OSStringCopy(pszStr, aszBuf);
+    psHeap->pszSubAllocRAName = pszStr;
+
+    psHeap->psSubAllocRA = RA_Create(psHeap->pszSubAllocRAName,
+                       /* Subsequent imports: */
+                       ui32Log2Quantum,
+					   RA_LOCKCLASS_2,
+                       _SubAllocImportAlloc,
+                       _SubAllocImportFree,
+                       (RA_PERARENA_HANDLE) psHeap);
+    if (psHeap->psSubAllocRA == IMG_NULL)
+    {
+        eError = PVRSRV_ERROR_DEVICEMEM_UNABLE_TO_CREATE_ARENA;
+        goto e3;
+    }
+
+    psHeap->uiLog2ImportAlignment = ui32Log2ImportAlignment;
+    psHeap->uiLog2Quantum = ui32Log2Quantum;
+
+    OSSNPrintf(aszBuf, sizeof(aszBuf),
+               "NDM heap '%s' (QVM) ctx:%p",
+               pszName, psCtx);
+    pszStr = OSAllocMem(OSStringLength(aszBuf)+1);
+    if (pszStr == IMG_NULL)
+    {
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+        goto e4;
+    }
+    OSStringCopy(pszStr, aszBuf);
+    psHeap->pszQuantizedVMRAName = pszStr;
+
+    psHeap->psQuantizedVMRA = RA_Create(psHeap->pszQuantizedVMRAName,
+                       /* Subsequent import: */
+                                       0, RA_LOCKCLASS_1, IMG_NULL, IMG_NULL,
+                       (RA_PERARENA_HANDLE) psHeap);
+
+    if (psHeap->psQuantizedVMRA == IMG_NULL)
+    {
+        eError = PVRSRV_ERROR_DEVICEMEM_UNABLE_TO_CREATE_ARENA;
+        goto e5;
+    }
+
+	if (!RA_Add(psHeap->psQuantizedVMRA,
+                       (RA_BASE_T)sBaseAddress.uiAddr,
+                       (RA_LENGTH_T)uiLength,
+                       (RA_FLAGS_T)0, /* This RA doesn't use or need flags */
+				IMG_NULL /* per ispan handle */))
+	{
+		RA_Delete(psHeap->psQuantizedVMRA);
+        eError = PVRSRV_ERROR_DEVICEMEM_UNABLE_TO_CREATE_ARENA;
+        goto e5;
+	}
+
+
+    psHeap->psCtx = psCtx;
+
+
+    /* Create server-side counterpart of Device Memory heap */
+    eError = BridgeDevmemIntHeapCreate(psCtx->hBridge,
+                                      psCtx->hDevMemServerContext,
+                                      sBaseAddress,
+                                      uiLength,
+                                      ui32Log2Quantum,
+                                      &hDevMemServerHeap);
+    if (eError != PVRSRV_OK)
+    {
+        goto e6;
+    }
+    psHeap->hDevMemServerHeap = hDevMemServerHeap;
+
+	eError = OSLockCreate(&psHeap->hLock, LOCK_TYPE_PASSIVE);
+	if (eError != PVRSRV_OK)
+	{
+		goto e7;
+	}
+
+    psHeap->psCtx->uiNumHeaps ++;
+    *ppsHeapPtr = psHeap;
+
+#if defined PVRSRV_NEWDEVMEM_SUPPORT_MEM_TRACKING
+    psHeap->psMemDescList = IMG_NULL;
+#endif  /* PVRSRV_NEWDEVMEM_SUPPORT_MEM_TRACKING */
+
+    return PVRSRV_OK;
+
+    /*
+      error exit paths
+    */
+ e7:
+    eError2 = BridgeDevmemIntHeapDestroy(psCtx->hBridge,
+                                       psHeap->hDevMemServerHeap);
+    PVR_ASSERT (eError2 == PVRSRV_OK);
+ e6:
+    RA_Delete(psHeap->psQuantizedVMRA);
+ e5:
+    OSFreeMem(psHeap->pszQuantizedVMRAName);
+ e4:
+    RA_Delete(psHeap->psSubAllocRA);
+ e3:
+    OSFreeMem(psHeap->pszSubAllocRAName);
+ e2:
+    OSFreeMem(psHeap->pszName);
+ e1:
+    OSFreeMem(psHeap);
+ e0:
+    PVR_ASSERT(eError != PVRSRV_OK);
+    return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetHeapBaseDevVAddr(struct _DEVMEM_HEAP_ *psHeap,
+			  IMG_DEV_VIRTADDR *pDevVAddr)
+{
+	if (psHeap == IMG_NULL)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	*pDevVAddr = psHeap->sBaseAddress;
+
+	return PVRSRV_OK;
+}
+
+IMG_INTERNAL IMG_VOID
+DevmemExportalignAdjustSizeAndAlign(DEVMEM_HEAP *psHeap, IMG_DEVMEM_SIZE_T *puiSize, IMG_DEVMEM_ALIGN_T *puiAlign)
+{
+	IMG_DEVMEM_SIZE_T uiSize = *puiSize;
+	IMG_DEVMEM_ALIGN_T uiAlign = *puiAlign;
+	IMG_UINT32 uiLog2Quantum;
+
+	if (psHeap)
+	{
+		uiLog2Quantum = psHeap->uiLog2Quantum;
+	}
+	else
+	{
+		uiLog2Quantum = GET_LOG2_PAGESIZE();
+	}
+
+    if ((1ULL << uiLog2Quantum) > uiAlign)
+    {
+		uiAlign = 1ULL << uiLog2Quantum;
+    }
+    uiSize = (uiSize + uiAlign - 1) & ~(uiAlign - 1);
+
+	*puiSize = uiSize;
+	*puiAlign = uiAlign;
+}
+
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemDestroyHeap(DEVMEM_HEAP *psHeap)
+{
+    PVRSRV_ERROR eError;
+	IMG_INT uiImportCount;
+
+    if (psHeap == IMG_NULL)
+    {
+        return PVRSRV_ERROR_INVALID_PARAMS;
+    }
+
+	uiImportCount = OSAtomicRead(&psHeap->hImportCount);
+    if (uiImportCount > 0)
+    {
+        PVR_DPF((PVR_DBG_ERROR, "%d(%s) leaks remain", uiImportCount, psHeap->pszName));
+        return PVRSRV_ERROR_DEVICEMEM_ALLOCATIONS_REMAIN_IN_HEAP;
+    }
+
+	OSLockDestroy(psHeap->hLock);
+
+    PVR_ASSERT(psHeap->psCtx->uiNumHeaps > 0);
+    psHeap->psCtx->uiNumHeaps --;
+
+    eError = BridgeDevmemIntHeapDestroy(psHeap->psCtx->hBridge,
+                                       psHeap->hDevMemServerHeap);
+    PVR_ASSERT (eError == PVRSRV_OK);
+
+    RA_Delete(psHeap->psQuantizedVMRA);
+    OSFreeMem(psHeap->pszQuantizedVMRAName);
+
+    RA_Delete(psHeap->psSubAllocRA);
+    OSFreeMem(psHeap->pszSubAllocRAName);
+
+    OSFreeMem(psHeap->pszName);
+
+    OSFreeMem(psHeap);
+
+    return PVRSRV_OK;
+}
+
+/*****************************************************************************
+ *                Devmem allocation/free functions                           *
+ *****************************************************************************/
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemAllocate(DEVMEM_HEAP *psHeap,
+               IMG_DEVMEM_SIZE_T uiSize,
+               IMG_DEVMEM_ALIGN_T uiAlign,
+               DEVMEM_FLAGS_T uiFlags,
+               const IMG_PCHAR pszText,
+			   DEVMEM_MEMDESC **ppsMemDescPtr)
+{
+    IMG_BOOL bStatus; /* eError for RA */
+    RA_BASE_T uiAllocatedAddr;
+    RA_LENGTH_T uiAllocatedSize;
+    RA_PERISPAN_HANDLE hImport; /* the "import" from which this sub-allocation came */
+    RA_FLAGS_T uiFlagsForRA;
+    PVRSRV_ERROR eError;
+    DEVMEM_MEMDESC *psMemDesc = IMG_NULL;
+	IMG_DEVMEM_OFFSET_T uiOffset = 0;
+	DEVMEM_IMPORT *psImport;
+	IMG_VOID *pvAddr;
+
+	if (uiFlags & PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC)
+	{
+		/* Deferred Allocation not supported on SubAllocs*/
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto failParams;
+	}
+
+    if (psHeap == IMG_NULL || ppsMemDescPtr == IMG_NULL)
+    {
+        eError = PVRSRV_ERROR_INVALID_PARAMS;
+        goto failParams;
+    }
+
+	eError = _DevmemValidateParams(uiSize,
+								   uiAlign,
+								   uiFlags);
+	if (eError != PVRSRV_OK)
+	{
+		goto failParams;
+	}
+
+	eError =_DevmemMemDescAlloc(&psMemDesc);
+    if (eError != PVRSRV_OK)
+    {
+        goto failMemDescAlloc;
+    }
+
+    /*
+        If zero flag is set we have to have write access to the page.
+    */
+    uiFlags |= (uiFlags & PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) ? PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE : 0;
+
+	/*
+		No request for exportable memory so use the RA
+	*/
+    uiFlagsForRA = (RA_FLAGS_T)(uiFlags & PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK);
+    /* Check that the cast didn't lose any flags due to different integer widths */
+    PVR_ASSERT(uiFlagsForRA == (uiFlags & PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK));
+
+	/* 
+	   When the RA suballocates memory from a Span it does not zero it. It only zeroes the
+	   memory if it allocates a new Span; but we don't know what is going to happen for this
+	   RA_Alloc call. Therefore, we zero the mem after the allocation below.
+	*/
+	uiFlagsForRA &= ~PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+	
+	bStatus = RA_Alloc(psHeap->psSubAllocRA,
+					   uiSize,
+					   uiFlagsForRA,
+					   uiAlign,
+					   &uiAllocatedAddr,
+					   &uiAllocatedSize,
+					   &hImport);
+	if (!bStatus)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto failDeviceMemAlloc;
+	}
+
+	psImport = hImport;
+	uiOffset = uiAllocatedAddr - psImport->sDeviceImport.sDevVAddr.uiAddr;
+
+	_DevmemMemDescInit(psMemDesc,
+					   uiOffset,
+					   psImport);
+
+	/* zero the memory */
+	if (uiFlags & PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC)
+
+	{
+		eError = DevmemAcquireCpuVirtAddr(psMemDesc, &pvAddr);
+		if (eError != PVRSRV_OK)
+		{
+			goto failZero;
+		}
+
+		/* FIXME: uiSize is a 64-bit quantity whereas the 3rd argument
+		 * to OSDeviceMemSet is a 32-bit quantity on 32-bit systems
+		 * hence a compiler warning of implicit cast and loss of data.
+		 * Added explicit cast and assert to remove warning.
+		 */
+#if (defined(_WIN32) && !defined(_WIN64)) || (defined(LINUX) && defined(__i386__))
+		PVR_ASSERT(uiSize<IMG_UINT32_MAX);
+#endif
+
+		OSDeviceMemSet(pvAddr, 0x0, (IMG_SIZE_T) uiSize);
+	    
+		DevmemReleaseCpuVirtAddr(psMemDesc);
+
+#if defined(PDUMP)
+		DevmemPDumpLoadZeroMem(psMemDesc, 0, uiSize, PDUMP_FLAGS_CONTINUOUS);
+#endif
+	}
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+	/* copy the allocation descriptive name and size so it can be passed to DevicememHistory when
+	 * the allocation gets mapped/unmapped
+	 */
+	OSStringNCopy(psMemDesc->sTraceData.szText, pszText, sizeof(psMemDesc->sTraceData.szText) - 1);
+	psMemDesc->sTraceData.uiSize = uiSize;
+#endif
+
+#if defined(PVR_RI_DEBUG)
+	{
+		/* Attach RI information */
+		eError = BridgeRIWriteMEMDESCEntry (psMemDesc->psImport->hBridge,
+											psMemDesc->psImport->hPMR,
+											OSStringNLength(pszText, RI_MAX_TEXT_LEN),
+											pszText,
+											psMemDesc->uiOffset,
+											uiAllocatedSize,
+											IMG_FALSE,
+											IMG_FALSE,
+											&(psMemDesc->hRIHandle));
+		if( eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIWriteMEMDESCEntry failed (eError=%d)", __func__, eError));
+		}
+	}
+#else  /* if defined(PVR_RI_DEBUG) */
+	PVR_UNREFERENCED_PARAMETER (pszText);
+#endif /* if defined(PVR_RI_DEBUG) */
+
+	*ppsMemDescPtr = psMemDesc;
+
+    return PVRSRV_OK;
+
+    /*
+      error exit paths follow
+    */
+
+failZero:
+	_DevmemMemDescRelease(psMemDesc);
+	psMemDesc = IMG_NULL;	/* Make sure we don't do a discard after the release */
+failDeviceMemAlloc:
+	if (psMemDesc)
+		_DevmemMemDescDiscard(psMemDesc);
+failMemDescAlloc:
+failParams:
+    PVR_ASSERT(eError != PVRSRV_OK);
+    return eError;
+}
+
+
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemAllocateExportable(IMG_HANDLE hBridge,
+						 IMG_HANDLE hDeviceNode,
+						 IMG_DEVMEM_SIZE_T uiSize,
+						 IMG_DEVMEM_ALIGN_T uiAlign,
+						 DEVMEM_FLAGS_T uiFlags,
+						 const IMG_PCHAR pszText,
+						 DEVMEM_MEMDESC **ppsMemDescPtr)
+{
+    PVRSRV_ERROR eError;
+    DEVMEM_MEMDESC *psMemDesc = IMG_NULL;
+	DEVMEM_IMPORT *psImport;
+	IMG_BOOL bMappingTable = IMG_TRUE;
+
+
+	DevmemExportalignAdjustSizeAndAlign(IMG_NULL,
+										&uiSize,
+										&uiAlign);
+
+	eError = _DevmemValidateParams(uiSize,
+								   uiAlign,
+								   uiFlags);
+	if (eError != PVRSRV_OK)
+	{
+		goto failParams;
+	}
+
+
+	eError =_DevmemMemDescAlloc(&psMemDesc);
+    if (eError != PVRSRV_OK)
+    {
+        goto failMemDescAlloc;
+    }
+
+	/*
+		Note:
+		In the case of exportable memory we have no heap to
+		query the pagesize from, so we assume host pagesize.
+	*/
+	eError = _AllocateDeviceMemory(hBridge,
+								   hDeviceNode,
+								   GET_LOG2_PAGESIZE(),
+								   uiSize,
+								   uiSize,
+								   1,
+								   1,
+								   &bMappingTable,
+								   uiAlign,
+								   uiFlags,
+								   IMG_TRUE,
+								   &psImport);
+	if (eError != PVRSRV_OK)
+	{
+		goto failDeviceMemAlloc;
+	}
+
+	_DevmemMemDescInit(psMemDesc,
+					   0,
+					   psImport);
+
+    *ppsMemDescPtr = psMemDesc;
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+	/* copy the allocation descriptive name and size so it can be passed to DevicememHistory when
+	 * the allocation gets mapped/unmapped
+	 */
+	OSStringNCopy(psMemDesc->sTraceData.szText, pszText, sizeof(psMemDesc->sTraceData.szText) - 1);
+	psMemDesc->sTraceData.uiSize = uiSize;
+#endif
+
+#if defined(PVR_RI_DEBUG)
+	{
+		eError = BridgeRIWritePMREntry (psImport->hBridge,
+										psImport->hPMR,
+										OSStringNLength(pszText, RI_MAX_TEXT_LEN),
+										(IMG_CHAR *)pszText,
+										psImport->uiSize);
+		if( eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIWritePMREntry failed (eError=%d)", __func__, eError));
+		}
+
+		 /* Attach RI information */
+		eError = BridgeRIWriteMEMDESCEntry (psImport->hBridge,
+											psImport->hPMR,
+											sizeof("^"),
+											"^",
+											psMemDesc->uiOffset,
+											uiSize,
+											IMG_FALSE,
+											IMG_TRUE,
+											&psMemDesc->hRIHandle);
+		if( eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIWriteMEMDESCEntry failed (eError=%d)", __func__, eError));
+		}
+	}
+#else  /* if defined(PVR_RI_DEBUG) */
+	PVR_UNREFERENCED_PARAMETER (pszText);
+#endif /* if defined(PVR_RI_DEBUG) */
+
+    return PVRSRV_OK;
+
+    /*
+      error exit paths follow
+    */
+
+failDeviceMemAlloc:
+    _DevmemMemDescDiscard(psMemDesc);
+
+failMemDescAlloc:
+failParams:
+    PVR_ASSERT(eError != PVRSRV_OK);
+    return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemAllocateSparse(IMG_HANDLE hBridge,
+					 IMG_HANDLE hDeviceNode,
+					 IMG_DEVMEM_SIZE_T uiSize,
+					 IMG_DEVMEM_SIZE_T uiChunkSize,
+					 IMG_UINT32 ui32NumPhysChunks,
+					 IMG_UINT32 ui32NumVirtChunks,
+					 IMG_BOOL *pabMappingTable,
+					 IMG_DEVMEM_ALIGN_T uiAlign,
+					 DEVMEM_FLAGS_T uiFlags,
+					 const IMG_PCHAR pszText,
+					 DEVMEM_MEMDESC **ppsMemDescPtr)
+{
+    PVRSRV_ERROR eError;
+    DEVMEM_MEMDESC *psMemDesc = IMG_NULL;
+	DEVMEM_IMPORT *psImport;
+
+
+	DevmemExportalignAdjustSizeAndAlign(IMG_NULL,
+										&uiSize,
+										&uiAlign);
+
+	eError = _DevmemValidateParams(uiSize,
+								   uiAlign,
+								   uiFlags);
+	if (eError != PVRSRV_OK)
+	{
+		goto failParams;
+	}
+
+
+	eError =_DevmemMemDescAlloc(&psMemDesc);
+    if (eError != PVRSRV_OK)
+    {
+        goto failMemDescAlloc;
+    }
+
+	/*
+		Note:
+		In the case of sparse memory we have no heap to
+		query the pagesize from, so we assume host pagesize.
+	*/
+	eError = _AllocateDeviceMemory(hBridge,
+								   hDeviceNode,
+								   GET_LOG2_PAGESIZE(),
+								   uiSize,
+								   uiChunkSize,
+								   ui32NumPhysChunks,
+								   ui32NumVirtChunks,
+								   pabMappingTable,
+								   uiAlign,
+								   uiFlags,
+								   IMG_TRUE,
+								   &psImport);
+	if (eError != PVRSRV_OK)
+	{
+		goto failDeviceMemAlloc;
+	}
+
+	_DevmemMemDescInit(psMemDesc,
+					   0,
+					   psImport);
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+	/* copy the allocation descriptive name and size so it can be passed to DevicememHistory when
+	 * the allocation gets mapped/unmapped
+	 */
+	OSStringNCopy(psMemDesc->sTraceData.szText, pszText, sizeof(psMemDesc->sTraceData.szText) - 1);
+	psMemDesc->sTraceData.uiSize = uiSize;
+#endif
+
+#if defined(PVR_RI_DEBUG)
+	{
+		eError = BridgeRIWritePMREntry (psImport->hBridge,
+										psImport->hPMR,
+										OSStringNLength(pszText, RI_MAX_TEXT_LEN),
+										(IMG_CHAR *)pszText,
+										psImport->uiSize);
+		if( eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIWritePMREntry failed (eError=%d)", __func__, eError));
+		}
+
+		/* Attach RI information */
+    	eError = BridgeRIWriteMEMDESCEntry (psMemDesc->psImport->hBridge,
+											psMemDesc->psImport->hPMR,
+											sizeof("^"),
+											"^",
+											psMemDesc->uiOffset,
+											uiSize,
+											IMG_FALSE,
+											IMG_TRUE,
+											&psMemDesc->hRIHandle);
+		if( eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIWriteMEMDESCEntry failed (eError=%d)", __func__, eError));
+		}
+	}
+#else  /* if defined(PVR_RI_DEBUG) */
+	PVR_UNREFERENCED_PARAMETER (pszText);
+#endif /* if defined(PVR_RI_DEBUG) */
+
+	*ppsMemDescPtr = psMemDesc;
+
+    return PVRSRV_OK;
+
+    /*
+      error exit paths follow
+    */
+
+failDeviceMemAlloc:
+    _DevmemMemDescDiscard(psMemDesc);
+
+failMemDescAlloc:
+failParams:
+    PVR_ASSERT(eError != PVRSRV_OK);
+    return eError;
+}
+
+/*****************************************************************************
+ *                Devmem unsecure export functions                           *
+ *****************************************************************************/
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemExport(DEVMEM_MEMDESC *psMemDesc,
+             DEVMEM_EXPORTCOOKIE *psExportCookie)
+{
+    /* Caller to provide storage for export cookie struct */
+    PVRSRV_ERROR eError;
+    IMG_HANDLE hPMRExportHandle = 0;
+    IMG_UINT64 uiPMRExportPassword = 0;
+    IMG_DEVMEM_SIZE_T uiSize = 0;
+    IMG_DEVMEM_LOG2ALIGN_T uiLog2Contig = 0;
+
+    if (psMemDesc == IMG_NULL || psExportCookie == IMG_NULL)
+    {
+        eError = PVRSRV_ERROR_INVALID_PARAMS;
+        goto e0;
+    }
+
+    eError = _Mapping_Export(psMemDesc->psImport,
+                             &hPMRExportHandle,
+                             &uiPMRExportPassword,
+                             &uiSize,
+                             &uiLog2Contig);
+    if (eError != PVRSRV_OK)
+    {
+		psExportCookie->uiSize = 0;
+        goto e0;
+    }
+
+    psExportCookie->hPMRExportHandle = hPMRExportHandle;
+    psExportCookie->uiPMRExportPassword = uiPMRExportPassword;
+    psExportCookie->uiSize = uiSize;
+    psExportCookie->uiLog2ContiguityGuarantee = uiLog2Contig;
+
+    return PVRSRV_OK;
+
+    /*
+      error exit paths follow
+    */
+
+ e0:
+    PVR_ASSERT(eError != PVRSRV_OK);
+    return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemMakeServerExportClientExport(DEVMEM_BRIDGE_HANDLE hBridge,
+                                   DEVMEM_SERVER_EXPORTCOOKIE hServerExportCookie,
+                                   DEVMEM_EXPORTCOOKIE *psExportCookie)
+{
+	DEVMEM_EXPORTHANDLE hPMRExportHandle;
+	IMG_DEVMEM_SIZE_T uiPMRSize;
+	IMG_DEVMEM_LOG2ALIGN_T uiPMRLog2Contig;
+	DEVMEM_EXPORTKEY uiPMRExportKey;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	eError = BridgePMRMakeServerExportClientExport(hBridge,
+													hServerExportCookie,
+													&hPMRExportHandle,
+													&uiPMRSize,
+													&uiPMRLog2Contig,
+													&uiPMRExportKey);
+
+	if (eError == PVRSRV_OK)
+	{
+		psExportCookie->hPMRExportHandle = hPMRExportHandle;
+		psExportCookie->uiPMRExportPassword = uiPMRExportKey;
+		psExportCookie->uiSize = uiPMRSize;
+		psExportCookie->uiLog2ContiguityGuarantee = uiPMRLog2Contig;
+	}
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemUnmakeServerExportClientExport(DEVMEM_BRIDGE_HANDLE hBridge,
+                                   DEVMEM_EXPORTCOOKIE *psExportCookie)
+{
+	return BridgePMRUnmakeServerExportClientExport(hBridge, psExportCookie->hPMRExportHandle);
+}
+
+IMG_INTERNAL IMG_BOOL
+DevmemIsValidExportCookie(DEVMEM_EXPORTCOOKIE *psExportCookie)
+{
+	/* Assume that if the size is set, the export cookie is used */
+	return (psExportCookie->uiSize != 0x0);
+}
+
+IMG_INTERNAL IMG_VOID
+DevmemUnexport(DEVMEM_MEMDESC *psMemDesc,
+               DEVMEM_EXPORTCOOKIE *psExportCookie)
+{
+    _Mapping_Unexport(psMemDesc->psImport,
+                      psExportCookie->hPMRExportHandle);
+
+    psExportCookie->uiSize = 0;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemImport(IMG_HANDLE hBridge,
+			 DEVMEM_EXPORTCOOKIE *psCookie,
+			 DEVMEM_FLAGS_T uiFlags,
+			 DEVMEM_MEMDESC **ppsMemDescPtr)
+{
+    DEVMEM_MEMDESC *psMemDesc = IMG_NULL;
+    DEVMEM_IMPORT *psImport;
+    IMG_HANDLE hPMR;
+    PVRSRV_ERROR eError;
+
+	if (ppsMemDescPtr == IMG_NULL)
+    {
+        eError = PVRSRV_ERROR_INVALID_PARAMS;
+        goto failParams;
+    }
+
+	eError =_DevmemMemDescAlloc(&psMemDesc);
+    if (eError != PVRSRV_OK)
+    {
+        goto failMemDescAlloc;
+    }
+
+	eError = _DevmemImportStructAlloc(hBridge,
+									  IMG_TRUE,
+									  &psImport);
+    if (eError != PVRSRV_OK)
+    {
+        eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+        goto failImportAlloc;
+    }
+
+
+    /* Get a handle to the PMR (inc refcount, checks authorization) */
+    eError = BridgePMRImportPMR(hBridge,
+                                psCookie->hPMRExportHandle,
+                                psCookie->uiPMRExportPassword,
+                                psCookie->uiSize, /* not trusted - just for sanity checks */
+                                psCookie->uiLog2ContiguityGuarantee, /* not trusted - just for sanity checks */
+                                &hPMR);
+    if (eError != PVRSRV_OK)
+    {
+        goto failImport;
+    }
+
+	_DevmemImportStructInit(psImport,
+							psCookie->uiSize,
+							1ULL << psCookie->uiLog2ContiguityGuarantee,
+							uiFlags,
+							hPMR);
+
+	_DevmemMemDescInit(psMemDesc,
+					   0,
+					   psImport);
+
+    *ppsMemDescPtr = psMemDesc;
+
+#if defined(PVR_RI_DEBUG)
+	{
+		/* Attach RI information */
+		eError = BridgeRIWriteMEMDESCEntry (psMemDesc->psImport->hBridge,
+											psMemDesc->psImport->hPMR,
+											sizeof("^"),
+											"^",
+											psMemDesc->uiOffset,
+											psMemDesc->psImport->uiSize,
+											IMG_TRUE,
+											IMG_FALSE,
+											&psMemDesc->hRIHandle);
+		if( eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIWriteMEMDESCEntry failed (eError=%d)", __func__, eError));
+		}
+	}
+#endif /* if defined(PVR_RI_DEBUG) */
+
+	return PVRSRV_OK;
+
+    /*
+      error exit paths follow
+    */
+
+failImport:
+    _DevmemImportDiscard(psImport);
+failImportAlloc:
+    _DevmemMemDescDiscard(psMemDesc);
+failMemDescAlloc:
+failParams:
+    PVR_ASSERT(eError != PVRSRV_OK);
+
+    return eError;
+}
+
+/*****************************************************************************
+ *                   Common MemDesc functions                                *
+ *****************************************************************************/
+
+/*
+	This function is called for freeing any class of memory
+*/
+IMG_INTERNAL IMG_VOID
+DevmemFree(DEVMEM_MEMDESC *psMemDesc)
+{
+#if defined(PVR_RI_DEBUG)
+	if (psMemDesc->hRIHandle)
+	{
+	    PVRSRV_ERROR eError;
+
+	    eError = BridgeRIDeleteMEMDESCEntry(psMemDesc->psImport->hBridge,
+					   	   	   	   psMemDesc->hRIHandle);
+		if( eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIDeleteMEMDESCEntry failed (eError=%d)", __func__, eError));
+		}
+	}
+#endif  /* if defined(PVR_RI_DEBUG) */
+	_DevmemMemDescRelease(psMemDesc);
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemMapToDevice(DEVMEM_MEMDESC *psMemDesc,
+				  DEVMEM_HEAP *psHeap,
+				  IMG_DEV_VIRTADDR *psDevVirtAddr)
+{
+	DEVMEM_IMPORT *psImport;
+	IMG_DEV_VIRTADDR sDevVAddr;
+	PVRSRV_ERROR eError;
+	IMG_BOOL bMap = IMG_TRUE;
+
+	OSLockAcquire(psMemDesc->sDeviceMemDesc.hLock);
+	if (psHeap == IMG_NULL)
+	{
+		eError = PVRSRV_ERROR_INVALID_PARAMS;
+		goto failParams;
+	}
+
+	if (psMemDesc->sDeviceMemDesc.ui32RefCount != 0)
+	{
+		eError = PVRSRV_ERROR_DEVICEMEM_ALREADY_MAPPED;
+		goto failCheck;
+	}
+
+	/* Don't map memory for deferred allocations */
+	if (psMemDesc->psImport->uiFlags & PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC)
+	{
+		PVR_ASSERT(psMemDesc->psImport->bExportable);
+		bMap = IMG_FALSE;
+	}
+
+	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+					__FUNCTION__,
+					psMemDesc,
+					psMemDesc->sDeviceMemDesc.ui32RefCount,
+					psMemDesc->sDeviceMemDesc.ui32RefCount+1);
+
+	psImport = psMemDesc->psImport;
+	_DevmemMemDescAcquire(psMemDesc);
+
+	eError = _DevmemImportStructDevMap(psHeap,
+									   bMap,
+									   psImport);
+	if (eError != PVRSRV_OK)
+	{
+		goto failMap;
+	}
+
+	sDevVAddr.uiAddr = psImport->sDeviceImport.sDevVAddr.uiAddr;
+	sDevVAddr.uiAddr += psMemDesc->uiOffset;
+	psMemDesc->sDeviceMemDesc.sDevVAddr = sDevVAddr;
+	psMemDesc->sDeviceMemDesc.ui32RefCount++;
+
+    *psDevVirtAddr = psMemDesc->sDeviceMemDesc.sDevVAddr;
+
+    OSLockRelease(psMemDesc->sDeviceMemDesc.hLock);
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+	BridgeDevicememHistoryMap(psMemDesc->psImport->hBridge,
+						psMemDesc->sDeviceMemDesc.sDevVAddr,
+						psMemDesc->sTraceData.uiSize,
+						psMemDesc->sTraceData.szText);
+#endif
+
+#if defined(PVR_RI_DEBUG)
+	if (psMemDesc->hRIHandle)
+    {
+		 eError = BridgeRIUpdateMEMDESCAddr(psImport->hBridge,
+    									   psMemDesc->hRIHandle,
+    									   psImport->sDeviceImport.sDevVAddr);
+		if( eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIUpdateMEMDESCAddr failed (eError=%d)", __func__, eError));
+		}
+	}
+#endif
+
+    return PVRSRV_OK;
+
+failMap:
+	_DevmemMemDescRelease(psMemDesc);
+failCheck:
+failParams:
+	OSLockRelease(psMemDesc->sDeviceMemDesc.hLock);
+	PVR_ASSERT(eError != PVRSRV_OK);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemAcquireDevVirtAddr(DEVMEM_MEMDESC *psMemDesc,
+                         IMG_DEV_VIRTADDR *psDevVirtAddr)
+{
+	PVRSRV_ERROR eError;
+
+	OSLockAcquire(psMemDesc->sDeviceMemDesc.hLock);
+	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+					__FUNCTION__,
+					psMemDesc,
+					psMemDesc->sDeviceMemDesc.ui32RefCount,
+					psMemDesc->sDeviceMemDesc.ui32RefCount+1);
+
+	if (psMemDesc->sDeviceMemDesc.ui32RefCount == 0)
+	{
+		eError = PVRSRV_ERROR_DEVICEMEM_NO_MAPPING;
+		goto failCheck;
+	}
+	psMemDesc->sDeviceMemDesc.ui32RefCount++;
+
+    *psDevVirtAddr = psMemDesc->sDeviceMemDesc.sDevVAddr;
+	OSLockRelease(psMemDesc->sDeviceMemDesc.hLock);
+
+    return PVRSRV_OK;
+
+failCheck:
+	OSLockRelease(psMemDesc->sDeviceMemDesc.hLock);
+	PVR_ASSERT(eError != PVRSRV_OK);
+
+	return eError;
+}
+
+IMG_INTERNAL IMG_VOID
+DevmemReleaseDevVirtAddr(DEVMEM_MEMDESC *psMemDesc)
+{
+	PVR_ASSERT(psMemDesc != IMG_NULL);
+
+	OSLockAcquire(psMemDesc->sDeviceMemDesc.hLock);
+	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+					__FUNCTION__,
+					psMemDesc,
+					psMemDesc->sDeviceMemDesc.ui32RefCount,
+					psMemDesc->sDeviceMemDesc.ui32RefCount-1);
+
+	PVR_ASSERT(psMemDesc->sDeviceMemDesc.ui32RefCount != 0);
+
+	if (--psMemDesc->sDeviceMemDesc.ui32RefCount == 0)
+	{
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+		BridgeDevicememHistoryUnmap(psMemDesc->psImport->hBridge,
+							psMemDesc->sDeviceMemDesc.sDevVAddr,
+							psMemDesc->sTraceData.uiSize,
+							psMemDesc->sTraceData.szText);
+#endif
+		_DevmemImportStructDevUnmap(psMemDesc->psImport);
+		OSLockRelease(psMemDesc->sDeviceMemDesc.hLock);
+
+		_DevmemMemDescRelease(psMemDesc);
+	}
+	else
+	{
+		OSLockRelease(psMemDesc->sDeviceMemDesc.hLock);
+	}
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemAcquireCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc,
+                         IMG_VOID **ppvCpuVirtAddr)
+{
+	PVRSRV_ERROR eError;
+
+	OSLockAcquire(psMemDesc->sCPUMemDesc.hLock);
+	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+					__FUNCTION__,
+					psMemDesc,
+					psMemDesc->sCPUMemDesc.ui32RefCount,
+					psMemDesc->sCPUMemDesc.ui32RefCount+1);
+
+	if (psMemDesc->sCPUMemDesc.ui32RefCount++ == 0)
+	{
+		DEVMEM_IMPORT *psImport = psMemDesc->psImport;
+		IMG_UINT8 *pui8CPUVAddr;
+
+		_DevmemMemDescAcquire(psMemDesc);
+		eError = _DevmemImportStructCPUMap(psImport);
+		if (eError != PVRSRV_OK)
+		{
+			goto failMap;
+		}
+
+		pui8CPUVAddr = psImport->sCPUImport.pvCPUVAddr;
+		pui8CPUVAddr += psMemDesc->uiOffset;
+		psMemDesc->sCPUMemDesc.pvCPUVAddr = pui8CPUVAddr;
+	}
+    *ppvCpuVirtAddr = psMemDesc->sCPUMemDesc.pvCPUVAddr;
+
+    VG_MARK_INITIALIZED(*ppvCpuVirtAddr, psMemDesc->psImport->uiSize);
+
+    OSLockRelease(psMemDesc->sCPUMemDesc.hLock);
+
+    return PVRSRV_OK;
+
+failMap:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	psMemDesc->sCPUMemDesc.ui32RefCount--;
+	_DevmemMemDescRelease(psMemDesc);
+	OSLockRelease(psMemDesc->sCPUMemDesc.hLock);
+
+	return eError;
+}
+
+IMG_INTERNAL IMG_VOID
+DevmemReleaseCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc)
+{
+	PVR_ASSERT(psMemDesc != IMG_NULL);
+
+	OSLockAcquire(psMemDesc->sCPUMemDesc.hLock);
+	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+					__FUNCTION__,
+					psMemDesc,
+					psMemDesc->sCPUMemDesc.ui32RefCount,
+					psMemDesc->sCPUMemDesc.ui32RefCount-1);
+
+	PVR_ASSERT(psMemDesc->sCPUMemDesc.ui32RefCount != 0);
+
+	if (--psMemDesc->sCPUMemDesc.ui32RefCount == 0)
+	{
+		OSLockRelease(psMemDesc->sCPUMemDesc.hLock);
+		_DevmemImportStructCPUUnmap(psMemDesc->psImport);
+		_DevmemMemDescRelease(psMemDesc);
+	}
+	else
+	{
+		OSLockRelease(psMemDesc->sCPUMemDesc.hLock);
+	}
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemLocalGetImportHandle(DEVMEM_MEMDESC *psMemDesc,
+			   IMG_HANDLE *phImport)
+{
+	if (psMemDesc->psImport->bExportable == IMG_FALSE)
+	{
+		return PVRSRV_ERROR_DEVICEMEM_CANT_EXPORT_SUBALLOCATION;
+	}
+
+	*phImport = psMemDesc->psImport->hPMR;
+
+	return PVRSRV_OK;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetImportUID(DEVMEM_MEMDESC *psMemDesc,
+						   IMG_UINT64 *pui64UID)
+{
+	DEVMEM_IMPORT *psImport = psMemDesc->psImport;
+	PVRSRV_ERROR eError;
+
+	eError = BridgePMRGetUID(psImport->hBridge,
+							 psImport->hPMR,
+							 pui64UID);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetReservation(DEVMEM_MEMDESC *psMemDesc,
+				IMG_HANDLE *hReservation)
+{
+	DEVMEM_IMPORT *psImport;
+
+	PVR_ASSERT(psMemDesc);
+	psImport = psMemDesc->psImport;
+
+	PVR_ASSERT(psImport);
+	*hReservation = psImport->sDeviceImport.hReservation;
+
+	return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+DevmemGetPMRData(DEVMEM_MEMDESC *psMemDesc,
+		IMG_HANDLE *phPMR,
+		IMG_DEVMEM_OFFSET_T *puiPMROffset)
+{
+	DEVMEM_IMPORT *psImport;
+
+	PVR_ASSERT(psMemDesc);
+	*puiPMROffset = psMemDesc->uiOffset;
+	psImport = psMemDesc->psImport;
+
+	PVR_ASSERT(psImport);
+	*phPMR = psImport->hPMR;
+
+	return PVRSRV_OK;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetFlags(DEVMEM_MEMDESC *psMemDesc,
+				DEVMEM_FLAGS_T *puiFlags)
+{
+	DEVMEM_IMPORT *psImport;
+
+	PVR_ASSERT(psMemDesc);
+	psImport = psMemDesc->psImport;
+
+	PVR_ASSERT(psImport);
+	*puiFlags = psImport->uiFlags;
+
+	return PVRSRV_OK;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemLocalImport(IMG_HANDLE hBridge,
+				  IMG_HANDLE hExtHandle,
+				  DEVMEM_FLAGS_T uiFlags,
+				  DEVMEM_MEMDESC **ppsMemDescPtr,
+				  IMG_DEVMEM_SIZE_T *puiSizePtr)
+{
+    DEVMEM_MEMDESC *psMemDesc = IMG_NULL;
+    DEVMEM_IMPORT *psImport;
+    IMG_DEVMEM_SIZE_T uiSize;
+    IMG_DEVMEM_ALIGN_T uiAlign;
+    IMG_HANDLE hPMR;
+    PVRSRV_ERROR eError;
+
+    if (ppsMemDescPtr == IMG_NULL)
+    {
+        eError = PVRSRV_ERROR_INVALID_PARAMS;
+        goto failParams;
+    }	
+
+	eError =_DevmemMemDescAlloc(&psMemDesc);
+    if (eError != PVRSRV_OK)
+    {
+        goto failMemDescAlloc;
+    }
+
+	eError = _DevmemImportStructAlloc(hBridge,
+									  IMG_TRUE,
+									  &psImport);
+    if (eError != PVRSRV_OK)
+    {
+        eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+        goto failImportAlloc;
+    }
+
+	/* Get the PMR handle and it's size from the server */
+	eError = BridgePMRLocalImportPMR(hBridge,
+									 hExtHandle,
+									 &hPMR,
+									 &uiSize,
+									 &uiAlign);
+	if (eError != PVRSRV_OK)
+	{
+		goto failImport;
+	}
+
+	_DevmemImportStructInit(psImport,
+							uiSize,
+							uiAlign,
+							uiFlags,
+							hPMR);
+
+	_DevmemMemDescInit(psMemDesc,
+					   0,
+					   psImport);
+
+    *ppsMemDescPtr = psMemDesc;
+	if (puiSizePtr)
+		*puiSizePtr = uiSize;
+
+#if defined(PVR_RI_DEBUG)
+	{
+		/* Attach RI information */
+		eError = BridgeRIWriteMEMDESCEntry (psMemDesc->psImport->hBridge,
+											psMemDesc->psImport->hPMR,
+											sizeof("^"),
+											"^",
+											psMemDesc->uiOffset,
+											psMemDesc->psImport->uiSize,
+											IMG_TRUE,
+											IMG_FALSE,
+											&(psMemDesc->hRIHandle));
+		if( eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIWriteMEMDESCEntry failed (eError=%d)", __func__, eError));
+		}
+	}
+#endif /* if defined(PVR_RI_DEBUG) */
+	return PVRSRV_OK;
+
+failImport:
+    _DevmemImportDiscard(psImport);
+failImportAlloc:
+	_DevmemMemDescDiscard(psMemDesc);
+failMemDescAlloc:
+failParams:
+	PVR_ASSERT(eError != PVRSRV_OK);
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemIsDevVirtAddrValid(DEVMEM_CONTEXT *psContext,
+                         IMG_DEV_VIRTADDR sDevVAddr)
+{
+    return BridgeDevmemIsVDevAddrValid(psContext->hBridge,
+                                       psContext->hDevMemServerContext,
+                                       sDevVAddr);
+}
+
+IMG_INTERNAL IMG_UINT32
+DevmemGetHeapLog2ImportAlignment(DEVMEM_HEAP *psHeap)
+{
+	return psHeap->uiLog2ImportAlignment;
+}
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/shared/common/devicemem_pdump.c b/drivers/external_drivers/intel_media/graphics/rgx/services/shared/common/devicemem_pdump.c
new file mode 100644
index 0000000..36bc123
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/shared/common/devicemem_pdump.c
@@ -0,0 +1,323 @@
+/*************************************************************************/ /*!
+@File
+@Title          Shared device memory management PDump functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements common (client & server) PDump functions for the
+                memory management code
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if defined PDUMP
+
+#include "allocmem.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pdump.h"
+#include "devicemem_utils.h"
+#include "devicemem_pdump.h"
+#include "client_pdumpmm_bridge.h"
+
+IMG_INTERNAL IMG_VOID
+DevmemPDumpLoadMem(DEVMEM_MEMDESC *psMemDesc,
+                   IMG_DEVMEM_OFFSET_T uiOffset,
+                   IMG_DEVMEM_SIZE_T uiSize,
+                   PDUMP_FLAGS_T uiPDumpFlags)
+{
+    PVRSRV_ERROR eError;
+
+    PVR_ASSERT(uiOffset + uiSize <= psMemDesc->psImport->uiSize);
+
+    eError = BridgePMRPDumpLoadMem(psMemDesc->psImport->hBridge,
+                                   psMemDesc->psImport->hPMR,
+                                   psMemDesc->uiOffset + uiOffset,
+                                   uiSize,
+                                   uiPDumpFlags,
+                                   IMG_FALSE);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: failed with error %d",
+				__FUNCTION__, eError));
+	}
+    PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+IMG_INTERNAL IMG_VOID
+DevmemPDumpLoadZeroMem(DEVMEM_MEMDESC *psMemDesc,
+                   IMG_DEVMEM_OFFSET_T uiOffset,
+                   IMG_DEVMEM_SIZE_T uiSize,
+                   PDUMP_FLAGS_T uiPDumpFlags)
+{
+    PVRSRV_ERROR eError;
+
+    PVR_ASSERT(uiOffset + uiSize <= psMemDesc->psImport->uiSize);
+
+    eError = BridgePMRPDumpLoadMem(psMemDesc->psImport->hBridge,
+                                   psMemDesc->psImport->hPMR,
+                                   psMemDesc->uiOffset + uiOffset,
+                                   uiSize,
+                                   uiPDumpFlags,
+                                   IMG_TRUE);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: failed with error %d",
+				__FUNCTION__, eError));
+	}
+    PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+IMG_INTERNAL IMG_VOID
+DevmemPDumpLoadMemValue32(DEVMEM_MEMDESC *psMemDesc,
+                        IMG_DEVMEM_OFFSET_T uiOffset,
+                        IMG_UINT32 ui32Value,
+                        PDUMP_FLAGS_T uiPDumpFlags)
+{
+    PVRSRV_ERROR eError;
+
+    eError = BridgePMRPDumpLoadMemValue32(psMemDesc->psImport->hBridge,
+                                        psMemDesc->psImport->hPMR,
+                                        psMemDesc->uiOffset + uiOffset,
+                                        ui32Value,
+                                        uiPDumpFlags);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: failed with error %d",
+				__FUNCTION__, eError));
+	}
+    PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+IMG_INTERNAL IMG_VOID
+DevmemPDumpLoadMemValue64(DEVMEM_MEMDESC *psMemDesc,
+                        IMG_DEVMEM_OFFSET_T uiOffset,
+                        IMG_UINT64 ui64Value,
+                        PDUMP_FLAGS_T uiPDumpFlags)
+{
+    PVRSRV_ERROR eError;
+
+    eError = BridgePMRPDumpLoadMemValue64(psMemDesc->psImport->hBridge,
+                                          psMemDesc->psImport->hPMR,
+                                          psMemDesc->uiOffset + uiOffset,
+                                          ui64Value,
+                                          uiPDumpFlags);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: failed with error %d",
+				__FUNCTION__, eError));
+	}
+    PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+/* FIXME: This should be server side only */
+IMG_INTERNAL PVRSRV_ERROR
+DevmemPDumpPageCatBaseToSAddr(DEVMEM_MEMDESC		*psMemDesc,
+							  IMG_DEVMEM_OFFSET_T	*puiMemOffset,
+							  IMG_CHAR				*pszName,
+							  IMG_UINT32			ui32Size)
+{
+    PVRSRV_ERROR		eError;
+	IMG_CHAR			aszMemspaceName[100];
+	IMG_CHAR			aszSymbolicName[100];
+	IMG_DEVMEM_OFFSET_T uiNextSymName;
+
+	*puiMemOffset += psMemDesc->uiOffset;
+
+    eError = BridgePMRPDumpSymbolicAddr(psMemDesc->psImport->hBridge,
+										psMemDesc->psImport->hPMR,
+										*puiMemOffset,
+										sizeof(aszMemspaceName),
+										&aszMemspaceName[0],
+										sizeof(aszSymbolicName),
+										&aszSymbolicName[0],
+										puiMemOffset,
+										&uiNextSymName);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: failed with error %d",
+				__FUNCTION__, eError));
+	}
+    PVR_ASSERT(eError == PVRSRV_OK);
+
+	OSSNPrintf(pszName, ui32Size, "%s:%s", &aszMemspaceName[0], &aszSymbolicName[0]);
+	return eError;
+}
+
+IMG_INTERNAL IMG_VOID
+DevmemPDumpSaveToFile(DEVMEM_MEMDESC *psMemDesc,
+                      IMG_DEVMEM_OFFSET_T uiOffset,
+                      IMG_DEVMEM_SIZE_T uiSize,
+                      const IMG_CHAR *pszFilename)
+{
+    PVRSRV_ERROR eError;
+
+    eError = BridgePMRPDumpSaveToFile(psMemDesc->psImport->hBridge,
+									  psMemDesc->psImport->hPMR,
+									  psMemDesc->uiOffset + uiOffset,
+									  uiSize,
+									  OSStringLength(pszFilename) + 1,
+									  pszFilename);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: failed with error %d",
+				__FUNCTION__, eError));
+	}
+    PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+/* FIXME: Remove? */
+IMG_INTERNAL IMG_VOID
+DevmemPDumpSaveToFileVirtual(DEVMEM_MEMDESC *psMemDesc,
+                             IMG_DEVMEM_OFFSET_T uiOffset,
+                             IMG_DEVMEM_SIZE_T uiSize,
+                             const IMG_CHAR *pszFilename,
+							 IMG_UINT32 ui32FileOffset,
+							 IMG_UINT32	ui32PdumpFlags)
+{
+    PVRSRV_ERROR eError;
+    IMG_DEV_VIRTADDR sDevAddrStart;
+
+    sDevAddrStart = psMemDesc->psImport->sDeviceImport.sDevVAddr;
+    sDevAddrStart.uiAddr += psMemDesc->uiOffset;
+    sDevAddrStart.uiAddr += uiOffset;
+
+    eError = BridgeDevmemIntPDumpSaveToFileVirtual(psMemDesc->psImport->hBridge,
+                                                   psMemDesc->psImport->sDeviceImport.psHeap->psCtx->hDevMemServerContext,
+                                                   sDevAddrStart,
+                                                   uiSize,
+                                                   OSStringLength(pszFilename) + 1,
+                                                   pszFilename,
+												   ui32FileOffset,
+												   ui32PdumpFlags);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: failed with error %d",
+				__FUNCTION__, eError));
+	}
+    PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemPDumpDevmemPol32(const DEVMEM_MEMDESC *psMemDesc,
+                       IMG_DEVMEM_OFFSET_T uiOffset,
+                       IMG_UINT32 ui32Value,
+                       IMG_UINT32 ui32Mask,
+                       PDUMP_POLL_OPERATOR eOperator,
+                       PDUMP_FLAGS_T ui32PDumpFlags)
+{
+    PVRSRV_ERROR eError;
+    IMG_DEVMEM_SIZE_T uiNumBytes;
+
+    uiNumBytes = 4;
+
+    if (psMemDesc->uiOffset + uiOffset + uiNumBytes >= psMemDesc->psImport->uiSize)
+    {
+        eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE;
+        goto e0;
+    }
+
+    eError = BridgePMRPDumpPol32(psMemDesc->psImport->hBridge,
+                                 psMemDesc->psImport->hPMR,
+                                 psMemDesc->uiOffset + uiOffset,
+                                 ui32Value,
+                                 ui32Mask,
+                                 eOperator,
+                                 ui32PDumpFlags);
+    if (eError != PVRSRV_OK)
+    {
+        goto e0;
+    }
+
+    return PVRSRV_OK;
+
+    /*
+      error exit paths follow
+    */
+
+ e0:
+    PVR_ASSERT(eError != PVRSRV_OK);
+    return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemPDumpCBP(const DEVMEM_MEMDESC *psMemDesc,
+				IMG_DEVMEM_OFFSET_T uiReadOffset,
+				IMG_DEVMEM_OFFSET_T uiWriteOffset,
+				IMG_DEVMEM_SIZE_T uiPacketSize,
+				IMG_DEVMEM_SIZE_T uiBufferSize)
+{
+	PVRSRV_ERROR eError;
+
+	if ((psMemDesc->uiOffset + uiReadOffset) > psMemDesc->psImport->uiSize)
+	{
+		eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE;
+		goto e0;
+	}
+
+	eError = BridgePMRPDumpCBP(psMemDesc->psImport->hBridge,
+							   psMemDesc->psImport->hPMR,
+							   psMemDesc->uiOffset + uiReadOffset,
+							   uiWriteOffset,
+							   uiPacketSize,
+							   uiBufferSize);
+	if (eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+
+	return PVRSRV_OK;
+
+e0:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+#endif /* PDUMP */
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/shared/common/devicemem_utils.c b/drivers/external_drivers/intel_media/graphics/rgx/services/shared/common/devicemem_utils.c
new file mode 100644
index 0000000..307f047
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/shared/common/devicemem_utils.c
@@ -0,0 +1,698 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device Memory Management internal utility functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Utility functions used internally by device memory management
+                code.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "allocmem.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "ra.h"
+#include "devicemem_utils.h"
+#include "client_mm_bridge.h"
+
+#if !defined(__KERNEL__) && defined(SUPPORT_ION)
+#include <sys/mman.h>
+#endif
+
+/*
+	The Devmem import structure is the structure we use
+	to manage memory that is "imported" (which is page
+	granular) from the server into our process, this
+	includes allocations.
+
+	This allows memory to be imported without requiring
+	any CPU or device mapping. Memory can then be mapped
+	into the device or CPU on demand, but neither is
+	required.
+*/
+
+IMG_INTERNAL
+IMG_VOID _DevmemImportStructAcquire(DEVMEM_IMPORT *psImport)
+{
+	IMG_INT iRefCount = OSAtomicIncrement(&psImport->hRefCount);
+	PVR_UNREFERENCED_PARAMETER(iRefCount);
+	PVR_ASSERT(iRefCount != 1);
+
+	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+					__FUNCTION__,
+					psImport,
+					iRefCount-1,
+					iRefCount);
+}
+
+IMG_INTERNAL
+IMG_VOID _DevmemImportStructRelease(DEVMEM_IMPORT *psImport)
+{
+	IMG_INT iRefCount = OSAtomicDecrement(&psImport->hRefCount);
+	PVR_ASSERT(iRefCount >= 0);
+
+	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+					__FUNCTION__,
+					psImport,
+					iRefCount+1,
+					iRefCount);
+
+	if (iRefCount == 0)
+	{
+		BridgePMRUnrefPMR(psImport->hBridge,
+						  psImport->hPMR);
+		OSLockDestroy(psImport->sCPUImport.hLock);
+		OSLockDestroy(psImport->sDeviceImport.hLock);
+		OSLockDestroy(psImport->hLock);
+		OSFreeMem(psImport);
+	}
+}
+
+IMG_INTERNAL
+IMG_VOID _DevmemImportDiscard(DEVMEM_IMPORT *psImport)
+{
+	PVR_ASSERT(OSAtomicRead(&psImport->hRefCount) == 0);
+	OSLockDestroy(psImport->sCPUImport.hLock);
+	OSLockDestroy(psImport->sDeviceImport.hLock);
+	OSLockDestroy(psImport->hLock);
+	OSFreeMem(psImport);
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR _DevmemMemDescAlloc(DEVMEM_MEMDESC **ppsMemDesc)
+{
+	DEVMEM_MEMDESC *psMemDesc;
+	PVRSRV_ERROR eError;
+
+	psMemDesc = OSAllocMem(sizeof(DEVMEM_MEMDESC));
+
+	if (psMemDesc == IMG_NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto failAlloc;
+	}
+	
+	/* Structure must be zero'd incase it needs to be freed before it is initialised! */
+	OSCachedMemSet(psMemDesc, 0, sizeof(DEVMEM_MEMDESC));
+
+	eError = OSLockCreate(&psMemDesc->hLock, LOCK_TYPE_PASSIVE);
+	if (eError != PVRSRV_OK)
+	{
+		goto failMDLock;
+	}
+
+	eError = OSLockCreate(&psMemDesc->sDeviceMemDesc.hLock, LOCK_TYPE_PASSIVE);
+	if (eError != PVRSRV_OK)
+	{
+		goto failDMDLock;
+	}
+
+	eError = OSLockCreate(&psMemDesc->sCPUMemDesc.hLock, LOCK_TYPE_PASSIVE);
+	if (eError != PVRSRV_OK)
+	{
+		goto failCMDLock;
+	}
+
+	*ppsMemDesc = psMemDesc;
+
+	return PVRSRV_OK;
+
+failCMDLock:
+	OSLockDestroy(psMemDesc->sDeviceMemDesc.hLock);
+failDMDLock:
+	OSLockDestroy(psMemDesc->hLock);
+failMDLock:
+	OSFreeMem(psMemDesc);
+failAlloc:
+	PVR_ASSERT(eError != PVRSRV_OK);
+
+	return eError;
+}
+
+/*
+	Init the MemDesc structure
+*/
+IMG_INTERNAL
+IMG_VOID _DevmemMemDescInit(DEVMEM_MEMDESC *psMemDesc,
+										  IMG_DEVMEM_OFFSET_T uiOffset,
+										  DEVMEM_IMPORT *psImport)
+{
+	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+					__FUNCTION__,
+					psMemDesc,
+					0,
+					1);
+
+	psMemDesc->psImport = psImport;
+	psMemDesc->uiOffset = uiOffset;
+
+	psMemDesc->sDeviceMemDesc.ui32RefCount = 0;
+	psMemDesc->sCPUMemDesc.ui32RefCount = 0;
+	OSAtomicWrite(&psMemDesc->hRefCount, 1);
+}
+
+IMG_INTERNAL
+IMG_VOID _DevmemMemDescAcquire(DEVMEM_MEMDESC *psMemDesc)
+{
+	IMG_INT iRefCount;
+	PVR_UNREFERENCED_PARAMETER(iRefCount);
+
+	iRefCount = OSAtomicIncrement(&psMemDesc->hRefCount);
+	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+					__FUNCTION__,
+					psMemDesc,
+					iRefCount-1,
+					iRefCount);
+}
+
+IMG_INTERNAL
+IMG_VOID _DevmemMemDescRelease(DEVMEM_MEMDESC *psMemDesc)
+{
+	IMG_INT iRefCount;
+	PVR_ASSERT(psMemDesc != NULL);
+	
+	iRefCount = OSAtomicDecrement(&psMemDesc->hRefCount);
+	PVR_ASSERT(iRefCount >= 0);
+
+	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+					__FUNCTION__,
+					psMemDesc,
+					iRefCount+1,
+					iRefCount);
+
+	if (iRefCount == 0)
+	{
+		if (!psMemDesc->psImport->bExportable)
+		{
+			RA_Free(psMemDesc->psImport->sDeviceImport.psHeap->psSubAllocRA,
+					psMemDesc->psImport->sDeviceImport.sDevVAddr.uiAddr +
+					psMemDesc->uiOffset);
+		}
+		else
+		{
+			_DevmemImportStructRelease(psMemDesc->psImport);
+		}
+
+		OSLockDestroy(psMemDesc->sCPUMemDesc.hLock);
+		OSLockDestroy(psMemDesc->sDeviceMemDesc.hLock);
+		OSLockDestroy(psMemDesc->hLock);
+		OSFreeMem(psMemDesc);
+	}
+}
+
+IMG_INTERNAL
+IMG_VOID _DevmemMemDescDiscard(DEVMEM_MEMDESC *psMemDesc)
+{
+	PVR_ASSERT(OSAtomicRead(&psMemDesc->hRefCount) == 0);
+
+	OSLockDestroy(psMemDesc->sCPUMemDesc.hLock);
+	OSLockDestroy(psMemDesc->sDeviceMemDesc.hLock);
+	OSLockDestroy(psMemDesc->hLock);
+	OSFreeMem(psMemDesc);
+}
+
+
+IMG_INTERNAL
+PVRSRV_ERROR _DevmemValidateParams(IMG_DEVMEM_SIZE_T uiSize,
+								   IMG_DEVMEM_ALIGN_T uiAlign,
+								   DEVMEM_FLAGS_T uiFlags)
+{
+    if (!(uiFlags & PVRSRV_MEMALLOCFLAG_GPU_READABLE))
+    {
+        /* Don't support memory not GPU readable currently */
+        return PVRSRV_ERROR_INVALID_PARAMS;
+    }
+
+    if ((uiFlags & PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) &&
+        (uiFlags & PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC))
+    {
+        /* Zero on Alloc and Poison on Alloc are mutually exclusive */
+        return PVRSRV_ERROR_INVALID_PARAMS;
+    }
+
+    if (uiAlign & (uiAlign-1))
+    {
+        return PVRSRV_ERROR_INVALID_PARAMS;
+    }
+
+    /* Verify that size is a positive integer multiple of alignment */
+#if 0 // FIXME
+    if (uiSize & (uiAlign-1))
+    {
+        /* Size not a multiple of alignment */
+        return PVRSRV_ERROR_INVALID_PARAMS;
+    }
+#endif
+    if (uiSize == 0)
+    {
+        return PVRSRV_ERROR_INVALID_PARAMS;
+    }
+
+    return PVRSRV_OK;
+}
+
+/*
+	Allocate and init an import structure
+*/
+IMG_INTERNAL
+PVRSRV_ERROR _DevmemImportStructAlloc(IMG_HANDLE hBridge,
+									  IMG_BOOL bExportable,
+									  DEVMEM_IMPORT **ppsImport)
+{
+	DEVMEM_IMPORT *psImport;
+	PVRSRV_ERROR eError;
+
+    psImport = OSAllocMem(sizeof *psImport);
+    if (psImport == IMG_NULL)
+    {
+        return PVRSRV_ERROR_OUT_OF_MEMORY;
+    }
+
+	/* Setup some known bad values for things we don't have yet */
+	psImport->sDeviceImport.hReservation = LACK_OF_RESERVATION_POISON;
+    psImport->sDeviceImport.hMapping = LACK_OF_MAPPING_POISON;
+    psImport->sDeviceImport.psHeap = IMG_NULL;
+    psImport->sDeviceImport.bMapped = IMG_FALSE;
+
+	eError = OSLockCreate(&psImport->sDeviceImport.hLock, LOCK_TYPE_PASSIVE);
+	if (eError != PVRSRV_OK)
+	{
+		goto failDIOSLockCreate;
+	}
+
+	psImport->sCPUImport.hOSMMapData = IMG_NULL;
+	psImport->sCPUImport.pvCPUVAddr = IMG_NULL;
+
+	eError = OSLockCreate(&psImport->sCPUImport.hLock, LOCK_TYPE_PASSIVE);
+	if (eError != PVRSRV_OK)
+	{
+		goto failCIOSLockCreate;
+	}
+
+	/* Set up common elements */
+    psImport->hBridge = hBridge;
+    psImport->bExportable = bExportable;
+
+	/* Setup refcounts */
+    psImport->sDeviceImport.ui32RefCount = 0;
+    psImport->sCPUImport.ui32RefCount = 0;
+    OSAtomicWrite(&psImport->hRefCount, 0);
+
+	/* Create the lock */
+	eError = OSLockCreate(&psImport->hLock, LOCK_TYPE_PASSIVE);
+	if (eError != PVRSRV_OK)
+	{
+		goto failILockAlloc;
+	}
+
+#if !defined(__KERNEL__) && defined(SUPPORT_ION)
+	psImport->sCPUImport.iDmaBufFd = -1;
+#endif
+
+    *ppsImport = psImport;
+    
+    return PVRSRV_OK;
+
+failILockAlloc:
+	OSLockDestroy(psImport->sCPUImport.hLock);
+failCIOSLockCreate:
+	OSLockDestroy(psImport->sDeviceImport.hLock);
+failDIOSLockCreate:
+	OSFreeMem(psImport);
+	PVR_ASSERT(eError != PVRSRV_OK);
+
+	return eError;
+}
+
+/*
+	Initialise the import structure
+*/
+IMG_INTERNAL
+IMG_VOID _DevmemImportStructInit(DEVMEM_IMPORT *psImport,
+								 IMG_DEVMEM_SIZE_T uiSize,
+								 IMG_DEVMEM_ALIGN_T uiAlign,
+								 DEVMEM_FLAGS_T uiFlags,
+								 IMG_HANDLE hPMR)
+{
+	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+					__FUNCTION__,
+					psImport,
+					0,
+					1);
+
+	psImport->uiSize = uiSize;
+	psImport->uiAlign = uiAlign;
+	psImport->uiFlags = uiFlags;
+	psImport->hPMR = hPMR;
+	OSAtomicWrite(&psImport->hRefCount, 1);
+}
+
+/*
+	Map an import to the device
+*/
+IMG_INTERNAL
+PVRSRV_ERROR _DevmemImportStructDevMap(DEVMEM_HEAP *psHeap,
+									   IMG_BOOL bMap,
+									   DEVMEM_IMPORT *psImport)
+{
+	DEVMEM_DEVICE_IMPORT *psDeviceImport;
+	IMG_BOOL bStatus;
+    RA_BASE_T uiAllocatedAddr;
+    RA_LENGTH_T uiAllocatedSize;
+    IMG_DEV_VIRTADDR sBase;
+    IMG_HANDLE hReservation;
+    PVRSRV_ERROR eError;
+	IMG_UINT uiAlign;
+
+	/* Round the provided import alignment to the configured heap alignment */
+	uiAlign = 1ULL << psHeap->uiLog2ImportAlignment;
+	uiAlign = (psImport->uiAlign + uiAlign - 1) & ~(uiAlign-1);
+
+	psDeviceImport = &psImport->sDeviceImport;
+
+	OSLockAcquire(psDeviceImport->hLock);
+	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+					__FUNCTION__,
+					psImport,
+					psDeviceImport->ui32RefCount,
+					psDeviceImport->ui32RefCount+1);
+
+	if (psDeviceImport->ui32RefCount++ == 0)
+	{
+		_DevmemImportStructAcquire(psImport);
+
+		OSAtomicIncrement(&psHeap->hImportCount);
+
+		if (psHeap->psCtx->hBridge != psImport->hBridge)
+		{
+			/*
+				The import was done with a different connection then the
+				memory context which means they are not compatible.
+			*/
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+			goto failCheck;
+		}
+
+		/* Allocate space in the VM */
+	    bStatus = RA_Alloc(psHeap->psQuantizedVMRA,
+	                       psImport->uiSize,
+	                       0, /* flags: this RA doesn't use flags*/
+	                       uiAlign,
+	                       &uiAllocatedAddr,
+	                       &uiAllocatedSize,
+	                       IMG_NULL /* don't care about per-import priv data */
+	                       );
+	    if (!bStatus)
+	    {
+	        eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_DEVICE_VM;
+	        goto failVMRAAlloc;
+	    }
+	
+	    /* No reason for the allocated virtual size to be different from
+	       the PMR's size */
+	    PVR_ASSERT(uiAllocatedSize == psImport->uiSize);
+	
+	    sBase.uiAddr = uiAllocatedAddr;
+	
+		/* Setup page tables for the allocated VM space */
+	    eError = BridgeDevmemIntReserveRange(psHeap->psCtx->hBridge,
+											 psHeap->hDevMemServerHeap,
+											 sBase,
+											 uiAllocatedSize,
+											 &hReservation);
+	    if (eError != PVRSRV_OK)
+	    {
+	        goto failReserve;
+	    }
+
+		if (bMap)
+		{
+			DEVMEM_FLAGS_T uiMapFlags;
+			
+			uiMapFlags = psImport->uiFlags & PVRSRV_MEMALLOCFLAGS_PERMAPPINGFLAGSMASK;
+
+			/* Actually map the PMR to allocated VM space */
+			eError = BridgeDevmemIntMapPMR(psHeap->psCtx->hBridge,
+										   psHeap->hDevMemServerHeap,
+										   hReservation,
+										   psImport->hPMR,
+										   uiMapFlags,
+										   &psDeviceImport->hMapping);
+			if (eError != PVRSRV_OK)
+			{
+				goto failMap;
+			}
+			psDeviceImport->bMapped = IMG_TRUE;
+		}
+
+		/* Setup device mapping specific parts of the mapping info */
+	    psDeviceImport->hReservation = hReservation;
+		psDeviceImport->sDevVAddr.uiAddr = uiAllocatedAddr;
+		psDeviceImport->psHeap = psHeap;
+	}
+	else
+	{
+		/*
+			Check that we've been asked to map it into the
+			same heap 2nd time around
+		*/
+		if (psHeap != psDeviceImport->psHeap)
+		{
+			eError = PVRSRV_ERROR_INVALID_HEAP;
+			goto failParams;
+		}
+	}
+	OSLockRelease(psDeviceImport->hLock);
+
+	return PVRSRV_OK;
+
+failMap:
+	BridgeDevmemIntUnreserveRange(psHeap->psCtx->hBridge,
+								  hReservation);
+failReserve:
+	RA_Free(psHeap->psQuantizedVMRA,
+            uiAllocatedAddr);
+failVMRAAlloc:
+failCheck:
+	_DevmemImportStructRelease(psImport);
+	OSAtomicDecrement(&psHeap->hImportCount);
+failParams:
+	OSLockRelease(psDeviceImport->hLock);
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+/*
+	Unmap an import from the Device
+*/
+IMG_INTERNAL
+IMG_VOID _DevmemImportStructDevUnmap(DEVMEM_IMPORT *psImport)
+{
+	PVRSRV_ERROR eError;
+	DEVMEM_DEVICE_IMPORT *psDeviceImport;
+
+	psDeviceImport = &psImport->sDeviceImport;
+
+	OSLockAcquire(psDeviceImport->hLock);
+	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+					__FUNCTION__,
+					psImport,
+					psDeviceImport->ui32RefCount,
+					psDeviceImport->ui32RefCount-1);
+
+	if (--psDeviceImport->ui32RefCount == 0)
+	{
+		DEVMEM_HEAP *psHeap = psDeviceImport->psHeap;
+
+		if (psDeviceImport->bMapped)
+		{
+			eError = BridgeDevmemIntUnmapPMR(psImport->hBridge,
+											psDeviceImport->hMapping);
+			PVR_ASSERT(eError == PVRSRV_OK);
+		}
+	
+	    eError = BridgeDevmemIntUnreserveRange(psImport->hBridge,
+	                                        psDeviceImport->hReservation);
+	    PVR_ASSERT(eError == PVRSRV_OK);
+	
+	    RA_Free(psHeap->psQuantizedVMRA,
+	            psDeviceImport->sDevVAddr.uiAddr);
+
+	    OSLockRelease(psDeviceImport->hLock);
+
+		_DevmemImportStructRelease(psImport);
+
+		OSAtomicDecrement(&psHeap->hImportCount);
+	}
+	else
+	{
+		OSLockRelease(psDeviceImport->hLock);
+	}
+}
+
+/*
+	Map an import into the CPU
+*/
+IMG_INTERNAL
+PVRSRV_ERROR _DevmemImportStructCPUMap(DEVMEM_IMPORT *psImport)
+{
+	PVRSRV_ERROR eError;
+	DEVMEM_CPU_IMPORT *psCPUImport;
+	IMG_SIZE_T uiMappingLength;
+
+	psCPUImport = &psImport->sCPUImport;
+
+	OSLockAcquire(psCPUImport->hLock);
+	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+					__FUNCTION__,
+					psImport,
+					psCPUImport->ui32RefCount,
+					psCPUImport->ui32RefCount+1);
+
+	if (psCPUImport->ui32RefCount++ == 0)
+	{
+		_DevmemImportStructAcquire(psImport);
+
+#if !defined(__KERNEL__) && defined(SUPPORT_ION)
+		if (psImport->sCPUImport.iDmaBufFd >= 0)
+		{
+			void *pvCPUVAddr;
+			int iProt = PROT_READ;
+
+			iProt |= (psImport->uiFlags & PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE) ? PROT_WRITE : 0;
+			/* For ion imports, use the ion fd and mmap facility to map the
+			 * buffer to user space. We can bypass the services bridge in
+			 * this case and possibly save some time.
+			 */
+			pvCPUVAddr = mmap(NULL, psImport->uiSize, iProt,
+			                  MAP_SHARED, psImport->sCPUImport.iDmaBufFd, 0);
+
+			if (pvCPUVAddr == MAP_FAILED)
+			{
+				eError = PVRSRV_ERROR_DEVICEMEM_MAP_FAILED;
+				goto failMap;
+			}
+
+			psCPUImport->hOSMMapData = pvCPUVAddr;
+			psCPUImport->pvCPUVAddr = pvCPUVAddr;
+			uiMappingLength = psImport->uiSize;
+		}
+		else
+#endif
+		{
+			eError = OSMMapPMR(psImport->hBridge,
+							   psImport->hPMR,
+							   psImport->uiSize,
+							   psImport->uiFlags,
+							   &psCPUImport->hOSMMapData,
+							   &psCPUImport->pvCPUVAddr,
+							   &uiMappingLength);
+			if (eError != PVRSRV_OK)
+			{
+				goto failMap;
+			}
+		}
+
+		/* There is no reason the mapping length is different to the size */
+		PVR_ASSERT(uiMappingLength == psImport->uiSize);
+	}
+	OSLockRelease(psCPUImport->hLock);
+
+	return PVRSRV_OK;
+
+failMap:
+	psCPUImport->ui32RefCount--;
+	_DevmemImportStructRelease(psImport);
+	OSLockRelease(psCPUImport->hLock);
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+/*
+	Unmap an import from the CPU
+*/
+IMG_INTERNAL
+IMG_VOID _DevmemImportStructCPUUnmap(DEVMEM_IMPORT *psImport)
+{
+	DEVMEM_CPU_IMPORT *psCPUImport;
+
+	psCPUImport = &psImport->sCPUImport;
+
+	OSLockAcquire(psCPUImport->hLock);
+	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+					__FUNCTION__,
+					psImport,
+					psCPUImport->ui32RefCount,
+					psCPUImport->ui32RefCount-1);
+
+	if (--psCPUImport->ui32RefCount == 0)
+	{
+		/* FIXME: psImport->uiSize is a 64-bit quantity where as the 5th
+		 * argument to OSUnmapPMR is a 32-bit quantity on 32-bit systems
+		 * hence a compiler warning of implicit cast and loss of data.
+		 * Added explicit cast and assert to remove warning.
+		 */
+#if (defined(_WIN32) && !defined(_WIN64)) || (defined(LINUX) && defined(__i386__))
+		PVR_ASSERT(psImport->uiSize<IMG_UINT32_MAX);
+#endif
+
+#if !defined(__KERNEL__) && defined(SUPPORT_ION)
+		if (psImport->sCPUImport.iDmaBufFd >= 0)
+		{
+			munmap(psCPUImport->hOSMMapData, psImport->uiSize);
+		}
+		else
+#endif
+		{
+			OSMUnmapPMR(psImport->hBridge,
+						psImport->hPMR,
+						psCPUImport->hOSMMapData,
+						psCPUImport->pvCPUVAddr,
+						(IMG_SIZE_T)psImport->uiSize);
+		}
+
+		OSLockRelease(psCPUImport->hLock);
+
+		_DevmemImportStructRelease(psImport);
+	}
+	else
+	{
+		OSLockRelease(psCPUImport->hLock);
+	}
+}
+
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/shared/common/dllist.c b/drivers/external_drivers/intel_media/graphics/rgx/services/shared/common/dllist.c
new file mode 100644
index 0000000..ef6db05
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/shared/common/dllist.c
@@ -0,0 +1,81 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services implementation of double linked lists
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements a double linked list
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_types.h"
+#include "dllist.h"
+
+#if defined(RGX_FIRMWARE)
+#include "rgxfw_cr_defs.h"
+#include "rgxfw_ctl.h"
+#endif
+
+/* Walk through all the nodes on the list until the end or a callback returns FALSE */
+#if defined(RGX_FIRMWARE)
+RGXFW_COREMEM_CODE
+#endif
+IMG_VOID dllist_foreach_node(PDLLIST_NODE psListHead,
+							  PFN_NODE_CALLBACK pfnCallBack,
+							  IMG_PVOID pvCallbackData)
+{
+	PDLLIST_NODE psWalker = psListHead->psNextNode;
+	PDLLIST_NODE psNextWalker;
+
+	while (psWalker != psListHead)
+	{
+		/*
+			The callback function could remove itself from the list
+			so to avoid NULL pointer deference save the next node pointer
+			before calling the callback
+		*/
+		psNextWalker = psWalker->psNextNode;
+		if (pfnCallBack(psWalker, pvCallbackData))
+		{
+			psWalker = psNextWalker;
+		}
+		else
+		{
+			break;
+		}
+	}
+}
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/shared/common/hash.c b/drivers/external_drivers/intel_media/graphics/rgx/services/shared/common/hash.c
new file mode 100644
index 0000000..c58c597
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/shared/common/hash.c
@@ -0,0 +1,708 @@
+/*************************************************************************/ /*!
+@File
+@Title          Self scaling hash tables.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description 
+   Implements simple self scaling hash tables. Hash collisions are
+   handled by chaining entries together. Hash tables are increased in
+   size when they become more than (50%?) full and decreased in size
+   when less than (25%?) full. Hash tables are never decreased below
+   their initial size.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* include/ */
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+
+/* services/shared/include/ */
+#include "hash.h"
+
+/* services/client/include/ or services/server/include/ */
+#include "osfunc.h"
+#include "allocmem.h"
+
+#if defined(__KERNEL__)
+#include "pvrsrv.h"
+#endif
+
+#define PRIVATE_MAX(a,b) ((a)>(b)?(a):(b))
+
+#define	KEY_TO_INDEX(pHash, key, uSize) \
+	((pHash)->pfnHashFunc((pHash)->uKeySize, (key), (uSize)) % (uSize))
+
+#define	KEY_COMPARE(pHash, pKey1, pKey2) \
+	((pHash)->pfnKeyComp((pHash)->uKeySize, (pKey1), (pKey2)))
+
+/* Each entry in a hash table is placed into a bucket */
+struct _BUCKET_
+{
+	/* the next bucket on the same chain */
+	struct _BUCKET_ *pNext;
+
+	/* entry value */
+	IMG_UINTPTR_T v;
+
+	/* entry key */
+#if defined (WIN32)
+	IMG_UINTPTR_T k[1];
+#else
+	IMG_UINTPTR_T k[];		/* PRQA S 0642 */ /* override dynamic array declaration warning */
+#endif
+};
+typedef struct _BUCKET_ BUCKET;
+
+struct _HASH_TABLE_
+{
+	/* current size of the hash table */
+	IMG_UINT32 uSize;
+
+	/* number of entries currently in the hash table */
+	IMG_UINT32 uCount;
+
+	/* the minimum size that the hash table should be re-sized to */
+	IMG_UINT32 uMinimumSize;
+
+	/* size of key in bytes */
+	IMG_UINT32 uKeySize;
+
+	/* hash function */
+	HASH_FUNC *pfnHashFunc;
+
+	/* key comparison function */
+	HASH_KEY_COMP *pfnKeyComp;
+
+	/* the hash table array */
+	BUCKET **ppBucketTable;
+};
+
+/*************************************************************************/ /*!
+@Function       HASH_Func_Default
+@Description    Hash function intended for hashing keys composed of
+                IMG_UINTPTR_T arrays.
+@Input          uKeySize     The size of the hash key, in bytes.
+@Input          pKey         A pointer to the key to hash.
+@Input          uHashTabLen  The length of the hash table.
+@Return         The hash value.
+*/ /**************************************************************************/
+IMG_INTERNAL IMG_UINT32
+HASH_Func_Default (IMG_SIZE_T uKeySize, IMG_VOID *pKey, IMG_UINT32 uHashTabLen)
+{
+	IMG_UINTPTR_T *p = (IMG_UINTPTR_T *)pKey;
+	IMG_UINT32 uKeyLen = uKeySize / sizeof(IMG_UINTPTR_T);
+	IMG_UINT32 ui;
+	IMG_UINT32 uHashKey = 0;
+
+	PVR_UNREFERENCED_PARAMETER(uHashTabLen);
+
+	PVR_ASSERT((uKeySize % sizeof(IMG_UINTPTR_T)) == 0);
+
+	for (ui = 0; ui < uKeyLen; ui++)
+	{
+		IMG_UINT32 uHashPart = (IMG_UINT32)*p++;
+
+		uHashPart += (uHashPart << 12);
+		uHashPart ^= (uHashPart >> 22);
+		uHashPart += (uHashPart << 4);
+		uHashPart ^= (uHashPart >> 9);
+		uHashPart += (uHashPart << 10);
+		uHashPart ^= (uHashPart >> 2);
+		uHashPart += (uHashPart << 7);
+		uHashPart ^= (uHashPart >> 12);
+
+		uHashKey += uHashPart;
+	}
+
+	return uHashKey;
+}
+
+/*************************************************************************/ /*!
+@Function       HASH_Key_Comp_Default
+@Description    Compares keys composed of IMG_UINTPTR_T arrays.
+@Input          uKeySize    The size of the hash key, in bytes.
+@Input          pKey1       Pointer to first hash key to compare.
+@Input          pKey2       Pointer to second hash key to compare.
+@Return         IMG_TRUE    The keys match.
+                IMG_FALSE   The keys don't match.
+*/ /**************************************************************************/
+IMG_INTERNAL IMG_BOOL
+HASH_Key_Comp_Default (IMG_SIZE_T uKeySize, IMG_VOID *pKey1, IMG_VOID *pKey2)
+{
+	IMG_UINTPTR_T *p1 = (IMG_UINTPTR_T *)pKey1;
+	IMG_UINTPTR_T *p2 = (IMG_UINTPTR_T *)pKey2;
+	IMG_UINT32 uKeyLen = uKeySize / sizeof(IMG_UINTPTR_T);
+	IMG_UINT32 ui;
+
+	PVR_ASSERT((uKeySize % sizeof(IMG_UINTPTR_T)) == 0);
+
+	for (ui = 0; ui < uKeyLen; ui++)
+	{
+		if (*p1++ != *p2++)
+			return IMG_FALSE;
+	}
+
+	return IMG_TRUE;
+}
+
+/*************************************************************************/ /*!
+@Function       _ChainInsert
+@Description    Insert a bucket into the appropriate hash table chain.
+@Input          pBucket       The bucket
+@Input          ppBucketTable The hash table
+@Input          uSize         The size of the hash table
+@Return         PVRSRV_ERROR
+*/ /**************************************************************************/
+static PVRSRV_ERROR
+_ChainInsert (HASH_TABLE *pHash, BUCKET *pBucket, BUCKET **ppBucketTable, IMG_UINT32 uSize)
+{
+	IMG_UINT32 uIndex;
+
+	PVR_ASSERT (pBucket != IMG_NULL);
+	PVR_ASSERT (ppBucketTable != IMG_NULL);
+	PVR_ASSERT (uSize != 0);
+
+	if ((pBucket == IMG_NULL) || (ppBucketTable == IMG_NULL) || (uSize == 0))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "_ChainInsert: invalid parameter"));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	uIndex = KEY_TO_INDEX(pHash, pBucket->k, uSize);	/* PRQA S 0432,0541 */ /* ignore dynamic array warning */
+	pBucket->pNext = ppBucketTable[uIndex];
+	ppBucketTable[uIndex] = pBucket;
+
+	return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       _Rehash
+@Description    Iterate over every entry in an old hash table and
+                rehash into the new table.
+@Input          ppOldTable   The old hash table
+@Input          uOldSize     The size of the old hash table
+@Input          ppNewTable   The new hash table
+@Input          uNewSize     The size of the new hash table
+@Return         None
+*/ /**************************************************************************/
+static PVRSRV_ERROR
+_Rehash (HASH_TABLE *pHash,
+	 BUCKET **ppOldTable, IMG_UINT32 uOldSize,
+         BUCKET **ppNewTable, IMG_UINT32 uNewSize)
+{
+	IMG_UINT32 uIndex;
+	for (uIndex=0; uIndex< uOldSize; uIndex++)
+    {
+		BUCKET *pBucket;
+		pBucket = ppOldTable[uIndex];
+		while (pBucket != IMG_NULL)
+		{
+			PVRSRV_ERROR eError;
+			BUCKET *pNextBucket = pBucket->pNext;
+			eError = _ChainInsert (pHash, pBucket, ppNewTable, uNewSize);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "_Rehash: call to _ChainInsert failed"));
+				return eError;
+			}
+			pBucket = pNextBucket;
+		}
+    }
+	return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       _Resize
+@Description    Attempt to resize a hash table, failure to allocate a
+                new larger hash table is not considered a hard failure.
+                We simply continue and allow the table to fill up, the
+                effect is to allow hash chains to become longer.
+@Input          pHash      Hash table to resize.
+@Input          uNewSize   Required table size.
+@Return         IMG_TRUE Success
+                IMG_FALSE Failed
+*/ /**************************************************************************/
+static IMG_BOOL
+_Resize (HASH_TABLE *pHash, IMG_UINT32 uNewSize)
+{
+	if (uNewSize != pHash->uSize)
+    {
+		BUCKET **ppNewTable;
+        IMG_UINT32 uIndex;
+
+#if defined(__linux__) && defined(__KERNEL__)
+		ppNewTable = OSAllocMemstatMem(sizeof (BUCKET *) * uNewSize);
+#else
+		ppNewTable = OSAllocMem(sizeof (BUCKET *) * uNewSize);
+#endif
+		if (ppNewTable == IMG_NULL)
+        {
+            return IMG_FALSE;
+        }
+
+        for (uIndex=0; uIndex<uNewSize; uIndex++)
+            ppNewTable[uIndex] = IMG_NULL;
+
+        if (_Rehash (pHash, pHash->ppBucketTable, pHash->uSize, ppNewTable, uNewSize) != PVRSRV_OK)
+		{
+			/*
+				If we fail the rehash then there is nothing we can do as we've already
+				started to modify some of the entries if we just return FALSE here then
+				we will have dropped some items off the hash table.
+				The only reason the rehash can fail if is there is bug in another part
+				of the driver so in reality we should never hit this
+			*/
+			PVR_ASSERT(IMG_FALSE);
+			return IMG_FALSE;
+		}
+
+#if defined(__linux__) && defined(__KERNEL__)
+        OSFreeMemstatMem(pHash->ppBucketTable);
+#else
+        OSFreeMem(pHash->ppBucketTable);
+#endif
+        /*not nulling pointer, being reassigned just below*/
+        pHash->ppBucketTable = ppNewTable;
+        pHash->uSize = uNewSize;
+    }
+    return IMG_TRUE;
+}
+
+
+/*************************************************************************/ /*!
+@Function       HASH_Create_Extended
+@Description    Create a self scaling hash table, using the supplied
+                key size, and the supplied hash and key comparsion
+                functions.
+@Input          uInitialLen   Initial and minimum length of the
+                              hash table, where the length refers to the number
+                              of entries in the hash table, not its size in
+                              bytes.
+@Input          uKeySize      The size of the key, in bytes.
+@Input          pfnHashFunc   Pointer to hash function.
+@Input          pfnKeyComp    Pointer to key comparsion function.
+@Return         IMG_NULL or hash table handle.
+*/ /**************************************************************************/
+IMG_INTERNAL 
+HASH_TABLE * HASH_Create_Extended (IMG_UINT32 uInitialLen, IMG_SIZE_T uKeySize, HASH_FUNC *pfnHashFunc, HASH_KEY_COMP *pfnKeyComp)
+{
+	HASH_TABLE *pHash;
+	IMG_UINT32 uIndex;
+
+	PVR_DPF ((PVR_DBG_MESSAGE, "HASH_Create_Extended: InitialSize=0x%x", uInitialLen));
+
+#if defined(__linux__) && defined(__KERNEL__)
+	pHash = OSAllocMemstatMem(sizeof(HASH_TABLE));
+#else
+	pHash = OSAllocMem(sizeof(HASH_TABLE));
+#endif
+    if (pHash == IMG_NULL)
+	{
+		return IMG_NULL;
+	}
+
+	pHash->uCount = 0;
+	pHash->uSize = uInitialLen;
+	pHash->uMinimumSize = uInitialLen;
+	pHash->uKeySize = uKeySize;
+	pHash->pfnHashFunc = pfnHashFunc;
+	pHash->pfnKeyComp = pfnKeyComp;
+
+#if defined(__linux__) && defined(__KERNEL__)
+    pHash->ppBucketTable = OSAllocMemstatMem(sizeof (BUCKET *) * pHash->uSize);
+#else
+    pHash->ppBucketTable = OSAllocMem(sizeof (BUCKET *) * pHash->uSize);
+#endif
+    if (pHash->ppBucketTable == IMG_NULL)
+    {
+#if defined(__linux__) && defined(__KERNEL__)
+		OSFreeMemstatMem(pHash);
+#else
+		OSFreeMem(pHash);
+#endif
+		/*not nulling pointer, out of scope*/
+		return IMG_NULL;
+    }
+
+	for (uIndex=0; uIndex<pHash->uSize; uIndex++)
+		pHash->ppBucketTable[uIndex] = IMG_NULL;
+	return pHash;
+}
+
+/*************************************************************************/ /*!
+@Function       HASH_Create
+@Description    Create a self scaling hash table with a key
+                consisting of a single IMG_UINTPTR_T, and using
+                the default hash and key comparison functions.
+@Input          uInitialLen   Initial and minimum length of the
+                              hash table, where the length refers to the
+                              number of entries in the hash table, not its size
+                              in bytes.
+@Return         IMG_NULL or hash table handle.
+*/ /**************************************************************************/
+IMG_INTERNAL 
+HASH_TABLE * HASH_Create (IMG_UINT32 uInitialLen)
+{
+	return HASH_Create_Extended(uInitialLen, sizeof(IMG_UINTPTR_T),
+		&HASH_Func_Default, &HASH_Key_Comp_Default);
+}
+
+/*************************************************************************/ /*!
+@Function       HASH_Delete
+@Description    Delete a hash table created by HASH_Create_Extended or
+                HASH_Create.  All entries in the table must have been
+                removed before calling this function.
+@Input          pHash     Hash table
+@Return         None
+*/ /**************************************************************************/
+IMG_INTERNAL IMG_VOID
+HASH_Delete (HASH_TABLE *pHash)
+{
+	IMG_BOOL bDoCheck = IMG_TRUE;
+#if defined(__KERNEL__) && !defined(__QNXNTO__)
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+	if (psPVRSRVData != IMG_NULL)
+	{
+		if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK)
+		{
+			bDoCheck = IMG_FALSE;
+		}
+	}
+#endif
+	if (pHash != IMG_NULL)
+    {
+		if (bDoCheck)
+		{
+			PVR_ASSERT (pHash->uCount==0);
+		}
+		if(pHash->uCount != 0)
+		{
+			PVR_DPF ((PVR_DBG_ERROR, "HASH_Delete: leak detected in hash table!"));
+			PVR_DPF ((PVR_DBG_ERROR, "Likely Cause: client drivers not freeing alocations before destroying devmemcontext"));
+		}
+#if defined(__linux__) && defined(__KERNEL__)
+		OSFreeMemstatMem(pHash->ppBucketTable);
+#else
+		OSFreeMem(pHash->ppBucketTable);
+#endif
+		pHash->ppBucketTable = IMG_NULL;
+#if defined(__linux__) && defined(__KERNEL__)
+		OSFreeMemstatMem(pHash);
+#else
+		OSFreeMem(pHash);
+#endif
+		/*not nulling pointer, copy on stack*/
+    }
+}
+
+/*************************************************************************/ /*!
+@Function       HASH_Insert_Extended
+@Description    Insert a key value pair into a hash table created
+                with HASH_Create_Extended.
+@Input          pHash     Hash table
+@Input          pKey      Pointer to the key.
+@Input          v         The value associated with the key.
+@Return         IMG_TRUE  - success
+                IMG_FALSE  - failure
+*/ /**************************************************************************/
+IMG_INTERNAL IMG_BOOL
+HASH_Insert_Extended (HASH_TABLE *pHash, IMG_VOID *pKey, IMG_UINTPTR_T v)
+{
+	BUCKET *pBucket;
+
+	PVR_ASSERT (pHash != IMG_NULL);
+
+	if (pHash == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "HASH_Insert_Extended: invalid parameter"));
+		return IMG_FALSE;
+	}
+
+#if defined(__linux__) && defined(__KERNEL__)
+	pBucket = OSAllocMemstatMem(sizeof(BUCKET) + pHash->uKeySize);
+#else
+	pBucket = OSAllocMem(sizeof(BUCKET) + pHash->uKeySize);
+#endif
+    if (pBucket == IMG_NULL)
+	{
+		return IMG_FALSE;
+	}
+
+	pBucket->v = v;
+	/* PRQA S 0432,0541 1 */ /* ignore warning about dynamic array k (linux)*/
+	OSCachedMemCopy(pBucket->k, pKey, pHash->uKeySize);
+	if (_ChainInsert (pHash, pBucket, pHash->ppBucketTable, pHash->uSize) != PVRSRV_OK)
+	{
+#if defined(__linux__) && defined(__KERNEL__)
+		OSFreeMemstatMem(pBucket);
+#else
+		OSFreeMem(pBucket);
+#endif
+		return IMG_FALSE;
+	}
+
+	pHash->uCount++;
+
+	/* check if we need to think about re-balancing */
+	if (pHash->uCount << 1 > pHash->uSize)
+    {
+        /* Ignore the return code from _Resize because the hash table is
+           still in a valid state and although not ideally sized, it is still
+           functional */
+        _Resize (pHash, pHash->uSize << 1);
+    }
+
+
+	return IMG_TRUE;
+}
+
+/*************************************************************************/ /*!
+@Function       HASH_Insert
+@Description    Insert a key value pair into a hash table created with
+                HASH_Create.
+@Input          pHash     Hash table
+@Input          k         The key value.
+@Input          v         The value associated with the key.
+@Return         IMG_TRUE - success.
+                IMG_FALSE - failure.
+*/ /**************************************************************************/
+IMG_INTERNAL IMG_BOOL
+HASH_Insert (HASH_TABLE *pHash, IMG_UINTPTR_T k, IMG_UINTPTR_T v)
+{
+	return HASH_Insert_Extended(pHash, &k, v);
+}
+
+/*************************************************************************/ /*!
+@Function       HASH_Remove_Extended
+@Description    Remove a key from a hash table created with
+                HASH_Create_Extended.
+@Input          pHash     Hash table
+@Input          pKey      Pointer to key.
+@Return         0 if the key is missing, or the value associated with the key.
+*/ /**************************************************************************/
+IMG_INTERNAL IMG_UINTPTR_T
+HASH_Remove_Extended(HASH_TABLE *pHash, IMG_VOID *pKey)
+{
+	BUCKET **ppBucket;
+	IMG_UINT32 uIndex;
+
+	PVR_ASSERT (pHash != IMG_NULL);
+
+	if (pHash == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "HASH_Remove_Extended: Null hash table"));
+		return 0;
+	}
+
+	uIndex = KEY_TO_INDEX(pHash, pKey, pHash->uSize);
+
+	for (ppBucket = &(pHash->ppBucketTable[uIndex]); *ppBucket != IMG_NULL; ppBucket = &((*ppBucket)->pNext))
+	{
+		/* PRQA S 0432,0541 1 */ /* ignore warning about dynamic array k */
+		if (KEY_COMPARE(pHash, (*ppBucket)->k, pKey))
+		{
+			BUCKET *pBucket = *ppBucket;
+			IMG_UINTPTR_T v = pBucket->v;
+			(*ppBucket) = pBucket->pNext;
+
+#if defined(__linux__) && defined(__KERNEL__)
+			OSFreeMemstatMem(pBucket);
+#else
+			OSFreeMem(pBucket);
+#endif
+			/*not nulling original pointer, already overwritten*/
+
+			pHash->uCount--;
+
+			/* check if we need to think about re-balancing */
+			if (pHash->uSize > (pHash->uCount << 2) &&
+                pHash->uSize > pHash->uMinimumSize)
+            {
+                /* Ignore the return code from _Resize because the
+                   hash table is still in a valid state and although
+                   not ideally sized, it is still functional */
+				_Resize (pHash,
+                         PRIVATE_MAX (pHash->uSize >> 1,
+                                      pHash->uMinimumSize));
+            }
+
+			return v;
+		}
+	}
+	return 0;
+}
+
+/*************************************************************************/ /*!
+@Function       HASH_Remove
+@Description    Remove a key value pair from a hash table created
+                with HASH_Create.
+@Input          pHash     Hash table
+@Input          k         The key
+@Return         0 if the key is missing, or the value associated with the key.
+*/ /**************************************************************************/
+IMG_INTERNAL IMG_UINTPTR_T
+HASH_Remove (HASH_TABLE *pHash, IMG_UINTPTR_T k)
+{
+	return HASH_Remove_Extended(pHash, &k);
+}
+
+/*************************************************************************/ /*!
+@Function       HASH_Retrieve_Extended
+@Description    Retrieve a value from a hash table created with
+                HASH_Create_Extended.
+@Input          pHash     Hash table
+@Input          pKey      Pointer to the key.
+@Return         0 if the key is missing, or the value associated with the key.
+*/ /**************************************************************************/
+IMG_INTERNAL IMG_UINTPTR_T
+HASH_Retrieve_Extended (HASH_TABLE *pHash, IMG_VOID *pKey)
+{
+	BUCKET **ppBucket;
+	IMG_UINT32 uIndex;
+
+	PVR_ASSERT (pHash != IMG_NULL);
+
+	if (pHash == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "HASH_Retrieve_Extended: Null hash table"));
+		return 0;
+	}
+
+	uIndex = KEY_TO_INDEX(pHash, pKey, pHash->uSize);
+
+	for (ppBucket = &(pHash->ppBucketTable[uIndex]); *ppBucket != IMG_NULL; ppBucket = &((*ppBucket)->pNext))
+	{
+		/* PRQA S 0432,0541 1 */ /* ignore warning about dynamic array k */
+		if (KEY_COMPARE(pHash, (*ppBucket)->k, pKey))
+		{
+			BUCKET *pBucket = *ppBucket;
+			IMG_UINTPTR_T v = pBucket->v;
+
+			return v;
+		}
+	}
+	return 0;
+}
+
+/*************************************************************************/ /*!
+@Function       HASH_Retrieve
+@Description    Retrieve a value from a hash table created with
+                HASH_Create.
+@Input          pHash     Hash table
+@Input          k         The key
+@Return         0 if the key is missing, or the value associated with the key.
+*/ /**************************************************************************/
+IMG_INTERNAL IMG_UINTPTR_T
+HASH_Retrieve (HASH_TABLE *pHash, IMG_UINTPTR_T k)
+{
+	return HASH_Retrieve_Extended(pHash, &k);
+}
+
+/*************************************************************************/ /*!
+@Function       HASH_Iterate
+@Description    Iterate over every entry in the hash table
+@Input          pHash - Hash table to iterate
+@Input          pfnCallback - Callback to call with the key and data for each
+							  entry in the hash table
+@Return         Callback error if any, otherwise PVRSRV_OK
+*/ /**************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+HASH_Iterate(HASH_TABLE *pHash, HASH_pfnCallback pfnCallback)
+{
+    IMG_UINT32 uIndex;
+    for (uIndex=0; uIndex < pHash->uSize; uIndex++)
+    {
+        BUCKET *pBucket;
+        pBucket = pHash->ppBucketTable[uIndex];
+        while (pBucket != IMG_NULL)
+        {
+            PVRSRV_ERROR eError;
+            BUCKET *pNextBucket = pBucket->pNext;
+
+            eError = pfnCallback((IMG_UINTPTR_T) ((IMG_VOID *) *(pBucket->k)), (IMG_UINTPTR_T) pBucket->v);
+
+            /* The callback might want us to break out early */
+            if (eError != PVRSRV_OK)
+                return eError;
+
+            pBucket = pNextBucket;
+        }
+    }
+    return PVRSRV_OK;
+}
+
+
+#ifdef HASH_TRACE
+/*************************************************************************/ /*!
+@Function       HASH_Dump
+@Description    To dump the contents of a hash table in human readable
+                form.
+@Input          pHash     Hash table
+*/ /**************************************************************************/
+IMG_VOID
+HASH_Dump (HASH_TABLE *pHash)
+{
+	IMG_UINT32 uIndex;
+	IMG_UINT32 uMaxLength=0;
+	IMG_UINT32 uEmptyCount=0;
+
+	PVR_ASSERT (pHash != IMG_NULL);
+	for (uIndex=0; uIndex<pHash->uSize; uIndex++)
+	{
+		BUCKET *pBucket;
+		IMG_UINT32 uLength = 0;
+		if (pHash->ppBucketTable[uIndex] == IMG_NULL)
+		{
+			uEmptyCount++;
+		}
+		for (pBucket=pHash->ppBucketTable[uIndex];
+				pBucket != IMG_NULL;
+				pBucket = pBucket->pNext)
+		{
+			uLength++;
+		}
+		uMaxLength = PRIVATE_MAX (uMaxLength, uLength);
+	}
+
+	PVR_TRACE(("hash table: uMinimumSize=%d  size=%d  count=%d",
+			pHash->uMinimumSize, pHash->uSize, pHash->uCount));
+	PVR_TRACE(("  empty=%d  max=%d", uEmptyCount, uMaxLength));
+}
+#endif
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/shared/common/ra.c b/drivers/external_drivers/intel_media/graphics/rgx/services/shared/common/ra.c
new file mode 100644
index 0000000..af5541c
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/shared/common/ra.c
@@ -0,0 +1,1344 @@
+/*************************************************************************/ /*!
+@File
+@Title          Resource Allocator
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+@Description
+ Implements generic resource allocation. The resource
+ allocator was originally intended to manage address spaces.  In
+ practice the resource allocator is generic and can manage arbitrary
+ sets of integers.
+
+ Resources are allocated from arenas. Arena's can be created with an
+ initial span of resources. Further resources spans can be added to
+ arenas. A call back mechanism allows an arena to request further
+ resource spans on demand.
+
+ Each arena maintains an ordered list of resource segments each
+ described by a boundary tag. Each boundary tag describes a segment
+ of resources which are either 'free', available for allocation, or
+ 'busy' currently allocated. Adjacent 'free' segments are always
+ coallesced to avoid fragmentation.
+
+ For allocation, all 'free' segments are kept on lists of 'free'
+ segments in a table index by pvr_log2(segment size). ie Each table index
+ n holds 'free' segments in the size range 2^n -> 2^(n+1) - 1.
+
+ Allocation policy is based on an *almost* good fit strategy. 
+
+ Allocated segments are inserted into a self scaling hash table which
+ maps the base resource of the span to the relevant boundary
+ tag. This allows the code to get back to the bounary tag without
+ exporting explicit boundary tag references through the API.
+
+ Each arena has an associated quantum size, all allocations from the
+ arena are made in multiples of the basic quantum.
+
+ On resource exhaustion in an arena, a callback if provided will be
+ used to request further resources. Resouces spans allocated by the
+ callback mechanism will be returned when freed (through one of the
+ two callbacks).
+*/ /**************************************************************************/
+
+/* Issues:
+ * - flags, flags are passed into the resource allocator but are not currently used.
+ * - determination, of import size, is currently braindead.
+ * - debug code should be moved out to own module and #ifdef'd
+ */
+
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "uniq_key_splay_tree.h"
+
+#include "hash.h"
+#include "ra.h"
+
+#include "osfunc.h"
+#include "allocmem.h"
+#include "lock.h"
+
+/* The initial, and minimum size of the live address -> boundary tag
+   structure hash table. The value 64 is a fairly arbitrary
+   choice. The hash table resizes on demand so the value choosen is
+   not critical. */
+#define MINIMUM_HASH_SIZE (64)
+
+
+/* #define RA_VALIDATE */
+
+#if defined(__KLOCWORK__)
+  /* make sure Klocworks analyse all the code (including the debug one) */
+  #if !defined(RA_VALIDATE)
+    #define RA_VALIDATE
+  #endif
+#endif
+
+#if (!defined(PVRSRV_NEED_PVR_ASSERT)) || (!defined(RA_VALIDATE))
+  /* Disable the asserts unless explicitly told otherwise.  They slow the driver
+     too much for other people */
+
+  #undef PVR_ASSERT
+  /* let's use a macro that really do not do anything when compiling in release
+     mode! */
+  #define PVR_ASSERT(x)
+#endif
+
+/* boundary tags, used to describe a resource segment */
+struct _BT_
+{
+	enum bt_type
+	{
+		btt_free,				/* free resource segment */
+		btt_live				/* allocated resource segment */
+	} type;
+
+	unsigned int is_leftmost;
+	unsigned int is_rightmost;
+	unsigned int free_import;
+
+	/* The base resource and extent of this segment */
+	RA_BASE_T base;
+	RA_LENGTH_T uSize;
+
+	/* doubly linked ordered list of all segments within the arena */
+	struct _BT_ *pNextSegment;
+	struct _BT_ *pPrevSegment;
+
+	/* doubly linked un-ordered list of free segments with the same flags. */
+	struct _BT_ * next_free;
+	struct _BT_ * prev_free;
+	
+	/* a user reference associated with this span, user references are
+	 * currently only provided in the callback mechanism */
+    IMG_HANDLE hPriv;
+
+    /* Flags to match on this span */
+    IMG_UINT32 uFlags;
+
+};
+typedef struct _BT_ BT;
+
+
+/* resource allocation arena */
+struct _RA_ARENA_
+{
+	/* arena name for diagnostics output */
+	IMG_CHAR *name;
+
+	/* allocations within this arena are quantum sized */
+	RA_LENGTH_T uQuantum;
+
+	/* import interface, if provided */
+	IMG_BOOL (*pImportAlloc)(RA_PERARENA_HANDLE h,
+							 RA_LENGTH_T uSize,
+							 IMG_UINT32 uFlags,
+							 RA_BASE_T *pBase,
+							 RA_LENGTH_T *pActualSize,
+                             RA_PERISPAN_HANDLE *phPriv);
+	IMG_VOID (*pImportFree) (RA_PERARENA_HANDLE,
+                             RA_BASE_T,
+                             RA_PERISPAN_HANDLE hPriv);
+
+	/* arbitrary handle provided by arena owner to be passed into the
+	 * import alloc and free hooks */
+	IMG_VOID *pImportHandle;
+
+	IMG_PSPLAY_TREE per_flags_buckets;
+	
+	/* resource segment list */
+	BT *pHeadSegment;
+
+	/* segment address to boundary tag hash table */
+	HASH_TABLE *pSegmentHash;
+
+	/* Lock for this arena */
+	POS_LOCK hLock;
+
+	/* LockClass of this arena. This is used within lockdep to decide if a
+	 * recursive call sequence with the same lock class is allowed or not. */
+	IMG_UINT32 ui32LockClass;
+};
+
+/*************************************************************************/ /*!
+@Function       _RequestAllocFail
+@Description    Default callback allocator used if no callback is
+                specified, always fails to allocate further resources to the
+                arena.
+@Input          _h - callback handle
+@Input          _uSize - requested allocation size
+@Output         _pActualSize - actual allocation size
+@Input          _pRef - user reference
+@Input          _uflags - allocation flags
+@Input          _pBase - receives allocated base
+@Return         IMG_FALSE, this function always fails to allocate.
+*/ /**************************************************************************/
+static IMG_BOOL
+_RequestAllocFail (RA_PERARENA_HANDLE _h,
+                   RA_LENGTH_T _uSize,
+                   IMG_UINT32 _uFlags,
+                   RA_BASE_T *_pBase,
+                   RA_LENGTH_T *_pActualSize,
+                   RA_PERISPAN_HANDLE *_phPriv)
+{
+	PVR_UNREFERENCED_PARAMETER (_h);
+	PVR_UNREFERENCED_PARAMETER (_uSize);
+	PVR_UNREFERENCED_PARAMETER (_pActualSize);
+	PVR_UNREFERENCED_PARAMETER (_phPriv);
+	PVR_UNREFERENCED_PARAMETER (_uFlags);
+	PVR_UNREFERENCED_PARAMETER (_pBase);
+
+	return IMG_FALSE;
+}
+
+
+#if defined (HAS_BUILTIN_CTZLL)
+    /* make sure to trigger an error if someone change the buckets or the bHasEltsMapping size
+       the bHasEltsMapping is used to quickly determine the smallest bucket containing elements.
+       therefore it must have at least as many bits has the buckets array have buckets. The RA
+       implementation actually uses one more bit. */
+    BLD_ASSERT((sizeof(((IMG_PSPLAY_TREE) 0)->buckets) / sizeof(((IMG_PSPLAY_TREE) 0)->buckets[0]))
+			   < 8 * sizeof(((IMG_PSPLAY_TREE) 0)->bHasEltsMapping), ra_c);
+#endif 
+
+
+/*************************************************************************/ /*!
+@Function       pvr_log2
+@Description    Computes the floor of the log base 2 of a unsigned integer
+@Input          n       Unsigned integer
+@Return         Floor(Log2(n))
+*/ /**************************************************************************/
+#if defined(__GNUC__)
+/* make sure to trigger a problem if someone changes the RA_LENGTH_T type
+   indeed the __builtin_clzll is for unsigned long long variables.
+
+   if someone changes RA_LENGTH to unsigned long, then use __builtin_clzl
+   if it changes to unsigned int, use __builtin_clz
+
+   if it changes for something bigger than unsigned long long, 
+   then revert the pvr_log2 to the classic implementation */
+BLD_ASSERT(sizeof(RA_LENGTH_T) == sizeof(unsigned long long), ra_c);
+
+static inline IMG_UINT32 pvr_log2(RA_LENGTH_T n)
+{
+	PVR_ASSERT( n != 0 ); /* Log2 is not defined on 0 */
+
+	return (8 * sizeof(RA_LENGTH_T)) - 1 - __builtin_clzll(n);
+}
+#else
+static IMG_UINT32
+pvr_log2 (RA_LENGTH_T n)
+{
+	IMG_UINT32 l = 0;
+
+	PVR_ASSERT( n != 0 ); /* Log2 is not defined on 0 */
+
+	n>>=1;
+	while (n>0)
+	{
+		n>>=1;
+		l++;
+	}
+	return l;
+}
+#endif
+
+
+#if defined(RA_VALIDATE)
+/*************************************************************************/ /*!
+@Function       _IsInSegmentList
+@Description    Tests if a BT is in the segment list.
+@Input          pArena           The arena.
+@Input          pBT              The boundary tag to look for.
+@Return         IMG_FALSE  BT was not in the arena's segment list.
+                IMG_TRUE   BT was in the arena's segment list.
+*/ /**************************************************************************/
+static IMG_BOOL
+_IsInSegmentList (RA_ARENA *pArena,
+                  BT *pBT)
+{
+	BT*  pBTScan;
+
+	PVR_ASSERT (pArena != IMG_NULL);
+	PVR_ASSERT (pBT != IMG_NULL);
+
+	/* Walk the segment list until we see the BT pointer... */
+	pBTScan = pArena->pHeadSegment;
+	while (pBTScan != IMG_NULL  &&  pBTScan != pBT)
+	{
+		pBTScan = pBTScan->pNextSegment;
+	}
+
+	/* Test if we found it and then return */
+	return (pBTScan == pBT);
+}
+
+/*************************************************************************/ /*!
+@Function       _IsInFreeList
+@Description    Tests if a BT is in the free list.
+@Input          pArena           The arena.
+@Input          pBT              The boundary tag to look for.
+@Return         IMG_FALSE  BT was not in the arena's free list.
+                IMG_TRUE   BT was in the arena's free list.
+*/ /**************************************************************************/
+static IMG_BOOL
+_IsInFreeList (RA_ARENA *pArena,
+               BT *pBT)
+{
+	BT*  pBTScan;
+	IMG_UINT32  uIndex;
+
+	PVR_ASSERT (pArena != IMG_NULL);
+	PVR_ASSERT (pBT != IMG_NULL);
+
+	/* Look for the free list that holds BTs of this size... */
+	uIndex  = pvr_log2 (pBT->uSize);
+	PVR_ASSERT (uIndex < FREE_TABLE_LIMIT);
+
+	pArena->per_flags_buckets = PVRSRVSplay(pBT->uFlags, pArena->per_flags_buckets);
+	if ((pArena->per_flags_buckets == NULL) || (pArena->per_flags_buckets->flags != pBT->uFlags))
+	{
+		return 0;
+	}
+	else
+	{
+		pBTScan = pArena->per_flags_buckets->buckets[uIndex];
+		while (pBTScan != IMG_NULL  &&  pBTScan != pBT)
+		{
+			pBTScan = pBTScan->next_free;
+		}
+
+		/* Test if we found it and then return */
+		return (pBTScan == pBT);
+	}
+}
+
+/* is_arena_valid should only be used in debug mode.
+   it checks that some properties an arena must have are verified */
+static int is_arena_valid(struct _RA_ARENA_ * arena)
+{
+	struct _BT_ * chunk;
+#if defined(HAS_BUILTIN_CTZLL)
+	unsigned int i;
+#endif
+
+	for (chunk = arena->pHeadSegment; chunk != NULL; chunk = chunk->pNextSegment)
+	{
+		/* if next segment is NULL, then it must be a rightmost */
+		PVR_ASSERT((chunk->pNextSegment != NULL) || (chunk->is_rightmost));
+		/* if prev segment is NULL, then it must be a leftmost */
+		PVR_ASSERT((chunk->pPrevSegment != NULL) || (chunk->is_leftmost));
+
+		if (chunk->type == btt_free)
+		{
+			/* checks the correctness of the type field */
+			PVR_ASSERT(_IsInFreeList(arena, chunk));
+
+		    /* check that there can't be two consecutive free chunks.
+		       Indeed, instead of having two consecutive free chunks,
+			   there should be only one that span the size of the two. */
+			PVR_ASSERT((chunk->is_leftmost) || (chunk->pPrevSegment->type != btt_free));
+			PVR_ASSERT((chunk->is_rightmost) || (chunk->pNextSegment->type != btt_free));
+		}
+		else
+		{
+			/* checks the correctness of the type field */
+			PVR_ASSERT(!_IsInFreeList(arena, chunk));
+		}
+
+		PVR_ASSERT((chunk->is_leftmost) || (chunk->pPrevSegment->base + chunk->pPrevSegment->uSize == chunk->base));
+		PVR_ASSERT((chunk->is_rightmost) || (chunk->base + chunk->uSize == chunk->pNextSegment->base));
+
+		/* all segments of the same imports must have the same flags ... */
+		PVR_ASSERT((chunk->is_rightmost) || (chunk->uFlags == chunk->pNextSegment->uFlags));
+		/* ... and the same import handle */
+		PVR_ASSERT((chunk->is_rightmost) || (chunk->hPriv == chunk->pNextSegment->hPriv));
+
+
+		/* if a free chunk spans a whole import, then it must be an 'not to free import'.
+		   Otherwise it should have been freed. */
+		PVR_ASSERT((!chunk->is_leftmost) || (!chunk->is_rightmost) || (chunk->type == btt_live) || (!chunk->free_import));
+	}
+
+#if defined(HAS_BUILTIN_CTZLL)
+    if (arena->per_flags_buckets != NULL)
+	{
+		for (i = 0; i < FREE_TABLE_LIMIT; ++i)
+		{
+			/* verify that the bHasEltsMapping is correct for this flags bucket */
+			PVR_ASSERT( 
+				((arena->per_flags_buckets->buckets[i] == NULL) &&
+				 (( (arena->per_flags_buckets->bHasEltsMapping & ((IMG_ELTS_MAPPINGS) 1 << i)) == 0)))
+				||
+				((arena->per_flags_buckets->buckets[i] != NULL) &&
+				 ((  (arena->per_flags_buckets->bHasEltsMapping & ((IMG_ELTS_MAPPINGS) 1 << i)) != 0)))
+				);		
+		}
+	}
+#endif	
+
+	/* if arena was not valid, one of the assert before should have triggered */
+	return 1;
+}
+#endif
+/*************************************************************************/ /*!
+@Function       _SegmentListInsertAfter
+@Description    Insert a boundary tag into an arena segment list after a
+                specified boundary tag.
+@Input          pInsertionPoint  The insertion point.
+@Input          pBT              The boundary tag to insert.
+@Return         PVRSRV_OK (doesn't fail)
+*/ /**************************************************************************/
+static INLINE PVRSRV_ERROR
+_SegmentListInsertAfter (BT *pInsertionPoint,
+						 BT *pBT)
+{
+	PVR_ASSERT (pBT != IMG_NULL);
+	PVR_ASSERT (pInsertionPoint != IMG_NULL);
+
+	pBT->pNextSegment = pInsertionPoint->pNextSegment;
+	pBT->pPrevSegment = pInsertionPoint;
+	if (pInsertionPoint->pNextSegment != IMG_NULL)
+	{
+		pInsertionPoint->pNextSegment->pPrevSegment = pBT;
+	}
+	pInsertionPoint->pNextSegment = pBT;
+
+	return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       _SegmentListInsert
+@Description    Insert a boundary tag into an arena segment list
+@Input          pArena    The arena.
+@Input          pBT       The boundary tag to insert.
+@Return         PVRSRV_OK (doesn't fail)
+*/ /**************************************************************************/
+static INLINE PVRSRV_ERROR
+_SegmentListInsert (RA_ARENA *pArena, BT *pBT)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PVR_ASSERT (!_IsInSegmentList(pArena, pBT));
+
+	/* insert into the segment chain */
+	pBT->pNextSegment = pArena->pHeadSegment;
+	pArena->pHeadSegment = pBT;
+	if (pBT->pNextSegment != NULL)
+	{
+		pBT->pNextSegment->pPrevSegment = pBT;
+	}
+
+	pBT->pPrevSegment = NULL;
+
+	return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       _SegmentListRemove
+@Description    Remove a boundary tag from an arena segment list.
+@Input          pArena    The arena.
+@Input          pBT       The boundary tag to remove.
+*/ /**************************************************************************/
+static IMG_VOID
+_SegmentListRemove (RA_ARENA *pArena, BT *pBT)
+{
+	PVR_ASSERT (_IsInSegmentList(pArena, pBT));
+	
+	if (pBT->pPrevSegment == IMG_NULL)
+		pArena->pHeadSegment = pBT->pNextSegment;
+	else
+		pBT->pPrevSegment->pNextSegment = pBT->pNextSegment;
+
+	if (pBT->pNextSegment != IMG_NULL)
+		pBT->pNextSegment->pPrevSegment = pBT->pPrevSegment;
+}
+
+
+/*************************************************************************/ /*!
+@Function       _BuildBT
+@Description    Construct a boundary tag for a free segment.
+@Input          base     The base of the resource segment.
+@Input          uSize    The extent of the resouce segment.
+@Input          uFlags   The flags to give to the boundary tag
+@Return         Boundary tag or NULL
+*/ /**************************************************************************/
+static BT *
+_BuildBT (RA_BASE_T base,
+          RA_LENGTH_T uSize,
+          RA_FLAGS_T uFlags
+          )
+{
+	BT *pBT;
+
+	pBT = OSAllocMem(sizeof(BT));
+    if (pBT == IMG_NULL)
+	{
+		return IMG_NULL;
+	}
+
+	OSCachedMemSet(pBT, 0, sizeof(BT));
+
+	pBT->is_leftmost = 1;
+	pBT->is_rightmost = 1;
+	pBT->type = btt_live;
+	pBT->base = base;
+	pBT->uSize = uSize;
+    pBT->uFlags = uFlags;
+	pBT->free_import = 0;
+
+	return pBT;
+}
+
+
+/*************************************************************************/ /*!
+@Function       _SegmentSplit
+@Description    Split a segment into two, maintain the arena segment list. The
+                boundary tag should not be in the free table. Neither the
+                original or the new neighbour bounary tag will be in the free
+                table.
+@Input          pBT       The boundary tag to split.
+@Input          uSize     The required segment size of boundary tag after
+                          splitting.
+@Return         New neighbour boundary tag or NULL.
+*/ /**************************************************************************/
+static BT *
+_SegmentSplit (BT *pBT, RA_LENGTH_T uSize)
+{
+	BT *pNeighbour;
+
+	pNeighbour = _BuildBT(pBT->base + uSize, pBT->uSize - uSize, pBT->uFlags);
+    if (pNeighbour == IMG_NULL)
+    {
+        return IMG_NULL;
+    }
+
+	_SegmentListInsertAfter(pBT, pNeighbour);
+
+	pNeighbour->is_leftmost = 0;
+	pNeighbour->is_rightmost = pBT->is_rightmost;
+	pNeighbour->free_import = pBT->free_import;
+	pBT->is_rightmost = 0;
+	pNeighbour->hPriv = pBT->hPriv;
+	pBT->uSize = uSize;
+	pNeighbour->uFlags = pBT->uFlags;
+
+	return pNeighbour;
+}
+
+/*************************************************************************/ /*!
+@Function       _FreeListInsert
+@Description    Insert a boundary tag into an arena free table.
+@Input          pArena    The arena.
+@Input          pBT       The boundary tag.
+*/ /**************************************************************************/
+static IMG_VOID
+_FreeListInsert (RA_ARENA *pArena, BT *pBT)
+{
+	IMG_UINT32 uIndex;
+	uIndex = pvr_log2 (pBT->uSize);
+
+	PVR_ASSERT (uIndex < FREE_TABLE_LIMIT);
+	PVR_ASSERT (!_IsInFreeList(pArena, pBT));
+
+	pBT->type = btt_free;
+
+	pArena->per_flags_buckets = PVRSRVSplay(pBT->uFlags, pArena->per_flags_buckets);
+	/* the flags item in the splay tree must have been created before-hand by
+	   _InsertResource */
+	PVR_ASSERT(pArena->per_flags_buckets != NULL);
+	PVR_ASSERT(pArena->per_flags_buckets->buckets != NULL);
+
+	pBT->next_free = pArena->per_flags_buckets->buckets[uIndex];
+	if (pBT->next_free != NULL)
+	{
+		pBT->next_free->prev_free = pBT;
+	}
+	pBT->prev_free = NULL;
+	pArena->per_flags_buckets->buckets[uIndex] = pBT;
+
+#if defined(HAS_BUILTIN_CTZLL)
+	/* tells that bucket[index] now contains elements */
+    pArena->per_flags_buckets->bHasEltsMapping |= ((IMG_ELTS_MAPPINGS) 1 << uIndex);
+#endif
+}
+
+/*************************************************************************/ /*!
+@Function       _FreeListRemove
+@Description    Remove a boundary tag from an arena free table.
+@Input          pArena    The arena.
+@Input          pBT       The boundary tag.
+*/ /**************************************************************************/
+static IMG_VOID
+_FreeListRemove (RA_ARENA *pArena, BT *pBT)
+{
+	IMG_UINT32 uIndex;
+	uIndex = pvr_log2 (pBT->uSize);
+
+	PVR_ASSERT (uIndex < FREE_TABLE_LIMIT);
+	PVR_ASSERT (_IsInFreeList(pArena, pBT));
+
+	if (pBT->next_free != NULL)
+	{
+		pBT->next_free->prev_free = pBT->prev_free;
+	}
+
+	if (pBT->prev_free != NULL)
+	{
+		pBT->prev_free->next_free = pBT->next_free;
+	}
+	else
+	{
+		pArena->per_flags_buckets = PVRSRVSplay(pBT->uFlags, pArena->per_flags_buckets);
+		/* the flags item in the splay tree must have already been created
+		   (otherwise how could there be a segment with these flags */
+		PVR_ASSERT(pArena->per_flags_buckets != NULL);
+		PVR_ASSERT(pArena->per_flags_buckets->buckets != NULL);
+
+		pArena->per_flags_buckets->buckets[uIndex] = pBT->next_free;
+#if defined(HAS_BUILTIN_CTZLL)
+		if (pArena->per_flags_buckets->buckets[uIndex] == NULL)
+		{
+			/* there is no more elements in this bucket. Update the mapping. */
+			pArena->per_flags_buckets->bHasEltsMapping &= ~((IMG_ELTS_MAPPINGS) 1 << uIndex);
+		}
+#endif
+	}
+	
+
+	PVR_ASSERT (!_IsInFreeList(pArena, pBT));
+	pBT->type = btt_live;
+}
+
+
+/*************************************************************************/ /*!
+@Function       _InsertResource
+@Description    Add a free resource segment to an arena.
+@Input          pArena    The arena.
+@Input          base      The base of the resource segment.
+@Input          uSize     The extent of the resource segment.
+@Input          uFlags    The flags of the new resources.
+@Return         New bucket pointer
+                IMG_NULL on failure
+*/ /**************************************************************************/
+static BT *
+_InsertResource (RA_ARENA *pArena,
+                 RA_BASE_T base,
+                 RA_LENGTH_T uSize,
+                 RA_FLAGS_T uFlags
+                 )
+{
+	BT *pBT;
+	PVR_ASSERT (pArena!=IMG_NULL);
+
+	pBT = _BuildBT (base, uSize, uFlags);
+
+	if (pBT != IMG_NULL)
+	{
+		IMG_PSPLAY_TREE tmp = PVRSRVInsert(pBT->uFlags, pArena->per_flags_buckets);
+		if (tmp == NULL)
+		{
+			OSFreeMem(pBT);
+			return NULL;
+		}
+		
+		pArena->per_flags_buckets = tmp;
+		_SegmentListInsert (pArena, pBT);
+		_FreeListInsert (pArena, pBT);
+	}
+	return pBT;
+}
+
+/*************************************************************************/ /*!
+@Function       _InsertResourceSpan
+@Description    Add a free resource span to an arena, marked for free_import.
+@Input          pArena    The arena.
+@Input          base      The base of the resource segment.
+@Input          uSize     The extent of the resource segment.
+@Return         The boundary tag representing the free resource segment,
+                or IMG_NULL on failure.
+*/ /**************************************************************************/
+static INLINE BT *
+_InsertResourceSpan (RA_ARENA *pArena,
+                     RA_BASE_T base,
+                     RA_LENGTH_T uSize,
+                     RA_FLAGS_T uFlags)
+{
+	BT *pBT = _InsertResource(pArena, base, uSize, uFlags);
+	if (pBT != NULL)
+	{
+		pBT->free_import = 1;
+	}
+	return pBT;
+}
+
+
+/*************************************************************************/ /*!
+@Function       _RemoveResourceSpan
+@Description    Frees a resource span from an arena, returning the imported
+				span via the callback.
+@Input          pArena     The arena.
+@Input          pBT        The boundary tag to free.
+@Return         IMG_FALSE failure - span was still in use
+                IMG_TRUE  success - span was removed and returned
+*/ /**************************************************************************/
+static INLINE IMG_BOOL
+_RemoveResourceSpan (RA_ARENA *pArena, BT *pBT)
+{
+	PVR_ASSERT (pArena!=IMG_NULL);
+	PVR_ASSERT (pBT!=IMG_NULL);
+
+	if (pBT->free_import &&
+		pBT->is_leftmost &&
+		pBT->is_rightmost)
+	{
+		_SegmentListRemove (pArena, pBT);
+		pArena->pImportFree (pArena->pImportHandle, pBT->base, pBT->hPriv);
+		OSFreeMem(pBT);
+
+		return IMG_TRUE;
+	}
+
+
+	return IMG_FALSE;
+}
+
+
+/*************************************************************************/ /*!
+@Function       _FreeBT
+@Description    Free a boundary tag taking care of the segment list and the
+                boundary tag free table.
+@Input          pArena     The arena.
+@Input          pBT        The boundary tag to free.
+*/ /**************************************************************************/
+static IMG_VOID
+_FreeBT (RA_ARENA *pArena, BT *pBT)
+{
+	BT *pNeighbour;
+
+	PVR_ASSERT (pArena!=IMG_NULL);
+	PVR_ASSERT (pBT!=IMG_NULL);
+	PVR_ASSERT (!_IsInFreeList(pArena, pBT));
+
+	/* try and coalesce with left neighbour */
+	pNeighbour = pBT->pPrevSegment;
+	if ((!pBT->is_leftmost)	&& (pNeighbour->type == btt_free))
+	{
+		/* Sanity check. */
+		PVR_ASSERT(pNeighbour->base + pNeighbour->uSize == pBT->base);
+
+		_FreeListRemove (pArena, pNeighbour);
+		_SegmentListRemove (pArena, pNeighbour);
+		pBT->base = pNeighbour->base;
+
+		pBT->uSize += pNeighbour->uSize;
+		pBT->is_leftmost = pNeighbour->is_leftmost;
+        OSFreeMem(pNeighbour);
+	}
+
+	/* try to coalesce with right neighbour */
+	pNeighbour = pBT->pNextSegment;
+	if ((!pBT->is_rightmost) && (pNeighbour->type == btt_free))
+	{
+		/* sanity check */
+		PVR_ASSERT(pBT->base + pBT->uSize == pNeighbour->base);
+
+		_FreeListRemove (pArena, pNeighbour);
+		_SegmentListRemove (pArena, pNeighbour);
+		pBT->uSize += pNeighbour->uSize;
+		pBT->is_rightmost = pNeighbour->is_rightmost;
+		OSFreeMem(pNeighbour);
+	}
+
+	if (_RemoveResourceSpan(pArena, pBT) == IMG_FALSE)
+	{
+		_FreeListInsert (pArena, pBT);
+		PVR_ASSERT( (!pBT->is_rightmost) || (!pBT->is_leftmost) || (!pBT->free_import) );
+	}
+	
+	PVR_ASSERT(is_arena_valid(pArena));
+}
+
+
+/*
+  This function returns the first element in a bucket that can be split
+  in a way that one of the subsegment can meet the size and alignment
+  criteria.
+
+  The first_elt is the bucket to look into. Remember that a bucket is
+  implemented as a pointer to the first element of the linked list.
+
+  nb_max_try is used to limit the number of elements considered.
+  This is used to only consider the first nb_max_try elements in the
+  free-list. The special value ~0 is used to say unlimited i.e. consider
+  all elements in the free list 
+ */
+static INLINE
+struct _BT_ * find_chunk_in_bucket(struct _BT_ * first_elt,
+								   RA_LENGTH_T uSize,
+								   RA_LENGTH_T uAlignment,
+								   unsigned int nb_max_try)
+{
+	struct _BT_ * walker;
+
+	for (walker = first_elt; (walker != NULL) && (nb_max_try != 0); walker = walker->next_free)
+	{
+		const RA_BASE_T aligned_base = (uAlignment > 1) ?
+			(walker->base + uAlignment - 1) & ~(uAlignment - 1)
+			: walker->base;
+		
+		if (walker->base + walker->uSize >= aligned_base + uSize)
+		{
+			return walker;
+		}
+
+		/* 0xFFFF...FFFF is used has nb_max_try = infinity. */
+		if (nb_max_try != (unsigned int) ~0)
+		{
+			nb_max_try--;
+		}
+	}
+
+	return NULL;
+}
+
+
+/*************************************************************************/ /*!
+@Function       _AttemptAllocAligned
+@Description    Attempt an allocation from an arena.
+@Input          pArena       The arena.
+@Input          uSize        The requested allocation size.
+@Output         phPriv		 The user references associated with
+                             the imported segment. (optional)
+@Input          flags        Allocation flags
+@Input          uAlignment   Required uAlignment, or 0. 
+                             Must be a power of 2 if not 0
+@Output         base         Allocated resource base (non optional, must not be NULL)
+@Return         IMG_FALSE failure
+                IMG_TRUE success
+*/ /**************************************************************************/
+static IMG_BOOL
+_AttemptAllocAligned (RA_ARENA *pArena,
+					  RA_LENGTH_T uSize,
+					  IMG_UINT32 uFlags,
+					  RA_LENGTH_T uAlignment,
+					  RA_BASE_T *base,
+                      RA_PERISPAN_HANDLE *phPriv) /* this is the "per-import" private data */
+{
+
+	IMG_UINT32 index_low;
+	IMG_UINT32 index_high; 
+	IMG_UINT32 i; 
+	struct _BT_ * pBT = NULL;
+	RA_BASE_T aligned_base;
+
+	PVR_ASSERT (pArena!=IMG_NULL);
+	PVR_ASSERT (base != NULL);
+
+	pArena->per_flags_buckets = PVRSRVSplay(uFlags, pArena->per_flags_buckets);
+	if ((pArena->per_flags_buckets == NULL) || (pArena->per_flags_buckets->ui32Flags != uFlags))
+	{
+		/* no chunks with these flags. */
+		return IMG_FALSE;
+	}
+
+	index_low = pvr_log2(uSize);
+	index_high = pvr_log2(uSize + uAlignment - 1);
+	
+	PVR_ASSERT(index_low < FREE_TABLE_LIMIT);
+	PVR_ASSERT(index_high < FREE_TABLE_LIMIT);
+	PVR_ASSERT(index_low <= index_high);
+
+#if defined(HAS_BUILTIN_CTZLL)
+	i = __builtin_ctzll((IMG_ELTS_MAPPINGS) (~((1 << (index_high + 1)) - 1)) & pArena->per_flags_buckets->bHasEltsMapping);
+#else
+ 	for (i = index_high + 1; (i < FREE_TABLE_LIMIT) && (pArena->per_flags_buckets->buckets[i] == NULL); ++i)
+	{
+	}
+#endif
+	PVR_ASSERT(i <= FREE_TABLE_LIMIT);
+
+	if (i != FREE_TABLE_LIMIT)
+	{
+		/* since we start at index_high + 1, we are guarantee to exit */
+		pBT = find_chunk_in_bucket(pArena->per_flags_buckets->buckets[i], uSize, uAlignment, 1);
+	}
+	else
+	{
+		for (i = index_high; (i != index_low - 1) && (pBT == NULL); --i)
+		{
+			pBT = find_chunk_in_bucket(pArena->per_flags_buckets->buckets[i], uSize, uAlignment, (unsigned int) ~0);			
+		}
+	}
+
+	if (pBT == NULL)
+	{
+		return IMG_FALSE;
+	}
+
+	aligned_base = (uAlignment > 1) ? (pBT->base + uAlignment - 1) & ~(uAlignment - 1) : pBT->base;
+
+	_FreeListRemove (pArena, pBT);
+
+	/* with uAlignment we might need to discard the front of this segment */
+	if (aligned_base > pBT->base)
+	{
+		BT *pNeighbour;
+		pNeighbour = _SegmentSplit (pBT, (RA_LENGTH_T)(aligned_base - pBT->base));
+		/* partition the buffer, create a new boundary tag */
+		if (pNeighbour == NULL)
+		{
+			PVR_DPF ((PVR_DBG_ERROR, "%s: Front split failed", __FUNCTION__));
+			/* Put pBT back in the list */
+			_FreeListInsert (pArena, pBT);
+			return IMG_FALSE;
+		}
+
+		_FreeListInsert(pArena, pBT);
+		pBT = pNeighbour;
+	}
+
+	/* the segment might be too big, if so, discard the back of the segment */
+	if (pBT->uSize > uSize)
+	{
+		BT *pNeighbour;
+		pNeighbour = _SegmentSplit(pBT, uSize);
+		/* partition the buffer, create a new boundary tag */
+		if (pNeighbour == NULL)
+		{
+			PVR_DPF ((PVR_DBG_ERROR, "%s: Back split failed", __FUNCTION__));
+			/* Put pBT back in the list */
+			_FreeListInsert (pArena, pBT);
+			return IMG_FALSE;
+		}
+	
+		_FreeListInsert (pArena, pNeighbour);
+	}
+
+	pBT->type = btt_live;
+	
+	if (!HASH_Insert_Extended (pArena->pSegmentHash, &pBT->base, (IMG_UINTPTR_T)pBT))
+	{
+		_FreeBT (pArena, pBT);
+		return IMG_FALSE;
+	}
+	
+	if (phPriv != IMG_NULL)
+		*phPriv = pBT->hPriv;
+	
+	*base = pBT->base;
+	
+	return IMG_TRUE;
+}
+
+
+
+/*************************************************************************/ /*!
+@Function       RA_Create
+@Description    To create a resource arena.
+@Input          name          The name of the arena for diagnostic purposes.
+@Input          base          The base of an initial resource span or 0.
+@Input          uSize         The size of an initial resource span or 0.
+@Input          uFlags        The flags of an initial resource span or 0.
+@Input          ulog2Quantum  The arena allocation quantum.
+@Input          imp_alloc     A resource allocation callback or 0.
+@Input          imp_free      A resource de-allocation callback or 0.
+@Input          pImportHandle Handle passed to alloc and free or 0.
+@Return         arena handle, or IMG_NULL.
+*/ /**************************************************************************/
+IMG_INTERNAL RA_ARENA *
+RA_Create (IMG_CHAR *name,
+		   RA_LOG2QUANTUM_T uLog2Quantum,
+		   IMG_UINT32 ui32LockClass,
+		   IMG_BOOL (*imp_alloc)(RA_PERARENA_HANDLE h, 
+                                 RA_LENGTH_T uSize,
+                                 RA_FLAGS_T _flags, 
+                                 /* returned data */
+                                 RA_BASE_T *pBase,
+                                 RA_LENGTH_T *pActualSize,
+                                 RA_PERISPAN_HANDLE *phPriv),
+		   IMG_VOID (*imp_free) (RA_PERARENA_HANDLE,
+                                 RA_BASE_T,
+                                 RA_PERISPAN_HANDLE),
+		   RA_PERARENA_HANDLE arena_handle)
+{
+	RA_ARENA *pArena;
+	PVRSRV_ERROR eError;
+
+	if (name == NULL)
+	{
+		PVR_DPF ((PVR_DBG_ERROR, "RA_Create: invalid parameter 'name' (NULL not accepted)"));
+		return NULL;
+	}
+	
+	PVR_DPF ((PVR_DBG_MESSAGE, "RA_Create: name='%s'", name));
+
+	pArena = OSAllocMem(sizeof (*pArena));
+    if (pArena == IMG_NULL)
+	{
+		goto arena_fail;
+	}
+
+	eError = OSLockCreate(&pArena->hLock, LOCK_TYPE_PASSIVE);
+	if (eError != PVRSRV_OK)
+	{
+		goto lock_fail;
+	}
+
+	pArena->pSegmentHash = HASH_Create_Extended(MINIMUM_HASH_SIZE, sizeof(RA_BASE_T), HASH_Func_Default, HASH_Key_Comp_Default);
+
+	if (pArena->pSegmentHash==IMG_NULL)
+	{
+		goto hash_fail;
+	}
+
+	pArena->name = name;
+	pArena->pImportAlloc = (imp_alloc!=IMG_NULL) ? imp_alloc : &_RequestAllocFail;
+	pArena->pImportFree = imp_free;
+	pArena->pImportHandle = arena_handle;
+	pArena->pHeadSegment = IMG_NULL;
+	pArena->uQuantum = (IMG_UINT64) (1 << uLog2Quantum);
+	pArena->per_flags_buckets = NULL;
+	pArena->ui32LockClass = ui32LockClass;
+
+	PVR_ASSERT(is_arena_valid(pArena));
+	return pArena;
+
+hash_fail:
+	OSLockDestroy(pArena->hLock);
+lock_fail:
+	OSFreeMem(pArena);
+	/*not nulling pointer, out of scope*/
+arena_fail:
+	return IMG_NULL;
+}
+
+/*************************************************************************/ /*!
+@Function       RA_Delete
+@Description    To delete a resource arena. All resources allocated from
+                the arena must be freed before deleting the arena.
+@Input          pArena        The arena to delete.
+*/ /**************************************************************************/
+IMG_INTERNAL IMG_VOID
+RA_Delete (RA_ARENA *pArena)
+{
+	IMG_UINT32 uIndex;
+
+	PVR_ASSERT(pArena != IMG_NULL);
+
+	if (pArena == IMG_NULL)
+	{
+		PVR_DPF ((PVR_DBG_ERROR,"RA_Delete: invalid parameter - pArena"));
+		return;
+	}
+
+	PVR_ASSERT(is_arena_valid(pArena));
+
+	PVR_DPF ((PVR_DBG_MESSAGE,
+			  "RA_Delete: name='%s'", pArena->name));
+
+	while (pArena->pHeadSegment != IMG_NULL)
+	{
+		BT *pBT = pArena->pHeadSegment;
+
+		if (pBT->type != btt_free)
+		{
+			PVR_DPF ((PVR_DBG_ERROR, "RA_Delete: allocations still exist in the arena that is being destroyed"));
+			PVR_DPF ((PVR_DBG_ERROR, "Likely Cause: client drivers not freeing alocations before destroying devmemcontext"));
+			PVR_DPF ((PVR_DBG_ERROR, "RA_Delete: base = 0x%llx size=0x%llx",
+					  (unsigned long long)pBT->base, (unsigned long long)pBT->uSize));
+		}
+		else
+		{
+			_FreeListRemove(pArena, pBT);
+		}
+
+		_SegmentListRemove (pArena, pBT);
+		OSFreeMem(pBT);
+		/*not nulling original pointer, it has changed*/
+	}
+
+	while (pArena->per_flags_buckets != NULL)
+	{
+		for (uIndex=0; uIndex<FREE_TABLE_LIMIT; uIndex++)
+		{
+			PVR_ASSERT(pArena->per_flags_buckets->buckets[uIndex] == IMG_NULL);
+		}
+
+		pArena->per_flags_buckets = PVRSRVDelete(pArena->per_flags_buckets->ui32Flags, pArena->per_flags_buckets);
+	}
+
+	HASH_Delete (pArena->pSegmentHash);
+	OSLockDestroy(pArena->hLock);
+	OSFreeMem(pArena);
+	/*not nulling pointer, copy on stack*/
+}
+
+/*************************************************************************/ /*!
+@Function       RA_Add
+@Description    To add a resource span to an arena. The span must not
+                overlapp with any span previously added to the arena.
+@Input          pArena     The arena to add a span into.
+@Input          base       The base of the span.
+@Input          uSize      The extent of the span.
+@Input			uFlags	   the flags of the new import
+@Input			hPriv	   a private handle associate to the span. (reserved for user)
+@Return         IMG_TRUE - Success
+                IMG_FALSE - failure
+*/ /**************************************************************************/
+IMG_INTERNAL IMG_BOOL
+RA_Add (RA_ARENA *pArena,
+		RA_BASE_T base,
+		RA_LENGTH_T uSize,
+		RA_FLAGS_T uFlags,
+		RA_PERISPAN_HANDLE hPriv)
+{
+	struct _BT_* bt;
+	PVR_ASSERT (pArena != IMG_NULL);
+	PVR_ASSERT (uSize != 0);
+
+	if (pArena == IMG_NULL)
+	{
+		PVR_DPF ((PVR_DBG_ERROR,"RA_Add: invalid parameter - pArena"));
+		return IMG_FALSE;
+	}
+
+	OSLockAcquireNested(pArena->hLock, pArena->ui32LockClass);
+	PVR_ASSERT(is_arena_valid(pArena));
+	PVR_DPF ((PVR_DBG_MESSAGE, "RA_Add: name='%s', "
+              "base=0x%llx, size=0x%llx", pArena->name,
+			  (unsigned long long)base, (unsigned long long)uSize));
+
+	uSize = (uSize + pArena->uQuantum - 1) & ~(pArena->uQuantum - 1);
+	bt = _InsertResource(pArena, base, uSize, uFlags);
+	if (bt != NULL)
+	{
+		bt->hPriv = hPriv;
+	}
+
+	PVR_ASSERT(is_arena_valid(pArena));
+	OSLockRelease(pArena->hLock);
+
+	return bt != NULL;
+}
+
+/*************************************************************************/ /*!
+@Function       RA_Alloc
+@Description    To allocate resource from an arena.
+@Input          pArena         The arena
+@Input          uRequestSize   The size of resource segment requested.
+@Output         pActualSize    The actual size of resource segment
+                               allocated, typcially rounded up by quantum.
+@Output         phPriv         The user reference associated with allocated resource span.
+@Input          uFlags         Flags influencing allocation policy.
+@Input          uAlignment     The uAlignment constraint required for the
+                               allocated segment, use 0 if uAlignment not required, otherwise
+                               must be a power of 2.
+@Output         base           Allocated base resource
+@Return         IMG_TRUE - success
+                IMG_FALSE - failure
+*/ /**************************************************************************/
+IMG_INTERNAL IMG_BOOL
+RA_Alloc (RA_ARENA *pArena,
+		  RA_LENGTH_T uRequestSize,
+		  RA_FLAGS_T uFlags,
+		  RA_LENGTH_T uAlignment,
+		  RA_BASE_T *base,
+		  RA_LENGTH_T *pActualSize,
+          RA_PERISPAN_HANDLE *phPriv)
+{
+	IMG_BOOL bResult;
+	RA_LENGTH_T uSize = uRequestSize;
+
+	PVR_ASSERT (pArena!=IMG_NULL);
+	PVR_ASSERT (uSize > 0);
+
+	if (pArena == IMG_NULL)
+	{
+		PVR_DPF ((PVR_DBG_ERROR,"RA_Alloc: invalid parameter - pArena"));
+		return IMG_FALSE;
+	}
+
+	OSLockAcquireNested(pArena->hLock, pArena->ui32LockClass);
+	PVR_ASSERT(is_arena_valid(pArena));
+
+	if (pActualSize != IMG_NULL)
+	{
+		*pActualSize = uSize;
+	}
+
+	/* Must be a power of 2 or 0 */
+	PVR_ASSERT((uAlignment == 0) || (uAlignment & (uAlignment - 1)) == 0);
+
+	PVR_DPF ((PVR_DBG_MESSAGE,
+			  "RA_Alloc: arena='%s', size=0x%llx(0x%llx), "
+              "alignment=0x%llx", pArena->name,
+			  (unsigned long long)uSize, (unsigned long long)uRequestSize,
+			  (unsigned long long)uAlignment));
+
+	/* if allocation failed then we might have an import source which
+	   can provide more resource, else we will have to fail the
+	   allocation to the caller. */
+	bResult = _AttemptAllocAligned (pArena, uSize, uFlags, uAlignment, base, phPriv);
+	if (!bResult)
+	{
+        IMG_HANDLE hPriv;
+		RA_BASE_T import_base;
+		RA_LENGTH_T uImportSize = uSize;
+
+		/*
+			Ensure that we allocate sufficient space to meet the uAlignment
+			constraint
+		 */
+		if (uAlignment > pArena->uQuantum)
+		{
+			uImportSize += (uAlignment - pArena->uQuantum);
+		}
+
+		/* ensure that we import according to the quanta of this arena */
+		uImportSize = (uImportSize + pArena->uQuantum - 1) & ~(pArena->uQuantum - 1);
+
+		bResult =
+			pArena->pImportAlloc (pArena->pImportHandle, uImportSize, uFlags,
+                                  &import_base, &uImportSize, &hPriv);
+		if (bResult)
+		{
+			BT *pBT;
+			pBT = _InsertResourceSpan (pArena, import_base, uImportSize, uFlags);
+			/* successfully import more resource, create a span to
+			   represent it and retry the allocation attempt */
+			if (pBT == IMG_NULL)
+			{
+				/* insufficient resources to insert the newly acquired span,
+				   so free it back again */
+				pArena->pImportFree(pArena->pImportHandle, import_base, hPriv);
+
+				PVR_DPF ((PVR_DBG_MESSAGE, "RA_Alloc: name='%s', "
+                          "size=0x%llx failed!", pArena->name,
+						  (unsigned long long)uSize));
+				/* RA_Dump (arena); */
+				OSLockRelease(pArena->hLock);
+				return IMG_FALSE;
+			}
+
+
+            pBT->hPriv = hPriv;
+
+			bResult = _AttemptAllocAligned(pArena, uSize, uFlags, uAlignment, base, phPriv);
+			if (!bResult)
+			{
+				PVR_DPF ((PVR_DBG_ERROR,
+						  "RA_Alloc: name='%s' second alloc failed!",
+						  pArena->name));
+
+				/*
+				  On failure of _AttemptAllocAligned() depending on the exact point
+				  of failure, the imported segment may have been used and freed, or
+				  left untouched. If the later, we need to return it.
+				*/
+				_FreeBT(pArena, pBT);
+			}
+			else
+			{
+				/* Check if the new allocation was in the span we just added... */
+				if (*base < import_base  ||  *base > (import_base + uImportSize))
+				{
+					PVR_DPF ((PVR_DBG_ERROR,
+							  "RA_Alloc: name='%s' alloc did not occur in the imported span!",
+							  pArena->name));
+
+					/*
+					  Remove the imported span which should not be in use (if it is then
+					  that is okay, but essentially no span should exist that is not used).
+					*/
+					_FreeBT(pArena, pBT);
+				}
+			}
+		}
+	}
+
+	PVR_DPF ((PVR_DBG_MESSAGE, "RA_Alloc: name='%s', size=0x%llx, "
+              "*base=0x%llx = %d",pArena->name, (unsigned long long)uSize,
+			  (unsigned long long)*base, bResult));
+
+	PVR_ASSERT(is_arena_valid(pArena));
+
+	OSLockRelease(pArena->hLock);
+	return bResult;
+}
+
+
+
+
+/*************************************************************************/ /*!
+@Function       RA_Free
+@Description    To free a resource segment.
+@Input          pArena     The arena the segment was originally allocated from.
+@Input          base       The base of the resource span to free.
+*/ /**************************************************************************/
+IMG_INTERNAL IMG_VOID
+RA_Free (RA_ARENA *pArena, RA_BASE_T base)
+{
+	BT *pBT;
+
+	PVR_ASSERT (pArena != IMG_NULL);
+
+	if (pArena == IMG_NULL)
+	{
+		PVR_DPF ((PVR_DBG_ERROR,"RA_Free: invalid parameter - pArena"));
+		return;
+	}
+
+	OSLockAcquireNested(pArena->hLock, pArena->ui32LockClass);
+	PVR_ASSERT(is_arena_valid(pArena));
+
+	PVR_DPF ((PVR_DBG_MESSAGE, "RA_Free: name='%s', base=0x%llx", pArena->name,
+			  (unsigned long long)base));
+
+	pBT = (BT *) HASH_Remove_Extended (pArena->pSegmentHash, &base);
+	PVR_ASSERT (pBT != IMG_NULL);
+
+	if (pBT)
+	{
+		PVR_ASSERT (pBT->base == base);
+		_FreeBT (pArena, pBT);
+	}
+
+	PVR_ASSERT(is_arena_valid(pArena));
+	OSLockRelease(pArena->hLock);
+}
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/shared/common/sync.c b/drivers/external_drivers/intel_media/graphics/rgx/services/shared/common/sync.c
new file mode 100644
index 0000000..180cfd5
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/shared/common/sync.c
@@ -0,0 +1,1863 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services synchronisation interface
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implements client side code for services synchronisation
+                interface
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_types.h"
+#include "client_sync_bridge.h"
+#include "allocmem.h"
+#include "osfunc.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "pvr_debug.h"
+#include "dllist.h"
+#include "sync.h"
+#include "sync_internal.h"
+#include "lock.h"
+#include "pvr_debug.h"
+/* FIXME */
+#if defined(__KERNEL__)
+#include "pvrsrv.h"
+#endif
+
+
+#define SYNC_BLOCK_LIST_CHUNCK_SIZE	10
+#define LOCAL_SYNC_PRIM_RESET_VALUE 0
+
+/*
+	This defines the maximum amount of synchronisation memory
+	that can be allocated per SyncPrim context.
+	In reality this number is meaningless as we would run out
+	of synchronisation memory before we reach this limit, but
+	we need to provide a size to the span RA.
+*/
+#define MAX_SYNC_MEM				(4 * 1024 * 1024)
+
+typedef struct _SYNC_BLOCK_LIST_
+{
+	IMG_UINT32			ui32BlockCount;			/*!< Number of contexts in the list */
+	IMG_UINT32			ui32BlockListSize;		/*!< Size of the array contexts */
+	SYNC_PRIM_BLOCK		**papsSyncPrimBlock;	/*!< Array of syncprim blocks */
+} SYNC_BLOCK_LIST;
+
+typedef struct _SYNC_OP_COOKIE_
+{
+	IMG_UINT32				ui32SyncCount;
+	IMG_UINT32				ui32ClientSyncCount;
+	IMG_UINT32				ui32ServerSyncCount;
+	IMG_BOOL				bHaveServerSync;
+	IMG_HANDLE				hBridge;
+	IMG_HANDLE				hServerCookie;
+
+	SYNC_BLOCK_LIST			*psSyncBlockList;
+	PVRSRV_CLIENT_SYNC_PRIM	**papsSyncPrim;
+	/*
+		Client sync(s) info.
+		If this changes update the calculation of ui32ClientAllocSize
+	*/
+	IMG_UINT32				*paui32SyncBlockIndex;
+	IMG_UINT32				*paui32Index;
+	IMG_UINT32				*paui32Flags;
+	IMG_UINT32				*paui32FenceValue;
+	IMG_UINT32				*paui32UpdateValue;
+
+	/*
+		Server sync(s) info
+		If this changes update the calculation of ui32ServerAllocSize
+	*/
+	IMG_HANDLE				*pahServerSync;
+	IMG_UINT32              *paui32ServerFlags;
+} SYNC_OP_COOKIE;
+
+/* forward declaration */
+static IMG_VOID
+_SyncPrimSetValue(SYNC_PRIM *psSyncInt, IMG_UINT32 ui32Value);
+
+/*
+	Internal interfaces for management of synchronisation block memory
+*/
+static PVRSRV_ERROR
+AllocSyncPrimitiveBlock(SYNC_PRIM_CONTEXT *psContext,
+						SYNC_PRIM_BLOCK **ppsSyncBlock)
+{
+	SYNC_PRIM_BLOCK *psSyncBlk;
+	DEVMEM_SERVER_EXPORTCOOKIE hServerExportCookie;
+	DEVMEM_EXPORTCOOKIE sExportCookie;
+	PVRSRV_ERROR eError;
+
+	psSyncBlk = OSAllocMem(sizeof(SYNC_PRIM_BLOCK));
+	if (psSyncBlk == IMG_NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_alloc;
+	}
+	psSyncBlk->psContext = psContext;
+
+	/* Allocate sync prim block */
+	eError = BridgeAllocSyncPrimitiveBlock(psContext->hBridge,
+										   psContext->hDeviceNode,
+										   &psSyncBlk->hServerSyncPrimBlock,
+										   &psSyncBlk->ui32FirmwareAddr,
+										   &psSyncBlk->ui32SyncBlockSize,
+										   &hServerExportCookie);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_blockalloc;
+	}
+
+	/* Make it mappable by the client */
+	eError = DevmemMakeServerExportClientExport(psContext->hBridge,
+												hServerExportCookie,
+												&sExportCookie);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_export;
+	}
+
+	/* Get CPU mapping of the memory block */
+	eError = DevmemImport(psContext->hBridge,
+						  &sExportCookie,
+						  PVRSRV_MEMALLOCFLAG_CPU_READABLE,
+						  &psSyncBlk->hMemDesc);
+
+	/*
+		Regardless of success or failure we "undo" the export
+	*/
+	DevmemUnmakeServerExportClientExport(psContext->hBridge,
+										 &sExportCookie);
+
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_import;
+	}
+
+	eError = DevmemAcquireCpuVirtAddr(psSyncBlk->hMemDesc,
+									  (IMG_PVOID *) &psSyncBlk->pui32LinAddr);
+	if (eError != PVRSRV_OK)
+	{
+		goto fail_cpuvaddr;
+	}
+
+	*ppsSyncBlock = psSyncBlk;
+	return PVRSRV_OK;
+
+fail_cpuvaddr:
+	DevmemFree(psSyncBlk->hMemDesc);
+fail_import:
+fail_export:
+	BridgeFreeSyncPrimitiveBlock(psContext->hBridge,
+								 psSyncBlk->hServerSyncPrimBlock);
+fail_blockalloc:
+	OSFreeMem(psSyncBlk);
+fail_alloc:
+	return eError;
+}
+
+static IMG_VOID
+FreeSyncPrimitiveBlock(SYNC_PRIM_BLOCK *psSyncBlk)
+{
+	SYNC_PRIM_CONTEXT *psContext = psSyncBlk->psContext;
+
+	DevmemReleaseCpuVirtAddr(psSyncBlk->hMemDesc);
+	DevmemFree(psSyncBlk->hMemDesc);
+	BridgeFreeSyncPrimitiveBlock(psContext->hBridge,
+								 psSyncBlk->hServerSyncPrimBlock);
+	OSFreeMem(psSyncBlk);
+}
+
+static IMG_BOOL
+SyncPrimBlockImport(RA_PERARENA_HANDLE hArena,
+					RA_LENGTH_T uSize,
+					RA_FLAGS_T uFlags,
+					RA_BASE_T *puiBase,
+					RA_LENGTH_T *puiActualSize,
+					RA_PERISPAN_HANDLE *phImport)
+{
+	SYNC_PRIM_CONTEXT *psContext = hArena;
+	SYNC_PRIM_BLOCK *psSyncBlock = IMG_NULL;
+	RA_LENGTH_T uiSpanSize;
+	PVRSRV_ERROR eError;
+	IMG_BOOL bRet;
+	PVR_UNREFERENCED_PARAMETER(uFlags);
+
+	PVR_ASSERT(hArena != IMG_NULL);
+
+	/* Check we've not be called with an unexpected size */
+	PVR_ASSERT(uSize == sizeof(IMG_UINT32));
+
+	/*
+		Ensure the synprim context doesn't go away while we have sync blocks
+		attached to it
+	*/
+	OSLockAcquire(psContext->hLock);
+	psContext->ui32RefCount++;
+	OSLockRelease(psContext->hLock);
+
+	/* Allocate the block of memory */
+	eError = AllocSyncPrimitiveBlock(psContext, &psSyncBlock);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Failed to allocation syncprim block (%d)", eError));
+		goto fail_syncblockalloc;
+	}
+
+	/* Allocate a span for it */
+	bRet = RA_Alloc(psContext->psSpanRA,
+					psSyncBlock->ui32SyncBlockSize,
+					0,
+					psSyncBlock->ui32SyncBlockSize,
+					&psSyncBlock->uiSpanBase,
+					&uiSpanSize,
+					IMG_NULL);
+	if (bRet == IMG_FALSE)
+	{
+		goto fail_spanalloc;
+	}
+
+	/*
+		There is no reason the span RA should return an allocation larger
+		then we request
+	*/
+	PVR_ASSERT(uiSpanSize == psSyncBlock->ui32SyncBlockSize);
+
+	*puiBase = psSyncBlock->uiSpanBase;
+	*puiActualSize = psSyncBlock->ui32SyncBlockSize;
+	*phImport = psSyncBlock;
+	return IMG_TRUE;
+
+fail_spanalloc:
+	FreeSyncPrimitiveBlock(psSyncBlock);
+fail_syncblockalloc:
+	OSLockAcquire(psContext->hLock);
+	psContext->ui32RefCount--;
+	OSLockRelease(psContext->hLock);
+
+	return IMG_FALSE;
+}
+
+static IMG_VOID
+SyncPrimBlockUnimport(RA_PERARENA_HANDLE hArena,
+					  RA_BASE_T uiBase,
+					  RA_PERISPAN_HANDLE hImport)
+{
+	SYNC_PRIM_CONTEXT *psContext = hArena;
+	SYNC_PRIM_BLOCK *psSyncBlock = hImport;
+
+	PVR_ASSERT(psContext != IMG_NULL);
+	PVR_ASSERT(psSyncBlock != IMG_NULL);
+
+	PVR_ASSERT(uiBase == psSyncBlock->uiSpanBase);
+
+	/* Free the span this import is using */
+	RA_Free(psContext->psSpanRA, uiBase);
+
+	/* Free the syncpim block */
+	FreeSyncPrimitiveBlock(psSyncBlock);
+
+	/*	Drop our reference to the syncprim context */
+	OSLockAcquire(psContext->hLock);
+	psContext->ui32RefCount--;
+	OSLockRelease(psContext->hLock);
+}
+
+static INLINE IMG_UINT32 SyncPrimGetOffset(SYNC_PRIM *psSyncInt)
+{
+	IMG_UINT64 ui64Temp;
+	
+	PVR_ASSERT(psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL);
+
+	/* FIXME: Subtracting a 64-bit address from another and then implicit
+	 * cast to 32-bit number. Need to review all call sequences that use this
+	 * function, added explicit casting for now.
+	 */
+	ui64Temp =  psSyncInt->u.sLocal.uiSpanAddr - psSyncInt->u.sLocal.psSyncBlock->uiSpanBase;
+	PVR_ASSERT(ui64Temp<IMG_UINT32_MAX);
+	return (IMG_UINT32)ui64Temp;
+}
+
+static IMG_VOID SyncPrimGetCPULinAddr(SYNC_PRIM *psSyncInt)
+{
+	SYNC_PRIM_BLOCK *psSyncBlock = psSyncInt->u.sLocal.psSyncBlock;
+
+	psSyncInt->sCommon.pui32LinAddr = psSyncBlock->pui32LinAddr +
+									  (SyncPrimGetOffset(psSyncInt)/sizeof(IMG_UINT32));
+}
+
+static IMG_VOID SyncPrimLocalFree(SYNC_PRIM *psSyncInt)
+{
+	SYNC_PRIM_BLOCK *psSyncBlock;
+	SYNC_PRIM_CONTEXT *psContext;
+
+	PVR_ASSERT(psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL);
+	PVR_ASSERT(0 == OSAtomicRead(&psSyncInt->u.sLocal.hRefCount));
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+	{
+		PVRSRV_ERROR eError;
+		/* remove this sync record */
+		eError = BridgeSyncRecordRemoveByHandle(
+						psSyncInt->u.sLocal.psSyncBlock->psContext->hBridge,
+						psSyncInt->u.sLocal.hRecord);
+		PVR_ASSERT(PVRSRV_OK == eError);
+	}
+#endif
+	/* reset the sync prim value as it is freed.
+	 * this guarantees the client sync allocated to the client will
+	 * have a value of zero and the client does not need to
+	 * explicitly initialise the sync value to zero.
+	 * the allocation of the backing memory for the sync prim block
+	 * is done with ZERO_ON_ALLOC so the memory is initially all zero.
+	 */
+	 _SyncPrimSetValue(psSyncInt, LOCAL_SYNC_PRIM_RESET_VALUE);
+
+	psSyncBlock = psSyncInt->u.sLocal.psSyncBlock;
+	psContext = psSyncBlock->psContext;
+
+	RA_Free(psContext->psSubAllocRA, psSyncInt->u.sLocal.uiSpanAddr);
+}
+
+static IMG_VOID SyncPrimServerFree(SYNC_PRIM *psSyncInt)
+{
+	PVRSRV_ERROR eError;
+
+	eError = BridgeServerSyncFree(psSyncInt->u.sServer.hBridge,
+								  psSyncInt->u.sServer.hServerSync);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "SyncPrimServerFree failed"));
+	}
+}
+
+static IMG_VOID SyncPrimLocalUnref(SYNC_PRIM *psSyncInt)
+{
+	PVR_ASSERT(SYNC_PRIM_TYPE_LOCAL == psSyncInt->eType);
+
+	if (!OSAtomicRead(&psSyncInt->u.sLocal.hRefCount))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "SyncPrimLocalUnref sync already freed"));
+	}
+	else if (0 == OSAtomicDecrement(&psSyncInt->u.sLocal.hRefCount))
+	{
+		SyncPrimLocalFree(psSyncInt);
+	}
+}
+
+static IMG_VOID SyncPrimLocalRef(SYNC_PRIM *psSyncInt)
+{
+	PVR_ASSERT(SYNC_PRIM_TYPE_LOCAL == psSyncInt->eType);
+
+	if (!OSAtomicRead(&psSyncInt->u.sLocal.hRefCount))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "SyncPrimLocalRef sync use after free"));
+	}
+	else
+	{
+		OSAtomicIncrement(&psSyncInt->u.sLocal.hRefCount);
+	}
+}
+
+static IMG_UINT32 SyncPrimGetFirmwareAddrLocal(SYNC_PRIM *psSyncInt)
+{
+	SYNC_PRIM_BLOCK *psSyncBlock;
+
+	psSyncBlock = psSyncInt->u.sLocal.psSyncBlock;
+	return psSyncBlock->ui32FirmwareAddr + SyncPrimGetOffset(psSyncInt);	
+}
+
+static IMG_UINT32 SyncPrimGetFirmwareAddrServer(SYNC_PRIM *psSyncInt)
+{
+	return psSyncInt->u.sServer.ui32FirmwareAddr;
+}
+
+#if !defined(__KERNEL__)
+static SYNC_BRIDGE_HANDLE _SyncPrimGetBridgeHandleLocal(SYNC_PRIM *psSyncInt)
+{
+	return psSyncInt->u.sLocal.psSyncBlock->psContext->hBridge;
+}
+
+static SYNC_BRIDGE_HANDLE _SyncPrimGetBridgeHandleServer(SYNC_PRIM *psSyncInt)
+{
+	return psSyncInt->u.sServer.hBridge;
+}
+
+static SYNC_BRIDGE_HANDLE _SyncPrimGetBridgeHandle(PVRSRV_CLIENT_SYNC_PRIM *psSync)
+{
+	SYNC_PRIM *psSyncInt;
+	PVR_ASSERT(psSync != IMG_NULL);
+	psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+
+	if (psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL)
+	{
+		return _SyncPrimGetBridgeHandleLocal(psSyncInt);
+	}
+	else if (psSyncInt->eType == SYNC_PRIM_TYPE_SERVER)
+	{
+		return _SyncPrimGetBridgeHandleServer(psSyncInt);
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "_SyncPrimGetBridgeHandle: Invalid sync type"));
+		/*
+			Either the client has given us a bad pointer or there is an
+			error in this module
+		*/
+		PVR_ASSERT(IMG_FALSE);
+		return 0;
+	}
+}
+#endif
+
+/*
+	Internal interfaces for management of syncprim block lists
+*/
+static SYNC_BLOCK_LIST *_SyncPrimBlockListCreate(IMG_VOID)
+{
+	SYNC_BLOCK_LIST *psBlockList;
+
+	psBlockList = OSAllocMem(sizeof(SYNC_BLOCK_LIST));
+	if (!psBlockList)
+	{
+		return IMG_NULL;
+	}
+
+	psBlockList->ui32BlockCount = 0;
+	psBlockList->ui32BlockListSize = SYNC_BLOCK_LIST_CHUNCK_SIZE;
+
+	psBlockList->papsSyncPrimBlock = OSAllocMem(sizeof(SYNC_PRIM_BLOCK *)
+													* SYNC_BLOCK_LIST_CHUNCK_SIZE);
+	if (!psBlockList->papsSyncPrimBlock)
+	{
+		OSFreeMem(psBlockList);
+		return IMG_NULL;
+	}
+
+	OSCachedMemSet(psBlockList->papsSyncPrimBlock,
+			 0,
+			 sizeof(SYNC_PRIM_BLOCK *) * psBlockList->ui32BlockListSize);
+
+	return psBlockList;
+}
+
+static PVRSRV_ERROR _SyncPrimBlockListAdd(SYNC_BLOCK_LIST *psBlockList,
+											SYNC_PRIM_BLOCK *psSyncPrimBlock)
+{
+	IMG_UINT32 i;
+
+	/* Check the context isn't already on the list */
+	for (i=0;i<psBlockList->ui32BlockCount;i++)
+	{
+		if (psBlockList->papsSyncPrimBlock[i] == psSyncPrimBlock)
+		{
+			return PVRSRV_OK;
+		}
+	}
+
+	/* Check we have space for a new item */
+	if (psBlockList->ui32BlockCount == psBlockList->ui32BlockListSize)
+	{
+		SYNC_PRIM_BLOCK	**papsNewSyncPrimBlock;
+
+		papsNewSyncPrimBlock = OSAllocMem(sizeof(SYNC_PRIM_BLOCK *) *
+											(psBlockList->ui32BlockListSize +
+											SYNC_BLOCK_LIST_CHUNCK_SIZE));
+		if (!papsNewSyncPrimBlock)
+		{
+			return PVRSRV_ERROR_OUT_OF_MEMORY;
+		}
+
+		OSCachedMemCopy(papsNewSyncPrimBlock,
+				  psBlockList->papsSyncPrimBlock,
+				  sizeof(SYNC_PRIM_CONTEXT *) *
+				  psBlockList->ui32BlockListSize);
+
+		OSFreeMem(psBlockList->papsSyncPrimBlock);
+
+		psBlockList->papsSyncPrimBlock = papsNewSyncPrimBlock;
+		psBlockList->ui32BlockListSize += SYNC_BLOCK_LIST_CHUNCK_SIZE;
+	}
+
+	/* Add the context to the list */
+	psBlockList->papsSyncPrimBlock[psBlockList->ui32BlockCount++] = psSyncPrimBlock;
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR _SyncPrimBlockListBlockToIndex(SYNC_BLOCK_LIST *psBlockList,
+												   SYNC_PRIM_BLOCK *psSyncPrimBlock,
+												   IMG_UINT32 *pui32Index)
+{
+	IMG_UINT32 i;
+
+	for (i=0;i<psBlockList->ui32BlockCount;i++)
+	{
+		if (psBlockList->papsSyncPrimBlock[i] == psSyncPrimBlock)
+		{
+			*pui32Index = i;
+			return PVRSRV_OK;
+		}
+	}
+
+	return PVRSRV_ERROR_INVALID_PARAMS;
+}
+
+static PVRSRV_ERROR _SyncPrimBlockListHandleArrayCreate(SYNC_BLOCK_LIST *psBlockList,
+														IMG_UINT32 *pui32BlockHandleCount,
+														IMG_HANDLE **ppahHandleList)
+{
+	IMG_HANDLE *pahHandleList;
+	IMG_UINT32 i;
+
+	pahHandleList = OSAllocMem(sizeof(IMG_HANDLE) *
+							   psBlockList->ui32BlockCount);
+	if (!pahHandleList)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	for (i=0;i<psBlockList->ui32BlockCount;i++)
+	{
+		pahHandleList[i] = psBlockList->papsSyncPrimBlock[i]->hServerSyncPrimBlock;
+	}
+
+	*ppahHandleList = pahHandleList;
+	*pui32BlockHandleCount = psBlockList->ui32BlockCount;
+
+	return PVRSRV_OK;
+}
+
+static IMG_VOID _SyncPrimBlockListHandleArrayDestroy(IMG_HANDLE *pahHandleList)
+{
+	OSFreeMem(pahHandleList);
+}
+
+static IMG_UINT32 _SyncPrimBlockListGetClientValue(SYNC_BLOCK_LIST *psBlockList,
+												   IMG_UINT32 ui32BlockIndex,
+												   IMG_UINT32 ui32Index)
+{
+	return psBlockList->papsSyncPrimBlock[ui32BlockIndex]->pui32LinAddr[ui32Index];
+}
+
+static IMG_VOID _SyncPrimBlockListDestroy(SYNC_BLOCK_LIST *psBlockList)
+{
+	OSFreeMem(psBlockList->papsSyncPrimBlock);
+	OSFreeMem(psBlockList);
+}
+
+
+/* TODO: implement _Log2 using __builtin_clz gcc's builtin */
+/* TODO: factorise the log2 functions (there is a similar one in services/shared/common/ra.c) */
+static INLINE IMG_UINT32 _Log2(IMG_UINT32 ui32Align)
+{
+	IMG_UINT32 ui32Log2Align = 0;
+	PVR_ASSERT(ui32Align != 0); /* Log2 isn't defined on 0 (triggers an assert instead of an infinite loop) */
+
+	while (!(ui32Align & 1))
+	{
+		ui32Log2Align++;
+		ui32Align = ui32Align >> 1;
+	}
+	PVR_ASSERT(ui32Align == 1);
+
+	return ui32Log2Align;
+}
+
+/*
+	External interfaces
+*/
+
+IMG_INTERNAL PVRSRV_ERROR
+SyncPrimContextCreate(SYNC_BRIDGE_HANDLE hBridge,
+					  IMG_HANDLE hDeviceNode,
+					  PSYNC_PRIM_CONTEXT *phSyncPrimContext)
+{
+	SYNC_PRIM_CONTEXT *psContext;
+	PVRSRV_ERROR eError;
+
+	psContext = OSAllocMem(sizeof(SYNC_PRIM_CONTEXT));
+	if (psContext == IMG_NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_alloc;
+	}
+
+	psContext->hBridge = hBridge;
+	psContext->hDeviceNode = hDeviceNode;
+
+	eError = OSLockCreate(&psContext->hLock, LOCK_TYPE_PASSIVE);
+	if ( eError != PVRSRV_OK)
+	{
+		goto fail_lockcreate;
+	}
+	
+	OSSNPrintf(psContext->azName, SYNC_PRIM_NAME_SIZE, "Sync Prim RA-%p", psContext);
+	OSSNPrintf(psContext->azSpanName, SYNC_PRIM_NAME_SIZE, "Sync Prim span RA-%p", psContext);
+
+	/*
+		Create the RA for sub-allocations of the SynPrim's
+
+		Note:
+		The import size doesn't matter here as the server will pass
+		back the blocksize when does the import which overrides
+		what we specify here.
+	*/
+
+	psContext->psSubAllocRA = RA_Create(psContext->azName,
+										/* Params for imports */
+										_Log2(sizeof(IMG_UINT32)),
+										RA_LOCKCLASS_2,
+										SyncPrimBlockImport,
+										SyncPrimBlockUnimport,
+										psContext);
+	if (psContext->psSubAllocRA == IMG_NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_suballoc;
+	}
+
+	/*
+		Create the span-management RA
+
+		The RA requires that we work with linear spans. For our use
+		here we don't require this behaviour as we're always working
+		within offsets of blocks (imports). However, we need to keep
+		the RA happy so we create the "span" management RA which
+		ensures that all are imports are added to the RA in a linear
+		fashion
+	*/
+	psContext->psSpanRA = RA_Create(psContext->azSpanName,
+									/* Params for imports */
+									0,
+									RA_LOCKCLASS_1,
+									IMG_NULL,
+									IMG_NULL,
+									IMG_NULL);
+	if (psContext->psSpanRA == IMG_NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_span;
+	}
+
+	if (!RA_Add(psContext->psSpanRA, 0, MAX_SYNC_MEM, 0, IMG_NULL))
+	{
+		RA_Delete(psContext->psSpanRA);
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_span;
+	}
+
+	psContext->ui32RefCount = 1;
+
+	*phSyncPrimContext = psContext;
+	return PVRSRV_OK;
+fail_span:
+	RA_Delete(psContext->psSubAllocRA);
+fail_suballoc:
+	OSLockDestroy(psContext->hLock);
+fail_lockcreate:
+	OSFreeMem(psContext);
+fail_alloc:
+	return eError;
+}
+
+IMG_INTERNAL IMG_VOID SyncPrimContextDestroy(PSYNC_PRIM_CONTEXT hSyncPrimContext)
+{
+	SYNC_PRIM_CONTEXT *psContext = hSyncPrimContext;
+	IMG_BOOL bDoRefCheck = IMG_TRUE;
+
+/* FIXME */
+#if defined(__KERNEL__)
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK)
+	{
+		bDoRefCheck =  IMG_FALSE;
+	}
+#endif
+	OSLockAcquire(psContext->hLock);
+	if (--psContext->ui32RefCount != 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "SyncPrimContextDestroy: Refcount non-zero: %d", psContext->ui32RefCount));
+
+		if (bDoRefCheck)
+		{
+			PVR_ASSERT(0);
+		}
+		return;
+	}
+	/*
+		If we fail above then we won't release the lock. However, if that
+		happens things have already gone very wrong and we bail to save
+		freeing memory which might still be in use and holding this lock
+		will show up if anyone is trying to use this context after it has
+		been destroyed.
+	*/
+	OSLockRelease(psContext->hLock);
+
+	RA_Delete(psContext->psSpanRA);
+	RA_Delete(psContext->psSubAllocRA);
+	OSLockDestroy(psContext->hLock);
+	OSFreeMem(psContext);
+}
+
+static PVRSRV_ERROR _SyncPrimAlloc(PSYNC_PRIM_CONTEXT hSyncPrimContext,
+										PVRSRV_CLIENT_SYNC_PRIM **ppsSync,
+										const IMG_CHAR *pszClassName,
+										IMG_BOOL bServerSync)
+{
+	SYNC_PRIM_CONTEXT *psContext = hSyncPrimContext;
+	SYNC_PRIM_BLOCK *psSyncBlock;
+	SYNC_PRIM *psNewSync;
+	PVRSRV_ERROR eError;
+	RA_BASE_T uiSpanAddr;
+
+	psNewSync = OSAllocMem(sizeof(SYNC_PRIM));
+	if (psNewSync == IMG_NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_alloc;
+	}
+
+	if (!RA_Alloc(psContext->psSubAllocRA,
+				  sizeof(IMG_UINT32),
+				  0,
+				  sizeof(IMG_UINT32),
+				  &uiSpanAddr,
+				  IMG_NULL,
+				  (RA_PERISPAN_HANDLE *) &psSyncBlock))
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail_raalloc;
+	}
+	psNewSync->eType = SYNC_PRIM_TYPE_LOCAL;
+	OSAtomicWrite(&psNewSync->u.sLocal.hRefCount, 1);
+	psNewSync->u.sLocal.uiSpanAddr = uiSpanAddr;
+	psNewSync->u.sLocal.psSyncBlock = psSyncBlock;
+	SyncPrimGetCPULinAddr(psNewSync);
+	*ppsSync = &psNewSync->sCommon;
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+	{
+		IMG_CHAR szClassName[SYNC_MAX_CLASS_NAME_LEN];
+		if(pszClassName)
+		{
+			/* Copy the class name annotation into a fixed-size array */
+			OSStringNCopy(szClassName, pszClassName, SYNC_MAX_CLASS_NAME_LEN - 1);
+			szClassName[SYNC_MAX_CLASS_NAME_LEN - 1] = 0;
+		}
+		else
+		{
+			/* No class name annotation */
+			szClassName[0] = 0;
+		}
+		/* record this sync */
+		eError = BridgeSyncRecordAdd(
+					psSyncBlock->psContext->hBridge,
+					&psNewSync->u.sLocal.hRecord,
+					psSyncBlock->hServerSyncPrimBlock,
+					psSyncBlock->ui32FirmwareAddr,
+					SyncPrimGetOffset(psNewSync),
+					bServerSync,
+					OSStringNLength(szClassName, SYNC_MAX_CLASS_NAME_LEN),
+					szClassName);
+	}
+#else
+	PVR_UNREFERENCED_PARAMETER(pszClassName);
+	PVR_UNREFERENCED_PARAMETER(bServerSync);
+#endif /* if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING) */
+
+	return PVRSRV_OK;
+
+fail_raalloc:
+	OSFreeMem(psNewSync);
+fail_alloc:
+	PVR_ASSERT(eError != PVRSRV_OK);
+
+	return eError;
+}
+
+#if defined(__KERNEL__)
+IMG_INTERNAL PVRSRV_ERROR SyncPrimAllocForServerSync(PSYNC_PRIM_CONTEXT hSyncPrimContext,
+										PVRSRV_CLIENT_SYNC_PRIM **ppsSync,
+										const IMG_CHAR *pszClassName)
+{
+	return _SyncPrimAlloc(hSyncPrimContext,
+					  ppsSync,
+					  pszClassName,
+					  IMG_TRUE);
+}
+#endif
+
+IMG_INTERNAL PVRSRV_ERROR SyncPrimAlloc(PSYNC_PRIM_CONTEXT hSyncPrimContext,
+										PVRSRV_CLIENT_SYNC_PRIM **ppsSync,
+										const IMG_CHAR *pszClassName)
+{
+	return _SyncPrimAlloc(hSyncPrimContext,
+					  ppsSync,
+					  pszClassName,
+					  IMG_FALSE);
+}
+
+static IMG_VOID
+_SyncPrimSetValue(SYNC_PRIM *psSyncInt, IMG_UINT32 ui32Value)
+{
+	PVRSRV_ERROR eError;
+
+	if (psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL)
+	{
+		SYNC_PRIM_BLOCK *psSyncBlock;
+		SYNC_PRIM_CONTEXT *psContext;
+
+		psSyncBlock = psSyncInt->u.sLocal.psSyncBlock;
+		psContext = psSyncBlock->psContext;
+
+		eError = BridgeSyncPrimSet(psContext->hBridge,
+									psSyncBlock->hServerSyncPrimBlock,
+									SyncPrimGetOffset(psSyncInt)/sizeof(IMG_UINT32),
+									ui32Value);
+		PVR_ASSERT(eError == PVRSRV_OK);
+	}
+	else
+	{
+		eError = BridgeServerSyncPrimSet(psSyncInt->u.sServer.hBridge,
+									psSyncInt->u.sServer.hServerSync,
+									ui32Value);
+		PVR_ASSERT(eError == PVRSRV_OK);
+
+	}
+}
+
+IMG_INTERNAL IMG_VOID SyncPrimFree(PVRSRV_CLIENT_SYNC_PRIM *psSync)
+{
+	SYNC_PRIM *psSyncInt;
+
+	PVR_ASSERT(psSync != IMG_NULL);
+	psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+
+	if (psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL)
+	{
+		SyncPrimLocalUnref(psSyncInt);
+	}
+	else if (psSyncInt->eType == SYNC_PRIM_TYPE_SERVER)
+	{
+		SyncPrimServerFree(psSyncInt);
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "SyncPrimFree: Invalid sync type"));
+		/*
+			Either the client has given us a bad pointer or there is an
+			error in this module
+		*/
+		PVR_ASSERT(IMG_FALSE);
+		return;
+	}
+
+	OSFreeMem(psSyncInt);
+}
+
+#if defined(NO_HARDWARE)
+IMG_INTERNAL IMG_VOID
+SyncPrimNoHwUpdate(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value)
+{
+	SYNC_PRIM *psSyncInt;
+
+	PVR_ASSERT(psSync != IMG_NULL);
+	psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+
+	/* There is no check for the psSyncInt to be LOCAL as this call
+	   substitutes the Firmware updating a sync and that sync could
+	   be a server one */
+
+	_SyncPrimSetValue(psSyncInt, ui32Value);
+}
+#endif
+
+IMG_INTERNAL IMG_VOID
+SyncPrimSet(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value)
+{
+	SYNC_PRIM *psSyncInt;
+
+	PVR_ASSERT(psSync != IMG_NULL);
+	psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+
+	if (psSyncInt->eType != SYNC_PRIM_TYPE_LOCAL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "SyncPrimSet: Invalid sync type"));
+		/*PVR_ASSERT(IMG_FALSE);*/
+		return;
+	}
+
+	_SyncPrimSetValue(psSyncInt, ui32Value);
+
+#if defined(PDUMP)
+	SyncPrimPDump(psSync);
+#endif
+
+}
+
+IMG_INTERNAL IMG_UINT32 SyncPrimGetFirmwareAddr(PVRSRV_CLIENT_SYNC_PRIM *psSync)
+{
+	SYNC_PRIM *psSyncInt;
+	PVR_ASSERT(psSync != IMG_NULL);
+	psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+
+	if (psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL)
+	{
+		return SyncPrimGetFirmwareAddrLocal(psSyncInt);
+	}
+	else if (psSyncInt->eType == SYNC_PRIM_TYPE_SERVER)
+	{
+		return SyncPrimGetFirmwareAddrServer(psSyncInt);
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "SyncPrimGetFirmwareAddr: Invalid sync type"));
+		/*
+			Either the client has given us a bad pointer or there is an
+			error in this module
+		*/
+		PVR_ASSERT(IMG_FALSE);
+		return 0;
+	}
+}
+
+#if !defined(__KERNEL__)
+IMG_INTERNAL PVRSRV_ERROR SyncPrimDumpSyncs(IMG_UINT32 ui32SyncCount, PVRSRV_CLIENT_SYNC_PRIM **papsSync, const IMG_CHAR *pcszExtraInfo)
+{
+#if defined(PVRSRV_NEED_PVR_DPF)
+	SYNC_PRIM *psSyncInt;
+	PVRSRV_CLIENT_SYNC_PRIM **papsServerSync;
+	IMG_UINT32 ui32ServerSyncs = 0;
+	IMG_UINT32 *pui32UID = IMG_NULL;
+	IMG_UINT32 *pui32FWAddr = IMG_NULL;
+	IMG_UINT32 *pui32CurrentOp = IMG_NULL;
+	IMG_UINT32 *pui32NextOp = IMG_NULL;
+	IMG_UINT32 i;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	papsServerSync = OSAllocMem(ui32SyncCount * sizeof(PVRSRV_CLIENT_SYNC_PRIM *));
+	if (!papsServerSync)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	for (i = 0; i < ui32SyncCount; i++)
+	{
+		psSyncInt = IMG_CONTAINER_OF(papsSync[i], SYNC_PRIM, sCommon);
+		if (psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: sync=local  fw=0x%x curr=0x%04x",
+					 pcszExtraInfo,
+					 SyncPrimGetFirmwareAddrLocal(psSyncInt),
+					 *psSyncInt->sCommon.pui32LinAddr));
+		}
+		else if (psSyncInt->eType == SYNC_PRIM_TYPE_SERVER)
+		{
+			papsServerSync[ui32ServerSyncs++] = papsSync[i];
+		}
+		else
+		{
+			PVR_DPF((PVR_DBG_ERROR, "SyncPrimDumpSyncs: Invalid sync type"));
+			/*
+			   Either the client has given us a bad pointer or there is an
+			   error in this module
+			   */
+			PVR_ASSERT(IMG_FALSE);
+			eError = PVRSRV_ERROR_INVALID_PARAMS;
+			goto err_free;
+		}
+	}
+
+	if (ui32ServerSyncs > 0)
+	{
+		pui32UID = OSAllocMem(ui32ServerSyncs * sizeof(IMG_UINT32));
+		if (!pui32UID)
+		{
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto err_free;
+		}
+		pui32FWAddr = OSAllocMem(ui32ServerSyncs * sizeof(IMG_UINT32));
+		if (!pui32FWAddr)
+		{
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto err_free;
+		}
+		pui32CurrentOp = OSAllocMem(ui32ServerSyncs * sizeof(IMG_UINT32));
+		if (!pui32CurrentOp)
+		{
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto err_free;
+		}
+		pui32NextOp = OSAllocMem(ui32ServerSyncs * sizeof(IMG_UINT32));
+		if (!pui32NextOp)
+		{
+			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+			goto err_free;
+		}
+		eError = SyncPrimServerGetStatus(ui32ServerSyncs, papsServerSync,
+										 pui32UID,
+										 pui32FWAddr,
+										 pui32CurrentOp,
+										 pui32NextOp);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "SyncPrimDumpSyncs: Error querying server sync status (%d)",
+					 eError));
+			goto err_free;
+		}
+		for (i = 0; i < ui32ServerSyncs; i++)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: sync=server fw=0x%x curr=0x%04x next=0x%04x id=%u%s",
+					 pcszExtraInfo,
+					 pui32FWAddr[i],
+					 pui32CurrentOp[i],
+					 pui32NextOp[i],
+					 pui32UID[i],
+					 (pui32NextOp[i] - pui32CurrentOp[i] == 1) ? " *" : 
+					 (pui32NextOp[i] - pui32CurrentOp[i] >  1) ? " **" : 
+					 ""));
+		}
+	}
+
+err_free:
+	OSFreeMem(papsServerSync);
+	if (pui32UID)
+	{
+		OSFreeMem(pui32UID);
+	}
+	if (pui32FWAddr)
+	{
+		OSFreeMem(pui32FWAddr);
+	}
+	if (pui32CurrentOp)
+	{
+		OSFreeMem(pui32CurrentOp);
+	}
+	if (pui32NextOp)
+	{
+		OSFreeMem(pui32NextOp);
+	}
+	return eError;
+#else
+	PVR_UNREFERENCED_PARAMETER(ui32SyncCount);
+	PVR_UNREFERENCED_PARAMETER(papsSync);
+	PVR_UNREFERENCED_PARAMETER(pcszExtraInfo);
+	return PVRSRV_OK;
+#endif
+}
+#endif
+
+IMG_INTERNAL
+PVRSRV_ERROR SyncPrimOpCreate(IMG_UINT32 ui32SyncCount,
+							  PVRSRV_CLIENT_SYNC_PRIM **papsSyncPrim,
+							  PSYNC_OP_COOKIE *ppsCookie)
+{
+	SYNC_OP_COOKIE *psNewCookie;
+	SYNC_BLOCK_LIST *psSyncBlockList;
+	IMG_UINT32 ui32ServerSyncCount = 0;
+	IMG_UINT32 ui32ClientSyncCount = 0;
+	IMG_UINT32 ui32ServerAllocSize;
+	IMG_UINT32 ui32ClientAllocSize;
+	IMG_UINT32 ui32TotalAllocSize;
+	IMG_UINT32 ui32ServerIndex = 0;
+	IMG_UINT32 ui32ClientIndex = 0;
+	IMG_UINT32 i;
+	IMG_UINT32 ui32SyncBlockCount;
+	IMG_HANDLE hBridge;
+	IMG_HANDLE *pahHandleList;
+	IMG_CHAR *pcPtr;
+	PVRSRV_ERROR eError;
+
+	psSyncBlockList = _SyncPrimBlockListCreate();
+	
+	if (!psSyncBlockList)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e0;
+	}
+
+	for (i=0;i<ui32SyncCount;i++)
+	{
+		if (SyncPrimIsServerSync(papsSyncPrim[i]))
+		{
+			ui32ServerSyncCount++;
+		}
+		else
+		{
+			SYNC_PRIM *psSync = (SYNC_PRIM *) papsSyncPrim[i];
+
+			ui32ClientSyncCount++;
+			eError = _SyncPrimBlockListAdd(psSyncBlockList, psSync->u.sLocal.psSyncBlock);
+			if (eError != PVRSRV_OK)
+			{
+				goto e1;
+			}
+		}
+	}
+
+	ui32ServerAllocSize = ui32ServerSyncCount * (sizeof(IMG_HANDLE) + sizeof(IMG_UINT32));
+	ui32ClientAllocSize = ui32ClientSyncCount * (5 * sizeof(IMG_UINT32));
+	ui32TotalAllocSize = sizeof(SYNC_OP_COOKIE) +
+							 (sizeof(PVRSRV_CLIENT_SYNC_PRIM *) * ui32SyncCount) +
+							 ui32ServerAllocSize + 
+							 ui32ClientAllocSize;
+
+	psNewCookie = OSAllocMem(ui32TotalAllocSize);
+	pcPtr = (IMG_CHAR *) psNewCookie;
+
+	if (!psNewCookie)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e1;
+	}
+
+	/* Setup the pointers */
+	pcPtr += sizeof(SYNC_OP_COOKIE);
+	psNewCookie->papsSyncPrim = (PVRSRV_CLIENT_SYNC_PRIM **) pcPtr;
+
+	pcPtr += sizeof(PVRSRV_CLIENT_SYNC_PRIM *) * ui32SyncCount;
+	psNewCookie->paui32SyncBlockIndex = (IMG_UINT32 *) pcPtr;
+
+	pcPtr += sizeof(IMG_UINT32) * ui32ClientSyncCount;
+	psNewCookie->paui32Index = (IMG_UINT32 *) pcPtr;
+	
+	pcPtr += sizeof(IMG_UINT32) * ui32ClientSyncCount;
+	psNewCookie->paui32Flags = (IMG_UINT32 *) pcPtr;
+
+	pcPtr += sizeof(IMG_UINT32) * ui32ClientSyncCount;
+	psNewCookie->paui32FenceValue = (IMG_UINT32 *) pcPtr;
+
+	pcPtr += sizeof(IMG_UINT32) * ui32ClientSyncCount;
+	psNewCookie->paui32UpdateValue = (IMG_UINT32 *) pcPtr;
+
+	pcPtr += sizeof(IMG_UINT32) * ui32ClientSyncCount;
+	psNewCookie->pahServerSync =(IMG_HANDLE *) pcPtr;
+	pcPtr += sizeof(IMG_HANDLE) * ui32ServerSyncCount;
+
+	psNewCookie->paui32ServerFlags =(IMG_UINT32 *) pcPtr;
+	pcPtr += sizeof(IMG_UINT32) * ui32ServerSyncCount;
+
+	/* Check the pointer setup went ok */
+	PVR_ASSERT(pcPtr == (((IMG_CHAR *) psNewCookie) + ui32TotalAllocSize));
+
+	psNewCookie->ui32SyncCount = ui32SyncCount;
+	psNewCookie->ui32ServerSyncCount = ui32ServerSyncCount;
+	psNewCookie->ui32ClientSyncCount = ui32ClientSyncCount;
+	psNewCookie->psSyncBlockList = psSyncBlockList;
+
+	/*
+		Get the bridge handle from the 1st sync.
+
+		Note: We assume the all syncs have been created with the same
+			  services connection.
+	*/
+	if (SyncPrimIsServerSync(papsSyncPrim[0]))
+	{
+		SYNC_PRIM *psSync = (SYNC_PRIM *) papsSyncPrim[0];
+
+		hBridge = psSync->u.sServer.hBridge;
+	}
+	else
+	{
+		SYNC_PRIM *psSync = (SYNC_PRIM *) papsSyncPrim[0];
+
+		hBridge = psSync->u.sLocal.psSyncBlock->psContext->hBridge;		
+	}
+
+	psNewCookie->hBridge = hBridge;
+
+	if (ui32ServerSyncCount)
+	{
+		psNewCookie->bHaveServerSync = IMG_TRUE;
+	}
+	else
+	{
+		psNewCookie->bHaveServerSync = IMG_FALSE;
+	}
+
+	/* Fill in the server and client sync data */
+	for (i=0;i<ui32SyncCount;i++)
+	{
+		SYNC_PRIM *psSync = (SYNC_PRIM *) papsSyncPrim[i];
+
+		if (SyncPrimIsServerSync(papsSyncPrim[i]))
+		{
+			psNewCookie->pahServerSync[ui32ServerIndex] = psSync->u.sServer.hServerSync;
+
+			ui32ServerIndex++;
+		}
+		else
+		{
+			/* Location of sync */
+			eError = _SyncPrimBlockListBlockToIndex(psSyncBlockList,
+													psSync->u.sLocal.psSyncBlock,
+													&psNewCookie->paui32SyncBlockIndex[ui32ClientIndex]);
+			if (eError != PVRSRV_OK)
+			{
+				goto e2;
+			}
+
+			/* Workout the index to sync */
+			psNewCookie->paui32Index[ui32ClientIndex] =
+					SyncPrimGetOffset(psSync)/sizeof(IMG_UINT32);
+
+			ui32ClientIndex++;
+		}
+
+		psNewCookie->papsSyncPrim[i] = papsSyncPrim[i];
+	}
+
+	eError = _SyncPrimBlockListHandleArrayCreate(psSyncBlockList,
+												 &ui32SyncBlockCount,
+												 &pahHandleList);
+	if (eError !=PVRSRV_OK)
+	{
+		goto e2;
+	}
+
+	/*
+		Create the server side cookie. Here we pass in all the unchanging
+		data so we only need to pass in the minimum at takeop time
+	*/
+	eError = BridgeSyncPrimOpCreate(hBridge,
+									ui32SyncBlockCount,
+									pahHandleList,
+									psNewCookie->ui32ClientSyncCount,
+									psNewCookie->paui32SyncBlockIndex,
+									psNewCookie->paui32Index,
+									psNewCookie->ui32ServerSyncCount,
+									psNewCookie->pahServerSync,
+									&psNewCookie->hServerCookie);
+
+	/* Free the handle list regardless of error */
+	_SyncPrimBlockListHandleArrayDestroy(pahHandleList);
+
+	if (eError != PVRSRV_OK)
+	{
+		goto e2;
+	}
+
+	/* Increase the reference count on all referenced local sync prims
+	 * so that they cannot be freed until this Op is finished with
+	 */
+	for (i=0;i<ui32SyncCount;i++)
+	{
+		SYNC_PRIM *psSyncInt;
+		psSyncInt = IMG_CONTAINER_OF(papsSyncPrim[i], SYNC_PRIM, sCommon);
+		if (SYNC_PRIM_TYPE_LOCAL == psSyncInt->eType)
+		{
+			SyncPrimLocalRef(psSyncInt);
+		}
+	}
+
+	*ppsCookie = psNewCookie;
+	return PVRSRV_OK;
+
+e2:
+	OSFreeMem(psNewCookie);
+e1:
+	_SyncPrimBlockListDestroy(psSyncBlockList);
+e0:
+	return eError;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR SyncPrimOpTake(PSYNC_OP_COOKIE psCookie,
+							IMG_UINT32 ui32SyncCount,
+							PVRSRV_CLIENT_SYNC_PRIM_OP *pasSyncOp)
+{
+	PVRSRV_ERROR eError;
+	IMG_UINT32 ui32ServerIndex = 0;
+	IMG_UINT32 ui32ClientIndex = 0;
+	IMG_UINT32 i;
+
+	/* Copy client sync operations */
+	for (i=0;i<ui32SyncCount;i++)
+	{
+		/*
+			Sanity check the client passes in the same syncs as the
+			ones we got at create time
+		*/
+		if (psCookie->papsSyncPrim[i] != pasSyncOp[i].psSync)
+		{
+			return PVRSRV_ERROR_INVALID_PARAMS;
+		}
+
+		if (SyncPrimIsServerSync(pasSyncOp[i].psSync))
+		{
+			psCookie->paui32ServerFlags[ui32ServerIndex] =
+					pasSyncOp[i].ui32Flags;
+
+			ui32ServerIndex++;
+		}
+		else
+		{
+			/* Client operation information */
+			psCookie->paui32Flags[ui32ClientIndex] =
+					pasSyncOp[i].ui32Flags;
+			psCookie->paui32FenceValue[ui32ClientIndex] =
+					pasSyncOp[i].ui32FenceValue;
+			psCookie->paui32UpdateValue[ui32ClientIndex] =
+					pasSyncOp[i].ui32UpdateValue;
+
+			ui32ClientIndex++;
+		}
+	}
+
+	eError = BridgeSyncPrimOpTake(psCookie->hBridge,
+								  psCookie->hServerCookie,
+								  psCookie->ui32ClientSyncCount,
+								  psCookie->paui32Flags,
+								  psCookie->paui32FenceValue,
+								  psCookie->paui32UpdateValue,
+								  psCookie->ui32ServerSyncCount,
+								  psCookie->paui32ServerFlags);
+
+	return eError;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR SyncPrimOpReady(PSYNC_OP_COOKIE psCookie,
+							 IMG_BOOL *pbReady)
+{
+	PVRSRV_ERROR eError;
+	PVR_ASSERT(psCookie != IMG_NULL);
+
+	/*
+		If we have a server sync we have no choice
+		but to do the check in the server
+	*/
+	if (psCookie->bHaveServerSync)
+	{
+		eError = BridgeSyncPrimOpReady(psCookie->hBridge,
+									   psCookie->hServerCookie,
+									   pbReady);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+					 "%s: Failed to do sync check in server (Error = %d)",
+					 __FUNCTION__, eError));
+			goto e0;
+		}
+	}
+	else
+	{
+		IMG_UINT32 i;
+		IMG_UINT32 ui32SnapShot;
+		IMG_BOOL bReady = IMG_TRUE;
+
+		for (i=0;i<psCookie->ui32ClientSyncCount;i++)
+		{
+			if ((psCookie->paui32Flags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK) == 0)
+			{
+				continue;
+			}
+
+			ui32SnapShot = _SyncPrimBlockListGetClientValue(psCookie->psSyncBlockList,
+															psCookie->paui32SyncBlockIndex[i],
+															psCookie->paui32Index[i]);
+			if (ui32SnapShot != psCookie->paui32FenceValue[i])
+			{
+				bReady = IMG_FALSE;
+				break;
+			}
+		}
+
+		*pbReady = bReady;
+	}
+
+	return PVRSRV_OK;
+e0:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR SyncPrimOpComplete(PSYNC_OP_COOKIE psCookie)
+{
+	PVRSRV_ERROR eError;
+
+	eError = BridgeSyncPrimOpComplete(psCookie->hBridge,
+									  psCookie->hServerCookie);
+
+	return eError;
+}
+
+IMG_INTERNAL
+IMG_VOID SyncPrimOpDestroy(PSYNC_OP_COOKIE psCookie)
+{
+	PVRSRV_ERROR eError;
+	IMG_UINT32 i;
+
+	/* Decrease the reference count on all referenced local sync prims
+	 * so that they can be freed now this Op is finished with
+	 */
+	for (i=0;i<psCookie->ui32SyncCount;i++)
+	{
+		SYNC_PRIM *psSyncInt;
+		psSyncInt = IMG_CONTAINER_OF(psCookie->papsSyncPrim[i], SYNC_PRIM, sCommon);
+		if (SYNC_PRIM_TYPE_LOCAL == psSyncInt->eType)
+		{
+			SyncPrimLocalUnref(psSyncInt);
+		}
+	}
+
+	eError = BridgeSyncPrimOpDestroy(psCookie->hBridge,
+									 psCookie->hServerCookie);
+	PVR_ASSERT(eError == PVRSRV_OK);
+
+	_SyncPrimBlockListDestroy(psCookie->psSyncBlockList);
+	OSFreeMem(psCookie);
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR SyncPrimOpResolve(PSYNC_OP_COOKIE psCookie,
+							   IMG_UINT32 *pui32SyncCount,
+							   PVRSRV_CLIENT_SYNC_PRIM_OP **ppsSyncOp)
+{
+	IMG_UINT32 ui32ServerIndex = 0;
+	IMG_UINT32 ui32ClientIndex = 0;
+	PVRSRV_CLIENT_SYNC_PRIM_OP *psSyncOps;
+	IMG_UINT32 i;
+
+	psSyncOps = OSAllocMem(sizeof(PVRSRV_CLIENT_SYNC_PRIM_OP) * 
+						   psCookie->ui32SyncCount);
+	if (!psSyncOps)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+	
+	for (i=0; i<psCookie->ui32SyncCount; i++)
+	{
+		psSyncOps[i].psSync = psCookie->papsSyncPrim[i];
+		if (SyncPrimIsServerSync(psCookie->papsSyncPrim[i]))
+		{
+			psSyncOps[i].ui32FenceValue = 0;
+			psSyncOps[i].ui32UpdateValue = 0;
+			psSyncOps[i].ui32Flags = psCookie->paui32ServerFlags[ui32ServerIndex];
+			ui32ServerIndex++;
+		}
+		else
+		{
+			psSyncOps[i].ui32FenceValue = psCookie->paui32FenceValue[ui32ClientIndex]; 
+			psSyncOps[i].ui32UpdateValue = psCookie->paui32UpdateValue[ui32ClientIndex]; 
+			psSyncOps[i].ui32Flags = psCookie->paui32Flags[ui32ClientIndex];
+			ui32ClientIndex++;
+		}
+	}
+
+	*ppsSyncOp = psSyncOps;
+	*pui32SyncCount = psCookie->ui32SyncCount;
+
+	return PVRSRV_OK;
+}
+
+#if !defined(__KERNEL__)
+IMG_INTERNAL
+PVRSRV_ERROR SyncPrimServerAlloc(SYNC_BRIDGE_HANDLE hBridge,
+								 IMG_HANDLE hDeviceNode,
+								 PVRSRV_CLIENT_SYNC_PRIM **ppsSync,
+								 const IMG_CHAR *pszClassName
+								 PVR_DBG_FILELINE_PARAM)
+{
+	IMG_CHAR szClassName[SYNC_MAX_CLASS_NAME_LEN];
+	SYNC_PRIM *psNewSync;
+	PVRSRV_ERROR eError;
+
+#if !defined(PVR_SYNC_PRIM_ALLOC_TRACE)
+	PVR_DBG_FILELINE_UNREF();
+#endif
+	psNewSync = OSAllocMem(sizeof(SYNC_PRIM));
+	if (psNewSync == IMG_NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e0;
+	}
+	OSCachedMemSet(psNewSync, 0, sizeof(SYNC_PRIM));
+
+	if(pszClassName)
+	{
+		/* Copy the class name annotation into a fixed-size array */
+		OSStringNCopy(szClassName, pszClassName, SYNC_MAX_CLASS_NAME_LEN - 1);
+		szClassName[SYNC_MAX_CLASS_NAME_LEN - 1] = 0;
+	}
+	else
+	{
+		/* No class name annotation */
+		szClassName[0] = 0;
+	}
+
+	eError = BridgeServerSyncAlloc(hBridge,
+								   hDeviceNode,
+								   &psNewSync->u.sServer.hServerSync,
+								   &psNewSync->u.sServer.ui32FirmwareAddr,
+								   OSStringNLength(szClassName, SYNC_MAX_CLASS_NAME_LEN),
+								   szClassName);
+
+	if (eError != PVRSRV_OK)
+	{
+		goto e1;
+	}
+
+#if defined(PVR_SYNC_PRIM_ALLOC_TRACE)
+	PVR_DPF((PVR_DBG_WARNING, "Allocated sync=server fw=0x%x [%p]" PVR_DBG_FILELINE_FMT,
+			 psNewSync->u.sServer.ui32FirmwareAddr, &psNewSync->sCommon PVR_DBG_FILELINE_ARG));
+#endif
+
+	psNewSync->eType = SYNC_PRIM_TYPE_SERVER;
+	psNewSync->u.sServer.hBridge = hBridge;
+	*ppsSync = &psNewSync->sCommon;
+
+	return PVRSRV_OK;
+e1:
+	OSFreeMem(psNewSync);
+e0:
+	return eError;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR SyncPrimServerGetStatus(IMG_UINT32 ui32SyncCount,
+									 PVRSRV_CLIENT_SYNC_PRIM **papsSync,
+									 IMG_UINT32 *pui32UID,
+									 IMG_UINT32 *pui32FWAddr,
+									 IMG_UINT32 *pui32CurrentOp,
+									 IMG_UINT32 *pui32NextOp)
+{
+	PVRSRV_ERROR eError;
+	IMG_UINT32 i;
+	SYNC_BRIDGE_HANDLE hBridge = _SyncPrimGetBridgeHandle(papsSync[0]);
+	IMG_HANDLE *pahServerHandle;
+
+	pahServerHandle = OSAllocMem(sizeof(IMG_HANDLE) * ui32SyncCount);
+	if (pahServerHandle == IMG_NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e0;
+	}
+
+	/*
+		Check that all the sync we've been passed are server syncs
+		and that they all are on the same connection.
+	*/
+	for (i=0;i<ui32SyncCount;i++)
+	{
+		SYNC_PRIM *psIntSync = IMG_CONTAINER_OF(papsSync[i], SYNC_PRIM, sCommon);
+
+		if (!SyncPrimIsServerSync(papsSync[i]))
+		{
+			eError = PVRSRV_ERROR_INVALID_SYNC_PRIM;
+			goto e1;
+		}
+
+		if (hBridge != _SyncPrimGetBridgeHandle(papsSync[i]))
+		{
+			PVR_DPF((PVR_DBG_ERROR, "SyncServerGetStatus: Sync connection is different\n"));
+			eError = PVRSRV_ERROR_INVALID_SYNC_PRIM;
+			goto e1;
+		}
+
+		pahServerHandle[i] = psIntSync->u.sServer.hServerSync;
+	}
+
+	eError = BridgeServerSyncGetStatus(hBridge,
+									   ui32SyncCount,
+									   pahServerHandle,
+									   pui32UID,
+									   pui32FWAddr,
+									   pui32CurrentOp,
+									   pui32NextOp);
+	OSFreeMem(pahServerHandle);
+
+	if (eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+	return PVRSRV_OK;
+
+e1:
+	OSFreeMem(pahServerHandle);
+e0:
+	PVR_ASSERT(eError != PVRSRV_OK);
+	return eError;
+}
+
+#endif
+
+IMG_INTERNAL
+IMG_BOOL SyncPrimIsServerSync(PVRSRV_CLIENT_SYNC_PRIM *psSync)
+{
+	SYNC_PRIM *psSyncInt;
+
+	PVR_ASSERT(psSync != IMG_NULL);
+	psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+	if (psSyncInt->eType == SYNC_PRIM_TYPE_SERVER)
+	{
+		return IMG_TRUE;
+	}
+
+	return IMG_FALSE;
+}
+
+IMG_INTERNAL
+IMG_HANDLE SyncPrimGetServerHandle(PVRSRV_CLIENT_SYNC_PRIM *psSync)
+{
+	SYNC_PRIM *psSyncInt;
+
+	PVR_ASSERT(psSync != IMG_NULL);
+	psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+	PVR_ASSERT(psSyncInt->eType == SYNC_PRIM_TYPE_SERVER);
+
+	return psSyncInt->u.sServer.hServerSync;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR SyncPrimServerQueueOp(PVRSRV_CLIENT_SYNC_PRIM_OP *psSyncOp)
+{
+	SYNC_PRIM *psSyncInt;
+	IMG_BOOL bUpdate;
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(psSyncOp != IMG_NULL);
+	psSyncInt = IMG_CONTAINER_OF(psSyncOp->psSync, SYNC_PRIM, sCommon);
+	if (psSyncInt->eType != SYNC_PRIM_TYPE_SERVER)
+	{
+		return PVRSRV_ERROR_INVALID_SYNC_PRIM;
+	}
+
+	PVR_ASSERT(psSyncOp->ui32Flags != 0);
+	if (psSyncOp->ui32Flags & PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE)
+	{
+		bUpdate = IMG_TRUE;
+	}else
+	{
+		bUpdate = IMG_FALSE;
+	}
+
+	eError = BridgeServerSyncQueueHWOp(psSyncInt->u.sServer.hBridge,
+									      psSyncInt->u.sServer.hServerSync,
+										  bUpdate,
+									      &psSyncOp->ui32FenceValue,
+									      &psSyncOp->ui32UpdateValue);
+	return eError;
+}
+
+#if defined(PDUMP)
+IMG_INTERNAL IMG_VOID SyncPrimPDump(PVRSRV_CLIENT_SYNC_PRIM *psSync)
+{
+	SYNC_PRIM *psSyncInt;
+	SYNC_PRIM_BLOCK *psSyncBlock;
+	SYNC_PRIM_CONTEXT *psContext;
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(psSync != IMG_NULL);
+	psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+
+	if (psSyncInt->eType != SYNC_PRIM_TYPE_LOCAL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "SyncPrimPDump: Invalid sync type"));
+		PVR_ASSERT(IMG_FALSE);
+		return;
+	}
+
+	psSyncBlock = psSyncInt->u.sLocal.psSyncBlock;
+	psContext = psSyncBlock->psContext;
+
+	eError = BridgeSyncPrimPDump(psContext->hBridge,
+								 psSyncBlock->hServerSyncPrimBlock,
+								 SyncPrimGetOffset(psSyncInt));
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: failed with error %d",
+				__FUNCTION__, eError));
+	}
+    PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+IMG_INTERNAL IMG_VOID SyncPrimPDumpValue(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value)
+{
+	SYNC_PRIM *psSyncInt;
+	SYNC_PRIM_BLOCK *psSyncBlock;
+	SYNC_PRIM_CONTEXT *psContext;
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(psSync != IMG_NULL);
+	psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+
+	if (psSyncInt->eType != SYNC_PRIM_TYPE_LOCAL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "SyncPrimPDump: Invalid sync type"));
+		PVR_ASSERT(IMG_FALSE);
+		return;
+	}
+
+	psSyncBlock = psSyncInt->u.sLocal.psSyncBlock;
+	psContext = psSyncBlock->psContext;
+
+	eError = BridgeSyncPrimPDumpValue(psContext->hBridge,
+								 psSyncBlock->hServerSyncPrimBlock,
+								 SyncPrimGetOffset(psSyncInt),
+								 ui32Value);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: failed with error %d",
+				__FUNCTION__, eError));
+	}
+    PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+IMG_INTERNAL IMG_VOID SyncPrimPDumpPol(PVRSRV_CLIENT_SYNC_PRIM *psSync,
+									   IMG_UINT32 ui32Value,
+									   IMG_UINT32 ui32Mask,
+									   PDUMP_POLL_OPERATOR eOperator,
+									   IMG_UINT32 ui32PDumpFlags)
+{
+	SYNC_PRIM *psSyncInt;
+	SYNC_PRIM_BLOCK *psSyncBlock;
+	SYNC_PRIM_CONTEXT *psContext;
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(psSync != IMG_NULL);
+	psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+
+	if (psSyncInt->eType != SYNC_PRIM_TYPE_LOCAL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "SyncPrimPDumpPol: Invalid sync type (expected SYNC_PRIM_TYPE_LOCAL)"));
+		PVR_ASSERT(IMG_FALSE);
+		return;
+	}
+
+	psSyncBlock = psSyncInt->u.sLocal.psSyncBlock;
+	psContext = psSyncBlock->psContext;
+
+	eError = BridgeSyncPrimPDumpPol(psContext->hBridge,
+									psSyncBlock->hServerSyncPrimBlock,
+									SyncPrimGetOffset(psSyncInt),
+									ui32Value,
+									ui32Mask,
+									eOperator,
+									ui32PDumpFlags);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: failed with error %d",
+				__FUNCTION__, eError));
+	}
+    PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+IMG_INTERNAL IMG_VOID SyncPrimOpPDumpPol(PSYNC_OP_COOKIE psCookie,
+									   PDUMP_POLL_OPERATOR eOperator,
+									   IMG_UINT32 ui32PDumpFlags)
+{
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(psCookie != IMG_NULL);
+
+	eError = BridgeSyncPrimOpPDumpPol(psCookie->hBridge,
+									psCookie->hServerCookie,
+									eOperator,
+									ui32PDumpFlags);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: failed with error %d",
+				__FUNCTION__, eError));
+	}
+	
+    PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+IMG_INTERNAL IMG_VOID SyncPrimPDumpCBP(PVRSRV_CLIENT_SYNC_PRIM *psSync,
+									   IMG_UINT64 uiWriteOffset,
+									   IMG_UINT64 uiPacketSize,
+									   IMG_UINT64 uiBufferSize)
+{
+	SYNC_PRIM *psSyncInt;
+	SYNC_PRIM_BLOCK *psSyncBlock;
+	SYNC_PRIM_CONTEXT *psContext;
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(psSync != IMG_NULL);
+	psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+
+	if (psSyncInt->eType != SYNC_PRIM_TYPE_LOCAL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "SyncPrimPDumpCBP: Invalid sync type"));
+		PVR_ASSERT(IMG_FALSE);
+		return;
+	}
+
+	psSyncBlock = psSyncInt->u.sLocal.psSyncBlock;
+	psContext = psSyncBlock->psContext;
+
+	/* FIXME: uiWriteOffset, uiPacketSize, uiBufferSize were changed to
+	 * 64-bit quantities to resolve Windows compiler warnings.
+	 * However the bridge is only 32-bit hence compiler warnings
+	 * of implicit cast and loss of data.
+	 * Added explicit cast and assert to remove warning.
+	 */
+#if (defined(_WIN32) && !defined(_WIN64)) || (defined(LINUX) && defined(__i386__))
+	PVR_ASSERT(uiWriteOffset<IMG_UINT32_MAX);
+	PVR_ASSERT(uiPacketSize<IMG_UINT32_MAX);
+	PVR_ASSERT(uiBufferSize<IMG_UINT32_MAX);
+#endif
+	eError = BridgeSyncPrimPDumpCBP(psContext->hBridge,
+									psSyncBlock->hServerSyncPrimBlock,
+									SyncPrimGetOffset(psSyncInt),
+									(IMG_UINT32)uiWriteOffset,
+									(IMG_UINT32)uiPacketSize,
+									(IMG_UINT32)uiBufferSize);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: failed with error %d",
+				__FUNCTION__, eError));
+	}
+    PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+#endif
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/shared/common/tlclient.c b/drivers/external_drivers/intel_media/graphics/rgx/services/shared/common/tlclient.c
new file mode 100644
index 0000000..f21a6a3
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/shared/common/tlclient.c
@@ -0,0 +1,365 @@
+/*************************************************************************/ /*!
+@File			tlclient.c
+@Title          Services Transport Layer shared API
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Transport layer common API used in both clients and server
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* DESIGN NOTE
+ * This transport layer consumer-role API was created as a shared API when a
+ * client wanted to read the data of a TL stream from within the KM server
+ * driver. This was in addition to the existing clients supported externally
+ * by the UM client library component via PVR API layer.
+ * This shared API is thus used by the PVR TL API in the client library and
+ * by clients internal to the server driver module. It depends on
+ * client entry points of the TL and DEVMEM bridge modules. These entry points
+ * encapsulate from the TL shared API whether a direct bridge or an indirect
+ * (ioctl) bridge is used.
+ * One reason for needing this layer centres around the fact that some of the
+ * API functions make multiple bridge calls and the logic that glues these
+ * together is common regardless of client location. Further this layer has
+ * allowed the defensive coding that checks parameters to move into the PVR
+ * API layer where untrusted clients enter giving a more efficient KM code path.
+ */
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+#include "pvr_debug.h"
+#include "osfunc.h"
+
+#include "allocmem.h"
+#include "devicemem.h"
+
+#include "tlclient.h"
+#include "client_pvrtl_bridge.h"
+
+/* Defines/Constants
+ */
+
+#define PVR_CONNECT_NO_FLAGS 	0x00U
+#define NO_ACQUIRE 				0xffffffffU
+#define DIRECT_BRIDGE_HANDLE	((IMG_HANDLE)0xDEADBEEFU)
+
+/* User-side stream descriptor structure.
+ */
+typedef struct _TL_STREAM_DESC_
+{
+	/* Handle on kernel-side stream descriptor*/
+	IMG_HANDLE		hServerSD;
+
+	/* Stream data buffer variables */
+	DEVMEM_EXPORTCOOKIE		sExportCookie;
+	DEVMEM_MEMDESC*			psUMmemDesc;
+	IMG_PBYTE				pBaseAddr;
+
+	/* Offset in bytes into the circular buffer and valid only after
+	 * an Acquire call and undefined after a release. */
+	IMG_UINT32 	uiReadOffset;
+
+	/* Always a positive integer when the Acquire call returns and a release
+	 * is outstanding. Undefined at all other times. */
+	IMG_UINT32	uiReadLen;
+
+} TL_STREAM_DESC, *PTL_STREAM_DESC;
+
+
+/* Used in direct connections only */
+IMG_INTERNAL
+PVRSRV_ERROR TLClientConnect(IMG_HANDLE* phSrvHandle)
+{
+	/* Check the caller provided valid pointer*/
+	if(!phSrvHandle)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "TLClientConnect: Null connection handle"));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	*phSrvHandle = DIRECT_BRIDGE_HANDLE;
+
+	return PVRSRV_OK;
+
+
+}
+
+
+/* Used in direct connections only */
+IMG_INTERNAL
+PVRSRV_ERROR IMG_CALLCONV TLClientDisconnect(IMG_HANDLE hSrvHandle)
+{
+	if (hSrvHandle != (IMG_HANDLE)0xDEADBEEF)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "TLClientDisconnect: Invalid connection handle"));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	return PVRSRV_OK;
+}
+
+
+IMG_INTERNAL
+PVRSRV_ERROR TLClientOpenStream(IMG_HANDLE hSrvHandle,
+		IMG_PCHAR    pszName,
+		IMG_UINT32   ui32Mode,
+		IMG_HANDLE*  phSD)
+{
+	PVRSRV_ERROR 				eError = PVRSRV_OK;
+	TL_STREAM_DESC* 			psSD = 0;
+	DEVMEM_SERVER_EXPORTCOOKIE 	hServerExportCookie;
+
+	PVR_ASSERT(hSrvHandle);
+	PVR_ASSERT(pszName);
+	PVR_ASSERT(phSD);
+	*phSD = NULL;
+
+	/* Allocate memory for the stream descriptor object, initialise with
+	 * "no data read" yet. */
+	psSD = OSAllocZMem(sizeof(TL_STREAM_DESC));
+	if (psSD == NULL)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		PVR_DPF((PVR_DBG_ERROR, "BridgeTLOpenStream: KM returned %d", eError));
+		goto e0;
+	}
+	psSD->uiReadLen = psSD->uiReadOffset = NO_ACQUIRE;
+
+	/* Send open stream request to kernel server to get stream handle and
+	 * buffer cookie so we can get access to the buffer in this process. */
+	eError = BridgeTLOpenStream(hSrvHandle, pszName, ui32Mode,
+										&psSD->hServerSD, &hServerExportCookie);
+	if (eError != PVRSRV_OK)
+	{
+	    if ((ui32Mode & PVRSRV_STREAM_FLAG_OPEN_WAIT) &&
+		    (eError == PVRSRV_ERROR_TIMEOUT))
+	    {
+	    	goto e1;
+	    }
+	    PVR_LOGG_IF_ERROR(eError, "BridgeTLOpenStream", e1);
+	}
+
+	/* Convert server export cookie into a cookie for use by this client */
+	eError = DevmemMakeServerExportClientExport(hSrvHandle,
+									hServerExportCookie, &psSD->sExportCookie);
+	PVR_LOGG_IF_ERROR(eError, "DevmemMakeServerExportClientExport", e2);
+
+	/* Now convert client cookie into a client handle on the buffer's
+	 * physical memory region */
+	eError = DevmemImport(hSrvHandle, &psSD->sExportCookie,
+						PVRSRV_MEMALLOCFLAG_CPU_READABLE, &psSD->psUMmemDesc);
+	PVR_LOGG_IF_ERROR(eError, "DevmemImport", e3);
+
+	/* Now map the memory into the virtual address space of this process. */
+	eError = DevmemAcquireCpuVirtAddr(psSD->psUMmemDesc, (IMG_PVOID *)
+															&psSD->pBaseAddr);
+	PVR_LOGG_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", e4);
+
+	/* Return client descriptor handle to caller */
+	*phSD = psSD;
+	return PVRSRV_OK;
+
+/* Clean up post buffer setup */
+e4:
+	DevmemFree(psSD->psUMmemDesc);
+e3:
+	(void) DevmemUnmakeServerExportClientExport(hSrvHandle,
+				&psSD->sExportCookie);
+/* Clean up post stream open */
+e2:
+	BridgeTLCloseStream(hSrvHandle, psSD->hServerSD);
+
+/* Cleanup post allocation of the descriptor object */
+e1:
+	OSFreeMem(psSD);
+
+e0:
+	return eError;
+}
+
+
+IMG_INTERNAL
+PVRSRV_ERROR TLClientCloseStream(IMG_HANDLE hSrvHandle,
+		IMG_HANDLE hSD)
+{
+	PVRSRV_ERROR          eError = PVRSRV_OK;
+	TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD;
+
+	PVR_ASSERT(hSrvHandle);
+	PVR_ASSERT(hSD);
+
+	/* Check the caller provided connection is valid */
+	if(!psSD->hServerSD)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "TLClientCloseStream: descriptor already closed/not open"));
+		return PVRSRV_ERROR_HANDLE_NOT_FOUND;
+	}
+
+	/* Check if acquire is outstanding, perform release if it is, ignore result
+	 * as there is not much we can do if it is an error other than close */
+	if (psSD->uiReadLen != NO_ACQUIRE)
+	{
+		(void) BridgeTLReleaseData(hSrvHandle, psSD->hServerSD,
+									psSD->uiReadOffset, psSD->uiReadLen);
+		psSD->uiReadLen = psSD->uiReadOffset = NO_ACQUIRE;
+	}
+
+	/* Clean up DevMem resources used for this stream in this client */
+	DevmemReleaseCpuVirtAddr(psSD->psUMmemDesc);
+
+	DevmemFree(psSD->psUMmemDesc);
+
+	/* Ignore error, not much that can be done */
+	(void) DevmemUnmakeServerExportClientExport(hSrvHandle,
+			&psSD->sExportCookie);
+
+
+	/* Send close to server to clean up kernel mode resources for this
+	 * handle and release the memory. */
+	eError = BridgeTLCloseStream(hSrvHandle, psSD->hServerSD);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "BridgeTLCloseStream: KM returned %d", eError));
+		/*/ Not much we can do with error, fall through to clean up
+		 * return eError; */
+	}
+
+	OSCachedMemSet(psSD, 0x00, sizeof(TL_STREAM_DESC));
+	OSFreeMem (psSD);
+
+	return eError;
+}
+
+
+IMG_INTERNAL
+PVRSRV_ERROR TLClientAcquireData(IMG_HANDLE hSrvHandle,
+		IMG_HANDLE  hSD,
+		IMG_PBYTE*  ppPacketBuf,
+		IMG_UINT32* pui32BufLen)
+{
+	PVRSRV_ERROR 		  eError = PVRSRV_OK;
+	TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD;
+
+	PVR_ASSERT(hSrvHandle);
+	PVR_ASSERT(hSD);
+	PVR_ASSERT(ppPacketBuf);
+	PVR_ASSERT(pui32BufLen);
+
+	/* Check Acquire has not been called twice in a row without a release */
+	if (psSD->uiReadOffset != NO_ACQUIRE)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "TLClientAcquireData: acquire already outstanding"));
+		return PVRSRV_ERROR_RETRY;
+	}
+
+	*pui32BufLen = 0;
+	/* Ask the kernel server for the next chunk of data to read */
+	eError = BridgeTLAcquireData(hSrvHandle, psSD->hServerSD,
+									&psSD->uiReadOffset, &psSD->uiReadLen);
+	if (eError != PVRSRV_OK)
+	{
+		if ((eError != PVRSRV_ERROR_RESOURCE_UNAVAILABLE) &&
+			(eError != PVRSRV_ERROR_TIMEOUT))
+		{
+			PVR_DPF((PVR_DBG_ERROR, "BridgeTLAcquireData: KM returned %d", eError));
+		}
+		psSD->uiReadOffset = psSD->uiReadLen = NO_ACQUIRE;
+		return eError;
+	}
+
+	/* Return the data offset and length to the caller if bytes are available
+	 * to be read. Could be zero for non-blocking mode. */
+	if (psSD->uiReadLen)
+	{
+		*ppPacketBuf = psSD->pBaseAddr + psSD->uiReadOffset;
+		*pui32BufLen = psSD->uiReadLen;
+	}
+	else
+	{
+		/* On non blocking, zero length data could be returned from server
+		 * Which is basically a no-acquire operation */
+		*ppPacketBuf = 0;
+		*pui32BufLen = 0;
+	}
+
+	return eError;
+}
+
+
+IMG_INTERNAL
+PVRSRV_ERROR TLClientReleaseData(IMG_HANDLE hSrvHandle,
+		IMG_HANDLE hSD)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD;
+
+	PVR_ASSERT(hSrvHandle);
+	PVR_ASSERT(hSD);
+
+	/* the previous acquire did not return any data, this is a no-operation */
+	if (psSD->uiReadLen == 0)
+	{
+		return PVRSRV_OK;
+	}
+
+	/* Check release has not been called twice in a row without an acquire */
+	if (psSD->uiReadOffset == NO_ACQUIRE)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "TLClientReleaseData_: no acquire to release"));
+		return PVRSRV_ERROR_RETRY;
+	}
+
+	/* Inform the kernel to release the data from the buffer */
+	eError = BridgeTLReleaseData(hSrvHandle, psSD->hServerSD,
+										psSD->uiReadOffset, psSD->uiReadLen);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "BridgeTLReleaseData: KM returned %d", eError));
+		/* Need to continue to keep client data consistent, fall through
+		 * return eError */
+	}
+
+	/* Reset state to indicate no outstanding acquire */
+	psSD->uiReadLen = psSD->uiReadOffset = NO_ACQUIRE;
+
+	return eError;
+}
+
+
+/******************************************************************************
+ End of file (tlclient.c)
+******************************************************************************/
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/shared/common/uniq_key_splay_tree.c b/drivers/external_drivers/intel_media/graphics/rgx/services/shared/common/uniq_key_splay_tree.c
new file mode 100644
index 0000000..84296097
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/shared/common/uniq_key_splay_tree.c
@@ -0,0 +1,244 @@
+/*************************************************************************/ /*!
+@File
+@Title          Provides splay-trees.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Implementation of splay-trees.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "allocmem.h" /* for OSMemAlloc / OSMemFree */
+#include "osfunc.h" /* for OSMemFree */
+#include "pvr_debug.h"
+#include "uniq_key_splay_tree.h"
+
+/**
+ * This function performs a simple top down splay
+ *
+ * @param ui32Flags the flags that must splayed to the root (if possible).
+ * @param psTree The tree to splay.
+ * @return the resulting tree after the splay operation.
+ */
+IMG_INTERNAL
+IMG_PSPLAY_TREE PVRSRVSplay (IMG_UINT32 ui32Flags, IMG_PSPLAY_TREE psTree) 
+{
+	IMG_SPLAY_TREE sTmp1;
+	IMG_PSPLAY_TREE psLeft;
+	IMG_PSPLAY_TREE psRight;
+	IMG_PSPLAY_TREE psTmp2;
+
+	if (psTree == IMG_NULL)
+	{
+		return IMG_NULL;
+	}
+	
+	sTmp1.psLeft = IMG_NULL;
+	sTmp1.psRight = IMG_NULL;
+
+	psLeft = &sTmp1;
+	psRight = &sTmp1;
+	
+    for (;;)
+	{
+		if (ui32Flags < psTree->ui32Flags)
+		{
+			if (psTree->psLeft == IMG_NULL)
+			{
+				break;
+			}
+			
+			if (ui32Flags < psTree->psLeft->ui32Flags)
+			{
+				/* if we get to this point, we need to rotate right the tree */
+				psTmp2 = psTree->psLeft;
+				psTree->psLeft = psTmp2->psRight;
+				psTmp2->psRight = psTree;
+				psTree = psTmp2;
+				if (psTree->psLeft == IMG_NULL)
+				{
+					break;
+				}
+			}
+
+			/* if we get to this point, we need to link right */
+			psRight->psLeft = psTree;
+			psRight = psTree;
+			psTree = psTree->psLeft;
+		}
+		else
+		{
+			if (ui32Flags > psTree->ui32Flags)
+			{
+				if (psTree->psRight == IMG_NULL)
+				{
+					break;
+				}
+
+				if (ui32Flags > psTree->psRight->ui32Flags)
+				{
+					/* if we get to this point, we need to rotate left the tree */
+					psTmp2 = psTree->psRight;
+					psTree->psRight = psTmp2->psLeft;
+					psTmp2->psLeft = psTree;
+					psTree = psTmp2;
+					if (psTree->psRight == IMG_NULL)
+					{
+						break;
+					}
+				}
+
+				/* if we get to this point, we need to link left */
+				psLeft->psRight = psTree;
+				psLeft = psTree;
+				psTree = psTree->psRight;
+			}
+			else
+			{
+				break;
+			}
+		}
+    }
+
+	/* at this point re-assemble the tree */
+    psLeft->psRight = psTree->psLeft;
+    psRight->psLeft = psTree->psRight;
+    psTree->psLeft = sTmp1.psRight;
+    psTree->psRight = sTmp1.psLeft;
+    return psTree;
+}
+
+
+/**
+ * This function inserts a node into the Tree (unless it is already present, in
+ * which case it is equivalent to performing only a splay operation
+ *
+ * @param ui32Flags the key of the new node
+ * @param psTree The tree into which one wants to add a new node
+ * @return The resulting with the node in it
+ */
+IMG_INTERNAL
+IMG_PSPLAY_TREE PVRSRVInsert(IMG_UINT32 ui32Flags, IMG_PSPLAY_TREE psTree) 
+{
+    IMG_PSPLAY_TREE psNew;
+
+	if (psTree != IMG_NULL)
+	{
+		psTree = PVRSRVSplay(ui32Flags, psTree);
+		if (psTree->ui32Flags == ui32Flags)
+		{
+			return psTree;
+		}
+	}
+	
+	psNew = (IMG_PSPLAY_TREE) OSAllocMem(sizeof(IMG_SPLAY_TREE));
+	if (psNew == IMG_NULL)
+	{
+		PVR_DPF ((PVR_DBG_ERROR, "Error: failed to allocate memory to add a node to the splay tree."));
+		return IMG_NULL;
+	}
+	
+	psNew->ui32Flags = ui32Flags;
+	OSCachedMemSet(&(psNew->buckets[0]), 0, sizeof(psNew->buckets));
+
+#if defined(HAS_BUILTIN_CTZLL)
+	psNew->bHasEltsMapping = ~(((IMG_ELTS_MAPPINGS) 1 << (sizeof(psNew->buckets) / (sizeof(psNew->buckets[0])))) - 1);
+#endif
+
+    if (psTree == IMG_NULL)
+	{
+		psNew->psLeft  = IMG_NULL;
+		psNew->psRight = IMG_NULL;
+		return psNew;
+    }
+
+    if (ui32Flags < psTree->ui32Flags)
+	{
+		psNew->psLeft  = psTree->psLeft;
+		psNew->psRight = psTree;
+		psTree->psLeft = IMG_NULL;
+    }
+	else 
+	{
+		psNew->psRight  = psTree->psRight;
+		psNew->psLeft   = psTree;
+		psTree->psRight = IMG_NULL;
+    }
+
+	return psNew;
+}
+
+
+/**
+ * Deletes a node from the tree (unless it is not there, in which case it is
+ * equivalent to a splay operation)
+ * 
+ * @param ui32Flags the value of the node to remove
+ * @param psTree the tree into which the node must be removed 
+ * @return the resulting tree
+ */
+IMG_INTERNAL
+IMG_PSPLAY_TREE PVRSRVDelete(IMG_UINT32 ui32Flags, IMG_PSPLAY_TREE psTree)
+{
+    IMG_PSPLAY_TREE psTmp;
+    if (psTree == IMG_NULL)
+	{
+		return IMG_NULL;
+	}
+
+    psTree = PVRSRVSplay(ui32Flags, psTree);
+    if (ui32Flags == psTree->ui32Flags)
+	{
+		/* The value was present in the tree */
+		if (psTree->psLeft == IMG_NULL)
+		{
+			psTmp = psTree->psRight;
+		}
+		else
+		{
+			psTmp = PVRSRVSplay(ui32Flags, psTree->psLeft);
+			psTmp->psRight = psTree->psRight;
+		}
+		OSFreeMem(psTree);
+		return psTmp;
+    }
+
+	/* the value was not present in the tree, so just return it as is (after the
+	 * splay) */
+    return psTree;
+}
+
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/shared/common/uniq_key_splay_tree.h b/drivers/external_drivers/intel_media/graphics/rgx/services/shared/common/uniq_key_splay_tree.h
new file mode 100644
index 0000000..e58c0b7
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/shared/common/uniq_key_splay_tree.h
@@ -0,0 +1,95 @@
+/*************************************************************************/ /*!
+@File
+@Title          Splay trees interface
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Provides debug functionality
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef UNIQ_KEY_SPLAY_TREE_H_
+#define UNIQ_KEY_SPLAY_TREE_H_
+
+#include "img_types.h"
+
+#if defined(__GNUC__) && defined(__x86_64__)
+  /* note, the 64 bits requirements should not be necessary.
+     Unfortunately, linking against the __ctzdi function (in 32bits) failed.  */
+
+  #define HAS_BUILTIN_CTZLL
+#endif
+
+#if defined(HAS_BUILTIN_CTZLL)
+  /* if the compiler can provide this builtin, then map the is_bucket_n_free?
+     into an int. This way, the driver can find the first non empty without loop */
+
+  typedef IMG_UINT64 IMG_ELTS_MAPPINGS;
+#endif
+
+
+/* head of list of free boundary tags for indexed by pvr_log2 of the
+   boundary tag size */
+#define FREE_TABLE_LIMIT 40
+
+struct _BT_;
+
+typedef struct img_splay_tree 
+{
+	/* left child/subtree */
+    struct img_splay_tree * psLeft;
+
+	/* right child/subtree */
+    struct img_splay_tree * psRight;
+
+    /* Flags to match on this span, used as the key. */
+    IMG_UINT32 ui32Flags;
+
+#if defined(HAS_BUILTIN_CTZLL)
+	/* each bit of this int is a boolean telling if the corresponding
+	   bucket is empty or not */
+    IMG_ELTS_MAPPINGS bHasEltsMapping;
+#endif
+	
+	struct _BT_ * buckets[FREE_TABLE_LIMIT];
+} IMG_SPLAY_TREE, *IMG_PSPLAY_TREE;
+
+IMG_PSPLAY_TREE PVRSRVSplay (IMG_UINT32 ui32Flags, IMG_PSPLAY_TREE psTree);
+IMG_PSPLAY_TREE PVRSRVInsert(IMG_UINT32 ui32Flags, IMG_PSPLAY_TREE psTree);
+IMG_PSPLAY_TREE PVRSRVDelete(IMG_UINT32 ui32Flags, IMG_PSPLAY_TREE psTree);
+
+
+#endif /* !UNIQ_KEY_SPLAY_TREE_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/shared/devices/rgx/rgx_compat_bvnc.c b/drivers/external_drivers/intel_media/graphics/rgx/services/shared/devices/rgx/rgx_compat_bvnc.c
new file mode 100644
index 0000000..c045270
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/shared/devices/rgx/rgx_compat_bvnc.c
@@ -0,0 +1,150 @@
+/*************************************************************************/ /*!
+@File
+@Title          Functions for BVNC manipulating
+
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Utility functions used internally by device memory management
+                code.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/******************************************************************************
+ * RGX Version packed into 24-bit (BNC) and string (V) to be used by Compatibility Check
+ *****************************************************************************/
+
+#include "rgx_compat_bvnc.h"
+
+IMG_VOID rgx_bvnc_packed(IMG_UINT32 *pui32OutBNC, IMG_CHAR *pszOutV, IMG_UINT32 ui32OutVMaxLen, 
+								IMG_UINT32 ui32B, IMG_CHAR *pszV, IMG_UINT32 ui32N, IMG_UINT32 ui32C)
+{
+#if 0
+	IMG_UINT32 i = ui32OutVMaxLen;
+#endif
+	IMG_UINT32 ui32InVLen = 0;
+	IMG_UINT32 ui32V = 0;
+
+	*pui32OutBNC = (((ui32B & 0xFF) << 16) | ((ui32N & 0xFF) << 8) |
+												(ui32C & 0xFF));
+
+	/* Using dword accesses instead of byte accesses when forming V part of BVNC */
+	ui32OutVMaxLen = ui32OutVMaxLen;
+	while (pszV[ui32InVLen])
+	{
+		ui32V |= ((((IMG_UINT32)pszV[ui32InVLen]) & 0xFF) << (ui32InVLen*8));
+		ui32InVLen++;
+	}
+
+	*((IMG_UINT32 *)pszOutV) = ui32V;
+
+#if 0
+	for (i = 0; i < (ui32OutVMaxLen + 1); i++)
+		pszOutV[i] = '\0';
+
+	while ((ui32OutVMaxLen > 0) && *pszV)
+	{
+		*pszOutV++ = *pszV++;
+		ui32OutVMaxLen--;
+	}
+#endif
+}
+
+IMG_VOID rgx_bvnc_pack_hw(IMG_UINT32 *pui32OutBNC, IMG_CHAR *pszOutV, IMG_UINT32 ui32OutVMaxLen, 
+								IMG_UINT32 ui32B, IMG_CHAR *pszFwV, IMG_UINT32 ui32V, IMG_UINT32 ui32N, IMG_UINT32 ui32C)
+{
+	IMG_UINT32 i = ui32OutVMaxLen;
+	IMG_CHAR *pszPointer;
+
+	*pui32OutBNC = (((ui32B & 0xFF) << 16) | ((ui32N & 0xFF) << 8) |
+												(ui32C & 0xFF));
+
+	for (i = 0; i < (ui32OutVMaxLen + 1); i++)
+		pszOutV[i] = '\0';
+
+	/* find out whether pszFwV is integer number or not */
+	pszPointer = pszFwV;
+	while (*pszPointer)
+	{
+		if ((*pszPointer < '0') || (*pszPointer > '9'))
+		{
+			break;
+		}
+		pszPointer++;
+	}
+
+	if (*pszPointer)
+	{
+		/* pszFwV is not a number, so taking V from it */
+		pszPointer = pszFwV;
+		while ((ui32OutVMaxLen > 0) && *pszPointer)
+		{
+			*pszOutV++ = *pszPointer++;
+			ui32OutVMaxLen--;
+		}
+	}
+	else
+	{
+		/* pszFwV is a number, taking V from ui32V */
+		IMG_CHAR aszBuf[4];
+
+		pszPointer = aszBuf;
+
+		if (ui32V > 99)
+			pszPointer+=3;
+		else if (ui32V > 9)
+			pszPointer+=2;
+		else
+			pszPointer+=1;
+
+		*pszPointer-- = '\0';
+		*pszPointer = '0';
+
+		while (ui32V > 0)
+		{
+			*pszPointer-- = (ui32V % 10) + '0';
+			ui32V /= 10;
+		}
+		
+		pszPointer = aszBuf;
+		while ((ui32OutVMaxLen > 0) && *pszPointer)
+		{
+			*pszOutV++ = *pszPointer++;
+			ui32OutVMaxLen--;
+		}
+	}
+}
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/shared/devices/rgx/rgx_compat_bvnc.h b/drivers/external_drivers/intel_media/graphics/rgx/services/shared/devices/rgx/rgx_compat_bvnc.h
new file mode 100644
index 0000000..61e11f0
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/shared/devices/rgx/rgx_compat_bvnc.h
@@ -0,0 +1,96 @@
+/*************************************************************************/ /*!
+@File
+@Title          Functions for BVNC manipulating
+
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Utility functions used internally by device memory management
+                code.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__RGX_COMPAT_BVNC_H__)
+#define __RGX_COMPAT_BVNC_H__
+
+#include "img_types.h"
+
+/******************************************************************************
+ * RGX Version packed into 24-bit (BNC) and string (V) to be used by Compatibility Check
+ *****************************************************************************/
+
+#define RGX_BVNC_PACK_MASK_B 0x00FF0000
+#define RGX_BVNC_PACK_MASK_N 0x0000FF00
+#define RGX_BVNC_PACK_MASK_C 0x000000FF
+
+#define RGX_BVNC_PACKED_EXTR_B(BVNC) (((BVNC).ui32BNC >> 16) & 0xFF)
+#define RGX_BVNC_PACKED_EXTR_V(BVNC) ((BVNC).aszV)
+#define RGX_BVNC_PACKED_EXTR_N(BVNC) (((BVNC).ui32BNC >> 8) & 0xFF)
+#define RGX_BVNC_PACKED_EXTR_C(BVNC) (((BVNC).ui32BNC >> 0) & 0xFF)
+
+#define RGX_BVNC_EQUAL(L,R,all,version,lenmax,bnc,v) do {													\
+										(lenmax) = IMG_FALSE;												\
+										(bnc) = IMG_FALSE;													\
+										(v) = IMG_FALSE;													\
+										(version) = ((L).ui32LayoutVersion == (R).ui32LayoutVersion);		\
+										if (version)														\
+										{																	\
+											(lenmax) = ((L).ui32VLenMax == (R).ui32VLenMax);				\
+										}																	\
+										if (lenmax)															\
+										{																	\
+											(bnc) = ((L).ui32BNC == (R).ui32BNC);							\
+										}																	\
+										if (bnc)															\
+										{																	\
+											(L).aszV[(L).ui32VLenMax] = '\0';								\
+											(R).aszV[(R).ui32VLenMax] = '\0';								\
+											(v) = (OSStringCompare((L).aszV, (R).aszV)==0);					\
+										}																	\
+										(all) = (version) && (lenmax) && (bnc) && (v);						\
+									} while (0)
+
+IMG_VOID rgx_bvnc_packed(IMG_UINT32 *pui32OutBNC, IMG_CHAR *pszOutV, IMG_UINT32 ui32OutVMaxLen,
+							IMG_UINT32 ui32B, IMG_CHAR *pszV, IMG_UINT32 ui32N, IMG_UINT32 ui32C);
+IMG_VOID rgx_bvnc_pack_hw(IMG_UINT32 *pui32OutBNC, IMG_CHAR *pszOutV, IMG_UINT32 ui32OutVMaxLen,
+							IMG_UINT32 ui32B, IMG_CHAR *pszFwV, IMG_UINT32 ui32V, IMG_UINT32 ui32N, IMG_UINT32 ui32C);
+
+#endif /*  __RGX_COMPAT_BVNC_H__ */
+
+/******************************************************************************
+ End of file (rgx_compat_bvnc.h)
+******************************************************************************/
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/shared/include/cache_internal.h b/drivers/external_drivers/intel_media/graphics/rgx/services/shared/include/cache_internal.h
new file mode 100644
index 0000000..8326724
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/shared/include/cache_internal.h
@@ -0,0 +1,58 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services cache management header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines for cache management which are visible internally only.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _CACHE_INTERNAL_H_
+#define _CACHE_INTERNAL_H_
+#include "img_types.h"
+#include "pvrsrv_devmem.h"
+#include "cache_external.h"
+
+typedef struct _CACHE_BATCH_OP_ENTRY_
+{
+	IMG_UINT32			ui32PMREntryIndex;
+	PVRSRV_CACHE_OP  	eCacheOp;
+	IMG_DEVMEM_SIZE_T	uiSize;
+    IMG_DEVMEM_OFFSET_T uiOffset;
+} CACHE_BATCH_OP_ENTRY;
+
+#endif	/* _CACHE_INTERNAL_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/shared/include/devicemem.h b/drivers/external_drivers/intel_media/graphics/rgx/services/shared/include/devicemem.h
new file mode 100644
index 0000000..6748e72
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/shared/include/devicemem.h
@@ -0,0 +1,558 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device Memory Management core internal
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Services internal interface to core device memory management
+                functions that are shared between client and server code.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef SRVCLIENT_DEVICEMEM_H
+#define SRVCLIENT_DEVICEMEM_H
+
+/********************************************************************************
+ *                                                                              *
+ *   +------------+   +------------+    +--------------+      +--------------+  *
+ *   | a   sub-   |   | a   sub-   |    |  an          |      | allocation   |  *
+ *   | allocation |   | allocation |    |  allocation  |      | also mapped  |  *
+ *   |            |   |            |    |  in proc 1   |      | into proc 2  |  *
+ *   +------------+   +------------+    +--------------+      +--------------+  *
+ *             |         |                     |                     |          *
+ *          +--------------+            +--------------+      +--------------+  *
+ *          | page   gran- |            | page   gran- |      | page   gran- |  *
+ *          | ular mapping |            | ular mapping |      | ular mapping |  *
+ *          +--------------+            +--------------+      +--------------+  *
+ *                 |                                 |          |               *
+ *                 |                                 |          |               *
+ *                 |                                 |          |               *
+ *          +--------------+                       +--------------+             *
+ *          |              |                       |              |             *
+ *          | A  "P.M.R."  |                       | A  "P.M.R."  |             *
+ *          |              |                       |              |             *
+ *          +--------------+                       +--------------+             *
+ *                                                                              *
+ ********************************************************************************/
+
+/*
+    All device memory allocations are ultimately a view upon (not
+    necessarily the whole of) a "PMR".
+
+    A PMR is a "Physical Memory Resource", which may be a
+    "pre-faulted" lump of physical memory, or it may be a
+    representation of some physical memory that will be instantiated
+    at some future time.
+
+    PMRs always represent multiple of some power-of-2 "contiguity"
+    promised by the PMR, which will allow them to be mapped in whole
+    pages into the device MMU.  As memory allocations may be smaller
+    than a page, these mappings may be suballocated and thus shared
+    between multiple allocations in one process.  A PMR may also be
+    mapped simultaneously into multiple device memory contexts
+    (cross-process scenario), however, for security reasons, it is not
+    legal to share a PMR "both ways" at once, that is, mapped into
+    multiple processes and divided up amongst several suballocations.
+
+    This PMR terminology is introduced here for background
+    information, but is generally of little concern to the caller of
+    this API.  This API handles suballocations and mappings, and the
+    caller thus deals primarily with MEMORY DESCRIPTORS representing
+    an allocation or suballocation, HEAPS representing ranges of
+    virtual addresses in a CONTEXT.
+*/
+
+/*
+   |<---------------------------context------------------------------>|
+   |<-------heap------->|   |<-------heap------->|<-------heap------->|
+   |<-alloc->|          |   |<-alloc->|<-alloc->||   |<-alloc->|      |
+*/
+
+#include "img_types.h"
+#include "devicemem_typedefs.h"
+#include "pdumpdefs.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+
+#include "pdump.h"
+
+/* Use GET and SET function to access this */
+IMG_INTERNAL extern IMG_UINT32  g_uiLog2PageSize;
+
+#define GET_LOG2_PAGESIZE() ( (const IMG_UINT32) g_uiLog2PageSize )
+#define SET_LOG2_PAGESIZE(ui32Log2PageSize) \
+	{ \
+		PVR_ASSERT( (ui32Log2PageSize > 11) && (ui32Log2PageSize < 22) ); \
+		g_uiLog2PageSize = (IMG_UINT32) ui32Log2PageSize; \
+	}
+
+typedef IMG_UINT32 DEVMEM_HEAPCFGID;
+#define DEVMEM_HEAPCFG_FORCLIENTS 0
+#define DEVMEM_HEAPCFG_META 1
+
+/*
+  In order to call the server side functions, we need a bridge handle.
+  We abstract that here, as we may wish to change its form.
+ */
+
+typedef IMG_HANDLE DEVMEM_BRIDGE_HANDLE;
+
+/*
+ * DevmemCreateContext()
+ *
+ * Create a device memory context
+ *
+ * This must be called before any heap is created in this context
+ *
+ * Caller to provide bridge handle which will be squirreled away
+ * internally and used for all future operations on items from this
+ * memory context.  Caller also to provide devicenode handle, as this
+ * is used for MMU configuration and also to determine the heap
+ * configuration for the auto-instantiated heaps.
+ *
+ * Note that when compiled in services/server, the hBridge is not used
+ * and is thrown away by the "fake" direct bridge.  (This may change.
+ * It is recommended that IMG_NULL be passed for the handle for now)
+ *
+ * hDeviceNode and uiHeapBlueprintID shall together dictate which
+ * heap-config to use.
+ *
+ * This will cause the server side counterpart to be created also.
+ *
+ * If you call DevmemCreateContext() (and the call succeeds) you
+ * are promising that you will later call Devmem_ContextDestroy(),
+ * except for abnormal process termination in which case it is
+ * expected it will be destroyed as part of handle clean up.
+ *
+ * Caller to provide storage for the pointer to the NEWDEVMEM_CONTEXT
+ * object thusly created.
+ */
+extern PVRSRV_ERROR
+DevmemCreateContext(DEVMEM_BRIDGE_HANDLE hBridge,
+                    IMG_HANDLE hDeviceNode,
+                    DEVMEM_HEAPCFGID uiHeapBlueprintID,
+                    DEVMEM_CONTEXT **ppsCtxPtr);
+
+/*
+ * DevmemAcquireDevPrivData()
+ * 
+ * Acquire the device private data for this memory context
+ */
+PVRSRV_ERROR
+DevmemAcquireDevPrivData(DEVMEM_CONTEXT *psCtx,
+                         IMG_HANDLE *hPrivData);
+
+/*
+ * DevmemReleaseDevPrivData()
+ * 
+ * Release the device private data for this memory context
+ */
+PVRSRV_ERROR
+DevmemReleaseDevPrivData(DEVMEM_CONTEXT *psCtx);
+
+/*
+ * DevmemDestroyContext()
+ *
+ * Undoes that done by DevmemCreateContext()
+ */
+extern PVRSRV_ERROR
+DevmemDestroyContext(DEVMEM_CONTEXT *psCtx);
+
+/*
+ * DevmemCreateHeap()
+ *
+ * Create a heap in the given context.
+ *
+ * N.B.  Not intended to be called directly, though it can be.
+ * Normally, heaps are instantiated at context creation time according
+ * to the specified blueprint.  See DevmemCreateContext() for details.
+ *
+ * This will cause MMU code to set up data structures for the heap,
+ * but may not cause page tables to be modified until allocations are
+ * made from the heap.
+ *
+ * The "Quantum" is both the device MMU page size to be configured for
+ * this heap, and the unit multiples of which "quantized" allocations
+ * are made (allocations smaller than this, known as "suballocations"
+ * will be made from a "sub alloc RA" and will "import" chunks
+ * according to this quantum)
+ *
+ * Where imported PMRs (or, for example, PMRs created by device class
+ * buffers) are mapped into this heap, it is important that the
+ * physical contiguity guarantee offered by the PMR is greater than or
+ * equal to the quantum size specified here, otherwise the attempt to
+ * map it will fail.  "Normal" allocations via Devmem_Allocate
+ * shall automatically meet this requirement, as each "import" will
+ * trigger the creation of a PMR with the desired contiguity.  The
+ * supported quantum sizes in that case shall be dictated by the OS
+ * specific implementation of PhysmemNewOSRamBackedPMR() (see)
+ */
+extern PVRSRV_ERROR
+DevmemCreateHeap(DEVMEM_CONTEXT *psCtxPtr,
+                 /* base and length of heap */
+                 IMG_DEV_VIRTADDR sBaseAddress,
+                 IMG_DEVMEM_SIZE_T uiLength,
+                 /* log2 of allocation quantum, i.e. "page" size.
+                    All allocations (that go to server side) are
+                    multiples of this.  We use a client-side RA to
+                    make sub-allocations from this */
+                 IMG_UINT32 ui32Log2Quantum,
+                 /* The minimum import alignment for this heap */
+                 IMG_UINT32 ui32Log2ImportAlignment,
+                 /* Name of heap for debug */
+                 /* N.B.  Okay to exist on caller's stack - this
+                    func takes a copy if it needs it. */
+                 const IMG_CHAR *pszName,
+                 DEVMEM_HEAP **ppsHeapPtr);
+/*
+ * DevmemDestroyHeap()
+ *
+ * Reverses DevmemCreateHeap()
+ *
+ * N.B. All allocations must have been freed and all mappings must
+ * have been unmapped before invoking this call
+ */
+extern PVRSRV_ERROR
+DevmemDestroyHeap(DEVMEM_HEAP *psHeap);
+
+/*
+ * DevmemExportalignAdjustSizeAndAlign()
+ * Compute the Size and Align passed to avoid suballocations (used when allocation with PVRSRV_MEMALLOCFLAG_EXPORTALIGN)
+ */
+IMG_INTERNAL IMG_VOID
+DevmemExportalignAdjustSizeAndAlign(DEVMEM_HEAP *psHeap, IMG_DEVMEM_SIZE_T *puiSize, IMG_DEVMEM_ALIGN_T *puiAlign);
+
+/*
+ * DevmemAllocate()
+ *
+ * Makes an allocation (possibly a "suballocation", as described
+ * below) of device virtual memory from this heap.
+ *
+ * The size and alignment of the allocation will be honoured by the RA
+ * that allocates the "suballocation".  The resulting allocation will
+ * be mapped into GPU virtual memory and the physical memory to back
+ * it will exist, by the time this call successfully completes.
+ * 
+ * The size must be a positive integer multiple of the alignment.
+ * (i.e. the aligment specifies the alignment of both the start and
+ * the end of the resulting allocation.)
+ *
+ * Allocations made via this API are routed though a "suballocation
+ * RA" which is responsible for ensuring that small allocations can be
+ * made without wasting physical memory in the server.  Furthermore,
+ * such suballocations can be made entirely client side without
+ * needing to go to the server unless the allocation spills into a new
+ * page.
+ *
+ * Such suballocations cause many allocations to share the same "PMR".
+ * This happens only when the flags match exactly.
+ *
+ */
+
+PVRSRV_ERROR DevmemAllocate(DEVMEM_HEAP *psHeap,
+                            IMG_DEVMEM_SIZE_T uiSize,
+                            IMG_DEVMEM_ALIGN_T uiAlign,
+                            DEVMEM_FLAGS_T uiFlags,
+                            const IMG_PCHAR pszText,
+                            DEVMEM_MEMDESC **ppsMemDescPtr);
+
+PVRSRV_ERROR
+DevmemAllocateExportable(IMG_HANDLE hBridge,
+						 IMG_HANDLE hDeviceNode,
+						 IMG_DEVMEM_SIZE_T uiSize,
+						 IMG_DEVMEM_ALIGN_T uiAlign,
+						 DEVMEM_FLAGS_T uiFlags,
+						 const IMG_PCHAR pszText,
+						 DEVMEM_MEMDESC **ppsMemDescPtr);
+
+PVRSRV_ERROR
+DevmemAllocateSparse(IMG_HANDLE hBridge,
+					 IMG_HANDLE hDeviceNode,
+					 IMG_DEVMEM_SIZE_T uiSize,
+					 IMG_DEVMEM_SIZE_T uiChunkSize,
+					 IMG_UINT32 ui32NumPhysChunks,
+					 IMG_UINT32 ui32NumVirtChunks,
+					 IMG_BOOL *pabMappingTable,
+					 IMG_DEVMEM_ALIGN_T uiAlign,
+					 DEVMEM_FLAGS_T uiFlags,
+					 const IMG_PCHAR pszText,
+					 DEVMEM_MEMDESC **ppsMemDescPtr);
+
+/*
+ * DevmemFree()
+ *
+ * Reverses that done by DevmemAllocate() N.B.  The underlying
+ * mapping and server side allocation _may_ not be torn down, for
+ * example, if the allocation has been exported, or if multiple
+ * allocations were suballocated from the same mapping, but this is
+ * properly refcounted, so the caller does not have to care.
+ */
+
+extern IMG_VOID
+DevmemFree(DEVMEM_MEMDESC *psMemDesc);
+
+/*
+	DevmemMapToDevice:
+
+	Map an allocation to the device is was allocated from.
+	This function _must_ be called before any call to 
+	DevmemAcquireDevVirtAddr is made as it binds the allocation
+	to the heap.
+	DevmemReleaseDevVirtAddr is used to release the reference
+	to the device mapping this function created, but it doesn't
+	mean that the memory will actually be unmapped from the
+	device as other references to the mapping obtained via
+	DevmemAcquireDevVirtAddr could still be active.
+*/
+PVRSRV_ERROR DevmemMapToDevice(DEVMEM_MEMDESC *psMemDesc,
+							   DEVMEM_HEAP *psHeap,
+							   IMG_DEV_VIRTADDR *psDevVirtAddr);
+
+/*
+	DevmemAcquireDevVirtAddr
+
+	Acquire the MemDesc's device virtual address.
+	This function _must_ be called after DevmemMapToDevice
+	and is expected to be used be functions which didn't allocate
+	the MemDesc but need to know it's address
+ */
+PVRSRV_ERROR DevmemAcquireDevVirtAddr(DEVMEM_MEMDESC *psMemDesc,
+                                      IMG_DEV_VIRTADDR *psDevVirtAddrRet);
+/*
+ * DevmemReleaseDevVirtAddr()
+ *
+ * give up the licence to use the device virtual address that was
+ * acquired by "Acquire" or "MapToDevice"
+ */
+extern IMG_VOID
+DevmemReleaseDevVirtAddr(DEVMEM_MEMDESC *psMemDesc);
+
+/*
+ * DevmemAcquireCpuVirtAddr()
+ *
+ * Acquires a license to use the cpu virtual address of this mapping.
+ * Note that the memory may not have been mapped into cpu virtual
+ * memory prior to this call.  On first "acquire" the memory will be
+ * mapped in (if it wasn't statically mapped in) and on last put it
+ * _may_ become unmapped.  Later calling "Acquire" again, _may_ cause
+ * the memory to be mapped at a different address.
+ */
+PVRSRV_ERROR DevmemAcquireCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc,
+                                      IMG_VOID **ppvCpuVirtAddr);
+/*
+ * DevmemReleaseDevVirtAddr()
+ *
+ * give up the licence to use the cpu virtual address that was granted
+ * with the "Get" call.
+ */
+extern IMG_VOID
+DevmemReleaseCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc);
+
+/*
+ * DevmemExport()
+ *
+ * Given a memory allocation allocated with DevmemAllocateExportable()
+ * create a "cookie" that can be passed intact by the caller's own choice
+ * of secure IPC to another process and used as the argument to "map"
+ * to map this memory into a heap in the target processes.  N.B.  This can
+ * also be used to map into multiple heaps in one process, though that's not
+ * the intention.
+ *
+ * Note, the caller must later call Unexport before freeing the
+ * memory.
+ */
+PVRSRV_ERROR DevmemExport(DEVMEM_MEMDESC *psMemDesc,
+                          DEVMEM_EXPORTCOOKIE *psExportCookie);
+
+
+IMG_VOID DevmemUnexport(DEVMEM_MEMDESC *psMemDesc,
+						DEVMEM_EXPORTCOOKIE *psExportCookie);
+
+PVRSRV_ERROR
+DevmemImport(IMG_HANDLE hBridge,
+			 DEVMEM_EXPORTCOOKIE *psCookie,
+			 DEVMEM_FLAGS_T uiFlags,
+			 DEVMEM_MEMDESC **ppsMemDescPtr);
+
+/*
+ * DevmemIsValidExportCookie()
+ * Check whether the Export Cookie contains a valid export */
+IMG_BOOL
+DevmemIsValidExportCookie(DEVMEM_EXPORTCOOKIE *psExportCookie);
+
+/*
+ * DevmemMakeServerExportClientExport()
+ * 
+ * This is a "special case" function for making a server export cookie
+ * which went through the direct bridge into an export cookie that can
+ * be passed through the client bridge.
+ */
+PVRSRV_ERROR
+DevmemMakeServerExportClientExport(DEVMEM_BRIDGE_HANDLE hBridge,
+                                   DEVMEM_SERVER_EXPORTCOOKIE hServerExportCookie,
+                                   DEVMEM_EXPORTCOOKIE *psExportCookie);
+
+/*
+ * DevmemUnmakeServerExportClientExport()
+ * 
+ * Free any resource associated with the Make operation
+ */
+PVRSRV_ERROR
+DevmemUnmakeServerExportClientExport(DEVMEM_BRIDGE_HANDLE hBridge,
+                                   DEVMEM_EXPORTCOOKIE *psExportCookie);
+
+/*
+ *
+ * The following set of functions is specific to the heap "blueprint"
+ * stuff, for automatic creation of heaps when a context is created
+ *
+ */
+
+
+/* Devmem_HeapConfigCount: returns the number of heap configs that
+   this device has.  Note that there is no acquire/release semantics
+   required, as this data is guaranteed to be constant for the
+   lifetime of the device node */
+extern PVRSRV_ERROR
+DevmemHeapConfigCount(DEVMEM_BRIDGE_HANDLE hBridge,
+                      IMG_HANDLE hDeviceNode,
+                      IMG_UINT32 *puiNumHeapConfigsOut);
+
+/* Devmem_HeapCount: returns the number of heaps that a given heap
+   config on this device has.  Note that there is no acquire/release
+   semantics required, as this data is guaranteed to be constant for
+   the lifetime of the device node */
+extern PVRSRV_ERROR
+DevmemHeapCount(DEVMEM_BRIDGE_HANDLE hBridge,
+                IMG_HANDLE hDeviceNode,
+                IMG_UINT32 uiHeapConfigIndex,
+                IMG_UINT32 *puiNumHeapsOut);
+/* Devmem_HeapConfigName: return the name of the given heap config.
+   The caller is to provide the storage for the returned string and
+   indicate the number of bytes (including null terminator) for such
+   string in the BufSz arg.  Note that there is no acquire/release
+   semantics required, as this data is guaranteed to be constant for
+   the lifetime of the device node.
+ */
+extern PVRSRV_ERROR
+DevmemHeapConfigName(DEVMEM_BRIDGE_HANDLE hBridge,
+                     IMG_HANDLE hDeviceNode,
+                     IMG_UINT32 uiHeapConfigIndex,
+                     IMG_CHAR *pszConfigNameOut,
+                     IMG_UINT32 uiConfigNameBufSz);
+
+/* Devmem_HeapDetails: fetches all the metadata that is recorded in
+   this heap "blueprint".  Namely: heap name (caller to provide
+   storage, and indicate buffer size (including null terminator) in
+   BufSz arg), device virtual address and length, log2 of data page
+   size (will be one of 12, 14, 16, 18, 20, 21, at time of writing).
+   Note that there is no acquire/release semantics required, as this
+   data is guaranteed to be constant for the lifetime of the device
+   node. */
+extern PVRSRV_ERROR
+DevmemHeapDetails(DEVMEM_BRIDGE_HANDLE hBridge,
+                  IMG_HANDLE hDeviceNode,
+                  IMG_UINT32 uiHeapConfigIndex,
+                  IMG_UINT32 uiHeapIndex,
+                  IMG_CHAR *pszHeapNameOut,
+                  IMG_UINT32 uiHeapNameBufSz,
+                  IMG_DEV_VIRTADDR *psDevVAddrBaseOut,
+                  IMG_DEVMEM_SIZE_T *puiHeapLengthOut,
+                  IMG_UINT32 *puiLog2DataPageSize,
+                  IMG_UINT32 *puiLog2ImportAlignmentOut);
+
+/*
+ * Devmem_FindHeapByName()
+ *
+ * returns the heap handle for the named _automagic_ heap in this
+ * context.  "automagic" heaps are those that are born with the
+ * context from a blueprint
+ */
+extern PVRSRV_ERROR
+DevmemFindHeapByName(const DEVMEM_CONTEXT *psCtx,
+                     const IMG_CHAR *pszHeapName,
+                     DEVMEM_HEAP **ppsHeapRet);
+
+/*
+ * DevmemGetHeapBaseDevVAddr()
+ *
+ * returns the device virtual address of the base of the heap.
+ */
+
+PVRSRV_ERROR
+DevmemGetHeapBaseDevVAddr(DEVMEM_HEAP *psHeap,
+			  IMG_DEV_VIRTADDR *pDevVAddr);
+
+extern PVRSRV_ERROR
+DevmemLocalGetImportHandle(DEVMEM_MEMDESC *psMemDesc,
+			   IMG_HANDLE *phImport);
+
+extern PVRSRV_ERROR
+DevmemGetImportUID(DEVMEM_MEMDESC *psMemDesc,
+						   IMG_UINT64 *pui64UID);
+
+PVRSRV_ERROR
+DevmemGetReservation(DEVMEM_MEMDESC *psMemDesc,
+				IMG_HANDLE *hReservation);
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetPMRData(DEVMEM_MEMDESC *psMemDesc,
+		IMG_HANDLE *hPMR,
+		IMG_DEVMEM_OFFSET_T *puiPMROffset);
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetFlags(DEVMEM_MEMDESC *psMemDesc,
+				DEVMEM_FLAGS_T *puiFlags);
+
+PVRSRV_ERROR
+DevmemLocalImport(IMG_HANDLE hBridge,
+				  IMG_HANDLE hExtHandle,
+				  DEVMEM_FLAGS_T uiFlags,
+				  DEVMEM_MEMDESC **ppsMemDescPtr,
+				  IMG_DEVMEM_SIZE_T *puiSizePtr);
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemIsDevVirtAddrValid(DEVMEM_CONTEXT *psContext,
+                         IMG_DEV_VIRTADDR sDevVAddr);
+
+/* DevmemGetHeapLog2ImportAlignment()
+ *
+ * Get the import alignment used for a certain heap.
+ */
+IMG_UINT32
+DevmemGetHeapLog2ImportAlignment(DEVMEM_HEAP *psHeap);
+
+#endif /* #ifndef SRVCLIENT_DEVICEMEM_CLIENT_H */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/shared/include/devicemem_history_shared.h b/drivers/external_drivers/intel_media/graphics/rgx/services/shared/include/devicemem_history_shared.h
new file mode 100644
index 0000000..789409c
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/shared/include/devicemem_history_shared.h
@@ -0,0 +1,57 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device Memory History shared definitions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Shared (client/server) definitions related to the Devicemem History
+                functionality.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef DEVICEMEM_HISTORY_SHARED_H
+#define DEVICEMEM_HISTORY_SHARED_H
+
+/* structure used inside MEMDESC to hold the allocation name until
+ * the allocation is unmapped
+ */
+typedef struct _DEVICEMEM_HISTORY_MEMDESC_DATA_
+{
+	IMG_CHAR szText[DEVICEMEM_HISTORY_TEXT_BUFSZ];
+	IMG_DEVMEM_SIZE_T uiSize;
+} DEVICEMEM_HISTORY_MEMDESC_DATA;
+
+#endif
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/shared/include/devicemem_mmap.h b/drivers/external_drivers/intel_media/graphics/rgx/services/shared/include/devicemem_mmap.h
new file mode 100644
index 0000000..2963336
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/shared/include/devicemem_mmap.h
@@ -0,0 +1,108 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device Memory Management
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    OS abstraction for the mmap2 interface for mapping PMRs into
+                User Mode memory
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _DEVICEMEM_MMAP_H_
+#define _DEVICEMEM_MMAP_H_
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+/*
+ *
+ * OSMMapPMR
+ *
+ * Causes this PMR to be mapped into CPU memory that the user process
+ * may access.
+ *
+ * Whether the memory is mapped readonly, readwrite, or not at all, is
+ * dependent on the PMR itself.
+ *
+ * The PMR handle is opaque to the user, and lower levels of this
+ * stack ensure that the handle is private to this process, such that
+ * this API cannot be abused to gain access to other people's PMRs.
+ *
+ * The OS implementation of this function should return the virtual
+ * address and length for the User to use.  The "PrivData" is to be
+ * stored opaquely by the caller (N.B. he should make no assumptions,
+ * in particular, IMG_NULL is a valid handle) and given back to the
+ * call to OSMunmapPMR.
+ *
+ * The OS implementation is free to use the PrivData handle for any
+ * purpose it sees fit.
+ */
+
+extern PVRSRV_ERROR
+OSMMapPMR(IMG_HANDLE hBridge,
+          IMG_HANDLE hPMR,
+          IMG_DEVMEM_SIZE_T uiPMRLength,
+          IMG_UINT32 uiFlags,
+          IMG_HANDLE *phOSMMapPrivDataOut,
+          IMG_VOID **ppvMappingAddressOut,
+          IMG_SIZE_T *puiMappingLengthOut);
+
+/*
+ *
+ * OSMUnmapPMR
+ *
+ * The reverse of OSMMapPMR
+ *
+ * The caller is required to pass the PMR handle back in along with
+ * the same 3-tuple of information as was returned by the call to
+ * OSMMapPMR
+ *
+ */
+/* 
+   FIXME:
+   perhaps this function should take _only_ the hOSMMapPrivData arg,
+   and the implementation is required to store any of the other data
+   items that it requires to do the unmap?
+*/
+extern IMG_VOID
+OSMUnmapPMR(IMG_HANDLE hBridge,
+            IMG_HANDLE hPMR,
+            IMG_HANDLE hOSMMapPrivData,
+            IMG_VOID *pvMappingAddress,
+            IMG_SIZE_T uiMappingLength);
+
+#endif
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/shared/include/devicemem_pdump.h b/drivers/external_drivers/intel_media/graphics/rgx/services/shared/include/devicemem_pdump.h
new file mode 100644
index 0000000..b86602a
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/shared/include/devicemem_pdump.h
@@ -0,0 +1,343 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device Memory Management PDump internal
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Services internal interface to PDump device memory management
+                functions that are shared between client and server code.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _DEVICEMEM_PDUMP_H_
+#define _DEVICEMEM_PDUMP_H_
+
+#include "devicemem.h"
+#include "pdumpdefs.h"
+#include "pdump.h"
+
+#if defined(PDUMP)
+/*
+ * DevmemPDumpMem()
+ *
+ * takes a memory descriptor, offset, and size, and takes the current
+ * contents of the memory at that location and writes it to the prm
+ * pdump file, and emits a pdump LDB to load the data from that file.
+ * The intention here is that the contents of the simulated buffer
+ * upon pdump playback will be made to be the same as they are when
+ * this command is run, enabling pdump of cases where the memory has
+ * been modified externally, i.e. by the host cpu or by a third
+ * party.
+ */
+extern IMG_VOID
+DevmemPDumpLoadMem(DEVMEM_MEMDESC *psMemDesc,
+                   IMG_DEVMEM_OFFSET_T uiOffset,
+                   IMG_DEVMEM_SIZE_T uiSize,
+                   PDUMP_FLAGS_T uiPDumpFlags);
+
+/*
+ * DevmemPDumpZeroMem()
+ *
+ * as DevmemPDumpMem() but the PDump allocation will be populated with zeros from
+ * the zero page in the parameter stream
+ */
+extern IMG_VOID
+DevmemPDumpLoadZeroMem(DEVMEM_MEMDESC *psMemDesc,
+                   IMG_DEVMEM_OFFSET_T uiOffset,
+                   IMG_DEVMEM_SIZE_T uiSize,
+                   PDUMP_FLAGS_T uiPDumpFlags);
+
+/*
+ * DevmemPDumpMemValue()
+ * 
+ * As above but dumps the value at a dword-aligned address in plain
+ * text to the pdump script2 file. Useful for patching a buffer at
+ * pdump playback by simply editing the script output file.
+ * 
+ * (The same functionality can be achieved by the above function but
+ *  the binary PARAM file must be patched in that case.)
+ */
+IMG_INTERNAL IMG_VOID
+DevmemPDumpLoadMemValue32(DEVMEM_MEMDESC *psMemDesc,
+                        IMG_DEVMEM_OFFSET_T uiOffset,
+                        IMG_UINT32 ui32Value,
+                        PDUMP_FLAGS_T uiPDumpFlags);
+
+/*
+ * DevmemPDumpMemValue64()
+ *
+ * As above but dumps the 64bit-value at a dword-aligned address in plain
+ * text to the pdump script2 file. Useful for patching a buffer at
+ * pdump playback by simply editing the script output file.
+ *
+ * (The same functionality can be achieved by the above function but
+ *  the binary PARAM file must be patched in that case.)
+ */
+IMG_INTERNAL IMG_VOID
+DevmemPDumpLoadMemValue64(DEVMEM_MEMDESC *psMemDesc,
+                        IMG_DEVMEM_OFFSET_T uiOffset,
+                        IMG_UINT64 ui64Value,
+                        PDUMP_FLAGS_T uiPDumpFlags);
+
+/*
+ * DevmemPDumpPageCatBaseToSAddr()
+ *
+ * Returns the symbolic address of a piece of memory represented
+ * by an offset into the mem descriptor.
+ */
+extern PVRSRV_ERROR
+DevmemPDumpPageCatBaseToSAddr(DEVMEM_MEMDESC		*psMemDesc,
+							  IMG_DEVMEM_OFFSET_T	*puiMemOffset,
+							  IMG_CHAR				*pszName,
+							  IMG_UINT32			ui32Size);
+
+/*
+ * DevmemPDumpSaveToFile()
+ *
+ * emits a pdump SAB to cause the current contents of the memory to be
+ * written to the given file during playback
+ */
+extern IMG_VOID
+DevmemPDumpSaveToFile(DEVMEM_MEMDESC *psMemDesc,
+                      IMG_DEVMEM_OFFSET_T uiOffset,
+                      IMG_DEVMEM_SIZE_T uiSize,
+                      const IMG_CHAR *pszFilename);
+
+/*
+ * DevmemPDumpSaveToFileVirtual()
+ *
+ * emits a pdump SAB, just like DevmemPDumpSaveToFile(), but uses the
+ * virtual address and device MMU context to cause the pdump player to
+ * traverse the MMU page tables itself.
+ */
+extern IMG_VOID
+DevmemPDumpSaveToFileVirtual(DEVMEM_MEMDESC *psMemDesc,
+                             IMG_DEVMEM_OFFSET_T uiOffset,
+                             IMG_DEVMEM_SIZE_T uiSize,
+                             const IMG_CHAR *pszFilename,
+							 IMG_UINT32 ui32FileOffset,
+							 IMG_UINT32 ui32PdumpFlags);
+
+
+/*
+ *
+ * Devmem_PDumpDevmemPol32()
+ *
+ * writes a PDump 'POL' command to wait for a masked 32-bit memory
+ * location to become the specified value
+ */
+extern PVRSRV_ERROR
+DevmemPDumpDevmemPol32(const DEVMEM_MEMDESC *psMemDesc,
+                           IMG_DEVMEM_OFFSET_T uiOffset,
+                           IMG_UINT32 ui32Value,
+                           IMG_UINT32 ui32Mask,
+                           PDUMP_POLL_OPERATOR eOperator,
+                           PDUMP_FLAGS_T ui32PDumpFlags);
+
+/*
+ * DevmemPDumpCBP()
+ *
+ * Polls for space in circular buffer. Reads the read offset
+ * from memory and waits until there is enough space to write
+ * the packet.
+ *
+ * hMemDesc      - MemDesc which contains the read offset
+ * uiReadOffset  - Offset into MemDesc to the read offset
+ * uiWriteOffset - Current write offset
+ * uiPacketSize  - Size of packet to write
+ * uiBufferSize  - Size of circular buffer
+ */
+extern PVRSRV_ERROR
+DevmemPDumpCBP(const DEVMEM_MEMDESC *psMemDesc,
+				IMG_DEVMEM_OFFSET_T uiReadOffset,
+				IMG_DEVMEM_OFFSET_T uiWriteOffset,
+				IMG_DEVMEM_SIZE_T uiPacketSize,
+				IMG_DEVMEM_SIZE_T uiBufferSize);
+
+#else	/* PDUMP */
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemPDumpLoadMem)
+#endif
+static INLINE IMG_VOID
+DevmemPDumpLoadMem(DEVMEM_MEMDESC *psMemDesc,
+                   IMG_DEVMEM_OFFSET_T uiOffset,
+                   IMG_DEVMEM_SIZE_T uiSize,
+                   PDUMP_FLAGS_T uiPDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psMemDesc);
+	PVR_UNREFERENCED_PARAMETER(uiOffset);
+	PVR_UNREFERENCED_PARAMETER(uiSize);
+	PVR_UNREFERENCED_PARAMETER(uiPDumpFlags);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemPDumpLoadMemValue32)
+#endif
+static INLINE IMG_VOID
+DevmemPDumpLoadMemValue32(DEVMEM_MEMDESC *psMemDesc,
+                        IMG_DEVMEM_OFFSET_T uiOffset,
+                        IMG_UINT32 ui32Value,
+                        PDUMP_FLAGS_T uiPDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psMemDesc);
+	PVR_UNREFERENCED_PARAMETER(uiOffset);
+	PVR_UNREFERENCED_PARAMETER(ui32Value);
+	PVR_UNREFERENCED_PARAMETER(uiPDumpFlags);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemPDumpLoadMemValue64)
+#endif
+static INLINE IMG_VOID
+DevmemPDumpLoadMemValue64(DEVMEM_MEMDESC *psMemDesc,
+                        IMG_DEVMEM_OFFSET_T uiOffset,
+                        IMG_UINT64 ui64Value,
+                        PDUMP_FLAGS_T uiPDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psMemDesc);
+	PVR_UNREFERENCED_PARAMETER(uiOffset);
+	PVR_UNREFERENCED_PARAMETER(ui64Value);
+	PVR_UNREFERENCED_PARAMETER(uiPDumpFlags);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemPDumpLoadMemValue)
+#endif
+static INLINE IMG_VOID
+DevmemPDumpLoadMemValue(DEVMEM_MEMDESC *psMemDesc,
+                        IMG_DEVMEM_OFFSET_T uiOffset,
+                        IMG_UINT32 ui32Value,
+                        PDUMP_FLAGS_T uiPDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psMemDesc);
+	PVR_UNREFERENCED_PARAMETER(uiOffset);
+	PVR_UNREFERENCED_PARAMETER(ui32Value);
+	PVR_UNREFERENCED_PARAMETER(uiPDumpFlags);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemPDumpPageCatBaseToSAddr)
+#endif
+static INLINE PVRSRV_ERROR
+DevmemPDumpPageCatBaseToSAddr(DEVMEM_MEMDESC		*psMemDesc,
+							  IMG_DEVMEM_OFFSET_T	*puiMemOffset,
+							  IMG_CHAR				*pszName,
+							  IMG_UINT32			ui32Size)
+{
+	PVR_UNREFERENCED_PARAMETER(psMemDesc);
+	PVR_UNREFERENCED_PARAMETER(puiMemOffset);
+	PVR_UNREFERENCED_PARAMETER(pszName);
+	PVR_UNREFERENCED_PARAMETER(ui32Size);
+
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemPDumpSaveToFile)
+#endif
+static INLINE IMG_VOID
+DevmemPDumpSaveToFile(DEVMEM_MEMDESC *psMemDesc,
+                      IMG_DEVMEM_OFFSET_T uiOffset,
+                      IMG_DEVMEM_SIZE_T uiSize,
+                      const IMG_CHAR *pszFilename)
+{
+	PVR_UNREFERENCED_PARAMETER(psMemDesc);
+	PVR_UNREFERENCED_PARAMETER(uiOffset);
+	PVR_UNREFERENCED_PARAMETER(uiSize);
+	PVR_UNREFERENCED_PARAMETER(pszFilename);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemPDumpSaveToFileVirtual)
+#endif
+static INLINE IMG_VOID
+DevmemPDumpSaveToFileVirtual(DEVMEM_MEMDESC *psMemDesc,
+                             IMG_DEVMEM_OFFSET_T uiOffset,
+                             IMG_DEVMEM_SIZE_T uiSize,
+                             const IMG_CHAR *pszFilename,
+							 IMG_UINT32 ui32FileOffset,
+							 IMG_UINT32 ui32PdumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psMemDesc);
+	PVR_UNREFERENCED_PARAMETER(uiOffset);
+	PVR_UNREFERENCED_PARAMETER(uiSize);
+	PVR_UNREFERENCED_PARAMETER(pszFilename);
+	PVR_UNREFERENCED_PARAMETER(ui32FileOffset);
+	PVR_UNREFERENCED_PARAMETER(ui32PdumpFlags);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemPDumpDevmemPol32)
+#endif
+static INLINE PVRSRV_ERROR
+DevmemPDumpDevmemPol32(const DEVMEM_MEMDESC *psMemDesc,
+                           IMG_DEVMEM_OFFSET_T uiOffset,
+                           IMG_UINT32 ui32Value,
+                           IMG_UINT32 ui32Mask,
+                           PDUMP_POLL_OPERATOR eOperator,
+                           PDUMP_FLAGS_T ui32PDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psMemDesc);
+	PVR_UNREFERENCED_PARAMETER(uiOffset);
+	PVR_UNREFERENCED_PARAMETER(ui32Value);
+	PVR_UNREFERENCED_PARAMETER(ui32Mask);
+	PVR_UNREFERENCED_PARAMETER(eOperator);
+	PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemPDumpCBP)
+#endif
+static INLINE PVRSRV_ERROR
+DevmemPDumpCBP(const DEVMEM_MEMDESC *psMemDesc,
+				IMG_DEVMEM_OFFSET_T uiReadOffset,
+				IMG_DEVMEM_OFFSET_T uiWriteOffset,
+				IMG_DEVMEM_SIZE_T uiPacketSize,
+				IMG_DEVMEM_SIZE_T uiBufferSize)
+{
+	PVR_UNREFERENCED_PARAMETER(psMemDesc);
+	PVR_UNREFERENCED_PARAMETER(uiReadOffset);
+	PVR_UNREFERENCED_PARAMETER(uiWriteOffset);
+	PVR_UNREFERENCED_PARAMETER(uiPacketSize);
+	PVR_UNREFERENCED_PARAMETER(uiBufferSize);
+
+	return PVRSRV_OK;
+}
+#endif	/* PDUMP */
+#endif	/* _DEVICEMEM_PDUMP_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/shared/include/devicemem_utils.h b/drivers/external_drivers/intel_media/graphics/rgx/services/shared/include/devicemem_utils.h
new file mode 100644
index 0000000..92a89b6
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/shared/include/devicemem_utils.h
@@ -0,0 +1,388 @@
+/*************************************************************************/ /*!
+@File
+@Title          Device Memory Management internal utility functions
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Utility functions used internally by device memory management
+                code.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _DEVICEMEM_UTILS_H_
+#define _DEVICEMEM_UTILS_H_
+
+#include "devicemem.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvr_debug.h"
+#include "allocmem.h"
+#include "ra.h"
+#include "osfunc.h"
+#include "lock.h"
+#include "devicemem_mmap.h"
+#include "devicemem_utils.h"
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+#include "mm_common.h"
+#include "devicemem_history_shared.h"
+#endif
+
+#define DEVMEM_HEAPNAME_MAXLENGTH 160
+
+
+#if defined(DEVMEM_DEBUG) && defined(REFCOUNT_DEBUG)
+#define DEVMEM_REFCOUNT_PRINT(fmt, ...) PVRSRVDebugPrintf(PVR_DBG_ERROR, __FILE__, __LINE__, fmt, __VA_ARGS__)
+#else
+#define DEVMEM_REFCOUNT_PRINT(fmt, ...)
+#endif
+
+
+/* If we need a "hMapping" but we don't have a server-side mapping, we
+   poison the entry with this value so that it's easily recognised in
+   the debugger.  Note that this is potentially a valid handle, but
+   then so is IMG_NULL, which is no better, indeed worse, as it's not
+   obvious in the debugger.  The value doesn't matter.  We _never_ use
+   it (and because it's valid, we never assert it isn't this) but it's
+   nice to have a value in the source code that we can grep for when
+   things go wrong. */
+#define LACK_OF_MAPPING_POISON ((IMG_HANDLE)0x6116dead)
+#define LACK_OF_RESERVATION_POISON ((IMG_HANDLE)0x7117dead)
+
+struct _DEVMEM_CONTEXT_ {
+    /* Cookie of the device on which this memory context resides */
+    IMG_HANDLE hDeviceNode;
+
+    /* Number of heaps that have been created in this context
+       (regardless of whether they have allocations) */
+    IMG_UINT32 uiNumHeaps;
+
+    /* Sometimes we need to talk to Kernel Services.  In order to do
+       so, we need the connection handle */
+    DEVMEM_BRIDGE_HANDLE hBridge;
+
+    /*
+      Each "DEVMEM_CONTEXT" has a counterpart in the server,
+      which is responsible for handling the mapping into device MMU.
+      We have a handle to that here.
+    */
+    IMG_HANDLE hDevMemServerContext;
+
+    /* Number of automagically created heaps in this context,
+       i.e. those that are born at context creation time from the
+       chosen "heap config" or "blueprint" */
+    IMG_UINT32 uiAutoHeapCount;
+
+    /* pointer to array of such heaps */
+    struct _DEVMEM_HEAP_ **ppsAutoHeapArray;
+
+	/* Private data handle for device specific data */
+	IMG_HANDLE hPrivData;
+};
+
+struct _DEVMEM_HEAP_ {
+    /* Name of heap - for debug and lookup purposes. */
+    IMG_CHAR *pszName;
+
+    /* Number of live imports in the heap */
+    ATOMIC_T hImportCount;
+
+    /*
+     * Base address of heap, required by clients due to some requesters
+     * not being full range 
+     */
+    IMG_DEV_VIRTADDR sBaseAddress;
+
+    /* This RA is for managing sub-allocations in virtual space.  Two
+       more RA's will be used under the Hood for managing the coarser
+       allocation of virtual space from the heap, and also for
+       managing the physical backing storage. */
+    RA_ARENA *psSubAllocRA;
+    IMG_CHAR *pszSubAllocRAName;
+    /*
+      This RA is for the coarse allocation of virtual space from the heap
+    */
+    RA_ARENA *psQuantizedVMRA;
+    IMG_CHAR *pszQuantizedVMRAName;
+
+    /* We also need to store a copy of the quantum size in order to
+       feed this down to the server */
+    IMG_UINT32 uiLog2Quantum;
+
+    /* Store a copy of the minimum import alignment */
+    IMG_UINT32 uiLog2ImportAlignment;
+
+    /* The parent memory context for this heap */
+    struct _DEVMEM_CONTEXT_ *psCtx;
+
+	POS_LOCK hLock;							/*!< Lock to protect this structure */
+
+    /*
+      Each "DEVMEM_HEAP" has a counterpart in the server,
+      which is responsible for handling the mapping into device MMU.
+      We have a handle to that here.
+    */
+    IMG_HANDLE hDevMemServerHeap;
+};
+
+
+typedef struct _DEVMEM_DEVICE_IMPORT_ {
+	DEVMEM_HEAP *psHeap;			/*!< Heap this import is bound to */
+	IMG_DEV_VIRTADDR sDevVAddr;		/*!< Device virtual address of the import */
+	IMG_UINT32 ui32RefCount;		/*!< Refcount of the device virtual address */
+	IMG_HANDLE hReservation;		/*!< Device memory reservation handle */
+	IMG_HANDLE hMapping;			/*!< Device mapping handle */
+	IMG_BOOL bMapped;				/*!< This is import mapped? */
+	POS_LOCK hLock;					/*!< Lock to protect the device import */
+} DEVMEM_DEVICE_IMPORT;
+
+typedef struct _DEVMEM_CPU_IMPORT_ {
+	IMG_PVOID pvCPUVAddr;			/*!< CPU virtual address of the import */
+	IMG_UINT32 ui32RefCount;		/*!< Refcount of the CPU virtual address */
+	IMG_HANDLE hOSMMapData;			/*!< CPU mapping handle */
+	POS_LOCK hLock;					/*!< Lock to protect the CPU import */
+#if !defined(__KERNEL__) && defined(SUPPORT_ION)
+	int iDmaBufFd;					/*!< >=0 if this was an imported ion allocation */
+#endif
+} DEVMEM_CPU_IMPORT;
+
+typedef struct _DEVMEM_IMPORT_ {
+    DEVMEM_BRIDGE_HANDLE hBridge;		/*!< Bridge connection for the server */
+	IMG_DEVMEM_ALIGN_T uiAlign;			/*!< Alignment of the PMR */
+	DEVMEM_SIZE_T uiSize;				/*!< Size of import */
+    ATOMIC_T hRefCount;					/*!< Refcount for this import */
+    IMG_BOOL bExportable;				/*!< Is this import exportable? */
+    IMG_HANDLE hPMR;					/*!< Handle to the PMR */
+    DEVMEM_FLAGS_T uiFlags;				/*!< Flags for this import */
+    POS_LOCK hLock;						/*!< Lock to protect the import */
+
+	DEVMEM_DEVICE_IMPORT sDeviceImport;	/*!< Device specifics of the import */
+	DEVMEM_CPU_IMPORT sCPUImport;		/*!< CPU specifics of the import */
+} DEVMEM_IMPORT;
+
+typedef struct _DEVMEM_DEVICE_MEMDESC_ {
+	IMG_DEV_VIRTADDR sDevVAddr;		/*!< Device virtual address of the allocation */
+	IMG_UINT32 ui32RefCount;		/*!< Refcount of the device virtual address */
+	POS_LOCK hLock;					/*!< Lock to protect device memdesc */
+} DEVMEM_DEVICE_MEMDESC;
+
+typedef struct _DEVMEM_CPU_MEMDESC_ {
+	IMG_PVOID pvCPUVAddr;			/*!< CPU virtual address of the import */
+	IMG_UINT32 ui32RefCount;		/*!< Refcount of the device CPU address */
+	POS_LOCK hLock;					/*!< Lock to protect CPU memdesc */
+} DEVMEM_CPU_MEMDESC;
+
+struct _DEVMEM_MEMDESC_ {
+    DEVMEM_IMPORT *psImport;				/*!< Import this memdesc is on */
+    IMG_DEVMEM_OFFSET_T uiOffset;			/*!< Offset into import where our allocation starts */
+    ATOMIC_T hRefCount;						/*!< Refcount of the memdesc */
+    POS_LOCK hLock;							/*!< Lock to protect memdesc */
+
+	DEVMEM_DEVICE_MEMDESC sDeviceMemDesc;	/*!< Device specifics of the memdesc */
+	DEVMEM_CPU_MEMDESC sCPUMemDesc;		/*!< CPU specifics of the memdesc */
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+	DEVICEMEM_HISTORY_MEMDESC_DATA sTraceData;
+#endif
+
+#if defined(PVR_RI_DEBUG)
+    IMG_HANDLE hRIHandle;					/*!< Handle to RI information */
+#endif
+};
+
+/******************************************************************************
+@Function       _DevmemValidateParams
+@Description    Check if flags are conflicting and if align is a size multiple.
+
+@Input          uiSize      Size of the import.
+@Input          uiAlign     Alignment of the import.
+@Input          uiFlags     Flags for the import.
+@return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR _DevmemValidateParams(IMG_DEVMEM_SIZE_T uiSize,
+								   IMG_DEVMEM_ALIGN_T uiAlign,
+								   DEVMEM_FLAGS_T uiFlags);
+
+/******************************************************************************
+@Function       _DevmemImportStructAlloc
+@Description    Allocates memory for an import struct. Does not allocate a PMR!
+                Create locks for CPU and Devmem mappings.
+
+@Input          hBridge       Bridge to use for calls from the import.
+@Input          bExportable   Is this import is exportable?
+@Input          ppsImport     The import to allocate.
+@return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR _DevmemImportStructAlloc(IMG_HANDLE hBridge,
+									  IMG_BOOL bExportable,
+									  DEVMEM_IMPORT **ppsImport);
+
+/******************************************************************************
+@Function       _DevmemImportStructInit
+@Description    Initialises the import struct with the given parameters.
+                Set it's refcount to 1!
+
+@Input          psImport    The import to initialise.
+@Input          uiSize      Size of the import.
+@Input          uiAlign     Alignment of allocations in the import.
+@Input          uiMapFlags
+@Input          hPMR        Reference to the PMR of this import struct.
+******************************************************************************/
+IMG_VOID _DevmemImportStructInit(DEVMEM_IMPORT *psImport,
+								 IMG_DEVMEM_SIZE_T uiSize,
+								 IMG_DEVMEM_ALIGN_T uiAlign,
+								 PVRSRV_MEMALLOCFLAGS_T uiMapFlags,
+								 IMG_HANDLE hPMR);
+
+/******************************************************************************
+@Function       _DevmemImportStructDevMap
+@Description    NEVER call after the last _DevmemMemDescRelease()
+                Maps the PMR referenced by the import struct to the device's
+                virtual address space.
+                Does nothing but increase the cpu mapping refcount if the
+                import struct was already mapped.
+
+@Input          psHeap    The heap to map to.
+@Input          bMap      Caller can choose if the import should be really
+                          mapped in the page tables or if just a virtual range
+                          should be reserved and the refcounts increased.
+@Input          psImport  The import we want to map.
+@return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR _DevmemImportStructDevMap(DEVMEM_HEAP *psHeap,
+									   IMG_BOOL bMap,
+									   DEVMEM_IMPORT *psImport);
+
+/******************************************************************************
+@Function       _DevmemImportStructDevUnmap
+@Description    Unmaps the PMR referenced by the import struct from the
+                device's virtual address space.
+                If this was not the last remaining CPU mapping on the import
+                struct only the cpu mapping refcount is decreased.
+******************************************************************************/
+IMG_VOID _DevmemImportStructDevUnmap(DEVMEM_IMPORT *psImport);
+
+/******************************************************************************
+@Function       _DevmemImportStructCPUMap
+@Description    NEVER call after the last _DevmemMemDescRelease()
+                Maps the PMR referenced by the import struct to the CPU's
+                virtual address space.
+                Does nothing but increase the cpu mapping refcount if the
+                import struct was already mapped.
+@return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR _DevmemImportStructCPUMap(DEVMEM_IMPORT *psImport);
+
+/******************************************************************************
+@Function       _DevmemImportStructCPUUnmap
+@Description    Unmaps the PMR referenced by the import struct from the CPU's
+                virtual address space.
+                If this was not the last remaining CPU mapping on the import
+                struct only the cpu mapping refcount is decreased.
+******************************************************************************/
+IMG_VOID _DevmemImportStructCPUUnmap(DEVMEM_IMPORT *psImport);
+
+
+/******************************************************************************
+@Function       _DevmemImportStructAcquire
+@Description    Acquire an import struct by increasing it's refcount.
+******************************************************************************/
+IMG_VOID _DevmemImportStructAcquire(DEVMEM_IMPORT *psImport);
+
+/******************************************************************************
+@Function       _DevmemImportStructRelease
+@Description    Reduces the refcount of the import struct.
+                Destroys the import in the case it was the last reference.
+                Destroys underlying PMR if this import was the last reference
+                to it.
+@return         A boolean to signal if the import was destroyed. True = yes.
+******************************************************************************/
+IMG_VOID _DevmemImportStructRelease(DEVMEM_IMPORT *psImport);
+
+/******************************************************************************
+@Function       _DevmemImportDiscard
+@Description    Discard a created, but unitilised import structure.
+                This must only be called before _DevmemImportStructInit
+                after which _DevmemImportStructRelease must be used to
+                "free" the import structure.
+******************************************************************************/
+IMG_VOID _DevmemImportDiscard(DEVMEM_IMPORT *psImport);
+
+/******************************************************************************
+@Function       _DevmemMemDescAlloc
+@Description    Allocates a MemDesc and create it's various locks.
+                Zero the allocated memory.
+@return         PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR _DevmemMemDescAlloc(DEVMEM_MEMDESC **ppsMemDesc);
+
+/******************************************************************************
+@Function       _DevmemMemDescInit
+@Description    Sets the given offset and import struct fields in the MemDesc.
+                Initialises refcount to 1 and other values to 0.
+
+@Input          psMemDesc    MemDesc to initialise.
+@Input          uiOffset     Offset in the import structure.
+@Input          psImport     Import the MemDesc is on.
+******************************************************************************/
+IMG_VOID _DevmemMemDescInit(DEVMEM_MEMDESC *psMemDesc,
+						  	IMG_DEVMEM_OFFSET_T uiOffset,
+						  	DEVMEM_IMPORT *psImport);
+
+/******************************************************************************
+@Function       _DevmemMemDescAcquire
+@Description    Acquires the MemDesc by increasing it's refcount.
+******************************************************************************/
+IMG_VOID _DevmemMemDescAcquire(DEVMEM_MEMDESC *psMemDesc);
+
+/******************************************************************************
+@Function       _DevmemMemDescRelease
+@Description    Releases the MemDesc by reducing it's refcount.
+                Destroy the MemDesc if it's recount is 0.
+                Destroy the import struct the MemDesc is on if that was the
+                last MemDesc on the import, probably following the destruction
+                of the underlying PMR.
+******************************************************************************/
+IMG_VOID _DevmemMemDescRelease(DEVMEM_MEMDESC *psMemDesc);
+
+/******************************************************************************
+@Function       _DevmemMemDescDiscard
+@Description    Discard a created, but unitilised MemDesc structure.
+                This must only be called before _DevmemMemDescInit
+                after which _DevmemMemDescRelease must be used to
+                "free" the MemDesc structure.
+******************************************************************************/
+IMG_VOID _DevmemMemDescDiscard(DEVMEM_MEMDESC *psMemDesc);
+
+#endif /* _DEVICEMEM_UTILS_H_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/shared/include/dllist.h b/drivers/external_drivers/intel_media/graphics/rgx/services/shared/include/dllist.h
new file mode 100644
index 0000000..190f74d
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/shared/include/dllist.h
@@ -0,0 +1,248 @@
+/*************************************************************************/ /*!
+@File
+@Title          Double linked list header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Double linked list interface
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _DLLIST_
+#define _DLLIST_
+
+#include "img_types.h"
+
+/*!
+	Pointer to a linked list node
+*/
+typedef struct _DLLIST_NODE_	*PDLLIST_NODE;
+
+
+/*!
+	Node in a linked list
+*/
+/*
+ * Note: the following structure's size is architecture-dependent and
+ * clients may need to create a mirror the structure definition if it needs
+ * to be used in a structure shared between host and device. Consider such
+ * clients if any changes are made to this structure.
+ */ 
+typedef struct _DLLIST_NODE_
+{
+	struct _DLLIST_NODE_	*psPrevNode;
+	struct _DLLIST_NODE_	*psNextNode;
+} DLLIST_NODE;
+
+
+/*!
+	Static initialiser
+*/
+#define DECLARE_DLLIST(n) \
+DLLIST_NODE n = {&n, &n}
+
+
+/*************************************************************************/ /*!
+@Function       dllist_init
+
+@Description    Initialize a new double linked list
+
+@Input          psListHead              List head Node
+
+*/
+/*****************************************************************************/
+static INLINE
+IMG_VOID dllist_init(PDLLIST_NODE psListHead)
+{
+	psListHead->psPrevNode = psListHead;
+	psListHead->psNextNode = psListHead;
+}
+
+
+/*************************************************************************/ /*!
+@Function       dllist_is_empty
+
+@Description    Returns whether the list is empty
+
+@Input          psListHead              List head Node
+
+*/
+/*****************************************************************************/
+static INLINE
+IMG_BOOL dllist_is_empty(PDLLIST_NODE psListHead)
+{
+	return ((psListHead->psPrevNode == psListHead) 
+				&& (psListHead->psNextNode == psListHead));
+}
+
+
+/*************************************************************************/ /*!
+@Function       dllist_add_to_head
+
+@Description    Add psNewNode to head of list psListHead
+
+@Input          psListHead             Head Node
+@Input          psNewNode              New Node
+
+*/
+/*****************************************************************************/
+static INLINE
+IMG_VOID dllist_add_to_head(PDLLIST_NODE psListHead, PDLLIST_NODE psNewNode)
+{
+	PDLLIST_NODE psTmp;
+
+	psTmp = psListHead->psNextNode;
+
+	psListHead->psNextNode = psNewNode;
+	psNewNode->psNextNode = psTmp;
+
+	psTmp->psPrevNode = psNewNode;
+	psNewNode->psPrevNode = psListHead;
+}
+
+
+/*************************************************************************/ /*!
+@Function       dllist_add_to_tail
+
+@Description    Add psNewNode to tail of list psListHead
+
+@Input          psListHead             Head Node
+@Input          psNewNode              New Node
+
+*/
+/*****************************************************************************/
+static INLINE
+IMG_VOID dllist_add_to_tail(PDLLIST_NODE psListHead, PDLLIST_NODE psNewNode)
+{
+	PDLLIST_NODE psTmp;
+
+	psTmp = psListHead->psPrevNode;
+
+	psListHead->psPrevNode = psNewNode;
+	psNewNode->psPrevNode = psTmp;
+
+	psTmp->psNextNode = psNewNode;
+	psNewNode->psNextNode = psListHead;
+}
+
+
+/*************************************************************************/ /*!
+@Function       dllist_node_is_in_list
+
+@Description    Returns IMG_TRUE if psNode is in a list 
+
+@Input          psNode             List node
+
+*/
+/*****************************************************************************/
+static INLINE
+IMG_BOOL dllist_node_is_in_list(PDLLIST_NODE psNode)
+{
+	return (psNode->psNextNode != 0);
+}
+
+
+/*************************************************************************/ /*!
+@Function       dllist_get_next_node
+
+@Description    Returns the list node after psListHead or IMG_NULL psListHead
+				is the only element in the list.
+
+@Input          psListHead             List node to start the operation
+
+*/
+/*****************************************************************************/
+static INLINE
+PDLLIST_NODE dllist_get_next_node(PDLLIST_NODE psListHead)
+{
+	if (psListHead->psNextNode == psListHead)
+	{
+		return IMG_NULL;
+	}
+	else
+	{
+		return psListHead->psNextNode;
+	}
+} 
+
+
+/*************************************************************************/ /*!
+@Function       dllist_remove_node
+
+@Description    Removes psListNode from the list where it currently belongs
+
+@Input          psListNode             List node to be removed
+
+*/
+/*****************************************************************************/
+static INLINE
+IMG_VOID dllist_remove_node(PDLLIST_NODE psListNode)
+{
+	psListNode->psNextNode->psPrevNode = psListNode->psPrevNode;
+	psListNode->psPrevNode->psNextNode = psListNode->psNextNode;
+
+	/* Clear the node to show it's not on a list */
+	psListNode->psPrevNode = 0;
+	psListNode->psNextNode = 0;
+}
+
+
+/*!
+	Callback function called on each element of the list
+*/
+typedef IMG_BOOL (*PFN_NODE_CALLBACK)(PDLLIST_NODE psNode, IMG_PVOID pvCallbackData);
+
+
+/*************************************************************************/ /*!
+@Function       dllist_foreach_node
+
+@Description    Walk through all the nodes on the list until the 
+				end or a callback returns FALSE
+
+@Input          psListHead			List node to start the operation
+@Input			pfnCallBack			PFN_NODE_CALLBACK function called on each element	
+@Input			pvCallbackData		Data passed to pfnCallBack alongside the current Node
+
+*/
+/*****************************************************************************/
+IMG_INTERNAL
+IMG_VOID dllist_foreach_node(PDLLIST_NODE psListHead,
+							  PFN_NODE_CALLBACK pfnCallBack,
+							  IMG_PVOID pvCallbackData);
+
+
+#endif	/* _DLLIST_ */
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/shared/include/sync.h b/drivers/external_drivers/intel_media/graphics/rgx/services/shared/include/sync.h
new file mode 100644
index 0000000..290b8cd
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/shared/include/sync.h
@@ -0,0 +1,399 @@
+/*************************************************************************/ /*!
+@File
+@Title          Synchronisation interface header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines the client side interface for synchronisation
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "sync_external.h"
+#include "pdumpdefs.h"
+#include "dllist.h"
+#include "pvr_debug.h"
+
+#ifndef _SYNC_
+#define _SYNC_
+
+#if defined(__KERNEL__) && defined(ANDROID) && !defined(__GENKSYMS__)
+#define __pvrsrv_defined_struct_enum__
+#include <services_kernel_client.h>
+#endif
+
+/*************************************************************************/ /*!
+@Function       SyncPrimContextCreate
+
+@Description    Create a new synchronisation context
+
+@Input          hBridge                 Bridge handle
+
+@Input          hDeviceNode             Device node handle
+
+@Output         hSyncPrimContext        Handle to the created synchronisation
+                                        primitive context
+
+@Return         PVRSRV_OK if the synchronisation primitive context was
+                successfully created
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncPrimContextCreate(SYNC_BRIDGE_HANDLE	hBridge,
+					  IMG_HANDLE			hDeviceNode,
+					  PSYNC_PRIM_CONTEXT	*hSyncPrimContext);
+
+/*************************************************************************/ /*!
+@Function       SyncPrimContextDestroy
+
+@Description    Destroy a synchronisation context
+
+@Input          hSyncPrimContext        Handle to the synchronisation
+                                        primitive context to destroy
+
+@Return         None
+*/
+/*****************************************************************************/
+IMG_VOID
+SyncPrimContextDestroy(PSYNC_PRIM_CONTEXT hSyncPrimContext);
+
+/*************************************************************************/ /*!
+@Function       SyncPrimAlloc
+
+@Description    Allocate a new synchronisation primitive on the specified
+                synchronisation context
+
+@Input          hSyncPrimContext        Handle to the synchronisation
+                                        primitive context
+
+@Output         ppsSync                 Created synchronisation primitive
+
+@Input          pszClassName            Sync source annotation
+
+@Return         PVRSRV_OK if the synchronisation primitive was
+                successfully created
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncPrimAlloc(PSYNC_PRIM_CONTEXT		hSyncPrimContext,
+			  PVRSRV_CLIENT_SYNC_PRIM	**ppsSync,
+			  const IMG_CHAR 			*pszClassName);
+
+#if defined(__KERNEL__)
+/*************************************************************************/ /*!
+@Function       SyncPrimAllocForServerSync
+
+@Description    Allocate a new synchronisation primitive on the specified
+                synchronisation context for a server sync
+
+@Input          hSyncPrimContext        Handle to the synchronisation
+                                        primitive context
+
+@Output         ppsSync                 Created synchronisation primitive
+
+@Input          pszClassName            Sync source annotation
+
+@Return         PVRSRV_OK if the synchronisation primitive was
+                successfully created
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncPrimAllocForServerSync(PSYNC_PRIM_CONTEXT   hSyncPrimContext,
+						PVRSRV_CLIENT_SYNC_PRIM **ppsSync,
+						const IMG_CHAR          *pszClassName);
+#endif
+
+/*************************************************************************/ /*!
+@Function       SyncPrimFree
+
+@Description    Free a synchronisation primitive
+
+@Input          psSync                  The synchronisation primitive to free
+
+@Return         None
+*/
+/*****************************************************************************/
+IMG_VOID
+SyncPrimFree(PVRSRV_CLIENT_SYNC_PRIM *psSync);
+
+/*************************************************************************/ /*!
+@Function       SyncPrimSet
+
+@Description    Set the synchronisation primitive to a value
+
+@Input          psSync                  The synchronisation primitive to set
+
+@Input          ui32Value               Value to set it to
+
+@Return         None
+*/
+/*****************************************************************************/
+IMG_VOID
+SyncPrimSet(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value);
+
+#if defined(NO_HARDWARE)
+
+/*************************************************************************/ /*!
+@Function       SyncPrimNoHwUpdate
+
+@Description    Updates the synchronisation primitive value (in NoHardware drivers)
+
+@Input          psSync                  The synchronisation primitive to update
+
+@Input          ui32Value               Value to update it to
+
+@Return         None
+*/
+/*****************************************************************************/
+IMG_VOID
+SyncPrimNoHwUpdate(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value);
+#endif
+
+PVRSRV_ERROR
+SyncPrimServerAlloc(SYNC_BRIDGE_HANDLE	hBridge,
+					IMG_HANDLE			hDeviceNode,
+					PVRSRV_CLIENT_SYNC_PRIM **ppsSync,
+					const IMG_CHAR		*pszClassName
+					PVR_DBG_FILELINE_PARAM);
+
+PVRSRV_ERROR
+SyncPrimServerGetStatus(IMG_UINT32 ui32SyncCount,
+						PVRSRV_CLIENT_SYNC_PRIM **papsSync,
+						IMG_UINT32 *pui32UID,
+						IMG_UINT32 *pui32FWAddr,
+						IMG_UINT32 *pui32CurrentOp,
+						IMG_UINT32 *pui32NextOp);
+
+PVRSRV_ERROR
+SyncPrimServerQueueOp(PVRSRV_CLIENT_SYNC_PRIM_OP *psSyncOp);
+
+IMG_BOOL
+SyncPrimIsServerSync(PVRSRV_CLIENT_SYNC_PRIM *psSync);
+
+IMG_HANDLE
+SyncPrimGetServerHandle(PVRSRV_CLIENT_SYNC_PRIM *psSync);
+
+
+
+PVRSRV_ERROR
+SyncPrimOpCreate(IMG_UINT32 ui32SyncCount,
+				 PVRSRV_CLIENT_SYNC_PRIM **papsSyncPrim,
+				 PSYNC_OP_COOKIE *ppsCookie);
+
+PVRSRV_ERROR
+SyncPrimOpTake(PSYNC_OP_COOKIE psCookie,
+			   IMG_UINT32 ui32SyncCount,
+			   PVRSRV_CLIENT_SYNC_PRIM_OP *pasSyncOp);
+
+PVRSRV_ERROR
+SyncPrimOpReady(PSYNC_OP_COOKIE psCookie,
+				IMG_BOOL *pbReady);
+
+PVRSRV_ERROR
+SyncPrimOpComplete(PSYNC_OP_COOKIE psCookie);
+
+IMG_VOID
+SyncPrimOpDestroy(PSYNC_OP_COOKIE psCookie);
+
+PVRSRV_ERROR
+SyncPrimOpResolve(PSYNC_OP_COOKIE psCookie,
+				  IMG_UINT32 *pui32SyncCount,
+				  PVRSRV_CLIENT_SYNC_PRIM_OP **ppsSyncOp);
+
+PVRSRV_ERROR
+SyncPrimDumpSyncs(IMG_UINT32 ui32SyncCount, PVRSRV_CLIENT_SYNC_PRIM **papsSync, const IMG_CHAR *pcszExtraInfo);
+
+#if defined(PDUMP)
+/*************************************************************************/ /*!
+@Function       SyncPrimPDump
+
+@Description    PDump the current value of the synchronisation primitive
+
+@Input          psSync                  The synchronisation primitive to PDump
+
+@Return         None
+*/
+/*****************************************************************************/
+IMG_VOID
+SyncPrimPDump(PVRSRV_CLIENT_SYNC_PRIM *psSync);
+
+/*************************************************************************/ /*!
+@Function       SyncPrimPDumpValue
+
+@Description    PDump the ui32Value as the value of the synchronisation 
+				primitive (regardless of the current value).
+
+@Input          psSync          The synchronisation primitive to PDump
+@Input			ui32Value		Value to give to the sync prim on the pdump
+
+@Return         None
+*/
+/*****************************************************************************/
+IMG_VOID
+SyncPrimPDumpValue(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value);
+
+/*************************************************************************/ /*!
+@Function       SyncPrimPDumpPol
+
+@Description    Do a PDump poll of the synchronisation primitive
+
+@Input          psSync                  The synchronisation primitive to PDump
+
+@Input          ui32Value               Value to poll for 
+
+@Input          ui32Mask                PDump mask operator
+
+@Input          ui32PDumpFlags          PDump flags
+
+@Return         None
+*/
+/*****************************************************************************/
+IMG_VOID
+SyncPrimPDumpPol(PVRSRV_CLIENT_SYNC_PRIM *psSync,
+				 IMG_UINT32 ui32Value,
+				 IMG_UINT32 ui32Mask,
+				 PDUMP_POLL_OPERATOR eOperator,
+				 IMG_UINT32 ui32PDumpFlags);
+
+/*************************************************************************/ /*!
+@Function       SyncPrimOpPDumpPol
+
+@Description    Do a PDump poll all the synchronisation primitives on this
+				Operation cookie.
+
+@Input          psCookie                Operation cookie
+
+@Input          ui32PDumpFlags          PDump flags
+
+@Return         None
+*/
+/*****************************************************************************/
+IMG_VOID
+SyncPrimOpPDumpPol(PSYNC_OP_COOKIE psCookie,
+				 PDUMP_POLL_OPERATOR eOperator,
+				 IMG_UINT32 ui32PDumpFlags);
+
+/*************************************************************************/ /*!
+@Function       SyncPrimPDumpCBP
+
+@Description    Do a PDump CB poll using the synchronisation primitive
+
+@Input          psSync                  The synchronisation primitive to PDump
+
+@Input          uiWriteOffset           Current write offset of buffer
+
+@Input          uiPacketSize            Size of the packet to write into CB
+
+@Input          uiBufferSize            Size of the CB
+
+@Return         None
+*/
+/*****************************************************************************/
+IMG_VOID 
+SyncPrimPDumpCBP(PVRSRV_CLIENT_SYNC_PRIM *psSync,
+				 IMG_UINT64 uiWriteOffset,
+				 IMG_UINT64 uiPacketSize,
+				 IMG_UINT64 uiBufferSize);
+
+#else
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(SyncPrimPDumpValue)
+#endif
+static INLINE IMG_VOID
+SyncPrimPDumpValue(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value)
+{
+	PVR_UNREFERENCED_PARAMETER(psSync);
+	PVR_UNREFERENCED_PARAMETER(ui32Value);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(SyncPrimPDump)
+#endif
+static INLINE IMG_VOID
+SyncPrimPDump(PVRSRV_CLIENT_SYNC_PRIM *psSync)
+{
+	PVR_UNREFERENCED_PARAMETER(psSync);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(SyncPrimPDumpPol)
+#endif
+static INLINE IMG_VOID
+SyncPrimPDumpPol(PVRSRV_CLIENT_SYNC_PRIM *psSync,
+				 IMG_UINT32 ui32Value,
+				 IMG_UINT32 ui32Mask,
+				 PDUMP_POLL_OPERATOR eOperator,
+				 IMG_UINT32 ui32PDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psSync);
+	PVR_UNREFERENCED_PARAMETER(ui32Value);
+	PVR_UNREFERENCED_PARAMETER(ui32Mask);
+	PVR_UNREFERENCED_PARAMETER(eOperator);
+	PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(SyncPrimServerPDumpPol)
+#endif
+static INLINE IMG_VOID
+SyncPrimServerPDumpPol(PVRSRV_CLIENT_SYNC_PRIM *psSync,
+				 PDUMP_POLL_OPERATOR eOperator,
+				 IMG_UINT32 ui32PDumpFlags)
+{
+	PVR_UNREFERENCED_PARAMETER(psSync);
+	PVR_UNREFERENCED_PARAMETER(eOperator);
+	PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(SyncPrimPDumpCBP)
+#endif
+static INLINE IMG_VOID 
+SyncPrimPDumpCBP(PVRSRV_CLIENT_SYNC_PRIM *psSync,
+				 IMG_UINT64 uiWriteOffset,
+				 IMG_UINT64 uiPacketSize,
+				 IMG_UINT64 uiBufferSize)
+{
+	PVR_UNREFERENCED_PARAMETER(psSync);
+	PVR_UNREFERENCED_PARAMETER(uiWriteOffset);
+	PVR_UNREFERENCED_PARAMETER(uiPacketSize);
+	PVR_UNREFERENCED_PARAMETER(uiBufferSize);
+}
+#endif	/* PDUMP */
+#endif	/* _PVRSRV_SYNC_ */
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/shared/include/sync_internal.h b/drivers/external_drivers/intel_media/graphics/rgx/services/shared/include/sync_internal.h
new file mode 100644
index 0000000..eecab0f
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/shared/include/sync_internal.h
@@ -0,0 +1,121 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services internal synchronisation interface header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Defines the internal client side interface for services
+                synchronisation
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _SYNC_INTERNAL_
+#define _SYNC_INTERNAL_
+
+#include "img_types.h"
+#include "sync_external.h"
+#include "ra.h"
+#include "dllist.h"
+#include "lock.h"
+#include "devicemem.h"
+
+/*
+	Private structure's
+*/
+#define SYNC_PRIM_NAME_SIZE		50
+typedef struct SYNC_PRIM_CONTEXT
+{
+	SYNC_BRIDGE_HANDLE			hBridge;						/*!< Bridge handle */
+	IMG_HANDLE					hDeviceNode;					/*!< The device we're operating on */
+	IMG_CHAR					azName[SYNC_PRIM_NAME_SIZE];	/*!< Name of the RA */
+	RA_ARENA					*psSubAllocRA;					/*!< RA context */
+	IMG_CHAR					azSpanName[SYNC_PRIM_NAME_SIZE];/*!< Name of the span RA */
+	RA_ARENA					*psSpanRA;						/*!< RA used for span management of SubAllocRA */
+	IMG_UINT32					ui32RefCount;					/*!< Refcount for this context */
+	POS_LOCK					hLock;							/*!< Lock for this context */
+} SYNC_PRIM_CONTEXT;
+
+typedef struct _SYNC_PRIM_BLOCK_
+{
+	SYNC_PRIM_CONTEXT	*psContext;				/*!< Our copy of the services connection */
+	IMG_HANDLE			hServerSyncPrimBlock;	/*!< Server handle for this block */
+	IMG_UINT32			ui32SyncBlockSize;		/*!< Size of the sync prim block */
+	IMG_UINT32			ui32FirmwareAddr;		/*!< Firmware address */
+	DEVMEM_MEMDESC		*hMemDesc;				/*!< Host mapping handle */
+	IMG_UINT32			*pui32LinAddr;			/*!< User CPU mapping */
+	IMG_UINT64			uiSpanBase;				/*!< Base of this import in the span RA */
+	DLLIST_NODE			sListNode;				/*!< List node for the sync block list */
+} SYNC_PRIM_BLOCK;
+
+typedef enum _SYNC_PRIM_TYPE_
+{
+	SYNC_PRIM_TYPE_UNKNOWN = 0,
+	SYNC_PRIM_TYPE_LOCAL,
+	SYNC_PRIM_TYPE_SERVER,
+} SYNC_PRIM_TYPE;
+
+typedef struct _SYNC_PRIM_LOCAL_
+{
+	ATOMIC_T				hRefCount;	/*!< Ref count for this sync */
+	SYNC_PRIM_BLOCK			*psSyncBlock;	/*!< Synchronisation block this primitive is allocated on */
+	IMG_UINT64				uiSpanAddr;		/*!< Span address of the sync */
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+	IMG_HANDLE				hRecord;		/*!< Sync record handle */
+#endif
+} SYNC_PRIM_LOCAL;
+
+typedef struct _SYNC_PRIM_SERVER_
+{
+	SYNC_BRIDGE_HANDLE		hBridge;			/*!< Bridge handle */
+	IMG_HANDLE				hServerSync;		/*!< Handle to the server sync */
+	IMG_UINT32				ui32FirmwareAddr;	/*!< Firmware address of the sync */
+} SYNC_PRIM_SERVER;
+
+typedef struct _SYNC_PRIM_
+{
+	PVRSRV_CLIENT_SYNC_PRIM	sCommon;		/*!< Client visible part of the sync prim */
+	SYNC_PRIM_TYPE			eType;			/*!< Sync primative type */
+	union {
+		SYNC_PRIM_LOCAL		sLocal;			/*!< Local sync primative data */
+		SYNC_PRIM_SERVER	sServer;		/*!< Server sync primative data */
+	} u;
+} SYNC_PRIM;
+
+
+/* FIXME this must return a correctly typed pointer */
+IMG_INTERNAL IMG_UINT32 SyncPrimGetFirmwareAddr(PVRSRV_CLIENT_SYNC_PRIM *psSync);
+
+#endif	/* _SYNC_INTERNAL_ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/shared/include/tlclient.h b/drivers/external_drivers/intel_media/graphics/rgx/services/shared/include/tlclient.h
new file mode 100644
index 0000000..3a81ef6
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/shared/include/tlclient.h
@@ -0,0 +1,169 @@
+/*************************************************************************/ /*!
+@File           tlclient.h
+@Title          Services Transport Layer shared API
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Transport layer common API used in both clients and server
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+ 
+#ifndef TLCLIENT_H_
+#define TLCLIENT_H_
+
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+#include "services.h"
+
+#include "pvr_tlcommon.h"
+
+
+/**************************************************************************/ /*!
+ @Function		TLClientConnect
+ @Description	Initialise direct connection to Services kernel server
+                transport layer
+ @Output		phSrvHandle	Address of a pointer to a connection object
+ @Return        PVRSRV_ERROR:	for system error codes
+*/ /***************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR TLClientConnect(IMG_HANDLE* phSrvHandle);
+
+
+/**************************************************************************/ /*!
+ @Function		TLClientDisconnect
+ @Description	Disconnect from the direct connected Services kernel server
+                transport layer
+ @Input			hSrvHandle	Handle to connection object as returned from
+ 	 	 	 	 	 	 	TLClientConnect()
+ @Return        PVRSRV_ERROR:	for system error codes
+*/ /***************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR TLClientDisconnect(IMG_HANDLE hSrvHandle);
+
+
+/**************************************************************************/ /*!
+ @Function		TLClientOpenStream
+ @Description	Open a descriptor onto an existing kernel transport stream.
+ @Input			hSrvHandle    	Address of a pointer to a connection object
+ @Input			pszName			Address of the stream name string, no longer
+ 	 	 	 	 	 	 	 	than PRVSRVTL_MAX_STREAM_NAME_SIZE.
+ @Input			ui32Mode		Unused
+ @Output		phSD			Address of a pointer to an stream object
+ @Return 		PVRSRV_ERROR_NOT_FOUND:        when named stream not found
+ @Return		PVRSRV_ERROR_ALREADY_OPEN:     stream already open by another
+ @Return		PVRSRV_ERROR_STREAM_ERROR:     internal driver state error
+ @Return        PVRSRV_ERROR_TIMEOUT:          block timed out, stream not found
+ @Return		PVRSRV_ERROR:			       for other system codes
+*/ /***************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR TLClientOpenStream(IMG_HANDLE hSrvHandle,
+		IMG_PCHAR    pszName,
+		IMG_UINT32   ui32Mode,
+		IMG_HANDLE*  phSD);
+
+
+/**************************************************************************/ /*!
+ @Function		TLClientCloseStream
+ @Description	Close and release the stream connection to Services kernel
+				server transport layer. Any outstanding Acquire will be
+				released.
+ @Input			hSrvHandle      Address of a pointer to a connection object
+ @Input			hSD				Handle of the stream object to close
+ @Return		PVRSRV_ERROR_HANDLE_NOT_FOUND: when SD handle is not known
+ @Return		PVRSRV_ERROR_STREAM_ERROR: 	  internal driver state error
+ @Return		PVRSRV_ERROR:				  for system codes
+*/ /***************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR TLClientCloseStream(IMG_HANDLE hSrvHandle,
+		IMG_HANDLE hSD);
+
+
+/**************************************************************************/ /*!
+ @Function		TLClientAcquireData
+ @Description	When there is data available in the stream buffer this call
+ 	 	 	 	returns with the address and length of the data buffer the
+ 	 	 	 	client can safely read. This buffer may contain one or more
+ 	 	 	 	packets of data.
+ 	 	 	 	If no data is available then this call blocks until it becomes
+ 	 	 	 	available. However if the stream has been destroyed while
+ 	 	 	 	waiting then a resource unavailable error will be returned
+ 	 	 	 	to the caller. Clients must pair this call with a
+ 	 	 	 	ReleaseData call.
+ @Input			hSrvHandle  	Address of a pointer to a connection object
+ @Input			hSD				Handle of the stream object to read
+ @Output		ppPacketBuf		Address of a pointer to an byte buffer. On exit
+								pointer contains address of buffer to read from
+ @Output		puiBufLen		Pointer to an integer. On exit it is the size
+								of the data to read from the packet buffer
+ @Return		PVRSRV_ERROR_RESOURCE_UNAVAILABLE: when stream no longer exists
+ @Return		PVRSRV_ERROR_HANDLE_NOT_FOUND:     when SD handle not known
+ @Return		PVRSRV_ERROR_STREAM_ERROR: 	       internal driver state error
+ @Return		PVRSRV_ERROR_RETRY:				   release not called beforehand
+ @Return        PVRSRV_ERROR_TIMEOUT:              block timed out, no data
+ @Return		PVRSRV_ERROR:					   for other system codes
+*/ /***************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR TLClientAcquireData(IMG_HANDLE hSrvHandle,
+		IMG_HANDLE  hSD,
+		IMG_PBYTE*  ppPacketBuf,
+		IMG_UINT32* puiBufLen);
+
+
+/**************************************************************************/ /*!
+ @Function		TLClientReleaseData
+ @Description	Called after client has read the stream data out of the buffer
+ 	 	 	 	The data is subsequently flushed from the stream buffer to make
+ 	 	 	 	room for more data packets from the stream source.
+ @Input			hSrvHandle  	Address of a pointer to a connection object
+ @Input			hSD				Handle of the stream object to read
+ @Return		PVRSRV_ERROR_RESOURCE_UNAVAILABLE: when stream no longer exists
+ @Return		PVRSRV_ERROR_HANDLE_NOT_FOUND:   when SD handle not known to TL
+ @Return		PVRSRV_ERROR_STREAM_ERROR: 	     internal driver state error
+ @Return		PVRSRV_ERROR_RETRY:				 acquire not called beforehand
+ @Return		PVRSRV_ERROR:	                 for system codes
+*/ /***************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR TLClientReleaseData(IMG_HANDLE hSrvHandle,
+		IMG_HANDLE hSD);
+
+
+
+#endif /* TLCLIENT_H_ */
+
+/******************************************************************************
+ End of file (tlclient.h)
+******************************************************************************/
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/system/common/env/linux/ion_support_generic.c b/drivers/external_drivers/intel_media/graphics/rgx/services/system/common/env/linux/ion_support_generic.c
new file mode 100644
index 0000000..277c3b5
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/system/common/env/linux/ion_support_generic.c
@@ -0,0 +1,161 @@
+/*************************************************************************/ /*!
+@File           ion_support.c
+@Title          Generic Ion support
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This file does the Ion initialisation and De-initialistion for
+                systems that don't already have Ion.
+                For systems that do have Ion it's expected they they init Ion
+                as per their requirements and then implement IonDevAcquire and
+                IonDevRelease which provides access to the ion device.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "ion_support.h"
+#include "ion_sys.h"
+
+#include <linux/version.h>
+#include PVR_ANDROID_ION_HEADER
+#include PVR_ANDROID_ION_PRIV_HEADER
+#include <linux/err.h>
+#include <linux/slab.h>
+
+/* Just the system heaps are used by the generic implementation */
+static struct ion_platform_data generic_config = {
+	.nr = 2,
+	.heaps =
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,4,39))
+#else
+		(struct ion_platform_heap [])
+#endif
+		{
+			{
+				.type = ION_HEAP_TYPE_SYSTEM_CONTIG,
+				.name = "system_contig",
+				.id = ION_HEAP_TYPE_SYSTEM_CONTIG,
+			},
+			{
+				.type = ION_HEAP_TYPE_SYSTEM,
+				.name = "system",
+				.id = ION_HEAP_TYPE_SYSTEM,
+			}
+		}
+};
+
+struct ion_heap **g_apsIonHeaps;
+struct ion_device *g_psIonDev;
+
+PVRSRV_ERROR IonInit(void *phPrivateData)
+{
+	int uiHeapCount = generic_config.nr;
+	int uiError;
+	int i;
+
+	PVR_UNREFERENCED_PARAMETER(phPrivateData);
+
+	g_apsIonHeaps = kzalloc(sizeof(struct ion_heap *) * uiHeapCount, GFP_KERNEL);
+
+	/* Create the ion devicenode */
+	g_psIonDev = ion_device_create(NULL);
+	if (IS_ERR_OR_NULL(g_psIonDev)) {
+		kfree(g_apsIonHeaps);
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	/* Register all the heaps */
+	for (i = 0; i < generic_config.nr; i++)
+	{
+		struct ion_platform_heap *psPlatHeapData = &generic_config.heaps[i];
+
+		g_apsIonHeaps[i] = ion_heap_create(psPlatHeapData);
+		if (IS_ERR_OR_NULL(g_apsIonHeaps[i]))
+		{
+			uiError = PTR_ERR(g_apsIonHeaps[i]);
+			goto failHeapCreate;
+		}
+		ion_device_add_heap(g_psIonDev, g_apsIonHeaps[i]);
+	}
+
+	return PVRSRV_OK;
+
+failHeapCreate:
+	for (i = 0; i < uiHeapCount; i++) {
+		if (g_apsIonHeaps[i])
+		{
+				ion_heap_destroy(g_apsIonHeaps[i]);
+		}
+	}
+	kfree(g_apsIonHeaps);
+	ion_device_destroy(g_psIonDev);
+
+	return PVRSRV_ERROR_OUT_OF_MEMORY;
+}
+
+struct ion_device *IonDevAcquire(IMG_VOID)
+{
+	return g_psIonDev;
+}
+
+IMG_VOID IonDevRelease(struct ion_device *psIonDev)
+{
+	/* Nothing to do, sanity check the pointer we're passed back */
+	PVR_ASSERT(psIonDev == g_psIonDev);
+}
+
+IMG_UINT32 IonPhysHeapID(IMG_VOID)
+{
+	return 0;
+}
+
+IMG_VOID IonDeinit(IMG_VOID)
+{
+	int uiHeapCount = generic_config.nr;
+	int i;
+
+	for (i = 0; i < uiHeapCount; i++) {
+		if (g_apsIonHeaps[i])
+		{
+				ion_heap_destroy(g_apsIonHeaps[i]);
+		}
+	}
+	kfree(g_apsIonHeaps);
+	ion_device_destroy(g_psIonDev);
+}
+
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/system/common/env/linux/pci_support.c b/drivers/external_drivers/intel_media/graphics/rgx/services/system/common/env/linux/pci_support.c
new file mode 100644
index 0000000..5c03669
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/system/common/env/linux/pci_support.c
@@ -0,0 +1,658 @@
+/*************************************************************************/ /*!
+@File
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/pci.h>
+#include <linux/version.h>
+#include <asm/mtrr.h>
+
+#include "pci_support.h"
+#include "allocmem.h"
+
+typedef	struct _PVR_PCI_DEV_TAG
+{
+	struct pci_dev		*psPCIDev;
+	HOST_PCI_INIT_FLAGS	ePCIFlags;
+	IMG_BOOL		abPCIResourceInUse[DEVICE_COUNT_RESOURCE];
+} PVR_PCI_DEV;
+
+/*************************************************************************/ /*!
+@Function       OSPCISetDev
+@Description    Set a PCI device for subsequent use.
+@Input          pvPCICookie             Pointer to OS specific PCI structure
+@Input          eFlags                  Flags
+@Return		PVRSRV_PCI_DEV_HANDLE   Pointer to PCI device handle
+*/ /**************************************************************************/
+PVRSRV_PCI_DEV_HANDLE OSPCISetDev(IMG_VOID *pvPCICookie, HOST_PCI_INIT_FLAGS eFlags)
+{
+	int err;
+	IMG_UINT32 i;
+	PVR_PCI_DEV *psPVRPCI;
+
+	psPVRPCI = OSAllocMem(sizeof(*psPVRPCI));
+	if (psPVRPCI == IMG_NULL)
+	{
+		printk(KERN_ERR "OSPCISetDev: Couldn't allocate PVR PCI structure\n");
+		return IMG_NULL;
+	}
+
+	psPVRPCI->psPCIDev = (struct pci_dev *)pvPCICookie;
+	psPVRPCI->ePCIFlags = eFlags;
+
+	err = pci_enable_device(psPVRPCI->psPCIDev);
+	if (err != 0)
+	{
+		printk(KERN_ERR "OSPCISetDev: Couldn't enable device (%d)\n", err);
+		OSFreeMem(psPVRPCI);
+		return IMG_NULL;
+	}
+
+	if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER)	/* PRQA S 3358 */ /* misuse of enums */
+	{
+		pci_set_master(psPVRPCI->psPCIDev);
+	}
+
+	if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_MSI)		/* PRQA S 3358 */ /* misuse of enums */
+	{
+#if defined(CONFIG_PCI_MSI)
+		err = pci_enable_msi(psPVRPCI->psPCIDev);
+		if (err != 0)
+		{
+			printk(KERN_ERR "OSPCISetDev: Couldn't enable MSI (%d)", err);
+			psPVRPCI->ePCIFlags &= ~HOST_PCI_INIT_FLAG_MSI;	/* PRQA S 1474,3358,4130 */ /* misuse of enums */
+		}
+#else
+		printk(KERN_ERR "OSPCISetDev: MSI support not enabled in the kernel");
+#endif
+}
+
+	/* Initialise the PCI resource tracking array */
+	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
+	{
+		psPVRPCI->abPCIResourceInUse[i] = IMG_FALSE;
+	}
+
+	return (PVRSRV_PCI_DEV_HANDLE)psPVRPCI;
+}
+
+/*************************************************************************/ /*!
+@Function       OSPCIAcquireDev
+@Description    Acquire a PCI device for subsequent use.
+@Input          ui16VendorID            Vendor PCI ID
+@Input          ui16DeviceID            Device PCI ID
+@Input          eFlags                  Flags
+@Return		PVRSRV_PCI_DEV_HANDLE   Pointer to PCI device handle
+*/ /**************************************************************************/
+PVRSRV_PCI_DEV_HANDLE OSPCIAcquireDev(IMG_UINT16 ui16VendorID, 
+				      IMG_UINT16 ui16DeviceID, 
+				      HOST_PCI_INIT_FLAGS eFlags)
+{
+	struct pci_dev *psPCIDev;
+
+	psPCIDev = pci_get_device(ui16VendorID, ui16DeviceID, NULL);
+	if (psPCIDev == NULL)
+	{
+		return IMG_NULL;
+	}
+
+	return OSPCISetDev((IMG_VOID *)psPCIDev, eFlags);
+}
+
+/*************************************************************************/ /*!
+@Function       OSPCIDevID
+@Description    Get the PCI device ID.
+@Input          hPVRPCI                 PCI device handle
+@Output         pui16DeviceID           Pointer to where the device ID should 
+                                        be returned
+@Return		PVRSRV_ERROR            Services error code
+*/ /**************************************************************************/
+PVRSRV_ERROR OSPCIDevID(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT16 *pui16DeviceID)
+{
+	PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+
+	if (pui16DeviceID == IMG_NULL)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	*pui16DeviceID = psPVRPCI->psPCIDev->device;
+
+	return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       OSPCIIRQ
+@Description    Get the interrupt number for the device.
+@Input          hPVRPCI                 PCI device handle
+@Output         pui16DeviceID           Pointer to where the interrupt number 
+                                        should be returned
+@Return		PVRSRV_ERROR            Services error code
+*/ /**************************************************************************/
+PVRSRV_ERROR OSPCIIRQ(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 *pui32IRQ)
+{
+	PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+
+	if (pui32IRQ == IMG_NULL)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	*pui32IRQ = psPVRPCI->psPCIDev->irq;
+
+	return PVRSRV_OK;
+}
+
+/* Functions supported by OSPCIAddrRangeFunc */
+enum HOST_PCI_ADDR_RANGE_FUNC
+{
+	HOST_PCI_ADDR_RANGE_FUNC_LEN,
+	HOST_PCI_ADDR_RANGE_FUNC_START,
+	HOST_PCI_ADDR_RANGE_FUNC_END,
+	HOST_PCI_ADDR_RANGE_FUNC_REQUEST,
+	HOST_PCI_ADDR_RANGE_FUNC_RELEASE
+};
+
+/*************************************************************************/ /*!
+@Function       OSPCIAddrRangeFunc
+@Description    Internal support function for various address range related 
+                functions
+@Input          eFunc                   Function to perform
+@Input          hPVRPCI                 PCI device handle
+@Input          ui32Index               Address range index
+@Return		IMG_UINT32              Function dependent value
+*/ /**************************************************************************/
+static IMG_UINT64 OSPCIAddrRangeFunc(enum HOST_PCI_ADDR_RANGE_FUNC eFunc,
+										 PVRSRV_PCI_DEV_HANDLE hPVRPCI,
+										 IMG_UINT32 ui32Index)
+{
+	PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+
+	if (ui32Index >= DEVICE_COUNT_RESOURCE)
+	{
+		printk(KERN_ERR "OSPCIAddrRangeFunc: Index out of range");
+		return 0;
+	}
+
+	switch (eFunc)
+	{
+		case HOST_PCI_ADDR_RANGE_FUNC_LEN:
+		{
+			return pci_resource_len(psPVRPCI->psPCIDev, ui32Index);
+		}
+		case HOST_PCI_ADDR_RANGE_FUNC_START:
+		{
+			return pci_resource_start(psPVRPCI->psPCIDev, ui32Index);
+		}
+		case HOST_PCI_ADDR_RANGE_FUNC_END:
+		{
+			return pci_resource_end(psPVRPCI->psPCIDev, ui32Index);
+		}
+		case HOST_PCI_ADDR_RANGE_FUNC_REQUEST:
+		{
+			int err = pci_request_region(psPVRPCI->psPCIDev, (IMG_INT)ui32Index, PVRSRV_MODNAME);
+			if (err != 0)
+			{
+				printk(KERN_ERR "OSPCIAddrRangeFunc: pci_request_region_failed (%d)", err);
+				return 0;
+			}
+			psPVRPCI->abPCIResourceInUse[ui32Index] = IMG_TRUE;
+			return 1;
+		}
+		case HOST_PCI_ADDR_RANGE_FUNC_RELEASE:
+		{
+			if (psPVRPCI->abPCIResourceInUse[ui32Index])
+			{
+				pci_release_region(psPVRPCI->psPCIDev, (IMG_INT)ui32Index);
+				psPVRPCI->abPCIResourceInUse[ui32Index] = IMG_FALSE;
+			}
+			return 1;
+		}
+		default:
+		{
+			printk(KERN_ERR "OSPCIAddrRangeFunc: Unknown function");
+			break;
+		}
+	}
+
+	return 0;
+}
+
+/*************************************************************************/ /*!
+@Function       OSPCIAddrRangeLen
+@Description    Returns length of a given address range
+@Input          hPVRPCI                 PCI device handle
+@Input          ui32Index               Address range index
+@Return		IMG_UINT32              Length of address range or 0 if no 
+                                        such range
+*/ /**************************************************************************/
+IMG_UINT64 OSPCIAddrRangeLen(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
+{
+	return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_LEN, hPVRPCI, ui32Index);
+}
+
+/*************************************************************************/ /*!
+@Function       OSPCIAddrRangeStart
+@Description    Returns the start of a given address range
+@Input          hPVRPCI                 PCI device handle
+@Input          ui32Index               Address range index
+@Return		IMG_UINT32              Start of address range or 0 if no 
+                                        such range
+*/ /**************************************************************************/
+IMG_UINT64 OSPCIAddrRangeStart(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
+{
+	return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_START, hPVRPCI, ui32Index); 
+}
+
+/*************************************************************************/ /*!
+@Function       OSPCIAddrRangeEnd
+@Description    Returns the end of a given address range
+@Input          hPVRPCI                 PCI device handle
+@Input          ui32Index               Address range index
+@Return		IMG_UINT32              End of address range or 0 if no such
+                                        range
+*/ /**************************************************************************/
+IMG_UINT64 OSPCIAddrRangeEnd(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
+{
+	return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_END, hPVRPCI, ui32Index); 
+}
+
+/*************************************************************************/ /*!
+@Function       OSPCIRequestAddrRange
+@Description    Request a given address range index for subsequent use
+@Input          hPVRPCI                 PCI device handle
+@Input          ui32Index               Address range index
+@Return	        PVRSRV_ERROR	        Services error code
+*/ /**************************************************************************/
+PVRSRV_ERROR OSPCIRequestAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI,
+								   IMG_UINT32 ui32Index)
+{
+	if (OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_REQUEST, hPVRPCI, ui32Index) == 0)
+	{
+		return PVRSRV_ERROR_PCI_CALL_FAILED;
+	}
+	else
+	{
+		return PVRSRV_OK;
+	}
+}
+
+/*************************************************************************/ /*!
+@Function       OSPCIReleaseAddrRange
+@Description    Release a given address range that is no longer being used
+@Input          hPVRPCI                 PCI device handle
+@Input          ui32Index               Address range index
+@Return	        PVRSRV_ERROR	        Services error code
+*/ /**************************************************************************/
+PVRSRV_ERROR OSPCIReleaseAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
+{
+	if (OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_RELEASE, hPVRPCI, ui32Index) == 0)
+	{
+		return PVRSRV_ERROR_PCI_CALL_FAILED;
+	}
+	else
+	{
+		return PVRSRV_OK;
+	}
+}
+
+/*************************************************************************/ /*!
+@Function       OSPCIRequestAddrRegion
+@Description    Request a given region from an address range for subsequent use
+@Input          hPVRPCI                 PCI device handle
+@Input          ui32Index               Address range index
+@Input          uiOffset              Offset into the address range that forms 
+                                        the start of the region
+@Input          uiLength              Length of the region
+@Return	        PVRSRV_ERROR	        Services error code
+*/ /**************************************************************************/
+PVRSRV_ERROR OSPCIRequestAddrRegion(PVRSRV_PCI_DEV_HANDLE hPVRPCI,
+									IMG_UINT32 ui32Index,
+									IMG_UINT64 uiOffset,
+									IMG_UINT64 uiLength)
+{
+	PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+	resource_size_t start;
+	resource_size_t end;
+
+	start = pci_resource_start(psPVRPCI->psPCIDev, ui32Index);
+	end = pci_resource_end(psPVRPCI->psPCIDev, ui32Index);
+
+	/* Check that the requested region is valid */
+	if ((start + uiOffset + uiLength - 1) > end)
+	{
+		return PVRSRV_ERROR_BAD_REGION_SIZE_MISMATCH;
+	}
+
+	if (pci_resource_flags(psPVRPCI->psPCIDev, ui32Index) & IORESOURCE_IO)
+	{
+		if (request_region(start + uiOffset, uiLength, PVRSRV_MODNAME) == NULL)
+		{
+			return PVRSRV_ERROR_PCI_REGION_UNAVAILABLE;
+		}
+	}
+	else
+	{
+		if (request_mem_region(start + uiOffset, uiLength, PVRSRV_MODNAME) == NULL)
+		{
+			return PVRSRV_ERROR_PCI_REGION_UNAVAILABLE;
+		}
+	}
+
+	return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       OSPCIReleaseAddrRegion
+@Description    Release a given region, from an address range, that is no 
+                longer in use
+@Input          hPVRPCI                 PCI device handle
+@Input          ui32Index               Address range index
+@Input          ui32Offset              Offset into the address range that forms 
+                                        the start of the region
+@Input          ui32Length              Length of the region
+@Return	        PVRSRV_ERROR	        Services error code
+*/ /**************************************************************************/
+PVRSRV_ERROR OSPCIReleaseAddrRegion(PVRSRV_PCI_DEV_HANDLE hPVRPCI,
+									IMG_UINT32 ui32Index,
+									IMG_UINT64 uiOffset,
+									IMG_UINT64 uiLength)
+{
+	PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+	resource_size_t start;
+	resource_size_t end;
+
+	start = pci_resource_start(psPVRPCI->psPCIDev, ui32Index);
+	end = pci_resource_end(psPVRPCI->psPCIDev, ui32Index);
+
+	/* Check that the region is valid */
+	if ((start + uiOffset + uiLength - 1) > end)
+	{
+		return PVRSRV_ERROR_BAD_REGION_SIZE_MISMATCH;
+	}
+
+	if (pci_resource_flags(psPVRPCI->psPCIDev, ui32Index) & IORESOURCE_IO)
+	{
+		release_region(start + uiOffset, uiLength);
+	}
+	else
+	{
+		release_mem_region(start + uiOffset, uiLength);
+	}
+
+	return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       OSPCIReleaseDev
+@Description    Release a PCI device that is no longer being used
+@Input          hPVRPCI                 PCI device handle
+@Return	        PVRSRV_ERROR	        Services error code
+*/ /**************************************************************************/
+PVRSRV_ERROR OSPCIReleaseDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI)
+{
+	PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+	int i;
+
+	/* Release all PCI regions that are currently in use */
+	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
+	{
+		if (psPVRPCI->abPCIResourceInUse[i])
+		{
+			pci_release_region(psPVRPCI->psPCIDev, i);
+			psPVRPCI->abPCIResourceInUse[i] = IMG_FALSE;
+		}
+	}
+
+#if defined(CONFIG_PCI_MSI)
+	if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_MSI)		/* PRQA S 3358 */ /* misuse of enums */
+	{
+		pci_disable_msi(psPVRPCI->psPCIDev);
+	}
+#endif
+
+	if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER)	/* PRQA S 3358 */ /* misuse of enums */
+	{
+		pci_clear_master(psPVRPCI->psPCIDev);
+	}
+
+	pci_disable_device(psPVRPCI->psPCIDev);
+
+	OSFreeMem((IMG_VOID *)psPVRPCI);
+	/*not nulling pointer, copy on stack*/
+
+	return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       OSPCISuspendDev
+@Description    Prepare PCI device to be turned off by power management
+@Input          hPVRPCI                 PCI device handle
+@Return	        PVRSRV_ERROR	        Services error code
+*/ /**************************************************************************/
+PVRSRV_ERROR OSPCISuspendDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI)
+{
+	PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+	int i;
+	int err;
+
+	/* Release all PCI regions that are currently in use */
+	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
+	{
+		if (psPVRPCI->abPCIResourceInUse[i])
+		{
+			pci_release_region(psPVRPCI->psPCIDev, i);
+		}
+	}
+
+	err = pci_save_state(psPVRPCI->psPCIDev);
+	if (err != 0)
+	{
+		printk(KERN_ERR "OSPCISuspendDev: pci_save_state_failed (%d)", err);
+		return PVRSRV_ERROR_PCI_CALL_FAILED;
+	}
+
+	pci_disable_device(psPVRPCI->psPCIDev);
+
+	err = pci_set_power_state(psPVRPCI->psPCIDev, pci_choose_state(psPVRPCI->psPCIDev, PMSG_SUSPEND));
+	switch(err)
+	{
+		case 0:
+			break;
+		case -EIO:
+			printk(KERN_ERR "OSPCISuspendDev: device doesn't support PCI PM");
+			break;
+		case -EINVAL:
+			printk(KERN_ERR "OSPCISuspendDev: can't enter requested power state");
+			break;
+		default:
+			printk(KERN_ERR "OSPCISuspendDev: pci_set_power_state failed (%d)", err);
+			break;
+	}
+
+	return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       OSPCIResumeDev
+@Description    Prepare a PCI device to be resumed by power management
+@Input          hPVRPCI                 PCI device handle
+@Return	        PVRSRV_ERROR	        Services error code
+*/ /**************************************************************************/
+PVRSRV_ERROR OSPCIResumeDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI)
+{
+	PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+	int err;
+	int i;
+
+	err = pci_set_power_state(psPVRPCI->psPCIDev, pci_choose_state(psPVRPCI->psPCIDev, PMSG_ON));
+	switch(err)
+	{
+		case 0:
+			break;
+		case -EIO:
+			printk(KERN_ERR "OSPCIResumeDev: device doesn't support PCI PM");
+			break;
+		case -EINVAL:
+			printk(KERN_ERR "OSPCIResumeDev: can't enter requested power state");
+			return PVRSRV_ERROR_UNKNOWN_POWER_STATE;
+		default:
+			printk(KERN_ERR "OSPCIResumeDev: pci_set_power_state failed (%d)", err);
+			return PVRSRV_ERROR_UNKNOWN_POWER_STATE;
+	}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38))
+	pci_restore_state(psPVRPCI->psPCIDev);
+#else
+	err = pci_restore_state(psPVRPCI->psPCIDev);
+	if (err != 0)
+	{
+		printk(KERN_ERR "OSPCIResumeDev: pci_restore_state failed (%d)", err);
+		return PVRSRV_ERROR_PCI_CALL_FAILED;
+	}
+#endif
+	err = pci_enable_device(psPVRPCI->psPCIDev);
+	if (err != 0)
+	{
+		printk(KERN_ERR "OSPCIResumeDev: Couldn't enable device (%d)", err);
+		return PVRSRV_ERROR_PCI_CALL_FAILED;
+	}
+
+	if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER)	/* PRQA S 3358 */ /* misuse of enums */
+		pci_set_master(psPVRPCI->psPCIDev);
+
+	/* Restore the PCI resource tracking array */
+	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
+	{
+		if (psPVRPCI->abPCIResourceInUse[i])
+		{
+			err = pci_request_region(psPVRPCI->psPCIDev, i, PVRSRV_MODNAME);
+			if (err != 0)
+			{
+				printk(KERN_ERR "OSPCIResumeDev: pci_request_region_failed (region %d, error %d)", i, err);
+			}
+		}
+	}
+
+	return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       OSPCIClearResourceMTRRs
+@Description    Clear any BIOS-configured MTRRs for a PCI memory region
+@Input          hPVRPCI                 PCI device handle
+@Input          ui32Index               Address range index
+@Return	        PVRSRV_ERROR	        Services error code
+*/ /**************************************************************************/
+PVRSRV_ERROR OSPCIClearResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
+{
+	PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+	resource_size_t start, end;
+	int err;
+
+	start = pci_resource_start(psPVRPCI->psPCIDev, ui32Index);
+	end = pci_resource_end(psPVRPCI->psPCIDev, ui32Index) + 1;
+
+	err = mtrr_add(start, end - start, MTRR_TYPE_UNCACHABLE, 0);
+	if (err < 0)
+	{
+		printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_add failed (%d)", err);
+		return PVRSRV_ERROR_PCI_CALL_FAILED;
+	}
+
+	err = mtrr_del(err, start, end - start);
+	if (err < 0)
+	{
+		printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_del failed (%d)", err);
+		return PVRSRV_ERROR_PCI_CALL_FAILED;
+	}
+
+	/* Workaround for overlapping MTRRs. */
+	{
+		IMG_BOOL bGotMTRR0 = IMG_FALSE;
+
+		/* Current mobo BIOSes will normally set up a WRBACK MTRR spanning
+		 * 0->4GB, and then another 4GB->6GB. If the PCI card's automatic &
+		 * overlapping UNCACHABLE MTRR is deleted, we see WRBACK behaviour.
+		 *
+		 * WRBACK is incompatible with some PCI devices, so try to split
+		 * the UNCACHABLE regions up and insert a WRCOMB region instead.
+		 */
+		err = mtrr_add(start, end - start, MTRR_TYPE_WRBACK, 0);
+		if (err < 0)
+		{
+			/* If this fails, services has probably run before and created
+			 * a write-combined MTRR for the test chip. Assume it has, and
+			 * don't return an error here.
+			 */
+			return PVRSRV_OK;
+		}
+
+		if(err == 0)
+			bGotMTRR0 = IMG_TRUE;
+
+		err = mtrr_del(err, start, end - start);
+		if(err < 0)
+		{
+			printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_del failed (%d)", err);
+			return PVRSRV_ERROR_PCI_CALL_FAILED;
+		}
+
+		if(bGotMTRR0)
+		{
+			/* Replace 0 with a non-overlapping WRBACK MTRR */
+			err = mtrr_add(0, start, MTRR_TYPE_WRBACK, 0);
+			if(err < 0)
+			{
+				printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_add failed (%d)", err);
+				return PVRSRV_ERROR_PCI_CALL_FAILED;
+			}
+
+			/* Add a WRCOMB MTRR for the PCI device memory bar */
+			err = mtrr_add(start, end - start, MTRR_TYPE_WRCOMB, 0);
+			if(err < 0)
+			{
+				printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_add failed (%d)", err);
+				return PVRSRV_ERROR_PCI_CALL_FAILED;
+			}
+		}
+	}
+
+	return PVRSRV_OK;
+}
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/system/include/ion_support.h b/drivers/external_drivers/intel_media/graphics/rgx/services/system/include/ion_support.h
new file mode 100644
index 0000000..341038d
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/system/include/ion_support.h
@@ -0,0 +1,46 @@
+/*************************************************************************/ /*!
+@File           ion_support.h
+@Title          Generic Ion support header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This file defines the API for generic Ion support.
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+PVRSRV_ERROR IonInit(void *pvPrivateData);
+
+IMG_VOID IonDeinit(IMG_VOID);
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/system/include/pci_support.h b/drivers/external_drivers/intel_media/graphics/rgx/services/system/include/pci_support.h
new file mode 100644
index 0000000..57da29b1
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/system/include/pci_support.h
@@ -0,0 +1,74 @@
+/*************************************************************************/ /*!
+@File
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __PCI_SUPPORT_H__
+#define __PCI_SUPPORT_H__
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+typedef enum _HOST_PCI_INIT_FLAGS_
+{
+	HOST_PCI_INIT_FLAG_BUS_MASTER	= 0x00000001,
+	HOST_PCI_INIT_FLAG_MSI		= 0x00000002,
+	HOST_PCI_INIT_FLAG_FORCE_I32 	= 0x7fffffff
+} HOST_PCI_INIT_FLAGS;
+
+struct _PVRSRV_PCI_DEV_OPAQUE_STRUCT_;
+typedef struct _PVRSRV_PCI_DEV_OPAQUE_STRUCT_ *PVRSRV_PCI_DEV_HANDLE;
+
+PVRSRV_PCI_DEV_HANDLE OSPCIAcquireDev(IMG_UINT16 ui16VendorID, IMG_UINT16 ui16DeviceID, HOST_PCI_INIT_FLAGS eFlags);
+PVRSRV_PCI_DEV_HANDLE OSPCISetDev(IMG_VOID *pvPCICookie, HOST_PCI_INIT_FLAGS eFlags);
+PVRSRV_ERROR OSPCIReleaseDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI);
+PVRSRV_ERROR OSPCIDevID(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT16 *pui16DeviceID);
+PVRSRV_ERROR OSPCIIRQ(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 *pui32IRQ);
+IMG_UINT64 OSPCIAddrRangeLen(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
+IMG_UINT64 OSPCIAddrRangeStart(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
+IMG_UINT64 OSPCIAddrRangeEnd(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
+PVRSRV_ERROR OSPCIRequestAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
+PVRSRV_ERROR OSPCIReleaseAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
+PVRSRV_ERROR OSPCIRequestAddrRegion(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index, IMG_UINT64 uiOffset, IMG_UINT64 uiLength);
+PVRSRV_ERROR OSPCIReleaseAddrRegion(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index, IMG_UINT64 uiOffset, IMG_UINT64 uiLength);
+PVRSRV_ERROR OSPCISuspendDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI);
+PVRSRV_ERROR OSPCIResumeDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI);
+PVRSRV_ERROR OSPCIClearResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
+
+#endif /* __PCI_SUPPORT_H__ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/system/include/syscommon.h b/drivers/external_drivers/intel_media/graphics/rgx/services/system/include/syscommon.h
new file mode 100644
index 0000000..cfc4b82
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/system/include/syscommon.h
@@ -0,0 +1,173 @@
+/*************************************************************************/ /*!
+@File
+@Title          Common System APIs and structures
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This header provides common system-specific declarations and macros
+                that are supported by all systems
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _SYSCOMMON_H
+#define _SYSCOMMON_H
+
+#include "osfunc.h"
+
+#if defined(NO_HARDWARE) && defined(__linux__) && defined(__KERNEL__)
+#include <asm/io.h>
+#endif
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+#include "pvrsrv.h"
+
+PVRSRV_ERROR SysCreateConfigData(PVRSRV_SYSTEM_CONFIG **ppsSysConfig, void *hDevice);
+IMG_VOID SysDestroyConfigData(PVRSRV_SYSTEM_CONFIG *psSysConfig);
+PVRSRV_ERROR SysAcquireSystemData(IMG_HANDLE hSysData);
+PVRSRV_ERROR SysReleaseSystemData(IMG_HANDLE hSysData);
+PVRSRV_ERROR SysDebugInfo(PVRSRV_SYSTEM_CONFIG *psSysConfig, DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf);
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#include "services.h"
+IMG_VOID SysSetOSidRegisters(IMG_UINT32 aui32OSidMin[GPUVIRT_VALIDATION_NUM_OS][GPUVIRT_VALIDATION_NUM_REGIONS], IMG_UINT32 aui32OSidMax[GPUVIRT_VALIDATION_NUM_OS][GPUVIRT_VALIDATION_NUM_REGIONS]);
+IMG_VOID SysPrintAndResetFaultStatusRegister(void);
+#endif
+
+#if defined(SUPPORT_SYSTEM_INTERRUPT_HANDLING)
+PVRSRV_ERROR SysInstallDeviceLISR(IMG_UINT32 ui32IRQ,
+				  IMG_CHAR *pszName,
+				  PFN_LISR pfnLISR,
+				  IMG_PVOID pvData,
+				  IMG_HANDLE *phLISRData);
+
+PVRSRV_ERROR SysUninstallDeviceLISR(IMG_HANDLE hLISRData);
+
+#if defined(SUPPORT_DRM)
+IMG_BOOL SystemISRHandler(IMG_VOID *pvData);
+#endif
+#endif /* defined(SUPPORT_SYSTEM_INTERRUPT_HANDLING) */
+
+
+/*
+ * SysReadHWReg and SysWriteHWReg differ from OSReadHWReg and OSWriteHWReg
+ * in that they are always intended for use with real hardware, even on
+ * NO_HARDWARE systems.
+ */
+#if !(defined(NO_HARDWARE) && defined(__linux__) && defined(__KERNEL__))
+#define	SysReadHWReg(p, o) OSReadHWReg(p, o)
+#define SysWriteHWReg(p, o, v) OSWriteHWReg(p, o, v)
+#else	/* !(defined(NO_HARDWARE) && defined(__linux__)) */
+/*!
+******************************************************************************
+
+ @Function	SysReadHWReg
+
+ @Description
+
+ register read function
+
+ @input pvLinRegBaseAddr :	lin addr of register block base
+
+ @input ui32Offset :
+
+ @Return   register value
+
+******************************************************************************/
+static inline IMG_UINT32 SysReadHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset)
+{
+	return (IMG_UINT32) readl(pvLinRegBaseAddr + ui32Offset);
+}
+
+/*!
+******************************************************************************
+
+ @Function	SysWriteHWReg
+
+ @Description
+
+ register write function
+
+ @input pvLinRegBaseAddr :	lin addr of register block base
+
+ @input ui32Offset :
+
+ @input ui32Value :
+
+ @Return   none
+
+******************************************************************************/
+static inline IMG_VOID SysWriteHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value)
+{
+	writel(ui32Value, pvLinRegBaseAddr + ui32Offset);
+}
+#endif	/* !(defined(NO_HARDWARE) && defined(__linux__)) */
+
+/*!
+******************************************************************************
+
+ @Function		SysCheckMemAllocSize
+
+ @Description	Function to apply memory budgeting policies
+
+ @input			psDevNode
+
+ @input			uiChunkSize
+
+ @input			ui32NumPhysChunks
+
+ @Return		PVRSRV_ERROR
+
+******************************************************************************/
+FORCE_INLINE PVRSRV_ERROR SysCheckMemAllocSize(struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+												IMG_UINT64 ui64MemSize)
+{
+	PVR_UNREFERENCED_PARAMETER(psDevNode);
+	PVR_UNREFERENCED_PARAMETER(ui64MemSize);
+
+	return PVRSRV_OK;
+}
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
+
+/*****************************************************************************
+ End of file (syscommon.h)
+*****************************************************************************/
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/system/rgx_intel/bufferclass_video.c b/drivers/external_drivers/intel_media/graphics/rgx/services/system/rgx_intel/bufferclass_video.c
new file mode 100644
index 0000000..ecdae2e
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/system/rgx_intel/bufferclass_video.c
@@ -0,0 +1,177 @@
+/*************************************************************************/ /*!                                                                                                               
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved                                                                                                               
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/string.h>
+
+#include "bufferclass_video.h"
+#include "bufferclass_interface.h"
+
+#define VBUFFERCLASS_DEVICE_NAME "Video Bufferclass Device"
+#define CBUFFERCLASS_DEVICE_NAME "Camera Bufferclass Device"
+
+static void *gpvAnchorVideo[BC_VIDEO_DEVICE_MAX_ID];
+
+static void *gpcAnchor;
+
+BC_VIDEO_DEVINFO *
+GetAnchorPtr(int id)
+{
+	BC_VIDEO_DEVINFO *AnchorPtr = NULL;
+	if (id < BC_VIDEO_DEVICE_MAX_ID)
+		AnchorPtr = gpvAnchorVideo[id];
+	else if (id == BC_CAMERA_DEVICEID)
+		AnchorPtr = gpcAnchor;
+	return AnchorPtr;
+}
+
+static void
+SetAnchorPtr(BC_VIDEO_DEVINFO * psDevInfo, int id)
+{
+	if (id < BC_VIDEO_DEVICE_MAX_ID)
+		gpvAnchorVideo[id] = (void *) psDevInfo;
+	else if (id == BC_CAMERA_DEVICEID)
+		gpcAnchor = (void *) psDevInfo;
+}
+
+BCE_ERROR
+BC_Video_Register(int id)
+{
+	BC_VIDEO_DEVINFO *psDevInfo;
+
+	psDevInfo = GetAnchorPtr(id);
+
+	if (psDevInfo == NULL) {
+		psDevInfo =
+			(BC_VIDEO_DEVINFO *) BCAllocKernelMem(sizeof(BC_VIDEO_DEVINFO));
+
+		if (!psDevInfo) {
+			return (BCE_ERROR_OUT_OF_MEMORY);
+		}
+
+		SetAnchorPtr((void *) psDevInfo, id);
+
+		psDevInfo->ulRefCount = 0;
+
+		if (BCOpenPVRServices(&psDevInfo->hPVRServices) != BCE_OK) {
+			return (BCE_ERROR_INIT_FAILURE);
+		}
+
+		psDevInfo->ulNumBuffers = 0;
+
+		psDevInfo->sBufferInfo.eIMGPixFmt = IMG_PIXFMT_UNKNOWN;
+		psDevInfo->sBufferInfo.ui32Width = 0;
+		psDevInfo->sBufferInfo.ui32Height = 0;
+		psDevInfo->sBufferInfo.ui32ByteStride = 0;
+		psDevInfo->sBufferInfo.ui32BufferDeviceID = id;
+		psDevInfo->sBufferInfo.ui32Flags = 0;
+		psDevInfo->sBufferInfo.ui32BufferCount =
+			(IMG_UINT32) psDevInfo->ulNumBuffers;
+
+		if (id < BC_VIDEO_DEVICE_MAX_ID) {
+			strncpy(psDevInfo->sBufferInfo.szDeviceName,
+				VBUFFERCLASS_DEVICE_NAME, MAX_BUFFER_DEVICE_NAME_SIZE);
+		} else if (id == BC_CAMERA_DEVICEID) {
+			strncpy(psDevInfo->sBufferInfo.szDeviceName,
+				CBUFFERCLASS_DEVICE_NAME, MAX_BUFFER_DEVICE_NAME_SIZE);
+		}
+	}
+
+	psDevInfo->ulRefCount++;
+
+	return (BCE_OK);
+}
+
+BCE_ERROR
+BC_Video_Unregister(int id)
+{
+	BC_VIDEO_DEVINFO *psDevInfo;
+
+	psDevInfo = GetAnchorPtr(id);
+
+	if (psDevInfo == NULL) {
+		return (BCE_ERROR_GENERIC);
+	}
+
+	psDevInfo->ulRefCount--;
+
+	if (psDevInfo->ulRefCount == 0) {
+		if (BCClosePVRServices(psDevInfo->hPVRServices) != BCE_OK) {
+			psDevInfo->hPVRServices = NULL;
+			return (BCE_ERROR_GENERIC);
+		}
+
+		if (psDevInfo->psSystemBuffer) {
+			BCFreeKernelMem(psDevInfo->psSystemBuffer);
+		}
+
+		BCFreeKernelMem(psDevInfo);
+
+		SetAnchorPtr(NULL, id);
+	}
+	return (BCE_OK);
+}
+
+BCE_ERROR
+BC_Video_Init(int id)
+{
+	BCE_ERROR eError;
+
+	eError = BC_Video_Register(id);
+	if (eError != BCE_OK) {
+		return eError;
+	}
+
+	return (BCE_OK);
+}
+
+BCE_ERROR
+BC_Video_Deinit(int id)
+{
+	BCE_ERROR eError;
+
+	BCVideoDestroyBuffers(id);
+
+	eError = BC_Video_Unregister(id);
+	if (eError != BCE_OK) {
+		return eError;
+	}
+
+	return (BCE_OK);
+}
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/system/rgx_intel/bufferclass_video.h b/drivers/external_drivers/intel_media/graphics/rgx/services/system/rgx_intel/bufferclass_video.h
new file mode 100644
index 0000000..ece1729
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/system/rgx_intel/bufferclass_video.h
@@ -0,0 +1,183 @@
+/*************************************************************************/ /*!                                                                                                               
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved                                                                                                               
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __BC_VIDEO_H__
+#define __BC_VIDEO_H__
+
+#include "img_defs.h"
+#include "bufferclass_interface.h"
+#include "imgpixfmts_km.h"
+
+#if defined(__cplusplus)
+extern "C"
+{
+#endif
+
+#define MAX_BUFFER_DEVICE_NAME_SIZE	(50) /*!< Max size of the buffer device name */
+
+/*! buffer information structure */
+typedef struct BUFFER_INFO_TAG
+{
+	unsigned int	ui32BufferCount;     /*!< Number of supported buffers */
+	unsigned int	ui32BufferDeviceID;  /*!< DeviceID assigned by Services*/
+	IMG_PIXFMT		eIMGPixFmt;          /*!< Pixel format of the buffer */
+	unsigned int	ui32ByteStride;      /*!< Byte stride of the buffer */
+	unsigned int	ui32Width;           /*!< Width of the buffer, in pixels */
+	unsigned int	ui32Height;          /*!< Height of the buffer, in pixels */
+	unsigned int	ui32Flags;           /*!< Flags */
+	char			szDeviceName[MAX_BUFFER_DEVICE_NAME_SIZE];  /*!< Name of the device */
+} BUFFER_INFO;
+
+enum BC_memory
+{
+	BC_MEMORY_MMAP = 1,
+	BC_MEMORY_USERPTR = 2,
+};
+
+/*
+ * the following types are tested for fourcc in struct bc_buf_params_t
+ *   NV12
+ *   UYVY
+ *   RGB565 - not tested yet
+ *   YUYV
+ */
+typedef struct bc_buf_params {
+	int count;            /*number of buffers, [in/out] */
+	int width;            /*buffer width in pixel, multiple of 8 or 32 */
+	int height;            /*buffer height in pixel */
+	int stride;
+	unsigned int fourcc;    /*buffer pixel format */
+	enum BC_memory type;
+} bc_buf_params_t;
+
+#define BC_VIDEO_DEVICE_MAX_ID          5
+#define BC_CAMERA_DEVICEID              8
+
+typedef void *BCE_HANDLE;
+
+typedef enum tag_bce_bool {
+	BCE_FALSE = 0,
+	BCE_TRUE = 1,
+} BCE_BOOL, *BCE_PBOOL;
+
+typedef struct BC_VIDEO_BUFFER_TAG {
+	unsigned long ulSize;
+	BCE_HANDLE hMemHandle;
+
+	IMG_SYS_PHYADDR *psSysAddr;
+
+	IMG_CPU_VIRTADDR sCPUVAddr;
+
+	struct BC_VIDEO_BUFFER_TAG *psNext;
+	int sBufferHandle;
+	IMG_BOOL is_conti_addr;
+} BC_VIDEO_BUFFER;
+
+typedef struct BC_VIDEO_DEVINFO_TAG {
+	IMG_UINT32 ulDeviceID;
+
+	BC_VIDEO_BUFFER *psSystemBuffer;
+
+	unsigned long ulNumBuffers;
+
+	BCE_HANDLE hPVRServices;
+
+	unsigned long ulRefCount;
+
+	BUFFER_INFO sBufferInfo;
+	enum BC_memory buf_type;
+} BC_VIDEO_DEVINFO;
+
+typedef enum _BCE_ERROR_ {
+	BCE_OK = 0,
+	BCE_ERROR_GENERIC = 1,
+	BCE_ERROR_OUT_OF_MEMORY = 2,
+	BCE_ERROR_TOO_FEW_BUFFERS = 3,
+	BCE_ERROR_INVALID_PARAMS = 4,
+	BCE_ERROR_INIT_FAILURE = 5,
+	BCE_ERROR_CANT_REGISTER_CALLBACK = 6,
+	BCE_ERROR_INVALID_DEVICE = 7,
+	BCE_ERROR_DEVICE_REGISTER_FAILED = 8,
+	BCE_ERROR_NO_PRIMARY = 9
+} BCE_ERROR;
+
+#ifndef UNREFERENCED_PARAMETER
+#define UNREFERENCED_PARAMETER(param) (param) = (param)
+#endif
+
+BCE_ERROR BC_Video_Register(int id);
+BCE_ERROR BC_Video_Unregister(int id);
+BCE_ERROR BC_Video_Buffers_Create(int id);
+BCE_ERROR BC_Video_Buffers_Destroy(int id);
+BCE_ERROR BC_Video_Init(int id);
+BCE_ERROR BC_Video_Deinit(int id);
+
+BCE_ERROR BCOpenPVRServices(BCE_HANDLE * phPVRServices);
+BCE_ERROR BCClosePVRServices(BCE_HANDLE hPVRServices);
+
+void *BCAllocKernelMem(unsigned long ulSize);
+void BCFreeKernelMem(void *pvMem);
+
+BCE_ERROR BCAllocDiscontigMemory(unsigned long ulSize,
+				 BCE_HANDLE unref__ * phMemHandle,
+				 IMG_CPU_VIRTADDR * pLinAddr,
+				 IMG_SYS_PHYADDR ** ppPhysAddr);
+
+void BCFreeDiscontigMemory(unsigned long ulSize,
+			   BCE_HANDLE unref__ hMemHandle,
+			   IMG_CPU_VIRTADDR LinAddr,
+			   IMG_SYS_PHYADDR * pPhysAddr);
+
+IMG_SYS_PHYADDR CpuPAddrToSysPAddrBC(IMG_CPU_PHYADDR cpu_paddr);
+IMG_CPU_PHYADDR SysPAddrToCpuPAddrBC(IMG_SYS_PHYADDR sys_paddr);
+
+void *MapPhysAddr(IMG_SYS_PHYADDR sSysAddr, unsigned long ulSize);
+void UnMapPhysAddr(void *pvAddr, unsigned long ulSize);
+
+BC_VIDEO_DEVINFO *GetAnchorPtr(int id);
+int GetBufferCount(unsigned int *puiBufferCount, int id);
+
+extern unsigned int bc_video_id_usage[BC_VIDEO_DEVICE_MAX_ID];
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/system/rgx_intel/bufferclass_video_linux.c b/drivers/external_drivers/intel_media/graphics/rgx/services/system/rgx_intel/bufferclass_video_linux.c
new file mode 100644
index 0000000..0d3752e
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/system/rgx_intel/bufferclass_video_linux.c
@@ -0,0 +1,955 @@
+/*************************************************************************/ /*!                                                                                                               
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved                                                                                                               
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+
+#if defined(LMA)
+#include <linux/pci.h>
+#else
+#include <linux/dma-mapping.h>
+#endif
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement.h"
+#include "ttm/ttm_object.h"
+
+#include "bufferclass_video.h"
+#include "bufferclass_video_linux.h"
+#include "pvrmodule.h"
+#include "env_connection.h"
+#include "private_data.h"
+#include "drm_shared.h"
+
+#define DEVNAME    "bc_video"
+#define DRVNAME    DEVNAME
+
+#if defined(BCE_USE_SET_MEMORY)
+#undef BCE_USE_SET_MEMORY
+#endif
+
+#if defined(__i386__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)) && defined(SUPPORT_LINUX_X86_PAT) && defined(SUPPORT_LINUX_X86_WRITECOMBINE)
+#include <asm/cacheflush.h>
+#define    BCE_USE_SET_MEMORY
+#endif
+
+unsigned int bc_video_id_usage[BC_VIDEO_DEVICE_MAX_ID];
+
+extern struct ttm_buffer_object *ttm_buffer_object_lookup(struct ttm_object_file
+		*tfile,
+		uint32_t handle);
+
+MODULE_SUPPORTED_DEVICE(DEVNAME);
+
+#if defined(LDM_PLATFORM) || defined(LDM_PCI)
+static struct class *psPvrClass;
+#endif
+
+static int AssignedMajorNumber;
+
+#define unref__ __attribute__ ((unused))
+
+#if defined(LMA)
+#define PVR_BUFFERCLASS_MEMOFFSET (220 * 1024 * 1024)
+#define PVR_BUFFERCLASS_MEMSIZE      (4 * 1024 * 1024)
+
+unsigned long g_ulMemBase = 0;
+unsigned long g_ulMemCurrent = 0;
+
+#define VENDOR_ID_PVR               0x1010
+#define DEVICE_ID_PVR               0x1CF1
+
+#define PVR_MEM_PCI_BASENUM         2
+#endif
+
+#define file_to_id(file)  (iminor(file->f_path.dentry->d_inode))
+
+struct psb_fpriv *BCVideoGetPriv(struct drm_file *file)
+{
+	CONNECTION_DATA *psConnection = file->driver_priv;
+	ENV_CONNECTION_DATA *psPrivateData;
+
+	psPrivateData = PVRSRVConnectionPrivateData(psConnection);
+	return psPrivateData->pPriv;
+}
+
+void BCVideoSetPriv(struct drm_file *file, void *fpriv)
+{
+	CONNECTION_DATA *psConnection = file->driver_priv;
+	ENV_CONNECTION_DATA *psPrivateData;
+
+	psPrivateData = PVRSRVConnectionPrivateData(psConnection);
+	psPrivateData->pPriv = fpriv;
+}
+
+int
+BCVideoModInit(void)
+{
+	int i, j;
+	/*LDM_PCI is defined, while LDM_PLATFORM and LMA are not defined.*/
+#if defined(LDM_PLATFORM) || defined(LDM_PCI)
+	struct device *psDev;
+#endif
+
+#if defined(LMA)
+	struct pci_dev *psPCIDev;
+	int error;
+#endif
+
+#if defined(LMA)
+	psPCIDev = pci_get_device(VENDOR_ID_PVR, DEVICE_ID_PVR, NULL);
+	if (psPCIDev == NULL) {
+		printk(KERN_ERR DRVNAME
+		       ": BCVideoModInit:  pci_get_device failed\n");
+		goto ExitError;
+	}
+
+	if ((error = pci_enable_device(psPCIDev)) != 0) {
+		printk(KERN_ERR DRVNAME
+		       ": BCVideoModInit: pci_enable_device failed (%d)\n", error);
+		goto ExitError;
+	}
+#endif
+
+#if defined(DEBUG)
+	printk(KERN_ERR DRVNAME ": BCVideoModInit: major device %d\n",
+	       AssignedMajorNumber);
+#endif
+
+#if defined(LDM_PLATFORM) || defined(LDM_PCI)
+	psPvrClass = class_create(THIS_MODULE, "bc_video");
+	if (IS_ERR(psPvrClass)) {
+		printk(KERN_ERR DRVNAME
+		       ": BCVideoModInit: unable to create class (%ld)",
+		       PTR_ERR(psPvrClass));
+		goto ExitUnregister;
+	}
+
+	psDev = device_create(psPvrClass, NULL, MKDEV(AssignedMajorNumber, 0),
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26))
+			      NULL,
+#endif
+			      DEVNAME);
+	if (IS_ERR(psDev)) {
+		printk(KERN_ERR DRVNAME
+		       ": BCVideoModInit: unable to create device (%ld)",
+		       PTR_ERR(psDev));
+		goto ExitDestroyClass;
+	}
+#endif
+
+#if defined(LMA)
+	g_ulMemBase =
+		pci_resource_start(psPCIDev,
+				   PVR_MEM_PCI_BASENUM) + PVR_BUFFERCLASS_MEMOFFSET;
+#endif
+
+	for (i = 0; i < BC_VIDEO_DEVICE_MAX_ID; i++) {
+		bc_video_id_usage[i] = 0;
+		if (BC_Video_Init(i) != BCE_OK) {
+			printk(KERN_ERR DRVNAME
+			       ": BCVideoModInit: can't init video bc device %d.\n", i);
+			for (j = i; j >= 0; j--) {
+				BC_Video_Deinit(j);
+			}
+			goto ExitUnregister;
+		}
+	}
+
+	if (BC_Video_Init(BC_CAMERA_DEVICEID) != BCE_OK) {
+		for (i = BC_VIDEO_DEVICE_MAX_ID - 1; i >= 0; i--) {
+			BC_Video_Deinit(i);
+		}
+		BC_Video_Deinit(BC_CAMERA_DEVICEID);
+		printk(KERN_ERR DRVNAME ": BC_Camera_ModInit: can't init device\n");
+		goto ExitUnregister;
+	}
+
+#if defined(LMA)
+	pci_disable_device(psPCIDev);
+#endif
+
+	return 0;
+
+#if defined(LDM_PLATFORM) || defined(LDM_PCI)
+ExitDestroyClass:
+	class_destroy(psPvrClass);
+#endif
+ExitUnregister:
+	unregister_chrdev(AssignedMajorNumber, DEVNAME);
+	//ExitDisable:
+#if defined(LMA)
+	pci_disable_device(psPCIDev);
+ExitError:
+#endif
+	return -EBUSY;
+}
+
+int
+BCVideoModCleanup(void)
+{
+	int i;
+#if defined(LDM_PLATFORM) || defined(LDM_PCI)
+	device_destroy(psPvrClass, MKDEV(AssignedMajorNumber, 0));
+	class_destroy(psPvrClass);
+#endif
+
+	for (i = 0; i < BC_VIDEO_DEVICE_MAX_ID; i++) {
+		if (BC_Video_Deinit(i) != BCE_OK) {
+			printk(KERN_ERR DRVNAME
+			       ": BCVideoModCleanup: can't deinit video device %d.\n",
+			       i);
+			return -1;
+		}
+	}
+
+	if (BC_Video_Deinit(BC_CAMERA_DEVICEID) != BCE_OK) {
+		printk(KERN_ERR DRVNAME
+		       ": BC_Camera_ModCleanup: can't deinit device\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+void *
+BCAllocKernelMem(unsigned long ulSize)
+{
+	return kmalloc(ulSize, GFP_KERNEL);
+}
+
+void
+BCFreeKernelMem(void *pvMem)
+{
+	kfree(pvMem);
+}
+
+#if defined(BC_DISCONTIG_BUFFERS)
+
+#define RANGE_TO_PAGES(range) (((range) + (PAGE_SIZE - 1)) >> PAGE_SHIFT)
+#define    VMALLOC_TO_PAGE_PHYS(vAddr) page_to_phys(vmalloc_to_page(vAddr))
+
+BCE_ERROR
+BCAllocDiscontigMemory(unsigned long ulSize,
+		       BCE_HANDLE unref__ * phMemHandle,
+		       IMG_CPU_VIRTADDR * pLinAddr,
+		       IMG_SYS_PHYADDR ** ppPhysAddr)
+{
+	unsigned long ulPages = RANGE_TO_PAGES(ulSize);
+	IMG_SYS_PHYADDR *pPhysAddr;
+	unsigned long ulPage;
+	IMG_CPU_VIRTADDR LinAddr;
+
+	LinAddr =
+		__vmalloc(ulSize, GFP_KERNEL | __GFP_HIGHMEM,
+			  pgprot_noncached(PAGE_KERNEL));
+	if (!LinAddr) {
+		return BCE_ERROR_OUT_OF_MEMORY;
+	}
+
+	pPhysAddr = kmalloc(ulPages * sizeof(IMG_SYS_PHYADDR), GFP_KERNEL);
+	if (!pPhysAddr) {
+		vfree(LinAddr);
+		return BCE_ERROR_OUT_OF_MEMORY;
+	}
+
+	*pLinAddr = LinAddr;
+
+	for (ulPage = 0; ulPage < ulPages; ulPage++) {
+		pPhysAddr[ulPage].uiAddr = VMALLOC_TO_PAGE_PHYS(LinAddr);
+
+		LinAddr += PAGE_SIZE;
+	}
+
+	*ppPhysAddr = pPhysAddr;
+
+	return BCE_OK;
+}
+
+void
+BCFreeDiscontigMemory(unsigned long ulSize,
+		      BCE_HANDLE unref__ hMemHandle,
+		      IMG_CPU_VIRTADDR LinAddr, IMG_SYS_PHYADDR * pPhysAddr)
+{
+	kfree(pPhysAddr);
+
+	vfree(LinAddr);
+}
+#else	/* defined(BC_DISCONTIG_BUFFERS) */
+
+BCE_ERROR
+BCAllocContigMemory(unsigned long ulSize,
+		    BCE_HANDLE unref__ * phMemHandle,
+		    IMG_CPU_VIRTADDR * pLinAddr, IMG_CPU_PHYADDR * pPhysAddr)
+{
+#if defined(LMA)
+	void *pvLinAddr;
+
+
+	if (g_ulMemCurrent + ulSize >= PVR_BUFFERCLASS_MEMSIZE) {
+		return (BCE_ERROR_OUT_OF_MEMORY);
+	}
+
+	pvLinAddr = ioremap(g_ulMemBase + g_ulMemCurrent, ulSize);
+
+	if (pvLinAddr) {
+		pPhysAddr->uiAddr = g_ulMemBase + g_ulMemCurrent;
+		*pLinAddr = pvLinAddr;
+
+		g_ulMemCurrent += ulSize;
+		return (BCE_OK);
+	}
+	return (BCE_ERROR_OUT_OF_MEMORY);
+#else
+#if defined(BCE_USE_SET_MEMORY)
+	void *pvLinAddr;
+	unsigned long ulAlignedSize = PAGE_ALIGN(ulSize);
+	int iPages = (int)(ulAlignedSize >> PAGE_SHIFT);
+	int iError;
+
+	pvLinAddr = kmalloc(ulAlignedSize, GFP_KERNEL);
+	BUG_ON(((unsigned long) pvLinAddr) & ~PAGE_MASK);
+
+	iError = set_memory_wc((unsigned long) pvLinAddr, iPages);
+	if (iError != 0) {
+		printk(KERN_ERR DRVNAME
+		       ": BCAllocContigMemory:  set_memory_wc failed (%d)\n", iError);
+		return (BCE_ERROR_OUT_OF_MEMORY);
+	}
+
+	pPhysAddr->uiAddr = virt_to_phys(pvLinAddr);
+	*pLinAddr = pvLinAddr;
+
+	return (BCE_OK);
+#else
+	dma_addr_t dma;
+	void *pvLinAddr;
+
+	pvLinAddr = dma_alloc_coherent(NULL, ulSize, &dma, GFP_KERNEL);
+	if (pvLinAddr == NULL) {
+		return (BCE_ERROR_OUT_OF_MEMORY);
+	}
+
+	pPhysAddr->uiAddr = dma;
+	*pLinAddr = pvLinAddr;
+
+	return (BCE_OK);
+#endif
+#endif
+}
+
+void
+BCFreeContigMemory(unsigned long ulSize,
+		   BCE_HANDLE unref__ hMemHandle,
+		   IMG_CPU_VIRTADDR LinAddr, IMG_CPU_PHYADDR PhysAddr)
+{
+#if defined(LMA)
+	g_ulMemCurrent -= ulSize;
+	iounmap(LinAddr);
+#else
+#if defined(BCE_USE_SET_MEMORY)
+	unsigned long ulAlignedSize = PAGE_ALIGN(ulSize);
+	int iError;
+	int iPages = (int)(ulAlignedSize >> PAGE_SHIFT);
+
+	iError = set_memory_wb((unsigned long) LinAddr, iPages);
+	if (iError != 0) {
+		printk(KERN_ERR DRVNAME
+		       ": BCFreeContigMemory:  set_memory_wb failed (%d)\n", iError);
+	}
+	kfree(LinAddr);
+#else
+	dma_free_coherent(NULL, ulSize, LinAddr, (dma_addr_t) PhysAddr.uiAddr);
+#endif
+#endif
+}
+#endif	/* defined(BC_DISCONTIG_BUFFERS) */
+
+IMG_SYS_PHYADDR
+CpuPAddrToSysPAddrBC(IMG_CPU_PHYADDR cpu_paddr)
+{
+	IMG_SYS_PHYADDR sys_paddr;
+	sys_paddr.uiAddr = cpu_paddr.uiAddr;
+	return sys_paddr;
+}
+
+IMG_CPU_PHYADDR
+SysPAddrToCpuPAddrBC(IMG_SYS_PHYADDR sys_paddr)
+{
+	IMG_CPU_PHYADDR cpu_paddr;
+	cpu_paddr.uiAddr = sys_paddr.uiAddr;
+	return cpu_paddr;
+}
+
+BCE_ERROR
+BCOpenPVRServices(BCE_HANDLE * phPVRServices)
+{
+	*phPVRServices = 0;
+	return (BCE_OK);
+}
+
+
+BCE_ERROR
+BCClosePVRServices(BCE_HANDLE unref__ hPVRServices)
+{
+	return (BCE_OK);
+}
+
+static int
+BC_CreateBuffers(int id, bc_buf_params_t * p, IMG_BOOL is_conti_addr)
+{
+	BC_VIDEO_DEVINFO *psDevInfo;
+	IMG_UINT32 i, stride, size;
+	IMG_PIXFMT pixel_fmt;
+
+	if (p->count <= 0)
+		return -EINVAL;
+
+	if (p->width <= 1 || p->height <= 1)
+		return -EINVAL;
+
+	switch (p->fourcc) {
+	case BC_PIX_FMT_NV12:
+		pixel_fmt = IMG_PIXFMT_YUV420_3PLANE;
+		break;
+	case BC_PIX_FMT_UYVY:
+		pixel_fmt = IMG_PIXFMT_UYVY;
+		break;
+	case BC_PIX_FMT_RGB565:
+		pixel_fmt = IMG_PIXFMT_B5G6R5_UNORM;
+		p->stride = p->stride << 1;    /* stride for RGB from user space is uncorrect */
+		break;
+	case BC_PIX_FMT_YUYV:
+		pixel_fmt = IMG_PIXFMT_YUYV;
+		break;
+	default:
+		return -EINVAL;
+		break;
+	}
+
+	stride = p->stride;
+
+	if (p->type != BC_MEMORY_MMAP && p->type != BC_MEMORY_USERPTR)
+		return -EINVAL;
+
+	if ((psDevInfo = GetAnchorPtr(id)) == IMG_NULL)
+		return -ENODEV;
+
+	if (psDevInfo->ulNumBuffers)
+		BCVideoDestroyBuffers(id);
+
+	psDevInfo->buf_type = p->type;
+	psDevInfo->psSystemBuffer =
+		BCAllocKernelMem(sizeof(BC_VIDEO_BUFFER) * p->count);
+
+	if (!psDevInfo->psSystemBuffer)
+		return -ENOMEM;
+
+	memset(psDevInfo->psSystemBuffer, 0, sizeof(BC_VIDEO_BUFFER) * p->count);
+	size = p->height * stride;
+	if (pixel_fmt == IMG_PIXFMT_YUV420_3PLANE)
+		size += (stride >> 1) * (p->height >> 1) << 1;
+
+	for (i = 0; i < p->count; i++) {
+		IMG_SYS_PHYADDR *pPhysAddr;
+		psDevInfo->ulNumBuffers++;
+		psDevInfo->psSystemBuffer[i].ulSize = size;
+
+		/*for discontig buffers, allocate memory for pPhysAddr */
+		psDevInfo->psSystemBuffer[i].is_conti_addr = is_conti_addr;
+		if (is_conti_addr) {
+			pPhysAddr = BCAllocKernelMem(1 * sizeof(IMG_SYS_PHYADDR));
+			if (!pPhysAddr) {
+				return BCE_ERROR_OUT_OF_MEMORY;
+			}
+			memset(pPhysAddr, 0, 1 * sizeof(IMG_SYS_PHYADDR));
+		} else {
+			unsigned long ulPages = RANGE_TO_PAGES(size);
+			pPhysAddr = BCAllocKernelMem(ulPages * sizeof(IMG_SYS_PHYADDR));
+			if (!pPhysAddr) {
+				return BCE_ERROR_OUT_OF_MEMORY;
+			}
+			memset(pPhysAddr, 0, ulPages * sizeof(IMG_SYS_PHYADDR));
+		}
+		psDevInfo->psSystemBuffer[i].psSysAddr = pPhysAddr;
+	}
+	p->count = psDevInfo->ulNumBuffers;
+
+	psDevInfo->sBufferInfo.ui32BufferCount = psDevInfo->ulNumBuffers;
+	psDevInfo->sBufferInfo.eIMGPixFmt = pixel_fmt;
+	psDevInfo->sBufferInfo.ui32Width = p->width;
+	psDevInfo->sBufferInfo.ui32Height = p->height;
+	psDevInfo->sBufferInfo.ui32ByteStride = stride;
+	psDevInfo->sBufferInfo.ui32BufferDeviceID = id;
+//	psDevInfo->sBufferInfo.ui32Flags = PVRSRV_BC_FLAGS_YUVCSC_FULL_RANGE |
+					   //PVRSRV_BC_FLAGS_YUVCSC_BT601;
+	return 0;
+}
+
+int
+BCVideoDestroyBuffers(int id)
+{
+	BC_VIDEO_DEVINFO *psDevInfo;
+	IMG_UINT32 i;
+	if ((psDevInfo = GetAnchorPtr(id)) == IMG_NULL)
+		return -ENODEV;
+
+	if (!psDevInfo->ulNumBuffers)
+		return 0;
+
+	if (id < 0 ||
+		id >= BC_VIDEO_DEVICE_MAX_ID ||
+	    bc_video_id_usage[id] != 1) {
+		return 0;;
+	}
+	bc_video_id_usage[id] = 0;
+
+	for (i = 0; i < psDevInfo->ulNumBuffers; i++)
+		BCFreeKernelMem(psDevInfo->psSystemBuffer[i].psSysAddr);
+
+	BCFreeKernelMem(psDevInfo->psSystemBuffer);
+
+	psDevInfo->ulNumBuffers = 0;
+	psDevInfo->sBufferInfo.eIMGPixFmt = IMG_PIXFMT_UNKNOWN;
+	psDevInfo->sBufferInfo.ui32Width = 0;
+	psDevInfo->sBufferInfo.ui32Height = 0;
+	psDevInfo->sBufferInfo.ui32ByteStride = 0;
+	psDevInfo->sBufferInfo.ui32BufferDeviceID = id;
+	psDevInfo->sBufferInfo.ui32Flags = 0;
+	psDevInfo->sBufferInfo.ui32BufferCount = psDevInfo->ulNumBuffers;
+
+	return 0;
+}
+
+int
+GetBufferCount(unsigned int *puiBufferCount, int id)
+{
+	BC_VIDEO_DEVINFO *psDevInfo = GetAnchorPtr(id);
+
+	if (psDevInfo == IMG_NULL) {
+		return -1;
+	}
+
+	*puiBufferCount = (unsigned int) psDevInfo->sBufferInfo.ui32BufferCount;
+
+	return 0;
+}
+
+static int
+BCVideoBridge(struct drm_device *dev, IMG_VOID * arg,
+		struct drm_file *file_priv)
+{
+	int err = -EFAULT;
+
+	BC_VIDEO_DEVINFO *devinfo;
+	int i;
+	BC_Video_ioctl_package *psBridge = (BC_Video_ioctl_package *) arg;
+	int command = psBridge->ioctl_cmd;
+	int id = 0;
+
+	if (command == BC_Video_ioctl_request_buffers) {
+		for (i = 0; i < BC_VIDEO_DEVICE_MAX_ID; i++) {
+			if (bc_video_id_usage[i] == 0) {
+				bc_video_id_usage[i] = 1;
+				id = i;
+				break;
+			}
+		}
+		if (i == BC_VIDEO_DEVICE_MAX_ID) {
+			printk(KERN_ERR DRVNAME
+			       " : Does you really need to run more than 5 video simultaneously.\n");
+			return -1;
+		} else
+			BCVideoGetPriv(file_priv)->bcd_index = id;
+	} else
+		id = psBridge->device_id;
+
+	if ((devinfo = GetAnchorPtr(id)) == IMG_NULL)
+		return -ENODEV;
+
+	switch (command) {
+	case BC_Video_ioctl_get_buffer_count: {
+		if (GetBufferCount(&psBridge->outputparam, id) == -1) {
+			printk(KERN_ERR DRVNAME
+			       " : GetBufferCount error in BCVideoBridge.\n");
+			return err;
+		}
+		return 0;
+		break;
+	}
+	case BC_Video_ioctl_get_buffer_index: {
+		int idx;
+		BC_VIDEO_BUFFER *buffer;
+
+		for (idx = 0; idx < devinfo->ulNumBuffers; idx++) {
+			buffer = &devinfo->psSystemBuffer[idx];
+
+			if (psBridge->inputparam == buffer->sBufferHandle) {
+				psBridge->outputparam = idx;
+				return 0;
+			}
+		}
+		printk(KERN_ERR DRVNAME ": BCIOGET_BUFFERIDX- buffer not found\n");
+		return -EINVAL;
+		break;
+	}
+	case BC_Video_ioctl_request_buffers: {
+		bc_buf_params_t p;
+		if (copy_from_user
+		    (&p, (void __user *)(uintptr_t)
+		     (psBridge->inputparam), sizeof(p))) {
+			printk(KERN_ERR DRVNAME
+			       " : failed to copy inputparam to kernel.\n");
+			return -EFAULT;
+		}
+		psBridge->outputparam = id;
+		return BC_CreateBuffers(id, &p, IMG_FALSE);
+		break;
+	}
+	case BC_Video_ioctl_set_buffer_phyaddr: {
+		bc_buf_ptr_t p;
+		struct ttm_buffer_object *bo = NULL;
+		struct ttm_tt *ttm = NULL;
+		struct ttm_object_file *tfile = BCVideoGetPriv(file_priv)->tfile;
+
+		if (copy_from_user
+		    (&p, (void __user *)(uintptr_t)
+		     (psBridge->inputparam), sizeof(p))) {
+			printk(KERN_ERR DRVNAME
+			       " : failed to copy inputparam to kernel.\n");
+			return -EFAULT;
+		}
+
+		if (p.index >= devinfo->ulNumBuffers || !p.handle) {
+			printk(KERN_ERR DRVNAME
+			       " : index big than NumBuffers or p.handle is NULL.\n");
+			return -EINVAL;
+		}
+
+		bo = ttm_buffer_object_lookup(tfile, p.handle);
+		if (unlikely(bo == NULL)) {
+			printk(KERN_ERR DRVNAME
+			       " : Could not find buffer object for setstatus.\n");
+			return -EINVAL;
+		}
+		ttm = bo->ttm;
+
+		devinfo->psSystemBuffer[p.index].sCPUVAddr = NULL;
+		devinfo->psSystemBuffer[p.index].sBufferHandle = p.handle;
+		for (i = 0; i < ttm->num_pages; i++) {
+			if (ttm->pages[i] == NULL) {
+				printk(KERN_ERR DRVNAME
+				       " : Debug: the page is NULL.\n");
+				return -EINVAL;
+			}
+			devinfo->psSystemBuffer[p.index].psSysAddr[i].uiAddr =
+				page_to_pfn(ttm->pages[i]) << PAGE_SHIFT;
+		}
+		if (bo)
+			ttm_bo_unref(&bo);
+		return 0;
+		break;
+	}
+	case BC_Video_ioctl_release_buffer_device: {
+		bc_video_id_usage[id] = 0;
+
+		BCVideoDestroyBuffers(id);
+		return 0;
+		break;
+	}
+	case BC_Video_ioctl_alloc_buffer: {
+		bc_buf_ptr_t p;
+		IMG_VOID *pvBuf;
+		IMG_UINT32 ui32Size;
+		IMG_UINT32 ulCounter;
+		BUFFER_INFO *bufferInfo;
+
+		if (copy_from_user
+		    (&p, (void __user *)(uintptr_t)
+		     (psBridge->inputparam), sizeof(p))) {
+			printk(KERN_ERR DRVNAME
+			       " : failed to copy inputparam to kernel.\n");
+			return -EFAULT;
+		}
+
+		if (p.index >= devinfo->ulNumBuffers) {
+			printk(KERN_ERR DRVNAME " : index big than NumBuffers.\n");
+			return -EINVAL;
+		}
+
+		bufferInfo = &(devinfo->sBufferInfo);
+		if (bufferInfo->eIMGPixFmt != IMG_PIXFMT_YUV420_3PLANE) {
+			printk(KERN_ERR DRVNAME
+			       " : BC_Video_ioctl_alloc_buffer only support NV12 format.\n");
+			return -EINVAL;
+		}
+		ui32Size = bufferInfo->ui32Height * bufferInfo->ui32ByteStride;
+		ui32Size +=
+			(bufferInfo->ui32ByteStride >> 1) *
+			(bufferInfo->ui32Height >> 1) << 1;
+
+		pvBuf =
+			__vmalloc(ui32Size, GFP_KERNEL | __GFP_HIGHMEM,
+				  __pgprot((pgprot_val(PAGE_KERNEL) & ~_PAGE_CACHE_MASK)
+					   | _PAGE_CACHE_WC));
+		if (pvBuf == NULL) {
+			printk(KERN_ERR DRVNAME
+			       " : Failed to allocate %d bytes buffer.\n", ui32Size);
+			return -EINVAL;
+		}
+		devinfo->psSystemBuffer[p.index].sCPUVAddr = pvBuf;
+		devinfo->psSystemBuffer[p.index].sBufferHandle = 0;
+
+		i = 0;
+
+		for (ulCounter = 0; ulCounter < ui32Size; ulCounter += PAGE_SIZE) {
+			devinfo->psSystemBuffer[p.index].psSysAddr[i++].uiAddr =
+				vmalloc_to_pfn(pvBuf + ulCounter) << PAGE_SHIFT;
+		}
+
+		if (p.handle) {
+			printk(KERN_ERR DRVNAME
+			       " : fill data %d bytes from user space 0x%x.\n", ui32Size,
+			       (int) p.handle);
+			if (copy_from_user(pvBuf, (void __user *) p.handle, ui32Size)) {
+				printk(KERN_ERR DRVNAME
+				       " : failed to copy inputparam to kernel.\n");
+				return -EFAULT;
+			}
+
+		}
+		psBridge->outputparam = (int)(uintptr_t) pvBuf;
+
+		return 0;
+		break;
+	}
+	case BC_Video_ioctl_free_buffer: {
+		bc_buf_ptr_t p;
+
+		if (copy_from_user
+		    (&p, (void __user *)(uintptr_t)
+		     (psBridge->inputparam), sizeof(p))) {
+			printk(KERN_ERR DRVNAME
+			       " : failed to copy inputparam to kernel.\n");
+			return -EFAULT;
+		}
+
+		vfree(devinfo->psSystemBuffer[p.index].sCPUVAddr);
+		return 0;
+		break;
+	}
+	case BC_Video_ioctl_get_buffer_handle: {
+		int idx;
+		BC_VIDEO_BUFFER *buffer;
+
+		idx = (int)psBridge->inputparam;
+
+		if (idx > devinfo->ulNumBuffers || idx < 0) {
+			printk(KERN_ERR DRVNAME
+				" : Invalid device ID %d\n", idx);
+			return -EINVAL;
+		}
+
+		buffer = &devinfo->psSystemBuffer[idx];
+		psBridge->outputparam = buffer->sBufferHandle;
+
+		return 0;
+	}
+	default:
+		return err;
+	}
+
+	return 0;
+}
+
+int
+BC_Camera_Bridge(BC_Video_ioctl_package * psBridge, unsigned long pAddr)
+{
+	int err = -EFAULT;
+	BC_VIDEO_DEVINFO *devinfo;
+	int id = BC_CAMERA_DEVICEID;
+	int command = psBridge->ioctl_cmd;
+
+	if ((devinfo = GetAnchorPtr(BC_CAMERA_DEVICEID)) == IMG_NULL)
+		return -ENODEV;
+
+	switch (command) {
+	case BC_Video_ioctl_get_buffer_count: {
+		if (GetBufferCount(&psBridge->outputparam, id) == -1) {
+			printk(KERN_ERR DRVNAME
+			       " : GetBufferCount error in BCVideoBridge.\n");
+			return err;
+		}
+		return 0;
+		break;
+	}
+	case BC_Video_ioctl_get_buffer_index: {
+		int idx;
+		BC_VIDEO_BUFFER *buffer;
+
+		for (idx = 0; idx < devinfo->ulNumBuffers; idx++) {
+			buffer = &devinfo->psSystemBuffer[idx];
+
+			if (psBridge->inputparam == buffer->sBufferHandle) {
+				psBridge->outputparam = idx;
+				return 0;
+			}
+		}
+		printk(KERN_ERR DRVNAME ": BCIOGET_BUFFERIDX- buffer not found\n");
+		return -EINVAL;
+		break;
+	}
+	case BC_Video_ioctl_request_buffers: {
+		bc_buf_params_t p;
+		memcpy(&p, (void *)(uintptr_t)
+		       (psBridge->inputparam), sizeof(p));
+		if (p.type == BC_MEMORY_MMAP)
+			return BC_CreateBuffers(id, &p, IMG_TRUE);
+		else
+			return BC_CreateBuffers(id, &p, IMG_FALSE);
+		break;
+	}
+	case BC_Video_ioctl_release_buffer_device: {
+		return BCVideoDestroyBuffers(id);
+		break;
+	}
+	case BC_Video_ioctl_set_buffer_phyaddr: {
+		bc_buf_ptr_t p;
+
+		if (copy_from_user
+		    (&p, (void __user *)(uintptr_t)
+		     (psBridge->inputparam), sizeof(p))) {
+			printk(KERN_ERR DRVNAME
+			       " : failed to copy inputparam to kernel.\n");
+			return -EFAULT;
+		}
+
+		if (p.index >= devinfo->ulNumBuffers) {
+			printk(KERN_ERR DRVNAME " : index big than NumBuffers\n");
+			return -EINVAL;
+		}
+		if (devinfo->psSystemBuffer[p.index].is_conti_addr) {
+			/* Get the physical address of each frame */
+			devinfo->psSystemBuffer[p.index].psSysAddr[0].uiAddr =
+				pAddr +
+				p.index * PAGE_ALIGN(devinfo->psSystemBuffer[p.index].ulSize);
+		} else {
+			int i, num_pages, map_pages;
+			unsigned int start_addr = p.pa;
+			struct page **ppsPages;
+
+			if (start_addr & ~PAGE_MASK) {
+				printk(KERN_ERR DRVNAME
+				       " : the virtual address must be PAGE aligned.\n");
+				return -EFAULT;
+			}
+			num_pages = (p.size + PAGE_SIZE - 1) / PAGE_SIZE;
+
+			ppsPages = kmalloc((size_t)num_pages * sizeof(*ppsPages),  GFP_KERNEL);
+
+			if (ppsPages == NULL) {
+				printk(KERN_ERR DRVNAME
+				       " : fails to alloc page array.\n");
+				return -EFAULT;
+			}
+			memset(ppsPages, 0, (size_t)num_pages * sizeof(*ppsPages));
+
+			map_pages = get_user_pages(current, current->mm, p.pa, num_pages, 1, 0, ppsPages, NULL);
+
+			if (map_pages != num_pages) {
+				printk(KERN_ERR DRVNAME
+				       " : Couldn't map all the pages needed (wanted: %d, got %d).\n", num_pages, map_pages);
+				return -EFAULT;
+			}
+			devinfo->psSystemBuffer[p.index].sCPUVAddr = NULL;
+			devinfo->psSystemBuffer[p.index].sBufferHandle = 0;
+			for (i = 0; i < num_pages; i++) {
+				devinfo->psSystemBuffer[p.index].psSysAddr[i].uiAddr =
+					page_to_pfn(ppsPages[i]) << PAGE_SHIFT;
+			}
+
+			kfree(ppsPages);
+		}
+		return 0;
+		break;
+	}
+	default:
+		return err;
+	}
+
+	return 0;
+}
+
+#define	PVR_DRM_BC_CMD		DRM_PVR_RESERVED3
+
+/*bc_video ioctl*/
+#define DRM_IOCTL_BUFFER_CLASS_VIDEO \
+	DRM_IOWR(DRM_COMMAND_BASE + PVR_DRM_BC_CMD, BC_Video_ioctl_package)
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0))
+#define IOCTL_DEF(ioctl, func, flags) {ioctl, flags, func}
+#else
+#define IOCTL_DEF(ioctl, func, flags) {ioctl, flags, func, ioctl}
+#endif
+struct drm_ioctl_desc sBCdrmIoctls[] = {
+	IOCTL_DEF(DRM_IOCTL_BUFFER_CLASS_VIDEO, BCVideoBridge, DRM_AUTH)
+};
+
+static int bc_max_ioctl = DRM_ARRAY_SIZE(sBCdrmIoctls);
+
+void BCVideoQueryIoctls(struct drm_ioctl_desc *ioctls)
+{
+	int i;
+
+	for (i = 0; i < bc_max_ioctl; i++)
+	{
+		unsigned int slot = DRM_IOCTL_NR(sBCdrmIoctls[i].cmd) - DRM_COMMAND_BASE;
+		ioctls[slot] = sBCdrmIoctls[i];
+	}
+}
+
+EXPORT_SYMBOL(BC_Camera_Bridge);
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/system/rgx_intel/pvr_drm_ext.c b/drivers/external_drivers/intel_media/graphics/rgx/services/system/rgx_intel/pvr_drm_ext.c
new file mode 100644
index 0000000..020301b
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/system/rgx_intel/pvr_drm_ext.c
@@ -0,0 +1,348 @@
+/*************************************************************************/ /*!                                                                                                               
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved                                                                                                               
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/mm.h>
+#include <linux/version.h>
+#include <drm/drmP.h>
+#include <drm/drm.h>
+
+#include "img_defs.h"
+#include "lock.h"
+#include "pvr_drm_ext.h"
+#include "pvrsrv_interface.h"
+#include "pvr_bridge.h"
+#include "srvkm.h"
+#include "dc_mrfld.h"
+#include "drm_shared.h"
+#include "linkage.h"
+
+#if defined(PDUMP)
+#include "linuxsrv.h"
+#endif
+
+#include <linux/module.h>
+#include "pvrmodule.h"
+
+#define PVR_DRM_SRVKM_CMD       DRM_PVR_RESERVED1
+#define PVR_DRM_IS_MASTER_CMD   DRM_PVR_RESERVED4
+#define PVR_DRM_DBGDRV_CMD      DRM_PVR_RESERVED6
+
+#define PVR_DRM_SRVKM_IOCTL \
+	DRM_IOW(DRM_COMMAND_BASE + PVR_DRM_SRVKM_CMD, PVRSRV_BRIDGE_PACKAGE)
+
+#define PVR_DRM_IS_MASTER_IOCTL \
+	DRM_IO(DRM_COMMAND_BASE + PVR_DRM_IS_MASTER_CMD)
+
+#if defined(PDUMP)
+#define	PVR_DRM_DBGDRV_IOCTL \
+	DRM_IOW(DRM_COMMAND_BASE + PVR_DRM_DBGDRV_CMD, IOCTL_PACKAGE)
+#endif
+
+static int
+PVRDRMIsMaster(struct drm_device *dev, void *arg, struct drm_file *pFile)
+{
+	return 0;
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0))
+static struct drm_ioctl_desc pvr_ioctls[] = {
+	{PVR_DRM_SRVKM_IOCTL, DRM_UNLOCKED, PVRSRV_BridgeDispatchKM},
+	{PVR_DRM_IS_MASTER_IOCTL, DRM_MASTER, PVRDRMIsMaster},
+#if defined(PDUMP)
+	{PVR_DRM_DBGDRV_IOCTL, 0, dbgdrv_ioctl}
+#endif
+};
+#else
+static struct drm_ioctl_desc pvr_ioctls[] = {
+	{PVR_DRM_SRVKM_IOCTL, DRM_UNLOCKED, PVRSRV_BridgeDispatchKM, PVR_DRM_SRVKM_IOCTL},
+	{PVR_DRM_IS_MASTER_IOCTL, DRM_MASTER, PVRDRMIsMaster, PVR_DRM_IS_MASTER_IOCTL},
+#if defined(PDUMP)
+	{PVR_DRM_DBGDRV_IOCTL, 0, dbgdrv_ioctl. PVR_DRM_DBGDRV_IOCTL}
+#endif
+};
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0)) */
+
+DECLARE_WAIT_QUEUE_HEAD(sWaitForInit);
+
+static bool bInitComplete;
+static bool bInitFailed;
+
+struct pci_dev *gpsPVRLDMDev;
+
+struct drm_device *gpsPVRDRMDev;
+
+#define PVR_DRM_FILE struct drm_file *
+
+int PVRSRVDrmLoad(struct drm_device *dev, unsigned long flags)
+{
+	int iRes = 0;
+
+	DRM_DEBUG("PVRSRVDrmLoad");
+
+	gpsPVRDRMDev = dev;
+	gpsPVRLDMDev = dev->pdev;
+
+#if defined(PDUMP)
+	iRes = dbgdrv_init();
+	if (iRes != 0)
+	{
+		goto exit;
+	}
+#endif
+	
+	iRes = PVRCore_Init();
+	if (iRes != 0)
+	{
+		goto exit_dbgdrv_cleanup;
+	}
+
+	if (MerrifieldDCInit(dev) != PVRSRV_OK)
+	{
+		DRM_ERROR("%s: display class init failed\n", __FUNCTION__);
+		goto exit_pvrcore_cleanup;
+	}
+
+	goto exit;
+
+exit_pvrcore_cleanup:
+	PVRCore_Cleanup();
+
+exit_dbgdrv_cleanup:
+#if defined(PDUMP)
+	dbgdrv_cleanup();
+#endif
+exit:
+	if (iRes != 0)
+	{
+		bInitFailed = true;
+	}
+	bInitComplete = true;
+
+	wake_up_interruptible(&sWaitForInit);
+
+	return iRes;
+}
+
+int PVRSRVDrmUnload(struct drm_device *dev)
+{
+	DRM_DEBUG("PVRSRVDrmUnload");
+
+	if (MerrifieldDCDeinit() != PVRSRV_OK)
+	{
+		DRM_ERROR("%s: can't deinit display class\n", __FUNCTION__);
+	}
+
+	PVRCore_Cleanup();
+
+#if defined(PDUMP)
+	dbgdrv_cleanup();
+#endif
+
+	return 0;
+}
+
+int PVRSRVDrmOpen(struct drm_device *dev, struct drm_file *file)
+{
+	while (!bInitComplete)
+	{
+		DEFINE_WAIT(sWait);
+
+		prepare_to_wait(&sWaitForInit, &sWait, TASK_INTERRUPTIBLE);
+
+		if (!bInitComplete)
+		{
+			DRM_DEBUG("%s: Waiting for module initialisation to complete", __FUNCTION__);
+
+			schedule();
+		}
+
+		finish_wait(&sWaitForInit, &sWait);
+
+		if (signal_pending(current))
+		{
+			return -ERESTARTSYS;
+		}
+	}
+
+	if (bInitFailed)
+	{
+		DRM_DEBUG("%s: Module initialisation failed", __FUNCTION__);
+		return -EINVAL;
+	}
+
+	return PVRSRVOpen(dev, file);
+}
+
+void PVRSRVDrmPostClose(struct drm_device *dev, struct drm_file *file)
+{
+	PVRSRVRelease(dev, file);
+
+	file->driver_priv = NULL;
+}
+
+void PVRSRVQueryIoctls(struct drm_ioctl_desc *ioctls)
+{
+	int i;
+
+	for (i = 0; i < DRM_ARRAY_SIZE(pvr_ioctls); i++)
+	{
+		unsigned int slot = DRM_IOCTL_NR(pvr_ioctls[i].cmd) - DRM_COMMAND_BASE;
+		ioctls[slot] = pvr_ioctls[i];
+	}
+}
+
+unsigned int PVRSRVGetMeminfoSize(void* hMemHandle)
+{
+    PVRSRV_MEMINFO  minfo;
+
+    if (copy_from_user(&minfo,hMemHandle,sizeof minfo))
+    {
+        return 0;
+    }
+
+    return minfo.uiAllocationSize;
+}
+
+void * PVRSRVGetMeminfoCPUAddr(void* hMemHandle)
+{
+    PVRSRV_MEMINFO  minfo;
+
+    if (copy_from_user(&minfo,hMemHandle,sizeof minfo))
+    {
+        return 0;
+    }
+
+    return minfo.pvCpuVirtAddr;
+}
+
+int PVRSRVGetMeminfoPages(void* hMemHandle, int npages, struct page ***pages)
+{
+    PVRSRV_MEMINFO  minfo;
+    struct page   **pglist;
+    uint32_t        kaddr;
+    int             res;
+
+    if (copy_from_user(&minfo,hMemHandle,sizeof minfo))
+    {
+        return -EFAULT;
+    }
+
+    kaddr = (uint32_t)(uintptr_t)minfo.pvCpuVirtAddr;
+
+    if ((pglist = kzalloc(npages * sizeof(struct page*),GFP_KERNEL)) == NULL)
+    {
+        return -ENOMEM;
+    }
+
+    down_read(&current->mm->mmap_sem);
+    res = get_user_pages(current,current->mm,kaddr,npages,0,0,pglist,NULL);
+    up_read(&current->mm->mmap_sem);
+
+    if (res <= 0)
+    {
+        kfree(pglist);
+        return res;
+    }
+
+    *pages = pglist;
+	return 0;
+}
+
+int PVRSRVGetMeminfoPfn(void           *hMemHandle,
+                        int             npages,
+                        unsigned long **pfns)
+{
+    PVRSRV_MEMINFO          minfo;
+    struct vm_area_struct  *vma;
+    unsigned long          *pfnlist;
+    uint32_t                kaddr;
+    int                     res, pg = 0;
+
+    /*
+     *  This 'handle' is a pointer in user space to a meminfo struct.
+     *  We need to copy it here and get the user's view of memory.
+     */
+    if (copy_from_user(&minfo,hMemHandle,sizeof minfo))
+    {
+        return 0;
+    }
+
+    kaddr = (uint32_t)(uintptr_t)minfo.pvCpuVirtAddr;
+
+    if ((pfnlist = kzalloc(npages * sizeof(unsigned long),
+                           GFP_KERNEL)) == NULL)
+    {
+        return -ENOMEM;
+    }
+
+    while (pg < npages)
+    {
+        if ((vma = find_vma(current->mm,
+                            kaddr + (pg * PAGE_SIZE))) == NULL)
+        {
+            kfree(pfnlist);
+            return -EFAULT;
+        }
+
+        if ((res = follow_pfn(
+                        vma,
+                        (unsigned long)(kaddr + (pg * PAGE_SIZE)),
+                        &pfnlist[pg])) < 0)
+        {
+            kfree(pfnlist);
+            return res;
+        }
+
+        ++pg;
+    }
+
+    *pfns = pfnlist;
+    return 0;
+}
+
+int PVRSRVInterrupt(struct drm_device* dev)
+{
+	return 1;
+}
+
+int PVRSRVMMap(struct file *pFile, struct vm_area_struct *ps_vma)
+{
+	return MMapPMR(pFile, ps_vma);
+}
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/system/rgx_intel/pvr_drm_ext.h b/drivers/external_drivers/intel_media/graphics/rgx/services/system/rgx_intel/pvr_drm_ext.h
new file mode 100644
index 0000000..a163927
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/system/rgx_intel/pvr_drm_ext.h
@@ -0,0 +1,59 @@
+/*************************************************************************/ /*!                                                                                                               
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved                                                                                                               
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__PVR_DRM_EXT_H__)
+#define __PVR_DRM_EXT_H__
+
+int PVRCore_Init(void);
+void PVRCore_Cleanup(void);
+void PVRSRVRelease(struct drm_device *dev, struct drm_file *pDRMFile);
+
+int PVRSRV_BridgeDispatchKM(struct drm_device *dev, void *arg, struct drm_file *pFile);
+
+int PVRSRVDrmOpen(struct drm_device *dev, struct drm_file *file);
+int PVRSRVDrmRelease(struct inode *inode, struct file *filp);
+
+#if defined(PDUMP)
+int dbgdrv_init(void);
+void dbgdrv_cleanup(void);
+int dbgdrv_ioctl(struct drm_device *dev, void *arg, struct drm_file *pFile);
+#endif
+
+#endif /* !defined(__PVR_DRM_EXT_H) */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/system/rgx_intel/pvr_drm_shared.h b/drivers/external_drivers/intel_media/graphics/rgx/services/system/rgx_intel/pvr_drm_shared.h
new file mode 100644
index 0000000..603856e
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/system/rgx_intel/pvr_drm_shared.h
@@ -0,0 +1,121 @@
+/*************************************************************************/ /*!
+@File
+@Title          Services DRM definitions shared between kernel and user space.
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__PVR_DRM_SHARED_H__)
+#define __PVR_DRM_SHARED_H__
+
+#if defined(SUPPORT_DRM)
+#include <linux/types.h>
+
+/* 
+ * DRM command numbers, relative to DRM_COMMAND_BASE. 
+ * These defines must be prefixed with "DRM_".
+ */
+#define DRM_PVR_SRVKM_CMD			0x12 /* Used for PVR Services ioctls */
+#define DRM_PVR_DBGDRV_CMD			1 /* Debug driver (PDUMP) ioctls */
+#define DRM_PVR_UNPRIV_CMD			2 /* PVR driver unprivileged ioctls */
+#define DRM_PVR_GEM_CREATE			3
+#define DRM_PVR_GEM_TO_IMG_HANDLE		4
+#define DRM_PVR_IMG_TO_GEM_HANDLE		5
+#define DRM_PVR_GEM_SYNC_GET			6
+
+
+#if !defined(SUPPORT_KERNEL_SRVINIT)
+/* Subcommands of DRM_PVR_UNPRIV_CMD */
+#define	DRM_PVR_UNPRIV_CMD_INIT_SUCCESFUL	0 /* PVR Services init succesful */
+
+typedef struct drm_pvr_unpriv_cmd_tag
+{
+	uint32_t	cmd;
+	int32_t		result;
+} drm_pvr_unpriv_cmd;
+#endif	/* #if !defined(SUPPORT_KERNEL_SRVINIT) */
+
+#define PVR_GEM_USE_SCANOUT	(1 << 0)
+#define PVR_GEM_USE_CURSOR	(2 << 0)
+
+typedef	struct drm_pvr_gem_create_tag
+{
+	/* Input parameters (preserved by the ioctl) */
+	uint64_t	size;
+	uint32_t	alloc_flags;
+	uint32_t	usage_flags;
+
+	/* Output parameters */
+	uint32_t	handle;
+	uint32_t	pad;
+} drm_pvr_gem_create;
+
+typedef	struct drm_pvr_gem_to_img_handle_tag
+{	
+	/* Input parameters (preserved by the ioctl) */
+	uint32_t	gem_handle;
+	uint32_t	pad;
+
+	/* Output parameters */
+	uint64_t	img_handle;
+} drm_pvr_gem_to_img_handle;
+
+typedef	struct drm_pvr_img_to_gem_handle_tag
+{	
+	/* Input parameters (preserved by the ioctl) */
+	uint64_t	img_handle;
+
+	/* Output parameters */
+	uint32_t	gem_handle;
+	uint32_t	pad;
+} drm_pvr_img_to_gem_handle;
+
+typedef struct drm_pvr_gem_sync_get_tag
+{
+	/* Input parameters (preserved by the ioctl) */
+	uint32_t	gem_handle;
+	uint32_t	type;
+
+	/* Output parameters */
+	uint64_t	sync_handle;
+	uint32_t	firmware_addr;
+	uint32_t	pad;
+} drm_pvr_gem_sync_get;
+
+#endif /* defined(SUPPORT_DRM) */
+#endif /* defined(__PVR_DRM_SHARED_H__) */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/system/rgx_intel/sysconfig.c b/drivers/external_drivers/intel_media/graphics/rgx/services/system/rgx_intel/sysconfig.c
new file mode 100644
index 0000000..ea78354f
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/system/rgx_intel/sysconfig.c
@@ -0,0 +1,327 @@
+/*************************************************************************/ /*!
+@File           
+@Title          System Configuration
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    System Configuration functions
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <drm/drmP.h>
+#include "img_types.h"
+#include "pwr_mgmt.h"
+#include "psb_irq.h"
+#include "pvrsrv_device.h"
+#include "syscommon.h"
+#include "sysconfig.h"
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "osfunc.h"
+
+#include "pci_support.h"
+
+#include "dfrgx_interface.h"
+
+#if defined(SUPPORT_ION)
+#include PVR_ANDROID_ION_HEADER
+#include "ion_support.h"
+#endif
+
+typedef struct _PLAT_DATA_
+{
+	IMG_HANDLE	hRGXPCI;
+
+	struct drm_device *psDRMDev;
+} PLAT_DATA;
+
+PLAT_DATA *gpsPlatData = IMG_NULL;
+extern struct drm_device *gpsPVRDRMDev;
+
+/* Unused globals to keep link with 3rdparty components happy */
+IMG_BOOL gbSystemActivePMEnabled;
+IMG_BOOL gbSystemActivePMInit;
+
+/*
+	PCIInitDev
+*/
+static PVRSRV_ERROR PCIInitDev(PLAT_DATA *psPlatData)
+{
+	PVRSRV_DEVICE_CONFIG *psDevice = &sSysConfig.pasDevices[0];
+	PVRSRV_ERROR eError;
+	IMG_UINT32 ui32MaxOffset;
+	IMG_UINT32 ui32BaseAddr = 0;
+
+	psPlatData->psDRMDev = gpsPVRDRMDev;
+	if (!psPlatData->psDRMDev)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PCIInitDev: DRM device not initialized"));
+		return PVRSRV_ERROR_NOT_SUPPORTED;
+	}
+
+	if (!IS_MRFLD(psPlatData->psDRMDev))
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PCIInitDev: Device 0x%08x not supported", psPlatData->psDRMDev->pci_device));
+		return PVRSRV_ERROR_NOT_SUPPORTED;
+	}
+
+	psPlatData->hRGXPCI = OSPCISetDev((IMG_VOID *)psPlatData->psDRMDev->pdev, 0);
+	if (!psPlatData->hRGXPCI)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PCIInitDev: Failed to acquire PCI device"));
+		return PVRSRV_ERROR_PCI_DEVICE_NOT_FOUND;
+	}
+
+	ui32MaxOffset = OSPCIAddrRangeLen(psPlatData->hRGXPCI, 0);
+	if (ui32MaxOffset < (RGX_REG_OFFSET + RGX_REG_SIZE))
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PCIInitDev: Device memory region 0x%08x isn't big enough", ui32MaxOffset));
+		return PVRSRV_ERROR_PCI_REGION_TOO_SMALL;
+	}
+	PVR_DPF((PVR_DBG_WARNING,"PCIInitDev: Device memory region len 0x%08x", ui32MaxOffset));
+
+	/* Reserve the address range */
+	if (OSPCIRequestAddrRange(psPlatData->hRGXPCI, 0) != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PCIInitDev: Device memory region not available"));
+		return PVRSRV_ERROR_PCI_REGION_UNAVAILABLE;
+
+	}
+
+	ui32BaseAddr = OSPCIAddrRangeStart(psPlatData->hRGXPCI, 0);
+
+	if (OSPCIIRQ(psPlatData->hRGXPCI, &psDevice->ui32IRQ) != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PCIInitDev: Couldn't get IRQ"));
+		eError = PVRSRV_ERROR_INVALID_DEVICE;
+		goto e4;
+	}
+	PVR_DPF((PVR_DBG_WARNING, "PCIInitDev: BaseAddr 0x%08x, EndAddr 0x%llx, IRQ %d",
+			ui32BaseAddr, OSPCIAddrRangeEnd(psPlatData->hRGXPCI, 0), psDevice->ui32IRQ));
+
+	psDevice->sRegsCpuPBase.uiAddr = ui32BaseAddr + RGX_REG_OFFSET;
+	psDevice->ui32RegsSize = RGX_REG_SIZE;
+	PVR_DPF((PVR_DBG_WARNING, "PCIInitDev: sRegsCpuPBase 0x%llx, size 0x%x",
+			psDevice->sRegsCpuPBase.uiAddr, psDevice->ui32RegsSize));
+
+	return PVRSRV_OK;
+
+e4:
+	OSPCIReleaseAddrRange(psPlatData->hRGXPCI, 0);
+	OSPCIReleaseDev(psPlatData->hRGXPCI);
+
+	return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function		PCIDeInitDev
+
+ @Description
+
+ Uninitialise the PCI device when it is no loger required
+
+ @Input		psSysData :	System data
+
+ @Return	none
+
+******************************************************************************/
+static IMG_VOID PCIDeInitDev(PLAT_DATA *psPlatData)
+{
+	OSPCIReleaseAddrRange(psPlatData->hRGXPCI, 0);
+	OSPCIReleaseDev(psPlatData->hRGXPCI);
+}
+
+
+PVRSRV_ERROR SysCreateConfigData(PVRSRV_SYSTEM_CONFIG **ppsSysConfig, void *hDevice)
+{
+	PLAT_DATA *psPlatData;
+	PVRSRV_ERROR eError;
+
+	PVR_UNREFERENCED_PARAMETER(hDevice);
+
+	psPlatData = OSAllocZMem(sizeof(*psPlatData));
+
+	/* Query the Emu for reg and IRQ information */
+	eError = PCIInitDev(psPlatData);
+	if (eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+
+	/* Save data for this device */
+	sSysConfig.pasDevices[0].hSysData = (IMG_HANDLE) psPlatData;
+
+	/* Save private data for the physical memory heap */
+	gsPhysHeapConfig[0].hPrivData = (IMG_HANDLE) psPlatData;
+
+#if defined(TDMETACODE)
+	#error "Not supported services/3rdparty/intel_drm/sysconfig.c"
+	gsPhysHeapConfig[1].hPrivData = IMG_NULL;
+#endif
+
+	*ppsSysConfig = &sSysConfig;
+
+	gpsPlatData = psPlatData;
+
+
+	/* Setup other system specific stuff */
+#if defined(SUPPORT_ION)
+	IonInit(NULL);
+#endif
+
+	return PVRSRV_OK;
+e0:
+	return eError;
+}
+
+IMG_VOID SysDestroyConfigData(PVRSRV_SYSTEM_CONFIG *psSysConfig)
+{
+	PLAT_DATA *psPlatData = gpsPlatData;
+
+	PVR_UNREFERENCED_PARAMETER(psSysConfig);
+	PCIDeInitDev(psPlatData);
+	OSFreeMem(psPlatData);
+
+#if defined(SUPPORT_ION)
+	IonDeinit();
+#endif
+}
+
+PVRSRV_ERROR SysDebugInfo(PVRSRV_SYSTEM_CONFIG *psSysConfig, DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf)
+{
+	PVR_UNREFERENCED_PARAMETER(psSysConfig);
+	PVR_UNREFERENCED_PARAMETER(pfnDumpDebugPrintf);
+	return PVRSRV_OK;
+}
+
+static IMG_VOID SysCpuPAddrToDevPAddr(IMG_HANDLE hPrivData,
+										IMG_UINT32 ui32NumOfAddr,
+										IMG_DEV_PHYADDR *psDevPAddr,
+										IMG_CPU_PHYADDR *psCpuPAddr)
+{
+	psDevPAddr[0].uiAddr = psCpuPAddr[0].uiAddr;
+	if (ui32NumOfAddr > 1)
+	{
+		IMG_UINT32 ui32Idx;
+		for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx)
+		{
+			psDevPAddr[ui32Idx].uiAddr = psCpuPAddr[ui32Idx].uiAddr;
+		}
+	}
+}
+
+static IMG_VOID SysDevPAddrToCpuPAddr(IMG_HANDLE hPrivData,
+										IMG_UINT ui32NumOfAddr,
+										IMG_CPU_PHYADDR *psCpuPAddr,
+										IMG_DEV_PHYADDR *psDevPAddr)
+{
+	psCpuPAddr[0].uiAddr = psDevPAddr[0].uiAddr;
+	if (ui32NumOfAddr > 1)
+	{
+		IMG_UINT32 ui32Idx;
+		for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx)
+		{
+			psCpuPAddr[ui32Idx].uiAddr = psDevPAddr[ui32Idx].uiAddr;
+		}
+	}
+}
+
+static PVRSRV_ERROR SysDevicePrePowerState(
+		PVRSRV_DEV_POWER_STATE eNewPowerState,
+		PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+		IMG_BOOL bForced)
+{
+	if ((eNewPowerState != eCurrentPowerState) &&
+		(eNewPowerState == PVRSRV_DEV_POWER_STATE_OFF))
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "Remove SGX power"));
+
+		if (!power_island_put(OSPM_GRAPHICS_ISLAND))
+			return PVRSRV_ERROR_DEVICE_POWER_CHANGE_FAILURE;
+
+		/*Report dfrgx We have the device OFF*/
+		dfrgx_interface_power_state_set(0);
+	}
+
+	return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR SysDevicePostPowerState(
+		PVRSRV_DEV_POWER_STATE eNewPowerState,
+		PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+		IMG_BOOL bForced)
+{
+	if ((eNewPowerState != eCurrentPowerState) &&
+		(eCurrentPowerState == PVRSRV_DEV_POWER_STATE_OFF))
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "Restore SGX power"));
+
+		if (!power_island_get(OSPM_GRAPHICS_ISLAND))
+			return PVRSRV_ERROR_DEVICE_POWER_CHANGE_FAILURE;
+
+		/*Report dfrgx We have the device ON*/
+		dfrgx_interface_power_state_set(1);
+	}
+
+	return PVRSRV_OK;
+}
+
+typedef int (*psb_irq_handler_t)(void *data);
+
+PVRSRV_ERROR SysInstallDeviceLISR(IMG_UINT32 ui32IRQ,
+				  IMG_CHAR *pszName,
+				  PFN_LISR pfnLISR,
+				  IMG_PVOID pvData,
+				  IMG_HANDLE *phLISRData)
+{
+	register_rgx_irq_handler((psb_irq_handler_t) pfnLISR, pvData);
+	return PVRSRV_OK;
+
+}
+
+PVRSRV_ERROR SysUninstallDeviceLISR(IMG_HANDLE hLISRData)
+{
+	register_rgx_irq_handler(IMG_NULL, IMG_NULL);
+	return PVRSRV_OK;
+
+}
+
+
+/******************************************************************************
+ End of file (sysconfig.c)
+******************************************************************************/
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/system/rgx_intel/sysconfig.h b/drivers/external_drivers/intel_media/graphics/rgx/services/system/rgx_intel/sysconfig.h
new file mode 100644
index 0000000..96f1685
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/system/rgx_intel/sysconfig.h
@@ -0,0 +1,172 @@
+/*************************************************************************/ /*!
+@File           
+@Title          System Description Header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This header provides system-specific declarations and macros
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvrsrv_device.h"
+#include "rgxdevice.h"
+#include "sysinfo.h"
+
+#if !defined(__SYSCCONFIG_H__)
+#define __SYSCCONFIG_H__
+
+static IMG_VOID SysCpuPAddrToDevPAddr(IMG_HANDLE hPrivData,
+										IMG_UINT32 ui32NumOfAddr,
+										IMG_DEV_PHYADDR *psDevPAddr,
+										IMG_CPU_PHYADDR *psCpuPAddr);
+
+static IMG_VOID SysDevPAddrToCpuPAddr(IMG_HANDLE hPrivData,
+										IMG_UINT32 ui32NumOfAddr,
+										IMG_CPU_PHYADDR *psCpuPAddr,
+										IMG_DEV_PHYADDR *psDevPAddr);
+
+static PVRSRV_ERROR SysDevicePostPowerState(
+		PVRSRV_DEV_POWER_STATE eNewPowerState,
+		PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+		IMG_BOOL bForced);
+
+static PVRSRV_ERROR SysDevicePrePowerState(
+		PVRSRV_DEV_POWER_STATE eNewPowerState,
+		PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+		IMG_BOOL bForced);
+
+static RGX_TIMING_INFORMATION sRGXTimingInfo =
+{
+	.ui32CoreClockSpeed		= RGX_CORE_CLOCK_SPEED_DEFAULT,
+	.bEnableActivePM		= IMG_TRUE,
+	.bEnableRDPowIsland		= IMG_FALSE,
+
+	/* ui32ActivePMLatencyms */
+	.ui32ActivePMLatencyms		= RGX_APM_LATENCY_DEFAULT
+};
+
+static RGX_DATA sRGXData =
+{
+	.psRGXTimingInfo = &sRGXTimingInfo,
+};
+
+static PVRSRV_DEVICE_CONFIG sDevices[] =
+{
+	/* RGX device */
+	{
+		.eDeviceType            = PVRSRV_DEVICE_TYPE_RGX,
+		.pszName                = "RGX",
+
+		/* Device setup information */
+		.sRegsCpuPBase          = { 0 },
+		.ui32RegsSize           = 0,
+		.ui32IRQ                = 0,
+
+		/* No power management on no HW system */
+		.pfnPrePowerState       = SysDevicePrePowerState,
+		.pfnPostPowerState      = SysDevicePostPowerState,
+
+		.hDevData               = &sRGXData,
+		.hSysData               = IMG_NULL,
+
+		.aui32PhysHeapID = { 0, 0 },
+	}
+};
+
+static PHYS_HEAP_FUNCTIONS gsPhysHeapFuncs = {
+	.pfnCpuPAddrToDevPAddr	= SysCpuPAddrToDevPAddr,
+	.pfnDevPAddrToCpuPAddr	= SysDevPAddrToCpuPAddr,
+};
+
+#if defined(TDMETACODE)
+#error "TDMETACODE Need to be implemented or not supported in services/3rdparty/intel_drm/sysconfig.h"
+#else
+static PHYS_HEAP_CONFIG	gsPhysHeapConfig[1] = {
+	{
+	.ui32PhysHeapID			= 0,
+	.eType					= PHYS_HEAP_TYPE_UMA,
+	.pszPDumpMemspaceName	= "SYSMEM",
+	.psMemFuncs				= &gsPhysHeapFuncs,
+	.hPrivData				= IMG_NULL,
+	}
+};
+#endif
+
+/* default BIF tiling heap x-stride configurations. */
+static IMG_UINT32 gauiBIFTilingHeapXStrides[RGXFWIF_NUM_BIF_TILING_CONFIGS] =
+{
+    0, /* BIF tiling heap 1 x-stride */
+    1, /* BIF tiling heap 2 x-stride */
+    2, /* BIF tiling heap 3 x-stride */
+    3  /* BIF tiling heap 4 x-stride */
+};
+
+
+static PVRSRV_SYSTEM_CONFIG sSysConfig = {
+	.pszSystemName = "Merrifield with Rogue",
+	.uiDeviceCount = sizeof(sDevices)/sizeof(PVRSRV_DEVICE_CONFIG),
+	.pasDevices = &sDevices[0],
+
+	/* Physcial memory heaps */
+	.ui32PhysHeapCount = sizeof(gsPhysHeapConfig) / sizeof(PHYS_HEAP_CONFIG),
+	.pasPhysHeaps = &(gsPhysHeapConfig[0]),
+
+	/* No power management on no HW system */
+	.pfnSysPrePowerState = NULL,
+	.pfnSysPostPowerState = NULL,
+
+	.pui32BIFTilingHeapConfigs = &gauiBIFTilingHeapXStrides[0],
+	.ui32BIFTilingHeapCount = IMG_ARR_NUM_ELEMS(gauiBIFTilingHeapXStrides),
+
+	/* no cache snooping */
+	.eCacheSnoopingMode = PVRSRV_SYSTEM_SNOOP_CPU_ONLY,
+};
+
+#define VENDOR_ID_MERRIFIELD        0x8086
+#define DEVICE_ID_MERRIFIELD        0x1180
+#define DEVICE_ID_MOOREFIELD        0x1480
+
+#define RGX_REG_OFFSET              0x100000
+#define RGX_REG_SIZE                0x10000
+
+#define IS_MRFLD(dev) ((((dev)->pci_device & 0xFFF8) == DEVICE_ID_MERRIFIELD) || \
+			(((dev)->pci_device & 0xFFF8) == DEVICE_ID_MOOREFIELD))
+
+/*****************************************************************************
+ * system specific data structures
+ *****************************************************************************/
+ 
+#endif	/* __SYSCCONFIG_H__ */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/services/system/rgx_intel/sysinfo.h b/drivers/external_drivers/intel_media/graphics/rgx/services/system/rgx_intel/sysinfo.h
new file mode 100644
index 0000000..94f5484
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/services/system/rgx_intel/sysinfo.h
@@ -0,0 +1,91 @@
+/*************************************************************************/ /*!
+@File           
+@Title          System Description Header
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This header provides system-specific declarations and macros
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__SYSINFO_H__)
+#define __SYSINFO_H__
+
+/*! Sleep time (1h) for Devices Watchdog thread when GPU is in power off state */
+#define DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT 60 * 60 * 1000
+#define DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT  (10000)
+
+/*!< System specific poll/timeout details */
+#define MAX_HW_TIME_US                 (500000)
+#define FATAL_ERROR_DETECTION_POLL_MS  (10000)
+#define WAIT_TRY_COUNT                 (10000)
+
+#define SYS_DEVICE_COUNT 3 /* RGX, DISPLAY (external), BUFFER (external) */
+
+#if defined(TDMETACODE)
+#define SYS_PHYS_HEAP_COUNT		2
+#else
+#define SYS_PHYS_HEAP_COUNT		1
+#endif
+
+#define SYS_RGX_DEV_VENDOR_ID		0x8086
+#define SYS_RGX_DEV_DEVICE_ID		0x1180
+
+/*!
+* Active Power Latency default in ms
+*/
+#define RGX_APM_LATENCY_DEFAULT			(500)
+
+/*!
+* Core Clock Speed in Hz
+*/
+#define RGX_CORE_CLOCK_SPEED_DEFAULT	(400000000)
+
+#if defined(__linux__)
+#define SYS_RGX_DEV_NAME    "Merrifield"
+#if defined(SUPPORT_DRM)
+/*
+ * Use the static bus ID for the platform DRM device.
+ */
+#if defined(PVR_DRM_DEV_BUS_ID)
+#define	SYS_RGX_DEV_DRM_BUS_ID	PVR_DRM_DEV_BUS_ID
+#else
+#define SYS_RGX_DEV_DRM_BUS_ID	"platform:Merrifield"
+#endif	/* defined(PVR_DRM_DEV_BUS_ID) */
+#endif	/* defined(SUPPORT_DRM) */
+#endif
+
+#endif	/* !defined(__SYSINFO_H__) */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/tools/services/debug/dbgdriv/common/dbgdriv.c b/drivers/external_drivers/intel_media/graphics/rgx/tools/services/debug/dbgdriv/common/dbgdriv.c
new file mode 100644
index 0000000..0b68e6b
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/tools/services/debug/dbgdriv/common/dbgdriv.c
@@ -0,0 +1,1570 @@
+/*************************************************************************/ /*!
+@File
+@Title          Debug Driver
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    32 Bit kernel mode debug driver
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if defined(_WIN32)
+#pragma  warning(disable:4201)
+#pragma  warning(disable:4214)
+#pragma  warning(disable:4115)
+#pragma  warning(disable:4514)
+
+
+#include <ntddk.h>
+#include <windef.h>
+#include <winerror.h>
+#endif /* _WIN32 */
+
+#ifdef LINUX
+#include <linux/string.h>
+#endif
+
+#if defined (__QNXNTO__)
+#include <string.h>
+#endif
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include "dbgdrvif_srv5.h"
+#include "dbgdriv.h"
+#include "hostfunc.h"
+
+#ifdef _WIN32
+#pragma  warning(default:4214)
+#pragma  warning(default:4115)
+#endif /* _WIN32 */
+
+
+/******************************************************************************
+ Types
+******************************************************************************/
+
+#define DBG_STREAM_NAME_MAX		30
+
+/*
+	Per-buffer control structure.
+*/
+typedef struct _DBG_STREAM_
+{
+	struct _DBG_STREAM_* psNext;
+	struct _DBG_STREAM_* psInitStream;
+	struct _DBG_STREAM_* psDeinitStream;
+	IMG_UINT32 ui32Flags;			/*!< flags (see DEBUG_FLAGS) */
+	IMG_PVOID  pvBase;
+	IMG_UINT32 ui32Size;
+	IMG_UINT32 ui32RPtr;
+	IMG_UINT32 ui32WPtr;
+
+	IMG_UINT32 ui32Marker;			/*!< Size marker for file splitting */
+
+	IMG_UINT32 ui32InitPhaseWOff;	/*!< snapshot offset for init phase end for follow-on pdump */
+
+	IMG_CHAR   szName[DBG_STREAM_NAME_MAX];			/* Give this a size, some compilers don't like [] */
+} DBG_STREAM;
+
+/* Check 4xDBG_STREAM will fit in one page */
+BLD_ASSERT(sizeof(DBG_STREAM)<<2<HOST_PAGESIZE,dbgdriv_c)
+
+/******************************************************************************
+ Global variables
+******************************************************************************/
+
+static PDBG_STREAM          g_psStreamList = 0;
+
+/* Mutex used to prevent UM threads (via the dbgdrv ioctl interface) and KM
+ * threads (from pvrsrvkm via the ExtDBG API) entering the debug driver core
+ * and changing the state of share data at the same time.
+ */
+IMG_VOID *                  g_pvAPIMutex=IMG_NULL;
+
+static IMG_UINT32			g_PDumpCurrentFrameNo = 0;
+
+DBGKM_SERVICE_TABLE g_sDBGKMServices =
+{
+	sizeof (DBGKM_SERVICE_TABLE),
+	ExtDBGDrivCreateStream,
+	ExtDBGDrivDestroyStream,
+	ExtDBGDrivWrite2,
+	ExtDBGDrivSetMarker,
+	ExtDBGDrivWaitForEvent,
+	ExtDBGDrivGetCtrlState,
+	ExtDBGDrivSetFrame
+};
+
+
+/***************************************************************************
+ Forward declarations
+***************************************************************************/
+
+IMG_BOOL   IMG_CALLCONV DBGDrivCreateStream(IMG_CHAR *pszName, IMG_UINT32 ui32Flags, IMG_UINT32 ui32Pages, IMG_HANDLE* phInit, IMG_HANDLE* phMain, IMG_HANDLE* phDeinit);
+IMG_VOID   IMG_CALLCONV DBGDrivDestroyStream(IMG_HANDLE hInit,IMG_HANDLE hMain, IMG_HANDLE hDeinit);
+IMG_VOID * IMG_CALLCONV DBGDrivFindStream(IMG_CHAR * pszName, IMG_BOOL bResetStream);
+IMG_UINT32 IMG_CALLCONV DBGDrivRead(PDBG_STREAM psStream, IMG_UINT32 ui32BufID, IMG_UINT32 ui32OutBufferSize,IMG_UINT8 *pui8OutBuf);
+IMG_VOID   IMG_CALLCONV DBGDrivSetCaptureMode(PDBG_STREAM psStream,IMG_UINT32 ui32Mode,IMG_UINT32 ui32Start,IMG_UINT32 ui32Stop,IMG_UINT32 ui32SampleRate);
+IMG_UINT32 IMG_CALLCONV DBGDrivWrite2(PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize);
+IMG_VOID   IMG_CALLCONV DBGDrivSetMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker);
+IMG_UINT32 IMG_CALLCONV DBGDrivGetMarker(PDBG_STREAM psStream);
+IMG_VOID   IMG_CALLCONV DBGDrivWaitForEvent(DBG_EVENT eEvent);
+IMG_UINT32 IMG_CALLCONV DBGDrivGetCtrlState(PDBG_STREAM psStream, IMG_UINT32 ui32StateID);
+IMG_UINT32 IMG_CALLCONV DBGDrivGetFrame(void);
+IMG_VOID   IMG_CALLCONV DBGDrivSetFrame(IMG_UINT32 ui32Frame);
+IMG_VOID   DestroyAllStreams(IMG_VOID);
+
+/* Static function declarations */
+static IMG_UINT32 SpaceInStream(PDBG_STREAM psStream);
+static IMG_BOOL ExpandStreamBuffer(PDBG_STREAM psStream, IMG_UINT32 ui32NewSize);
+static IMG_VOID InvalidateAllStreams(IMG_VOID);
+
+
+/*****************************************************************************
+ Code
+*****************************************************************************/
+
+/*!
+ @name	ExtDBGDrivCreateStream
+ */
+IMG_BOOL IMG_CALLCONV ExtDBGDrivCreateStream(IMG_CHAR *pszName, IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, IMG_HANDLE* phInit, IMG_HANDLE* phMain, IMG_HANDLE* phDeinit)
+{
+	IMG_BOOL pvRet;
+
+	/* Aquire API Mutex */
+	HostAquireMutex(g_pvAPIMutex);
+
+	pvRet=DBGDrivCreateStream(pszName, ui32Flags, ui32Size, phInit, phMain, phDeinit);
+
+	/* Release API Mutex */
+	HostReleaseMutex(g_pvAPIMutex);
+
+	return pvRet;
+}
+
+/*!
+ @name	ExtDBGDrivDestroyStream
+ */
+IMG_VOID IMG_CALLCONV ExtDBGDrivDestroyStream(IMG_HANDLE hInit,IMG_HANDLE hMain, IMG_HANDLE hDeinit)
+{
+	/* Aquire API Mutex */
+	HostAquireMutex(g_pvAPIMutex);
+
+	DBGDrivDestroyStream(hInit, hMain, hDeinit);
+
+	/* Release API Mutex */
+	HostReleaseMutex(g_pvAPIMutex);
+
+	return;
+}
+
+/*!
+ @name	ExtDBGDrivFindStream
+ */
+IMG_VOID * IMG_CALLCONV ExtDBGDrivFindStream(IMG_CHAR * pszName, IMG_BOOL bResetStream)
+{
+	IMG_VOID *	pvRet;
+
+	/* Aquire API Mutex */
+	HostAquireMutex(g_pvAPIMutex);
+
+	pvRet=DBGDrivFindStream(pszName, bResetStream);
+	if (pvRet == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "ExtDBGDrivFindStream: Stream not found"));
+	}
+
+
+	/* Release API Mutex */
+	HostReleaseMutex(g_pvAPIMutex);
+
+	return pvRet;
+}
+
+/*!
+ @name	ExtDBGDrivRead
+ */
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivRead(PDBG_STREAM psStream, IMG_UINT32 ui32BufID, IMG_UINT32 ui32OutBuffSize,IMG_UINT8 * pui8OutBuf)
+{
+	IMG_UINT32 ui32Ret;
+
+	/* Aquire API Mutex */
+	HostAquireMutex(g_pvAPIMutex);
+
+	ui32Ret=DBGDrivRead(psStream, ui32BufID, ui32OutBuffSize, pui8OutBuf);
+
+	/* Release API Mutex */
+	HostReleaseMutex(g_pvAPIMutex);
+
+	return ui32Ret;
+}
+
+/*!
+ @name	ExtDBGDrivWrite2
+ */
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivWrite2(PDBG_STREAM psStream,IMG_UINT8 * pui8InBuf,IMG_UINT32 ui32InBuffSize)
+{
+	IMG_UINT32	ui32Ret;
+
+	/* Aquire API Mutex */
+	HostAquireMutex(g_pvAPIMutex);
+
+	ui32Ret=DBGDrivWrite2(psStream, pui8InBuf, ui32InBuffSize);
+
+	/* Release API Mutex */
+	HostReleaseMutex(g_pvAPIMutex);
+
+	return ui32Ret;
+}
+
+/*!
+ @name	ExtDBGDrivSetMarker
+ */
+IMG_VOID IMG_CALLCONV ExtDBGDrivSetMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker)
+{
+	/* Aquire API Mutex */
+	HostAquireMutex(g_pvAPIMutex);
+
+	DBGDrivSetMarker(psStream, ui32Marker);
+
+	/* Release API Mutex */
+	HostReleaseMutex(g_pvAPIMutex);
+
+	return;
+}
+
+/*!
+ @name	ExtDBGDrivGetMarker
+ */
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetMarker(PDBG_STREAM psStream)
+{
+	IMG_UINT32	ui32Marker;
+
+	/* Aquire API Mutex */
+	HostAquireMutex(g_pvAPIMutex);
+
+	ui32Marker = DBGDrivGetMarker(psStream);
+
+	/* Release API Mutex */
+	HostReleaseMutex(g_pvAPIMutex);
+
+	return ui32Marker;
+}
+
+/*!
+ @name	ExtDBGDrivWaitForEvent
+ */
+IMG_VOID IMG_CALLCONV ExtDBGDrivWaitForEvent(DBG_EVENT eEvent)
+{
+#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
+	DBGDrivWaitForEvent(eEvent);
+#else	/* defined(SUPPORT_DBGDRV_EVENT_OBJECTS) */
+	PVR_UNREFERENCED_PARAMETER(eEvent);				/* PRQA S 3358 */
+#endif	/* defined(SUPPORT_DBGDRV_EVENT_OBJECTS) */
+}
+
+
+/*!
+ @name	ExtDBGDrivGetCtrlState
+ */
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetCtrlState(PDBG_STREAM psStream, IMG_UINT32 ui32StateID)
+{
+	IMG_UINT32 ui32State = 0;
+
+	/* Aquire API Mutex */
+	HostAquireMutex(g_pvAPIMutex);
+
+	ui32State = DBGDrivGetCtrlState(psStream, ui32StateID);
+
+	/* Release API Mutex */
+	HostReleaseMutex(g_pvAPIMutex);
+
+	return ui32State;
+}
+
+/*!
+ @name	ExtDBGDrivGetFrame
+ */
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetFrame(void)
+{
+	IMG_UINT32 ui32Frame = 0;
+
+	/* Aquire API Mutex */
+	HostAquireMutex(g_pvAPIMutex);
+
+	ui32Frame = DBGDrivGetFrame();
+
+	/* Release API Mutex */
+	HostReleaseMutex(g_pvAPIMutex);
+
+	return ui32Frame;
+}
+
+/*!
+ @name	ExtDBGDrivGetCtrlState
+ */
+IMG_VOID IMG_CALLCONV ExtDBGDrivSetFrame(IMG_UINT32 ui32Frame)
+{
+	/* Aquire API Mutex */
+	HostAquireMutex(g_pvAPIMutex);
+
+	DBGDrivSetFrame(ui32Frame);
+
+	/* Release API Mutex */
+	HostReleaseMutex(g_pvAPIMutex);
+
+	return;
+}
+
+
+
+/*!****************************************************************************
+ @name		AtoI
+ @brief		Returns the integer value of a decimal string
+ @param		szIn - String with hexadecimal value
+ @return	IMG_UINT32 integer value, 0 if string is null or not valid
+				Based on Max`s one, now copes with (only) hex ui32ords, upper or lower case a-f.
+*****************************************************************************/
+IMG_UINT32 AtoI(IMG_CHAR *szIn)
+{
+	IMG_INT		iLen = 0;
+	IMG_UINT32	ui32Value = 0;
+	IMG_UINT32	ui32Digit=1;
+	IMG_UINT32	ui32Base=10;
+	IMG_INT		iPos;
+	IMG_CHAR	bc;
+
+	//get len of string
+	while (szIn[iLen] > 0)
+	{
+		iLen ++;
+	}
+
+	//nothing to do
+	if (iLen == 0)
+	{
+		return (0);
+	}
+
+	/* See if we have an 'x' or 'X' before the number to make it a hex number */
+	iPos=0;
+	while (szIn[iPos] == '0')
+	{
+		iPos++;
+	}
+	if (szIn[iPos] == '\0')
+	{
+		return 0;
+	}
+	if (szIn[iPos] == 'x' || szIn[iPos] == 'X')
+	{
+		ui32Base=16;
+		szIn[iPos]='0';
+	}
+
+	//go through string from right (least significant) to left
+	for (iPos = iLen - 1; iPos >= 0; iPos --)
+	{
+		bc = szIn[iPos];
+
+		if ( (bc >= 'a') && (bc <= 'f') && ui32Base == 16)			//handle lower case a-f
+		{
+			bc -= 'a' - 0xa;
+		}
+		else
+		if ( (bc >= 'A') && (bc <= 'F') && ui32Base == 16)			//handle upper case A-F
+		{
+			bc -= 'A' - 0xa;
+		}
+		else
+		if ((bc >= '0') && (bc <= '9'))				//if char out of range, return 0
+		{
+			bc -= '0';
+		}
+		else
+			return (0);
+
+		ui32Value += (IMG_UINT32)bc  * ui32Digit;
+
+		ui32Digit = ui32Digit * ui32Base;
+	}
+	return (ui32Value);
+}
+
+
+/*!****************************************************************************
+ @name		StreamValid
+ @brief		Validates supplied debug buffer.
+ @param		psStream - debug stream
+ @return	true if valid
+*****************************************************************************/
+static IMG_BOOL StreamValid(PDBG_STREAM psStream)
+{
+	PDBG_STREAM	psThis;
+
+	psThis = g_psStreamList;
+
+	while (psThis)
+	{
+		if (psStream && ((psThis == psStream) ||
+						(psThis->psInitStream == psStream) ||
+						(psThis->psDeinitStream == psStream)) )
+		{
+			return(IMG_TRUE);
+		}
+		else
+		{
+			psThis = psThis->psNext;
+		}
+	}
+
+	return(IMG_FALSE);
+}
+
+
+/*!****************************************************************************
+ @name		StreamValidForRead
+ @brief		Validates supplied debug buffer for read op.
+ @param		psStream - debug stream
+ @return	true if readable
+*****************************************************************************/
+static IMG_BOOL StreamValidForRead(PDBG_STREAM psStream)
+{
+	if( StreamValid(psStream) &&
+		((psStream->ui32Flags & DEBUG_FLAGS_WRITEONLY) == 0) )
+	{
+		return(IMG_TRUE);
+	}
+
+	return(IMG_FALSE);
+}
+
+/*!****************************************************************************
+ @name		StreamValidForWrite
+ @brief		Validates supplied debug buffer for write op.
+ @param		psStream - debug stream
+ @return	true if writable
+*****************************************************************************/
+static IMG_BOOL StreamValidForWrite(PDBG_STREAM psStream)
+{
+	if( StreamValid(psStream) &&
+		((psStream->ui32Flags & DEBUG_FLAGS_READONLY) == 0) )
+	{
+		return(IMG_TRUE);
+	}
+
+	return(IMG_FALSE);
+}
+
+/*!****************************************************************************
+ @name		Write
+ @brief		Copies data from a buffer into selected stream. Stream size is fixed.
+ @param		psStream - stream for output
+ @param		pui8Data - input buffer
+ @param		ui32InBuffSize - size of input
+ @return	none
+*****************************************************************************/
+static IMG_VOID Write(PDBG_STREAM psStream,IMG_PUINT8 pui8Data,IMG_UINT32 ui32InBuffSize)
+{
+	/*
+		Split copy into two bits as necessary (if we're allowed to wrap).
+	*/
+	if ((psStream->ui32Flags & DEBUG_FLAGS_CIRCULAR) == 0)
+	{
+		PVR_ASSERT( (psStream->ui32WPtr + ui32InBuffSize) < psStream->ui32Size );
+	}
+
+	if ((psStream->ui32WPtr + ui32InBuffSize) > psStream->ui32Size)
+	{
+		/* Yes we need two bits, calculate their sizes */
+		IMG_UINT32 ui32B1 = psStream->ui32Size - psStream->ui32WPtr;
+		IMG_UINT32 ui32B2 = ui32InBuffSize - ui32B1;
+
+		/* Copy first block to current location */
+		HostMemCopy((IMG_PVOID)((IMG_UINTPTR_T)psStream->pvBase + psStream->ui32WPtr),
+				(IMG_PVOID) pui8Data,
+				ui32B1);
+
+		/* Copy second block to start of buffer */
+		HostMemCopy(psStream->pvBase,
+				(IMG_PVOID)(pui8Data + ui32B1),
+				ui32B2);
+
+		/* Set pointer to be the new end point */
+		psStream->ui32WPtr = ui32B2;
+	}
+	else
+	{	/* Can fit block in single chunk */
+		HostMemCopy((IMG_PVOID)((IMG_UINTPTR_T)psStream->pvBase + psStream->ui32WPtr),
+				(IMG_PVOID) pui8Data,
+				ui32InBuffSize);
+
+		psStream->ui32WPtr += ui32InBuffSize;
+
+		if (psStream->ui32WPtr == psStream->ui32Size)
+		{
+			psStream->ui32WPtr = 0;
+		}
+	}
+}
+
+
+/*!****************************************************************************
+ @name		WriteExpandingBuffer
+ @brief		Copies data from a buffer into selected stream. Stream size may be expandable.
+ @param		psStream - stream for output
+ @param		pui8InBuf - input buffer
+ @param		ui32InBuffSize - size of input
+ @return	bytes copied
+*****************************************************************************/
+static IMG_UINT32 WriteExpandingBuffer(PDBG_STREAM psStream,IMG_UINT8 * pui8InBuf,IMG_UINT32 ui32InBuffSize)
+{
+	IMG_UINT ui32Space;
+
+	/*
+		How much space have we got in the buffer ?
+	*/
+	ui32Space = SpaceInStream(psStream);
+
+	/*
+		Check if we can expand the buffer 
+	*/
+	if (psStream->ui32Flags & DEBUG_FLAGS_NO_BUF_EXPANDSION)
+	{
+		/*
+			Don't do anything if we've got less that 32 ui8tes of space and
+			we're not allowing expansion of buffer space...
+		*/
+		if (ui32Space < 32)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "WriteExpandingBuffer: buffer %p is full and isn't expandable", psStream));
+			return(0);
+		}
+	}
+	else
+	{
+		if ((ui32Space < 32) || (ui32Space <= (ui32InBuffSize + 4)))
+		{
+			IMG_UINT32	ui32NewBufSize;
+
+			/*
+				Find new buffer size, double the current size or increase by 1MB
+			*/
+			ui32NewBufSize = MIN(psStream->ui32Size<<1,psStream->ui32Size+(1<<20));
+			ui32NewBufSize = MIN(ui32NewBufSize, (16<<20));
+
+			PVR_DPF((PVR_DBGDRIV_MESSAGE, "Expanding buffer size = %x, new size = %x",
+					psStream->ui32Size, ui32NewBufSize));
+
+			if (ui32InBuffSize > psStream->ui32Size)
+			{
+				ui32NewBufSize += ui32InBuffSize;
+				PVR_DPF((PVR_DBG_ERROR, "WriteExpandingBuffer: buffer %p is expanding by size of input buffer %u", psStream, ui32NewBufSize));
+			}
+
+			/* 
+				Attempt to expand the buffer 
+			*/
+			if ((ui32NewBufSize < psStream->ui32Size) ||
+					!ExpandStreamBuffer(psStream,ui32NewBufSize))
+			{
+				if (ui32Space < 32)
+				{
+					if((psStream->ui32Flags & DEBUG_FLAGS_CIRCULAR) != 0)
+					{
+						return(0);
+					}
+					else
+					{
+						/* out of memory */
+						PVR_DPF((PVR_DBG_ERROR, "WriteExpandingBuffer: Unable to expand %p. Out of memory.", psStream));
+						InvalidateAllStreams();
+						return (0xFFFFFFFFUL);
+					}
+				}
+			}
+
+			/* 
+				Recalc the space in the buffer 
+			*/
+			ui32Space = SpaceInStream(psStream);
+			PVR_DPF((PVR_DBGDRIV_MESSAGE, "Expanded buffer, free space = %x",
+					ui32Space));
+		}
+	}
+
+	/*
+		Only copy what we can..
+	*/
+	if (ui32Space <= (ui32InBuffSize + 4))
+	{
+		ui32InBuffSize = ui32Space - 4;
+	}
+
+	/*
+		Write the stuff...
+	*/
+	Write(psStream,pui8InBuf,ui32InBuffSize);
+
+#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
+	if (ui32InBuffSize)
+	{
+		HostSignalEvent(DBG_EVENT_STREAM_DATA);
+	}
+#endif
+	return(ui32InBuffSize);
+}
+
+/*****************************************************************************
+******************************************************************************
+******************************************************************************
+ THE ACTUAL FUNCTIONS
+******************************************************************************
+******************************************************************************
+*****************************************************************************/
+
+static IMG_VOID DBGDrivSetStreamName(PDBG_STREAM psStream,
+									 IMG_CHAR* pszBase,
+									 IMG_CHAR* pszExt)
+{
+	IMG_CHAR* pCh = psStream->szName;
+	IMG_CHAR* pChEnd = psStream->szName+DBG_STREAM_NAME_MAX-8;
+	IMG_CHAR* pSrcCh;
+	IMG_CHAR* pSrcChEnd;
+
+	for (pSrcCh = pszBase, pSrcChEnd = pszBase+strlen(pszBase);
+			(pSrcCh < pSrcChEnd) && (pCh < pChEnd) ;
+			pSrcCh++, pCh++)
+	{
+		*pCh = *pSrcCh;
+	}
+
+	for (pSrcCh = pszExt, pSrcChEnd = pszExt+strlen(pszExt);
+			(pSrcCh < pSrcChEnd) && (pCh < pChEnd) ;
+			pSrcCh++, pCh++)
+	{
+		*pCh = *pSrcCh;
+	}
+
+	*pCh = '\0';
+}
+
+/*!****************************************************************************
+ @name		DBGDrivCreateStream
+ @brief		Creates a pdump/debug stream
+ @param		pszName - stream name
+ @param		ui32Flags - output flags, text stream bit is set for pdumping
+ @param		ui32Size - size of stream buffer in pages
+ @return	none
+*****************************************************************************/
+IMG_BOOL IMG_CALLCONV DBGDrivCreateStream(IMG_CHAR *pszName,
+                                          IMG_UINT32 ui32Flags,
+                                          IMG_UINT32 ui32Size,
+                                          IMG_HANDLE* phInit,
+                                          IMG_HANDLE* phMain,
+                                          IMG_HANDLE* phDeinit)
+{
+	IMG_BOOL            bUseNonPagedMem4Buffers = ((ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0);
+	PDBG_STREAM         psStream = IMG_NULL;
+	PDBG_STREAM	        psInitStream = IMG_NULL;
+	PDBG_STREAM         psStreamDeinit = IMG_NULL;
+	IMG_VOID*           pvBase = IMG_NULL;
+
+	/*
+		If we already have a buffer using this name just return
+		its handle.
+	*/
+	psStream = (PDBG_STREAM) DBGDrivFindStream(pszName, IMG_FALSE);
+	if (psStream)
+	{
+		*phInit = psStream->psInitStream;
+		*phMain = psStream;
+		*phDeinit = psStream->psDeinitStream;
+		return IMG_TRUE;
+	}
+
+	/*
+		Allocate memory for control structures
+	*/
+	psStream = HostNonPageablePageAlloc(1);
+	if	(!psStream)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"DBGDriv: Couldn't alloc control structs\n\r"));
+		goto errCleanup;
+	}
+	psInitStream = psStream+1;
+	psStreamDeinit = psStream+2;
+
+
+	/* Allocate memory for Main buffer */
+	psStream->pvBase = IMG_NULL;
+	if (bUseNonPagedMem4Buffers)
+	{
+		pvBase = HostNonPageablePageAlloc(ui32Size);
+	}
+	else
+	{
+		pvBase = HostPageablePageAlloc(ui32Size);
+	}
+
+	if (!pvBase)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"DBGDriv: Couldn't alloc Stream buffer\n\r"));
+		goto errCleanup;
+	}
+
+	/*
+		Setup debug buffer state.
+	*/
+	psStream->psNext = 0;
+	psStream->pvBase = pvBase;
+	psStream->ui32Flags = ui32Flags | DEBUG_FLAGS_CIRCULAR;
+	psStream->ui32Size = ui32Size * HOST_PAGESIZE;
+	psStream->ui32RPtr = 0;
+	psStream->ui32WPtr = 0;
+	psStream->ui32Marker = 0;
+	psStream->ui32InitPhaseWOff = 0;
+	DBGDrivSetStreamName(psStream, pszName, "");
+	PVR_DPF((PVR_DBG_MESSAGE,"DBGDriv: Created stream with deinit name (%s)\n\r", psStream->szName));
+
+	/* Allocate memory for Init buffer */
+	psInitStream->pvBase = IMG_NULL;
+	if (bUseNonPagedMem4Buffers)
+	{
+		pvBase = HostNonPageablePageAlloc(ui32Size);
+	}
+	else
+	{
+		pvBase = HostPageablePageAlloc(ui32Size);
+	}
+
+	if (!pvBase)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"DBGDriv: Couldn't alloc InitStream buffer\n\r"));
+		goto errCleanup;
+	}
+
+	/* Initialise the stream for the Init phase */
+	psInitStream->psNext = psInitStream->psInitStream = psInitStream->psDeinitStream = IMG_NULL;
+	psInitStream->ui32Flags = ui32Flags;
+	psInitStream->pvBase = pvBase;
+	psInitStream->ui32Size = ui32Size * HOST_PAGESIZE;
+	psInitStream->ui32RPtr = 0;
+	psInitStream->ui32WPtr = 0;
+	psInitStream->ui32Marker = 0;
+	psInitStream->ui32InitPhaseWOff = 0;
+	DBGDrivSetStreamName(psInitStream, pszName, "_Init");
+	PVR_DPF((PVR_DBG_MESSAGE,"DBGDriv: Created stream with init name (%s)\n\r", psInitStream->szName));
+	psStream->psInitStream = psInitStream;
+
+	/* Allocate memory for Deinit buffer */
+	psStreamDeinit->pvBase = IMG_NULL;
+	if (bUseNonPagedMem4Buffers)
+	{
+		pvBase = HostNonPageablePageAlloc(1);
+	}
+	else
+	{
+		pvBase = HostPageablePageAlloc(1);
+	}
+
+	if (!pvBase)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"DBGDriv: Couldn't alloc DeinitStream buffer\n\r"));
+		goto errCleanup;
+	}
+
+	/* Initialise the stream for the Deinit phase */
+	psStreamDeinit->psNext = psStreamDeinit->psInitStream = psStreamDeinit->psDeinitStream = IMG_NULL;
+	psStreamDeinit->pvBase = pvBase;
+	psStreamDeinit->ui32Flags = ui32Flags;
+	psStreamDeinit->ui32Size = HOST_PAGESIZE;
+	psStreamDeinit->ui32RPtr = 0;
+	psStreamDeinit->ui32WPtr = 0;
+	psStreamDeinit->ui32Marker = 0;
+	psStreamDeinit->ui32InitPhaseWOff = 0;
+	DBGDrivSetStreamName(psStreamDeinit, pszName, "_Deinit");
+	PVR_DPF((PVR_DBG_MESSAGE,"DBGDriv: Created stream with deinit name (%s)\n\r", psStreamDeinit->szName));
+
+	psStream->psDeinitStream = psStreamDeinit;
+
+	/*
+		Insert into list.
+	*/
+	psStream->psNext = g_psStreamList;
+	g_psStreamList = psStream;
+
+	AddSIDEntry(psStream);
+	
+	*phInit = psStream->psInitStream;
+	*phMain = psStream;
+	*phDeinit = psStream->psDeinitStream;
+
+	return IMG_TRUE;
+
+errCleanup:
+	if (bUseNonPagedMem4Buffers)
+	{
+		if (psStream) HostNonPageablePageFree(psStream->pvBase);
+		if (psInitStream) HostNonPageablePageFree(psInitStream->pvBase);
+		if (psStreamDeinit) HostNonPageablePageFree(psStreamDeinit->pvBase);
+	}
+	else
+	{
+		if (psStream) HostPageablePageFree(psStream->pvBase);
+		if (psInitStream) HostPageablePageFree(psInitStream->pvBase);
+		if (psStreamDeinit) HostPageablePageFree(psStreamDeinit->pvBase);
+	}
+	HostNonPageablePageFree(psStream);
+	psStream = psInitStream = psStreamDeinit = IMG_NULL;
+	return IMG_FALSE;
+}
+
+/*!****************************************************************************
+ @name		DBGDrivDestroyStream
+ @brief		Delete a stream and free its memory
+ @param		psStream - stream to be removed
+ @return	none
+*****************************************************************************/
+IMG_VOID IMG_CALLCONV DBGDrivDestroyStream(IMG_HANDLE hInit,IMG_HANDLE hMain, IMG_HANDLE hDeinit)
+{
+	PDBG_STREAM psStreamInit = (PDBG_STREAM) hInit;
+	PDBG_STREAM psStream = (PDBG_STREAM) hMain;
+	PDBG_STREAM	psStreamDeinit = (PDBG_STREAM) hDeinit;
+	PDBG_STREAM	psStreamThis;
+	PDBG_STREAM	psStreamPrev;
+
+	PVR_DPF((PVR_DBG_MESSAGE, "DBGDriv: Destroying stream %s\r\n", psStream->szName ));
+
+	/*
+		Validate buffer.
+	*/
+	if (!StreamValid(psStream))
+	{
+		return;
+	}
+
+	RemoveSIDEntry(psStream);
+	
+	/*
+		Remove from linked list.
+	*/
+	psStreamThis = g_psStreamList;
+	psStreamPrev = 0;
+
+	while (psStreamThis)
+	{
+		if (psStreamThis == psStream)
+		{
+			if (psStreamPrev)
+			{
+				psStreamPrev->psNext = psStreamThis->psNext;
+			}
+			else
+			{
+				g_psStreamList = psStreamThis->psNext;
+			}
+
+			psStreamThis = 0;
+		}
+		else
+		{
+			psStreamPrev = psStreamThis;
+			psStreamThis = psStreamThis->psNext;
+		}
+	}
+
+	/*
+		And free its memory.
+	*/
+	if ((psStream->ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0)
+	{
+		HostNonPageablePageFree(psStream->pvBase);
+		HostNonPageablePageFree(psStreamInit->pvBase);
+		HostNonPageablePageFree(psStreamDeinit->pvBase);
+	}
+	else
+	{
+		HostPageablePageFree(psStream->pvBase);
+		HostPageablePageFree(psStreamInit->pvBase);
+		HostPageablePageFree(psStreamDeinit->pvBase);
+	}
+
+	/* Free the shared page used for the three stream tuple */
+	HostNonPageablePageFree(psStream);
+	psStream = psStreamInit = psStreamDeinit = IMG_NULL;
+
+	if (g_psStreamList == 0)
+	{
+		PVR_DPF((PVR_DBG_MESSAGE,"DBGDriv: Stream list now empty" ));
+	}
+
+	return;
+}
+
+/*!****************************************************************************
+ @name		DBGDrivFindStream
+ @brief		Finds/resets a named stream
+ @param		pszName - stream name
+ @param		bResetStream - whether to reset the stream, e.g. to end pdump init phase
+ @return	none
+*****************************************************************************/
+IMG_VOID * IMG_CALLCONV DBGDrivFindStream(IMG_CHAR * pszName, IMG_BOOL bResetStream)
+{
+	PDBG_STREAM	psStream;
+	PDBG_STREAM	psThis;
+	IMG_UINT32	ui32Off;
+	IMG_BOOL	bAreSame;
+
+	psStream = 0;
+
+	PVR_DPF((PVR_DBGDRIV_MESSAGE, "PDump client connecting to %s %s",
+			pszName,
+			(bResetStream == IMG_TRUE) ? "with reset" : "no reset"));
+
+	/*
+		Scan buffer names for supplied one.
+	*/
+	for (psThis = g_psStreamList; psThis != IMG_NULL; psThis = psThis->psNext)
+	{
+		bAreSame = IMG_TRUE;
+		ui32Off = 0;
+
+		if (strlen(psThis->szName) == strlen(pszName))
+		{
+			while ((ui32Off < DBG_STREAM_NAME_MAX) && (psThis->szName[ui32Off] != 0) && (pszName[ui32Off] != 0) && bAreSame)
+			{
+				if (psThis->szName[ui32Off] != pszName[ui32Off])
+				{
+					bAreSame = IMG_FALSE;
+				}
+
+				ui32Off++;
+			}
+		}
+		else
+		{
+			bAreSame = IMG_FALSE;
+		}
+
+		if (bAreSame)
+		{
+			psStream = psThis;
+			break;
+		}
+	}
+
+	if(psStream)
+	{
+		psStream->psInitStream->ui32RPtr = 0;
+		psStream->psDeinitStream->ui32RPtr = 0;
+		psStream->ui32RPtr = 0;
+		if (bResetStream)
+		{
+			/* This will erase any data written to the main stream 
+			 * before the client starts. */
+			psStream->ui32WPtr = 0;
+		}
+		psStream->ui32Marker = psStream->psInitStream->ui32Marker = 0;
+
+
+		/* mark init stream to prevent further reading by pdump client */
+		/* Check for possible race condition */
+		psStream->psInitStream->ui32InitPhaseWOff = psStream->psInitStream->ui32WPtr;
+
+		PVR_DPF((PVR_DBGDRIV_MESSAGE, "Set %s client marker bo %x",
+				psStream->szName,
+				psStream->psInitStream->ui32InitPhaseWOff));
+	}
+
+	return((IMG_VOID *) psStream);
+}
+
+static IMG_VOID IMG_CALLCONV DBGDrivInvalidateStream(PDBG_STREAM psStream)
+{
+	IMG_CHAR pszErrorMsg[] = "**OUTOFMEM\n";
+	IMG_UINT32 ui32Space;
+	IMG_UINT32 ui32Off = 0;
+	IMG_UINT32 ui32WPtr = psStream->ui32WPtr;
+	IMG_PUINT8 pui8Buffer = (IMG_UINT8 *) psStream->pvBase;
+	
+	PVR_DPF((PVR_DBG_ERROR, "DBGDrivInvalidateStream: An error occurred for stream %s", psStream->szName ));
+
+	/*
+		Validate buffer.
+	*/
+	/*
+	if (!StreamValid(psStream))
+	{
+		return;
+	}
+*/
+	/* Write what we can of the error message */
+	ui32Space = SpaceInStream(psStream);
+
+	/* Make sure there's space for termination character */
+	if(ui32Space > 0)
+	{
+		ui32Space--;
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR, "DBGDrivInvalidateStream: Buffer full."));
+	}
+
+	while((pszErrorMsg[ui32Off] != 0) && (ui32Off < ui32Space))
+	{
+		pui8Buffer[ui32WPtr] = (IMG_UINT8)pszErrorMsg[ui32Off];
+		ui32Off++;
+		ui32WPtr++;
+	}
+	pui8Buffer[ui32WPtr++] = '\0';
+	psStream->ui32WPtr = ui32WPtr;
+
+	/* Buffer will accept no more params from Services/client driver */
+	psStream->ui32Flags |= DEBUG_FLAGS_READONLY;
+}
+
+/*!****************************************************************************
+ @name		InvalidateAllStreams
+ @brief		invalidate all streams in list
+ @return	none
+*****************************************************************************/
+static IMG_VOID InvalidateAllStreams(IMG_VOID)
+{
+	PDBG_STREAM psStream = g_psStreamList;
+	while (psStream != IMG_NULL)
+	{
+		DBGDrivInvalidateStream(psStream);
+		psStream = psStream->psNext;
+	}
+	return;
+}
+
+/*!****************************************************************************
+ @name		DBGDrivWrite2
+ @brief		Copies data from a buffer into selected (expandable) stream.
+ @param		psStream - stream for output
+ @param		pui8InBuf - input buffer
+ @param		ui32InBuffSize - size of input
+ @return	bytes copied, 0 if recoverable error, -1 if unrecoverable error
+*****************************************************************************/
+IMG_UINT32 IMG_CALLCONV DBGDrivWrite2(PDBG_STREAM psStream,IMG_UINT8 * pui8InBuf,IMG_UINT32 ui32InBuffSize)
+{
+
+	/*
+		Validate buffer.
+	*/
+	if (!StreamValidForWrite(psStream))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "DBGDrivWrite2: stream not valid"));
+		return(0xFFFFFFFFUL);
+	}
+
+	PVR_DPF((PVR_DBGDRIV_MESSAGE, "Recv(exp) %d b for %s: Roff = %x, WOff = %x",
+			ui32InBuffSize,
+			psStream->szName,
+			psStream->ui32RPtr,
+			psStream->ui32WPtr));
+
+	return( WriteExpandingBuffer(psStream, pui8InBuf, ui32InBuffSize) );
+}
+
+/*!****************************************************************************
+ @name		DBGDrivRead
+ @brief		Read from debug driver buffers
+ @param		psMainStream - stream
+ @param		ui32BufID - on of the DEBUG_READ_BUFID flags to indicate which buffer
+ @param		ui32OutBuffSize - available space in client buffer
+ @param		pui8OutBuf - output buffer
+ @return	bytes read, 0 if failure occurred
+*****************************************************************************/
+IMG_UINT32 IMG_CALLCONV DBGDrivRead(PDBG_STREAM psMainStream, IMG_UINT32 ui32BufID, IMG_UINT32 ui32OutBuffSize,IMG_UINT8 * pui8OutBuf)
+{
+	IMG_UINT32 ui32Data;
+	DBG_STREAM *psStream;
+
+	/*
+		Validate buffer.
+	*/
+	if (!StreamValidForRead(psMainStream))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "DBGDrivRead: buffer %p is invalid", psMainStream));
+		return(0);
+	}
+
+	if(ui32BufID == DEBUG_READ_BUFID_INIT)
+	{
+		psStream = psMainStream->psInitStream;
+	}
+	else if (ui32BufID == DEBUG_READ_BUFID_DEINIT)
+	{
+		psStream = psMainStream->psDeinitStream;
+	}
+	else
+	{
+		psStream = psMainStream;
+	}
+
+	/* Don't read beyond the init phase marker point */
+	if (psStream->ui32RPtr == psStream->ui32WPtr ||
+		((psStream->ui32InitPhaseWOff > 0) &&
+		 (psStream->ui32RPtr >= psStream->ui32InitPhaseWOff)) )
+	{
+		return(0);
+	}
+
+	/*
+		Get amount of data in buffer.
+	*/
+	if (psStream->ui32RPtr <= psStream->ui32WPtr)
+	{
+		ui32Data = psStream->ui32WPtr - psStream->ui32RPtr;
+	}
+	else
+	{
+		ui32Data = psStream->ui32WPtr + (psStream->ui32Size - psStream->ui32RPtr);
+	}
+
+	/*
+		Don't read beyond the init phase marker point
+	*/
+	if ((psStream->ui32InitPhaseWOff > 0) &&
+		(psStream->ui32InitPhaseWOff < psStream->ui32WPtr))
+	{
+		ui32Data = psStream->ui32InitPhaseWOff - psStream->ui32RPtr;
+	}
+
+	/*
+		Only transfer what target buffer can handle.
+	*/
+	if (ui32Data > ui32OutBuffSize)
+	{
+		ui32Data = ui32OutBuffSize;
+	}
+
+	PVR_DPF((PVR_DBGDRIV_MESSAGE, "Send %x b from %s: Roff = %x, WOff = %x",
+			ui32Data,
+			psStream->szName,
+			psStream->ui32RPtr,
+			psStream->ui32WPtr));
+
+	/*
+		Split copy into two bits or one depending on W/R position.
+	*/
+	if ((psStream->ui32RPtr + ui32Data) > psStream->ui32Size)
+	{	/* Calc block 1 and block 2 sizes */
+		IMG_UINT32 ui32B1 = psStream->ui32Size - psStream->ui32RPtr;
+		IMG_UINT32 ui32B2 = ui32Data - ui32B1;
+
+		/* Copy up to end of circular buffer */
+		HostMemCopy((IMG_VOID *) pui8OutBuf,
+				(IMG_VOID *)((IMG_UINTPTR_T)psStream->pvBase + psStream->ui32RPtr),
+				ui32B1);
+
+		/* Copy from start of circular buffer */
+		HostMemCopy((IMG_VOID *)(pui8OutBuf + ui32B1),
+				psStream->pvBase,
+				ui32B2);
+
+		/* Update read pointer now that we've copied the data out */
+		psStream->ui32RPtr = ui32B2;
+	}
+	else
+	{	/* Copy data from wherever */
+		HostMemCopy((IMG_VOID *) pui8OutBuf,
+				(IMG_VOID *)((IMG_UINTPTR_T)psStream->pvBase + psStream->ui32RPtr),
+				ui32Data);
+
+		/* Update read pointer now that we've copied the data out */
+		psStream->ui32RPtr += ui32Data;
+
+		/* Check for wrapping */
+		if ((psStream->ui32RPtr != psStream->ui32WPtr) &&
+			(psStream->ui32RPtr >= psStream->ui32Size))
+		{
+			psStream->ui32RPtr = 0;
+		}
+	}
+
+	return(ui32Data);
+}
+
+/*!****************************************************************************
+ @name		DBGDrivSetMarker
+ @brief		Sets the marker in the stream to split output files
+ @param		psStream, ui32Marker
+ @return	nothing
+*****************************************************************************/
+IMG_VOID IMG_CALLCONV DBGDrivSetMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker)
+{
+	/*
+		Validate buffer
+	*/
+	if (!StreamValid(psStream))
+	{
+		return;
+	}
+
+	/* Called by PDump client to reset the marker to zero after a file split */
+	if ((ui32Marker == 0) && (psStream->ui32Marker == 0))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "DBGDrivSetMarker: Client resetting marker that is already zero!"));
+	}
+	/* Called by pvrsrvkm to set the marker to signal a file split is required */
+	if ((ui32Marker != 0) && (psStream->ui32Marker != 0))
+	{
+		/* In this case a previous split request is still outstanding. The
+		 * client has not yet actioned and acknowledged the previous
+		 * marker. This may be an error if the client does not catch-up and
+		 * the stream's written data is allowed to pass the max file
+		 * size again. If this happens the PDump is invalid as the offsets
+		 * from the script file will be incorrect.
+		 */
+		PVR_DPF((PVR_DBG_ERROR, "DBGDrivSetMarker: Server setting marker that is already set!"));
+	}
+	else
+	{
+		PVR_DPF((PVR_DBG_MESSAGE, "DBGDrivSetMarker: Setting stream split marker to %d (was %d)", ui32Marker, psStream->ui32Marker));
+	}
+
+	psStream->ui32Marker = ui32Marker;
+}
+
+/*!****************************************************************************
+ @name		DBGDrivGetMarker
+ @brief 	Gets the marker in the stream to split output files
+ @param	 	psStream - stream
+ @return	marker offset
+*****************************************************************************/
+IMG_UINT32 IMG_CALLCONV DBGDrivGetMarker(PDBG_STREAM psStream)
+{
+	/*
+		Validate buffer
+	*/
+	if (!StreamValid(psStream))
+	{
+		return 0;
+	}
+
+	return psStream->ui32Marker;
+}
+
+/*!****************************************************************************
+ @name		DBGDrivGetServiceTable
+ @brief		get jump table for Services driver
+ @return	pointer to jump table
+*****************************************************************************/
+IMG_PVOID IMG_CALLCONV DBGDrivGetServiceTable(IMG_VOID)
+{
+	return((IMG_PVOID)&g_sDBGKMServices);
+}
+
+
+#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
+/*!****************************************************************************
+ @name		DBGDrivWaitForEvent
+ @brief		waits for an event
+ @param		eEvent - debug driver event
+ @return	IMG_VOID
+*****************************************************************************/
+IMG_VOID IMG_CALLCONV DBGDrivWaitForEvent(DBG_EVENT eEvent)
+{
+	HostWaitForEvent(eEvent);
+}
+#endif
+
+/*	Use PVR_DPF() to avoid state messages in release build */
+#if defined(PVR_DISABLE_LOGGING) || !defined(DEBUG)
+#define PVR_LOG(...)
+#else
+
+extern IMG_VOID PVRSRVDebugPrintf(IMG_UINT32	ui32DebugLevel,
+						const IMG_CHAR*	pszFileName,
+						IMG_UINT32	ui32Line,
+						const IMG_CHAR*	pszFormat,
+						...	);
+/* Reproduce the PVR_LOG macro here but direct it to DPF */
+#define PVR_LOG(...)	PVRSRVDebugPrintf( DBGPRIV_CALLTRACE, __FILE__, __LINE__ , __VA_ARGS__);
+
+#endif
+
+
+/*!****************************************************************************
+ @name		DBGDrivGetCtrlState
+ @brief		Gets a state value from the debug driver or stream
+ @param		psStream - stream
+ @param		ui32StateID - state ID
+ @return	Nothing
+*****************************************************************************/
+IMG_UINT32 IMG_CALLCONV DBGDrivGetCtrlState(PDBG_STREAM psStream, IMG_UINT32 ui32StateID)
+{
+	/* Validate buffer */
+	if (!StreamValid(psStream))
+	{
+		return (0xFFFFFFFF);
+	}
+
+	/* Retrieve the state asked for */
+	switch (ui32StateID)
+	{
+	case DBG_GET_STATE_FLAG_IS_READONLY:
+		return ((psStream->ui32Flags & DEBUG_FLAGS_READONLY) != 0);
+		break;
+
+	case 0xFE: /* Dump the current stream state */
+		PVR_LOG("------ PDUMP DBGDriv: psStream( %p ) ( -- %s -- ) ui32Flags( %x )",
+				psStream, psStream->szName, psStream->ui32Flags);
+		PVR_LOG("------ PDUMP DBGDriv: psStream->pvBase( %p ) psStream->ui32Size( %u )",
+				psStream->pvBase, psStream->ui32Size);
+		PVR_LOG("------ PDUMP DBGDriv: psStream->ui32RPtr( %u ) psStream->ui32WPtr( %u )",
+				psStream->ui32RPtr, psStream->ui32WPtr);
+		PVR_LOG("------ PDUMP DBGDriv: psStream->ui32Marker( %u ) psStream->ui32InitPhaseWOff( %u )",
+				psStream->ui32Marker, psStream->ui32InitPhaseWOff);
+		if (psStream->psInitStream)
+		{
+			PVR_LOG("-------- PDUMP DBGDriv: psInitStream( %p ) ( -- %s -- ) ui32Flags( %x )",
+					psStream->psInitStream, psStream->psInitStream->szName, psStream->ui32Flags);
+			PVR_LOG("-------- PDUMP DBGDriv: psInitStream->pvBase( %p ) psInitStream->ui32Size( %u )",
+					psStream->psInitStream->pvBase, psStream->psInitStream->ui32Size);
+			PVR_LOG("-------- PDUMP DBGDriv: psInitStream->ui32RPtr( %u ) psInitStream->ui32WPtr( %u )",
+					psStream->psInitStream->ui32RPtr, psStream->psInitStream->ui32WPtr);
+			PVR_LOG("-------- PDUMP DBGDriv: psInitStream->ui32Marker( %u ) psInitStream->ui32InitPhaseWOff( %u ) ",
+					psStream->psInitStream->ui32Marker, psStream->psInitStream->ui32InitPhaseWOff);
+		}
+
+		break;
+
+	case 0xFF: /* Dump driver state not in a stream */
+		{
+			PVR_LOG("------ PDUMP DBGDriv: g_psStreamList( head %p ) g_pvAPIMutex( %p ) g_PDumpCurrentFrameNo( %u )", g_psStreamList, g_pvAPIMutex, g_PDumpCurrentFrameNo);
+		}
+		break;
+
+	default:
+		PVR_ASSERT(0);
+	}
+
+	return (0xFFFFFFFF);
+}
+
+IMG_UINT32 IMG_CALLCONV DBGDrivGetFrame(void)
+{
+	return g_PDumpCurrentFrameNo;
+}
+
+IMG_VOID IMG_CALLCONV DBGDrivSetFrame(IMG_UINT32 ui32Frame)
+{
+	g_PDumpCurrentFrameNo = ui32Frame;
+}
+
+
+/*!****************************************************************************
+ @name		ExpandStreamBuffer
+ @brief		allocates a new buffer when the current one is full
+ @param		psStream - stream
+ @param		ui32NewSize - new size
+ @return	IMG_TRUE - if allocation succeeded, IMG_FALSE - if not
+*****************************************************************************/
+static IMG_BOOL ExpandStreamBuffer(PDBG_STREAM psStream, IMG_UINT32 ui32NewSize)
+{
+	IMG_VOID *	pvNewBuf;
+	IMG_UINT32	ui32NewSizeInPages;
+	IMG_UINT32	ui32NewWOffset;
+	IMG_UINT32	ui32NewROffset;
+	IMG_UINT32	ui32SpaceInOldBuf;
+
+	/* 
+		First check new size is bigger than existing size 
+	*/
+	if (psStream->ui32Size >= ui32NewSize)
+	{
+		return IMG_FALSE;
+	}
+
+	/*
+		Calc space in old buffer 
+	*/
+	ui32SpaceInOldBuf = SpaceInStream(psStream);
+
+	/*
+		Allocate new buffer 
+	*/
+	ui32NewSizeInPages = ((ui32NewSize + 0xfffUL) & ~0xfffUL) / 4096UL;
+
+	if ((psStream->ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0)
+	{
+		pvNewBuf = HostNonPageablePageAlloc(ui32NewSizeInPages);
+	}
+	else
+	{
+		pvNewBuf = HostPageablePageAlloc(ui32NewSizeInPages);
+	}
+
+	if (pvNewBuf == IMG_NULL)
+	{
+		return IMG_FALSE;
+	}
+
+	if ((psStream->ui32Flags & DEBUG_FLAGS_CIRCULAR) != 0)
+	{
+		/*
+			Copy over old buffer to new one, we place data at start of buffer
+			even if Read offset is not at start of buffer
+		*/
+		if (psStream->ui32RPtr <= psStream->ui32WPtr)
+		{
+			/*
+				No wrapping of data so copy data to start of new buffer 
+			*/
+		HostMemCopy(pvNewBuf,
+					(IMG_VOID *)((IMG_UINTPTR_T)psStream->pvBase + psStream->ui32RPtr),
+					psStream->ui32WPtr - psStream->ui32RPtr);
+		}
+		else
+		{
+			IMG_UINT32	ui32FirstCopySize;
+	
+			/*
+				The data has wrapped around the buffer, copy beginning of buffer first 
+			*/
+			ui32FirstCopySize = psStream->ui32Size - psStream->ui32RPtr;
+	
+			HostMemCopy(pvNewBuf,
+					(IMG_VOID *)((IMG_UINTPTR_T)psStream->pvBase + psStream->ui32RPtr),
+					ui32FirstCopySize);
+	
+			/*
+				Now second half 
+			*/
+			HostMemCopy((IMG_VOID *)((IMG_UINTPTR_T)pvNewBuf + ui32FirstCopySize),
+					(IMG_VOID *)(IMG_PBYTE)psStream->pvBase,
+					psStream->ui32WPtr);
+		}
+		ui32NewROffset = 0;
+	}
+	else
+	{
+		/* Copy everything in the old buffer to the new one */
+		HostMemCopy(pvNewBuf, psStream->pvBase,	psStream->ui32WPtr);
+		ui32NewROffset = psStream->ui32RPtr;
+	}
+
+	/*
+		New Write offset is at end of data 
+	*/                                                        
+	ui32NewWOffset = psStream->ui32Size - ui32SpaceInOldBuf;
+
+	/*
+		Free old buffer 
+	*/
+	if ((psStream->ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0)
+	{
+		HostNonPageablePageFree(psStream->pvBase);
+	}
+	else
+	{
+		HostPageablePageFree(psStream->pvBase);
+	}
+
+	/*
+		Now set new params up 
+	*/
+	psStream->pvBase = pvNewBuf;
+	psStream->ui32RPtr = ui32NewROffset;
+	psStream->ui32WPtr = ui32NewWOffset;
+	psStream->ui32Size = ui32NewSizeInPages * 4096;
+
+	return IMG_TRUE;
+}
+
+/*!****************************************************************************
+ @name		SpaceInStream
+ @brief		remaining space in stream
+ @param		psStream - stream
+ @return	bytes remaining
+*****************************************************************************/
+static IMG_UINT32 SpaceInStream(PDBG_STREAM psStream)
+{
+	IMG_UINT32	ui32Space;
+
+	if ((psStream->ui32Flags & DEBUG_FLAGS_CIRCULAR) != 0)
+	{
+		/* Allow overwriting the buffer which was already read */
+		if (psStream->ui32RPtr > psStream->ui32WPtr)
+		{
+			ui32Space = psStream->ui32RPtr - psStream->ui32WPtr;
+		}
+		else
+		{
+			ui32Space = psStream->ui32RPtr + (psStream->ui32Size - psStream->ui32WPtr);
+		}
+	}
+	else
+	{
+		/* Don't overwrite anything */
+		ui32Space = psStream->ui32Size - psStream->ui32WPtr;
+	}
+
+	return ui32Space;
+}
+
+
+/*!****************************************************************************
+ @name		DestroyAllStreams
+ @brief		delete all streams in list
+ @return	none
+*****************************************************************************/
+IMG_VOID DestroyAllStreams(IMG_VOID)
+{
+	PDBG_STREAM psStream = g_psStreamList;
+	PDBG_STREAM psStreamToFree;
+
+	while (psStream != IMG_NULL)
+	{
+		psStreamToFree = psStream;
+		psStream = psStream->psNext;
+		DBGDrivDestroyStream(psStreamToFree->psInitStream, psStreamToFree, psStreamToFree->psDeinitStream);
+	}
+	g_psStreamList = IMG_NULL;
+	return;
+}
+
+/******************************************************************************
+ End of file (DBGDRIV.C)
+******************************************************************************/
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/tools/services/debug/dbgdriv/common/dbgdriv.h b/drivers/external_drivers/intel_media/graphics/rgx/tools/services/debug/dbgdriv/common/dbgdriv.h
new file mode 100644
index 0000000..6649e5c
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/tools/services/debug/dbgdriv/common/dbgdriv.h
@@ -0,0 +1,114 @@
+/*************************************************************************/ /*!
+@File
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _DBGDRIV_
+#define _DBGDRIV_
+
+/*****************************************************************************
+ The odd constant or two
+*****************************************************************************/
+
+#define DBGDRIV_VERSION 	0x100
+#define MAX_PROCESSES 		2
+#define BLOCK_USED			0x01
+#define BLOCK_LOCKED		0x02
+#define DBGDRIV_MONOBASE	0x000B0000
+
+
+extern IMG_VOID *	g_pvAPIMutex;
+
+/*****************************************************************************
+ Internal debug driver core functions
+*****************************************************************************/
+/* Called by WDDM debug driver win7/hostfunc.c */
+IMG_BOOL IMG_CALLCONV DBGDrivCreateStream(IMG_CHAR *pszName, IMG_UINT32 ui32Flags, IMG_UINT32 ui32Pages,
+											IMG_HANDLE* phInit, IMG_HANDLE* phMain, IMG_HANDLE* phDeinit);
+
+/* Called by Linux debug driver main.c to allow the API mutex lock to be used
+ * to protect the common IOCTL read buffer while avoiding deadlock in the Ext
+ * layer
+ */
+IMG_UINT32 IMG_CALLCONV DBGDrivRead(PDBG_STREAM psStream, IMG_UINT32 ui32BufID,
+									IMG_UINT32 ui32OutBufferSize,IMG_UINT8 *pui8OutBuf);
+IMG_UINT32 IMG_CALLCONV DBGDrivGetMarker(PDBG_STREAM psStream);
+
+/* Used in ioctl.c in DBGDIOCDrivGetServiceTable() which is called in WDDM PDump files */
+IMG_PVOID IMG_CALLCONV DBGDrivGetServiceTable(IMG_VOID);
+
+/* Used in WDDM version of debug driver win7/main.c */
+IMG_VOID DestroyAllStreams(IMG_VOID);
+
+/*****************************************************************************
+ Function prototypes
+*****************************************************************************/
+IMG_UINT32 AtoI(IMG_CHAR *szIn);
+
+IMG_VOID HostMemSet(IMG_VOID *pvDest,IMG_UINT8 ui8Value,IMG_UINT32 ui32Size);
+IMG_VOID HostMemCopy(IMG_VOID *pvDest,IMG_VOID *pvSrc,IMG_UINT32 ui32Size);
+
+/*****************************************************************************
+ Secure handle Function prototypes
+*****************************************************************************/
+IMG_SID PStream2SID(PDBG_STREAM psStream);
+PDBG_STREAM SID2PStream(IMG_SID hStream); 
+IMG_BOOL AddSIDEntry(PDBG_STREAM psStream);
+IMG_BOOL RemoveSIDEntry(PDBG_STREAM psStream);
+
+/*****************************************************************************
+ Declarations for IOCTL Service table and KM table entry points
+*****************************************************************************/
+IMG_BOOL   IMG_CALLCONV ExtDBGDrivCreateStream(IMG_CHAR *pszName, IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, IMG_HANDLE* phInit, IMG_HANDLE* phMain, IMG_HANDLE* phDeinit);
+IMG_VOID   IMG_CALLCONV ExtDBGDrivDestroyStream(IMG_HANDLE hInit, IMG_HANDLE hMain, IMG_HANDLE hDeinit);
+IMG_VOID * IMG_CALLCONV ExtDBGDrivFindStream(IMG_CHAR * pszName, IMG_BOOL bResetStream);
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivRead(PDBG_STREAM psStream, IMG_UINT32 ui32BufID, IMG_UINT32 ui32OutBuffSize,IMG_UINT8 *pui8OutBuf);
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivWrite2(PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize);
+IMG_VOID   IMG_CALLCONV ExtDBGDrivSetMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker);
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetMarker(PDBG_STREAM psStream);
+IMG_VOID   IMG_CALLCONV ExtDBGDrivWaitForEvent(DBG_EVENT eEvent);
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetCtrlState(PDBG_STREAM psStream, IMG_UINT32 ui32StateID);
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetFrame(void);
+IMG_VOID   IMG_CALLCONV ExtDBGDrivSetFrame(IMG_UINT32 ui32Frame);
+
+#endif
+
+/*****************************************************************************
+ End of file (DBGDRIV.H)
+*****************************************************************************/
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/tools/services/debug/dbgdriv/common/dbgdriv_ioctl.h b/drivers/external_drivers/intel_media/graphics/rgx/tools/services/debug/dbgdriv/common/dbgdriv_ioctl.h
new file mode 100644
index 0000000..bce7b96
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/tools/services/debug/dbgdriv/common/dbgdriv_ioctl.h
@@ -0,0 +1,58 @@
+/*************************************************************************/ /*!
+@File
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _DBGDRIV_IOCTL_H_
+#define _DBGDRIV_IOCTL_H_
+
+#include "dbgdrvif_srv5.h"
+
+
+/* Share this debug driver global with the OS layer so that IOCTL calls
+ * coming from the OS enter the common table of entry points.
+ */
+extern IMG_UINT32 (*g_DBGDrivProc[DEBUG_SERVICE_MAX_API])(IMG_VOID *, IMG_VOID *, IMG_BOOL);
+
+
+#endif /* _DBGDRIV_IOCTL_H_ */
+
+/*****************************************************************************
+ End of file
+ *****************************************************************************/
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/tools/services/debug/dbgdriv/common/handle.c b/drivers/external_drivers/intel_media/graphics/rgx/tools/services/debug/dbgdriv/common/handle.c
new file mode 100644
index 0000000..af54eee
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/tools/services/debug/dbgdriv/common/handle.c
@@ -0,0 +1,141 @@
+/*************************************************************************/ /*!
+@File
+@Title          Resource Handle Manager
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Provide resource handle management
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include "img_defs.h"
+#include "dbgdrvif_srv5.h"
+#include "dbgdriv.h"
+
+/* max number of streams held in SID info table */
+#define MAX_SID_ENTRIES		8
+
+typedef struct _SID_INFO
+{
+	PDBG_STREAM	psStream;
+} SID_INFO, *PSID_INFO;
+
+static SID_INFO gaSID_Xlat_Table[MAX_SID_ENTRIES];
+
+IMG_SID PStream2SID(PDBG_STREAM psStream)
+{
+	if (psStream != (PDBG_STREAM)IMG_NULL)
+	{
+		IMG_INT32 iIdx;
+
+		for (iIdx = 0; iIdx < MAX_SID_ENTRIES; iIdx++)
+		{
+			if (psStream == gaSID_Xlat_Table[iIdx].psStream)
+			{
+				/* idx is one based */
+				return (IMG_SID)iIdx+1;
+			}
+		}
+	}
+
+	return (IMG_SID)0;
+}
+
+
+PDBG_STREAM SID2PStream(IMG_SID hStream)
+{
+	/* changed to zero based */
+	IMG_INT32 iIdx = (IMG_INT32)hStream-1;
+
+	if (iIdx >= 0 && iIdx < MAX_SID_ENTRIES)
+	{
+		return gaSID_Xlat_Table[iIdx].psStream;
+	}
+	else
+	{
+    	return (PDBG_STREAM)IMG_NULL;
+    }
+}
+
+
+IMG_BOOL AddSIDEntry(PDBG_STREAM psStream)
+{
+	if (psStream != (PDBG_STREAM)IMG_NULL)
+	{
+		IMG_INT32 iIdx;
+
+		for (iIdx = 0; iIdx < MAX_SID_ENTRIES; iIdx++)
+		{
+			if (psStream == gaSID_Xlat_Table[iIdx].psStream)
+			{
+				/* already created */
+				return IMG_TRUE;
+			}
+
+			if (gaSID_Xlat_Table[iIdx].psStream == (PDBG_STREAM)IMG_NULL)
+			{
+				/* free entry */
+				gaSID_Xlat_Table[iIdx].psStream = psStream;
+				return IMG_TRUE;
+			}
+		}
+	}
+
+	return IMG_FALSE;
+}
+
+IMG_BOOL RemoveSIDEntry(PDBG_STREAM psStream)
+{
+	if (psStream != (PDBG_STREAM)IMG_NULL)
+	{
+		IMG_INT32 iIdx;
+
+		for (iIdx = 0; iIdx < MAX_SID_ENTRIES; iIdx++)
+		{
+			if (psStream == gaSID_Xlat_Table[iIdx].psStream)
+			{
+				gaSID_Xlat_Table[iIdx].psStream = (PDBG_STREAM)IMG_NULL;
+				return IMG_TRUE;
+			}
+		}
+	}
+
+	return IMG_FALSE;
+}
+
+
+/******************************************************************************
+ End of file (handle.c)
+******************************************************************************/
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/tools/services/debug/dbgdriv/common/hostfunc.h b/drivers/external_drivers/intel_media/graphics/rgx/tools/services/debug/dbgdriv/common/hostfunc.h
new file mode 100644
index 0000000..096bc3d
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/tools/services/debug/dbgdriv/common/hostfunc.h
@@ -0,0 +1,102 @@
+/*************************************************************************/ /*!
+@File
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _HOSTFUNC_
+#define _HOSTFUNC_
+
+/*****************************************************************************
+ Defines
+*****************************************************************************/
+#define HOST_PAGESIZE			(4096)
+#define DBG_MEMORY_INITIALIZER	(0xe2)
+
+/*****************************************************************************
+ Function prototypes
+*****************************************************************************/
+IMG_UINT32 HostReadRegistryDWORDFromString(IMG_CHAR *pcKey, IMG_CHAR *pcValueName, IMG_UINT32 *pui32Data);
+
+IMG_VOID * HostPageablePageAlloc(IMG_UINT32 ui32Pages);
+IMG_VOID HostPageablePageFree(IMG_VOID * pvBase);
+IMG_VOID * HostNonPageablePageAlloc(IMG_UINT32 ui32Pages);
+IMG_VOID HostNonPageablePageFree(IMG_VOID * pvBase);
+
+IMG_VOID * HostMapKrnBufIntoUser(IMG_VOID * pvKrnAddr, IMG_UINT32 ui32Size, IMG_VOID * *ppvMdl);
+IMG_VOID HostUnMapKrnBufFromUser(IMG_VOID * pvUserAddr, IMG_VOID * pvMdl, IMG_VOID * pvProcess);
+
+IMG_VOID HostCreateRegDeclStreams(IMG_VOID);
+
+/* Direct macros for Linux to avoid LockDep false-positives from occurring */
+#if defined(LINUX) && defined(__KERNEL__)
+
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+#define HostCreateMutex(IMG_VOID) ({ \
+	struct mutex* pMutex = NULL; \
+	pMutex = kmalloc(sizeof(struct mutex), GFP_KERNEL); \
+	if (pMutex) { mutex_init(pMutex); }; \
+	pMutex;})
+#define HostDestroyMutex(hLock) ({mutex_destroy((hLock)); kfree((hLock)); PVRSRV_OK;})
+
+#define HostAquireMutex(hLock) ({mutex_lock((hLock)); PVRSRV_OK;})
+#define HostReleaseMutex(hLock) ({mutex_unlock((hLock)); PVRSRV_OK;})
+
+#else /* defined(LINUX) && defined(__KERNEL__) */
+
+IMG_VOID * HostCreateMutex(IMG_VOID);
+IMG_VOID HostAquireMutex(IMG_VOID * pvMutex);
+IMG_VOID HostReleaseMutex(IMG_VOID * pvMutex);
+IMG_VOID HostDestroyMutex(IMG_VOID * pvMutex);
+
+#endif
+
+#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
+IMG_INT32 HostCreateEventObjects(IMG_VOID);
+IMG_VOID HostWaitForEvent(DBG_EVENT eEvent);
+IMG_VOID HostSignalEvent(DBG_EVENT eEvent);
+IMG_VOID HostDestroyEventObjects(IMG_VOID);
+#endif	/*defined(SUPPORT_DBGDRV_EVENT_OBJECTS) */
+
+#endif
+
+/*****************************************************************************
+ End of file (HOSTFUNC.H)
+*****************************************************************************/
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/tools/services/debug/dbgdriv/common/ioctl.c b/drivers/external_drivers/intel_media/graphics/rgx/tools/services/debug/dbgdriv/common/ioctl.c
new file mode 100644
index 0000000..43b38ed
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/tools/services/debug/dbgdriv/common/ioctl.c
@@ -0,0 +1,308 @@
+/*************************************************************************/ /*!
+@File
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if defined(_WIN32)
+#pragma  warning(disable:4201)
+#pragma  warning(disable:4214)
+#pragma  warning(disable:4115)
+#pragma  warning(disable:4514)
+
+#include <ntddk.h>
+#include <windef.h>
+
+#endif /* _WIN32 */
+
+#ifdef LINUX
+#include <asm/uaccess.h>
+#include "pvr_uaccess.h"
+#endif /* LINUX */
+
+#include "img_types.h"
+#include "dbgdrvif_srv5.h"
+#include "dbgdriv.h"
+#include "dbgdriv_ioctl.h"
+#include "hostfunc.h"
+
+#ifdef _WIN32
+#pragma  warning(default:4214)
+#pragma  warning(default:4115)
+#endif /* _WIN32 */
+
+/*****************************************************************************
+ Code
+*****************************************************************************/
+
+/*****************************************************************************
+ FUNCTION	:	DBGDIOCDrivGetServiceTable
+
+ PURPOSE	:
+
+ PARAMETERS	:
+
+ RETURNS	:
+*****************************************************************************/
+static IMG_UINT32 DBGDIOCDrivGetServiceTable(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer, IMG_BOOL bCompat)
+{
+	IMG_PVOID *	ppvOut;
+
+	PVR_UNREFERENCED_PARAMETER(pvInBuffer);
+	PVR_UNREFERENCED_PARAMETER(bCompat);
+	ppvOut = (IMG_PVOID *) pvOutBuffer;
+
+	*ppvOut = DBGDrivGetServiceTable();
+
+    return(IMG_TRUE);
+}
+
+#if defined(__QNXNTO__)
+/*****************************************************************************
+ FUNCTION	:	DBGIODrivCreateStream
+
+ PURPOSE	:
+
+ PARAMETERS	:
+
+ RETURNS	:
+*****************************************************************************/
+static IMG_UINT32 DBGDIOCDrivCreateStream(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer, IMG_BOOL bCompat)
+{
+	PDBG_IN_CREATESTREAM psIn;
+	PDBG_OUT_CREATESTREAM psOut;
+
+	PVR_UNREFERENCED_PARAMETER(bCompat);
+
+	psIn = (PDBG_IN_CREATESTREAM) pvInBuffer;
+	psOut = (PDBG_OUT_CREATESTREAM) pvOutBuffer;
+
+	return (ExtDBGDrivCreateStream(psIn->u.pszName, DEBUG_FLAGS_NO_BUF_EXPANDSION, psIn->ui32Pages, &psOut->phInit, &psOut->phMain, &psOut->phDeinit));
+}
+#endif
+
+/*****************************************************************************
+ FUNCTION	:	DBGDIOCDrivGetStream
+
+ PURPOSE	:
+
+ PARAMETERS	:
+
+ RETURNS	:
+*****************************************************************************/
+static IMG_UINT32 DBGDIOCDrivGetStream(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer, IMG_BOOL bCompat)
+{
+	PDBG_IN_FINDSTREAM psParams;
+	IMG_SID *	phStream;
+
+	psParams	= (PDBG_IN_FINDSTREAM)pvInBuffer;
+	phStream	= (IMG_SID *)pvOutBuffer;
+
+	*phStream = PStream2SID(ExtDBGDrivFindStream(WIDEPTR_GET_PTR(psParams->pszName, bCompat), psParams->bResetStream));
+
+	return(IMG_TRUE);
+}
+
+/*****************************************************************************
+ FUNCTION	:	DBGDIOCDrivRead
+
+ PURPOSE	:
+
+ PARAMETERS	:
+
+ RETURNS	:
+*****************************************************************************/
+static IMG_UINT32 DBGDIOCDrivRead(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer, IMG_BOOL bCompat)
+{
+	IMG_UINT32 *	pui32BytesCopied;
+	PDBG_IN_READ	psInParams;
+	PDBG_STREAM		psStream;
+	IMG_UINT8	*pui8ReadBuffer;
+
+	psInParams = (PDBG_IN_READ) pvInBuffer;
+	pui32BytesCopied = (IMG_UINT32 *) pvOutBuffer;
+	pui8ReadBuffer = WIDEPTR_GET_PTR(psInParams->pui8OutBuffer, bCompat);
+
+	psStream = SID2PStream(psInParams->hStream);
+
+	if (psStream != (PDBG_STREAM)IMG_NULL)
+	{
+		*pui32BytesCopied = ExtDBGDrivRead(psStream,
+									   psInParams->ui32BufID,
+									   psInParams->ui32OutBufferSize,
+									   pui8ReadBuffer);
+		return(IMG_TRUE);
+	}
+	else
+	{
+		/* invalid SID */
+		*pui32BytesCopied = 0;
+		return(IMG_FALSE);
+	}
+}
+
+/*****************************************************************************
+ FUNCTION	: DBGDIOCDrivSetMarker
+
+ PURPOSE	: Sets the marker in the stream to split output files
+
+ PARAMETERS	: pvInBuffer, pvOutBuffer
+
+ RETURNS	: success
+*****************************************************************************/
+static IMG_UINT32 DBGDIOCDrivSetMarker(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer, IMG_BOOL bCompat)
+{
+	PDBG_IN_SETMARKER	psParams;
+	PDBG_STREAM			psStream;
+
+	psParams = (PDBG_IN_SETMARKER) pvInBuffer;
+	PVR_UNREFERENCED_PARAMETER(pvOutBuffer);
+	PVR_UNREFERENCED_PARAMETER(bCompat);
+
+	psStream = SID2PStream(psParams->hStream);
+	if (psStream != (PDBG_STREAM)IMG_NULL)
+	{
+		ExtDBGDrivSetMarker(psStream, psParams->ui32Marker);
+		return(IMG_TRUE);
+	}
+	else
+	{
+		/* invalid SID */
+		return(IMG_FALSE);
+	}
+}
+
+/*****************************************************************************
+ FUNCTION	: DBGDIOCDrivGetMarker
+
+ PURPOSE	: Gets the marker in the stream to split output files
+
+ PARAMETERS	: pvInBuffer, pvOutBuffer
+
+ RETURNS	: success
+*****************************************************************************/
+static IMG_UINT32 DBGDIOCDrivGetMarker(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer, IMG_BOOL bCompat)
+{
+	PDBG_STREAM  psStream;
+	IMG_UINT32  *pui32Current;
+
+	PVR_UNREFERENCED_PARAMETER(bCompat);
+
+	pui32Current = (IMG_UINT32 *) pvOutBuffer;
+
+	psStream = SID2PStream(*(IMG_SID *)pvInBuffer);
+	if (psStream != (PDBG_STREAM)IMG_NULL)
+	{
+		*pui32Current = ExtDBGDrivGetMarker(psStream);
+		return(IMG_TRUE);
+	}
+	else
+	{
+		/* invalid SID */
+		*pui32Current = 0;
+		return(IMG_FALSE);
+	}
+}
+
+
+/*****************************************************************************
+ FUNCTION	:	DBGDIOCDrivWaitForEvent
+
+ PURPOSE	:
+
+ PARAMETERS	:
+
+ RETURNS	:
+*****************************************************************************/
+static IMG_UINT32 DBGDIOCDrivWaitForEvent(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer, IMG_BOOL bCompat)
+{
+	DBG_EVENT eEvent = (DBG_EVENT)(*(IMG_UINT32 *)pvInBuffer);
+
+	PVR_UNREFERENCED_PARAMETER(pvOutBuffer);
+	PVR_UNREFERENCED_PARAMETER(bCompat);
+
+	ExtDBGDrivWaitForEvent(eEvent);
+
+	return(IMG_TRUE);
+}
+
+
+/*****************************************************************************
+ FUNCTION	: DBGDIOCDrivGetFrame
+
+ PURPOSE	: Gets the marker in the stream to split output files
+
+ PARAMETERS	: pvInBuffer, pvOutBuffer
+
+ RETURNS	: success
+*****************************************************************************/
+static IMG_UINT32 DBGDIOCDrivGetFrame(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer, IMG_BOOL bCompat)
+{
+	IMG_UINT32  *pui32Current;
+
+	PVR_UNREFERENCED_PARAMETER(pvInBuffer);
+	PVR_UNREFERENCED_PARAMETER(bCompat);
+
+	pui32Current = (IMG_UINT32 *) pvOutBuffer;
+
+	*pui32Current = ExtDBGDrivGetFrame();
+
+	return(IMG_TRUE);
+}
+
+/*
+	ioctl interface jump table.
+	Accessed from the UM debug driver client
+*/
+IMG_UINT32 (*g_DBGDrivProc[DEBUG_SERVICE_MAX_API])(IMG_VOID *, IMG_VOID *, IMG_BOOL) =
+{
+	DBGDIOCDrivGetServiceTable, /* WDDM only for KMD to retrieve address from DBGDRV, Not used by umdbgdrvlnx */
+	DBGDIOCDrivGetStream,
+	DBGDIOCDrivRead,
+	DBGDIOCDrivSetMarker,
+	DBGDIOCDrivGetMarker,
+	DBGDIOCDrivWaitForEvent,
+	DBGDIOCDrivGetFrame,
+#if defined(__QNXNTO__)
+	DBGDIOCDrivCreateStream
+#endif
+};
+
+/*****************************************************************************
+ End of file (IOCTL.C)
+*****************************************************************************/
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/tools/services/debug/dbgdriv/linux/hostfunc.c b/drivers/external_drivers/intel_media/graphics/rgx/tools/services/debug/dbgdriv/linux/hostfunc.c
new file mode 100644
index 0000000..ede5aba
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/tools/services/debug/dbgdriv/linux/hostfunc.c
@@ -0,0 +1,328 @@
+/*************************************************************************/ /*!
+@File
+@Title          Debug driver file
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include <linux/version.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <asm/page.h>
+#include <linux/vmalloc.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
+#include <linux/mutex.h>
+#else
+#include <asm/semaphore.h>
+#endif
+#include <linux/hardirq.h>
+
+#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/jiffies.h>
+#include <linux/delay.h>
+#endif	/* defined(SUPPORT_DBGDRV_EVENT_OBJECTS) */
+
+#include "img_types.h"
+#include "pvr_debug.h"
+
+#include "dbgdrvif_srv5.h"
+#include "hostfunc.h"
+#include "dbgdriv.h"
+
+#if defined(PVRSRV_NEED_PVR_DPF) && !defined(SUPPORT_DRM)
+IMG_UINT32	gPVRDebugLevel = (DBGPRIV_FATAL | DBGPRIV_ERROR | DBGPRIV_WARNING |
+		DBGPRIV_CALLTRACE); /* Added call trace level to support PVR_LOGging of state in debug driver */
+
+#define PVR_STRING_TERMINATOR		'\0'
+#define PVR_IS_FILE_SEPARATOR(character) ( ((character) == '\\') || ((character) == '/') )
+
+/******************************************************************************/
+
+
+/*----------------------------------------------------------------------------
+<function>
+	FUNCTION   : PVRSRVDebugPrintf
+	PURPOSE    : To output a debug message to the user
+	PARAMETERS : In : uDebugLevel - The current debug level
+	             In : pszFile - The source file generating the message
+	             In : uLine - The line of the source file
+	             In : pszFormat - The message format string
+	             In : ... - Zero or more arguments for use by the format string
+	RETURNS    : None
+</function>
+------------------------------------------------------------------------------*/
+void PVRSRVDebugPrintf	(
+						IMG_UINT32	ui32DebugLevel,
+						const IMG_CHAR*	pszFileName,
+						IMG_UINT32	ui32Line,
+						const IMG_CHAR*	pszFormat,
+						...
+					)
+{
+	IMG_BOOL bTrace;
+	IMG_CHAR *pszLeafName;
+
+	pszLeafName = (char *)strrchr (pszFileName, '/');
+
+	if (pszLeafName)
+	{
+		pszFileName = pszLeafName;
+	}
+
+	bTrace = (IMG_BOOL)(ui32DebugLevel & DBGPRIV_CALLTRACE) ? IMG_TRUE : IMG_FALSE;
+
+	if (gPVRDebugLevel & ui32DebugLevel)
+	{
+		va_list vaArgs;
+		static char szBuffer[512];
+
+		va_start (vaArgs, pszFormat);
+
+		/* Add in the level of warning */
+		if (bTrace == IMG_FALSE)
+		{
+			switch(ui32DebugLevel)
+			{
+				case DBGPRIV_FATAL:
+				{
+					strcpy (szBuffer, "PVR_K:(Fatal): ");
+					break;
+				}
+				case DBGPRIV_ERROR:
+				{
+					strcpy (szBuffer, "PVR_K:(Error): ");
+					break;
+				}
+				case DBGPRIV_WARNING:
+				{
+					strcpy (szBuffer, "PVR_K:(Warning): ");
+					break;
+				}
+				case DBGPRIV_MESSAGE:
+				{
+					strcpy (szBuffer, "PVR_K:(Message): ");
+					break;
+				}
+				case DBGPRIV_VERBOSE:
+				{
+					strcpy (szBuffer, "PVR_K:(Verbose): ");
+					break;
+				}
+				default:
+				{
+					strcpy (szBuffer, "PVR_K:()");
+					break;
+				}
+			}
+		}
+		else
+		{
+			strcpy (szBuffer, "PVR_K: ");
+		}
+
+		vsprintf (&szBuffer[strlen(szBuffer)], pszFormat, vaArgs);
+
+ 		/*
+ 		 * Metrics and Traces don't need a location
+ 		 */
+ 		if (bTrace == IMG_FALSE)
+		{
+			sprintf (&szBuffer[strlen(szBuffer)], " [%d, %s]", (int)ui32Line, pszFileName);
+		}
+
+		printk(KERN_INFO "%s\n", szBuffer);
+
+		va_end (vaArgs);
+	}
+}
+#endif	/* defined(PVRSRV_NEED_PVR_DPF) && !defined(SUPPORT_DRM) */
+
+/*!
+******************************************************************************
+
+ @Function	HostMemSet
+
+ @Description Function that does the same as the C memset() functions
+
+ @Modified *pvDest :	pointer to start of buffer to be set
+
+ @Input    ui8Value:	value to set each byte to
+
+ @Input    ui32Size :	number of bytes to set
+
+ @Return   IMG_VOID
+
+******************************************************************************/
+IMG_VOID HostMemSet(IMG_VOID *pvDest, IMG_UINT8 ui8Value, IMG_UINT32 ui32Size)
+{
+	memset(pvDest, (int) ui8Value, (size_t) ui32Size);
+}
+
+/*!
+******************************************************************************
+
+ @Function		HostMemCopy
+
+ @Description	Copies memory around
+
+ @Input    pvDst - pointer to dst
+ @Output   pvSrc - pointer to src
+ @Input    ui32Size - bytes to copy
+
+ @Return  none
+
+******************************************************************************/
+IMG_VOID HostMemCopy(IMG_VOID *pvDst, IMG_VOID *pvSrc, IMG_UINT32 ui32Size)
+{
+#if defined(USE_UNOPTIMISED_MEMCPY)
+    unsigned char *src,*dst;
+    int i;
+
+    src=(unsigned char *)pvSrc;
+    dst=(unsigned char *)pvDst;
+    for(i=0;i<ui32Size;i++)
+    {
+        dst[i]=src[i];
+    }
+#else
+    memcpy(pvDst, pvSrc, ui32Size);
+#endif
+}
+
+IMG_UINT32 HostReadRegistryDWORDFromString(char *pcKey, char *pcValueName, IMG_UINT32 *pui32Data)
+{
+    /* XXX Not yet implemented */
+	return 0;
+}
+
+IMG_VOID * HostPageablePageAlloc(IMG_UINT32 ui32Pages)
+{
+    return (void*)vmalloc(ui32Pages * PAGE_SIZE);/*, GFP_KERNEL);*/
+}
+
+IMG_VOID HostPageablePageFree(IMG_VOID * pvBase)
+{
+    vfree(pvBase);
+}
+
+IMG_VOID * HostNonPageablePageAlloc(IMG_UINT32 ui32Pages)
+{
+    return (void*)vmalloc(ui32Pages * PAGE_SIZE);/*, GFP_KERNEL);*/
+}
+
+IMG_VOID HostNonPageablePageFree(IMG_VOID * pvBase)
+{
+    vfree(pvBase);
+}
+
+IMG_VOID * HostMapKrnBufIntoUser(IMG_VOID * pvKrnAddr, IMG_UINT32 ui32Size, IMG_VOID **ppvMdl)
+{
+    /* XXX Not yet implemented */
+	return IMG_NULL;
+}
+
+IMG_VOID HostUnMapKrnBufFromUser(IMG_VOID * pvUserAddr, IMG_VOID * pvMdl, IMG_VOID * pvProcess)
+{
+    /* XXX Not yet implemented */
+}
+
+IMG_VOID HostCreateRegDeclStreams(IMG_VOID)
+{
+    /* XXX Not yet implemented */
+}
+
+#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
+
+#define	EVENT_WAIT_TIMEOUT_MS	500
+#define	EVENT_WAIT_TIMEOUT_JIFFIES	(EVENT_WAIT_TIMEOUT_MS * HZ / 1000)
+
+static int iStreamData;
+static wait_queue_head_t sStreamDataEvent;
+
+IMG_INT32 HostCreateEventObjects(IMG_VOID)
+{
+	init_waitqueue_head(&sStreamDataEvent);
+
+	return 0;
+}
+
+IMG_VOID HostWaitForEvent(DBG_EVENT eEvent)
+{
+	switch(eEvent)
+	{
+		case DBG_EVENT_STREAM_DATA:
+			/*
+			 * More than one process may be woken up.
+			 * Any process that wakes up should consume
+			 * all the data from the streams.
+			 */
+			wait_event_interruptible_timeout(sStreamDataEvent, iStreamData != 0, EVENT_WAIT_TIMEOUT_JIFFIES);
+			iStreamData = 0;
+			break;
+		default:
+			/*
+			 * For unknown events, enter an interruptible sleep.
+			 */
+			msleep_interruptible(EVENT_WAIT_TIMEOUT_MS);
+			break;
+	}
+}
+
+IMG_VOID HostSignalEvent(DBG_EVENT eEvent)
+{
+	switch(eEvent)
+	{
+		case DBG_EVENT_STREAM_DATA:
+			iStreamData = 1;
+			wake_up_interruptible(&sStreamDataEvent);
+			break;
+		default:
+			break;
+	}
+}
+
+IMG_VOID HostDestroyEventObjects(IMG_VOID)
+{
+}
+#endif	/* defined(SUPPORT_DBGDRV_EVENT_OBJECTS) */
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/tools/services/debug/dbgdriv/linux/main.c b/drivers/external_drivers/intel_media/graphics/rgx/tools/services/debug/dbgdriv/linux/main.c
new file mode 100644
index 0000000..3c34df0d
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/tools/services/debug/dbgdriv/linux/main.c
@@ -0,0 +1,373 @@
+/*************************************************************************/ /*!
+@File
+@Title          Debug driver main file
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/kdev_t.h>
+#include <linux/pci.h>
+#include <linux/list.h>
+#include <linux/init.h>
+#include <linux/vmalloc.h>
+#include <linux/version.h>
+
+#if defined(LDM_PLATFORM) && !defined(SUPPORT_DRM)
+#include <linux/platform_device.h>
+#endif
+
+#if defined(LDM_PCI) && !defined(SUPPORT_DRM)
+#include <linux/pci.h>
+#endif
+
+#include <asm/uaccess.h>
+
+#if defined(SUPPORT_DRM)
+#include <drm/drmP.h>
+#endif
+
+#include "img_types.h"
+#include "linuxsrv.h"
+#include "dbgdriv_ioctl.h"
+#include "dbgdrvif_srv5.h"
+#include "dbgdriv.h"
+#include "hostfunc.h"
+#include "pvr_debug.h"
+#include "pvrmodule.h"
+#include "pvr_uaccess.h"
+
+#if defined(SUPPORT_DRM)
+
+#include "pvr_drm_shared.h"
+#include "pvr_drm.h"
+
+#else /* defined(SUPPORT_DRM) */
+
+#define DRVNAME "dbgdrv"
+MODULE_SUPPORTED_DEVICE(DRVNAME);
+
+static struct class *psDbgDrvClass;
+
+static int AssignedMajorNumber = 0;
+
+long dbgdrv_ioctl(struct file *, unsigned int, unsigned long);
+long dbgdrv_ioctl_compat(struct file *, unsigned int, unsigned long);
+
+static int dbgdrv_open(struct inode unref__ * pInode, struct file unref__ * pFile)
+{
+	return 0;
+}
+
+static int dbgdrv_release(struct inode unref__ * pInode, struct file unref__ * pFile)
+{
+	return 0;
+}
+
+static int dbgdrv_mmap(struct file* pFile, struct vm_area_struct* ps_vma)
+{
+	return 0;
+}
+
+static struct file_operations dbgdrv_fops =
+{
+	.owner          = THIS_MODULE,
+	.unlocked_ioctl = dbgdrv_ioctl,
+	.compat_ioctl   = dbgdrv_ioctl_compat,
+	.open           = dbgdrv_open,
+	.release        = dbgdrv_release,
+	.mmap           = dbgdrv_mmap,
+};
+
+#endif  /* defined(SUPPORT_DRM) */
+
+/* Outward temp buffer used by IOCTL handler allocated once and grows as needed.
+ * This optimisation means the debug driver performs less vmallocs/vfrees
+ * reducing the chance of kernel vmalloc space exhaustion.
+ * Singular out buffer for PDump UM reads is not multi-thread safe and so
+ * it now needs a mutex to protect it from multiple simultaneous reads in 
+ * the future.
+ */
+static IMG_CHAR*  g_outTmpBuf = IMG_NULL;
+static IMG_UINT32 g_outTmpBufSize = 64*PAGE_SIZE;
+static void*      g_pvOutTmpBufMutex = IMG_NULL;
+
+IMG_VOID DBGDrvGetServiceTable(IMG_VOID **fn_table);
+
+IMG_VOID DBGDrvGetServiceTable(IMG_VOID **fn_table)
+{
+	extern DBGKM_SERVICE_TABLE g_sDBGKMServices;
+
+	*fn_table = &g_sDBGKMServices;
+}
+
+#if defined(SUPPORT_DRM)
+void dbgdrv_cleanup(void)
+#else
+void cleanup_module(void)
+#endif
+{
+	if (g_outTmpBuf)
+	{
+		vfree(g_outTmpBuf);
+		g_outTmpBuf = IMG_NULL;
+	}
+
+#if !defined(SUPPORT_DRM)
+	device_destroy(psDbgDrvClass, MKDEV(AssignedMajorNumber, 0));
+	class_destroy(psDbgDrvClass);
+	unregister_chrdev(AssignedMajorNumber, DRVNAME);
+#endif /* !defined(SUPPORT_DRM) */
+#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
+	HostDestroyEventObjects();
+#endif
+	HostDestroyMutex(g_pvOutTmpBufMutex);
+	HostDestroyMutex(g_pvAPIMutex);
+	return;
+}
+
+#if defined(SUPPORT_DRM)
+IMG_INT dbgdrv_init(void)
+#else
+int init_module(void)
+#endif
+{
+#if !defined(SUPPORT_DRM)
+	struct device *psDev;
+#endif
+
+#if !defined(SUPPORT_DRM)
+	int err = -EBUSY;
+#endif
+
+	/* Init API mutex */
+	if ((g_pvAPIMutex=HostCreateMutex()) == IMG_NULL)
+	{
+		return -ENOMEM;
+	}
+
+	/* Init TmpBuf mutex */
+	if ((g_pvOutTmpBufMutex=HostCreateMutex()) == IMG_NULL)
+	{
+		return -ENOMEM;
+	}
+
+#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
+	/*
+	 * The current implementation of HostCreateEventObjects on Linux
+	 * can never fail, so there is no need to check for error.
+	 */
+	(void) HostCreateEventObjects();
+#endif
+
+#if !defined(SUPPORT_DRM)
+	AssignedMajorNumber =
+		register_chrdev(AssignedMajorNumber, DRVNAME, &dbgdrv_fops);
+
+	if (AssignedMajorNumber <= 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR," unable to get major\n"));
+		goto ErrDestroyEventObjects;
+	}
+
+	/*
+	 * This code (using GPL symbols) facilitates automatic device
+	 * node creation on platforms with udev (or similar).
+	 */
+	psDbgDrvClass = class_create(THIS_MODULE, DRVNAME);
+	if (IS_ERR(psDbgDrvClass))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: unable to create class (%ld)",
+				 __func__, PTR_ERR(psDbgDrvClass)));
+		goto ErrUnregisterCharDev;
+	}
+
+	psDev = device_create(psDbgDrvClass, NULL, MKDEV(AssignedMajorNumber, 0), NULL, DRVNAME);
+	if (IS_ERR(psDev))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: unable to create device (%ld)",
+								__func__, PTR_ERR(psDev)));
+		goto ErrDestroyClass;
+	}
+#endif /* !defined(SUPPORT_DRM) */
+
+	return 0;
+
+#if !defined(SUPPORT_DRM)
+ErrDestroyEventObjects:
+#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
+	HostDestroyEventObjects();
+#endif
+ErrUnregisterCharDev:
+	unregister_chrdev(AssignedMajorNumber, DRVNAME);
+ErrDestroyClass:
+	class_destroy(psDbgDrvClass);
+	return err;
+#endif /* !defined(SUPPORT_DRM) */
+}
+
+static IMG_INT dbgdrv_ioctl_work(IMG_VOID *arg, IMG_BOOL bCompat)
+{
+	IOCTL_PACKAGE *pIP = (IOCTL_PACKAGE *) arg;
+	char *buffer, *in, *out;
+	unsigned int cmd;
+	IMG_VOID *pBufferIn, *pBufferOut;
+
+	if ((pIP->ui32InBufferSize > (PAGE_SIZE >> 1) ) || (pIP->ui32OutBufferSize > (PAGE_SIZE >> 1)))
+	{
+		PVR_DPF((PVR_DBG_ERROR,"Sizes of the buffers are too large, cannot do ioctl\n"));
+		return -1;
+	}
+
+	buffer = (char *) HostPageablePageAlloc(1);
+	if (!buffer)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"Failed to allocate buffer, cannot do ioctl\n"));
+		return -EFAULT;
+	}
+
+	in = buffer;
+	out = buffer + (PAGE_SIZE >>1);
+
+	pBufferIn = WIDEPTR_GET_PTR(pIP->pInBuffer, bCompat);
+	pBufferOut = WIDEPTR_GET_PTR(pIP->pOutBuffer, bCompat);
+
+	if (pvr_copy_from_user(in, pBufferIn, pIP->ui32InBufferSize) != 0)
+	{
+		goto init_failed;
+	}
+
+	/* Extra -1 because ioctls start at DEBUG_SERVICE_IOCTL_BASE + 1 */
+	cmd = MAKEIOCTLINDEX(pIP->ui32Cmd) - DEBUG_SERVICE_IOCTL_BASE - 1;
+
+	if (pIP->ui32Cmd == DEBUG_SERVICE_READ)
+	{
+		IMG_UINT32 *pui32BytesCopied = (IMG_UINT32 *)out;
+		DBG_OUT_READ *psReadOutParams = (DBG_OUT_READ *)out;
+		DBG_IN_READ *psReadInParams = (DBG_IN_READ *)in;
+		IMG_VOID *pvOutBuffer;
+		PDBG_STREAM psStream;
+
+		psStream = SID2PStream(psReadInParams->hStream);
+		if (!psStream)
+		{
+			goto init_failed;
+		}
+
+		/* Serialise IOCTL Read op access to the singular output buffer */
+		HostAquireMutex(g_pvOutTmpBufMutex);
+
+		if ((g_outTmpBuf == IMG_NULL) || (psReadInParams->ui32OutBufferSize > g_outTmpBufSize))
+		{
+			if (psReadInParams->ui32OutBufferSize > g_outTmpBufSize)
+			{
+				g_outTmpBufSize = psReadInParams->ui32OutBufferSize;
+			}
+			g_outTmpBuf = vmalloc(g_outTmpBufSize);
+			if (!g_outTmpBuf)
+			{
+				HostReleaseMutex(g_pvOutTmpBufMutex);
+				goto init_failed;
+			}
+		}
+
+		/* Ensure only one thread is allowed into the DBGDriv core at a time */
+		HostAquireMutex(g_pvAPIMutex);
+
+		psReadOutParams->ui32DataRead = DBGDrivRead(psStream,
+										   psReadInParams->ui32BufID,
+										   psReadInParams->ui32OutBufferSize,
+										   g_outTmpBuf);
+		psReadOutParams->ui32SplitMarker = DBGDrivGetMarker(psStream);
+
+		HostReleaseMutex(g_pvAPIMutex);
+
+		pvOutBuffer = WIDEPTR_GET_PTR(psReadInParams->pui8OutBuffer, bCompat);
+
+		if (pvr_copy_to_user(pvOutBuffer,
+						g_outTmpBuf,
+						*pui32BytesCopied) != 0)
+		{
+			HostReleaseMutex(g_pvOutTmpBufMutex);
+			goto init_failed;
+		}
+
+		HostReleaseMutex(g_pvOutTmpBufMutex);
+
+	}
+	else
+	{
+		(g_DBGDrivProc[cmd])(in, out, bCompat);
+	}
+
+	if (copy_to_user(pBufferOut, out, pIP->ui32OutBufferSize) != 0)
+	{
+		goto init_failed;
+	}
+
+	HostPageablePageFree((IMG_VOID *)buffer);
+	return 0;
+
+init_failed:
+	HostPageablePageFree((IMG_VOID *)buffer);
+	return -EFAULT;
+}
+
+#if defined(SUPPORT_DRM)
+int dbgdrv_ioctl(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile)
+#else
+long dbgdrv_ioctl(struct file *file, unsigned int ioctlCmd, unsigned long arg)
+#endif
+{
+	return dbgdrv_ioctl_work((IMG_VOID *) arg, IMG_FALSE);
+}
+
+#if defined(SUPPORT_DRM)
+int dbgdrv_ioctl_compat(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile)
+#else
+long dbgdrv_ioctl_compat(struct file *file, unsigned int ioctlCmd, unsigned long arg)
+#endif
+{
+	return dbgdrv_ioctl_work((IMG_VOID *) arg, IMG_TRUE);
+}
+
+
+
+EXPORT_SYMBOL(DBGDrvGetServiceTable);
diff --git a/drivers/external_drivers/intel_media/graphics/rgx/tools/services/debug/include/linuxsrv.h b/drivers/external_drivers/intel_media/graphics/rgx/tools/services/debug/include/linuxsrv.h
new file mode 100644
index 0000000..1966906
--- /dev/null
+++ b/drivers/external_drivers/intel_media/graphics/rgx/tools/services/debug/include/linuxsrv.h
@@ -0,0 +1,69 @@
+/*************************************************************************/ /*!
+@File
+@Title          Module defs for pvr core drivers
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _LINUXSRV_H__
+#define _LINUXSRV_H__
+
+#include "dbgdrvif_srv5.h"
+
+typedef struct  tagIOCTL_PACKAGE
+{
+	IMG_UINT32 ui32Cmd;              // ioctl command
+	IMG_UINT32 ui32Size;			   // needs to be correctly set
+	DBG_WIDEPTR	pInBuffer;          // input data buffer
+	IMG_UINT32  ui32InBufferSize;     // size of input data buffer
+	DBG_WIDEPTR    pOutBuffer;         // output data buffer
+	IMG_UINT32  ui32OutBufferSize;    // size of output data buffer
+#if defined(SUPPORT_DRM)
+	IMG_UINT32 ui32PtrSize;
+#endif
+} IOCTL_PACKAGE;
+
+IMG_UINT32 DeviceIoControl(IMG_UINT32 hDevice,		
+						IMG_UINT32 ui32ControlCode, 
+						IMG_VOID *pInBuffer,		
+						IMG_UINT32 ui32InBufferSize,
+						IMG_VOID *pOutBuffer,		
+						IMG_UINT32 ui32OutBufferSize,  
+						IMG_UINT32 *pui32BytesReturned); 
+
+#endif /* _LINUXSRV_H__*/
diff --git a/drivers/external_drivers/intel_media/hdmi_audio/intel_mid_hdmi_audio.c b/drivers/external_drivers/intel_media/hdmi_audio/intel_mid_hdmi_audio.c
new file mode 100755
index 0000000..fd6125b
--- /dev/null
+++ b/drivers/external_drivers/intel_media/hdmi_audio/intel_mid_hdmi_audio.c
@@ -0,0 +1,2268 @@
+/*
+ *   intel_mid_hdmi_audio.c - Intel HDMI audio driver for MID
+ *
+ *  Copyright (C) 2010 Intel Corp
+ *  Authors:	Sailaja Bandarupalli <sailaja.bandarupalli@intel.com>
+ *		Ramesh Babu K V	<ramesh.babu@intel.com>
+ *		Vaibhav Agarwal <vaibhav.agarwal@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * ALSA driver for Intel MID HDMI audio controller
+ */
+
+#define pr_fmt(fmt)	"had: " fmt
+
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/acpi.h>
+#include <sound/pcm.h>
+#include <sound/core.h>
+#include <sound/pcm_params.h>
+#include <sound/initval.h>
+#include <sound/control.h>
+#include <sound/initval.h>
+
+#include "intel_mid_hdmi_audio.h"
+#ifdef CONFIG_DRM_INTEL_HANDSET
+#include <pwr_mgmt.h>
+#else
+#include <psb_powermgmt.h>
+#endif
+
+#define PCM_INDEX		0
+#define MAX_PB_STREAMS		1
+#define MAX_CAP_STREAMS		0
+#define HDMI_AUDIO_DRIVER	"hdmi-audio"
+static DEFINE_MUTEX(had_mutex);
+
+/*standard module options for ALSA. This module supports only one card*/
+static int hdmi_card_index = SNDRV_DEFAULT_IDX1;
+static char *hdmi_card_id = SNDRV_DEFAULT_STR1;
+struct snd_intelhad *had_data;
+
+module_param(hdmi_card_index, int, 0444);
+MODULE_PARM_DESC(hdmi_card_index,
+		"Index value for INTEL Intel HDMI Audio controller.");
+module_param(hdmi_card_id, charp, 0444);
+MODULE_PARM_DESC(hdmi_card_id,
+		"ID string for INTEL Intel HDMI Audio controller.");
+MODULE_AUTHOR("Sailaja Bandarupalli <sailaja.bandarupalli@intel.com>");
+MODULE_AUTHOR("Ramesh Babu K V <ramesh.babu@intel.com>");
+MODULE_AUTHOR("Vaibhav Agarwal <vaibhav.agarwal@intel.com>");
+MODULE_DESCRIPTION("Intel HDMI Audio driver");
+MODULE_LICENSE("GPL v2");
+MODULE_SUPPORTED_DEVICE("{Intel,Intel_HAD}");
+MODULE_VERSION(HAD_DRIVER_VERSION);
+
+#define INFO_FRAME_WORD1	0x000a0184
+#define FIFO_THRESHOLD		0xFE
+#define DMA_FIFO_THRESHOLD	0x7
+#define BYTES_PER_WORD		0x4
+
+/* Sampling rate as per IEC60958 Ver 3 */
+#define CH_STATUS_MAP_32KHZ	0x3
+#define CH_STATUS_MAP_44KHZ	0x0
+#define CH_STATUS_MAP_48KHZ	0x2
+#define CH_STATUS_MAP_88KHZ	0x8
+#define CH_STATUS_MAP_96KHZ	0xA
+#define CH_STATUS_MAP_176KHZ	0xC
+#define CH_STATUS_MAP_192KHZ	0xE
+
+#define MAX_SMPL_WIDTH_20	0x0
+#define MAX_SMPL_WIDTH_24	0x1
+#define SMPL_WIDTH_16BITS	0x1
+#define SMPL_WIDTH_24BITS	0x5
+#define CHANNEL_ALLOCATION	0x1F
+#define MASK_BYTE0		0x000000FF
+#define VALID_DIP_WORDS		3
+#define LAYOUT0			0
+#define LAYOUT1			1
+#define SWAP_LFE_CENTER		0x00fac4c8
+#define AUD_CONFIG_CH_MASK_V2	0x70
+
+/* supported PCM rates and bits */
+#define AC_SUPPCM_RATES   (0xfff << 0)
+#define AC_SUPPCM_BITS_8  (1<<16)
+#define AC_SUPPCM_BITS_16 (1<<17)
+#define AC_SUPPCM_BITS_20 (1<<18)
+#define AC_SUPPCM_BITS_24 (1<<19)
+#define AC_SUPPCM_BITS_32 (1<<20)
+
+#define SUPPORTED_RATES	 SNDRV_PCM_RATE_48000
+
+enum cea_audio_coding_types {
+	AUDIO_CODING_TYPE_REF_STREAM_HEADER =  0,
+	AUDIO_CODING_TYPE_LPCM              =  1,
+	AUDIO_CODING_TYPE_AC3               =  2,
+	AUDIO_CODING_TYPE_MPEG1             =  3,
+	AUDIO_CODING_TYPE_MP3               =  4,
+	AUDIO_CODING_TYPE_MPEG2             =  5,
+	AUDIO_CODING_TYPE_AACLC             =  6,
+	AUDIO_CODING_TYPE_DTS               =  7,
+	AUDIO_CODING_TYPE_ATRAC             =  8,
+	AUDIO_CODING_TYPE_SACD              =  9,
+	AUDIO_CODING_TYPE_EAC3              = 10,
+	AUDIO_CODING_TYPE_DTS_HD            = 11,
+	AUDIO_CODING_TYPE_MLP               = 12,
+	AUDIO_CODING_TYPE_DST               = 13,
+	AUDIO_CODING_TYPE_WMAPRO            = 14,
+	AUDIO_CODING_TYPE_REF_CXT           = 15,
+	/* also include valid xtypes below */
+	AUDIO_CODING_TYPE_HE_AAC            = 15,
+	AUDIO_CODING_TYPE_HE_AAC2           = 16,
+	AUDIO_CODING_TYPE_MPEG_SURROUND     = 17,
+};
+
+/*
+ * ELD SA bits in the CEA Speaker Allocation data block
+ */
+static int eld_speaker_allocation_bits[] = {
+	[0] = FL | FR,
+	[1] = LFE,
+	[2] = FC,
+	[3] = RL | RR,
+	[4] = RC,
+	[5] = FLC | FRC,
+	[6] = RLC | RRC,
+	/* the following are not defined in ELD yet */
+	[7] = 0,
+};
+
+/*
+ * This is an ordered list!
+ *
+ * The preceding ones have better chances to be selected by
+ * hdmi_channel_allocation().
+ */
+static struct cea_channel_speaker_allocation channel_allocations[] = {
+/*                        channel:   7     6    5    4    3     2    1    0  */
+{ .ca_index = 0x00,  .speakers = {   0,    0,   0,   0,   0,    0,  FR,  FL } },
+				/* 2.1 */
+{ .ca_index = 0x01,  .speakers = {   0,    0,   0,   0,   0,  LFE,  FR,  FL } },
+				/* Dolby Surround */
+{ .ca_index = 0x02,  .speakers = {   0,    0,   0,   0,  FC,    0,  FR,  FL } },
+				/* surround40 */
+{ .ca_index = 0x08,  .speakers = {   0,    0,  RR,  RL,   0,    0,  FR,  FL } },
+				/* surround41 */
+{ .ca_index = 0x09,  .speakers = {   0,    0,  RR,  RL,   0,  LFE,  FR,  FL } },
+				/* surround50 */
+{ .ca_index = 0x0a,  .speakers = {   0,    0,  RR,  RL,  FC,    0,  FR,  FL } },
+				/* surround51 */
+{ .ca_index = 0x0b,  .speakers = {   0,    0,  RR,  RL,  FC,  LFE,  FR,  FL } },
+				/* 6.1 */
+{ .ca_index = 0x0f,  .speakers = {   0,   RC,  RR,  RL,  FC,  LFE,  FR,  FL } },
+				/* surround71 */
+{ .ca_index = 0x13,  .speakers = { RRC,  RLC,  RR,  RL,  FC,  LFE,  FR,  FL } },
+
+{ .ca_index = 0x03,  .speakers = {   0,    0,   0,   0,  FC,  LFE,  FR,  FL } },
+{ .ca_index = 0x04,  .speakers = {   0,    0,   0,  RC,   0,    0,  FR,  FL } },
+{ .ca_index = 0x05,  .speakers = {   0,    0,   0,  RC,   0,  LFE,  FR,  FL } },
+{ .ca_index = 0x06,  .speakers = {   0,    0,   0,  RC,  FC,    0,  FR,  FL } },
+{ .ca_index = 0x07,  .speakers = {   0,    0,   0,  RC,  FC,  LFE,  FR,  FL } },
+{ .ca_index = 0x0c,  .speakers = {   0,   RC,  RR,  RL,   0,    0,  FR,  FL } },
+{ .ca_index = 0x0d,  .speakers = {   0,   RC,  RR,  RL,   0,  LFE,  FR,  FL } },
+{ .ca_index = 0x0e,  .speakers = {   0,   RC,  RR,  RL,  FC,    0,  FR,  FL } },
+{ .ca_index = 0x10,  .speakers = { RRC,  RLC,  RR,  RL,   0,    0,  FR,  FL } },
+{ .ca_index = 0x11,  .speakers = { RRC,  RLC,  RR,  RL,   0,  LFE,  FR,  FL } },
+{ .ca_index = 0x12,  .speakers = { RRC,  RLC,  RR,  RL,  FC,    0,  FR,  FL } },
+{ .ca_index = 0x14,  .speakers = { FRC,  FLC,   0,   0,   0,    0,  FR,  FL } },
+{ .ca_index = 0x15,  .speakers = { FRC,  FLC,   0,   0,   0,  LFE,  FR,  FL } },
+{ .ca_index = 0x16,  .speakers = { FRC,  FLC,   0,   0,  FC,    0,  FR,  FL } },
+{ .ca_index = 0x17,  .speakers = { FRC,  FLC,   0,   0,  FC,  LFE,  FR,  FL } },
+{ .ca_index = 0x18,  .speakers = { FRC,  FLC,   0,  RC,   0,    0,  FR,  FL } },
+{ .ca_index = 0x19,  .speakers = { FRC,  FLC,   0,  RC,   0,  LFE,  FR,  FL } },
+{ .ca_index = 0x1a,  .speakers = { FRC,  FLC,   0,  RC,  FC,    0,  FR,  FL } },
+{ .ca_index = 0x1b,  .speakers = { FRC,  FLC,   0,  RC,  FC,  LFE,  FR,  FL } },
+{ .ca_index = 0x1c,  .speakers = { FRC,  FLC,  RR,  RL,   0,    0,  FR,  FL } },
+{ .ca_index = 0x1d,  .speakers = { FRC,  FLC,  RR,  RL,   0,  LFE,  FR,  FL } },
+{ .ca_index = 0x1e,  .speakers = { FRC,  FLC,  RR,  RL,  FC,    0,  FR,  FL } },
+{ .ca_index = 0x1f,  .speakers = { FRC,  FLC,  RR,  RL,  FC,  LFE,  FR,  FL } },
+};
+
+static struct channel_map_table map_tables[] = {
+	{ SNDRV_CHMAP_FL,       0x00,   FL },
+	{ SNDRV_CHMAP_FR,       0x01,   FR },
+	{ SNDRV_CHMAP_RL,       0x04,   RL },
+	{ SNDRV_CHMAP_RR,       0x05,   RR },
+	{ SNDRV_CHMAP_LFE,      0x02,   LFE },
+	{ SNDRV_CHMAP_FC,       0x03,   FC },
+	{ SNDRV_CHMAP_RLC,      0x06,   RLC },
+	{ SNDRV_CHMAP_RRC,      0x07,   RRC },
+	{} /* terminator */
+};
+
+/* hardware capability structure */
+static const struct snd_pcm_hardware snd_intel_hadstream = {
+	.info =	(SNDRV_PCM_INFO_INTERLEAVED |
+		SNDRV_PCM_INFO_DOUBLE |
+		SNDRV_PCM_INFO_MMAP|
+		SNDRV_PCM_INFO_MMAP_VALID |
+		SNDRV_PCM_INFO_BATCH),
+	.formats = (SNDRV_PCM_FMTBIT_S24 |
+		SNDRV_PCM_FMTBIT_U24),
+	.rates = SNDRV_PCM_RATE_32000 |
+		SNDRV_PCM_RATE_44100 |
+		SNDRV_PCM_RATE_48000 |
+		SNDRV_PCM_RATE_88200 |
+		SNDRV_PCM_RATE_96000 |
+		SNDRV_PCM_RATE_176400 |
+		SNDRV_PCM_RATE_192000,
+	.rate_min = HAD_MIN_RATE,
+	.rate_max = HAD_MAX_RATE,
+	.channels_min = HAD_MIN_CHANNEL,
+	.channels_max = HAD_MAX_CHANNEL,
+	.buffer_bytes_max = HAD_MAX_BUFFER,
+	.period_bytes_min = HAD_MIN_PERIOD_BYTES,
+	.period_bytes_max = HAD_MAX_PERIOD_BYTES,
+	.periods_min = HAD_MIN_PERIODS,
+	.periods_max = HAD_MAX_PERIODS,
+	.fifo_size = HAD_FIFO_SIZE,
+};
+
+/*
+ * SF2:SF1:SF0 index => sampling frequency
+ */
+static int cea_sampling_frequencies[8] = {
+	0,		      /* 0: Refer to Stream Header */
+	SNDRV_PCM_RATE_32000,   /* 1:  32000Hz */
+	SNDRV_PCM_RATE_44100,   /* 2:  44100Hz */
+	SNDRV_PCM_RATE_48000,   /* 3:  48000Hz */
+	SNDRV_PCM_RATE_88200,   /* 4:  88200Hz */
+	SNDRV_PCM_RATE_96000,   /* 5:  96000Hz */
+	SNDRV_PCM_RATE_176400,  /* 6: 176400Hz */
+	SNDRV_PCM_RATE_192000,  /* 7: 192000Hz */
+};
+
+/*
+ * SS1:SS0 index => sample size
+ */
+static int cea_sample_sizes[4] = {
+	0,		      /* 0: Refer to Stream Header */
+	AC_SUPPCM_BITS_16,      /* 1: 16 bits */
+	AC_SUPPCM_BITS_20,      /* 2: 20 bits */
+	AC_SUPPCM_BITS_24,      /* 3: 24 bits */
+};
+
+/* Register access functions */
+
+inline int had_get_hwstate(struct snd_intelhad *intelhaddata)
+{
+	/* Check for device presence -SW state */
+	if (intelhaddata->drv_status == HAD_DRV_DISCONNECTED) {
+		pr_debug("%s:Device not connected:%d\n", __func__,
+				intelhaddata->drv_status);
+		return -ENODEV;
+	}
+
+	/* Check for device presence -HW state */
+	if (!ospm_power_is_hw_on(OSPM_DISPLAY_ISLAND)) {
+		pr_err("%s:Device not connected\n", __func__);
+		/* HOT_UNPLUG event can be sent to
+		 * maintain correct state within HAD
+		 * had_event_handler(HAD_EVENT_HOT_UNPLUG, intelhaddata);
+		 * Drop all acuired locks before executing this.
+		 */
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+inline int had_get_caps(enum had_caps_list query, void *caps)
+{
+	int retval;
+	struct snd_intelhad *intelhaddata = had_data;
+
+	retval = had_get_hwstate(intelhaddata);
+	if (!retval)
+		retval = intelhaddata->query_ops.hdmi_audio_get_caps(query,
+				caps);
+
+	return retval;
+}
+
+inline int had_set_caps(enum had_caps_list set_element , void *caps)
+{
+	int retval;
+	struct snd_intelhad *intelhaddata = had_data;
+
+	retval = had_get_hwstate(intelhaddata);
+	if (!retval)
+		retval = intelhaddata->query_ops.hdmi_audio_set_caps(
+				set_element, caps);
+
+	return retval;
+}
+
+inline int had_read_register(uint32_t offset, uint32_t *data)
+{
+	int retval;
+	struct snd_intelhad *intelhaddata = had_data;
+	u32 base_addr = intelhaddata->audio_reg_base;
+
+	retval = had_get_hwstate(intelhaddata);
+	if (!retval)
+		retval = intelhaddata->reg_ops.hdmi_audio_read_register(
+				base_addr + offset, data);
+
+	return retval;
+}
+
+inline int had_write_register(uint32_t offset, uint32_t data)
+{
+	int retval;
+	struct snd_intelhad *intelhaddata = had_data;
+	u32 base_addr = intelhaddata->audio_reg_base;
+
+	retval = had_get_hwstate(intelhaddata);
+	if (!retval)
+		retval = intelhaddata->reg_ops.hdmi_audio_write_register(
+				base_addr + offset, data);
+
+	return retval;
+}
+
+inline int had_read_modify(uint32_t offset, uint32_t data, uint32_t mask)
+{
+	int retval;
+	struct snd_intelhad *intelhaddata = had_data;
+	u32 base_addr = intelhaddata->audio_reg_base;
+
+	retval = had_get_hwstate(intelhaddata);
+	if (!retval)
+		retval = intelhaddata->reg_ops.hdmi_audio_read_modify(
+				base_addr + offset, data, mask);
+
+	return retval;
+}
+/**
+ * had_read_modify_aud_config_v2 - Specific function to read-modify AUD_CONFIG
+ * register on VLV2.The had_read_modify() function should not directly be used
+ * on VLV2 for updating AUD_CONFIG register.
+ * This is because:
+ * Bit6 of AUD_CONFIG register is writeonly due to a silicon bug on VLV2 HDMI IP.
+ * As a result a read-modify of AUD_CONFIG regiter will always clear bit6.
+ * AUD_CONFIG[6:4] represents the "channels" field of the register.
+ * This field should be 1xy binary for configuration with 6 or more channels.
+ * Read-modify of AUD_CONFIG (Eg. for enabling audio) causes the "channels" field
+ * to be updated as 0xy binary resulting in bad audio.
+ * The fix is to always write the AUD_CONFIG[6:4] with appropriate value when
+ * doing read-modify of AUD_CONFIG register.
+ *
+ * @substream: the current substream or NULL if no active substream
+ * @data : data to be written
+ * @mask : mask
+ *
+ */
+inline int had_read_modify_aud_config_v2(struct snd_pcm_substream *substream,
+					uint32_t data, uint32_t mask)
+{
+	union aud_cfg cfg_val = {.cfg_regval = 0};
+	u8 channels;
+
+	/* If substream is NULL, there is no active stream.
+	   In this case just set channels to 2*/
+	if (substream)
+		channels = substream->runtime->channels;
+	else
+		channels = 2;
+	cfg_val.cfg_regx_v2.num_ch = channels - 2;
+
+	data = data | cfg_val.cfg_regval;
+	mask = mask | AUD_CONFIG_CH_MASK_V2;
+
+	pr_debug("%s : data = %x, mask =%x\n", __func__, data, mask);
+
+	return had_read_modify(AUD_CONFIG, data, mask);
+}
+
+/**
+ * snd_intelhad_enable_audio_v1 - to enable audio
+ *
+ * @substream: Current substream or NULL if no active substream.
+ * @enable: 1 if audio is to be enabled; 0 if audio is to be disabled.
+ *
+ */
+static void snd_intelhad_enable_audio_v1(struct snd_pcm_substream *substream,
+					u8 enable)
+{
+	had_read_modify(AUD_CONFIG, enable, BIT(0));
+}
+
+/**
+ * snd_intelhad_reset_audio_v1 - to reset audio subsystem
+ *
+ * @reset: 1 to reset audio; 0 to bring audio out of reset.
+ *
+ */
+static void snd_intelhad_reset_audio_v1(u8 reset)
+{
+	had_write_register(AUD_HDMI_STATUS, reset);
+}
+
+/**
+ * had_prog_status_reg - to initialize audio channel status registers
+ *
+ * @substream:substream for which the prepare function is called
+ * @intelhaddata:substream private data
+ *
+ * This function is called in the prepare callback
+ */
+static int had_prog_status_reg(struct snd_pcm_substream *substream,
+			struct snd_intelhad *intelhaddata)
+{
+	union aud_ch_status_0 ch_stat0 = {.status_0_regval = 0};
+	union aud_ch_status_1 ch_stat1 = {.status_1_regval = 0};
+	int format;
+
+	pr_debug("Entry %s\n", __func__);
+
+	ch_stat0.status_0_regx.lpcm_id = (intelhaddata->aes_bits &
+						IEC958_AES0_NONAUDIO)>>1;
+	ch_stat0.status_0_regx.clk_acc = (intelhaddata->aes_bits &
+						IEC958_AES3_CON_CLOCK)>>4;
+	switch (substream->runtime->rate) {
+	case AUD_SAMPLE_RATE_32:
+		ch_stat0.status_0_regx.samp_freq = CH_STATUS_MAP_32KHZ;
+		break;
+
+	case AUD_SAMPLE_RATE_44_1:
+		ch_stat0.status_0_regx.samp_freq = CH_STATUS_MAP_44KHZ;
+		break;
+	case AUD_SAMPLE_RATE_48:
+		ch_stat0.status_0_regx.samp_freq = CH_STATUS_MAP_48KHZ;
+		break;
+	case AUD_SAMPLE_RATE_88_2:
+		ch_stat0.status_0_regx.samp_freq = CH_STATUS_MAP_88KHZ;
+		break;
+	case AUD_SAMPLE_RATE_96:
+		ch_stat0.status_0_regx.samp_freq = CH_STATUS_MAP_96KHZ;
+		break;
+	case AUD_SAMPLE_RATE_176_4:
+		ch_stat0.status_0_regx.samp_freq = CH_STATUS_MAP_176KHZ;
+		break;
+	case AUD_SAMPLE_RATE_192:
+		ch_stat0.status_0_regx.samp_freq = CH_STATUS_MAP_192KHZ;
+		break;
+
+	default:
+		/* control should never come here */
+		return -EINVAL;
+	break;
+
+	}
+	had_write_register(AUD_CH_STATUS_0, ch_stat0.status_0_regval);
+
+	format = substream->runtime->format;
+
+	if (format == SNDRV_PCM_FORMAT_S16_LE) {
+		ch_stat1.status_1_regx.max_wrd_len = MAX_SMPL_WIDTH_20;
+		ch_stat1.status_1_regx.wrd_len = SMPL_WIDTH_16BITS;
+	} else if (format == SNDRV_PCM_FORMAT_S24_LE) {
+		ch_stat1.status_1_regx.max_wrd_len = MAX_SMPL_WIDTH_24;
+		ch_stat1.status_1_regx.wrd_len = SMPL_WIDTH_24BITS;
+	} else {
+		ch_stat1.status_1_regx.max_wrd_len = 0;
+		ch_stat1.status_1_regx.wrd_len = 0;
+	}
+	had_write_register(AUD_CH_STATUS_1, ch_stat1.status_1_regval);
+	return 0;
+}
+
+/**
+ * snd_intelhad_prog_audio_ctrl_v2 - to initialize audio
+ * registers and buffer confgiuration registers
+ *
+ * @substream:substream for which the prepare function is called
+ * @intelhaddata:substream private data
+ *
+ * This function is called in the prepare callback
+ */
+int snd_intelhad_prog_audio_ctrl_v2(struct snd_pcm_substream *substream,
+					struct snd_intelhad *intelhaddata)
+{
+	union aud_cfg cfg_val = {.cfg_regval = 0};
+	union aud_buf_config buf_cfg = {.buf_cfgval = 0};
+	u8 channels;
+
+	had_prog_status_reg(substream, intelhaddata);
+
+	buf_cfg.buf_cfg_regx_v2.audio_fifo_watermark = FIFO_THRESHOLD;
+	buf_cfg.buf_cfg_regx_v2.dma_fifo_watermark = DMA_FIFO_THRESHOLD;
+	buf_cfg.buf_cfg_regx_v2.aud_delay = 0;
+	had_write_register(AUD_BUF_CONFIG, buf_cfg.buf_cfgval);
+
+	channels = substream->runtime->channels;
+	cfg_val.cfg_regx_v2.num_ch = channels - 2;
+	if (channels <= 2)
+		cfg_val.cfg_regx_v2.layout = LAYOUT0;
+	else
+		cfg_val.cfg_regx_v2.layout = LAYOUT1;
+
+	cfg_val.cfg_regx_v2.val_bit = 0;
+	had_write_register(AUD_CONFIG, cfg_val.cfg_regval);
+	return 0;
+}
+
+/**
+ * snd_intelhad_prog_audio_ctrl_v1 - to initialize audio
+ * registers and buffer confgiuration registers
+ *
+ * @substream:substream for which the prepare function is called
+ * @intelhaddata:substream private data
+ *
+ * This function is called in the prepare callback
+ */
+int snd_intelhad_prog_audio_ctrl_v1(struct snd_pcm_substream *substream,
+					struct snd_intelhad *intelhaddata)
+{
+	union aud_cfg cfg_val = {.cfg_regval = 0};
+	union aud_buf_config buf_cfg = {.buf_cfgval = 0};
+	u8 channels;
+
+	had_prog_status_reg(substream, intelhaddata);
+
+	buf_cfg.buf_cfg_regx.fifo_width = FIFO_THRESHOLD;
+	buf_cfg.buf_cfg_regx.aud_delay = 0;
+	had_write_register(AUD_BUF_CONFIG, buf_cfg.buf_cfgval);
+
+	channels = substream->runtime->channels;
+
+	switch (channels) {
+	case 1:
+	case 2:
+		cfg_val.cfg_regx.num_ch = CH_STEREO;
+		cfg_val.cfg_regx.layout = LAYOUT0;
+	break;
+
+	case 3:
+	case 4:
+		cfg_val.cfg_regx.num_ch = CH_THREE_FOUR;
+		cfg_val.cfg_regx.layout = LAYOUT1;
+	break;
+
+	case 5:
+	case 6:
+		cfg_val.cfg_regx.num_ch = CH_FIVE_SIX;
+		cfg_val.cfg_regx.layout = LAYOUT1;
+	break;
+
+	case 7:
+	case 8:
+		cfg_val.cfg_regx.num_ch = CH_SEVEN_EIGHT;
+		cfg_val.cfg_regx.layout = LAYOUT1;
+	break;
+
+	}
+
+	cfg_val.cfg_regx.val_bit = 0;
+	had_write_register(AUD_CONFIG, cfg_val.cfg_regval);
+	return 0;
+}
+/*
+ * Compute derived values in channel_allocations[].
+ */
+static void init_channel_allocations(void)
+{
+	int i, j;
+	struct cea_channel_speaker_allocation *p;
+	pr_debug("%s: Enter\n", __func__);
+
+	for (i = 0; i < ARRAY_SIZE(channel_allocations); i++) {
+		p = channel_allocations + i;
+		p->channels = 0;
+		p->spk_mask = 0;
+		for (j = 0; j < ARRAY_SIZE(p->speakers); j++)
+			if (p->speakers[j]) {
+				p->channels++;
+				p->spk_mask |= p->speakers[j];
+			}
+	}
+}
+
+/*
+ * The transformation takes two steps:
+ *
+ *      eld->spk_alloc => (eld_speaker_allocation_bits[]) => spk_mask
+ *      spk_mask => (channel_allocations[])	 => ai->CA
+ *
+ * TODO: it could select the wrong CA from multiple candidates.
+*/
+static int snd_intelhad_channel_allocation(struct snd_intelhad *intelhaddata,
+					int channels)
+{
+	int i;
+	int ca = 0;
+	int spk_mask = 0;
+
+	/*
+	* CA defaults to 0 for basic stereo audio
+	*/
+	if (channels <= 2)
+		return 0;
+
+	/*
+	* expand ELD's speaker allocation mask
+	*
+	* ELD tells the speaker mask in a compact(paired) form,
+	* expand ELD's notions to match the ones used by Audio InfoFrame.
+	*/
+
+	for (i = 0; i < ARRAY_SIZE(eld_speaker_allocation_bits); i++) {
+		if (intelhaddata->eeld.speaker_allocation_block & (1 << i))
+				spk_mask |= eld_speaker_allocation_bits[i];
+	}
+
+	/* search for the first working match in the CA table */
+	for (i = 0; i < ARRAY_SIZE(channel_allocations); i++) {
+		if (channels == channel_allocations[i].channels &&
+		(spk_mask & channel_allocations[i].spk_mask) ==
+				channel_allocations[i].spk_mask) {
+			ca = channel_allocations[i].ca_index;
+			break;
+		}
+	}
+
+	pr_debug("HDMI: select CA 0x%x for %d\n", ca, channels);
+
+	return ca;
+}
+
+/* from speaker bit mask to ALSA API channel position */
+static int spk_to_chmap(int spk)
+{
+	struct channel_map_table *t = map_tables;
+	for (; t->map; t++) {
+		if (t->spk_mask == spk)
+			return t->map;
+	}
+	return 0;
+}
+
+void had_build_channel_allocation_map(struct snd_intelhad *intelhaddata)
+{
+	int i = 0, c = 0;
+	int spk_mask = 0;
+	struct snd_pcm_chmap_elem *chmap;
+	uint8_t eld_high, eld_high_mask = 0xF0;
+	uint8_t high_msb;
+
+	chmap = kzalloc(sizeof(*chmap), GFP_KERNEL);
+	if (chmap == NULL) {
+		pr_err("kzalloc returned null in %s\n", __func__);
+		intelhaddata->chmap->chmap = NULL;
+		return;
+	}
+
+	had_get_caps(HAD_GET_ELD, &intelhaddata->eeld);
+
+	pr_debug("eeld.speaker_allocation_block = %x\n",
+			intelhaddata->eeld.speaker_allocation_block);
+
+	/* WA: Fix the max channel supported to 8 */
+
+	/* Sink may support more than 8 channels, if eld_high has more than
+	 * one bit set. SOC supports max 8 channels.
+	 * Refer eld_speaker_allocation_bits, for sink speaker allocation */
+
+	/* if 0x2F < eld < 0x4F fall back to 0x2f, else fall back to 0x4F */
+	eld_high = intelhaddata->eeld.speaker_allocation_block & eld_high_mask;
+	if ((eld_high & (eld_high -1)) && (eld_high > 0x1F)) {
+		/* eld_high & (eld_high-1): if more than 1 bit set */
+		/* 0x1F: 7 channels */
+		for (i = 1; i < 4; i++) {
+			high_msb = eld_high & (0x80 >> i);
+			if (high_msb) {
+				intelhaddata->eeld.speaker_allocation_block &=
+					high_msb | 0xF;
+				break;
+			}
+		}
+	}
+
+	for (i = 0; i < ARRAY_SIZE(eld_speaker_allocation_bits); i++) {
+		if (intelhaddata->eeld.speaker_allocation_block & (1 << i))
+				spk_mask |= eld_speaker_allocation_bits[i];
+	}
+
+	for (i = 0; i < ARRAY_SIZE(channel_allocations); i++) {
+		if (spk_mask == channel_allocations[i].spk_mask) {
+			for (c = 0; c < channel_allocations[i].channels; c++) {
+				chmap->map[c] = spk_to_chmap(
+					channel_allocations[i].speakers[(MAX_SPEAKERS - 1)-c]);
+			}
+			chmap->channels = channel_allocations[i].channels;
+			intelhaddata->chmap->chmap = chmap;
+			break;
+		}
+	}
+	if (i >= ARRAY_SIZE(channel_allocations))
+		kfree(chmap);
+}
+
+/*
+ ** ALSA API channel-map control callbacks
+ **/
+static int had_chmap_ctl_info(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_info *uinfo)
+{
+	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
+	struct snd_intelhad *intelhaddata = info->private_data;
+	if (intelhaddata->drv_status == HAD_DRV_DISCONNECTED)
+		return -ENODEV;
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+	uinfo->count = HAD_MAX_CHANNEL;
+	uinfo->value.integer.min = 0;
+	uinfo->value.integer.max = SNDRV_CHMAP_LAST;
+	return 0;
+}
+
+#ifndef USE_ALSA_DEFAULT_TLV
+static int had_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag,
+				unsigned int size, unsigned int __user *tlv)
+{
+	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
+	struct snd_intelhad *intelhaddata = info->private_data;
+	if (intelhaddata->drv_status == HAD_DRV_DISCONNECTED)
+		return -ENODEV;
+
+	/* TODO: Fix for query channel map */
+	return -EPERM;
+}
+#endif
+
+static int had_chmap_ctl_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
+	struct snd_intelhad *intelhaddata = info->private_data;
+	int i = 0;
+	const struct snd_pcm_chmap_elem *chmap;
+
+	if (intelhaddata->drv_status == HAD_DRV_DISCONNECTED)
+		return -ENODEV;
+	if (intelhaddata->chmap->chmap ==  NULL)
+		return -ENODATA;
+	chmap = intelhaddata->chmap->chmap;
+	for (i = 0; i < chmap->channels; i++) {
+		ucontrol->value.integer.value[i] = chmap->map[i];
+		pr_debug("chmap->map[%d] = %d\n", i, chmap->map[i]);
+	}
+
+	return 0;
+}
+
+static int had_chmap_ctl_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	/* TODO: Get channel map and set swap register */
+	return -EPERM;
+}
+
+static int had_register_chmap_ctls(struct snd_intelhad *intelhaddata,
+						struct snd_pcm *pcm)
+{
+	int err = 0;
+	err = snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
+			NULL, 0, (unsigned long)intelhaddata,
+			&intelhaddata->chmap);
+	if (err < 0)
+		return err;
+
+	intelhaddata->chmap->private_data = intelhaddata;
+	intelhaddata->kctl = intelhaddata->chmap->kctl;
+	intelhaddata->kctl->info = had_chmap_ctl_info;
+	intelhaddata->kctl->get = had_chmap_ctl_get;
+	intelhaddata->kctl->put = had_chmap_ctl_put;
+#ifndef USE_ALSA_DEFAULT_TLV
+	intelhaddata->kctl->tlv.c = had_chmap_ctl_tlv;
+#endif
+	intelhaddata->chmap->chmap = NULL;
+	return 0;
+}
+
+/**
+ * hdmi_audio_basic_audio_ctl_get - returns if audio is supported
+ *
+ * @kcontrol: kernel control structure
+ * @ucontrol: user control structure (contains i/o data)
+ *
+ */
+static int hdmi_audio_basic_audio_ctl_get(struct snd_kcontrol *kcontrol,
+					  struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_intelhad *intelhaddata = snd_kcontrol_chip(kcontrol);
+
+	if (intelhaddata->drv_status == HAD_DRV_DISCONNECTED)
+		return -ENODEV;
+	/*
+	 * Since eld has no basic audio information, we look at
+	 * spk_alloc to see if it's non-zero as an indiction of audio
+	 * support.
+	 */
+	if (intelhaddata->eeld.speaker_allocation_block == 0)
+		ucontrol->value.integer.value[0] = 0;
+	else
+		ucontrol->value.integer.value[0] = 1;
+
+	return 0;
+}
+
+/**
+ * hdmi_audio_speaker_alloc_ctl_get - returns the speaker allocation map
+ *
+ * @kcontrol: kernel control structure
+ * @ucontrol: user control structure (contains i/o data)
+ *
+ */
+static int hdmi_audio_speaker_alloc_ctl_get(struct snd_kcontrol *kcontrol,
+					    struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_intelhad *intelhaddata = snd_kcontrol_chip(kcontrol);
+
+	if (intelhaddata->drv_status == HAD_DRV_DISCONNECTED)
+		return -ENODEV;
+
+	ucontrol->value.integer.value[0] = intelhaddata->eeld.speaker_allocation_block;
+
+	return 0;
+}
+
+/**
+ * hdmi_audio_qmode_max_ch_cnt_ctl_get- returns the max channel count for the
+ * mode being queried. The mode to be queried is set via
+ * hdmi_audio_mode_to_query_ctl_put()
+ *
+ * @kcontrol: kernel control structure
+ * @ucontrol: user control structure (contains i/o data)
+ *
+ */
+static int hdmi_audio_qmode_max_ch_cnt_ctl_get(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_intelhad *intelhaddata = snd_kcontrol_chip(kcontrol);
+	otm_hdmi_sad_t *sad;
+
+	if (intelhaddata->drv_status == HAD_DRV_DISCONNECTED)
+		return -ENODEV;
+
+	sad = (otm_hdmi_sad_t *)intelhaddata->eeld.mn_sand_sads;
+	sad += intelhaddata->audio_mode_to_query;
+
+	ucontrol->value.integer.value[0] = sad->max_channels + 1;
+
+	return 0;
+}
+
+/**
+ * hdmi_audio_qmode_sample_rates_ctl_get - returns the sample rate mask for the
+ * mode being queried. The mode to be queried is set via
+ * hdmi_audio_mode_to_query_ctl_put()
+ *
+ * @kcontrol: kernel control structure
+ * @ucontrol: user control structure (contains i/o data)
+ *
+ */
+static int hdmi_audio_qmode_sample_rates_ctl_get(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_intelhad *intelhaddata = snd_kcontrol_chip(kcontrol);
+	otm_hdmi_sad_t *sad;
+	int rates;
+	int i;
+
+	if (intelhaddata->drv_status == HAD_DRV_DISCONNECTED)
+		return -ENODEV;
+
+	sad = (otm_hdmi_sad_t *)intelhaddata->eeld.mn_sand_sads;
+	sad += intelhaddata->audio_mode_to_query;
+
+	rates = 0;
+	for (i = 0; i < 7; i++) {
+		if (sad->byte2 & (1 << i)) {
+			rates |= cea_sampling_frequencies[i + 1];
+		}
+	}
+
+	ucontrol->value.integer.value[0] = rates;
+
+	return 0;
+}
+
+/**
+ * hdmi_audio_qmode_bps_ctl_get - returns the sample sizes for the
+ * mode being queried. The mode to be queried is set via
+ * hdmi_audio_mode_to_query_ctl_put()
+ *
+ * @kcontrol: kernel control structure
+ * @ucontrol: user control structure (contains i/o data)
+ *
+ */
+static int hdmi_audio_qmode_bps_ctl_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_intelhad *intelhaddata = snd_kcontrol_chip(kcontrol);
+	otm_hdmi_sad_t *sad;
+	int sample_bits;
+	int i;
+
+	if (intelhaddata->drv_status == HAD_DRV_DISCONNECTED)
+		return -ENODEV;
+
+	sad = (otm_hdmi_sad_t *)intelhaddata->eeld.mn_sand_sads;
+	sad += intelhaddata->audio_mode_to_query;
+
+	sample_bits = 0;
+	if (sad->audio_format_code == AUDIO_CODING_TYPE_LPCM) {
+		for (i = 0; i < 3; i++) {
+			if (sad->byte3 & (1 << i)) {
+				sample_bits |= cea_sample_sizes[i + 1];
+			}
+		}
+	}
+	ucontrol->value.integer.value[0] = sample_bits;
+
+	return 0;
+}
+
+/**
+ * hdmi_audio_qmode_comp_br_ctl_get - returns the compressed bitrate for the
+ * mode being queried. The mode to be queried is set via
+ * hdmi_audio_mode_to_query_ctl_put()
+ *
+ * @kcontrol: kernel control structure
+ * @ucontrol: user control structure (contains i/o data)
+ *
+ */
+static int hdmi_audio_qmode_comp_br_ctl_get(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_intelhad *intelhaddata = snd_kcontrol_chip(kcontrol);
+	otm_hdmi_sad_t *sad;
+	int max_bitrate;
+
+	if (intelhaddata->drv_status == HAD_DRV_DISCONNECTED)
+		return -ENODEV;
+
+	sad = (otm_hdmi_sad_t *)intelhaddata->eeld.mn_sand_sads;
+	sad += intelhaddata->audio_mode_to_query;
+
+	switch(sad->audio_format_code) {
+		case AUDIO_CODING_TYPE_AC3:
+		case AUDIO_CODING_TYPE_MPEG1:
+		case AUDIO_CODING_TYPE_MP3:
+		case AUDIO_CODING_TYPE_MPEG2:
+		case AUDIO_CODING_TYPE_AACLC:
+		case AUDIO_CODING_TYPE_DTS:
+		case AUDIO_CODING_TYPE_ATRAC:
+			max_bitrate = sad->byte2 * 8000;
+			break;
+		default:
+			max_bitrate = 0;
+			break;
+	}
+
+	ucontrol->value.integer.value[0] = max_bitrate;
+
+	return 0;
+}
+
+/**
+ * hdmi_audio_qmode_format_ctl_get - returns the supported format for the
+ * mode being queried. The mode to be queried is set via
+ * hdmi_audio_mode_to_query_ctl_put()
+ *
+ * @kcontrol: kernel control structure
+ * @ucontrol: user control structure (contains i/o data)
+ *
+ */
+static int hdmi_audio_qmode_format_ctl_get(struct snd_kcontrol *kcontrol,
+					   struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_intelhad *intelhaddata = snd_kcontrol_chip(kcontrol);
+	otm_hdmi_sad_t *sad;
+
+	if (intelhaddata->drv_status == HAD_DRV_DISCONNECTED)
+		return -ENODEV;
+
+	sad = (otm_hdmi_sad_t *)intelhaddata->eeld.mn_sand_sads;
+	sad += intelhaddata->audio_mode_to_query;
+
+	ucontrol->value.integer.value[0] = sad->audio_format_code;
+
+	return 0;
+}
+
+/**
+ * hdmi_audio_mode_to_query_ctl_get - returns the mode being queried.
+ *
+ * @kcontrol: kernel control structure
+ * @ucontrol: user control structure (contains i/o data)
+ *
+ */
+static int hdmi_audio_mode_to_query_ctl_get(struct snd_kcontrol *kcontrol,
+					    struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_intelhad *intelhaddata = snd_kcontrol_chip(kcontrol);
+
+	if (intelhaddata->drv_status == HAD_DRV_DISCONNECTED)
+		return -ENODEV;
+
+	ucontrol->value.integer.value[0] = intelhaddata->audio_mode_to_query;
+
+	return 0;
+}
+
+/**
+ * hdmi_audio_mode_to_query_ctl_put - sets the mode to be queried.
+ *
+ * @kcontrol: kernel control structure
+ * @ucontrol: user control structure (contains i/o data)
+ *
+ */
+static int hdmi_audio_mode_to_query_ctl_put(struct snd_kcontrol *kcontrol,
+					    struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_intelhad *intelhaddata = snd_kcontrol_chip(kcontrol);
+
+	if (intelhaddata->drv_status == HAD_DRV_DISCONNECTED)
+		return -ENODEV;
+
+	if (ucontrol->value.integer.value[0] >= intelhaddata->eeld.sadc) {
+		pr_warn("audio mode value %lu invalid.  should be 0 to %d\n",
+			ucontrol->value.integer.value[0],
+			intelhaddata->eeld.sadc);
+		return -EINVAL;
+	}
+
+	intelhaddata->audio_mode_to_query = ucontrol->value.integer.value[0];
+
+	return 0;
+}
+
+/**
+ * hdmi_audio_mode_cnt_ctl_get - returns the number of audio modes in the ELD
+ *
+ * @kcontrol: kernel control structure
+ * @ucontrol: user control structure (contains i/o data)
+ *
+ */
+static int hdmi_audio_mode_cnt_ctl_get(struct snd_kcontrol *kcontrol,
+				       struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_intelhad *intelhaddata = snd_kcontrol_chip(kcontrol);
+
+	if (intelhaddata->drv_status == HAD_DRV_DISCONNECTED)
+		return -ENODEV;
+
+	ucontrol->value.integer.value[0] = intelhaddata->eeld.sadc;
+
+	return 0;
+}
+
+#define MIXER_INFO_FUNC(_sym_name, _type, _min, _max) \
+static int hdmi_audio_##_sym_name##_ctl_info(struct snd_kcontrol *kcontrol, \
+					     struct snd_ctl_elem_info *uinfo) \
+{ \
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_##_type ; \
+	uinfo->count = 1; \
+	uinfo->value.integer.min = _min; \
+	uinfo->value.integer.max = _max; \
+	return 0; \
+}
+
+#define MIXER_CONTROL(_ctl_name, _sym_name, _access, _get, _put) \
+{ \
+	.access = _access | SNDRV_CTL_ELEM_ACCESS_VOLATILE, \
+	.iface = SNDRV_CTL_ELEM_IFACE_PCM, \
+	.name = _ctl_name, \
+	.info = hdmi_audio_##_sym_name##_ctl_info, \
+	.get = _get, \
+	.put = _put, \
+}
+
+#define RO_MIXER_CONTROL(_ctl_name, _sym_name) \
+	MIXER_CONTROL(_ctl_name, _sym_name, \
+		SNDRV_CTL_ELEM_ACCESS_READ, \
+		hdmi_audio_##_sym_name##_ctl_get, \
+		NULL)
+
+#define RW_MIXER_CONTROL(_ctl_name, _sym_name) \
+	MIXER_CONTROL(_ctl_name, _sym_name, \
+		SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_WRITE, \
+		hdmi_audio_##_sym_name##_ctl_get, \
+		hdmi_audio_##_sym_name##_ctl_put)
+
+MIXER_INFO_FUNC(basic_audio, BOOLEAN, 0, 1);
+MIXER_INFO_FUNC(speaker_alloc, INTEGER, 0, 0x3FF);
+MIXER_INFO_FUNC(mode_cnt, INTEGER, 0, 0x7FFFFFFF);
+MIXER_INFO_FUNC(mode_to_query, INTEGER, 0, 0x7FFFFFFF);
+MIXER_INFO_FUNC(qmode_format, INTEGER, AUDIO_CODING_TYPE_LPCM,
+                                       AUDIO_CODING_TYPE_MPEG_SURROUND);
+MIXER_INFO_FUNC(qmode_max_ch_cnt, INTEGER, 0, 8);
+MIXER_INFO_FUNC(qmode_sample_rates, INTEGER, 0, SUPPORTED_RATES);
+MIXER_INFO_FUNC(qmode_bps, INTEGER, 0, (AC_SUPPCM_BITS_24 | \
+                                        AC_SUPPCM_BITS_20 | \
+                                        AC_SUPPCM_BITS_16));
+MIXER_INFO_FUNC(qmode_comp_br, INTEGER, 0, 0x7FFFFFFF);
+
+static struct snd_kcontrol_new hdmi_audio_ctls[] = {
+	RO_MIXER_CONTROL("Basic Audio Supported", basic_audio),
+	RO_MIXER_CONTROL("Speaker Allocation", speaker_alloc),
+	RO_MIXER_CONTROL("Audio Mode Count", mode_cnt),
+	RW_MIXER_CONTROL("Audio Mode To Query", mode_to_query),
+	RO_MIXER_CONTROL("Query Mode : Format", qmode_format),
+	RO_MIXER_CONTROL("Query Mode : Max Ch Count", qmode_max_ch_cnt),
+	RO_MIXER_CONTROL("Query Mode : Sample Rate Mask",
+			qmode_sample_rates),
+	RO_MIXER_CONTROL("Query Mode : PCM Bits/Sample Mask", qmode_bps),
+	RO_MIXER_CONTROL("Query Mode : Max Compressed Bitrate",
+			qmode_comp_br),
+};
+
+static int had_create_audio_ctls(struct snd_card *card, struct snd_intelhad *intelhaddata)
+{
+	struct snd_kcontrol *kctl;
+	int err;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(hdmi_audio_ctls); i++) {
+		kctl = snd_ctl_new1(&hdmi_audio_ctls[i], intelhaddata);
+		if (!kctl) {
+			return -ENOMEM;
+		}
+
+		err = snd_ctl_add(card, kctl);
+		if (err < 0) {
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * snd_intelhad_prog_dip_v1 - to initialize Data Island Packets registers
+ *
+ * @substream:substream for which the prepare function is called
+ * @intelhaddata:substream private data
+ *
+ * This function is called in the prepare callback
+ */
+static void snd_intelhad_prog_dip_v1(struct snd_pcm_substream *substream,
+				struct snd_intelhad *intelhaddata)
+{
+	int i;
+	union aud_ctrl_st ctrl_state = {.ctrl_val = 0};
+	union aud_info_frame2 frame2 = {.fr2_val = 0};
+	union aud_info_frame3 frame3 = {.fr3_val = 0};
+	u8 checksum = 0;
+	int channels;
+
+	channels = substream->runtime->channels;
+
+	had_write_register(AUD_CNTL_ST, ctrl_state.ctrl_val);
+
+	frame2.fr2_regx.chnl_cnt = substream->runtime->channels - 1;
+
+	frame3.fr3_regx.chnl_alloc = snd_intelhad_channel_allocation(
+					intelhaddata, channels);
+
+	/*Calculte the byte wide checksum for all valid DIP words*/
+	for (i = 0; i < BYTES_PER_WORD; i++)
+		checksum += (INFO_FRAME_WORD1 >> i*BITS_PER_BYTE) & MASK_BYTE0;
+	for (i = 0; i < BYTES_PER_WORD; i++)
+		checksum += (frame2.fr2_val >> i*BITS_PER_BYTE) & MASK_BYTE0;
+	for (i = 0; i < BYTES_PER_WORD; i++)
+		checksum += (frame3.fr3_val >> i*BITS_PER_BYTE) & MASK_BYTE0;
+
+	frame2.fr2_regx.chksum = -(checksum);
+
+	had_write_register(AUD_HDMIW_INFOFR, INFO_FRAME_WORD1);
+	had_write_register(AUD_HDMIW_INFOFR, frame2.fr2_val);
+	had_write_register(AUD_HDMIW_INFOFR, frame3.fr3_val);
+
+	/* program remaining DIP words with zero */
+	for (i = 0; i < HAD_MAX_DIP_WORDS-VALID_DIP_WORDS; i++)
+		had_write_register(AUD_HDMIW_INFOFR, 0x0);
+
+	ctrl_state.ctrl_regx.dip_freq = 1;
+	ctrl_state.ctrl_regx.dip_en_sta = 1;
+	had_write_register(AUD_CNTL_ST, ctrl_state.ctrl_val);
+}
+
+/**
+ * snd_intelhad_prog_buffer - programs buffer
+ * address and length registers
+ *
+ * @substream:substream for which the prepare function is called
+ * @intelhaddata:substream private data
+ *
+ * This function programs ring buffer address and length into registers.
+ */
+int snd_intelhad_prog_buffer(struct snd_intelhad *intelhaddata,
+					int start, int end)
+{
+	u32 ring_buf_addr, ring_buf_size, period_bytes;
+	u8 i, num_periods;
+	struct snd_pcm_substream *substream;
+
+	substream = intelhaddata->stream_info.had_substream;
+	if (!substream) {
+		pr_err("substream is NULL\n");
+		dump_stack();
+		return 0;
+	}
+
+	ring_buf_addr = substream->runtime->dma_addr;
+	ring_buf_size = snd_pcm_lib_buffer_bytes(substream);
+	intelhaddata->stream_info.ring_buf_size = ring_buf_size;
+	period_bytes = frames_to_bytes(substream->runtime,
+				substream->runtime->period_size);
+	num_periods = substream->runtime->periods;
+
+	/* buffer addr should  be 64 byte aligned, period bytes
+	 will be used to calculate addr offset*/
+	period_bytes &= ~0x3F;
+
+	/* Hardware supports MAX_PERIODS buffers */
+	if (end >= HAD_MAX_PERIODS)
+		return -EINVAL;
+
+	for (i = start; i <= end; i++) {
+		/* Program the buf registers with addr and len */
+		intelhaddata->buf_info[i].buf_addr = ring_buf_addr +
+							 (i * period_bytes);
+		if (i < num_periods-1)
+			intelhaddata->buf_info[i].buf_size = period_bytes;
+		else
+			intelhaddata->buf_info[i].buf_size = ring_buf_size -
+							(period_bytes*i);
+
+		had_write_register(AUD_BUF_A_ADDR + (i * HAD_REG_WIDTH),
+					intelhaddata->buf_info[i].buf_addr |
+					BIT(0) | BIT(1));
+		had_write_register(AUD_BUF_A_LENGTH + (i * HAD_REG_WIDTH),
+					period_bytes);
+		intelhaddata->buf_info[i].is_valid = true;
+	}
+	pr_debug("%s:buf[%d-%d] addr=%#x  and size=%d\n", __func__, start, end,
+			intelhaddata->buf_info[start].buf_addr,
+			intelhaddata->buf_info[start].buf_size);
+	intelhaddata->valid_buf_cnt = num_periods;
+	return 0;
+}
+
+inline int snd_intelhad_read_len(struct snd_intelhad *intelhaddata)
+{
+	int i, retval = 0;
+	u32 len[4];
+
+	for (i = 0; i < 4 ; i++) {
+		had_read_register(AUD_BUF_A_LENGTH + (i * HAD_REG_WIDTH),
+					&len[i]);
+		if (!len[i])
+			retval++;
+	}
+	if (retval != 1) {
+		for (i = 0; i < 4 ; i++)
+			pr_debug("buf[%d] size=%d\n", i, len[i]);
+	}
+
+	return retval;
+}
+
+/**
+ * snd_intelhad_prog_cts_v1 - Program HDMI audio CTS value
+ *
+ * @aud_samp_freq: sampling frequency of audio data
+ * @tmds: sampling frequency of the display data
+ * @n_param: N value, depends on aud_samp_freq
+ * @intelhaddata:substream private data
+ *
+ * Program CTS register based on the audio and display sampling frequency
+ */
+static void snd_intelhad_prog_cts_v1(u32 aud_samp_freq, u32 tmds, u32 n_param,
+				struct snd_intelhad *intelhaddata)
+{
+	u32 cts_val;
+	u64 dividend, divisor;
+
+	/* Calculate CTS according to HDMI 1.3a spec*/
+	dividend = (u64)tmds * n_param*1000;
+	divisor = 128 * aud_samp_freq;
+	cts_val = div64_u64(dividend, divisor);
+	pr_debug("TMDS value=%d, N value=%d, CTS Value=%d\n",
+			tmds, n_param, cts_val);
+	had_write_register(AUD_HDMI_CTS, (BIT(20) | cts_val));
+}
+
+static int had_calculate_n_value(u32 aud_samp_freq)
+{
+	s32 n_val;
+
+	/* Select N according to HDMI 1.3a spec*/
+	switch (aud_samp_freq) {
+	case AUD_SAMPLE_RATE_32:
+		n_val = 4096;
+	break;
+
+	case AUD_SAMPLE_RATE_44_1:
+		n_val = 6272;
+	break;
+
+	case AUD_SAMPLE_RATE_48:
+		n_val = 6144;
+	break;
+
+	case AUD_SAMPLE_RATE_88_2:
+		n_val = 12544;
+	break;
+
+	case AUD_SAMPLE_RATE_96:
+		n_val = 12288;
+	break;
+
+	case AUD_SAMPLE_RATE_176_4:
+		n_val = 25088;
+	break;
+
+	case HAD_MAX_RATE:
+		n_val = 24576;
+	break;
+
+	default:
+		n_val = -EINVAL;
+	break;
+	}
+	return n_val;
+}
+
+/**
+ * snd_intelhad_prog_n_v1 - Program HDMI audio N value
+ *
+ * @aud_samp_freq: sampling frequency of audio data
+ * @n_param: N value, depends on aud_samp_freq
+ * @intelhaddata:substream private data
+ *
+ * This function is called in the prepare callback.
+ * It programs based on the audio and display sampling frequency
+ */
+static int snd_intelhad_prog_n_v1(u32 aud_samp_freq, u32 *n_param,
+				struct snd_intelhad *intelhaddata)
+{
+	s32 n_val;
+
+	n_val =	had_calculate_n_value(aud_samp_freq);
+
+	if (n_val < 0)
+		return n_val;
+
+	had_write_register(AUD_N_ENABLE, (BIT(20) | n_val));
+	*n_param = n_val;
+	return 0;
+}
+
+static void had_clear_underrun_intr_v1(struct snd_intelhad *intelhaddata)
+{
+	u32 hdmi_status, i = 0;
+
+	/* Handle Underrun interrupt within Audio Unit */
+	had_write_register(AUD_CONFIG, 0);
+	/* Reset buffer pointers */
+	had_write_register(AUD_HDMI_STATUS, 1);
+	had_write_register(AUD_HDMI_STATUS, 0);
+	/**
+	 * The interrupt status 'sticky' bits might not be cleared by
+	 * setting '1' to that bit once...
+	 */
+	do { /* clear bit30, 31 AUD_HDMI_STATUS */
+		had_read_register(AUD_HDMI_STATUS, &hdmi_status);
+		pr_debug("HDMI status =0x%x\n", hdmi_status);
+		if (hdmi_status & AUD_CONFIG_MASK_UNDERRUN) {
+			i++;
+			hdmi_status &= (AUD_CONFIG_MASK_SRDBG |
+					AUD_CONFIG_MASK_FUNCRST);
+			hdmi_status |= ~AUD_CONFIG_MASK_UNDERRUN;
+			had_write_register(AUD_HDMI_STATUS, hdmi_status);
+		} else
+			break;
+	} while (i < MAX_CNT);
+	if (i >= MAX_CNT)
+		pr_err("Unable to clear UNDERRUN bits\n");
+	return;
+}
+
+/**
+* snd_intelhad_open - stream initializations are done here
+* @substream:substream for which the stream function is called
+*
+* This function is called whenever a PCM stream is opened
+*/
+static int snd_intelhad_open(struct snd_pcm_substream *substream)
+{
+	struct snd_intelhad *intelhaddata;
+	struct snd_pcm_runtime *runtime;
+	struct had_stream_pvt *stream;
+	struct had_pvt_data *had_stream;
+	int retval;
+
+	pr_debug("snd_intelhad_open called\n");
+	intelhaddata = snd_pcm_substream_chip(substream);
+	had_stream = intelhaddata->private_data;
+	runtime = substream->runtime;
+
+	pm_runtime_get(intelhaddata->dev);
+
+	/*
+	 * HDMI driver might suspend the device already,
+	 * so we return it on
+	 */
+	if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
+			OSPM_UHB_FORCE_POWER_ON)) {
+		pr_err("HDMI device can't be turned on\n");
+		retval = -ENODEV;
+		goto exit_put_handle;
+	}
+
+	if (had_get_hwstate(intelhaddata)) {
+		pr_err("%s: HDMI cable plugged-out\n", __func__);
+		retval = -ENODEV;
+		goto exit_ospm_handle;
+	}
+
+	/* Check, if device already in use */
+	if (runtime->private_data) {
+		pr_err("Device already in use\n");
+		retval = -EBUSY;
+		goto exit_ospm_handle;
+	}
+
+	/* set the runtime hw parameter with local snd_pcm_hardware struct */
+	runtime->hw = snd_intel_hadstream;
+
+	stream = kzalloc(sizeof(*stream), GFP_KERNEL);
+	if (!stream) {
+		retval = -ENOMEM;
+		goto exit_ospm_handle;
+	}
+	stream->stream_status = STREAM_INIT;
+	runtime->private_data = stream;
+
+	retval = snd_pcm_hw_constraint_integer(runtime,
+			 SNDRV_PCM_HW_PARAM_PERIODS);
+	if (retval < 0) {
+		goto exit_err;
+	}
+
+	/* Make sure, that the period size is always aligned
+	 * 64byte boundary
+	 */
+	retval = snd_pcm_hw_constraint_step(substream->runtime, 0,
+			SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 64);
+	if (retval < 0) {
+		pr_err("%s:step_size=64 failed,err=%d\n", __func__, retval);
+		goto exit_err;
+	}
+
+	return retval;
+exit_err:
+	kfree(stream);
+exit_ospm_handle:
+	ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+exit_put_handle:
+	pm_runtime_put(intelhaddata->dev);
+	runtime->private_data = NULL;
+	return retval;
+}
+
+/**
+* had_period_elapsed - updates the hardware pointer status
+* @had_substream:substream for which the stream function is called
+*
+*/
+static void had_period_elapsed(void *had_substream)
+{
+	struct snd_pcm_substream *substream = had_substream;
+	struct had_stream_pvt *stream;
+
+	if (!substream || !substream->runtime)
+		return;
+	stream = substream->runtime->private_data;
+	if (!stream)
+		return;
+
+	if (stream->stream_status != STREAM_RUNNING)
+		return;
+	snd_pcm_period_elapsed(substream);
+	return;
+}
+
+/**
+* snd_intelhad_init_stream - internal function to initialize stream info
+* @substream:substream for which the stream function is called
+*
+*/
+static int snd_intelhad_init_stream(struct snd_pcm_substream *substream)
+{
+	struct snd_intelhad *intelhaddata = snd_pcm_substream_chip(substream);
+
+	pr_debug("setting buffer ptr param\n");
+	intelhaddata->stream_info.period_elapsed = had_period_elapsed;
+	intelhaddata->stream_info.had_substream = substream;
+	intelhaddata->stream_info.buffer_ptr = 0;
+	intelhaddata->stream_info.buffer_rendered = 0;
+	intelhaddata->stream_info.sfreq = substream->runtime->rate;
+	return 0;
+}
+
+/**
+ * snd_intelhad_close- to free parameteres when stream is stopped
+ *
+ * @substream:  substream for which the function is called
+ *
+ * This function is called by ALSA framework when stream is stopped
+ */
+static int snd_intelhad_close(struct snd_pcm_substream *substream)
+{
+	struct snd_intelhad *intelhaddata;
+	struct snd_pcm_runtime *runtime;
+
+	pr_debug("snd_intelhad_close called\n");
+
+	intelhaddata = snd_pcm_substream_chip(substream);
+	runtime = substream->runtime;
+
+	if (!runtime->private_data) {
+		pr_debug("close() might have called after failed open");
+		return 0;
+	}
+
+	intelhaddata->stream_info.buffer_rendered = 0;
+	intelhaddata->stream_info.buffer_ptr = 0;
+	intelhaddata->stream_info.str_id = 0;
+	intelhaddata->stream_info.had_substream = NULL;
+
+	/* Check if following drv_status modification is required - VA */
+	if (intelhaddata->drv_status != HAD_DRV_DISCONNECTED)
+		intelhaddata->drv_status = HAD_DRV_CONNECTED;
+	kfree(runtime->private_data);
+	runtime->private_data = NULL;
+	ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+	pm_runtime_put(intelhaddata->dev);
+	return 0;
+}
+
+/**
+ * snd_intelhad_hw_params- to setup the hardware parameters
+ * like allocating the buffers
+ *
+ * @substream:  substream for which the function is called
+ * @hw_params: hardware parameters
+ *
+ * This function is called by ALSA framework when hardware params are set
+ */
+static int snd_intelhad_hw_params(struct snd_pcm_substream *substream,
+				    struct snd_pcm_hw_params *hw_params)
+{
+	unsigned long addr;
+	int pages, buf_size, retval;
+
+	BUG_ON(!hw_params);
+
+	buf_size = params_buffer_bytes(hw_params);
+	retval = snd_pcm_lib_malloc_pages(substream, buf_size);
+	if (retval < 0)
+		return retval;
+	pr_debug("%s:allocated memory = %d\n", __func__, buf_size);
+	/* mark the pages as uncached region */
+	addr = (unsigned long) substream->runtime->dma_area;
+	pages = (substream->runtime->dma_bytes + PAGE_SIZE - 1) / PAGE_SIZE;
+	retval = set_memory_uc(addr, pages);
+	if (retval) {
+		pr_err("set_memory_uc failed.Error:%d\n", retval);
+		return retval;
+	}
+	memset(substream->runtime->dma_area, 0, buf_size);
+
+	return retval;
+}
+
+/**
+ * snd_intelhad_hw_free- to release the resources allocated during
+ * hardware params setup
+ *
+ * @substream:  substream for which the function is called
+ *
+ * This function is called by ALSA framework before close callback.
+ *
+ */
+static int snd_intelhad_hw_free(struct snd_pcm_substream *substream)
+{
+	unsigned long addr;
+	u32 pages;
+
+	pr_debug("snd_intelhad_hw_free called\n");
+
+	/* mark back the pages as cached/writeback region before the free */
+	if (substream->runtime->dma_area != NULL) {
+		addr = (unsigned long) substream->runtime->dma_area;
+		pages = (substream->runtime->dma_bytes + PAGE_SIZE - 1) /
+								PAGE_SIZE;
+		set_memory_wb(addr, pages);
+		return snd_pcm_lib_free_pages(substream);
+	}
+	return 0;
+}
+
+/**
+* snd_intelhad_pcm_trigger - stream activities are handled here
+* @substream:substream for which the stream function is called
+* @cmd:the stream commamd thats requested from upper layer
+* This function is called whenever an a stream activity is invoked
+*/
+static int snd_intelhad_pcm_trigger(struct snd_pcm_substream *substream,
+					int cmd)
+{
+	int caps, retval = 0;
+	unsigned long flag_irq;
+	struct snd_intelhad *intelhaddata;
+	struct had_stream_pvt *stream;
+	struct had_pvt_data *had_stream;
+
+	intelhaddata = snd_pcm_substream_chip(substream);
+	stream = substream->runtime->private_data;
+	had_stream = intelhaddata->private_data;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+		pr_debug("Trigger Start\n");
+
+		/* Disable local INTRs till register prgmng is done */
+		if (had_get_hwstate(intelhaddata)) {
+			pr_err("_START: HDMI cable plugged-out\n");
+			retval = -ENODEV;
+			break;
+		}
+		stream->stream_status = STREAM_RUNNING;
+
+		spin_lock_irqsave(&intelhaddata->had_spinlock, flag_irq);
+		had_stream->stream_type = HAD_RUNNING_STREAM;
+		spin_unlock_irqrestore(&intelhaddata->had_spinlock, flag_irq);
+
+		/* Enable Audio */
+		/* ToDo: Need to enable UNDERRUN interrupts as well
+		   caps = HDMI_AUDIO_UNDERRUN | HDMI_AUDIO_BUFFER_DONE;
+		   */
+		caps = HDMI_AUDIO_BUFFER_DONE;
+		retval = had_set_caps(HAD_SET_ENABLE_AUDIO_INT, &caps);
+		retval = had_set_caps(HAD_SET_ENABLE_AUDIO, NULL);
+		intelhaddata->ops->enable_audio(substream, 1);
+
+		pr_debug("Processed _Start\n");
+
+		break;
+
+	case SNDRV_PCM_TRIGGER_STOP:
+		pr_debug("Trigger Stop\n");
+		spin_lock_irqsave(&intelhaddata->had_spinlock, flag_irq);
+		intelhaddata->stream_info.str_id = 0;
+		intelhaddata->curr_buf = 0;
+
+		/* Stop reporting BUFFER_DONE/UNDERRUN to above layers*/
+
+		had_stream->stream_type = HAD_INIT;
+		spin_unlock_irqrestore(&intelhaddata->had_spinlock, flag_irq);
+		/* Disable Audio */
+		/* ToDo: Need to disable UNDERRUN interrupts as well
+		   caps = HDMI_AUDIO_UNDERRUN | HDMI_AUDIO_BUFFER_DONE;
+		   */
+		caps = HDMI_AUDIO_BUFFER_DONE;
+		had_set_caps(HAD_SET_DISABLE_AUDIO_INT, &caps);
+		intelhaddata->ops->enable_audio(substream, 0);
+		/* Reset buffer pointers */
+		intelhaddata->ops->reset_audio(1);
+		intelhaddata->ops->reset_audio(0);
+		stream->stream_status = STREAM_DROPPED;
+		had_set_caps(HAD_SET_DISABLE_AUDIO, NULL);
+		break;
+
+	default:
+		retval = -EINVAL;
+	}
+	return retval;
+}
+
+/**
+* snd_intelhad_pcm_prepare- internal preparation before starting a stream
+*
+* @substream:  substream for which the function is called
+*
+* This function is called when a stream is started for internal preparation.
+*/
+static int snd_intelhad_pcm_prepare(struct snd_pcm_substream *substream)
+{
+	int retval;
+	u32 disp_samp_freq, n_param;
+	struct snd_intelhad *intelhaddata;
+	struct snd_pcm_runtime *runtime;
+	struct had_pvt_data *had_stream;
+
+	pr_debug("pcm_prepare called\n");
+
+	intelhaddata = snd_pcm_substream_chip(substream);
+	runtime = substream->runtime;
+	had_stream = intelhaddata->private_data;
+
+	if (had_get_hwstate(intelhaddata)) {
+		pr_err("%s: HDMI cable plugged-out\n", __func__);
+		snd_pcm_stop(substream, SNDRV_PCM_STATE_DISCONNECTED);
+		retval = -ENODEV;
+		goto prep_end;
+	}
+
+	pr_debug("period_size=%zd\n",
+				frames_to_bytes(runtime, runtime->period_size));
+	pr_debug("periods=%d\n", runtime->periods);
+	pr_debug("buffer_size=%zd\n", snd_pcm_lib_buffer_bytes(substream));
+	pr_debug("rate=%d\n", runtime->rate);
+	pr_debug("channels=%d\n", runtime->channels);
+
+	if (intelhaddata->stream_info.str_id) {
+		pr_debug("_prepare is called for existing str_id#%d\n",
+					intelhaddata->stream_info.str_id);
+		retval = snd_intelhad_pcm_trigger(substream,
+						SNDRV_PCM_TRIGGER_STOP);
+		return retval;
+	}
+
+	retval = snd_intelhad_init_stream(substream);
+	if (retval)
+		goto prep_end;
+
+
+	/* Get N value in KHz */
+	retval = had_get_caps(HAD_GET_SAMPLING_FREQ, &disp_samp_freq);
+	if (retval) {
+		pr_err("querying display sampling freq failed %#x\n", retval);
+		goto prep_end;
+	}
+
+	had_get_caps(HAD_GET_ELD, &intelhaddata->eeld);
+
+	retval = intelhaddata->ops->prog_n(substream->runtime->rate, &n_param,
+								intelhaddata);
+	if (retval) {
+		pr_err("programming N value failed %#x\n", retval);
+		goto prep_end;
+	}
+	intelhaddata->ops->prog_cts(substream->runtime->rate,
+					disp_samp_freq, n_param, intelhaddata);
+
+	intelhaddata->ops->prog_dip(substream, intelhaddata);
+
+	retval = intelhaddata->ops->audio_ctrl(substream, intelhaddata);
+
+	/* Prog buffer address */
+	retval = snd_intelhad_prog_buffer(intelhaddata,
+			HAD_BUF_TYPE_A, HAD_BUF_TYPE_D);
+
+	/* Program channel mapping in following order:
+	   FL, FR, C, LFE, RL, RR */
+
+	had_write_register(AUD_BUF_CH_SWAP, SWAP_LFE_CENTER);
+
+prep_end:
+	return retval;
+}
+
+/**
+ * snd_intelhad_pcm_pointer- to send the current buffer pointerprocessed by hw
+ *
+ * @substream:  substream for which the function is called
+ *
+ * This function is called by ALSA framework to get the current hw buffer ptr
+ * when a period is elapsed
+ */
+static snd_pcm_uframes_t snd_intelhad_pcm_pointer(
+					struct snd_pcm_substream *substream)
+{
+	struct snd_intelhad *intelhaddata;
+	u32 bytes_rendered = 0;
+
+	intelhaddata = snd_pcm_substream_chip(substream);
+
+	if (intelhaddata->flag_underrun) {
+		intelhaddata->flag_underrun = 0;
+		return SNDRV_PCM_POS_XRUN;
+	}
+
+	if (intelhaddata->stream_info.buffer_rendered)
+		div_u64_rem(intelhaddata->stream_info.buffer_rendered,
+			intelhaddata->stream_info.ring_buf_size,
+			&(bytes_rendered));
+
+	intelhaddata->stream_info.buffer_ptr = bytes_to_frames(
+						substream->runtime,
+						bytes_rendered);
+	return intelhaddata->stream_info.buffer_ptr;
+}
+
+/**
+* snd_intelhad_pcm_mmap- mmaps a kernel buffer to user space for copying data
+*
+* @substream:  substream for which the function is called
+* @vma:		struct instance of memory VMM memory area
+*
+* This function is called by OS when a user space component
+* tries to get mmap memory from driver
+*/
+static int snd_intelhad_pcm_mmap(struct snd_pcm_substream *substream,
+	struct vm_area_struct *vma)
+{
+
+	pr_debug("entry with prot:%s\n", __func__);
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	return remap_pfn_range(vma, vma->vm_start,
+			substream->dma_buffer.addr >> PAGE_SHIFT,
+			vma->vm_end - vma->vm_start, vma->vm_page_prot);
+}
+
+int hdmi_audio_mode_change(struct snd_pcm_substream *substream)
+{
+	int retval = 0;
+	u32 disp_samp_freq, n_param;
+	struct snd_intelhad *intelhaddata;
+
+	intelhaddata = snd_pcm_substream_chip(substream);
+
+	/* Disable Audio */
+	intelhaddata->ops->enable_audio(substream, 0);
+
+	/* Update CTS value */
+	retval = had_get_caps(HAD_GET_SAMPLING_FREQ, &disp_samp_freq);
+	if (retval) {
+		pr_err("querying display sampling freq failed %#x\n", retval);
+		goto out;
+	}
+
+	retval = intelhaddata->ops->prog_n(substream->runtime->rate, &n_param,
+								intelhaddata);
+	if (retval) {
+		pr_err("programming N value failed %#x\n", retval);
+		goto out;
+	}
+	intelhaddata->ops->prog_cts(substream->runtime->rate,
+					disp_samp_freq, n_param, intelhaddata);
+
+	/* Enable Audio */
+	intelhaddata->ops->enable_audio(substream, 1);
+
+out:
+	return retval;
+}
+
+/*PCM operations structure and the calls back for the same */
+struct snd_pcm_ops snd_intelhad_playback_ops = {
+	.open =		snd_intelhad_open,
+	.close =	snd_intelhad_close,
+	.ioctl =	snd_pcm_lib_ioctl,
+	.hw_params =	snd_intelhad_hw_params,
+	.hw_free =	snd_intelhad_hw_free,
+	.prepare =	snd_intelhad_pcm_prepare,
+	.trigger =	snd_intelhad_pcm_trigger,
+	.pointer =	snd_intelhad_pcm_pointer,
+	.mmap =	snd_intelhad_pcm_mmap,
+};
+
+/**
+ * snd_intelhad_create - to crete alsa card instance
+ *
+ * @intelhaddata: pointer to internal context
+ * @card: pointer to card
+ *
+ * This function is called when the hdmi cable is plugged in
+ */
+static int snd_intelhad_create(
+		struct snd_intelhad *intelhaddata,
+		struct snd_card *card)
+{
+	int retval;
+	static struct snd_device_ops ops = {
+	};
+
+	BUG_ON(!intelhaddata);
+	/* ALSA api to register the device */
+	retval = snd_device_new(card, SNDRV_DEV_LOWLEVEL, intelhaddata, &ops);
+	return retval;
+}
+/**
+ * snd_intelhad_pcm_free - to free the memory allocated
+ *
+ * @pcm: pointer to pcm instance
+ * This function is called when the device is removed
+ */
+static void snd_intelhad_pcm_free(struct snd_pcm *pcm)
+{
+	pr_debug("Freeing PCM preallocated pages\n");
+	snd_pcm_lib_preallocate_free_for_all(pcm);
+}
+
+static int had_iec958_info(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_info *uinfo)
+{
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
+	uinfo->count = 1;
+	return 0;
+}
+
+static int had_iec958_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_intelhad *intelhaddata = snd_kcontrol_chip(kcontrol);
+	ucontrol->value.iec958.status[0] = (intelhaddata->aes_bits >> 0) & 0xff;
+	ucontrol->value.iec958.status[1] = (intelhaddata->aes_bits >> 8) & 0xff;
+	ucontrol->value.iec958.status[2] =
+					(intelhaddata->aes_bits >> 16) & 0xff;
+	ucontrol->value.iec958.status[3] =
+					(intelhaddata->aes_bits >> 24) & 0xff;
+	return 0;
+}
+static int had_iec958_mask_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.iec958.status[0] = 0xff;
+	ucontrol->value.iec958.status[1] = 0xff;
+	ucontrol->value.iec958.status[2] = 0xff;
+	ucontrol->value.iec958.status[3] = 0xff;
+	return 0;
+}
+static int had_iec958_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	unsigned int val;
+	struct snd_intelhad *intelhaddata = snd_kcontrol_chip(kcontrol);
+
+	pr_debug("entered had_iec958_put\n");
+	val = (ucontrol->value.iec958.status[0] << 0) |
+		(ucontrol->value.iec958.status[1] << 8) |
+		(ucontrol->value.iec958.status[2] << 16) |
+		(ucontrol->value.iec958.status[3] << 24);
+	if (intelhaddata->aes_bits != val) {
+		intelhaddata->aes_bits = val;
+		return 1;
+	}
+	return 1;
+}
+
+static struct snd_kcontrol_new had_control_iec958_mask = {
+	.access =   SNDRV_CTL_ELEM_ACCESS_READ,
+	.iface =    SNDRV_CTL_ELEM_IFACE_PCM,
+	.name =     SNDRV_CTL_NAME_IEC958("", PLAYBACK, MASK),
+	.info =     had_iec958_info, /* shared */
+	.get =      had_iec958_mask_get,
+};
+
+static struct snd_kcontrol_new had_control_iec958 = {
+	.iface =    SNDRV_CTL_ELEM_IFACE_PCM,
+	.name =	    SNDRV_CTL_NAME_IEC958("", PLAYBACK, DEFAULT),
+	.info =	    had_iec958_info,
+	.get =      had_iec958_get,
+	.put =	    had_iec958_put
+};
+
+static struct snd_intel_had_interface had_interface = {
+	.name =    "hdmi-audio",
+	.query =   hdmi_audio_query,
+	.suspend = hdmi_audio_suspend,
+	.resume =  hdmi_audio_resume,
+};
+
+static struct had_ops had_ops_v1 = {
+	.enable_audio = snd_intelhad_enable_audio_v1,
+	.reset_audio = snd_intelhad_reset_audio_v1,
+	.prog_n =	snd_intelhad_prog_n_v1,
+	.prog_cts =	snd_intelhad_prog_cts_v1,
+	.audio_ctrl =	snd_intelhad_prog_audio_ctrl_v1,
+	.prog_dip =	snd_intelhad_prog_dip_v1,
+	.handle_underrun =  had_clear_underrun_intr_v1,
+};
+
+/**
+ * hdmi_audio_probe - to create sound card instance for HDMI audio playabck
+ *
+ *@haddata: pointer to HAD private data
+ *@card_id: card for which probe is called
+ *
+ * This function is called when the hdmi cable is plugged in. This function
+ * creates and registers the sound card with ALSA
+ */
+static int hdmi_audio_probe(struct platform_device *devptr)
+{
+
+	int retval;
+	struct snd_pcm *pcm;
+	struct snd_card *card;
+	struct had_callback_ops ops_cb;
+	struct snd_intelhad *intelhaddata;
+	struct had_pvt_data *had_stream;
+	char *version = NULL; /* HDMI ops/Register set version.*/
+
+	pr_debug("Enter %s\n", __func__);
+
+	/* allocate memory for saving internal context and working */
+	intelhaddata = kzalloc(sizeof(*intelhaddata), GFP_KERNEL);
+	if (!intelhaddata) {
+		pr_err("mem alloc failed\n");
+		return -ENOMEM;
+	}
+
+	had_stream = kzalloc(sizeof(*had_stream), GFP_KERNEL);
+	if (!had_stream) {
+		pr_err("mem alloc failed\n");
+		retval = -ENOMEM;
+		goto free_haddata;
+	}
+
+	had_data = intelhaddata;
+	ops_cb.intel_had_event_call_back = had_event_handler;
+
+	/* registering with display driver to get access to display APIs */
+
+	retval = mid_hdmi_audio_setup(
+			ops_cb.intel_had_event_call_back,
+			&(intelhaddata->reg_ops),
+			&(intelhaddata->query_ops));
+	if (retval) {
+		pr_err("querying display driver APIs failed %#x\n", retval);
+		goto free_hadstream;
+	}
+	mutex_lock(&had_mutex);
+	spin_lock_init(&intelhaddata->had_spinlock);
+	intelhaddata->drv_status = HAD_DRV_DISCONNECTED;
+	/* create a card instance with ALSA framework */
+	retval = snd_card_create(hdmi_card_index, hdmi_card_id,
+				THIS_MODULE, 0, &card);
+	if (retval)
+		goto unlock_mutex;
+	intelhaddata->card = card;
+	intelhaddata->card_id = hdmi_card_id;
+	intelhaddata->card_index = card->number;
+	intelhaddata->private_data = had_stream;
+	intelhaddata->flag_underrun = 0;
+	intelhaddata->aes_bits = SNDRV_PCM_DEFAULT_CON_SPDIF;
+	strncpy(card->driver, INTEL_HAD, strlen(INTEL_HAD));
+	strncpy(card->shortname, INTEL_HAD, strlen(INTEL_HAD));
+
+	retval = snd_pcm_new(card, INTEL_HAD, PCM_INDEX, MAX_PB_STREAMS,
+						MAX_CAP_STREAMS, &pcm);
+	if (retval)
+		goto free_card;
+
+	/* setup private data which can be retrieved when required */
+	pcm->private_data = intelhaddata;
+	pcm->private_free = snd_intelhad_pcm_free;
+	pcm->info_flags = 0;
+	strncpy(pcm->name, card->shortname, strlen(card->shortname));
+	/* setup the ops for palyabck */
+	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK,
+			    &snd_intelhad_playback_ops);
+	/* allocate dma pages for ALSA stream operations
+	 * memory allocated is based on size, not max value
+	 * thus using same argument for max & size
+	 */
+	retval = snd_pcm_lib_preallocate_pages_for_all(pcm,
+			SNDRV_DMA_TYPE_DEV, card->dev,
+			HAD_MAX_BUFFER, HAD_MAX_BUFFER);
+	if (retval)
+		goto free_card;
+
+	/* internal function call to register device with ALSA */
+	retval = snd_intelhad_create(intelhaddata, card);
+	if (retval)
+		goto free_prealloc;
+
+	card->private_data = &intelhaddata;
+	retval = snd_card_register(card);
+	if (retval)
+		goto free_prealloc;
+
+	/* IEC958 controls */
+	retval = snd_ctl_add(card, snd_ctl_new1(&had_control_iec958_mask,
+						intelhaddata));
+	if (retval < 0)
+		goto free_prealloc;
+	retval = snd_ctl_add(card, snd_ctl_new1(&had_control_iec958,
+						intelhaddata));
+	if (retval < 0)
+		goto free_prealloc;
+
+	init_channel_allocations();
+
+	/* Register channel map controls */
+	retval = had_register_chmap_ctls(intelhaddata, pcm);
+	if (retval < 0)
+		goto free_prealloc;
+
+	retval = had_create_audio_ctls(card, intelhaddata);
+	if (retval < 0)   //TODO: Check error path
+		goto free_prealloc;
+
+	intelhaddata->dev = &devptr->dev;
+	pm_runtime_set_active(intelhaddata->dev);
+	pm_runtime_enable(intelhaddata->dev);
+
+	mutex_unlock(&had_mutex);
+	retval = mid_hdmi_audio_register(&had_interface, intelhaddata);
+	if (retval) {
+		pr_err("registering with display driver failed %#x\n", retval);
+		goto err;
+	}
+	intelhaddata->hw_silence = 1;
+	intelhaddata->audio_reg_base = 0x69000;
+	intelhaddata->ops = &had_ops_v1;
+	version = "v1";
+	had_debugfs_init(intelhaddata, version);
+	return retval;
+
+err:
+	pm_runtime_disable(intelhaddata->dev);
+	intelhaddata->dev = NULL;
+free_prealloc:
+	snd_pcm_lib_preallocate_free_for_all(pcm);
+free_card:
+	snd_card_free(card);
+unlock_mutex:
+	if (mutex_is_locked(&had_mutex))
+		mutex_unlock(&had_mutex);
+free_hadstream:
+	kfree(had_stream);
+free_haddata:
+	kfree(intelhaddata);
+	intelhaddata = NULL;
+	pr_err("Error returned from %s api %#x\n", __func__, retval);
+	return retval;
+}
+
+/**
+ * hdmi_audio_remove - removes the alsa card
+ *
+ *@haddata: pointer to HAD private data
+ *
+ * This function is called when the hdmi cable is un-plugged. This function
+ * free the sound card.
+ */
+static int hdmi_audio_remove(struct platform_device *devptr)
+{
+	struct snd_intelhad *intelhaddata = had_data;
+	int caps;
+
+	pr_debug("Enter %s\n", __func__);
+
+	if (!intelhaddata)
+		return 0;
+
+	if (intelhaddata->drv_status != HAD_DRV_DISCONNECTED) {
+		caps = HDMI_AUDIO_UNDERRUN | HDMI_AUDIO_BUFFER_DONE;
+		had_set_caps(HAD_SET_DISABLE_AUDIO_INT, &caps);
+		had_set_caps(HAD_SET_DISABLE_AUDIO, NULL);
+	}
+	snd_card_free(intelhaddata->card);
+	had_debugfs_exit(intelhaddata);
+	kfree(intelhaddata->private_data);
+	kfree(intelhaddata);
+	return 0;
+}
+
+static int had_pm_runtime_suspend(struct device *dev)
+{
+	return 0;
+}
+
+static int had_pm_runtime_resume(struct device *dev)
+{
+	return 0;
+}
+
+static int had_pm_runtime_idle(struct device *dev)
+{
+	return pm_schedule_suspend(dev, HAD_SUSPEND_DELAY);
+}
+
+const struct dev_pm_ops had_pm_ops = {
+	.runtime_idle = had_pm_runtime_idle,
+	.runtime_suspend = had_pm_runtime_suspend,
+	.runtime_resume = had_pm_runtime_resume,
+};
+
+static const struct acpi_device_id had_acpi_ids[] = {
+	{ "HAD0F28", 0 },
+	{ "HAD022A8", 0 },
+	{},
+};
+MODULE_DEVICE_TABLE(acpi, had_acpi_ids);
+
+static struct platform_driver had_driver = {
+	.probe =        hdmi_audio_probe,
+	.remove	=       hdmi_audio_remove,
+	.suspend =      NULL,
+	.resume =       NULL,
+	.driver		= {
+		.name	= HDMI_AUDIO_DRIVER,
+#ifdef CONFIG_PM
+		.pm	= &had_pm_ops,
+#endif
+		.acpi_match_table = ACPI_PTR(had_acpi_ids),
+	},
+};
+
+/*
+* alsa_card_intelhad_init- driver init function
+* This function is called when driver module is inserted
+*/
+static int __init alsa_card_intelhad_init(void)
+{
+	int retval;
+
+	pr_debug("Enter %s\n", __func__);
+
+	pr_info("******** HAD DRIVER loading.. Ver: %s\n",
+					HAD_DRIVER_VERSION);
+
+	retval = platform_driver_register(&had_driver);
+	if (retval < 0) {
+		pr_err("Platform driver register failed\n");
+		return retval;
+	}
+
+	pr_debug("init complete\n");
+	return retval;
+}
+
+/**
+* alsa_card_intelhad_exit- driver exit function
+* This function is called when driver module is removed
+*/
+static void __exit alsa_card_intelhad_exit(void)
+{
+	pr_debug("had_exit called\n");
+	platform_driver_unregister(&had_driver);
+}
+late_initcall(alsa_card_intelhad_init);
+module_exit(alsa_card_intelhad_exit);
diff --git a/drivers/external_drivers/intel_media/hdmi_audio/intel_mid_hdmi_audio.h b/drivers/external_drivers/intel_media/hdmi_audio/intel_mid_hdmi_audio.h
new file mode 100644
index 0000000..ea37420
--- /dev/null
+++ b/drivers/external_drivers/intel_media/hdmi_audio/intel_mid_hdmi_audio.h
@@ -0,0 +1,632 @@
+/*
+ *   intel_mid_hdmi_audio.h - Intel HDMI audio driver for MID
+ *
+ *  Copyright (C) 2010 Intel Corp
+ *  Authors:	Sailaja Bandarupalli <sailaja.bandarupalli@intel.com>
+ *		Ramesh Babu K V	<ramesh.babu@intel.com>
+ *		Vaibhav Agarwal <vaibhav.agarwal@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * ALSA driver for Intel MID HDMI audio controller
+ */
+#ifndef __INTEL_MID_HDMI_AUDIO_H
+#define __INTEL_MID_HDMI_AUDIO_H
+
+#include <linux/types.h>
+#include <sound/initval.h>
+#include <linux/version.h>
+#include <linux/pm_runtime.h>
+#include <sound/asoundef.h>
+#include <sound/control.h>
+#include <sound/pcm.h>
+#include <otm_hdmi_eld.h>
+#include <android_hdmi.h>
+#ifdef CONFIG_DRM_I915
+#include <hdmi_audio_if.h>
+#else
+#include <mdfld_hdmi_audio_if.h>
+#endif
+
+#define HAD_DRIVER_VERSION	"0.01.003"
+#define HAD_MAX_DEVICES		1
+#define HAD_MIN_CHANNEL		2
+#define HAD_MAX_CHANNEL		8
+#define HAD_NUM_OF_RING_BUFS	4
+
+/* Assume 192KHz, 8channel, 25msec period */
+#define HAD_MAX_BUFFER		(600*1024)
+#define HAD_MIN_BUFFER		(32*1024)
+#define HAD_MAX_PERIODS		4
+#define HAD_MIN_PERIODS		4
+#define HAD_MAX_PERIOD_BYTES	(HAD_MAX_BUFFER/HAD_MIN_PERIODS)
+#define HAD_MIN_PERIOD_BYTES	256
+#define HAD_FIFO_SIZE		0 /* fifo not being used */
+#define MAX_SPEAKERS		8
+/* TODO: Add own tlv when channel map is ported for user space */
+#define USE_ALSA_DEFAULT_TLV
+
+#define AUD_SAMPLE_RATE_32	32000
+#define AUD_SAMPLE_RATE_44_1	44100
+#define AUD_SAMPLE_RATE_48	48000
+#define AUD_SAMPLE_RATE_88_2	88200
+#define AUD_SAMPLE_RATE_96	96000
+#define AUD_SAMPLE_RATE_176_4	176400
+#define AUD_SAMPLE_RATE_192	192000
+
+#define HAD_MIN_RATE		AUD_SAMPLE_RATE_32
+#define HAD_MAX_RATE		AUD_SAMPLE_RATE_192
+
+#define DRIVER_NAME		"intelmid_hdmi_audio"
+#define DIS_SAMPLE_RATE_25_2	25200
+#define DIS_SAMPLE_RATE_27	27000
+#define DIS_SAMPLE_RATE_54	54000
+#define DIS_SAMPLE_RATE_74_25	74250
+#define DIS_SAMPLE_RATE_148_5	148500
+#define HAD_REG_WIDTH		0x08
+#define HAD_MAX_HW_BUFS		0x04
+#define HAD_MAX_DIP_WORDS		16
+#define INTEL_HAD		"IntelHDMI"
+
+/* _AUD_CONFIG register MASK */
+#define AUD_CONFIG_MASK_UNDERRUN	0xC0000000
+#define AUD_CONFIG_MASK_SRDBG		0x00000002
+#define AUD_CONFIG_MASK_FUNCRST		0x00000001
+
+#define MAX_CNT			0xFF
+#define HAD_SUSPEND_DELAY	1000
+
+/**
+ * enum had_status - Audio stream states
+ *
+ * @STREAM_INIT: Stream initialized
+ * @STREAM_RUNNING: Stream running
+ * @STREAM_PAUSED: Stream paused
+ * @STREAM_DROPPED: Stream dropped
+ * */
+enum had_stream_status {
+	STREAM_INIT = 0,
+	STREAM_RUNNING = 1,
+	STREAM_PAUSED = 2,
+	STREAM_DROPPED = 3
+};
+
+/**
+ * enum had_status_stream - HAD stream states
+ * */
+enum had_status_stream {
+	HAD_INIT = 0,
+	HAD_RUNNING_STREAM,
+};
+
+enum had_drv_status {
+	HAD_DRV_CONNECTED,
+	HAD_DRV_RUNNING,
+	HAD_DRV_DISCONNECTED,
+	HAD_DRV_SUSPENDED,
+	HAD_DRV_ERR,
+};
+
+/* enum intel_had_aud_buf_type - HDMI controller ring buffer types */
+enum intel_had_aud_buf_type {
+	HAD_BUF_TYPE_A = 0,
+	HAD_BUF_TYPE_B = 1,
+	HAD_BUF_TYPE_C = 2,
+	HAD_BUF_TYPE_D = 3,
+};
+
+enum num_aud_ch {
+	CH_STEREO = 0,
+	CH_THREE_FOUR = 1,
+	CH_FIVE_SIX = 2,
+	CH_SEVEN_EIGHT = 3
+};
+
+
+/* HDMI controller register offsets */
+enum hdmi_ctrl_reg_offset_v1 {
+	AUD_CONFIG		= 0x0,
+	AUD_CH_STATUS_0		= 0x08,
+	AUD_CH_STATUS_1		= 0x0C,
+	AUD_HDMI_CTS		= 0x10,
+	AUD_N_ENABLE		= 0x14,
+	AUD_SAMPLE_RATE		= 0x18,
+	AUD_BUF_CONFIG		= 0x20,
+	AUD_BUF_CH_SWAP		= 0x24,
+	AUD_BUF_A_ADDR		= 0x40,
+	AUD_BUF_A_LENGTH	= 0x44,
+	AUD_BUF_B_ADDR		= 0x48,
+	AUD_BUF_B_LENGTH	= 0x4c,
+	AUD_BUF_C_ADDR		= 0x50,
+	AUD_BUF_C_LENGTH	= 0x54,
+	AUD_BUF_D_ADDR		= 0x58,
+	AUD_BUF_D_LENGTH	= 0x5c,
+	AUD_CNTL_ST		= 0x60,
+	AUD_HDMI_STATUS		= 0x68,
+	AUD_HDMIW_INFOFR	= 0x114,
+};
+
+/* Delta changes in HDMI controller register offsets
+ * compare to v1 version */
+
+enum hdmi_ctrl_reg_offset_v2 {
+	AUD_HDMI_STATUS_v2	= 0x64,
+	AUD_HDMIW_INFOFR_v2	= 0x68,
+};
+
+/*
+ * CEA speaker placement:
+ *
+ *  FL  FLC   FC   FRC   FR
+ *
+ *                         LFE
+ *
+ *  RL  RLC   RC   RRC   RR
+ *
+ * The Left/Right Surround channel _notions_ LS/RS in SMPTE 320M corresponds to
+ * CEA RL/RR; The SMPTE channel _assignment_ C/LFE is swapped to CEA LFE/FC.
+ */
+enum cea_speaker_placement {
+	FL  = (1 <<  0),        /* Front Left           */
+	FC  = (1 <<  1),        /* Front Center         */
+	FR  = (1 <<  2),        /* Front Right          */
+	FLC = (1 <<  3),        /* Front Left Center    */
+	FRC = (1 <<  4),        /* Front Right Center   */
+	RL  = (1 <<  5),        /* Rear Left            */
+	RC  = (1 <<  6),        /* Rear Center          */
+	RR  = (1 <<  7),        /* Rear Right           */
+	RLC = (1 <<  8),        /* Rear Left Center     */
+	RRC = (1 <<  9),        /* Rear Right Center    */
+	LFE = (1 << 10),        /* Low Frequency Effect */
+};
+
+struct cea_channel_speaker_allocation {
+	int ca_index;
+	int speakers[8];
+
+	/* derived values, just for convenience */
+	int channels;
+	int spk_mask;
+};
+
+struct channel_map_table {
+	unsigned char map;              /* ALSA API channel map position */
+	unsigned char cea_slot;         /* CEA slot value */
+	int spk_mask;                   /* speaker position bit mask */
+};
+
+/**
+ * union aud_cfg - Audio configuration
+ *
+ * @cfg_regx: individual register bits
+ * @cfg_regval: full register value
+ *
+ * */
+union aud_cfg {
+	struct {
+		u32 aud_en:1;
+		u32 layout:1;
+		u32 fmt:2;
+		u32 num_ch:2;
+		u32 rsvd0:1;
+		u32 set:1;
+		u32 flat:1;
+		u32 val_bit:1;
+		u32 user_bit:1;
+		u32 underrun:1;
+		u32 rsvd1:20;
+	} cfg_regx;
+	struct {
+		u32 aud_en:1;
+		u32 layout:1;
+		u32 fmt:2;
+		u32 num_ch:3;
+		u32 set:1;
+		u32 flat:1;
+		u32 val_bit:1;
+		u32 user_bit:1;
+		u32 underrun:1;
+		u32 packet_mode:1;
+		u32 left_align:1;
+		u32 bogus_sample:1;
+		u32 dp_modei:1;
+		u32 rsvd:16;
+	} cfg_regx_v2;
+	u32 cfg_regval;
+};
+
+/**
+ * union aud_ch_status_0 - Audio Channel Status 0 Attributes
+ *
+ * @status_0_regx:individual register bits
+ * @status_0_regval:full register value
+ *
+ * */
+union aud_ch_status_0 {
+	struct {
+		u32 ch_status:1;
+		u32 lpcm_id:1;
+		u32 cp_info:1;
+		u32 format:3;
+		u32 mode:2;
+		u32 ctg_code:8;
+		u32 src_num:4;
+		u32 ch_num:4;
+		u32 samp_freq:4;
+		u32 clk_acc:2;
+		u32 rsvd:2;
+	} status_0_regx;
+	u32 status_0_regval;
+};
+
+/**
+ * union aud_ch_status_1 - Audio Channel Status 1 Attributes
+ *
+ * @status_1_regx: individual register bits
+ * @status_1_regval: full register value
+ *
+ **/
+union aud_ch_status_1 {
+	struct {
+		u32 max_wrd_len:1;
+		u32 wrd_len:3;
+		u32 rsvd:28;
+		} status_1_regx;
+	u32 status_1_regval;
+};
+
+/**
+ * union aud_hdmi_cts - CTS register
+ *
+ * @cts_regx: individual register bits
+ * @cts_regval: full register value
+ *
+ * */
+union aud_hdmi_cts {
+	struct {
+		u32 cts_val:20;
+		u32 en_cts_prog:1;
+		u32 rsvd:11;
+	} cts_regx;
+	struct {
+		u32 cts_val:24;
+		u32 en_cts_prog:1;
+		u32 rsvd:7;
+	} cts_regx_v2;
+	u32 cts_regval;
+};
+
+/**
+ * union aud_hdmi_n_enable - N register
+ *
+ * @n_regx: individual register bits
+ * @n_regval: full register value
+ *
+*/
+union aud_hdmi_n_enable {
+	struct {
+		u32 n_val:20;
+		u32 en_n_prog:1;
+		u32 rsvd:11;
+	} n_regx;
+	struct {
+		u32 n_val:24;
+		u32 en_n_prog:1;
+		u32 rsvd:7;
+	} n_regx_v2;
+	u32 n_regval;
+};
+
+/**
+ * union aud_buf_config -  Audio Buffer configurations
+ *
+ * @buf_cfg_regx: individual register bits
+ * @buf_cfgval: full register value
+ *
+*/
+union aud_buf_config {
+	struct {
+		u32 fifo_width:8;
+		u32 rsvd0:8;
+		u32 aud_delay:8;
+		u32 rsvd1:8;
+	} buf_cfg_regx;
+	struct {
+		u32 audio_fifo_watermark:8;
+		u32 dma_fifo_watermark:3;
+		u32 rsvd0:5;
+		u32 aud_delay:8;
+		u32 rsvd1:8;
+	} buf_cfg_regx_v2;
+	u32 buf_cfgval;
+};
+
+/**
+ * union aud_buf_ch_swap - Audio Sample Swapping offset
+ *
+ * @buf_ch_swap_regx: individual register bits
+ * @buf_ch_swap_val: full register value
+ *
+ * */
+union aud_buf_ch_swap {
+	struct {
+		u32 first_0:3;
+		u32 second_0:3;
+		u32 first_1:3;
+		u32 second_1:3;
+		u32 first_2:3;
+		u32 second_2:3;
+		u32 first_3:3;
+		u32 second_3:3;
+		u32 rsvd:8;
+	} buf_ch_swap_regx;
+	u32 buf_ch_swap_val;
+};
+
+/**
+ * union aud_buf_addr - Address for Audio Buffer
+ *
+ * @buf_addr_regx: individual register bits
+ * @buf_addr_val: full register value
+ *
+ * */
+union aud_buf_addr {
+	struct {
+		u32 valid:1;
+		u32 intr_en:1;
+		u32 rsvd:4;
+		u32 addr:26;
+	} buf_addr_regx;
+	u32 buf_addr_val;
+};
+
+/**
+ * union aud_buf_len - Length of Audio Buffer
+ *
+ * @buf_len_regx: individual register bits
+ * @buf_len_val: full register value
+ *
+ * */
+union aud_buf_len {
+	struct {
+		u32 buf_len:20;
+		u32 rsvd:12;
+	} buf_len_regx;
+	u32 buf_len_val;
+};
+
+/**
+ * union aud_ctrl_st - Audio Control State Register offset
+ *
+ * @ctrl_regx: individual register bits
+ * @ctrl_val: full register value
+ *
+ * */
+union aud_ctrl_st {
+	struct {
+		u32 ram_addr:4;
+		u32 eld_ack:1;
+		u32 eld_addr:4;
+		u32 eld_buf_size:5;
+		u32 eld_valid:1;
+		u32 cp_ready:1;
+		u32 dip_freq:2;
+		u32 dip_idx:3;
+		u32 dip_en_sta:4;
+		u32 rsvd:7;
+	} ctrl_regx;
+	u32 ctrl_val;
+};
+
+/**
+ * union aud_info_frame1 - Audio HDMI Widget Data Island Packet offset
+ *
+ * @fr1_regx: individual register bits
+ * @fr1_val: full register value
+ *
+ * */
+union aud_info_frame1 {
+	struct {
+		u32 pkt_type:8;
+		u32 ver_num:8;
+		u32 len:5;
+		u32 rsvd:11;
+	} fr1_regx;
+	u32 fr1_val;
+};
+
+/**
+ * union aud_info_frame2 - DIP frame 2
+ *
+ * @fr2_regx: individual register bits
+ * @fr2_val: full register value
+ *
+ */
+union aud_info_frame2 {
+	struct {
+		u32 chksum:8;
+		u32 chnl_cnt:3;
+		u32 rsvd0:1;
+		u32 coding_type:4;
+		u32 smpl_size:2;
+		u32 smpl_freq:3;
+		u32 rsvd1:3;
+		u32 format:8;
+	} fr2_regx;
+	u32 fr2_val;
+};
+
+/**
+ * union aud_info_frame3 - DIP frame 3
+ *
+ * @fr3_regx: individual register bits
+ * @fr3_val: full register value
+ *
+ */
+union aud_info_frame3 {
+	struct {
+		u32 chnl_alloc:8;
+		u32 rsvd0:3;
+		u32 lsv:4;
+		u32 dm_inh:1;
+		u32 rsvd1:16;
+	} fr3_regx;
+	u32 fr3_val;
+};
+
+
+struct pcm_stream_info {
+	int		str_id;
+	void		*had_substream;
+	void		(*period_elapsed) (void *had_substream);
+	u32		buffer_ptr;
+	u64		buffer_rendered;
+	u32		ring_buf_size;
+	int		sfreq;
+};
+
+struct ring_buf_info {
+	uint32_t	buf_addr;
+	uint32_t	buf_size;
+	uint8_t		is_valid;
+};
+
+struct had_stream_pvt {
+	enum had_stream_status		stream_status;
+	int				stream_ops;
+	ssize_t				dbg_cum_bytes;
+};
+
+struct had_pvt_data {
+	enum had_status_stream		stream_type;
+};
+
+struct had_callback_ops {
+	had_event_call_back intel_had_event_call_back;
+};
+
+struct had_debugfs {
+	struct dentry     *root;
+	u32               reg_offset;
+	void              *reg_offset_table;
+	u32               reg_offset_table_size;
+};
+
+/**
+ * struct snd_intelhad - intelhad driver structure
+ *
+ * @card: ptr to hold card details
+ * @card_index: sound card index
+ * @card_id: detected sound card id
+ * @reg_ops: register operations to program registers
+ * @query_ops: caps call backs for get/set operations
+ * @drv_status: driver status
+ * @buf_info: ring buffer info
+ * @stream_info: stream information
+ * @eeld: holds EELD info
+ * @curr_buf: pointer to hold current active ring buf
+ * @valid_buf_cnt: ring buffer count for stream
+ * @had_spinlock: driver lock
+ * @aes_bits: IEC958 status bits
+ * @buff_done: id of current buffer done intr
+ * @dev: platoform device handle
+ * @kctl: holds kctl ptrs used for channel map
+ * @chmap: holds channel map info
+ * @audio_reg_base: hdmi audio register base offset
+ * @hw_silence: flag indicates SoC support for HW silence/Keep alive
+ * @ops: holds ops functions based on platform
+ */
+struct snd_intelhad {
+	struct snd_card	*card;
+	int		card_index;
+	char		*card_id;
+	struct hdmi_audio_registers_ops	reg_ops;
+	struct hdmi_audio_query_set_ops	query_ops;
+	enum had_drv_status	drv_status;
+	struct		ring_buf_info buf_info[HAD_NUM_OF_RING_BUFS];
+	struct		pcm_stream_info stream_info;
+	otm_hdmi_eld_t	eeld;
+	enum		intel_had_aud_buf_type curr_buf;
+	int		valid_buf_cnt;
+	unsigned int	aes_bits;
+	int flag_underrun;
+	struct had_pvt_data *private_data;
+	spinlock_t had_spinlock;
+	enum		intel_had_aud_buf_type buff_done;
+	struct device *dev;
+	struct snd_kcontrol *kctl;
+	struct snd_pcm_chmap *chmap;
+	unsigned int	audio_reg_base;
+	bool		hw_silence;
+	struct had_ops	*ops;
+	struct had_debugfs debugfs;
+	int		audio_mode_to_query;
+};
+
+struct had_ops {
+	void (*enable_audio) (struct snd_pcm_substream *substream,
+			u8 enable);
+	void (*reset_audio) (u8 reset);
+	int (*prog_n) (u32 aud_samp_freq, u32 *n_param,
+			struct snd_intelhad *intelhaddata);
+	void (*prog_cts) (u32 aud_samp_freq, u32 tmds, u32 n_param,
+			struct snd_intelhad *intelhaddata);
+	int (*audio_ctrl) (struct snd_pcm_substream *substream,
+				struct snd_intelhad *intelhaddata);
+	void (*prog_dip) (struct snd_pcm_substream *substream,
+				struct snd_intelhad *intelhaddata);
+	void (*handle_underrun) (struct snd_intelhad *intelhaddata);
+};
+
+
+int had_event_handler(enum had_event_type event_type, void *data);
+
+int hdmi_audio_query(void *drv_data, hdmi_audio_event_t event);
+int hdmi_audio_suspend(void *drv_data, hdmi_audio_event_t event);
+int hdmi_audio_resume(void *drv_data);
+int hdmi_audio_mode_change(struct snd_pcm_substream *substream);
+extern struct snd_pcm_ops snd_intelhad_playback_ops;
+
+int snd_intelhad_init_audio_ctrl(struct snd_pcm_substream *substream,
+					struct snd_intelhad *intelhaddata,
+					int flag_silence);
+int snd_intelhad_prog_buffer(struct snd_intelhad *intelhaddata,
+					int start, int end);
+int snd_intelhad_invd_buffer(int start, int end);
+inline int snd_intelhad_read_len(struct snd_intelhad *intelhaddata);
+void had_build_channel_allocation_map(struct snd_intelhad *intelhaddata);
+
+/* Register access functions */
+inline int had_get_hwstate(struct snd_intelhad *intelhaddata);
+inline int had_get_caps(enum had_caps_list query_element , void *capabilties);
+inline int had_set_caps(enum had_caps_list set_element , void *capabilties);
+inline int had_read_register(uint32_t reg_addr, uint32_t *data);
+inline int had_write_register(uint32_t reg_addr, uint32_t data);
+inline int had_read_modify(uint32_t reg_addr, uint32_t data, uint32_t mask);
+
+#ifdef CONFIG_DEBUG_FS
+void had_debugfs_init(struct snd_intelhad *intelhaddata, char *version);
+void had_debugfs_exit(struct snd_intelhad *intelhaddata);
+#else
+inline void had_debugfs_init(struct snd_intelhad *intelhaddata, char *version)
+{
+}
+inline void had_debugfs_exit(struct snd_intelhad *intelhaddata)
+{
+}
+#endif
+
+#endif
diff --git a/drivers/external_drivers/intel_media/hdmi_audio/intel_mid_hdmi_audio_debug.c b/drivers/external_drivers/intel_media/hdmi_audio/intel_mid_hdmi_audio_debug.c
new file mode 100644
index 0000000..7c0207d
--- /dev/null
+++ b/drivers/external_drivers/intel_media/hdmi_audio/intel_mid_hdmi_audio_debug.c
@@ -0,0 +1,403 @@
+/*
+ *   intel_mid_hdmi_audio_debug.c - Debug interface for Intel HDMI audio
+ *                                  driver for MID
+ *
+ *  Copyright (C) 2014 Intel Corp
+ *  Authors:	Jayachandran.B  <jayachandran.b@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * Debug interface code for Intel MID HDMI audio Driver
+ */
+
+#define pr_fmt(fmt)	"had_debug: " fmt
+#include <linux/debugfs.h>
+#include "intel_mid_hdmi_audio.h"
+
+#ifdef CONFIG_DEBUG_FS
+
+extern struct snd_intelhad *had_data;
+
+struct had_debug {
+	const char *name;
+	const struct file_operations *fops;
+	umode_t mode;
+};
+
+struct had_reg_offset_info {
+	const char *name;
+	u32 offset;
+};
+/* HDMI controller register offsets */
+static struct had_reg_offset_info had_debug_reg_offset_table_v1[] = {
+	{ "AUD_CONFIG",		AUD_CONFIG },
+	{ "AUD_CH_STATUS_0",	AUD_CH_STATUS_0 },
+	{ "AUD_CH_STATUS_1",	AUD_CH_STATUS_1 },
+	{ "AUD_HDMI_CTS",	AUD_HDMI_CTS },
+	{ "AUD_N_ENABLE",	AUD_N_ENABLE },
+	{ "AUD_SAMPLE_RATE",	AUD_SAMPLE_RATE },
+	{ "AUD_BUF_CONFIG",	AUD_BUF_CONFIG },
+	{ "AUD_BUF_CH_SWAP",	AUD_BUF_CH_SWAP },
+	{ "AUD_BUF_A_ADDR",	AUD_BUF_A_ADDR },
+	{ "AUD_BUF_A_LENGTH",	AUD_BUF_A_LENGTH },
+	{ "AUD_BUF_B_ADDR",	AUD_BUF_B_ADDR },
+	{ "AUD_BUF_B_LENGTH",	AUD_BUF_B_LENGTH },
+	{ "AUD_BUF_C_ADDR",	AUD_BUF_C_ADDR },
+	{ "AUD_BUF_C_LENGTH",	AUD_BUF_C_LENGTH },
+	{ "AUD_BUF_D_ADDR",	AUD_BUF_D_ADDR },
+	{ "AUD_BUF_D_LENGTH",	AUD_BUF_D_LENGTH },
+	{ "AUD_CNTL_ST",	AUD_CNTL_ST },
+	{ "AUD_HDMI_STATUS",	AUD_HDMI_STATUS },
+	{ "AUD_HDMIW_INFOFR",	AUD_HDMIW_INFOFR },
+};
+
+static struct had_reg_offset_info had_debug_reg_offset_table_v2[] = {
+	{ "AUD_CONFIG",		AUD_CONFIG },
+	{ "AUD_CH_STATUS_0",	AUD_CH_STATUS_0 },
+	{ "AUD_CH_STATUS_1",	AUD_CH_STATUS_1 },
+	{ "AUD_HDMI_CTS",	AUD_HDMI_CTS },
+	{ "AUD_N_ENABLE",	AUD_N_ENABLE },
+	{ "AUD_SAMPLE_RATE",	AUD_SAMPLE_RATE },
+	{ "AUD_BUF_CONFIG",	AUD_BUF_CONFIG },
+	{ "AUD_BUF_CH_SWAP",	AUD_BUF_CH_SWAP },
+	{ "AUD_BUF_A_ADDR",	AUD_BUF_A_ADDR },
+	{ "AUD_BUF_A_LENGTH",	AUD_BUF_A_LENGTH },
+	{ "AUD_BUF_B_ADDR",	AUD_BUF_B_ADDR },
+	{ "AUD_BUF_B_LENGTH",	AUD_BUF_B_LENGTH },
+	{ "AUD_BUF_C_ADDR",	AUD_BUF_C_ADDR },
+	{ "AUD_BUF_C_LENGTH",	AUD_BUF_C_LENGTH },
+	{ "AUD_BUF_D_ADDR",	AUD_BUF_D_ADDR },
+	{ "AUD_BUF_D_LENGTH",	AUD_BUF_D_LENGTH },
+	{ "AUD_CNTL_ST",	AUD_CNTL_ST },
+	{ "AUD_HDMI_STATUS",	AUD_HDMI_STATUS_v2 },
+	{ "AUD_HDMIW_INFOFR",	AUD_HDMIW_INFOFR_v2 },
+};
+
+static int had_debugfs_read_register(uint32_t *data)
+{
+	struct snd_intelhad *intelhaddata = had_data;
+	u32 offset = intelhaddata->debugfs.reg_offset;
+	u32 base = intelhaddata->audio_reg_base;
+	int retval;
+
+	retval = had_read_register(offset, data);
+	if (retval)
+		pr_err("Failed reading hdmi reg %x\n", base + offset);
+	else
+		pr_debug("Reading reg %x : value = %x\n", base + offset, *data);
+
+	return retval;
+}
+
+static int had_debugfs_write_register(uint32_t data)
+{
+	struct snd_intelhad *intelhaddata = had_data;
+	u32 offset = intelhaddata->debugfs.reg_offset;
+	u32 base = intelhaddata->audio_reg_base;
+	int retval;
+
+	retval = had_write_register(offset, data);
+	if (retval)
+		pr_err("Failed writing hdmi reg %x\n", base + offset);
+	else
+		pr_debug("Writing reg %x : value = %x\n", base + offset, data);
+
+	add_taint(TAINT_USER, LOCKDEP_STILL_OK);
+
+	return retval;
+}
+
+static ssize_t had_debug_reg_base_write(struct file *file,
+		const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char buf[32];
+	u32 reg_base;
+	struct snd_intelhad *intelhaddata = had_data;
+	size_t buf_size = min(count, sizeof(buf)-1);
+
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+
+	buf[buf_size] = 0;
+	if (kstrtouint(buf, 0, &reg_base))
+		return -EINVAL;
+
+	intelhaddata->audio_reg_base = reg_base;
+
+	pr_debug("wrote hdmi_audio reg_base = %x\n", (int) intelhaddata->audio_reg_base);
+	return buf_size;
+}
+
+static ssize_t had_debug_reg_base_read(struct file *file,
+		char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct snd_intelhad *intelhaddata = had_data;
+	char buf[32];
+	u32 reg_base = intelhaddata->audio_reg_base;
+
+	snprintf(buf, 32, "0x%08x\n", reg_base);
+	return simple_read_from_buffer(user_buf, count, ppos,
+			buf, strlen(buf));
+
+}
+
+static const struct file_operations had_debug_reg_base_ops = {
+	.open = simple_open,
+	.write = had_debug_reg_base_write,
+	.read = had_debug_reg_base_read,
+};
+
+static ssize_t had_debug_reg_offset_write(struct file *file,
+		const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char buf[32];
+	u32 reg_offset;
+	struct snd_intelhad *intelhaddata = had_data;
+	size_t buf_size = min(count, sizeof(buf)-1);
+
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+
+	buf[buf_size] = 0;
+	if (kstrtouint(buf, 0, &reg_offset))
+		return -EINVAL;
+
+	intelhaddata->debugfs.reg_offset = reg_offset;
+
+	pr_info("wrote hdmi_audio reg_offset = %x\n", (int) intelhaddata->debugfs.reg_offset);
+	return buf_size;
+}
+
+static ssize_t had_debug_reg_offset_read(struct file *file,
+		char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct snd_intelhad *intelhaddata = had_data;
+	char buf[32];
+	u32 reg_offset = intelhaddata->debugfs.reg_offset;
+
+	snprintf(buf, 32, "0x%08x\n", reg_offset);
+	return simple_read_from_buffer(user_buf, count, ppos,
+			buf, strlen(buf));
+}
+
+static const struct file_operations had_debug_reg_offset_ops = {
+	.open = simple_open,
+	.write = had_debug_reg_offset_write,
+	.read = had_debug_reg_offset_read,
+};
+
+static ssize_t had_debug_reg_val_write(struct file *file,
+		const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	u32 data;
+	int retval;
+	char buf[32];
+	size_t buf_size = min(count, sizeof(buf)-1);
+
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+
+	buf[buf_size] = 0;
+	if (kstrtouint(buf, 0, &data))
+		return -EINVAL;
+
+	retval = had_debugfs_write_register(data);
+
+	if (retval)
+		return retval;
+
+	return buf_size;
+}
+
+static ssize_t had_debug_reg_val_read(struct file *file,
+		char __user *user_buf, size_t count, loff_t *ppos)
+{
+	u32 data;
+	int retval;
+	char buf[32];
+
+	retval = had_debugfs_read_register(&data);
+	if (retval)
+		return retval;
+
+	snprintf(buf, 32, "0x%08x\n", data);
+	return simple_read_from_buffer(user_buf, count, ppos,
+			buf, strlen(buf));
+}
+
+static const struct file_operations had_debug_reg_val_ops = {
+	.open = simple_open,
+	.write = had_debug_reg_val_write,
+	.read = had_debug_reg_val_read,
+};
+
+/* max number of chars in the register details printed for a given register
+ * in the format : name = 0x<u32 value>
+ * assuming name is 16 chars long, total = 29 chars; With additional 11
+ * reserved chars incluing newline; total = 40 chars per register
+ */
+#define HAD_DEBUG_REG_DETAILS_SIZE      40
+static ssize_t had_debug_reg_all_read(struct file *file,
+		char __user *user_buf, size_t count, loff_t *ppos)
+{
+	u32 data;
+	int retval;
+	char *buf;
+	int i;
+	u32 offset;
+	const char *name;
+	int pos = 0;
+	struct snd_intelhad *intelhaddata = had_data;
+	struct had_reg_offset_info *info;
+	int buf_size = intelhaddata->debugfs.reg_offset_table_size
+		* HAD_DEBUG_REG_DETAILS_SIZE;
+
+	info = (struct had_reg_offset_info *)
+		intelhaddata->debugfs.reg_offset_table;
+	if (!info) {
+		pr_err("%s: offset table null\n", __func__);
+		return -EINVAL;
+	}
+
+	buf = kmalloc(buf_size, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	buf[0] = 0;
+
+	for (i = 0; i < intelhaddata->debugfs.reg_offset_table_size; i++) {
+		offset = info[i].offset;
+		name   = info[i].name;
+		intelhaddata->debugfs.reg_offset = offset;
+		data = 0xDEADBEAD;
+		had_debugfs_read_register(&data);
+
+		pos += snprintf(buf+pos, HAD_DEBUG_REG_DETAILS_SIZE,
+				"%s = 0x%08x\n", name, data);
+	}
+
+	retval = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+	return retval;
+
+}
+
+static const struct file_operations had_debug_reg_all_ops = {
+	.open = simple_open,
+	.read = had_debug_reg_all_read,
+};
+
+
+static ssize_t had_debug_readme_read(struct file *file, char __user *user_buf,
+		size_t count, loff_t *ppos)
+{
+	const char *buf =
+		"\nAll files can be read using 'cat'\n"
+		"\n1.reg_base : HDMI audio register base depending on the\n"
+		"pipe id used for HDMI\n"
+		"To set : echo 0x..... > reg_base\n"
+		"\n2.reg_offset : Offset of a particular register from the\n"
+		"register base\n"
+		"To set : echo 0x..... > reg_offset\n"
+		"\n3.reg_val : value of the register at offset 'reg_offset'\n"
+		"from 'reg_base' or default register base\n"
+		"To set : echo 0x..... > reg_val\n"
+		"\nTo Read/write a particular register :\n"
+		"i) Set reg_base if you want to change the reg_base.\n"
+		"Otherwise the default for the platform will be used\n"
+		"ii) Set reg_offset to the offset of the particular register\n"
+		"iii) To read, use 'cat reg_val'\n"
+		"     To write a value, use echo 0xvalue > reg_val\n"
+		" Eg. to read register at offset 0x10\n"
+		"echo 0x10 > reg_offset\n"
+		"cat reg_val\n"
+		" Eg. to write value 0xabcd to register at offset 0x10\n"
+		"echo 0x10 > reg_offset\n"
+		"echo 0xabcd > reg_val\n"
+		"\n4.reg_all : This is to read in one shot all the registers\n"
+		"at respective offsets from 'reg_base' or default reg base\n"
+		"Only reading of all registers (cat reg_all) is supported.\n"
+		"Writing to all registers in one shot is not supported\n";
+
+	return simple_read_from_buffer(user_buf, count, ppos,
+			buf, strlen(buf));
+}
+
+static const struct file_operations had_debug_readme_ops = {
+	.open = simple_open,
+	.read = had_debug_readme_read,
+};
+
+static const struct had_debug dbg_entries[] = {
+	{"reg_base", &had_debug_reg_base_ops, 0600},
+	{"reg_offset", &had_debug_reg_offset_ops, 0600},
+	{"reg_val", &had_debug_reg_val_ops, 0600},
+	{"reg_all", &had_debug_reg_all_ops, 0400},
+	{"README", &had_debug_readme_ops, 0400},
+};
+
+static void had_debugfs_create_files(struct snd_intelhad *intelhaddata,
+		const struct had_debug *entries, int size)
+{
+	int i;
+
+	for (i = 0; i < size; i++) {
+		struct dentry *dentry;
+		const struct had_debug *entry = &entries[i];
+
+		dentry = debugfs_create_file(entry->name, entry->mode,
+				intelhaddata->debugfs.root, NULL, entry->fops);
+		if (dentry == NULL) {
+			pr_err("Failed to create %s file\n", entry->name);
+			return;
+		}
+	}
+}
+
+void had_debugfs_init(struct snd_intelhad *intelhaddata, char *version)
+{
+
+	intelhaddata->debugfs.root = debugfs_create_dir("hdmi-audio", NULL);
+	if (IS_ERR(intelhaddata->debugfs.root) || !intelhaddata->debugfs.root) {
+		pr_err("Failed to create hdmi audio debugfs directory\n");
+		return;
+	}
+
+	had_debugfs_create_files(intelhaddata, dbg_entries,
+			ARRAY_SIZE(dbg_entries));
+
+	intelhaddata->debugfs.reg_offset_table = NULL;
+
+	if (!version) {
+		pr_err("%s:hdmi ops/Register set version null\n", __func__);
+		return;
+	}
+
+	if (!strncmp(version, "v1", 2)) {
+		intelhaddata->debugfs.reg_offset_table =
+			(void *)had_debug_reg_offset_table_v1;
+		intelhaddata->debugfs.reg_offset_table_size =
+			ARRAY_SIZE(had_debug_reg_offset_table_v1);
+	} else if (!strncmp(version, "v2", 2)) {
+		intelhaddata->debugfs.reg_offset_table =
+			(void *)had_debug_reg_offset_table_v2;
+		intelhaddata->debugfs.reg_offset_table_size =
+			ARRAY_SIZE(had_debug_reg_offset_table_v2);
+	} else
+		pr_err("%s:invalid hdmi ops/Register set version\n", __func__);
+}
+
+void had_debugfs_exit(struct snd_intelhad *intelhaddata)
+{
+	debugfs_remove_recursive(intelhaddata->debugfs.root);
+}
+#endif
diff --git a/drivers/external_drivers/intel_media/hdmi_audio/intel_mid_hdmi_audio_if.c b/drivers/external_drivers/intel_media/hdmi_audio/intel_mid_hdmi_audio_if.c
new file mode 100644
index 0000000..1750b27
--- /dev/null
+++ b/drivers/external_drivers/intel_media/hdmi_audio/intel_mid_hdmi_audio_if.c
@@ -0,0 +1,515 @@
+/*
+ *   intel_mid_hdmi_audio_if.c - Intel HDMI audio driver for MID
+ *
+ *  Copyright (C) 2010 Intel Corp
+ *  Authors:	Sailaja Bandarupalli <sailaja.bandarupalli@intel.com>
+ *		Ramesh Babu K V <ramesh.babu@intel.com>
+ *		Vaibhav Agarwal <vaibhav.agarwal@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * ALSA driver for Intel MID HDMI audio controller.  This file contains
+ * interface functions exposed to HDMI Display driver and code to register
+ * with ALSA framework..
+ */
+
+#define pr_fmt(fmt)		"had: " fmt
+
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <sound/pcm.h>
+#include <sound/core.h>
+#include "intel_mid_hdmi_audio.h"
+
+
+/**
+ * hdmi_audio_query - hdmi audio query function
+ *
+ *@haddata: pointer to HAD private data
+ *@event: audio event for which this method is invoked
+ *
+ * This function is called by client driver to query the
+ * hdmi audio.
+ */
+int hdmi_audio_query(void *haddata, hdmi_audio_event_t event)
+{
+	struct snd_pcm_substream *substream = NULL;
+	struct had_pvt_data *had_stream;
+	unsigned long flag_irqs;
+	struct snd_intelhad *intelhaddata = (struct snd_intelhad *)haddata;
+
+	if (intelhaddata->stream_info.had_substream)
+		substream = intelhaddata->stream_info.had_substream;
+	had_stream = intelhaddata->private_data;
+	switch (event.type) {
+	case HAD_EVENT_QUERY_IS_AUDIO_BUSY:
+		spin_lock_irqsave(&intelhaddata->had_spinlock, flag_irqs);
+
+		if ((had_stream->stream_type == HAD_RUNNING_STREAM) ||
+			substream) {
+			spin_unlock_irqrestore(&intelhaddata->had_spinlock,
+						flag_irqs);
+			pr_debug("Audio stream active\n");
+			return -EBUSY;
+		}
+		spin_unlock_irqrestore(&intelhaddata->had_spinlock, flag_irqs);
+	break;
+
+	case HAD_EVENT_QUERY_IS_AUDIO_SUSPENDED:
+		spin_lock_irqsave(&intelhaddata->had_spinlock, flag_irqs);
+		if (intelhaddata->drv_status == HAD_DRV_SUSPENDED) {
+			spin_unlock_irqrestore(&intelhaddata->had_spinlock,
+						flag_irqs);
+			pr_debug("Audio is suspended\n");
+			return 1;
+		}
+		spin_unlock_irqrestore(&intelhaddata->had_spinlock, flag_irqs);
+	break;
+
+	default:
+		pr_debug("error un-handled event !!\n");
+		return -EINVAL;
+	break;
+
+	}
+
+	return 0;
+}
+
+/**
+ * hdmi_audio_suspend - power management suspend function
+ *
+ *@haddata: pointer to HAD private data
+ *@event: pm event for which this method is invoked
+ *
+ * This function is called by client driver to suspend the
+ * hdmi audio.
+ */
+int hdmi_audio_suspend(void *haddata, hdmi_audio_event_t event)
+{
+	int caps, retval = 0;
+	struct had_pvt_data *had_stream;
+	unsigned long flag_irqs;
+	struct snd_pcm_substream *substream;
+	struct snd_intelhad *intelhaddata = (struct snd_intelhad *)haddata;
+
+	pr_debug("Enter:%s", __func__);
+
+	had_stream = intelhaddata->private_data;
+	substream = intelhaddata->stream_info.had_substream;
+
+	if (intelhaddata->dev->power.runtime_status != RPM_SUSPENDED) {
+		pr_err("audio stream is active\n");
+		return -EAGAIN;
+	}
+
+
+	spin_lock_irqsave(&intelhaddata->had_spinlock, flag_irqs);
+	if (intelhaddata->drv_status == HAD_DRV_DISCONNECTED) {
+		spin_unlock_irqrestore(&intelhaddata->had_spinlock, flag_irqs);
+		pr_debug("had not connected\n");
+		return retval;
+	}
+
+	if (intelhaddata->drv_status == HAD_DRV_SUSPENDED) {
+		spin_unlock_irqrestore(&intelhaddata->had_spinlock, flag_irqs);
+		pr_debug("had already suspended\n");
+		return retval;
+	}
+
+	intelhaddata->drv_status = HAD_DRV_SUSPENDED;
+	spin_unlock_irqrestore(&intelhaddata->had_spinlock, flag_irqs);
+	/* ToDo: Need to disable UNDERRUN interrupts as well
+	   caps = HDMI_AUDIO_UNDERRUN | HDMI_AUDIO_BUFFER_DONE;
+	   */
+	caps = HDMI_AUDIO_BUFFER_DONE;
+	had_set_caps(HAD_SET_DISABLE_AUDIO_INT, &caps);
+	had_set_caps(HAD_SET_DISABLE_AUDIO, NULL);
+	pr_debug("Exit:%s", __func__);
+	return retval;
+}
+
+/**
+ * hdmi_audio_resume - power management resume function
+ *
+ *@haddata: pointer to HAD private data
+ *
+ * This function is called by client driver to resume the
+ * hdmi audio.
+ */
+int hdmi_audio_resume(void *haddata)
+{
+	int caps, retval = 0;
+	struct snd_intelhad *intelhaddata = (struct snd_intelhad *)haddata;
+	unsigned long flag_irqs;
+
+	pr_debug("Enter:%s", __func__);
+
+	spin_lock_irqsave(&intelhaddata->had_spinlock, flag_irqs);
+	if (intelhaddata->drv_status == HAD_DRV_DISCONNECTED) {
+		spin_unlock_irqrestore(&intelhaddata->had_spinlock, flag_irqs);
+		pr_debug("had not connected\n");
+		return 0;
+	}
+
+	if (HAD_DRV_SUSPENDED != intelhaddata->drv_status) {
+		spin_unlock_irqrestore(&intelhaddata->had_spinlock, flag_irqs);
+		pr_err("had is not in suspended state\n");
+		return 0;
+	}
+
+	if (had_get_hwstate(intelhaddata)) {
+		spin_unlock_irqrestore(&intelhaddata->had_spinlock, flag_irqs);
+		pr_err("Failed to resume. Device not accessible\n");
+		return -ENODEV;
+	}
+
+	intelhaddata->drv_status = HAD_DRV_CONNECTED;
+	spin_unlock_irqrestore(&intelhaddata->had_spinlock, flag_irqs);
+	/* ToDo: Need to enable UNDERRUN interrupts as well
+	   caps = HDMI_AUDIO_UNDERRUN | HDMI_AUDIO_BUFFER_DONE;
+	   */
+	caps = HDMI_AUDIO_BUFFER_DONE;
+	retval = had_set_caps(HAD_SET_ENABLE_AUDIO_INT, &caps);
+	retval = had_set_caps(HAD_SET_ENABLE_AUDIO, NULL);
+	pr_debug("Exit:%s", __func__);
+	return retval;
+}
+
+static inline int had_chk_intrmiss(struct snd_intelhad *intelhaddata,
+		enum intel_had_aud_buf_type buf_id)
+{
+	int i, intr_count = 0;
+	enum intel_had_aud_buf_type buff_done;
+	u32 buf_size, buf_addr;
+	struct had_pvt_data *had_stream;
+	unsigned long flag_irqs;
+
+	had_stream = intelhaddata->private_data;
+
+	buff_done = buf_id;
+
+	intr_count = snd_intelhad_read_len(intelhaddata);
+	if (intr_count > 1) {
+		/* In case of active playback */
+		pr_err("Driver detected %d missed buffer done interrupt(s)!!!!\n",
+				(intr_count - 1));
+		if (intr_count > 3)
+			return intr_count;
+
+		buf_id += (intr_count - 1);
+		/* Reprogram registers*/
+		for (i = buff_done; i < buf_id; i++) {
+			int j = i % 4;
+			buf_size = intelhaddata->buf_info[j].buf_size;
+			buf_addr = intelhaddata->buf_info[j].buf_addr;
+			had_write_register(AUD_BUF_A_LENGTH +
+					(j * HAD_REG_WIDTH), buf_size);
+			had_write_register(
+					AUD_BUF_A_ADDR+(j * HAD_REG_WIDTH),
+					(buf_addr | BIT(0) | BIT(1)));
+		}
+		buf_id = buf_id % 4;
+		spin_lock_irqsave(&intelhaddata->had_spinlock, flag_irqs);
+		intelhaddata->buff_done = buf_id;
+		spin_unlock_irqrestore(&intelhaddata->had_spinlock, flag_irqs);
+	}
+
+	return intr_count;
+}
+
+int had_process_buffer_done(struct snd_intelhad *intelhaddata)
+{
+	int retval = 0;
+	u32 len = 1;
+	enum intel_had_aud_buf_type buf_id;
+	enum intel_had_aud_buf_type buff_done;
+	struct pcm_stream_info *stream;
+	u32 buf_size;
+	struct had_pvt_data *had_stream;
+	int intr_count;
+	enum had_status_stream		stream_type;
+	unsigned long flag_irqs;
+
+	had_stream = intelhaddata->private_data;
+	stream = &intelhaddata->stream_info;
+	intr_count = 1;
+
+	spin_lock_irqsave(&intelhaddata->had_spinlock, flag_irqs);
+	if (intelhaddata->drv_status == HAD_DRV_DISCONNECTED) {
+		spin_unlock_irqrestore(&intelhaddata->had_spinlock, flag_irqs);
+		pr_err("%s:Device already disconnected\n", __func__);
+		return retval;
+	}
+	buf_id = intelhaddata->curr_buf;
+	intelhaddata->buff_done = buf_id;
+	buff_done = intelhaddata->buff_done;
+	buf_size = intelhaddata->buf_info[buf_id].buf_size;
+	stream_type = had_stream->stream_type;
+
+	pr_debug("Enter:%s buf_id=%d", __func__, buf_id);
+
+	/* Every debug statement has an implication
+	 * of ~5msec. Thus, avoid having >3 debug statements
+	 * for each buffer_done handling.
+	 */
+
+	/* Check for any intr_miss in case of active playback */
+	if (had_stream->stream_type == HAD_RUNNING_STREAM) {
+		spin_unlock_irqrestore(&intelhaddata->had_spinlock, flag_irqs);
+		intr_count = had_chk_intrmiss(intelhaddata, buf_id);
+		if (!intr_count || (intr_count > 3)) {
+			pr_err("HAD SW state in non-recoverable!!! mode\n");
+			pr_err("Already played stale data\n");
+			return retval;
+		}
+		buf_id += (intr_count - 1);
+		buf_id = buf_id % 4;
+		spin_lock_irqsave(&intelhaddata->had_spinlock, flag_irqs);
+	}
+
+	intelhaddata->buf_info[buf_id].is_valid = true;
+	if (intelhaddata->valid_buf_cnt-1 == buf_id) {
+		if (had_stream->stream_type >= HAD_RUNNING_STREAM)
+			intelhaddata->curr_buf = HAD_BUF_TYPE_A;
+	} else
+		intelhaddata->curr_buf = buf_id + 1;
+
+	spin_unlock_irqrestore(&intelhaddata->had_spinlock, flag_irqs);
+
+	if (had_get_hwstate(intelhaddata)) {
+		pr_err("HDMI cable plugged-out\n");
+		return retval;
+	}
+
+	/*Reprogram the registers with addr and length*/
+	had_write_register(AUD_BUF_A_LENGTH +
+			(buf_id * HAD_REG_WIDTH), buf_size);
+	had_write_register(AUD_BUF_A_ADDR+(buf_id * HAD_REG_WIDTH),
+			intelhaddata->buf_info[buf_id].buf_addr|
+			BIT(0) | BIT(1));
+
+	had_read_register(AUD_BUF_A_LENGTH + (buf_id * HAD_REG_WIDTH),
+					&len);
+	pr_debug("%s:Enabled buf[%d]\n", __func__, buf_id);
+
+	/* In case of actual data,
+	 * report buffer_done to above ALSA layer
+	 */
+	buf_size =  intelhaddata->buf_info[buf_id].buf_size;
+	if (stream_type >= HAD_RUNNING_STREAM) {
+		intelhaddata->stream_info.buffer_rendered +=
+			(intr_count * buf_size);
+		stream->period_elapsed(stream->had_substream);
+	}
+
+	return retval;
+}
+
+int had_process_buffer_underrun(struct snd_intelhad *intelhaddata)
+{
+	int retval = 0;
+	enum intel_had_aud_buf_type buf_id;
+	struct pcm_stream_info *stream;
+	struct had_pvt_data *had_stream;
+	enum had_status_stream stream_type;
+	unsigned long flag_irqs;
+	int drv_status;
+
+	had_stream = intelhaddata->private_data;
+	stream = &intelhaddata->stream_info;
+
+	spin_lock_irqsave(&intelhaddata->had_spinlock, flag_irqs);
+	buf_id = intelhaddata->curr_buf;
+	stream_type = had_stream->stream_type;
+	intelhaddata->buff_done = buf_id;
+	drv_status = intelhaddata->drv_status;
+	if (stream_type == HAD_RUNNING_STREAM)
+		intelhaddata->curr_buf = HAD_BUF_TYPE_A;
+
+	spin_unlock_irqrestore(&intelhaddata->had_spinlock, flag_irqs);
+
+	pr_debug("Enter:%s buf_id=%d, stream_type=%d\n",
+			__func__, buf_id, stream_type);
+
+	intelhaddata->ops->handle_underrun(intelhaddata);
+
+	if (drv_status == HAD_DRV_DISCONNECTED) {
+		pr_err("%s:Device already disconnected\n", __func__);
+		return retval;
+	}
+
+	if (stream_type == HAD_RUNNING_STREAM) {
+		/* Report UNDERRUN error to above layers */
+		intelhaddata->flag_underrun = 1;
+		stream->period_elapsed(stream->had_substream);
+	}
+
+	return retval;
+}
+
+int had_process_hot_plug(struct snd_intelhad *intelhaddata)
+{
+	int retval = 0;
+	enum intel_had_aud_buf_type buf_id;
+	struct snd_pcm_substream *substream;
+	struct had_pvt_data *had_stream;
+	unsigned long flag_irqs;
+
+	pr_debug("Enter:%s", __func__);
+
+	substream = intelhaddata->stream_info.had_substream;
+	had_stream = intelhaddata->private_data;
+
+	spin_lock_irqsave(&intelhaddata->had_spinlock, flag_irqs);
+	if (intelhaddata->drv_status == HAD_DRV_CONNECTED) {
+		pr_debug("Device already connected\n");
+		spin_unlock_irqrestore(&intelhaddata->had_spinlock, flag_irqs);
+		return retval;
+	}
+	buf_id = intelhaddata->curr_buf;
+	intelhaddata->buff_done = buf_id;
+	intelhaddata->drv_status = HAD_DRV_CONNECTED;
+	spin_unlock_irqrestore(&intelhaddata->had_spinlock, flag_irqs);
+
+	pr_debug("Processing HOT_PLUG, buf_id = %d\n", buf_id);
+
+	/* Safety check */
+	if (substream) {
+		pr_debug("There should not be active PB from ALSA\n");
+		pr_debug("Signifies, cable is plugged-in even before\n");
+		pr_debug("processing snd_pcm_disconnect\n");
+		/* Set runtime->state to hw_params done */
+		snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
+	}
+
+	had_build_channel_allocation_map(intelhaddata);
+
+	return retval;
+}
+
+int had_process_hot_unplug(struct snd_intelhad *intelhaddata)
+{
+	int caps, retval = 0;
+	enum intel_had_aud_buf_type buf_id;
+	struct had_pvt_data *had_stream;
+	unsigned long flag_irqs;
+
+	pr_debug("Enter:%s", __func__);
+
+	had_stream = intelhaddata->private_data;
+	buf_id = intelhaddata->curr_buf;
+
+	spin_lock_irqsave(&intelhaddata->had_spinlock, flag_irqs);
+	if (intelhaddata->drv_status == HAD_DRV_DISCONNECTED) {
+		pr_debug("Device already disconnected\n");
+		spin_unlock_irqrestore(&intelhaddata->had_spinlock, flag_irqs);
+		return retval;
+	} else {
+		/* Disable Audio */
+		caps = HDMI_AUDIO_BUFFER_DONE;
+		retval = had_set_caps(HAD_SET_DISABLE_AUDIO_INT, &caps);
+		retval = had_set_caps(HAD_SET_DISABLE_AUDIO, NULL);
+		intelhaddata->ops->enable_audio(intelhaddata->stream_info.had_substream, 0);
+	}
+
+	intelhaddata->drv_status = HAD_DRV_DISCONNECTED;
+	/* Report to above ALSA layer */
+	if (intelhaddata->stream_info.had_substream != NULL) {
+		spin_unlock_irqrestore(&intelhaddata->had_spinlock, flag_irqs);
+		pr_debug("%s: unlock -> sending pcm_stop -> lock\n", __func__);
+		snd_pcm_stop(intelhaddata->stream_info.had_substream,
+				SNDRV_PCM_STATE_DISCONNECTED);
+		spin_lock_irqsave(&intelhaddata->had_spinlock, flag_irqs);
+	}
+
+	had_stream->stream_type = HAD_INIT;
+	spin_unlock_irqrestore(&intelhaddata->had_spinlock, flag_irqs);
+	kfree(intelhaddata->chmap->chmap);
+	intelhaddata->chmap->chmap = NULL;
+	pr_debug("%s: unlocked -> returned\n", __func__);
+
+	return retval;
+}
+
+/**
+ * had_event_handler - Call back function to handle events
+ *
+ * @event_type: Event type to handle
+ * @data: data related to the event_type
+ *
+ * This function is invoked to handle HDMI events from client driver.
+ */
+int had_event_handler(enum had_event_type event_type, void *data)
+{
+	int retval = 0;
+	struct snd_intelhad *intelhaddata = data;
+	enum intel_had_aud_buf_type buf_id;
+	struct snd_pcm_substream *substream;
+	struct had_pvt_data *had_stream;
+	unsigned long flag_irqs;
+
+	buf_id = intelhaddata->curr_buf;
+	had_stream = intelhaddata->private_data;
+
+	/* Switching to a function can drop atomicity even in INTR context.
+	 * Thus, a big lock is acquired to maintain atomicity.
+	 * This can be optimized later.
+	 * Currently, only buffer_done/_underrun executes in INTR context.
+	 * Also, locking is implemented seperately to avoid real contention
+	 * of data(struct intelhaddata) between IRQ/SOFT_IRQ/PROCESS context.
+	 */
+	substream = intelhaddata->stream_info.had_substream;
+	switch (event_type) {
+	case HAD_EVENT_AUDIO_BUFFER_DONE:
+		retval = had_process_buffer_done(intelhaddata);
+	break;
+
+	case HAD_EVENT_AUDIO_BUFFER_UNDERRUN:
+		retval = had_process_buffer_underrun(intelhaddata);
+	break;
+
+	case HAD_EVENT_HOT_PLUG:
+		retval = had_process_hot_plug(intelhaddata);
+	break;
+
+	case HAD_EVENT_HOT_UNPLUG:
+		retval = had_process_hot_unplug(intelhaddata);
+	break;
+
+	case HAD_EVENT_MODE_CHANGING:
+		pr_debug(" called _event_handler with _MODE_CHANGE event\n");
+		/* Process only if stream is active & cable Plugged-in */
+		spin_lock_irqsave(&intelhaddata->had_spinlock, flag_irqs);
+		if (intelhaddata->drv_status >= HAD_DRV_DISCONNECTED) {
+			spin_unlock_irqrestore(&intelhaddata->had_spinlock,
+					flag_irqs);
+			break;
+		}
+		spin_unlock_irqrestore(&intelhaddata->had_spinlock, flag_irqs);
+		if ((had_stream->stream_type == HAD_RUNNING_STREAM)
+				&& substream)
+			retval = hdmi_audio_mode_change(substream);
+	break;
+
+	default:
+		pr_debug("error un-handled event !!\n");
+		retval = -EINVAL;
+	break;
+
+	}
+	return retval;
+}
diff --git a/drivers/external_drivers/intel_media/interface/psb_drm.h b/drivers/external_drivers/intel_media/interface/psb_drm.h
new file mode 100755
index 0000000..10cd570
--- /dev/null
+++ b/drivers/external_drivers/intel_media/interface/psb_drm.h
@@ -0,0 +1,1268 @@
+/**************************************************************************
+ * Copyright (c) 2007, Intel Corporation.
+ * All Rights Reserved.
+ * Copyright (c) 2008, Tungsten Graphics Inc.  Cedar Park, TX., USA.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#ifndef _PSB_DRM_H_
+#define _PSB_DRM_H_
+
+#if defined(__linux__) && !defined(__KERNEL__)
+#include <stdbool.h>
+#include <stdint.h>
+#include <linux/types.h>
+#include "drm_mode.h"
+#endif
+
+
+
+/*
+ * Menlow/MRST graphics driver package version
+ * a.b.c.xxxx
+ * a - Product Family: 5 - Linux
+ * b - Major Release Version: 0 - non-Gallium (Unbuntu)
+ *                            1 - Gallium (Moblin2)
+ *                            2 - IMG     (Moblin2)
+ *                            3 - IMG     (Meego)
+ *                            4 - IMG     (Android)
+ * c - Hotfix Release
+ * xxxx - Graphics internal build #
+ */
+#define PSB_PACKAGE_VERSION             "5.6.0.1202"
+
+#define DRM_PSB_SAREA_MAJOR             0
+#define DRM_PSB_SAREA_MINOR             2
+#define PSB_FIXED_SHIFT                 16
+
+#define PSB_NUM_PIPE                    3
+
+/*
+ * Public memory types.
+ */
+
+#define DRM_PSB_MEM_MMU                 TTM_PL_PRIV1
+#define DRM_PSB_FLAG_MEM_MMU            TTM_PL_FLAG_PRIV1
+
+#define TTM_PL_CI                       TTM_PL_PRIV0
+#define TTM_PL_FLAG_CI                  TTM_PL_FLAG_PRIV0
+#define TTM_PL_RAR                      TTM_PL_PRIV2
+#define TTM_PL_FLAG_RAR                 TTM_PL_FLAG_PRIV2
+#define TTM_PL_IMR                      TTM_PL_PRIV2
+#define TTM_PL_FLAG_IMR                 TTM_PL_FLAG_PRIV2
+
+#define DRM_PSB_MEM_MMU_TILING          TTM_PL_PRIV3
+#define DRM_PSB_FLAG_MEM_MMU_TILING     TTM_PL_FLAG_PRIV3
+
+
+/*Status of the command sent to the gfx device.*/
+typedef enum {
+	DRM_CMD_SUCCESS,
+	DRM_CMD_FAILED,
+	DRM_CMD_HANG
+} drm_cmd_status_t;
+
+struct drm_psb_scanout {
+	uint32_t buffer_id;	/* DRM buffer object ID */
+	uint32_t rotation;	/* Rotation as in RR_rotation definitions */
+	uint32_t stride;	/* Buffer stride in bytes */
+	uint32_t depth;		/* Buffer depth in bits (NOT) bpp */
+	uint32_t width;		/* Buffer width in pixels */
+	uint32_t height;	/* Buffer height in lines */
+	int32_t transform[3][3];	/* Buffer composite transform */
+	/* (scaling, rot, reflect) */
+};
+
+#define DRM_PSB_SAREA_OWNERS            16
+#define DRM_PSB_SAREA_OWNER_2D          0
+#define DRM_PSB_SAREA_OWNER_3D          1
+#define DRM_PSB_SAREA_SCANOUTS          3
+
+struct drm_psb_sarea {
+	/* Track changes of this data structure */
+
+	uint32_t major;
+	uint32_t minor;
+
+	/* Last context to touch part of hw */
+	uint32_t ctx_owners[DRM_PSB_SAREA_OWNERS];
+
+	/* Definition of front- and rotated buffers */
+	uint32_t num_scanouts;
+	struct drm_psb_scanout scanouts[DRM_PSB_SAREA_SCANOUTS];
+
+	int planeA_x;
+	int planeA_y;
+	int planeA_w;
+	int planeA_h;
+	int planeB_x;
+	int planeB_y;
+	int planeB_w;
+	int planeB_h;
+	/* Number of active scanouts */
+	uint32_t num_active_scanouts;
+};
+
+#define PSB_RELOC_MAGIC                 0x67676767
+#define PSB_RELOC_SHIFT_MASK            0x0000FFFF
+#define PSB_RELOC_SHIFT_SHIFT           0
+#define PSB_RELOC_ALSHIFT_MASK          0xFFFF0000
+#define PSB_RELOC_ALSHIFT_SHIFT         16
+
+/* Offset of the indicated buffer*/
+#define PSB_RELOC_OP_OFFSET             0
+
+struct drm_psb_reloc {
+	uint32_t reloc_op;
+	uint32_t where;		/* offset in destination buffer */
+	uint32_t buffer;	/* Buffer reloc applies to */
+	uint32_t mask;		/* Destination format: */
+	uint32_t shift;		/* Destination format: */
+	uint32_t pre_add;	/* Destination format: */
+	uint32_t background;	/* Destination add */
+	uint32_t dst_buffer;	/* Destination buffer. Index into buffer_list */
+	uint32_t arg0;		/* Reloc-op dependant */
+	uint32_t arg1;
+};
+
+
+#define PSB_GPU_ACCESS_READ             (1ULL << 32)
+#define PSB_GPU_ACCESS_WRITE            (1ULL << 33)
+#define PSB_GPU_ACCESS_MASK             (PSB_GPU_ACCESS_READ | PSB_GPU_ACCESS_WRITE)
+
+#define PSB_BO_FLAG_COMMAND             (1ULL << 52)
+
+#define PSB_ENGINE_2D                   2
+#define PSB_ENGINE_DECODE               0
+#define PSB_ENGINE_VIDEO                0
+#define LNC_ENGINE_ENCODE               1
+#ifdef MERRIFIELD
+#define PSB_NUM_ENGINES                 7
+#else
+#define PSB_NUM_ENGINES                 2
+#endif
+#define VSP_ENGINE_VPP                  6
+
+/*
+ * For this fence class we have a couple of
+ * fence types.
+ */
+
+#define _PSB_FENCE_EXE_SHIFT            0
+#define _PSB_FENCE_FEEDBACK_SHIFT       4
+
+#define _PSB_FENCE_TYPE_EXE             (1 << _PSB_FENCE_EXE_SHIFT)
+#define _PSB_FENCE_TYPE_FEEDBACK        (1 << _PSB_FENCE_FEEDBACK_SHIFT)
+
+#define PSB_FEEDBACK_OP_VISTEST         (1 << 0)
+
+struct drm_psb_extension_rep {
+	int32_t exists;
+	uint32_t driver_ioctl_offset;
+	uint32_t sarea_offset;
+	uint32_t major;
+	uint32_t minor;
+	uint32_t pl;
+};
+
+#define DRM_PSB_EXT_NAME_LEN            128
+
+union drm_psb_extension_arg {
+	char extension[DRM_PSB_EXT_NAME_LEN];
+	struct drm_psb_extension_rep rep;
+};
+
+#define PSB_NOT_FENCE                (1 << 0)
+#define PSB_MEM_CLFLUSH                (1 << 1)
+
+struct psb_validate_req {
+	uint64_t set_flags;
+	uint64_t clear_flags;
+	uint64_t next;
+	uint64_t presumed_gpu_offset;
+	uint32_t buffer_handle;
+	uint32_t presumed_flags;
+	uint32_t pad64;
+	uint32_t unfence_flag;
+};
+
+struct psb_validate_rep {
+	uint64_t gpu_offset;
+	uint32_t placement;
+	uint32_t fence_type_mask;
+};
+
+#define PSB_USE_PRESUMED                (1 << 0)
+
+struct psb_validate_arg {
+	uint64_t handled;
+	uint64_t ret;
+	union {
+		struct psb_validate_req req;
+		struct psb_validate_rep rep;
+	} d;
+};
+
+
+#define DRM_PSB_FENCE_NO_USER           (1 << 0)
+
+struct psb_ttm_fence_rep {
+	uint32_t handle;
+	uint32_t fence_class;
+	uint32_t fence_type;
+	uint32_t signaled_types;
+	uint32_t error;
+};
+
+typedef struct drm_psb_cmdbuf_arg {
+	uint64_t buffer_list;	/* List of buffers to validate */
+	uint64_t fence_arg;
+
+	uint32_t cmdbuf_handle;	/* 2D Command buffer object or, */
+	uint32_t cmdbuf_offset;	/* rasterizer reg-value pairs */
+	uint32_t cmdbuf_size;
+
+	uint32_t reloc_handle;	/* Reloc buffer object */
+	uint32_t reloc_offset;
+	uint32_t num_relocs;
+
+	/* Not implemented yet */
+	uint32_t fence_flags;
+	uint32_t engine;
+
+} drm_psb_cmdbuf_arg_t;
+
+typedef struct drm_psb_pageflip_arg {
+	uint32_t flip_offset;
+	uint32_t stride;
+} drm_psb_pageflip_arg_t;
+
+typedef enum {
+	LNC_VIDEO_DEVICE_INFO,
+	LNC_VIDEO_GETPARAM_IMR_INFO,
+	LNC_VIDEO_GETPARAM_CI_INFO,
+	LNC_VIDEO_FRAME_SKIP,
+	IMG_VIDEO_DECODE_STATUS,
+	IMG_VIDEO_NEW_CONTEXT,
+	IMG_VIDEO_RM_CONTEXT,
+	IMG_VIDEO_UPDATE_CONTEXT,
+	IMG_VIDEO_MB_ERROR,
+	IMG_VIDEO_SET_DISPLAYING_FRAME,
+	IMG_VIDEO_GET_DISPLAYING_FRAME,
+	IMG_VIDEO_GET_HDMI_STATE,
+	IMG_VIDEO_SET_HDMI_STATE,
+	PNW_VIDEO_QUERY_ENTRY,
+	IMG_DISPLAY_SET_WIDI_EXT_STATE,
+	IMG_VIDEO_IED_STATE
+} lnc_getparam_key_t;
+
+struct drm_lnc_video_getparam_arg {
+	uint64_t key;
+	uint64_t arg;	/* argument pointer */
+	uint64_t value;	/* feed back pointer */
+};
+
+struct drm_video_displaying_frameinfo {
+	uint32_t buf_handle;
+	uint32_t width;
+	uint32_t height;
+	uint32_t size; /* buffer size */
+	uint32_t format; /* fourcc */
+	uint32_t luma_stride; /* luma stride */
+	uint32_t chroma_u_stride; /* chroma stride */
+	uint32_t chroma_v_stride;
+	uint32_t luma_offset; /* luma offset from the beginning of the memory */
+	uint32_t chroma_u_offset; /* UV offset from the beginning of the memory */
+	uint32_t chroma_v_offset;
+	uint32_t reserved;
+};
+
+/*
+ * Feedback components:
+ */
+
+/*
+ * Vistest component. The number of these in the feedback buffer
+ * equals the number of vistest breakpoints + 1.
+ * This is currently the only feedback component.
+ */
+
+struct drm_psb_vistest {
+	uint32_t vt[8];
+};
+
+struct drm_psb_sizes_arg {
+	uint32_t ta_mem_size;
+	uint32_t mmu_size;
+	uint32_t pds_size;
+	uint32_t rastgeom_size;
+	uint32_t tt_size;
+	uint32_t vram_size;
+};
+
+struct drm_psb_hist_status_arg {
+	uint32_t buf[32];
+};
+
+struct drm_psb_dpst_lut_arg {
+	uint8_t lut[256];
+	int output_id;
+};
+
+struct mrst_timing_info {
+	uint16_t pixel_clock;
+	uint8_t hactive_lo;
+	uint8_t hblank_lo;
+	uint8_t hblank_hi:4;
+	uint8_t hactive_hi:4;
+	uint8_t vactive_lo;
+	uint8_t vblank_lo;
+	uint8_t vblank_hi:4;
+	uint8_t vactive_hi:4;
+	uint8_t hsync_offset_lo;
+	uint8_t hsync_pulse_width_lo;
+	uint8_t vsync_pulse_width_lo:4;
+	uint8_t vsync_offset_lo:4;
+	uint8_t vsync_pulse_width_hi:2;
+	uint8_t vsync_offset_hi:2;
+	uint8_t hsync_pulse_width_hi:2;
+	uint8_t hsync_offset_hi:2;
+	uint8_t width_mm_lo;
+	uint8_t height_mm_lo;
+	uint8_t height_mm_hi:4;
+	uint8_t width_mm_hi:4;
+	uint8_t hborder;
+	uint8_t vborder;
+	uint8_t unknown0:1;
+	uint8_t hsync_positive:1;
+	uint8_t vsync_positive:1;
+	uint8_t separate_sync:2;
+	uint8_t stereo:1;
+	uint8_t unknown6:1;
+	uint8_t interlaced:1;
+} __attribute__((packed));
+
+struct gct_r10_timing_info {
+	uint16_t pixel_clock;
+	uint32_t hactive_lo:8;
+	uint32_t hactive_hi:4;
+	uint32_t hblank_lo:8;
+	uint32_t hblank_hi:4;
+	uint32_t hsync_offset_lo:8;
+	uint16_t hsync_offset_hi:2;
+	uint16_t hsync_pulse_width_lo:8;
+	uint16_t hsync_pulse_width_hi:2;
+	uint16_t hsync_positive:1;
+	uint16_t rsvd_1:3;
+	uint8_t  vactive_lo:8;
+	uint16_t vactive_hi:4;
+	uint16_t vblank_lo:8;
+	uint16_t vblank_hi:4;
+	uint16_t vsync_offset_lo:4;
+	uint16_t vsync_offset_hi:2;
+	uint16_t vsync_pulse_width_lo:4;
+	uint16_t vsync_pulse_width_hi:2;
+	uint16_t vsync_positive:1;
+	uint16_t rsvd_2:3;
+} __attribute__((packed));
+
+struct mrst_panel_descriptor_v1 {
+	uint32_t Panel_Port_Control; /* 1 dword, Register 0x61180 if LVDS */
+	/* 0x61190 if MIPI */
+	uint32_t Panel_Power_On_Sequencing;/*1 dword,Register 0x61208,*/
+	uint32_t Panel_Power_Off_Sequencing;/*1 dword,Register 0x6120C,*/
+	uint32_t Panel_Power_Cycle_Delay_and_Reference_Divisor;/* 1 dword */
+	/* Register 0x61210 */
+	struct mrst_timing_info DTD;/*18 bytes, Standard definition */
+	uint16_t Panel_Backlight_Inverter_Descriptor;/* 16 bits, as follows */
+	/* Bit 0, Frequency, 15 bits,0 - 32767Hz */
+	/* Bit 15, Polarity, 1 bit, 0: Normal, 1: Inverted */
+	uint16_t Panel_MIPI_Display_Descriptor;
+	/*16 bits, Defined as follows: */
+	/* if MIPI, 0x0000 if LVDS */
+	/* Bit 0, Type, 2 bits, */
+	/* 0: Type-1, */
+	/* 1: Type-2, */
+	/* 2: Type-3, */
+	/* 3: Type-4 */
+	/* Bit 2, Pixel Format, 4 bits */
+	/* Bit0: 16bpp (not supported in LNC), */
+	/* Bit1: 18bpp loosely packed, */
+	/* Bit2: 18bpp packed, */
+	/* Bit3: 24bpp */
+	/* Bit 6, Reserved, 2 bits, 00b */
+	/* Bit 8, Minimum Supported Frame Rate, 6 bits, 0 - 63Hz */
+	/* Bit 14, Reserved, 2 bits, 00b */
+} __attribute__((packed));
+
+struct mrst_panel_descriptor_v2 {
+	uint32_t Panel_Port_Control; /* 1 dword, Register 0x61180 if LVDS */
+	/* 0x61190 if MIPI */
+	uint32_t Panel_Power_On_Sequencing;/*1 dword,Register 0x61208,*/
+	uint32_t Panel_Power_Off_Sequencing;/*1 dword,Register 0x6120C,*/
+	uint8_t Panel_Power_Cycle_Delay_and_Reference_Divisor;/* 1 byte */
+	/* Register 0x61210 */
+	struct mrst_timing_info DTD;/*18 bytes, Standard definition */
+	uint16_t Panel_Backlight_Inverter_Descriptor;/*16 bits, as follows*/
+	/*Bit 0, Frequency, 16 bits, 0 - 32767Hz*/
+	uint8_t Panel_Initial_Brightness;/* [7:0] 0 - 100% */
+	/*Bit 7, Polarity, 1 bit,0: Normal, 1: Inverted*/
+	uint16_t Panel_MIPI_Display_Descriptor;
+	/*16 bits, Defined as follows: */
+	/* if MIPI, 0x0000 if LVDS */
+	/* Bit 0, Type, 2 bits, */
+	/* 0: Type-1, */
+	/* 1: Type-2, */
+	/* 2: Type-3, */
+	/* 3: Type-4 */
+	/* Bit 2, Pixel Format, 4 bits */
+	/* Bit0: 16bpp (not supported in LNC), */
+	/* Bit1: 18bpp loosely packed, */
+	/* Bit2: 18bpp packed, */
+	/* Bit3: 24bpp */
+	/* Bit 6, Reserved, 2 bits, 00b */
+	/* Bit 8, Minimum Supported Frame Rate, 6 bits, 0 - 63Hz */
+	/* Bit 14, Reserved, 2 bits, 00b */
+} __attribute__((packed));
+
+union mrst_panel_rx {
+	struct {
+		uint16_t NumberOfLanes:2; /*Num of Lanes, 2 bits,0 = 1 lane,*/
+		/* 1 = 2 lanes, 2 = 3 lanes, 3 = 4 lanes. */
+		uint16_t MaxLaneFreq:3; /* 0: 100MHz, 1: 200MHz, 2: 300MHz, */
+		/*3: 400MHz, 4: 500MHz, 5: 600MHz, 6: 700MHz, 7: 800MHz.*/
+		uint16_t SupportedVideoTransferMode:2; /*0: Non-burst only */
+		/* 1: Burst and non-burst */
+		/* 2/3: Reserved */
+		uint16_t HSClkBehavior:1; /*0: Continuous, 1: Non-continuous*/
+		uint16_t DuoDisplaySupport:1; /*1 bit,0: No, 1: Yes*/
+		uint16_t ECC_ChecksumCapabilities:1; /*1 bit,0: No, 1: Yes*/
+		uint16_t BidirectionalCommunication:1; /*1 bit,0: No, 1: Yes */
+		uint16_t Rsvd:5; /*5 bits,00000b */
+	} panelrx;
+	uint16_t panel_receiver;
+} __attribute__((packed));
+
+struct gct_ioctl_arg {
+	uint8_t bpi; /* boot panel index, number of panel used during boot */
+	uint8_t pt; /* panel type, 4 bit field, 0=lvds, 1=mipi */
+	struct mrst_timing_info DTD; /* timing info for the selected panel */
+	uint32_t Panel_Port_Control;
+	uint32_t PP_On_Sequencing;/*1 dword,Register 0x61208,*/
+	uint32_t PP_Off_Sequencing;/*1 dword,Register 0x6120C,*/
+	uint32_t PP_Cycle_Delay;
+	uint16_t Panel_Backlight_Inverter_Descriptor;
+	uint16_t Panel_MIPI_Display_Descriptor;
+} __attribute__((packed));
+
+
+struct gct_timing_desc_block {
+	uint16_t clock;
+	uint16_t hactive:12;
+	uint16_t hblank:12;
+	uint16_t hsync_start:10;
+	uint16_t hsync_end:10;
+	uint16_t hsync_polarity:1;
+	uint16_t h_reversed:3;
+	uint16_t vactive:12;
+	uint16_t vblank:12;
+	uint16_t vsync_start:6;
+	uint16_t vsync_end:6;
+	uint16_t vsync_polarity:1;
+	uint16_t v_reversed:3;
+} __packed;
+
+struct gct_display_desc_block {
+	uint8_t type:2;
+	uint8_t pxiel_format:4;
+	uint8_t mode:2;
+	uint8_t frame_rate:6;
+	uint8_t reserved:2;
+} __attribute__((packed));
+
+struct gct_dsi_desc_block {
+	uint8_t lane_count:2;
+	uint8_t lane_frequency:3;
+	uint8_t transfer_mode:2;
+	uint8_t hs_clock_behavior:1;
+	uint8_t duo_display_support:1;
+	uint8_t ecc_caps:1;
+	uint8_t bdirect_support:1;
+	uint8_t reversed:5;
+} __packed;
+
+struct gct_bkl_desc_block {
+	uint16_t frequency;
+	uint8_t max_brightness:7;
+	uint8_t polarity:1;
+} __packed;
+
+struct gct_r20_clock_desc {
+	uint8_t pre_divisor:2;
+	uint16_t divisor:9;
+	uint8_t post_divisor:4;
+	uint8_t pll_bypass:1;
+	uint8_t cck_clock_divisor:1;
+	uint8_t reserved:7;
+} __packed;
+
+struct gct_r20_panel_info {
+	uint16_t width;
+	uint16_t height;
+} __packed;
+
+struct gct_r20_panel_mode {
+	uint8_t mode:1;
+	uint16_t reserved:15;
+} __packed;
+
+struct gct_r20_dsi_desc {
+	uint8_t num_dsi_lanes:2;
+	uint16_t reserved:14;
+} __packed;
+
+struct gct_r20_panel_desc {
+	uint8_t panel_name[16];
+	struct gct_timing_desc_block timing;
+	struct gct_r20_clock_desc clock_desc;
+	struct gct_r20_panel_info panel_info;
+	struct gct_r20_panel_mode panel_mode;
+	struct gct_r20_dsi_desc dsi_desc;
+	uint32_t early_init_seq;
+	uint32_t late_init_seq;
+} __packed;
+
+struct gct_r11_panel_desc {
+	uint8_t panel_name[16];
+	struct gct_timing_desc_block timing;
+	struct gct_display_desc_block display;
+	struct gct_dsi_desc_block dsi;
+	struct gct_bkl_desc_block bkl;
+	uint32_t early_init_seq;
+	uint32_t late_init_seq;
+} __packed;
+
+struct gct_r10_panel_desc {
+	struct gct_timing_desc_block timing;
+	struct gct_display_desc_block display;
+	struct gct_dsi_desc_block dsi;
+	struct gct_bkl_desc_block bkl;
+	uint32_t early_init_seq;
+	uint32_t late_init_seq;
+	uint8_t reversed[4];
+} __packed;
+
+struct intel_mid_vbt {
+	char signature[4];		/*4 bytes,"$GCT" */
+	uint8_t revision;		/*1 byte GCT version*/
+	uint8_t checksum;		/*1 byte checksum*/
+	uint16_t size;			/*2 byte size of checksumed data*/
+	uint8_t num_of_panel_desc;	/*1 byte number of panel descriptor*/
+	uint8_t primary_panel_idx;	/*1 byte primary panel descriptor idx*/
+	uint8_t secondary_panel_idx;	/*1 byte secondary panel desc idx*/
+	uint8_t splash_flag;		/*1 byte bit 0 is to disable splash*/
+	uint8_t reserved[4];		/*[0..1] relates to GPU burst for R20*/
+	void *panel_descs;
+} __packed;
+
+struct mrst_vbt {
+	char Signature[4];	/*4 bytes,"$GCT" */
+	uint8_t Revision;	/*1 byte */
+	uint8_t Size;		/*1 byte */
+	uint8_t Checksum;	/*1 byte,Calculated */
+	void *mrst_gct;
+} __attribute__ ((packed));
+
+struct mrst_gct_v1 { /* expect this table to change per customer request*/
+	union { /*8 bits,Defined as follows: */
+		struct {
+			uint8_t PanelType:4; /*4 bits, Bit field for panels*/
+			/* 0 - 3: 0 = LVDS, 1 = MIPI*/
+			/*2 bits,Specifies which of the*/
+			uint8_t BootPanelIndex:2;
+			/* 4 panels to use by default*/
+			uint8_t BootMIPI_DSI_RxIndex:2; /*Specifies which of*/
+			/* the 4 MIPI DSI receivers to use*/
+		} PD;
+		uint8_t PanelDescriptor;
+	};
+	struct mrst_panel_descriptor_v1 panel[4];/*panel descrs,38 bytes each*/
+	union mrst_panel_rx panelrx[4]; /* panel receivers*/
+} __attribute__((packed));
+
+struct mrst_gct_v2 { /* expect this table to change per customer request*/
+	union { /*8 bits,Defined as follows: */
+		struct {
+			uint8_t PanelType:4; /*4 bits, Bit field for panels*/
+			/* 0 - 3: 0 = LVDS, 1 = MIPI*/
+			/*2 bits,Specifies which of the*/
+			uint8_t BootPanelIndex:2;
+			/* 4 panels to use by default*/
+			uint8_t BootMIPI_DSI_RxIndex:2; /*Specifies which of*/
+			/* the 4 MIPI DSI receivers to use*/
+		} PD;
+		uint8_t PanelDescriptor;
+	};
+	struct mrst_panel_descriptor_v2 panel[4];/*panel descrs,38 bytes each*/
+	union mrst_panel_rx panelrx[4]; /* panel receivers*/
+} __attribute__((packed));
+
+#define PSB_DC_CRTC_SAVE                0x01
+#define PSB_DC_CRTC_RESTORE             0x02
+#define PSB_DC_OUTPUT_SAVE              0x04
+#define PSB_DC_OUTPUT_RESTORE           0x08
+#define PSB_DC_CRTC_MASK                0x03
+#define PSB_DC_OUTPUT_MASK              0x0C
+
+struct drm_psb_dc_state_arg {
+	uint32_t flags;
+	uint32_t obj_id;
+};
+
+struct drm_psb_mode_operation_arg {
+	uint32_t obj_id;
+	uint16_t operation;
+	struct drm_mode_modeinfo mode;
+	void *data;
+};
+
+struct drm_psb_stolen_memory_arg {
+	uint32_t base;
+	uint32_t size;
+};
+
+/*Display Register Bits*/
+#define REGRWBITS_PFIT_CONTROLS         (1 << 0)
+#define REGRWBITS_PFIT_AUTOSCALE_RATIOS (1 << 1)
+#define REGRWBITS_PFIT_PROGRAMMED_SCALE_RATIOS  (1 << 2)
+#define REGRWBITS_PIPEASRC              (1 << 3)
+#define REGRWBITS_PIPEBSRC              (1 << 4)
+#define REGRWBITS_VTOTAL_A              (1 << 5)
+#define REGRWBITS_VTOTAL_B              (1 << 6)
+#define REGRWBITS_DSPACNTR              (1 << 8)
+#define REGRWBITS_DSPBCNTR              (1 << 9)
+#define REGRWBITS_DSPCCNTR              (1 << 10)
+#define REGRWBITS_SPRITE_UPDATE         (1 << 11)
+#define REGRWBITS_PIPEASTAT             (1 << 12)
+#define REGRWBITS_INT_MASK              (1 << 13)
+#define REGRWBITS_INT_ENABLE            (1 << 14)
+#define REGRWBITS_DISPLAY_ALL           (1 << 15)
+/*Overlay Register Bits*/
+#define OV_REGRWBITS_OVADD              (1 << 0)
+#define OV_REGRWBITS_OGAM_ALL           (1 << 1)
+
+#define OVC_REGRWBITS_OVADD             (1 << 2)
+#define OVC_REGRWBITS_OGAM_ALL          (1 << 3)
+
+#define OV_REGRWBITS_WAIT_FLIP          (1 << 4)
+#define OVC_REGRWBITS_WAIT_FLIP         (1 << 5)
+#define OVSTATUS_REGRBIT_OVR_UPDT       (1 << 6)
+
+/*sprite update fields*/
+#define SPRITE_UPDATE_SURFACE           (0x00000001UL)
+#define SPRITE_UPDATE_CONTROL           (0x00000002UL)
+#define SPRITE_UPDATE_POSITION          (0x00000004UL)
+#define SPRITE_UPDATE_SIZE              (0x00000008UL)
+#define SPRITE_UPDATE_WAIT_VBLANK       (0X00000010UL)
+#define SPRITE_UPDATE_CONSTALPHA        (0x00000020UL)
+#define SPRITE_UPDATE_ALL               (0x0000003fUL)
+
+/*vsync operation*/
+#define VSYNC_ENABLE                    (1 << 0)
+#define VSYNC_DISABLE                   (1 << 1)
+#define VSYNC_WAIT                      (1 << 2)
+#define GET_VSYNC_COUNT                 (1 << 3)
+struct intel_overlay_context {
+	uint32_t index;
+	uint32_t pipe;
+	uint32_t ovadd;
+};
+
+struct intel_sprite_context {
+	uint32_t update_mask;
+	/*plane index 0-A, 1-B, 2-C,etc*/
+	uint32_t index;
+	uint32_t pipe;
+
+	uint32_t cntr;
+	uint32_t linoff;
+	uint32_t stride;
+	uint32_t pos;
+	uint32_t size;
+	uint32_t keyminval;
+	uint32_t keymask;
+	uint32_t surf;
+	uint32_t keymaxval;
+	uint32_t tileoff;
+	uint32_t contalpa;
+};
+
+/* dependent macros*/
+#define INTEL_SPRITE_PLANE_NUM          3
+#define INTEL_OVERLAY_PLANE_NUM         2
+#define INTEL_DISPLAY_PLANE_NUM         5
+/* Medfield */
+#define INTEL_MDFLD_SPRITE_PLANE_NUM    3
+#define INTEL_MDFLD_OVERLAY_PLANE_NUM   2
+#define INTEL_MDFLD_CURSOR_PLANE_NUM    3
+#define INTEL_MDFLD_DISPLAY_PLANE_NUM   8
+#define INTEL_MDFLD_DISPLAY_PIPE_NUM    3
+/* Clovertrail+ */
+#define INTEL_CTP_SPRITE_PLANE_NUM      2
+#define INTEL_CTP_OVERLAY_PLANE_NUM     1
+#define INTEL_CTP_CURSOR_PLANE_NUM      2
+#define INTEL_CTP_DISPLAY_PLANE_NUM     5
+#define INTEL_CTP_DISPLAY_PIPE_NUM      2
+
+#define INVALID_INDEX                   0xffffffff
+
+struct mdfld_plane_contexts {
+	uint32_t active_primaries;
+	uint32_t active_sprites;
+	uint32_t active_overlays;
+	struct intel_sprite_context primary_contexts[INTEL_SPRITE_PLANE_NUM];
+	struct intel_sprite_context sprite_contexts[INTEL_SPRITE_PLANE_NUM];
+	struct intel_overlay_context overlay_contexts[INTEL_OVERLAY_PLANE_NUM];
+};
+
+struct drm_psb_vsync_set_arg {
+	uint32_t vsync_operation_mask;
+
+	struct {
+		uint32_t pipe;
+		int vsync_pipe;
+		int vsync_count;
+		uint64_t timestamp;
+	} vsync;
+};
+
+struct drm_psb_dc_info {
+	uint32_t pipe_count;
+
+	uint32_t primary_plane_count;
+	uint32_t sprite_plane_count;
+	uint32_t overlay_plane_count;
+	uint32_t cursor_plane_count;
+};
+
+struct drm_psb_register_rw_arg {
+	uint32_t b_force_hw_on;
+
+	uint32_t display_read_mask;
+	uint32_t display_write_mask;
+	struct {
+		uint32_t pfit_controls;
+		uint32_t pfit_autoscale_ratios;
+		uint32_t pfit_programmed_scale_ratios;
+		uint32_t pipeasrc;
+		uint32_t pipebsrc;
+		uint32_t vtotal_a;
+		uint32_t vtotal_b;
+		uint32_t dspcntr_a;
+		uint32_t dspcntr_b;
+		uint32_t pipestat_a;
+		uint32_t int_mask;
+		uint32_t int_enable;
+	} display;
+
+	uint32_t overlay_read_mask;
+	uint32_t overlay_write_mask;
+
+	struct {
+		uint32_t OVADD;
+		uint32_t OGAMC0;
+		uint32_t OGAMC1;
+		uint32_t OGAMC2;
+		uint32_t OGAMC3;
+		uint32_t OGAMC4;
+		uint32_t OGAMC5;
+		uint32_t IEP_ENABLED;
+		uint32_t IEP_BLE_MINMAX;
+		uint32_t IEP_BSSCC_CONTROL;
+		uint32_t index;
+		uint32_t b_wait_vblank;
+		uint32_t b_wms;
+		uint32_t buffer_handle;
+	} overlay;
+
+	uint32_t vsync_operation_mask;
+
+	struct {
+		uint32_t pipe;
+		int vsync_pipe;
+		int vsync_count;
+		uint64_t timestamp;
+	} vsync;
+
+	uint32_t sprite_enable_mask;
+	uint32_t sprite_disable_mask;
+
+	struct {
+		uint32_t dspa_control;
+		uint32_t dspa_key_value;
+		uint32_t dspa_key_mask;
+		uint32_t dspc_control;
+		uint32_t dspc_stride;
+		uint32_t dspc_position;
+		uint32_t dspc_linear_offset;
+		uint32_t dspc_size;
+		uint32_t dspc_surface;
+	} sprite;
+
+	uint32_t subpicture_enable_mask;
+	uint32_t subpicture_disable_mask;
+	struct {
+		uint32_t CursorADDR;
+		uint32_t xPos;
+		uint32_t yPos;
+		uint32_t CursorSize;
+	} cursor;
+	uint32_t cursor_enable_mask;
+	uint32_t cursor_disable_mask;
+
+	uint32_t plane_enable_mask;
+	uint32_t plane_disable_mask;
+
+	uint32_t get_plane_state_mask;
+
+	struct {
+		uint32_t type;
+		uint32_t index;
+		uint32_t ctx;
+	} plane;
+};
+
+enum {
+	PSB_DC_PLANE_ENABLED,
+	PSB_DC_PLANE_DISABLED,
+};
+
+enum {
+	PSB_GTT_MAP_TYPE_MEMINFO = 0,
+	PSB_GTT_MAP_TYPE_BCD,
+	PSB_GTT_MAP_TYPE_BCD_INFO,
+	PSB_GTT_MAP_TYPE_VIRTUAL,
+};
+
+struct psb_gtt_mapping_arg {
+	uint32_t type;
+	void *hKernelMemInfo;
+	uint32_t offset_pages;
+	uint32_t page_align;
+	uint32_t bcd_device_id;
+	uint32_t bcd_buffer_id;
+	uint32_t bcd_buffer_count;
+	uint32_t bcd_buffer_stride;
+	uint32_t vaddr;
+	uint32_t size;
+};
+
+struct drm_psb_getpageaddrs_arg {
+	uint64_t handle;
+	uint64_t page_addrs;
+	uint64_t gtt_offset;
+};
+
+
+#define MAX_SLICES_PER_PICTURE 72
+struct  psb_msvdx_mb_region {
+	uint32_t start;
+	uint32_t end;
+};
+
+typedef struct drm_psb_msvdx_decode_status {
+	uint32_t num_region;
+	struct psb_msvdx_mb_region mb_regions[MAX_SLICES_PER_PICTURE];
+} drm_psb_msvdx_decode_status_t;
+
+
+enum {
+	IDLE_CTRL_ENABLE = 0,
+	IDLE_CTRL_DISABLE,
+	IDLE_CTRL_ENTER,
+	IDLE_CTRL_EXIT
+};
+
+struct drm_psb_idle_ctrl {
+	uint32_t cmd;
+	uint32_t value;
+};
+
+/* Controlling the kernel modesetting buffers */
+
+#define DRM_PSB_KMS_OFF                 0x00
+#define DRM_PSB_KMS_ON                  0x01
+#define DRM_PSB_VT_LEAVE                0x02
+#define DRM_PSB_VT_ENTER                0x03
+#define DRM_PSB_EXTENSION               0x06
+#define DRM_PSB_SIZES                   0x07
+#define DRM_PSB_FUSE_REG                0x08
+#define DRM_PSB_VBT                     0x09
+#define DRM_PSB_DC_STATE                0x0A
+#define DRM_PSB_ADB                     0x0B
+#define DRM_PSB_MODE_OPERATION          0x0C
+#define DRM_PSB_STOLEN_MEMORY           0x0D
+#define DRM_PSB_REGISTER_RW             0x0E
+#define DRM_PSB_GTT_MAP                 0x0F
+#define DRM_PSB_GTT_UNMAP               0x10
+#define DRM_PSB_GETPAGEADDRS            0x11
+/**
+ * NOTE: Add new commands here, but increment
+ * the values below and increment their
+ * corresponding defines where they're
+ * defined elsewhere.
+ */
+#define DRM_PVR_RESERVED1               0x12
+#define DRM_PVR_RESERVED2               0x13
+#define DRM_PVR_RESERVED3               0x14
+#define DRM_PVR_RESERVED4               0x15
+#define DRM_PVR_RESERVED5               0x16
+
+#define DRM_PSB_HIST_ENABLE             0x17
+#define DRM_PSB_HIST_STATUS             0x18
+#define DRM_PSB_UPDATE_GUARD            0x19
+#define DRM_PSB_INIT_COMM               0x1A
+#define DRM_PSB_DPST                    0x1B
+#define DRM_PSB_GAMMA                   0x1C
+#define DRM_PSB_DPST_BL                 0x1D
+
+#define DRM_PVR_RESERVED6               0x1E
+
+#define DRM_PSB_GET_PIPE_FROM_CRTC_ID   0x1F
+#define DRM_PSB_DPU_QUERY               0x20
+#define DRM_PSB_DPU_DSR_ON              0x21
+#define DRM_PSB_DPU_DSR_OFF             0x22
+#define DRM_PSB_HDMI_FB_CMD             0x23
+
+/* HDCP IOCTLs */
+#define DRM_PSB_QUERY_HDCP              0x24
+#define DRM_PSB_VALIDATE_HDCP_KSV       0x25
+#define DRM_PSB_GET_HDCP_STATUS         0x26
+#define DRM_PSB_ENABLE_HDCP             0x27
+#define DRM_PSB_DISABLE_HDCP            0x28
+#define DRM_PSB_GET_HDCP_LINK_STATUS    0x2b
+#define DRM_PSB_ENABLE_HDCP_REPEATER    0x2c
+#define DRM_PSB_DISABLE_HDCP_REPEATER   0x2d
+#define DRM_PSB_HDCP_REPEATER_PRESENT   0x2e
+
+/* CSC IOCTLS */
+#define DRM_PSB_CSC_GAMMA_SETTING       0x29
+#define DRM_PSB_SET_CSC                 0x2a
+
+/* IED session */
+#define DRM_PSB_ENABLE_IED_SESSION      0x30
+#define DRM_PSB_DISABLE_IED_SESSION     0x31
+
+/* VSYNC IOCTLS */
+#define DRM_PSB_VSYNC_SET               0x32
+
+/* HDCP */
+#define DRM_PSB_HDCP_DISPLAY_IED_OFF    0x33
+#define DRM_PSB_HDCP_DISPLAY_IED_ON     0x34
+#define DRM_PSB_QUERY_HDCP_DISPLAY_IED_CAPS 0x35
+
+/* DPST LEVEL */
+#define DRM_PSB_DPST_LEVEL              0x36
+
+/* GET DC INFO IOCTLS */
+#define DRM_PSB_GET_DC_INFO             0x37
+
+/* Panel type query, 0: command mode, 1: video mode */
+#define DRM_PSB_PANEL_QUERY             0x38
+
+/* IDLE IOCTL*/
+#define DRM_PSB_IDLE_CTRL               0x39
+
+/****BEGIN HDMI TEST IOCTLS ****/
+#define DRM_PSB_HDMITEST                0x3A
+
+/* read an hdmi test register */
+#define HT_READ                         1
+/* write an hdmi test register */
+#define HT_WRITE                        2
+/* force power island on */
+#define HT_FORCEON                      4
+
+typedef struct tagHDMITESTREGREADWRITE {
+	/* register offset */
+	unsigned int reg;
+	/* input/output value */
+	unsigned int data;
+	/* OR'ed combo of HT_xx flags */
+	int mode;
+} drm_psb_hdmireg_t, *drm_psb_hdmireg_p;
+
+/**** END HDMI TEST IOCTLS ****/
+
+
+/* GET PANEL ORIENTATION INFO */
+#define DRM_PSB_PANEL_ORIENTATION       0x3B
+
+/* Update cursor position, input is intel_dc_cursor_ctx */
+#define DRM_PSB_UPDATE_CURSOR_POS       0x3C
+
+
+/* Do not use IOCTL between 0x40 and 0x4F */
+/* These will be reserved for OEM to use */
+/* OEM IOCTLs */
+#define DRM_OEM_RESERVED_START          0x40
+#define DRM_OEM_RESERVED_END            0x4F
+
+
+/*
+ * TTM execbuf extension.
+ */
+#define DRM_PSB_TTM_START               0x50
+#define DRM_PSB_TTM_END                 0x5F
+#if defined(PDUMP)
+#define DRM_PSB_CMDBUF                  (PVR_DRM_DBGDRV_CMD + 1)
+#else
+#define DRM_PSB_CMDBUF                  (DRM_PSB_TTM_START)
+#endif
+#define DRM_PSB_SCENE_UNREF             (DRM_PSB_CMDBUF + 1)
+#define DRM_PSB_PLACEMENT_OFFSET        (DRM_PSB_SCENE_UNREF + 1)
+
+
+
+
+#define DRM_PSB_DSR_ENABLE              0xfffffffe
+#define DRM_PSB_DSR_DISABLE             0xffffffff
+
+struct drm_psb_csc_matrix {
+	int pipe;
+	int64_t matrix[9];
+}__attribute__((packed));
+
+struct psb_drm_dpu_rect {
+	int x, y;
+	int width, height;
+};
+
+struct drm_psb_drv_dsr_off_arg {
+	int screen;
+	struct psb_drm_dpu_rect damage_rect;
+};
+
+
+struct drm_psb_dev_info_arg {
+	uint32_t num_use_attribute_registers;
+};
+#define DRM_PSB_DEVINFO                 0x01
+#define PSB_MODE_OPERATION_MODE_VALID   0x01
+#define PSB_MODE_OPERATION_SET_DC_BASE  0x02
+
+struct drm_psb_get_pipe_from_crtc_id_arg {
+	/** ID of CRTC being requested **/
+	uint32_t crtc_id;
+
+	/** pipe of requested CRTC **/
+	uint32_t pipe;
+};
+#define DRM_PSB_DISP_SAVE_HDMI_FB_HANDLE        1
+#define DRM_PSB_DISP_GET_HDMI_FB_HANDLE         2
+#define DRM_PSB_DISP_INIT_HDMI_FLIP_CHAIN       1
+#define DRM_PSB_DISP_QUEUE_BUFFER               2
+#define DRM_PSB_DISP_DEQUEUE_BUFFER             3
+#define DRM_PSB_DISP_PLANEB_DISABLE             4
+#define DRM_PSB_DISP_PLANEB_ENABLE              5
+#define DRM_PSB_HDMI_OSPM_ISLAND_DOWN           6
+#define DRM_PSB_HDMI_NOTIFY_HOTPLUG_TO_AUDIO    7
+
+/*csc gamma setting*/
+typedef enum {
+	GAMMA,
+	CSC,
+	GAMMA_INITIA,
+	GAMMA_SETTING,
+	GAMMA_REG_SETTING,
+	CSC_INITIA,
+	CSC_CHROME_SETTING,
+	CSC_SETTING,
+	CSC_REG_SETTING
+} setting_type;
+
+typedef enum {
+	/* gamma 0.5 */
+	GAMMA_05 = 1,
+	/* gamma 2.0 */
+	GAMMA_20,
+	/* gamma 0.5 + 2.0*/
+	GAMMA_05_20,
+	/* gamma 2.0 + 0.5*/
+	GAMMA_20_05,
+	/* gamma 1.0 */
+	GAMMA_10
+} gamma_mode;
+
+#define CSC_REG_COUNT                   6
+#define CHROME_COUNT                    16
+#define CSC_COUNT                       9
+
+struct csc_setting {
+	uint32_t pipe;
+	setting_type type;
+	bool enable_state;
+	uint32_t data_len;
+	union {
+		int csc_reg_data[CSC_REG_COUNT];
+		int chrome_data[CHROME_COUNT];
+		int64_t csc_data[CSC_COUNT];
+	} data;
+};
+#define GAMMA_10_BIT_TABLE_COUNT        129
+
+struct gamma_setting {
+	uint32_t pipe;
+	setting_type type;
+	bool enable_state;
+	gamma_mode initia_mode;
+	uint32_t data_len;
+	uint32_t gamma_tableX100[GAMMA_10_BIT_TABLE_COUNT];
+};
+struct drm_psb_csc_gamma_setting {
+	setting_type type;
+	union {
+		struct csc_setting csc_data;
+		struct gamma_setting gamma_data;
+	} data;
+}__attribute__((packed));
+struct drm_psb_buffer_data {
+	void *h_buffer;
+};
+struct drm_psb_flip_chain_data {
+	void **h_buffer_array;
+	unsigned int size;
+};
+struct drm_psb_disp_ctrl {
+	uint32_t cmd;
+	union {
+		uint32_t data;
+		struct drm_psb_buffer_data buf_data;
+		struct drm_psb_flip_chain_data flip_chain_data;
+	} u;
+};
+
+/* Merrifield driver specific interface */
+
+#define S3D_MIPIA_DISPLAY               0
+#define S3D_HDMI_DISPLAY                1
+#define S3D_MIPIC_DISPLAY               2
+#define S3D_WIDI_DISPLAY                0xFF
+
+struct drm_psb_s3d_query {
+	uint32_t s3d_display_type;
+	uint32_t is_s3d_supported;
+	uint32_t s3d_format;
+	uint32_t mode_resolution_x;
+	uint32_t mode_resolution_y;
+	uint32_t mode_refresh_rate;
+	uint32_t is_interleaving;
+};
+
+struct drm_psb_s3d_premodeset {
+	uint32_t s3d_buffer_format;
+};
+
+
+typedef enum intel_dc_plane_types {
+	DC_UNKNOWN_PLANE = 0,
+	DC_SPRITE_PLANE = 1,
+	DC_OVERLAY_PLANE,
+	DC_PRIMARY_PLANE,
+	DC_CURSOR_PLANE,
+	DC_PLANE_MAX,
+} DC_MRFLD_PLANE_TYPE;
+
+#define SPRITE_UPDATE_SURFACE           (0x00000001UL)
+#define SPRITE_UPDATE_CONTROL           (0x00000002UL)
+#define SPRITE_UPDATE_POSITION          (0x00000004UL)
+#define SPRITE_UPDATE_SIZE              (0x00000008UL)
+#define SPRITE_UPDATE_WAIT_VBLANK       (0X00000010UL)
+#define SPRITE_UPDATE_CONSTALPHA        (0x00000020UL)
+#define SPRITE_UPDATE_ALL               (0x0000003fUL)
+#define MRFLD_PRIMARY_COUNT             3
+
+typedef struct intel_dc_overlay_ctx {
+	uint32_t index;
+	uint32_t pipe;
+	uint32_t ovadd;
+} DC_MRFLD_OVERLAY_CONTEXT;
+
+typedef struct intel_dc_cursor_ctx {
+	uint32_t index;
+	uint32_t pipe;
+	uint32_t cntr;
+	uint32_t surf;
+	uint32_t pos;
+} DC_MRFLD_CURSOR_CONTEXT;
+
+typedef struct intel_dc_sprite_ctx {
+	uint32_t update_mask;
+	/* plane index 0-A, 1-B, 2-C,etc */
+	uint32_t index;
+	uint32_t pipe;
+
+	uint32_t cntr;
+	uint32_t linoff;
+	uint32_t stride;
+	uint32_t pos;
+	uint32_t size;
+	uint32_t keyminval;
+	uint32_t keymask;
+	uint32_t surf;
+	uint32_t keymaxval;
+	uint32_t tileoff;
+	uint32_t contalpa;
+} DC_MRFLD_SPRITE_CONTEXT;
+
+typedef struct intel_dc_primary_ctx {
+	uint32_t update_mask;
+	/* plane index 0-A, 1-B, 2-C,etc */
+	uint32_t index;
+	uint32_t pipe;
+	uint32_t cntr;
+	uint32_t linoff;
+	uint32_t stride;
+	uint32_t pos;
+	uint32_t size;
+	uint32_t keyminval;
+	uint32_t keymask;
+	uint32_t surf;
+	uint32_t keymaxval;
+	uint32_t tileoff;
+	uint32_t contalpa;
+} DC_MRFLD_PRIMARY_CONTEXT;
+
+typedef struct intel_dc_plane_zorder {
+	/* 3 primary planes */
+	uint32_t forceBottom[3];
+	/* 1 sprite plane */
+	uint32_t abovePrimary;
+} DC_MRFLD_DC_PLANE_ZORDER;
+
+typedef struct intel_dc_plane_ctx {
+	enum intel_dc_plane_types type;
+	struct intel_dc_plane_zorder zorder;
+	union {
+		struct intel_dc_overlay_ctx ov_ctx;
+		struct intel_dc_sprite_ctx sp_ctx;
+		struct intel_dc_primary_ctx prim_ctx;
+		struct intel_dc_cursor_ctx cs_ctx;
+	} ctx;
+} DC_MRFLD_SURF_CUSTOM;
+
+#endif
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/ipil/common/ipil_hdcp.c b/drivers/external_drivers/intel_media/otm_hdmi/ipil/common/ipil_hdcp.c
new file mode 100644
index 0000000..d080d9c
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/ipil/common/ipil_hdcp.c
@@ -0,0 +1,253 @@
+/*
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  Contact Information:
+
+  Intel Corporation
+  2200 Mission College Blvd.
+  Santa Clara, CA  95054
+
+  BSD LICENSE
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+
+#include <linux/types.h>
+#include "hdcp_rx_defs.h"
+#include "ips_hdcp_api.h"
+#include "ipil_hdcp_api.h"
+
+/**
+ * Description: check whether hdcp hardware is ready
+ *
+ * Returns:	true if ready else false
+ */
+bool ipil_hdcp_is_ready(void)
+{
+	return ips_hdcp_is_ready();
+}
+
+/**
+ * Description: read an from hdcp tx
+ *
+ * @an		buffer to return an
+ * @size	size of an buffer passed
+ *
+ * Returns:	true on succesful read else false
+ */
+bool ipil_hdcp_get_an(uint8_t *an, uint32_t size)
+{
+	bool ret = false;
+	if (an != NULL && size == HDCP_AN_SIZE) {
+		ips_hdcp_get_an(an);
+		ret = true;
+	}
+	return ret;
+}
+
+/**
+ * Description: read aksv from hdcp tx
+ *
+ * @aksv	buffer to return aksv
+ * @size	size of an buffer passed
+ *
+ * Returns:	true on succesful read else false
+ */
+bool ipil_hdcp_get_aksv(uint8_t *aksv, uint32_t size)
+{
+	bool ret = false;
+	if (aksv != NULL  && size == HDCP_KSV_SIZE) {
+		ips_hdcp_get_aksv(aksv);
+		ret = true;
+	}
+	return ret;
+}
+
+/**
+ * Description: set repeater bit in hdcp tx if downstream is a repeater else
+ *		reset the bit
+ *
+ * @present	indicates whether downstream is repeater or not
+ *
+ * Returns:	true on succesful write else false
+ */
+bool ipil_hdcp_set_repeater(bool present)
+{
+	return ips_hdcp_set_repeater(present);
+}
+
+/**
+ * Description: set downstream bksv in hdcp tx
+ *
+ * @bksv	bksv from downstream device
+ *
+ * Returns:	true on succesful write else false
+ */
+bool ipil_hdcp_set_bksv(uint8_t *bksv)
+{
+	return ips_hdcp_set_bksv(bksv);
+}
+
+/**
+ * Description: start first stage of authentication by writing an aksv
+ *
+ * Returns:	true on succesfully starting authentication else false
+ */
+bool ipil_hdcp_start_authentication(void)
+{
+	return ips_hdcp_start_authentication();
+}
+
+/**
+ * Description: check if hdcp tx R0 is ready after starting authentication
+ *
+ * Returns:	true if r0 is ready else false
+ */
+bool ipil_hdcp_is_r0_ready(void)
+{
+	return ips_hdcp_is_r0_ready();
+}
+
+/**
+ * Description: check if hdcp tx & rx ri matches
+ *
+ * @rx_ri	ri of downstream device
+ *
+ * Returns:	true if ri matches else false
+ */
+bool ipil_hdcp_does_ri_match(uint16_t rx_ri)
+{
+	return ips_hdcp_does_ri_match(rx_ri);
+}
+
+/**
+ * Description: Enable encryption once r0 matches
+ *
+ * Returns:	true on enabling encryption else false
+ */
+bool ipil_hdcp_enable_encryption(void)
+{
+	return ips_hdcp_enable_encryption();
+}
+
+/**
+ * Description: compute hdcp tx's v(sha1) for repeater authentication
+ *
+ * @rep_ksv_list	 ksv list from downstream repeater
+ * @rep_ksv_list_entries number of entries in the ksv list
+ * @topology_data	 bstatus value
+ *
+ * Returns:	true on successfully computing v else false
+ */
+bool ipil_hdcp_compute_tx_v(uint8_t *rep_ksv_list,
+				    uint32_t rep_ksv_list_entries,
+				    uint16_t topology_data)
+{
+	return ips_hdcp_compute_tx_v(rep_ksv_list,
+				     rep_ksv_list_entries,
+				     topology_data);
+}
+
+/**
+ * Description: compare hdcp tx & hdcp rx sha1 results
+ *
+ * @rep_prime_v	sha1 value from downstream repeater
+ *
+ * Returns:	true if same else false
+ */
+bool ipil_hdcp_compare_v(uint32_t *rep_prime_v)
+{
+	return ips_hdcp_compare_v(rep_prime_v);
+}
+
+/**
+ * Description: disable hdcp
+ *
+ * Returns:	true on successfully disabling hdcp else false
+ */
+bool ipil_hdcp_disable(void)
+{
+	return ips_hdcp_disable();
+}
+
+/**
+ * Description: check whether hdcp tx can authenticate
+ *
+ * Returns:	true if device can authenticate else false
+ */
+bool ipil_hdcp_device_can_authenticate(void)
+{
+	return ips_hdcp_device_can_authenticate();
+}
+
+/**
+ * Description: initialize hdcp tx for authentication
+ *
+ * Returns:	true success else false
+ */
+bool ipil_hdcp_init(void)
+{
+	return ips_hdcp_init();
+}
+
+/**
+ * Description: get hardware frame count for cipher Ri update
+ *
+ * @count   frame count for cipher Ri update
+ *
+ * Returns: true if successful else false
+ */
+bool ipil_hdcp_get_ri_frame_count(uint8_t *count)
+{
+	return ips_hdcp_get_ri_frame_count(count);
+}
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/ipil/common/ipil_internal.h b/drivers/external_drivers/intel_media/otm_hdmi/ipil/common/ipil_internal.h
new file mode 100644
index 0000000..5e8a27c
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/ipil/common/ipil_internal.h
@@ -0,0 +1,141 @@
+/*
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  Contact Information:
+
+  Intel Corporation
+  2200 Mission College Blvd.
+  Santa Clara, CA  95054
+
+  BSD LICENSE
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#ifndef __IPIL_INTERNAL_H
+#define __IPIL_INTERNAL_H
+
+#include "ips_hdmi.h"
+
+/* hdmi display related registers */
+#define IPIL_DSPBASE		IPS_DSPBASE
+#define IPIL_DSPBSIZE		IPS_DSPBSIZE
+#define IPIL_PIPEBSRC		IPS_PIPEBSRC
+#define IPIL_DSPBPOS		IPS_DSPBPOS
+#define IPIL_PFIT_CONTROL	IPS_PFIT_CONTROL
+#define IPIL_HTOTAL_B		IPS_HTOTAL_B
+#define IPIL_VTOTAL_B		IPS_VTOTAL_B
+#define IPIL_HBLANK_B		IPS_HBLANK_B
+#define IPIL_VBLANK_B		IPS_VBLANK_B
+#define IPIL_HSYNC_B		IPS_HSYNC_B
+#define IPIL_VSYNC_B		IPS_VSYNC_B
+#define IPIL_DPLL_B		    IPS_DPLL_B
+#define IPIL_DPLL_DIV0		IPS_DPLL_DIV0
+#define IPIL_PIPEBCONF		IPS_PIPEBCONF
+#define IPIL_DSPBCNTR		IPS_DSPBCNTR
+#define IPIL_HDMIB_CONTROL  IPS_HDMIB_CONTROL
+#define IPIL_DSPBSTRIDE     IPS_DSPBSTRIDE
+#define IPIL_DSPBLINOFF     IPS_DSPBLINOFF
+#define IPIL_DSPBTILEOFF    IPS_DSPBTILEOFF
+#define IPIL_DSPBSURF       IPS_DSPBSURF
+#define IPIL_DSPBSTAT       IPS_DSPBSTAT
+#define IPIL_PALETTE_B      IPS_PALETTE_B
+#define IPIL_PFIT_PGM_RATIOS     IPS_PFIT_PGM_RATIOS
+#define IPIL_HDMIPHYMISCCTL      IPS_HDMIPHYMISCCTL
+
+#define IPIL_VGACNTRL 0x71400
+#define IPIL_DPLL_PWR_GATE_EN (1<<30)
+#define IPIL_DPLL_VCO_ENABLE (1<<31)
+#define IPIL_VGA_DISP_DISABLE (1<<31)
+
+/* TODO: revisit this. make then IP specific */
+#define IPIL_PFIT_ENABLE		(1 << 31)
+#define IPIL_PFIT_PIPE_SHIFT		29
+#define IPIL_PFIT_PIPE_SELECT_B		(1 << IPIL_PFIT_PIPE_SHIFT)
+#define IPIL_PFIT_SCALING_AUTO		(0 << 26)
+#define IPIL_PFIT_SCALING_PROGRAM	(1 << 26)
+#define IPIL_PFIT_SCALING_PILLARBOX	(1 << 27)
+#define IPIL_PFIT_SCALING_LETTERBOX	(3 << 26)
+#define IPIL_PFIT_FRACTIONAL_VALUE	(1 << 12)
+#define IPIL_PFIT_COEFF_MEDIAN_VALUE 	(1 << 25)
+#define IPIL_PFIT_VERT_SCALE_SHIFT	16
+#define IPIL_PFIT_HORIZ_SCALE_SHIFT	0
+#define IPIL_PFIT_VERT_MSB_SHIFT	28
+#define IPIL_PFIT_HORIZ_MSB_SHIFT	12
+
+#define IPIL_PWR_GATE_EN	(1 << 30)
+#define IPIL_PIPECONF_PLL_LOCK	(1<<29)
+#define IPIL_DSP_PLANE_PIPE_POS	24
+#define IPIL_DSP_PLANE_ENABLE	(1<<31)
+#define IPIL_PIPEACONF_ENABLE	(1<<31)
+#define IPIL_PIPEACONF_PIPE_STATE    (1<<30)
+#define IPIL_PIPECONF_PLANE_OFF  (1<<19)
+#define IPIL_PIPECONF_CURSOR_OFF     (1<<18)
+#define IPIL_P1_MASK		(0x1FF << 17)
+#define IPIL_HDMIB_PORT_EN	(1 << 31)
+#define IPIL_HDMIB_PIPE_B_SELECT (1 << 30)
+#define IPIL_HDMIB_NULL_PACKET	(1 << 9)
+#define IPIL_HDMIB_AUDIO_ENABLE	(1 << 6)
+#define IPIL_HDMIB_COLOR_RANGE_SELECT (1 << 8)
+#define IPIL_HDMI_PHY_POWER_DOWN 0x7f
+
+#define IPIL_TIMING_FLAG_PHSYNC	(1<<0)
+#define IPIL_TIMING_FLAG_NHSYNC	(1<<1)
+#define IPIL_TIMING_FLAG_PVSYNC	(1<<2)
+#define IPIL_TIMING_FLAG_NVSYNC	(1<<3)
+
+#define IPIL_HSYNC_POLARITY_MASK IPS_HSYNC_POLARITY_MASK
+#define IPIL_VSYNC_POLARITY_MASK IPS_VSYNC_POLARITY_MASK
+#define CLEAR_BITS(val, mask) ((val) & (~mask))
+#define SET_BITS(val, mask)  ((val) | (mask))
+
+#endif /* __IPIL_INTERNAL_H */
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/ipil/common/otm_ipil_main.c b/drivers/external_drivers/intel_media/otm_hdmi/ipil/common/otm_ipil_main.c
new file mode 100755
index 0000000..140e9e3
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/ipil/common/otm_ipil_main.c
@@ -0,0 +1,868 @@
+/*
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  Contact Information:
+
+  Intel Corporation
+  2200 Mission College Blvd.
+  Santa Clara, CA  95054
+
+  BSD LICENSE
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+#include <asm/io.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include "ipil_internal.h"
+
+#include "otm_hdmi.h"
+#include "otm_hdmi_types.h"
+
+#include "ips_hdmi.h"
+#include "ipil_hdmi.h"
+
+static hdmi_device_t *hdmi_dev;
+
+/**
+ * Description: pass hdmi device information to lower layer
+ * @dev:	hdmi_device_t
+ *
+ * Returns:	OTM_HDMI_SUCCESS on success
+ *		OTM_HDMI_ERR_NULL_ARG on bad argument
+ */
+otm_hdmi_ret_t ipil_hdmi_set_hdmi_dev(hdmi_device_t *dev)
+{
+	otm_hdmi_ret_t rc = OTM_HDMI_SUCCESS;
+	if (!dev)
+		rc = OTM_HDMI_ERR_NULL_ARG;
+	else
+		hdmi_dev = dev;
+	return rc;
+}
+
+/**
+ * Description: read 32 bit hdmi register
+ * @reg:	register address
+ *
+ * Returns:	register value
+ */
+uint32_t hdmi_read32(uint32_t reg)
+{
+	if (hdmi_dev)
+		return readl((const void *)(hdmi_dev->io_address + reg));
+
+	return 0;
+}
+
+/**
+ * Description: write 32 bit value to hdmi register
+ * @reg:	register address
+ * @val:	value to be written
+ *
+ * Returns:	none
+ */
+void hdmi_write32(uint32_t reg, uint32_t val)
+{
+	if (hdmi_dev)
+		writel(val, (void *)(hdmi_dev->io_address + reg));
+}
+
+/**
+ * Description: enable infoframes
+ *
+ * @dev:	hdmi_device_t
+ * @type:	type of infoframe packet
+ * @pkt:	infoframe packet data
+ * @freq:	number of times packet needs to be sent
+ *
+ * Returns:     OTM_HDMI_ERR_NULL_ARG on NULL parameters
+ *		OTM_HDMI_ERR_INVAL on invalid packet type
+ *		OTM_HDMI_SUCCESS on success
+ */
+otm_hdmi_ret_t ipil_hdmi_enable_infoframe(hdmi_device_t *dev,
+		unsigned int type, otm_hdmi_packet_t *pkt, unsigned int freq)
+{
+	otm_hdmi_ret_t rc = OTM_HDMI_SUCCESS;
+
+	if (!dev || !pkt)
+		return OTM_HDMI_ERR_NULL_ARG;
+
+	switch (type) {
+	case HDMI_PACKET_AVI:
+	case HDMI_PACKET_VS:
+	case HDMI_PACKET_SPD:
+		rc = ips_hdmi_enable_vid_infoframe(dev, type, pkt, freq);
+		break;
+	default:/* TODO: Revisit for Other Infoframes */
+		rc = OTM_HDMI_ERR_INVAL;
+		break;
+	}
+
+	return rc;
+}
+
+/**
+ * Description: disable particular infoframe
+ *
+ * @dev:	hdmi_device_t
+ * @type:	type of infoframe packet
+ *
+ * Returns:     OTM_HDMI_ERR_NULL_ARG on NULL parameters
+ *		OTM_HDMI_ERR_INVAL on invalid packet type
+ *		OTM_HDMI_SUCCESS on success
+*/
+otm_hdmi_ret_t ipil_hdmi_disable_infoframe(hdmi_device_t *dev,
+					unsigned int type)
+{
+	otm_hdmi_ret_t rc = OTM_HDMI_SUCCESS;
+
+	if (!dev)
+		return OTM_HDMI_ERR_NULL_ARG;
+
+	switch (type) {
+	case HDMI_PACKET_AVI:
+	case HDMI_PACKET_VS:
+	case HDMI_PACKET_SPD:
+		rc = ips_hdmi_disable_vid_infoframe(dev, type);
+		break;
+	default:/* TODO: Revisit for Other Infoframes */
+		rc = OTM_HDMI_ERR_INVAL;
+		break;
+	}
+
+	return rc;
+}
+
+/**
+ * Description: disable all infoframes
+ *
+ * @dev:	hdmi_device_t
+ *
+ * Returns:     OTM_HDMI_ERR_NULL_ARG on NULL parameters
+ *		OTM_HDMI_SUCCESS on success
+*/
+otm_hdmi_ret_t ipil_hdmi_disable_all_infoframes(hdmi_device_t *dev)
+{
+	if (!dev)
+		return OTM_HDMI_ERR_NULL_ARG;
+
+	return ips_hdmi_disable_all_infoframes(dev);
+}
+
+static void pfit_landscape(int hsrc_sz, int vsrc_sz,
+			int hdst_sz, int vdst_sz)
+{
+	int hmsb, vmsb, hratio, vratio;
+
+	/* IPIL_PFIT_COEFF_MEDIAN_VALUE for fugu 720p */
+	hdmi_write32(IPIL_PFIT_CONTROL,
+			IPIL_PFIT_ENABLE |
+			IPIL_PFIT_PIPE_SELECT_B |
+			IPIL_PFIT_SCALING_PROGRAM | IPIL_PFIT_COEFF_MEDIAN_VALUE);
+
+	/* handling scaling up and down */
+	if (hsrc_sz >= hdst_sz) {
+		/* scaling down: msb = 1 */
+		hratio = IPIL_PFIT_FRACTIONAL_VALUE * (hsrc_sz - hdst_sz) /
+							(hdst_sz + 1);
+		hmsb = 1;
+	} else {
+		/* scaling up: msb = 0 */
+		hratio = IPIL_PFIT_FRACTIONAL_VALUE * (hsrc_sz + 1) /
+							(hdst_sz + 1);
+		hmsb = 0;
+	}
+	if (vsrc_sz >= vdst_sz) {
+		/* scaling down: msb = 1 */
+		vratio = IPIL_PFIT_FRACTIONAL_VALUE * (vsrc_sz - vdst_sz) /
+							(vdst_sz + 1);
+		vmsb = 1;
+	} else {
+		/* scaling up: msb = 0 */
+		vratio = IPIL_PFIT_FRACTIONAL_VALUE * (vsrc_sz + 1) /
+							(vdst_sz + 1);
+		vmsb = 0;
+	}
+
+	pr_debug("\nhdisp = %d, vdisp = %d\n", hdst_sz, vdst_sz);
+	pr_debug("\nhratio = %d, vratio = %d\n", hratio, vratio);
+	hdmi_write32(IPIL_PFIT_PGM_RATIOS,
+		vmsb << IPIL_PFIT_VERT_MSB_SHIFT |
+		hmsb << IPIL_PFIT_HORIZ_MSB_SHIFT |
+		vratio << IPIL_PFIT_VERT_SCALE_SHIFT |
+		hratio << IPIL_PFIT_HORIZ_SCALE_SHIFT);
+}
+
+/*
+ * Panel fitter HW has rounding errors, it does not handle
+ * different source and destination width correctly. This
+ * workaround function fixes this limitation.
+ *
+ * The workaround values are experiment-based.
+ */
+static int pfit_pillarbox_wa(int src_height, int dst_height)
+{
+	if (src_height == 1024) {
+		switch (dst_height) {
+		case 720:
+			return 1;
+		case 768:
+			return 1;
+		case 1080:
+			return 0;
+		default:
+			return 0;
+		}
+	} else if (src_height == 1280) {
+		switch (dst_height) {
+		case 720:
+			return 1;
+		case 768:
+			return 0;
+		case 1080:
+			return 1;
+		default:
+			return 0;
+		}
+	} else
+		return 0;
+}
+
+/**
+ * Description: programs hdmi pipe src and size of the input.
+ *
+ * @dev:		hdmi_device_t
+ * @scalingtype:	scaling type (FULL_SCREEN, CENTER, NO_SCALE etc.)
+ * @mode:		mode requested
+ * @adjusted_mode:	adjusted mode
+ * @fb_width, fb_height:allocated frame buffer dimensions
+ *
+ * Returns:	OTM_HDMI_SUCCESS on success
+ *		OTM_HDMI_ERR_INVAL on NULL input arguments
+ */
+otm_hdmi_ret_t ipil_hdmi_crtc_mode_set_program_dspregs(hdmi_device_t *dev,
+					int scalingtype,
+					ipil_timings_t *mode,
+					ipil_timings_t *adjusted_mode,
+					int fb_width, int fb_height)
+{
+	int sprite_pos_x = 0, sprite_pos_y = 0;
+	int sprite_width = 0, sprite_height = 0;
+	int src_image_hor = 0, src_image_vert = 0;
+	int wa;
+
+	pr_debug("Enter %s\n", __func__);
+
+	/* NULL checks */
+	if (dev == NULL || mode == NULL || adjusted_mode == NULL) {
+		pr_debug("\ninvalid argument\n");
+		return OTM_HDMI_ERR_INVAL;
+	}
+
+	/*
+	 * Frame buffer size may beyond active region in case of
+	 * panning mode.
+	 */
+	sprite_width = min_t(int, fb_width, adjusted_mode->width);
+	sprite_height = min_t(int, fb_height, adjusted_mode->height);
+
+	switch (scalingtype) {
+	case OTM_HDMI_SCALE_NONE:
+	case OTM_HDMI_SCALE_CENTER:
+		/*
+		 * This mode is used to support centering the screen
+		 * by setting reg in DISPLAY controller
+		 */
+		src_image_hor = adjusted_mode->width;
+		src_image_vert = adjusted_mode->height;
+		sprite_pos_x = (src_image_hor - sprite_width) / 2;
+		sprite_pos_y = (src_image_vert - sprite_height) / 2;
+
+		hdmi_write32(IPIL_PFIT_CONTROL,
+				hdmi_read32(IPIL_PFIT_CONTROL) &
+						~IPIL_PFIT_ENABLE);
+		break;
+
+	case OTM_HDMI_SCALE_FULLSCREEN:
+		src_image_hor = sprite_width;
+		src_image_vert = sprite_height;
+		sprite_pos_x = 0;
+		sprite_pos_y = 0;
+
+		if ((adjusted_mode->width > sprite_width) ||
+			(adjusted_mode->height > sprite_height))
+			hdmi_write32(IPIL_PFIT_CONTROL,
+					IPIL_PFIT_ENABLE |
+					IPIL_PFIT_PIPE_SELECT_B |
+					IPIL_PFIT_SCALING_AUTO);
+		break;
+
+	case OTM_HDMI_SCALE_ASPECT:
+		if (fb_width / (float)adjusted_mode->width > 1.5 ||
+			fb_height / (float)adjusted_mode->height > 1.5) {
+			/* panel fitter does not support scaling greater than 1.5 */
+			src_image_hor = sprite_width;
+			src_image_vert = sprite_height;
+
+			hdmi_write32(IPIL_PFIT_CONTROL,
+					hdmi_read32(IPIL_PFIT_CONTROL) &
+							~IPIL_PFIT_ENABLE);
+			break;
+		}
+
+		sprite_pos_x = 0;
+		sprite_pos_y = 0;
+		sprite_height = fb_height;
+		sprite_width = fb_width;
+		src_image_hor = fb_width;
+		src_image_vert = fb_height;
+
+		/* Use panel fitting when the display does not match
+		 * with the framebuffer size */
+		if ((adjusted_mode->width != fb_width) ||
+		    (adjusted_mode->height != fb_height)) {
+
+			if (fb_width > fb_height) {
+				/* Landscape mode */
+				pr_debug("Landscape mode...\n");
+
+				/* use programmed pfit instead of auto
+				 * for uneven hratio and vratio */
+				pfit_landscape(sprite_width,
+						sprite_height,
+						adjusted_mode->width,
+						adjusted_mode->height);
+			} else {
+				/* Portrait mode */
+				pr_debug("Portrait mode...\n");
+
+				/* Panel fitter HW has some limitations/bugs
+				 * which forces us to tweak the way we use
+				 * PILLARBOX mode.
+				 */
+				wa = pfit_pillarbox_wa(fb_height,
+							adjusted_mode->height);
+				pr_debug("wa = %d\n", wa);
+
+				src_image_hor = max_t(int, fb_width,
+						     adjusted_mode->width) + wa;
+				src_image_vert = max_t(int, fb_height,
+						      adjusted_mode->height);
+				sprite_pos_x = (src_image_hor - fb_width) / 2;
+				hdmi_write32(IPIL_PFIT_CONTROL,
+					     IPIL_PFIT_ENABLE |
+					     IPIL_PFIT_PIPE_SELECT_B |
+					     IPIL_PFIT_SCALING_AUTO);
+			}
+		} else
+			hdmi_write32(IPIL_PFIT_CONTROL,
+					IPIL_PFIT_ENABLE |
+					IPIL_PFIT_PIPE_SELECT_B |
+					IPIL_PFIT_SCALING_AUTO);
+
+		break;
+
+	default:
+		/* The defined sprite rectangle must always be
+		completely contained within the displayable area of the
+		screen image (frame buffer). */
+		sprite_pos_x = 0;
+		sprite_pos_y = 0;
+		sprite_height = fb_height;
+		sprite_width = fb_width;
+		src_image_hor = fb_width;
+		src_image_vert = fb_height;
+		if ((adjusted_mode->width != fb_width) ||
+				(adjusted_mode->height != fb_height))
+			hdmi_write32(IPIL_PFIT_CONTROL,
+					IPIL_PFIT_ENABLE |
+					IPIL_PFIT_PIPE_SELECT_B);
+
+		break;
+	}
+
+	pr_debug("Sprite position: (%d, %d)\n", sprite_pos_x,
+			sprite_pos_y);
+	pr_debug("Sprite size: %d x %d\n", sprite_width,
+			sprite_height);
+	pr_debug("Pipe source image size: %d x %d\n",
+			src_image_hor, src_image_vert);
+
+	hdmi_write32(IPIL_DSPBPOS, (sprite_pos_y << 16) | sprite_pos_x);
+	hdmi_write32(IPIL_DSPBSIZE, ((sprite_height - 1) << 16) |
+				 (sprite_width - 1));
+	hdmi_write32(IPIL_PIPEBSRC, ((src_image_hor - 1) << 16) |
+				(src_image_vert - 1));
+
+	pr_debug("Exit %s\n", __func__);
+
+	return OTM_HDMI_SUCCESS;
+}
+
+/**
+ * Description: this is pre-modeset configuration. This can be
+ *		resetting HDMI unit, disabling/enabling dpll etc
+ *		on the need basis.
+ *
+ * @dev:	hdmi_device_t
+ *
+ * Returns:	OTM_HDMI_SUCCESS on success
+ *		OTM_HDMI_ERR_INVAL on NULL input arguments
+ */
+otm_hdmi_ret_t ipil_hdmi_crtc_mode_set_prepare(hdmi_device_t *dev)
+{
+	/* NULL checks */
+	if (dev == NULL) {
+		pr_debug("\ninvalid argument\n");
+		return OTM_HDMI_ERR_INVAL;
+	}
+
+	/* Nothing needed as of now for medfield */
+
+	return OTM_HDMI_SUCCESS;
+}
+
+/**
+ * Description: programs all the timing registers based on scaling type.
+ *
+ * @dev:		hdmi_device_t
+ * @scalingtype:	scaling type (FULL_SCREEN, CENTER, NO_SCALE etc.)
+ * @mode:		mode requested
+ * @adjusted_mode:	adjusted mode
+ *
+ * Returns:	OTM_HDMI_SUCCESS on success
+ *		OTM_HDMI_ERR_INVAL on NULL input arguments
+ */
+otm_hdmi_ret_t ipil_hdmi_crtc_mode_set_program_timings(hdmi_device_t *dev,
+					int scalingtype,
+					otm_hdmi_timing_t *mode,
+					otm_hdmi_timing_t *adjusted_mode)
+{
+	/* NULL checks */
+	if (dev == NULL || mode == NULL || adjusted_mode == NULL) {
+		pr_debug("\ninvalid argument\n");
+		return OTM_HDMI_ERR_INVAL;
+	}
+
+	if (scalingtype == OTM_HDMI_SCALE_NONE) {
+		/* Moorestown doesn't have register support for centering so we
+		 * need to  mess with the h/vblank and h/vsync start and ends
+		 * to get centering
+		 */
+		int offsetX = 0, offsetY = 0;
+
+		offsetX = (adjusted_mode->width - mode->width) / 2;
+		offsetY = (adjusted_mode->height - mode->height) / 2;
+
+		hdmi_write32(IPIL_HTOTAL_B, (mode->width - 1) |
+				((adjusted_mode->htotal - 1) << 16));
+
+		hdmi_write32(IPIL_VTOTAL_B, (mode->height - 1) |
+				((adjusted_mode->vtotal - 1) << 16));
+
+		hdmi_write32(IPIL_HBLANK_B,
+			(adjusted_mode->hblank_start - offsetX - 1) |
+			((adjusted_mode->hblank_end - offsetX - 1) << 16));
+
+		hdmi_write32(IPIL_HSYNC_B,
+			(adjusted_mode->hsync_start - offsetX - 1) |
+			((adjusted_mode->hsync_end - offsetX - 1) << 16));
+
+		hdmi_write32(IPIL_VBLANK_B,
+			(adjusted_mode->vblank_start - offsetY - 1) |
+			((adjusted_mode->vblank_end - offsetY - 1) << 16));
+
+		hdmi_write32(IPIL_VSYNC_B,
+			(adjusted_mode->vsync_start - offsetY - 1) |
+			((adjusted_mode->vsync_end - offsetY - 1) << 16));
+	} else {
+		hdmi_write32(IPIL_HTOTAL_B,
+				(adjusted_mode->width - 1) |
+				((adjusted_mode->htotal - 1) << 16));
+
+		hdmi_write32(IPIL_VTOTAL_B,
+				(adjusted_mode->height - 1) |
+				((adjusted_mode->vtotal - 1) << 16));
+
+		hdmi_write32(IPIL_HBLANK_B,
+				(adjusted_mode->hblank_start - 1) |
+				((adjusted_mode->hblank_end - 1) << 16));
+
+		hdmi_write32(IPIL_HSYNC_B,
+				(adjusted_mode->hsync_start - 1) |
+				((adjusted_mode->hsync_end - 1) << 16));
+
+		hdmi_write32(IPIL_VBLANK_B,
+				(adjusted_mode->vblank_start - 1) |
+				((adjusted_mode->vblank_end - 1) << 16));
+
+		hdmi_write32(IPIL_VSYNC_B,
+				(adjusted_mode->vsync_start - 1) |
+				((adjusted_mode->vsync_end - 1) << 16));
+	}
+
+	return OTM_HDMI_SUCCESS;
+}
+
+/**
+ * Description: get dpll clocks
+ *
+ * @dev:	hdmi_device_t
+ * @dclk:	refresh rate dot clock in kHz of current mode
+ *
+ * Returns:	OTM_HDMI_SUCCESS on success
+ *		OTM_HDMI_ERR_INVAL on NULL input arguments
+ */
+otm_hdmi_ret_t	ipil_hdmi_crtc_mode_get_program_dpll(hdmi_device_t *dev,
+							unsigned long dclk)
+{
+	otm_hdmi_ret_t rc = OTM_HDMI_SUCCESS;
+
+	pr_debug("enter %s\n", __func__);
+
+	/* NULL checks */
+	if (dev == NULL) {
+		pr_debug("\ninvalid argument\n");
+		return OTM_HDMI_ERR_INVAL;
+	}
+
+	/* get the adjusted clock value */
+	rc = ips_hdmi_crtc_mode_get_program_dpll(dev, dclk);
+	if (rc != OTM_HDMI_SUCCESS) {
+		pr_debug("\nfailed to calculate adjusted clock\n");
+		return rc;
+	}
+	return OTM_HDMI_SUCCESS;
+}
+
+/**
+ * Description: programs dpll clocks, enables dpll and waits
+ *		till it locks with DSI PLL
+ *
+ * @dev:	hdmi_device_t
+ * @dclk:	refresh rate dot clock in kHz of current mode
+ *
+ * Returns:	OTM_HDMI_SUCCESS on success
+ *		OTM_HDMI_ERR_INVAL on NULL input arguments
+ */
+otm_hdmi_ret_t	ipil_hdmi_crtc_mode_set_program_dpll(hdmi_device_t *dev,
+							unsigned long dclk)
+{
+	otm_hdmi_ret_t rc = OTM_HDMI_SUCCESS;
+
+	pr_debug("enter %s\n", __func__);
+
+	/* NULL checks */
+	if (dev == NULL) {
+		pr_debug("\ninvalid argument\n");
+		return OTM_HDMI_ERR_INVAL;
+	}
+
+	/* get the adjusted clock value */
+	rc = ips_hdmi_crtc_mode_set_program_dpll(dev, dclk);
+	if (rc != OTM_HDMI_SUCCESS) {
+		pr_debug("\nfailed to calculate adjusted clock\n");
+		return rc;
+	}
+	return OTM_HDMI_SUCCESS;
+}
+
+/**
+ * Description: configures the display plane register and enables
+ *		pipeconf.
+ *
+ * @dev: hdmi_device_t
+ *
+ * Returns:	OTM_HDMI_SUCCESS on success
+ *		OTM_HDMI_ERR_INVAL on NULL input arguments
+ */
+otm_hdmi_ret_t ipil_hdmi_crtc_mode_set_program_pipeconf(hdmi_device_t *dev)
+{
+	u32 dspcntr;
+	u32 pipeconf;
+
+	/* NULL checks */
+	if (dev == NULL) {
+		pr_debug("\ninvalid argument\n");
+		return OTM_HDMI_ERR_INVAL;
+	}
+
+	/* Set up the display plane register */
+	dspcntr = hdmi_read32(IPIL_DSPBCNTR);
+	dspcntr |= 1 << IPIL_DSP_PLANE_PIPE_POS;
+	dspcntr |= IPIL_DSP_PLANE_ENABLE;
+
+	/* setup pipeconf */
+	pipeconf = IPIL_PIPEACONF_ENABLE;
+
+
+	hdmi_write32(IPIL_PIPEBCONF, pipeconf);
+	hdmi_read32(IPIL_PIPEBCONF);
+
+	hdmi_write32(IPIL_DSPBCNTR, dspcntr);
+
+	return OTM_HDMI_SUCCESS;
+}
+
+/**
+ * Description: encoder mode set function for hdmi. enables phy.
+ *		set correct polarity for the current mode, sets
+ *		correct panel fitting.
+ *
+ *
+ * @dev:		hdmi_device_t
+ * @mode:		mode requested
+ * @adjusted_mode:	adjusted mode
+ * @is_monitor_hdmi:	is monitor type is hdmi or not
+ *
+ * Returns:	OTM_HDMI_SUCCESS on success
+ *		OTM_HDMI_ERR_INVAL on NULL input arguments
+ */
+otm_hdmi_ret_t ipil_hdmi_enc_mode_set(hdmi_device_t *dev,
+					otm_hdmi_timing_t *mode,
+					otm_hdmi_timing_t *adjusted_mode,
+					bool is_monitor_hdmi)
+{
+	u32 hdmib, hdmi_phy_misc;
+	bool phsync;
+	bool pvsync;
+
+	/* NULL checks */
+	if (dev == NULL || mode == NULL || adjusted_mode == NULL) {
+		pr_debug("\ninvalid argument\n");
+		return OTM_HDMI_ERR_INVAL;
+	}
+
+	if (is_monitor_hdmi) {
+		hdmib = hdmi_read32(IPIL_HDMIB_CONTROL)
+						| IPIL_HDMIB_PIPE_B_SELECT
+						| IPIL_HDMIB_NULL_PACKET
+						| IPIL_HDMIB_AUDIO_ENABLE;
+	} else {
+		hdmib = hdmi_read32(IPIL_HDMIB_CONTROL)
+						| IPIL_HDMIB_PIPE_B_SELECT;
+		hdmib &= ~IPIL_HDMIB_NULL_PACKET;
+		hdmib &= ~IPIL_HDMIB_AUDIO_ENABLE;
+	}
+
+	/* disable HDMI port since the DPMS will take care of the enabling */
+	hdmib &= ~IPIL_HDMIB_PORT_EN;
+	/* Hdmi specification define that 640x480: full range
+	* other timing: limited range when RGB output */
+	if (adjusted_mode->width == 640 && adjusted_mode->height == 480)
+		hdmib &= ~IPIL_HDMIB_COLOR_RANGE_SELECT;
+	else
+		hdmib |= IPIL_HDMIB_COLOR_RANGE_SELECT;
+
+	/* set output polarity */
+	phsync = !!(adjusted_mode->mode_info_flags & PD_HSYNC_HIGH);
+	pvsync = !!(adjusted_mode->mode_info_flags & PD_VSYNC_HIGH);
+
+	pr_debug("enc_mode_set %dx%d (%c,%c)\n", adjusted_mode->width,
+						adjusted_mode->height,
+						phsync ? '+' : '-',
+						pvsync ? '+' : '-');
+	if (phsync)
+		hdmib = SET_BITS(hdmib, IPIL_HSYNC_POLARITY_MASK);
+	else
+		hdmib = CLEAR_BITS(hdmib, IPIL_HSYNC_POLARITY_MASK);
+	if (pvsync)
+		hdmib = SET_BITS(hdmib, IPIL_VSYNC_POLARITY_MASK);
+	else
+		hdmib = CLEAR_BITS(hdmib, IPIL_VSYNC_POLARITY_MASK);
+
+	hdmi_phy_misc = hdmi_read32(IPIL_HDMIPHYMISCCTL) &
+					~IPIL_HDMI_PHY_POWER_DOWN;
+
+	hdmi_write32(IPIL_HDMIPHYMISCCTL, hdmi_phy_misc);
+	hdmi_write32(IPIL_HDMIB_CONTROL, hdmib);
+	hdmi_read32(IPIL_HDMIB_CONTROL);
+
+	return OTM_HDMI_SUCCESS;
+}
+
+/**
+ * Description: save HDMI display registers
+ *
+ * @dev:		hdmi_device_t
+ *
+ * Returns: none
+ */
+void ipil_hdmi_save_display_registers(hdmi_device_t *dev)
+{
+	if (NULL != dev)
+		ips_hdmi_save_display_registers(dev);
+}
+
+/*
+ * Description: save HDMI data island packets
+ *
+ * @dev:		hdmi_device_t
+ *
+ * Returns: none
+ */
+void ipil_hdmi_save_data_island(hdmi_device_t *dev)
+{
+	if (NULL != dev)
+		ips_hdmi_save_data_island(dev);
+}
+
+/**
+ * Description:	get vic data from data island packets
+ *
+ * @dev:	hdmi_device_t
+ *
+ * Returns:	vic
+ */
+uint8_t ipil_hdmi_get_vic_from_data_island(hdmi_device_t *dev)
+{
+	if (NULL != dev)
+		return ips_hdmi_get_vic_from_data_island(dev);
+
+	return 0;
+}
+
+
+/*
+ * Description: destroys any saved HDMI data
+ *
+ * @dev:	hdmi_device_t
+ *
+ * Returns: none
+ */
+void ipil_hdmi_destroy_saved_data(hdmi_device_t *dev)
+{
+	if (NULL != dev)
+		ips_hdmi_destroy_saved_data(dev);
+}
+
+/**
+ * Description: disable HDMI display
+ *
+ * @dev:		hdmi_device_t
+ *
+ * Returns: none
+ */
+void ipil_disable_hdmi(hdmi_device_t *dev)
+{
+	if (NULL != dev)
+		ips_disable_hdmi(dev);
+}
+
+/**
+ * Description: restore HDMI display registers and enable display
+ *
+ * @dev:		hdmi_device_t
+ *
+ * Returns: none
+ */
+void ipil_hdmi_restore_and_enable_display(hdmi_device_t *dev)
+{
+	if (NULL != dev)
+		ips_hdmi_restore_and_enable_display(dev);
+}
+
+/**
+ * Description: restore HDMI data island packets
+ *
+ * @dev:		hdmi_device_t
+ *
+ * Returns: none
+ */
+void ipil_hdmi_restore_data_island(hdmi_device_t *dev)
+{
+	if (NULL != dev)
+		ips_hdmi_restore_data_island(dev);
+}
+
+/**
+ * Description: get pixel clock range
+ *
+ * @pc_min:	minimum pixel clock
+ * @pc_max:	maximum pixel clock
+ *
+ * Returns:	OTM_HDMI_SUCCESS on success
+ *		OTM_HDMI_ERR_FAILED on NULL input arguments.
+ */
+otm_hdmi_ret_t ipil_get_pixel_clock_range(unsigned int *pc_min,
+						unsigned int *pc_max)
+{
+	return ips_get_pixel_clock_range(pc_min, pc_max);
+}
+
+/**
+ * Returns if the given values is preferred mode or not
+ * @hdisplay	: width
+ * @vdisplay	: height
+ * @refresh	: refresh rate
+ *
+ * Returns true if preferred mode else false
+ */
+bool ipil_hdmi_is_preferred_mode(int hdisplay, int vdisplay, int refresh)
+{
+	return ips_hdmi_is_preferred_mode(hdisplay, vdisplay, refresh);
+}
+
+/**
+ * Description: disable all planes on pipe
+ *
+ * @pipe: pipe ID
+ * @enable : true to enable planes; false to disable planes
+ *
+ */
+void ipil_enable_planes_on_pipe(uint32_t pipe, bool enable)
+{
+	ips_enable_planes_on_pipe(pipe, enable);
+}
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/ipil/include/hdcp_rx_defs.h b/drivers/external_drivers/intel_media/otm_hdmi/ipil/include/hdcp_rx_defs.h
new file mode 100644
index 0000000..89debff
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/ipil/include/hdcp_rx_defs.h
@@ -0,0 +1,149 @@
+/*
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  Contact Information:
+
+  Intel Corporation
+  2200 Mission College Blvd.
+  Santa Clara, CA  95054
+
+  BSD LICENSE
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#ifndef HDCP_RX_DEFS_H
+#define HDCP_RX_DEFS_H
+
+#include "hdmi_internal.h"
+
+#define HDCP_PRIMARY_I2C_ADDR	0x3A
+
+#define HDCP_MAX_RETRY_STATUS	(1500)
+#define HDCP_MAX_DEVICES        (127)
+
+#define HDCP_KSV_SIZE		0x05
+#define HDCP_KSV_HAMMING_WT	(20)
+#define HDCP_AN_SIZE		0x08
+#define HDCP_RI_SIZE		0x02
+#define HDCP_PJ_SIZE		0x01
+#define HDCP_V_H_SIZE		(20)
+
+
+#define HDCP_RX_BKSV_ADDR	0x00
+#define HDCP_RX_RI_ADDR		0x08
+#define HDCP_RX_PJ_ADDR		0x0A
+#define HDCP_RX_AKSV_ADDR	0x10
+
+#define HDCP_RX_AINFO_ADDR	0x15
+#define HDCP_RX_AINFO_SIZE	0x01
+
+#define HDCP_RX_AN_ADDR		0x18
+
+#define HDCP_RX_V_H_ADDR	0x20
+
+#define HDCP_RX_V_H0_ADDR	0x20
+#define HDCP_RX_V_H0_SIZE	0x04
+
+#define HDCP_RX_V_H1_ADDR	0x24
+#define HDCP_RX_V_H1_SIZE	0x04
+
+#define HDCP_RX_V_H2_ADDR	0x28
+#define HDCP_RX_V_H2_SIZE	0x04
+
+#define HDCP_RX_V_H3_ADDR	0x2C
+#define HDCP_RX_V_H3_SIZE	0x04
+
+#define HDCP_RX_V_H4_ADDR	0x30
+#define HDCP_RX_V_H4_SIZE	0x04
+
+#define HDCP_RX_BCAPS_ADDR	0x40
+#define HDCP_RX_BCAPS_SIZE	0x01
+
+#define HDCP_RX_BSTATUS_ADDR	0x41
+#define HDCP_RX_BSTATUS_SIZE	0x02
+
+#define HDCP_RX_KSV_FIFO_ADDR	0x43
+
+struct hdcp_rx_bcaps_t {
+	union {
+		uint8_t value;
+		struct {
+			uint8_t fast_reauthentication:1 ;
+			uint8_t b1_1_features:1 ;
+			uint8_t reserved:2 ;
+			uint8_t fast_transfer:1 ;
+			uint8_t ksv_fifo_ready:1 ;
+			uint8_t is_repeater:1 ;
+			uint8_t hdmi_reserved:1 ;
+		};
+	};
+};
+
+struct hdcp_rx_bstatus_t {
+	union {
+		uint16_t value;
+		struct {
+			uint16_t device_count:7 ;
+			uint16_t max_devs_exceeded:1 ;
+			uint16_t depth:3 ;
+			uint16_t max_cascade_exceeded:1 ;
+			uint16_t hdmi_mode:1 ;
+			uint16_t reserved2:1 ;
+			uint16_t rsvd:2 ;
+		};
+	};
+};
+
+
+#endif /* HDCP_RX_DEFS_H */
+
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/ipil/include/hdmi_hal.h b/drivers/external_drivers/intel_media/otm_hdmi/ipil/include/hdmi_hal.h
new file mode 100644
index 0000000..3226703
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/ipil/include/hdmi_hal.h
@@ -0,0 +1,215 @@
+/*
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  Contact Information:
+
+  Intel Corporation
+  2200 Mission College Blvd.
+  Santa Clara, CA  95054
+
+  BSD LICENSE
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+/*
+ * GENERAL ASSUMPTIONS / REQUIREMENTS
+ *
+ * 1. HAL entries mentioned in this document are intended to be simple wrappers
+ *    on top of register reads and writes. They should not keep any state
+ *    and should not implement any policies. It is completely up to higher
+ *    levels when and how to use these entries. The only safety checks HAL
+ *    entries should do is for NULL pointers and (in very few cases) for
+ *    correctness of values supplied for register writes (range and alignment
+ *    checks).
+ * 2. HAL should be implemented in such a way that they can be used in both
+ *    both user and kernel space code.
+ * 3. HAL should hide actual register layout where appropriate (infoframes, csc
+ *    coefficients, etc). This objective is not met at the moment since in some
+ *    cases (DMA configuration) we assume that end user knows the register
+ *    layout (at least for now).
+ *
+ * ABBREVIATIONS
+ *
+ * GCP  - General Control Packet
+ * AVI  - Auxiliaty Video Information
+ * ACP  - Audio Content Protection
+ * ISRC - International Standard Recording Code
+ * SPD  - Source Product Description
+ */
+
+#ifndef __HDMI_HAL_H__
+#define __HDMI_HAL_H__
+
+
+#include <linux/types.h>
+
+#include "otm_hdmi_types.h"
+#include "otm_hdmi_defs.h"
+
+/*
+ * This enumeration represents HDMI unit revision
+ */
+typedef enum {
+	HDMI_PCI_REV_CE3100 = 0,
+	HDMI_PCI_REV_CE4100_A0 = 1,
+	HDMI_PCI_REV_CE4100_B012 = 2,
+	HDMI_PCI_REV_CE4200_A0 = 3,
+	HDMI_PCI_REV_CE4200_B0 = 4,
+} hdmi_unit_revision_id_t;
+
+/*
+ * HDMI register state
+ */
+typedef struct {
+	bool valid;
+	uint32_t saveDPLL;
+	uint32_t saveFPA0;
+	uint32_t savePIPEBCONF;
+	uint32_t saveHTOTAL_B;
+	uint32_t saveHBLANK_B;
+	uint32_t saveHSYNC_B;
+	uint32_t saveVTOTAL_B;
+	uint32_t saveVBLANK_B;
+	uint32_t saveVSYNC_B;
+	uint32_t savePIPEBSRC;
+	uint32_t saveDSPBSTRIDE;
+	uint32_t saveDSPBLINOFF;
+	uint32_t saveDSPBTILEOFF;
+	uint32_t saveDSPBSIZE;
+	uint32_t saveDSPBPOS;
+	uint32_t saveDSPBSURF;
+	uint32_t saveDSPBCNTR;
+	uint32_t saveDSPBSTATUS;
+	uint32_t save_palette_b[256];
+	uint32_t savePFIT_CONTROL;
+	uint32_t savePFIT_PGM_RATIOS;
+	uint32_t saveHDMIPHYMISCCTL;
+	uint32_t saveHDMIB_CONTROL;
+	uint32_t saveHDMIB_DATALANES;
+} hdmi_register_state_t;
+
+/*
+ * HDMI infoframe information
+ */
+struct hdmi_infoframe_info_t {
+	bool valid;
+	uint32_t freq;
+	otm_hdmi_packet_t pkt;
+};
+
+/*
+ * This structure is used by HAL user to configure and use HAL
+ */
+typedef struct {
+	/* Base address of mapped registers */
+	void __iomem *io_address;
+
+	/* Base address of mapped interrupt registers */
+	void __iomem *irq_io_address;
+
+	/* Pointer to register read routine */
+	unsigned int (*io_read) (void *uhandle,	/* User provided data */
+				unsigned int base, /* Base address */
+				unsigned int offset);	/* Register offset */
+
+	/* Pointer to register write routine */
+	void (*io_write) (void *uhandle, /* User provided data */
+			unsigned int base, /* Base address */
+			unsigned int offset, /* Register offset */
+			unsigned int value); /* Value */
+
+
+	/* Pointer to the data that will be
+	 * passed to both io_read and io_write */
+	void *uhandle;
+
+	/* Pointer to the routine invoked at the beginning of every
+	 * HAL call */
+	void (*log_entry) (void *uhandle, /* User provided data */
+			   char *foo);	/* Name of the routine */
+
+	/* Pointer to the routine invoked at the end of every
+	 * HAL call */
+	void (*log_exit) (void *uhandle, /* User provided data */
+			  char *foo, /* Name of the routine */
+			  int rc); /* Return code */
+
+	/* HDMI unit identifier */
+	hdmi_unit_revision_id_t id;
+
+	/* Pointer to opaque polling timer */
+	void *poll_timer;
+
+	/* Pointer to the polling timer initialization routine */
+	void (*poll_start) (void *poll_timer);
+
+	/* Pointer to the timeout verification routine */
+	 bool(*poll_timeout) (void *poll_timer);
+
+	/* Interrupt status to interrupt handling function */
+	unsigned int isr_status;
+
+	/*
+	 * TODO: tmds clk value for the best pll found and is needed for audio.
+	 * This field has to be moved into OTM audio interfaces
+	 * when implemented.
+	 */
+	uint32_t clock_khz;
+
+	/* HDMI register value */
+	hdmi_register_state_t reg_state;
+	/* AVI Infoframe - used for suspend resume */
+	struct hdmi_infoframe_info_t avi;
+} hdmi_device_t;
+
+#endif /* __HDMI_HAL_H__*/
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/ipil/include/ipil_hdcp_api.h b/drivers/external_drivers/intel_media/otm_hdmi/ipil/include/ipil_hdcp_api.h
new file mode 100644
index 0000000..387c0fc
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/ipil/include/ipil_hdcp_api.h
@@ -0,0 +1,199 @@
+/*
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  Contact Information:
+
+  Intel Corporation
+  2200 Mission College Blvd.
+  Santa Clara, CA  95054
+
+  BSD LICENSE
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#ifndef IPIL_HDCP_API_H
+#define IPIL_HDCP_API_H
+
+
+#include <linux/types.h>
+#include "hdmi_internal.h"
+
+/**
+ * Description: initialize hdcp tx for authentication
+ *
+ * Returns:	true success else false
+ */
+bool ipil_hdcp_init(void);
+
+/**
+ * Description: check whether hdcp hardware is ready
+ *
+ * Returns:	true if ready else false
+ */
+bool ipil_hdcp_is_ready(void);
+
+/**
+ * Description: read an from hdcp tx
+ *
+ * @an		buffer to return an
+ * @size	size of an buffer passed
+ *
+ * Returns:	true on succesful read else false
+ */
+bool ipil_hdcp_get_an(uint8_t *an, uint32_t size);
+
+/**
+ * Description: read aksv from hdcp tx
+ *
+ * @aksv	buffer to return aksv
+ * @size	size of an buffer passed
+ *
+ * Returns:	true on succesful read else false
+ */
+bool ipil_hdcp_get_aksv(uint8_t *aksv, uint32_t size);
+
+/**
+ * Description: set repeater bit in hdcp tx if downstream is a repeater else
+ *		reset the bit
+ *
+ * @present	indicates whether downstream is repeater or not
+ *
+ * Returns:	true on succesful write else false
+ */
+bool ipil_hdcp_set_repeater(bool present);
+
+/**
+ * Description: set downstream bksv in hdcp tx
+ *
+ * @bksv	bksv from downstream device
+ *
+ * Returns:	true on succesful write else false
+ */
+bool ipil_hdcp_set_bksv(uint8_t *bksv);
+
+/**
+ * Description: start first stage of authentication by writing an aksv
+ *
+ * Returns:	true on succesfully starting authentication else false
+ */
+bool ipil_hdcp_start_authentication(void);
+
+/**
+ * Description: check if hdcp tx R0 is ready after starting authentication
+ *
+ * Returns:	true if r0 is ready else false
+ */
+bool ipil_hdcp_is_r0_ready(void);
+
+/**
+ * Description: check if hdcp tx & rx ri matches
+ *
+ * @rx_ri	ri of downstream device
+ *
+ * Returns:	true if ri matches else false
+ */
+bool ipil_hdcp_does_ri_match(uint16_t rx_ri);
+
+/**
+ * Description: Enable encryption once r0 matches
+ *
+ * Returns:	true on enabling encryption else false
+ */
+bool ipil_hdcp_enable_encryption(void);
+
+/**
+ * Description: compute hdcp tx's v(sha1) for repeater authentication
+ *
+ * @rep_ksv_list	 ksv list from downstream repeater
+ * @rep_ksv_list_entries number of entries in the ksv list
+ * @topology_data	bstatus value
+ *
+ * Returns:	true on successfully computing v else false
+ */
+bool ipil_hdcp_compute_tx_v(uint8_t *rep_ksv_list,
+					   uint32_t rep_ksv_list_entries,
+					   uint16_t topology_data);
+
+/**
+ * Description: compare hdcp tx & hdcp rx sha1 results
+ *
+ * @rep_prime_v sha1 value from downstream repeater
+ *
+ * Returns:	true if same else false
+ */
+bool ipil_hdcp_compare_v(uint32_t *rep_prime_v);
+
+/**
+ * Description: disable hdcp
+ *
+ * Returns:	true on successfully disabling hdcp else false
+ */
+bool ipil_hdcp_disable(void);
+
+/**
+ * Description: check whether hdcp tx can authenticate
+ *
+ * Returns:	true if device can authenticate else false
+ */
+bool ipil_hdcp_device_can_authenticate(void);
+
+/**
+ * Description: get hardware frame count for cipher Ri update
+ *
+ * @count   framer count for cipher Ri update
+ *
+ * Returns: true if successful else false
+ */
+bool ipil_hdcp_get_ri_frame_count(uint8_t *count);
+
+#endif /* IPIL_HDCP_API_H */
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/ipil/include/ipil_hdmi.h b/drivers/external_drivers/intel_media/otm_hdmi/ipil/include/ipil_hdmi.h
new file mode 100644
index 0000000..31bf26a
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/ipil/include/ipil_hdmi.h
@@ -0,0 +1,304 @@
+/*
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  Contact Information:
+
+  Intel Corporation
+  2200 Mission College Blvd.
+  Santa Clara, CA  95054
+
+  BSD LICENSE
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#ifndef __IPIL_HDMI_H
+#define __IPIL_HDMI_H
+
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include "otm_hdmi.h"
+
+#include "hdmi_internal.h"
+#include "hdmi_hal.h"
+
+
+/**
+ * Description: pass hdmi device information to lower layer
+ * @dev:	hdmi_device_t
+ *
+ * Returns:	OTM_HDMI_SUCCESS on success
+ *		OTM_HDMI_ERR_NULL_ARG on bad argument
+ */
+otm_hdmi_ret_t ipil_hdmi_set_hdmi_dev(hdmi_device_t *dev);
+
+/**
+ * Description: programs hdmi pipe src and size of the input.
+ *
+ * @dev:		hdmi_device_t
+ * @scalingtype		scaling type (FULL_SCREEN, CENTER, NO_SCALE etc.)
+
+ * @mode:		mode requested
+ * @adjusted_mode:	adjusted mode
+ * @fb_width, fb_height:allocated frame buffer dimensions
+ *
+ * Returns:	OTM_HDMI_SUCCESS on success
+ *		OTM_HDMI_ERR_INVAL on NULL input arguments
+ */
+otm_hdmi_ret_t ipil_hdmi_crtc_mode_set_program_dspregs(hdmi_device_t *dev,
+					int scalingtype,
+					ipil_timings_t *mode,
+					ipil_timings_t *adjusted_mode,
+					int fb_width, int fb_height);
+
+/**
+ * Description: this is pre-modeset configuration. This can be
+ *		resetting HDMI unit, disabling/enabling dpll etc
+ *		on the need basis.
+ *
+ * @dev:	hdmi_device_t
+ *
+ * Returns:	OTM_HDMI_SUCCESS on success
+ *		OTM_HDMI_ERR_INVAL on NULL input arguments
+ */
+otm_hdmi_ret_t ipil_hdmi_crtc_mode_set_prepare(hdmi_device_t *dev);
+
+/**
+ * Description: programs all the timing registers based on scaling type.
+ *
+ * @dev:		hdmi_device_t
+ * @scalingtype:	scaling type (FULL_SCREEN, CENTER, NO_SCALE etc.)
+ * @mode:		mode requested
+ * @adjusted_mode:	adjusted mode
+ *
+ * Returns:	OTM_HDMI_SUCCESS on success
+ *		OTM_HDMI_ERR_INVAL on NULL input arguments
+ */
+otm_hdmi_ret_t ipil_hdmi_crtc_mode_set_program_timings(hdmi_device_t *dev,
+					int scalingtype,
+					otm_hdmi_timing_t *mode,
+					otm_hdmi_timing_t *adjusted_mode);
+
+/**
+ * Description: gets dpll clocks
+ *
+ * @dev:	hdmi_device_t
+ * @dclk:	refresh rate dot clock in kHz of current mode
+ *
+ * Returns:	OTM_HDMI_SUCCESS on success
+ *		OTM_HDMI_ERR_INVAL on NULL input arguments
+ */
+otm_hdmi_ret_t	ipil_hdmi_crtc_mode_get_program_dpll(hdmi_device_t *dev,
+							unsigned long dclk);
+
+
+/**
+ * Description: programs dpll clocks, enables dpll and waits
+ *		till it locks with DSI PLL
+ *
+ * @dev:	hdmi_device_t
+ * @dclk:	refresh rate dot clock in kHz of current mode
+ *
+ * Returns:	OTM_HDMI_SUCCESS on success
+ *		OTM_HDMI_ERR_INVAL on NULL input arguments
+ */
+otm_hdmi_ret_t	ipil_hdmi_crtc_mode_set_program_dpll(hdmi_device_t *dev,
+							unsigned long dclk);
+
+/**
+ * Description: configures the display plane register and enables
+ *		pipeconf.
+ *
+ * @dev: hdmi_device_t
+ *
+ * Returns:	OTM_HDMI_SUCCESS on success
+ *		OTM_HDMI_ERR_INVAL on NULL input arguments
+ */
+otm_hdmi_ret_t ipil_hdmi_crtc_mode_set_program_pipeconf(hdmi_device_t *dev);
+
+/**
+ * Description: enable infoframes
+ *
+ * @dev:	hdmi_device_t
+ * @type:       type of infoframe packet
+ * @pkt:	infoframe packet data
+ * @freq:       number of times packet needs to be sent
+ *
+ * Returns:     OTM_HDMI_ERR_NULL_ARG on NULL parameters
+ *		OTM_HDMI_ERR_INVAL on invalid packet type
+ *		OTM_HDMI_SUCCESS on success
+ */
+otm_hdmi_ret_t ipil_hdmi_enable_infoframe(hdmi_device_t *dev,
+					unsigned int type,
+					otm_hdmi_packet_t *pkt,
+					unsigned int freq);
+
+/**
+ * Description: disable particular infoframe
+ *
+ * @dev:	hdmi_device_t
+ * @type:       type of infoframe packet
+ *
+ * Returns:     OTM_HDMI_ERR_NULL_ARG on NULL parameters
+ *		OTM_HDMI_ERR_INVAL on invalid packet type
+ *		OTM_HDMI_SUCCESS on success
+ */
+otm_hdmi_ret_t ipil_hdmi_disable_infoframe(hdmi_device_t *dev,
+					unsigned int type);
+
+/**
+ * Description: disable all infoframes
+ *
+ * @dev:	hdmi_device_t
+ *
+ * Returns:     OTM_HDMI_ERR_NULL_ARG on NULL parameters
+ *		OTM_HDMI_SUCCESS on success
+ */
+otm_hdmi_ret_t ipil_hdmi_disable_all_infoframes(hdmi_device_t *dev);
+
+/**
+ * Description: encoder mode set function for hdmi. enables phy.
+ *		set correct polarity for the current mode, sets
+ *		correct panel fitting.
+ *
+ *
+ * @dev:		hdmi_device_t
+ * @mode:		mode requested
+ * @adjusted_mode:	adjusted mode
+ * @is_monitor_hdmi:	is monitor type is hdmi or not
+ *
+ * Returns:	OTM_HDMI_SUCCESS on success
+ *		OTM_HDMI_ERR_INVAL on NULL input arguments
+ */
+otm_hdmi_ret_t ipil_hdmi_enc_mode_set(hdmi_device_t *dev,
+					otm_hdmi_timing_t *mode,
+					otm_hdmi_timing_t *adjusted_mode,
+					bool is_monitor_hdmi);
+/**
+ * Description: save HDMI display registers
+ *
+ * @dev:	hdmi_device_t
+ *
+ * Returns: none
+ */
+void ipil_hdmi_save_display_registers(hdmi_device_t *dev);
+void ipil_hdmi_save_data_island(hdmi_device_t *dev);
+
+/**
+ * Description:	get vic data from data island packets
+ *
+ * @dev:	hdmi_device_t
+ *
+ * Returns:	vic
+ */
+uint8_t ipil_hdmi_get_vic_from_data_island(hdmi_device_t *dev);
+
+/**
+ * Description: restore HDMI display registers and enable display
+ *
+ * @dev:	hdmi_device_t
+ *
+ * Returns: none
+ */
+void ipil_hdmi_restore_and_enable_display(hdmi_device_t *dev);
+void ipil_hdmi_restore_data_island(hdmi_device_t *dev);
+
+/**
+ * Description: destroys any saved HDMI data
+ *
+ * @dev:	hdmi_device_t
+ *
+ * Returns: none
+ */
+void ipil_hdmi_destroy_saved_data(hdmi_device_t *dev);
+
+/**
+ * Description: disable HDMI display
+ *
+ * @dev:	hdmi_device_t
+ *
+ * Returns: none
+ */
+void ipil_disable_hdmi(hdmi_device_t *dev);
+
+/**
+ * Description: get pixel clock range
+ *
+ * @pc_min:	minimum pixel clock
+ * @pc_max:	maximum pixel clock
+ *
+ * Returns:	OTM_HDMI_SUCCESS on success
+ *		OTM_HDMI_ERR_FAILED on NULL input arguments.
+ */
+otm_hdmi_ret_t ipil_get_pixel_clock_range(unsigned int *pc_min,
+						unsigned int *pc_max);
+
+/**
+ * Returns if the given values is preferred mode or not
+ * @hdisplay	: width
+ * @vdisplay	: height
+ * @refresh	: refresh rate
+ *
+ * Returns true if preferred mode else false
+ */
+bool ipil_hdmi_is_preferred_mode(int hdisplay, int vdisplay, int refresh);
+
+/**
+ * Description: disable all planes on pipe
+ *
+ * @pipe: pipe ID
+ * @enable : true to enable planes; false to disable planes
+ *
+ */
+void ipil_enable_planes_on_pipe(uint32_t pipe, bool enable);
+
+#endif /* __IPIL_HDMI_H */
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/ipil/include/ipil_utils.h b/drivers/external_drivers/intel_media/otm_hdmi/ipil/include/ipil_utils.h
new file mode 100644
index 0000000..339c9dd
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/ipil/include/ipil_utils.h
@@ -0,0 +1,86 @@
+/*
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  Contact Information:
+
+  Intel Corporation
+  2200 Mission College Blvd.
+  Santa Clara, CA  95054
+
+  BSD LICENSE
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#ifndef IPIL_UTILS_H
+#define IPIL_UTILS_H
+
+/**
+ * Description: read a 32 bit hdmi register
+ *
+ * @reg		register address
+ *
+ * Returns:	value in the hdmi register
+ */
+uint32_t hdmi_read32(uint32_t reg);
+
+/**
+ * Description: write into 32 bit hdmi register
+ *
+ * @reg		register address
+ * @val		value to be written
+ *
+ * Returns:	none
+ */
+void hdmi_write32(uint32_t reg, uint32_t val);
+
+#endif /* IPIL_UTILS_H */
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/ipil/specific/ctp/ips_hdmi_priv.c b/drivers/external_drivers/intel_media/otm_hdmi/ipil/specific/ctp/ips_hdmi_priv.c
new file mode 100644
index 0000000..a14cb87
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/ipil/specific/ctp/ips_hdmi_priv.c
@@ -0,0 +1,480 @@
+/*
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  Contact Information:
+
+  Intel Corporation
+  2200 Mission College Blvd.
+  Santa Clara, CA  95054
+
+  BSD LICENSE
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+
+#include <asm/io.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+
+#include "otm_hdmi.h"
+#include "hdmi_internal.h"
+#include "ips_hdmi.h"
+#include "mfld_hdmi_reg.h"
+#include "ipil_internal.h"
+#include "ipil_utils.h"
+
+#define IPS_MIN_PIXEL_CLOCK 25174	/* 640x480@59.94Hz */
+#define IPS_MAX_PIXEL_CLOCK 148500	/* 1920x1080@60Hz */
+#define IPS_PIXEL_CLOCK_145 145000
+#define IPS_PIXEL_CLOCK_54  54000
+#define IPS_PIXEL_CLOCK_27  27000
+#define IPS_PREFERRED_HDISPLAY 1920
+#define IPS_PREFERRED_VDISPLAY 1080
+#define IPS_PREFERRED_REFRESH_RATE 60
+
+#define IPS_M_LANES_145 0x03CDADB1
+#define IPS_M_LANES_54  0x03CD80A1
+#define IPS_M_LANES_27  0x03C18051
+
+/* Clock Related Definitions
+ * Min/Max value based on DPLL parameter interface table
+ * from Penwell Display HAS
+ */
+#define IPS_DOT_MIN		19750
+#define IPS_DOT_MAX		148500
+
+#define IPS_DPLL_M_MIN_19	131
+#define IPS_DPLL_M_MAX_19	232
+#define IPS_DPLL_P1_MIN_19	3
+#define IPS_DPLL_P1_MAX_19	10
+
+#define IPS_LIMIT_DPLL_19	0
+#define IPS_VCO_SEL		(1 << 16)
+#define IPS_M_MIN		21
+
+static const struct ips_clock_limits_t ips_clock_limits[] = {
+	{	/* CRYSTAL_19 */
+	 .dot = {.min = IPS_DOT_MIN, .max = IPS_DOT_MAX},
+	 .m = {.min = IPS_DPLL_M_MIN_19, .max = IPS_DPLL_M_MAX_19},
+	 .p1 = {.min = IPS_DPLL_P1_MIN_19, .max = IPS_DPLL_P1_MAX_19},
+	 },
+};
+
+static const u32 ips_m_converts[] = {
+/* M configuration table from 9-bit LFSR table */
+	224, 368, 440, 220, 366, 439, 219, 365, 182, 347, /* 21 - 30 */
+	173, 342, 171, 85, 298, 149, 74, 37, 18, 265,   /* 31 - 40 */
+	388, 194, 353, 432, 216, 108, 310, 155, 333, 166, /* 41 - 50 */
+	83, 41, 276, 138, 325, 162, 337, 168, 340, 170, /* 51 - 60 */
+	341, 426, 469, 234, 373, 442, 221, 110, 311, 411, /* 61 - 70 */
+	461, 486, 243, 377, 188, 350, 175, 343, 427, 213, /* 71 - 80 */
+	106, 53, 282, 397, 354, 227, 113, 56, 284, 142, /* 81 - 90 */
+	71, 35, 273, 136, 324, 418, 465, 488, 500, 506, /* 91 - 100 */
+	253, 126, 63, 287, 399, 455, 483, 241, 376, 444, /* 101 - 110 */
+	478, 495, 503, 251, 381, 446, 479, 239, 375, 443, /* 111 - 120 */
+	477, 238, 119, 315, 157, 78, 295, 147, 329, 420, /* 121 - 130 */
+	210, 105, 308, 154, 77, 38, 275, 137, 68, 290, /* 131 - 140 */
+	145, 328, 164, 82, 297, 404, 458, 485, 498, 249, /* 141 - 150 */
+	380, 190, 351, 431, 471, 235, 117, 314, 413, 206, /* 151 - 160 */
+	103, 51, 25, 12, 262, 387, 193, 96, 48, 280, /* 161 - 170 */
+	396, 198, 99, 305, 152, 76, 294, 403, 457, 228, /* 171 - 180 */
+	114, 313, 156, 334, 423, 467, 489, 244, 378, 445, /*181 - 190 */
+	222, 367, 183, 91, 45, 22, 11, 261, 130, 321, /* 191 - 200 */
+	416, 464, 232, 372, 186, 93, 302, 407, 203, 101, /* 201 - 210 */
+	50, 281, 140, 70, 291, 401, 456, 484, 242, 121, /* 211 - 220 */
+	60, 30, 15, 7, 3, 257, 384, 448, 480, 496, /* 221 - 230 */
+	248, 124, 62, 31, 271, 135, 323, 417, 208, 104 /* 231 - 240 */
+};
+
+
+/**
+ * Description: get pixel clock range
+ *
+ * @pc_min:	minimum pixel clock
+ * @pc_max:	maximum pixel clock
+ *
+ * Returns:	OTM_HDMI_SUCCESS on success
+ *		OTM_HDMI_ERR_FAILED on NULL input arguments.
+ */
+otm_hdmi_ret_t ips_get_pixel_clock_range(unsigned int *pc_min,
+						unsigned int *pc_max)
+{
+	if (!pc_min || !pc_max)
+		return OTM_HDMI_ERR_FAILED;
+
+	*pc_min = IPS_MIN_PIXEL_CLOCK;
+	*pc_max = IPS_MAX_PIXEL_CLOCK;
+	return OTM_HDMI_SUCCESS;
+}
+
+/**
+ * Returns if the given values is preferred mode or not
+ * @hdisplay	: width
+ * @vdisplay	: height
+ * @refresh	: refresh rate
+ *
+ * Returns true if preferred mode else false
+ */
+bool ips_hdmi_is_preferred_mode(int hdisplay, int vdisplay, int refresh)
+{
+	if (hdisplay == IPS_PREFERRED_HDISPLAY &&
+		vdisplay == IPS_PREFERRED_VDISPLAY &&
+		refresh == IPS_PREFERRED_REFRESH_RATE)
+		return true;
+	else
+		return false;
+}
+
+/*
+ * Derive the pixel clock for the given refclk and
+ * divisors for 8xx chips.
+ */
+static void __ips_hdmi_derive_dot_clock(int refclk, struct ips_clock_t *clock)
+{
+	clock->dot = (refclk * clock->m) / clock->p1;
+}
+
+static const struct ips_clock_limits_t *__ips_hdmi_clk_limits(void)
+{
+	const struct ips_clock_limits_t *limit = NULL;
+
+	/*
+	 * CRYSTAL_19 is enabled for medfield.
+	 * Expand this logic for other types.
+	 */
+	limit = &ips_clock_limits[IPS_LIMIT_DPLL_19];
+	return limit;
+}
+
+static bool __ips_hdmi_find_bestPll(int target, int refclk,
+					struct ips_clock_t *best_clock)
+{
+	struct ips_clock_t clock;
+	const struct ips_clock_limits_t *limit = __ips_hdmi_clk_limits();
+	int err = target;
+
+	memset(best_clock, 0, sizeof(*best_clock));
+	for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
+		for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max;
+		     clock.p1++) {
+			int this_err;
+
+			__ips_hdmi_derive_dot_clock(refclk, &clock);
+
+			this_err = abs(clock.dot - target);
+			if (this_err < err) {
+				*best_clock = clock;
+				err = this_err;
+			}
+		}
+	}
+	return err != target;
+}
+
+static void __ips_hdmi_clk_lanes(unsigned long clk)
+{
+	if ((clk > IPS_PIXEL_CLOCK_145) || (clk == IPS_PIXEL_CLOCK_145)) {
+		hdmi_write32(IPS_HDMIB_LANES02, IPS_M_LANES_145);
+		hdmi_write32(IPS_HDMIB_LANES3, IPS_M_LANES_145);
+	} else if ((clk > IPS_PIXEL_CLOCK_54) || (clk == IPS_PIXEL_CLOCK_54)) {
+		hdmi_write32(IPS_HDMIB_LANES02, IPS_M_LANES_54);
+		hdmi_write32(IPS_HDMIB_LANES3, IPS_M_LANES_54);
+	} else {
+		hdmi_write32(IPS_HDMIB_LANES02, IPS_M_LANES_27);
+		hdmi_write32(IPS_HDMIB_LANES3, IPS_M_LANES_27);
+	}
+}
+
+/**
+ * Description: gets the best dpll clock value based on
+ *		current timing mode clock.
+ *
+ * @clk:	refresh rate dot clock in kHz of current mode
+ * @pdpll, pfp:	will be set to adjusted dpll values.
+ * @pclock_khz:	tmds clk value for the best pll and is needed for audio.
+ *		This field has to be moved into OTM audio
+ *		interfaces when implemented
+ *
+ * Returns:	OTM_HDMI_SUCCESS on success
+ *		OTM_HDMI_ERR_INVAL on NULL input arguments.
+ */
+otm_hdmi_ret_t __ips_hdmi_get_adjusted_clk(unsigned long clk,
+					u32 *pdpll, u32 *pfp,
+					uint32_t *pclock_khz)
+{
+	int refclk;
+	int clk_n;
+	int clk_p2;
+	int clk_byte = 1;
+	int m_conv = 0;
+	int clk_tmp;
+	u32 dpll, fp;
+	bool ret;
+	struct ips_clock_t clock;
+
+	/* NULL checks */
+	if (pdpll == NULL || pfp == NULL || pclock_khz == NULL) {
+		pr_debug("\ninvalid argument\n");
+		return OTM_HDMI_ERR_INVAL;
+	}
+
+	__ips_hdmi_clk_lanes(clk);
+
+	/* values corresponds to CRYSTAL_19, as this is enabled on mdfld */
+	refclk = 19200;
+	clk_n = 1;
+	clk_p2 = 10;
+
+	clk_tmp = clk * clk_n * clk_p2 * clk_byte;
+	ret = __ips_hdmi_find_bestPll(clk_tmp, refclk, &clock);
+	/*
+	 * TODO: tmds clk value for the best pll found and is needed for audio.
+	 * This field has to be moved into OTM audio interfaces
+	 * when implemented.
+	 */
+	*pclock_khz = clock.dot / (clk_n * clk_p2 * clk_byte);
+	if (ret)
+		m_conv = ips_m_converts[(clock.m - IPS_M_MIN)];
+
+	dpll = 0;
+	dpll |= IPS_VCO_SEL;
+	/* compute bitmask from p1 value */
+	dpll |= (1 << (clock.p1 - 2)) << 17;
+
+	fp = (clk_n / 2) << 16;
+	fp |= m_conv;
+
+	/* update the pointers */
+	*pdpll = dpll;
+	*pfp = fp;
+
+	return OTM_HDMI_SUCCESS;
+}
+
+/**
+ * Description: gets dpll clocks
+ *
+ * @dev:	hdmi_device_t
+ * @dclk:	refresh rate dot clock in kHz of current mode
+ *
+ * Returns:	OTM_HDMI_SUCCESS on success
+ *		OTM_HDMI_ERR_INVAL on NULL input arguments
+ */
+otm_hdmi_ret_t	ips_hdmi_crtc_mode_get_program_dpll(hdmi_device_t *dev,
+							unsigned long dclk)
+{
+	pr_debug("enter %s\n", __func__);
+
+	return OTM_HDMI_SUCCESS;
+}
+
+/**
+ * Description: programs dpll clocks, enables dpll and waits
+ *		till it locks with DSI PLL
+ *
+ * @dev:	hdmi_device_t
+ * @dclk:	refresh rate dot clock in kHz of current mode
+ *
+ * Returns:	OTM_HDMI_SUCCESS on success
+ *		OTM_HDMI_ERR_INVAL on NULL input arguments
+ */
+otm_hdmi_ret_t	ips_hdmi_crtc_mode_set_program_dpll(hdmi_device_t *dev,
+							unsigned long dclk)
+{
+	otm_hdmi_ret_t rc = OTM_HDMI_SUCCESS;
+	u32 dpll_adj, fp;
+	u32 dpll;
+	int timeout = 0;
+
+	/* NULL checks */
+	if (dev == NULL) {
+		pr_debug("\ninvalid argument\n");
+		return OTM_HDMI_ERR_INVAL;
+	}
+
+	rc = __ips_hdmi_get_adjusted_clk(dclk, &dpll_adj, &fp, &dev->clock_khz);
+	dpll = hdmi_read32(IPIL_DPLL_B);
+	if (dpll & IPIL_DPLL_VCO_ENABLE) {
+		dpll &= ~IPIL_DPLL_VCO_ENABLE;
+		hdmi_write32(IPIL_DPLL_B, dpll);
+		hdmi_read32(IPIL_DPLL_B);
+
+		/* reset M1, N1 & P1 */
+		hdmi_write32(IPIL_DPLL_DIV0, 0);
+		dpll &= ~IPIL_P1_MASK;
+		hdmi_write32(IPIL_DPLL_B, dpll);
+	}
+
+	/*
+	 * When ungating power of DPLL, needs to wait 0.5us
+	 * before enable the VCO
+	 */
+	if (dpll & IPIL_PWR_GATE_EN) {
+		dpll &= ~IPIL_PWR_GATE_EN;
+		hdmi_write32(IPIL_DPLL_B, dpll);
+		udelay(1);
+	}
+
+	dpll = dpll_adj;
+	hdmi_write32(IPIL_DPLL_DIV0, fp);
+	hdmi_write32(IPIL_DPLL_B, dpll);
+	udelay(1);
+
+	dpll |= IPIL_DPLL_VCO_ENABLE;
+	hdmi_write32(IPIL_DPLL_B, dpll);
+	hdmi_read32(IPIL_DPLL_B);
+
+	/* wait for DSI PLL to lock */
+	while ((timeout < 20000) && !(hdmi_read32(IPIL_PIPEBCONF) &
+					IPIL_PIPECONF_PLL_LOCK)) {
+		udelay(150);
+		timeout++;
+	}
+
+	return OTM_HDMI_SUCCESS;
+}
+
+/**
+ * Description: restore HDMI display registers and enable display
+ *
+ * @dev:	hdmi_device_t
+ *
+ * Returns:	none
+ */
+void ips_hdmi_restore_and_enable_display(hdmi_device_t *dev)
+{
+	int i;
+	u32 dpll = 0;
+	u32 dpll_val;
+	if (NULL == dev)  {
+		pr_debug("\n%s invalid argument\n", __func__);
+		return;
+	}
+	if (dev->reg_state.valid == false) {
+		pr_debug("\nhdmi no data to restore\n");
+		return;
+	}
+
+	/*make sure VGA plane is off. it initializes to on after reset!*/
+	hdmi_write32(IPIL_VGACNTRL, IPIL_VGA_DISP_DISABLE);
+
+	dpll = hdmi_read32(IPS_DPLL_B);
+	if (!(dpll & IPIL_DPLL_VCO_ENABLE)) {
+		/**
+		 * When ungating power of DPLL, needs to wait 0.5us
+		 * before enable the VCO
+		 */
+		if (dpll & IPIL_DPLL_PWR_GATE_EN) {
+			dpll &= ~IPIL_DPLL_PWR_GATE_EN;
+			hdmi_write32(IPS_DPLL_B, dpll);
+			udelay(1);
+		}
+
+		hdmi_write32(IPS_DPLL_DIV0, dev->reg_state.saveFPA0);
+
+		dpll_val = dev->reg_state.saveDPLL & ~IPIL_DPLL_VCO_ENABLE;
+		hdmi_write32(IPS_DPLL_B, dpll_val);
+		udelay(1);
+
+		dpll_val |= IPIL_DPLL_VCO_ENABLE;
+		hdmi_write32(IPS_DPLL_B, dpll_val);
+		hdmi_read32(IPS_DPLL_B);
+
+	}
+
+	/* Restore mode */
+	hdmi_write32(IPS_HTOTAL_B, dev->reg_state.saveHTOTAL_B);
+	hdmi_write32(IPS_HBLANK_B, dev->reg_state.saveHBLANK_B);
+	hdmi_write32(IPS_HSYNC_B, dev->reg_state.saveHSYNC_B);
+	hdmi_write32(IPS_VTOTAL_B, dev->reg_state.saveVTOTAL_B);
+	hdmi_write32(IPS_VBLANK_B, dev->reg_state.saveVBLANK_B);
+	hdmi_write32(IPS_VSYNC_B, dev->reg_state.saveVSYNC_B);
+	hdmi_write32(IPS_PIPEBSRC, dev->reg_state.savePIPEBSRC);
+	hdmi_write32(IPS_DSPBSTAT, dev->reg_state.saveDSPBSTATUS);
+
+	/*set up the plane*/
+	hdmi_write32(IPS_DSPBSTRIDE, dev->reg_state.saveDSPBSTRIDE);
+	hdmi_write32(IPS_DSPBLINOFF, dev->reg_state.saveDSPBLINOFF);
+	hdmi_write32(IPS_DSPBTILEOFF, dev->reg_state.saveDSPBTILEOFF);
+	hdmi_write32(IPS_DSPBSIZE, dev->reg_state.saveDSPBSIZE);
+	hdmi_write32(IPS_DSPBPOS, dev->reg_state.saveDSPBPOS);
+	hdmi_write32(IPS_DSPBSURF, dev->reg_state.saveDSPBSURF);
+
+	hdmi_write32(IPS_PFIT_CONTROL, dev->reg_state.savePFIT_CONTROL);
+	hdmi_write32(IPS_PFIT_PGM_RATIOS, dev->reg_state.savePFIT_PGM_RATIOS);
+	hdmi_write32(IPS_HDMIPHYMISCCTL, dev->reg_state.saveHDMIPHYMISCCTL);
+	hdmi_write32(IPS_HDMIB_CONTROL, dev->reg_state.saveHDMIB_CONTROL);
+
+	/*enable the plane*/
+	hdmi_write32(IPS_DSPBCNTR, dev->reg_state.saveDSPBCNTR);
+	hdmi_write32(IPS_HDMIB_LANES02, dev->reg_state.saveHDMIB_DATALANES);
+	hdmi_write32(IPS_HDMIB_LANES3, dev->reg_state.saveHDMIB_DATALANES);
+
+	if (in_atomic() || in_interrupt()) {
+		/*  udelay arg must be < 20000 */
+		udelay(19999);
+	} else
+		msleep_interruptible(20);
+
+	/*enable the pipe */
+	hdmi_write32(IPS_PIPEBCONF, dev->reg_state.savePIPEBCONF);
+
+	/* restore palette (gamma) */
+	for (i = 0; i < 256; i++)
+		hdmi_write32(IPS_PALETTE_B + (i<<2),
+				dev->reg_state.save_palette_b[i]);
+
+	dev->reg_state.valid = false;
+}
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/ipil/specific/include/ips_hdcp_api.h b/drivers/external_drivers/intel_media/otm_hdmi/ipil/specific/include/ips_hdcp_api.h
new file mode 100644
index 0000000..b172f50
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/ipil/specific/include/ips_hdcp_api.h
@@ -0,0 +1,199 @@
+/*
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  Contact Information:
+
+  Intel Corporation
+  2200 Mission College Blvd.
+  Santa Clara, CA  95054
+
+  BSD LICENSE
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+
+#ifndef IPS_HDCP_API_H
+#define IPS_HDCP_API_H
+
+
+#include <linux/types.h>
+
+/**
+ * Description: initialize hdcp tx for authentication
+ *
+ * Returns:	true success else false
+ */
+bool ips_hdcp_init(void);
+
+/**
+ * Description: check whether hdcp hardware is ready
+ *
+ * Returns:	true if ready else false
+ */
+bool ips_hdcp_is_ready(void);
+
+/**
+ * Description: read an from hdcp tx
+ *
+ * @an		buffer to return an in
+ * @size	size of an buffer passed
+ *
+ * Returns:	true on succesful read else false
+ */
+void ips_hdcp_get_an(uint8_t *an);
+
+/**
+ * Description: read aksv from hdcp tx
+ *
+ * @aksv	buffer to return aksv
+ * @size	size of an buffer passed
+ *
+ * Returns:	true on succesful read else false
+ */
+void ips_hdcp_get_aksv(uint8_t *aksv);
+
+/**
+ * Description: set downstream bksv in hdcp tx
+ *
+ * @bksv	bksv from downstream device
+ *
+ * Returns:	true on succesful write else false
+ */
+bool ips_hdcp_set_bksv(uint8_t *bksv);
+
+/**
+ * Description: check if hdcp tx R0 is ready after starting authentication
+ *
+ * Returns:	true if r0 is ready else false
+ */
+bool ips_hdcp_is_r0_ready(void);
+
+/**
+ * Description: set repeater bit in hdcp tx if downstream is a repeater else
+ *		reset the bit
+ *
+ * @present	indicates whether downstream is repeater or not
+ *
+ * Returns:	true on succesful write else false
+ */
+bool ips_hdcp_set_repeater(bool present);
+
+/**
+ * Description: start first stage of authentication by writing an aksv
+ *
+ * Returns:	true on succesfully starting authentication else false
+ */
+bool ips_hdcp_start_authentication(void);
+
+/**
+ * Description: Enable encryption once r0 matches
+ *
+ * Returns:	true on enabling encryption else false
+ */
+bool ips_hdcp_enable_encryption(void);
+
+/**
+ * Description: check if hdcp tx & rx ri matches
+ *
+ * @rx_ri	ri of downstream device
+ *
+ * Returns:	true if ri matches else false
+ */
+bool ips_hdcp_does_ri_match(uint16_t rx_ri);
+
+/**
+ * Description: compute v for repeater authentication
+ *
+ * @rep_ksv_list	 ksv list from downstream repeater
+ * @rep_ksv_list_entries number of entries in the ksv list
+ * @topology_data	 bstatus value
+ *
+ * Returns:	true on successfully computing v else false
+ */
+bool ips_hdcp_compute_tx_v(uint8_t *rep_ksv_list,
+					  uint32_t rep_ksv_list_entries,
+					  uint16_t topology_data);
+
+/**
+ * Description: compare hdcp tx & hdcp rx sha1 results
+ *
+ * @rep_prime_v sha1 value from downstream repeater
+ *
+ * Returns:	true if same else false
+ */
+bool ips_hdcp_compare_v(uint32_t *rep_prime_v);
+
+/**
+ * Description: disable hdcp
+ *
+ * Returns:	true on successfully disabling hdcp else false
+ */
+bool ips_hdcp_disable(void);
+
+/**
+ * Description: check whether hdcp tx can authenticate
+ *
+ * Returns:	true if device can authenticate else false
+ */
+bool ips_hdcp_device_can_authenticate(void);
+
+/**
+ * Description: get hardware frame count for cipher Ri update
+ *
+ * @count   frame count for cipher Ri update
+ *
+ * Returns: true if successful else false
+ */
+bool ips_hdcp_get_ri_frame_count(uint8_t *count);
+
+#endif /* IPS_HDCP_API_H */
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/ipil/specific/include/ips_hdmi.h b/drivers/external_drivers/intel_media/otm_hdmi/ipil/specific/include/ips_hdmi.h
new file mode 100644
index 0000000..c245921
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/ipil/specific/include/ips_hdmi.h
@@ -0,0 +1,280 @@
+/*
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  Contact Information:
+
+  Intel Corporation
+  2200 Mission College Blvd.
+  Santa Clara, CA  95054
+
+  BSD LICENSE
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#ifndef __IPS_HDMI_H
+#define __IPS_HDMI_H
+
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include "otm_hdmi_types.h"
+#include "hdmi_internal.h"
+
+#define PCI_DEVICE_HDMI 0x080D
+#define PCI_LENGTH_HDMI 0x7000
+
+#define IPS_DPLL_B           (0x0f018)
+#define IPS_DPLL_DIV0        (0x0f048)
+#define IPS_PIPEBCONF        (0x71008)
+#define IPS_HTOTAL_B         (0x61000)
+#define IPS_HBLANK_B         (0x61004)
+#define IPS_HSYNC_B          (0x61008)
+#define IPS_VTOTAL_B         (0x6100c)
+#define IPS_VBLANK_B         (0x61010)
+#define IPS_VSYNC_B          (0x61014)
+#define IPS_PIPEBSRC         (0x6101c)
+#define IPS_DSPBSTRIDE       (0x71188)
+#define IPS_DSPBLINOFF       (0X71184)
+#define IPS_DSPBTILEOFF      (0x711A4)
+#define IPS_DSPBSIZE         (0x71190)
+#define IPS_DSPBPOS          (0x7118C)
+#define IPS_DSPBSURF         (0x7119C)
+#define IPS_DSPBCNTR         (0x71180)
+#define IPS_DSPBSTAT         (0x71024)
+#define IPS_PALETTE_B        (0x0a800)
+#define IPS_PFIT_CONTROL     (0x61230)
+#define IPS_PFIT_PGM_RATIOS  (0x61234)
+#define IPS_HDMIPHYMISCCTL   (0x61134)
+#define IPS_HDMIB_CONTROL    (0x61140)
+#define IPS_HDMIB_LANES02    (0x61120)
+#define IPS_HDMIB_LANES3     (0x61124)
+#define IPS_PFIT_ENABLE		(1 << 31)
+
+/* HSYNC and VSYNC Polarity Mask Bits */
+#define IPS_HSYNC_POLARITY_MASK (1 << 3)
+#define IPS_VSYNC_POLARITY_MASK (1 << 4)
+
+struct ips_clock_t {
+	int dot;
+	int m;
+	int p1;
+};
+
+struct ips_range_t {
+	int min, max;
+};
+
+struct ips_clock_limits_t {
+	struct ips_range_t dot, m, p1;
+};
+
+/**
+ * Description: disable video infoframe
+ *
+ * @dev:	hdmi_device_t
+ * @type:       type of infoframe packet
+ *
+ * Returns:     OTM_HDMI_ERR_NULL_ARG on NULL parameters
+ *		OTM_HDMI_ERR_INVAL on invalid packet type
+ *		OTM_HDMI_SUCCESS on success
+ */
+otm_hdmi_ret_t ips_hdmi_disable_vid_infoframe(hdmi_device_t *dev,
+					unsigned int type);
+
+/**
+ * Description: enable video infoframe
+ *
+ * @dev:	hdmi_device_t
+ * @type:       type of infoframe packet
+ * @pkt:	infoframe packet data
+ * @freq:       number of times packet needs to be sent
+ *
+ * Returns:     OTM_HDMI_ERR_NULL_ARG on NULL parameters
+ *		OTM_HDMI_ERR_INVAL on invalid packet type
+ *		OTM_HDMI_SUCCESS on success
+ */
+otm_hdmi_ret_t ips_hdmi_enable_vid_infoframe(hdmi_device_t *dev,
+					unsigned int type,
+					otm_hdmi_packet_t *pkt,
+					unsigned int freq);
+
+/**
+ * Description: disable all infoframes
+ *
+ * @dev:	hdmi_device_t
+ *
+ * Returns:	OTM_HDMI_ERR_NULL_ARG on NULL parameters
+ *		OTM_HDMI_SUCCESS on success
+ */
+otm_hdmi_ret_t ips_hdmi_disable_all_infoframes(hdmi_device_t *dev);
+
+/**
+ * Description: gets the best dpll clock value based on
+ *		current timing mode clock.
+ *
+ * @clk:		refresh rate dot clock in kHz of current mode
+ * @pdpll, pfp:		will be set to adjusted dpll values.
+ * @pclock_khz:		tmds clk value for the best pll and is needed for audio.
+ *			This field has to be moved into OTM audio
+ *			interfaces when implemented
+ *
+ * Returns:	OTM_HDMI_SUCCESS on success
+ *		OTM_HDMI_ERR_INVAL on NULL input arguments.
+ */
+otm_hdmi_ret_t ips_hdmi_get_adjusted_clk(unsigned long clk,
+				u32 *pdpll, u32 *pfp, uint32_t *pclock_khz);
+
+/**
+ * Description: restore HDMI display registers and enable display
+ *
+ * @dev:        hdmi_device_t
+ *
+ * Returns: none
+ */
+void ips_hdmi_restore_and_enable_display(hdmi_device_t *dev);
+void ips_hdmi_restore_data_island(hdmi_device_t *dev);
+
+/**
+ * Description: save HDMI display registers
+ *
+ * @dev:        hdmi_device_t
+ *
+ * Returns: none
+ */
+void ips_hdmi_save_display_registers(hdmi_device_t *dev);
+void ips_hdmi_save_data_island(hdmi_device_t *dev);
+
+/**
+ * Description:	get vic data from data island packets
+ *
+ * @dev:	hdmi_device_t
+ *
+ * Returns:	vic
+ */
+uint8_t ips_hdmi_get_vic_from_data_island(hdmi_device_t *dev);
+
+
+/**
+ * Description: destroys any saved HDMI data
+ *
+ * @dev:        hdmi_device_t
+ *
+ * Returns: none
+ */
+void ips_hdmi_destroy_saved_data(hdmi_device_t *dev);
+
+/**
+ * Description: disable HDMI display
+ *
+ * @dev:        hdmi_device_t
+ *
+ * Returns: none
+ */
+void ips_disable_hdmi(hdmi_device_t *dev);
+
+/**
+ * Description: get pixel clock range
+ *
+ * @pc_min:	minimum pixel clock
+ * @pc_max:	maximum pixel clock
+ *
+ * Returns:	OTM_HDMI_SUCCESS on success
+ *		OTM_HDMI_ERR_FAILED on NULL input arguments.
+ */
+otm_hdmi_ret_t ips_get_pixel_clock_range(unsigned int *pc_min,
+						unsigned int *pc_max);
+
+/**
+ * Returns if the given values is preferred mode or not
+ * @hdisplay	: width
+ * @vdisplay	: height
+ * @refresh	: refresh rate
+ *
+ * Returns true if preferred mode else false
+ */
+bool ips_hdmi_is_preferred_mode(int hdisplay, int vdisplay, int refresh);
+
+/**
+ * Description: gets dpll clocks
+ *
+ * @dev:	hdmi_device_t
+ * @dclk:	refresh rate dot clock in kHz of current mode
+ *
+ * Returns:	OTM_HDMI_SUCCESS on success
+ *		OTM_HDMI_ERR_INVAL on NULL input arguments
+ */
+otm_hdmi_ret_t	ips_hdmi_crtc_mode_get_program_dpll(hdmi_device_t *dev,
+							unsigned long dclk);
+
+/**
+ * Description: programs dpll clocks, enables dpll and waits
+ *		till it locks with DSI PLL
+ *
+ * @dev:	hdmi_device_t
+ * @dclk:	refresh rate dot clock in kHz of current mode
+ *
+ * Returns:	OTM_HDMI_SUCCESS on success
+ *		OTM_HDMI_ERR_INVAL on NULL input arguments
+ */
+otm_hdmi_ret_t	ips_hdmi_crtc_mode_set_program_dpll(hdmi_device_t *dev,
+							unsigned long dclk);
+
+/**
+ * Description: disable all planes on pipe
+ *
+ * @pipe:    pipe ID
+ * @enable : true to enable planes; false to disable planes
+ *
+ */
+void ips_enable_planes_on_pipe(uint32_t pipe, bool enable);
+
+#endif /* __IPS_HDMI_H */
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/ipil/specific/mfld/ips_hdcp.c b/drivers/external_drivers/intel_media/otm_hdmi/ipil/specific/mfld/ips_hdcp.c
new file mode 100644
index 0000000..8e09f82
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/ipil/specific/mfld/ips_hdcp.c
@@ -0,0 +1,878 @@
+/*
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  Contact Information:
+
+  Intel Corporation
+  2200 Mission College Blvd.
+  Santa Clara, CA  95054
+
+  BSD LICENSE
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include "hdcp_rx_defs.h"
+#include "mfld_hdcp_reg.h"
+#include "ipil_utils.h"
+#include "ipil_hdcp_api.h"
+#include "ips_hdcp_api.h"
+
+static void ips_hdcp_capture_an(void);
+static bool ips_hdcp_is_hdcp_on(void);
+static bool ips_hdcp_is_an_ready(void);
+static void ips_hdcp_read_an(uint8_t *an);
+static void ips_hdcp_write_rx_ri(uint16_t rx_ri);
+static void ips_hdcp_set_config(int val);
+static int ips_hdcp_get_config(void);
+static bool ips_hdcp_is_encrypting(void) __attribute__((unused));
+static uint8_t ips_hdcp_get_repeater_control(void)__attribute__((unused));
+static void ips_hdcp_set_repeater_control(int value) __attribute__((unused));
+static uint8_t ips_hdcp_get_repeater_status(void);
+static int ips_hdcp_repeater_v_match_check(void);
+static bool ips_hdcp_repeater_is_busy(void) __attribute__((unused));
+static bool ips_hdcp_repeater_rdy_for_nxt_data(void);
+static bool ips_hdcp_repeater_is_idle(void);
+static bool ips_hdcp_repeater_wait_for_next_data(void);
+static bool ips_hdcp_repeater_wait_for_idle(void);
+static void ips_hdcp_off(void);
+
+/**
+ * Description: read register for hdcp status
+ *
+ * Returns:	value of hdcp status register
+ */
+static uint32_t ips_hdcp_get_status(void)
+{
+	return hdmi_read32(MDFLD_HDCP_STATUS_REG);
+}
+
+/**
+ * Description: enable hdcp on hdmi port
+ *
+ * @enable	enable or disable hdcp on hdmi port
+ *
+ * Returns:	none
+ */
+static void ips_hdcp_enable_port(bool enable)
+{
+	uint32_t hdmib_reg = hdmi_read32(MDFLD_HDMIB_CNTRL_REG);
+	if (enable)
+		hdmib_reg |= MDFLD_HDMIB_HDCP_PORT_SEL;
+	else
+		hdmib_reg &= ~MDFLD_HDMIB_HDCP_PORT_SEL;
+	hdmi_write32(MDFLD_HDMIB_CNTRL_REG, hdmib_reg);
+}
+
+/**
+ * Description: generate an for new authentication
+ *
+ * Returns:	none
+ */
+static void ips_hdcp_capture_an(void)
+{
+	hdmi_write32(MDFLD_HDCP_INIT_REG, (uint32_t) jiffies);
+	hdmi_write32(MDFLD_HDCP_INIT_REG, (uint32_t) (jiffies >> 1));
+	hdmi_write32(MDFLD_HDCP_CONFIG_REG, HDCP_CAPTURE_AN);
+}
+
+/**
+ * Description: check if hdcp is enabled on hdmi port
+ *
+ * Returns:	true if enabled else false
+ */
+static bool ips_hdcp_is_hdcp_on(void)
+{
+	struct ips_hdcp_status_reg_t status;
+	status.value = ips_hdcp_get_status();
+
+	if (status.hdcp_on)
+		return true;
+
+	return false;
+}
+
+/**
+ * Description: check if an is ready for use
+ *
+ * Returns:	true if ready else false
+ */
+static bool ips_hdcp_is_an_ready(void)
+{
+	struct ips_hdcp_status_reg_t status;
+	status.value = ips_hdcp_get_status();
+
+	if (status.an_ready)
+		return true;
+
+	return false;
+}
+
+/**
+ * Description: read an from hdcp tx
+ *
+ * @an		buffer to copy the an into
+ *
+ * Returns:	none
+ */
+static void ips_hdcp_read_an(uint8_t *an)
+{
+	uint8_t i = 0;
+	struct double_word_t temp;
+	temp.value = 0;
+	temp.low = hdmi_read32(MDFLD_HDCP_AN_LOW_REG);
+	temp.high = hdmi_read32(MDFLD_HDCP_AN_HI_REG);
+	for (i = 0; i < HDCP_AN_SIZE; i++)
+		an[i] = temp.byte[i];
+}
+
+/**
+ * Description: write rx_ri into hdcp tx register
+ *
+ * @rx_ri	downstream device's ri value
+ *
+ * Returns:	none
+ */
+static void ips_hdcp_write_rx_ri(uint16_t rx_ri)
+{
+	hdmi_write32(MDFLD_HDCP_RECEIVER_RI_REG, rx_ri);
+}
+
+/**
+ * Description: set config value in hdcp tx configuration register
+ *
+ * @val		value to be written into the configuration register's
+ *		config bits
+ *
+ * Returns:	none
+ */
+static void ips_hdcp_set_config(int val)
+{
+	struct ips_hdcp_config_reg_t config;
+	config.value = hdmi_read32(MDFLD_HDCP_CONFIG_REG);
+	config.hdcp_config = val;
+	hdmi_write32(MDFLD_HDCP_CONFIG_REG, config.value);
+}
+
+/**
+ * Description: read hdcp tx config bits
+ *
+ * Returns:	hdcp tx configuration register's config bits
+ */
+static int ips_hdcp_get_config(void)
+{
+	struct ips_hdcp_config_reg_t config;
+	config.value = hdmi_read32(MDFLD_HDCP_CONFIG_REG);
+	return config.hdcp_config;
+}
+
+/**
+ * Description: check whether hdcp configuration is set to encrypting
+ *
+ * Returns:	true if set to encrypting else false
+ */
+static bool ips_hdcp_config_is_encrypting(void)
+{
+	if (ips_hdcp_get_config() == HDCP_AUTHENTICATE_AND_ENCRYPT)
+		return true;
+	return false;
+}
+
+/**
+ * Description: check whether hdcp is encrypting data
+ *
+ * Returns:	true if encrypting else false
+ */
+static bool ips_hdcp_is_encrypting(void)
+{
+	struct ips_hdcp_status_reg_t status;
+	status.value = ips_hdcp_get_status();
+
+	if (status.encrypting)
+		return true;
+
+	return false;
+}
+
+/**
+ * Description: get control bits of hdcp-tx repeater reguister
+ *
+ * Returns:	repeater control bits
+ */
+static uint8_t ips_hdcp_get_repeater_control(void)
+{
+	struct ips_hdcp_repeater_reg_t repeater;
+	repeater.value = hdmi_read32(MDFLD_HDCP_REP_REG);
+	return repeater.control;
+}
+
+/**
+ * Description: set control bits of hdcp-tx repeater reguister
+ *
+ * @value	value of the control bits
+ *
+ * Returns:	none
+ */
+static void ips_hdcp_set_repeater_control(int value)
+{
+	struct ips_hdcp_repeater_reg_t repeater;
+	repeater.value = hdmi_read32(MDFLD_HDCP_REP_REG);
+	repeater.control = value;
+	hdmi_write32(MDFLD_HDCP_REP_REG, repeater.value);
+}
+
+/**
+ * Description: get status bits of hdcp-tx repeater reguister
+ *
+ * Returns:	repeater status bits
+ */
+static uint8_t ips_hdcp_get_repeater_status(void)
+{
+	struct ips_hdcp_repeater_reg_t repeater;
+	repeater.value = hdmi_read32(MDFLD_HDCP_REP_REG);
+	return repeater.status;
+}
+
+/**
+ * Description: check the status of SHA1 match
+ *
+ * Returns:	0	on error
+ *		1	on match
+ *		-1	if busy
+ */
+static int ips_hdcp_repeater_v_match_check(void)
+{
+	uint8_t status = ips_hdcp_get_repeater_status();
+	switch (status) {
+	case HDCP_REPEATER_STATUS_COMPLETE_MATCH:
+		return 1;
+	case HDCP_REPEATER_STATUS_BUSY:
+		return -1;
+	default:
+		return 0;
+	}
+}
+
+/**
+ * Description: check if repeater is busy
+ *
+ * Returns:	true if busy else false
+ */
+static bool ips_hdcp_repeater_is_busy(void)
+{
+	uint8_t status = ips_hdcp_get_repeater_status();
+	if (status == HDCP_REPEATER_STATUS_BUSY)
+		return true;
+	return false;
+}
+
+/**
+ * Description: check if repeater is ready for next data
+ *
+ * Returns:	true if ready else false
+ */
+static bool ips_hdcp_repeater_rdy_for_nxt_data(void)
+{
+	uint8_t status = ips_hdcp_get_repeater_status();
+	if (status == HDCP_REPEATER_STATUS_RDY_NEXT_DATA)
+		return true;
+	return false;
+}
+
+/**
+ * Description: check if repeater is idle
+ *
+ * Returns:	true if idle else false
+ */
+static bool ips_hdcp_repeater_is_idle(void)
+{
+	uint8_t status = ips_hdcp_get_repeater_status();
+	if (status == HDCP_REPEATER_STATUS_IDLE)
+		return true;
+	return false;
+}
+
+/**
+ * Description: wait for hdcp repeater to be ready for next data
+ *
+ * Returns:	true if ready else false
+ */
+static bool ips_hdcp_repeater_wait_for_next_data(void)
+{
+	uint16_t i = 0;
+	for (; i < HDCP_MAX_RETRY_STATUS; i++) {
+		if (ips_hdcp_repeater_rdy_for_nxt_data())
+			return true;
+	}
+	return false;
+}
+
+/**
+ * Description: wait for hdcp repeater to get into idle state
+ *
+ * Returns:	true if repeater is in idle state else false
+ */
+static bool ips_hdcp_repeater_wait_for_idle(void)
+{
+	uint16_t i = 0;
+	for (; i < HDCP_MAX_RETRY_STATUS; i++) {
+		if (ips_hdcp_repeater_is_idle())
+			return true;
+	}
+	return false;
+}
+
+/**
+ * Description: switch off hdcp by setting in the config register
+ *
+ * Returns:	none
+ */
+static void ips_hdcp_off(void)
+{
+	ips_hdcp_set_config(HDCP_Off);
+}
+
+/**
+ * Description: check whether hdcp hardware is ready
+ *
+ * Returns:	true if ready else false
+ */
+bool ips_hdcp_is_ready(void)
+{
+	struct ips_hdcp_status_reg_t status;
+	status.value = ips_hdcp_get_status();
+
+	if (status.fus_success && status.fus_complete)
+		return true;
+
+	return false;
+}
+
+/**
+ * Description: read an from hdcp tx
+ *
+ * @an	  buffer to return an in
+ *
+ * Returns:	true on succesful read else false
+ */
+void ips_hdcp_get_an(uint8_t *an)
+{
+	bool ret = false;
+	ips_hdcp_off();
+	ips_hdcp_capture_an();
+	do {
+		ret = ips_hdcp_is_an_ready();
+	} while  (ret == false);
+	ips_hdcp_read_an(an);
+}
+
+/**
+ * Description: read aksv from hdcp tx
+ *
+ * @aksv	buffer to return aksv
+ *
+ * Returns:	true on succesful read else false
+ */
+void ips_hdcp_get_aksv(uint8_t *aksv)
+{
+	static uint8_t save_aksv[HDCP_KSV_SIZE] = {0, 0, 0, 0, 0};
+	static bool aksv_read_once = false;
+	uint8_t i = 0;
+	struct double_word_t temp;
+	if (aksv_read_once == false) {
+		temp.value = 0;
+		temp.low = hdmi_read32(MDFLD_HDCP_AKSV_LOW_REG);
+		temp.high = hdmi_read32(MDFLD_HDCP_AKSV_HI_REG);
+		aksv_read_once = true;
+		for (i = 0; i < HDCP_KSV_SIZE; i++)
+			save_aksv[i] = temp.byte[i];
+	}
+	for (i = 0; i < HDCP_KSV_SIZE; i++)
+		aksv[i] = save_aksv[i];
+}
+
+/**
+ * Description: set downstream bksv in hdcp tx
+ *
+ * @bksv	bksv from downstream device
+ *
+ * Returns:	true on succesful write else false
+ */
+bool ips_hdcp_set_bksv(uint8_t *bksv)
+{
+	uint8_t i = 0;
+	struct double_word_t temp;
+	if (bksv == NULL)
+		return false;
+	temp.value = 0;
+	for (i = 0; i < HDCP_KSV_SIZE; i++)
+		temp.byte[i] = bksv[i];
+
+	hdmi_write32(MDFLD_HDCP_BKSV_LOW_REG, temp.low);
+	hdmi_write32(MDFLD_HDCP_BKSV_HI_REG, temp.high);
+	return true;
+}
+
+/**
+ * Description: set repeater bit in hdcp tx if downstream is a repeater else
+ *		reset the bit
+ *
+ * @present	indicates whether downstream is repeater or not
+ *
+ * Returns:	true on succesful write else false
+ */
+bool ips_hdcp_set_repeater(bool present)
+{
+	struct ips_hdcp_repeater_reg_t repeater;
+	repeater.value = hdmi_read32(MDFLD_HDCP_REP_REG);
+	repeater.present = present;
+	hdmi_write32(MDFLD_HDCP_REP_REG, repeater.value);
+	/* delay for hardware change of repeater status */
+	msleep(1);
+	return true;
+}
+
+/**
+ * Description: start first stage of authentication by writing an aksv
+ *
+ * Returns:	true on succesfully starting authentication else false
+ */
+bool ips_hdcp_start_authentication(void)
+{
+	ips_hdcp_enable_port(true);
+	ips_hdcp_set_config(HDCP_AUTHENTICATE_AND_ENCRYPT);
+	return true;
+}
+
+/**
+ * Description: check if hdcp tx R0 is ready after starting authentication
+ *
+ * Returns:	true if r0 is ready else false
+ */
+bool ips_hdcp_is_r0_ready(void)
+{
+	struct ips_hdcp_status_reg_t status;
+	status.value = ips_hdcp_get_status();
+
+	if (status.ri_ready)
+		return true;
+	return false;
+}
+
+/**
+ * Description: Enable encryption once r0 matches
+ *
+ * Returns:	true on enabling encryption else false
+ */
+bool ips_hdcp_enable_encryption(void)
+{
+	struct ips_hdcp_status_reg_t status;
+	uint32_t hdmib_reg = hdmi_read32(MDFLD_HDMIB_CNTRL_REG);
+	status.value = ips_hdcp_get_status();
+
+	if (ips_hdcp_is_hdcp_on() &&
+	    ips_hdcp_config_is_encrypting() &&
+	    status.ri_match &&
+	    (hdmib_reg & MDFLD_HDMIB_HDCP_PORT_SEL))
+		return true;
+	return false;
+}
+
+/**
+ * Description: check if hdcp tx & rx ri matches
+ *
+ * @rx_ri	ri of downstream device
+ *
+ * Returns:	true if ri matches else false
+ */
+bool ips_hdcp_does_ri_match(uint16_t rx_ri)
+{
+	struct ips_hdcp_status_reg_t status;
+
+	ips_hdcp_write_rx_ri(rx_ri);
+	status.value = ips_hdcp_get_status();
+	if (status.ri_match)
+		return true;
+	return false;
+}
+
+/**
+ * Description: compute v for repeater authentication
+ *
+ * @rep_ksv_list	 ksv list from downstream repeater
+ * @rep_ksv_list_entries number of entries in the ksv list
+ * @topology_data	bstatus value
+ *
+ * Returns:	true on successfully computing v else false
+ */
+bool ips_hdcp_compute_tx_v(uint8_t *rep_ksv_list,
+				   uint32_t rep_ksv_list_entries,
+				   uint16_t topology_data)
+{
+	bool ret = false;
+	const uint8_t BSTAT_M0_LEN = 18; /* 2 (bstatus) + 8 (M0) + 8 (length) */
+	const uint8_t BSTAT_M0 = 10; /* 2 (bstatus) + 8 (M0) */
+	const uint8_t M0 = 8; /* 8 (M0) */
+	uint32_t num_devices = rep_ksv_list_entries;
+	uint32_t lower_num_bytes_for_sha = 0, num_pad_bytes = 0, temp_data = 0;
+	uint32_t rem_text_data = 0, num_mo_bytes_left = M0, value = 0, i = 0;
+	uint8_t *buffer = NULL, *temp_buffer = NULL, *temp_data_ptr = NULL;
+	struct sqword_t buffer_len;
+
+	/* Clear SHA hash generator for new V calculation and
+	 * set the repeater to idle state
+	 */
+	hdmi_write32(MDFLD_HDCP_SHA1_IN, 0);
+
+	ips_hdcp_set_repeater_control(HDCP_REPEATER_CTRL_IDLE);
+	if (!ips_hdcp_repeater_wait_for_idle())
+		return false;
+
+	/* Start the SHA buffer creation to find the number of pad bytes */
+	num_pad_bytes = (64 - ((rep_ksv_list_entries * HDCP_KSV_SIZE)
+			 + BSTAT_M0_LEN)
+			 % 64);
+
+	/* Get the number of bytes for SHA */
+	lower_num_bytes_for_sha = (HDCP_KSV_SIZE * num_devices)
+				   + BSTAT_M0_LEN
+				   + num_pad_bytes; /* multiple of 64 bytes */
+
+	buffer = (uint8_t *)kzalloc(lower_num_bytes_for_sha, GFP_KERNEL);
+	if (!buffer)
+		return false;
+
+	/* 1. Copy the KSV buffer
+	 * Note: data is in little endian format
+	 */
+	temp_buffer = buffer;
+	memcpy((void *)temp_buffer, (void *)rep_ksv_list,
+		     num_devices * HDCP_KSV_SIZE);
+	temp_buffer += num_devices * HDCP_KSV_SIZE;
+
+	/* 2. Copy the topology_data
+	 */
+	memcpy((void *)temp_buffer, (void *)&topology_data, 2);
+	/* bstatus is copied in little endian format */
+	temp_buffer += 2;
+
+	/* 3. Offset the pointer buffer by 8 bytes
+	 * These 8 bytes are zeroed and are place holders for M0
+	 */
+	temp_buffer += 8;
+
+	/* 4. Pad the buffer with extra bytes
+	 *. The first padding byte must be 0x80 based on SHA1 message digest algorithm
+	 * HW automatically appends 0x80 while creating
+	 * the buffer if M0 is not 32-bit aligned
+	 * If M0 is 32-bit aligned we need to explicitly inject 0x80 to the buffer
+	 */
+	if (num_pad_bytes && ((num_devices * HDCP_KSV_SIZE + BSTAT_M0) % 4 == 0))
+		*temp_buffer = 0x80;
+	temp_buffer += num_pad_bytes;
+
+	/* 5. Construct the length byte */
+	buffer_len.quad_part = (unsigned long long)(rep_ksv_list_entries *
+				HDCP_KSV_SIZE + BSTAT_M0) * 8; /* in bits */
+	temp_data_ptr = (uint8_t *)&buffer_len.quad_part;
+	/* Store in big endian form, it is reversed to little endian
+	 * when fed to SHA1
+	 */
+	for (i = 1; i <= 8; i++) {
+		*temp_buffer = *(temp_data_ptr + 8 - i);
+		temp_buffer++;
+	}
+
+	/* 6. Write KSV's and bstatus into SHA */
+	temp_buffer = buffer;
+	for (i = 0; i < (HDCP_KSV_SIZE * num_devices + 2)/4; i++) {
+		ips_hdcp_set_repeater_control(HDCP_REPEATER_32BIT_TEXT_IP);
+
+		/* As per HDCP spec sample SHA is in little endian format.
+		 * But the data fed to the cipher needs to be in big endian
+		 * format for it to compute it correctly
+		 */
+		memcpy(&value, temp_buffer, 4);
+		value = HDCP_CONVERT_ENDIANNESS(value);
+		hdmi_write32(MDFLD_HDCP_SHA1_IN, value);
+		temp_buffer += 4;
+
+		if (false == ips_hdcp_repeater_wait_for_next_data())
+			goto exit;
+	}
+
+	/* 7. Write the remaining bstatus data and M0
+	 * Text input must be aligned to LSB of the SHA1
+	 * in register when inputting partial text and partial M0
+	 */
+	rem_text_data = (HDCP_KSV_SIZE * num_devices + 2) % 4;
+	if (rem_text_data) {
+		/* Update the number of M0 bytes */
+		num_mo_bytes_left = num_mo_bytes_left - (4-rem_text_data);
+
+		if (false == ips_hdcp_repeater_wait_for_next_data())
+			goto exit;
+
+		switch (rem_text_data) {
+		case 1:
+			ips_hdcp_set_repeater_control(
+				HDCP_REPEATER_8BIT_TEXT_24BIT_MO_IP);
+			break;
+		case 2:
+			ips_hdcp_set_repeater_control(
+				HDCP_REPEATER_16BIT_TEXT_16BIT_MO_IP);
+			break;
+		case 3:
+			ips_hdcp_set_repeater_control(
+				HDCP_REPEATER_24BIT_TEXT_8BIT_MO_IP);
+			break;
+		default:
+			goto exit;
+		}
+
+		memcpy(&value, temp_buffer, 4);
+
+		/* Swap the text data in big endian format leaving the M0 data
+		 * as it is. LSB should contain the data in big endian format.
+		 * Since the M0 specfic data is all zeros while it's fed to the
+		 * cipher, those bit don't need to be modified.
+		 */
+		temp_data = 0;
+		for (i = 0; i < rem_text_data; i++) {
+			temp_data |= ((value & 0xff << (i * 8)) >>
+					(i * 8)) <<
+					((rem_text_data - i - 1) * 8);
+		}
+		hdmi_write32(MDFLD_HDCP_SHA1_IN, temp_data);
+		temp_buffer += 4;
+	}
+
+	/* write 4 bytes of M0 */
+	if (false == ips_hdcp_repeater_wait_for_next_data())
+		goto exit;
+
+	ips_hdcp_set_repeater_control(HDCP_REPEATER_32BIT_MO_IP);
+	hdmi_write32(MDFLD_HDCP_SHA1_IN, (uint32_t)temp_buffer);
+	temp_buffer += 4;
+	num_mo_bytes_left -= 4;
+
+	if (num_mo_bytes_left) {
+		/* The remaining M0 + padding bytes need to be added */
+		num_pad_bytes = num_pad_bytes - (4 - num_mo_bytes_left);
+
+		/* write 4 bytes of M0 */
+		if (false == ips_hdcp_repeater_wait_for_next_data())
+			goto exit;
+		switch (num_mo_bytes_left) {
+		case 1:
+			ips_hdcp_set_repeater_control(
+				HDCP_REPEATER_24BIT_TEXT_8BIT_MO_IP);
+			break;
+		case 2:
+			ips_hdcp_set_repeater_control(
+				HDCP_REPEATER_16BIT_TEXT_16BIT_MO_IP);
+			break;
+		case 3:
+			ips_hdcp_set_repeater_control(
+				HDCP_REPEATER_8BIT_TEXT_24BIT_MO_IP);
+			break;
+		case 4:
+			ips_hdcp_set_repeater_control(
+				HDCP_REPEATER_32BIT_MO_IP);
+			break;
+		default:
+			/* should never happen */
+			goto exit;
+		}
+
+		hdmi_write32(MDFLD_HDCP_SHA1_IN, (uint32_t)temp_buffer);
+		temp_buffer += 4;
+		num_mo_bytes_left = 0;
+	}
+
+	/* 8. Write the remaining padding bytes and length */
+	/* Remaining data = remaining padding data + 64 bits of length data */
+	rem_text_data = num_pad_bytes + 8;
+
+	if (rem_text_data % 4) {
+		/* Should not happen */
+		pr_debug("hdcp: compute_tx_v - data not aligned\n");
+		goto exit;
+	}
+
+	for (i = 0; i < rem_text_data / 4; i++) {
+		if (false == ips_hdcp_repeater_wait_for_next_data())
+			goto exit;
+
+		ips_hdcp_set_repeater_control(HDCP_REPEATER_32BIT_TEXT_IP);
+		memcpy(&value, temp_buffer, 4);
+		/* Do the big endian conversion */
+		value = HDCP_CONVERT_ENDIANNESS(value);
+		hdmi_write32(MDFLD_HDCP_SHA1_IN, value);
+		temp_buffer += 4;
+	}
+
+	/* Done */
+	ret = true;
+
+exit:
+	kfree(buffer);
+	return ret;
+}
+
+/**
+ * Description: compare hdcp tx & hdcp rx sha1 results
+ *
+ * @rep_prime_v sha1 value from downstream repeater
+ *
+ * Returns:	true if same else false
+ */
+bool ips_hdcp_compare_v(uint32_t *rep_prime_v)
+{
+	bool ret = false;
+	uint32_t i = 10, stat;
+
+	/* Load V' */
+	hdmi_write32(MDFLD_HDCP_VPRIME_H0, *rep_prime_v);
+
+	hdmi_write32(MDFLD_HDCP_VPRIME_H1, *(rep_prime_v + 1));
+
+	hdmi_write32(MDFLD_HDCP_VPRIME_H2, *(rep_prime_v + 2));
+
+	hdmi_write32(MDFLD_HDCP_VPRIME_H3, *(rep_prime_v + 3));
+
+	hdmi_write32(MDFLD_HDCP_VPRIME_H4, *(rep_prime_v + 4));
+
+	if (false == ips_hdcp_repeater_wait_for_next_data())
+		goto exit;
+
+	/* Set HDCP_REP to do the comparison, start
+	 * transmitter's V calculation
+	 */
+	ips_hdcp_set_repeater_control(HDCP_REPEATER_COMPLETE_SHA1);
+
+	msleep(5);
+	do {
+		stat = ips_hdcp_repeater_v_match_check();
+		if (1 == stat) {
+			ret = true; /* match */
+			break;
+		} else if (-1 == stat)
+			msleep(5); /* busy, retry */
+		else
+			break; /* mismatch */
+	} while (--i);
+
+exit:
+	return ret;
+}
+
+/**
+ * Description: disable hdcp
+ *
+ * Returns:	true on successfully disabling hdcp else false
+ */
+bool ips_hdcp_disable(void)
+{
+	ips_hdcp_off();
+	/* Set Rx_Ri to 0 */
+	ips_hdcp_write_rx_ri(0);
+	/* Set Repeater to Not Present */
+	ips_hdcp_set_repeater(false);
+	/* Disable HDCP on this Port */
+	/* ips_hdcp_enable_port(false); */
+	return true;
+}
+
+/**
+ * Description: initialize hdcp tx for authentication
+ *
+ * Returns:	true success else false
+ */
+bool ips_hdcp_init(void)
+{
+	return true;
+}
+
+/**
+ * Description: check whether hdcp tx can authenticate
+ *
+ * Returns:	true if device can authenticate else false
+ */
+bool ips_hdcp_device_can_authenticate(void)
+{
+	return true;
+}
+
+/**
+ * Description: get hardware frame count for cipher Ri update
+ *
+ * @count   framer count for cipher Ri update
+ *
+ * Returns: true if successful else false
+ */
+bool ips_hdcp_get_ri_frame_count(uint8_t *count)
+{
+	struct ips_hdcp_status_reg_t status;
+
+	status.value = ips_hdcp_get_status();
+	*count       = status.frame_count;
+
+	return true;
+}
+
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/ipil/specific/mfld/ips_hdmi.c b/drivers/external_drivers/intel_media/otm_hdmi/ipil/specific/mfld/ips_hdmi.c
new file mode 100644
index 0000000..51f82f6
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/ipil/specific/mfld/ips_hdmi.c
@@ -0,0 +1,499 @@
+/*
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  Contact Information:
+
+  Intel Corporation
+  2200 Mission College Blvd.
+  Santa Clara, CA  95054
+
+  BSD LICENSE
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+
+#include <asm/io.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+
+#include "otm_hdmi.h"
+#include "hdmi_internal.h"
+#include "ips_hdmi.h"
+#include "mfld_hdmi_reg.h"
+#include "ipil_internal.h"
+#include "ipil_utils.h"
+
+/**
+ * Description: disable video infoframe
+ *
+ * @dev:        hdmi_device_t
+ * @type:       type of infoframe packet
+ *
+ * Returns:     OTM_HDMI_ERR_NULL_ARG on NULL parameters
+ *              OTM_HDMI_ERR_INVAL on invalid packet type
+ *              OTM_HDMI_SUCCESS on success
+ */
+otm_hdmi_ret_t ips_hdmi_disable_vid_infoframe(hdmi_device_t *dev,
+					unsigned int type)
+{
+	uint32_t vid_dip_ctl = 0;
+	uint32_t dip_type = 0;
+	uint32_t index = 0;
+
+	if (!dev)
+		return OTM_HDMI_ERR_NULL_ARG;
+
+	/* Enable Particular Packet Type */
+	switch (type) {
+	case HDMI_PACKET_AVI:
+		dip_type = IPS_HDMI_EN_DIP_TYPE_AVI;
+		index = IPS_HDMI_DIP_BUFF_INDX_AVI;
+		break;
+	case HDMI_PACKET_VS:
+		dip_type = IPS_HDMI_EN_DIP_TYPE_VS;
+		index = IPS_HDMI_DIP_BUFF_INDX_VS;
+		break;
+	case HDMI_PACKET_SPD:
+		dip_type = IPS_HDMI_EN_DIP_TYPE_SPD;
+		index = IPS_HDMI_DIP_BUFF_INDX_SPD;
+		break;
+	default:
+		return OTM_HDMI_ERR_INVAL;
+	}
+
+	/* Disable DIP type & set the buffer index & reset access address */
+	vid_dip_ctl = hdmi_read32(IPS_HDMI_VID_DIP_CTL_ADDR);
+	vid_dip_ctl &= ~(dip_type |
+			IPS_HDMI_DIP_BUFF_INDX_MASK |
+			IPS_HDMI_DIP_ACCESS_ADDR_MASK |
+			IPS_HDMI_DIP_TRANSMISSION_FREQ_MASK);
+	vid_dip_ctl |= (index |
+			IPS_HDMI_VID_PORT_B_SELECT |
+			IPS_HDMI_VID_EN_DIP);
+	hdmi_write32(IPS_HDMI_VID_DIP_CTL_ADDR, vid_dip_ctl);
+
+	return OTM_HDMI_SUCCESS;
+}
+
+/**
+ * Description: enable video infoframe
+ *
+ * @dev:        hdmi_device_t
+ * @type:       type of infoframe packet
+ * @pkt:        infoframe packet data
+ * @freq:       number of times packet needs to be sent
+ *
+ * Returns:     OTM_HDMI_ERR_NULL_ARG on NULL parameters
+ *              OTM_HDMI_ERR_INVAL on invalid packet type
+ *              OTM_HDMI_SUCCESS on success
+ */
+otm_hdmi_ret_t ips_hdmi_enable_vid_infoframe(hdmi_device_t *dev,
+		unsigned int type, otm_hdmi_packet_t *pkt, unsigned int freq)
+{
+	otm_hdmi_ret_t rc = OTM_HDMI_SUCCESS;
+	uint32_t vid_dip_ctl = 0;
+	uint32_t index = 0;
+	uint32_t dip_type = 0;
+	uint32_t dip_data = 0;
+
+	if (!dev || !pkt)
+		return OTM_HDMI_ERR_NULL_ARG;
+
+	if (freq > HDMI_DIP_SEND_ATLEAST_EVERY_OTHER_VSYNC)
+		return OTM_HDMI_ERR_INVAL;
+
+	rc = ips_hdmi_disable_vid_infoframe(dev, type);
+	if (rc != OTM_HDMI_SUCCESS)
+		return rc;
+
+	/* Add delay for any Pending transmissions ~ 2 VSync + 3 HSync */
+	msleep_interruptible(32 + 8);
+
+	/* Enable Particular Packet Type */
+	switch (type) {
+	case HDMI_PACKET_AVI:
+		dip_type = IPS_HDMI_EN_DIP_TYPE_AVI;
+		index = IPS_HDMI_DIP_BUFF_INDX_AVI;
+		break;
+	case HDMI_PACKET_VS:
+		dip_type = IPS_HDMI_EN_DIP_TYPE_VS;
+		index = IPS_HDMI_DIP_BUFF_INDX_VS;
+		break;
+	case HDMI_PACKET_SPD:
+		dip_type = IPS_HDMI_EN_DIP_TYPE_SPD;
+		index = IPS_HDMI_DIP_BUFF_INDX_SPD;
+		break;
+	default:
+		return OTM_HDMI_ERR_INVAL;
+	}
+
+	/* Disable DIP type & set the buffer index & reset access address */
+	vid_dip_ctl = hdmi_read32(IPS_HDMI_VID_DIP_CTL_ADDR);
+	vid_dip_ctl &= ~(dip_type |
+			IPS_HDMI_DIP_BUFF_INDX_MASK |
+			IPS_HDMI_DIP_ACCESS_ADDR_MASK |
+			IPS_HDMI_DIP_TRANSMISSION_FREQ_MASK);
+	vid_dip_ctl |= (index |
+			IPS_HDMI_VID_PORT_B_SELECT |
+			IPS_HDMI_VID_EN_DIP);
+	hdmi_write32(IPS_HDMI_VID_DIP_CTL_ADDR, vid_dip_ctl);
+
+	/* Write Packet Data */
+	dip_data = 0;
+	dip_data = (pkt->header[0] << 0) |
+		   (pkt->header[1] << 8) |
+		   (pkt->header[2] << 16);
+	hdmi_write32(IPS_HDMI_VIDEO_DIP_DATA_ADDR, dip_data);
+
+	for (index = 0; index < (HDMI_DIP_PACKET_DATA_LEN/4); index++) {
+		dip_data = pkt->data32[index];
+		hdmi_write32(IPS_HDMI_VIDEO_DIP_DATA_ADDR, dip_data);
+	}
+
+	/* Enable Packet Type & Transmission Frequency */
+	vid_dip_ctl = hdmi_read32(IPS_HDMI_VID_DIP_CTL_ADDR);
+	vid_dip_ctl &= ~IPS_HDMI_VID_PORT_SELECT_MASK;
+	vid_dip_ctl |= (IPS_HDMI_VID_PORT_B_SELECT | IPS_HDMI_VID_EN_DIP);
+	vid_dip_ctl |= (dip_type | (freq << IPS_HDMI_DIP_TX_FREQ_SHIFT));
+	pr_debug("vid_dip_ctl %x\n", vid_dip_ctl);
+	hdmi_write32(IPS_HDMI_VID_DIP_CTL_ADDR, vid_dip_ctl);
+
+	return rc;
+}
+
+/**
+ * Description: disable all infoframes
+ *
+ * @dev:	hdmi_device
+ *
+ * Returns:     OTM_HDMI_ERR_NULL_ARG on NULL parameters
+ *		OTM_HDMI_SUCCESS on success
+ */
+otm_hdmi_ret_t ips_hdmi_disable_all_infoframes(hdmi_device_t *dev)
+{
+	if (!dev)
+		return OTM_HDMI_ERR_NULL_ARG;
+
+	/* Disable Video Related Infoframes */
+	hdmi_write32(IPS_HDMI_VID_DIP_CTL_ADDR, 0x0);
+	/* Disable Audio Related Infoframes */
+	hdmi_write32(IPS_HDMI_AUD_DIP_CTL_ADDR, 0x0);
+
+	/* TODO: Disable other infoframes? */
+
+	return OTM_HDMI_SUCCESS;
+}
+
+/**
+ * Description: save HDMI display registers
+ *
+ * @dev:	hdmi_device_t
+ *
+ * Returns:	none
+ */
+void ips_hdmi_save_display_registers(hdmi_device_t *dev)
+{
+	int i;
+	if (NULL == dev)  {
+		pr_debug("\n%s invalid argument\n", __func__);
+		return;
+	}
+
+	dev->reg_state.saveDPLL = hdmi_read32(IPS_DPLL_B);
+	dev->reg_state.saveFPA0 = hdmi_read32(IPS_DPLL_DIV0);
+	dev->reg_state.savePIPEBCONF = hdmi_read32(IPS_PIPEBCONF);
+	dev->reg_state.saveHTOTAL_B = hdmi_read32(IPS_HTOTAL_B);
+	dev->reg_state.saveHBLANK_B = hdmi_read32(IPS_HBLANK_B);
+	dev->reg_state.saveHSYNC_B = hdmi_read32(IPS_HSYNC_B);
+	dev->reg_state.saveVTOTAL_B = hdmi_read32(IPS_VTOTAL_B);
+	dev->reg_state.saveVBLANK_B = hdmi_read32(IPS_VBLANK_B);
+	dev->reg_state.saveVSYNC_B = hdmi_read32(IPS_VSYNC_B);
+	dev->reg_state.savePIPEBSRC = hdmi_read32(IPS_PIPEBSRC);
+	dev->reg_state.saveDSPBSTRIDE = hdmi_read32(IPS_DSPBSTRIDE);
+	dev->reg_state.saveDSPBLINOFF = hdmi_read32(IPS_DSPBLINOFF);
+	dev->reg_state.saveDSPBTILEOFF = hdmi_read32(IPS_DSPBTILEOFF);
+	dev->reg_state.saveDSPBSIZE = hdmi_read32(IPS_DSPBSIZE);
+	dev->reg_state.saveDSPBPOS = hdmi_read32(IPS_DSPBPOS);
+	dev->reg_state.saveDSPBSURF = hdmi_read32(IPS_DSPBSURF);
+	dev->reg_state.saveDSPBCNTR = hdmi_read32(IPS_DSPBCNTR);
+	dev->reg_state.saveDSPBSTATUS = hdmi_read32(IPS_DSPBSTAT);
+
+	dev->reg_state.savePFIT_CONTROL = hdmi_read32(IPS_PFIT_CONTROL);
+	dev->reg_state.savePFIT_PGM_RATIOS = hdmi_read32(IPS_PFIT_PGM_RATIOS);
+	dev->reg_state.saveHDMIPHYMISCCTL = hdmi_read32(IPS_HDMIPHYMISCCTL);
+	dev->reg_state.saveHDMIB_CONTROL = hdmi_read32(IPS_HDMIB_CONTROL);
+	dev->reg_state.saveHDMIB_DATALANES = hdmi_read32(IPS_HDMIB_LANES02);
+
+	/*save palette (gamma) */
+	for (i = 0; i < 256; i++) {
+		dev->reg_state.save_palette_b[i] =
+			hdmi_read32(IPS_PALETTE_B + (i<<2));
+		udelay(2);
+	}
+
+	dev->reg_state.valid = true;
+}
+
+/**
+ * Description:	saves HDMI data island packets
+ *
+ * @dev:	hdmi_device_t
+ *
+ * Returns:	none
+ */
+void ips_hdmi_save_data_island(hdmi_device_t *dev)
+{
+	uint32_t index = 0;
+	uint32_t reg_val = 0;
+
+	if (NULL == dev)  {
+		pr_debug("\n%s invalid argument\n", __func__);
+		return;
+	}
+
+	/* Save AVI Infoframe Data */
+	reg_val = hdmi_read32(IPS_HDMI_VID_DIP_CTL_ADDR);
+	if ((reg_val & IPS_HDMI_VID_EN_DIP) &&
+	    (reg_val & IPS_HDMI_EN_DIP_TYPE_AVI)) {
+		/* set DIP buffer index to AVI */
+		reg_val &= ~IPS_HDMI_DIP_BUFF_INDX_MASK;
+		reg_val |= IPS_HDMI_DIP_BUFF_INDX_AVI;
+		hdmi_write32(IPS_HDMI_VID_DIP_CTL_ADDR, reg_val);
+		/* set DIP RAM Access Address to 0 */
+		reg_val &= ~IPS_HDMI_DIP_ACCESS_ADDR_MASK;
+		hdmi_write32(IPS_HDMI_VID_DIP_CTL_ADDR, reg_val);
+		reg_val = hdmi_read32(IPS_HDMI_VID_DIP_CTL_ADDR);
+		/* copy transmission frequency */
+		dev->avi.freq =
+			((reg_val & IPS_HDMI_DIP_TRANSMISSION_FREQ_MASK) >>
+				IPS_HDMI_DIP_TX_FREQ_SHIFT);
+		/* copy header */
+		reg_val = hdmi_read32(IPS_HDMI_VIDEO_DIP_DATA_ADDR);
+		dev->avi.pkt.header[0] = reg_val & 0xFF;
+		dev->avi.pkt.header[1] = (reg_val >> 8) & 0xFF;
+		dev->avi.pkt.header[2] = (reg_val >> 16) & 0xFF;
+		/* copy data */
+		for (index = 0; index < (HDMI_DIP_PACKET_DATA_LEN/4); index++) {
+			dev->avi.pkt.data32[index] =
+				hdmi_read32(IPS_HDMI_VIDEO_DIP_DATA_ADDR);
+		}
+		/* set data valid */
+		dev->avi.valid = true;
+	} else
+		dev->avi.valid = false;
+}
+
+/**
+ * Description:	get vic data from data island packets
+ *
+ * @dev:	hdmi_device_t
+ *
+ * Returns:	vic
+ */
+uint8_t ips_hdmi_get_vic_from_data_island(hdmi_device_t *dev)
+{
+	uint8_t vic = 0;
+	uint32_t index = 0;
+	uint32_t reg_val = 0;
+	uint8_t data[HDMI_DIP_PACKET_DATA_LEN];
+	uint32_t * data32 = (uint32_t *) (&data[0]);
+
+	if (NULL == dev)  {
+		pr_debug("\n%s invalid argument\n", __func__);
+		return 0;
+	}
+
+	/* Save AVI Infoframe Data */
+	reg_val = hdmi_read32(IPS_HDMI_VID_DIP_CTL_ADDR);
+	if (reg_val & IPS_HDMI_EN_DIP_TYPE_AVI) {
+		/* set DIP buffer index to AVI */
+		reg_val &= ~IPS_HDMI_DIP_BUFF_INDX_MASK;
+		reg_val |= IPS_HDMI_DIP_BUFF_INDX_AVI;
+		hdmi_write32(IPS_HDMI_VID_DIP_CTL_ADDR, reg_val);
+		/* set DIP RAM Access Address to 0 */
+		reg_val &= ~IPS_HDMI_DIP_ACCESS_ADDR_MASK;
+		hdmi_write32(IPS_HDMI_VID_DIP_CTL_ADDR, reg_val);
+
+		/* copy header */
+		reg_val = hdmi_read32(IPS_HDMI_VIDEO_DIP_DATA_ADDR);
+
+		/* copy data */
+		for (index = 0; index < (HDMI_DIP_PACKET_DATA_LEN/4); index++) {
+			data32[index] =
+				hdmi_read32(IPS_HDMI_VIDEO_DIP_DATA_ADDR);
+		}
+		/* set data valid */
+		vic = data[AVI_VIC_LOC];
+	}
+
+	return vic;
+}
+
+
+/**
+ * Description: disable HDMI display
+ *
+ * @dev:	hdmi_device_t
+ *
+ * Returns:	none
+ */
+void ips_disable_hdmi(hdmi_device_t *dev)
+{
+	int count = 0;
+	u32 temp;
+
+	if (NULL == dev)  {
+		pr_debug("\n%s invalid argument\n", __func__);
+		return;
+	}
+
+	/* Disable display plane */
+	temp = hdmi_read32(IPS_DSPBCNTR);
+	if ((temp & IPIL_DSP_PLANE_ENABLE) != 0) {
+		hdmi_write32(IPS_DSPBCNTR, temp & ~IPIL_DSP_PLANE_ENABLE);
+		/* Flush the plane changes */
+		hdmi_write32(IPS_DSPBSURF, hdmi_read32(IPS_DSPBSURF));
+		hdmi_read32(IPS_DSPBSURF);
+	}
+
+	/* Next, disable display pipes */
+	temp = hdmi_read32(IPS_PIPEBCONF);
+	if ((temp & IPIL_PIPEACONF_ENABLE) != 0) {
+		temp &= ~IPIL_PIPEACONF_ENABLE;
+		temp |= IPIL_PIPECONF_PLANE_OFF | IPIL_PIPECONF_CURSOR_OFF;
+		hdmi_write32(IPS_PIPEBCONF, temp);
+		hdmi_read32(IPS_PIPEBCONF);
+
+		/* Wait for for the pipe disable to take effect. */
+		for (count = 0; count < 1000; count++) {
+			temp = hdmi_read32(IPS_PIPEBCONF);
+			if (!(temp & IPIL_PIPEACONF_PIPE_STATE))
+				break;
+
+			udelay(20);
+		}
+	}
+
+}
+
+/**
+ * Description: restore HDMI data island packets
+ *
+ * @dev:	hdmi_device_t
+ *
+ * Returns:	none
+ */
+void ips_hdmi_restore_data_island(hdmi_device_t *dev)
+{
+	if (NULL == dev)  {
+		pr_debug("\n%s invalid argument\n", __func__);
+		return;
+	}
+
+	/* restore AVI infoframe */
+	if (dev->avi.valid) {
+		if (ips_hdmi_enable_vid_infoframe(dev, HDMI_PACKET_AVI,
+				&dev->avi.pkt, dev->avi.freq) !=
+				OTM_HDMI_SUCCESS)
+			pr_debug("\nfailed to program avi infoframe\n");
+		dev->avi.valid = false;
+	}
+}
+
+/**
+ * Description: destroys any saved HDMI data
+ *
+ * @dev:        hdmi_device_t
+ *
+ * Returns: none
+ */
+void ips_hdmi_destroy_saved_data(hdmi_device_t *dev)
+{
+	if (NULL != dev) {
+		dev->reg_state.valid = false;
+		dev->avi.valid = false;
+	}
+}
+
+/**
+ * Description: disable all planes on pipe
+ *
+ * @pipe:    pipe ID
+ * @enable : true to enable planes; false to disable planes
+ *
+ */
+void ips_enable_planes_on_pipe(uint32_t pipe, bool enable)
+{
+	uint32_t  temp;
+
+	/* Currently, only handle planes on pipe B */
+	switch (pipe) {
+	case 1:
+		temp = hdmi_read32(IPS_PIPEBCONF);
+
+		if (enable)
+			temp &= ~IPIL_PIPECONF_PLANE_OFF;
+		else
+			temp |= IPIL_PIPECONF_PLANE_OFF;
+
+		hdmi_write32(IPS_PIPEBCONF, temp);
+
+		break;
+	default:
+		break;
+	}
+
+	return;
+}
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/ipil/specific/mfld/ips_hdmi_priv.c b/drivers/external_drivers/intel_media/otm_hdmi/ipil/specific/mfld/ips_hdmi_priv.c
new file mode 100644
index 0000000..4cd3bee
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/ipil/specific/mfld/ips_hdmi_priv.c
@@ -0,0 +1,452 @@
+/*
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  Contact Information:
+
+  Intel Corporation
+  2200 Mission College Blvd.
+  Santa Clara, CA  95054
+
+  BSD LICENSE
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+
+#include <asm/io.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+
+#include "otm_hdmi.h"
+#include "hdmi_internal.h"
+#include "ips_hdmi.h"
+#include "mfld_hdmi_reg.h"
+#include "ipil_internal.h"
+#include "ipil_utils.h"
+
+#define IPS_MIN_PIXEL_CLOCK 25174	/* 640x480@59.94Hz */
+#define IPS_MAX_PIXEL_CLOCK 74250	/* 1920x1080@30Hz */
+#define IPS_PREFERRED_HDISPLAY 1920
+#define IPS_PREFERRED_VDISPLAY 1080
+#define IPS_PREFERRED_REFRESH_RATE 30
+
+/* Clock Related Definitions
+ * Min/Max value based on DPLL parameter interface table
+ * from Penwell Display HAS
+ */
+#define IPS_DOT_MIN		19750
+#define IPS_DOT_MAX		120000
+
+#define IPS_DPLL_M_MIN_19	105
+#define IPS_DPLL_M_MAX_19	197
+#define IPS_DPLL_P1_MIN_19	2
+#define IPS_DPLL_P1_MAX_19	10
+#define IPS_LIMIT_DPLL_19	0
+#define IPS_VCO_SEL		(1 << 16)
+
+#define IPS_M_MIN		21
+
+static const struct ips_clock_limits_t ips_clock_limits[] = {
+	{	/* CRYSTAL_19 */
+	 .dot = {.min = IPS_DOT_MIN, .max = IPS_DOT_MAX},
+	 .m = {.min = IPS_DPLL_M_MIN_19, .max = IPS_DPLL_M_MAX_19},
+	 .p1 = {.min = IPS_DPLL_P1_MIN_19, .max = IPS_DPLL_P1_MAX_19},
+	 },
+};
+
+static const u32 ips_m_converts[] = {
+/* M configuration table from 9-bit LFSR table */
+	224, 368, 440, 220, 366, 439, 219, 365, 182, 347, /* 21 - 30 */
+	173, 342, 171, 85, 298, 149, 74, 37, 18, 265,   /* 31 - 40 */
+	388, 194, 353, 432, 216, 108, 310, 155, 333, 166, /* 41 - 50 */
+	83, 41, 276, 138, 325, 162, 337, 168, 340, 170, /* 51 - 60 */
+	341, 426, 469, 234, 373, 442, 221, 110, 311, 411, /* 61 - 70 */
+	461, 486, 243, 377, 188, 350, 175, 343, 427, 213, /* 71 - 80 */
+	106, 53, 282, 397, 354, 227, 113, 56, 284, 142, /* 81 - 90 */
+	71, 35, 273, 136, 324, 418, 465, 488, 500, 506, /* 91 - 100 */
+	253, 126, 63, 287, 399, 455, 483, 241, 376, 444, /* 101 - 110 */
+	478, 495, 503, 251, 381, 446, 479, 239, 375, 443, /* 111 - 120 */
+	477, 238, 119, 315, 157, 78, 295, 147, 329, 420, /* 121 - 130 */
+	210, 105, 308, 154, 77, 38, 275, 137, 68, 290, /* 131 - 140 */
+	145, 328, 164, 82, 297, 404, 458, 485, 498, 249, /* 141 - 150 */
+	380, 190, 351, 431, 471, 235, 117, 314, 413, 206, /* 151 - 160 */
+	103, 51, 25, 12, 262, 387, 193, 96, 48, 280, /* 161 - 170 */
+	396, 198, 99, 305, 152, 76, 294, 403, 457, 228, /* 171 - 180 */
+	114, 313, 156, 334, 423, 467, 489, 244, 378, 445, /*181 - 190 */
+	222, 367, 183, 91, 45, 22, 11, 261, 130, 321, /* 191 - 200 */
+};
+
+/**
+ * Description: get pixel clock range
+ *
+ * @pc_min:	minimum pixel clock
+ * @pc_max:	maximum pixel clock
+ *
+ * Returns:	OTM_HDMI_SUCCESS on success
+ *		OTM_HDMI_ERR_FAILED on NULL input arguments.
+ */
+otm_hdmi_ret_t ips_get_pixel_clock_range(unsigned int *pc_min,
+						unsigned int *pc_max)
+{
+	if (!pc_min || !pc_max)
+		return OTM_HDMI_ERR_FAILED;
+
+	*pc_min = IPS_MIN_PIXEL_CLOCK;
+	*pc_max = IPS_MAX_PIXEL_CLOCK;
+	return OTM_HDMI_SUCCESS;
+}
+
+/**
+ * Returns if the given values is preferred mode or not
+ * @hdisplay	: width
+ * @vdisplay	: height
+ * @refresh	: refresh rate
+ *
+ * Returns true if preferred mode else false
+ */
+bool ips_hdmi_is_preferred_mode(int hdisplay, int vdisplay, int refresh)
+{
+	if (hdisplay == IPS_PREFERRED_HDISPLAY &&
+		vdisplay == IPS_PREFERRED_VDISPLAY &&
+		refresh == IPS_PREFERRED_REFRESH_RATE)
+		return true;
+	else
+		return false;
+}
+
+/*
+ * Derive the pixel clock for the given refclk and
+ * divisors for 8xx chips.
+ */
+static void __ips_hdmi_derive_dot_clock(int refclk, struct ips_clock_t *clock)
+{
+	clock->dot = (refclk * clock->m) / clock->p1;
+}
+
+static const struct ips_clock_limits_t *__ips_hdmi_clk_limits(void)
+{
+	const struct ips_clock_limits_t *limit = NULL;
+
+	/*
+	 * CRYSTAL_19 is enabled for medfield.
+	 * Expand this logic for other types.
+	 */
+	limit = &ips_clock_limits[IPS_LIMIT_DPLL_19];
+	return limit;
+}
+
+static bool __ips_hdmi_find_bestPll(int target, int refclk,
+					struct ips_clock_t *best_clock)
+{
+	struct ips_clock_t clock;
+	const struct ips_clock_limits_t *limit = __ips_hdmi_clk_limits();
+	int err = target;
+
+	memset(best_clock, 0, sizeof(*best_clock));
+	for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
+		for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max;
+		     clock.p1++) {
+			int this_err;
+
+			__ips_hdmi_derive_dot_clock(refclk, &clock);
+
+			this_err = abs(clock.dot - target);
+			if (this_err < err) {
+				*best_clock = clock;
+				err = this_err;
+			}
+		}
+	}
+	return err != target;
+}
+
+/**
+ * Description: gets the best dpll clock value based on
+ *		current timing mode clock.
+ *
+ * @clk:	refresh rate dot clock in kHz of current mode
+ * @pdpll, pfp:	will be set to adjusted dpll values.
+ * @pclock_khz:	tmds clk value for the best pll and is needed for audio.
+ *		This field has to be moved into OTM audio
+ *		interfaces when implemented
+ *
+ * Returns:	OTM_HDMI_SUCCESS on success
+ *		OTM_HDMI_ERR_INVAL on NULL input arguments.
+ */
+otm_hdmi_ret_t __ips_hdmi_get_adjusted_clk(unsigned long clk,
+					u32 *pdpll, u32 *pfp,
+					uint32_t *pclock_khz)
+{
+	int refclk;
+	int clk_n;
+	int clk_p2;
+	int clk_byte = 1;
+	int m_conv = 0;
+	int clk_tmp;
+	u32 dpll, fp;
+	bool ret;
+	struct ips_clock_t clock;
+
+	/* NULL checks */
+	if (pdpll == NULL || pfp == NULL || pclock_khz == NULL) {
+		pr_debug("\ninvalid argument\n");
+		return OTM_HDMI_ERR_INVAL;
+	}
+
+	/* values corresponds to CRYSTAL_19, as this is enabled on mdfld */
+	refclk = 19200;
+	clk_n = 1;
+	clk_p2 = 10;
+
+	clk_tmp = clk * clk_n * clk_p2 * clk_byte;
+	ret = __ips_hdmi_find_bestPll(clk_tmp, refclk, &clock);
+	/*
+	 * TODO: tmds clk value for the best pll found and is needed for audio.
+	 * This field has to be moved into OTM audio interfaces
+	 * when implemented.
+	 */
+	*pclock_khz = clock.dot / (clk_n * clk_p2 * clk_byte);
+	if (ret)
+		m_conv = ips_m_converts[(clock.m - IPS_M_MIN)];
+
+	dpll = 0;
+	dpll |= IPS_VCO_SEL;
+	/* compute bitmask from p1 value */
+	dpll |= (1 << (clock.p1 - 2)) << 17;
+
+	fp = (clk_n / 2) << 16;
+	fp |= m_conv;
+
+	/* update the pointers */
+	*pdpll = dpll;
+	*pfp = fp;
+
+	return OTM_HDMI_SUCCESS;
+}
+
+/**
+ * Description: gets dpll clocks
+ *
+ * @dev:	hdmi_device_t
+ * @dclk:	refresh rate dot clock in kHz of current mode
+ *
+ * Returns:	OTM_HDMI_SUCCESS on success
+ *		OTM_HDMI_ERR_INVAL on NULL input arguments
+ */
+otm_hdmi_ret_t	ips_hdmi_crtc_mode_get_program_dpll(hdmi_device_t *dev,
+							unsigned long dclk)
+{
+	pr_debug("enter %s\n", __func__);
+
+	return OTM_HDMI_SUCCESS;
+}
+
+/**
+ * Description: programs dpll clocks, enables dpll and waits
+ *		till it locks with DSI PLL
+ *
+ * @dev:	hdmi_device_t
+ * @dclk:	refresh rate dot clock in kHz of current mode
+ *
+ * Returns:	OTM_HDMI_SUCCESS on success
+ *		OTM_HDMI_ERR_INVAL on NULL input arguments
+ */
+otm_hdmi_ret_t	ips_hdmi_crtc_mode_set_program_dpll(hdmi_device_t *dev,
+							unsigned long dclk)
+{
+	otm_hdmi_ret_t rc = OTM_HDMI_SUCCESS;
+	u32 dpll_adj, fp;
+	u32 dpll;
+	int timeout = 0;
+
+	/* NULL checks */
+	if (dev == NULL) {
+		pr_debug("\ninvalid argument\n");
+		return OTM_HDMI_ERR_INVAL;
+	}
+
+	rc = __ips_hdmi_get_adjusted_clk(dclk, &dpll_adj, &fp, &dev->clock_khz);
+	dpll = hdmi_read32(IPIL_DPLL_B);
+	if (dpll & IPIL_DPLL_VCO_ENABLE) {
+		dpll &= ~IPIL_DPLL_VCO_ENABLE;
+		hdmi_write32(IPIL_DPLL_B, dpll);
+		hdmi_read32(IPIL_DPLL_B);
+
+		/* reset M1, N1 & P1 */
+		hdmi_write32(IPIL_DPLL_DIV0, 0);
+		dpll &= ~IPIL_P1_MASK;
+		hdmi_write32(IPIL_DPLL_B, dpll);
+	}
+
+	/*
+	 * When ungating power of DPLL, needs to wait 0.5us
+	 * before enable the VCO
+	 */
+	if (dpll & IPIL_PWR_GATE_EN) {
+		dpll &= ~IPIL_PWR_GATE_EN;
+		hdmi_write32(IPIL_DPLL_B, dpll);
+		udelay(1);
+	}
+
+	dpll = dpll_adj;
+	hdmi_write32(IPIL_DPLL_DIV0, fp);
+	hdmi_write32(IPIL_DPLL_B, dpll);
+	udelay(1);
+
+	dpll |= IPIL_DPLL_VCO_ENABLE;
+	hdmi_write32(IPIL_DPLL_B, dpll);
+	hdmi_read32(IPIL_DPLL_B);
+
+	/* wait for DSI PLL to lock */
+	while ((timeout < 20000) && !(hdmi_read32(IPIL_PIPEBCONF) &
+					IPIL_PIPECONF_PLL_LOCK)) {
+		udelay(150);
+		timeout++;
+	}
+
+	return OTM_HDMI_SUCCESS;
+}
+
+/**
+ * Description: restore HDMI display registers and enable display
+ *
+ * @dev:	hdmi_device_t
+ *
+ * Returns:	none
+ */
+void ips_hdmi_restore_and_enable_display(hdmi_device_t *dev)
+{
+	int i;
+	u32 dpll = 0;
+	u32 dpll_val;
+	if (NULL == dev)  {
+		pr_debug("\n%s invalid argument\n", __func__);
+		return;
+	}
+	if (dev->reg_state.valid == false) {
+		pr_debug("\nhdmi no data to restore\n");
+		return;
+	}
+
+	/*make sure VGA plane is off. it initializes to on after reset!*/
+	hdmi_write32(IPIL_VGACNTRL, IPIL_VGA_DISP_DISABLE);
+
+	dpll = hdmi_read32(IPS_DPLL_B);
+	if (!(dpll & IPIL_DPLL_VCO_ENABLE)) {
+		/**
+		 * When ungating power of DPLL, needs to wait 0.5us
+		 * before enable the VCO
+		 */
+		if (dpll & IPIL_DPLL_PWR_GATE_EN) {
+			dpll &= ~IPIL_DPLL_PWR_GATE_EN;
+			hdmi_write32(IPS_DPLL_B, dpll);
+			udelay(1);
+		}
+
+		hdmi_write32(IPS_DPLL_DIV0, dev->reg_state.saveFPA0);
+
+		dpll_val = dev->reg_state.saveDPLL & ~IPIL_DPLL_VCO_ENABLE;
+		hdmi_write32(IPS_DPLL_B, dpll_val);
+		udelay(1);
+
+		dpll_val |= IPIL_DPLL_VCO_ENABLE;
+		hdmi_write32(IPS_DPLL_B, dpll_val);
+		hdmi_read32(IPS_DPLL_B);
+
+	}
+
+	/* Restore mode */
+	hdmi_write32(IPS_HTOTAL_B, dev->reg_state.saveHTOTAL_B);
+	hdmi_write32(IPS_HBLANK_B, dev->reg_state.saveHBLANK_B);
+	hdmi_write32(IPS_HSYNC_B, dev->reg_state.saveHSYNC_B);
+	hdmi_write32(IPS_VTOTAL_B, dev->reg_state.saveVTOTAL_B);
+	hdmi_write32(IPS_VBLANK_B, dev->reg_state.saveVBLANK_B);
+	hdmi_write32(IPS_VSYNC_B, dev->reg_state.saveVSYNC_B);
+	hdmi_write32(IPS_PIPEBSRC, dev->reg_state.savePIPEBSRC);
+	hdmi_write32(IPS_DSPBSTAT, dev->reg_state.saveDSPBSTATUS);
+
+	/*set up the plane*/
+	hdmi_write32(IPS_DSPBSTRIDE, dev->reg_state.saveDSPBSTRIDE);
+	hdmi_write32(IPS_DSPBLINOFF, dev->reg_state.saveDSPBLINOFF);
+	hdmi_write32(IPS_DSPBTILEOFF, dev->reg_state.saveDSPBTILEOFF);
+	hdmi_write32(IPS_DSPBSIZE, dev->reg_state.saveDSPBSIZE);
+	hdmi_write32(IPS_DSPBPOS, dev->reg_state.saveDSPBPOS);
+	hdmi_write32(IPS_DSPBSURF, dev->reg_state.saveDSPBSURF);
+
+	hdmi_write32(IPS_PFIT_CONTROL, dev->reg_state.savePFIT_CONTROL);
+	hdmi_write32(IPS_PFIT_PGM_RATIOS, dev->reg_state.savePFIT_PGM_RATIOS);
+	hdmi_write32(IPS_HDMIPHYMISCCTL, dev->reg_state.saveHDMIPHYMISCCTL);
+	hdmi_write32(IPS_HDMIB_CONTROL, dev->reg_state.saveHDMIB_CONTROL);
+
+	/*enable the plane*/
+	hdmi_write32(IPS_DSPBCNTR, dev->reg_state.saveDSPBCNTR);
+	hdmi_write32(IPS_HDMIB_LANES02, dev->reg_state.saveHDMIB_DATALANES);
+	hdmi_write32(IPS_HDMIB_LANES3, dev->reg_state.saveHDMIB_DATALANES);
+
+	if (in_atomic() || in_interrupt()) {
+		/*  udelay arg must be < 20000 */
+		udelay(19999);
+	} else
+		msleep_interruptible(20);
+
+	/*enable the pipe */
+	hdmi_write32(IPS_PIPEBCONF, dev->reg_state.savePIPEBCONF);
+
+	/* restore palette (gamma) */
+	for (i = 0; i < 256; i++)
+		hdmi_write32(IPS_PALETTE_B + (i<<2),
+				dev->reg_state.save_palette_b[i]);
+
+	dev->reg_state.valid = false;
+}
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/ipil/specific/mfld/mfld_hdcp_reg.h b/drivers/external_drivers/intel_media/otm_hdmi/ipil/specific/mfld/mfld_hdcp_reg.h
new file mode 100644
index 0000000..c1a93cb
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/ipil/specific/mfld/mfld_hdcp_reg.h
@@ -0,0 +1,206 @@
+/*
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  Contact Information:
+
+  Intel Corporation
+  2200 Mission College Blvd.
+  Santa Clara, CA  95054
+
+  BSD LICENSE
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#ifndef MFLD_HDCP_REG_H
+#define MFLD_HDCP_REG_H
+
+
+/* Register Definitions */
+#define MDFLD_HDMIB_CNTRL_REG		0x61140
+#define MDFLD_HDMIB_HDCP_PORT_SEL	(0x1 << 5)
+
+/* HDCP Config & Status */
+#define MDFLD_HDCP_CONFIG_REG		0x61400
+#define MDFLD_HDCP_STATUS_REG		0x61448
+/* AN & AKSV */
+#define MDFLD_HDCP_INIT_REG		0x61404
+#define MDFLD_HDCP_AN_LOW_REG		0x61410
+#define MDFLD_HDCP_AN_HI_REG		0x61414
+#define MDFLD_HDCP_AKSV_HI_REG		0x61450
+#define MDFLD_HDCP_AKSV_LOW_REG		0x61454
+/* BKSV */
+#define MDFLD_HDCP_BKSV_LOW_REG		0x61408
+#define MDFLD_HDCP_BKSV_HI_REG		0x6140C
+/* Rx-Ri */
+#define MDFLD_HDCP_RECEIVER_RI_REG	0x61418
+/* Repeater Control & Status */
+#define MDFLD_HDCP_REP_REG		0x61444
+/* Repeater SHA */
+#define MDFLD_HDCP_VPRIME_H0		0x6142C
+#define MDFLD_HDCP_VPRIME_H1		0x61430
+#define MDFLD_HDCP_VPRIME_H2		0x61434
+#define MDFLD_HDCP_VPRIME_H3		0x61438
+#define MDFLD_HDCP_VPRIME_H4		0x6143C
+#define MDFLD_HDCP_SHA1_IN		0x61440
+/* Akey */
+#define MDFLD_HDCP_AKEY_LO_REG		0x6141C
+#define MDFLD_HDCP_AKEY_MED_REG		0x61420
+#define MDFLD_HDCP_AKEY_HI_REG		0x61424
+
+#define HDCP_CONVERT_ENDIANNESS(x)	(((x&0x000000ff)<<24)|\
+					((x&0x0000ff00)<<8)|\
+					((x&0x00ff0000)>>8)|\
+					((x&0xff000000)>>24))
+
+struct double_word_t {
+	union {
+		uint64_t value;
+		struct {
+			uint32_t low;
+			uint32_t high;
+		};
+		struct {
+			uint8_t byte[8];
+		};
+	};
+};
+
+struct sqword_t {
+	union {
+		unsigned long long quad_part;
+		struct {
+			unsigned long low_part;
+			unsigned long high_part;
+		} u;
+		struct {
+			uint8_t byte[8];
+		};
+	};
+};
+
+enum ips_hdcp_config_enum {
+	HDCP_Off = 0,
+	HDCP_CAPTURE_AN = 1,
+	HDCP_DECRYPT_KEYS = 2,
+	HDCP_AUTHENTICATE_AND_ENCRYPT = 3,
+	HDCP_PULL_FUSE = 4,
+	HDCP_UNIQUE_MCH_ID = 5,
+	HDCP_ENCRYPT_KEYS = 6,
+	HDCP_CYPHER_CHECK_MODE = 7,
+	HDCP_FUSE_PULL_ENABLE = 0x20
+};
+
+struct ips_hdcp_config_reg_t {
+		union {
+		uint32_t value;
+		struct {
+			uint32_t hdcp_config:3;
+			uint32_t reserved:29;
+		};
+	};
+};
+
+struct ips_hdcp_status_reg_t {
+	union {
+		uint32_t value;
+		struct {
+			uint32_t ainfo:8;
+			uint32_t frame_count:8;
+			uint32_t hdcp_on:1;
+			uint32_t an_ready:1;
+			uint32_t ri_ready:1;
+			uint32_t ri_match:1;
+			uint32_t encrypting:1;
+			uint32_t ready_for_encr:1;
+			uint32_t umch_id_ready:1;
+			uint32_t mac_status:1;
+			uint32_t fus_complete:1;
+			uint32_t fus_success:1;
+			uint32_t reserved:6;
+		};
+	};
+};
+
+/* Repeater Control register */
+enum ips_hdcp_repeater_status_enum {
+	HDCP_REPEATER_STATUS_IDLE = 0,
+	HDCP_REPEATER_STATUS_BUSY = 1,
+	HDCP_REPEATER_STATUS_RDY_NEXT_DATA = 2,
+	HDCP_REPEATER_STATUS_COMPLETE_NO_MATCH = 4,
+	HDCP_REPEATER_STATUS_COMPLETE_MATCH = 12
+};
+
+enum ips_hdcp_repeater_ctrl_enum {
+	HDCP_REPEATER_CTRL_IDLE = 0,
+	HDCP_REPEATER_32BIT_TEXT_IP = 1,
+	HDCP_REPEATER_COMPLETE_SHA1 = 2,
+	HDCP_REPEATER_24BIT_TEXT_8BIT_MO_IP = 4,
+	HDCP_REPEATER_16BIT_TEXT_16BIT_MO_IP = 5,
+	HDCP_REPEATER_8BIT_TEXT_24BIT_MO_IP = 6,
+	HDCP_REPEATER_32BIT_MO_IP = 7
+};
+
+struct ips_hdcp_repeater_reg_t {
+	union {
+		uint32_t value;
+		struct {
+			uint32_t present:1;
+			uint32_t control:3;
+			uint32_t reserved1:12;
+			const uint32_t status:4;
+			uint32_t reserved2:12;
+		};
+	};
+};
+
+#endif /* MFLD_HDCP_REG_H */
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/ipil/specific/mfld/mfld_hdmi_reg.h b/drivers/external_drivers/intel_media/otm_hdmi/ipil/specific/mfld/mfld_hdmi_reg.h
new file mode 100644
index 0000000..9a71bcd
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/ipil/specific/mfld/mfld_hdmi_reg.h
@@ -0,0 +1,99 @@
+/*
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  Contact Information:
+
+  Intel Corporation
+  2200 Mission College Blvd.
+  Santa Clara, CA  95054
+
+  BSD LICENSE
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#ifndef __MFLD_HDMI_REG_H
+#define __MFLD_HDMI_REG_H
+
+/* Video Data Island Packet Control */
+#define IPS_HDMI_VID_DIP_CTL_ADDR		(0x61170)
+
+#define IPS_HDMI_VID_EN_DIP			((1) << 31)
+#define IPS_HDMI_VID_PORT_SELECT_MASK		((0x3) << 29)
+#define IPS_HDMI_VID_PORT_B_SELECT		((1) << 29)
+
+/* Video DIP Type Values Bits 24:21 */
+#define IPS_HDMI_EN_DIP_TYPE_MASK		((0xF) << 21)
+#define IPS_HDMI_EN_DIP_TYPE_AVI		((1) << 21)
+#define IPS_HDMI_EN_DIP_TYPE_VS			((1) << 22)
+#define IPS_HDMI_EN_DIP_TYPE_SPD		((1) << 24)
+
+/* Video DIP Type Buffer Index Bits 20:19 */
+#define IPS_HDMI_DIP_BUFF_INDX_MASK		((0x3) << 19)
+#define IPS_HDMI_DIP_BUFF_INDX_AVI		((0x0) << 19)
+#define IPS_HDMI_DIP_BUFF_INDX_VS		((0x1) << 19)
+#define IPS_HDMI_DIP_BUFF_INDX_SPD		((0x3) << 19)
+
+/* Video Dip Transmission Frequency 17:16 */
+#define IPS_HDMI_DIP_TX_FREQ_SHIFT		(16)
+#define IPS_HDMI_DIP_TRANSMISSION_FREQ_MASK	((0x3) << 16)
+
+/* Video Dip Access Address 3:0 */
+#define IPS_HDMI_DIP_ACCESS_ADDR_MASK		(0xF)
+
+/* Video Dip Data Register */
+#define IPS_HDMI_VIDEO_DIP_DATA_ADDR		(0x61178)
+
+/* Audio Data Island Packet Control */
+#define IPS_HDMI_AUD_DIP_CTL_ADDR		(0x69060)
+
+#endif /* MFLD_HDMI_REG_H */
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/ipil/specific/mrfld/ips_hdcp.c b/drivers/external_drivers/intel_media/otm_hdmi/ipil/specific/mrfld/ips_hdcp.c
new file mode 100644
index 0000000..7746614
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/ipil/specific/mrfld/ips_hdcp.c
@@ -0,0 +1,890 @@
+/*
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  Contact Information:
+
+  Intel Corporation
+  2200 Mission College Blvd.
+  Santa Clara, CA  95054
+
+  BSD LICENSE
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include "hdcp_rx_defs.h"
+#include "mfld_hdcp_reg.h"
+#include "ipil_utils.h"
+#include "ipil_hdcp_api.h"
+#include "ips_hdcp_api.h"
+
+static void ips_hdcp_capture_an(void);
+static bool ips_hdcp_is_hdcp_on(void);
+static bool ips_hdcp_is_an_ready(void);
+static void ips_hdcp_read_an(uint8_t *an);
+static void ips_hdcp_write_rx_ri(uint16_t rx_ri);
+static void ips_hdcp_set_config(int val);
+static int ips_hdcp_get_config(void);
+static bool ips_hdcp_is_encrypting(void) __attribute__((unused));
+static uint8_t ips_hdcp_get_repeater_control(void)__attribute__((unused));
+static void ips_hdcp_set_repeater_control(int value) __attribute__((unused));
+static uint8_t ips_hdcp_get_repeater_status(void);
+static int ips_hdcp_repeater_v_match_check(void);
+static bool ips_hdcp_repeater_is_busy(void) __attribute__((unused));
+static bool ips_hdcp_repeater_rdy_for_nxt_data(void);
+static bool ips_hdcp_repeater_is_idle(void);
+static bool ips_hdcp_repeater_wait_for_next_data(void);
+static bool ips_hdcp_repeater_wait_for_idle(void);
+static void ips_hdcp_off(void);
+
+/**
+ * Description: read register for hdcp status
+ *
+ * Returns:	value of hdcp status register
+ */
+static uint32_t ips_hdcp_get_status(void)
+{
+	return hdmi_read32(MDFLD_HDCP_STATUS_REG);
+}
+
+/**
+ * Description: enable hdcp on hdmi port
+ *
+ * @enable	enable or disable hdcp on hdmi port
+ *
+ * Returns:	none
+ */
+static void ips_hdcp_enable_port(bool enable)
+{
+	uint32_t hdmib_reg = hdmi_read32(MDFLD_HDMIB_CNTRL_REG);
+	if (enable)
+		hdmib_reg |= MDFLD_HDMIB_HDCP_PORT_SEL;
+	else
+		hdmib_reg &= ~MDFLD_HDMIB_HDCP_PORT_SEL;
+	hdmi_write32(MDFLD_HDMIB_CNTRL_REG, hdmib_reg);
+}
+
+/**
+ * Description: generate an for new authentication
+ *
+ * Returns:	none
+ */
+static void ips_hdcp_capture_an(void)
+{
+	hdmi_write32(MDFLD_HDCP_INIT_REG, (uint32_t) jiffies);
+	hdmi_write32(MDFLD_HDCP_INIT_REG, (uint32_t) (jiffies >> 1));
+	hdmi_write32(MDFLD_HDCP_CONFIG_REG, HDCP_CAPTURE_AN);
+}
+
+/**
+ * Description: check if hdcp is enabled on hdmi port
+ *
+ * Returns:	true if enabled else false
+ */
+static bool ips_hdcp_is_hdcp_on(void)
+{
+	struct ips_hdcp_status_reg_t status;
+	status.value = ips_hdcp_get_status();
+
+	if (status.hdcp_on)
+		return true;
+
+	return false;
+}
+
+/**
+ * Description: check if an is ready for use
+ *
+ * Returns:	true if ready else false
+ */
+static bool ips_hdcp_is_an_ready(void)
+{
+	struct ips_hdcp_status_reg_t status;
+	status.value = ips_hdcp_get_status();
+
+	if (status.an_ready)
+		return true;
+
+	return false;
+}
+
+/**
+ * Description: read an from hdcp tx
+ *
+ * @an		buffer to copy the an into
+ *
+ * Returns:	none
+ */
+static void ips_hdcp_read_an(uint8_t *an)
+{
+	uint8_t i = 0;
+	struct double_word_t temp;
+	temp.value = 0;
+	temp.low = hdmi_read32(MDFLD_HDCP_AN_LOW_REG);
+	temp.high = hdmi_read32(MDFLD_HDCP_AN_HI_REG);
+	for (i = 0; i < HDCP_AN_SIZE; i++)
+		an[i] = temp.byte[i];
+}
+
+/**
+ * Description: write rx_ri into hdcp tx register
+ *
+ * @rx_ri	downstream device's ri value
+ *
+ * Returns:	none
+ */
+static void ips_hdcp_write_rx_ri(uint16_t rx_ri)
+{
+	hdmi_write32(MDFLD_HDCP_RECEIVER_RI_REG, rx_ri);
+}
+
+/**
+ * Description: set config value in hdcp tx configuration register
+ *
+ * @val		value to be written into the configuration register's
+ *		config bits
+ *
+ * Returns:	none
+ */
+static void ips_hdcp_set_config(int val)
+{
+	struct ips_hdcp_config_reg_t config;
+	config.value = hdmi_read32(MDFLD_HDCP_CONFIG_REG);
+	config.hdcp_config = val;
+	hdmi_write32(MDFLD_HDCP_CONFIG_REG, config.value);
+}
+
+/**
+ * Description: read hdcp tx config bits
+ *
+ * Returns:	hdcp tx configuration register's config bits
+ */
+static int ips_hdcp_get_config(void)
+{
+	struct ips_hdcp_config_reg_t config;
+	config.value = hdmi_read32(MDFLD_HDCP_CONFIG_REG);
+	return config.hdcp_config;
+}
+
+/**
+ * Description: check whether hdcp configuration is set to encrypting
+ *
+ * Returns:	true if set to encrypting else false
+ */
+static bool ips_hdcp_config_is_encrypting(void)
+{
+	if (ips_hdcp_get_config() == HDCP_AUTHENTICATE_AND_ENCRYPT)
+		return true;
+	return false;
+}
+
+/**
+ * Description: check whether hdcp is encrypting data
+ *
+ * Returns:	true if encrypting else false
+ */
+static bool ips_hdcp_is_encrypting(void)
+{
+	struct ips_hdcp_status_reg_t status;
+	status.value = ips_hdcp_get_status();
+
+	if (status.encrypting)
+		return true;
+
+	return false;
+}
+
+/**
+ * Description: get control bits of hdcp-tx repeater reguister
+ *
+ * Returns:	repeater control bits
+ */
+static uint8_t ips_hdcp_get_repeater_control(void)
+{
+	struct ips_hdcp_repeater_reg_t repeater;
+	repeater.value = hdmi_read32(MDFLD_HDCP_REP_REG);
+	return repeater.control;
+}
+
+/**
+ * Description: set control bits of hdcp-tx repeater reguister
+ *
+ * @value	value of the control bits
+ *
+ * Returns:	none
+ */
+static void ips_hdcp_set_repeater_control(int value)
+{
+	struct ips_hdcp_repeater_reg_t repeater;
+	repeater.value = hdmi_read32(MDFLD_HDCP_REP_REG);
+	repeater.control = value;
+	hdmi_write32(MDFLD_HDCP_REP_REG, repeater.value);
+}
+
+/**
+ * Description: get status bits of hdcp-tx repeater reguister
+ *
+ * Returns:	repeater status bits
+ */
+static uint8_t ips_hdcp_get_repeater_status(void)
+{
+	struct ips_hdcp_repeater_reg_t repeater;
+	repeater.value = hdmi_read32(MDFLD_HDCP_REP_REG);
+	return repeater.status;
+}
+
+/**
+ * Description: check the status of SHA1 match
+ *
+ * Returns:	0	on error
+ *		1	on match
+ *		-1	if busy
+ */
+static int ips_hdcp_repeater_v_match_check(void)
+{
+	uint8_t status = ips_hdcp_get_repeater_status();
+	switch (status) {
+	case HDCP_REPEATER_STATUS_COMPLETE_MATCH:
+		return 1;
+	case HDCP_REPEATER_STATUS_BUSY:
+		return -1;
+	default:
+		return 0;
+	}
+}
+
+/**
+ * Description: check if repeater is busy
+ *
+ * Returns:	true if busy else false
+ */
+static bool ips_hdcp_repeater_is_busy(void)
+{
+	uint8_t status = ips_hdcp_get_repeater_status();
+	if (status == HDCP_REPEATER_STATUS_BUSY)
+		return true;
+	return false;
+}
+
+/**
+ * Description: check if repeater is ready for next data
+ *
+ * Returns:	true if ready else false
+ */
+static bool ips_hdcp_repeater_rdy_for_nxt_data(void)
+{
+	uint8_t status = ips_hdcp_get_repeater_status();
+	if (status == HDCP_REPEATER_STATUS_RDY_NEXT_DATA)
+		return true;
+	return false;
+}
+
+/**
+ * Description: check if repeater is idle
+ *
+ * Returns:	true if idle else false
+ */
+static bool ips_hdcp_repeater_is_idle(void)
+{
+	uint8_t status = ips_hdcp_get_repeater_status();
+	if (status == HDCP_REPEATER_STATUS_IDLE)
+		return true;
+	return false;
+}
+
+/**
+ * Description: wait for hdcp repeater to be ready for next data
+ *
+ * Returns:	true if ready else false
+ */
+static bool ips_hdcp_repeater_wait_for_next_data(void)
+{
+	uint16_t i = 0;
+	for (; i < HDCP_MAX_RETRY_STATUS; i++) {
+		if (ips_hdcp_repeater_rdy_for_nxt_data())
+			return true;
+	}
+	return false;
+}
+
+/**
+ * Description: wait for hdcp repeater to get into idle state
+ *
+ * Returns:	true if repeater is in idle state else false
+ */
+static bool ips_hdcp_repeater_wait_for_idle(void)
+{
+	uint16_t i = 0;
+	for (; i < HDCP_MAX_RETRY_STATUS; i++) {
+		if (ips_hdcp_repeater_is_idle())
+			return true;
+	}
+	return false;
+}
+
+/**
+ * Description: switch off hdcp by setting in the config register
+ *
+ * Returns:	none
+ */
+static void ips_hdcp_off(void)
+{
+	ips_hdcp_set_config(HDCP_Off);
+}
+
+/**
+ * Description: check whether hdcp hardware is ready
+ *
+ * Returns:	true if ready else false
+ */
+bool ips_hdcp_is_ready(void)
+{
+	struct ips_hdcp_status_reg_t status;
+	int count = 20;
+	bool ret  = false;
+
+	hdmi_write32(MDFLD_HDCP_CONFIG_REG,
+		HDCP_FUSE_PULL_ENABLE | HDCP_PULL_FUSE);
+	while ((count--) > 0) {
+		udelay(20);
+		status.value = ips_hdcp_get_status();
+		if (status.fus_success && status.fus_complete) {
+			ret = true;
+			break;
+		}
+	}
+
+	pr_debug("hdcp: read count left = %d\n", count);
+	return ret;
+}
+
+/**
+ * Description: read an from hdcp tx
+ *
+ * @an	  buffer to return an in
+ *
+ * Returns:	true on succesful read else false
+ */
+void ips_hdcp_get_an(uint8_t *an)
+{
+	bool ret = false;
+	ips_hdcp_off();
+	ips_hdcp_capture_an();
+	do {
+		ret = ips_hdcp_is_an_ready();
+	} while  (ret == false);
+	ips_hdcp_read_an(an);
+}
+
+/**
+ * Description: read aksv from hdcp tx
+ *
+ * @aksv	buffer to return aksv
+ *
+ * Returns:	true on succesful read else false
+ */
+void ips_hdcp_get_aksv(uint8_t *aksv)
+{
+	static uint8_t save_aksv[HDCP_KSV_SIZE] = {0, 0, 0, 0, 0};
+	static bool aksv_read_once = false;
+	uint8_t i = 0;
+	struct double_word_t temp;
+	if (aksv_read_once == false) {
+		temp.value = 0;
+		temp.low = hdmi_read32(MDFLD_HDCP_AKSV_LOW_REG);
+		temp.high = hdmi_read32(MDFLD_HDCP_AKSV_HI_REG);
+		aksv_read_once = true;
+		for (i = 0; i < HDCP_KSV_SIZE; i++)
+			save_aksv[i] = temp.byte[i];
+	}
+	for (i = 0; i < HDCP_KSV_SIZE; i++)
+		aksv[i] = save_aksv[i];
+}
+
+/**
+ * Description: set downstream bksv in hdcp tx
+ *
+ * @bksv	bksv from downstream device
+ *
+ * Returns:	true on succesful write else false
+ */
+bool ips_hdcp_set_bksv(uint8_t *bksv)
+{
+	uint8_t i = 0;
+	struct double_word_t temp;
+	if (bksv == NULL)
+		return false;
+	temp.value = 0;
+	for (i = 0; i < HDCP_KSV_SIZE; i++)
+		temp.byte[i] = bksv[i];
+
+	hdmi_write32(MDFLD_HDCP_BKSV_LOW_REG, temp.low);
+	hdmi_write32(MDFLD_HDCP_BKSV_HI_REG, temp.high);
+	return true;
+}
+
+/**
+ * Description: set repeater bit in hdcp tx if downstream is a repeater else
+ *		reset the bit
+ *
+ * @present	indicates whether downstream is repeater or not
+ *
+ * Returns:	true on succesful write else false
+ */
+bool ips_hdcp_set_repeater(bool present)
+{
+	struct ips_hdcp_repeater_reg_t repeater;
+	repeater.value = hdmi_read32(MDFLD_HDCP_REP_REG);
+	repeater.present = present;
+	hdmi_write32(MDFLD_HDCP_REP_REG, repeater.value);
+	/* delay for hardware change of repeater status */
+	msleep(1);
+	return true;
+}
+
+/**
+ * Description: start first stage of authentication by writing an aksv
+ *
+ * Returns:	true on succesfully starting authentication else false
+ */
+bool ips_hdcp_start_authentication(void)
+{
+	ips_hdcp_enable_port(true);
+	ips_hdcp_set_config(HDCP_AUTHENTICATE_AND_ENCRYPT);
+	return true;
+}
+
+/**
+ * Description: check if hdcp tx R0 is ready after starting authentication
+ *
+ * Returns:	true if r0 is ready else false
+ */
+bool ips_hdcp_is_r0_ready(void)
+{
+	struct ips_hdcp_status_reg_t status;
+	status.value = ips_hdcp_get_status();
+
+	if (status.ri_ready)
+		return true;
+	return false;
+}
+
+/**
+ * Description: Enable encryption once r0 matches
+ *
+ * Returns:	true on enabling encryption else false
+ */
+bool ips_hdcp_enable_encryption(void)
+{
+	struct ips_hdcp_status_reg_t status;
+	uint32_t hdmib_reg = hdmi_read32(MDFLD_HDMIB_CNTRL_REG);
+	status.value = ips_hdcp_get_status();
+
+	if (ips_hdcp_is_hdcp_on() &&
+	    ips_hdcp_config_is_encrypting() &&
+	    status.ri_match &&
+	    (hdmib_reg & MDFLD_HDMIB_HDCP_PORT_SEL))
+		return true;
+	return false;
+}
+
+/**
+ * Description: check if hdcp tx & rx ri matches
+ *
+ * @rx_ri	ri of downstream device
+ *
+ * Returns:	true if ri matches else false
+ */
+bool ips_hdcp_does_ri_match(uint16_t rx_ri)
+{
+	struct ips_hdcp_status_reg_t status;
+
+	ips_hdcp_write_rx_ri(rx_ri);
+	status.value = ips_hdcp_get_status();
+	if (status.ri_match)
+		return true;
+	return false;
+}
+
+/**
+ * Description: compute v for repeater authentication
+ *
+ * @rep_ksv_list	 ksv list from downstream repeater
+ * @rep_ksv_list_entries number of entries in the ksv list
+ * @topology_data	bstatus value
+ *
+ * Returns:	true on successfully computing v else false
+ */
+bool ips_hdcp_compute_tx_v(uint8_t *rep_ksv_list,
+				   uint32_t rep_ksv_list_entries,
+				   uint16_t topology_data)
+{
+	bool ret = false;
+	const uint8_t BSTAT_M0_LEN = 18; /* 2 (bstatus) + 8 (M0) + 8 (length) */
+	const uint8_t BSTAT_M0 = 10; /* 2 (bstatus) + 8 (M0) */
+	const uint8_t M0 = 8; /* 8 (M0) */
+	uint32_t num_devices = rep_ksv_list_entries;
+	uint32_t lower_num_bytes_for_sha = 0, num_pad_bytes = 0, temp_data = 0;
+	uint32_t rem_text_data = 0, num_mo_bytes_left = M0, value = 0, i = 0;
+	uint8_t *buffer = NULL, *temp_buffer = NULL, *temp_data_ptr = NULL;
+	struct sqword_t buffer_len;
+
+	/* Clear SHA hash generator for new V calculation and
+	 * set the repeater to idle state
+	 */
+	hdmi_write32(MDFLD_HDCP_SHA1_IN, 0);
+
+	ips_hdcp_set_repeater_control(HDCP_REPEATER_CTRL_IDLE);
+	if (!ips_hdcp_repeater_wait_for_idle())
+		return false;
+
+	/* Start the SHA buffer creation to find the number of pad bytes */
+	num_pad_bytes = (64 - ((rep_ksv_list_entries * HDCP_KSV_SIZE)
+			 + BSTAT_M0_LEN)
+			 % 64);
+
+	/* Get the number of bytes for SHA */
+	lower_num_bytes_for_sha = (HDCP_KSV_SIZE * num_devices)
+				   + BSTAT_M0_LEN
+				   + num_pad_bytes; /* multiple of 64 bytes */
+
+	buffer = (uint8_t *)kzalloc(lower_num_bytes_for_sha, GFP_KERNEL);
+	if (!buffer)
+		return false;
+
+	/* 1. Copy the KSV buffer
+	 * Note: data is in little endian format
+	 */
+	temp_buffer = buffer;
+	memcpy((void *)temp_buffer, (void *)rep_ksv_list,
+		     num_devices * HDCP_KSV_SIZE);
+	temp_buffer += num_devices * HDCP_KSV_SIZE;
+
+	/* 2. Copy the topology_data
+	 */
+	memcpy((void *)temp_buffer, (void *)&topology_data, 2);
+	/* bstatus is copied in little endian format */
+	temp_buffer += 2;
+
+	/* 3. Offset the pointer buffer by 8 bytes
+	 * These 8 bytes are zeroed and are place holders for M0
+	 */
+	temp_buffer += 8;
+
+	/* 4. Pad the buffer with extra bytes
+	 *. The first padding byte must be 0x80 based on SHA1 message digest
+	 *. algorithm. HW automatically appends 0x80 while creating
+	 * the buffer if M0 is not 32-bit aligned
+	 * If M0 is 32-bit aligned we need to explicitly inject 0x80 to
+	 * the buffer
+	 */
+	if (num_pad_bytes && ((num_devices * HDCP_KSV_SIZE + BSTAT_M0) % 4 == 0))
+		*temp_buffer = 0x80;
+	temp_buffer += num_pad_bytes;
+
+	/* 5. Construct the length byte */
+	buffer_len.quad_part = (unsigned long long)(rep_ksv_list_entries *
+				HDCP_KSV_SIZE + BSTAT_M0) * 8; /* in bits */
+	temp_data_ptr = (uint8_t *)&buffer_len.quad_part;
+	/* Store in big endian form, it is reversed to little endian
+	 * when fed to SHA1
+	 */
+	for (i = 1; i <= 8; i++) {
+		*temp_buffer = *(temp_data_ptr + 8 - i);
+		temp_buffer++;
+	}
+
+	/* 6. Write KSV's and bstatus into SHA */
+	temp_buffer = buffer;
+	for (i = 0; i < (HDCP_KSV_SIZE * num_devices + 2)/4; i++) {
+		ips_hdcp_set_repeater_control(HDCP_REPEATER_32BIT_TEXT_IP);
+
+		/* As per HDCP spec sample SHA is in little endian format.
+		 * But the data fed to the cipher needs to be in big endian
+		 * format for it to compute it correctly
+		 */
+		memcpy(&value, temp_buffer, 4);
+		value = HDCP_CONVERT_ENDIANNESS(value);
+		hdmi_write32(MDFLD_HDCP_SHA1_IN, value);
+		temp_buffer += 4;
+
+		if (false == ips_hdcp_repeater_wait_for_next_data())
+			goto exit;
+	}
+
+	/* 7. Write the remaining bstatus data and M0
+	 * Text input must be aligned to LSB of the SHA1
+	 * in register when inputting partial text and partial M0
+	 */
+	rem_text_data = (HDCP_KSV_SIZE * num_devices + 2) % 4;
+	if (rem_text_data) {
+		/* Update the number of M0 bytes */
+		num_mo_bytes_left = num_mo_bytes_left - (4-rem_text_data);
+
+		if (false == ips_hdcp_repeater_wait_for_next_data())
+			goto exit;
+
+		switch (rem_text_data) {
+		case 1:
+			ips_hdcp_set_repeater_control(
+				HDCP_REPEATER_8BIT_TEXT_24BIT_MO_IP);
+			break;
+		case 2:
+			ips_hdcp_set_repeater_control(
+				HDCP_REPEATER_16BIT_TEXT_16BIT_MO_IP);
+			break;
+		case 3:
+			ips_hdcp_set_repeater_control(
+				HDCP_REPEATER_24BIT_TEXT_8BIT_MO_IP);
+			break;
+		default:
+			goto exit;
+		}
+
+		memcpy(&value, temp_buffer, 4);
+
+		/* Swap the text data in big endian format leaving the M0 data
+		 * as it is. LSB should contain the data in big endian format.
+		 * Since the M0 specfic data is all zeros while it's fed to the
+		 * cipher, those bit don't need to be modified.
+		 */
+		temp_data = 0;
+		for (i = 0; i < rem_text_data; i++) {
+			temp_data |= ((value & 0xff << (i * 8)) >>
+					(i * 8)) <<
+					((rem_text_data - i - 1) * 8);
+		}
+		hdmi_write32(MDFLD_HDCP_SHA1_IN, temp_data);
+		temp_buffer += 4;
+	}
+
+	/* write 4 bytes of M0 */
+	if (false == ips_hdcp_repeater_wait_for_next_data())
+		goto exit;
+
+	ips_hdcp_set_repeater_control(HDCP_REPEATER_32BIT_MO_IP);
+	hdmi_write32(MDFLD_HDCP_SHA1_IN, (uint32_t)(uintptr_t) temp_buffer);
+	temp_buffer += 4;
+	num_mo_bytes_left -= 4;
+
+	if (num_mo_bytes_left) {
+		/* The remaining M0 + padding bytes need to be added */
+		num_pad_bytes = num_pad_bytes - (4 - num_mo_bytes_left);
+
+		/* write 4 bytes of M0 */
+		if (false == ips_hdcp_repeater_wait_for_next_data())
+			goto exit;
+		switch (num_mo_bytes_left) {
+		case 1:
+			ips_hdcp_set_repeater_control(
+				HDCP_REPEATER_24BIT_TEXT_8BIT_MO_IP);
+			break;
+		case 2:
+			ips_hdcp_set_repeater_control(
+				HDCP_REPEATER_16BIT_TEXT_16BIT_MO_IP);
+			break;
+		case 3:
+			ips_hdcp_set_repeater_control(
+				HDCP_REPEATER_8BIT_TEXT_24BIT_MO_IP);
+			break;
+		case 4:
+			ips_hdcp_set_repeater_control(
+				HDCP_REPEATER_32BIT_MO_IP);
+			break;
+		default:
+			/* should never happen */
+			goto exit;
+		}
+
+		hdmi_write32(MDFLD_HDCP_SHA1_IN,
+			     (uint32_t)(uintptr_t) temp_buffer);
+		temp_buffer += 4;
+		num_mo_bytes_left = 0;
+	}
+
+	/* 8. Write the remaining padding bytes and length */
+	/* Remaining data = remaining padding data + 64 bits of length data */
+	rem_text_data = num_pad_bytes + 8;
+
+	if (rem_text_data % 4) {
+		/* Should not happen */
+		pr_debug("hdcp: compute_tx_v - data not aligned\n");
+		goto exit;
+	}
+
+	for (i = 0; i < rem_text_data / 4; i++) {
+		if (false == ips_hdcp_repeater_wait_for_next_data())
+			goto exit;
+
+		ips_hdcp_set_repeater_control(HDCP_REPEATER_32BIT_TEXT_IP);
+		memcpy(&value, temp_buffer, 4);
+		/* Do the big endian conversion */
+		value = HDCP_CONVERT_ENDIANNESS(value);
+		hdmi_write32(MDFLD_HDCP_SHA1_IN, value);
+		temp_buffer += 4;
+	}
+
+	/* Done */
+	ret = true;
+
+exit:
+	kfree(buffer);
+	return ret;
+}
+
+/**
+ * Description: compare hdcp tx & hdcp rx sha1 results
+ *
+ * @rep_prime_v sha1 value from downstream repeater
+ *
+ * Returns:	true if same else false
+ */
+bool ips_hdcp_compare_v(uint32_t *rep_prime_v)
+{
+	bool ret = false;
+	uint32_t i = 10, stat;
+
+	/* Load V' */
+	hdmi_write32(MDFLD_HDCP_VPRIME_H0, *rep_prime_v);
+
+	hdmi_write32(MDFLD_HDCP_VPRIME_H1, *(rep_prime_v + 1));
+
+	hdmi_write32(MDFLD_HDCP_VPRIME_H2, *(rep_prime_v + 2));
+
+	hdmi_write32(MDFLD_HDCP_VPRIME_H3, *(rep_prime_v + 3));
+
+	hdmi_write32(MDFLD_HDCP_VPRIME_H4, *(rep_prime_v + 4));
+
+	if (false == ips_hdcp_repeater_wait_for_next_data())
+		goto exit;
+
+	/* Set HDCP_REP to do the comparison, start
+	 * transmitter's V calculation
+	 */
+	ips_hdcp_set_repeater_control(HDCP_REPEATER_COMPLETE_SHA1);
+
+	msleep(5);
+	do {
+		stat = ips_hdcp_repeater_v_match_check();
+		if (1 == stat) {
+			ret = true; /* match */
+			break;
+		} else if (-1 == stat)
+			msleep(5); /* busy, retry */
+		else
+			break; /* mismatch */
+	} while (--i);
+
+exit:
+	return ret;
+}
+
+/**
+ * Description: disable hdcp
+ *
+ * Returns:	true on successfully disabling hdcp else false
+ */
+bool ips_hdcp_disable(void)
+{
+	ips_hdcp_off();
+	/* Set Rx_Ri to 0 */
+	ips_hdcp_write_rx_ri(0);
+	/* Set Repeater to Not Present */
+	ips_hdcp_set_repeater(false);
+	/* Disable HDCP on this Port */
+	/* ips_hdcp_enable_port(false); */
+	return true;
+}
+
+/**
+ * Description: initialize hdcp tx for authentication
+ *
+ * Returns:	true success else false
+ */
+bool ips_hdcp_init(void)
+{
+	return true;
+}
+
+/**
+ * Description: check whether hdcp tx can authenticate
+ *
+ * Returns:	true if device can authenticate else false
+ */
+bool ips_hdcp_device_can_authenticate(void)
+{
+	return true;
+}
+
+/**
+ * Description: get hardware frame count for cipher Ri update
+ *
+ * @count   framer count for cipher Ri update
+ *
+ * Returns: true if successful else false
+ */
+bool ips_hdcp_get_ri_frame_count(uint8_t *count)
+{
+	struct ips_hdcp_status_reg_t status;
+
+	status.value = ips_hdcp_get_status();
+	*count       = status.frame_count;
+
+	return true;
+}
+
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/ipil/specific/mrfld/ips_hdmi_priv.c b/drivers/external_drivers/intel_media/otm_hdmi/ipil/specific/mrfld/ips_hdmi_priv.c
new file mode 100755
index 0000000..24f519f
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/ipil/specific/mrfld/ips_hdmi_priv.c
@@ -0,0 +1,676 @@
+/*
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  Contact Information:
+
+  Intel Corporation
+  2200 Mission College Blvd.
+  Santa Clara, CA  95054
+
+  BSD LICENSE
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#include <linux/delay.h>
+#include "otm_hdmi.h"
+#include "ipil_hdmi.h"
+#include "ipil_utils.h"
+#include "ipil_internal.h"
+#include "ips_hdmi.h"
+
+/* TMDS clocks in KHz*/
+#define IPS_MIN_PIXEL_CLOCK 25174	/* 640x480@59.94Hz */
+#define IPS_MAX_PIXEL_CLOCK 148500	/* 1920x1080@60Hz */
+
+/* Preferred mode if none is indicated in EDID */
+#define IPS_PREFERRED_HDISPLAY 1920
+#define IPS_PREFERRED_VDISPLAY 1080
+#define IPS_PREFERRED_REFRESH_RATE 60
+
+struct data_rate_divider_selector_list_t {
+	uint32_t target_data_rate;
+	int m1;
+	int m2;
+	int n;
+	int p1;
+	int p2;
+};
+static struct data_rate_divider_selector_list_t
+	data_rate_divider_selector_list[] = {
+			{25175, 2, 118, 1, 3, 12},
+			{25200, 2, 105, 1, 2, 16},
+			{26000, 2, 122, 1, 3, 12},
+			{27000, 3, 75, 1, 2, 16},
+			{27027, 3, 75, 1, 2, 16},
+			{27500, 2, 129, 1, 3, 12},
+			{28000, 2, 153, 1, 3, 14},
+			{28320, 2, 118, 1, 2, 16},
+			{28322, 2, 118, 1, 2, 16},
+			{29000, 2, 151, 1, 2, 20},
+			{30000, 2, 125, 1, 2, 16},
+			{31000, 2, 109, 1, 3, 9},
+			{31500, 2, 123, 1, 3, 10},
+			{32000, 2, 150, 1, 3, 12},
+			{32500, 3, 79, 1, 2, 14},
+			{33000, 2, 116, 1, 3, 9},
+			{33750, 2, 123, 1, 2, 14},
+			{33784, 2, 132, 1, 3, 10},
+			{34000, 2, 124, 1, 2, 14},
+			{34870, 3, 71, 1, 3, 8},
+			{35000, 2, 123, 1, 3, 9},
+			{35500, 2, 111, 1, 3, 8},
+			{36000, 2, 150, 1, 2, 16},
+			{37000, 2, 130,1, 3, 9},
+			{38000, 3, 99, 1, 3, 10},
+			{39000, 2, 137, 1, 3, 9},
+			{40000, 2, 125, 1, 3, 8},
+			{40500, 3, 95, 1, 3, 9},
+			{40541, 3, 95, 1, 3, 9},
+			{41000, 2, 144, 1, 3, 9},
+			{41540, 2, 146, 1, 3, 9},
+			{42000, 2, 153, 1, 2, 14},
+			{43000, 2, 112, 1, 2, 10},
+			{43163, 2, 118, 1, 3, 7},
+			{44000, 3, 103, 1, 3, 9},
+			{44900, 2, 117, 1, 2, 10},
+			{45000, 2, 123, 1, 3, 7},
+			{46000, 2, 144, 1, 3, 8},
+			{47000, 2, 147, 1, 3, 8},
+			{48000, 2, 150, 1, 3, 8},
+			{49000, 2, 134, 1, 3, 7},
+			{49500, 2, 116, 1, 3, 6},
+			{50000, 2, 156, 1, 3, 8},
+			{51000, 3, 93, 1, 3, 7},
+			{52000, 2, 122, 1, 3, 6},
+			{52406, 3, 91, 1, 2, 10},
+			{53000, 2, 138, 1, 2, 10},
+			{54000, 3, 75, 1, 2, 8},
+			{54054, 3, 75, 1, 2, 8},
+			{55000, 2, 129, 1, 3, 6},
+			{56000, 2, 153, 1, 3, 7},
+			{56250, 2, 154, 1, 3, 7},
+			{57000, 3, 99, 1, 2, 10},
+			{58000, 2, 151, 1, 2, 10},
+			{59000, 2, 123, 1, 2, 8},
+			{60000, 2, 125, 1, 2, 8},
+			{61000, 2, 143, 1, 3, 6},
+			{62000, 2, 113, 1, 2, 7},
+			{63000, 2, 123, 1, 3, 5},
+			{64000, 2, 150, 1, 3, 6},
+			{65000, 3, 79, 1, 2, 7},
+			{66000, 2, 129, 1, 3, 5},
+			{66667, 3, 81, 1, 2, 7},
+			{67000, 3, 93, 1, 2, 8},
+			{67500, 2, 123, 1, 2, 7},
+			{67567, 2, 132, 1, 3, 5},
+			{68000, 2, 124, 1, 2, 7},
+			{68075, 2, 133, 1, 3, 5},
+			{68179, 3, 71, 1, 3, 4},
+			{69000, 2, 135, 1, 3, 5},
+			{70000, 3, 85, 1, 2, 7},
+			{71000, 2, 111, 1, 3, 4},
+			{72000, 2, 150, 1, 2, 8},
+			{73000, 3, 95, 1, 3, 5},
+			{74000, 2, 135, 1, 2, 7},
+			{74175, 3, 103, 1, 2, 8},
+			{74250, 2, 145, 1, 3, 5},
+			{74481, 3, 97, 1, 3, 5},
+			{75000, 2, 117, 1, 3, 4},
+			{76000, 3, 99, 1, 3, 5},
+			{77000, 2, 150, 1, 3, 5},
+			{78000, 2, 122, 1, 3, 4},
+			{78750, 2, 123, 1, 3, 4},
+			{79000, 2, 144, 1, 2, 7},
+			{80000, 2, 125, 1, 3, 4},
+			{81000, 2, 148, 1, 2, 7},
+			{81081, 2, 148, 1, 2, 7},
+			{81624, 3, 85, 1, 3, 4},
+			{82000, 2, 128, 1, 3, 4},
+			{83000, 2, 108, 1, 2, 5},
+			{83950, 2, 153, 1, 2, 7},
+			{84000, 2, 153, 1, 2, 7},
+			{85000, 2, 155, 1, 2, 7},
+			{86000, 2, 112, 1, 2, 5},
+			{87000, 2, 136, 1, 3, 4},
+			{88000, 2, 138, 1, 3, 4},
+			{89000, 2, 139, 1, 3, 4},
+			{90000, 2, 117, 1, 2, 5},
+			{91000, 3, 79, 1, 2, 5},
+			{92000, 2, 144, 1, 3, 4},
+			{92719, 2, 145, 1, 3, 4},
+			{92812, 2, 145, 1, 3, 4},
+			{93000, 2, 109, 1, 3, 3},
+			{93438, 2, 146, 1, 3, 4},
+			{94000, 2, 147, 1, 3, 4},
+			{94500, 2, 123, 1, 2, 5},
+			{95000, 3, 99, 1, 3, 4},
+			{95654, 3, 83, 1, 2, 5},
+			{96000, 2, 150, 1, 3, 4},
+			{97000, 3, 101, 1, 3, 4},
+			{98000, 2, 153, 1, 3, 4},
+			{99000, 2, 116, 1, 3, 3},
+			{100000, 2, 156, 1, 3, 4},
+			{101000, 3, 79, 1, 3, 3},
+			{102000, 2, 133, 1, 2, 5},
+			{103000, 2, 134, 1, 2, 5},
+			{104000, 2, 122, 1, 3, 3},
+			{105000, 2, 123, 1, 3, 3},
+			{106000, 2, 138, 1, 2, 5},
+			{107000, 3, 93, 1, 2, 5},
+			{107214, 3, 93, 1, 2, 5},
+			{108000, 3, 75, 1, 2, 4},
+			{109000, 2, 142, 1, 2, 5},
+			{110000, 2, 129, 1, 3, 3},
+			{110013, 2, 129, 1, 3, 3},
+			{111000, 2, 130, 1, 3, 3},
+			{111263, 3, 87, 1, 3, 3},
+			{111375, 3, 87, 1, 3, 3},
+			{112000, 2, 146, 1, 2, 5},
+			{113000, 2, 147, 1, 2, 5},
+			{113100, 2, 118, 1, 2, 4},
+			{113309, 2, 118, 1, 2, 4},
+			{114000, 3, 99, 1, 2, 5},
+			{115000, 2, 135, 1, 3, 3},
+			{116000, 2, 151, 1, 2, 5},
+			{117000, 2, 137, 1, 3, 3},
+			{118000, 2, 123, 1, 2, 4},
+			{119000, 3, 93, 1, 3, 3},
+			{119651, 3, 83, 1, 2, 4},
+			{120000, 2, 125, 1, 2, 4},
+			{121000, 2, 126, 1, 2, 4},
+			{122000, 2, 143, 1, 3, 3},
+			{122614, 3, 85, 1, 2, 4},
+			{123000, 2, 144, 1, 3, 3},
+			{123379, 2, 145, 1, 3, 3},
+			{124000, 3, 97, 1, 3, 3},
+			{125000, 2, 130, 1, 2, 4},
+			{126000, 2, 131, 1, 2, 4},
+			{127000, 2, 149, 1, 3, 3},
+			{128000, 2, 150, 1, 3, 3},
+			{129000, 2, 151, 1, 3, 3},
+			{129859, 2, 152, 1, 3, 3},
+			{130000, 2, 152, 1, 3, 3},
+			{131000, 3, 91, 1, 2, 4},
+			{131850, 3, 103, 1, 3, 3},
+			{132000, 3, 103, 1, 3, 3},
+			{133000, 2, 156, 1, 3, 3},
+			{133330, 2, 139, 1, 2, 4},
+			{134000, 3, 93, 1, 2, 4},
+			{135000, 2, 141, 1, 2, 4},
+			{136000, 2, 106, 1, 3, 2},
+			{137000, 2, 107, 1, 3, 2},
+			{138000, 2, 108, 1, 3, 2},
+			{139000, 2, 145, 1, 2, 4},
+			{139050, 2, 145, 1, 2, 4},
+			{139054, 2, 145, 1, 2, 4},
+			{140000, 3, 73, 1, 3, 2},
+			{141000, 2, 147, 1, 2, 4},
+			{142000, 2, 111, 1, 3, 2},
+			{143000, 2, 149, 1, 2, 4},
+			{143472, 2, 112, 1, 3, 2},
+			{144000, 2, 150, 1, 2, 4},
+			{145000, 2, 151, 1, 2, 4},
+			{146000, 2, 114, 1, 3, 2},
+			{147000, 2, 153, 1, 2, 4},
+			{147891, 3, 77, 1, 3, 2},
+			{148000, 3, 77, 1, 3, 2},
+			{148352, 3, 103, 1, 2, 4},
+			{148500, 2, 116, 1, 3, 2}
+};
+
+#define NUM_SELECTOR_LIST (sizeof( \
+		data_rate_divider_selector_list) \
+	/ sizeof(struct data_rate_divider_selector_list_t))
+
+/* DPLL registers on IOSF */
+#define PLLA_DWORD3_1   0x800C
+#define PLLA_DWORD3_2   0x802C
+#define PLLA_DWORD5_1   0x8014
+#define PLLA_DWORD5_2   0x8034
+#define PLLA_DWORD7_1   0x801C
+#define PLLA_DWORD7_2   0x803C
+#define PLLB_DWORD8     0x8040
+#define PLLB_DWORD10_1  0x8048
+#define PLLB_DWORD10_2  0x8068
+#define CMN_DWORD3      0x810C
+#define CMN_DWORD8      0x8100
+#define REF_DWORD18     0x80C0
+#define REF_DWORD22     0x80D0
+#define DPLL_CML_CLK1   0x8238
+#define DPLL_CML_CLK2   0x825C
+#define DPLL_LRC_CLK    0x824C
+#define DPLL_Tx_GRC     0x8244
+#define PCS_DWORD12_1   0x0230
+#define PCS_DWORD12_2   0x0430
+#define TX_SWINGS_1     0x8294
+#define TX_SWINGS_2     0x8290
+#define TX_SWINGS_3     0x8288
+#define TX_SWINGS_4     0x828C
+#define TX_SWINGS_5     0x0690
+#define TX_SWINGS_6     0x822C
+#define TX_SWINGS_7     0x8224
+#define TX_GROUP_1      0x82AC
+#define TX_GROUP_2      0x82B8
+
+#define DPLL_IOSF_EP 0x13
+
+/**
+ * Description: Write to DPLL register via IOSF
+ *
+ * @ep_id:	IOSF endpoint ID (0x13 for DPLL)
+ * @reg:        address of register
+ * @val:        value to write to register
+ *
+ * Returns:	none
+ */
+void gunit_iosf_write32(u32 ep_id, u32 reg, u32 val)
+{
+	u32 ret;
+	int retry = 0;
+	u32 sb_pkt = (1 << 16) | (ep_id << 8) | 0xf0;
+
+	/* Write value to side band register */
+	hdmi_write32(0x2108, reg);
+	hdmi_write32(0x2104, val);
+	hdmi_write32(0x2100, sb_pkt);
+
+	/* Check if transaction is complete */
+	ret = hdmi_read32(0x210C);
+	while ((retry++ < 0x1000) && (ret != 0x2)) {
+		usleep_range(500, 1000);
+		ret = hdmi_read32(0x210C);
+	}
+
+	if (ret != 2)
+		pr_err("%s: failed to program DPLL\n", __func__);
+}
+
+/**
+ * Description: Read DPLL register via IOSF
+ *
+ * @ep_id:	IOSF endpoint ID (0x13 for DPLL)
+ * @reg:        address of register
+ *
+ * Returns:	value of register
+ */
+u32 gunit_iosf_read32(u32 ep_id, u32 reg)
+{
+	u32 ret;
+	int retry = 0;
+	u32 sb_pkt = (0 << 16) | (ep_id << 8) | 0xf0;
+
+	/* Read side band register */
+	hdmi_write32(0x2108, reg);
+	hdmi_write32(0x2100, sb_pkt);
+
+	/* Check if transaction is complete */
+	ret = hdmi_read32(0x210C);
+	while ((retry++ < 0x1000) && (ret != 2)) {
+		usleep_range(500, 1000);
+		ret = hdmi_read32(0x210C);
+	}
+
+	if (ret != 2)
+		pr_err("%s: Failed to read\n", __func__);
+	else
+		ret = hdmi_read32(0x2104);
+
+	return ret;
+}
+
+/**
+ * Description: Find the m, n and p for DPLL.
+ *              Use the nominal pixel clock as TMDS clock.
+ *
+ * @dclk:	refresh rate dot clock in kHz of current mode
+ * @real_clk:   nominal dot clock used as TMDS dot clock. Note it
+ *              has a small difference from real HW clock.
+ * @m1, m2:     DPLL m values
+ * @n:          DPLL n value
+ * @p1, p2:     DPLL p values
+ *
+ * Returns:	true on success
+ *		false on NULL input arguments
+ */
+static bool __ips_hdmi_get_divider_selector(
+			uint32_t dclk,
+			uint32_t *real_dclk,
+			int *m1, int *m2,
+			int *n, int *p1, int *p2)
+{
+	int i, matched_idx = 0;
+	uint32_t min_diff = 0xffffffff, curr_diff;
+
+	if (dclk > data_rate_divider_selector_list[NUM_SELECTOR_LIST - 1].target_data_rate ||
+			dclk < data_rate_divider_selector_list[0].target_data_rate ) {
+		pr_err("Could not find supported mode\n");
+		return false;
+	}
+
+	for (i = 0; i < NUM_SELECTOR_LIST; i++) {
+		curr_diff = abs(dclk - data_rate_divider_selector_list[i].target_data_rate);
+		if (min_diff > curr_diff) {
+			min_diff = curr_diff;
+			matched_idx = i;
+		}
+	}
+	*m1 = data_rate_divider_selector_list[matched_idx].m1;
+	*m2 = data_rate_divider_selector_list[matched_idx].m2;
+	*n = data_rate_divider_selector_list[matched_idx].n;
+	*p1 = data_rate_divider_selector_list[matched_idx].p1;
+	*p2 = data_rate_divider_selector_list[matched_idx].p2;
+	*real_dclk = data_rate_divider_selector_list[matched_idx].target_data_rate;
+	pr_debug("dclk: %d, real_dclk: %d", dclk,  *real_dclk);
+	return true;
+}
+
+/**
+ * Description: programs dpll clocks, enables dpll and waits
+ *		till it locks with DSI PLL
+ *
+ * @m1, m2:     DPLL m values
+ * @n:          DPLL n value
+ * @p1, p2:     DPLL p values
+ *
+ * Returns:	none
+*/
+static void __ips_hdmi_set_program_dpll(int n, int p1, int p2, int m1, int m2)
+{
+	u32 ret, tmp;
+	int retry = 0;
+	u32 div = (0x11 << 24) | (p1 << 21) | (p2 << 16) | (n << 12) |
+		  (0x1 << 11)  | (m1 << 8)  | (m2);
+
+	pr_debug("enter %s\n", __func__);
+
+	/* Common reset */
+	hdmi_write32(IPS_DPLL_B, 0x70006800);
+
+	/* Program DPLL registers via IOSF (TNG display HAS) */
+
+	/* Process monitor to 19.2MHz */
+	gunit_iosf_write32(DPLL_IOSF_EP, REF_DWORD22, 0x19080000);
+
+	/* LRC clock to 19.2MHz */
+	gunit_iosf_write32(DPLL_IOSF_EP, DPLL_LRC_CLK, 0x00000F10);
+
+	/* Disable periodic GRC IREF update for DPLL */
+	tmp = gunit_iosf_read32(DPLL_IOSF_EP, PLLB_DWORD8);
+	gunit_iosf_write32(DPLL_IOSF_EP, PLLB_DWORD8, tmp & 0x00FFFFFF);
+
+	/* Enable Tx for periodic GRC update*/
+	gunit_iosf_write32(DPLL_IOSF_EP, DPLL_Tx_GRC, 0x00004000);
+
+	/* GRC cal clock set to 19.2MHZ */
+	gunit_iosf_write32(DPLL_IOSF_EP, REF_DWORD18, 0x30002400);
+
+	/* Set lock time to 53us.
+	 * Disable fast lock.
+	 */
+	gunit_iosf_write32(DPLL_IOSF_EP, CMN_DWORD8, 0x0);
+
+	/* Stagger Programming */
+	gunit_iosf_write32(DPLL_IOSF_EP, TX_GROUP_1, 0x00001500);
+	gunit_iosf_write32(DPLL_IOSF_EP, TX_GROUP_2, 0x40400000);
+	gunit_iosf_write32(DPLL_IOSF_EP, PCS_DWORD12_1, 0x00220F00);
+	gunit_iosf_write32(DPLL_IOSF_EP, PCS_DWORD12_2, 0x00750F00);
+
+	/* Set divisors*/
+	gunit_iosf_write32(DPLL_IOSF_EP, PLLA_DWORD3_1, div);
+	gunit_iosf_write32(DPLL_IOSF_EP, PLLA_DWORD3_2, div);
+
+	/* Set up LCPLL in digital mode */
+	gunit_iosf_write32(DPLL_IOSF_EP, PLLA_DWORD5_1, 0x0DF44300);
+	gunit_iosf_write32(DPLL_IOSF_EP, PLLA_DWORD5_2, 0x0DF44300);
+
+	/* LPF co-efficients for LCPLL in digital mode */
+	gunit_iosf_write32(DPLL_IOSF_EP, PLLB_DWORD10_1, 0x005F0021);
+	gunit_iosf_write32(DPLL_IOSF_EP, PLLB_DWORD10_2, 0x005F0021);
+
+	/* Disable unused TLine clocks on right side */
+	gunit_iosf_write32(DPLL_IOSF_EP, CMN_DWORD3, 0x14540000);
+
+	/* Enable DPLL */
+	tmp = hdmi_read32(IPS_DPLL_B);
+	hdmi_write32(IPS_DPLL_B, tmp | IPIL_DPLL_VCO_ENABLE);
+
+	/* Enable DCLP to core */
+	tmp = gunit_iosf_read32(DPLL_IOSF_EP, PLLA_DWORD7_1);
+	gunit_iosf_write32(DPLL_IOSF_EP, PLLA_DWORD7_1, tmp | (1 << 24));
+	tmp = gunit_iosf_read32(DPLL_IOSF_EP, PLLA_DWORD7_2);
+	gunit_iosf_write32(DPLL_IOSF_EP, PLLA_DWORD7_2, tmp | (1 << 24));
+
+	/* Set HDMI lane CML clock */
+	gunit_iosf_write32(DPLL_IOSF_EP, DPLL_CML_CLK1, 0x07760018);
+	gunit_iosf_write32(DPLL_IOSF_EP, DPLL_CML_CLK2, 0x00400888);
+
+	/* Swing settings */
+	gunit_iosf_write32(DPLL_IOSF_EP, TX_SWINGS_1, 0x00000000);
+	gunit_iosf_write32(DPLL_IOSF_EP, TX_SWINGS_2, 0x2B245555);
+	gunit_iosf_write32(DPLL_IOSF_EP, TX_SWINGS_3, 0x5578B83A);
+	gunit_iosf_write32(DPLL_IOSF_EP, TX_SWINGS_4, 0x0C782040);
+	//gunit_iosf_write32(DPLL_IOSF_EP, TX_SWINGS_5, 0x2B247878);
+	gunit_iosf_write32(DPLL_IOSF_EP, TX_SWINGS_6, 0x00030000);
+	gunit_iosf_write32(DPLL_IOSF_EP, TX_SWINGS_7, 0x00004000);
+	gunit_iosf_write32(DPLL_IOSF_EP, TX_SWINGS_1, 0x80000000);
+
+	/* Wait until DPLL is locked */
+	ret = hdmi_read32(IPS_DPLL_B);
+	ret &= 0x8000;
+	while ((retry++ < 1000) && (ret != 0x8000)) {
+		usleep_range(500, 1000);
+		ret = hdmi_read32(IPS_DPLL_B);
+		ret &= 0x8000;
+	}
+
+	if (ret != 0x8000) {
+		pr_err("%s: DPLL failed to lock, exit...\n", __func__);
+		return;
+	}
+}
+
+/**
+ * Description: gets dpll clocks
+ *
+ * @dev:	hdmi_device_t
+ * @dclk:	refresh rate dot clock in kHz of current mode
+ *
+ * Returns:	OTM_HDMI_SUCCESS on success
+ *		OTM_HDMI_ERR_INVAL on NULL input arguments
+ */
+otm_hdmi_ret_t	ips_hdmi_crtc_mode_get_program_dpll(hdmi_device_t *dev,
+							unsigned long dclk)
+{
+	int n, p1, p2, m1, m2;
+	uint32_t target_dclk;
+
+	pr_debug("enter %s\n", __func__);
+
+	if (__ips_hdmi_get_divider_selector(dclk,
+			&target_dclk, &m1, &m2, &n, &p1, &p2)) {
+		dev->clock_khz = 3840 * m1 * m2 / (p1 * p2);
+		return OTM_HDMI_SUCCESS;
+	} else
+		return OTM_HDMI_ERR_INVAL;
+}
+
+/**
+ * Description: programs dpll clocks, enables dpll and waits
+ *		till it locks with DSI PLL
+ *
+ * @dev:	hdmi_device_t
+ * @dclk:	refresh rate dot clock in kHz of current mode
+ *
+ * Returns:	OTM_HDMI_SUCCESS on success
+ *		OTM_HDMI_ERR_INVAL on NULL input arguments
+ */
+otm_hdmi_ret_t	ips_hdmi_crtc_mode_set_program_dpll(hdmi_device_t *dev,
+							unsigned long dclk)
+{
+	int n, p1, p2, m1, m2;
+	uint32_t target_dclk;
+
+	pr_debug("enter %s\n", __func__);
+
+	if (__ips_hdmi_get_divider_selector(dclk,
+			&target_dclk, &m1, &m2, &n, &p1, &p2)) {
+		__ips_hdmi_set_program_dpll(n, p1, p2, m1, m2);
+		dev->clock_khz = 3840 * m1 * m2 / (p1 * p2);
+		return OTM_HDMI_SUCCESS;
+	} else
+		return OTM_HDMI_ERR_INVAL;
+}
+
+/**
+ * Description: get pixel clock range
+ *
+ * @pc_min:	minimum pixel clock
+ * @pc_max:	maximum pixel clock
+ *
+ * Returns:	OTM_HDMI_SUCCESS on success
+ *		OTM_HDMI_ERR_FAILED on NULL input arguments.
+ */
+otm_hdmi_ret_t ips_get_pixel_clock_range(unsigned int *pc_min,
+					unsigned int *pc_max)
+{
+	if (!pc_min || !pc_max)
+		return OTM_HDMI_ERR_FAILED;
+
+	*pc_min = IPS_MIN_PIXEL_CLOCK;
+	*pc_max = IPS_MAX_PIXEL_CLOCK;
+	return OTM_HDMI_SUCCESS;
+}
+
+/**
+ * Returns if the given values is preferred mode or not
+ * @hdisplay	: width
+ * @vdisplay	: height
+ * @refresh	: refresh rate
+ *
+ * Returns true if preferred mode else false
+ */
+bool ips_hdmi_is_preferred_mode(int hdisplay, int vdisplay, int refresh)
+{
+	if (hdisplay == IPS_PREFERRED_HDISPLAY &&
+		vdisplay == IPS_PREFERRED_VDISPLAY &&
+		refresh == IPS_PREFERRED_REFRESH_RATE)
+		return true;
+	else
+		return false;
+}
+
+/**
+ * Description: restore HDMI display registers and enable display
+ *
+ * @dev:	hdmi_device_t
+ *
+ * Returns:	none
+ */
+void ips_hdmi_restore_and_enable_display(hdmi_device_t *dev)
+{
+
+	int i;
+
+	if (NULL == dev)  {
+		pr_debug("\n%s invalid argument\n", __func__);
+		return;
+	}
+	if (dev->reg_state.valid == false) {
+		pr_debug("\nhdmi no data to restore\n");
+		return;
+	}
+
+	/*make sure VGA plane is off. it initializes to on after reset!*/
+	hdmi_write32(IPIL_VGACNTRL, IPIL_VGA_DISP_DISABLE);
+
+	ips_hdmi_crtc_mode_set_program_dpll(dev, dev->clock_khz);
+
+	/* Restore mode */
+	hdmi_write32(IPS_HTOTAL_B, dev->reg_state.saveHTOTAL_B);
+	hdmi_write32(IPS_HBLANK_B, dev->reg_state.saveHBLANK_B);
+	hdmi_write32(IPS_HSYNC_B, dev->reg_state.saveHSYNC_B);
+	hdmi_write32(IPS_VTOTAL_B, dev->reg_state.saveVTOTAL_B);
+	hdmi_write32(IPS_VBLANK_B, dev->reg_state.saveVBLANK_B);
+	hdmi_write32(IPS_VSYNC_B, dev->reg_state.saveVSYNC_B);
+	hdmi_write32(IPS_PIPEBSRC, dev->reg_state.savePIPEBSRC);
+	/* Don't restore pipestat as it will override register set during DPMS on */
+	/* hdmi_write32(IPS_DSPBSTAT, dev->reg_state.saveDSPBSTATUS);*/
+
+	/*set up the plane*/
+	hdmi_write32(IPS_DSPBSTRIDE, dev->reg_state.saveDSPBSTRIDE);
+	hdmi_write32(IPS_DSPBLINOFF, dev->reg_state.saveDSPBLINOFF);
+	hdmi_write32(IPS_DSPBTILEOFF, dev->reg_state.saveDSPBTILEOFF);
+	hdmi_write32(IPS_DSPBSIZE, dev->reg_state.saveDSPBSIZE);
+	hdmi_write32(IPS_DSPBPOS, dev->reg_state.saveDSPBPOS);
+	hdmi_write32(IPS_DSPBSURF, dev->reg_state.saveDSPBSURF);
+
+	hdmi_write32(IPS_PFIT_CONTROL, dev->reg_state.savePFIT_CONTROL);
+	hdmi_write32(IPS_PFIT_PGM_RATIOS, dev->reg_state.savePFIT_PGM_RATIOS);
+	hdmi_write32(IPS_HDMIPHYMISCCTL, dev->reg_state.saveHDMIPHYMISCCTL);
+	hdmi_write32(IPS_HDMIB_CONTROL, dev->reg_state.saveHDMIB_CONTROL);
+
+	/*enable the plane*/
+	hdmi_write32(IPS_DSPBCNTR, dev->reg_state.saveDSPBCNTR);
+	hdmi_write32(IPS_HDMIB_LANES02, dev->reg_state.saveHDMIB_DATALANES);
+	hdmi_write32(IPS_HDMIB_LANES3, dev->reg_state.saveHDMIB_DATALANES);
+
+	/*enable the pipe */
+	hdmi_write32(IPS_PIPEBCONF, dev->reg_state.savePIPEBCONF);
+
+	/* restore palette (gamma) */
+	for (i = 0; i < 256; i++)
+		hdmi_write32(IPS_PALETTE_B + (i<<2),
+				dev->reg_state.save_palette_b[i]);
+
+	dev->reg_state.valid = false;
+}
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/os/android/android_hdmi.c b/drivers/external_drivers/intel_media/otm_hdmi/os/android/android_hdmi.c
new file mode 100755
index 0000000..a688e19
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/os/android/android_hdmi.c
@@ -0,0 +1,2922 @@
+/*
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  Contact Information:
+
+  Intel Corporation
+  2200 Mission College Blvd.
+  Santa Clara, CA  95054
+
+  BSD LICENSE
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/pm_runtime.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/version.h>
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_crtc.h>
+#include "psb_intel_display.h"
+#include "psb_intel_reg.h"
+#include "psb_drv.h"
+#include "psb_powermgmt.h"
+#include "mdfld_output.h"
+#include "mdfld_hdmi_audio_if.h"
+#include "otm_hdmi_types.h"
+#include "otm_hdmi.h"
+#include "android_hdmi.h"
+#ifdef OTM_HDMI_HDCP_ENABLE
+#include "hdcp_api.h"
+#endif
+#ifdef CONFIG_SUPPORT_MIPI
+#include "mdfld_dsi_output.h"
+#endif
+
+/* Include file for sending uevents */
+#include "psb_umevents.h"
+
+/* External state dependency */
+extern int hdmi_state;
+
+/* External default scaling mode dependency */
+extern int default_hdmi_scaling_mode;
+
+#ifdef CONFIG_ITE_HDMI_CEC
+/* External EDID Source Physical Address*/
+extern int hdmi_edid_src_phy_addr;
+#endif
+
+static const struct {
+	int width, height, htotal, vtotal, dclk, vrefr, vic, par;
+} vic_formats[12] = {
+	/* 640x480p60 4:3 */
+	{  640,  480,  800,  525,  25200, 60,  1 , OTM_HDMI_PAR_4_3  },
+	/* 720x480p60 4:3 */
+	{  720,  480,  858,  525,  27027, 60,  2 , OTM_HDMI_PAR_4_3  },
+	/* 720x480p60 16:9 */
+	{  720,  480,  858,  525,  27027, 60,  3 , OTM_HDMI_PAR_16_9 },
+	/* 1280x720p60 16:9 */
+	{ 1280,  720, 1650,  750,  74250, 60,  4 , OTM_HDMI_PAR_16_9 },
+	/* 1920x1080p60 16:9 */
+	{ 1920, 1080, 2200, 1125, 148500, 60, 16 , OTM_HDMI_PAR_16_9 },
+	/* 720x576p50 4:3 */
+	{  720,  576,  864,  625,  27000, 50, 17 , OTM_HDMI_PAR_4_3  },
+	/* 720x576p50 16:9 */
+	{  720,  576,  864,  625,  27000, 50, 18 , OTM_HDMI_PAR_16_9 },
+	/* 1280x720p50 16:9 */
+	{  1280,  720, 1980,  750,  74250, 50, 19 , OTM_HDMI_PAR_16_9 },
+	/* 1920x1080p50 16:9 */
+	{  1920, 1080, 2640, 1125, 148500, 50, 31 , OTM_HDMI_PAR_16_9 },
+	/* 1920x1080p24 16:9 */
+	{  1920, 1080, 2750, 1125,  74250, 24, 32 , OTM_HDMI_PAR_16_9 },
+	/* 1920x1080p25 16:9 */
+	{  1920, 1080, 2640, 1125,  74250, 25, 33 , OTM_HDMI_PAR_16_9 },
+	/* 1920x1080p30 16:9 */
+	{  1920, 1080, 2200, 1125,  74250, 30, 34 , OTM_HDMI_PAR_16_9 },
+};
+
+/* Function declarations for interrupt routines */
+static irqreturn_t android_hdmi_irq_callback(int irq, void *data);
+static irqreturn_t __hdmi_irq_handler_bottomhalf(void *data);
+
+static int calculate_refresh_rate(struct drm_display_mode *mode);
+void android_hdmi_encoder_restore_wq(struct work_struct *work);
+
+/*
+ * mode array struct to support EDID testing
+ */
+#define DEBUG_MODES 100
+static struct debug_modes__t {
+	int clk;
+	int frq;
+	char name[DRM_DISPLAY_MODE_LEN + 1];
+} arr_modes[DEBUG_MODES];
+
+static u32 debug_modes_count;
+
+#define OTM_HDMI_I2C_ADAPTER_NUM 10
+#define OTM_HDMI_PIPE_NUM 1
+#define OTM_HDMI_MAX_DDC_WRITE_SIZE 20
+#define OTM_HDMI_MAX_HDISPLAY 1920
+#define OTM_HDMI_MAX_VDISPLAY 1080
+
+/* Default HDMI Edid - 640x480p 720x480p 1280x720p */
+static unsigned char default_edid[] = {
+	0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00,
+	0x25, 0xD4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x14, 0x01, 0x03, 0x80, 0x00, 0x00, 0xFF,
+	0x2A, 0xBA, 0x45, 0xA1, 0x59, 0x55, 0x9D, 0x28,
+	0x0D, 0x50, 0x54, 0x20, 0x00, 0x00, 0x01, 0x01,
+	0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+	0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x1D,
+	0x00, 0x72, 0x51, 0xD0, 0x1E, 0x20, 0x6E, 0x28,
+	0x55, 0x00, 0xC4, 0x8E, 0x21, 0x00, 0x00, 0x1E,
+	0x8F, 0x0A, 0xD0, 0x8A, 0x20, 0xE0, 0x2D, 0x10,
+	0x10, 0x3E, 0x96, 0x00, 0xC4, 0x8E, 0x21, 0x00,
+	0x00, 0x18, 0x00, 0x00, 0x00, 0xFC, 0x00, 0x49,
+	0x4E, 0x54, 0x45, 0x4C, 0x2D, 0x54, 0x56, 0x0A,
+	0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xFD,
+	0x0C, 0x37, 0x3D, 0x1F, 0x31, 0x0F, 0x00, 0x0A,
+	0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x01, 0xCE,
+	0x02, 0x03, 0x13, 0x41, 0x42, 0x04, 0x02, 0x23,
+	0x09, 0x07, 0x07, 0x67, 0x03, 0x0C, 0x00, 0x10,
+	0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8B
+};
+
+static struct edid *drm_get_edid_retry(struct drm_connector *connector,
+				struct i2c_adapter *adapter, void *context)
+{
+#define N_EDID_READ_RETRIES 5
+	int retries = N_EDID_READ_RETRIES;
+	struct edid *edid = NULL;
+
+	do {
+		if (retries != N_EDID_READ_RETRIES) {
+			/* 50ms delay helps successfully read edid
+			 * on agilent HDMI analyzer*/
+			msleep(50);
+			/* check for hdmi status before retrying */
+			if (otm_hdmi_get_cable_status(context) == false)
+				break;
+			pr_debug("retrying edid read after delay\n");
+		}
+		edid = drm_get_edid(connector, adapter);
+	} while(edid == NULL && --retries &&
+			otm_hdmi_get_cable_status(context));
+
+	return edid;
+}
+
+/*store the state whether the edid is ready
+ *in HPD (1) or not (0)*/
+static int edid_ready_in_hpd = 0;
+
+/**
+ * This function handles bottom half of HDMI hotplug interrupts
+ * @data	: android hdmi private structure
+ *
+ * Returns the behavior of the interrupt handler
+ *	IRQ_HANDLED - if interrupt handled
+ * This function handles bottom half of HDMI hotplug interrupts.
+ * IRQ interrupt bottomhalf handler callback. This callback
+ * will be called for hdmi plug/unplug interrupts
+ */
+static irqreturn_t __hdmi_irq_handler_bottomhalf(void *data)
+{
+	struct android_hdmi_priv *hdmi_priv = data;
+	static int processed_hdmi_status = -1;
+
+	if (hdmi_priv == NULL || !hdmi_priv->dev)
+		return IRQ_HANDLED;
+
+	if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
+			OSPM_UHB_FORCE_POWER_ON)) {
+		pr_err("Unable to power on display island!");
+		return IRQ_HANDLED;
+	} else {
+		struct drm_mode_config *mode_config = NULL;
+		struct edid *edid = NULL;
+		struct drm_connector *connector = NULL;
+		struct i2c_adapter *adapter = NULL;
+		bool hdmi_status = 0;
+		bool current_status = 0;
+		int hdmi_detect_count = 3;
+		int hdmi_detect_sleep_time= 0;
+		int hdmi_detect_exit_count = 50;
+		char *uevent_string = NULL;
+
+		otm_hdmi_power_rails_on();
+
+		/* Check HDMI status, read EDID only if connected */
+		hdmi_status = otm_hdmi_get_cable_status(hdmi_priv->context);
+
+		/* shorten sleep time for hdcp comppliance test 1a-02 */
+		hdmi_detect_sleep_time = (hdmi_status == false) ? 10 : 60;
+
+		do {
+			/* Debounce for at least 60ms in order for the
+			* cable status to have stabilized for next detection.
+			* Check its status to make sure same for 3 times.
+			*/
+			msleep(hdmi_detect_sleep_time);
+			current_status =
+				otm_hdmi_get_cable_status(hdmi_priv->context);
+			if (hdmi_status != current_status) {
+				hdmi_status = current_status;
+				hdmi_detect_count = 3;
+				hdmi_detect_sleep_time =
+					(hdmi_status == false) ? 10 : 60;
+			}
+		} while (hdmi_detect_count-- && hdmi_detect_exit_count--);
+
+		/* if the cable status has not changed/stable return */
+		if (hdmi_status == processed_hdmi_status ||
+			hdmi_detect_exit_count == 0) {
+			if (hdmi_detect_exit_count == 0)
+				pr_err("%s: HDMI cable status not stable!\n", __func__);
+			else
+				pr_info("%s: HDMI cable status not changed!\n", __func__);
+			if (!hdmi_status)
+				otm_hdmi_power_rails_off();
+			ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+			return IRQ_HANDLED;
+		}
+
+		processed_hdmi_status = hdmi_status;
+
+#ifdef OTM_HDMI_HDCP_ENABLE
+		otm_hdmi_hdcp_set_hpd_state(hdmi_priv->context, hdmi_status);
+#endif
+		if (!hdmi_status)
+			goto exit;
+
+		adapter = i2c_get_adapter(OTM_HDMI_I2C_ADAPTER_NUM);
+		if (!adapter) {
+			pr_err("Unable to get i2c adapter for HDMI");
+			goto exit;
+		}
+
+		mode_config = &hdmi_priv->dev->mode_config;
+		list_for_each_entry(connector,
+					&mode_config->connector_list,
+					head) {
+			if (DRM_MODE_CONNECTOR_DVID ==
+				connector->connector_type) {
+				edid = (struct edid *)
+					drm_get_edid(connector, adapter);
+				if (edid) {
+					if (drm_detect_hdmi_monitor(edid))
+						/* MONITOR_TYPE_HDMI */
+						hdmi_priv->monitor_type =
+							MONITOR_TYPE_HDMI;
+					else
+						/* MONITOR_TYPE_DVI */
+						hdmi_priv->monitor_type =
+							MONITOR_TYPE_DVI;
+					/* Store raw edid in HDMI context */
+					otm_hdmi_set_raw_edid(
+						hdmi_priv->context,
+						(char *)edid);
+					/* Raw edid is ready in HDMI context */
+					edid_ready_in_hpd = 1;
+					kfree(edid);
+					pr_debug("Edid Read Success");
+				} else {
+					pr_err("Edid Read failed");
+					/* Retry in next get modes */
+					edid_ready_in_hpd = 0;
+				}
+				break;
+			}
+		}
+exit:
+		/* Notify user space */
+		pr_info("%s: HDMI hot plug state  = %d\n", __func__, hdmi_status);
+
+		if (hdmi_status) {
+			/* hdmi_state indicates that hotplug event happens */
+			hdmi_state = 1;
+			uevent_string = "HOTPLUG_IN=1";
+			psb_sysfs_uevent(hdmi_priv->dev, uevent_string);
+			/* delay updating audio state until mode setting is finished*/
+			hdmi_priv->delayed_audio_hotplug = true;
+		} else {
+			otm_hdmi_power_rails_off();
+			hdmi_state = 0;
+			edid_ready_in_hpd = 0;
+			uevent_string = "HOTPLUG_OUT=1";
+			psb_sysfs_uevent(hdmi_priv->dev, uevent_string);
+
+			mid_hdmi_audio_signal_event(hdmi_priv->dev, HAD_EVENT_HOT_UNPLUG);
+			switch_set_state(&hdmi_priv->sdev, 0);
+		}
+
+		/* drm_helper_hpd_irq_event(hdmi_priv->dev); */
+		/* force mode setting as connector status can be polluted by
+		  * user space's querying of connection status in response to
+		  * hot plug event. 
+		  */
+		drm_kms_helper_hotplug_event(hdmi_priv->dev);
+	}
+	ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+	return IRQ_HANDLED;
+}
+
+#ifdef OTM_HDMI_HDCP_ENABLE
+static int hdmi_ddc_read_write(bool read,
+			uint8_t i2c_addr,
+			uint8_t offset,
+			uint8_t *buffer,
+			int size)
+{
+	struct i2c_adapter *adapter = i2c_get_adapter(OTM_HDMI_I2C_ADAPTER_NUM);
+	struct i2c_msg msgs[2];
+	int num_of_msgs = 0;
+	uint8_t wr_buffer[OTM_HDMI_MAX_DDC_WRITE_SIZE];
+
+	/* Use one i2c message to write and two to read as some
+	 * monitors don't handle two write messages properly
+	*/
+	if (read) {
+		msgs[0].addr   = i2c_addr,
+		msgs[0].flags  = 0,
+		msgs[0].len    = 1,
+		msgs[0].buf    = &offset,
+
+		msgs[1].addr   = i2c_addr,
+		msgs[1].flags  = ((read) ? I2C_M_RD : 0),
+		msgs[1].len    = size,
+		msgs[1].buf    = buffer,
+
+		num_of_msgs = 2;
+	} else {
+		BUG_ON(size + 1 > OTM_HDMI_MAX_DDC_WRITE_SIZE);
+
+		wr_buffer[0] = offset;
+		memcpy(&wr_buffer[1], buffer, size);
+
+		msgs[0].addr   = i2c_addr,
+		msgs[0].flags  = 0,
+		msgs[0].len    = size + 1,
+		msgs[0].buf    = wr_buffer,
+
+		num_of_msgs = 1;
+	}
+
+	if (adapter != NULL && i2c_transfer(adapter, msgs, num_of_msgs) ==
+								num_of_msgs)
+		return 1;
+
+	return 0;
+}
+#endif
+
+/**
+ * IRQ interrupt bottomhalf handler callback.
+ * @irq		: IRQ number
+ * @data		: hdmi_priv data
+ *
+ * Returns IRQ_HANDLED when the interrupt has been handled.
+ * IRQ interrupt bottomhalf handler callback.
+ * This callback will be called for hdmi plug/unplug interrupts.
+ */
+static irqreturn_t android_hdmi_irq_callback(int irq, void *data)
+{
+	pr_debug("%s: IRQ Interrupt callback", __func__);
+
+	return __hdmi_irq_handler_bottomhalf(data);
+}
+
+int android_hdmi_irq_test(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *)dev->dev_private;
+	struct android_hdmi_priv *hdmi_priv = dev_priv->hdmi_priv;
+
+	return __hdmi_irq_handler_bottomhalf(hdmi_priv);
+}
+
+
+/**
+ * This function sets the hdmi driver during bootup
+ * @dev		: handle to drm_device
+ *
+ * Returns nothing
+ *
+ * This function is called from psb driver to setup the
+ * hdmi driver. Called only once during boot-up of the system
+ */
+void android_hdmi_driver_setup(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct android_hdmi_priv *hdmi_priv;
+	int ret;
+
+	pr_info("Enter %s", __func__);
+
+	/* HDMI private data */
+	hdmi_priv = kzalloc(sizeof(struct android_hdmi_priv), GFP_KERNEL);
+	if (!hdmi_priv) {
+		pr_err("failed to allocate memory");
+		goto out;
+	}
+
+	pr_debug("%s: Initialize the HDMI device\n", __func__);
+	/* Initialize the HDMI context */
+	if (otm_hdmi_device_init(&(hdmi_priv->context), dev->pdev)) {
+		pr_err("failed to initialize hdmi device\n");
+		goto free;
+	}
+
+	hdmi_priv->dev = dev;
+
+	hdmi_priv->hdmib_reg = HDMIB_CONTROL;
+	hdmi_priv->monitor_type = MONITOR_TYPE_HDMI;
+	hdmi_priv->is_hdcp_supported = true;
+	/* delay setting hdmi switch state till mode setting is completed*/
+	hdmi_priv->delayed_audio_hotplug = true;
+
+	dev_priv->hdmi_priv = (void *)hdmi_priv;
+
+	/* Register hdmi switch_dev */
+	hdmi_priv->sdev.name = "hdmi_audio";
+	if (switch_dev_register(&hdmi_priv->sdev) < 0) {
+		pr_err("%s: Hdmi switch registration failed\n", __func__);
+		goto free;
+	}
+
+	/* Register callback to be used with Hotplug interrupts */
+	ret = otm_hdmi_hpd_callback_register(hdmi_priv->context,
+					     &android_hdmi_irq_callback,
+					     (void *)hdmi_priv);
+	if (ret) {
+		pr_err("Failed to register callback for hotplug!\n");
+		goto free;
+	}
+
+	pr_info("%s: Done with driver setup\n", __func__);
+	pr_info("Exit %s\n", __func__);
+	return;
+free:
+	kfree(hdmi_priv);
+out:
+	pr_info("Exit %s\n", __func__);
+	return;
+}
+
+/* structure for hdmi cmdline module
+ * don't upstream the code
+ */
+typedef struct {
+	int hdisplay, vdisplay;
+	int refresh;
+	int refresh_specified;
+	int vic;
+	int vic_specified;
+	int specified; /* 1: cmdline_mode is set */
+} otm_cmdline_mode;
+
+static otm_cmdline_mode cmdline_mode = { 0, 0, 0, 0, 0, 0, 0 };
+
+/**
+ * This function is used by external tools to force modes
+ * @cmdoption		: cmd line option parameter
+ *
+ * Returns -1 on error 0 on success -2 on invalid output
+ * This function gets the input parameters mentioned and sets the
+ * driver to the mentioned mode. These utility functions are
+ * for validating the various modes and useful for compliance
+ * testing as well as easy debug
+ */
+int otm_cmdline_parse_option(char *cmdoption)
+{
+	int ret = 0;
+	int namelen = 0;
+	int i;
+	int v_spec = 0;
+	char *name;
+	if (NULL == cmdoption)
+		return -1;
+
+	cmdline_mode.specified = 0;
+	cmdline_mode.refresh_specified = 0;
+	cmdline_mode.vic_specified = 0;
+
+	name = cmdoption;
+	namelen = strlen(name);
+	for (i = namelen-1; i >= 0; i--) {
+		switch (name[i]) {
+		case '@':
+			namelen = i;
+			cmdline_mode.refresh =
+				simple_strtol(&name[i+1], NULL, 10);
+			cmdline_mode.refresh_specified = 1;
+			break;
+		case 'x':
+		case 'X':
+			cmdline_mode.vdisplay =
+				simple_strtol(&name[i+1], NULL, 10);
+			v_spec = 1;
+			break;
+		case '0' ... '9':
+			break;
+		default:
+			/* invalid input */
+			return -2;
+		}
+	}
+
+	if ((i < 0) && (1 == v_spec))
+		cmdline_mode.hdisplay = simple_strtol(name, NULL, 10);
+
+	cmdline_mode.specified = 1;
+	return ret;
+}
+EXPORT_SYMBOL_GPL(otm_cmdline_parse_option);
+
+/**
+ * This function is used by external tools to force the vic#
+ * @vic		: vic number
+ *
+ * Returns 0 on success and -1 on invalid input vic
+ * This function gets the input parameters mentioned and sets the
+ * driver to the mentioned vic number. These utility functions are
+ * for validating the various modes and useful for compliance
+ * testing as well as easy debug
+ */
+int otm_cmdline_set_vic_option(int vic)
+{
+	int i = 0;
+
+	pr_debug("Entered %s\n", __func__);
+	cmdline_mode.specified = 0;
+	cmdline_mode.refresh_specified = 0;
+	cmdline_mode.vic_specified = 0;
+
+	for (i = 0; i < ARRAY_SIZE(vic_formats); i++) {
+		if (vic == vic_formats[i].vic) {
+			cmdline_mode.refresh = vic_formats[i].vrefr;
+			cmdline_mode.hdisplay = vic_formats[i].width;
+			cmdline_mode.vdisplay = vic_formats[i].height;
+			cmdline_mode.vic = vic;
+			cmdline_mode.specified = 1;
+			cmdline_mode.refresh_specified = 1;
+			cmdline_mode.vic_specified = 1;
+			pr_debug("%s:Command line mode option set to VIC: %d\n",
+				 __func__, vic);
+			return 0;
+		}
+	}
+
+	pr_debug("HDMI cmdline: Unsupported VIC(%d) specified\n", vic);
+	return -1;
+}
+EXPORT_SYMBOL_GPL(otm_cmdline_set_vic_option);
+
+/**
+ * This function is used by tools to print the cmdline options
+ *
+ * Returns nothing
+ * This function is used by external tools to print
+ * the cmdline options passed through tools
+ */
+void otm_print_cmdline_option(void)
+{
+	if (1 == cmdline_mode.specified) {
+		if (1 == cmdline_mode.vic_specified)
+			printk(KERN_INFO "HDMI cmdline option: %dx%d@%d (%d)\n",
+				cmdline_mode.hdisplay,
+				cmdline_mode.vdisplay,
+				cmdline_mode.refresh,
+				cmdline_mode.vic);
+		else if (1 == cmdline_mode.refresh_specified)
+			printk(KERN_INFO "HDMI cmdline option: %dx%d@%d\n",
+				cmdline_mode.hdisplay,
+				cmdline_mode.vdisplay,
+				cmdline_mode.refresh);
+		else
+			printk(KERN_INFO "HDMI cmdline option: %dx%d\n",
+				cmdline_mode.hdisplay, cmdline_mode.vdisplay);
+	} else
+		printk(KERN_INFO "HDMI cmdline option is not set\n");
+}
+EXPORT_SYMBOL_GPL(otm_print_cmdline_option);
+
+/**
+ * DRM connector helper routine.
+ * @connector	: drm_connector handle
+ * @mode		: drm_display_mode handle
+ *
+ * Returns integer values which tell whether the hdmi mode
+ * is valid or not
+ * MODE_CLOCK_LOW - mode clock less than min pixel clock value
+ * MODE_CLOCK_HIGH - mode clock greater than min pixel clock value
+ * MODE_BAD - mode values are incorrect
+ * MODE_OK - mode values are correct
+ * MODE_NO_DBLESCAN - double scan mode not supported
+ * MODE_NO_INTERLACE - interlace mode not supported
+ * This is the DRM connector helper routine
+ */
+int android_hdmi_mode_valid(struct drm_connector *connector,
+				struct drm_display_mode *mode)
+{
+	unsigned int pc_min, pc_max;
+	struct drm_display_mode *mode_entry, *t;
+	struct drm_display_mode *saved_mode = NULL;
+	int active_region = 0, vrefresh = 0;
+	int saved_active_region = 0, saved_vrefresh = 0;
+	int ret = MODE_OK;
+
+	pr_debug("display info. hdisplay = %d, vdisplay = %d, clock = %d.\n",
+			mode->hdisplay, mode->vdisplay, mode->clock);
+
+	if (mode->hdisplay > OTM_HDMI_MAX_HDISPLAY) {
+		ret = MODE_BAD_HVALUE;
+		goto err;
+	}
+
+	if (mode->vdisplay > OTM_HDMI_MAX_VDISPLAY) {
+		ret = MODE_BAD_VVALUE;
+		goto err;
+	}
+
+	if (!(((mode->hdisplay == 1920) && (mode->vdisplay == 1080)) ||
+	    ((mode->hdisplay == 1280) && (mode->vdisplay == 720)) ||
+	    ((mode->hdisplay == 720) && (mode->vdisplay == 576)) ||
+	    ((mode->hdisplay == 720) && (mode->vdisplay == 480)) ||
+	    ((mode->hdisplay == 640) && (mode->vdisplay == 480)))) {
+		ret = MODE_BAD_VVALUE;
+		goto err;
+	}
+
+	if ((mode->vrefresh < 24) || (mode->vrefresh > 60)) {
+		ret = MODE_BAD_VVALUE;
+		goto err;
+	}
+
+	/* Restricting modes within the supported pixel clock */
+	if (OTM_HDMI_SUCCESS == otm_hdmi_get_pixel_clock_range(
+					&pc_min, &pc_max)) {
+		if (mode->clock < pc_min) {
+			pr_debug("pruned mode %dx%d@%d.\n",
+				mode->hdisplay,
+				mode->vdisplay,
+				mode->clock);
+			ret = MODE_CLOCK_LOW;
+			goto err;
+		}
+		if (mode->clock > pc_max) {
+			pr_debug("pruned mode %dx%d@%d.\n",
+				mode->hdisplay,
+				mode->vdisplay,
+				mode->clock);
+			ret = MODE_CLOCK_HIGH;
+			goto err;
+		}
+	}
+
+#if defined(OTM_HDMI_UNIT_TEST) && defined (OTM_HDMI_CMDLINE)
+	/* if cmdline_mode is set, prune all other modes. */
+	if (1 == cmdline_mode.specified) {
+		if ((cmdline_mode.hdisplay != mode->hdisplay) ||
+			(cmdline_mode.vdisplay != mode->vdisplay) ||
+			((1 == cmdline_mode.refresh_specified) &&
+			(cmdline_mode.refresh !=
+			calculate_refresh_rate(mode)))) {
+			return MODE_BAD;
+		}
+	}
+#endif
+
+	if (mode->type == DRM_MODE_TYPE_USERDEF)
+		return MODE_OK;
+
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN) {
+		pr_debug("pruned mode %dx%d@%dHz. Double scan not supported.\n",
+			mode->hdisplay,
+			mode->vdisplay,
+			calculate_refresh_rate(mode));
+		ret = MODE_NO_DBLESCAN;
+		goto err;
+	}
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+		pr_debug("pruned mode %dx%d@%dHz. Interlace not supported.\n",
+			mode->hdisplay,
+			mode->vdisplay,
+			calculate_refresh_rate(mode));
+		ret = MODE_NO_INTERLACE;
+		goto err;
+	}
+	return MODE_OK;
+
+err:
+	mode->status = ret;
+
+	if (mode->type & DRM_MODE_TYPE_PREFERRED) {
+		mode->type &= ~DRM_MODE_TYPE_PREFERRED;
+
+		/*
+		 * Find the mode with maximum active region and refresh rate in
+		 * the supported mode list, and set it as preferred one.
+		 */
+		list_for_each_entry_safe(mode_entry, t,
+				&connector->modes, head) {
+			if (mode_entry->status != MODE_OK)
+				continue;
+
+			active_region =
+				mode_entry->hdisplay * mode_entry->vdisplay;
+
+			if (active_region >= saved_active_region) {
+				saved_active_region = active_region;
+				vrefresh = drm_mode_vrefresh(mode_entry);
+
+				if ((vrefresh >= saved_vrefresh)) {
+					saved_vrefresh = vrefresh;
+					saved_mode = mode_entry;
+				}
+			}
+		}
+
+		if (saved_mode)
+			saved_mode->type |= DRM_MODE_TYPE_PREFERRED;
+	}
+
+	return ret;
+}
+
+
+/**
+ * This function maps the timings to drm_display_mode
+ * @timings	: This holds the timings information
+ * @dev		: drm_device handle
+ *
+ * Returns the mapped drm_display_mode
+ * This function maps the timings in EDID information
+ * to drm_display_mode and returns the same
+ */
+static struct drm_display_mode
+*android_hdmi_get_drm_mode_from_pdt(const otm_hdmi_timing_t *timings,
+				    struct drm_device *dev)
+{
+	struct drm_display_mode *mode;
+	int i;
+	static const struct {
+	int w, h;
+	} cea_interlaced[7] = {
+		{ 1920, 1080 },
+		{  720,  480 },
+		{ 1440,  480 },
+		{ 2880,  480 },
+		{  720,  576 },
+		{ 1440,  576 },
+		{ 2880,  576 },
+	};
+
+	if (timings == NULL || dev == NULL)
+		return NULL;
+
+	mode = drm_mode_create(dev);
+	if (mode == NULL)
+		return NULL;
+
+	mode->type = DRM_MODE_TYPE_DRIVER;
+	mode->clock = timings->dclk;
+
+	mode->hdisplay = timings->width;
+	mode->hsync_start = timings->hsync_start;
+	mode->hsync_end = timings->hsync_end;
+	mode->htotal = timings->htotal;
+
+	mode->vdisplay = timings->height;
+	mode->vsync_start = timings->vsync_start;
+	mode->vsync_end = timings->vsync_end;
+	mode->vtotal = timings->vtotal;
+
+	if (timings->mode_info_flags & PD_SCAN_INTERLACE) {
+
+		mode->flags |= DRM_MODE_FLAG_INTERLACE;
+
+		for (i = 0; i < ARRAY_SIZE(cea_interlaced); i++) {
+			if ((mode->hdisplay == cea_interlaced[i].w) &&
+			    (mode->vdisplay == cea_interlaced[i].h / 2)) {
+				mode->vdisplay *= 2;
+				mode->vsync_start *= 2;
+				mode->vsync_end *= 2;
+				mode->vtotal *= 2;
+				mode->vtotal |= 1;
+			}
+		}
+	}
+
+	mode->flags |= (timings->mode_info_flags & PD_HSYNC_HIGH) ?
+		DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
+	mode->flags |= (timings->mode_info_flags & PD_VSYNC_HIGH) ?
+		DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
+
+	/* Store aspect ratio information */
+	mode->flags |= (timings->mode_info_flags & OTM_HDMI_PAR_16_9) ?
+		DRM_MODE_FLAG_PAR16_9 : DRM_MODE_FLAG_PAR4_3;
+
+	return mode;
+}
+
+/**
+ * helper function to check whether two clocks can fall into the same VIC.
+ *
+ * Returns: true if possible, false otherwise.
+ */
+static bool __android_check_clock_match(int target, int reference)
+{
+	/* check target clock is in range of 60Hz or 59.94 (reference * 1000/1001) with
+	 * (-0.5%, +0.5%) tolerance. Based on CEA spec, when determining whether two video timings
+	 * are identical, clock frequencey within (-0.5%, +0.5%) tolerance should be considered
+	 * as the same.
+	 */
+
+	if (target >= DIV_ROUND_UP(reference * 995, 1001) &&
+		target <= DIV_ROUND_UP(reference * 1005, 1000))
+		return true;
+	return false;
+}
+
+/**
+ * This function adds the cea modes in extension blocks of EDID
+ * @context	: hdmi context
+ * @connector	: handle to drm_connector
+ *
+ * Returns the number of modes added
+ */
+static int android_hdmi_add_cea_edid_modes(void *context,
+					   struct drm_connector *connector)
+{
+	hdmi_context_t *ctx = (hdmi_context_t *)context;
+	edid_info_t *edid_info;
+	struct drm_display_mode *newmode = NULL, *mode_entry, *t;
+	unsigned int saved_flags;
+	int i = 0, ret_count = 0;
+
+	if (connector == NULL || ctx == NULL)
+		return 0;
+
+	/* Init locals */
+	edid_info = &ctx->edid_int;
+
+	/* Do Mapping from PDT to drm_display_mode */
+	for (i = 0; i < edid_info->num_timings; i++) {
+		newmode = android_hdmi_get_drm_mode_from_pdt(
+				&edid_info->timings[i], connector->dev);
+		if (!newmode)
+			continue;
+
+		/* add new mode to list */
+		drm_mode_set_name(newmode);
+		drm_mode_probed_add(connector, newmode);
+		ret_count++;
+	}
+
+	list_for_each_entry_safe(mode_entry, t, &connector->probed_modes, head) {
+		/* If DRM already correctly handled PAR, skip this mode_entry */
+		if ((mode_entry->flags & DRM_MODE_FLAG_PAR4_3) || (mode_entry->flags & DRM_MODE_FLAG_PAR16_9))
+			continue;
+
+		for (i = 0; i < edid_info->num_timings; i++) {
+			newmode = android_hdmi_get_drm_mode_from_pdt(
+					&edid_info->timings[i], connector->dev);
+			if (!newmode)
+				continue;
+
+			/* Clear PAR flag for comparison */
+			saved_flags = newmode->flags;
+			newmode->flags &= (~DRM_MODE_FLAG_PAR4_3) & (~DRM_MODE_FLAG_PAR16_9);
+
+			/* If same mode, then update PAR flag */
+			if (drm_mode_equal_no_clocks(newmode, mode_entry) &&
+					__android_check_clock_match(newmode->clock, mode_entry->clock)) {
+					if (saved_flags & DRM_MODE_FLAG_PAR4_3)
+						mode_entry->flags |= DRM_MODE_FLAG_PAR4_3;
+					else
+						mode_entry->flags |= DRM_MODE_FLAG_PAR16_9;
+					break;
+			}
+			drm_mode_destroy(connector->dev, newmode);
+		}
+	}
+
+	return ret_count;
+}
+
+#ifdef OTM_HDMI_UNIT_TEST
+static bool android_hdmi_probed_mode_exists(
+				struct drm_connector *connector,
+				int hdisplay, int vdisplay, int vrefresh)
+{
+	struct drm_display_mode *mode, *t;
+	if (!connector || hdisplay < 0 || vdisplay < 0 || vrefresh < 0)
+		goto exit;
+
+	/* loop through all probed modes to match */
+	list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
+		if (mode->hdisplay == hdisplay &&
+			mode->vdisplay == vdisplay &&
+			vrefresh == drm_mode_vrefresh(mode)) {
+			return true;
+		}
+	}
+exit:
+	return false;
+}
+
+/**
+ * This function adds the edid information from cmdline
+ * @context	: handle hdmi_context
+ * @connector	: handle drm_connector
+ * @hdisplay	: width
+ * @vdisplay	: height
+ * @vrefresh	: refresh rate
+ *
+ * Returns true if mode added, false if not added
+ * This function is used to set the user requested mode
+ * into the mode list
+ */
+static bool android_hdmi_add_noedid_mode(
+				void *context,
+				struct drm_connector *connector,
+				int hdisplay, int vdisplay, int vrefresh)
+{
+	struct drm_display_mode *newmode = NULL;
+	const otm_hdmi_timing_t *pdt = NULL;
+
+	if (!context || !connector || hdisplay < 0 ||
+			vdisplay < 0 || vrefresh < 0)
+		goto exit;
+
+	/* get mode timings */
+	pdt = otm_hdmi_get_mode_timings(context, hdisplay, vdisplay, vrefresh);
+	if (!pdt)
+		goto exit;
+
+	/* add mode */
+	newmode = android_hdmi_get_drm_mode_from_pdt(pdt, connector->dev);
+	if (newmode) {
+		drm_mode_probed_add(connector, newmode);
+		return true;
+	}
+exit:
+	return false;
+}
+#endif
+
+/**
+ * Calculate refresh rate from mode
+ * @mode	: handle to drm_display_mode
+ *
+ * Returns the calculated refresh rate
+ */
+static int calculate_refresh_rate(struct drm_display_mode *mode)
+{
+	int refresh_rate = 0;
+
+	if (!mode)
+		return refresh_rate;
+
+	refresh_rate = (((mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1) *
+			mode->clock * 1000) /
+			(mode->htotal * mode->vtotal);
+
+	return refresh_rate;
+}
+
+/**
+ * Query HDMI setting from FW when first boot up
+ * @hdisplay	:  hactive timing set by FW
+ * @vdisplay	:  vactive timing set by FW
+ *
+ * Returns ture on success and false else
+ */
+static bool query_fw_hdmi_setting(struct drm_device *dev,
+				  uint32_t *hdisplay,
+				  uint32_t *vdisplay,
+				  uint8_t *vic,
+				  int * monitor_type)
+{
+	uint32_t htotal, vtotal;
+	struct drm_psb_private *dev_priv;
+	struct android_hdmi_priv *hdmi_priv;
+
+	if (NULL == dev)
+		return false;
+	dev_priv = dev->dev_private;
+	if (NULL == dev_priv)
+		return false;
+	hdmi_priv = dev_priv->hdmi_priv;
+	if (NULL == hdmi_priv)
+		return false;
+
+
+	if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
+				OSPM_UHB_FORCE_POWER_ON))
+		return false;
+
+	htotal = REG_READ(HTOTAL_B);
+	vtotal = REG_READ(VTOTAL_B);
+
+	*vic = otm_hdmi_get_vic(hdmi_priv->context);
+
+	if (REG_READ(VIDEO_DIP_CTL) & EN_DIP)
+		*monitor_type = MONITOR_TYPE_HDMI;
+	else
+		*monitor_type = MONITOR_TYPE_DVI;
+
+	if (htotal != 0 && vtotal != 0) {
+		*hdisplay = ((htotal + 1) << 16) >> 16;
+		*vdisplay = ((vtotal + 1) << 16) >> 16;
+	        pr_info("%s:fw set htotal=0x%x vtotal=0x%x! vic=%d monitor_type=%d\n",
+				__func__, htotal, vtotal, *vic, *monitor_type);
+	}
+
+	ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+
+	return true;
+}
+
+/**
+ * helper function to convert drm_display_mode to otm_hdmi_timing.
+ * @otm_mode:		otm hdmi mode to be populated
+ * @drm_mode:		drm_display_mode
+ *
+ * Returns:	none.
+ */
+static void __android_hdmi_drm_mode_to_otm_timing(otm_hdmi_timing_t *otm_mode,
+				struct drm_display_mode *drm_mode)
+{
+	uint8_t i = 0;
+	uint32_t par = OTM_HDMI_PAR_NO_DATA;
+
+	if (otm_mode == NULL || drm_mode == NULL)
+		return;
+
+	otm_mode->width			= (unsigned short)
+						drm_mode->crtc_hdisplay;
+	otm_mode->height		= (unsigned short)
+						drm_mode->crtc_vdisplay;
+	otm_mode->dclk			= (unsigned long)
+						drm_mode->clock;
+	otm_mode->htotal		= (unsigned short)
+						drm_mode->crtc_htotal;
+	otm_mode->hblank_start		= (unsigned short)
+						drm_mode->crtc_hblank_start;
+	otm_mode->hblank_end		= (unsigned short)
+						drm_mode->crtc_hblank_end;
+	otm_mode->hsync_start		= (unsigned short)
+						drm_mode->crtc_hsync_start;
+	otm_mode->hsync_end		= (unsigned short)
+						drm_mode->crtc_hsync_end;
+	otm_mode->vtotal		= (unsigned short)
+						drm_mode->crtc_vtotal;
+	otm_mode->vblank_start		= (unsigned short)
+						drm_mode->crtc_vblank_start;
+	otm_mode->vblank_end		= (unsigned short)
+						drm_mode->crtc_vblank_end;
+	otm_mode->vsync_start		= (unsigned short)
+						drm_mode->crtc_vsync_start;
+	otm_mode->vsync_end		= (unsigned short)
+						drm_mode->crtc_vsync_end;
+
+	otm_mode->mode_info_flags = 0;
+	if (drm_mode->flags & DRM_MODE_FLAG_INTERLACE)
+		otm_mode->mode_info_flags |= PD_SCAN_INTERLACE;
+
+	if (drm_mode->flags & DRM_MODE_FLAG_PAR4_3) {
+		otm_mode->mode_info_flags |= OTM_HDMI_PAR_4_3;
+		par = OTM_HDMI_PAR_4_3;
+	} else if (drm_mode->flags & DRM_MODE_FLAG_PAR16_9) {
+		otm_mode->mode_info_flags |= OTM_HDMI_PAR_16_9;
+		par = OTM_HDMI_PAR_16_9;
+	}
+
+	if (drm_mode->flags & DRM_MODE_FLAG_PHSYNC)
+		otm_mode->mode_info_flags |= PD_HSYNC_HIGH;
+
+	if (drm_mode->flags & DRM_MODE_FLAG_PVSYNC)
+		otm_mode->mode_info_flags |= PD_VSYNC_HIGH;
+
+	otm_mode->metadata = 0;
+	for (i = 0; i < ARRAY_SIZE(vic_formats); i++) {
+		if (otm_mode->width == vic_formats[i].width &&
+			otm_mode->height == vic_formats[i].height &&
+			otm_mode->htotal == vic_formats[i].htotal &&
+			otm_mode->vtotal == vic_formats[i].vtotal &&
+			__android_check_clock_match(otm_mode->dclk, vic_formats[i].dclk) &&
+			par == vic_formats[i].par) {
+			if (1 == cmdline_mode.specified &&
+				1 == cmdline_mode.vic_specified) {
+				if (cmdline_mode.vic == vic_formats[i].vic) {
+					otm_mode->metadata = cmdline_mode.vic;
+					break;
+				}
+				/* else continue */
+			} else {
+				otm_mode->metadata = vic_formats[i].vic;
+				pr_debug("Assigning vic %d to mode %dx%d@%d, flags=%#x.\n",
+					(int)otm_mode->metadata, otm_mode->width,
+					otm_mode->height, (int)otm_mode->dclk,
+					(int)otm_mode->mode_info_flags);
+				break;
+			}
+		}
+	}
+}
+
+/**
+ * DRM get modes helper routine
+ * @connector	: handle to drm_connector
+ *
+ * Returns the number of modes added
+ * This is a helper routines for DRM get modes.
+ * This function gets the edid information from the external sink
+ * device using i2c when connected and parses the edid information
+ * obtained and adds the modes to connector list
+ * If sink device is not connected, then static edid timings are
+ * used and those modes are added to the connector list
+ */
+int android_hdmi_get_modes(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct android_hdmi_priv *hdmi_priv = dev_priv->hdmi_priv;
+#ifdef CONFIG_ITE_HDMI_CEC
+	hdmi_context_t *ctx = hdmi_priv->context;
+#endif
+	struct edid *edid = NULL;
+	/* Edid address in HDMI context */
+	struct edid *ctx_edid = NULL;
+#ifdef CONFIG_ITE_HDMI_CEC
+	edid_info_t *edid_info;
+#endif
+	struct drm_display_mode *mode, *t;
+	int i = 0, j = 0, ret = 0;
+	int refresh_rate = 0;
+	bool pref_mode_found = false;
+	struct i2c_adapter *adapter = NULL;
+	struct drm_display_mode *pref_mode_assigned;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+	struct drm_display_mode *dup_mode, *user_mode;
+	int mode_present = 0
+#endif
+
+	debug_modes_count = 0;
+	pr_debug("Enter %s\n", __func__);
+
+	adapter = i2c_get_adapter(OTM_HDMI_I2C_ADAPTER_NUM);
+
+	/* Lazy edid read feature, which can save I2C transactions largely.
+	 * Basically, HPD will do edid read and store to HDMI context.
+	 * Therefore, get modes should read edid with the condition
+	 * whether the edid is ready in HDP or not in a lazy way. */
+	if (edid_ready_in_hpd)
+		goto edid_is_ready;
+
+	/* Read edid blocks from i2c device if cable is connected */
+	if (NULL != adapter && otm_hdmi_get_cable_status(hdmi_priv->context))
+		edid = (struct edid *)drm_get_edid_retry(connector, adapter,
+							hdmi_priv->context);
+
+	if (edid == NULL) {
+		pr_err("%s Edid Read failed -use default edid", __func__);
+		edid = (struct edid *)default_edid;
+	} else {
+		edid_ready_in_hpd = 1;
+		if (drm_detect_hdmi_monitor(edid))
+			/* MONITOR_TYPE_HDMI */
+			hdmi_priv->monitor_type =
+					MONITOR_TYPE_HDMI;
+		else
+			/* MONITOR_TYPE_DVI */
+			hdmi_priv->monitor_type =
+					MONITOR_TYPE_DVI;
+		pr_debug("Edid Read Done in get modes\n");
+	}
+
+	/* Store raw edid into HDMI context */
+	otm_hdmi_set_raw_edid(hdmi_priv->context, (char *)edid);
+
+	/* Release edid */
+	if (edid && ((unsigned char *)edid != (unsigned char *)default_edid))
+		kfree(edid);
+
+edid_is_ready:
+	/* Get the edid stored in HDMI context */
+	otm_hdmi_get_raw_edid(hdmi_priv->context, (char **)&ctx_edid);
+
+	/* Add modes into DRM mode list */
+	drm_mode_connector_update_edid_property(connector, ctx_edid);
+	ret = drm_add_edid_modes(connector, ctx_edid);
+
+	/* Parse the edid extensions.
+	 * If successful, add modes in extension blocks to DRM
+	 */
+	if (otm_hdmi_edid_extension_parse(hdmi_priv->context, ctx_edid,
+			adapter) == OTM_HDMI_SUCCESS) {
+#ifdef CONFIG_ITE_HDMI_CEC
+		edid_info = &ctx->edid_int;
+		hdmi_edid_src_phy_addr = edid_info->spa;
+#endif
+		ret += android_hdmi_add_cea_edid_modes(hdmi_priv->context,
+						connector);
+	}
+
+#ifdef OTM_HDMI_UNIT_TEST
+	if (1 == cmdline_mode.specified) {
+		/* Add cmdline mode if it does not exist in EDID */
+		if (!android_hdmi_probed_mode_exists(connector,
+			cmdline_mode.hdisplay,
+			cmdline_mode.vdisplay,
+			cmdline_mode.refresh))
+			if (android_hdmi_add_noedid_mode(
+				hdmi_priv->context,
+				connector,
+				cmdline_mode.hdisplay,
+				cmdline_mode.vdisplay,
+				cmdline_mode.refresh))
+				ret++;
+	}
+#endif
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+	connector->display_info.raw_edid = NULL;
+#endif
+	/* monitor_type is being used to switch state between HDMI & DVI */
+	if (otm_hdmi_is_monitor_hdmi(hdmi_priv->context))
+		hdmi_priv->monitor_type = MONITOR_TYPE_HDMI;
+	else
+		hdmi_priv->monitor_type = MONITOR_TYPE_DVI;
+
+	j = 0;
+	list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
+		refresh_rate = calculate_refresh_rate(mode);
+		if (mode->vrefresh == 0) {
+			mode->vrefresh = refresh_rate;
+		}
+		pr_debug("Mode %02d: %s %dHz\t Clk: %dKHz H/V: %c,%c"
+			"flags: 0x%08x\n",
+			j, mode->name, refresh_rate, mode->clock,
+			(mode->flags & DRM_MODE_FLAG_PHSYNC) ? '+' : '-',
+			(mode->flags & DRM_MODE_FLAG_PVSYNC) ? '+' : '-',
+			mode->flags);
+
+		if (debug_modes_count < DEBUG_MODES) {
+			strncpy(arr_modes[debug_modes_count].name, mode->name,
+				strlen(mode->name));
+			arr_modes[debug_modes_count].name[strlen(mode->name)]
+				= '\0';
+			arr_modes[debug_modes_count].frq = refresh_rate;
+			arr_modes[debug_modes_count].clk = mode->clock;
+			debug_modes_count++;
+		} else {
+			pr_err("Increase size of DEBUG_MODES, some modes not"
+				 " listed in report_edid.sh\n");
+		}
+
+		j++;
+	}
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+	/* add user mode to connector->mode list to support
+	 * DRM IOCTL attachmode
+	 */
+        /*we don't support attach mode in DRM_IOCTL in kernel3.10 now,
+         * so saftly remove below code when linux kernel version is 3.10
+         */
+	list_for_each_entry_safe(user_mode, t, &connector->user_modes, head) {
+		mode_present = 0;
+		/* check for whether user_mode is already in the mode_list */
+		list_for_each_entry(mode, &connector->modes, head) {
+			if (drm_mode_equal(mode, user_mode)) {
+				mode_present = 1;
+				mode->status = MODE_OK;
+			}
+		}
+
+		pr_debug("user_mode ret: 0x%x, mode_present: 0x%x\n", ret,
+			 mode_present);
+		if (!mode_present) {
+			dup_mode = drm_mode_duplicate(dev, user_mode);
+			if (!dup_mode) {
+				pr_err("Invalid dup_mode\n");
+				break;
+			}
+			dup_mode->status = MODE_OK;
+			list_add_tail(&dup_mode->head, &connector->modes);
+			ret += 1;
+		}
+	}
+#endif
+
+	pref_mode_assigned = NULL;
+
+	/* choose a preferred mode and set the mode type accordingly */
+	list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
+		/* check whether the display has support for 1080p.
+		 * 720P is the minimum requirement expected from
+		 * external display.
+		 * (extend this if condition to set other modes as preferred).
+		 */
+		refresh_rate = calculate_refresh_rate(mode);
+		if (mode->type & DRM_MODE_TYPE_PREFERRED) {
+			pr_debug("Preferred timing mode of extenal display is %dx%d@%dHz.\n",
+				mode->hdisplay, mode->vdisplay, refresh_rate);
+			pref_mode_found = true;
+			break;
+		}
+		if (pref_mode_assigned == NULL) {
+			if (otm_hdmi_is_preferred_mode(mode->hdisplay, mode->vdisplay,
+						refresh_rate)) {
+				pr_debug("External display has %dx%d@%dHz support.\n",
+					mode->hdisplay, mode->vdisplay, refresh_rate);
+				pr_debug("This mode will be assigned as preferred if none is indicated.\n");
+				pref_mode_assigned = mode;
+			}
+		}
+	}
+
+	/* if the external display does not indicate a preferred timing mode,
+	 * assign 1080p mode (if found) as the preferred mode.
+	 */
+	if (pref_mode_found == false && pref_mode_assigned != NULL)
+		pref_mode_assigned->type |= DRM_MODE_TYPE_PREFERRED;
+
+	if (pref_mode_found == false && pref_mode_assigned == NULL)
+		pr_err("Preferred mode is not indicated or assigned.\n");
+
+	pr_debug("Exit %s (%d)\n", __func__, (ret - i));
+
+	return ret - i;
+}
+
+/**
+ * helper function to print the display mode details.
+ * @mode:		drm display mode to print
+ *
+ * Returns:	none.
+ */
+static void __android_hdmi_dump_crtc_mode(struct drm_display_mode *mode)
+{
+	if (mode == NULL)
+		return;
+
+	pr_debug("hdisplay = %d\n", mode->hdisplay);
+	pr_debug("vdisplay = %d\n", mode->vdisplay);
+	pr_debug("hsync_start = %d\n", mode->hsync_start);
+	pr_debug("hsync_end = %d\n", mode->hsync_end);
+	pr_debug("htotal = %d\n", mode->htotal);
+	pr_debug("vsync_start = %d\n", mode->vsync_start);
+	pr_debug("vsync_end = %d\n", mode->vsync_end);
+	pr_debug("vtotal = %d\n", mode->vtotal);
+	pr_debug("clock = %d\n", mode->clock);
+	pr_debug("flags = 0x%x\n", mode->flags);
+}
+
+/**
+ * helper function to set scaling mode in OTM HDMI
+ * @context:		hdmi context
+ * @scalingType:	scaling type
+ *
+ * Returns:	none.
+ */
+static void __android_hdmi_set_scaling_type(void *context, int scalingType)
+{
+	switch (scalingType) {
+	case DRM_MODE_SCALE_NONE:
+		otm_hdmi_set_scaling_type(context, OTM_HDMI_SCALE_NONE);
+		break;
+	case DRM_MODE_SCALE_CENTER:
+		otm_hdmi_set_scaling_type(context, OTM_HDMI_SCALE_CENTER);
+		break;
+	case DRM_MODE_SCALE_FULLSCREEN:
+		otm_hdmi_set_scaling_type(context, OTM_HDMI_SCALE_FULLSCREEN);
+		break;
+	case DRM_MODE_SCALE_ASPECT:
+		otm_hdmi_set_scaling_type(context, OTM_HDMI_SCALE_ASPECT);
+		break;
+	default:
+		break;
+	}
+	return;
+}
+
+/**
+ * set display and pfit registers as per the preferred scaling property.
+ * @crtc		: crtc
+ *
+ * Returns:	0 on success
+ *		-1 on failure
+ */
+int android_hdmi_set_scaling_property(struct drm_crtc *crtc)
+{
+	struct drm_device *dev;
+	struct drm_psb_private *dev_priv;
+	struct android_hdmi_priv *hdmi_priv;
+	struct psb_intel_crtc *psb_intel_crtc;
+	struct drm_display_mode *adjusted_mode, *mode;
+	otm_hdmi_timing_t otm_mode, otm_adjusted_mode;
+	int pipe;
+
+	pr_debug("Enter %s\n", __func__);
+
+	BUG_ON(crtc == NULL);
+
+	/* get handles for required data */
+	dev = crtc->dev;
+	BUG_ON(dev == NULL);
+	dev_priv = dev->dev_private;
+	BUG_ON(dev_priv == NULL);
+	hdmi_priv = dev_priv->hdmi_priv;
+	BUG_ON(hdmi_priv == NULL);
+	psb_intel_crtc = to_psb_intel_crtc(crtc);
+	pipe = psb_intel_crtc->pipe;
+	adjusted_mode = &psb_intel_crtc->saved_adjusted_mode;
+	mode = &psb_intel_crtc->saved_mode;
+
+	BUG_ON(pipe != OTM_HDMI_PIPE_NUM);
+
+	__android_hdmi_drm_mode_to_otm_timing(&otm_mode, mode);
+	__android_hdmi_drm_mode_to_otm_timing(&otm_adjusted_mode,
+						adjusted_mode);
+
+	if (otm_hdmi_crtc_set_scaling(hdmi_priv->context, &otm_mode,
+				&otm_adjusted_mode, crtc->fb->width,
+				crtc->fb->height)) {
+		pr_err("%s: failed to program scaling", __func__);
+		return -1;
+	}
+
+	pr_debug("Exit %s\n", __func__);
+
+	return 0;
+}
+
+/**
+ * crtc mode set for hdmi pipe.
+ * @crtc		: crtc
+ * @mode		:mode requested
+ * @adjusted_mode:adjusted mode
+ * @x		:x value
+ * @y		:y value
+ * @old_fb	: old frame buffer values for flushing old planes
+ *
+ * Returns:	0 on success
+ *		-EINVAL on NULL input arguments
+ */
+int android_hdmi_crtc_mode_set(struct drm_crtc *crtc,
+				struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode,
+				int x, int y,
+				struct drm_framebuffer *old_fb)
+{
+	struct drm_device *dev;
+	struct psb_intel_crtc *psb_intel_crtc;
+	struct drm_psb_private *dev_priv;
+	struct drm_framebuffer *fb;
+	struct android_hdmi_priv *hdmi_priv;
+	struct drm_mode_config *mode_config;
+	struct psb_intel_output *psb_intel_output = NULL;
+	struct drm_encoder *encoder;
+	struct drm_connector *connector;
+	uint64_t scalingType = DRM_MODE_SCALE_CENTER;
+	int ret = 0;
+
+	int pipe;
+	otm_hdmi_timing_t otm_mode, otm_adjusted_mode;
+	uint32_t clock_khz;
+	int fb_width, fb_height;
+
+	pr_debug("Enter %s\n", __func__);
+
+	if (crtc == NULL || mode == NULL || adjusted_mode == NULL)
+		return -EINVAL;
+
+	/* get handles for required data */
+	dev = crtc->dev;
+	psb_intel_crtc = to_psb_intel_crtc(crtc);
+	pipe = psb_intel_crtc->pipe;
+	dev_priv = dev->dev_private;
+	fb = crtc->fb;
+	fb_width = fb->width;
+	fb_height = fb->height;
+	hdmi_priv = dev_priv->hdmi_priv;
+	mode_config = &dev->mode_config;
+
+	if (pipe != 1) {
+		pr_err("%s: Invalid pipe %d", __func__, pipe);
+		return 0;
+	}
+
+	if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
+				OSPM_UHB_FORCE_POWER_ON))
+		return 0;
+
+	pr_debug("%s mode info:\n", __func__);
+	__android_hdmi_dump_crtc_mode(mode);
+	pr_debug("%s adjusted mode info:\n", __func__);
+	__android_hdmi_dump_crtc_mode(adjusted_mode);
+
+	memcpy(&psb_intel_crtc->saved_mode, mode,
+				sizeof(struct drm_display_mode));
+	memcpy(&psb_intel_crtc->saved_adjusted_mode, adjusted_mode,
+				sizeof(struct drm_display_mode));
+
+	__android_hdmi_drm_mode_to_otm_timing(&otm_mode, mode);
+	__android_hdmi_drm_mode_to_otm_timing(&otm_adjusted_mode,
+						adjusted_mode);
+
+	list_for_each_entry(connector, &mode_config->connector_list, head) {
+		encoder = connector->encoder;
+		if (!encoder)
+			continue;
+		if (encoder->crtc != crtc)
+			continue;
+		psb_intel_output = to_psb_intel_output(connector);
+	}
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+	if (psb_intel_output)
+		drm_connector_property_get_value(&psb_intel_output->base,
+			dev->mode_config.scaling_mode_property, &scalingType);
+#else
+        if (psb_intel_output)
+            drm_object_property_get_value(&(&psb_intel_output->base)->base,
+                    dev->mode_config.scaling_mode_property, &scalingType);
+#endif
+
+	psb_intel_crtc->scaling_type = scalingType;
+
+	__android_hdmi_set_scaling_type(hdmi_priv->context, scalingType);
+
+	if (!dev_priv->hdmi_first_boot) {
+		/* Disable the VGA plane that we never use */
+		REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+
+		/* Disable the panel fitter if it was on our pipe */
+		if (psb_intel_panel_fitter_pipe(dev) == pipe)
+			REG_WRITE(PFIT_CONTROL, 0);
+
+		/* Flush the plane changes */
+		{
+			struct drm_crtc_helper_funcs *crtc_funcs =
+							crtc->helper_private;
+			ret = crtc_funcs->mode_set_base(crtc, x, y, old_fb);
+			if (ret != 0)
+				return ret;
+		}
+
+		if (otm_hdmi_crtc_mode_set(hdmi_priv->context, &otm_mode,
+					&otm_adjusted_mode, fb_width,
+					fb_height, &clock_khz)) {
+			pr_err("%s: failed to perform hdmi crtc mode set",
+					__func__);
+			ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+			return 0;
+		}
+	} else {
+		otm_hdmi_crtc_pll_get(hdmi_priv->context,
+				&otm_adjusted_mode,
+				&clock_khz);
+	}
+
+	hdmi_priv->clock_khz = clock_khz;
+
+	/*
+	 * SW workaround for Compliance 7-29 ACR test on 576p@50
+	 * use the nominal pixel clock, instead of the actual clock
+	 */
+	if (otm_adjusted_mode.metadata == 17 ||
+			otm_adjusted_mode.metadata == 18)
+		hdmi_priv->clock_khz = otm_adjusted_mode.dclk;
+
+	psb_intel_wait_for_vblank(dev);
+
+	ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+
+	pr_debug("Exit %s\n", __func__);
+	return 0;
+}
+
+/**
+ * encoder mode set for hdmi pipe.
+ * @encoder:		hdmi encoder
+ * @mode:		mode requested
+ * @adjusted_mode:	adjusted mode
+ *
+ * Returns:	none.
+ */
+void android_hdmi_enc_mode_set(struct drm_encoder *encoder,
+				struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev;
+	struct android_hdmi_priv *hdmi_priv;
+	struct drm_psb_private *dev_priv;
+	otm_hdmi_timing_t otm_mode, otm_adjusted_mode;
+
+	pr_debug("Enter %s\n", __func__);
+
+	if (encoder == NULL || mode == NULL || adjusted_mode == NULL)
+		return;
+
+	if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
+				OSPM_UHB_FORCE_POWER_ON))
+		return;
+
+	/* get handles for required data */
+	dev = encoder->dev;
+	dev_priv = dev->dev_private;
+	hdmi_priv = dev_priv->hdmi_priv;
+
+	__android_hdmi_drm_mode_to_otm_timing(&otm_mode, mode);
+	__android_hdmi_drm_mode_to_otm_timing(&otm_adjusted_mode,
+						adjusted_mode);
+
+	if (!dev_priv->hdmi_first_boot) {
+		if (otm_hdmi_enc_mode_set(hdmi_priv->context, &otm_mode,
+					&otm_adjusted_mode)) {
+			pr_err("%s: failed to perform hdmi enc mode set",
+					__func__);
+			ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+			return;
+		}
+	}
+
+	/*save current set mode*/
+	if (hdmi_priv->current_mode)
+		drm_mode_destroy(dev,
+				 hdmi_priv->current_mode);
+	hdmi_priv->current_mode =
+		drm_mode_duplicate(dev, adjusted_mode);
+
+	/* Send MODE_CHANGE event to Audio driver */
+	mid_hdmi_audio_signal_event(dev, HAD_EVENT_MODE_CHANGING);
+
+#ifdef OTM_HDMI_HDCP_ENABLE
+#ifdef OTM_HDMI_HDCP_ALWAYS_ENC
+	/* enable hdcp */
+	if (otm_hdmi_hdcp_enable(hdmi_priv->context, adjusted_mode->vrefresh))
+		pr_debug("hdcp enabled");
+	else
+		pr_err("hdcp could not be enabled");
+#endif
+#endif
+	ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+
+	return;
+}
+
+/**
+ * save the register for HDMI display and disable HDMI
+ * @dev:		drm device
+ *
+ * Returns:	none.
+ */
+void android_hdmi_suspend_display(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv;
+	struct android_hdmi_priv *hdmi_priv;
+	bool is_connected;
+
+	if (NULL == dev)
+		return;
+	dev_priv = dev->dev_private;
+	if (NULL == dev_priv)
+		return;
+	hdmi_priv = dev_priv->hdmi_priv;
+	if (NULL == hdmi_priv)
+		return;
+
+	/* Check if monitor is attached to HDMI connector. */
+	is_connected = otm_hdmi_get_cable_status(hdmi_priv->context);
+
+	otm_hdmi_save_display_registers(hdmi_priv->context,
+					is_connected);
+
+	otm_disable_hdmi(hdmi_priv->context);
+
+	/* power island is turnned off by IRQ handler if device is disconnected */
+	if (is_connected && !hdmi_priv->hdmi_suspended) {
+		/* Keep DSPB & HDMIO islands off after suspending. */
+		otm_hdmi_power_islands_off();
+	}
+	hdmi_priv->hdmi_suspended = true;
+
+	otm_hdmi_power_rails_off();
+
+	/* disable hotplug detection */
+	otm_hdmi_enable_hpd(false);
+
+	if (is_connected) {
+		mid_hdmi_audio_signal_event(dev, HAD_EVENT_HOT_UNPLUG);
+		switch_set_state(&hdmi_priv->sdev, 0);
+	}
+
+	return;
+}
+
+/**
+ * Prepare HDMI EDID-like data and copy it to the given buffer
+ * Input parameters:
+ * @dev: drm Device
+ * @eld: pointer to otm_hdmi_eld_t data structure
+*
+ * Returns:	0 on success
+ *		-EINVAL on NULL input arguments
+ */
+
+int android_hdmi_get_eld(struct drm_device *dev, void *eld)
+{
+	struct drm_psb_private *dev_priv;
+	struct android_hdmi_priv *hdmi_priv;
+	otm_hdmi_eld_t *hdmi_eld;
+	otm_hdmi_ret_t ret;
+
+	if (NULL == dev || NULL == eld)
+		return -EINVAL;
+	dev_priv = dev->dev_private;
+	if (NULL == dev_priv)
+		return -EINVAL;
+	hdmi_priv = dev_priv->hdmi_priv;
+	if (NULL == hdmi_priv)
+		return -EINVAL;
+
+	hdmi_eld = (otm_hdmi_eld_t *)eld;
+	ret = otm_hdmi_get_eld(hdmi_priv->context, hdmi_eld);
+	if (ret == OTM_HDMI_SUCCESS)
+		return 0;
+
+	/* TODO: return proper error code. */
+	return -EINVAL;
+}
+
+
+/**
+ * get DPLL clock in khz
+ * Input parameters:
+ * @dev: drm Device
+*
+ * Returns:  DPLL clock in khz
+ */
+
+uint32_t android_hdmi_get_dpll_clock(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv;
+	struct android_hdmi_priv *hdmi_priv;
+
+	if (NULL == dev || NULL == dev->dev_private)
+		return 0;
+	dev_priv = dev->dev_private;
+	hdmi_priv = dev_priv->hdmi_priv;
+	if (NULL == hdmi_priv)
+		return 0;
+	return hdmi_priv->clock_khz;
+}
+
+/**
+ * Restore the register and enable the HDMI display
+ * @dev:		drm device
+ *
+ * Returns:	none.
+ */
+void android_hdmi_resume_display(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv;
+	struct android_hdmi_priv *hdmi_priv;
+	bool is_connected;  /* connection status during resume */
+	bool was_connected; /* connection status before suspend */
+	hdmi_context_t *ctx;
+	if (NULL == dev)
+		return;
+	dev_priv = dev->dev_private;
+	if (NULL == dev_priv)
+		return;
+	hdmi_priv = dev_priv->hdmi_priv;
+	if (NULL == hdmi_priv)
+		return;
+
+	ctx = (hdmi_context_t *)(hdmi_priv->context);
+	if (NULL == ctx)
+		return;
+
+	/* use the connection status before suspend to determine if
+	  * to power on islands. HDMI may have been plugged out
+	  * during suspend
+	*/
+	was_connected = ctx->is_connected;
+	if (was_connected && hdmi_priv->hdmi_suspended) {
+		/* Keep DSPB & HDMIO islands on after resuming. */
+		if (!otm_hdmi_power_islands_on()) {
+			pr_err("Unable to power on display island!");
+			return;
+		}
+	}
+	hdmi_priv->hdmi_suspended = false;
+
+	otm_hdmi_power_rails_on();
+	/* Check if monitor is attached to HDMI connector. */
+	is_connected = otm_hdmi_get_cable_status(hdmi_priv->context);
+
+	/* only restore display if there is no connection status change */
+	otm_hdmi_restore_and_enable_display(hdmi_priv->context,
+				was_connected & is_connected);
+
+	if (!is_connected) {
+		/* power off rails, HPD will continue to work */
+		otm_hdmi_power_rails_off();
+	}
+
+	/* enable hotplug detection */
+	otm_hdmi_enable_hpd(true);
+
+	if (is_connected) {
+		mid_hdmi_audio_signal_event(dev, HAD_EVENT_HOT_PLUG);
+		switch_set_state(&hdmi_priv->sdev, 1);
+	}
+}
+
+/**
+ * DRM encoder save helper routine
+ * @encoder      : handle to drm_encoder
+ *
+ * Returns nothing
+ * This helper routine is used by DRM during early suspend
+ * operation to simply disable active plane.
+ */
+void android_hdmi_encoder_save(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
+	struct android_hdmi_priv *hdmi_priv = output->dev_priv;
+	int dspcntr_reg = DSPBCNTR;
+	int dspbsurf_reg = DSPBSURF;
+	u32 dspbcntr_val;
+
+	pr_debug("Entered %s\n", __func__);
+
+	if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
+				OSPM_UHB_FORCE_POWER_ON))
+		return ;
+
+	hdmi_priv->need_encoder_restore = true;
+
+	/*Use Disable pipeB plane to turn off HDMI screen
+	 in early_suspend  */
+	dspbcntr_val = REG_READ(dspcntr_reg);
+	if ((dspbcntr_val & DISPLAY_PLANE_ENABLE) != 0) {
+		REG_WRITE(dspcntr_reg,
+				dspbcntr_val & ~DISPLAY_PLANE_ENABLE);
+		/* Flush the plane changes */
+		REG_WRITE(dspbsurf_reg, REG_READ(dspbsurf_reg));
+	}
+
+	ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+	pr_debug("Exiting %s\n", __func__);
+}
+
+void android_hdmi_encoder_restore_wq(struct work_struct *work)
+{
+	struct android_hdmi_priv *hdmi_priv =
+		container_of(work, struct android_hdmi_priv, enc_work.work);
+	struct drm_encoder *encoder;
+	struct drm_device *dev = NULL;
+	struct drm_psb_private *dev_priv = NULL;
+	struct psb_intel_output *output = NULL;
+	int dspcntr_reg = DSPBCNTR;
+	int dspbsurf_reg = DSPBSURF;
+	u32 dspcntr_val = 0;
+	otm_hdmi_timing_t otm_mode;
+	bool is_monitor_hdmi;
+	otm_hdmi_ret_t rc = OTM_HDMI_SUCCESS;
+
+	pr_debug("Entered %s\n", __func__);
+
+	if (unlikely(!hdmi_priv))
+		return;
+
+	encoder = (struct drm_encoder *)hdmi_priv->data;
+	if (unlikely(!encoder))
+		return;
+
+	dev = (struct drm_device *)encoder->dev;
+	if (unlikely(!dev))
+		return;
+
+	dev_priv = dev->dev_private;
+	if (unlikely(!dev_priv))
+		return;
+
+	output = enc_to_psb_intel_output(encoder);
+	if (unlikely(!output))
+		return;
+
+	if (!drm_helper_encoder_in_use(encoder))
+		return;
+
+	if (unlikely(!hdmi_priv->need_encoder_restore)) {
+		pr_debug("No need to restore encoder\n");
+		return;
+	}
+
+	if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
+				OSPM_UHB_FORCE_POWER_ON))
+		return;
+
+	if (!dev_priv->bhdmiconnected ||
+			!(dev_priv->panel_desc & DISPLAY_B)) {
+		ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+		return;
+	}
+
+	/* Restore pipe B plane to turn on HDMI screen */
+	dspcntr_val = REG_READ(dspcntr_reg);
+	if ((dspcntr_val & DISPLAY_PLANE_ENABLE) == 0 &&
+			(DISP_PLANEB_STATUS != DISPLAY_PLANE_DISABLE)) {
+		REG_WRITE(dspcntr_reg,
+				dspcntr_val | DISPLAY_PLANE_ENABLE);
+		/* Flush the plane changes */
+		REG_WRITE(dspbsurf_reg, REG_READ(dspbsurf_reg));
+	}
+
+	if (hdmi_priv->current_mode)
+		__android_hdmi_drm_mode_to_otm_timing(&otm_mode,
+			hdmi_priv->current_mode);
+	else
+		pr_err("%s: No saved current mode found, unable to restore\n",
+			__func__);
+
+	is_monitor_hdmi = otm_hdmi_is_monitor_hdmi(hdmi_priv->context);
+
+	/* Enable AVI infoframes for HDMI mode */
+	if (is_monitor_hdmi) {
+		rc = otm_hdmi_infoframes_set_avi(hdmi_priv->context, &otm_mode);
+		if (rc != OTM_HDMI_SUCCESS)
+			pr_debug("\nfailed to program avi infoframe\n");
+	} else {/* Disable all inofoframes for DVI mode */
+		rc = otm_hdmi_disable_all_infoframes(hdmi_priv->context);
+		if (rc != OTM_HDMI_SUCCESS)
+			pr_debug("\nfailed to disable all infoframes\n");
+	}
+
+	hdmi_priv->need_encoder_restore = false;
+
+	ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+
+	pr_debug("Exiting %s\n", __func__);
+}
+
+/**
+ * DRM encoder restore helper routine
+ * @encoder      : handle to drm_encoder
+ *
+ * Returns nothing
+ * This helper routine is used by DRM during late resume
+ * operation for restoring the pipe and enabling it. The
+ * operation itself is completed in a delayed workqueue
+ * item which ensures restore can be done once the system
+ * is resumed.
+ */
+void android_hdmi_encoder_restore(struct drm_encoder *encoder)
+{
+	unsigned long delay = 0;
+	struct psb_intel_output *output = NULL;
+	struct android_hdmi_priv *hdmi_priv = NULL;
+
+	pr_debug("Entered %s\n", __func__);
+
+	if (unlikely(!encoder))
+		return;
+
+	output = enc_to_psb_intel_output(encoder);
+	if (unlikely(!output))
+		return;
+
+	hdmi_priv = output->dev_priv;
+	if (unlikely(!hdmi_priv))
+		return;
+
+	hdmi_priv->data = (void *)encoder;
+
+	delay = HZ/5;
+	schedule_delayed_work(&hdmi_priv->enc_work, delay);
+	pr_debug("Exiting %s\n", __func__);
+}
+
+/**
+ * DRM encoder mode fixup helper routine
+ * @encoder      : handle to drm_encoder
+ * @mode         : proposed display mode
+ * @adjusted_mode: actual mode to be displayed by HW
+ *
+ * Returns boolean to indicate success/failure
+ * This routine can be used to make adjustments to actual
+ * mode parameters as required by underlying HW.
+ * This is currently not required.
+ */
+bool android_hdmi_mode_fixup(struct drm_encoder *encoder,
+		const struct drm_display_mode *mode,
+		struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct android_hdmi_priv *hdmi_priv = dev_priv->hdmi_priv;
+
+	otm_hdmi_timing_t otm_mode;
+	uint32_t hdisplay = 0;
+	uint32_t vdisplay = 0;
+	uint8_t vic = 0;
+	int monitor_type = MONITOR_TYPE_HDMI;
+
+	if (dev_priv->hdmi_first_boot) {
+		query_fw_hdmi_setting(dev, &hdisplay, &vdisplay, &vic, &monitor_type);
+		__android_hdmi_drm_mode_to_otm_timing(&otm_mode, (struct drm_display_mode *)mode);
+
+		if (mode->hdisplay == 640 &&
+			mode->vdisplay == 480) {
+			/*
+			 *  FW uses panel fitter for downscaling if preferred mode is 640x480.
+			 *  in kernel, overlay is used for downscaling hence panel fitter needs
+			 *  to be disabled, a simple solution is to reset mode.
+			 */
+			dev_priv->hdmi_first_boot = false;
+		} else if (hdisplay == mode->hdisplay &&
+					vdisplay == mode->vdisplay &&
+					(otm_mode.metadata == vic) &&
+					(monitor_type == hdmi_priv->monitor_type)) {
+			pr_info("%s: skip first boot !\n", __func__);
+			dev_priv->hdmi_first_boot = true;
+		} else {
+			/* need mode setting */
+			dev_priv->hdmi_first_boot = false;
+		}
+	}
+
+	return true;
+}
+
+void android_hdmi_encoder_prepare(struct drm_encoder *encoder)
+{
+	struct drm_encoder_helper_funcs *encoder_funcs =
+		encoder->helper_private;
+	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+void android_hdmi_encoder_commit(struct drm_encoder *encoder)
+{
+	struct drm_encoder_helper_funcs *encoder_funcs =
+		encoder->helper_private;
+	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+void android_hdmi_enc_destroy(struct drm_encoder *encoder)
+{
+	drm_encoder_cleanup(encoder);
+}
+
+/**
+ * Currently the driver doesn't take advantage of encoders.
+ */
+struct drm_encoder *android_hdmi_best_encoder(struct drm_connector *connector)
+{
+	struct psb_intel_output *psb_intel_output =
+		to_psb_intel_output(connector);
+	return &psb_intel_output->enc;
+}
+
+
+/**
+ * DRM connector save helper routine
+ * @connector       : handle to drm_connector
+ *
+ * Returns nothing.
+ * This routine is used to save connector state.
+ */
+void android_hdmi_connector_save(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct psb_intel_output *output = to_psb_intel_output(connector);
+	struct android_hdmi_priv *hdmi_priv = output->dev_priv;
+
+	pr_debug("Entered %s\n", __func__);
+
+	if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
+				OSPM_UHB_FORCE_POWER_ON))
+		return ;
+
+	hdmi_priv->save_HDMIB = REG_READ(hdmi_priv->hdmib_reg);
+
+	ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+	pr_debug("Exiting %s\n", __func__);
+}
+
+/**
+ * DRM connector restore helper routine
+ * @connector       : handle to drm_connector
+ *
+ * Returns nothing.
+ * This routine is used to restore connector state.
+ */
+void android_hdmi_connector_restore(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct psb_intel_output *output = to_psb_intel_output(connector);
+	struct android_hdmi_priv *hdmi_priv = output->dev_priv;
+
+	pr_debug("Entering %s\n", __func__);
+	if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
+				OSPM_UHB_FORCE_POWER_ON))
+		return ;
+
+	REG_WRITE(hdmi_priv->hdmib_reg, hdmi_priv->save_HDMIB);
+	REG_READ(hdmi_priv->hdmib_reg);
+
+	ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+	pr_debug("Exiting %s\n", __func__);
+}
+
+/**
+ * Enable HDCP on HDMI display
+ * @dev:	drm device
+ *
+ * Returns:	true on success else false
+ */
+bool android_enable_hdmi_hdcp(struct drm_device *dev)
+{
+#ifdef OTM_HDMI_HDCP_ENABLE
+#ifndef OTM_HDMI_HDCP_ALWAYS_ENC
+	struct drm_psb_private *dev_priv;
+	struct android_hdmi_priv *hdmi_priv;
+	if (NULL == dev)
+		return false;
+	dev_priv = dev->dev_private;
+	if (NULL == dev_priv)
+		return false;
+	hdmi_priv = dev_priv->hdmi_priv;
+	if (NULL == hdmi_priv)
+		return false;
+	if (NULL == hdmi_priv->current_mode)
+		return false;
+
+	/* enable hdcp */
+	if (otm_hdmi_hdcp_enable(hdmi_priv->context,
+		hdmi_priv->current_mode->vrefresh)) {
+		pr_debug("hdcp enabled");
+		return true;
+	} else {
+		pr_debug("hdcp could not be enabled");
+		return false;
+	}
+#endif
+#endif
+	return false;
+}
+
+/**
+ * disable HDCP on HDMI display
+ * @dev:	drm device
+ *
+ * Returns:	true on success else false
+ */
+bool android_disable_hdmi_hdcp(struct drm_device *dev)
+{
+#ifdef OTM_HDMI_HDCP_ENABLE
+#ifndef OTM_HDMI_HDCP_ALWAYS_ENC
+	struct drm_psb_private *dev_priv;
+	struct android_hdmi_priv *hdmi_priv;
+	if (NULL == dev)
+		return false;
+	dev_priv = dev->dev_private;
+	if (NULL == dev_priv)
+		return false;
+	hdmi_priv = dev_priv->hdmi_priv;
+	if (NULL == hdmi_priv)
+		return false;
+
+	/* disable hdcp */
+	if (otm_hdmi_hdcp_disable(hdmi_priv->context)) {
+		pr_debug("hdcp disabled\n");
+		return true;
+	} else {
+		pr_err("hdcp could not be disabled");
+		return false;
+	}
+#endif
+#endif
+	return false;
+}
+
+/**
+ * Query whether HDCP is enabled & encrypting on HDMI display
+ * @dev:	drm device
+ *
+ * Returns:	true if encrypting else false
+ */
+bool android_check_hdmi_hdcp_enc_status(struct drm_device *dev)
+{
+#ifdef OTM_HDMI_HDCP_ENABLE
+#ifndef OTM_HDMI_HDCP_ALWAYS_ENC
+	struct drm_psb_private *dev_priv;
+	struct android_hdmi_priv *hdmi_priv;
+	if (NULL == dev)
+		return false;
+	dev_priv = dev->dev_private;
+	if (NULL == dev_priv)
+		return false;
+	hdmi_priv = dev_priv->hdmi_priv;
+	if (NULL == hdmi_priv)
+		return false;
+
+	/* disable hdcp */
+	if (otm_hdmi_hdcp_enc_status(hdmi_priv->context))
+		return true;
+	else
+		return false;
+#endif
+#endif
+	return false;
+}
+
+/**
+ * Query presence of a HDCP Sink Device
+ * @dev:	drm device
+ * @bksv:	ksv value of the sink device will be returned on success
+ *
+ * Returns:	true on successful detection else false
+ */
+bool android_query_hdmi_hdcp_sink(struct drm_device *dev, uint8_t *bksv)
+{
+#ifdef OTM_HDMI_HDCP_ENABLE
+#ifndef OTM_HDMI_HDCP_ALWAYS_ENC
+	struct drm_psb_private *dev_priv;
+	struct android_hdmi_priv *hdmi_priv;
+	if (NULL == dev || bksv == NULL)
+		return false;
+	dev_priv = dev->dev_private;
+	if (NULL == dev_priv)
+		return false;
+	hdmi_priv = dev_priv->hdmi_priv;
+	if (NULL == hdmi_priv)
+		return false;
+
+	/* query bksv availability */
+	if (otm_hdmi_hdcp_read_validate_bksv(hdmi_priv->context, bksv)) {
+		return true;
+	} else {
+		return false;
+	}
+#endif
+#endif
+	return false;
+}
+
+/**
+ * Query HDCP Phase 3 Link Status
+ * @dev:	drm device
+ *
+ * Returns:	true if link is not compromised else false
+ */
+bool android_check_hdmi_hdcp_link_status(struct drm_device *dev)
+{
+#ifdef OTM_HDMI_HDCP_ENABLE
+#ifndef OTM_HDMI_HDCP_ALWAYS_ENC
+	struct drm_psb_private *dev_priv;
+	struct android_hdmi_priv *hdmi_priv;
+	if (NULL == dev)
+		return false;
+	dev_priv = dev->dev_private;
+	if (NULL == dev_priv)
+		return false;
+	hdmi_priv = dev_priv->hdmi_priv;
+	if (NULL == hdmi_priv)
+		return false;
+
+	/* disable hdcp */
+	if (otm_hdmi_hdcp_link_status(hdmi_priv->context)) {
+		return true;
+	} else {
+		pr_debug("hdcp link failed\n");
+		return false;
+	}
+#endif
+#endif
+	return false;
+}
+
+/**
+ * hdmi helper function to detect whether hdmi/dvi is connected or not.
+ * @connector:	hdmi connector
+ *
+ * Returns:	connector_status_connected if hdmi/dvi is connected.
+ *		connector_status_disconnected if hdmi/dvi is not connected.
+ */
+enum drm_connector_status
+android_hdmi_detect(struct drm_connector *connector,
+		    bool force)
+{
+	struct drm_device *dev = NULL;
+	struct drm_psb_private *dev_priv = NULL;
+	struct android_hdmi_priv *hdmi_priv = NULL;
+	bool data = 0;
+	static bool first_boot = true;
+	static int prev_connection_status = connector_status_disconnected;
+	struct i2c_adapter *adapter = i2c_get_adapter(OTM_HDMI_I2C_ADAPTER_NUM);
+
+	if (NULL == connector || NULL == adapter)
+		return connector_status_disconnected;
+
+	dev = connector->dev;
+	dev_priv = (struct drm_psb_private *)dev->dev_private;
+	hdmi_priv = dev_priv->hdmi_priv;
+
+	/* Check if monitor is attached to HDMI connector. */
+	data = otm_hdmi_get_cable_status(hdmi_priv->context);
+	pr_debug("%s: HPD connected data = %d.\n", __func__, data);
+
+	if (!first_boot) {
+		/* detect can be invoked in multiple contexts,  providing potentially inconsistent
+		* cable status to drm framework and user land.  Use hdmi state detected
+		* in the IRQ handler to any observer after the first boot.
+		*/
+		if (data != hdmi_state)
+			pr_err("%s: inconsistent HPD status %d\n", __func__, data);
+		data = hdmi_state;
+	}
+
+#ifdef OTM_HDMI_HDCP_ENABLE
+	otm_hdmi_hdcp_set_hpd_state(hdmi_priv->context, data);
+#else
+	otm_hdmi_update_security_hdmi_hdcp_status(false, data);
+#endif
+
+	if (data) {
+		if (first_boot) {
+			hdmi_state = 1;
+			first_boot = false;
+		}
+
+		if (prev_connection_status == connector_status_connected)
+			return connector_status_connected;
+
+		prev_connection_status = connector_status_connected;
+
+		/* Turn on power islands and hold ref count */
+		if (!otm_hdmi_power_islands_on())
+			pr_err("otm_hdmi_power_islands_on failed!");
+
+		dev_priv->panel_desc |= DISPLAY_B;
+		dev_priv->bhdmiconnected = true;
+
+		return connector_status_connected;
+	} else {
+		if (prev_connection_status != connector_status_disconnected) {
+#ifdef OTM_HDMI_HDCP_ENABLE
+#ifdef OTM_HDMI_HDCP_ALWAYS_ENC
+			if (otm_hdmi_hdcp_disable(hdmi_priv->context))
+				pr_debug("hdcp disabled\n");
+			else
+				pr_debug("failed to disable hdcp\n");
+#endif
+#endif
+			dev_priv->panel_desc &= ~DISPLAY_B;
+			dev_priv->bhdmiconnected = false;
+
+			/* Turn off power islands and decrement ref count */
+			otm_hdmi_power_islands_off();
+		}
+
+		/* Always turn off power rails when hdmi is disconnected */
+		otm_hdmi_power_rails_off();
+
+		prev_connection_status = connector_status_disconnected;
+
+		return connector_status_disconnected;
+	}
+}
+
+int android_hdmi_probe_single_connector_modes(struct drm_connector *connector,
+		uint32_t maxX, uint32_t maxY)
+{
+	struct drm_device *dev;
+	int count = 0;
+
+	if (!connector)
+		return count;
+
+	dev = connector->dev;
+
+	count = drm_helper_probe_single_connector_modes(connector,
+			dev->mode_config.max_width,
+			dev->mode_config.max_height);
+	return count;
+}
+
+/**
+ * Description: check whether hdmi/dvi is connected or not.
+ *
+ * @dev:	drm device
+ *
+ * Returns:	true if hdmi/dvi is connected.
+ *		false if hdmi/dvi is not connected.
+ */
+bool android_hdmi_is_connected(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = NULL;
+	struct android_hdmi_priv *hdmi_priv = NULL;
+
+	if (NULL == dev)
+		return false;
+
+	dev_priv = (struct drm_psb_private *)dev->dev_private;
+	hdmi_priv = dev_priv->hdmi_priv;
+
+	/* Check if monitor is attached to HDMI connector. */
+	return otm_hdmi_get_cable_status(hdmi_priv->context);
+}
+
+static int android_hdmi_set_property(struct drm_connector *connector,
+				     struct drm_property *property,
+				     uint64_t value)
+{
+	struct drm_encoder *pEncoder = connector->encoder;
+	struct psb_intel_crtc *pPsbCrtc = NULL;
+	bool bTransitionFromToCentered = false;
+	bool bTransitionFromToAspect = false;
+	uint64_t curValue;
+
+	pr_debug("Entered %s\n", __func__);
+	if (!pPsbCrtc) {
+		pr_err("Invalid pointer\n");
+		goto set_prop_error;
+	}
+	if (!strcmp(property->name, "scaling mode") && pEncoder) {
+		pr_debug("Property: scaling mode\n");
+	} else if (!strcmp(property->name, "DPMS") && pEncoder) {
+		pr_debug("Property: DPMS\n");
+	}
+	else {
+		pr_err("%s: Unable to handle property %s\n", __func__,
+			property->name);
+		goto set_prop_error;
+	}
+
+	if (!strcmp(property->name, "scaling mode") && pEncoder) {
+		pPsbCrtc = to_psb_intel_crtc(pEncoder->crtc);
+
+		if (!pPsbCrtc)
+			goto set_prop_error;
+
+		switch (value) {
+		case DRM_MODE_SCALE_FULLSCREEN:
+			break;
+		case DRM_MODE_SCALE_CENTER:
+			break;
+		case DRM_MODE_SCALE_NO_SCALE:
+			break;
+		case DRM_MODE_SCALE_ASPECT:
+			break;
+		default:
+			goto set_prop_error;
+		}
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+		if (drm_connector_property_get_value(connector, property, &curValue))
+#else
+                if (drm_object_property_get_value(&connector->base, property, &curValue))
+#endif
+			goto set_prop_error;
+
+		if (curValue == value)
+			goto set_prop_done;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+		if (drm_connector_property_set_value(connector, property, value))
+#else 
+                if (drm_object_property_set_value(&connector->base, property, value))
+#endif
+			goto set_prop_error;
+
+		bTransitionFromToCentered =
+			(curValue == DRM_MODE_SCALE_NO_SCALE) ||
+			(value == DRM_MODE_SCALE_NO_SCALE) ||
+			(curValue == DRM_MODE_SCALE_CENTER) ||
+			(value == DRM_MODE_SCALE_CENTER);
+
+		bTransitionFromToAspect = (curValue == DRM_MODE_SCALE_ASPECT) ||
+			(value == DRM_MODE_SCALE_ASPECT);
+
+		if (pPsbCrtc->saved_mode.hdisplay != 0 &&
+			pPsbCrtc->saved_mode.vdisplay != 0) {
+			if (bTransitionFromToCentered ||
+					bTransitionFromToAspect) {
+				if (!drm_crtc_helper_set_mode(pEncoder->crtc,
+						&pPsbCrtc->saved_mode,
+						pEncoder->crtc->x,
+						pEncoder->crtc->y,
+						pEncoder->crtc->fb))
+					goto set_prop_error;
+			} else {
+				struct drm_encoder_helper_funcs *pEncHFuncs =
+					pEncoder->helper_private;
+				pEncHFuncs->mode_set(pEncoder,
+					&pPsbCrtc->saved_mode,
+					&pPsbCrtc->saved_adjusted_mode);
+			}
+		}
+	}
+set_prop_done:
+	return 0;
+set_prop_error:
+	return -1;
+}
+
+
+void android_hdmi_connector_destroy(struct drm_connector *connector)
+{
+	drm_sysfs_connector_remove(connector);
+	drm_connector_cleanup(connector);
+	kfree(connector);
+}
+
+void android_hdmi_connector_dpms(struct drm_connector *connector, int mode)
+{
+	struct drm_device *dev = connector->dev;
+	bool hdmi_audio_busy = false;
+	u32 dspcntr_val;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+#if (defined CONFIG_PM_RUNTIME) && (!defined MERRIFIELD) \
+	&& (defined CONFIG_SUPPORT_MIPI)
+	bool panel_on = false, panel_on2 = false;
+	struct mdfld_dsi_config **dsi_configs;
+#endif
+
+	pr_debug("Entered %s\n", __func__);
+	if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
+				OSPM_UHB_FORCE_POWER_ON))
+		return ;
+
+	/* Check HDMI Audio Status */
+	hdmi_audio_busy = mid_hdmi_audio_is_busy(dev);
+
+	pr_debug("[DPMS] audio busy: 0x%x\n", hdmi_audio_busy);
+
+	/* if hdmi audio is busy, just turn off hdmi display plane */
+	if (hdmi_audio_busy) {
+		dspcntr_val = PSB_RVDC32(DSPBCNTR);
+		connector->dpms = mode;
+
+		if (mode != DRM_MODE_DPMS_ON) {
+			if (!dev_priv->hdmi_first_boot) {
+				REG_WRITE(DSPBCNTR, dspcntr_val &
+						~DISPLAY_PLANE_ENABLE);
+				DISP_PLANEB_STATUS = DISPLAY_PLANE_DISABLE;
+			}
+		} else {
+			REG_WRITE(DSPBCNTR, dspcntr_val |
+							DISPLAY_PLANE_ENABLE);
+			DISP_PLANEB_STATUS = DISPLAY_PLANE_ENABLE;
+		}
+	} else {
+		drm_helper_connector_dpms(connector, mode);
+
+		if (mode != DRM_MODE_DPMS_ON)
+			DISP_PLANEB_STATUS = DISPLAY_PLANE_DISABLE;
+		else
+			DISP_PLANEB_STATUS = DISPLAY_PLANE_ENABLE;
+	}
+
+#if (defined CONFIG_PM_RUNTIME) && (!defined MERRIFIELD) \
+	&& (defined CONFIG_SUPPORT_MIPI)
+	dsi_configs = dev_priv->dsi_configs;
+
+	if (dsi_configs[0])
+		panel_on = dsi_configs[0]->dsi_hw_context.panel_on;
+	if (dsi_configs[1])
+		panel_on2 = dsi_configs[1]->dsi_hw_context.panel_on;
+
+	/*then check all display panels + monitors status*/
+	if (!panel_on &&
+		!panel_on2 &&
+		!(REG_READ(HDMIB_CONTROL) & HDMIB_PORT_EN)) {
+		/*request rpm idle*/
+		if(dev_priv->rpm_enabled) {
+			pm_request_idle(&dev->pdev->dev);
+		}
+	}
+#endif
+	ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+	pr_debug("Exiting %s\n", __func__);
+}
+
+/**
+ * hdmi helper function to manage power to the display (dpms)
+ * @encoder:	hdmi encoder
+ * @mode:	dpms on or off
+ *
+ * Returns:	none
+ */
+void android_hdmi_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct drm_device *dev;
+	struct drm_psb_private *dev_priv;
+	struct android_hdmi_priv *hdmi_priv;
+	otm_hdmi_timing_t otm_mode;
+	bool is_monitor_hdmi;
+	otm_hdmi_ret_t rc = OTM_HDMI_SUCCESS;
+	struct psb_intel_output *output;
+	int dspcntr_reg = DSPBCNTR;
+	int dspbase_reg = MRST_DSPBBASE;
+	u32 hdmip_enabled = 0;
+	u32 hdmib, hdmi_phy_misc;
+	u32 temp;
+
+	pr_debug("Entered %s\n", __func__);
+	if (encoder == NULL)
+		return;
+
+	pr_debug("%s\n", mode == DRM_MODE_DPMS_ON ? "on" : "off");
+	dev = encoder->dev;
+	dev_priv = (struct drm_psb_private *)dev->dev_private;
+	hdmi_priv = dev_priv->hdmi_priv;
+	output = enc_to_psb_intel_output(encoder);
+
+	if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
+			OSPM_UHB_FORCE_POWER_ON))
+		return;
+	hdmib = REG_READ(hdmi_priv->hdmib_reg) | HDMIB_PIPE_B_SELECT;
+
+	is_monitor_hdmi = otm_hdmi_is_monitor_hdmi(hdmi_priv->context);
+
+	if (is_monitor_hdmi)
+		hdmib |= (HDMIB_NULL_PACKET | HDMIB_AUDIO_ENABLE);
+	else
+		hdmib &= ~(HDMIB_NULL_PACKET | HDMIB_AUDIO_ENABLE);
+
+	hdmi_phy_misc = REG_READ(HDMIPHYMISCCTL);
+	hdmip_enabled = REG_READ(hdmi_priv->hdmib_reg) & HDMIB_PORT_EN;
+	pr_debug("hdmip_enabled is %x\n", hdmip_enabled);
+
+	if (dev_priv->early_suspended) {
+		/* Use Disable pipeB plane to turn off HDMI screen
+		  * in early_suspend
+		  */
+		temp = REG_READ(dspcntr_reg);
+		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+			REG_WRITE(dspcntr_reg,
+				temp & ~DISPLAY_PLANE_ENABLE);
+			/* Flush the plane changes */
+			REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+		}
+	}
+
+	if (mode != DRM_MODE_DPMS_ON) {
+		if (!dev_priv->hdmi_first_boot) {
+			REG_WRITE(hdmi_priv->hdmib_reg,
+					hdmib & ~HDMIB_PORT_EN & ~HDMIB_AUDIO_ENABLE);
+			otm_hdmi_vblank_control(dev, false);
+			REG_WRITE(HDMIPHYMISCCTL, hdmi_phy_misc | HDMI_PHY_POWER_DOWN);
+			rc = otm_hdmi_disable_all_infoframes(hdmi_priv->context);
+			if (rc != OTM_HDMI_SUCCESS)
+				pr_err("%s: failed to disable all infoframes\n",
+						__func__);
+		}
+	} else {
+		REG_WRITE(HDMIPHYMISCCTL, hdmi_phy_misc & ~HDMI_PHY_POWER_DOWN);
+		otm_hdmi_vblank_control(dev, true);
+		REG_WRITE(hdmi_priv->hdmib_reg, hdmib | HDMIB_PORT_EN);
+
+		if (hdmi_priv->delayed_audio_hotplug) {
+			mid_hdmi_audio_signal_event(dev, HAD_EVENT_HOT_PLUG);
+			switch_set_state(&hdmi_priv->sdev, 1);
+			pr_info("%s: hdmi switch state set to 1.\n", __func__);
+			hdmi_priv->delayed_audio_hotplug = false;
+		}
+
+		if (hdmi_priv->current_mode)
+			__android_hdmi_drm_mode_to_otm_timing(&otm_mode,
+				hdmi_priv->current_mode);
+		else
+			pr_info("%s: No saved current mode found, unable to restore\n",
+				__func__);
+
+		if (is_monitor_hdmi) {
+			/* Enable AVI infoframes for HDMI mode */
+			rc = otm_hdmi_infoframes_set_avi(hdmi_priv->context,
+								&otm_mode);
+			if (rc != OTM_HDMI_SUCCESS)
+				pr_err("%s: failed to program avi infoframe\n",
+					__func__);
+		} else {
+			/* Disable all infoframes for DVI mode */
+			rc = otm_hdmi_disable_all_infoframes
+							(hdmi_priv->context);
+			if (rc != OTM_HDMI_SUCCESS)
+				pr_err("%s: failed to disable all infoframes\n",
+					__func__);
+		}
+	}
+
+#ifdef OTM_HDMI_HDCP_ENABLE
+	otm_hdmi_hdcp_set_dpms(hdmi_priv->context,
+			(mode == DRM_MODE_DPMS_ON));
+#endif
+	/* flush hdmi port register */
+	REG_WRITE(hdmi_priv->hdmib_reg, REG_READ(hdmi_priv->hdmib_reg));
+	ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+	pr_debug("Exiting %s\n", __func__);
+}
+
+/* OS Adaptation Layer Function Pointers
+ * Functions required to be implemented by Linux DRM framework
+ */
+const struct drm_encoder_helper_funcs android_hdmi_enc_helper_funcs = {
+	.dpms = android_hdmi_encoder_dpms,
+	.save = android_hdmi_encoder_save,
+	.restore = android_hdmi_encoder_restore,
+	.mode_fixup = android_hdmi_mode_fixup,
+	.prepare = android_hdmi_encoder_prepare,
+	.mode_set = android_hdmi_enc_mode_set,
+	.commit = android_hdmi_encoder_commit,
+};
+
+const struct drm_connector_helper_funcs
+	android_hdmi_connector_helper_funcs = {
+	.get_modes = android_hdmi_get_modes,
+	.mode_valid = android_hdmi_mode_valid,
+	.best_encoder = android_hdmi_best_encoder,
+};
+
+const struct drm_connector_funcs android_hdmi_connector_funcs = {
+	.dpms = android_hdmi_connector_dpms,
+	.save = android_hdmi_connector_save,
+	.restore = android_hdmi_connector_restore,
+	.detect = android_hdmi_detect,
+	.fill_modes = android_hdmi_probe_single_connector_modes,
+	.set_property = android_hdmi_set_property,
+	.destroy = android_hdmi_connector_destroy,
+};
+
+const struct drm_encoder_funcs android_hdmi_enc_funcs = {
+	.destroy = android_hdmi_enc_destroy,
+};
+
+/* END - OS Adaptation Layer Function Pointers
+ * Functions required to be implemented by Linux DRM framework
+ */
+
+/**
+ * This function initializes the hdmi driver called during bootup
+ * @dev		: handle to drm_device
+ * @mode_dev	: device mode
+ *
+ * Returns nothing
+ *
+ * This function initializes the hdmi driver called during bootup
+ * which includes initializing drm_connector, drm_encoder, hdmi audio
+ * and msic and collects all information reqd in hdmi private.
+ */
+void android_hdmi_driver_init(struct drm_device *dev,
+				    void *mode_dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct android_hdmi_priv *hdmi_priv = dev_priv->hdmi_priv;
+	struct psb_intel_output *psb_intel_output;
+	struct drm_connector *connector;
+	struct drm_encoder *encoder;
+	bool power_on = false;
+
+	pr_info("Enter %s", __func__);
+
+	psb_intel_output = kzalloc(sizeof(struct psb_intel_output), GFP_KERNEL);
+	if (!psb_intel_output)
+		return;
+
+	psb_intel_output->mode_dev = mode_dev;
+	psb_intel_output->type = INTEL_OUTPUT_HDMI;
+	psb_intel_output->dev_priv = (struct drm_psb_private *)hdmi_priv;
+
+	connector = &psb_intel_output->base;
+	encoder = &psb_intel_output->enc;
+
+	drm_connector_init(dev, &psb_intel_output->base,
+				&android_hdmi_connector_funcs,
+				DRM_MODE_CONNECTOR_DVID);
+	drm_connector_helper_add(connector,
+				 &android_hdmi_connector_helper_funcs);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+	drm_connector_attach_property(connector,
+					dev->mode_config.scaling_mode_property,
+					default_hdmi_scaling_mode);
+#else
+        drm_object_attach_property(&connector->base,
+                                        dev->mode_config.scaling_mode_property,
+					default_hdmi_scaling_mode);
+#endif
+	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+	connector->interlace_allowed = false;
+	connector->doublescan_allowed = false;
+	/* Disable polling */
+	connector->polled = 0;
+
+	drm_encoder_init(dev, &psb_intel_output->enc, &android_hdmi_enc_funcs,
+			 DRM_MODE_ENCODER_TMDS);
+	drm_encoder_helper_add(encoder, &android_hdmi_enc_helper_funcs);
+
+	drm_mode_connector_attach_encoder(&psb_intel_output->base,
+					  &psb_intel_output->enc);
+
+#ifdef OTM_HDMI_HDCP_ENABLE
+	otm_hdmi_hdcp_init(hdmi_priv->context, &hdmi_ddc_read_write);
+#endif
+	/* Initialize the audio driver interface */
+	mid_hdmi_audio_init(hdmi_priv);
+	/* initialize hdmi encoder restore delayed work */
+	INIT_DELAYED_WORK(&hdmi_priv->enc_work, android_hdmi_encoder_restore_wq);
+
+	drm_sysfs_connector_add(connector);
+
+	/* Turn on power rails for HDMI */
+	power_on = otm_hdmi_power_rails_on();
+	if (!power_on)
+		pr_err("%s: Unable to power on HDMI rails\n", __func__);
+
+	/* Enable hotplug detection */
+	otm_hdmi_enable_hpd(true);
+
+	usleep_range(1000, 2000);
+
+	pr_info("%s: Done with driver init\n", __func__);
+	pr_info("Exit %s\n", __func__);
+}
+/*
+ *
+ * Internal scripts wrapper functions.
+ *
+ */
+
+#ifdef OTM_HDMI_UNIT_TEST
+/**
+ * test_otm_hdmi_report_edid_full() - Report current EDID information
+ *
+ * This routine simply dumps the EDID information
+ * Returns - nothing
+ */
+void test_otm_hdmi_report_edid_full(void)
+{
+	int i = 0;
+	printk(KERN_ALERT "\n*** Supported Modes ***\n");
+
+	for (i = 0; i < debug_modes_count; i++)
+		printk(KERN_ALERT "Mode %02d: %s @%dHz Clk: %dKHz\n", i,
+		arr_modes[i].name, arr_modes[i].frq, arr_modes[i].clk);
+
+	printk(KERN_ALERT "\n");
+}
+EXPORT_SYMBOL_GPL(test_otm_hdmi_report_edid_full);
+#endif
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/os/android/include/android_hdmi.h b/drivers/external_drivers/intel_media/otm_hdmi/os/android/include/android_hdmi.h
new file mode 100755
index 0000000..74a68b4
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/os/android/include/android_hdmi.h
@@ -0,0 +1,348 @@
+/*
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  Contact Information:
+
+  Intel Corporation
+  2200 Mission College Blvd.
+  Santa Clara, CA  95054
+
+  BSD LICENSE
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+ * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions and the following disclaimer in
+   the documentation and/or other materials provided with the
+   distribution.
+ * Neither the name of Intel Corporation nor the names of its
+   contributors may be used to endorse or promote products derived
+   from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#ifndef __ANDROID_HDMI_H
+#define __ANDROID_HDMI_H
+
+#include <linux/types.h>
+#include <linux/switch.h>
+#include <drm/drmP.h>
+
+#define CEA_EXT     0x02
+#define VTB_EXT     0x10
+#define DI_EXT      0x40
+#define LS_EXT      0x50
+#define MI_EXT      0x60
+
+/* Define the monitor type HDMI or DVI */
+#define MONITOR_TYPE_HDMI 1
+#define MONITOR_TYPE_DVI  2
+
+struct android_hdmi_priv {
+	/* common */
+	struct drm_device *dev;
+
+	struct switch_dev sdev;
+
+	/*medfield specific */
+	u32 hdmib_reg;
+	u32 save_HDMIB;
+	u32 clock_khz;
+
+	/* Delayed Encoder Restore */
+	struct drm_display_mode *current_mode;
+	bool need_encoder_restore;
+	struct delayed_work enc_work;
+	void *data;
+
+	bool hdmi_suspended;
+	bool hdmi_audio_enabled;
+	bool delayed_audio_hotplug;
+	bool is_hdcp_supported;
+	int monitor_type;
+	void *context;
+};
+
+extern int psb_intel_panel_fitter_pipe(struct drm_device *dev);
+extern void mid_hdmi_audio_init(struct android_hdmi_priv *p_hdmi_priv);
+
+#ifdef CONFIG_SUPPORT_HDMI
+
+/**
+ * This function initializes the hdmi driver called during bootup
+ * @dev		: handle to drm_device
+ * @mode_dev	: device mode
+ *
+ * Returns nothing
+ *
+ * This function initializes the hdmi driver called during bootup
+ * which includes initializing drm_connector, drm_encoder, hdmi audio
+ * and msic and collects all information reqd in hdmi private.
+ */
+void android_hdmi_driver_init(struct drm_device *dev,
+			      void *mode_dev);
+
+
+
+
+int android_hdmi_irq_test(struct drm_device *dev);
+
+
+/**
+ * This function sets the hdmi driver during bootup
+ * @dev		: handle to drm_device
+ *
+ * Returns nothing
+ *
+ * This function is called from psb driver to setup the
+ * hdmi driver. Called only once during boot-up of the system
+ */
+void android_hdmi_driver_setup(struct drm_device *dev);
+
+ /**
+ * Description: programming display registers as per the scaling property.
+ *
+ * @crtc:		crtc
+ *
+ * Returns:	0 on success
+ *		-1 on failure
+ */
+int android_hdmi_set_scaling_property(struct drm_crtc *crtc);
+
+/**
+ * Description: crtc mode set for hdmi pipe.
+ *
+ * @crtc:		crtc
+ * @mode:		mode requested
+ * @adjusted_mode:	adjusted mode
+ * @x, y, old_fb:	old frame buffer values used for flushing old plane.
+ *
+ * Returns:	0 on success
+ *		-EINVAL on NULL input arguments
+ */
+int android_hdmi_crtc_mode_set(struct drm_crtc *crtc,
+				struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode,
+				int x, int y,
+				struct drm_framebuffer *old_fb);
+
+
+/**
+ * Store the HDMI registers and enable the display
+ * Input parameters:
+ *	psDrmDev: Drm Device.
+ * Returns: none
+ */
+void android_hdmi_resume_display(struct drm_device *dev);
+
+/**
+ * Save the HDMI display registers and disable the display
+ * Input parameters:
+ *	psDrmDev: Drm Device.
+ * Returns: none
+ */
+void android_hdmi_suspend_display(struct drm_device *dev);
+
+/**
+ * Prepare HDMI EDID-like data and copy it to the given buffer
+ * Input parameters:
+ * @dev: drm Device
+ * @eld: pointer to otm_hdmi_eld_t data structure
+ *
+ * Returns:	0 on success
+ *		-EINVAL on NULL input arguments
+ */
+int android_hdmi_get_eld(struct drm_device *dev, void *eld);
+
+/**
+ * Enable HDCP on HDMI display
+ * @dev:	drm device
+ *
+ * Returns:	true on success else false
+ */
+
+/**
+ * get DPLL clock in khz
+ * Input parameters:
+ * @dev: drm Device
+ *
+ * Returns:  clock in khz
+ */
+uint32_t android_hdmi_get_dpll_clock(struct drm_device *dev);
+
+/**
+ * enable HDMI HDCP
+ * Input parameters:
+ * @dev: Drm Device
+ *
+ * Returns:	true on success else false
+ */
+bool android_enable_hdmi_hdcp(struct drm_device *dev);
+
+/**
+ * disable HDCP on HDMI display
+ * @dev:	drm device
+ *
+ * Returns:	true on success else false
+ */
+bool android_disable_hdmi_hdcp(struct drm_device *dev);
+
+/**
+ * Query whether HDCP is enabled & encrypting on HDMI display
+ * @dev:	drm device
+ *
+ * Returns:	true if encrypting else false
+ */
+bool android_check_hdmi_hdcp_enc_status(struct drm_device *dev);
+
+/**
+ * Query HDCP Phase 3 Link Status
+ * @dev:	drm device
+ *
+ * Returns:	true if link is not compromised else false
+ */
+bool android_check_hdmi_hdcp_link_status(struct drm_device *dev);
+
+/**
+ * Query presence of a HDCP Sink Device
+ * @dev:	drm device
+ * @bksv:	ksv value of the sink device will be returned on success
+ *
+ * Returns:	true on successful detection else false
+ */
+bool android_query_hdmi_hdcp_sink(struct drm_device *dev, uint8_t *bksv);
+
+
+/**
+ * Description: check whether hdmi/dvi is connected or not.
+ *
+ * @dev:	drm device
+ *
+ * Returns:	true if hdmi/dvi is connected.
+ *		false if hdmi/dvi is not connected.
+ */
+bool android_hdmi_is_connected(struct drm_device *dev);
+
+
+#else /* CONFIG_SUPPORT_HDMI */
+
+static inline void android_hdmi_driver_init(struct drm_device *dev,
+						void *mode_dev) {}
+
+static inline void android_hdmi_driver_setup(struct drm_device *dev) {}
+
+static inline int android_hdmi_set_scaling_property(struct drm_crtc *crtc)
+{ return 0; }
+
+static inline int android_hdmi_crtc_mode_set(struct drm_crtc *crtc,
+				struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode,
+				int x, int y,
+				struct drm_framebuffer *old_fb) { return 0; }
+
+static inline void android_hdmi_resume_display(
+				struct drm_device *dev) {}
+
+static inline void android_hdmi_suspend_display(
+				struct drm_device *dev) {}
+
+static inline int android_hdmi_get_eld(struct drm_device *dev, void *eld)
+{ return 0; }
+
+static uint32_t android_hdmi_get_dpll_clock(struct drm_device *dev)
+{ return 0; }
+
+static inline bool android_enable_hdmi_hdcp(struct drm_device *dev)
+{ return false; }
+static inline bool android_disable_hdmi_hdcp(struct drm_device *dev)
+{ return false; }
+static inline bool android_check_hdmi_hdcp_enc_status(struct drm_device *dev)
+{ return false; }
+static inline bool android_check_hdmi_hdcp_link_status(struct drm_device *dev)
+{ return false; }
+static inline bool android_query_hdmi_hdcp_sink(struct drm_device *dev,
+						uint8_t *bksv)
+{ return false; }
+
+static inline bool android_hdmi_is_connected(struct drm_device *dev)
+{ return false; }
+
+#endif /* CONFIG_SUPPORT_HDMI */
+
+/*
+ * Description: hdmi helper function to parse cmdline option
+ *		from hdmicmd tool
+ *
+ * @cmdoption:	cmdline option
+ *
+ * Returns:	error codes 0(success),-1(cmd option),-2(invalid input)
+ */
+int otm_cmdline_parse_option(char *cmdoption);
+
+/*
+ * Description: hdmi helper function to parse vic option
+ *		from hdmicmd tool
+ *
+ * @cmdoption:	cmdline option
+ *
+ * Returns:	error codes 0(success),-1(error)
+ */
+int otm_cmdline_set_vic_option(int vic);
+
+/*
+ * Description: hdmi helper function to print cmdline options
+ *		from hdmicmd tool
+ *
+ * Returns:	none
+ */
+void otm_print_cmdline_option(void);
+
+/*
+ * Description: hdmi helper function to print edid information
+ *		from report_edid tool
+ *
+ * Returns:	none
+ */
+void test_otm_hdmi_report_edid_full(void);
+
+#endif /* __ANDROID_HDMI_H */
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/pil/common/edid.c b/drivers/external_drivers/intel_media/otm_hdmi/pil/common/edid.c
new file mode 100644
index 0000000..85630e9
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/pil/common/edid.c
@@ -0,0 +1,1539 @@
+/*
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  Contact Information:
+
+  Intel Corporation
+  2200 Mission College Blvd.
+  Santa Clara, CA  95054
+
+  BSD LICENSE
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include "otm_hdmi_types.h"
+#include "edid_internal.h"
+#include "hdmi_timings.h"
+
+/*
+ * Macro for error checking
+ */
+#define VERIFY(exp, rc, error, label) \
+	if (!(exp)) {                 \
+		rc = error;           \
+		goto label;           \
+	}
+
+#define VERIFY_QUICK(exp, label) \
+	if (!(exp)) {            \
+		goto label;      \
+	}
+
+/*
+ * Various constants
+ */
+#define EDID_SIGNATURE 0x00FFFFFFFFFFFF00ull
+
+/*
+ * Structure to keep state of read operation
+ */
+typedef struct {
+	unsigned char *buffer;
+	int position;
+} read_context_t;
+
+static void fetch_timing_descriptor(timing_descriptor_t *td,
+				read_context_t *rctx)
+				__attribute__((unused));
+
+static void declare_mandatory_3d(edid_info_t *edid)
+				__attribute__((unused));
+
+static void declare_short_3d(unsigned int struc_3d,
+				unsigned int mask,
+				edid_info_t *edid)
+				__attribute__((unused));
+
+static void declare_explicit_3d(unsigned char *e,
+				unsigned int n,
+				edid_info_t *edid)
+				__attribute__((unused));
+
+static void decode_misc_modes(unsigned char *e,
+				int n, edid_info_t *edid)
+				__attribute__((unused));
+
+/*
+ * fetch_next_field()
+ */
+static void fetch_next_field(void *dst,
+			read_context_t *read_context,
+			unsigned int size)
+{
+	unsigned char *b = read_context->buffer + read_context->position;
+
+	switch (size) {
+#ifdef __ORDER_MSB__
+	case 1:
+		*(unsigned char *)dst = *b;
+		break;
+
+	case 2:
+		*(unsigned short *)dst |= (unsigned short)*(b + 1) << 0;
+		*(unsigned short *)dst |= (unsigned short)*(b + 0) << 8;
+		break;
+
+	case 4:
+		*(unsigned int *)dst |= (unsigned int)*(b + 3) << 0;
+		*(unsigned int *)dst |= (unsigned int)*(b + 2) << 8;
+		*(unsigned int *)dst |= (unsigned int)*(b + 1) << 16;
+		*(unsigned int *)dst |= (unsigned int)*(b + 0) << 24;
+		break;
+
+	case 8:
+		*(unsigned long long *)dst |= (unsigned long long)*(b + 7) << 0;
+		*(unsigned long long *)dst |= (unsigned long long)*(b + 6) << 8;
+		*(unsigned long long *)dst |=
+		    (unsigned long long)*(b + 5) << 16;
+		*(unsigned long long *)dst |=
+		    (unsigned long long)*(b + 4) << 24;
+		*(unsigned long long *)dst |=
+		    (unsigned long long)*(b + 3) << 32;
+		*(unsigned long long *)dst |=
+		    (unsigned long long)*(b + 2) << 40;
+		*(unsigned long long *)dst |=
+		    (unsigned long long)*(b + 1) << 48;
+		*(unsigned long long *)dst |=
+		    (unsigned long long)*(b + 0) << 56;
+		break;
+#endif
+
+	default:
+		/*
+		 * This is only for byte sequences with LSB order, or where
+		 * where order does not matter
+		 */
+		memcpy(dst, b, size);
+		break;
+	}
+
+	read_context->position += size;
+}
+
+/*
+ * fetch_generic_descriptor()
+ */
+static void fetch_generic_descriptor(
+				generic_descriptor_t *gd,
+				read_context_t *rctx)
+{
+	fetch_next_field(&gd->flag_required, rctx, 2);
+	fetch_next_field(&gd->flag_reserved, rctx, 1);
+	fetch_next_field(&gd->data_type_tag, rctx, 1);
+	fetch_next_field(&gd->flag, rctx, 1);
+	fetch_next_field(&gd->payload, rctx, 13);
+}
+
+/*
+ * fetch_timing_descriptor()
+ */
+static void fetch_timing_descriptor(
+				timing_descriptor_t *td,
+				read_context_t *rctx)
+{
+	fetch_next_field(&td->pixel_clock, rctx, 2);
+	fetch_next_field(&td->h_active, rctx, 1);
+	fetch_next_field(&td->h_blanking, rctx, 1);
+	fetch_next_field(&td->h_active_blanking_hb, rctx, 1);
+	fetch_next_field(&td->v_active, rctx, 1);
+	fetch_next_field(&td->v_blanking, rctx, 1);
+	fetch_next_field(&td->v_active_blanking_hb, rctx, 1);
+	fetch_next_field(&td->h_sync_offset, rctx, 1);
+	fetch_next_field(&td->h_sync_pulse_width, rctx, 1);
+	fetch_next_field(&td->vs_offset_pulse_width, rctx, 1);
+	fetch_next_field(&td->offset_pulse_width_hb, rctx, 1);
+	fetch_next_field(&td->h_image_size, rctx, 1);
+	fetch_next_field(&td->v_image_size, rctx, 1);
+	fetch_next_field(&td->hv_image_size, rctx, 1);
+	fetch_next_field(&td->h_border, rctx, 1);
+	fetch_next_field(&td->v_border, rctx, 1);
+	fetch_next_field(&td->flags, rctx, 1);
+}
+
+/*
+ * fetch_block_zero()
+ * - ebz : structure representing edid block zero to be filled in
+ * - data: buffer of 128 bytes containing raw edid data supplied by TV
+ */
+static otm_hdmi_ret_t fetch_block_zero(edid_block_zero_t *ebz,
+				unsigned char *data)
+{
+	otm_hdmi_ret_t rc = OTM_HDMI_SUCCESS;
+	read_context_t read_context = {data, 0};
+	read_context_t *rctx = &read_context;
+	int i;
+
+	VERIFY(ebz != NULL, rc, OTM_HDMI_ERR_NULL_ARG, exit);
+	VERIFY(data != NULL, rc, OTM_HDMI_ERR_NULL_ARG, exit);
+
+	/* EDID signature */
+	fetch_next_field(&ebz->signature, rctx, 8);
+
+	/* Manufacturer name id */
+	fetch_next_field(&ebz->manufacturer_id, rctx, 2);
+
+	/* Product code */
+	fetch_next_field(&ebz->product_code, rctx, 2);
+
+	/* Serial number */
+	fetch_next_field(&ebz->serial_number, rctx, 4);
+
+	/* Manufacture week */
+	fetch_next_field(&ebz->manufacture_week, rctx, 1);
+
+	/* Manufacture year */
+	fetch_next_field(&ebz->manufacture_year, rctx, 1);
+
+	/* EDID version */
+	fetch_next_field(&ebz->version, rctx, 1);
+
+	/* EDID revision */
+	fetch_next_field(&ebz->revision, rctx, 1);
+
+	/* Video input definition */
+	fetch_next_field(&ebz->video_input_definition, rctx, 1);
+
+	/* Max horizontal image size */
+	fetch_next_field(&ebz->max_horz_image_size, rctx, 1);
+
+	/* Max vertical image size*/
+	fetch_next_field(&ebz->max_vert_image_size, rctx, 1);
+
+	/* Gamma */
+	fetch_next_field(&ebz->gamma, rctx, 1);
+
+	/* Feature support */
+	fetch_next_field(&ebz->feature_support, rctx, 1);
+
+	/* Color characteristics */
+	fetch_next_field(&ebz->rg_lowbits, rctx, 1);
+	fetch_next_field(&ebz->bw_lowbits, rctx, 1);
+	fetch_next_field(&ebz->red_x, rctx, 1);
+	fetch_next_field(&ebz->red_y, rctx, 1);
+	fetch_next_field(&ebz->green_x, rctx, 1);
+	fetch_next_field(&ebz->green_y, rctx, 1);
+	fetch_next_field(&ebz->blue_x, rctx, 1);
+	fetch_next_field(&ebz->blue_y, rctx, 1);
+	fetch_next_field(&ebz->white_x, rctx, 1);
+	fetch_next_field(&ebz->white_x, rctx, 1);
+
+	/* Established timings */
+	fetch_next_field(&ebz->est_timing_1, rctx, 1);
+	fetch_next_field(&ebz->est_timing_2, rctx, 1);
+	fetch_next_field(&ebz->est_timing_3, rctx, 1);
+
+	/* Standard timings */
+	for (i = 0; i < EDID_STD_TIMINGS_NUM; i++)
+		fetch_next_field(&ebz->std_timings[i], rctx, 2);
+
+	/* Detailed timing descriptors 1 and 2 */
+	fetch_generic_descriptor((generic_descriptor_t *) &ebz->td_1, rctx);
+	fetch_generic_descriptor((generic_descriptor_t *) &ebz->td_2, rctx);
+
+	/* Monitor Descriptors 1 and 2 */
+	fetch_generic_descriptor((generic_descriptor_t *) &ebz->md_1, rctx);
+	fetch_generic_descriptor((generic_descriptor_t *) &ebz->md_2, rctx);
+
+	/* Number of 128 byte blocks to follow */
+	fetch_next_field(&ebz->num_extentions, rctx, 1);
+
+exit:
+	return rc;
+}
+
+/*
+ * fetch_extension_block_cea()
+ */
+static otm_hdmi_ret_t fetch_extension_block_cea(extention_block_cea_t *eb,
+				    unsigned char *data)
+{
+	otm_hdmi_ret_t rc = OTM_HDMI_SUCCESS;
+	read_context_t read_context = {data, 0};
+	read_context_t *rctx = &read_context;
+
+	VERIFY(eb != NULL, rc, OTM_HDMI_ERR_NULL_ARG, exit);
+	VERIFY(data != NULL, rc, OTM_HDMI_ERR_NULL_ARG, exit);
+
+	fetch_next_field(&eb->tag, rctx, 1);
+	fetch_next_field(&eb->revision, rctx, 1);
+	fetch_next_field(&eb->content_offset, rctx, 1);
+	fetch_next_field(&eb->flags, rctx, 1);
+	fetch_next_field(&eb->data, rctx, 124);
+
+exit:
+	return rc;
+}
+
+/*
+ * encode_refresh()
+ * Convert an integer refresh rate to the equivalent enumeration
+ */
+static otm_hdmi_refresh_t encode_refresh(unsigned refresh)
+{
+	/* Both 1/1001 and 1/1000 refresh rates are included in case
+	 * EDID contains 1/1001 pixel clock values in the entry
+	 */
+	switch (refresh) {
+	case 23:
+		return OTM_HDMI_REFRESH_24;
+	case 24:
+		return OTM_HDMI_REFRESH_24;
+	case 25:
+		return OTM_HDMI_REFRESH_25;
+	case 29:
+		return OTM_HDMI_REFRESH_30;
+	case 30:
+		return OTM_HDMI_REFRESH_30;
+	case 50:
+		return OTM_HDMI_REFRESH_50;
+	case 59:
+		return OTM_HDMI_REFRESH_60;
+	case 60:
+		return OTM_HDMI_REFRESH_60;
+	}
+	return OTM_HDMI_REFRESH_USER_DEFINED;
+}
+
+/*
+ * Returns index of timing with given VIC in given table; -1 otherwise
+ */
+static int find_timing_by_vic_tp(const otm_hdmi_timing_t **set, int size,
+						unsigned int vic)
+{
+	int i, rc = -1;
+
+	VERIFY(set, rc, -1, exit);
+
+	for (i = 0; i < size; i++) {
+		if (set[i]->metadata == vic) {
+			rc = i;
+			break;
+		}
+	}
+
+exit:
+	return rc;
+}
+
+/*
+ * Timings comparison
+ */
+static bool __timing_equal(const otm_hdmi_timing_t *t1,
+				    const otm_hdmi_timing_t *t2)
+{
+	unsigned int t1_flags = t1->mode_info_flags &
+			(PD_SCAN_INTERLACE | PD_AR_16_BY_9);
+	unsigned int t2_flags = t2->mode_info_flags &
+			(PD_SCAN_INTERLACE | PD_AR_16_BY_9);
+
+	return ((t1->width == t2->width) &&
+		(t1->height == t2->height) &&
+		(t1->refresh == t2->refresh) &&
+		(t1_flags == t2_flags) && (t1->stereo_type == t2->stereo_type));
+}
+
+/*
+ * Returns index of given timings in given table of timings; -1 otherwise
+ */
+int find_timing(const otm_hdmi_timing_t *set, int size,
+					const otm_hdmi_timing_t *e)
+{
+	int i, rc = -1;
+	VERIFY(set && e, rc, -1, exit);
+	for (i = 0; i < size && !__timing_equal(&set[i], e); i++);
+
+	rc = (i < size) ? i : -1;
+exit:
+	return rc;
+}
+
+/*
+ * Returns index of given timings in given table of timing pointers;
+ * -1 otherwise
+ */
+static int find_timing_tp(const otm_hdmi_timing_t **set, int size,
+					const otm_hdmi_timing_t *e)
+{
+	int i, rc = -1;
+	VERIFY(set && e, rc, -1, exit);
+	for (i = 0; i < size && !__timing_equal(set[i], e); i++);
+
+	rc = (i < size) ? i : -1;
+exit:
+	return rc;
+}
+
+/*
+ * add_timings()
+ */
+static otm_hdmi_ret_t add_timings(edid_info_t *edid,
+				const otm_hdmi_timing_t *pdt,
+				unsigned int order)
+{
+	otm_hdmi_ret_t rc = OTM_HDMI_SUCCESS;
+	int i, j;
+
+	/* Safety checks */
+	VERIFY((edid != NULL) && (pdt != NULL), rc,
+				OTM_HDMI_ERR_NULL_ARG, exit);
+
+	/* Is there room for more timings at all? */
+	VERIFY(edid->num_timings < MAX_TIMINGS, rc,
+				OTM_HDMI_ERR_INVAL, exit);
+
+	/* Print info about it */
+	print_pd_timing(pdt, order);
+
+	/* Do not add modes that we dont support */
+	i = find_timing_tp(edid->ref_timings, edid->num_ref_timings, pdt);
+	VERIFY(i > -1, rc, OTM_HDMI_ERR_INVAL, exit);
+
+	/* Do not add duplicates; Update discovery order though for cases when
+	 * a mode was decoded from DTD first
+	 */
+	if ((j = find_timing(edid->timings, edid->num_timings, pdt)) != -1) {
+		edid->order[j] = !edid->order[j] ? order : edid->order[j];
+		goto exit;
+	}
+	/* Save discovery order	*/
+	edid->order[edid->num_timings] = order;
+
+	/* Add timing */
+	edid->timings[edid->num_timings++] = *edid->ref_timings[i];
+
+	/* Update supported family of refresh rates */
+	edid->supports_60Hz = edid->supports_60Hz
+	    || pdt->refresh == OTM_HDMI_REFRESH_60;
+	edid->supports_50Hz = edid->supports_50Hz
+	    || pdt->refresh == OTM_HDMI_REFRESH_50;
+
+exit:
+	return rc;
+}
+
+/*
+ * checksum_valid()
+ */
+static bool checksum_valid(unsigned char *buffer, int size)
+{
+	unsigned char sum_computed = 0;
+
+	while (size-- > 0)
+		sum_computed += *(buffer++);
+
+	return (sum_computed == 0) ? true : false;
+}
+
+/*
+ * decode_speaker_allocation_data_block()
+ */
+static void decode_speaker_allocation_data_block(unsigned char *e, int n,
+					edid_info_t *edid)
+{
+	int ne = n / 3;
+
+	LOG_PRINT(LOG_LEVEL_DETAIL, "[speaker block]\n");
+
+	while (ne-- > 0) {
+		edid->speaker_map =
+		    (unsigned)*e | (unsigned)(*(e + 1) & 0x7) << 8;
+		print_speaker_layout(edid->speaker_map);
+		e++;
+		e++;		 /* skip the rest of the block */
+	}
+}
+
+/*
+ * decode_video_data_block()
+ */
+static void decode_video_data_block(unsigned char *e, int n, edid_info_t *edid)
+{
+	int vic, j, i = 0;
+
+	LOG_PRINT(LOG_LEVEL_DETAIL, "[video data block]\n");
+
+	while (n-- > 0) {
+		vic = *e & 0x7F;
+		LOG_PRINT(LOG_LEVEL_DETAIL,
+			 "- mode #%d %s\n", vic, (*e & 0x80) ? "native" : "");
+
+		if ((j =
+		     find_timing_by_vic_tp(edid->ref_timings,
+					   edid->num_ref_timings, vic)) != -1) {
+			add_timings(edid, edid->ref_timings[j], ++i);
+
+			 /* Handle native mode */
+			if (*e & 0x80) {
+				edid->native_idx =
+				    find_timing(edid->timings,
+						edid->num_timings,
+						edid->ref_timings[j]);
+			}
+		}
+		e++;
+	}
+}
+
+/*
+ * decode_audio_data_block()
+ */
+static void decode_audio_data_block(unsigned char *e, int n, edid_info_t *edid)
+{
+	int ne = n / SAD_SIZE;
+	void *sad_offset;
+	otm_hdmi_audio_cap_t *adb = (otm_hdmi_audio_cap_t *) &edid->audio_caps;
+
+	VERIFY_QUICK(ne > 0, exit);
+	LOG_PRINT(LOG_LEVEL_DETAIL, "[audio data block... %d entries]\n", ne);
+
+	/* Do we have enough space in SAD table? */
+	if (edid->short_audio_descriptor_count + ne > MAX_CAPS) {
+		LOG_PRINT(LOG_LEVEL_ERROR,
+			"Too many SADs in EDID. Not adding %d SADs\n", ne);
+		return;
+	}
+
+	sad_offset = edid->short_audio_descriptor_data +
+			edid->short_audio_descriptor_count * SAD_SIZE;
+	memcpy(sad_offset, e, n);
+
+	edid->short_audio_descriptor_count += ne;
+
+	while (ne-- > 0) {
+		/* Do we have room for another capability? */
+		if (edid->num_caps < MAX_CAPS) {
+			adb[edid->num_caps].format = (*e & 0x78) >> 3;
+			adb[edid->num_caps].max_channels = (*e & 0x07) + 1;
+			adb[edid->num_caps].fs = *(e + 1) & 0x7F;
+			adb[edid->num_caps].ss_bitrate = *(e + 2);
+			print_audio_capability(&adb[edid->num_caps]);
+			edid->num_caps++;
+		}
+		/* Go to the next entry of the block */
+		e += SAD_SIZE;
+	}
+
+exit:
+	pr_debug("exit %s\n", __func__);
+}
+
+/*
+ *
+ */
+static void declare_mandatory_3d(edid_info_t *edid)
+{
+	if (true) {
+		add_timings(edid, &MODE_1920x1080p24__FP2, 0);
+		add_timings(edid, &MODE_1920x1080p24__FP, 0);
+		add_timings(edid, &MODE_1920x1080p24__TBH2, 0);
+	}
+
+	if (edid->supports_60Hz) {
+		add_timings(edid, &MODE_1280x720p5994_60__FP2, 0);
+		add_timings(edid, &MODE_1280x720p5994_60__FP, 0);
+		add_timings(edid, &MODE_1920x1080i5994_60__SBSH2, 0);
+		add_timings(edid, &MODE_1280x720p5994_60__TBH2, 0);
+	}
+
+	if (edid->supports_50Hz) {
+		add_timings(edid, &MODE_1280x720p50__FP2, 0);
+		add_timings(edid, &MODE_1280x720p50__FP, 0);
+		add_timings(edid, &MODE_1920x1080i50__SBSH2, 0);
+		add_timings(edid, &MODE_1280x720p50__TBH2, 0);
+	}
+};
+
+/*
+ * Addition of 3D timing via 2D mode
+ */
+static void add_3d_mode_via_2d(unsigned int vic, unsigned int struc_3d,
+						edid_info_t *edid)
+{
+	unsigned int j, k, num_timings = edid->num_timings;
+	otm_hdmi_timing_t t;
+
+	struct {
+		unsigned int struc;
+		unsigned int type;
+	} details_3d[] = {
+		{
+		0x00, OTM_HDMI_STEREO_FRAME_PACKING_2}, {
+		0x00, OTM_HDMI_STEREO_FRAME_PACKING}, {
+		0x06, OTM_HDMI_STEREO_TOP_BOTTOM_HALF_2}, {
+	0x08, OTM_HDMI_STEREO_SIDE_BY_SIDE_HALF_2},};
+
+	/* Look for an entry with given order among all decoded 2D timings */
+	for (j = 0; j < num_timings; j++) {
+		if (edid->order[j] == vic) {
+			/*
+			 * Create all required 3D variations for given
+			 * 2D mode
+			 */
+			for (k = 0;
+			     k < sizeof(details_3d) / sizeof(*details_3d);
+			     k++) {
+				if (details_3d[k].struc == struc_3d) {
+					t = edid->timings[j];
+					t.stereo_type = details_3d[k].type;
+					add_timings(edid, &t, 0);
+				}
+			}
+
+			/* Entry of given order was found so start looking for
+			 * new one
+			 */
+			break;
+		}
+	}
+}
+
+/*
+ * Processing of 3D modes declared via 3D_Struture_ALL and 3D_MASK
+ */
+static void declare_short_3d(
+		unsigned int struc_3d, unsigned int mask,
+		edid_info_t *edid)
+{
+	unsigned int i, j, modes;
+
+	/* Go through each 2D_VIC specified via 3D_MASK */
+	for (i = 0; i < 16; i++, mask = mask >> 1) {
+		/*
+		 * Go through stereo variations specified via
+		 * 3D_Structure_ALL
+		 */
+		for (j = 0, modes = struc_3d; j < 16; j++, modes = modes >> 1) {
+			if (modes & 0x01 && mask & 0x01)
+				add_3d_mode_via_2d(i + 1, j, edid);
+		}
+	}
+}
+
+/*
+ * Processing of 3D modes declared via explicit list of 2D vics
+ */
+static void declare_explicit_3d(
+			unsigned char *e,
+			unsigned int n,
+			edid_info_t *edid)
+{
+	unsigned int i;
+
+	/* Go through the list of 2D_VIC_ORDER_X entries
+	 * 0x08 typed entries are 2 bytes, others are 1 byte
+	 */
+	for (i = 0; i < n; i += (e[i] & 0x0F) == 0x08 ? 2 : 1)
+		add_3d_mode_via_2d(((e[i] & 0xF0) >> 4) + 1, e[i] & 0x0F, edid);
+}
+
+/*
+ * decode_misc_modes()
+ */
+static void decode_misc_modes(unsigned char *e,
+			int n, edid_info_t *edid)
+{
+
+}
+
+#ifdef OTM_HDMI_3D_ENABLE
+/* TODO: Should be Revisted
+ * 3D parsing hangs system with edid09 of analyzer Bug206379 */
+/*
+ * decode_3D_modes()
+ */
+void decode_3D_modes(unsigned char *e, int n, int layout, edid_info_t *edid)
+{
+	unsigned int offset;
+
+	LOG_PRINT(LOG_LEVEL_DETAIL, "- 3D modes supported:\n");
+
+	/* Declare mandatory modes */
+	declare_mandatory_3d(edid);
+
+	/* There are several ways of declaring 3D modes support */
+	switch (layout) {
+	case 0:		 /* Mandatory modes only */
+		offset = 0;
+		break;
+
+	case 1:		/* Mandatory modes + variations described in
+			 * 3D_Structure_ALL */
+		/* supported by each of first 16 VICs */
+		offset = 2;
+		declare_short_3d(e[1] | (e[0] << 8), 0xFFFF, edid);
+		break;
+
+	case 2:		 /* Mandatory modes + variations described in
+			  * 3D_Structure_ALL */
+		/* supported only by some of 16 first VICs
+		 * [as told by 3D_MASK] */
+		offset = 4;
+		declare_short_3d(e[1] | (e[0] << 8), e[3] | (e[2] << 8), edid);
+		break;
+
+	default:		 /* Reserved for future use */
+		offset = 0;
+		break;
+	}
+
+	/* Declare 3D modes based on present 2D VIC entries */
+	declare_explicit_3d(e + offset,
+			(n >= offset) ? n - offset : 0, edid);
+}
+#endif
+
+/*
+ * decode_vendor_data_block()
+ */
+static void decode_vendor_data_block(unsigned char *e,
+				int n, edid_info_t *edid)
+{
+	unsigned int pos;
+#ifdef OTM_HDMI_3D_ENABLE
+	unsigned int len_3d, len_hdmi;
+#endif
+
+	LOG_PRINT(LOG_LEVEL_DETAIL,
+		"[vendor specific data block.. length %d]\n", n);
+
+	/* Look for HDMI signature [0x030C00] */
+	if (n >= 3) {
+		if ((e[0] == 0x03) && (e[1] == 0x0C) && (e[2] == 0x00)) {
+			edid->hdmi = true;
+			LOG_PRINT(LOG_LEVEL_DETAIL, "- HDMI signature found\n");
+		}
+	}
+	/* Parse Source Physical Address */
+	if (n >= 5)
+		edid->spa = (e[3] << 8) | e[4];
+	/* Look for more optional stuff */
+	if (n >= 6) {
+		/* Deep Color support */
+		edid->dc_48 = (e[5] & 0x40) != 0;
+		edid->dc_36 = (e[5] & 0x20) != 0;
+		edid->dc_30 = (e[5] & 0x10) != 0;
+		edid->dc_y444 = (e[5] & 0x08) != 0;
+
+		/* AI support */
+		edid->supports_ai = (e[5] & 0x80) != 0;
+	}
+	/* MAX TMDS clock */
+	if (n >= 7)
+		edid->max_tmds_clock = e[6] * 5;
+	/* Check for optional latency and 3D fields */
+	if ((n >= 8)) {
+		edid->latency_present = (e[7] & 0x80) != 0;
+		edid->latency_int_present = (e[7] & 0x40) != 0;
+		edid->hdmi_video_present = (e[7] & 0x20) != 0;
+	}
+	/* From now on keep explicit track of position we are reading */
+	pos = 8;
+
+	/* Get video latency [in ms] */
+	if (edid->latency_present) {
+		edid->latency_video = e[pos++];
+		edid->latency_audio = e[pos++];
+
+		/* Get interlaced video latency [in ms] */
+		if (edid->latency_int_present) {
+			edid->latency_video_interlaced = e[pos++];
+			edid->latency_audio_interlaced = e[pos++];
+		}
+	}
+
+#ifdef OTM_HDMI_3D_ENABLE
+	/* 3D and misc modes information from HDMI 1.4 specification */
+	if (edid->hdmi_video_present) {
+		edid->enabled_3d = (e[pos++] & 0x80) != 0;
+
+		len_3d = (e[pos] & 0x1F);
+		len_hdmi = (e[pos++] & 0xE0) >> 5;
+
+		/* Assumption is that both misc and 3D modes can be
+		 * present so deal with
+		 * misc modes first
+		 */
+		decode_misc_modes(e + pos, len_hdmi, edid);
+
+		/* Now deal with 3D stuff */
+		if (len_3d || edid->enabled_3d) {
+			decode_3D_modes(e + pos + len_hdmi, len_3d,
+					(e[pos - 2] & 0x60) >> 5, edid);
+		}
+
+	}
+#endif
+
+}
+
+/*
+ * decode_extended_data_block()
+ */
+static void decode_extended_data_block(unsigned char *e,
+				int n, edid_info_t *edid)
+{
+	LOG_PRINT(LOG_LEVEL_DETAIL, "[extended data block.. length %d]\n", n);
+
+	switch (*(e + 0)) {
+	case 0x00:		 /* Video Capability Block */
+		LOG_PRINT(LOG_LEVEL_DETAIL, "Video Capability Block\n");
+		edid->rgb_quant_selectable = *(e + 1) & 0x40;
+		edid->ycc_quant_selectable = *(e + 1) & 0x80;
+		break;
+	case 0x01:		 /* Vendor Specific Video Data Block */
+		LOG_PRINT(LOG_LEVEL_DETAIL,
+			"Vendor Specific Video Data Block\n");
+		break;
+	case 0x05:		 /* Colorimetry Block */
+		LOG_PRINT(LOG_LEVEL_DETAIL, "Colorimetry Block\n");
+		if (n == 3) {
+			edid->xvycc601 = (*(e + 1) & 0x01) != 0;
+			edid->xvycc709 = (*(e + 1) & 0x02) != 0;
+		}
+		break;
+	case 0x11:		 /* CEA Misc Audio Block */
+		LOG_PRINT(LOG_LEVEL_DETAIL, "CEA Misc Audio Block\n");
+		break;
+	case 0x12:		 /* Vendor specific audio data block */
+		LOG_PRINT(LOG_LEVEL_DETAIL,
+			"Vendor specific audio data Block\n");
+		break;
+	default:		 /* reserved blocks */
+		LOG_PRINT(LOG_LEVEL_DETAIL, "Reserved Block\n");
+		break;
+	}
+
+}
+
+/*
+ * This is what short descriptor handler signature should look like
+ * NOTE: e is where payload starts, i.e. header byte is not included!!!
+ */
+typedef void (*short_block_decoder_t)(unsigned char *e, int n,
+					edid_info_t *edid);
+
+static short_block_decoder_t short_block_decoders[] = {
+	/* Reserved */
+	NULL,
+	/* Audio data block decoder */
+	decode_audio_data_block,
+	/* Video data block decoder */
+	decode_video_data_block,
+	/* Vendor specific block decoder */
+	decode_vendor_data_block,
+	/* Speaker allocation block decoder */
+	decode_speaker_allocation_data_block,
+	/* VESA DTC data block decoder */
+	NULL,
+	/* Reserved */
+	NULL,
+	/* Extended tag handler */
+	decode_extended_data_block
+};
+
+/*
+ * decode_block_collection()
+ * See section 7.5 of CEA-861-C for details
+ */
+static void decode_block_collection(extention_block_cea_t *eb,
+				edid_info_t *edid)
+{
+	unsigned char *c = eb->data;
+	int block_type, payload_size;
+
+	/* All area before detailed descriptors should be filled
+	 */
+	while (c < ((unsigned char *)eb + eb->content_offset)) {
+		block_type = (*c & 0xE0) >> 5;
+		payload_size = *c & 0x1F;
+
+		/* Simple block types */
+		if ((block_type < 8) && (block_type >= 0)) {
+			if (short_block_decoders[block_type]) {
+				short_block_decoders[block_type] ((unsigned char
+								   *)c + 1,
+								  payload_size,
+								  edid);
+			} else {
+				LOG_PRINT(LOG_LEVEL_DETAIL,
+					 "[block 0x%x.. TBA]\n", block_type);
+			}
+		}
+		/* Unknown */
+		else
+			LOG_PRINT(LOG_LEVEL_DETAIL,
+					"[unknown block 0x%x]\n", (int)*c);
+
+		LOG_PRINT(LOG_LEVEL_DETAIL, "\n");
+		c += (*c & 0x1F) + 1;
+	}
+}
+
+/*
+ * decode_standard_timings()
+ * Section 3.9.1 of EDID STD
+ */
+static void decode_standard_timings(unsigned short st, edid_info_t *edid)
+{
+	struct {
+		int h;
+		int v;
+	} ar[] = { {
+	16, 10}, {
+	4, 3}, {
+	5, 4}, {
+	16, 9} };
+	otm_hdmi_timing_t pdt;
+	int r;
+
+	if (st != 0x0101) {
+		pdt.width = ((st & 0x00FF) + 31) * 8;
+		pdt.refresh = encode_refresh(((st & 0x3F00) >> 8) + 60);
+
+		r = ((st & 0xC000) >> 14);
+
+		/* Init flags with respect to aspect ratio */
+		pdt.mode_info_flags = (r == 3) ? PD_AR_16_BY_9 : 0;
+
+		/* TODO: Add proper logic for EDID earlier than 1.3
+		 * This weird line below makes sure division by zero is avoided
+		 */
+		pdt.height =
+		    pdt.width * ar[r].v / (ar[r].h ? ar[r].h : ar[r].v);
+
+		/* Indicate no stereo support */
+		pdt.stereo_type = OTM_HDMI_STEREO_NONE;
+
+		LOG_PRINT(LOG_LEVEL_DETAIL, "[Standart timing]\n");
+		add_timings(edid, &pdt, 0);
+	}
+}
+
+/*
+ * decode_detailed_timings()
+ * Table 3.16 of EDID STD
+ */
+static bool decode_detailed_timings(timing_descriptor_t *td,
+					otm_hdmi_timing_t *pdt)
+{
+	bool rc = true;
+
+	int pixel_clock = td->pixel_clock * 10;
+	int h_active = ((td->h_active_blanking_hb & 0xF0) << 4) | td->h_active;
+	int h_blanking =
+	    ((td->h_active_blanking_hb & 0x0F) << 8) | td->h_blanking;
+	int v_active = ((td->v_active_blanking_hb & 0xF0) << 4) | td->v_active;
+	int v_blanking =
+	    ((td->v_active_blanking_hb & 0x0F) << 8) | td->v_blanking;
+	int h_sync_off =
+	    ((td->offset_pulse_width_hb & 0xC0) << 2) | td->h_sync_offset;
+	int h_sync_pw =
+	    ((td->offset_pulse_width_hb & 0x30) << 4) | td->h_sync_pulse_width;
+
+	int v_sync_off = ((td->vs_offset_pulse_width & 0xF0) >> 4)
+	    | (td->offset_pulse_width_hb & 0x0C);
+
+	int v_sync_pw = ((td->offset_pulse_width_hb & 0x03) << 4)
+	    | (td->vs_offset_pulse_width & 0x0F);
+
+	int h_img_size = ((td->hv_image_size & 0xF0) << 4) | td->h_image_size;
+	int v_img_size = ((td->hv_image_size & 0x0F) << 8) | td->v_image_size;
+
+	LOG_PRINT(LOG_LEVEL_DETAIL, "[detailed timing descriptor]\n");
+	LOG_PRINT(LOG_LEVEL_DETAIL,
+		" - pixel_clock     : %d KHz\n", pixel_clock);
+	LOG_PRINT(LOG_LEVEL_DETAIL,
+		" - horz_active     : %d pixels\n", h_active);
+	LOG_PRINT(LOG_LEVEL_DETAIL,
+		" - horz_blanking   : %d pixels\n", h_blanking);
+	LOG_PRINT(LOG_LEVEL_DETAIL,
+		" - vert_active     : %d lines\n", v_active);
+	LOG_PRINT(LOG_LEVEL_DETAIL,
+		" - vert_blanking   : %d lines\n", v_blanking);
+	LOG_PRINT(LOG_LEVEL_DETAIL,
+		" - horz_sync_off   : %d pixels\n", h_sync_off);
+	LOG_PRINT(LOG_LEVEL_DETAIL,
+		" - horz_sync_pw    : %d pixels\n", h_sync_pw);
+	LOG_PRINT(LOG_LEVEL_DETAIL,
+		" - vert_sync_off   : %d lines\n", v_sync_off);
+	LOG_PRINT(LOG_LEVEL_DETAIL,
+		" - vert_sync_pw    : %d lines\n", v_sync_pw);
+	LOG_PRINT(LOG_LEVEL_DETAIL,
+		" - image ratio     : %d : %d\n", h_img_size, v_img_size);
+
+	pdt->width = h_active;
+	pdt->htotal = h_active + h_blanking;
+	pdt->hblank_start = h_active;
+	pdt->hblank_end = h_active + h_blanking;
+	pdt->hsync_start = h_active + h_sync_off;
+	pdt->hsync_end = h_active + h_sync_off + h_sync_pw;
+
+	pdt->height = v_active;
+	pdt->vtotal = v_active + v_blanking;
+	pdt->vblank_start = v_active;
+	pdt->vblank_end = v_active + v_blanking;
+	pdt->vsync_start = v_active + v_sync_off;
+	pdt->vsync_end = v_active + v_sync_off + v_sync_pw;
+
+	pdt->dclk = pixel_clock;
+
+	/* Make sure we are seeing valid mode */
+	VERIFY(pdt->htotal && pdt->vtotal && pdt->dclk, rc, false, exit);
+
+	pdt->refresh = (pdt->dclk * 1000) / (pdt->htotal * pdt->vtotal);
+
+	/* Convert refresh velue to enumeration entry */
+	pdt->refresh = encode_refresh(pdt->refresh);
+
+	/* Check if mode is interlaced */
+	pdt->mode_info_flags |= (td->flags & 0x80) ? PD_SCAN_INTERLACE : 0;
+
+	/* Determine picture aspect ratio */
+	pdt->mode_info_flags |=
+	    (h_img_size / 4 == v_img_size / 3) ? 0 : PD_AR_16_BY_9;
+
+	/* Check for sync signal polarity */
+	if (td->flags & 0x18) {
+		pdt->mode_info_flags |= (td->flags & 0x02) ? PD_HSYNC_HIGH : 0;
+		pdt->mode_info_flags |= (td->flags & 0x04) ? PD_VSYNC_HIGH : 0;
+	}
+	/* Indicate no stereo support */
+	pdt->stereo_type = OTM_HDMI_STEREO_NONE;
+
+exit:
+	return rc;
+}
+
+/*
+ * decode_generic_descriptor()
+ * Table 3.19. Table 3.20 of EDID STD
+ */
+static void decode_generic_descriptor(generic_descriptor_t *gd,
+				edid_info_t *edid)
+{
+	int i;
+
+	/* Not a timing descriptor */
+	if ((gd->flag_required == 0) && (gd->flag_reserved == 0)
+	    && (gd->flag == 0)) {
+		switch (gd->data_type_tag) {
+		case 0xFF:
+			LOG_PRINT(LOG_LEVEL_DETAIL,
+				"[Monitor Serial Number ]\n");
+			LOG_PRINT(LOG_LEVEL_DETAIL, " - %s\n", gd->payload);
+			break;
+		case 0xFE:
+			LOG_PRINT(LOG_LEVEL_DETAIL,
+				"[ASCII String          ]\n");
+			LOG_PRINT(LOG_LEVEL_DETAIL, " - %s\n", gd->payload);
+			break;
+		case 0xFD:
+			LOG_PRINT(LOG_LEVEL_DETAIL,
+				"[Monitor Range Limits  ]\n");
+			LOG_PRINT(LOG_LEVEL_DETAIL, " - ...\n");
+			break;
+		case 0xFC:
+			LOG_PRINT(LOG_LEVEL_DETAIL,
+				"[Monitor Name          ]\n");
+			LOG_PRINT(LOG_LEVEL_DETAIL, " - %s\n", gd->payload);
+			for (i = 0; i < 13; i++) {
+				if (gd->payload[i] == '\n')
+					break;
+				else
+					edid->product_name[i] = gd->payload[i];
+			}
+			break;
+		case 0xFB:
+			LOG_PRINT(LOG_LEVEL_DETAIL,
+				"[Color Data            ]\n");
+			break;
+		case 0xFA:
+			LOG_PRINT(LOG_LEVEL_DETAIL,
+				"[More Standard Timings ]\n");
+			for (i = 0; i < 12; i += 2) {
+				decode_standard_timings
+				    ((gd->payload[i] << 8) | gd->payload[i + 1],
+				     edid);
+			}
+			break;
+		case 0x10:
+			LOG_PRINT(LOG_LEVEL_DETAIL,
+				"[Dummy                 ]\n");
+			break;
+		default:
+			LOG_PRINT(LOG_LEVEL_DETAIL,
+				"[Manufacturer/Undefined]\n");
+			break;
+		}
+	}
+	/* Timing descriptor */
+	else {
+		otm_hdmi_timing_t pdt;
+		memset(&pdt, 0, sizeof(otm_hdmi_timing_t));
+		if (decode_detailed_timings((timing_descriptor_t *) gd, &pdt))
+			add_timings(edid, &pdt, 0);
+	}
+}
+
+#define __BASIC_AUDIO_FS (OTM_HDMI_AUDIO_FS_32_KHZ | \
+			OTM_HDMI_AUDIO_FS_44_1_KHZ | \
+			OTM_HDMI_AUDIO_FS_48_KHZ)
+
+/*
+ * __find_and_declare_basic_audio_support()
+ */
+static otm_hdmi_ret_t __find_and_declare_basic_audio_support(edid_info_t *edid)
+{
+	otm_hdmi_audio_cap_t *caps = edid->audio_caps;
+	unsigned int i = 0;
+
+	otm_hdmi_audio_cap_t cap = {
+		.format = OTM_HDMI_AUDIO_FORMAT_PCM,
+		.max_channels = 2,
+		.fs = __BASIC_AUDIO_FS,
+		.ss_bitrate = OTM_HDMI_AUDIO_SS_16,
+	};
+
+	/*
+	 * A speaker allocation map may not be present with basic audio
+	 * hence always add a FL/FR pair in the speaker allocation map
+	 */
+	edid->speaker_map |= 0x01;
+
+	/*
+	 * SADs are optional with basic audio as well. Search the SADs to
+	 * find a "basic audio like" mode. If none is found, create one in
+	 * audio_caps anyways since basic audio implies such a mode is
+	 * available
+	 */
+	for (i = 0; i < edid->num_caps; i++) {
+		if ((caps[i].format == cap.format)
+		    && (caps[i].max_channels >= cap.max_channels)
+		    && (caps[i].ss_bitrate & cap.ss_bitrate)
+		    && ((caps[i].fs & cap.fs) == cap.fs)) {
+			/* Found matching mode so do nothing */
+			return OTM_HDMI_SUCCESS;
+		}
+	}
+
+	/*
+	 * Basic audio like mode not found, add it to the caps if we have
+	 * space
+	 */
+	if (edid->num_caps >= MAX_CAPS) {
+		LOG_PRINT(LOG_LEVEL_ERROR,
+		    "No place to add basic audio mode in audio_caps\n");
+		return OTM_HDMI_ERR_NO_MEMORY;
+	}
+	memcpy(&caps[edid->num_caps++], &cap, sizeof(cap));
+	return OTM_HDMI_SUCCESS;
+}
+
+/*
+ * decode_extention_block_cea()
+ * Refer to section A.2.13 of CEA-861-C document for additional details
+ */
+static void decode_extention_block_cea(extention_block_cea_t *eb,
+				edid_info_t *edid)
+{
+	int i;
+	generic_descriptor_t gd;
+
+	/* Check YCbCr444 and YCbCr422 support */
+	if (eb->revision >= 2) {
+		edid->ycbcr444 = (eb->flags & 0x20) ? true : false;
+		edid->ycbcr422 = (eb->flags & 0x10) ? true : false;
+	}
+	/*
+	 * Short descriptors section exists when:
+	 * - offset is not 4
+	 * - CEA extension version is 3
+	 */
+	if ((eb->content_offset != 4) && (eb->revision >= 3))
+		decode_block_collection(eb, edid);
+	/*
+	 * Detailed timing descriptors:
+	 * - do not exist when offset is zero
+	 * - may still not exist even when offset is non-zero; in this case
+	 *   location where they would exist is padded so make sure
+	 *   appropriate decoding routine can handle that padding correctly
+	 */
+#define __DSCR_SIZE 18
+	if (eb->content_offset != 0) {
+		for (i = 0; i < (128 - eb->content_offset) / __DSCR_SIZE; i++) {
+			/*
+			 * Instead of using a casted pointer to descriptor,
+			 * we explicitely copy memory content temporary
+			 * placeholder. This way we are avoiding possible
+			 * integer addressing problems
+			 */
+			memcpy(&gd,
+			       (char *)eb + eb->content_offset +
+			       __DSCR_SIZE * i, __DSCR_SIZE);
+			decode_generic_descriptor(&gd, edid);
+		}
+	}
+#undef __DSCR_SIZE
+
+	/* Check for basic audio support and add it to caps list if necessary */
+	if ((eb->revision >= 2) && (eb->flags & 0x40))
+		__find_and_declare_basic_audio_support(edid);
+}
+
+/*
+ * fetch_block_map()
+ */
+static otm_hdmi_ret_t fetch_block_map(edid_block_map_t *ebm,
+				unsigned char *data)
+{
+	otm_hdmi_ret_t rc = OTM_HDMI_SUCCESS;
+	read_context_t read_context = { data, 0 };
+	read_context_t *rctx = &read_context;
+
+	VERIFY(ebm != NULL, rc, OTM_HDMI_ERR_INTERNAL, exit);
+	VERIFY(data != NULL, rc, OTM_HDMI_ERR_INTERNAL, exit);
+
+	/* Fill tag, map and checksum fields */
+	fetch_next_field(&ebm->tag, rctx, 1);
+	fetch_next_field(ebm->map, rctx, BLOCK_MAP_SIZE);
+	fetch_next_field(&ebm->checksum, rctx, 1);
+
+exit:
+	return rc;
+}
+
+/*
+ * block_decode()
+ */
+static otm_hdmi_ret_t block_decode(edid_info_t *edid_info, unsigned int type,
+					unsigned char *buffer)
+{
+	otm_hdmi_ret_t rc = OTM_HDMI_SUCCESS;
+	extention_block_cea_t eb;
+
+	LOG_PRINT(LOG_LEVEL_DETAIL, "Decoding extension 0x%x\n", type);
+
+	switch (type) {
+	case 0x02:
+		VERIFY(buffer[0] == 0x02, rc, OTM_HDMI_ERR_FAILED, exit);
+		memset(&eb, 0, sizeof(extention_block_cea_t));
+		fetch_extension_block_cea(&eb, buffer);
+		decode_extention_block_cea(&eb, edid_info);
+		break;
+
+	default:
+		LOG_PRINT(LOG_LEVEL_DETAIL,
+			"Extension 0x%x is not supported; Bypassing\n",
+			   type);
+		break;
+	}
+
+exit:
+	return rc;
+}
+
+int edid_parse_pd_timing_from_cea_block(edid_info_t *edid_info,
+					unsigned char *buffer,
+					otm_hdmi_timing_t *pdts)
+{
+	extention_block_cea_t eb;
+	int i = 0;
+
+	if (buffer[0x0] == 0x2) {
+		memset(&eb, 0, sizeof(extention_block_cea_t));
+		edid_info->num_timings = 0;
+		fetch_extension_block_cea(&eb, buffer);
+		decode_extention_block_cea(&eb, edid_info);
+
+		for (i = 0; i < edid_info->num_timings; i++) {
+			memcpy((unsigned char *)&pdts[i],
+				     (unsigned char *)&edid_info->timings[i],
+				     sizeof(otm_hdmi_timing_t));
+		}
+		return edid_info->num_timings;
+	}
+	return 0;
+}
+
+
+/*
+ * edid_parse()
+ */
+otm_hdmi_ret_t edid_parse(edid_info_t *edid_info,
+			i2c_read_t data_read, void *cd)
+{
+	unsigned char buffer[SEGMENT_SIZE];
+	edid_block_zero_t ebz;
+	edid_block_map_t ebm;
+	extention_block_cea_t eb;
+	otm_hdmi_ret_t rc = OTM_HDMI_SUCCESS;
+	int i, sp, offset;
+
+	/* Read block zero */
+	rc = data_read(cd, 0, 0, buffer, SEGMENT_SIZE);
+	VERIFY_QUICK(rc == OTM_HDMI_SUCCESS, exit);
+	print_raw_block(buffer, SEGMENT_SIZE);
+	VERIFY(checksum_valid(buffer, SEGMENT_SIZE), rc,
+				OTM_HDMI_ERR_FAILED, exit);
+
+	/* Process block zero */
+	memset(&ebz, 0, sizeof(edid_block_zero_t));
+	fetch_block_zero(&ebz, buffer);
+
+	VERIFY(ebz.signature == EDID_SIGNATURE, rc, OTM_HDMI_ERR_FAILED, exit);
+
+	/* Decode general stuff */
+	edid_info->manufacturer_id = ebz.manufacturer_id;
+	edid_info->product_code = ebz.product_code;
+	edid_info->product_sn = ebz.serial_number;
+	edid_info->product_year = ebz.manufacture_year + 1990;
+	edid_info->product_week = ebz.manufacture_week;
+	edid_info->max_horz_image_size = ebz.max_horz_image_size;
+	edid_info->max_vert_image_size = ebz.max_vert_image_size;
+	edid_info->native_idx = -1;
+
+	/* Decode timings */
+	decode_generic_descriptor((generic_descriptor_t *) &ebz.td_1,
+				  edid_info);
+	decode_generic_descriptor((generic_descriptor_t *) &ebz.td_2,
+				  edid_info);
+	decode_generic_descriptor((generic_descriptor_t *) &ebz.md_1,
+				  edid_info);
+	decode_generic_descriptor((generic_descriptor_t *) &ebz.md_2,
+				  edid_info);
+
+	for (i = 0; i < EDID_STD_TIMINGS_NUM; i++)
+		decode_standard_timings(ebz.std_timings[i], edid_info);
+
+	/* If there are no extentions - we are done */
+	VERIFY(ebz.num_extentions != 0, rc, OTM_HDMI_SUCCESS, exit);
+
+	/* Read next block */
+	rc = data_read(cd, 0, SEGMENT_SIZE, buffer, SEGMENT_SIZE);
+	VERIFY_QUICK(rc == OTM_HDMI_SUCCESS, exit);
+	print_raw_block(buffer, SEGMENT_SIZE);
+	VERIFY(checksum_valid(buffer, SEGMENT_SIZE), rc,
+				OTM_HDMI_ERR_FAILED, exit);
+
+	/* There is only 1 extention; Assume its CEA Extension [tag = 0x02] */
+	if (ebz.num_extentions == 1) {
+		/* Process extention block */
+		memset(&eb, 0, sizeof(extention_block_cea_t));
+		fetch_extension_block_cea(&eb, buffer);
+		VERIFY(eb.tag == 0x02, rc, OTM_HDMI_SUCCESS, exit);
+		decode_extention_block_cea(&eb, edid_info);
+	}
+	/* There is a block map and more extentions */
+	else {
+		/* Process block map */
+		memset(&ebm, 0, sizeof(edid_block_map_t));
+		fetch_block_map(&ebm, buffer);
+
+		/* Verify we are indeed dealing with map block */
+		VERIFY(ebm.tag == 0xF0, rc, OTM_HDMI_ERR_FAILED, exit);
+
+		/* Deal with each block in the map */
+		for (i = 0; (i < BLOCK_MAP_SIZE) && ebm.map[i]; i++) {
+			/* Compute extension block location */
+			sp = (i + 2) / 2;
+			offset = (i % 2) ? SEGMENT_SIZE : 0;
+
+			/* Read extension block */
+			rc = data_read(cd, sp, offset, buffer, SEGMENT_SIZE);
+			VERIFY_QUICK(rc == OTM_HDMI_SUCCESS, exit);
+			print_raw_block(buffer, SEGMENT_SIZE);
+			VERIFY(checksum_valid(buffer, SEGMENT_SIZE), rc,
+			       OTM_HDMI_ERR_FAILED, exit);
+
+			/* Decode extension block */
+			rc = block_decode(edid_info, ebm.map[i], buffer);
+			VERIFY_QUICK(rc == OTM_HDMI_SUCCESS, exit);
+		}
+
+		goto exit;
+	}
+
+exit:
+	return rc;
+}
+
+/*
+ * Read enhanced EDID data blocks at 0xA0 using segment pointer address 0x60
+ */
+static otm_hdmi_ret_t __enhanced_ddc_read_one_block(struct i2c_adapter *adapter,
+		unsigned char sp, unsigned char offset, unsigned char *buffer)
+{
+	#define SEGMENT_ADDR 0x30
+	#define EDID_ADDR    0x50
+	struct i2c_msg msgs[] = {
+			{
+				.addr = SEGMENT_ADDR,
+				.flags = 0,
+				.len = 1,
+				.buf = &sp,
+
+			}, {
+				.addr = EDID_ADDR,
+				.flags = 0,
+				.len = 1,
+				.buf = &offset,
+
+			}, {
+				.addr = EDID_ADDR,
+				.flags = I2C_M_RD,
+				.len = SEGMENT_SIZE,
+				.buf = buffer,
+			}
+	};
+
+	int ret_i2c, retries = 5, siz = sizeof(msgs) / sizeof(msgs[0]);
+
+	pr_debug("enter %s\n", __func__);
+
+	/* Safty check */
+	if (!adapter || !buffer)
+		return OTM_HDMI_ERR_FAILED;
+
+	do {
+		ret_i2c = i2c_transfer(adapter, msgs, siz);
+		pr_debug("ret_i2c: %d\n", ret_i2c);
+	} while (ret_i2c != siz && --retries);
+
+	if (ret_i2c != siz) {
+		pr_err("i2c failed1\n");
+		return OTM_HDMI_ERR_FAILED;
+	}
+
+	pr_debug("exit %s\n", __func__);
+
+	return OTM_HDMI_SUCCESS;
+}
+
+/*
+ * edid_extension_parse()
+ */
+otm_hdmi_ret_t edid_extension_parse(struct i2c_adapter *adapter,
+			edid_info_t *edid_info, unsigned char *edid)
+{
+	unsigned char *buffer;
+	edid_block_map_t ebm;
+	extention_block_cea_t eb;
+	otm_hdmi_ret_t rc = OTM_HDMI_SUCCESS;
+	int i, sp, offset;
+
+	if (!edid_info || !edid)
+		return OTM_HDMI_ERR_FAILED;
+
+	pr_debug("enter %s\n", __func__);
+
+	/* If there are no extentions - we are done */
+	VERIFY(edid[0x7e] != 0, rc, OTM_HDMI_SUCCESS, exit);
+
+	/* Point to the first extension block */
+	buffer = edid + SEGMENT_SIZE;
+	VERIFY(checksum_valid(buffer, SEGMENT_SIZE), rc,
+			OTM_HDMI_ERR_FAILED, exit);
+
+	/* There is only 1 extention; Assume its CEA Extension [tag = 0x02] */
+	if (edid[0x7e] == 1) {
+		/* Process first extention block */
+		memset(&eb, 0, sizeof(extention_block_cea_t));
+		fetch_extension_block_cea(&eb, buffer);
+		VERIFY(eb.tag == 0x02, rc, OTM_HDMI_SUCCESS, exit);
+		decode_extention_block_cea(&eb, edid_info);
+	}
+	/* There is a block map and more extentions */
+	else {
+		/* Process block map */
+		memset(&ebm, 0, sizeof(edid_block_map_t));
+		fetch_block_map(&ebm, buffer);
+
+		/* Verify we are indeed dealing with map block */
+		VERIFY(ebm.tag == 0xF0, rc, OTM_HDMI_ERR_FAILED, exit);
+
+		/* Deal with each block in the map */
+		for (i = 0; (i < BLOCK_MAP_SIZE) && ebm.map[i]; i++) {
+			/* Compute extension block location */
+			sp = (i + 2) / 2;
+			offset = (i % 2) ? SEGMENT_SIZE : 0;
+
+			/* Read extension block */
+			rc = __enhanced_ddc_read_one_block(adapter, sp, offset,
+					buffer);
+			VERIFY_QUICK(rc == OTM_HDMI_SUCCESS, exit);
+			VERIFY(checksum_valid(buffer, SEGMENT_SIZE), rc,
+			       OTM_HDMI_ERR_FAILED, exit);
+
+			/* Decode extension block */
+			rc = block_decode(edid_info, ebm.map[i], buffer);
+			VERIFY_QUICK(rc == OTM_HDMI_SUCCESS, exit);
+		}
+	}
+exit:
+	pr_debug("exit %s (ret = %d)\n", __func__, rc);
+	return rc;
+}
+
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/pil/common/edid.h b/drivers/external_drivers/intel_media/otm_hdmi/pil/common/edid.h
new file mode 100644
index 0000000..967193d
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/pil/common/edid.h
@@ -0,0 +1,158 @@
+/*
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  Contact Information:
+
+  Intel Corporation
+  2200 Mission College Blvd.
+  Santa Clara, CA  95054
+
+  BSD LICENSE
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#ifndef _EDID_H
+#define _EDID_H
+
+
+#include <linux/types.h>
+#include <linux/i2c.h>
+
+#include "otm_hdmi.h"
+
+#define MAX_TIMINGS 100
+#define MAX_CAPS 30
+#define SEGMENT_SIZE 128
+#define MAX_EDID_BLOCKS 5
+#define SAD_SIZE 3
+#define MAX_DATA_BLOCK_SIZE (MAX_CAPS * SAD_SIZE)
+
+/**
+ * Different types representing pointers to a function
+ */
+typedef otm_hdmi_ret_t (*i2c_read_t)(void *client_data, unsigned int sp,
+				     unsigned int offset, void *buffer,
+				     unsigned int size);
+
+typedef int (*printf_t) (const char *fmt, ...);
+
+/**
+ * Structure with parsed EDID information returned to end user
+ */
+typedef struct {
+	bool hdmi;	/* HDMI or DVI device */
+	unsigned int num_timings;	/* Number of supported timings */
+	otm_hdmi_timing_t timings[MAX_TIMINGS];	/* Supported timings */
+	unsigned int order[MAX_TIMINGS];	/* VIC order of descovery */
+	unsigned int num_caps;	/* Number of supported audiocaps */
+	otm_hdmi_audio_cap_t audio_caps[MAX_CAPS];	/* Supported audio caps */
+	unsigned char short_audio_descriptor_data[MAX_DATA_BLOCK_SIZE]; /* payload of CEA 861 audio data block holding SADs*/
+	unsigned int short_audio_descriptor_count;  /* number of Short Audio Descriptor (SAD) */
+
+	unsigned int speaker_map;	/* Speaker layout */
+	unsigned short manufacturer_id;	/* Manufacturer ID */
+	unsigned short product_code;	/* Product code */
+	unsigned int product_sn;	/* Serial number */
+	unsigned int product_year;	/* Year of product manufacture */
+	unsigned int product_week;	/* Week of product manufacture */
+	unsigned char product_name[14];	/* Product name */
+	bool ycbcr444;	/* YCbCr444 support */
+	bool ycbcr422;	/* YCbCr422 support */
+	unsigned int spa;	/* CEC source physical address */
+	bool dc_30;	/* 30 bit Deep Color support */
+	bool dc_36;	/* 36 bit Deep Color support */
+	bool dc_48;	/* 48 bit Deep Color support */
+	bool dc_y444;	/* YCbCr444 support with DC */
+	bool xvycc601;	/* xvYCC BT601 Colorimetry */
+	bool xvycc709;	/* xvYCC BT709 Colorimetry */
+	bool supports_ai;	/* Aux audio info support */
+	unsigned int max_tmds_clock;
+	bool latency_present;
+	bool latency_int_present;
+	bool hdmi_video_present;
+	int latency_video;
+	int latency_audio;
+	int latency_video_interlaced;
+	int latency_audio_interlaced;
+	bool enabled_3d;
+	bool supports_60Hz;
+	bool supports_50Hz;
+	unsigned char max_horz_image_size;
+	unsigned char max_vert_image_size;
+	int native_idx;
+	bool rgb_quant_selectable;	/*rgb quant mode selectable */
+	bool ycc_quant_selectable;	/*ycc quant mode selectable */
+
+	const otm_hdmi_timing_t **ref_timings;	/* INPUT: reference timings
+						   table */
+	unsigned int num_ref_timings;	/* INPUT: size of ref timings table */
+} edid_info_t;
+
+otm_hdmi_ret_t edid_parse(edid_info_t *edid_info, i2c_read_t data_read,
+			  void *cd);
+
+otm_hdmi_ret_t edid_extension_parse(struct i2c_adapter *adapter,
+			 edid_info_t *edid_info, unsigned char *edid);
+
+void print_pd_timing(const otm_hdmi_timing_t *pdt, unsigned int order);
+void print_audio_capability(otm_hdmi_audio_cap_t *adb);
+void print_speaker_layout(unsigned int layout);
+void print_raw_block(unsigned char *buffer, int size);
+
+int find_timing(const otm_hdmi_timing_t *set, int size,
+		const otm_hdmi_timing_t *e);
+
+int edid_parse_pd_timing_from_cea_block(edid_info_t *edid_info,
+					unsigned char *buffer,
+					otm_hdmi_timing_t *pdts);
+#endif /* _EDID_H */
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/pil/common/edid_internal.h b/drivers/external_drivers/intel_media/otm_hdmi/pil/common/edid_internal.h
new file mode 100644
index 0000000..812f68e
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/pil/common/edid_internal.h
@@ -0,0 +1,180 @@
+/*
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  Contact Information:
+
+  Intel Corporation
+  2200 Mission College Blvd.
+  Santa Clara, CA  95054
+
+  BSD LICENSE
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#ifndef _EDID_INTERNAL_H
+#define _EDID_INTERNAL_H
+
+#include "edid.h"
+
+#define EDID_STD_TIMINGS_NUM 8
+#define BLOCK_MAP_SIZE 126
+
+/*
+ * Structure representing timing descriptor in EDID native format
+ * See Table 3.16 of EDID STD and Table 50 of CEA-861-C documents for details
+ * Order of fields MUST match documentation
+ */
+typedef struct {
+	unsigned short pixel_clock;
+	unsigned char h_active;
+	unsigned char h_blanking;
+	unsigned char h_active_blanking_hb;
+	unsigned char v_active;
+	unsigned char v_blanking;
+	unsigned char v_active_blanking_hb;
+	unsigned char h_sync_offset;
+	unsigned char h_sync_pulse_width;
+	unsigned char vs_offset_pulse_width;
+	unsigned char offset_pulse_width_hb;
+	unsigned char h_image_size;
+	unsigned char v_image_size;
+	unsigned char hv_image_size;
+	unsigned char h_border;
+	unsigned char v_border;
+	unsigned char flags;
+} timing_descriptor_t;
+
+/*
+ * Structure representing any 18 bytes descriptor
+ * See Table 3.19 and 3.20 of EDID STD for details
+ * Order of fields MUST match documentation
+ */
+typedef struct {
+	unsigned short flag_required;
+	unsigned char flag_reserved;
+	unsigned char data_type_tag;
+	unsigned char flag;
+	unsigned char payload[13];
+} generic_descriptor_t;
+
+/*
+ * Structure representing EDID CEA extention block
+ * See Table 56, Table 26 and section A.2.13 of CEA-861-C for details
+ */
+typedef struct {
+	unsigned char tag;
+	unsigned char revision;
+	unsigned char content_offset;
+	unsigned char flags;
+	unsigned char data[124];
+} extention_block_cea_t;
+
+/*
+ * EDID Block 0
+ */
+typedef struct {
+	unsigned long long signature;
+	unsigned short manufacturer_id;
+	unsigned short product_code;
+	unsigned int serial_number;
+	unsigned char manufacture_week;
+	unsigned char manufacture_year;
+	unsigned char version;
+	unsigned char revision;
+	unsigned char video_input_definition;
+	unsigned char max_horz_image_size;
+	unsigned char max_vert_image_size;
+	unsigned char gamma;
+	unsigned char feature_support;
+	unsigned char rg_lowbits;
+	unsigned char bw_lowbits;
+	unsigned char red_x;
+	unsigned char red_y;
+	unsigned char green_x;
+	unsigned char green_y;
+	unsigned char blue_x;
+	unsigned char blue_y;
+	unsigned char white_x;
+	unsigned char white_y;
+	unsigned char est_timing_1;
+	unsigned char est_timing_2;
+	unsigned char est_timing_3;
+	unsigned short std_timings[EDID_STD_TIMINGS_NUM];
+	unsigned char td_1[sizeof(generic_descriptor_t)];
+	unsigned char td_2[sizeof(generic_descriptor_t)];
+	unsigned char md_1[sizeof(generic_descriptor_t)];
+	unsigned char md_2[sizeof(generic_descriptor_t)];
+	unsigned char num_extentions;
+} edid_block_zero_t;
+
+/*
+ * EDID block map
+ */
+typedef struct {
+	unsigned char tag;
+	unsigned char map[BLOCK_MAP_SIZE];
+	unsigned short checksum;
+} edid_block_map_t;
+
+/*
+ * Detailed printing routines prototypes
+ */
+void print_monitor_descriptor_undecoded(generic_descriptor_t *md,
+					printf_t print);
+
+void print_timing_descriptor_undecoded(timing_descriptor_t *td,
+				       printf_t print);
+
+void print_block_zero_undecoded(edid_block_zero_t *ebz, printf_t print);
+
+#endif /* _EDID_INTERNAL_H */
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/pil/common/edid_print.c b/drivers/external_drivers/intel_media/otm_hdmi/pil/common/edid_print.c
new file mode 100644
index 0000000..f560334
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/pil/common/edid_print.c
@@ -0,0 +1,358 @@
+/*
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  Contact Information:
+
+  Intel Corporation
+  2200 Mission College Blvd.
+  Santa Clara, CA  95054
+
+  BSD LICENSE
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#include "edid_internal.h"
+
+/* Convert the refresh rate enum to a string for printing */
+static char *_refresh_string(otm_hdmi_refresh_t r)
+{
+	char *ret;
+
+	switch (r) {
+	case OTM_HDMI_REFRESH_23_98:
+		ret = "23.98";
+		break;
+	case OTM_HDMI_REFRESH_24:
+		ret = "24";
+		break;
+	case OTM_HDMI_REFRESH_25:
+		ret = "25";
+		break;
+	case OTM_HDMI_REFRESH_29_97:
+		ret = "29.97";
+		break;
+	case OTM_HDMI_REFRESH_30:
+		ret = "30";
+		break;
+	case OTM_HDMI_REFRESH_50:
+		ret = "50";
+		break;
+	case OTM_HDMI_REFRESH_47_96:
+		ret = "47.96";
+		break;
+	case OTM_HDMI_REFRESH_48:
+		ret = "48";
+		break;
+	case OTM_HDMI_REFRESH_59_94:
+		ret = "59.94";
+		break;
+	case OTM_HDMI_REFRESH_60:
+		ret = "60";
+		break;
+	case OTM_HDMI_REFRESH_NONE:
+		ret = "0";
+		break;
+	default:
+		ret = "<Unknown refresh rate>";
+		break;
+	};
+
+	return ret;
+}
+
+/* Convert stereo enum to a string for printing */
+static char *_stereo_string(otm_hdmi_stereo_t stereo_type)
+{
+	char *ret;
+
+	switch (stereo_type) {
+	case OTM_HDMI_STEREO_NONE:
+		ret = "Stereo None";
+		break;
+	case OTM_HDMI_STEREO_FRAME_PACKING_2:
+		ret = "Stereo Frame Packing 2";
+		break;
+	case OTM_HDMI_STEREO_FRAME_PACKING:
+		ret = "Stereo Frame Packing";
+		break;
+	case OTM_HDMI_STEREO_SIDE_BY_SIDE_HALF_2:
+		ret = "Stereo Side by Side Half 2";
+		break;
+	case OTM_HDMI_STEREO_TOP_BOTTOM_HALF_2:
+		ret = "Stereo Top Bottom Half 2";
+		break;
+	case OTM_HDMI_STEREO_FRAME_SEQUENTIAL:
+		ret = "Stereo Frame Sequential";
+		break;
+	default:
+		ret = "<Unknown Stereo type>";
+		break;
+	};
+
+	return ret;
+}
+
+/* print_pd_timing */
+void print_pd_timing(const otm_hdmi_timing_t *t,
+		     unsigned int order)
+{
+	LOG_PRINT(LOG_LEVEL_DETAIL,
+		"%dx%d @ %s %s [%s] [%d] [%s]\n",
+		t->width, t->height, _refresh_string(t->refresh),
+		((t->mode_info_flags & PD_SCAN_INTERLACE) ? "i" : "p"),
+		((t->mode_info_flags & PD_AR_16_BY_9) ? "16:9" : "4:3"),
+		order, _stereo_string(t->stereo_type));
+
+	LOG_PRINT(LOG_LEVEL_DETAIL, "htotal      : %d\n",
+				t->htotal);
+	LOG_PRINT(LOG_LEVEL_DETAIL,
+		"hblank_start: %d\n", t->hblank_start);
+	LOG_PRINT(LOG_LEVEL_DETAIL,
+		"hblank_end  : %d\n", t->hblank_end);
+	LOG_PRINT(LOG_LEVEL_DETAIL, "hsync_start : %d\n",
+			t->hsync_start);
+	LOG_PRINT(LOG_LEVEL_DETAIL, "hsync_end   : %d\n",
+			t->hsync_end);
+	LOG_PRINT(LOG_LEVEL_DETAIL, "vtotal      : %d\n",
+			t->vtotal);
+	LOG_PRINT(LOG_LEVEL_DETAIL, "vblank_start: %d\n",
+		t->vblank_start);
+	LOG_PRINT(LOG_LEVEL_DETAIL, "vblank_end  : %d\n",
+		t->vblank_end);
+	LOG_PRINT(LOG_LEVEL_DETAIL, "vsync_start : %d\n",
+		t->vsync_start);
+	LOG_PRINT(LOG_LEVEL_DETAIL, "vsync_end   : %d\n",
+		t->vsync_end);
+	LOG_PRINT(LOG_LEVEL_DETAIL,
+		"clock       : %dMhz\n", (int)(t->dclk / 1000));
+}
+
+/* Convert audio format enum to a string for printing */
+static char *_audio_format(otm_hdmi_audio_fmt_t fmt)
+{
+	char *s;
+
+	switch (fmt) {
+	case OTM_HDMI_AUDIO_FORMAT_PCM:
+		s = "PCM";
+		break;
+	case OTM_HDMI_AUDIO_FORMAT_AC3:
+		s = "AC3";
+		break;
+	case OTM_HDMI_AUDIO_FORMAT_MPEG1:
+		s = "MPEG1";
+		break;
+	case OTM_HDMI_AUDIO_FORMAT_MP3:
+		s = "MP3";
+		break;
+	case OTM_HDMI_AUDIO_FORMAT_MPEG2:
+		s = "MPEG2";
+		break;
+	case OTM_HDMI_AUDIO_FORMAT_AAC:
+		s = "AAC";
+		break;
+	case OTM_HDMI_AUDIO_FORMAT_DTS:
+		s = "DTS";
+		break;
+	case OTM_HDMI_AUDIO_FORMAT_ATRAC:
+		s = "ATRAC";
+		break;
+	case OTM_HDMI_AUDIO_FORMAT_OBA:
+		s = "One Bit Audio";
+		break;
+	case OTM_HDMI_AUDIO_FORMAT_DDP:
+		s = "Dolby Digital +";
+		break;
+	case OTM_HDMI_AUDIO_FORMAT_DTSHD:
+		s = "DTSHD";
+		break;
+	case OTM_HDMI_AUDIO_FORMAT_MLP:
+		s = "MLP";
+		break;
+	case OTM_HDMI_AUDIO_FORMAT_DST:
+		s = "DST";
+		break;
+	case OTM_HDMI_AUDIO_FORMAT_WMA_PRO:
+		s = "WMA Pro";
+		break;
+	default:
+		s = "< unknown format >";
+		break;
+	};
+
+	return s;
+}
+
+/* Convert sampling rate enum to a string for printing */
+static char *_sampling_rate(otm_hdmi_audio_fs_t fs)
+{
+	char *s;
+
+	switch (fs) {
+	case OTM_HDMI_AUDIO_FS_32_KHZ:
+		s = "32";
+		break;
+	case OTM_HDMI_AUDIO_FS_44_1_KHZ:
+		s = "44.1";
+		break;
+	case OTM_HDMI_AUDIO_FS_48_KHZ:
+		s = "48";
+		break;
+	case OTM_HDMI_AUDIO_FS_88_2_KHZ:
+		s = "88.2";
+		break;
+	case OTM_HDMI_AUDIO_FS_96_KHZ:
+		s = "96";
+		break;
+	case OTM_HDMI_AUDIO_FS_176_4_KHZ:
+		s = "176.4";
+		break;
+	case OTM_HDMI_AUDIO_FS_192_KHZ:
+		s = "192";
+		break;
+
+	default:
+		s = "< unknown sampling rate >";
+		break;
+	}
+
+	return s;
+}
+
+/* print_audio_capability() */
+void print_audio_capability(otm_hdmi_audio_cap_t *cap)
+{
+	int i;
+
+	LOG_PRINT(LOG_LEVEL_DETAIL,
+		"Format: %s; Max channels: %d; Sampling rates, KHz:",
+		  _audio_format(cap->format), cap->max_channels);
+
+	for (i = 0; i < 7; i++)
+		LOG_PRINT(LOG_LEVEL_DETAIL,
+		" %s", (cap->fs & (1 << i)) ? _sampling_rate(1 << i) : "");
+	LOG_PRINT(LOG_LEVEL_DETAIL, "\n");
+}
+
+/* Convert a speaker map enum to a string for printing */
+static char *_speaker_map(otm_hdmi_audio_speaker_map_t map)
+{
+	char *s;
+
+	switch (map) {
+	case OTM_HDMI_AUDIO_SPEAKER_MAP_FLFR:
+		s = "FL/FR";
+		break;
+	case OTM_HDMI_AUDIO_SPEAKER_MAP_LFE:
+		s = "LFE";
+		break;
+	case OTM_HDMI_AUDIO_SPEAKER_MAP_FC:
+		s = "FC";
+		break;
+	case OTM_HDMI_AUDIO_SPEAKER_MAP_RLRR:
+		s = "RL/RR";
+		break;
+	case OTM_HDMI_AUDIO_SPEAKER_MAP_RC:
+		s = "RC";
+		break;
+	case OTM_HDMI_AUDIO_SPEAKER_MAP_FLCFRC:
+		s = "FLC/FRC";
+		break;
+	case OTM_HDMI_AUDIO_SPEAKER_MAP_RLCRRC:
+		s = "RLC/RRC";
+		break;
+	case OTM_HDMI_AUDIO_SPEAKER_MAP_FLWFRW:
+		s = "FLW/FRW";
+		break;
+	case OTM_HDMI_AUDIO_SPEAKER_MAP_FLHFRH:
+		s = "FLH/FRH";
+		break;
+	case OTM_HDMI_AUDIO_SPEAKER_MAP_TC:
+		s = "TC";
+		break;
+	case OTM_HDMI_AUDIO_SPEAKER_MAP_FCH:
+		s = "FCH";
+		break;
+
+	default:
+		s = "< unknown allocation >\n";
+		break;
+	}
+
+	return s;
+}
+
+/* print_speaker_layout() */
+void print_speaker_layout(unsigned int layout)
+{
+	int i;
+
+	LOG_PRINT(LOG_LEVEL_DETAIL, "Speaker layout map:");
+	for (i = 0; i < 11; i++)
+		LOG_PRINT(LOG_LEVEL_DETAIL,
+		" %s", (layout & (1 << i)) ? _speaker_map(1 << i) : "");
+	LOG_PRINT(LOG_LEVEL_DETAIL, "\n");
+}
+
+/* print_raw_block() */
+void print_raw_block(unsigned char *buffer, int size)
+{
+	int i;
+
+	for (i = 0; i < size; i++) {
+		if (i != 0 && ((i % 0x10) == 0))
+			LOG_PRINT(LOG_LEVEL_DETAIL, "\n");
+		LOG_PRINT(LOG_LEVEL_DETAIL, "%02X\n", buffer[i]);
+	}
+	LOG_PRINT(LOG_LEVEL_DETAIL, "\n");
+}
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/pil/common/hdcp.c b/drivers/external_drivers/intel_media/otm_hdmi/pil/common/hdcp.c
new file mode 100644
index 0000000..676aaa6
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/pil/common/hdcp.c
@@ -0,0 +1,1690 @@
+/*
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  Contact Information:
+
+  Intel Corporation
+  2200 Mission College Blvd.
+  Santa Clara, CA  95054
+
+  BSD LICENSE
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/types.h>
+#include "hdcp_rx_defs.h"
+#include "otm_hdmi.h"
+#include "otm_hdmi_types.h"
+#include "ipil_hdcp_api.h"
+#include "psb_powermgmt.h"
+#include "ipil_hdmi.h"
+
+#define OTM_HDCP_DEBUG_MODULE
+
+#ifdef OTM_HDCP_DEBUG_MODULE
+bool module_disable_hdcp = false;
+EXPORT_SYMBOL_GPL(module_disable_hdcp);
+bool module_force_ri_mismatch = false;
+EXPORT_SYMBOL_GPL(module_force_ri_mismatch);
+#endif
+
+enum {
+	HDCP_ENABLE = 1,
+	HDCP_RESET,
+	HDCP_RI_CHECK,
+	HDCP_REPEATER_CHECK,
+	HDCP_REPEATER_WDT_EXPIRED,
+	HDCP_SET_POWER_SAVE_STATUS,
+	HDCP_SET_HPD_STATUS,
+	HDCP_SET_DPMS_STATUS
+} hdcp_task_msg_en;
+
+struct hdcp_wq_struct_t {
+	struct delayed_work dwork;
+	int msg;
+	void *msg_data;
+};
+
+/* = = = = = = = = = = = = = = = = == = = = = = = = = = = = = = = = = = = = = */
+/*!  \brief Our local context.
+ */
+struct hdcp_context_t {
+	int		can_authenticate;
+			/*!< indicates HDCP Authentication currently allowed
+			 */
+	bool	is_required;
+	bool	is_phase1_enabled;
+	bool	is_phase2_enabled;
+	bool	is_phase3_valid;
+	bool	previous_phase1_status;
+	bool	suspend;
+	bool	hpd;
+	bool	previous_hpd;
+	bool	display_power_on;
+	bool	auto_retry;
+	bool	wdt_expired;
+	bool	sink_query /* used to unconditionally read sink bksv */;
+	bool	force_reset;
+	unsigned int	auth_id;
+			/*!< id that indicates current
+			 * authentication attempt
+			 */
+	unsigned int	ri_check_interval;
+			/*!< phase 3 ri-check interval based on mode */
+	unsigned int	ri_check_interval_upper;
+			/*!< upper bound of ri-check interval */
+	unsigned int	ri_check_interval_lower;
+			/*!< lower bound of ri-check interval */
+	unsigned int	video_refresh_interval;
+			/*!< time interval (msec) of video refresh. */
+	unsigned int	prev_ri_frm_cnt_status;
+			/*!< Ri frame count in HDCP status register when
+			 * doing previous Ri check. */
+	unsigned int	current_srm_ver;
+			/*!< currently used SRM version (if vrl is not null)
+			 */
+	uint64_t	*vrl;
+			/*!< pointer to our VRL, formatted as array of KSVs
+			 * as hdcp__u64_t's
+			 */
+	unsigned int	vrl_count;
+			/*!< total number of KSVs in our VRL */
+	int (*ddc_read_write)(bool, uint8_t, uint8_t, uint8_t *, int);
+			/*!< Pointer to callback function for DDC Read Write */
+	struct workqueue_struct *hdcp_wq;
+			/*!< single-thread workqueue handling HDCP events */
+	unsigned int ri_retry;
+			/*!< time delay (msec) to re-try Ri check */
+	unsigned int hdcp_delay;
+			/*!< time delay (msec) to wait for TMDS to be ready */
+	bool hdmi; /* HDMI or DVI*/
+	bool bstatus_read;
+};
+
+/* Global instance of local context */
+static struct hdcp_context_t *hdcp_context;
+
+/* HDCP Main Event Handler */
+static void hdcp_task_event_handler(struct work_struct *work);
+
+/**
+ * Description: this function sends a message to the hdcp workqueue to be
+ *		processed with a delay
+ *
+ * @msg		message type
+ * @msg_data	any additional data accompanying the message
+ * @delay	amount of delay for before the message gets processed
+ *
+ * Returns:	true if message was successfully queued else false
+ */
+static bool wq_send_message_delayed(int msg,
+					void *msg_data,
+					unsigned long delay)
+{
+	struct hdcp_wq_struct_t *hwq = NULL;
+	hwq = kmalloc(sizeof(struct hdcp_wq_struct_t), GFP_KERNEL);
+	if (hwq == NULL) {
+		if (msg_data)
+			kfree(msg_data);
+		return false;
+	}
+
+	hwq->msg = msg;
+	hwq->msg_data = msg_data;
+
+	INIT_DELAYED_WORK(&hwq->dwork, hdcp_task_event_handler);
+
+	if (queue_delayed_work(hdcp_context->hdcp_wq, &hwq->dwork,
+		(unsigned long)(msecs_to_jiffies(delay))) != 0)
+		return true;
+	else
+		pr_debug("hdcp: failed to add message to delayed wq\n");
+
+	if (msg_data)
+		kfree(msg_data);
+
+	return false;
+}
+
+/**
+ * Description: this function sends a message to the hdcp workqueue to be
+ *		processed without delay
+ *
+ * @msg		message type
+ * @msg_data	any additional data accompanying the message
+ *
+ * Returns:	true if message was successfully queued else false
+ */
+static bool wq_send_message(int msg, void *msg_data)
+{
+	return wq_send_message_delayed(msg, msg_data, 0);
+}
+
+/**
+ * Description: this function verifies all conditions to enable hdcp
+ *
+ * Returns:	true if hdcp can be enabled else false
+ */
+static bool hdcp_enable_condition_ready(void)
+{
+	if (hdcp_context != NULL &&
+	    hdcp_context->can_authenticate == true &&
+	    hdcp_context->is_required == true &&
+	    hdcp_context->suspend == false &&
+	    hdcp_context->hpd == true &&
+	    hdcp_context->display_power_on == true)
+		return true;
+
+	if (hdcp_context == NULL) {
+		pr_err("hdcp: hdcp_context is NULL\n");
+	} else {
+		pr_debug("hdcp: condition not ready, required %d, hpd %d\n",
+			hdcp_context->is_required, hdcp_context->hpd);
+	}
+
+	return false;
+}
+
+/**
+ * Description: this function reads data from downstream i2c hdcp device
+ *
+ * @offset	offset address on hdcp device
+ * @buffer	buffer to store data
+ * @size	size of buffer to be read
+ *
+ * Returns:	true on success else false
+ */
+static int hdcp_ddc_read(uint8_t offset, uint8_t *buffer, int size)
+{
+	if (hdcp_enable_condition_ready() == true ||
+		(hdcp_context->sink_query == true && 
+		 offset == HDCP_RX_BKSV_ADDR))
+		return hdcp_context->ddc_read_write(true,
+			HDCP_PRIMARY_I2C_ADDR, offset, buffer, size);
+	return false;
+}
+
+/**
+ * Description: this function writes data to downstream i2c hdcp device
+ *
+ * @offset	offset address on hdcp device
+ * @buffer	data to be written
+ * @size	size of data to be written
+ *
+ * Returns:	true on success else false
+ */
+static int hdcp_ddc_write(uint8_t offset, uint8_t *buffer, int size)
+{
+	if (hdcp_enable_condition_ready() == true)
+		return hdcp_context->ddc_read_write(false,
+			HDCP_PRIMARY_I2C_ADDR, offset, buffer, size);
+	return false;
+}
+
+/**
+ * Description: this function converts ksv byte buffer into 64 bit number
+ *
+ * @ksv		ksv value
+ * @size	size of the ksv
+ *
+ * Returns:	64bit value of the ksv
+ */
+static uint64_t hdcp_ksv_64val_conv(uint8_t *ksv, uint32_t size)
+{
+	int i = 0;
+	uint64_t ksv64 = 0;
+	if (ksv != NULL && size == HDCP_KSV_SIZE) {
+		for (i = 0; i < 5; i++)
+			ksv64 |= (ksv[i] << (i*8));
+	}
+	return ksv64;
+}
+
+/**
+ * Description: this function validates a ksv value
+ *		1. 20 1's & 20 o's
+ *		2. SRM check: check for revoked keys
+ *
+ * @ksv		ksv value
+ * @size	size of the ksv
+ *
+ * Returns:	true if valid else false
+ */
+static bool hdcp_validate_ksv(uint8_t *ksv, uint32_t size)
+{
+	int i = 0, count = 0;
+	uint8_t temp = 0;
+	uint64_t ksv64 = hdcp_ksv_64val_conv(ksv, size);
+	bool ret = false;
+	if (ksv != NULL  && size == HDCP_KSV_SIZE) {
+		count = 0;
+		for (i = 0; i < 5; i++) {
+			temp = ksv[i];
+			while (temp) {
+				temp &= (temp-1);
+				count++;
+			}
+		}
+		if (count == HDCP_KSV_HAMMING_WT)
+			ret = true;
+	}
+	if (ret) {
+		/* SRM Check ? */
+		if (hdcp_context->vrl != NULL) {
+			const uint64_t *vrl = hdcp_context->vrl;
+			for (i = 0; i < hdcp_context->vrl_count; i++, vrl++) {
+				if (ksv64 == *vrl)
+					return true;
+			}
+		}
+	}
+	return ret;
+}
+
+/**
+ * Description: this function reads aksv from local hdcp tx device
+ *
+ * @aksv	buffer to store aksv
+ * @size	size of the aksv buffer
+ *
+ * Returns:	true on success else false
+ */
+static bool hdcp_get_aksv(uint8_t *aksv, uint32_t size)
+{
+	bool ret = false;
+	if (ipil_hdcp_get_aksv(aksv, HDCP_KSV_SIZE) == true) {
+		if (hdcp_validate_ksv(aksv, size) == true)
+			ret = true;
+	}
+	return ret;
+}
+
+/**
+ * Description: this function reads bksv from downstream device
+ *
+ * @bksv	buffer to store bksv
+ * @size	size of the bksv buffer
+ *
+ * Returns:	true on success else false
+ */
+static bool hdcp_read_bksv(uint8_t *bksv, uint32_t size)
+{
+	bool ret = false;
+	if (bksv != NULL  && size == HDCP_KSV_SIZE) {
+		if (hdcp_ddc_read(HDCP_RX_BKSV_ADDR,
+				bksv, HDCP_KSV_SIZE) == true) {
+			if (hdcp_validate_ksv(bksv, size) == true)
+				ret = true;
+		}
+	}
+	return ret;
+}
+
+/**
+ * Description: this function reads bcaps from downstream device
+ *
+ * @bcaps	buffer to store bcaps
+ *
+ * Returns:	true on success else false
+ */
+static bool hdcp_read_bcaps(uint8_t *bcaps)
+{
+	bool ret = false;
+	if (bcaps != NULL) {
+		if (hdcp_ddc_read(HDCP_RX_BCAPS_ADDR,
+				bcaps, HDCP_RX_BCAPS_SIZE) == true)
+			ret = true;
+	}
+	return ret;
+}
+
+/**
+ * Description: this function reads bstatus from downstream device
+ *
+ * @bstatus	buffer to store bstatus
+ *
+ * Returns:	true on success else false
+ */
+static bool hdcp_read_bstatus(uint16_t *bstatus)
+{
+	bool ret = false;
+	if (bstatus != NULL) {
+		if (hdcp_ddc_read(HDCP_RX_BSTATUS_ADDR,
+			(uint8_t *)bstatus, HDCP_RX_BSTATUS_SIZE) == true)
+			ret = true;
+	}
+	return ret;
+}
+
+/**
+ * Description: this function reads ri from downstream device
+ *
+ * @rx_ri	buffer to store ri
+ *
+ * Returns:	true on success else false
+ */
+static bool hdcp_read_rx_ri(uint16_t *rx_ri)
+{
+	bool ret = false;
+	if (rx_ri != NULL) {
+		if (hdcp_ddc_read(HDCP_RX_RI_ADDR,
+				(uint8_t *)rx_ri, HDCP_RI_SIZE) == true)
+			ret = true;
+	}
+	return ret;
+}
+
+/**
+ * Description: this function reads r0 from downstream device
+ *
+ * @rx_r0	buffer to store r0
+ *
+ * Returns:	true on success else false
+ */
+static bool hdcp_read_rx_r0(uint16_t *rx_r0)
+{
+	return hdcp_read_rx_ri(rx_r0);
+}
+
+/**
+ * Description: this function reads all ksv's from downstream repeater
+ *
+ * @ksv_list	buffer to store ksv list
+ * @size	size of the ksv_list to read into the buffer
+ *
+ * Returns:	true on success else false
+ */
+static bool hdcp_read_rx_ksv_list(uint8_t *ksv_list, uint32_t size)
+{
+	bool ret = false;
+	if (ksv_list != NULL && size) {
+		if (hdcp_ddc_read(HDCP_RX_KSV_FIFO_ADDR,
+		    ksv_list, size) == true) {
+			ret = true;
+		}
+	}
+	return ret;
+}
+
+/**
+ * Description: this function reads sha1 value from downstream device
+ *
+ * @v		buffer to store the sha1 value
+ *
+ * Returns:	true on success else false
+ */
+static bool hdcp_read_rx_v(uint8_t *v)
+{
+	bool ret = false;
+	uint8_t *buf = v;
+	uint8_t offset = HDCP_RX_V_H0_ADDR;
+
+	if (v != NULL) {
+		for (; offset <= HDCP_RX_V_H4_ADDR; offset += 4) {
+			if (hdcp_ddc_read(offset, buf, 4) == false) {
+				pr_debug("hdcp: read rx v failure\n");
+				break;
+			}
+			buf += 4;
+		}
+		if (offset > HDCP_RX_V_H4_ADDR)
+			ret = true;
+	}
+	return ret;
+}
+
+/**
+ * Description: this function performs ri match
+ *
+ * Returns:	true on match else false
+ */
+static bool hdcp_stage3_ri_check(void)
+{
+	uint16_t rx_ri = 0;
+
+	if (hdcp_enable_condition_ready() == false ||
+	    hdcp_context->is_phase1_enabled == false)
+		return false;
+
+#ifdef OTM_HDCP_DEBUG_MODULE
+	if (module_force_ri_mismatch) {
+		pr_debug("hdcp: force Ri mismatch\n");
+		module_force_ri_mismatch = false;
+		return false;
+	}
+#endif
+
+	if (hdcp_read_rx_ri(&rx_ri) == true) {
+		if (ipil_hdcp_does_ri_match(rx_ri) == true)
+			/* pr_debug("hdcp: Ri Matches %04x\n", rx_ri);*/
+			return true;
+
+		/* If first Ri check fails,we re-check it after ri_retry (msec).
+		 * This is because some receivers do not immediately have valid
+		 * Ri' at frame 128.
+		 * */
+		pr_debug("re-check Ri after %d (msec)\n",
+				hdcp_context->ri_retry);
+
+		msleep(hdcp_context->ri_retry);
+		if (hdcp_read_rx_ri(&rx_ri) == true)
+			if (ipil_hdcp_does_ri_match(rx_ri) == true)
+				return true;
+	}
+
+	/* ri check failed update phase3 status */
+	hdcp_context->is_phase3_valid = false;
+
+	pr_debug("hdcp: error!!!  Ri check failed %x\n", rx_ri);
+	return false;
+}
+
+/**
+ * Description: this function sends an aksv to downstream device
+ *
+ * @an		AN value to send
+ * @an_size	size of an
+ * @aksv	AKSV value to send
+ * @aksv_size	size of aksv
+ *
+ * Returns:	true on success else false
+ */
+static bool hdcp_send_an_aksv(uint8_t *an, uint8_t an_size,
+			uint8_t *aksv, uint8_t aksv_size)
+{
+	bool ret = false;
+	if (an != NULL && an_size == HDCP_AN_SIZE &&
+	   aksv != NULL  && aksv_size == HDCP_KSV_SIZE) {
+		if (hdcp_ddc_write(HDCP_RX_AN_ADDR, an, HDCP_AN_SIZE) ==
+			true) {
+			/* wait 20ms for i2c write for An to complete */
+			/* msleep(20); */
+			if (hdcp_ddc_write(HDCP_RX_AKSV_ADDR, aksv,
+					HDCP_KSV_SIZE) == true)
+				ret = true;
+		}
+	}
+	return ret;
+}
+
+/**
+ * Description: this function resets hdcp state machine to initial state
+ *
+ * Returns:	none
+ */
+static void hdcp_reset(void)
+{
+	pr_debug("hdcp: reset\n");
+
+	/* blank TV screen */
+	ipil_enable_planes_on_pipe(1, false);
+
+	/* Stop HDCP */
+	if (hdcp_context->is_phase1_enabled == true ||
+	    hdcp_context->force_reset == true) {
+		pr_debug("hdcp: off state\n");
+		ipil_hdcp_disable();
+		hdcp_context->force_reset = false;
+	}
+
+#ifndef OTM_HDMI_HDCP_ALWAYS_ENC
+	/* this flag will be re-enabled by upper layers */
+	hdcp_context->is_required = false;
+#endif
+	hdcp_context->is_phase1_enabled = false;
+	hdcp_context->is_phase2_enabled = false;
+	hdcp_context->is_phase3_valid   = false;
+	hdcp_context->prev_ri_frm_cnt_status = 0;
+}
+
+/**
+ * Description: this function schedules repeater authentication
+ *
+ * @first	whether its first time schedule or not, delay for check is
+ *		varied between first and subsequent times
+ *
+ * Returns:	true on success else false
+ */
+static bool hdcp_rep_check(bool first)
+{
+	int msg = HDCP_REPEATER_CHECK;
+	int delay = (first) ? 50 : 100;
+	unsigned int *auth_id = kmalloc(sizeof(unsigned int), GFP_KERNEL);
+	if (auth_id != NULL) {
+		*auth_id = hdcp_context->auth_id;
+		return wq_send_message_delayed(msg, (void *)auth_id, delay);
+	} else
+		pr_debug("hdcp: %s failed to alloc mem\n", __func__);
+	return false;
+}
+
+/**
+ * Description: this function creates a watch dog timer for repeater auth
+ *
+ * Returns:	true on success else false
+ */
+static bool hdcp_rep_watch_dog(void)
+{
+	int msg = HDCP_REPEATER_WDT_EXPIRED;
+	unsigned int *auth_id = kmalloc(sizeof(unsigned int), GFP_KERNEL);
+	if (auth_id != NULL) {
+		*auth_id = hdcp_context->auth_id;
+		/* set a watch dog timer for 5.2 secs, added additional
+		   0.2 seconds to be safe */
+		return wq_send_message_delayed(msg, (void *)auth_id, 5200);
+	} else
+		pr_debug("hdcp: %s failed to alloc mem\n", __func__);
+	return false;
+}
+
+/**
+ * Description: this function initiates repeater authentication
+ *
+ * Returns:	true on success else false
+ */
+static bool hdcp_initiate_rep_auth(void)
+{
+	pr_debug("hdcp: initiating repeater check\n");
+	hdcp_context->wdt_expired = false;
+	if (hdcp_rep_check(true) == true) {
+		if (hdcp_rep_watch_dog() == true)
+			return true;
+		else
+			pr_debug("hdcp: failed to start repeater wdt\n");
+	} else
+		pr_debug("hdcp: failed to start repeater check\n");
+	return false;
+}
+
+/**
+ * Description:	this function schedules ri check
+ *
+ * @first_check	whether its the first time, interval is varied if first time
+ *
+ * Returns:	true on success else false
+ */
+static bool hdcp_stage3_schedule_ri_check(bool first_check)
+{
+	int msg = HDCP_RI_CHECK;
+	unsigned int *msg_data = kmalloc(sizeof(unsigned int), GFP_KERNEL);
+	/* Do the first check immediately while adding some randomness  */
+	int ri_check_interval = (first_check) ? (20 + (jiffies % 10)) :
+				hdcp_context->ri_check_interval;
+	if (msg_data != NULL) {
+		*msg_data = hdcp_context->auth_id;
+		return wq_send_message_delayed(msg, (void *)msg_data,
+						ri_check_interval);
+	}
+	return false;
+}
+
+/**
+ * Description: this function performs 1st stage HDCP authentication i.e.
+ *		exchanging keys and r0 match
+ *
+ * @is_repeater	variable to return type of downstream device, i.e. repeater
+ *		or not
+ *
+ * Returns:	true on successful authentication else false
+ */
+static bool hdcp_stage1_authentication(bool *is_repeater)
+{
+	uint8_t bksv[HDCP_KSV_SIZE], aksv[HDCP_KSV_SIZE], an[HDCP_AN_SIZE];
+	struct hdcp_rx_bstatus_t bstatus;
+	struct hdcp_rx_bcaps_t bcaps;
+	uint8_t retry = 0;
+	uint16_t rx_r0 = 0;
+
+	/* Wait (up to 2s) for HDMI sink to be in HDMI mode */
+	retry = 40;
+	if (hdcp_context->hdmi) {
+		while (retry--) {
+			if (hdcp_read_bstatus(&bstatus.value) == false) {
+				if (hdcp_context->bstatus_read) {
+					hdcp_context->bstatus_read = false;
+					pr_err("hdcp: failed to read bstatus\n");
+				} else {
+					pr_debug("hdcp: failed to read bstatus\n");
+				}
+				return false;
+			}
+			if (bstatus.hdmi_mode)
+				break;
+			msleep(50);
+			pr_debug("hdcp: waiting for sink to be in HDMI mode\n");
+		}
+	}
+
+	if (retry == 0)
+		pr_err("hdcp: sink is not in HDMI mode\n");
+
+	pr_debug("hdcp: bstatus: %04x\n", bstatus.value);
+
+	if (!hdcp_context->bstatus_read) {
+		hdcp_context->bstatus_read = true;
+		pr_info("hdcp: read bstatus successfully\n");
+	}
+
+	/* Read BKSV */
+	if (hdcp_read_bksv(bksv, HDCP_KSV_SIZE) == false) {
+		pr_err("hdcp: failed to read bksv\n");
+		return false;
+	}
+	pr_debug("hdcp: bksv: %02x%02x%02x%02x%02x\n",
+		bksv[0], bksv[1], bksv[2], bksv[3], bksv[4]);
+
+	/* Read An */
+	if (ipil_hdcp_get_an(an, HDCP_AN_SIZE) == false) {
+		pr_err("hdcp: failed to get an\n");
+		return false;
+	}
+	pr_debug("hdcp: an: %02x%02x%02x%02x%02x%02x%02x%02x\n",
+		an[0], an[1], an[2], an[3], an[4], an[5], an[6], an[7]);
+
+	/* Read AKSV */
+	if (hdcp_get_aksv(aksv, HDCP_KSV_SIZE) == false) {
+		pr_err("hdcp: failed to get aksv\n");
+		return false;
+	}
+	pr_debug("hdcp: aksv: %02x%02x%02x%02x%02x\n",
+			aksv[0], aksv[1], aksv[2], aksv[3], aksv[4]);
+
+	/* Write An AKSV to Downstream Rx */
+	if (hdcp_send_an_aksv(an, HDCP_AN_SIZE, aksv, HDCP_KSV_SIZE)
+						== false) {
+		pr_err("hdcp: failed to send an and aksv\n");
+		return false;
+	}
+	pr_debug("hdcp: sent an aksv\n");
+
+	/* Read BKSV */
+	if (hdcp_read_bksv(bksv, HDCP_KSV_SIZE) == false) {
+		pr_err("hdcp: failed to read bksv\n");
+		return false;
+	}
+	pr_debug("hdcp: bksv: %02x%02x%02x%02x%02x\n",
+			bksv[0], bksv[1], bksv[2], bksv[3], bksv[4]);
+
+	/* Read BCAPS */
+	if (hdcp_read_bcaps(&bcaps.value) == false) {
+		pr_err("hdcp: failed to read bcaps\n");
+		return false;
+	}
+	pr_debug("hdcp: bcaps: %x\n", bcaps.value);
+
+
+	/* Update repeater present status */
+	*is_repeater = bcaps.is_repeater;
+
+	/* Set Repeater Bit */
+	if (ipil_hdcp_set_repeater(bcaps.is_repeater) == false) {
+		pr_err("hdcp: failed to set repeater bit\n");
+		return false;
+	}
+
+	/* Write BKSV to Self (hdcp tx) */
+	if (ipil_hdcp_set_bksv(bksv) == false) {
+		pr_err("hdcp: failed to write bksv to self\n");
+		return false;
+	}
+
+	pr_debug("hdcp: set repeater & bksv\n");
+
+	/* Start Authentication i.e. computations using hdcp keys */
+	if (ipil_hdcp_start_authentication() == false) {
+		pr_err("hdcp: failed to start authentication\n");
+		return false;
+	}
+
+	pr_debug("hdcp: auth started\n");
+
+	/* Wait for 120ms before reading R0' */
+	msleep(120);
+
+	/* Check if R0 Ready in hdcp tx */
+	retry = 20;
+	do {
+		if (ipil_hdcp_is_r0_ready() == true)
+			break;
+		msleep(5);
+		retry--;
+	} while (retry);
+
+	if (retry == 0 && ipil_hdcp_is_r0_ready() == false) {
+		pr_err("hdcp: R0 is not ready\n");
+		return false;
+	}
+
+	pr_debug("hdcp: tx_r0 ready\n");
+
+	/* Read Ro' from Receiver hdcp rx */
+	if (hdcp_read_rx_r0(&rx_r0) == false) {
+		pr_err("hdcp: failed to read R0 from receiver\n");
+		return false;
+	}
+
+	pr_debug("hdcp: rx_r0 = %04x\n", rx_r0);
+
+	/* Check if R0 Matches */
+	if (ipil_hdcp_does_ri_match(rx_r0) == false) {
+		pr_err("hdcp: R0 does not match\n");
+		return false;
+	}
+	pr_debug("hdcp: R0 matched\n");
+
+	/* Enable Encryption & Check status */
+	if (ipil_hdcp_enable_encryption() == false) {
+		pr_err("hdcp: failed to enable encryption\n");
+		return false;
+	}
+	pr_debug("hdcp: encryption enabled\n");
+
+	hdcp_context->is_phase1_enabled = true;
+
+	return true;
+}
+
+/**
+ * Description: this function performs repeater authentication i.e. 2nd
+ *		stage HDCP authentication
+ *
+ * Returns:	true if repeater authentication is in progress or succesful
+ *		else false. If in progress repeater authentication would be
+ *		rescheduled
+ */
+static bool hdcp_stage2_repeater_authentication(void)
+{
+	uint8_t *rep_ksv_list = NULL;
+	uint32_t rep_prime_v[HDCP_V_H_SIZE] = {0};
+	struct hdcp_rx_bstatus_t bstatus;
+	struct hdcp_rx_bcaps_t bcaps;
+	bool ret = false;
+
+	/* Repeater Authentication */
+	if (hdcp_enable_condition_ready() == false ||
+	    hdcp_context->is_phase1_enabled == false ||
+	    hdcp_context->wdt_expired == true) {
+		pr_debug("hdcp: stage2 auth condition not ready\n");
+		return false;
+	}
+
+	/* Read BCAPS */
+	if (hdcp_read_bcaps(&bcaps.value) == false)
+		return false;
+
+	if (!bcaps.is_repeater)
+		return false;
+
+	/* Check if fifo ready */
+	if (!bcaps.ksv_fifo_ready) {
+		/* not ready: reschedule but return true */
+		pr_debug("hdcp: rescheduling repeater auth\n");
+		hdcp_rep_check(false);
+		return true;
+	}
+
+	/* Read BSTATUS */
+	if (hdcp_read_bstatus(&bstatus.value) == false)
+		return false;
+
+	/* Check validity of repeater depth & device count */
+	if (bstatus.max_devs_exceeded)
+		return false;
+
+	if (bstatus.max_cascade_exceeded)
+		return false;
+
+	if (0 == bstatus.device_count)
+		return true;
+
+	if (bstatus.device_count > HDCP_MAX_DEVICES)
+		return false;
+
+	/* allocate memory for ksv_list */
+	rep_ksv_list = kzalloc(bstatus.device_count * HDCP_KSV_SIZE,
+				GFP_KERNEL);
+	if (!rep_ksv_list) {
+		pr_debug("hdcp: rep ksv list alloc failure\n");
+		return false;
+	}
+
+	/* Read ksv list from repeater */
+	if (hdcp_read_rx_ksv_list(rep_ksv_list,
+				  bstatus.device_count * HDCP_KSV_SIZE)
+				  == false) {
+		pr_debug("hdcp: rep ksv list read failure\n");
+		goto exit;
+	}
+
+	/* TODO: SRM check */
+
+	/* Compute tx sha1 (V) */
+	if (ipil_hdcp_compute_tx_v(rep_ksv_list, bstatus.device_count,
+				   bstatus.value) == false) {
+		pr_debug("hdcp: rep compute tx v failure\n");
+		goto exit;
+	}
+
+	/* Read rx sha1 (V') */
+	if (hdcp_read_rx_v((uint8_t *)rep_prime_v) == false) {
+		pr_debug("hdcp: rep read rx v failure\n");
+		goto exit;
+	}
+
+	/* Verify SHA1 tx(V) = rx(V') */
+	if (ipil_hdcp_compare_v(rep_prime_v) == false) {
+		pr_debug("hdcp: rep compare v failure\n");
+		goto exit;
+	}
+
+	pr_debug("hdcp: repeater auth success\n");
+	hdcp_context->is_phase2_enabled = true;
+	ret = true;
+
+exit:
+	kfree(rep_ksv_list);
+	return ret;
+}
+
+/**
+ * Description: Main function that initiates all stages of HDCP authentication
+ *
+ * Returns:	true on succesful authentication else false
+ */
+static bool hdcp_start(void)
+{
+	bool is_repeater = false;
+
+	/* Make sure TMDS is available
+	 * Remove this delay since HWC already has the delay
+	 */
+	/* msleep(hdcp_context->hdcp_delay); */
+
+	pr_debug("hdcp: start\n");
+
+	/* Increment Auth Check Counter */
+	hdcp_context->auth_id++;
+
+	/* blank TV screen */
+	ipil_enable_planes_on_pipe(1, false);
+
+	/* Check HDCP Status */
+	if (ipil_hdcp_is_ready() == false) {
+		pr_err("hdcp: hdcp is not ready\n");
+		return false;
+	}
+
+	/* start 1st stage of hdcp authentication */
+	if (hdcp_stage1_authentication(&is_repeater) == false) {
+		pr_debug("hdcp: stage 1 authentication fails\n");
+		return false;
+	}
+
+	/* un-blank TV screen */
+	ipil_enable_planes_on_pipe(1, true);
+
+
+	pr_debug("hdcp: initial authentication completed, repeater:%d\n",
+		is_repeater);
+
+	/* Branch Repeater Mode Authentication */
+	if (is_repeater == true)
+		if (hdcp_initiate_rep_auth() == false)
+			return false;
+
+	/* Initiate phase3_valid with true status */
+	hdcp_context->is_phase3_valid = true;
+	/* Branch Periodic Ri Check */
+	pr_debug("hdcp: starting periodic Ri check\n");
+
+	/* Schedule Ri check after 2 sec*/
+	if (hdcp_stage3_schedule_ri_check(false) == false) {
+		pr_err("hdcp: fail to schedule Ri check\n");
+		return false;
+	}
+
+	return true;
+}
+
+/**
+ * Description: verify conditions necessary for re-authentication and
+ *		enable HDCP if favourable
+ *
+ * Returns:	none
+ */
+static void hdcp_retry_enable(void)
+{
+	int msg = HDCP_ENABLE;
+	if (hdcp_enable_condition_ready() == true &&
+		hdcp_context->is_phase1_enabled == false &&
+		hdcp_context->auto_retry == true) {
+		wq_send_message_delayed(msg, NULL, 30);
+		pr_debug("hdcp: retry enable\n");
+	}
+}
+
+/* Based on hardware Ri frame count, adjust ri_check_interval.
+ * Also, make sure Ri check happens right after Ri frame count
+ * becomes multiples of 128.
+ *  */
+static bool hdcp_ri_check_reschedule(void)
+{
+	#define RI_FRAME_WAIT_LIMIT 150
+
+	struct hdcp_context_t *ctx = hdcp_context;
+	uint32_t prev_ri_frm_cnt_status = ctx->prev_ri_frm_cnt_status;
+	uint8_t  ri_frm_cnt_status;
+	int32_t  ri_frm_cnt;
+	int32_t  adj;  /* Adjustment of ri_check_interval in msec */
+	uint32_t cnt_ri_wait = 0;
+	bool     ret = false;
+
+
+	/* Query hardware Ri frame counter.
+	 * This value is used to adjust ri_check_interval
+	 * */
+	ipil_hdcp_get_ri_frame_count(&ri_frm_cnt_status);
+
+	/* (frm_cnt_ri - prev_frm_cnt_ri) is expected to be 128. If not,
+	 * we have to compensate the time difference, which is caused by async
+	 * behavior of CPU clock, scheduler and HDMI clock. If hardware can
+	 * provide interrupt signal for Ri check, then this compensation work
+	 * can be avoided.
+	 * Hardcode "256" is because hardware Ri frame counter is 8 bits.
+	 * Hardcode "128" is based on HDCP spec.
+	* */
+	ri_frm_cnt = ri_frm_cnt_status >= prev_ri_frm_cnt_status      ?
+		ri_frm_cnt_status - prev_ri_frm_cnt_status       :
+		256 - prev_ri_frm_cnt_status + ri_frm_cnt_status;
+	pr_debug("current ri_frm_cnt = %d, previous ri_frm_cnt = %d\n",
+			  ri_frm_cnt_status, prev_ri_frm_cnt_status);
+
+	/* Compute adjustment of ri_check_interval*/
+	adj = (128 - ri_frm_cnt) * hdcp_context->video_refresh_interval;
+
+	/* Adjust ri_check_interval */
+	/* adj<0:  Ri check speed is slower than HDMI clock speed
+	 * adj>0:  Ri check speed is faster than HDMI clock speed
+	 * */
+	pr_debug("adjustment of ri_check_interval  = %d (ms)\n", adj);
+	ctx->ri_check_interval += adj;
+	if (ctx->ri_check_interval > ctx->ri_check_interval_upper)
+		ctx->ri_check_interval = ctx->ri_check_interval_upper;
+
+	if (ctx->ri_check_interval < ctx->ri_check_interval_lower)
+		ctx->ri_check_interval = ctx->ri_check_interval_lower;
+
+	pr_debug("ri_check_interval=%d(ms)\n", ctx->ri_check_interval);
+
+	/* Update prev_ri_frm_cnt_status*/
+	hdcp_context->prev_ri_frm_cnt_status = ri_frm_cnt_status;
+
+	/* Queue next Ri check task with new ri_check_interval*/
+	ret = hdcp_stage3_schedule_ri_check(false);
+	if (!ret)
+		goto exit;
+
+	/* Now, check if ri_frm_cnt_status is multiples of 128.
+	 * If we are too fast, wait for frame 128 (or a few frames after
+	 * frame 128) to happen to make sure Ri' is ready.
+	 * Why using hardcode "64"? : if ri_frm_cnt_status is close to
+	 * multiples of 128 (ie, ri_frm_cnt_status % 128 > 64), we keep waiting.
+	 * Otherwise if ri_frm_cnt_status just passes 128
+	 * (ie, ri_frm_cnt_status % 128 < 64) we continue.
+	 * Note the assumption here is this thread is scheduled to run at least
+	 * once in one 64-frame period.
+	 *
+	 * RI_FRAME_WAIT_LIMIT is in case HW stops updating ri frame
+	 * count and causes infinite looping.
+	*/
+	while ((ri_frm_cnt_status % 128 >= 64) &&
+			(cnt_ri_wait < RI_FRAME_WAIT_LIMIT)) {
+		msleep(hdcp_context->video_refresh_interval);
+		ipil_hdcp_get_ri_frame_count(&ri_frm_cnt_status);
+		cnt_ri_wait++;
+		pr_debug("current Ri frame count = %d\n", ri_frm_cnt_status);
+	}
+
+	if (RI_FRAME_WAIT_LIMIT == cnt_ri_wait) {
+		ret = false;
+		goto exit;
+	}
+
+	/* Match Ri with Ri'*/
+	ret = hdcp_stage3_ri_check();
+
+exit:
+	return ret;
+}
+
+/**
+ * Description: workqueue event handler to execute all hdcp tasks
+ *
+ * @work	work assigned from workqueue contains the task to be handled
+ *
+ * Returns:	none
+ */
+static void hdcp_task_event_handler(struct work_struct *work)
+{
+	struct delayed_work *delayed_work = to_delayed_work(work);
+	struct hdcp_wq_struct_t *hwq = container_of(delayed_work,
+						struct hdcp_wq_struct_t,
+						dwork);
+	int msg = 0;
+	void *msg_data = NULL;
+	bool reset_hdcp = false;
+	struct hdcp_context_t *ctx = hdcp_context;
+
+	if (hwq != NULL) {
+		msg = hwq->msg;
+		msg_data = hwq->msg_data;
+	}
+
+	if (hdcp_context == NULL || hwq == NULL)
+		goto EXIT_HDCP_HANDLER;
+
+	if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
+				OSPM_UHB_FORCE_POWER_ON)) {
+		pr_err("Unable to power on display island!");
+		goto EXIT_HDCP_HANDLER;
+	}
+
+	switch (msg) {
+	case HDCP_ENABLE:
+#ifndef OTM_HDMI_HDCP_ALWAYS_ENC
+		if (hdcp_enable_condition_ready() == false) {
+			/* if enable condition is not ready have to return
+			 * from hdcp_enable function immediately */
+			reset_hdcp = true;
+			break;
+		}
+#endif
+		if (hdcp_enable_condition_ready() == true &&
+			hdcp_context->is_phase1_enabled == false &&
+			hdcp_start() == false) {
+			reset_hdcp = true;
+			hdcp_context->force_reset = true;
+			pr_debug("hdcp: failed to start hdcp\n");
+		}
+		break;
+
+	case HDCP_RI_CHECK:
+		/*pr_debug("hdcp: RI CHECK\n");*/
+		if (msg_data == NULL ||
+			*(unsigned int *)msg_data != hdcp_context->auth_id)
+			/*pr_debug("hdcp: auth count %d mismatch %d\n",
+				*(unsigned int *)msg_data,
+				hdcp_context->auth_id);*/
+			break;
+
+		/* Do phase 3 only if phase 1 was successful*/
+		if (hdcp_context->is_phase1_enabled == false)
+			break;
+
+		if (hdcp_ri_check_reschedule() == false)
+			reset_hdcp = true;
+		break;
+
+	case HDCP_REPEATER_CHECK:
+		pr_debug("hdcp: repeater check\n");
+		if (msg_data == NULL ||
+			*(unsigned int *)msg_data != hdcp_context->auth_id)
+			/*pr_debug("hdcp: auth count %d mismatch %d\n",
+				*(unsigned int *)msg_data,
+				hdcp_context->auth_id);*/
+			break;
+
+		if (hdcp_stage2_repeater_authentication() == false)
+			reset_hdcp = true;
+		break;
+
+	case HDCP_REPEATER_WDT_EXPIRED:
+		if (msg_data != NULL && *(unsigned int *)msg_data ==
+				hdcp_context->auth_id) {
+			/*pr_debug("hdcp: reapter wdt expired, "
+				    "auth_id = %d, msg_data = %d\n",
+				    hdcp_context->auth_id,
+				    *(unsigned int *)msg_data);*/
+
+			hdcp_context->wdt_expired = true;
+			if(!hdcp_context->is_phase2_enabled &&
+				hdcp_context->is_phase1_enabled)
+				reset_hdcp = true;
+		}
+		break;
+
+	case HDCP_RESET:
+		hdcp_reset();
+		break;
+
+	case HDCP_SET_POWER_SAVE_STATUS:/* handle suspend resume */
+		/* ignore suspend state if HPD is low */
+		if (msg_data != NULL && hdcp_context->hpd == true) {
+			hdcp_context->suspend = *((bool *)msg_data);
+			pr_debug("hdcp: suspend = %d\n",
+					hdcp_context->suspend);
+			if (hdcp_context->suspend == true) {
+				if (hdcp_context->is_phase1_enabled
+				    == true)
+					reset_hdcp = true;
+			}
+		}
+		break;
+
+	case HDCP_SET_HPD_STATUS:/* handle hpd status */
+		if (msg_data != NULL) {
+			hdcp_context->hpd = *((bool *)msg_data);
+			pr_debug("hdcp: hpd = %d\n",
+					hdcp_context->hpd);
+			if (hdcp_context->hpd == false) {
+				/* reset suspend state if HPD is Low */
+				hdcp_context->suspend = false;
+				reset_hdcp = true;
+			}
+		}
+		break;
+
+	case HDCP_SET_DPMS_STATUS:/* handle display_power_on status */
+		if (msg_data != NULL) {
+			hdcp_context->display_power_on =
+					*((bool *)msg_data);
+			pr_debug("hdcp: display_power_on = %d\n",
+					hdcp_context->display_power_on);
+			if (hdcp_context->display_power_on == false)
+				reset_hdcp = true;
+		}
+		break;
+
+	default:
+		break;
+	}
+
+	if (reset_hdcp == true) {
+		msg = HDCP_RESET;
+		wq_send_message(msg, NULL);
+	} else
+		/* if disabled retry HDCP authentication */
+		hdcp_retry_enable();
+
+	/* Update security component of hdmi and hdcp status */
+	if ((ctx->is_phase1_enabled != ctx->previous_phase1_status) ||
+			(ctx->hpd != ctx->previous_hpd)) {
+		ctx->previous_phase1_status = ctx->is_phase1_enabled;
+		ctx->previous_hpd           = ctx->hpd;
+
+		otm_hdmi_update_security_hdmi_hdcp_status(
+				ctx->is_phase1_enabled, ctx->hpd);
+	}
+	ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+
+EXIT_HDCP_HANDLER:
+	if (msg_data != NULL)
+		kfree(msg_data);
+	if (hwq != NULL)
+		kfree(hwq);
+
+	return;
+}
+
+/**
+ * Description: function to update HPD status
+ *
+ * @hdmi_context handle hdmi_context
+ * @hpd		 HPD high/low status
+ *
+ * Returns:	true on success
+ *		false on failure
+ */
+bool otm_hdmi_hdcp_set_hpd_state(hdmi_context_t *hdmi_context,
+					bool hpd)
+{
+	int msg = HDCP_SET_HPD_STATUS;
+	bool *p_hpd = NULL;
+	hpd = !!(hpd);
+
+	if (hdmi_context == NULL || hdcp_context == NULL)
+		return false;
+
+	if (hdcp_context->hpd != hpd) {
+		p_hpd = kmalloc(sizeof(bool), GFP_KERNEL);
+		if (p_hpd != NULL) {
+			*p_hpd = hpd;
+			return wq_send_message(msg, (void *)p_hpd);
+		} else
+			pr_debug("hdcp: %s failed to alloc mem\n", __func__);
+	}
+
+	return false;
+}
+
+/**
+ * Description: function to update power save (suspend/resume) status
+ *
+ * @hdmi_context handle hdmi_context
+ * @suspend	 suspend/resume status
+ *
+ * Returns:	true on success
+ *		false on failure
+ */
+bool otm_hdmi_hdcp_set_power_save(hdmi_context_t *hdmi_context,
+					bool suspend)
+{
+	int msg = HDCP_SET_POWER_SAVE_STATUS;
+	bool *p_suspend = NULL;
+
+	if (hdmi_context == NULL || hdcp_context == NULL)
+		return false;
+
+	if (hdcp_context->suspend != suspend) {
+		p_suspend = kmalloc(sizeof(bool), GFP_KERNEL);
+		if (p_suspend != NULL) {
+			*p_suspend = suspend;
+			return wq_send_message(msg, (void *)p_suspend);
+		} else
+			pr_debug("hdcp: %s failed to alloc mem\n", __func__);
+		if (suspend == true)
+			/* Cleanup WorkQueue */
+			flush_workqueue(hdcp_context->hdcp_wq);
+	}
+
+	return false;
+}
+
+/**
+ * Description: function to update display_power_on status
+ *
+ * @hdmi_context handle hdmi_context
+ * @display_power_on	display power on/off status
+ *
+ * Returns:	true on success
+ *		false on failure
+ */
+bool otm_hdmi_hdcp_set_dpms(hdmi_context_t *hdmi_context,
+			bool display_power_on)
+{
+#ifdef OTM_HDMI_HDCP_ALWAYS_ENC
+	int msg = HDCP_SET_DPMS_STATUS;
+	bool *p_display_power_on = NULL;
+	display_power_on = !!(display_power_on);
+#endif
+
+	if (hdmi_context == NULL || hdcp_context == NULL)
+		return false;
+
+#ifndef OTM_HDMI_HDCP_ALWAYS_ENC
+	return true;
+#else
+	if (hdcp_context->display_power_on != display_power_on) {
+		p_display_power_on = kmalloc(sizeof(bool), GFP_KERNEL);
+		if (p_display_power_on != NULL) {
+			*p_display_power_on = display_power_on;
+			return wq_send_message(msg, (void *)p_display_power_on);
+		} else
+			pr_debug("hdcp: %s failed to alloc mem\n", __func__);
+		if (display_power_on == false)
+			/* Cleanup WorkQueue */
+
+			flush_workqueue(hdcp_context->hdcp_wq);
+	}
+	return false;
+#endif
+}
+
+/**
+ * Description: Function to check HDCP encryption status
+ *
+ * @hdmi_context handle hdmi_context
+ *
+ * Returns:	true if encrypting
+ *		else false
+ */
+bool otm_hdmi_hdcp_enc_status(hdmi_context_t *hdmi_context)
+{
+#ifndef OTM_HDMI_HDCP_ALWAYS_ENC
+	if (hdmi_context == NULL || hdcp_context == NULL)
+		return false;
+
+	if (hdcp_context->is_required && hdcp_context->is_phase1_enabled)
+		return true;
+#endif
+	return false;	
+}
+
+/**
+ * Description: Function to check HDCP Phase3 Link status
+ *
+ * @hdmi_context handle hdmi_context
+ *
+ * Returns:	true if link is verified Ri Matches
+ *		else false
+ */
+bool otm_hdmi_hdcp_link_status(hdmi_context_t *hdmi_context)
+{
+#ifndef OTM_HDMI_HDCP_ALWAYS_ENC
+	if (hdmi_context == NULL || hdcp_context == NULL)
+		return false;
+
+	if (hdcp_context->is_phase3_valid)
+		return true;
+#endif
+	return false;	
+}
+
+/**
+ * Description: Function to read BKSV and validate it
+ *
+ * @hdmi_context handle hdmi_context
+ * @bksv	 buffer to store bksv
+ *
+ * Returns:	true on success
+ *		false on failure
+ */
+bool otm_hdmi_hdcp_read_validate_bksv(hdmi_context_t *hdmi_context,
+				uint8_t *bksv)
+{
+	bool ret = false;
+#ifndef OTM_HDMI_HDCP_ALWAYS_ENC
+	if (hdmi_context == NULL || hdcp_context == NULL || bksv == NULL)
+		return false;
+
+	if(hdcp_context->hpd ==true &&
+		hdcp_context->suspend == false &&
+		hdcp_context->display_power_on == true &&
+		hdcp_context->is_required == false &&
+		hdcp_context->is_phase1_enabled == false) {
+		hdcp_context->sink_query = true;
+		ret = hdcp_read_bksv(bksv, HDCP_KSV_SIZE);
+		hdcp_context->sink_query = false;
+	}
+#endif
+	return ret;
+}
+
+/**
+ * Description: function to enable HDCP
+ *
+ * @hdmi_context handle hdmi_context
+ * @refresh_rate vertical refresh rate of the video mode
+ *
+ * Returns:	true on success
+ *		false on failure
+ */
+bool otm_hdmi_hdcp_enable(hdmi_context_t *hdmi_context,
+				int refresh_rate)
+{
+	int                  msg = HDCP_ENABLE;
+	otm_hdmi_attribute_t hdmi_attr;
+	otm_hdmi_ret_t       rc;
+
+	if (hdmi_context == NULL || hdcp_context == NULL)
+		return false;
+
+	if (hdcp_context->is_required == true) {
+		pr_debug("hdcp: already enabled\n");
+		return true;
+	}
+#ifdef OTM_HDCP_DEBUG_MODULE
+	if (module_disable_hdcp) {
+		pr_debug("hdcp: disabled by module\n");
+		return false;
+	}
+#endif
+
+	hdcp_context->is_required = true;
+
+	/* compute ri check interval based on refresh rate */
+	if (refresh_rate) {
+		/*compute msec time for 1 frame*/
+		hdcp_context->video_refresh_interval = 1000 / refresh_rate;
+
+		/* compute msec time for 128 frames per HDCP spec */
+		hdcp_context->ri_check_interval = ((128 * 1000) / refresh_rate);
+	} else {
+		/*compute msec time for 1 frame, assuming refresh rate of 60*/
+		hdcp_context->video_refresh_interval = 1000 / 60;
+
+		/* default to 128 frames @ 60 Hz */
+		hdcp_context->ri_check_interval      = ((128 * 1000) / 60);
+	}
+
+	/* Set upper and lower bounds for ri_check_interval to
+	 *  avoid dynamic adjustment to go wild.
+	 *  Set adjustment range to 100ms, which is safe if HZ <=100.
+	*/
+	hdcp_context->ri_check_interval_lower =
+			hdcp_context->ri_check_interval - 100;
+	hdcp_context->ri_check_interval_upper =
+			hdcp_context->ri_check_interval + 100;
+
+	/* Init prev_ri_frm_cnt_status*/
+	hdcp_context->prev_ri_frm_cnt_status = 0;
+
+	/* Set ri_retry
+	* Default to interval of 3 frames if can not read
+	* OTM_HDMI_ATTR_ID_HDCP_RI_RETRY */
+	rc = otm_hdmi_get_attribute(hdmi_context,
+				OTM_HDMI_ATTR_ID_HDCP_RI_RETRY,
+				&hdmi_attr, false);
+
+	hdcp_context->ri_retry = (OTM_HDMI_SUCCESS == rc) ?
+				 hdmi_attr.content._uint.value :
+				 3 * hdcp_context->video_refresh_interval;
+
+	hdcp_context->hdmi = otm_hdmi_is_monitor_hdmi(hdmi_context);
+
+	pr_debug("hdcp: schedule HDCP enable\n");
+
+#ifdef OTM_HDMI_HDCP_ALWAYS_ENC
+	return wq_send_message(msg, NULL);
+#else
+	/* send message and wait for 1st stage authentication to complete */
+	if (wq_send_message(msg, NULL)) {
+		/* on any failure is_required flag will be reset */
+		while (hdcp_context->is_required) {
+			/* wait for phase1 to be enabled before
+			 * returning from this function */
+			if(hdcp_context->is_phase1_enabled)
+				return true;
+			msleep(1);
+		}
+	}
+
+	return false;
+#endif
+}
+
+/**
+ * Description: function to disable HDCP
+ *
+ * @hdmi_context handle hdmi_context
+ *
+ * Returns:	true on success
+ *		false on failure
+ */
+bool otm_hdmi_hdcp_disable(hdmi_context_t *hdmi_context)
+{
+	int msg = HDCP_RESET;
+
+	if (hdmi_context == NULL || hdcp_context == NULL)
+		return false;
+
+	if (hdcp_context->is_required == false) {
+		pr_debug("hdcp: already disabled\n");
+		return true;
+	}
+
+	/* send reset message */
+	wq_send_message(msg, NULL);
+
+	/* Cleanup WorkQueue */
+	/*flush_workqueue(hdcp_context->hdcp_wq);*/
+
+	/* Wait until hdcp is disabled.
+	 * No need to wait for workqueue flushed since it may block for 2sec
+	 * */
+	while (hdcp_context->is_phase1_enabled)
+		msleep(1);
+
+	hdcp_context->is_required = false;
+
+	pr_debug("hdcp: disable\n");
+
+	return true;
+}
+
+/**
+ * Description: hdcp init function
+ *
+ * @hdmi_context handle hdmi_context
+ * @ddc_rd_wr:	pointer to ddc read write function
+ *
+ * Returns:	true on success
+ *		false on failure
+ */
+bool otm_hdmi_hdcp_init(hdmi_context_t *hdmi_context,
+	int (*ddc_rd_wr)(bool, uint8_t, uint8_t, uint8_t *, int))
+{
+	otm_hdmi_attribute_t hdmi_attr;
+	otm_hdmi_ret_t       rc;
+
+	if (hdmi_context == NULL ||
+	    ddc_rd_wr == NULL ||
+	    ipil_hdcp_device_can_authenticate() == false ||
+	    hdcp_context != NULL) {
+		pr_debug("hdcp: init error!!! parameters\n");
+		return false;
+	}
+
+	/* allocate hdcp context */
+	hdcp_context = kmalloc(sizeof(struct hdcp_context_t), GFP_KERNEL);
+
+	/* Create hdcp workqueue to handle all hdcp tasks.
+	 * To avoid multiple threads created for multi-core CPU (eg CTP)
+	 * use create_singlethread_workqueue.
+	 * */
+	if (hdcp_context != NULL) {
+		hdcp_context->hdcp_wq =
+				create_singlethread_workqueue("HDCP_WQ");
+	}
+
+	if (hdcp_context == NULL || hdcp_context->hdcp_wq == NULL) {
+		pr_debug("hdcp: init error!!! allocation\n");
+		goto EXIT_INIT;
+	}
+
+	/* initialize hdcp context and hence hdcp state machine */
+	hdcp_context->is_required	= false;
+	hdcp_context->is_phase1_enabled	= false;
+	hdcp_context->is_phase2_enabled	= false;
+	hdcp_context->is_phase3_valid	= false;
+	hdcp_context->suspend		= false;
+	hdcp_context->hpd		= false;
+#ifndef OTM_HDMI_HDCP_ALWAYS_ENC
+	hdcp_context->display_power_on	= true;
+	hdcp_context->auto_retry	= false;
+#else
+	hdcp_context->display_power_on	= false;
+	hdcp_context->auto_retry	= true;
+#endif
+	hdcp_context->wdt_expired	= false;
+	hdcp_context->sink_query	= false;
+	hdcp_context->can_authenticate	= true;
+	hdcp_context->current_srm_ver	= 0u;
+	hdcp_context->vrl		= NULL;
+	hdcp_context->vrl_count		= 0u;
+	hdcp_context->ri_check_interval	= 0u;
+	hdcp_context->force_reset	= false;
+	hdcp_context->auth_id		= 0;
+
+	hdcp_context->previous_hpd = false;
+	hdcp_context->previous_phase1_status = false;
+
+	/* store i2c function pointer used for ddc read/write */
+	hdcp_context->ddc_read_write = ddc_rd_wr;
+	hdcp_context->bstatus_read = true;
+
+	/* Find hdcp delay
+	 * If attribute not set, default to 200ms
+	 */
+	rc = otm_hdmi_get_attribute(hdmi_context,
+				OTM_HDMI_ATTR_ID_HDCP_DELAY,
+				&hdmi_attr, false);
+
+	hdcp_context->hdcp_delay = (rc == OTM_HDMI_SUCCESS) ?
+			hdmi_attr.content._uint.value :
+			200;
+
+	/* perform any hardware initializations */
+	if (ipil_hdcp_init() == true) {
+		pr_debug("hdcp: initialized\n");
+		return true;
+	}
+
+EXIT_INIT:
+	/* Cleanup and exit */
+	if (hdcp_context != NULL) {
+		if (hdcp_context->hdcp_wq != NULL)
+			destroy_workqueue(hdcp_context->hdcp_wq);
+		kfree(hdcp_context);
+		hdcp_context = NULL;
+	}
+
+	return false;
+}
+
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/pil/common/hdmi_internal.h b/drivers/external_drivers/intel_media/otm_hdmi/pil/common/hdmi_internal.h
new file mode 100644
index 0000000..da2878a
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/pil/common/hdmi_internal.h
@@ -0,0 +1,297 @@
+/*
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  Contact Information:
+
+  Intel Corporation
+  2200 Mission College Blvd.
+  Santa Clara, CA  95054
+
+  BSD LICENSE
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#ifndef _OTM_HDMI_INTERNAL_H
+#define _OTM_HDMI_INTERNAL_H
+
+
+#include <linux/time.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+#include "otm_hdmi_types.h"
+#include "otm_hdmi_defs.h"
+
+#include "edid.h"
+#include "hdmi_hal.h"
+
+/**
+ * Defines, shortcuts and globals
+ */
+
+extern otm_hdmi_attribute_t
+	otm_hdmi_attributes_table[OTM_HDMI_MAX_SUPPORTED_ATTRIBUTES];
+
+#define ATTRS otm_hdmi_attributes_table
+
+/* Note: this macro should **only** be used when time difference is less than
+ * 4s */
+#define TIME_DIFF(tv2, tv1) ((((tv2).tv_sec - (tv1).tv_sec) * 1000000) + \
+			     ((tv2).tv_usec - (tv1).tv_usec))
+
+#define GPIO_MIN 0
+#define GPIO_MAX 28
+
+#define I2C_HW_TIMEOUT TIME_MS(200)
+#define I2C_SW_TIMEOUT 500
+
+#define CURRENT_DMA_READ_DESCRIPTOR(ai) \
+(((hdmi_dma_descriptor_t *) (ai)->dscr_buf_addr_v)[(ai)->dscr_current_r])
+
+#define NUM_ENTRIES_IN(set) (sizeof(set) / sizeof(*set))
+
+#define ALL_SF (OTM_HDMI_AUDIO_FS_192_KHZ   | \
+		OTM_HDMI_AUDIO_FS_176_4_KHZ | \
+		OTM_HDMI_AUDIO_FS_96_KHZ    | \
+		OTM_HDMI_AUDIO_FS_88_2_KHZ  | \
+		OTM_HDMI_AUDIO_FS_48_KHZ    | \
+		OTM_HDMI_AUDIO_FS_44_1_KHZ  | \
+		OTM_HDMI_AUDIO_FS_32_KHZ)
+
+#define ALL_SS (OTM_HDMI_AUDIO_SS_16 | \
+		OTM_HDMI_AUDIO_SS_20 | \
+		OTM_HDMI_AUDIO_SS_24)
+
+#define DECLARE_AUDIO_CAP(_fmt, _nch, _fs, _ss) \
+	{ .format = _fmt, .max_channels = _nch, .fs = _fs, .ss_bitrate = _ss }
+
+/**
+ * Interrupts grouped by use
+ */
+#define HDMI_INTERRUPTS (HDMI_INTERRUPT_I2C_BUS_ERROR	 | \
+			 HDMI_INTERRUPT_I2C_BUFFER_FULL       | \
+			 HDMI_INTERRUPT_I2C_TRANSACTION_DONE  | \
+			 HDMI_INTERRUPT_HDCP_KEYS_READY       | \
+			 HDMI_INTERRUPT_HDCP_RI	       | \
+			 HDMI_INTERRUPT_HDCP_PI	       | \
+			 HDMI_INTERRUPT_HDCP_FRAME	    | \
+			 HDMI_INTERRUPT_HDCP_M0	       | \
+			 HDMI_INTERRUPT_HDCP_R0	       | \
+			 HDMI_INTERRUPT_AUDIO_FIFO_UNDERFLOW  | \
+			 HDMI_INTERRUPT_DMA_SRC_COMPLETE      | \
+			 HDMI_INTERRUPT_HOTPLUG_DETECT)	/* Enable HPD */
+
+/**
+ * Infoframe slot aliases
+ */
+enum {
+	PACKET_SLOT_AVI = 0,
+	PACKET_SLOT_AUDIO = 1,
+	PACKET_SLOT_GENERIC_0 = 2,
+	PACKET_SLOT_GENERIC_1 = 3,
+};
+
+/**
+ * Infoframe Transmission Frequency
+ */
+enum {
+	HDMI_DIP_SEND_ONCE = 0,
+	HDMI_DIP_SEND_EVERY_VSYNC = 1,
+	HDMI_DIP_SEND_ATLEAST_EVERY_OTHER_VSYNC = 2,
+};
+
+/**
+ * Supported packets
+ */
+typedef enum {
+	HDMI_PACKET_NULL = 0x00,
+	HDMI_PACKET_ACP = 0x04,
+	HDMI_PACKET_ISRC1 = 0x05,
+	HDMI_PACKET_ISRC2 = 0x06,
+	HDMI_PACKET_GAMUT = 0x0A,
+	HDMI_PACKET_VS = 0x81,
+	HDMI_PACKET_AVI = 0x82,
+	HDMI_PACKET_SPD = 0x83,
+} hdmi_packet_type_t;
+
+/**
+ * Packet management info
+ */
+typedef struct {
+	otm_hdmi_packet_t packet;
+	bool int_use;
+} packet_info_t;
+
+/**
+ * HDMI AV mute source
+ */
+typedef enum {
+	MUTE_SOURCE_HDCP = 0x01,
+	MUTE_SOURCE_APP = 0x02,
+} mute_source_t;
+
+/**
+ * This structure represents typical "enumerator - value" pair
+ */
+typedef struct {
+	int e;
+	int v;
+} ev_t;
+
+/**
+ * Audio setup information
+ */
+typedef struct {
+	unsigned int dscr_buf_addr;	/* DMA descriptors buffer physical
+					   address */
+	void *dscr_buf_addr_v;		/* DMA descriptors buffer virtual
+					   address */
+	unsigned int dscr_buf_size;	/* DMA descriptors buffer size */
+	unsigned int dscr_current_w;	/* index of current write descriptor */
+	unsigned int dscr_current_r;	/* index of current read descriptor */
+	bool playback;		/* playback status */
+	bool prebuffer;		/* prebuffering status */
+
+	otm_hdmi_audio_fmt_t fmt;	/* format */
+	otm_hdmi_audio_fs_t fs;		/* sampling frequency */
+	unsigned int nch;		/* number of channels */
+	otm_hdmi_audio_ss_t ss;		/* sample size */
+	unsigned int map;		/* speaker allocation map */
+	unsigned int chst[2];		/* channel status info */
+	bool hbr;		/* HBR vs non-HBR transmission mode */
+	otm_hdmi_audio_fs_t fs_adj;	/* audio frame rate */
+} audio_info_t;
+
+/**
+ * HDMI context definition
+ */
+typedef struct {
+	hdmi_device_t dev;	/* hdmi hal handle */
+	void *io_address;	/* address of mapped io region */
+	unsigned int io_length;	/* size of io region */
+
+	edid_info_t edid_ext;	/* external EDID structure */
+	edid_info_t edid_int;	/* internal EDID structure */
+	char edid_raw[MAX_EDID_BLOCKS*SEGMENT_SIZE];	 /* raw EDID data */
+
+	otm_hdmi_timing_t mode;	/* current mode */
+	bool mode_set;	/* mode switch completion indicator */
+	bool hdmi;	/* active transmission type */
+	bool is_connected; /* indicate whether cable is connected */
+
+	unsigned int irq_number;	/* IRQ number */
+	void *hpd_callback;	/* hot plug call back */
+	void *hpd_data;         /* Hotplug user data to be passed back in callback */
+	struct workqueue_struct *post_resume_wq;
+	struct work_struct post_resume_work;     /* Used to perform any
+						  * notification after
+						  * resuming from deep sleep
+						  * without HDMI interrupts.
+						  */
+
+	struct mutex modes_sema;	/* protecting modes table sharing */
+	struct mutex exec_sema;	/* to sync pd entries execution */
+	struct mutex hpd_sema;	/* semaphore to sync hot plug sensetive data */
+	struct mutex srv_sema;	/* semaphore to sync service and main threads */
+	struct mutex mute_sema;	/* to sync av mute operations */
+
+	bool phy_status;/* current HW PHY status */
+	bool dtv;	/* TX vs DTV indicator */
+	bool dc;	/* Deep Color enable indicator */
+
+	struct timeval hal_timer;	/* HAL polling timer */
+
+	packet_info_t pi_0;	/* data to be sent via 1st available slot */
+	packet_info_t pi_1;	/* data to be sent via 2nd available slot */
+
+	unsigned int gpio;	/* GPIO ID for I2C workaround */
+	int n_modes_tx;		/* number of static modes */
+	int n_modes_ref;	/* number of reference modes */
+
+	audio_info_t audio_info;	/* hdmi audio unit information */
+	mute_source_t mute_source;	/* current mute sources list */
+
+	otm_hdmi_phy_info_t phy_info;	/* Current PHY electrical properties */
+	int scaling_type; /* scaling type for HDMI display */
+	int gpio_hpd_pin; /* GPIO pin number of HDMI hotplug detection (in) */
+	int gpio_ls_en_pin; /* GPIO pin number for EDID level shifter (out) */
+	bool is_connected_overridden; /* indicate overridden cable state, set by external mean*/
+	bool override_cable_state; /*if true, cable state is indicated by is_connected_overriden;
+							if false, cable state is indicated by is_connected*/
+	bool islands_powered_on; /* HDMI power island status */
+
+} hdmi_context_t;
+
+/* mapping structures between pil and ipil */
+typedef	otm_hdmi_timing_t ipil_timings_t;
+
+/* HDMI attributes setup routine */
+typedef otm_hdmi_ret_t(*pd_attr_declare_t)(otm_hdmi_attribute_t *table,
+					otm_hdmi_attribute_id_t id,
+					otm_hdmi_attribute_type_t type,
+					otm_hdmi_attribute_flag_t flags,
+					char *name,
+					void *value,
+					unsigned int min,
+					unsigned int max);
+
+otm_hdmi_ret_t otm_hdmi_declare_attributes(pd_attr_declare_t declare,
+					pd_attr_get_name_t get_name);
+
+otm_hdmi_ret_t hdmi_timing_add_twin_entries(edid_info_t *edid,
+					    otm_hdmi_refresh_t src,
+					    otm_hdmi_refresh_t dst);
+
+void hdmi_timing_edid_to_vdc(otm_hdmi_timing_t *t);
+
+#endif /* _OTM_HDMI_INTERNAL_H */
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/pil/common/hdmi_timings.h b/drivers/external_drivers/intel_media/otm_hdmi/pil/common/hdmi_timings.h
new file mode 100644
index 0000000..97b8d6e
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/pil/common/hdmi_timings.h
@@ -0,0 +1,121 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license.  When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+
+ Copyright(c) 2006-2011 Intel Corporation. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ The full GNU General Public License is included in this distribution
+ in the file called LICENSE.GPL.
+
+ Contact Information:
+      Intel Corporation
+      2200 Mission College Blvd.
+      Santa Clara, CA  97052
+
+ BSD LICENSE
+
+ Copyright(c) 2006-2011 Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+   - Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+   - Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in
+     the documentation and/or other materials provided with the
+     distribution.
+   - Neither the name of Intel Corporation nor the names of its
+     contributors may be used to endorse or promote products derived
+     from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+
+#ifndef __HDMI_TIMINGS_H__
+#define __HDMI_TIMINGS_H__
+
+#include "otm_hdmi.h"
+
+extern const otm_hdmi_timing_t MODE_640x480p5994_60;
+extern const otm_hdmi_timing_t MODE_720x480p5994_60;
+extern const otm_hdmi_timing_t MODE_720x480p5994_60__16by9;
+extern const otm_hdmi_timing_t MODE_1280x720p5994_60;
+extern const otm_hdmi_timing_t MODE_1920x1080i5994_60;
+extern const otm_hdmi_timing_t MODE_1920x1080i5994_60__FP;
+extern const otm_hdmi_timing_t MODE_720_1440x480i5994_60;
+extern const otm_hdmi_timing_t MODE_720_1440x480i5994_60__16by9;
+extern const otm_hdmi_timing_t MODE_1920x1080p5994_60;
+extern const otm_hdmi_timing_t MODE_720x576p50;
+extern const otm_hdmi_timing_t MODE_720x576p50__16by9;
+extern const otm_hdmi_timing_t MODE_1280x720p50;
+extern const otm_hdmi_timing_t MODE_1920x1080i50;
+extern const otm_hdmi_timing_t MODE_1920x1080i50__FP;
+extern const otm_hdmi_timing_t MODE_720_1440x576i50;
+extern const otm_hdmi_timing_t MODE_720_1440x576i50__16by9;
+extern const otm_hdmi_timing_t MODE_1920x1080p50;
+extern const otm_hdmi_timing_t MODE_1920x1080p24;
+extern const otm_hdmi_timing_t MODE_1920x1080p25;
+extern const otm_hdmi_timing_t MODE_1920x1080p30;
+extern const otm_hdmi_timing_t MODE_1920x1080p30__FP2;
+extern const otm_hdmi_timing_t MODE_1920x1080p30__FP;
+extern const otm_hdmi_timing_t MODE_1920x1080p30__TBH2;
+extern const otm_hdmi_timing_t MODE_1920x1080p48;
+extern const otm_hdmi_timing_t MODE_1920x1080p24__FP2;
+extern const otm_hdmi_timing_t MODE_1920x1080p24__FP;
+extern const otm_hdmi_timing_t MODE_1280x720p5994_60__FP2;
+extern const otm_hdmi_timing_t MODE_1280x720p5994_60__FP;
+extern const otm_hdmi_timing_t MODE_1280x720p50__FP2;
+extern const otm_hdmi_timing_t MODE_1280x720p50__FP;
+extern const otm_hdmi_timing_t MODE_1280x720p5994_60__SBSH2;
+extern const otm_hdmi_timing_t MODE_1280x720p50__SBSH2;
+extern const otm_hdmi_timing_t MODE_1920x1080i5994_60__SBSH2;
+extern const otm_hdmi_timing_t MODE_1920x1080i50__SBSH2;
+extern const otm_hdmi_timing_t MODE_1920x1080p5994_60__SBSH2;
+extern const otm_hdmi_timing_t MODE_1920x1080p50__SBSH2;
+extern const otm_hdmi_timing_t MODE_1920x1080p24__SBSH2;
+extern const otm_hdmi_timing_t MODE_1280x720p5994_60__TBH2;
+extern const otm_hdmi_timing_t MODE_1280x720p50__TBH2;
+extern const otm_hdmi_timing_t MODE_1920x1080p5994_60__TBH2;
+extern const otm_hdmi_timing_t MODE_1920x1080p50__TBH2;
+extern const otm_hdmi_timing_t MODE_1920x1080p24__TBH2;
+extern const otm_hdmi_timing_t MODE_1280x720p60__PANEL_FS;
+extern const otm_hdmi_timing_t MODE_1280x720p50__PANEL_FS;
+extern const otm_hdmi_timing_t MODE_1920x540p60__PANEL_FS;
+extern const otm_hdmi_timing_t MODE_1920x540p50__PANEL_FS;
+extern const otm_hdmi_timing_t MODE_920x1080p60__PANEL_FS;
+extern const otm_hdmi_timing_t MODE_920x1080p50__PANEL_FS;
+extern const otm_hdmi_timing_t MODE_1920x1080p30__PANEL_FS;
+extern const otm_hdmi_timing_t MODE_1920x1080p25__PANEL_FS;
+extern const otm_hdmi_timing_t MODE_1920x1080p24__PANEL_FS;
+extern const otm_hdmi_timing_t MODE_1920x1080p60__PANEL;
+extern const otm_hdmi_timing_t MODE_1920x1080p50__PANEL;
+extern const otm_hdmi_timing_t MODE_1920x1080p48__PANEL;
+#endif /* __HDMI_TIMINGS_H_ */
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/pil/common/infoframes.c b/drivers/external_drivers/intel_media/otm_hdmi/pil/common/infoframes.c
new file mode 100644
index 0000000..2f86824
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/pil/common/infoframes.c
@@ -0,0 +1,268 @@
+/*
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+	Copyright(c) 2011 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  Contact Information:
+
+  Intel Corporation
+  2200 Mission College Blvd.
+  Santa Clara, CA  95054
+
+  BSD LICENSE
+
+	Copyright(c) 2011 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include "otm_hdmi.h"
+#include "ipil_hdmi.h"
+
+#include "hdmi_internal.h"
+#include "hdmi_timings.h"
+#include "infoframes_api.h"
+
+
+/**
+ * This enumeration represents possible color space settings found in GBD
+ * Current driver logic assumes we only support colorimetries that can be
+ * advertised in EDID: xvYCC601 and xvYCC709
+ */
+enum {
+	GBD_CS_ITU_BT709 = 0,
+	GBD_CS_XVYCC601 = 1,
+	GBD_CS_XVYCC709 = 2,
+	GBD_CS_XYZ = 3,
+	GBD_CS_RESERVED = 4,
+};
+
+static int __compute_check_sum(otm_hdmi_packet_t *packet)
+{
+	uint8_t i = 0;
+	uint8_t sum = 0;
+
+	for (i = 0; i < 3; i++)
+		sum += packet->header[i];
+	for (i = 1; i < 28; i++)
+		sum += packet->data[i];
+
+	packet->data[0] = (uint8_t)(0xFF - sum + 1);
+
+	return (int)packet->data[0];
+}
+
+/**
+ * Note: higher level ensures that input value is valid
+ */
+static int __pfconvert(otm_hdmi_output_pixel_format_t pf)
+{
+	int rc = 0;	/* init to RGB444 */
+
+	switch (pf) {
+	case OTM_HDMI_OPF_RGB444:
+		rc = 0;
+		break;
+	case OTM_HDMI_OPF_YUV422:
+		rc = 1;
+		break;
+	case OTM_HDMI_OPF_YUV444:
+		rc = 2;
+		break;
+	default:
+		rc = 0;
+		break;
+	}
+
+	return rc;
+}
+
+otm_hdmi_ret_t hdmi_packet_check_type(otm_hdmi_packet_t *p,
+					hdmi_packet_type_t type)
+{
+	return ((p && p->header[0] == type) ?
+		OTM_HDMI_SUCCESS : OTM_HDMI_ERR_FAILED);
+}
+
+/*
+ * Description: set avi infoframe based on mode
+ *
+ * @context:	hdmi_context
+ * @mode:	mode requested
+ *
+ * Returns:	OTM_HDMI_SUCCESS on success
+ *		OTM_HDMI_ERR_INVAL on NULL input arguments
+ */
+otm_hdmi_ret_t otm_hdmi_infoframes_set_avi(void *context,
+					otm_hdmi_timing_t *mode)
+{
+	hdmi_context_t *ctx = (hdmi_context_t *)context;
+	otm_hdmi_ret_t rc = OTM_HDMI_SUCCESS;
+	unsigned int type = HDMI_PACKET_AVI;
+	unsigned int freq = HDMI_DIP_SEND_EVERY_VSYNC;
+	int cs = GBD_CS_XVYCC601;
+	otm_hdmi_packet_t avi_pkt;
+	unsigned int pf, vic;
+	bool p, ext;
+	otm_hdmi_par_t par = PD_ATTR_UINT(ATTRS[OTM_HDMI_ATTR_ID_PAR]);
+
+	if (!context || !mode) {
+		pr_debug("\ninvalid arguments\n");
+		return OTM_HDMI_ERR_NULL_ARG;
+	}
+
+	/* Set header to AVI */
+	avi_pkt.header[0] = 0x82;
+	avi_pkt.header[1] = 0x02;
+	avi_pkt.header[2] = 0x0D;
+	/* Clear payload section */
+	memset(avi_pkt.data, 0, sizeof(avi_pkt.data));
+
+	/* RGB, Active Format Info valid, no bars */
+	/* use underscan as HDMI video is composed with all
+	 * active pixels and lines with or without border
+	 */
+	avi_pkt.data[1] = 0x12;
+	/* Set color component sample format */
+	pf = __pfconvert(PD_ATTR_UINT
+			 (ATTRS[OTM_HDMI_ATTR_ID_PIXEL_FORMAT_OUTPUT]));
+	avi_pkt.data[1] |= pf << 5;
+	/* Colorimetry */
+	ext = PD_ATTR_BOOL(ATTRS[OTM_HDMI_ATTR_ID_COLOR_SPACE_EXT]);
+	avi_pkt.data[2] =
+	    (ext ? 3 : (!pf ? 0 : ((mode->width <= 720) ? 0x01 : 0x02))) << 6;
+
+	/* Fill PAR for all supported modes
+	 * This is required for passing compliance tests
+	 */
+
+	if (mode->mode_info_flags & OTM_HDMI_PAR_16_9)
+		par = OTM_HDMI_PAR_16_9;
+	else if (mode->mode_info_flags & OTM_HDMI_PAR_4_3)
+		par = OTM_HDMI_PAR_4_3;
+
+	pr_debug("%s: Selecting PAR %d for mode vic %lu\n", __func__,
+		 par, mode->metadata);
+
+	avi_pkt.data[2] |= par << 4;
+	/* Fill FAR */
+	avi_pkt.data[2] |= PD_ATTR_UINT(ATTRS[OTM_HDMI_ATTR_ID_FAR]);
+
+	/* Get extended colorimetry from slot 1 */
+	if (hdmi_packet_check_type(&ctx->pi_1.packet, HDMI_PACKET_GAMUT) ==
+	    OTM_HDMI_SUCCESS)
+		cs = ctx->pi_1.packet.data[0] & 0x07;
+	/* Get extended colorimetry from slot 0 */
+	else if (hdmi_packet_check_type(&ctx->pi_0.packet, HDMI_PACKET_GAMUT) ==
+		 OTM_HDMI_SUCCESS)
+		cs = ctx->pi_0.packet.data[0] & 0x07;
+
+	/* Fill extended colorimetry */
+	avi_pkt.data[3] = ((cs == GBD_CS_XVYCC601) ? 0 : 1) << 4;
+
+	/* Fill quantization range */
+	if (ctx->edid_int.rgb_quant_selectable
+	    && PD_ATTR_UINT(ATTRS[OTM_HDMI_ATTR_ID_PIXEL_FORMAT_OUTPUT]) ==
+	    OTM_HDMI_OPF_RGB444)
+		avi_pkt.data[3] |=
+			PD_ATTR_BOOL(ATTRS[OTM_HDMI_ATTR_ID_OUTPUT_CLAMP]) ?
+			(0x01 << 2) : (0x02 << 2);
+
+	/* Only support RGB output, 640x480: full range Q0=1, Q1=0
+	* other timing: limited range Q0=0, Q1=1 */
+	avi_pkt.data[3] &= ~OTM_HDMI_COLOR_RANGE_MASK;
+	if (mode->width == 640 && mode->height == 480)
+		avi_pkt.data[3] |= 0x02 << 2;
+	else
+		avi_pkt.data[3] |= 0x01 << 2;
+
+	/* Fill Video Identification Code [adjust VIC according to PAR] */
+	vic = mode->metadata;
+	avi_pkt.data[AVI_VIC_LOC] = vic;
+
+	/* Fill pixel repetition value: 2x for 480i and 546i */
+	p = ((mode->mode_info_flags & PD_SCAN_INTERLACE) == 0);
+	avi_pkt.data[5] = ((mode->width == 720) && !p) ? 0x01 : 0x00;
+	/* Fill quantization range */
+	if (ctx->edid_int.ycc_quant_selectable
+	    && (PD_ATTR_UINT(ATTRS[OTM_HDMI_ATTR_ID_PIXEL_FORMAT_OUTPUT]) ==
+							OTM_HDMI_OPF_YUV444 ||
+		PD_ATTR_UINT(ATTRS[OTM_HDMI_ATTR_ID_PIXEL_FORMAT_OUTPUT]) ==
+							OTM_HDMI_OPF_YUV422))
+		avi_pkt.data[5] |=
+			PD_ATTR_BOOL(ATTRS[OTM_HDMI_ATTR_ID_OUTPUT_CLAMP]) ?
+			(0x00 << 6) : (0x01 << 6);
+
+	/* Compute and fill checksum */
+	avi_pkt.data[0] = __compute_check_sum(&avi_pkt);
+
+	/* Enable AVI infoframe */
+	rc = ipil_hdmi_enable_infoframe(&ctx->dev, type, &avi_pkt, freq);
+
+	return rc;
+}
+
+/*
+ * Description: disable all infoframes
+ *
+ * @context:        hdmi_context
+ *
+ * Returns:     OTM_HDMI_ERR_NULL_ARG on NULL parameters
+ *              OTM_HDMI_SUCCESS on success
+*/
+otm_hdmi_ret_t otm_hdmi_disable_all_infoframes(void *context)
+{
+	hdmi_context_t *ctx = (hdmi_context_t *)context;
+	if (!ctx)
+		return OTM_HDMI_ERR_NULL_ARG;
+
+	return ipil_hdmi_disable_all_infoframes(&ctx->dev);
+}
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/pil/common/infoframes_api.h b/drivers/external_drivers/intel_media/otm_hdmi/pil/common/infoframes_api.h
new file mode 100644
index 0000000..ea3429cc
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/pil/common/infoframes_api.h
@@ -0,0 +1,83 @@
+/*
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  Contact Information:
+
+  Intel Corporation
+  2200 Mission College Blvd.
+  Santa Clara, CA  95054
+
+  BSD LICENSE
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#ifndef _INFOFRAMES_API_H
+#define _INFOFRAMES_API_H
+
+#include "hdmi_internal.h"
+#include "otm_hdmi_defs.h"
+
+
+/*
+ * Description: hdmi_packet_check_type
+ *
+ * @p:			hdmi packet
+ * @type:               hdmi packet type
+ *
+ * Returns:     OTM_HDMI_SUCCESS on success
+ *              OTM_HDMI_ERR_FAILED on incorrect packet arguments
+ */
+extern otm_hdmi_ret_t hdmi_packet_check_type(otm_hdmi_packet_t *p,
+					hdmi_packet_type_t type);
+
+#endif /* _INFOFRAMES_API_H */
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/pil/common/mode_info.c b/drivers/external_drivers/intel_media/otm_hdmi/pil/common/mode_info.c
new file mode 100644
index 0000000..5674e3e
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/pil/common/mode_info.c
@@ -0,0 +1,1151 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license.  When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+
+ Copyright(c) 2006-2011 Intel Corporation. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ The full GNU General Public License is included in this distribution
+ in the file called LICENSE.GPL.
+
+ Contact Information:
+      Intel Corporation
+      2200 Mission College Blvd.
+      Santa Clara, CA  97052
+
+ BSD LICENSE
+
+ Copyright(c) 2006-2011 Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+   - Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+   - Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in
+     the documentation and/or other materials provided with the
+     distribution.
+   - Neither the name of Intel Corporation nor the names of its
+     contributors may be used to endorse or promote products derived
+     from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#include "hdmi_internal.h"
+#include "otm_hdmi_defs.h"
+#include "hdmi_timings.h"
+/*-----------------------------------------------------------------------------
+			480P TIMINGS
+-----------------------------------------------------------------------------*/
+#define TIMING_640x480p5994_60 \
+		640,		/* width	*/ \
+		480,		/* height	*/ \
+		OTM_HDMI_REFRESH_60, /* refresh rate */ \
+		25200,		/* clock	*/ \
+		800,		/* htotal	*/ \
+		640,		/* hblank start */ \
+		800,		/* hblank end	*/ \
+		656,		/* hsync start	*/ \
+		752,		/* hsync end	*/ \
+		525,		/* vtotal	*/ \
+		480,		/* vblank start */ \
+		525,		/* vblank end	*/ \
+		490,		/* vsync start	*/ \
+		492		/* vsync end	*/
+
+
+#define TIMING_720x480p5994_60 \
+		720,		/* width	*/ \
+		480,		/* height	*/ \
+		OTM_HDMI_REFRESH_60, /* refresh rate */ \
+		27027,		/* clock	*/ \
+		858,		/* htotal	*/ \
+		720,		/* hblank start */ \
+		858,		/* hblank end	*/ \
+		736,		/* hsync start	*/ \
+		798,		/* hsync end	*/ \
+		525,		/* vtotal	*/ \
+		480,		/* vblank start */ \
+		525,		/* vblank end	*/ \
+		489,		/* vsync start	*/ \
+		495		/* vsync end	*/
+
+/*-----------------------------------------------------------------------------
+			576P TIMINGS
+-----------------------------------------------------------------------------*/
+#define TIMING_720x576p50 \
+		720,		/* width	*/ \
+		576,		/* height	*/ \
+		OTM_HDMI_REFRESH_50, /* refresh rate */ \
+		27000,		/* clock	*/ \
+		864,		/* htotal	*/ \
+		720,		/* hblank start */ \
+		864,		/* hblank end	*/ \
+		732,		/* hsync start	*/ \
+		796,		/* hsync end	*/ \
+		625,		/* vtotal	*/ \
+		576,		/* vblank start */ \
+		625,		/* vblank end	*/ \
+		581,		/* vsync start	*/ \
+		586		/* vsync end	*/
+
+/*-----------------------------------------------------------------------------
+			720I TIMINGS
+-----------------------------------------------------------------------------*/
+#define TIMING_720_1440x480i5994_60 \
+		1440,		/* width	*/ \
+		240,		/* height	*/ \
+		OTM_HDMI_REFRESH_60, /* refresh rate */ \
+		27027,		/* clock	*/ \
+		1716,		/* htotal	*/ \
+		1440,		/* hblank start */ \
+		1716,		/* hblank end	*/ \
+		1478,		/* hsync start	*/ \
+		1602,		/* hsync end	*/ \
+		262,		/* vtotal	*/ \
+		240,		/* vblank start */ \
+		262,		/* vblank end	*/ \
+		244,		/* vsync start	*/ \
+		247		/* vsync end	*/
+
+#define TIMING_720_1440x576i50 \
+		1440,		/* width	*/ \
+		288,		/* height	*/ \
+		OTM_HDMI_REFRESH_50, /* refresh rate */ \
+		27000,		/* clock	*/ \
+		1728,		/* htotal	*/ \
+		1440,		/* hblank start */ \
+		1728,		/* hblank end	*/ \
+		1464,		/* hsync start	*/ \
+		1590,		/* hsync end	*/ \
+		312,		/* vtotal	*/ \
+		288,		/* vblank start */ \
+		312,		/* vblank end	*/ \
+		290,		/* vsync start	*/ \
+		293		/* vsync end	*/
+
+/*-----------------------------------------------------------------------------
+			720P TIMINGS
+-----------------------------------------------------------------------------*/
+#define TIMING_1280x720p50 \
+		1280,		/* width	*/ \
+		720,		/* height	*/ \
+		OTM_HDMI_REFRESH_50, /* refresh rate */ \
+		74250,		/* clock	*/ \
+		1980,		/* htotal	*/ \
+		1280,		/* hblank start */ \
+		1980,		/* hblank end	*/ \
+		1720,		/* hsync start	*/ \
+		1760,		/* hsync end	*/ \
+		750,		/* vtotal	*/ \
+		720,		/* vblank start */ \
+		750,		/* vblank end	*/ \
+		725,		/* vsync start	*/ \
+		730		/* vsync end	*/
+
+#define TIMING_1280x720p50_FP \
+		1280,		/* width	*/ \
+		720,		/* height	*/ \
+		OTM_HDMI_REFRESH_50, /* refresh rate */ \
+		148500,	 /* clock	*/ \
+		1980,		/* htotal	*/ \
+		1280,		/* hblank start */ \
+		1980,		/* hblank end	*/ \
+		1720,		/* hsync start	*/ \
+		1760,		/* hsync end	*/ \
+		750,		/* vtotal	*/ \
+		720,		/* vblank start */ \
+		750,		/* vblank end	*/ \
+		725,		/* vsync start	*/ \
+		730		/* vsync end	*/
+
+#define TIMING_1280x720p50_FP2 \
+		1280,		/* width	*/ \
+		720,		/* height	*/ \
+		OTM_HDMI_REFRESH_50, /* refresh rate */ \
+		148500,		/* clock	*/ \
+		1980,		/* htotal	*/ \
+		1280,		/* hblank start */ \
+		1980,		/* hblank end	*/ \
+		1720,		/* hsync start	*/ \
+		1760,		/* hsync end	*/ \
+		1500,		/* vtotal	*/ \
+		1470,		/* vblank start */ \
+		1500,		/* vblank end	*/ \
+		1475,		/* vsync start	*/ \
+		1480		/* vsync end	*/
+
+#define TIMING_1280x720p50_FSEQ \
+		1280,		/* width	*/ \
+		720,		/* height	*/ \
+		OTM_HDMI_REFRESH_50, /* refresh rate */ \
+		148500,		/* clock	*/ \
+		1980,		/* htotal	*/ \
+		1280,		/* hblank start */ \
+		1980,		/* hblank end	*/ \
+		1720,		/* hsync start	*/ \
+		1760,		/* hsync end	*/ \
+		750,		/* vtotal	*/ \
+		720,		/* vblank start */ \
+		750,		/* vblank end	*/ \
+		725,		/* vsync start	*/ \
+		730		/* vsync end	*/
+
+#define TIMING_1280x720p5994_60 \
+		1280,		/* width	*/ \
+		720,		/* height	*/ \
+		OTM_HDMI_REFRESH_60, /* refresh rate */ \
+		74250,		/* clock	*/ \
+		1650,		/* htotal	*/ \
+		1280,		/* hblank start */ \
+		1650,		/* hblank end	*/ \
+		1390,		/* hsync start	*/ \
+		1430,		/* hsync end	*/ \
+		750,		/* vtotal	*/ \
+		720,		/* vblank start */ \
+		750,		/* vblank end	*/ \
+		725,		/* vsync start	*/ \
+		730		/* vsync end	*/
+
+#define TIMING_1280x720p5994_60_FP \
+		1280,		/* width	*/ \
+		720,		/* height	*/ \
+		OTM_HDMI_REFRESH_60, /* refresh rate */ \
+		148500,		/* clock	*/ \
+		1650,		/* htotal	*/ \
+		1280,		/* hblank start */ \
+		1650,		/* hblank end	*/ \
+		1390,		/* hsync start	*/ \
+		1430,		/* hsync end	*/ \
+		750,		/* vtotal	*/ \
+		720,		/* vblank start */ \
+		750,		/* vblank end	*/ \
+		725,		/* vsync start	*/ \
+		730		/* vsync end	*/
+
+#define TIMING_1280x720p5994_60_FP2 \
+		1280,		/* width	*/ \
+		720,		/* height	*/ \
+		OTM_HDMI_REFRESH_60, /* refresh rate */ \
+		148500,		/* clock	*/ \
+		1650,		/* htotal	*/ \
+		1280,		/* hblank start */ \
+		1650,		/* hblank end	*/ \
+		1390,		/* hsync start	*/ \
+		1430,		/* hsync end	*/ \
+		1500,		/* vtotal	*/ \
+		1470,		/* vblank start */ \
+		1500,		/* vblank end	*/ \
+		1475,		/* vsync start	*/ \
+		1480		/* vsync end	*/
+
+#define TIMING_1280x720p5994_60_FSEQ \
+		1280,		/* width	*/ \
+		720,		/* height	*/ \
+		OTM_HDMI_REFRESH_60, /* refresh rate */ \
+		148500,	 /* clock	*/ \
+		1650,		/* htotal	*/ \
+		1280,		/* hblank start */ \
+		1650,		/* hblank end	*/ \
+		1390,		/* hsync start	*/ \
+		1430,		/* hsync end	*/ \
+		750,		/* vtotal	*/ \
+		720,		/* vblank start */ \
+		750,		/* vblank end	*/ \
+		725,		/* vsync start	*/ \
+		730		/* vsync end	*/
+
+/*-----------------------------------------------------------------------------
+			1080I TIMINGS
+-----------------------------------------------------------------------------*/
+#define TIMING_1920x1080i50 \
+		1920,		/* width	*/ \
+		540,		/* height	*/ \
+		OTM_HDMI_REFRESH_50, /* refresh rate */ \
+		74250,		/* clock	*/ \
+		2640,		/* htotal	*/ \
+		1920,		/* hblank start */ \
+		2640,		/* hblank end	*/ \
+		2448,		/* hsync start	*/ \
+		2492,		/* hsync end	*/ \
+		562,		/* vtotal	*/ \
+		540,		/* vblank start */ \
+		562,		/* vblank end	*/ \
+		542,		/* vsync start	*/ \
+		547		/* vsync end	*/
+
+#define TIMING_1920x1080i50_FP \
+		1920,		/* width	*/ \
+		540,		/* height	*/ \
+		OTM_HDMI_REFRESH_50, /* refresh rate */ \
+		148500,		/* clock	*/ \
+		2640,		/* htotal	*/ \
+		1920,		/* hblank start */ \
+		2640,		/* hblank end	*/ \
+		2448,		/* hsync start	*/ \
+		2492,		/* hsync end	*/ \
+		562,		/* vtotal	*/ \
+		540,		/* vblank start */ \
+		562,		/* vblank end	*/ \
+		542,		/* vsync start	*/ \
+		547		/* vsync end	*/
+
+#define TIMING_1920x1080i5994_60 \
+		1920,		/* width	*/ \
+		540,		/* height	*/ \
+		OTM_HDMI_REFRESH_60, /* refresh rate */ \
+		74250,		/* clock	*/ \
+		2200,		/* htotal	*/ \
+		1920,		/* hblank start */ \
+		2200,		/* hblank end	*/ \
+		2008,		/* hsync start	*/ \
+		2052,		/* hsync end	*/ \
+		562,		/* vtotal	*/ \
+		540,		/* vblank start */ \
+		562,		/* vblank end	*/ \
+		542,		/* vsync start	*/ \
+		547		/* vsync end	*/
+
+#define TIMING_1920x1080i5994_60_FP \
+		1920,		/* width	*/ \
+		540,		/* height	*/ \
+		OTM_HDMI_REFRESH_60, /* refresh rate */ \
+		148500,		/* clock	*/ \
+		2200,		/* htotal	*/ \
+		1920,		/* hblank start */ \
+		2200,		/* hblank end	*/ \
+		2008,		/* hsync start	*/ \
+		2052,		/* hsync end	*/ \
+		562,		/* vtotal	*/ \
+		540,		/* vblank start */ \
+		562,		/* vblank end	*/ \
+		542,		/* vsync start	*/ \
+		547		/* vsync end	*/
+
+/*-----------------------------------------------------------------------------
+			1080P TIMINGS
+-----------------------------------------------------------------------------*/
+#define TIMING_1920x1080p24 \
+		1920,		/* width	*/ \
+		1080,		/* height	*/ \
+		OTM_HDMI_REFRESH_24,	/* refresh	*/ \
+		74250,		/* clock	*/ \
+		2750,		/* htotal	*/ \
+		1920,		/* hblank start */ \
+		2750,		/* hblank end	*/ \
+		2558,		/* hsync start	*/ \
+		2602,		/* hsync end	*/ \
+		1125,		/* vtotal	*/ \
+		1080,		/* vblank start */ \
+		1125,		/* vblank end	*/ \
+		1084,		/* vsync start	*/ \
+		1089		/* vsync end	*/
+
+#define TIMING_1920x1080p24_FP \
+		1920,		/* width	*/ \
+		1080,		/* height	*/ \
+		OTM_HDMI_REFRESH_24,	/* refresh	*/ \
+		148500,	 /* clock	*/ \
+		2750,		/* htotal	*/ \
+		1920,		/* hblank start */ \
+		2750,		/* hblank end	*/ \
+		2558,		/* hsync start	*/ \
+		2602,		/* hsync end	*/ \
+		1125,		/* vtotal	*/ \
+		1080,		/* vblank start */ \
+		1125,		/* vblank end	*/ \
+		1084,		/* vsync start	*/ \
+		1089		/* vsync end	*/
+
+#define TIMING_1920x1080p24_FP2 \
+		1920,		/* width	*/ \
+		1080,		/* height	*/ \
+		OTM_HDMI_REFRESH_24,	/* refresh	*/ \
+		148500,		/* clock	*/ \
+		2750,		/* htotal	*/ \
+		1920,		/* hblank start */ \
+		2750,		/* hblank end	*/ \
+		2558,		/* hsync start	*/ \
+		2602,		/* hsync end	*/ \
+		2250,		/* vtotal	*/ \
+		2205,		/* vblank start */ \
+		2250,		/* vblank end	*/ \
+		2209,		/* vsync start	*/ \
+		2214		/* vsync end	*/
+
+#define TIMING_1920x1080p24_FSEQ \
+		1920,		/* width	*/ \
+		1080,		/* height	*/ \
+		OTM_HDMI_REFRESH_24,	/* refresh	*/ \
+		148500,		/* clock	*/ \
+		2750,		/* htotal	*/ \
+		1920,		/* hblank start */ \
+		2750,		/* hblank end	*/ \
+		2558,		/* hsync start	*/ \
+		2602,		/* hsync end	*/ \
+		1125,		/* vtotal	*/ \
+		1080,		/* vblank start */ \
+		1125,		/* vblank end	*/ \
+		1084,		/* vsync start	*/ \
+		1089		/* vsync end	*/
+
+#define TIMING_1920x1080p25 \
+		1920,		/* width	*/ \
+		1080,		/* height	*/ \
+		OTM_HDMI_REFRESH_25,	/* refresh	*/ \
+		74250,		/* clock	*/ \
+		2640,		/* htotal	*/ \
+		1920,		/* hblank start */ \
+		2640,		/* hblank end	*/ \
+		2448,		/* hsync start	*/ \
+		2492,		/* hsync end	*/ \
+		1125,		/* vtotal	*/ \
+		1080,		/* vblank start */ \
+		1125,		/* vblank end	*/ \
+		1084,		/* vsync start	*/ \
+		1089		/* vsync end	*/
+
+#define TIMING_1920x1080p30 \
+		1920,		/* width	*/ \
+		1080,		/* height	*/ \
+		OTM_HDMI_REFRESH_30,	/* refresh	*/ \
+		74250,		/* clock	*/ \
+		2200,		/* htotal	*/ \
+		1920,		/* hblank start */ \
+		2200,		/* hblank end	*/ \
+		2008,		/* hsync start	*/ \
+		2052,		/* hsync end	*/ \
+		1125,		/* vtotal	*/ \
+		1080,		/* vblank start */ \
+		1125,		/* vblank end	*/ \
+		1084,		/* vsync start	*/ \
+		1089		/* vsync end	*/
+
+#define TIMING_1920x1080p30_FP \
+		1920,		/* width	*/ \
+		1080,		/* height	*/ \
+		OTM_HDMI_REFRESH_30,	/* refresh	*/ \
+		148500,	 /* clock	*/ \
+		2200,		/* htotal	*/ \
+		1920,		/* hblank start */ \
+		2200,		/* hblank end	*/ \
+		2008,		/* hsync start	*/ \
+		2052,		/* hsync end	*/ \
+		1125,		/* vtotal	*/ \
+		1080,		/* vblank start */ \
+		1125,		/* vblank end	*/ \
+		1084,		/* vsync start	*/ \
+		1089		/* vsync end	*/
+
+#define TIMING_1920x1080p30_FP2 \
+		1920,		/* width	*/ \
+		1080,		/* height	*/ \
+		OTM_HDMI_REFRESH_30,	/* refresh	*/ \
+		148500,		/* clock	*/ \
+		2200,		/* htotal	*/ \
+		1920,		/* hblank start */ \
+		2200,		/* hblank end	*/ \
+		2008,		/* hsync start	*/ \
+		2052,		/* hsync end	*/ \
+		2250,		/* vtotal	*/ \
+		2205,		/* vblank start */ \
+		2250,		/* vblank end	*/ \
+		2209,		/* vsync start	*/ \
+		2214		/* vsync end	*/
+
+#define TIMING_1920x1080p48 \
+		1920,		/* width	*/ \
+		1080,		/* height	*/ \
+		OTM_HDMI_REFRESH_48,	/* refresh	*/ \
+		148500,		/* clock	*/ \
+		2750,		/* htotal	*/ \
+		1920,		/* hblank start */ \
+		2750,		/* hblank end	*/ \
+		2558,		/* hsync start	*/ \
+		2602,		/* hsync end	*/ \
+		1125,		/* vtotal	*/ \
+		1080,		/* vblank start */ \
+		1125,		/* vblank end	*/ \
+		1084,		/* vsync start	*/ \
+		1089		/* vsync end	*/
+
+#define TIMING_1920x1080p50 \
+		1920,		/* width	*/ \
+		1080,		/* height	*/ \
+		OTM_HDMI_REFRESH_50,	/* refresh	*/ \
+		148500,		/* clock	*/ \
+		2640,		/* htotal	*/ \
+		1920,		/* hblank start */ \
+		2640,		/* hblank end	*/ \
+		2448,		/* hsync start	*/ \
+		2492,		/* hsync end	*/ \
+		1125,		/* vtotal	*/ \
+		1080,		/* vblank start */ \
+		1125,		/* blank end	*/ \
+		1084,		/* vsync start	*/ \
+		1089		/* vsync end	*/
+
+#define TIMING_1920x1080p5994_60	\
+		1920,		/* width	*/ \
+		1080,		/* height	*/ \
+		OTM_HDMI_REFRESH_60,	/* refresh	*/ \
+		148500,		/* clock	*/ \
+		2200,		/* htotal	*/ \
+		1920,		/* hblank start */ \
+		2200,		/* hblank end	*/ \
+		2008,		/* hsync start	*/ \
+		2052,		/* hsync end	*/ \
+		1125,		/* vtotal	*/ \
+		1080,		/* vblank start */ \
+		1125,		/* blank end	*/ \
+		1084,		/* vsync start	*/ \
+		1089		/* vsync end	*/
+
+/*-----------------------------------------------------------------------------
+			Panel TIMINGS
+-----------------------------------------------------------------------------*/
+
+#define TIMING_1920x1080p60_PANEL \
+		1920,		/* width	*/ \
+		1080,		/* height	*/ \
+		OTM_HDMI_REFRESH_60,	/* refresh	*/ \
+		148500,		/* clock	*/ \
+		2184,		/* htotal	*/ \
+		1920,		/* hblank start*/ \
+		2184,		/* hblank end	*/ \
+		1920 + 32,	/* hsync start */ \
+		1920 + 32 + 32, /* hsync end	*/ \
+		1134,		/* vtotal	*/ \
+		1080,		/* vblank start*/ \
+		1134,		/* vblank end	*/ \
+		1080 + 34,	/* vsync start */ \
+		1080 + 34 + 4	/* vsync end	*/
+
+#define TIMING_1920x1080p50_PANEL \
+		1920,		/* width	*/ \
+		1080,		/* height	*/ \
+		OTM_HDMI_REFRESH_50,	/* refresh	*/ \
+		148500,		/* clock	*/ \
+		2184,		/* htotal	*/ \
+		1920,		/* hblank start*/ \
+		2184,		/* hblank end	*/ \
+		1920 + 32,	/* hsync start */ \
+		1920 + 32 + 32, /* hsync end	*/ \
+		1360,		/* vtotal	*/ \
+		1080,		/* vblank start*/ \
+		1360,		/* vblank end	*/ \
+		1080 + 260,	/* vsync start*/ \
+		1080 + 260 + 4	/* vsync end	*/
+
+#define TIMING_1920x1080p48_PANEL \
+		1920,		/* width	*/ \
+		1080,		/* height	*/ \
+		OTM_HDMI_REFRESH_48,	/* refresh	*/ \
+		148500,		/* clock	*/ \
+		2184,		/* htotal	*/ \
+		1920,		/* hblank start*/ \
+		2184,		/* hblank end	*/ \
+		1920 + 32,	/* hsync start */ \
+		1920 + 32 + 32, /* hsync end	*/ \
+		1417,		/* vtotal	*/ \
+		1080,		/* vblank start*/ \
+		1417,		/* vblank end	*/ \
+		1080 + 317,	/* vsync start */ \
+		1080 + 317 + 4	/* vsync end	*/
+
+#define TIMING_1920x1080p30_PANEL_FS \
+		1920,		/* width	*/ \
+		1080,		/* height	*/ \
+		OTM_HDMI_REFRESH_30,	/* refresh	*/ \
+		148500,		/* clock	*/ \
+		2184,		/* htotal	*/ \
+		1920,		/* hblank start*/ \
+		2184,		/* hblank end	*/ \
+		1920 + 32,	/* hsync start */ \
+		1920 + 32 + 32, /* hsync end	*/ \
+		1134,		/* vtotal	*/ \
+		1080,		/* vblank start*/ \
+		1134,		/* vblank end	*/ \
+		1080 + 34,	/* vsync start */ \
+		1080 + 34 + 4	/* vsync end	*/
+
+#define TIMING_1920x1080p25_PANEL_FS \
+		1920,		/* width	*/ \
+		1080,		/* height	*/ \
+		OTM_HDMI_REFRESH_25,	/* refresh	*/ \
+		148500,		/* clock	*/ \
+		2184,		/* htotal	*/ \
+		1920,		/* hblank start*/ \
+		2184,		/* hblank end	*/ \
+		1920 + 32,	/* hsync start */ \
+		1920 + 32 + 32, /* hsync end	*/ \
+		1360,		/* vtotal	*/ \
+		1080,		/* vblank start*/ \
+		1360,		/* vblank end	*/ \
+		1080 + 260,	/* vsync start*/ \
+		1080 + 260 + 4	/* vsync end	*/
+
+#define TIMING_1920x1080p24_PANEL_FS \
+		1920,		/* width	*/ \
+		1080,		/* height	*/ \
+		OTM_HDMI_REFRESH_24,	/* refresh	*/ \
+		148500,		/* clock	*/ \
+		2184,		/* htotal	*/ \
+		1920,		/* hblank start*/ \
+		2184,		/* hblank end	*/ \
+		1920 + 32,	/* hsync start */ \
+		1920 + 32 + 32, /* hsync end	*/ \
+		1417,		/* vtotal	*/ \
+		1080,		/* vblank start*/ \
+		1417,		/* vblank end	*/ \
+		1080 + 317,	/* vsync start */ \
+		1080 + 317 + 4	/* vsync end	*/
+
+#define TIMING_960x1080p60_PANEL_FS \
+		960,		/* width	*/ \
+		1080,		/* height	*/ \
+		OTM_HDMI_REFRESH_60,	/* refresh	*/ \
+		148500,		/* clock	*/ \
+		1092,		/* htotal	*/ \
+		960,		/* hblank start*/ \
+		1092,		/* hblank end	*/ \
+		960 + 24,	/* hsync start */ \
+		960 + 24 + 32,	/* hsync end	*/ \
+		1134,		/* vtotal	*/ \
+		1080,		/* vblank start*/ \
+		1134,		/* vblank end	*/ \
+		1080 + 34,	/* vsync start */ \
+		1080 + 34 + 4	/* vsync end	*/
+
+#define TIMING_960x1080p50_PANEL_FS \
+		960,		/* width	*/ \
+		1080,		/* height	*/ \
+		OTM_HDMI_REFRESH_50,	/* refresh	*/ \
+		148500,		/* clock	*/ \
+		1092,		/* htotal	*/ \
+		960,		/* hblank start*/ \
+		1092,		/* hblank end	*/ \
+		960 + 24,	/* hsync start */ \
+		960 + 24 + 32,	/* hsync end	*/ \
+		1360,		/* vtotal	*/ \
+		1080,		/* vblank start*/ \
+		1360,		/* vblank end	*/ \
+		1080 + 260,	/* vsync start */ \
+		1080 + 260 + 4	/* vsync end	*/
+
+#define TIMING_1920x540p60_PANEL_FS \
+		1920,		/* width	*/ \
+		540,		/* height	*/ \
+		OTM_HDMI_REFRESH_60,	/* refresh	*/ \
+		148500,		/* clock	*/ \
+		2184,		/* htotal	*/ \
+		1920,		/* hblank start*/ \
+		2184,		/* hblank end	*/ \
+		1920 + 32,	/* hsync start */ \
+		1920 + 32 + 32, /* hsync end	*/ \
+		567,		/* vtotal	*/ \
+		540,		/* vblank start*/ \
+		567,		/* vblank end	*/ \
+		540 + 17,	/* vsync start */ \
+		540 + 17 + 4	/* vsync end	*/
+
+#define TIMING_1920x540p50_PANEL_FS \
+		1920,		/* width	*/ \
+		540,		/* height	*/ \
+		OTM_HDMI_REFRESH_50,	/* refresh	*/ \
+		148500,		/* clock	*/ \
+		2184,		/* htotal	*/ \
+		1920,		/* hblank start*/ \
+		2184,		/* hblank end	*/ \
+		1920 + 32,	/* hsync start */ \
+		1920 + 32 + 32, /* hsync end	*/ \
+		680,		/* vtotal	*/ \
+		540,		/* vblank start*/ \
+		680,		/* vblank end	*/ \
+		540 + 130,	/* vsync start */ \
+		540 + 130 + 4	/* vsync end	*/
+
+#define TIMING_1280x720p60_PANEL_FS \
+		1280,		/* width	*/ \
+		720,		/* height	*/ \
+		OTM_HDMI_REFRESH_60,	/* refresh	*/ \
+		148500,		/* clock	*/ \
+		1650,		/* htotal	*/ \
+		1280,		/* hblank start*/ \
+		1650,		/* hblank end	*/ \
+		1390,		/* hsync start */ \
+		1422,		/* hsync end	*/ \
+		750,		/* vtotal	*/ \
+		720,		/* vblank start*/ \
+		750,		/* vblank end	*/ \
+		735,		/* vsync start */ \
+		739		/* vsync end	*/
+
+#define TIMING_1280x720p50_PANEL_FS \
+		1280,		/* width	*/ \
+		720,		/* height	*/ \
+		OTM_HDMI_REFRESH_50,	/* refresh	*/ \
+		148500,		/* clock	*/ \
+		1650,		/* htotal	*/ \
+		1280,		/* hblank start*/ \
+		1650,		/* hblank end	*/ \
+		1280 + 110,	/* hsync start */ \
+		1280 + 110 + 32,/* hsync end	*/ \
+		900,		/* vtotal	*/ \
+		720,		/* vblank start*/ \
+		900,		/* vblank end	*/ \
+		720 + 165,	/* vsync start */ \
+		720 + 165 + 4	/* vsync end	*/
+
+
+const otm_hdmi_timing_t MODE_640x480p5994_60 = {
+	TIMING_640x480p5994_60,
+	0,			/* flags */
+	OTM_HDMI_STEREO_NONE,	/* stereo_type */
+	1			/* Metadata VIC */
+};
+
+const otm_hdmi_timing_t MODE_720x480p5994_60 = {
+	TIMING_720x480p5994_60,
+	0,			/* flags */
+	OTM_HDMI_STEREO_NONE,	/* stereo_type */
+	2			/* Metadata VIC */
+};
+
+const otm_hdmi_timing_t MODE_720x480p5994_60__16by9 = {
+	TIMING_720x480p5994_60,
+	PD_AR_16_BY_9,		/* flags */
+	OTM_HDMI_STEREO_NONE,	/* stereo_type */
+	3			/* Metadata VIC */
+};
+
+const otm_hdmi_timing_t MODE_1280x720p5994_60 = {
+	TIMING_1280x720p5994_60,
+	PD_AR_16_BY_9 | PD_HSYNC_HIGH | PD_VSYNC_HIGH,
+				/* flags */
+	OTM_HDMI_STEREO_NONE,	/* stereo_type */
+	4			/* Metadata VIC */
+};
+
+const otm_hdmi_timing_t MODE_1920x1080i5994_60 = {
+	TIMING_1920x1080i5994_60,
+	PD_SCAN_INTERLACE | PD_AR_16_BY_9 | PD_HSYNC_HIGH | PD_VSYNC_HIGH,
+				/* flags */
+	OTM_HDMI_STEREO_NONE,	/* stereo_type */
+	5			/* Metadata VIC */
+};
+
+const otm_hdmi_timing_t MODE_1920x1080i5994_60__FP = {
+	TIMING_1920x1080i5994_60_FP,
+	PD_SCAN_INTERLACE | PD_AR_16_BY_9 | PD_HSYNC_HIGH | PD_VSYNC_HIGH,
+					/* flags */
+	OTM_HDMI_STEREO_FRAME_PACKING,	/* stereo_type */
+	5				/* Metadata VIC */
+};
+
+const otm_hdmi_timing_t MODE_720_1440x480i5994_60 = {
+	TIMING_720_1440x480i5994_60,
+	PD_SCAN_INTERLACE,	/* flags */
+	OTM_HDMI_STEREO_NONE,	/* stereo_type */
+	6			/* Metadata VIC */
+};
+
+const otm_hdmi_timing_t MODE_720_1440x480i5994_60__16by9 = {
+	TIMING_720_1440x480i5994_60,
+	PD_SCAN_INTERLACE | PD_AR_16_BY_9,
+				/* flags */
+	OTM_HDMI_STEREO_NONE,	/* stereo_type */
+	7			/* Metadata VIC */
+};
+
+const otm_hdmi_timing_t MODE_1920x1080p5994_60 = {
+	TIMING_1920x1080p5994_60,
+	PD_AR_16_BY_9 | PD_HSYNC_HIGH | PD_VSYNC_HIGH,
+				/* flags */
+	OTM_HDMI_STEREO_NONE,	/* stereo_type */
+	16			/* Metadata VIC */
+};
+
+const otm_hdmi_timing_t MODE_720x576p50 = {
+	TIMING_720x576p50,
+	0,			/* flags */
+	OTM_HDMI_STEREO_NONE,	/* stereo_type */
+	17			/* Metadata VIC */
+};
+
+const otm_hdmi_timing_t MODE_720x576p50__16by9 = {
+	TIMING_720x576p50,
+	PD_AR_16_BY_9,		/* flags */
+	OTM_HDMI_STEREO_NONE,	/* stereo_type */
+	18			/* Metadata VIC */
+};
+
+const otm_hdmi_timing_t MODE_1280x720p50 = {
+	TIMING_1280x720p50,
+	PD_AR_16_BY_9 | PD_HSYNC_HIGH | PD_VSYNC_HIGH,
+				/* flags */
+	OTM_HDMI_STEREO_NONE,	/* stereo_type */
+	19			/* Metadata VIC */
+};
+
+const otm_hdmi_timing_t MODE_1920x1080i50 = {
+	TIMING_1920x1080i50,
+	PD_SCAN_INTERLACE | PD_AR_16_BY_9 | PD_HSYNC_HIGH | PD_VSYNC_HIGH,
+				/* flags */
+	OTM_HDMI_STEREO_NONE,	/* stereo_type */
+	20			/* Metadata VIC */
+};
+
+const otm_hdmi_timing_t MODE_1920x1080i50__FP = {
+	TIMING_1920x1080i50_FP,
+	PD_SCAN_INTERLACE | PD_AR_16_BY_9 | PD_HSYNC_HIGH | PD_VSYNC_HIGH,
+					/* flags */
+	OTM_HDMI_STEREO_FRAME_PACKING,	/* stereo_type */
+	20				/* Metadata VIC */
+};
+
+const otm_hdmi_timing_t MODE_720_1440x576i50 = {
+	TIMING_720_1440x576i50,
+	PD_SCAN_INTERLACE,	/* flags */
+	OTM_HDMI_STEREO_NONE,	/* stereo_type */
+	21			/* Metadata VIC */
+};
+
+const otm_hdmi_timing_t MODE_720_1440x576i50__16by9 = {
+	TIMING_720_1440x576i50,
+	PD_SCAN_INTERLACE | PD_AR_16_BY_9,
+				/* flags */
+	OTM_HDMI_STEREO_NONE,	/* stereo_type */
+	22			/* Metadata VIC */
+};
+
+const otm_hdmi_timing_t MODE_1920x1080p50 = {
+	TIMING_1920x1080p50,
+	PD_AR_16_BY_9 | PD_HSYNC_HIGH | PD_VSYNC_HIGH,
+				/* flags */
+	OTM_HDMI_STEREO_NONE,	/* stereo_type */
+	31			/* Metadata VIC */
+};
+
+const otm_hdmi_timing_t MODE_1920x1080p24 = {
+	TIMING_1920x1080p24,
+	PD_AR_16_BY_9 | PD_HSYNC_HIGH | PD_VSYNC_HIGH,
+				/* flags */
+	OTM_HDMI_STEREO_NONE,	/* stereo_type */
+	32			/* Metadata VIC */
+};
+
+const otm_hdmi_timing_t MODE_1920x1080p25 = {
+	TIMING_1920x1080p25,
+	PD_AR_16_BY_9 | PD_HSYNC_HIGH | PD_VSYNC_HIGH,
+				/* flags */
+	OTM_HDMI_STEREO_NONE,	/* stereo_type */
+	33			/* Metadata VIC */
+};
+
+const otm_hdmi_timing_t MODE_1920x1080p30 = {
+	TIMING_1920x1080p30,
+	PD_AR_16_BY_9 | PD_HSYNC_HIGH | PD_VSYNC_HIGH,
+				/* flags */
+	OTM_HDMI_STEREO_NONE,	/* stereo_type */
+	34			/* Metadata VIC */
+};
+
+const otm_hdmi_timing_t MODE_1920x1080p30__FP2 = {
+	TIMING_1920x1080p30_FP2,
+	PD_AR_16_BY_9 | PD_HSYNC_HIGH | PD_VSYNC_HIGH,
+					/* flags */
+	OTM_HDMI_STEREO_FRAME_PACKING_2,/* stereo_type */
+	34				/* Metadata VIC */
+};
+
+const otm_hdmi_timing_t MODE_1920x1080p30__FP = {
+	TIMING_1920x1080p30_FP,
+	PD_AR_16_BY_9 | PD_HSYNC_HIGH | PD_VSYNC_HIGH,
+					/* flags */
+	OTM_HDMI_STEREO_FRAME_PACKING,	/* stereo_type */
+	34				/* Metadata VIC */
+};
+
+const otm_hdmi_timing_t MODE_1920x1080p30__TBH2 = {
+	TIMING_1920x1080p30,
+	PD_AR_16_BY_9 | PD_HSYNC_HIGH | PD_VSYNC_HIGH,
+						/* flags */
+	OTM_HDMI_STEREO_TOP_BOTTOM_HALF_2,	/* stereo_type */
+	34					/* Metadata VIC */
+};
+
+const otm_hdmi_timing_t MODE_1920x1080p48 = {
+	TIMING_1920x1080p48,
+	PD_AR_16_BY_9 | PD_HSYNC_HIGH | PD_VSYNC_HIGH,
+				/* flags */
+	OTM_HDMI_STEREO_NONE,	/* stereo_type */
+	32			/* Metadata VIC */
+};
+
+const otm_hdmi_timing_t MODE_1920x1080p24__FP2 = {
+	TIMING_1920x1080p24_FP2,
+	PD_AR_16_BY_9 | PD_HSYNC_HIGH | PD_VSYNC_HIGH,
+					/* flags */
+	OTM_HDMI_STEREO_FRAME_PACKING_2,/* stereo_type */
+	32				/* Metadata VIC */
+};
+
+const otm_hdmi_timing_t MODE_1920x1080p24__FP = {
+	TIMING_1920x1080p24_FP,
+	PD_AR_16_BY_9 | PD_HSYNC_HIGH | PD_VSYNC_HIGH,
+					/* flags */
+	OTM_HDMI_STEREO_FRAME_PACKING,	/* stereo_type */
+	32				/* Metadata VIC */
+};
+
+const otm_hdmi_timing_t MODE_1280x720p5994_60__FP2 = {
+	TIMING_1280x720p5994_60_FP2,
+	PD_AR_16_BY_9 | PD_HSYNC_HIGH | PD_VSYNC_HIGH,
+					/* flags */
+	OTM_HDMI_STEREO_FRAME_PACKING_2,/* stereo_type */
+	4				/* Metadata VIC */
+};
+
+const otm_hdmi_timing_t MODE_1280x720p5994_60__FP = {
+	TIMING_1280x720p5994_60_FP,
+	PD_AR_16_BY_9 | PD_HSYNC_HIGH | PD_VSYNC_HIGH,
+					/* flags */
+	OTM_HDMI_STEREO_FRAME_PACKING,	/* stereo_type */
+	4				/* Metadata VIC */
+};
+
+const otm_hdmi_timing_t MODE_1280x720p50__FP2 = {
+	TIMING_1280x720p50_FP2,
+	PD_AR_16_BY_9 | PD_HSYNC_HIGH | PD_VSYNC_HIGH,
+					/* flags */
+	OTM_HDMI_STEREO_FRAME_PACKING_2,/* stereo_type */
+	19				/* Metadata VIC */
+};
+
+const otm_hdmi_timing_t MODE_1280x720p50__FP = {
+	TIMING_1280x720p50_FP,
+	PD_AR_16_BY_9 | PD_HSYNC_HIGH | PD_VSYNC_HIGH,
+					/* flags */
+	OTM_HDMI_STEREO_FRAME_PACKING,	/* stereo_type */
+	19				/* Metadata VIC */
+};
+
+const otm_hdmi_timing_t MODE_1280x720p5994_60__SBSH2 = {
+	TIMING_1280x720p5994_60,
+	PD_AR_16_BY_9 | PD_HSYNC_HIGH | PD_VSYNC_HIGH,
+						/* flags */
+	OTM_HDMI_STEREO_SIDE_BY_SIDE_HALF_2,	/* stereo_type */
+	4					/* Metadata VIC */
+};
+
+const otm_hdmi_timing_t MODE_1280x720p50__SBSH2 = {
+	TIMING_1280x720p50,
+	PD_AR_16_BY_9 | PD_HSYNC_HIGH | PD_VSYNC_HIGH,
+						/* flags */
+	OTM_HDMI_STEREO_SIDE_BY_SIDE_HALF_2,	/* stereo_type */
+	19					/* Metadata VIC */
+};
+
+const otm_hdmi_timing_t MODE_1920x1080i5994_60__SBSH2 = {
+	TIMING_1920x1080i5994_60,
+	PD_SCAN_INTERLACE | PD_AR_16_BY_9 | PD_HSYNC_HIGH | PD_VSYNC_HIGH,
+						/* flags */
+	OTM_HDMI_STEREO_SIDE_BY_SIDE_HALF_2,	/* stereo_type */
+	5					/* Metadata VIC */
+};
+
+const otm_hdmi_timing_t MODE_1920x1080i50__SBSH2 = {
+	TIMING_1920x1080i50,
+	PD_SCAN_INTERLACE | PD_AR_16_BY_9 | PD_HSYNC_HIGH | PD_VSYNC_HIGH,
+						/* flags */
+	OTM_HDMI_STEREO_SIDE_BY_SIDE_HALF_2,	/* stereo_type */
+	20					/* Metadata VIC */
+};
+
+const otm_hdmi_timing_t MODE_1920x1080p5994_60__SBSH2 = {
+	TIMING_1920x1080p5994_60,
+	PD_AR_16_BY_9 | PD_HSYNC_HIGH | PD_VSYNC_HIGH,
+						/* flags */
+	OTM_HDMI_STEREO_SIDE_BY_SIDE_HALF_2,	/* stereo_type */
+	16					/* Metadata VIC */
+};
+
+const otm_hdmi_timing_t MODE_1920x1080p50__SBSH2 = {
+	TIMING_1920x1080p50,
+	PD_AR_16_BY_9 | PD_HSYNC_HIGH | PD_VSYNC_HIGH,
+						/* flags */
+	OTM_HDMI_STEREO_SIDE_BY_SIDE_HALF_2,	/* stereo_type */
+	31					/* Metadata VIC */
+};
+
+const otm_hdmi_timing_t MODE_1920x1080p24__SBSH2 = {
+	TIMING_1920x1080p24,
+	PD_AR_16_BY_9 | PD_HSYNC_HIGH | PD_VSYNC_HIGH,
+						/* flags */
+	OTM_HDMI_STEREO_SIDE_BY_SIDE_HALF_2,	/* stereo_type */
+	32					/* Metadata VIC */
+};
+
+const otm_hdmi_timing_t MODE_1280x720p5994_60__TBH2 = {
+	TIMING_1280x720p5994_60,
+	PD_AR_16_BY_9 | PD_HSYNC_HIGH | PD_VSYNC_HIGH,
+						/* flags */
+	OTM_HDMI_STEREO_TOP_BOTTOM_HALF_2,	/* stereo_type */
+	4					/* Metadata VIC */
+};
+
+const otm_hdmi_timing_t MODE_1280x720p50__TBH2 = {
+	TIMING_1280x720p50,
+	PD_AR_16_BY_9 | PD_HSYNC_HIGH | PD_VSYNC_HIGH,
+						/* flags */
+	OTM_HDMI_STEREO_TOP_BOTTOM_HALF_2,	/* stereo_type */
+	19					/* Metadata VIC */
+};
+
+const otm_hdmi_timing_t MODE_1920x1080p5994_60__TBH2 = {
+	TIMING_1920x1080p5994_60,
+	PD_AR_16_BY_9 | PD_HSYNC_HIGH | PD_VSYNC_HIGH,
+						/* flags */
+	OTM_HDMI_STEREO_TOP_BOTTOM_HALF_2,	/* stereo_type */
+	16					/* Metadata VIC */
+};
+
+const otm_hdmi_timing_t MODE_1920x1080p50__TBH2 = {
+	TIMING_1920x1080p50,
+	PD_AR_16_BY_9 | PD_HSYNC_HIGH | PD_VSYNC_HIGH,
+						/* flags */
+	OTM_HDMI_STEREO_TOP_BOTTOM_HALF_2,	/* stereo_type */
+	31					/* Metadata VIC */
+};
+
+const otm_hdmi_timing_t MODE_1920x1080p24__TBH2 = {
+	TIMING_1920x1080p24,
+	PD_AR_16_BY_9 | PD_HSYNC_HIGH | PD_VSYNC_HIGH,
+						/* flags */
+	OTM_HDMI_STEREO_TOP_BOTTOM_HALF_2,	/* stereo_type */
+	32					/* Metadata VIC */
+};
+
+const otm_hdmi_timing_t MODE_1280x720p60__PANEL_FS = {
+	TIMING_1280x720p60_PANEL_FS,
+	PD_AR_16_BY_9 | PD_HSYNC_HIGH | PD_VSYNC_HIGH | PD_DTV_MODE,
+						/* flags */
+	OTM_HDMI_STEREO_FRAME_SEQUENTIAL,	/* stereo_type */
+	0					/* Metadata VIC */
+};
+
+const otm_hdmi_timing_t MODE_1280x720p50__PANEL_FS = {
+	TIMING_1280x720p50_PANEL_FS,
+	PD_AR_16_BY_9 | PD_HSYNC_HIGH | PD_VSYNC_HIGH | PD_DTV_MODE,
+						/* flags */
+	OTM_HDMI_STEREO_FRAME_SEQUENTIAL,	/* stereo_type */
+	0					/* Metadata VIC */
+};
+
+const otm_hdmi_timing_t MODE_1920x540p60__PANEL_FS = {
+	TIMING_1920x540p60_PANEL_FS,
+	PD_AR_16_BY_9 | PD_HSYNC_HIGH | PD_VSYNC_HIGH | PD_DTV_MODE,
+						/* flags */
+	OTM_HDMI_STEREO_FRAME_SEQUENTIAL,	/* stereo_type */
+	0					/* Metadata VIC */
+};
+
+const otm_hdmi_timing_t MODE_1920x540p50__PANEL_FS = {
+	TIMING_1920x540p50_PANEL_FS,
+	PD_AR_16_BY_9 | PD_HSYNC_HIGH | PD_VSYNC_HIGH | PD_DTV_MODE,
+						/* flags */
+	OTM_HDMI_STEREO_FRAME_SEQUENTIAL,	/* stereo_type */
+	0					/* Metadata VIC */
+};
+
+const otm_hdmi_timing_t MODE_920x1080p60__PANEL_FS = {
+	TIMING_960x1080p60_PANEL_FS,
+	PD_AR_16_BY_9 | PD_HSYNC_HIGH | PD_VSYNC_HIGH | PD_DTV_MODE,
+						/* flags */
+	OTM_HDMI_STEREO_FRAME_SEQUENTIAL,	/* stereo_type */
+	0					/* Metadata VIC */
+};
+
+const otm_hdmi_timing_t MODE_920x1080p50__PANEL_FS = {
+	TIMING_960x1080p50_PANEL_FS,
+	PD_AR_16_BY_9 | PD_HSYNC_HIGH | PD_VSYNC_HIGH | PD_DTV_MODE,
+						/* flags */
+	OTM_HDMI_STEREO_FRAME_SEQUENTIAL,	/* stereo_type */
+	0					/* Metadata VIC */
+};
+
+const otm_hdmi_timing_t MODE_1920x1080p30__PANEL_FS = {
+	TIMING_1920x1080p30_PANEL_FS,
+	PD_AR_16_BY_9 | PD_HSYNC_HIGH | PD_VSYNC_HIGH | PD_DTV_MODE,
+						/* flags */
+	OTM_HDMI_STEREO_FRAME_SEQUENTIAL,	/* stereo_type */
+	0					/* Metadata VIC */
+};
+
+const otm_hdmi_timing_t MODE_1920x1080p25__PANEL_FS = {
+	TIMING_1920x1080p25_PANEL_FS,
+	PD_AR_16_BY_9 | PD_HSYNC_HIGH | PD_VSYNC_HIGH | PD_DTV_MODE,
+						/* flags */
+	OTM_HDMI_STEREO_FRAME_SEQUENTIAL,	/* stereo_type */
+	0					/* Metadata VIC */
+};
+
+const otm_hdmi_timing_t MODE_1920x1080p24__PANEL_FS = {
+	TIMING_1920x1080p24_PANEL_FS,
+	PD_AR_16_BY_9 | PD_HSYNC_HIGH | PD_VSYNC_HIGH | PD_DTV_MODE,
+						/* flags */
+	OTM_HDMI_STEREO_FRAME_SEQUENTIAL,	/* stereo_type */
+	0					/* Metadata VIC */
+};
+
+const otm_hdmi_timing_t MODE_1920x1080p60__PANEL = {
+	TIMING_1920x1080p60_PANEL,
+	PD_AR_16_BY_9 | PD_HSYNC_HIGH | PD_VSYNC_HIGH | PD_DTV_MODE,
+				/* flags */
+	OTM_HDMI_STEREO_NONE,	/* stereo_type */
+	0			/* Metadata VIC */
+};
+
+const otm_hdmi_timing_t MODE_1920x1080p50__PANEL = {
+	TIMING_1920x1080p50_PANEL,
+	PD_AR_16_BY_9 | PD_HSYNC_HIGH | PD_VSYNC_HIGH | PD_DTV_MODE,
+				/* flags */
+	OTM_HDMI_STEREO_NONE,	/* stereo_type */
+	0			/* Metadata VIC */
+};
+
+const otm_hdmi_timing_t MODE_1920x1080p48__PANEL = {
+	TIMING_1920x1080p48_PANEL,
+	PD_AR_16_BY_9 | PD_HSYNC_HIGH | PD_VSYNC_HIGH | PD_DTV_MODE,
+				/* flags */
+	OTM_HDMI_STEREO_NONE,	/* stereo_type */
+	0			/* Metadata VIC */
+};
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/pil/common/otm_hdmi.c b/drivers/external_drivers/intel_media/otm_hdmi/pil/common/otm_hdmi.c
new file mode 100644
index 0000000..7da2d7e
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/pil/common/otm_hdmi.c
@@ -0,0 +1,1914 @@
+/*
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+	Copyright(c) 2011 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  Contact Information:
+
+  Intel Corporation
+  2200 Mission College Blvd.
+  Santa Clara, CA  95054
+
+  BSD LICENSE
+
+	Copyright(c) 2011 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+
+#include <linux/time.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include "otm_hdmi.h"
+#include "ipil_hdmi.h"
+
+#include "hdmi_internal.h"
+#include "hdmi_timings.h"
+#ifdef OTM_HDMI_HDCP_ENABLE
+#include "hdcp_api.h"
+#endif
+#include "edid.h"
+#include "ps_hdmi.h"
+#include "ips_hdmi.h"
+#include "infoframes_api.h"
+
+#define OTM_HDMI_NAME "OTM HDMI"
+
+/*
+ * Table of attributes
+ */
+otm_hdmi_attribute_t otm_hdmi_attributes_table
+		[OTM_HDMI_MAX_SUPPORTED_ATTRIBUTES];
+
+/* Placeholder for all TX supported modes */
+static const otm_hdmi_timing_t *g_video_modes[MAX_TIMINGS];
+/* Placeholder for all TX supported modes per CEA 861E spec used by EDID parser
+ */
+static const otm_hdmi_timing_t *g_video_modes_ref[MAX_TIMINGS];
+static otm_hdmi_ret_t otm_hdmi_attr_get_validate(otm_hdmi_attribute_id_t id);
+
+static otm_hdmi_ret_t __pd_attr_declare(otm_hdmi_attribute_t *table,
+				otm_hdmi_attribute_id_t id,
+				otm_hdmi_attribute_type_t type,
+				otm_hdmi_attribute_flag_t flags,
+				char *name,
+				void *value,
+				unsigned int min,
+				unsigned int max);
+
+static unsigned int g_gpio = GPIO_MIN;
+static unsigned int g_dtv;
+static unsigned int g_dc = 1;
+
+#define EDID_SIGNATURE 0x00FFFFFFFFFFFF00ull
+
+static hdmi_context_t *g_context;
+
+/* This table preserves the special timings for DTV models */
+static const otm_hdmi_timing_t *static_dtv_modes[] = {
+	&MODE_1920x1080p60__PANEL,
+	&MODE_1920x1080p50__PANEL,
+	&MODE_1920x1080p48__PANEL,
+	&MODE_1280x720p60__PANEL_FS,
+	&MODE_1280x720p50__PANEL_FS,
+	&MODE_1920x540p60__PANEL_FS,
+	&MODE_1920x540p50__PANEL_FS,
+	&MODE_920x1080p60__PANEL_FS,
+	&MODE_920x1080p50__PANEL_FS,
+	&MODE_1920x1080p30__PANEL_FS,
+	&MODE_1920x1080p25__PANEL_FS,
+	&MODE_1920x1080p24__PANEL_FS,
+};
+
+/* This table contains list of audio timings supported by Intel CE Media
+ * Processors and used in the situations when EDID is not available
+ *
+ * Note: Do *NOT* add declaration WMA in here as we dont have approval for that
+ */
+static otm_hdmi_audio_cap_t static_audio_modes[] = {
+	DECLARE_AUDIO_CAP(OTM_HDMI_AUDIO_FORMAT_PCM, 8, ALL_SF, ALL_SS),
+	DECLARE_AUDIO_CAP(OTM_HDMI_AUDIO_FORMAT_AC3, 8, ALL_SF, 640 / 8),
+	DECLARE_AUDIO_CAP(OTM_HDMI_AUDIO_FORMAT_DTS, 8, ALL_SF, 1536 / 8),
+	DECLARE_AUDIO_CAP(OTM_HDMI_AUDIO_FORMAT_DDP, 8, ALL_SF, 0),
+	DECLARE_AUDIO_CAP(OTM_HDMI_AUDIO_FORMAT_DTSHD, 8, ALL_SF, 0),
+	DECLARE_AUDIO_CAP(OTM_HDMI_AUDIO_FORMAT_MLP, 8, ALL_SF, 0),
+};
+
+/**
+ * This function called by edid_print tool internally
+ * @ctx		: hdmi context handle
+ * @edid		: edid information
+ *
+ * Returns nothing. Called by edid_print tool to print
+ * edid information to dmesg for debugging purposes
+ */
+static void __hdmi_report_edid(hdmi_context_t *ctx, edid_info_t *edid)
+{
+	int i = 0;
+
+	LOG_PRINT(LOG_LEVEL_HIGH, "----------------------\n");
+	LOG_PRINT(LOG_LEVEL_HIGH, "Name     : %s\n", edid->product_name);
+	LOG_PRINT(LOG_LEVEL_HIGH, "Year     : %d\n", edid->product_year);
+	LOG_PRINT(LOG_LEVEL_HIGH, "SN       : %d\n", edid->product_sn);
+	LOG_PRINT(LOG_LEVEL_HIGH, "Type     : %s\n",
+			edid->hdmi ? "HDMI" : "DVI");
+	LOG_PRINT(LOG_LEVEL_HIGH, "YCbCr444 : %s\n",
+			edid->ycbcr444 ? "Y" : "N");
+	LOG_PRINT(LOG_LEVEL_HIGH, "YCbCr422 : %s\n",
+			edid->ycbcr422 ? "Y" : "N");
+	LOG_PRINT(LOG_LEVEL_HIGH, "30 bpp   : %s\n",
+			edid->dc_30 ? "Y" : "N");
+	LOG_PRINT(LOG_LEVEL_HIGH, "36 bpp   : %s\n",
+			edid->dc_36 ? "Y" : "N");
+	LOG_PRINT(LOG_LEVEL_HIGH, "48 bpp   : %s\n",
+			edid->dc_48 ? "Y" : "N");
+	LOG_PRINT(LOG_LEVEL_HIGH, "DC_YUV   : %s\n",
+			edid->dc_y444 ? "Y" : "N");
+	LOG_PRINT(LOG_LEVEL_HIGH, "Max CLK  : %d\n",
+			edid->max_tmds_clock);
+	LOG_PRINT(LOG_LEVEL_HIGH, "Lip sync : %s\n",
+			edid->latency_present ? "Y" : "N");
+	LOG_PRINT(LOG_LEVEL_HIGH, "ILip sync: %s\n",
+			edid->latency_int_present ? "Y" : "N");
+	LOG_PRINT(LOG_LEVEL_HIGH, "Vid lat  : %d\n",
+			edid->latency_video);
+	LOG_PRINT(LOG_LEVEL_HIGH, "Aud lat  : %d\n",
+			edid->latency_audio);
+	LOG_PRINT(LOG_LEVEL_HIGH, "IVid lat : %d\n",
+			edid->latency_video_interlaced);
+	LOG_PRINT(LOG_LEVEL_HIGH, "IAud lat : %d\n",
+			edid->latency_audio_interlaced);
+	LOG_PRINT(LOG_LEVEL_HIGH, "HDMI VID : %s\n",
+			edid->hdmi_video_present ? "Y" : "N");
+	LOG_PRINT(LOG_LEVEL_HIGH, "HDMI 3D  : %s\n",
+			edid->enabled_3d ? "Y" : "N");
+
+	LOG_PRINT(LOG_LEVEL_HIGH, "SPA      : %d.%d.%d.%d\n",
+		  (edid->spa & 0xF000) >> 12,
+		  (edid->spa & 0x0F00) >> 8,
+		  (edid->spa & 0x00F0) >> 4, (edid->spa & 0x000F) >> 0);
+
+	LOG_PRINT(LOG_LEVEL_HIGH, "Supported timings [%d]:\n",
+		  edid->num_timings);
+
+	for (i = 0; i < edid->num_timings; i++)
+		print_pd_timing(&edid->timings[i], edid->order[i]);
+
+	LOG_PRINT(LOG_LEVEL_HIGH, "Audio capabilities:\n");
+	for (i = 0; i < edid->num_caps; i++)
+		print_audio_capability(&edid->audio_caps[i]);
+
+	print_speaker_layout(edid->speaker_map);
+	LOG_PRINT(LOG_LEVEL_HIGH, "----------------------\n");
+}
+
+/**
+ * This function overrides the edid information with static timings
+ * @ctx		: hdmi context handle
+ * @edid		: edid information
+ * @safe		: boolean for edid option
+ *
+ * Returns OTM_HDMI_SUCCESS or OTM_HDMI_ERR_INTERNAL
+ *
+ * This function overrides the edid information with static timings
+ */
+static otm_hdmi_ret_t __hdmi_edid_override(hdmi_context_t *ctx,
+				edid_info_t *edid,
+				bool safe)
+{
+	otm_hdmi_ret_t rc = OTM_HDMI_SUCCESS;
+	unsigned int i, n = 0;
+	bool hdmi;
+	bool dc_30, dc_36;
+	const otm_hdmi_timing_t **modes = NULL;
+	bool hdcp = PD_ATTR_BOOL(ATTRS[OTM_HDMI_ATTR_ID_HDCP]);
+	unsigned int n_modes_dtv = NUM_ENTRIES_IN(static_dtv_modes);
+
+	/* Verify pointers */
+	if (!(edid)) {
+		rc = OTM_HDMI_ERR_INTERNAL;
+		goto exit;
+	}
+	/* Save device type and DC caps */
+	hdmi = !ctx->dtv && (safe ? edid->hdmi : true);
+	dc_30 = hdmi && edid->dc_30;
+	dc_36 = hdmi && edid->dc_36;
+
+	/* Clear EDID */
+	memset(edid, 0, sizeof(edid_info_t));
+
+	/* Set device type */
+	edid->hdmi = hdmi;
+
+	/* Pick caps table based on whether we are HDMI TX or DTV */
+	modes = ctx->dtv ? static_dtv_modes : g_video_modes;
+	n = ctx->dtv ? n_modes_dtv : ctx->n_modes_tx;
+
+	/* Add all supported video modes */
+	for (i = edid->num_timings = 0; i < n; i++) {
+		edid->timings[edid->num_timings++] = *modes[i];
+
+		/* Do NOT advertise 3D modes in DVI mode unless we are in DTV
+		 * mode which means always use DTV static table
+		 */
+		if (!ctx->dtv && !hdmi &&
+			modes[i]->stereo_type != OTM_HDMI_STEREO_NONE) {
+			edid->num_timings--;
+		}
+	}
+
+	/* Set HDCP based on DTV indicator */
+	PD_ATTR_BOOL(ATTRS[OTM_HDMI_ATTR_ID_HDCP]) =
+				ctx->dtv ? false : hdcp;
+
+	/* Dont bother with HDMI caps if we are in DVI mode */
+	if (!(hdmi))
+		goto exit;
+
+	/* Add all supported audio modes */
+	edid->num_caps = NUM_ENTRIES_IN(static_audio_modes);
+	for (i = 0; i < edid->num_caps; i++)
+		edid->audio_caps[i] = static_audio_modes[i];
+
+	/* Enable all possible speaker allocation maps */
+	edid->speaker_map |= 0x3ff;
+
+	/* Indicate support of deep color and YCbCr output */
+	edid->ycbcr444 = true;
+	edid->ycbcr422 = true;
+	edid->dc_30 = safe ? dc_30 : true;
+	edid->dc_36 = safe ? dc_36 : true;
+
+exit:
+	return rc;
+}
+
+/**
+ * otm_hdmi_edid_parse() - fill capability table
+ * @context:      hdmi context
+ * @use_edid: True or False
+ *
+ * Returns - check otm_hdmi_ret_t
+ *
+ * This routine files capability table.
+ */
+otm_hdmi_ret_t otm_hdmi_edid_parse(void *context, otm_hdmi_use_edid_t use_edid)
+{
+	otm_hdmi_ret_t rc = OTM_HDMI_SUCCESS;
+	i2c_read_t edid_foo = ps_hdmi_i2c_edid_read;
+	bool cable = PD_ATTR_BOOL(ATTRS[OTM_HDMI_ATTR_ID_CABLE_STATUS]);
+	edid_info_t *edid;
+	hdmi_context_t *ctx = (hdmi_context_t *)context;
+
+	/* Verify pointers */
+	if (!ctx) {
+		rc = OTM_HDMI_ERR_INTERNAL;
+		goto exit;
+	}
+	/* Init locals */
+	edid = &ctx->edid_int;
+
+	/* Begin EDID update protection */
+	mutex_lock(&ctx->modes_sema);
+
+	/* Clear EDID */
+	memset(edid, 0, sizeof(edid_info_t));
+
+	/* Setup reference table for parsing */
+	edid->num_ref_timings = ctx->n_modes_ref;
+	edid->ref_timings = g_video_modes_ref;
+
+	/* DTV mode will use static DTV timings directly */
+	if (ctx->dtv)
+		goto edid_override;
+
+	switch (use_edid) {
+	case OTM_HDMI_USE_EDID_REAL:
+		/* Try reading EDID. If reading failed pick overriding strategy
+		 * based on cable status
+		*/
+		rc = edid_parse(edid, edid_foo, ctx);
+		if (rc != OTM_HDMI_SUCCESS) {
+			pr_debug("Failed to read EDID info\n");
+			use_edid = cable ? OTM_HDMI_USE_EDID_SAFE :
+				OTM_HDMI_USE_EDID_NONE;
+		}
+		break;
+	case OTM_HDMI_USE_EDID_SAFE:
+		/* In safe mode we still need real EDID */
+		edid_parse(edid, edid_foo, ctx);
+		break;
+	case OTM_HDMI_USE_EDID_NONE:
+		/* In full override mode we dont care of real EDID
+		 * so do nothing
+		 */
+		break;
+	default:
+		rc = OTM_HDMI_ERR_FAILED;
+		goto exit;
+	}
+
+	/* Dont bother with static timings if we are using real EDID */
+	if (use_edid == OTM_HDMI_USE_EDID_REAL)
+		goto twin_caps;
+
+edid_override:
+	/* Use static timings */
+	__hdmi_edid_override(ctx, edid, use_edid == OTM_HDMI_USE_EDID_SAFE);
+
+	/* Insertion of twin entries below is done right in the parsed table of
+	 * modes without knowledge of its maximum size. Be extra careful about
+	 * it and check that MAX_TIMINGS is big enough; This needs to be fixed
+	 * in long run
+	 */
+twin_caps:
+	/* Print warning message in case there are no timings */
+	if (ctx->edid_int.num_timings == 0) {
+		LOG_ERROR
+		    ("----------------- WARNING -----------------------\n");
+		LOG_ERROR
+		    ("-- TV timings are not available		--\n");
+		LOG_ERROR
+		    ("-- To resolve this switch to static TV timings --\n");
+	}
+	/* Update EDID availability indicator */
+	PD_ATTR_UINT(ATTRS[OTM_HDMI_ATTR_ID_USE_EDID]) = use_edid;
+
+	/* End EDID update protection */
+	mutex_unlock(&ctx->modes_sema);
+
+exit:
+	return rc;
+}
+
+/**
+ * Parse EDID extension blocks
+ * @context:	hdmi context
+ * @raw_edid:	raw edid of first 2 blocks, read by DRM
+ *
+ * Returns:	check otm_hdmi_ret_t
+ *
+ * This routine fills capability table.
+ */
+otm_hdmi_ret_t otm_hdmi_edid_extension_parse(void *context,
+			struct edid *raw_edid,
+			struct i2c_adapter *adapter)
+{
+	otm_hdmi_ret_t rc = OTM_HDMI_SUCCESS;
+	unsigned char *edid = (u8 *) raw_edid;
+	edid_info_t   *edid_info;
+	hdmi_context_t *ctx = (hdmi_context_t *)context;
+
+	pr_debug("enter %s\n", __func__);
+
+	/* Verify pointers */
+	if (!ctx || !edid || !adapter) {
+		rc = OTM_HDMI_ERR_INTERNAL;
+		goto exit;
+	}
+
+	/* Init locals */
+	edid_info = &ctx->edid_int;
+
+	/* Begin EDID update protection */
+	mutex_lock(&ctx->modes_sema);
+
+	/* Clear EDID */
+	memset(edid_info, 0, sizeof(edid_info_t));
+
+	/* Setup reference table for parsing */
+	edid_info->num_ref_timings = ctx->n_modes_ref;
+	edid_info->ref_timings = g_video_modes_ref;
+
+	rc = edid_extension_parse(adapter, edid_info, edid);
+	if (rc != OTM_HDMI_SUCCESS)
+		pr_debug("Failed to read EDID info\n");
+
+	/* End EDID update protection */
+	mutex_unlock(&ctx->modes_sema);
+
+exit:
+	pr_debug("exit %s (ret = %d)\n", __func__, rc);
+	return rc;
+}
+
+/**
+* prepare hdmi eld packet and copy it to the given buffer
+* @ctx: hdmi context
+* @eld: pointer to otm_hdmi_eld_t structure
+*
+* Returns: OTM_HDMI_SUCCESS, OTM_HDMI_ERR_INTERNAL or OTM_HDMI_ERR_NULL_ARG
+*/
+otm_hdmi_ret_t otm_hdmi_get_eld(void *ctx, otm_hdmi_eld_t *eld)
+{
+	hdmi_context_t *context = (hdmi_context_t *)ctx;
+	edid_info_t *edid_int = NULL;
+
+	if (!context)
+		return OTM_HDMI_ERR_INTERNAL;
+
+	if (eld == NULL)
+		return OTM_HDMI_ERR_NULL_ARG;
+
+	memset(eld->eld_data, 0, sizeof(OTM_HDMI_ELD_SIZE));
+
+	edid_int = &(context->edid_int);
+
+	/* ELD Version Number - version 2, supporting CEA 861D or below */
+	eld->eld_ver = 2;
+
+	/* Version number of the ELD, reserved for future */
+	eld->veld_ver = 0;
+
+	/* Length of the Baseline structure, in number of DWORD. Maximum 80 bytes as per EELD proposal */
+	eld->baseline_eld_length = OTM_HDMI_ELD_SIZE/4;
+
+	/* monitor name length */
+	/* monitor name is not populated intentionally */
+	eld->mnl = 0;
+	/*011b - indicates CEA 861 B, C or D */
+	eld->cea_edid_rev_id = 3;
+
+	/* Capabilities */
+	eld->hdcp = 1;
+	eld->ai_support = edid_int->supports_ai;
+	/* 00b - indicate HDMI connection type */
+	eld->connection_type = 0;
+	/* number of Short Audio Descriptors  (SAD) */
+	if (edid_int->short_audio_descriptor_count > OTM_HDMI_MAX_SAD_COUNT) {
+		pr_warn("ELD supports a maximum of %d SADs. ",
+				OTM_HDMI_MAX_SAD_COUNT);
+		pr_warn("Limiting SAD count to %d. ",
+				OTM_HDMI_MAX_SAD_COUNT);
+		pr_warn("Some SADs will be lost!\n");
+		eld->sadc = OTM_HDMI_MAX_SAD_COUNT;
+	} else {
+		eld->sadc = edid_int->short_audio_descriptor_count;
+	}
+
+	/* delay of video compared to audio in terms of units of 2ms */
+	eld->audio_synch_delay = 0;
+	/* valid value of video/audio latency ranges from 1 to 251, see HDMI spec 3a */
+	/* Section 8.3.2 HDMI Vendor Specific Data Block (VSDB) */
+	if (edid_int->latency_video > 0 &&
+		edid_int->latency_video < 252 &&
+		edid_int->latency_audio > 0 &&
+		edid_int->latency_audio < 252) {
+		eld->audio_synch_delay = edid_int->latency_video - edid_int->latency_audio;
+		/* the maximum delay is 500ms */
+		if (eld->audio_synch_delay > 250)
+			eld->audio_synch_delay = 0;
+
+		pr_debug("video latency: %d, audio_latency: %d, lipsync: %d\n",
+			edid_int->latency_video,
+			edid_int->latency_audio,
+			eld->audio_synch_delay);
+	}
+
+	/* bits 8 - 10 are ignored: FLH/FRH (Front Left/Right High), TC (Top Center), FCH (Front Center High) */
+	eld->speaker_allocation_block = (uint8_t) (edid_int->speaker_map & 0xff);
+
+	/* The following fields are intentionally ignored */
+	/* eld->port_id_value */
+	/* eld->manufacturer_id */
+	/* eld->product_id */
+
+	/* 64-byte of baseline data, including monitor names and and list of 3-byte SAD */
+	/* monitor name is not populated here */
+	if (edid_int->short_audio_descriptor_count) {
+		WARN_ON(edid_int->short_audio_descriptor_count >
+						sizeof(eld->mn_sand_sads));
+		memcpy(eld->mn_sand_sads, edid_int->short_audio_descriptor_data,
+		       min_t(int, MAX_DATA_BLOCK_SIZE,
+			     3 * edid_int->short_audio_descriptor_count));
+	}
+
+	return OTM_HDMI_SUCCESS;
+}
+
+/**
+ * otm_hdmi_timing_from_cea_modes() - get timings for cea modes
+ * @buffer: the extension block buffer
+ * @timings: the result CEA timings extacted from the buffer
+ *
+ * Returns - the number of modes in the timings
+ */
+int otm_hdmi_timing_from_cea_modes(unsigned char *buffer,
+				   otm_hdmi_timing_t *timings)
+{
+	edid_info_t *edid  = NULL;
+
+	if (buffer == NULL)
+		return 0;
+
+	if (timings == NULL)
+		return 0;
+
+	if (g_context == NULL)
+		return 0;
+
+	edid = &g_context->edid_int;
+	if (edid == NULL)
+		return 0;
+
+	edid->num_ref_timings = g_context->n_modes_ref;
+	edid->ref_timings = g_video_modes_ref;
+
+	return edid_parse_pd_timing_from_cea_block(edid, buffer, timings);
+}
+
+/**
+ * otm_hdmi_get_mode_timings() - get timings of a mode, given:
+ * @context : HDMI context
+ * @hdisplay: mode width
+ * @vdisplay: mode height
+ * @vrefresh: mode refresh rate
+ *
+ * Returns matching mode, NULL otherwise.
+ */
+const otm_hdmi_timing_t *otm_hdmi_get_mode_timings(void *context,
+						int hdisplay,
+						int vdisplay,
+						int vrefresh)
+{
+	const otm_hdmi_timing_t *mode = NULL;
+	int i, refresh_rate;
+
+	if (hdisplay < 0 || vdisplay < 0 || vrefresh < 0)
+		goto exit;
+
+	for (i = 0; i < MAX_TIMINGS; i++) {
+		mode = g_video_modes[i];
+		refresh_rate = ((mode->dclk * 1000) /
+					(mode->htotal * mode->vtotal));
+		if (hdisplay == mode->width &&
+			vdisplay == mode->height &&
+			vrefresh == refresh_rate)
+			return mode;
+	}
+exit:
+	return NULL;
+}
+
+/**
+ * otm_hdmi_hpd_init - Initialize and enable HPD driver service.
+ *
+ * No input arguments
+ *
+ * Returns - OTM_HDMI_SUCCESS on successful initialization
+ * Returns - OTM_HDMI_ERR_FAILED on init failure
+ */
+otm_hdmi_ret_t otm_hdmi_hpd_init(void)
+{
+	int result = 0;
+	result = ps_hdmi_hpd_register_driver();
+	return (result) ? OTM_HDMI_SUCCESS : OTM_HDMI_ERR_FAILED;
+}
+
+/**
+ * otm_hdmi_hpd_deinit - Deinit HPD driver service.
+ *
+ * No input arguments
+ *
+ * Returns - OTM_HDMI_SUCCESS
+ */
+otm_hdmi_ret_t otm_hdmi_hpd_deinit(void)
+{
+	ps_hdmi_hpd_unregister_driver();
+	return OTM_HDMI_SUCCESS;
+}
+
+/**
+ * otm_hdmi_get_hpd_pin - get hdmi hpd pin number.
+ *
+ * No input arguments
+ *
+ * Returns - pin number
+ */
+unsigned int otm_hdmi_get_hpd_pin(void)
+{
+	unsigned int result = 0;
+	result = ps_hdmi_get_hpd_pin();
+	return result;
+}
+
+/**
+ * otm_hdmi_override_cable_status - override hdmi hpd cable status.
+ *
+ * Input: override state and auto test state
+ */
+void otm_hdmi_override_cable_status(bool state, bool auto_state)
+{
+	ps_hdmi_override_cable_status(state, auto_state);
+	return;
+}
+
+
+
+/**
+ * This function fills the given table with timings
+ * @unit_id	: hdmi unit revision id
+ * @table	: handle to table to be filled
+ * @max_size	: max number of entries in the table
+ * @reference	: is this table reference table?
+ *
+ * This routine fills given table with timings according to current unit version
+ * and subsequent use of table
+ */
+static int __init_tx_modes(hdmi_unit_revision_id_t unit_id,
+			   const otm_hdmi_timing_t **table,
+			   unsigned int max_size, bool reference)
+{
+	int i = 0;
+
+#define __ADD_MODE(mode)    \
+	do { \
+		if (i < max_size) \
+			table[i++] = mode;  \
+		else {		       \
+		    i = -1;	     \
+		    goto exit;	  \
+		}	\
+	} while (0);
+
+	/* The following 2D modes are supported on all unit revisions */
+	__ADD_MODE(&MODE_640x480p5994_60);
+	__ADD_MODE(&MODE_720_1440x576i50);
+	__ADD_MODE(&MODE_720_1440x480i5994_60);
+	__ADD_MODE(&MODE_720x576p50);
+	__ADD_MODE(&MODE_720x480p5994_60);
+	__ADD_MODE(&MODE_1280x720p50);
+	__ADD_MODE(&MODE_1280x720p5994_60);
+	__ADD_MODE(&MODE_1920x1080i50);
+	__ADD_MODE(&MODE_1920x1080i5994_60);
+	__ADD_MODE(&MODE_1920x1080p24);
+	__ADD_MODE(&MODE_1920x1080p25);
+	__ADD_MODE(&MODE_1920x1080p30);
+	__ADD_MODE(&MODE_1920x1080p50);
+	__ADD_MODE(&MODE_1920x1080p5994_60);
+
+	/* The following 3D modes are supported on all unit revisions */
+	__ADD_MODE(&MODE_1280x720p50__SBSH2);
+	__ADD_MODE(&MODE_1280x720p5994_60__SBSH2);
+	__ADD_MODE(&MODE_1920x1080i50__SBSH2);
+	__ADD_MODE(&MODE_1920x1080i5994_60__SBSH2);
+	__ADD_MODE(&MODE_1920x1080p24__SBSH2);
+	__ADD_MODE(&MODE_1920x1080p50__SBSH2);
+	__ADD_MODE(&MODE_1920x1080p5994_60__SBSH2);
+	__ADD_MODE(&MODE_1280x720p50__TBH2);
+	__ADD_MODE(&MODE_1280x720p5994_60__TBH2);
+	__ADD_MODE(&MODE_1920x1080p24__TBH2);
+	__ADD_MODE(&MODE_1920x1080p30__TBH2);
+	__ADD_MODE(&MODE_1920x1080p50__TBH2);
+	__ADD_MODE(&MODE_1920x1080p5994_60__TBH2);
+	__ADD_MODE(&MODE_1280x720p50__FP2);
+	__ADD_MODE(&MODE_1920x1080p30__FP2);
+
+	/* The following modes are only included if the table is used as a
+	 * reference set for EDID parsing
+	 */
+	if (reference) {
+		__ADD_MODE(&MODE_720_1440x576i50__16by9);
+		__ADD_MODE(&MODE_720_1440x480i5994_60__16by9);
+		__ADD_MODE(&MODE_720x576p50__16by9);
+		__ADD_MODE(&MODE_720x480p5994_60__16by9);
+	}
+	/* The following mode are supported only on CE4200 B0 and further */
+	if (unit_id >= HDMI_PCI_REV_CE4200_B0) {
+		__ADD_MODE(&MODE_1280x720p50__FP);
+		__ADD_MODE(&MODE_1280x720p5994_60__FP);
+		__ADD_MODE(&MODE_1920x1080i50__FP);
+		__ADD_MODE(&MODE_1920x1080i5994_60__FP);
+		__ADD_MODE(&MODE_1920x1080p24__FP);
+		__ADD_MODE(&MODE_1920x1080p30__FP);
+	}
+#undef __ADD_MODE
+
+exit:
+	return i;
+}
+
+static void log_entry(void *uhandle, char *foo)
+{
+#ifdef __HDMI_HAL_TRACE__
+	PD_PRINT("%s: Entering %s\n", PD_NAME, foo);
+#endif
+}
+
+static void log_exit(void *uhandle, char *foo, int rc)
+{
+#ifdef __HDMI_HAL_TRACE__
+	PD_PRINT("%s: Exiting %s with %d\n", PD_NAME, foo, rc);
+#endif
+}
+
+/* Microseconds domain timer initialization */
+static void __poll_start(void *poll_timer)
+{
+	do_gettimeofday(poll_timer);
+}
+
+/* Microseconds domain timeout verification */
+static bool __poll_timeout(void *poll_timer)
+{
+	struct timeval tv_stop;
+	do_gettimeofday(&tv_stop);
+	return TIME_DIFF(tv_stop, *((struct timeval *) poll_timer)) >
+				I2C_SW_TIMEOUT;
+}
+
+/**
+ * This function initializes hdmi_context
+ * @context	: opaque hdmi_context
+ * @pdev		: pci_device
+ *
+ * Returns check otm_hdmi_ret_t
+ * Initializes hdmi_context
+ */
+static otm_hdmi_ret_t __hdmi_context_init(void *context, struct pci_dev *pdev)
+{
+	otm_hdmi_ret_t rc = OTM_HDMI_SUCCESS;
+	hdmi_context_t *ctx = NULL;
+
+	LOG_ENTRY(LOG_LEVEL_HIGH);
+
+	/* Verify pointers */
+	if (context == NULL) {
+		rc = OTM_HDMI_ERR_INTERNAL;
+		goto exit;
+	}
+	ctx = (hdmi_context_t *)context;
+
+	/* Setup resources for HDMI VDC */
+	rc = ps_hdmi_pci_dev_init(ctx, pdev);
+	if (rc != OTM_HDMI_SUCCESS)
+		goto exit;
+
+	pr_debug("About to call initialize HAL members and io_address is %p\n",
+		 ctx->io_address);
+
+	/* Initialize HAL; It's important that ALL entries are initialized!!! */
+	ctx->dev.log_entry = log_entry;
+	ctx->dev.log_exit = log_exit;
+	ctx->dev.poll_timer = &ctx->hal_timer;
+	ctx->dev.poll_start = __poll_start;
+	ctx->dev.poll_timeout = __poll_timeout;
+	ctx->dev.io_address = ctx->io_address;
+
+	ctx->dev.uhandle = ctx->io_address;
+
+	/* Create modes table sharing protection semaphore */
+	mutex_init(&ctx->modes_sema);
+
+	/* Create execution protection semaphore */
+	mutex_init(&ctx->exec_sema);
+
+	/* Create HPD protection semaphore */
+	mutex_init(&ctx->hpd_sema);
+
+	/* Create server thread synchronization semaphore */
+	mutex_init(&ctx->srv_sema);
+
+	/* Create AV mute synchronization semaphore */
+	mutex_init(&ctx->mute_sema);
+
+exit:
+	LOG_EXIT(LOG_LEVEL_HIGH, rc);
+	return rc;
+}
+
+/**
+ * otm_hdmi_deinit - deinit called during shutdown
+ * @context	:opaque hdmi_context
+ *
+ * Returns nothing. de-initializes and frees pointers
+ * Called during power down
+ */
+void otm_hdmi_deinit(void *context)
+{
+	otm_hdmi_ret_t rc = OTM_HDMI_SUCCESS;
+	hdmi_context_t *ctx = NULL;
+
+	LOG_ENTRY(LOG_LEVEL_HIGH);
+
+	/* Verify pointers */
+	if (context == NULL) {
+		rc = OTM_HDMI_ERR_INTERNAL;
+		goto exit;
+	}
+	ctx = context;
+
+	/* Destroy semaphores */
+	mutex_destroy(&ctx->modes_sema);
+	mutex_destroy(&ctx->exec_sema);
+	mutex_destroy(&ctx->hpd_sema);
+	mutex_destroy(&ctx->srv_sema);
+	mutex_destroy(&ctx->mute_sema);
+
+	/* Unmap IO region, Disable the PCI devices
+	 */
+	ps_hdmi_pci_dev_deinit(ctx);
+
+	/* Free context */
+	kfree(ctx);
+
+	pr_debug("Exiting deinit with error code %d\n", rc);
+exit:
+	LOG_EXIT(LOG_LEVEL_HIGH, rc);
+	return;
+}
+
+/* turn HDMI power rails on */
+bool otm_hdmi_power_rails_on(void)
+{
+	return ps_hdmi_power_rails_on();
+}
+
+/* turn HDMI power rails off */
+bool otm_hdmi_power_rails_off(void)
+{
+	return ps_hdmi_power_rails_off();
+}
+
+/* turn HDMI power islands on */
+bool otm_hdmi_power_islands_on(void)
+{
+	hdmi_context_t *ctx = g_context;
+
+	if (ctx && ctx->islands_powered_on == false) {
+		ctx->islands_powered_on = true;
+		return ps_hdmi_power_islands_on();
+	}
+	return true;
+}
+
+/* turn HDMI power islands off */
+void otm_hdmi_power_islands_off(void)
+{
+	hdmi_context_t *ctx = g_context;
+
+	if (ctx && ctx->islands_powered_on == true) {
+		ctx->islands_powered_on = false;
+		ps_hdmi_power_islands_off();
+	}
+}
+
+/* enable/disable IRQ and CPD_HPD */
+bool otm_hdmi_enable_hpd(bool enable)
+{
+	return ps_hdmi_enable_hpd(enable);
+}
+
+/* control HDMI vblank interrupt */
+void otm_hdmi_vblank_control(struct drm_device *dev, bool on)
+{
+	ps_hdmi_vblank_control(dev, on);
+}
+
+/*
+ * otm_hdmi_get_cable_status - Get HDMI cable connection status
+ * @context: hdmi device context
+ *
+ * Returns - boolean state.
+ * true - HDMI cable connected
+ * false - HDMI cable disconnected
+ */
+bool otm_hdmi_get_cable_status(void *context)
+{
+	hdmi_context_t *ctx = (hdmi_context_t *)context;
+	if (ctx == NULL)
+		return false;
+
+	return ps_hdmi_get_cable_status(ctx);
+}
+
+/**
+ * get pixel clock range
+ * @pc_min	: min pixel clock
+ * @pc_max	: max pixel clock
+ *
+ * Returns check otm_hdmi_ret_t
+ * This functions returns the minimum and maximum
+ * pixel clock values
+ */
+otm_hdmi_ret_t otm_hdmi_get_pixel_clock_range(unsigned int *pc_min,
+						unsigned int *pc_max)
+{
+	return ipil_get_pixel_clock_range(pc_min, pc_max);
+}
+
+/**
+ * otm_hdmi_hpd_callback_register - Register a callback for HPD events
+ * @context: hdmi device context
+ * @phdmi_irq_cb: function pointer for hotplug/unplug IRQ callbacks.
+ * @data: data for irq callback
+ *
+ * Perform HPD IRQ call back initialization
+ *
+ * Returns - check otm_hdmi_ret_t
+ */
+otm_hdmi_ret_t otm_hdmi_hpd_callback_register(void *context,
+					      irqreturn_t (*phdmi_irq_cb) (int, void*),
+					      void *data)
+{
+	otm_hdmi_ret_t rc = OTM_HDMI_SUCCESS;
+	hdmi_context_t *ctx = NULL;
+
+	pr_debug("Entering %s, context = %p\n", __func__, context);
+	/* Verify pointers */
+	if (context == NULL || phdmi_irq_cb == NULL ||
+		data == NULL) {
+		rc = OTM_HDMI_ERR_INTERNAL;
+		goto exit;
+	}
+	ctx = (hdmi_context_t *)context;
+
+	pr_debug("Registering HPD callback\n");
+	ctx->hpd_callback = phdmi_irq_cb;
+	ctx->hpd_data = data;
+exit:
+	return rc;
+
+}
+
+/**
+ * otm_hdmi_device_init	-	init hdmi device driver
+ * @context: hdmi device context
+ * @pdev: pci device
+ *
+ * Perform HDMI device initialization which includes 3 steps:
+ * 1) otm context create,
+ * 2) os specific context init,
+ * 3) device enable
+ *
+ * Returns - check otm_hdmi_ret_t
+ */
+otm_hdmi_ret_t otm_hdmi_device_init(void **context, struct pci_dev *pdev)
+{
+	otm_hdmi_ret_t rc = OTM_HDMI_SUCCESS;
+	hdmi_context_t *ctx = NULL;
+	int n;
+
+	LOG_ENTRY(LOG_LEVEL_HIGH);
+
+	/* Verify pointers */
+	if (context == NULL) {
+		rc = OTM_HDMI_ERR_INTERNAL;
+		goto exit;
+	}
+
+	/* Create and clear context */
+	g_context = ctx =
+	    (hdmi_context_t *) kmalloc(sizeof(hdmi_context_t), GFP_KERNEL);
+	if (ctx == NULL) {
+		rc = OTM_HDMI_ERR_NO_MEMORY;
+		goto exit;
+	}
+	memset(ctx, 0, sizeof(hdmi_context_t));
+
+	pr_debug("HDMI Context created = %p\n", ctx);
+
+	/* Init HDMI context */
+	rc = __hdmi_context_init(ctx, pdev);
+	if (rc != OTM_HDMI_SUCCESS) {
+		pr_debug("\nHDMI Context init failed\n");
+		goto exit;
+	}
+
+	rc = otm_hdmi_declare_attributes(__pd_attr_declare, __pd_attr_get_name);
+	if (rc != OTM_HDMI_SUCCESS) {
+		pr_debug("\nHDMI declare attributes table failed\n");
+		goto exit;
+	}
+
+	ipil_hdmi_set_hdmi_dev(&ctx->dev);
+
+	/* Save the output mode as DTV or HDMT tx */
+	ctx->dtv = g_dtv;
+
+	/* Save the deep color enable flag */
+	ctx->dc = g_dc;
+
+	/* Save active GPIO number */
+	ctx->gpio = g_gpio;
+
+	/* Save context */
+	*context = ctx;
+
+	/* Fill in static timing table */
+	n = __init_tx_modes(ctx->dev.id, g_video_modes, MAX_TIMINGS,
+				false);
+	if (n < 0) {
+		rc = OTM_HDMI_ERR_NO_MEMORY;
+		goto exit;
+	}
+	ctx->n_modes_tx = n;
+
+	/* Fill EDID parser reference timing table */
+	n = __init_tx_modes(ctx->dev.id, g_video_modes_ref, MAX_TIMINGS,
+			    true);
+	if (n < 0) {
+		rc = OTM_HDMI_ERR_NO_MEMORY;
+		goto exit;
+	}
+	ctx->n_modes_ref = n;
+
+	/* Fill in advertised timings table */
+	otm_hdmi_edid_parse(ctx, OTM_HDMI_USE_EDID_NONE);
+
+exit:
+	/* Clean up if appropriate */
+	if ((rc != OTM_HDMI_SUCCESS) && (ctx != NULL))
+		otm_hdmi_deinit((void *)ctx);
+
+	LOG_EXIT(LOG_LEVEL_HIGH, rc);
+	return rc;
+}
+
+/**
+ * Returns if the given values is preferred mode or not
+ * @hdisplay	: width
+ * @vdisplay	: height
+ * @refresh	: refresh rate
+ *
+ * Returns true if preferred mode else false
+ */
+bool otm_hdmi_is_preferred_mode(int hdisplay, int vdisplay, int refresh)
+{
+	return ipil_hdmi_is_preferred_mode(hdisplay, vdisplay, refresh);
+}
+
+/**
+ * Set raw edid to the hdmi context
+ * @context	: opaque hdmi_context
+ * @raw_edid	: raw edid information
+ *
+ * Returns - check otm_hdmi_ret_t
+ * Copy raw edid to the hdmi context
+ */
+otm_hdmi_ret_t otm_hdmi_set_raw_edid(void *context, char *raw_edid)
+{
+	hdmi_context_t *ctx = (hdmi_context_t *)context;
+
+	if (ctx == NULL)
+		return OTM_HDMI_ERR_FAILED;
+
+	/* TODO: need more flexiable way which should be edid size-aware copy */
+	memcpy(ctx->edid_raw, raw_edid, MAX_EDID_BLOCKS * SEGMENT_SIZE);
+
+	return OTM_HDMI_SUCCESS;
+}
+
+/**
+ * Get raw edid to the hdmi context
+ * @context	: opaque hdmi_context
+ * @raw_edid	: raw edid information
+ *
+ * Returns - check otm_hdmi_ret_t
+ * Retrieves raw edid in the hdmi context
+ */
+otm_hdmi_ret_t otm_hdmi_get_raw_edid(void *context, char **raw_edid)
+{
+	hdmi_context_t *ctx = (hdmi_context_t *)context;
+
+	if (ctx == NULL || raw_edid == NULL)
+		return OTM_HDMI_ERR_FAILED;
+
+	*raw_edid = (char *)ctx->edid_raw;
+
+	return OTM_HDMI_SUCCESS;
+}
+
+/**
+ * Set scaling type to the hdmi context
+ * @context	: opaque hdmi_context
+ * @scaling     : scaling type to be set
+ *
+ * Returns - check otm_hdmi_ret_t
+ */
+otm_hdmi_ret_t otm_hdmi_set_scaling_type(void *context, int scaling)
+{
+	hdmi_context_t *ctx = (hdmi_context_t *)context;
+
+	if (ctx == NULL)
+		return OTM_HDMI_ERR_FAILED;
+
+	ctx->scaling_type = scaling;
+
+	return OTM_HDMI_SUCCESS;
+}
+
+/**
+ * Check if monitor connected is hdmi
+ * @context	: opaque hdmi_context
+ *
+ * Returns true if hdmi else false
+ * Check if monitor connected is hdmi
+ */
+bool otm_hdmi_is_monitor_hdmi(void *context)
+{
+	hdmi_context_t *ctx = (hdmi_context_t *)context;
+
+	if (ctx == NULL)
+		return true; /* default to HDMI */
+
+	return ctx->edid_int.hdmi;
+}
+
+/**
+ * validates the attribute to be read or not.
+ * @id	: attribute id to be validated
+ *
+ * Read's the attributes flag value.
+ *
+ * Returns -
+ *	OTM_HDMI_SUCCESS - if the attribute is readable.
+ *	OTM_HDMI_ERR_INTERNAL -	if the attribute is non-readable.
+ *	OTM_HDMI_ERR_FAILED - if the attribute is not in range.
+ */
+static otm_hdmi_ret_t otm_hdmi_attr_get_validate(otm_hdmi_attribute_id_t id)
+{
+	otm_hdmi_ret_t rc = OTM_HDMI_SUCCESS;
+	if (id < 0 || id >= OTM_HDMI_MAX_SUPPORTED_ATTRIBUTES) {
+		LOG_ERROR("Invalid argument passed (id): %d\n", id);
+		rc = OTM_HDMI_ERR_FAILED;
+		goto exit;
+	}
+	/*
+	 * Based on attribute type perform appropriate check
+	*/
+	if (OTM_HDMI_ATTR_FLAG_WRITE & ATTRS[id].flags) {
+		return rc;
+	} else if (OTM_HDMI_ATTR_FLAG_SUPPORTED & ATTRS[id].flags) {
+		/*
+		 * Needs a Fix.
+		 */
+		return rc;
+	} else if (OTM_HDMI_ATTR_FLAG_INTERNAL & ATTRS[id].flags) {
+		rc = OTM_HDMI_ERR_INTERNAL;
+		return rc;
+	} else {
+		LOG_ERROR("Invalid attribute accessed: (%d)\n", id);
+		rc = OTM_HDMI_ERR_FAILED;
+		return rc;
+	}
+exit:
+	LOG_EXIT(LOG_LEVEL_HIGH, rc);
+	return rc;
+}
+
+/**
+ * Getting given attribute
+ * @context		:opaque hdmi context
+ * @id			: attribute id
+ * @attribute		:user provided buffer for attribute details
+ * @log			: a hint wether port driver should log the call
+ *
+ * Returns otm_hdmi_ret_t check. Getting given attribute values
+ */
+otm_hdmi_ret_t otm_hdmi_get_attribute(void *context,
+					otm_hdmi_attribute_id_t id,
+					otm_hdmi_attribute_t *attribute,
+					bool log)
+{
+	otm_hdmi_ret_t rc = OTM_HDMI_SUCCESS;
+	LOG_ENTRY((log) ? LOG_LEVEL_HIGH : LOG_LEVEL_VBLANK);
+
+	rc = otm_hdmi_attr_get_validate(id);
+	if (OTM_HDMI_SUCCESS != rc)
+		goto exit;
+	if (NULL == attribute || NULL == context) {
+		LOG_ERROR("Invalid argument passed (attribute): %d\n", id);
+		rc = OTM_HDMI_ERR_FAILED;
+		goto exit;
+	}
+
+	*attribute = ATTRS[id];
+exit:
+	LOG_EXIT((log) ? LOG_LEVEL_HIGH : LOG_LEVEL_VBLANK, rc);
+	return rc;
+}
+EXPORT_SYMBOL(otm_hdmi_get_attribute);
+
+/**
+ * Attribute name getting routine
+ * @id		: attribute id
+ *
+ * Returns attribute name corresponding to id
+ */
+char *__pd_attr_get_name(otm_hdmi_attribute_id_t id)
+{
+	otm_hdmi_attribute_t *table = otm_hdmi_attributes_table;
+
+	if ((0 <= id) && (id < OTM_HDMI_MAX_SUPPORTED_ATTRIBUTES))
+		return table[id].name;
+	else
+		return NULL;
+}
+EXPORT_SYMBOL(__pd_attr_get_name);
+
+/**
+ * Generic attribute declaration routine
+ * @table	: attribute table to be updated
+ * @id		: id to be updated to the table
+ * @type	: attribute type
+ * @flags	: attribute flags
+ * @name	: attribute name
+ * @value	: attribute default value
+ * @min	: min value possible for the attribute
+ * @max	: max value possible for the attribute
+ *
+ * Returns check otm_hdmi_ret_t
+ */
+static otm_hdmi_ret_t __pd_attr_declare(otm_hdmi_attribute_t *table,
+				otm_hdmi_attribute_id_t id,
+				otm_hdmi_attribute_type_t type,
+				otm_hdmi_attribute_flag_t flags,
+				char *name,
+				void *value,
+				unsigned int min,
+				unsigned int max)
+{
+	size_t strsiz = 0;
+
+	otm_hdmi_ret_t rc = OTM_HDMI_SUCCESS;
+
+	if ((id < 0) || (id > OTM_HDMI_MAX_SUPPORTED_ATTRIBUTES))
+		return  OTM_HDMI_ERR_FAILED;
+
+	/* Buffer sizes in 'otm_hdmi_attribute_t table' struct are
+	 * [OTM_HDMI_MAX_STRING_LENGTH + 1].
+	 */
+	if (name)
+		strsiz = strlen(name);
+	if ((name != NULL) && (strsiz <= OTM_HDMI_MAX_STRING_LENGTH))
+		strncpy(table[id].name, name, OTM_HDMI_MAX_STRING_LENGTH);
+	else
+		LOG_ERROR("set default name\n");
+		/* TODO: set default name */
+	table[id].name[OTM_HDMI_MAX_STRING_LENGTH] = 0;
+	table[id].flags = flags;
+
+	switch (type) {
+	case OTM_HDMI_ATTR_TYPE_UINT:
+		table[id].content._uint.value         =
+			(unsigned int) (uintptr_t) value;
+		table[id].content._uint.value_default =
+			(unsigned int) (uintptr_t) value;
+		table[id].content._uint.value_min     = min;
+		table[id].content._uint.value_max     = max;
+		break;
+	case OTM_HDMI_ATTR_TYPE_BOOLEAN:
+		table[id].content._bool.value         = (bool) value;
+		table[id].content._bool.value_default = (bool) value;
+		break;
+	case OTM_HDMI_ATTR_TYPE_STRING:
+		if (value)
+			strsiz = strlen(value);
+		if ((value != NULL) && strsiz <= OTM_HDMI_MAX_STRING_LENGTH)
+			strncpy(table[id].content.string.value,
+				(char *) value, OTM_HDMI_MAX_STRING_LENGTH);
+		else
+			rc = OTM_HDMI_ERR_FAILED;
+		table[id].content.string.value[OTM_HDMI_MAX_STRING_LENGTH] = 0;
+		break;
+	default:
+		break;
+	}
+	return rc;
+}
+
+/**
+ * otm_hdmi_declare_attributes - init hdmi global attributes table
+ * @declare	: declare attribute
+ * @get_name	: name of the attribute
+ *
+ * Returns - check otm_hdmi_ret_t
+ */
+otm_hdmi_ret_t otm_hdmi_declare_attributes(pd_attr_declare_t declare,
+						pd_attr_get_name_t get_name)
+{
+	otm_hdmi_ret_t rc = OTM_HDMI_SUCCESS;
+
+	otm_hdmi_attribute_t *table = otm_hdmi_attributes_table;
+
+	LOG_ENTRY(LOG_LEVEL_HIGH);
+
+	/*
+	 * declare(table, OTM_HDMI_ATTR_ID_NAME,
+	 * OTM_HDMI_ATTR_TYPE_STRING, PD_ATTR_FLAGS_RS,
+	 * get_name(OTM_HDMI_ATTR_ID_NAME),
+	 * (void *) PD_NAME, 0, 0);
+	 */
+
+	PD_DECLARE_ATTRIBUTE_STRING(declare, table,
+		OTM_HDMI_ATTR_ID_NAME,
+		PD_ATTR_FLAGS_RS, get_name, PD_NAME);
+
+	PD_DECLARE_ATTRIBUTE_BOOL(declare, table,
+		OTM_HDMI_ATTR_ID_CABLE_STATUS,
+		PD_ATTR_FLAGS_RS, get_name, false);
+
+	PD_DECLARE_ATTRIBUTE_BOOL(declare, table,
+		OTM_HDMI_ATTR_ID_POWER,
+		PD_ATTR_FLAGS_RWS, get_name, true);
+
+	PD_DECLARE_ATTRIBUTE_BOOL(declare, table,
+		OTM_HDMI_ATTR_ID_HDCP,
+		PD_ATTR_FLAGS_RWS, get_name, true);
+
+	PD_DECLARE_ATTRIBUTE_BOOL(declare, table,
+		OTM_HDMI_ATTR_ID_HDCP_AUTO_MUTE,
+		PD_ATTR_FLAGS_RWS, get_name, true);
+
+	PD_DECLARE_ATTRIBUTE_UINT(declare, table,
+		OTM_HDMI_ATTR_ID_HDCP_STATUS,
+		PD_ATTR_FLAGS_RS, get_name,
+		OTM_HDMI_HDCP_STATUS_OFF,
+		OTM_HDMI_HDCP_STATUS_OFF,
+		OTM_HDMI_HDCP_STATUS_ON);
+
+	PD_DECLARE_ATTRIBUTE_BOOL(declare, table,
+		OTM_HDMI_ATTR_ID_HDCP_ENCRYPT,
+		PD_ATTR_FLAGS_RWS, get_name, true);
+
+	PD_DECLARE_ATTRIBUTE_UINT(declare, table,
+		OTM_HDMI_ATTR_ID_COLOR_SPACE_INPUT,
+		PD_ATTR_FLAGS_RWSI,
+		get_name,
+		OTM_HDMI_COLOR_SPACE_RGB,
+		0, OTM_HDMI_COLOR_SPACE_COUNT - 1);
+
+	PD_DECLARE_ATTRIBUTE_UINT(declare, table,
+		OTM_HDMI_ATTR_ID_PIXEL_FORMAT_OUTPUT,
+		PD_ATTR_FLAGS_RWS, get_name,
+		OTM_HDMI_OPF_RGB444, 0, OTM_HDMI_OPF_COUNT - 1);
+
+	PD_DECLARE_ATTRIBUTE_UINT(declare, table,
+		OTM_HDMI_ATTR_ID_PIXEL_DEPTH,
+		PD_ATTR_FLAGS_RWS,
+		get_name,
+		OTM_HDMI_OPD_24BIT,
+		0, OTM_HDMI_PIXEL_DEPTH_COUNT - 1);
+
+	PD_DECLARE_ATTRIBUTE_UINT(declare, table,
+		OTM_HDMI_ATTR_ID_BG_COLOR,
+		PD_ATTR_FLAGS_RWS,
+		get_name, 0xFF0000, 0x000000, 0xFFFFFF);
+
+	PD_DECLARE_ATTRIBUTE_UINT(declare, table,
+		OTM_HDMI_ATTR_ID_USE_EDID,
+		PD_ATTR_FLAGS_RWS,
+		get_name,
+		OTM_HDMI_USE_EDID_REAL,
+		OTM_HDMI_USE_EDID_NONE,
+		OTM_HDMI_USE_EDID_SAFE);
+
+	PD_DECLARE_ATTRIBUTE_UINT(declare, table,
+		OTM_HDMI_ATTR_ID_DEBUG,
+		PD_ATTR_FLAGS_RWS,
+		get_name,
+		LOG_LEVEL_ERROR,
+		__LOG_LEVEL_MIN, __LOG_LEVEL_MAX);
+
+	PD_DECLARE_ATTRIBUTE_UINT(declare, table,
+		OTM_HDMI_ATTR_ID_VERSION_MAJOR,
+		PD_ATTR_FLAGS_RS, get_name, 0, 0, 100);
+
+	PD_DECLARE_ATTRIBUTE_UINT(declare, table,
+		OTM_HDMI_ATTR_ID_VERSION_MINOR,
+		PD_ATTR_FLAGS_RS, get_name, 4, 0, 9);
+
+	PD_DECLARE_ATTRIBUTE_UINT(declare, table,
+		OTM_HDMI_ATTR_ID_DISPLAY_PIPE,
+		PD_ATTR_FLAGS_RWS,
+		get_name,
+		OTM_HDMI_DISPLAY_ID_0,
+		OTM_HDMI_DISPLAY_ID_0,
+		OTM_HDMI_DISPLAY_ID_UNDEFINED);
+
+	PD_DECLARE_ATTRIBUTE_UINT(declare, table,
+		OTM_HDMI_ATTR_ID_PAR,
+		PD_ATTR_FLAGS_RWS,
+		get_name,
+		OTM_HDMI_PAR_NO_DATA,
+		OTM_HDMI_PAR_NO_DATA,
+		OTM_HDMI_PAR_16_9);
+
+	PD_DECLARE_ATTRIBUTE_UINT(declare, table,
+		OTM_HDMI_ATTR_ID_FAR,
+		PD_ATTR_FLAGS_RWS,
+		get_name,
+		OTM_HDMI_FAR_SAME_AS_PAR,
+		OTM_HDMI_FAR_16_9_TOP,
+		OTM_HDMI_FAR_16_9_SP_4_3);
+
+	PD_DECLARE_ATTRIBUTE_BOOL(declare, table,
+		OTM_HDMI_ATTR_ID_SLOW_DDC,
+		PD_ATTR_FLAGS_RWS, get_name, true);
+
+	PD_DECLARE_ATTRIBUTE_UINT(declare, table,
+		OTM_HDMI_ATTR_ID_AUDIO_CLOCK,
+		PD_ATTR_FLAGS_RWS,
+		get_name,
+		OTM_HDMI_AUDIO_CLOCK_36,
+		OTM_HDMI_AUDIO_CLOCK_24,
+		OTM_HDMI_AUDIO_CLOCK_16);
+
+	PD_DECLARE_ATTRIBUTE_BOOL(declare, table,
+		OTM_HDMI_ATTR_ID_AUDIO_STATUS,
+		PD_ATTR_FLAGS_RS, get_name, false);
+
+	PD_DECLARE_ATTRIBUTE_UINT(declare, table,
+		OTM_HDMI_ATTR_ID_TMDS_DELAY,
+		PD_ATTR_FLAGS_RWS, get_name, 400, 100, 500);
+
+	PD_DECLARE_ATTRIBUTE_BOOL(declare, table,
+		OTM_HDMI_ATTR_ID_COLOR_SPACE_EXT,
+		PD_ATTR_FLAGS_RWS, get_name, false);
+
+	PD_DECLARE_ATTRIBUTE_BOOL(declare, table,
+		OTM_HDMI_ATTR_ID_OUTPUT_CLAMP,
+		PD_ATTR_FLAGS_RWS, get_name, true);
+
+	PD_DECLARE_ATTRIBUTE_BOOL(declare, table,
+		OTM_HDMI_ATTR_ID_OUTPUT_DITHER,
+		PD_ATTR_FLAGS_RWS, get_name, false);
+
+	PD_DECLARE_ATTRIBUTE_BOOL(declare, table,
+		OTM_HDMI_ATTR_ID_HDCP_1P1,
+		PD_ATTR_FLAGS_RWS, get_name, true);
+
+	PD_DECLARE_ATTRIBUTE_UINT(declare, table,
+		OTM_HDMI_ATTR_ID_MUTE,
+		PD_ATTR_FLAGS_RWS,
+		get_name,
+		OTM_HDMI_MUTE_OFF,
+		OTM_HDMI_MUTE_OFF, OTM_HDMI_MUTE_BOTH);
+
+	PD_DECLARE_ATTRIBUTE_BOOL(declare, table,
+		OTM_HDMI_ATTR_ID_PURE_VIDEO,
+		PD_ATTR_FLAGS_RWS, get_name, false);
+
+	PD_DECLARE_ATTRIBUTE_BOOL(declare, table,
+		OTM_HDMI_ATTR_ID_HDCP_AUTO_PHY,
+		PD_ATTR_FLAGS_RWS, get_name, true);
+
+	PD_DECLARE_ATTRIBUTE_BOOL(declare, table,
+		OTM_HDMI_ATTR_ID_HDCP_AUTO_RETRY,
+		PD_ATTR_FLAGS_RWS, get_name, true);
+
+	PD_DECLARE_ATTRIBUTE_BOOL(declare, table,
+		OTM_HDMI_ATTR_ID_DVI,
+		PD_ATTR_FLAGS_RWS, get_name, false);
+
+	PD_DECLARE_ATTRIBUTE_UINT(declare, table,
+		OTM_HDMI_ATTR_ID_HDCP_RI_RETRY,
+		PD_ATTR_FLAGS_RWS, get_name, 40, 0, 50);
+
+	PD_DECLARE_ATTRIBUTE_UINT(declare, table,
+		OTM_HDMI_ATTR_ID_HDCP_DELAY,
+		PD_ATTR_FLAGS_RWS, get_name, 500, 0, 1000);
+
+	LOG_EXIT(LOG_LEVEL_HIGH, rc);
+	return rc;
+}
+EXPORT_SYMBOL(otm_hdmi_declare_attributes);
+
+ /**
+ * Description: static function that programs display and pfit registers.
+ *
+ * @ctx:		hdmi_context
+ * @mode:		mode requested
+ * @adjusted_mode:	adjusted mode
+ * @fb_width		:allocated frame buffer dimensions
+ * @fb_height		:allocated frame buffer dimensions
+ *
+ * Returns:	OTM_HDMI_SUCCESS on success
+ *		OTM_HDMI_ERR_INVAL on NULL input arguments
+ */
+static otm_hdmi_ret_t __hdmi_crtc_set_scaling(hdmi_context_t *ctx,
+					otm_hdmi_timing_t *mode,
+					otm_hdmi_timing_t *adjusted_mode,
+					int fb_width, int fb_height)
+{
+	otm_hdmi_ret_t rc = OTM_HDMI_SUCCESS;
+	int scalingtype = ctx->scaling_type;
+
+	/* program display related registers: dpssize and pipesrc, pfit */
+	rc = ipil_hdmi_crtc_mode_set_program_dspregs(&ctx->dev, scalingtype,
+						mode, adjusted_mode,
+						fb_width, fb_height);
+	if (rc != OTM_HDMI_SUCCESS)
+		pr_debug("\nfailed to set display registers\n");
+
+	return rc;
+}
+
+/**
+ * Description: Interface to program display and pfit registers.
+ *
+ * @context:		hdmi_context
+ * @mode:		mode requested
+ * @adjusted_mode:	adjusted mode
+ * @fb_width		:allocated frame buffer dimensions
+ * @fb_height		:allocated frame buffer dimensions
+ *
+ * Returns:	OTM_HDMI_SUCCESS on success
+ *		OTM_HDMI_ERR_INVAL on NULL input arguments
+ */
+otm_hdmi_ret_t otm_hdmi_crtc_set_scaling(void *context,
+			otm_hdmi_timing_t *mode,
+			otm_hdmi_timing_t *adjusted_mode,
+			int fb_width, int fb_height)
+{
+	hdmi_context_t *ctx = (hdmi_context_t *)context;
+
+	pr_debug("Enter %s\n", __func__);
+
+	/* NULL checks */
+	if (context == NULL || mode == NULL || adjusted_mode == NULL) {
+		pr_debug("\ninvalid arguments\n");
+		return OTM_HDMI_ERR_INVAL;
+	}
+
+	return __hdmi_crtc_set_scaling(ctx, mode, adjusted_mode,
+						fb_width, fb_height);
+}
+
+/**
+ * Description: crtc pll clk get function for hdmi.
+ *
+ * @context		:hdmi_context
+ * @adjusted_mode	:adjusted mode
+ * @pclk_khz:		tmds clk value for the best pll and is needed for audio.
+ *			This field has to be moved into OTM audio
+ *			interfaces when implemented
+ *
+ * Returns:	OTM_HDMI_SUCCESS on success
+ *		OTM_HDMI_ERR_INVAL on NULL input arguments
+ */
+otm_hdmi_ret_t otm_hdmi_crtc_pll_get(void *context,
+				otm_hdmi_timing_t *adjusted_mode,
+				uint32_t *pclk_khz)
+{
+	otm_hdmi_ret_t rc = OTM_HDMI_SUCCESS;
+	hdmi_context_t *ctx = (hdmi_context_t *)context;
+
+	/* program hdmi mode timing registers */
+	rc = ipil_hdmi_crtc_mode_get_program_dpll(&ctx->dev,
+						adjusted_mode->dclk);
+	if (rc != OTM_HDMI_SUCCESS) {
+		pr_debug("\nfailed to program dpll\n");
+		return rc;
+	}
+
+	*pclk_khz = ctx->dev.clock_khz;
+
+	return rc;
+}
+
+/**
+ * Description: crtc mode set function for hdmi.
+ *
+ * @context		:hdmi_context
+ * @mode		:mode requested
+ * @adjusted_mode	:adjusted mode
+ * @fb_width		:allocated frame buffer dimensions
+ * @fb_height		:allocated frame buffer dimensions
+ * @pclk_khz:		tmds clk value for the best pll and is needed for audio.
+ *			This field has to be moved into OTM audio
+ *			interfaces when implemented
+ *
+ * Returns:	OTM_HDMI_SUCCESS on success
+ *		OTM_HDMI_ERR_INVAL on NULL input arguments
+ */
+otm_hdmi_ret_t otm_hdmi_crtc_mode_set(void *context, otm_hdmi_timing_t *mode,
+			otm_hdmi_timing_t *adjusted_mode, int fb_width,
+			int fb_height, uint32_t *pclk_khz)
+{
+	otm_hdmi_ret_t rc = OTM_HDMI_SUCCESS;
+	hdmi_context_t *ctx = (hdmi_context_t *)context;
+	int scalingtype = ctx->scaling_type;
+
+	pr_debug("Enter %s\n", __func__);
+
+	/* NULL checks */
+	if (context == NULL || mode == NULL || adjusted_mode == NULL
+						|| pclk_khz == NULL) {
+		pr_debug("\ninvalid arguments\n");
+		return OTM_HDMI_ERR_INVAL;
+	}
+
+	/* prepare for crtc mode set. This includes any hdmi unit reset etc. */
+	rc = ipil_hdmi_crtc_mode_set_prepare(&ctx->dev);
+	if (rc != OTM_HDMI_SUCCESS) {
+		pr_debug("\nfailed in preparing for mode set\n");
+		return rc;
+	}
+
+	/* program display related registers: dpssize and pipesrc, pfit
+	 * according to the preferred scaling mode.
+	 */
+	rc = __hdmi_crtc_set_scaling(ctx, mode, adjusted_mode,
+					fb_width, fb_height);
+
+	/* program hdmi mode timing registers */
+	rc = ipil_hdmi_crtc_mode_set_program_timings(&ctx->dev,
+						scalingtype, mode,
+						adjusted_mode);
+	if (rc != OTM_HDMI_SUCCESS) {
+		pr_debug("\nfailed to set timing registers\n");
+		return rc;
+	}
+
+	/* program hdmi mode timing registers */
+	rc = ipil_hdmi_crtc_mode_set_program_dpll(&ctx->dev,
+						adjusted_mode->dclk);
+	if (rc != OTM_HDMI_SUCCESS) {
+		pr_debug("\nfailed to program dpll\n");
+		return rc;
+	}
+	*pclk_khz = ctx->dev.clock_khz;
+
+	/* program hdmi mode timing registers */
+	rc = ipil_hdmi_crtc_mode_set_program_pipeconf(&ctx->dev);
+	if (rc != OTM_HDMI_SUCCESS) {
+		pr_debug("\nfailed to program pipeconf\n");
+	} else {
+		/* destroy saved HDMI data after mode set */
+		ipil_hdmi_destroy_saved_data(&ctx->dev);
+	}
+
+	pr_debug("Exit%s\n", __func__);
+
+	return rc;
+}
+
+/**
+ * encoder mode set function for hdmi
+ * @context:		hdmi_context
+ * @mode:		mode requested
+ * @adjusted_mode:	adjusted mode
+ *
+ * Returns:	OTM_HDMI_SUCCESS on success
+ *		OTM_HDMI_ERR_INVAL on NULL input arguments
+ * encoder mode set function for hdmi. enables phy.
+ * set correct polarity for the current mode.
+ */
+otm_hdmi_ret_t otm_hdmi_enc_mode_set(void *context, otm_hdmi_timing_t *mode,
+			otm_hdmi_timing_t *adjusted_mode)
+{
+	otm_hdmi_ret_t rc = OTM_HDMI_SUCCESS;
+	hdmi_context_t *ctx = (hdmi_context_t *)context;
+	bool is_monitor_hdmi;
+
+	/* NULL checks */
+	if (context == NULL || mode == NULL || adjusted_mode == NULL) {
+		pr_debug("\ninvalid arguments\n");
+		return OTM_HDMI_ERR_INVAL;
+	}
+
+	is_monitor_hdmi = otm_hdmi_is_monitor_hdmi(ctx);
+	pr_debug("\nMonitor Mode: %s\n", is_monitor_hdmi ? "HDMI" : "DVI");
+
+	/* handle encoder mode set */
+	rc = ipil_hdmi_enc_mode_set(&ctx->dev, mode, adjusted_mode,
+					is_monitor_hdmi);
+	if (rc != OTM_HDMI_SUCCESS) {
+		pr_debug("\nfailed in programing enc mode set\n");
+		return rc;
+	}
+
+	/* Enable AVI infoframes for HDMI mode */
+	if (is_monitor_hdmi) {
+		rc = otm_hdmi_infoframes_set_avi(context, mode);
+		if (rc != OTM_HDMI_SUCCESS)
+			pr_debug("\nfailed to program avi infoframe\n");
+	} else {/* Disable all inofoframes for DVI mode */
+		rc = otm_hdmi_disable_all_infoframes(context);
+		if (rc != OTM_HDMI_SUCCESS)
+			pr_debug("\nfailed to disable all infoframes\n");
+	}
+
+	return rc;
+}
+
+/**
+ * Restore HDMI registers and enable the display
+ * @context	:hdmi_context
+ * @connected	:hdmi connected or not
+ *
+ * Returns:	none
+ * Restore HDMI registers and enable the display
+ */
+void otm_hdmi_restore_and_enable_display(void *context, bool connected)
+{
+	pr_debug("Entered %s\n", __func__);
+	if (NULL != context) {
+		if (connected) {
+			ipil_hdmi_restore_and_enable_display(
+					&((hdmi_context_t *)context)->dev);
+			/* restore data island packets */
+			if (otm_hdmi_is_monitor_hdmi(context)) {
+				ipil_hdmi_restore_data_island(
+					&((hdmi_context_t *)context)->dev);
+			}
+		} else {
+			ipil_hdmi_destroy_saved_data(
+					&((hdmi_context_t *)context)->dev);
+		}
+#ifdef OTM_HDMI_HDCP_ENABLE
+		/* inform HDCP about resume */
+		if (otm_hdmi_hdcp_set_power_save(context, false)
+						== false)
+			pr_debug("failed to resume hdcp\n");
+#endif
+	}
+}
+
+/**
+ * save HDMI display registers
+ * @context	:hdmi_context
+ * @connected	:hdmi connected or not
+ *
+ * Returns:	none
+ * save HDMI display registers
+ */
+void otm_hdmi_save_display_registers(void *context, bool connected)
+{
+	pr_debug("Entered %s\n", __func__);
+	if (NULL != context) {
+		if (connected) {
+			ipil_hdmi_save_display_registers(
+					&((hdmi_context_t *)context)->dev);
+			/* save data island packets */
+			if (otm_hdmi_is_monitor_hdmi(context)) {
+				ipil_hdmi_save_data_island(
+					&((hdmi_context_t *)context)->dev);
+			}
+		} else {
+			ipil_hdmi_destroy_saved_data(
+					&((hdmi_context_t *)context)->dev);
+		}
+	}
+}
+
+/**
+ * get vic from HDMI display registers
+ * @context	:hdmi_context
+ *
+ * Returns:	vic
+ */
+uint8_t otm_hdmi_get_vic(void *context)
+{
+	pr_debug("Entered %s\n", __func__);
+	if (NULL != context)
+		return ipil_hdmi_get_vic_from_data_island(&((hdmi_context_t *)context)->dev);
+	return 0;
+}
+
+
+/**
+ * notify security component of hdcp and hdmi cable status
+ *
+ * @hdcp	HDCP status: true if phase1 is enabled
+ * @cable	HDMI connection status: true if connected
+ *
+ * Returns:	none
+ */
+void otm_hdmi_update_security_hdmi_hdcp_status(bool hdcp, bool cable)
+{
+	ps_hdmi_update_security_hdmi_hdcp_status(hdcp, cable);
+}
+
+/**
+ * disable HDMI display
+ * @context:	hdmi_context
+ *
+ * Returns:	none
+ * disable HDMI display
+ */
+void otm_disable_hdmi(void *context)
+{
+	pr_debug("Entered %s\n", __func__);
+	if (NULL != context) {
+#ifdef OTM_HDMI_HDCP_ENABLE
+		/* inform HDCP about suspend */
+		if (otm_hdmi_hdcp_set_power_save(context, true)
+						== false)
+			pr_debug("failed to suspend hdcp\n");
+#endif
+		/* disable HDMI */
+		ipil_disable_hdmi(&((hdmi_context_t *)context)->dev);
+	}
+}
+
+/*
+ *
+ * Internal scripts wrapper functions.
+ *
+ */
+
+/* Starting this off, but all scripts/unit test helpers should move
+ * to another file.
+ */
+
+#ifdef OTM_HDMI_UNIT_TEST
+
+/**
+ * test_otm_hdmi_report_edid() - Report current EDID information
+ *
+ * This routine simply dumps the EDID information
+ *
+ * Returns - nothing
+ */
+void test_otm_hdmi_report_edid(void)
+{
+	edid_info_t *edid = NULL;
+	if (NULL == g_context) {
+		LOG_PRINT(LOG_LEVEL_HIGH,
+			     "Cant print EDID, Initialize otm_hdmi first!\n");
+		return;
+	}
+	edid = &g_context->edid_int;
+	if (NULL == edid) {
+		LOG_PRINT(LOG_LEVEL_HIGH,
+			     "EDID not initialized in driver.\n");
+		return;
+	}
+	__hdmi_report_edid(g_context, edid);
+}
+EXPORT_SYMBOL_GPL(test_otm_hdmi_report_edid);
+#endif
+
+#ifdef OTM_HDMI_UNIT_TEST
+
+/**
+ * get_hdmi_context() - Getting hdmi_context
+ *
+ * This routine gives a handle to hdmi_context
+ * to be used with other function calls like
+ * set_attribute which requires hdmi_context
+ * as one of the params
+ *
+ * Returns - hdmi_context
+ */
+void *otm_hdmi_get_context(void)
+{
+	return (void *)g_context;
+}
+EXPORT_SYMBOL_GPL(otm_hdmi_get_context);
+#endif
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/pil/include/hdcp_api.h b/drivers/external_drivers/intel_media/otm_hdmi/pil/include/hdcp_api.h
new file mode 100644
index 0000000..63f0c4e
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/pil/include/hdcp_api.h
@@ -0,0 +1,170 @@
+/*
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  Contact Information:
+
+  Intel Corporation
+  2200 Mission College Blvd.
+  Santa Clara, CA  95054
+
+  BSD LICENSE
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#ifndef HDCP_API_H
+#define HDCP_API_H
+
+#include <linux/types.h>
+#include "hdmi_internal.h"
+
+/**
+ * Description: function to update HPD status
+ *
+ * @hdmi_context handle hdmi_context
+ * @hpd	 HPD high/low status
+ *
+ * Returns:	true on success
+ *		false on failure
+ */
+bool otm_hdmi_hdcp_set_hpd_state(hdmi_context_t *hdmi_context,
+					bool hpd);
+
+/**
+ * Description: function to update power save (suspend/resume) status
+ *
+ * @hdmi_context handle hdmi_context
+ * @suspend	suspend/resume status
+ *
+ * Returns:	true on success
+ *		false on failure
+ */
+bool otm_hdmi_hdcp_set_power_save(hdmi_context_t *hdmi_context,
+					bool suspend);
+
+/**
+ * Description: function to update display_power_on status
+ *
+ * @hdmi_context handle hdmi_context
+ * @display_power_on	display power on/off status
+ *
+ * Returns:	true on success
+ *		false on failure
+ */
+bool otm_hdmi_hdcp_set_dpms(hdmi_context_t *hdmi_context,
+					bool display_power_on);
+
+/**
+ * Description: Function to check HDCP encryption status
+ *
+ * @hdmi_context handle hdmi_context
+ *
+ * Returns:	true if encrypting
+ *		else false
+ */
+bool otm_hdmi_hdcp_enc_status(hdmi_context_t *hdmi_context);
+
+/**
+ * Description: Function to check HDCP Phase3 Link status
+ *
+ * Returns:	true if link is verified Ri Matches
+ *		else false
+ */
+bool otm_hdmi_hdcp_link_status(hdmi_context_t *hdmi_context);
+
+/**
+ * Description: Function to read BKSV and validate
+ *
+ * @hdmi_context handle hdmi_context
+ * @bksv	 buffer to store bksv
+ *
+ * Returns:	true on success
+ *		false on failure
+ */
+bool otm_hdmi_hdcp_read_validate_bksv(hdmi_context_t *hdmi_context,
+					uint8_t *bksv);
+
+/**
+ * Description: function to enable HDCP
+ *
+ * @hdmi_context handle hdmi_context
+ * @refresh_rate vertical refresh rate of the video mode
+ *
+ * Returns:	true on success
+ *		false on failure
+ */
+bool otm_hdmi_hdcp_enable(hdmi_context_t *hdmi_context,
+					int refresh_rate);
+
+/**
+ * Description: function to disable HDCP
+ *
+ * @hdmi_context handle hdmi_context
+ *
+ * Returns:	true on success
+ *		false on failure
+ */
+bool otm_hdmi_hdcp_disable(hdmi_context_t *hdmi_context);
+
+/**
+ * Description: hdcp init function
+ *
+ * @hdmi_context handle hdmi_context
+ * @ddc_rd_wr:  pointer to ddc read write function
+ *
+ * Returns:	true on success
+ *		false on failure
+ */
+bool otm_hdmi_hdcp_init(hdmi_context_t *hdmi_context,
+	int (*ddc_rd_wr)(bool, uint8_t, uint8_t, uint8_t *, int));
+
+#endif /* HDCP_API_H */
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/pil/include/otm_hdmi.h b/drivers/external_drivers/intel_media/otm_hdmi/pil/include/otm_hdmi.h
new file mode 100644
index 0000000..bbd2309
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/pil/include/otm_hdmi.h
@@ -0,0 +1,563 @@
+/*
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+	Copyright(c) 2011 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  Contact Information:
+
+  Intel Corporation
+  2200 Mission College Blvd.
+  Santa Clara, CA  95054
+
+  BSD LICENSE
+
+	Copyright(c) 2011 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#ifndef _OTM_HDMI_H
+#define _OTM_HDMI_H
+
+
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
+#include <drm/drm_edid.h>
+
+#include "otm_hdmi_types.h"
+#include "otm_hdmi_defs.h"
+#include "otm_hdmi_eld.h"
+#include <drm/drmP.h>
+
+/**
+ * Attribute name getting routine
+ */
+char *__pd_attr_get_name(otm_hdmi_attribute_id_t id);
+
+/**
+ * Type definition based on the function signature above
+ */
+typedef otm_hdmi_ret_t(*pd_attr_set_t)(otm_hdmi_attribute_t *table,
+				otm_hdmi_attribute_id_t id,
+				void *value,
+				otm_hdmi_attribute_flag_t flags);
+
+/**
+ * Attribute name getting routine
+ */
+char *__pd_attr_get_name(otm_hdmi_attribute_id_t id);
+
+/**
+ * Getting given attribute
+ * @context     : opaque hdmi context
+ * @id          : attribute id
+ * @attribute   : user provided buffer for attribute details
+ * @log         : a hint wether port driver should log the call
+ *
+ * Returns otm_hdmi_ret_t check. Getting given attribute values
+ */
+otm_hdmi_ret_t otm_hdmi_get_attribute(void *context,
+						otm_hdmi_attribute_id_t id,
+						otm_hdmi_attribute_t *attribute,
+						bool log);
+
+/**
+ * Type definition based on the function signature above
+ */
+typedef char *(*pd_attr_get_name_t)(otm_hdmi_attribute_id_t id);
+
+/**
+ * Attribute declaration / setting macros
+ */
+#define PD_DECLARE_ATTRIBUTE_BOOL(f_d, t, id, flags, \
+	f_n, value) \
+	f_d(t, id, OTM_HDMI_ATTR_TYPE_BOOLEAN, flags, \
+	f_n(id), (void *) value, 0, 0)
+
+#define PD_DECLARE_ATTRIBUTE_BOOL_CUSTOM(f_d, t, id, flags, \
+	name, value) \
+	f_d(t, id, OTM_HDMI_ATTR_TYPE_BOOLEAN, flags, \
+	name, (void *) value, 0, 0)
+
+#define PD_DECLARE_ATTRIBUTE_UINT(f_d, t, id, flags, \
+	f_n, value, min, max) \
+	f_d(t, id, OTM_HDMI_ATTR_TYPE_UINT, flags, \
+	f_n(id), (void *) value, min, max)
+
+#define PD_DECLARE_ATTRIBUTE_UINT_CUSTOM(f_d, t, id, flags, \
+	name, value, min, max) \
+	f_d(t, id, OTM_HDMI_ATTR_TYPE_UINT, flags, \
+	name, (void *) value, min, max)
+
+#define PD_DECLARE_ATTRIBUTE_STRING(f_d, t, id, \
+	flags, f_n, value) \
+	f_d(t, id, OTM_HDMI_ATTR_TYPE_STRING, flags, \
+	f_n(id), (void *) value, 0, 0)
+
+#define PD_DECLARE_ATTRIBUTE_STRING_CUSTOM(f_d, t, id, \
+	flags, name, value) \
+	f_d(t, id, OTM_HDMI_ATTR_TYPE_STRING, flags,	\
+	name, (void *) value, 0, 0)
+
+/**
+ * Attribute values accessor macros
+ */
+#define PD_ATTR_BOOL(attr) ((attr).content._bool.value)
+#define PD_ATTR_UINT(attr) ((attr).content._uint.value)
+
+/**
+ * Shortcuts for common flags combinations
+ */
+#define PD_ATTR_FLAGS_RWSI (OTM_HDMI_ATTR_FLAG_WRITE | \
+			OTM_HDMI_ATTR_FLAG_SUPPORTED | \
+			OTM_HDMI_ATTR_FLAG_INTERNAL)
+
+#define PD_ATTR_FLAGS_RWS (OTM_HDMI_ATTR_FLAG_WRITE | \
+			OTM_HDMI_ATTR_FLAG_SUPPORTED)
+
+#define PD_ATTR_FLAGS_RS (OTM_HDMI_ATTR_FLAG_SUPPORTED)
+
+/**
+ * Display timing information
+ */
+typedef struct {
+	unsigned short width;		/* width                            */
+	unsigned short height;		/* height                           */
+	otm_hdmi_refresh_t refresh;	/* refresh rate                     */
+	unsigned long dclk;		/* refresh rate dot clock in kHz    */
+	unsigned short htotal;		/* horizontal total                 */
+	unsigned short hblank_start;	/* horizontal blank start           */
+	unsigned short hblank_end;	/* horizontal blank end             */
+	unsigned short hsync_start;	/* horizontal sync start            */
+	unsigned short hsync_end;	/* horizontal sync end              */
+	unsigned short vtotal;		/* vertical total                   */
+	unsigned short vblank_start;	/* vertical blank start             */
+	unsigned short vblank_end;	/* vertical blank end               */
+	unsigned short vsync_start;	/* vertical sync start              */
+	unsigned short vsync_end;	/* vertical sync end                */
+	unsigned long mode_info_flags;	/* flags                            */
+	otm_hdmi_stereo_t stereo_type;	/* stereo mode type                 */
+	unsigned long metadata;		/* port driver specific information */
+} otm_hdmi_timing_t;
+
+/* set timing from cea modes */
+int otm_hdmi_timing_from_cea_modes(unsigned char *buffer,
+				   otm_hdmi_timing_t *pdts);
+
+/* get timings of a mode */
+const otm_hdmi_timing_t *otm_hdmi_get_mode_timings(void *context,
+					int hdisplay,
+					int vdisplay,
+					int vrefresh);
+
+/**
+ * otm_hdmi_hpd_init - Initialize and enable HPD driver service.
+ *
+ * No input arguments
+ *
+ * Returns - OTM_HDMI_SUCCESS on successful initialization
+ * Returns - OTM_HDMI_ERR_FAILED on init failure
+ */
+otm_hdmi_ret_t otm_hdmi_hpd_init(void);
+
+/**
+ * otm_hdmi_hpd_deinit - Deinit HPD driver service.
+ *
+ * No input arguments
+ *
+ * Returns - OTM_HDMI_SUCCESS
+ */
+otm_hdmi_ret_t otm_hdmi_hpd_deinit(void);
+
+
+/**
+ * otm_hdmi_get_hpd_pin - get hdmi hpd pin number.
+ *
+ * No input arguments
+ *
+ * Returns - pin number
+ */
+unsigned int otm_hdmi_get_hpd_pin(void);
+
+
+/**
+ * otm_hdmi_override_cable_status - override hdmi hpd cable status.
+ *
+ * Input: override state and auto test state
+ */
+void otm_hdmi_override_cable_status(bool state, bool auto_state);
+
+
+
+
+
+
+/**
+ * otm_hdmi_hpd_callback_register - Register a callback for HPD events
+ * @context: hdmi device context
+ * @phdmi_irq_cb: function pointer for hotplug/unplug IRQ callbacks.
+ * @data: data for irq callback
+ *
+ * Perform HPD IRQ call back initialization
+ *
+ * Returns - check otm_hdmi_ret_t
+ */
+otm_hdmi_ret_t otm_hdmi_hpd_callback_register(void *context,
+					      irqreturn_t (*phdmi_irq_cb) (int, void*),
+					      void *data);
+
+/* parse the raw edid and fill the capability table */
+otm_hdmi_ret_t otm_hdmi_edid_parse(void *ctx, otm_hdmi_use_edid_t use_edid);
+
+/* parse extension EDID blocks and fill the capability table */
+otm_hdmi_ret_t otm_hdmi_edid_extension_parse(void *context,
+			 struct edid *raw_edid,
+			 struct i2c_adapter *adapter);
+
+/* prepare hdmi eld packet and copy it to the input buffer */
+otm_hdmi_ret_t otm_hdmi_get_eld(void *ctx, otm_hdmi_eld_t *eld);
+
+/* init hdmi device driver */
+otm_hdmi_ret_t otm_hdmi_device_init(void **context, struct pci_dev *pdev);
+
+/*deinit hdmi device driver */
+void otm_hdmi_deinit(void *context);
+
+
+/* read edid information */
+unsigned char *otm_hdmi_read_edid(void);
+
+/* turn HDMI power rails on */
+bool otm_hdmi_power_rails_on(void);
+
+/* turn HDMI power rails off */
+bool otm_hdmi_power_rails_off(void);
+
+/* turn HDMI power islands on */
+bool otm_hdmi_power_islands_on(void);
+
+/* turn HDMI power islands off */
+void otm_hdmi_power_islands_off(void);
+
+/* enable/disable IRQ and CPD_HPD */
+bool otm_hdmi_enable_hpd(bool enable);
+
+/* control HDMI vblank interrupt */
+void otm_hdmi_vblank_control(struct drm_device *dev, bool on);
+
+/*
+ * otm_hdmi_get_cable_status - Get HDMI cable connection status
+ * @context: hdmi device context
+ *
+ * Returns - boolean state.
+ * true - HDMI cable connected
+ * false - HDMI cable disconnected
+ */
+bool otm_hdmi_get_cable_status(void *context);
+
+/* get pixel clock range */
+otm_hdmi_ret_t otm_hdmi_get_pixel_clock_range(unsigned int *pc_min,
+					unsigned int *pc_max);
+
+/*
+ *  Getting given attribute
+ *  @param [in ] id      : attribute id
+ *  @param [out] data    : user provided buffer for attribute details
+ *  @param [in ] log     : a hint wether port driver should log the call
+ */
+otm_hdmi_ret_t otm_hdmi_get_attribute(void *context,
+					otm_hdmi_attribute_id_t id,
+					otm_hdmi_attribute_t *attribute,
+					bool log);
+
+/* check the mode is preferred or not */
+bool otm_hdmi_is_preferred_mode(int hdisplay,
+					int vdisplay, int refresh);
+
+/* set the raw edid into HDMI context */
+otm_hdmi_ret_t otm_hdmi_set_raw_edid(void *context, char *raw_edid);
+
+/* get the raw edid from HDMI context */
+otm_hdmi_ret_t otm_hdmi_get_raw_edid(void *context, char **raw_edid);
+
+/* set the scaling type for HDMI display */
+otm_hdmi_ret_t otm_hdmi_set_scaling_type(void *context, int scaling);
+
+/* check the connection is hdmi or dvi */
+bool otm_hdmi_is_monitor_hdmi(void *context);
+
+ /**
+ * Description: program display and pfit registers.
+ *
+ * @context:		hdmi_context
+ * @mode:		mode requested
+ * @adjusted_mode:	adjusted mode
+ * @fb_width:		allocated frame buffer dimensions
+ * @fb_height:		allocated frame buffer dimensions
+ *
+ * Returns:	OTM_HDMI_SUCCESS on success
+ *		OTM_HDMI_ERR_INVAL on NULL input arguments
+ */
+otm_hdmi_ret_t otm_hdmi_crtc_set_scaling(void *context,
+			otm_hdmi_timing_t *mode,
+			otm_hdmi_timing_t *adjusted_mode,
+			int fb_width, int fb_height);
+
+/*
+ * Description: crtc pll get function for hdmi.
+ *
+ * @context:		hdmi_context
+ * @adjusted_mode:	adjusted mode
+ * @pclk_khz:		tmds clk value for the best pll and is needed for audio.
+ *			This field has to be moved into OTM audio
+ *			interfaces when implemented
+ *
+ * Returns:	OTM_HDMI_SUCCESS on success
+ *		OTM_HDMI_ERR_INVAL on NULL input arguments
+ */
+otm_hdmi_ret_t otm_hdmi_crtc_pll_get(void *context,
+                                otm_hdmi_timing_t *adjusted_mode,
+                                uint32_t *pclock_khz);
+
+/*
+ * Description: crtc mode set function for hdmi.
+ *
+ * @context:		hdmi_context
+ * @mode:		mode requested
+ * @adjusted_mode:	adjusted mode
+ * @fb_width, fb_height:allocated frame buffer dimensions
+ * @pclk_khz:		tmds clk value for the best pll and is needed for audio.
+ *			This field has to be moved into OTM audio
+ *			interfaces when implemented
+ *
+ * Returns:	OTM_HDMI_SUCCESS on success
+ *		OTM_HDMI_ERR_INVAL on NULL input arguments
+ */
+otm_hdmi_ret_t otm_hdmi_crtc_mode_set(void *context, otm_hdmi_timing_t *mode,
+			otm_hdmi_timing_t *adjusted_mode, int fb_width,
+			int fb_height, uint32_t *pclock_khz);
+
+/*
+ * Description: encoder mode set function for hdmi. enables phy.
+ *		set correct polarity for the current mode.
+ *
+ * @context:		hdmi_context
+ * @mode:		mode requested
+ * @adjusted_mode:	adjusted mode
+ *
+ * Returns:	OTM_HDMI_SUCCESS on success
+ *		OTM_HDMI_ERR_INVAL on NULL input arguments
+ */
+otm_hdmi_ret_t otm_hdmi_enc_mode_set(void *context, otm_hdmi_timing_t *mode,
+			otm_hdmi_timing_t *adjusted_mode);
+
+/*
+ * Description: set avi infoframe based on mode
+ *
+ * @context:            hdmi_context
+ * @mode:               mode requested
+ *
+ * Returns:     OTM_HDMI_SUCCESS on success
+ *              OTM_HDMI_ERR_INVAL on NULL input arguments
+ */
+otm_hdmi_ret_t otm_hdmi_infoframes_set_avi(void *context,
+					   otm_hdmi_timing_t *mode);
+
+/*
+ * Description:	disable all infoframes
+ *
+ * @context:	hdmi_context
+ *
+ * Returns:	OTM_HDMI_ERR_NULL_ARG on NULL parameters
+ *		OTM_HDMI_SUCCESS on success
+ */
+otm_hdmi_ret_t otm_hdmi_disable_all_infoframes(void *context);
+
+/*
+ * Description: save hdmi display registers
+ *
+ * @context:	hdmi_context
+ *
+ * Returns:	none
+ */
+void otm_hdmi_save_display_registers(void *context, bool connected);
+
+/**
+ * get vic from HDMI display registers
+ * @context	:hdmi_context
+ *
+ * Returns:	vic
+ */
+uint8_t otm_hdmi_get_vic(void *context);
+
+
+/*
+ * Description: disable HDMI display
+ *
+ * @context:	hdmi_context
+ *
+ * Returns:	none
+ */
+void otm_disable_hdmi(void *context);
+
+/*
+ * Description: restore hdmi display registers and enable the display
+ *
+ * @context:	hdmi_context
+ *
+ * Returns:	none
+ */
+void otm_hdmi_restore_and_enable_display(void *context, bool connected);
+
+/**
+ * Description: notify security component of hdcp and hdmi cable status
+ *
+ * @hdcp	HDCP status: true if phase1 is enabled
+ * @cable	HDMI connection status: true if connected
+ *
+ * Returns:	none
+ */
+void otm_hdmi_update_security_hdmi_hdcp_status(bool hdcp, bool cable);
+
+
+/* TODO: Need refactor the logging and attr table */
+
+/*
+ * Logging macros
+ */
+
+/*
+ * Table of attributes
+ */
+extern otm_hdmi_attribute_t otm_hdmi_attributes_table[];
+
+#define ATTR otm_hdmi_attributes_table
+
+typedef enum {
+	LOG_LEVEL_ERROR = 0,	/* Error messages; Will always be printed */
+	__LOG_LEVEL_MIN = LOG_LEVEL_ERROR,
+	/* Add log levels below this line */
+	LOG_LEVEL_HIGH = 1,	/* Printed if 'debug' is set to 1 or higher */
+	LOG_LEVEL_LOW,	/* Printed if 'debug' is set to 2 or higher */
+	LOG_LEVEL_VBLANK,	/* Printed if 'debug' at highest level */
+	LOG_LEVEL_DETAIL, /* Print detailed info */
+	/* Add log levels above this line */
+	__LOG_LEVEL_TEMP_UPPER__,	/* DO NOT USE */
+	__LOG_LEVEL_MAX = __LOG_LEVEL_TEMP_UPPER__ - 1,
+} log_level_t;
+
+/* Used to log entry in to a function */
+#define LOG_ENTRY(log_level) \
+	if ((log_level) <= (int) PD_ATTR_UINT(ATTR[OTM_HDMI_ATTR_ID_DEBUG])) \
+		printk("OTM_HDMI: Entering %s\n", __func__)
+
+/* Used to log exit from a function */
+#define LOG_EXIT(log_level, rc) \
+	if ((log_level) <= (int) PD_ATTR_UINT(ATTR[OTM_HDMI_ATTR_ID_DEBUG])) \
+		printk("OTM_HDMI: Exiting %s with %d\n", __func__, rc)
+
+#define LOG_PRINT(log_level, args...) \
+	if ((log_level) <= (int) PD_ATTR_UINT(ATTR[OTM_HDMI_ATTR_ID_DEBUG])) \
+		printk("OTM_HDMI:" args)
+
+#define LOG_ERROR(msg, args...) LOG_PRINT(LOG_LEVEL_ERROR, msg, ##args)
+
+/*
+ * Bits in 'mode_info_flags' field
+ */
+#define PD_SCAN_INTERLACE     0x80000000 /* Interlaced pipe configuration     */
+#define PD_PIXEL_REPLICATION  0x40000000 /* Mode uses pixel replication       */
+#define PD_HSYNC_HIGH         0x20000000 /* HSYNC is active high              */
+#define PD_VSYNC_HIGH         0x10000000 /* VSYNC is active high              */
+#define PD_CLOCK_OVERSAMPLE   0x00020000 /* Indicates dot clock is a multiple */
+					 /* of the MDVO clock; See VDC_SetMode*/
+#define PD_EVO_DITHER         0x00008000 /* Enable EVO dithering              */
+#define PD_EVO_FORMAT_8       0x00004000 /* Set EVO 8 bit mode                */
+#define PD_EVO_CLK_INV        0x00002000 /* Invert EVO pixel clock            */
+#define PD_WSS_WORKAROUND     0x00000800 /* PAL Wide Screen Signal workaround */
+#define PD_DTV_MODE           0x00000400 /* Digital Panel mode                */
+#define PD_SVBI               0x00000010 /* Software VBI supported timings    */
+#define PD_AR_16_BY_9         OTM_HDMI_PAR_16_9 /* 16:9 aspect ratio          */
+#define OTM_HDMI_COLOR_RANGE_MASK	0x0c
+
+#define AVI_VIC_LOC 4
+
+/*
+ * Description: report edid tool helper function
+ *
+ * Returns:	none
+ */
+#ifdef OTM_HDMI_UNIT_TEST
+void test_otm_hdmi_report_edid(void);
+#endif
+
+/*
+ * Description: function to get hdmi_context
+ *		handle
+ *
+ * Returns:	hdmi_context
+ */
+extern void *otm_hdmi_get_context(void);
+
+typedef struct _sqword {
+	union {
+		unsigned long long quad_part;
+		struct {
+			unsigned long low_part;
+			unsigned long high_part;
+		} u;
+		struct {
+			uint8_t byte[8];
+		};
+	};
+} sqword_t;
+
+#endif /* _OTM_HDMI_H */
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/pil/include/otm_hdmi_defs.h b/drivers/external_drivers/intel_media/otm_hdmi/pil/include/otm_hdmi_defs.h
new file mode 100644
index 0000000..f29d77c
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/pil/include/otm_hdmi_defs.h
@@ -0,0 +1,1153 @@
+/*
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  Contact Information:
+
+  Intel Corporation
+  2200 Mission College Blvd.
+  Santa Clara, CA  95054
+
+  BSD LICENSE
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+#ifndef _OTM_HDMI_DEFS_H
+#define _OTM_HDMI_DEFS_H
+
+
+#include <linux/types.h>
+
+#include "otm_hdmi_types.h"
+
+/* Definition for debug print format */
+#ifdef pr_fmt
+#undef pr_fmt
+#define pr_fmt(fmt)	"[otm_hdmi]: " fmt
+#endif
+
+/**
+ Existing port driver IDs with room for user specified port drivers.
+ When communicating with a specific port driver the port id must be passed.
+*/
+typedef enum {
+	OTM_HDMI_MIN_SUPPORTED_DRIVERS = 0, /* Port Driver IDs start at this
+						value */
+
+	OTM_HDMI_ID_INTTVENC = OTM_HDMI_MIN_SUPPORTED_DRIVERS, /* CVBS TV
+								encoder */
+	OTM_HDMI_ID_INTTVENC_COMPONENT,	/* Component TV encoder */
+	OTM_HDMI_ID_HDMI,		/* HDMI */
+	OTM_HDMI_ID_USER_MIN,	/* Begin user defined drivers */
+	OTM_HDMI_ID_USER_MAX = 8,	/* End user defined drivers */
+
+	OTM_HDMI_MAX_SUPPORTED_DRIVERS	/* Maximum number of port drivers. */
+} otm_hdmi_id_t;
+
+/**
+Defined port driver attributes.  Which ones are supported (and exactly how)
+can vary from one port driver to another.  See the Intel� CE Media Processors
+GDL 3.0 Programming Guide for details on the attributes supported by
+each port driver.
+
+The following Attributes require "elevated" privileges:
+OTM_HDMI_ATTR_ID_HDCP
+OTM_HDMI_ATTR_ID_HDCP_AUTO_MUTE
+OTM_HDMI_ATTR_ID_POWER
+OTM_HDMI_ATTR_ID_MUTE
+OTM_HDMI_ATTR_ID_HDCP_AUTO_PHY
+OTM_HDMI_ATTR_ID_HDCP_AUTO_RETRY
+OTM_HDMI_ATTR_ID_HDCP_RI_RETRY
+OTM_HDMI_ATTR_ID_HDCP_1P1
+OTM_HDMI_ATTR_ID_HDCP_ENCRYPT
+*/
+
+/* *NOTE* Extend pd_lib.c::__pd_attr_get_name() when adding new entries */
+typedef enum {
+	OTM_HDMI_MIN_SUPPORTED_ATTRIBUTES = 0,	/* Attribute ID enum's start
+						at this value */
+
+	OTM_HDMI_ATTR_ID_HDCP = OTM_HDMI_MIN_SUPPORTED_ATTRIBUTES, /* HDCP
+								control */
+	OTM_HDMI_ATTR_ID_HDCP_AUTO_MUTE, /* HDCP auto mute */
+	OTM_HDMI_ATTR_ID_HDCP_STATUS, /*HDCP status */
+	OTM_HDMI_ATTR_ID_HDCP_1P1, /* HDCP 1.1 Pj check control */
+	OTM_HDMI_ATTR_ID_COLOR_SPACE_INPUT, /*Input colorspace */
+	OTM_HDMI_ATTR_ID_PIXEL_FORMAT_OUTPUT, /* Output colorspace */
+	OTM_HDMI_ATTR_ID_PIXEL_DEPTH, /* Depth of outgoing pixels */
+	OTM_HDMI_ATTR_ID_BG_COLOR, /* Color for HDCP failure & video mute */
+	OTM_HDMI_ATTR_ID_CABLE_STATUS, /* Cable status */
+	OTM_HDMI_ATTR_ID_PAR, /* Picture aspect ratio */
+	OTM_HDMI_ATTR_ID_FAR, /* Format aspect ratio */
+	OTM_HDMI_ATTR_ID_USE_EDID, /* TV timings source */
+	OTM_HDMI_ATTR_ID_SLOW_DDC, /* DDC bus speed */
+	OTM_HDMI_ATTR_ID_AUDIO_CLOCK, /* Audio clock */
+	OTM_HDMI_ATTR_ID_COLOR_SPACE_EXT, /* Extended colorimetry */
+	OTM_HDMI_ATTR_ID_OUTPUT_CLAMP, /* Clamp output in [16,235] when it is */
+	/* RGB color space. In YCbCr output */
+	/* this attribute is ignored. */
+	OTM_HDMI_ATTR_ID_BRIGHTNESS, /* Brightness Level */
+	OTM_HDMI_ATTR_ID_CONTRAST, /* Contrast Level */
+	OTM_HDMI_ATTR_ID_HUE, /* Hue Angle */
+	OTM_HDMI_ATTR_ID_SATURATION, /* Saturation Level */
+	OTM_HDMI_ATTR_ID_ACP, /* Analog Content Protection */
+	OTM_HDMI_ATTR_ID_CC, /* Closed Captioning */
+	OTM_HDMI_ATTR_ID_OUTPUT_DITHER,	/* Dither 12-bit->10/8bit conversion */
+	OTM_HDMI_ATTR_ID_SHARPNESS_HLUMA, /* Horizontal Luma filter */
+	OTM_HDMI_ATTR_ID_SHARPNESS_HCHROMA, /* Horizontal Chroma Filter */
+	OTM_HDMI_ATTR_ID_BLANK_LEVEL, /* Sync pulse level */
+	OTM_HDMI_ATTR_ID_BLACK_LEVEL, /* Black Level */
+	OTM_HDMI_ATTR_ID_BURST_LEVEL, /* Burst Level */
+	OTM_HDMI_ATTR_ID_HDCP_DELAY,	/* Delay of HDCP start, waiting for TMDS to be available */
+	OTM_HDMI_ATTR_ID_HDCP_RI_RETRY,	/* RI retry delay */
+	OTM_HDMI_ATTR_ID_DVI,	/* DVI enforcement */
+	OTM_HDMI_ATTR_ID_TVOUT_TYPE, /* Current DAC configuration */
+	OTM_HDMI_ATTR_ID_HDCP_ENCRYPT, /* HDCP encryption control */
+	OTM_HDMI_ATTR_ID_3CH_SYNC, /* 3 Channel sync */
+	OTM_HDMI_ATTR_ID_SD_OPTION, /* Alternate SD mode (e.g.: PAL-M, */
+	/* PAL-N, etc.) */
+	OTM_HDMI_ATTR_ID_RGB,	/* RGB / YPbPr output selection */
+	OTM_HDMI_ATTR_ID_CGMS_MODE, /* Current Copy Generation mode */
+	OTM_HDMI_ATTR_ID_NO_SYNC, /* Sync removal from green (y) signal */
+	OTM_HDMI_ATTR_ID_YC_DELAY, /* Luma vs Chroma delay */
+	OTM_HDMI_ATTR_ID_POWER,	/* Disable DAC output */
+	OTM_HDMI_ATTR_ID_NAME,	/* Driver name */
+	OTM_HDMI_ATTR_ID_VERSION_MAJOR,	/* Driver major version */
+	OTM_HDMI_ATTR_ID_VERSION_MINOR,	/* Driver minor version */
+	OTM_HDMI_ATTR_ID_DEBUG,	/* Debug log */
+	OTM_HDMI_ATTR_ID_BUILD_DATE, /* Driver Build date */
+	OTM_HDMI_ATTR_ID_BUILD_TIME, /* Driver Build time */
+	OTM_HDMI_ATTR_ID_DISPLAY_PIPE, /* Display Pipeline assigned */
+	OTM_HDMI_ATTR_ID_SVIDEO, /* Assignment of component Pb & Pr */
+	/* DACs for S-Video output */
+	OTM_HDMI_ATTR_ID_AUDIO_STATUS, /* Status of audio playback */
+	OTM_HDMI_ATTR_ID_TMDS_DELAY, /* HPD to TMDS assert delay */
+	OTM_HDMI_ATTR_ID_MUTE, /* Mute */
+	OTM_HDMI_ATTR_ID_PURE_VIDEO, /* Pure video indication control */
+	OTM_HDMI_ATTR_ID_HDCP_AUTO_PHY, /* PHY blinking on HDCP Ri/Pj failure */
+	OTM_HDMI_ATTR_ID_CALIBRATION, /* Calibration value */
+	OTM_HDMI_ATTR_ID_HDCP_AUTO_RETRY, /* Automatic HDCP retry control */
+	OTM_HDMI_ATTR_ID_INTERNAL_TEST,	/* CVBS/Component internal test */
+
+	/* EXTENDED */
+	OTM_HDMI_ATTR_ID_USER_MIN, /* Start of user defined attributes */
+	OTM_HDMI_ATTR_ID_USER_MAX = 100, /* Max user defined attribute */
+	OTM_HDMI_MAX_SUPPORTED_ATTRIBUTES /* End of attribute IDs;
+						must be last */
+} otm_hdmi_attribute_id_t;
+
+
+/*
+ * the following def should be moved to otm_htmi_internel.h later
+ */
+#define PD_NAME "pd_hdmi"
+
+/** @ingroup disp_mode
+ * Defines supported color spaces
+ *
+ */
+typedef enum {
+	/* Normally used for Standard Definition YCbCr content */
+	OTM_HDMI_COLOR_SPACE_BT601 = 0,
+	/* Normally used for High Definition YCbCr content */
+	OTM_HDMI_COLOR_SPACE_BT709,
+	/* Used for all RGB pixel formats */
+	OTM_HDMI_COLOR_SPACE_RGB,
+	/* Number of entries in this enumeration */
+	OTM_HDMI_COLOR_SPACE_COUNT
+} otm_hdmi_color_space_t;
+
+/** @ingroup disp_mode
+ * Display (pipe) ids.  The Intel CE Media Processors have two displays.
+ */
+typedef enum {
+	/* [Pipe A] Main display/HDMI */
+	OTM_HDMI_DISPLAY_ID_0 = 0,
+	/* [Pipe B] Secondary display/Composite */
+	OTM_HDMI_DISPLAY_ID_1,
+	/* Undefined Pipe */
+	OTM_HDMI_DISPLAY_ID_UNDEFINED
+} otm_hdmi_display_id_t;
+
+/*
+ *
+ * Attribute usage flags.
+ */
+/* TODO: Move OTM_HDMI_ATTR_FLAG_SUPPORTED since it's internal to PD */
+typedef enum {
+	OTM_HDMI_ATTR_FLAG_WRITE = 0x1, /* Attribute can be written */
+	OTM_HDMI_ATTR_FLAG_SUPPORTED = 0x2,
+				      /* Attribute is supported on this port
+					driver. FOR INTERNAL USE ONLY. */
+	OTM_HDMI_ATTR_FLAG_INTERNAL = 0x4,
+				      /* Attribute is invisible to outside
+					world. FOR INTERNAL USE ONLY */
+} otm_hdmi_attribute_flag_t;
+
+/*
+ * Attribute flags used internally to override / extend certain behavior
+ * NOTE: Make sure values don't collide with otm_hdmi_attribute_flag_t
+ */
+#define OTM_HDMI_ATTR_FLAG_FORCED 0x8000 /* Read only is ignored */
+
+/**
+    Attribute types
+*/
+typedef enum {
+	OTM_HDMI_ATTR_TYPE_UINT, /* Attribute is of type #unsigned int. */
+	OTM_HDMI_ATTR_TYPE_BOOLEAN, /* Attribute is of type #bool. */
+	OTM_HDMI_ATTR_TYPE_STRING /* Attribute is a read-only 0-terminated
+					ASCII string. */
+} otm_hdmi_attribute_type_t;
+
+/**
+   Maximum size of PD string attributes
+*/
+#define OTM_HDMI_MAX_STRING_LENGTH 16
+
+/**
+     This structure represents port driver attribute
+*/
+typedef struct {
+	otm_hdmi_attribute_id_t id;	/* Global attribute ID. */
+	otm_hdmi_attribute_type_t type;	/* Data type of attribute. */
+	otm_hdmi_attribute_flag_t flags;/* Access permissions and
+						internal use*/
+
+	char name[OTM_HDMI_MAX_STRING_LENGTH + 1];
+
+	union	/* Attribute data dependent on attribute type */
+	{
+		struct {
+			unsigned int value_default; /* default value */
+			unsigned int value_min;	/* minimum value */
+			unsigned int value_max;	/* maximum value */
+			unsigned int value;	/* current value */
+		} _uint;
+
+		struct {
+			bool value_default; /* default value */
+			bool value; /* current value */
+		} _bool;
+
+		struct {
+			/* current value */
+			char value[OTM_HDMI_MAX_STRING_LENGTH + 1];
+		} string;
+
+	} content;
+} otm_hdmi_attribute_t;
+
+/**
+    The Internal TV Encoders can support several different TV standards when
+    they are used in Standard Definition (SD) resolutions.  The entries in
+    this enumeration are values that can be used to set the
+    OTM_HDMI_ATR_ID_SD_OPTION attribute to specify the standard to be used
+    for SD.
+*/
+typedef enum {
+	TV_STD_UNDEFINED = 0, /* Use Default per resolution */
+	TV_STD_NTSC = 0, /* Use NTSC for 720x480i mode. */
+	TV_STD_PAL = 0,	 /* Use PAL for 720x576i mode. */
+	TV_STD_NTSC_J = 1, /* Use NTSC-J (Japan) for 720x480i mode. */
+	TV_STD_PAL_M = 2, /* Use PAL-M (Brazil) for 720x480i mode. */
+	TV_STD_PAL_N = 3, /* Use PAL-N (Argentina) for 720x576i mode. */
+	TV_STD_MAX /* The number of IDs in this enumeration. */
+} otm_hdmi_sd_option_t;
+
+/*
+ * Unique IDs for [end user -> port driver] communication
+ */
+/*
+ * Command codes for the otm_hdmi_port_send() function.
+ */
+typedef enum {
+	OTM_HDMI_SEND_CC = 0,
+	/* Closed Captioning data; see #otm_hdmi_cc_data_t */
+	OTM_HDMI_SEND_PAL_WSS,
+	/* Pal Wide Screen signaling; See #otm_hdmi_wss_data_t */
+	OTM_HDMI_SEND_CGMS_A,
+	/* CGMS-A for NTSC and ATSC formats; See #otm_hdmi_cgms_data_t */
+	OTM_HDMI_SEND_CGMS_A_TYPE_B,
+	/* CGMS-A for CEA 770.2 and 770.3; see #otm_hdmi_cgms_type_b_data_t */
+	OTM_HDMI_SEND_HDMI_AUDIO_CTRL,
+	/* HDMI audio data; See #otm_hdmi_audio_ctrl_t */
+	OTM_HDMI_SEND_HDMI_HDCP_SRM,
+	/* HDCP System Renewability Message */
+	OTM_HDMI_SEND_HDMI_PACKET,
+	/* Generic HDMI packet; See #otm_hdmi_packet_info_t */
+	OTM_HDMI_SEND_HDMI_PHY,
+	/* Set HDMI PHY electrical properties */
+	OTM_HDMI_SEND_TELETEXT,
+	/* Teletext data to be re inserted into the vbi */
+	OTM_HDMI_SEND_USER_MIN,
+	/* External (non-Intel) port drivers may define command codes
+		starting with this value.
+	*/
+	OTM_HDMI_SEND_USER_MAX = 30
+	/* External (non-Intel) port drivers may define command codes up to
+		this value.
+	*/
+} otm_hdmi_otm_hdmi_send_t;
+
+/* Unique IDs for [port driver -> end user] communication */
+/**
+    Command codes for retrieving port driver extended information
+    via otm_hdmi_port_recv().
+*/
+typedef enum {
+	OTM_HDMI_RECV_HDMI_AUDIO_CTRL = 0, /* Audio control information
+					see #otm_hdmi_audio_ctrl_t       */
+	OTM_HDMI_RECV_HDMI_SINK_INFO,  /* HDMI sink information
+					see #otm_hdmi_sink_info_t        */
+	OTM_HDMI_RECV_HDMI_EDID_BLOCK, /* 128 bytes of raw EDID
+					see #otm_hdmi_edid_block_t       */
+	OTM_HDMI_RECV_HDMI_HDCP_INFO,  /* HDCP information        */
+	OTM_HDMI_RECV_HDMI_HDCP_KSVS,  /* HDCP keys selection vectors      */
+	OTM_HDMI_RECV_HDMI_PHY,	     /* PHY settings                     */
+	OTM_HDMI_RECV_HDMI_NATIVE_MODE,/* Native modes query               */
+	OTM_HDMI_RECV_USER_MIN,	     /* Begin user defined command codes */
+	OTM_HDMI_RECV_USER_MAX = 30    /* End user defined command codes   */
+} otm_hdmi_otm_hdmi_receive_t;
+
+/* Output pixel format */
+/**
+    Attribute values for the HDMI output pixel format.
+    See #OTM_HDMI_ATTR_ID_PIXEL_FORMAT_OUTPUT.
+*/
+typedef enum {
+	OTM_HDMI_OPF_RGB444 = 0,	/* RGB 4:4:4 Output */
+	OTM_HDMI_OPF_YUV422,	/* YUV 4:2:2 Output */
+	OTM_HDMI_OPF_YUV444,	/* YUV 4:4:4 Output */
+	OTM_HDMI_OPF_COUNT	/* Number of output pixel formats + 1 */
+} otm_hdmi_output_pixel_format_t;
+
+/* Output pixel depth */
+/**
+    Attribute values for the HDMI output pixel depth.
+    See #OTM_HDMI_ATTR_ID_PIXEL_DEPTH.
+*/
+typedef enum {
+	OTM_HDMI_OPD_24BIT = 0,	/* 24 bits per pixel */
+	OTM_HDMI_OPD_30BIT,	/* 30 bits per pixel */
+	OTM_HDMI_OPD_36BIT,	/* 36 bits per pixel */
+	OTM_HDMI_PIXEL_DEPTH_COUNT /* Number of supported pixel depths + 1 */
+} otm_hdmi_output_pixel_depth_t;
+
+/* Picture Aspect Ratio infoframe code */
+/**
+    Attribute values for the HDMI Picture Aspect Ratio information sent via
+    AVI infoframes. See #OTM_HDMI_ATTR_ID_PAR .
+*/
+typedef enum {
+	OTM_HDMI_PAR_NO_DATA = 0x00,	/* No aspect ratio specified */
+	OTM_HDMI_PAR_4_3 = 0x01,	/* 4:3 aspect ratio */
+	OTM_HDMI_PAR_16_9 = 0x02,	/* 16:9 aspect ratio */
+} otm_hdmi_par_t;
+
+/* Format Aspect Ratio infoframe code */
+/**
+    Attribute values for the HDMI Format Aspect Ratio information sent via
+    AVI infoframes. See #OTM_HDMI_ATTR_ID_FAR.
+*/
+typedef enum {
+	OTM_HDMI_FAR_16_9_TOP = 0x02, /* box 16:9 (top) */
+	OTM_HDMI_FAR_14_9_TOP = 0x03, /* box 14:9 (top) */
+	OTM_HDMI_FAR_G_14_9_CENTER = 0x04, /* box > 16:9 (center) */
+	OTM_HDMI_FAR_SAME_AS_PAR = 0x08, /* As encoded frame */
+	OTM_HDMI_FAR_4_3_CENTER = 0x09,	/* 4:3 center */
+	OTM_HDMI_FAR_16_9_CENTER = 0x0A, /* 16:9 center */
+	OTM_HDMI_FAR_14_9_CENTER = 0x0B, /* 14:9 center */
+	OTM_HDMI_FAR_4_3_SP_14_9 = 0x0D, /* 4:3 with s&p 14:9 center */
+	OTM_HDMI_FAR_16_9_SP_14_9 = 0x0E, /* 16:9 with s&p 14:9 center */
+	OTM_HDMI_FAR_16_9_SP_4_3 = 0x0F, /* 4:3 with s&p 4:3 center */
+} otm_hdmi_far_t;
+
+/* V B I   S E R V I C E S */
+/**
+    When inserting VBI information into the analog TV signal, this enumeration
+    is used to indicate the field into which the information should be inserted.
+*/
+typedef enum {
+	VBI_FIELD_ID_ODD = 1,	/* Odd field (field 1).   */
+	VBI_FIELD_ID_EVEN = 2,	/* Even field (field 2).  */
+	VBI_FIELD_ID_UNDEFINED = 3
+				/* This value should be passed when the
+				display is in a progressive (frame) mode.
+				*/
+} otm_hdmi_vbi_fieldid_t;
+
+/**
+
+    This enumeration is used to specify values for the #OTM_HDMI_ATTR_ID_ACP
+    attribute (the Analog Copy Protection mode).
+*/
+typedef enum {
+	ACP_MODE_OFF,		/* ACP Off */
+	ACP_MODE_PSP,		/* Pseudo Sync Pulse+No Color Stripes */
+	ACP_MODE_PSP_CS_2_LINES, /* Pseudo Sync Pulse+Color Stripes (2 lines)*/
+	ACP_MODE_PSP_CS_4_LINES, /* Pseudo Sync Pulse+Color Stripes (4 lines)*/
+	ACP_MODE_PSP_ALT, /* Pseudo Sync Pulse + No Color Stripes */
+	/* Type 1 Supplement 1 BPP configuration */
+	ACP_MODE_PSP_CS_2_LINES_ALT,	/* Pseudo Sync Pulse +
+						Color Stripes (2 lines) */
+	/* Type 2 Supplement 1 BPP configuration */
+	ACP_MODE_PSP_CS_4_LINES_ALT	/* Pseudo Sync Pulse +
+						Color Stripes (4 lines) */
+	    /* Type 3 Supplement 1 BPP configuration */
+} otm_hdmi_acp_mode_t;
+
+/**
+    This enumeration specifies values for CGMS-A copy permission states to be
+    inserted into the analog TV signal. See the #otm_hdmi_cgms_data_t
+    data structure.
+*/
+typedef enum {
+	CGMS_A_COPY_FREELY = 1,	/* Unlimited Copies can be made */
+	CGMS_A_COPY_NOMORE = 2,	/* Copy has already been made (was reserved) */
+	CGMS_A_COPY_ONCE = 3,	/* One copy can be made */
+	CGMS_A_COPY_NEVER = 4,	/* No copies can be made */
+	CGMS_A_NO_DATA = 5	/* No data. Word 1 will be 1111 */
+} otm_hdmi_cgms_copy_t;
+
+/**
+
+    This enumeration specifies values for CGMS-A aspect ratios to be inserted
+    into the analog TV signal. See the #otm_hdmi_cgms_data_t data structure.
+*/
+typedef enum {
+	CGMS_A_4_3 = 1,	 /* Normal 4:3 aspect ratio */
+	CGMS_A_4_3_LB = 2, /* 4:3 aspect ratio letterboxed */
+	CGMS_A_16_9 = 3	 /* 16:9 aspect ratio (Not available at 480i/576i) */
+} otm_hdmi_cgms_aspect_t;
+
+/**
+
+    This enumeration specifies values for CGMS-A aspect ratios to be inserted
+    into the analog TV signal. See the #otm_hdmi_cgms_data_t data structure.
+*/
+typedef enum {
+	CGMS_A_SCAN_NODATA = 0,	/* No Data */
+	CGMS_A_SCAN_OVER = 1,	/* Overscanned */
+	CGMS_A_SCAN_UNDER = 2,	/* Underscanned */
+	CGMS_A_SCAN_RESERVED = 3
+} otm_hdmi_cgms_scan_t;
+
+/**
+
+    This enumeration specifies values for CGMS-A aspect ratios to be inserted
+    into the analog TV signal. See the #otm_hdmi_cgms_data_t data structure.
+*/
+typedef enum {
+	CGMS_A_CSC_NO_DATA = 0,	/* No Data */
+	CGMS_A_CSC_BT601 = 1,	/* BT601 */
+	CGMS_A_CSC_BT709 = 2,	/* BT709 */
+	CGMS_A_CSC_RESERVED = 3	/* Reserved */
+} otm_hdmi_cgms_colorimetery;
+
+/**
+    This enumeration specifies values for Wide Screen Signalling aspect ration
+    information to be inserted into the analog TV signal. See the
+
+*/
+typedef enum {
+	/* PAL specific Modes */
+	WSS_4_3_FF = 0,		/* 4:3 Full Format */
+	WSS_14_9_LB_C = 1,	/* 14:9 Letterbox, Centered */
+	WSS_14_9_LB_T = 2,	/* 14:9 Letterbox, Top */
+	WSS_16_9_LB_C = 3,	/* 16:9 Letterbox, Centered */
+	WSS_16_9_LB_T = 4,	/* 16:9 Letterbox, Top */
+	WSS_G_16_9_LB_C = 5,	/* 16:9 Letterbox, Centered */
+	WSS_14_9_FF = 6,	/* 14:9 Full Format */
+	WSS_16_9_ANAMORPHIC = 7, /*16:9 Anamorphic */
+} otm_hdmi_wss_aspect_t;
+
+/**
+    This enumeration specifies values for Wide Screen Signalling camera mode
+    information to be inserted into the analog TV signal. See the
+
+*/
+typedef enum {
+	WSS_CAMERA_MODE = 0,	/* Camera Mode */
+	WSS_FILM_MODE = 1,	/* Film Mode */
+} otm_hdmi_wss_camera_t;
+
+/**
+    This enumeration specifies values for Wide Screen Signalling color encoding
+    information to be inserted into the analog TV signal. See the
+
+*/
+typedef enum {
+	WSS_CE_NORMAL_PAL = 10,	/* Normal PAL Colors */
+	WSS_CE_COLOR_PLUS = 11,	/* Motion Adaptive Color Plus */
+} otm_hdmi_wss_ce_t;
+
+/**
+    This enumeration specifies values to indicate  Wide Screen Signalling
+    helpers state, to be inserted into the analog TV signal. See the
+
+*/
+typedef enum {
+	WSS_HELPERS_NOT_PRESENT = 1,	/* No Helper */
+	WSS_HELPERS_PRESENT = 2,	/* Modulated helper */
+
+} otm_hdmi_wss_helpers_t;
+
+/**
+    This enumeration specifies values for Wide Screen Signalling open subtitles
+    state, to be inserted into the analog TV signal.
+    See the #otm_hdmi_wss_data_t data structure.
+*/
+typedef enum {
+	WSS_OPEN_SUBTITLES_NO = 1,	/* No open subtitles */
+	WSS_OPEN_SUBTITLES_INSIDE = 2,	/* Subtitles in active image area*/
+	WSS_OPEN_SUBTITLES_OUTSIDE = 3,	/* Subtitles out of active image area*/
+} otm_hdmi_wss_opensub_t;
+
+/**
+    This enumeration specifies values for Wide Screen Signalling surround sound
+    state, to be inserted into the analog TV signal.
+    See the #otm_hdmi_wss_data_t data structure.
+*/
+typedef enum {
+	WSS_SURROUND_NO = 1,	/* No surround sound information */
+	WSS_SURROUND_YES = 2,	/* Surround sound present */
+} otm_hdmi_wss_surround_t;
+
+/**
+    This enumeration contains the data type identifier for the WSS information
+    to pass to the TV encoder to be inserted into the analog TV signal.
+*/
+typedef enum {
+	WSS_NO_COPYRIGHT = 1,	/* No Copyright asserted or status unknown */
+	WSS_COPYRIGHT = 2,	/* Copyright Asserted */
+} otm_hdmi_wss_copyright_t;
+
+/**
+    This enumeration specifies values for Wide Screen Signalling copy
+    restriction state, to be inserted into the analog TV signal. See the
+
+*/
+typedef enum {
+	WSS_COPY_NO_REST = 1,	/* Copying not restricted */
+	WSS_COPY_RESTRICTED = 2	/* Copying Restricted */
+} otm_hdmi_wss_copy_t;
+
+/**
+
+    This data structure is used to pass closed captioning information to the
+    display driver.  The driver will pass this information to the TV encoder to
+    be inserted into the analog TV signal.
+*/
+typedef struct {
+	otm_hdmi_vbi_fieldid_t pd_vbi_field_id; /* Field ID identifier */
+	unsigned char data_length;
+				/* Number of valid closed caption data bytes
+				passed; must be an even number, with a
+				maximum value of 8.
+				*/
+	unsigned char ccdata[8];
+				/* Array containing the closed caption data
+				to be inserted.
+				*/
+} otm_hdmi_cc_data_t;
+
+/**
+    This data structure is used to pass PAL Wide Screen signaling from an
+    application to the display driver.  The driver will pass this
+    information to the TV encoder to be inserted into the PAL analog TV signal.
+
+    Teletext is not supported in silicon. Teletext in subtitle always is 0.
+
+    Standard in use:
+    ETSI EN 300 294 V1.4.1 2003-2004
+*/
+typedef struct {
+	bool enabled; /* OTM_HDMI_TRUE => Enabled */
+	otm_hdmi_wss_aspect_t aspect; /* Aspect Ratio */
+	otm_hdmi_wss_camera_t cam_mode;	/* Camera Mode */
+	otm_hdmi_wss_ce_t color_enc; /* Color Encoding */
+	otm_hdmi_wss_helpers_t helpers;	/* Helpers Present */
+	otm_hdmi_wss_opensub_t open_sub; /* Open Subtitles */
+	otm_hdmi_wss_surround_t surround; /* Surround sound */
+	otm_hdmi_wss_copyright_t copyright; /* Copyright assertion */
+	otm_hdmi_wss_copy_t copy_rest;	/* Copy Restriction */
+} otm_hdmi_wss_data_t;
+
+/**
+    This data structure is used to pass Copy Generation Management System
+    (Analog) information from the application to the display driver.  The driver
+    will pass this information to the TV encoder to be inserted into the analog
+    TV signal.
+
+    XDS CEA-608 based CGMS-A should be passed using the Closed Captioning API.
+    See #otm_hdmi_cc_data_t
+
+    Standard is use: IEC 61880 480i Line20, EIA/CEA-805 480p Line 41,
+			720p Line 24 , and 1080i Line 19
+*/
+typedef struct {
+	bool enabled; /* OTM_HDMI_TRUE => Enabled */
+	otm_hdmi_cgms_copy_t copyGen; /* CGMS-A data see #otm_hdmi_cgms_copy_t*/
+	otm_hdmi_cgms_aspect_t aspect; /* Wide Screen signaling. */
+	otm_hdmi_acp_mode_t mv;	/* APS */
+	bool analog_src; /* Analog Source Bit */
+} otm_hdmi_cgms_data_t;
+
+/**
+    This enumeration is used to specify values for the #OTM_HDMI_ATTR_ID_SVIDEO
+    attribute
+*/
+typedef enum {
+	OTM_HDMI_TVOUT_TYPE_COMPOSITE, /* Composite only */
+	OTM_HDMI_TVOUT_TYPE_SVIDEO, /* S-Video only */
+	OTM_HDMI_TVOUT_TYPE_COMPONENT, /* Reserved for internal use */
+	OTM_HDMI_TVOUT_TYPE_CVBSSV, /* Composite and S-video */
+} otm_hdmi_tvout_type_t;
+
+/**
+
+*   This data structure is used to pass Copy Generation
+*   Management System (Analog) Type B information from the
+*   application to the display driver.  The driver will pass
+*   this information to the TV encoder to be inserted into the
+*   analog TV signal.
+
+    XDS CEA-608 based CGMS-A should be passed using the Closed Captioning API.
+    See #otm_hdmi_cc_data_t
+
+    Standard is use: EIA/CEA-805 480p Line 40,
+			720p Line 23 , and 1080i Line 18, 581
+*/
+typedef struct {
+	bool enabled;	/* OTM_HDMI_TRUE => Enabled */
+	otm_hdmi_cgms_aspect_t aspect;	/* Wide Screen signaling IEC 61880. */
+	bool analog_src;	/* Analog Source Bit */
+	bool activeFormatValid;
+	bool barDataValid;
+	otm_hdmi_cgms_scan_t scanData;
+	otm_hdmi_cgms_colorimetery colorimetery;
+	unsigned char activeFormat;
+	bool RCI;
+	otm_hdmi_acp_mode_t mv;	/* APS */
+	otm_hdmi_cgms_copy_t copyGen;	/* CGMS-A data
+					see #otm_hdmi_cgms_copy_t */
+	unsigned short lineEndTop;
+	unsigned short lineStartBottom;
+	unsigned short pixelEndLeft;
+	unsigned short pixelStartRight;
+} otm_hdmi_cgms_type_b_data_t;
+
+/* Defines the size of the Teletext data packet. */
+#define TELETEXT_NUM_LINES 32
+#define TELETEXT_LINE_LENGTH 46
+
+/**
+
+*   This data structure is used as part of otm_hdmi_teletext_data_t
+*   structure to more easily fill out the PES header.
+*
+*   The packet structure is for a single Teletext line. This is
+*   a convenience structure that describes the PES data header
+*   defined in section 4.3 ETSI EN 300-472.
+*
+*   Only data_unit_id's of Teletext Data (0x02) and Teletext
+*   subtitles (0x03) will be processed by the driver. Stuffing
+*   (0xFF) will be ignored. A value of 0x00 will generate a
+*   warning.
+*
+*   Line numbers with a field id of 0 will have 313 added.
+*   Line numbers outside the VBI will generate a warning.
+*/
+typedef struct {
+	unsigned char type:8;	/* Data unit ID */
+	unsigned char length:8;	/* Length is required to be 44. */
+	unsigned char line:5;	/* VBI line */
+	unsigned char field:1;	/* VBI Field, 1 is bottom field */
+	unsigned char:2;	/* padding */
+	unsigned char framing_code:8;	/* Framing Code is required
+						to be 0xE4. */
+	unsigned char data[42];	/* Teletext data */
+} teletext_packet_t;
+
+/**
+
+*   This data structure is used to pass Teletext and Subtitle
+*   information to be reinserted into the VBI of the PAL signal
+*
+*   The packet structure contains up to two field's teletext
+*   lines with a 4-byte PES data header as defined in section
+*   4.3 ETSI EN 300-472.
+*
+*   Only data_unit_id's of Teletext Data (0x02) and Teletext
+*   subtitles (0x03) will be processed by the driver. Stuffing
+*   (0xFF) will be ignored. A value of 0x00 will generate a
+*   warning.
+*
+*   Line numbers with a field id of 0 will have 313 added.
+*   Line numbers outside the VBI will generate a warning.
+*/
+typedef struct {
+	bool enabled;	/* OTM_HDMI_TRUE => Enabled */
+	union {
+		teletext_packet_t packet[TELETEXT_NUM_LINES];
+		unsigned char
+		 raw_data[TELETEXT_NUM_LINES][TELETEXT_LINE_LENGTH];
+	};
+} otm_hdmi_teletext_data_t;
+
+/* H D M I   S P E C I F I C   D A T A   T Y P E S */
+
+/**
+   This structure defines the HDMI audio data blocks.
+*/
+typedef struct {
+	unsigned int format;
+	unsigned int max_channels;
+	unsigned int fs;
+	unsigned int ss_bitrate;
+} otm_hdmi_audio_cap_t;
+
+/**
+    A CEC Source Physical Address.
+*/
+typedef struct {
+	unsigned char a;
+	unsigned char b;
+	unsigned char c;
+	unsigned char d;
+} otm_hdmi_src_phys_addr_t;
+
+/**
+    This data structure represents additional sink details not-available through
+    port attributes
+*/
+typedef struct {
+	unsigned short manufac_id; /* Sink manufacturer ID */
+	unsigned short product_code; /* Sink product code */
+	bool hdmi; /* Sink is HDMI */
+	bool ycbcr444; /* Sink supports YCbCr444 */
+	bool ycbcr422; /* Sink supports YCbCr422 */
+	otm_hdmi_src_phys_addr_t spa; /* CEC source physical address a.b.c.d*/
+	unsigned int speaker_map; /* Speaker allocation map */
+	bool dc_30; /* Sink supports 30-bit color */
+	bool dc_36; /* Sink supports 36-bit color */
+	bool dc_y444; /* Sink supports YCbCr444 in supported DC modes*/
+	bool xvycc601; /* Sink supports xvYCC BT601 Colorimetry */
+	bool xvycc709; /* Sink supports xvYCC BT709 Colorimetry */
+	bool supports_ai; /* Sink supports aux audio information */
+	unsigned int max_tmds_clock; /* Sink MAX TMDS clock in MHz. */
+	bool latency_present; /* Sink lipsync data valid */
+	bool latency_int_present; /* Sink Interlaced lipsync
+						data valid */
+	bool hdmi_video_present; /* Sink Supports HDMI spec.
+						video modes */
+	int latency_video; /* progressive modes video latency */
+	int latency_audio; /* progressive modes audio latency */
+	int latency_video_interlaced; /* interlaced modes video latency */
+	int latency_audio_interlaced; /* interlaced modes audio latency */
+	bool enabled_3d; /* Sink supports 3D video modes */
+	bool num_modes_3d; /* DEPRECATED; */
+	unsigned char max_horz_image_size; /* Sink's screen size in cm */
+	unsigned char max_vert_image_size; /* Sink's screen size in cm */
+	unsigned char name[14];	/* Sink's name */
+	bool rgb_quant_full; /* RGB quantization mode selectable */
+	bool ycc_quant_full; /* YCC quantization mode selectable */
+} otm_hdmi_sink_info_t;
+
+/**
+    This data structure represents 128 byte EDID block
+*/
+typedef struct {
+	unsigned char index; /* Block number to read */
+	unsigned char data[128]; /* Block contents */
+} otm_hdmi_edid_block_t;
+
+/**
+    This enumeration defines the command IDs for the HDMI audio commands.
+    See #otm_hdmi_audio_ctrl_t.
+*/
+typedef enum {
+	OTM_HDMI_AUDIO_START, /* Start audio playback */
+	OTM_HDMI_AUDIO_STOP, /* Stop audio playback */
+	OTM_HDMI_AUDIO_SET_FORMAT, /* Set audio format */
+	OTM_HDMI_AUDIO_GET_CAPS, /* Retrieve descriptor of audio blocks */
+	OTM_HDMI_AUDIO_WRITE, /* For driver internal use only */
+	OTM_HDMI_AUDIO_SET_CHANNEL_STATUS, /* Set channel status info */
+	OTM_HDMI_AUDIO_GET_CHANNEL_STATUS, /* Get channel status info */
+} otm_hdmi_audio_cmd_id_t;
+
+/**
+    This enumeration defines IDs for different HDMI audio formats.
+*/
+/* IMPORTANT: DO NOT change order!!! */
+typedef enum {
+	OTM_HDMI_AUDIO_FORMAT_UNDEFINED = 0x00,
+	OTM_HDMI_AUDIO_FORMAT_PCM,
+	OTM_HDMI_AUDIO_FORMAT_AC3,
+	OTM_HDMI_AUDIO_FORMAT_MPEG1,
+	OTM_HDMI_AUDIO_FORMAT_MP3,
+	OTM_HDMI_AUDIO_FORMAT_MPEG2,
+	OTM_HDMI_AUDIO_FORMAT_AAC,
+	OTM_HDMI_AUDIO_FORMAT_DTS,
+	OTM_HDMI_AUDIO_FORMAT_ATRAC,
+	OTM_HDMI_AUDIO_FORMAT_OBA,
+	OTM_HDMI_AUDIO_FORMAT_DDP,
+	OTM_HDMI_AUDIO_FORMAT_DTSHD,
+	OTM_HDMI_AUDIO_FORMAT_MLP,
+	OTM_HDMI_AUDIO_FORMAT_DST,
+	OTM_HDMI_AUDIO_FORMAT_WMA_PRO,
+} otm_hdmi_audio_fmt_t;
+
+/**
+    This enumeration defines IDs for different HDMI audio sampling frequencies.
+*/
+typedef enum {
+	OTM_HDMI_AUDIO_FS_32_KHZ = 0x01,
+	OTM_HDMI_AUDIO_FS_44_1_KHZ = 0x02,
+	OTM_HDMI_AUDIO_FS_48_KHZ = 0x04,
+	OTM_HDMI_AUDIO_FS_88_2_KHZ = 0x08,
+	OTM_HDMI_AUDIO_FS_96_KHZ = 0x10,
+	OTM_HDMI_AUDIO_FS_176_4_KHZ = 0x20,
+	OTM_HDMI_AUDIO_FS_192_KHZ = 0x40,
+} otm_hdmi_audio_fs_t;
+
+/**
+    This enumeration defines IDs for different HDMI audio sample sizes.
+*/
+typedef enum {
+	OTM_HDMI_AUDIO_SS_UNDEFINED = 0x00,	/* Undefined value */
+	OTM_HDMI_AUDIO_SS_16 = 0x01,	/* 16 bits */
+	OTM_HDMI_AUDIO_SS_20 = 0x02,	/* 20 bits */
+	OTM_HDMI_AUDIO_SS_24 = 0x04,	/* 24 bits */
+} otm_hdmi_audio_ss_t;
+
+/**
+    Enumeration of the different audio speaker allocation options defined in the
+    CEA-861D specification.
+*/
+typedef enum {
+	OTM_HDMI_AUDIO_SPEAKER_MAP_FLFR = 0x0001,
+	OTM_HDMI_AUDIO_SPEAKER_MAP_LFE = 0x0002,
+	OTM_HDMI_AUDIO_SPEAKER_MAP_FC = 0x0004,
+	OTM_HDMI_AUDIO_SPEAKER_MAP_RLRR = 0x0008,
+	OTM_HDMI_AUDIO_SPEAKER_MAP_RC = 0x0010,
+	OTM_HDMI_AUDIO_SPEAKER_MAP_FLCFRC = 0x0020,
+	OTM_HDMI_AUDIO_SPEAKER_MAP_RLCRRC = 0x0040,
+	OTM_HDMI_AUDIO_SPEAKER_MAP_FLWFRW = 0x0080,
+	OTM_HDMI_AUDIO_SPEAKER_MAP_FLHFRH = 0x0100,
+	OTM_HDMI_AUDIO_SPEAKER_MAP_TC = 0x0200,
+	OTM_HDMI_AUDIO_SPEAKER_MAP_FCH = 0x0400,
+} otm_hdmi_audio_speaker_map_t;
+
+/**
+    This structure represents different audio commands
+*/
+typedef struct {
+	otm_hdmi_audio_cmd_id_t cmd_id;	/* Audio command type */
+
+	union /* Audio command details */
+	{
+		struct	/* Arguments for #OTM_HDMI_AUDIO_SET_FORMAT command. */
+		{
+			otm_hdmi_audio_fmt_t fmt; /* Audio format */
+			otm_hdmi_audio_fs_t fs;	/* Sampling frequency */
+			unsigned int ch; /* Number of channels */
+			otm_hdmi_audio_ss_t ss;	/* Sample size [in bits] */
+			otm_hdmi_audio_speaker_map_t map; /* Speaker allocation
+								map */
+		} _set_config;
+
+		struct /* Arguments for #OTM_HDMI_AUDIO_GET_CAPS command. */
+		{
+			unsigned int index; /* Capability number */
+			otm_hdmi_audio_cap_t cap; /* Capability content */
+		} _get_caps;
+
+		struct /* Arguments for #OTM_HDMI_AUDIO_WRITE command */
+		{
+			unsigned int samples; /* Audio samples buffer address*/
+			unsigned int silence; /* Audio silence buffer address*/
+			unsigned int size; /* Audio data buffer size */
+			unsigned int id; /* Audio buffer ID */
+			bool sync; /* Type of write operation */
+		} _write;
+
+		struct /* Arguments for #OTM_HDMI_AUDIO_STOP command */
+		{
+			bool sync; /* Type of stop request */
+		} _stop;
+
+		struct /* Arguments for
+			#OTM_HDMI_AUDIO_SET_CHANNEL_STATUS command */
+		{
+			unsigned int lsw; /* Least significant word of
+						ch status */
+			unsigned int msw; /* Most significant word of
+						ch status */
+		} _channel_status;
+
+	} data;
+
+} otm_hdmi_audio_ctrl_t;
+
+#define HDMI_DIP_PACKET_HEADER_LEN	3
+#define HDMI_DIP_PACKET_DATA_LEN	28
+
+/**
+    This structure represents generic HDMI packet
+*/
+typedef struct {
+	uint8_t header[HDMI_DIP_PACKET_HEADER_LEN];
+	union {
+		uint8_t data[HDMI_DIP_PACKET_DATA_LEN];
+		uint32_t data32[HDMI_DIP_PACKET_DATA_LEN/4];
+	};
+} otm_hdmi_packet_t;
+
+/**
+    This structure represents HDMI packet slot number
+*/
+typedef enum {
+	OTM_HDMI_PACKET_SLOT_0,
+	OTM_HDMI_PACKET_SLOT_1,
+	OTM_HDMI_PACKET_SLOT_AVI,
+} otm_hdmi_packet_slot_t;
+
+/**
+    This structure is used to submit data via #OTM_HDMI_SEND_HDMI_PACKET service
+    provided by #otm_hdmi_port_send
+*/
+typedef struct {
+	otm_hdmi_packet_t packet;
+	otm_hdmi_packet_slot_t slot;
+} otm_hdmi_packet_info_t;
+
+/**
+    This enumeration is used to specify values for the
+    #OTM_HDMI_ATTR_ID_USE_EDID attribute
+*/
+typedef enum {
+	OTM_HDMI_USE_EDID_NONE = 0,
+	OTM_HDMI_USE_EDID_REAL = 1,
+	OTM_HDMI_USE_EDID_SAFE = 2,
+} otm_hdmi_use_edid_t;
+
+/**
+*   This enumeration represents YC Delay amounts
+*/
+typedef enum {
+	OTM_HDMI_YC_DELAY_NONE,	/* No YC delay */
+	OTM_HDMI_YC_DELAY_ADVANCE, /* Y 0.5 Pixel Advance delay */
+	OTM_HDMI_YC_DELAY_MINUS	/* Y 1.0 Pixel delay */
+} otm_hdmi_yc_delay_t;
+
+/**
+*   This enumeration represents vswing equalization values
+*/
+typedef enum {
+	OTM_HDMI_EQUALIZE_NONE,	/* Equalization disabled */
+	OTM_HDMI_EQUALIZE_10,	/* Equalization 10%, not supported on CE3100 */
+	OTM_HDMI_EQUALIZE_20,	/* Equalization 20% */
+	OTM_HDMI_EQUALIZE_30,	/* Equalization 30%, not supported on CE3100 */
+	OTM_HDMI_EQUALIZE_40,	/* Equalization 40% */
+	OTM_HDMI_EQUALIZE_50,	/* Equalization 50%, not supported on CE3100 */
+	OTM_HDMI_EQUALIZE_60,	/* Equalization 60% */
+	OTM_HDMI_EQUALIZE_70,	/* Equalization 70%, not supported on CE3100 */
+	OTM_HDMI_EQUALIZE_80,	/* Equalization 80% */
+} otm_hdmi_equalize_t;
+
+/**
+*   This enumeration represents transmit level amplitude values
+*/
+typedef enum {
+	OTM_HDMI_TRANSMIT_LEVEL_300, /* 300 mV, not supported on CE3100 */
+	OTM_HDMI_TRANSMIT_LEVEL_325, /* 325 mV, not supported on CE3100 */
+	OTM_HDMI_TRANSMIT_LEVEL_350, /* 350 mV, not supported on CE3100 */
+	OTM_HDMI_TRANSMIT_LEVEL_375, /* 375 mV, not supported on CE3100 */
+	OTM_HDMI_TRANSMIT_LEVEL_400, /* 400 mV, not supported on CE3100 */
+	OTM_HDMI_TRANSMIT_LEVEL_425, /* 425 mV, not supported on CE3100 */
+	OTM_HDMI_TRANSMIT_LEVEL_450, /* 450 mV */
+	OTM_HDMI_TRANSMIT_LEVEL_475, /* 475 mV, not supported on CE3100 */
+	OTM_HDMI_TRANSMIT_LEVEL_500, /* 500 mV */
+	OTM_HDMI_TRANSMIT_LEVEL_525, /* 525 mV, not supported on CE3100 */
+	OTM_HDMI_TRANSMIT_LEVEL_550, /* 550 mV */
+	OTM_HDMI_TRANSMIT_LEVEL_575, /* 575 mV, not supported on CE3100 */
+	OTM_HDMI_TRANSMIT_LEVEL_600, /* 600 mV */
+	OTM_HDMI_TRANSMIT_LEVEL_625, /* 625 mV, not supported on CE3100 */
+	OTM_HDMI_TRANSMIT_LEVEL_650, /* 650 mV, not supported on CE3100 */
+	OTM_HDMI_TRANSMIT_LEVEL_675, /* 675 mV, not supported on CE3100 */
+} otm_hdmi_transmit_level_t;
+
+/**
+*   This enumeration represents termination impedance values
+*/
+typedef enum {
+	OTM_HDMI_TERMINATION_OPEN, /* Open */
+	OTM_HDMI_TERMINATION_677, /* 677 Ohm, not supported on CE3100 */
+	OTM_HDMI_TERMINATION_398, /* 398 Ohm, not supported on CE3100 */
+	OTM_HDMI_TERMINATION_250, /* 250 Ohm, not supported on CE3100 */
+	OTM_HDMI_TERMINATION_200, /* 200 Ohm */
+	OTM_HDMI_TERMINATION_100, /* 100 Ohm */
+	OTM_HDMI_TERMINATION_88, /* 88 Ohm, not supported on CE3100 */
+	OTM_HDMI_TERMINATION_78, /* 78 Ohm, not supported on CE3100 */
+	OTM_HDMI_TERMINATION_72, /* 72 Ohm, not supported on CE3100 */
+	OTM_HDMI_TERMINATION_67, /* 67 Ohm */
+	OTM_HDMI_TERMINATION_65, /* 65 Ohm, not supported on CE3100 */
+	OTM_HDMI_TERMINATION_50, /* 50 Ohm */
+} otm_hdmi_termination_t;
+
+/**
+*   This enumeration represents current adjustment values
+*/
+typedef enum {
+	OTM_HDMI_CURRENT_0MA,	/* 0 mA */
+	OTM_HDMI_CURRENT_40MA,	/* 40 mA */
+	OTM_HDMI_CURRENT_60MA,	/* 60 mA */
+	OTM_HDMI_CURRENT_10MA,	/* 10 mA */
+	OTM_HDMI_CURRENT_250MA,	/* 250 mA */
+	OTM_HDMI_CURRENT_290MA,	/* 290 mA */
+	OTM_HDMI_CURRENT_310MA,	/* 310 mA */
+	OTM_HDMI_CURRENT_350MA,	/* 350 mA */
+} otm_hdmi_current_t;
+
+/**
+*   This enumeration represents band gap resistor values
+*/
+typedef enum {
+	OTM_HDMI_BGLVL_788, /* 0.788v not supported on CE4100 */
+	OTM_HDMI_BGLVL_818, /* 0.818v not supported on CE4100 */
+	OTM_HDMI_BGLVL_854, /* 0.854v not supported on CE4100
+				[CE3100 default] */
+	OTM_HDMI_BGLVL_891, /* 0.891v not supported on CE4100 */
+	OTM_HDMI_BGLVL_820, /* 0.82v  not supported on CE3100
+				[CE4100 default] */
+	OTM_HDMI_BGLVL_800, /* 0.80v  not supported on CE3100 */
+	OTM_HDMI_BGLVL_780, /* 0.78v  not supported on CE3100 */
+	OTM_HDMI_BGLVL_760, /* 0.76v  not supported on CE3100 */
+	OTM_HDMI_BGLVL_750, /* 0.75v  not supported on CE3100 */
+	OTM_HDMI_BGLVL_720, /* 0.72v  not supported on CE3100 */
+	OTM_HDMI_BGLVL_660, /* 0.66v  not supported on CE3100 */
+	OTM_HDMI_BGLVL_600, /* 0.60v  not supported on CE3100 */
+} otm_hdmi_bglvl_t;
+
+/**
+  This structure is used to submit data via #OTM_HDMI_SEND_HDMI_PACKET service
+  provided by #otm_hdmi_port_send and is intended for adjustments of PHY clock
+  and data lines.
+*/
+typedef struct {
+	otm_hdmi_equalize_t clock_equalization;
+				/**< Clock equalization percentage.  On CE3100,
+				*   same value is used for both clock and data.
+				*/
+	otm_hdmi_equalize_t data_equalization;
+				/**< Data equalization percentage.
+				*   IGNORED ON CE3100
+				*/
+	otm_hdmi_transmit_level_t clock_transmit_level;
+				/**< Clock transmit level in mV.  On CE3100,
+				*   same value is used for both clock and data.
+				*/
+	otm_hdmi_transmit_level_t data_transmit_level;
+				/**< Data transmit level in mV.
+				*   IGNORED ON CE3100.
+				*/
+	otm_hdmi_termination_t clock_termination;
+				/**< Clock termination in ohms. On CE3100, same
+				*   value is used for both clock and data.
+				*/
+	otm_hdmi_termination_t data_termination;
+				/**< Data termination in ohms.
+				*   IGNORED ON CE3100.
+				*/
+	otm_hdmi_current_t clock_current;
+				/**< Clock current in mA.  On CE3100, same value
+				*   is used for both clock and data.
+				*/
+	otm_hdmi_current_t data_current;
+				/**< Data current in mA.  IGNORED ON CE3100 */
+	otm_hdmi_bglvl_t bandgap_level;
+				/**< Same value used for both clock and data */
+} otm_hdmi_phy_info_t;
+
+/**
+*   This enumeration represents different HDCP states
+*/
+typedef enum {
+	OTM_HDMI_HDCP_STATUS_OFF, /* HDCP is disabled */
+	OTM_HDMI_HDCP_STATUS_IN_PROGRESS, /* HDCP is enabled but not
+						authenticated yet */
+	OTM_HDMI_HDCP_STATUS_ON, /* HDCP is enabled and is authenticated */
+} otm_hdmi_hdcp_status_t;
+
+/**
+*   This enumeration represents audio clock values with respect to which
+*   internal audio divisor value is chosen
+*/
+typedef enum {
+	OTM_HDMI_AUDIO_CLOCK_24,	/* Audio clock is running at 24MHz */
+	OTM_HDMI_AUDIO_CLOCK_36,	/* Audio clock is running at 36Mhz */
+	OTM_HDMI_AUDIO_CLOCK_16,	/* Audio clock is running at 16Mhz */
+} otm_hdmi_audio_clock_t;
+
+/**
+*   This enumeration is used to specify values for the #OTM_HDMI_ATTR_ID_MUTE
+*   attribute
+*/
+typedef enum {
+	OTM_HDMI_MUTE_OFF = 0x0,	/* Unmute */
+	OTM_HDMI_MUTE_VIDEO = 0x1,	/* Mute video only */
+	OTM_HDMI_MUTE_AUDIO = 0x2,	/* Mute audio only */
+	OTM_HDMI_MUTE_BOTH = 0x3,	/* Mute both audio and video */
+} otm_hdmi_mute_t;
+
+#endif /* _OTM_HDMI_DEFS_H */
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/pil/include/otm_hdmi_eld.h b/drivers/external_drivers/intel_media/otm_hdmi/pil/include/otm_hdmi_eld.h
new file mode 100644
index 0000000..72cdef1
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/pil/include/otm_hdmi_eld.h
@@ -0,0 +1,227 @@
+/*
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  Contact Information:
+
+  Intel Corporation
+  2200 Mission College Blvd.
+  Santa Clara, CA  95054
+
+  BSD LICENSE
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+#ifndef _OTM_HDMI_ELD_H
+#define _OTM_HDMI_ELD_H
+
+#include <linux/types.h>
+#define OTM_HDMI_ELD_SIZE 84
+#define OTM_HDMI_MAX_SAD_COUNT 15
+
+/*
+ * CEA Short Audio Descriptor
+ */
+typedef struct {
+#pragma pack(1)
+        union {
+                uint8_t byte1;
+                struct {
+                        uint8_t max_channels:3; // Bits[0-2]
+                        uint8_t audio_format_code:4;    // Bits[3-6], see AUDIO_FORMAT_CODES
+                        uint8_t b1reserved:1;   // Bit[7] - reserved
+                };
+        };
+        union {
+                uint8_t byte2;
+                struct {
+                        uint8_t sp_rate_32kHz:1;        // Bit[0] sample rate = 32kHz
+                        uint8_t sp_rate_44kHz:1;        // Bit[1] sample rate = 44kHz
+                        uint8_t sp_rate_48kHz:1;        // Bit[2] sample rate = 48kHz
+                        uint8_t sp_rate_88kHz:1;        // Bit[3] sample rate = 88kHz
+                        uint8_t sp_rate_96kHz:1;        // Bit[4] sample rate = 96kHz
+                        uint8_t sp_rate_176kHz:1;       // Bit[5] sample rate = 176kHz
+                        uint8_t sp_rate_192kHz:1;       // Bit[6] sample rate = 192kHz
+                        uint8_t sp_rate_b2reserved:1;   // Bit[7] - reserved
+                };
+        };
+        union {
+                uint8_t byte3;  // maximum bit rate divided by 8kHz
+                // following is the format of 3rd byte for uncompressed(LPCM) audio
+                struct {
+                        uint8_t bit_rate_16bit:1;       // Bit[0]
+                        uint8_t bit_rate_20bit:1;       // Bit[1]
+                        uint8_t bit_rate_24bit:1;       // Bit[2]
+                        uint8_t bit_rate_b3reserved:5;  // Bits[3-7]
+                };
+        };
+#pragma pack()
+} otm_hdmi_sad_t;
+
+typedef union {
+	uint8_t eld_data[OTM_HDMI_ELD_SIZE];
+	#pragma pack(1)
+	struct {
+		/* Byte[0] = ELD Version Number */
+		union {
+			uint8_t   byte0;
+			struct {
+				uint8_t reserved:3; /* Reserf */
+				uint8_t eld_ver:5; /* ELD Version Number */
+						/* 00000b - reserved
+						 * 00001b - first rev, obsoleted
+						 * 00010b - version 2, supporting CEA version 861D or below
+						 * 00011b:11111b - reserved
+						 * for future
+						 */
+			};
+		};
+
+		/* Byte[1] = Vendor Version Field */
+		union {
+			uint8_t vendor_version;
+			struct {
+				uint8_t reserved1:3;
+				uint8_t veld_ver:5; /* Version number of the ELD
+						     * extension. This value is
+						     * provisioned and unique to
+						     * each vendor.
+						     */
+			};
+		};
+
+		/* Byte[2] = Baseline Lenght field */
+		uint8_t baseline_eld_length; /* Length of the Baseline structure
+					      *	divided by Four.
+					      */
+
+		/* Byte [3] = Reserved for future use */
+		uint8_t byte3;
+
+		/* Starting of the BaseLine EELD structure
+		 * Byte[4] = Monitor Name Length
+		 */
+		union {
+			uint8_t byte4;
+			struct {
+				uint8_t mnl:5;
+				uint8_t cea_edid_rev_id:3;
+			};
+		};
+
+		/* Byte[5] = Capabilities */
+		union {
+			uint8_t capabilities;
+			struct {
+				uint8_t hdcp:1; /* HDCP support */
+				uint8_t ai_support:1;   /* AI support */
+				uint8_t connection_type:2; /* Connection type
+							    * 00 - HDMI
+							    * 01 - DP
+							    * 10 -11  Reserved
+							    * for future
+							    * connection types
+							    */
+				uint8_t sadc:4; /* Indicates number of 3 bytes
+						 * Short Audio Descriptors.
+						 */
+			};
+		};
+
+		/* Byte[6] = Audio Synch Delay */
+		uint8_t audio_synch_delay; /* Amount of time reported by the
+					    * sink that the video trails audio
+					    * in milliseconds.
+					    */
+
+		/* Byte[7] = Speaker Allocation Block */
+		union {
+			uint8_t speaker_allocation_block;
+			struct {
+				uint8_t flr:1; /*Front Left and Right channels*/
+				uint8_t lfe:1; /*Low Frequency Effect channel*/
+				uint8_t fc:1;  /*Center transmission channel*/
+				uint8_t rlr:1; /*Rear Left and Right channels*/
+				uint8_t rc:1; /*Rear Center channel*/
+				uint8_t flrc:1; /*Front left and Right of Center
+						 *transmission channels
+						 */
+				uint8_t rlrc:1; /*Rear left and Right of Center
+						 *transmission channels
+						 */
+				uint8_t reserved3:1; /* Reserved */
+			};
+		};
+
+		/* Byte[8 - 15] - 8 Byte port identification value */
+		uint8_t port_id_value[8];
+
+		/* Byte[16 - 17] - 2 Byte Manufacturer ID */
+		uint8_t manufacturer_id[2];
+
+		/* Byte[18 - 19] - 2 Byte Product ID */
+		uint8_t product_id[2];
+
+		/* Byte [20-83] - 64 Bytes of BaseLine Data */
+		uint8_t mn_sand_sads[64]; /* This will include
+					   * - ASCII string of Monitor name
+					   * - List of 3 byte SADs
+					   * - Zero padding
+					   */
+
+		/* Vendor ELD Block should continue here!
+		 * No Vendor ELD block defined as of now.
+		 */
+	};
+	#pragma pack()
+} otm_hdmi_eld_t;
+
+#endif /* _OTM_HDMI_ELD_H */
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/pil/include/otm_hdmi_types.h b/drivers/external_drivers/intel_media/otm_hdmi/pil/include/otm_hdmi_types.h
new file mode 100644
index 0000000..c3e0ce4
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/pil/include/otm_hdmi_types.h
@@ -0,0 +1,475 @@
+/*
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+	Copyright(c) 2011 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  Contact Information:
+
+  Intel Corporation
+  2200 Mission College Blvd.
+  Santa Clara, CA  95054
+
+  BSD LICENSE
+
+	Copyright(c) 2011 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+#ifndef _OTM_HDMI_TYPES_H
+#define _OTM_HDMI_TYPES_H
+
+/**
+ *	typedef enum otm_hdmi_ret_t - OTM HDMI module return code definition
+ *	OTM_HDMI_SUCCESS:	Function executed without errors.
+ *	OTM_HDMI_ERR_NO_MEMORY:	Could not allocate memory.
+ *	OTM_HDMI_ERR_FAILED:	This is a generic error code  means that a
+ *                              system call or call to some other software
+ *                              external to the driver.
+ *	OTM_HDMI_ERR_INTERNAL:	A condition that "should not be possible" was
+ *                              detected within the driver. This generally
+ *                              means there is nothing the application can do
+ *                              to correct the problem.
+ */
+typedef enum {
+	OTM_HDMI_SUCCESS = 0,
+	/**<
+	Function executed without errors
+	*/
+	OTM_HDMI_ERR_INVAL = 0x01,
+	/**<
+	An invalid argument was passed.
+	*/
+	OTM_HDMI_ERR_BUSY = 0x02,
+	/**<
+	An operation could not be completed because a needed resource is in use.
+	*/
+	OTM_HDMI_ERR_DISPLAY = 0x03,
+	/**<
+	An invalid display ID was passed.
+	*/
+	OTM_HDMI_ERR_SURFACE = 0x04,
+	/**<
+	An invalid surface ID, or the ID of a surface that is not
+	appropriate for the requested operation, was passed.
+	*/
+	OTM_HDMI_ERR_COMMAND = 0x05,
+	/**<
+	An internal command processing error occurred
+	*/
+	OTM_HDMI_ERR_NULL_ARG = 0x06,
+	/**<
+	A required argument was missing.  Either a NULL pointer or a count
+	of 0 was passed for a required argument.
+	*/
+	OTM_HDMI_ERR_NO_MEMORY = 0x07,
+	/**<
+	Could not allocate memory.
+	*/
+	OTM_HDMI_ERR_FAILED = 0x08,
+	/**<
+	This is a generic error code that generally means that a system
+	call or call to some other software external to the driver
+	returned a failure code.
+	*/
+	OTM_HDMI_ERR_INTERNAL = 0x09,
+	/**<
+	A condition that "should not be possible" was detected within the
+	driver.  This generally means there is nothing the application can
+	do to correct the problem.
+	*/
+	OTM_HDMI_ERR_NOT_IMPL = 0x0a,
+	/**<
+	The function is not currently implemented for the target chip.
+	*/
+	OTM_HDMI_ERR_MAPPED = 0x0b,
+	/**<
+	Operation not permitted on the mapped surface.
+	*/
+	OTM_HDMI_ERR_NO_INIT = 0x0c,
+	/**<
+	A GDL function was called without a preceding call to gdl_init().
+	*/
+	OTM_HDMI_ERR_NO_HW_SUPPORT = 0x0d,
+	/**<
+	The target chip does not support the requested function.  Examples:
+	- A graphics rendering option is not supported by the graphics core
+	  in the target chip.
+	- A plane or port driver does not support a requested attribute.
+	- An attempt was made to request the attribute list from a port
+	  driver that does not support any attributes.
+	*/
+	OTM_HDMI_ERR_INVAL_PF = 0x0e,
+	/**<
+	An unknown pixel format, or a pixel format not supported by the
+	attempted operation, was passed.
+	*/
+	OTM_HDMI_ERR_INVAL_RECT = 0x0f,
+	/**<
+	An invalid argument of type #gdl_rectangle_t was passed to the function.
+	*/
+	OTM_HDMI_ERR_ATTR_ID = 0x10,
+	/**<
+	An undefined ID was specified for a plane attribute or a port
+	driver attribute.
+	*/
+	OTM_HDMI_ERR_ATTR_NO_SUPPORT = 0x11,
+	/**<
+	An unsupported ID was specified for a plane attribute or a port
+	driver attribute.
+	*/
+	OTM_HDMI_ERR_ATTR_READONLY = 0x12,
+	/**<
+	An attempt was made to set the value of a read-only plane attribute
+	or port driver attribute.
+	*/
+	OTM_HDMI_ERR_ATTR_VALUE = 0x13,
+	/**<
+	An invalid value was specified for a plane attribute or a port
+	driver attribute.
+	*/
+	OTM_HDMI_ERR_PLANE_CONFLICT = 0x14,
+	/**<
+	An attempt was made to change the display mode to a resolution too
+	small to accommodate all of the currently enabled planes at their
+	current positions on the display. Move/shrink the affected planes first.
+	*/
+	OTM_HDMI_ERR_DISPLAY_CONFLICT = 0x15,
+	/**<
+	An attempt was made to change either display resolution or plane
+	size/origin, such that part/all of the plane will no longer be on the
+	display.
+	- If the display resolution is being reduced, change plane size/origin
+	  first.
+	- If plane size is being increased, increase the display resolution
+	  first, or reposition the plane.
+	- If plane origin is being changed, make sure you have picked an
+	  appropriate origin given the current plane size and display
+	  resolution.
+	*/
+	OTM_HDMI_ERR_TIMEOUT = 0x16,
+	/**<
+	The requested timeout period occurred before the requested
+	operation trigger occurred.
+	*/
+	OTM_HDMI_ERR_MISSING_BEGIN = 0x17,
+	 /**<
+	 An attempt was made to set a plane attribute without first calling
+	 gdl_config_begin().
+	 */
+	OTM_HDMI_ERR_PLANE_ID = 0x18,
+	/**<
+	An invalid plane ID was passed.  The ID is undefined, the plane is not
+	supported by the target chip, or the plane is not supported by the
+	called function.
+	*/
+	OTM_HDMI_ERR_INVAL_PTR = 0x19,
+	/**<
+	On Linux, a copy between user and kernel space failed.  This
+	probably indicates an invalid user space (argument) pointer.
+	*/
+
+	OTM_HDMI_ERR_INVAL_HEAP = 0x1a,
+	/**<
+	An invalid heap was passed for addition or removal. Attempt
+	to add overlaping heaps will cause this error too.
+	*/
+
+	OTM_HDMI_ERR_HEAP_IN_USE = 0x1b,
+	/**<
+	Heap removal was attempted while at least one surface was allocated
+	from that heap.
+	*/
+
+	OTM_HDMI_ERR_INVAL_CALLBACK = 0x1c,
+	/**<
+	Invalid callback (null) was passed to gdl_event_register() function
+	*/
+
+	OTM_HDMI_ERR_SCALING_POLICY = 0x1d,
+	/**<
+	A single scaling policy is required and was not specified for the
+	unsupported for the specified display ID.
+	*/
+
+	OTM_HDMI_ERR_INVAL_EVENT = 0x1e,
+	/**<
+	Invalid event was passed to functions expecting #gdl_app_event_t.
+	*/
+
+	OTM_HDMI_ERR_INVAL_IOCTL = 0x1f,
+	/**<
+	Invalid IOCTL request was sent to kernel module
+	*/
+	OTM_HDMI_ERR_SCHED_IN_ATOMIC = 0x20,
+	/**<
+	Scheduling was attempted while being in atomic context.
+	*/
+	OTM_HDMI_ERR_MMAP = 0x21,
+	/**<
+	Memory mapping failed
+	*/
+	OTM_HDMI_ERR_HDCP = 0x22,
+	/**<
+	HDCP failure
+	*/
+	OTM_HDMI_ERR_CONFIG = 0x23,
+	/**<
+	Platform config file error: either a required entry in the
+	platform configuration file is missing, or its entry is invalid.
+	*/
+	OTM_HDMI_ERR_HDMI_AUDIO_PLAYBACK = 0x24,
+	/**<
+	HDMI Audio start / stop / set buffer / set format command was
+	initiated at the wrong time.
+	*/
+	OTM_HDMI_ERR_HDMI_AUDIO_BUFFER_FULL = 0x25,
+	/**<
+	Given data does not fit in the internal buffer
+	*/
+	OTM_HDMI_ERR_PLANE_ORIGIN_ODD = 0x26,
+	/**<
+	In interlaced display modes, active planes must be configured with
+	their origins on even display lines. This error is returned when:
+	- in a progressive display mode: an attempt is made to change to an
+	  interlaced display mode while there is an active plane does not
+	  meet this requirement.
+	- in an interlaced display mode:
+	   - an attempt is made to reconfigure an active plane's origin
+	     to an odd line number, OR
+	   - an attempt is made to activate (by flipping a surface to) a
+	     plane that doesn't meet this requirement.
+	*/
+	OTM_HDMI_ERR_PLANE_HEIGHT_ODD = 0x27,
+	/**<
+	In interlaced display modes, active planes must be configured with
+	their even heights. This error is returned when:
+	- in a progressive display mode: an attempt is made to change to an
+	  interlaced display mode while there is an active plane does not
+	  meet this requirement.
+	- in an interlaced display mode:
+	   - an attempt is made to reconfigure an active plane's height
+	     to an odd value, OR
+	   - an attempt is made to activate (by flipping a surface to) a
+	     plane that doesn't meet this requirement.
+	*/
+	OTM_HDMI_ERR_HANDLE = 0x28,
+	/**<
+	Handle is not valid.
+	*/
+	OTM_HDMI_ERR_TVMODE_UNDEFINED = 0x29,
+	/**<
+	Display has undefined tv mode set on it.
+	*/
+	OTM_HDMI_ERR_PREMULT_CONFLICT = 0x2a,
+	/**<
+	An attempt was made to enable the #OTM_HDMI_PLANE_ALPHA_PREMULT
+	attribute and one of the following incompatible features at the same
+	time:
+	- Chroma keying on the same plane
+	  (#OTM_HDMI_PLANE_CHROMA_KEY_SRC_ENABLE set to #OTM_HDMI_TRUE).
+	- Gamma removal on the same plane (#OTM_HDMI_PLANE_REVERSE_GAMMA_TYPE
+	  set to a value other than #OTM_HDMI_GAMMA_LINEAR.
+	- color space conversion (the value of the plane's
+
+	  space of the display to which it is connected).
+	- a non-RGB pixel format.
+	*/
+
+	OTM_HDMI_ERR_SUSPENDED = 0x2b,
+	/**<
+	An attempt was made to execute a command while the driver was in a
+	suspended mode. During the suspended mode driver is in a low-power
+	state and no access to hardware is allowed.
+	*/
+
+	OTM_HDMI_ERR_STEREO_PLANE = 0x2c,
+	/**<
+	An attempt was made to stereo-flip to a plane unlinked to a right view
+	while a two-plane stereo display mode is in effect.
+	*/
+
+	OTM_HDMI_ERR_CE4100_3D_ORIGIN = 0x2d,
+	/**<
+	On the CE4100, the origin of a plane's destination rectangle cannot
+	exceed 922 when OTM_HDMI_STEREO_FRAME_PACKING_2 stereo frame format is
+	in use.
+	*/
+
+	OTM_HDMI_ERR_HDCP_KSV_INVALID = 0x2e,
+	/**<
+	HDCP invalid KSV
+	*/
+	OTM_HDMI_ERR_HDCP_KSV_REVOKED = 0x2f,
+	/**<
+	HDCP revoked KSV
+	*/
+	OTM_HDMI_ERR_HDCP_NO_ACK = 0x30,
+	/**<
+	HDCP I2C timeout when receiving R'
+	*/
+	OTM_HDMI_ERR_HDCP_LINK_INTEGRITY = 0x31,
+	/**<
+	HDCP R != R'
+	*/
+
+	OTM_HDMI_ERR_PERM = 0x32,
+	/**<
+	Callers permissions are insufficient to perform a requested action.
+	*/
+
+	/**********************************************************************
+	 ATTENTION!!: WHEN ADDING AN ERROR CODE MAKE SURE TO:
+	 - Search for a value marked "Currently unused" in the list above
+	   before adding a new value at the end.
+	 - Include inline (doxygen) documentation for the new error.
+	 - Add the new error to _error_string() in debug.c
+	**********************************************************************/
+} otm_hdmi_ret_t;
+
+/* ----------------------------------------------------------------------
+ *                 D I S P L A Y   M O D E
+ * ----------------------------------------------------------------------
+ */
+
+/**
+ *  Refresh rates for TV mode definitions.
+ *
+ *  Refresh rate is the number of times the display is updated per second.
+ *  This is the number of frames per second for progressive display modes;
+ *  the number of fields (half the number of frames) per second for interlaced
+ *  display modes.
+ *
+*/
+typedef enum {
+	OTM_HDMI_REFRESH_23_98, /* 23.98... (24/1.001)    */
+	OTM_HDMI_REFRESH_24,	/* 24                     */
+	OTM_HDMI_REFRESH_25,	/* 25                     */
+	OTM_HDMI_REFRESH_29_97, /* 29.97... (30/1.001)    */
+	OTM_HDMI_REFRESH_30,	/* 30 - DEPRECATED: This value is normally only
+				   used on computer systems and should be used
+				   with care, if at all. The corresponding TV
+				   rate is 30/(1.001) (see
+				   #OTM_HDMI_REFRESH_29_97). */
+	OTM_HDMI_REFRESH_50,	/* 50                     */
+	OTM_HDMI_REFRESH_59_94, /* 59.94... (60/1.001)    */
+	OTM_HDMI_REFRESH_60,	/* 60 - DEPRECATED: This value is normally only
+				   used on computer systems and should be used
+				   with care, if at all. The corresponding TV
+				   rate is 60/(1.001) (see
+				   #OTM_HDMI_REFRESH_59_94). */
+	OTM_HDMI_REFRESH_48,	/* 48 - DEPRECATED: This value is normally only
+				   used on HDMI output with special sink device
+				   and should be used with care, if at all. */
+	OTM_HDMI_REFRESH_47_96, /* 47.96... (48/1.001)   */
+	OTM_HDMI_REFRESH_NONE,  /* Indicates that mode is not set */
+	OTM_HDMI_REFRESH_USER_DEFINED
+				/* External (non-Intel) port drivers may define
+				   additional refresh rates that the support.
+				   Their IDs must be numbered starting at this
+				   value. */
+} otm_hdmi_refresh_t;
+
+/**
+ *  This enumeration is used to specify a stereo (3D) video format.  The SOCs
+ *  on which each format is supported are specified within square brackets.
+ *
+ *  Format names ending in "_2" indicate that the format requires the use of
+ *  two UPPs, one for the left view and one for the right. The
+ *
+ *  to reference the left view plane in order to link them together.
+ *
+ *  OTM_HDMI_STEREO_NONE
+ *     Indicates a mono display mode (no stereo format is in use).
+ *
+ *  OTM_HDMI_STEREO_FRAME_PACKING_2
+ *     Frame packing format implemented with 2 planes per stream.
+ *
+ *  OTM_HDMI_STEREO_FRAME_PACKING [CE4200-B and above]
+ *     Single-plane frame packing format.
+ *
+ *  OTM_HDMI_STEREO_SIDE_BY_SIDE_HALF_2
+ *     Side-by-side format with the horizontal axis subsampled by half,
+ *     implemented with 2 planes per stream.
+ *     NOTE: Planes should be configured and buffers for graphics
+ *     allocated at half horizontal resolution. The TV set is responsible for
+ *     scaling the blended image horizontally by 2.
+ *
+ *  OTM_HDMI_STEREO_TOP_BOTTOM_HALF_2
+ *     Top-and-bottom format with the vertical axis subsampled by half,
+ *     implemented with 2 planes per stream.
+ *     NOTE: Planes should be configured and buffers for graphics
+ *     allocated at half vertical resolution. The TV set is responsible for
+ *     scaling the blended image vertically by 2.
+ *
+ *  OTM_HDMI_STEREO_FRAME_SEQUENTIAL
+ *     Frame sequential format, a format used internally in some Digital TV
+ *     sets for direct output to the panel. NOTE: in order to use Frame
+ *     Sequential format, the HDMI port driver must be loaded with the dtv=1
+ *     command line argument.
+*/
+typedef enum {
+	OTM_HDMI_STEREO_NONE = 0xabcdef01,
+	OTM_HDMI_STEREO_FRAME_PACKING_2,
+	OTM_HDMI_STEREO_FRAME_PACKING,
+	OTM_HDMI_STEREO_SIDE_BY_SIDE_HALF_2,
+	OTM_HDMI_STEREO_TOP_BOTTOM_HALF_2,
+	OTM_HDMI_STEREO_FRAME_SEQUENTIAL,
+} otm_hdmi_stereo_t;
+
+/*
+ * Scaling type flags to passdown to the IP layer
+ */
+typedef enum {
+	OTM_HDMI_SCALE_NONE = 0,   /* Unmodified timing (display or
+					software can still scale) */
+	OTM_HDMI_SCALE_FULLSCREEN, /* Full screen, ignore aspect */
+	OTM_HDMI_SCALE_CENTER,     /* Centered, no scaling */
+	OTM_HDMI_SCALE_ASPECT,     /* Full screen, preserve aspect */
+} otm_hdmi_scale_t;
+
+#endif /* _OTM_HDMI_TYPES_H */
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/pil/specific/ctp/ps_hdmi.c b/drivers/external_drivers/intel_media/otm_hdmi/pil/specific/ctp/ps_hdmi.c
new file mode 100644
index 0000000..793fef9
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/pil/specific/ctp/ps_hdmi.c
@@ -0,0 +1,574 @@
+/*
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  Contact Information:
+
+  Intel Corporation
+  2200 Mission College Blvd.
+  Santa Clara, CA  95054
+
+  BSD LICENSE
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+#include "otm_hdmi_types.h"
+
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/string.h>
+#include <linux/version.h>
+#include "otm_hdmi.h"
+#include "ipil_hdmi.h"
+#include "ps_hdmi.h"
+#include <asm/intel_scu_pmic.h>
+#include <asm/intel-mid.h>
+#include "psb_drv.h"
+#include "psb_powermgmt.h"
+
+/* Implementation of the Clovertrail specific PCI driver for receiving
+ * Hotplug and other device status signals.
+ * In Clovertrail platform, the HPD and OCP signals are delivered to the
+ * display sub-system using the TI TPD Companion chip.
+ */
+
+/* Constants */
+#define PS_HDMI_HPD_PCI_DRIVER_NAME "Clovertrail HDMI HPD Driver"
+
+/* Globals */
+static hdmi_context_t *g_context;
+
+
+
+#define PS_HDMI_MMIO_RESOURCE 0
+#define PS_VDC_OFFSET 0x00000000
+#define PS_VDC_SIZE 0x000080000
+#define PS_MSIC_PCI_DEVICE_ID 0x901
+
+#define PS_MSIC_HPD_GPIO_PIN 43
+#define PS_MSIC_LS_OE_GPIO_PIN 91
+
+#define PS_MSIC_HPD_GPIO_PIN_NAME "HDMI_HPD"
+#define PS_MSIC_LS_EN_GPIO_PIN_NAME "HDMI_LS_EN"
+
+#define PS_MSIC_VCC330CNT			0xd3
+#define PS_VCC330_OFF				0x24
+#define PS_VCC330_ON				0x37
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0))
+extern int intel_scu_ipc_command(u32 cmd, u32 sub, u8 *in, u32 inlen,
+		u32 *out, u32 outlen);
+#else
+extern int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen,
+                u32 *out, int outlen);
+#endif
+
+/* For CTP, it is required that SW pull up or pull down the
+ * LS_OE GPIO pin based on cable status. This is needed before
+ * performing any EDID read operation on CTP.
+ */
+static void __ps_gpio_configure_edid_read(void)
+{
+	static int old_pin_value  = -1;
+	int new_pin_value;
+	hdmi_context_t *ctx = g_context;
+
+	if (ctx == NULL) {
+		pr_err("%s failed due to internal error\n", __func__);
+		return;
+	}
+
+	new_pin_value = gpio_get_value(ctx->gpio_hpd_pin);
+	if (new_pin_value == old_pin_value)
+		return;
+
+	old_pin_value = new_pin_value;
+
+	if (new_pin_value == 0)
+		gpio_set_value(ctx->gpio_ls_en_pin, 0);
+	else
+		gpio_set_value(ctx->gpio_ls_en_pin, 1);
+
+	pr_debug("%s: CTP_HDMI_LS_OE pin = %d (%d)\n", __func__,
+		 gpio_get_value(ctx->gpio_ls_en_pin), new_pin_value);
+}
+
+otm_hdmi_ret_t ps_hdmi_pci_dev_init(void *context, struct pci_dev *pdev)
+{
+	otm_hdmi_ret_t rc = OTM_HDMI_SUCCESS;
+	int result = 0;
+	unsigned int vdc_start;
+	uint32_t pci_address = 0;
+	uint8_t pci_dev_revision = 0;
+	hdmi_context_t *ctx = NULL;
+
+	if (pdev == NULL || context == NULL) {
+		rc = OTM_HDMI_ERR_INTERNAL;
+		goto exit;
+	}
+	ctx = (hdmi_context_t *)context;
+
+	pr_debug("get resource start\n");
+	result = pci_read_config_dword(pdev, 16, &vdc_start);
+	if (result != 0) {
+		rc = OTM_HDMI_ERR_FAILED;
+		goto exit;
+	}
+	pci_address = vdc_start + PS_VDC_OFFSET;
+
+	pr_debug("map IO region\n");
+	/* Map IO region and save its length */
+	ctx->io_length = PS_VDC_SIZE;
+	ctx->io_address = ioremap_cache(pci_address, ctx->io_length);
+	if (!ctx->io_address) {
+		rc = OTM_HDMI_ERR_FAILED;
+		goto exit;
+	}
+
+	pr_debug("get PCI dev revision\n");
+	result = pci_read_config_byte(pdev, 8, &pci_dev_revision);
+	if (result != 0) {
+		rc = OTM_HDMI_ERR_FAILED;
+		goto exit;
+	}
+	ctx->dev.id = pci_dev_revision;
+	/* Store this context for use by MSIC PCI driver */
+	g_context = ctx;
+
+	ctx->is_connected_overridden = true;
+
+	/* Handle CTP specific GPIO configuration */
+	ctx->gpio_hpd_pin = get_gpio_by_name(PS_MSIC_HPD_GPIO_PIN_NAME);
+	if (-1 == ctx->gpio_hpd_pin) {
+		ctx->gpio_hpd_pin = PS_MSIC_HPD_GPIO_PIN;
+		pr_debug("get_gpio_by_name failed! Use default pin %d\n",
+				PS_MSIC_HPD_GPIO_PIN);
+	}
+
+	ctx->gpio_ls_en_pin = get_gpio_by_name(PS_MSIC_LS_EN_GPIO_PIN_NAME);
+	if (-1 == ctx->gpio_ls_en_pin) {
+		ctx->gpio_ls_en_pin = PS_MSIC_LS_OE_GPIO_PIN;
+		pr_debug("get_gpio_by_name failed! Use default pin %d\n",
+				PS_MSIC_LS_OE_GPIO_PIN);
+	}
+
+	if (gpio_request(ctx->gpio_ls_en_pin, "CTP_HDMI_LS_OE")) {
+		pr_err("%s: Unable to request gpio %d\n", __func__,
+				ctx->gpio_ls_en_pin);
+		rc = OTM_HDMI_ERR_FAILED;
+		goto exit;
+	}
+
+	if (!gpio_is_valid(ctx->gpio_ls_en_pin)) {
+		pr_err("%s: Unable to validate gpio %d\n", __func__,
+				ctx->gpio_ls_en_pin);
+		rc = OTM_HDMI_ERR_FAILED;
+		goto exit;
+	}
+
+	/* Set the GPIO based on cable status */
+	__ps_gpio_configure_edid_read();
+
+exit:
+	return rc;
+}
+
+otm_hdmi_ret_t ps_hdmi_pci_dev_deinit(void *context)
+{
+	otm_hdmi_ret_t rc = OTM_HDMI_SUCCESS;
+	hdmi_context_t *ctx = NULL;
+
+	if (context == NULL) {
+		rc = OTM_HDMI_ERR_INTERNAL;
+		goto exit;
+	}
+	ctx = (hdmi_context_t *)context;
+
+	/* unmap IO region */
+	iounmap(ctx->io_address);
+
+	/* Free GPIO resources */
+	gpio_free(ctx->gpio_ls_en_pin);
+exit:
+	return rc;
+}
+
+otm_hdmi_ret_t ps_hdmi_i2c_edid_read(void *ctx, unsigned int sp,
+				  unsigned int offset, void *buffer,
+				  unsigned int size)
+{
+	hdmi_context_t *context = (hdmi_context_t *)ctx;
+
+	char *src = context->edid_raw + sp * SEGMENT_SIZE + offset;
+	memcpy(buffer, src, size);
+
+	return OTM_HDMI_SUCCESS;
+}
+
+bool ps_hdmi_power_rails_on(void)
+{
+	int ret = 0;
+	pr_debug("Entered %s\n", __func__);
+
+	ret = intel_scu_ipc_iowrite8(PS_MSIC_VCC330CNT, PS_VCC330_ON);
+	if (ret) {
+		pr_debug("%s: Failed to power on VCC330.\n", __func__);
+		return false;
+	}
+
+	return true;
+}
+
+bool ps_hdmi_power_rails_off(void)
+{
+	int ret = 0;
+	pr_debug("Entered %s\n", __func__);
+
+	ret = intel_scu_ipc_iowrite8(PS_MSIC_VCC330CNT, PS_VCC330_OFF);
+	if (ret) {
+		pr_debug("%s: Failed to power off VCC330.\n", __func__);
+		return false;
+	}
+
+	return true;
+}
+
+/* enable/disable HPD */
+bool ps_hdmi_enable_hpd(bool enable)
+{
+	return true;
+}
+
+bool ps_hdmi_power_islands_on()
+{
+	/*
+	 * If pmu_nc_set_power_state fails then accessing HW
+	 * reg would result in a crash - IERR/Fabric error.
+	 */
+	if (pmu_nc_set_power_state(OSPM_DISPLAY_B_ISLAND,
+			OSPM_ISLAND_UP, OSPM_REG_TYPE))
+		BUG();
+
+	return true;
+}
+
+void ps_hdmi_power_islands_off()
+{
+}
+
+void ps_hdmi_vblank_control(struct drm_device *dev, bool on)
+{
+	if (on)
+		psb_enable_vblank(dev, 1);
+	else
+		psb_disable_vblank(dev, 1);
+}
+
+/*
+ * ps_hdmi_get_cable_status - Get HDMI cable connection status
+ * @context: hdmi device context
+ *
+ * Returns - boolean state.
+ * true - HDMI cable connected
+ * false - HDMI cable disconnected
+ */
+bool ps_hdmi_get_cable_status(void *context)
+{
+	hdmi_context_t *ctx = (hdmi_context_t *)context;
+	if (ctx == NULL)
+		return false;
+
+	/* Read HDMI cable status from GPIO */
+	/* For CTP, it is required that SW pull up or pull down the
+	 * LS_OE GPIO pin based on cable status. This is needed before
+	 * performing any EDID read operation on CTP.
+	 */
+	__ps_gpio_configure_edid_read();
+
+	if (gpio_get_value(ctx->gpio_hpd_pin) == 0)
+		ctx->is_connected = false;
+	else
+		ctx->is_connected = true;
+
+	if (g_context->override_cable_state)
+		ctx->is_connected = g_context->is_connected_overridden;
+
+	return ctx->is_connected;
+}
+
+/* get HDMI hotplug pin number */
+int ps_hdmi_get_hpd_pin(void)
+{
+	if (g_context == NULL)
+		return 0;
+
+	return g_context->gpio_hpd_pin;
+}
+
+/* override the hdmi hpd cable status */
+void ps_hdmi_override_cable_status(bool state, bool auto_state)
+{
+	if (g_context == NULL)
+		return 0;
+
+	g_context->override_cable_state = auto_state;
+
+	if (state)
+		g_context->is_connected_overridden = true;
+	else
+		g_context->is_connected_overridden = false;
+	return;
+}
+
+
+/**
+ * notify security component of hdcp and hdmi cable status
+ *
+ * @hdcp	HDCP status: true if phase1 is enabled
+ * @cable	HDMI connection status: true if connected
+ *
+ * Returns:	none
+ */
+void ps_hdmi_update_security_hdmi_hdcp_status(bool hdcp, bool cable)
+{
+#define IA_SCU_CMD      0XE8
+#define SCU_CHAABI_CMD  0X85
+#define CHAABI_MSG_SIZE 16
+
+	uint8_t  in_buf[CHAABI_MSG_SIZE];
+	uint32_t out_buf[CHAABI_MSG_SIZE/sizeof(uint32_t)];
+
+	pr_debug("hdcp: enter %s\n", __func__);
+
+	/* init
+	 * do not care about out_buf.
+	 */
+	memset(in_buf, 0, CHAABI_MSG_SIZE);
+
+	/* chaabi msg use byte 3 for command */
+	in_buf[3] = SCU_CHAABI_CMD;
+
+	/* chaabi msg use bits 1:0 of byte 4 for status */
+	if (cable)
+		in_buf[4] |= 1 << 0;
+	if (hdcp)
+		in_buf[4] |= 1 << 1;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0))
+	/* no sub-cmd, so set "sub" argument to 0 */
+	intel_scu_ipc_command(IA_SCU_CMD, 0, in_buf, sizeof(in_buf),
+			out_buf, sizeof(out_buf)/sizeof(uint32_t));
+#else
+        intel_scu_ipc_command(IA_SCU_CMD, 0, (u32 *)in_buf, sizeof(in_buf),
+                        out_buf, sizeof(out_buf)/sizeof(uint32_t));
+#endif
+	pr_debug("hdcp: leave %s\n", __func__);
+	return;
+}
+
+
+/**
+ * hdmi interrupt handler (top half).
+ * @irq:	irq number
+ * @data:	data for the interrupt handler
+ *
+ * Returns:	IRQ_HANDLED on NULL input arguments, and if the
+ *			interrupt is not HDMI HPD interrupts.
+ *		IRQ_WAKE_THREAD if this is a HDMI HPD interrupt.
+ * hdmi interrupt handler (upper half). handles the interrupts
+ * by reading hdmi status register and waking up bottom half if needed.
+ */
+irqreturn_t ps_hdmi_irq_handler(int irq, void *data)
+{
+	if (g_context == NULL)
+		return IRQ_HANDLED;
+
+	return IRQ_WAKE_THREAD;
+}
+
+/* Power management functions */
+static int ps_hdmi_hpd_suspend(struct device *dev)
+{
+	pr_debug("Entered %s\n", __func__);
+
+	if (g_context)
+		disable_irq(g_context->irq_number);
+
+	ps_hdmi_power_rails_off();
+	return 0;
+}
+
+static int ps_hdmi_hpd_resume(struct device *dev)
+{
+	pr_debug("Entered %s\n", __func__);
+
+	if (g_context)
+		enable_irq(g_context->irq_number);
+
+	if (g_context && g_context->is_connected)
+		ps_hdmi_power_rails_on();
+	return 0;
+}
+
+/* PCI probe function */
+static int ps_hdmi_hpd_probe(struct pci_dev *pdev,
+				       const struct pci_device_id *id)
+{
+	int result = 0;
+	hdmi_context_t *ctx = g_context;
+
+	if (pdev == NULL || ctx == NULL) {
+		pr_err("%s: called with NULL device or context\n", __func__);
+		result = -EINVAL;
+		return result;
+	}
+
+	/* Verify probe is called for the intended device */
+	if (pdev->device != PS_MSIC_PCI_DEVICE_ID) {
+		pr_err("%s: called for wrong device id = 0x%x\n", __func__,
+		       pdev->device);
+		result = -EINVAL;
+		goto exit;
+	}
+
+	pr_debug("pci_enable_device for 0x%x\n",
+					PS_MSIC_PCI_DEVICE_ID);
+	result = pci_enable_device(pdev);
+	if (result) {
+		pr_err("%s: Failed to enable MSIC PCI device = 0x%x\n",
+		       __func__, PS_MSIC_PCI_DEVICE_ID);
+		goto exit;
+	}
+
+	/* Perform the GPIO configuration */
+	result = gpio_request(ctx->gpio_hpd_pin, "ctp_hdmi_ti_hpd");
+	if (result) {
+		pr_debug("%s: Failed to request GPIO %d for kbd IRQ\n",
+			 __func__, ctx->gpio_hpd_pin);
+		goto exit2;
+	}
+
+	result = gpio_direction_input(ctx->gpio_hpd_pin);
+	if (result) {
+		pr_debug("%s: Failed to set GPIO %d as input\n",
+			 __func__, ctx->gpio_hpd_pin);
+		goto exit3;
+	}
+
+	ctx->irq_number = gpio_to_irq(ctx->gpio_hpd_pin);
+	pr_debug("%s: IRQ number assigned = %d\n", __func__, ctx->irq_number);
+
+	result = irq_set_irq_type(ctx->irq_number, IRQ_TYPE_EDGE_BOTH);
+	if (result) {
+		pr_debug("%s: Failed to set HDMI HPD IRQ type for IRQ %d\n",
+			 __func__, ctx->irq_number);
+		goto exit3;
+	}
+
+	/* This is unused on CTP platform, since we use GPIO */
+	ctx->dev.irq_io_address = 0;
+
+	result = request_threaded_irq(ctx->irq_number, ps_hdmi_irq_handler,
+				      ctx->hpd_callback, IRQF_SHARED,
+				      PS_HDMI_HPD_PCI_DRIVER_NAME,
+				      ctx->hpd_data);
+	if (result) {
+		pr_debug("%s: Register irq interrupt %d failed\n",
+			 __func__, ctx->irq_number);
+		goto exit3;
+	}
+	irq_set_irq_wake(ctx->irq_number, 1);
+
+	return result;
+
+exit3:
+	gpio_free(ctx->gpio_hpd_pin);
+exit2:
+	pci_disable_device(pdev);
+exit:
+	pci_dev_put(pdev);
+	return result;
+}
+
+/* PCI driver related structures */
+static DEFINE_PCI_DEVICE_TABLE(ps_hdmi_hpd_pci_id) = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PS_MSIC_PCI_DEVICE_ID) },
+	{ 0 }
+};
+
+static const struct dev_pm_ops ps_hdmi_hpd_pm_ops = {
+	.suspend = ps_hdmi_hpd_suspend,
+	.resume = ps_hdmi_hpd_resume,
+};
+
+static struct pci_driver ps_hdmi_hpd_driver = {
+	.name = PS_HDMI_HPD_PCI_DRIVER_NAME,
+	.id_table = ps_hdmi_hpd_pci_id,
+	.probe = ps_hdmi_hpd_probe,
+	.driver.pm = &ps_hdmi_hpd_pm_ops,
+};
+
+/* PCI Driver registration function */
+int ps_hdmi_hpd_register_driver(void)
+{
+	pr_debug("%s: Registering PCI driver for HDMI HPD\n", __func__);
+	return pci_register_driver(&ps_hdmi_hpd_driver);
+}
+
+/* PCI Driver Cleanup function */
+int ps_hdmi_hpd_unregister_driver(void)
+{
+	pci_unregister_driver(&ps_hdmi_hpd_driver);
+	return 0;
+}
+
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/pil/specific/include/ps_hdmi.h b/drivers/external_drivers/intel_media/otm_hdmi/pil/specific/include/ps_hdmi.h
new file mode 100644
index 0000000..8dc9729
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/pil/specific/include/ps_hdmi.h
@@ -0,0 +1,149 @@
+/*
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  Contact Information:
+
+  Intel Corporation
+  2200 Mission College Blvd.
+  Santa Clara, CA  95054
+
+  BSD LICENSE
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#ifndef _PLATFORM_SPEC_H
+#define _PLATFORM_SPEC_H
+
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <asm/intel_scu_ipc.h>
+#include "otm_hdmi_types.h"
+#include "otm_hdmi_defs.h"
+#include "edid.h"
+
+otm_hdmi_ret_t ps_hdmi_i2c_edid_read(void *ctx, unsigned int sp,
+				  unsigned int offset, void *buffer,
+				  unsigned int size);
+
+otm_hdmi_ret_t ps_hdmi_pci_dev_init(void *context, struct pci_dev *pdev);
+
+otm_hdmi_ret_t ps_hdmi_pci_dev_deinit(void *context);
+
+/*
+ * ps_hdmi_get_cable_status - Get HDMI cable connection status
+ * @context: hdmi device context
+ *
+ * Returns - boolean state.
+ * true - HDMI cable connected
+ * false - HDMI cable disconnected
+ */
+bool ps_hdmi_get_cable_status(void *context);
+
+/* turn on HDMI power rails not used for HPD */
+bool ps_hdmi_power_rails_on(void);
+
+bool ps_hdmi_hdcp_power_islands_on(void);
+
+
+/* turn off HDMI power rails not used for HPD */
+bool ps_hdmi_power_rails_off(void);
+
+/* turn on HDMI power islands */
+bool ps_hdmi_power_islands_on(void);
+
+/* turn off HDMI power islands */
+void ps_hdmi_power_islands_off(void);
+
+/* enable IRQ and CPD_HPD */
+bool ps_hdmi_enable_hpd(bool enable);
+
+/* control HDMI vblank interrupt */
+void ps_hdmi_vblank_control(struct drm_device *dev, bool on);
+
+/* get HDMI hotplug pin number */
+int ps_hdmi_get_hpd_pin(void);
+
+/* override the hdmi hpd cable status */
+void ps_hdmi_override_cable_status(bool state, bool auto_state);
+
+
+/**
+ * hdmi interrupt handler (top half).
+ * @irq:	irq number
+ * @data:	data for the interrupt handler
+ *
+ * Returns:	IRQ_HANDLED on NULL input arguments, and if the
+ *			interrupt is not HDMI HPD interrupts.
+ *		IRQ_WAKE_THREAD if this is a HDMI HPD interrupt.
+ * hdmi interrupt handler (upper half). handles the interrupts
+ * by reading hdmi status register and waking up bottom half if needed.
+ */
+irqreturn_t ps_hdmi_irq_handler(int irq, void *data);
+
+/* PCI Driver registration function */
+int ps_hdmi_hpd_register_driver(void);
+
+/* PCI Driver Cleanup function */
+int ps_hdmi_hpd_unregister_driver(void);
+
+/**
+ * notify security component of hdcp and hdmi cable status
+ *
+ * @hdcp	HDCP status: true if phase1 is enabled
+ * @cable	HDMI connection status: true if connected
+ *
+ * Returns:	none
+ */
+void ps_hdmi_update_security_hdmi_hdcp_status(bool hdcp, bool cable);
+
+#endif /* _PLATFORM_SPEC_H */
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/pil/specific/mfld/ps_hdmi.c b/drivers/external_drivers/intel_media/otm_hdmi/pil/specific/mfld/ps_hdmi.c
new file mode 100644
index 0000000..d24a95d
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/pil/specific/mfld/ps_hdmi.c
@@ -0,0 +1,584 @@
+/*
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  Contact Information:
+
+  Intel Corporation
+  2200 Mission College Blvd.
+  Santa Clara, CA  95054
+
+  BSD LICENSE
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+#include "otm_hdmi_types.h"
+
+#include <asm/io.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include "otm_hdmi.h"
+#include "ipil_hdmi.h"
+#include "ps_hdmi.h"
+#include "psb_drv.h"
+
+#include <asm/intel_scu_pmic.h>
+#include "psb_powermgmt.h"
+
+/* Implementation of the Medfield specific PCI driver for receiving
+ * Hotplug and other device status signals.
+ * In Medfield platform, the HPD and OCP signals are delivered to the
+ * display sub-system from the MSIC chip.
+ */
+
+/* Constants */
+#define PS_HDMI_HPD_PCI_DRIVER_NAME "Medfield HDMI MSIC Driver"
+
+/* Globals */
+static hdmi_context_t *g_context;
+
+#define PS_HDMI_MMIO_RESOURCE 0
+#define PS_VDC_OFFSET 0x00000000
+#define PS_VDC_SIZE 0x000080000
+#define PS_MSIC_PCI_DEVICE_ID 0x0831
+#define PS_MSIC_VRINT_ADDR 0xFFFF7FCB
+#define PS_MSIC_VRINT_IOADDR_LEN 0x02
+
+#define PS_HDMI_OCP_STATUS			(1 << 2)
+#define PS_HDMI_HPD_STATUS_BIT			(1 << 3)
+
+#define PS_MSIC_VCC330CNT			0xd3
+#define PS_VCC330_OFF				0x24
+#define PS_VCC330_ON				0x37
+#define PS_MSIC_VHDMICNT			0xde
+#define PS_VHDMI_OFF				0x24
+#define PS_VHDMI_ON				0xa4
+#define PS_VHDMI_DB_30MS			0x60
+#define PS_MSIC_HDMI_STATUS_CMD                 0x281
+#define PS_MSIC_HDMI_STATUS                     (1 << 0)
+#define PS_MSIC_IRQLVL1_MASK                    0x21
+#define PS_VREG_MASK                            (1 << 5)
+
+
+otm_hdmi_ret_t ps_hdmi_pci_dev_init(void *context, struct pci_dev *pdev)
+{
+	otm_hdmi_ret_t rc = OTM_HDMI_SUCCESS;
+	int result = 0;
+	unsigned int vdc_start;
+	uint32_t pci_address = 0;
+	uint8_t pci_dev_revision = 0;
+	hdmi_context_t *ctx = NULL;
+
+	if (pdev == NULL || context == NULL) {
+		rc = OTM_HDMI_ERR_INTERNAL;
+		goto exit;
+	}
+	ctx = (hdmi_context_t *)context;
+
+	pr_debug("get resource start\n");
+	result = pci_read_config_dword(pdev, 16, &vdc_start);
+	if (result != 0) {
+		rc = OTM_HDMI_ERR_FAILED;
+		goto exit;
+	}
+	pci_address = vdc_start + PS_VDC_OFFSET;
+
+	pr_debug("map IO region\n");
+	/* Map IO region and save its length */
+	ctx->io_length = PS_VDC_SIZE;
+	ctx->io_address = ioremap_cache(pci_address, ctx->io_length);
+	if (!ctx->io_address) {
+		rc = OTM_HDMI_ERR_FAILED;
+		goto exit;
+	}
+
+	pr_debug("get PCI dev revision\n");
+	result = pci_read_config_byte(pdev, 8, &pci_dev_revision);
+	if (result != 0) {
+		rc = OTM_HDMI_ERR_FAILED;
+		goto exit;
+	}
+	ctx->dev.id = pci_dev_revision;
+	/* Store this context for use by MSIC PCI driver */
+	g_context = ctx;
+exit:
+	return rc;
+}
+
+otm_hdmi_ret_t ps_hdmi_pci_dev_deinit(void *context)
+{
+	otm_hdmi_ret_t rc = OTM_HDMI_SUCCESS;
+	hdmi_context_t *ctx = NULL;
+
+	if (context == NULL) {
+		rc = OTM_HDMI_ERR_INTERNAL;
+		goto exit;
+	}
+	ctx = (hdmi_context_t *)context;
+
+	/* unmap IO region */
+	iounmap(ctx->io_address) ;
+
+exit:
+	return rc;
+}
+
+otm_hdmi_ret_t ps_hdmi_i2c_edid_read(void *ctx, unsigned int sp,
+				  unsigned int offset, void *buffer,
+				  unsigned int size)
+{
+	hdmi_context_t *context = (hdmi_context_t *)ctx;
+
+	char *src = context->edid_raw + sp * SEGMENT_SIZE + offset;
+	memcpy(buffer, src, size);
+
+	return OTM_HDMI_SUCCESS;
+}
+
+static unsigned char vrint_data;
+
+bool ps_hdmi_power_rails_on(void)
+{
+	int ret = 0;
+	pr_debug("Entered %s\n", __func__);
+
+	if (vrint_data == 0) {
+		/* If it is not invoked in response to hot plug event,
+		 * then simply a NOP as power rails are never turned off.
+		 */
+		pr_debug("%s: NOP as there is no HPD.\n", __func__);
+		return true;
+	}
+	/* Turn on HDMI power rails. These will be on in all non-S0iX
+	 * states so that HPD and connection status will work. VCC330
+	 * will have ~1.7mW usage during idle states when the display
+	 * is active
+	 */
+	ret = intel_scu_ipc_iowrite8(PS_MSIC_VCC330CNT, PS_VCC330_ON);
+	if (ret) {
+		pr_debug("%s: Failed to power on VCC330.\n", __func__);
+		return false;
+	}
+
+	if (vrint_data & PS_HDMI_OCP_STATUS) {
+		/* When there occurs overcurrent in MSIC HDMI HDP,
+		 * need to reset VHDMIEN by clearing to 0 then set to 1
+		 */
+		ret = intel_scu_ipc_iowrite8(PS_MSIC_VHDMICNT,
+					PS_VHDMI_OFF);
+		if (ret) {
+			pr_debug("%s: Failed to power off VHDMI.\n", __func__);
+			goto err;
+		}
+		vrint_data = 0;
+	}
+
+
+	/* MSIC documentation requires that there be a 500us
+	 * delay after enabling VCC330 before you can enable
+	 * VHDMI
+	 */
+	usleep_range(500, 1000);
+
+	/* Extend VHDMI switch de-bounce time, to avoid
+	 * redundant MSIC VREG/HDMI interrupt during HDMI
+	 * cable plugged in/out
+	 */
+	ret = intel_scu_ipc_iowrite8(PS_MSIC_VHDMICNT,
+				PS_VHDMI_ON |
+				PS_VHDMI_DB_30MS);
+	if (ret) {
+		pr_debug("%s: Failed to power on VHDMI.\n", __func__);
+		goto err;
+	}
+
+	return true;
+
+err:
+	ret = intel_scu_ipc_iowrite8(PS_MSIC_VCC330CNT, PS_VCC330_OFF);
+	if (ret) {
+		pr_debug("%s: Failed to power off VCC330 during clean up.\n",
+				__func__);
+		/* Fall through */
+	}
+	return false;
+}
+
+
+bool ps_hdmi_power_rails_off(void)
+{
+	/* VCC330 must stay on always for HPD. */
+	return true;
+}
+
+/* enable/disable HPD */
+bool ps_hdmi_enable_hpd(bool enable)
+{
+}
+
+bool ps_hdmi_power_islands_on()
+{
+	/*
+	 * If pmu_nc_set_power_state fails then accessing HW
+	 * reg would result in a crash - IERR/Fabric error.
+	 */
+	if (pmu_nc_set_power_state(OSPM_DISPLAY_B_ISLAND,
+				OSPM_ISLAND_UP, OSPM_REG_TYPE))
+		BUG();
+
+	return true;
+}
+
+void ps_hdmi_power_islands_off()
+{
+}
+
+void ps_hdmi_vblank_control(struct drm_device *dev, bool on)
+{
+	if (on)
+		psb_enable_vblank(dev, 1);
+	else
+		psb_disable_vblank(dev, 1);
+}
+
+/*
+ * ps_hdmi_get_cable_status - Get HDMI cable connection status
+ * @context: hdmi device context
+ *
+ * Returns - boolean state.
+ * true - HDMI cable connected
+ * false - HDMI cable disconnected
+ */
+bool ps_hdmi_get_cable_status(void *context)
+{
+	hdmi_context_t *ctx = (hdmi_context_t *)context;
+	u8 data = 0;
+
+	if (ctx == NULL)
+		return false;
+
+	/* Read HDMI cable status from MSIC chip */
+	intel_scu_ipc_ioread8(PS_MSIC_HDMI_STATUS_CMD, &data);
+	if (data & PS_MSIC_HDMI_STATUS)
+		ctx->is_connected = true;
+	else
+		ctx->is_connected = false;
+	return ctx->is_connected;
+}
+
+/**
+ * hdmi interrupt handler (upper half).
+ * @irq:	irq number
+ * @data:	data for the interrupt handler
+ *
+ * Returns:	IRQ_HANDLED on NULL input arguments, and if the
+ *			interrupt is not HDMI HPD interrupts.
+ *		IRQ_WAKE_THREAD if this is a HDMI HPD interrupt.
+ * hdmi interrupt handler (upper half). handles the interrupts
+ * by reading hdmi status register and waking up bottom half if needed.
+ */
+irqreturn_t ps_hdmi_irq_handler(int irq, void *data)
+{
+	/* Read interrupt status register */
+	if (g_context != NULL) {
+		vrint_data = readb(g_context->dev.irq_io_address);
+
+		/* handle HDMI HPD interrupts. */
+		if (vrint_data & (PS_HDMI_HPD_STATUS_BIT|PS_HDMI_OCP_STATUS))
+			return IRQ_WAKE_THREAD;
+	}
+	return IRQ_HANDLED;
+}
+
+/* Power management functions */
+
+/*
+ * Platform specific resume function after deep-sleep
+ * This function is used to carry out any specific actviity
+ * to aid HDMI IP resume in the context of system resume.
+ * This function will always be scheduled to execute after
+ * the system has finished resuming.
+ */
+void ps_post_resume_wq(struct work_struct *work)
+{
+	hdmi_context_t *ctx = container_of(work,
+					   hdmi_context_t,
+					   post_resume_work);
+	int ret = 0;
+
+	pr_debug("Entered %s\n", __func__);
+	if (ctx == NULL) {
+		pr_err("%s: NULL context!\n", __func__);
+		return;
+	}
+
+	/* While going to suspend state, the HPD interrupts from MSIC
+	 * were masked. During the resume, we do not immediately unmask
+	 * the interrupt to avoid race between the resultant hotplug
+	 * handlers and system resume activity. Instead, we simply turn
+	 * on the HDMI MSIC power rails and schedule this function to be
+	 * called after the system finishes a complete resume. At this
+	 * time, it is safe to re-enable HPD interrupts.
+	 */
+	ret = intel_scu_ipc_update_register(PS_MSIC_IRQLVL1_MASK, 0x0,
+					    PS_VREG_MASK);
+	if (ret) {
+		pr_debug("%s: Failed to unmask VREG IRQ.\n",
+			__func__);
+		goto exit;
+	}
+
+exit:
+	pr_debug("Exiting %s\n", __func__);
+}
+
+static int ps_hdmi_hpd_suspend(struct device *dev)
+{
+	int ret = 0;
+
+	pr_debug("Entered %s\n", __func__);
+
+	/* suspend process is irreversible */
+	ret = intel_scu_ipc_update_register(PS_MSIC_IRQLVL1_MASK, 0xff,
+					    PS_VREG_MASK);
+	if (ret) {
+		pr_debug("%s: Failed to mask VREG IRQ.\n",
+			  __func__);
+	}
+
+	ret = intel_scu_ipc_iowrite8(PS_MSIC_VHDMICNT, PS_VHDMI_OFF);
+	if (ret) {
+		pr_debug("%s: Failed to power off VHDMI.\n",
+			  __func__);
+	}
+
+	ret = intel_scu_ipc_iowrite8(PS_MSIC_VCC330CNT, PS_VCC330_OFF);
+	if (ret) {
+		pr_debug("%s: Failed to power off VCC330.\n",
+			  __func__);
+	}
+
+	pr_debug("Exiting %s\n", __func__);
+	return ret;
+}
+
+static int ps_hdmi_hpd_resume(struct device *dev)
+{
+	int ret = 0;
+
+	pr_debug("Entered %s\n", __func__);
+
+	ret = intel_scu_ipc_iowrite8(PS_MSIC_VCC330CNT, PS_VCC330_ON);
+	if (ret) {
+		pr_debug("%s: Failed to power on VCC330.\n",
+			  __func__);
+		goto err;
+	}
+
+	/* MSIC documentation requires that there be a 500us delay
+	   after enabling VCC330 before you can enable VHDMI */
+	usleep_range(500, 1000);
+
+	ret = intel_scu_ipc_iowrite8(PS_MSIC_VHDMICNT,
+				     PS_VHDMI_ON | PS_VHDMI_DB_30MS);
+	if (ret) {
+		pr_debug("%s: Failed to power on VHDMI.\n",
+			  __func__);
+		goto err;
+	}
+
+	/* We schedule a delayed wok item to be executed only after the
+	 * the full system has resumed.
+	 */
+	queue_work(g_context->post_resume_wq, &g_context->post_resume_work);
+
+	pr_debug("Exiting %s\n", __func__);
+	return ret;
+
+err:
+	pr_debug("Exiting %s\n", __func__);
+	return ret;
+}
+
+int ps_hdmi_get_hpd_pin(void)
+{
+	return 0;
+}
+
+void ps_hdmi_override_cable_status(bool state, bool auto_state)
+{
+	return;
+}
+
+
+/* PCI probe function */
+static int ps_hdmi_hpd_probe(struct pci_dev *pdev,
+				       const struct pci_device_id *id)
+{
+	int result = 0;
+	hdmi_context_t *ctx = g_context;
+
+	if (pdev == NULL || ctx == NULL) {
+		pr_err("%s: called with NULL device or context\n", __func__);
+		result = -EINVAL;
+		return result;
+	}
+
+	/* Verify probe is called for the intended device */
+	if (pdev->device != PS_MSIC_PCI_DEVICE_ID) {
+		pr_err("%s: called for wrong device id = 0x%x\n", __func__,
+		       pdev->device);
+		result = -EINVAL;
+		goto exit;
+	}
+
+	pr_debug("pci_enable_device for 0x%x\n",
+					PS_MSIC_PCI_DEVICE_ID);
+	result = pci_enable_device(pdev);
+	if (result) {
+		pr_err("%s: Failed to enable MSIC PCI device = 0x%x\n",
+		       __func__, PS_MSIC_PCI_DEVICE_ID);
+		goto exit;
+	}
+
+	/* Map IO region for IRQ registers */
+	ctx->dev.irq_io_address = ioremap_nocache(PS_MSIC_VRINT_ADDR,
+						  PS_MSIC_VRINT_IOADDR_LEN);
+	if (!ctx->dev.irq_io_address) {
+		pr_err("%s: Failed to map IO region for MSIC IRQ\n", __func__);
+		result = -ENOMEM;
+		goto exit2;
+	}
+
+	ctx->irq_number = pdev->irq;
+	pr_debug("%s: IRQ number assigned = %d\n", __func__, pdev->irq);
+
+	result = request_threaded_irq(ctx->irq_number, ps_hdmi_irq_handler,
+				      ctx->hpd_callback, IRQF_SHARED,
+				      PS_HDMI_HPD_PCI_DRIVER_NAME,
+				      ctx->hpd_data);
+	if (result) {
+		pr_debug("%s: Register irq interrupt %d failed\n",
+			 __func__, ctx->irq_number);
+		goto exit3;
+	}
+
+	/* Create Freezable workqueue for post resume HPD operations */
+	ctx->post_resume_wq = create_freezable_workqueue("MFLD Post-Resume WQ");
+	if (!ctx->post_resume_wq) {
+		pr_debug("%s: Failed to create post-resume workqueue\n",
+			 __func__);
+		goto exit3;
+	}
+
+	INIT_WORK(&ctx->post_resume_work, ps_post_resume_wq);
+	return result;
+
+exit3:
+	iounmap(ctx->dev.irq_io_address);
+exit2:
+	pci_disable_device(pdev);
+exit:
+	pci_dev_put(pdev);
+	return result;
+}
+
+/* PCI driver related structures */
+static DEFINE_PCI_DEVICE_TABLE(ps_hdmi_hpd_pci_id) = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PS_MSIC_PCI_DEVICE_ID) },
+	{ 0 }
+};
+
+static const struct dev_pm_ops ps_hdmi_hpd_pm_ops = {
+	.suspend = ps_hdmi_hpd_suspend,
+	.resume = ps_hdmi_hpd_resume,
+};
+
+static struct pci_driver ps_hdmi_hpd_driver = {
+	.name = PS_HDMI_HPD_PCI_DRIVER_NAME,
+	.id_table = ps_hdmi_hpd_pci_id,
+	.probe = ps_hdmi_hpd_probe,
+	.driver.pm = &ps_hdmi_hpd_pm_ops,
+};
+
+/* PCI Driver registration function */
+int ps_hdmi_hpd_register_driver(void)
+{
+	pr_debug("%s: Registering PCI driver for HDMI HPD\n", __func__);
+	return pci_register_driver(&ps_hdmi_hpd_driver);
+}
+
+/* PCI Driver Cleanup function */
+int ps_hdmi_hpd_unregister_driver(void)
+{
+	/* unmap IO region */
+	iounmap((void *)g_context->dev.irq_io_address);
+	pci_unregister_driver(&ps_hdmi_hpd_driver);
+
+	return 0;
+}
+
+/**
+ * notify security component of hdcp and hdmi cable status
+ *
+ * @hdcp	HDCP status: true if phase1 is enabled
+ * @cable	HDMI connection status: true if connected
+ *
+ * Returns:	none
+ */
+void ps_hdmi_update_security_hdmi_hdcp_status(bool hdcp, bool cable)
+{
+	/* Note: do nothing since not clear if mfld needs this or not */
+	return;
+}
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/pil/specific/mofd/ps_hdmi.c b/drivers/external_drivers/intel_media/otm_hdmi/pil/specific/mofd/ps_hdmi.c
new file mode 100644
index 0000000..752e94f
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/pil/specific/mofd/ps_hdmi.c
@@ -0,0 +1,512 @@
+/*
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2012 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  Contact Information:
+
+  Intel Corporation
+  2200 Mission College Blvd.
+  Santa Clara, CA  95054
+
+  BSD LICENSE
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+#include "otm_hdmi_types.h"
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include "otm_hdmi.h"
+#include "ipil_hdmi.h"
+#include "ps_hdmi.h"
+#include <asm/intel_scu_pmic.h>
+#include <asm/intel-mid.h>
+#include "pwr_mgmt.h"
+
+/* Implementation of the Moorefield specific PCI driver for receiving
+ * Hotplug and other device status signals.
+ * In Moorefield platform, the HPD and OCP signals are delivered to the
+ * display sub-system using the TI TPD Companion chip.
+ */
+
+/* Constants */
+#define PS_HDMI_HPD_PCI_DRIVER_NAME "Moorefield HDMI HPD Driver"
+
+/* Globals */
+static hdmi_context_t *g_context = NULL;
+
+#define PS_HDMI_MMIO_RESOURCE 0
+#define PS_VDC_OFFSET 0x00000000
+#define PS_VDC_SIZE 0x000080000
+#define PS_MSIC_PCI_DEVICE_ID 0x11A6
+
+#define PS_MSIC_HPD_GPIO_PIN 16
+#define PS_MSIC_LS_EN_GPIO_PIN 67
+#define PS_MSIC_HPD_GPIO_PIN_NAME "HDMI_HPD"
+#define PS_MSIC_LS_EN_GPIO_PIN_NAME "HDMI_LS_EN"
+
+
+/* For Moorefield, it is required that SW pull up or pull down the
+ * LS_OE GPIO pin based on cable status. This is needed before
+ * performing any EDID read operation on Moorefield.
+ */
+static void __ps_gpio_configure_edid_read(void)
+{
+	static int old_pin_value  = -1;
+	int new_pin_value;
+	hdmi_context_t *ctx = g_context;
+
+	if (ctx == NULL) {
+		pr_err("%s failed due to internal error\n", __func__);
+		return;
+	}
+
+	new_pin_value = gpio_get_value(ctx->gpio_hpd_pin);
+	if (new_pin_value == old_pin_value)
+		return;
+
+	old_pin_value = new_pin_value;
+
+	if (new_pin_value == 0)
+		gpio_set_value(ctx->gpio_ls_en_pin, 0);
+	else
+		gpio_set_value(ctx->gpio_ls_en_pin, 1);
+	pr_debug("%s: MSIC_LS_OE pin = %d (%d)\n", __func__,
+		 gpio_get_value(ctx->gpio_ls_en_pin), new_pin_value);
+}
+
+otm_hdmi_ret_t ps_hdmi_pci_dev_init(void *context, struct pci_dev *pdev)
+{
+	otm_hdmi_ret_t rc = OTM_HDMI_SUCCESS;
+	int result = 0;
+	unsigned int vdc_start;
+	uint32_t pci_address = 0;
+	uint8_t pci_dev_revision = 0;
+	hdmi_context_t *ctx = NULL;
+
+	if (pdev == NULL || context == NULL) {
+		rc = OTM_HDMI_ERR_INTERNAL;
+		goto exit;
+	}
+	ctx = (hdmi_context_t *)context;
+
+	pr_debug("get resource start\n");
+	result = pci_read_config_dword(pdev, 16, &vdc_start);
+	if (result != 0) {
+		rc = OTM_HDMI_ERR_FAILED;
+		goto exit;
+	}
+	pci_address = vdc_start + PS_VDC_OFFSET;
+
+	pr_debug("map IO region\n");
+	/* Map IO region and save its length */
+	ctx->io_length = PS_VDC_SIZE;
+	ctx->io_address = ioremap(pci_address, ctx->io_length);
+	if (!ctx->io_address) {
+		rc = OTM_HDMI_ERR_FAILED;
+		goto exit;
+	}
+
+	pr_debug("get PCI dev revision\n");
+	result = pci_read_config_byte(pdev, 8, &pci_dev_revision);
+	if (result != 0) {
+		rc = OTM_HDMI_ERR_FAILED;
+		goto exit;
+	}
+	ctx->dev.id = pci_dev_revision;
+	/* Store this context for use by MSIC PCI driver */
+	g_context = ctx;
+	ctx->is_connected_overridden = true;
+
+	/* Handle Moorefield specific GPIO configuration
+	 * to enable EDID reads
+	 */
+	ctx->gpio_hpd_pin = get_gpio_by_name(PS_MSIC_HPD_GPIO_PIN_NAME);
+	if (-1 == ctx->gpio_hpd_pin) {
+		ctx->gpio_hpd_pin = PS_MSIC_HPD_GPIO_PIN;
+		pr_debug("get_gpio_by_name failed! Use default pin %d\n",
+				PS_MSIC_HPD_GPIO_PIN);
+	}
+
+	ctx->gpio_ls_en_pin = get_gpio_by_name(PS_MSIC_LS_EN_GPIO_PIN_NAME);
+	if (-1 == ctx->gpio_ls_en_pin) {
+		pr_debug("get_gpio_by_name failed! Use default pin %d\n",
+				PS_MSIC_LS_EN_GPIO_PIN);
+		ctx->gpio_ls_en_pin = PS_MSIC_LS_EN_GPIO_PIN;
+	}
+
+	if (gpio_request(ctx->gpio_ls_en_pin, "HDMI_LS_EN")) {
+		pr_err("%s: Unable to request gpio %d\n", __func__,
+		       ctx->gpio_ls_en_pin);
+		rc = OTM_HDMI_ERR_FAILED;
+		goto exit;
+	}
+
+	if (!gpio_is_valid(ctx->gpio_ls_en_pin)) {
+		pr_err("%s: Unable to validate gpio %d\n", __func__,
+		       ctx->gpio_ls_en_pin);
+		rc = OTM_HDMI_ERR_FAILED;
+		goto exit;
+	}
+
+	if (gpio_direction_output(ctx->gpio_ls_en_pin, 0)) {
+		pr_err("%s: Failed to set GPIO %d as output\n",
+			 __func__, ctx->gpio_ls_en_pin);
+		goto exit;
+	}
+	/* Set the GPIO based on cable status */
+	__ps_gpio_configure_edid_read();
+exit:
+	return rc;
+}
+
+otm_hdmi_ret_t ps_hdmi_pci_dev_deinit(void *context)
+{
+	otm_hdmi_ret_t rc = OTM_HDMI_SUCCESS;
+	hdmi_context_t *ctx = NULL;
+
+	if (context == NULL) {
+		rc = OTM_HDMI_ERR_INTERNAL;
+		goto exit;
+	}
+	ctx = (hdmi_context_t *)context;
+
+	/* unmap IO region */
+	iounmap(ctx->io_address);
+
+	/* Free GPIO resources */
+	gpio_free(ctx->gpio_ls_en_pin);
+exit:
+	return rc;
+}
+
+otm_hdmi_ret_t ps_hdmi_i2c_edid_read(void *ctx, unsigned int sp,
+				  unsigned int offset, void *buffer,
+				  unsigned int size)
+{
+	hdmi_context_t *context = (hdmi_context_t *)ctx;
+
+	char *src = context->edid_raw + sp * SEGMENT_SIZE + offset;
+	memcpy(buffer, src, size);
+
+	return OTM_HDMI_SUCCESS;
+}
+
+bool ps_hdmi_power_rails_on(void)
+{
+	pr_debug("Entered %s\n", __func__);
+	return true;
+}
+
+bool ps_hdmi_power_rails_off(void)
+{
+	pr_debug("Entered %s\n", __func__);
+	return 0;
+}
+
+/* enable/disable IRQ and CPD_HPD */
+bool ps_hdmi_enable_hpd(bool enable)
+{
+	u8 pin = 0;
+
+	pr_debug("Entered %s: %s\n", __func__, enable ? "enable" : "disable");
+
+	/* see ShadyCove PMIC spec and board schema */
+	/* PRx uses GPIO0 for CT_CP_HPD */
+	pin = 0x7e;
+
+	if (enable)
+		intel_scu_ipc_iowrite8(pin, 0x31);
+	else
+		intel_scu_ipc_iowrite8(pin, 0x30);
+	return true;
+}
+
+bool ps_hdmi_power_islands_on(void)
+{
+	/* power on display island C to use overlay C and sprite D planes */
+	return ospm_power_using_hw_begin(
+			OSPM_DISPLAY_B | OSPM_DISPLAY_HDMI,
+			OSPM_UHB_FORCE_POWER_ON);
+}
+
+void ps_hdmi_power_islands_off(void)
+{
+	ospm_power_using_hw_end(
+		OSPM_DISPLAY_HDMI | OSPM_DISPLAY_B);
+}
+
+void ps_hdmi_vblank_control(struct drm_device *dev, bool on)
+{
+	/* Won't force turning on/off vblank interrupt for MRFLD. */
+}
+
+/*
+ * ps_hdmi_get_cable_status - Get HDMI cable connection status
+ * @context: hdmi device context
+ *
+ * Returns - boolean state.
+ * true - HDMI cable connected
+ * false - HDMI cable disconnected
+ */
+bool ps_hdmi_get_cable_status(void *context)
+{
+	hdmi_context_t *ctx = (hdmi_context_t *)context;
+	if (ctx == NULL)
+		return false;
+
+	/* Read HDMI cable status from GPIO */
+	/* For Moorefield, it is required that SW pull up or pull down the
+	 * LS_OE GPIO pin based on cable status. This is needed before
+	 * performing any EDID read operation on Moorefield.
+	 */
+	__ps_gpio_configure_edid_read();
+
+	if (g_context->override_cable_state)
+		return g_context->is_connected_overridden;
+
+	if (gpio_get_value(ctx->gpio_hpd_pin) == 0)
+		ctx->is_connected =  false;
+	else
+		ctx->is_connected = true;
+	return ctx->is_connected;
+}
+
+/**
+ * notify security component of hdcp and hdmi cable status
+ *
+ * @hdcp	HDCP status: true if phase1 is enabled
+ * @cable	HDMI connection status: true if connected
+ *
+ * Returns:	none
+ */
+void ps_hdmi_update_security_hdmi_hdcp_status(bool hdcp, bool cable)
+{
+	/* Note: do nothing since not clear if mrfld needs this or not */
+	return;
+}
+
+/**
+ * hdmi interrupt handler (top half).
+ * @irq:	irq number
+ * @data:	data for the interrupt handler
+ *
+ * Returns:	IRQ_HANDLED on NULL input arguments, and if the
+ *			interrupt is not HDMI HPD interrupts.
+ *		IRQ_WAKE_THREAD if this is a HDMI HPD interrupt.
+ * hdmi interrupt handler (upper half). handles the interrupts
+ * by reading hdmi status register and waking up bottom half if needed.
+ */
+irqreturn_t ps_hdmi_irq_handler(int irq, void *data)
+{
+	if (g_context == NULL)
+		return IRQ_HANDLED;
+
+	return IRQ_WAKE_THREAD;
+}
+
+/* Power management functions */
+static int ps_hdmi_hpd_suspend(struct device *dev)
+{
+	pr_debug("Entered %s\n", __func__);
+	ps_hdmi_power_rails_off();
+	return 0;
+}
+
+static int ps_hdmi_hpd_resume(struct device *dev)
+{
+	pr_debug("Entered %s\n", __func__);
+	ps_hdmi_power_rails_on();
+	return 0;
+}
+
+/* get HDMI hotplug pin number */
+int ps_hdmi_get_hpd_pin(void)
+{
+	if (g_context == NULL)
+		return 0;
+
+	return g_context->gpio_hpd_pin;
+}
+
+/* override the hdmi hpd cable status */
+void ps_hdmi_override_cable_status(bool state, bool auto_state)
+{
+	if (g_context == NULL)
+		return;
+
+	g_context->override_cable_state = auto_state;
+
+	if (state)
+		g_context->is_connected_overridden = true;
+	else
+		g_context->is_connected_overridden = false;
+	return;
+}
+
+
+
+/* PCI probe function */
+static int ps_hdmi_hpd_probe(struct pci_dev *pdev,
+				       const struct pci_device_id *id)
+{
+	int result = 0;
+	hdmi_context_t *ctx = g_context;
+
+	if (pdev == NULL || ctx == NULL) {
+		pr_err("%s: called with NULL device or context\n", __func__);
+		result = -EINVAL;
+		return result;
+	}
+
+	/* Verify probe is called for the intended device */
+	if (pdev->device != PS_MSIC_PCI_DEVICE_ID) {
+		pr_err("%s: called for wrong device id = 0x%x\n", __func__,
+		       pdev->device);
+		result = -EINVAL;
+		goto exit;
+	}
+
+	pr_debug("pci_enable_device for 0x%x\n",
+					PS_MSIC_PCI_DEVICE_ID);
+	result = pci_enable_device(pdev);
+	if (result) {
+		pr_err("%s: Failed to enable MSIC PCI device = 0x%x\n",
+		       __func__, PS_MSIC_PCI_DEVICE_ID);
+		goto exit;
+	}
+
+	/* Perform the GPIO configuration */
+	result = gpio_request(ctx->gpio_hpd_pin, "hdmi_hpd");
+	if (result) {
+		pr_debug("%s: Failed to request GPIO %d for kbd IRQ\n",
+			 __func__, ctx->gpio_hpd_pin);
+		goto exit2;
+	}
+
+	result = gpio_direction_input(ctx->gpio_hpd_pin);
+	if (result) {
+		pr_debug("%s: Failed to set GPIO %d as input\n",
+			 __func__, ctx->gpio_hpd_pin);
+		goto exit3;
+	}
+
+	ctx->irq_number = gpio_to_irq(ctx->gpio_hpd_pin);
+	pr_debug("%s: IRQ number assigned = %d\n", __func__, ctx->irq_number);
+
+	result = irq_set_irq_type(ctx->irq_number, IRQ_TYPE_EDGE_BOTH);
+	if (result) {
+		pr_debug("%s: Failed to set HDMI HPD IRQ type for IRQ %d\n",
+			 __func__, ctx->irq_number);
+		goto exit3;
+	}
+
+	/* This is unused on Moorefield platform, since we use GPIO */
+	ctx->dev.irq_io_address = 0;
+
+	result = request_threaded_irq(ctx->irq_number, ps_hdmi_irq_handler,
+				      ctx->hpd_callback, IRQF_SHARED,
+				      PS_HDMI_HPD_PCI_DRIVER_NAME,
+				      ctx->hpd_data);
+	if (result) {
+		pr_debug("%s: Register irq interrupt %d failed\n",
+			 __func__, ctx->irq_number);
+		goto exit3;
+	}
+	return result;
+
+exit3:
+	gpio_free(ctx->gpio_hpd_pin);
+exit2:
+	pci_disable_device(pdev);
+exit:
+	pci_dev_put(pdev);
+	return result;
+}
+
+/* PCI driver related structures */
+static DEFINE_PCI_DEVICE_TABLE(ps_hdmi_hpd_pci_id) = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PS_MSIC_PCI_DEVICE_ID) },
+	{ 0 }
+};
+
+static const struct dev_pm_ops ps_hdmi_hpd_pm_ops = {
+	.suspend = ps_hdmi_hpd_suspend,
+	.resume = ps_hdmi_hpd_resume,
+};
+
+static struct pci_driver ps_hdmi_hpd_driver = {
+	.name = PS_HDMI_HPD_PCI_DRIVER_NAME,
+	.id_table = ps_hdmi_hpd_pci_id,
+	.probe = ps_hdmi_hpd_probe,
+	.driver.pm = &ps_hdmi_hpd_pm_ops,
+};
+
+/* PCI Driver registration function */
+int ps_hdmi_hpd_register_driver(void)
+{
+	pr_debug("%s: Registering PCI driver for HDMI HPD\n", __func__);
+	return pci_register_driver(&ps_hdmi_hpd_driver);
+}
+
+/* PCI Driver Cleanup function */
+int ps_hdmi_hpd_unregister_driver(void)
+{
+	pci_unregister_driver(&ps_hdmi_hpd_driver);
+	return 0;
+}
+
+
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/pil/specific/mrfld/ps_hdmi.c b/drivers/external_drivers/intel_media/otm_hdmi/pil/specific/mrfld/ps_hdmi.c
new file mode 100644
index 0000000..c1673b6
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/pil/specific/mrfld/ps_hdmi.c
@@ -0,0 +1,502 @@
+/*
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright(c) 2012 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  Contact Information:
+
+  Intel Corporation
+  2200 Mission College Blvd.
+  Santa Clara, CA  95054
+
+  BSD LICENSE
+
+  Copyright(c) 2011 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+#include "otm_hdmi_types.h"
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include "otm_hdmi.h"
+#include "ipil_hdmi.h"
+#include "ps_hdmi.h"
+#include <asm/intel_scu_pmic.h>
+#include <asm/intel-mid.h>
+#include "pwr_mgmt.h"
+
+/* Implementation of the Merrifield specific PCI driver for receiving
+ * Hotplug and other device status signals.
+ * In Merrifield platform, the HPD and OCP signals are delivered to the
+ * display sub-system using the TI TPD Companion chip.
+ */
+
+/* Constants */
+#define PS_HDMI_HPD_PCI_DRIVER_NAME "Merrifield HDMI HPD Driver"
+
+/* Globals */
+static hdmi_context_t *g_context = NULL;
+
+#define PS_HDMI_MMIO_RESOURCE 0
+#define PS_VDC_OFFSET 0x00000000
+#define PS_VDC_SIZE 0x000080000
+#define PS_MSIC_PCI_DEVICE_ID 0x11A6
+
+#define PS_MSIC_HPD_GPIO_PIN 16
+#define PS_MSIC_LS_EN_GPIO_PIN 177
+#define PS_MSIC_HPD_GPIO_PIN_NAME "HDMI_HPD"
+#define PS_MSIC_LS_EN_GPIO_PIN_NAME "HDMI_LS_EN"
+#define PS_MSIC_CPD_HPD_GPIO_PIN 0x7F
+
+/* For Merrifield, it is required that SW pull up or pull down the
+ * LS_OE GPIO pin based on cable status. This is needed before
+ * performing any EDID read operation on Merrifield.
+ */
+static void __ps_gpio_configure_edid_read(void)
+{
+	static int old_pin_value  = -1;
+	int new_pin_value;
+	hdmi_context_t *ctx = g_context;
+
+	if (ctx == NULL) {
+		pr_err("%s failed due to internal error\n", __func__);
+		return;
+	}
+
+	new_pin_value = gpio_get_value(ctx->gpio_hpd_pin);
+	if (new_pin_value == old_pin_value)
+		return;
+
+	old_pin_value = new_pin_value;
+
+	if (new_pin_value == 0)
+		gpio_set_value(ctx->gpio_ls_en_pin, 0);
+	else
+		gpio_set_value(ctx->gpio_ls_en_pin, 1);
+
+	pr_debug("%s: MSIC_LS_OE pin = %d (%d)\n", __func__,
+		 gpio_get_value(ctx->gpio_ls_en_pin), new_pin_value);
+}
+
+otm_hdmi_ret_t ps_hdmi_pci_dev_init(void *context, struct pci_dev *pdev)
+{
+	otm_hdmi_ret_t rc = OTM_HDMI_SUCCESS;
+	int result = 0;
+	unsigned int vdc_start;
+	uint32_t pci_address = 0;
+	uint8_t pci_dev_revision = 0;
+	hdmi_context_t *ctx = NULL;
+
+	if (pdev == NULL || context == NULL) {
+		rc = OTM_HDMI_ERR_INTERNAL;
+		goto exit;
+	}
+	ctx = (hdmi_context_t *)context;
+
+	pr_debug("get resource start\n");
+	result = pci_read_config_dword(pdev, 16, &vdc_start);
+	if (result != 0) {
+		rc = OTM_HDMI_ERR_FAILED;
+		goto exit;
+	}
+	pci_address = vdc_start + PS_VDC_OFFSET;
+
+	pr_debug("map IO region\n");
+	/* Map IO region and save its length */
+	ctx->io_length = PS_VDC_SIZE;
+	ctx->io_address = ioremap(pci_address, ctx->io_length);
+	if (!ctx->io_address) {
+		rc = OTM_HDMI_ERR_FAILED;
+		goto exit;
+	}
+
+	pr_debug("get PCI dev revision\n");
+	result = pci_read_config_byte(pdev, 8, &pci_dev_revision);
+	if (result != 0) {
+		rc = OTM_HDMI_ERR_FAILED;
+		goto exit;
+	}
+	ctx->dev.id = pci_dev_revision;
+	/* Store this context for use by MSIC PCI driver */
+	g_context = ctx;
+	ctx->is_connected_overridden = true;
+
+	/* Handle Merrifield specific GPIO configuration
+	 * to enable EDID reads
+	 */
+	ctx->gpio_hpd_pin = get_gpio_by_name(PS_MSIC_HPD_GPIO_PIN_NAME);
+	if (-1 == ctx->gpio_hpd_pin) {
+		ctx->gpio_hpd_pin = PS_MSIC_HPD_GPIO_PIN;
+		pr_debug("get_gpio_by_name failed! Use default pin %d\n",
+				PS_MSIC_HPD_GPIO_PIN);
+	}
+
+	ctx->gpio_ls_en_pin = get_gpio_by_name(PS_MSIC_LS_EN_GPIO_PIN_NAME);
+	if (-1 == ctx->gpio_ls_en_pin) {
+		ctx->gpio_ls_en_pin = PS_MSIC_LS_EN_GPIO_PIN;
+		pr_debug("get_gpio_by_name failed! Use default pin %d\n",
+				PS_MSIC_LS_EN_GPIO_PIN);
+	}
+
+	if (gpio_request(ctx->gpio_ls_en_pin, "HDMI_LS_EN")) {
+		pr_err("%s: Unable to request gpio %d\n", __func__,
+		       ctx->gpio_ls_en_pin);
+		rc = OTM_HDMI_ERR_FAILED;
+		goto exit;
+	}
+
+	if (!gpio_is_valid(ctx->gpio_ls_en_pin)) {
+		pr_err("%s: Unable to validate gpio %d\n", __func__,
+		       ctx->gpio_ls_en_pin);
+		rc = OTM_HDMI_ERR_FAILED;
+		goto exit;
+	}
+
+	/* Set the GPIO based on cable status */
+	__ps_gpio_configure_edid_read();
+
+exit:
+	return rc;
+}
+
+otm_hdmi_ret_t ps_hdmi_pci_dev_deinit(void *context)
+{
+	otm_hdmi_ret_t rc = OTM_HDMI_SUCCESS;
+	hdmi_context_t *ctx = NULL;
+
+	if (context == NULL) {
+		rc = OTM_HDMI_ERR_INTERNAL;
+		goto exit;
+	}
+	ctx = (hdmi_context_t *)context;
+
+	/* unmap IO region */
+	iounmap(ctx->io_address);
+
+	/* Free GPIO resources */
+	gpio_free(ctx->gpio_ls_en_pin);
+exit:
+	return rc;
+}
+
+otm_hdmi_ret_t ps_hdmi_i2c_edid_read(void *ctx, unsigned int sp,
+				  unsigned int offset, void *buffer,
+				  unsigned int size)
+{
+	hdmi_context_t *context = (hdmi_context_t *)ctx;
+
+	char *src = context->edid_raw + sp * SEGMENT_SIZE + offset;
+	memcpy(buffer, src, size);
+
+	return OTM_HDMI_SUCCESS;
+}
+
+bool ps_hdmi_power_rails_on(void)
+{
+	pr_debug("Entered %s\n", __func__);
+	return true;
+}
+
+bool ps_hdmi_power_rails_off(void)
+{
+	pr_debug("Entered %s\n", __func__);
+	return 0;
+}
+
+/* enable/disable IRQ and CPD_HPD */
+bool ps_hdmi_enable_hpd(bool enable)
+{
+	pr_debug("Entered %s: %s\n", __func__, enable ? "enable" : "disable");
+
+	if (enable)
+		intel_scu_ipc_iowrite8(PS_MSIC_CPD_HPD_GPIO_PIN, 0x31);
+	else
+		intel_scu_ipc_iowrite8(PS_MSIC_CPD_HPD_GPIO_PIN, 0x30);
+	return true;
+}
+
+bool ps_hdmi_power_islands_on()
+{
+	return ospm_power_using_hw_begin(
+			OSPM_DISPLAY_B | OSPM_DISPLAY_HDMI,
+			OSPM_UHB_FORCE_POWER_ON);
+}
+
+void ps_hdmi_power_islands_off()
+{
+	ospm_power_using_hw_end(
+		OSPM_DISPLAY_HDMI | OSPM_DISPLAY_B);
+}
+
+void ps_hdmi_vblank_control(struct drm_device *dev, bool on)
+{
+	/* Won't force turning on/off vblank interrupt for MRFLD. */
+}
+
+/*
+ * ps_hdmi_get_cable_status - Get HDMI cable connection status
+ * @context: hdmi device context
+ *
+ * Returns - boolean state.
+ * true - HDMI cable connected
+ * false - HDMI cable disconnected
+ */
+bool ps_hdmi_get_cable_status(void *context)
+{
+	hdmi_context_t *ctx = (hdmi_context_t *)context;
+	if (ctx == NULL)
+		return false;
+
+	/* Read HDMI cable status from GPIO */
+	/* For Merrifield, it is required that SW pull up or pull down the
+	 * LS_OE GPIO pin based on cable status. This is needed before
+	 * performing any EDID read operation on Merrifield.
+	 */
+	__ps_gpio_configure_edid_read();
+
+	if (g_context->override_cable_state)
+		return g_context->is_connected_overridden;
+
+	if (gpio_get_value(ctx->gpio_hpd_pin) == 0)
+		ctx->is_connected =  false;
+	else
+		ctx->is_connected = true;
+	return ctx->is_connected;
+}
+
+/**
+ * notify security component of hdcp and hdmi cable status
+ *
+ * @hdcp	HDCP status: true if phase1 is enabled
+ * @cable	HDMI connection status: true if connected
+ *
+ * Returns:	none
+ */
+void ps_hdmi_update_security_hdmi_hdcp_status(bool hdcp, bool cable)
+{
+	/* Note: do nothing since not clear if mrfld needs this or not */
+	return;
+}
+
+/**
+ * hdmi interrupt handler (top half).
+ * @irq:	irq number
+ * @data:	data for the interrupt handler
+ *
+ * Returns:	IRQ_HANDLED on NULL input arguments, and if the
+ *			interrupt is not HDMI HPD interrupts.
+ *		IRQ_WAKE_THREAD if this is a HDMI HPD interrupt.
+ * hdmi interrupt handler (upper half). handles the interrupts
+ * by reading hdmi status register and waking up bottom half if needed.
+ */
+irqreturn_t ps_hdmi_irq_handler(int irq, void *data)
+{
+	if (g_context == NULL)
+		return IRQ_HANDLED;
+
+	return IRQ_WAKE_THREAD;
+}
+
+/* Power management functions */
+static int ps_hdmi_hpd_suspend(struct device *dev)
+{
+	pr_debug("Entered %s\n", __func__);
+	ps_hdmi_power_rails_off();
+	return 0;
+}
+
+static int ps_hdmi_hpd_resume(struct device *dev)
+{
+	pr_debug("Entered %s\n", __func__);
+	ps_hdmi_power_rails_on();
+	return 0;
+}
+
+/* get HDMI hotplug pin number */
+int ps_hdmi_get_hpd_pin(void)
+{
+	if (g_context == NULL)
+		return 0;
+
+	return g_context->gpio_hpd_pin;
+}
+
+/* override the hdmi hpd cable status */
+void ps_hdmi_override_cable_status(bool state, bool auto_state)
+{
+	if (g_context == NULL)
+		return 0;
+
+	g_context->override_cable_state = auto_state;
+
+	if (state)
+		g_context->is_connected_overridden = true;
+	else
+		g_context->is_connected_overridden = false;
+	return;
+}
+
+
+
+/* PCI probe function */
+static int ps_hdmi_hpd_probe(struct pci_dev *pdev,
+				       const struct pci_device_id *id)
+{
+	int result = 0;
+	hdmi_context_t *ctx = g_context;
+
+	if (pdev == NULL || ctx == NULL) {
+		pr_err("%s: called with NULL device or context\n", __func__);
+		result = -EINVAL;
+		return result;
+	}
+
+	/* Verify probe is called for the intended device */
+	if (pdev->device != PS_MSIC_PCI_DEVICE_ID) {
+		pr_err("%s: called for wrong device id = 0x%x\n", __func__,
+		       pdev->device);
+		result = -EINVAL;
+		goto exit;
+	}
+
+	pr_debug("pci_enable_device for 0x%x\n",
+					PS_MSIC_PCI_DEVICE_ID);
+	result = pci_enable_device(pdev);
+	if (result) {
+		pr_err("%s: Failed to enable MSIC PCI device = 0x%x\n",
+		       __func__, PS_MSIC_PCI_DEVICE_ID);
+		goto exit;
+	}
+
+	/* Perform the GPIO configuration */
+	result = gpio_request(ctx->gpio_hpd_pin, "hdmi_hpd");
+	if (result) {
+		pr_debug("%s: Failed to request GPIO %d for kbd IRQ\n",
+			 __func__, ctx->gpio_hpd_pin);
+		goto exit2;
+	}
+
+	result = gpio_direction_input(ctx->gpio_hpd_pin);
+	if (result) {
+		pr_debug("%s: Failed to set GPIO %d as input\n",
+			 __func__, ctx->gpio_hpd_pin);
+		goto exit3;
+	}
+
+	ctx->irq_number = gpio_to_irq(ctx->gpio_hpd_pin);
+	pr_debug("%s: IRQ number assigned = %d\n", __func__, ctx->irq_number);
+
+	result = irq_set_irq_type(ctx->irq_number, IRQ_TYPE_EDGE_BOTH);
+	if (result) {
+		pr_debug("%s: Failed to set HDMI HPD IRQ type for IRQ %d\n",
+			 __func__, ctx->irq_number);
+		goto exit3;
+	}
+
+	/* This is unused on Merrifield platform, since we use GPIO */
+	ctx->dev.irq_io_address = 0;
+
+	result = request_threaded_irq(ctx->irq_number, ps_hdmi_irq_handler,
+				      ctx->hpd_callback, IRQF_SHARED,
+				      PS_HDMI_HPD_PCI_DRIVER_NAME,
+				      ctx->hpd_data);
+	if (result) {
+		pr_debug("%s: Register irq interrupt %d failed\n",
+			 __func__, ctx->irq_number);
+		goto exit3;
+	}
+	return result;
+
+exit3:
+	gpio_free(ctx->gpio_hpd_pin);
+exit2:
+	pci_disable_device(pdev);
+exit:
+	pci_dev_put(pdev);
+	return result;
+}
+
+/* PCI driver related structures */
+static DEFINE_PCI_DEVICE_TABLE(ps_hdmi_hpd_pci_id) = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PS_MSIC_PCI_DEVICE_ID) },
+	{ 0 }
+};
+
+static const struct dev_pm_ops ps_hdmi_hpd_pm_ops = {
+	.suspend = ps_hdmi_hpd_suspend,
+	.resume = ps_hdmi_hpd_resume,
+};
+
+static struct pci_driver ps_hdmi_hpd_driver = {
+	.name = PS_HDMI_HPD_PCI_DRIVER_NAME,
+	.id_table = ps_hdmi_hpd_pci_id,
+	.probe = ps_hdmi_hpd_probe,
+	.driver.pm = &ps_hdmi_hpd_pm_ops,
+};
+
+/* PCI Driver registration function */
+int ps_hdmi_hpd_register_driver(void)
+{
+	pr_debug("%s: Registering PCI driver for HDMI HPD\n", __func__);
+	return pci_register_driver(&ps_hdmi_hpd_driver);
+}
+
+/* PCI Driver Cleanup function */
+int ps_hdmi_hpd_unregister_driver(void)
+{
+	pci_unregister_driver(&ps_hdmi_hpd_driver);
+	return 0;
+}
+
+
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/tools/edid_print/Makefile b/drivers/external_drivers/intel_media/otm_hdmi/tools/edid_print/Makefile
new file mode 100644
index 0000000..139d2b7
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/tools/edid_print/Makefile
@@ -0,0 +1,17 @@
+obj-m = edidprint.o
+edidprint-objs := edid_print_func.o edid_print.o
+
+OTM_HDMI_INCDIR =hardware/intel/linux/drivers/staging/mrst/drv
+
+KDIR := $(ANDROID_PRODUCT_OUT)/obj/kernel
+ccflags-y += \
+	-I$(ANDROID_BUILD_TOP)/$(OTM_HDMI_INCDIR)/cosai/linux_kernel/include \
+	-I$(ANDROID_BUILD_TOP)/$(OTM_HDMI_INCDIR)/otm_hdmi/pil/include \
+	-I$(ANDROID_BUILD_TOP)/$(OTM_HDMI_INCDIR)/otm_hdmi/pil/common \
+	-I$(ANDROID_BUILD_TOP)/$(OTM_HDMI_INCDIR)/otm_hdmi/tools/edid_print \
+
+PWD := $(shell pwd)
+all:
+	make  -C $(KDIR) M=$(PWD) modules
+clean:
+	make -C $(KDIR) M=$(PWD) clean
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/tools/edid_print/edid_print.c b/drivers/external_drivers/intel_media/otm_hdmi/tools/edid_print/edid_print.c
new file mode 100644
index 0000000..8d14a7d
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/tools/edid_print/edid_print.c
@@ -0,0 +1,30 @@
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+
+#include <linux/kernel.h>
+#include "edid_print.h"
+
+static int log_level = -1;
+
+module_param(log_level, int, 0);
+MODULE_PARM_DESC(log_level, "OTM_HDMI_LOG_LEVEL");
+
+int edidprint_init_module(void)
+{
+
+	printk("edidprint_init_module() called\n");
+	set_log_level(log_level);
+	return 0;
+}
+
+void edidprint_cleanup_module(void)
+{
+	printk("edidprint module cleanup\n");
+}
+
+module_init(edidprint_init_module);
+module_exit(edidprint_cleanup_module);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/tools/edid_print/edid_print.h b/drivers/external_drivers/intel_media/otm_hdmi/tools/edid_print/edid_print.h
new file mode 100644
index 0000000..8e972de
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/tools/edid_print/edid_print.h
@@ -0,0 +1,6 @@
+#ifndef _EDID_PRINT_H_
+#define _EDID_PRINT_H_
+
+void set_log_level(int log_level);
+
+#endif
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/tools/edid_print/edid_print.sh b/drivers/external_drivers/intel_media/otm_hdmi/tools/edid_print/edid_print.sh
new file mode 100644
index 0000000..f614bac
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/tools/edid_print/edid_print.sh
@@ -0,0 +1,48 @@
+#!/bin/bash
+
+usage()
+{
+	echo "Usage : edid_print.sh [log_level=0 or 1 or 2 or 3 or 4]"
+	echo "		log_level = 0 - prints error messages"
+	echo "		log_level = 1 - prints error and high priority messages"
+	echo "		log_level = 2 - prints error, high and low priority messages"
+	echo "		log_level = 3 - prints error, high, low and VBLANK messages"
+	echo "		log_level = 4 - prints all messages"
+	echo "for example:"
+	echo "     edid_print.sh log_level=3"
+}
+
+modulepath=./
+module=edidprint.ko
+if [ ! -f "$modulepath$module" ]; then
+	echo "cannot find edid_print module $module"
+	exit 1
+fi
+
+log_level=""
+
+if [ $# -ne 1 ]; then
+	usage
+	exit 0
+fi
+
+#parse input
+input=`echo $1 | awk -F '=' ' { print $1 } '`
+data=`echo $1 | awk -F '=' ' { print $2 } '`
+#echo "$input $data"
+
+if [[ $input == "log_level" ]]; then
+    log_level="log_level=$data"
+else
+	usage
+	exit 2
+fi
+
+echo "adb push $modulepath$module /data/"
+adb push $modulepath$module /data/
+
+echo "adb shell insmod /data/$module $log_level"
+adb shell insmod /data/$module $log_level
+
+adb shell rmmod $module
+
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/tools/edid_print/edid_print_func.c b/drivers/external_drivers/intel_media/otm_hdmi/tools/edid_print/edid_print_func.c
new file mode 100644
index 0000000..c0d3e2e
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/tools/edid_print/edid_print_func.c
@@ -0,0 +1,20 @@
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <otm_hdmi_defs.h>
+#include <otm_hdmi.h>
+
+#include "edid_print.h"
+
+void set_log_level(int log_level) {
+
+	if (log_level >= 0 && log_level <= 4) {
+		struct hdmi_context_t *hdmictx =
+			(struct hdmi_context_t*) otm_hdmi_get_context();
+		void *log_val = (void *)&log_level;
+		otm_hdmi_set_attribute(hdmictx, OTM_HDMI_ATTR_ID_DEBUG,
+					log_val, false);
+	} else {
+		printk("enter log_level >= 0 and <=4");
+	}
+}
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/tools/hdcp/Makefile b/drivers/external_drivers/intel_media/otm_hdmi/tools/hdcp/Makefile
new file mode 100644
index 0000000..730e16e
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/tools/hdcp/Makefile
@@ -0,0 +1,15 @@
+obj-m = hdcp.o
+
+PWD := $(shell pwd)
+
+ifeq ($(PF), DV1)
+        KDIR := ../../../../../../../../out/target/product/mfld_dv10/obj/kernel
+endif
+ifeq ($(PF), PR2)
+        KDIR := ../../../../../../../../out/target/product/mfld_pr2/linux/kernel/
+endif
+
+all:
+	make -C $(KDIR) M=$(PWD) modules
+clean:
+	make -C $(KDIR) M=$(PWD) clean
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/tools/hdcp/hdcp.c b/drivers/external_drivers/intel_media/otm_hdmi/tools/hdcp/hdcp.c
new file mode 100644
index 0000000..937f803
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/tools/hdcp/hdcp.c
@@ -0,0 +1,41 @@
+#include "hdcp.h"
+
+static char *option = "option";
+
+module_param(option, charp, 0000);
+MODULE_PARM_DESC(option, "option");
+
+int my_init_module(void)
+{
+	if (strcmp(option, "option") == 0) {
+		printk(KERN_INFO "hdcp:m:: Err! usage\n");
+		printk(KERN_INFO "hdcp:m:: insmod ./hdcp.ko option=enable\n");
+		printk(KERN_INFO "hdcp:m:: insmod ./hdcp.ko option=disable\n");
+		printk(KERN_INFO "hdcp:m:: insmod ./hdcp.ko option=mismatch\n");
+	} else {
+		if (strcmp(option, "mismatch") == 0) {
+			printk(KERN_INFO "[hdcp:m]: force Ri mismatch\n");
+			module_force_ri_mismatch = true;
+		} else if (strcmp(option, "enable") == 0) {
+			printk(KERN_INFO "[hdcp:m]: hdcp enable\n");
+			module_disable_hdcp = false;
+		} else if (strcmp(option, "disable") == 0) {
+			printk(KERN_INFO "[hdcp:m]: hdcp disable\n");
+			module_disable_hdcp = true;
+		} else {
+			printk(KERN_INFO "[hdcp:m]: unsupported option %s\n",
+				option);
+		}
+	}
+	return 0;
+}
+
+void my_cleanup_module(void)
+{
+	/* Nothing to Do */
+}
+
+module_init(my_init_module);
+module_exit(my_cleanup_module);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/tools/hdcp/hdcp.h b/drivers/external_drivers/intel_media/otm_hdmi/tools/hdcp/hdcp.h
new file mode 100644
index 0000000..fd880c2
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/tools/hdcp/hdcp.h
@@ -0,0 +1,9 @@
+#ifndef HDCP_MODULE_H
+#define HDCP_MODULE_H
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+extern bool module_disable_hdcp;
+extern bool module_force_ri_mismatch;
+#endif/* HDCP_MODULE_H */
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/tools/hdcp/hdcp_dis.sh b/drivers/external_drivers/intel_media/otm_hdmi/tools/hdcp/hdcp_dis.sh
new file mode 100644
index 0000000..18fa86a
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/tools/hdcp/hdcp_dis.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+echo "HDCP Disable Helper Script"
+echo "Pushing helper module..."
+adb push hdcp.ko /data/
+echo "Running module"
+adb shell insmod /data/hdcp.ko option="disable"
+echo "Removing Module"
+adb shell rmmod hdcp.ko
+echo "Done: unplug replug HDMI cable"
+echo "Look for HDCP messages in dmesg"
+echo "Use: adb shell dmesg | grep \"hdcp\""
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/tools/hdcp/hdcp_en.sh b/drivers/external_drivers/intel_media/otm_hdmi/tools/hdcp/hdcp_en.sh
new file mode 100644
index 0000000..d27f81c
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/tools/hdcp/hdcp_en.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+echo "HDCP Enable Helper Script"
+echo "Pushing helper module..."
+adb push hdcp.ko /data/
+echo "Running module"
+adb shell insmod /data/hdcp.ko option="enable"
+echo "Removing Module"
+adb shell rmmod hdcp.ko
+echo "Done: unplug replug HDMI cable"
+echo "Look for HDCP messages in dmesg"
+echo "Use: adb shell dmesg | grep \"hdcp\""
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/tools/hdcp/hdcp_force_mismatch.sh b/drivers/external_drivers/intel_media/otm_hdmi/tools/hdcp/hdcp_force_mismatch.sh
new file mode 100644
index 0000000..8e619a9
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/tools/hdcp/hdcp_force_mismatch.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+echo "HDCP Force Ri Mismatch Helper Script"
+echo "Pushing helper module..."
+adb push hdcp.ko /data/
+echo "Running module"
+adb shell insmod /data/hdcp.ko option="mismatch"
+echo "Removing Module"
+adb shell rmmod hdcp.ko
+echo "Done. Look for HDCP messages in dmesg"
+echo "Use: adb shell dmesg | grep \"hdcp\""
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/tools/hdmicmdline/Makefile b/drivers/external_drivers/intel_media/otm_hdmi/tools/hdmicmdline/Makefile
new file mode 100644
index 0000000..959a9f2
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/tools/hdmicmdline/Makefile
@@ -0,0 +1,8 @@
+obj-m = hdmicmd.o
+# TODO: Enable the below logic to config KDIR once the OTM MAKEFILE is updated.
+KDIR := ../../../../../../../../out/target/product/mfld_pr2/linux/kernel
+PWD := $(shell pwd)
+all:
+	make -C $(KDIR) M=$(PWD) modules
+clean:
+	make -C $(KDIR) M=$(PWD) clean
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/tools/hdmicmdline/hdmicmd.c b/drivers/external_drivers/intel_media/otm_hdmi/tools/hdmicmdline/hdmicmd.c
new file mode 100644
index 0000000..1ef3601
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/tools/hdmicmdline/hdmicmd.c
@@ -0,0 +1,38 @@
+#include "hdmicmd.h"
+
+/*static char *resolution = "1024x600@60"; */
+static char *resolution = "resolution";
+static int vic = -1;
+
+module_param(resolution, charp, 0000);
+MODULE_PARM_DESC(resolution, "resolution");
+
+module_param(vic, int, 0);
+MODULE_PARM_DESC(vic, "VIC");
+
+int hdmicmd_init_module(void)
+{
+
+	printk(KERN_INFO "hdmicmd_init_module() called\n");
+
+	if (strcmp(resolution, "resolution") == 0 && vic == -1) {
+		otm_print_cmdline_option();
+	} else if (strcmp(resolution, "resolution") != 0) {
+			/* Modify Video Option Based on Passed Resolution */
+			otm_cmdline_parse_option(resolution);
+	} else if (vic != -1) {
+			/* Modify Video Option Based on Passed VIC */
+			otm_cmdline_set_vic_option(vic);
+	}
+	return 0;
+}
+
+void hdmicmd_cleanup_module(void)
+{
+	printk(KERN_INFO "hdmicmd module cleanup\n");
+}
+
+module_init(hdmicmd_init_module);
+module_exit(hdmicmd_cleanup_module);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/tools/hdmicmdline/hdmicmd.h b/drivers/external_drivers/intel_media/otm_hdmi/tools/hdmicmdline/hdmicmd.h
new file mode 100644
index 0000000..5b78901
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/tools/hdmicmdline/hdmicmd.h
@@ -0,0 +1,13 @@
+#ifndef _HDMI_CMD_H_
+#define _HDMI_CMD_H_
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+extern int otm_cmdline_parse_option(char *);
+extern int otm_cmdline_set_vic_option(int vic);
+extern void otm_print_cmdline_option();
+
+#endif
+
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/tools/hdmicmdline/hdmicmd.sh b/drivers/external_drivers/intel_media/otm_hdmi/tools/hdmicmdline/hdmicmd.sh
new file mode 100644
index 0000000..c5ea01f
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/tools/hdmicmdline/hdmicmd.sh
@@ -0,0 +1,49 @@
+#!/bin/bash
+
+usage()
+{
+	echo "Usage 1: hdmicmd.sh [resolution=resolutionstring]"
+	echo "Usage 2: hdmicmd.sh [vic=vic_number]"
+	echo "for example:"
+	echo "     hdmicmd.sh resolution=1280x720@60"
+	echo "     hdmicmd.sh vic=4"
+	echo "If no parameters, dmesg will print out the current configuration"
+}
+
+modulepath=./
+module=hdmicmd.ko
+if [ ! -f "$modulepath$module" ]; then
+	echo "cannot find hdmicmd module $module"
+	exit 1
+fi
+
+resolution=""
+vic=""
+
+if [ $# -ne 1 ]; then
+	usage
+	exit 0
+fi
+
+#parse input
+input=`echo $1 | awk -F '=' ' { print $1 } '`
+data=`echo $1 | awk -F '=' ' { print $2 } '`
+#echo "$input $data"
+
+if [[ $input == "resolution" ]]; then
+    resolution="resolution=$data"
+elif [[ $input == "vic" ]]; then
+    vic="vic=$data"
+else
+	usage
+	exit 2
+fi
+
+echo "adb push $modulepath$module /data/"
+adb push $modulepath$module /data/
+
+echo "adb shell insmod /data/$module $resolution $vic"
+adb shell insmod /data/$module $resolution $vic
+
+adb shell rmmod $module
+
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/tools/report_edid/Makefile b/drivers/external_drivers/intel_media/otm_hdmi/tools/report_edid/Makefile
new file mode 100644
index 0000000..21cc69b
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/tools/report_edid/Makefile
@@ -0,0 +1,14 @@
+obj-m = report_edid.o
+
+PWD := $(shell pwd)
+
+ifeq ($(PF), DV1)
+	KDIR := ../../../../../../../../out/target/product/mfld_dv10/obj/kernel
+endif
+ifeq ($(PF), PR2)
+	KDIR := ../../../../../../../..//out/target/product/mfld_pr2/linux/kernel/
+endif
+all:
+	make -C $(KDIR) M=$(PWD) modules
+clean:
+	make -C $(KDIR) M=$(PWD) clean
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/tools/report_edid/report_edid.c b/drivers/external_drivers/intel_media/otm_hdmi/tools/report_edid/report_edid.c
new file mode 100644
index 0000000..853f39f
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/tools/report_edid/report_edid.c
@@ -0,0 +1,29 @@
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+/* 
+ * TODO: Revert back afer EDID Parse
+ *	 for established modes is implemented 
+ */
+
+extern void test_otm_hdmi_report_edid_full();
+/* extern test_otm_hdmi_report_edid(); */
+
+int report_edid_init_module(void)
+{
+	printk(KERN_INFO "report_edid module() called\n");
+	test_otm_hdmi_report_edid_full();
+	/* test_otm_hdmi_report_edid(); */
+	return 0;
+}
+
+void report_edid_cleanup_module(void)
+{
+	printk(KERN_INFO "report_edid module cleanup");
+}
+
+module_init(report_edid_init_module);
+module_exit(report_edid_cleanup_module);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/external_drivers/intel_media/otm_hdmi/tools/report_edid/report_edid.sh b/drivers/external_drivers/intel_media/otm_hdmi/tools/report_edid/report_edid.sh
new file mode 100644
index 0000000..03ea696
--- /dev/null
+++ b/drivers/external_drivers/intel_media/otm_hdmi/tools/report_edid/report_edid.sh
@@ -0,0 +1,11 @@
+#!/bin/sh
+
+echo "EDID report helper script"
+echo "Pushing helper module..."
+adb push report_edid.ko /data/
+echo "Running module..."
+adb shell insmod /data/report_edid.ko
+echo "Cleaning up.."
+adb shell rmmod report_edid.ko
+echo "DONE. Look for EDID report in dmesg"
+
diff --git a/drivers/external_drivers/intel_media/video/common/Makefile b/drivers/external_drivers/intel_media/video/common/Makefile
new file mode 100644
index 0000000..5f70764
--- /dev/null
+++ b/drivers/external_drivers/intel_media/video/common/Makefile
@@ -0,0 +1,37 @@
+# Makefile for the drm device driver. This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+INCDIR=drivers/staging/mrfl
+MEDIA_INCDIR=drivers/staging/intel_media
+ccflags-$(CONFIG_DRM_MRFLD) += \
+	-I$(INCDIR)/ \
+	-I$(INCDIR)/rgx/include \
+	-I$(INCDIR)/interface \
+	-I$(INCDIR)/drv \
+	-I$(INCDIR)/drv/ospm \
+	-I$(INCDIR)/rgx/services/server/env/linux \
+	-I$(INCDIR)/rgx/services/server/include \
+	-I$(INCDIR)/../intel_media/video/common \
+	-I$(INCDIR)/../intel_media/video/decode \
+	-I$(INCDIR)/../intel_media/video/encode \
+	-I$(INCDIR)/../intel_media/video/vsp \
+	-I$(INCDIR)/../../../include/linux \
+	-I$(INCDIR)/../../../include/drm \
+	-I$(INCDIR)/../../../include/drm/ttm
+
+ccflags-$(CONFIG_DRM_MRFLD) += -DANDROID -D_linux_ -DLINUX -D__KERNEL__  -DMERRIFIELD
+ccflags-$(CONFIG_SUPPORT_VSP) += -DSUPPORT_VSP
+ccflags-$(CONFIG_DRM_VXD_BYT) := -Iinclude/drm -Iinclude/drm/ttm -Idrivers/gpu/drm/i915 -Idrivers/staging/intel_media/common/baytrail -Idrivers/staging/intel_media/video/common -Idrivers/staging/intel_media/video/decode
+#VIDEO_COMMON_DIR = $(TOP_DIR)/driver/staging/intel_media/video/common
+#DECODE_DIR = $(TOP_DIR)/driver/staging/intel_media/video/decode
+
+obj-y += \
+	psb_ttm_glue.o \
+	psb_cmdbuf.o \
+	tng_securefw.o \
+	psb_buffer.o \
+	psb_fence.o \
+	psb_mmu.o \
+	psb_ttm_fence.o \
+	psb_ttm_fence_user.o \
+	psb_ttm_placement_user.o
diff --git a/drivers/external_drivers/intel_media/video/common/psb_buffer.c b/drivers/external_drivers/intel_media/video/common/psb_buffer.c
new file mode 100644
index 0000000..ae9dcad
--- /dev/null
+++ b/drivers/external_drivers/intel_media/video/common/psb_buffer.c
@@ -0,0 +1,788 @@
+/**************************************************************************
+ * Copyright (c) 2007, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
+ */
+#include <ttm/ttm_placement.h>
+#include <ttm/ttm_execbuf_util.h>
+#include <ttm/ttm_page_alloc.h>
+#include "psb_ttm_fence_api.h"
+#include <drm/drmP.h>
+#ifdef CONFIG_DRM_VXD_BYT
+#include "vxd_drv.h"
+#else
+#include "psb_drv.h"
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0))
+struct drm_psb_ttm_backend {
+	struct ttm_backend base;
+	struct page **pages;
+	dma_addr_t *dma_addrs;
+	unsigned int desired_tile_stride;
+	unsigned int hw_tile_stride;
+	int mem_type;
+	unsigned long offset;
+	unsigned long num_pages;
+};
+
+#else
+
+/*  ttm removal of "backend" -- struct ttm_backend changed to struct ttm_tt.
+    Members now in struct ttm instead of struct ttm_backend:
+	pages
+	num_pages
+    */
+
+struct drm_psb_ttm_tt_s {
+	struct ttm_dma_tt ttm_dma;
+	unsigned int desired_tile_stride;
+	unsigned int hw_tile_stride;
+	int mem_type;
+	unsigned long offset;
+};
+#endif
+
+static int psb_move_blit(struct ttm_buffer_object *bo,
+			 bool evict, bool no_wait,
+			 struct ttm_mem_reg *new_mem)
+{
+	BUG();
+	return 0;
+}
+
+/*
+ * Flip destination ttm into GATT,
+ * then blit and subsequently move out again.
+ */
+static int psb_move_flip(struct ttm_buffer_object *bo,
+			 bool evict, bool interruptible, bool no_wait,
+			 struct ttm_mem_reg *new_mem)
+{
+	/*struct ttm_bo_device *bdev = bo->bdev;*/
+	struct ttm_mem_reg tmp_mem;
+	int ret;
+	struct ttm_placement placement;
+	uint32_t flags = TTM_PL_FLAG_TT;
+
+	tmp_mem = *new_mem;
+	tmp_mem.mm_node = NULL;
+
+	placement.fpfn = 0;
+	placement.lpfn = 0;
+	placement.num_placement = 1;
+	placement.placement = &flags;
+	placement.num_busy_placement = 0; /* FIXME */
+	placement.busy_placement = NULL;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, false, no_wait);
+#else
+	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait);
+#endif
+	if (ret)
+		return ret;
+	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
+	if (ret)
+		goto out_cleanup;
+	ret = psb_move_blit(bo, true, no_wait, &tmp_mem);
+	if (ret)
+		goto out_cleanup;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+	ret = ttm_bo_move_ttm(bo, evict, false, no_wait, new_mem);
+#else
+	ret = ttm_bo_move_ttm(bo, evict, no_wait, new_mem);
+#endif
+out_cleanup:
+	if (tmp_mem.mm_node) {
+		/*spin_lock(&bdev->lru_lock);*/ /* lru_lock is removed from upstream TTM */
+		drm_mm_put_block(tmp_mem.mm_node);
+		tmp_mem.mm_node = NULL;
+		/*spin_unlock(&bdev->lru_lock);*/
+	}
+	return ret;
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0))
+static int drm_psb_tbe_populate(struct ttm_backend *backend,
+				unsigned long num_pages,
+				struct page **pages,
+				struct page *dummy_read_page,
+				dma_addr_t *dma_addrs)
+{
+	struct drm_psb_ttm_backend *psb_be =
+		container_of(backend, struct drm_psb_ttm_backend, base);
+
+	psb_be->pages = pages;
+	psb_be->dma_addrs = dma_addrs; /* Not concretely implemented by TTM yet*/
+	return 0;
+}
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0))
+static void drm_psb_tbe_clear(struct ttm_backend *backend)
+{
+	struct drm_psb_ttm_backend *psb_be =
+		container_of(backend, struct drm_psb_ttm_backend, base);
+
+	psb_be->pages = NULL;
+	psb_be->dma_addrs = NULL;
+	return;
+}
+
+static int drm_psb_tbe_unbind(struct ttm_backend *backend)
+#else
+static int drm_psb_tbe_unbind(struct ttm_tt *ttm)
+#endif
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0))
+	struct ttm_bo_device *bdev = backend->bdev;
+	struct drm_psb_private *dev_priv =
+		container_of(bdev, struct drm_psb_private, bdev);
+	struct drm_psb_ttm_backend *psb_be =
+		container_of(backend, struct drm_psb_ttm_backend, base);
+	struct psb_mmu_pd *pd = psb_mmu_get_default_pd(dev_priv->mmu);
+#ifdef SUPPORT_VSP
+	struct psb_mmu_pd *vsp_pd = psb_mmu_get_default_pd(dev_priv->vsp_mmu);
+#endif /* SUPPORT_VSP */
+#else
+	struct ttm_dma_tt *ttm_dma;
+	struct ttm_bo_device *bdev;
+	struct drm_psb_private *dev_priv;
+	struct drm_psb_ttm_tt_s *psb_be;
+	struct psb_mmu_pd *pd;
+	struct psb_mmu_pd *vsp_pd;
+
+	ttm_dma = container_of(ttm, struct ttm_dma_tt, ttm);
+	psb_be = container_of(ttm_dma, struct drm_psb_ttm_tt_s, ttm_dma);
+	bdev = ttm->bdev;
+	dev_priv = container_of(bdev, struct drm_psb_private, bdev);
+	pd = psb_mmu_get_default_pd(dev_priv->mmu);
+#ifdef SUPPORT_VSP
+	vsp_pd = psb_mmu_get_default_pd(dev_priv->vsp_mmu);
+#endif /* SUPPORT_VSP */
+#endif
+
+#ifndef CONFIG_DRM_VXD_BYT
+	if (psb_be->mem_type == TTM_PL_TT) {
+		uint32_t gatt_p_offset =
+			(psb_be->offset - dev_priv->pg->mmu_gatt_start) >> PAGE_SHIFT;
+
+		(void) psb_gtt_remove_pages(dev_priv->pg, gatt_p_offset,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0))
+					    psb_be->num_pages,
+#else
+					    ttm->num_pages,
+#endif
+					    psb_be->desired_tile_stride,
+					    psb_be->hw_tile_stride, 0);
+	}
+#endif
+
+	psb_mmu_remove_pages(pd, psb_be->offset,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0))
+			     psb_be->num_pages,
+#else
+			     ttm->num_pages,
+#endif
+			     psb_be->desired_tile_stride,
+			     psb_be->hw_tile_stride);
+#ifdef SUPPORT_VSP
+	psb_mmu_remove_pages(vsp_pd, psb_be->offset,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0))
+			     psb_be->num_pages,
+#else
+			     ttm->num_pages,
+#endif
+			     psb_be->desired_tile_stride,
+			     psb_be->hw_tile_stride);
+#endif
+	return 0;
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0))
+static int drm_psb_tbe_bind(struct ttm_backend *backend,
+			    struct ttm_mem_reg *bo_mem)
+#else
+static int drm_psb_tbe_bind(struct ttm_tt *ttm,
+			    struct ttm_mem_reg *bo_mem)
+#endif
+{
+	int type;
+	int ret;
+	struct ttm_bo_device *bdev;
+	struct drm_psb_private *dev_priv;
+	struct psb_mmu_pd *pd;
+#ifdef SUPPORT_VSP
+	struct psb_mmu_pd *vsp_pd;
+#endif
+	struct ttm_mem_type_manager *man;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0))
+	struct drm_psb_ttm_backend *psb_be;
+#else
+	struct ttm_dma_tt *ttm_dma;
+	struct drm_psb_ttm_tt_s *psb_be;
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0))
+	bdev = backend->bdev;
+	psb_be = container_of(backend, struct drm_psb_ttm_backend, base);
+#else
+	ttm_dma = container_of(ttm, struct ttm_dma_tt, ttm);
+	psb_be = container_of(ttm_dma, struct drm_psb_ttm_tt_s, ttm_dma);
+	bdev = ttm->bdev;
+#endif
+
+	dev_priv = container_of(bdev, struct drm_psb_private, bdev);
+	pd = psb_mmu_get_default_pd(dev_priv->mmu);
+#ifdef SUPPORT_VSP
+	vsp_pd = psb_mmu_get_default_pd(dev_priv->vsp_mmu);
+#endif
+	man = &bdev->man[bo_mem->mem_type];
+
+	ret = 0;
+	psb_be->mem_type = bo_mem->mem_type;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0))
+	psb_be->num_pages = bo_mem->num_pages;
+#else
+	ttm->num_pages = bo_mem->num_pages;
+#endif
+
+	psb_be->desired_tile_stride = 0;
+	psb_be->hw_tile_stride = 0;
+	psb_be->offset = (bo_mem->start << PAGE_SHIFT) +
+			 man->gpu_offset;
+
+	type = (bo_mem->placement & TTM_PL_FLAG_CACHED) ?
+				PSB_MMU_CACHED_MEMORY : 0;
+
+#ifndef CONFIG_DRM_VXD_BYT
+	if (psb_be->mem_type == TTM_PL_TT) {
+		uint32_t gatt_p_offset =
+			(psb_be->offset - dev_priv->pg->mmu_gatt_start) >> PAGE_SHIFT;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0))
+		ret = psb_gtt_insert_pages(dev_priv->pg,
+					   psb_be->pages,
+					   gatt_p_offset,
+					   psb_be->num_pages,
+					   psb_be->desired_tile_stride,
+					   psb_be->hw_tile_stride, type);
+#else
+		ret = psb_gtt_insert_pages(dev_priv->pg,
+					   ttm->pages,
+					   gatt_p_offset,
+					   ttm->num_pages,
+					   psb_be->desired_tile_stride,
+					   psb_be->hw_tile_stride, type);
+#endif
+	}
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0))
+	ret = psb_mmu_insert_pages(pd,
+		psb_be->pages,
+		psb_be->offset, psb_be->num_pages,
+		psb_be->desired_tile_stride,
+		psb_be->hw_tile_stride, type);
+#else
+	ret = psb_mmu_insert_pages(pd,
+		ttm->pages,
+		psb_be->offset, ttm->num_pages,
+		psb_be->desired_tile_stride,
+		psb_be->hw_tile_stride, type);
+#endif
+	if (ret)
+		goto out_err;
+
+#ifdef SUPPORT_VSP
+	ret = psb_mmu_insert_pages(vsp_pd,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0))
+				   psb_be->pages,
+				   psb_be->offset,
+				   psb_be->num_pages,
+#else
+				   ttm->pages,
+				   psb_be->offset,
+				   ttm->num_pages,
+#endif
+				   psb_be->desired_tile_stride,
+				   psb_be->hw_tile_stride, type);
+	if (ret)
+		goto out_err;
+#endif
+
+	return 0;
+out_err:
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0))
+	drm_psb_tbe_unbind(backend);
+#else
+	drm_psb_tbe_unbind(ttm);
+#endif
+	return ret;
+
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0))
+static void drm_psb_tbe_destroy(struct ttm_backend *backend)
+{
+	struct drm_psb_ttm_backend *psb_be =
+		container_of(backend, struct drm_psb_ttm_backend, base);
+
+	if (backend)
+		kfree(psb_be);
+}
+#else
+static void drm_psb_tbe_destroy(struct ttm_tt *ttm)
+{
+	struct ttm_dma_tt *ttm_dma;
+	struct drm_psb_ttm_tt_s *psb_be;
+
+	ttm_dma = container_of(ttm, struct ttm_dma_tt, ttm);
+	psb_be = container_of(ttm_dma, struct drm_psb_ttm_tt_s, ttm_dma);
+
+	ttm_dma_tt_fini(ttm_dma);
+	if (ttm)
+		kfree(psb_be);
+ }
+#endif
+
+static struct ttm_backend_func psb_ttm_backend = {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0))
+	.populate = drm_psb_tbe_populate,
+	.clear = drm_psb_tbe_clear,
+#endif
+	.bind = drm_psb_tbe_bind,
+	.unbind = drm_psb_tbe_unbind,
+	.destroy = drm_psb_tbe_destroy,
+};
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0))
+static struct ttm_backend *drm_psb_tbe_init(struct ttm_bo_device *bdev)
+{
+	struct drm_psb_ttm_backend *psb_be;
+
+	psb_be = kzalloc(sizeof(*psb_be), GFP_KERNEL);
+	if (!psb_be)
+		return NULL;
+	psb_be->pages = NULL;
+	psb_be->base.func = &psb_ttm_backend;
+	psb_be->base.bdev = bdev;
+	return &psb_be->base;
+}
+#else
+static struct ttm_tt *drm_psb_ttm_tt_create(struct ttm_bo_device *bdev,
+	unsigned long size, uint32_t page_flags, struct page *dummy_read_page)
+{
+	struct drm_psb_ttm_tt_s *psb_be;
+	int rva;
+
+#if __OS_HAS_AGP && 0
+	if (this_is_an_agp_device)
+		return ttm_agp_tt_populate(ttm);
+#endif
+
+	psb_be = kzalloc(sizeof(*psb_be), GFP_KERNEL);
+	if (!psb_be)
+		return NULL;
+
+	psb_be->ttm_dma.ttm.func = &psb_ttm_backend;
+
+	rva = ttm_dma_tt_init(&psb_be->ttm_dma, bdev, size, page_flags,
+		dummy_read_page);
+	if (rva < 0) {
+		kfree(psb_be);
+		return NULL;
+	}
+
+	return &psb_be->ttm_dma.ttm;
+}
+
+static int drm_psb_ttm_tt_populate(struct ttm_tt *ttm)
+{
+	struct ttm_dma_tt *ttm_dma;
+	struct ttm_bo_device *bdev;
+	struct drm_psb_private *dev_priv;
+	struct drm_device *ddev;
+
+	/*	The only use made of the structure pointed to
+		by ddev is reference to these members:
+			struct device *dev;
+			struct pci_dev *pdev;
+		*/
+	ttm_dma = (struct ttm_dma_tt *) ttm;
+
+	bdev = ttm->bdev;
+	dev_priv = container_of(bdev, struct drm_psb_private, bdev);
+	ddev = dev_priv->dev;
+
+	if (ttm->state != tt_unpopulated)
+		return 0;
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 3, 0))
+	{
+		bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
+		if (slave && ttm->sg) {
+			drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
+							 NULL, ttm->num_pages);
+			ttm->state = tt_unbound;
+			return 0;
+		}
+	}
+#endif
+
+#if __OS_HAS_AGP && 0
+	if (this_is_an_agp_device)
+		return ttm_agp_tt_populate(ttm);
+#endif
+
+#ifdef CONFIG_SWIOTLB
+	if (swiotlb_nr_tbl())
+		return ttm_dma_populate(ttm_dma, ddev->dev);
+#endif
+
+	return ttm_pool_populate(ttm);
+}
+
+static void drm_psb_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+	struct ttm_dma_tt *ttm_dma;
+	struct ttm_bo_device *bdev;
+	struct drm_psb_private *dev_priv;
+	struct drm_device *ddev;
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 3, 0))
+	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
+
+	if (slave)
+		return;
+#endif
+	/*	The only use made of the structure pointed to
+		by ddev is reference to these members:
+			struct device *dev;
+			struct pci_dev *pdev;
+		*/
+	ttm_dma = (struct ttm_dma_tt *) ttm;
+	bdev = ttm->bdev;
+	dev_priv = container_of(bdev, struct drm_psb_private, bdev);
+
+	ddev = dev_priv->dev;
+
+#if __OS_HAS_AGP && 0
+	if (this_is_an_agp_device) {
+		ttm_agp_tt_unpopulate(ttm);
+		return;
+	}
+#endif
+
+#ifdef CONFIG_SWIOTLB
+	if (swiotlb_nr_tbl()) {
+		ttm_dma_unpopulate(ttm_dma, ddev->dev);
+		return;
+	}
+#endif
+
+	ttm_pool_unpopulate(ttm);
+}
+#endif
+
+static int psb_invalidate_caches(struct ttm_bo_device *bdev,
+				 uint32_t placement)
+{
+	return 0;
+}
+
+/*
+ * MSVDX/TOPAZ GPU virtual space looks like this
+ * (We currently use only one MMU context).
+ * PSB_MEM_MMU_START: from 0x00000000~0xd000000, for generic buffers
+ * TTM_PL_IMR: from 0xd0000000, for MFLD IMR buffers
+ * TTM_PL_CI: from 0xe0000000+half GTT space, for MRST camear/video buffer sharing
+ * TTM_PL_TT: from TTM_PL_CI+CI size, for buffers need to mapping into GTT
+ */
+static int psb_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+			     struct ttm_mem_type_manager *man)
+{
+	struct drm_psb_private *dev_priv =
+		container_of(bdev, struct drm_psb_private, bdev);
+#ifndef CONFIG_DRM_VXD_BYT
+	struct psb_gtt *pg = dev_priv->pg;
+#endif
+
+	switch (type) {
+	case TTM_PL_SYSTEM:
+		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+		man->available_caching = TTM_PL_FLAG_CACHED |
+					 TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
+		man->default_caching = TTM_PL_FLAG_CACHED;
+		break;
+	case DRM_PSB_MEM_MMU:
+		man->func = &ttm_bo_manager_func;
+		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
+			     TTM_MEMTYPE_FLAG_CMA;
+		man->gpu_offset = PSB_MEM_MMU_START;
+		man->available_caching = TTM_PL_FLAG_CACHED |
+					 TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
+		man->default_caching = TTM_PL_FLAG_WC;
+		printk(KERN_INFO "[TTM] DRM_PSB_MEM_MMU heap: %lu\n",
+		       man->gpu_offset);
+		break;
+#ifndef CONFIG_DRM_VXD_BYT
+#if !defined(MERRIFIELD)
+	case TTM_PL_IMR:	/* Unmappable IMR memory */
+		man->func = &ttm_bo_manager_func;
+		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
+			     TTM_MEMTYPE_FLAG_FIXED;
+		man->available_caching = TTM_PL_FLAG_UNCACHED;
+		man->default_caching = TTM_PL_FLAG_UNCACHED;
+		man->gpu_offset = PSB_MEM_IMR_START;
+		break;
+#endif
+
+	case TTM_PL_TT:	/* Mappable GATT memory */
+		man->func = &ttm_bo_manager_func;
+#ifdef PSB_WORKING_HOST_MMU_ACCESS
+		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+#else
+		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
+			     TTM_MEMTYPE_FLAG_CMA;
+#endif
+		man->available_caching = TTM_PL_FLAG_CACHED |
+					 TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
+		man->default_caching = TTM_PL_FLAG_WC;
+		man->gpu_offset =
+		    pg->mmu_gatt_start + pg->gtt_video_start;
+		printk(KERN_INFO "[TTM] TTM_PL_TT heap: 0x%lu\n",
+		       man->gpu_offset);
+		break;
+#endif
+
+	case DRM_PSB_MEM_MMU_TILING:
+		man->func = &ttm_bo_manager_func;
+		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
+			     TTM_MEMTYPE_FLAG_CMA;
+		man->gpu_offset = PSB_MEM_MMU_TILING_START;
+		man->available_caching = TTM_PL_FLAG_CACHED |
+					 TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
+		man->default_caching = TTM_PL_FLAG_WC;
+		printk(KERN_INFO "[TTM] DRM_PSB_MEM_MMU_TILING heap: 0x%lu\n",
+		       man->gpu_offset);
+		break;
+
+	default:
+		DRM_ERROR("Unsupported memory type %u\n", (unsigned) type);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void psb_evict_mask(struct ttm_buffer_object *bo, struct ttm_placement *placement)
+{
+	static uint32_t cur_placement;
+
+	cur_placement = bo->mem.placement & ~TTM_PL_MASK_MEM;
+	cur_placement |= TTM_PL_FLAG_SYSTEM;
+
+	placement->fpfn = 0;
+	placement->lpfn = 0;
+	placement->num_placement = 1;
+	placement->placement = &cur_placement;
+	placement->num_busy_placement = 0;
+	placement->busy_placement = NULL;
+
+	/* all buffers evicted to system memory */
+	/* return cur_placement | TTM_PL_FLAG_SYSTEM; */
+}
+
+static int psb_move(struct ttm_buffer_object *bo,
+		    bool evict, bool interruptible,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+		    bool no_wait_reserve,
+#endif
+		    bool no_wait, struct ttm_mem_reg *new_mem)
+{
+	struct ttm_mem_reg *old_mem = &bo->mem;
+#if (!defined(MERRIFIELD) && !defined(CONFIG_DRM_VXD_BYT))
+	if ((old_mem->mem_type == TTM_PL_IMR) ||
+	    (new_mem->mem_type == TTM_PL_IMR)) {
+		if (old_mem->mm_node) {
+			spin_lock(&bo->glob->lru_lock);
+			drm_mm_put_block(old_mem->mm_node);
+			spin_unlock(&bo->glob->lru_lock);
+		}
+		old_mem->mm_node = NULL;
+		*old_mem = *new_mem;
+	} else
+#endif
+	if (old_mem->mem_type == TTM_PL_SYSTEM) {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+		return ttm_bo_move_memcpy(bo, evict, false, no_wait, new_mem);
+#else
+		return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+#endif
+	} else if (new_mem->mem_type == TTM_PL_SYSTEM) {
+		int ret = psb_move_flip(bo, evict, interruptible,
+					no_wait, new_mem);
+		if (unlikely(ret != 0)) {
+			if (ret == -ERESTART)
+				return ret;
+			else
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+				return ttm_bo_move_memcpy(bo, evict, false, no_wait,
+#else
+				return ttm_bo_move_memcpy(bo, evict, no_wait,
+#endif
+							  new_mem);
+		}
+	} else {
+		if (psb_move_blit(bo, evict, no_wait, new_mem))
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+			return ttm_bo_move_memcpy(bo, evict, false, no_wait,
+#else
+			return ttm_bo_move_memcpy(bo, evict, no_wait,
+#endif
+						  new_mem);
+	}
+	return 0;
+}
+
+int psb_verify_access(struct ttm_buffer_object *bo,
+		      struct file *filp)
+{
+	struct drm_file *file_priv = (struct drm_file *)filp->private_data;
+
+	if (capable(CAP_SYS_ADMIN))
+		return 0;
+
+	/* workaround drm authentification issue on Android for ttm_bo_mmap */
+	/*
+	if (unlikely(!file_priv->authenticated))
+		return -EPERM;
+	*/
+
+	return ttm_pl_verify_access(bo, psb_fpriv(file_priv)->tfile);
+}
+
+static int psb_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+	struct drm_psb_private *dev_priv =
+		container_of(bdev, struct drm_psb_private, bdev);
+#ifndef CONFIG_DRM_VXD_BYT
+	struct psb_gtt *pg = dev_priv->pg;
+#endif
+
+	mem->bus.addr = NULL;
+	mem->bus.offset = 0;
+	mem->bus.size = mem->num_pages << PAGE_SHIFT;
+	mem->bus.base = 0;
+	mem->bus.is_iomem = false;
+	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+		return -EINVAL;
+	switch (mem->mem_type) {
+	case TTM_PL_SYSTEM:
+		/* system memory */
+		return 0;
+#ifndef CONFIG_DRM_VXD_BYT
+	case TTM_PL_TT:
+		mem->bus.offset = mem->start << PAGE_SHIFT;
+		mem->bus.base = pg->gatt_start;
+		mem->bus.is_iomem = false; /* Don't know whether it is IO_MEM, this flag used in vm_fault handle */
+		break;
+#endif
+	case DRM_PSB_MEM_MMU:
+		mem->bus.offset = mem->start << PAGE_SHIFT;
+		mem->bus.base = 0x00000000;
+		break;
+#ifndef CONFIG_DRM_VXD_BYT
+#if !defined(MERRIFIELD)
+	case TTM_PL_IMR:
+		mem->bus.offset = mem->start << PAGE_SHIFT;
+		mem->bus.base = dev_priv->imr_region_start;;
+		mem->bus.is_iomem = true;
+		break;
+#endif
+#endif
+	case DRM_PSB_MEM_MMU_TILING:
+		mem->bus.offset = mem->start << PAGE_SHIFT;
+		mem->bus.base = 0x00000000;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+#ifdef PSB_TTM_IO_MEM_FREE
+static void psb_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+#endif
+
+struct ttm_bo_driver psb_ttm_bo_driver = {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0))
+	.create_ttm_backend_entry = &drm_psb_tbe_init,
+#else
+	.ttm_tt_create = &drm_psb_ttm_tt_create,
+	.ttm_tt_populate = &drm_psb_ttm_tt_populate,
+	.ttm_tt_unpopulate = &drm_psb_ttm_tt_unpopulate,
+#endif
+	.invalidate_caches = &psb_invalidate_caches,
+	.init_mem_type = &psb_init_mem_type,
+	.evict_flags = &psb_evict_mask,
+	/* psb_move is used for IMR case */
+	.move = &psb_move,
+	.verify_access = &psb_verify_access,
+	.sync_obj_signaled = &ttm_fence_sync_obj_signaled,
+	.sync_obj_wait = &ttm_fence_sync_obj_wait,
+	.sync_obj_flush = &ttm_fence_sync_obj_flush,
+	.sync_obj_unref = &ttm_fence_sync_obj_unref,
+	.sync_obj_ref = &ttm_fence_sync_obj_ref,
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
+#if 0
+	/* Begin -- Not currently used by this driver.
+	   These will only be dispatched if non-NULL. */
+
+	/* hook to notify driver about a driver move so it
+	 * can do tiling things */
+	/*  Only called if non-NULL */
+	void (*move_notify)(struct ttm_buffer_object *bo,
+			    struct ttm_mem_reg *new_mem);
+	/* notify the driver we are taking a fault on this BO
+	 * and have reserved it */
+	int (*fault_reserve_notify)(struct ttm_buffer_object *bo);
+
+	/**
+	 * notify the driver that we're about to swap out this bo
+	 */
+	void (*swap_notify) (struct ttm_buffer_object *bo);
+
+	/* End   -- Not currently used by this driver. */
+#endif
+#endif
+
+	.io_mem_reserve = &psb_ttm_io_mem_reserve,
+#ifdef PSB_TTM_IO_MEM_FREE
+	.io_mem_free = &psb_ttm_io_mem_free
+#endif
+};
diff --git a/drivers/external_drivers/intel_media/video/common/psb_cmdbuf.c b/drivers/external_drivers/intel_media/video/common/psb_cmdbuf.c
new file mode 100644
index 0000000..3943c84
--- /dev/null
+++ b/drivers/external_drivers/intel_media/video/common/psb_cmdbuf.c
@@ -0,0 +1,1116 @@
+/**************************************************************************
+ * Copyright (c) 2007, Intel Corporation.
+ * All Rights Reserved.
+ * Copyright (c) 2008, Tungsten Graphics, Inc. Cedar Park, TX. USA.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#include <drm/drmP.h>
+#ifdef CONFIG_DRM_VXD_BYT
+#include "vxd_drv.h"
+#include "vxd_drm.h"
+#else
+#include "psb_drv.h"
+#include "psb_drm.h"
+#include "psb_reg.h"
+#ifdef MERRIFIELD
+#include "tng_topaz.h"
+#include "pwr_mgmt.h"
+#else
+#include "pnw_topaz.h"
+#include "psb_powermgmt.h"
+#endif
+#include "psb_intel_reg.h"
+#endif
+
+#include "psb_msvdx.h"
+
+#ifdef SUPPORT_MRST
+#include "lnc_topaz.h"
+#endif
+
+#ifdef SUPPORT_VSP
+#include "vsp.h"
+#endif
+
+#include "ttm/ttm_bo_api.h"
+#include "ttm/ttm_execbuf_util.h"
+#include "psb_ttm_userobj_api.h"
+#include "ttm/ttm_placement.h"
+#include "psb_video_drv.h"
+
+static inline int psb_same_page(unsigned long offset,
+				unsigned long offset2)
+{
+	return (offset & PAGE_MASK) == (offset2 & PAGE_MASK);
+}
+
+#if 0
+static inline unsigned long psb_offset_end(unsigned long offset,
+		unsigned long end)
+{
+	offset = (offset + PAGE_SIZE) & PAGE_MASK;
+	return (end < offset) ? end : offset;
+}
+#endif
+
+static void psb_idle_engine(struct drm_device *dev, int engine)
+{
+	/*Fix me add video engile support*/
+	return;
+}
+
+struct psb_dstbuf_cache {
+	unsigned int dst;
+	struct ttm_buffer_object *dst_buf;
+	unsigned long dst_offset;
+	uint32_t *dst_page;
+	unsigned int dst_page_offset;
+	struct ttm_bo_kmap_obj dst_kmap;
+	bool dst_is_iomem;
+};
+
+static int psb_check_presumed(struct psb_validate_req *req,
+			      struct ttm_buffer_object *bo,
+			      struct psb_validate_arg __user *data,
+			      int *presumed_ok)
+{
+	struct psb_validate_req __user *user_req = &(data->d.req);
+
+	*presumed_ok = 0;
+
+	if (bo->mem.mem_type == TTM_PL_SYSTEM) {
+		*presumed_ok = 1;
+		return 0;
+	}
+
+	if (unlikely(!(req->presumed_flags & PSB_USE_PRESUMED)))
+		return 0;
+
+	if (bo->offset == req->presumed_gpu_offset) {
+		*presumed_ok = 1;
+		return 0;
+	}
+
+	return __put_user(req->presumed_flags & ~PSB_USE_PRESUMED,
+			  &user_req->presumed_flags);
+}
+
+
+static void psb_unreference_buffers(struct psb_context *context)
+{
+	struct ttm_validate_buffer *entry, *next;
+	struct psb_validate_buffer *vbuf;
+	struct list_head *list = &context->validate_list;
+
+	list_for_each_entry_safe(entry, next, list, head) {
+		vbuf =
+			container_of(entry, struct psb_validate_buffer, base);
+		list_del(&entry->head);
+		ttm_bo_unref(&entry->bo);
+	}
+
+	/*
+	list = &context->kern_validate_list;
+
+	list_for_each_entry_safe(entry, next, list, head) {
+		vbuf =
+			container_of(entry, struct psb_validate_buffer, base);
+		list_del(&entry->head);
+		ttm_bo_unref(&entry->bo);
+	}
+	*/
+}
+
+static int psb_lookup_validate_buffer(struct drm_file *file_priv,
+				      uint64_t data,
+				      struct psb_validate_buffer *item)
+{
+	struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
+
+	item->user_val_arg =
+		(struct psb_validate_arg __user *)(unsigned long) data;
+
+	if (unlikely(copy_from_user(&item->req, &item->user_val_arg->d.req,
+				    sizeof(item->req)) != 0)) {
+		DRM_ERROR("Lookup copy fault.\n");
+		return -EFAULT;
+	}
+
+	item->base.bo =
+		ttm_buffer_object_lookup(tfile, item->req.buffer_handle);
+
+	if (unlikely(item->base.bo == NULL)) {
+		DRM_ERROR("Bo lookup fault.\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int psb_reference_buffers(struct drm_file *file_priv,
+				 uint64_t data,
+				 struct psb_context *context)
+{
+	struct psb_validate_buffer *item;
+	int ret;
+
+	while (likely(data != 0)) {
+		if (unlikely(context->used_buffers >=
+			     PSB_NUM_VALIDATE_BUFFERS)) {
+			DRM_ERROR("Too many buffers "
+				  "on validate list.\n");
+			ret = -EINVAL;
+			goto out_err0;
+		}
+
+		item = &context->buffers[context->used_buffers];
+
+		ret = psb_lookup_validate_buffer(file_priv, data, item);
+		if (unlikely(ret != 0))
+			goto out_err0;
+
+		item->base.reserved = 0;
+		list_add_tail(&item->base.head, &context->validate_list);
+		context->used_buffers++;
+		data = item->req.next;
+	}
+	return 0;
+
+out_err0:
+	psb_unreference_buffers(context);
+	return ret;
+}
+
+/* Todo: can simplified the code:
+ * set_val_flags is hard code as (read | write) in user space
+ * clr_flags is set as NULL
+ * only one fence type is needed */
+static int psb_placement_fence_type(struct ttm_buffer_object *bo,
+						uint64_t set_val_flags,
+						uint64_t clr_val_flags,
+						uint32_t new_fence_class,
+						uint32_t *new_fence_type)
+{
+	int ret;
+	uint32_t n_fence_type;
+
+	/*
+	uint32_t set_flags = set_val_flags & 0xFFFFFFFF;
+	uint32_t clr_flags = clr_val_flags & 0xFFFFFFFF;
+	*/
+	struct ttm_fence_object *old_fence;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+	uint32_t old_fence_type;
+#endif
+	struct ttm_placement placement;
+
+	if (unlikely
+	    (!(set_val_flags &
+	       (PSB_GPU_ACCESS_READ | PSB_GPU_ACCESS_WRITE)))) {
+		DRM_ERROR
+		("GPU access type (read / write) is not indicated.\n");
+		return -EINVAL;
+	}
+
+	/* User space driver doesn't set any TTM placement flags in set_val_flags or clr_val_flags */
+	placement.num_placement = 0;/* FIXME  */
+	placement.num_busy_placement = 0;
+	placement.fpfn = 0;
+	placement.lpfn = 0;
+	ret = psb_ttm_bo_check_placement(bo, &placement);
+	if (unlikely(ret != 0))
+		return ret;
+
+	switch (new_fence_class) {
+	default:
+		n_fence_type = _PSB_FENCE_TYPE_EXE;
+	}
+
+	*new_fence_type = n_fence_type;
+	old_fence = (struct ttm_fence_object *) bo->sync_obj;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+	old_fence_type = (uint32_t)(unsigned long) bo->sync_obj_arg;
+
+	if (old_fence && ((new_fence_class != old_fence->fence_class) ||
+			  ((n_fence_type ^ old_fence_type) &
+			   old_fence_type))) {
+#else
+	if (old_fence && ((new_fence_class != old_fence->fence_class))) {
+#endif
+		ret = ttm_bo_wait(bo, 0, 1, 0);
+		if (unlikely(ret != 0))
+			return ret;
+	}
+	/*
+	bo->proposed_flags = (bo->proposed_flags | set_flags)
+		& ~clr_flags & TTM_PL_MASK_MEMTYPE;
+	*/
+	return 0;
+}
+
+#if 0
+int psb_validate_kernel_buffer(struct psb_context *context,
+			       struct ttm_buffer_object *bo,
+			       uint32_t fence_class,
+			       uint64_t set_flags, uint64_t clr_flags)
+{
+	struct psb_validate_buffer *item;
+	uint32_t cur_fence_type;
+	int ret;
+
+	if (unlikely(context->used_buffers >= PSB_NUM_VALIDATE_BUFFERS)) {
+		DRM_ERROR("Out of free validation buffer entries for "
+			  "kernel buffer validation.\n");
+		return -ENOMEM;
+	}
+
+	item = &context->buffers[context->used_buffers];
+	item->user_val_arg = NULL;
+	item->base.reserved = 0;
+
+	ret = ttm_bo_reserve(bo, 1, 0, 1, context->val_seq);
+	if (unlikely(ret != 0))
+		goto out_unlock;
+
+	spin_lock(&bo->lock);
+	ret = psb_placement_fence_type(bo, set_flags, clr_flags, fence_class,
+				       &cur_fence_type);
+	if (unlikely(ret != 0)) {
+		ttm_bo_unreserve(bo);
+		goto out_unlock;
+	}
+
+	item->base.bo = ttm_bo_reference(bo);
+	item->base.new_sync_obj_arg = (void *)(unsigned long) cur_fence_type;
+	item->base.reserved = 1;
+
+	list_add_tail(&item->base.head, &context->kern_validate_list);
+	context->used_buffers++;
+	/*
+	ret = ttm_bo_validate(bo, 1, 0, 0);
+	if (unlikely(ret != 0))
+		goto out_unlock;
+	*/
+	item->offset = bo->offset;
+	item->flags = bo->mem.placement;
+	context->fence_types |= cur_fence_type;
+
+out_unlock:
+	spin_unlock(&bo->lock);
+	return ret;
+}
+#endif
+
+static int psb_validate_buffer_list(struct drm_file *file_priv,
+				    uint32_t fence_class,
+				    struct psb_context *context,
+				    int *po_correct,
+				    struct psb_mmu_driver *psb_mmu)
+{
+	struct psb_validate_buffer *item;
+	struct ttm_buffer_object *bo;
+	int ret;
+	struct psb_validate_req *req;
+	uint32_t fence_types = 0;
+	uint32_t cur_fence_type;
+	struct ttm_validate_buffer *entry;
+	struct list_head *list = &context->validate_list;
+	struct ttm_placement placement;
+	uint32_t flags;
+
+	*po_correct = 1;
+
+	list_for_each_entry(entry, list, head) {
+		item = container_of(entry, struct psb_validate_buffer, base);
+		bo = entry->bo;
+		item->ret = 0;
+		req = &item->req;
+
+		spin_lock(&bo->bdev->fence_lock);
+		ret = psb_placement_fence_type(bo,
+					       req->set_flags,
+					       req->clear_flags,
+					       fence_class,
+					       &cur_fence_type);
+		if (unlikely(ret != 0))
+			goto out_unlock;
+
+		flags = item->req.pad64 | TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;
+		placement.num_placement = 1;
+		placement.placement = &flags;
+		placement.num_busy_placement = 1;
+		placement.busy_placement = &flags;
+		placement.fpfn = 0;
+		placement.lpfn = 0;
+
+		spin_unlock(&bo->bdev->fence_lock);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+		ret = ttm_bo_validate(bo, &placement, 1, 0, 0);
+#else
+		ret = ttm_bo_validate(bo, &placement, 1, 0);
+#endif
+		/* spin_lock(&bo->lock); */
+		/* mem and offset field of bo is protected by ::reserve
+		 * this function is called in reserve */
+		if (unlikely(ret != 0))
+			goto out_err;
+
+#ifndef CONFIG_DRM_VXD_BYT
+		if ((item->req.unfence_flag & PSB_MEM_CLFLUSH)) {
+			ret = psb_ttm_bo_clflush(psb_mmu, bo);
+			if (unlikely(!ret))
+				PSB_DEBUG_WARN("clflush bo fail\n");
+		}
+#endif
+
+		fence_types |= cur_fence_type;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+		entry->new_sync_obj_arg = (void *)
+					  (unsigned long) cur_fence_type;
+#endif
+
+		item->offset = bo->offset;
+		item->flags = bo->mem.placement;
+		/* spin_unlock(&bo->lock); */
+
+		ret = psb_check_presumed(&item->req, bo, item->user_val_arg,
+					&item->po_correct);
+		if (unlikely(ret != 0))
+			goto out_err;
+
+		if (unlikely(!item->po_correct))
+			*po_correct = 0;
+
+		item++;
+	}
+
+	context->fence_types |= fence_types;
+
+	return 0;
+out_unlock:
+	spin_unlock(&bo->bdev->fence_lock);
+
+out_err:
+	/* spin_unlock(&bo->lock); */
+	item->ret = ret;
+	return ret;
+}
+
+static void psb_clear_dstbuf_cache(struct psb_dstbuf_cache *dst_cache)
+{
+	if (dst_cache->dst_page) {
+		ttm_bo_kunmap(&dst_cache->dst_kmap);
+		dst_cache->dst_page = NULL;
+	}
+	dst_cache->dst_buf = NULL;
+	dst_cache->dst = ~0;
+}
+
+static int psb_update_dstbuf_cache(struct psb_dstbuf_cache *dst_cache,
+				   struct psb_validate_buffer *buffers,
+				   unsigned int dst,
+				   unsigned long dst_offset)
+{
+	int ret;
+
+	PSB_DEBUG_GENERAL("Destination buffer is %d.\n", dst);
+
+	if (unlikely(dst != dst_cache->dst || NULL == dst_cache->dst_buf)) {
+		psb_clear_dstbuf_cache(dst_cache);
+		dst_cache->dst = dst;
+		dst_cache->dst_buf = buffers[dst].base.bo;
+	}
+
+	if (unlikely
+	    (dst_offset > dst_cache->dst_buf->num_pages * PAGE_SIZE)) {
+		DRM_ERROR("Relocation destination out of bounds.\n");
+		return -EINVAL;
+	}
+
+	if (!psb_same_page(dst_cache->dst_offset, dst_offset) ||
+	    NULL == dst_cache->dst_page) {
+		if (NULL != dst_cache->dst_page) {
+			ttm_bo_kunmap(&dst_cache->dst_kmap);
+			dst_cache->dst_page = NULL;
+		}
+
+		ret = ttm_bo_kmap(dst_cache->dst_buf,
+				dst_offset >> PAGE_SHIFT, 1,
+				&dst_cache->dst_kmap);
+		if (ret) {
+			DRM_ERROR("Could not map destination buffer for "
+				  "relocation.\n");
+			return ret;
+		}
+
+		dst_cache->dst_page =
+			ttm_kmap_obj_virtual(&dst_cache->dst_kmap,
+					     &dst_cache->dst_is_iomem);
+		dst_cache->dst_offset = dst_offset & PAGE_MASK;
+		dst_cache->dst_page_offset = dst_cache->dst_offset >> 2;
+	}
+	return 0;
+}
+
+static int psb_apply_reloc(struct drm_psb_private *dev_priv,
+			   uint32_t fence_class,
+			   const struct drm_psb_reloc *reloc,
+			   struct psb_validate_buffer *buffers,
+			   int num_buffers,
+			   struct psb_dstbuf_cache *dst_cache,
+			   int no_wait, int interruptible)
+{
+	uint32_t val;
+	uint32_t background;
+	unsigned int index;
+	int ret;
+	unsigned int shift;
+	unsigned int align_shift;
+	struct ttm_buffer_object *reloc_bo;
+
+
+	PSB_DEBUG_GENERAL("Reloc type %d\n"
+			  "\t where 0x%04x\n"
+			  "\t buffer 0x%04x\n"
+			  "\t mask 0x%08x\n"
+			  "\t shift 0x%08x\n"
+			  "\t pre_add 0x%08x\n"
+			  "\t background 0x%08x\n"
+			  "\t dst_buffer 0x%08x\n"
+			  "\t arg0 0x%08x\n"
+			  "\t arg1 0x%08x\n",
+			  reloc->reloc_op,
+			  reloc->where,
+			  reloc->buffer,
+			  reloc->mask,
+			  reloc->shift,
+			  reloc->pre_add,
+			  reloc->background,
+			  reloc->dst_buffer, reloc->arg0, reloc->arg1);
+
+	if (unlikely(reloc->buffer >= num_buffers)) {
+		DRM_ERROR("Illegal relocation buffer %d.\n",
+			  reloc->buffer);
+		return -EINVAL;
+	}
+
+	if (buffers[reloc->buffer].po_correct)
+		return 0;
+
+	if (unlikely(reloc->dst_buffer >= num_buffers)) {
+		DRM_ERROR
+		("Illegal destination buffer for relocation %d.\n",
+		 reloc->dst_buffer);
+		return -EINVAL;
+	}
+
+	ret = psb_update_dstbuf_cache(dst_cache, buffers,
+					reloc->dst_buffer,
+					reloc->where << 2);
+	if (ret)
+		return ret;
+
+	reloc_bo = buffers[reloc->buffer].base.bo;
+
+	if (unlikely(reloc->pre_add > (reloc_bo->num_pages << PAGE_SHIFT))) {
+		DRM_ERROR("Illegal relocation offset add.\n");
+		return -EINVAL;
+	}
+
+	switch (reloc->reloc_op) {
+	case PSB_RELOC_OP_OFFSET:
+		val = reloc_bo->offset + reloc->pre_add;
+		break;
+	default:
+		DRM_ERROR("Unimplemented relocation.\n");
+		return -EINVAL;
+	}
+
+	shift =
+		(reloc->shift & PSB_RELOC_SHIFT_MASK) >> PSB_RELOC_SHIFT_SHIFT;
+	align_shift =
+		(reloc->
+		 shift & PSB_RELOC_ALSHIFT_MASK) >> PSB_RELOC_ALSHIFT_SHIFT;
+
+	val = ((val >> align_shift) << shift);
+	index = reloc->where - dst_cache->dst_page_offset;
+
+	background = reloc->background;
+	val = (background & ~reloc->mask) | (val & reloc->mask);
+	dst_cache->dst_page[index] = val;
+
+	PSB_DEBUG_GENERAL("Reloc buffer %d index 0x%08x, value 0x%08x\n",
+			  reloc->dst_buffer, index,
+			  dst_cache->dst_page[index]);
+
+	return 0;
+}
+
+static int psb_ok_to_map_reloc(struct drm_psb_private *dev_priv,
+			       unsigned int num_pages)
+{
+	int ret = 0;
+
+	spin_lock(&dev_priv->reloc_lock);
+	if (dev_priv->rel_mapped_pages + num_pages <= PSB_MAX_RELOC_PAGES) {
+		dev_priv->rel_mapped_pages += num_pages;
+		ret = 1;
+	}
+	spin_unlock(&dev_priv->reloc_lock);
+	return ret;
+}
+
+static int psb_fixup_relocs(struct drm_file *file_priv,
+			    uint32_t fence_class,
+			    unsigned int num_relocs,
+			    unsigned int reloc_offset,
+			    uint32_t reloc_handle,
+			    struct psb_context *context,
+			    int no_wait, int interruptible)
+{
+	struct drm_device *dev = file_priv->minor->dev;
+	struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	struct ttm_buffer_object *reloc_buffer = NULL;
+	unsigned int reloc_num_pages;
+	unsigned int reloc_first_page;
+	unsigned int reloc_last_page;
+	struct psb_dstbuf_cache dst_cache;
+	struct drm_psb_reloc *reloc;
+	struct ttm_bo_kmap_obj reloc_kmap;
+	bool reloc_is_iomem;
+	int count;
+	int ret = 0;
+	int registered = 0;
+	uint32_t num_buffers = context->used_buffers;
+
+	if (num_relocs == 0)
+		return 0;
+
+	memset(&dst_cache, 0, sizeof(dst_cache));
+	memset(&reloc_kmap, 0, sizeof(reloc_kmap));
+
+	reloc_buffer = ttm_buffer_object_lookup(tfile, reloc_handle);
+	if (!reloc_buffer)
+		goto out;
+
+	if (unlikely(atomic_read(&reloc_buffer->reserved) != 1)) {
+		DRM_ERROR("Relocation buffer was not on validate list.\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	reloc_first_page = reloc_offset >> PAGE_SHIFT;
+	reloc_last_page =
+		(reloc_offset +
+		 num_relocs * sizeof(struct drm_psb_reloc)) >> PAGE_SHIFT;
+	reloc_num_pages = reloc_last_page - reloc_first_page + 1;
+	reloc_offset &= ~PAGE_MASK;
+
+	if (reloc_num_pages > PSB_MAX_RELOC_PAGES) {
+		DRM_ERROR("Relocation buffer is too large\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	DRM_WAIT_ON(ret, dev_priv->rel_mapped_queue, 3 * DRM_HZ,
+		    (registered =
+			     psb_ok_to_map_reloc(dev_priv, reloc_num_pages)));
+
+	if (ret == -EINTR) {
+		ret = -ERESTART;
+		goto out;
+	}
+	if (ret) {
+		DRM_ERROR("Error waiting for space to map "
+			  "relocation buffer.\n");
+		goto out;
+	}
+
+	ret = ttm_bo_kmap(reloc_buffer, reloc_first_page,
+			  reloc_num_pages, &reloc_kmap);
+
+	if (ret) {
+		DRM_ERROR("Could not map relocation buffer.\n"
+			  "\tReloc buffer id 0x%08x.\n"
+			  "\tReloc first page %d.\n"
+			  "\tReloc num pages %d.\n",
+			  reloc_handle, reloc_first_page, reloc_num_pages);
+		goto out;
+	}
+
+	reloc = (struct drm_psb_reloc *)
+		((unsigned long)
+		 ttm_kmap_obj_virtual(&reloc_kmap,
+				      &reloc_is_iomem) + reloc_offset);
+
+	for (count = 0; count < num_relocs; ++count) {
+		ret = psb_apply_reloc(dev_priv, fence_class,
+				      reloc, context->buffers,
+				      num_buffers, &dst_cache,
+				      no_wait, interruptible);
+		if (ret)
+			goto out1;
+		reloc++;
+	}
+
+out1:
+	ttm_bo_kunmap(&reloc_kmap);
+out:
+	if (registered) {
+		spin_lock(&dev_priv->reloc_lock);
+		dev_priv->rel_mapped_pages -= reloc_num_pages;
+		spin_unlock(&dev_priv->reloc_lock);
+		DRM_WAKEUP(&dev_priv->rel_mapped_queue);
+	}
+
+	psb_clear_dstbuf_cache(&dst_cache);
+	if (reloc_buffer)
+		ttm_bo_unref(&reloc_buffer);
+	return ret;
+}
+
+void psb_fence_or_sync(struct drm_file *file_priv,
+		       uint32_t engine,
+		       uint32_t fence_types,
+		       uint32_t fence_flags,
+		       struct list_head *list,
+		       struct psb_ttm_fence_rep *fence_arg,
+		       struct ttm_fence_object **fence_p)
+{
+	struct drm_device *dev = file_priv->minor->dev;
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	struct ttm_fence_device *fdev = &dev_priv->fdev;
+	int ret;
+	struct ttm_fence_object *fence;
+	struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
+	uint32_t handle;
+	struct ttm_validate_buffer *entry, *next;
+	struct ttm_bo_global *glob = dev_priv->bdev.glob;
+
+	ret = ttm_fence_user_create(fdev, tfile,
+				    engine, fence_types,
+				    TTM_FENCE_FLAG_EMIT, &fence, &handle);
+	if (ret) {
+
+		/*
+		 * Fence creation failed.
+		 * Fall back to synchronous operation and idle the engine.
+		 */
+
+		psb_idle_engine(dev, engine);
+		if (!(fence_flags & DRM_PSB_FENCE_NO_USER)) {
+
+			/*
+			 * Communicate to user-space that
+			 * fence creation has failed and that
+			 * the engine is idle.
+			 */
+
+			fence_arg->handle = ~0;
+			fence_arg->error = ret;
+		}
+
+		ttm_eu_backoff_reservation(list);
+		if (fence_p)
+			*fence_p = NULL;
+		return;
+	}
+
+#ifndef CONFIG_DRM_VXD_BYT
+	spin_lock(&glob->lru_lock);
+	list_for_each_entry_safe(entry, next, list, head) {
+		struct psb_validate_buffer *vbuf =
+			container_of(entry, struct psb_validate_buffer,
+				     base);
+		if (vbuf->req.unfence_flag & PSB_NOT_FENCE) {
+			list_del(&entry->head);
+			ttm_bo_unreserve_locked(entry->bo);
+			ttm_bo_unref(&entry->bo);
+			entry->reserved = false;
+		}
+	}
+	spin_unlock(&glob->lru_lock);
+#endif
+
+	ttm_eu_fence_buffer_objects(list, fence);
+	if (!(fence_flags & DRM_PSB_FENCE_NO_USER)) {
+		struct ttm_fence_info info = ttm_fence_get_info(fence);
+		fence_arg->handle = handle;
+		fence_arg->fence_class = ttm_fence_class(fence);
+		fence_arg->fence_type = ttm_fence_types(fence);
+		fence_arg->signaled_types = info.signaled_types;
+		fence_arg->error = 0;
+	} else {
+		ret = ttm_ref_object_base_unref(tfile, handle,
+						ttm_fence_type);
+		if (ret)
+			DRM_ERROR("Failed to unref buffer object.\n");
+	}
+
+	if (fence_p)
+		*fence_p = fence;
+	else if (fence)
+		ttm_fence_object_unref(&fence);
+}
+
+
+#if 0
+static int psb_dump_page(struct ttm_buffer_object *bo,
+			 unsigned int page_offset, unsigned int num)
+{
+	struct ttm_bo_kmap_obj kmobj;
+	int is_iomem;
+	uint32_t *p;
+	int ret;
+	unsigned int i;
+
+	ret = ttm_bo_kmap(bo, page_offset, 1, &kmobj);
+	if (ret)
+		return ret;
+
+	p = ttm_kmap_obj_virtual(&kmobj, &is_iomem);
+	for (i = 0; i < num; ++i)
+		PSB_DEBUG_GENERAL("0x%04x: 0x%08x\n", i, *p++);
+
+	ttm_bo_kunmap(&kmobj);
+	return 0;
+}
+#endif
+
+static int psb_handle_copyback(struct drm_device *dev,
+			       struct psb_context *context,
+			       int ret)
+{
+	int err = ret;
+	struct ttm_validate_buffer *entry;
+	struct psb_validate_arg arg;
+	struct list_head *list = &context->validate_list;
+
+	if (ret) {
+		ttm_eu_backoff_reservation(list);
+		/* ttm_eu_backoff_reservation(&context->kern_validate_list); */
+	}
+
+	/* Todo: actually user space driver didn't hanlde the rep info */
+	if (ret != -EAGAIN && ret != -EINTR && ret != -ERESTART) {
+		list_for_each_entry(entry, list, head) {
+			struct psb_validate_buffer *vbuf =
+				container_of(entry, struct psb_validate_buffer,
+					     base);
+			arg.handled = 1;
+			arg.ret = vbuf->ret;
+			if (!arg.ret) {
+				struct ttm_buffer_object *bo = entry->bo;
+				/* spin_lock(&bo->lock); */
+				/* offset and mem field of bo is protected by reserve */
+				ret = ttm_bo_reserve(bo, 1, 0, 0, 0);
+				if (unlikely(ret != 0))
+					arg.ret = -EFAULT;
+				arg.d.rep.gpu_offset = bo->offset;
+				arg.d.rep.placement = bo->mem.placement;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+				arg.d.rep.fence_type_mask =
+					(uint32_t)(unsigned long)
+					entry->new_sync_obj_arg;
+#else
+				arg.d.rep.fence_type_mask = _PSB_FENCE_TYPE_EXE;
+#endif
+				ttm_bo_unreserve(bo);
+				/* spin_unlock(&bo->lock); */
+			}
+
+			if (__copy_to_user(vbuf->user_val_arg,
+					   &arg, sizeof(arg))) {
+				err = -EFAULT;
+				DRM_ERROR("call __copy_to_user() function error!\n");
+			}
+
+			if (arg.ret)
+				break;
+		}
+	}
+
+	return err;
+}
+
+int psb_cmdbuf_ioctl(struct drm_device *dev, void *data,
+		     struct drm_file *file_priv)
+{
+	struct drm_psb_cmdbuf_arg *arg = data;
+	int ret = 0;
+	struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
+	struct ttm_buffer_object *cmd_buffer = NULL;
+	struct psb_ttm_fence_rep fence_arg;
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	struct psb_mmu_driver *mmu = NULL;
+	struct msvdx_private *msvdx_priv = NULL;
+#ifdef SUPPORT_VSP
+	struct vsp_private *vsp_priv = NULL;
+#endif
+	struct psb_video_ctx *pos = NULL;
+	struct psb_video_ctx *n = NULL;
+	struct psb_video_ctx *msvdx_ctx = NULL;
+	unsigned long irq_flags;
+#if defined(MERRIFIELD)
+	struct tng_topaz_private *topaz_priv;
+#endif
+	int engine, po_correct;
+	int found = 0;
+	struct psb_context *context = NULL;
+
+	if (dev_priv == NULL)
+		return -EINVAL;
+	mmu = dev_priv->mmu;
+	msvdx_priv = dev_priv->msvdx_private;
+
+#if defined(MERRIFIELD)
+	topaz_priv = dev_priv->topaz_private;
+#endif
+
+#ifdef SUPPORT_VSP
+	vsp_priv = dev_priv->vsp_private;
+#endif
+
+#if defined(MERRIFIELD)
+	if (drm_topaz_cmdpolicy != PSB_CMDPOLICY_PARALLEL) {
+		wait_event_interruptible(topaz_priv->cmd_wq, \
+			(atomic_read(&topaz_priv->cmd_wq_free) == 1));
+		atomic_set(&topaz_priv->cmd_wq_free, 0);
+	}
+#endif
+
+	ret = ttm_read_lock(&dev_priv->ttm_lock, true);
+	if (unlikely(ret != 0))
+		return ret;
+
+	if (arg->engine == PSB_ENGINE_DECODE) {
+		if (msvdx_priv->fw_loaded_by_punit)
+			psb_msvdx_check_reset_fw(dev);
+#ifndef MERRIFIELD
+		if (!ospm_power_using_video_begin(OSPM_VIDEO_DEC_ISLAND)) {
+			ret = -EBUSY;
+			goto out_err0;
+		}
+#endif
+		ret = mutex_lock_interruptible(&msvdx_priv->msvdx_mutex);
+		if (unlikely(ret != 0))
+			goto out_err0;
+
+		msvdx_priv->tfile = tfile;
+		context = &dev_priv->decode_context;
+	} else if (arg->engine == LNC_ENGINE_ENCODE) {
+#ifndef CONFIG_DRM_VXD_BYT
+
+		if (dev_priv->topaz_disabled) {
+			ret = -ENODEV;
+			goto out_err0;
+		}
+#ifndef MERRIFIELD
+		if (!ospm_power_using_video_begin(OSPM_VIDEO_ENC_ISLAND)) {
+			ret = -EBUSY;
+			goto out_err0;
+		}
+#endif
+		ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
+		if (unlikely(ret != 0))
+			goto out_err0;
+		context = &dev_priv->encode_context;
+#endif
+	} else if (arg->engine == VSP_ENGINE_VPP) {
+#ifdef SUPPORT_VSP
+		ret = mutex_lock_interruptible(&vsp_priv->vsp_mutex);
+		if (unlikely(ret != 0))
+			goto out_err0;
+
+		context = &dev_priv->vsp_context;
+#endif
+	} else {
+		ret = -EINVAL;
+		goto out_err0;
+	}
+
+	if (context == NULL) {
+		ret = -EINVAL;
+		goto out_err0;
+	}
+
+	context->used_buffers = 0;
+	context->fence_types = 0;
+	if (!list_empty(&context->validate_list)) {
+		DRM_ERROR("context->validate_list is not null.\n");
+		ret = -EINVAL;
+		goto out_err1;
+	}
+
+	/* BUG_ON(!list_empty(&context->kern_validate_list)); */
+
+	if (unlikely(context->buffers == NULL)) {
+		context->buffers = vmalloc(PSB_NUM_VALIDATE_BUFFERS *
+					   sizeof(*context->buffers));
+		if (unlikely(context->buffers == NULL)) {
+			ret = -ENOMEM;
+			goto out_err1;
+		}
+	}
+
+	ret = psb_reference_buffers(file_priv,
+				    arg->buffer_list,
+				    context);
+	if (unlikely(ret != 0))
+		goto out_err1;
+
+	/* Not used in K3 */
+	/* context->val_seq = atomic_add_return(1, &dev_priv->val_seq); */
+
+	ret = ttm_eu_reserve_buffers(&context->validate_list);
+	if (unlikely(ret != 0))
+		goto out_err2;
+
+	engine = arg->engine;
+	ret = psb_validate_buffer_list(file_priv, engine,
+				       context, &po_correct, mmu);
+	if (unlikely(ret != 0))
+		goto out_err3;
+
+	if (!po_correct) {
+		ret = psb_fixup_relocs(file_priv, engine, arg->num_relocs,
+				       arg->reloc_offset,
+				       arg->reloc_handle, context, 0, 1);
+		if (unlikely(ret != 0))
+			goto out_err3;
+
+	}
+
+	cmd_buffer = ttm_buffer_object_lookup(tfile, arg->cmdbuf_handle);
+	if (unlikely(cmd_buffer == NULL)) {
+		ret = -EINVAL;
+		goto out_err4;
+	}
+
+	spin_lock_irqsave(&dev_priv->video_ctx_lock, irq_flags);
+	list_for_each_entry_safe(pos, n, &dev_priv->video_ctx, head) {
+		if (pos->filp == file_priv->filp) {
+			int entrypoint = pos->ctx_type & 0xff;
+
+		PSB_DEBUG_GENERAL("cmds for profile %llu, entrypoint %llu\n",
+					(pos->ctx_type >> 8) & 0xff,
+					(pos->ctx_type & 0xff));
+
+#ifndef CONFIG_DRM_VXD_BYT
+			if (entrypoint == VAEntrypointEncSlice ||
+			    entrypoint == VAEntrypointEncPicture)
+				dev_priv->topaz_ctx = pos;
+			else
+#endif
+			if (entrypoint != VAEntrypointVideoProc ||
+				arg->engine == PSB_ENGINE_DECODE)
+				msvdx_ctx = pos;
+			found = 1;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&dev_priv->video_ctx_lock, irq_flags);
+
+	if (!found) {
+		PSB_DEBUG_WARN("WARN: video ctx is not found.\n");
+		goto out_err4;
+	}
+
+	switch (arg->engine) {
+	case PSB_ENGINE_DECODE:
+		ret = psb_cmdbuf_video(file_priv, &context->validate_list,
+				       context->fence_types, arg,
+				       cmd_buffer, &fence_arg, msvdx_ctx);
+		if (unlikely(ret != 0))
+			goto out_err4;
+		break;
+#ifndef CONFIG_DRM_VXD_BYT
+	case LNC_ENGINE_ENCODE:
+#ifdef MERRIFIELD
+		if (IS_MRFLD(dev))
+			ret = tng_cmdbuf_video(
+				file_priv, &context->validate_list,
+				context->fence_types, arg,
+				cmd_buffer, &fence_arg);
+#else
+		if (IS_MDFLD(dev))
+			ret = pnw_cmdbuf_video(
+				file_priv, &context->validate_list,
+				context->fence_types, arg,
+				cmd_buffer, &fence_arg);
+#endif
+
+		if (unlikely(ret != 0))
+			goto out_err4;
+		break;
+#endif
+	case VSP_ENGINE_VPP:
+#ifdef SUPPORT_VSP
+		ret = vsp_cmdbuf_vpp(file_priv, &context->validate_list,
+				     context->fence_types, arg,
+				     cmd_buffer, &fence_arg);
+
+		if (unlikely(ret != 0))
+			goto out_err4;
+		break;
+#endif
+	default:
+		DRM_ERROR
+		("Unimplemented command submission mechanism (%x).\n",
+		 arg->engine);
+		ret = -EINVAL;
+		goto out_err4;
+	}
+
+	if (!(arg->fence_flags & DRM_PSB_FENCE_NO_USER)) {
+		ret = copy_to_user((void __user *)
+				   ((unsigned long) arg->fence_arg),
+				   &fence_arg, sizeof(fence_arg));
+	}
+
+out_err4:
+	if (cmd_buffer)
+		ttm_bo_unref(&cmd_buffer);
+out_err3:
+	ret = psb_handle_copyback(dev, context, ret);
+out_err2:
+	psb_unreference_buffers(context);
+out_err1:
+	if (arg->engine == PSB_ENGINE_DECODE)
+		mutex_unlock(&msvdx_priv->msvdx_mutex);
+	if (arg->engine == LNC_ENGINE_ENCODE)
+		mutex_unlock(&dev_priv->cmdbuf_mutex);
+#ifdef SUPPORT_VSP
+	if (arg->engine == VSP_ENGINE_VPP)
+		mutex_unlock(&vsp_priv->vsp_mutex);
+#endif
+out_err0:
+	ttm_read_unlock(&dev_priv->ttm_lock);
+#ifndef MERRIFIELD
+	if (arg->engine == PSB_ENGINE_DECODE)
+		ospm_power_using_video_end(OSPM_VIDEO_DEC_ISLAND);
+#endif
+#ifndef CONFIG_DRM_VXD_BYT
+#ifndef MERRIFIELD
+	if (arg->engine == LNC_ENGINE_ENCODE)
+		ospm_power_using_video_end(OSPM_VIDEO_ENC_ISLAND);
+#endif
+#endif
+	return ret;
+}
diff --git a/drivers/external_drivers/intel_media/video/common/psb_fence.c b/drivers/external_drivers/intel_media/video/common/psb_fence.c
new file mode 100644
index 0000000..2756745
--- /dev/null
+++ b/drivers/external_drivers/intel_media/video/common/psb_fence.c
@@ -0,0 +1,280 @@
+/*
+ * Copyright (c) 2007, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ *
+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include <drm/drmP.h>
+#ifdef CONFIG_DRM_VXD_BYT
+#include "vxd_drv.h"
+#else
+#include "psb_drv.h"
+
+#ifdef MERRIFIELD
+#include "tng_topaz.h"
+#else
+#include "pnw_topaz.h"
+#endif
+
+#endif
+#include "psb_msvdx.h"
+
+#ifdef SUPPORT_VSP
+#include "vsp.h"
+#endif
+
+int psb_fence_emit_sequence(struct ttm_fence_device *fdev,
+			    uint32_t fence_class,
+			    uint32_t flags, uint32_t *sequence,
+			    unsigned long *timeout_jiffies)
+{
+	struct drm_psb_private *dev_priv =
+		container_of(fdev, struct drm_psb_private, fdev);
+	struct msvdx_private *msvdx_priv = NULL;
+	uint32_t seq = 0;
+
+	if (!dev_priv)
+		return -EINVAL;
+
+	if (fence_class >= PSB_NUM_ENGINES)
+		return -EINVAL;
+
+	msvdx_priv = dev_priv->msvdx_private;
+
+	switch (fence_class) {
+	case PSB_ENGINE_DECODE:
+		spin_lock(&dev_priv->sequence_lock);
+		seq = dev_priv->sequence[fence_class]++;
+		/* cmds in one batch use different fence value */
+		seq <<= 4;
+		seq += msvdx_priv->num_cmd;
+		spin_unlock(&dev_priv->sequence_lock);
+		break;
+#ifndef CONFIG_DRM_VXD_BYT
+	case LNC_ENGINE_ENCODE:
+		spin_lock(&dev_priv->sequence_lock);
+		seq = dev_priv->sequence[fence_class]++;
+		spin_unlock(&dev_priv->sequence_lock);
+		break;
+#ifdef SUPPORT_VSP
+	case VSP_ENGINE_VPP:
+		spin_lock(&dev_priv->sequence_lock);
+		seq = dev_priv->sequence[fence_class]++;
+		spin_unlock(&dev_priv->sequence_lock);
+		break;
+#endif
+#endif
+	default:
+		DRM_ERROR("Unexpected fence class\n");
+		return -EINVAL;
+	}
+
+	*sequence = seq;
+	if (fence_class == PSB_ENGINE_DECODE)
+		*timeout_jiffies = jiffies + DRM_HZ;
+	else
+		*timeout_jiffies = jiffies + DRM_HZ * 3;
+
+	return 0;
+}
+
+static void psb_fence_poll(struct ttm_fence_device *fdev,
+			   uint32_t fence_class, uint32_t waiting_types)
+{
+	struct drm_psb_private *dev_priv =
+		container_of(fdev, struct drm_psb_private, fdev);
+	struct drm_device *dev = dev_priv->dev;
+	uint32_t sequence = 0;
+	struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
+#ifdef SUPPORT_VSP
+	struct vsp_private *vsp_priv = dev_priv->vsp_private;
+#endif
+	if (unlikely(!dev_priv))
+		return;
+
+	if (waiting_types == 0)
+		return;
+
+	switch (fence_class) {
+	case PSB_ENGINE_DECODE:
+		sequence = msvdx_priv->msvdx_current_sequence;
+		break;
+#ifndef CONFIG_DRM_VXD_BYT
+	case LNC_ENGINE_ENCODE:
+#ifdef MERRIFIELD
+		if (IS_MRFLD(dev))
+			sequence = *((uint32_t *)
+				((struct tng_topaz_private *)dev_priv->
+				topaz_private)->topaz_sync_addr);
+#else
+		if (IS_MDFLD(dev))
+			sequence = *((uint32_t *)
+				((struct pnw_topaz_private *)dev_priv->
+				topaz_private)->topaz_sync_addr + 1);
+#endif
+		break;
+	case VSP_ENGINE_VPP:
+#ifdef SUPPORT_VSP
+		sequence = vsp_priv->current_sequence;
+		break;
+#endif
+#endif
+	default:
+		break;
+	}
+
+	PSB_DEBUG_GENERAL("Polling fence sequence, got 0x%08x\n", sequence);
+	ttm_fence_handler(fdev, fence_class, sequence,
+			  _PSB_FENCE_TYPE_EXE, 0);
+}
+
+static void psb_fence_lockup(struct ttm_fence_object *fence,
+			     uint32_t fence_types)
+{
+	struct ttm_fence_device *fdev = fence->fdev;
+	struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
+	struct drm_psb_private *dev_priv =
+		container_of(fdev, struct drm_psb_private, fdev);
+	struct drm_device *dev = (struct drm_device *)dev_priv->dev;
+
+	if (fence->fence_class == LNC_ENGINE_ENCODE) {
+#ifndef CONFIG_DRM_VXD_BYT
+		DRM_ERROR("TOPAZ timeout (probable lockup) detected,  flush queued cmdbuf");
+
+		write_lock(&fc->lock);
+#ifdef MERRIFIELD
+		if (IS_MRFLD(dev))
+			tng_topaz_handle_timeout(fence->fdev);
+#else
+		if (IS_MDFLD(dev))
+			pnw_topaz_handle_timeout(fence->fdev);
+#endif
+		ttm_fence_handler(fence->fdev, fence->fence_class,
+				  fence->sequence, fence_types, -EBUSY);
+		write_unlock(&fc->lock);
+#endif
+	} else if (fence->fence_class == PSB_ENGINE_DECODE) {
+		struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
+
+		PSB_DEBUG_WARN("MSVDX timeout (probable lockup) detected, flush queued cmdbuf");
+#if (!defined(MERRIFIELD) && !defined(CONFIG_DRM_VXD_BYT))
+		if (psb_get_power_state(OSPM_VIDEO_DEC_ISLAND) == 0)
+			PSB_DEBUG_WARN("WARN: msvdx is power off in accident.\n");
+#endif
+		PSB_DEBUG_WARN("WARN: MSVDX_COMMS_FW_STATUS reg is 0x%x.\n",
+				PSB_RMSVDX32(MSVDX_COMMS_FW_STATUS));
+		psb_msvdx_flush_cmd_queue(dev);
+
+		write_lock(&fc->lock);
+		ttm_fence_handler(fence->fdev, fence->fence_class,
+				  fence->sequence, fence_types, -EBUSY);
+		write_unlock(&fc->lock);
+
+		if (msvdx_priv->fw_loaded_by_punit)
+			msvdx_priv->msvdx_needs_reset |= MSVDX_RESET_NEEDS_REUPLOAD_FW |
+				MSVDX_RESET_NEEDS_INIT_FW;
+		else
+			msvdx_priv->msvdx_needs_reset = 1;
+	} else if (fence->fence_class == VSP_ENGINE_VPP) {
+#ifdef SUPPORT_VSP
+		struct vsp_private *vsp_priv = dev_priv->vsp_private;
+
+		if (vsp_fence_poll(dev) &&
+		    fence->sequence <= vsp_priv->current_sequence) {
+			DRM_ERROR("pass poll when timeout vsp sequence %x, current sequence %x\n", fence->sequence, vsp_priv->current_sequence);
+			return;
+		}
+
+		DRM_ERROR("fence sequence is %x\n", fence->sequence);
+		DRM_ERROR("VSP timeout (probable lockup) detected,"
+			  " reset vsp\n");
+
+		write_lock(&fc->lock);
+		ttm_fence_handler(fence->fdev, fence->fence_class,
+				  fence->sequence, fence_types,
+				  -EBUSY);
+		write_unlock(&fc->lock);
+
+		psb_vsp_dump_info(dev_priv);
+		vsp_priv->vsp_state = VSP_STATE_HANG;
+#endif
+	} else {
+		DRM_ERROR("Unsupported fence class\n");
+	}
+}
+
+static struct ttm_fence_driver psb_ttm_fence_driver = {
+	.has_irq = NULL,
+	.emit = psb_fence_emit_sequence,
+	.flush = NULL,
+	.poll = psb_fence_poll,
+	.needed_flush = NULL,
+	.wait = NULL,
+	.signaled = NULL,
+	.lockup = psb_fence_lockup,
+};
+
+int psb_ttm_fence_device_init(struct ttm_fence_device *fdev)
+{
+	struct drm_psb_private *dev_priv =
+		container_of(fdev, struct drm_psb_private, fdev);
+	struct ttm_fence_class_init fci = {
+		.wrap_diff = (1 << 30),
+		.flush_diff = (1 << 29),
+		.sequence_mask = 0xFFFFFFFF
+	};
+
+	return ttm_fence_device_init(PSB_NUM_ENGINES,
+				     dev_priv->mem_global_ref.object,
+				     fdev, &fci, 1,
+				     &psb_ttm_fence_driver);
+}
+
+void psb_fence_handler(struct drm_device *dev, uint32_t fence_class)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	struct ttm_fence_device *fdev = &dev_priv->fdev;
+	struct ttm_fence_class_manager *fc =
+				&fdev->fence_class[fence_class];
+	unsigned long irq_flags;
+
+	write_lock_irqsave(&fc->lock, irq_flags);
+	psb_fence_poll(fdev, fence_class, fc->waiting_types);
+	write_unlock_irqrestore(&fc->lock, irq_flags);
+}
+
+void psb_fence_error(struct drm_device *dev,
+		     uint32_t fence_class,
+		     uint32_t sequence, uint32_t type, int error)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	struct ttm_fence_device *fdev = &dev_priv->fdev;
+	unsigned long irq_flags;
+	struct ttm_fence_class_manager *fc;
+
+	if (fence_class >= PSB_NUM_ENGINES) {
+		DRM_ERROR("fence_class %d is unsupported.\n", fence_class);
+		return;
+	}
+
+	fc = &fdev->fence_class[fence_class];
+
+	write_lock_irqsave(&fc->lock, irq_flags);
+	ttm_fence_handler(fdev, fence_class, sequence, type, error);
+	write_unlock_irqrestore(&fc->lock, irq_flags);
+}
diff --git a/drivers/external_drivers/intel_media/video/common/psb_mmu.c b/drivers/external_drivers/intel_media/video/common/psb_mmu.c
new file mode 100644
index 0000000..ea3fa9b
--- /dev/null
+++ b/drivers/external_drivers/intel_media/video/common/psb_mmu.c
@@ -0,0 +1,1296 @@
+/**************************************************************************
+ * Copyright (c) 2007, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+#include <drm/drmP.h>
+#ifdef CONFIG_DRM_VXD_BYT
+#include "vxd_drv.h"
+#else
+#include "psb_drv.h"
+#include "psb_reg.h"
+#endif
+#ifdef SUPPORT_VSP
+#include "vsp.h"
+#endif
+
+/*
+ * Code for the MSVDX/TOPAZ MMU:
+ */
+
+/*
+ * clflush on one processor only:
+ * clflush should apparently flush the cache line on all processors in an
+ * SMP system.
+ */
+
+/*
+ * kmap atomic:
+ * The usage of the slots must be completely encapsulated within a spinlock, and
+ * no other functions that may be using the locks for other purposed may be
+ * called from within the locked region.
+ * Since the slots are per processor, this will guarantee that we are the only
+ * user.
+ */
+
+/*
+ * TODO: Inserting ptes from an interrupt handler:
+ * This may be desirable for some SGX functionality where the GPU can fault in
+ * needed pages. For that, we need to make an atomic insert_pages function, that
+ * may fail.
+ * If it fails, the caller need to insert the page using a workqueue function,
+ * but on average it should be fast.
+ */
+
+struct psb_mmu_driver {
+	/* protects driver- and pd structures. Always take in read mode
+	 * before taking the page table spinlock.
+	 */
+	struct rw_semaphore sem;
+
+	/* protects page tables, directory tables and pt tables.
+	 * and pt structures.
+	 */
+	spinlock_t lock;
+
+	atomic_t needs_tlbflush;
+
+	uint8_t __iomem *register_map;
+	struct psb_mmu_pd *default_pd;
+	/*uint32_t bif_ctrl;*/
+	int has_clflush;
+	int clflush_add;
+	unsigned long clflush_mask;
+
+	struct drm_psb_private *dev_priv;
+	enum mmu_type_t mmu_type;
+};
+
+struct psb_mmu_pd;
+
+struct psb_mmu_pt {
+	struct psb_mmu_pd *pd;
+	uint32_t index;
+	uint32_t count;
+	struct page *p;
+	uint32_t *v;
+};
+
+struct psb_mmu_pd {
+	struct psb_mmu_driver *driver;
+	int hw_context;
+	struct psb_mmu_pt **tables;
+	struct page *p;
+	struct page *dummy_pt;
+	struct page *dummy_page;
+	uint32_t pd_mask;
+	uint32_t invalid_pde;
+	uint32_t invalid_pte;
+};
+
+static inline uint32_t psb_mmu_pt_index(uint32_t offset)
+{
+	return (offset >> PSB_PTE_SHIFT) & 0x3FF;
+}
+
+static inline uint32_t psb_mmu_pd_index(uint32_t offset)
+{
+	return offset >> PSB_PDE_SHIFT;
+}
+
+#if defined(CONFIG_X86)
+static inline void psb_clflush(volatile void *addr)
+{
+	__asm__ __volatile__("clflush (%0)\n" : : "r"(addr) : "memory");
+}
+
+static inline void psb_mmu_clflush(struct psb_mmu_driver *driver,
+				   void *addr)
+{
+	if (!driver->has_clflush)
+		return;
+
+	mb();
+	psb_clflush(addr);
+	mb();
+}
+
+static void psb_page_clflush(struct psb_mmu_driver *driver, struct page* page)
+{
+	uint32_t clflush_add = driver->clflush_add >> PAGE_SHIFT;
+	uint32_t clflush_count = PAGE_SIZE / clflush_add;
+	int i;
+	uint8_t *clf;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+	clf = kmap_atomic(page, KM_USER0);
+#else
+	clf = kmap_atomic(page);
+#endif
+	mb();
+	for (i = 0; i < clflush_count; ++i) {
+		psb_clflush(clf);
+		clf += clflush_add;
+	}
+	mb();
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+	kunmap_atomic(clf, KM_USER0);
+#else
+	kunmap_atomic(clf);
+#endif
+}
+
+static void psb_pages_clflush(struct psb_mmu_driver *driver, struct page *page[], unsigned long num_pages)
+{
+	int i;
+
+	if (!driver->has_clflush)
+		return ;
+
+	for (i = 0; i < num_pages; i++)
+		psb_page_clflush(driver, *page++);
+}
+#else
+
+static inline void psb_mmu_clflush(struct psb_mmu_driver *driver,
+				   void *addr)
+{
+	;
+}
+
+static void psb_pages_clflush(struct psb_mmu_driver *driver, struct page *page[], unsigned long num_pages)
+{
+	printk("Dumy psb_pages_clflush\n");
+}
+
+#endif
+
+static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver,
+				    int force)
+{
+	if (atomic_read(&driver->needs_tlbflush) || force) {
+		if (!driver->dev_priv)
+			goto out;
+
+		if (driver->mmu_type == IMG_MMU) {
+			atomic_set(
+				&driver->dev_priv->msvdx_mmu_invaldc,
+				1);
+#ifndef CONFIG_DRM_VXD_BYT
+			atomic_set(
+				&driver->dev_priv->topaz_mmu_invaldc,
+				1);
+#endif
+		} else if (driver->mmu_type == VSP_MMU) {
+#ifdef SUPPORT_VSP
+			atomic_set(&driver->dev_priv->vsp_mmu_invaldc, 1);
+#endif
+		} else {
+			DRM_ERROR("MMU: invalid MMU type %d\n",
+				  driver->mmu_type);
+		}
+	}
+out:
+	atomic_set(&driver->needs_tlbflush, 0);
+}
+
+#if 0
+static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force)
+{
+	down_write(&driver->sem);
+	psb_mmu_flush_pd_locked(driver, force);
+	up_write(&driver->sem);
+}
+#endif
+
+static void psb_virtual_addr_clflush(struct psb_mmu_driver *driver,
+			void *vaddr, uint32_t num_pages)
+{
+	int i, j;
+	uint8_t *clf = (uint8_t*)vaddr;
+	uint32_t clflush_add = (driver->clflush_add * sizeof(uint32_t)) >> PAGE_SHIFT;
+	uint32_t clflush_count = PAGE_SIZE / clflush_add;
+
+	DRM_INFO("clflush pages %d\n", num_pages);
+	mb();
+	for (i = 0; i < num_pages; ++i) {
+		for (j = 0; j < clflush_count; ++j) {
+			psb_clflush(clf);
+			clf += clflush_add;
+		}
+	}
+	mb();
+}
+
+void psb_mmu_flush(struct psb_mmu_driver *driver, int rc_prot)
+{
+	if (rc_prot)
+		down_write(&driver->sem);
+
+	if (!driver->dev_priv)
+		goto out;
+
+	if (driver->mmu_type == IMG_MMU) {
+		atomic_set(&driver->dev_priv->msvdx_mmu_invaldc, 1);
+#ifndef CONFIG_DRM_VXD_BYT
+		atomic_set(&driver->dev_priv->topaz_mmu_invaldc, 1);
+#endif
+	} else if (driver->mmu_type == VSP_MMU) {
+#ifdef SUPPORT_VSP
+		atomic_set(&driver->dev_priv->vsp_mmu_invaldc, 1);
+#endif
+	} else {
+		DRM_ERROR("MMU: invalid MMU type %d\n", driver->mmu_type);
+	}
+out:
+	if (rc_prot)
+		up_write(&driver->sem);
+}
+
+void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
+{
+	/*ttm_tt_cache_flush(&pd->p, 1);*/
+	psb_pages_clflush(pd->driver, &pd->p, 1);
+	down_write(&pd->driver->sem);
+	wmb();
+	psb_mmu_flush_pd_locked(pd->driver, 1);
+	pd->hw_context = hw_context;
+	up_write(&pd->driver->sem);
+
+}
+
+static inline unsigned long psb_pd_addr_end(unsigned long addr,
+		unsigned long end)
+{
+
+	addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK;
+	return (addr < end) ? addr : end;
+}
+
+static inline uint32_t psb_mmu_mask_pte(uint32_t pfn, int type)
+{
+	uint32_t mask = PSB_PTE_VALID;
+
+	if (type & PSB_MMU_CACHED_MEMORY)
+		mask |= PSB_PTE_CACHED;
+	if (type & PSB_MMU_RO_MEMORY)
+		mask |= PSB_PTE_RO;
+	if (type & PSB_MMU_WO_MEMORY)
+		mask |= PSB_PTE_WO;
+
+	return (pfn << PAGE_SHIFT) | mask;
+}
+
+#ifdef SUPPORT_VSP
+static inline uint32_t vsp_mmu_mask_pte(uint32_t pfn, int type)
+{
+	return (pfn & VSP_PDE_MASK) | VSP_PTE_VALID;
+}
+#endif
+
+struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
+				    int trap_pagefaults, int invalid_type) {
+	struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL);
+	uint32_t *v;
+	int i;
+
+	if (!pd)
+		return NULL;
+
+	pd->p = alloc_page(GFP_DMA32);
+	if (!pd->p)
+		goto out_err1;
+	pd->dummy_pt = alloc_page(GFP_DMA32);
+	if (!pd->dummy_pt)
+		goto out_err2;
+	pd->dummy_page = alloc_page(GFP_DMA32);
+	if (!pd->dummy_page)
+		goto out_err3;
+
+	if (!trap_pagefaults) {
+		if (driver->mmu_type == IMG_MMU) {
+			pd->invalid_pde =
+				psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
+						 invalid_type);
+			pd->invalid_pte =
+				psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
+						 invalid_type);
+		} else if (driver->mmu_type == VSP_MMU) {
+#ifdef SUPPORT_VSP
+			pd->invalid_pde =
+				vsp_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
+						 invalid_type);
+			pd->invalid_pte =
+				vsp_mmu_mask_pte(page_to_pfn(pd->dummy_page),
+						 invalid_type);
+#endif
+		} else {
+			DRM_ERROR("MMU: invalid MMU type %d\n",
+				  driver->mmu_type);
+			goto out_err4;
+		}
+	} else {
+		pd->invalid_pde = 0;
+		pd->invalid_pte = 0;
+	}
+
+	v = kmap(pd->dummy_pt);
+	if (!v)
+		goto out_err4;
+	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
+		v[i] = pd->invalid_pte;
+
+	kunmap(pd->dummy_pt);
+
+	v = kmap(pd->p);
+	if (!v)
+		goto out_err4;
+	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
+		v[i] = pd->invalid_pde;
+
+	kunmap(pd->p);
+
+	v = kmap(pd->dummy_page);
+	if (!v)
+		goto out_err4;
+	clear_page(v);
+	kunmap(pd->dummy_page);
+
+	pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024);
+	if (!pd->tables)
+		goto out_err4;
+
+	pd->hw_context = -1;
+	pd->pd_mask = PSB_PTE_VALID;
+	pd->driver = driver;
+
+	return pd;
+
+out_err4:
+	__free_page(pd->dummy_page);
+out_err3:
+	__free_page(pd->dummy_pt);
+out_err2:
+	__free_page(pd->p);
+out_err1:
+	kfree(pd);
+	return NULL;
+}
+
+void psb_mmu_free_pt(struct psb_mmu_pt *pt)
+{
+	__free_page(pt->p);
+	kfree(pt);
+}
+
+void psb_mmu_free_pagedir(struct psb_mmu_pd *pd)
+{
+	struct psb_mmu_driver *driver = pd->driver;
+	struct psb_mmu_pt *pt;
+	int i;
+
+	down_write(&driver->sem);
+	if (pd->hw_context != -1)
+		psb_mmu_flush_pd_locked(driver, 1);
+
+	/* Should take the spinlock here, but we don't need to do that
+	   since we have the semaphore in write mode. */
+
+	for (i = 0; i < 1024; ++i) {
+		pt = pd->tables[i];
+		if (pt)
+			psb_mmu_free_pt(pt);
+	}
+
+	vfree(pd->tables);
+	__free_page(pd->dummy_page);
+	__free_page(pd->dummy_pt);
+	__free_page(pd->p);
+	kfree(pd);
+	up_write(&driver->sem);
+}
+
+static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
+{
+	struct psb_mmu_pt *pt = kmalloc(sizeof(*pt), GFP_KERNEL);
+	void *v;
+	uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT;
+	uint32_t clflush_count = PAGE_SIZE / clflush_add;
+	spinlock_t *lock = &pd->driver->lock;
+	uint8_t *clf;
+	uint32_t *ptes;
+	int i;
+
+	if (!pt)
+		return NULL;
+
+	pt->p = alloc_page(GFP_DMA32);
+	if (!pt->p) {
+		kfree(pt);
+		return NULL;
+	}
+
+	spin_lock(lock);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+	v = kmap_atomic(pt->p, KM_USER0);
+#else
+	v = kmap_atomic(pt->p);
+#endif
+	clf = (uint8_t *) v;
+	ptes = (uint32_t *) v;
+	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
+		*ptes++ = pd->invalid_pte;
+
+
+#if defined(CONFIG_X86)
+	if (pd->driver->has_clflush && pd->hw_context != -1) {
+		mb();
+		for (i = 0; i < clflush_count; ++i) {
+			psb_clflush(clf);
+			clf += clflush_add;
+		}
+		mb();
+	}
+#endif
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+	kunmap_atomic(v, KM_USER0);
+#else
+	kunmap_atomic(v);
+#endif
+	spin_unlock(lock);
+
+	pt->count = 0;
+	pt->pd = pd;
+	pt->index = 0;
+
+	return pt;
+}
+
+struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
+		unsigned long addr) {
+	uint32_t index = psb_mmu_pd_index(addr);
+	struct psb_mmu_pt *pt;
+	uint32_t *v;
+	spinlock_t *lock = &pd->driver->lock;
+	struct psb_mmu_driver *driver = pd->driver;
+
+	spin_lock(lock);
+	pt = pd->tables[index];
+	while (!pt) {
+		spin_unlock(lock);
+		pt = psb_mmu_alloc_pt(pd);
+		if (!pt)
+			return NULL;
+		spin_lock(lock);
+
+		if (pd->tables[index]) {
+			spin_unlock(lock);
+			psb_mmu_free_pt(pt);
+			spin_lock(lock);
+			pt = pd->tables[index];
+			continue;
+		}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+		v = kmap_atomic(pd->p, KM_USER0);
+#else
+		v = kmap_atomic(pd->p);
+#endif
+		pd->tables[index] = pt;
+		if (driver->mmu_type == IMG_MMU)
+			v[index] = (page_to_pfn(pt->p) << 12) |
+				pd->pd_mask;
+#ifdef SUPPORT_VSP
+		else if (driver->mmu_type == VSP_MMU)
+			v[index] = (page_to_pfn(pt->p));
+#endif
+		else
+			DRM_ERROR("MMU: invalid MMU type %d\n",
+				  driver->mmu_type);
+
+		pt->index = index;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+		kunmap_atomic((void *) v, KM_USER0);
+#else
+		kunmap_atomic((void *) v);
+#endif
+
+		if (pd->hw_context != -1) {
+			psb_mmu_clflush(pd->driver, (void *) &v[index]);
+			atomic_set(&pd->driver->needs_tlbflush, 1);
+		}
+	}
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+	pt->v = kmap_atomic(pt->p, KM_USER0);
+#else
+	pt->v = kmap_atomic(pt->p);
+#endif
+	return pt;
+}
+
+static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd,
+		unsigned long addr) {
+	uint32_t index = psb_mmu_pd_index(addr);
+	struct psb_mmu_pt *pt;
+	spinlock_t *lock = &pd->driver->lock;
+
+	spin_lock(lock);
+	pt = pd->tables[index];
+	if (!pt) {
+		spin_unlock(lock);
+		return NULL;
+	}
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+	pt->v = kmap_atomic(pt->p, KM_USER0);
+#else
+	pt->v = kmap_atomic(pt->p);
+#endif
+	return pt;
+}
+
+static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
+{
+	struct psb_mmu_pd *pd = pt->pd;
+	uint32_t *v;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+	kunmap_atomic(pt->v, KM_USER0);
+#else
+	kunmap_atomic(pt->v);
+#endif
+	if (pt->count == 0) {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+		v = kmap_atomic(pd->p, KM_USER0);
+#else
+		v = kmap_atomic(pd->p);
+#endif
+		v[pt->index] = pd->invalid_pde;
+		pd->tables[pt->index] = NULL;
+
+		if (pd->hw_context != -1) {
+			psb_mmu_clflush(pd->driver,
+					(void *) &v[pt->index]);
+			atomic_set(&pd->driver->needs_tlbflush, 1);
+		}
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+		kunmap_atomic(pt->v, KM_USER0);
+#else
+		kunmap_atomic(pt->v);
+#endif
+		spin_unlock(&pd->driver->lock);
+		psb_mmu_free_pt(pt);
+		return;
+	}
+	spin_unlock(&pd->driver->lock);
+}
+
+static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt,
+				   unsigned long addr, uint32_t pte)
+{
+	pt->v[psb_mmu_pt_index(addr)] = pte;
+}
+
+static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt,
+		unsigned long addr)
+{
+	pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte;
+}
+
+#if 0
+static uint32_t psb_mmu_check_pte_locked(struct psb_mmu_pd *pd,
+		uint32_t mmu_offset)
+{
+	uint32_t *v;
+	uint32_t pfn;
+
+	v = kmap_atomic(pd->p, KM_USER0);
+	if (!v) {
+		printk(KERN_INFO "Could not kmap pde page.\n");
+		return 0;
+	}
+	pfn = v[psb_mmu_pd_index(mmu_offset)];
+	/*      printk(KERN_INFO "pde is 0x%08x\n",pfn); */
+	kunmap_atomic(v, KM_USER0);
+	if (((pfn & 0x0F) != PSB_PTE_VALID)) {
+		printk(KERN_INFO "Strange pde at 0x%08x: 0x%08x.\n",
+		       mmu_offset, pfn);
+	}
+	v = ioremap(pfn & 0xFFFFF000, 4096);
+	if (!v) {
+		printk(KERN_INFO "Could not kmap pte page.\n");
+		return 0;
+	}
+	pfn = v[psb_mmu_pt_index(mmu_offset)];
+	/* printk(KERN_INFO "pte is 0x%08x\n",pfn); */
+	iounmap(v);
+	if (((pfn & 0x0F) != PSB_PTE_VALID)) {
+		printk(KERN_INFO "Strange pte at 0x%08x: 0x%08x.\n",
+		       mmu_offset, pfn);
+	}
+	return pfn >> PAGE_SHIFT;
+}
+
+static void psb_mmu_check_mirrored_gtt(struct psb_mmu_pd *pd,
+				       uint32_t mmu_offset,
+				       uint32_t gtt_pages)
+{
+	uint32_t start;
+	uint32_t next;
+
+	printk(KERN_INFO "Checking mirrored gtt 0x%08x %d\n",
+	       mmu_offset, gtt_pages);
+	down_read(&pd->driver->sem);
+	start = psb_mmu_check_pte_locked(pd, mmu_offset);
+	mmu_offset += PAGE_SIZE;
+	gtt_pages -= 1;
+	while (gtt_pages--) {
+		next = psb_mmu_check_pte_locked(pd, mmu_offset);
+		if (next != start + 1) {
+			printk(KERN_INFO
+			       "Ptes out of order: 0x%08x, 0x%08x.\n",
+			       start, next);
+		}
+		start = next;
+		mmu_offset += PAGE_SIZE;
+	}
+	up_read(&pd->driver->sem);
+}
+
+void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd,
+			uint32_t mmu_offset, uint32_t gtt_start,
+			uint32_t gtt_pages)
+{
+	uint32_t *v;
+	uint32_t start = psb_mmu_pd_index(mmu_offset);
+	struct psb_mmu_driver *driver = pd->driver;
+	int num_pages = gtt_pages;
+
+	down_read(&driver->sem);
+	spin_lock(&driver->lock);
+
+	v = kmap_atomic(pd->p, KM_USER0);
+	v += start;
+
+	while (gtt_pages--) {
+		*v++ = gtt_start | pd->pd_mask;
+		gtt_start += PAGE_SIZE;
+	}
+
+	/*ttm_tt_cache_flush(&pd->p, num_pages);*/
+	psb_pages_clflush(pd->driver, &pd->p, num_pages);
+	kunmap_atomic(v, KM_USER0);
+	spin_unlock(&driver->lock);
+
+	if (pd->hw_context != -1)
+		atomic_set(&pd->driver->needs_tlbflush, 1);
+
+	up_read(&pd->driver->sem);
+	psb_mmu_flush_pd(pd->driver, 0);
+}
+#endif
+
+struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
+{
+	struct psb_mmu_pd *pd;
+
+	/* down_read(&driver->sem); */
+	pd = driver->default_pd;
+	/* up_read(&driver->sem); */
+
+	return pd;
+}
+
+/* Returns the physical address of the PD shared by sgx/msvdx */
+uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver)
+{
+	struct psb_mmu_pd *pd;
+
+	pd = psb_mmu_get_default_pd(driver);
+	return page_to_pfn(pd->p) << PAGE_SHIFT;
+}
+
+void psb_mmu_driver_takedown(struct psb_mmu_driver *driver)
+{
+	psb_mmu_free_pagedir(driver->default_pd);
+	kfree(driver);
+}
+
+struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
+					int trap_pagefaults,
+					int invalid_type,
+					struct drm_psb_private *dev_priv,
+					enum mmu_type_t mmu_type) {
+	struct psb_mmu_driver *driver;
+
+	driver = kmalloc(sizeof(*driver), GFP_KERNEL);
+	if (!driver)
+		return NULL;
+
+	driver->dev_priv = dev_priv;
+	driver->mmu_type = mmu_type;
+
+	driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults,
+					      invalid_type);
+	if (!driver->default_pd)
+		goto out_err1;
+
+	spin_lock_init(&driver->lock);
+	init_rwsem(&driver->sem);
+	down_write(&driver->sem);
+	driver->register_map = registers;
+	atomic_set(&driver->needs_tlbflush, 1);
+
+	driver->has_clflush = 0;
+
+#if defined(CONFIG_X86)
+	if (boot_cpu_has(X86_FEATURE_CLFLSH)) {
+		uint32_t tfms, misc, cap0, cap4, clflush_size;
+
+		/*
+		 * clflush size is determined at kernel setup for x86_64
+		 *  but not for i386. We have to do it here.
+		 */
+
+		cpuid(0x00000001, &tfms, &misc, &cap0, &cap4);
+		clflush_size = ((misc >> 8) & 0xff) * 8;
+		driver->has_clflush = 1;
+		driver->clflush_add =
+			PAGE_SIZE * clflush_size / sizeof(uint32_t);
+		driver->clflush_mask = driver->clflush_add - 1;
+		driver->clflush_mask = ~driver->clflush_mask;
+	}
+#endif
+
+	up_write(&driver->sem);
+	return driver;
+
+out_err1:
+	kfree(driver);
+	return NULL;
+}
+
+#if defined(CONFIG_X86)
+static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd,
+			       unsigned long address, uint32_t num_pages,
+			       uint32_t desired_tile_stride,
+			       uint32_t hw_tile_stride)
+{
+	struct psb_mmu_pt *pt;
+	uint32_t rows = 1;
+	uint32_t i;
+	unsigned long addr;
+	unsigned long end;
+	unsigned long next;
+	unsigned long add;
+	unsigned long row_add;
+	unsigned long clflush_add = pd->driver->clflush_add;
+	unsigned long clflush_mask = pd->driver->clflush_mask;
+
+	if (!pd->driver->has_clflush) {
+		/*ttm_tt_cache_flush(&pd->p, num_pages);*/
+		psb_pages_clflush(pd->driver, &pd->p, num_pages);
+		return;
+	}
+
+	if (hw_tile_stride)
+		rows = num_pages / desired_tile_stride;
+	else
+		desired_tile_stride = num_pages;
+
+	add = desired_tile_stride << PAGE_SHIFT;
+	row_add = hw_tile_stride << PAGE_SHIFT;
+	mb();
+	for (i = 0; i < rows; ++i) {
+		addr = address;
+		end = addr + add;
+
+		do {
+			next = psb_pd_addr_end(addr, end);
+			pt = psb_mmu_pt_map_lock(pd, addr);
+			if (!pt)
+				continue;
+			do {
+				psb_clflush(&pt->v
+					    [psb_mmu_pt_index(addr)]);
+			} while (addr +=
+					 clflush_add,
+				 (addr & clflush_mask) < next);
+
+			psb_mmu_pt_unmap_unlock(pt);
+		} while (addr = next, next != end);
+		address += row_add;
+	}
+	mb();
+}
+#else
+static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd,
+			       unsigned long address, uint32_t num_pages,
+			       uint32_t desired_tile_stride,
+			       uint32_t hw_tile_stride)
+{
+	drm_ttm_cache_flush(&pd->p, num_pages);
+}
+#endif
+
+void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
+				 unsigned long address, uint32_t num_pages)
+{
+	struct psb_mmu_pt *pt;
+	unsigned long addr;
+	unsigned long end;
+	unsigned long next;
+	unsigned long f_address = address;
+
+	down_read(&pd->driver->sem);
+
+	addr = address;
+	end = addr + (num_pages << PAGE_SHIFT);
+
+	do {
+		next = psb_pd_addr_end(addr, end);
+		pt = psb_mmu_pt_alloc_map_lock(pd, addr);
+		if (!pt)
+			goto out;
+		do {
+			psb_mmu_invalidate_pte(pt, addr);
+			--pt->count;
+		} while (addr += PAGE_SIZE, addr < next);
+		psb_mmu_pt_unmap_unlock(pt);
+
+	} while (addr = next, next != end);
+
+out:
+	if (pd->hw_context != -1)
+		psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
+
+	up_read(&pd->driver->sem);
+
+	if (pd->hw_context != -1)
+		psb_mmu_flush(pd->driver, 0);
+
+	return;
+}
+
+void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
+			  uint32_t num_pages, uint32_t desired_tile_stride,
+			  uint32_t hw_tile_stride)
+{
+	struct psb_mmu_pt *pt;
+	uint32_t rows = 1;
+	uint32_t i;
+	unsigned long addr;
+	unsigned long end;
+	unsigned long next;
+	unsigned long add;
+	unsigned long row_add;
+	unsigned long f_address = address;
+
+	if (hw_tile_stride)
+		rows = num_pages / desired_tile_stride;
+	else
+		desired_tile_stride = num_pages;
+
+	add = desired_tile_stride << PAGE_SHIFT;
+	row_add = hw_tile_stride << PAGE_SHIFT;
+
+	/* down_read(&pd->driver->sem); */
+
+	/* Make sure we only need to flush this processor's cache */
+
+	for (i = 0; i < rows; ++i) {
+
+		addr = address;
+		end = addr + add;
+
+		do {
+			next = psb_pd_addr_end(addr, end);
+			pt = psb_mmu_pt_map_lock(pd, addr);
+			if (!pt)
+				continue;
+			do {
+				psb_mmu_invalidate_pte(pt, addr);
+				--pt->count;
+
+			} while (addr += PAGE_SIZE, addr < next);
+			psb_mmu_pt_unmap_unlock(pt);
+
+		} while (addr = next, next != end);
+		address += row_add;
+	}
+	if (pd->hw_context != -1)
+		psb_mmu_flush_ptes(pd, f_address, num_pages,
+				   desired_tile_stride, hw_tile_stride);
+
+	/* up_read(&pd->driver->sem); */
+
+	if (pd->hw_context != -1)
+		psb_mmu_flush(pd->driver, 0);
+}
+
+int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
+				unsigned long address, uint32_t num_pages,
+				int type)
+{
+	struct psb_mmu_pt *pt;
+	struct psb_mmu_driver *driver = pd->driver;
+	uint32_t pte;
+	unsigned long addr;
+	unsigned long end;
+	unsigned long next;
+	unsigned long f_address = address;
+	int ret = 0;
+
+	down_read(&pd->driver->sem);
+
+	addr = address;
+	end = addr + (num_pages << PAGE_SHIFT);
+
+	do {
+		next = psb_pd_addr_end(addr, end);
+		pt = psb_mmu_pt_alloc_map_lock(pd, addr);
+		if (!pt) {
+			ret = -ENOMEM;
+			goto out;
+		}
+		do {
+			if (driver->mmu_type == IMG_MMU) {
+				pte = psb_mmu_mask_pte(start_pfn++, type);
+#ifdef SUPPORT_VSP
+			} else if (driver->mmu_type == VSP_MMU) {
+				pte = vsp_mmu_mask_pte(start_pfn++, type);
+#endif
+			} else {
+				DRM_ERROR("MMU: mmu type invalid %d\n",
+					  driver->mmu_type);
+				ret = -EINVAL;
+				goto out;
+			}
+
+			psb_mmu_set_pte(pt, addr, pte);
+			pt->count++;
+		} while (addr += PAGE_SIZE, addr < next);
+		psb_mmu_pt_unmap_unlock(pt);
+
+	} while (addr = next, next != end);
+
+out:
+	if (pd->hw_context != -1)
+		psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
+
+	up_read(&pd->driver->sem);
+
+	if (pd->hw_context != -1)
+		psb_mmu_flush(pd->driver, 1);
+
+	return ret;
+}
+
+int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
+			 unsigned long address, uint32_t num_pages,
+			 uint32_t desired_tile_stride,
+			 uint32_t hw_tile_stride, int type)
+{
+	struct psb_mmu_pt *pt;
+	struct psb_mmu_driver *driver = pd->driver;
+	uint32_t rows = 1;
+	uint32_t i;
+	uint32_t pte;
+	unsigned long addr;
+	unsigned long end;
+	unsigned long next;
+	unsigned long add;
+	unsigned long row_add;
+	unsigned long f_address = address;
+	int ret = 0;
+
+	if (hw_tile_stride) {
+		if (num_pages % desired_tile_stride != 0)
+			return -EINVAL;
+		rows = num_pages / desired_tile_stride;
+	} else {
+		desired_tile_stride = num_pages;
+	}
+
+	add = desired_tile_stride << PAGE_SHIFT;
+	row_add = hw_tile_stride << PAGE_SHIFT;
+
+	down_read(&pd->driver->sem);
+
+	for (i = 0; i < rows; ++i) {
+
+		addr = address;
+		end = addr + add;
+
+		do {
+			next = psb_pd_addr_end(addr, end);
+			pt = psb_mmu_pt_alloc_map_lock(pd, addr);
+			if (!pt) {
+				ret = -ENOMEM;
+				goto out;
+			}
+			do {
+				if (driver->mmu_type == IMG_MMU) {
+					pte = psb_mmu_mask_pte(
+						page_to_pfn(*pages++),
+						type);
+#ifdef SUPPORT_VSP
+				} else if (driver->mmu_type == VSP_MMU) {
+					pte = vsp_mmu_mask_pte(
+						page_to_pfn(*pages++),
+						type);
+#endif
+				} else {
+					DRM_ERROR("MMU: mmu type invalid %d\n",
+						  driver->mmu_type);
+					ret = -EINVAL;
+					goto out;
+				}
+
+				psb_mmu_set_pte(pt, addr, pte);
+				pt->count++;
+			} while (addr += PAGE_SIZE, addr < next);
+			psb_mmu_pt_unmap_unlock(pt);
+
+		} while (addr = next, next != end);
+
+		address += row_add;
+	}
+out:
+	if (pd->hw_context != -1)
+		psb_mmu_flush_ptes(pd, f_address, num_pages,
+				   desired_tile_stride, hw_tile_stride);
+
+	up_read(&pd->driver->sem);
+
+	if (pd->hw_context != -1)
+		psb_mmu_flush(pd->driver, 1);
+
+	return ret;
+}
+
+#if 0 /*comented out, only used in mmu test now*/
+void psb_mmu_enable_requestor(struct psb_mmu_driver *driver, uint32_t mask)
+{
+	mask &= _PSB_MMU_ER_MASK;
+	psb_iowrite32(driver,
+		      psb_ioread32(driver, PSB_CR_BIF_CTRL) & ~mask,
+		      PSB_CR_BIF_CTRL);
+	(void) psb_ioread32(driver, PSB_CR_BIF_CTRL);
+}
+
+void psb_mmu_disable_requestor(struct psb_mmu_driver *driver,
+			       uint32_t mask)
+{
+	mask &= _PSB_MMU_ER_MASK;
+	psb_iowrite32(driver, psb_ioread32(driver, PSB_CR_BIF_CTRL) | mask,
+		      PSB_CR_BIF_CTRL);
+	(void) psb_ioread32(driver, PSB_CR_BIF_CTRL);
+}
+
+int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
+			   unsigned long *pfn)
+{
+	int ret;
+	struct psb_mmu_pt *pt;
+	uint32_t tmp;
+	spinlock_t *lock = &pd->driver->lock;
+
+	down_read(&pd->driver->sem);
+	pt = psb_mmu_pt_map_lock(pd, virtual);
+	if (!pt) {
+		uint32_t *v;
+
+		spin_lock(lock);
+		v = kmap_atomic(pd->p, KM_USER0);
+		tmp = v[psb_mmu_pd_index(virtual)];
+		kunmap_atomic(v, KM_USER0);
+		spin_unlock(lock);
+
+		if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
+		    !(pd->invalid_pte & PSB_PTE_VALID)) {
+			ret = -EINVAL;
+			goto out;
+		}
+		ret = 0;
+		*pfn = pd->invalid_pte >> PAGE_SHIFT;
+		goto out;
+	}
+	tmp = pt->v[psb_mmu_pt_index(virtual)];
+	if (!(tmp & PSB_PTE_VALID)) {
+		ret = -EINVAL;
+	} else {
+		ret = 0;
+		*pfn = tmp >> PAGE_SHIFT;
+	}
+	psb_mmu_pt_unmap_unlock(pt);
+out:
+	up_read(&pd->driver->sem);
+	return ret;
+}
+
+void psb_mmu_test(struct psb_mmu_driver *driver, uint32_t offset)
+{
+	struct page *p;
+	unsigned long pfn;
+	int ret = 0;
+	struct psb_mmu_pd *pd;
+	uint32_t *v;
+	uint32_t *vmmu;
+
+	pd = driver->default_pd;
+	if (!pd)
+		printk(KERN_WARNING "Could not get default pd\n");
+
+
+	p = alloc_page(GFP_DMA32);
+
+	if (!p) {
+		printk(KERN_WARNING "Failed allocating page\n");
+		return;
+	}
+
+	v = kmap(p);
+	memset(v, 0x67, PAGE_SIZE);
+
+	pfn = (offset >> PAGE_SHIFT);
+
+	ret = psb_mmu_insert_pages(pd, &p, pfn << PAGE_SHIFT, 1, 0, 0, 0);
+	if (ret) {
+		printk(KERN_WARNING "Failed inserting mmu page\n");
+		goto out_err1;
+	}
+
+	/* Ioremap the page through the GART aperture */
+
+	vmmu = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
+	if (!vmmu) {
+		printk(KERN_WARNING "Failed ioremapping page\n");
+		goto out_err2;
+	}
+
+	/* Read from the page with mmu disabled. */
+	printk(KERN_INFO "Page first dword is 0x%08x\n", ioread32(vmmu));
+
+	/* Enable the mmu for host accesses and read again. */
+	psb_mmu_enable_requestor(driver, _PSB_MMU_ER_HOST);
+
+	printk(KERN_INFO "MMU Page first dword is (0x67676767) 0x%08x\n",
+	       ioread32(vmmu));
+	*v = 0x15243705;
+	printk(KERN_INFO "MMU Page new dword is (0x15243705) 0x%08x\n",
+	       ioread32(vmmu));
+	iowrite32(0x16243355, vmmu);
+	(void) ioread32(vmmu);
+	printk(KERN_INFO "Page new dword is (0x16243355) 0x%08x\n", *v);
+
+	printk(KERN_INFO "Int stat is 0x%08x\n",
+	       psb_ioread32(driver, PSB_CR_BIF_INT_STAT));
+	printk(KERN_INFO "Fault is 0x%08x\n",
+	       psb_ioread32(driver, PSB_CR_BIF_FAULT));
+
+	/* Disable MMU for host accesses and clear page fault register */
+	psb_mmu_disable_requestor(driver, _PSB_MMU_ER_HOST);
+	iounmap(vmmu);
+out_err2:
+	psb_mmu_remove_pages(pd, pfn << PAGE_SHIFT, 1, 0, 0);
+out_err1:
+	kunmap(p);
+	__free_page(p);
+}
+#endif
+
+/*
+void psb_mmu_pgtable_dump(struct drm_device *dev)
+{
+
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_mmu_pd *pd = psb_mmu_get_default_pd(dev_priv->mmu);
+	struct psb_mmu_pt *pt;
+	int i, j;
+	uint32_t flags;
+	uint32_t *v;
+
+	spinlock_t *lock = &pd->driver->lock;
+	down_read(&pd->driver->sem);
+	spin_lock_irqsave(lock, flags);
+	v = kmap_atomic(pd->p, KM_USER0);
+	if (!v) {
+		printk(KERN_INFO "%s: Kmap pg fail, abort\n", __func__);
+		return;
+	}
+
+	printk(KERN_INFO "%s: start dump mmu page table\n", __func__);
+	for (i = 0; i < 1024; i++) {
+		pt = pd->tables[i];
+		if (!pt) {
+			printk(KERN_INFO "pt[%d] is NULL, 0x%08x\n", i, v[i]);
+			continue;
+		}
+		printk(KERN_INFO "pt[%d] is 0x%08x\n", i, v[i]);
+		pt->v = kmap_atomic(pt->p, KM_USER0);
+		if (!(pt->v)) {
+			printk(KERN_INFO "%s: Kmap fail, abort\n", __func__);
+			break;
+		}
+		for (j = 0; j < 1024; j++) {
+			if (!(j%16))
+				printk(KERN_INFO "pte%d:", j);
+			uint32_t pte = pt->v[j];
+			printk("%08xh ", pte);
+			//if ((j%16) == 15)
+				//printk(KERN_INFO "\n");
+		}
+		kunmap_atomic(pt->v, KM_USER0);
+	}
+	spin_unlock_irqrestore(lock, flags);
+	up_read(&pd->driver->sem);
+	kunmap_atomic((void *) v, KM_USER0);
+	printk(KERN_INFO "%s: finish dump mmu page table\n", __func__);
+}
+*/
+
+int psb_ttm_bo_clflush(struct psb_mmu_driver *mmu,
+			struct ttm_buffer_object *bo)
+{
+	int ret = 0;
+	bool is_iomem;
+	void *addr;
+	struct ttm_bo_kmap_obj bo_kmap;
+
+	if (unlikely(!mmu || !bo)) {
+		DRM_ERROR("NULL pointer, mmu:%p bo:%p\n", mmu, bo);
+		return 1;
+	}
+
+	/*map surface parameters*/
+	ret = ttm_bo_kmap(bo, 0, bo->num_pages,
+                          &bo_kmap);
+        if (ret) {
+                DRM_ERROR("ttm_bo_kmap failed: %d.\n", ret);
+                return ret;
+        }
+
+	addr = (void *)ttm_kmap_obj_virtual(&bo_kmap, &is_iomem);
+	if (unlikely(!addr)) {
+		DRM_ERROR("failed to ttm_kmap_obj_virtual\n");
+		ret = 1;
+	}
+
+	psb_virtual_addr_clflush(mmu, addr, bo->num_pages);
+
+	ttm_bo_kunmap(&bo_kmap);
+	return ret;
+}
diff --git a/drivers/external_drivers/intel_media/video/common/psb_ttm_fence.c b/drivers/external_drivers/intel_media/video/common/psb_ttm_fence.c
new file mode 100644
index 0000000..891bd81
--- /dev/null
+++ b/drivers/external_drivers/intel_media/video/common/psb_ttm_fence.c
@@ -0,0 +1,629 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include "psb_ttm_fence_api.h"
+#include "psb_ttm_fence_driver.h"
+#include <linux/wait.h>
+#include <linux/sched.h>
+
+#include <drm/drmP.h>
+#include "psb_drm.h"
+/*
+ * Simple implementation for now.
+ */
+
+static void ttm_fence_lockup(struct ttm_fence_object *fence, uint32_t mask)
+{
+	struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
+
+	printk(KERN_ERR "GPU lockup dectected on engine %u "
+	       "fence type 0x%08x\n",
+	       (unsigned int)fence->fence_class, (unsigned int)mask);
+	/*
+	 * Give engines some time to idle?
+	 */
+
+	write_lock(&fc->lock);
+	ttm_fence_handler(fence->fdev, fence->fence_class,
+			  fence->sequence, mask, -EBUSY);
+	write_unlock(&fc->lock);
+}
+
+/*
+ * Convenience function to be called by fence::wait methods that
+ * need polling.
+ */
+
+int ttm_fence_wait_polling(struct ttm_fence_object *fence, bool lazy,
+			   bool interruptible, uint32_t mask)
+{
+	struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
+	const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
+	uint32_t count = 0;
+	int ret;
+	unsigned long end_jiffies = fence->timeout_jiffies;
+
+	DECLARE_WAITQUEUE(entry, current);
+	add_wait_queue(&fc->fence_queue, &entry);
+
+	ret = 0;
+
+	for (;;) {
+		__set_current_state((interruptible) ?
+				    TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
+		if (ttm_fence_object_signaled(fence, mask))
+			break;
+#if !(defined CONFIG_BOARD_MRFLD_VP || defined CONFIG_X86_MRFLD_ZILKER)
+		if (time_after_eq(jiffies, end_jiffies)) {
+			if (driver->lockup)
+				driver->lockup(fence, mask);
+			else
+				ttm_fence_lockup(fence, mask);
+			continue;
+		}
+#endif
+		if (lazy)
+			schedule_timeout(1);
+		else if ((++count & 0x0F) == 0) {
+			__set_current_state(TASK_RUNNING);
+			schedule();
+			__set_current_state((interruptible) ?
+					    TASK_INTERRUPTIBLE :
+					    TASK_UNINTERRUPTIBLE);
+		}
+		if (interruptible && signal_pending(current)) {
+			ret = -ERESTART;
+			break;
+		}
+	}
+	__set_current_state(TASK_RUNNING);
+	remove_wait_queue(&fc->fence_queue, &entry);
+	return ret;
+}
+
+/*
+ * Typically called by the IRQ handler.
+ */
+
+void ttm_fence_handler(struct ttm_fence_device *fdev, uint32_t fence_class,
+		       uint32_t sequence, uint32_t type, uint32_t error)
+{
+	int wake = 0;
+	uint32_t diff;
+	uint32_t relevant_type;
+	uint32_t new_type;
+	struct ttm_fence_class_manager *fc = &fdev->fence_class[fence_class];
+	const struct ttm_fence_driver *driver = ttm_fence_driver_from_dev(fdev);
+	struct list_head *head;
+	struct ttm_fence_object *fence, *next;
+	bool found = false;
+
+	if (list_empty(&fc->ring))
+		return;
+
+	list_for_each_entry(fence, &fc->ring, ring) {
+		diff = (sequence - fence->sequence) & fc->sequence_mask;
+		if (diff > fc->wrap_diff) {
+			found = true;
+			break;
+		}
+	}
+
+	fc->waiting_types &= ~type;
+	head = (found) ? &fence->ring : &fc->ring;
+
+	list_for_each_entry_safe_reverse(fence, next, head, ring) {
+		if (&fence->ring == &fc->ring)
+			break;
+
+		DRM_DEBUG("Fence 0x%08lx, sequence 0x%08x, type 0x%08x\n",
+			  (unsigned long)fence, fence->sequence,
+			  fence->fence_type);
+
+		if (error) {
+			fence->info.error = error;
+			fence->info.signaled_types = fence->fence_type;
+			list_del_init(&fence->ring);
+			wake = 1;
+			break;
+		}
+
+		relevant_type = type & fence->fence_type;
+		new_type = (fence->info.signaled_types | relevant_type) ^
+			   fence->info.signaled_types;
+
+		if (new_type) {
+			fence->info.signaled_types |= new_type;
+			DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n",
+				  (unsigned long)fence,
+				  fence->info.signaled_types);
+
+			if (unlikely(driver->signaled))
+				driver->signaled(fence);
+
+			if (driver->needed_flush)
+				fc->pending_flush |=
+					driver->needed_flush(fence);
+
+			if (new_type & fence->waiting_types)
+				wake = 1;
+		}
+
+		fc->waiting_types |=
+			fence->waiting_types & ~fence->info.signaled_types;
+
+		if (!(fence->fence_type & ~fence->info.signaled_types)) {
+			DRM_DEBUG("Fence completely signaled 0x%08lx\n",
+				  (unsigned long)fence);
+			list_del_init(&fence->ring);
+		}
+	}
+
+	/*
+	 * Reinstate lost waiting types.
+	 */
+
+	if ((fc->waiting_types & type) != type) {
+		head = head->prev;
+		list_for_each_entry(fence, head, ring) {
+			if (&fence->ring == &fc->ring)
+				break;
+
+			fc->waiting_types |=
+				fence->waiting_types & ~fence->info.signaled_types;
+		}
+	}
+
+	if (wake)
+		wake_up_all(&fc->fence_queue);
+}
+
+static void ttm_fence_unring(struct ttm_fence_object *fence)
+{
+	struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
+	unsigned long irq_flags;
+
+	write_lock_irqsave(&fc->lock, irq_flags);
+	list_del_init(&fence->ring);
+	write_unlock_irqrestore(&fc->lock, irq_flags);
+}
+
+bool ttm_fence_object_signaled(struct ttm_fence_object *fence, uint32_t mask)
+{
+	unsigned long flags;
+	bool signaled;
+	const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
+	struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
+
+	mask &= fence->fence_type;
+	read_lock_irqsave(&fc->lock, flags);
+	signaled = (mask & fence->info.signaled_types) == mask;
+	read_unlock_irqrestore(&fc->lock, flags);
+	if (!signaled && driver->poll) {
+		write_lock_irqsave(&fc->lock, flags);
+		driver->poll(fence->fdev, fence->fence_class, mask);
+		signaled = (mask & fence->info.signaled_types) == mask;
+		write_unlock_irqrestore(&fc->lock, flags);
+	}
+	return signaled;
+}
+
+int ttm_fence_object_flush(struct ttm_fence_object *fence, uint32_t type)
+{
+	const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
+	struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
+	unsigned long irq_flags;
+	uint32_t saved_pending_flush;
+	bool call_flush;
+
+	if (type & ~fence->fence_type) {
+		DRM_ERROR("Flush trying to extend fence type, "
+			  "0x%x, 0x%x\n", type, fence->fence_type);
+		return -EINVAL;
+	}
+
+	write_lock_irqsave(&fc->lock, irq_flags);
+	fence->waiting_types |= type;
+	fc->waiting_types |= fence->waiting_types;
+
+	/*
+	 * fence->waiting_types has changed. Determine whether
+	 * we need to initiate some kind of flush as a result of this.
+	 */
+
+	saved_pending_flush = fc->pending_flush;
+	if (driver->needed_flush)
+		fc->pending_flush |= driver->needed_flush(fence);
+
+	if (driver->poll)
+		driver->poll(fence->fdev, fence->fence_class,
+			     fence->waiting_types);
+
+	call_flush = (fc->pending_flush != 0);
+	write_unlock_irqrestore(&fc->lock, irq_flags);
+
+	if (call_flush && driver->flush)
+		driver->flush(fence->fdev, fence->fence_class);
+
+	return 0;
+}
+
+/*
+ * Make sure old fence objects are signaled before their fence sequences are
+ * wrapped around and reused.
+ */
+
+void ttm_fence_flush_old(struct ttm_fence_device *fdev,
+			 uint32_t fence_class, uint32_t sequence)
+{
+	struct ttm_fence_class_manager *fc = &fdev->fence_class[fence_class];
+	struct ttm_fence_object *fence;
+	unsigned long irq_flags;
+	const struct ttm_fence_driver *driver = fdev->driver;
+	bool call_flush;
+	uint32_t diff;
+
+	write_lock_irqsave(&fc->lock, irq_flags);
+
+	list_for_each_entry_reverse(fence, &fc->ring, ring) {
+		diff = (sequence - fence->sequence) & fc->sequence_mask;
+		if (diff <= fc->flush_diff)
+			break;
+
+		fence->waiting_types = fence->fence_type;
+		fc->waiting_types |= fence->fence_type;
+
+		if (driver->needed_flush)
+			fc->pending_flush |= driver->needed_flush(fence);
+	}
+
+	if (driver->poll)
+		driver->poll(fdev, fence_class, fc->waiting_types);
+
+	call_flush = (fc->pending_flush != 0);
+	write_unlock_irqrestore(&fc->lock, irq_flags);
+
+	if (call_flush && driver->flush)
+		driver->flush(fdev, fence->fence_class);
+
+	/*
+	 * FIXME: Shold we implement a wait here for really old fences?
+	 */
+
+}
+
+int ttm_fence_object_wait(struct ttm_fence_object *fence,
+			  bool lazy, bool interruptible, uint32_t mask)
+{
+	const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
+	struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
+	int ret = 0;
+	unsigned long timeout;
+	unsigned long cur_jiffies;
+	unsigned long to_jiffies;
+
+	if (mask & ~fence->fence_type) {
+		DRM_ERROR("Wait trying to extend fence type"
+			  " 0x%08x 0x%08x\n", mask, fence->fence_type);
+		BUG();
+		return -EINVAL;
+	}
+
+	if (driver->wait)
+		return driver->wait(fence, lazy, interruptible, mask);
+
+	ttm_fence_object_flush(fence, mask);
+retry:
+	if (!driver->has_irq ||
+	    driver->has_irq(fence->fdev, fence->fence_class, mask)) {
+
+		cur_jiffies = jiffies;
+		to_jiffies = fence->timeout_jiffies;
+#if !(defined CONFIG_BOARD_MRFLD_VP || defined CONFIG_X86_MRFLD_ZILKER)
+		timeout = (time_after(to_jiffies, cur_jiffies)) ?
+		    to_jiffies - cur_jiffies : 1;
+#else
+		timeout = 3 * DRM_HZ;
+#endif
+
+		if (interruptible)
+			ret = wait_event_interruptible_timeout
+			      (fc->fence_queue,
+			       ttm_fence_object_signaled(fence, mask), timeout);
+		else
+			ret = wait_event_timeout
+			      (fc->fence_queue,
+			       ttm_fence_object_signaled(fence, mask), timeout);
+
+		if (unlikely(ret == -ERESTARTSYS))
+			return -ERESTART;
+
+		if (unlikely(ret == 0)) {
+#if !(defined CONFIG_BOARD_MRFLD_VP || defined CONFIG_X86_MRFLD_ZILKER)
+			if (driver->lockup)
+				driver->lockup(fence, mask);
+			else
+				ttm_fence_lockup(fence, mask);
+#endif
+			goto retry;
+		}
+
+		return 0;
+	}
+
+	return ttm_fence_wait_polling(fence, lazy, interruptible, mask);
+}
+
+int ttm_fence_object_emit(struct ttm_fence_object *fence, uint32_t fence_flags,
+			  uint32_t fence_class, uint32_t type)
+{
+	const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
+	struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
+	unsigned long flags;
+	uint32_t sequence;
+	unsigned long timeout;
+	int ret;
+
+	ttm_fence_unring(fence);
+	ret = driver->emit(fence->fdev,
+			   fence_class, fence_flags, &sequence, &timeout);
+	if (ret)
+		return ret;
+
+	write_lock_irqsave(&fc->lock, flags);
+	fence->fence_class = fence_class;
+	fence->fence_type = type;
+	fence->waiting_types = 0;
+	fence->info.signaled_types = 0;
+	fence->info.error = 0;
+	fence->sequence = sequence;
+	fence->timeout_jiffies = timeout;
+	list_add_tail(&fence->ring, &fc->ring);
+	fc->latest_queued_sequence = sequence;
+	write_unlock_irqrestore(&fc->lock, flags);
+	return 0;
+}
+
+int ttm_fence_object_init(struct ttm_fence_device *fdev,
+			  uint32_t fence_class,
+			  uint32_t type,
+			  uint32_t create_flags,
+			  void (*destroy)(struct ttm_fence_object *),
+			  struct ttm_fence_object *fence)
+{
+	int ret = 0;
+
+	kref_init(&fence->kref);
+	fence->fence_class = fence_class;
+	fence->fence_type = type;
+	fence->info.signaled_types = 0;
+	fence->waiting_types = 0;
+	fence->sequence = 0;
+	fence->info.error = 0;
+	fence->fdev = fdev;
+	fence->destroy = destroy;
+	INIT_LIST_HEAD(&fence->ring);
+	atomic_inc(&fdev->count);
+
+	if (create_flags & TTM_FENCE_FLAG_EMIT) {
+		ret = ttm_fence_object_emit(fence, create_flags,
+					    fence->fence_class, type);
+	}
+
+	return ret;
+}
+
+int ttm_fence_object_create(struct ttm_fence_device *fdev,
+			    uint32_t fence_class,
+			    uint32_t type,
+			    uint32_t create_flags,
+			    struct ttm_fence_object **c_fence)
+{
+	struct ttm_fence_object *fence;
+	int ret;
+
+	ret = ttm_mem_global_alloc(fdev->mem_glob,
+				   sizeof(*fence),
+				   false,
+				   false);
+	if (unlikely(ret != 0)) {
+		printk(KERN_ERR "Out of memory creating fence object\n");
+		return ret;
+	}
+
+	fence = kmalloc(sizeof(*fence), GFP_KERNEL);
+	if (!fence) {
+		printk(KERN_ERR "Out of memory creating fence object\n");
+		ttm_mem_global_free(fdev->mem_glob, sizeof(*fence));
+		return -ENOMEM;
+	}
+
+	ret = ttm_fence_object_init(fdev, fence_class, type,
+				    create_flags, NULL, fence);
+	if (ret) {
+		ttm_fence_object_unref(&fence);
+		return ret;
+	}
+	*c_fence = fence;
+
+	return 0;
+}
+
+static void ttm_fence_object_destroy(struct kref *kref)
+{
+	struct ttm_fence_object *fence =
+		container_of(kref, struct ttm_fence_object, kref);
+	struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
+	unsigned long irq_flags;
+
+	write_lock_irqsave(&fc->lock, irq_flags);
+	list_del_init(&fence->ring);
+	write_unlock_irqrestore(&fc->lock, irq_flags);
+
+	atomic_dec(&fence->fdev->count);
+	if (fence->destroy)
+		fence->destroy(fence);
+	else {
+		ttm_mem_global_free(fence->fdev->mem_glob,
+				    sizeof(*fence));
+		kfree(fence);
+	}
+}
+
+void ttm_fence_device_release(struct ttm_fence_device *fdev)
+{
+	kfree(fdev->fence_class);
+}
+
+int
+ttm_fence_device_init(int num_classes,
+		      struct ttm_mem_global *mem_glob,
+		      struct ttm_fence_device *fdev,
+		      const struct ttm_fence_class_init *init,
+		      bool replicate_init,
+		      const struct ttm_fence_driver *driver)
+{
+	struct ttm_fence_class_manager *fc;
+	const struct ttm_fence_class_init *fci;
+	int i;
+
+	fdev->mem_glob = mem_glob;
+	fdev->fence_class = kzalloc(num_classes *
+				    sizeof(*fdev->fence_class), GFP_KERNEL);
+
+	if (unlikely(!fdev->fence_class))
+		return -ENOMEM;
+
+	fdev->num_classes = num_classes;
+	atomic_set(&fdev->count, 0);
+	fdev->driver = driver;
+
+	for (i = 0; i < fdev->num_classes; ++i) {
+		fc = &fdev->fence_class[i];
+		fci = &init[(replicate_init) ? 0 : i];
+
+		fc->wrap_diff = fci->wrap_diff;
+		fc->flush_diff = fci->flush_diff;
+		fc->sequence_mask = fci->sequence_mask;
+
+		rwlock_init(&fc->lock);
+		INIT_LIST_HEAD(&fc->ring);
+		init_waitqueue_head(&fc->fence_queue);
+	}
+
+	return 0;
+}
+
+struct ttm_fence_info ttm_fence_get_info(struct ttm_fence_object *fence)
+{
+	struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
+	struct ttm_fence_info tmp;
+	unsigned long irq_flags;
+
+	read_lock_irqsave(&fc->lock, irq_flags);
+	tmp = fence->info;
+	read_unlock_irqrestore(&fc->lock, irq_flags);
+
+	return tmp;
+}
+
+void ttm_fence_object_unref(struct ttm_fence_object **p_fence)
+{
+	struct ttm_fence_object *fence = *p_fence;
+
+	*p_fence = NULL;
+	(void)kref_put(&fence->kref, &ttm_fence_object_destroy);
+}
+
+/*
+ * Placement / BO sync object glue.
+ */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+bool ttm_fence_sync_obj_signaled(void *sync_obj, void *sync_arg)
+{
+	struct ttm_fence_object *fence = (struct ttm_fence_object *)sync_obj;
+	uint32_t fence_types = (uint32_t)(unsigned long)sync_arg;
+
+	return ttm_fence_object_signaled(fence, fence_types);
+}
+#else
+bool ttm_fence_sync_obj_signaled(void *sync_obj)
+{
+	struct ttm_fence_object *fence = (struct ttm_fence_object *)sync_obj;
+	uint32_t fence_types = _PSB_FENCE_TYPE_EXE;
+
+	return ttm_fence_object_signaled(fence, fence_types);
+}
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+int ttm_fence_sync_obj_wait(void *sync_obj, void *sync_arg,
+			    bool lazy, bool interruptible)
+{
+	struct ttm_fence_object *fence = (struct ttm_fence_object *)sync_obj;
+	uint32_t fence_types = (uint32_t)(unsigned long)sync_arg;
+
+	return ttm_fence_object_wait(fence, lazy, interruptible, fence_types);
+}
+#else
+int ttm_fence_sync_obj_wait(void *sync_obj,
+			    bool lazy, bool interruptible)
+{
+	struct ttm_fence_object *fence = (struct ttm_fence_object *)sync_obj;
+	uint32_t fence_types = _PSB_FENCE_TYPE_EXE;
+
+	return ttm_fence_object_wait(fence, lazy, interruptible, fence_types);
+}
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+int ttm_fence_sync_obj_flush(void *sync_obj, void *sync_arg)
+{
+	struct ttm_fence_object *fence = (struct ttm_fence_object *)sync_obj;
+	uint32_t fence_types = (uint32_t)(unsigned long)sync_arg;
+
+	return ttm_fence_object_flush(fence, fence_types);
+}
+#else
+int ttm_fence_sync_obj_flush(void *sync_obj)
+{
+	struct ttm_fence_object *fence = (struct ttm_fence_object *)sync_obj;
+	uint32_t fence_types = _PSB_FENCE_TYPE_EXE;
+
+	return ttm_fence_object_flush(fence, fence_types);
+}
+#endif
+
+void ttm_fence_sync_obj_unref(void **sync_obj)
+{
+	ttm_fence_object_unref((struct ttm_fence_object **)sync_obj);
+}
+
+void *ttm_fence_sync_obj_ref(void *sync_obj)
+{
+	return (void *)
+	       ttm_fence_object_ref((struct ttm_fence_object *)sync_obj);
+}
diff --git a/drivers/external_drivers/intel_media/video/common/psb_ttm_fence_api.h b/drivers/external_drivers/intel_media/video/common/psb_ttm_fence_api.h
new file mode 100644
index 0000000..3e770b1
--- /dev/null
+++ b/drivers/external_drivers/intel_media/video/common/psb_ttm_fence_api.h
@@ -0,0 +1,285 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+ */
+#ifndef _TTM_FENCE_API_H_
+#define _TTM_FENCE_API_H_
+
+#include <linux/list.h>
+#include <linux/kref.h>
+#include <linux/version.h>
+
+#define TTM_FENCE_FLAG_EMIT (1 << 0)
+#define TTM_FENCE_TYPE_EXE  (1 << 0)
+
+struct ttm_fence_device;
+
+/**
+ * struct ttm_fence_info
+ *
+ * @fence_class:    The fence class.
+ * @fence_type:     Bitfield indicating types for this fence.
+ * @signaled_types: Bitfield indicating which types are signaled.
+ * @error:          Last error reported from the device.
+ *
+ * Used as output from the ttm_fence_get_info
+ */
+
+struct ttm_fence_info {
+	uint32_t signaled_types;
+	uint32_t error;
+};
+
+/**
+ * struct ttm_fence_object
+ *
+ * @fdev:            Pointer to the fence device struct.
+ * @kref:            Holds the reference count of this fence object.
+ * @ring:            List head used for the circular list of not-completely
+ *                   signaled fences.
+ * @info:            Data for fast retrieval using the ttm_fence_get_info()
+ * function.
+ * @timeout_jiffies: Absolute jiffies value indicating when this fence
+ *                   object times out and, if waited on, calls ttm_fence_lockup
+ *                   to check for and resolve a GPU lockup.
+ * @sequence:        Fence sequence number.
+ * @waiting_types:   Types currently waited on.
+ * @destroy:         Called to free the fence object, when its refcount has
+ *                   reached zero. If NULL, kfree is used.
+ *
+ * This struct is provided in the driver interface so that drivers can
+ * derive from it and create their own fence implementation. All members
+ * are private to the fence implementation and the fence driver callbacks.
+ * Otherwise a driver may access the derived object using container_of().
+ */
+
+struct ttm_fence_object {
+	struct ttm_fence_device *fdev;
+	struct kref kref;
+	uint32_t fence_class;
+	uint32_t fence_type;
+
+	/*
+	 * The below fields are protected by the fence class
+	 * manager spinlock.
+	 */
+
+	struct list_head ring;
+	struct ttm_fence_info info;
+	unsigned long timeout_jiffies;
+	uint32_t sequence;
+	uint32_t waiting_types;
+	void (*destroy)(struct ttm_fence_object *);
+};
+
+/**
+ * ttm_fence_object_init
+ *
+ * @fdev: Pointer to a struct ttm_fence_device.
+ * @fence_class: Fence class for this fence.
+ * @type: Fence type for this fence.
+ * @create_flags: Flags indicating varios actions at init time. At this point
+ * there's only TTM_FENCE_FLAG_EMIT, which triggers a sequence emission to
+ * the command stream.
+ * @destroy: Destroy function. If NULL, kfree() is used.
+ * @fence: The struct ttm_fence_object to initialize.
+ *
+ * Initialize a pre-allocated fence object. This function, together with the
+ * destroy function makes it possible to derive driver-specific fence objects.
+ */
+
+extern int
+ttm_fence_object_init(struct ttm_fence_device *fdev,
+		      uint32_t fence_class,
+		      uint32_t type,
+		      uint32_t create_flags,
+		      void (*destroy)(struct ttm_fence_object *fence),
+		      struct ttm_fence_object *fence);
+
+/**
+ * ttm_fence_object_create
+ *
+ * @fdev: Pointer to a struct ttm_fence_device.
+ * @fence_class: Fence class for this fence.
+ * @type: Fence type for this fence.
+ * @create_flags: Flags indicating varios actions at init time. At this point
+ * there's only TTM_FENCE_FLAG_EMIT, which triggers a sequence emission to
+ * the command stream.
+ * @c_fence: On successful termination, *(@c_fence) will point to the created
+ * fence object.
+ *
+ * Create and initialize a struct ttm_fence_object. The destroy function will
+ * be set to kfree().
+ */
+
+extern int
+ttm_fence_object_create(struct ttm_fence_device *fdev,
+			uint32_t fence_class,
+			uint32_t type,
+			uint32_t create_flags,
+			struct ttm_fence_object **c_fence);
+
+/**
+ * ttm_fence_object_wait
+ *
+ * @fence: The fence object to wait on.
+ * @lazy: Allow sleeps to reduce the cpu-usage if polling.
+ * @interruptible: Sleep interruptible when waiting.
+ * @type_mask: Wait for the given type_mask to signal.
+ *
+ * Wait for a fence to signal the given type_mask. The function will
+ * perform a fence_flush using type_mask. (See ttm_fence_object_flush).
+ *
+ * Returns
+ * -ERESTART if interrupted by a signal.
+ * May return driver-specific error codes if timed-out.
+ */
+
+extern int
+ttm_fence_object_wait(struct ttm_fence_object *fence,
+		      bool lazy, bool interruptible, uint32_t type_mask);
+
+/**
+ * ttm_fence_object_flush
+ *
+ * @fence: The fence object to flush.
+ * @flush_mask: Fence types to flush.
+ *
+ * Make sure that the given fence eventually signals the
+ * types indicated by @flush_mask. Note that this may or may not
+ * map to a CPU or GPU flush.
+ */
+
+extern int
+ttm_fence_object_flush(struct ttm_fence_object *fence, uint32_t flush_mask);
+
+/**
+ * ttm_fence_get_info
+ *
+ * @fence: The fence object.
+ *
+ * Copy the info block from the fence while holding relevant locks.
+ */
+
+struct ttm_fence_info ttm_fence_get_info(struct ttm_fence_object *fence);
+
+/**
+ * ttm_fence_object_ref
+ *
+ * @fence: The fence object.
+ *
+ * Return a ref-counted pointer to the fence object indicated by @fence.
+ */
+
+static inline struct ttm_fence_object *ttm_fence_object_ref(struct
+		ttm_fence_object
+		*fence) {
+	kref_get(&fence->kref);
+	return fence;
+}
+
+/**
+ * ttm_fence_object_unref
+ *
+ * @p_fence: Pointer to a ref-counted pinter to a struct ttm_fence_object.
+ *
+ * Unreference the fence object pointed to by *(@p_fence), clearing
+ * *(p_fence).
+ */
+
+extern void ttm_fence_object_unref(struct ttm_fence_object **p_fence);
+
+/**
+ * ttm_fence_object_signaled
+ *
+ * @fence: Pointer to the struct ttm_fence_object.
+ * @mask: Type mask to check whether signaled.
+ *
+ * This function checks (without waiting) whether the fence object
+ * pointed to by @fence has signaled the types indicated by @mask,
+ * and returns 1 if true, 0 if false. This function does NOT perform
+ * an implicit fence flush.
+ */
+
+extern bool
+ttm_fence_object_signaled(struct ttm_fence_object *fence, uint32_t mask);
+
+/**
+ * ttm_fence_class
+ *
+ * @fence: Pointer to the struct ttm_fence_object.
+ *
+ * Convenience function that returns the fence class of a
+ * struct ttm_fence_object.
+ */
+
+static inline uint32_t ttm_fence_class(const struct ttm_fence_object *fence)
+{
+	return fence->fence_class;
+}
+
+/**
+ * ttm_fence_types
+ *
+ * @fence: Pointer to the struct ttm_fence_object.
+ *
+ * Convenience function that returns the fence types of a
+ * struct ttm_fence_object.
+ */
+
+static inline uint32_t ttm_fence_types(const struct ttm_fence_object *fence)
+{
+	return fence->fence_type;
+}
+
+/*
+ * The functions below are wrappers to the above functions, with
+ * similar names but with sync_obj omitted. These wrappers are intended
+ * to be plugged directly into the buffer object driver's sync object
+ * API, if the driver chooses to use ttm_fence_objects as buffer object
+ * sync objects. In the prototypes below, a sync_obj is cast to a
+ * struct ttm_fence_object, whereas a sync_arg is cast to an
+ * uint32_t representing a fence_type argument.
+ */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+extern bool ttm_fence_sync_obj_signaled(void *sync_obj, void *sync_arg);
+#else
+extern bool ttm_fence_sync_obj_signaled(void *sync_obj);
+#endif
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+extern int ttm_fence_sync_obj_wait(void *sync_obj, void *sync_arg,
+				   bool lazy, bool interruptible);
+#else
+extern int ttm_fence_sync_obj_wait(void *sync_obj,
+				   bool lazy, bool interruptible);
+#endif
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+extern int ttm_fence_sync_obj_flush(void *sync_obj, void *sync_arg);
+#else
+extern int ttm_fence_sync_obj_flush(void *sync_obj);
+#endif
+extern void ttm_fence_sync_obj_unref(void **sync_obj);
+extern void *ttm_fence_sync_obj_ref(void *sync_obj);
+
+#endif
diff --git a/drivers/external_drivers/intel_media/video/common/psb_ttm_fence_driver.h b/drivers/external_drivers/intel_media/video/common/psb_ttm_fence_driver.h
new file mode 100644
index 0000000..69518a3
--- /dev/null
+++ b/drivers/external_drivers/intel_media/video/common/psb_ttm_fence_driver.h
@@ -0,0 +1,296 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+ */
+#ifndef _TTM_FENCE_DRIVER_H_
+#define _TTM_FENCE_DRIVER_H_
+
+#include <linux/kref.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include "psb_ttm_fence_api.h"
+#include "ttm/ttm_memory.h"
+
+/** @file ttm_fence_driver.h
+ *
+ * Definitions needed for a driver implementing the
+ * ttm_fence subsystem.
+ */
+
+/**
+ * struct ttm_fence_class_manager:
+ *
+ * @wrap_diff: Sequence difference to catch 32-bit wrapping.
+ * if (seqa - seqb) > @wrap_diff, then seqa < seqb.
+ * @flush_diff: Sequence difference to trigger fence flush.
+ * if (cur_seq - seqa) > @flush_diff, then consider fence object with
+ * seqa as old an needing a flush.
+ * @sequence_mask: Mask of valid bits in a fence sequence.
+ * @lock: Lock protecting this struct as well as fence objects
+ * associated with this struct.
+ * @ring: Circular sequence-ordered list of fence objects.
+ * @pending_flush: Fence types currently needing a flush.
+ * @waiting_types: Fence types that are currently waited for.
+ * @fence_queue: Queue of waiters on fences belonging to this fence class.
+ * @latest_queued_sequence: Sequence number of the fence latest queued
+ * on the ring.
+ */
+
+struct ttm_fence_class_manager {
+
+	/*
+	 * Unprotected constant members.
+	 */
+
+	uint32_t wrap_diff;
+	uint32_t flush_diff;
+	uint32_t sequence_mask;
+
+	/*
+	 * The rwlock protects this structure as well as
+	 * the data in all fence objects belonging to this
+	 * class. This should be OK as most fence objects are
+	 * only read from once they're created.
+	 */
+
+	rwlock_t lock;
+	struct list_head ring;
+	uint32_t pending_flush;
+	uint32_t waiting_types;
+	wait_queue_head_t fence_queue;
+	uint32_t latest_queued_sequence;
+};
+
+/**
+ * struct ttm_fence_device
+ *
+ * @fence_class:  Array of fence class managers.
+ * @num_classes:  Array dimension of @fence_class.
+ * @count:        Current number of fence objects for statistics.
+ * @driver:       Driver struct.
+ *
+ * Provided in the driver interface so that the driver can derive
+ * from this struct for its driver_private, and accordingly
+ * access the driver_private from the fence driver callbacks.
+ *
+ * All members except "count" are initialized at creation and
+ * never touched after that. No protection needed.
+ *
+ * This struct is private to the fence implementation and to the fence
+ * driver callbacks, and may otherwise be used by drivers only to
+ * obtain the derived device_private object using container_of().
+ */
+
+struct ttm_fence_device {
+	struct ttm_mem_global *mem_glob;
+	struct ttm_fence_class_manager *fence_class;
+	uint32_t num_classes;
+	atomic_t count;
+	const struct ttm_fence_driver *driver;
+};
+
+/**
+ * struct ttm_fence_class_init
+ *
+ * @wrap_diff:    Fence sequence number wrap indicator. If
+ * (sequence1 - sequence2) > @wrap_diff, then sequence1 is
+ * considered to be older than sequence2.
+ * @flush_diff:   Fence sequence number flush indicator.
+ * If a non-completely-signaled fence has a fence sequence number
+ * sequence1 and (sequence1 - current_emit_sequence) > @flush_diff,
+ * the fence is considered too old and it will be flushed upon the
+ * next call of ttm_fence_flush_old(), to make sure no fences with
+ * stale sequence numbers remains unsignaled. @flush_diff should
+ * be sufficiently less than @wrap_diff.
+ * @sequence_mask: Mask with valid bits of the fence sequence
+ * number set to 1.
+ *
+ * This struct is used as input to ttm_fence_device_init.
+ */
+
+struct ttm_fence_class_init {
+	uint32_t wrap_diff;
+	uint32_t flush_diff;
+	uint32_t sequence_mask;
+};
+
+/**
+ * struct ttm_fence_driver
+ *
+ * @has_irq: Called by a potential waiter. Should return 1 if a
+ * fence object with indicated parameters is expected to signal
+ * automatically, and 0 if the fence implementation needs to
+ * repeatedly call @poll to make it signal.
+ * @emit:    Make sure a fence with the given parameters is
+ * present in the indicated command stream. Return its sequence number
+ * in "breadcrumb".
+ * @poll:    Check and report sequences of the given "fence_class"
+ *           that have signaled "types"
+ * @flush:   Make sure that the types indicated by the bitfield
+ *           ttm_fence_class_manager::pending_flush will eventually
+ *           signal. These bits have been put together using the
+ *           result from the needed_flush function described below.
+ * @needed_flush: Given the fence_class and fence_types indicated by
+ *           "fence", and the last received fence sequence of this
+ *           fence class, indicate what types need a fence flush to
+ *           signal. Return as a bitfield.
+ * @wait:    Set to non-NULL if the driver wants to override the fence
+ *           wait implementation. Return 0 on success, -EBUSY on failure,
+ *           and -ERESTART if interruptible and a signal is pending.
+ * @signaled:  Driver callback that is called whenever a
+ *           ttm_fence_object::signaled_types has changed status.
+ *           This function is called from atomic context,
+ *           with the ttm_fence_class_manager::lock held in write mode.
+ * @lockup:  Driver callback that is called whenever a wait has exceeded
+ *           the lifetime of a fence object.
+ *           If there is a GPU lockup,
+ *           this function should, if possible, reset the GPU,
+ *           call the ttm_fence_handler with an error status, and
+ *           return. If no lockup was detected, simply extend the
+ *           fence timeout_jiffies and return. The driver might
+ *           want to protect the lockup check with a mutex and cache a
+ *           non-locked-up status for a while to avoid an excessive
+ *           amount of lockup checks from every waiting thread.
+ */
+
+struct ttm_fence_driver {
+	bool (*has_irq)(struct ttm_fence_device *fdev,
+			uint32_t fence_class, uint32_t flags);
+	int (*emit)(struct ttm_fence_device *fdev,
+		    uint32_t fence_class,
+		    uint32_t flags,
+		    uint32_t *breadcrumb, unsigned long *timeout_jiffies);
+	void (*flush)(struct ttm_fence_device *fdev, uint32_t fence_class);
+	void (*poll)(struct ttm_fence_device *fdev,
+		     uint32_t fence_class, uint32_t types);
+	uint32_t(*needed_flush)
+	(struct ttm_fence_object *fence);
+	int (*wait)(struct ttm_fence_object *fence, bool lazy,
+		    bool interruptible, uint32_t mask);
+	void (*signaled)(struct ttm_fence_object *fence);
+	void (*lockup)(struct ttm_fence_object *fence, uint32_t fence_types);
+};
+
+/**
+ * function ttm_fence_device_init
+ *
+ * @num_classes:      Number of fence classes for this fence implementation.
+ * @mem_global:       Pointer to the global memory accounting info.
+ * @fdev:             Pointer to an uninitialised struct ttm_fence_device.
+ * @init:             Array of initialization info for each fence class.
+ * @replicate_init:   Use the first @init initialization info for all classes.
+ * @driver:           Driver callbacks.
+ *
+ * Initialize a struct ttm_fence_driver structure. Returns -ENOMEM if
+ * out-of-memory. Otherwise returns 0.
+ */
+extern int
+ttm_fence_device_init(int num_classes,
+		      struct ttm_mem_global *mem_glob,
+		      struct ttm_fence_device *fdev,
+		      const struct ttm_fence_class_init *init,
+		      bool replicate_init,
+		      const struct ttm_fence_driver *driver);
+
+/**
+ * function ttm_fence_device_release
+ *
+ * @fdev:             Pointer to the fence device.
+ *
+ * Release all resources held by a fence device. Note that before
+ * this function is called, the caller must have made sure all fence
+ * objects belonging to this fence device are completely signaled.
+ */
+
+extern void ttm_fence_device_release(struct ttm_fence_device *fdev);
+
+/**
+ * ttm_fence_handler - the fence handler.
+ *
+ * @fdev:        Pointer to the fence device.
+ * @fence_class: Fence class that signals.
+ * @sequence:    Signaled sequence.
+ * @type:        Types that signal.
+ * @error:       Error from the engine.
+ *
+ * This function signals all fences with a sequence previous to the
+ * @sequence argument, and belonging to @fence_class. The signaled fence
+ * types are provided in @type. If error is non-zero, the error member
+ * of the fence with sequence = @sequence is set to @error. This value
+ * may be reported back to user-space, indicating, for example an illegal
+ * 3D command or illegal mpeg data.
+ *
+ * This function is typically called from the driver::poll method when the
+ * command sequence preceding the fence marker has executed. It should be
+ * called with the ttm_fence_class_manager::lock held in write mode and
+ * may be called from interrupt context.
+ */
+
+extern void
+ttm_fence_handler(struct ttm_fence_device *fdev,
+		  uint32_t fence_class,
+		  uint32_t sequence, uint32_t type, uint32_t error);
+
+/**
+ * ttm_fence_driver_from_dev
+ *
+ * @fdev:        The ttm fence device.
+ *
+ * Returns a pointer to the fence driver struct.
+ */
+
+static inline const struct ttm_fence_driver *ttm_fence_driver_from_dev(
+	struct ttm_fence_device *fdev) {
+	return fdev->driver;
+}
+
+/**
+ * ttm_fence_driver
+ *
+ * @fence:        Pointer to a ttm fence object.
+ *
+ * Returns a pointer to the fence driver struct.
+ */
+
+static inline const struct ttm_fence_driver *ttm_fence_driver(struct
+		ttm_fence_object
+		*fence) {
+	return ttm_fence_driver_from_dev(fence->fdev);
+}
+
+/**
+ * ttm_fence_fc
+ *
+ * @fence:        Pointer to a ttm fence object.
+ *
+ * Returns a pointer to the struct ttm_fence_class_manager for the
+ * fence class of @fence.
+ */
+
+static inline struct ttm_fence_class_manager *ttm_fence_fc(struct
+		ttm_fence_object
+		*fence) {
+	return &fence->fdev->fence_class[fence->fence_class];
+}
+
+#endif
diff --git a/drivers/external_drivers/intel_media/video/common/psb_ttm_fence_user.c b/drivers/external_drivers/intel_media/video/common/psb_ttm_fence_user.c
new file mode 100644
index 0000000..c333788
--- /dev/null
+++ b/drivers/external_drivers/intel_media/video/common/psb_ttm_fence_user.c
@@ -0,0 +1,236 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include <drm/drmP.h>
+#include "psb_ttm_fence_user.h"
+#include "ttm/ttm_object.h"
+#include "psb_ttm_fence_driver.h"
+#include "psb_ttm_userobj_api.h"
+
+/**
+ * struct ttm_fence_user_object
+ *
+ * @base:    The base object used for user-space visibility and refcounting.
+ *
+ * @fence:   The fence object itself.
+ *
+ */
+
+struct ttm_fence_user_object {
+	struct ttm_base_object base;
+	struct ttm_fence_object fence;
+};
+
+static struct ttm_fence_user_object *ttm_fence_user_object_lookup(
+	struct ttm_object_file *tfile,
+	uint32_t handle) {
+	struct ttm_base_object *base;
+
+	base = ttm_base_object_lookup(tfile, handle);
+	if (unlikely(base == NULL)) {
+		printk(KERN_ERR "Invalid fence handle 0x%08lx\n",
+		       (unsigned long)handle);
+		return NULL;
+	}
+
+	if (unlikely(base->object_type != ttm_fence_type)) {
+		ttm_base_object_unref(&base);
+		printk(KERN_ERR "Invalid fence handle 0x%08lx\n",
+		       (unsigned long)handle);
+		return NULL;
+	}
+
+	return container_of(base, struct ttm_fence_user_object, base);
+}
+
+/*
+ * The fence object destructor.
+ */
+
+static void ttm_fence_user_destroy(struct ttm_fence_object *fence)
+{
+	struct ttm_fence_user_object *ufence =
+		container_of(fence, struct ttm_fence_user_object, fence);
+
+	ttm_mem_global_free(fence->fdev->mem_glob, sizeof(*ufence));
+	kfree(ufence);
+}
+
+/*
+ * The base object destructor. We basically unly unreference the
+ * attached fence object.
+ */
+
+static void ttm_fence_user_release(struct ttm_base_object **p_base)
+{
+	struct ttm_fence_user_object *ufence;
+	struct ttm_base_object *base = *p_base;
+	struct ttm_fence_object *fence;
+
+	*p_base = NULL;
+
+	if (unlikely(base == NULL))
+		return;
+
+	ufence = container_of(base, struct ttm_fence_user_object, base);
+	fence = &ufence->fence;
+	ttm_fence_object_unref(&fence);
+}
+
+int
+ttm_fence_user_create(struct ttm_fence_device *fdev,
+		      struct ttm_object_file *tfile,
+		      uint32_t fence_class,
+		      uint32_t fence_types,
+		      uint32_t create_flags,
+		      struct ttm_fence_object **fence,
+		      uint32_t *user_handle)
+{
+	int ret;
+	struct ttm_fence_object *tmp;
+	struct ttm_fence_user_object *ufence;
+
+	ret = ttm_mem_global_alloc(fdev->mem_glob,
+				   sizeof(*ufence),
+				   false,
+				   false);
+	if (unlikely(ret != 0))
+		return -ENOMEM;
+
+	ufence = kmalloc(sizeof(*ufence), GFP_KERNEL);
+	if (unlikely(ufence == NULL)) {
+		ttm_mem_global_free(fdev->mem_glob, sizeof(*ufence));
+		return -ENOMEM;
+	}
+
+	ret = ttm_fence_object_init(fdev,
+				    fence_class,
+				    fence_types, create_flags,
+				    &ttm_fence_user_destroy, &ufence->fence);
+
+	if (unlikely(ret != 0))
+		goto out_err0;
+
+	/*
+	 * One fence ref is held by the fence ptr we return.
+	 * The other one by the base object. Need to up the
+	 * fence refcount before we publish this object to
+	 * user-space.
+	 */
+
+	tmp = ttm_fence_object_ref(&ufence->fence);
+	ret = ttm_base_object_init(tfile, &ufence->base,
+				   false, ttm_fence_type,
+				   &ttm_fence_user_release, NULL);
+
+	if (unlikely(ret != 0))
+		goto out_err1;
+
+	*fence = &ufence->fence;
+	*user_handle = ufence->base.hash.key;
+
+	return 0;
+out_err1:
+	ttm_fence_object_unref(&tmp);
+	tmp = &ufence->fence;
+	ttm_fence_object_unref(&tmp);
+	return ret;
+out_err0:
+	ttm_mem_global_free(fdev->mem_glob, sizeof(*ufence));
+	kfree(ufence);
+	return ret;
+}
+
+int ttm_fence_signaled_ioctl(struct ttm_object_file *tfile, void *data)
+{
+	int ret;
+	union ttm_fence_signaled_arg *arg = data;
+	struct ttm_fence_object *fence;
+	struct ttm_fence_info info;
+	struct ttm_fence_user_object *ufence;
+	struct ttm_base_object *base;
+	ret = 0;
+
+	ufence = ttm_fence_user_object_lookup(tfile, arg->req.handle);
+	if (unlikely(ufence == NULL))
+		return -EINVAL;
+
+	fence = &ufence->fence;
+
+	if (arg->req.flush) {
+		ret = ttm_fence_object_flush(fence, arg->req.fence_type);
+		if (unlikely(ret != 0))
+			goto out;
+	}
+
+	info = ttm_fence_get_info(fence);
+	arg->rep.signaled_types = info.signaled_types;
+	arg->rep.fence_error = info.error;
+
+out:
+	base = &ufence->base;
+	ttm_base_object_unref(&base);
+	return ret;
+}
+
+int ttm_fence_finish_ioctl(struct ttm_object_file *tfile, void *data)
+{
+	int ret;
+	union ttm_fence_finish_arg *arg = data;
+	struct ttm_fence_user_object *ufence;
+	struct ttm_base_object *base;
+	struct ttm_fence_object *fence;
+	ret = 0;
+
+	ufence = ttm_fence_user_object_lookup(tfile, arg->req.handle);
+	if (unlikely(ufence == NULL))
+		return -EINVAL;
+
+	fence = &ufence->fence;
+
+	ret = ttm_fence_object_wait(fence,
+				    arg->req.mode & TTM_FENCE_FINISH_MODE_LAZY,
+				    true, arg->req.fence_type);
+	if (likely(ret == 0)) {
+		struct ttm_fence_info info = ttm_fence_get_info(fence);
+
+		arg->rep.signaled_types = info.signaled_types;
+		arg->rep.fence_error = info.error;
+	}
+
+	base = &ufence->base;
+	ttm_base_object_unref(&base);
+
+	return ret;
+}
+
+int ttm_fence_unref_ioctl(struct ttm_object_file *tfile, void *data)
+{
+	struct ttm_fence_unref_arg *arg = data;
+	int ret = 0;
+
+	ret = ttm_ref_object_base_unref(tfile, arg->handle, ttm_fence_type);
+	return ret;
+}
diff --git a/drivers/external_drivers/intel_media/video/common/psb_ttm_fence_user.h b/drivers/external_drivers/intel_media/video/common/psb_ttm_fence_user.h
new file mode 100644
index 0000000..fc13f89
--- /dev/null
+++ b/drivers/external_drivers/intel_media/video/common/psb_ttm_fence_user.h
@@ -0,0 +1,140 @@
+/**************************************************************************
+ *
+ * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+/*
+ * Authors
+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#ifndef TTM_FENCE_USER_H
+#define TTM_FENCE_USER_H
+
+#if !defined(__KERNEL__) && !defined(_KERNEL)
+#include <stdint.h>
+#endif
+
+#define TTM_FENCE_MAJOR 0
+#define TTM_FENCE_MINOR 1
+#define TTM_FENCE_PL    0
+#define TTM_FENCE_DATE  "080819"
+
+/**
+ * struct ttm_fence_signaled_req
+ *
+ * @handle: Handle to the fence object. Input.
+ *
+ * @fence_type: Fence types we want to flush. Input.
+ *
+ * @flush: Boolean. Flush the indicated fence_types. Input.
+ *
+ * Argument to the TTM_FENCE_SIGNALED ioctl.
+ */
+
+struct ttm_fence_signaled_req {
+	uint32_t handle;
+	uint32_t fence_type;
+	int32_t flush;
+	uint32_t pad64;
+};
+
+/**
+ * struct ttm_fence_rep
+ *
+ * @signaled_types: Fence type that has signaled.
+ *
+ * @fence_error: Command execution error.
+ * Hardware errors that are consequences of the execution
+ * of the command stream preceding the fence are reported
+ * here.
+ *
+ * Output argument to the TTM_FENCE_SIGNALED and
+ * TTM_FENCE_FINISH ioctls.
+ */
+
+struct ttm_fence_rep {
+	uint32_t signaled_types;
+	uint32_t fence_error;
+};
+
+union ttm_fence_signaled_arg {
+	struct ttm_fence_signaled_req req;
+	struct ttm_fence_rep rep;
+};
+
+/*
+ * Waiting mode flags for the TTM_FENCE_FINISH ioctl.
+ *
+ * TTM_FENCE_FINISH_MODE_LAZY: Allow for sleeps during polling
+ * wait.
+ *
+ * TTM_FENCE_FINISH_MODE_NO_BLOCK: Don't block waiting for GPU,
+ * but return -EBUSY if the buffer is busy.
+ */
+
+#define TTM_FENCE_FINISH_MODE_LAZY     (1 << 0)
+#define TTM_FENCE_FINISH_MODE_NO_BLOCK (1 << 1)
+
+/**
+ * struct ttm_fence_finish_req
+ *
+ * @handle: Handle to the fence object. Input.
+ *
+ * @fence_type: Fence types we want to finish.
+ *
+ * @mode: Wait mode.
+ *
+ * Input to the TTM_FENCE_FINISH ioctl.
+ */
+
+struct ttm_fence_finish_req {
+	uint32_t handle;
+	uint32_t fence_type;
+	uint32_t mode;
+	uint32_t pad64;
+};
+
+union ttm_fence_finish_arg {
+	struct ttm_fence_finish_req req;
+	struct ttm_fence_rep rep;
+};
+
+/**
+ * struct ttm_fence_unref_arg
+ *
+ * @handle: Handle to the fence object.
+ *
+ * Argument to the TTM_FENCE_UNREF ioctl.
+ */
+
+struct ttm_fence_unref_arg {
+	uint32_t handle;
+	uint32_t pad64;
+};
+
+/*
+ * Ioctl offsets frome extenstion start.
+ */
+
+#define TTM_FENCE_SIGNALED 0x01
+#define TTM_FENCE_FINISH   0x02
+#define TTM_FENCE_UNREF    0x03
+
+#endif
diff --git a/drivers/external_drivers/intel_media/video/common/psb_ttm_glue.c b/drivers/external_drivers/intel_media/video/common/psb_ttm_glue.c
new file mode 100644
index 0000000..18aa72a
--- /dev/null
+++ b/drivers/external_drivers/intel_media/video/common/psb_ttm_glue.c
@@ -0,0 +1,811 @@
+/**************************************************************************
+ * Copyright (c) 2008, Intel Corporation.
+ * All Rights Reserved.
+ * Copyright (c) 2008, Tungsten Graphics Inc.  Cedar Park, TX., USA.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+
+#include <drm/drmP.h>
+#ifdef CONFIG_DRM_VXD_BYT
+#include "vxd_drv.h"
+#else
+#include "psb_drv.h"
+#ifndef MERRIFIELD
+#include "pnw_topaz.h"
+#else
+#include "tng_topaz.h"
+#endif
+/*IMG Headers*/
+#include "private_data.h"
+#endif
+#include "psb_video_drv.h"
+#include "psb_ttm_userobj_api.h"
+#include <linux/io.h>
+#include <asm/intel-mid.h>
+#include "psb_msvdx.h"
+
+#ifdef SUPPORT_VSP
+#include "vsp.h"
+#endif
+
+/* IED Clean-up Handling */
+extern uint32_t g_ied_ref;
+extern uint32_t g_ied_force_clean;
+extern struct mutex g_ied_mutex;
+extern int sepapp_drm_playback(bool ied_status);
+static int ied_enabled;
+
+static void ann_rm_workaround_ctx(struct drm_psb_private *dev_priv, uint64_t ctx_type);
+static void ann_add_workaround_ctx(struct drm_psb_private *dev_priv, uint64_t ctx_type);
+
+#ifdef MERRIFIELD
+struct psb_fpriv *psb_fpriv(struct drm_file *file_priv)
+{
+	return (struct psb_fpriv *) BCVideoGetPriv(file_priv);
+}
+#endif
+
+int psb_fence_signaled_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *file_priv)
+{
+	return ttm_fence_signaled_ioctl(psb_fpriv(file_priv)->tfile, data);
+}
+
+int psb_fence_finish_ioctl(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv)
+{
+	return ttm_fence_finish_ioctl(psb_fpriv(file_priv)->tfile, data);
+}
+
+int psb_fence_unref_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv)
+{
+	return ttm_fence_unref_ioctl(psb_fpriv(file_priv)->tfile, data);
+}
+
+int psb_pl_waitidle_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv)
+{
+	return ttm_pl_waitidle_ioctl(psb_fpriv(file_priv)->tfile, data);
+}
+
+int psb_pl_setstatus_ioctl(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv)
+{
+	return ttm_pl_setstatus_ioctl(psb_fpriv(file_priv)->tfile,
+				      &psb_priv(dev)->ttm_lock, data);
+}
+
+int psb_pl_synccpu_ioctl(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	return ttm_pl_synccpu_ioctl(psb_fpriv(file_priv)->tfile, data);
+}
+
+int psb_pl_unref_ioctl(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
+{
+	return ttm_pl_unref_ioctl(psb_fpriv(file_priv)->tfile, data);
+
+}
+
+int psb_pl_reference_ioctl(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv)
+{
+	return  ttm_pl_reference_ioctl(psb_fpriv(file_priv)->tfile, data);
+
+}
+
+int psb_pl_create_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	return ttm_pl_create_ioctl(psb_fpriv(file_priv)->tfile,
+				   &dev_priv->bdev, &dev_priv->ttm_lock, data);
+}
+
+int psb_pl_ub_create_ioctl(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+
+	return ttm_pl_ub_create_ioctl(psb_fpriv(file_priv)->tfile,
+				      &dev_priv->bdev, &dev_priv->ttm_lock, data);
+}
+
+/**
+ * psb_ttm_fault - Wrapper around the ttm fault method.
+ *
+ * @vma: The struct vm_area_struct as in the vm fault() method.
+ * @vmf: The struct vm_fault as in the vm fault() method.
+ *
+ * Since ttm_fault() will reserve buffers while faulting,
+ * we need to take the ttm read lock around it, as this driver
+ * relies on the ttm_lock in write mode to exclude all threads from
+ * reserving and thus validating buffers in aperture- and memory shortage
+ * situations.
+ */
+int psb_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
+				       vma->vm_private_data;
+	struct drm_psb_private *dev_priv =
+		container_of(bo->bdev, struct drm_psb_private, bdev);
+	int ret;
+
+	ret = ttm_read_lock(&dev_priv->ttm_lock, true);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = dev_priv->ttm_vm_ops->fault(vma, vmf);
+
+	ttm_read_unlock(&dev_priv->ttm_lock);
+	return ret;
+}
+
+/*
+ssize_t psb_ttm_write(struct file *filp, const char __user *buf,
+		      size_t count, loff_t *f_pos)
+{
+	struct drm_file *file_priv = (struct drm_file *)filp->private_data;
+	struct drm_psb_private *dev_priv = psb_priv(file_priv->minor->dev);
+
+	return ttm_bo_io(&dev_priv->bdev, filp, buf, NULL, count, f_pos, 1);
+}
+
+ssize_t psb_ttm_read(struct file *filp, char __user *buf,
+		     size_t count, loff_t *f_pos)
+{
+	struct drm_file *file_priv = (struct drm_file *)filp->private_data;
+	struct drm_psb_private *dev_priv = psb_priv(file_priv->minor->dev);
+
+	return ttm_bo_io(&dev_priv->bdev, filp, NULL, buf, count, f_pos, 1);
+}
+*/
+
+static int psb_ttm_mem_global_init(struct drm_global_reference *ref)
+{
+	return ttm_mem_global_init(ref->object);
+}
+
+static void psb_ttm_mem_global_release(struct drm_global_reference *ref)
+{
+	ttm_mem_global_release(ref->object);
+}
+
+int psb_ttm_global_init(struct drm_psb_private *dev_priv)
+{
+	struct drm_global_reference *global_ref;
+	struct drm_global_reference *global;
+	int ret;
+
+	global_ref = &dev_priv->mem_global_ref;
+	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
+	global_ref->size = sizeof(struct ttm_mem_global);
+	global_ref->init = &psb_ttm_mem_global_init;
+	global_ref->release = &psb_ttm_mem_global_release;
+
+	ret = drm_global_item_ref(global_ref);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed referencing a global TTM memory object.\n");
+		return ret;
+	}
+
+	dev_priv->bo_global_ref.mem_glob = dev_priv->mem_global_ref.object;
+	global = &dev_priv->bo_global_ref.ref;
+	global->global_type = DRM_GLOBAL_TTM_BO;
+	global->size = sizeof(struct ttm_bo_global);
+	global->init = &ttm_bo_global_init;
+	global->release = &ttm_bo_global_release;
+	ret = drm_global_item_ref((struct drm_global_reference *)global);
+	if (ret != 0) {
+		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
+		drm_global_item_unref((struct drm_global_reference *)global_ref);
+		return ret;
+	}
+
+	return 0;
+}
+
+void psb_ttm_global_release(struct drm_psb_private *dev_priv)
+{
+	drm_global_item_unref(&dev_priv->mem_global_ref);
+}
+
+int psb_getpageaddrs_ioctl(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv)
+{
+	struct drm_psb_getpageaddrs_arg *arg = data;
+	struct ttm_buffer_object *bo;
+	struct ttm_tt *ttm;
+	struct page **tt_pages;
+	unsigned long i, num_pages;
+	unsigned long *p;
+
+	bo = ttm_buffer_object_lookup(psb_fpriv(file_priv)->tfile,
+				      arg->handle);
+	if (unlikely(bo == NULL)) {
+		printk(KERN_ERR
+		       "Could not find buffer object for getpageaddrs.\n");
+		return -EINVAL;
+	}
+	arg->gtt_offset = bo->offset;
+	ttm = bo->ttm;
+	num_pages = ttm->num_pages;
+	p = kzalloc(num_pages * sizeof(unsigned long), GFP_KERNEL);
+	if (unlikely(p == NULL))
+		return -ENOMEM;
+
+	tt_pages = ttm->pages;
+
+	for (i = 0; i < num_pages; i++)
+		p[i] = (unsigned long)page_to_phys(tt_pages[i]);
+
+	if (copy_to_user((void __user *)((unsigned long)arg->page_addrs),
+			 p, sizeof(unsigned long) * num_pages)) {
+		return -EFAULT;
+	}
+
+	ttm_bo_unref(&bo);
+	kfree(p);
+	return 0;
+}
+
+void psb_remove_videoctx(struct drm_psb_private *dev_priv, struct file *filp)
+{
+	struct psb_video_ctx *pos, *n;
+	struct psb_video_ctx *found_ctx = NULL;
+	struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
+	int ctx_type;
+	unsigned long irq_flags;
+
+	/* iterate to query all ctx to if there is DRM running*/
+	ied_enabled = 0;
+
+	spin_lock_irqsave(&dev_priv->video_ctx_lock, irq_flags);
+	list_for_each_entry_safe(pos, n, &dev_priv->video_ctx, head) {
+		if (pos->filp == filp) {
+			found_ctx = pos;
+			list_del(&pos->head);
+		} else {
+			if (pos->ctx_type & VA_RT_FORMAT_PROTECTED)
+				ied_enabled = 1;
+		}
+	}
+	spin_unlock_irqrestore(&dev_priv->video_ctx_lock, irq_flags);
+
+	if (found_ctx) {
+		PSB_DEBUG_PM("Video:remove context profile %llu,"
+				  " entrypoint %llu\n",
+				  (found_ctx->ctx_type >> 8) & 0xff,
+				  (found_ctx->ctx_type & 0xff));
+		if (IS_ANN(dev))
+			ann_rm_workaround_ctx(dev_priv, found_ctx->ctx_type);
+#ifndef CONFIG_DRM_VXD_BYT
+		/* if current ctx points to it, set to NULL */
+		if ((VAEntrypointEncSlice ==
+				(found_ctx->ctx_type & 0xff)
+			|| VAEntrypointEncPicture ==
+				(found_ctx->ctx_type & 0xff))
+			&& VAProfileVP8Version0_3 !=
+				((found_ctx->ctx_type >> 8) & 0xff)) {
+#ifdef MERRIFIELD
+			tng_topaz_remove_ctx(dev_priv,
+				found_ctx);
+#else
+			if (dev_priv->topaz_ctx == found_ctx) {
+				pnw_reset_fw_status(dev_priv->dev,
+					PNW_TOPAZ_END_CTX);
+				dev_priv->topaz_ctx = NULL;
+			} else {
+				PSB_DEBUG_PM("Remove a inactive "\
+						"encoding context.\n");
+			}
+#endif
+			if (dev_priv->last_topaz_ctx == found_ctx)
+				dev_priv->last_topaz_ctx = NULL;
+#ifdef SUPPORT_VSP
+		} else if (
+			(VAEntrypointVideoProc ==
+					(found_ctx->ctx_type & 0xff)
+				&& 0xff ==
+					((found_ctx->ctx_type >> 8) & 0xff))
+			|| (VAEntrypointEncSlice ==
+					(found_ctx->ctx_type & 0xff)
+				&& VAProfileVP8Version0_3 ==
+					((found_ctx->ctx_type >> 8) & 0xff))
+			) {
+			ctx_type = found_ctx->ctx_type & 0xff;
+			PSB_DEBUG_PM("Remove vsp context.\n");
+			vsp_rm_context(dev_priv->dev, filp, ctx_type);
+#endif
+		} else
+#endif
+		{
+			mutex_lock(&msvdx_priv->msvdx_mutex);
+			if (msvdx_priv->msvdx_ctx == found_ctx)
+				msvdx_priv->msvdx_ctx = NULL;
+			if (msvdx_priv->last_msvdx_ctx == found_ctx)
+				msvdx_priv->last_msvdx_ctx = NULL;
+			mutex_unlock(&msvdx_priv->msvdx_mutex);
+		}
+
+		kfree(found_ctx);
+		#if (defined CONFIG_GFX_RTPM) && (!defined MERRIFIELD)
+		psb_ospm_post_power_down();
+		#endif
+	}
+}
+
+static struct psb_video_ctx *psb_find_videoctx(struct drm_psb_private *dev_priv,
+						struct file *filp)
+{
+	struct psb_video_ctx *pos, *n;
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&dev_priv->video_ctx_lock, irq_flags);
+	list_for_each_entry_safe(pos, n, &dev_priv->video_ctx, head) {
+		if (pos->filp == filp) {
+			spin_unlock_irqrestore(&dev_priv->video_ctx_lock, irq_flags);
+			return pos;
+		}
+	}
+	spin_unlock_irqrestore(&dev_priv->video_ctx_lock, irq_flags);
+	return NULL;
+}
+
+static int psb_entrypoint_number(struct drm_psb_private *dev_priv,
+		uint32_t entry_type)
+{
+	struct psb_video_ctx *pos, *n;
+	int count = 0;
+	unsigned long irq_flags;
+
+	entry_type &= 0xff;
+
+	if (entry_type < VAEntrypointVLD ||
+			entry_type > VAEntrypointEncPicture) {
+		DRM_ERROR("Invalide entrypoint value %d.\n", entry_type);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&dev_priv->video_ctx_lock, irq_flags);
+	list_for_each_entry_safe(pos, n, &dev_priv->video_ctx, head) {
+		if (entry_type == (pos->ctx_type & 0xff))
+			count++;
+	}
+	spin_unlock_irqrestore(&dev_priv->video_ctx_lock, irq_flags);
+
+	PSB_DEBUG_GENERAL("There are %d active entrypoint %d.\n",
+			count, entry_type);
+	return count;
+}
+
+int psb_video_getparam(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
+{
+	struct drm_lnc_video_getparam_arg *arg = data;
+	int ret = 0;
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	drm_psb_msvdx_frame_info_t *current_frame = NULL;
+	uint32_t handle, i;
+	uint32_t device_info = 0;
+	uint64_t ctx_type = 0;
+	struct psb_video_ctx *video_ctx = NULL;
+	struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
+	unsigned long irq_flags;
+	struct file *filp = file_priv->filp;
+#if (!defined(MERRIFIELD) && !defined(CONFIG_DRM_VXD_BYT))
+	uint32_t imr_info[2];
+#endif
+#ifdef SUPPORT_VSP
+	struct vsp_private *vsp_priv = dev_priv->vsp_private;
+#endif
+#ifdef CONFIG_VIDEO_MRFLD
+	struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
+	struct psb_msvdx_ec_ctx *ec_ctx = NULL;
+#endif
+
+	switch (arg->key) {
+#if (!defined(MERRIFIELD) && !defined(CONFIG_DRM_VXD_BYT))
+	case LNC_VIDEO_GETPARAM_IMR_INFO:
+		imr_info[0] = dev_priv->imr_region_start;
+		imr_info[1] = dev_priv->imr_region_size;
+		ret = copy_to_user((void __user *)((unsigned long)arg->value),
+				   &imr_info[0],
+				   sizeof(imr_info));
+		break;
+#endif
+
+	case LNC_VIDEO_DEVICE_INFO:
+#ifdef CONFIG_DRM_VXD_BYT
+		device_info = (0xffff & dev->pci_device) << 16;
+#else
+		device_info = 0xffff & dev_priv->video_device_fuse;
+		device_info |= (0xffff & dev->pci_device) << 16;
+#endif
+		ret = copy_to_user((void __user *)((unsigned long)arg->value),
+				   &device_info, sizeof(device_info));
+		break;
+
+	case IMG_VIDEO_NEW_CONTEXT:
+		/* add video decode/encode context */
+		ret = copy_from_user(&ctx_type, (void __user *)((unsigned long)arg->value),
+				     sizeof(ctx_type));
+		if (ret)
+			break;
+
+		video_ctx = kzalloc(sizeof(struct psb_video_ctx), GFP_KERNEL);
+		if (video_ctx == NULL) {
+			ret = -ENOMEM;
+			break;
+		}
+		INIT_LIST_HEAD(&video_ctx->head);
+		video_ctx->ctx_type = ctx_type;
+		video_ctx->cur_sequence = 0xffffffff;
+		if (IS_ANN(dev))
+			ann_add_workaround_ctx(dev_priv, ctx_type);
+#ifdef CONFIG_SLICE_HEADER_PARSING
+		video_ctx->frame_end_seq = 0xffffffff;
+		if (ctx_type & VA_RT_FORMAT_PROTECTED) {
+			video_ctx->slice_extract_flag = 1;
+			video_ctx->frame_boundary = 1;
+			video_ctx->frame_end_seq = 0xffffffff;
+		}
+#endif
+		video_ctx->filp = file_priv->filp;
+		spin_lock_irqsave(&dev_priv->video_ctx_lock, irq_flags);
+		list_add(&video_ctx->head, &dev_priv->video_ctx);
+		spin_unlock_irqrestore(&dev_priv->video_ctx_lock, irq_flags);
+#ifndef CONFIG_DRM_VXD_BYT
+#ifndef MERRIFIELD
+		if (IS_MDFLD(dev_priv->dev) &&
+				(VAEntrypointEncSlice ==
+				 (ctx_type & 0xff)))
+			pnw_reset_fw_status(dev_priv->dev,
+				PNW_TOPAZ_START_CTX);
+#endif
+
+#ifdef SUPPORT_VSP
+		if ((VAEntrypointVideoProc == (ctx_type & 0xff)
+				&& 0xff == ((ctx_type >> 8) & 0xff))
+			|| (VAEntrypointEncSlice == (ctx_type & 0xff)
+				&& VAProfileVP8Version0_3 ==
+					((ctx_type >> 8) & 0xff))) {
+			ret = vsp_new_context(dev, filp, ctx_type & 0xff);
+			if (ret)
+				break;
+
+			if (unlikely(vsp_priv->fw_loaded == 0)) {
+				ret = tng_securefw(dev, "vsp", "VSP", TNG_IMR11L_MSG_REGADDR);
+				if (ret != 0) {
+					DRM_ERROR("VSP: failed to init firmware\n");
+					break;
+				}
+			}
+            break;
+		}
+#endif
+#endif
+		PSB_DEBUG_INIT("Video:add ctx profile 0x%llx, entry 0x%llx.\n",
+					((ctx_type >> 8) & 0xff),
+					(ctx_type & 0xff));
+		PSB_DEBUG_INIT("Video:add context protected 0x%llx.\n",
+					(ctx_type & VA_RT_FORMAT_PROTECTED));
+		if (ctx_type & VA_RT_FORMAT_PROTECTED)
+			ied_enabled = 1;
+		else if ((ctx_type & VAEntrypointVLD) &&
+				(ctx_type & PSB_SURFACE_UNAVAILABLE)) {
+			mutex_lock(&g_ied_mutex);
+			if (g_ied_ref > 0) {
+				DRM_INFO("Video: create context without surface, ied_ref: %d\n", g_ied_ref);
+			}
+			mutex_unlock(&g_ied_mutex);
+		}
+		else {
+			mutex_lock(&g_ied_mutex);
+			DRM_INFO("Video: ied_ref: %d\n", g_ied_ref);
+			while (g_ied_ref) {
+				ret = sepapp_drm_playback(false);
+				if (ret) {
+					DRM_ERROR("IED Clean-up \
+						Failed:0x%x\n", ret);
+					break;
+				}
+				g_ied_ref--;
+			}
+			mutex_unlock(&g_ied_mutex);
+		}
+		break;
+	case IMG_VIDEO_RM_CONTEXT:
+		psb_remove_videoctx(dev_priv, file_priv->filp);
+		break;
+	case IMG_VIDEO_UPDATE_CONTEXT:
+		ret = copy_from_user(&ctx_type,
+				(void __user *)((unsigned long)arg->value),
+				sizeof(ctx_type));
+		if (ret)
+			break;
+		video_ctx = psb_find_videoctx(dev_priv, file_priv->filp);
+		if (video_ctx) {
+			PSB_DEBUG_GENERAL(
+				"Video: update video ctx old value 0x%llx\n",
+				video_ctx->ctx_type);
+			if (video_ctx->ctx_type != ctx_type) {
+#ifdef CONFIG_SLICE_HEADER_PARSING
+				if ((ctx_type & VA_RT_FORMAT_PROTECTED) &&
+					!(video_ctx->ctx_type & VA_RT_FORMAT_PROTECTED)) {
+					video_ctx->slice_extract_flag = 1;
+					video_ctx->frame_boundary = 1;
+					video_ctx->frame_end_seq = 0xffffffff;
+					ied_enabled = 1;
+
+					mutex_lock(&g_ied_mutex);
+					DRM_INFO("Video: ied_ref: %d\n", g_ied_ref);
+					if (g_ied_ref == 0) {
+						ret = sepapp_drm_playback(true);
+						if (ret) {
+							DRM_ERROR("IED enable in update context failed:0x%x\n", ret);
+						}
+						g_ied_ref++;
+					}
+					mutex_unlock(&g_ied_mutex);
+				}
+				if (!(ctx_type & VA_RT_FORMAT_PROTECTED)) {
+					mutex_lock(&g_ied_mutex);
+					DRM_INFO("Video: ied_ref: %d\n", g_ied_ref);
+					while (g_ied_ref) {
+						ret = sepapp_drm_playback(false);
+						if (ret) {
+							DRM_ERROR("IED Clean-up failed:0x%x\n", ret);
+							break;
+						}
+						g_ied_ref--;
+					}
+					mutex_unlock(&g_ied_mutex);
+				}
+#endif
+			}
+			video_ctx->ctx_type = ctx_type;
+			PSB_DEBUG_GENERAL(
+				"Video: update video ctx new value 0x%llx\n",
+				video_ctx->ctx_type);
+		} else
+			PSB_DEBUG_GENERAL(
+				"Video:fail to find context profile 0x%llx, entrypoint 0x%llx",
+				(ctx_type >> 8), (ctx_type & 0xff));
+		break;
+	case IMG_VIDEO_DECODE_STATUS:
+#ifdef CONFIG_VIDEO_MRFLD
+		if (msvdx_priv->host_be_opp_enabled) {
+			/*get the right frame_info struct for current surface*/
+			ret = copy_from_user(&handle,
+					     (void __user *)((unsigned long)arg->arg), 4);
+			if (ret)
+				break;
+
+			for (i = 0; i < MAX_DECODE_BUFFERS; i++) {
+				if (msvdx_priv->frame_info[i].handle == handle) {
+					current_frame = &msvdx_priv->frame_info[i];
+					break;
+				}
+			}
+			if (!current_frame) {
+				DRM_ERROR("MSVDX: didn't find frame_info which matched the surface_id. \n");
+				ret = -EFAULT;
+				break;
+			}
+			ret = copy_to_user((void __user *)((unsigned long)arg->value),
+					   &current_frame->fw_status, sizeof(current_frame->fw_status));
+		} else
+#endif
+		{
+			ret = copy_to_user((void __user *)((unsigned long)arg->value),
+					   &msvdx_priv->decoding_err, sizeof(msvdx_priv->decoding_err));
+		}
+		break;
+#ifdef CONFIG_VIDEO_MRFLD
+	case IMG_VIDEO_MB_ERROR:
+		/*get the right frame_info struct for current surface*/
+		ret = copy_from_user(&handle,
+			(void __user *)((unsigned long)arg->arg), 4);
+		if (ret)
+			break;
+
+		PSB_DEBUG_GENERAL(
+			"query surface (handle 0x%08x) decode error\n",
+			handle);
+
+		if (msvdx_priv->msvdx_ec_ctx[0] == NULL) {
+			PSB_DEBUG_GENERAL(
+				"Video: ec contexts are initilized\n");
+			return -EFAULT;
+		}
+
+		for (i = 0; i < PSB_MAX_EC_INSTANCE; i++)
+			if (msvdx_priv->msvdx_ec_ctx[i]->tfile == tfile)
+				ec_ctx = msvdx_priv->msvdx_ec_ctx[i];
+
+		if (!ec_ctx) {
+			PSB_DEBUG_GENERAL(
+				"Video: no ec context found\n");
+			return -EFAULT;
+		}
+
+		if (ec_ctx->cur_frame_info &&
+			ec_ctx->cur_frame_info->handle == handle) {
+			ret = copy_to_user(
+				(void __user *)((unsigned long)arg->value),
+				&(ec_ctx->cur_frame_info->decode_status),
+				sizeof(drm_psb_msvdx_decode_status_t));
+			PSB_DEBUG_GENERAL(
+			"surface is cur_frame, fault region num is %d\n",
+			ec_ctx->cur_frame_info->decode_status.num_region);
+			break;
+		}
+		for (i = 0; i < MAX_DECODE_BUFFERS; i++)
+			if (ec_ctx->frame_info[i].handle == handle) {
+				ret = copy_to_user(
+				(void __user *)((unsigned long)arg->value),
+				&(ec_ctx->frame_info[i].decode_status),
+				sizeof(drm_psb_msvdx_decode_status_t));
+				PSB_DEBUG_GENERAL(
+					"find surface with index %d, \
+					fault region num is %d \n",
+			i, ec_ctx->frame_info[i].decode_status.num_region);
+				break;
+			}
+
+		if (i >= MAX_DECODE_BUFFERS)
+			PSB_DEBUG_GENERAL(
+			    "could not find handle 0x%08x in ctx\n", handle);
+
+		break;
+#endif
+
+	case IMG_VIDEO_SET_DISPLAYING_FRAME:
+		ret = copy_from_user(&msvdx_priv->displaying_frame,
+				(void __user *)((unsigned long)arg->value),
+				sizeof(msvdx_priv->displaying_frame));
+		break;
+	case IMG_VIDEO_GET_DISPLAYING_FRAME:
+		ret = copy_to_user((void __user *)((unsigned long)arg->value),
+				&msvdx_priv->displaying_frame,
+				sizeof(msvdx_priv->displaying_frame));
+		break;
+
+#ifndef CONFIG_DRM_VXD_BYT
+	case IMG_VIDEO_GET_HDMI_STATE:
+		ret = copy_to_user((void __user *)((unsigned long)arg->value),
+				&hdmi_state,
+				sizeof(hdmi_state));
+		break;
+	case IMG_VIDEO_SET_HDMI_STATE:
+		if (!hdmi_state) {
+			PSB_DEBUG_ENTRY(
+				"wait 100ms for kernel hdmi pipe ready.\n");
+			msleep(100);
+		}
+		if (dev_priv->bhdmiconnected)
+			hdmi_state = (int)arg->value;
+		else
+			PSB_DEBUG_ENTRY(
+				"skip hdmi_state setting, for unplugged.\n");
+
+		PSB_DEBUG_ENTRY("%s, set hdmi_state = %d\n",
+				 __func__, hdmi_state);
+		break;
+#endif
+	case PNW_VIDEO_QUERY_ENTRY:
+		ret = copy_from_user(&handle,
+				(void __user *)((unsigned long)arg->arg),
+				sizeof(handle));
+		if (ret)
+			break;
+		/*Return the number of active entries*/
+		i = psb_entrypoint_number(dev_priv, handle);
+		ret = copy_to_user((void __user *)
+				((unsigned long)arg->value),
+				&i, sizeof(i));
+		break;
+#if (!defined(MERRIFIELD) && !defined(CONFIG_DRM_VXD_BYT))
+	case IMG_VIDEO_IED_STATE:
+		if (IS_MDFLD(dev)) {
+			int enabled = dev_priv->ied_enabled ? 1 : 0;
+			ret = copy_to_user((void __user *)
+				((unsigned long)arg->value),
+				&enabled, sizeof(enabled));
+		} else {
+			DRM_ERROR("IMG_VIDEO_IED_EANBLE error.\n");
+			return -EFAULT;
+		}
+		break;
+#endif
+	default:
+		ret = -EFAULT;
+		break;
+	}
+
+	if (ret) {
+		DRM_ERROR("%s: failed to call sub-ioctl 0x%llu",
+			__func__, arg->key);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/* SLC bug: there is green corruption for vc1 decode if width is not 64 aligned
+ * Recode the number of VC1 ctx whose width is not 64 aligned
+ */
+static void ann_add_workaround_ctx(struct drm_psb_private *dev_priv, uint64_t ctx_type)
+{
+
+	struct msvdx_private *msvdx_priv;
+	int profile = (ctx_type >> 8) & 0xff;
+
+	if (profile != VAProfileVC1Simple &&
+	    profile != VAProfileVC1Main &&
+	    profile != VAProfileVC1Advanced)
+		return;
+
+	if (unlikely(!dev_priv))
+		return;
+
+	msvdx_priv = dev_priv->msvdx_private;
+	if (unlikely(!msvdx_priv))
+		return;
+
+	PSB_DEBUG_GENERAL(
+	    "add vc1 ctx, ctx_type is 0x%llx\n",
+		ctx_type);
+
+	/* ctx_type >> 32 is width_in_mb */
+	if ((ctx_type >> 32) % 4) {
+		atomic_inc(&msvdx_priv->vc1_workaround_ctx);
+		PSB_DEBUG_GENERAL("add workaround ctx %p in ctx\n", msvdx_priv);
+	}
+}
+
+static void ann_rm_workaround_ctx(struct drm_psb_private *dev_priv, uint64_t ctx_type)
+{
+
+	struct msvdx_private *msvdx_priv;
+	int profile = (ctx_type >> 8) & 0xff;
+
+	if (profile != VAProfileVC1Simple &&
+	    profile != VAProfileVC1Main &&
+	    profile != VAProfileVC1Advanced)
+		return;
+
+	if (unlikely(!dev_priv))
+		return;
+
+	msvdx_priv = dev_priv->msvdx_private;
+	if (unlikely(!msvdx_priv))
+		return;
+
+	PSB_DEBUG_GENERAL(
+	    "rm vc1 ctx, ctx_type is 0x%llx\n",
+		ctx_type);
+	/* ctx_type >> 32 is width_in_mb */
+	if ((ctx_type >> 32) % 4) {
+		atomic_dec(&msvdx_priv->vc1_workaround_ctx);
+		PSB_DEBUG_GENERAL("dec workaround ctx %p in ctx\n", msvdx_priv);
+	}
+}
diff --git a/drivers/external_drivers/intel_media/video/common/psb_ttm_placement_user.c b/drivers/external_drivers/intel_media/video/common/psb_ttm_placement_user.c
new file mode 100644
index 0000000..bf5a5ef
--- /dev/null
+++ b/drivers/external_drivers/intel_media/video/common/psb_ttm_placement_user.c
@@ -0,0 +1,951 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include "psb_ttm_placement_user.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_object.h"
+#include "psb_ttm_userobj_api.h"
+#include "ttm/ttm_lock.h"
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/version.h>
+#include <linux/dma-buf.h>
+#include "drmP.h"
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+#include "drm.h"
+#else
+#include <uapi/drm/drm.h>
+#endif
+#ifdef CONFIG_ION
+#include "psb_drv.h"
+#include "psb_msvdx.h"
+#endif
+
+struct ttm_bo_user_object {
+	struct ttm_base_object base;
+	struct ttm_buffer_object bo;
+};
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0))
+static size_t pl_bo_size;
+#endif
+
+static uint32_t psb_busy_prios[] = {
+	TTM_PL_FLAG_TT | TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED,
+	TTM_PL_FLAG_PRIV0, /* CI */
+	TTM_PL_FLAG_PRIV2, /* IMR */
+	TTM_PL_FLAG_PRIV1, /* DRM_PSB_MEM_MMU */
+	TTM_PL_FLAG_SYSTEM
+};
+
+const struct ttm_placement default_placement = {0, 0, 0, NULL, 5, psb_busy_prios};
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0))
+static size_t ttm_pl_size(struct ttm_bo_device *bdev, unsigned long num_pages)
+{
+	size_t page_array_size =
+		(num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;
+
+	if (unlikely(pl_bo_size == 0)) {
+		pl_bo_size = bdev->glob->ttm_bo_extra_size +
+			     ttm_round_pot(sizeof(struct ttm_bo_user_object));
+	}
+
+	return bdev->glob->ttm_bo_size + 2 * page_array_size;
+}
+#endif
+
+static struct ttm_bo_user_object *ttm_bo_user_lookup(struct ttm_object_file
+		*tfile, uint32_t handle) {
+	struct ttm_base_object *base;
+
+	base = ttm_base_object_lookup(tfile, handle);
+	if (unlikely(base == NULL)) {
+		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
+		       (unsigned long)handle);
+		return NULL;
+	}
+
+	if (unlikely(base->object_type != ttm_buffer_type)) {
+		ttm_base_object_unref(&base);
+		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
+		       (unsigned long)handle);
+		return NULL;
+	}
+
+	return container_of(base, struct ttm_bo_user_object, base);
+}
+
+struct ttm_buffer_object *ttm_buffer_object_lookup(struct ttm_object_file
+		*tfile, uint32_t handle) {
+	struct ttm_bo_user_object *user_bo;
+	struct ttm_base_object *base;
+
+	user_bo = ttm_bo_user_lookup(tfile, handle);
+	if (unlikely(user_bo == NULL))
+		return NULL;
+
+	(void)ttm_bo_reference(&user_bo->bo);
+	base = &user_bo->base;
+	ttm_base_object_unref(&base);
+	return &user_bo->bo;
+}
+
+static void ttm_bo_user_destroy(struct ttm_buffer_object *bo)
+{
+	struct ttm_bo_user_object *user_bo =
+		container_of(bo, struct ttm_bo_user_object, bo);
+
+	ttm_mem_global_free(bo->glob->mem_glob, bo->acc_size);
+	kfree(user_bo);
+}
+
+#ifndef CONFIG_ION
+/* This is used for sg_table which is derived from user-pointer */
+static void ttm_tt_free_user_pages(struct ttm_buffer_object *bo)
+{
+	struct page *page;
+	struct page **pages = NULL;
+	int i, ret;
+/*
+	struct page **pages_to_wb;
+
+	pages_to_wb = kmalloc(ttm->num_pages * sizeof(struct page *),
+			GFP_KERNEL);
+
+	if (pages_to_wb && ttm->caching_state != tt_cached) {
+		int num_pages_wb = 0;
+
+		for (i = 0; i < ttm->num_pages; ++i) {
+			page = ttm->pages[i];
+			if (page == NULL)
+				continue;
+			pages_to_wb[num_pages_wb++] = page;
+		}
+
+		if (set_pages_array_wb(pages_to_wb, num_pages_wb))
+			printk(KERN_ERR TTM_PFX "Failed to set pages to wb\n");
+
+	} else if (NULL == pages_to_wb) {
+		printk(KERN_ERR TTM_PFX
+		       "Failed to allocate memory for set wb operation.\n");
+	}
+
+*/
+	pages = kzalloc(bo->num_pages * sizeof(struct page *), GFP_KERNEL);
+	if (unlikely(pages == NULL)) {
+		printk(KERN_ERR "TTM bo free: kzalloc failed\n");
+		return ;
+	}
+
+	ret = drm_prime_sg_to_page_addr_arrays(bo->sg, pages,
+						 NULL, bo->num_pages);
+	if (ret) {
+		printk(KERN_ERR "sg to pages: kzalloc failed\n");
+		return ;
+	}
+
+	for (i = 0; i < bo->num_pages; ++i) {
+		page = pages[i];
+		if (page == NULL)
+			continue;
+
+		put_page(page);
+	}
+	/* kfree(pages_to_wb); */
+	kfree(pages);
+}
+#endif
+
+/* This is used for sg_table which is derived from user-pointer */
+static void ttm_ub_bo_user_destroy(struct ttm_buffer_object *bo)
+{
+	struct ttm_bo_user_object *user_bo =
+		container_of(bo, struct ttm_bo_user_object, bo);
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 3, 0))
+#ifdef CONFIG_ION
+	struct list_head *list, *next;
+	struct psb_ion_buffer *pIonBuf;
+	struct drm_psb_private *dev_priv;
+	struct msvdx_private *msvdx_priv;
+	struct ttm_bo_device *bdev = bo->bdev;
+
+	dev_priv = container_of(bdev, struct drm_psb_private, bdev);
+	if (dev_priv == NULL)
+		printk(KERN_ERR "failed to get valid dev_priv\n");
+	msvdx_priv = dev_priv->msvdx_private;
+	if (msvdx_priv == NULL)
+		printk(KERN_ERR "failed to get valid msvdx_priv\n");
+
+	list_for_each_safe(list, next, &msvdx_priv->ion_buffers_list) {
+		pIonBuf = list_entry(list, struct psb_ion_buffer, head);
+		if (pIonBuf->sg == bo->sg) {
+			list_del(&pIonBuf->head);
+			dma_buf_unmap_attachment(pIonBuf->psAttachment,
+						pIonBuf->sg, DMA_NONE);
+			dma_buf_detach(pIonBuf->psDmaBuf,
+					pIonBuf->psAttachment);
+			dma_buf_put(pIonBuf->psDmaBuf);
+			kfree(pIonBuf);
+			pIonBuf = NULL;
+			break;
+		}
+	}
+	bo->sg = NULL;
+#else
+	if (bo->sg) {
+		ttm_tt_free_user_pages(bo);
+		sg_free_table(bo->sg);
+		kfree(bo->sg);
+		bo->sg = NULL;
+	}
+#endif
+#endif
+
+	ttm_mem_global_free(bo->glob->mem_glob, bo->acc_size);
+	kfree(user_bo);
+}
+
+static void ttm_bo_user_release(struct ttm_base_object **p_base)
+{
+	struct ttm_bo_user_object *user_bo;
+	struct ttm_base_object *base = *p_base;
+	struct ttm_buffer_object *bo;
+
+	*p_base = NULL;
+
+	if (unlikely(base == NULL))
+		return;
+
+	user_bo = container_of(base, struct ttm_bo_user_object, base);
+	bo = &user_bo->bo;
+	ttm_bo_unref(&bo);
+}
+
+static void ttm_bo_user_ref_release(struct ttm_base_object *base,
+				    enum ttm_ref_type ref_type)
+{
+	struct ttm_bo_user_object *user_bo =
+		container_of(base, struct ttm_bo_user_object, base);
+	struct ttm_buffer_object *bo = &user_bo->bo;
+
+	switch (ref_type) {
+	case TTM_REF_SYNCCPU_WRITE:
+		ttm_bo_synccpu_write_release(bo);
+		break;
+	default:
+		BUG();
+	}
+}
+
+static void ttm_pl_fill_rep(struct ttm_buffer_object *bo,
+			    struct ttm_pl_rep *rep)
+{
+	struct ttm_bo_user_object *user_bo =
+		container_of(bo, struct ttm_bo_user_object, bo);
+
+	rep->gpu_offset = bo->offset;
+	rep->bo_size = bo->num_pages << PAGE_SHIFT;
+	rep->map_handle = bo->addr_space_offset;
+	rep->placement = bo->mem.placement;
+	rep->handle = user_bo->base.hash.key;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+	rep->sync_object_arg = (uint32_t)(unsigned long)bo->sync_obj_arg;
+#endif
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0))
+/* FIXME Copy from upstream TTM */
+static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
+				 unsigned long num_pages)
+{
+	size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
+				 PAGE_MASK;
+
+	return glob->ttm_bo_size + 2 * page_array_size;
+}
+#endif /* if (LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0)) */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0))
+/* FIXME Copy from upstream TTM "ttm_bo_create", upstream TTM does not export this, so copy it here */
+static int ttm_bo_create_private(struct ttm_bo_device *bdev,
+				 unsigned long size,
+				 enum ttm_bo_type type,
+				 struct ttm_placement *placement,
+				 uint32_t page_alignment,
+				 unsigned long buffer_start,
+				 bool interruptible,
+				 struct file *persistent_swap_storage,
+				 struct ttm_buffer_object **p_bo)
+{
+	struct ttm_buffer_object *bo;
+	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
+	int ret;
+
+	size_t acc_size =
+		ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
+	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
+	if (unlikely(ret != 0))
+		return ret;
+
+	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+
+	if (unlikely(bo == NULL)) {
+		ttm_mem_global_free(mem_glob, acc_size);
+		return -ENOMEM;
+	}
+
+	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
+			  buffer_start, interruptible,
+			  persistent_swap_storage, acc_size, NULL);
+	if (likely(ret == 0))
+		*p_bo = bo;
+
+	return ret;
+}
+#endif /* if (LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0)) */
+
+int psb_ttm_bo_check_placement(struct ttm_buffer_object *bo,
+			       struct ttm_placement *placement)
+{
+	int i;
+
+	for (i = 0; i < placement->num_placement; i++) {
+		if (!capable(CAP_SYS_ADMIN)) {
+			if (placement->placement[i] & TTM_PL_FLAG_NO_EVICT) {
+				printk(KERN_ERR TTM_PFX "Need to be root to "
+				       "modify NO_EVICT status.\n");
+				return -EINVAL;
+			}
+		}
+	}
+	for (i = 0; i < placement->num_busy_placement; i++) {
+		if (!capable(CAP_SYS_ADMIN)) {
+			if (placement->busy_placement[i] & TTM_PL_FLAG_NO_EVICT) {
+				printk(KERN_ERR TTM_PFX "Need to be root to "
+				       "modify NO_EVICT status.\n");
+				return -EINVAL;
+			}
+		}
+	}
+	return 0;
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+int ttm_buffer_object_create(struct ttm_bo_device *bdev,
+			     unsigned long size,
+			     enum ttm_bo_type type,
+			     uint32_t flags,
+			     uint32_t page_alignment,
+			     unsigned long buffer_start,
+			     bool interruptible,
+			     struct file *persistent_swap_storage,
+			     struct ttm_buffer_object **p_bo)
+{
+	struct ttm_placement placement = default_placement;
+	int ret;
+
+	if ((flags & TTM_PL_MASK_CACHING) == 0)
+		flags |= TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;
+
+	placement.num_placement = 1;
+	placement.placement = &flags;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0))
+	ret = ttm_bo_create_private(bdev, size, type, &placement,
+		page_alignment, buffer_start, interruptible,
+		persistent_swap_storage, p_bo);
+#else
+	ret = ttm_bo_create(bdev, size, type, &placement, page_alignment,
+		buffer_start, interruptible, persistent_swap_storage, p_bo);
+#endif
+
+	return ret;
+}
+#else
+int ttm_buffer_object_create(struct ttm_bo_device *bdev,
+			     unsigned long size,
+			     enum ttm_bo_type type,
+			     uint32_t flags,
+			     uint32_t page_alignment,
+			     bool interruptible,
+			     struct file *persistent_swap_storage,
+			     struct ttm_buffer_object **p_bo)
+{
+	struct ttm_placement placement = default_placement;
+	int ret;
+
+	if ((flags & TTM_PL_MASK_CACHING) == 0)
+		flags |= TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;
+
+	placement.num_placement = 1;
+	placement.placement = &flags;
+
+	ret = ttm_bo_create(bdev, size, type, &placement, page_alignment,
+		interruptible, persistent_swap_storage, p_bo);
+
+	return ret;
+}
+#endif
+
+
+int ttm_pl_create_ioctl(struct ttm_object_file *tfile,
+			struct ttm_bo_device *bdev,
+			struct ttm_lock *lock, void *data)
+{
+	union ttm_pl_create_arg *arg = data;
+	struct ttm_pl_create_req *req = &arg->req;
+	struct ttm_pl_rep *rep = &arg->rep;
+	struct ttm_buffer_object *bo;
+	struct ttm_buffer_object *tmp;
+	struct ttm_bo_user_object *user_bo;
+	uint32_t flags;
+	int ret = 0;
+	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
+	struct ttm_placement placement = default_placement;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0))
+	size_t acc_size =
+		ttm_pl_size(bdev, (req->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
+#else
+	size_t acc_size = ttm_bo_acc_size(bdev, req->size,
+		sizeof(struct ttm_buffer_object));
+#endif
+	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
+	if (unlikely(ret != 0))
+		return ret;
+
+	flags = req->placement;
+	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
+	if (unlikely(user_bo == NULL)) {
+		ttm_mem_global_free(mem_glob, acc_size);
+		return -ENOMEM;
+	}
+
+	bo = &user_bo->bo;
+	ret = ttm_read_lock(lock, true);
+	if (unlikely(ret != 0)) {
+		ttm_mem_global_free(mem_glob, acc_size);
+		kfree(user_bo);
+		return ret;
+	}
+
+	placement.num_placement = 1;
+	placement.placement = &flags;
+
+	if ((flags & TTM_PL_MASK_CACHING) == 0)
+		flags |=  TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+	ret = ttm_bo_init(bdev, bo, req->size,
+			  ttm_bo_type_device, &placement,
+			  req->page_alignment, 0, true,
+			  NULL, acc_size, NULL, &ttm_bo_user_destroy);
+#else
+	ret = ttm_bo_init(bdev, bo, req->size,
+			  ttm_bo_type_device, &placement,
+			  req->page_alignment, true,
+			  NULL, acc_size, NULL, &ttm_bo_user_destroy);
+#endif
+	ttm_read_unlock(lock);
+	/*
+	 * Note that the ttm_buffer_object_init function
+	 * would've called the destroy function on failure!!
+	 */
+
+	if (unlikely(ret != 0))
+		goto out;
+
+	tmp = ttm_bo_reference(bo);
+	ret = ttm_base_object_init(tfile, &user_bo->base,
+				   flags & TTM_PL_FLAG_SHARED,
+				   ttm_buffer_type,
+				   &ttm_bo_user_release,
+				   &ttm_bo_user_ref_release);
+	if (unlikely(ret != 0))
+		goto out_err;
+
+	ret = ttm_bo_reserve(bo, true, false, false, 0);
+	if (unlikely(ret != 0))
+		goto out_err;
+	ttm_pl_fill_rep(bo, rep);
+	ttm_bo_unreserve(bo);
+	ttm_bo_unref(&bo);
+out:
+	return 0;
+out_err:
+	ttm_bo_unref(&tmp);
+	ttm_bo_unref(&bo);
+	return ret;
+}
+
+int ttm_pl_ub_create_ioctl(struct ttm_object_file *tfile,
+			   struct ttm_bo_device *bdev,
+			   struct ttm_lock *lock, void *data)
+{
+	union ttm_pl_create_ub_arg *arg = data;
+	struct ttm_pl_create_ub_req *req = &arg->req;
+	struct ttm_pl_rep *rep = &arg->rep;
+	struct ttm_buffer_object *bo;
+	struct ttm_buffer_object *tmp;
+	struct ttm_bo_user_object *user_bo;
+#ifdef CONFIG_ION
+	struct drm_psb_private *dev_priv;
+	struct msvdx_private *msvdx_priv;
+	struct dma_buf *psDmaBuf;
+	struct dma_buf_attachment *psAttachment;
+	int32_t fd = req->fd;
+	struct psb_ion_buffer *psIonBuf;
+#endif
+	uint32_t flags;
+	int ret = 0;
+	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
+	struct ttm_placement placement = default_placement;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0))
+	size_t acc_size =
+		ttm_pl_size(bdev, (req->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
+#else
+	size_t acc_size = ttm_bo_acc_size(bdev, req->size,
+		sizeof(struct ttm_buffer_object));
+	/* Handle frame buffer allocated in user space, Convert
+	   user space virtual address into pages list */
+	unsigned int page_nr = 0;
+	struct vm_area_struct *vma = NULL;
+	struct sg_table *sg = NULL;
+	unsigned long num_pages = 0;
+	struct page **pages = 0;
+	unsigned long before_flags;
+#endif
+
+	if (req->user_address & ~PAGE_MASK) {
+		printk(KERN_ERR "User pointer buffer need page alignment\n");
+		return -EFAULT;
+	}
+
+	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
+	if (unlikely(ret != 0))
+		return ret;
+
+	flags = req->placement;
+	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
+	if (unlikely(user_bo == NULL)) {
+		ttm_mem_global_free(mem_glob, acc_size);
+		return -ENOMEM;
+	}
+	ret = ttm_read_lock(lock, true);
+	if (unlikely(ret != 0)) {
+		ttm_mem_global_free(mem_glob, acc_size);
+		kfree(user_bo);
+		return ret;
+	}
+	bo = &user_bo->bo;
+
+	placement.num_placement = 1;
+	placement.placement = &flags;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0))
+#ifdef CONFIG_ION
+	if (fd >= 0) {
+		dev_priv = container_of(bdev, struct drm_psb_private, bdev);
+		if ((dev_priv == NULL) && (dev_priv->dev == NULL)) {
+			printk(KERN_ERR "failed to get dev_priv\n");
+			return -ENOMEM;
+		}
+
+		msvdx_priv = dev_priv->msvdx_private;
+		if (msvdx_priv == NULL) {
+			printk(KERN_ERR "failed to get msvdx_priv\n");
+			return -ENOMEM;
+		}
+		psDmaBuf = dma_buf_get(fd);
+		if (unlikely(psDmaBuf == NULL)) {
+			printk(KERN_ERR "failed to get DMA_BUF from Fd\n");
+			return -ENOMEM;
+		}
+		psAttachment = dma_buf_attach(psDmaBuf, dev_priv->dev->dev);
+		if (unlikely(psAttachment == NULL)) {
+			printk(KERN_ERR "failed to get attachment from dma_buf\n");
+			return -ENOMEM;
+		}
+		sg = dma_buf_map_attachment(psAttachment, DMA_NONE);
+		if (unlikely(sg == NULL)) {
+			printk(KERN_ERR "failed to get sg from DMA_BUF\n");
+			return -ENOMEM;
+		}
+
+		psIonBuf = kzalloc(sizeof(struct psb_ion_buffer), GFP_KERNEL);
+		psIonBuf->psDmaBuf = psDmaBuf;
+		psIonBuf->psAttachment = psAttachment;
+		psIonBuf->fd = fd;
+		psIonBuf->sg = sg;
+
+		mutex_lock(&msvdx_priv->ion_buf_list_lock);
+		list_add_tail(&psIonBuf->head, &msvdx_priv->ion_buffers_list);
+		mutex_unlock(&msvdx_priv->ion_buf_list_lock);
+	}
+	else {
+#endif
+
+	num_pages = (req->size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	pages = kzalloc(num_pages * sizeof(struct page *), GFP_KERNEL);
+	if (unlikely(pages == NULL)) {
+		printk(KERN_ERR "kzalloc pages failed\n");
+		return -ENOMEM;
+	}
+
+	down_read(&current->mm->mmap_sem);
+	vma = find_vma(current->mm, req->user_address);
+	if (unlikely(vma == NULL)) {
+		up_read(&current->mm->mmap_sem);
+		kfree(pages);
+		printk(KERN_ERR "find_vma failed\n");
+		return -EFAULT;
+	}
+	before_flags = vma->vm_flags;
+	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
+		vma->vm_flags = vma->vm_flags &
+			((~VM_IO) & (~VM_PFNMAP));
+	page_nr = get_user_pages(current, current->mm,
+				 req->user_address,
+				 (int)(num_pages), 1, 0, pages,
+				 NULL);
+	vma->vm_flags = before_flags;
+	up_read(&current->mm->mmap_sem);
+
+	/* can be written by caller, not forced */
+	if (unlikely(page_nr < num_pages)) {
+		kfree(pages);
+		pages = 0;
+		printk(KERN_ERR "get_user_pages err.\n");
+		return -ENOMEM;
+	}
+	sg = drm_prime_pages_to_sg(pages, num_pages);
+	if (unlikely(sg == NULL)) {
+		kfree(pages);
+		printk(KERN_ERR "drm_prime_pages_to_sg err.\n");
+		return -ENOMEM;
+	}
+	kfree(pages);
+      }
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0))
+	ret = ttm_bo_init(bdev,
+			  bo,
+			  req->size,
+			  ttm_bo_type_user,
+			  &placement,
+			  req->page_alignment,
+			  req->user_address,
+			  true,
+			  NULL,
+			  acc_size,
+			  NULL,
+			  &ttm_bo_user_destroy);
+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+	ret = ttm_bo_init(bdev,
+			  bo,
+			  req->size,
+			  ttm_bo_type_sg,
+			  &placement,
+			  req->page_alignment,
+			  req->user_address,
+			  true,
+			  NULL,
+			  acc_size,
+			  sg,
+			  &ttm_ub_bo_user_destroy);
+#else
+	ret = ttm_bo_init(bdev,
+			  bo,
+			  req->size,
+			  ttm_bo_type_sg,
+			  &placement,
+			  req->page_alignment,
+			  true,
+			  NULL,
+			  acc_size,
+			  sg,
+			  &ttm_ub_bo_user_destroy);
+#endif
+
+	/*
+	 * Note that the ttm_buffer_object_init function
+	 * would've called the destroy function on failure!!
+	 */
+	ttm_read_unlock(lock);
+	if (unlikely(ret != 0))
+		goto out;
+
+	tmp = ttm_bo_reference(bo);
+	ret = ttm_base_object_init(tfile, &user_bo->base,
+				   flags & TTM_PL_FLAG_SHARED,
+				   ttm_buffer_type,
+				   &ttm_bo_user_release,
+				   &ttm_bo_user_ref_release);
+	if (unlikely(ret != 0))
+		goto out_err;
+
+	ret = ttm_bo_reserve(bo, true, false, false, 0);
+	if (unlikely(ret != 0))
+		goto out_err;
+	ttm_pl_fill_rep(bo, rep);
+	ttm_bo_unreserve(bo);
+	ttm_bo_unref(&bo);
+out:
+	return 0;
+out_err:
+	ttm_bo_unref(&tmp);
+	ttm_bo_unref(&bo);
+	return ret;
+}
+
+int ttm_pl_reference_ioctl(struct ttm_object_file *tfile, void *data)
+{
+	union ttm_pl_reference_arg *arg = data;
+	struct ttm_pl_rep *rep = &arg->rep;
+	struct ttm_bo_user_object *user_bo;
+	struct ttm_buffer_object *bo;
+	struct ttm_base_object *base;
+	int ret;
+
+	user_bo = ttm_bo_user_lookup(tfile, arg->req.handle);
+	if (unlikely(user_bo == NULL)) {
+		printk(KERN_ERR "Could not reference buffer object.\n");
+		return -EINVAL;
+	}
+
+	bo = &user_bo->bo;
+	ret = ttm_ref_object_add(tfile, &user_bo->base, TTM_REF_USAGE, NULL);
+	if (unlikely(ret != 0)) {
+		printk(KERN_ERR
+		       "Could not add a reference to buffer object.\n");
+		goto out;
+	}
+
+	ret = ttm_bo_reserve(bo, true, false, false, 0);
+	if (unlikely(ret != 0))
+		goto out;
+	ttm_pl_fill_rep(bo, rep);
+	ttm_bo_unreserve(bo);
+
+out:
+	base = &user_bo->base;
+	ttm_base_object_unref(&base);
+	return ret;
+}
+
+int ttm_pl_unref_ioctl(struct ttm_object_file *tfile, void *data)
+{
+	struct ttm_pl_reference_req *arg = data;
+
+	return ttm_ref_object_base_unref(tfile, arg->handle, TTM_REF_USAGE);
+}
+
+int ttm_pl_synccpu_ioctl(struct ttm_object_file *tfile, void *data)
+{
+	struct ttm_pl_synccpu_arg *arg = data;
+	struct ttm_bo_user_object *user_bo;
+	struct ttm_buffer_object *bo;
+	struct ttm_base_object *base;
+	bool existed;
+	int ret;
+
+	switch (arg->op) {
+	case TTM_PL_SYNCCPU_OP_GRAB:
+		user_bo = ttm_bo_user_lookup(tfile, arg->handle);
+		if (unlikely(user_bo == NULL)) {
+			printk(KERN_ERR
+			       "Could not find buffer object for synccpu.\n");
+			return -EINVAL;
+		}
+		bo = &user_bo->bo;
+		base = &user_bo->base;
+		ret = ttm_bo_synccpu_write_grab(bo,
+						arg->access_mode &
+						TTM_PL_SYNCCPU_MODE_NO_BLOCK);
+		if (unlikely(ret != 0)) {
+			ttm_base_object_unref(&base);
+			goto out;
+		}
+		ret = ttm_ref_object_add(tfile, &user_bo->base,
+					 TTM_REF_SYNCCPU_WRITE, &existed);
+		if (existed || ret != 0)
+			ttm_bo_synccpu_write_release(bo);
+		ttm_base_object_unref(&base);
+		break;
+	case TTM_PL_SYNCCPU_OP_RELEASE:
+		ret = ttm_ref_object_base_unref(tfile, arg->handle,
+						TTM_REF_SYNCCPU_WRITE);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+out:
+	return ret;
+}
+
+int ttm_pl_setstatus_ioctl(struct ttm_object_file *tfile,
+			   struct ttm_lock *lock, void *data)
+{
+	union ttm_pl_setstatus_arg *arg = data;
+	struct ttm_pl_setstatus_req *req = &arg->req;
+	struct ttm_pl_rep *rep = &arg->rep;
+	struct ttm_buffer_object *bo;
+	struct ttm_bo_device *bdev;
+	struct ttm_placement placement = default_placement;
+	uint32_t flags[2];
+	int ret;
+
+	bo = ttm_buffer_object_lookup(tfile, req->handle);
+	if (unlikely(bo == NULL)) {
+		printk(KERN_ERR
+		       "Could not find buffer object for setstatus.\n");
+		return -EINVAL;
+	}
+
+	bdev = bo->bdev;
+
+	ret = ttm_read_lock(lock, true);
+	if (unlikely(ret != 0))
+		goto out_err0;
+
+	ret = ttm_bo_reserve(bo, true, false, false, 0);
+	if (unlikely(ret != 0))
+		goto out_err1;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+	ret = ttm_bo_wait_cpu(bo, false);
+	if (unlikely(ret != 0))
+		goto out_err2;
+#endif
+
+	flags[0] = req->set_placement;
+	flags[1] = req->clr_placement;
+
+	placement.num_placement = 2;
+	placement.placement = flags;
+
+	/* spin_lock(&bo->lock); */ /* Already get reserve lock */
+
+	ret = psb_ttm_bo_check_placement(bo, &placement);
+	if (unlikely(ret != 0))
+		goto out_err2;
+
+	placement.num_placement = 1;
+	flags[0] = (req->set_placement | bo->mem.placement) & ~req->clr_placement;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+	ret = ttm_bo_validate(bo, &placement, true, false, false);
+#else
+	ret = ttm_bo_validate(bo, &placement, true, false);
+#endif
+	if (unlikely(ret != 0))
+		goto out_err2;
+
+	ttm_pl_fill_rep(bo, rep);
+out_err2:
+	/* spin_unlock(&bo->lock); */
+	ttm_bo_unreserve(bo);
+out_err1:
+	ttm_read_unlock(lock);
+out_err0:
+	ttm_bo_unref(&bo);
+	return ret;
+}
+
+static int psb_ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible,
+					bool no_wait)
+{
+	int ret;
+
+	while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
+		if (no_wait)
+			return -EBUSY;
+		else if (interruptible) {
+			ret = wait_event_interruptible
+			      (bo->event_queue, atomic_read(&bo->reserved) == 0);
+			if (unlikely(ret != 0))
+				return -ERESTART;
+		} else {
+			wait_event(bo->event_queue,
+				   atomic_read(&bo->reserved) == 0);
+		}
+	}
+	return 0;
+}
+
+static void psb_ttm_bo_unblock_reservation(struct ttm_buffer_object *bo)
+{
+	atomic_set(&bo->reserved, 0);
+	wake_up_all(&bo->event_queue);
+}
+
+int ttm_pl_waitidle_ioctl(struct ttm_object_file *tfile, void *data)
+{
+	struct ttm_pl_waitidle_arg *arg = data;
+	struct ttm_buffer_object *bo;
+	int ret;
+
+	bo = ttm_buffer_object_lookup(tfile, arg->handle);
+	if (unlikely(bo == NULL)) {
+		printk(KERN_ERR "Could not find buffer object for waitidle.\n");
+		return -EINVAL;
+	}
+
+	ret =
+		psb_ttm_bo_block_reservation(bo, true,
+					     arg->mode & TTM_PL_WAITIDLE_MODE_NO_BLOCK);
+	if (unlikely(ret != 0))
+		goto out;
+	spin_lock(&bo->bdev->fence_lock);
+	ret = ttm_bo_wait(bo,
+			  arg->mode & TTM_PL_WAITIDLE_MODE_LAZY,
+			  true, arg->mode & TTM_PL_WAITIDLE_MODE_NO_BLOCK);
+	spin_unlock(&bo->bdev->fence_lock);
+	psb_ttm_bo_unblock_reservation(bo);
+out:
+	ttm_bo_unref(&bo);
+	return ret;
+}
+
+int ttm_pl_verify_access(struct ttm_buffer_object *bo,
+			 struct ttm_object_file *tfile)
+{
+	struct ttm_bo_user_object *ubo;
+
+	/*
+	 * Check bo subclass.
+	 */
+
+	if (unlikely(bo->destroy != &ttm_bo_user_destroy
+		&& bo->destroy != &ttm_ub_bo_user_destroy))
+		return -EPERM;
+
+	ubo = container_of(bo, struct ttm_bo_user_object, bo);
+	if (likely(ubo->base.shareable || ubo->base.tfile == tfile))
+		return 0;
+
+	return -EPERM;
+}
diff --git a/drivers/external_drivers/intel_media/video/common/psb_ttm_placement_user.h b/drivers/external_drivers/intel_media/video/common/psb_ttm_placement_user.h
new file mode 100644
index 0000000..2d9df69
--- /dev/null
+++ b/drivers/external_drivers/intel_media/video/common/psb_ttm_placement_user.h
@@ -0,0 +1,253 @@
+/**************************************************************************
+ *
+ * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+/*
+ * Authors
+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#ifndef _TTM_PLACEMENT_USER_H_
+#define _TTM_PLACEMENT_USER_H_
+
+#if !defined(__KERNEL__) && !defined(_KERNEL)
+#include <stdint.h>
+#else
+#include <linux/kernel.h>
+#endif
+
+#include "ttm/ttm_placement.h"
+
+#define TTM_PLACEMENT_MAJOR 0
+#define TTM_PLACEMENT_MINOR 1
+#define TTM_PLACEMENT_PL    0
+#define TTM_PLACEMENT_DATE  "080819"
+
+/**
+ * struct ttm_pl_create_req
+ *
+ * @size: The buffer object size.
+ * @placement: Flags that indicate initial acceptable
+ *  placement.
+ * @page_alignment: Required alignment in pages.
+ *
+ * Input to the TTM_BO_CREATE ioctl.
+ */
+
+struct ttm_pl_create_req {
+	uint64_t size;
+	uint32_t placement;
+	uint32_t page_alignment;
+};
+
+/**
+ * struct ttm_pl_create_ub_req
+ *
+ * @size: The buffer object size.
+ * @user_address: User-space address of the memory area that
+ * should be used to back the buffer object cast to 64-bit.
+ * @placement: Flags that indicate initial acceptable
+ *  placement.
+ * @page_alignment: Required alignment in pages.
+ *
+ * Input to the TTM_BO_CREATE_UB ioctl.
+ */
+
+struct ttm_pl_create_ub_req {
+	uint64_t size;
+	uint64_t user_address;
+	int32_t  fd;
+	uint32_t placement;
+	uint32_t page_alignment;
+};
+
+/**
+ * struct ttm_pl_rep
+ *
+ * @gpu_offset: The current offset into the memory region used.
+ * This can be used directly by the GPU if there are no
+ * additional GPU mapping procedures used by the driver.
+ *
+ * @bo_size: Actual buffer object size.
+ *
+ * @map_handle: Offset into the device address space.
+ * Used for map, seek, read, write. This will never change
+ * during the lifetime of an object.
+ *
+ * @placement: Flag indicating the placement status of
+ * the buffer object using the TTM_PL flags above.
+ *
+ * @sync_object_arg: Used for user-space synchronization and
+ * depends on the synchronization model used. If fences are
+ * used, this is the buffer_object::fence_type_mask
+ *
+ * Output from the TTM_PL_CREATE and TTM_PL_REFERENCE, and
+ * TTM_PL_SETSTATUS ioctls.
+ */
+
+struct ttm_pl_rep {
+	uint64_t gpu_offset;
+	uint64_t bo_size;
+	uint64_t map_handle;
+	uint32_t placement;
+	uint32_t handle;
+	uint32_t sync_object_arg;
+	uint32_t pad64;
+};
+
+/**
+ * struct ttm_pl_setstatus_req
+ *
+ * @set_placement: Placement flags to set.
+ *
+ * @clr_placement: Placement flags to clear.
+ *
+ * @handle: The object handle
+ *
+ * Input to the TTM_PL_SETSTATUS ioctl.
+ */
+
+struct ttm_pl_setstatus_req {
+	uint32_t set_placement;
+	uint32_t clr_placement;
+	uint32_t handle;
+	uint32_t pad64;
+};
+
+/**
+ * struct ttm_pl_reference_req
+ *
+ * @handle: The object to put a reference on.
+ *
+ * Input to the TTM_PL_REFERENCE and the TTM_PL_UNREFERENCE ioctls.
+ */
+
+struct ttm_pl_reference_req {
+	uint32_t handle;
+	uint32_t pad64;
+};
+
+/*
+ * ACCESS mode flags for SYNCCPU.
+ *
+ * TTM_SYNCCPU_MODE_READ will guarantee that the GPU is not
+ * writing to the buffer.
+ *
+ * TTM_SYNCCPU_MODE_WRITE will guarantee that the GPU is not
+ * accessing the buffer.
+ *
+ * TTM_SYNCCPU_MODE_NO_BLOCK makes sure the call does not wait
+ * for GPU accesses to finish but return -EBUSY.
+ *
+ * TTM_SYNCCPU_MODE_TRYCACHED Try to place the buffer in cacheable
+ * memory while synchronized for CPU.
+ */
+
+#define TTM_PL_SYNCCPU_MODE_READ      TTM_ACCESS_READ
+#define TTM_PL_SYNCCPU_MODE_WRITE     TTM_ACCESS_WRITE
+#define TTM_PL_SYNCCPU_MODE_NO_BLOCK  (1 << 2)
+#define TTM_PL_SYNCCPU_MODE_TRYCACHED (1 << 3)
+
+/**
+ * struct ttm_pl_synccpu_arg
+ *
+ * @handle: The object to synchronize.
+ *
+ * @access_mode: access mode indicated by the
+ * TTM_SYNCCPU_MODE flags.
+ *
+ * @op: indicates whether to grab or release the
+ * buffer for cpu usage.
+ *
+ * Input to the TTM_PL_SYNCCPU ioctl.
+ */
+
+struct ttm_pl_synccpu_arg {
+	uint32_t handle;
+	uint32_t access_mode;
+	enum {
+		TTM_PL_SYNCCPU_OP_GRAB,
+		TTM_PL_SYNCCPU_OP_RELEASE
+	} op;
+	uint32_t pad64;
+};
+
+/*
+ * Waiting mode flags for the TTM_BO_WAITIDLE ioctl.
+ *
+ * TTM_WAITIDLE_MODE_LAZY: Allow for sleeps during polling
+ * wait.
+ *
+ * TTM_WAITIDLE_MODE_NO_BLOCK: Don't block waiting for GPU,
+ * but return -EBUSY if the buffer is busy.
+ */
+
+#define TTM_PL_WAITIDLE_MODE_LAZY     (1 << 0)
+#define TTM_PL_WAITIDLE_MODE_NO_BLOCK (1 << 1)
+
+/**
+ * struct ttm_waitidle_arg
+ *
+ * @handle: The object to synchronize.
+ *
+ * @mode: wait mode indicated by the
+ * TTM_SYNCCPU_MODE flags.
+ *
+ * Argument to the TTM_BO_WAITIDLE ioctl.
+ */
+
+struct ttm_pl_waitidle_arg {
+	uint32_t handle;
+	uint32_t mode;
+};
+
+union ttm_pl_create_arg {
+	struct ttm_pl_create_req req;
+	struct ttm_pl_rep rep;
+};
+
+union ttm_pl_reference_arg {
+	struct ttm_pl_reference_req req;
+	struct ttm_pl_rep rep;
+};
+
+union ttm_pl_setstatus_arg {
+	struct ttm_pl_setstatus_req req;
+	struct ttm_pl_rep rep;
+};
+
+union ttm_pl_create_ub_arg {
+	struct ttm_pl_create_ub_req req;
+	struct ttm_pl_rep rep;
+};
+
+/*
+ * Ioctl offsets.
+ */
+
+#define TTM_PL_CREATE      0x00
+#define TTM_PL_REFERENCE   0x01
+#define TTM_PL_UNREF       0x02
+#define TTM_PL_SYNCCPU     0x03
+#define TTM_PL_WAITIDLE    0x04
+#define TTM_PL_SETSTATUS   0x05
+#define TTM_PL_CREATE_UB   0x06
+
+#endif
diff --git a/drivers/external_drivers/intel_media/video/common/psb_ttm_userobj_api.h b/drivers/external_drivers/intel_media/video/common/psb_ttm_userobj_api.h
new file mode 100644
index 0000000..10ac8c7
--- /dev/null
+++ b/drivers/external_drivers/intel_media/video/common/psb_ttm_userobj_api.h
@@ -0,0 +1,97 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+ */
+
+#ifndef _TTM_USEROBJ_API_H_
+#define _TTM_USEROBJ_API_H_
+
+#include "psb_ttm_placement_user.h"
+#include "psb_ttm_fence_user.h"
+#include "ttm/ttm_object.h"
+#include "psb_ttm_fence_api.h"
+#include "ttm/ttm_bo_api.h"
+#include <linux/version.h>
+
+struct ttm_lock;
+
+/*
+ * User ioctls.
+ */
+
+extern int ttm_pl_create_ioctl(struct ttm_object_file *tfile,
+			       struct ttm_bo_device *bdev,
+			       struct ttm_lock *lock, void *data);
+extern int ttm_pl_ub_create_ioctl(struct ttm_object_file *tfile,
+				  struct ttm_bo_device *bdev,
+				  struct ttm_lock *lock, void *data);
+extern int ttm_pl_reference_ioctl(struct ttm_object_file *tfile, void *data);
+extern int ttm_pl_unref_ioctl(struct ttm_object_file *tfile, void *data);
+extern int ttm_pl_synccpu_ioctl(struct ttm_object_file *tfile, void *data);
+extern int ttm_pl_setstatus_ioctl(struct ttm_object_file *tfile,
+				  struct ttm_lock *lock, void *data);
+extern int ttm_pl_waitidle_ioctl(struct ttm_object_file *tfile, void *data);
+extern int ttm_fence_signaled_ioctl(struct ttm_object_file *tfile, void *data);
+extern int ttm_fence_finish_ioctl(struct ttm_object_file *tfile, void *data);
+extern int ttm_fence_unref_ioctl(struct ttm_object_file *tfile, void *data);
+
+extern int
+ttm_fence_user_create(struct ttm_fence_device *fdev,
+		      struct ttm_object_file *tfile,
+		      uint32_t fence_class,
+		      uint32_t fence_types,
+		      uint32_t create_flags,
+		      struct ttm_fence_object **fence, uint32_t * user_handle);
+
+extern struct ttm_buffer_object *ttm_buffer_object_lookup(struct ttm_object_file
+		*tfile,
+		uint32_t handle);
+
+extern int
+ttm_pl_verify_access(struct ttm_buffer_object *bo,
+		     struct ttm_object_file *tfile);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+extern int ttm_buffer_object_create(struct ttm_bo_device *bdev,
+				    unsigned long size,
+				    enum ttm_bo_type type,
+				    uint32_t flags,
+				    uint32_t page_alignment,
+				    unsigned long buffer_start,
+				    bool interruptible,
+				    struct file *persistant_swap_storage,
+				    struct ttm_buffer_object **p_bo);
+#else
+extern int ttm_buffer_object_create(struct ttm_bo_device *bdev,
+				    unsigned long size,
+				    enum ttm_bo_type type,
+				    uint32_t flags,
+				    uint32_t page_alignment,
+				    bool interruptible,
+				    struct file *persistant_swap_storage,
+				    struct ttm_buffer_object **p_bo);
+#endif
+
+extern int psb_ttm_bo_check_placement(struct ttm_buffer_object *bo,
+				      struct ttm_placement *placement);
+#endif
diff --git a/drivers/external_drivers/intel_media/video/common/psb_video_drv.h b/drivers/external_drivers/intel_media/video/common/psb_video_drv.h
new file mode 100644
index 0000000..122a2ef
--- /dev/null
+++ b/drivers/external_drivers/intel_media/video/common/psb_video_drv.h
@@ -0,0 +1,337 @@
+/*
+ * Copyright (c) 2008, Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *    Fei Jiang <fei.jiang@intel.com>
+ *
+ **/
+#ifndef _PSB_CMDBUF_H_
+#define _PSB_CMDBUF_H_
+
+#ifdef CONFIG_DRM_VXD_BYT
+#include "vxd_drv.h"
+#include "vxd_drm.h"
+#else
+#include "psb_drv.h"
+#endif
+
+#include "ttm/ttm_execbuf_util.h"
+
+#ifdef MERRIFIELD
+#include "bufferclass_interface.h"
+#include "pvrsrv_interface.h"
+#endif
+
+struct drm_psb_private;
+
+/*
+ * TTM driver private offsets used for mmap.
+ */
+#ifdef CONFIG_DRM_VXD_BYT
+/* need distinguish between gem mmap and ttm mmap */
+#define DRM_PSB_FILE_PAGE_OFFSET ((0x100000000ULL >> PAGE_SHIFT) * 18)
+#else
+#define DRM_PSB_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
+#endif
+
+#ifdef MERRIFIELD
+#define PSB_OBJECT_HASH_ORDER 13
+#define PSB_FILE_OBJECT_HASH_ORDER 12
+#define PSB_BO_HASH_ORDER 12
+#else
+#define PSB_OBJECT_HASH_ORDER 9
+#define PSB_FILE_OBJECT_HASH_ORDER 8
+#define PSB_BO_HASH_ORDER 8
+#endif
+
+#define PSB_TT_PRIV0_LIMIT	 (512*1024*1024)
+#define PSB_TT_PRIV0_PLIMIT	 (PSB_TT_PRIV0_LIMIT >> PAGE_SHIFT)
+#define PSB_NUM_VALIDATE_BUFFERS 2048
+
+#ifdef MERRIFIELD
+/*
+ * For Merrifield, the VSP memory must be in 1GB.
+ *	TT size is 256M or 512M
+ *	MMU size is 256M
+ *	TILING size is 256M
+ */
+#define PSB_MEM_MMU_START		0x00000000
+#define PSB_MEM_TT_START		0x10000000
+#define PSB_MEM_MMU_TILING_START	0x30000000
+#else
+#define PSB_MEM_MMU_START		0x00000000
+#define PSB_MEM_TT_START		0x30000000
+#define PSB_MEM_IMR_START		0x2C000000
+#define PSB_MEM_MMU_TILING_START	0x50000000
+
+#endif
+
+#define PSB_MAX_RELOC_PAGES 1024
+
+/* MMU type */
+enum mmu_type_t {
+	IMG_MMU = 1,
+	VSP_MMU = 2
+};
+
+/* psb_cmdbuf.c */
+int psb_cmdbuf_ioctl(struct drm_device *dev, void *data,
+			    struct drm_file *file_priv);
+void psb_fence_or_sync(struct drm_file *file_priv,
+			      uint32_t engine,
+			      uint32_t fence_types,
+			      uint32_t fence_flags,
+			      struct list_head *list,
+			      struct psb_ttm_fence_rep *fence_arg,
+			      struct ttm_fence_object **fence_p);
+
+/* psb_fence.c */
+void psb_fence_handler(struct drm_device *dev, uint32_t class);
+int psb_fence_emit_sequence(struct ttm_fence_device *fdev,
+				   uint32_t fence_class,
+				   uint32_t flags, uint32_t *sequence,
+				   unsigned long *timeout_jiffies);
+void psb_fence_error(struct drm_device *dev,
+			    uint32_t class,
+			    uint32_t sequence, uint32_t type, int error);
+int psb_ttm_fence_device_init(struct ttm_fence_device *fdev);
+
+/*
+ * psb_ttm_glue.c
+ */
+int psb_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+int psb_fence_signaled_ioctl(struct drm_device *dev, void *data,
+				    struct drm_file *file_priv);
+int psb_verify_access(struct ttm_buffer_object *bo,
+			     struct file *filp);
+ssize_t psb_ttm_read(struct file *filp, char __user *buf,
+			    size_t count, loff_t *f_pos);
+ssize_t psb_ttm_write(struct file *filp, const char __user *buf,
+			     size_t count, loff_t *f_pos);
+int psb_fence_finish_ioctl(struct drm_device *dev, void *data,
+				  struct drm_file *file_priv);
+int psb_fence_unref_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv);
+int psb_pl_waitidle_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv);
+int psb_pl_setstatus_ioctl(struct drm_device *dev, void *data,
+				  struct drm_file *file_priv);
+int psb_pl_synccpu_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file_priv);
+int psb_pl_unref_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv);
+int psb_pl_reference_ioctl(struct drm_device *dev, void *data,
+				  struct drm_file *file_priv);
+int psb_pl_create_ioctl(struct drm_device *dev, void *data,
+			       struct drm_file *file_priv);
+int psb_pl_ub_create_ioctl(struct drm_device *dev, void *data,
+				  struct drm_file *file_priv);
+int psb_extension_ioctl(struct drm_device *dev, void *data,
+			       struct drm_file *file_priv);
+int psb_ttm_global_init(struct drm_psb_private *dev_priv);
+void psb_ttm_global_release(struct drm_psb_private *dev_priv);
+int psb_getpageaddrs_ioctl(struct drm_device *dev, void *data,
+				  struct drm_file *file_priv);
+int psb_video_getparam(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv);
+void psb_remove_videoctx(struct drm_psb_private *dev_priv, struct file *filp);
+
+/*
+ * psb_mmu.c
+ */
+struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem *registers,
+		int trap_pagefaults, int invalid_type,
+		struct drm_psb_private *dev_priv, enum mmu_type_t mmu_type);
+void psb_mmu_driver_takedown(struct psb_mmu_driver *driver);
+struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver
+							*driver);
+struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
+						int trap_pagefaults,
+						int invalid_type);
+void psb_mmu_free_pagedir(struct psb_mmu_pd *pd);
+void psb_mmu_flush(struct psb_mmu_driver *driver, int rc_prot);
+void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
+					unsigned long address,
+					uint32_t num_pages);
+int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd,
+						uint32_t start_pfn,
+						unsigned long address,
+						uint32_t num_pages, int type);
+void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context);
+int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
+				unsigned long address, uint32_t num_pages,
+				uint32_t desired_tile_stride,
+				uint32_t hw_tile_stride, int type);
+void psb_mmu_remove_pages(struct psb_mmu_pd *pd,
+				 unsigned long address, uint32_t num_pages,
+				 uint32_t desired_tile_stride,
+				 uint32_t hw_tile_stride);
+#if 0
+void psb_mmu_pgtable_dump(struct drm_device *dev);
+extern void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd, uint32_t mmu_offset,
+			       uint32_t gtt_start, uint32_t gtt_pages);
+extern int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
+				  unsigned long *pfn);
+#endif
+int psb_ttm_bo_clflush(struct psb_mmu_driver *mmu,
+			struct ttm_buffer_object *bo);
+
+int tng_securefw(struct drm_device *dev, char *fw_basename, char *island_name, int imrl_reg);
+int tng_rawfw(struct drm_device *dev, uint8_t *name);
+
+
+/* Currently defined profiles */
+enum VAProfile {
+	VAProfileNone                   = -1,
+	VAProfileMPEG2Simple		= 0,
+	VAProfileMPEG2Main		= 1,
+	VAProfileMPEG4Simple		= 2,
+	VAProfileMPEG4AdvancedSimple	= 3,
+	VAProfileMPEG4Main		= 4,
+	VAProfileH264Baseline		= 5,
+	VAProfileH264Main		= 6,
+	VAProfileH264High		= 7,
+	VAProfileVC1Simple		= 8,
+	VAProfileVC1Main		= 9,
+	VAProfileVC1Advanced		= 10,
+	VAProfileH263Baseline		= 11,
+	VAProfileJPEGBaseline           = 12,
+	VAProfileH264ConstrainedBaseline = 13,
+	VAProfileVP8Version0_3          = 14,
+	VAProfileMax
+};
+
+/* Currently defined entrypoints */
+enum VAEntrypoint {
+	VAEntrypointVLD		= 1,
+	VAEntrypointIZZ		= 2,
+	VAEntrypointIDCT	= 3,
+	VAEntrypointMoComp	= 4,
+	VAEntrypointDeblocking	= 5,
+	VAEntrypointEncSlice	= 6,	/* slice level encode */
+	VAEntrypointEncPicture  = 7,    /* pictuer encode, JPEG, etc */
+	VAEntrypointVideoProc   = 10,   /* video pre/post processing */
+	VAEntrypointMax
+};
+
+#define VA_RT_FORMAT_PROTECTED	0x80000000
+#define PSB_SURFACE_UNAVAILABLE 0x40000000
+
+/**
+ *struct psb_context
+ *
+ *@buffers:	 array of pre-allocated validate buffers.
+ *@used_buffers: number of buffers in @buffers array currently in use.
+ *@validate_buffer: buffers validated from user-space.
+ *@kern_validate_buffers : buffers validated from kernel-space.
+ *@fence_flags : Fence flags to be used for fence creation.
+ *
+ *This structure is used during execbuf validation.
+ */
+struct psb_context {
+	struct psb_validate_buffer *buffers;
+	uint32_t used_buffers;
+	struct list_head validate_list;
+	/* not used:
+	 * struct list_head kern_validate_list;
+	 * uint32_t val_seq; */
+	uint32_t fence_types;
+};
+
+struct psb_validate_buffer {
+	struct ttm_validate_buffer base;
+	struct psb_validate_req req;
+	int ret;
+	struct psb_validate_arg __user *user_val_arg;
+	uint32_t flags;
+	uint32_t offset;
+	int po_correct;
+};
+
+#define	LOG2_WB_FIFO_SIZE	(5)
+#define	WB_FIFO_SIZE		(1 << (LOG2_WB_FIFO_SIZE))
+
+struct adaptive_intra_refresh_info_type{
+       int8_t *air_table;
+       int32_t air_per_frame;
+       int16_t air_skip_cnt;
+       uint16_t air_scan_pos;
+       int32_t sad_threshold;
+};
+
+struct psb_video_ctx {
+	struct list_head head;
+	struct file *filp; /* DRM device file pointer */
+	uint64_t ctx_type; /* (msvdx_tile&0xff)<<16|profile<<8|entrypoint */
+	/* todo: more context specific data for multi-context support */
+	/* Write back buffer object */
+	struct ttm_buffer_object *wb_bo;
+	struct ttm_bo_kmap_obj wb_bo_kmap;
+	uint32_t *wb_addr[WB_FIFO_SIZE];
+	/* Setvideo buffer object */
+	struct ttm_buffer_object *mtx_ctx_bo;
+	struct ttm_bo_kmap_obj mtx_ctx_kmap;
+	uint32_t setv_addr;
+
+	/* CIR parameters */
+	struct ttm_buffer_object *cir_input_ctrl_bo;
+	struct ttm_bo_kmap_obj cir_input_ctrl_kmap;
+	uint32_t *cir_input_ctrl_addr;
+	uint32_t pseudo_rand_seed;
+	int32_t last_cir_index;
+
+	/* AIR parameters */
+	struct adaptive_intra_refresh_info_type air_info;
+	struct ttm_buffer_object *bufs_f_p_out_params_bo;
+	struct ttm_bo_kmap_obj bufs_f_p_out_params_kmap;
+	uint32_t *bufs_f_p_out_params_addr;
+	struct ttm_buffer_object *bufs_f_p_out_best_mp_param_bo;
+	struct ttm_bo_kmap_obj bufs_f_p_out_best_mp_param_kmap;
+	uint32_t *bufs_f_p_out_best_mp_param_addr;
+
+	/* Save state registers */
+	uint32_t *bias_reg;
+
+	uint32_t status;
+	uint32_t codec;
+	uint32_t frame_count;
+	/* Firmware data section offset and size */
+	uint32_t mtx_debug_val;
+	uint32_t mtx_bank_size;
+	uint32_t mtx_ram_size;
+
+	bool handle_sequence_needed;
+	uint32_t cur_sequence;
+
+	uint32_t low_cmd_count;
+	uint32_t high_cmd_count;
+
+	uint32_t enc_ctx_param;
+	uint32_t enc_ctx_addr;
+#ifdef CONFIG_SLICE_HEADER_PARSING
+	uint32_t slice_extract_flag;
+	uint32_t frame_boundary;
+	uint32_t frame_end_seq;
+	uint32_t frame_end; /* frame end command is synced in interrupt */
+#endif
+};
+
+#ifdef MERRIFIELD
+struct psb_fpriv *psb_fpriv(struct drm_file *file_priv);
+#endif
+
+#endif
diff --git a/drivers/external_drivers/intel_media/video/common/tng_securefw.c b/drivers/external_drivers/intel_media/video/common/tng_securefw.c
new file mode 100644
index 0000000..336adeb
--- /dev/null
+++ b/drivers/external_drivers/intel_media/video/common/tng_securefw.c
@@ -0,0 +1,385 @@
+/**************************************************************************
+ * Copyright (c) 2014, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include "psb_drv.h"
+#include "vsp.h"
+
+
+#ifdef CONFIG_DX_SEP54
+extern int sepapp_image_verify(u8 *addr, ssize_t size, u32 key_index, u32 magic_num);
+#endif
+
+/*
+ * For a new product/device, if the device information is not in the spid2fw
+ * table, driver will
+ * 1) Firstly try to load firmware with the name like msvdx.bin.0004.0004.000d
+ * 2) If there is no such firmware, driver will use the existing firmware from
+ *    the closest device (usually with same family id or product id)
+ * 3) Use the key from the closest device
+ *
+ * If above 1)/2) is failed, a new firmware is needed. For testing pupose without
+ * driver change
+ * 1) The new firmware must be with the name like msvdx.bin.0004.0004.000d
+ * 2) If use other key, try to modify /sys/module/<driver module>/video_sepkey
+ *
+ * For the formal change, extend spid2fw table to include the device and firmware
+ */
+
+#define FW_NAME_LEN  64
+#define IMR_REG_NUMBER(imrl_reg) ((imrl_reg - TNG_IMR_MSG_REGBASE) >> 2)
+#define ISLAND_MAGIC_NUMBER(island_str) ((island_str[2] << 24) | (island_str[1] << 16) | (island_str[0] << 8) | '$')
+
+#define mofd_vv_fab_a_msvdx	"msvdx.bin.0008.0000.0000"
+#define mofd_v0_msvdx		"msvdx.bin.0008.0000.0001"
+#define mofd_v1_pr2_msvdx	"msvdx.bin.0008.0002.0001"
+#define mofd_ffrd_pr0_msvdx	mofd_v0_msvdx
+#define mofd_prh_b0_msvdx	"msvdx.bin.000c.0001.0001"
+#define mofd_fugu_product_msvdx	"msvdx.bin.0008.0000.0002"
+
+#define mofd_vv_fab_a_topaz	"topaz.bin.0008.0000.0000"
+#define mofd_v0_topaz		"topaz.bin.0008.0000.0001"
+#define mofd_v1_pr2_topaz	"topaz.bin.0008.0002.0001"
+#define mofd_ffrd_pr0_topaz	mofd_vv_fab_a_topaz
+#define mofd_prh_b0_topaz	"topaz.bin.000c.0001.0001"
+#define mofd_fugu_product_topaz	"topaz.bin.0008.0000.0002"
+
+#define mofd_vv_fab_a_vsp	"vsp.bin.0008.0000.0000"
+#define mofd_v0_vsp		"vsp.bin.0008.0000.0001"
+#define mofd_v1_pr2_vsp		"vsp.bin.0008.0002.0001"
+#define mofd_ffrd_pr0_vsp	mofd_vv_fab_a_vsp
+#define mofd_prh_b0_vsp		"vsp.bin.000c.0001.0001"
+#define mofd_fugu_product_vsp		"vsp.bin.0008.0000.0002"
+
+#define mofd_default_spid	"0008.0001.0001"
+/*
+ * Firmware name if there is no entry in spid2fw table
+ */
+static char default_msvdx[FW_NAME_LEN];
+static char default_topaz[FW_NAME_LEN];
+static char default_vsp[FW_NAME_LEN];
+
+struct spid2fw_mapping {
+	u16 family_id;
+	u16 product_id;
+	u16 hardware_id;
+	char *msvdx_fw;
+	char *topaz_fw;
+	char *vsp_fw;
+	int sep_key;
+};
+
+/*
+ * Table spid2fw
+ */
+static struct spid2fw_mapping spid2fw[] = {
+	{8, 0, 0, mofd_vv_fab_a_msvdx, mofd_vv_fab_a_topaz, mofd_vv_fab_a_vsp, 15}, /* moorefield VV Fab A */
+	{8, 0, 1, mofd_v0_msvdx, mofd_v0_topaz, mofd_v0_vsp, 15}, /* moorefield V0 */
+	{8, 2, 0, mofd_vv_fab_a_msvdx, mofd_vv_fab_a_topaz, mofd_vv_fab_a_vsp, 15}, /* moorefield V1 VV with A0 soc */
+	{8, 2, 1, mofd_v1_pr2_msvdx, mofd_v1_pr2_topaz, mofd_v1_pr2_vsp, 15}, /* moorefield V1 PR2 */
+	//{8, 0, 2, mofd_product_msvdx, mofd_product_topaz, mofd_product_vsp, 15}, /* Anniedale Production Keys (QS/PRQ) */
+
+        {8, 0, 2, mofd_fugu_product_msvdx, mofd_fugu_product_topaz, mofd_fugu_product_vsp, 15},
+	{0xc, 0, 4, mofd_ffrd_pr0_msvdx, mofd_ffrd_pr0_topaz, mofd_ffrd_pr0_vsp, 15}, /* moorefield FFRD PR0 */
+	{0xc, 1, 1, mofd_prh_b0_msvdx, mofd_prh_b0_topaz, mofd_prh_b0_vsp, 15}, /* MCG Moorefield PRH B0 */
+
+	{-1, -1, -1, NULL, NULL, NULL, 15} /* the last entry for non-supported device */
+};
+
+/*
+* Fall back once mismatch
+*/
+static struct spid2fw_mapping default_mofd_spid2fw =
+	{8, 0, 2, mofd_fugu_product_msvdx, mofd_fugu_product_topaz, mofd_fugu_product_vsp, 15}; /* moorefield V0 */
+
+#define SPID2FW_NUMBER sizeof(spid2fw)/sizeof(struct spid2fw_mapping)
+static struct spid2fw_mapping *matched_spid2fw = NULL; /* query once, use multiple times */
+
+static void tng_copyfw(char *fw_name, char *island_name, int *sep_key, struct spid2fw_mapping *p)
+{
+	if (!strncmp(island_name, "VED", 3))
+		strcpy(fw_name, p->msvdx_fw);
+	else if (!strncmp(island_name, "VEC", 3))
+		strcpy(fw_name, p->topaz_fw);
+	else if (!strncmp(island_name, "VSP", 3))
+		strcpy(fw_name, p->vsp_fw);
+
+	if (drm_video_sepkey == -1)
+		*sep_key = p->sep_key;
+	else
+		*sep_key = drm_video_sepkey;
+
+	return;
+}
+
+static void tng_spid2fw(struct drm_device *dev, char *fw_name, char *fw_basename, char *island_name, int *sep_key)
+{
+	const struct firmware *raw = NULL;
+	int ret;
+
+	/* already get the matched entry in spid2fw table */
+	if (matched_spid2fw) {
+		tng_copyfw(fw_name, island_name, sep_key, matched_spid2fw);
+		return;
+	}
+
+	/* find a matched entry */
+	if (matched_spid2fw != NULL) {
+		tng_copyfw(fw_name,  island_name, sep_key, matched_spid2fw);
+		PSB_DEBUG_INIT("Got matched firmware %s\n", fw_name);
+		return;
+	}
+
+	/*
+	 * No entry in the table, fake one
+	 * Firstly try if we have the firmware like mvsdx.bin.0004.0002.000d
+	 */
+	DRM_ERROR("Cannot find pre-defined firmware for this spid, try to detect %s firmware\n", fw_basename);
+
+	snprintf(fw_name, FW_NAME_LEN, "%s.bin." mofd_default_spid, fw_basename);
+	ret = request_firmware(&raw, fw_name, &dev->pdev->dev);
+	if (raw == NULL || ret < 0) { /* there is no mvsdx.bin.0004.0002.000d */
+		DRM_ERROR("There is no firmware %s, try to use the default device firmware\n", fw_name);
+		matched_spid2fw = &default_mofd_spid2fw;
+		tng_copyfw(fw_name,  island_name, sep_key, matched_spid2fw);
+		return;
+	}
+	/*
+	 * We have firmware like mvsdx.bin.0004.0002.000d
+	 * Fake one entry in the table for other islands
+	 */
+	release_firmware(raw);
+
+	PSB_DEBUG_INIT("Detect %s firmware %s success! Fake entries for all the other islands\n", fw_basename, fw_name);
+
+	matched_spid2fw = &spid2fw[SPID2FW_NUMBER - 1];
+
+	matched_spid2fw->msvdx_fw = default_msvdx;
+	snprintf(matched_spid2fw->msvdx_fw, FW_NAME_LEN, "msvdx.bin." mofd_default_spid);
+
+	matched_spid2fw->topaz_fw = default_topaz;
+	snprintf(matched_spid2fw->topaz_fw, FW_NAME_LEN, "topaz.bin." mofd_default_spid);
+
+	matched_spid2fw->vsp_fw = default_vsp;
+	snprintf(matched_spid2fw->vsp_fw, FW_NAME_LEN, "vsp.bin." mofd_default_spid);
+
+	/* force the sep key with the nearest one*/
+	matched_spid2fw->sep_key =  default_mofd_spid2fw.sep_key;
+
+	tng_copyfw(fw_name, island_name, sep_key, matched_spid2fw);
+	return;
+}
+
+static void tng_get_fwinfo(struct drm_device *dev, char *fw_name, char *fw_basename, char *island_name, int *sep_key)
+{
+	tng_spid2fw(dev, fw_name, fw_basename, island_name, sep_key);
+	PSB_DEBUG_INIT("Use firmware %s for %s, SEP key %d\n", fw_name, island_name, *sep_key);
+}
+
+static void tng_print_imrinfo(int imrl_reg, uint64_t imr_base, uint64_t imr_end)
+{
+	unsigned int imr_regnum = IMR_REG_NUMBER(imrl_reg);
+
+	if (imr_base != 0)
+		PSB_DEBUG_INIT("IMR%d ranges 0x%12llx - 0x%12llx\n",
+			       imr_regnum, imr_base, imr_end);
+
+	PSB_DEBUG_INIT("IMR%d L 0x%2x = 0x%032x\n",
+		       imr_regnum, imrl_reg,
+		       intel_mid_msgbus_read32(PNW_IMR_MSG_PORT, imrl_reg));
+	PSB_DEBUG_INIT("IMR%d H 0x%2x = 0x%032x\n",
+		       imr_regnum, imrl_reg + 1,
+		       intel_mid_msgbus_read32(PNW_IMR_MSG_PORT, imrl_reg+1));
+	PSB_DEBUG_INIT("IMR%d RAC 0x%2x = 0x%032x\n",
+		       imr_regnum,  imrl_reg + 2,
+		       intel_mid_msgbus_read32(PNW_IMR_MSG_PORT, imrl_reg+2));
+	PSB_DEBUG_INIT("IMR%d WAC 0x%2x = 0x%032x\n",
+		       imr_regnum, imrl_reg + 3,
+		       intel_mid_msgbus_read32(PNW_IMR_MSG_PORT, imrl_reg+3));
+}
+
+static void tng_get_imrinfo(int imrl_reg, uint64_t *imr_addr)
+{
+	uint32_t imrl, imrh;
+	uint64_t imr_base, imr_end;
+
+	imrl = intel_mid_msgbus_read32(TNG_IMR_MSG_PORT, imrl_reg);
+	imrh = intel_mid_msgbus_read32(TNG_IMR_MSG_PORT, imrl_reg+1);
+
+	imr_base = (imrl & TNG_IMR_ADDRESS_MASK) << TNG_IMR_ADDRESS_SHIFT;
+	imr_end = (imrh & TNG_IMR_ADDRESS_MASK) << TNG_IMR_ADDRESS_SHIFT;
+
+	*imr_addr = imr_base;
+
+	tng_print_imrinfo(imrl_reg, imr_base, imr_end);
+}
+
+static int tng_securefw_prevsp(struct drm_device *dev, const struct firmware *raw)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct vsp_private *vsp_priv = dev_priv->vsp_private;
+	struct vsp_secure_boot_header *boot_header;
+	struct vsp_multi_app_blob_data *ma_header;
+	unsigned int vrl_header_size = 736;
+	unsigned char *ptr, *ma_ptr;
+
+	if (raw->size < sizeof(struct vsp_secure_boot_header)) {
+		DRM_ERROR("VSP:firmware is not a correct firmware (size %d)\n", (int)raw->size);
+		return 1;
+	}
+
+	ptr = (void *)raw->data;
+	ma_ptr = (void *) raw->data + vrl_header_size;
+	boot_header = (struct vsp_secure_boot_header *)(ptr + vrl_header_size);
+	ma_header = (struct vsp_multi_app_blob_data *)(ma_ptr + boot_header->ma_header_offset);
+
+	/* get firmware header */
+	memcpy(&vsp_priv->boot_header, boot_header, sizeof(vsp_priv->boot_header));
+
+	if (vsp_priv->boot_header.magic_number != VSP_SECURE_BOOT_MAGIC_NR) {
+		DRM_ERROR("VSP: failed to load correct vsp firmware\n"
+			  "FW magic number is wrong %x (should be %x)\n",
+			  vsp_priv->boot_header.magic_number,
+			  VSP_SECURE_BOOT_MAGIC_NR);
+		return 1;
+	}
+
+	/* read application firmware image data (for state-buffer size, etc) */
+	/* load the multi-app blob header */
+	memcpy(&vsp_priv->ma_header, ma_header, sizeof(vsp_priv->ma_header));
+	if (vsp_priv->ma_header.magic_number != VSP_MULTI_APP_MAGIC_NR) {
+		DRM_ERROR("VSP: failed to load correct vsp firmware\n"
+			  "FW magic number is wrong %x (should be %x)\n",
+			  vsp_priv->ma_header.magic_number,
+			  VSP_MULTI_APP_MAGIC_NR);
+
+		return 1;
+	}
+
+	VSP_DEBUG("firmware secure header:\n");
+	VSP_DEBUG("boot_header magic number %x\n", boot_header->magic_number);
+	VSP_DEBUG("boot_text_offset %x\n", boot_header->boot_text_offset);
+	VSP_DEBUG("boot_text_reg %x\n", boot_header->boot_text_reg);
+	VSP_DEBUG("boot_icache_value %x\n", boot_header->boot_icache_value);
+	VSP_DEBUG("boot_icache_reg %x\n", boot_header->boot_icache_reg);
+	VSP_DEBUG("boot_pc_value %x\n", boot_header->boot_pc_value);
+	VSP_DEBUG("boot_pc_reg %x\n", boot_header->boot_pc_reg);
+	VSP_DEBUG("ma_header_offset %x\n", boot_header->ma_header_offset);
+	VSP_DEBUG("ma_header_reg %x\n", boot_header->ma_header_reg);
+	VSP_DEBUG("boot_start_value %x\n", boot_header->boot_start_value);
+	VSP_DEBUG("boot_start_reg %x\n", boot_header->boot_start_reg);
+	VSP_DEBUG("firmware ma_blob header:\n");
+	VSP_DEBUG("ma_header magic number %x\n", ma_header->magic_number);
+	VSP_DEBUG("offset_from_start %x\n", ma_header->offset_from_start);
+	VSP_DEBUG("imr_state_buffer_addr %x\n", ma_header->imr_state_buffer_addr);
+	VSP_DEBUG("imr_state_buffer_size %x\n", ma_header->imr_state_buffer_size);
+	VSP_DEBUG("apps_default_context_buffer_size %x\n",
+		  ma_header->apps_default_context_buffer_size);
+
+	return 0;
+}
+
+static void tng_securefw_postvsp(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct vsp_private *vsp_priv = dev_priv->vsp_private;
+
+	vsp_priv->fw_loaded = VSP_FW_LOADED;
+	vsp_priv->vsp_state = VSP_STATE_DOWN;
+
+	vsp_priv->ctrl = (struct vsp_ctrl_reg *) (dev_priv->vsp_reg +
+						  VSP_CONFIG_REG_SDRAM_BASE +
+						  VSP_CONFIG_REG_START);
+}
+
+int tng_securefw(struct drm_device *dev, char *fw_basename, char *island_name, int imrl_reg)
+{
+	const struct firmware *raw = NULL;
+	char fw_name[FW_NAME_LEN];
+	unsigned char *imr_ptr;
+	uint64_t imr_addr;
+	int ret = 0, sep_key, fw_size;
+
+	tng_get_fwinfo(dev, fw_name, fw_basename, island_name, &sep_key);
+
+	/* try to load firmware from storage */
+	PSB_DEBUG_INIT("Try to request firmware %s\n", fw_name);
+	ret = request_firmware(&raw, fw_name, &dev->pdev->dev);
+	if (raw == NULL || ret < 0) {
+		DRM_ERROR("Failed to request firmware %s, ret =  %d\n", fw_name, ret);
+		return ret;
+	}
+
+	if (!strncmp(island_name, "VSP", 3)) {
+		ret = tng_securefw_prevsp(dev, raw);
+		if (ret) {
+			DRM_ERROR("VSP sanity check failed\n");
+			release_firmware(raw);
+			return ret;
+		}
+	}
+
+	PSB_DEBUG_INIT("Try to get IMR region information\n");
+	tng_get_imrinfo(imrl_reg, &imr_addr);
+
+	PSB_DEBUG_INIT("Try to map IMR region\n");
+	imr_ptr = ioremap(imr_addr, raw->size);
+	if (!imr_ptr) {
+		DRM_ERROR("Failed to map IMR region\n");
+		release_firmware(raw);
+		return 1;
+	}
+
+	fw_size = raw->size;
+	PSB_DEBUG_INIT("Try to copy firmware into IMR region\n");
+	memcpy(imr_ptr, raw->data, fw_size);
+
+	PSB_DEBUG_INIT("Try to unmap IMR region\n");
+	iounmap(imr_ptr);
+
+	PSB_DEBUG_INIT("Try to release firmware\n");
+	release_firmware(raw);
+
+#ifdef CONFIG_DX_SEP54
+	PSB_DEBUG_INIT("Try to verify firmware\n");
+	ret = sepapp_image_verify((u8 *)imr_addr, fw_size, sep_key,
+				  ISLAND_MAGIC_NUMBER(island_name));
+	if (ret) {
+		DRM_ERROR("Failed to verify firmware %x\n", ret);
+		return ret;
+	}
+	PSB_DEBUG_INIT("After verification, IMR region information\n");
+	tng_print_imrinfo(imrl_reg, 0, 0);
+#endif
+
+	if (!strncmp(island_name, "VSP", 3))
+		tng_securefw_postvsp(dev);
+
+	return ret;
+}
+
+int tng_rawfw(struct drm_device *dev, uint8_t *fw_basename)
+{
+	DRM_ERROR("Non secure mode never be ran\n");
+
+	return 1;
+}
+
diff --git a/drivers/external_drivers/intel_media/video/decode/Makefile b/drivers/external_drivers/intel_media/video/decode/Makefile
new file mode 100644
index 0000000..47281e5
--- /dev/null
+++ b/drivers/external_drivers/intel_media/video/decode/Makefile
@@ -0,0 +1,27 @@
+# Makefile for the drm device driver. This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+INCDIR=drivers/staging/mrfl
+MEDIA_INCDIR=drivers/staging/intel_media
+ccflags-$(CONFIG_DRM_MRFLD) += \
+	-I$(INCDIR)/ \
+	-I$(INCDIR)/rgx/include \
+	-I$(INCDIR)/interface \
+	-I$(INCDIR)/drv \
+	-I$(INCDIR)/../intel_media/video/common \
+	-I$(INCDIR)/../intel_media/video/decode \
+	-I$(INCDIR)/../../../include/linux \
+	-I$(INCDIR)/../../../include/drm \
+	-I$(INCDIR)/../../../include/drm/ttm \
+	-I$(INCDIR)/../tng/drv/ospm
+
+ccflags-$(CONFIG_DRM_MRFLD) += -DANDROID -D_linux_ -DLINUX -D__KERNEL__ -DMERRIFIELD -DCONFIG_VIDEO_MRFLD
+ccflags-$(CONFIG_DRM_VXD_BYT) := -Iinclude/drm -Iinclude/drm/ttm -Idrivers/gpu/drm/i915 -Idrivers/staging/intel_media/common/baytrail -Idrivers/staging/intel_media/video/common -Idrivers/staging/intel_media/video/decode
+#VIDEO_COMMON_DIR = $(TOP_DIR)/driver/staging/intel_media/video/common
+#DECODE_DIR = $(TOP_DIR)/driver/staging/intel_media/video/decode
+
+obj-y += \
+	psb_msvdx.o \
+	psb_msvdxinit.o \
+	psb_msvdx_fw.o
+
diff --git a/drivers/external_drivers/intel_media/video/decode/psb_msvdx.c b/drivers/external_drivers/intel_media/video/decode/psb_msvdx.c
new file mode 100755
index 0000000..40a9b37
--- /dev/null
+++ b/drivers/external_drivers/intel_media/video/decode/psb_msvdx.c
@@ -0,0 +1,1788 @@
+/**************************************************************************
+ * MSVDX I/O operations and IRQ handling
+ *
+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
+ * Copyright (c) Imagination Technologies Limited, UK
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *    Li Zeng <li.zeng@intel.com>
+ *    Binglin Chen <binglin.chen@intel.com>
+ *    Fei Jiang <fei.jiang@intel.com>
+ *
+ **************************************************************************/
+
+#include <drm/drmP.h>
+#ifdef CONFIG_DRM_VXD_BYT
+#include "vxd_drv.h"
+#include "vxd_drm.h"
+#else
+#include "psb_drm.h"
+#include "psb_drv.h"
+#include "psb_powermgmt.h"
+#endif
+#include "psb_msvdx.h"
+#include "psb_msvdx_msg.h"
+#include "psb_msvdx_reg.h"
+#include "psb_msvdx_ec.h"
+
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/history_record.h>
+
+#ifdef CONFIG_MDFD_GL3
+#include "mdfld_gl3.h"
+#endif
+
+#ifndef list_first_entry
+#define list_first_entry(ptr, type, member) \
+	list_entry((ptr)->next, type, member)
+#endif
+
+static void psb_msvdx_fw_error_detected(struct drm_device *dev, uint32_t fence, uint32_t flags);
+static struct psb_video_ctx* psb_msvdx_find_ctx(struct drm_psb_private *dev_priv, uint32_t fence);
+static int psb_msvdx_send(struct drm_device *dev, void *cmd,
+			  unsigned long cmd_size, struct psb_video_ctx* msvdx_ctx);
+static void psb_msvdx_set_tile(struct drm_device *dev,
+				unsigned long msvdx_tile);
+static int psb_msvdx_protected_frame_finished(struct drm_psb_private *dev_priv, struct psb_video_ctx* pos, uint32_t fence);
+int psb_msvdx_dequeue_send(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	struct psb_msvdx_cmd_queue *msvdx_cmd = NULL;
+	int ret = 0;
+	struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&msvdx_priv->msvdx_lock, irq_flags);
+	if (list_empty(&msvdx_priv->msvdx_queue)) {
+		PSB_DEBUG_GENERAL("MSVDXQUE: msvdx list empty.\n");
+		msvdx_priv->msvdx_busy = 0;
+		spin_unlock_irqrestore(&msvdx_priv->msvdx_lock, irq_flags);
+		return -EINVAL;
+	}
+
+	msvdx_cmd = list_first_entry(&msvdx_priv->msvdx_queue,
+				     struct psb_msvdx_cmd_queue, head);
+	list_del(&msvdx_cmd->head);
+	spin_unlock_irqrestore(&msvdx_priv->msvdx_lock, irq_flags);
+
+#ifdef MERRIFIELD
+#ifdef CONFIG_SLICE_HEADER_PARSING
+	if(msvdx_cmd->msvdx_ctx->slice_extract_flag) {
+                /* msvdx_ctx->frame_boundary tells last command is frame end */
+		if (msvdx_cmd->msvdx_ctx->frame_boundary)
+			power_island_get(OSPM_VIDEO_DEC_ISLAND);
+
+		msvdx_cmd->msvdx_ctx->frame_end_seq = 0xffffffff;
+		msvdx_cmd->msvdx_ctx->frame_boundary = msvdx_cmd->frame_boundary;
+
+		if (msvdx_cmd->msvdx_ctx->frame_boundary)
+			msvdx_cmd->msvdx_ctx->frame_end_seq = msvdx_cmd->sequence & 0xffff;
+	}else
+#endif
+	power_island_get(OSPM_VIDEO_DEC_ISLAND);
+#endif
+
+	PSB_DEBUG_GENERAL("MSVDXQUE: Queue has id %08x\n", msvdx_cmd->sequence);
+
+	if (IS_MSVDX_MEM_TILE(dev) && drm_psb_msvdx_tiling)
+		psb_msvdx_set_tile(dev, msvdx_cmd->msvdx_tile);
+
+#ifdef CONFIG_VIDEO_MRFLD_EC
+	/* Seperate update frame and backup cmds because if a batch of cmds
+	 * doesn't have * host_be_opp message, no need to update frame info
+	 * but still need to backup cmds.
+	 * This case can happen if an batch of cmds is not the entire frame
+	*/
+	if (msvdx_cmd->host_be_opp_enabled) {
+		psb_msvdx_update_frame_info(msvdx_priv, msvdx_cmd->tfile,
+			msvdx_cmd->cmd + msvdx_cmd->deblock_cmd_offset);
+	}
+	psb_msvdx_backup_cmd(msvdx_priv, msvdx_cmd->tfile,
+			msvdx_cmd->cmd,
+			msvdx_cmd->cmd_size,
+			msvdx_cmd->deblock_cmd_offset);
+#endif
+	ret = psb_msvdx_send(dev, msvdx_cmd->cmd, msvdx_cmd->cmd_size, msvdx_cmd->msvdx_ctx);
+	if (ret) {
+		DRM_ERROR("MSVDXQUE: psb_msvdx_send failed\n");
+		ret = -EINVAL;
+	}
+
+	kfree(msvdx_cmd->cmd);
+	kfree(msvdx_cmd);
+
+	return ret;
+}
+
+void psb_msvdx_flush_cmd_queue(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	struct psb_msvdx_cmd_queue *msvdx_cmd;
+	struct list_head *list, *next;
+	struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
+	unsigned long irq_flags;
+	spin_lock_irqsave(&msvdx_priv->msvdx_lock, irq_flags);
+	/*Flush the msvdx cmd queue and signal all fences in the queue */
+	list_for_each_safe(list, next, &msvdx_priv->msvdx_queue) {
+		msvdx_cmd =
+			list_entry(list, struct psb_msvdx_cmd_queue, head);
+		list_del(list);
+		PSB_DEBUG_GENERAL("MSVDXQUE: flushing sequence:0x%08x\n",
+				  msvdx_cmd->sequence);
+		msvdx_priv->msvdx_current_sequence = msvdx_cmd->sequence;
+		psb_fence_error(dev, PSB_ENGINE_DECODE,
+				msvdx_cmd->sequence,
+				_PSB_FENCE_TYPE_EXE, DRM_CMD_HANG);
+		kfree(msvdx_cmd->cmd);
+		kfree(msvdx_cmd);
+	}
+	msvdx_priv->msvdx_busy = 0;
+	spin_unlock_irqrestore(&msvdx_priv->msvdx_lock, irq_flags);
+}
+
+static int psb_msvdx_map_command(struct drm_device *dev,
+				 struct ttm_buffer_object *cmd_buffer,
+				 unsigned long cmd_offset, unsigned long cmd_size,
+				 void **msvdx_cmd, uint32_t sequence, int copy_cmd,
+				 struct psb_video_ctx *msvdx_ctx)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
+	int ret = 0;
+	unsigned long cmd_page_offset = cmd_offset & ~PAGE_MASK;
+	unsigned long cmd_size_remaining;
+	struct ttm_bo_kmap_obj cmd_kmap;
+	void *cmd, *cmd_copy, *cmd_start;
+	bool is_iomem;
+	union msg_header *header;
+	uint32_t cur_cmd_size, cur_cmd_id, mmu_ptd = 0, msvdx_mmu_invalid = 0;
+	struct fw_decode_msg *decode_msg;
+	struct fw_deblock_msg *deblock_msg;
+
+	/* command buffers may not exceed page boundary */
+	if ((cmd_size > PAGE_SIZE) || (cmd_size + cmd_page_offset > PAGE_SIZE))
+		return -EINVAL;
+
+	ret = ttm_bo_kmap(cmd_buffer, cmd_offset >> PAGE_SHIFT, 1, &cmd_kmap);
+	if (ret) {
+		DRM_ERROR("MSVDXQUE:ret:%d\n", ret);
+		return ret;
+	}
+
+	cmd_start = (void *)ttm_kmap_obj_virtual(&cmd_kmap, &is_iomem)
+		    + cmd_page_offset;
+	cmd = cmd_start;
+	cmd_size_remaining = cmd_size;
+
+	msvdx_priv->host_be_opp_enabled = 0;
+	msvdx_priv->deblock_cmd_offset = PSB_MSVDX_INVALID_OFFSET;
+
+	while (cmd_size_remaining > 0) {
+		if (cmd_size_remaining < MTX_GENMSG_HEADER_SIZE) {
+			ret = -EINVAL;
+			goto out;
+		}
+		header = (union msg_header *)cmd;
+		cur_cmd_size = header->bits.msg_size;
+		cur_cmd_id = header->bits.msg_type;
+
+		PSB_DEBUG_GENERAL("cmd start at %p cur_cmd_size = %d"
+				  " cur_cmd_id = %02x fence = %08x\n",
+				  cmd, cur_cmd_size,
+				  cur_cmd_id, sequence);
+		if ((cur_cmd_size % sizeof(uint32_t))
+		    || (cur_cmd_size > cmd_size_remaining)) {
+			ret = -EINVAL;
+			DRM_ERROR("MSVDX: ret:%d\n", ret);
+			goto out;
+		}
+
+		switch (cur_cmd_id) {
+		case MTX_MSGID_DECODE_FE: {
+			if (sizeof(struct fw_decode_msg) > cmd_size_remaining) {
+				/* Msg size is not correct */
+				ret = -EINVAL;
+				PSB_DEBUG_MSVDX("MSVDX: wrong msg size.\n");
+				goto out;
+			}
+			decode_msg = (struct fw_decode_msg *)cmd;
+			decode_msg->header.bits.msg_fence = sequence;
+
+			mmu_ptd = psb_get_default_pd_addr(dev_priv->mmu);
+			msvdx_mmu_invalid = atomic_cmpxchg(&dev_priv->
+							   msvdx_mmu_invaldc,
+							   1, 0);
+			if (msvdx_mmu_invalid == 1) {
+				decode_msg->flag_size.bits.flags |=
+						FW_INVALIDATE_MMU;
+#ifdef CONFIG_MDFD_GL3
+				gl3_invalidate();
+#endif
+				PSB_DEBUG_GENERAL("MSVDX:Set MMU invalidate\n");
+			}
+			/*
+			if (msvdx_mmu_invalid == 1)
+				psb_mmu_pgtable_dump(dev);
+			*/
+
+			decode_msg->mmu_context.bits.mmu_ptd = mmu_ptd >> 8;
+			PSB_DEBUG_MSVDX("MSVDX: MSGID_DECODE_FE:"
+					  " - fence: %08x"
+					  " - flags: %08x - buffer_size: %08x"
+					  " - crtl_alloc_addr: %08x"
+					  " - context: %08x - mmu_ptd: %08x"
+					  " - operating_mode: %08x.\n",
+					  decode_msg->header.bits.msg_fence,
+					  decode_msg->flag_size.bits.flags,
+					  decode_msg->flag_size.bits.buffer_size,
+					  decode_msg->crtl_alloc_addr,
+					  decode_msg->mmu_context.bits.context,
+					  decode_msg->mmu_context.bits.mmu_ptd,
+					  decode_msg->operating_mode);
+			break;
+		}
+
+		case MTX_MSGID_HOST_BE_OPP_MFLD:
+			msvdx_priv->host_be_opp_enabled = 1;
+			msvdx_priv->deblock_cmd_offset =
+					cmd_size - cmd_size_remaining;
+		case MTX_MSGID_INTRA_OOLD_MFLD:
+		case MTX_MSGID_DEBLOCK_MFLD: {
+			if (sizeof(struct fw_deblock_msg) > cmd_size_remaining) {
+				/* Msg size is not correct */
+				ret = -EINVAL;
+				PSB_DEBUG_MSVDX("MSVDX: wrong msg size.\n");
+				goto out;
+			}
+			deblock_msg = (struct fw_deblock_msg *)cmd;
+			mmu_ptd = psb_get_default_pd_addr(dev_priv->mmu);
+			msvdx_mmu_invalid = atomic_cmpxchg(&dev_priv->
+							   msvdx_mmu_invaldc,
+							   1, 0);
+			if (msvdx_mmu_invalid == 1) {
+				deblock_msg->flag_type.bits.flags |=
+							FW_INVALIDATE_MMU;
+				PSB_DEBUG_GENERAL("MSVDX:Set MMU invalidate\n");
+			}
+
+			/* patch to right cmd type */
+			deblock_msg->header.bits.msg_type =
+					cur_cmd_id -
+					MTX_MSGID_DEBLOCK_MFLD +
+					MTX_MSGID_DEBLOCK;
+
+			deblock_msg->header.bits.msg_fence =
+				(uint16_t)(sequence & 0xffff);
+			deblock_msg->mmu_context.bits.mmu_ptd = (mmu_ptd >> 8);
+			PSB_DEBUG_MSVDX("MSVDX: MSGID_DEBLOCK:"
+				" - fence: %08x"
+				" - flags: %08x - slice_field_type: %08x"
+				" - operating_mode: %08x"
+				" - context: %08x - mmu_ptd: %08x"
+				" - frame_height_mb: %08x - pic_width_mb: %08x"
+				" - address_a0: %08x - address_a1: %08x"
+				" - mb_param_address: %08x"
+				" - ext_stride_a: %08x"
+				" - address_b0: %08x - address_b1: %08x"
+				" - alt_output_flags_b: %08x.\n",
+				deblock_msg->header.bits.msg_fence,
+				deblock_msg->flag_type.bits.flags,
+				deblock_msg->flag_type.bits.slice_field_type,
+				deblock_msg->operating_mode,
+				deblock_msg->mmu_context.bits.context,
+				deblock_msg->mmu_context.bits.mmu_ptd,
+				deblock_msg->pic_size.bits.frame_height_mb,
+				deblock_msg->pic_size.bits.pic_width_mb,
+				deblock_msg->address_a0,
+				deblock_msg->address_a1,
+				deblock_msg->mb_param_address,
+				deblock_msg->ext_stride_a,
+				deblock_msg->address_b0,
+				deblock_msg->address_b1,
+				deblock_msg->alt_output_flags_b);
+			cmd += (sizeof(struct fw_deblock_msg) - cur_cmd_size);
+			cmd_size_remaining -= (sizeof(struct fw_deblock_msg) -
+						cur_cmd_size);
+			break;
+		}
+
+#ifdef CONFIG_SLICE_HEADER_PARSING
+		/* VA_MSGID_NALU_EXTRACT start */
+		case MTX_MSGID_SLICE_HEADER_EXTRACT:
+		case MTX_MSGID_MODULAR_SLICE_HEADER_EXTRACT: {
+			struct fw_slice_header_extract_msg *extract_msg =
+				(struct fw_slice_header_extract_msg *)cmd;
+
+			PSB_DEBUG_MSVDX("send slice extract message.\n");
+
+			extract_msg->header.bits.msg_fence = sequence;
+
+			mmu_ptd = psb_get_default_pd_addr(dev_priv->mmu);
+			msvdx_mmu_invalid = atomic_cmpxchg(&dev_priv->msvdx_mmu_invaldc,
+							   1, 0);
+			if (msvdx_mmu_invalid == 1) {
+				extract_msg->flags.bits.flags |=
+						FW_INVALIDATE_MMU;
+#ifdef CONFIG_MDFD_GL3
+				gl3_invalidate();
+#endif
+				PSB_DEBUG_GENERAL("MSVDX:Set MMU invalidate\n");
+			}
+
+			extract_msg->mmu_context.bits.mmu_ptd = mmu_ptd >> 8;
+
+			PSB_DEBUG_MSVDX("Parse cmd msg size is %d,"
+				"type is 0x%x, fence is %d, flags is 0x%x, context is 0x%x,"
+				"mmu_ptd is 0x%x, src is 0x%x, src_size is %d, dst is 0x%x, dst_size is %d,"
+				"flag_bitfield is 0x%x, pic_param0 is 0x%x\n",
+
+			extract_msg->header.bits.msg_size,
+			extract_msg->header.bits.msg_type,
+			extract_msg->header.bits.msg_fence,
+			extract_msg->flags.bits.flags,
+			extract_msg->mmu_context.bits.context,
+			extract_msg->mmu_context.bits.mmu_ptd,
+			extract_msg->src,
+			extract_msg->src_size,
+			extract_msg->dst,
+			extract_msg->src_size,
+			extract_msg->flag_bitfield.value,
+			extract_msg->pic_param0.value);
+			break;
+		}
+		/* VA_MSGID_NALU_EXTRACT end */
+#endif
+		default:
+			/* Msg not supported */
+			ret = -EINVAL;
+			PSB_DEBUG_GENERAL("MSVDX: ret:%d\n", ret);
+			goto out;
+		}
+
+		cmd += cur_cmd_size;
+		cmd_size_remaining -= cur_cmd_size;
+		if (((sequence++) & 0xf) == 0xf) {
+			ret = -EINVAL;
+			PSB_DEBUG_GENERAL("MSVDX: too many cmds, abort\n");
+			goto out;
+		}
+	}
+
+	msvdx_priv->num_cmd = ((--sequence) & 0xf);
+
+	if (copy_cmd) {
+		PSB_DEBUG_GENERAL("MSVDXQUE:copying command\n");
+
+		cmd_copy = kzalloc(cmd_size, GFP_KERNEL);
+		if (cmd_copy == NULL) {
+			ret = -ENOMEM;
+			DRM_ERROR("MSVDX: fail to callc,ret=:%d\n", ret);
+			goto out;
+		}
+		memcpy(cmd_copy, cmd_start, cmd_size);
+		*msvdx_cmd = cmd_copy;
+	} else {
+		PSB_DEBUG_GENERAL("MSVDXQUE:did NOT copy command\n");
+		if (IS_MSVDX_MEM_TILE(dev) && drm_psb_msvdx_tiling) {
+			unsigned long msvdx_tile =
+				((msvdx_priv->msvdx_ctx->ctx_type >> 16) & 0xff);
+			psb_msvdx_set_tile(dev, msvdx_tile);
+		}
+
+#ifdef CONFIG_VIDEO_MRFLD_EC
+		if (msvdx_priv->host_be_opp_enabled) {
+			psb_msvdx_update_frame_info(msvdx_priv,
+				msvdx_priv->tfile,
+				cmd_start + msvdx_priv->deblock_cmd_offset);
+		}
+		psb_msvdx_backup_cmd(msvdx_priv, msvdx_priv->tfile,
+				cmd_start,
+				cmd_size,
+				msvdx_priv->deblock_cmd_offset);
+#endif
+		ret = psb_msvdx_send(dev, cmd_start, cmd_size, msvdx_ctx);
+		if (ret) {
+			DRM_ERROR("MSVDXQUE: psb_msvdx_send failed\n");
+			ret = -EINVAL;
+		}
+	}
+
+out:
+	ttm_bo_kunmap(&cmd_kmap);
+
+	return ret;
+}
+
+int psb__submit_cmdbuf_copy(struct drm_device *dev,
+			    struct ttm_buffer_object *cmd_buffer,
+			    unsigned long cmd_offset, unsigned long cmd_size,
+			    struct psb_video_ctx *msvdx_ctx, uint32_t fence_flag)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
+	struct psb_msvdx_cmd_queue *msvdx_cmd;
+	uint32_t sequence =  (dev_priv->sequence[PSB_ENGINE_DECODE] << 4);
+	unsigned long irq_flags;
+	void *cmd = NULL;
+	int ret;
+
+	/* queue the command to be sent when the h/w is ready */
+	PSB_DEBUG_GENERAL("MSVDXQUE: queueing sequence:%08x..\n",
+			  sequence);
+	msvdx_cmd = kzalloc(sizeof(struct psb_msvdx_cmd_queue),
+			    GFP_KERNEL);
+	if (msvdx_cmd == NULL) {
+		DRM_ERROR("MSVDXQUE: Out of memory...\n");
+		return -ENOMEM;
+	}
+
+	ret = psb_msvdx_map_command(dev, cmd_buffer, cmd_offset,
+				    cmd_size, &cmd, sequence, 1, msvdx_ctx);
+	if (ret) {
+		DRM_ERROR("MSVDXQUE: Failed to extract cmd\n");
+		kfree(msvdx_cmd
+		     );
+		return ret;
+	}
+	msvdx_cmd->cmd = cmd;
+	msvdx_cmd->cmd_size = cmd_size;
+	msvdx_cmd->sequence = sequence;
+
+	msvdx_cmd->msvdx_tile =
+		((msvdx_priv->msvdx_ctx->ctx_type >> 16) & 0xff);
+	msvdx_cmd->deblock_cmd_offset =
+		msvdx_priv->deblock_cmd_offset;
+	msvdx_cmd->host_be_opp_enabled =
+		msvdx_priv->host_be_opp_enabled;
+	msvdx_cmd->tfile =
+		msvdx_priv->tfile;
+	msvdx_cmd->msvdx_ctx = msvdx_ctx;
+#ifdef CONFIG_SLICE_HEADER_PARSING
+	msvdx_cmd->frame_boundary = fence_flag >> 1;
+#endif
+	spin_lock_irqsave(&msvdx_priv->msvdx_lock, irq_flags);
+	list_add_tail(&msvdx_cmd->head, &msvdx_priv->msvdx_queue);
+	spin_unlock_irqrestore(&msvdx_priv->msvdx_lock, irq_flags);
+	if (!msvdx_priv->msvdx_busy) {
+		msvdx_priv->msvdx_busy = 1;
+		PSB_DEBUG_GENERAL("MSVDXQUE: Need immediate dequeue\n");
+		psb_msvdx_dequeue_send(dev);
+	}
+
+	return ret;
+}
+
+int psb_submit_video_cmdbuf(struct drm_device *dev,
+			    struct ttm_buffer_object *cmd_buffer,
+			    unsigned long cmd_offset, unsigned long cmd_size,
+			    struct psb_video_ctx *msvdx_ctx, uint32_t fence_flag)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	uint32_t sequence =  (dev_priv->sequence[PSB_ENGINE_DECODE] << 4);
+	unsigned long irq_flags;
+	int ret = 0;
+	struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
+	int offset = 0;
+        bool pm_ret = 0;
+
+	if (!msvdx_priv->fw_b0_uploaded){
+#ifdef MERRIFIELD
+		if (IS_TNG_B0(dev) || IS_MOFD(dev))
+			tng_securefw(dev, "msvdx", "VED", TNG_IMR5L_MSG_REGADDR);
+		else {
+			DRM_ERROR("VED secure fw: bad platform\n");
+		}
+
+		/*  change fw_b0_uploaded name */
+		msvdx_priv->fw_b0_uploaded = 1;
+#endif
+	}
+
+	if (!msvdx_ctx) {
+		PSB_DEBUG_GENERAL("MSVDX: null ctx\n");
+		return -EFAULT;
+	}
+
+	spin_lock_irqsave(&msvdx_priv->msvdx_lock, irq_flags);
+
+	msvdx_priv->msvdx_ctx = msvdx_ctx;
+	msvdx_priv->last_msvdx_ctx = msvdx_priv->msvdx_ctx;
+
+	PSB_DEBUG_PM("sequence is 0x%x, needs_reset is 0x%x.\n",
+			sequence, msvdx_priv->msvdx_needs_reset);
+
+#ifdef MERRIFIELD
+	spin_unlock_irqrestore(&msvdx_priv->msvdx_lock, irq_flags);
+	/* get power island when submit cmd to hardware */
+#ifdef CONFIG_SLICE_HEADER_PARSING
+	if (msvdx_ctx->slice_extract_flag) {
+		if(msvdx_ctx->frame_boundary)
+			if (!(pm_ret = power_island_get(OSPM_VIDEO_DEC_ISLAND)))
+				return -EBUSY;
+	}else{
+		if (!(pm_ret = power_island_get(OSPM_VIDEO_DEC_ISLAND)))
+			return -EBUSY;
+	}
+#else
+	if (!(pm_ret = power_island_get(OSPM_VIDEO_DEC_ISLAND)))
+		return -EBUSY;
+#endif
+	spin_lock_irqsave(&msvdx_priv->msvdx_lock, irq_flags);
+#endif
+
+	if (msvdx_priv->msvdx_busy) {
+		spin_unlock_irqrestore(&msvdx_priv->msvdx_lock, irq_flags);
+		ret = psb__submit_cmdbuf_copy(dev, cmd_buffer,
+			    cmd_offset, cmd_size,
+			    msvdx_ctx, fence_flag);
+
+#ifdef MERRIFIELD
+		if (pm_ret)
+			power_island_put(OSPM_VIDEO_DEC_ISLAND);
+#endif
+		return ret;
+	}
+
+	if (msvdx_priv->msvdx_needs_reset) {
+		spin_unlock_irqrestore(&msvdx_priv->msvdx_lock, irq_flags);
+		PSB_DEBUG_GENERAL("MSVDX: will reset msvdx\n");
+
+		if (!msvdx_priv->fw_loaded_by_punit) {
+			if (psb_msvdx_core_reset(dev_priv)) {
+				ret = -EBUSY;
+				DRM_ERROR("MSVDX: Reset failed\n");
+				return ret;
+			}
+		}
+
+		msvdx_priv->msvdx_needs_reset = 0;
+		msvdx_priv->msvdx_busy = 0;
+
+		if (msvdx_priv->fw_loaded_by_punit){
+			ret = psb_msvdx_post_init(dev);
+			if (ret) {
+				ret = -EBUSY;
+#ifdef MERRIFIELD
+				power_island_put(OSPM_VIDEO_DEC_ISLAND);
+#endif
+				PSB_DEBUG_WARN("WARN: psb_msvdx_post_init failed.\n");
+				return ret;
+			}
+		}
+		else{
+			if (psb_msvdx_init(dev)) {
+				ret = -EBUSY;
+				PSB_DEBUG_WARN("WARN: psb_msvdx_init failed.\n");
+				return ret;
+			}
+		}
+
+#ifdef CONFIG_VIDEO_MRFLD_EC
+		/* restore the state when power up during EC */
+		if (msvdx_priv->vec_ec_mem_saved) {
+			for (offset = 0; offset < 4; ++offset)
+				PSB_WMSVDX32(msvdx_priv->vec_ec_mem_data[offset],
+					     0x2cb0 + offset * 4);
+
+			PSB_WMSVDX32(msvdx_priv->vec_ec_mem_data[4],
+				     0x2cc4);
+			msvdx_priv->vec_ec_mem_saved = 0;
+		}
+#endif
+
+		spin_lock_irqsave(&msvdx_priv->msvdx_lock, irq_flags);
+	}
+
+	if (msvdx_priv->fw_loaded_by_punit && !msvdx_priv->rendec_init) {
+		spin_unlock_irqrestore(&msvdx_priv->msvdx_lock, irq_flags);
+		PSB_DEBUG_GENERAL("MSVDX:setup msvdx.\n");
+		ret = psb_msvdx_post_boot_init(dev);
+		if (ret) {
+			DRM_ERROR("MSVDX:fail to setup msvdx.\n");
+			/* FIXME: find a proper return value */
+			return -EFAULT;
+		}
+		msvdx_priv->rendec_init = 1;
+
+		PSB_DEBUG_GENERAL("MSVDX: setup msvdx successfully\n");
+		spin_lock_irqsave(&msvdx_priv->msvdx_lock, irq_flags);
+	}
+
+	if (!msvdx_priv->fw_loaded_by_punit && !msvdx_priv->msvdx_fw_loaded) {
+		spin_unlock_irqrestore(&msvdx_priv->msvdx_lock, irq_flags);
+		PSB_DEBUG_GENERAL("MSVDX:reload FW to MTX\n");
+		ret = psb_setup_fw(dev);
+		if (ret) {
+			DRM_ERROR("MSVDX:fail to load FW\n");
+			/* FIXME: find a proper return value */
+			return -EFAULT;
+		}
+		msvdx_priv->msvdx_fw_loaded = 1;
+
+		PSB_DEBUG_GENERAL("MSVDX: load firmware successfully\n");
+		spin_lock_irqsave(&msvdx_priv->msvdx_lock, irq_flags);
+	}
+
+	msvdx_priv->msvdx_busy = 1;
+#ifdef CONFIG_SLICE_HEADER_PARSING
+	if (msvdx_ctx->slice_extract_flag){
+		msvdx_ctx->frame_boundary = fence_flag >> 1;
+		msvdx_ctx->frame_end_seq = 0xffffffff;
+		if (msvdx_ctx->frame_boundary)
+			msvdx_ctx->frame_end_seq = sequence & 0xffff;
+	}
+#endif
+	spin_unlock_irqrestore(&msvdx_priv->msvdx_lock, irq_flags);
+	PSB_DEBUG_GENERAL("MSVDX: commit command to HW,seq=0x%08x\n",
+			  sequence);
+	ret = psb_msvdx_map_command(dev, cmd_buffer, cmd_offset,
+				    cmd_size, NULL, sequence, 0, msvdx_ctx);
+	if (ret)
+		DRM_ERROR("MSVDXQUE: Failed to extract cmd\n");
+
+	return ret;
+}
+
+int psb_cmdbuf_video(struct drm_file *priv,
+		     struct list_head *validate_list,
+		     uint32_t fence_type,
+		     struct drm_psb_cmdbuf_arg *arg,
+		     struct ttm_buffer_object *cmd_buffer,
+		     struct psb_ttm_fence_rep *fence_arg,
+		     struct psb_video_ctx *msvdx_ctx)
+{
+	struct drm_device *dev = priv->minor->dev;
+	struct ttm_fence_object *fence;
+	int ret;
+
+	/*
+	 * Check this. Doesn't seem right. Have fencing done AFTER command
+	 * submission and make sure drm_psb_idle idles the MSVDX completely.
+	 */
+	ret = psb_submit_video_cmdbuf(dev, cmd_buffer, arg->cmdbuf_offset,
+					arg->cmdbuf_size, msvdx_ctx, arg->fence_flags);
+	if (ret)
+		return ret;
+
+
+	/* DRM_ERROR("Intel: Fix video fencing!!\n"); */
+	psb_fence_or_sync(priv, PSB_ENGINE_DECODE, fence_type,
+			  arg->fence_flags, validate_list, fence_arg,
+			  &fence);
+
+	ttm_fence_object_unref(&fence);
+	spin_lock(&cmd_buffer->bdev->fence_lock);
+	if (cmd_buffer->sync_obj != NULL)
+		ttm_fence_sync_obj_unref(&cmd_buffer->sync_obj);
+	spin_unlock(&cmd_buffer->bdev->fence_lock);
+
+	return 0;
+}
+
+
+static int psb_msvdx_send(struct drm_device *dev, void *cmd,
+			  unsigned long cmd_size, struct psb_video_ctx *msvdx_ctx)
+{
+	int ret = 0;
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	union msg_header *header;
+	uint32_t cur_sequence = 0xffffffff;
+	uint32_t cur_cmd_size, cur_cmd_id;
+
+	while (cmd_size > 0) {
+		header = (union msg_header *)cmd;
+		cur_cmd_size = header->bits.msg_size;
+		cur_cmd_id = header->bits.msg_type;
+
+		cur_sequence = ((struct fw_msg_header *)cmd)->header.bits.msg_fence;
+
+		if (cur_sequence != 0xffffffff) {
+			msvdx_ctx->cur_sequence = cur_sequence;
+		}
+
+		if (cur_cmd_size > cmd_size) {
+			ret = -EINVAL;
+			DRM_ERROR("MSVDX:cmd_size %lu cur_cmd_size %lu\n",
+				  cmd_size, (unsigned long)cur_cmd_size);
+			goto out;
+		}
+
+		/* Send the message to h/w */
+		ret = psb_mtx_send(dev_priv, cmd);
+		if (ret) {
+			PSB_DEBUG_GENERAL("MSVDX: ret:%d\n", ret);
+			goto out;
+		}
+		cmd += cur_cmd_size;
+		cmd_size -= cur_cmd_size;
+		if (cur_cmd_id == MTX_MSGID_HOST_BE_OPP ||
+			cur_cmd_id == MTX_MSGID_DEBLOCK ||
+			cur_cmd_id == MTX_MSGID_INTRA_OOLD) {
+			cmd += (sizeof(struct fw_deblock_msg) - cur_cmd_size);
+			cmd_size -=
+				(sizeof(struct fw_deblock_msg) - cur_cmd_size);
+		}
+	}
+out:
+	PSB_DEBUG_GENERAL("MSVDX: ret:%d\n", ret);
+	return ret;
+}
+
+int psb_mtx_send(struct drm_psb_private *dev_priv, const void *msg)
+{
+	static struct fw_padding_msg pad_msg;
+	const uint32_t *p_msg = (uint32_t *) msg;
+	uint32_t msg_num, words_free, ridx, widx, buf_size, buf_offset;
+	int ret = 0;
+	union msg_header *header;
+	header = (union msg_header *)msg;
+
+	PSB_DEBUG_GENERAL("MSVDX: psb_mtx_send\n");
+
+	/* we need clocks enabled before we touch VEC local ram,
+	 * but fw will take care of the clock after fw is loaded
+	 */
+
+	msg_num = (header->bits.msg_size + 3) / 4;
+
+#if 0
+	{
+		int i;
+		printk(KERN_DEBUG "MSVDX: psb_mtx_send is %dDW\n",
+		       msg_num);
+
+		for (i = 0; i < msg_num; i++)
+			printk(KERN_DEBUG "0x%08x ", p_msg[i]);
+		printk(KERN_DEBUG "\n");
+	}
+#endif
+	buf_size = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_BUF_SIZE) & ((1 << 16) - 1);
+
+	if (msg_num > buf_size) {
+		ret = -EINVAL;
+		DRM_ERROR("MSVDX: message exceed maximum,ret:%d\n", ret);
+		goto out;
+	}
+
+	ridx = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_RD_INDEX);
+	widx = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_WRT_INDEX);
+
+
+	buf_size = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_BUF_SIZE) & ((1 << 16) - 1);
+	/*0x2000 is VEC Local Ram offset*/
+	buf_offset =
+		(PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_BUF_SIZE) >> 16) + 0x2000;
+
+	/* message would wrap, need to send a pad message */
+	if (widx + msg_num > buf_size) {
+		/* Shouldn't happen for a PAD message itself */
+		if (header->bits.msg_type == MTX_MSGID_PADDING)
+			DRM_INFO("MSVDX WARNING: should not wrap pad msg, "
+				"buf_size is %d, widx is %d, msg_num is %d.\n",
+				buf_size, widx, msg_num);
+
+		/* if the read pointer is at zero then we must wait for it to
+		 * change otherwise the write pointer will equal the read
+		 * pointer,which should only happen when the buffer is empty
+		 *
+		 * This will only happens if we try to overfill the queue,
+		 * queue management should make
+		 * sure this never happens in the first place.
+		 */
+		if (0 == ridx) {
+			ret = -EINVAL;
+			DRM_ERROR("MSVDX: RIndex=0, ret:%d\n", ret);
+			goto out;
+		}
+
+		/* Send a pad message */
+		pad_msg.header.bits.msg_size = (buf_size - widx) << 2;
+		pad_msg.header.bits.msg_type = MTX_MSGID_PADDING;
+		psb_mtx_send(dev_priv, (void *)&pad_msg);
+		widx = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_WRT_INDEX);
+	}
+
+	if (widx >= ridx)
+		words_free = buf_size - (widx - ridx) - 1;
+	else
+		words_free = ridx - widx - 1;
+
+	if (msg_num > words_free) {
+		ret = -EINVAL;
+		DRM_ERROR("MSVDX: msg_num > words_free, ret:%d\n", ret);
+		goto out;
+	}
+	while (msg_num > 0) {
+		PSB_WMSVDX32(*p_msg++, buf_offset + (widx << 2));
+		msg_num--;
+		widx++;
+		if (buf_size == widx)
+			widx = 0;
+	}
+
+	PSB_WMSVDX32(widx, MSVDX_COMMS_TO_MTX_WRT_INDEX);
+
+	/* Make sure clocks are enabled before we kick
+	 * but fw will take care of the clock after fw is loaded
+	 */
+
+	/* signal an interrupt to let the mtx know there is a new message */
+	PSB_WMSVDX32(1, MTX_KICK_INPUT_OFFSET);
+
+out:
+	return ret;
+}
+
+/*
+ * MSVDX MTX interrupt
+ */
+
+static void psb_msvdx_mtx_interrupt(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	static uint32_t buf[128]; /* message buffer */
+	uint32_t ridx, widx, buf_size, buf_offset;
+	uint32_t num, ofs; /* message num and offset */
+	struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
+	int i;
+	union msg_header *header;
+	int cmd_complete = 0;
+	struct psb_msvdx_ec_ctx *msvdx_ec_ctx = NULL;
+
+	PSB_DEBUG_GENERAL("MSVDX:Got a MSVDX MTX interrupt\n");
+
+	/* we need clocks enabled before we touch VEC local ram,
+	 * but fw will take care of the clock after fw is loaded
+	 */
+
+loop: /* just for coding style check */
+	ridx = PSB_RMSVDX32(MSVDX_COMMS_TO_HOST_RD_INDEX);
+	widx = PSB_RMSVDX32(MSVDX_COMMS_TO_HOST_WRT_INDEX);
+
+	/* Get out of here if nothing */
+	if (ridx == widx)
+		goto done;
+
+	buf_size = PSB_RMSVDX32(MSVDX_COMMS_TO_HOST_BUF_SIZE) & ((1 << 16) - 1);
+	/*0x2000 is VEC Local Ram offset*/
+	buf_offset =
+		(PSB_RMSVDX32(MSVDX_COMMS_TO_HOST_BUF_SIZE) >> 16) + 0x2000;
+
+	ofs = 0;
+	buf[ofs] = PSB_RMSVDX32(buf_offset + (ridx << 2));
+	header = (union msg_header *)buf;
+
+	/* round to nearest word */
+	num = (header->bits.msg_size + 3) / 4;
+
+	/* ASSERT(num <= sizeof(buf) / sizeof(uint32_t)); */
+
+	if (++ridx >= buf_size)
+		ridx = 0;
+
+	for (ofs++; ofs < num; ofs++) {
+		buf[ofs] = PSB_RMSVDX32(buf_offset + (ridx << 2));
+
+		if (++ridx >= buf_size)
+			ridx = 0;
+	}
+
+	/* Update the Read index */
+	PSB_WMSVDX32(ridx, MSVDX_COMMS_TO_HOST_RD_INDEX);
+
+	if (msvdx_priv->msvdx_needs_reset)
+		goto loop;
+
+	switch (header->bits.msg_type) {
+	case MTX_MSGID_HW_PANIC: {
+		/* For VXD385 firmware, fence value is not validate here */
+		uint32_t diff = 0;
+		uint32_t fence, last_mb;
+		drm_psb_msvdx_frame_info_t *failed_frame = NULL;
+
+		struct fw_panic_msg *panic_msg = (struct fw_panic_msg *)buf;
+
+		cmd_complete = 1;
+
+		PSB_DEBUG_WARN("MSVDX: MSGID_CMD_HW_PANIC:"
+				  "Fault detected"
+				  " - Fence: %08x"
+				  " - fe_status mb: %08x"
+				  " - be_status mb: %08x"
+				  " - reserved2: %08x"
+				  " - last mb: %08x"
+				  " - resetting and ignoring error\n",
+				  panic_msg->header.bits.msg_fence,
+				  panic_msg->fe_status,
+				  panic_msg->be_status,
+				  panic_msg->mb.bits.reserved2,
+				  panic_msg->mb.bits.last_mb);
+		/* If bit 8 of MSVDX_INTERRUPT_STATUS is set the fault was caused in the DMAC.
+		 * In this case you should check bits 20:22 of MSVDX_INTERRUPT_STATUS.
+		 * If bit 20 is set there was a problem DMAing the buffer back to host.
+		 * If bit 22 is set you'll need to get the value of MSVDX_DMAC_STREAM_STATUS (0x648).
+		 * If bit 1 is set then there was an issue DMAing the bitstream or termination code for parsing.*/
+		PSB_DEBUG_MSVDX("MSVDX: MSVDX_COMMS_ERROR_TRIG is 0x%x,"
+				"MSVDX_INTERRUPT_STATUS is 0x%x,"
+				"MSVDX_MMU_STATUS is 0x%x,"
+				"MSVDX_DMAC_STREAM_STATUS is 0x%x.\n",
+				PSB_RMSVDX32(MSVDX_COMMS_ERROR_TRIG),
+				PSB_RMSVDX32(MSVDX_INTERRUPT_STATUS_OFFSET),
+				PSB_RMSVDX32(MSVDX_MMU_STATUS_OFFSET),
+				PSB_RMSVDX32(MSVDX_DMAC_STREAM_STATUS_OFFSET));
+
+		fence = panic_msg->header.bits.msg_fence;
+		last_mb = panic_msg->mb.bits.last_mb;
+
+		if (msvdx_priv->fw_loaded_by_punit)
+			msvdx_priv->msvdx_needs_reset |= MSVDX_RESET_NEEDS_REUPLOAD_FW |
+				MSVDX_RESET_NEEDS_INIT_FW;
+		else
+			msvdx_priv->msvdx_needs_reset = 1;
+
+		diff = msvdx_priv->msvdx_current_sequence
+		       - dev_priv->sequence[PSB_ENGINE_DECODE];
+
+		if (diff > 0x0FFFFFFF)
+			msvdx_priv->msvdx_current_sequence++;
+
+		PSB_DEBUG_WARN("MSVDX: Fence ID missing, "
+				  "assuming %08x\n",
+				  msvdx_priv->msvdx_current_sequence);
+
+		psb_fence_error(dev, PSB_ENGINE_DECODE,
+				msvdx_priv->msvdx_current_sequence,
+				_PSB_FENCE_TYPE_EXE, DRM_CMD_FAILED);
+
+		/* Flush the command queue */
+		psb_msvdx_flush_cmd_queue(dev);
+		if (msvdx_priv->host_be_opp_enabled) {
+			/*get the frame_info struct for error concealment frame*/
+			for (i = 0; i < MAX_DECODE_BUFFERS; i++) {
+				/*by default the fence is 0, so there is problem here???*/
+				if (msvdx_priv->frame_info[i].fence == fence) {
+					failed_frame = &msvdx_priv->frame_info[i];
+					break;
+				}
+			}
+			if (!failed_frame) {
+				DRM_ERROR("MSVDX: didn't find frame_info which matched the fence %d in failed/panic message\n", fence);
+				goto done;
+			}
+
+			failed_frame->fw_status = 1; /* set ERROR flag */
+		}
+		msvdx_priv->decoding_err = 1;
+
+		goto done;
+	}
+
+	case MTX_MSGID_COMPLETED: {
+		uint32_t fence, flags;
+		struct fw_completed_msg *completed_msg =
+					(struct fw_completed_msg *)buf;
+		struct psb_video_ctx *msvdx_ctx;
+
+		PSB_DEBUG_GENERAL("MSVDX: MSGID_CMD_COMPLETED:"
+			" - Fence: %08x - flags: %08x - vdebcr: %08x"
+			" - first_mb : %d - last_mb: %d\n",
+			completed_msg->header.bits.msg_fence,
+			completed_msg->flags, completed_msg->vdebcr,
+			completed_msg->mb.bits.start_mb, completed_msg->mb.bits.last_mb);
+
+		cmd_complete = 1;
+
+		flags = completed_msg->flags;
+		fence = completed_msg->header.bits.msg_fence;
+
+		msvdx_priv->msvdx_current_sequence = fence;
+
+		if (IS_MRFLD(dev))
+			psb_msvdx_fw_error_detected(dev, fence, flags);
+
+		psb_fence_handler(dev, PSB_ENGINE_DECODE);
+
+		msvdx_ctx = psb_msvdx_find_ctx(dev_priv, fence);
+		if (unlikely(msvdx_ctx == NULL)) {
+			PSB_DEBUG_GENERAL("abnormal complete msg\n");
+			cmd_complete = 0;
+		}
+		else {
+#ifdef CONFIG_SLICE_HEADER_PARSING
+			msvdx_ctx->frame_end= psb_msvdx_protected_frame_finished(dev_priv, msvdx_ctx, fence);
+#endif
+		}
+
+		if (flags & FW_VA_RENDER_HOST_INT) {
+			/*Now send the next command from the msvdx cmd queue */
+#ifndef CONFIG_DRM_VXD_BYT
+			if (!(IS_MRFLD(dev)) ||
+				drm_msvdx_pmpolicy == PSB_PMPOLICY_NOPM)
+#endif
+				psb_msvdx_dequeue_send(dev);
+			goto done;
+		}
+
+		break;
+	}
+
+	case MTX_MSGID_CONTIGUITY_WARNING: {
+		drm_psb_msvdx_decode_status_t *fault_region = NULL;
+		uint32_t reg_idx, fence, start, end;
+		int found = 0;
+
+		struct fw_contiguity_msg *contiguity_msg =
+					(struct fw_contiguity_msg *)buf;
+
+		PSB_DEBUG_GENERAL("MSVDX: MSGID_CONTIGUITY_WARNING:");
+		PSB_DEBUG_GENERAL(
+			"- Fence: %08x - end_mb: %08x - begin_mb: %08x\n",
+			contiguity_msg->header.bits.msg_fence,
+			contiguity_msg->mb.bits.end_mb_num,
+			contiguity_msg->mb.bits.begin_mb_num);
+
+		/*get erro info*/
+		fence = contiguity_msg->header.bits.msg_fence;
+		start = contiguity_msg->mb.bits.begin_mb_num;
+		end = contiguity_msg->mb.bits.end_mb_num;
+
+		/*get the frame_info struct for error concealment frame*/
+		for (i = 0; i < PSB_MAX_EC_INSTANCE; i++)
+			if (msvdx_priv->msvdx_ec_ctx[i]->fence ==
+							(fence & (~0xf))) {
+				msvdx_ec_ctx = msvdx_priv->msvdx_ec_ctx[i];
+				found++;
+			}
+		/* psb_msvdx_mtx_message_dump(dev); */
+		if (!msvdx_ec_ctx || !(msvdx_ec_ctx->tfile) || found > 1) {
+			PSB_DEBUG_MSVDX(
+				"no matched ctx: fence 0x%x, found %d, ctx %p\n",
+				fence, found, msvdx_ec_ctx);
+			goto done;
+		}
+
+
+		fault_region = &msvdx_ec_ctx->decode_status;
+		if (start > end)
+			start = end;
+		if (start < PSB_MSVDX_EC_ROLLBACK)
+			start = 0;
+		else
+			start -= PSB_MSVDX_EC_ROLLBACK;
+
+		if (fault_region->num_region) {
+			reg_idx = fault_region->num_region - 1;
+			if ((start <= fault_region->mb_regions[reg_idx].end) &&
+			    (end > fault_region->mb_regions[reg_idx].end)) {
+				fault_region->mb_regions[reg_idx].end = end;
+				if (msvdx_ec_ctx->cur_frame_info) {
+					msvdx_ec_ctx->cur_frame_info->decode_status.mb_regions[reg_idx].end = end;
+				}
+			}
+			else {
+				reg_idx = fault_region->num_region++;
+				if (unlikely(reg_idx >=
+					MAX_SLICES_PER_PICTURE)) {
+					PSB_DEBUG_MSVDX(
+						"too many fault regions\n");
+					break;
+				}
+				fault_region->mb_regions[reg_idx].start = start;
+				fault_region->mb_regions[reg_idx].end = end;
+				if (msvdx_ec_ctx->cur_frame_info) {
+					msvdx_ec_ctx->cur_frame_info->decode_status.num_region = fault_region->num_region;
+					msvdx_ec_ctx->cur_frame_info->decode_status.mb_regions[reg_idx].start = start;
+					msvdx_ec_ctx->cur_frame_info->decode_status.mb_regions[reg_idx].end = end;
+				}
+			}
+		} else {
+			fault_region->num_region++;
+			fault_region->mb_regions[0].start = start;
+			fault_region->mb_regions[0].end = end;
+			if (msvdx_ec_ctx->cur_frame_info) {
+				msvdx_ec_ctx->cur_frame_info->decode_status.num_region = fault_region->num_region;
+				msvdx_ec_ctx->cur_frame_info->decode_status.mb_regions[0].start = start;
+				msvdx_ec_ctx->cur_frame_info->decode_status.mb_regions[0].end = end;
+			}
+		}
+
+		break;
+
+	}
+
+	case MTX_MSGID_DEBLOCK_REQUIRED: {
+		struct fw_deblock_required_msg *deblock_required_msg =
+			(struct fw_deblock_required_msg *)buf;
+		uint32_t fence;
+		int found = 0;
+
+		fence = deblock_required_msg->header.bits.msg_fence;
+		PSB_DEBUG_GENERAL(
+		    "MSVDX: MTX_MSGID_DEBLOCK_REQUIRED Fence=%08x\n", fence);
+
+		PSB_DEBUG_MSVDX("Get deblock required msg for ec\n");
+		for (i = 0; i < PSB_MAX_EC_INSTANCE; i++)
+			if (msvdx_priv->msvdx_ec_ctx[i]->fence
+						== (fence & (~0xf))) {
+				msvdx_ec_ctx =
+					msvdx_priv->msvdx_ec_ctx[i];
+				found++;
+			}
+		/* if found > 1, fence wrapping happens */
+		if (!msvdx_ec_ctx ||
+		    !(msvdx_ec_ctx->tfile) || found > 1) {
+			PSB_DEBUG_MSVDX(
+				"no matched ctx: fence 0x%x, found %d, ctx %p\n",
+				fence, found, msvdx_ec_ctx);
+			PSB_WMSVDX32(0, MSVDX_CMDS_END_SLICE_PICTURE_OFFSET + MSVDX_CMDS_BASE);
+			PSB_WMSVDX32(1, MSVDX_CMDS_END_SLICE_PICTURE_OFFSET + MSVDX_CMDS_BASE);
+			goto done;
+		}
+
+		msvdx_ec_ctx->cur_frame_info->fw_status = 1;
+		msvdx_priv->cur_msvdx_ec_ctx = msvdx_ec_ctx;
+
+		/*do error concealment with hw*/
+		schedule_work(&msvdx_priv->ec_work);
+		break;
+	}
+#ifdef CONFIG_SLICE_HEADER_PARSING
+	/* extract done msg didn't return the msg id, which is not reasonable */
+	case MTX_MSGID_SLICE_HEADER_EXTRACT_DONE: {
+		struct fw_slice_header_extract_done_msg *extract_done_msg =
+			(struct fw_slice_header_extract_done_msg *)buf;
+
+		PSB_DEBUG_GENERAL("MSVDX:FW_VA_NALU_EXTRACT_DONE: "
+			"extract msg size: %d, extract msg type is: %d.\n",
+			extract_done_msg->header.bits.msg_size,
+			extract_done_msg->header.bits.msg_type);
+		break;
+	}
+#endif
+	default:
+		DRM_ERROR("ERROR: msvdx Unknown message from MTX, ID:0x%08x\n", header->bits.msg_type);
+		goto done;
+	}
+
+done:
+	PSB_DEBUG_GENERAL("MSVDX Interrupt: finish process a message\n");
+	if (ridx != widx) {
+		PSB_DEBUG_GENERAL("MSVDX Interrupt: there are more message to be read\n");
+		goto loop;
+	}
+
+	if (drm_msvdx_pmpolicy == PSB_PMPOLICY_NOPM ||
+			(IS_MDFLD(dev) && (msvdx_priv->msvdx_busy)) ||
+			(IS_MRFLD(dev) && !cmd_complete)) {
+		DRM_MEMORYBARRIER();	/* TBD check this... */
+		return;
+	}
+
+	/* we get a frame/slice done, try to save some power */
+	PSB_DEBUG_PM("MSVDX: schedule bottom half to\n"
+		"suspend msvdx, current sequence is 0x%x.\n",
+		msvdx_priv->msvdx_current_sequence);
+
+	if (drm_msvdx_bottom_half == PSB_BOTTOM_HALF_WQ)
+		schedule_delayed_work(
+			&msvdx_priv->msvdx_suspend_wq, 0);
+	else if (drm_msvdx_bottom_half == PSB_BOTTOM_HALF_TQ)
+		tasklet_hi_schedule(&msvdx_priv->msvdx_tasklet);
+	else
+		PSB_DEBUG_PM("MSVDX: Unknown Bottom Half\n");
+
+	DRM_MEMORYBARRIER();	/* TBD check this... */
+}
+
+/*
+ * MSVDX interrupt.
+ */
+int psb_msvdx_interrupt(void *pvData)
+{
+	struct drm_device *dev;
+	struct drm_psb_private *dev_priv;
+	struct msvdx_private *msvdx_priv;
+	uint32_t msvdx_stat;
+	struct saved_history_record *precord = NULL;
+
+	if (pvData == NULL) {
+		DRM_ERROR("ERROR: msvdx %s, Invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	dev = (struct drm_device *)pvData;
+
+	dev_priv = psb_priv(dev);
+
+	msvdx_priv = dev_priv->msvdx_private;
+#ifndef CONFIG_DRM_VXD_BYT
+	msvdx_priv->msvdx_hw_busy = REG_READ(0x20D0) & (0x1 << 9);
+#endif
+	msvdx_stat = PSB_RMSVDX32(MSVDX_INTERRUPT_STATUS_OFFSET);
+
+	precord = get_new_history_record();
+	if (precord) {
+		precord->type = 4;
+		precord->record_value.msvdx_stat = msvdx_stat;
+	}
+
+	/* driver only needs to handle mtx irq
+	 * For MMU fault irq, there's always a HW PANIC generated
+	 * if HW/FW is totally hang, the lockup function will handle
+	 * the reseting
+	 */
+	if (msvdx_stat & MSVDX_INTERRUPT_STATUS_MMU_FAULT_IRQ_MASK) {
+		/*Ideally we should we should never get to this */
+		PSB_DEBUG_IRQ("MSVDX:MMU Fault:0x%x\n", msvdx_stat);
+
+		/* Pause MMU */
+		PSB_WMSVDX32(MSVDX_MMU_CONTROL0_MMU_PAUSE_MASK,
+			     MSVDX_MMU_CONTROL0_OFFSET);
+		DRM_WRITEMEMORYBARRIER();
+
+		/* Clear this interupt bit only */
+		PSB_WMSVDX32(MSVDX_INTERRUPT_STATUS_MMU_FAULT_IRQ_MASK,
+			     MSVDX_INTERRUPT_CLEAR_OFFSET);
+		PSB_RMSVDX32(MSVDX_INTERRUPT_CLEAR_OFFSET);
+		DRM_READMEMORYBARRIER();
+
+		msvdx_priv->msvdx_needs_reset = 1;
+	} else if (msvdx_stat & MSVDX_INTERRUPT_STATUS_MTX_IRQ_MASK) {
+		PSB_DEBUG_IRQ
+			("MSVDX: msvdx_stat: 0x%x(MTX)\n", msvdx_stat);
+
+		/* Clear all interupt bits */
+		if (msvdx_priv->fw_loaded_by_punit)
+			PSB_WMSVDX32(MSVDX_INTERRUPT_STATUS_MTX_IRQ_MASK,
+				     MSVDX_INTERRUPT_CLEAR_OFFSET);
+		else
+			PSB_WMSVDX32(0xffff, MSVDX_INTERRUPT_CLEAR_OFFSET);
+
+		PSB_RMSVDX32(MSVDX_INTERRUPT_CLEAR_OFFSET);
+		DRM_READMEMORYBARRIER();
+
+		psb_msvdx_mtx_interrupt(dev);
+	}
+
+	return 0;
+}
+
+#if 0
+void psb_msvdx_lockup(struct drm_psb_private *dev_priv,
+		      int *msvdx_lockup, int *msvdx_idle)
+{
+	int diff;
+	struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
+
+	*msvdx_lockup = 0;
+	*msvdx_idle = 1;
+
+	PSB_DEBUG_GENERAL("MSVDXTimer: current_sequence:%d "
+			  "last_sequence:%d and last_submitted_sequence :%d\n",
+			  msvdx_priv->msvdx_current_sequence,
+			  msvdx_priv->msvdx_last_sequence,
+			  dev_priv->sequence[PSB_ENGINE_DECODE]);
+
+	diff = msvdx_priv->msvdx_current_sequence -
+	       dev_priv->sequence[PSB_ENGINE_DECODE];
+
+	if (diff > 0x0FFFFFFF) {
+		if (msvdx_priv->msvdx_current_sequence ==
+		    msvdx_priv->msvdx_last_sequence) {
+			DRM_ERROR("MSVDXTimer:locked-up for sequence:%d\n",
+				  msvdx_priv->msvdx_current_sequence);
+			*msvdx_lockup = 1;
+		} else {
+			PSB_DEBUG_GENERAL("MSVDXTimer: "
+					  "msvdx responded fine so far\n");
+			msvdx_priv->msvdx_last_sequence =
+				msvdx_priv->msvdx_current_sequence;
+			*msvdx_idle = 0;
+		}
+	}
+}
+#endif
+
+int psb_check_msvdx_idle(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
+	uint32_t loop, ret;
+
+	if (msvdx_priv->fw_loaded_by_punit && msvdx_priv->rendec_init == 0)
+		return 0;
+
+	if (!msvdx_priv->fw_loaded_by_punit && msvdx_priv->msvdx_fw_loaded == 0)
+		return 0;
+
+	if (msvdx_priv->msvdx_busy) {
+		PSB_DEBUG_PM("MSVDX: msvdx_busy was set, return busy.\n");
+		return -EBUSY;
+	}
+
+	if (msvdx_priv->fw_loaded_by_punit) {
+		if (!(PSB_RMSVDX32(MSVDX_COMMS_FW_STATUS) &
+					MSVDX_FW_STATUS_HW_IDLE)) {
+			PSB_DEBUG_PM("MSVDX_COMMS_SIGNATURE reg is 0x%x,\n"
+				"MSVDX_COMMS_FW_STATUS reg is 0x%x,\n"
+				"indicate hw is busy.\n",
+				PSB_RMSVDX32(MSVDX_COMMS_SIGNATURE),
+				PSB_RMSVDX32(MSVDX_COMMS_FW_STATUS));
+			return -EBUSY;
+		}
+	}
+
+	/* on some cores below 50502, there is one instance that
+	 * read requests may not go to zero is in the case of a page fault,
+	 * check core revision by reg MSVDX_CORE_REV, 385 core is 0x20001
+	 * check if mmu page fault happend by reg MSVDX_INTERRUPT_STATUS,
+	 * check was it a page table rather than a protection fault
+	 * by reg MSVDX_MMU_STATUS, for such case,
+	 * need call psb_msvdx_core_reset as the work around */
+	if ((PSB_RMSVDX32(MSVDX_CORE_REV_OFFSET) < 0x00050502) &&
+		(PSB_RMSVDX32(MSVDX_INTERRUPT_STATUS_OFFSET)
+			& MSVDX_INTERRUPT_STATUS_MMU_FAULT_IRQ_MASK) &&
+		(PSB_RMSVDX32(MSVDX_MMU_STATUS_OFFSET) & 1)) {
+		PSB_DEBUG_WARN("mmu page fault, recover by core_reset.\n");
+		return 0;
+	}
+
+	/* check MSVDX_MMU_MEM_REQ to confirm there's no memory requests */
+	for (loop = 0; loop < 10; loop++)
+		ret = psb_wait_for_register(dev_priv,
+					MSVDX_MMU_MEM_REQ_OFFSET,
+					0, 0xff, 100, 1);
+	if (ret) {
+		PSB_DEBUG_WARN("MSVDX: MSVDX_MMU_MEM_REQ reg is 0x%x,\n"
+				"indicate mem busy, prevent power off vxd,"
+				"MSVDX_COMMS_FW_STATUS reg is 0x%x,"
+				"MSVDX_COMMS_ERROR_TRIG reg is 0x%x,",
+				PSB_RMSVDX32(MSVDX_MMU_MEM_REQ_OFFSET),
+				PSB_RMSVDX32(MSVDX_COMMS_FW_STATUS),
+				PSB_RMSVDX32(MSVDX_COMMS_ERROR_TRIG));
+#ifdef CONFIG_MDFD_GL3
+		PSB_DEBUG_WARN("WARN: gl3 state is %d, 0 is off, 1 is on,\n"
+				"gl3 MDFLD_GCL_CR_CTL2 reg is 0x%x,"
+				"gl3 MDFLD_GCL_ERR_ADDR reg is 0x%x,"
+				"gl3 MDFLD_GCL_ERR_STATUS reg is 0x%x,"
+				"gl3 MDFLD_GCL_CR_ECO reg is 0x%x,"
+				"gl3 MDFLD_GL3_CONTROL reg is 0x%x,"
+				"gl3 MDFLD_GL3_USE_WRT_INVAL reg is 0x%x,"
+				"gl3 MDFLD_GL3_STATUS reg is 0x%x.\n",
+				psb_get_power_state(OSPM_GL3_CACHE_ISLAND),
+				MDFLD_GL3_READ(MDFLD_GCL_CR_CTL2),
+				MDFLD_GL3_READ(MDFLD_GCL_ERR_ADDR),
+				MDFLD_GL3_READ(MDFLD_GCL_ERR_STATUS),
+				MDFLD_GL3_READ(MDFLD_GCL_CR_ECO),
+				MDFLD_GL3_READ(MDFLD_GL3_CONTROL),
+				MDFLD_GL3_READ(MDFLD_GL3_USE_WRT_INVAL),
+				MDFLD_GL3_READ(MDFLD_GL3_STATUS));
+#endif
+		return -EBUSY;
+	}
+	/*
+		if (msvdx_priv->msvdx_hw_busy) {
+			PSB_DEBUG_PM("MSVDX: %s, HW is busy\n", __func__);
+			return -EBUSY;
+		}
+	*/
+	return 0;
+}
+
+int psb_msvdx_save_context(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
+	int offset;
+	int need_sw_reset;
+
+	need_sw_reset = msvdx_priv->msvdx_needs_reset &
+			MSVDX_RESET_NEEDS_REUPLOAD_FW;
+
+	if (msvdx_priv->fw_loaded_by_punit)
+		msvdx_priv->msvdx_needs_reset = MSVDX_RESET_NEEDS_INIT_FW;
+	else
+		msvdx_priv->msvdx_needs_reset = 1;
+
+#ifdef CONFIG_VIDEO_MRFLD_EC
+	/* we should restore the state, if we power down/up during EC */
+	for (offset = 0; offset < 4; ++offset)
+		msvdx_priv->vec_ec_mem_data[offset] =
+			PSB_RMSVDX32(0x2cb0 + offset * 4);
+
+	msvdx_priv->vec_ec_mem_data[4] =
+		PSB_RMSVDX32(0x2cc4);
+
+	msvdx_priv->vec_ec_mem_saved = 1;
+	PSB_DEBUG_MSVDX("ec last mb %d %d %d %d\n", msvdx_priv->vec_ec_mem_data[0],
+				msvdx_priv->vec_ec_mem_data[1],
+				msvdx_priv->vec_ec_mem_data[2],
+				msvdx_priv->vec_ec_mem_data[3]);
+	PSB_DEBUG_MSVDX("ec error state %d\n", msvdx_priv->vec_ec_mem_data[4]);
+#endif
+
+	if (need_sw_reset) {
+		PSB_DEBUG_WARN("msvdx run into wrong state, soft reset msvdx before power down\n");
+		PSB_WMSVDX32(MTX_SOFT_RESET_MTXRESET, MTX_SOFT_RESET_OFFSET);
+
+		if (psb_msvdx_core_reset(dev_priv))
+			PSB_DEBUG_WARN("failed to call psb_msvdx_core_reset.\n");
+
+		if (msvdx_priv->fw_loaded_by_punit) {
+			PSB_WMSVDX32(0, MTX_ENABLE_OFFSET);
+			psb_msvdx_mtx_set_clocks(dev_priv->dev, 0);
+		}
+	}
+
+	return 0;
+}
+
+int psb_msvdx_restore_context(struct drm_device *dev)
+{
+	return 0;
+}
+
+void psb_msvdx_check_reset_fw(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&msvdx_priv->msvdx_lock, irq_flags);
+
+	/* handling fw upload here if required */
+	/* power off first, then hw_begin will power up/upload FW correctly */
+	if (msvdx_priv->msvdx_needs_reset & MSVDX_RESET_NEEDS_REUPLOAD_FW) {
+		spin_unlock_irqrestore(&msvdx_priv->msvdx_lock, irq_flags);
+		PSB_DEBUG_PM("MSVDX: force to power off msvdx due to decoding error.\n");
+		ospm_apm_power_down_msvdx(dev, 1);
+		spin_lock_irqsave(&msvdx_priv->msvdx_lock, irq_flags);
+		msvdx_priv->msvdx_needs_reset &= ~MSVDX_RESET_NEEDS_REUPLOAD_FW;
+	}
+	spin_unlock_irqrestore(&msvdx_priv->msvdx_lock, irq_flags);
+}
+
+static void psb_msvdx_set_tile(struct drm_device *dev, unsigned long msvdx_tile)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
+	uint32_t cmd, msvdx_stride;
+	uint32_t start = msvdx_priv->tile_region_start0;
+	uint32_t end = msvdx_priv->tile_region_end0;
+	msvdx_stride = (msvdx_tile & 0xf);
+	/* Enable memory tiling */
+	cmd = ((start >> 20) + (((end >> 20) - 1) << 12) +
+				((0x8 | (msvdx_stride - 1)) << 24));
+	if (msvdx_stride) {
+		PSB_DEBUG_GENERAL("MSVDX: MMU Tiling register0 %08x\n", cmd);
+		PSB_DEBUG_GENERAL("       Region 0x%08x-0x%08x\n",
+					start, end);
+		PSB_WMSVDX32(cmd, MSVDX_MMU_TILE_BASE0_OFFSET);
+	}
+
+	start = msvdx_priv->tile_region_start1;
+	end = msvdx_priv->tile_region_end1;
+
+	msvdx_stride = (msvdx_tile >> 4);
+	/* Enable memory tiling */
+	PSB_WMSVDX32(0, MSVDX_MMU_TILE_BASE1_OFFSET);
+	cmd = ((start >> 20) + (((end >> 20) - 1) << 12) +
+				((0x8 | (msvdx_stride - 1)) << 24));
+	if (msvdx_stride) {
+		PSB_DEBUG_GENERAL("MSVDX: MMU Tiling register1 %08x\n", cmd);
+		PSB_DEBUG_GENERAL("       Region 0x%08x-0x%08x\n",
+					start, end);
+		PSB_WMSVDX32(cmd, MSVDX_MMU_TILE_BASE1_OFFSET);
+	}
+}
+
+void psb_powerdown_msvdx(struct work_struct *work)
+{
+	struct msvdx_private *msvdx_priv =
+		container_of(work, struct msvdx_private, msvdx_suspend_wq.work);
+
+	PSB_DEBUG_PM("MSVDX: work queue is scheduled to power off msvdx.\n");
+	ospm_apm_power_down_msvdx(msvdx_priv->dev, 0);
+}
+
+void msvdx_powerdown_tasklet(unsigned long data)
+{
+	struct drm_device *dev = (struct drm_device *)data;
+
+	PSB_DEBUG_PM("MSVDX: tasklet is scheduled to power off msvdx.\n");
+	ospm_apm_power_down_msvdx(dev, 0);
+}
+
+void psb_msvdx_mtx_set_clocks(struct drm_device *dev, uint32_t clock_state)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	uint32_t old_clock_state = 0;
+	/* PSB_DEBUG_MSVDX("SetClocks to %x.\n", clock_state); */
+	old_clock_state = PSB_RMSVDX32(MSVDX_MAN_CLK_ENABLE_OFFSET);
+	if (old_clock_state == clock_state)
+		return;
+
+	if (clock_state == 0) {
+		/* Turn off clocks procedure */
+		if (old_clock_state) {
+			/* Turn off all the clocks except core */
+			PSB_WMSVDX32(
+				MSVDX_MAN_CLK_ENABLE_CORE_MAN_CLK_ENABLE_MASK,
+				MSVDX_MAN_CLK_ENABLE_OFFSET);
+
+			/* Make sure all the clocks are off except core */
+			psb_wait_for_register(dev_priv,
+				MSVDX_MAN_CLK_ENABLE_OFFSET,
+				MSVDX_MAN_CLK_ENABLE_CORE_MAN_CLK_ENABLE_MASK,
+				0xffffffff, 2000000, 5);
+
+			/* Turn off core clock */
+			PSB_WMSVDX32(0, MSVDX_MAN_CLK_ENABLE_OFFSET);
+		}
+	} else {
+		uint32_t clocks_en = clock_state;
+
+		/*Make sure that core clock is not accidentally turned off */
+		clocks_en |= MSVDX_MAN_CLK_ENABLE_CORE_MAN_CLK_ENABLE_MASK;
+
+		/* If all clocks were disable do the bring up procedure */
+		if (old_clock_state == 0) {
+			/* turn on core clock */
+			PSB_WMSVDX32(
+				MSVDX_MAN_CLK_ENABLE_CORE_MAN_CLK_ENABLE_MASK,
+				MSVDX_MAN_CLK_ENABLE_OFFSET);
+
+			/* Make sure core clock is on */
+			psb_wait_for_register(dev_priv,
+				MSVDX_MAN_CLK_ENABLE_OFFSET,
+				MSVDX_MAN_CLK_ENABLE_CORE_MAN_CLK_ENABLE_MASK,
+				0xffffffff, 2000000, 5);
+
+			/* turn on the other clocks as well */
+			PSB_WMSVDX32(clocks_en, MSVDX_MAN_CLK_ENABLE_OFFSET);
+
+			/* Make sure that all they are on */
+			psb_wait_for_register(dev_priv,
+					MSVDX_MAN_CLK_ENABLE_OFFSET,
+					clocks_en, 0xffffffff, 2000000, 5);
+		} else {
+			PSB_WMSVDX32(clocks_en, MSVDX_MAN_CLK_ENABLE_OFFSET);
+
+			/* Make sure that they are on */
+			psb_wait_for_register(dev_priv,
+					MSVDX_MAN_CLK_ENABLE_OFFSET,
+					clocks_en, 0xffffffff, 2000000, 5);
+		}
+	}
+}
+
+void psb_msvdx_clearirq(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	unsigned long mtx_int = 0;
+
+	/* Clear MTX interrupt */
+	REGIO_WRITE_FIELD_LITE(mtx_int, MSVDX_INTERRUPT_STATUS, MTX_IRQ,
+			       1);
+	PSB_WMSVDX32(mtx_int, MSVDX_INTERRUPT_CLEAR_OFFSET);
+}
+
+/* following two functions also works for CLV and MFLD */
+/* PSB_INT_ENABLE_R is set in psb_irq_(un)install_islands */
+void psb_msvdx_disableirq(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	/*uint32_t ier = dev_priv->vdc_irq_mask & (~_PSB_IRQ_MSVDX_FLAG); */
+
+	unsigned long enables = 0;
+
+	REGIO_WRITE_FIELD_LITE(enables, MSVDX_INTERRUPT_STATUS, MTX_IRQ,
+			       0);
+	PSB_WMSVDX32(enables, MSVDX_HOST_INTERRUPT_ENABLE_OFFSET);
+
+	/* write in sysirq.c */
+	/* PSB_WVDC32(ier, PSB_INT_ENABLE_R); /\* essential *\/ */
+}
+
+void psb_msvdx_enableirq(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	/* uint32_t ier = dev_priv->vdc_irq_mask | _PSB_IRQ_MSVDX_FLAG; */
+	unsigned long enables = 0;
+
+	/* Only enable the master core IRQ*/
+	REGIO_WRITE_FIELD_LITE(enables, MSVDX_INTERRUPT_STATUS, MTX_IRQ,
+			       1);
+	PSB_WMSVDX32(enables, MSVDX_HOST_INTERRUPT_ENABLE_OFFSET);
+
+	/* write in sysirq.c */
+	/* PSB_WVDC32(ier, PSB_INT_ENABLE_R); /\* essential *\/ */
+}
+
+#ifdef CONFIG_SLICE_HEADER_PARSING
+int psb_allocate_term_buf(struct drm_device *dev,
+			    struct ttm_buffer_object **term_buf,
+			    uint32_t *base_addr, unsigned long size)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	struct ttm_bo_device *bdev = &dev_priv->bdev;
+	int ret;
+	struct ttm_bo_kmap_obj tmp_kmap;
+	bool is_iomem;
+	unsigned char *addr;
+	const unsigned char term_string[] = {
+		0x0, 0x0, 0x1, 0xff, 0x65, 0x6e, 0x64, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
+
+	PSB_DEBUG_INIT("MSVDX: allocate termination buffer.\n");
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+	ret = ttm_buffer_object_create(bdev, size,
+				       ttm_bo_type_kernel,
+				       DRM_PSB_FLAG_MEM_MMU |
+				       TTM_PL_FLAG_NO_EVICT, 0, 0, 0,
+				       NULL, term_buf);
+#else
+	ret = ttm_buffer_object_create(bdev, size,
+				       ttm_bo_type_kernel,
+				       DRM_PSB_FLAG_MEM_MMU |
+				       TTM_PL_FLAG_NO_EVICT, 0, 0,
+				       NULL, term_buf);
+#endif
+	if (ret) {
+		DRM_ERROR("MSVDX:failed to allocate termination buffer.\n");
+		*term_buf = NULL;
+		return 1;
+	}
+
+	ret = ttm_bo_kmap(*term_buf, 0, (*term_buf)->num_pages, &tmp_kmap);
+	if (ret) {
+		PSB_DEBUG_GENERAL("ttm_bo_kmap failed ret: %d\n", ret);
+		ttm_bo_unref(term_buf);
+		*term_buf = NULL;
+		return 1;
+	}
+
+	addr = (unsigned char *)ttm_kmap_obj_virtual(&tmp_kmap, &is_iomem);
+	memcpy(addr, term_string, TERMINATION_SIZE);
+	ttm_bo_kunmap(&tmp_kmap);
+
+	*base_addr = (*term_buf)->offset;
+	return 0;
+}
+
+static int psb_msvdx_protected_frame_finished(struct drm_psb_private *dev_priv, struct psb_video_ctx *pos, uint32_t fence)
+{
+	if (unlikely(pos == NULL)) {
+		return 1;
+	}
+
+	if (!pos->slice_extract_flag)
+		return 1;
+
+	PSB_DEBUG_GENERAL("end_frame_seq, 0x%08x, fence = 0x%x\n", pos->frame_end_seq, fence);
+	if (pos->frame_end_seq == (fence & ~0xf))
+		return 1;
+	return 0;
+}
+#endif
+
+static struct psb_video_ctx* psb_msvdx_find_ctx(struct drm_psb_private *dev_priv, uint32_t fence)
+{
+	struct psb_video_ctx *pos = NULL, *n = NULL;
+
+	if (unlikely(dev_priv == NULL)) {
+		return NULL;
+	}
+
+	spin_lock(&dev_priv->video_ctx_lock);
+	list_for_each_entry_safe(pos, n, &dev_priv->video_ctx, head) {
+		if ((VAEntrypointVLD == (pos->ctx_type & 0xff)) && (pos->cur_sequence == fence)) {
+			spin_unlock(&dev_priv->video_ctx_lock);
+			return pos;
+		}
+	}
+	spin_unlock(&dev_priv->video_ctx_lock);
+
+	return NULL;
+}
+
+static void psb_msvdx_fw_error_detected(struct drm_device *dev, uint32_t fence, uint32_t flags)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
+	struct psb_msvdx_ec_ctx *msvdx_ec_ctx = NULL;
+	drm_psb_msvdx_frame_info_t *frame_info = NULL;
+	int found = 0;
+	int i;
+
+	if (!(flags & FW_DEVA_ERROR_DETECTED))
+		return;
+
+	/*get the frame_info struct for error concealment frame*/
+	for (i = 0; i < PSB_MAX_EC_INSTANCE; i++)
+		if (msvdx_priv->msvdx_ec_ctx[i]->fence ==
+						(fence & (~0xf))) {
+			msvdx_ec_ctx = msvdx_priv->msvdx_ec_ctx[i];
+			found++;
+		}
+	/* psb_msvdx_mtx_message_dump(dev); */
+	if (!msvdx_ec_ctx || !(msvdx_ec_ctx->tfile) || found > 1) {
+		PSB_DEBUG_MSVDX(
+			"no matched ctx: fence 0x%x, found %d, ctx %p\n",
+			fence, found, msvdx_ec_ctx);
+		return;
+	}
+
+	if (msvdx_ec_ctx->cur_frame_info &&
+		msvdx_ec_ctx->cur_frame_info->fence == (fence & (~0xf))) {
+		frame_info = msvdx_ec_ctx->cur_frame_info;
+	} else {
+		if (msvdx_ec_ctx->cur_frame_info) {
+			PSB_DEBUG_MSVDX(
+			"cur_frame_info's fence(%x) doesn't match fence (%x)\n",
+				msvdx_ec_ctx->cur_frame_info->fence, fence);
+		} else	{
+			PSB_DEBUG_MSVDX(
+			"The pointer msvdx_ec_ctx->cur_frame_info is null\n");
+		}
+		return;
+	}
+
+	if (frame_info->decode_status.num_region) {
+		PSB_DEBUG_MSVDX( "Error already recorded, no need to recorded again\n");
+		return;
+	}
+
+	PSB_DEBUG_MSVDX( "record error as first fault region\n");
+	frame_info->decode_status.num_region++;
+	frame_info->decode_status.mb_regions[0].start = 0;
+	frame_info->decode_status.mb_regions[0].end = 0;
+
+	/*
+	for (i = 0; i < MAX_DECODE_BUFFERS; i++) {
+		if (msvdx_ec_ctx->frame_info[i].fence == (fence & (~0xf))) {
+			break;
+		}
+
+	}
+	*/
+}
diff --git a/drivers/external_drivers/intel_media/video/decode/psb_msvdx.h b/drivers/external_drivers/intel_media/video/decode/psb_msvdx.h
new file mode 100644
index 0000000..0dc8fed
--- /dev/null
+++ b/drivers/external_drivers/intel_media/video/decode/psb_msvdx.h
@@ -0,0 +1,345 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
+ * Copyright (c) Imagination Technologies Limited, UK
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *    Li Zeng <li.zeng@intel.com>
+ *    Fei Jiang <fei.jiang@intel.com>
+ *
+ **************************************************************************/
+
+#ifndef _PSB_MSVDX_H_
+#define _PSB_MSVDX_H_
+
+#ifdef CONFIG_DRM_VXD_BYT
+#include "vxd_drv.h"
+#else
+#include "psb_drv.h"
+#include "img_types.h"
+#endif
+
+#include "psb_msvdx_reg.h"
+
+extern int drm_msvdx_pmpolicy;
+extern int drm_msvdx_bottom_half;
+extern int drm_msvdx_delay;
+extern int hdmi_state;
+extern int drm_psb_msvdx_tiling;
+
+#define FIRMWAREID		0x014d42ab
+
+#define GET_MSVDX_FREQUENCY(freq_code)	((1600 * 2)/((freq_code) + 1))
+
+/* psb_mmu.c */
+uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver);
+
+/* psb_msvdxinit.c */
+int psb_wait_for_register(struct drm_psb_private *dev_priv,
+			  uint32_t offset, uint32_t value, uint32_t enable,
+			  uint32_t poll_cnt, uint32_t timeout);
+int psb_msvdx_init(struct drm_device *dev);
+int psb_msvdx_uninit(struct drm_device *dev);
+int psb_msvdx_core_reset(struct drm_psb_private *dev_priv);
+
+int psb_msvdx_post_init(struct drm_device *dev);
+
+/* TODO: psb_msvdx_reset is used for the case of fw loading by driver
+ * Later we can test if it can be removed. */
+int psb_msvdx_reset(struct drm_psb_private *dev_priv);
+
+int psb_msvdx_post_boot_init(struct drm_device *dev);
+
+/* psb_msvdx.c */
+int psb_msvdx_interrupt(void *pvData);
+int psb_mtx_send(struct drm_psb_private *dev_priv, const void *pvMsg);
+#if 0
+void psb_msvdx_lockup(struct drm_psb_private *dev_priv,
+		      int *msvdx_lockup, int *msvdx_idle);
+#endif
+int psb_check_msvdx_idle(struct drm_device *dev);
+int psb_wait_msvdx_idle(struct drm_device *dev);
+int psb_cmdbuf_video(struct drm_file *priv,
+		     struct list_head *validate_list,
+		     uint32_t fence_type,
+		     struct drm_psb_cmdbuf_arg *arg,
+		     struct ttm_buffer_object *cmd_buffer,
+		     struct psb_ttm_fence_rep *fence_arg,
+		     struct psb_video_ctx *msvdx_ctx);
+int psb_msvdx_save_context(struct drm_device *dev);
+int psb_msvdx_restore_context(struct drm_device *dev);
+void psb_msvdx_check_reset_fw(struct drm_device *dev);
+void psb_powerdown_msvdx(struct work_struct *work);
+void psb_msvdx_flush_cmd_queue(struct drm_device *dev);
+
+#ifdef CONFIG_SLICE_HEADER_PARSING
+int psb_allocate_term_buf(struct drm_device *dev,
+			    struct ttm_buffer_object **term_buf,
+			    uint32_t *base_addr, unsigned long size);
+#endif
+int32_t msvdx_rendec_init_by_msg(struct drm_device *dev);
+
+/* psb_msvdx_fw.c */
+int32_t psb_msvdx_alloc_fw_bo(struct drm_psb_private *dev_priv);
+int psb_setup_fw(struct drm_device *dev);
+int psb_setup_msvdx(struct drm_device *dev);
+
+/*  Non-Optimal Invalidation is not default */
+#define MSVDX_DEVICE_NODE_FLAGS_MMU_NONOPT_INV	2
+
+#define FW_VA_RENDER_HOST_INT		0x00004000
+#define MSVDX_DEVICE_NODE_FLAGS_MMU_HW_INVALIDATION	0x00000020
+#define FW_DEVA_ERROR_DETECTED 0x08000000
+
+/* There is no work currently underway on the hardware */
+#define MSVDX_FW_STATUS_HW_IDLE	0x00000001
+#define MSVDX_DEVICE_NODE_FLAG_BRN23154_BLOCK_ON_FE	0x00000200
+#define MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D0 \
+	(MSVDX_DEVICE_NODE_FLAGS_MMU_NONOPT_INV |			\
+		MSVDX_DEVICE_NODE_FLAGS_MMU_HW_INVALIDATION |		\
+		MSVDX_DEVICE_NODE_FLAG_BRN23154_BLOCK_ON_FE)
+
+#define MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D1 \
+	(MSVDX_DEVICE_NODE_FLAGS_MMU_HW_INVALIDATION |			\
+		MSVDX_DEVICE_NODE_FLAG_BRN23154_BLOCK_ON_FE)
+
+#define POULSBO_D0	0x5
+#define POULSBO_D1	0x6
+#define PSB_REVID_OFFSET 0x8
+
+#define MTX_CODE_BASE		(0x80900000)
+#define MTX_DATA_BASE		(0x82880000)
+#define PC_START_ADDRESS	(0x80900000)
+
+#define MTX_CORE_CODE_MEM	(0x10)
+#define MTX_CORE_DATA_MEM	(0x18)
+
+#define RENDEC_A_SIZE	(4 * 1024 * 1024)
+#define RENDEC_B_SIZE	(1024 * 1024)
+
+#define TERMINATION_SIZE	48
+
+#define MSVDX_RESET_NEEDS_REUPLOAD_FW		(0x2)
+#define MSVDX_RESET_NEEDS_INIT_FW		(0x1)
+
+/* HOST_BE_OPP parameters */
+struct HOST_BE_OPP_PARAMS {
+	uint32_t handle;	/* struct ttm_buffer_object * of REGIO */
+	uint32_t buffer_stride;
+	uint32_t buffer_size;
+	uint32_t picture_width_mb;
+	uint32_t size_mb;
+};
+
+typedef struct drm_psb_msvdx_frame_info {
+	uint32_t handle;
+	uint32_t surface_id;
+	uint32_t fence;
+	uint32_t buffer_stride;
+	uint32_t buffer_size;
+	uint32_t picture_width_mb;
+	uint32_t fw_status;
+	uint32_t size_mb;
+	drm_psb_msvdx_decode_status_t decode_status;
+} drm_psb_msvdx_frame_info_t;
+
+#define MAX_DECODE_BUFFERS (24)
+#define PSB_MAX_EC_INSTANCE (4)
+#define PSB_MSVDX_INVALID_FENCE (0xffffffff)
+#define PSB_MSVDX_INVALID_OFFSET (0xffffffff)
+#define PSB_MSVDX_EC_ROLLBACK (9)
+
+struct psb_msvdx_ec_ctx {
+	struct ttm_object_file *tfile; /* protected by cmdbuf_mutex */
+	uint32_t context_id;
+	drm_psb_msvdx_frame_info_t frame_info[MAX_DECODE_BUFFERS];
+	drm_psb_msvdx_frame_info_t *cur_frame_info;
+	int frame_idx;
+
+	/* 12 render msg + 1 deblock msg
+	 * 12 * 20 + 1 * 48 = 288;
+	*/
+	unsigned char unfenced_cmd[300];
+	uint32_t cmd_size;
+	uint32_t deblock_cmd_offset;
+	uint32_t fence;
+	drm_psb_msvdx_decode_status_t decode_status;
+};
+
+#ifdef CONFIG_ION
+struct psb_ion_buffer {
+	struct list_head head;
+	struct dma_buf *psDmaBuf;
+	struct dma_buf_attachment *psAttachment;
+	struct sg_table *sg;
+	int fd;
+};
+#endif
+
+/* MSVDX private structure */
+struct msvdx_private {
+	struct drm_device *dev;
+	int msvdx_needs_reset;
+
+	unsigned int pmstate;
+
+	struct sysfs_dirent *sysfs_pmstate;
+
+	uint32_t msvdx_current_sequence;
+	uint32_t msvdx_last_sequence;
+
+	struct drm_psb_private *dev_priv;
+	/*
+	 *MSVDX Rendec Memory
+	 */
+	struct ttm_buffer_object *ccb0;
+	uint32_t base_addr0;
+	struct ttm_buffer_object *ccb1;
+	uint32_t base_addr1;
+
+	struct ttm_buffer_object *fw;
+	uint32_t is_load;
+	uint32_t mtx_mem_size;
+
+	/*
+	 *MSVDX tile regions
+	*/
+	uint32_t tile_region_start0;
+	uint32_t tile_region_end0;
+	uint32_t tile_region_start1;
+	uint32_t tile_region_end1;
+
+	/*
+	 *msvdx command queue
+	 */
+	spinlock_t msvdx_lock;
+	struct mutex msvdx_mutex;
+	struct list_head msvdx_queue;
+	int msvdx_busy;
+	int rendec_init;
+
+	int msvdx_fw_loaded;
+	void *msvdx_fw;
+	int msvdx_fw_size;
+
+	uint32_t fw_b0_uploaded;
+	uint32_t msvdx_hw_busy;
+
+	uint32_t vec_ec_mem_data[5];
+	uint32_t vec_ec_mem_saved;
+
+	uint32_t psb_dash_access_ctrl;
+	uint32_t decoding_err;
+	uint32_t fw_loaded_by_punit;
+
+	drm_psb_msvdx_frame_info_t frame_info[MAX_DECODE_BUFFERS];
+	drm_psb_msvdx_decode_status_t decode_status;
+	uint32_t host_be_opp_enabled;
+
+	/*work for error concealment*/
+	struct work_struct ec_work;
+	struct ttm_object_file *tfile; /* protected by cmdbuf_mutex */
+	struct psb_msvdx_ec_ctx *msvdx_ec_ctx[PSB_MAX_EC_INSTANCE];
+	struct psb_msvdx_ec_ctx *cur_msvdx_ec_ctx;
+	uint32_t deblock_cmd_offset;
+	int num_cmd;
+
+	struct drm_video_displaying_frameinfo displaying_frame;
+
+	/* pm suspend wq */
+	struct delayed_work msvdx_suspend_wq;
+	struct tasklet_struct msvdx_tasklet;
+
+	/* protected by msvdx_mutex */
+	/* Current video context */
+	struct psb_video_ctx *msvdx_ctx;
+	/* previous vieo context */
+	struct psb_video_ctx *last_msvdx_ctx;
+	uint32_t pm_gating_count;
+
+	struct page *mmu_recover_page;
+#ifdef CONFIG_SLICE_HEADER_PARSING
+	struct ttm_buffer_object *term_buf;
+	uint32_t term_buf_addr;
+#endif
+
+	atomic_t vc1_workaround_ctx;
+
+#ifdef CONFIG_ION
+	struct list_head ion_buffers_list;
+	struct mutex ion_buf_list_lock;
+#endif
+
+
+};
+
+struct psb_msvdx_cmd_queue {
+	struct list_head head;
+	void *cmd;
+	unsigned long cmd_size;
+	uint32_t sequence;
+	uint32_t msvdx_tile;
+	uint32_t host_be_opp_enabled;
+	uint32_t deblock_cmd_offset;
+	struct ttm_object_file *tfile;
+	struct psb_video_ctx *msvdx_ctx;
+	int frame_boundary; /* this command is frame end if true */
+};
+
+#ifdef CONFIG_VIDEO_MRFLD
+struct psb_msvdx_ec_ctx *psb_msvdx_find_ec_ctx(
+			struct msvdx_private *msvdx_priv,
+			struct ttm_object_file *tfile,
+			void *cmd);
+#endif
+
+void psb_msvdx_clearirq(struct drm_device *dev);
+
+void psb_msvdx_disableirq(struct drm_device *dev);
+
+void psb_msvdx_enableirq(struct drm_device *dev);
+
+#define MSVDX_NEW_PMSTATE(drm_dev, msvdx_priv, new_state)		\
+do {									\
+	msvdx_priv->pmstate = new_state;				\
+	if (new_state == PSB_PMSTATE_POWERDOWN)				\
+		msvdx_priv->pm_gating_count++;				\
+	sysfs_notify_dirent(msvdx_priv->sysfs_pmstate);			\
+	PSB_DEBUG_PM("MSVDX: %s, power gating count 0x%08x\n",		\
+		(new_state == PSB_PMSTATE_POWERUP) ? "powerup"		\
+		: ((new_state == PSB_PMSTATE_POWERDOWN) ? "powerdown"	\
+			: "clockgated"), msvdx_priv->pm_gating_count);	\
+} while (0)
+
+#if 0
+#define PSB_WATCHDOG_DELAY (DRM_HZ * 2)
+extern void psb_schedule_watchdog(struct drm_psb_private *dev_priv);
+extern void psb_watchdog_init(struct drm_psb_private *dev_priv);
+extern void psb_watchdog_takedown(struct drm_psb_private *dev_priv);
+#endif
+
+void psb_msvdx_mtx_set_clocks(struct drm_device *dev, uint32_t clock_state);
+
+extern int psb_submit_video_cmdbuf(struct drm_device *dev,
+				   struct ttm_buffer_object *cmd_buffer,
+				   unsigned long cmd_offset,
+				   unsigned long cmd_size,
+				   struct psb_video_ctx *msvdx_ctx,
+				   uint32_t fence_flag);
+
+void msvdx_powerdown_tasklet(unsigned long data);
+int psb_msvdx_dequeue_send(struct drm_device *dev);
+#endif
diff --git a/drivers/external_drivers/intel_media/video/decode/psb_msvdx_ec.c b/drivers/external_drivers/intel_media/video/decode/psb_msvdx_ec.c
new file mode 100644
index 0000000..0eec31e
--- /dev/null
+++ b/drivers/external_drivers/intel_media/video/decode/psb_msvdx_ec.c
@@ -0,0 +1,533 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
+ * Copyright (c) Imagination Technologies Limited, UK
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *    Li Zeng <li.zeng@intel.com>
+ *
+ **************************************************************************/
+
+#ifdef CONFIG_DRM_VXD_BYT
+#include "vxd_drv.h"
+#else
+#include "psb_drv.h"
+#endif
+
+#include "psb_msvdx.h"
+#include "psb_msvdx_msg.h"
+#include "psb_msvdx_reg.h"
+#include "psb_msvdx_ec.h"
+
+#define MAX_SIZE_IN_MB		(4096 / 16)
+
+static inline int psb_msvdx_cmd_port_write(struct drm_psb_private *dev_priv,
+			uint32_t offset, uint32_t value, uint32_t *cmd_space)
+{
+	uint32_t max_attempts = 0xff;
+	uint32_t attempts = 0;
+
+	max_attempts = 0xff;
+	while (*cmd_space == 0) {
+		*cmd_space = PSB_RMSVDX32(
+			MSVDX_CORE_CR_MSVDX_COMMAND_SPACE_OFFSET +
+			MSVDX_CORE_BASE);
+		if (*cmd_space)
+			break;
+		PSB_UDELAY(2);
+		attempts++;
+		if (attempts > max_attempts) {
+			printk(KERN_ERR "MSVDX: poll cmd space timeout\n");
+			return -1;
+		}
+	}
+
+	PSB_WMSVDX32(value, offset + MSVDX_CMDS_BASE);
+	(*cmd_space)--;
+	/*
+	 *printk(KERN_DEBUG "MSVDX: poll cmd space attempts %d\n", attempts);
+	*/
+	return 0;
+}
+
+#define PSB_CMDPORT_WRITE(_dev_priv_, _offset_, _cmd_, _cmd_space_)	\
+	do {								\
+		ret = psb_msvdx_cmd_port_write(_dev_priv_,		\
+				 _offset_, _cmd_, &_cmd_space_);	\
+		if (ret) {						\
+			printk(KERN_DEBUG "write cmd fail, abort\n");	\
+			goto ec_done;					\
+		}							\
+	} while (0);
+
+#define PSB_CMDPORT_WRITE_FAST(_dev_priv_, _offset_, _cmd_, _cmd_space_)  \
+	psb_msvdx_cmd_port_write(_dev_priv_,				\
+				 _offset_, _cmd_, &_cmd_space_);	\
+
+void psb_msvdx_do_concealment(struct work_struct *work)
+{
+	struct msvdx_private *msvdx_priv =
+		container_of(work, struct msvdx_private, ec_work);
+	struct drm_psb_private *dev_priv = NULL;
+	struct psb_msvdx_ec_ctx *msvdx_ec_ctx = msvdx_priv->cur_msvdx_ec_ctx;
+	drm_psb_msvdx_decode_status_t *fault_region = NULL;
+	struct fw_deblock_msg *deblock_msg =
+		(struct fw_deblock_msg *)(msvdx_ec_ctx->unfenced_cmd +
+		msvdx_ec_ctx->deblock_cmd_offset);
+	uint32_t width_in_mb, height_in_mb, cmd;
+	int conceal_above_row = 0, loop, mb_loop;
+	uint32_t cmd_space = 0;
+	int ret = 0;
+
+#ifdef CONFIG_VIDEO_MRFLD
+	if (!power_island_get(OSPM_VIDEO_DEC_ISLAND)) {
+#else
+	if (!ospm_power_using_video_begin(OSPM_VIDEO_DEC_ISLAND)) {
+#endif
+		printk(KERN_ERR "MSVDX: fail to power on ved for ec\n");
+		return;
+	}
+
+	dev_priv = msvdx_priv->dev_priv;
+	fault_region = &msvdx_ec_ctx->decode_status;
+
+	/* Concealment should be done in time,
+	 * otherwise panic msg will be signaled in msvdx
+	 */
+	preempt_disable();
+
+	if (msvdx_ec_ctx->deblock_cmd_offset == PSB_MSVDX_INVALID_OFFSET) {
+		printk(KERN_ERR "invalid msg offset, abort concealment\n");
+		goto ec_done;
+	}
+
+	if (fault_region->num_region == 0) {
+		PSB_DEBUG_MSVDX("no fault region\n");
+		goto ec_done;
+	}
+
+
+	width_in_mb = deblock_msg->pic_size.bits.pic_width_mb;
+	height_in_mb = deblock_msg->pic_size.bits.frame_height_mb;
+
+	{
+		int i;
+		for (i = 0; i < fault_region->num_region; i++)
+			PSB_DEBUG_MSVDX("[region %d] is %d to %d\n",
+					 i,
+					 fault_region->mb_regions[i].start,
+					 fault_region->mb_regions[i].end);
+		PSB_DEBUG_MSVDX("MSVDX: MSGID_DEBLOCK:"
+			" - fence: %08x"
+			" - flags: %08x - slice_field_type: %08x"
+			" - operating_mode: %08x"
+			" - context: %08x - mmu_ptd: %08x"
+			" - frame_height_mb: %08x - pic_width_mb: %08x"
+			" - address_a0: %08x - address_a1: %08x"
+			" - mb_param_address: %08x"
+			" - ext_stride_a: %08x"
+			" - address_b0: %08x - address_b1: %08x"
+			" - alt_output_flags_b: %08x.\n",
+			deblock_msg->header.bits.msg_fence,
+			deblock_msg->flag_type.bits.flags,
+			deblock_msg->flag_type.bits.slice_field_type,
+			deblock_msg->operating_mode,
+			deblock_msg->mmu_context.bits.context,
+			deblock_msg->mmu_context.bits.mmu_ptd,
+			deblock_msg->pic_size.bits.frame_height_mb,
+			deblock_msg->pic_size.bits.pic_width_mb,
+			deblock_msg->address_a0,
+			deblock_msg->address_a1,
+			deblock_msg->mb_param_address,
+			deblock_msg->ext_stride_a,
+			deblock_msg->address_b0,
+			deblock_msg->address_b1,
+			deblock_msg->alt_output_flags_b);
+		PSB_DEBUG_MSVDX("deblock addr_c0 is	0x%08x\n",
+					deblock_msg->address_c0);
+		PSB_DEBUG_MSVDX("deblock addr_c1 is	0x%08x\n",
+					deblock_msg->address_c1);
+	}
+
+	if (unlikely(!width_in_mb || !height_in_mb ||
+		width_in_mb > MAX_SIZE_IN_MB ||
+		height_in_mb > MAX_SIZE_IN_MB)) {
+		PSB_DEBUG_MSVDX("wrong pic size\n");
+		goto ec_done;
+	}
+
+	cmd = 0;
+	REGIO_WRITE_FIELD_LITE(cmd, MSVDX_CMDS,
+			       DISPLAY_PICTURE_SIZE_DISPLAY_PICTURE_HEIGHT,
+			       (height_in_mb * 16) - 1);
+	REGIO_WRITE_FIELD_LITE(cmd, MSVDX_CMDS,
+			       DISPLAY_PICTURE_SIZE_DISPLAY_PICTURE_WIDTH,
+			       (width_in_mb * 16) - 1);
+	PSB_CMDPORT_WRITE(dev_priv,
+				 MSVDX_CMDS_DISPLAY_PICTURE_SIZE_OFFSET,
+				 cmd, cmd_space);
+
+	cmd = 0;
+	REGIO_WRITE_FIELD_LITE(cmd, MSVDX_CMDS,
+			       CODED_PICTURE_SIZE_CODED_PICTURE_HEIGHT,
+			       (height_in_mb * 16) - 1);
+	REGIO_WRITE_FIELD_LITE(cmd, MSVDX_CMDS,
+			       CODED_PICTURE_SIZE_CODED_PICTURE_WIDTH,
+			       (width_in_mb * 16) - 1);
+	PSB_CMDPORT_WRITE(dev_priv,
+				 MSVDX_CMDS_CODED_PICTURE_SIZE_OFFSET,
+				 cmd, cmd_space);
+
+	cmd = deblock_msg->operating_mode;
+	REGIO_WRITE_FIELD(cmd, MSVDX_CMDS_OPERATING_MODE,
+			  CHROMA_FORMAT, 1);
+	REGIO_WRITE_FIELD(cmd, MSVDX_CMDS_OPERATING_MODE,
+			  ASYNC_MODE, 1);
+	REGIO_WRITE_FIELD(cmd, MSVDX_CMDS_OPERATING_MODE,
+			  CODEC_MODE, 3);
+	REGIO_WRITE_FIELD(cmd, MSVDX_CMDS_OPERATING_MODE,
+			  CODEC_PROFILE, 1);
+	PSB_CMDPORT_WRITE(dev_priv,
+				 MSVDX_CMDS_OPERATING_MODE_OFFSET,
+				 cmd, cmd_space);
+
+	/* dest frame address */
+	PSB_CMDPORT_WRITE(dev_priv,
+		MSVDX_CMDS_LUMA_RECONSTRUCTED_PICTURE_BASE_ADDRESSES_OFFSET,
+				 deblock_msg->address_a0,
+				 cmd_space);
+
+	PSB_CMDPORT_WRITE(dev_priv,
+		MSVDX_CMDS_CHROMA_RECONSTRUCTED_PICTURE_BASE_ADDRESSES_OFFSET,
+				 deblock_msg->address_a1,
+				 cmd_space);
+
+	/* conceal frame address */
+	PSB_CMDPORT_WRITE(dev_priv,
+		MSVDX_CMDS_REFERENCE_PICTURE_BASE_ADDRESSES_OFFSET,
+				 deblock_msg->address_b0,
+				 cmd_space);
+	PSB_CMDPORT_WRITE(dev_priv,
+		MSVDX_CMDS_REFERENCE_PICTURE_BASE_ADDRESSES_OFFSET + 4,
+				 deblock_msg->address_b1,
+				 cmd_space);
+	cmd = 0;
+	REGIO_WRITE_FIELD(cmd, MSVDX_CMDS_SLICE_PARAMS, SLICE_FIELD_TYPE, 2);
+	REGIO_WRITE_FIELD(cmd, MSVDX_CMDS_SLICE_PARAMS, SLICE_CODE_TYPE, 1);
+
+	PSB_CMDPORT_WRITE(dev_priv,
+				 MSVDX_CMDS_SLICE_PARAMS_OFFSET,
+				 cmd, cmd_space);
+
+	cmd = deblock_msg->alt_output_flags_b;
+	if ((cmd & 3) != 0) {
+		PSB_DEBUG_MSVDX("MSVDX: conceal to rotate surface\n");
+	} else {
+		PSB_CMDPORT_WRITE(dev_priv,
+			MSVDX_CMDS_ALTERNATIVE_OUTPUT_PICTURE_ROTATION_OFFSET,
+					 cmd, cmd_space);
+
+		PSB_CMDPORT_WRITE(dev_priv,
+			MSVDX_CMDS_VC1_LUMA_RANGE_MAPPING_BASE_ADDRESS_OFFSET,
+				 0, cmd_space);
+
+		PSB_CMDPORT_WRITE(dev_priv,
+			MSVDX_CMDS_VC1_CHROMA_RANGE_MAPPING_BASE_ADDRESS_OFFSET,
+				 0, cmd_space);
+
+		PSB_CMDPORT_WRITE(dev_priv,
+			MSVDX_CMDS_VC1_RANGE_MAPPING_FLAGS_OFFSET,
+				 0, cmd_space);
+	}
+
+	cmd = deblock_msg->ext_stride_a;
+	PSB_CMDPORT_WRITE(dev_priv,
+			  MSVDX_CMDS_EXTENDED_ROW_STRIDE_OFFSET,
+			  cmd, cmd_space);
+
+	for (loop = 0; loop < fault_region->num_region; loop++) {
+
+		uint32_t start = fault_region->mb_regions[loop].start;
+		uint32_t end = fault_region->mb_regions[loop].end;
+		uint32_t x, y;
+
+		PSB_DEBUG_MSVDX("MSVDX: region(%d) is %d~%d\n",
+			loop, start, end);
+
+		if (conceal_above_row)
+			start -= width_in_mb;
+		if (end > (width_in_mb * height_in_mb - 1))
+			end = (width_in_mb * height_in_mb - 1);
+		if (start > end)
+			start = 0;
+
+		PSB_DEBUG_MSVDX("MSVDX: modify region(%d) is %d~%d\n",
+			loop, start, end);
+
+		x = start % width_in_mb;
+		y = start / width_in_mb;
+
+		for (mb_loop = start; mb_loop <= end; mb_loop++, x++) {
+			if (x >= width_in_mb) {
+				x = 0;
+				y++;
+			}
+
+			/* PSB_DEBUG_MSVDX("MSVDX: concleament (%d,%d)\n",
+				x, y); */
+			if ((x == 0) && (mb_loop != start))
+				PSB_CMDPORT_WRITE_FAST(dev_priv,
+					MSVDX_CMDS_END_SLICE_PICTURE_OFFSET,
+					0, cmd_space);
+			cmd = 0;
+			REGIO_WRITE_FIELD_LITE(cmd,
+					       MSVDX_CMDS_MACROBLOCK_NUMBER,
+					       MB_CODE_TYPE, 1);
+			REGIO_WRITE_FIELD_LITE(cmd,
+					       MSVDX_CMDS_MACROBLOCK_NUMBER,
+					       MB_NO_X, x);
+			REGIO_WRITE_FIELD_LITE(cmd,
+					       MSVDX_CMDS_MACROBLOCK_NUMBER,
+					       MB_NO_Y, y);
+			PSB_CMDPORT_WRITE_FAST(dev_priv,
+				MSVDX_CMDS_MACROBLOCK_NUMBER_OFFSET,
+				cmd, cmd_space);
+			PSB_CMDPORT_WRITE_FAST(dev_priv,
+				MSVDX_CMDS_MACROBLOCK_RESIDUAL_FORMAT_OFFSET,
+				0, cmd_space);
+			cmd = 0;
+			REGIO_WRITE_FIELD_LITE(cmd,
+					MSVDX_CMDS_INTER_BLOCK_PREDICTION,
+					       REF_INDEX_A_VALID, 1);
+			REGIO_WRITE_FIELD_LITE(cmd,
+					MSVDX_CMDS_INTER_BLOCK_PREDICTION,
+					       INTER_PRED_BLOCK_SIZE, 0);
+			REGIO_WRITE_FIELD_LITE(cmd,
+					MSVDX_CMDS_INTER_BLOCK_PREDICTION,
+					       REF_INDEX_A, 0);
+			REGIO_WRITE_FIELD_LITE(cmd,
+				MSVDX_CMDS_INTER_BLOCK_PREDICTION,
+				REF_INDEX_B, 0);
+			PSB_CMDPORT_WRITE_FAST(dev_priv,
+				MSVDX_CMDS_INTER_BLOCK_PREDICTION_OFFSET,
+				cmd, cmd_space);
+			PSB_CMDPORT_WRITE_FAST(dev_priv,
+				MSVDX_CMDS_MOTION_VECTOR_OFFSET,
+				0, cmd_space);
+		}
+
+		PSB_CMDPORT_WRITE(dev_priv,
+			MSVDX_CMDS_END_SLICE_PICTURE_OFFSET,
+			0, cmd_space);
+	}
+
+ec_done:
+	/* try to unblock rendec */
+	ret = PSB_CMDPORT_WRITE_FAST(dev_priv,
+		MSVDX_CMDS_END_SLICE_PICTURE_OFFSET,
+		1, cmd_space);
+
+	fault_region->num_region = 0;
+
+	preempt_enable();
+
+#ifdef CONFIG_VIDEO_MRFLD
+	power_island_put(OSPM_VIDEO_DEC_ISLAND);
+#else
+	ospm_power_using_video_end(OSPM_VIDEO_DEC_ISLAND);
+#endif
+	printk(KERN_DEBUG "MSVDX: EC done, unlock msvdx ret %d\n",
+	       ret);
+
+	return;
+}
+
+struct psb_msvdx_ec_ctx *psb_msvdx_find_ec_ctx(
+			struct msvdx_private *msvdx_priv,
+			struct ttm_object_file *tfile,
+			void *cmd)
+{
+	int i, free_idx;
+	struct psb_msvdx_ec_ctx *ec_ctx = NULL;
+	struct fw_deblock_msg *deblock_msg = (struct fw_deblock_msg *)cmd;
+
+	free_idx = -1;
+	for (i = 0; i < PSB_MAX_EC_INSTANCE; i++) {
+		if (msvdx_priv->msvdx_ec_ctx[i]->tfile == tfile)
+			break;
+		else if (free_idx < 0 &&
+			 msvdx_priv->msvdx_ec_ctx[i]->tfile == NULL)
+			free_idx = i;
+	}
+
+	if (i < PSB_MAX_EC_INSTANCE)
+		ec_ctx = msvdx_priv->msvdx_ec_ctx[i];
+	else if (free_idx >= 0 && cmd) {
+		PSB_DEBUG_MSVDX("acquire ec ctx idx %d for tfile %p\n",
+				free_idx, tfile);
+		ec_ctx = msvdx_priv->msvdx_ec_ctx[free_idx];
+		memset(ec_ctx, 0, sizeof(*ec_ctx));
+		ec_ctx->tfile = tfile;
+		ec_ctx->context_id = deblock_msg->mmu_context.bits.context;
+	} else {
+		PSB_DEBUG_MSVDX("Available ec ctx is not found\n");
+	}
+
+	return ec_ctx;
+}
+
+void psb_msvdx_update_frame_info(struct msvdx_private *msvdx_priv,
+					struct ttm_object_file *tfile,
+					void *cmd)
+{
+
+	int i, free_idx;
+	drm_psb_msvdx_frame_info_t *frame_info;
+	struct fw_deblock_msg *deblock_msg = (struct fw_deblock_msg *)cmd;
+	uint32_t buffer_handle = deblock_msg->mb_param_address;
+
+	struct psb_msvdx_ec_ctx *ec_ctx;
+
+	PSB_DEBUG_MSVDX(
+		"update frame info (handle 0x%08x) for error concealment\n",
+		buffer_handle);
+
+	ec_ctx = psb_msvdx_find_ec_ctx(msvdx_priv, tfile, cmd);
+
+	if (!ec_ctx)
+		return;
+
+	free_idx = -1;
+	for (i = 0; i < MAX_DECODE_BUFFERS; i++) {
+		if (buffer_handle == ec_ctx->frame_info[i].handle)
+			break;
+		if ((free_idx < 0) && (ec_ctx->frame_info[i].handle == 0))
+			free_idx = i;
+	}
+
+	if (i < MAX_DECODE_BUFFERS)
+		frame_info = &ec_ctx->frame_info[i];
+	else if (free_idx >= 0) {
+		PSB_DEBUG_MSVDX("acquire frame_info solt idx %d\n", free_idx);
+		frame_info = &ec_ctx->frame_info[free_idx];
+	} else {
+		PSB_DEBUG_MSVDX("%d solts occupied, abort update frame_info\n",
+				MAX_DECODE_BUFFERS);
+		return;
+	}
+
+	frame_info->fw_status = 0;
+	frame_info->handle = buffer_handle;
+	frame_info->fence = (deblock_msg->header.bits.msg_fence & (~0xf));
+	frame_info->decode_status.num_region = 0;
+	ec_ctx->cur_frame_info = frame_info;
+}
+
+void psb_msvdx_backup_cmd(struct msvdx_private *msvdx_priv,
+				struct ttm_object_file *tfile,
+				void *cmd,
+				uint32_t cmd_size,
+				uint32_t deblock_cmd_offset)
+{
+	struct fw_deblock_msg *deblock_msg = NULL;
+
+	struct psb_msvdx_ec_ctx *ec_ctx;
+	union msg_header *header;
+
+	PSB_DEBUG_MSVDX("backup cmd for ved error concealment\n");
+
+	ec_ctx = psb_msvdx_find_ec_ctx(msvdx_priv, tfile, NULL);
+
+	if (!ec_ctx) {
+		PSB_DEBUG_MSVDX("this is not a ec ctx, abort backup cmd\n");
+		return;
+	}
+
+
+	if (deblock_cmd_offset != PSB_MSVDX_INVALID_OFFSET)
+		deblock_msg = (struct fw_deblock_msg *)(cmd + deblock_cmd_offset);
+
+	if (deblock_msg &&
+	    ec_ctx->context_id != deblock_msg->mmu_context.bits.context) {
+		PSB_DEBUG_MSVDX("backup cmd but find mis-match context id\n");
+		return;
+	}
+
+	ec_ctx->cmd_size = cmd_size;
+	ec_ctx->deblock_cmd_offset = deblock_cmd_offset;
+	memcpy(ec_ctx->unfenced_cmd, cmd, cmd_size);
+	ec_ctx->fence = PSB_MSVDX_INVALID_FENCE;
+	header = (union msg_header *)ec_ctx->unfenced_cmd;
+	if (cmd_size)
+		ec_ctx->fence = header->bits.msg_fence;
+	ec_ctx->fence &= (~0xf);
+	PSB_DEBUG_MSVDX("backup cmd for ved: fence 0x%08x, cmd_size %d\n",
+				ec_ctx->fence, cmd_size);
+}
+
+void psb_msvdx_mtx_message_dump(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	int i, buf_size, buf_offset;
+	buf_size = PSB_RMSVDX32(MSVDX_COMMS_TO_HOST_BUF_SIZE) & ((1 << 16) - 1);
+	buf_offset =
+		(PSB_RMSVDX32(MSVDX_COMMS_TO_HOST_BUF_SIZE) >> 16) + 0x2000;
+
+	printk(KERN_DEBUG "Dump to HOST message buffer (offset:size)%04x:%04x\n",
+	       buf_offset, buf_size);
+	for (i = 0; i < buf_size; i += 4) {
+		uint32_t reg1, reg2, reg3, reg4;
+		reg1 = PSB_RMSVDX32(buf_offset + i * 4);
+		reg2 = PSB_RMSVDX32(buf_offset + i * 4 + 4);
+		reg3 = PSB_RMSVDX32(buf_offset + i * 4 + 8);
+		reg4 = PSB_RMSVDX32(buf_offset + i * 4 + 12);
+		printk(KERN_DEBUG "0x%04x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+		       (buf_offset + i * 4), reg1, reg2, reg3, reg4);
+	}
+
+	buf_size = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_BUF_SIZE) & ((1 << 16) - 1);
+	buf_offset = (PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_BUF_SIZE) >> 16) + 0x2000;
+
+	printk(KERN_DEBUG "Dump to MTX message buffer (offset:size)%04x:%04x\n",
+	       buf_offset, buf_size);
+	for (i = 0; i < buf_size; i += 4) {
+		uint32_t reg1, reg2, reg3, reg4;
+		reg1 = PSB_RMSVDX32(buf_offset + i * 4);
+		reg2 = PSB_RMSVDX32(buf_offset + i * 4 + 4);
+		reg3 = PSB_RMSVDX32(buf_offset + i * 4 + 8);
+		reg4 = PSB_RMSVDX32(buf_offset + i * 4 + 12);
+		printk(KERN_DEBUG "0x%04x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+		       (buf_offset + i * 4), reg1, reg2, reg3, reg4);
+	}
+
+	buf_size = 12;
+	buf_offset = 0xFD0 + 0x2000;
+
+	printk(KERN_DEBUG "Comm header (offset:size)%04x:%04x\n",
+	       buf_offset, buf_size);
+	for (i = 0; i < buf_size; i += 4) {
+		uint32_t reg1, reg2, reg3, reg4;
+		reg1 = PSB_RMSVDX32(buf_offset + i * 4);
+		reg2 = PSB_RMSVDX32(buf_offset + i * 4 + 4);
+		reg3 = PSB_RMSVDX32(buf_offset + i * 4 + 8);
+		reg4 = PSB_RMSVDX32(buf_offset + i * 4 + 12);
+		printk(KERN_DEBUG "0x%04x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+		       (buf_offset + i * 4), reg1, reg2, reg3, reg4);
+	}
+
+	printk(KERN_DEBUG "Error status 0x2cc4: 0x%08x\n",
+	       PSB_RMSVDX32(0x2cc4));
+}
diff --git a/drivers/external_drivers/intel_media/video/decode/psb_msvdx_ec.h b/drivers/external_drivers/intel_media/video/decode/psb_msvdx_ec.h
new file mode 100644
index 0000000..a814f1b
--- /dev/null
+++ b/drivers/external_drivers/intel_media/video/decode/psb_msvdx_ec.h
@@ -0,0 +1,175 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2012 Intel Corporation, Hillsboro, OR, USA
+ * Copyright (c) Imagination Technologies Limited, UK
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *    Li Zeng <li.zeng@intel.com>
+ *
+ **************************************************************************/
+
+#define _PSB_MSVDX_EC_H_
+
+#define MSVDX_CMDS_BASE 0x1000
+#define MSVDX_CMDS_DISPLAY_PICTURE_SIZE_OFFSET (0x0000)
+
+/* MSVDX_CMDS, DISPLAY_PICTURE_SIZE, DISPLAY_PICTURE_HEIGHT */
+#define MSVDX_CMDS_DISPLAY_PICTURE_SIZE_DISPLAY_PICTURE_HEIGHT_MASK (0x00FFF000)
+#define MSVDX_CMDS_DISPLAY_PICTURE_SIZE_DISPLAY_PICTURE_HEIGHT_SHIFT (12)
+
+/* MSVDX_CMDS, DISPLAY_PICTURE_SIZE, DISPLAY_PICTURE_WIDTH */
+#define MSVDX_CMDS_DISPLAY_PICTURE_SIZE_DISPLAY_PICTURE_WIDTH_MASK (0x00000FFF)
+#define MSVDX_CMDS_DISPLAY_PICTURE_SIZE_DISPLAY_PICTURE_WIDTH_SHIFT (0)
+
+#define MSVDX_CMDS_CODED_PICTURE_SIZE_OFFSET (0x0004)
+
+/* MSVDX_CMDS, CODED_PICTURE_SIZE, CODED_PICTURE_HEIGHT */
+#define MSVDX_CMDS_CODED_PICTURE_SIZE_CODED_PICTURE_HEIGHT_MASK (0x00FFF000)
+#define MSVDX_CMDS_CODED_PICTURE_SIZE_CODED_PICTURE_HEIGHT_SHIFT (12)
+
+/* MSVDX_CMDS, CODED_PICTURE_SIZE, CODED_PICTURE_WIDTH */
+#define MSVDX_CMDS_CODED_PICTURE_SIZE_CODED_PICTURE_WIDTH_MASK (0x00000FFF)
+#define MSVDX_CMDS_CODED_PICTURE_SIZE_CODED_PICTURE_WIDTH_SHIFT (0)
+
+#define MSVDX_CMDS_OPERATING_MODE_OFFSET (0x0008)
+
+/* MSVDX_CMDS, OPERATING_MODE, RPR_ENABLE */
+#define MSVDX_CMDS_OPERATING_MODE_RPR_ENABLE_MASK (0x20000000)
+#define MSVDX_CMDS_OPERATING_MODE_RPR_ENABLE_SHIFT (29)
+
+/* MSVDX_CMDS, OPERATING_MODE, USE_EXT_ROW_STRIDE */
+#define MSVDX_CMDS_OPERATING_MODE_USE_EXT_ROW_STRIDE_MASK (0x10000000)
+#define MSVDX_CMDS_OPERATING_MODE_USE_EXT_ROW_STRIDE_SHIFT (28)
+
+/* MSVDX_CMDS, OPERATING_MODE, CHROMA_INTERLEAVED */
+#define MSVDX_CMDS_OPERATING_MODE_CHROMA_INTERLEAVED_MASK (0x08000000)
+#define MSVDX_CMDS_OPERATING_MODE_CHROMA_INTERLEAVED_SHIFT (27)
+/* MSVDX_CMDS, OPERATING_MODE, ROW_STRIDE */
+#define MSVDX_CMDS_OPERATING_MODE_ROW_STRIDE_MASK (0x07000000)
+#define MSVDX_CMDS_OPERATING_MODE_ROW_STRIDE_SHIFT (24)
+
+/* MSVDX_CMDS, OPERATING_MODE, CODEC_PROFILE */
+#define MSVDX_CMDS_OPERATING_MODE_CODEC_PROFILE_MASK (0x00300000)
+#define MSVDX_CMDS_OPERATING_MODE_CODEC_PROFILE_SHIFT (20)
+
+/* MSVDX_CMDS, OPERATING_MODE, CODEC_MODE */
+#define MSVDX_CMDS_OPERATING_MODE_CODEC_MODE_MASK (0x000F0000)
+#define MSVDX_CMDS_OPERATING_MODE_CODEC_MODE_SHIFT (16)
+
+/* MSVDX_CMDS, OPERATING_MODE, ASYNC_MODE */
+#define MSVDX_CMDS_OPERATING_MODE_ASYNC_MODE_MASK (0x00006000)
+#define MSVDX_CMDS_OPERATING_MODE_ASYNC_MODE_SHIFT (13)
+
+/* MSVDX_CMDS, OPERATING_MODE, CHROMA_FORMAT */
+#define MSVDX_CMDS_OPERATING_MODE_CHROMA_FORMAT_MASK (0x00001000)
+#define MSVDX_CMDS_OPERATING_MODE_CHROMA_FORMAT_SHIFT (12)
+
+/* MSVDX_CMDS, OPERATING_MODE, INTERLACED */
+#define MSVDX_CMDS_OPERATING_MODE_INTERLACED_MASK (0x00000800)
+#define MSVDX_CMDS_OPERATING_MODE_INTERLACED_SHIFT (11)
+
+/* MSVDX_CMDS, OPERATING_MODE, OVERLAP */
+#define MSVDX_CMDS_OPERATING_MODE_OVERLAP_MASK (0x00000400)
+#define MSVDX_CMDS_OPERATING_MODE_OVERLAP_SHIFT (10)
+
+/* MSVDX_CMDS, OPERATING_MODE, PIC_CONDOVER */
+#define MSVDX_CMDS_OPERATING_MODE_PIC_CONDOVER_MASK (0x00000300)
+#define MSVDX_CMDS_OPERATING_MODE_PIC_CONDOVER_SHIFT (8)
+/* MSVDX_CMDS, OPERATING_MODE, DEBLOCK_STRENGTH */
+#define MSVDX_CMDS_OPERATING_MODE_DEBLOCK_STRENGTH_MASK (0x000000E0)
+#define MSVDX_CMDS_OPERATING_MODE_DEBLOCK_STRENGTH_SHIFT (5)
+
+/* MSVDX_CMDS, OPERATING_MODE, PIC_QUANT */
+#define MSVDX_CMDS_OPERATING_MODE_PIC_QUANT_MASK (0x0000001F)
+#define MSVDX_CMDS_OPERATING_MODE_PIC_QUANT_SHIFT (0)
+
+#define MSVDX_CMDS_LUMA_RECONSTRUCTED_PICTURE_BASE_ADDRESSES_OFFSET (0x000C)
+#define MSVDX_CMDS_CHROMA_RECONSTRUCTED_PICTURE_BASE_ADDRESSES_OFFSET (0x0010)
+
+#define MSVDX_CMDS_REFERENCE_PICTURE_BASE_ADDRESSES_OFFSET (0x0100)
+
+#define MSVDX_CMDS_SLICE_PARAMS_OFFSET (0x0400)
+
+/* MSVDX_CMDS, SLICE_PARAMS, SLICE_FIELD_TYPE */
+#define MSVDX_CMDS_SLICE_PARAMS_SLICE_FIELD_TYPE_MASK (0x0000000C)
+#define MSVDX_CMDS_SLICE_PARAMS_SLICE_FIELD_TYPE_SHIFT (2)
+
+
+/* MSVDX_CMDS, SLICE_PARAMS, SLICE_CODE_TYPE */
+#define MSVDX_CMDS_SLICE_PARAMS_SLICE_CODE_TYPE_MASK (0x00000003)
+#define MSVDX_CMDS_SLICE_PARAMS_SLICE_CODE_TYPE_SHIFT (0)
+
+#define MSVDX_CMDS_ALTERNATIVE_OUTPUT_PICTURE_ROTATION_OFFSET (0x003C)
+
+#define MSVDX_CMDS_VC1_LUMA_RANGE_MAPPING_BASE_ADDRESS_OFFSET (0x0028)
+#define MSVDX_CMDS_VC1_CHROMA_RANGE_MAPPING_BASE_ADDRESS_OFFSET (0x002C)
+#define MSVDX_CMDS_VC1_RANGE_MAPPING_FLAGS_OFFSET (0x0030)
+
+#define MSVDX_CMDS_EXTENDED_ROW_STRIDE_OFFSET (0x0040)
+
+#define MSVDX_CMDS_END_SLICE_PICTURE_OFFSET (0x0404)
+
+#define MSVDX_CMDS_MACROBLOCK_NUMBER_OFFSET (0x0408)
+
+/* MSVDX_CMDS, MACROBLOCK_NUMBER, MB_CODE_TYPE */
+#define MSVDX_CMDS_MACROBLOCK_NUMBER_MB_CODE_TYPE_MASK (0x00030000)
+#define MSVDX_CMDS_MACROBLOCK_NUMBER_MB_CODE_TYPE_SHIFT (16)
+
+/* MSVDX_CMDS, MACROBLOCK_NUMBER, MB_NO_Y */
+#define MSVDX_CMDS_MACROBLOCK_NUMBER_MB_NO_Y_MASK (0x0000FF00)
+#define MSVDX_CMDS_MACROBLOCK_NUMBER_MB_NO_Y_SHIFT (8)
+
+/* MSVDX_CMDS, MACROBLOCK_NUMBER, MB_NO_X */
+#define MSVDX_CMDS_MACROBLOCK_NUMBER_MB_NO_X_MASK (0x000000FF)
+#define MSVDX_CMDS_MACROBLOCK_NUMBER_MB_NO_X_SHIFT (0)
+
+#define MSVDX_CMDS_MACROBLOCK_RESIDUAL_FORMAT_OFFSET (0x0418)
+
+#define MSVDX_CMDS_INTER_BLOCK_PREDICTION_OFFSET (0x0430)
+
+/* MSVDX_CMDS, INTER_BLOCK_PREDICTION, REF_INDEX_A_VALID */
+#define MSVDX_CMDS_INTER_BLOCK_PREDICTION_REF_INDEX_A_VALID_MASK (0x00000020)
+#define MSVDX_CMDS_INTER_BLOCK_PREDICTION_REF_INDEX_A_VALID_SHIFT (5)
+
+/* MSVDX_CMDS, INTER_BLOCK_PREDICTION, INTER_PRED_BLOCK_SIZE */
+#define MSVDX_CMDS_INTER_BLOCK_PREDICTION_INTER_PRED_BLOCK_SIZE_MASK (0x70000)
+#define MSVDX_CMDS_INTER_BLOCK_PREDICTION_INTER_PRED_BLOCK_SIZE_SHIFT (16)
+
+/* MSVDX_CMDS, INTER_BLOCK_PREDICTION, REF_INDEX_A */
+#define MSVDX_CMDS_INTER_BLOCK_PREDICTION_REF_INDEX_A_MASK (0x0000000F)
+#define MSVDX_CMDS_INTER_BLOCK_PREDICTION_REF_INDEX_A_SHIFT (0)
+
+/* MSVDX_CMDS, INTER_BLOCK_PREDICTION, REF_INDEX_B */
+#define MSVDX_CMDS_INTER_BLOCK_PREDICTION_REF_INDEX_B_MASK (0x00000F00)
+#define MSVDX_CMDS_INTER_BLOCK_PREDICTION_REF_INDEX_B_SHIFT (8)
+
+#define MSVDX_CMDS_MOTION_VECTOR_OFFSET (0x0500)
+
+#define MSVDX_CORE_CR_MSVDX_COMMAND_SPACE_OFFSET (0x0028)
+
+#define MSVDX_CORE_BASE	(0x600)
+
+void psb_msvdx_update_frame_info(struct msvdx_private *msvdx_priv,
+					struct ttm_object_file *tfile,
+					void *cmd);
+void psb_msvdx_backup_cmd(struct msvdx_private *msvdx_priv,
+				struct ttm_object_file *tfile,
+				void *cmd,
+				uint32_t cmd_size,
+				uint32_t deblock_cmd_offset);
+
+void psb_msvdx_mtx_message_dump(struct drm_device *dev);
+void psb_msvdx_do_concealment(struct work_struct *work);
diff --git a/drivers/external_drivers/intel_media/video/decode/psb_msvdx_fw.c b/drivers/external_drivers/intel_media/video/decode/psb_msvdx_fw.c
new file mode 100644
index 0000000..4f7c3e9
--- /dev/null
+++ b/drivers/external_drivers/intel_media/video/decode/psb_msvdx_fw.c
@@ -0,0 +1,769 @@
+/**************************************************************************
+ * psb_msvdxinit.c
+ * MSVDX initialization and mtx-firmware upload
+ *
+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
+ * Copyright (c) Imagination Technologies Limited, UK
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *    Li Zeng <li.zeng@intel.com>
+ *    Fei Jiang <fei.jiang@intel.com>
+ *
+ **************************************************************************/
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#ifdef CONFIG_DRM_VXD_BYT
+#include "vxd_drv.h"
+#else
+#include "psb_drv.h"
+#endif
+
+#include "psb_msvdx.h"
+#include <linux/firmware.h>
+#include "psb_msvdx_reg.h"
+
+#ifdef VXD_FW_BUILT_IN_KERNEL
+#include <linux/module.h>
+#endif
+
+#define UPLOAD_FW_BY_DMA 1
+#define STACKGUARDWORD          0x10101010
+#define MSVDX_MTX_DATA_LOCATION 0x82880000
+#define UNINITILISE_MEM 	0xcdcdcdcd
+
+#ifdef VXD_FW_BUILT_IN_KERNEL
+#define FIRMWARE_NAME "msvdx_fw_mfld_DE2.0.bin"
+MODULE_FIRMWARE(FIRMWARE_NAME);
+#endif
+
+/*MSVDX FW header*/
+struct msvdx_fw {
+	uint32_t ver;
+	uint32_t text_size;
+	uint32_t data_size;
+	uint32_t data_location;
+};
+
+int32_t psb_msvdx_alloc_fw_bo(struct drm_psb_private *dev_priv)
+{
+	uint32_t core_rev;
+	int32_t ret = 0;
+	struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
+
+	core_rev = PSB_RMSVDX32(MSVDX_CORE_REV_OFFSET);
+
+	if ((core_rev & 0xffffff) < 0x020000)
+		msvdx_priv->mtx_mem_size = 16 * 1024;
+	else
+		msvdx_priv->mtx_mem_size = 56 * 1024;
+
+	PSB_DEBUG_INIT("MSVDX: MTX mem size is 0x%08x bytes, allocate firmware BO size 0x%08x\n", msvdx_priv->mtx_mem_size,
+		       msvdx_priv->mtx_mem_size + 4096);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+	ret = ttm_buffer_object_create(&dev_priv->bdev, msvdx_priv->mtx_mem_size + 4096, /* DMA may run over a page */
+				       ttm_bo_type_kernel,
+				       DRM_PSB_FLAG_MEM_MMU | TTM_PL_FLAG_NO_EVICT,
+				       0, 0, 0, NULL, &msvdx_priv->fw);
+#else
+	ret = ttm_buffer_object_create(&dev_priv->bdev, msvdx_priv->mtx_mem_size + 4096, /* DMA may run over a page */
+				       ttm_bo_type_kernel,
+				       DRM_PSB_FLAG_MEM_MMU | TTM_PL_FLAG_NO_EVICT,
+				       0, 0, NULL, &msvdx_priv->fw);
+#endif
+
+	if (ret) {
+		DRM_ERROR("MSVDX: allocate firmware BO fail\n");
+	}
+	return ret;
+}
+
+#if UPLOAD_FW_BY_DMA
+
+static void msvdx_get_mtx_control_from_dash(struct drm_psb_private *dev_priv)
+{
+	struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
+	int count = 0;
+	uint32_t reg_val = 0;
+
+	REGIO_WRITE_FIELD(reg_val, MSVDX_MTX_DEBUG, MTX_DBG_IS_SLAVE, 1);
+	REGIO_WRITE_FIELD(reg_val, MSVDX_MTX_DEBUG, MTX_DBG_GPIO_IN, 0x02);
+	PSB_WMSVDX32(reg_val, MSVDX_MTX_DEBUG_OFFSET);
+
+	do {
+		reg_val = PSB_RMSVDX32(MSVDX_MTX_DEBUG_OFFSET);
+		count++;
+	} while (((reg_val & 0x18) != 0) && count < 50000);
+
+	if (count >= 50000)
+		PSB_DEBUG_GENERAL("MAVDX: timeout in get_mtx_control_from_dash\n");
+
+	/* Save the access control register...*/
+	msvdx_priv->psb_dash_access_ctrl = PSB_RMSVDX32(MTX_RAM_ACCESS_CONTROL_OFFSET);
+}
+
+static void msvdx_release_mtx_control_from_dash(struct drm_psb_private *dev_priv)
+{
+	struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
+
+	/* restore access control */
+	PSB_WMSVDX32(msvdx_priv->psb_dash_access_ctrl, MTX_RAM_ACCESS_CONTROL_OFFSET);
+	/* release bus */
+	PSB_WMSVDX32(0x4, MSVDX_MTX_DEBUG_OFFSET);
+}
+
+/* for future debug info of msvdx related registers */
+static void psb_setup_fw_dump(struct drm_psb_private *dev_priv, uint32_t dma_channel)
+{
+/* for DMAC REGISTER */
+	DRM_ERROR("MSVDX: Upload firmware MTX_SYSC_CDMAA is 0x%x\n", PSB_RMSVDX32(MTX_SYSC_CDMAA_OFFSET));
+	DRM_ERROR("MSVDX: Upload firmware MTX_SYSC_CDMAC value is 0x%x\n", PSB_RMSVDX32(MTX_SYSC_CDMAC_OFFSET));
+	DRM_ERROR("MSVDX: Upload firmware DMAC_SETUP value is 0x%x\n", PSB_RMSVDX32(DMAC_DMAC_SETUP_OFFSET + dma_channel));
+	DRM_ERROR("MSVDX: Upload firmware DMAC_DMAC_COUNT value is 0x%x\n", PSB_RMSVDX32(DMAC_DMAC_COUNT_OFFSET + dma_channel));
+	DRM_ERROR("MSVDX: Upload firmware DMAC_DMAC_PERIPH_OFFSET value is 0x%x\n", PSB_RMSVDX32(DMAC_DMAC_PERIPH_OFFSET + dma_channel));
+	DRM_ERROR("MSVDX: Upload firmware DMAC_DMAC_PERIPHERAL_ADDR value is 0x%x\n", PSB_RMSVDX32(DMAC_DMAC_PERIPHERAL_ADDR_OFFSET + dma_channel));
+	DRM_ERROR("MSVDX: Upload firmware MSVDX_CONTROL value is 0x%x\n", PSB_RMSVDX32(MSVDX_CONTROL_OFFSET));
+	DRM_ERROR("MSVDX: Upload firmware DMAC_DMAC_IRQ_STAT value is 0x%x\n", PSB_RMSVDX32(DMAC_DMAC_IRQ_STAT_OFFSET));
+	DRM_ERROR("MSVDX: Upload firmware MSVDX_MMU_CONTROL0 value is 0x%x\n", PSB_RMSVDX32(MSVDX_MMU_CONTROL0_OFFSET));
+	DRM_ERROR("MSVDX: Upload firmware DMAC_DMAC_COUNT 2222 value is 0x%x\n", PSB_RMSVDX32(DMAC_DMAC_COUNT_OFFSET + dma_channel));
+
+/* for MTX REGISTER */
+	DRM_ERROR("MSVDX: Upload firmware MTX_ENABLE_OFFSET is 0x%x\n", PSB_RMSVDX32(MTX_ENABLE_OFFSET));
+	DRM_ERROR("MSVDX: Upload firmware MTX_KICK_INPUT_OFFSET value is 0x%x\n", PSB_RMSVDX32(MTX_KICK_INPUT_OFFSET));
+	DRM_ERROR("MSVDX: Upload firmware MTX_REGISTER_READ_WRITE_REQUEST_OFFSET value is 0x%x\n", PSB_RMSVDX32(MTX_REGISTER_READ_WRITE_REQUEST_OFFSET));
+	DRM_ERROR("MSVDX: Upload firmware MTX_RAM_ACCESS_CONTROL_OFFSET value is 0x%x\n", PSB_RMSVDX32(MTX_RAM_ACCESS_CONTROL_OFFSET));
+	DRM_ERROR("MSVDX: Upload firmware MTX_RAM_ACCESS_STATUS_OFFSET value is 0x%x\n", PSB_RMSVDX32(MTX_RAM_ACCESS_STATUS_OFFSET));
+	DRM_ERROR("MSVDX: Upload firmware MTX_SYSC_TIMERDIV_OFFSET value is 0x%x\n", PSB_RMSVDX32(MTX_SYSC_TIMERDIV_OFFSET));
+	DRM_ERROR("MSVDX: Upload firmware MTX_SYSC_CDMAC_OFFSET value is 0x%x\n", PSB_RMSVDX32(MTX_SYSC_CDMAC_OFFSET));
+	DRM_ERROR("MSVDX: Upload firmware MTX_SYSC_CDMAA_OFFSET value is 0x%x\n", PSB_RMSVDX32(MTX_SYSC_CDMAA_OFFSET));
+	DRM_ERROR("MSVDX: Upload firmware MTX_SYSC_CDMAS0_OFFSET value is 0x%x\n", PSB_RMSVDX32(MTX_SYSC_CDMAS0_OFFSET));
+	DRM_ERROR("MSVDX: Upload firmware MTX_SYSC_CDMAT_OFFSET value is 0x%x\n", PSB_RMSVDX32(MTX_SYSC_CDMAT_OFFSET));
+
+/* for MSVDX CORE REGISTER */
+	DRM_ERROR("MSVDX: Upload firmware MSVDX_CONTROL_OFFSET is 0x%x\n", PSB_RMSVDX32(MSVDX_CONTROL_OFFSET));
+	DRM_ERROR("MSVDX: Upload firmware MSVDX_INTERRUPT_CLEAR_OFFSET value is 0x%x\n", PSB_RMSVDX32(MSVDX_INTERRUPT_CLEAR_OFFSET));
+	DRM_ERROR("MSVDX: Upload firmware MSVDX_INTERRUPT_STATUS_OFFSET value is 0x%x\n", PSB_RMSVDX32(MSVDX_INTERRUPT_STATUS_OFFSET));
+	DRM_ERROR("MSVDX: Upload firmware MSVDX_HOST_INTERRUPT_ENABLE_OFFSET value is 0x%x\n", PSB_RMSVDX32(MSVDX_HOST_INTERRUPT_ENABLE_OFFSET));
+	DRM_ERROR("MSVDX: Upload firmware MSVDX_MAN_CLK_ENABLE_OFFSET value is 0x%x\n", PSB_RMSVDX32(MSVDX_MAN_CLK_ENABLE_OFFSET));
+	DRM_ERROR("MSVDX: Upload firmware MSVDX_CORE_ID_OFFSET value is 0x%x\n", PSB_RMSVDX32(MSVDX_CORE_ID_OFFSET));
+	DRM_ERROR("MSVDX: Upload firmware MSVDX_MMU_STATUS_OFFSET value is 0x%x\n", PSB_RMSVDX32(MSVDX_MMU_STATUS_OFFSET));
+	DRM_ERROR("MSVDX: Upload firmware FE_MSVDX_WDT_CONTROL_OFFSET value is 0x%x\n", PSB_RMSVDX32(FE_MSVDX_WDT_CONTROL_OFFSET));
+	DRM_ERROR("MSVDX: Upload firmware FE_MSVDX_WDTIMER_OFFSET value is 0x%x\n", PSB_RMSVDX32(FE_MSVDX_WDTIMER_OFFSET));
+	DRM_ERROR("MSVDX: Upload firmware BE_MSVDX_WDT_CONTROL_OFFSET value is 0x%x\n", PSB_RMSVDX32(BE_MSVDX_WDT_CONTROL_OFFSET));
+	DRM_ERROR("MSVDX: Upload firmware BE_MSVDX_WDTIMER_OFFSET value is 0x%x\n", PSB_RMSVDX32(BE_MSVDX_WDTIMER_OFFSET));
+
+/* for MSVDX RENDEC REGISTER */
+	DRM_ERROR("MSVDX: Upload firmware VEC_SHIFTREG_CONTROL_OFFSET is 0x%x\n", PSB_RMSVDX32(VEC_SHIFTREG_CONTROL_OFFSET));
+	DRM_ERROR("MSVDX: Upload firmware MSVDX_RENDEC_CONTROL0_OFFSET value is 0x%x\n", PSB_RMSVDX32(MSVDX_RENDEC_CONTROL0_OFFSET));
+	DRM_ERROR("MSVDX: Upload firmware MSVDX_RENDEC_BUFFER_SIZE_OFFSET value is 0x%x\n", PSB_RMSVDX32(MSVDX_RENDEC_BUFFER_SIZE_OFFSET));
+	DRM_ERROR("MSVDX: Upload firmware MSVDX_RENDEC_BASE_ADDR0_OFFSET value is 0x%x\n", PSB_RMSVDX32(MSVDX_RENDEC_BASE_ADDR0_OFFSET));
+	DRM_ERROR("MSVDX: Upload firmware MSVDX_RENDEC_BASE_ADDR1_OFFSET value is 0x%x\n", PSB_RMSVDX32(MSVDX_RENDEC_BASE_ADDR1_OFFSET));
+	DRM_ERROR("MSVDX: Upload firmware MSVDX_RENDEC_READ_DATA_OFFSET value is 0x%x\n", PSB_RMSVDX32(MSVDX_RENDEC_READ_DATA_OFFSET));
+	DRM_ERROR("MSVDX: Upload firmware MSVDX_RENDEC_CONTEXT0_OFFSET value is 0x%x\n", PSB_RMSVDX32(MSVDX_RENDEC_CONTEXT0_OFFSET));
+	DRM_ERROR("MSVDX: Upload firmware MSVDX_RENDEC_CONTEXT1_OFFSET value is 0x%x\n", PSB_RMSVDX32(MSVDX_RENDEC_CONTEXT1_OFFSET));
+
+	DRM_ERROR("MSVDX: Upload firmware MSVDX_MMU_MEM_REQ value is 0x%x\n", PSB_RMSVDX32(MSVDX_MMU_MEM_REQ_OFFSET));
+	DRM_ERROR("MSVDX: Upload firmware MSVDX_SYS_MEMORY_DEBUG2 value is 0x%x\n", PSB_RMSVDX32(0x6fc));
+}
+
+static void msvdx_upload_fw(struct drm_psb_private *dev_priv,
+			  uint32_t address, const unsigned int words, int fw_sel)
+{
+	uint32_t reg_val = 0;
+	uint32_t cmd;
+	uint32_t uCountReg, offset, mmu_ptd;
+	uint32_t size = (words * 4); /* byte count */
+	uint32_t dma_channel = 0; /* Setup a Simple DMA for Ch0 */
+	struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
+
+	PSB_DEBUG_GENERAL("MSVDX: Upload firmware by DMA\n");
+	msvdx_get_mtx_control_from_dash(dev_priv);
+
+	/* dma transfers to/from the mtx have to be 32-bit aligned and in multiples of 32 bits */
+	PSB_WMSVDX32(address, MTX_SYSC_CDMAA_OFFSET);
+
+	REGIO_WRITE_FIELD_LITE(reg_val, MTX_SYSC_CDMAC, BURSTSIZE,	4); /* burst size in multiples of 64 bits (allowed values are 2 or 4) */
+	REGIO_WRITE_FIELD_LITE(reg_val, MTX_SYSC_CDMAC, RNW, 0);	/* false means write to mtx mem, true means read from mtx mem */
+	REGIO_WRITE_FIELD_LITE(reg_val, MTX_SYSC_CDMAC, ENABLE,	1);				/* begin transfer */
+	REGIO_WRITE_FIELD_LITE(reg_val, MTX_SYSC_CDMAC, LENGTH,	words);		/* This specifies the transfer size of the DMA operation in terms of 32-bit words */
+	PSB_WMSVDX32(reg_val, MTX_SYSC_CDMAC_OFFSET);
+
+	/* toggle channel 0 usage between mtx and other msvdx peripherals */
+	{
+		reg_val = PSB_RMSVDX32(MSVDX_CONTROL_OFFSET);
+		REGIO_WRITE_FIELD(reg_val, MSVDX_CONTROL, DMAC_CH0_SELECT,  0);
+		PSB_WMSVDX32(reg_val, MSVDX_CONTROL_OFFSET);
+	}
+
+
+	/* Clear the DMAC Stats */
+	PSB_WMSVDX32(0 , DMAC_DMAC_IRQ_STAT_OFFSET + dma_channel);
+
+	offset = msvdx_priv->fw->offset;
+
+	if (fw_sel)
+		offset += ((msvdx_priv->mtx_mem_size + 8192) & ~0xfff);
+
+	/* use bank 0 */
+	cmd = 0;
+	PSB_WMSVDX32(cmd, MSVDX_MMU_BANK_INDEX_OFFSET);
+
+	/* Write PTD to mmu base 0*/
+	mmu_ptd = psb_get_default_pd_addr(dev_priv->mmu);
+	PSB_WMSVDX32(mmu_ptd, MSVDX_MMU_DIR_LIST_BASE_OFFSET + 0);
+
+	/* Invalidate */
+	reg_val = PSB_RMSVDX32(MSVDX_MMU_CONTROL0_OFFSET);
+	reg_val &= ~0xf;
+	REGIO_WRITE_FIELD(reg_val, MSVDX_MMU_CONTROL0, MMU_INVALDC, 1);
+	PSB_WMSVDX32(reg_val, MSVDX_MMU_CONTROL0_OFFSET);
+
+	PSB_WMSVDX32(offset, DMAC_DMAC_SETUP_OFFSET + dma_channel);
+
+	/* Only use a single dma - assert that this is valid */
+	if ((size / 4) >= (1 << 15)) {
+		DRM_ERROR("psb: DMA size beyond limited, aboart firmware uploading\n");
+		return;
+	}
+
+	uCountReg = PSB_DMAC_VALUE_COUNT(PSB_DMAC_BSWAP_NO_SWAP,
+					 0,  /* 32 bits */
+					 PSB_DMAC_DIR_MEM_TO_PERIPH,
+					 0,
+					 (size / 4));
+	/* Set the number of bytes to dma*/
+	PSB_WMSVDX32(uCountReg, DMAC_DMAC_COUNT_OFFSET + dma_channel);
+
+	cmd = PSB_DMAC_VALUE_PERIPH_PARAM(PSB_DMAC_ACC_DEL_0, PSB_DMAC_INCR_OFF, PSB_DMAC_BURST_2);
+	PSB_WMSVDX32(cmd, DMAC_DMAC_PERIPH_OFFSET + dma_channel);
+
+	/* Set destination port for dma */
+	cmd = 0;
+	REGIO_WRITE_FIELD(cmd, DMAC_DMAC_PERIPHERAL_ADDR, ADDR, MTX_SYSC_CDMAT_OFFSET);
+	PSB_WMSVDX32(cmd, DMAC_DMAC_PERIPHERAL_ADDR_OFFSET + dma_channel);
+
+
+	/* Finally, rewrite the count register with the enable bit set*/
+	PSB_WMSVDX32(uCountReg | DMAC_DMAC_COUNT_EN_MASK, DMAC_DMAC_COUNT_OFFSET + dma_channel);
+
+	/* Wait for all to be done */
+	if (psb_wait_for_register(dev_priv,
+				  DMAC_DMAC_IRQ_STAT_OFFSET + dma_channel,
+				  DMAC_DMAC_IRQ_STAT_TRANSFER_FIN_MASK,
+				  DMAC_DMAC_IRQ_STAT_TRANSFER_FIN_MASK,
+				  2000000, 5)) {
+		psb_setup_fw_dump(dev_priv, dma_channel);
+		msvdx_release_mtx_control_from_dash(dev_priv);
+		return;
+	}
+
+	/* Assert that the MTX DMA port is all done aswell */
+	if (psb_wait_for_register(dev_priv,
+			MTX_SYSC_CDMAS0_OFFSET,
+			1, 1, 2000000, 5)) {
+		msvdx_release_mtx_control_from_dash(dev_priv);
+		return;
+	}
+
+	msvdx_release_mtx_control_from_dash(dev_priv);
+
+	PSB_DEBUG_GENERAL("MSVDX: Upload done\n");
+}
+
+#else
+
+static void msvdx_upload_fw(struct drm_psb_private *dev_priv,
+			  const uint32_t data_mem, uint32_t ram_bank_size,
+			  uint32_t address, const unsigned int words,
+			  const uint32_t * const data)
+{
+	uint32_t loop, ctrl, ram_id, addr, cur_bank = (uint32_t) ~0;
+	uint32_t access_ctrl;
+
+	PSB_DEBUG_GENERAL("MSVDX: Upload firmware by register interface\n");
+	/* Save the access control register... */
+	access_ctrl = PSB_RMSVDX32(MTX_RAM_ACCESS_CONTROL_OFFSET);
+
+	/* Wait for MCMSTAT to become be idle 1 */
+	psb_wait_for_register(dev_priv, MTX_RAM_ACCESS_STATUS_OFFSET,
+			      1,	/* Required Value */
+			      0xffffffff, /* Enables */
+			      2000000, 5);
+
+	for (loop = 0; loop < words; loop++) {
+		ram_id = data_mem + (address / ram_bank_size);
+		if (ram_id != cur_bank) {
+			addr = address >> 2;
+			ctrl = 0;
+			REGIO_WRITE_FIELD_LITE(ctrl,
+					       MTX_RAM_ACCESS_CONTROL,
+					       MTX_MCMID, ram_id);
+			REGIO_WRITE_FIELD_LITE(ctrl,
+					       MTX_RAM_ACCESS_CONTROL,
+					       MTX_MCM_ADDR, addr);
+			REGIO_WRITE_FIELD_LITE(ctrl,
+					       MTX_RAM_ACCESS_CONTROL,
+					       MTX_MCMAI, 1);
+			PSB_WMSVDX32(ctrl, MTX_RAM_ACCESS_CONTROL_OFFSET);
+			cur_bank = ram_id;
+		}
+		address += 4;
+
+		PSB_WMSVDX32(data[loop],
+			     MTX_RAM_ACCESS_DATA_TRANSFER_OFFSET);
+
+		/* Wait for MCMSTAT to become be idle 1 */
+		psb_wait_for_register(dev_priv, MTX_RAM_ACCESS_STATUS_OFFSET,
+				      1,	/* Required Value */
+				      0xffffffff, /* Enables */
+				      2000000, 5);
+	}
+	PSB_DEBUG_GENERAL("MSVDX: Upload done\n");
+
+	/* Restore the access control register... */
+	PSB_WMSVDX32(access_ctrl, MSVDX_MTX_RAM_ACCESS_CONTROL);
+}
+
+#endif
+
+#if 0
+static int msvdx_verify_fw(struct drm_psb_private *dev_priv,
+			 const uint32_t ram_bank_size,
+			 const uint32_t data_mem, uint32_t address,
+			 const uint32_t words, const uint32_t * const data)
+{
+	uint32_t loop, ctrl, ram_id, addr, cur_bank = (uint32_t) ~0;
+	uint32_t access_ctrl;
+	int ret = 0;
+
+	/* Save the access control register... */
+	access_ctrl = PSB_RMSVDX32(MTX_RAM_ACCESS_CONTROL_OFFSET);
+
+	/* Wait for MCMSTAT to become be idle 1 */
+	psb_wait_for_register(dev_priv, MTX_RAM_ACCESS_STATUS_OFFSET,
+			      1,	/* Required Value */
+			      0xffffffff, /* Enables */
+			      2000000, 5);
+
+	for (loop = 0; loop < words; loop++) {
+		uint32_t reg_value;
+		ram_id = data_mem + (address / ram_bank_size);
+
+		if (ram_id != cur_bank) {
+			addr = address >> 2;
+			ctrl = 0;
+			REGIO_WRITE_FIELD_LITE(ctrl,
+					       MTX_RAM_ACCESS_CONTROL,
+					       MTX_MCMID, ram_id);
+			REGIO_WRITE_FIELD_LITE(ctrl,
+					       MTX_RAM_ACCESS_CONTROL,
+					       MTX_MCM_ADDR, addr);
+			REGIO_WRITE_FIELD_LITE(ctrl,
+					       MTX_RAM_ACCESS_CONTROL,
+					       MTX_MCMAI, 1);
+			REGIO_WRITE_FIELD_LITE(ctrl,
+					       MTX_RAM_ACCESS_CONTROL,
+					       MTX_MCMR, 1);
+
+			PSB_WMSVDX32(ctrl, MTX_RAM_ACCESS_CONTROL_OFFSET);
+
+			cur_bank = ram_id;
+		}
+		address += 4;
+
+		/* Wait for MCMSTAT to become be idle 1 */
+		psb_wait_for_register(dev_priv, MTX_RAM_ACCESS_STATUS_OFFSET,
+				      1,	/* Required Value */
+				      0xffffffff, /* Enables */
+				      2000000, 5);
+
+		reg_value = PSB_RMSVDX32(MTX_RAM_ACCESS_DATA_TRANSFER_OFFSET);
+		if (data[loop] != reg_value) {
+			DRM_ERROR("psb: Firmware validation fails"
+				  " at index=%08x\n", loop);
+			ret = 1;
+			break;
+		}
+	}
+
+	/* Restore the access control register... */
+	PSB_WMSVDX32(access_ctrl, MTX_RAM_ACCESS_CONTROL_OFFSET);
+
+	return ret;
+}
+#endif
+
+static int msvdx_get_fw_bo(struct drm_device *dev,
+			   const struct firmware **raw, uint8_t *name)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	int rc, fw_size;
+	void *ptr = NULL;
+	struct ttm_bo_kmap_obj tmp_kmap;
+	bool is_iomem;
+	struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
+	void *gpu_addr;
+
+	rc = request_firmware(raw, name, &dev->pdev->dev);
+	if (*raw == NULL || rc < 0) {
+		DRM_ERROR("MSVDX: %s request_firmware failed: Reason %d\n",
+			  name, rc);
+		return 1;
+	}
+
+	if ((*raw)->size < sizeof(struct msvdx_fw)) {
+		DRM_ERROR("MSVDX: %s is is not correct size(%zd)\n",
+			  name, (*raw)->size);
+		return 1;
+	}
+
+	ptr = (void *)((*raw))->data;
+
+	if (!ptr) {
+		DRM_ERROR("MSVDX: Failed to load %s\n", name);
+		return 1;
+	}
+
+	/* another sanity check... */
+	fw_size = sizeof(struct msvdx_fw) +
+		  sizeof(uint32_t) * ((struct msvdx_fw *) ptr)->text_size +
+		  sizeof(uint32_t) * ((struct msvdx_fw *) ptr)->data_size;
+	if ((*raw)->size < fw_size) {
+		DRM_ERROR("MSVDX: %s is is not correct size(%zd)\n",
+			  name, (*raw)->size);
+		return 1;
+	}
+
+	/* there is 4 byte split between text and data,
+	 * also there is 4 byte guard after data */
+	if (((struct msvdx_fw *)ptr)->text_size + 8 +
+		((struct msvdx_fw *)ptr)->data_size >
+		msvdx_priv->mtx_mem_size) {
+		DRM_ERROR("MSVDX: fw size is bigger than mtx_mem_size.\n");
+		return 1;
+	}
+
+	rc = ttm_bo_kmap(msvdx_priv->fw, 0, (msvdx_priv->fw)->num_pages, &tmp_kmap);
+	if (rc) {
+		PSB_DEBUG_GENERAL("drm_bo_kmap failed: %d\n", rc);
+		ttm_bo_unref(&msvdx_priv->fw);
+		ttm_bo_kunmap(&tmp_kmap);
+		return 1;
+	} else {
+		uint32_t *last_word;
+		gpu_addr = ttm_kmap_obj_virtual(&tmp_kmap, &is_iomem);
+
+		memset(gpu_addr, UNINITILISE_MEM, msvdx_priv->mtx_mem_size);
+
+		memcpy(gpu_addr, ptr + sizeof(struct msvdx_fw),
+			sizeof(uint32_t) * ((struct msvdx_fw *)ptr)->text_size);
+
+		memcpy(gpu_addr + (((struct msvdx_fw *) ptr)->data_location - MSVDX_MTX_DATA_LOCATION),
+			(void *)ptr + sizeof(struct msvdx_fw) + sizeof(uint32_t) * ((struct msvdx_fw *)ptr)->text_size,
+			sizeof(uint32_t) * ((struct msvdx_fw *)ptr)->data_size);
+
+		last_word = (uint32_t *)(gpu_addr + msvdx_priv->mtx_mem_size - 4);
+		/* Write a know value to last word in mtx memory*/
+		/* Usefull for detection of stack overrun */
+		*last_word = STACKGUARDWORD;
+	}
+
+	ttm_bo_kunmap(&tmp_kmap);
+	PSB_DEBUG_GENERAL("MSVDX: releasing firmware resouces\n");
+	PSB_DEBUG_GENERAL("MSVDX: Load firmware into BO successfully\n");
+	release_firmware(*raw);
+	return rc;
+}
+
+static uint32_t *msvdx_get_fw(struct drm_device *dev,
+			      const struct firmware **raw, uint8_t *name)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	int rc, fw_size;
+	void *ptr = NULL;
+	struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
+
+	rc = request_firmware(raw, name, &dev->pdev->dev);
+	if (*raw == NULL || rc < 0) {
+		DRM_ERROR("MSVDX: %s request_firmware failed: Reason %d\n",
+			  name, rc);
+		return NULL;
+	}
+
+	if ((*raw)->size < sizeof(struct msvdx_fw)) {
+		DRM_ERROR("MSVDX: %s is is not correct size(%zd)\n",
+			  name, (*raw)->size);
+		release_firmware(*raw);
+		return NULL;
+	}
+
+	ptr = (int *)((*raw))->data;
+
+	if (!ptr) {
+		DRM_ERROR("MSVDX: Failed to load %s\n", name);
+		release_firmware(*raw);
+		return NULL;
+	}
+
+	/* another sanity check... */
+	fw_size = sizeof(struct msvdx_fw) +
+		  sizeof(uint32_t) * ((struct msvdx_fw *) ptr)->text_size +
+		  sizeof(uint32_t) * ((struct msvdx_fw *) ptr)->data_size;
+	if ((*raw)->size < fw_size) {
+		DRM_ERROR("MSVDX: %s is is not correct size(%zd)\n",
+			  name, (*raw)->size);
+		release_firmware(*raw);
+		return NULL;
+	} else if ((*raw)->size > fw_size) { /* there is ec firmware */
+		ptr += ((fw_size + 0xfff) & ~0xfff);
+		fw_size += (sizeof(struct msvdx_fw) +
+			    sizeof(uint32_t) * ((struct msvdx_fw *) ptr)->text_size +
+			    sizeof(uint32_t) * ((struct msvdx_fw *) ptr)->data_size);
+
+		ptr = (int *)((*raw))->data;  /* Resotre ptr to start of the firmware file */
+	}
+
+	msvdx_priv->msvdx_fw = kzalloc(fw_size, GFP_KERNEL);
+	if (msvdx_priv->msvdx_fw == NULL)
+		DRM_ERROR("MSVDX: allocate FW buffer failed\n");
+	else {
+		memcpy(msvdx_priv->msvdx_fw, ptr, fw_size);
+		msvdx_priv->msvdx_fw_size = fw_size;
+	}
+
+	PSB_DEBUG_GENERAL("MSVDX: releasing firmware resouces\n");
+	release_firmware(*raw);
+
+	return msvdx_priv->msvdx_fw;
+}
+
+void msvdx_write_mtx_core_reg(struct drm_psb_private *dev_priv,
+			    const uint32_t core_reg, const uint32_t val)
+{
+	uint32_t reg = 0;
+
+	/* Put data in MTX_RW_DATA */
+	PSB_WMSVDX32(val, MTX_REGISTER_READ_WRITE_DATA_OFFSET);
+
+	/* DREADY is set to 0 and request a write */
+	reg = core_reg;
+	REGIO_WRITE_FIELD_LITE(reg, MTX_REGISTER_READ_WRITE_REQUEST,
+			       MTX_RNW, 0);
+	REGIO_WRITE_FIELD_LITE(reg, MTX_REGISTER_READ_WRITE_REQUEST,
+			       MTX_DREADY, 0);
+	PSB_WMSVDX32(reg, MTX_REGISTER_READ_WRITE_REQUEST_OFFSET);
+
+	psb_wait_for_register(dev_priv,
+			      MTX_REGISTER_READ_WRITE_REQUEST_OFFSET,
+			      MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK,
+			      MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK,
+			      2000000, 5);
+}
+
+int psb_setup_fw(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	uint32_t ram_bank_size;
+	struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
+	int ret = 0;
+	struct msvdx_fw *fw;
+	uint32_t *fw_ptr = NULL;
+	uint32_t *text_ptr = NULL;
+	uint32_t *data_ptr = NULL;
+	const struct firmware *raw = NULL;
+	int ec_firmware = 0;
+
+	/* todo : Assert the clock is on - if not turn it on to upload code */
+	PSB_DEBUG_GENERAL("MSVDX: psb_setup_fw\n");
+
+	psb_msvdx_mtx_set_clocks(dev_priv->dev, clk_enable_all);
+
+	/* Reset MTX */
+	PSB_WMSVDX32(MTX_SOFT_RESET_MTX_RESET_MASK,
+			MTX_SOFT_RESET_OFFSET);
+
+	PSB_WMSVDX32(FIRMWAREID, MSVDX_COMMS_FIRMWARE_ID);
+
+	PSB_WMSVDX32(0, MSVDX_COMMS_ERROR_TRIG);
+	PSB_WMSVDX32(199, MTX_SYSC_TIMERDIV_OFFSET); /* MTX_SYSC_TIMERDIV */
+	PSB_WMSVDX32(0, MSVDX_EXT_FW_ERROR_STATE); /* EXT_FW_ERROR_STATE */
+	PSB_WMSVDX32(0, MSVDX_COMMS_MSG_COUNTER);
+	PSB_WMSVDX32(0, MSVDX_COMMS_SIGNATURE);
+	PSB_WMSVDX32(0, MSVDX_COMMS_TO_HOST_RD_INDEX);
+	PSB_WMSVDX32(0, MSVDX_COMMS_TO_HOST_WRT_INDEX);
+	PSB_WMSVDX32(0, MSVDX_COMMS_TO_MTX_RD_INDEX);
+	PSB_WMSVDX32(0, MSVDX_COMMS_TO_MTX_WRT_INDEX);
+	PSB_WMSVDX32(0, MSVDX_COMMS_FW_STATUS);
+#ifndef CONFIG_SLICE_HEADER_PARSING
+	PSB_WMSVDX32(RETURN_VDEB_DATA_IN_COMPLETION | NOT_ENABLE_ON_HOST_CONCEALMENT,
+			MSVDX_COMMS_OFFSET_FLAGS);
+#else
+	/* decode flag should be set as 0 according to IMG's said */
+	PSB_WMSVDX32(drm_decode_flag, MSVDX_COMMS_OFFSET_FLAGS);
+#endif
+	PSB_WMSVDX32(0, MSVDX_COMMS_SIGNATURE);
+
+	/* read register bank size */
+	{
+		uint32_t bank_size, reg;
+		reg = PSB_RMSVDX32(MSVDX_MTX_RAM_BANK_OFFSET);
+		bank_size =
+			REGIO_READ_FIELD(reg, MSVDX_MTX_RAM_BANK,
+					 MTX_RAM_BANK_SIZE);
+		ram_bank_size = (uint32_t)(1 << (bank_size + 2));
+	}
+
+	PSB_DEBUG_GENERAL("MSVDX: RAM bank size = %d bytes\n",
+			  ram_bank_size);
+
+	/* if FW already loaded from storage */
+	if (msvdx_priv->msvdx_fw) {
+		fw_ptr = msvdx_priv->msvdx_fw;
+	} else {
+#ifdef VXD_FW_BUILT_IN_KERNEL
+		fw_ptr = msvdx_get_fw(dev, &raw, FIRMWARE_NAME);
+#else
+		fw_ptr = msvdx_get_fw(dev, &raw, "msvdx_fw_mfld_DE2.0.bin");
+#endif
+		PSB_DEBUG_GENERAL("MSVDX:load msvdx_fw_mfld_DE2.0.bin by udevd\n");
+	}
+	if (!fw_ptr) {
+		DRM_ERROR("MSVDX:load msvdx_fw.bin failed,is udevd running?\n");
+		ret = 1;
+		goto out;
+	}
+
+	if (!msvdx_priv->is_load) { /* Load firmware into BO */
+		PSB_DEBUG_GENERAL("MSVDX:load msvdx_fw.bin by udevd into BO\n");
+#ifdef VXD_FW_BUILT_IN_KERNEL
+		ret = msvdx_get_fw_bo(dev, &raw, FIRMWARE_NAME);
+#else
+		ret = msvdx_get_fw_bo(dev, &raw, "msvdx_fw_mfld_DE2.0.bin");
+#endif
+		if (ret) {
+			DRM_ERROR("MSVDX: failed to call msvdx_get_fw_bo.\n");
+			ret = 1;
+			goto out;
+		}
+		msvdx_priv->is_load = 1;
+	}
+
+	fw = (struct msvdx_fw *) fw_ptr;
+
+	if (ec_firmware) {
+		fw_ptr += (((sizeof(struct msvdx_fw) + (fw->text_size + fw->data_size) * 4 + 0xfff) & ~0xfff) / sizeof(uint32_t));
+		fw = (struct msvdx_fw *) fw_ptr;
+	}
+
+	/*
+	if (fw->ver != 0x02) {
+		DRM_ERROR("psb: msvdx_fw.bin firmware version mismatch,"
+			"got version=%02x expected version=%02x\n",
+			fw->ver, 0x02);
+		ret = 1;
+		goto out;
+	}
+	*/
+	text_ptr =
+		(uint32_t *)((uint8_t *) fw_ptr + sizeof(struct msvdx_fw));
+	data_ptr = text_ptr + fw->text_size;
+
+	if (fw->text_size == 2858)
+		PSB_DEBUG_GENERAL(
+		"MSVDX: FW ver 1.00.10.0187 of SliceSwitch variant\n");
+	else if (fw->text_size == 3021)
+		PSB_DEBUG_GENERAL(
+		"MSVDX: FW ver 1.00.10.0187 of FrameSwitch variant\n");
+	else if (fw->text_size == 2841)
+		PSB_DEBUG_GENERAL("MSVDX: FW ver 1.00.10.0788\n");
+	else if (fw->text_size == 3147)
+		PSB_DEBUG_GENERAL("MSVDX: FW ver BUILD_DXVA_FW1.00.10.1042 of SliceSwitch variant\n");
+	else if (fw->text_size == 3097)
+		PSB_DEBUG_GENERAL("MSVDX: FW ver BUILD_DXVA_FW1.00.10.0963.02.0011 of FrameSwitch variant\n");
+	else
+		PSB_DEBUG_GENERAL("MSVDX: FW ver unknown\n");
+
+	PSB_DEBUG_GENERAL("MSVDX: Retrieved pointers for firmware\n");
+	PSB_DEBUG_GENERAL("MSVDX: text_size: %d\n", fw->text_size);
+	PSB_DEBUG_GENERAL("MSVDX: data_size: %d\n", fw->data_size);
+	PSB_DEBUG_GENERAL("MSVDX: data_location: 0x%x\n",
+		fw->data_location);
+	PSB_DEBUG_GENERAL("MSVDX: First 4 bytes of text: 0x%x\n",
+		*text_ptr);
+	PSB_DEBUG_GENERAL("MSVDX: First 4 bytes of data: 0x%x\n",
+		*data_ptr);
+
+	PSB_DEBUG_GENERAL("MSVDX: Uploading firmware\n");
+
+#if UPLOAD_FW_BY_DMA
+	msvdx_upload_fw(dev_priv, 0, msvdx_priv->mtx_mem_size / 4, ec_firmware);
+#else
+	msvdx_upload_fw(dev_priv, MTX_CORE_CODE_MEM, ram_bank_size,
+			PC_START_ADDRESS - MTX_CODE_BASE, fw->text_size,
+			text_ptr);
+	msvdx_upload_fw(dev_priv, MTX_CORE_DATA_MEM, ram_bank_size,
+			fw->data_location - MTX_DATA_BASE, fw->data_size,
+			data_ptr);
+#endif
+
+#if 0
+	/* todo :  Verify code upload possibly only in debug */
+	ret = psb_verify_fw(dev_priv, ram_bank_size,
+			MTX_CORE_CODE_MEM,
+			PC_START_ADDRESS - MTX_CODE_BASE,
+			fw->text_size, text_ptr);
+	if (ret) {
+		/* Firmware code upload failed */
+		ret = 1;
+		goto out;
+	}
+
+	ret = psb_verify_fw(dev_priv, ram_bank_size, MTX_CORE_DATA_MEM,
+	fw->data_location - MTX_DATA_BASE,
+	fw->data_size, data_ptr);
+	if (ret) {
+		/* Firmware data upload failed */
+		ret = 1;
+		goto out;
+	}
+#endif
+
+	/*	-- Set starting PC address	*/
+	msvdx_write_mtx_core_reg(dev_priv, MTX_PC, PC_START_ADDRESS);
+
+	/*	-- Turn on the thread	*/
+	PSB_WMSVDX32(MTX_ENABLE_MTX_ENABLE_MASK, MTX_ENABLE_OFFSET);
+
+	/* Wait for the signature value to be written back */
+	ret = psb_wait_for_register(dev_priv, MSVDX_COMMS_SIGNATURE,
+				    MSVDX_COMMS_SIGNATURE_VALUE, /*Required value*/
+				    0xffffffff, /* Enabled bits */
+				    2000000, 5);
+	if (ret) {
+		DRM_ERROR("MSVDX: firmware fails to initialize.\n");
+		goto out;
+	}
+
+	PSB_DEBUG_GENERAL("MSVDX: MTX Initial indications OK\n");
+	PSB_DEBUG_GENERAL("MSVDX: MSVDX_COMMS_AREA_ADDR = %08x\n",
+			  MSVDX_COMMS_AREA_ADDR);
+#ifdef CONFIG_SLICE_HEADER_PARSING
+	msvdx_rendec_init_by_msg(dev);
+#endif
+out:
+	return ret;
+}
diff --git a/drivers/external_drivers/intel_media/video/decode/psb_msvdx_msg.h b/drivers/external_drivers/intel_media/video/decode/psb_msvdx_msg.h
new file mode 100644
index 0000000..8c9e390
--- /dev/null
+++ b/drivers/external_drivers/intel_media/video/decode/psb_msvdx_msg.h
@@ -0,0 +1,378 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
+ * Copyright (c) Imagination Technologies Limited, UK
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *    Fei Jiang <fei.jiang@intel.com>
+ *    Li Zeng <li.zeng@intel.com>
+ *
+ **************************************************************************/
+
+#ifndef _PSB_MSVDX_MSG_H_
+#define _PSB_MSVDX_MSG_H_
+
+#ifdef CONFIG_DRM_VXD_BYT
+#include "vxd_drv.h"
+#else
+#include "psb_drv.h"
+#include "img_types.h"
+#endif
+
+/* Start of parser specific Host->MTX messages. */
+#define	FWRK_MSGID_START_PSR_HOSTMTX_MSG	(0x80)
+
+/* Start of parser specific MTX->Host messages. */
+#define	FWRK_MSGID_START_PSR_MTXHOST_MSG	(0xC0)
+
+/* Host defined msg, just for host use, MTX not recgnize */
+#define	FWRK_MSGID_HOST_EMULATED		(0x40)
+
+/* This type defines the framework specified message ids */
+enum {
+	/* ! Sent by the VA driver on the host to the mtx firmware.
+	 */
+	MTX_MSGID_PADDING = 0,
+	MTX_MSGID_INIT = FWRK_MSGID_START_PSR_HOSTMTX_MSG,
+	MTX_MSGID_DECODE_FE,
+	MTX_MSGID_DEBLOCK,
+	MTX_MSGID_INTRA_OOLD,
+	MTX_MSGID_DECODE_BE,
+	MTX_MSGID_HOST_BE_OPP,
+#ifdef CONFIG_SLICE_HEADER_PARSING
+	MTX_MSGID_SLICE_HEADER_EXTRACT = 0x87,
+	MTX_MSGID_MODULAR_SLICE_HEADER_EXTRACT = 0x88,
+#endif
+	/*! Sent by the mtx firmware to itself.
+	 */
+	MTX_MSGID_RENDER_MC_INTERRUPT,
+
+	/* used to ditinguish mrst and mfld */
+	MTX_MSGID_DEBLOCK_MFLD = FWRK_MSGID_HOST_EMULATED,
+	MTX_MSGID_INTRA_OOLD_MFLD,
+	MTX_MSGID_DECODE_BE_MFLD,
+	MTX_MSGID_HOST_BE_OPP_MFLD,
+
+	/*! Sent by the DXVA firmware on the MTX to the host.
+	 */
+	MTX_MSGID_COMPLETED = FWRK_MSGID_START_PSR_MTXHOST_MSG,
+	MTX_MSGID_COMPLETED_BATCH,
+	MTX_MSGID_DEBLOCK_REQUIRED,
+	MTX_MSGID_TEST_RESPONCE,
+	MTX_MSGID_ACK,
+	MTX_MSGID_FAILED,
+	MTX_MSGID_CONTIGUITY_WARNING,
+	MTX_MSGID_HW_PANIC,
+#ifdef CONFIG_SLICE_HEADER_PARSING
+	MTX_MSGID_SLICE_HEADER_EXTRACT_DONE = 0xC9,
+#endif
+};
+
+#define MTX_GENMSG_SIZE_TYPE		uint8_t
+#define MTX_GENMSG_SIZE_MASK		(0xFF)
+#define MTX_GENMSG_SIZE_SHIFT		(0)
+#define MTX_GENMSG_SIZE_OFFSET		(0x0000)
+
+#define MTX_GENMSG_ID_TYPE		uint8_t
+#define MTX_GENMSG_ID_MASK		(0xFF)
+#define MTX_GENMSG_ID_SHIFT		(0)
+#define MTX_GENMSG_ID_OFFSET		(0x0001)
+
+#define MTX_GENMSG_HEADER_SIZE		2
+
+#define MTX_GENMSG_FENCE_TYPE		uint16_t
+#define MTX_GENMSG_FENCE_MASK		(0xFFFF)
+#define MTX_GENMSG_FENCE_OFFSET		(0x0002)
+#define MTX_GENMSG_FENCE_SHIFT		(0)
+
+#define FW_INVALIDATE_MMU		(0x0010)
+
+union msg_header {
+	struct {
+		uint32_t msg_size:8;
+		uint32_t msg_type:8;
+		uint32_t msg_fence:16;
+	} bits;
+	uint32_t value;
+};
+
+struct fw_init_msg {
+	union {
+		struct {
+			uint32_t msg_size:8;
+			uint32_t msg_type:8;
+			uint32_t reserved:16;
+		} bits;
+		uint32_t value;
+	} header;
+	uint32_t rendec_addr0;
+	uint32_t rendec_addr1;
+	union {
+		struct {
+			uint32_t rendec_size0:16;
+			uint32_t rendec_size1:16;
+		} bits;
+		uint32_t value;
+	} rendec_size;
+#ifdef CONFIG_SLICE_HEADER_PARSING
+	uint32_t nalu_extract_term_buff_addr;
+#endif
+};
+
+struct fw_decode_msg {
+	union {
+		struct {
+			uint32_t msg_size:8;
+			uint32_t msg_type:8;
+			uint32_t msg_fence:16;
+		} bits;
+		uint32_t value;
+	} header;
+	union {
+		struct {
+			uint32_t flags:16;
+			uint32_t buffer_size:16;
+		} bits;
+		uint32_t value;
+	} flag_size;
+	uint32_t crtl_alloc_addr;
+	union {
+		struct {
+			uint32_t context:8;
+			uint32_t mmu_ptd:24;
+		} bits;
+		uint32_t value;
+	} mmu_context;
+	uint32_t operating_mode;
+};
+
+struct fw_deblock_msg {
+	union {
+		struct {
+			uint32_t msg_size:8;
+			uint32_t msg_type:8;
+			uint32_t msg_fence:16;
+		} bits;
+		uint32_t value;
+	} header;
+	union {
+		struct {
+			uint32_t flags:16;
+			uint32_t slice_field_type:2;
+			uint32_t reserved:14;
+		} bits;
+		uint32_t value;
+	} flag_type;
+	uint32_t operating_mode;
+	union {
+		struct {
+			uint32_t context:8;
+			uint32_t mmu_ptd:24;
+		} bits;
+		uint32_t value;
+	} mmu_context;
+	union {
+		struct {
+			uint32_t frame_height_mb:16;
+			uint32_t pic_width_mb:16;
+		} bits;
+		uint32_t value;
+	} pic_size;
+	uint32_t address_a0;
+	uint32_t address_a1;
+	uint32_t mb_param_address;
+	uint32_t ext_stride_a;
+	uint32_t address_b0;
+	uint32_t address_b1;
+	uint32_t alt_output_flags_b;
+	/* additional msg outside of IMG msg */
+	uint32_t address_c0;
+	uint32_t address_c1;
+};
+
+#define MTX_PADMSG_SIZE 2
+struct fw_padding_msg {
+	union {
+		struct {
+			uint32_t msg_size:8;
+			uint32_t msg_type:8;
+		} bits;
+		uint16_t value;
+	} header;
+};
+
+struct fw_msg_header {
+	union {
+		struct {
+			uint32_t msg_size:8;
+			uint32_t msg_type:8;
+			uint32_t msg_fence:16;
+		} bits;
+		uint32_t value;
+	} header;
+};
+
+struct fw_completed_msg {
+	union {
+		struct {
+			uint32_t msg_size:8;
+			uint32_t msg_type:8;
+			uint32_t msg_fence:16;
+		} bits;
+		uint32_t value;
+	} header;
+	union {
+		struct {
+			uint32_t start_mb:16;
+			uint32_t last_mb:16;
+		} bits;
+		uint32_t value;
+	} mb;
+	uint32_t flags;
+	uint32_t vdebcr;
+};
+
+struct fw_deblock_required_msg {
+	union {
+		struct {
+			uint32_t msg_size:8;
+			uint32_t msg_type:8;
+			uint32_t msg_fence:16;
+		} bits;
+		uint32_t value;
+	} header;
+};
+
+struct fw_panic_msg {
+	union {
+		struct {
+			uint32_t msg_size:8;
+			uint32_t msg_type:8;
+			uint32_t msg_fence:16;
+		} bits;
+		uint32_t value;
+	} header;
+	uint32_t fe_status;
+	uint32_t be_status;
+	union {
+		struct {
+			uint32_t last_mb:16;
+			uint32_t reserved2:16;
+		} bits;
+		uint32_t value;
+	} mb;
+};
+
+struct fw_contiguity_msg {
+	union {
+		struct {
+			uint32_t msg_size:8;
+			uint32_t msg_type:8;
+			uint32_t msg_fence:16;
+		} bits;
+		uint32_t value;
+	} header;
+	union {
+		struct {
+			uint32_t end_mb_num:16;
+			uint32_t begin_mb_num:16;
+		} bits;
+		uint32_t value;
+	} mb;
+};
+
+struct fw_slice_header_extract_msg {
+	union {
+		struct {
+			uint32_t msg_size:8;
+			uint32_t msg_type:8;
+			uint32_t msg_fence:16;
+		} bits;
+		uint32_t value;
+	} header;
+
+	union {
+		struct {
+			uint32_t flags:16;
+			uint32_t res:16;
+		} bits;
+		uint32_t value;
+	} flags;
+
+	uint32_t src;
+
+	union {
+		struct {
+			uint32_t context:8;
+			uint32_t mmu_ptd:24;
+		} bits;
+		uint32_t value;
+	} mmu_context;
+
+	uint32_t dst;
+	uint32_t src_size;
+	uint32_t dst_size;
+
+	union {
+		struct {
+			uint32_t expected_pps_id:8;
+			uint32_t nalu_header_unit_type:5;
+			uint32_t nalu_header_ref_idc:2;
+			uint32_t nalu_header_reserved:1;
+			uint32_t continue_parse_flag:1;
+			uint32_t frame_mbs_only_flag:1;
+			uint32_t pic_order_present_flag:1;
+			uint32_t delta_pic_order_always_zero_flag:1;
+			uint32_t redundant_pic_cnt_present_flag:1;
+			uint32_t weighted_pred_flag:1;
+			uint32_t entropy_coding_mode_flag:1;
+			uint32_t deblocking_filter_control_present_flag:1;
+			uint32_t weighted_bipred_idc:2;
+			uint32_t residual_colour_transform_flag:1;
+			uint32_t chroma_format_idc:2;
+			uint32_t idr_flag:1;
+			uint32_t pic_order_cnt_type:2;
+		} bits;
+		uint32_t value;
+	} flag_bitfield;
+
+	union {
+		struct {
+			uint8_t num_slice_groups_minus1:3;
+			uint8_t num_ref_idc_l1_active_minus1:5;
+			uint8_t slice_group_map_type:3;
+			uint8_t num_ref_idc_l0_active_minus1:5;
+			uint8_t log2_slice_group_change_cycle:4;
+			uint8_t slice_header_bit_offset:4;
+			uint8_t log2_max_frame_num_minus4:4;
+			uint8_t logs_max_pic_order_cnt_lsb_minus4:4;
+		} bits;
+		uint32_t value;
+	} pic_param0;
+};
+
+struct fw_slice_header_extract_done_msg {
+	union {
+		struct {
+			uint32_t msg_size:8;
+			uint32_t msg_type:8;
+			uint32_t msg_fence:16;
+		} bits;
+		uint32_t value;
+	} header;
+};
+
+#endif
diff --git a/drivers/external_drivers/intel_media/video/decode/psb_msvdx_reg.h b/drivers/external_drivers/intel_media/video/decode/psb_msvdx_reg.h
new file mode 100644
index 0000000..270d34b
--- /dev/null
+++ b/drivers/external_drivers/intel_media/video/decode/psb_msvdx_reg.h
@@ -0,0 +1,608 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
+ * Copyright (c) Imagination Technologies Limited, UK
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *    Fei Jiang <fei.jiang@intel.com>
+ *
+ **************************************************************************/
+
+#ifndef _PSB_MSVDX_REG_H_
+#define _PSB_MSVDX_REG_H_
+
+#ifdef CONFIG_DRM_VXD_BYT
+#include "vxd_drv.h"
+#else
+#include "psb_drv.h"
+#include "img_types.h"
+#endif
+
+#if (defined MFLD_MSVDX_FABRIC_DEBUG) && MFLD_MSVDX_FABRIC_DEBUG
+#define PSB_WMSVDX32(_val, _offs)					\
+do {									\
+	if (psb_get_power_state(OSPM_VIDEO_DEC_ISLAND) == 0)		\
+		panic("msvdx reg 0x%x write failed.\n",			\
+				(unsigned int)(_offs));			\
+	else								\
+		iowrite32(_val, dev_priv->msvdx_reg + (_offs));		\
+} while (0)
+
+static inline uint32_t PSB_RMSVDX32(uint32_t _offs)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *)gpDrmDevice->dev_private;
+	if (psb_get_power_state(OSPM_VIDEO_DEC_ISLAND) == 0) {
+		panic("msvdx reg 0x%x read failed.\n", (unsigned int)(_offs));
+		return 0;
+	} else {
+		return ioread32(dev_priv->msvdx_reg + (_offs));
+	}
+}
+
+#elif (defined MSVDX_REG_DUMP) && MSVDX_REG_DUMP
+
+#define PSB_WMSVDX32(_val, _offs) \
+do {                                                \
+	printk(KERN_INFO"MSVDX: write %08x to reg 0x%08x\n", \
+			(unsigned int)(_val),       \
+			(unsigned int)(_offs));     \
+	iowrite32(_val, dev_priv->msvdx_reg + (_offs));   \
+} while (0)
+
+static inline uint32_t PSB_RMSVDX32(uint32_t _offs)
+{
+	uint32_t val = ioread32(dev_priv->msvdx_reg + (_offs));
+	printk(KERN_INFO"MSVDX: read reg 0x%08x, get %08x\n",
+			(unsigned int)(_offs), val);
+	return val;
+}
+
+#else
+
+#define PSB_WMSVDX32(_val, _offs) \
+	iowrite32(_val, dev_priv->msvdx_reg + (_offs))
+#define PSB_RMSVDX32(_offs) \
+	ioread32(dev_priv->msvdx_reg + (_offs))
+
+#endif
+
+#define REGISTER(__group__, __reg__) (__group__##_##__reg__##_OFFSET)
+
+#define MTX_INTERNAL_REG(R_SPECIFIER , U_SPECIFIER)	\
+	(((R_SPECIFIER)<<4) | (U_SPECIFIER))
+#define MTX_PC		MTX_INTERNAL_REG(0, 5)
+
+#define MEMIO_READ_FIELD(vpMem, field)						\
+	((uint32_t)(((*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) \
+			& field##_MASK) >> field##_SHIFT))			\
+
+#define MEMIO_WRITE_FIELD(vpMem, field, value)					\
+do { 										\
+	((*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) =	\
+		((*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET)))	\
+			& (field##_TYPE)~field##_MASK) |			\
+	(field##_TYPE)(((uint32_t)(value) << field##_SHIFT) & field##_MASK)); \
+} while (0)
+
+#define MEMIO_WRITE_FIELD_LITE(vpMem, field, value)				\
+do {										\
+	 (*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) =		\
+	((*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) |		\
+		(field##_TYPE)(((uint32_t)(value) << field##_SHIFT)));	\
+} while (0)
+
+#define REGIO_READ_FIELD(reg_val, reg, field)					\
+	((reg_val & reg##_##field##_MASK) >> reg##_##field##_SHIFT)
+
+#define REGIO_WRITE_FIELD(reg_val, reg, field, value)				\
+do {										\
+	(reg_val) =								\
+	((reg_val) & ~(reg##_##field##_MASK)) |				\
+	(((value) << (reg##_##field##_SHIFT)) & (reg##_##field##_MASK));	\
+} while (0)
+
+
+#define REGIO_WRITE_FIELD_LITE(reg_val, reg, field, value)			\
+do {										\
+	(reg_val) = ((reg_val) | ((value) << (reg##_##field##_SHIFT)));	\
+} while (0)
+
+/****** MSVDX.Technical Reference Manual.2.0.2.4.External VXD38x **************
+Offset address 			Name 			Identifier
+0x0000 - 0x03FF (1024B)		MTX Register		REG_MSVDX_MTX
+0x0400 - 0x047F (128B)		VDMC Register		REG_MSVDX _VDMC
+0x0480 - 0x04FF (128B)		VDEB Register		REG_MSVDX _VDEB
+0x0500 - 0x05FF (256B)		DMAC Register		REG_MSVDX _DMAC
+0x0600 - 0x06FF (256B)		MSVDX Core Register	REG_MSVDX _SYS
+0x0700 - 0x07FF (256B)		VEC iQ Matrix RAM 	REG_MSVDX_VEC_IQRAM
+0x0800 - 0x0FFF (2048B)		VEC Registers		REG_MSVDX _VEC
+0x1000 - 0x1FFF (4kB)		Command Register	REG_MSVDX _CMD
+0x2000 - 0x2FFF (4kB)		VEC Local RAM		REG_MSVDX _VEC_RAM
+0x3000 - 0x4FFF (8kB)		VEC VLC Table		RAM REG_MSVDX _VEC_VLC
+0x5000 - 0x5FFF (4kB)		AXI Register		REG_MSVDX _AXI
+******************************************************************************/
+
+/*************** MTX registers start: 0x0000 - 0x03FF (1024B) ****************/
+#define MTX_ENABLE_OFFSET				(0x0000)
+#define MTX_ENABLE_MTX_ENABLE_MASK				(0x00000001)
+#define MTX_ENABLE_MTX_ENABLE_SHIFT				(0)
+
+#define MTX_KICK_INPUT_OFFSET				(0x0080)
+
+#define MTX_REGISTER_READ_WRITE_REQUEST_OFFSET		(0x00FC)
+#define MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK	(0x80000000)
+#define MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_SHIFT	(31)
+#define MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_MASK		(0x00010000)
+#define MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_SHIFT		(16)
+
+#define MTX_REGISTER_READ_WRITE_DATA_OFFSET		(0x00F8)
+
+#define MTX_RAM_ACCESS_DATA_TRANSFER_OFFSET		(0x0104)
+
+#define MTX_RAM_ACCESS_CONTROL_OFFSET			(0x0108)
+#define MTX_RAM_ACCESS_CONTROL_MTX_MCMID_MASK			(0x0FF00000)
+#define MTX_RAM_ACCESS_CONTROL_MTX_MCMID_SHIFT			(20)
+#define MTX_RAM_ACCESS_CONTROL_MTX_MCM_ADDR_MASK		(0x000FFFFC)
+#define MTX_RAM_ACCESS_CONTROL_MTX_MCM_ADDR_SHIFT		(2)
+#define MTX_RAM_ACCESS_CONTROL_MTX_MCMAI_MASK			(0x00000002)
+#define MTX_RAM_ACCESS_CONTROL_MTX_MCMAI_SHIFT			(1)
+#define MTX_RAM_ACCESS_CONTROL_MTX_MCMR_MASK			(0x00000001)
+#define MTX_RAM_ACCESS_CONTROL_MTX_MCMR_SHIFT			(0)
+
+#define MTX_RAM_ACCESS_STATUS_OFFSET			(0x010C)
+
+#define MTX_SOFT_RESET_OFFSET				(0x0200)
+#define MTX_SOFT_RESET_MTX_RESET_MASK				(0x00000001)
+#define MTX_SOFT_RESET_MTX_RESET_SHIFT				(0)
+#define	MTX_SOFT_RESET_MTXRESET				(0x00000001)
+
+#define MTX_SYSC_TIMERDIV_OFFSET			(0x0208)
+
+#define MTX_SYSC_CDMAC_OFFSET				(0x0340)
+#define MTX_SYSC_CDMAC_BURSTSIZE_MASK				(0x07000000)
+#define MTX_SYSC_CDMAC_BURSTSIZE_SHIFT				(24)
+#define MTX_SYSC_CDMAC_RNW_MASK				(0x00020000)
+#define MTX_SYSC_CDMAC_RNW_SHIFT				(17)
+#define MTX_SYSC_CDMAC_ENABLE_MASK				(0x00010000)
+#define MTX_SYSC_CDMAC_ENABLE_SHIFT				(16)
+#define MTX_SYSC_CDMAC_LENGTH_MASK				(0x0000FFFF)
+#define MTX_SYSC_CDMAC_LENGTH_SHIFT				(0)
+
+#define MTX_SYSC_CDMAA_OFFSET				(0x0344)
+
+#define MTX_SYSC_CDMAS0_OFFSET      			(0x0348)
+
+#define MTX_SYSC_CDMAT_OFFSET				(0x0350)
+/************************** MTX registers end **************************/
+
+/**************** DMAC Registers: 0x0500 - 0x05FF (256B) ***************/
+#define DMAC_DMAC_COUNT_EN_MASK         		(0x00010000)
+#define DMAC_DMAC_IRQ_STAT_TRANSFER_FIN_MASK            (0x00020000)
+
+#define DMAC_DMAC_SETUP_OFFSET				(0x0500)
+
+#define DMAC_DMAC_COUNT_OFFSET				(0x0504)
+#define DMAC_DMAC_COUNT_BSWAP_LSBMASK           		(0x00000001)
+#define DMAC_DMAC_COUNT_BSWAP_SHIFT            		(30)
+#define DMAC_DMAC_COUNT_PW_LSBMASK				(0x00000003)
+#define DMAC_DMAC_COUNT_PW_SHIFT                		(27)
+#define DMAC_DMAC_COUNT_DIR_LSBMASK				(0x00000001)
+#define DMAC_DMAC_COUNT_DIR_SHIFT				(26)
+#define DMAC_DMAC_COUNT_PI_LSBMASK				(0x00000003)
+#define DMAC_DMAC_COUNT_PI_SHIFT				(24)
+#define DMAC_DMAC_COUNT_CNT_LSBMASK				(0x0000FFFF)
+#define DMAC_DMAC_COUNT_CNT_SHIFT				(0)
+#define DMAC_DMAC_COUNT_EN_MASK				(0x00010000)
+#define DMAC_DMAC_COUNT_EN_SHIFT				(16)
+
+#define DMAC_DMAC_PERIPH_OFFSET				(0x0508)
+#define DMAC_DMAC_PERIPH_ACC_DEL_LSBMASK			(0x00000007)
+#define DMAC_DMAC_PERIPH_ACC_DEL_SHIFT				(29)
+#define DMAC_DMAC_PERIPH_INCR_LSBMASK				(0x00000001)
+#define DMAC_DMAC_PERIPH_INCR_SHIFT				(27)
+#define DMAC_DMAC_PERIPH_BURST_LSBMASK				(0x00000007)
+#define DMAC_DMAC_PERIPH_BURST_SHIFT				(24)
+
+#define DMAC_DMAC_IRQ_STAT_OFFSET			(0x050C)
+#define DMAC_DMAC_IRQ_STAT_TRANSFER_FIN_MASK			(0x00020000)
+
+#define DMAC_DMAC_PERIPHERAL_ADDR_OFFSET		(0x0514)
+#define DMAC_DMAC_PERIPHERAL_ADDR_ADDR_MASK			(0x007FFFFF)
+#define DMAC_DMAC_PERIPHERAL_ADDR_ADDR_LSBMASK			(0x007FFFFF)
+#define DMAC_DMAC_PERIPHERAL_ADDR_ADDR_SHIFT			(0)
+
+/* DMAC control */
+#define PSB_DMAC_VALUE_COUNT(BSWAP, PW, DIR, PERIPH_INCR, COUNT) 	\
+		((((BSWAP) & DMAC_DMAC_COUNT_BSWAP_LSBMASK) <<	\
+			DMAC_DMAC_COUNT_BSWAP_SHIFT) | 		\
+		(((PW) & DMAC_DMAC_COUNT_PW_LSBMASK) <<		\
+			DMAC_DMAC_COUNT_PW_SHIFT) | 			\
+		(((DIR) & DMAC_DMAC_COUNT_DIR_LSBMASK) <<		\
+			DMAC_DMAC_COUNT_DIR_SHIFT) |			\
+		(((PERIPH_INCR) & DMAC_DMAC_COUNT_PI_LSBMASK) <<	\
+			DMAC_DMAC_COUNT_PI_SHIFT) |			\
+		(((COUNT) & DMAC_DMAC_COUNT_CNT_LSBMASK) <<		\
+			DMAC_DMAC_COUNT_CNT_SHIFT))
+
+#define PSB_DMAC_VALUE_PERIPH_PARAM(ACC_DEL, INCR, BURST)		\
+		((((ACC_DEL) & DMAC_DMAC_PERIPH_ACC_DEL_LSBMASK) <<	\
+			DMAC_DMAC_PERIPH_ACC_DEL_SHIFT) | 		\
+		(((INCR) & DMAC_DMAC_PERIPH_INCR_LSBMASK) <<		\
+			DMAC_DMAC_PERIPH_INCR_SHIFT) | 		\
+		(((BURST) & DMAC_DMAC_PERIPH_BURST_LSBMASK) <<		\
+			DMAC_DMAC_PERIPH_BURST_SHIFT))
+
+typedef enum {
+	/* !< No byte swapping will be performed. */
+	PSB_DMAC_BSWAP_NO_SWAP = 0x0,
+	/* !< Byte order will be reversed. */
+	PSB_DMAC_BSWAP_REVERSE = 0x1,
+} DMAC_eBSwap;
+
+typedef enum {
+	/* !< Data from memory to peripheral. */
+	PSB_DMAC_DIR_MEM_TO_PERIPH = 0x0,
+	/* !< Data from peripheral to memory. */
+	PSB_DMAC_DIR_PERIPH_TO_MEM = 0x1,
+} DMAC_eDir;
+
+typedef enum {
+	PSB_DMAC_ACC_DEL_0	= 0x0,	/* !< Access delay zero clock cycles */
+	PSB_DMAC_ACC_DEL_256    = 0x1,	/* !< Access delay 256 clock cycles */
+	PSB_DMAC_ACC_DEL_512    = 0x2,	/* !< Access delay 512 clock cycles */
+	PSB_DMAC_ACC_DEL_768    = 0x3,	/* !< Access delay 768 clock cycles */
+	PSB_DMAC_ACC_DEL_1024   = 0x4,	/* !< Access delay 1024 clock cycles */
+	PSB_DMAC_ACC_DEL_1280   = 0x5,	/* !< Access delay 1280 clock cycles */
+	PSB_DMAC_ACC_DEL_1536   = 0x6,	/* !< Access delay 1536 clock cycles */
+	PSB_DMAC_ACC_DEL_1792   = 0x7,	/* !< Access delay 1792 clock cycles */
+} DMAC_eAccDel;
+
+typedef enum {
+	PSB_DMAC_INCR_OFF	= 0,	/* !< Static peripheral address. */
+	PSB_DMAC_INCR_ON	= 1,	/* !< Incrementing peripheral address. */
+} DMAC_eIncr;
+
+typedef enum {
+	PSB_DMAC_BURST_0	= 0x0,	/* !< burst size of 0 */
+	PSB_DMAC_BURST_1        = 0x1,	/* !< burst size of 1 */
+	PSB_DMAC_BURST_2        = 0x2,	/* !< burst size of 2 */
+	PSB_DMAC_BURST_3        = 0x3,	/* !< burst size of 3 */
+	PSB_DMAC_BURST_4        = 0x4,	/* !< burst size of 4 */
+	PSB_DMAC_BURST_5        = 0x5,	/* !< burst size of 5 */
+	PSB_DMAC_BURST_6        = 0x6,	/* !< burst size of 6 */
+	PSB_DMAC_BURST_7        = 0x7,	/* !< burst size of 7 */
+} DMAC_eBurst;
+/************************** DMAC Registers end **************************/
+
+/**************** MSVDX Core Registers: 0x0600 - 0x06FF (256B) ***************/
+#define MSVDX_CONTROL_OFFSET					(0x0600)
+#define MSVDX_CONTROL_MSVDX_SOFT_RESET_MASK			(0x00000100)
+#define MSVDX_CONTROL_MSVDX_SOFT_RESET_SHIFT			(8)
+#define MSVDX_CONTROL_DMAC_CH0_SELECT_MASK			(0x00001000)
+#define MSVDX_CONTROL_DMAC_CH0_SELECT_SHIFT			(12)
+#define MSVDX_CONTROL_MSVDX_SOFT_RESET_MASK			(0x00000100)
+#define MSVDX_CONTROL_MSVDX_SOFT_RESET_SHIFT			(8)
+#define MSVDX_CONTROL_MSVDX_FE_SOFT_RESET_MASK			(0x00010000)
+#define MSVDX_CONTROL_MSVDX_BE_SOFT_RESET_MASK			(0x00100000)
+#define MSVDX_CONTROL_MSVDX_VEC_MEMIF_SOFT_RESET_MASK		(0x01000000)
+#define MSVDX_CONTROL_MSVDX_VEC_RENDEC_DEC_SOFT_RESET_MASK 	(0x10000000)
+#define msvdx_sw_reset_all \
+	(MSVDX_CONTROL_MSVDX_SOFT_RESET_MASK |	  		\
+	MSVDX_CONTROL_MSVDX_FE_SOFT_RESET_MASK |		\
+	MSVDX_CONTROL_MSVDX_BE_SOFT_RESET_MASK	|		\
+	MSVDX_CONTROL_MSVDX_VEC_MEMIF_SOFT_RESET_MASK |	\
+	MSVDX_CONTROL_MSVDX_VEC_RENDEC_DEC_SOFT_RESET_MASK)
+
+#define MSVDX_INTERRUPT_CLEAR_OFFSET			(0x060C)
+
+#define MSVDX_INTERRUPT_STATUS_OFFSET			(0x0608)
+#define MSVDX_INTERRUPT_STATUS_MMU_FAULT_IRQ_MASK		(0x00000F00)
+#define MSVDX_INTERRUPT_STATUS_MMU_FAULT_IRQ_SHIFT		(8)
+#define MSVDX_INTERRUPT_STATUS_MTX_IRQ_MASK			(0x00004000)
+#define MSVDX_INTERRUPT_STATUS_MTX_IRQ_SHIFT			(14)
+
+#define MSVDX_HOST_INTERRUPT_ENABLE_OFFSET		(0x0610)
+
+#define MSVDX_MAN_CLK_ENABLE_OFFSET			(0x0620)
+#define MSVDX_MAN_CLK_ENABLE_CORE_MAN_CLK_ENABLE_MASK		(0x00000001)
+#define MSVDX_MAN_CLK_ENABLE_VDEB_PROCESS_MAN_CLK_ENABLE_MASK 	(0x00000002)
+#define MSVDX_MAN_CLK_ENABLE_VDEB_ACCESS_MAN_CLK_ENABLE_MASK 	(0x00000004)
+#define MSVDX_MAN_CLK_ENABLE_VDMC_MAN_CLK_ENABLE_MASK 		(0x00000008)
+#define MSVDX_MAN_CLK_ENABLE_VEC_ENTDEC_MAN_CLK_ENABLE_MASK 	(0x00000010)
+#define MSVDX_MAN_CLK_ENABLE_VEC_ITRANS_MAN_CLK_ENABLE_MASK 	(0x00000020)
+#define MSVDX_MAN_CLK_ENABLE_MTX_MAN_CLK_ENABLE_MASK		(0x00000040)
+#define MSVDX_MAN_CLK_ENABLE_VDEB_PROCESS_AUTO_CLK_ENABLE_MASK (0x00020000)
+#define MSVDX_MAN_CLK_ENABLE_VDEB_ACCESS_AUTO_CLK_ENABLE_MASK	(0x00040000)
+#define MSVDX_MAN_CLK_ENABLE_VDMC_AUTO_CLK_ENABLE_MASK 	(0x00080000)
+#define MSVDX_MAN_CLK_ENABLE_VEC_ENTDEC_AUTO_CLK_ENABLE_MASK 	(0x00100000)
+#define MSVDX_MAN_CLK_ENABLE_VEC_ITRANS_AUTO_CLK_ENABLE_MASK 	(0x00200000)
+
+#define clk_enable_all	\
+	(MSVDX_MAN_CLK_ENABLE_CORE_MAN_CLK_ENABLE_MASK			| \
+	MSVDX_MAN_CLK_ENABLE_VDEB_PROCESS_MAN_CLK_ENABLE_MASK 		| \
+	MSVDX_MAN_CLK_ENABLE_VDEB_ACCESS_MAN_CLK_ENABLE_MASK 		| \
+	MSVDX_MAN_CLK_ENABLE_VDMC_MAN_CLK_ENABLE_MASK	 		| \
+	MSVDX_MAN_CLK_ENABLE_VEC_ENTDEC_MAN_CLK_ENABLE_MASK 		| \
+	MSVDX_MAN_CLK_ENABLE_VEC_ITRANS_MAN_CLK_ENABLE_MASK 		| \
+	MSVDX_MAN_CLK_ENABLE_MTX_MAN_CLK_ENABLE_MASK)
+
+#define clk_enable_minimal \
+	(MSVDX_MAN_CLK_ENABLE_CORE_MAN_CLK_ENABLE_MASK | \
+	MSVDX_MAN_CLK_ENABLE_MTX_MAN_CLK_ENABLE_MASK)
+
+#define clk_enable_auto	\
+	(MSVDX_MAN_CLK_ENABLE_VDEB_PROCESS_AUTO_CLK_ENABLE_MASK	| \
+	MSVDX_MAN_CLK_ENABLE_VDEB_ACCESS_AUTO_CLK_ENABLE_MASK		| \
+	MSVDX_MAN_CLK_ENABLE_VDMC_AUTO_CLK_ENABLE_MASK			| \
+	MSVDX_MAN_CLK_ENABLE_VEC_ENTDEC_AUTO_CLK_ENABLE_MASK		| \
+	MSVDX_MAN_CLK_ENABLE_VEC_ITRANS_AUTO_CLK_ENABLE_MASK		| \
+	MSVDX_MAN_CLK_ENABLE_CORE_MAN_CLK_ENABLE_MASK			| \
+	MSVDX_MAN_CLK_ENABLE_MTX_MAN_CLK_ENABLE_MASK)
+
+#define MSVDX_CORE_ID_OFFSET				(0x0630)
+#define MSVDX_CORE_REV_OFFSET				(0x0640)
+
+#define MSVDX_DMAC_STREAM_STATUS_OFFSET			(0x0648)
+
+#define MSVDX_MMU_CONTROL0_OFFSET			(0x0680)
+#define MSVDX_MMU_CONTROL0_MMU_PAUSE_MASK			(0x00000002)
+#define MSVDX_MMU_CONTROL0_MMU_PAUSE_SHIFT			(1)
+#define MSVDX_MMU_CONTROL0_MMU_INVALDC_MASK          		(0x00000008)
+#define MSVDX_MMU_CONTROL0_MMU_INVALDC_SHIFT         		(3)
+
+#define MSVDX_MMU_BANK_INDEX_OFFSET			(0x0688)
+
+#define MSVDX_MMU_STATUS_OFFSET				(0x068C)
+
+#define MSVDX_MMU_CONTROL2_OFFSET			(0x0690)
+
+#define MSVDX_MMU_DIR_LIST_BASE_OFFSET			(0x0694)
+
+#define MSVDX_MMU_MEM_REQ_OFFSET			(0x06D0)
+
+#define MSVDX_MMU_TILE_BASE0_OFFSET			(0x06D4)
+
+#define MSVDX_MMU_TILE_BASE1_OFFSET			(0x06D8)
+
+#define MSVDX_MTX_RAM_BANK_OFFSET			(0x06F0)
+#define MSVDX_MTX_RAM_BANK_MTX_RAM_BANK_SIZE_MASK		(0x000F0000)
+#define MSVDX_MTX_RAM_BANK_MTX_RAM_BANK_SIZE_SHIFT		(16)
+
+#define MSVDX_MTX_DEBUG_OFFSET				MSVDX_MTX_RAM_BANK_OFFSET
+#define MSVDX_MTX_DEBUG_MTX_DBG_IS_SLAVE_MASK			(0x00000004)
+#define MSVDX_MTX_DEBUG_MTX_DBG_IS_SLAVE_LSBMASK		(0x00000001)
+#define MSVDX_MTX_DEBUG_MTX_DBG_IS_SLAVE_SHIFT			(2)
+#define MSVDX_MTX_DEBUG_MTX_DBG_GPIO_IN_MASK			(0x00000003)
+#define MSVDX_MTX_DEBUG_MTX_DBG_GPIO_IN_LSBMASK		(0x00000003)
+#define MSVDX_MTX_DEBUG_MTX_DBG_GPIO_IN_SHIFT			(0)
+
+/*watch dog for FE and BE*/
+#define FE_MSVDX_WDT_CONTROL_OFFSET			(0x0664)
+/* MSVDX_CORE, CR_FE_MSVDX_WDT_CONTROL, FE_WDT_CNT_CTRL */
+#define FE_MSVDX_WDT_CONTROL_FE_WDT_CNT_CTRL_MASK		(0x00060000)
+#define FE_MSVDX_WDT_CONTROL_FE_WDT_CNT_CTRL_LSBMASK		(0x00000003)
+#define FE_MSVDX_WDT_CONTROL_FE_WDT_CNT_CTRL_SHIFT		(17)
+/* MSVDX_CORE, CR_FE_MSVDX_WDT_CONTROL, FE_WDT_ENABLE */
+#define FE_MSVDX_WDT_CONTROL_FE_WDT_ENABLE_MASK		(0x00010000)
+#define FE_MSVDX_WDT_CONTROL_FE_WDT_ENABLE_LSBMASK		(0x00000001)
+#define FE_MSVDX_WDT_CONTROL_FE_WDT_ENABLE_SHIFT		(16)
+/* MSVDX_CORE, CR_FE_MSVDX_WDT_CONTROL, FE_WDT_ACTION1 */
+#define FE_MSVDX_WDT_CONTROL_FE_WDT_ACTION1_MASK		(0x00003000)
+#define FE_MSVDX_WDT_CONTROL_FE_WDT_ACTION1_LSBMASK		(0x00000003)
+#define FE_MSVDX_WDT_CONTROL_FE_WDT_ACTION1_SHIFT		(12)
+/* MSVDX_CORE, CR_FE_MSVDX_WDT_CONTROL, FE_WDT_ACTION0 */
+#define FE_MSVDX_WDT_CONTROL_FE_WDT_ACTION0_MASK		(0x00000100)
+#define FE_MSVDX_WDT_CONTROL_FE_WDT_ACTION0_LSBMASK		(0x00000001)
+#define FE_MSVDX_WDT_CONTROL_FE_WDT_ACTION0_SHIFT		(8)
+/* MSVDX_CORE, CR_FE_MSVDX_WDT_CONTROL, FE_WDT_CLEAR_SELECT */
+#define FE_MSVDX_WDT_CONTROL_FE_WDT_CLEAR_SELECT_MASK		(0x00000030)
+#define FE_MSVDX_WDT_CONTROL_FE_WDT_CLEAR_SELECT_LSBMASK	(0x00000003)
+#define FE_MSVDX_WDT_CONTROL_FE_WDT_CLEAR_SELECT_SHIFT		(4)
+/* MSVDX_CORE, CR_FE_MSVDX_WDT_CONTROL, FE_WDT_CLKDIV_SELECT */
+#define FE_MSVDX_WDT_CONTROL_FE_WDT_CLKDIV_SELECT_MASK		(0x00000007)
+#define FE_MSVDX_WDT_CONTROL_FE_WDT_CLKDIV_SELECT_LSBMASK	(0x00000007)
+#define FE_MSVDX_WDT_CONTROL_FE_WDT_CLKDIV_SELECT_SHIFT	(0)
+
+#define FE_MSVDX_WDTIMER_OFFSET				(0x0668)
+/* MSVDX_CORE, CR_FE_MSVDX_WDTIMER, FE_WDT_COUNTER */
+#define FE_MSVDX_WDTIMER_FE_WDT_COUNTER_MASK			(0x0000FFFF)
+#define FE_MSVDX_WDTIMER_FE_WDT_COUNTER_LSBMASK		(0x0000FFFF)
+#define FE_MSVDX_WDTIMER_FE_WDT_COUNTER_SHIFT			(0)
+
+#define FE_MSVDX_WDT_COMPAREMATCH_OFFSET		(0x066c)
+/* MSVDX_CORE, CR_FE_MSVDX_WDT_COMPAREMATCH, FE_WDT_CM1 */
+#define FE_MSVDX_WDT_COMPAREMATCH_FE_WDT_CM1_MASK		(0xFFFF0000)
+#define FE_MSVDX_WDT_COMPAREMATCH_FE_WDT_CM1_LSBMASK		(0x0000FFFF)
+#define FE_MSVDX_WDT_COMPAREMATCH_FE_WDT_CM1_SHIFT		(16)
+/* MSVDX_CORE, CR_FE_MSVDX_WDT_COMPAREMATCH, FE_WDT_CM0 */
+#define FE_MSVDX_WDT_COMPAREMATCH_FE_WDT_CM0_MASK		(0x0000FFFF)
+#define FE_MSVDX_WDT_COMPAREMATCH_FE_WDT_CM0_LSBMASK		(0x0000FFFF)
+#define FE_MSVDX_WDT_COMPAREMATCH_FE_WDT_CM0_SHIFT		(0)
+
+#define BE_MSVDX_WDT_CONTROL_OFFSET			(0x0670)
+/* MSVDX_CORE, CR_BE_MSVDX_WDT_CONTROL, BE_WDT_CNT_CTRL */
+#define BE_MSVDX_WDT_CONTROL_BE_WDT_CNT_CTRL_MASK		(0x001E0000)
+#define BE_MSVDX_WDT_CONTROL_BE_WDT_CNT_CTRL_LSBMASK		(0x0000000F)
+#define BE_MSVDX_WDT_CONTROL_BE_WDT_CNT_CTRL_SHIFT		(17)
+/* MSVDX_CORE, CR_BE_MSVDX_WDT_CONTROL, BE_WDT_ENABLE */
+#define BE_MSVDX_WDT_CONTROL_BE_WDT_ENABLE_MASK		(0x00010000)
+#define BE_MSVDX_WDT_CONTROL_BE_WDT_ENABLE_LSBMASK		(0x00000001)
+#define BE_MSVDX_WDT_CONTROL_BE_WDT_ENABLE_SHIFT		(16)
+/* MSVDX_CORE, CR_BE_MSVDX_WDT_CONTROL, BE_WDT_ACTION0 */
+#define BE_MSVDX_WDT_CONTROL_BE_WDT_ACTION0_MASK		(0x00000100)
+#define BE_MSVDX_WDT_CONTROL_BE_WDT_ACTION0_LSBMASK		(0x00000001)
+#define BE_MSVDX_WDT_CONTROL_BE_WDT_ACTION0_SHIFT		(8)
+/* MSVDX_CORE, CR_BE_MSVDX_WDT_CONTROL, BE_WDT_CLEAR_SELECT */
+#define BE_MSVDX_WDT_CONTROL_BE_WDT_CLEAR_SELECT_MASK		(0x000000F0)
+#define BE_MSVDX_WDT_CONTROL_BE_WDT_CLEAR_SELECT_LSBMASK	(0x0000000F)
+#define BE_MSVDX_WDT_CONTROL_BE_WDT_CLEAR_SELECT_SHIFT		(4)
+/* MSVDX_CORE, CR_BE_MSVDX_WDT_CONTROL, BE_WDT_CLKDIV_SELECT */
+#define BE_MSVDX_WDT_CONTROL_BE_WDT_CLKDIV_SELECT_MASK		(0x00000007)
+#define BE_MSVDX_WDT_CONTROL_BE_WDT_CLKDIV_SELECT_LSBMASK	(0x00000007)
+#define BE_MSVDX_WDT_CONTROL_BE_WDT_CLKDIV_SELECT_SHIFT	(0)
+
+#define BE_MSVDX_WDTIMER_OFFSET				(0x0674)
+/* MSVDX_CORE, CR_BE_MSVDX_WDTIMER, BE_WDT_COUNTER */
+#define BE_MSVDX_WDTIMER_BE_WDT_COUNTER_MASK			(0x0000FFFF)
+#define BE_MSVDX_WDTIMER_BE_WDT_COUNTER_LSBMASK		(0x0000FFFF)
+#define BE_MSVDX_WDTIMER_BE_WDT_COUNTER_SHIFT			(0)
+
+#define BE_MSVDX_WDT_COMPAREMATCH_OFFSET		(0x678)
+/* MSVDX_CORE, CR_BE_MSVDX_WDT_COMPAREMATCH, BE_WDT_CM0 */
+#define BE_MSVDX_WDT_COMPAREMATCH_BE_WDT_CM0_MASK		(0x0000FFFF)
+#define BE_MSVDX_WDT_COMPAREMATCH_BE_WDT_CM0_LSBMASK		(0x0000FFFF)
+#define BE_MSVDX_WDT_COMPAREMATCH_BE_WDT_CM0_SHIFT		(0)
+
+/*watch dog end*/
+/************************** MSVDX Core Registers end *************************/
+
+/******************* VEC Registers: 0x0800 - 0x0FFF (2048B) ******************/
+#define VEC_SHIFTREG_CONTROL_OFFSET			(0x0818)
+#define VEC_SHIFTREG_CONTROL_SR_MASTER_SELECT_MASK		(0x00000300)
+#define VEC_SHIFTREG_CONTROL_SR_MASTER_SELECT_SHIFT		(8)
+/************************** VEC Registers end **************************/
+
+/************************** RENDEC Registers **************************/
+#define MSVDX_RENDEC_CONTROL0_OFFSET			(0x0868)
+#define MSVDX_RENDEC_CONTROL0_RENDEC_INITIALISE_MASK		(0x00000001)
+#define MSVDX_RENDEC_CONTROL0_RENDEC_INITIALISE_SHIFT		(0)
+
+#define MSVDX_RENDEC_CONTROL1_OFFSET			(0x086C)
+#define MSVDX_RENDEC_CONTROL1_RENDEC_DECODE_START_SIZE_MASK	(0x000000FF)
+#define MSVDX_RENDEC_CONTROL1_RENDEC_DECODE_START_SIZE_SHIFT	(0)
+#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_W_MASK		(0x000C0000)
+#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_W_SHIFT		(18)
+#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_R_MASK		(0x00030000)
+#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_R_SHIFT		(16)
+#define MSVDX_RENDEC_CONTROL1_RENDEC_EXTERNAL_MEMORY_MASK	(0x01000000)
+#define MSVDX_RENDEC_CONTROL1_RENDEC_EXTERNAL_MEMORY_SHIFT	(24)
+#define MSVDX_RENDEC_CONTROL1_RENDEC_DEC_DISABLE_MASK		(0x08000000)
+#define MSVDX_RENDEC_CONTROL1_RENDEC_DEC_DISABLE_SHIFT		(27)
+
+#define MSVDX_RENDEC_BUFFER_SIZE_OFFSET			(0x0870)
+#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE0_MASK	(0x0000FFFF)
+#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE0_SHIFT	(0)
+#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE1_MASK	(0xFFFF0000)
+#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE1_SHIFT	(16)
+
+#define MSVDX_RENDEC_BASE_ADDR0_OFFSET			(0x0874)
+
+#define MSVDX_RENDEC_BASE_ADDR1_OFFSET			(0x0878)
+
+#define MSVDX_RENDEC_READ_DATA_OFFSET			(0x0898)
+
+#define MSVDX_RENDEC_CONTEXT0_OFFSET			(0x0950)
+
+#define MSVDX_RENDEC_CONTEXT1_OFFSET			(0x0954)
+
+#define MSVDX_RENDEC_CONTEXT2_OFFSET			(0x0958)
+
+#define MSVDX_RENDEC_CONTEXT3_OFFSET			(0x095C)
+
+#define MSVDX_RENDEC_CONTEXT4_OFFSET			(0x0960)
+
+#define MSVDX_RENDEC_CONTEXT5_OFFSET			(0x0964)
+/*************************** RENDEC registers end ****************************/
+
+/******************** VEC Local RAM: 0x2000 - 0x2FFF (4kB) *******************/
+/* vec local MEM save/restore */
+#define VEC_LOCAL_MEM_BYTE_SIZE (4 * 1024)
+#define VEC_LOCAL_MEM_OFFSET 0x2000
+
+#define MSVDX_EXT_FW_ERROR_STATE 		(0x2CC4)
+/* Decode operations in progress or not complete */
+#define MSVDX_FW_STATUS_IN_PROGRESS			0x00000000
+/* there's no work underway on the hardware, idle, can be powered down */
+#define MSVDX_FW_STATUS_HW_IDLE				0x00000001
+/* Panic, waiting to be reloaded */
+#define MSVDX_FW_STATUS_HW_PANIC			0x00000003
+
+/*
+ * This defines the MSVDX communication buffer
+ */
+#define MSVDX_COMMS_SIGNATURE_VALUE	(0xA5A5A5A5)	/*!< Signature value */
+/*!< Host buffer size (in 32-bit words) */
+#define NUM_WORDS_HOST_BUF		(100)
+/*!< MTX buffer size (in 32-bit words) */
+#define NUM_WORDS_MTX_BUF		(100)
+
+#define MSVDX_COMMS_AREA_ADDR			(0x02fe0)
+#define MSVDX_COMMS_CORE_WTD			(MSVDX_COMMS_AREA_ADDR - 0x08)
+#define MSVDX_COMMS_ERROR_TRIG			(MSVDX_COMMS_AREA_ADDR - 0x08)
+#define MSVDX_COMMS_FIRMWARE_ID			(MSVDX_COMMS_AREA_ADDR - 0x0C)
+#define MSVDX_COMMS_OFFSET_FLAGS		(MSVDX_COMMS_AREA_ADDR + 0x18)
+#define	MSVDX_COMMS_MSG_COUNTER			(MSVDX_COMMS_AREA_ADDR - 0x04)
+#define MSVDX_COMMS_FW_STATUS			(MSVDX_COMMS_AREA_ADDR - 0x10)
+#define	MSVDX_COMMS_SIGNATURE			(MSVDX_COMMS_AREA_ADDR + 0x00)
+#define	MSVDX_COMMS_TO_HOST_BUF_SIZE		(MSVDX_COMMS_AREA_ADDR + 0x04)
+#define MSVDX_COMMS_TO_HOST_RD_INDEX		(MSVDX_COMMS_AREA_ADDR + 0x08)
+#define MSVDX_COMMS_TO_HOST_WRT_INDEX		(MSVDX_COMMS_AREA_ADDR + 0x0C)
+#define MSVDX_COMMS_TO_MTX_BUF_SIZE		(MSVDX_COMMS_AREA_ADDR + 0x10)
+#define MSVDX_COMMS_TO_MTX_RD_INDEX		(MSVDX_COMMS_AREA_ADDR + 0x14)
+#define MSVDX_COMMS_TO_MTX_CB_RD_INDEX		(MSVDX_COMMS_AREA_ADDR + 0x18)
+#define MSVDX_COMMS_TO_MTX_WRT_INDEX		(MSVDX_COMMS_AREA_ADDR + 0x1C)
+#define MSVDX_COMMS_TO_HOST_BUF			(MSVDX_COMMS_AREA_ADDR + 0x20)
+#define MSVDX_COMMS_TO_MTX_BUF	\
+			(MSVDX_COMMS_TO_HOST_BUF + (NUM_WORDS_HOST_BUF << 2))
+
+/*
+ * FW FLAGs: it shall be written by the host prior to starting the Firmware.
+ */
+/* Disable Firmware based Watch dog timers. */
+#define DSIABLE_FW_WDT				0x0008
+	/* Abort Immediately on errors */
+#define ABORT_ON_ERRORS_IMMEDIATE		0x0010
+	/* Aborts faulted slices as soon as possible. Allows non faulted slices to
+	 * reach backend but the faulted slice will not be allowed to start. */
+#define ABORT_FAULTED_SLICE_IMMEDIATE		0x0020
+	/* Flush faulted slices - Debug option */
+#define FLUSH_FAULTED_SLICES			0x0080
+	/* Don't interrupt host when to host buffer becomes full.
+	 * Stall until space is freed up by host on it's own. */
+#define NOT_INTERRUPT_WHEN_HOST_IS_FULL		0x0200
+	/* Contiguity warning msg will be send to host for stream with
+         * FW_ERROR_DETECTION_AND_RECOVERY flag set if non-contiguous
+	 * macroblocks are detected. */
+#define NOT_ENABLE_ON_HOST_CONCEALMENT		0x0400
+	/* Return VDEB Signature Value in Completion message.
+	 * This requires a VDEB data flush every slice for constant results.*/
+#define RETURN_VDEB_DATA_IN_COMPLETION		0x0800
+	/* Disable Auto Clock Gating. */
+#define DSIABLE_Auto_CLOCK_GATING		0x1000
+	/* Disable Idle GPIO signal. */
+#define DSIABLE_IDLE_GPIO_SIG			0x2000
+	/* Enable Setup, FE and BE Time stamps in completion message.
+	 * Used by IMG only for firmware profiling. */
+#define ENABLE_TIMESTAMPS_IN_COMPLETE_MSG	0x4000
+	/* Disable off-host 2nd pass Deblocking in Firmware.  */
+#define DSIABLE_OFFHOST_SECOND_DEBLOCK		0x20000
+	/* Sum address signature to data signature
+	 * when returning VDEB signature values. */
+#define SUM_ADD_SIG_TO_DATA_SIGNATURE		0x80000
+
+/*
+#define MSVDX_COMMS_AREA_END	\
+  (MSVDX_COMMS_TO_MTX_BUF + (NUM_WORDS_HOST_BUF << 2))
+*/
+#define MSVDX_COMMS_AREA_END 0x03000
+
+#if (MSVDX_COMMS_AREA_END != 0x03000)
+#error
+#endif
+/***************************** VEC Local RAM end *****************************/
+
+#endif
diff --git a/drivers/external_drivers/intel_media/video/decode/psb_msvdxinit.c b/drivers/external_drivers/intel_media/video/decode/psb_msvdxinit.c
new file mode 100644
index 0000000..c74b0a7
--- /dev/null
+++ b/drivers/external_drivers/intel_media/video/decode/psb_msvdxinit.c
@@ -0,0 +1,1163 @@
+/**************************************************************************
+ * psb_msvdxinit.c
+ * MSVDX initialization and mtx-firmware upload
+ *
+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
+ * Copyright (c) Imagination Technologies Limited, UK
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *    Li Zeng <li.zeng@intel.com>
+ *    Binglin Chen <binglin.chen@intel.com>
+ *    Fei Jiang <fei.jiang@intel.com>
+ *
+ **************************************************************************/
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#ifdef CONFIG_DRM_VXD_BYT
+#include "vxd_drv.h"
+#else
+#include "psb_drv.h"
+#endif
+
+#include "psb_msvdx.h"
+#include "psb_msvdx_msg.h"
+#include "psb_msvdx_reg.h"
+#include "psb_msvdx_ec.h"
+#include <linux/firmware.h>
+#ifdef CONFIG_VIDEO_MRFLD
+#include "video_ospm.h"
+#endif
+
+
+#ifdef CONFIG_DX_SEP54
+extern int sepapp_image_verify(u8 *addr, ssize_t size, u32 key_index, u32 magic_num);
+#endif
+
+uint8_t psb_rev_id;
+
+/*
+ * the original 1000 of udelay is derive from reference driver
+ * From Liu, Haiyang, changed the originial udelay value from 1000 to 5
+ * can save 3% C0 residence
+ */
+int psb_wait_for_register(struct drm_psb_private *dev_priv,
+			  uint32_t offset, uint32_t value, uint32_t enable,
+			  uint32_t poll_cnt, uint32_t timeout)
+{
+	uint32_t reg_value = 0;
+	/* uint32_t poll_cnt = 2000000; */
+	while (poll_cnt) {
+		reg_value = PSB_RMSVDX32(offset);
+		if (value == (reg_value & enable))
+			return 0;
+
+		if (timeout < 10) {
+			/* Wait a bit */
+			PSB_UDELAY(timeout);
+			/* PSB_UDELAY(5); */
+		}
+		else if (timeout < 20000) {
+			usleep_range(timeout, timeout + timeout / 5);
+		}
+		else {
+			msleep(timeout / 1000);
+		}
+
+		poll_cnt--;
+	}
+	PSB_DEBUG_MSVDX("MSVDX: Timeout while waiting for register %08x:"
+		  " expecting %08x (mask %08x), got %08x\n",
+		  offset, value, enable, reg_value);
+
+	return 1;
+}
+
+#if 0
+int psb_poll_mtx_irq(struct drm_psb_private *dev_priv)
+{
+	int ret = 0;
+	uint32_t mtx_int = 0;
+
+	REGIO_WRITE_FIELD_LITE(mtx_int, MSVDX_INTERRUPT_STATUS, MTX_IRQ,
+			       1);
+
+	ret = psb_wait_for_register(dev_priv, MSVDX_INTERRUPT_STATUS_OFFSET,
+				    /* Required value */
+				    mtx_int,
+				    /* Enabled bits */
+				    mtx_int, 2000000, 5);
+
+	if (ret) {
+		DRM_ERROR("MSVDX: Error Mtx did not return"
+			  " int within a resonable time\n");
+		return ret;
+	}
+
+	PSB_DEBUG_IRQ("MSVDX: Got MTX Int\n");
+
+	/* Got it so clear the bit */
+	PSB_WMSVDX32(mtx_int, MSVDX_INTERRUPT_CLEAR_OFFSET);
+
+	return ret;
+}
+#endif
+
+static void msvdx_free_ccb(struct ttm_buffer_object **ccb)
+{
+	ttm_bo_unref(ccb);
+	*ccb = NULL;
+}
+
+int psb_msvdx_core_reset(struct drm_psb_private *dev_priv)
+{
+	int ret = 0;
+	struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
+
+	/* Enable Clocks */
+	PSB_DEBUG_GENERAL("Enabling clocks\n");
+	psb_msvdx_mtx_set_clocks(dev_priv->dev, clk_enable_all);
+
+	/* Always pause the MMU as the core may be still active
+	 * when resetting.  It is very bad to have memory
+	 * activity at the same time as a reset - Very Very bad
+	 */
+	PSB_WMSVDX32(2, MSVDX_MMU_CONTROL0_OFFSET);
+
+	/* BRN26106, BRN23944, BRN33671 */
+	/* This is neccessary for all cores up to Tourmaline */
+	if ((PSB_RMSVDX32(MSVDX_CORE_REV_OFFSET) < 0x00050502) &&
+		(PSB_RMSVDX32(MSVDX_INTERRUPT_STATUS_OFFSET)
+			& MSVDX_INTERRUPT_STATUS_MMU_FAULT_IRQ_MASK) &&
+		(PSB_RMSVDX32(MSVDX_MMU_STATUS_OFFSET) & 1)) {
+		unsigned int *pptd;
+		unsigned int loop;
+		uint32_t ptd_addr;
+
+		/* do work around */
+		ptd_addr = page_to_pfn(msvdx_priv->mmu_recover_page)
+					<< PAGE_SHIFT;
+		pptd = kmap(msvdx_priv->mmu_recover_page);
+		if (!pptd) {
+			DRM_ERROR("failed to kmap mmu recover page.\n");
+			return -1;
+		}
+		for (loop = 0; loop < 1024; loop++)
+			pptd[loop] = ptd_addr | 0x00000003;
+		PSB_WMSVDX32(ptd_addr, MSVDX_MMU_DIR_LIST_BASE_OFFSET +  0);
+		PSB_WMSVDX32(ptd_addr, MSVDX_MMU_DIR_LIST_BASE_OFFSET +  4);
+		PSB_WMSVDX32(ptd_addr, MSVDX_MMU_DIR_LIST_BASE_OFFSET +  8);
+		PSB_WMSVDX32(ptd_addr, MSVDX_MMU_DIR_LIST_BASE_OFFSET + 12);
+
+		PSB_WMSVDX32(6, MSVDX_MMU_CONTROL0_OFFSET);
+		PSB_WMSVDX32(MSVDX_INTERRUPT_STATUS_MMU_FAULT_IRQ_MASK,
+					MSVDX_INTERRUPT_STATUS_OFFSET);
+		kunmap(msvdx_priv->mmu_recover_page);
+	}
+
+	/* make sure *ALL* outstanding reads have gone away */
+	{
+		int loop;
+		uint32_t cmd;
+		for (loop = 0; loop < 10; loop++)
+			ret = psb_wait_for_register(dev_priv,
+						MSVDX_MMU_MEM_REQ_OFFSET,
+						0, 0xff, 100, 1);
+		if (ret) {
+			PSB_DEBUG_WARN("MSVDX_MMU_MEM_REQ is %d,\n"
+				"indicate outstanding read request 0.\n",
+				PSB_RMSVDX32(MSVDX_MMU_MEM_REQ_OFFSET));
+			ret = -1;
+			return ret;
+		}
+		/* disconnect RENDEC decoders from memory */
+		cmd = PSB_RMSVDX32(MSVDX_RENDEC_CONTROL1_OFFSET);
+		REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL1, RENDEC_DEC_DISABLE, 1);
+		PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTROL1_OFFSET);
+
+		/* Issue software reset for all but core */
+		PSB_WMSVDX32((unsigned int)~MSVDX_CONTROL_MSVDX_SOFT_RESET_MASK, MSVDX_CONTROL_OFFSET);
+		PSB_RMSVDX32(MSVDX_CONTROL_OFFSET);
+		/* bit format is set as little endian */
+		PSB_WMSVDX32(0, MSVDX_CONTROL_OFFSET);
+		/* make sure read requests are zero */
+		ret = psb_wait_for_register(dev_priv, MSVDX_MMU_MEM_REQ_OFFSET,
+						0, 0xff, 100, 100);
+		if (!ret) {
+			/* Issue software reset */
+			PSB_WMSVDX32(MSVDX_CONTROL_MSVDX_SOFT_RESET_MASK, MSVDX_CONTROL_OFFSET);
+
+			ret = psb_wait_for_register(dev_priv, MSVDX_CONTROL_OFFSET, 0,
+					MSVDX_CONTROL_MSVDX_SOFT_RESET_MASK,
+					2000000, 5);
+			if (!ret) {
+				/* Clear interrupt enabled flag */
+				PSB_WMSVDX32(0, MSVDX_HOST_INTERRUPT_ENABLE_OFFSET);
+
+				/* Clear any pending interrupt flags */
+				PSB_WMSVDX32(0xFFFFFFFF, MSVDX_INTERRUPT_CLEAR_OFFSET);
+			} else {
+				PSB_DEBUG_WARN("MSVDX_CONTROL_OFFSET is %d,\n"
+					"indicate software reset failed.\n",
+					PSB_RMSVDX32(MSVDX_CONTROL_OFFSET));
+			}
+		} else {
+			PSB_DEBUG_WARN("MSVDX_MMU_MEM_REQ is %d,\n"
+				"indicate outstanding read request 1.\n",
+				PSB_RMSVDX32(MSVDX_MMU_MEM_REQ_OFFSET));
+		}
+	}
+	return ret;
+}
+
+/**
+ * Reset chip and disable interrupts.
+ * Return 0 success, 1 failure
+ */
+int psb_msvdx_reset(struct drm_psb_private *dev_priv)
+{
+	int ret = 0;
+
+	/* Issue software reset */
+	/* PSB_WMSVDX32(msvdx_sw_reset_all, MSVDX_CONTROL); */
+	PSB_WMSVDX32(MSVDX_CONTROL_MSVDX_SOFT_RESET_MASK, MSVDX_CONTROL_OFFSET);
+
+	ret = psb_wait_for_register(dev_priv, MSVDX_CONTROL_OFFSET, 0,
+			MSVDX_CONTROL_MSVDX_SOFT_RESET_MASK, 2000000, 5);
+	if (!ret) {
+		/* Clear interrupt enabled flag */
+		PSB_WMSVDX32(0, MSVDX_HOST_INTERRUPT_ENABLE_OFFSET);
+
+		/* Clear any pending interrupt flags */
+		PSB_WMSVDX32(0xFFFFFFFF, MSVDX_INTERRUPT_CLEAR_OFFSET);
+	} else {
+		PSB_DEBUG_WARN("MSVDX_CONTROL_OFFSET is %d,\n"
+			"indicate software reset failed.\n",
+			PSB_RMSVDX32(MSVDX_CONTROL_OFFSET));
+	}
+
+	return ret;
+}
+
+static int msvdx_allocate_ccb(struct drm_device *dev,
+			    struct ttm_buffer_object **ccb,
+			    uint32_t *base_addr, unsigned long size)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	struct ttm_bo_device *bdev = &dev_priv->bdev;
+	int ret;
+	struct ttm_bo_kmap_obj tmp_kmap;
+	bool is_iomem;
+
+	PSB_DEBUG_INIT("MSVDX: allocate CCB\n");
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+	ret = ttm_buffer_object_create(bdev, size,
+				       ttm_bo_type_kernel,
+				       DRM_PSB_FLAG_MEM_MMU |
+				       TTM_PL_FLAG_NO_EVICT, 0, 0, 0,
+				       NULL, ccb);
+#else
+	ret = ttm_buffer_object_create(bdev, size,
+				       ttm_bo_type_kernel,
+				       DRM_PSB_FLAG_MEM_MMU |
+				       TTM_PL_FLAG_NO_EVICT, 0, 0,
+				       NULL, ccb);
+#endif
+	if (ret) {
+		DRM_ERROR("MSVDX:failed to allocate CCB.\n");
+		*ccb = NULL;
+		return 1;
+	}
+
+	ret = ttm_bo_kmap(*ccb, 0, (*ccb)->num_pages, &tmp_kmap);
+	if (ret) {
+		DRM_ERROR("ttm_bo_kmap failed ret: %d\n", ret);
+		ttm_bo_unref(ccb);
+		*ccb = NULL;
+		return 1;
+	}
+
+	memset(ttm_kmap_obj_virtual(&tmp_kmap, &is_iomem), 0,
+	       size);
+	ttm_bo_kunmap(&tmp_kmap);
+
+	*base_addr = (*ccb)->offset;
+	return 0;
+}
+
+static ssize_t psb_msvdx_pmstate_show(struct device *dev,
+				      struct device_attribute *attr, char *buf)
+{
+	struct drm_device *drm_dev = dev_get_drvdata(dev);
+	struct drm_psb_private *dev_priv = NULL;
+	struct msvdx_private *msvdx_priv = NULL;
+	int ret = -EINVAL;
+
+	if (drm_dev == NULL)
+		return 0;
+
+	dev_priv = drm_dev->dev_private;
+	msvdx_priv = dev_priv->msvdx_private;
+#ifndef CONFIG_DRM_VXD_BYT
+	ret = snprintf(buf, 64, "MSVDX Power state 0x%s, gating count 0x%08x\n",
+		       ospm_power_is_hw_on(OSPM_VIDEO_DEC_ISLAND)
+				? "ON" : "OFF", msvdx_priv->pm_gating_count);
+#endif
+	return ret;
+}
+
+#ifdef CONFIG_VIDEO_MRFLD
+static ssize_t ved_freq_scaling_show(struct device *dev,
+                             struct device_attribute *attr, char *buf)
+{
+	struct drm_device *drm_dev = dev_get_drvdata(dev);
+	int ret = -EINVAL;
+	int freq_code;
+
+	if (drm_dev == NULL)
+               return 0;
+
+	freq_code = psb_msvdx_get_ved_freq(VED_SS_PM1);
+
+	ret = snprintf(buf, 32,"freq_code/freq: %d/%dMHz\n",
+                                       freq_code, GET_MSVDX_FREQUENCY(freq_code));
+
+	return ret;
+}
+
+static ssize_t ved_freq_scaling_store(struct device *dev,
+                               struct device_attribute *attr,
+                               const char *buf, size_t size)
+{
+	struct drm_device *drm_dev = dev_get_drvdata(dev);
+	u32 freq_code;
+	int chars;
+
+	if (drm_dev == NULL)
+		return 0;
+
+	chars = sscanf(buf, "%d", &freq_code);
+	if (chars == 0)
+		return size;
+
+	if ((freq_code ^ IP_FREQ_106_67
+		&& freq_code ^ IP_FREQ_133_30
+		&& freq_code ^ IP_FREQ_160_00
+		&& freq_code ^ IP_FREQ_177_78
+		&& freq_code ^ IP_FREQ_200_00
+		&& freq_code ^ IP_FREQ_213_33
+		&& freq_code ^ IP_FREQ_266_67
+		&& freq_code ^ IP_FREQ_320_00) == 0) {
+		psb_msvdx_set_ved_freq(freq_code);
+		psb_set_freq_control_switch(false);
+	} else {
+		if ((freq_code ^ IP_FREQ_RESUME_SET) == 0) {
+			psb_set_freq_control_switch(true);
+		} else {
+			printk(KERN_ERR "%s: invalid freq_code %d\n", buf, freq_code);
+		}
+	}
+	return size;
+}
+
+static DEVICE_ATTR(ved_freq_scaling, 0664, ved_freq_scaling_show, ved_freq_scaling_store);
+#endif
+
+static DEVICE_ATTR(msvdx_pmstate, 0444, psb_msvdx_pmstate_show, NULL);
+
+static int32_t msvdx_alloc_ccb_for_rendec(struct drm_device *dev)
+{
+	struct msvdx_private *msvdx_priv = NULL;
+	int32_t ret = 0;
+	struct drm_psb_private *dev_priv;
+
+	PSB_DEBUG_INIT("MSVDX: Setting up RENDEC,allocate CCB 0/1\n");
+
+	if (dev == NULL)
+		return 1;
+
+	dev_priv = psb_priv(dev);
+	if (dev_priv == NULL)
+		return 1;
+
+	msvdx_priv = dev_priv->msvdx_private;
+	if (msvdx_priv == NULL)
+		return 1;
+
+	/* Allocate device virtual memory as required by rendec.... */
+	if (!msvdx_priv->ccb0) {
+		ret = msvdx_allocate_ccb(dev, &msvdx_priv->ccb0,
+				       &msvdx_priv->base_addr0,
+				       RENDEC_A_SIZE);
+		if (ret) {
+			DRM_ERROR("Allocate Rendec A fail.\n");
+			goto err_exit;
+		}
+	}
+
+	if (!msvdx_priv->ccb1) {
+		ret = msvdx_allocate_ccb(dev, &msvdx_priv->ccb1,
+				       &msvdx_priv->base_addr1,
+				       RENDEC_B_SIZE);
+		if (ret) {
+			DRM_ERROR("Allocate Rendec B fail.\n");
+			goto err_exit;
+		}
+	}
+
+	PSB_DEBUG_INIT("MSVDX: RENDEC A: %08x RENDEC B: %08x\n",
+			  msvdx_priv->base_addr0, msvdx_priv->base_addr1);
+
+	return 0;
+
+err_exit:
+	DRM_ERROR("MSVDX: %s failed.\n", __func__);
+	if (msvdx_priv->ccb0)
+		msvdx_free_ccb(&msvdx_priv->ccb0);
+	if (msvdx_priv->ccb1)
+		msvdx_free_ccb(&msvdx_priv->ccb1);
+
+	return 1;
+}
+
+#ifndef CONFIG_SLICE_HEADER_PARSING
+static void msvdx_rendec_init_by_reg(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
+	uint32_t cmd;
+
+	PSB_WMSVDX32(msvdx_priv->base_addr0, MSVDX_RENDEC_BASE_ADDR0_OFFSET);
+	PSB_WMSVDX32(msvdx_priv->base_addr1, MSVDX_RENDEC_BASE_ADDR1_OFFSET);
+
+	cmd = 0;
+	REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_BUFFER_SIZE,
+			RENDEC_BUFFER_SIZE0, RENDEC_A_SIZE / 4096);
+	REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_BUFFER_SIZE,
+			RENDEC_BUFFER_SIZE1, RENDEC_B_SIZE / 4096);
+	PSB_WMSVDX32(cmd, MSVDX_RENDEC_BUFFER_SIZE_OFFSET);
+
+	cmd = 0;
+	REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL1,
+			RENDEC_DECODE_START_SIZE, 0);
+	REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL1,
+			RENDEC_BURST_SIZE_W, 1);
+	REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL1,
+			RENDEC_BURST_SIZE_R, 1);
+	REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL1,
+			RENDEC_EXTERNAL_MEMORY, 1);
+	PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTROL1_OFFSET);
+
+	cmd = 0x00101010;
+	PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT0_OFFSET);
+	PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT1_OFFSET);
+	PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT2_OFFSET);
+	PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT3_OFFSET);
+	PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT4_OFFSET);
+	PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT5_OFFSET);
+
+	cmd = 0;
+	REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL0, RENDEC_INITIALISE,
+			1);
+	PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTROL0_OFFSET);
+}
+#endif
+
+int32_t msvdx_rendec_init_by_msg(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
+
+	/* at this stage, FW is uplaoded successfully,
+	 * can send RENDEC init message */
+	struct fw_init_msg init_msg;
+	init_msg.header.bits.msg_size = sizeof(struct fw_init_msg);
+	init_msg.header.bits.msg_type = MTX_MSGID_INIT;
+	init_msg.rendec_addr0 = msvdx_priv->base_addr0;
+	init_msg.rendec_addr1 = msvdx_priv->base_addr1;
+	init_msg.rendec_size.bits.rendec_size0 = RENDEC_A_SIZE / (4 * 1024);
+	init_msg.rendec_size.bits.rendec_size1 = RENDEC_B_SIZE / (4 * 1024);
+#ifdef CONFIG_SLICE_HEADER_PARSING
+	init_msg.nalu_extract_term_buff_addr = msvdx_priv->term_buf_addr;
+#endif
+	return psb_mtx_send(dev_priv, (void *)&init_msg);
+}
+
+#if 0
+static void msvdx_sw_rest(struct drm_psb_private *dev_priv)
+{
+	uint32_t reg_val;
+
+	/* Issue software reset for all but core*/
+	PSB_WMSVDX32((uint32_t) ~MSVDX_CONTROL__MSVDX_SOFT_RESET_MASK, MSVDX_CONTROL_OFFSET);
+
+	reg_val = PSB_RMSVDX32(MSVDX_CONTROL_OFFSET);
+	PSB_WMSVDX32(0, MSVDX_CONTROL_OFFSET);
+	PSB_WMSVDX32(MSVDX_CONTROL__MSVDX_SOFT_RESET_MASK, MSVDX_CONTROL_OFFSET);
+
+	reg_val = 0;
+	REGIO_WRITE_FIELD(reg_val, FE_MSVDX_WDT_CONTROL, FE_WDT_CNT_CTRL, 0x3);
+	REGIO_WRITE_FIELD(reg_val, FE_MSVDX_WDT_CONTROL, FE_WDT_ENABLE, 0);
+	REGIO_WRITE_FIELD(reg_val, FE_MSVDX_WDT_CONTROL, FE_WDT_ACTION0, 1);
+	REGIO_WRITE_FIELD(reg_val, FE_MSVDX_WDT_CONTROL, FE_WDT_CLEAR_SELECT, 1);
+	REGIO_WRITE_FIELD(reg_val, FE_MSVDX_WDT_CONTROL, FE_WDT_CLKDIV_SELECT, 7);
+	PSB_WMSVDX32(820, FE_MSVDX_WDT_COMPAREMATCH_OFFSET);
+	PSB_WMSVDX32(reg_val, FE_MSVDX_WDT_CONTROL_OFFSET);
+
+	reg_val = 0;
+	REGIO_WRITE_FIELD(reg_val, BE_MSVDX_WDT_CONTROL, BE_WDT_CNT_CTRL, 0x7);
+	REGIO_WRITE_FIELD(reg_val, BE_MSVDX_WDT_CONTROL, BE_WDT_ENABLE, 0);
+	REGIO_WRITE_FIELD(reg_val, BE_MSVDX_WDT_CONTROL, BE_WDT_ACTION0, 1);
+	REGIO_WRITE_FIELD(reg_val, BE_MSVDX_WDT_CONTROL, BE_WDT_CLEAR_SELECT, 0xd);
+	REGIO_WRITE_FIELD(reg_val, BE_MSVDX_WDT_CONTROL, BE_WDT_CLKDIV_SELECT, 7);
+	PSB_WMSVDX32(8200, BE_MSVDX_WDT_COMPAREMATCH_OFFSET);
+	PSB_WMSVDX32(reg_val, BE_MSVDX_WDT_CONTROL_OFFSET);
+}
+#endif
+
+static void msvdx_tile_setup(struct drm_psb_private *dev_priv)
+{
+	struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
+
+	msvdx_priv->tile_region_start0 =
+		dev_priv->bdev.man[DRM_PSB_MEM_MMU_TILING].gpu_offset;
+
+	msvdx_priv->tile_region_end0 = msvdx_priv->tile_region_start0 +
+	(dev_priv->bdev.man[DRM_PSB_MEM_MMU_TILING].size << PAGE_SHIFT);
+
+	msvdx_priv->tile_region_start1 =
+		dev_priv->bdev.man[TTM_PL_TT].gpu_offset;
+
+	msvdx_priv->tile_region_end1 = msvdx_priv->tile_region_start1 +
+		(dev_priv->bdev.man[TTM_PL_TT].size << PAGE_SHIFT);
+
+#if 0
+	if (drm_psb_msvdx_tiling && IS_MSVDX_MEM_TILE(dev)) {
+		uint32_t tile_start =
+			dev_priv->bdev.man[DRM_PSB_MEM_MMU_TILING].gpu_offset;
+		uint32_t tile_end = tile_start +
+		(dev_priv->bdev.man[DRM_PSB_MEM_MMU_TILING].size << PAGE_SHIFT);
+
+		/* Enable memory tiling */
+		cmd = ((tile_start >> 20) + (((tile_end >> 20) - 1) << 12) +
+					((0x8 | 2) << 24)); /* 2k stride */
+
+		PSB_DEBUG_GENERAL("MSVDX: MMU Tiling register0 %08x\n", cmd);
+		PSB_DEBUG_GENERAL("	  Region 0x%08x-0x%08x\n",
+					tile_start, tile_end);
+		PSB_WMSVDX32(cmd, MSVDX_MMU_TILE_BASE0_OFFSET);
+
+		tile_start =
+			dev_priv->bdev.man[TTM_PL_TT].gpu_offset;
+		tile_end = tile_start +
+			(dev_priv->bdev.man[TTM_PL_TT].size << PAGE_SHIFT);
+
+		cmd = ((tile_start >> 20) + (((tile_end >> 20) - 1) << 12) +
+					((0x8 | 2) << 24)); /* 2k stride */
+
+		PSB_DEBUG_GENERAL("MSVDX: MMU Tiling register1 %08x\n", cmd);
+		PSB_DEBUG_GENERAL("	  Region 0x%08x-0x%08x\n",
+					tile_start, tile_end);
+		PSB_WMSVDX32(cmd, MSVDX_MMU_TILE_BASE1_OFFSET);
+	}
+#endif
+	return;
+}
+
+#ifdef CONFIG_VIDEO_MRFLD_EC
+static void msvdx_init_ec(struct msvdx_private *msvdx_priv)
+{
+	struct drm_psb_private *dev_priv = msvdx_priv->dev_priv;
+
+	/* we should restore the state, if we power down/up
+	 * during EC */
+	PSB_WMSVDX32(0, 0x2000 + 0xcc4); /* EXT_FW_ERROR_STATE */
+	PSB_WMSVDX32(0, 0x2000 + 0xcb0); /* EXT_FW_LAST_MBS */
+	PSB_WMSVDX32(0, 0x2000 + 0xcb4); /* EXT_FW_LAST_MBS */
+	PSB_WMSVDX32(0, 0x2000 + 0xcb8); /* EXT_FW_LAST_MBS */
+	PSB_WMSVDX32(0, 0x2000 + 0xcbc); /* EXT_FW_LAST_MBS */
+
+	msvdx_priv->vec_ec_mem_saved = 1;
+
+	msvdx_priv->msvdx_ec_ctx[0] =
+		kzalloc(sizeof(struct psb_msvdx_ec_ctx) *
+				PSB_MAX_EC_INSTANCE,
+				GFP_KERNEL);
+	if (msvdx_priv->msvdx_ec_ctx[0] == NULL) {
+		DRM_ERROR("MSVDX:fail to allocate memory for ec ctx\n");
+	} else {
+		int i;
+		for (i = 1; i < PSB_MAX_EC_INSTANCE; i++)
+			msvdx_priv->msvdx_ec_ctx[i] =
+				msvdx_priv->msvdx_ec_ctx[0] + i;
+		for (i = 0; i < PSB_MAX_EC_INSTANCE; i++)
+			msvdx_priv->msvdx_ec_ctx[i]->fence =
+					PSB_MSVDX_INVALID_FENCE;
+	}
+	INIT_WORK(&(msvdx_priv->ec_work), psb_msvdx_do_concealment);
+	return;
+}
+#endif
+
+#if 0
+void msvdx_init_test(struct drm_device *dev)
+{
+	/* Send test message */
+	uint32_t msg_buf[FW_VA_DEBUG_TEST2_SIZE >> 2];
+
+	MEMIO_WRITE_FIELD(msg_buf, FW_VA_DEBUG_TEST2_MSG_SIZE,
+			  FW_VA_DEBUG_TEST2_SIZE);
+	MEMIO_WRITE_FIELD(msg_buf, FW_VA_DEBUG_TEST2_ID,
+			  VA_MSGID_TEST2);
+
+	ret = psb_mtx_send(dev_priv, msg_buf);
+	if (ret) {
+		DRM_ERROR("psb: MSVDX sending fails.\n");
+		goto out;
+	}
+
+	/* Wait for Mtx to ack this message */
+	psb_poll_mtx_irq(dev_priv);
+}
+#endif
+
+static int msvdx_startup_init(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	struct msvdx_private *msvdx_priv;
+
+	msvdx_priv = kmalloc(sizeof(struct msvdx_private), GFP_KERNEL);
+	if (msvdx_priv == NULL) {
+		DRM_ERROR("MSVDX: alloc msvdx_private failed.\n");
+		return 1;
+	}
+
+	dev_priv->msvdx_private = msvdx_priv;
+	memset(msvdx_priv, 0, sizeof(struct msvdx_private));
+	msvdx_priv->dev_priv = dev_priv;
+	msvdx_priv->dev = dev;
+#ifdef CONFIG_DRM_VXD_BYT
+	msvdx_priv->fw_loaded_by_punit = 0;
+#else
+#ifdef MERRIFIELD
+	msvdx_priv->msvdx_needs_reset = 1;
+	msvdx_priv->fw_b0_uploaded = 0;
+
+	if (IS_MRFLD(dev))
+			msvdx_priv->fw_loaded_by_punit = 1;
+	else
+#endif
+		msvdx_priv->fw_loaded_by_punit =
+			((dev)->pdev->revision >= 0xc) || \
+			(((dev)->pci_device & 0xffff) == 0x08c7) || \
+			(((dev)->pci_device & 0xffff) == 0x08c8);
+#endif
+	msvdx_tile_setup(dev_priv);
+	msvdx_priv->pm_gating_count = 0;
+
+	/* get device --> drm_device --> drm_psb_private --> msvdx_priv
+	 * for psb_msvdx_pmstate_show: msvdx_pmpolicy
+	 * if not pci_set_drvdata, can't get drm_device from device
+	 */
+	/* pci_set_drvdata(dev->pdev, dev); */
+	if (device_create_file(&dev->pdev->dev,
+			       &dev_attr_msvdx_pmstate))
+		DRM_ERROR("MSVDX: could not create sysfs file\n");
+#ifdef CONFIG_VIDEO_MRFLD
+	if (device_create_file(&dev->pdev->dev,
+                               &dev_attr_ved_freq_scaling))
+		DRM_ERROR("Freq: could not create sysfs file\n");
+#endif
+	msvdx_priv->sysfs_pmstate = sysfs_get_dirent(
+					    dev->pdev->dev.kobj.sd,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
+					    NULL,
+#endif
+					    "msvdx_pmstate");
+#ifdef CONFIG_VIDEO_MRFLD_EC
+	msvdx_init_ec(msvdx_priv);
+#endif
+	INIT_DELAYED_WORK(&msvdx_priv->msvdx_suspend_wq,
+				&psb_powerdown_msvdx);
+	/* Initialize comand msvdx queueing */
+	INIT_LIST_HEAD(&msvdx_priv->msvdx_queue);
+	mutex_init(&msvdx_priv->msvdx_mutex);
+	spin_lock_init(&msvdx_priv->msvdx_lock);
+#ifdef CONFIG_ION
+	INIT_LIST_HEAD(&msvdx_priv->ion_buffers_list);
+	mutex_init(&msvdx_priv->ion_buf_list_lock);
+#endif
+#ifndef CONFIG_DRM_VXD_BYT
+	/*figure out the stepping */
+	pci_read_config_byte(dev->pdev, PSB_REVID_OFFSET, &psb_rev_id);
+#endif
+	msvdx_priv->mmu_recover_page = alloc_page(GFP_DMA32);
+	if (!msvdx_priv->mmu_recover_page)
+		goto err_exit;
+
+	tasklet_init(&msvdx_priv->msvdx_tasklet,
+			msvdx_powerdown_tasklet, (unsigned long)dev);
+
+#ifndef CONFIG_DRM_VXD_BYT
+	if (IS_MRFLD(dev))
+		drm_msvdx_bottom_half = PSB_BOTTOM_HALF_WQ;
+	else
+#endif
+		drm_msvdx_bottom_half = PSB_BOTTOM_HALF_WQ;
+
+	return 0;
+
+err_exit:
+	DRM_ERROR("MSVDX: init one time failed\n");
+	kfree(dev_priv->msvdx_private);
+
+	return 1;
+}
+
+void msvdx_post_powerup_core_reset(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	psb_msvdx_mtx_set_clocks(dev_priv->dev, clk_enable_all);
+
+	/* msvdx sw reset should be done by gunit before loading fw */
+#if 0
+	/* Issue software reset for all but core*/
+	PSB_WMSVDX32(~MSVDX_CONTROL__MSVDX_SOFT_RESET_MASK,
+				MSVDX_CONTROL_OFFSET);
+
+	PSB_RMSVDX32(MSVDX_CONTROL_OFFSET);
+	PSB_WMSVDX32(0, MSVDX_CONTROL_OFFSET);
+
+	psb_wait_for_register(dev_priv, MSVDX_MMU_MEM_REQ_OFFSET, 0,
+						0xff, 100, 100);
+	PSB_WMSVDX32(MSVDX_CONTROL__MSVDX_SOFT_RESET_MASK,
+				MSVDX_CONTROL_OFFSET);
+	psb_wait_for_register(dev_priv, MSVDX_CONTROL_OFFSET, 0,
+			MSVDX_CONTROL__MSVDX_SOFT_RESET_MASK,
+									100, 100);
+#endif
+
+#ifndef CONFIG_DRM_VXD_BYT
+       psb_irq_preinstall_islands(dev, OSPM_VIDEO_DEC_ISLAND);
+       psb_irq_postinstall_islands(dev, OSPM_VIDEO_DEC_ISLAND);
+#endif
+	/* psb_msvdx_clearirq only clear CR_MTX_IRQ int,
+	 * while DDK set 0xFFFFFFFF */
+	psb_msvdx_clearirq(dev);
+	psb_msvdx_enableirq(dev);
+}
+
+int msvdx_mtx_init(struct drm_device *dev, int error_reset)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	uint32_t clk_divider = 200;
+	int ret;
+	struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
+
+	/* Reset MTX not done here, should be done before loading fw
+	 * while fw is loaded by gunit now */
+#if 0
+	PSB_WMSVDX32(MTX_SOFT_RESET__MTXRESET, MTX_SOFT_RESET_OFFSET);
+	PSB_WMSVDX32(0, MSVDX_COMMS_SIGNATURE);
+#endif
+
+	/* These should not be reprogrames after a error reset */
+	if (!error_reset) {
+		PSB_WMSVDX32(0, MSVDX_COMMS_MSG_COUNTER);
+		PSB_WMSVDX32(0, MSVDX_EXT_FW_ERROR_STATE);
+	}
+
+	PSB_WMSVDX32(0, MSVDX_COMMS_ERROR_TRIG);
+	PSB_WMSVDX32(0, MSVDX_COMMS_TO_HOST_RD_INDEX);
+	PSB_WMSVDX32(0, MSVDX_COMMS_TO_HOST_WRT_INDEX);
+	PSB_WMSVDX32(0, MSVDX_COMMS_TO_MTX_RD_INDEX);
+	PSB_WMSVDX32(0, MSVDX_COMMS_TO_MTX_WRT_INDEX);
+	PSB_WMSVDX32(0, MSVDX_COMMS_FIRMWARE_ID);
+	/* IMG DDK set as: gui32DeviceNodeFlags & 0x4000, while it is not set in fw spec
+	 * The bit neede to be set preboot is the performce data bit since this
+	 * controls caused the firmware to rebalance the message queues. */
+	PSB_WMSVDX32(0, MSVDX_COMMS_OFFSET_FLAGS);
+
+	/* DDK: check device_node_flags with 0x400 to | (1<<16),
+	 * while it is not set in fw spec */
+	PSB_WMSVDX32(clk_divider - 1, MTX_SYSC_TIMERDIV_OFFSET);
+
+	/* DDK: LLDMA upload fw, which is now done by gunit */
+
+	/* DDK: redefine toHost and toMTX msg buffer, seems not needed */
+
+	/* Wait for the signature value to be written back */
+	ret = psb_wait_for_register(dev_priv, MSVDX_COMMS_SIGNATURE,
+				    MSVDX_COMMS_SIGNATURE_VALUE,
+				    0xffffffff,
+				    1000, 900);
+	if (ret) {
+		PSB_DEBUG_WARN("WARN: Gunit upload fw failure,\n"
+				"MSVDX_COMMS_SIGNATURE reg is 0x%x,"
+				"MSVDX_COMMS_FW_STATUS reg is 0x%x,"
+				"MTX_ENABLE reg is 0x%x.\n",
+				PSB_RMSVDX32(MSVDX_COMMS_SIGNATURE),
+				PSB_RMSVDX32(MSVDX_COMMS_FW_STATUS),
+				PSB_RMSVDX32(MTX_ENABLE_OFFSET));
+		msvdx_priv->msvdx_needs_reset |=
+				MSVDX_RESET_NEEDS_REUPLOAD_FW |
+				MSVDX_RESET_NEEDS_INIT_FW;
+	}
+	return ret;
+}
+
+/* This value is hardcoded in FW */
+#define WDT_CLOCK_DIVIDER 128
+int psb_msvdx_post_boot_init(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
+	uint32_t device_node_flags =
+			RETURN_VDEB_DATA_IN_COMPLETION | NOT_ENABLE_ON_HOST_CONCEALMENT;
+
+	/* DDK set fe_wdt_clks as 0x820 and be_wdt_clks as 0x8200 */
+	uint32_t fe_wdt_clks = 0x334 * WDT_CLOCK_DIVIDER;
+	uint32_t be_wdt_clks = 0x2008 * WDT_CLOCK_DIVIDER;
+
+	PSB_WMSVDX32(FIRMWAREID, MSVDX_COMMS_FIRMWARE_ID);
+	PSB_WMSVDX32(device_node_flags, MSVDX_COMMS_OFFSET_FLAGS);
+
+	/* read register bank size */
+	{
+		uint32_t ram_bank_size;
+		uint32_t bank_size, reg;
+		reg = PSB_RMSVDX32(MSVDX_MTX_RAM_BANK_OFFSET);
+		bank_size =
+			REGIO_READ_FIELD(reg, MSVDX_MTX_RAM_BANK,
+					 MTX_RAM_BANK_SIZE);
+		ram_bank_size = (uint32_t)(1 << (bank_size + 2));
+		PSB_DEBUG_GENERAL("MSVDX: RAM bank size = %d bytes\n",
+				  ram_bank_size);
+	}
+	/* host end */
+
+	/* DDK setup tiling region here */
+	/* DDK set MMU_CONTROL2 register */
+
+	/* set watchdog timer here */
+	if (!msvdx_priv->fw_loaded_by_punit) {
+		int reg_val = 0;
+		REGIO_WRITE_FIELD(reg_val, FE_MSVDX_WDT_CONTROL, FE_WDT_CNT_CTRL, 0x3);
+		REGIO_WRITE_FIELD(reg_val, FE_MSVDX_WDT_CONTROL, FE_WDT_ENABLE, 0);
+		REGIO_WRITE_FIELD(reg_val, FE_MSVDX_WDT_CONTROL, FE_WDT_ACTION0, 1);
+		REGIO_WRITE_FIELD(reg_val, FE_MSVDX_WDT_CONTROL, FE_WDT_CLEAR_SELECT, 1);
+		REGIO_WRITE_FIELD(reg_val, FE_MSVDX_WDT_CONTROL, FE_WDT_CLKDIV_SELECT, 7);
+		PSB_WMSVDX32(fe_wdt_clks / WDT_CLOCK_DIVIDER, FE_MSVDX_WDT_COMPAREMATCH_OFFSET);
+		PSB_WMSVDX32(reg_val, FE_MSVDX_WDT_CONTROL_OFFSET);
+
+		reg_val = 0;
+		/* DDK set BE_WDT_CNT_CTRL as 0x5 and BE_WDT_CLEAR_SELECT as 0x1 */
+		REGIO_WRITE_FIELD(reg_val, BE_MSVDX_WDT_CONTROL, BE_WDT_CNT_CTRL, 0x7);
+		REGIO_WRITE_FIELD(reg_val, BE_MSVDX_WDT_CONTROL, BE_WDT_ENABLE, 0);
+		REGIO_WRITE_FIELD(reg_val, BE_MSVDX_WDT_CONTROL, BE_WDT_ACTION0, 1);
+		REGIO_WRITE_FIELD(reg_val, BE_MSVDX_WDT_CONTROL, BE_WDT_CLEAR_SELECT, 0xd);
+		REGIO_WRITE_FIELD(reg_val, BE_MSVDX_WDT_CONTROL, BE_WDT_CLKDIV_SELECT, 7);
+
+		PSB_WMSVDX32(be_wdt_clks / WDT_CLOCK_DIVIDER, BE_MSVDX_WDT_COMPAREMATCH_OFFSET);
+		PSB_WMSVDX32(reg_val, BE_MSVDX_WDT_CONTROL_OFFSET);
+	} else {
+		/* for the other two, use the default value punit set */
+		PSB_WMSVDX32(fe_wdt_clks / WDT_CLOCK_DIVIDER, FE_MSVDX_WDT_COMPAREMATCH_OFFSET);
+		PSB_WMSVDX32(be_wdt_clks / WDT_CLOCK_DIVIDER, BE_MSVDX_WDT_COMPAREMATCH_OFFSET);
+	}
+
+	return msvdx_rendec_init_by_msg(dev);
+}
+
+int psb_msvdx_init(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	int ret;
+
+	if (!dev_priv->msvdx_private) {
+		if (msvdx_startup_init(dev))
+			return 1;
+	}
+
+	if (dev_priv->msvdx_private == NULL)
+		return 1;
+
+        ret = msvdx_alloc_ccb_for_rendec(dev);
+        if (ret) {
+                printk("msvdx_alloc_ccb_for_rendec failed.\n");
+                return 1;
+        }
+
+#ifdef MERRIFIELD
+	if (!(IS_TNG_B0(dev) || IS_MOFD(dev))) {
+#endif
+		ret = psb_msvdx_post_init(dev);
+		if (ret) {
+			printk("psb_msvdx_post_init failed.\n");
+			return 1;
+		}
+#ifdef MERRIFIELD
+	}
+#endif
+
+	return 0;
+}
+
+int psb_msvdx_post_init(struct drm_device *dev)
+{
+        struct drm_psb_private *dev_priv = psb_priv(dev);
+        uint32_t cmd;
+        int ret;
+        struct msvdx_private *msvdx_priv;
+
+        if (dev_priv->msvdx_private == NULL)
+                return 1;
+
+        msvdx_priv = dev_priv->msvdx_private;
+
+        msvdx_priv->msvdx_busy = 0;
+        msvdx_priv->msvdx_hw_busy = 1;
+
+        if (msvdx_priv->fw_loaded_by_punit) {
+                /* DDK: Configure MSVDX Memory Stalling iwth the min, max and ratio of access */
+                msvdx_post_powerup_core_reset(dev);
+        }
+
+        if (!msvdx_priv->fw_loaded_by_punit) {
+                /* Enable MMU by removing all bypass bits */
+                PSB_WMSVDX32(0, MSVDX_MMU_CONTROL0_OFFSET);
+#ifdef CONFIG_DRM_VXD_BYT
+		/* we need set tile format as 512x8 on Baytrail */
+		PSB_WMSVDX32(0x1<<3, MSVDX_MMU_CONTROL2_OFFSET);
+#endif
+        } else {
+                msvdx_priv->rendec_init = 0;
+                ret = msvdx_mtx_init(dev, msvdx_priv->decoding_err);
+                if (ret) {
+                        printk("WARN: msvdx_mtx_init failed.\n");
+                        return 1;
+                }
+        }
+
+        if (!msvdx_priv->fw_loaded_by_punit) {
+#ifndef CONFIG_SLICE_HEADER_PARSING
+                msvdx_rendec_init_by_reg(dev);
+#endif
+                if (!msvdx_priv->fw) {
+                        ret = psb_msvdx_alloc_fw_bo(dev_priv);
+                        if (ret) {
+                                DRM_ERROR("psb_msvdx_alloc_fw_bo failed.\n");
+                                return 1;
+                        }
+                }
+                /* move fw loading to the place receiving first cmd buffer */
+                msvdx_priv->msvdx_fw_loaded = 0; /* need to load firware */
+                /* it should be set at punit post boot init phase */
+                PSB_WMSVDX32(820, FE_MSVDX_WDT_COMPAREMATCH_OFFSET);
+                PSB_WMSVDX32(8200, BE_MSVDX_WDT_COMPAREMATCH_OFFSET);
+
+                PSB_WMSVDX32(820, FE_MSVDX_WDT_COMPAREMATCH_OFFSET);
+                PSB_WMSVDX32(8200, BE_MSVDX_WDT_COMPAREMATCH_OFFSET);
+
+#ifndef CONFIG_DRM_VXD_BYT
+                if (IS_MRFLD(dev)) {
+                        psb_irq_preinstall_islands(dev, OSPM_VIDEO_DEC_ISLAND);
+                        psb_irq_postinstall_islands(dev, OSPM_VIDEO_DEC_ISLAND);
+                }
+#endif
+                psb_msvdx_clearirq(dev);
+                psb_msvdx_enableirq(dev);
+
+
+                cmd = 0;
+                cmd = PSB_RMSVDX32(VEC_SHIFTREG_CONTROL_OFFSET);
+                REGIO_WRITE_FIELD(cmd,
+                                  VEC_SHIFTREG_CONTROL,
+                                  SR_MASTER_SELECT,
+                                  1);  /* Host */
+                PSB_WMSVDX32(cmd, VEC_SHIFTREG_CONTROL_OFFSET);
+        }
+
+#ifndef CONFIG_DRM_VXD_BYT
+        PSB_DEBUG_INIT("MSDVX:old clock gating disable = 0x%08x\n",
+                       PSB_RVDC32(PSB_MSVDX_CLOCKGATING));
+#endif
+#ifdef CONFIG_SLICE_HEADER_PARSING
+        if (!msvdx_priv->term_buf) {
+                ret = psb_allocate_term_buf(dev, &msvdx_priv->term_buf,
+                                    &msvdx_priv->term_buf_addr,
+                                    TERMINATION_SIZE);
+                if (ret)
+                        return 1;
+        }
+#endif
+        return 0;
+}
+
+int psb_msvdx_uninit(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
+
+	/* PSB_WMSVDX32 (clk_enable_minimal, MSVDX_MAN_CLK_ENABLE); */
+	PSB_DEBUG_INIT("MSVDX:set the msvdx clock to 0\n");
+	psb_msvdx_mtx_set_clocks(dev_priv->dev, 0);
+
+	if (NULL == msvdx_priv) {
+		DRM_ERROR("MSVDX: psb_msvdx_uninit: msvdx_priv is NULL!\n");
+		return -1;
+	}
+
+	if (msvdx_priv->ccb0)
+		msvdx_free_ccb(&msvdx_priv->ccb0);
+	if (msvdx_priv->ccb1)
+		msvdx_free_ccb(&msvdx_priv->ccb1);
+
+#ifdef CONFIG_SLICE_HEADER_PARSING
+	if (msvdx_priv && msvdx_priv->term_buf) {
+		ttm_bo_unref(&msvdx_priv->term_buf);
+		msvdx_priv->term_buf = NULL;
+	}
+#endif
+
+	if (!msvdx_priv->fw_loaded_by_punit) {
+		if (msvdx_priv->msvdx_fw)
+			kfree(msvdx_priv->msvdx_fw);
+	}
+
+#ifdef PSB_MSVDX_SAVE_RESTORE_VEC
+	if (msvdx_priv->vec_local_mem_data)
+		kfree(msvdx_priv->vec_local_mem_data);
+#endif
+	kfree(msvdx_priv->msvdx_ec_ctx[0]);
+
+	if (msvdx_priv->mmu_recover_page)
+		__free_page(msvdx_priv->mmu_recover_page);
+
+	if (msvdx_priv) {
+		/* pci_set_drvdata(dev->pdev, NULL); */
+		device_remove_file(&dev->pdev->dev, &dev_attr_msvdx_pmstate);
+		sysfs_put(msvdx_priv->sysfs_pmstate);
+		msvdx_priv->sysfs_pmstate = NULL;
+#ifdef CONFIG_VIDEO_MRFLD
+		device_remove_file(&dev->pdev->dev, &dev_attr_ved_freq_scaling);
+#endif
+		tasklet_kill(&msvdx_priv->msvdx_tasklet);
+		kfree(msvdx_priv);
+		dev_priv->msvdx_private = NULL;
+	}
+
+	return 0;
+}
+
+/*
+ * watchdog function can be enabled whenever required.
+ */
+#if 0
+void psb_schedule_watchdog(struct drm_psb_private *dev_priv)
+{
+	struct timer_list *wt = &dev_priv->watchdog_timer;
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
+	if (dev_priv->timer_available && !timer_pending(wt)) {
+		wt->expires = jiffies + PSB_WATCHDOG_DELAY;
+		add_timer(wt);
+	}
+	spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
+}
+
+
+static void psb_watchdog_func(unsigned long data)
+{
+	struct drm_psb_private *dev_priv = (struct drm_psb_private *) data;
+	int msvdx_lockup;
+	int msvdx_idle;
+	unsigned long irq_flags;
+
+	psb_msvdx_lockup(dev_priv, &msvdx_lockup, &msvdx_idle);
+
+	if (msvdx_lockup) {
+		spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
+		dev_priv->timer_available = 0;
+		spin_unlock_irqrestore(&dev_priv->watchdog_lock,
+				       irq_flags);
+		if (msvdx_lockup)
+			schedule_work(&dev_priv->msvdx_watchdog_wq);
+	}
+	if (!msvdx_idle)
+		psb_schedule_watchdog(dev_priv);
+}
+
+static void psb_msvdx_reset_wq(struct work_struct *work)
+{
+	struct drm_psb_private *dev_priv =
+		container_of(work, struct drm_psb_private, msvdx_watchdog_wq);
+	struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
+
+	unsigned long irq_flags;
+
+	mutex_lock(&msvdx_priv->msvdx_mutex);
+	if (msvdx_priv->fw_loaded_by_punit)
+		msvdx_priv->msvdx_needs_reset |= MSVDX_RESET_NEEDS_REUPLOAD_FW |
+			MSVDX_RESET_NEEDS_INIT_FW;
+	else
+		msvdx_priv->msvdx_needs_reset = 1;
+	msvdx_priv->msvdx_current_sequence++;
+	PSB_DEBUG_GENERAL
+	("MSVDXFENCE: incremented msvdx_current_sequence to :%d\n",
+	 msvdx_priv->msvdx_current_sequence);
+
+	psb_fence_error(msvdx_priv->dev, PSB_ENGINE_DECODE,
+			msvdx_priv->msvdx_current_sequence,
+			_PSB_FENCE_TYPE_EXE, DRM_CMD_HANG);
+
+	spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
+	dev_priv->timer_available = 1;
+	spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
+
+	psb_msvdx_flush_cmd_queue(msvdx_priv->dev);
+
+	psb_schedule_watchdog(dev_priv);
+	mutex_unlock(&msvdx_priv->msvdx_mutex);
+}
+
+void psb_watchdog_init(struct drm_psb_private *dev_priv)
+{
+	struct timer_list *wt = &dev_priv->watchdog_timer;
+	unsigned long irq_flags;
+
+	spin_lock_init(&dev_priv->watchdog_lock);
+	spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
+	init_timer(wt);
+	INIT_WORK(&dev_priv->msvdx_watchdog_wq, &psb_msvdx_reset_wq);
+	wt->data = (unsigned long) dev_priv;
+	wt->function = &psb_watchdog_func;
+	dev_priv->timer_available = 1;
+	spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
+}
+
+void psb_watchdog_takedown(struct drm_psb_private *dev_priv)
+{
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
+	dev_priv->timer_available = 0;
+	spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
+	(void) del_timer_sync(&dev_priv->watchdog_timer);
+}
+#endif
diff --git a/drivers/external_drivers/intel_media/video/encode/Makefile b/drivers/external_drivers/intel_media/video/encode/Makefile
new file mode 100644
index 0000000..ce31ea2
--- /dev/null
+++ b/drivers/external_drivers/intel_media/video/encode/Makefile
@@ -0,0 +1,25 @@
+# Makefile for the drm device driver. This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+INCDIR=drivers/staging/mrfl
+MEDIA_INCDIR=drivers/staging/intel_media
+ccflags-y += \
+	-I$(INCDIR)/ \
+	-I$(INCDIR)/rgx/include \
+	-I$(INCDIR)/interface \
+	-I$(INCDIR)/drv \
+	-I$(INCDIR)/../intel_media/video/common \
+	-I$(INCDIR)/../mrfl/drv/ospm \
+	-I$(INCDIR)/../intel_media/video/decode \
+	-I$(INCDIR)/../intel_media/video/encode \
+	-I$(INCDIR)/../../../include/linux \
+	-I$(INCDIR)/../../../include/drm \
+	-I$(INCDIR)/../../../include/drm/ttm
+
+ccflags-y += -DANDROID -D_linux_ -DLINUX -D__KERNEL__ -DCONFIG_VIDEO_MRFLD
+
+#VIDEO_COMMON_DIR = $(TOP_DIR)/driver/staging/intel_media/video/common
+#DECODE_DIR = $(TOP_DIR)/driver/staging/intel_media/video/decode
+
+obj-y += tng_topaz.o tng_topazinit.o
+
diff --git a/drivers/external_drivers/intel_media/video/encode/pnw_topaz.c b/drivers/external_drivers/intel_media/video/encode/pnw_topaz.c
new file mode 100644
index 0000000..695426e
--- /dev/null
+++ b/drivers/external_drivers/intel_media/video/encode/pnw_topaz.c
@@ -0,0 +1,907 @@
+/**
+ * file pnw_topaz.c
+ * TOPAZ I/O operations and IRQ handling
+ *
+ */
+
+/**************************************************************************
+ *
+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
+ * Copyright (c) Imagination Technologies Limited, UK
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *      Shengquan(Austin) Yuan <shengquan.yuan@intel.com>
+ *      Elaine Wang <elaine.wang@intel.com>
+ *      Li Zeng <li.zeng@intel.com>
+ **************************************************************************/
+
+/* include headers */
+/* #define DRM_DEBUG_CODE 2 */
+#include <drm/drmP.h>
+
+#include "psb_drv.h"
+#include "psb_drm.h"
+#include "pnw_topaz.h"
+#include "psb_powermgmt.h"
+#include "pnw_topaz_hw_reg.h"
+
+#include <linux/io.h>
+#include <linux/delay.h>
+
+#define TOPAZ_MAX_COMMAND_IN_QUEUE 0x1000
+/* #define SYNC_FOR_EACH_COMMAND */
+
+#define PNW_TOPAZ_CHECK_CMD_SIZE(cmd_size, cur_cmd_size, cur_cmd_id)\
+	do {\
+		if ((cmd_size) < (cur_cmd_size)) {\
+			DRM_ERROR("%s L%d cmd size(%d) of cmd id(%x)"\
+					" is not correct\n",\
+					__func__, __LINE__, cur_cmd_size,\
+					cur_cmd_id);\
+			return -EINVAL;\
+		} \
+	} while (0)
+
+#define PNW_TOPAZ_CHECK_CORE_ID(core_id)\
+	do {\
+		if ((core_id) >= TOPAZSC_NUM_CORES) {\
+			DRM_ERROR("%s L%d core_id(%d)"\
+					" is not correct\n",\
+					__func__, __LINE__,\
+					core_id);\
+			return -EINVAL;\
+		} \
+	} while (0)
+
+
+
+/* static function define */
+static int pnw_topaz_deliver_command(struct drm_device *dev,
+				     struct ttm_buffer_object *cmd_buffer,
+				     u32 cmd_offset,
+				     u32 cmd_size,
+				     void **topaz_cmd, uint32_t sequence,
+				     int copy_cmd);
+static int pnw_topaz_send(struct drm_device *dev, unsigned char *cmd,
+			  u32 cmd_size, uint32_t sync_seq);
+static int pnw_topaz_dequeue_send(struct drm_device *dev);
+static int pnw_topaz_save_command(struct drm_device *dev, void *cmd,
+				  u32 cmd_size, uint32_t sequence);
+
+static void topaz_mtx_kick(struct drm_psb_private *dev_priv, uint32_t core_id,
+			   uint32_t kick_count);
+
+IMG_BOOL pnw_topaz_interrupt(void *pvData)
+{
+	struct drm_device *dev;
+	struct drm_psb_private *dev_priv;
+	uint32_t clr_flag;
+	struct pnw_topaz_private *topaz_priv;
+	uint32_t topaz_stat;
+	uint32_t cur_seq, cmd_id;
+
+	PSB_DEBUG_IRQ("Got an TopazSC interrupt\n");
+
+	if (pvData == NULL) {
+		DRM_ERROR("ERROR: TOPAZ %s, Invalid params\n", __func__);
+		return IMG_FALSE;
+	}
+
+	dev = (struct drm_device *)pvData;
+
+	if (!ospm_power_is_hw_on(OSPM_VIDEO_ENC_ISLAND)) {
+		DRM_ERROR("ERROR: interrupt arrived but HW is power off\n");
+		return IMG_FALSE;
+	}
+
+	dev_priv = (struct drm_psb_private *) dev->dev_private;
+	topaz_priv = dev_priv->topaz_private;
+
+	/*TODO : check if topaz is busy*/
+	topaz_priv->topaz_hw_busy = REG_READ(0x20D0) & (0x1 << 11);
+
+	TOPAZ_READ32(TOPAZ_CR_IMG_TOPAZ_INTSTAT, &topaz_stat, 0);
+	clr_flag = pnw_topaz_queryirq(dev);
+
+	pnw_topaz_clearirq(dev, clr_flag);
+
+	TOPAZ_MTX_WB_READ32(topaz_priv->topaz_sync_addr,
+			    0, MTX_WRITEBACK_CMDWORD, &cmd_id);
+	cmd_id = (cmd_id & 0x7f); /* CMD ID */
+	if (cmd_id != MTX_CMDID_NULL)
+		return IMG_TRUE;
+
+	TOPAZ_MTX_WB_READ32(topaz_priv->topaz_sync_addr,
+			    0, MTX_WRITEBACK_VALUE, &cur_seq);
+
+	PSB_DEBUG_TOPAZ("TOPAZ:Got SYNC IRQ,sync seq:0x%08x (MTX)"
+		      " vs 0x%08x(fence)\n",
+		      cur_seq, dev_priv->sequence[LNC_ENGINE_ENCODE]);
+
+	psb_fence_handler(dev, LNC_ENGINE_ENCODE);
+
+	topaz_priv->topaz_busy = 1;
+	pnw_topaz_dequeue_send(dev);
+
+	if (drm_topaz_pmpolicy != PSB_PMPOLICY_NOPM \
+			&& topaz_priv->topaz_busy == 0) {
+		PSB_DEBUG_IRQ("TOPAZ:Schedule a work to power down Topaz\n");
+		schedule_delayed_work(&topaz_priv->topaz_suspend_wq, 0);
+	}
+
+	return IMG_TRUE;
+}
+
+/* #define PSB_DEBUG_GENERAL DRM_ERROR */
+static int pnw_submit_encode_cmdbuf(struct drm_device *dev,
+				    struct ttm_buffer_object *cmd_buffer,
+				    u32 cmd_offset, u32 cmd_size,
+				    struct ttm_fence_object *fence)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	unsigned long irq_flags;
+	int ret = 0;
+	void *cmd;
+	uint32_t sequence = dev_priv->sequence[LNC_ENGINE_ENCODE];
+	struct pnw_topaz_private *topaz_priv = dev_priv->topaz_private;
+
+	PSB_DEBUG_GENERAL("TOPAZ: command submit\n");
+
+	PSB_DEBUG_GENERAL("TOPAZ: topaz busy = %d\n", topaz_priv->topaz_busy);
+
+	if (dev_priv->last_topaz_ctx != dev_priv->topaz_ctx) {
+		/* todo: save current context into dev_priv->last_topaz_ctx
+		 * and reload dev_priv->topaz_ctx context
+		 */
+		PSB_DEBUG_GENERAL("TOPAZ: context switch\n");
+		dev_priv->last_topaz_ctx = dev_priv->topaz_ctx;
+	}
+
+	if (topaz_priv->topaz_fw_loaded == 0) {
+		/* #.# load fw to driver */
+		PSB_DEBUG_INIT("TOPAZ: load /lib/firmware/topaz_fwsc.bin\n");
+		ret = pnw_topaz_init_fw(dev);
+		if (ret != 0) {
+			/* FIXME: find a proper return value */
+			DRM_ERROR("TOPAX:load /lib/firmware/topaz_fwsc.bin"
+				  " fails, ensure udevd is configured"
+				  " correctly!\n");
+			return -EFAULT;
+		}
+		topaz_priv->topaz_fw_loaded = 1;
+	}
+
+	/* # schedule watchdog */
+	/* psb_schedule_watchdog(dev_priv); */
+
+	/* # spin lock irq save [msvdx_lock] */
+	spin_lock_irqsave(&topaz_priv->topaz_lock, irq_flags);
+
+	/* # if topaz need to reset, reset it */
+	if (topaz_priv->topaz_needs_reset) {
+		/* #.# reset it */
+		spin_unlock_irqrestore(&topaz_priv->topaz_lock, irq_flags);
+		DRM_ERROR("TOPAZ: needs reset.\n");
+		return -EFAULT;
+	}
+
+	if (!topaz_priv->topaz_busy) {
+		/* # direct map topaz command if topaz is free */
+		PSB_DEBUG_GENERAL("TOPAZ:direct send command,sequence %08x\n",
+				  sequence);
+
+		topaz_priv->topaz_busy = 1;
+		spin_unlock_irqrestore(&topaz_priv->topaz_lock, irq_flags);
+
+		ret = pnw_topaz_deliver_command(dev, cmd_buffer, cmd_offset,
+						cmd_size, NULL, sequence, 0);
+
+		if (ret) {
+			DRM_ERROR("TOPAZ: failed to extract cmd...\n");
+			return ret;
+		}
+	} else {
+		PSB_DEBUG_GENERAL("TOPAZ: queue command,sequence %08x \n",
+				  sequence);
+		cmd = NULL;
+
+		spin_unlock_irqrestore(&topaz_priv->topaz_lock, irq_flags);
+
+		ret = pnw_topaz_deliver_command(dev, cmd_buffer, cmd_offset,
+						cmd_size, &cmd, sequence, 1);
+		if (cmd == NULL || ret) {
+			DRM_ERROR("TOPAZ: map command for save fialed\n");
+			return ret;
+		}
+
+		ret = pnw_topaz_save_command(dev, cmd, cmd_size, sequence);
+		if (ret) {
+			kfree(cmd);
+			DRM_ERROR("TOPAZ: save command failed\n");
+		}
+	}
+
+	return ret;
+}
+
+static int pnw_topaz_save_command(struct drm_device *dev, void *cmd,
+				  u32 cmd_size, uint32_t sequence)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct pnw_topaz_cmd_queue *topaz_cmd;
+	unsigned long irq_flags;
+	struct pnw_topaz_private *topaz_priv = dev_priv->topaz_private;
+
+	PSB_DEBUG_GENERAL("TOPAZ: queue command,sequence: %08x..\n",
+			  sequence);
+
+	topaz_cmd = kzalloc(sizeof(struct pnw_topaz_cmd_queue),
+			    GFP_KERNEL);
+	if (topaz_cmd == NULL) {
+		DRM_ERROR("TOPAZ: out of memory....\n");
+		return -ENOMEM;
+	}
+
+	topaz_cmd->cmd = cmd;
+	topaz_cmd->cmd_size = cmd_size;
+	topaz_cmd->sequence = sequence;
+
+	spin_lock_irqsave(&topaz_priv->topaz_lock, irq_flags);
+	list_add_tail(&topaz_cmd->head, &topaz_priv->topaz_queue);
+	if (!topaz_priv->topaz_busy) {
+		/* topaz_priv->topaz_busy = 1; */
+		PSB_DEBUG_GENERAL("TOPAZ: need immediate dequeue...\n");
+		pnw_topaz_dequeue_send(dev);
+		PSB_DEBUG_GENERAL("TOPAZ: after dequeue command\n");
+	}
+
+	spin_unlock_irqrestore(&topaz_priv->topaz_lock, irq_flags);
+
+	return 0;
+}
+
+
+int pnw_cmdbuf_video(struct drm_file *priv,
+		     struct list_head *validate_list,
+		     uint32_t fence_type,
+		     struct drm_psb_cmdbuf_arg *arg,
+		     struct ttm_buffer_object *cmd_buffer,
+		     struct psb_ttm_fence_rep *fence_arg)
+{
+	struct drm_device *dev = priv->minor->dev;
+	struct ttm_fence_object *fence = NULL;
+	int ret;
+
+	PSB_DEBUG_GENERAL("TOPAZ : enter %s cmdsize: %d\n", __func__,
+			  arg->cmdbuf_size);
+
+	ret = pnw_submit_encode_cmdbuf(dev, cmd_buffer, arg->cmdbuf_offset,
+				       arg->cmdbuf_size, fence);
+	if (ret)
+		return ret;
+
+	/* workaround for interrupt issue */
+	psb_fence_or_sync(priv, LNC_ENGINE_ENCODE, fence_type, arg->fence_flags,
+			  validate_list, fence_arg, &fence);
+
+	if (fence)
+		ttm_fence_object_unref(&fence);
+
+	spin_lock(&cmd_buffer->bdev->fence_lock);
+	if (cmd_buffer->sync_obj != NULL)
+		ttm_fence_sync_obj_unref(&cmd_buffer->sync_obj);
+	spin_unlock(&cmd_buffer->bdev->fence_lock);
+
+	PSB_DEBUG_GENERAL("TOPAZ exit %s\n", __func__);
+	return 0;
+}
+
+int pnw_wait_on_sync(struct drm_psb_private *dev_priv,
+		     uint32_t sync_seq,
+		     uint32_t *sync_p)
+{
+	int count = 10000;
+	if (sync_p == NULL) {
+		DRM_ERROR("TOPAZ: pnw_wait_on_sync invalid memory address\n ");
+		return -1;
+	}
+
+	while (count && (sync_seq != *sync_p)) {
+		PSB_UDELAY(100);/* experimental value */
+		--count;
+	}
+	if ((count == 0) && (sync_seq != *sync_p)) {
+		DRM_ERROR("TOPAZ: wait sycn timeout (0x%08x),actual 0x%08x\n",
+			  sync_seq, *sync_p);
+		return -EBUSY;
+	}
+	PSB_DEBUG_GENERAL("TOPAZ: SYNC done, seq=0x%08x\n", *sync_p);
+	return 0;
+}
+
+int pnw_topaz_deliver_command(struct drm_device *dev,
+			      struct ttm_buffer_object *cmd_buffer,
+			      u32 cmd_offset, u32 cmd_size,
+			      void **topaz_cmd, uint32_t sequence,
+			      int copy_cmd)
+{
+	unsigned long cmd_page_offset = cmd_offset & ~PAGE_MASK;
+	struct ttm_bo_kmap_obj cmd_kmap;
+	bool is_iomem;
+	int ret;
+	unsigned char *cmd_start, *tmp;
+	u16 num_pages;
+
+	num_pages = ((cmd_buffer->num_pages < PNW_MAX_CMD_BUF_PAGE_NUM) ?
+		       cmd_buffer->num_pages : PNW_MAX_CMD_BUF_PAGE_NUM);
+	if (cmd_size > (num_pages << PAGE_SHIFT) ||
+			cmd_offset > (num_pages << PAGE_SHIFT) ||
+			(cmd_size + cmd_offset) > (num_pages << PAGE_SHIFT)
+			|| (cmd_size == 0)) {
+		DRM_ERROR("TOPAZ: %s invalid cmd_size(%d) or cmd_offset(%d)",
+				__func__, cmd_size, cmd_offset);
+		return -EINVAL;
+	}
+	ret = ttm_bo_kmap(cmd_buffer, cmd_offset >> PAGE_SHIFT, num_pages,
+			  &cmd_kmap);
+	if (ret) {
+		DRM_ERROR("TOPAZ: drm_bo_kmap failed: %d\n", ret);
+		return ret;
+	}
+	cmd_start = (unsigned char *) ttm_kmap_obj_virtual(&cmd_kmap,
+			&is_iomem) + cmd_page_offset;
+
+	if (copy_cmd) {
+		PSB_DEBUG_GENERAL("TOPAZ: queue commands\n");
+		tmp = kzalloc(cmd_size, GFP_KERNEL);
+		if (tmp == NULL) {
+			ret = -ENOMEM;
+			goto out;
+		}
+		memcpy(tmp, cmd_start, cmd_size);
+		*topaz_cmd = tmp;
+	} else {
+		PSB_DEBUG_GENERAL("TOPAZ: directly send the command\n");
+		ret = pnw_topaz_send(dev, cmd_start, cmd_size, sequence);
+		if (ret) {
+			DRM_ERROR("TOPAZ: commit commands failed.\n");
+			ret = -EINVAL;
+		}
+	}
+
+out:
+	PSB_DEBUG_GENERAL("TOPAZ:cmd_size(%d), sequence(%d)"
+			  " copy_cmd(%d)\n",
+			  cmd_size, sequence, copy_cmd);
+
+	ttm_bo_kunmap(&cmd_kmap);
+
+	return ret;
+}
+
+
+int pnw_topaz_kick_null_cmd(struct drm_psb_private *dev_priv,
+			    uint32_t core_id,
+			    uint32_t wb_offset,
+			    uint32_t sync_seq,
+			    uint8_t irq_enable)
+{
+	struct pnw_topaz_private *topaz_priv = dev_priv->topaz_private;
+	uint32_t cur_free_space;
+	struct topaz_cmd_header cur_cmd_header;
+	int ret;
+
+	POLL_TOPAZ_FREE_FIFO_SPACE(PNW_TOPAZ_WORDS_PER_CMDSET,
+			PNW_TOPAZ_POLL_DELAY,
+			PNW_TOPAZ_POLL_RETRY,
+			&cur_free_space);
+	if (ret) {
+		DRM_ERROR("TOPAZ: error -- ret(%d)\n", ret);
+		return ret;
+	}
+
+	cur_cmd_header.core = core_id;
+	cur_cmd_header.seq = sync_seq,
+		       cur_cmd_header.enable_interrupt = ((irq_enable == 0) ? 0 : 1);
+	cur_cmd_header.id = MTX_CMDID_NULL;
+
+	topaz_priv->topaz_cmd_count %= MAX_TOPAZ_CMD_COUNT;
+	PSB_DEBUG_GENERAL("TOPAZ: free FIFO space %d\n",
+			  cur_free_space);
+	PSB_DEBUG_GENERAL("TOPAZ: write 4 words to FIFO:"
+			  "0x%08x,0x%08x,0x%08x,0x%08x\n",
+			  cur_cmd_header.val,
+			  0,
+			  wb_offset,
+			  cur_cmd_header.seq);
+
+	TOPAZ_MULTICORE_WRITE32(TOPAZSC_CR_MULTICORE_CMD_FIFO_0,
+				cur_cmd_header.val);
+	TOPAZ_MULTICORE_WRITE32(TOPAZSC_CR_MULTICORE_CMD_FIFO_0,
+				0);
+	TOPAZ_MULTICORE_WRITE32(TOPAZSC_CR_MULTICORE_CMD_FIFO_0,
+				wb_offset);
+	TOPAZ_MULTICORE_WRITE32(TOPAZSC_CR_MULTICORE_CMD_FIFO_0,
+				sync_seq);
+
+	PSB_DEBUG_GENERAL("TOPAZ: Write back value for NULL CMD is %d\n",
+			  sync_seq);
+
+	topaz_mtx_kick(dev_priv, 0, 1);
+
+	return 0;
+}
+
+
+static void pnw_topaz_save_bias_table(struct pnw_topaz_private *topaz_priv,
+	const void *cmd, int byte_size, int core)
+{
+	PSB_DEBUG_GENERAL("TOPAZ: Save BIAS table(size %d) for core %d\n",
+			byte_size, core);
+
+	if (byte_size > PNW_TOPAZ_BIAS_TABLE_MAX_SIZE) {
+		DRM_ERROR("Invalid BIAS table size %d!\n", byte_size);
+		return;
+	}
+
+	if (core > (topaz_priv->topaz_num_cores - 1)) {
+		DRM_ERROR("Invalid core id %d\n", core);
+		return;
+	}
+
+	if (topaz_priv->topaz_bias_table[core] == NULL) {
+		topaz_priv->topaz_bias_table[core] =
+			kmalloc(PNW_TOPAZ_BIAS_TABLE_MAX_SIZE,
+					GFP_KERNEL);
+		if (NULL == topaz_priv->topaz_bias_table[core]) {
+			DRM_ERROR("Run out of memory!\n");
+			return;
+		}
+	}
+
+	memcpy(topaz_priv->topaz_bias_table[core],
+			cmd, byte_size);
+	return;
+}
+
+static inline int pnw_topaz_write_reg(struct drm_psb_private *dev_priv,
+		u32 *p_command, u32 reg_cnt, u8 core_id)
+{
+	u32 reg_off, reg_val;
+	for (; reg_cnt > 0; reg_cnt--) {
+		reg_off = *p_command;
+		p_command++;
+		reg_val = *p_command;
+		p_command++;
+		if (reg_off > TOPAZ_BIASREG_MAX(core_id) ||
+				reg_off < TOPAZ_BIASREG_MIN(core_id)) {
+			DRM_ERROR("TOPAZ: Ignore write (0x%08x)" \
+					" to register 0x%08x\n",
+					reg_val, reg_off);
+			return -EINVAL;
+		} else
+			MM_WRITE32(0, reg_off, reg_val);
+	}
+	return 0;
+}
+
+
+int
+pnw_topaz_send(struct drm_device *dev, unsigned char *cmd,
+	       u32 cmd_size, uint32_t sync_seq)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	int ret = 0;
+	struct topaz_cmd_header *cur_cmd_header;
+	uint32_t cur_cmd_size = 4, cur_cmd_id, cur_free_space = 0;
+	uint32_t codec;
+	struct pnw_topaz_private *topaz_priv = dev_priv->topaz_private;
+	uint32_t reg_cnt;
+	uint32_t *p_command;
+	uint32_t tmp;
+
+	PSB_DEBUG_TOPAZ("TOPAZ: send encoding commands(seq 0x%08x) to HW\n",
+		sync_seq);
+
+	tmp = atomic_cmpxchg(&dev_priv->topaz_mmu_invaldc, 1, 0);
+	if (tmp)
+		pnw_topaz_mmu_flushcache(dev_priv);
+
+
+	/* Command header(struct topaz_cmd_header) is 32 bit */
+	while (cmd_size >= sizeof(struct topaz_cmd_header)) {
+		cur_cmd_header = (struct topaz_cmd_header *) cmd;
+		PNW_TOPAZ_CHECK_CORE_ID(cur_cmd_header->core);
+		cur_cmd_id = cur_cmd_header->id;
+		PSB_DEBUG_GENERAL("TOPAZ: %s:\n", cmd_to_string(cur_cmd_id));
+
+		switch (cur_cmd_id) {
+		case MTX_CMDID_SW_NEW_CODEC:
+			cur_cmd_size = sizeof(struct topaz_cmd_header)
+				+ TOPAZ_NEW_CODEC_CMD_BYTES;
+			PNW_TOPAZ_CHECK_CMD_SIZE(cmd_size,
+					cur_cmd_size, cur_cmd_id);
+			p_command = (uint32_t *)
+				(cmd + sizeof(struct topaz_cmd_header));
+			codec = *p_command;
+
+			if (codec >= PNW_TOPAZ_CODEC_NUM_MAX) {
+				DRM_ERROR("%s unknown video codec %d\n",
+						__func__, codec);
+				return -EINVAL;
+			}
+
+			p_command++;
+			topaz_priv->frame_h =
+				(u16)((*p_command) & 0xffff) ;
+			topaz_priv->frame_w =
+				 (u16)((*p_command >> 16) & 0xffff);
+			PSB_DEBUG_GENERAL("TOPAZ: setup new codec %s (%d),"
+					  " width %d, height %d\n",
+					  codec_to_string(codec), codec,
+					  topaz_priv->frame_w,
+					  topaz_priv->frame_h);
+			if (pnw_topaz_setup_fw(dev, codec)) {
+				DRM_ERROR("TOPAZ: upload FW to HW failed\n");
+				return -EBUSY;
+			}
+			topaz_priv->topaz_cur_codec = codec;
+			break;
+#ifdef CONFIG_VIDEO_MRFLD
+		case MTX_CMDID_SW_ENTER_LOWPOWER:
+			PSB_DEBUG_GENERAL("TOPAZ: enter lowpower....\n");
+			cur_cmd_size = sizeof(struct topaz_cmd_header)
+				+ TOPAZ_POWER_CMD_BYTES;
+			PNW_TOPAZ_CHECK_CMD_SIZE(cmd_size,
+					cur_cmd_size, cur_cmd_id);
+			break;
+
+		case MTX_CMDID_SW_LEAVE_LOWPOWER:
+			PSB_DEBUG_GENERAL("TOPAZ: leave lowpower...\n");
+			cur_cmd_size = sizeof(struct topaz_cmd_header)
+				+ TOPAZ_POWER_CMD_BYTES;
+			PNW_TOPAZ_CHECK_CMD_SIZE(cmd_size,
+					cur_cmd_size, cur_cmd_id);
+			break;
+#endif
+		case MTX_CMDID_SW_WRITEREG:
+			p_command = (uint32_t *)
+				(cmd + sizeof(struct topaz_cmd_header));
+			cur_cmd_size = sizeof(struct topaz_cmd_header)
+				+ sizeof(u32);
+			PNW_TOPAZ_CHECK_CMD_SIZE(cmd_size,
+					cur_cmd_size, cur_cmd_id);
+			reg_cnt = *p_command;
+			p_command++;
+			PNW_TOPAZ_CHECK_CMD_SIZE(TOPAZ_WRITEREG_MAX_SETS,
+					reg_cnt, cur_cmd_id);
+			/* Reg_off and reg_val are stored in a pair of words*/
+			cur_cmd_size += (reg_cnt *
+					TOPAZ_WRITEREG_BYTES_PER_SET);
+			PNW_TOPAZ_CHECK_CMD_SIZE(cmd_size,
+					cur_cmd_size, cur_cmd_id);
+			if ((drm_topaz_pmpolicy != PSB_PMPOLICY_NOPM) &&
+				(!PNW_IS_JPEG_ENC(topaz_priv->topaz_cur_codec)))
+				pnw_topaz_save_bias_table(topaz_priv,
+					(const void *)cmd,
+					cur_cmd_size,
+					cur_cmd_header->core);
+
+			PSB_DEBUG_GENERAL("TOPAZ: Start to write" \
+					" %d Registers\n", reg_cnt);
+
+			ret = pnw_topaz_write_reg(dev_priv,
+				p_command,
+				reg_cnt,
+				cur_cmd_header->core);
+			break;
+		case MTX_CMDID_PAD:
+			/* Ignore this command, which is used to skip
+			 * some commands in user space */
+			cur_cmd_size = sizeof(struct topaz_cmd_header);
+			cur_cmd_size += TOPAZ_COMMON_CMD_BYTES;
+			PNW_TOPAZ_CHECK_CMD_SIZE(cmd_size,
+					cur_cmd_size, cur_cmd_id);
+			break;
+		/* ordinary commmand */
+		case MTX_CMDID_START_PIC:
+		case MTX_CMDID_DO_HEADER:
+		case MTX_CMDID_ENCODE_SLICE:
+		case MTX_CMDID_END_PIC:
+		case MTX_CMDID_SETQUANT:
+		case MTX_CMDID_RESET_ENCODE:
+		case MTX_CMDID_ISSUEBUFF:
+		case MTX_CMDID_SETUP:
+		case MTX_CMDID_NULL:
+			cur_cmd_header->seq = topaz_priv->topaz_cmd_count++;
+			cur_cmd_header->enable_interrupt = 0;
+			cur_cmd_size = sizeof(struct topaz_cmd_header);
+			cur_cmd_size += TOPAZ_COMMON_CMD_BYTES;
+			PNW_TOPAZ_CHECK_CMD_SIZE(cmd_size,
+					cur_cmd_size, cur_cmd_id);
+			if (cur_free_space < cur_cmd_size) {
+				POLL_TOPAZ_FREE_FIFO_SPACE(
+						PNW_TOPAZ_WORDS_PER_CMDSET,
+						PNW_TOPAZ_POLL_DELAY,
+						PNW_TOPAZ_POLL_RETRY,
+						&cur_free_space);
+
+				if (ret) {
+					DRM_ERROR("TOPAZ: error -- ret(%d)\n",
+						  ret);
+					goto out;
+				}
+			}
+			p_command = (uint32_t *)
+				(cmd + sizeof(struct topaz_cmd_header));
+			PSB_DEBUG_GENERAL("TOPAZ: free FIFO space %d\n",
+					  cur_free_space);
+			PSB_DEBUG_GENERAL("TOPAZ: write 4 words to FIFO:"
+					  "0x%08x,0x%08x,0x%08x,0x%08x\n",
+					  cur_cmd_header->val,
+					  p_command[0],
+					  TOPAZ_MTX_WB_OFFSET(
+						  topaz_priv->topaz_wb_offset,
+						  cur_cmd_header->core),
+					  cur_cmd_header->seq);
+
+			TOPAZ_MULTICORE_WRITE32(TOPAZSC_CR_MULTICORE_CMD_FIFO_0,
+						cur_cmd_header->val);
+			TOPAZ_MULTICORE_WRITE32(TOPAZSC_CR_MULTICORE_CMD_FIFO_0,
+						p_command[0]);
+			TOPAZ_MULTICORE_WRITE32(TOPAZSC_CR_MULTICORE_CMD_FIFO_0,
+						TOPAZ_MTX_WB_OFFSET(
+							topaz_priv->topaz_wb_offset,
+							cur_cmd_header->core));
+			TOPAZ_MULTICORE_WRITE32(TOPAZSC_CR_MULTICORE_CMD_FIFO_0,
+						cur_cmd_header->seq);
+
+			cur_free_space -= 4;
+			topaz_priv->topaz_cmd_count %= MAX_TOPAZ_CMD_COUNT;
+			topaz_mtx_kick(dev_priv, 0, 1);
+#ifdef SYNC_FOR_EACH_COMMAND
+			pnw_wait_on_sync(dev_priv, cur_cmd_header->seq,
+					 topaz_priv->topaz_mtx_wb +
+					 cur_cmd_header->core *
+					 MTX_WRITEBACK_DATASIZE_ROUND + 1);
+#endif
+			break;
+		default:
+			DRM_ERROR("TOPAZ: unsupported command id: %x\n",
+				  cur_cmd_id);
+			goto out;
+		}
+
+		cmd += cur_cmd_size;
+		cmd_size -= cur_cmd_size;
+	}
+#if PNW_TOPAZ_NO_IRQ
+	PSB_DEBUG_GENERAL("reset NULL writeback to 0xffffffff,"
+			  "topaz_priv->topaz_sync_addr=0x%p\n",
+			  topaz_priv->topaz_sync_addr);
+
+	*((uint32_t *)topaz_priv->topaz_sync_addr + MTX_WRITEBACK_VALUE) = ~0;
+	pnw_topaz_kick_null_cmd(dev_priv, 0,
+				topaz_priv->topaz_sync_offset, sync_seq, 0);
+
+	if (0 != pnw_wait_on_sync(dev_priv, sync_seq,
+				  topaz_priv->topaz_sync_addr + MTX_WRITEBACK_VALUE)) {
+		DRM_ERROR("TOPAZSC: Polling the writeback of last command"
+			  " failed!\n");
+		topaz_read_core_reg(dev_priv,  0, 0x5, &reg_val);
+		DRM_ERROR("TOPAZSC: PC pointer of core 0 is %x\n", reg_val);
+		topaz_read_core_reg(dev_priv, 1, 0x5, &reg_val);
+		DRM_ERROR("TOPAZSC: PC pointer of core 1 is %x\n", reg_val);
+		TOPAZ_MULTICORE_READ32(TOPAZSC_CR_MULTICORE_CMD_FIFO_1,
+				       &reg_val);
+		reg_val &= MASK_TOPAZSC_CR_CMD_FIFO_SPACE;
+		DRM_ERROR("TOPAZSC: Free words in command FIFO %d\n", reg_val);
+		DRM_ERROR("TOPAZSC: Last writeback of core 0 %d\n",
+			  *(topaz_priv->topaz_mtx_wb +  1));
+		DRM_ERROR("TOPAZSC: Last writeback of core 1 %d\n",
+			  *(topaz_priv->topaz_mtx_wb +
+			    MTX_WRITEBACK_DATASIZE_ROUND + 1));
+	}
+
+	PSB_DEBUG_GENERAL("Kicked command with sequence 0x%08x,"
+			  " and polling it, got 0x%08x\n",
+			  sync_seq,
+			  *(topaz_priv->topaz_sync_addr + MTX_WRITEBACK_VALUE));
+	PSB_DEBUG_GENERAL("Can handle unfence here, but let fence"
+			  " polling do it\n");
+	topaz_priv->topaz_busy = 0;
+#else
+	PSB_DEBUG_GENERAL("Kick command with sequence %x\n", sync_seq);
+	topaz_priv->topaz_busy = 1; /* This may be reset in topaz_setup_fw*/
+	pnw_topaz_kick_null_cmd(dev_priv, 0,
+				topaz_priv->topaz_sync_offset,
+				sync_seq, 1);
+#endif
+out:
+	return ret;
+}
+
+int pnw_topaz_dequeue_send(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct pnw_topaz_cmd_queue *topaz_cmd = NULL;
+	int ret;
+	struct pnw_topaz_private *topaz_priv = dev_priv->topaz_private;
+
+	PSB_DEBUG_GENERAL("TOPAZ: dequeue command and send it to topaz\n");
+
+	if (list_empty(&topaz_priv->topaz_queue)) {
+		topaz_priv->topaz_busy = 0;
+		return 0;
+	}
+
+	topaz_cmd = list_first_entry(&topaz_priv->topaz_queue,
+		struct pnw_topaz_cmd_queue, head);
+	if (dev_priv->topaz_ctx) {
+		topaz_priv->topaz_busy = 1;
+
+		PSB_DEBUG_GENERAL("TOPAZ: queue has id %08x\n",
+			topaz_cmd->sequence);
+		ret = pnw_topaz_send(dev, topaz_cmd->cmd, topaz_cmd->cmd_size,
+			topaz_cmd->sequence);
+		if (ret) {
+			DRM_ERROR("TOPAZ: pnw_topaz_send failed.\n");
+			ret = -EINVAL;
+		}
+	} else {
+		/* Since context has been removed, we should discard the
+		   encoding commands in the queue and release fence */
+		PSB_DEBUG_TOPAZ("TOPAZ: Context has been removed!\n");
+		TOPAZ_MTX_WB_WRITE32(topaz_priv->topaz_sync_addr,
+			0, MTX_WRITEBACK_VALUE,
+			topaz_cmd->sequence);
+		topaz_priv->topaz_busy = 0;
+		psb_fence_handler(dev, LNC_ENGINE_ENCODE);
+		ret = -EINVAL;
+	}
+
+	list_del(&topaz_cmd->head);
+	kfree(topaz_cmd->cmd);
+	kfree(topaz_cmd);
+
+	return ret;
+}
+
+void topaz_mtx_kick(struct drm_psb_private *dev_priv, uint32_t core_id, uint32_t kick_count)
+{
+	PSB_DEBUG_GENERAL("TOPAZ: kick core(%d) mtx count(%d).\n",
+			  core_id, kick_count);
+	topaz_set_mtx_target(dev_priv, core_id, 0);
+	MTX_WRITE32(MTX_CR_MTX_KICK, kick_count, core_id);
+	return;
+}
+
+int pnw_check_topaz_idle(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *)dev->dev_private;
+	struct pnw_topaz_private *topaz_priv = dev_priv->topaz_private;
+
+	/*HW is stuck. Need to power off TopazSC to reset HW*/
+	if (topaz_priv->topaz_needs_reset)
+		return 0;
+
+	/* Even if topaz_ctx is null, which means there is no more encoding
+	    commands, return busy here to avoid runtime PM power down Topaz,
+	    but let Topaz suspend(D0i3) task will power down Topaz */
+	if (topaz_priv->topaz_busy)
+		return -EBUSY;
+
+	if (dev_priv->topaz_ctx == NULL)
+		return 0;
+
+	if (topaz_priv->topaz_hw_busy) {
+		PSB_DEBUG_PM("TOPAZ: %s, HW is busy\n", __func__);
+		return -EBUSY;
+	}
+
+	return 0; /* we think it is idle */
+}
+
+
+
+void pnw_topaz_flush_cmd_queue(struct pnw_topaz_private *topaz_priv)
+{
+	struct pnw_topaz_cmd_queue *entry, *next;
+
+	if (list_empty(&topaz_priv->topaz_queue))
+		return;
+
+	/* flush all command in queue */
+	list_for_each_entry_safe(entry, next,
+				 &topaz_priv->topaz_queue,
+				 head) {
+		list_del(&entry->head);
+		kfree(entry->cmd);
+		kfree(entry);
+	}
+
+	return;
+}
+
+void pnw_topaz_handle_timeout(struct ttm_fence_device *fdev)
+{
+	struct drm_psb_private *dev_priv =
+		container_of(fdev, struct drm_psb_private, fdev);
+	struct pnw_topaz_private *topaz_priv = dev_priv->topaz_private;
+	u32 cur_seq;
+
+	TOPAZ_MTX_WB_READ32(topaz_priv->topaz_sync_addr,
+			    0, MTX_WRITEBACK_VALUE, &cur_seq);
+
+	DRM_ERROR("TOPAZ:last sync seq:0x%08x (MTX)" \
+		      " vs 0x%08x(fence)\n",
+		      cur_seq, dev_priv->sequence[LNC_ENGINE_ENCODE]);
+
+	DRM_ERROR("TOPAZ: current codec is %s\n",
+			codec_to_string(topaz_priv->topaz_cur_codec));
+	pnw_topaz_flush_cmd_queue(topaz_priv);
+	topaz_priv->topaz_needs_reset = 1;
+	topaz_priv->topaz_busy = 0;
+}
+
+
+void pnw_topaz_enableirq(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	/* uint32_t ier = dev_priv->vdc_irq_mask | _PNW_IRQ_TOPAZ_FLAG; */
+
+	PSB_DEBUG_IRQ("TOPAZ: enable IRQ\n");
+
+	/* Only enable the master core IRQ*/
+	TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTENAB,
+		      F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_MAS_INTEN) |
+		      /* F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTEN_MVEA) | */
+		      F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTEN_MMU_FAULT) |
+		      F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTEN_MTX) |
+		      F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTEN_MTX_HALT),
+		      0);
+
+	/* write in sysirq.c */
+	/* PSB_WVDC32(ier, PSB_INT_ENABLE_R); /\* essential *\/ */
+}
+
+void pnw_topaz_disableirq(struct drm_device *dev)
+{
+
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	/*uint32_t ier = dev_priv->vdc_irq_mask & (~_PNW_IRQ_TOPAZ_FLAG); */
+
+	PSB_DEBUG_IRQ("TOPAZ: disable IRQ\n");
+
+	TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTENAB, 0, 0);
+
+	/* write in sysirq.c */
+	/* PSB_WVDC32(ier, PSB_INT_ENABLE_R); /\* essential *\/ */
+}
+
+void psb_powerdown_topaz(struct work_struct *work)
+{
+	struct pnw_topaz_private *topaz_priv =
+		container_of(work, struct pnw_topaz_private,
+					topaz_suspend_wq.work);
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *)topaz_priv->dev->dev_private;
+
+	if (!dev_priv->topaz_disabled)
+		ospm_apm_power_down_topaz(topaz_priv->dev);
+}
+
diff --git a/drivers/external_drivers/intel_media/video/encode/pnw_topaz.h b/drivers/external_drivers/intel_media/video/encode/pnw_topaz.h
new file mode 100644
index 0000000..754b483
--- /dev/null
+++ b/drivers/external_drivers/intel_media/video/encode/pnw_topaz.h
@@ -0,0 +1,208 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
+ * Copyright (c) Imagination Technologies Limited, UK
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#ifndef _PNW_TOPAZ_H_
+#define _PNW_TOPAZ_H_
+
+#include "psb_drv.h"
+#include "img_types.h"
+
+#define PNW_TOPAZ_NO_IRQ 0
+#define TOPAZ_MTX_REG_SIZE (34 * 4 + 183 * 4)
+#define MAX_TOPAZ_CORES 2
+
+/*Must be equal to IMG_CODEC_NUM*/
+#define PNW_TOPAZ_CODEC_NUM_MAX (11)
+#define PNW_TOPAZ_BIAS_TABLE_MAX_SIZE (2 * 1024)
+/* #define TOPAZ_PDUMP */
+
+/* Max command buffer size */
+#define PNW_MAX_CMD_BUF_PAGE_NUM (2)
+/* One cmd set contains 4 words */
+#define PNW_TOPAZ_WORDS_PER_CMDSET (4)
+#define PNW_TOPAZ_POLL_DELAY (100)
+#define PNW_TOPAZ_POLL_RETRY (10000)
+
+#define TOPAZ_NEW_CODEC_CMD_BYTES (4 * 2)
+#define TOPAZ_COMMON_CMD_BYTES (4 * 3)
+#define TOPAZ_POWER_CMD_BYTES (0)
+/* Every WRITEREG command set contain 2 words.
+   The first word indicates register offset.
+   The second word indicates register value */
+#define TOPAZ_WRITEREG_BYTES_PER_SET (4 * 2)
+#define TOPAZ_WRITEREG_MAX_SETS \
+	(PNW_TOPAZ_BIAS_TABLE_MAX_SIZE / TOPAZ_WRITEREG_BYTES_PER_SET)
+
+/* in words */
+#define TOPAZ_CMD_FIFO_SIZE (32)
+
+#define PNW_IS_H264_ENC(codec) \
+	(codec == IMG_CODEC_H264_VBR || \
+	 codec == IMG_CODEC_H264_VCM || \
+	 codec == IMG_CODEC_H264_CBR || \
+	 codec == IMG_CODEC_H264_NO_RC)
+
+#define PNW_IS_JPEG_ENC(codec) \
+	(codec == IMG_CODEC_JPEG)
+
+#define PNW_IS_MPEG4_ENC(codec) \
+	(codec == IMG_CODEC_MPEG4_VBR || \
+	 codec == IMG_CODEC_MPEG4_CBR || \
+	 codec == IMG_CODEC_MPEG4_NO_RC)
+
+#define PNW_IS_H263_ENC(codec) \
+	(codec == IMG_CODEC_H263_VBR || \
+	 codec == IMG_CODEC_H263_CBR || \
+	 codec == IMG_CODEC_H263_NO_RC)
+
+extern int drm_topaz_pmpolicy;
+
+/* XXX: it's a copy of msvdx cmd queue. should have some change? */
+struct pnw_topaz_cmd_queue {
+	struct list_head head;
+	void *cmd;
+	unsigned long cmd_size;
+	uint32_t sequence;
+};
+
+/* define structure */
+/* firmware file's info head */
+struct topazsc_fwinfo {
+	unsigned int ver:16;
+	unsigned int codec:16;
+
+	unsigned int text_size;
+	unsigned int data_size;
+	unsigned int data_location;
+};
+
+/* firmware data array define  */
+struct pnw_topaz_codec_fw {
+	uint32_t ver;
+	uint32_t codec;
+
+	uint32_t text_size;
+	uint32_t data_size;
+	uint32_t data_location;
+
+	struct ttm_buffer_object *text;
+	struct ttm_buffer_object *data;
+};
+
+struct pnw_topaz_private {
+	struct drm_device *dev;
+	unsigned int pmstate;
+	struct sysfs_dirent *sysfs_pmstate;
+
+	/*Save content of MTX register, whole RAM and BIAS table*/
+	void *topaz_mtx_reg_state[MAX_TOPAZ_CORES];
+	struct ttm_buffer_object *topaz_mtx_data_mem[MAX_TOPAZ_CORES];
+	uint32_t topaz_cur_codec;
+	uint32_t cur_mtx_data_size[MAX_TOPAZ_CORES];
+	int topaz_needs_reset;
+	void *topaz_bias_table[MAX_TOPAZ_CORES];
+
+	/*
+	 *topaz command queue
+	 */
+	spinlock_t topaz_lock;
+	struct list_head topaz_queue;
+	int topaz_busy;		/* 0 means topaz is free */
+	int topaz_fw_loaded;
+
+	uint32_t stored_initial_qp;
+	uint32_t topaz_dash_access_ctrl;
+
+	struct ttm_buffer_object *topaz_bo; /* 4K->2K/2K for writeback/sync */
+	struct ttm_bo_kmap_obj topaz_bo_kmap;
+	uint32_t *topaz_mtx_wb;
+	uint32_t topaz_wb_offset;
+	uint32_t *topaz_sync_addr;
+	uint32_t topaz_sync_offset;
+	uint32_t topaz_cmd_count;
+	uint32_t topaz_mtx_saved;
+
+
+	/* firmware */
+	struct pnw_topaz_codec_fw topaz_fw[PNW_TOPAZ_CODEC_NUM_MAX * 2];
+
+	uint32_t topaz_hw_busy;
+
+	uint32_t topaz_num_cores;
+
+	/*Before load firmware, need to set up jitter according to resolution*/
+	/*The data of MTX_CMDID_SW_NEW_CODEC command contains width and length*/
+	uint16_t frame_w;
+	uint16_t frame_h;
+	/* topaz suspend work queue */
+	struct delayed_work topaz_suspend_wq;
+	uint32_t pm_gating_count;
+};
+
+/* external function declare */
+/*ISR of TopazSC*/
+extern IMG_BOOL pnw_topaz_interrupt(void *pvData);
+
+/*topaz commad handling function*/
+extern int pnw_cmdbuf_video(struct drm_file *priv,
+			    struct list_head *validate_list,
+			    uint32_t fence_type,
+			    struct drm_psb_cmdbuf_arg *arg,
+			    struct ttm_buffer_object *cmd_buffer,
+			    struct psb_ttm_fence_rep *fence_arg);
+extern int pnw_wait_topaz_idle(struct drm_device *dev);
+extern int pnw_check_topaz_idle(struct drm_device *dev);
+extern int pnw_topaz_restore_mtx_state(struct drm_device *dev);
+extern void pnw_topaz_enableirq(struct drm_device *dev);
+extern void pnw_topaz_disableirq(struct drm_device *dev);
+
+extern int pnw_topaz_init(struct drm_device *dev);
+extern int pnw_topaz_uninit(struct drm_device *dev);
+extern void pnw_topaz_handle_timeout(struct ttm_fence_device *fdev);
+
+extern int pnw_topaz_save_mtx_state(struct drm_device *dev);
+
+#define PNW_TOPAZ_START_CTX (0x1)
+#define PNW_TOPAZ_END_CTX (0x1<<1)
+extern void pnw_reset_fw_status(struct drm_device *dev, u32 flag);
+
+extern void topaz_write_core_reg(struct drm_psb_private *dev_priv,
+				 uint32_t core,
+				 uint32_t reg,
+				 const uint32_t val);
+extern void topaz_read_core_reg(struct drm_psb_private *dev_priv,
+				uint32_t core,
+				uint32_t reg,
+				uint32_t *ret_val);
+extern void psb_powerdown_topaz(struct work_struct *work);
+
+extern void pnw_topaz_flush_cmd_queue(struct pnw_topaz_private *topaz_priv);
+#define PNW_TOPAZ_NEW_PMSTATE(drm_dev, topaz_priv, new_state)		\
+do { \
+	topaz_priv->pmstate = new_state;				\
+	if (new_state == PSB_PMSTATE_POWERDOWN)				\
+		topaz_priv->pm_gating_count++;				\
+	sysfs_notify_dirent(topaz_priv->sysfs_pmstate);			\
+	PSB_DEBUG_PM("TOPAZ: %s, power gating count 0x%08x\n",		\
+	(new_state == PSB_PMSTATE_POWERUP) ? "powerup" : "powerdown",	\
+		topaz_priv->pm_gating_count); \
+} while (0)
+
+#endif	/* _PNW_TOPAZ_H_ */
diff --git a/drivers/external_drivers/intel_media/video/encode/pnw_topaz_hw_reg.h b/drivers/external_drivers/intel_media/video/encode/pnw_topaz_hw_reg.h
new file mode 100644
index 0000000..f094fb7
--- /dev/null
+++ b/drivers/external_drivers/intel_media/video/encode/pnw_topaz_hw_reg.h
@@ -0,0 +1,1371 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
+ * Copyright (c) Imagination Technologies Limited, UK
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#ifndef _PNW_TOPAZ_HW_REG_H_
+#define _PNW_TOPAZ_HW_REG_H_
+
+#include "psb_drv.h"
+#include "img_types.h"
+#include "pnw_topaz.h"
+
+/*
+ * MACROS to insert values into fields within a word. The basename of the
+ * field must have MASK_BASENAME and SHIFT_BASENAME constants.
+ */
+#define MM_WRITE32(base, offset, value)  \
+do {				       \
+	*((unsigned long *)((unsigned char *)(dev_priv->topaz_reg)	\
+				+ base + offset)) = value;		\
+} while (0)
+
+#define MM_READ32(base, offset, pointer) \
+do {                                   \
+	*(pointer) = *((unsigned long *)((unsigned char *)(dev_priv->topaz_reg)\
+						+ base + offset));	\
+} while (0)
+
+#define F_MASK(basename)  (MASK_##basename)
+#define F_SHIFT(basename) (SHIFT_##basename)
+
+#define F_ENCODE(val, basename)  \
+	(((val) << (F_SHIFT(basename))) & (F_MASK(basename)))
+
+
+#define F_EXTRACT(val, basename) (((val) & (F_MASK(basename))) >> (F_SHIFT(basename)))
+
+/*! The number of TOPAZ cores present in the system */
+#define TOPAZSC_NUM_CORES 2
+
+/* The max RAM memory firmware will use */
+#define TOPAZ_MASTER_FW_MAX (22 * 1024)
+#define TOPAZ_SLAVE_FW_MAX (18 * 1024)
+
+#define TOPAZSC_REG_OFF_MAX (TOPAZSC_NUM_CORES * 0x10000 + 0x10000)
+#define TOPAZSC_REG_OFF_MIN (0x10000)
+#define REG_BASE_MTX                        0x04800000
+#define REG_BASE_HOST                       0x00000000
+
+#define MTX_CORE_CODE_MEM       (0x10)
+#define MTX_CORE_DATA_MEM       (0x18)
+
+/* Multicore Regs */
+#define REG_OFFSET_TOPAZ_MULTICORE	0x00000000
+#define REG_OFFSET_TOPAZ_DMAC		0x00001000
+
+#define REG_SIZE_TOPAZ_MULTICORE	0x00001000
+#define REG_SIZE_TOPAZ_DMAC		0x00001000
+
+/* Topaz core registers - Host view */
+#define REG_OFFSET_TOPAZ_CORE_HOST	0x00010000
+#define REG_SIZE_TOPAZ_CORE_HOST	0x00010000
+
+#define REG_OFFSET_TOPAZ_MTX_HOST	0x00000000
+#define REG_OFFSET_TOPAZ_TOPAZ_HOST	0x00002000
+#define REG_OFFSET_TOPAZ_MVEA_HOST	0x00003000
+#define REG_OFFSET_TOPAZ_MVEACMD_HOST	0x00004000
+#define REG_OFFSET_TOPAZ_VLC_HOST	0x00005000
+#define REG_OFFSET_TOPAZ_DEBLOCKER_HOST	0x00006000
+#define REG_OFFSET_TOPAZ_COMMS_HOST	0x00007000
+#define REG_OFFSET_TOPAZ_ESB_HOST	0x00008000
+
+#define MVEA_CR_SPE_PRED_VECTOR_BIAS_TABLE 0x037C
+#define MVEA_CR_IPE_LAMBDA_TABLE	0x01F0
+#define TOPAZ_BIASREG_MAX(core) \
+	(core * 0x10000 \
+	+ 0x10000 + REG_OFFSET_TOPAZ_MVEA_HOST \
+	+ MVEA_CR_SPE_PRED_VECTOR_BIAS_TABLE)
+
+#define TOPAZ_BIASREG_MIN(core) \
+	(core * 0x10000 \
+	+ 0x10000 + REG_OFFSET_TOPAZ_MVEA_HOST \
+	+ MVEA_CR_IPE_LAMBDA_TABLE)
+
+#define REG_SIZE_TOPAZ_MTX_HOST	0x00002000
+#define REG_SIZE_TOPAZ_TOPAZ_HOST	0x00001000
+#define REG_SIZE_TOPAZ_MVEA_HOST	0x00001000
+#define REG_SIZE_TOPAZ_MVEACMD_HOST	0x00001000
+#define REG_SIZE_TOPAZ_VLC_HOST	0x00001000
+#define REG_SIZE_TOPAZ_DEBLOCKER_HOST	0x00001000
+#define REG_SIZE_TOPAZ_COMMS_HOST	0x00001000
+#define REG_SIZE_TOPAZ_ESB_HOST	0x00004000
+
+
+/* Topaz core registers MTX view */
+#define REG_OFFSET_TOPAZ_CORE_MTX	0x00010000	/* MUST confirm */
+#define REG_SIZE_TOPAZ_CORE_MTX	0x00010000	/* MUST confirm */
+
+#define REG_OFFSET_TOPAZ_MTX_MTX	0x00000000
+#define REG_OFFSET_TOPAZ_TOPAZ_MTX	0x00000800
+#define REG_OFFSET_TOPAZ_MVEA_MTX	0x00000C00
+#define REG_OFFSET_TOPAZ_MVEACMD_MTX	0x00001000
+#define REG_OFFSET_TOPAZ_VLC_MTX	0x00001400
+#define REG_OFFSET_TOPAZ_DEBLOCKER_MTX	0x00001800
+#define REG_OFFSET_TOPAZ_COMMS_MTX	0x00001C00
+#define REG_OFFSET_TOPAZ_ESB_MTX	0x00002000
+
+#define REG_SIZE_TOPAZ_MTX_MTX		0x00000800
+#define REG_SIZE_TOPAZ_TOPAZ_MTX	0x00000400
+#define REG_SIZE_TOPAZ_MVEA_MTX		0x00000400
+#define REG_SIZE_TOPAZ_MVEACMD_MTX	0x00000400
+#define REG_SIZE_TOPAZ_VLC_MTX		0x00000400
+#define REG_SIZE_TOPAZ_DEBLOCKER_MTX	0x00000400
+#define REG_SIZE_TOPAZ_COMMS_MTX	0x00000400
+#define REG_SIZE_TOPAZ_ESB_MTX		0x00002000
+
+
+/* Register bank addresses - Host View */
+#define REG_START_TOPAZ_MULTICORE_HOST	(REG_BASE_HOST + REG_OFFSET_TOPAZ_MULTICORE)
+#define REG_END_TOPAZ_MULTICORE_HOST	(REG_START_TOPAZ_MULTICORE_HOST + REG_SIZE_TOPAZ_MULTICORE)
+
+#define REG_START_TOPAZ_DMAC_HOST	(REG_BASE_HOST + REG_OFFSET_TOPAZ_DMAC)
+#define REG_END_TOPAZ_DMAC_HOST		(REG_START_TOPAZ_DMAC_HOST + REG_SIZE_TOPAZ_DMAC)
+
+#define REG_START_TOPAZ_MTX_HOST(core)	(REG_BASE_HOST + (REG_SIZE_TOPAZ_CORE_HOST*core) + REG_OFFSET_TOPAZ_CORE_HOST + REG_OFFSET_TOPAZ_MTX_HOST)
+#define REG_END_TOPAZ_MTX_HOST(core)	(REG_START_TOPAZ_MTX_HOST(core) + REG_SIZE_TOPAZ_MTX_HOST)
+
+#define REG_START_TOPAZ_TOPAZ_HOST(core)	(REG_BASE_HOST + (REG_SIZE_TOPAZ_CORE_HOST*core) + REG_OFFSET_TOPAZ_CORE_HOST + REG_OFFSET_TOPAZ_TOPAZ_HOST)
+#define REG_END_TOPAZ_TOPAZ_HOST(core)	(REG_START_TOPAZ_TOPAZ_HOST(core) + REG_SIZE_TOPAZ_TOPAZ_HOST)
+
+#define REG_START_TOPAZ_MVEA_HOST(core)	(REG_BASE_HOST + (REG_SIZE_TOPAZ_CORE_HOST*core) + REG_OFFSET_TOPAZ_CORE_HOST + REG_OFFSET_TOPAZ_MVEA_HOST)
+#define REG_END_TOPAZ_MVEA_HOST(core)	(REG_START_TOPAZ_MVEA_HOST(core) + REG_SIZE_TOPAZ_MVEA_HOST)
+
+
+/* Register bank addresses - MTX view */
+#define REG_START_TOPAZ_MULTICORE_MTX	(REG_BASE_MTX + REG_OFFSET_TOPAZ_MULTICORE)
+#define REG_END_TOPAZ_MULTICORE_MTX	(REG_START_TOPAZ_MULTICORE_MTX + REG_SIZE_TOPAZ_MULTICORE)
+
+#define REG_START_TOPAZ_DMAC_MTX	(REG_BASE_MTX + REG_OFFSET_TOPAZ_DMAC)
+#define REG_END_TOPAZ_DMAC_MTX		(REG_START_TOPAZ_DMAC_MTX + REG_SIZE_TOPAZ_DMAC)
+
+#define REG_START_TOPAZ_MTX_MTX(core)	(REG_BASE_MTX + (REG_SIZE_TOPAZ_CORE_MTX*core) + REG_OFFSET_TOPAZ_CORE_MTX + REG_OFFSET_TOPAZ_MTX_MTX)
+#define REG_END_TOPAZ_MTX_MTX(core)	(REG_START_TOPAZ_MTX_MTX(core) + REG_SIZE_TOPAZ_MTX_MTX)
+
+#define REG_START_TOPAZ_TOPAZ_MTX(core)	(REG_BASE_MTX + (REG_SIZE_TOPAZ_CORE_MTX*core) + REG_OFFSET_TOPAZ_CORE_MTX + REG_OFFSET_TOPAZ_TOPAZ_MTX)
+#define REG_END_TOPAZ_TOPAZ_MTX(core)	(REG_START_TOPAZ_TOPAZ_MTX(core) + REG_SIZE_TOPAZ_TOPAZ_MTX)
+
+#define REG_START_TOPAZ_MVEA_MTX(core)	(REG_BASE_MTX + (REG_SIZE_TOPAZ_CORE_MTX*core) + REG_OFFSET_TOPAZ_CORE_MTX + REG_OFFSET_TOPAZ_MVEA_MTX)
+#define REG_END_TOPAZ_MVEA_MTX(core)	(REG_START_TOPAZ_MVEA_MTX(core) + REG_SIZE_TOPAZ_MVEA_MTX)
+
+
+/* Every Topaz core has a 64K address space*/
+#define TOPAZ_CORE_REG_BASE(core) (REG_BASE_HOST + (REG_SIZE_TOPAZ_CORE_HOST*core) + REG_OFFSET_TOPAZ_CORE_HOST)
+
+/* MVEA macro */
+#define MVEA_START 0x03000
+
+#ifdef TOPAZ_PDUMP
+#define MVEA_WRITE32(offset, value, core) \
+	do { \
+		MM_WRITE32(MVEA_START + TOPAZ_CORE_REG_BASE(core), offset, value); \
+		DRM_ERROR("TOPAZ_PDUMP: MVEA core %d, REG_WT %x %x\n", core, offset, value); \
+	} while (0)
+#define MVEA_READ32(offset, pointer, core) \
+	do { \
+		MM_READ32(MVEA_START + TOPAZ_CORE_REG_BASE(core), offset, pointer); \
+		DRM_ERROR("TOPAZ_PDUMP: MVEA core %d, REG_RD %x %x\n", core, offset, *(uint32_t *)pointer);\
+	} while (0)
+#else
+#define MVEA_WRITE32(offset, value, core) \
+		MM_WRITE32(MVEA_START + TOPAZ_CORE_REG_BASE(core), offset, value)
+
+#define MVEA_READ32(offset, pointer, core) \
+		MM_READ32(MVEA_START + TOPAZ_CORE_REG_BASE(core), offset, pointer)
+#endif
+
+#define F_MASK_MVEA(basename)  (MASK_MVEA_##basename)	/*     MVEA    */
+#define F_SHIFT_MVEA(basename) (SHIFT_MVEA_##basename)	/*     MVEA    */
+#define F_ENCODE_MVEA(val, basename)  \
+	(((val)<<(F_SHIFT_MVEA(basename)))&(F_MASK_MVEA(basename)))
+
+/* MVEA ESB macro */
+#define MVEA_ESB_START 0x08000
+
+#ifdef TOPAZ_PDUMP
+#define MVEA_ESB_WRITE32(offset, value, core) \
+	do { \
+		MM_WRITE32(MVEA_ESB_START + TOPAZ_CORE_REG_BASE(core),\
+				offset, value); \
+		DRM_ERROR("TOPAZ_PDUMP: MVEA core %d, REG_WT %x %x\n", \
+				core, offset, value); \
+	} while (0)
+#define MVEA_ESB_READ32(offset, pointer, core) \
+	do { \
+		MM_READ32(MVEA_ESB_START + TOPAZ_CORE_REG_BASE(core),\
+				offset, pointer); \
+		DRM_ERROR("TOPAZ_PDUMP: MVEA core %d, REG_RD %x %x\n", \
+				core, offset, *(uint32_t *)pointer);\
+	} while (0)
+#else
+#define MVEA_ESB_WRITE32(offset, value, core) \
+		MM_WRITE32(MVEA_ESB_START + TOPAZ_CORE_REG_BASE(core), \
+				offset, value)
+
+#define MVEA_ESB_READ32(offset, pointer, core) \
+		MM_READ32(MVEA_ESB_START + TOPAZ_CORE_REG_BASE(core), \
+				offset, pointer)
+#endif
+
+
+/* VLC macro */
+#define TOPAZ_VLC_START 0x05000
+
+/* TOPAZ macro */
+#define TOPAZ_START 0x02000
+
+#ifdef TOPAZ_PDUMP
+#define TOPAZ_WRITE32(offset, value, core) \
+	do {\
+		MM_WRITE32(TOPAZ_START + TOPAZ_CORE_REG_BASE(core), offset, value); \
+		DRM_ERROR("TOPAZ_PDUMP: TOPAZ_CORE %d REG_WT: %x %x\n", core,  \
+			 offset, value);\
+	} while (0)
+#define TOPAZ_READ32(offset, pointer, core) \
+	do { \
+		MM_READ32(TOPAZ_START + TOPAZ_CORE_REG_BASE(core), offset, pointer); \
+		DRM_ERROR("TOPAZ_PDUMP: TOPAZ_CORE %d REG_RD: %x %x\n", core, \
+			offset, *(uint32_t *)pointer);\
+	} while (0)
+#else
+#define TOPAZ_WRITE32(offset, value, core) \
+		MM_WRITE32(TOPAZ_START + TOPAZ_CORE_REG_BASE(core), offset, value)
+#define TOPAZ_READ32(offset, pointer, core) \
+		MM_READ32(TOPAZ_START + TOPAZ_CORE_REG_BASE(core), offset, pointer)
+#endif
+#define F_MASK_TOPAZ(basename)  (MASK_TOPAZ_##basename)
+#define F_SHIFT_TOPAZ(basename) (SHIFT_TOPAZ_##basename)
+#define F_ENCODE_TOPAZ(val, basename) \
+	(((val)<<(F_SHIFT_TOPAZ(basename)))&(F_MASK_TOPAZ(basename)))
+
+/* MTX macro */
+#define MTX_START 0x0
+
+#ifdef TOPAZ_PDUMP
+#define MTX_WRITE32(offset, value, core) \
+	do { \
+		MM_WRITE32(MTX_START + TOPAZ_CORE_REG_BASE(core), offset, value); \
+		DRM_ERROR("TOPAZ_PDUMP: MTX core %d REG_WT: %x %x\n", core,\
+			offset, value);\
+	} while (0)
+
+
+#define MTX_READ32(offset, pointer, core) \
+	do { \
+		MM_READ32(MTX_START + TOPAZ_CORE_REG_BASE(core), offset, pointer); \
+		DRM_ERROR("TOPAZ_PDUMP: MTX core %d REG_RD %x %x\n", core, \
+			offset, *(uint32_t *)pointer); \
+	} while (0);
+#else
+
+#define MTX_WRITE32(offset, value, core) \
+		MM_WRITE32(MTX_START + TOPAZ_CORE_REG_BASE(core), offset, value)
+#define MTX_READ32(offset, pointer, core) \
+		MM_READ32(MTX_START + TOPAZ_CORE_REG_BASE(core), offset, pointer)
+#endif
+
+/* DMAC macro */
+#define DMAC_START 0x01000
+
+#ifdef TOPAZ_DUMP
+#define DMAC_WRITE32(offset, value) \
+	do { \
+		 MM_WRITE32(DMAC_START, offset, value);\
+		DRM_ERROR("TOPAZ_PDUMP: DMAC WT %x %x\n", offset, value);\
+	} while (0);
+
+#define DMAC_READ32(offset, pointer) \
+	do {\
+		MM_READ32(DMAC_START, offset, pointer);\
+		DRM_ERROR("TOPAZ_PDUMP: DMAC RD %x %x\n", offset, *(uint32_t *)pointer); \
+	} while (0)
+#else
+
+#define DMAC_WRITE32(offset, value) \
+		 MM_WRITE32(DMAC_START, offset, value)
+
+#define DMAC_READ32(offset, pointer) \
+		MM_READ32(DMAC_START, offset, pointer)
+#endif
+#define F_MASK_DMAC(basename)  (MASK_DMAC_##basename)
+#define F_SHIFT_DMAC(basename) (SHIFT_DMAC_##basename)
+#define F_ENCODE_DMAC(val, basename)  \
+	(((val)<<(F_SHIFT_DMAC(basename)))&(F_MASK_DMAC(basename)))
+
+#define TOPAZ_CR_FIRMWARE_REG_1 (0x100)
+#define MTX_SCRATCHREG_TOMTX (2)
+#define TOPAZ_FIRMWARE_MAGIC (0xa5a5a5a5)
+
+/* Register CR_IMG_TOPAZ_INTENAB */
+#define TOPAZ_CR_IMG_TOPAZ_INTENAB  0x0008
+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTEN_MVEA 0x00000001
+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTEN_MVEA 0
+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTEN_MVEA 0x0008
+
+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX 0x00000002
+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX 1
+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX 0x0008
+
+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX_HALT 0x00000004
+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX_HALT 2
+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX_HALT 0x0008
+
+/*(Bit 3 enables fault interrupts caused by the topaz_cores. Bit 4 enables
+ * fault interrupts caused by the DMAC)*/
+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTEN_MMU_FAULT 0x00000018
+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTEN_MMU_FAULT 3
+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTEN_MMU_FAULT 0x0008
+
+#define MASK_TOPAZ_CR_IMG_TOPAZ_MAS_INTEN 0x80000000
+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_MAS_INTEN 31
+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_MAS_INTEN 0x0008
+
+#define TOPAZ_CR_IMG_TOPAZ_INTCLEAR 0x000C
+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTCLR_MVEA 0x00000001
+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTCLR_MVEA 0
+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTCLR_MVEA 0x000C
+
+#define TOPAZ_CR_IMG_TOPAZ_INTSTAT  0x0004
+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTS_MVEA 0x00000001
+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTS_MVEA 0
+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTS_MVEA 0x0004
+
+#define MTX_CCBCTRL_ROFF		0
+#define MTX_CCBCTRL_COMPLETE		4
+#define MTX_CCBCTRL_CCBSIZE		8
+#define MTX_CCBCTRL_QP			12
+#define MTX_CCBCTRL_FRAMESKIP		20
+#define MTX_CCBCTRL_INITQP		24
+
+#define TOPAZ_CR_MMU_STATUS         0x001C
+#define MASK_TOPAZ_CR_MMU_PF_N_RW   0x00000001
+#define SHIFT_TOPAZ_CR_MMU_PF_N_RW  0
+#define REGNUM_TOPAZ_CR_MMU_PF_N_RW 0x001C
+
+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTCLR_MMU_FAULT 0x00000008
+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTCLR_MMU_FAULT 3
+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTCLR_MMU_FAULT 0x000C
+
+#define TOPAZ_CR_MMU_MEM_REQ        0x0020
+#define MASK_TOPAZ_CR_MEM_REQ_STAT_READS 0x000000FF
+#define SHIFT_TOPAZ_CR_MEM_REQ_STAT_READS 0
+#define REGNUM_TOPAZ_CR_MEM_REQ_STAT_READS 0x0020
+
+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX 0x00000002
+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX 1
+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX 0x000C
+
+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX_HALT 0x00000004
+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX_HALT 2
+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX_HALT 0x000C
+
+/* Register CR_TOPAZ_CMD_FIFO_2 */
+#define TOPAZ_CR_TOPAZ_CMD_FIFO_2   0x005C
+#define MASK_TOPAZ_CR_CMD_FIFO_FLUSH 0x00000001
+#define SHIFT_TOPAZ_CR_CMD_FIFO_FLUSH 0
+#define REGNUM_TOPAZ_CR_CMD_FIFO_FLUSH 0x005C
+
+#define MTX_CR_MTX_KICK             0x0080
+#define MASK_MTX_MTX_KICK           0x0000FFFF
+#define SHIFT_MTX_MTX_KICK          0
+#define REGNUM_MTX_MTX_KICK         0x0080
+
+#define MTX_DATA_MEM_BASE		0x82880000
+
+#define MTX_CR_MTX_RAM_ACCESS_CONTROL 0x0108
+#define MASK_MTX_MTX_MCMR           0x00000001
+#define SHIFT_MTX_MTX_MCMR          0
+#define REGNUM_MTX_MTX_MCMR         0x0108
+
+#define MASK_MTX_MTX_MCMID          0x0FF00000
+#define SHIFT_MTX_MTX_MCMID         20
+#define REGNUM_MTX_MTX_MCMID        0x0108
+
+#define MASK_MTX_MTX_MCM_ADDR       0x000FFFFC
+#define SHIFT_MTX_MTX_MCM_ADDR      2
+#define REGNUM_MTX_MTX_MCM_ADDR     0x0108
+
+#define MTX_CR_MTX_RAM_ACCESS_STATUS 0x010C
+#define MASK_MTX_MTX_MTX_MCM_STAT   0x00000001
+#define SHIFT_MTX_MTX_MTX_MCM_STAT  0
+#define REGNUM_MTX_MTX_MTX_MCM_STAT 0x010C
+
+#define MASK_MTX_MTX_MCMAI          0x00000002
+#define SHIFT_MTX_MTX_MCMAI         1
+#define REGNUM_MTX_MTX_MCMAI        0x0108
+
+#define MVEA_CR_MVEA_BUSY           0x0018
+#define MVEA_CR_MVEA_DMACMDFIFO_WAIT 0x001C
+#define MVEA_CR_MVEA_DMACMDFIFO_STATUS 0x0020
+
+#define MVEA_CR_IMG_MVEA_SRST       0x0000
+#define MASK_MVEA_CR_IMG_MVEA_SPE_SOFT_RESET 0x00000001
+#define SHIFT_MVEA_CR_IMG_MVEA_SPE_SOFT_RESET 0
+#define REGNUM_MVEA_CR_IMG_MVEA_SPE_SOFT_RESET 0x0000
+
+#define MASK_MVEA_CR_IMG_MVEA_IPE_SOFT_RESET 0x00000002
+#define SHIFT_MVEA_CR_IMG_MVEA_IPE_SOFT_RESET 1
+#define REGNUM_MVEA_CR_IMG_MVEA_IPE_SOFT_RESET 0x0000
+
+#define MASK_MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET 0x00000004
+#define SHIFT_MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET 2
+#define REGNUM_MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET 0x0000
+
+#define MASK_MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET 0x00000008
+#define SHIFT_MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET 3
+#define REGNUM_MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET 0x0000
+
+#define MASK_MVEA_CR_IMG_MVEA_CMC_SOFT_RESET 0x00000010
+#define SHIFT_MVEA_CR_IMG_MVEA_CMC_SOFT_RESET 4
+#define REGNUM_MVEA_CR_IMG_MVEA_CMC_SOFT_RESET 0x0000
+
+#define MASK_MVEA_CR_IMG_MVEA_DCF_SOFT_RESET 0x00000020
+#define SHIFT_MVEA_CR_IMG_MVEA_DCF_SOFT_RESET 5
+#define REGNUM_MVEA_CR_IMG_MVEA_DCF_SOFT_RESET 0x0000
+
+#define TOPAZ_CR_IMG_TOPAZ_CORE_ID  0x03C0
+#define TOPAZ_CR_IMG_TOPAZ_CORE_REV 0x03D0
+
+#define TOPAZ_MTX_PC		(0x00000005)
+
+#define TOPAZ_CR_TOPAZ_AUTO_CLK_GATE 0x0014
+#define MASK_TOPAZ_CR_TOPAZ_VLC_AUTO_CLK_GATE 0x00000001
+#define SHIFT_TOPAZ_CR_TOPAZ_VLC_AUTO_CLK_GATE 0
+#define REGNUM_TOPAZ_CR_TOPAZ_VLC_AUTO_CLK_GATE 0x0014
+
+#define MASK_TOPAZ_CR_TOPAZ_DB_AUTO_CLK_GATE 0x00000002
+#define SHIFT_TOPAZ_CR_TOPAZ_DB_AUTO_CLK_GATE 1
+#define REGNUM_TOPAZ_CR_TOPAZ_DB_AUTO_CLK_GATE 0x0014
+
+#define	MTX_CORE_CR_MTX_REGISTER_READ_WRITE_DATA_OFFSET 0x000000F8
+#define	MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET 0x000000FC
+#define	MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_MASK 0x00010000
+#define	MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK 0x80000000
+
+#define	TOPAZ_CORE_CR_MTX_DEBUG_OFFSET	0x00000044
+
+#define MASK_TOPAZ_CR_MTX_RAM_BANKS 0x00000F00
+#define SHIFT_TOPAZ_CR_MTX_RAM_BANKS 8
+#define REGNUM_TOPAZ_CR_MTX_RAM_BANKS 0x0044
+
+#define MASK_TOPAZ_CR_MTX_RAM_BANK_SIZE 0x000F0000
+#define SHIFT_TOPAZ_CR_MTX_RAM_BANK_SIZE 16
+#define REGNUM_TOPAZ_CR_MTX_RAM_BANK_SIZE 0x0044
+
+#define MASK_TOPAZ_CR_MTX_LAST_RAM_BANK_SIZE 0x0F000000
+#define SHIFT_TOPAZ_CR_MTX_LAST_RAM_BANK_SIZE 24
+#define REGNUM_TOPAZ_CR_MTX_LAST_RAM_BANK_SIZE 0x0044
+
+#define MASK_TOPAZ_CR_MTX_DBG_IS_SLAVE 0x00000004
+#define SHIFT_TOPAZ_CR_MTX_DBG_IS_SLAVE 2
+#define REGNUM_TOPAZ_CR_MTX_DBG_IS_SLAVE 0x003C
+
+#define MASK_TOPAZ_CR_MTX_DBG_GPIO_OUT 0x00000018
+#define SHIFT_TOPAZ_CR_MTX_DBG_GPIO_OUT 3
+#define REGNUM_TOPAZ_CR_MTX_DBG_GPIO_OUT 0x003C
+
+/* Register CR_MTX_RAM_ACCESS_DATA_EXCHANGE */
+#define MTX_CR_MTX_RAM_ACCESS_DATA_EXCHANGE 0x0100
+/* Register CR_MTX_RAM_ACCESS_DATA_TRANSFER */
+#define MTX_CR_MTX_RAM_ACCESS_DATA_TRANSFER 0x0104
+
+#define	MTX_CORE_CR_MTX_RAM_ACCESS_CONTROL_OFFSET 0x00000108
+#define MASK_MTX_MTX_MCMR           0x00000001
+#define SHIFT_MTX_MTX_MCMR          0
+#define REGNUM_MTX_MTX_MCMR         0x0108
+
+#define MASK_MTX_MTX_MCMAI          0x00000002
+#define SHIFT_MTX_MTX_MCMAI         1
+#define REGNUM_MTX_MTX_MCMAI        0x0108
+
+#define MASK_MTX_MTX_MCM_ADDR       0x000FFFFC
+#define SHIFT_MTX_MTX_MCM_ADDR      2
+#define REGNUM_MTX_MTX_MCM_ADDR     0x0108
+
+#define MASK_MTX_MTX_MCMID          0x0FF00000
+#define SHIFT_MTX_MTX_MCMID         20
+#define REGNUM_MTX_MTX_MCMID        0x0108
+
+#define TOPAZ_CR_MMU_CONTROL0       0x0024
+#define MASK_TOPAZ_CR_MMU_BYPASS_DMAC 0x00020000
+#define SHIFT_TOPAZ_CR_MMU_BYPASS_DMAC 17
+#define REGNUM_TOPAZ_CR_MMU_BYPASS_DMAC 0x0024
+
+#define MASK_TOPAZ_CR_MMU_BYPASS    0x00010000
+#define SHIFT_TOPAZ_CR_MMU_BYPASS   16
+#define REGNUM_TOPAZ_CR_MMU_BYPASS  0x0024
+
+#define TOPAZ_CR_MMU_DIR_LIST_BASE(X) (0x0030 + (4 * (X)))
+#define MASK_TOPAZ_CR_MMU_DIR_LIST_BASE_ADDR 0xFFFFF000
+#define SHIFT_TOPAZ_CR_MMU_DIR_LIST_BASE_ADDR 12
+#define REGNUM_TOPAZ_CR_MMU_DIR_LIST_BASE_ADDR 0x0030
+
+#define MASK_TOPAZ_CR_MMU_INVALDC   0x00000008
+#define SHIFT_TOPAZ_CR_MMU_INVALDC  3
+#define REGNUM_TOPAZ_CR_MMU_INVALDC 0x0024
+
+#define MASK_TOPAZ_CR_MMU_FLUSH     0x00000004
+#define SHIFT_TOPAZ_CR_MMU_FLUSH    2
+#define REGNUM_TOPAZ_CR_MMU_FLUSH   0x0024
+
+#define TOPAZ_CR_MMU_CONTROL1       0x0028
+#define TOPAZ_CR_MMU_TILE_BASE0     0x0038
+
+/* Register CR_MMU_BANK_INDEX */
+#define TOPAZ_CR_MMU_BANK_INDEX     0x0040
+#define MASK_TOPAZ_CR_MMU_BANK_N_INDEX_M(i) (0x00000003 << (8 + ((i) * 2)))
+#define SHIFT_TOPAZ_CR_MMU_BANK_N_INDEX_M(i) (8 + ((i) * 2))
+#define REGNUM_TOPAZ_CR_MMU_BANK_N_INDEX_M(i) 0x0040
+
+#define MASK_TOPAZ_CR_MMU_BANK_SELECT(i) (0x00000001 << (0 + ((i) * 1)))
+#define SHIFT_TOPAZ_CR_MMU_BANK_SELECT(i) (0 + ((i) * 1))
+#define REGNUM_TOPAZ_CR_MMU_BANK_SELECT(i) 0x0040
+
+#define TOPAZ_CR_TOPAZ_MAN_CLK_GATE 0x0010
+#define MASK_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE 0x00000002
+#define SHIFT_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE 1
+#define REGNUM_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE 0x0010
+
+#define MTX_CORE_CR_MTX_TXRPT_OFFSET 0x0000000c
+#define TXRPT_WAITONKICK_VALUE 0x8ade0000
+
+#define MTX_CORE_CR_MTX_ENABLE_MTX_TOFF_MASK 0x00000002
+
+#define MTX_CORE_CR_MTX_ENABLE_OFFSET 0x00000000
+#define	MTX_CORE_CR_MTX_ENABLE_MTX_ENABLE_MASK 0x00000001
+
+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTS_MTX 0x00000002
+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTS_MTX 1
+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTS_MTX 0x0004
+
+#define	MTX_CORE_CR_MTX_SOFT_RESET_OFFSET 0x00000200
+#define	MTX_CORE_CR_MTX_SOFT_RESET_MTX_RESET_MASK 0x00000001
+
+#define MTX_CR_MTX_SYSC_CDMAA       0x0344
+#define MASK_MTX_CDMAA_ADDRESS      0x03FFFFFC
+#define SHIFT_MTX_CDMAA_ADDRESS     2
+#define REGNUM_MTX_CDMAA_ADDRESS    0x0344
+
+#define MTX_CR_MTX_SYSC_CDMAC       0x0340
+#define MASK_MTX_LENGTH             0x0000FFFF
+#define SHIFT_MTX_LENGTH            0
+#define REGNUM_MTX_LENGTH           0x0340
+
+#define MASK_MTX_BURSTSIZE          0x07000000
+#define SHIFT_MTX_BURSTSIZE         24
+#define REGNUM_MTX_BURSTSIZE        0x0340
+
+#define MASK_MTX_RNW                0x00020000
+#define SHIFT_MTX_RNW               17
+#define REGNUM_MTX_RNW              0x0340
+
+#define MASK_MTX_ENABLE             0x00010000
+#define SHIFT_MTX_ENABLE            16
+#define REGNUM_MTX_ENABLE           0x0340
+
+#define MASK_MTX_LENGTH             0x0000FFFF
+#define SHIFT_MTX_LENGTH            0
+#define REGNUM_MTX_LENGTH           0x0340
+
+#define TOPAZ_CR_IMG_TOPAZ_SRST     0x0000
+#define MASK_TOPAZ_CR_IMG_TOPAZ_MVEA_SOFT_RESET 0x00000001
+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_MVEA_SOFT_RESET 0
+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_MVEA_SOFT_RESET 0x0000
+
+#define MASK_TOPAZ_CR_IMG_TOPAZ_IO_SOFT_RESET 0x00000004
+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_IO_SOFT_RESET 2
+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_IO_SOFT_RESET 0x0000
+#define SIGNED_TOPAZ_CR_IMG_TOPAZ_IO_SOFT_RESET 0
+
+#define MASK_TOPAZ_CR_IMG_TOPAZ_VLC_SOFT_RESET 0x00000008
+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_VLC_SOFT_RESET 3
+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_VLC_SOFT_RESET 0x0000
+#define SIGNED_TOPAZ_CR_IMG_TOPAZ_VLC_SOFT_RESET 0
+
+#define MASK_TOPAZ_CR_IMG_TOPAZ_DB_SOFT_RESET 0x00000010
+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_DB_SOFT_RESET 4
+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_DB_SOFT_RESET 0x0000
+#define SIGNED_TOPAZ_CR_IMG_TOPAZ_DB_SOFT_RESET 0
+
+#define MASK_TOPAZ_CR_IMG_TOPAZ_MTX_SOFT_RESET 0x00000002
+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_MTX_SOFT_RESET 1
+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_MTX_SOFT_RESET 0x0000
+
+#define MVEA_CR_MVEA_AUTO_CLOCK_GATING 0x0024
+#define MASK_MVEA_CR_MVEA_SPE_AUTO_CLK_GATE 0x00000001
+#define SHIFT_MVEA_CR_MVEA_SPE_AUTO_CLK_GATE 0
+#define REGNUM_MVEA_CR_MVEA_SPE_AUTO_CLK_GATE 0x0024
+
+#define MASK_MVEA_CR_MVEA_IPE_AUTO_CLK_GATE 0x00000002
+#define SHIFT_MVEA_CR_MVEA_IPE_AUTO_CLK_GATE 1
+#define REGNUM_MVEA_CR_MVEA_IPE_AUTO_CLK_GATE 0x0024
+
+#define MASK_MVEA_CR_MVEA_CMPRS_AUTO_CLK_GATE 0x00000004
+#define SHIFT_MVEA_CR_MVEA_CMPRS_AUTO_CLK_GATE 2
+#define REGNUM_MVEA_CR_MVEA_CMPRS_AUTO_CLK_GATE 0x0024
+
+#define MASK_MVEA_CR_MVEA_JMCOMP_AUTO_CLK_GATE 0x00000008
+#define SHIFT_MVEA_CR_MVEA_JMCOMP_AUTO_CLK_GATE 3
+#define REGNUM_MVEA_CR_MVEA_JMCOMP_AUTO_CLK_GATE 0x0024
+
+/*#define TOPAZ_CR_IMG_TOPAZ_DMAC_MODE 0x0040
+#define MASK_TOPAZ_CR_DMAC_MASTER_MODE 0x00000001
+#define SHIFT_TOPAZ_CR_DMAC_MASTER_MODE 0
+#define REGNUM_TOPAZ_CR_DMAC_MASTER_MODE 0x0040*/
+
+/* Register CR_TOPAZ_HW_CFG */
+#define TOPAZ_CR_TOPAZ_HW_CFG       0x0050
+#define MASK_TOPAZ_CR_NUM_CORES_SUPPORTED 0x0000001F
+#define SHIFT_TOPAZ_CR_NUM_CORES_SUPPORTED 0
+#define REGNUM_TOPAZ_CR_NUM_CORES_SUPPORTED 0x0050
+
+/* Register CR_TOPAZ_CMD_FIFO_0 */
+#define TOPAZ_CR_TOPAZ_CMD_FIFO_0   0x0054
+#define MASK_TOPAZ_CR_CMD_FIFO_RDATA 0xFFFFFFFF
+#define SHIFT_TOPAZ_CR_CMD_FIFO_RDATA 0
+#define REGNUM_TOPAZ_CR_CMD_FIFO_RDATA 0x0054
+
+/* Register CR_TOPAZ_CMD_FIFO_1 */
+#define TOPAZ_CR_TOPAZ_CMD_FIFO_1   0x0058
+#define MASK_TOPAZ_CR_CMD_FIFO_QUANTITY 0x000000FF
+#define SHIFT_TOPAZ_CR_CMD_FIFO_QUANTITY 0
+#define REGNUM_TOPAZ_CR_CMD_FIFO_QUANTITY 0x0058
+
+#define MASK_TOPAZ_CR_CMD_FIFO_NOTEMPTY 0x00000100
+#define SHIFT_TOPAZ_CR_CMD_FIFO_NOTEMPTY 8
+#define REGNUM_TOPAZ_CR_CMD_FIFO_NOTEMPTY 0x0058
+
+/* Register CR_TOPAZ_CMD_FIFO_2 */
+#define TOPAZ_CR_TOPAZ_CMD_FIFO_2   0x005C
+#define MASK_TOPAZ_CR_CMD_FIFO_FLUSH 0x00000001
+#define SHIFT_TOPAZ_CR_CMD_FIFO_FLUSH 0
+#define REGNUM_TOPAZ_CR_CMD_FIFO_FLUSH 0x005C
+
+#define MTX_CR_MTX_SYSC_CDMAT       0x0350
+#define MASK_MTX_TRANSFERDATA       0xFFFFFFFF
+#define SHIFT_MTX_TRANSFERDATA      0
+#define REGNUM_MTX_TRANSFERDATA     0x0350
+
+#define IMG_SOC_DMAC_IRQ_STAT(X)    (0x000C + (32 * (X)))
+#define MASK_IMG_SOC_TRANSFER_FIN   0x00020000
+#define SHIFT_IMG_SOC_TRANSFER_FIN  17
+#define REGNUM_IMG_SOC_TRANSFER_FIN 0x000C
+
+#define IMG_SOC_DMAC_COUNT(X)       (0x0004 + (32 * (X)))
+#define MASK_IMG_SOC_CNT            0x0000FFFF
+#define SHIFT_IMG_SOC_CNT           0
+#define REGNUM_IMG_SOC_CNT          0x0004
+
+#define MASK_IMG_SOC_EN             0x00010000
+#define SHIFT_IMG_SOC_EN            16
+#define REGNUM_IMG_SOC_EN           0x0004
+
+#define MASK_IMG_SOC_LIST_EN        0x00040000
+#define SHIFT_IMG_SOC_LIST_EN       18
+#define REGNUM_IMG_SOC_LIST_EN      0x0004
+
+#define IMG_SOC_DMAC_PER_HOLD(X)    (0x0018 + (32 * (X)))
+#define MASK_IMG_SOC_PER_HOLD       0x0000007F
+#define SHIFT_IMG_SOC_PER_HOLD      0
+#define REGNUM_IMG_SOC_PER_HOLD     0x0018
+
+#define IMG_SOC_DMAC_SETUP(X)       (0x0000 + (32 * (X)))
+#define MASK_IMG_SOC_START_ADDRESS  0xFFFFFFF
+#define SHIFT_IMG_SOC_START_ADDRESS 0
+#define REGNUM_IMG_SOC_START_ADDRESS 0x0000
+
+#define MASK_IMG_SOC_BSWAP          0x40000000
+#define SHIFT_IMG_SOC_BSWAP         30
+#define REGNUM_IMG_SOC_BSWAP        0x0004
+
+#define MASK_IMG_SOC_PW             0x18000000
+#define SHIFT_IMG_SOC_PW            27
+#define REGNUM_IMG_SOC_PW           0x0004
+
+#define MASK_IMG_SOC_DIR            0x04000000
+#define SHIFT_IMG_SOC_DIR           26
+#define REGNUM_IMG_SOC_DIR          0x0004
+
+#define MASK_IMG_SOC_PI             0x03000000
+#define SHIFT_IMG_SOC_PI            24
+#define REGNUM_IMG_SOC_PI           0x0004
+#define IMG_SOC_PI_1		0x00000002
+#define IMG_SOC_PI_2		0x00000001
+#define IMG_SOC_PI_4		0x00000000
+
+#define MASK_IMG_SOC_TRANSFER_IEN   0x20000000
+#define SHIFT_IMG_SOC_TRANSFER_IEN  29
+#define REGNUM_IMG_SOC_TRANSFER_IEN 0x0004
+
+#define DMAC_VALUE_COUNT(BSWAP, PW, DIR, PERIPH_INCR, COUNT)        \
+	((((BSWAP) << SHIFT_IMG_SOC_BSWAP) & MASK_IMG_SOC_BSWAP)|	\
+		(((PW) << SHIFT_IMG_SOC_PW) & MASK_IMG_SOC_PW)|		\
+		(((DIR) << SHIFT_IMG_SOC_DIR) & MASK_IMG_SOC_DIR)|	\
+		(((PERIPH_INCR) << SHIFT_IMG_SOC_PI) & MASK_IMG_SOC_PI)| \
+		(((COUNT) << SHIFT_IMG_SOC_CNT) & MASK_IMG_SOC_CNT))
+
+#define IMG_SOC_DMAC_PERIPH(X)      (0x0008 + (32 * (X)))
+#define MASK_IMG_SOC_EXT_SA         0x0000000F
+#define SHIFT_IMG_SOC_EXT_SA        0
+#define REGNUM_IMG_SOC_EXT_SA       0x0008
+
+#define MASK_IMG_SOC_ACC_DEL        0xE0000000
+#define SHIFT_IMG_SOC_ACC_DEL       29
+#define REGNUM_IMG_SOC_ACC_DEL      0x0008
+
+#define MASK_IMG_SOC_INCR           0x08000000
+#define SHIFT_IMG_SOC_INCR          27
+#define REGNUM_IMG_SOC_INCR         0x0008
+
+#define MASK_IMG_SOC_BURST          0x07000000
+#define SHIFT_IMG_SOC_BURST         24
+#define REGNUM_IMG_SOC_BURST        0x0008
+
+#define DMAC_VALUE_PERIPH_PARAM(ACC_DEL, INCR, BURST)             \
+((((ACC_DEL) << SHIFT_IMG_SOC_ACC_DEL) & MASK_IMG_SOC_ACC_DEL)|	\
+(((INCR) << SHIFT_IMG_SOC_INCR) & MASK_IMG_SOC_INCR)|             \
+(((BURST) << SHIFT_IMG_SOC_BURST) & MASK_IMG_SOC_BURST))
+
+#define IMG_SOC_DMAC_PERIPHERAL_ADDR(X) (0x0014 + (32 * (X)))
+#define MASK_IMG_SOC_ADDR           0x007FFFFF
+#define SHIFT_IMG_SOC_ADDR          0
+#define REGNUM_IMG_SOC_ADDR         0x0014
+
+#define SHIFT_TOPAZ_VEC_BUSY        11
+#define MASK_TOPAZ_VEC_BUSY         (0x1<<SHIFT_TOPAZ_VEC_BUSY)
+
+#define TOPAZ_MTX_TXRPT_OFFSET         0xc
+#define TOPAZ_GUNIT_GVD_PSMI_GFX_OFFSET 0x20D0
+
+#define TOPAZ_GUNIT_READ32(offset)  ioread32(dev_priv->vdc_reg + offset)
+#define TOPAZ_READ_BITS(val, basename) \
+		(((val)&MASK_TOPAZ_##basename)>>SHIFT_TOPAZ_##basename)
+
+#define TOPAZ_WAIT_UNTIL_IDLE \
+    do { \
+	uint8_t tmp_poll_number = 0;\
+	uint32_t tmp_reg; \
+	if (topaz_priv->topaz_cmd_windex == WB_CCB_CTRL_RINDEX(dev_priv)) { \
+		tmp_reg = TOPAZ_GUNIT_READ32(TOPAZ_GUNIT_GVD_PSMI_GFX_OFFSET);\
+		if (0 != TOPAZ_READ_BITS(tmp_reg, VEC_BUSY)) { \
+			MTX_READ32(TOPAZ_MTX_TXRPT_OFFSET, &tmp_reg);\
+			while ((tmp_reg != 0x8ade0000) && \
+			       (tmp_poll_number++ < 10)) \
+				MTX_READ32(0xc, &tmp_reg); \
+			PSB_DEBUG_GENERAL(	\
+			  "TOPAZ: TXRPT reg remain: %x,poll %d times.\n",\
+			  tmp_reg, tmp_poll_number);\
+		} \
+	} \
+    } while (0)
+
+/* Register CR_BUFFER_SIDEBAND */
+#define MVEA_CR_BUFFER_SIDEBAND     0x017C
+#define MASK_MVEA_CR_CURR_MB_SBAND  0x00000003
+#define SHIFT_MVEA_CR_CURR_MB_SBAND 0
+#define REGNUM_MVEA_CR_CURR_MB_SBAND 0x017C
+
+#define MASK_MVEA_CR_ABOVE_PIX_IN_SBAND 0x0000000C
+#define SHIFT_MVEA_CR_ABOVE_PIX_IN_SBAND 2
+#define REGNUM_MVEA_CR_ABOVE_PIX_IN_SBAND 0x017C
+
+#define MASK_MVEA_CR_CURR_PARAM_SBAND 0x00000030
+#define SHIFT_MVEA_CR_CURR_PARAM_SBAND 4
+#define REGNUM_MVEA_CR_CURR_PARAM_SBAND 0x017C
+
+#define MASK_MVEA_CR_BELOW_PARAM_IN_SBAND 0x000000C0
+#define SHIFT_MVEA_CR_BELOW_PARAM_IN_SBAND 6
+#define REGNUM_MVEA_CR_BELOW_PARAM_IN_SBAND 0x017C
+
+#define MASK_MVEA_CR_ABOVE_PARAM_IN_SBAND 0x00000300
+#define SHIFT_MVEA_CR_ABOVE_PARAM_IN_SBAND 8
+#define REGNUM_MVEA_CR_ABOVE_PARAM_IN_SBAND 0x017C
+
+#define MASK_MVEA_CR_REF_SBAND      0x00000C00
+#define SHIFT_MVEA_CR_REF_SBAND     10
+#define REGNUM_MVEA_CR_REF_SBAND    0x017C
+
+#define MASK_MVEA_CR_RECON_SBAND    0x00003000
+#define SHIFT_MVEA_CR_RECON_SBAND   12
+#define REGNUM_MVEA_CR_RECON_SBAND  0x017C
+
+#define MASK_MVEA_CR_ABOVE_PIX_OUT_SBAND 0x0000C000
+#define SHIFT_MVEA_CR_ABOVE_PIX_OUT_SBAND 14
+#define REGNUM_MVEA_CR_ABOVE_PIX_OUT_SBAND 0x017C
+
+#define MASK_MVEA_CR_BELOW_PARAM_OUT_SBAND 0x00030000
+#define SHIFT_MVEA_CR_BELOW_PARAM_OUT_SBAND 16
+#define REGNUM_MVEA_CR_BELOW_PARAM_OUT_SBAND 0x017C
+
+#define MASK_MVEA_CR_ABOVE_PARAM_OUT_SBAND 0x000C0000
+#define SHIFT_MVEA_CR_ABOVE_PARAM_OUT_SBAND 18
+#define REGNUM_MVEA_CR_ABOVE_PARAM_OUT_SBAND 0x017C
+
+/* Register CR_IPE_JITTER_FACTOR */
+#define MVEA_CR_IPE_JITTER_FACTOR   0x0218
+#define MASK_MVEA_CR_IPE_JITTER_FACTOR 0x00000003
+#define SHIFT_MVEA_CR_IPE_JITTER_FACTOR 0
+#define REGNUM_MVEA_CR_IPE_JITTER_FACTOR 0x0218
+
+/* Register CR_MULTICORE_INT_STAT */
+#define TOPAZSC_CR_MULTICORE_INT_STAT 0x0000
+#define MASK_TOPAZSC_CR_INT_STAT_DMAC 0x80000000
+#define SHIFT_TOPAZSC_CR_INT_STAT_DMAC 31
+#define REGNUM_TOPAZSC_CR_INT_STAT_DMAC 0x0000
+
+#define MASK_TOPAZSC_CR_INT_STAT_CORES 0x7FFFFFFF
+#define SHIFT_TOPAZSC_CR_INT_STAT_CORES 0
+#define REGNUM_TOPAZSC_CR_INT_STAT_CORES 0x0000
+
+/* Register CR_MULTICORE_CORE_SEL_0 */
+#define TOPAZSC_CR_MULTICORE_CORE_SEL_0 0x0004
+#define MASK_TOPAZSC_CR_DMAC_CORE_SELECT 0x0000000F
+#define SHIFT_TOPAZSC_CR_DMAC_CORE_SELECT 0
+#define REGNUM_TOPAZSC_CR_DMAC_CORE_SELECT 0x0004
+
+#define MASK_TOPAZSC_CR_WRITES_CORE_ALL 0x80000000
+#define SHIFT_TOPAZSC_CR_WRITES_CORE_ALL 31
+#define REGNUM_TOPAZSC_CR_WRITES_CORE_ALL 0x0004
+
+/* Register CR_MULTICORE_CORE_SEL_1 */
+#define TOPAZSC_CR_MULTICORE_CORE_SEL_1 0x0008
+#define MASK_TOPAZSC_CR_RTM_PORT_CORE_SELECT 0x0000000F
+#define SHIFT_TOPAZSC_CR_RTM_PORT_CORE_SELECT 0
+#define REGNUM_TOPAZSC_CR_RTM_PORT_CORE_SELECT 0x0008
+
+/* Register CR_MULTICORE_RSVD0 */
+#define TOPAZSC_CR_MULTICORE_RSVD0  0x0010
+#define MASK_TOPAZSC_CR_RESERVED0   0xFFFFFFFF
+#define SHIFT_TOPAZSC_CR_RESERVED0  0
+#define REGNUM_TOPAZSC_CR_RESERVED0 0x0010
+
+/* Register CR_MULTICORE_CMD_FIFO_0 */
+#define TOPAZSC_CR_MULTICORE_CMD_FIFO_0 0x0014
+#define MASK_TOPAZSC_CR_CMD_FIFO_WDATA 0xFFFFFFFF
+#define SHIFT_TOPAZSC_CR_CMD_FIFO_WDATA 0
+#define REGNUM_TOPAZSC_CR_CMD_FIFO_WDATA 0x0014
+
+/* Register CR_MULTICORE_CMD_FIFO_1 */
+#define TOPAZSC_CR_MULTICORE_CMD_FIFO_1 0x0018
+#define MASK_TOPAZSC_CR_CMD_FIFO_SPACE 0x000000FF
+#define SHIFT_TOPAZSC_CR_CMD_FIFO_SPACE 0
+#define REGNUM_TOPAZSC_CR_CMD_FIFO_SPACE 0x0018
+
+#define MASK_TOPAZSC_CR_CMD_FIFO_FULL 0x00000100
+#define SHIFT_TOPAZSC_CR_CMD_FIFO_FULL 8
+#define REGNUM_TOPAZSC_CR_CMD_FIFO_FULL 0x0018
+
+/* Register CR_MULTICORE_IDLE_PWR_MAN */
+#define TOPAZSC_CR_MULTICORE_IDLE_PWR_MAN 0x001C
+#define MASK_TOPAZSC_CR_TOPAZ_IDLE_DISABLE 0x00000001
+#define SHIFT_TOPAZSC_CR_TOPAZ_IDLE_DISABLE 0
+#define REGNUM_TOPAZSC_CR_TOPAZ_IDLE_DISABLE 0x001C
+
+/* Register CR_CMC_PROC_ESB_ACCESS */
+#define MVEA_CR_CMC_PROC_ESB_ACCESS 0x011C
+#define MASK_MVEA_CR_CMC_PROC_ESB_REGION_NUMBER 0x0000001F
+#define SHIFT_MVEA_CR_CMC_PROC_ESB_REGION_NUMBER 0
+#define REGNUM_MVEA_CR_CMC_PROC_ESB_REGION_NUMBER 0x011C
+#define SIGNED_MVEA_CR_CMC_PROC_ESB_REGION_NUMBER 0
+
+
+/* Table CR_CMC_ESB_LOGICAL_REGION_SETUP_TABLE */
+
+/* Register CR_CMC_ESB_LOGICAL_REGION_SETUP */
+#define MVEA_CR_CMC_ESB_LOGICAL_REGION_SETUP(X) (0x0080 + (4 * (X)))
+#define MASK_MVEA_CR_CMC_ESB_REGION_VALID 0x80000000
+#define SHIFT_MVEA_CR_CMC_ESB_REGION_VALID 31
+#define REGNUM_MVEA_CR_CMC_ESB_REGION_VALID 0x0080
+#define SIGNED_MVEA_CR_CMC_ESB_REGION_VALID 0
+
+#define MASK_MVEA_CR_CMC_ESB_REGION_TYPE 0x60000000
+#define SHIFT_MVEA_CR_CMC_ESB_REGION_TYPE 29
+#define REGNUM_MVEA_CR_CMC_ESB_REGION_TYPE 0x0080
+#define SIGNED_MVEA_CR_CMC_ESB_REGION_TYPE 0
+
+#define MASK_MVEA_CR_CMC_ESB_REGION_LOGICAL_WIDTH 0x00F00000
+#define SHIFT_MVEA_CR_CMC_ESB_REGION_LOGICAL_WIDTH 20
+#define REGNUM_MVEA_CR_CMC_ESB_REGION_LOGICAL_WIDTH 0x0080
+#define SIGNED_MVEA_CR_CMC_ESB_REGION_LOGICAL_WIDTH 0
+
+#define MASK_MVEA_CR_CMC_ESB_REGION_LOGICAL_OFFSET_X 0x000F0000
+#define SHIFT_MVEA_CR_CMC_ESB_REGION_LOGICAL_OFFSET_X 16
+#define REGNUM_MVEA_CR_CMC_ESB_REGION_LOGICAL_OFFSET_X 0x0080
+#define SIGNED_MVEA_CR_CMC_ESB_REGION_LOGICAL_OFFSET_X 0
+
+#define MASK_MVEA_CR_CMC_ESB_REGION_PHYS_HEIGHT 0x0000F000
+#define SHIFT_MVEA_CR_CMC_ESB_REGION_PHYS_HEIGHT 12
+#define REGNUM_MVEA_CR_CMC_ESB_REGION_PHYS_HEIGHT 0x0080
+#define SIGNED_MVEA_CR_CMC_ESB_REGION_PHYS_HEIGHT 0
+
+#define MASK_MVEA_CR_CMC_ESB_REGION_PHYS_WIDTH 0x00000F00
+#define SHIFT_MVEA_CR_CMC_ESB_REGION_PHYS_WIDTH 8
+#define REGNUM_MVEA_CR_CMC_ESB_REGION_PHYS_WIDTH 0x0080
+#define SIGNED_MVEA_CR_CMC_ESB_REGION_PHYS_WIDTH 0
+
+#define MASK_MVEA_CR_CMC_ESB_REGION_PHYS_ORIGIN_Y 0x000000F0
+#define SHIFT_MVEA_CR_CMC_ESB_REGION_PHYS_ORIGIN_Y 4
+#define REGNUM_MVEA_CR_CMC_ESB_REGION_PHYS_ORIGIN_Y 0x0080
+#define SIGNED_MVEA_CR_CMC_ESB_REGION_PHYS_ORIGIN_Y 0
+
+#define MASK_MVEA_CR_CMC_ESB_REGION_PHYS_ORIGIN_X 0x0000000F
+#define SHIFT_MVEA_CR_CMC_ESB_REGION_PHYS_ORIGIN_X 0
+#define REGNUM_MVEA_CR_CMC_ESB_REGION_PHYS_ORIGIN_X 0x0080
+#define SIGNED_MVEA_CR_CMC_ESB_REGION_PHYS_ORIGIN_X 0
+
+
+/* Bit 31:16 is the count of remaining items to be transferred */
+/* Bit 0 is 1 if transfer State == 'do_nothing'*/
+#define MTX_CR_MTX_SYSC_CDMAS0      0x0348
+
+/* Bit 25:2 is the current core DMA transfer address.*/
+#define MTX_CR_MTX_SYSC_CDMAS1      0x034C
+
+/* Bit 31:0 is the data to be wroten */
+#define MTX_CR_MTX_SYSC_CDMAT       0x0350
+
+/* Number of entries in table CR_CMC_ESB_LOGICAL_REGION_SETUP_TABLE */
+
+#define MVEA_CR_CMC_ESB_LOGICAL_REGION_SETUP_TABLE_SIZE_UINT32 32
+#define MVEA_CR_CMC_ESB_LOGICAL_REGION_SETUP_TABLE_NUM_ENTRIES 32
+
+#define ESB_HWSYNC      10
+#define ESB_POS_MANGLER_ORIGINX(x) ((x) >> 3)
+#define ESB_POS_MANGLER(x) ((x) >> 3)
+
+#define ESB_SIZE_MANGLER(x) (((x) >> 3) - 1)
+#define REGION_TYPE_LINEAR 0
+
+#define TOPAZSC_ESB_REGION_Y_MAX		(46)
+#define TOPAZSC_ESB_REGION_X_MAX		(64)
+
+#define TOPAZSC_ESB_REGION_HEIGH		(48)
+#define TOPAZSC_ESB_REGION_WIDTH		(64)
+
+
+#define REG_OFFSET_COMMS_CORE_HOST	0x00070000
+#define REG_SIZE_COMMS_CORE_HOST	0x00070000
+#define REG_OFFSET_TOPAZ_COMMS_HOST	0x00007000
+#define REG_SIZE_TOPAZ_COMMS_HOST	0x00001000
+#define TOPAZ_COMMS_START 0x07000
+/* Register CR_STAT_1 */
+#define TOPAZ_COMMS_CR_STAT_1(X)    (0x0018 + (192 * (X)))
+#define MASK_TOPAZ_COMMS_CR_STAT_DATA_1 0xFFFFFFFF
+#define SHIFT_TOPAZ_COMMS_CR_STAT_DATA_1 0
+#define REGNUM_TOPAZ_COMMS_CR_STAT_DATA_1 0x0018
+
+/* Register CR_STAT_0 */
+#define TOPAZ_COMMS_CR_STAT_0(X)    (0x0014 + (192 * (X)))
+#define MASK_TOPAZ_COMMS_CR_STAT_DATA_0 0xFFFFFFFF
+#define SHIFT_TOPAZ_COMMS_CR_STAT_DATA_0 0
+#define REGNUM_TOPAZ_COMMS_CR_STAT_DATA_0 0x0014
+
+/* Register CR_MTX_STATUS */
+#define TOPAZ_COMMS_CR_MTX_STATUS(X) (0x0010 + (192 * (X)))
+#define MASK_TOPAZ_COMMS_FLAGS_WORD 0xFFFFFFFF
+#define SHIFT_TOPAZ_COMMS_FLAGS_WORD 0
+#define REGNUM_TOPAZ_COMMS_FLAGS_WORD 0x0010
+
+/* Register CR_CMD_WB_VAL */
+#define TOPAZ_COMMS_CR_CMD_WB_VAL(X) (0x000C + (192 * (X)))
+#define MASK_TOPAZ_COMMS_CR_WB_VAL  0xFFFFFFFF
+#define SHIFT_TOPAZ_COMMS_CR_WB_VAL 0
+#define REGNUM_TOPAZ_COMMS_CR_WB_VAL 0x000C
+
+/* Register CR_CMD_WB_ADDR */
+#define TOPAZ_COMMS_CR_CMD_WB_ADDR(X) (0x0008 + (192 * (X)))
+#define MASK_TOPAZ_COMMS_CR_WB_ADDR 0xFFFFFFFF
+#define SHIFT_TOPAZ_COMMS_CR_WB_ADDR 0
+#define REGNUM_TOPAZ_COMMS_CR_WB_ADDR 0x0008
+
+/* Register CR_CMD_DATA_ADDR */
+#define TOPAZ_COMMS_CR_CMD_DATA_ADDR(X) (0x0004 + (192 * (X)))
+#define MASK_TOPAZ_COMMS_CR_DATA_ADDR 0xFFFFFFFF
+#define SHIFT_TOPAZ_COMMS_CR_DATA_ADDR 0
+#define REGNUM_TOPAZ_COMMS_CR_DATA_ADDR 0x0004
+/* Register CR_TOPAZ_HW_CFG */
+#define TOPAZ_CR_TOPAZ_HW_CFG       0x0050
+#define MASK_TOPAZ_CR_NUM_CORES_SUPPORTED 0x0000001F
+#define SHIFT_TOPAZ_CR_NUM_CORES_SUPPORTED 0
+#define REGNUM_TOPAZ_CR_NUM_CORES_SUPPORTED 0x0050
+/* Register CR_CMD_WORD */
+#define TOPAZ_COMMS_CR_CMD_WORD(X)  (0x0000 + (192 * (X)))
+#define MASK_TOPAZ_COMMS_CR_MTX_CMD_ID 0x0000007F
+#define SHIFT_TOPAZ_COMMS_CR_MTX_CMD_ID 0
+#define REGNUM_TOPAZ_COMMS_CR_MTX_CMD_ID 0x0000
+#define MTX_CR_MTX_SYSC_CDMAT       0x0350
+#define MASK_MTX_TRANSFERDATA       0xFFFFFFFF
+#define SHIFT_MTX_TRANSFERDATA      0
+#define REGNUM_MTX_TRANSFERDATA     0x0350
+
+
+#define COMMS_WRITE32(offset, value, core) \
+		MM_WRITE32(TOPAZ_COMMS_START + TOPAZ_CORE_REG_BASE(core), \
+				offset, value)
+#define COMMS_READ32(offset, pointer, core) \
+		MM_READ32(TOPAZ_COMMS_START + TOPAZ_CORE_REG_BASE(core), \
+				offset, pointer)
+
+#define TOPAZ_CORE_NUMBER_SET_OFFSET (0x100 + (2 << 2))
+
+#define REG_SIZE_COMMS_CORE_HOST	0x00070000
+#define MVEASETUPESBREGION(_OriginX_, _OriginY_, _PhysWidth_,\
+		_PhysHeight_, _LogWidth_, _RegType_)\
+((F_ENCODE(ESB_POS_MANGLER_ORIGINX(_OriginX_),\
+	   MVEA_CR_CMC_ESB_REGION_PHYS_ORIGIN_X))\
+ | (F_ENCODE(ESB_POS_MANGLER(_OriginY_),\
+		 MVEA_CR_CMC_ESB_REGION_PHYS_ORIGIN_Y))\
+ | (F_ENCODE(ESB_SIZE_MANGLER(_PhysWidth_),\
+		 MVEA_CR_CMC_ESB_REGION_PHYS_WIDTH))\
+ | (F_ENCODE(ESB_SIZE_MANGLER(_PhysHeight_),\
+		 MVEA_CR_CMC_ESB_REGION_PHYS_HEIGHT))\
+ | (F_ENCODE(ESB_SIZE_MANGLER(_LogWidth_),\
+		 MVEA_CR_CMC_ESB_REGION_LOGICAL_WIDTH))\
+ | (F_ENCODE(_RegType_, MVEA_CR_CMC_ESB_REGION_TYPE))\
+ | (F_ENCODE(1, MVEA_CR_CMC_ESB_REGION_VALID)))
+
+
+#define TOPAZ_MULTICORE_START		0x00000000
+
+#ifdef TOPAZ_PDUMP
+#define TOPAZ_MULTICORE_WRITE32(offset, value) \
+	do { \
+		MM_WRITE32(TOPAZ_MULTICORE_START, offset, value); \
+		DRM_ERROR("TOPAZ_PDUMP: MULTICORE, REG_WT %x %x\n", offset, value);\
+	} while (0)
+
+#define TOPAZ_MULTICORE_READ32(offset, pointer) \
+	do { \
+		MM_READ32(TOPAZ_MULTICORE_START, offset, pointer); \
+		DRM_ERROR("TOPAZ_PDUMP: MULTICORE, REG_RD %x %x\n", offset, *(uint32_t *)pointer); \
+	} while (0)
+#else
+#define TOPAZ_MULTICORE_WRITE32(offset, value) \
+	MM_WRITE32(TOPAZ_MULTICORE_START, offset, value)
+#define TOPAZ_MULTICORE_READ32(offset, pointer) \
+	MM_READ32(TOPAZ_MULTICORE_START, offset, pointer)
+#endif
+
+#define MTX_DMA_BURSTSIZE_BYTES 32
+#define MTX_DMA_ALIGNMENT_BYTES 16
+
+#define MTX_DMA_MEMORY_BASE (0x82880000)
+#define PC_START_ADDRESS    (0x80900000)
+
+#define MAX_TOPAZ_CMD_COUNT	(0x1000) /* max syncStatus value used*/
+
+/* It's the default value after reset */
+#define VEC_CG_DIS_MASK (0x007fffff)
+
+#define MTX_WRITEBACK_DATASIZE_ROUND 4
+
+#define TOPAZ_MTX_WB_READ32(base, core, word, pointer) \
+    do { \
+	*(uint32_t *)(pointer) = *((uint32_t *)(base) + \
+		(core) * MTX_WRITEBACK_DATASIZE_ROUND + (word)); \
+    } while (0)
+
+#define TOPAZ_MTX_WB_WRITE32(base, core, word, value) \
+    do { \
+	 *((uint32_t *)(base) + (core) * MTX_WRITEBACK_DATASIZE_ROUND + (word)) \
+	    = value; \
+    } while (0)
+
+
+#define TOPAZ_MTX_WB_OFFSET(base, core) \
+    ((base) + (core)*MTX_WRITEBACK_DATASIZE_ROUND*4)
+
+#define POLL_TOPAZ_FREE_FIFO_SPACE(word_num, delay, retries, pointer) \
+    do { \
+	uint32_t free_space = 0, i; \
+	for (i = 0; i < retries; i++) { \
+	    TOPAZ_MULTICORE_READ32(TOPAZSC_CR_MULTICORE_CMD_FIFO_1, &free_space);\
+	    free_space &= MASK_TOPAZSC_CR_CMD_FIFO_SPACE; \
+	    if (free_space >= word_num) \
+		break; \
+	    else \
+		PSB_UDELAY(delay); \
+	} \
+	if (i >= retries) { \
+	    ret  = -1;\
+	    DRM_ERROR("TOPAZ: poll FIFO free space failed (%d words free)!\n", free_space); \
+	} \
+	else \
+	    ret = 0; \
+	*pointer = free_space; \
+    } while (0)
+
+
+/* **************** DMAC define **************** */
+enum  DMAC_eBSwap {
+	DMAC_BSWAP_NO_SWAP = 0x0,/*  No byte swapping will be performed. */
+	DMAC_BSWAP_REVERSE = 0x1,/*  Byte order will be reversed. */
+};
+
+enum DMAC_ePW {
+	DMAC_PWIDTH_32_BIT = 0x0,/*  Peripheral width 32-bit. */
+	DMAC_PWIDTH_16_BIT = 0x1,/*  Peripheral width 16-bit. */
+	DMAC_PWIDTH_8_BIT = 0x2,/*  Peripheral width 8-bit. */
+};
+
+enum DMAC_eAccDel {
+	DMAC_ACC_DEL_0 = 0x0,	/*  Access delay zero clock cycles */
+	DMAC_ACC_DEL_256 = 0x1,	/*  Access delay 256 clock cycles */
+	DMAC_ACC_DEL_512 = 0x2,	/*  Access delay 512 clock cycles */
+	DMAC_ACC_DEL_768 = 0x3,	/*  Access delay 768 clock cycles */
+	DMAC_ACC_DEL_1024 = 0x4,/*  Access delay 1024 clock cycles */
+	DMAC_ACC_DEL_1280 = 0x5,/*  Access delay 1280 clock cycles */
+	DMAC_ACC_DEL_1536 = 0x6,/*  Access delay 1536 clock cycles */
+	DMAC_ACC_DEL_1792 = 0x7,/*  Access delay 1792 clock cycles */
+};
+
+enum  DMAC_eBurst {
+	DMAC_BURST_0 = 0x0,	/*  burst size of 0 */
+	DMAC_BURST_1 = 0x1,	/*  burst size of 1 */
+	DMAC_BURST_2 = 0x2,	/*  burst size of 2 */
+	DMAC_BURST_3 = 0x3,	/*  burst size of 3 */
+	DMAC_BURST_4 = 0x4,	/*  burst size of 4 */
+	DMAC_BURST_5 = 0x5,	/*  burst size of 5 */
+	DMAC_BURST_6 = 0x6,     /*  burst size of 6 */
+	DMAC_BURST_7 = 0x7,	/*  burst size of 7 */
+};
+
+/* commands for topaz,shared with user space driver */
+enum drm_pnw_topaz_cmd {
+	MTX_CMDID_NULL = 0,
+	MTX_CMDID_SHUTDOWN = 1,
+	MTX_CMDID_START_PIC = 2,
+	MTX_CMDID_DO_HEADER = 3,
+	MTX_CMDID_ENCODE_SLICE = 4,
+	MTX_CMDID_END_PIC = 5,
+	MTX_CMDID_FLUSH = 6,
+	/*JPEG commands*/
+	MTX_CMDID_SETQUANT = 7,
+	MTX_CMDID_RESET_ENCODE = 8,
+	MTX_CMDID_ISSUEBUFF = 9,
+	MTX_CMDID_SETUP = 10,
+
+	MTX_CMDID_PAD = 0x7a, /*Will be ignored*/
+	MTX_CMDID_SW_WRITEREG = 0x7b,
+	MTX_CMDID_SW_LEAVE_LOWPOWER = 0x7c,
+	MTX_CMDID_SW_ENTER_LOWPOWER = 0x7e,
+	MTX_CMDID_SW_NEW_CODEC = 0x7f
+};
+
+struct topaz_cmd_header {
+	union {
+		struct {
+			unsigned long id:7;
+			unsigned long enable_interrupt:1;
+			unsigned long core:8;
+			unsigned long seq:16;
+		};
+		uint32_t val;
+	};
+};
+
+/*
+ * codecs topaz supports,shared with user space driver.
+ * PNW_TOPAZ_CODEC_NUM_MAX should be modified if the number
+ * of codecs is changed
+ */
+enum drm_pnw_topaz_codec {
+	IMG_CODEC_JPEG = 0,
+	IMG_CODEC_H264_NO_RC,
+	IMG_CODEC_H264_VBR,
+	IMG_CODEC_H264_CBR,
+	IMG_CODEC_H263_NO_RC,
+	IMG_CODEC_H263_VBR,
+	IMG_CODEC_H263_CBR,
+	IMG_CODEC_MPEG4_NO_RC,
+	IMG_CODEC_MPEG4_VBR,
+	IMG_CODEC_MPEG4_CBR,
+	IMG_CODEC_H264_VCM,
+};
+
+
+typedef enum {
+	MTX_WRITEBACK_CMDWORD = 0, /* Command word executed by MTX */
+	MTX_WRITEBACK_VALUE = 1, /* Writeback value returned by command */
+	MTX_WRITEBACK_FLAGSWORD_0 = 2, /* Flags word indicating MTX status */
+	MTX_WRITEBACK_BITSWRITTEN = 3, /* number of bits written out */
+	MTX_WRITEBACK_DATASIZE /* End marker for enum */
+} MTX_eWriteBackData;
+
+/* pnw_topazinit.c */
+int pnw_topaz_reset(struct drm_psb_private *dev_priv);
+int pnw_topaz_init_fw(struct drm_device *dev);
+int pnw_topaz_setup_fw(struct drm_device *dev, enum drm_pnw_topaz_codec codec);
+int pnw_topaz_wait_for_register(struct drm_psb_private *dev_priv,
+				uint32_t addr, uint32_t value,
+				uint32_t enable);
+void pnw_topaz_mmu_flushcache(struct drm_psb_private *dev_priv);
+
+uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver);
+int pnw_topaz_kick_null_cmd(struct drm_psb_private *dev_priv,
+			    uint32_t core_id,
+			    uint32_t wb_offset,
+			    uint32_t sync_req,
+			    uint8_t irq_enable);
+int pnw_wait_on_sync(struct drm_psb_private *dev_priv,
+		     uint32_t sync_seq,
+		     uint32_t *sync_p);
+
+static inline char *cmd_to_string(int cmd_id)
+{
+	switch (cmd_id) {
+	case MTX_CMDID_START_PIC:
+		return "MTX_CMDID_START_PIC";
+	case MTX_CMDID_END_PIC:
+		return "MTX_CMDID_END_PIC";
+	case MTX_CMDID_DO_HEADER:
+		return "MTX_CMDID_DO_HEADER";
+	case MTX_CMDID_ENCODE_SLICE:
+		return "MTX_CMDID_ENCODE_SLICE";
+	case MTX_CMDID_SW_NEW_CODEC:
+		return "MTX_CMDID_SW_NEW_CODEC";
+	case MTX_CMDID_SETQUANT:
+		return "MTX_CMDID_SETQUANT";
+	case MTX_CMDID_RESET_ENCODE:
+		return "MTX_CMDID_RESET_ENCODE";
+	case MTX_CMDID_ISSUEBUFF:
+		return "MTX_CMDID_ISSUEBUFF";
+	case MTX_CMDID_SETUP:
+		return "MTX_CMDID_SETUP";
+	case MTX_CMDID_SW_WRITEREG:
+		return "MTX_CMDID_SW_WRITEREG";
+	default:
+		return "Undefined command";
+
+	}
+}
+
+static inline char *codec_to_string(int codec)
+{
+	switch (codec) {
+	case IMG_CODEC_JPEG:
+		return "JPEG";
+	case IMG_CODEC_H264_NO_RC:
+		return "H264_NO_RC";
+	case IMG_CODEC_H264_VBR:
+		return "H264_VBR";
+	case IMG_CODEC_H264_CBR:
+		return "H264_CBR";
+	case IMG_CODEC_H263_NO_RC:
+		return "H263_NO_RC";
+	case IMG_CODEC_H263_VBR:
+		return "H263_VBR";
+	case IMG_CODEC_H263_CBR:
+		return "H263_CBR";
+	case IMG_CODEC_MPEG4_NO_RC:
+		return "MPEG4_NO_RC";
+	case IMG_CODEC_MPEG4_VBR:
+		return "MPEG4_VBR";
+	case IMG_CODEC_MPEG4_CBR:
+		return "MPEG4_CBR";
+	case IMG_CODEC_H264_VCM:
+		return "H264_VCM";
+	default:
+		return "Undefined codec";
+	}
+}
+
+static inline void pnw_topaz_clearirq(struct drm_device *dev,
+				      uint32_t clear_topaz)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	PSB_DEBUG_GENERAL("TOPAZ: clear IRQ\n");
+	if (clear_topaz != 0)
+		TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTCLEAR, clear_topaz, 0);
+
+	/* PSB_WVDC32(_PNW_IRQ_TOPAZ_FLAG, PSB_INT_IDENTITY_R); */
+}
+
+static inline uint32_t pnw_topaz_queryirq(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	uint32_t val, /* iir, */ clear = 0;
+	struct pnw_topaz_private *topaz_priv = dev_priv->topaz_private;
+
+	TOPAZ_READ32(TOPAZ_CR_IMG_TOPAZ_INTSTAT, &val, 0);
+	/* iir = PSB_RVDC32(PSB_INT_IDENTITY_R); */
+
+	(void) topaz_priv;
+
+	if ((val == 0) /* && (iir == 0) */) {/* no interrupt */
+		PSB_DEBUG_GENERAL("TOPAZ: no interrupt,IIR=TOPAZ_INTSTAT=0\n");
+		return 0;
+	}
+
+	PSB_DEBUG_IRQ("TOPAZ:TOPAZ_INTSTAT=0x%08x\n", val);
+
+	if (val & (1 << 31))
+		PSB_DEBUG_IRQ("TOPAZ:IRQ pin activated,cmd seq=0x%04x,"
+			      "sync seq: 0x%08x\n",
+			      dev_priv->sequence[LNC_ENGINE_ENCODE],
+			      *((uint32_t *)topaz_priv->topaz_mtx_wb + MTX_WRITEBACK_VALUE));
+	else
+		PSB_DEBUG_IRQ("TOPAZ:IRQ pin not activated,cmd seq=0x%04x,"
+			      "sync seq: 0x%08x\n",
+			      dev_priv->sequence[LNC_ENGINE_ENCODE],
+			      *((uint32_t *)topaz_priv->topaz_mtx_wb + MTX_WRITEBACK_VALUE));
+
+	if (val & 0x8) {
+		uint32_t mmu_status, mmu_req;
+
+		TOPAZ_READ32(TOPAZ_CR_MMU_STATUS, &mmu_status, 0);
+		TOPAZ_READ32(TOPAZ_CR_MMU_MEM_REQ, &mmu_req, 0);
+
+		PSB_DEBUG_IRQ("TOPAZ: detect a page fault interrupt, "
+			      "address=0x%08x,mem req=0x%08x\n",
+			      mmu_status, mmu_req);
+		clear |= F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MMU_FAULT);
+	}
+
+	if (val & 0x4) {
+		PSB_DEBUG_IRQ("TOPAZ: detect a MTX_HALT interrupt\n");
+		clear |= F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX_HALT);
+	}
+
+	if (val & 0x2) {
+		PSB_DEBUG_IRQ("TOPAZ: detect a MTX interrupt\n");
+		clear |= F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX);
+	}
+
+	if (val & 0x1) {
+		PSB_DEBUG_IRQ("TOPAZ: detect a MVEA interrupt\n");
+		clear |= F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MVEA);
+	}
+
+	return clear;
+}
+
+/*Set whether the write operation take effect on all cores
+ * or only the specific one*/
+static inline void topaz_set_mtx_target(struct drm_psb_private *dev_priv,
+					uint32_t core, uint8_t bTargetAll)
+{
+	TOPAZ_MULTICORE_WRITE32(
+		TOPAZSC_CR_MULTICORE_CORE_SEL_0,
+		F_ENCODE((bTargetAll), TOPAZSC_CR_WRITES_CORE_ALL) |
+		F_ENCODE(core, TOPAZSC_CR_DMAC_CORE_SELECT));
+
+}
+
+#endif	/* _PNW_TOPAZ_H_ */
diff --git a/drivers/external_drivers/intel_media/video/encode/pnw_topazinit.c b/drivers/external_drivers/intel_media/video/encode/pnw_topazinit.c
new file mode 100644
index 0000000..9255b3a
--- /dev/null
+++ b/drivers/external_drivers/intel_media/video/encode/pnw_topazinit.c
@@ -0,0 +1,2232 @@
+/**
+ * file pnw_topazinit.c
+ * TOPAZ initialization and mtx-firmware upload
+ *
+ */
+
+/**************************************************************************
+ *
+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
+ * Copyright (c) Imagination Technologies Limited, UK
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *      Shengquan(Austin) Yuan <shengquan.yuan@intel.com>
+ *      Elaine Wang <elaine.wang@intel.com>
+ *      Li Zeng <li.zeng@intel.com>
+ **************************************************************************/
+
+/* NOTE: (READ BEFORE REFINE CODE)
+ * 1. The FIRMWARE's SIZE is measured by byte, we have to pass the size
+ * measured by word to DMAC.
+ *
+ *
+ *
+ */
+
+/* include headers */
+
+/* #define DRM_DEBUG_CODE 2 */
+
+#include <linux/firmware.h>
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+
+#include "psb_drv.h"
+#include "pnw_topaz.h"
+#include "psb_powermgmt.h"
+#include "pnw_topaz_hw_reg.h"
+
+#ifdef CONFIG_MDFD_GL3
+#include "mdfld_gl3.h"
+#endif
+
+/* WARNING: this define is very important */
+#define RAM_SIZE (1024 * 24)
+#define MAX_TOPAZ_DATA_SIZE (12 * 4096)
+#define MAX_TOPAZ_TEXT_SIZE (12 * 4096)
+#define MAX_TOPAZ_RAM_SIZE (12 * 4096)
+
+#define	MEMORY_ONLY 0
+#define	MEM_AND_CACHE 1
+#define CACHE_ONLY 2
+
+#define FIRMWARE_NAME "topazsc_fw.bin"
+
+extern int drm_psb_msvdx_tiling;
+/* static function define */
+static int topaz_upload_fw(struct drm_device *dev,
+			   enum drm_pnw_topaz_codec codec,
+			   uint32_t core_id);
+
+#define UPLOAD_FW_BY_DMA 1
+
+#if UPLOAD_FW_BY_DMA
+static int topaz_dma_transfer(struct drm_psb_private *dev_priv,
+			      uint32_t channel, uint32_t src_phy_addr,
+			      uint32_t offset, uint32_t dst_addr,
+			      uint32_t byte_num, uint32_t is_increment,
+			      uint32_t is_write);
+#else
+static void topaz_mtx_upload_by_register(struct drm_device *dev,
+		uint32_t mtx_mem, uint32_t addr,
+		uint32_t size,
+		struct ttm_buffer_object *buf,
+		uint32_t core);
+#endif
+
+static void get_mtx_control_from_dash(struct drm_psb_private *dev_priv,
+				      uint32_t core);
+static void release_mtx_control_from_dash(struct drm_psb_private *dev_priv,
+		uint32_t core);
+static void pnw_topaz_mmu_hwsetup(struct drm_psb_private *dev_priv,
+				  uint32_t core_id);
+static int  mtx_dma_read(struct drm_device *dev, uint32_t core,
+			 uint32_t source_addr, uint32_t size);
+static int  mtx_dma_write(struct drm_device *dev,
+			  uint32_t core);
+static void pnw_topaz_restore_bias_table(struct drm_psb_private *dev_priv,
+		int core);
+
+/* Reset the encode system buffer registers.*/
+static int pnw_topazsc_reset_ESB(struct drm_psb_private *dev_priv, int core_id)
+{
+	int x_pos, y_pos, i;
+
+	MVEA_WRITE32(MVEA_CR_CMC_ESB_LOGICAL_REGION_SETUP(ESB_HWSYNC),
+		     MVEASETUPESBREGION(0, 0,
+					TOPAZSC_ESB_REGION_HEIGH,
+					TOPAZSC_ESB_REGION_WIDTH,
+					TOPAZSC_ESB_REGION_HEIGH,
+					REGION_TYPE_LINEAR),
+		     core_id);
+	MVEA_WRITE32(MVEA_CR_CMC_PROC_ESB_ACCESS, ESB_HWSYNC, core_id);
+
+	i = 0;
+	for (y_pos = 0; y_pos < TOPAZSC_ESB_REGION_Y_MAX; y_pos++) {
+		for (x_pos = 0; x_pos < TOPAZSC_ESB_REGION_X_MAX;
+		     x_pos += 4, i += 4) {
+			MVEA_ESB_WRITE32(i, 0, core_id);
+		}
+	}
+
+	MVEA_WRITE32(MVEA_CR_CMC_ESB_LOGICAL_REGION_SETUP(ESB_HWSYNC),
+		     MVEASETUPESBREGION(0, TOPAZSC_ESB_REGION_WIDTH,
+					TOPAZSC_ESB_REGION_HEIGH,
+					TOPAZSC_ESB_REGION_WIDTH,
+					TOPAZSC_ESB_REGION_HEIGH,
+					REGION_TYPE_LINEAR),
+		     core_id);
+
+	i = 0;
+	for (y_pos = 0; y_pos < TOPAZSC_ESB_REGION_Y_MAX; y_pos++) {
+		for (x_pos = 0; x_pos < TOPAZSC_ESB_REGION_X_MAX;
+		     x_pos += 4, i += 4) {
+			MVEA_ESB_WRITE32(i, 0, core_id);
+		}
+	}
+
+	MVEA_WRITE32(MVEA_CR_CMC_PROC_ESB_ACCESS, 0, core_id);
+	return 0;
+}
+
+int pnw_error_dump_reg(struct drm_psb_private *dev_priv, int core_id)
+{
+	uint32_t reg_val;
+	int i;
+	DRM_ERROR("DMA Register value dump:\n");
+	for (i = 0; i < 8; i++) {
+		DMAC_READ32(i * 4, &reg_val);
+		DRM_ERROR("DMAC REG%d: 0x%08x\n", i, reg_val);
+	}
+	TOPAZ_MULTICORE_READ32(
+		TOPAZSC_CR_MULTICORE_CORE_SEL_0, &reg_val);
+	DRM_ERROR("TOPAZSC_CR_MULTICORE_CORE_SEL_0 0x%08x\n", reg_val);
+	MTX_READ32(MTX_CR_MTX_SYSC_CDMAA, &reg_val, core_id);
+	DRM_ERROR("MTX_CR_MTX_SYSC_CDMAA 0x%08x\n", reg_val);
+	MTX_READ32(MTX_CR_MTX_SYSC_CDMAC, &reg_val, core_id);
+	DRM_ERROR("MTX_CR_MTX_SYSC_CDMAC 0x%08x\n", reg_val);
+
+	MTX_READ32(MTX_CR_MTX_SYSC_CDMAS0 , &reg_val, core_id);
+	DRM_ERROR("MTX_CR_MTX_SYSC_CDMAS0 0x%08x\n", reg_val);
+	MTX_READ32(MTX_CR_MTX_SYSC_CDMAS1, &reg_val, core_id);
+	DRM_ERROR("MTX_CR_MTX_SYSC_CDMAS1 0x%08x\n", reg_val);
+	MTX_READ32(MTX_CR_MTX_SYSC_CDMAT, &reg_val, core_id);
+	DRM_ERROR("MTX_CR_MTX_SYSC_CDMAT 0x%08x\n", reg_val);
+	for (i = 0; i < 6; i++) {
+		TOPAZ_READ32(0x1c + i * 4, &reg_val, core_id);
+		DRM_ERROR("MMU REG %d value 0x%08x\n", i, reg_val);
+	}
+
+	topaz_read_core_reg(dev_priv, core_id, TOPAZ_MTX_PC, &reg_val);
+	DRM_ERROR("PC pointer: 0x%08x\n", reg_val);
+
+	TOPAZ_MULTICORE_READ32(TOPAZSC_CR_MULTICORE_CMD_FIFO_1,
+			&reg_val);
+	reg_val &= MASK_TOPAZSC_CR_CMD_FIFO_SPACE;
+	DRM_ERROR("TOPAZSC: Free words in command FIFO %d\n", reg_val);
+
+	return 0;
+}
+
+int pnw_topaz_wait_for_register(struct drm_psb_private *dev_priv,
+				uint32_t addr, uint32_t value, uint32_t mask)
+{
+	uint32_t tmp;
+	uint32_t count = 10000;
+
+	/* # poll topaz register for certain times */
+	while (count) {
+		/* #.# read */
+		MM_READ32(addr, 0, &tmp);
+
+		if (value == (tmp & mask))
+			return 0;
+
+		/* #.# delay and loop */
+		PSB_UDELAY(100);/* derive from reference driver */
+		--count;
+	}
+
+	/* # now waiting is timeout, return 1 indicat failed */
+	/* XXX: testsuit means a timeout 10000 */
+
+	DRM_ERROR("TOPAZ:time out to poll addr(0x%x) expected value(0x%08x), "
+		  "actual 0x%08x (0x%08x & 0x%08x)\n",
+		  addr, value, tmp & mask, tmp, mask);
+
+	return -EBUSY;
+
+}
+
+static ssize_t psb_topaz_pmstate_show(struct device *dev,
+				      struct device_attribute *attr, char *buf)
+{
+	struct drm_device *drm_dev = dev_get_drvdata(dev);
+	struct drm_psb_private *dev_priv;
+	struct pnw_topaz_private *topaz_priv;
+	unsigned int pmstate;
+	unsigned long flags;
+	int ret = -EINVAL;
+
+	if (drm_dev == NULL)
+		return 0;
+
+	dev_priv = drm_dev->dev_private;
+	topaz_priv = dev_priv->topaz_private;
+	pmstate = topaz_priv->pmstate;
+
+	pmstate = topaz_priv->pmstate;
+	spin_lock_irqsave(&topaz_priv->topaz_lock, flags);
+	ret = snprintf(buf, 64, "%s, gating count 0x%08x\n",
+		       (pmstate == PSB_PMSTATE_POWERUP) ?
+		       "powerup" : "powerdown", topaz_priv->pm_gating_count);
+	spin_unlock_irqrestore(&topaz_priv->topaz_lock, flags);
+
+	return ret;
+}
+
+static DEVICE_ATTR(topaz_pmstate, 0444, psb_topaz_pmstate_show, NULL);
+
+
+/* this function finish the first part of initialization, the rest
+ * should be done in pnw_topaz_setup_fw
+ */
+int pnw_topaz_init(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct ttm_bo_device *bdev = &dev_priv->bdev;
+	uint32_t core_id, core_rev;
+	int ret = 0, n;
+	bool is_iomem;
+	struct pnw_topaz_private *topaz_priv;
+	void *topaz_bo_virt;
+
+	PSB_DEBUG_GENERAL("TOPAZ: init topazsc data structures\n");
+	topaz_priv = kmalloc(sizeof(struct pnw_topaz_private), GFP_KERNEL);
+	if (topaz_priv == NULL)
+		return -1;
+
+	dev_priv->topaz_private = topaz_priv;
+	memset(topaz_priv, 0, sizeof(struct pnw_topaz_private));
+
+	/* get device --> drm_device --> drm_psb_private --> topaz_priv
+	 * for psb_topaz_pmstate_show: topaz_pmpolicy
+	 * if not pci_set_drvdata, can't get drm_device from device
+	 */
+	pci_set_drvdata(dev->pdev, dev);
+	if (device_create_file(&dev->pdev->dev,
+			       &dev_attr_topaz_pmstate))
+		DRM_ERROR("TOPAZ: could not create sysfs file\n");
+	topaz_priv->sysfs_pmstate = sysfs_get_dirent(
+					    dev->pdev->dev.kobj.sd,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
+					    NULL,
+#endif
+					    "topaz_pmstate");
+
+
+	topaz_priv = dev_priv->topaz_private;
+	topaz_priv->dev = dev;
+	INIT_DELAYED_WORK(&topaz_priv->topaz_suspend_wq,
+			  &psb_powerdown_topaz);
+
+	/* # initialize comand topaz queueing [msvdx_queue] */
+	INIT_LIST_HEAD(&topaz_priv->topaz_queue);
+	/* # spin lock init? CHECK spin lock usage [msvdx_lock] */
+	spin_lock_init(&topaz_priv->topaz_lock);
+
+	/* # topaz status init. [msvdx_busy] */
+	topaz_priv->topaz_busy = 0;
+	/*Initial topaz_cmd_count should be larger than initial
+	 *writeback value*/
+	topaz_priv->topaz_cmd_count = 1;
+	topaz_priv->topaz_fw_loaded = 0;
+	/* FIXME: workaround since JPEG firmware is not ready */
+	topaz_priv->topaz_cur_codec = 0;
+	topaz_priv->topaz_hw_busy = 1;
+	/* # gain write back structure,we may only need 32+4=40DW */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+	ret = ttm_buffer_object_create(bdev, PAGE_SIZE,
+				       ttm_bo_type_kernel,
+				       DRM_PSB_FLAG_MEM_MMU | TTM_PL_FLAG_NO_EVICT,
+				       0, 0, 0, NULL, &(topaz_priv->topaz_bo));
+#else
+	ret = ttm_buffer_object_create(bdev, PAGE_SIZE,
+				       ttm_bo_type_kernel,
+				       DRM_PSB_FLAG_MEM_MMU | TTM_PL_FLAG_NO_EVICT,
+				       0, 0, NULL, &(topaz_priv->topaz_bo));
+#endif
+	if (ret != 0) {
+		DRM_ERROR("TOPAZ: failed to allocate topaz BO.\n");
+		return -1;
+	}
+
+	ret = ttm_bo_kmap(topaz_priv->topaz_bo, 0,
+			  topaz_priv->topaz_bo->num_pages,
+			  &topaz_priv->topaz_bo_kmap);
+	if (ret) {
+		DRM_ERROR("TOPAZ: map topaz BO bo failed......\n");
+		ttm_bo_unref(&topaz_priv->topaz_bo);
+		return -1;
+	}
+
+	TOPAZ_READ32(TOPAZ_CR_TOPAZ_HW_CFG, &topaz_priv->topaz_num_cores, 0);
+
+	topaz_priv->topaz_num_cores = F_EXTRACT(topaz_priv->topaz_num_cores,
+						TOPAZ_CR_NUM_CORES_SUPPORTED);
+	PSB_DEBUG_GENERAL("TOPAZ: number of cores: %d\n",
+			  topaz_priv->topaz_num_cores);
+
+	if (topaz_priv->topaz_num_cores > TOPAZSC_NUM_CORES) {
+		topaz_priv->topaz_num_cores = TOPAZSC_NUM_CORES;
+		DRM_ERROR("TOPAZ: number of cores (%d) exceed "
+			  "TOPAZSC_NUM_CORES (%d)!\n",
+			  topaz_priv->topaz_num_cores, TOPAZSC_NUM_CORES);
+	}
+
+	for (n = 0; n <  MAX_TOPAZ_CORES; n++) {
+		topaz_priv->topaz_mtx_data_mem[n] = NULL;
+		topaz_priv->topaz_mtx_reg_state[n] = NULL;
+		topaz_priv->cur_mtx_data_size[n] = 0;
+		topaz_priv->topaz_fw[n].text = NULL;
+		topaz_priv->topaz_fw[n].data = NULL;
+	}
+
+	for (n = 0; n < topaz_priv->topaz_num_cores; n++) {
+		TOPAZ_READ32(TOPAZ_CR_IMG_TOPAZ_CORE_ID, &core_id, n);
+		TOPAZ_READ32(TOPAZ_CR_IMG_TOPAZ_CORE_REV, &core_rev, n);
+
+		PSB_DEBUG_GENERAL("TOPAZ: core(%d), core_id(%x) core_rev(%x)\n",
+				  n, core_id, core_rev);
+
+		topaz_priv->topaz_mtx_reg_state[n] = kmalloc(TOPAZ_MTX_REG_SIZE,
+						     GFP_KERNEL);
+		if (topaz_priv->topaz_mtx_reg_state[n] == NULL) {
+			DRM_ERROR("TOPAZ: failed to allocate space "
+				  "for mtx register\n");
+			goto out;
+		}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+		ret = ttm_buffer_object_create(bdev,
+					       MAX_TOPAZ_RAM_SIZE,
+					       ttm_bo_type_kernel,
+					       DRM_PSB_FLAG_MEM_MMU |
+					       TTM_PL_FLAG_NO_EVICT,
+					       0, 0, 0, NULL,
+					       &topaz_priv->topaz_mtx_data_mem[n]);
+#else
+		ret = ttm_buffer_object_create(bdev,
+					       MAX_TOPAZ_RAM_SIZE,
+					       ttm_bo_type_kernel,
+					       DRM_PSB_FLAG_MEM_MMU |
+					       TTM_PL_FLAG_NO_EVICT,
+					       0, 0, NULL,
+					       &topaz_priv->topaz_mtx_data_mem[n]);
+#endif
+		if (ret) {
+			DRM_ERROR("TOPAZ: failed to allocate ttm buffer for "
+				  "mtx data save of core (%d)\n", n);
+			goto out;
+		}
+
+	}
+
+	topaz_bo_virt = ttm_kmap_obj_virtual(&topaz_priv->topaz_bo_kmap,
+					     &is_iomem);
+	topaz_priv->topaz_mtx_wb = (uint32_t *) topaz_bo_virt;
+	topaz_priv->topaz_wb_offset = topaz_priv->topaz_bo->offset;
+	topaz_priv->topaz_sync_addr = (uint32_t *)(topaz_bo_virt
+				      + 2048);
+	topaz_priv->topaz_sync_offset = topaz_priv->topaz_wb_offset
+					+ 2048;
+
+	PSB_DEBUG_GENERAL("TOPAZ: alloc BO for WriteBack\n");
+	PSB_DEBUG_GENERAL("TOPAZ: WB offset=0x%08x\n",
+			  topaz_priv->topaz_wb_offset);
+	PSB_DEBUG_GENERAL("TOPAZ: SYNC offset=0x%08x\n",
+			  topaz_priv->topaz_sync_offset);
+
+	/*topaz_cmd_count starts with 1. Reset writback value with 0*/
+	memset((void *)(topaz_priv->topaz_mtx_wb), 0,
+	       topaz_priv->topaz_num_cores
+	       * MTX_WRITEBACK_DATASIZE_ROUND);
+	memset((void *)topaz_priv->topaz_sync_addr, 0,
+	       MTX_WRITEBACK_DATASIZE_ROUND);
+
+	/*fence sequence number starts with 0. Reset sync seq with ~1*/
+	*(topaz_priv->topaz_sync_addr) = ~0;
+
+	pnw_topaz_mmu_flushcache(dev_priv);
+
+	/* # set up MMU */
+	for (n = 0; n < topaz_priv->topaz_num_cores; n++)
+		pnw_topaz_mmu_hwsetup(dev_priv, n);
+
+
+	for (n = 0; n < topaz_priv->topaz_num_cores; n++) {
+		/* # reset topaz */
+		MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
+			     F_ENCODE(1, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
+			     F_ENCODE(1, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
+			     F_ENCODE(1, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
+			     F_ENCODE(1, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET)
+			     |
+			     F_ENCODE(1, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
+			     F_ENCODE(1, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET),
+			     n);
+
+		MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
+			     F_ENCODE(0, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
+			     F_ENCODE(0, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
+			     F_ENCODE(0, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
+			     F_ENCODE(0, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET)
+			     |
+			     F_ENCODE(0, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
+			     F_ENCODE(0, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET),
+			     n);
+	}
+
+	PSB_DEBUG_GENERAL("TOPAZ: Reset MVEA successfully.\n");
+
+	/* create firmware storage */
+	for (n = 0; n < PNW_TOPAZ_CODEC_NUM_MAX * 2; ++n) {
+		/* #.# malloc DRM object for fw storage */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+		ret = ttm_buffer_object_create(bdev, MAX_TOPAZ_TEXT_SIZE,
+					       ttm_bo_type_kernel,
+					       DRM_PSB_FLAG_MEM_MMU |
+					       TTM_PL_FLAG_NO_EVICT,
+					       0, 0, 0, NULL,
+					       &topaz_priv->topaz_fw[n].text);
+#else
+		ret = ttm_buffer_object_create(bdev, MAX_TOPAZ_TEXT_SIZE,
+					       ttm_bo_type_kernel,
+					       DRM_PSB_FLAG_MEM_MMU |
+					       TTM_PL_FLAG_NO_EVICT,
+					       0, 0, NULL,
+					       &topaz_priv->topaz_fw[n].text);
+#endif
+		if (ret) {
+			DRM_ERROR("Failed to allocate firmware.\n");
+			goto out;
+		}
+
+		/* #.# malloc DRM object for fw storage */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+		ret = ttm_buffer_object_create(bdev, MAX_TOPAZ_DATA_SIZE,
+					       ttm_bo_type_kernel,
+					       DRM_PSB_FLAG_MEM_MMU |
+					       TTM_PL_FLAG_NO_EVICT,
+					       0, 0, 0, NULL,
+					       &topaz_priv->topaz_fw[n].data);
+#else
+		ret = ttm_buffer_object_create(bdev, MAX_TOPAZ_DATA_SIZE,
+					       ttm_bo_type_kernel,
+					       DRM_PSB_FLAG_MEM_MMU |
+					       TTM_PL_FLAG_NO_EVICT,
+					       0, 0, NULL,
+					       &topaz_priv->topaz_fw[n].data);
+#endif
+		if (ret) {
+			DRM_ERROR("Failed to allocate firmware.\n");
+			goto out;
+		}
+	}
+
+	PSB_DEBUG_GENERAL("TOPAZ:old clock gating reg = 0x%08x\n",
+		       PSB_RVDC32(PSB_TOPAZ_CLOCKGATING));
+	/* Disable vec auto clockgating due to random encoder HW hang
+	   during 1080p video recording */
+	PSB_DEBUG_INIT("TOPAZ:reset to disable clock gating\n");
+
+	PSB_WVDC32(VEC_CG_DIS_MASK, PSB_TOPAZ_CLOCKGATING);
+
+	PSB_DEBUG_GENERAL("TOPAZ: Exit initialization\n");
+	return 0;
+
+out:
+	for (n = 0; n < PNW_TOPAZ_CODEC_NUM_MAX * 2; ++n) {
+		if (topaz_priv->topaz_fw[n].text != NULL)
+			ttm_bo_unref(&topaz_priv->topaz_fw[n].text);
+		if (topaz_priv->topaz_fw[n].data != NULL)
+			ttm_bo_unref(&topaz_priv->topaz_fw[n].data);
+	}
+
+	for (n = 0; n < MAX_TOPAZ_CORES; n++) {
+		if (topaz_priv->topaz_mtx_data_mem[n] != NULL)
+			ttm_bo_unref(&topaz_priv->topaz_mtx_data_mem[n]);
+		if (topaz_priv->topaz_mtx_reg_state[n] != NULL)
+			kfree(topaz_priv->topaz_mtx_reg_state[n]);
+	}
+
+	return ret;
+}
+
+int pnw_topaz_uninit(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct pnw_topaz_private *topaz_priv = dev_priv->topaz_private;
+	int n;
+
+	/* flush MMU */
+	PSB_DEBUG_GENERAL("XXX: need to flush mmu cache here??\n");
+	/* pnw_topaz_mmu_flushcache (dev_priv); */
+
+	if (NULL == topaz_priv) {
+		DRM_ERROR("TOPAZ: topaz_priv is NULL!\n");
+		return -1;
+	}
+
+	/* # reset TOPAZ chip */
+	pnw_topaz_reset(dev_priv);
+
+	/* release resources */
+	/* # release write back memory */
+	topaz_priv->topaz_mtx_wb = NULL;
+
+	for (n = 0; n < topaz_priv->topaz_num_cores; n++) {
+		/* release mtx register save space */
+		kfree(topaz_priv->topaz_mtx_reg_state[n]);
+
+		/* release mtx data memory save space */
+		if (topaz_priv->topaz_mtx_data_mem[n])
+			ttm_bo_unref(&topaz_priv->topaz_mtx_data_mem[n]);
+
+		kfree(topaz_priv->topaz_bias_table[n]);
+	}
+	/* # release firmware storage */
+	for (n = 0; n < PNW_TOPAZ_CODEC_NUM_MAX * 2; ++n) {
+		if (topaz_priv->topaz_fw[n].text != NULL)
+			ttm_bo_unref(&topaz_priv->topaz_fw[n].text);
+		if (topaz_priv->topaz_fw[n].data != NULL)
+			ttm_bo_unref(&topaz_priv->topaz_fw[n].data);
+	}
+
+	ttm_bo_kunmap(&topaz_priv->topaz_bo_kmap);
+	ttm_bo_unref(&topaz_priv->topaz_bo);
+
+	if (topaz_priv) {
+		pci_set_drvdata(dev->pdev, NULL);
+		device_remove_file(&dev->pdev->dev, &dev_attr_topaz_pmstate);
+		sysfs_put(topaz_priv->sysfs_pmstate);
+		topaz_priv->sysfs_pmstate = NULL;
+
+		kfree(topaz_priv);
+		dev_priv->topaz_private = NULL;
+	}
+
+	return 0;
+}
+
+int pnw_topaz_reset(struct drm_psb_private *dev_priv)
+{
+	struct pnw_topaz_private *topaz_priv;
+	uint32_t i;
+
+	topaz_priv = dev_priv->topaz_private;
+	topaz_priv->topaz_busy = 0;
+	topaz_priv->topaz_cmd_count = 0;
+	for (i = 0; i < MAX_TOPAZ_CORES; i++)
+		topaz_priv->cur_mtx_data_size[i] = 0;
+	topaz_priv->topaz_needs_reset = 0;
+
+	memset((void *)(topaz_priv->topaz_mtx_wb), 0,
+	       MAX_TOPAZ_CORES * MTX_WRITEBACK_DATASIZE_ROUND);
+
+	for (i = 0; i < topaz_priv->topaz_num_cores; i++) {
+		/* # reset topaz */
+		MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
+			     F_ENCODE(1, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
+			     F_ENCODE(1, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
+			     F_ENCODE(1, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
+			     F_ENCODE(1, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET)
+			     |
+			     F_ENCODE(1, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
+			     F_ENCODE(1, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET),
+			     i);
+
+		MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
+			     F_ENCODE(0, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
+			     F_ENCODE(0, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
+			     F_ENCODE(0, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
+			     F_ENCODE(0, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET)
+			     |
+			     F_ENCODE(0, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
+			     F_ENCODE(0, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET),
+			     i);
+	}
+
+	/* # set up MMU */
+	for (i = 0; i < topaz_priv->topaz_num_cores; i++)
+		pnw_topaz_mmu_hwsetup(dev_priv, i);
+
+	return 0;
+}
+
+
+/* read firmware bin file and load all data into driver */
+int pnw_topaz_init_fw(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	const struct firmware *raw = NULL;
+	unsigned char *ptr;
+	int ret = 0;
+	int n;
+	struct topazsc_fwinfo *cur_fw;
+	size_t cur_size, total_size;
+	struct pnw_topaz_codec_fw *cur_codec;
+	struct ttm_buffer_object **cur_drm_obj;
+	struct ttm_bo_kmap_obj tmp_kmap;
+	bool is_iomem;
+	struct pnw_topaz_private *topaz_priv = dev_priv->topaz_private;
+
+	topaz_priv->stored_initial_qp = 0;
+
+	/* # get firmware */
+	ret = request_firmware(&raw, FIRMWARE_NAME, &dev->pdev->dev);
+	if (ret != 0) {
+		DRM_ERROR("TOPAZ: request_firmware failed: %d\n", ret);
+		ret = -EINVAL;
+		return ret;
+	}
+
+	if ((NULL == raw) || (raw->size < sizeof(struct topazsc_fwinfo))) {
+		DRM_ERROR("TOPAZ: firmware file is not correct size.\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	total_size = raw->size;
+	PSB_DEBUG_GENERAL("TOPAZ: opened firmware, size %zu\n", raw->size);
+
+	if (total_size > (PNW_TOPAZ_CODEC_NUM_MAX * 2 *
+			(MAX_TOPAZ_DATA_SIZE + MAX_TOPAZ_TEXT_SIZE))) {
+		DRM_ERROR("%s firmwae size(%zu) isn't correct\n",
+				__func__, total_size);
+	}
+	ptr = (unsigned char *) raw->data;
+
+	if (!ptr) {
+		DRM_ERROR("TOPAZ: failed to load firmware.\n");
+		goto out;
+	}
+
+	/* # load fw from file */
+	PSB_DEBUG_GENERAL("TOPAZ: load firmware.....\n");
+	cur_fw = NULL;
+	for (n = 0; n < PNW_TOPAZ_CODEC_NUM_MAX * 2; ++n) {
+		if (total_size < sizeof(struct topazsc_fwinfo)) {
+			PSB_DEBUG_GENERAL("TOPAZ: WARNING: Rearch end of "
+					  "firmware. Have loaded %d firmwares.",
+					  n);
+			break;
+		}
+
+		total_size -=  sizeof(struct topazsc_fwinfo);
+		cur_fw = (struct topazsc_fwinfo *) ptr;
+		if (cur_fw->codec > PNW_TOPAZ_CODEC_NUM_MAX * 2) {
+			DRM_ERROR("%s L%d unknown video codec(%d)",
+					__func__, __LINE__,
+					cur_fw->codec);
+			ret = -EINVAL;
+			goto out;
+		}
+
+		cur_codec = &topaz_priv->topaz_fw[cur_fw->codec];
+		cur_codec->ver = cur_fw->ver;
+		cur_codec->codec = cur_fw->codec;
+		cur_codec->text_size = cur_fw->text_size;
+		cur_codec->data_size = cur_fw->data_size;
+		cur_codec->data_location = cur_fw->data_location;
+
+		if (total_size < (cur_fw->text_size + cur_fw->data_size) ||
+			       cur_fw->text_size > MAX_TOPAZ_TEXT_SIZE	||
+			       cur_fw->data_size > MAX_TOPAZ_DATA_SIZE) {
+			PSB_DEBUG_GENERAL("TOPAZ: WARNING: wrong size number" \
+					"of data(%d) or text(%d). Have loaded" \
+					" %d firmwares.", n,
+					  cur_fw->data_size,
+					  cur_fw->text_size);
+			ret = -EINVAL;
+			goto out;
+		}
+
+		total_size -= cur_fw->text_size;
+		total_size -= cur_fw->data_size;
+
+		PSB_DEBUG_GENERAL("TOPAZ: load firemware %s.\n",
+				  codec_to_string(cur_fw->codec / 2));
+
+		/* #.# handle text section */
+		ptr += sizeof(struct topazsc_fwinfo);
+		cur_drm_obj = &cur_codec->text;
+		cur_size = cur_fw->text_size;
+
+		/* #.# fill DRM object with firmware data */
+		ret = ttm_bo_kmap(*cur_drm_obj, 0, (*cur_drm_obj)->num_pages,
+				  &tmp_kmap);
+		if (ret) {
+			PSB_DEBUG_GENERAL("drm_bo_kmap failed: %d\n", ret);
+			ttm_bo_unref(cur_drm_obj);
+			*cur_drm_obj = NULL;
+			ret = -EINVAL;
+			goto out;
+		}
+
+		if (cur_size > ((*cur_drm_obj)->num_pages << PAGE_SHIFT)) {
+			DRM_ERROR("%s L%d data size(%zu) is bigger than" \
+					" BO size(%lu pages)\n",
+					__func__, __LINE__,
+					cur_size,
+					(*cur_drm_obj)->num_pages);
+			ret = -EINVAL;
+			goto out;
+		}
+
+		PSB_DEBUG_GENERAL("\ttext_size: %d, "
+				  "data_size %d, data_location 08%x\n",
+				  cur_codec->text_size,
+				  cur_codec->data_size,
+				  cur_codec->data_location);
+		memcpy(ttm_kmap_obj_virtual(&tmp_kmap, &is_iomem), ptr,
+		       cur_size);
+
+		ttm_bo_kunmap(&tmp_kmap);
+
+		/* #.# handle data section */
+		ptr += cur_fw->text_size;
+		cur_drm_obj = &cur_codec->data;
+		cur_size = cur_fw->data_size;
+
+		if (cur_size > ((*cur_drm_obj)->num_pages << PAGE_SHIFT)) {
+			DRM_ERROR("%s L%d data size(%zu) is bigger than" \
+					" BO size(%lu pages)\n",
+					__func__, __LINE__,
+					cur_size,
+					(*cur_drm_obj)->num_pages);
+			ret = -EINVAL;
+			goto out;
+		}
+
+		/* #.# fill DRM object with firmware data */
+		ret = ttm_bo_kmap(*cur_drm_obj, 0, (*cur_drm_obj)->num_pages,
+				  &tmp_kmap);
+		if (ret) {
+			PSB_DEBUG_GENERAL("drm_bo_kmap failed: %d\n", ret);
+			ttm_bo_unref(cur_drm_obj);
+			*cur_drm_obj = NULL;
+			ret = -EINVAL;
+			goto out;
+		}
+
+		memcpy(ttm_kmap_obj_virtual(&tmp_kmap, &is_iomem), ptr,
+		       cur_size);
+
+		ttm_bo_kunmap(&tmp_kmap);
+
+		/* #.# update ptr */
+		ptr += cur_fw->data_size;
+	}
+
+	release_firmware(raw);
+	PSB_DEBUG_GENERAL("TOPAZ: return from firmware init\n");
+
+	return 0;
+
+out:
+	if (raw) {
+		PSB_DEBUG_GENERAL("release firmware....\n");
+		release_firmware(raw);
+	}
+
+	return ret;
+}
+
+/* setup fw when start a new context */
+int pnw_topaz_setup_fw(struct drm_device *dev, enum drm_pnw_topaz_codec codec)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	uint32_t verify_pc;
+	int core_id;
+	struct pnw_topaz_private *topaz_priv = dev_priv->topaz_private;
+	int i, ret = 0;
+
+	/*Reset TopazSC*/
+	PSB_DEBUG_GENERAL("TOPAZ: should reset topaz when context change\n");
+
+	if (topaz_priv->topaz_num_cores > MAX_TOPAZ_CORES) {
+		DRM_ERROR("TOPAZ: Invalid core nubmer %d\n",
+			  topaz_priv->topaz_num_cores);
+		return -EINVAL;
+	}
+
+	PSB_DEBUG_GENERAL("TOPAZ: Set up mmu for all %d cores\n",
+			  topaz_priv->topaz_num_cores);
+
+	for (core_id = 0; core_id < topaz_priv->topaz_num_cores; core_id++)
+		pnw_topaz_mmu_hwsetup(dev_priv, core_id);
+
+	/* # reset MVEA */
+	for (core_id = 0; core_id < topaz_priv->topaz_num_cores; core_id++) {
+		pnw_topazsc_reset_ESB(dev_priv, core_id);
+
+		MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
+			     F_ENCODE(1, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
+			     F_ENCODE(1, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
+			     F_ENCODE(1, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
+			     F_ENCODE(1, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET)
+			     |
+			     F_ENCODE(1, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
+			     F_ENCODE(1, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET),
+			     core_id);
+
+		MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
+			     F_ENCODE(0, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
+			     F_ENCODE(0, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
+			     F_ENCODE(0, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
+			     F_ENCODE(0, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET)
+			     |
+			     F_ENCODE(0, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
+			     F_ENCODE(0, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET),
+			     core_id);
+	}
+
+	psb_irq_uninstall_islands(dev, OSPM_VIDEO_ENC_ISLAND);
+
+	PSB_DEBUG_GENERAL("TOPAZ: will setup firmware ....\n");
+
+	/*start each MTX in turn MUST start with master to
+	 * enable comms to other cores*/
+	for (core_id = topaz_priv->topaz_num_cores - 1;
+	     core_id >= 0; core_id--) {
+		topaz_set_mtx_target(dev_priv, core_id, 0);
+		/* # reset mtx */
+		TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_SRST,
+			      F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_MVEA_SOFT_RESET)
+			      |
+			      F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_MTX_SOFT_RESET) |
+			      F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_VLC_SOFT_RESET),
+			      core_id);
+
+		TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_SRST, 0x0, core_id);
+
+		/* # upload fw by drm */
+		PSB_DEBUG_GENERAL("TOPAZ: will upload firmware to %d cores\n",
+				  topaz_priv->topaz_num_cores);
+
+		topaz_upload_fw(dev, codec, core_id);
+
+		PSB_DEBUG_GENERAL("TOPAZ: after upload fw ....\n");
+
+		/* D0.5, D0.6 and D0.7 */
+		for (i = 5; i < 8; i++) {
+			topaz_write_core_reg(dev_priv, core_id, 0x1 | (i << 4),
+					     0);
+		}
+		/* Saves 8 Registers of D1 Bank  */
+		/* D1.5, D1.6 and D1.7 */
+		for (i = 5; i < 8; i++) {
+			topaz_write_core_reg(dev_priv, core_id, 0x2 | (i << 4),
+					     0);
+		}
+
+		PSB_DEBUG_GENERAL("TOPAZ: setting up pc address: 0x%08x"
+				  "for core (%d)\n",
+				  PC_START_ADDRESS, core_id);
+		topaz_write_core_reg(dev_priv, core_id,
+				     TOPAZ_MTX_PC, PC_START_ADDRESS);
+
+		topaz_read_core_reg(dev_priv, core_id,
+				    TOPAZ_MTX_PC, &verify_pc);
+
+		PSB_DEBUG_GENERAL("TOPAZ: verify pc address for core"
+				  " (%d):0x%08x\n",
+				  core_id, verify_pc);
+
+		/* enable auto clock is essential for this driver */
+		TOPAZ_WRITE32(TOPAZ_CR_TOPAZ_AUTO_CLK_GATE,
+			      F_ENCODE(1, TOPAZ_CR_TOPAZ_VLC_AUTO_CLK_GATE) |
+			      F_ENCODE(1, TOPAZ_CR_TOPAZ_DB_AUTO_CLK_GATE),
+			      core_id);
+		MVEA_WRITE32(MVEA_CR_MVEA_AUTO_CLOCK_GATING,
+			     F_ENCODE(1, MVEA_CR_MVEA_IPE_AUTO_CLK_GATE) |
+			     F_ENCODE(1, MVEA_CR_MVEA_SPE_AUTO_CLK_GATE) |
+			     F_ENCODE(1, MVEA_CR_MVEA_CMPRS_AUTO_CLK_GATE) |
+			     F_ENCODE(1, MVEA_CR_MVEA_JMCOMP_AUTO_CLK_GATE),
+			     core_id);
+
+		/* flush the command FIFO - only has effect on master MTX */
+		if (core_id == 0)
+			TOPAZ_WRITE32(TOPAZ_CR_TOPAZ_CMD_FIFO_2,
+				      F_ENCODE(1, TOPAZ_CR_CMD_FIFO_FLUSH),
+				      0);
+
+		/* clear MTX interrupt */
+		TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTCLEAR,
+			      F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX),
+			      core_id);
+
+		/* put the number of cores in use in the scratch register
+		 * so is is ready when the firmware wakes up. */
+		TOPAZ_WRITE32(TOPAZ_CR_FIRMWARE_REG_1 +
+				(MTX_SCRATCHREG_TOMTX << 2),
+				topaz_priv->topaz_num_cores, core_id);
+
+		/* # turn on MTX */
+		topaz_set_mtx_target(dev_priv, core_id, 0);
+		MTX_WRITE32(MTX_CORE_CR_MTX_ENABLE_OFFSET,
+			    MTX_CORE_CR_MTX_ENABLE_MTX_ENABLE_MASK,
+			    core_id);
+
+		topaz_set_mtx_target(dev_priv, core_id, 0);
+		MTX_WRITE32(MTX_CR_MTX_KICK, 1, core_id);
+	}
+
+	topaz_priv->topaz_cmd_count = 1;
+	/* # poll on the interrupt which the firmware will generate */
+	/*With DDKv186, interrupt would't be generated automatically after
+	 * firmware set up*/
+	PSB_DEBUG_GENERAL("TOPAZ: send NULL command to test firmware\n");
+	for (core_id = 0; core_id < topaz_priv->topaz_num_cores; core_id++) {
+		pnw_topaz_kick_null_cmd(dev_priv, core_id,
+					topaz_priv->topaz_sync_offset,
+					topaz_priv->topaz_cmd_count++, 0);
+
+		ret = pnw_wait_on_sync(dev_priv,
+				       topaz_priv->topaz_cmd_count - 1,
+				       topaz_priv->topaz_sync_addr
+				       + MTX_WRITEBACK_VALUE);
+		if (0 != ret) {
+			DRM_ERROR("TOPAZ: Failed to upload firmware for codec"
+				  " %d!\n",
+				  codec);
+			pnw_error_dump_reg(dev_priv, core_id);
+			return -EBUSY;
+		}
+
+		*(topaz_priv->topaz_sync_addr + MTX_WRITEBACK_VALUE)
+		= TOPAZ_FIRMWARE_MAGIC;
+		TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTCLEAR,
+			      F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX),
+			      core_id);
+	}
+
+
+	PSB_DEBUG_GENERAL("TOPAZ: after topaz mtx setup ....\n");
+
+	memset((void *)(topaz_priv->topaz_mtx_wb),
+	       0,
+	       topaz_priv->topaz_num_cores
+	       * MTX_WRITEBACK_DATASIZE_ROUND);
+
+	PSB_DEBUG_GENERAL("TOPAZ: firmware uploaded.\n");
+
+	topaz_priv->topaz_busy = 0;
+
+	for (core_id = 0; core_id < topaz_priv->topaz_num_cores; core_id++) {
+		MVEA_WRITE32(MVEA_CR_BUFFER_SIDEBAND,
+		     F_ENCODE(CACHE_ONLY, MVEA_CR_ABOVE_PARAM_OUT_SBAND) |
+		     F_ENCODE(CACHE_ONLY, MVEA_CR_BELOW_PARAM_OUT_SBAND) |
+		     F_ENCODE(MEM_AND_CACHE, MVEA_CR_ABOVE_PIX_OUT_SBAND) |
+		     F_ENCODE(MEM_AND_CACHE, MVEA_CR_RECON_SBAND) |
+		     F_ENCODE(CACHE_ONLY, MVEA_CR_REF_SBAND) |
+		     F_ENCODE(CACHE_ONLY, MVEA_CR_ABOVE_PARAM_IN_SBAND) |
+		     F_ENCODE(CACHE_ONLY, MVEA_CR_BELOW_PARAM_IN_SBAND) |
+		     F_ENCODE(MEMORY_ONLY, MVEA_CR_CURR_PARAM_SBAND) |
+		     F_ENCODE(CACHE_ONLY, MVEA_CR_ABOVE_PIX_IN_SBAND) |
+		     F_ENCODE(MEMORY_ONLY, MVEA_CR_CURR_MB_SBAND),
+		     core_id);
+		MVEA_WRITE32(MVEA_CR_IPE_JITTER_FACTOR, 3 - 1, core_id);
+
+		/*setup the jitter, base it on image size (using the height)*/
+		if (topaz_priv->frame_h >= 720)
+			MVEA_WRITE32(MVEA_CR_IPE_JITTER_FACTOR, 3 - 1,
+					core_id);
+		else if (topaz_priv->frame_w >= 480)
+			MVEA_WRITE32(MVEA_CR_IPE_JITTER_FACTOR, 2 - 1,
+					core_id);
+		else
+			MVEA_WRITE32(MVEA_CR_IPE_JITTER_FACTOR, 3 - 1,
+					core_id);
+	}
+
+	psb_irq_preinstall_islands(dev, OSPM_VIDEO_ENC_ISLAND);
+	psb_irq_postinstall_islands(dev, OSPM_VIDEO_ENC_ISLAND);
+	pnw_topaz_enableirq(dev);
+
+	return 0;
+}
+
+#if UPLOAD_FW_BY_DMA
+int topaz_upload_fw(struct drm_device *dev, enum drm_pnw_topaz_codec codec, uint32_t core_id)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	const struct pnw_topaz_codec_fw *cur_codec_fw;
+	uint32_t text_size, data_size;
+	uint32_t data_location;
+	uint32_t cur_mtx_data_size;
+	struct pnw_topaz_private *topaz_priv = dev_priv->topaz_private;
+	int ret = 0;
+
+	if (codec >= PNW_TOPAZ_CODEC_NUM_MAX) {
+		DRM_ERROR("TOPAZ: Invalid codec %d\n", codec);
+		return -EINVAL;
+	}
+
+	/* # MTX reset */
+	PSB_DEBUG_GENERAL("TOPAZ: mtx reset.\n");
+	MTX_WRITE32(MTX_CORE_CR_MTX_SOFT_RESET_OFFSET,
+		    MTX_CORE_CR_MTX_SOFT_RESET_MTX_RESET_MASK,
+		    core_id);
+
+	/* # upload the master and slave firmware by DMA */
+	if (core_id == 0)
+		cur_codec_fw = &topaz_priv->topaz_fw[codec * 2];
+	else
+		cur_codec_fw = &topaz_priv->topaz_fw[codec*2 + 1];
+
+	PSB_DEBUG_GENERAL("Topaz:upload codec %s(%d) text sz=%d data sz=%d\n"
+			  "data location(0x%08x) to core(%d).\n",
+			  codec_to_string(codec), codec,
+			  cur_codec_fw->text_size, cur_codec_fw->data_size,
+			  cur_codec_fw->data_location, core_id);
+
+	/* # upload text. text_size is byte size*/
+	text_size = cur_codec_fw->text_size / 4;
+	/* adjust transfer sizes of text and data sections to match burst size*/
+	text_size = ((text_size * 4 + (MTX_DMA_BURSTSIZE_BYTES - 1))
+		     & ~(MTX_DMA_BURSTSIZE_BYTES - 1)) / 4;
+
+	PSB_DEBUG_GENERAL("TOPAZ: text_size round up to %d\n", text_size);
+	/* setup the MTX to start recieving data:
+	   use a register for the transfer which will point to the source
+	   (MTX_CR_MTX_SYSC_CDMAT) */
+	/*MTX burst size (4 * 2 * 32bits = 32bytes) should match DMA burst
+	  size (2 * 128bits = 32bytes) */
+	/* #.# fill the dst addr */
+
+	MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA, MTX_DMA_MEMORY_BASE, core_id);
+	MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC,
+		    F_ENCODE(4, MTX_BURSTSIZE) |
+		    F_ENCODE(0, MTX_RNW) |
+		    F_ENCODE(1, MTX_ENABLE) |
+		    F_ENCODE(text_size, MTX_LENGTH), core_id);
+
+	/* #.# set DMAC access to host memory via BIF (deserted)*/
+	/* TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1, core_id);*/
+
+	/* #.# transfer the codec */
+	if (0 != topaz_dma_transfer(dev_priv, 0, cur_codec_fw->text->offset, 0,
+				    MTX_CR_MTX_SYSC_CDMAT,
+				    text_size, core_id, 0)) {
+		pnw_error_dump_reg(dev_priv, core_id);
+		return -1;
+	}
+
+	/* #.# wait dma finish */
+	ret = pnw_topaz_wait_for_register(dev_priv,
+					  DMAC_START + IMG_SOC_DMAC_IRQ_STAT(0),
+					  F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
+					  F_ENCODE(1, IMG_SOC_TRANSFER_FIN));
+	if (ret != 0) {
+		pnw_error_dump_reg(dev_priv, core_id);
+		return -1;
+	}
+
+	/* #.# clear interrupt */
+	DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
+
+	PSB_DEBUG_GENERAL("TOPAZ: firmware text upload complete.\n");
+
+	/* # return access to topaz core (deserted)*/
+	/*TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 0, core_id);*/
+
+	/* # upload data */
+	data_size = cur_codec_fw->data_size / 4;
+	data_size = ((data_size * 4 + (MTX_DMA_BURSTSIZE_BYTES - 1))
+		     & ~(MTX_DMA_BURSTSIZE_BYTES - 1)) / 4;
+
+	data_location = cur_codec_fw->data_location;
+	PSB_DEBUG_GENERAL("TOPAZ: data_size round up to %d\n"
+			  "data_location round up to 0x%08x\n",
+			  data_size, data_location);
+	/* #.# fill the dst addr */
+	MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA,
+		    data_location, core_id);
+	MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC,
+		    F_ENCODE(4, MTX_BURSTSIZE) |
+		    F_ENCODE(0, MTX_RNW) |
+		    F_ENCODE(1, MTX_ENABLE) |
+		    F_ENCODE(data_size, MTX_LENGTH), core_id);
+	/* #.# set DMAC access to host memory via BIF(deserted) */
+	/*TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1, core_id);*/
+
+	/* #.# transfer the codec */
+	if (0 != topaz_dma_transfer(dev_priv, 0, cur_codec_fw->data->offset, 0,
+				    MTX_CR_MTX_SYSC_CDMAT, data_size, core_id, 0)) {
+		pnw_error_dump_reg(dev_priv, core_id);
+		return -1;
+	}
+
+	/* #.# wait dma finish */
+	ret = pnw_topaz_wait_for_register(dev_priv,
+					  DMAC_START + IMG_SOC_DMAC_IRQ_STAT(0),
+					  F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
+					  F_ENCODE(1, IMG_SOC_TRANSFER_FIN));
+	if (ret != 0) {
+		pnw_error_dump_reg(dev_priv, core_id);
+		return -1;
+	}
+	/* #.# clear interrupt */
+	DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
+
+	pnw_topaz_mmu_flushcache(dev_priv);
+	PSB_DEBUG_GENERAL("TOPAZ: firmware data upload complete.\n");
+	/* # return access to topaz core(deserted) */
+	/*TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 0, core_id);*/
+
+	/* record this codec's mtx data size(+stack) for
+	 * context save & restore */
+	/* FIXME: since non-root sighting fixed by pre allocated,
+	 * only need to correct the buffer size
+	 */
+	if (core_id == 0)
+		cur_mtx_data_size =
+			TOPAZ_MASTER_FW_MAX - (cur_codec_fw->data_location - \
+					MTX_DMA_MEMORY_BASE);
+	else
+		cur_mtx_data_size =
+			TOPAZ_SLAVE_FW_MAX - (cur_codec_fw->data_location - \
+					MTX_DMA_MEMORY_BASE);
+	topaz_priv->cur_mtx_data_size[core_id] = cur_mtx_data_size / 4;
+	PSB_DEBUG_GENERAL("TOPAZ: Need to save %d words data for core %d\n",
+			cur_mtx_data_size / 4, core_id);
+
+	return 0;
+}
+
+#else
+/* This function is only for debug */
+void topaz_mtx_upload_by_register(struct drm_device *dev, uint32_t mtx_mem,
+				  uint32_t addr, uint32_t size,
+				  struct ttm_buffer_object *buf,
+				  uint32_t core)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	uint32_t *buf_p;
+	uint32_t debug_reg, bank_size, bank_ram_size, bank_count;
+	uint32_t cur_ram_id, ram_addr , ram_id;
+	int map_ret, lp;
+	struct ttm_bo_kmap_obj bo_kmap;
+	bool is_iomem;
+	uint32_t cur_addr, ui32Size;
+
+	PSB_DEBUG_GENERAL("TOPAZ: mtx upload: mtx_mem(0x%08x) addr(0x%08x)"
+			  "size(%d)\n", mtx_mem, addr, size);
+
+	get_mtx_control_from_dash(dev_priv, core);
+
+	map_ret = ttm_bo_kmap(buf, 0, buf->num_pages, &bo_kmap);
+	if (map_ret) {
+		DRM_ERROR("TOPAZ: drm_bo_kmap failed: %d\n", map_ret);
+		return;
+	}
+	buf_p = (uint32_t *) ttm_kmap_obj_virtual(&bo_kmap, &is_iomem);
+
+
+	TOPAZ_READ32(TOPAZ_CORE_CR_MTX_DEBUG_OFFSET, &debug_reg, core);
+	debug_reg = 0x0a0a0600;
+	/*bank_size = (debug_reg & 0xf0000) >> 16;
+	  bank_ram_size = 1 << (bank_size + 2);*/
+
+	/*Bank size 4096, BanK number 6, Totally ram size:24k*/
+	ui32Size = 0x1 << (F_EXTRACT(debug_reg,
+				     TOPAZ_CR_MTX_LAST_RAM_BANK_SIZE) + 2);
+	/* all other banks */
+	bank_size = 0x1 << (F_EXTRACT(debug_reg,
+				      TOPAZ_CR_MTX_RAM_BANK_SIZE) + 2);
+	/* total RAM size */
+	bank_ram_size = ui32Size + (bank_size *
+				    (F_EXTRACT(debug_reg, TOPAZ_CR_MTX_RAM_BANKS) - 1));
+
+	bank_count = (debug_reg & 0xf00) >> 8;
+
+	PSB_DEBUG_GENERAL("TOPAZ: bank size %d, bank count %d, ram size %d\n",
+			  bank_size, bank_count, bank_ram_size);
+
+	pnw_topaz_wait_for_register(dev_priv,
+				    REG_START_TOPAZ_MTX_HOST(core)
+				    + MTX_CR_MTX_RAM_ACCESS_STATUS,
+				    MASK_MTX_MTX_MTX_MCM_STAT,
+				    MASK_MTX_MTX_MTX_MCM_STAT);
+
+	cur_ram_id = -1;
+	cur_addr = addr;
+	for (lp = 0; lp < size / 4; ++lp) {
+		ram_id = mtx_mem + (cur_addr / bank_size);
+
+		if (cur_ram_id != ram_id) {
+			ram_addr = cur_addr >> 2;
+
+			MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_CONTROL,
+				    F_ENCODE(ram_id, MTX_MTX_MCMID) |
+				    F_ENCODE(ram_addr, MTX_MTX_MCM_ADDR) |
+				    F_ENCODE(1, MTX_MTX_MCMAI),
+				    core);
+
+			cur_ram_id = ram_id;
+		}
+		cur_addr += 4;
+
+		MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_DATA_TRANSFER,
+			    *(buf_p + lp), core);
+
+		pnw_topaz_wait_for_register(dev_priv,
+					    MTX_CR_MTX_RAM_ACCESS_STATUS
+					    + REG_START_TOPAZ_MTX_HOST(core),
+					    MASK_MTX_MTX_MTX_MCM_STAT,
+					    MASK_MTX_MTX_MTX_MCM_STAT);
+	}
+
+	release_mtx_control_from_dash(dev_priv, core);
+	ttm_bo_kunmap(&bo_kmap);
+
+	PSB_DEBUG_GENERAL("TOPAZ: register data upload done\n");
+	return;
+}
+
+/* This function is only for debug when DMA isn't working */
+int topaz_upload_fw(struct drm_device *dev, enum drm_pnw_topaz_codec codec,
+		    uint32_t core_id)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	const struct pnw_topaz_codec_fw *cur_codec_fw;
+	uint32_t text_size, data_size;
+	uint32_t data_location;
+	struct pnw_topaz_private *topaz_priv = dev_priv->topaz_private;
+
+	if (codec >= PNW_TOPAZ_CODEC_NUM_MAX) {
+		DRM_ERROR("TOPAZ: Invalid codec %d\n", codec);
+		return -EINVAL;
+	}
+
+	/* # refer HLD document */
+	/* # MTX reset */
+	PSB_DEBUG_GENERAL("TOPAZ: mtx reset.\n");
+	MTX_WRITE32(MTX_CORE_CR_MTX_SOFT_RESET_OFFSET,
+		    MTX_CORE_CR_MTX_SOFT_RESET_MTX_RESET_MASK,
+		    core_id);
+
+	/* # upload the master and slave firmware by DMA */
+	if (core_id == 0)
+		cur_codec_fw = &topaz_priv->topaz_fw[codec * 2];
+	else
+		cur_codec_fw = &topaz_priv->topaz_fw[codec * 2 + 1];
+
+	PSB_DEBUG_GENERAL("Topaz:upload codec by MTX reg %s(%d)"
+			  " text sz=%d data sz=%d"
+			  " data location(%d) to core(%d).\n",
+			  codec_to_string(codec), codec,
+			  cur_codec_fw->text_size, cur_codec_fw->data_size,
+			  cur_codec_fw->data_location, core_id);
+
+	/* # upload text */
+	text_size = cur_codec_fw->text_size;
+
+	topaz_mtx_upload_by_register(dev, MTX_CORE_CODE_MEM,
+				     0,
+				     /*PC_START_ADDRESS - MTX_MEMORY_BASE,*/
+				     text_size, cur_codec_fw->text, core_id);
+
+	/* # upload data */
+	data_size = cur_codec_fw->data_size;
+	data_location = cur_codec_fw->data_location;
+
+	topaz_mtx_upload_by_register(dev, MTX_CORE_DATA_MEM,
+				     data_location - MTX_DMA_MEMORY_BASE, data_size,
+				     cur_codec_fw->data, core_id);
+
+	return 0;
+}
+
+#endif /* UPLOAD_FW_BY_DMA */
+
+/* is_increment is always 0, so use it as core_id for workaround*/
+int topaz_dma_transfer(struct drm_psb_private *dev_priv, uint32_t channel,
+		       uint32_t src_phy_addr, uint32_t offset,
+		       uint32_t soc_addr, uint32_t byte_num,
+		       uint32_t is_increment, uint32_t is_write)
+{
+	uint32_t dmac_count;
+	uint32_t irq_stat;
+	uint32_t count;
+
+	PSB_DEBUG_GENERAL("TOPAZ: using dma to transfer firmware\n");
+	/* # check that no transfer is currently in progress and no
+	   interrupts are outstanding ?? (why care interrupt) */
+	DMAC_READ32(IMG_SOC_DMAC_COUNT(channel), &dmac_count);
+	if (0 != (dmac_count & (MASK_IMG_SOC_EN | MASK_IMG_SOC_LIST_EN))) {
+		DRM_ERROR("TOPAZ: there is tranfer in progress\n");
+		return -1;
+	}
+
+	/* assert(0==(dmac_count & (MASK_IMG_SOC_EN | MASK_IMG_SOC_LIST_EN)));*/
+
+	/* clear status of any previous interrupts */
+	DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(channel), 0);
+
+	/* check irq status */
+	DMAC_READ32(IMG_SOC_DMAC_IRQ_STAT(channel), &irq_stat);
+	/* assert(0 == irq_stat); */
+	if (0 != irq_stat)
+		DRM_ERROR("TOPAZ: there is hold up\n");
+
+	/*MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA, MTX_DMA_MEMORY_BASE, is_increment);
+	  MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC,
+	  F_ENCODE(4, MTX_BURSTSIZE) |
+	  F_ENCODE(0, MTX_RNW) |
+	  F_ENCODE(1, MTX_ENABLE) |
+	  F_ENCODE(byte_num, MTX_LENGTH), is_increment);*/
+
+	/* per hold - allow HW to sort itself out */
+	DMAC_WRITE32(IMG_SOC_DMAC_PER_HOLD(channel), 16);
+	/* clear previous interrupts */
+	DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(channel), 0);
+
+	DMAC_WRITE32(IMG_SOC_DMAC_SETUP(channel),
+		     (src_phy_addr + offset));
+	count = DMAC_VALUE_COUNT(DMAC_BSWAP_NO_SWAP, DMAC_PWIDTH_32_BIT,
+				 is_write, DMAC_PWIDTH_32_BIT, byte_num);
+	/* generate an interrupt at the end of transfer */
+	/* count |= MASK_IMG_SOC_TRANSFER_IEN; */
+	/*count |= F_ENCODE(is_write, IMG_SOC_DIR);*/
+	DMAC_WRITE32(IMG_SOC_DMAC_COUNT(channel), count);
+
+	/* Burst : 2 * 128 bits = 32 bytes*/
+	DMAC_WRITE32(IMG_SOC_DMAC_PERIPH(channel),
+		     DMAC_VALUE_PERIPH_PARAM(DMAC_ACC_DEL_0,
+					     0, DMAC_BURST_2));
+
+	/* is_increment here is actually core_id*/
+	DMAC_WRITE32(IMG_SOC_DMAC_PERIPHERAL_ADDR(channel),
+		     MTX_CR_MTX_SYSC_CDMAT
+		     + REG_START_TOPAZ_MTX_HOST(is_increment));
+
+	/* Finally, rewrite the count register with
+	 * the enable bit set to kick off the transfer
+	 */
+	DMAC_WRITE32(IMG_SOC_DMAC_COUNT(channel), count | MASK_IMG_SOC_EN);
+
+	PSB_DEBUG_GENERAL("TOPAZ: dma transfer started.\n");
+
+	return 0;
+}
+
+void topaz_write_core_reg(struct drm_psb_private *dev_priv,
+			  uint32_t core,
+			  uint32_t reg,
+			  const uint32_t val)
+{
+	uint32_t tmp;
+	get_mtx_control_from_dash(dev_priv, core);
+
+	/* put data into MTX_RW_DATA */
+	MTX_WRITE32(MTX_CORE_CR_MTX_REGISTER_READ_WRITE_DATA_OFFSET, val, core);
+
+	/* request a write */
+	tmp = reg &
+	      ~MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK;
+
+	MTX_WRITE32(MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET,
+		    tmp, core);
+
+	/* wait for operation finished */
+	pnw_topaz_wait_for_register(dev_priv,
+				    REG_START_TOPAZ_MTX_HOST(core) +
+				    MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET,
+				    MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK,
+				    MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK);
+
+	release_mtx_control_from_dash(dev_priv, core);
+}
+
+void topaz_read_core_reg(struct drm_psb_private *dev_priv,
+			 uint32_t core,
+			 uint32_t reg,
+			 uint32_t *ret_val)
+{
+	uint32_t tmp;
+
+	get_mtx_control_from_dash(dev_priv, core);
+
+	/* request a write */
+	tmp = (reg &
+	       ~MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK);
+
+	MTX_WRITE32(MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET,
+		    MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_MASK
+		    | tmp, core);
+
+	/* wait for operation finished */
+	pnw_topaz_wait_for_register(dev_priv,
+				    REG_START_TOPAZ_MTX_HOST(core) +
+				    MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET,
+				    MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK,
+				    MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK);
+
+	/* read  */
+	MTX_READ32(MTX_CORE_CR_MTX_REGISTER_READ_WRITE_DATA_OFFSET,
+		   ret_val, core);
+
+	release_mtx_control_from_dash(dev_priv, core);
+}
+
+void get_mtx_control_from_dash(struct drm_psb_private *dev_priv, uint32_t core)
+{
+	int debug_reg_slave_val;
+	struct pnw_topaz_private *topaz_priv = dev_priv->topaz_private;
+	int count = 0;
+
+	/* GetMTXControlFromDash */
+	TOPAZ_WRITE32(TOPAZ_CORE_CR_MTX_DEBUG_OFFSET,
+		      F_ENCODE(1, TOPAZ_CR_MTX_DBG_IS_SLAVE) |
+		      F_ENCODE(2, TOPAZ_CR_MTX_DBG_GPIO_OUT), core);
+	do {
+		TOPAZ_READ32(TOPAZ_CORE_CR_MTX_DEBUG_OFFSET,
+			     &debug_reg_slave_val, core);
+		count++;
+	} while (((debug_reg_slave_val & 0x18) != 0) && count < 50000);
+
+	if (count >= 50000)
+		PSB_DEBUG_GENERAL("TOPAZ: timeout in "
+				  "get_mtx_control_from_dash\n");
+
+	/* save access control */
+	TOPAZ_READ32(MTX_CORE_CR_MTX_RAM_ACCESS_CONTROL_OFFSET,
+		     &topaz_priv->topaz_dash_access_ctrl, core);
+}
+
+void release_mtx_control_from_dash(struct drm_psb_private *dev_priv,
+				   uint32_t core)
+{
+	struct pnw_topaz_private *topaz_priv = dev_priv->topaz_private;
+
+	/* restore access control */
+	TOPAZ_WRITE32(MTX_CORE_CR_MTX_RAM_ACCESS_CONTROL_OFFSET,
+		      topaz_priv->topaz_dash_access_ctrl, core);
+
+	/* release bus */
+	TOPAZ_WRITE32(TOPAZ_CORE_CR_MTX_DEBUG_OFFSET,
+		      F_ENCODE(1, TOPAZ_CR_MTX_DBG_IS_SLAVE), core);
+}
+
+/* When width or height is bigger than 1280. Encode will
+   treat TTM_PL_TT buffers as tilied memory */
+#define PSB_TOPAZ_TILING_THRESHOLD (1280)
+void pnw_topaz_mmu_hwsetup(struct drm_psb_private *dev_priv, uint32_t core_id)
+{
+	uint32_t pd_addr = psb_get_default_pd_addr(dev_priv->mmu);
+	struct pnw_topaz_private *topaz_priv = dev_priv->topaz_private;
+	u32 val;
+	bool is_src_tiled = false;
+
+	PSB_DEBUG_GENERAL("TOPAZ: core (%d) MMU set up.\n", core_id);
+
+	/* bypass all request while MMU is being configured */
+	TOPAZ_WRITE32(TOPAZ_CR_MMU_CONTROL0,
+		      F_ENCODE(1, TOPAZ_CR_MMU_BYPASS)
+		      | F_ENCODE(1, TOPAZ_CR_MMU_BYPASS_DMAC), core_id);
+
+	/* set MMU hardware at the page table directory */
+	PSB_DEBUG_GENERAL("TOPAZ: write PD phyaddr=0x%08x "
+			  "into MMU_DIR_LIST0/1\n", pd_addr);
+	/*There's two of these (0) and (1).. only 0 is currently used*/
+	TOPAZ_WRITE32(TOPAZ_CR_MMU_DIR_LIST_BASE(0), pd_addr, core_id);
+	/*TOPAZ_WRITE32(TOPAZ_CR_MMU_DIR_LIST_BASE(1), 0, core_id);*/
+
+	/* setup index register, all pointing to directory bank 0 */
+	TOPAZ_WRITE32(TOPAZ_CR_MMU_BANK_INDEX, 0, core_id);
+
+	if ((topaz_priv->frame_w > PSB_TOPAZ_TILING_THRESHOLD) ||
+			(topaz_priv->frame_h > PSB_TOPAZ_TILING_THRESHOLD))
+		is_src_tiled = true;
+
+	if (drm_psb_msvdx_tiling && dev_priv->have_mem_mmu_tiling &&
+		is_src_tiled) {
+		uint32_t tile_start =
+			dev_priv->bdev.man[TTM_PL_TT].gpu_offset;
+		uint32_t tile_end =
+			dev_priv->bdev.man[DRM_PSB_MEM_MMU_TILING].gpu_offset +
+			(dev_priv->bdev.man[DRM_PSB_MEM_MMU_TILING].size
+			<< PAGE_SHIFT);
+
+		/* Enable memory tiling */
+		val = ((tile_start >> 20) + (((tile_end >> 20) - 1) << 12) +
+		((0x8 | 2) << 24)); /* 2k stride */
+
+		PSB_DEBUG_GENERAL("Topax: Set up MMU Tiling register %08x\n",
+					val);
+		TOPAZ_WRITE32(TOPAZ_CR_MMU_TILE_BASE0, val, core_id);
+	}
+
+	/* now enable MMU access for all requestors */
+	TOPAZ_WRITE32(TOPAZ_CR_MMU_CONTROL0,
+		      F_ENCODE(0, TOPAZ_CR_MMU_BYPASS)
+		      | F_ENCODE(0, TOPAZ_CR_MMU_BYPASS_DMAC), core_id);
+}
+
+void pnw_topaz_mmu_flushcache(struct drm_psb_private *dev_priv)
+{
+	uint32_t mmu_control;
+
+	if (dev_priv->topaz_disabled)
+		return;
+
+	PSB_DEBUG_GENERAL("TOPAZ: pnw_topaz_mmu_flushcache\n");
+#if 0
+	PSB_DEBUG_GENERAL("XXX: Only one PTD/PTE cache"
+			  " so flush using the master core\n");
+#endif
+	/* XXX: disable interrupt */
+	TOPAZ_READ32(TOPAZ_CR_MMU_CONTROL0, &mmu_control, 0);
+	mmu_control |= F_ENCODE(1, TOPAZ_CR_MMU_INVALDC);
+	/*mmu_control |= F_ENCODE(1, TOPAZ_CR_MMU_FLUSH);*/
+
+#if 0
+	PSB_DEBUG_GENERAL("Set Invalid flag (this causes a flush with MMU\n"
+			  "still operating afterwards even if not cleared,\n"
+			  "but may want to replace with MMU_FLUSH?\n");
+#endif
+	TOPAZ_WRITE32(TOPAZ_CR_MMU_CONTROL0, mmu_control, 0);
+
+	/* clear it */
+	mmu_control &= (~F_ENCODE(1, TOPAZ_CR_MMU_INVALDC));
+	/* mmu_control &= (~F_ENCODE(1, TOPAZ_CR_MMU_FLUSH)); */
+	TOPAZ_WRITE32(TOPAZ_CR_MMU_CONTROL0, mmu_control, 0);
+#ifdef CONFIG_MDFD_GL3
+	gl3_invalidate();
+#endif
+}
+
+
+static void pnw_topaz_restore_bias_table(struct drm_psb_private *dev_priv,
+		int core)
+{
+	struct pnw_topaz_private *topaz_priv = dev_priv->topaz_private;
+	u32 *p_command;
+	unsigned int reg_cnt, reg_off, reg_val;
+	u32 cmd_cnt;
+	u32 max_cmd_cnt;
+
+	if (core >= MAX_TOPAZ_CORES ||
+			topaz_priv->topaz_bias_table[core] == NULL) {
+		/*
+		 * If VEC D0i3 isn't enabled, the bias table won't be saved
+		 * in initialization. No need to restore.
+		 */
+		return;
+	}
+
+	/* First word is command header. Ignore */
+	p_command = (u32 *)(topaz_priv->topaz_bias_table[core]
+				+ sizeof(struct topaz_cmd_header));
+	/* Second word indicates how many register write command sets */
+	cmd_cnt = *p_command;
+	max_cmd_cnt = PNW_TOPAZ_BIAS_TABLE_MAX_SIZE -
+		sizeof(struct topaz_cmd_header);
+	max_cmd_cnt /= TOPAZ_WRITEREG_BYTES_PER_SET;
+	if (cmd_cnt > max_cmd_cnt) {
+		DRM_ERROR("%s the number of command sets(%d) is wrong\n",
+				__func__,
+				cmd_cnt);
+		return;
+	}
+	p_command++;
+
+	PSB_DEBUG_GENERAL("TOPAZ: Restore BIAS table(size %d) for core %d\n",
+			cmd_cnt,
+			core);
+
+	for (reg_cnt = 0; reg_cnt < cmd_cnt; reg_cnt++) {
+		reg_off = *p_command;
+		p_command++;
+		reg_val = *p_command;
+		p_command++;
+
+		if (reg_off > TOPAZ_BIASREG_MAX(core) ||
+				reg_off < TOPAZ_BIASREG_MIN(core))
+			DRM_ERROR("TOPAZ: Ignore write (0x%08x)"
+					" to register 0x%08x\n",
+					reg_val, reg_off);
+		else
+			MM_WRITE32(0, reg_off, reg_val);
+	}
+
+	return;
+}
+
+
+int pnw_topaz_restore_mtx_state(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *)dev->dev_private;
+	int32_t core_id;
+	uint32_t *mtx_reg_state;
+	int i, need_restore = 0;
+	struct pnw_topaz_private *topaz_priv = dev_priv->topaz_private;
+	struct psb_video_ctx *pos, *n;
+	unsigned long irq_flags;
+
+	if (!topaz_priv->topaz_mtx_saved)
+		return -1;
+
+	spin_lock_irqsave(&dev_priv->video_ctx_lock, irq_flags);
+	list_for_each_entry_safe(pos, n, &dev_priv->video_ctx, head) {
+		if ((pos->ctx_type & 0xff) == VAEntrypointEncSlice ||
+			(pos->ctx_type & 0xff) == VAEntrypointEncPicture)
+			need_restore = 1;
+	}
+	spin_unlock_irqrestore(&dev_priv->video_ctx_lock, irq_flags);
+
+	if (0 == need_restore) {
+		topaz_priv->topaz_mtx_saved = 0;
+		PSB_DEBUG_GENERAL("topaz: no vec context found. needn't"
+				  " to restore mtx registers.\n");
+		return 0;
+	}
+
+	if (topaz_priv->topaz_fw_loaded == 0) {
+		PSB_DEBUG_GENERAL("TOPAZ: needn't to restore context"
+				  " without firmware uploaded\n");
+		return 0;
+	}
+
+	if (topaz_priv->topaz_mtx_data_mem[0] == NULL) {
+		PSB_DEBUG_GENERAL("TOPAZ: try to restore context"
+				  " without space allocated, return"
+				  " directly without restore\n");
+		return -1;
+	}
+
+	/*TopazSC will be reset, no need to restore context.*/
+	if (topaz_priv->topaz_needs_reset) {
+		PSB_DEBUG_TOPAZ("TOPAZ: Reset. No need to restore context\n");
+		topaz_priv->topaz_mtx_saved = 0;
+		return 0;
+	}
+
+	/*There is no need to restore context for JPEG encoding*/
+	if (PNW_IS_JPEG_ENC(topaz_priv->topaz_cur_codec)) {
+		if (pnw_topaz_setup_fw(dev, topaz_priv->topaz_cur_codec))
+			DRM_ERROR("TOPAZ: Setup JPEG firmware fails!\n");
+		topaz_priv->topaz_mtx_saved = 0;
+		return 0;
+	}
+
+	pnw_topaz_mmu_flushcache(dev_priv);
+
+	for (core_id = 0; core_id < topaz_priv->topaz_num_cores; core_id++)
+		pnw_topaz_mmu_hwsetup(dev_priv, core_id);
+	for (core_id = 0; core_id < topaz_priv->topaz_num_cores; core_id++) {
+		pnw_topazsc_reset_ESB(dev_priv, core_id);
+		MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
+				F_ENCODE(1, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
+				F_ENCODE(1, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
+				F_ENCODE(1, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
+				F_ENCODE(1, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET)
+				|
+				F_ENCODE(1, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
+				F_ENCODE(1, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET),
+				core_id);
+		MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
+				F_ENCODE(0, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
+				F_ENCODE(0, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
+				F_ENCODE(0, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
+				F_ENCODE(0, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET)
+				|
+				F_ENCODE(0, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
+				F_ENCODE(0, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET),
+				core_id);
+	}
+
+	for (core_id = topaz_priv->topaz_num_cores - 1;
+	     core_id >= 0; core_id--) {
+		topaz_set_mtx_target(dev_priv, core_id, 0);
+
+		TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_SRST,
+				F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_MVEA_SOFT_RESET)
+				|
+				F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_IO_SOFT_RESET) |
+				F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_DB_SOFT_RESET) |
+				F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_MTX_SOFT_RESET) |
+				F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_VLC_SOFT_RESET),
+				core_id);
+		TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_SRST,
+			0,
+			core_id);
+
+		topaz_set_mtx_target(dev_priv, core_id, 0);
+		MTX_WRITE32(MTX_CORE_CR_MTX_SOFT_RESET_OFFSET,
+			MTX_CORE_CR_MTX_SOFT_RESET_MTX_RESET_MASK,
+			core_id);
+		MTX_WRITE32(MTX_CORE_CR_MTX_SOFT_RESET_OFFSET,
+			0, core_id);
+
+		/* # upload fw by drm */
+		PSB_DEBUG_GENERAL("TOPAZ: Restore text and data for core %d\n",
+				  core_id);
+		if (0 != mtx_dma_write(dev, core_id))
+			return -1;
+
+		pnw_topaz_mmu_flushcache(dev_priv);
+	}
+
+	for (core_id = topaz_priv->topaz_num_cores - 1;
+			core_id >= 0; core_id--) {
+		topaz_set_mtx_target(dev_priv, core_id, 0);
+
+		mtx_reg_state = topaz_priv->topaz_mtx_reg_state[core_id];
+		/* restore register */
+		/* Saves 8 Registers of D0 Bank  */
+		/* DoRe0, D0Ar6, D0Ar4, D0Ar2, D0FrT, D0.5, D0.6 and D0.7 */
+		for (i = 0; i < 8; i++) {
+			topaz_write_core_reg(dev_priv, core_id, 0x1 | (i << 4),
+					     *mtx_reg_state);
+			mtx_reg_state++;
+		}
+		/* Saves 8 Registers of D1 Bank  */
+		/* D1Re0, D1Ar5, D1Ar3, D1Ar1, D1RtP, D1.5, D1.6 and D1.7 */
+		for (i = 0; i < 8; i++) {
+			topaz_write_core_reg(dev_priv, core_id, 0x2 | (i << 4),
+					     *mtx_reg_state);
+			mtx_reg_state++;
+		}
+		/* Saves 4 Registers of A0 Bank  */
+		/* A0StP, A0FrP, A0.2 and A0.3 */
+		for (i = 0; i < 4; i++) {
+			topaz_write_core_reg(dev_priv, core_id, 0x3 | (i << 4),
+					     *mtx_reg_state);
+			mtx_reg_state++;
+		}
+		/* Saves 4 Registers of A1 Bank  */
+		/* A1GbP, A1LbP, A1.2 and A1.3 */
+		for (i = 0; i < 4; i++) {
+			topaz_write_core_reg(dev_priv, core_id, 0x4 | (i << 4),
+					     *mtx_reg_state);
+			mtx_reg_state++;
+		}
+		/* Saves PC and PCX  */
+		for (i = 0; i < 2; i++) {
+			topaz_write_core_reg(dev_priv, core_id, 0x5 | (i << 4),
+					     *mtx_reg_state);
+			mtx_reg_state++;
+		}
+		/* Saves 8 Control Registers */
+		/* TXSTAT, TXMASK, TXSTATI, TXMASKI, TXPOLL, TXGPIOI, TXPOLLI,
+		 * TXGPIOO */
+		for (i = 0; i < 8; i++) {
+			topaz_write_core_reg(dev_priv, core_id,
+					0x7 | (i << 4),
+					*mtx_reg_state);
+			mtx_reg_state++;
+		}
+		COMMS_WRITE32(TOPAZ_COMMS_CR_STAT_1(0), \
+				*mtx_reg_state++, core_id);
+		COMMS_WRITE32(TOPAZ_COMMS_CR_STAT_0(0), \
+				*mtx_reg_state++, core_id);
+		COMMS_WRITE32(TOPAZ_COMMS_CR_MTX_STATUS(0), \
+				*mtx_reg_state++, core_id);
+		COMMS_WRITE32(TOPAZ_COMMS_CR_CMD_WB_VAL(0), \
+				*mtx_reg_state++, core_id);
+		COMMS_WRITE32(TOPAZ_COMMS_CR_CMD_WB_ADDR(0), \
+				*mtx_reg_state++, core_id);
+		COMMS_WRITE32(TOPAZ_COMMS_CR_CMD_DATA_ADDR(0), \
+				*mtx_reg_state++, core_id);
+		COMMS_WRITE32(TOPAZ_COMMS_CR_CMD_WORD(0), \
+				*mtx_reg_state++, core_id);
+
+		/* enable auto clock is essential for this driver */
+		TOPAZ_WRITE32(TOPAZ_CR_TOPAZ_AUTO_CLK_GATE,
+			      F_ENCODE(1, TOPAZ_CR_TOPAZ_VLC_AUTO_CLK_GATE) |
+			      F_ENCODE(1, TOPAZ_CR_TOPAZ_DB_AUTO_CLK_GATE),
+			      core_id);
+		MVEA_WRITE32(MVEA_CR_MVEA_AUTO_CLOCK_GATING,
+			     F_ENCODE(1, MVEA_CR_MVEA_IPE_AUTO_CLK_GATE) |
+			     F_ENCODE(1, MVEA_CR_MVEA_SPE_AUTO_CLK_GATE) |
+			     F_ENCODE(1, MVEA_CR_MVEA_CMPRS_AUTO_CLK_GATE) |
+			     F_ENCODE(1, MVEA_CR_MVEA_JMCOMP_AUTO_CLK_GATE),
+			     core_id);
+
+		topaz_read_core_reg(dev_priv, core_id,
+			TOPAZ_MTX_PC, &i);
+
+		PSB_DEBUG_GENERAL("TOPAZ: verify pc address for core"
+			" (%d):0x%08x\n",
+			core_id, i);
+
+		/* flush the command FIFO - only has effect on master MTX */
+		if (core_id == 0)
+			TOPAZ_WRITE32(TOPAZ_CR_TOPAZ_CMD_FIFO_2,
+				      F_ENCODE(1, TOPAZ_CR_CMD_FIFO_FLUSH),
+				      0);
+
+		/* clear MTX interrupt */
+		TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTCLEAR,
+			      F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX),
+			      core_id);
+
+		/* put the number of cores in use in the scratch register
+		 * so is is ready when the firmware wakes up. */
+		TOPAZ_WRITE32(TOPAZ_CORE_NUMBER_SET_OFFSET, 2, core_id);
+
+		/* # turn on MTX */
+		topaz_set_mtx_target(dev_priv, core_id, 0);
+		MTX_WRITE32(MTX_CORE_CR_MTX_ENABLE_OFFSET,
+			    MTX_CORE_CR_MTX_ENABLE_MTX_ENABLE_MASK,
+			    core_id);
+
+	}
+
+	if (!PNW_IS_JPEG_ENC(topaz_priv->topaz_cur_codec)) {
+		for (core_id = topaz_priv->topaz_num_cores - 1;
+				core_id >= 0; core_id--) {
+		    /*MPEG4/H263 only use core 0*/
+		    if (!PNW_IS_H264_ENC(topaz_priv->topaz_cur_codec)
+			    && core_id > 0)
+			continue;
+		    pnw_topaz_restore_bias_table(dev_priv, core_id);
+		}
+	}
+
+	PSB_DEBUG_GENERAL("TOPAZ: send NULL command to test firmware\n");
+	topaz_priv->topaz_mtx_saved = 0;
+	return 0;
+}
+
+int pnw_topaz_save_mtx_state(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *)dev->dev_private;
+	uint32_t *mtx_reg_state;
+	int i, need_save = 0;
+	uint32_t data_location, data_size;
+	int core;
+	struct pnw_topaz_codec_fw *cur_codec_fw;
+	struct pnw_topaz_private *topaz_priv = dev_priv->topaz_private;
+	struct psb_video_ctx *pos, *n;
+	unsigned long irq_flags;
+
+	topaz_priv->topaz_mtx_saved = 0;
+
+	/*TopazSC will be reset, no need to save context.*/
+	if (topaz_priv->topaz_needs_reset) {
+		PSB_DEBUG_TOPAZ("TOPAZ: Will be reset\n");
+		return 0;
+	}
+
+	spin_lock_irqsave(&dev_priv->video_ctx_lock, irq_flags);
+	list_for_each_entry_safe(pos, n, &dev_priv->video_ctx, head) {
+		if ((pos->ctx_type & 0xff) == VAEntrypointEncSlice ||
+			(pos->ctx_type & 0xff) == VAEntrypointEncPicture)
+			need_save = 1;
+	}
+	spin_unlock_irqrestore(&dev_priv->video_ctx_lock, irq_flags);
+
+	if (0 == need_save) {
+		PSB_DEBUG_TOPAZ("TOPAZ: vec context not found. No need"
+				  " to save mtx registers.\n");
+		return 0;
+	}
+
+	PSB_DEBUG_INIT("TOPAZ: Found one vec codec(%d)." \
+			  "Need to save mtx registers.\n",
+			topaz_priv->topaz_cur_codec);
+
+	if (topaz_priv->topaz_num_cores > MAX_TOPAZ_CORES) {
+		DRM_ERROR("TOPAZ: Invalid core numbers: %d\n",
+			  topaz_priv->topaz_num_cores);
+		return -1;
+	}
+
+	if (topaz_priv->topaz_fw_loaded == 0) {
+		PSB_DEBUG_GENERAL("TOPAZ: No need to restore context since"
+				  " firmware has not been uploaded.\n");
+		return -1;
+	}
+
+	/*JPEG encoding needn't to save context*/
+	if (PNW_IS_JPEG_ENC(topaz_priv->topaz_cur_codec)) {
+		topaz_priv->topaz_mtx_saved = 1;
+		return 0;
+	}
+
+	for (core = topaz_priv->topaz_num_cores - 1; core >= 0; core--) {
+		if (topaz_priv->topaz_mtx_data_mem[core] == NULL) {
+			PSB_DEBUG_GENERAL("TOPAZ: try to save context "
+					  "without space allocated, return"
+					  " directly without save\n");
+			return -1;
+		}
+
+		topaz_set_mtx_target(dev_priv, core, 0);
+		/* stop mtx */
+		MTX_WRITE32(MTX_CORE_CR_MTX_ENABLE_OFFSET,
+				MTX_CORE_CR_MTX_ENABLE_MTX_TOFF_MASK, core);
+
+		if (core == 0) {
+			if (0 != pnw_topaz_wait_for_register(dev_priv,
+							     REG_START_TOPAZ_MTX_HOST(core)
+							     + MTX_CORE_CR_MTX_TXRPT_OFFSET,
+							     TXRPT_WAITONKICK_VALUE,
+							     0xffffffff)) {
+				DRM_ERROR("TOPAZ: Stop MTX failed!\n");
+				topaz_priv->topaz_needs_reset = 1;
+				return -1;
+			}
+		}
+
+		mtx_reg_state = topaz_priv->topaz_mtx_reg_state[core];
+
+		/* Saves 8 Registers of D0 Bank  */
+		/* DoRe0, D0Ar6, D0Ar4, D0Ar2, D0FrT, D0.5, D0.6 and D0.7 */
+		for (i = 0; i < 8; i++) {
+			topaz_read_core_reg(dev_priv, core, 0x1 | (i << 4),
+					    mtx_reg_state);
+			mtx_reg_state++;
+		}
+		/* Saves 8 Registers of D1 Bank  */
+		/* D1Re0, D1Ar5, D1Ar3, D1Ar1, D1RtP, D1.5, D1.6 and D1.7 */
+		for (i = 0; i < 8; i++) {
+			topaz_read_core_reg(dev_priv, core, 0x2 | (i << 4),
+					    mtx_reg_state);
+			mtx_reg_state++;
+		}
+		/* Saves 4 Registers of A0 Bank  */
+		/* A0StP, A0FrP, A0.2 and A0.3 */
+		for (i = 0; i < 4; i++) {
+			topaz_read_core_reg(dev_priv, core, 0x3 | (i << 4),
+					    mtx_reg_state);
+			mtx_reg_state++;
+		}
+		/* Saves 4 Registers of A1 Bank  */
+		/* A1GbP, A1LbP, A1.2 and A1.3 */
+		for (i = 0; i < 4; i++) {
+			topaz_read_core_reg(dev_priv, core, 0x4 | (i << 4),
+					    mtx_reg_state);
+			mtx_reg_state++;
+		}
+		/* Saves PC and PCX  */
+		for (i = 0; i < 2; i++) {
+			topaz_read_core_reg(dev_priv, core, 0x5 | (i << 4),
+					    mtx_reg_state);
+			mtx_reg_state++;
+		}
+		/* Saves 8 Control Registers */
+		/* TXSTAT, TXMASK, TXSTATI, TXMASKI, TXPOLL, TXGPIOI, TXPOLLI,
+		 * TXGPIOO */
+		for (i = 0; i < 8; i++) {
+			topaz_read_core_reg(dev_priv, core, 0x7 | (i << 4),
+					    mtx_reg_state);
+			mtx_reg_state++;
+		}
+
+		COMMS_READ32(TOPAZ_COMMS_CR_STAT_1(0), \
+				mtx_reg_state++, core);
+		COMMS_READ32(TOPAZ_COMMS_CR_STAT_0(0), \
+				mtx_reg_state++, core);
+		COMMS_READ32(TOPAZ_COMMS_CR_MTX_STATUS(0), \
+				mtx_reg_state++, core);
+		COMMS_READ32(TOPAZ_COMMS_CR_CMD_WB_VAL(0), \
+				mtx_reg_state++, core);
+		COMMS_READ32(TOPAZ_COMMS_CR_CMD_WB_ADDR(0), \
+				mtx_reg_state++, core);
+		COMMS_READ32(TOPAZ_COMMS_CR_CMD_DATA_ADDR(0), \
+				mtx_reg_state++, core);
+		COMMS_READ32(TOPAZ_COMMS_CR_CMD_WORD(0), \
+				mtx_reg_state++, core);
+
+		/* save mtx data memory */
+		if (0 == core) {
+			/*master core*/
+			cur_codec_fw = &topaz_priv->topaz_fw[
+					       topaz_priv->topaz_cur_codec * 2];
+		} else {
+			cur_codec_fw = &topaz_priv->topaz_fw[
+					       topaz_priv->topaz_cur_codec * 2 \
+					       + 1];
+		}
+
+		data_size = topaz_priv->cur_mtx_data_size[core];
+		if (data_size > (MAX_TOPAZ_RAM_SIZE / 4)) {
+			DRM_ERROR("TOPAZ: %s wrong data size %d!\n",
+					__func__, data_size);
+			data_size = MAX_TOPAZ_RAM_SIZE;
+		}
+
+		data_location = cur_codec_fw->data_location
+				& ~(MTX_DMA_BURSTSIZE_BYTES - 1);
+		if (0 != mtx_dma_read(dev, core,
+				      data_location,
+				      data_size)) {
+			DRM_ERROR("TOPAZ: mtx_dma_read failed!\n");
+			return -1;
+		}
+		pnw_topaz_mmu_flushcache(dev_priv);
+	}
+
+	topaz_priv->topaz_mtx_saved = 1;
+	return 0;
+}
+
+int mtx_dma_read(struct drm_device *dev, uint32_t core,
+		 uint32_t source_addr, uint32_t size)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *)dev->dev_private;
+	struct ttm_buffer_object *target;
+	struct pnw_topaz_private *topaz_priv = dev_priv->topaz_private;
+
+	/* setup mtx DMAC registers to do transfer */
+	MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA, source_addr, core);
+	MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC,
+		    F_ENCODE(4, MTX_BURSTSIZE) |
+		    F_ENCODE(1, MTX_RNW) |
+		    F_ENCODE(1, MTX_ENABLE) |
+		    F_ENCODE(size, MTX_LENGTH), core);
+
+	/* give the DMAC access to the host memory via BIF */
+	/*TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1, 0);*/
+
+	target = topaz_priv->topaz_mtx_data_mem[core];
+	/* transfert the data */
+	if (0 != topaz_dma_transfer(dev_priv, 0, target->offset, 0,
+				    MTX_CR_MTX_SYSC_CDMAT,
+				    size, core, 1)) {
+		pnw_error_dump_reg(dev_priv, core);
+		return -1;
+	}
+
+	/* wait for it transfer */
+	if (0 != pnw_topaz_wait_for_register(dev_priv,
+				IMG_SOC_DMAC_IRQ_STAT(0) + DMAC_START,
+				F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
+				F_ENCODE(1, IMG_SOC_TRANSFER_FIN))) {
+		pnw_error_dump_reg(dev_priv, core);
+		return -1;
+	}
+
+	/* clear interrupt */
+	DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
+	/* give access back to topaz core */
+	/*TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 0, 0);*/
+	return 0;
+}
+
+int mtx_dma_write(struct drm_device *dev, uint32_t core_id)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *)dev->dev_private;
+	struct pnw_topaz_private *topaz_priv = dev_priv->topaz_private;
+	struct pnw_topaz_codec_fw *cur_codec_fw;
+	uint32_t text_size, data_size;
+	uint32_t data_location;
+	int ret = 0;
+
+	if (core_id == 0)/*for master core*/
+		cur_codec_fw = &topaz_priv->topaz_fw[
+				       topaz_priv->topaz_cur_codec * 2];
+	else
+		cur_codec_fw = &topaz_priv->topaz_fw[
+				       topaz_priv->topaz_cur_codec * 2 + 1];
+
+	PSB_DEBUG_GENERAL("Topaz: Restore codec %s(%d) text sz=%d data sz=%d\n"
+			  "data location(0x%x) to core(%d).\n",
+			  codec_to_string(topaz_priv->topaz_cur_codec),
+			  topaz_priv->topaz_cur_codec,
+			  cur_codec_fw->text_size, cur_codec_fw->data_size,
+			  cur_codec_fw->data_location, core_id);
+
+	/* # upload text. text_size is byte size*/
+	text_size = cur_codec_fw->text_size / 4;
+	/* adjust transfer sizes of text and data sections to match burst size*/
+	text_size = ((text_size * 4 + (MTX_DMA_BURSTSIZE_BYTES - 1))
+		     & ~(MTX_DMA_BURSTSIZE_BYTES - 1)) / 4;
+
+	PSB_DEBUG_GENERAL("TOPAZ: text_size round up to %d\n", text_size);
+	/* setup the MTX to start recieving data:
+	   use a register for the transfer which will point to the source
+	   (MTX_CR_MTX_SYSC_CDMAT) */
+	/*MTX burst size (4 * 2 * 32bits = 32bytes) should match DMA burst
+	  size (2 * 128bits = 32bytes) */
+	/* #.# fill the dst addr */
+
+	MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA, MTX_DMA_MEMORY_BASE, core_id);
+	MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC,
+		    F_ENCODE(4, MTX_BURSTSIZE) |
+		    F_ENCODE(0, MTX_RNW) |
+		    F_ENCODE(1, MTX_ENABLE) |
+		    F_ENCODE(text_size, MTX_LENGTH), core_id);
+
+	/* #.# set DMAC access to host memory via BIF (deserted)*/
+	/* TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1, core_id);*/
+
+	/* #.# transfer the codec */
+	if (0 != topaz_dma_transfer(dev_priv, 0, cur_codec_fw->text->offset, 0,
+				    MTX_CR_MTX_SYSC_CDMAT,
+				    text_size, core_id, 0)) {
+		pnw_error_dump_reg(dev_priv, core_id);
+		return -1;
+	}
+
+	/* #.# wait dma finish */
+	ret = pnw_topaz_wait_for_register(dev_priv,
+					  DMAC_START + IMG_SOC_DMAC_IRQ_STAT(0),
+					  F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
+					  F_ENCODE(1, IMG_SOC_TRANSFER_FIN));
+	if (ret != 0) {
+		pnw_error_dump_reg(dev_priv, core_id);
+		return -1;
+	}
+
+	/* #.# clear interrupt */
+	DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
+
+	PSB_DEBUG_GENERAL("TOPAZ: firmware text upload complete.\n");
+
+	/* # return access to topaz core (deserted)*/
+	/*TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 0, core_id);*/
+
+	/* # upload data */
+	data_size = topaz_priv->cur_mtx_data_size[core_id];
+	data_location = cur_codec_fw->data_location;
+	data_location = data_location & (~(MTX_DMA_BURSTSIZE_BYTES - 1));
+
+	if (data_size > (MAX_TOPAZ_RAM_SIZE / 4)) {
+		DRM_ERROR("TOPAZ: %s wrong data size %d!\n",
+				__func__, data_size);
+		data_size = MAX_TOPAZ_RAM_SIZE;
+	}
+
+	PSB_DEBUG_GENERAL("TOPAZ: data_size round up to %d\n"
+			"data_location round up to 0x%08x\n",
+			data_size, data_location);
+	/* #.# fill the dst addr */
+	MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA,
+		    data_location, core_id);
+	MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC,
+		    F_ENCODE(4, MTX_BURSTSIZE) |
+		    F_ENCODE(0, MTX_RNW) |
+		    F_ENCODE(1, MTX_ENABLE) |
+		    F_ENCODE(data_size, MTX_LENGTH), core_id);
+	/* #.# set DMAC access to host memory via BIF(deserted) */
+	/*TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1, core_id);*/
+
+	/* #.# transfer the codec */
+	if (0 != topaz_dma_transfer(dev_priv, 0,
+				topaz_priv->topaz_mtx_data_mem[core_id]->offset,
+				0,
+				MTX_CR_MTX_SYSC_CDMAT, data_size, core_id, 0)) {
+		pnw_error_dump_reg(dev_priv, core_id);
+		return -1;
+	}
+
+	/* #.# wait dma finish */
+	ret = pnw_topaz_wait_for_register(dev_priv,
+					  DMAC_START + IMG_SOC_DMAC_IRQ_STAT(0),
+					  F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
+					  F_ENCODE(1, IMG_SOC_TRANSFER_FIN));
+	if (ret != 0) {
+		pnw_error_dump_reg(dev_priv, core_id);
+		return -1;
+	}
+	/* #.# clear interrupt */
+	DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
+
+	PSB_DEBUG_GENERAL("TOPAZ: firmware text upload complete.\n");
+
+	/*TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 0, 0);*/
+	return 0;
+}
+
+
+void pnw_reset_fw_status(struct drm_device *dev, u32 flag)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *)dev->dev_private;
+	struct pnw_topaz_private *topaz_priv = dev_priv->topaz_private;
+	u32 reg;
+
+	/*Before end the session, mark firmware MTX data as invalid.*/
+	if (topaz_priv) {
+		topaz_priv->topaz_mtx_saved = 0;
+		if (flag & PNW_TOPAZ_START_CTX)
+			topaz_priv->topaz_needs_reset = 0;
+		else if (flag & PNW_TOPAZ_END_CTX) {
+			TOPAZ_MTX_WB_READ32(topaz_priv->topaz_sync_addr,
+				0, MTX_WRITEBACK_VALUE, &reg);
+			PSB_DEBUG_TOPAZ("TOPAZ: current fence 0x%08x " \
+				"last writeback 0x%08x\n",
+				dev_priv->sequence[LNC_ENGINE_ENCODE],
+				reg);
+			if (topaz_priv->topaz_needs_reset) {
+				DRM_ERROR("TOPAZ: reset Topaz\n");
+				ospm_apm_power_down_topaz(topaz_priv->dev);
+			}
+		}
+	}
+}
diff --git a/drivers/external_drivers/intel_media/video/encode/tng_topaz.c b/drivers/external_drivers/intel_media/video/encode/tng_topaz.c
new file mode 100644
index 0000000..387f9a8
--- /dev/null
+++ b/drivers/external_drivers/intel_media/video/encode/tng_topaz.c
@@ -0,0 +1,4264 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
+ * Copyright (c) Imagination Technologies Limited, UK
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+/* include headers */
+/* #define DRM_DEBUG_CODE 2 */
+#include <drm/drmP.h>
+#include <drm/drm.h>
+
+#include "psb_drv.h"
+#include "tng_topaz.h"
+#include "psb_powermgmt.h"
+#include "tng_topaz_hw_reg.h"
+/*#include "private_data.h"*/
+
+#include <linux/io.h>
+#include <linux/delay.h>
+
+#define TOPAZ_MAX_COMMAND_IN_QUEUE 0x1000
+#define MASK_MTX_INT_ENAB 0x4000ff00
+
+#define LOOP_COUNT 10000
+
+#define MTX_PC		(0x05)
+
+#define tng__max(a, b) ((a)> (b)) ? (a) : (b)
+#define tng__min(a, b) ((a) < (b)) ? (a) : (b)
+
+/*static uint32_t setv_cnt = 0;*/
+
+enum MTX_MESSAGE_ID {
+	MTX_MESSAGE_ACK,   /* !< (no data)\n Null command does nothing\n */
+	MTX_MESSAGE_CODED, /* !< (no data)\n Null command does nothing\n */
+} ;
+
+/* static function define */
+static int tng_topaz_deliver_command(
+	struct drm_device *dev,
+	struct drm_file *file_priv,
+	struct ttm_buffer_object *cmd_buffer,
+	uint32_t cmd_offset,
+	uint32_t cmd_size,
+	void **topaz_cmd, uint32_t sequence,
+	int copy_cmd);
+
+static int tng_topaz_send(
+	struct drm_device *dev,
+	struct drm_file *file_priv,
+	void *cmd,
+	uint32_t cmd_size,
+	uint32_t sync_seq);
+
+static int tng_topaz_save_command(
+	struct drm_device *dev,
+	struct drm_file *file_priv,
+	void *cmd,
+	uint32_t cmd_size,
+	uint32_t sequence);
+
+static void tng_topaz_getvideo(
+	struct drm_device *dev,
+	struct psb_video_ctx *video_ctx);
+
+static void tng_topaz_setvideo(
+	struct drm_device *dev,
+	struct psb_video_ctx *video_ctx);
+
+void mtx_start(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	mtx_set_target(dev_priv);
+	MTX_WRITE32(MTX_CR_MTX_ENABLE, MASK_MTX_MTX_ENABLE);
+}
+
+void mtx_stop(struct drm_psb_private *dev_priv)
+{
+	mtx_set_target(dev_priv);
+	MTX_WRITE32(MTX_CR_MTX_ENABLE, MASK_MTX_MTX_TOFF);
+}
+
+void mtx_kick(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	mtx_set_target(dev_priv);
+	PSB_DEBUG_TOPAZ("TOPAZ: Kick MTX to start\n");
+	MTX_WRITE32(MTX_CR_MTX_KICK, 1);
+}
+
+void tng_set_consumer(struct drm_device *dev, uint32_t consumer)
+{
+	unsigned int reg_val;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	MULTICORE_READ32(TOPAZHP_TOP_CR_FIRMWARE_REG_1 +
+		(MTX_SCRATCHREG_TOMTX << 2), &reg_val);
+
+	reg_val = F_INSERT(reg_val, consumer, WB_CONSUMER);
+
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_FIRMWARE_REG_1 +
+		(MTX_SCRATCHREG_TOMTX << 2), reg_val);
+}
+
+uint32_t tng_get_consumer(struct drm_device *dev)
+{
+	unsigned int reg_val;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	MULTICORE_READ32(TOPAZHP_TOP_CR_FIRMWARE_REG_1 +
+		(MTX_SCRATCHREG_TOMTX << 2), &reg_val);
+
+	return F_DECODE(reg_val, WB_CONSUMER);
+}
+
+void tng_set_producer(struct drm_device *dev, uint32_t producer)
+{
+	unsigned int reg_val;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	MULTICORE_READ32(TOPAZHP_TOP_CR_FIRMWARE_REG_1 +
+		(MTX_SCRATCHREG_TOHOST << 2), &reg_val);
+
+	reg_val = F_INSERT(reg_val, producer, WB_PRODUCER);
+
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_FIRMWARE_REG_1 +
+		(MTX_SCRATCHREG_TOHOST << 2), reg_val);
+}
+
+uint32_t tng_get_producer(struct drm_device *dev)
+{
+	unsigned int reg_val;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	MULTICORE_READ32(TOPAZHP_TOP_CR_FIRMWARE_REG_1 +
+		(MTX_SCRATCHREG_TOHOST << 2), &reg_val);
+
+	return F_DECODE(reg_val, WB_PRODUCER);
+}
+
+uint32_t tng_wait_for_ctrl(struct drm_device *dev, uint32_t control)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	int32_t ret = 0;
+	ret = tng_topaz_wait_for_register(dev_priv, CHECKFUNC_ISEQUAL,
+		TOPAZHP_TOP_CR_FIRMWARE_REG_1 + (MTX_SCRATCHREG_TOHOST << 2),
+		control, 0x80000000);
+	if (ret)
+		DRM_ERROR("Wait for register timeout");
+
+	return ret;
+}
+
+uint32_t tng_serialize_enter(struct drm_device *dev)
+{
+	uint32_t reg_val;
+	int32_t ret;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	/*
+	* Poll for idle register to tell that both HW
+	* and FW are idle (`FW_IDLE_STATUS_IDLE` state)
+	*/
+	ret = tng_topaz_wait_for_register(dev_priv, CHECKFUNC_ISEQUAL,
+		MTX_SCRATCHREG_IDLE,
+		F_ENCODE(FW_IDLE_STATUS_IDLE, FW_IDLE_REG_STATUS),
+		MASK_FW_IDLE_REG_STATUS);
+	if (ret)
+		DRM_ERROR("Wait for register timeout");
+
+	MULTICORE_READ32(MTX_SCRATCHREG_IDLE, &reg_val);
+
+	return F_EXTRACT(reg_val, FW_IDLE_REG_RECEIVED_COMMANDS);
+}
+
+void tng_serialize_exit(struct drm_device *dev, uint32_t enter_token)
+{
+	int32_t ret;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	/*
+	* Poll for idle register to tell that both HW
+	* and FW are idle (`FW_IDLE_STATUS_IDLE` state)
+	*/
+	ret = tng_topaz_wait_for_register(dev_priv, CHECKFUNC_NOTEQUAL,
+		MTX_SCRATCHREG_IDLE,
+		F_ENCODE(enter_token, FW_IDLE_REG_RECEIVED_COMMANDS),
+		MASK_FW_IDLE_REG_RECEIVED_COMMANDS);
+	if (ret)
+		DRM_ERROR("Wait for register timeout");
+}
+
+static void tng_topaz_Int_clear(
+	struct drm_psb_private *dev_priv,
+	uint32_t intClearMask)
+{
+	unsigned long irq_flags;
+	struct tng_topaz_private *topaz_priv;
+
+	topaz_priv = dev_priv->topaz_private;
+	spin_lock_irqsave(&topaz_priv->topaz_lock, irq_flags);
+	/* clear interrupt */
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_MULTICORE_INT_CLEAR,
+		intClearMask);
+	spin_unlock_irqrestore(&topaz_priv->topaz_lock, irq_flags);
+}
+
+struct psb_video_ctx *get_ctx_from_fp(
+	struct drm_device *dev, struct file *filp)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_video_ctx *pos;
+	unsigned long irq_flags;
+	int entrypoint;
+
+	spin_lock_irqsave(&dev_priv->video_ctx_lock, irq_flags);
+
+	list_for_each_entry(pos, &dev_priv->video_ctx, head) {
+		if (pos->filp == filp) {
+			entrypoint = pos->ctx_type & 0xff;
+			if (entrypoint == VAEntrypointEncSlice ||
+			    entrypoint == VAEntrypointEncPicture) {
+				spin_unlock_irqrestore(
+					&dev_priv->video_ctx_lock,
+					irq_flags);
+				return pos;
+			}
+		}
+	}
+
+	spin_unlock_irqrestore(&dev_priv->video_ctx_lock, irq_flags);
+	return NULL;
+}
+
+uint32_t get_ctx_cnt(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_video_ctx *pos, *n;
+	int count = 0;
+	int entrypoint;
+
+	list_for_each_entry_safe(pos, n, &dev_priv->video_ctx, head) {
+		entrypoint = pos->ctx_type & 0xff;
+		if (entrypoint == VAEntrypointEncSlice ||
+		    entrypoint == VAEntrypointEncPicture)
+			count++;
+	}
+
+	return count;
+}
+
+int32_t dispatch_wb_message_polling(
+	struct drm_device *dev,
+	struct drm_file *file_priv,
+	int32_t sync_seq,
+	unsigned char *command)
+{
+	struct psb_video_ctx *video_ctx;
+	struct tng_topaz_private *topaz_priv;
+	struct drm_psb_private *dev_priv;
+	struct tng_topaz_cmd_header *cur_cmd_header;
+	struct IMG_WRITEBACK_MSG *wb_msg;
+	int32_t ret;
+
+	dev_priv = (struct drm_psb_private *) dev->dev_private;
+	if (!dev_priv) {
+		DRM_ERROR("Failed to get dev_priv\n");
+		return -1;
+	}
+
+	topaz_priv = dev_priv->topaz_private;
+
+	video_ctx = get_ctx_from_fp(dev, file_priv->filp);
+
+	if (video_ctx == NULL) {
+		DRM_ERROR("Failed to get video contex from filp");
+		return -1;
+	}
+
+	topaz_priv->consumer = tng_get_consumer(dev);
+	topaz_priv->producer = tng_get_producer(dev);
+
+	/* Read and compare consumer and producer */
+	if (topaz_priv->producer == topaz_priv->consumer) {
+		PSB_DEBUG_TOPAZ("TOPAZ: producer = consumer = %d",
+			topaz_priv->producer);
+		PSB_DEBUG_TOPAZ("polling producer until change\n");
+		/* if the same -> poll on Producer change */
+		ret = tng_topaz_wait_for_register(dev_priv, CHECKFUNC_NOTEQUAL,
+			TOPAZHP_TOP_CR_FIRMWARE_REG_1 +
+				(MTX_SCRATCHREG_TOHOST << 2),
+			topaz_priv->consumer, MASK_WB_PRODUCER);
+		if (ret) {
+			wb_msg = (struct IMG_WRITEBACK_MSG *)
+				video_ctx->wb_addr[topaz_priv->consumer];
+			DRM_ERROR("Polling timeout, ui32CmdWord = %08x, " \
+				"ui32Data = %08x, ui32ExtraData = %08x, " \
+				"ui32WritebackVal = %08x, " \
+				"ui32CodedBufferConsumed = %08x\n",
+				wb_msg->ui32CmdWord, wb_msg->ui32Data,
+				wb_msg->ui32ExtraData, wb_msg->ui32WritebackVal,
+				wb_msg->ui32CodedBufferConsumed);
+
+			return ret;
+		}
+
+		topaz_priv->producer = tng_get_producer(dev);
+	}
+
+	/* Dispatch new messages */
+	do {
+		PSB_DEBUG_TOPAZ("TOPAZ: Dispatch write back message, " \
+			"producer = %d, consumer = %d\n",
+			topaz_priv->producer, topaz_priv->consumer);
+
+		ret = tng_topaz_wait_for_register(dev_priv, CHECKFUNC_NOTEQUAL,
+			TOPAZHP_TOP_CR_FIRMWARE_REG_1 +
+				(MTX_SCRATCHREG_TOHOST << 2),
+			topaz_priv->consumer, MASK_WB_PRODUCER);
+		if (ret) {
+			DRM_ERROR("Wait for register timeout");
+			return ret;
+		}
+
+		topaz_priv->consumer++;
+		if (topaz_priv->consumer == WB_FIFO_SIZE)
+			topaz_priv->consumer = 0;
+
+		tng_set_consumer(dev, topaz_priv->consumer);
+
+	} while (topaz_priv->consumer != topaz_priv->producer);
+
+	cur_cmd_header = (struct tng_topaz_cmd_header *)command;
+
+	if (cur_cmd_header->id != MTX_CMDID_ENCODE_FRAME &&
+	    cur_cmd_header->id != MTX_CMDID_ISSUEBUFF)
+		return 0;
+
+	PSB_DEBUG_TOPAZ("TOPAZ: Handle context saving/fence " \
+		"handler/dequeue send on ENCODE_FRAME command\n");
+
+	if (get_ctx_cnt(dev) > 1) {
+		PSB_DEBUG_TOPAZ("TOPAZ: More than one context, " \
+			"save current context\n");
+		if (topaz_priv->cur_context->codec != IMG_CODEC_JPEG) {
+			ret = tng_topaz_save_mtx_state(dev);
+			if (ret) {
+				DRM_ERROR("Failed to save mtx status");
+				return ret;
+			}
+		}
+	}
+
+	*topaz_priv->topaz_sync_addr = sync_seq;
+	(topaz_priv->cur_context)->handle_sequence_needed = false;
+	psb_fence_handler(dev, LNC_ENGINE_ENCODE);
+
+	topaz_priv->topaz_busy = 1;
+	tng_topaz_dequeue_send(dev);
+
+	return ret;
+}
+
+int32_t dispatch_wb_message_irq(struct drm_device *dev)
+{
+	struct tng_topaz_private *topaz_priv;
+	struct drm_psb_private *dev_priv;
+	/* uint32_t crMultiCoreIntStat;*/
+	/* struct psb_video_ctx *video_ctx; */
+	/* struct IMG_WRITEBACK_MSG *wb_msg; */
+	int32_t ret;
+	int32_t count = 0;
+
+	dev_priv = (struct drm_psb_private *) dev->dev_private;
+	if (!dev_priv) {
+		DRM_ERROR("Failed to get dev_priv\n");
+		return -1;
+	}
+
+	topaz_priv = dev_priv->topaz_private;
+
+	topaz_priv->consumer = tng_get_consumer(dev);
+
+	do {
+		topaz_priv->producer = tng_get_producer(dev);
+		count++;
+	} while (topaz_priv->producer == topaz_priv->consumer &&
+		 count < 300000);
+
+	if (count == 300000) {
+		DRM_ERROR("Waiting for IRQ timeout\n");
+		return -1;
+	}
+
+	PSB_DEBUG_TOPAZ("TOPAZ: Producer = %d, Consumer = %d\n",
+		topaz_priv->producer, topaz_priv->consumer);
+
+	do {
+		ret = tng_topaz_wait_for_register(dev_priv, CHECKFUNC_NOTEQUAL,
+			TOPAZHP_TOP_CR_FIRMWARE_REG_1 +
+				(MTX_SCRATCHREG_TOHOST << 2),
+			topaz_priv->consumer, MASK_WB_PRODUCER);
+		if (ret)
+			return ret;
+
+		topaz_priv->consumer++;
+		if (topaz_priv->consumer == WB_FIFO_SIZE)
+			topaz_priv->consumer = 0;
+
+		/* Indicate buffer used by consmer is available */
+		tng_set_consumer(dev, topaz_priv->consumer);
+	} while (topaz_priv->consumer != topaz_priv->producer);
+
+	return 0;
+}
+
+int32_t tng_wait_on_sync(
+	struct drm_device *dev,
+	int32_t sync_seq,
+	unsigned long cmd_id)
+{
+	struct tng_topaz_private *topaz_priv;
+	struct drm_psb_private *dev_priv;
+	/* struct IMG_WRITEBACK_MSG *wb_msg; */
+	/* struct tng_topaz_cmd_header *cur_cmd_header; */
+	int32_t ret;
+	int32_t count = 0;
+	/* uint32_t crMultiCoreIntStat; */
+
+	dev_priv = (struct drm_psb_private *) dev->dev_private;
+	if (!dev_priv) {
+		DRM_ERROR("Failed to get dev_priv\n");
+		return -1;
+	}
+
+	topaz_priv = dev_priv->topaz_private;
+
+	topaz_priv->consumer = tng_get_consumer(dev);
+	topaz_priv->producer = tng_get_producer(dev);
+
+#ifdef TOPAZHP_IRQ_ENABLED
+	while (topaz_priv->producer == topaz_priv->consumer &&
+		count < LOOP_COUNT * 1000) {
+		topaz_priv->producer = tng_get_producer(dev);
+		count++;
+	}
+
+	if (count == LOOP_COUNT * 1000) {
+		DRM_ERROR("Sync cmd(%s) timeout\n", \
+			cmd_to_string(cmd_id & (~MTX_CMDID_PRIORITY)));
+		return -1;
+	}
+#else
+	topaz_priv->producer = tng_get_producer(dev);
+
+	/* Read and compare consumer and producer */
+	if (topaz_priv->producer == topaz_priv->consumer) {
+		PSB_DEBUG_TOPAZ("TOPAZ: producer = consumer = %d, " \
+			"polling producer until change\n",
+			topaz_priv->producer);
+		/* if the same -> poll on Producer change */
+		ret = tng_topaz_wait_for_register(dev_priv, CHECKFUNC_NOTEQUAL,
+			TOPAZHP_TOP_CR_FIRMWARE_REG_1 +
+				(MTX_SCRATCHREG_TOHOST << 2),
+			topaz_priv->consumer, MASK_WB_PRODUCER);
+		if (ret) {
+
+			DRM_ERROR("Polling timeout\n");
+
+			return ret;
+		}
+
+		topaz_priv->producer = tng_get_producer(dev);
+	}
+#endif
+	/* Dispatch new messages */
+	do {
+		PSB_DEBUG_TOPAZ("TOPAZ: Dispatch write back message, " \
+			"producer = %d, consumer = %d\n",
+			topaz_priv->producer, topaz_priv->consumer);
+		/*
+		ret = tng_topaz_wait_for_register(dev_priv, CHECKFUNC_NOTEQUAL,
+			TOPAZHP_TOP_CR_FIRMWARE_REG_1 +
+				(MTX_SCRATCHREG_TOHOST << 2),
+			topaz_priv->consumer, MASK_WB_PRODUCER);
+		if (ret)
+			return ret;
+		*/
+
+		topaz_priv->consumer++;
+		if (topaz_priv->consumer == WB_FIFO_SIZE)
+			topaz_priv->consumer = 0;
+
+		/* Indicate buffer used by consmer is available */
+		tng_set_consumer(dev, topaz_priv->consumer);
+	} while (topaz_priv->consumer != topaz_priv->producer);
+
+	/* When IRQ enabled, the belowing code will be called in ISR */
+#ifdef TOPAZHP_IRQ_ENABLED
+	return 0;
+#endif
+
+	if (cmd_id != MTX_CMDID_ENCODE_FRAME &&
+	    cmd_id != MTX_CMDID_ISSUEBUFF)
+		return 0;
+
+	PSB_DEBUG_TOPAZ("TOPAZ: Handle context saving/fence " \
+		"handler/dequeue send on ENCODE_FRAME command\n");
+
+	if (get_ctx_cnt(dev) > 1) {
+		PSB_DEBUG_TOPAZ("TOPAZ: More than one context, " \
+			"save current context\n");
+		if (topaz_priv->cur_context->codec != IMG_CODEC_JPEG) {
+			ret = tng_topaz_save_mtx_state(dev);
+			if (ret) {
+				DRM_ERROR("Failed to save mtx status");
+				return ret;
+			}
+		}
+	}
+
+	*topaz_priv->topaz_sync_addr = sync_seq;
+	(topaz_priv->cur_context)->handle_sequence_needed = false;
+	psb_fence_handler(dev, LNC_ENGINE_ENCODE);
+
+	topaz_priv->topaz_busy = 1;
+	tng_topaz_dequeue_send(dev);
+
+	return ret;
+}
+
+bool tng_topaz_interrupt(void *pvData)
+{
+	struct drm_device *dev;
+	/* struct drm_minor *minor; */
+	struct tng_topaz_private *topaz_priv;
+	struct drm_psb_private *dev_priv;
+	uint32_t crMultiCoreIntStat;
+	struct psb_video_ctx *video_ctx;
+	struct IMG_WRITEBACK_MSG *wb_msg;
+	unsigned long flags;
+
+	if (pvData == NULL) {
+		DRM_ERROR("Invalid params\n");
+		return false;
+	}
+	dev = (struct drm_device *)pvData;
+	/*
+	minor = (struct drm_minor *)container_of(dev, struct drm_minor, dev);
+	file_priv = (struct drm_file *)container_of(minor,
+			struct drm_file, minor);
+	*/
+	/*
+	if (!ospm_power_is_hw_on(OSPM_VIDEO_ENC_ISLAND)) {
+		DRM_ERROR("ERROR: interrupt arrived but HW is power off\n");
+		return false;
+	}
+	*/
+	dev_priv = (struct drm_psb_private *) dev->dev_private;
+	if (!dev_priv) {
+		DRM_ERROR("Failed to get dev_priv\n");
+		return false;
+	}
+
+	topaz_priv = dev_priv->topaz_private;
+
+	MULTICORE_READ32(TOPAZHP_TOP_CR_MULTICORE_INT_STAT,
+			 &crMultiCoreIntStat);
+
+	/* if interrupts enabled and fired */
+	if ((crMultiCoreIntStat & MASK_TOPAZHP_TOP_CR_INT_STAT_MTX) ==
+		MASK_TOPAZHP_TOP_CR_INT_STAT_MTX) {
+		PSB_DEBUG_TOPAZ("TOPAZ: Get MTX interrupt , clear IRQ\n");
+		tng_topaz_Int_clear(dev_priv, MASK_TOPAZHP_TOP_CR_INTCLR_MTX);
+	} else
+		return 0;
+
+	topaz_priv->consumer = tng_get_consumer(dev);
+	topaz_priv->producer = tng_get_producer(dev);
+
+	spin_lock_irqsave(&(topaz_priv->ctx_spinlock), flags);
+	/* Encode ctx has already been destroyed by user-space process */
+	if (NULL == topaz_priv->irq_context) {
+		PSB_DEBUG_TOPAZ("TOPAZ: ctx destroyed before ISR.\n");
+		spin_unlock_irqrestore(&(topaz_priv->ctx_spinlock), flags);
+		return true;
+	}
+
+	video_ctx = topaz_priv->irq_context;
+	wb_msg = (struct IMG_WRITEBACK_MSG *)
+		video_ctx->wb_addr[(topaz_priv->producer == 0) \
+			? 31 \
+			: topaz_priv->producer - 1];
+
+	PSB_DEBUG_TOPAZ("TOPAZ: Dispatch write back message:\n");
+	PSB_DEBUG_TOPAZ("producer = %d, consumer = %d\n",
+		topaz_priv->producer, topaz_priv->consumer);
+
+	while (topaz_priv->consumer != topaz_priv->producer) {
+		topaz_priv->consumer++;
+		if (topaz_priv->consumer == WB_FIFO_SIZE)
+			topaz_priv->consumer = 0;
+		tng_set_consumer(dev, topaz_priv->producer);
+	};
+
+	PSB_DEBUG_TOPAZ("TOPAZ: Context %p(%s):\n",
+			video_ctx, codec_to_string(video_ctx->codec));
+	PSB_DEBUG_TOPAZ("TOPAZ: frame %d, command %s IRQ\n",
+			video_ctx->frame_count,
+			cmd_to_string(wb_msg->ui32CmdWord));
+
+	video_ctx->frame_count++;
+#if 0
+	if (video_ctx->codec == IMG_CODEC_JPEG) {
+		if (wb_msg->ui32CmdWord != MTX_CMDID_NULL) {
+			/* The LAST ISSUEBUF cmd means encoding complete */
+			if (--topaz_priv->issuebuf_cmd_count) {
+				PSB_DEBUG_TOPAZ("TOPAZ: JPEG ISSUEBUF cmd " \
+					  "count left %d, return\n", \
+					  topaz_priv->issuebuf_cmd_count);
+			return true;
+			}
+		} else {
+			return true;
+		}
+	}
+#endif
+	*topaz_priv->topaz_sync_addr = wb_msg->ui32WritebackVal;
+	video_ctx->handle_sequence_needed = false;
+	spin_unlock_irqrestore(&(topaz_priv->ctx_spinlock), flags);
+
+	PSB_DEBUG_TOPAZ("TOPAZ: Set seq %08x, " \
+		"dqueue cmd and schedule other work queue\n",
+		wb_msg->ui32WritebackVal);
+	psb_fence_handler(dev, LNC_ENGINE_ENCODE);
+
+	/* Launch the task anyway */
+	schedule_delayed_work(&topaz_priv->topaz_suspend_work, 0);
+
+	return true;
+}
+
+static int tng_submit_encode_cmdbuf(struct drm_device *dev,
+				    struct drm_file *file_priv,
+				    struct ttm_buffer_object *cmd_buffer,
+				    uint32_t cmd_offset, uint32_t cmd_size,
+				    struct ttm_fence_object *fence)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	int ret = 0;
+	void *cmd;
+	uint32_t sequence = dev_priv->sequence[LNC_ENGINE_ENCODE];
+	struct tng_topaz_private *topaz_priv = dev_priv->topaz_private;
+	struct psb_video_ctx *video_ctx = NULL;
+
+	PSB_DEBUG_TOPAZ("TOPAZ: command submit, topaz busy = %d\n",
+		topaz_priv->topaz_busy);
+
+	if (topaz_priv->topaz_fw_loaded == 0) {
+		/* #.# load fw to driver */
+		PSB_DEBUG_TOPAZ("TOPAZ:load /lib/firmware/topazhp_fw.bin\n");
+		if (Is_Secure_Fw())
+			ret = tng_securefw(dev, "topaz", "VEC",
+					   TNG_IMR6L_MSG_REGADDR);
+		else
+			ret = tng_rawfw(dev, "topaz");
+		if (ret) {
+			/* FIXME: find a proper return value */
+			DRM_ERROR("TOPAX:load firmware from storage failed\n");
+			return -EFAULT;
+		}
+		topaz_priv->topaz_fw_loaded = 1;
+	}
+
+	/* # if topaz need to reset, reset it */
+	if (topaz_priv->topaz_needs_reset) {
+		/* #.# reset it */
+		PSB_DEBUG_TOPAZ("TOPAZ: needs reset.\n");
+
+		tng_topaz_reset(dev_priv);
+
+		PSB_DEBUG_TOPAZ("TOPAZ: reset ok.\n");
+
+		if (Is_Secure_Fw() == 0) {
+			video_ctx = get_ctx_from_fp(dev, file_priv->filp);
+			if (!video_ctx) {
+				DRM_ERROR("Failed to get context from fp\n");
+				return -1;
+			}
+
+			/* #.# upload firmware */
+			ret = tng_topaz_setup_fw(dev, video_ctx,
+						 topaz_priv->cur_codec);
+			if (ret) {
+				DRM_ERROR("TOPAZ: upload FW to HW failed\n");
+				return ret;
+			}
+		}
+	}
+
+	if (!topaz_priv->topaz_busy) {
+		/* # direct map topaz command if topaz is free */
+		PSB_DEBUG_TOPAZ("TOPAZ:direct send command,sequence %08x\n",
+				  sequence);
+
+		ret = tng_topaz_deliver_command(dev, file_priv,
+			cmd_buffer, cmd_offset, cmd_size, NULL, sequence, 0);
+
+		if (ret) {
+			DRM_ERROR("TOPAZ: failed to extract cmd...\n");
+			return ret;
+		}
+	} else {
+		PSB_DEBUG_TOPAZ("TOPAZ: queue command of sequence %08x\n",
+				  sequence);
+		cmd = NULL;
+
+		ret = tng_topaz_deliver_command(dev, file_priv,
+			cmd_buffer, cmd_offset, cmd_size, &cmd, sequence, 1);
+		if (cmd == NULL || ret) {
+			DRM_ERROR("TOPAZ: map command for save fialed\n");
+			return ret;
+		}
+
+		ret = tng_topaz_save_command(dev, file_priv,
+			cmd, cmd_size, sequence);
+		if (ret)
+			DRM_ERROR("TOPAZ: save command failed\n");
+	}
+
+	return ret;
+}
+
+static int tng_topaz_save_command(
+	struct drm_device *dev,
+	struct drm_file *file_priv,
+	void *cmd,
+	uint32_t cmd_size,
+	uint32_t sequence)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct tng_topaz_cmd_queue *topaz_cmd;
+	struct tng_topaz_private *topaz_priv = dev_priv->topaz_private;
+
+	PSB_DEBUG_TOPAZ("TOPAZ: queue command,sequence: %08x..\n",
+		sequence);
+
+	topaz_cmd = kzalloc(sizeof(struct tng_topaz_cmd_queue),
+			    GFP_KERNEL);
+	if (!topaz_cmd) {
+		DRM_ERROR("TOPAZ: out of memory....\n");
+		return -ENOMEM;
+	}
+
+	topaz_cmd->file_priv = file_priv;
+	topaz_cmd->cmd = cmd;
+	topaz_cmd->cmd_size = cmd_size;
+	topaz_cmd->sequence = sequence;
+
+	/* Avoid race condition with dequeue buffer in kernel task */
+	mutex_lock(&topaz_priv->topaz_mutex);
+	list_add_tail(&topaz_cmd->head, &topaz_priv->topaz_queue);
+	mutex_unlock(&topaz_priv->topaz_mutex);
+
+	if (!topaz_priv->topaz_busy) {
+		/* topaz_priv->topaz_busy = 1; */
+		PSB_DEBUG_TOPAZ("TOPAZ: need immediate dequeue...\n");
+		tng_topaz_dequeue_send(dev);
+		PSB_DEBUG_TOPAZ("TOPAZ: after dequeue command\n");
+	}
+
+	return 0;
+}
+
+int tng_cmdbuf_video(struct drm_file *file_priv,
+		     struct list_head *validate_list,
+		     uint32_t fence_type,
+		     struct drm_psb_cmdbuf_arg *arg,
+		     struct ttm_buffer_object *cmd_buffer,
+		     struct psb_ttm_fence_rep *fence_arg)
+{
+	struct drm_device *dev = file_priv->minor->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct ttm_fence_object *fence = NULL;
+	int32_t ret = 0;
+
+	PSB_DEBUG_TOPAZ("TOPAZ : enter %s cmdsize: %d\n", __func__,
+			  arg->cmdbuf_size);
+
+	ret = tng_submit_encode_cmdbuf(dev, file_priv, cmd_buffer,
+		arg->cmdbuf_offset, arg->cmdbuf_size, fence);
+	if (ret)
+		return ret;
+
+	/* workaround for interrupt issue */
+	psb_fence_or_sync(file_priv, LNC_ENGINE_ENCODE,
+		fence_type, arg->fence_flags,
+		validate_list, fence_arg, &fence);
+	PSB_DEBUG_TOPAZ("TOPAZ : current fence 0x%08x\n",
+		dev_priv->sequence[LNC_ENGINE_ENCODE]);
+	if (fence)
+		ttm_fence_object_unref(&fence);
+
+	spin_lock(&cmd_buffer->bdev->fence_lock);
+	if (cmd_buffer->sync_obj != NULL)
+		ttm_fence_sync_obj_unref(&cmd_buffer->sync_obj);
+	spin_unlock(&cmd_buffer->bdev->fence_lock);
+
+	PSB_DEBUG_TOPAZ("TOPAZ exit %s\n", __func__);
+	return ret;
+}
+
+#define SHIFT_MTX_CMDWORD_ID    (0)
+#define MASK_MTX_CMDWORD_ID     (0xff << SHIFT_MTX_CMDWORD_ID)
+#define SHIFT_MTX_CMDWORD_CORE  (8)
+#define MASK_MTX_CMDWORD_CORE   (0xff << SHIFT_MTX_CMDWORD_CORE)
+#define SHIFT_MTX_CMDWORD_COUNT (16)
+#define MASK_MTX_CMDWORD_COUNT  (0xffff << SHIFT_MTX_CMDWORD_COUNT)
+
+#define SHIFT_MTX_WBWORD_ID    (0)
+#define MASK_MTX_WBWORD_ID     (0xff << SHIFT_MTX_WBWORD_ID)
+#define SHIFT_MTX_WBWORD_CORE  (8)
+#define MASK_MTX_WBWORD_CORE   (0xff << SHIFT_MTX_WBWORD_CORE)
+
+#if 0
+static int tng_error_dump_reg(struct drm_psb_private *dev_priv)
+{
+	uint32_t reg_val;
+	PSB_DEBUG_TOPAZ("MULTICORE Registers:\n");
+	MULTICORE_READ32(0x00, &reg_val);
+	PSB_DEBUG_TOPAZ("MULTICORE_SRST %08x\n", reg_val);
+	MULTICORE_READ32(0x04, &reg_val);
+	PSB_DEBUG_TOPAZ("MULTICORE_INT_STAT %08x\n", reg_val);
+	MULTICORE_READ32(0x08, &reg_val);
+	PSB_DEBUG_TOPAZ("MULTICORE_MTX_INT_ENAB %08x\n", reg_val);
+	MULTICORE_READ32(0x0C, &reg_val);
+	PSB_DEBUG_TOPAZ("MULTICORE_HOST_INT_ENAB %08x\n", reg_val);
+	MULTICORE_READ32(0x10, &reg_val);
+	PSB_DEBUG_TOPAZ("MULTICORE_INT_CLEAR %08x\n", reg_val);
+	MULTICORE_READ32(0x14, &reg_val);
+	PSB_DEBUG_TOPAZ("MULTICORE_MAN_CLK_GATE %08x\n", reg_val);
+	MULTICORE_READ32(0x18, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZ_MTX_C_RATIO %08x\n", reg_val);
+	MULTICORE_READ32(0x1c, &reg_val);
+	PSB_DEBUG_TOPAZ("MMU_STATUS %08x\n", reg_val);
+	MULTICORE_READ32(0x1c, &reg_val);
+	PSB_DEBUG_TOPAZ("MMU_STATUS %08x\n", reg_val);
+	MULTICORE_READ32(0x20, &reg_val);
+	PSB_DEBUG_TOPAZ("MMU_MEM_REQ %08x\n", reg_val);
+	MULTICORE_READ32(0x24, &reg_val);
+	PSB_DEBUG_TOPAZ("MMU_CONTROL0 %08x\n", reg_val);
+	MULTICORE_READ32(0x28, &reg_val);
+	PSB_DEBUG_TOPAZ("MMU_CONTROL1 %08x\n", reg_val);
+	MULTICORE_READ32(0x2c , &reg_val);
+	PSB_DEBUG_TOPAZ("MMU_CONTROL2 %08x\n", reg_val);
+	MULTICORE_READ32(0x30, &reg_val);
+	PSB_DEBUG_TOPAZ("MMU_DIR_LIST_BASE %08x\n", reg_val);
+	MULTICORE_READ32(0x38, &reg_val);
+	PSB_DEBUG_TOPAZ("MMU_TILE %08x\n", reg_val);
+	MULTICORE_READ32(0x44, &reg_val);
+	PSB_DEBUG_TOPAZ("MTX_DEBUG_MSTR %08x\n", reg_val);
+	MULTICORE_READ32(0x48, &reg_val);
+	PSB_DEBUG_TOPAZ("MTX_DEBUG_SLV %08x\n", reg_val);
+	MULTICORE_READ32(0x50, &reg_val);
+	PSB_DEBUG_TOPAZ("MULTICORE_CORE_SEL_0 %08x\n", reg_val);
+	MULTICORE_READ32(0x54, &reg_val);
+	PSB_DEBUG_TOPAZ("MULTICORE_CORE_SEL_1 %08x\n", reg_val);
+	MULTICORE_READ32(0x58, &reg_val);
+	PSB_DEBUG_TOPAZ("MULTICORE_HW_CFG %08x\n", reg_val);
+	MULTICORE_READ32(0x60, &reg_val);
+	PSB_DEBUG_TOPAZ("MULTICORE_CMD_FIFO_WRITE %08x\n", reg_val);
+	MULTICORE_READ32(0x64, &reg_val);
+	PSB_DEBUG_TOPAZ("MULTICORE_CMD_FIFO_WRITE_SPACE %08x\n", reg_val);
+	MULTICORE_READ32(0x70, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZ_CMD_FIFO_READ %08x\n", reg_val);
+	MULTICORE_READ32(0x74, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZ_CMD_FIFO_READ_AVAILABLE %08x\n", reg_val);
+	MULTICORE_READ32(0x78, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZ_CMD_FIFO_FLUSH %08x\n", reg_val);
+	MULTICORE_READ32(0x80, &reg_val);
+	PSB_DEBUG_TOPAZ("MMU_TILE_EXT %08x\n", reg_val);
+	MULTICORE_READ32(0x100, &reg_val);
+	PSB_DEBUG_TOPAZ("FIRMWARE_REG_1 %08x\n", reg_val);
+	MULTICORE_READ32(0x104, &reg_val);
+	PSB_DEBUG_TOPAZ("FIRMWARE_REG_2 %08x\n", reg_val);
+	MULTICORE_READ32(0x108, &reg_val);
+	PSB_DEBUG_TOPAZ("FIRMWARE_REG_3 %08x\n", reg_val);
+	MULTICORE_READ32(0x110, &reg_val);
+	PSB_DEBUG_TOPAZ("CYCLE_COUNTER %08x\n", reg_val);
+	MULTICORE_READ32(0x114, &reg_val);
+	PSB_DEBUG_TOPAZ("CYCLE_COUNTER_CTRL %08x\n", reg_val);
+	MULTICORE_READ32(0x118, &reg_val);
+	PSB_DEBUG_TOPAZ("MULTICORE_IDLE_PWR_MAN %08x\n", reg_val);
+	MULTICORE_READ32(0x124, &reg_val);
+	PSB_DEBUG_TOPAZ("DIRECT_BIAS_TABLE %08x\n", reg_val);
+	MULTICORE_READ32(0x128, &reg_val);
+	PSB_DEBUG_TOPAZ("INTRA_BIAS_TABLE %08x\n", reg_val);
+	MULTICORE_READ32(0x12c, &reg_val);
+	PSB_DEBUG_TOPAZ("INTER_BIAS_TABLE %08x\n", reg_val);
+	MULTICORE_READ32(0x130, &reg_val);
+	PSB_DEBUG_TOPAZ("INTRA_SCALE_TABLE %08x\n", reg_val);
+	MULTICORE_READ32(0x134, &reg_val);
+	PSB_DEBUG_TOPAZ("QPCB_QPCR_OFFSET %08x\n", reg_val);
+	MULTICORE_READ32(0x140, &reg_val);
+	PSB_DEBUG_TOPAZ("INTER_INTRA_SCALE_TABLE %08x\n", reg_val);
+	MULTICORE_READ32(0x144, &reg_val);
+	PSB_DEBUG_TOPAZ("SKIPPED_CODED_SCALE_TABLE %08x\n", reg_val);
+	MULTICORE_READ32(0x148, &reg_val);
+	PSB_DEBUG_TOPAZ("POLYNOM_ALPHA_COEFF_CORE0 %08x\n", reg_val);
+	MULTICORE_READ32(0x14c, &reg_val);
+	PSB_DEBUG_TOPAZ("POLYNOM_GAMMA_COEFF_CORE0 %08x\n", reg_val);
+	MULTICORE_READ32(0x150, &reg_val);
+	PSB_DEBUG_TOPAZ("POLYNOM_CUTOFF_CORE0 %08x\n", reg_val);
+	MULTICORE_READ32(0x154, &reg_val);
+	PSB_DEBUG_TOPAZ("POLYNOM_ALPHA_COEFF_CORE1 %08x\n", reg_val);
+	MULTICORE_READ32(0x158, &reg_val);
+	PSB_DEBUG_TOPAZ("POLYNOM_GAMMA_COEFF_CORE1 %08x\n", reg_val);
+	MULTICORE_READ32(0x15c, &reg_val);
+	PSB_DEBUG_TOPAZ("POLYNOM_CUTOFF_CORE1 %08x\n", reg_val);
+	MULTICORE_READ32(0x300, &reg_val);
+	PSB_DEBUG_TOPAZ("FIRMWARE_REG_4 %08x\n", reg_val);
+	MULTICORE_READ32(0x304, &reg_val);
+	PSB_DEBUG_TOPAZ("FIRMWARE_REG_5 %08x\n", reg_val);
+	MULTICORE_READ32(0x308, &reg_val);
+	PSB_DEBUG_TOPAZ("FIRMWARE_REG_6 %08x\n", reg_val);
+	MULTICORE_READ32(0x30c, &reg_val);
+	PSB_DEBUG_TOPAZ("FIRMWARE_REG_7 %08x\n", reg_val);
+	MULTICORE_READ32(0x3b0, &reg_val);
+	PSB_DEBUG_TOPAZ("MULTICORE_RSVD0 %08x\n", reg_val);
+
+	PSB_DEBUG_TOPAZ("TopazHP Core Registers:\n");
+	TOPAZCORE_READ32(0, 0x0, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZHP_SRST %08x\n", reg_val);
+	TOPAZCORE_READ32(0, 0x4, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZHP_INTSTAT %08x\n", reg_val);
+	TOPAZCORE_READ32(0, 0x8, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZHP_MTX_INTENAB %08x\n", reg_val);
+	TOPAZCORE_READ32(0, 0xc, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZHP_HOST_INTENAB %08x\n", reg_val);
+	TOPAZCORE_READ32(0, 0x10, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZHP_INTCLEAR %08x\n", reg_val);
+	TOPAZCORE_READ32(0, 0x14, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZHP_INT_COMB_SEL %08x\n", reg_val);
+	TOPAZCORE_READ32(0, 0x18, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZHP_BUSY %08x\n", reg_val);
+	TOPAZCORE_READ32(0, 0x24, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZHP_AUTO_CLOCK_GATING %08x\n", reg_val);
+	TOPAZCORE_READ32(0, 0x28, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZHP_MAN_CLOCK_GATING %08x\n", reg_val);
+	TOPAZCORE_READ32(0, 0x30, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZHP_RTM %08x\n", reg_val);
+	TOPAZCORE_READ32(0, 0x34, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZHP_RTM_VALUE %08x\n", reg_val);
+	TOPAZCORE_READ32(0, 0x38, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZHP_MB_PERFORMANCE_RESULT %08x\n", reg_val);
+	TOPAZCORE_READ32(0, 0x3c, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZHP_MB_PERFORMANCE_MB_NUMBER %08x\n", reg_val);
+	TOPAZCORE_READ32(0, 0x188, &reg_val);
+	PSB_DEBUG_TOPAZ("FIELD_PARITY %08x\n", reg_val);
+	TOPAZCORE_READ32(0, 0x3d0, &reg_val);
+	PSB_DEBUG_TOPAZ("WEIGHTED_PRED_CONTROL %08x\n", reg_val);
+	TOPAZCORE_READ32(0, 0x3d4, &reg_val);
+	PSB_DEBUG_TOPAZ("WEIGHTED_PRED_COEFFS %08x\n", reg_val);
+	TOPAZCORE_READ32(0, 0x3e0, &reg_val);
+	PSB_DEBUG_TOPAZ("WEIGHTED_PRED_INV_WEIGHT %08x\n", reg_val);
+	TOPAZCORE_READ32(0, 0x3f0, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZHP_RSVD0 %08x\n", reg_val);
+	TOPAZCORE_READ32(0, 0x3f4, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZHP_CRC_CLEAR %08x\n", reg_val);
+
+
+	PSB_DEBUG_TOPAZ("MTX Registers:\n");
+	MTX_READ32(0x00, &reg_val);
+	PSB_DEBUG_TOPAZ("MTX_ENABLE %08x\n", reg_val);
+	MTX_READ32(0x08, &reg_val);
+	PSB_DEBUG_TOPAZ("MTX_STATUS %08x\n", reg_val);
+	MTX_READ32(0x80, &reg_val);
+	PSB_DEBUG_TOPAZ("MTX_KICK %08x\n", reg_val);
+	MTX_READ32(0x88, &reg_val);
+	PSB_DEBUG_TOPAZ("MTX_KICKI %08x\n", reg_val);
+	MTX_READ32(0x90, &reg_val);
+	PSB_DEBUG_TOPAZ("MTX_FAULT0 %08x\n", reg_val);
+	MTX_READ32(0xf8, &reg_val);
+	PSB_DEBUG_TOPAZ("MTX_REGISTER_READ_WRITE_DATA %08x\n", reg_val);
+	MTX_READ32(0xfc, &reg_val);
+	PSB_DEBUG_TOPAZ("MTX_REGISTER_READ_WRITE_REQUEST %08x\n", reg_val);
+	MTX_READ32(0x100, &reg_val);
+	PSB_DEBUG_TOPAZ("MTX_RAM_ACCESS_DATA_EXCHANGE %08x\n", reg_val);
+	MTX_READ32(0x104, &reg_val);
+	PSB_DEBUG_TOPAZ("MTX_RAM_ACCESS_DATA_TRANSFER %08x\n", reg_val);
+	MTX_READ32(0x108, &reg_val);
+	PSB_DEBUG_TOPAZ("MTX_RAM_ACCESS_CONTROL %08x\n", reg_val);
+	MTX_READ32(0x10c, &reg_val);
+	PSB_DEBUG_TOPAZ("MTX_RAM_ACCESS_STATUS %08x\n", reg_val);
+	MTX_READ32(0x200, &reg_val);
+	PSB_DEBUG_TOPAZ("MTX_SOFT_RESET %08x\n", reg_val);
+	MTX_READ32(0x340, &reg_val);
+	PSB_DEBUG_TOPAZ("MTX_SYSC_CDMAC %08x\n", reg_val);
+	MTX_READ32(0x344, &reg_val);
+	PSB_DEBUG_TOPAZ("MTX_SYSC_CDMAA %08x\n", reg_val);
+	MTX_READ32(0x348, &reg_val);
+	PSB_DEBUG_TOPAZ("MTX_SYSC_CDMAS0 %08x\n", reg_val);
+	MTX_READ32(0x34c, &reg_val);
+	PSB_DEBUG_TOPAZ("MTX_SYSC_CDMAS1 %08x\n", reg_val);
+	MTX_READ32(0x350, &reg_val);
+	PSB_DEBUG_TOPAZ("MTX_SYSC_CDMAT %08x\n", reg_val);
+
+	PSB_DEBUG_TOPAZ("DMA Registers:\n");
+	DMAC_READ32(0x00, &reg_val);
+	PSB_DEBUG_TOPAZ("DMA_Setup_n %08x\n", reg_val);
+	DMAC_READ32(0x04, &reg_val);
+	PSB_DEBUG_TOPAZ("DMA_Count_n %08x\n", reg_val);
+	DMAC_READ32(0x08, &reg_val);
+	PSB_DEBUG_TOPAZ(" DMA_Peripheral_param_n %08x\n", reg_val);
+	DMAC_READ32(0x0C, &reg_val);
+	PSB_DEBUG_TOPAZ("DMA_IRQ_Stat_n %08x\n", reg_val);
+	DMAC_READ32(0x10, &reg_val);
+	PSB_DEBUG_TOPAZ("DMA_2D_Mode_n %08x\n", reg_val);
+	DMAC_READ32(0x14, &reg_val);
+	PSB_DEBUG_TOPAZ("DMA_Peripheral_addr_n %08x\n", reg_val);
+	DMAC_READ32(0x18, &reg_val);
+	PSB_DEBUG_TOPAZ("DMA_Per_hold %08x\n", reg_val);
+	return 0;
+}
+#endif
+
+int tng_topaz_deliver_command(struct drm_device *dev,
+			      struct drm_file *file_priv,
+			      struct ttm_buffer_object *cmd_buffer,
+			      uint32_t cmd_offset, uint32_t cmd_size,
+			      void **topaz_cmd, uint32_t sequence,
+			      int copy_cmd)
+{
+	unsigned long cmd_page_offset = cmd_offset & ~PAGE_MASK;
+	struct ttm_bo_kmap_obj cmd_kmap;
+	bool is_iomem;
+	int ret;
+	unsigned char *cmd_start, *tmp;
+
+	if (cmd_size > (cmd_buffer->num_pages << PAGE_SHIFT)) {
+		DRM_ERROR("Command size %d is bigger than " \
+			"command buffer size %d", cmd_size,
+			(uint32_t)cmd_buffer->num_pages << PAGE_SHIFT);
+		return -EINVAL;
+	}
+
+	ret = ttm_bo_kmap(cmd_buffer, cmd_offset >> PAGE_SHIFT, 2,
+			  &cmd_kmap);
+	if (ret) {
+		DRM_ERROR("TOPAZ: drm_bo_kmap failed: %d\n", ret);
+		return ret;
+	}
+	cmd_start = (unsigned char *) ttm_kmap_obj_virtual(&cmd_kmap,
+		    &is_iomem) + cmd_page_offset;
+
+	if (copy_cmd) {
+		tmp = kzalloc(cmd_size, GFP_KERNEL);
+		if (tmp == NULL) {
+			ret = -ENOMEM;
+			goto out;
+		}
+		memcpy(tmp, cmd_start, cmd_size);
+		*topaz_cmd = tmp;
+	} else {
+		PSB_DEBUG_TOPAZ("TOPAZ: directly send the command\n");
+		ret = tng_topaz_send(dev, file_priv,
+			cmd_start, cmd_size, sequence);
+		if (ret) {
+			DRM_ERROR("TOPAZ: commit commands failed.\n");
+			ret = -EINVAL;
+		}
+	}
+
+out:
+	ttm_bo_kunmap(&cmd_kmap);
+
+	return ret;
+}
+
+#if 0
+static int32_t tng_topaz_wait_for_completion(struct drm_psb_private *dev_priv)
+{
+	int32_t ret = 0;
+
+	mtx_set_target(dev_priv);
+
+	ret = tng_topaz_wait_for_register(dev_priv, CHECKFUNC_ISEQUAL,
+		REG_OFFSET_TOPAZ_MTX + MTX_CR_MTX_ENABLE,
+		MASK_MTX_MTX_TOFF,
+		MASK_MTX_MTX_TOFF | MASK_MTX_MTX_ENABLE);
+	if (ret)
+		DRM_ERROR("TOPAZ: Wait for MTX completion time out\n");
+
+	return ret;
+}
+#endif
+
+static int32_t tng_restore_bias_table(
+	struct drm_device *dev,
+	struct psb_video_ctx *video_ctx)
+{
+	/* bool is_iomem; */
+	uint32_t i;
+	uint32_t *p_command;
+	uint32_t reg_id;
+	uint32_t reg_off;
+	uint32_t reg_val;
+	uint32_t size;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	int32_t ret = 0;
+
+	p_command = video_ctx->bias_reg;
+
+	size = *p_command++;
+
+	PSB_DEBUG_TOPAZ("TOPAZ: Restore BIAS %d regs of ctx %p(%s)\n",
+			size - 1, video_ctx,
+			codec_to_string(video_ctx->codec));
+	for (i = 0; i < size; i++) {
+		reg_id = *p_command;
+		p_command++;
+		reg_off = *p_command;
+		p_command++;
+		reg_val = *p_command;
+		p_command++;
+
+		switch (reg_id) {
+		case TOPAZ_MULTICORE_REG:
+			MULTICORE_WRITE32(reg_off, reg_val);
+			break;
+		case TOPAZ_CORE_REG:
+			TOPAZCORE_WRITE32(0, reg_off, reg_val);
+			break;
+		case TOPAZ_VLC_REG:
+			VLC_WRITE32(0, reg_off, reg_val);
+			break;
+		default:
+			DRM_ERROR("Unknown reg space id: %08x\n", reg_id);
+			/* ttm_bo_kunmap(&tmp_kmap); */
+			return -1;
+		}
+	}
+
+	/* ttm_bo_kunmap(&tmp_kmap); */
+
+	return ret;
+}
+
+int32_t tng_topaz_restore_mtx_state(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *)dev->dev_private;
+	uint32_t *mtx_reg_state;
+	int i, need_restore = 0;
+	struct tng_topaz_private *topaz_priv = dev_priv->topaz_private;
+	struct psb_video_ctx *pos, *n;
+
+	/* struct ttm_bo_kmap_obj tmp_kmap; */
+	/* bool is_iomem; */
+	int32_t ret = 0;
+	uint32_t num_pipes;
+	struct psb_video_ctx *video_ctx = topaz_priv->cur_context;
+
+	/*if (!topaz_priv->topaz_mtx_saved)
+		return -1;
+	*/
+
+	list_for_each_entry_safe(pos, n, &dev_priv->video_ctx, head) {
+		if ((pos->ctx_type & 0xff) == VAEntrypointEncSlice ||
+			(pos->ctx_type & 0xff) == VAEntrypointEncPicture)
+			need_restore = 1;
+	}
+
+	if (0 == need_restore) {
+		topaz_priv->topaz_mtx_saved = 0;
+		PSB_DEBUG_TOPAZ("topaz: no vec context found. needn't" \
+			" to restore mtx registers.\n");
+		return ret;
+	}
+
+	if (topaz_priv->topaz_fw_loaded == 0) {
+		PSB_DEBUG_TOPAZ("TOPAZ: needn't to restore context" \
+			" without firmware uploaded\n");
+		return ret;
+	}
+
+	/*TopazSC will be reset, no need to restore context.*/
+	if (topaz_priv->topaz_needs_reset)
+		return ret;
+	/*There is no need to restore context for JPEG encoding*/
+	/*
+	if (TNG_IS_JPEG_ENC(topaz_priv->cur_codec)) {
+		if (tng_topaz_setup_fw(dev, 0, topaz_priv->cur_codec))
+			DRM_ERROR("TOPAZ: Setup JPEG firmware fails!\n");
+		topaz_priv->topaz_mtx_saved = 0;
+		return 0;
+	}
+	*/
+
+	MULTICORE_READ32(TOPAZHP_TOP_CR_MULTICORE_HW_CFG, &num_pipes);
+	num_pipes = num_pipes & MASK_TOPAZHP_TOP_CR_NUM_CORES_SUPPORTED;
+
+	/* Clear registers used for Host-FW communications */
+	MULTICORE_WRITE32(MTX_SCRATCHREG_IDLE, 0);
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_FIRMWARE_REG_1 + (MTX_SCRATCHREG_BOOTSTATUS << 2), 0);
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_FIRMWARE_REG_1 + (MTX_SCRATCHREG_TOHOST << 2), 0);
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_FIRMWARE_REG_1 + (MTX_SCRATCHREG_TOMTX  << 2), 0);
+
+	mtx_set_target(dev_priv);
+
+	/* write the topaz reset bits */
+	/*1) Disable MTX by writing one to the MTX_TOFF
+	field of the MTX_ENABLE register*/
+	MTX_WRITE32(MTX_CR_MTX_ENABLE,
+		    MASK_MTX_MTX_TOFF);
+
+	/* 2) Software reset MTX only by writing 0x1
+	then 0x0 to the MULTICORE_SRST register */
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_MULTICORE_SRST, 1);
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_MULTICORE_SRST, 0);
+
+	/* 3) Software reset MTX, cores, and IO by writing 0x7
+	then 0x0 to the MULTICORE_SRST register */
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_MULTICORE_SRST,
+		F_ENCODE(1, TOPAZHP_TOP_CR_IMG_TOPAZ_CORE_SOFT_RESET) |
+		F_ENCODE(1, TOPAZHP_TOP_CR_IMG_TOPAZ_IO_SOFT_RESET) |
+		F_ENCODE(1, TOPAZHP_TOP_CR_IMG_TOPAZ_MTX_SOFT_RESET));
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_MULTICORE_SRST, 0);
+
+	MTX_WRITE32(MTX_CR_MTX_SOFT_RESET, MASK_MTX_MTX_RESET);
+
+	PSB_UDELAY(6);
+
+	MTX_WRITE32(MTX_CR_MTX_SOFT_RESET, 0);
+
+	PSB_DEBUG_TOPAZ("TOPAZ: Restore status of context %p(%s)\n",
+			video_ctx, codec_to_string(video_ctx->codec));
+
+	/* restore register */
+	mtx_reg_state = (uint32_t *)(topaz_priv->topaz_mtx_reg_state[0]);
+
+	/* Restore the MMU Control Registers */
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_MMU_DIR_LIST_BASE(0),
+			  *mtx_reg_state);
+	mtx_reg_state++;
+
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_MMU_TILE(0),
+			  *mtx_reg_state);
+	mtx_reg_state++;
+
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_MMU_TILE(1),
+			  *mtx_reg_state);
+	mtx_reg_state++;
+
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_MMU_CONTROL2, *mtx_reg_state);
+	mtx_reg_state++;
+
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_MMU_CONTROL1, *mtx_reg_state);
+	mtx_reg_state++;
+
+	/* we do not want to run in secre FW mode so write a place 
+	 * holder to the FIFO that the firmware will know to ignore */
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_MULTICORE_CMD_FIFO_WRITE,
+			  TOPAZHP_NON_SECURE_FW_MARKER);
+
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_MMU_CONTROL0, *mtx_reg_state);
+	mtx_reg_state++;
+
+	/* CARC registers */
+	for (i = 0; i < num_pipes; i++) {
+		TOPAZCORE_WRITE32(i, INTEL_JMCMP_CF_TOTAL,
+			*mtx_reg_state);
+		mtx_reg_state++;
+	}
+
+	for (i = 0; i < num_pipes; i++) {
+		TOPAZCORE_WRITE32(i, TOPAZHP_CR_TOPAZHP_AUTO_CLOCK_GATING,
+			*mtx_reg_state);
+		mtx_reg_state++;
+		TOPAZCORE_WRITE32(i, TOPAZHP_CR_TOPAZHP_MAN_CLOCK_GATING,
+			*mtx_reg_state);
+		mtx_reg_state++;
+	}
+
+	ret = mtx_upload_fw(dev, video_ctx->codec, video_ctx);
+	if (ret) {
+		DRM_ERROR("Failed to upload firmware for codec %s\n",
+			codec_to_string(video_ctx->codec));
+			/* tng_error_dump_reg(dev_priv); */
+		return ret;
+	}
+
+	/* ttm_bo_kunmap(&tmp_kmap); */
+
+	/* Turn on MTX */
+	mtx_start(dev);
+
+	/* Kick the MTX to get things running */
+	mtx_kick(dev);
+
+	/* topaz_priv->topaz_mtx_saved = 0; */
+	PSB_DEBUG_TOPAZ("TOPAZ: Restore MTX status return\n");
+
+	video_ctx->status &= ~MASK_TOPAZ_CONTEXT_SAVED;
+
+#ifdef TOPAZHP_IRQ_ENABLED
+	psb_irq_preinstall_islands(dev, OSPM_VIDEO_ENC_ISLAND);
+	psb_irq_postinstall_islands(dev, OSPM_VIDEO_ENC_ISLAND);
+
+	tng_topaz_enableirq(dev);
+#endif
+	tng_topaz_setvideo(dev, video_ctx);
+
+	ret = tng_restore_bias_table(dev, video_ctx);
+	if (ret) {
+		DRM_ERROR("Failed to restore BIAS table");
+		goto out;
+	}
+out:
+	/* ttm_bo_kunmap(&tmp_kmap); */
+	return ret;
+}
+
+int32_t tng_topaz_restore_mtx_state_b0(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *)dev->dev_private;
+	uint32_t *mtx_reg_state;
+	int i, need_restore = 0;
+	struct tng_topaz_private *topaz_priv = dev_priv->topaz_private;
+	struct psb_video_ctx *pos, *n;
+
+	/* struct ttm_bo_kmap_obj tmp_kmap; */
+	/* bool is_iomem; */
+	int32_t ret = 0;
+	uint32_t num_pipes;
+	struct psb_video_ctx *video_ctx = topaz_priv->cur_context;
+
+	/*if (!topaz_priv->topaz_mtx_saved)
+		return -1;
+	*/
+
+	list_for_each_entry_safe(pos, n, &dev_priv->video_ctx, head) {
+		if ((pos->ctx_type & 0xff) == VAEntrypointEncSlice ||
+			(pos->ctx_type & 0xff) == VAEntrypointEncPicture)
+			need_restore = 1;
+	}
+
+	if (0 == need_restore) {
+		topaz_priv->topaz_mtx_saved = 0;
+		PSB_DEBUG_TOPAZ("topaz: no vec context found. needn't" \
+			" to restore mtx registers.\n");
+		return ret;
+	}
+
+	if (topaz_priv->topaz_fw_loaded == 0) {
+		PSB_DEBUG_TOPAZ("TOPAZ: needn't to restore context" \
+			" without firmware uploaded\n");
+		return ret;
+	}
+
+	if (topaz_priv->topaz_mtx_reg_state[0] == NULL) {
+		PSB_DEBUG_TOPAZ("TOPAZ: try to restore context" \
+			" without space allocated, return" \
+			" directly without restore\n");
+		ret = -1;
+		return ret;
+	}
+
+	/*TopazSC will be reset, no need to restore context.*/
+	if (topaz_priv->topaz_needs_reset)
+		return ret;
+	/*There is no need to restore context for JPEG encoding*/
+	/*
+	if (TNG_IS_JPEG_ENC(topaz_priv->cur_codec)) {
+		if (tng_topaz_setup_fw(dev, 0, topaz_priv->cur_codec))
+			DRM_ERROR("TOPAZ: Setup JPEG firmware fails!\n");
+		topaz_priv->topaz_mtx_saved = 0;
+		return 0;
+	}
+	*/
+	ret = intel_mid_msgbus_read32(PUNIT_PORT, VEC_SS_PM0);
+	PSB_DEBUG_TOPAZ("Read R(0x%08x) V(0x%08x)\n",
+		VEC_SS_PM0, ret);
+	PSB_DEBUG_TOPAZ("TOPAZ: Restore status of context %p(%s)\n",
+			video_ctx, codec_to_string(video_ctx->codec));
+
+	MULTICORE_READ32(TOPAZHP_TOP_CR_MULTICORE_HW_CFG, &num_pipes);
+	num_pipes = num_pipes & MASK_TOPAZHP_TOP_CR_NUM_CORES_SUPPORTED;
+
+	PSB_DEBUG_TOPAZ("TOPAZ: Restore status of pipe (%d)\n",
+		num_pipes);
+#ifdef MRFLD_B0_DEBUG
+	/* repeat fw_run's steps */
+	/* clock gating */
+	reg_val = F_ENCODE(1, TOPAZHP_CR_TOPAZHP_VLC_AUTO_CLK_GATE) |
+		F_ENCODE(1, TOPAZHP_CR_TOPAZHP_DEB_AUTO_CLK_GATE) |
+		F_ENCODE(1, TOPAZHP_CR_TOPAZHP_IPE0_AUTO_CLK_GATE) |
+		F_ENCODE(1, TOPAZHP_CR_TOPAZHP_IPE1_AUTO_CLK_GATE) |
+		F_ENCODE(1, TOPAZHP_CR_TOPAZHP_SPE0_AUTO_CLK_GATE) |
+		F_ENCODE(1, TOPAZHP_CR_TOPAZHP_SPE1_AUTO_CLK_GATE) |
+		F_ENCODE(1, TOPAZHP_CR_TOPAZHP_H264COMP4X4_AUTO_CLK_GATE) |
+		F_ENCODE(1, TOPAZHP_CR_TOPAZHP_H264COMP8X8_AUTO_CLK_GATE)|
+		F_ENCODE(1, TOPAZHP_CR_TOPAZHP_H264COMP16X16_AUTO_CLK_GATE) |
+		F_ENCODE(1, TOPAZHP_CR_TOPAZHP_JMCOMP_AUTO_CLK_GATE)|
+		F_ENCODE(1, TOPAZHP_CR_TOPAZHP_PC_DM_AUTO_CLK_GATE) |
+		F_ENCODE(1, TOPAZHP_CR_TOPAZHP_PC_DMS_AUTO_CLK_GATE)|
+		F_ENCODE(1, TOPAZHP_CR_TOPAZHP_CABAC_AUTO_CLK_GATE);
+
+	TOPAZCORE_WRITE32(0, TOPAZHP_CR_TOPAZHP_AUTO_CLOCK_GATING, reg_val);
+#endif
+
+	/* restore register */
+	mtx_reg_state = (uint32_t *)(topaz_priv->topaz_mtx_reg_state[0]);
+
+	/* Restore the MMU Control Registers */
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_MMU_DIR_LIST_BASE(0),
+		*mtx_reg_state);
+#ifdef MRFLD_B0_DEBUG
+	PSB_DEBUG_TOPAZ("TOPAZ Restore: MMU_DIR_LIST_BASE(0) == 0x%08x\n",
+		*mtx_reg_state);
+#endif
+	mtx_reg_state++;
+
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_MMU_TILE(0), *mtx_reg_state);
+#ifdef MRFLD_B0_DEBUG
+	PSB_DEBUG_TOPAZ("TOPAZ Restore: CR_MMU_TILE(0) == 0x%08x\n",
+		*mtx_reg_state);
+#endif
+	mtx_reg_state++;
+
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_MMU_TILE(1), *mtx_reg_state);
+#ifdef MRFLD_B0_DEBUG
+	PSB_DEBUG_TOPAZ("TOPAZ Restore: CR_MMU_TILE(1) == 0x%08x\n",
+		*mtx_reg_state);
+#endif
+	mtx_reg_state++;
+
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_MMU_CONTROL2, *mtx_reg_state);
+#ifdef MRFLD_B0_DEBUG
+	PSB_DEBUG_TOPAZ("TOPAZ Restore: CR_MMU_CONTROL2 == 0x%08x\n",
+		*mtx_reg_state);
+#endif
+
+	mtx_reg_state++;
+
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_MMU_CONTROL1, *mtx_reg_state);
+#ifdef MRFLD_B0_DEBUG
+	PSB_DEBUG_TOPAZ("TOPAZ Restore: CR_MMU_CONTROL1 == 0x%08x\n",
+		*mtx_reg_state);
+#endif
+	mtx_reg_state++;
+
+	/* we do not want to run in secre FW mode so write a place 
+	 * holder to the FIFO that the firmware will know to ignore */
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_MULTICORE_CMD_FIFO_WRITE,
+		*mtx_reg_state);
+#ifdef MRFLD_B0_DEBUG
+	PSB_DEBUG_TOPAZ("TOPAZ Restore: CMD_FIFO_WRITE == 0x%08x\n",
+		*mtx_reg_state);
+#endif
+#ifdef MRFLD_B0_DEBUG
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_MULTICORE_CMD_FIFO_WRITE,
+		TOPAZHP_NON_SECURE_FW_MARKER);
+	PSB_DEBUG_TOPAZ("TOPAZ Restore: CMD_FIFO_WRITE == 0x%08x\n",
+		*mtx_reg_state);
+#endif
+
+	/* MULTICORE_WRITE32(TOPAZHP_TOP_CR_MMU_CONTROL0, *mtx_reg_state); */
+	mtx_reg_state++;
+
+	/* CARC registers */
+	for (i = 0; i < num_pipes; i++) {
+		TOPAZCORE_WRITE32(i, INTEL_JMCMP_CF_TOTAL, *mtx_reg_state);
+#ifdef MRFLD_B0_DEBUG
+		PSB_DEBUG_TOPAZ("TOPAZ Restore: JMCMP_CF_TOTAL == 0x%08x\n",
+			*mtx_reg_state);
+#endif
+		mtx_reg_state++;
+	}
+
+	for (i = 0; i < num_pipes; i++) {
+		TOPAZCORE_WRITE32(i, TOPAZHP_CR_TOPAZHP_AUTO_CLOCK_GATING,
+			*mtx_reg_state);
+#ifdef MRFLD_B0_DEBUG
+		PSB_DEBUG_TOPAZ("TOPAZ Restore: AUTO_CLOCK_GATING == 0x%08x\n",
+			*mtx_reg_state);
+#endif
+		mtx_reg_state++;
+		TOPAZCORE_WRITE32(i, TOPAZHP_CR_TOPAZHP_MAN_CLOCK_GATING,
+			*mtx_reg_state);
+#ifdef MRFLD_B0_DEBUG
+		PSB_DEBUG_TOPAZ("TOPAZ Restore: MAN_CLOCK_GATING == 0x%08x\n",
+			*mtx_reg_state);
+#endif
+		mtx_reg_state++;
+	}
+
+	/* ttm_bo_kunmap(&tmp_kmap); */
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_MULTICORE_CORE_SEL_0, 0);
+	/* Turn on MTX */
+	mtx_start(dev);
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_MULTICORE_CORE_SEL_0, 0);
+	/* Kick the MTX to get things running */
+	mtx_kick(dev);
+	ret = tng_topaz_wait_for_register(
+		dev_priv, CHECKFUNC_ISEQUAL,
+		TOPAZHP_TOP_CR_FIRMWARE_REG_1 + (MTX_SCRATCHREG_BOOTSTATUS<<2),
+		TOPAZHP_FW_BOOT_SIGNAL, 0xffffffff);
+	if (ret) {
+		DRM_ERROR("Restore Firmware failed\n");
+		return ret;
+	}
+
+	tng_topaz_mmu_flushcache(dev_priv);
+
+#ifdef TOPAZHP_IRQ_ENABLED
+	psb_irq_preinstall_islands(dev, OSPM_VIDEO_ENC_ISLAND);
+	psb_irq_postinstall_islands(dev, OSPM_VIDEO_ENC_ISLAND);
+
+	tng_topaz_enableirq(dev);
+#endif
+
+	ret = tng_restore_bias_table(dev, video_ctx);
+	if (ret) {
+		DRM_ERROR("Failed to restore BIAS table");
+		goto out;
+	}
+
+	tng_topaz_setvideo(dev, video_ctx);
+
+	video_ctx->status &= ~MASK_TOPAZ_CONTEXT_SAVED;
+	/* topaz_priv->topaz_mtx_saved = 0; */
+	PSB_DEBUG_TOPAZ("TOPAZ: Restore MTX status return\n");
+out:
+	/* ttm_bo_kunmap(&tmp_kmap); */
+	return ret;
+}
+
+
+static int  tng_poll_hw_inactive(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	int32_t ret = 0;
+	ret = tng_topaz_wait_for_register(dev_priv, CHECKFUNC_ISEQUAL,
+		MTX_SCRATCHREG_IDLE,
+		F_ENCODE(FW_IDLE_STATUS_IDLE, FW_IDLE_REG_STATUS),
+		MASK_FW_IDLE_REG_STATUS);
+	if (ret)
+		DRM_ERROR("Wait for register timeout");
+
+	return ret;
+}
+
+static int mtx_wait_for_completion(struct drm_psb_private *dev_priv)
+{
+	int32_t ret;
+
+	mtx_set_target(dev_priv);
+
+	ret = tng_topaz_wait_for_register(dev_priv, CHECKFUNC_ISEQUAL,
+		REG_OFFSET_TOPAZ_MTX + MTX_CR_MTX_ENABLE,
+		MASK_MTX_MTX_TOFF,
+		MASK_MTX_MTX_TOFF | MASK_MTX_MTX_ENABLE);
+	if (ret)
+		DRM_ERROR("TOPAZ: Wait for MTX completion time out\n");
+
+	return ret;
+}
+
+int tng_topaz_save_mtx_state(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *)dev->dev_private;
+	uint32_t *mtx_reg_state;
+	int i, need_save = 0;
+	struct tng_topaz_private *topaz_priv = dev_priv->topaz_private;
+	struct psb_video_ctx *pos, *n;
+	/* struct ttm_bo_kmap_obj tmp_kmap; */
+	struct psb_video_ctx *video_ctx;
+	/* bool is_iomem; */
+	uint32_t num_pipes;
+	int32_t ret = 0;
+	unsigned long irq_flags;
+
+	PSB_DEBUG_TOPAZ("tng_topaz_save_mtx_state: start\n");
+
+#ifdef TOPAZHP_IRQ_ENABLED
+	spin_lock_irqsave(&topaz_priv->topaz_lock, irq_flags);
+	spin_lock(&topaz_priv->ctx_spinlock);
+
+	/* In case the context has been removed in
+	 * tng_topaz_remove_ctx()
+	*/
+	video_ctx = topaz_priv->irq_context;
+	if (!video_ctx || !video_ctx->wb_bo) {
+		PSB_DEBUG_TOPAZ("TOPAZ: Context %p has "\
+				"been released, bypass saving context\n",
+			video_ctx);
+		goto out;
+	}
+#else
+	if (topaz_priv->cur_context) {
+		video_ctx = topaz_priv->cur_context;
+	} else {
+		DRM_ERROR("Invalid context\n");
+		return -1;
+	}
+#endif
+	/* topaz_priv->topaz_mtx_saved = 0; */
+	list_for_each_entry_safe(pos, n, &dev_priv->video_ctx, head) {
+		if ((pos->ctx_type & 0xff) == VAEntrypointEncSlice ||
+			(pos->ctx_type & 0xff) == VAEntrypointEncPicture)
+			need_save = 1;
+	}
+
+	if (0 == need_save) {
+		PSB_DEBUG_TOPAZ("TOPAZ: vec context not found. No need" \
+			" to save mtx registers.\n");
+		goto out;
+	}
+
+	/*TopazSC will be reset, no need to save context.*/
+	if (topaz_priv->topaz_needs_reset)
+		goto out;
+
+	if (topaz_priv->topaz_fw_loaded == 0) {
+		PSB_DEBUG_TOPAZ("TOPAZ: No need to restore context since" \
+			" firmware has not been uploaded.\n");
+		ret = -1;
+		goto out;
+	}
+
+	/*JPEG encoding needn't to save context*/
+	if (TNG_IS_JPEG_ENC(topaz_priv->cur_codec)) {
+		PSB_DEBUG_TOPAZ("TOPAZ: Bypass context saving for JPEG\n");
+		topaz_priv->topaz_mtx_saved = 1;
+		goto out;
+	}
+
+	tng_topaz_mmu_flushcache(dev_priv);
+
+	tng_topaz_getvideo(dev, video_ctx);
+
+	MULTICORE_READ32(TOPAZHP_TOP_CR_MULTICORE_HW_CFG, &num_pipes);
+	num_pipes = num_pipes & MASK_TOPAZHP_TOP_CR_NUM_CORES_SUPPORTED;
+
+	mtx_set_target(dev_priv);
+
+	ret = tng_poll_hw_inactive(dev);
+	if (ret)
+		goto out;
+
+	/* Turn off MTX */
+	mtx_stop(dev_priv);
+	ret = mtx_wait_for_completion(dev_priv);
+	if (ret) {
+		DRM_ERROR("Mtx wait for completion error");
+		goto out;
+	}
+
+	mtx_reg_state = (uint32_t *)(topaz_priv->topaz_mtx_reg_state[0]);
+
+	/* Save the MMU Control Registers */
+	MULTICORE_READ32(TOPAZHP_TOP_CR_MMU_DIR_LIST_BASE(0), mtx_reg_state);
+	mtx_reg_state++;
+
+	MULTICORE_READ32(TOPAZHP_TOP_CR_MMU_TILE(0), mtx_reg_state);
+	mtx_reg_state++;
+
+	MULTICORE_READ32(TOPAZHP_TOP_CR_MMU_TILE(1), mtx_reg_state);
+	mtx_reg_state++;
+
+	MULTICORE_READ32(TOPAZHP_TOP_CR_MMU_CONTROL2, mtx_reg_state);
+	mtx_reg_state++;
+
+	MULTICORE_READ32(TOPAZHP_TOP_CR_MMU_CONTROL1, mtx_reg_state);
+	mtx_reg_state++;
+
+	MULTICORE_READ32(TOPAZHP_TOP_CR_MMU_CONTROL0, mtx_reg_state);
+	mtx_reg_state++;
+
+	/* CARC registers */
+	for (i = 0; i < num_pipes; i++) {
+		TOPAZCORE_READ32(i, INTEL_JMCMP_CF_TOTAL, mtx_reg_state);
+		mtx_reg_state++;
+	}
+
+	for (i = 0; i < num_pipes; i++) {
+		TOPAZCORE_READ32(i, TOPAZHP_CR_TOPAZHP_AUTO_CLOCK_GATING,
+			mtx_reg_state);
+		mtx_reg_state++;
+		TOPAZCORE_READ32(i, TOPAZHP_CR_TOPAZHP_MAN_CLOCK_GATING,
+			mtx_reg_state);
+		mtx_reg_state++;
+	}
+
+	video_ctx->status |= MASK_TOPAZ_CONTEXT_SAVED;
+
+out:
+#ifdef TOPAZHP_IRQ_ENABLED
+	spin_unlock(&topaz_priv->ctx_spinlock);
+	spin_unlock_irqrestore(&topaz_priv->topaz_lock, irq_flags);
+#endif
+	/* topaz_priv->topaz_mtx_saved = 1; */
+	PSB_DEBUG_TOPAZ("TOPAZ: Save MTX status return\n");
+	/* ttm_bo_kunmap(&tmp_kmap); */
+	return ret;
+}
+
+struct file *tng_get_context_fp(
+	struct drm_psb_private *dev_priv,
+	struct drm_file *file_priv)
+{
+	struct file *current_context = NULL;
+	struct psb_video_ctx *pos, *n;
+
+	list_for_each_entry_safe(pos, n, &dev_priv->video_ctx, head) {
+		if (pos->filp == file_priv->filp)
+			current_context = pos->filp;
+	}
+
+	return current_context;
+}
+
+static int tng_save_bias_table(
+	struct drm_device *dev,
+	struct drm_file *file_priv,
+	const void *cmd)
+{
+	/* bool is_iomem; */
+	uint32_t *reg_saving_ptr;
+	uint32_t size;
+	/* struct ttm_bo_kmap_obj tmp_kmap; */
+	uint32_t *p_command;
+	struct psb_video_ctx *video_ctx;
+	int32_t ret = 0;
+
+	p_command = (uint32_t *)cmd;
+
+	video_ctx = get_ctx_from_fp(dev, file_priv->filp);
+	if (video_ctx == NULL) {
+		DRM_ERROR("Failed to get video contex from filp");
+		ret = -1;
+		goto out;
+	}
+
+	reg_saving_ptr = video_ctx->bias_reg;
+
+	p_command++;
+	/* Register count */
+	size = *reg_saving_ptr = *p_command;
+	PSB_DEBUG_TOPAZ("TOPAZ: Save BIAS table %d registers " \
+			"for context %p\n", size, video_ctx);
+
+	p_command++;
+	reg_saving_ptr++;
+
+	memcpy(reg_saving_ptr, p_command, size * 3 * 4);
+
+	/* ttm_bo_kunmap(&tmp_kmap); */
+
+out:
+	return ret;
+}
+
+#if 0
+/*
+ * Check contxt status and assign ID for new context
+ * Return -1 on error
+ */
+static int tng_check_context_status(
+	struct drm_device *dev,
+	struct drm_file *file_priv,
+	uint32_t reg_handle,
+	uint32_t data_handle,
+	uint32_t codec,
+	uint32_t *buf_idx)
+{
+	return 0;
+}
+
+/*
+ * If current context issued MTX_CMDID_SHUTDOWN command, mark the ctx_status,
+ * clean related reg/data BO, write back BO. Return -1 on the last context.
+ */
+static int32_t tng_release_context(
+	struct drm_device *dev,
+	struct drm_file *file_priv,
+	uint32_t cur_codec)
+{
+	struct psb_video_ctx *video_ctx;
+
+	video_ctx = get_ctx_from_fp(dev, file_priv->filp);
+	if (video_ctx == NULL) {
+		DRM_ERROR("Failed to get video contex from filp");
+		return -1;
+	}
+
+	if (cur_codec != IMG_CODEC_JPEG) {
+		PSB_DEBUG_TOPAZ("TOPAZ: Free bias saving memory\n");
+		if (video_ctx->bias_reg) {
+			kfree(video_ctx->bias_reg);
+			video_ctx->bias_reg = NULL;
+		}
+
+		video_ctx->status |= MASK_TOPAZ_FIRMWARE_EXIT;
+	} else {
+		PSB_DEBUG_TOPAZ("TOPAZ: JPEG bypass unmap " \
+				"reg/data saving BO\n");
+	}
+
+	return 0;
+}
+#endif
+
+int tng_topaz_kick_null_cmd(struct drm_device *dev,
+			    uint32_t sync_seq)
+{
+	uint32_t cur_free_space;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct tng_topaz_private *topaz_priv = dev_priv->topaz_private;
+	int32_t ret = 0;
+
+#ifdef TOPAZHP_SERIALIZED
+	uint32_t serializeToken;
+	serializeToken = tng_serialize_enter(dev);
+#endif
+
+	MULTICORE_READ32(TOPAZHP_TOP_CR_MULTICORE_CMD_FIFO_WRITE_SPACE,
+			 &cur_free_space);
+
+	cur_free_space = F_DECODE(cur_free_space,
+			TOPAZHP_TOP_CR_CMD_FIFO_SPACE);
+
+	while (cur_free_space < 4) {
+		POLL_TOPAZ_FREE_FIFO_SPACE(4, 100, 10000, &cur_free_space);
+		if (ret) {
+			DRM_ERROR("TOPAZ : error ret %d\n", ret);
+			return ret;
+		}
+
+		MULTICORE_READ32(
+			TOPAZHP_TOP_CR_MULTICORE_CMD_FIFO_WRITE_SPACE,
+			&cur_free_space);
+
+		cur_free_space = F_DECODE(cur_free_space,
+				TOPAZHP_TOP_CR_CMD_FIFO_SPACE);
+	}
+
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_MULTICORE_CMD_FIFO_WRITE,
+		MTX_CMDID_NULL | MTX_CMDID_WB_INTERRUPT);
+
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_MULTICORE_CMD_FIFO_WRITE,
+		0);
+
+	/* Write back address is always 0 */
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_MULTICORE_CMD_FIFO_WRITE,
+		0);
+
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_MULTICORE_CMD_FIFO_WRITE,
+		sync_seq);
+
+	PSB_DEBUG_TOPAZ("TOPAZ: Write to command FIFO:\n");
+	PSB_DEBUG_TOPAZ("%08x, %08x, %08x, %08x\n",
+		MTX_CMDID_NULL, 0, 0, 0);
+
+	/* Notify ISR which context trigger interrupt */
+#ifdef TOPAZHP_IRQ_ENABLED
+	topaz_priv->irq_context = topaz_priv->cur_context;
+#endif
+	mtx_kick(dev);
+
+#ifdef TOPAZHP_SERIALIZED
+	tng_serialize_exit(dev, serializeToken);
+#endif
+
+	return ret;
+}
+
+int mtx_write_FIFO(
+	struct drm_device *dev,
+	struct tng_topaz_cmd_header *cmd_header,
+	uint32_t param,
+	uint32_t param_addr,
+	uint32_t sync_seq)
+{
+	uint32_t cur_free_space;
+	uint32_t wb_val;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct tng_topaz_private *topaz_priv = dev_priv->topaz_private;
+	int32_t ret = 0;
+	uint32_t cmdword = 0;
+	struct psb_video_ctx *video_ctx = NULL;
+
+	if (topaz_priv->cur_context) {
+		video_ctx = topaz_priv->cur_context;
+	} else {
+		DRM_ERROR("Invalid video context\n");
+		return -1;
+	}
+
+#ifdef TOPAZHP_SERIALIZED
+	uint32_t serializeToken;
+	serializeToken = tng_serialize_enter(dev);
+#endif
+	MULTICORE_READ32(TOPAZHP_TOP_CR_MULTICORE_CMD_FIFO_WRITE_SPACE,
+			 &cur_free_space);
+
+	cur_free_space = F_DECODE(cur_free_space,
+			TOPAZHP_TOP_CR_CMD_FIFO_SPACE);
+
+	while (cur_free_space < 4) {
+		POLL_TOPAZ_FREE_FIFO_SPACE(4, 100, 10000, &cur_free_space);
+		if (ret) {
+			DRM_ERROR("TOPAZ : error ret %d\n", ret);
+			return ret;
+		}
+
+		MULTICORE_READ32(
+			TOPAZHP_TOP_CR_MULTICORE_CMD_FIFO_WRITE_SPACE,
+			&cur_free_space);
+
+		cur_free_space = F_DECODE(cur_free_space,
+				TOPAZHP_TOP_CR_CMD_FIFO_SPACE);
+	}
+
+	cmdword = F_ENCODE(0, MTX_MSG_CORE) | cmd_header->id;
+
+	if (cmd_header->id & MTX_CMDID_PRIORITY) {
+		video_ctx->high_cmd_count++;
+		cmdword |= F_ENCODE(1, MTX_MSG_PRIORITY) |
+			   F_ENCODE(((video_ctx->low_cmd_count - 1) & 0xff) |
+			   (video_ctx->high_cmd_count << 8), MTX_MSG_COUNT);
+
+	} else {
+		cmdword |= F_ENCODE(video_ctx->low_cmd_count & 0xff, MTX_MSG_COUNT);
+	}
+
+	cmd_header->val = cmdword;
+
+	/* Trigger interrupt on MTX_CMDID_ENCODE_FRAME cmd */
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_MULTICORE_CMD_FIFO_WRITE,
+				  cmd_header->val);
+
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_MULTICORE_CMD_FIFO_WRITE,
+			  param);
+
+	/* Write back address is always 0 */
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_MULTICORE_CMD_FIFO_WRITE,
+			  param_addr);
+
+	if (cmd_header->id & MTX_CMDID_PRIORITY) {
+		/* prepare Writeback value */
+		wb_val = video_ctx->high_cmd_count << 24;
+	} else {
+		wb_val = video_ctx->low_cmd_count << 16;
+		video_ctx->low_cmd_count++;
+	}
+
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_MULTICORE_CMD_FIFO_WRITE,
+			  wb_val);
+	PSB_DEBUG_TOPAZ("TOPAZ: Write to command FIFO: " \
+		"%08x, %08x, %08x, %08x\n",
+		cmd_header->val, param, param_addr, wb_val);
+
+	/* Notify ISR which context trigger interrupt */
+#ifdef TOPAZHP_IRQ_ENABLED
+	topaz_priv->irq_context = topaz_priv->cur_context;
+#endif
+	mtx_kick(dev);
+
+#ifdef TOPAZHP_SERIALIZED
+	tng_serialize_exit(dev, serializeToken);
+#endif
+
+	return ret;
+}
+
+static void tng_topaz_getvideo(
+	struct drm_device *dev,
+	struct psb_video_ctx *video_ctx)
+{
+	int ret;
+	struct tng_topaz_cmd_header cmd_header;
+
+	PSB_DEBUG_TOPAZ("TOPAZ: Issue MTX_CMDID_GETVIDEO command");
+	PSB_DEBUG_TOPAZ("TOPAZ: to save context\n");
+	cmd_header.id = MTX_CMDID_GETVIDEO;
+
+	ret = mtx_write_FIFO(dev, &cmd_header,
+		video_ctx->enc_ctx_param,
+		video_ctx->enc_ctx_addr, 0);
+	if (ret) {
+		DRM_ERROR("Failed to write command to FIFO");
+		goto out;
+	}
+
+	if ((video_ctx->codec == IMG_CODEC_H263_VBR) ||
+		(video_ctx->codec == IMG_CODEC_H263_CBR))
+		tng_wait_on_sync(dev, 0, cmd_header.id);
+out:
+    return;
+}
+
+static void tng_topaz_setvideo(
+	struct drm_device *dev,
+	struct psb_video_ctx *video_ctx)
+{
+	int ret;
+	struct tng_topaz_cmd_header cmd_header;
+
+	PSB_DEBUG_TOPAZ("TOPAZ: Issue MTX_CMDID_SETVIDEO command\n");
+	PSB_DEBUG_TOPAZ("TOPAZ: to resotre context\n");
+	cmd_header.id = MTX_CMDID_SETVIDEO;
+
+	ret = mtx_write_FIFO(dev, &cmd_header,
+			     video_ctx->enc_ctx_param,
+			     video_ctx->enc_ctx_addr, 0);
+	if (ret) {
+		DRM_ERROR("Failed to write command to FIFO");
+		goto out;
+	}
+
+	if ((video_ctx->codec == IMG_CODEC_H263_VBR) ||
+		(video_ctx->codec == IMG_CODEC_H263_CBR))
+		tng_wait_on_sync(dev, 0, cmd_header.id);
+out:
+    return;
+}
+
+static inline void tng_topaz_trace_ctx(
+	char *words,
+	struct psb_video_ctx *trace_ctx)
+{
+	if (words != NULL)
+		PSB_DEBUG_TOPAZ("TOPAZ: %s:\n", words);
+#ifdef MRFLD_B0_DEBUG
+	if (trace_ctx != NULL)
+		PSB_DEBUG_TOPAZ("%08x(%s), status %08x\n",
+		trace_ctx, codec_to_string(trace_ctx->codec),
+		trace_ctx->status);
+#endif
+	return ;
+}
+
+int tng_topaz_getvideo_setvideo(
+	struct drm_device *dev,
+	struct tng_topaz_private *topaz_priv,
+	struct psb_video_ctx *old_video_ctx,
+	struct psb_video_ctx *new_video_ctx,
+	uint32_t codec)
+{
+	struct tng_topaz_cmd_header cmd_header;
+	int ret = 1;
+
+	PSB_DEBUG_TOPAZ("TOPAZ: Issue MTX_CMDID_GETVIDEO command");
+	cmd_header.id = MTX_CMDID_GETVIDEO;
+
+	ret = mtx_write_FIFO(dev, &cmd_header,
+		old_video_ctx->enc_ctx_param,
+		old_video_ctx->enc_ctx_addr, 0);
+	if (ret) {
+		DRM_ERROR("Failed to write command to FIFO");
+		goto out;
+	}
+
+	/*tng_wait_on_sync(dev, 0, cmd_header.id);*/
+
+	PSB_DEBUG_TOPAZ("TOPAZ: Issue MTX_CMDID_SETVIDEO command\n");
+	cmd_header.id = MTX_CMDID_SETVIDEO;
+
+	ret = mtx_write_FIFO(dev, &cmd_header,
+		new_video_ctx->enc_ctx_param,
+		new_video_ctx->enc_ctx_addr, 0);
+	if (ret) {
+		DRM_ERROR("Failed to write command to FIFO");
+		goto out;
+	}
+
+	/*tng_wait_on_sync(dev, 0, cmd_header.id);*/
+out:
+	return ret;
+}
+
+static int tng_context_switch_secure(
+	struct drm_device *dev,
+	struct drm_file *file_priv,
+	uint32_t codec,
+	uint32_t is_first_frame)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct tng_topaz_private *topaz_priv = dev_priv->topaz_private;
+	struct psb_video_ctx *video_ctx;
+	int32_t ret = 0;
+
+	video_ctx = get_ctx_from_fp(dev, file_priv->filp);
+	if (video_ctx == NULL) {
+		DRM_ERROR("Failed to get video contex from filp");
+		return -1;
+	}
+
+	PSB_DEBUG_TOPAZ("TOPAZ: Frame (%d)\n", video_ctx->frame_count);
+
+	if (TNG_IS_H264_ENC(codec)) {
+		codec = IMG_CODEC_H264_ALL_RC;
+		PSB_DEBUG_TOPAZ("TOPAZ: use RC_ALL for all H264 Codec\n");
+		PSB_DEBUG_TOPAZ("TOPAZ: %s to %s\n",
+				codec_to_string(codec),
+				codec_to_string(video_ctx->codec));
+	}
+
+	PSB_DEBUG_TOPAZ("Incoming context is %p (%s, %08x)\n",
+			video_ctx,
+			codec_to_string(video_ctx->codec),
+			video_ctx->status);
+
+	/* Handle JPEG burst mode, save current context only if it's not JPEG */
+	if (codec == IMG_CODEC_JPEG) {
+		if (topaz_priv->cur_context &&
+		    topaz_priv->cur_context->codec != IMG_CODEC_JPEG &&
+		    !(topaz_priv->cur_context->status & MASK_TOPAZ_CONTEXT_SAVED)) {
+
+			ret = tng_topaz_save_mtx_state(dev);
+			if (ret) {
+				DRM_ERROR("Failed to save mtx status");
+				return ret;
+			}
+		}
+		topaz_priv->cur_context = video_ctx;
+		topaz_priv->cur_codec = codec;
+		return ret;
+	}
+
+	/* Continue doing other commands */
+	if (is_first_frame) {
+		PSB_DEBUG_TOPAZ("TOPAZ: First frame of ctx " \
+				"%p(%s, %08x), continue\n",
+				video_ctx, codec_to_string(codec),
+				video_ctx->status);
+		topaz_priv->cur_context = video_ctx;
+		topaz_priv->cur_codec = codec;
+		return ret;
+	}
+
+	if (drm_topaz_pmpolicy == PSB_PMPOLICY_FORCE_PM) {
+		ret = tng_topaz_power_off(dev);
+		if (ret) {
+			DRM_ERROR("TOPAZ: Failed to power off");
+			return ret;
+		}
+	}
+
+	if (is_island_on(OSPM_VIDEO_ENC_ISLAND)
+		&& (topaz_priv->cur_context != video_ctx)) {
+		if (TNG_IS_H264_ENC(topaz_priv->cur_context->codec)
+			&& TNG_IS_H264_ENC(video_ctx->codec)) {
+			PSB_DEBUG_TOPAZ("ctx switch without power up/off\n");
+			ret = tng_topaz_getvideo_setvideo(
+				dev, topaz_priv,
+				topaz_priv->cur_context,
+				video_ctx, codec);
+			if (ret) {
+				DRM_ERROR("Failed to context switch");
+				return ret;
+			}
+			/* Context switch */
+			topaz_priv->cur_context = video_ctx;
+			topaz_priv->cur_codec = codec;
+		} else {
+			ret = tng_topaz_power_off(dev);
+			if (ret) {
+				DRM_ERROR("TOPAZ: Failed to power off");
+				return ret;
+			}
+		}
+	}
+
+	if (!is_island_on(OSPM_VIDEO_ENC_ISLAND)) {
+		tng_topaz_power_up(dev, codec);
+		if (ret) {
+			DRM_ERROR("TOPAZ: Failed to power up");
+			return ret;
+		}
+
+		PSB_DEBUG_TOPAZ("Restore context %p(%s, %08x)",
+				video_ctx,
+				codec_to_string(video_ctx->codec),
+				video_ctx->status);
+
+		/* Context switch */
+		topaz_priv->cur_context = video_ctx;
+		topaz_priv->cur_codec = codec;
+		ret = tng_topaz_restore_mtx_state_b0(dev);
+		if (ret) {
+			DRM_ERROR("Failed to restore mtx status");
+			return ret;
+		}
+	}
+
+	return ret;
+}
+
+static int tng_context_switch(
+	struct drm_device *dev,
+	struct drm_file *file_priv,
+	uint32_t codec,
+	uint32_t is_first_frame)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct tng_topaz_private *topaz_priv = dev_priv->topaz_private;
+	struct psb_video_ctx *video_ctx;
+	int32_t ret = 0;
+
+	video_ctx = get_ctx_from_fp(dev, file_priv->filp);
+	if (video_ctx == NULL) {
+		DRM_ERROR("Failed to get video contex from filp");
+		return -1;
+	}
+
+	PSB_DEBUG_TOPAZ("TOPAZ: Frame (%d)\n", video_ctx->frame_count);
+	PSB_DEBUG_TOPAZ("Incoming context is %p(%s, %08x)\n",
+			video_ctx,
+			codec_to_string(video_ctx->codec),
+			video_ctx->status);
+
+	/* Handle JPEG burst mode, save current context only if it's not JPEG */
+	if (codec == IMG_CODEC_JPEG) {
+		if (topaz_priv->cur_context &&
+		    topaz_priv->cur_context->codec != IMG_CODEC_JPEG &&
+		    !(topaz_priv->cur_context->
+		      status &MASK_TOPAZ_CONTEXT_SAVED)) {
+
+			ret = tng_topaz_save_mtx_state(dev);
+			if (ret) {
+				DRM_ERROR("Failed to save mtx status");
+				return ret;
+			}
+		}
+		topaz_priv->cur_context = video_ctx;
+		topaz_priv->cur_codec = codec;
+		return ret;
+	}
+
+	/* Continue doing other commands */
+	if (is_first_frame) {
+		PSB_DEBUG_TOPAZ("First frame of ctx %p(%s, %08x), continue\n",
+				video_ctx,
+				codec_to_string(codec),
+				video_ctx->status);
+		topaz_priv->cur_context = video_ctx;
+		topaz_priv->cur_codec = codec;
+		return ret;
+	}
+
+	if (topaz_priv->cur_context == video_ctx) {
+		if (drm_topaz_pmpolicy == PSB_PMPOLICY_FORCE_PM) {
+			ret = tng_topaz_power_off(dev);
+			if (ret) {
+				DRM_ERROR("TOPAZ: Failed to power off");
+				return ret;
+			}
+		}
+		if (video_ctx->status & MASK_TOPAZ_CONTEXT_SAVED) {
+			PSB_DEBUG_TOPAZ("Same comtext %p(%s, %08x) " \
+					"and already saved\n",
+					video_ctx,
+					codec_to_string(video_ctx->codec),
+					video_ctx->status);
+
+			if (Is_Secure_Fw()) {
+				ret = tng_topaz_power_up(dev, codec);
+				if (ret) {
+					DRM_ERROR("TOPAZ: Failed to power up");
+					return ret;
+				}
+			}
+
+			/* Context switch */
+			topaz_priv->cur_context = video_ctx;
+			topaz_priv->cur_codec = codec;
+			if (Is_Secure_Fw()) {
+				ret = tng_topaz_restore_mtx_state_b0(dev);
+				if (ret) {
+					DRM_ERROR("Failed to restore mtx B0");
+					return ret;
+				}
+			} else {
+				ret = tng_topaz_restore_mtx_state(dev);
+				if (ret) {
+					DRM_ERROR("Failed to restore mtx");
+					return ret;
+				}
+			}
+		} else {
+			PSB_DEBUG_TOPAZ("Same context %p(%s, %08x) " \
+					"but not saved, continue\n",
+					video_ctx,
+					codec_to_string(video_ctx->codec),
+					video_ctx->status);
+			topaz_priv->cur_context = video_ctx;
+			topaz_priv->cur_codec = codec;
+			return ret;
+		}
+	} else {
+		/* Current context already saved */
+		if (topaz_priv->cur_context->status & \
+		    MASK_TOPAZ_CONTEXT_SAVED) {
+			PSB_DEBUG_TOPAZ("Different context and current context"\
+					" %p(%s, %08x) already saved, "\
+					"continue\n",
+					topaz_priv->cur_context,
+					codec_to_string(topaz_priv->
+							cur_context->
+							codec),
+					topaz_priv->cur_context->status);
+		} else {
+			/* Save current context */
+			PSB_DEBUG_TOPAZ("Different context and current context"\
+					" %p(%s, %08x) not saved,"\
+					" save it first",
+					topaz_priv->cur_context,
+					codec_to_string(topaz_priv->
+							cur_context->codec),
+					topaz_priv->cur_context->status);
+			if (Is_Secure_Fw()) {
+				ret = tng_topaz_power_off(dev);
+				if (ret) {
+					DRM_ERROR("TOPAZ: Failed to power off");
+					return ret;
+				}
+			} else {
+				if (topaz_priv->cur_context->codec !=
+				    IMG_CODEC_JPEG) {
+					ret = tng_topaz_save_mtx_state(dev);
+					if (ret) {
+						DRM_ERROR("Failed to save "\
+							  "mtx status");
+						return ret;
+					}
+				} else
+					PSB_DEBUG_TOPAZ("TOPAZ: Bypass saving "\
+							"JPEG context\n");
+			}
+		}
+
+		if (Is_Secure_Fw()) {
+			tng_topaz_power_up(dev, codec);
+			if (ret) {
+				DRM_ERROR("TOPAZ: Failed to power up");
+				return ret;
+			}
+		}
+
+		PSB_DEBUG_TOPAZ("Restore context %p(%s, %08x)",
+				video_ctx,
+				codec_to_string(video_ctx->codec),
+				video_ctx->status);
+
+		/* Context switch */
+		topaz_priv->cur_context = video_ctx;
+		topaz_priv->cur_codec = codec;
+		if (Is_Secure_Fw()) {
+			ret = tng_topaz_restore_mtx_state_b0(dev);
+			if (ret) {
+				DRM_ERROR("Failed to restore mtx status");
+				return ret;
+			}
+		} else {
+			if (topaz_priv->cur_context->codec != IMG_CODEC_JPEG) {
+				ret = tng_topaz_restore_mtx_state(dev);
+				if (ret) {
+					DRM_ERROR("Failed to restore "\
+						  "mtx status");
+					return ret;
+				}
+			}
+		}
+	}
+
+	return ret;
+}
+
+static uint16_t tng__rand(struct psb_video_ctx * video_ctx)
+{
+    uint16_t ret = 0;
+    video_ctx->pseudo_rand_seed =  (uint32_t)((video_ctx->pseudo_rand_seed * 1103515245 + 12345) & 0xffffffff); //Using mask, just in case
+    ret = (uint16_t)(video_ctx->pseudo_rand_seed / 65536) % 32768;
+    return ret;
+}
+
+static int32_t tng_fill_input_control (
+	struct drm_device *dev,
+	struct drm_file *file_priv,
+	int8_t slot_num,
+	int16_t ir,
+	int8_t init_qp,
+	int8_t min_qp,
+	int32_t buf_size,
+	uint32_t pseudo_rand_seed)
+{
+	uint16_t default_param;
+	uint16_t intra_param;
+	bool refresh = false;
+	uint32_t cur_index;
+	uint32_t mb_x, mb_y;
+	uint32_t mb_w, mb_h;
+	uint16_t *p_input_buf;
+	int8_t qp;
+	int8_t max_qp = 31;
+	uint16_t mb_param;
+	int32_t ret;
+	bool is_iomem;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct tng_topaz_private *topaz_priv = dev_priv->topaz_private;
+	struct psb_video_ctx *video_ctx;
+
+	mb_w = topaz_priv->frame_w / 16;
+	mb_h = topaz_priv->frame_h / 16;
+
+	video_ctx = get_ctx_from_fp(dev, file_priv->filp);
+	if (video_ctx == NULL) {
+		DRM_ERROR("Failed to get video contex from filp");
+		return -1;
+	}
+	video_ctx->pseudo_rand_seed = pseudo_rand_seed;
+
+	PSB_DEBUG_TOPAZ("TOPAZ: Fill input control, cir=%d, initqp=%d, " \
+			"minqp=%d, slot=%d, bufsize=%d, pseudo=%d, " \
+			"mb_w=%d, mb_h=%d\n",
+			ir, init_qp, min_qp, slot_num, buf_size,
+			pseudo_rand_seed, mb_w, mb_h);
+
+	ret = ttm_bo_reserve(video_ctx->cir_input_ctrl_bo,
+			     true, true, false, 0);
+	if (ret) {
+		DRM_ERROR("TOPAZ: reserver failed.\n");
+		return -1;
+	}
+
+	ret = ttm_bo_kmap(video_ctx->cir_input_ctrl_bo, 0,
+			video_ctx->cir_input_ctrl_bo->num_pages,
+			&video_ctx->cir_input_ctrl_kmap);
+	if (ret) {
+		DRM_ERROR("TOPAZ: Failed to map cir input ctrl bo\n");
+		ttm_bo_unref(&video_ctx->cir_input_ctrl_bo);
+		return -1;
+	}
+
+	video_ctx->cir_input_ctrl_addr =
+		(uint32_t *)(ttm_kmap_obj_virtual(
+				     &video_ctx->
+				     cir_input_ctrl_kmap,
+				     &is_iomem) + slot_num * buf_size);
+
+	if (ir > 0) {
+		refresh = true;
+	}
+
+	p_input_buf = (uint16_t *)video_ctx->cir_input_ctrl_addr;
+
+	cur_index = 0;
+
+	for(mb_y = 0; mb_y < (uint32_t)(mb_h); mb_y++) {
+		for(mb_x = 0; mb_x < mb_w; mb_x++) {
+			mb_param = 0;
+
+			qp = init_qp + ((tng__rand(video_ctx) % 6)-3);
+			qp = tng__max(tng__min(qp, max_qp), min_qp);
+
+			default_param = (qp << 10) | (3 << 7) | (3 << 4);
+			intra_param = (qp << 10) | (0 << 7) | (0 << 4);
+
+			mb_param = default_param;
+			if (refresh) {
+				if ((int32_t)cur_index >
+				    video_ctx-> last_cir_index) {
+					video_ctx->last_cir_index = cur_index;
+					mb_param = intra_param;
+					ir--;
+					if(ir <= 0)
+						refresh = false;
+				}
+			}
+			p_input_buf[cur_index++] = mb_param;
+		}
+	}
+
+	if (refresh) {
+		video_ctx->last_cir_index = -1;
+		while (ir) {
+			qp = init_qp + ((tng__rand(video_ctx) % 6) - 3);
+			qp = tng__max(tng__min(qp, max_qp), min_qp);
+			intra_param = (qp << 10) |(0 << 7) |(0 << 4);
+			p_input_buf[++video_ctx->last_cir_index] = intra_param;
+			ir--;
+		}
+	}
+
+	ttm_bo_unreserve(video_ctx->cir_input_ctrl_bo);
+	ttm_bo_kunmap(&video_ctx->cir_input_ctrl_kmap);
+
+	return 0;
+}
+
+static int32_t tng_update_air_send (
+	struct drm_device *dev,
+	struct drm_file *file_priv,
+	uint8_t slot_num,
+	int16_t skip_count,
+	int32_t air_per_frame,
+	uint32_t buf_size,
+	uint32_t frame_count)
+{
+	int32_t ret;
+	bool is_iomem;
+	uint16_t ui16IntraParam;
+	uint32_t ui32CurrentCnt, ui32SentCnt;
+	uint32_t ui32MBMaxSize;
+	uint16_t *pui16MBParam;
+	uint32_t ui32NewScanPos;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct tng_topaz_private *topaz_priv = dev_priv->topaz_private;
+	struct psb_video_ctx *video_ctx;
+
+	ui16IntraParam = (0 << 7) | (0 << 4);
+
+	video_ctx = get_ctx_from_fp(dev, file_priv->filp);
+	if (video_ctx == NULL) {
+		DRM_ERROR("Failed to get video contex from filp\n");
+		return -1;
+	}
+
+	if (skip_count >= 0)
+		video_ctx->air_info.air_skip_cnt = skip_count;
+	else /* Pseudorandom skip */
+		video_ctx->air_info.air_skip_cnt = (frame_count & 0x7) + 1;
+
+	if (frame_count < 1) {
+		video_ctx->air_info.air_per_frame = air_per_frame;
+		return 0;
+	}
+
+	PSB_DEBUG_TOPAZ("TOPAZ: Update air send, slot_num=%d, skip_count=%d, "\
+			"air_per_frame=%d, buf_size=%d, frame_count=%d\n", \
+			slot_num, video_ctx->air_info.air_skip_cnt,
+			video_ctx->air_info.air_per_frame,
+			buf_size, frame_count);
+
+	ret = ttm_bo_reserve(video_ctx->cir_input_ctrl_bo,
+			     true, true, false, 0);
+	if (ret) {
+		DRM_ERROR("TOPAZ: reserver failed.\n");
+		return -1;
+	}
+
+	ret = ttm_bo_kmap(video_ctx->cir_input_ctrl_bo, 0,
+			video_ctx->cir_input_ctrl_bo->num_pages,
+			&video_ctx->cir_input_ctrl_kmap);
+	if (ret) {
+		DRM_ERROR("TOPAZ: Failed to map cir input ctrl bo\n");
+		ttm_bo_unref(&video_ctx->cir_input_ctrl_bo);
+		return -1;
+	}
+
+	video_ctx->cir_input_ctrl_addr =
+		(uint32_t *)(ttm_kmap_obj_virtual(
+				    &video_ctx->cir_input_ctrl_kmap,
+				    &is_iomem) + slot_num * buf_size);
+
+	/* get the buffer */
+	pui16MBParam = (uint16_t *) video_ctx->cir_input_ctrl_addr;
+
+	/* fill data */
+	ui32MBMaxSize = (uint32_t)(topaz_priv->frame_w / 16) *
+		(IMG_UINT32)(topaz_priv->frame_h / 16);
+
+	ui32CurrentCnt = 0;
+	ui32NewScanPos = (uint32_t) (video_ctx->air_info.air_scan_pos +
+				     video_ctx->air_info.air_skip_cnt) %
+		ui32MBMaxSize;
+	ui32CurrentCnt = ui32SentCnt = 0;
+
+	while (ui32CurrentCnt < ui32MBMaxSize &&
+		((video_ctx->air_info.air_per_frame == 0) ||
+		ui32SentCnt < (uint32_t) video_ctx->air_info.air_per_frame)) {
+
+		uint16_t ui16MBParam;
+
+		if (video_ctx->air_info.air_table[ui32NewScanPos] >= 0) {
+			// Mark the entry as 'touched'
+			video_ctx->air_info.air_table[ui32NewScanPos] =
+				-1 - video_ctx->
+				air_info.air_table[ui32NewScanPos];
+
+			if (video_ctx->air_info.air_table[ui32NewScanPos] <
+			    -1) {
+				ui16MBParam = pui16MBParam[ui32NewScanPos] &
+					(0xFF << 10);
+				ui16MBParam |= ui16IntraParam;
+				pui16MBParam[ui32NewScanPos] = ui16MBParam;
+				video_ctx->air_info.air_table[ui32NewScanPos]++;
+				ui32NewScanPos +=
+					video_ctx->air_info.air_skip_cnt;
+				ui32SentCnt++;
+			}
+			ui32CurrentCnt++;
+		}
+
+		ui32NewScanPos++;
+		ui32NewScanPos = ui32NewScanPos % ui32MBMaxSize;
+		if (ui32NewScanPos == video_ctx->air_info.air_scan_pos) {
+			/* we have looped around */
+			break;
+		}
+	}
+
+	video_ctx->air_info.air_scan_pos = ui32NewScanPos;
+
+	ttm_bo_unreserve(video_ctx->cir_input_ctrl_bo);
+	ttm_bo_kunmap(&video_ctx->cir_input_ctrl_kmap);
+	return 0;
+}
+
+static int32_t tng_air_buf_clear(
+	struct drm_device *dev,
+	struct drm_file *file_priv)
+{
+	struct psb_video_ctx *video_ctx;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct tng_topaz_private *topaz_priv = dev_priv->topaz_private;
+
+	video_ctx = get_ctx_from_fp(dev, file_priv->filp);
+	if (video_ctx == NULL) {
+		DRM_ERROR("Failed to get video contex from filp\n");
+		return -1;
+	}
+
+	PSB_DEBUG_TOPAZ("TOPAZ: Clear AIR buffer\n");
+	memset(video_ctx->air_info.air_table, 0,
+	       (topaz_priv->frame_h * topaz_priv->frame_w) >> 8);
+
+	return 0;
+}
+
+static int32_t tng_update_air_calc(
+	struct drm_device *dev,
+	struct drm_file *file_priv,
+	uint8_t slot_num,
+	uint32_t buf_size,
+	int32_t sad_threshold,
+	uint8_t enable_sel_states_flag)
+{
+	int32_t ret;
+	bool is_iomem;
+	uint8_t *pSADPointer;
+	uint8_t *pvSADBuffer;
+	uint8_t ui8IsAlreadyIntra;
+	uint32_t ui32MBFrameWidth;
+	uint32_t ui32MBPictureHeight;
+	uint16_t ui16IntraParam;
+	uint32_t ui32MBx, ui32MBy;
+	uint32_t ui32SADParam;
+	uint32_t ui32tSAD_Threshold, ui32tSAD_ThresholdLo, ui32tSAD_ThresholdHi;
+	uint32_t ui32MaxMBs, ui32NumMBsOverThreshold,
+		ui32NumMBsOverLo, ui32NumMBsOverHi;
+	struct psb_video_ctx *video_ctx;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct tng_topaz_private *topaz_priv = dev_priv->topaz_private;
+	IMG_BEST_MULTIPASS_MB_PARAMS *psBestMB_Params;
+	IMG_FIRST_STAGE_MB_PARAMS *psFirstMB_Params;
+	uint8_t *pFirstPassOutBuf;
+	uint8_t *pBestMBDecisionCtrlBuf;
+
+	ui16IntraParam = (0 << 7) | (0 << 4);
+	ui32NumMBsOverThreshold = ui32NumMBsOverLo = ui32NumMBsOverHi = 0;
+
+	PSB_DEBUG_TOPAZ("TOPAZ: Update air calc, slot_num=%d, buf_size=%d, "\
+			"sad_threshold=%d, enable_sel_states_flag=%08x\n", \
+			slot_num, buf_size, sad_threshold,
+			enable_sel_states_flag);
+
+	video_ctx = get_ctx_from_fp(dev, file_priv->filp);
+	if (video_ctx == NULL) {
+		DRM_ERROR("Failed to get video contex from filp\n");
+		return -1;
+	}
+
+	/* Map first pass out params */
+	ret = ttm_bo_reserve(video_ctx->bufs_f_p_out_params_bo, true,
+			     true, false, 0);
+	if (ret) {
+		DRM_ERROR("TOPAZ: reserver failed.\n");
+		return -1;
+	}
+
+	ret = ttm_bo_kmap(video_ctx->bufs_f_p_out_params_bo, 0,
+			video_ctx->bufs_f_p_out_params_bo->num_pages,
+			&video_ctx->bufs_f_p_out_params_kmap);
+	if (ret) {
+		DRM_ERROR("TOPAZ: Failed to map first pass out param bo\n");
+		ttm_bo_unref(&video_ctx->bufs_f_p_out_params_bo);
+		return -1;
+	}
+
+	video_ctx->bufs_f_p_out_params_addr =
+		(uint32_t *)(ttm_kmap_obj_virtual(
+				     &video_ctx->bufs_f_p_out_params_kmap,
+				     &is_iomem) + slot_num * buf_size);
+
+	pFirstPassOutBuf =
+		(uint8_t *)video_ctx->bufs_f_p_out_params_addr;
+
+	/* Map first pass out best multipass params */
+	ret = ttm_bo_reserve(video_ctx->
+			     bufs_f_p_out_best_mp_param_bo,
+			     true, true, false, 0);
+	if (ret) {
+		DRM_ERROR("TOPAZ: reserver failed.\n");
+		return -1;
+	}
+
+	ret = ttm_bo_kmap(video_ctx->
+			  bufs_f_p_out_best_mp_param_bo, 0,
+			  video_ctx->
+			  bufs_f_p_out_best_mp_param_bo->
+			  num_pages,
+			  &video_ctx->
+			  bufs_f_p_out_best_mp_param_kmap);
+	if (ret) {
+		DRM_ERROR("TOPAZ: Failed to map first pass out best multipass "\
+			  "param bo\n");
+		ttm_bo_unref(&video_ctx->
+			     bufs_f_p_out_best_mp_param_bo);
+		return -1;
+	}
+	video_ctx->bufs_f_p_out_best_mp_param_addr =
+		(uint32_t *)(ttm_kmap_obj_virtual(
+				     &video_ctx->
+				     bufs_f_p_out_best_mp_param_kmap,
+				     &is_iomem) + slot_num * buf_size);
+
+	pBestMBDecisionCtrlBuf = (uint8_t *)video_ctx->
+		bufs_f_p_out_best_mp_param_addr;
+
+	/* fill data */
+	ui32MBFrameWidth  = (topaz_priv->frame_w / 16);
+	ui32MBPictureHeight = (topaz_priv->frame_h / 16);
+
+	/* get the SAD results buffer (either IPE0 and IPE1 results or,
+	   preferably, the more accurate Best Multipass SAD results) */
+	if (pBestMBDecisionCtrlBuf) {
+		pvSADBuffer = pBestMBDecisionCtrlBuf;
+
+		if (enable_sel_states_flag & ESF_MP_BEST_MOTION_VECTOR_STATS) {
+			/* The actual Param structures (which contain SADs) are
+			   located after the Multipass Motion Vector entries */
+			pvSADBuffer +=
+				(ui32MBPictureHeight * (ui32MBFrameWidth) *
+				 sizeof(IMG_BEST_MULTIPASS_MB_PARAMS_IPMV));
+		}
+	} else {
+		pvSADBuffer = pFirstPassOutBuf;
+	}
+
+	if (video_ctx->air_info.air_per_frame == 0)
+		/* Default to ALL MB's in frame */
+		ui32MaxMBs = ui32MBFrameWidth * ui32MBPictureHeight;
+	else if (video_ctx->air_info.air_per_frame < 0)
+		/* Default to 1% of MB's in frame (min 1) */
+		video_ctx->air_info.air_per_frame = ui32MaxMBs =
+			((ui32MBFrameWidth * ui32MBPictureHeight) + 99) / 100;
+	else
+		ui32MaxMBs = video_ctx->air_info.air_per_frame;
+
+	pSADPointer = (uint8_t *)pvSADBuffer;
+
+	video_ctx->air_info.sad_threshold = sad_threshold;
+	if (video_ctx->air_info.sad_threshold >= 0)
+		ui32tSAD_Threshold =
+			(uint16_t)video_ctx->air_info.sad_threshold;
+	else {
+		// Running auto adjust threshold adjust mode
+		if (video_ctx->air_info.sad_threshold == -1) {
+			// This will occur only the first time
+			if (pBestMBDecisionCtrlBuf) {
+				/*Auto seed the threshold with the first value*/
+				psBestMB_Params =
+					(IMG_BEST_MULTIPASS_MB_PARAMS *)
+					pSADPointer;
+				ui32SADParam = psBestMB_Params->
+					ui32SAD_Inter_MBInfo &
+					IMG_BEST_MULTIPASS_SAD_MASK;
+			} else {
+				/*Auto seed the threshold with the first value*/
+				psFirstMB_Params =
+					(IMG_FIRST_STAGE_MB_PARAMS *)
+					pSADPointer;
+				ui32SADParam = (uint32_t) psFirstMB_Params->
+					ui16Ipe0Sad;
+			}
+			/* Negative numbers indicate auto-adjusting threshold */
+			video_ctx->air_info.sad_threshold = -1 - ui32SADParam;
+		}
+		ui32tSAD_Threshold =
+			(int32_t) - (video_ctx->air_info.sad_threshold + 1);
+	}
+
+	ui32tSAD_ThresholdLo = ui32tSAD_Threshold / 2;
+	ui32tSAD_ThresholdHi = ui32tSAD_Threshold + ui32tSAD_ThresholdLo;
+
+	// This loop could be optimised to a single counter if necessary, retaining for clarity
+	for (ui32MBy = 0; ui32MBy < ui32MBPictureHeight; ui32MBy++) {
+		for( ui32MBx=0; ui32MBx<ui32MBFrameWidth; ui32MBx++) {
+			psBestMB_Params = (IMG_BEST_MULTIPASS_MB_PARAMS *) pSADPointer;
+			pSADPointer = (uint8_t *) &(psBestMB_Params[1]);
+			//pSADPointer += sizeof(IMG_BEST_MULTIPASS_MB_PARAMS);
+
+			// Turn all negative table values to positive (reset 'touched' state of a block that may have been set in APP_SendAIRInpCtrlBuf())
+			if (video_ctx->air_info.air_table[ui32MBy *  ui32MBFrameWidth + ui32MBx] < 0)
+				video_ctx->air_info.air_table[ui32MBy *  ui32MBFrameWidth + ui32MBx] = -1 - video_ctx->air_info.air_table[ui32MBy *  ui32MBFrameWidth + ui32MBx];
+
+			// This will read the SAD value from the buffer (either IPE0 SAD or the superior Best multipass parameter structure SAD value)
+			if (pBestMBDecisionCtrlBuf) {
+				psBestMB_Params = (IMG_BEST_MULTIPASS_MB_PARAMS *) pSADPointer;
+				ui32SADParam = psBestMB_Params->ui32SAD_Inter_MBInfo & IMG_BEST_MULTIPASS_SAD_MASK;
+				if ((psBestMB_Params->ui32SAD_Intra_MBInfo & IMG_BEST_MULTIPASS_MB_TYPE_MASK) >> IMG_BEST_MULTIPASS_MB_TYPE_SHIFT == 1)
+					ui8IsAlreadyIntra = 1;
+				else
+					ui8IsAlreadyIntra = 0;
+
+				pSADPointer = (uint8_t *) &(psBestMB_Params[1]);
+			} else {
+				psFirstMB_Params = (IMG_FIRST_STAGE_MB_PARAMS *) pSADPointer;
+				ui32SADParam = (uint32_t) psFirstMB_Params->ui16Ipe0Sad;
+				ui32SADParam += (uint32_t) psFirstMB_Params->ui16Ipe1Sad;
+				ui32SADParam /= 2;
+				ui8IsAlreadyIntra = 0; // We don't have the information to determine this
+				pSADPointer = (uint8_t *) &(psFirstMB_Params[1]);
+			}
+
+			if (ui32SADParam >= ui32tSAD_ThresholdLo) {
+				ui32NumMBsOverLo++;
+
+				if (ui32SADParam >= ui32tSAD_Threshold) {
+				// if (!ui8IsAlreadyIntra) // Don't mark this block if it's just been encoded as an Intra block anyway
+				// (results seem better without this condition anyway)
+					video_ctx->air_info.air_table[ui32MBy *  ui32MBFrameWidth + ui32MBx]++;
+					ui32NumMBsOverThreshold++;
+					if (ui32SADParam >= ui32tSAD_ThresholdHi)
+						ui32NumMBsOverHi++;
+				}
+			}
+		}
+		if ((uint32_t)(uintptr_t)pSADPointer % 64)
+			pSADPointer = (uint8_t *)(uintptr_t)
+				ALIGN_64(((uint32_t)(uintptr_t) pSADPointer));
+	}
+
+	// Test and process running adaptive threshold case
+	if (video_ctx->air_info.sad_threshold < 0) {
+		// Adjust our threshold (to indicate it's auto-adjustable store it as a negative value minus 1)
+		if (ui32NumMBsOverLo <= ui32MaxMBs)
+			video_ctx->air_info.sad_threshold = (int32_t) - ((int32_t)ui32tSAD_ThresholdLo) - 1;
+		else {
+			if (ui32NumMBsOverHi >= ui32MaxMBs)
+				video_ctx->air_info.sad_threshold = (int32_t) - ((int32_t)ui32tSAD_ThresholdHi) - 1;
+			else {
+				if (ui32MaxMBs < ui32NumMBsOverThreshold) {
+					video_ctx->air_info.sad_threshold = ((int32_t)ui32tSAD_ThresholdHi - (int32_t)ui32tSAD_Threshold);
+					video_ctx->air_info.sad_threshold *= ((int32_t)ui32MaxMBs - (int32_t)ui32NumMBsOverThreshold);
+					video_ctx->air_info.sad_threshold /= ((int32_t)ui32NumMBsOverHi - (int32_t)ui32NumMBsOverThreshold);
+					video_ctx->air_info.sad_threshold += ui32tSAD_Threshold;
+				} else {
+                    			video_ctx->air_info.sad_threshold = ((int32_t)ui32tSAD_Threshold - (int32_t)ui32tSAD_ThresholdLo);
+                    			video_ctx->air_info.sad_threshold *= ((int32_t)ui32MaxMBs - (int32_t)ui32NumMBsOverLo);
+                    			video_ctx->air_info.sad_threshold /= ((int32_t)ui32NumMBsOverThreshold - (int32_t)ui32NumMBsOverLo);
+                    			video_ctx->air_info.sad_threshold += ui32tSAD_ThresholdLo;
+                		}
+                		video_ctx->air_info.sad_threshold = -video_ctx->air_info.sad_threshold - 1;
+			}
+		}
+	}
+
+	ttm_bo_unreserve(video_ctx->bufs_f_p_out_params_bo);
+	ttm_bo_kunmap(&video_ctx->bufs_f_p_out_params_kmap);
+	ttm_bo_unreserve(video_ctx->bufs_f_p_out_best_mp_param_bo);
+	ttm_bo_kunmap(&video_ctx->bufs_f_p_out_best_mp_param_kmap);
+
+	return 0;
+}
+
+static int32_t tng_setup_WB_mem(
+	struct drm_device *dev,
+	struct drm_file *file_priv,
+	const void *command)
+{
+	struct ttm_object_file *tfile = BCVideoGetPriv(file_priv)->tfile;
+	struct psb_video_ctx *video_ctx;
+	uint32_t i = 0;
+	const uint32_t len = sizeof(struct IMG_WRITEBACK_MSG);
+	int ret;
+	bool is_iomem;
+	uint32_t wb_handle;
+	uint32_t mtx_ctx_handle;
+	uint32_t cir_input_ctrl_handle;
+	uint32_t bufs_f_p_out_params_handle;
+	uint32_t bufs_f_p_out_best_mp_param_handle;
+	uint8_t *ptmp = NULL;
+	const uint8_t pas_val = (uint8_t) ~0x0;
+
+	video_ctx = get_ctx_from_fp(dev, file_priv->filp);
+	if (video_ctx == NULL) {
+		DRM_ERROR("Failed to get video contex from filp");
+		return -1;
+	}
+
+	wb_handle = *((uint32_t *)command + 3);
+	PSB_DEBUG_TOPAZ("TOPAZ: Map write back memory from handle %08x\n",
+		wb_handle);
+
+	video_ctx->wb_bo = ttm_buffer_object_lookup(tfile, wb_handle);
+
+        if (unlikely(video_ctx->wb_bo == NULL)) {
+                DRM_ERROR("TOPAZ: Failed to lookup write back BO\n");
+                return -1;
+        }
+
+	ret = ttm_bo_reserve(video_ctx->wb_bo , true, true, false, 0);
+	if (ret) {
+		DRM_ERROR("TOPAZ: reserver failed.\n");
+		return -1;
+	}
+
+	ret = ttm_bo_kmap(video_ctx->wb_bo, 0,
+			  video_ctx->wb_bo->num_pages,
+			  &video_ctx->wb_bo_kmap);
+	if (ret) {
+		DRM_ERROR("TOPAZ: Failed to map topaz write back BO\n");
+		ttm_bo_unref(&video_ctx->wb_bo);
+		return -1;
+	}
+
+	video_ctx->wb_addr[0] = (uint32_t *)ttm_kmap_obj_virtual(
+				&video_ctx->wb_bo_kmap, &is_iomem);
+
+	PSB_DEBUG_TOPAZ("TOPAZ: memset: val=0x%08x, len=%d\n", \
+			pas_val, len);
+
+	PSB_DEBUG_TOPAZ("TOPAZ: memset: wb_addr=%p, i=%d\n", \
+			video_ctx->wb_addr[0], i);
+
+	ptmp = (uint8_t *)(video_ctx->wb_addr[i++]);
+	memset(ptmp, pas_val, len);
+	while (i < WB_FIFO_SIZE) {
+		video_ctx->wb_addr[i] = video_ctx->wb_addr[i-1] + 0x400;
+		PSB_DEBUG_TOPAZ("TOPAZ: memset: wb_addr=%p, i=%d\n", \
+				video_ctx->wb_addr[i], i);
+		ptmp = (uint8_t *)(video_ctx->wb_addr[i++]);
+		memset(ptmp, pas_val, len);
+	} ;
+
+	video_ctx->enc_ctx_param = *((uint32_t *)command + 1);
+	video_ctx->enc_ctx_addr = *((uint32_t *)command + 2);
+
+	PSB_DEBUG_TOPAZ("TOPAZ: GET/SETVIDEO data: %08x, address: %08x\n", \
+			video_ctx->enc_ctx_param, video_ctx->enc_ctx_addr);
+
+	if (video_ctx->codec == IMG_CODEC_JPEG)
+		return 0;
+
+	mtx_ctx_handle = *((uint32_t *)command + 4);
+	PSB_DEBUG_TOPAZ("TOPAZ: Map IMG_MTX_VIDEO_CONTEXT buffer from handle %08x\n",
+		mtx_ctx_handle);
+
+	video_ctx->mtx_ctx_bo = ttm_buffer_object_lookup(tfile, mtx_ctx_handle);
+        if (unlikely(video_ctx->mtx_ctx_bo == NULL)) {
+                DRM_ERROR("TOPAZ: Failed to lookup IMG_MTX_VIDEO_CONTEXT handle\n");
+                return -1;
+        }
+
+	/* If this cmd package is dequeued, must reserve it now */
+	if (0 == atomic_read(&video_ctx->mtx_ctx_bo->reserved)) {
+		PSB_DEBUG_TOPAZ("MTX context not reserved, reserve it now\n");
+		ret = ttm_bo_reserve(video_ctx->mtx_ctx_bo, true, true, false, 0);
+		if (ret) {
+			DRM_ERROR("Reserve MTX context failed.\n");
+			return -1;
+		}
+	}
+
+	ret = ttm_bo_kmap(video_ctx->mtx_ctx_bo, 0,
+			  video_ctx->mtx_ctx_bo->num_pages,
+			  &video_ctx->mtx_ctx_kmap);
+	if (ret) {
+		DRM_ERROR("TOPAZ: Failed to map IMG_MTX_VIDEO_CONTEXT BO\n");
+		ttm_bo_unref(&video_ctx->mtx_ctx_bo);
+		return -1;
+	}
+
+	video_ctx->setv_addr = (uint32_t)(uintptr_t)ttm_kmap_obj_virtual(
+				&video_ctx->mtx_ctx_kmap, &is_iomem);
+
+	cir_input_ctrl_handle = *((uint32_t *)command + 5);
+
+	video_ctx->cir_input_ctrl_bo = ttm_buffer_object_lookup(tfile, cir_input_ctrl_handle);
+	if (unlikely(video_ctx->cir_input_ctrl_bo == NULL)) {
+		DRM_ERROR("TOPAZ: Failed to lookup cir input ctrl handle\n");
+		return -1;
+	}
+
+	bufs_f_p_out_params_handle = *((uint32_t *)command + 6);
+
+	video_ctx->bufs_f_p_out_params_bo = ttm_buffer_object_lookup(tfile, bufs_f_p_out_params_handle);
+	if (unlikely(video_ctx->bufs_f_p_out_params_bo == NULL)) {
+		DRM_ERROR("TOPAZ: Failed to lookup air first pass out parameter handle\n");
+		return -1;
+	}
+	video_ctx->bufs_f_p_out_params_addr = NULL;
+
+	bufs_f_p_out_best_mp_param_handle = *((uint32_t *)command + 7);
+
+	video_ctx->bufs_f_p_out_best_mp_param_bo = ttm_buffer_object_lookup(tfile, bufs_f_p_out_best_mp_param_handle);
+	if (unlikely(video_ctx->bufs_f_p_out_best_mp_param_bo == NULL)) {
+		DRM_ERROR("TOPAZ: Failed to lookup air first pass out best multipass parameter handle\n");
+		return -1;
+	}
+	video_ctx->bufs_f_p_out_best_mp_param_addr = NULL;
+
+	return ret;
+}
+
+static int tng_setup_new_context_secure(
+	struct drm_device *dev,
+	struct drm_file *file_priv,
+	uint32_t *cmd,
+	uint32_t codec)
+{
+	struct psb_video_ctx *video_ctx;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct tng_topaz_private *topaz_priv = dev_priv->topaz_private;
+	int32_t ret = 0;
+
+	topaz_priv->frame_h = (uint16_t)((*((uint32_t *) cmd + 1)) & 0xffff);
+	topaz_priv->frame_w = (uint16_t)(((*((uint32_t *) cmd + 1))
+				& 0xffff0000)  >> 16) ;
+
+	video_ctx = get_ctx_from_fp(dev, file_priv->filp);
+	if (video_ctx == NULL) {
+		DRM_ERROR("Failed to get video contex from filp");
+		ret = -1;
+		goto out;
+	}
+
+	video_ctx->codec = codec;
+	if (TNG_IS_H264_ENC(codec)) {
+		video_ctx->codec = codec = IMG_CODEC_H264_ALL_RC;
+		PSB_DEBUG_TOPAZ("TOPAZ: use RC_ALL for all H264 Codec\n");
+		PSB_DEBUG_TOPAZ("TOPAZ: %s to %s\n", \
+		codec_to_string(codec), codec_to_string(video_ctx->codec));
+	}
+
+	video_ctx->status = 0;
+	video_ctx->frame_count = 0;
+	video_ctx->bias_reg = NULL;
+	video_ctx->handle_sequence_needed = false;
+	video_ctx->high_cmd_count = 0;
+	video_ctx->low_cmd_count = 0xa5a5a5a5 % MAX_TOPAZ_CMD_COUNT;
+	video_ctx->last_cir_index = -1;
+	video_ctx->air_info.air_scan_pos = 0;
+	video_ctx->air_info.air_table = kzalloc((topaz_priv->frame_h * topaz_priv->frame_w) >> 8, GFP_KERNEL);
+	if (!video_ctx->air_info.air_table) {
+		DRM_ERROR("TOPAZ: Failed to alloc memory for AIR table\n");
+		return -1;
+	}
+
+	PSB_DEBUG_TOPAZ("TOPAZ: new context %p(%s)(cur context = %p)\n",
+			video_ctx, codec_to_string(codec),
+			topaz_priv->cur_context);
+
+	if (Is_Secure_Fw() && drm_topaz_pmpolicy == PSB_PMPOLICY_NOPM) {
+		PSB_DEBUG_TOPAZ("TOPAZ: new context, force a poweroff to reload firmware anyway\n");
+
+		drm_topaz_pmpolicy = PSB_PMPOLICY_POWERDOWN; /* off NOPM policy */
+		tng_topaz_power_off(dev);
+		drm_topaz_pmpolicy = PSB_PMPOLICY_NOPM; /* reset back to NOPM */
+	}
+
+	if (topaz_priv->cur_context &&
+		(topaz_priv->cur_context != video_ctx) &&
+		is_island_on(OSPM_VIDEO_ENC_ISLAND)) {
+
+		PSB_DEBUG_TOPAZ("Current context %p(%s, %08x)" \
+				" not saved, save it first\n",
+				topaz_priv->cur_context,
+				codec_to_string(topaz_priv->cur_context->codec),
+				topaz_priv->cur_context->status);
+
+		ret = tng_topaz_power_off(dev);
+		if (ret) {
+			DRM_ERROR("TOPAZ: Failed");
+			DRM_ERROR("to power off");
+			goto out;
+		}
+	}
+
+	if (video_ctx->codec != IMG_CODEC_JPEG) {
+		video_ctx->bias_reg = (uint32_t *)kzalloc(2 * PAGE_SIZE, GFP_KERNEL);
+		if (!video_ctx->bias_reg) {
+			DRM_ERROR("Failed to kzalloc bias reg, OOM\n");
+			ret = -1;
+			goto out;
+		}
+	} else {
+		video_ctx->bias_reg = NULL;
+	}
+
+	ret = tng_topaz_power_up(dev, codec);
+	if (ret) {
+		DRM_ERROR("TOPAZ: failed power up\n");
+		goto out;
+	}
+	ret = tng_topaz_fw_run(dev, video_ctx, codec);
+	if (ret) {
+		DRM_ERROR("TOPAZ: upload FW to HW failed\n");
+		goto out;
+	}
+out:
+	return ret;
+}
+
+static int tng_setup_new_context(
+	struct drm_device *dev,
+	struct drm_file *file_priv,
+	uint32_t *cmd,
+	uint32_t codec)
+{
+	struct psb_video_ctx *video_ctx;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct tng_topaz_private *topaz_priv = dev_priv->topaz_private;
+	int32_t ret = 0;
+
+	topaz_priv->frame_h = (uint16_t)((*((uint32_t *) cmd + 1)) & 0xffff);
+	topaz_priv->frame_w = (uint16_t)(((*((uint32_t *) cmd + 1))
+				& 0xffff0000)  >> 16) ;
+
+	video_ctx = get_ctx_from_fp(dev, file_priv->filp);
+	if (video_ctx == NULL) {
+		DRM_ERROR("Failed to get video contex from filp");
+		ret = -1;
+		goto out;
+	}
+
+	video_ctx->codec = codec;
+	video_ctx->status = 0;
+	video_ctx->frame_count = 0;
+	video_ctx->bias_reg = NULL;
+	video_ctx->handle_sequence_needed = false;
+	video_ctx->high_cmd_count = 0;
+	video_ctx->low_cmd_count = 0xa5a5a5a5 % MAX_TOPAZ_CMD_COUNT;
+	video_ctx->last_cir_index = -1;
+	video_ctx->air_info.air_scan_pos = 0;
+	video_ctx->air_info.air_table = (int8_t *)kzalloc((topaz_priv->frame_h * topaz_priv->frame_w) >> 8, GFP_KERNEL);
+	if (!video_ctx->air_info.air_table) {
+		DRM_ERROR("TOPAZ: Failed to alloc memory for AIR table\n");
+		return -1;
+	}
+
+	PSB_DEBUG_TOPAZ("TOPAZ: new context %p(%s)(cur context = %p)\n",
+			video_ctx, codec_to_string(codec),
+			topaz_priv->cur_context);
+
+	if (Is_Secure_Fw() && drm_topaz_pmpolicy == PSB_PMPOLICY_NOPM) {
+		PSB_DEBUG_TOPAZ("TOPAZ: new context, force a poweroff to reload firmware anyway\n");
+
+		drm_topaz_pmpolicy = PSB_PMPOLICY_POWERDOWN; /* off NOPM policy */
+		tng_topaz_power_off(dev);
+		drm_topaz_pmpolicy = PSB_PMPOLICY_NOPM; /* reset back to NOPM */
+	}
+
+	if (topaz_priv->cur_context &&
+		topaz_priv->cur_context != video_ctx) {
+		if (topaz_priv->cur_context->status & \
+		    MASK_TOPAZ_CONTEXT_SAVED) {
+			PSB_DEBUG_TOPAZ("Current context %p(%s, %08x)" \
+					" already saved, continue\n",
+					topaz_priv->cur_context,
+					codec_to_string(topaz_priv->cur_context->codec),
+					topaz_priv->cur_context->status);
+		/* If the previous context not saved */
+		} else {
+			PSB_DEBUG_TOPAZ("Current context %p(%s, %08x)" \
+					" not saved, save it first\n",
+					topaz_priv->cur_context,
+					codec_to_string(topaz_priv->cur_context->codec),
+					topaz_priv->cur_context->status);
+
+			if (Is_Secure_Fw()) {
+				ret = tng_topaz_power_off(dev);
+				if (ret) {
+					DRM_ERROR("TOPAZ: Failed");
+					DRM_ERROR("to power off");
+					goto out;
+				}
+			} else {
+				ret = tng_topaz_save_mtx_state(dev);
+				if (ret) {
+					DRM_ERROR("Failed to save");
+					DRM_ERROR("mtx status");
+					goto out;
+				}
+			}
+		}
+	}
+
+	if (video_ctx->codec != IMG_CODEC_JPEG) {
+		video_ctx->bias_reg = (uint32_t *)kzalloc(2 * PAGE_SIZE, GFP_KERNEL);
+		if (!video_ctx->bias_reg) {
+			DRM_ERROR("Failed to kzalloc bias reg, OOM\n");
+			ret = -1;
+			goto out;
+		}
+	} else {
+		video_ctx->bias_reg = NULL;
+	}
+
+	if (Is_Secure_Fw()) {
+		ret = tng_topaz_power_up(dev, codec);
+		if (ret) {
+			DRM_ERROR("TOPAZ: failed power up\n");
+			goto out;
+		}
+		ret = tng_topaz_fw_run(dev, video_ctx, codec);
+		if (ret) {
+			DRM_ERROR("TOPAZ: upload FW to HW failed\n");
+			goto out;
+		}
+	} else {
+	/* Upload the new codec firmware */
+		ret = tng_topaz_init_board(dev, video_ctx, codec);
+		if (ret) {
+			DRM_ERROR("TOPAZ: init board failed\n");
+			/* tng_error_dump_reg(dev_priv, 0); */
+			goto out;
+		}
+
+		/* Upload the new codec firmware */
+		ret = tng_topaz_setup_fw(dev, video_ctx, codec);
+		if (ret) {
+			DRM_ERROR("TOPAZ: upload FW to HW failed\n");
+			/* tng_error_dump_reg(dev_priv, 0); */
+			goto out;
+		}
+	}
+out:
+	return ret;
+}
+
+
+static int32_t tng_check_bias_register(uint32_t reg_id, uint32_t reg_off)
+{
+	switch (reg_id) {
+	case TOPAZ_MULTICORE_REG:
+		if (reg_off < REG_MIN_TOPAZ_MULTICORE ||
+			reg_off > REG_MAX_TOPAZ_MULTICORE) {
+			DRM_ERROR("Invalid MULTICORE register %08x\n",
+				reg_off);
+			return -1;
+		}
+		break;
+	case TOPAZ_CORE_REG:
+		if (reg_off < REG_MIN_TOPAZ_CORE ||
+			reg_off > REG_MAX_TOPAZ_CORE) {
+			DRM_ERROR("Invalid CORE register %08x\n", reg_off);
+			return -1;
+		}
+		break;
+	case TOPAZ_VLC_REG:
+		if (reg_off < REG_MIN_TOPAZ_VLC ||
+			reg_off > REG_MAX_TOPAZ_VLC) {
+			DRM_ERROR("Invalid VLC register %08x\n", reg_off);
+			return -1;
+		}
+		break;
+	default:
+		DRM_ERROR("Unknown reg space id: %08x\n", reg_id);
+		return -1;
+	}
+
+	return 0;
+}
+
+static int tng_topaz_set_bias(
+	struct drm_device *dev,
+	struct drm_file *file_priv,
+	const uint32_t *command,
+	uint32_t codec,
+	uint32_t *cmd_size)
+{
+	uint32_t reg_id, reg_off, reg_val, reg_cnt;
+	uint32_t *p_command;
+	struct drm_psb_private *dev_priv;
+	int ret = 0;
+
+	dev_priv = dev->dev_private;
+	p_command = (uint32_t *)command;
+	p_command++;
+	*cmd_size = *p_command;
+	p_command++;
+
+	PSB_DEBUG_TOPAZ("TOPAZ: Start to write %d Registers\n", *cmd_size);
+	for (reg_cnt = 0; reg_cnt < *cmd_size; reg_cnt++) {
+		/* Reg space ID */
+		reg_id = *p_command;
+		p_command++;
+		/* Reg offset */
+		reg_off = *p_command;
+		p_command++;
+		/* Reg value */
+		reg_val = *p_command;
+		p_command++;
+
+		ret = tng_check_bias_register(reg_id, reg_off);
+		if (ret) {
+			DRM_ERROR("Failed in checking BIAS register");
+			return ret;
+		}
+
+		switch (reg_id) {
+		case TOPAZ_MULTICORE_REG:
+			MULTICORE_WRITE32(reg_off, reg_val);
+			break;
+		case TOPAZ_CORE_REG:
+			TOPAZCORE_WRITE32(0, reg_off, reg_val);
+			break;
+		case TOPAZ_VLC_REG:
+			VLC_WRITE32(0, reg_off, reg_val);
+			break;
+		default:
+			DRM_ERROR("Unknown reg space id: (%08x)\n", reg_id);
+			return -1;
+		}
+	}
+
+	p_command = (uint32_t *)command;
+
+	/* For now, saving BIAS table no matter necessary or not */
+	ret = tng_save_bias_table(dev, file_priv, p_command);
+	if (ret) {
+		DRM_ERROR("Failed to save BIAS table");
+		return ret;
+	}
+
+	/* Update Globals */
+	if (codec != IMG_CODEC_JPEG) {
+		uint32_t ui32ToMtxReg = 0;
+
+		MULTICORE_WRITE32(TOPAZHP_TOP_CR_FIRMWARE_REG_1 +
+			(MTX_SCRATCHREG_TOMTX << 2), ui32ToMtxReg);
+	}
+
+	return ret;
+}
+/* #define MULTI_STREAM_TEST */
+
+int
+tng_topaz_send(
+	struct drm_device *dev,
+	struct drm_file *file_priv,
+	void *cmd,
+	uint32_t cmd_size_in,
+	uint32_t sync_seq)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	unsigned char *command = (unsigned char *) cmd;
+	struct tng_topaz_cmd_header *cur_cmd_header;
+	uint32_t cur_cmd_id;
+	uint32_t codec = 0;
+	int32_t cur_cmd_size = 4;
+	int32_t cmd_size = (int32_t)cmd_size_in;
+	struct tng_topaz_private *topaz_priv = dev_priv->topaz_private;
+	int32_t ret = 0;
+	struct psb_video_ctx *video_ctx;
+
+	if (Is_Secure_Fw() == 0) {
+		ret = tng_topaz_power_up(dev, 1);
+		if (ret) {
+			DRM_ERROR("TOPAZ: Failed to power up");
+			return ret;
+		}
+	}
+	video_ctx = get_ctx_from_fp(dev, file_priv->filp);
+	if (!video_ctx) {
+		DRM_ERROR("Failed to get context from filp %p\n",
+			  file_priv->filp);
+		ret = -1;
+		goto out;
+	}
+
+	topaz_priv->topaz_busy = 1;
+
+	PSB_DEBUG_TOPAZ("TOPAZ : send the command in the buffer" \
+		" one by one, cmdsize(%d), sequence(%08x)\n",
+		cmd_size, sync_seq);
+
+	if (is_island_on(OSPM_VIDEO_ENC_ISLAND)) {
+		/* Must flush here in case of invalid cache data */
+		tng_topaz_mmu_flushcache(dev_priv);
+	}
+
+	while (cmd_size > 0) {
+		cur_cmd_header = (struct tng_topaz_cmd_header *) command;
+		cur_cmd_id = cur_cmd_header->id;
+
+		PSB_DEBUG_TOPAZ("TOPAZ : cmd is(%s)\n",
+			cmd_to_string(cur_cmd_id & (~MTX_CMDID_PRIORITY)));
+		PSB_DEBUG_TOPAZ("remaining cmd size is(%d)\n",
+			cmd_size);
+
+		switch (cur_cmd_id) {
+		case MTX_CMDID_SW_NEW_CODEC:
+			codec = (*((uint32_t *) cmd) & 0xFF00) >> 8;
+			if (Is_Secure_Fw()) {
+				ret = tng_setup_new_context_secure(dev,file_priv,
+					(uint32_t *)command, codec);
+
+			} else {
+				ret = tng_setup_new_context(dev, file_priv,
+					(uint32_t *)command, codec);
+			}
+			if (ret) {
+				DRM_ERROR("Failed to setup new context");
+				return ret;
+			}
+			cur_cmd_size = 2;
+			break;
+		case MTX_CMDID_SW_ENTER_LOWPOWER:
+			PSB_DEBUG_TOPAZ("TOPAZ : Enter lowpower....\n");
+			PSB_DEBUG_TOPAZ("XXX : implement it\n");
+			cur_cmd_size = 1;
+			break;
+
+		case MTX_CMDID_SW_LEAVE_LOWPOWER:
+			cur_cmd_size = 2;
+			if (Is_Secure_Fw()) {
+				ret = tng_context_switch_secure(dev, file_priv,
+					*((uint32_t *)command + 1),
+					((*((uint32_t *)command) & 0xFF00) >> 8));
+
+			} else {
+				ret = tng_context_switch(dev, file_priv,
+					*((uint32_t *)command + 1),
+					((*((uint32_t *)command) & 0xFF00) >> 8));
+			}
+			if (ret) {
+				DRM_ERROR("Failed to switch context");
+				return ret;
+			}
+
+			break;
+		case MTX_CMDID_SW_WRITEREG:
+			ret = tng_topaz_set_bias(dev, file_priv,
+				(const uint32_t *)command, codec,
+				&cur_cmd_size);
+			if (ret) {
+				DRM_ERROR("Failed to set BIAS table");
+				return ret;
+			}
+
+			/*
+			* cur_cmd_size if the register count here,
+			* reg_id, reg_off and reg_val are stored in a 3 words
+			* */
+			cur_cmd_size *= 3;
+			/* Header size, 2 words */
+			cur_cmd_size += 2;
+			break;
+
+		case MTX_CMDID_SW_FILL_INPUT_CTRL:
+			ret = tng_fill_input_control (dev, file_priv,
+					*((uint32_t *)command + 1),//slot
+					*((uint32_t *)command + 3),//ir
+					*((uint32_t *)command + 4),//initqp
+					*((uint32_t *)command + 5),//minqp
+					*((uint32_t *)command + 6),//bufsize
+					*((uint32_t *)command + 7));//seed
+                        if (ret) {
+                                DRM_ERROR("Failed to fill " \
+                                        "input control buffer");
+                                return ret;
+                        }
+			cur_cmd_size = 8;
+			break;
+		case MTX_CMDID_SW_UPDATE_AIR_SEND:
+			ret = tng_update_air_send (dev, file_priv,
+					*((uint32_t *)command + 1),//slot
+					*((uint32_t *)command + 3),//air_skip_count
+					*((uint32_t *)command + 4),//air_per_frame
+					*((uint32_t *)command + 5),//bufsize
+					*((uint32_t *)command + 6));//frame_count
+                        if (ret) {
+                                DRM_ERROR("Failed to update air " \
+                                        "send");
+                                return ret;
+                        }
+			cur_cmd_size = 7;
+			break;
+		case MTX_CMDID_SW_AIR_BUF_CLEAR:
+			ret = tng_air_buf_clear(dev, file_priv);
+                        if (ret) {
+                                DRM_ERROR("Failed to clear " \
+                                        "AIR buffer");
+                                return ret;
+                        }
+			cur_cmd_size = 3;
+			break;
+		case MTX_CMDID_SW_UPDATE_AIR_CALC:
+			ret = tng_update_air_calc(dev, file_priv,
+					*((uint32_t *)command + 1),//slot
+					*((uint32_t *)command + 3),//buf_size
+					*((uint32_t *)command + 4),//sad_threashold
+					*((uint32_t *)command + 5));//enable_seld_stats_flag
+                        if (ret) {
+                                DRM_ERROR("Failed to update " \
+                                        "air calc");
+                                return ret;
+                        }
+			cur_cmd_size = 6;
+			break;
+		case MTX_CMDID_PAD:
+			/*Ignore this command, which is used to skip
+			 * some commands in user space*/
+			cur_cmd_size = 4;
+			break;
+		/* Ordinary commmand */
+                case MTX_CMDID_SETUP_INTERFACE:
+			if (video_ctx && video_ctx->wb_bo) {
+				PSB_DEBUG_TOPAZ("TOPAZ: reset\n");
+				if (Is_Secure_Fw()) {
+					tng_topaz_power_off(dev);
+					tng_topaz_power_up(dev, IMG_CODEC_JPEG);
+					tng_topaz_fw_run(dev, video_ctx, IMG_CODEC_JPEG);
+				} else {
+					tng_topaz_reset(dev_priv);
+					tng_topaz_setup_fw(dev, video_ctx, topaz_priv->cur_codec);
+				}
+
+				PSB_DEBUG_TOPAZ("TOPAZ: unref write back bo\n");
+				ttm_bo_kunmap(&video_ctx->wb_bo_kmap);
+				ttm_bo_unreserve(video_ctx->wb_bo);
+				ttm_bo_unref(&video_ctx->wb_bo);
+				video_ctx->wb_bo = NULL;
+			}
+                case MTX_CMDID_SETVIDEO:
+                        ret = tng_setup_WB_mem(dev, file_priv,
+                                (const void *)command);
+                        if (ret) {
+                                DRM_ERROR("Failed to setup " \
+                                        "write back memory region");
+                                return ret;
+                        }
+
+		case MTX_CMDID_DO_HEADER:
+		case MTX_CMDID_ENCODE_FRAME:
+		case MTX_CMDID_GETVIDEO:
+		case MTX_CMDID_PICMGMT:
+		case (MTX_CMDID_PICMGMT | MTX_CMDID_PRIORITY):
+
+		case MTX_CMDID_START_FRAME:
+		case MTX_CMDID_ENCODE_SLICE:
+		case MTX_CMDID_END_FRAME:
+		case MTX_CMDID_RC_UPDATE:
+		case (MTX_CMDID_RC_UPDATE | MTX_CMDID_PRIORITY):
+		case MTX_CMDID_PROVIDE_SOURCE_BUFFER:
+		case MTX_CMDID_PROVIDE_REF_BUFFER:
+		case MTX_CMDID_PROVIDE_CODED_BUFFER:
+		case (MTX_CMDID_PROVIDE_SOURCE_BUFFER | MTX_CMDID_PRIORITY):
+		case (MTX_CMDID_PROVIDE_REF_BUFFER | MTX_CMDID_PRIORITY):
+		case (MTX_CMDID_PROVIDE_CODED_BUFFER | MTX_CMDID_PRIORITY):
+
+		case MTX_CMDID_SETQUANT:
+		case MTX_CMDID_ISSUEBUFF:
+		case MTX_CMDID_SETUP:
+		case MTX_CMDID_SHUTDOWN:
+			/*
+			if (cur_cmd_header->id == MTX_CMDID_SHUTDOWN) {
+				cur_cmd_size = 4;
+				PSB_DEBUG_TOPAZ("TOPAZ : Doesn't handle " \
+					"SHUTDOWN command for now\n");
+				break;
+			}
+			*/
+
+			if (video_ctx) {
+				video_ctx->cur_sequence = sync_seq;
+				video_ctx->handle_sequence_needed = true;
+			}
+
+			/* Write command to FIFO */
+			ret = mtx_write_FIFO(dev, cur_cmd_header,
+				*((uint32_t *)(command) + 1),
+				*((uint32_t *)(command) + 2), sync_seq);
+			if (ret) {
+				DRM_ERROR("Failed to write command to FIFO");
+				goto out;
+			}
+
+			/*tng_wait_on_sync(dev, sync_seq, cur_cmd_id);*/
+
+			/*
+			for (m = 0; m < 1000; m++) {
+				PSB_UDELAY(100);
+			}
+			PSB_UDELAY(6);
+			*/
+
+			/* Calculate command size */
+			switch (cur_cmd_id) {
+			case MTX_CMDID_SETVIDEO:
+				cur_cmd_size =
+					(video_ctx->codec == IMG_CODEC_JPEG) ?
+					(6 + 1) : (7 + 1);
+				break;
+			case MTX_CMDID_SETUP_INTERFACE:
+			case MTX_CMDID_SHUTDOWN:
+				cur_cmd_size = 4;
+				break;
+			case (MTX_CMDID_PROVIDE_SOURCE_BUFFER |
+				MTX_CMDID_PRIORITY):
+			case (MTX_CMDID_PROVIDE_REF_BUFFER |
+				MTX_CMDID_PRIORITY):
+			case (MTX_CMDID_PROVIDE_CODED_BUFFER |
+				MTX_CMDID_PRIORITY):
+			case (MTX_CMDID_PICMGMT | MTX_CMDID_PRIORITY):
+				cur_cmd_size = 3;
+				break;
+			default:
+				cur_cmd_size = 3;
+				break;
+			}
+
+
+			break;
+		default:
+			DRM_ERROR("TOPAZ: unsupported command id: %x\n",
+				  cur_cmd_id);
+			return -1;
+		}
+
+		/*cur_cmd_size indicate the number of words of
+		current command*/
+		command += cur_cmd_size * 4;
+		cmd_size -= cur_cmd_size * 4;
+
+		PSB_DEBUG_TOPAZ("TOPAZ : remaining cmd size is(%d)\n",
+			cmd_size);
+
+	}
+
+	tng_topaz_kick_null_cmd(dev, sync_seq);
+out:
+	return ret;
+}
+
+int tng_topaz_remove_ctx(
+	struct drm_psb_private *dev_priv,
+	struct psb_video_ctx *video_ctx)
+{
+	struct tng_topaz_private *topaz_priv;
+	/* struct psb_video_ctx *video_ctx; */
+	struct tng_topaz_cmd_queue *entry, *next;
+	unsigned long flags;
+
+	topaz_priv = dev_priv->topaz_private;
+
+	spin_lock_irqsave(&(topaz_priv->ctx_spinlock), flags);
+	if (video_ctx == topaz_priv->irq_context)
+		topaz_priv->irq_context = NULL;
+	spin_unlock_irqrestore(&(topaz_priv->ctx_spinlock), flags);
+
+	mutex_lock(&topaz_priv->topaz_mutex);
+
+	/* topaz_priv->topaz_busy = 0; */
+	/* video_ctx = NULL; */
+
+	/* Disable ISR */
+	/*if (TOPAZHP_IRQ_ENABLED) {
+		PSB_DEBUG_TOPAZ("TOPAZ: Disalbe IRQ and " \
+			"Wait for MTX completion\n");
+		tng_topaz_disableirq(dev_priv);
+	}*/
+
+	/* Stop the MTX */
+	/*
+	mtx_stop(dev_priv);
+	ret = mtx_wait_for_completion(dev_priv);
+	if (ret) {
+		DRM_ERROR("Mtx wait for completion error");
+		return ret;
+	}
+
+	list_for_each_entry(pos, &dev_priv->video_ctx, head) {
+		if (pos->filp == filp) {
+			video_ctx = pos;
+			break;
+		}
+	}
+	*/
+
+	if (video_ctx == NULL) {
+		DRM_ERROR("Invalid video context\n");
+		mutex_unlock(&topaz_priv->topaz_mutex);
+		return -1;
+	}
+
+	PSB_DEBUG_TOPAZ("TOPAZ: release context %p(%s)\n",
+			video_ctx, codec_to_string(video_ctx->codec));
+
+	if (video_ctx->bias_reg) {
+		PSB_DEBUG_TOPAZ("TOPAZ: Free bias reg saving memory\n");
+		kfree(video_ctx->bias_reg);
+		video_ctx->bias_reg = NULL;
+	}
+
+	if (video_ctx->wb_bo) {
+		PSB_DEBUG_TOPAZ("TOPAZ: unref write back bo\n");
+		ttm_bo_kunmap(&video_ctx->wb_bo_kmap);
+		ttm_bo_unreserve(video_ctx->wb_bo);
+		ttm_bo_unref(&video_ctx->wb_bo);
+		video_ctx->wb_bo = NULL;
+	}
+
+	if (video_ctx->mtx_ctx_bo) {
+		PSB_DEBUG_TOPAZ("TOPAZ: unref setvideo bo\n");
+		ttm_bo_kunmap(&video_ctx->mtx_ctx_kmap);
+		/* unreserve if reserved in tng_setup_WB_mem() */
+		if (0 != atomic_read(&video_ctx->mtx_ctx_bo->reserved)) {
+			PSB_DEBUG_TOPAZ("MTX context reserved, "\
+					"unreserve it now\n");
+			ttm_bo_unreserve(video_ctx->mtx_ctx_bo);
+		}
+		ttm_bo_unref(&video_ctx->mtx_ctx_bo);
+		video_ctx->mtx_ctx_bo = NULL;
+	}
+
+	if (video_ctx->cir_input_ctrl_bo) {
+		PSB_DEBUG_TOPAZ("TOPAZ: unref cir input ctrl bo\n");
+		ttm_bo_unref(&video_ctx->cir_input_ctrl_bo);
+		video_ctx->cir_input_ctrl_bo = NULL;
+	}
+
+	if (video_ctx->bufs_f_p_out_best_mp_param_bo) {
+		PSB_DEBUG_TOPAZ("TOPAZ: unref first pass out best "\
+				"multipass param bo\n");
+		ttm_bo_unref(&video_ctx->bufs_f_p_out_best_mp_param_bo);
+		video_ctx->bufs_f_p_out_best_mp_param_bo = NULL;
+	}
+
+	if (video_ctx->bufs_f_p_out_params_bo) {
+		PSB_DEBUG_TOPAZ("TOPAZ: unref first pass out param bo\n");
+		ttm_bo_unref(&video_ctx->bufs_f_p_out_params_bo);
+		video_ctx->bufs_f_p_out_params_bo = NULL;
+	}
+
+	if (video_ctx->air_info.air_table) {
+		PSB_DEBUG_TOPAZ("TOPAZ: free air table\n");
+		kfree(video_ctx->air_info.air_table);
+		video_ctx->air_info.air_table = NULL;
+	}
+
+	video_ctx->status = 0;
+	video_ctx->codec = 0;
+
+	/* If shutdown before completion, need to handle the fence.*/
+	if (video_ctx->handle_sequence_needed) {
+		*topaz_priv->topaz_sync_addr = video_ctx->cur_sequence;
+		psb_fence_handler(dev_priv->dev, LNC_ENGINE_ENCODE);
+		video_ctx->handle_sequence_needed = false;
+	}
+
+	if (!list_empty(&topaz_priv->topaz_queue) &&
+	    get_ctx_cnt(dev_priv->dev) == 0) {
+		PSB_DEBUG_TOPAZ("TOPAZ: Flush all commands " \
+			"the in queue\n");
+		/* clear all the commands in queue */
+		list_for_each_entry_safe(entry, next,
+				 &topaz_priv->topaz_queue,
+				 head) {
+			list_del(&entry->head);
+			kfree(entry->cmd);
+			kfree(entry);
+		}
+	}
+
+	if (get_ctx_cnt(dev_priv->dev) == 0) {
+		PSB_DEBUG_TOPAZ("No more active VEC context\n");
+		dev_priv->topaz_ctx = NULL;
+	}
+
+	mutex_unlock(&topaz_priv->topaz_mutex);
+
+	return 0;
+}
+
+void tng_topaz_handle_sigint(
+	struct drm_device *dev,
+	struct file *filp)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct tng_topaz_private *topaz_priv = dev_priv->topaz_private;
+	struct psb_video_ctx *video_ctx;
+	uint32_t count = 0;
+
+	video_ctx = get_ctx_from_fp(dev, filp);
+	if (video_ctx) {
+		PSB_DEBUG_TOPAZ("TOPAZ: Prepare to handle CTRL + C\n");
+	} else {
+		PSB_DEBUG_TOPAZ("TOPAZ: Not VEC context or already released\n");
+		return;
+	}
+
+	while (topaz_priv->topaz_busy == 1 &&
+	       count++ < 20000)
+		PSB_UDELAY(6);
+
+	if (count == 20000)
+		DRM_ERROR("Failed to handle sigint event\n");
+
+	PSB_DEBUG_TOPAZ("TOPAZ: Start to handle CTRL + C\n");
+	psb_remove_videoctx(dev_priv, filp);
+}
+
+int tng_topaz_dequeue_send(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct tng_topaz_cmd_queue *topaz_cmd = NULL;
+	struct tng_topaz_private *topaz_priv = dev_priv->topaz_private;
+	int32_t ret = 0;
+
+	/* Avoid race condition with queue buffer when topaz_busy = 1 */
+	mutex_lock(&topaz_priv->topaz_mutex);
+	if (list_empty(&topaz_priv->topaz_queue)) {
+		topaz_priv->topaz_busy = 0;
+		PSB_DEBUG_TOPAZ("TOPAZ: empty command queue, " \
+			"set topaz_busy = 0, directly return\n");
+		mutex_unlock(&topaz_priv->topaz_mutex);
+		return ret;
+	}
+
+	topaz_cmd = list_first_entry(&topaz_priv->topaz_queue,
+			struct tng_topaz_cmd_queue, head);
+
+	memset(topaz_priv->saved_queue, 0, sizeof(struct tng_topaz_cmd_queue));
+	memset(topaz_priv->saved_cmd, 0, MAX_CMD_SIZE);
+
+	topaz_priv->saved_queue->file_priv = topaz_cmd->file_priv;
+	topaz_priv->saved_queue->cmd_size = topaz_cmd->cmd_size;
+	topaz_priv->saved_queue->sequence = topaz_cmd->sequence;
+	topaz_priv->saved_queue->head = topaz_cmd->head;
+
+	memcpy(topaz_priv->saved_cmd, topaz_cmd->cmd, topaz_cmd->cmd_size);
+
+	list_del(&topaz_cmd->head);
+	kfree(topaz_cmd->cmd);
+	kfree(topaz_cmd);
+	mutex_unlock(&topaz_priv->topaz_mutex);
+
+	PSB_DEBUG_TOPAZ("TOPAZ: dequeue command of sequence %08x " \
+			"and send it to topaz\n", \
+			topaz_priv->saved_queue->sequence);
+
+	ret = tng_topaz_send(dev,
+		topaz_priv->saved_queue->file_priv,
+		topaz_priv->saved_cmd,
+		topaz_priv->saved_queue->cmd_size,
+		topaz_priv->saved_queue->sequence);
+	if (ret) {
+		DRM_ERROR("TOPAZ: tng_topaz_send failed.\n");
+		ret = -EINVAL;
+	}
+
+	PSB_DEBUG_TOPAZ("TOPAZ: dequeue command of sequence %08x " \
+			"finished\n", topaz_priv->saved_queue->sequence);
+
+	return ret;
+}
+
+int32_t tng_check_topaz_idle(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *)dev->dev_private;
+	struct tng_topaz_private *topaz_priv = dev_priv->topaz_private;
+	uint32_t reg_val;
+
+	if (dev_priv->topaz_ctx == NULL) {
+		PSB_DEBUG_TOPAZ("TOPAZ: topaz context is null\n");
+		return 0;
+	}
+
+	/*HW is stuck. Need to power off TopazSC to reset HW*/
+	if (1 == topaz_priv->topaz_needs_reset) {
+		PSB_DEBUG_TOPAZ("TOPAZ: Topaz needs reset\n");
+		return 0;
+	}
+
+	if (topaz_priv->topaz_busy) {
+		PSB_DEBUG_TOPAZ("TOPAZ: can't save, topaz_busy = %d\n", \
+				   topaz_priv->topaz_busy);
+		return -EBUSY;
+	}
+
+	MULTICORE_READ32(TOPAZHP_TOP_CR_MULTICORE_CMD_FIFO_WRITE_SPACE,
+		&reg_val);
+	reg_val &= MASK_TOPAZHP_TOP_CR_CMD_FIFO_SPACE;
+	if (reg_val != 32) {
+		PSB_DEBUG_TOPAZ("TOPAZ: HW is busy. Free words in command" \
+				"FIFO is %d.\n",
+				reg_val);
+		return -EBUSY;
+	}
+
+	return 0; /* we think it is idle */
+}
+
+
+int32_t tng_video_get_core_num(struct drm_device *dev, uint64_t user_pointer)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *)dev->dev_private;
+	struct tng_topaz_private *topaz_priv = dev_priv->topaz_private;
+	int32_t ret = 0;
+
+	ret = copy_to_user((void __user *)((unsigned long)user_pointer),
+			   &topaz_priv->topaz_num_pipes,
+			   sizeof(topaz_priv->topaz_num_pipes));
+
+	if (ret)
+		return -EFAULT;
+
+	return ret;
+
+}
+
+int32_t tng_video_frameskip(struct drm_device *dev, uint64_t user_pointer)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *)dev->dev_private;
+	struct tng_topaz_private *topaz_priv = dev_priv->topaz_private;
+	int32_t ret = 0;
+
+	ret = copy_to_user((void __user *)((unsigned long)user_pointer),
+		&topaz_priv->frame_skip, sizeof(topaz_priv->frame_skip));
+
+	if (ret)
+		return -EFAULT;
+
+	return ret;
+}
+
+static void tng_topaz_flush_cmd_queue(struct tng_topaz_private *topaz_priv)
+{
+	struct tng_topaz_cmd_queue *entry, *next;
+
+	/* remind to reset topaz */
+	topaz_priv->topaz_needs_reset = 1;
+	topaz_priv->topaz_busy = 0;
+
+	if (list_empty(&topaz_priv->topaz_queue))
+		return;
+
+	/* flush all command in queue */
+	list_for_each_entry_safe(entry, next,
+				 &topaz_priv->topaz_queue,
+				 head) {
+		list_del(&entry->head);
+		kfree(entry->cmd);
+		kfree(entry);
+	}
+
+	return;
+}
+
+void tng_topaz_handle_timeout(struct ttm_fence_device *fdev)
+{
+	struct drm_psb_private *dev_priv =
+		container_of(fdev, struct drm_psb_private, fdev);
+	/*struct drm_device *dev =
+		container_of((void *)dev_priv,
+		struct drm_device, dev_private);*/
+	struct tng_topaz_private *topaz_priv = dev_priv->topaz_private;
+
+	/* if (IS_MRST(dev))
+		return  lnc_topaz_handle_timeout(fdev);
+	*/
+	DRM_ERROR("TOPAZ: current codec is %s\n",
+			codec_to_string(topaz_priv->cur_codec));
+	tng_topaz_flush_cmd_queue(topaz_priv);
+
+
+	/* Power down TopazSC to reset HW*/
+	/* schedule_delayed_work(&topaz_priv->topaz_suspend_wq, 0); */
+}
+
+void tng_topaz_enableirq(struct drm_device *dev)
+{
+#ifdef TOPAZHP_IRQ_ENABLED
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	uint32_t crImgTopazIntenab;
+
+	PSB_DEBUG_TOPAZ("TOPAZ: Enable TOPAZHP IRQ\n");
+
+	MULTICORE_READ32(TOPAZHP_TOP_CR_MULTICORE_HOST_INT_ENAB,
+		&crImgTopazIntenab);
+
+	crImgTopazIntenab |= (MASK_TOPAZHP_TOP_CR_MTX_INTEN_MTX |
+		MASK_TOPAZHP_TOP_CR_HOST_TOPAZHP_MAS_INTEN);
+
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_MULTICORE_HOST_INT_ENAB,
+		crImgTopazIntenab);
+
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_MULTICORE_MTX_INT_ENAB,
+		MASK_MTX_INT_ENAB);
+#else
+	return 0;
+#endif
+}
+
+void tng_topaz_disableirq(struct drm_device *dev)
+{
+	uint32_t crImgTopazIntenab;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	PSB_DEBUG_TOPAZ("TOPAZ: Disable TOPAZHP IRQ\n");
+	MULTICORE_READ32(TOPAZHP_TOP_CR_MULTICORE_HOST_INT_ENAB,
+		&crImgTopazIntenab);
+
+	crImgTopazIntenab &= ~(MASK_TOPAZHP_TOP_CR_MTX_INTEN_MTX);
+
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_MULTICORE_HOST_INT_ENAB,
+		crImgTopazIntenab);
+}
+
+/* Disable VEC or GFX clock gating */
+void tng_topaz_CG_disable(struct drm_device *dev)
+{
+	int reg_val;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	if (drm_topaz_cgpolicy & PSB_CGPOLICY_GFXCG_DIS) {
+		reg_val = ioread32(dev_priv->wrapper_reg + 0);
+		PSB_DEBUG_TOPAZ("TOPAZ: GFX CG 0 = %08x, " \
+			"disable GFX CG\n", reg_val);
+		iowrite32(0x103, dev_priv->wrapper_reg + 0);
+		reg_val = ioread32(dev_priv->wrapper_reg + 0);
+		PSB_DEBUG_TOPAZ("TOPAZ: GFX CG 0 = %08x\n", reg_val);
+	}
+
+	if (drm_topaz_cgpolicy & PSB_CGPOLICY_VECCG_DIS) {
+		reg_val = ioread32(dev_priv->vec_wrapper_reg + 0);
+		PSB_DEBUG_TOPAZ("TOPAZ: VEC CG 0 = %08x, " \
+			"disable VEC CG\n", reg_val);
+		iowrite32(0x03, dev_priv->vec_wrapper_reg + 0);
+		reg_val = ioread32(dev_priv->vec_wrapper_reg + 0);
+		PSB_DEBUG_TOPAZ("TOPAZ: VEC CG 0 = %08x\n", reg_val);
+	}
+}
+
+static int pm_cmd_freq_wait(u32 reg_freq)
+{
+	int tcount;
+	u32 freq_val;
+
+	for (tcount = 0; ; tcount++) {
+		freq_val = intel_mid_msgbus_read32(PUNIT_PORT, reg_freq);
+		if ((freq_val & IP_FREQ_VALID) == 0)
+			break;
+		if (tcount > 500) {
+			DRM_ERROR("P-Unit freq request wait timeout");
+			return -EBUSY;
+		}
+		udelay(1);
+	}
+
+	return 0;
+}
+
+static int pm_cmd_freq_set(u32 reg_freq, u32 freq_code)
+{
+	u32 freq_val;
+	int rva;
+
+	pm_cmd_freq_wait(reg_freq);
+
+	freq_val = IP_FREQ_VALID | freq_code;
+	intel_mid_msgbus_write32(PUNIT_PORT, reg_freq, freq_val);
+
+	rva = pm_cmd_freq_wait(reg_freq);
+
+	return rva;
+}
+
+int tng_topaz_set_vec_freq(u32 freq_code)
+{
+	return pm_cmd_freq_set(VEC_SS_PM1, freq_code);
+}
+
+#define PMU_ENC			0x1
+/* Workaround to disable D0i3 */
+bool power_island_get_dummy(struct drm_device *dev)
+{
+	int pm_ret = 0;
+	unsigned long irqflags;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct tng_topaz_private *topaz_priv = dev_priv->topaz_private;
+
+	if (atomic_read(&topaz_priv->vec_ref_count))
+		goto out;
+
+	pm_ret = pmu_nc_set_power_state(PMU_ENC, OSPM_ISLAND_UP, VEC_SS_PM0);
+	if (pm_ret) {
+		PSB_DEBUG_PM("power up vec failed\n");
+		return false;
+	}
+
+	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+	dev_priv->vdc_irq_mask |= _LNC_IRQ_TOPAZ_FLAG;
+	PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
+	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+
+	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+
+	atomic_inc(&topaz_priv->vec_ref_count);
+
+	if (drm_topaz_cgpolicy != PSB_CGPOLICY_ON)
+		tng_topaz_CG_disable(dev);
+out:
+	return true;
+}
+
+/* Workaround to disable D0i3 */
+bool power_island_put_dummy(struct drm_device *dev)
+{
+	return true;
+}
+
+int tng_topaz_power_up(
+	struct drm_device *dev,
+	enum drm_tng_topaz_codec codec)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct tng_topaz_private *topaz_priv = dev_priv->topaz_private;
+	u32 reg_val = 0;
+
+	PSB_DEBUG_TOPAZ("TOPAZ: power up start\n");
+
+	if (Is_Secure_Fw()) {
+		if (is_island_on(OSPM_VIDEO_ENC_ISLAND)) {
+			PSB_DEBUG_TOPAZ("TOPAZ: power is already up, end and return\n");
+			return 0;
+		}
+		reg_val = intel_mid_msgbus_read32(PUNIT_PORT, VEC_SS_PM0);
+		PSB_DEBUG_TOPAZ("TOPAZ: VECSSPM0 (0) = 0x%08x\n", reg_val);
+		reg_val &= ~((u32)0x7c);
+		reg_val |= ((u32)codec) << 2;
+		intel_mid_msgbus_write32(PUNIT_PORT, VEC_SS_PM0, reg_val);
+		PSB_DEBUG_TOPAZ("TOPAZ: VECSSPM0 (1) = 0x%08x\n", reg_val);
+		/* udelay(2); */
+	}
+
+#ifdef MRFLD_B0_DEBUG
+	reg_val = intel_mid_msgbus_read32(PUNIT_PORT, VEC_SS_PM0);
+	PSB_DEBUG_TOPAZ("Read R(0x%08x) V(0x%08x)\n",
+		VEC_SS_PM0, reg_val);
+	reg_val &= ~((u32)0x3);
+
+	intel_mid_msgbus_write32(PUNIT_PORT, VEC_SS_PM0, reg_val);
+	PSB_DEBUG_TOPAZ("write R(0x%08x) V(0x%08x)\n",
+		VEC_SS_PM0, reg_val);
+	while (count != 0) {
+		reg_val = intel_mid_msgbus_read32(PUNIT_PORT, DPC_SSC);
+		PSB_DEBUG_TOPAZ("RR (%d), A(0x%08x) V(0x%08x)\n",
+			count, DPC_SSC, reg_val);
+		reg_val = intel_mid_msgbus_read32(PUNIT_PORT, VEC_SS_PM0);
+		count -= 1;
+		if (reg_val == (codec<<2))
+			count = 0;
+		PSB_DEBUG_TOPAZ("RR (%d), A(0x%08x) V(0x%08x)\n",
+			count, VEC_SS_PM0, reg_val);
+			udelay(20);
+		if (count != 0) {
+			reg_val &= ~((u32)0x3);
+			intel_mid_msgbus_write32(PUNIT_PORT,
+				VEC_SS_PM0, reg_val);
+			PSB_DEBUG_TOPAZ("WW (%d), A(0x%08x) V(0x%08x)\n",
+			count, VEC_SS_PM0, reg_val);
+		}
+	}
+#endif
+
+	if (drm_topaz_pmpolicy == PSB_PMPOLICY_NOPM)
+		PSB_DEBUG_TOPAZ("Topaz: NOPM policy, but still need powerup here\n");
+
+	/* Reduce D0i0 residency, original it is 0 set by GFX driver */
+	topaz_priv->dev->pdev->d3_delay = 10;
+
+	if (!power_island_get(OSPM_VIDEO_ENC_ISLAND)) {
+		DRM_ERROR("Failed to power on ENC island\n");
+		return -1;
+	}
+
+	/* Must flush here in case of invalid cache data */
+	/* tng_topaz_mmu_flushcache(dev_priv); */
+	PSB_DEBUG_TOPAZ("TOPAZ: power up end\n");
+	return 0;
+}
+
+int tng_topaz_power_off(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct tng_topaz_private *topaz_priv = dev_priv->topaz_private;
+
+	PSB_DEBUG_TOPAZ("TOPAZ: power off start\n");
+	if (Is_Secure_Fw()) {
+		if (!is_island_on(OSPM_VIDEO_ENC_ISLAND)) {
+			PSB_DEBUG_TOPAZ("TOPAZ: power is already off\n");
+			return 0;
+		}
+	}
+
+	if (drm_topaz_pmpolicy == PSB_PMPOLICY_NOPM)
+		PSB_DEBUG_TOPAZ("TOPAZ: skip power off since NOPM policy\n");
+	else {
+		power_island_put(OSPM_VIDEO_ENC_ISLAND);
+		/* Restore delay to 0 */
+		topaz_priv->dev->pdev->d3_delay = 0;
+	}
+
+	PSB_DEBUG_TOPAZ("TOPAZ: power off end\n");
+
+	return 0;
+}
+
+int Is_Secure_Fw()
+{
+	if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER &&
+		intel_mid_soc_stepping() < 1)
+		return 0;
+	return 1;
+}
diff --git a/drivers/external_drivers/intel_media/video/encode/tng_topaz.h b/drivers/external_drivers/intel_media/video/encode/tng_topaz.h
new file mode 100644
index 0000000..3559539
--- /dev/null
+++ b/drivers/external_drivers/intel_media/video/encode/tng_topaz.h
@@ -0,0 +1,319 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
+ * Copyright (c) Imagination Technologies Limited, UK
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+#ifndef _FPGA_TOPAZ_H_
+#define _FPGA_TOPAZ_H_
+
+#include "psb_drv.h"
+#include "tng_topaz_hw_reg.h"
+
+
+#define TOPAZ_MTX_REG_SIZE (34 * 4 + 183 * 4)
+
+/*Must be equal to IMG_CODEC_NUM*/
+#define TNG_TOPAZ_CODEC_NUM_MAX (25)
+#define TNG_TOPAZ_BIAS_TABLE_MAX_SIZE (2 * 1024)
+/*#define TOPAZ_PDUMP*/
+
+#define TOPAZHP_IRQ_ENABLED
+#define TOPAZHP_PIPE_NUM 2
+
+/* #define MRFLD_B0_DEBUG */
+
+#define TNG_IS_H264_ENC(codec) \
+	(codec == IMG_CODEC_H264_NO_RC || \
+	codec == IMG_CODEC_H264_VBR  || \
+	codec == IMG_CODEC_H264_VCM  || \
+	codec == IMG_CODEC_H264_CBR  || \
+	codec == IMG_CODEC_H264_LLRC || \
+	codec == IMG_CODEC_H264_ALL_RC)
+
+#define TNG_IS_H264MVC_ENC(codec) \
+	(codec == IMG_CODEC_H264MVC_NO_RC || \
+	codec == IMG_CODEC_H264MVC_CBR || \
+	codec == IMG_CODEC_H264MVC_VBR)
+
+#define TNG_IS_JPEG_ENC(codec) \
+	(codec == IMG_CODEC_JPEG)
+
+#define MASK_WB_HIGH_CMDID      0xFF000000
+#define SHIFT_WB_HIGH_CMDID     24
+
+#define MASK_WB_LOW_CMDID       0x00FFFF00
+#define SHIFT_WB_LOW_CMDID      8
+
+#define MASK_WB_SYNC_CNT        0x000000FF
+#define SHIFT_WB_SYNC_CNT       0
+
+#define MAX_CMD_SIZE		4096
+
+#define SHIFT_MTX_MSG_PRIORITY          (7)
+#define MASK_MTX_MSG_PRIORITY           (0x1 << SHIFT_MTX_MSG_PRIORITY)
+#define SHIFT_MTX_MSG_CORE                      (8)
+#define MASK_MTX_MSG_CORE                       (0x7f << SHIFT_MTX_MSG_CORE)
+#define SHIFT_MTX_MSG_WB_INTERRUPT      (15)
+#define MASK_MTX_MSG_WB_INTERRUPT       (0x1 << SHIFT_MTX_MSG_WB_INTERRUPT)
+#define SHIFT_MTX_MSG_COUNT                     (16)
+#define MASK_MTX_MSG_COUNT                      (0xffff << SHIFT_MTX_MSG_COUNT)
+
+/*#define VERIFYFW*/
+/*#define VERIFY_CONTEXT_SWITCH*/
+
+enum TOPAZ_REG_ID {
+	TOPAZ_MULTICORE_REG,
+	TOPAZ_CORE_REG,
+	TOPAZ_VLC_REG
+};
+
+extern int drm_topaz_pmpolicy;
+extern int drm_topaz_cgpolicy;
+extern int drm_topaz_cmdpolicy;
+
+/* XXX: it's a copy of msvdx cmd queue. should have some change? */
+struct tng_topaz_cmd_queue {
+	struct drm_file *file_priv;
+	struct list_head head;
+	void *cmd;
+	uint32_t cmd_size;
+	uint32_t sequence;
+};
+
+#define SECURE_VRL_HEADER 728
+#define SECURE_FIP_HEADER 296
+
+struct tng_secure_fw {
+	uint32_t codec_idx;
+	uint32_t addr_data;
+	uint32_t text_size;
+	uint32_t data_size;
+	uint32_t data_loca;
+
+	struct ttm_buffer_object *text;
+	struct ttm_buffer_object *data;
+};
+
+
+#define MAX_CONTEXT_CNT	2
+#define MAX_TOPAZHP_CORES 4
+#define MASK_TOPAZ_CONTEXT_SAVED	(0x1)
+#define MASK_TOPAZ_FIRMWARE_EXIT	(0x1 << 1)
+#define MASK_TOPAZ_FIRMWARE_ACTIVE	(0x1 << 2)
+
+struct tng_topaz_private {
+	unsigned int pmstate;
+	struct sysfs_dirent *sysfs_pmstate;
+	int frame_skip;
+
+#ifdef VERIFYFW
+	/* For verify firmware */
+	struct ttm_buffer_object *text_mem;
+	struct ttm_buffer_object *data_mem;
+	uint32_t bo_text_items[10];
+	uint32_t bo_data_items[10];
+	uint32_t dma_text_items[10];
+	uint32_t dma_data_items[10];
+#endif
+	uint32_t cur_codec;
+	int topaz_needs_reset;
+	void *topaz_mtx_reg_state[MAX_TOPAZHP_CORES];
+	void *topaz_bias_table[MAX_TOPAZHP_CORES];
+	uint32_t cur_mtx_data_size[MAX_TOPAZHP_CORES];
+	struct ttm_buffer_object *topaz_mtx_data_mem[MAX_TOPAZHP_CORES];
+
+	/*
+	 *topaz command queue
+	 */
+	struct tng_topaz_cmd_queue *saved_queue;
+	char *saved_cmd;
+	spinlock_t topaz_lock;
+	struct mutex topaz_mutex;
+	struct list_head topaz_queue;
+	atomic_t cmd_wq_free;
+	atomic_t vec_ref_count;
+	wait_queue_head_t cmd_wq;
+	int topaz_busy;		/* 0 means topaz is free */
+	int topaz_fw_loaded;
+
+	uint32_t stored_initial_qp;
+	uint32_t topaz_dash_access_ctrl;
+
+	struct ttm_buffer_object *topaz_bo; /* 4K->2K/2K for writeback/sync */
+	struct ttm_bo_kmap_obj topaz_bo_kmap;
+
+	uint32_t *topaz_mtx_wb;
+	uint32_t topaz_wb_offset;
+	uint32_t *topaz_sync_addr;
+	uint32_t topaz_sync_offset;
+	uint32_t topaz_cmd_count;
+	uint32_t core_id;
+	uint32_t topaz_wb_received;
+	uint32_t topaz_mtx_saved;
+
+	/* firmware */
+	struct tng_secure_fw topaz_fw[TNG_TOPAZ_CODEC_NUM_MAX];
+	uint32_t topaz_hw_busy;
+
+	uint32_t topaz_num_pipes;
+
+	/*Before load firmware, need to set up
+	jitter according to resolution*/
+	/*The data of MTX_CMDID_SW_NEW_CODEC command
+	contains width and length.*/
+	uint16_t frame_w;
+	uint16_t frame_h;
+
+	/* For IRQ and Sync */
+	uint32_t producer;
+	uint32_t consumer;
+
+	/* JPEG ISSUEBUF cmd count */
+	uint32_t issuebuf_cmd_count;
+
+	/* Context parameters */
+	struct psb_video_ctx *cur_context;
+	struct psb_video_ctx *irq_context;
+
+	/* topaz suspend work queue */
+	struct drm_device *dev;
+	struct delayed_work topaz_suspend_work;
+	uint32_t isr_enabled;
+	uint32_t power_down_by_release;
+
+	struct ttm_object_file *tfile;
+
+	spinlock_t ctx_spinlock;
+};
+
+struct tng_topaz_cmd_header {
+	union {
+		struct {
+			uint32_t id:8;
+			uint32_t core:8;
+			uint32_t low_cmd_count:8;
+			uint32_t high_cmd_count:8;
+		};
+		uint32_t val;
+	};
+};
+
+/* external function declare */
+/*ISR of TopazSC*/
+extern bool tng_topaz_interrupt(void *pvData);
+
+/*topaz commad handling function*/
+extern int tng_cmdbuf_video(struct drm_file *priv,
+			    struct list_head *validate_list,
+			    uint32_t fence_type,
+			    struct drm_psb_cmdbuf_arg *arg,
+			    struct ttm_buffer_object *cmd_buffer,
+			    struct psb_ttm_fence_rep *fence_arg);
+
+extern int tng_check_topaz_idle(struct drm_device *dev);
+
+extern void tng_topaz_enableirq(struct drm_device *dev);
+
+extern void tng_topaz_disableirq(struct drm_device *dev);
+
+extern int tng_topaz_init(struct drm_device *dev);
+
+extern int tng_topaz_uninit(struct drm_device *dev);
+
+extern void tng_topaz_handle_timeout(struct ttm_fence_device *fdev);
+
+extern int32_t mtx_write_core_reg(struct drm_psb_private *dev_priv,
+				 uint32_t reg,
+				 const uint32_t val);
+
+extern int32_t mtx_read_core_reg(struct drm_psb_private *dev_priv,
+				uint32_t reg,
+				uint32_t *ret_val);
+
+int tng_topaz_kick_null_cmd(struct drm_device *dev,
+			    uint32_t sync_seq);
+
+void tng_set_producer(struct drm_device *dev,
+		      uint32_t consumer);
+
+void tng_set_consumer(struct drm_device *dev,
+		      uint32_t consumer);
+
+uint32_t tng_get_producer(struct drm_device *dev);
+
+uint32_t tng_get_consumer(struct drm_device *dev);
+
+uint32_t tng_serialize(struct drm_device *dev);
+
+uint32_t tng_wait_for_ctrl(struct drm_device *dev,
+			   uint32_t control);
+
+int mtx_upload_fw(struct drm_device *dev,
+		  uint32_t codec,
+		  struct psb_video_ctx *video_ctx);
+
+int32_t mtx_dma_read(struct drm_device *dev,
+		struct ttm_buffer_object *dst_bo,
+		uint32_t src_ram_addr,
+		uint32_t size);
+
+void mtx_start(struct drm_device *dev);
+
+void mtx_stop(struct drm_psb_private *dev_priv);
+
+void mtx_kick(struct drm_device *dev);
+
+int mtx_write_FIFO(struct drm_device *dev,
+	struct tng_topaz_cmd_header *cmd_header,
+	uint32_t param, uint32_t param_addr, uint32_t sync_seq);
+
+int tng_topaz_remove_ctx(struct drm_psb_private *dev,
+	struct psb_video_ctx *video_ctx);
+
+extern int tng_topaz_save_mtx_state(struct drm_device *dev);
+
+extern int tng_topaz_restore_mtx_state(struct drm_device *dev);
+extern int tng_topaz_restore_mtx_state_b0(struct drm_device *dev);
+
+int tng_topaz_dequeue_send(struct drm_device *dev);
+
+uint32_t get_ctx_cnt(struct drm_device *dev);
+
+struct psb_video_ctx *get_ctx_from_fp(
+	struct drm_device *dev, struct file *filp);
+
+void tng_topaz_handle_sigint(
+	struct drm_device *dev,
+	struct file *filp);
+
+void tng_topaz_CG_disable(struct drm_device *dev);
+
+int tng_topaz_set_vec_freq(u32 freq_code);
+
+bool power_island_get_dummy(struct drm_device *dev);
+
+bool power_island_put_dummy(struct drm_device *dev);
+
+#define TNG_TOPAZ_NEW_PMSTATE(drm_dev, topaz_priv, new_state)		\
+do { \
+	topaz_priv->pmstate = new_state;				\
+	sysfs_notify_dirent(topaz_priv->sysfs_pmstate);			\
+	PSB_DEBUG_PM("TOPAZ: %s\n",					\
+		(new_state == PSB_PMSTATE_POWERUP) ? "powerup" : "powerdown"); \
+} while (0)
+
+#endif	/* _FPGA_TOPAZ_H_ */
diff --git a/drivers/external_drivers/intel_media/video/encode/tng_topaz_hw_reg.h b/drivers/external_drivers/intel_media/video/encode/tng_topaz_hw_reg.h
new file mode 100644
index 0000000..60a0a50
--- /dev/null
+++ b/drivers/external_drivers/intel_media/video/encode/tng_topaz_hw_reg.h
@@ -0,0 +1,3719 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
+ * Copyright (c) Imagination Technologies Limited, UK
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#ifndef _TNG_TOPAZ_HW_REG_H_
+#define _TNG_TOPAZ_HW_REG_H_
+
+#ifdef _LNC_TOPAZ_HW_REG_H_
+#error "lnc_topaz_hw_reg.h shouldn't be included"
+#endif
+
+#include "psb_drv.h"
+
+#define MTX_CMDID_PRIORITY 0x80
+
+/* Register CR_MULTICORE_SRST */
+#define TOPAZHP_TOP_CR_MULTICORE_SRST 0x0000
+#define MASK_TOPAZHP_TOP_CR_IMG_TOPAZ_MTX_SOFT_RESET 0x00000001
+#define SHIFT_TOPAZHP_TOP_CR_IMG_TOPAZ_MTX_SOFT_RESET 0
+#define REGNUM_TOPAZHP_TOP_CR_IMG_TOPAZ_MTX_SOFT_RESET 0x0000
+#define SIGNED_TOPAZHP_TOP_CR_IMG_TOPAZ_MTX_SOFT_RESET 0
+
+#define MASK_TOPAZHP_TOP_CR_IMG_TOPAZ_IO_SOFT_RESET 0x00000002
+#define SHIFT_TOPAZHP_TOP_CR_IMG_TOPAZ_IO_SOFT_RESET 1
+#define REGNUM_TOPAZHP_TOP_CR_IMG_TOPAZ_IO_SOFT_RESET 0x0000
+#define SIGNED_TOPAZHP_TOP_CR_IMG_TOPAZ_IO_SOFT_RESET 0
+
+#define MASK_TOPAZHP_TOP_CR_IMG_TOPAZ_CORE_SOFT_RESET 0x00000004
+#define SHIFT_TOPAZHP_TOP_CR_IMG_TOPAZ_CORE_SOFT_RESET 2
+#define REGNUM_TOPAZHP_TOP_CR_IMG_TOPAZ_CORE_SOFT_RESET 0x0000
+#define SIGNED_TOPAZHP_TOP_CR_IMG_TOPAZ_CORE_SOFT_RESET 0
+
+/* Register CR_MULTICORE_INT_STAT */
+#define TOPAZHP_TOP_CR_MULTICORE_INT_STAT 0x0004
+#define MASK_TOPAZHP_TOP_CR_INT_STAT_DMAC 0x00000001
+#define SHIFT_TOPAZHP_TOP_CR_INT_STAT_DMAC 0
+#define REGNUM_TOPAZHP_TOP_CR_INT_STAT_DMAC 0x0004
+#define SIGNED_TOPAZHP_TOP_CR_INT_STAT_DMAC 0
+
+#define MASK_TOPAZHP_TOP_CR_INT_STAT_MTX 0x00000002
+#define SHIFT_TOPAZHP_TOP_CR_INT_STAT_MTX 1
+#define REGNUM_TOPAZHP_TOP_CR_INT_STAT_MTX 0x0004
+#define SIGNED_TOPAZHP_TOP_CR_INT_STAT_MTX 0
+
+#define MASK_TOPAZHP_TOP_CR_INT_STAT_MTX_HALT 0x00000004
+#define SHIFT_TOPAZHP_TOP_CR_INT_STAT_MTX_HALT 2
+#define REGNUM_TOPAZHP_TOP_CR_INT_STAT_MTX_HALT 0x0004
+#define SIGNED_TOPAZHP_TOP_CR_INT_STAT_MTX_HALT 0
+
+#define MASK_TOPAZHP_TOP_CR_INT_STAT_MMU_FAULT 0x00000008
+#define SHIFT_TOPAZHP_TOP_CR_INT_STAT_MMU_FAULT 3
+#define REGNUM_TOPAZHP_TOP_CR_INT_STAT_MMU_FAULT 0x0004
+#define SIGNED_TOPAZHP_TOP_CR_INT_STAT_MMU_FAULT 0
+
+#define MASK_TOPAZHP_TOP_CR_INT_STAT_MTX_CORES 0x0000FF00
+#define SHIFT_TOPAZHP_TOP_CR_INT_STAT_MTX_CORES 8
+#define REGNUM_TOPAZHP_TOP_CR_INT_STAT_MTX_CORES 0x0004
+#define SIGNED_TOPAZHP_TOP_CR_INT_STAT_MTX_CORES 0
+
+#define MASK_TOPAZHP_TOP_CR_INT_STAT_HOST_CORES 0x00FF0000
+#define SHIFT_TOPAZHP_TOP_CR_INT_STAT_HOST_CORES 16
+#define REGNUM_TOPAZHP_TOP_CR_INT_STAT_HOST_CORES 0x0004
+#define SIGNED_TOPAZHP_TOP_CR_INT_STAT_HOST_CORES 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_MAS_MTX_INTS 0x40000000
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_MAS_MTX_INTS 30
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_MAS_MTX_INTS 0x0004
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_MAS_MTX_INTS 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_MAS_HOST_INTS 0x80000000
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_MAS_HOST_INTS 31
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_MAS_HOST_INTS 0x0004
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_MAS_HOST_INTS 0
+
+/* Register CR_MULTICORE_MTX_INT_ENAB */
+#define TOPAZHP_TOP_CR_MULTICORE_MTX_INT_ENAB 0x0008
+#define MASK_TOPAZHP_TOP_CR_MTX_INTEN_DMAC 0x00000001
+#define SHIFT_TOPAZHP_TOP_CR_MTX_INTEN_DMAC 0
+#define REGNUM_TOPAZHP_TOP_CR_MTX_INTEN_DMAC 0x0008
+#define SIGNED_TOPAZHP_TOP_CR_MTX_INTEN_DMAC 0
+
+#define MASK_TOPAZHP_TOP_CR_MTX_INTEN_MTX 0x00000002
+#define SHIFT_TOPAZHP_TOP_CR_MTX_INTEN_MTX 1
+#define REGNUM_TOPAZHP_TOP_CR_MTX_INTEN_MTX 0x0008
+#define SIGNED_TOPAZHP_TOP_CR_MTX_INTEN_MTX 0
+
+#define MASK_TOPAZHP_TOP_CR_MTX_INTEN_MTX_HALT 0x00000004
+#define SHIFT_TOPAZHP_TOP_CR_MTX_INTEN_MTX_HALT 2
+#define REGNUM_TOPAZHP_TOP_CR_MTX_INTEN_MTX_HALT 0x0008
+#define SIGNED_TOPAZHP_TOP_CR_MTX_INTEN_MTX_HALT 0
+
+#define MASK_TOPAZHP_TOP_CR_MTX_INTEN_MMU_FAULT 0x00000008
+#define SHIFT_TOPAZHP_TOP_CR_MTX_INTEN_MMU_FAULT 3
+#define REGNUM_TOPAZHP_TOP_CR_MTX_INTEN_MMU_FAULT 0x0008
+#define SIGNED_TOPAZHP_TOP_CR_MTX_INTEN_MMU_FAULT 0
+
+#define MASK_TOPAZHP_TOP_CR_MTX_INTEN_MTX_CORES 0x0000FF00
+#define SHIFT_TOPAZHP_TOP_CR_MTX_INTEN_MTX_CORES 8
+#define REGNUM_TOPAZHP_TOP_CR_MTX_INTEN_MTX_CORES 0x0008
+#define SIGNED_TOPAZHP_TOP_CR_MTX_INTEN_MTX_CORES 0
+
+#define MASK_TOPAZHP_TOP_CR_MTX_INTEN_HOST_CORES 0x00FF0000
+#define SHIFT_TOPAZHP_TOP_CR_MTX_INTEN_HOST_CORES 16
+#define REGNUM_TOPAZHP_TOP_CR_MTX_INTEN_HOST_CORES 0x0008
+#define SIGNED_TOPAZHP_TOP_CR_MTX_INTEN_HOST_CORES 0
+
+#define MASK_TOPAZHP_TOP_CR_MTX_TOPAZHP_MAS_INTEN 0x40000000
+#define SHIFT_TOPAZHP_TOP_CR_MTX_TOPAZHP_MAS_INTEN 30
+#define REGNUM_TOPAZHP_TOP_CR_MTX_TOPAZHP_MAS_INTEN 0x0008
+#define SIGNED_TOPAZHP_TOP_CR_MTX_TOPAZHP_MAS_INTEN 0
+
+/* Register CR_MULTICORE_HOST_INT_ENAB */
+#define TOPAZHP_TOP_CR_MULTICORE_HOST_INT_ENAB 0x000C
+#define MASK_TOPAZHP_TOP_CR_HOST_INTEN_DMAC 0x00000001
+#define SHIFT_TOPAZHP_TOP_CR_HOST_INTEN_DMAC 0
+#define REGNUM_TOPAZHP_TOP_CR_HOST_INTEN_DMAC 0x000C
+#define SIGNED_TOPAZHP_TOP_CR_HOST_INTEN_DMAC 0
+
+#define MASK_TOPAZHP_TOP_CR_HOST_INTEN_MTX 0x00000002
+#define SHIFT_TOPAZHP_TOP_CR_HOST_INTEN_MTX 1
+#define REGNUM_TOPAZHP_TOP_CR_HOST_INTEN_MTX 0x000C
+#define SIGNED_TOPAZHP_TOP_CR_HOST_INTEN_MTX 0
+
+#define MASK_TOPAZHP_TOP_CR_HOST_INTEN_MTX_HALT 0x00000004
+#define SHIFT_TOPAZHP_TOP_CR_HOST_INTEN_MTX_HALT 2
+#define REGNUM_TOPAZHP_TOP_CR_HOST_INTEN_MTX_HALT 0x000C
+#define SIGNED_TOPAZHP_TOP_CR_HOST_INTEN_MTX_HALT 0
+
+#define MASK_TOPAZHP_TOP_CR_HOST_INTEN_MMU_FAULT 0x00000008
+#define SHIFT_TOPAZHP_TOP_CR_HOST_INTEN_MMU_FAULT 3
+#define REGNUM_TOPAZHP_TOP_CR_HOST_INTEN_MMU_FAULT 0x000C
+#define SIGNED_TOPAZHP_TOP_CR_HOST_INTEN_MMU_FAULT 0
+
+#define MASK_TOPAZHP_TOP_CR_HOST_INTEN_MTX_CORES 0x0000FF00
+#define SHIFT_TOPAZHP_TOP_CR_HOST_INTEN_MTX_CORES 8
+#define REGNUM_TOPAZHP_TOP_CR_HOST_INTEN_MTX_CORES 0x000C
+#define SIGNED_TOPAZHP_TOP_CR_HOST_INTEN_MTX_CORES 0
+
+#define MASK_TOPAZHP_TOP_CR_HOST_INTEN_HOST_CORES 0x00FF0000
+#define SHIFT_TOPAZHP_TOP_CR_HOST_INTEN_HOST_CORES 16
+#define REGNUM_TOPAZHP_TOP_CR_HOST_INTEN_HOST_CORES 0x000C
+#define SIGNED_TOPAZHP_TOP_CR_HOST_INTEN_HOST_CORES 0
+
+#define MASK_TOPAZHP_TOP_CR_HOST_TOPAZHP_MAS_INTEN 0x80000000
+#define SHIFT_TOPAZHP_TOP_CR_HOST_TOPAZHP_MAS_INTEN 31
+#define REGNUM_TOPAZHP_TOP_CR_HOST_TOPAZHP_MAS_INTEN 0x000C
+#define SIGNED_TOPAZHP_TOP_CR_HOST_TOPAZHP_MAS_INTEN 0
+
+/* Register CR_MULTICORE_INT_CLEAR */
+#define TOPAZHP_TOP_CR_MULTICORE_INT_CLEAR 0x0010
+#define MASK_TOPAZHP_TOP_CR_INTCLR_DMAC 0x00000001
+#define SHIFT_TOPAZHP_TOP_CR_INTCLR_DMAC 0
+#define REGNUM_TOPAZHP_TOP_CR_INTCLR_DMAC 0x0010
+#define SIGNED_TOPAZHP_TOP_CR_INTCLR_DMAC 0
+
+#define MASK_TOPAZHP_TOP_CR_INTCLR_MTX 0x00000002
+#define SHIFT_TOPAZHP_TOP_CR_INTCLR_MTX 1
+#define REGNUM_TOPAZHP_TOP_CR_INTCLR_MTX 0x0010
+#define SIGNED_TOPAZHP_TOP_CR_INTCLR_MTX 0
+
+#define MASK_TOPAZHP_TOP_CR_INTCLR_MTX_HALT 0x00000004
+#define SHIFT_TOPAZHP_TOP_CR_INTCLR_MTX_HALT 2
+#define REGNUM_TOPAZHP_TOP_CR_INTCLR_MTX_HALT 0x0010
+#define SIGNED_TOPAZHP_TOP_CR_INTCLR_MTX_HALT 0
+
+#define MASK_TOPAZHP_TOP_CR_INTCLR_MMU_FAULT 0x00000008
+#define SHIFT_TOPAZHP_TOP_CR_INTCLR_MMU_FAULT 3
+#define REGNUM_TOPAZHP_TOP_CR_INTCLR_MMU_FAULT 0x0010
+#define SIGNED_TOPAZHP_TOP_CR_INTCLR_MMU_FAULT 0
+
+/* Register CR_MULTICORE_MAN_CLK_GATE */
+#define TOPAZHP_TOP_CR_MULTICORE_MAN_CLK_GATE 0x0014
+#define MASK_TOPAZHP_TOP_CR_TOPAZ_MTX_MAN_CLK_GATE 0x00000002
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZ_MTX_MAN_CLK_GATE 1
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZ_MTX_MAN_CLK_GATE 0x0014
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZ_MTX_MAN_CLK_GATE 0
+
+/* Register CR_TOPAZ_MTX_C_RATIO */
+#define TOPAZHP_TOP_CR_TOPAZ_MTX_C_RATIO 0x0018
+#define MASK_TOPAZHP_TOP_CR_MTX_C_RATIO 0x00000003
+#define SHIFT_TOPAZHP_TOP_CR_MTX_C_RATIO 0
+#define REGNUM_TOPAZHP_TOP_CR_MTX_C_RATIO 0x0018
+#define SIGNED_TOPAZHP_TOP_CR_MTX_C_RATIO 0
+
+/* Register CR_MMU_STATUS */
+#define TOPAZHP_TOP_CR_MMU_STATUS   0x001C
+#define MASK_TOPAZHP_TOP_CR_MMU_PF_N_RW 0x00000001
+#define SHIFT_TOPAZHP_TOP_CR_MMU_PF_N_RW 0
+#define REGNUM_TOPAZHP_TOP_CR_MMU_PF_N_RW 0x001C
+#define SIGNED_TOPAZHP_TOP_CR_MMU_PF_N_RW 0
+
+#define MASK_TOPAZHP_TOP_CR_MMU_FAULT_ADDR 0xFFFFF000
+#define SHIFT_TOPAZHP_TOP_CR_MMU_FAULT_ADDR 12
+#define REGNUM_TOPAZHP_TOP_CR_MMU_FAULT_ADDR 0x001C
+#define SIGNED_TOPAZHP_TOP_CR_MMU_FAULT_ADDR 0
+
+/* Register CR_MMU_MEM_REQ */
+#define TOPAZHP_TOP_CR_MMU_MEM_REQ  0x0020
+#define MASK_TOPAZHP_TOP_CR_MEM_REQ_STAT_READS 0x000000FF
+#define SHIFT_TOPAZHP_TOP_CR_MEM_REQ_STAT_READS 0
+#define REGNUM_TOPAZHP_TOP_CR_MEM_REQ_STAT_READS 0x0020
+#define SIGNED_TOPAZHP_TOP_CR_MEM_REQ_STAT_READS 0
+
+/* Register CR_MMU_CONTROL0 */
+#define TOPAZHP_TOP_CR_MMU_CONTROL0 0x0024
+#define MASK_TOPAZHP_TOP_CR_MMU_NOREORDER 0x00000001
+#define SHIFT_TOPAZHP_TOP_CR_MMU_NOREORDER 0
+#define REGNUM_TOPAZHP_TOP_CR_MMU_NOREORDER 0x0024
+#define SIGNED_TOPAZHP_TOP_CR_MMU_NOREORDER 0
+
+#define MASK_TOPAZHP_TOP_CR_MMU_PAUSE 0x00000002
+#define SHIFT_TOPAZHP_TOP_CR_MMU_PAUSE 1
+#define REGNUM_TOPAZHP_TOP_CR_MMU_PAUSE 0x0024
+#define SIGNED_TOPAZHP_TOP_CR_MMU_PAUSE 0
+
+#define MASK_TOPAZHP_TOP_CR_MMU_FLUSH 0x00000004
+#define SHIFT_TOPAZHP_TOP_CR_MMU_FLUSH 2
+#define REGNUM_TOPAZHP_TOP_CR_MMU_FLUSH 0x0024
+#define SIGNED_TOPAZHP_TOP_CR_MMU_FLUSH 0
+
+#define MASK_TOPAZHP_TOP_CR_MMU_INVALDC 0x00000008
+#define SHIFT_TOPAZHP_TOP_CR_MMU_INVALDC 3
+#define REGNUM_TOPAZHP_TOP_CR_MMU_INVALDC 0x0024
+#define SIGNED_TOPAZHP_TOP_CR_MMU_INVALDC 0
+
+#define MASK_TOPAZHP_TOP_CR_FLOWRATE_TOPAZ 0x00000700
+#define SHIFT_TOPAZHP_TOP_CR_FLOWRATE_TOPAZ 8
+#define REGNUM_TOPAZHP_TOP_CR_FLOWRATE_TOPAZ 0x0024
+#define SIGNED_TOPAZHP_TOP_CR_FLOWRATE_TOPAZ 0
+
+#define MASK_TOPAZHP_TOP_CR_MMU_BYPASS_TOPAZ 0x00010000
+#define SHIFT_TOPAZHP_TOP_CR_MMU_BYPASS_TOPAZ 16
+#define REGNUM_TOPAZHP_TOP_CR_MMU_BYPASS_TOPAZ 0x0024
+#define SIGNED_TOPAZHP_TOP_CR_MMU_BYPASS_TOPAZ 0
+
+/* Register CR_MMU_CONTROL1 */
+#define TOPAZHP_TOP_CR_MMU_CONTROL1 0x0028
+#define MASK_TOPAZHP_TOP_CR_MMU_TTE_THRESHOLD 0x00000FFF
+#define SHIFT_TOPAZHP_TOP_CR_MMU_TTE_THRESHOLD 0
+#define REGNUM_TOPAZHP_TOP_CR_MMU_TTE_THRESHOLD 0x0028
+#define SIGNED_TOPAZHP_TOP_CR_MMU_TTE_THRESHOLD 0
+
+#define MASK_TOPAZHP_TOP_CR_MMU_ADT_TTE 0x000FF000
+#define SHIFT_TOPAZHP_TOP_CR_MMU_ADT_TTE 12
+#define REGNUM_TOPAZHP_TOP_CR_MMU_ADT_TTE 0x0028
+#define SIGNED_TOPAZHP_TOP_CR_MMU_ADT_TTE 0
+
+#define MASK_TOPAZHP_TOP_CR_MMU_BEST_COUNT 0x0FF00000
+#define SHIFT_TOPAZHP_TOP_CR_MMU_BEST_COUNT 20
+#define REGNUM_TOPAZHP_TOP_CR_MMU_BEST_COUNT 0x0028
+#define SIGNED_TOPAZHP_TOP_CR_MMU_BEST_COUNT 0
+
+#define MASK_TOPAZHP_TOP_CR_MMU_PAGE_SIZE 0xF0000000
+#define SHIFT_TOPAZHP_TOP_CR_MMU_PAGE_SIZE 28
+#define REGNUM_TOPAZHP_TOP_CR_MMU_PAGE_SIZE 0x0028
+#define SIGNED_TOPAZHP_TOP_CR_MMU_PAGE_SIZE 0
+
+/* Register CR_MMU_CONTROL2 */
+#define TOPAZHP_TOP_CR_MMU_CONTROL2 0x002C
+#define MASK_TOPAZHP_TOP_CR_MMU_ENABLE_36BIT_ADDRESSING 0x00000001
+#define SHIFT_TOPAZHP_TOP_CR_MMU_ENABLE_36BIT_ADDRESSING 0
+#define REGNUM_TOPAZHP_TOP_CR_MMU_ENABLE_36BIT_ADDRESSING 0x002C
+#define SIGNED_TOPAZHP_TOP_CR_MMU_ENABLE_36BIT_ADDRESSING 0
+
+#define MASK_TOPAZHP_TOP_CR_MMU_TILING_SCHEME 0x00000008
+#define SHIFT_TOPAZHP_TOP_CR_MMU_TILING_SCHEME 3
+#define REGNUM_TOPAZHP_TOP_CR_MMU_TILING_SCHEME 0x002C
+#define SIGNED_TOPAZHP_TOP_CR_MMU_TILING_SCHEME 0
+
+/* Register CR_MMU_DIR_LIST_BASE_0 */
+#define TOPAZHP_TOP_CR_MMU_DIR_LIST_BASE_0 0x0030
+#define MASK_TOPAZHP_TOP_CR_MMU_DIR_LIST_BASE_ADDR_00 0xFFFFFFF0
+#define SHIFT_TOPAZHP_TOP_CR_MMU_DIR_LIST_BASE_ADDR_00 4
+#define REGNUM_TOPAZHP_TOP_CR_MMU_DIR_LIST_BASE_ADDR_00 0x0030
+#define SIGNED_TOPAZHP_TOP_CR_MMU_DIR_LIST_BASE_ADDR_00 0
+
+/* Register CR_MMU_TILE_0 */
+#define TOPAZHP_TOP_CR_MMU_TILE_0   0x0038
+#define MASK_TOPAZHP_TOP_CR_TILE_MIN_ADDR_00 0x00000FFF
+#define SHIFT_TOPAZHP_TOP_CR_TILE_MIN_ADDR_00 0
+#define REGNUM_TOPAZHP_TOP_CR_TILE_MIN_ADDR_00 0x0038
+#define SIGNED_TOPAZHP_TOP_CR_TILE_MIN_ADDR_00 0
+
+#define MASK_TOPAZHP_TOP_CR_TILE_MAX_ADDR_00 0x00FFF000
+#define SHIFT_TOPAZHP_TOP_CR_TILE_MAX_ADDR_00 12
+#define REGNUM_TOPAZHP_TOP_CR_TILE_MAX_ADDR_00 0x0038
+#define SIGNED_TOPAZHP_TOP_CR_TILE_MAX_ADDR_00 0
+
+#define MASK_TOPAZHP_TOP_CR_TILE_STRIDE_00 0x07000000
+#define SHIFT_TOPAZHP_TOP_CR_TILE_STRIDE_00 24
+#define REGNUM_TOPAZHP_TOP_CR_TILE_STRIDE_00 0x0038
+#define SIGNED_TOPAZHP_TOP_CR_TILE_STRIDE_00 0
+
+#define MASK_TOPAZHP_TOP_CR_TILE_ENABLE_00 0x10000000
+#define SHIFT_TOPAZHP_TOP_CR_TILE_ENABLE_00 28
+#define REGNUM_TOPAZHP_TOP_CR_TILE_ENABLE_00 0x0038
+#define SIGNED_TOPAZHP_TOP_CR_TILE_ENABLE_00 0
+
+/* Register CR_MMU_TILE_1 */
+#define TOPAZHP_TOP_CR_MMU_TILE_1   0x003C
+#define MASK_TOPAZHP_TOP_CR_TILE_MIN_ADDR_01 0x00000FFF
+#define SHIFT_TOPAZHP_TOP_CR_TILE_MIN_ADDR_01 0
+#define REGNUM_TOPAZHP_TOP_CR_TILE_MIN_ADDR_01 0x003C
+#define SIGNED_TOPAZHP_TOP_CR_TILE_MIN_ADDR_01 0
+
+#define MASK_TOPAZHP_TOP_CR_TILE_MAX_ADDR_01 0x00FFF000
+#define SHIFT_TOPAZHP_TOP_CR_TILE_MAX_ADDR_01 12
+#define REGNUM_TOPAZHP_TOP_CR_TILE_MAX_ADDR_01 0x003C
+#define SIGNED_TOPAZHP_TOP_CR_TILE_MAX_ADDR_01 0
+
+#define MASK_TOPAZHP_TOP_CR_TILE_STRIDE_01 0x07000000
+#define SHIFT_TOPAZHP_TOP_CR_TILE_STRIDE_01 24
+#define REGNUM_TOPAZHP_TOP_CR_TILE_STRIDE_01 0x003C
+#define SIGNED_TOPAZHP_TOP_CR_TILE_STRIDE_01 0
+
+#define MASK_TOPAZHP_TOP_CR_TILE_ENABLE_01 0x10000000
+#define SHIFT_TOPAZHP_TOP_CR_TILE_ENABLE_01 28
+#define REGNUM_TOPAZHP_TOP_CR_TILE_ENABLE_01 0x003C
+#define SIGNED_TOPAZHP_TOP_CR_TILE_ENABLE_01 0
+
+/* Register CR_MULTICORE_CORE_SEL_0 */
+#define TOPAZHP_TOP_CR_MULTICORE_CORE_SEL_0 0x0050
+#define MASK_TOPAZHP_TOP_CR_DMAC_MTX_SELECT 0x00000007
+#define SHIFT_TOPAZHP_TOP_CR_DMAC_MTX_SELECT 0
+#define REGNUM_TOPAZHP_TOP_CR_DMAC_MTX_SELECT 0x0050
+#define SIGNED_TOPAZHP_TOP_CR_DMAC_MTX_SELECT 0
+
+#define MASK_TOPAZHP_TOP_CR_WRITES_MTX_ALL 0x40000000
+#define SHIFT_TOPAZHP_TOP_CR_WRITES_MTX_ALL 30
+#define REGNUM_TOPAZHP_TOP_CR_WRITES_MTX_ALL 0x0050
+#define SIGNED_TOPAZHP_TOP_CR_WRITES_MTX_ALL 0
+
+#define MASK_TOPAZHP_TOP_CR_WRITES_CORE_ALL 0x80000000
+#define SHIFT_TOPAZHP_TOP_CR_WRITES_CORE_ALL 31
+#define REGNUM_TOPAZHP_TOP_CR_WRITES_CORE_ALL 0x0050
+#define SIGNED_TOPAZHP_TOP_CR_WRITES_CORE_ALL 0
+
+/* Register CR_MULTICORE_CORE_SEL_1 */
+#define TOPAZHP_TOP_CR_MULTICORE_CORE_SEL_1 0x0054
+#define MASK_TOPAZHP_TOP_CR_RTM_PORT_CORE_SELECT 0x0000001F
+#define SHIFT_TOPAZHP_TOP_CR_RTM_PORT_CORE_SELECT 0
+#define REGNUM_TOPAZHP_TOP_CR_RTM_PORT_CORE_SELECT 0x0054
+#define SIGNED_TOPAZHP_TOP_CR_RTM_PORT_CORE_SELECT 0
+
+/* Register CR_MULTICORE_HW_CFG */
+#define TOPAZHP_TOP_CR_MULTICORE_HW_CFG 0x0058
+#define MASK_TOPAZHP_TOP_CR_NUM_CORES_SUPPORTED 0x0000001F
+#define SHIFT_TOPAZHP_TOP_CR_NUM_CORES_SUPPORTED 0
+#define REGNUM_TOPAZHP_TOP_CR_NUM_CORES_SUPPORTED 0x0058
+#define SIGNED_TOPAZHP_TOP_CR_NUM_CORES_SUPPORTED 0
+
+#define MASK_TOPAZHP_TOP_CR_NUM_MTX_SUPPORTED 0x00000700
+#define SHIFT_TOPAZHP_TOP_CR_NUM_MTX_SUPPORTED 8
+#define REGNUM_TOPAZHP_TOP_CR_NUM_MTX_SUPPORTED 0x0058
+#define SIGNED_TOPAZHP_TOP_CR_NUM_MTX_SUPPORTED 0
+
+#define MASK_TOPAZHP_TOP_CR_NUM_CORES_PER_MTX 0x00070000
+#define SHIFT_TOPAZHP_TOP_CR_NUM_CORES_PER_MTX 16
+#define REGNUM_TOPAZHP_TOP_CR_NUM_CORES_PER_MTX 0x0058
+#define SIGNED_TOPAZHP_TOP_CR_NUM_CORES_PER_MTX 0
+
+#define MASK_TOPAZHP_TOP_CR_EXTENDED_ADDR_RANGE 0x0F000000
+#define SHIFT_TOPAZHP_TOP_CR_EXTENDED_ADDR_RANGE 24
+#define REGNUM_TOPAZHP_TOP_CR_EXTENDED_ADDR_RANGE 0x0058
+#define SIGNED_TOPAZHP_TOP_CR_EXTENDED_ADDR_RANGE 0
+
+/* Register CR_MULTICORE_CMD_FIFO_WRITE */
+#define TOPAZHP_TOP_CR_MULTICORE_CMD_FIFO_WRITE 0x0060
+#define MASK_TOPAZHP_TOP_CR_CMD_FIFO_WDATA 0xFFFFFFFF
+#define SHIFT_TOPAZHP_TOP_CR_CMD_FIFO_WDATA 0
+#define REGNUM_TOPAZHP_TOP_CR_CMD_FIFO_WDATA 0x0060
+#define SIGNED_TOPAZHP_TOP_CR_CMD_FIFO_WDATA 0
+
+/* Register CR_MULTICORE_CMD_FIFO_WRITE_SPACE */
+#define TOPAZHP_TOP_CR_MULTICORE_CMD_FIFO_WRITE_SPACE 0x0064
+#define MASK_TOPAZHP_TOP_CR_CMD_FIFO_SPACE 0x000000FF
+#define SHIFT_TOPAZHP_TOP_CR_CMD_FIFO_SPACE 0
+#define REGNUM_TOPAZHP_TOP_CR_CMD_FIFO_SPACE 0x0064
+#define SIGNED_TOPAZHP_TOP_CR_CMD_FIFO_SPACE 0
+
+#define MASK_TOPAZHP_TOP_CR_CMD_FIFO_FULL 0x00000100
+#define SHIFT_TOPAZHP_TOP_CR_CMD_FIFO_FULL 8
+#define REGNUM_TOPAZHP_TOP_CR_CMD_FIFO_FULL 0x0064
+#define SIGNED_TOPAZHP_TOP_CR_CMD_FIFO_FULL 0
+
+/* Register CR_TOPAZ_CMD_FIFO_READ */
+#define TOPAZHP_TOP_CR_TOPAZ_CMD_FIFO_READ 0x0070
+#define MASK_TOPAZHP_TOP_CR_CMD_FIFO_RDATA 0xFFFFFFFF
+#define SHIFT_TOPAZHP_TOP_CR_CMD_FIFO_RDATA 0
+#define REGNUM_TOPAZHP_TOP_CR_CMD_FIFO_RDATA 0x0070
+#define SIGNED_TOPAZHP_TOP_CR_CMD_FIFO_RDATA 0
+
+/* Register CR_TOPAZ_CMD_FIFO_READ_AVAILABLE */
+#define TOPAZHP_TOP_CR_TOPAZ_CMD_FIFO_READ_AVAILABLE 0x0074
+#define MASK_TOPAZHP_TOP_CR_CMD_FIFO_QUANTITY 0x000000FF
+#define SHIFT_TOPAZHP_TOP_CR_CMD_FIFO_QUANTITY 0
+#define REGNUM_TOPAZHP_TOP_CR_CMD_FIFO_QUANTITY 0x0074
+#define SIGNED_TOPAZHP_TOP_CR_CMD_FIFO_QUANTITY 0
+
+#define MASK_TOPAZHP_TOP_CR_CMD_FIFO_NOTEMPTY 0x00000100
+#define SHIFT_TOPAZHP_TOP_CR_CMD_FIFO_NOTEMPTY 8
+#define REGNUM_TOPAZHP_TOP_CR_CMD_FIFO_NOTEMPTY 0x0074
+#define SIGNED_TOPAZHP_TOP_CR_CMD_FIFO_NOTEMPTY 0
+
+/* Register CR_TOPAZ_CMD_FIFO_FLUSH */
+#define TOPAZHP_TOP_CR_TOPAZ_CMD_FIFO_FLUSH 0x0078
+#define MASK_TOPAZHP_TOP_CR_CMD_FIFO_FLUSH 0x00000001
+#define SHIFT_TOPAZHP_TOP_CR_CMD_FIFO_FLUSH 0
+#define REGNUM_TOPAZHP_TOP_CR_CMD_FIFO_FLUSH 0x0078
+#define SIGNED_TOPAZHP_TOP_CR_CMD_FIFO_FLUSH 0
+
+/* Register CR_MMU_TILE_EXT_0 */
+#define TOPAZHP_TOP_CR_MMU_TILE_EXT_0 0x0080
+#define MASK_TOPAZHP_TOP_CR_TILE_MIN_ADDR_EXT_00 0x000000FF
+#define SHIFT_TOPAZHP_TOP_CR_TILE_MIN_ADDR_EXT_00 0
+#define REGNUM_TOPAZHP_TOP_CR_TILE_MIN_ADDR_EXT_00 0x0080
+#define SIGNED_TOPAZHP_TOP_CR_TILE_MIN_ADDR_EXT_00 0
+
+#define MASK_TOPAZHP_TOP_CR_TILE_MAX_ADDR_EXT_00 0x0000FF00
+#define SHIFT_TOPAZHP_TOP_CR_TILE_MAX_ADDR_EXT_00 8
+#define REGNUM_TOPAZHP_TOP_CR_TILE_MAX_ADDR_EXT_00 0x0080
+#define SIGNED_TOPAZHP_TOP_CR_TILE_MAX_ADDR_EXT_00 0
+
+/* Register CR_MMU_TILE_EXT_1 */
+#define TOPAZHP_TOP_CR_MMU_TILE_EXT_1 0x0084
+#define MASK_TOPAZHP_TOP_CR_TILE_MIN_ADDR_EXT_01 0x000000FF
+#define SHIFT_TOPAZHP_TOP_CR_TILE_MIN_ADDR_EXT_01 0
+#define REGNUM_TOPAZHP_TOP_CR_TILE_MIN_ADDR_EXT_01 0x0084
+#define SIGNED_TOPAZHP_TOP_CR_TILE_MIN_ADDR_EXT_01 0
+
+#define MASK_TOPAZHP_TOP_CR_TILE_MAX_ADDR_EXT_01 0x0000FF00
+#define SHIFT_TOPAZHP_TOP_CR_TILE_MAX_ADDR_EXT_01 8
+#define REGNUM_TOPAZHP_TOP_CR_TILE_MAX_ADDR_EXT_01 0x0084
+#define SIGNED_TOPAZHP_TOP_CR_TILE_MAX_ADDR_EXT_01 0
+
+/* Register CR_TOPAZHP_CORE_ID */
+#define TOPAZHP_TOP_CR_TOPAZHP_CORE_ID 0x03C0
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_CORE_CONFIG 0x0000FFFF
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_CORE_CONFIG 0
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_CORE_CONFIG 0x03C0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_CORE_CONFIG 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_CORE_ID 0x00FF0000
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_CORE_ID 16
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_CORE_ID 0x03C0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_CORE_ID 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_GROUP_ID 0xFF000000
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_GROUP_ID 24
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_GROUP_ID 0x03C0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_GROUP_ID 0
+
+/* Register CR_TOPAZHP_CORE_REV */
+#define TOPAZHP_TOP_CR_TOPAZHP_CORE_REV 0x03D0
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_MAINT_REV 0x000000FF
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_MAINT_REV 0
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_MAINT_REV 0x03D0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_MAINT_REV 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_MINOR_REV 0x0000FF00
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_MINOR_REV 8
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_MINOR_REV 0x03D0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_MINOR_REV 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_MAJOR_REV 0x00FF0000
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_MAJOR_REV 16
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_MAJOR_REV 0x03D0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_MAJOR_REV 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_DESIGNER 0xFF000000
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_DESIGNER 24
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_DESIGNER 0x03D0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_DESIGNER 0
+
+/* Register CR_TOPAZHP_CORE_DES1 */
+#define TOPAZHP_TOP_CR_TOPAZHP_CORE_DES1 0x03E0
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_SCALER_SUPPORTED 0x00000080
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_SCALER_SUPPORTED 7
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_SCALER_SUPPORTED 0x03E0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_SCALER_SUPPORTED 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_GENERATE_PERFORMANCE_STORE 0x00000100
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_GENERATE_PERFORMANCE_STORE 8
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_GENERATE_PERFORMANCE_STORE 0x03E0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_GENERATE_PERFORMANCE_STORE 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_H264_LOSSLESS_SUPPORTED 0x00000200
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_H264_LOSSLESS_SUPPORTED 9
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_H264_LOSSLESS_SUPPORTED 0x03E0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_H264_LOSSLESS_SUPPORTED 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_H264_CUSTOM_QUANT_SUPPORTED 0x00000400
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_H264_CUSTOM_QUANT_SUPPORTED 10
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_H264_CUSTOM_QUANT_SUPPORTED 0x03E0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_H264_CUSTOM_QUANT_SUPPORTED 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_MPEG2_SUPPORTED 0x00000800
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_MPEG2_SUPPORTED 11
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_MPEG2_SUPPORTED 0x03E0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_MPEG2_SUPPORTED 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_SIGNATURES_SUPPORTED_SUBSET 0x00001000
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_SIGNATURES_SUPPORTED_SUBSET 12
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_SIGNATURES_SUPPORTED_SUBSET 0x03E0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_SIGNATURES_SUPPORTED_SUBSET 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_SIGNATURES_SUPPORTED_ALL 0x00002000
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_SIGNATURES_SUPPORTED_ALL 13
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_SIGNATURES_SUPPORTED_ALL 0x03E0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_SIGNATURES_SUPPORTED_ALL 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_H264_WEIGHTED_PRED_ME_SUPPORTED 0x00004000
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_H264_WEIGHTED_PRED_ME_SUPPORTED 14
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_H264_WEIGHTED_PRED_ME_SUPPORTED 0x03E0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_H264_WEIGHTED_PRED_ME_SUPPORTED 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_H264_WEIGHTED_PRED_SUPPORTED 0x00008000
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_H264_WEIGHTED_PRED_SUPPORTED 15
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_H264_WEIGHTED_PRED_SUPPORTED 0x03E0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_H264_WEIGHTED_PRED_SUPPORTED 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_H264_2_REF_ON_P_PIC_SUPPORTED 0x00010000
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_H264_2_REF_ON_P_PIC_SUPPORTED 16
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_H264_2_REF_ON_P_PIC_SUPPORTED 0x03E0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_H264_2_REF_ON_P_PIC_SUPPORTED 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_H264_SPATIAL_DIRECT_SUPPORTED 0x00020000
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_H264_SPATIAL_DIRECT_SUPPORTED 17
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_H264_SPATIAL_DIRECT_SUPPORTED 0x03E0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_H264_SPATIAL_DIRECT_SUPPORTED 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_H264_MULTIPASS_SUPPORTED 0x00040000
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_H264_MULTIPASS_SUPPORTED 18
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_H264_MULTIPASS_SUPPORTED 0x03E0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_H264_MULTIPASS_SUPPORTED 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_H264_DEFAULT_TABLES_SUPPORTED 0x00080000
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_H264_DEFAULT_TABLES_SUPPORTED 19
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_H264_DEFAULT_TABLES_SUPPORTED 0x03E0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_H264_DEFAULT_TABLES_SUPPORTED 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_H264_8X8_TRANSFORM_SUPPORTED 0x00100000
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_H264_8X8_TRANSFORM_SUPPORTED 20
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_H264_8X8_TRANSFORM_SUPPORTED 0x03E0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_H264_8X8_TRANSFORM_SUPPORTED 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_H264_INTERLACED_SUPPORTED 0x00200000
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_H264_INTERLACED_SUPPORTED 21
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_H264_INTERLACED_SUPPORTED 0x03E0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_H264_INTERLACED_SUPPORTED 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_H264_B_PIC_SUPPORTED 0x00400000
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_H264_B_PIC_SUPPORTED 22
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_H264_B_PIC_SUPPORTED 0x03E0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_H264_B_PIC_SUPPORTED 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_H264_16X8_8X16_SUPPORTED 0x00800000
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_H264_16X8_8X16_SUPPORTED 23
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_H264_16X8_8X16_SUPPORTED 0x03E0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_H264_16X8_8X16_SUPPORTED 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_H264_CABAC_SUPPORTED 0x01000000
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_H264_CABAC_SUPPORTED 24
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_H264_CABAC_SUPPORTED 0x03E0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_H264_CABAC_SUPPORTED 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_SLAVE_JPEG_SUPPORTED 0x02000000
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_SLAVE_JPEG_SUPPORTED 25
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_SLAVE_JPEG_SUPPORTED 0x03E0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_SLAVE_JPEG_SUPPORTED 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_JPEG_SUPPORTED 0x04000000
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_JPEG_SUPPORTED 26
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_JPEG_SUPPORTED 0x03E0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_JPEG_SUPPORTED 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_H263_SUPPORTED 0x08000000
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_H263_SUPPORTED 27
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_H263_SUPPORTED 0x03E0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_H263_SUPPORTED 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_MPEG4_SUPPORTED 0x10000000
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_MPEG4_SUPPORTED 28
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_MPEG4_SUPPORTED 0x03E0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_MPEG4_SUPPORTED 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_H264_SUPPORTED 0x20000000
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_H264_SUPPORTED 29
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_H264_SUPPORTED 0x03E0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_H264_SUPPORTED 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_DMAC_SUPPORTED 0x40000000
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_DMAC_SUPPORTED 30
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_DMAC_SUPPORTED 0x03E0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_DMAC_SUPPORTED 0
+
+#define MASK_TOPAZHP_TOP_CR_TOPAZHP_MMU_SUPPORTED 0x80000000
+#define SHIFT_TOPAZHP_TOP_CR_TOPAZHP_MMU_SUPPORTED 31
+#define REGNUM_TOPAZHP_TOP_CR_TOPAZHP_MMU_SUPPORTED 0x03E0
+#define SIGNED_TOPAZHP_TOP_CR_TOPAZHP_MMU_SUPPORTED 0
+
+/* Table MMU_DIR_LIST_BASE */
+
+/* Register CR_MMU_DIR_LIST_BASE */
+#define TOPAZHP_TOP_CR_MMU_DIR_LIST_BASE(X) (0x0030 + (4 * (X)))
+#define MASK_TOPAZHP_TOP_CR_MMU_DIR_LIST_BASE_ADDR 0xFFFFFFF0
+#define SHIFT_TOPAZHP_TOP_CR_MMU_DIR_LIST_BASE_ADDR 4
+#define REGNUM_TOPAZHP_TOP_CR_MMU_DIR_LIST_BASE_ADDR 0x0030
+#define SIGNED_TOPAZHP_TOP_CR_MMU_DIR_LIST_BASE_ADDR 0
+
+/* Number of entries in table MMU_DIR_LIST_BASE */
+
+#define TOPAZHP_TOP_MMU_DIR_LIST_BASE_SIZE_UINT32 1
+#define TOPAZHP_TOP_MMU_DIR_LIST_BASE_NUM_ENTRIES 1
+
+
+/* Table MMU_TILE */
+
+/* Register CR_MMU_TILE */
+#define TOPAZHP_TOP_CR_MMU_TILE(X)  (0x0038 + (4 * (X)))
+#define MASK_TOPAZHP_TOP_CR_TILE_MIN_ADDR 0x00000FFF
+#define SHIFT_TOPAZHP_TOP_CR_TILE_MIN_ADDR 0
+#define REGNUM_TOPAZHP_TOP_CR_TILE_MIN_ADDR 0x0038
+#define SIGNED_TOPAZHP_TOP_CR_TILE_MIN_ADDR 0
+
+#define MASK_TOPAZHP_TOP_CR_TILE_MAX_ADDR 0x00FFF000
+#define SHIFT_TOPAZHP_TOP_CR_TILE_MAX_ADDR 12
+#define REGNUM_TOPAZHP_TOP_CR_TILE_MAX_ADDR 0x0038
+#define SIGNED_TOPAZHP_TOP_CR_TILE_MAX_ADDR 0
+
+#define MASK_TOPAZHP_TOP_CR_TILE_STRIDE 0x07000000
+#define SHIFT_TOPAZHP_TOP_CR_TILE_STRIDE 24
+#define REGNUM_TOPAZHP_TOP_CR_TILE_STRIDE 0x0038
+#define SIGNED_TOPAZHP_TOP_CR_TILE_STRIDE 0
+
+#define MASK_TOPAZHP_TOP_CR_TILE_ENABLE 0x10000000
+#define SHIFT_TOPAZHP_TOP_CR_TILE_ENABLE 28
+#define REGNUM_TOPAZHP_TOP_CR_TILE_ENABLE 0x0038
+#define SIGNED_TOPAZHP_TOP_CR_TILE_ENABLE 0
+
+/* Number of entries in table MMU_TILE */
+
+#define TOPAZHP_TOP_MMU_TILE_SIZE_UINT32 2
+#define TOPAZHP_TOP_MMU_TILE_NUM_ENTRIES 2
+
+
+/* Table MMU_TILE_EXT */
+
+/* Register CR_MMU_TILE_EXT */
+#define TOPAZHP_TOP_CR_MMU_TILE_EXT(X) (0x0080 + (4 * (X)))
+#define MASK_TOPAZHP_TOP_CR_TILE_MIN_ADDR_EXT 0x000000FF
+#define SHIFT_TOPAZHP_TOP_CR_TILE_MIN_ADDR_EXT 0
+#define REGNUM_TOPAZHP_TOP_CR_TILE_MIN_ADDR_EXT 0x0080
+#define SIGNED_TOPAZHP_TOP_CR_TILE_MIN_ADDR_EXT 0
+
+#define MASK_TOPAZHP_TOP_CR_TILE_MAX_ADDR_EXT 0x0000FF00
+#define SHIFT_TOPAZHP_TOP_CR_TILE_MAX_ADDR_EXT 8
+#define REGNUM_TOPAZHP_TOP_CR_TILE_MAX_ADDR_EXT 0x0080
+#define SIGNED_TOPAZHP_TOP_CR_TILE_MAX_ADDR_EXT 0
+
+/* Number of entries in table MMU_TILE_EXT */
+
+#define TOPAZHP_TOP_MMU_TILE_EXT_SIZE_UINT32 2
+#define TOPAZHP_TOP_MMU_TILE_EXT_NUM_ENTRIES 2
+
+
+/* Register CR_MTX_DEBUG_MSTR */
+#define TOPAZHP_TOP_CR_MTX_DEBUG_MSTR 0x0044
+#define MASK_TOPAZHP_TOP_CR_MTX_MSTR_DBG_GPIO_IN 0x00000003
+#define SHIFT_TOPAZHP_TOP_CR_MTX_MSTR_DBG_GPIO_IN 0
+#define REGNUM_TOPAZHP_TOP_CR_MTX_MSTR_DBG_GPIO_IN 0x0044
+#define SIGNED_TOPAZHP_TOP_CR_MTX_MSTR_DBG_GPIO_IN 0
+
+#define MASK_TOPAZHP_TOP_CR_MTX_MSTR_DBG_IS_SLAVE 0x00000004
+#define SHIFT_TOPAZHP_TOP_CR_MTX_MSTR_DBG_IS_SLAVE 2
+#define REGNUM_TOPAZHP_TOP_CR_MTX_MSTR_DBG_IS_SLAVE 0x0044
+#define SIGNED_TOPAZHP_TOP_CR_MTX_MSTR_DBG_IS_SLAVE 0
+
+#define MASK_TOPAZHP_TOP_CR_MTX_MSTR_DBG_GPIO_OUT 0x00000018
+#define SHIFT_TOPAZHP_TOP_CR_MTX_MSTR_DBG_GPIO_OUT 3
+#define REGNUM_TOPAZHP_TOP_CR_MTX_MSTR_DBG_GPIO_OUT 0x0044
+#define SIGNED_TOPAZHP_TOP_CR_MTX_MSTR_DBG_GPIO_OUT 0
+
+#define MASK_TOPAZHP_TOP_CR_MTX_MSTR_RAM_BANKS 0x00000F00
+#define SHIFT_TOPAZHP_TOP_CR_MTX_MSTR_RAM_BANKS 8
+#define REGNUM_TOPAZHP_TOP_CR_MTX_MSTR_RAM_BANKS 0x0044
+#define SIGNED_TOPAZHP_TOP_CR_MTX_MSTR_RAM_BANKS 0
+
+#define MASK_TOPAZHP_TOP_CR_MTX_MSTR_RAM_BANK_SIZE 0x000F0000
+#define SHIFT_TOPAZHP_TOP_CR_MTX_MSTR_RAM_BANK_SIZE 16
+#define REGNUM_TOPAZHP_TOP_CR_MTX_MSTR_RAM_BANK_SIZE 0x0044
+#define SIGNED_TOPAZHP_TOP_CR_MTX_MSTR_RAM_BANK_SIZE 0
+
+#define MASK_TOPAZHP_TOP_CR_MTX_MSTR_LAST_RAM_BANK_SIZE 0x0F000000
+#define SHIFT_TOPAZHP_TOP_CR_MTX_MSTR_LAST_RAM_BANK_SIZE 24
+#define REGNUM_TOPAZHP_TOP_CR_MTX_MSTR_LAST_RAM_BANK_SIZE 0x0044
+#define SIGNED_TOPAZHP_TOP_CR_MTX_MSTR_LAST_RAM_BANK_SIZE 0
+
+
+/* Register CR_FIRMWARE_REG_1 */
+#define TOPAZHP_TOP_CR_FIRMWARE_REG_1 0x0100
+#define MASK_TOPAZHP_TOP_CR_FIRMWARE_REG_1 0xFFFFFFFF
+#define SHIFT_TOPAZHP_TOP_CR_FIRMWARE_REG_1 0
+#define REGNUM_TOPAZHP_TOP_CR_FIRMWARE_REG_1 0x0100
+#define SIGNED_TOPAZHP_TOP_CR_FIRMWARE_REG_1 0
+
+/* Register CR_FIRMWARE_REG_4 */
+#define TOPAZHP_TOP_CR_FIRMWARE_REG_4 0x0300
+#define MASK_TOPAZHP_TOP_CR_FIRMWARE_REG_4 0xFFFFFFFF
+#define SHIFT_TOPAZHP_TOP_CR_FIRMWARE_REG_4 0
+#define REGNUM_TOPAZHP_TOP_CR_FIRMWARE_REG_4 0x0300
+#define SIGNED_TOPAZHP_TOP_CR_FIRMWARE_REG_4 0
+
+#define MTX_SCRATCHREG_IDLE                     TOPAZHP_TOP_CR_FIRMWARE_REG_4
+
+/* Flags relating to MTX_SCRATCHREG_IDLE */
+#define SHIFT_FW_IDLE_REG_RECEIVED_COMMANDS	(2)
+#define MASK_FW_IDLE_REG_RECEIVED_COMMANDS	(0x3FC)
+#define SHIFT_FW_IDLE_REG_STATUS                (0)
+#define MASK_FW_IDLE_REG_STATUS                 (3)
+
+#define FW_IDLE_STATUS_IDLE                     (1)
+#define FW_IDLE_STATUS_HW_ACTIVE                (2)
+#define FW_IDLE_STATUS_BUSY                     (3)
+
+
+/*
+ * In secure FW mode the first value written to the command FIFO is copied to MMU_CONTROL_0 by the firmware.
+ * When we don't want that to happen we can write this value instead.  The firmware will know to ignore it as 
+ * long as it is written BEFORE the firmware starts up
+ */
+ #define TOPAZHP_NON_SECURE_FW_MARKER		(0xffffffff)
+
+/*
+ * This value is an arbitrary value that the firmware will write to TOPAZHP_TOP_CR_FIRMWARE_REG_1 when it has 
+ * completed the boot process it copies the value to TOPAZHP_TOP_CR_FIRMWARE_REG_1 to indicate that it is ready
+ */
+ #define TOPAZHP_FW_BOOT_SIGNAL			(0x12345678)
+
+/* Multicore Regs */
+#define REG_OFFSET_TOPAZ_MULTICORE                      0x00000000
+#define REG_OFFSET_TOPAZ_DMAC                           0x00000400
+#define REG_OFFSET_TOPAZ_MTX                            0x00000800
+#define REG_OFFSET_TOPAZ_VLC                            0x00000400
+
+#define REG_MIN_TOPAZ_MULTICORE				0x0
+#define REG_MAX_TOPAZ_MULTICORE				0x03FC
+#define REG_MIN_TOPAZ_CORE				0x0
+#define REG_MAX_TOPAZ_CORE				0x03FC
+#define REG_MIN_TOPAZ_VLC				0x0
+#define REG_MAX_TOPAZ_VLC				0x03FC
+
+#define REG_SIZE_TOPAZ_MULTICORE                        0x00000400
+#define REG_SIZE_TOPAZ_DMAC                             0x00000400
+#define REG_SIZE_TOPAZ_MTX                              0x00000800
+
+enum MTX_eScratchRegData {
+	MTX_SCRATCHREG_BOOTSTATUS = 0, /* !< Coded buffer fullness */
+	MTX_SCRATCHREG_TOHOST,       /* !< Reg for MTX->Host data */
+	MTX_SCRATCHREG_TOMTX,        /* !< Reg for Host->MTX data */
+	MTX_SCRATCHREG_SIZE          /*!< End marker for enum */
+};
+
+/* FIXME, for not(IMG_UINT32_IS_ULONG), use "int" to replace "IMG_INT32" */
+struct IMG_WRITEBACK_MSG {
+	uint32_t ui32CmdWord;
+	union {
+		struct {
+			uint32_t ui32Data;
+			uint32_t ui32ExtraData;
+			uint32_t ui32WritebackVal;
+		};
+		uint32_t ui32CodedBufferConsumed;
+	};
+};
+
+#define TNG_WRITE32(_base, _offs, _val) \
+	iowrite32((_val), dev_priv->topaz_reg + (_base) + (_offs))
+
+#define TNG_READ32(_base, _offs, _pval) \
+	*(_pval) = ioread32(dev_priv->topaz_reg + (_base) + (_offs))
+
+#define MULTICORE_WRITE32(offset, value) \
+	TNG_WRITE32(REG_OFFSET_TOPAZ_MULTICORE, offset, value)
+
+#define MULTICORE_READ32(offset, pointer) \
+	TNG_READ32(REG_OFFSET_TOPAZ_MULTICORE, offset, pointer)
+
+#define DMAC_WRITE32(offset, value) \
+	TNG_WRITE32(REG_OFFSET_TOPAZ_DMAC, offset, value)
+
+#define DMAC_READ32(offset, pointer) \
+	TNG_READ32(REG_OFFSET_TOPAZ_DMAC, offset, pointer)
+
+#define MTX_WRITE32(offset, value) \
+	TNG_WRITE32(REG_OFFSET_TOPAZ_MTX, offset, value)
+
+#define MTX_READ32(offset, pointer) \
+	TNG_READ32(REG_OFFSET_TOPAZ_MTX, offset, pointer)
+
+#define TOPAZCORE_WRITE32(core, offset, value) \
+	TNG_WRITE32(core*(0x1000) + 0x1000, offset, value)
+
+#define TOPAZCORE_READ32(core, offset, pointer) \
+	TNG_READ32(core*(0x1000) + 0x1000, offset, pointer)
+
+#define VLC_WRITE32(core, offset, value) \
+	TNG_WRITE32(core*(0x1000)+0x1000+REG_OFFSET_TOPAZ_VLC, offset, value)
+
+#define FPGA_AXI_WRITE32
+#define FPGA_AXI_READ32
+#define FPGA_OCP_WRITE32
+#define FPGA_OCP_READ32
+
+#define BIAS_MM_WRITE32(base, offset, value)  \
+	TNG_WRITE32(base, offset, value)
+
+#undef MM_WRITE32
+#define MM_WRITE32(base, offset, value)  \
+	TNG_WRITE32(base, offset, value)
+
+#undef MM_READ32
+#define MM_READ32(base, offset, pointer) \
+	TNG_READ32(base, offset, pointer)
+
+#define F_MASK(basename)  (MASK_##basename)
+#define F_SHIFT(basename) (SHIFT_##basename)
+
+#define F_INSERT(word, val, basename)	\
+	(((word) & ~(F_MASK(basename))) | (F_ENCODE(val, basename)))
+
+#define F_ENCODE(val, basename)	\
+	(((val) << (F_SHIFT(basename))) & (F_MASK(basename)))
+
+#define F_DECODE(val, basename)	\
+	(((val) & (F_MASK(basename))) >> (F_SHIFT(basename)))
+
+
+#define F_EXTRACT(val, basename) \
+	(((val) & (F_MASK(basename))) >> (F_SHIFT(basename)))
+
+/*! The number of TOPAZ cores present in the system */
+#define TOPAZSC_NUM_CORES 2
+
+#define TOPAZSC_REG_OFF_MAX (TOPAZSC_NUM_CORES * 0x10000 + 0x10000)
+#define REG_BASE_MTX                        0x04800000
+#define REG_BASE_HOST                       0x00000000
+
+#define MTX_CORE_CODE_MEM       (0x10)
+#define MTX_CORE_DATA_MEM       (0x18)
+
+/* Topaz core registers - Host view */
+#define REG_OFFSET_TOPAZ_CORE_HOST	0x00010000
+#define REG_SIZE_TOPAZ_CORE_HOST	0x00010000
+
+#define REG_OFFSET_TOPAZ_MTX_HOST	0x00000000
+#define REG_OFFSET_TOPAZ_TOPAZ_HOST	0x00002000
+#define REG_OFFSET_TOPAZ_MVEA_HOST	0x00003000
+#define REG_OFFSET_TOPAZ_MVEACMD_HOST	0x00004000
+#define REG_OFFSET_TOPAZ_VLC_HOST	0x00005000
+#define REG_OFFSET_TOPAZ_DEBLOCKER_HOST	0x00006000
+#define REG_OFFSET_TOPAZ_COMMS_HOST	0x00007000
+#define REG_OFFSET_TOPAZ_ESB_HOST	0x00008000
+
+#define REG_SIZE_TOPAZ_MTX_HOST		0x00002000
+#define REG_SIZE_TOPAZ_TOPAZ_HOST	0x00001000
+#define REG_SIZE_TOPAZ_MVEA_HOST	0x00001000
+#define REG_SIZE_TOPAZ_MVEACMD_HOST	0x00001000
+#define REG_SIZE_TOPAZ_VLC_HOST		0x00001000
+#define REG_SIZE_TOPAZ_DEBLOCKER_HOST	0x00001000
+#define REG_SIZE_TOPAZ_COMMS_HOST	0x00001000
+#define REG_SIZE_TOPAZ_ESB_HOST		0x00004000
+
+
+/* Topaz core registers MTX view */
+#define REG_OFFSET_TOPAZ_CORE_MTX	0x00010000 /* MUST confirm */
+#define REG_SIZE_TOPAZ_CORE_MTX		0x00010000 /* MUST confirm */
+
+#define REG_OFFSET_TOPAZ_MTX_MTX	0x00000000
+#define REG_OFFSET_TOPAZ_TOPAZ_MTX	0x00000800
+#define REG_OFFSET_TOPAZ_MVEA_MTX	0x00000C00
+#define REG_OFFSET_TOPAZ_MVEACMD_MTX	0x00001000
+#define REG_OFFSET_TOPAZ_VLC_MTX	0x00001400
+#define REG_OFFSET_TOPAZ_DEBLOCKER_MTX	0x00001800
+#define REG_OFFSET_TOPAZ_COMMS_MTX	0x00001C00
+#define REG_OFFSET_TOPAZ_ESB_MTX	0x00002000
+
+#define REG_SIZE_TOPAZ_MTX_MTX		0x00000800
+#define REG_SIZE_TOPAZ_TOPAZ_MTX	0x00000400
+#define REG_SIZE_TOPAZ_MVEA_MTX		0x00000400
+#define REG_SIZE_TOPAZ_MVEACMD_MTX	0x00000400
+#define REG_SIZE_TOPAZ_VLC_MTX		0x00000400
+#define REG_SIZE_TOPAZ_DEBLOCKER_MTX	0x00000400
+#define REG_SIZE_TOPAZ_COMMS_MTX	0x00000400
+#define REG_SIZE_TOPAZ_ESB_MTX		0x00002000
+
+
+/* Register bank addresses - Host View */
+#define REG_START_TOPAZ_MULTICORE_HOST	\
+	(REG_BASE_HOST + REG_OFFSET_TOPAZ_MULTICORE)
+#define REG_END_TOPAZ_MULTICORE_HOST	\
+	(REG_START_TOPAZ_MULTICORE_HOST + REG_SIZE_TOPAZ_MULTICORE)
+
+#define REG_START_TOPAZ_DMAC_HOST	\
+	(REG_BASE_HOST + REG_OFFSET_TOPAZ_DMAC)
+#define REG_END_TOPAZ_DMAC_HOST		\
+	(REG_START_TOPAZ_DMAC_HOST + REG_SIZE_TOPAZ_DMAC)
+
+/* #define REG_START_TOPAZ_MTX_HOST(core)	\
+	(REG_BASE_HOST + (REG_SIZE_TOPAZ_CORE_HOST*core) + \
+	REG_OFFSET_TOPAZ_CORE_HOST + REG_OFFSET_TOPAZ_MTX_HOST) */
+/* #define REG_END_TOPAZ_MTX_HOST(core)	\
+	(REG_START_TOPAZ_MTX_HOST(core) + REG_SIZE_TOPAZ_MTX_HOST) */
+#define REG_START_TOPAZ_MTX_HOST	\
+	(REG_BASE_HOST + REG_OFFSET_TOPAZ_MTX)
+#define EG_END_TOPAZ_MTX_HOST(core)	\
+	(REG_START_TOPAZ_MTX_HOST + REG_SIZE_TOPAZ_MTX)
+
+
+#define REG_START_TOPAZ_TOPAZ_HOST(core)	\
+	(REG_BASE_HOST + (REG_SIZE_TOPAZ_CORE_HOST*core) + \
+	REG_OFFSET_TOPAZ_CORE_HOST + REG_OFFSET_TOPAZ_TOPAZ_HOST)
+#define REG_END_TOPAZ_TOPAZ_HOST(core)	\
+	(REG_START_TOPAZ_TOPAZ_HOST(core) + REG_SIZE_TOPAZ_TOPAZ_HOST)
+
+#define REG_START_TOPAZ_MVEA_HOST(core)	\
+	(REG_BASE_HOST + (REG_SIZE_TOPAZ_CORE_HOST*core) + \
+	REG_OFFSET_TOPAZ_CORE_HOST + REG_OFFSET_TOPAZ_MVEA_HOST)
+#define REG_END_TOPAZ_MVEA_HOST(core)	\
+	(REG_START_TOPAZ_MVEA_HOST(core) + REG_SIZE_TOPAZ_MVEA_HOST)
+
+
+/* Register bank addresses - MTX view */
+#define REG_START_TOPAZ_MULTICORE_MTX	\
+	(REG_BASE_MTX + REG_OFFSET_TOPAZ_MULTICORE)
+#define REG_END_TOPAZ_MULTICORE_MTX	\
+	(REG_START_TOPAZ_MULTICORE_MTX + REG_SIZE_TOPAZ_MULTICORE)
+
+#define REG_START_TOPAZ_DMAC_MTX	\
+	(REG_BASE_MTX + REG_OFFSET_TOPAZ_DMAC)
+#define REG_END_TOPAZ_DMAC_MTX		\
+	(REG_START_TOPAZ_DMAC_MTX + REG_SIZE_TOPAZ_DMAC)
+
+#define REG_START_TOPAZ_MTX_MTX(core)	\
+	(REG_BASE_MTX + (REG_SIZE_TOPAZ_CORE_MTX*core) + \
+	REG_OFFSET_TOPAZ_CORE_MTX + REG_OFFSET_TOPAZ_MTX_MTX)
+#define REG_END_TOPAZ_MTX_MTX(core)	\
+	(REG_START_TOPAZ_MTX_MTX(core) + REG_SIZE_TOPAZ_MTX_MTX)
+
+#define REG_START_TOPAZ_TOPAZ_MTX(core)	\
+	(REG_BASE_MTX + (REG_SIZE_TOPAZ_CORE_MTX*core) + \
+	REG_OFFSET_TOPAZ_CORE_MTX + REG_OFFSET_TOPAZ_TOPAZ_MTX)
+#define REG_END_TOPAZ_TOPAZ_MTX(core)	\
+	(REG_START_TOPAZ_TOPAZ_MTX(core) + REG_SIZE_TOPAZ_TOPAZ_MTX)
+
+#define REG_START_TOPAZ_MVEA_MTX(core)	\
+	(REG_BASE_MTX + (REG_SIZE_TOPAZ_CORE_MTX*core) + \
+	REG_OFFSET_TOPAZ_CORE_MTX + REG_OFFSET_TOPAZ_MVEA_MTX)
+#define REG_END_TOPAZ_MVEA_MTX(core)	\
+	(REG_START_TOPAZ_MVEA_MTX(core) + REG_SIZE_TOPAZ_MVEA_MTX)
+
+
+/* Every Topaz core has a 64K address space*/
+#define TOPAZ_CORE_REG_BASE(core) (REG_BASE_HOST + \
+	(REG_SIZE_TOPAZ_CORE_HOST*core) + REG_OFFSET_TOPAZ_CORE_HOST)
+
+/* MVEA macro */
+#define MVEA_START 0x03000
+
+#ifdef TOPAZ_PDUMP
+#define MVEA_WRITE32(offset, value, core) \
+	do { \
+		MM_WRITE32(MVEA_START + TOPAZ_CORE_REG_BASE(core), \
+			offset, value); \
+		DRM_ERROR("TOPAZ_PDUMP: MVEA core %d, REG_WT %x %x\n", \
+			core, offset, value); \
+	} while (0)
+#define MVEA_READ32(offset, pointer, core) \
+	do { \
+		MM_READ32(MVEA_START + TOPAZ_CORE_REG_BASE(core), \
+			offset, pointer); \
+		DRM_ERROR("TOPAZ_PDUMP: MVEA core %d, REG_RD %x %x\n", \
+			core, offset, *(uint32_t *)pointer);\
+	} while (0)
+#else
+#define MVEA_WRITE32(offset, value, core) \
+		MM_WRITE32(MVEA_START + TOPAZ_CORE_REG_BASE(core), \
+			offset, value)
+
+#define MVEA_READ32(offset, pointer, core) \
+		MM_READ32(MVEA_START + TOPAZ_CORE_REG_BASE(core), \
+			offset, pointer)
+#endif
+
+#define F_MASK_MVEA(basename)  (MASK_MVEA_##basename)	/*     MVEA    */
+#define F_SHIFT_MVEA(basename) (SHIFT_MVEA_##basename)	/*     MVEA    */
+#define F_ENCODE_MVEA(val, basename)  \
+	(((val)<<(F_SHIFT_MVEA(basename)))&(F_MASK_MVEA(basename)))
+
+/* MVEA ESB macro */
+#define MVEA_ESB_START 0x08000
+
+#ifdef TOPAZ_PDUMP
+#define MVEA_ESB_WRITE32(offset, value, core) \
+	do { \
+		MM_WRITE32(MVEA_ESB_START + TOPAZ_CORE_REG_BASE(core),\
+				offset, value); \
+		DRM_ERROR("TOPAZ_PDUMP: MVEA core %d, REG_WT %x %x\n", \
+				core, offset, value); \
+	} while (0)
+#define MVEA_ESB_READ32(offset, pointer, core) \
+	do { \
+		MM_READ32(MVEA_ESB_START + TOPAZ_CORE_REG_BASE(core),\
+				offset, pointer); \
+		DRM_ERROR("TOPAZ_PDUMP: MVEA core %d, REG_RD %x %x\n", \
+				core, offset, *(uint32_t *)pointer);\
+	} while (0)
+#else
+#define MVEA_ESB_WRITE32(offset, value, core) \
+		MM_WRITE32(MVEA_ESB_START + TOPAZ_CORE_REG_BASE(core), \
+				offset, value)
+
+#define MVEA_ESB_READ32(offset, pointer, core) \
+		MM_READ32(MVEA_ESB_START + TOPAZ_CORE_REG_BASE(core), \
+				offset, pointer)
+#endif
+
+
+/* VLC macro */
+#define TOPAZ_VLC_START 0x05000
+
+/* TopazHP Multicore Memory Map */
+#define TOPAZ_START 0x0000
+
+#ifdef TOPAZ_PDUMP
+#define FPGA_WRITE32(offset, value, core) \
+	do {\
+		MM_WRITE32(TOPAZ_START + TOPAZ_CORE_REG_BASE(core), \
+			offset, value); \
+		DRM_ERROR("TOPAZ_PDUMP: TOPAZ_CORE %d REG_WT: %x %x\n", \
+			core, offset, value); \
+	} while (0)
+#define FPGA_READ32(offset, pointer, core) \
+	do { \
+		MM_READ32(TOPAZ_START + TOPAZ_CORE_REG_BASE(core), \
+			offset, pointer); \
+		DRM_ERROR("TOPAZ_PDUMP: TOPAZ_CORE %d REG_RD: %x %x\n", \
+			core, offset, *(uint32_t *)pointer);\
+	} while (0)
+#else
+#define FPGA_WRITE32(offset, value, core) \
+		MM_WRITE32(TOPAZ_START + TOPAZ_CORE_REG_BASE(core), \
+			offset, value)
+#define FPGA_READ32(offset, pointer, core) \
+		MM_READ32(TOPAZ_START + TOPAZ_CORE_REG_BASE(core), \
+			offset, pointer)
+#endif
+#define F_MASK_TOPAZ(basename)  (MASK_TOPAZ_##basename)
+#define F_SHIFT_TOPAZ(basename) (SHIFT_TOPAZ_##basename)
+#define F_ENCODE_TOPAZ(val, basename) \
+	(((val)<<(F_SHIFT_TOPAZ(basename)))&(F_MASK_TOPAZ(basename)))
+/* DMAC macro */
+#define DMAC_START 0x01000
+
+#define F_MASK_DMAC(basename)  (MASK_DMAC_##basename)
+#define F_SHIFT_DMAC(basename) (SHIFT_DMAC_##basename)
+#define F_ENCODE_DMAC(val, basename)  \
+	(((val)<<(F_SHIFT_DMAC(basename)))&(F_MASK_DMAC(basename)))
+
+/* Register CR_IMG_TOPAZ_INTENAB */
+#define TOPAZ_CR_IMG_TOPAZ_INTENAB  0x0008
+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTEN_MVEA 0x00000001
+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTEN_MVEA 0
+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTEN_MVEA 0x0008
+
+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX 0x00000002
+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX 1
+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX 0x0008
+
+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX_HALT 0x00000004
+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX_HALT 2
+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX_HALT 0x0008
+
+/*(Bit 3 enables fault interrupts caused by the topaz_cores. Bit 4 enables
+ * fault interrupts caused by the DMAC)*/
+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTEN_MMU_FAULT 0x00000018
+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTEN_MMU_FAULT 3
+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTEN_MMU_FAULT 0x0008
+
+#define MASK_TOPAZ_CR_IMG_TOPAZ_MAS_INTEN 0x80000000
+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_MAS_INTEN 31
+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_MAS_INTEN 0x0008
+
+#define TOPAZ_CR_IMG_TOPAZ_INTCLEAR 0x000C
+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTCLR_MVEA 0x00000001
+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTCLR_MVEA 0
+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTCLR_MVEA 0x000C
+
+#define TOPAZ_CR_IMG_TOPAZ_INTSTAT  0x0004
+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTS_MVEA 0x00000001
+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTS_MVEA 0
+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTS_MVEA 0x0004
+
+#define MTX_CCBCTRL_ROFF		0
+#define MTX_CCBCTRL_COMPLETE		4
+#define MTX_CCBCTRL_CCBSIZE		8
+#define MTX_CCBCTRL_QP			12
+#define MTX_CCBCTRL_FRAMESKIP		20
+#define MTX_CCBCTRL_INITQP		24
+
+#define TOPAZ_CR_MMU_STATUS         0x001C
+#define MASK_TOPAZ_CR_MMU_PF_N_RW   0x00000001
+#define SHIFT_TOPAZ_CR_MMU_PF_N_RW  0
+#define REGNUM_TOPAZ_CR_MMU_PF_N_RW 0x001C
+
+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTCLR_MMU_FAULT 0x00000008
+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTCLR_MMU_FAULT 3
+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTCLR_MMU_FAULT 0x000C
+
+#define TOPAZ_CR_MMU_MEM_REQ        0x0020
+#define MASK_TOPAZ_CR_MEM_REQ_STAT_READS 0x000000FF
+#define SHIFT_TOPAZ_CR_MEM_REQ_STAT_READS 0
+#define REGNUM_TOPAZ_CR_MEM_REQ_STAT_READS 0x0020
+
+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX 0x00000002
+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX 1
+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX 0x000C
+
+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX_HALT 0x00000004
+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX_HALT 2
+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX_HALT 0x000C
+
+/* Register CR_TOPAZ_CMD_FIFO_2 */
+#define TOPAZ_CR_TOPAZ_CMD_FIFO_2   0x005C
+#define MASK_TOPAZ_CR_CMD_FIFO_FLUSH 0x00000001
+#define SHIFT_TOPAZ_CR_CMD_FIFO_FLUSH 0
+#define REGNUM_TOPAZ_CR_CMD_FIFO_FLUSH 0x005C
+
+#define MTX_DATA_MEM_BASE		0x82880000
+#define MASK_MTX_MTX_MCMID          0x0FF00000
+#define SHIFT_MTX_MTX_MCMID         20
+#define REGNUM_MTX_MTX_MCMID        0x0108
+
+#define MASK_MTX_MTX_MCM_ADDR       0x000FFFFC
+#define SHIFT_MTX_MTX_MCM_ADDR      2
+#define REGNUM_MTX_MTX_MCM_ADDR     0x0108
+
+#define MTX_CR_MTX_RAM_ACCESS_STATUS 0x010C
+#define MASK_MTX_MTX_MTX_MCM_STAT   0x00000001
+#define SHIFT_MTX_MTX_MTX_MCM_STAT  0
+#define REGNUM_MTX_MTX_MTX_MCM_STAT 0x010C
+
+#define MASK_MTX_MTX_MCMAI          0x00000002
+#define SHIFT_MTX_MTX_MCMAI         1
+#define REGNUM_MTX_MTX_MCMAI        0x0108
+
+#define MVEA_CR_MVEA_BUSY           0x0018
+#define MVEA_CR_MVEA_DMACMDFIFO_WAIT 0x001C
+#define MVEA_CR_MVEA_DMACMDFIFO_STATUS 0x0020
+
+#define MVEA_CR_IMG_MVEA_SRST       0x0000
+#define MASK_MVEA_CR_IMG_MVEA_SPE_SOFT_RESET 0x00000001
+#define SHIFT_MVEA_CR_IMG_MVEA_SPE_SOFT_RESET 0
+#define REGNUM_MVEA_CR_IMG_MVEA_SPE_SOFT_RESET 0x0000
+
+#define MASK_MVEA_CR_IMG_MVEA_IPE_SOFT_RESET 0x00000002
+#define SHIFT_MVEA_CR_IMG_MVEA_IPE_SOFT_RESET 1
+#define REGNUM_MVEA_CR_IMG_MVEA_IPE_SOFT_RESET 0x0000
+
+#define MASK_MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET 0x00000004
+#define SHIFT_MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET 2
+#define REGNUM_MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET 0x0000
+
+#define MASK_MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET 0x00000008
+#define SHIFT_MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET 3
+#define REGNUM_MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET 0x0000
+
+#define MASK_MVEA_CR_IMG_MVEA_CMC_SOFT_RESET 0x00000010
+#define SHIFT_MVEA_CR_IMG_MVEA_CMC_SOFT_RESET 4
+#define REGNUM_MVEA_CR_IMG_MVEA_CMC_SOFT_RESET 0x0000
+
+#define MASK_MVEA_CR_IMG_MVEA_DCF_SOFT_RESET 0x00000020
+#define SHIFT_MVEA_CR_IMG_MVEA_DCF_SOFT_RESET 5
+#define REGNUM_MVEA_CR_IMG_MVEA_DCF_SOFT_RESET 0x0000
+
+#define TOPAZ_CR_IMG_TOPAZ_CORE_ID  0x03C0
+#define TOPAZ_CR_IMG_TOPAZ_CORE_REV 0x03D0
+
+#define TOPAZ_MTX_PC		(0x00000005)
+
+#define TOPAZ_CR_TOPAZ_AUTO_CLK_GATE 0x0014
+#define MASK_TOPAZ_CR_TOPAZ_VLC_AUTO_CLK_GATE 0x00000001
+#define SHIFT_TOPAZ_CR_TOPAZ_VLC_AUTO_CLK_GATE 0
+#define REGNUM_TOPAZ_CR_TOPAZ_VLC_AUTO_CLK_GATE 0x0014
+
+#define MASK_TOPAZ_CR_TOPAZ_DB_AUTO_CLK_GATE 0x00000002
+#define SHIFT_TOPAZ_CR_TOPAZ_DB_AUTO_CLK_GATE 1
+#define REGNUM_TOPAZ_CR_TOPAZ_DB_AUTO_CLK_GATE 0x0014
+
+#define	MTX_CORE_CR_MTX_REGISTER_READ_WRITE_DATA_OFFSET 0x000000F8
+#define	MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET 0x000000FC
+#define	MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_MASK 0x00010000
+#define	MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK 0x80000000
+
+#define	TOPAZ_CORE_CR_MTX_DEBUG_OFFSET	0x00000044
+
+#define MASK_TOPAZ_CR_MTX_RAM_BANKS 0x00000F00
+#define SHIFT_TOPAZ_CR_MTX_RAM_BANKS 8
+#define REGNUM_TOPAZ_CR_MTX_RAM_BANKS 0x0044
+
+#define MASK_TOPAZ_CR_MTX_RAM_BANK_SIZE 0x000F0000
+#define SHIFT_TOPAZ_CR_MTX_RAM_BANK_SIZE 16
+#define REGNUM_TOPAZ_CR_MTX_RAM_BANK_SIZE 0x0044
+
+#define MASK_TOPAZ_CR_MTX_LAST_RAM_BANK_SIZE 0x0F000000
+#define SHIFT_TOPAZ_CR_MTX_LAST_RAM_BANK_SIZE 24
+#define REGNUM_TOPAZ_CR_MTX_LAST_RAM_BANK_SIZE 0x0044
+
+#define MASK_TOPAZ_CR_MTX_DBG_IS_SLAVE 0x00000004
+#define SHIFT_TOPAZ_CR_MTX_DBG_IS_SLAVE 2
+#define REGNUM_TOPAZ_CR_MTX_DBG_IS_SLAVE 0x003C
+
+#define MASK_TOPAZ_CR_MTX_DBG_GPIO_OUT 0x00000018
+#define SHIFT_TOPAZ_CR_MTX_DBG_GPIO_OUT 3
+#define REGNUM_TOPAZ_CR_MTX_DBG_GPIO_OUT 0x003C
+
+/* Register CR_MTX_RAM_ACCESS_DATA_EXCHANGE */
+#define MTX_CR_MTX_RAM_ACCESS_DATA_EXCHANGE 0x0100
+/* Register CR_MTX_RAM_ACCESS_DATA_TRANSFER */
+#define MTX_CR_MTX_RAM_ACCESS_DATA_TRANSFER 0x0104
+
+#define	MTX_CORE_CR_MTX_RAM_ACCESS_CONTROL_OFFSET 0x00000108
+#define MASK_MTX_MTX_MCMR           0x00000001
+#define SHIFT_MTX_MTX_MCMR          0
+#define REGNUM_MTX_MTX_MCMR         0x0108
+
+#define MASK_MTX_MTX_MCMAI          0x00000002
+#define SHIFT_MTX_MTX_MCMAI         1
+#define REGNUM_MTX_MTX_MCMAI        0x0108
+
+#define MASK_MTX_MTX_MCM_ADDR       0x000FFFFC
+#define SHIFT_MTX_MTX_MCM_ADDR      2
+#define REGNUM_MTX_MTX_MCM_ADDR     0x0108
+
+#define MASK_MTX_MTX_MCMID          0x0FF00000
+#define SHIFT_MTX_MTX_MCMID         20
+#define REGNUM_MTX_MTX_MCMID        0x0108
+
+#define TOPAZ_CR_MMU_CONTROL0       0x0024
+#define MASK_TOPAZ_CR_MMU_BYPASS_DMAC 0x00020000
+#define SHIFT_TOPAZ_CR_MMU_BYPASS_DMAC 17
+#define REGNUM_TOPAZ_CR_MMU_BYPASS_DMAC 0x0024
+
+#define MASK_TOPAZ_CR_MMU_BYPASS    0x00010000
+#define SHIFT_TOPAZ_CR_MMU_BYPASS   16
+#define REGNUM_TOPAZ_CR_MMU_BYPASS  0x0024
+
+#define TOPAZ_CR_MMU_DIR_LIST_BASE(X) (0x0030 + (4 * (X)))
+#define MASK_TOPAZ_CR_MMU_DIR_LIST_BASE_ADDR 0xFFFFF000
+#define SHIFT_TOPAZ_CR_MMU_DIR_LIST_BASE_ADDR 12
+#define REGNUM_TOPAZ_CR_MMU_DIR_LIST_BASE_ADDR 0x0030
+
+#define MASK_TOPAZ_CR_MMU_INVALDC   0x00000008
+#define SHIFT_TOPAZ_CR_MMU_INVALDC  3
+#define REGNUM_TOPAZ_CR_MMU_INVALDC 0x0024
+
+#define MASK_TOPAZ_CR_MMU_FLUSH     0x00000004
+#define SHIFT_TOPAZ_CR_MMU_FLUSH    2
+#define REGNUM_TOPAZ_CR_MMU_FLUSH   0x0024
+
+#define TOPAZ_CR_MMU_CONTROL1       0x0028
+
+/* Register CR_MMU_BANK_INDEX */
+#define TOPAZ_CR_MMU_BANK_INDEX     0x0040
+#define MASK_TOPAZ_CR_MMU_BANK_N_INDEX_M(i) (0x00000003 << (8 + ((i) * 2)))
+#define SHIFT_TOPAZ_CR_MMU_BANK_N_INDEX_M(i) (8 + ((i) * 2))
+#define REGNUM_TOPAZ_CR_MMU_BANK_N_INDEX_M(i) 0x0040
+
+#define MASK_TOPAZ_CR_MMU_BANK_SELECT(i) (0x00000001 << (0 + ((i) * 1)))
+#define SHIFT_TOPAZ_CR_MMU_BANK_SELECT(i) (0 + ((i) * 1))
+#define REGNUM_TOPAZ_CR_MMU_BANK_SELECT(i) 0x0040
+
+#define TOPAZ_CR_TOPAZ_MAN_CLK_GATE 0x0010
+#define MASK_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE 0x00000002
+#define SHIFT_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE 1
+#define REGNUM_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE 0x0010
+
+#define MTX_CORE_CR_MTX_TXRPT_OFFSET 0x0000000c
+#define TXRPT_WAITONKICK_VALUE 0x8ade0000
+
+#define MTX_CORE_CR_MTX_ENABLE_MTX_TOFF_MASK 0x00000002
+
+#define MTX_CORE_CR_MTX_ENABLE_OFFSET 0x00000000
+#define	MTX_CORE_CR_MTX_ENABLE_MTX_ENABLE_MASK 0x00000001
+
+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTS_MTX 0x00000002
+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTS_MTX 1
+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTS_MTX 0x0004
+
+#define	MTX_CORE_CR_MTX_SOFT_RESET_OFFSET 0x00000200
+#define	MTX_CORE_CR_MTX_SOFT_RESET_MTX_RESET_MASK 0x00000001
+#define MASK_MTX_BURSTSIZE          0x07000000
+#define SHIFT_MTX_BURSTSIZE         24
+#define REGNUM_MTX_BURSTSIZE        0x0340
+
+#define MASK_MTX_RNW                0x00020000
+#define SHIFT_MTX_RNW               17
+#define REGNUM_MTX_RNW              0x0340
+
+#define MASK_MTX_ENABLE             0x00010000
+#define SHIFT_MTX_ENABLE            16
+#define REGNUM_MTX_ENABLE           0x0340
+
+#define MASK_MTX_LENGTH             0x0000FFFF
+#define SHIFT_MTX_LENGTH            0
+#define REGNUM_MTX_LENGTH           0x0340
+
+#define TOPAZ_CR_IMG_TOPAZ_SRST     0x0000
+#define MASK_TOPAZ_CR_IMG_TOPAZ_MVEA_SOFT_RESET 0x00000001
+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_MVEA_SOFT_RESET 0
+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_MVEA_SOFT_RESET 0x0000
+
+#define MASK_TOPAZ_CR_IMG_TOPAZ_IO_SOFT_RESET 0x00000004
+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_IO_SOFT_RESET 2
+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_IO_SOFT_RESET 0x0000
+#define SIGNED_TOPAZ_CR_IMG_TOPAZ_IO_SOFT_RESET 0
+
+#define MASK_TOPAZ_CR_IMG_TOPAZ_VLC_SOFT_RESET 0x00000008
+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_VLC_SOFT_RESET 3
+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_VLC_SOFT_RESET 0x0000
+#define SIGNED_TOPAZ_CR_IMG_TOPAZ_VLC_SOFT_RESET 0
+
+#define MASK_TOPAZ_CR_IMG_TOPAZ_DB_SOFT_RESET 0x00000010
+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_DB_SOFT_RESET 4
+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_DB_SOFT_RESET 0x0000
+#define SIGNED_TOPAZ_CR_IMG_TOPAZ_DB_SOFT_RESET 0
+
+#define MASK_TOPAZ_CR_IMG_TOPAZ_MTX_SOFT_RESET 0x00000002
+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_MTX_SOFT_RESET 1
+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_MTX_SOFT_RESET 0x0000
+
+#define MVEA_CR_MVEA_AUTO_CLOCK_GATING 0x0024
+#define MASK_MVEA_CR_MVEA_SPE_AUTO_CLK_GATE 0x00000001
+#define SHIFT_MVEA_CR_MVEA_SPE_AUTO_CLK_GATE 0
+#define REGNUM_MVEA_CR_MVEA_SPE_AUTO_CLK_GATE 0x0024
+
+#define MASK_MVEA_CR_MVEA_IPE_AUTO_CLK_GATE 0x00000002
+#define SHIFT_MVEA_CR_MVEA_IPE_AUTO_CLK_GATE 1
+#define REGNUM_MVEA_CR_MVEA_IPE_AUTO_CLK_GATE 0x0024
+
+#define MASK_MVEA_CR_MVEA_CMPRS_AUTO_CLK_GATE 0x00000004
+#define SHIFT_MVEA_CR_MVEA_CMPRS_AUTO_CLK_GATE 2
+#define REGNUM_MVEA_CR_MVEA_CMPRS_AUTO_CLK_GATE 0x0024
+
+#define MASK_MVEA_CR_MVEA_JMCOMP_AUTO_CLK_GATE 0x00000008
+#define SHIFT_MVEA_CR_MVEA_JMCOMP_AUTO_CLK_GATE 3
+#define REGNUM_MVEA_CR_MVEA_JMCOMP_AUTO_CLK_GATE 0x0024
+
+/* Register CR_TOPAZ_HW_CFG */
+#define TOPAZ_CR_TOPAZ_HW_CFG       0x0050
+#define MASK_TOPAZ_CR_NUM_CORES_SUPPORTED 0x0000001F
+#define SHIFT_TOPAZ_CR_NUM_CORES_SUPPORTED 0
+#define REGNUM_TOPAZ_CR_NUM_CORES_SUPPORTED 0x0050
+
+/* Register CR_TOPAZ_CMD_FIFO_0 */
+#define TOPAZ_CR_TOPAZ_CMD_FIFO_0   0x0054
+#define MASK_TOPAZ_CR_CMD_FIFO_RDATA 0xFFFFFFFF
+#define SHIFT_TOPAZ_CR_CMD_FIFO_RDATA 0
+#define REGNUM_TOPAZ_CR_CMD_FIFO_RDATA 0x0054
+
+/* Register CR_TOPAZ_CMD_FIFO_1 */
+#define TOPAZ_CR_TOPAZ_CMD_FIFO_1   0x0058
+#define MASK_TOPAZ_CR_CMD_FIFO_QUANTITY 0x000000FF
+#define SHIFT_TOPAZ_CR_CMD_FIFO_QUANTITY 0
+#define REGNUM_TOPAZ_CR_CMD_FIFO_QUANTITY 0x0058
+
+#define MASK_TOPAZ_CR_CMD_FIFO_NOTEMPTY 0x00000100
+#define SHIFT_TOPAZ_CR_CMD_FIFO_NOTEMPTY 8
+#define REGNUM_TOPAZ_CR_CMD_FIFO_NOTEMPTY 0x0058
+
+/* Register CR_TOPAZ_CMD_FIFO_2 */
+#define TOPAZ_CR_TOPAZ_CMD_FIFO_2   0x005C
+#define MASK_TOPAZ_CR_CMD_FIFO_FLUSH 0x00000001
+#define SHIFT_TOPAZ_CR_CMD_FIFO_FLUSH 0
+#define REGNUM_TOPAZ_CR_CMD_FIFO_FLUSH 0x005C
+
+#define MASK_IMG_SOC_BSWAP          0x40000000
+#define SHIFT_IMG_SOC_BSWAP         30
+#define REGNUM_IMG_SOC_BSWAP        0x0004
+
+#define MASK_IMG_SOC_PW             0x18000000
+#define SHIFT_IMG_SOC_PW            27
+#define REGNUM_IMG_SOC_PW           0x0004
+
+#define MASK_IMG_SOC_DIR            0x04000000
+#define SHIFT_IMG_SOC_DIR           26
+#define REGNUM_IMG_SOC_DIR          0x0004
+
+#define MASK_IMG_SOC_PI             0x03000000
+#define SHIFT_IMG_SOC_PI            24
+#define REGNUM_IMG_SOC_PI           0x0004
+#define IMG_SOC_PI_1		0x00000002
+#define IMG_SOC_PI_2		0x00000001
+#define IMG_SOC_PI_4		0x00000000
+
+/* #define MASK_IMG_SOC_TRANSFER_IEN   0x20000000 */
+/* #define SHIFT_IMG_SOC_TRANSFER_IEN  29 */
+/* #define REGNUM_IMG_SOC_TRANSFER_IEN 0x0004 */
+
+/* copy from topazscfwif.h */
+#define DMAC_VALUE_COUNT(BSWAP, PW, DIR, PERIPH_INCR, COUNT) \
+	((((BSWAP)       << SHIFT_IMG_SOC_BSWAP) & MASK_IMG_SOC_BSWAP) | \
+	(((PW)          << SHIFT_IMG_SOC_PW)     & MASK_IMG_SOC_PW)    | \
+	(((DIR)         << SHIFT_IMG_SOC_DIR)    & MASK_IMG_SOC_DIR)   | \
+	(((PERIPH_INCR) << SHIFT_IMG_SOC_PI)     & MASK_IMG_SOC_PI)    | \
+	(((COUNT)       << SHIFT_IMG_SOC_CNT)    & MASK_IMG_SOC_CNT))
+
+#define DMAC_VALUE_PERIPH_PARAM(ACC_DEL, INCR, BURST) \
+	((((ACC_DEL) << SHIFT_IMG_SOC_ACC_DEL) & MASK_IMG_SOC_ACC_DEL) | \
+	(((INCR)    << SHIFT_IMG_SOC_INCR)    & MASK_IMG_SOC_INCR) | \
+	(((BURST)   << SHIFT_IMG_SOC_BURST)   & MASK_IMG_SOC_BURST))
+
+#define IMG_SOC_DMAC_PERIPHERAL_ADDR(X) (0x0014 + (32 * (X)))
+#define MASK_IMG_SOC_ADDR           0x007FFFFF
+#define SHIFT_IMG_SOC_ADDR          0
+#define REGNUM_IMG_SOC_ADDR         0x0014
+
+#define SHIFT_TOPAZ_VEC_BUSY        11
+#define MASK_TOPAZ_VEC_BUSY         (0x1<<SHIFT_TOPAZ_VEC_BUSY)
+
+#define TOPAZ_MTX_TXRPT_OFFSET         0xc
+#define TOPAZ_GUNIT_GVD_PSMI_GFX_OFFSET 0x20D0
+
+#define TOPAZ_GUNIT_READ32(offset)  ioread32(dev_priv->vdc_reg + offset)
+#define TOPAZ_READ_BITS(val, basename) \
+		(((val)&MASK_TOPAZ_##basename)>>SHIFT_TOPAZ_##basename)
+
+#define TOPAZ_WAIT_UNTIL_IDLE \
+	do { \
+		uint8_t tmp_poll_number = 0;\
+		uint32_t tmp_reg; \
+		if (topaz_priv->topaz_cmd_windex == \
+			WB_CCB_CTRL_RINDEX(dev_priv)) { \
+			tmp_reg = TOPAZ_GUNIT_READ32( \
+					TOPAZ_GUNIT_GVD_PSMI_GFX_OFFSET); \
+		if (0 != TOPAZ_READ_BITS(tmp_reg, VEC_BUSY)) { \
+			MTX_READ32(TOPAZ_MTX_TXRPT_OFFSET, &tmp_reg);\
+			while ((tmp_reg != 0x8ade0000) && \
+				(tmp_poll_number++ < 10)) \
+				MTX_READ32(0xc, &tmp_reg); \
+				PSB_DEBUG_GENERAL( \
+					"TOPAZ: TXRPT reg remain: %x," \
+					"poll %d times.\n", \
+					tmp_reg, tmp_poll_number);\
+			} \
+		} \
+	} while (0)
+
+/* Register CR_BUFFER_SIDEBAND */
+#define MVEA_CR_BUFFER_SIDEBAND     0x017C
+#define MASK_MVEA_CR_CURR_MB_SBAND  0x00000003
+#define SHIFT_MVEA_CR_CURR_MB_SBAND 0
+#define REGNUM_MVEA_CR_CURR_MB_SBAND 0x017C
+
+#define MASK_MVEA_CR_ABOVE_PIX_IN_SBAND 0x0000000C
+#define SHIFT_MVEA_CR_ABOVE_PIX_IN_SBAND 2
+#define REGNUM_MVEA_CR_ABOVE_PIX_IN_SBAND 0x017C
+
+#define MASK_MVEA_CR_CURR_PARAM_SBAND 0x00000030
+#define SHIFT_MVEA_CR_CURR_PARAM_SBAND 4
+#define REGNUM_MVEA_CR_CURR_PARAM_SBAND 0x017C
+
+#define MASK_MVEA_CR_BELOW_PARAM_IN_SBAND 0x000000C0
+#define SHIFT_MVEA_CR_BELOW_PARAM_IN_SBAND 6
+#define REGNUM_MVEA_CR_BELOW_PARAM_IN_SBAND 0x017C
+
+#define MASK_MVEA_CR_ABOVE_PARAM_IN_SBAND 0x00000300
+#define SHIFT_MVEA_CR_ABOVE_PARAM_IN_SBAND 8
+#define REGNUM_MVEA_CR_ABOVE_PARAM_IN_SBAND 0x017C
+
+#define MASK_MVEA_CR_REF_SBAND      0x00000C00
+#define SHIFT_MVEA_CR_REF_SBAND     10
+#define REGNUM_MVEA_CR_REF_SBAND    0x017C
+
+#define MASK_MVEA_CR_RECON_SBAND    0x00003000
+#define SHIFT_MVEA_CR_RECON_SBAND   12
+#define REGNUM_MVEA_CR_RECON_SBAND  0x017C
+
+#define MASK_MVEA_CR_ABOVE_PIX_OUT_SBAND 0x0000C000
+#define SHIFT_MVEA_CR_ABOVE_PIX_OUT_SBAND 14
+#define REGNUM_MVEA_CR_ABOVE_PIX_OUT_SBAND 0x017C
+
+#define MASK_MVEA_CR_BELOW_PARAM_OUT_SBAND 0x00030000
+#define SHIFT_MVEA_CR_BELOW_PARAM_OUT_SBAND 16
+#define REGNUM_MVEA_CR_BELOW_PARAM_OUT_SBAND 0x017C
+
+#define MASK_MVEA_CR_ABOVE_PARAM_OUT_SBAND 0x000C0000
+#define SHIFT_MVEA_CR_ABOVE_PARAM_OUT_SBAND 18
+#define REGNUM_MVEA_CR_ABOVE_PARAM_OUT_SBAND 0x017C
+
+/* Register CR_IPE_JITTER_FACTOR */
+#define MVEA_CR_IPE_JITTER_FACTOR   0x0218
+#define MASK_MVEA_CR_IPE_JITTER_FACTOR 0x00000003
+#define SHIFT_MVEA_CR_IPE_JITTER_FACTOR 0
+#define REGNUM_MVEA_CR_IPE_JITTER_FACTOR 0x0218
+
+/* Register CR_MULTICORE_INT_STAT */
+#define TOPAZSC_CR_MULTICORE_INT_STAT 0x0000
+#define MASK_TOPAZSC_CR_INT_STAT_DMAC 0x80000000
+#define SHIFT_TOPAZSC_CR_INT_STAT_DMAC 31
+#define REGNUM_TOPAZSC_CR_INT_STAT_DMAC 0x0000
+
+#define MASK_TOPAZSC_CR_INT_STAT_CORES 0x7FFFFFFF
+#define SHIFT_TOPAZSC_CR_INT_STAT_CORES 0
+#define REGNUM_TOPAZSC_CR_INT_STAT_CORES 0x0000
+
+/* Register CR_MULTICORE_CORE_SEL_0 */
+#define TOPAZSC_CR_MULTICORE_CORE_SEL_0 0x0004
+#define MASK_TOPAZSC_CR_DMAC_CORE_SELECT 0x0000000F
+#define SHIFT_TOPAZSC_CR_DMAC_CORE_SELECT 0
+#define REGNUM_TOPAZSC_CR_DMAC_CORE_SELECT 0x0004
+
+#define MASK_TOPAZSC_CR_WRITES_CORE_ALL 0x80000000
+#define SHIFT_TOPAZSC_CR_WRITES_CORE_ALL 31
+#define REGNUM_TOPAZSC_CR_WRITES_CORE_ALL 0x0004
+
+/* Register CR_MULTICORE_CORE_SEL_1 */
+#define TOPAZSC_CR_MULTICORE_CORE_SEL_1 0x0008
+#define MASK_TOPAZSC_CR_RTM_PORT_CORE_SELECT 0x0000000F
+#define SHIFT_TOPAZSC_CR_RTM_PORT_CORE_SELECT 0
+#define REGNUM_TOPAZSC_CR_RTM_PORT_CORE_SELECT 0x0008
+
+/* Register CR_MULTICORE_RSVD0 */
+#define TOPAZSC_CR_MULTICORE_RSVD0  0x0010
+#define MASK_TOPAZSC_CR_RESERVED0   0xFFFFFFFF
+#define SHIFT_TOPAZSC_CR_RESERVED0  0
+#define REGNUM_TOPAZSC_CR_RESERVED0 0x0010
+
+/* Register CR_MULTICORE_CMD_FIFO_0 */
+#define TOPAZSC_CR_MULTICORE_CMD_FIFO_0 0x0014
+#define MASK_TOPAZSC_CR_CMD_FIFO_WDATA 0xFFFFFFFF
+#define SHIFT_TOPAZSC_CR_CMD_FIFO_WDATA 0
+#define REGNUM_TOPAZSC_CR_CMD_FIFO_WDATA 0x0014
+
+/* Register CR_MULTICORE_CMD_FIFO_1 */
+#define TOPAZSC_CR_MULTICORE_CMD_FIFO_1 0x0018
+#define MASK_TOPAZSC_CR_CMD_FIFO_SPACE 0x000000FF
+#define SHIFT_TOPAZSC_CR_CMD_FIFO_SPACE 0
+#define REGNUM_TOPAZSC_CR_CMD_FIFO_SPACE 0x0018
+
+#define MASK_TOPAZSC_CR_CMD_FIFO_FULL 0x00000100
+#define SHIFT_TOPAZSC_CR_CMD_FIFO_FULL 8
+#define REGNUM_TOPAZSC_CR_CMD_FIFO_FULL 0x0018
+
+/* Register CR_MULTICORE_IDLE_PWR_MAN */
+#define TOPAZSC_CR_MULTICORE_IDLE_PWR_MAN 0x001C
+#define MASK_TOPAZSC_CR_TOPAZ_IDLE_DISABLE 0x00000001
+#define SHIFT_TOPAZSC_CR_TOPAZ_IDLE_DISABLE 0
+#define REGNUM_TOPAZSC_CR_TOPAZ_IDLE_DISABLE 0x001C
+
+/* Register CR_CMC_PROC_ESB_ACCESS */
+#define MVEA_CR_CMC_PROC_ESB_ACCESS 0x011C
+#define MASK_MVEA_CR_CMC_PROC_ESB_REGION_NUMBER 0x0000001F
+#define SHIFT_MVEA_CR_CMC_PROC_ESB_REGION_NUMBER 0
+#define REGNUM_MVEA_CR_CMC_PROC_ESB_REGION_NUMBER 0x011C
+#define SIGNED_MVEA_CR_CMC_PROC_ESB_REGION_NUMBER 0
+
+
+/* Table CR_CMC_ESB_LOGICAL_REGION_SETUP_TABLE */
+
+/* Register CR_CMC_ESB_LOGICAL_REGION_SETUP */
+#define MVEA_CR_CMC_ESB_LOGICAL_REGION_SETUP(X) (0x0080 + (4 * (X)))
+#define MASK_MVEA_CR_CMC_ESB_REGION_VALID 0x80000000
+#define SHIFT_MVEA_CR_CMC_ESB_REGION_VALID 31
+#define REGNUM_MVEA_CR_CMC_ESB_REGION_VALID 0x0080
+#define SIGNED_MVEA_CR_CMC_ESB_REGION_VALID 0
+
+#define MASK_MVEA_CR_CMC_ESB_REGION_TYPE 0x60000000
+#define SHIFT_MVEA_CR_CMC_ESB_REGION_TYPE 29
+#define REGNUM_MVEA_CR_CMC_ESB_REGION_TYPE 0x0080
+#define SIGNED_MVEA_CR_CMC_ESB_REGION_TYPE 0
+
+#define MASK_MVEA_CR_CMC_ESB_REGION_LOGICAL_WIDTH 0x00F00000
+#define SHIFT_MVEA_CR_CMC_ESB_REGION_LOGICAL_WIDTH 20
+#define REGNUM_MVEA_CR_CMC_ESB_REGION_LOGICAL_WIDTH 0x0080
+#define SIGNED_MVEA_CR_CMC_ESB_REGION_LOGICAL_WIDTH 0
+
+#define MASK_MVEA_CR_CMC_ESB_REGION_LOGICAL_OFFSET_X 0x000F0000
+#define SHIFT_MVEA_CR_CMC_ESB_REGION_LOGICAL_OFFSET_X 16
+#define REGNUM_MVEA_CR_CMC_ESB_REGION_LOGICAL_OFFSET_X 0x0080
+#define SIGNED_MVEA_CR_CMC_ESB_REGION_LOGICAL_OFFSET_X 0
+
+#define MASK_MVEA_CR_CMC_ESB_REGION_PHYS_HEIGHT 0x0000F000
+#define SHIFT_MVEA_CR_CMC_ESB_REGION_PHYS_HEIGHT 12
+#define REGNUM_MVEA_CR_CMC_ESB_REGION_PHYS_HEIGHT 0x0080
+#define SIGNED_MVEA_CR_CMC_ESB_REGION_PHYS_HEIGHT 0
+
+#define MASK_MVEA_CR_CMC_ESB_REGION_PHYS_WIDTH 0x00000F00
+#define SHIFT_MVEA_CR_CMC_ESB_REGION_PHYS_WIDTH 8
+#define REGNUM_MVEA_CR_CMC_ESB_REGION_PHYS_WIDTH 0x0080
+#define SIGNED_MVEA_CR_CMC_ESB_REGION_PHYS_WIDTH 0
+
+#define MASK_MVEA_CR_CMC_ESB_REGION_PHYS_ORIGIN_Y 0x000000F0
+#define SHIFT_MVEA_CR_CMC_ESB_REGION_PHYS_ORIGIN_Y 4
+#define REGNUM_MVEA_CR_CMC_ESB_REGION_PHYS_ORIGIN_Y 0x0080
+#define SIGNED_MVEA_CR_CMC_ESB_REGION_PHYS_ORIGIN_Y 0
+
+#define MASK_MVEA_CR_CMC_ESB_REGION_PHYS_ORIGIN_X 0x0000000F
+#define SHIFT_MVEA_CR_CMC_ESB_REGION_PHYS_ORIGIN_X 0
+#define REGNUM_MVEA_CR_CMC_ESB_REGION_PHYS_ORIGIN_X 0x0080
+#define SIGNED_MVEA_CR_CMC_ESB_REGION_PHYS_ORIGIN_X 0
+
+
+/* Bit 31:16 is the count of remaining items to be transferred */
+/* Bit 0 is 1 if transfer State == 'do_nothing'*/
+#define MTX_CR_MTX_SYSC_CDMAS0      0x0348
+
+/* Bit 25:2 is the current core DMA transfer address.*/
+#define MTX_CR_MTX_SYSC_CDMAS1      0x034C
+
+/* Number of entries in table CR_CMC_ESB_LOGICAL_REGION_SETUP_TABLE */
+
+#define MVEA_CR_CMC_ESB_LOGICAL_REGION_SETUP_TABLE_SIZE_UINT32 32
+#define MVEA_CR_CMC_ESB_LOGICAL_REGION_SETUP_TABLE_NUM_ENTRIES 32
+
+#define ESB_HWSYNC      10
+#define ESB_POS_MANGLER_ORIGINX(x) ((x) >> 3)
+#define ESB_POS_MANGLER(x) ((x) >> 3)
+
+#define ESB_SIZE_MANGLER(x) (((x) >> 3) - 1)
+#define REGION_TYPE_LINEAR 0
+
+#define TOPAZSC_ESB_REGION_Y_MAX		(46)
+#define TOPAZSC_ESB_REGION_X_MAX		(64)
+
+#define TOPAZSC_ESB_REGION_HEIGH		(48)
+#define TOPAZSC_ESB_REGION_WIDTH		(64)
+
+
+#define REG_OFFSET_COMMS_CORE_HOST	0x00070000
+#define REG_SIZE_COMMS_CORE_HOST	0x00070000
+#define REG_OFFSET_TOPAZ_COMMS_HOST	0x00007000
+#define REG_SIZE_TOPAZ_COMMS_HOST	0x00001000
+#define TOPAZ_COMMS_START 0x07000
+/* Register CR_STAT_1 */
+#define TOPAZ_COMMS_CR_STAT_1(X)    (0x0018 + (192 * (X)))
+#define MASK_TOPAZ_COMMS_CR_STAT_DATA_1 0xFFFFFFFF
+#define SHIFT_TOPAZ_COMMS_CR_STAT_DATA_1 0
+#define REGNUM_TOPAZ_COMMS_CR_STAT_DATA_1 0x0018
+
+/* Register CR_STAT_0 */
+#define TOPAZ_COMMS_CR_STAT_0(X)    (0x0014 + (192 * (X)))
+#define MASK_TOPAZ_COMMS_CR_STAT_DATA_0 0xFFFFFFFF
+#define SHIFT_TOPAZ_COMMS_CR_STAT_DATA_0 0
+#define REGNUM_TOPAZ_COMMS_CR_STAT_DATA_0 0x0014
+
+/* Register CR_MTX_STATUS */
+#define TOPAZ_COMMS_CR_MTX_STATUS(X) (0x0010 + (192 * (X)))
+#define MASK_TOPAZ_COMMS_FLAGS_WORD 0xFFFFFFFF
+#define SHIFT_TOPAZ_COMMS_FLAGS_WORD 0
+#define REGNUM_TOPAZ_COMMS_FLAGS_WORD 0x0010
+
+/* Register CR_CMD_WB_VAL */
+#define TOPAZ_COMMS_CR_CMD_WB_VAL(X) (0x000C + (192 * (X)))
+#define MASK_TOPAZ_COMMS_CR_WB_VAL  0xFFFFFFFF
+#define SHIFT_TOPAZ_COMMS_CR_WB_VAL 0
+#define REGNUM_TOPAZ_COMMS_CR_WB_VAL 0x000C
+
+/* Register CR_CMD_WB_ADDR */
+#define TOPAZ_COMMS_CR_CMD_WB_ADDR(X) (0x0008 + (192 * (X)))
+#define MASK_TOPAZ_COMMS_CR_WB_ADDR 0xFFFFFFFF
+#define SHIFT_TOPAZ_COMMS_CR_WB_ADDR 0
+#define REGNUM_TOPAZ_COMMS_CR_WB_ADDR 0x0008
+
+/* Register CR_CMD_DATA_ADDR */
+#define TOPAZ_COMMS_CR_CMD_DATA_ADDR(X) (0x0004 + (192 * (X)))
+#define MASK_TOPAZ_COMMS_CR_DATA_ADDR 0xFFFFFFFF
+#define SHIFT_TOPAZ_COMMS_CR_DATA_ADDR 0
+#define REGNUM_TOPAZ_COMMS_CR_DATA_ADDR 0x0004
+/* Register CR_TOPAZ_HW_CFG */
+#define TOPAZ_CR_TOPAZ_HW_CFG       0x0050
+#define MASK_TOPAZ_CR_NUM_CORES_SUPPORTED 0x0000001F
+#define SHIFT_TOPAZ_CR_NUM_CORES_SUPPORTED 0
+#define REGNUM_TOPAZ_CR_NUM_CORES_SUPPORTED 0x0050
+/* Register CR_CMD_WORD */
+#define TOPAZ_COMMS_CR_CMD_WORD(X)  (0x0000 + (192 * (X)))
+#define MASK_TOPAZ_COMMS_CR_MTX_CMD_ID 0x0000007F
+#define SHIFT_TOPAZ_COMMS_CR_MTX_CMD_ID 0
+#define REGNUM_TOPAZ_COMMS_CR_MTX_CMD_ID 0x0000
+
+#define COMMS_WRITE32(offset, value, core) \
+		MM_WRITE32(TOPAZ_COMMS_START + TOPAZ_CORE_REG_BASE(core), \
+				offset, value)
+#define COMMS_READ32(offset, pointer, core) \
+		MM_READ32(TOPAZ_COMMS_START + TOPAZ_CORE_REG_BASE(core), \
+				offset, pointer)
+
+#define TOPAZ_CORE_NUMBER_SET_OFFSET (0x100 + (2 << 2))
+
+#define REG_SIZE_COMMS_CORE_HOST	0x00070000
+#define MVEASETUPESBREGION(_OriginX_, _OriginY_, _PhysWidth_, \
+		_PhysHeight_, _LogWidth_, _RegType_) \
+	((F_ENCODE(ESB_POS_MANGLER_ORIGINX(_OriginX_), \
+	MVEA_CR_CMC_ESB_REGION_PHYS_ORIGIN_X)) | `\
+		(F_ENCODE(ESB_POS_MANGLER(_OriginY_), \
+	MVEA_CR_CMC_ESB_REGION_PHYS_ORIGIN_Y)) | \
+		(F_ENCODE(ESB_SIZE_MANGLER(_PhysWidth_), \
+	MVEA_CR_CMC_ESB_REGION_PHYS_WIDTH)) | \
+		(F_ENCODE(ESB_SIZE_MANGLER(_PhysHeight_), \
+	MVEA_CR_CMC_ESB_REGION_PHYS_HEIGHT)) | \
+		(F_ENCODE(ESB_SIZE_MANGLER(_LogWidth_),\
+	MVEA_CR_CMC_ESB_REGION_LOGICAL_WIDTH)) | \
+		(F_ENCODE(_RegType_, MVEA_CR_CMC_ESB_REGION_TYPE)) | \
+		(F_ENCODE(1, MVEA_CR_CMC_ESB_REGION_VALID)))
+
+
+#define TOPAZ_MULTICORE_START		0x00000000
+
+#ifdef TOPAZ_PDUMP
+#define TOPAZ_MULTICORE_WRITE32(offset, value) \
+	do { \
+		MM_WRITE32(TOPAZ_MULTICORE_START, offset, value); \
+		DRM_ERROR("TOPAZ_PDUMP: MULTICORE, REG_WT %x %x\n", \
+			offset, value);\
+	} while (0)
+
+#define TOPAZ_MULTICORE_READ32(offset, pointer) \
+	do { \
+		MM_READ32(TOPAZ_MULTICORE_START, offset, pointer); \
+		DRM_ERROR("TOPAZ_PDUMP: MULTICORE, REG_RD %x %x\n", \
+			offset, *(uint32_t *)pointer); \
+	} while (0)
+#else
+#define TOPAZ_MULTICORE_WRITE32(offset, value) \
+	MM_WRITE32(TOPAZ_MULTICORE_START, offset, value)
+#define TOPAZ_MULTICORE_READ32(offset, pointer) \
+	MM_READ32(TOPAZ_MULTICORE_START, offset, pointer)
+#endif
+
+#define MTX_DMA_BURSTSIZE_BYTES 32
+#define MTX_DMA_ALIGNMENT_BYTES 16
+
+#define MTX_DMA_MEMORY_BASE (0x82880000)
+#define PC_START_ADDRESS    (0x80900000)
+
+#define MAX_TOPAZ_CMD_COUNT	(0x1000) /* max syncStatus value used*/
+
+#define ALIGN_64(X) (((X)+63) & ~63)
+#define IMG_BEST_MULTIPASS_MB_TYPE_SHIFT (24)
+#define IMG_BEST_MULTIPASS_MB_TYPE_MASK (0x03 << IMG_BEST_MULTIPASS_MB_TYPE_SHIFT)
+#define ESF_MP_BEST_MOTION_VECTOR_STATS 4
+#define IMG_BEST_MULTIPASS_SAD_MASK (0xFFFFFF)
+
+#define MTX_WRITEBACK_DATASIZE_ROUND 4
+
+#define TOPAZ_MTX_WB_READ32(base, core, word, pointer) \
+	do { \
+		*(uint32_t *)(pointer) = *((uint32_t *)(base) + \
+		(core)*MTX_WRITEBACK_DATASIZE_ROUND + (word)); \
+	} while (0)
+
+#define TOPAZ_MTX_WB_WRITE32(base, core, word, value) \
+	do { \
+		*((uint32_t *)(base) + \
+		(core)*MTX_WRITEBACK_DATASIZE_ROUND + (word)) \
+		= value; \
+	} while (0)
+
+
+#define TOPAZ_MTX_WB_OFFSET(base, core) \
+	((base) + (core)*MTX_WRITEBACK_DATASIZE_ROUND*4)
+
+#define POLL_TOPAZ_FREE_FIFO_SPACE(word_num, delay, retries, pointer) \
+	do { \
+		uint32_t free_space = 0, i; \
+		for (i = 0; i < retries; i++) { \
+			MULTICORE_READ32( \
+			TOPAZHP_TOP_CR_MULTICORE_CMD_FIFO_WRITE_SPACE, \
+			&free_space); \
+			free_space &= MASK_TOPAZHP_TOP_CR_CMD_FIFO_SPACE; \
+			if (free_space >= word_num) \
+				break; \
+			else \
+				PSB_UDELAY(delay); \
+		} \
+		if (i >= retries) { \
+			ret  = -1; \
+			DRM_ERROR("TOPAZ: poll FIFO free space failed " \
+				"(%d words free)!\n", free_space); \
+		} else \
+			ret = 0; \
+		*pointer = free_space; \
+	} while (0)
+
+/* **************** DMAC define **************** */
+enum  DMAC_eBSwap {
+	DMAC_BSWAP_NO_SWAP = 0x0,/* !< No byte swapping will be performed. */
+	DMAC_BSWAP_REVERSE = 0x1,/* !< Byte order will be reversed. */
+};
+enum DMAC_eAccDel {
+	DMAC_ACC_DEL_0 = 0x0,	/* !< Access delay zero clock cycles */
+	DMAC_ACC_DEL_256 = 0x1,	/* !< Access delay 256 clock cycles */
+	DMAC_ACC_DEL_512 = 0x2,	/* !< Access delay 512 clock cycles */
+	DMAC_ACC_DEL_768 = 0x3,	/* !< Access delay 768 clock cycles */
+	DMAC_ACC_DEL_1024 = 0x4,/* !< Access delay 1024 clock cycles */
+	DMAC_ACC_DEL_1280 = 0x5,/* !< Access delay 1280 clock cycles */
+	DMAC_ACC_DEL_1536 = 0x6,/* !< Access delay 1536 clock cycles */
+	DMAC_ACC_DEL_1792 = 0x7,/* !< Access delay 1792 clock cycles */
+};
+
+enum  DMAC_eBurst {
+	DMAC_BURST_0 = 0x0,	/* !< burst size of 0 */
+	DMAC_BURST_1 = 0x1,	/* !< burst size of 1 */
+	DMAC_BURST_2 = 0x2,	/* !< burst size of 2 */
+	DMAC_BURST_3 = 0x3,	/* !< burst size of 3 */
+	DMAC_BURST_4 = 0x4,	/* !< burst size of 4 */
+	DMAC_BURST_5 = 0x5,	/* !< burst size of 5 */
+	DMAC_BURST_6 = 0x6,     /* !< burst size of 6 */
+	DMAC_BURST_7 = 0x7,	/* !< burst size of 7 */
+};
+
+/* commands for topaz,shared with user space driver */
+enum drm_tng_topaz_cmd {
+	MTX_CMDID_NULL = 0,
+	MTX_CMDID_SHUTDOWN = 1,
+	MTX_CMDID_DO_HEADER = 2,
+	MTX_CMDID_ENCODE_FRAME = 3,
+	MTX_CMDID_START_FRAME = 4,
+	MTX_CMDID_ENCODE_SLICE = 5,
+	MTX_CMDID_END_FRAME = 6,
+	MTX_CMDID_SETVIDEO = 7,
+	MTX_CMDID_GETVIDEO = 8,
+	MTX_CMDID_DO_CHANGE_PIPEWORK = 9,
+	MTX_CMDID_PICMGMT = 0xa,
+	MTX_CMDID_RC_UPDATE = 0xb,
+	MTX_CMDID_PROVIDE_SOURCE_BUFFER = 0xc,
+	MTX_CMDID_PROVIDE_REF_BUFFER = 0xd,
+	MTX_CMDID_PROVIDE_CODED_BUFFER = 0xe,
+	MTX_CMDID_ABORT = 0xf,
+
+	/*JPEG commands*/
+	MTX_CMDID_SETQUANT = 0x10,
+	MTX_CMDID_SETUP_INTERFACE = 0x11,
+	MTX_CMDID_ISSUEBUFF = 0x12,
+	MTX_CMDID_SETUP = 0x13,
+
+	MTX_CMDID_PAD = 0x7a, /*Will be ignored*/
+	MTX_CMDID_SW_WRITEREG = 0x7b,
+	MTX_CMDID_SW_LEAVE_LOWPOWER = 0x7c,
+	MTX_CMDID_SW_ENTER_LOWPOWER = 0x7e,
+	MTX_CMDID_SW_NEW_CODEC = 0x7f,
+	MTX_CMDID_SW_FILL_INPUT_CTRL = 0x81,
+	MTX_CMDID_SW_UPDATE_AIR_SEND = 0x82,
+	MTX_CMDID_SW_AIR_BUF_CLEAR = 0x83,
+	MTX_CMDID_SW_UPDATE_AIR_CALC = 0x84
+};
+
+/* codecs topaz supports,shared with user space driver */
+enum drm_tng_topaz_codec {
+	IMG_CODEC_JPEG = 0,    /* !< JPEG */
+	IMG_CODEC_H264_NO_RC,  /* !< H264 with no rate control */
+	IMG_CODEC_H264_VBR,    /* !< H264 variable bitrate */
+	IMG_CODEC_H264_CBR,    /* !< H264 constant bitrate */
+	IMG_CODEC_H264_VCM,    /* !< H264 video conferance mode */
+	IMG_CODEC_H264_LLRC,   /* !< H264 low-latency rate control */
+	IMG_CODEC_H264_ALL_RC, /* !< H264 with multiple rate control modes */
+	IMG_CODEC_H263_NO_RC,  /* !< H263 with no rate control */
+	IMG_CODEC_H263_VBR,    /* !< H263 variable bitrate */
+	IMG_CODEC_H263_CBR,    /* !< H263 constant bitrate */
+	IMG_CODEC_MPEG4_NO_RC, /* !< MPEG4 with no rate control */
+	IMG_CODEC_MPEG4_VBR,   /* !< MPEG4 variable bitrate */
+	IMG_CODEC_MPEG4_CBR,   /* !< MPEG4 constant bitrate */
+	IMG_CODEC_MPEG2_NO_RC, /* !< MPEG2 with no rate control */
+	IMG_CODEC_MPEG2_VBR,   /* !< MPEG2 variable bitrate */
+	IMG_CODEC_MPEG2_CBR,   /* !< MPEG2 constant bitrate */
+	IMG_CODEC_H264MVC_NO_RC, /* !< MVC H264 with no rate control */
+	IMG_CODEC_H264MVC_CBR, /* !< MVC H264 constant bitrate */
+	IMG_CODEC_H264MVC_VBR, /* !< MVC H264 variable bitrate */
+	IMG_CODEC_NUM
+};
+
+enum drm_tng_topaz_standard {
+	IMG_STANDARD_NONE = 0, /* !< There is no FW in MTX memory */
+	IMG_STANDARD_JPEG,     /* !< JPEG */
+	IMG_STANDARD_H264,     /* !< H264 with no rate control */
+	IMG_STANDARD_H263,     /* !< H263 with no rate control */
+	IMG_STANDARD_MPEG4,    /* !< MPEG4 with no rate control */
+	IMG_STANDARD_MPEG2     /* !< MPEG2 with no rate control */
+};
+
+enum MTX_eWriteBackData {
+	/* !< Command word of command executed by MTX */
+	MTX_WRITEBACK_CMDWORD = 0,
+	/* !< Writeback value returned by command */
+	MTX_WRITEBACK_VALUE = 1,
+	/* !< Flags word indicating MTX status (see MTX writeback flags) */
+	MTX_WRITEBACK_FLAGSWORD_0 = 2,
+	/* !< number of bits written out by this core */
+	MTX_WRITEBACK_BITSWRITTEN = 3,
+	/* !< End marker for enum */
+	MTX_WRITEBACK_DATASIZE
+};
+
+typedef struct
+{
+	uint32_t ui32SAD_Intra_MBInfo;		//!< SATD/SAD for best Intra candidate (24-bit unsigned value) plus 8 bit field containing MB info
+	uint32_t ui32SAD_Inter_MBInfo;		//!< SATD/SAD for best Inter candidate (24-bit unsigned value) plus 8 bit field containing MB info
+	uint32_t ui32SAD_Direct_MBInfo;	//!< SATD/SAD for best Direct candidate (24-bit unsigned value) plus 8 bit field containing MB info
+	uint32_t ui32Reserved;
+} IMG_BEST_MULTIPASS_MB_PARAMS, *P_IMG_BEST_MULTIPASS_MB_PARAMS;
+
+typedef struct
+{
+	uint16_t ui16MV4_0_X;       //!< MV4_0_X (this is also MV8_0_X if block size is 8x8, or MV16_X if block size is 16x16)
+	uint16_t ui16MV4_0_Y;       //!< MV4_0_Y (this is also MV8_0_Y if block size is 8x8, or MV16_Y if block size is 16x16)
+	uint16_t ui16MV4_1_X;       //!< MV4_1_X
+	uint16_t ui16MV4_1_Y;       //!< MV4_1_Y
+	uint16_t ui16MV4_2_X;       //!< MV4_2_X
+	uint16_t ui16MV4_2_Y;       //!< MV4_2_Y
+	uint16_t ui16MV4_3_X;       //!< MV4_3_X
+	uint16_t ui16MV4_3_Y;       //!< MV4_3_Y
+
+	uint16_t ui16MV4_4_X;       //!< MV4_4_X (this is also MV8_1_X if block size is 8x8, or 2nd MV if block size is 8x16)
+	uint16_t ui16MV4_4_Y;       //!< MV4_4_Y (this is also MV8_1_Y if block size is 8x8, or 2nd MV if block size is 8x16)
+	uint16_t ui16MV4_5_X;       //!< MV4_5_X
+	uint16_t ui16MV4_5_Y;       //!< MV4_5_Y
+	uint16_t ui16MV4_6_X;       //!< MV4_6_X
+	uint16_t ui16MV4_6_Y;       //!< MV4_6_Y
+	uint16_t ui16MV4_7_X;       //!< MV4_7_X
+	uint16_t ui16MV4_7_Y;       //!< MV4_7_Y
+
+	uint16_t ui16MV4_8_X;       //!< MV4_8_X (this is also MV8_2_X if block size is 8x8, or 2nd MV if block size is 16x8)
+	uint16_t ui16MV4_8_Y;       //!< MV4_8_Y (this is also MV8_2_Y if block size is 8x8, or 2nd MV if block size is 16x8)
+	uint16_t ui16MV4_9_X;       //!< MV4_9_X
+	uint16_t ui16MV4_9_Y;       //!< MV4_9_Y
+	uint16_t ui16MV4_10_X;     //!< MV4_10_X
+	uint16_t ui16MV4_10_Y;     //!< MV4_10_Y
+	uint16_t ui16MV4_11_X;     //!< MV4_11_X
+	uint16_t ui16MV4_11_Y;     //!< MV4_11_Y
+
+	uint16_t ui16MV4_12_X;       //!< MV4_12_X (this is also MV8_3_X if block size is 8x8)
+	uint16_t ui16MV4_12_Y;       //!< MV4_12_Y (this is also MV8_3_Y if block size is 8x8)
+	uint16_t ui16MV4_13_X;       //!< MV4_13_X
+	uint16_t ui16MV4_13_Y;       //!< MV4_13_Y
+	uint16_t ui16MV4_14_X;       //!< MV4_14_X
+	uint16_t ui16MV4_14_Y;       //!< MV4_14_Y
+	uint16_t ui16MV4_15_X;       //!< MV4_15_X
+	uint16_t ui16MV4_15_Y;       //!< MV4_15_Y
+} IMG_BEST_MULTIPASS_MB_PARAMS_IPMV, *P_IMG_BEST_MULTIPASS_MB_PARAMS_IPMV;
+
+typedef struct _IMG_FIRST_STAGE_MB_PARAMS {
+	uint16_t ui16Ipe0Sad;        //!< Final SAD value for best candidate calculated by IPE 0
+	uint16_t ui16Ipe1Sad;        //!< Final SAD value for best candidate calculated by IPE 1
+	uint8_t ui8Ipe0Blks;        //!< Block dimentions for IPE 0 for this Macro-Block
+	uint8_t ui8Ipe1Blks;        //!< Block dimentions for IPE 1 for this Macro-Block
+	uint8_t ui8CARCCmplxVal;    //!< CARC complexity value for this macroblock
+	uint8_t ui8dummy;           //!< Reserved (added for alignment).
+} IMG_FIRST_STAGE_MB_PARAMS, *P_IMG_FIRST_STAGE_MB_PARAMS;
+
+int tng_topaz_reset(struct drm_psb_private *dev_priv);
+int tng_topaz_init_fw(struct drm_device *dev);
+
+int tng_topaz_init_board(
+	struct drm_device *dev,
+	struct psb_video_ctx *video_ctx,
+	enum drm_tng_topaz_codec codec);
+
+int tng_topaz_setup_fw(
+	struct drm_device *dev,
+	struct psb_video_ctx *video_ctx,
+	enum drm_tng_topaz_codec codec);
+
+int tng_topaz_fw_run(
+	struct drm_device *dev,
+	struct psb_video_ctx *video_ctx,
+	enum drm_tng_topaz_codec codec);
+
+int tng_topaz_wait_for_register(
+	struct drm_psb_private *dev_priv,
+	uint32_t checkfunc,
+	uint32_t addr, uint32_t value,
+	uint32_t enable);
+void tng_topaz_mmu_flushcache(struct drm_psb_private *dev_priv);
+
+uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver);
+int pnw_video_get_core_num(struct drm_device *dev, uint64_t user_pointer);
+int pnw_topaz_kick_null_cmd(
+	struct drm_psb_private *dev_priv,
+	uint32_t core_id, uint32_t wb_offset,
+	uint32_t sync_req, uint8_t irq_enable);
+int pnw_wait_on_sync(
+	struct drm_psb_private *dev_priv,
+	uint32_t sync_seq,
+	uint32_t *sync_p);
+
+int pnw_video_frameskip(struct drm_device *dev, uint64_t user_pointer);
+
+static inline char *cmd_to_string(int cmd_id)
+{
+	switch (cmd_id) {
+	case MTX_CMDID_DO_HEADER:
+		return "MTX_CMDID_DO_HEADER";
+	case MTX_CMDID_ENCODE_FRAME:
+		return "MTX_CMDID_ENCODE_FRAME";
+	case MTX_CMDID_SETVIDEO:
+		return "MTX_CMDID_SETVIDEO";
+	case MTX_CMDID_GETVIDEO:
+		return "MTX_CMDID_GETVIDEO";
+	case MTX_CMDID_PICMGMT:
+		return "MTX_CMDID_PICMGMT";
+	case MTX_CMDID_SW_NEW_CODEC:
+		return "MTX_CMDID_SW_NEW_CODEC";
+	case MTX_CMDID_SETQUANT:
+		return "MTX_CMDID_SETQUANT";
+	case MTX_CMDID_SETUP_INTERFACE:
+		return "MTX_CMDID_SETUP_INTERFACE";
+	case MTX_CMDID_ISSUEBUFF:
+		return "MTX_CMDID_ISSUEBUFF";
+	case MTX_CMDID_SETUP:
+		return "MTX_CMDID_SETUP";
+	case MTX_CMDID_SW_WRITEREG:
+		return "MTX_CMDID_SW_WRITEREG";
+	case (MTX_CMDID_PICMGMT | MTX_CMDID_PRIORITY):
+		return "MTX_CMDID_PICMGMT | MTX_CMDID_PRIORITY";
+	case MTX_CMDID_SHUTDOWN:
+		return "MTX_CMDID_SHUTDOWN";
+	case MTX_CMDID_SW_LEAVE_LOWPOWER:
+		return "MTX_CMDID_SW_LEAVE_LOWPOWER";
+	case MTX_CMDID_START_FRAME:
+		return "MTX_CMDID_START_FRAME";
+	case MTX_CMDID_ENCODE_SLICE:
+		return "MTX_CMDID_ENCODE_SLICE";
+	case MTX_CMDID_END_FRAME:
+		return "MTX_CMDID_END_FRAME";
+	case MTX_CMDID_RC_UPDATE:
+		return "MTX_CMDID_RC_UPDATE";
+	case MTX_CMDID_PROVIDE_SOURCE_BUFFER:
+		return "MTX_CMDID_PROVIDE_SOURCE_BUFFER";
+	case MTX_CMDID_PROVIDE_REF_BUFFER:
+		return "MTX_CMDID_PROVIDE_REF_BUFFER";
+	case MTX_CMDID_PROVIDE_CODED_BUFFER:
+		return "MTX_CMDID_PROVIDE_CODED_BUFFER";
+	case MTX_CMDID_NULL:
+		return "MTX_CMDID_NULL";
+	case MTX_CMDID_SW_FILL_INPUT_CTRL:
+		return "MTX_CMDID_SW_FILL_INPUT_CTRL";
+	case MTX_CMDID_SW_UPDATE_AIR_SEND:
+		return "MTX_CMDID_SW_UPDATE_AIR_SEND";
+	default:
+		DRM_ERROR("Command ID: %08x\n", cmd_id);
+		return "Undefined command";
+
+	}
+}
+
+static inline char *codec_to_string(int codec)
+{
+	switch (codec) {
+	/* Just guess, is JPEG firmware included in topaz_bin? */
+	case IMG_CODEC_JPEG:
+		return "JPEG";
+	case IMG_CODEC_H264_NO_RC:
+		return "H264_NO_RC";
+	case IMG_CODEC_H264_VBR:
+		return "H264_VBR";
+	case IMG_CODEC_H264_CBR:
+		return "H264_CBR";
+	case IMG_CODEC_H264_VCM:
+		return "H264_VCM";
+	case IMG_CODEC_H264_LLRC:
+		return "H264_LLRC";
+	case IMG_CODEC_H264_ALL_RC:
+		return "H264_ALL_RC";
+	case IMG_CODEC_H263_NO_RC:
+		return "H263_NO_RC";
+	case IMG_CODEC_H263_VBR:
+		return "H263_VBR";
+	case IMG_CODEC_H263_CBR:
+		return "H263_CBR";
+	case IMG_CODEC_MPEG4_NO_RC:
+		return "MPEG4_NO_RC";
+	case IMG_CODEC_MPEG4_VBR:
+		return "MPEG4_VBR";
+	case IMG_CODEC_MPEG4_CBR:
+		return "MPEG4_CBR";
+	case IMG_CODEC_MPEG2_NO_RC:
+		return "MPEG2_NO_RC";
+	case IMG_CODEC_MPEG2_VBR:
+		return "MPEG2_VBR";
+	case IMG_CODEC_MPEG2_CBR:
+		return "MPEG2_CBR";
+	case IMG_CODEC_H264MVC_NO_RC:
+		return "H264MVC_NO_RC";
+	case IMG_CODEC_H264MVC_CBR:
+		return "H264MVC_CBR";
+	case IMG_CODEC_H264MVC_VBR:
+		return "H264MVC_VBR";
+	default:
+		return "Undefined codec";
+	}
+}
+
+static inline void tng_topaz_clearirq(struct drm_device *dev,
+				      uint32_t clear_topaz)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	if (clear_topaz != 0)
+		MULTICORE_WRITE32(
+			TOPAZHP_TOP_CR_MULTICORE_INT_CLEAR, clear_topaz);
+
+	/* PSB_WVDC32(_PNW_IRQ_TOPAZ_FLAG, PSB_INT_IDENTITY_R); */
+}
+
+static inline uint32_t tng_topaz_queryirq(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	uint32_t val, /* iir, */ clear = 0;
+	struct pnw_topaz_private *topaz_priv = dev_priv->topaz_private;
+
+	printk(KERN_ERR "[FPGA] tng_topaz_queryirq");
+	MULTICORE_READ32(TOPAZHP_TOP_CR_MULTICORE_INT_STAT, &val);
+	/* iir = PSB_RVDC32(PSB_INT_IDENTITY_R); */
+
+	(void) topaz_priv;
+
+	if ((val == 0) /* && (iir == 0) */) {/* no interrupt */
+		printk(KERN_ERR "[FPGA] no interrupt,IIR=TOPAZ_INTSTAT=0\n");
+		return 0;
+	}
+
+#if 0
+	if (val & (1 << 31))
+		PSB_DEBUG_IRQ("TOPAZ:IRQ pin activated,cmd seq=0x%04x," \
+			"sync seq: 0x%08x\n",
+			dev_priv->sequence[LNC_ENGINE_ENCODE],
+			*((uint32_t *)topaz_priv->topaz_mtx_wb +
+				MTX_WRITEBACK_VALUE));
+	else
+		PSB_DEBUG_IRQ("TOPAZ:IRQ pin not activated,cmd seq=0x%04x," \
+			"sync seq: 0x%08x\n",
+			dev_priv->sequence[LNC_ENGINE_ENCODE],
+			*((uint32_t *)topaz_priv->topaz_mtx_wb +
+			MTX_WRITEBACK_VALUE));
+#endif
+	if (val & MASK_TOPAZHP_TOP_CR_INT_STAT_MMU_FAULT) {
+		uint32_t mmu_status, mmu_req;
+
+		MULTICORE_READ32(TOPAZHP_TOP_CR_MMU_STATUS, &mmu_status);
+		MULTICORE_READ32(TOPAZHP_TOP_CR_MMU_STATUS, &mmu_req);
+
+		PSB_DEBUG_IRQ("TOPAZ: detect a page fault interrupt,");
+		PSB_DEBUG_IRQ("address=0x%08x,mem req=0x%08x\n",
+			mmu_status, mmu_req);
+		clear |= F_ENCODE(1, TOPAZHP_TOP_CR_INTCLR_MMU_FAULT);
+	}
+
+	if (val & MASK_TOPAZHP_TOP_CR_INT_STAT_MTX_HALT) {
+		PSB_DEBUG_IRQ("TOPAZ: detect a MTX_HALT interrupt\n");
+		clear |= F_ENCODE(1, TOPAZHP_TOP_CR_INTCLR_MTX_HALT);
+	}
+
+	if (val & MASK_TOPAZHP_TOP_CR_INT_STAT_MTX) {
+		PSB_DEBUG_IRQ("TOPAZ: detect a MTX interrupt\n");
+		clear |= F_ENCODE(1, TOPAZHP_TOP_CR_INTCLR_MTX);
+	}
+
+	if (val & MASK_TOPAZHP_TOP_CR_INT_STAT_DMAC) {
+		PSB_DEBUG_IRQ("TOPAZ: detect a DMAC interrupt\n");
+		clear |= F_ENCODE(1, TOPAZHP_TOP_CR_INTCLR_DMAC);
+	}
+	return clear;
+}
+
+/*Set whether the write operation take effect on all cores
+* or only the specific one*/
+static inline void mtx_set_target(struct drm_psb_private *dev_priv)
+{
+	uint32_t reg_val = 0;
+	reg_val = F_ENCODE(0, TOPAZHP_TOP_CR_WRITES_CORE_ALL) |
+		F_ENCODE(0, TOPAZHP_TOP_CR_RTM_PORT_CORE_SELECT);
+
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_MULTICORE_CORE_SEL_0, reg_val);
+}
+
+int tng_topaz_power_up(
+	struct drm_device *dev,
+	enum drm_tng_topaz_codec codec);
+
+int tng_topaz_power_off(struct drm_device *dev);
+
+int Is_Secure_Fw(void);
+
+#define SHIFT_WB_PRODUCER       (0)
+#define MASK_WB_PRODUCER	\
+	(((1 << LOG2_WB_FIFO_SIZE) - 1) << SHIFT_WB_PRODUCER)
+#define SHIFT_WB_CONSUMER       (0)
+#define MASK_WB_CONSUMER	\
+	(((1 << LOG2_WB_FIFO_SIZE) - 1) << SHIFT_WB_CONSUMER)
+
+#define MTX_CMDID_WB_INTERRUPT 0x8000
+
+/*! Predefined "=" change fucntion. */
+#define CHECKFUNC_ISEQUAL	(0xFFFF0000)
+/*! Predefined "<" change fucntion. */
+#define CHECKFUNC_LESS		(0xFFFF0001)
+/*! Predefined "<=" change fucntion. */
+#define CHECKFUNC_LESSEQ	(0xFFFF0002)
+/*! Predefined ">" change fucntion.  */
+#define CHECKFUNC_GREATER	(0xFFFF0003)
+/*! Predefined ">=" change fucntion. */
+#define CHECKFUNC_GREATEREQ	(0xFFFF0004)
+/*! Predefined "!=" change fucntion. */
+#define CHECKFUNC_NOTEQUAL	(0xFFFF0005)
+
+/* Register CR_PROC_ESB_ACCESS_CONTROL */
+#define TOPAZHP_CR_PROC_ESB_ACCESS_CONTROL 0x00EC
+#define MASK_TOPAZHP_CR_PROC_ESB_ADDR 0x00003FF0
+#define SHIFT_TOPAZHP_CR_PROC_ESB_ADDR 4
+#define REGNUM_TOPAZHP_CR_PROC_ESB_ADDR 0x00EC
+#define SIGNED_TOPAZHP_CR_PROC_ESB_ADDR 0
+
+#define MASK_TOPAZHP_CR_PROC_ESB_READ_N_WRITE 0x00010000
+#define SHIFT_TOPAZHP_CR_PROC_ESB_READ_N_WRITE 16
+#define REGNUM_TOPAZHP_CR_PROC_ESB_READ_N_WRITE 0x00EC
+#define SIGNED_TOPAZHP_CR_PROC_ESB_READ_N_WRITE 0
+
+#define MASK_TOPAZHP_CR_PROC_ESB_OP_VALID 0x00020000
+#define SHIFT_TOPAZHP_CR_PROC_ESB_OP_VALID 17
+#define REGNUM_TOPAZHP_CR_PROC_ESB_OP_VALID 0x00EC
+#define SIGNED_TOPAZHP_CR_PROC_ESB_OP_VALID 0
+
+/* Register INTEL_JMCMP_CF_TOTAL */
+#define INTEL_JMCMP_CF_TOTAL 0x0340
+#define MASK_INTEL_JMCMP_CF_TOTAL 0xFFFFFFFF
+#define SHIFT_INTEL_JMCMP_CF_TOTAL 0
+#define REGNUM_INTEL_JMCMP_CF_TOTAL 0x0340
+#define SIGNED_INTEL_JMCMP_CF_TOTAL 0
+
+/* Register CR_PROC_ESB_ACCESS_WORD0 */
+#define TOPAZHP_CR_PROC_ESB_ACCESS_WORD0 0x00F0
+#define MASK_TOPAZHP_CR_PROC_ESB_WORD0 0xFFFFFFFF
+#define SHIFT_TOPAZHP_CR_PROC_ESB_WORD0 0
+#define REGNUM_TOPAZHP_CR_PROC_ESB_WORD0 0x00F0
+#define SIGNED_TOPAZHP_CR_PROC_ESB_WORD0 0
+
+/*img_soc_dmac_reg.h*/
+/* Register DMAC_SETUP_0 */
+#define IMG_SOC_DMAC_SETUP_0        0x0000
+#define MASK_IMG_SOC_START_ADDRESS_00 0xFFFFFFFF
+#define SHIFT_IMG_SOC_START_ADDRESS_00 0
+#define REGNUM_IMG_SOC_START_ADDRESS_00 0x0000
+#define SIGNED_IMG_SOC_START_ADDRESS_00 0
+
+/* Register DMAC_COUNT_0 */
+#define IMG_SOC_DMAC_COUNT_0        0x0004
+#define MASK_IMG_SOC_CNT_00         0x0000FFFF
+#define SHIFT_IMG_SOC_CNT_00        0
+#define REGNUM_IMG_SOC_CNT_00       0x0004
+#define SIGNED_IMG_SOC_CNT_00       0
+
+#define MASK_IMG_SOC_EN_00          0x00010000
+#define SHIFT_IMG_SOC_EN_00         16
+#define REGNUM_IMG_SOC_EN_00        0x0004
+#define SIGNED_IMG_SOC_EN_00        0
+
+#define MASK_IMG_SOC_ENABLE_2D_MODE_00 0x00020000
+#define SHIFT_IMG_SOC_ENABLE_2D_MODE_00 17
+#define REGNUM_IMG_SOC_ENABLE_2D_MODE_00 0x0004
+#define SIGNED_IMG_SOC_ENABLE_2D_MODE_00 0
+#define IMG_SOC_ENABLE_2D_MODE_00_ENABLED		0x00000001
+#define IMG_SOC_ENABLE_2D_MODE_00_DISABLED		0x00000000
+
+#define MASK_IMG_SOC_LIST_EN_00     0x00040000
+#define SHIFT_IMG_SOC_LIST_EN_00    18
+#define REGNUM_IMG_SOC_LIST_EN_00   0x0004
+#define SIGNED_IMG_SOC_LIST_EN_00   0
+
+#define MASK_IMG_SOC_SRST_00        0x00080000
+#define SHIFT_IMG_SOC_SRST_00       19
+#define REGNUM_IMG_SOC_SRST_00      0x0004
+#define SIGNED_IMG_SOC_SRST_00      0
+
+#define MASK_IMG_SOC_DREQ_00        0x00100000
+#define SHIFT_IMG_SOC_DREQ_00       20
+#define REGNUM_IMG_SOC_DREQ_00      0x0004
+#define SIGNED_IMG_SOC_DREQ_00      0
+
+#define MASK_IMG_SOC_LIST_FIN_CTL_00 0x00400000
+#define SHIFT_IMG_SOC_LIST_FIN_CTL_00 22
+#define REGNUM_IMG_SOC_LIST_FIN_CTL_00 0x0004
+#define SIGNED_IMG_SOC_LIST_FIN_CTL_00 0
+
+#define MASK_IMG_SOC_PI_00          0x03000000
+#define SHIFT_IMG_SOC_PI_00         24
+#define REGNUM_IMG_SOC_PI_00        0x0004
+#define SIGNED_IMG_SOC_PI_00        0
+#define IMG_SOC_PI_00_1		0x00000002
+#define IMG_SOC_PI_00_2		0x00000001
+#define IMG_SOC_PI_00_4		0x00000000
+
+#define MASK_IMG_SOC_DIR_00         0x04000000
+#define SHIFT_IMG_SOC_DIR_00        26
+#define REGNUM_IMG_SOC_DIR_00       0x0004
+#define SIGNED_IMG_SOC_DIR_00       0
+
+#define MASK_IMG_SOC_PW_00          0x18000000
+#define SHIFT_IMG_SOC_PW_00         27
+#define REGNUM_IMG_SOC_PW_00        0x0004
+#define SIGNED_IMG_SOC_PW_00        0
+
+#define MASK_IMG_SOC_TRANSFER_IEN_00 0x20000000
+#define SHIFT_IMG_SOC_TRANSFER_IEN_00 29
+#define REGNUM_IMG_SOC_TRANSFER_IEN_00 0x0004
+#define SIGNED_IMG_SOC_TRANSFER_IEN_00 0
+
+#define MASK_IMG_SOC_BSWAP_00       0x40000000
+#define SHIFT_IMG_SOC_BSWAP_00      30
+#define REGNUM_IMG_SOC_BSWAP_00     0x0004
+#define SIGNED_IMG_SOC_BSWAP_00     0
+
+#define MASK_IMG_SOC_LIST_IEN_00    0x80000000
+#define SHIFT_IMG_SOC_LIST_IEN_00   31
+#define REGNUM_IMG_SOC_LIST_IEN_00  0x0004
+#define SIGNED_IMG_SOC_LIST_IEN_00  0
+
+/* Register DMAC_PERIPH_0 */
+#define IMG_SOC_DMAC_PERIPH_0       0x0008
+#define MASK_IMG_SOC_EXT_SA_00      0x0000000F
+#define SHIFT_IMG_SOC_EXT_SA_00     0
+#define REGNUM_IMG_SOC_EXT_SA_00    0x0008
+#define SIGNED_IMG_SOC_EXT_SA_00    0
+
+#define MASK_IMG_SOC_BURST_00       0x07000000
+#define SHIFT_IMG_SOC_BURST_00      24
+#define REGNUM_IMG_SOC_BURST_00     0x0008
+#define SIGNED_IMG_SOC_BURST_00     0
+
+#define MASK_IMG_SOC_INCR_00        0x08000000
+#define SHIFT_IMG_SOC_INCR_00       27
+#define REGNUM_IMG_SOC_INCR_00      0x0008
+#define SIGNED_IMG_SOC_INCR_00      0
+
+#define MASK_IMG_SOC_ACC_DEL_00     0xE0000000
+#define SHIFT_IMG_SOC_ACC_DEL_00    29
+#define REGNUM_IMG_SOC_ACC_DEL_00   0x0008
+#define SIGNED_IMG_SOC_ACC_DEL_00   0
+
+/* Register DMAC_IRQ_STAT_0 */
+#define IMG_SOC_DMAC_IRQ_STAT_0     0x000C
+#define MASK_IMG_SOC_TRANSFER_FIN_00 0x00020000
+#define SHIFT_IMG_SOC_TRANSFER_FIN_00 17
+#define REGNUM_IMG_SOC_TRANSFER_FIN_00 0x000C
+#define SIGNED_IMG_SOC_TRANSFER_FIN_00 0
+
+#define MASK_IMG_SOC_LIST_INT_00    0x00100000
+#define SHIFT_IMG_SOC_LIST_INT_00   20
+#define REGNUM_IMG_SOC_LIST_INT_00  0x000C
+#define SIGNED_IMG_SOC_LIST_INT_00  0
+
+/* The last linked-list element processed initiated an interrupt.
+If LIST_IEN is set for the channel, an interrupt will be generated
+on the IRQ line until this bit is cleared, */
+#define IMG_SOC_LIST_INT_00_ENABLE	0x00000001
+
+/* last linked list element processed did not initiate an interrupt. */
+#define IMG_SOC_LIST_INT_00_DISABLE	0x00000000
+
+#define MASK_IMG_SOC_LIST_FIN_00    0x00200000
+#define SHIFT_IMG_SOC_LIST_FIN_00   21
+#define REGNUM_IMG_SOC_LIST_FIN_00  0x000C
+#define SIGNED_IMG_SOC_LIST_FIN_00  0
+
+/* Register DMAC_2D_MODE_0 */
+#define IMG_SOC_DMAC_2D_MODE_0      0x0010
+#define MASK_IMG_SOC_ROW_LENGTH_00  0x000003FF
+#define SHIFT_IMG_SOC_ROW_LENGTH_00 0
+#define REGNUM_IMG_SOC_ROW_LENGTH_00 0x0010
+#define SIGNED_IMG_SOC_ROW_LENGTH_00 0
+
+#define MASK_IMG_SOC_LINE_ADDR_OFFSET_00 0x000FFC00
+#define SHIFT_IMG_SOC_LINE_ADDR_OFFSET_00 10
+#define REGNUM_IMG_SOC_LINE_ADDR_OFFSET_00 0x0010
+#define SIGNED_IMG_SOC_LINE_ADDR_OFFSET_00 0
+
+#define MASK_IMG_SOC_REP_COUNT_00   0x7FF00000
+#define SHIFT_IMG_SOC_REP_COUNT_00  20
+#define REGNUM_IMG_SOC_REP_COUNT_00 0x0010
+#define SIGNED_IMG_SOC_REP_COUNT_00 0
+
+/* Register DMAC_PERIPHERAL_ADDR_0 */
+#define IMG_SOC_DMAC_PERIPHERAL_ADDR_0 0x0014
+#define MASK_IMG_SOC_ADDR_00        0x007FFFFF
+#define SHIFT_IMG_SOC_ADDR_00       0
+#define REGNUM_IMG_SOC_ADDR_00      0x0014
+#define SIGNED_IMG_SOC_ADDR_00      0
+
+/* Register DMAC_PER_HOLD_0 */
+#define IMG_SOC_DMAC_PER_HOLD_0     0x0018
+#define MASK_IMG_SOC_PER_HOLD_00    0x0000007F
+#define SHIFT_IMG_SOC_PER_HOLD_00   0
+#define REGNUM_IMG_SOC_PER_HOLD_00  0x0018
+#define SIGNED_IMG_SOC_PER_HOLD_00  0
+
+/* Register DMAC_SETUP_1 */
+#define IMG_SOC_DMAC_SETUP_1        0x0020
+#define MASK_IMG_SOC_START_ADDRESS_01 0xFFFFFFFF
+#define SHIFT_IMG_SOC_START_ADDRESS_01 0
+#define REGNUM_IMG_SOC_START_ADDRESS_01 0x0020
+#define SIGNED_IMG_SOC_START_ADDRESS_01 0
+
+/* Register DMAC_COUNT_1 */
+#define IMG_SOC_DMAC_COUNT_1        0x0024
+#define MASK_IMG_SOC_CNT_01         0x0000FFFF
+#define SHIFT_IMG_SOC_CNT_01        0
+#define REGNUM_IMG_SOC_CNT_01       0x0024
+#define SIGNED_IMG_SOC_CNT_01       0
+
+#define MASK_IMG_SOC_EN_01          0x00010000
+#define SHIFT_IMG_SOC_EN_01         16
+#define REGNUM_IMG_SOC_EN_01        0x0024
+#define SIGNED_IMG_SOC_EN_01        0
+
+#define MASK_IMG_SOC_ENABLE_2D_MODE_01 0x00020000
+#define SHIFT_IMG_SOC_ENABLE_2D_MODE_01 17
+#define REGNUM_IMG_SOC_ENABLE_2D_MODE_01 0x0024
+#define SIGNED_IMG_SOC_ENABLE_2D_MODE_01 0
+#define IMG_SOC_ENABLE_2D_MODE_01_ENABLED		0x00000001
+#define IMG_SOC_ENABLE_2D_MODE_01_DISABLED		0x00000000
+
+#define MASK_IMG_SOC_LIST_EN_01     0x00040000
+#define SHIFT_IMG_SOC_LIST_EN_01    18
+#define REGNUM_IMG_SOC_LIST_EN_01   0x0024
+#define SIGNED_IMG_SOC_LIST_EN_01   0
+
+#define MASK_IMG_SOC_SRST_01        0x00080000
+#define SHIFT_IMG_SOC_SRST_01       19
+#define REGNUM_IMG_SOC_SRST_01      0x0024
+#define SIGNED_IMG_SOC_SRST_01      0
+
+#define MASK_IMG_SOC_DREQ_01        0x00100000
+#define SHIFT_IMG_SOC_DREQ_01       20
+#define REGNUM_IMG_SOC_DREQ_01      0x0024
+#define SIGNED_IMG_SOC_DREQ_01      0
+
+#define MASK_IMG_SOC_LIST_FIN_CTL_01 0x00400000
+#define SHIFT_IMG_SOC_LIST_FIN_CTL_01 22
+#define REGNUM_IMG_SOC_LIST_FIN_CTL_01 0x0024
+#define SIGNED_IMG_SOC_LIST_FIN_CTL_01 0
+
+#define MASK_IMG_SOC_PI_01          0x03000000
+#define SHIFT_IMG_SOC_PI_01         24
+#define REGNUM_IMG_SOC_PI_01        0x0024
+#define SIGNED_IMG_SOC_PI_01        0
+#define IMG_SOC_PI_01_1		0x00000002
+#define IMG_SOC_PI_01_2		0x00000001
+#define IMG_SOC_PI_01_4		0x00000000
+
+#define MASK_IMG_SOC_DIR_01         0x04000000
+#define SHIFT_IMG_SOC_DIR_01        26
+#define REGNUM_IMG_SOC_DIR_01       0x0024
+#define SIGNED_IMG_SOC_DIR_01       0
+
+#define MASK_IMG_SOC_PW_01          0x18000000
+#define SHIFT_IMG_SOC_PW_01         27
+#define REGNUM_IMG_SOC_PW_01        0x0024
+#define SIGNED_IMG_SOC_PW_01        0
+
+#define MASK_IMG_SOC_TRANSFER_IEN_01 0x20000000
+#define SHIFT_IMG_SOC_TRANSFER_IEN_01 29
+#define REGNUM_IMG_SOC_TRANSFER_IEN_01 0x0024
+#define SIGNED_IMG_SOC_TRANSFER_IEN_01 0
+
+#define MASK_IMG_SOC_BSWAP_01       0x40000000
+#define SHIFT_IMG_SOC_BSWAP_01      30
+#define REGNUM_IMG_SOC_BSWAP_01     0x0024
+#define SIGNED_IMG_SOC_BSWAP_01     0
+
+#define MASK_IMG_SOC_LIST_IEN_01    0x80000000
+#define SHIFT_IMG_SOC_LIST_IEN_01   31
+#define REGNUM_IMG_SOC_LIST_IEN_01  0x0024
+#define SIGNED_IMG_SOC_LIST_IEN_01  0
+
+/* Register DMAC_PERIPH_1 */
+#define IMG_SOC_DMAC_PERIPH_1       0x0028
+#define MASK_IMG_SOC_EXT_SA_01      0x0000000F
+#define SHIFT_IMG_SOC_EXT_SA_01     0
+#define REGNUM_IMG_SOC_EXT_SA_01    0x0028
+#define SIGNED_IMG_SOC_EXT_SA_01    0
+
+#define MASK_IMG_SOC_BURST_01       0x07000000
+#define SHIFT_IMG_SOC_BURST_01      24
+#define REGNUM_IMG_SOC_BURST_01     0x0028
+#define SIGNED_IMG_SOC_BURST_01     0
+
+#define MASK_IMG_SOC_INCR_01        0x08000000
+#define SHIFT_IMG_SOC_INCR_01       27
+#define REGNUM_IMG_SOC_INCR_01      0x0028
+#define SIGNED_IMG_SOC_INCR_01      0
+
+#define MASK_IMG_SOC_ACC_DEL_01     0xE0000000
+#define SHIFT_IMG_SOC_ACC_DEL_01    29
+#define REGNUM_IMG_SOC_ACC_DEL_01   0x0028
+#define SIGNED_IMG_SOC_ACC_DEL_01   0
+
+/* Register DMAC_IRQ_STAT_1 */
+#define IMG_SOC_DMAC_IRQ_STAT_1     0x002C
+#define MASK_IMG_SOC_TRANSFER_FIN_01 0x00020000
+#define SHIFT_IMG_SOC_TRANSFER_FIN_01 17
+#define REGNUM_IMG_SOC_TRANSFER_FIN_01 0x002C
+#define SIGNED_IMG_SOC_TRANSFER_FIN_01 0
+
+#define MASK_IMG_SOC_LIST_INT_01    0x00100000
+#define SHIFT_IMG_SOC_LIST_INT_01   20
+#define REGNUM_IMG_SOC_LIST_INT_01  0x002C
+#define SIGNED_IMG_SOC_LIST_INT_01  0
+
+/* The last linked-list element processed initiated an interrupt.
+If LIST_IEN is set for the channel, an interrupt will be generated
+on the IRQ line until this bit is cleared, */
+#define IMG_SOC_LIST_INT_01_ENABLE	0x00000001
+
+/* last linked list element processed did not initiate an interrupt. */
+#define IMG_SOC_LIST_INT_01_DISABLE	0x00000000
+
+#define MASK_IMG_SOC_LIST_FIN_01    0x00200000
+#define SHIFT_IMG_SOC_LIST_FIN_01   21
+#define REGNUM_IMG_SOC_LIST_FIN_01  0x002C
+#define SIGNED_IMG_SOC_LIST_FIN_01  0
+
+/* Register DMAC_2D_MODE_1 */
+#define IMG_SOC_DMAC_2D_MODE_1      0x0030
+#define MASK_IMG_SOC_ROW_LENGTH_01  0x000003FF
+#define SHIFT_IMG_SOC_ROW_LENGTH_01 0
+#define REGNUM_IMG_SOC_ROW_LENGTH_01 0x0030
+#define SIGNED_IMG_SOC_ROW_LENGTH_01 0
+
+#define MASK_IMG_SOC_LINE_ADDR_OFFSET_01 0x000FFC00
+#define SHIFT_IMG_SOC_LINE_ADDR_OFFSET_01 10
+#define REGNUM_IMG_SOC_LINE_ADDR_OFFSET_01 0x0030
+#define SIGNED_IMG_SOC_LINE_ADDR_OFFSET_01 0
+
+#define MASK_IMG_SOC_REP_COUNT_01   0x7FF00000
+#define SHIFT_IMG_SOC_REP_COUNT_01  20
+#define REGNUM_IMG_SOC_REP_COUNT_01 0x0030
+#define SIGNED_IMG_SOC_REP_COUNT_01 0
+
+/* Register DMAC_PERIPHERAL_ADDR_1 */
+#define IMG_SOC_DMAC_PERIPHERAL_ADDR_1 0x0034
+#define MASK_IMG_SOC_ADDR_01        0x007FFFFF
+#define SHIFT_IMG_SOC_ADDR_01       0
+#define REGNUM_IMG_SOC_ADDR_01      0x0034
+#define SIGNED_IMG_SOC_ADDR_01      0
+
+/* Register DMAC_PER_HOLD_1 */
+#define IMG_SOC_DMAC_PER_HOLD_1     0x0038
+#define MASK_IMG_SOC_PER_HOLD_01    0x0000007F
+#define SHIFT_IMG_SOC_PER_HOLD_01   0
+#define REGNUM_IMG_SOC_PER_HOLD_01  0x0038
+#define SIGNED_IMG_SOC_PER_HOLD_01  0
+
+/* Register DMAC_SETUP_2 */
+#define IMG_SOC_DMAC_SETUP_2        0x0040
+#define MASK_IMG_SOC_START_ADDRESS_02 0xFFFFFFFF
+#define SHIFT_IMG_SOC_START_ADDRESS_02 0
+#define REGNUM_IMG_SOC_START_ADDRESS_02 0x0040
+#define SIGNED_IMG_SOC_START_ADDRESS_02 0
+
+/* Register DMAC_COUNT_2 */
+#define IMG_SOC_DMAC_COUNT_2        0x0044
+#define MASK_IMG_SOC_CNT_02         0x0000FFFF
+#define SHIFT_IMG_SOC_CNT_02        0
+#define REGNUM_IMG_SOC_CNT_02       0x0044
+#define SIGNED_IMG_SOC_CNT_02       0
+
+#define MASK_IMG_SOC_EN_02          0x00010000
+#define SHIFT_IMG_SOC_EN_02         16
+#define REGNUM_IMG_SOC_EN_02        0x0044
+#define SIGNED_IMG_SOC_EN_02        0
+
+#define MASK_IMG_SOC_ENABLE_2D_MODE_02 0x00020000
+#define SHIFT_IMG_SOC_ENABLE_2D_MODE_02 17
+#define REGNUM_IMG_SOC_ENABLE_2D_MODE_02 0x0044
+#define SIGNED_IMG_SOC_ENABLE_2D_MODE_02 0
+#define IMG_SOC_ENABLE_2D_MODE_02_ENABLED		0x00000001
+#define IMG_SOC_ENABLE_2D_MODE_02_DISABLED		0x00000000
+
+#define MASK_IMG_SOC_LIST_EN_02     0x00040000
+#define SHIFT_IMG_SOC_LIST_EN_02    18
+#define REGNUM_IMG_SOC_LIST_EN_02   0x0044
+#define SIGNED_IMG_SOC_LIST_EN_02   0
+
+#define MASK_IMG_SOC_SRST_02        0x00080000
+#define SHIFT_IMG_SOC_SRST_02       19
+#define REGNUM_IMG_SOC_SRST_02      0x0044
+#define SIGNED_IMG_SOC_SRST_02      0
+
+#define MASK_IMG_SOC_DREQ_02        0x00100000
+#define SHIFT_IMG_SOC_DREQ_02       20
+#define REGNUM_IMG_SOC_DREQ_02      0x0044
+#define SIGNED_IMG_SOC_DREQ_02      0
+
+#define MASK_IMG_SOC_LIST_FIN_CTL_02 0x00400000
+#define SHIFT_IMG_SOC_LIST_FIN_CTL_02 22
+#define REGNUM_IMG_SOC_LIST_FIN_CTL_02 0x0044
+#define SIGNED_IMG_SOC_LIST_FIN_CTL_02 0
+
+#define MASK_IMG_SOC_PI_02          0x03000000
+#define SHIFT_IMG_SOC_PI_02         24
+#define REGNUM_IMG_SOC_PI_02        0x0044
+#define SIGNED_IMG_SOC_PI_02        0
+#define IMG_SOC_PI_02_1		0x00000002
+#define IMG_SOC_PI_02_2		0x00000001
+#define IMG_SOC_PI_02_4		0x00000000
+
+#define MASK_IMG_SOC_DIR_02         0x04000000
+#define SHIFT_IMG_SOC_DIR_02        26
+#define REGNUM_IMG_SOC_DIR_02       0x0044
+#define SIGNED_IMG_SOC_DIR_02       0
+
+#define MASK_IMG_SOC_PW_02          0x18000000
+#define SHIFT_IMG_SOC_PW_02         27
+#define REGNUM_IMG_SOC_PW_02        0x0044
+#define SIGNED_IMG_SOC_PW_02        0
+
+#define MASK_IMG_SOC_TRANSFER_IEN_02 0x20000000
+#define SHIFT_IMG_SOC_TRANSFER_IEN_02 29
+#define REGNUM_IMG_SOC_TRANSFER_IEN_02 0x0044
+#define SIGNED_IMG_SOC_TRANSFER_IEN_02 0
+
+#define MASK_IMG_SOC_BSWAP_02       0x40000000
+#define SHIFT_IMG_SOC_BSWAP_02      30
+#define REGNUM_IMG_SOC_BSWAP_02     0x0044
+#define SIGNED_IMG_SOC_BSWAP_02     0
+
+#define MASK_IMG_SOC_LIST_IEN_02    0x80000000
+#define SHIFT_IMG_SOC_LIST_IEN_02   31
+#define REGNUM_IMG_SOC_LIST_IEN_02  0x0044
+#define SIGNED_IMG_SOC_LIST_IEN_02  0
+
+/* Register DMAC_PERIPH_2 */
+#define IMG_SOC_DMAC_PERIPH_2       0x0048
+#define MASK_IMG_SOC_EXT_SA_02      0x0000000F
+#define SHIFT_IMG_SOC_EXT_SA_02     0
+#define REGNUM_IMG_SOC_EXT_SA_02    0x0048
+#define SIGNED_IMG_SOC_EXT_SA_02    0
+
+#define MASK_IMG_SOC_BURST_02       0x07000000
+#define SHIFT_IMG_SOC_BURST_02      24
+#define REGNUM_IMG_SOC_BURST_02     0x0048
+#define SIGNED_IMG_SOC_BURST_02     0
+
+#define MASK_IMG_SOC_INCR_02        0x08000000
+#define SHIFT_IMG_SOC_INCR_02       27
+#define REGNUM_IMG_SOC_INCR_02      0x0048
+#define SIGNED_IMG_SOC_INCR_02      0
+
+#define MASK_IMG_SOC_ACC_DEL_02     0xE0000000
+#define SHIFT_IMG_SOC_ACC_DEL_02    29
+#define REGNUM_IMG_SOC_ACC_DEL_02   0x0048
+#define SIGNED_IMG_SOC_ACC_DEL_02   0
+
+/* Register DMAC_IRQ_STAT_2 */
+#define IMG_SOC_DMAC_IRQ_STAT_2     0x004C
+#define MASK_IMG_SOC_TRANSFER_FIN_02 0x00020000
+#define SHIFT_IMG_SOC_TRANSFER_FIN_02 17
+#define REGNUM_IMG_SOC_TRANSFER_FIN_02 0x004C
+#define SIGNED_IMG_SOC_TRANSFER_FIN_02 0
+
+#define MASK_IMG_SOC_LIST_INT_02    0x00100000
+#define SHIFT_IMG_SOC_LIST_INT_02   20
+#define REGNUM_IMG_SOC_LIST_INT_02  0x004C
+#define SIGNED_IMG_SOC_LIST_INT_02  0
+
+/* The last linked-list element processed initiated an interrupt.
+If LIST_IEN is set for the channel, an interrupt will be generated
+on the IRQ line until this bit is cleared, */
+#define IMG_SOC_LIST_INT_02_ENABLE	0x00000001
+/* last linked list element processed did not initiate an interrupt. */
+#define IMG_SOC_LIST_INT_02_DISABLE	0x00000000
+
+#define MASK_IMG_SOC_LIST_FIN_02    0x00200000
+#define SHIFT_IMG_SOC_LIST_FIN_02   21
+#define REGNUM_IMG_SOC_LIST_FIN_02  0x004C
+#define SIGNED_IMG_SOC_LIST_FIN_02  0
+
+/* Register DMAC_2D_MODE_2 */
+#define IMG_SOC_DMAC_2D_MODE_2      0x0050
+#define MASK_IMG_SOC_ROW_LENGTH_02  0x000003FF
+#define SHIFT_IMG_SOC_ROW_LENGTH_02 0
+#define REGNUM_IMG_SOC_ROW_LENGTH_02 0x0050
+#define SIGNED_IMG_SOC_ROW_LENGTH_02 0
+
+#define MASK_IMG_SOC_LINE_ADDR_OFFSET_02 0x000FFC00
+#define SHIFT_IMG_SOC_LINE_ADDR_OFFSET_02 10
+#define REGNUM_IMG_SOC_LINE_ADDR_OFFSET_02 0x0050
+#define SIGNED_IMG_SOC_LINE_ADDR_OFFSET_02 0
+
+#define MASK_IMG_SOC_REP_COUNT_02   0x7FF00000
+#define SHIFT_IMG_SOC_REP_COUNT_02  20
+#define REGNUM_IMG_SOC_REP_COUNT_02 0x0050
+#define SIGNED_IMG_SOC_REP_COUNT_02 0
+
+/* Register DMAC_PERIPHERAL_ADDR_2 */
+#define IMG_SOC_DMAC_PERIPHERAL_ADDR_2 0x0054
+#define MASK_IMG_SOC_ADDR_02        0x007FFFFF
+#define SHIFT_IMG_SOC_ADDR_02       0
+#define REGNUM_IMG_SOC_ADDR_02      0x0054
+#define SIGNED_IMG_SOC_ADDR_02      0
+
+/* Register DMAC_PER_HOLD_2 */
+#define IMG_SOC_DMAC_PER_HOLD_2     0x0058
+#define MASK_IMG_SOC_PER_HOLD_02    0x0000007F
+#define SHIFT_IMG_SOC_PER_HOLD_02   0
+#define REGNUM_IMG_SOC_PER_HOLD_02  0x0058
+#define SIGNED_IMG_SOC_PER_HOLD_02  0
+
+/* Register DMAC_SETUP_3 */
+#define IMG_SOC_DMAC_SETUP_3        0x0060
+#define MASK_IMG_SOC_START_ADDRESS_03 0xFFFFFFFF
+#define SHIFT_IMG_SOC_START_ADDRESS_03 0
+#define REGNUM_IMG_SOC_START_ADDRESS_03 0x0060
+#define SIGNED_IMG_SOC_START_ADDRESS_03 0
+
+/* Register DMAC_COUNT_3 */
+#define IMG_SOC_DMAC_COUNT_3        0x0064
+#define MASK_IMG_SOC_CNT_03         0x0000FFFF
+#define SHIFT_IMG_SOC_CNT_03        0
+#define REGNUM_IMG_SOC_CNT_03       0x0064
+#define SIGNED_IMG_SOC_CNT_03       0
+
+#define MASK_IMG_SOC_EN_03          0x00010000
+#define SHIFT_IMG_SOC_EN_03         16
+#define REGNUM_IMG_SOC_EN_03        0x0064
+#define SIGNED_IMG_SOC_EN_03        0
+
+#define MASK_IMG_SOC_ENABLE_2D_MODE_03 0x00020000
+#define SHIFT_IMG_SOC_ENABLE_2D_MODE_03 17
+#define REGNUM_IMG_SOC_ENABLE_2D_MODE_03 0x0064
+#define SIGNED_IMG_SOC_ENABLE_2D_MODE_03 0
+#define IMG_SOC_ENABLE_2D_MODE_03_ENABLED		0x00000001
+#define IMG_SOC_ENABLE_2D_MODE_03_DISABLED		0x00000000
+
+#define MASK_IMG_SOC_LIST_EN_03     0x00040000
+#define SHIFT_IMG_SOC_LIST_EN_03    18
+#define REGNUM_IMG_SOC_LIST_EN_03   0x0064
+#define SIGNED_IMG_SOC_LIST_EN_03   0
+
+#define MASK_IMG_SOC_SRST_03        0x00080000
+#define SHIFT_IMG_SOC_SRST_03       19
+#define REGNUM_IMG_SOC_SRST_03      0x0064
+#define SIGNED_IMG_SOC_SRST_03      0
+
+#define MASK_IMG_SOC_DREQ_03        0x00100000
+#define SHIFT_IMG_SOC_DREQ_03       20
+#define REGNUM_IMG_SOC_DREQ_03      0x0064
+#define SIGNED_IMG_SOC_DREQ_03      0
+
+#define MASK_IMG_SOC_LIST_FIN_CTL_03 0x00400000
+#define SHIFT_IMG_SOC_LIST_FIN_CTL_03 22
+#define REGNUM_IMG_SOC_LIST_FIN_CTL_03 0x0064
+#define SIGNED_IMG_SOC_LIST_FIN_CTL_03 0
+
+#define MASK_IMG_SOC_PI_03          0x03000000
+#define SHIFT_IMG_SOC_PI_03         24
+#define REGNUM_IMG_SOC_PI_03        0x0064
+#define SIGNED_IMG_SOC_PI_03        0
+#define IMG_SOC_PI_03_1		0x00000002
+#define IMG_SOC_PI_03_2		0x00000001
+#define IMG_SOC_PI_03_4		0x00000000
+
+#define MASK_IMG_SOC_DIR_03         0x04000000
+#define SHIFT_IMG_SOC_DIR_03        26
+#define REGNUM_IMG_SOC_DIR_03       0x0064
+#define SIGNED_IMG_SOC_DIR_03       0
+
+#define MASK_IMG_SOC_PW_03          0x18000000
+#define SHIFT_IMG_SOC_PW_03         27
+#define REGNUM_IMG_SOC_PW_03        0x0064
+#define SIGNED_IMG_SOC_PW_03        0
+
+#define MASK_IMG_SOC_TRANSFER_IEN_03 0x20000000
+#define SHIFT_IMG_SOC_TRANSFER_IEN_03 29
+#define REGNUM_IMG_SOC_TRANSFER_IEN_03 0x0064
+#define SIGNED_IMG_SOC_TRANSFER_IEN_03 0
+
+#define MASK_IMG_SOC_BSWAP_03       0x40000000
+#define SHIFT_IMG_SOC_BSWAP_03      30
+#define REGNUM_IMG_SOC_BSWAP_03     0x0064
+#define SIGNED_IMG_SOC_BSWAP_03     0
+
+#define MASK_IMG_SOC_LIST_IEN_03    0x80000000
+#define SHIFT_IMG_SOC_LIST_IEN_03   31
+#define REGNUM_IMG_SOC_LIST_IEN_03  0x0064
+#define SIGNED_IMG_SOC_LIST_IEN_03  0
+
+/* Register DMAC_PERIPH_3 */
+#define IMG_SOC_DMAC_PERIPH_3       0x0068
+#define MASK_IMG_SOC_EXT_SA_03      0x0000000F
+#define SHIFT_IMG_SOC_EXT_SA_03     0
+#define REGNUM_IMG_SOC_EXT_SA_03    0x0068
+#define SIGNED_IMG_SOC_EXT_SA_03    0
+
+#define MASK_IMG_SOC_BURST_03       0x07000000
+#define SHIFT_IMG_SOC_BURST_03      24
+#define REGNUM_IMG_SOC_BURST_03     0x0068
+#define SIGNED_IMG_SOC_BURST_03     0
+
+#define MASK_IMG_SOC_INCR_03        0x08000000
+#define SHIFT_IMG_SOC_INCR_03       27
+#define REGNUM_IMG_SOC_INCR_03      0x0068
+#define SIGNED_IMG_SOC_INCR_03      0
+
+#define MASK_IMG_SOC_ACC_DEL_03     0xE0000000
+#define SHIFT_IMG_SOC_ACC_DEL_03    29
+#define REGNUM_IMG_SOC_ACC_DEL_03   0x0068
+#define SIGNED_IMG_SOC_ACC_DEL_03   0
+
+/* Register DMAC_IRQ_STAT_3 */
+#define IMG_SOC_DMAC_IRQ_STAT_3     0x006C
+#define MASK_IMG_SOC_TRANSFER_FIN_03 0x00020000
+#define SHIFT_IMG_SOC_TRANSFER_FIN_03 17
+#define REGNUM_IMG_SOC_TRANSFER_FIN_03 0x006C
+#define SIGNED_IMG_SOC_TRANSFER_FIN_03 0
+
+#define MASK_IMG_SOC_LIST_INT_03    0x00100000
+#define SHIFT_IMG_SOC_LIST_INT_03   20
+#define REGNUM_IMG_SOC_LIST_INT_03  0x006C
+#define SIGNED_IMG_SOC_LIST_INT_03  0
+
+/* The last linked-list element processed initiated an interrupt.
+If LIST_IEN is set for the channel, an interrupt will be generated
+on the IRQ line until this bit is cleared, */
+#define IMG_SOC_LIST_INT_03_ENABLE	0x00000001
+
+/* last linked list element processed did not initiate an interrupt. */
+#define IMG_SOC_LIST_INT_03_DISABLE	0x00000000
+
+#define MASK_IMG_SOC_LIST_FIN_03    0x00200000
+#define SHIFT_IMG_SOC_LIST_FIN_03   21
+#define REGNUM_IMG_SOC_LIST_FIN_03  0x006C
+#define SIGNED_IMG_SOC_LIST_FIN_03  0
+
+/* Register DMAC_2D_MODE_3 */
+#define IMG_SOC_DMAC_2D_MODE_3      0x0070
+#define MASK_IMG_SOC_ROW_LENGTH_03  0x000003FF
+#define SHIFT_IMG_SOC_ROW_LENGTH_03 0
+#define REGNUM_IMG_SOC_ROW_LENGTH_03 0x0070
+#define SIGNED_IMG_SOC_ROW_LENGTH_03 0
+
+#define MASK_IMG_SOC_LINE_ADDR_OFFSET_03 0x000FFC00
+#define SHIFT_IMG_SOC_LINE_ADDR_OFFSET_03 10
+#define REGNUM_IMG_SOC_LINE_ADDR_OFFSET_03 0x0070
+#define SIGNED_IMG_SOC_LINE_ADDR_OFFSET_03 0
+
+#define MASK_IMG_SOC_REP_COUNT_03   0x7FF00000
+#define SHIFT_IMG_SOC_REP_COUNT_03  20
+#define REGNUM_IMG_SOC_REP_COUNT_03 0x0070
+#define SIGNED_IMG_SOC_REP_COUNT_03 0
+
+/* Register DMAC_PERIPHERAL_ADDR_3 */
+#define IMG_SOC_DMAC_PERIPHERAL_ADDR_3 0x0074
+#define MASK_IMG_SOC_ADDR_03        0x007FFFFF
+#define SHIFT_IMG_SOC_ADDR_03       0
+#define REGNUM_IMG_SOC_ADDR_03      0x0074
+#define SIGNED_IMG_SOC_ADDR_03      0
+
+/* Register DMAC_PER_HOLD_3 */
+#define IMG_SOC_DMAC_PER_HOLD_3     0x0078
+#define MASK_IMG_SOC_PER_HOLD_03    0x0000007F
+#define SHIFT_IMG_SOC_PER_HOLD_03   0
+#define REGNUM_IMG_SOC_PER_HOLD_03  0x0078
+#define SIGNED_IMG_SOC_PER_HOLD_03  0
+
+/* Register DMAC_SOFT_RESET */
+#define IMG_SOC_DMAC_SOFT_RESET     0x0080
+
+/* Table DMAC */
+
+/* Register DMAC_SETUP */
+#define IMG_SOC_DMAC_SETUP(X)       (0x0000 + (32 * (X)))
+#define MASK_IMG_SOC_START_ADDRESS  0xFFFFFFFF
+#define SHIFT_IMG_SOC_START_ADDRESS 0
+#define REGNUM_IMG_SOC_START_ADDRESS 0x0000
+#define SIGNED_IMG_SOC_START_ADDRESS 0
+
+/* Register DMAC_COUNT */
+#define IMG_SOC_DMAC_COUNT(X)       (0x0004 + (32 * (X)))
+#define MASK_IMG_SOC_CNT            0x0000FFFF
+#define SHIFT_IMG_SOC_CNT           0
+#define REGNUM_IMG_SOC_CNT          0x0004
+#define SIGNED_IMG_SOC_CNT          0
+
+#define MASK_IMG_SOC_EN             0x00010000
+#define SHIFT_IMG_SOC_EN            16
+#define REGNUM_IMG_SOC_EN           0x0004
+#define SIGNED_IMG_SOC_EN           0
+
+#define MASK_IMG_SOC_ENABLE_2D_MODE 0x00020000
+#define SHIFT_IMG_SOC_ENABLE_2D_MODE 17
+#define REGNUM_IMG_SOC_ENABLE_2D_MODE 0x0004
+#define SIGNED_IMG_SOC_ENABLE_2D_MODE 0
+#define IMG_SOC_ENABLE_2D_MODE_ENABLED		0x00000001
+#define IMG_SOC_ENABLE_2D_MODE_DISABLED		0x00000000
+
+#define MASK_IMG_SOC_LIST_EN        0x00040000
+#define SHIFT_IMG_SOC_LIST_EN       18
+#define REGNUM_IMG_SOC_LIST_EN      0x0004
+#define SIGNED_IMG_SOC_LIST_EN      0
+
+#define MASK_IMG_SOC_SRST           0x00080000
+#define SHIFT_IMG_SOC_SRST          19
+#define REGNUM_IMG_SOC_SRST         0x0004
+#define SIGNED_IMG_SOC_SRST         0
+
+#define MASK_IMG_SOC_DREQ           0x00100000
+#define SHIFT_IMG_SOC_DREQ          20
+#define REGNUM_IMG_SOC_DREQ         0x0004
+#define SIGNED_IMG_SOC_DREQ         0
+
+#define MASK_IMG_SOC_LIST_FIN_CTL   0x00400000
+#define SHIFT_IMG_SOC_LIST_FIN_CTL  22
+#define REGNUM_IMG_SOC_LIST_FIN_CTL 0x0004
+#define SIGNED_IMG_SOC_LIST_FIN_CTL 0
+
+#define MASK_IMG_SOC_PI             0x03000000
+#define SHIFT_IMG_SOC_PI            24
+#define REGNUM_IMG_SOC_PI           0x0004
+#define SIGNED_IMG_SOC_PI           0
+#define IMG_SOC_PI_1		0x00000002
+#define IMG_SOC_PI_2		0x00000001
+#define IMG_SOC_PI_4		0x00000000
+
+#define MASK_IMG_SOC_DIR            0x04000000
+#define SHIFT_IMG_SOC_DIR           26
+#define REGNUM_IMG_SOC_DIR          0x0004
+#define SIGNED_IMG_SOC_DIR          0
+
+#define MASK_IMG_SOC_PW             0x18000000
+#define SHIFT_IMG_SOC_PW            27
+#define REGNUM_IMG_SOC_PW           0x0004
+#define SIGNED_IMG_SOC_PW           0
+
+#define MASK_IMG_SOC_TRANSFER_IEN   0x20000000
+#define SHIFT_IMG_SOC_TRANSFER_IEN  29
+#define REGNUM_IMG_SOC_TRANSFER_IEN 0x0004
+#define SIGNED_IMG_SOC_TRANSFER_IEN 0
+
+#define MASK_IMG_SOC_BSWAP          0x40000000
+#define SHIFT_IMG_SOC_BSWAP         30
+#define REGNUM_IMG_SOC_BSWAP        0x0004
+#define SIGNED_IMG_SOC_BSWAP        0
+
+#define MASK_IMG_SOC_LIST_IEN       0x80000000
+#define SHIFT_IMG_SOC_LIST_IEN      31
+#define REGNUM_IMG_SOC_LIST_IEN     0x0004
+#define SIGNED_IMG_SOC_LIST_IEN     0
+
+/* Register DMAC_PERIPH */
+#define IMG_SOC_DMAC_PERIPH(X)      (0x0008 + (32 * (X)))
+#define MASK_IMG_SOC_EXT_SA         0x0000000F
+#define SHIFT_IMG_SOC_EXT_SA        0
+#define REGNUM_IMG_SOC_EXT_SA       0x0008
+#define SIGNED_IMG_SOC_EXT_SA       0
+
+#define MASK_IMG_SOC_BURST          0x07000000
+#define SHIFT_IMG_SOC_BURST         24
+#define REGNUM_IMG_SOC_BURST        0x0008
+#define SIGNED_IMG_SOC_BURST        0
+
+#define MASK_IMG_SOC_INCR           0x08000000
+#define SHIFT_IMG_SOC_INCR          27
+#define REGNUM_IMG_SOC_INCR         0x0008
+#define SIGNED_IMG_SOC_INCR         0
+
+#define MASK_IMG_SOC_ACC_DEL        0xE0000000
+#define SHIFT_IMG_SOC_ACC_DEL       29
+#define REGNUM_IMG_SOC_ACC_DEL      0x0008
+#define SIGNED_IMG_SOC_ACC_DEL      0
+
+/* Register DMAC_IRQ_STAT */
+#define IMG_SOC_DMAC_IRQ_STAT(X)    (0x000C + (32 * (X)))
+#define MASK_IMG_SOC_TRANSFER_FIN   0x00020000
+#define SHIFT_IMG_SOC_TRANSFER_FIN  17
+#define REGNUM_IMG_SOC_TRANSFER_FIN 0x000C
+#define SIGNED_IMG_SOC_TRANSFER_FIN 0
+
+#define MASK_IMG_SOC_LIST_INT       0x00100000
+#define SHIFT_IMG_SOC_LIST_INT      20
+#define REGNUM_IMG_SOC_LIST_INT     0x000C
+#define SIGNED_IMG_SOC_LIST_INT     0
+/* The last linked-list element processed initiated an interrupt.
+If LIST_IEN is set for the channel, an interrupt will be generated
+on the IRQ line until this bit is cleared, */
+#define IMG_SOC_LIST_INT_ENABLE		0x00000001
+/* last linked list element processed did not initiate an interrupt. */
+#define IMG_SOC_LIST_INT_DISABLE	0x00000000
+
+#define MASK_IMG_SOC_LIST_FIN       0x00200000
+#define SHIFT_IMG_SOC_LIST_FIN      21
+#define REGNUM_IMG_SOC_LIST_FIN     0x000C
+#define SIGNED_IMG_SOC_LIST_FIN     0
+
+/* Register DMAC_2D_MODE */
+#define IMG_SOC_DMAC_2D_MODE(X)     (0x0010 + (32 * (X)))
+#define MASK_IMG_SOC_ROW_LENGTH     0x000003FF
+#define SHIFT_IMG_SOC_ROW_LENGTH    0
+#define REGNUM_IMG_SOC_ROW_LENGTH   0x0010
+#define SIGNED_IMG_SOC_ROW_LENGTH   0
+
+#define MASK_IMG_SOC_LINE_ADDR_OFFSET 0x000FFC00
+#define SHIFT_IMG_SOC_LINE_ADDR_OFFSET 10
+#define REGNUM_IMG_SOC_LINE_ADDR_OFFSET 0x0010
+#define SIGNED_IMG_SOC_LINE_ADDR_OFFSET 0
+
+#define MASK_IMG_SOC_REP_COUNT      0x7FF00000
+#define SHIFT_IMG_SOC_REP_COUNT     20
+#define REGNUM_IMG_SOC_REP_COUNT    0x0010
+#define SIGNED_IMG_SOC_REP_COUNT    0
+
+/* Register DMAC_PERIPHERAL_ADDR */
+#define IMG_SOC_DMAC_PERIPHERAL_ADDR(X) (0x0014 + (32 * (X)))
+#define MASK_IMG_SOC_ADDR           0x007FFFFF
+#define SHIFT_IMG_SOC_ADDR          0
+#define REGNUM_IMG_SOC_ADDR         0x0014
+#define SIGNED_IMG_SOC_ADDR         0
+
+/* Register DMAC_PER_HOLD */
+#define IMG_SOC_DMAC_PER_HOLD(X)    (0x0018 + (32 * (X)))
+#define MASK_IMG_SOC_PER_HOLD       0x0000007F
+#define SHIFT_IMG_SOC_PER_HOLD      0
+#define REGNUM_IMG_SOC_PER_HOLD     0x0018
+#define SIGNED_IMG_SOC_PER_HOLD     0
+
+/* Number of entries in table DMAC */
+
+#define IMG_SOC_DMAC_SIZE_UINT32    31
+#define IMG_SOC_DMAC_NUM_ENTRIES    4
+
+/* From mtx_reg.h */
+/* Register CR_MTX_ENABLE */
+#define MTX_CR_MTX_ENABLE           0x0000
+#define MASK_MTX_MTX_ENABLE         0x00000001
+#define SHIFT_MTX_MTX_ENABLE        0
+#define REGNUM_MTX_MTX_ENABLE       0x0000
+#define SIGNED_MTX_MTX_ENABLE       0
+
+#define MASK_MTX_MTX_TOFF           0x00000002
+#define SHIFT_MTX_MTX_TOFF          1
+#define REGNUM_MTX_MTX_TOFF         0x0000
+#define SIGNED_MTX_MTX_TOFF         0
+
+#define MASK_MTX_MTX_TSTOPPED       0x00000004
+#define SHIFT_MTX_MTX_TSTOPPED      2
+#define REGNUM_MTX_MTX_TSTOPPED     0x0000
+#define SIGNED_MTX_MTX_TSTOPPED     0
+
+#define MASK_MTX_MTX_STEP_REC       0x000000F0
+#define SHIFT_MTX_MTX_STEP_REC      4
+#define REGNUM_MTX_MTX_STEP_REC     0x0000
+#define SIGNED_MTX_MTX_STEP_REC     0
+
+#define MASK_MTX_MTX_ARCH           0x00000800
+#define SHIFT_MTX_MTX_ARCH          11
+#define REGNUM_MTX_MTX_ARCH         0x0000
+#define SIGNED_MTX_MTX_ARCH         0
+
+#define MASK_MTX_MTX_TCAPS          0x0000F000
+#define SHIFT_MTX_MTX_TCAPS         12
+#define REGNUM_MTX_MTX_TCAPS        0x0000
+#define SIGNED_MTX_MTX_TCAPS        0
+
+#define MASK_MTX_MTX_MIN_REV        0x00FF0000
+#define SHIFT_MTX_MTX_MIN_REV       16
+#define REGNUM_MTX_MTX_MIN_REV      0x0000
+#define SIGNED_MTX_MTX_MIN_REV      0
+
+#define MASK_MTX_MTX_MAJ_REV        0xFF000000
+#define SHIFT_MTX_MTX_MAJ_REV       24
+#define REGNUM_MTX_MTX_MAJ_REV      0x0000
+#define SIGNED_MTX_MTX_MAJ_REV      0
+
+/* Register CR_MTX_STATUS */
+#define MTX_CR_MTX_STATUS           0x0008
+#define MASK_MTX_MTX_CF_C           0x00000001
+#define SHIFT_MTX_MTX_CF_C          0
+#define REGNUM_MTX_MTX_CF_C         0x0008
+#define SIGNED_MTX_MTX_CF_C         0
+
+#define MASK_MTX_MTX_CR_O           0x00000002
+#define SHIFT_MTX_MTX_CR_O          1
+#define REGNUM_MTX_MTX_CR_O         0x0008
+#define SIGNED_MTX_MTX_CR_O         0
+
+#define MASK_MTX_MTX_CF_N           0x00000004
+#define SHIFT_MTX_MTX_CF_N          2
+#define REGNUM_MTX_MTX_CF_N         0x0008
+#define SIGNED_MTX_MTX_CF_N         0
+
+#define MASK_MTX_MTX_CF_Z           0x00000008
+#define SHIFT_MTX_MTX_CF_Z          3
+#define REGNUM_MTX_MTX_CF_Z         0x0008
+#define SIGNED_MTX_MTX_CF_Z         0
+
+#define MASK_MTX_MTX_LSM_STEP       0x00000700
+#define SHIFT_MTX_MTX_LSM_STEP      8
+#define REGNUM_MTX_MTX_LSM_STEP     0x0008
+#define SIGNED_MTX_MTX_LSM_STEP     0
+
+#define MASK_MTX_MTX_HREASON        0x000C0000
+#define SHIFT_MTX_MTX_HREASON       18
+#define REGNUM_MTX_MTX_HREASON      0x0008
+#define SIGNED_MTX_MTX_HREASON      0
+
+/* Register CR_MTX_KICK */
+#define MTX_CR_MTX_KICK             0x0080
+#define MASK_MTX_MTX_KICK           0x0000FFFF
+#define SHIFT_MTX_MTX_KICK          0
+#define REGNUM_MTX_MTX_KICK         0x0080
+#define SIGNED_MTX_MTX_KICK         0
+
+/* Register CR_MTX_KICKI */
+#define MTX_CR_MTX_KICKI            0x0088
+#define MASK_MTX_MTX_KICKI          0x0000FFFF
+#define SHIFT_MTX_MTX_KICKI         0
+#define REGNUM_MTX_MTX_KICKI        0x0088
+#define SIGNED_MTX_MTX_KICKI        0
+
+/* Register CR_MTX_FAULT0 */
+#define MTX_CR_MTX_FAULT0           0x0090
+#define MASK_MTX_REQ_SB             0x000000FF
+#define SHIFT_MTX_REQ_SB            0
+#define REGNUM_MTX_REQ_SB           0x0090
+#define SIGNED_MTX_REQ_SB           0
+
+#define MASK_MTX_REQ_RN_W           0x00000100
+#define SHIFT_MTX_REQ_RN_W          8
+#define REGNUM_MTX_REQ_RN_W         0x0090
+#define SIGNED_MTX_REQ_RN_W         0
+
+#define MASK_MTX_REQ_STATE          0x00000C00
+#define SHIFT_MTX_REQ_STATE         10
+#define REGNUM_MTX_REQ_STATE        0x0090
+#define SIGNED_MTX_REQ_STATE        0
+
+#define MASK_MTX_REQ_LD_DEST        0x00FF0000
+#define SHIFT_MTX_REQ_LD_DEST       16
+#define REGNUM_MTX_REQ_LD_DEST      0x0090
+#define SIGNED_MTX_REQ_LD_DEST      0
+
+#define MASK_MTX_REQ_LD_REG         0xF8000000
+#define SHIFT_MTX_REQ_LD_REG        27
+#define REGNUM_MTX_REQ_LD_REG       0x0090
+#define SIGNED_MTX_REQ_LD_REG       0
+
+/* Register CR_MTX_REGISTER_READ_WRITE_DATA */
+#define MTX_CR_MTX_REGISTER_READ_WRITE_DATA 0x00F8
+/* Register CR_MTX_REGISTER_READ_WRITE_REQUEST */
+#define MTX_CR_MTX_REGISTER_READ_WRITE_REQUEST 0x00FC
+#define MASK_MTX_MTX_USPECIFIER     0x0000000F
+#define SHIFT_MTX_MTX_USPECIFIER    0
+#define REGNUM_MTX_MTX_USPECIFIER   0x00FC
+#define SIGNED_MTX_MTX_USPECIFIER   0
+
+#define MASK_MTX_MTX_RSPECIFIER     0x00000070
+#define SHIFT_MTX_MTX_RSPECIFIER    4
+#define REGNUM_MTX_MTX_RSPECIFIER   0x00FC
+#define SIGNED_MTX_MTX_RSPECIFIER   0
+
+#define MASK_MTX_MTX_RNW            0x00010000
+#define SHIFT_MTX_MTX_RNW           16
+#define REGNUM_MTX_MTX_RNW          0x00FC
+#define SIGNED_MTX_MTX_RNW          0
+
+#define MASK_MTX_MTX_DREADY         0x80000000
+#define SHIFT_MTX_MTX_DREADY        31
+#define REGNUM_MTX_MTX_DREADY       0x00FC
+#define SIGNED_MTX_MTX_DREADY       0
+
+/* Register CR_MTX_RAM_ACCESS_DATA_EXCHANGE */
+#define MTX_CR_MTX_RAM_ACCESS_DATA_EXCHANGE 0x0100
+/* Register CR_MTX_RAM_ACCESS_DATA_TRANSFER */
+#define MTX_CR_MTX_RAM_ACCESS_DATA_TRANSFER 0x0104
+/* Register CR_MTX_RAM_ACCESS_CONTROL */
+#define MTX_CR_MTX_RAM_ACCESS_CONTROL 0x0108
+#define MASK_MTX_MTX_MCMR           0x00000001
+#define SHIFT_MTX_MTX_MCMR          0
+#define REGNUM_MTX_MTX_MCMR         0x0108
+#define SIGNED_MTX_MTX_MCMR         0
+
+#define MASK_MTX_MTX_MCMAI          0x00000002
+#define SHIFT_MTX_MTX_MCMAI         1
+#define REGNUM_MTX_MTX_MCMAI        0x0108
+#define SIGNED_MTX_MTX_MCMAI        0
+
+#define MASK_MTX_MTX_MCM_ADDR       0x000FFFFC
+#define SHIFT_MTX_MTX_MCM_ADDR      2
+#define REGNUM_MTX_MTX_MCM_ADDR     0x0108
+#define SIGNED_MTX_MTX_MCM_ADDR     0
+
+#define MASK_MTX_MTX_MCMID          0x0FF00000
+#define SHIFT_MTX_MTX_MCMID         20
+#define REGNUM_MTX_MTX_MCMID        0x0108
+#define SIGNED_MTX_MTX_MCMID        0
+
+/* Register CR_MTX_RAM_ACCESS_STATUS */
+#define MTX_CR_MTX_RAM_ACCESS_STATUS 0x010C
+#define MASK_MTX_MTX_MTX_MCM_STAT   0x00000001
+#define SHIFT_MTX_MTX_MTX_MCM_STAT  0
+#define REGNUM_MTX_MTX_MTX_MCM_STAT 0x010C
+#define SIGNED_MTX_MTX_MTX_MCM_STAT 0
+
+/* Register CR_MTX_SOFT_RESET */
+#define MTX_CR_MTX_SOFT_RESET       0x0200
+#define MASK_MTX_MTX_RESET          0x00000001
+#define SHIFT_MTX_MTX_RESET         0
+#define REGNUM_MTX_MTX_RESET        0x0200
+#define SIGNED_MTX_MTX_RESET        0
+
+/* Register CR_MTX_SYSC_CDMAC */
+#define MTX_CR_MTX_SYSC_CDMAC       0x0340
+#define MASK_MTX_LENGTH             0x0000FFFF
+#define SHIFT_MTX_LENGTH            0
+#define REGNUM_MTX_LENGTH           0x0340
+#define SIGNED_MTX_LENGTH           0
+
+#define MASK_MTX_ENABLE             0x00010000
+#define SHIFT_MTX_ENABLE            16
+#define REGNUM_MTX_ENABLE           0x0340
+#define SIGNED_MTX_ENABLE           0
+
+#define MASK_MTX_RNW                0x00020000
+#define SHIFT_MTX_RNW               17
+#define REGNUM_MTX_RNW              0x0340
+#define SIGNED_MTX_RNW              0
+
+#define MASK_MTX_BURSTSIZE          0x07000000
+#define SHIFT_MTX_BURSTSIZE         24
+#define REGNUM_MTX_BURSTSIZE        0x0340
+#define SIGNED_MTX_BURSTSIZE        0
+
+/* Register CR_MTX_SYSC_CDMAA */
+#define MTX_CR_MTX_SYSC_CDMAA       0x0344
+#define MASK_MTX_CDMAA_ADDRESS      0x03FFFFFC
+#define SHIFT_MTX_CDMAA_ADDRESS     2
+#define REGNUM_MTX_CDMAA_ADDRESS    0x0344
+#define SIGNED_MTX_CDMAA_ADDRESS    0
+
+/* Register CR_MTX_SYSC_CDMAS0 */
+#define MTX_CR_MTX_SYSC_CDMAS0      0x0348
+#define MASK_MTX_DONOTHING          0x00000001
+#define SHIFT_MTX_DONOTHING         0
+#define REGNUM_MTX_DONOTHING        0x0348
+#define SIGNED_MTX_DONOTHING        0
+
+#define MASK_MTX_DMAREQUEST         0x00000010
+#define SHIFT_MTX_DMAREQUEST        4
+#define REGNUM_MTX_DMAREQUEST       0x0348
+#define SIGNED_MTX_DMAREQUEST       0
+
+#define MASK_MTX_RAMNUMBER          0x00000F00
+#define SHIFT_MTX_RAMNUMBER         8
+#define REGNUM_MTX_RAMNUMBER        0x0348
+#define SIGNED_MTX_RAMNUMBER        0
+
+#define MASK_MTX_COUNT              0xFFFF0000
+#define SHIFT_MTX_COUNT             16
+#define REGNUM_MTX_COUNT            0x0348
+#define SIGNED_MTX_COUNT            0
+
+/* Register CR_MTX_SYSC_CDMAS1 */
+#define MTX_CR_MTX_SYSC_CDMAS1      0x034C
+#define MASK_MTX_CDMAS1_ADDRESS     0x03FFFFFC
+#define SHIFT_MTX_CDMAS1_ADDRESS    2
+#define REGNUM_MTX_CDMAS1_ADDRESS   0x034C
+#define SIGNED_MTX_CDMAS1_ADDRESS   0
+
+/* Register CR_MTX_SYSC_CDMAT */
+#define MTX_CR_MTX_SYSC_CDMAT       0x0350
+#define MASK_MTX_TRANSFERDATA       0xFFFFFFFF
+#define SHIFT_MTX_TRANSFERDATA      0
+#define REGNUM_MTX_TRANSFERDATA     0x0350
+#define SIGNED_MTX_TRANSFERDATA     0
+
+/* Register CR_TOPAZHP_CRC_CLEAR */
+#define TOPAZHP_CR_TOPAZHP_CRC_CLEAR 0x03F4
+#define MASK_TOPAZHP_CR_IPE_CRC_CLEAR 0x00000001
+#define SHIFT_TOPAZHP_CR_IPE_CRC_CLEAR 0
+#define REGNUM_TOPAZHP_CR_IPE_CRC_CLEAR 0x03F4
+#define SIGNED_TOPAZHP_CR_IPE_CRC_CLEAR 0
+
+#define MASK_TOPAZHP_CR_SPE_CRC_CLEAR 0x00000002
+#define SHIFT_TOPAZHP_CR_SPE_CRC_CLEAR 1
+#define REGNUM_TOPAZHP_CR_SPE_CRC_CLEAR 0x03F4
+#define SIGNED_TOPAZHP_CR_SPE_CRC_CLEAR 0
+
+#define MASK_TOPAZHP_CR_JMCOMP_CRC_CLEAR 0x00000008
+#define SHIFT_TOPAZHP_CR_JMCOMP_CRC_CLEAR 3
+#define REGNUM_TOPAZHP_CR_JMCOMP_CRC_CLEAR 0x03F4
+#define SIGNED_TOPAZHP_CR_JMCOMP_CRC_CLEAR 0
+
+#define MASK_TOPAZHP_CR_H264COMP_CRC_CLEAR 0x00000010
+#define SHIFT_TOPAZHP_CR_H264COMP_CRC_CLEAR 4
+#define REGNUM_TOPAZHP_CR_H264COMP_CRC_CLEAR 0x03F4
+#define SIGNED_TOPAZHP_CR_H264COMP_CRC_CLEAR 0
+
+#define MASK_TOPAZHP_CR_DB_CRC_CLEAR 0x00000020
+#define SHIFT_TOPAZHP_CR_DB_CRC_CLEAR 5
+#define REGNUM_TOPAZHP_CR_DB_CRC_CLEAR 0x03F4
+#define SIGNED_TOPAZHP_CR_DB_CRC_CLEAR 0
+
+#define MASK_TOPAZHP_CR_VLC_CRC_CLEAR 0x00000040
+#define SHIFT_TOPAZHP_CR_VLC_CRC_CLEAR 6
+#define REGNUM_TOPAZHP_CR_VLC_CRC_CLEAR 0x03F4
+#define SIGNED_TOPAZHP_CR_VLC_CRC_CLEAR 0
+
+#define MASK_TOPAZHP_CR_PC_CRC_CLEAR 0x00000080
+#define SHIFT_TOPAZHP_CR_PC_CRC_CLEAR 7
+#define REGNUM_TOPAZHP_CR_PC_CRC_CLEAR 0x03F4
+#define SIGNED_TOPAZHP_CR_PC_CRC_CLEAR 0
+
+#define MASK_TOPAZHP_CR_LRITC_CRC_CLEAR 0x00000100
+#define SHIFT_TOPAZHP_CR_LRITC_CRC_CLEAR 8
+#define REGNUM_TOPAZHP_CR_LRITC_CRC_CLEAR 0x03F4
+#define SIGNED_TOPAZHP_CR_LRITC_CRC_CLEAR 0
+
+
+/* Table CR_JMCOMP_LUMA_QUANTISER_TABLE */
+
+/* Register CR_JMCOMP_LUMA_QUANTISER */
+#define TOPAZHP_CR_JMCOMP_LUMA_QUANTISER(X) (0x0280 + (4 * (X)))
+#define MASK_TOPAZHP_CR_JMCOMP_LUMA_QUANT(i) (0x000000FF << (0 + ((i) * 8)))
+#define SHIFT_TOPAZHP_CR_JMCOMP_LUMA_QUANT(i) (0 + ((i) * 8))
+#define REGNUM_TOPAZHP_CR_JMCOMP_LUMA_QUANT(X, i) (0x0280 + (4 * (X)))
+
+/* Number of entries in table CR_JMCOMP_LUMA_QUANTISER_TABLE */
+
+#define TOPAZHP_CR_JMCOMP_LUMA_QUANTISER_TABLE_SIZE_UINT32 16
+#define TOPAZHP_CR_JMCOMP_LUMA_QUANTISER_TABLE_NUM_ENTRIES 16
+
+
+/* Table CR_JMCOMP_CHROMA_QUANTISER_TABLE */
+
+/* Register CR_JMCOMP_CHROMA_QUANTISER */
+#define TOPAZHP_CR_JMCOMP_CHROMA_QUANTISER(X) (0x02C0 + (4 * (X)))
+#define MASK_TOPAZHP_CR_JMCOMP_CHROMA_QUANT(i) (0x000000FF << (0 + ((i) * 8)))
+#define SHIFT_TOPAZHP_CR_JMCOMP_CHROMA_QUANT(i) (0 + ((i) * 8))
+#define REGNUM_TOPAZHP_CR_JMCOMP_CHROMA_QUANT(X, i) (0x02C0 + (4 * (X)))
+
+/* Number of entries in table CR_JMCOMP_CHROMA_QUANTISER_TABLE */
+
+#define TOPAZHP_CR_JMCOMP_CHROMA_QUANTISER_TABLE_SIZE_UINT32 16
+#define TOPAZHP_CR_JMCOMP_CHROMA_QUANTISER_TABLE_NUM_ENTRIES 16
+
+
+/* Table CR_WEIGHTED_PRED_COEFFS */
+
+/* Register CR_WEIGHTED_PRED_COEFFS */
+#define TOPAZHP_CR_WEIGHTED_PRED_COEFFS(X) (0x03D4 + (4 * (X)))
+#define MASK_TOPAZHP_CR_WEIGHTED_PRED_OFFSET(i) (0x000000FF << (16 + ((i) * 8)))
+#define SHIFT_TOPAZHP_CR_WEIGHTED_PRED_OFFSET(i) (16 + ((i) * 8))
+#define REGNUM_TOPAZHP_CR_WEIGHTED_PRED_OFFSET(X, i) (0x03D4 + (4 * (X)))
+
+#define MASK_TOPAZHP_CR_WEIGHTED_PRED_WEIGHT(i) (0x000000FF << (0 + ((i) * 8)))
+#define SHIFT_TOPAZHP_CR_WEIGHTED_PRED_WEIGHT(i) (0 + ((i) * 8))
+#define REGNUM_TOPAZHP_CR_WEIGHTED_PRED_WEIGHT(X, i) \
+	(0x03D4 + (4 * (X)))
+
+/* Number of entries in table CR_WEIGHTED_PRED_COEFFS */
+
+#define TOPAZHP_CR_WEIGHTED_PRED_COEFFS_SIZE_UINT32 3
+#define TOPAZHP_CR_WEIGHTED_PRED_COEFFS_NUM_ENTRIES 3
+
+
+
+/* From system.h */
+/*!
+******************************************************************************
+ TOPAZ configuration values:
+******************************************************************************/
+
+/*! The number of TOPAZ cores present in the system */
+#define TOPAZHP_NUM_PIPES 3
+
+#define REG_BASE_MTX			0x04800000 /* 0x0200000 FIXME!!! */
+#define REG_BASE_HOST			0x00000000
+
+/* Multicore Regs */
+#define REG_OFFSET_TOPAZ_MULTICORE	0x00000000
+#define REG_OFFSET_TOPAZ_DMAC		0x00000400
+#define REG_OFFSET_TOPAZ_MTX		0x00000800
+
+#define REG_SIZE_TOPAZ_MULTICORE	0x00000400
+#define REG_SIZE_TOPAZ_DMAC		0x00000400
+#define REG_SIZE_TOPAZ_MTX		0x00000800
+
+/* Register bank addresses - Host View */
+#define REG_START_TOPAZ_MULTICORE_HOST	\
+	(REG_BASE_HOST + REG_OFFSET_TOPAZ_MULTICORE)
+#define REG_END_TOPAZ_MULTICORE_HOST	\
+	(REG_START_TOPAZ_MULTICORE_HOST + REG_SIZE_TOPAZ_MULTICORE)
+
+#define REG_START_TOPAZ_DMAC_HOST	\
+	(REG_BASE_HOST + REG_OFFSET_TOPAZ_DMAC)
+#define REG_END_TOPAZ_DMAC_HOST		\
+	(REG_START_TOPAZ_DMAC_HOST + REG_SIZE_TOPAZ_DMAC)
+
+#define REG_START_TOPAZ_MTX_HOST	\
+	(REG_BASE_HOST + REG_OFFSET_TOPAZ_MTX)
+#define REG_END_TOPAZ_MTX_HOST(core)	\
+	(REG_START_TOPAZ_MTX_HOST + REG_SIZE_TOPAZ_MTX)
+
+#define REG_START_TOPAZ_TOPAZ_HOST(core)	\
+	(REG_BASE_HOST + (REG_SIZE_TOPAZ_CORE_HOST*core) + \
+	REG_OFFSET_TOPAZ_CORE_HOST + REG_OFFSET_TOPAZ_TOPAZ_HOST)
+#define REG_END_TOPAZ_TOPAZ_HOST(core)	\
+	(REG_START_TOPAZ_TOPAZ_HOST(core) + REG_SIZE_TOPAZ_TOPAZ_HOST)
+
+/* FIXME ALL OF THIS IS INCORRECT(start) */
+/* Topaz core registers MTX view */
+#define REG_OFFSET_TOPAZ_CORE_MTX	0x00010000	/* MUST confirm */
+#define REG_SIZE_TOPAZ_CORE_MTX		0x00010000	/* MUST confirm */
+
+#define REG_OFFSET_TOPAZ_MTX_MTX	0x00000000
+#define REG_OFFSET_TOPAZ_TOPAZ_MTX	0x00000800
+#define REG_OFFSET_TOPAZ_MVEA_MTX	0x00000C00
+#define REG_OFFSET_TOPAZ_MVEACMD_MTX	0x00001000
+#define REG_OFFSET_TOPAZ_VLC_MTX	0x00001400
+#define REG_OFFSET_TOPAZ_DEBLOCKER_MTX	0x00001800
+#define REG_OFFSET_TOPAZ_COMMS_MTX	0x00001C00
+#define REG_OFFSET_TOPAZ_ESB_MTX	0x00002000
+
+#define REG_SIZE_TOPAZ_MTX_MTX		0x00000800
+#define REG_SIZE_TOPAZ_TOPAZ_MTX	0x00000400
+#define REG_SIZE_TOPAZ_MVEA_MTX		0x00000400
+#define REG_SIZE_TOPAZ_MVEACMD_MTX	0x00000400
+#define REG_SIZE_TOPAZ_VLC_MTX		0x00000400
+#define REG_SIZE_TOPAZ_DEBLOCKER_MTX	0x00000400
+#define REG_SIZE_TOPAZ_COMMS_MTX	0x00000400
+#define REG_SIZE_TOPAZ_ESB_MTX		0x00002000
+
+
+/* Register bank addresses - MTX view */
+#define REG_START_TOPAZ_MULTICORE_MTX	\
+	(REG_BASE_MTX + REG_OFFSET_TOPAZ_MULTICORE)
+#define REG_END_TOPAZ_MULTICORE_MTX	\
+	(REG_START_TOPAZ_MULTICORE_MTX + REG_SIZE_TOPAZ_MULTICORE)
+
+#define REG_START_TOPAZ_DMAC_MTX	\
+	(REG_BASE_MTX + REG_OFFSET_TOPAZ_DMAC)
+#define REG_END_TOPAZ_DMAC_MTX		\
+	(REG_START_TOPAZ_DMAC_MTX + REG_SIZE_TOPAZ_DMAC)
+
+#define REG_START_TOPAZ_MTX_MTX(core)	\
+	(REG_BASE_MTX + (REG_SIZE_TOPAZ_CORE_MTX*core) + \
+	REG_OFFSET_TOPAZ_CORE_MTX + REG_OFFSET_TOPAZ_MTX_MTX)
+#define REG_END_TOPAZ_MTX_MTX(core)	\
+	(REG_START_TOPAZ_MTX_MTX(core) + REG_SIZE_TOPAZ_MTX_MTX)
+
+#define REG_START_TOPAZ_TOPAZ_MTX(core)	\
+	(REG_BASE_MTX + (REG_SIZE_TOPAZ_CORE_MTX*core) + \
+	REG_OFFSET_TOPAZ_CORE_MTX + REG_OFFSET_TOPAZ_TOPAZ_MTX)
+#define REG_END_TOPAZ_TOPAZ_MTX(core) \
+	(REG_START_TOPAZ_TOPAZ_MTX(core) + REG_SIZE_TOPAZ_TOPAZ_MTX)
+
+#define REG_START_TOPAZ_MVEA_MTX(core) \
+	(REG_BASE_MTX + (REG_SIZE_TOPAZ_CORE_MTX*core) + \
+	REG_OFFSET_TOPAZ_CORE_MTX + REG_OFFSET_TOPAZ_MVEA_MTX)
+#define REG_END_TOPAZ_MVEA_MTX(core) \
+	(REG_START_TOPAZ_MVEA_MTX(core) + REG_SIZE_TOPAZ_MVEA_MTX)
+
+
+/*!
+******************************************************************************
+ DMAC configuration values:
+******************************************************************************/
+/*! Defined for DMAC as first updated by the SoC Group      */
+#define __DMAC_REV_002__
+/*! Defined to force the DMAC API to use MSVDX memory spaces*/
+#define __DMAC_MSVDX_MEMSPACE__
+/*! The maximum number of channels in the SoC               */
+#define DMAC_MAX_CHANNELS       (1)
+#define DMA_MAX_CHANNELS        (3)
+/*! The size of the DMAX HISR stack                         */
+#define DMAC_HISR_STACKSIZE     (DEFAULT_HISR_STACK_SIZE)
+/*! The priority of the DMAC HISR                           */
+#define DMAC_HISR_PRIORITY      (KRN_HIGHEST_PRIORITY)
+/*! The number of active DMAC channels supported by the API */
+#define DMAC_NUM_ACTIVE_CHANNELS (1)
+/*! The width of the DMA memory bus */
+#define DMAC_MEMORY_BUSWIDTH	(128)
+
+
+
+/* #define MMU_PAGE_SIZE	(0x1000) */
+#define TOPAZ_DEV_NAME		"TOPAZ"
+
+enum DMAC_ePW {
+	DMAC_PWIDTH_32_BIT = 0x0, /* !< Peripheral width 32-bit.*/
+	DMAC_PWIDTH_16_BIT = 0x1, /* !< Peripheral width 16-bit.*/
+	DMAC_PWIDTH_8_BIT  = 0x2, /* !< Peripheral width 8-bit.*/
+};
+
+/* Register CR_TOPAZHP_SRST */
+#define TOPAZHP_CR_TOPAZHP_SRST     0x0000
+#define MASK_TOPAZHP_CR_TOPAZHP_IPE_SOFT_RESET 0x00000001
+#define SHIFT_TOPAZHP_CR_TOPAZHP_IPE_SOFT_RESET 0
+#define REGNUM_TOPAZHP_CR_TOPAZHP_IPE_SOFT_RESET 0x0000
+#define SIGNED_TOPAZHP_CR_TOPAZHP_IPE_SOFT_RESET 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_SPE_SOFT_RESET 0x00000002
+#define SHIFT_TOPAZHP_CR_TOPAZHP_SPE_SOFT_RESET 1
+#define REGNUM_TOPAZHP_CR_TOPAZHP_SPE_SOFT_RESET 0x0000
+#define SIGNED_TOPAZHP_CR_TOPAZHP_SPE_SOFT_RESET 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_PC_SOFT_RESET 0x00000004
+#define SHIFT_TOPAZHP_CR_TOPAZHP_PC_SOFT_RESET 2
+#define REGNUM_TOPAZHP_CR_TOPAZHP_PC_SOFT_RESET 0x0000
+#define SIGNED_TOPAZHP_CR_TOPAZHP_PC_SOFT_RESET 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_H264COMP_SOFT_RESET 0x00000008
+#define SHIFT_TOPAZHP_CR_TOPAZHP_H264COMP_SOFT_RESET 3
+#define REGNUM_TOPAZHP_CR_TOPAZHP_H264COMP_SOFT_RESET 0x0000
+#define SIGNED_TOPAZHP_CR_TOPAZHP_H264COMP_SOFT_RESET 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_JMCOMP_SOFT_RESET 0x00000010
+#define SHIFT_TOPAZHP_CR_TOPAZHP_JMCOMP_SOFT_RESET 4
+#define REGNUM_TOPAZHP_CR_TOPAZHP_JMCOMP_SOFT_RESET 0x0000
+#define SIGNED_TOPAZHP_CR_TOPAZHP_JMCOMP_SOFT_RESET 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_PREFETCH_SOFT_RESET 0x00000020
+#define SHIFT_TOPAZHP_CR_TOPAZHP_PREFETCH_SOFT_RESET 5
+#define REGNUM_TOPAZHP_CR_TOPAZHP_PREFETCH_SOFT_RESET 0x0000
+#define SIGNED_TOPAZHP_CR_TOPAZHP_PREFETCH_SOFT_RESET 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_VLC_SOFT_RESET 0x00000040
+#define SHIFT_TOPAZHP_CR_TOPAZHP_VLC_SOFT_RESET 6
+#define REGNUM_TOPAZHP_CR_TOPAZHP_VLC_SOFT_RESET 0x0000
+#define SIGNED_TOPAZHP_CR_TOPAZHP_VLC_SOFT_RESET 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_DB_SOFT_RESET 0x00000080
+#define SHIFT_TOPAZHP_CR_TOPAZHP_DB_SOFT_RESET 7
+#define REGNUM_TOPAZHP_CR_TOPAZHP_DB_SOFT_RESET 0x0000
+#define SIGNED_TOPAZHP_CR_TOPAZHP_DB_SOFT_RESET 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_LTRITC_SOFT_RESET 0x00000100
+#define SHIFT_TOPAZHP_CR_TOPAZHP_LTRITC_SOFT_RESET 8
+#define REGNUM_TOPAZHP_CR_TOPAZHP_LTRITC_SOFT_RESET 0x0000
+#define SIGNED_TOPAZHP_CR_TOPAZHP_LTRITC_SOFT_RESET 0
+
+/* Register CR_TOPAZHP_AUTO_CLOCK_GATING */
+#define TOPAZHP_CR_TOPAZHP_AUTO_CLOCK_GATING 0x0024
+#define MASK_TOPAZHP_CR_TOPAZHP_IPE0_AUTO_CLK_GATE 0x00000001
+#define SHIFT_TOPAZHP_CR_TOPAZHP_IPE0_AUTO_CLK_GATE 0
+#define REGNUM_TOPAZHP_CR_TOPAZHP_IPE0_AUTO_CLK_GATE 0x0024
+#define SIGNED_TOPAZHP_CR_TOPAZHP_IPE0_AUTO_CLK_GATE 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_IPE1_AUTO_CLK_GATE 0x00000002
+#define SHIFT_TOPAZHP_CR_TOPAZHP_IPE1_AUTO_CLK_GATE 1
+#define REGNUM_TOPAZHP_CR_TOPAZHP_IPE1_AUTO_CLK_GATE 0x0024
+#define SIGNED_TOPAZHP_CR_TOPAZHP_IPE1_AUTO_CLK_GATE 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_SPE0_AUTO_CLK_GATE 0x00000004
+#define SHIFT_TOPAZHP_CR_TOPAZHP_SPE0_AUTO_CLK_GATE 2
+#define REGNUM_TOPAZHP_CR_TOPAZHP_SPE0_AUTO_CLK_GATE 0x0024
+#define SIGNED_TOPAZHP_CR_TOPAZHP_SPE0_AUTO_CLK_GATE 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_SPE1_AUTO_CLK_GATE 0x00000008
+#define SHIFT_TOPAZHP_CR_TOPAZHP_SPE1_AUTO_CLK_GATE 3
+#define REGNUM_TOPAZHP_CR_TOPAZHP_SPE1_AUTO_CLK_GATE 0x0024
+#define SIGNED_TOPAZHP_CR_TOPAZHP_SPE1_AUTO_CLK_GATE 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_H264COMP4X4_AUTO_CLK_GATE 0x00000010
+#define SHIFT_TOPAZHP_CR_TOPAZHP_H264COMP4X4_AUTO_CLK_GATE 4
+#define REGNUM_TOPAZHP_CR_TOPAZHP_H264COMP4X4_AUTO_CLK_GATE 0x0024
+#define SIGNED_TOPAZHP_CR_TOPAZHP_H264COMP4X4_AUTO_CLK_GATE 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_H264COMP8X8_AUTO_CLK_GATE 0x00000020
+#define SHIFT_TOPAZHP_CR_TOPAZHP_H264COMP8X8_AUTO_CLK_GATE 5
+#define REGNUM_TOPAZHP_CR_TOPAZHP_H264COMP8X8_AUTO_CLK_GATE 0x0024
+#define SIGNED_TOPAZHP_CR_TOPAZHP_H264COMP8X8_AUTO_CLK_GATE 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_H264COMP16X16_AUTO_CLK_GATE 0x00000040
+#define SHIFT_TOPAZHP_CR_TOPAZHP_H264COMP16X16_AUTO_CLK_GATE 6
+#define REGNUM_TOPAZHP_CR_TOPAZHP_H264COMP16X16_AUTO_CLK_GATE 0x0024
+#define SIGNED_TOPAZHP_CR_TOPAZHP_H264COMP16X16_AUTO_CLK_GATE 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_JMCOMP_AUTO_CLK_GATE 0x00000080
+#define SHIFT_TOPAZHP_CR_TOPAZHP_JMCOMP_AUTO_CLK_GATE 7
+#define REGNUM_TOPAZHP_CR_TOPAZHP_JMCOMP_AUTO_CLK_GATE 0x0024
+#define SIGNED_TOPAZHP_CR_TOPAZHP_JMCOMP_AUTO_CLK_GATE 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_VLC_AUTO_CLK_GATE 0x00000200
+#define SHIFT_TOPAZHP_CR_TOPAZHP_VLC_AUTO_CLK_GATE 9
+#define REGNUM_TOPAZHP_CR_TOPAZHP_VLC_AUTO_CLK_GATE 0x0024
+#define SIGNED_TOPAZHP_CR_TOPAZHP_VLC_AUTO_CLK_GATE 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_DEB_AUTO_CLK_GATE 0x00000400
+#define SHIFT_TOPAZHP_CR_TOPAZHP_DEB_AUTO_CLK_GATE 10
+#define REGNUM_TOPAZHP_CR_TOPAZHP_DEB_AUTO_CLK_GATE 0x0024
+#define SIGNED_TOPAZHP_CR_TOPAZHP_DEB_AUTO_CLK_GATE 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_PC_DM_AUTO_CLK_GATE 0x00000800
+#define SHIFT_TOPAZHP_CR_TOPAZHP_PC_DM_AUTO_CLK_GATE 11
+#define REGNUM_TOPAZHP_CR_TOPAZHP_PC_DM_AUTO_CLK_GATE 0x0024
+#define SIGNED_TOPAZHP_CR_TOPAZHP_PC_DM_AUTO_CLK_GATE 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_PC_DMS_AUTO_CLK_GATE 0x00001000
+#define SHIFT_TOPAZHP_CR_TOPAZHP_PC_DMS_AUTO_CLK_GATE 12
+#define REGNUM_TOPAZHP_CR_TOPAZHP_PC_DMS_AUTO_CLK_GATE 0x0024
+#define SIGNED_TOPAZHP_CR_TOPAZHP_PC_DMS_AUTO_CLK_GATE 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_CABAC_AUTO_CLK_GATE 0x00002000
+#define SHIFT_TOPAZHP_CR_TOPAZHP_CABAC_AUTO_CLK_GATE 13
+#define REGNUM_TOPAZHP_CR_TOPAZHP_CABAC_AUTO_CLK_GATE 0x0024
+#define SIGNED_TOPAZHP_CR_TOPAZHP_CABAC_AUTO_CLK_GATE 0
+
+/* Register CR_TOPAZHP_MAN_CLOCK_GATING */
+#define TOPAZHP_CR_TOPAZHP_MAN_CLOCK_GATING 0x0028
+#define MASK_TOPAZHP_CR_TOPAZHP_IPE0_MAN_CLK_GATE 0x00000001
+#define SHIFT_TOPAZHP_CR_TOPAZHP_IPE0_MAN_CLK_GATE 0
+#define REGNUM_TOPAZHP_CR_TOPAZHP_IPE0_MAN_CLK_GATE 0x0028
+#define SIGNED_TOPAZHP_CR_TOPAZHP_IPE0_MAN_CLK_GATE 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_IPE1_MAN_CLK_GATE 0x00000002
+#define SHIFT_TOPAZHP_CR_TOPAZHP_IPE1_MAN_CLK_GATE 1
+#define REGNUM_TOPAZHP_CR_TOPAZHP_IPE1_MAN_CLK_GATE 0x0028
+#define SIGNED_TOPAZHP_CR_TOPAZHP_IPE1_MAN_CLK_GATE 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_SPE0_MAN_CLK_GATE 0x00000004
+#define SHIFT_TOPAZHP_CR_TOPAZHP_SPE0_MAN_CLK_GATE 2
+#define REGNUM_TOPAZHP_CR_TOPAZHP_SPE0_MAN_CLK_GATE 0x0028
+#define SIGNED_TOPAZHP_CR_TOPAZHP_SPE0_MAN_CLK_GATE 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_SPE1_MAN_CLK_GATE 0x00000008
+#define SHIFT_TOPAZHP_CR_TOPAZHP_SPE1_MAN_CLK_GATE 3
+#define REGNUM_TOPAZHP_CR_TOPAZHP_SPE1_MAN_CLK_GATE 0x0028
+#define SIGNED_TOPAZHP_CR_TOPAZHP_SPE1_MAN_CLK_GATE 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_H264COMP4X4_MAN_CLK_GATE 0x00000010
+#define SHIFT_TOPAZHP_CR_TOPAZHP_H264COMP4X4_MAN_CLK_GATE 4
+#define REGNUM_TOPAZHP_CR_TOPAZHP_H264COMP4X4_MAN_CLK_GATE 0x0028
+#define SIGNED_TOPAZHP_CR_TOPAZHP_H264COMP4X4_MAN_CLK_GATE 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_H264COMP8X8_MAN_CLK_GATE 0x00000020
+#define SHIFT_TOPAZHP_CR_TOPAZHP_H264COMP8X8_MAN_CLK_GATE 5
+#define REGNUM_TOPAZHP_CR_TOPAZHP_H264COMP8X8_MAN_CLK_GATE 0x0028
+#define SIGNED_TOPAZHP_CR_TOPAZHP_H264COMP8X8_MAN_CLK_GATE 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_H264COMP16X16_MAN_CLK_GATE 0x00000040
+#define SHIFT_TOPAZHP_CR_TOPAZHP_H264COMP16X16_MAN_CLK_GATE 6
+#define REGNUM_TOPAZHP_CR_TOPAZHP_H264COMP16X16_MAN_CLK_GATE 0x0028
+#define SIGNED_TOPAZHP_CR_TOPAZHP_H264COMP16X16_MAN_CLK_GATE 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_JMCOMP_MAN_CLK_GATE 0x00000080
+#define SHIFT_TOPAZHP_CR_TOPAZHP_JMCOMP_MAN_CLK_GATE 7
+#define REGNUM_TOPAZHP_CR_TOPAZHP_JMCOMP_MAN_CLK_GATE 0x0028
+#define SIGNED_TOPAZHP_CR_TOPAZHP_JMCOMP_MAN_CLK_GATE 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_PREFETCH_MAN_CLK_GATE 0x00000100
+#define SHIFT_TOPAZHP_CR_TOPAZHP_PREFETCH_MAN_CLK_GATE 8
+#define REGNUM_TOPAZHP_CR_TOPAZHP_PREFETCH_MAN_CLK_GATE 0x0028
+#define SIGNED_TOPAZHP_CR_TOPAZHP_PREFETCH_MAN_CLK_GATE 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_VLC_MAN_CLK_GATE 0x00000200
+#define SHIFT_TOPAZHP_CR_TOPAZHP_VLC_MAN_CLK_GATE 9
+#define REGNUM_TOPAZHP_CR_TOPAZHP_VLC_MAN_CLK_GATE 0x0028
+#define SIGNED_TOPAZHP_CR_TOPAZHP_VLC_MAN_CLK_GATE 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_DEB_MAN_CLK_GATE 0x00000400
+#define SHIFT_TOPAZHP_CR_TOPAZHP_DEB_MAN_CLK_GATE 10
+#define REGNUM_TOPAZHP_CR_TOPAZHP_DEB_MAN_CLK_GATE 0x0028
+#define SIGNED_TOPAZHP_CR_TOPAZHP_DEB_MAN_CLK_GATE 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_PC_DM_MAN_CLK_GATE 0x00000800
+#define SHIFT_TOPAZHP_CR_TOPAZHP_PC_DM_MAN_CLK_GATE 11
+#define REGNUM_TOPAZHP_CR_TOPAZHP_PC_DM_MAN_CLK_GATE 0x0028
+#define SIGNED_TOPAZHP_CR_TOPAZHP_PC_DM_MAN_CLK_GATE 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_PC_DMS_MAN_CLK_GATE 0x00001000
+#define SHIFT_TOPAZHP_CR_TOPAZHP_PC_DMS_MAN_CLK_GATE 12
+#define REGNUM_TOPAZHP_CR_TOPAZHP_PC_DMS_MAN_CLK_GATE 0x0028
+#define SIGNED_TOPAZHP_CR_TOPAZHP_PC_DMS_MAN_CLK_GATE 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_CABAC_MAN_CLK_GATE 0x00002000
+#define SHIFT_TOPAZHP_CR_TOPAZHP_CABAC_MAN_CLK_GATE 13
+#define REGNUM_TOPAZHP_CR_TOPAZHP_CABAC_MAN_CLK_GATE 0x0028
+#define SIGNED_TOPAZHP_CR_TOPAZHP_CABAC_MAN_CLK_GATE 0
+
+#define MASK_TOPAZHP_CR_TOPAZHP_LRITC_MAN_CLK_GATE 0x00004000
+#define SHIFT_TOPAZHP_CR_TOPAZHP_LRITC_MAN_CLK_GATE 14
+#define REGNUM_TOPAZHP_CR_TOPAZHP_LRITC_MAN_CLK_GATE 0x0028
+#define SIGNED_TOPAZHP_CR_TOPAZHP_LRITC_MAN_CLK_GATE 0
+
+#define INTEL_CHE_CHK_CFG 0x0050
+#define MASK_INTEL_CHE_CK_PRY 0x000000FF
+#define SHIFT_INTEL_CHE_CK_PRY 0
+#define REGNUM_INTEL_CHE_CK_PRY 0x0050
+#define SIGNED_INTEL_CHE_CK_PRY 0
+
+#define TOPAZHP_CR_SEQUENCER_CONFIG 0x0154
+
+#endif
diff --git a/drivers/external_drivers/intel_media/video/encode/tng_topazinit.c b/drivers/external_drivers/intel_media/video/encode/tng_topazinit.c
new file mode 100644
index 0000000..2ff4ddc
--- /dev/null
+++ b/drivers/external_drivers/intel_media/video/encode/tng_topazinit.c
@@ -0,0 +1,1820 @@
+/**
+ *
+ * file tng_topazinit.c
+ * TOPAZ initialization and mtx-firmware upload
+ *
+ */
+
+/**************************************************************************
+ *
+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
+ * Copyright (c) Imagination Technologies Limited, UK
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+/* NOTE: (READ BEFORE REFINE CODE)
+ * 1. The FIRMWARE's SIZE is measured by byte, we have to pass the size
+ * measured by word to DMAC.
+ *
+ *
+ *
+ */
+
+/* include headers */
+
+/* #define DRM_DEBUG_CODE 2 */
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+
+#include "psb_drv.h"
+#include "tng_topaz.h"
+#include "psb_powermgmt.h"
+#include "pwr_mgmt.h"
+#include "tng_topaz_hw_reg.h"
+
+/* WARNING: this define is very important */
+#define RAM_SIZE	(1024 * 24)
+#define MTX_PC		(0x05)
+
+#define TOPAZHP_MMU_BASE	0x80400000
+
+#define	MEMORY_ONLY	0
+#define	MEM_AND_CACHE	1
+#define CACHE_ONLY	2
+
+extern int drm_psb_msvdx_tiling;
+
+/* When width or height is bigger than 1280. Encode will
+   treat TTM_PL_TT buffers as tilied memory */
+#define PSB_TOPAZ_TILING_THRESHOLD (1280)
+
+#ifdef MRFLD_B0_DEBUG
+/* #define TOPAZHP_ENCODE_FPGA */
+static int tng_init_error_dump_reg(struct drm_psb_private *dev_priv)
+{
+	uint32_t reg_val;
+	DRM_ERROR("MULTICORE Registers:\n");
+	MULTICORE_READ32(0x00, &reg_val);
+	PSB_DEBUG_TOPAZ("MULTICORE_SRST %08x\n", reg_val);
+	MULTICORE_READ32(0x04, &reg_val);
+	PSB_DEBUG_TOPAZ("MULTICORE_INT_STAT %08x\n", reg_val);
+	MULTICORE_READ32(0x08, &reg_val);
+	PSB_DEBUG_TOPAZ("MULTICORE_MTX_INT_ENAB %08x\n", reg_val);
+	MULTICORE_READ32(0x0C, &reg_val);
+	PSB_DEBUG_TOPAZ("MULTICORE_HOST_INT_ENAB %08x\n", reg_val);
+	MULTICORE_READ32(0x10, &reg_val);
+	PSB_DEBUG_TOPAZ("MULTICORE_INT_CLEAR %08x\n", reg_val);
+	MULTICORE_READ32(0x14, &reg_val);
+	PSB_DEBUG_TOPAZ("MULTICORE_MAN_CLK_GATE %08x\n", reg_val);
+	MULTICORE_READ32(0x18, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZ_MTX_C_RATIO %08x\n", reg_val);
+	MULTICORE_READ32(0x1c, &reg_val);
+	PSB_DEBUG_TOPAZ("MMU_STATUS %08x\n", reg_val);
+	MULTICORE_READ32(0x1c, &reg_val);
+	PSB_DEBUG_TOPAZ("MMU_STATUS %08x\n", reg_val);
+	MULTICORE_READ32(0x20, &reg_val);
+	PSB_DEBUG_TOPAZ("MMU_MEM_REQ %08x\n", reg_val);
+	MULTICORE_READ32(0x24, &reg_val);
+	PSB_DEBUG_TOPAZ("MMU_CONTROL0 %08x\n", reg_val);
+	MULTICORE_READ32(0x28, &reg_val);
+	PSB_DEBUG_TOPAZ("MMU_CONTROL1 %08x\n", reg_val);
+	MULTICORE_READ32(0x2c , &reg_val);
+	PSB_DEBUG_TOPAZ("MMU_CONTROL2 %08x\n", reg_val);
+	MULTICORE_READ32(0x30, &reg_val);
+	PSB_DEBUG_TOPAZ("MMU_DIR_LIST_BASE %08x\n", reg_val);
+	MULTICORE_READ32(0x38, &reg_val);
+	PSB_DEBUG_TOPAZ("MMU_TILE %08x\n", reg_val);
+	MULTICORE_READ32(0x44, &reg_val);
+	PSB_DEBUG_TOPAZ("MTX_DEBUG_MSTR %08x\n", reg_val);
+	MULTICORE_READ32(0x48, &reg_val);
+	PSB_DEBUG_TOPAZ("MTX_DEBUG_SLV %08x\n", reg_val);
+	MULTICORE_READ32(0x50, &reg_val);
+	PSB_DEBUG_TOPAZ("MULTICORE_CORE_SEL_0 %08x\n", reg_val);
+	MULTICORE_READ32(0x54, &reg_val);
+	PSB_DEBUG_TOPAZ("MULTICORE_CORE_SEL_1 %08x\n", reg_val);
+	MULTICORE_READ32(0x58, &reg_val);
+	PSB_DEBUG_TOPAZ("MULTICORE_HW_CFG %08x\n", reg_val);
+	MULTICORE_READ32(0x60, &reg_val);
+	PSB_DEBUG_TOPAZ("MULTICORE_CMD_FIFO_WRITE %08x\n", reg_val);
+	MULTICORE_READ32(0x64, &reg_val);
+	PSB_DEBUG_TOPAZ("MULTICORE_CMD_FIFO_WRITE_SPACE %08x\n", reg_val);
+	MULTICORE_READ32(0x70, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZ_CMD_FIFO_READ %08x\n", reg_val);
+	MULTICORE_READ32(0x74, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZ_CMD_FIFO_READ_AVAILABLE %08x\n", reg_val);
+	MULTICORE_READ32(0x78, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZ_CMD_FIFO_FLUSH %08x\n", reg_val);
+	MULTICORE_READ32(0x80, &reg_val);
+	PSB_DEBUG_TOPAZ("MMU_TILE_EXT %08x\n", reg_val);
+	MULTICORE_READ32(0x100, &reg_val);
+	PSB_DEBUG_TOPAZ("FIRMWARE_REG_1 %08x\n", reg_val);
+	MULTICORE_READ32(0x104, &reg_val);
+	PSB_DEBUG_TOPAZ("FIRMWARE_REG_2 %08x\n", reg_val);
+	MULTICORE_READ32(0x108, &reg_val);
+	PSB_DEBUG_TOPAZ("FIRMWARE_REG_3 %08x\n", reg_val);
+	MULTICORE_READ32(0x110, &reg_val);
+	PSB_DEBUG_TOPAZ("CYCLE_COUNTER %08x\n", reg_val);
+	MULTICORE_READ32(0x114, &reg_val);
+	PSB_DEBUG_TOPAZ("CYCLE_COUNTER_CTRL %08x\n", reg_val);
+	MULTICORE_READ32(0x118, &reg_val);
+	PSB_DEBUG_TOPAZ("MULTICORE_IDLE_PWR_MAN %08x\n", reg_val);
+	MULTICORE_READ32(0x124, &reg_val);
+	PSB_DEBUG_TOPAZ("DIRECT_BIAS_TABLE %08x\n", reg_val);
+	MULTICORE_READ32(0x128, &reg_val);
+	PSB_DEBUG_TOPAZ("INTRA_BIAS_TABLE %08x\n", reg_val);
+	MULTICORE_READ32(0x12c, &reg_val);
+	PSB_DEBUG_TOPAZ("INTER_BIAS_TABLE %08x\n", reg_val);
+	MULTICORE_READ32(0x130, &reg_val);
+	PSB_DEBUG_TOPAZ("INTRA_SCALE_TABLE %08x\n", reg_val);
+	MULTICORE_READ32(0x134, &reg_val);
+	PSB_DEBUG_TOPAZ("QPCB_QPCR_OFFSET %08x\n", reg_val);
+	MULTICORE_READ32(0x140, &reg_val);
+	PSB_DEBUG_TOPAZ("INTER_INTRA_SCALE_TABLE %08x\n", reg_val);
+	MULTICORE_READ32(0x144, &reg_val);
+	PSB_DEBUG_TOPAZ("SKIPPED_CODED_SCALE_TABLE %08x\n", reg_val);
+	MULTICORE_READ32(0x148, &reg_val);
+	PSB_DEBUG_TOPAZ("POLYNOM_ALPHA_COEFF_CORE0 %08x\n", reg_val);
+	MULTICORE_READ32(0x14c, &reg_val);
+	PSB_DEBUG_TOPAZ("POLYNOM_GAMMA_COEFF_CORE0 %08x\n", reg_val);
+	MULTICORE_READ32(0x150, &reg_val);
+	PSB_DEBUG_TOPAZ("POLYNOM_CUTOFF_CORE0 %08x\n", reg_val);
+	MULTICORE_READ32(0x154, &reg_val);
+	PSB_DEBUG_TOPAZ("POLYNOM_ALPHA_COEFF_CORE1 %08x\n", reg_val);
+	MULTICORE_READ32(0x158, &reg_val);
+	PSB_DEBUG_TOPAZ("POLYNOM_GAMMA_COEFF_CORE1 %08x\n", reg_val);
+	MULTICORE_READ32(0x15c, &reg_val);
+	PSB_DEBUG_TOPAZ("POLYNOM_CUTOFF_CORE1 %08x\n", reg_val);
+	MULTICORE_READ32(0x300, &reg_val);
+	PSB_DEBUG_TOPAZ("FIRMWARE_REG_4 %08x\n", reg_val);
+	MULTICORE_READ32(0x304, &reg_val);
+	PSB_DEBUG_TOPAZ("FIRMWARE_REG_5 %08x\n", reg_val);
+	MULTICORE_READ32(0x308, &reg_val);
+	PSB_DEBUG_TOPAZ("FIRMWARE_REG_6 %08x\n", reg_val);
+	MULTICORE_READ32(0x30c, &reg_val);
+	PSB_DEBUG_TOPAZ("FIRMWARE_REG_7 %08x\n", reg_val);
+	MULTICORE_READ32(0x3b0, &reg_val);
+	PSB_DEBUG_TOPAZ("MULTICORE_RSVD0 %08x\n", reg_val);
+
+	DRM_ERROR("TopazHP Core Registers:\n");
+	TOPAZCORE_READ32(0, 0x0, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZHP_SRST %08x\n", reg_val);
+	TOPAZCORE_READ32(0, 0x4, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZHP_INTSTAT %08x\n", reg_val);
+	TOPAZCORE_READ32(0, 0x8, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZHP_MTX_INTENAB %08x\n", reg_val);
+	TOPAZCORE_READ32(0, 0xc, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZHP_HOST_INTENAB %08x\n", reg_val);
+	TOPAZCORE_READ32(0, 0x10, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZHP_INTCLEAR %08x\n", reg_val);
+	TOPAZCORE_READ32(0, 0x14, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZHP_INT_COMB_SEL %08x\n", reg_val);
+	TOPAZCORE_READ32(0, 0x18, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZHP_BUSY %08x\n", reg_val);
+	TOPAZCORE_READ32(0, 0x24, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZHP_AUTO_CLOCK_GATING %08x\n", reg_val);
+	TOPAZCORE_READ32(0, 0x28, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZHP_MAN_CLOCK_GATING %08x\n", reg_val);
+	TOPAZCORE_READ32(0, 0x30, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZHP_RTM %08x\n", reg_val);
+	TOPAZCORE_READ32(0, 0x34, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZHP_RTM_VALUE %08x\n", reg_val);
+	TOPAZCORE_READ32(0, 0x38, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZHP_MB_PERFORMANCE_RESULT %08x\n", reg_val);
+	TOPAZCORE_READ32(0, 0x3c, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZHP_MB_PERFORMANCE_MB_NUMBER %08x\n", reg_val);
+	TOPAZCORE_READ32(0, 0x188, &reg_val);
+	PSB_DEBUG_TOPAZ("FIELD_PARITY %08x\n", reg_val);
+	TOPAZCORE_READ32(0, 0x3d0, &reg_val);
+	PSB_DEBUG_TOPAZ("WEIGHTED_PRED_CONTROL %08x\n", reg_val);
+	TOPAZCORE_READ32(0, 0x3d4, &reg_val);
+	PSB_DEBUG_TOPAZ("WEIGHTED_PRED_COEFFS %08x\n", reg_val);
+	TOPAZCORE_READ32(0, 0x3e0, &reg_val);
+	PSB_DEBUG_TOPAZ("WEIGHTED_PRED_INV_WEIGHT %08x\n", reg_val);
+	TOPAZCORE_READ32(0, 0x3f0, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZHP_RSVD0 %08x\n", reg_val);
+	TOPAZCORE_READ32(0, 0x3f4, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZHP_CRC_CLEAR %08x\n", reg_val);
+
+
+	DRM_ERROR("MTX Registers:\n");
+	MTX_READ32(0x00, &reg_val);
+	PSB_DEBUG_TOPAZ("MTX_ENABLE %08x\n", reg_val);
+	MTX_READ32(0x08, &reg_val);
+	PSB_DEBUG_TOPAZ("MTX_STATUS %08x\n", reg_val);
+	MTX_READ32(0x80, &reg_val);
+	PSB_DEBUG_TOPAZ("MTX_KICK %08x\n", reg_val);
+	MTX_READ32(0x88, &reg_val);
+	PSB_DEBUG_TOPAZ("MTX_KICKI %08x\n", reg_val);
+	MTX_READ32(0x90, &reg_val);
+	PSB_DEBUG_TOPAZ("MTX_FAULT0 %08x\n", reg_val);
+	MTX_READ32(0xf8, &reg_val);
+	PSB_DEBUG_TOPAZ("MTX_REGISTER_READ_WRITE_DATA %08x\n", reg_val);
+	MTX_READ32(0xfc, &reg_val);
+	PSB_DEBUG_TOPAZ("MTX_REGISTER_READ_WRITE_REQUEST %08x\n", reg_val);
+	MTX_READ32(0x100, &reg_val);
+	PSB_DEBUG_TOPAZ("MTX_RAM_ACCESS_DATA_EXCHANGE %08x\n", reg_val);
+	MTX_READ32(0x104, &reg_val);
+	PSB_DEBUG_TOPAZ("MTX_RAM_ACCESS_DATA_TRANSFER %08x\n", reg_val);
+	MTX_READ32(0x108, &reg_val);
+	PSB_DEBUG_TOPAZ("MTX_RAM_ACCESS_CONTROL %08x\n", reg_val);
+	MTX_READ32(0x10c, &reg_val);
+	PSB_DEBUG_TOPAZ("MTX_RAM_ACCESS_STATUS %08x\n", reg_val);
+	MTX_READ32(0x200, &reg_val);
+	PSB_DEBUG_TOPAZ("MTX_SOFT_RESET %08x\n", reg_val);
+	MTX_READ32(0x340, &reg_val);
+	PSB_DEBUG_TOPAZ("MTX_SYSC_CDMAC %08x\n", reg_val);
+	MTX_READ32(0x344, &reg_val);
+	PSB_DEBUG_TOPAZ("MTX_SYSC_CDMAA %08x\n", reg_val);
+	MTX_READ32(0x348, &reg_val);
+	PSB_DEBUG_TOPAZ("MTX_SYSC_CDMAS0 %08x\n", reg_val);
+	MTX_READ32(0x34c, &reg_val);
+	PSB_DEBUG_TOPAZ("MTX_SYSC_CDMAS1 %08x\n", reg_val);
+	MTX_READ32(0x350, &reg_val);
+	PSB_DEBUG_TOPAZ("MTX_SYSC_CDMAT %08x\n", reg_val);
+
+	DRM_ERROR("DMA Registers:\n");
+	DMAC_READ32(0x00, &reg_val);
+	PSB_DEBUG_TOPAZ("DMA_Setup_n %08x\n", reg_val);
+	DMAC_READ32(0x04, &reg_val);
+	PSB_DEBUG_TOPAZ("DMA_Count_n %08x\n", reg_val);
+	DMAC_READ32(0x08, &reg_val);
+	PSB_DEBUG_TOPAZ(" DMA_Peripheral_param_n %08x\n", reg_val);
+	DMAC_READ32(0x0C, &reg_val);
+	PSB_DEBUG_TOPAZ("DMA_IRQ_Stat_n %08x\n", reg_val);
+	DMAC_READ32(0x10, &reg_val);
+	PSB_DEBUG_TOPAZ("DMA_2D_Mode_n %08x\n", reg_val);
+	DMAC_READ32(0x14, &reg_val);
+	PSB_DEBUG_TOPAZ("DMA_Peripheral_addr_n %08x\n", reg_val);
+	DMAC_READ32(0x18, &reg_val);
+	PSB_DEBUG_TOPAZ("DMA_Per_hold %08x\n", reg_val);
+	return 0;
+}
+#endif
+
+/* static function define */
+static int32_t mtx_dmac_transfer(struct drm_psb_private *dev_priv,
+			      uint32_t channel, uint32_t src_phy_addr,
+			      uint32_t offset, uint32_t mtx_addr,
+			      uint32_t byte_num, uint32_t is_write);
+
+static int32_t get_mtx_control_from_dash(struct drm_psb_private *dev_priv);
+
+static void release_mtx_control_from_dash(struct drm_psb_private *dev_priv);
+
+static void tng_topaz_mmu_hwsetup(struct drm_psb_private *dev_priv);
+
+#ifdef MRFLD_B0_DEBUG
+static int tng_error_dump_reg(struct drm_psb_private *dev_priv)
+{
+	uint32_t reg_val;
+	DRM_ERROR("MULTICORE Registers:\n");
+	MULTICORE_READ32(0x00, &reg_val);
+	PSB_DEBUG_TOPAZ("MULTICORE_SRST %08x\n", reg_val);
+	MULTICORE_READ32(0x04, &reg_val);
+	PSB_DEBUG_TOPAZ("MULTICORE_INT_STAT %08x\n", reg_val);
+	MULTICORE_READ32(0x08, &reg_val);
+	PSB_DEBUG_TOPAZ("MULTICORE_MTX_INT_ENAB %08x\n", reg_val);
+	MULTICORE_READ32(0x0C, &reg_val);
+	PSB_DEBUG_TOPAZ("MULTICORE_HOST_INT_ENAB %08x\n", reg_val);
+	MULTICORE_READ32(0x10, &reg_val);
+	PSB_DEBUG_TOPAZ("MULTICORE_INT_CLEAR %08x\n", reg_val);
+	MULTICORE_READ32(0x14, &reg_val);
+	PSB_DEBUG_TOPAZ("MULTICORE_MAN_CLK_GATE %08x\n", reg_val);
+	MULTICORE_READ32(0x18, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZ_MTX_C_RATIO %08x\n", reg_val);
+	MULTICORE_READ32(0x1c, &reg_val);
+	PSB_DEBUG_TOPAZ("MMU_STATUS %08x\n", reg_val);
+	MULTICORE_READ32(0x1c, &reg_val);
+	PSB_DEBUG_TOPAZ("MMU_STATUS %08x\n", reg_val);
+	MULTICORE_READ32(0x20, &reg_val);
+	PSB_DEBUG_TOPAZ("MMU_MEM_REQ %08x\n", reg_val);
+	MULTICORE_READ32(0x24, &reg_val);
+	PSB_DEBUG_TOPAZ("MMU_CONTROL0 %08x\n", reg_val);
+	MULTICORE_READ32(0x28, &reg_val);
+	PSB_DEBUG_TOPAZ("MMU_CONTROL1 %08x\n", reg_val);
+	MULTICORE_READ32(0x2c , &reg_val);
+	PSB_DEBUG_TOPAZ("MMU_CONTROL2 %08x\n", reg_val);
+	MULTICORE_READ32(0x30, &reg_val);
+	PSB_DEBUG_TOPAZ("MMU_DIR_LIST_BASE %08x\n", reg_val);
+	MULTICORE_READ32(0x38, &reg_val);
+	PSB_DEBUG_TOPAZ("MMU_TILE %08x\n", reg_val);
+	MULTICORE_READ32(0x44, &reg_val);
+	PSB_DEBUG_TOPAZ("MTX_DEBUG_MSTR %08x\n", reg_val);
+	MULTICORE_READ32(0x48, &reg_val);
+	PSB_DEBUG_TOPAZ("MTX_DEBUG_SLV %08x\n", reg_val);
+	MULTICORE_READ32(0x50, &reg_val);
+	PSB_DEBUG_TOPAZ("MULTICORE_CORE_SEL_0 %08x\n", reg_val);
+	MULTICORE_READ32(0x54, &reg_val);
+	PSB_DEBUG_TOPAZ("MULTICORE_CORE_SEL_1 %08x\n", reg_val);
+	MULTICORE_READ32(0x58, &reg_val);
+	PSB_DEBUG_TOPAZ("MULTICORE_HW_CFG %08x\n", reg_val);
+	MULTICORE_READ32(0x60, &reg_val);
+	PSB_DEBUG_TOPAZ("MULTICORE_CMD_FIFO_WRITE %08x\n", reg_val);
+	MULTICORE_READ32(0x64, &reg_val);
+	PSB_DEBUG_TOPAZ("MULTICORE_CMD_FIFO_WRITE_SPACE %08x\n", reg_val);
+	MULTICORE_READ32(0x70, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZ_CMD_FIFO_READ %08x\n", reg_val);
+	MULTICORE_READ32(0x74, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZ_CMD_FIFO_READ_AVAILABLE %08x\n", reg_val);
+	MULTICORE_READ32(0x78, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZ_CMD_FIFO_FLUSH %08x\n", reg_val);
+	MULTICORE_READ32(0x80, &reg_val);
+	PSB_DEBUG_TOPAZ("MMU_TILE_EXT %08x\n", reg_val);
+	MULTICORE_READ32(0x100, &reg_val);
+	PSB_DEBUG_TOPAZ("FIRMWARE_REG_1 %08x\n", reg_val);
+	MULTICORE_READ32(0x104, &reg_val);
+	PSB_DEBUG_TOPAZ("FIRMWARE_REG_2 %08x\n", reg_val);
+	MULTICORE_READ32(0x108, &reg_val);
+	PSB_DEBUG_TOPAZ("FIRMWARE_REG_3 %08x\n", reg_val);
+	MULTICORE_READ32(0x110, &reg_val);
+	PSB_DEBUG_TOPAZ("CYCLE_COUNTER %08x\n", reg_val);
+	MULTICORE_READ32(0x114, &reg_val);
+	PSB_DEBUG_TOPAZ("CYCLE_COUNTER_CTRL %08x\n", reg_val);
+	MULTICORE_READ32(0x118, &reg_val);
+	PSB_DEBUG_TOPAZ("MULTICORE_IDLE_PWR_MAN %08x\n", reg_val);
+	MULTICORE_READ32(0x124, &reg_val);
+	PSB_DEBUG_TOPAZ("DIRECT_BIAS_TABLE %08x\n", reg_val);
+	MULTICORE_READ32(0x128, &reg_val);
+	PSB_DEBUG_TOPAZ("INTRA_BIAS_TABLE %08x\n", reg_val);
+	MULTICORE_READ32(0x12c, &reg_val);
+	PSB_DEBUG_TOPAZ("INTER_BIAS_TABLE %08x\n", reg_val);
+	MULTICORE_READ32(0x130, &reg_val);
+	PSB_DEBUG_TOPAZ("INTRA_SCALE_TABLE %08x\n", reg_val);
+	MULTICORE_READ32(0x134, &reg_val);
+	PSB_DEBUG_TOPAZ("QPCB_QPCR_OFFSET %08x\n", reg_val);
+	MULTICORE_READ32(0x140, &reg_val);
+	PSB_DEBUG_TOPAZ("INTER_INTRA_SCALE_TABLE %08x\n", reg_val);
+	MULTICORE_READ32(0x144, &reg_val);
+	PSB_DEBUG_TOPAZ("SKIPPED_CODED_SCALE_TABLE %08x\n", reg_val);
+	MULTICORE_READ32(0x148, &reg_val);
+	PSB_DEBUG_TOPAZ("POLYNOM_ALPHA_COEFF_CORE0 %08x\n", reg_val);
+	MULTICORE_READ32(0x14c, &reg_val);
+	PSB_DEBUG_TOPAZ("POLYNOM_GAMMA_COEFF_CORE0 %08x\n", reg_val);
+	MULTICORE_READ32(0x150, &reg_val);
+	PSB_DEBUG_TOPAZ("POLYNOM_CUTOFF_CORE0 %08x\n", reg_val);
+	MULTICORE_READ32(0x154, &reg_val);
+	PSB_DEBUG_TOPAZ("POLYNOM_ALPHA_COEFF_CORE1 %08x\n", reg_val);
+	MULTICORE_READ32(0x158, &reg_val);
+	PSB_DEBUG_TOPAZ("POLYNOM_GAMMA_COEFF_CORE1 %08x\n", reg_val);
+	MULTICORE_READ32(0x15c, &reg_val);
+	PSB_DEBUG_TOPAZ("POLYNOM_CUTOFF_CORE1 %08x\n", reg_val);
+	MULTICORE_READ32(0x300, &reg_val);
+	PSB_DEBUG_TOPAZ("FIRMWARE_REG_4 %08x\n", reg_val);
+	MULTICORE_READ32(0x304, &reg_val);
+	PSB_DEBUG_TOPAZ("FIRMWARE_REG_5 %08x\n", reg_val);
+	MULTICORE_READ32(0x308, &reg_val);
+	PSB_DEBUG_TOPAZ("FIRMWARE_REG_6 %08x\n", reg_val);
+	MULTICORE_READ32(0x30c, &reg_val);
+	PSB_DEBUG_TOPAZ("FIRMWARE_REG_7 %08x\n", reg_val);
+	MULTICORE_READ32(0x3b0, &reg_val);
+	PSB_DEBUG_TOPAZ("MULTICORE_RSVD0 %08x\n", reg_val);
+
+	DRM_ERROR("TopazHP Core Registers:\n");
+	TOPAZCORE_READ32(0, 0x0, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZHP_SRST %08x\n", reg_val);
+	TOPAZCORE_READ32(0, 0x4, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZHP_INTSTAT %08x\n", reg_val);
+	TOPAZCORE_READ32(0, 0x8, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZHP_MTX_INTENAB %08x\n", reg_val);
+	TOPAZCORE_READ32(0, 0xc, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZHP_HOST_INTENAB %08x\n", reg_val);
+	TOPAZCORE_READ32(0, 0x10, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZHP_INTCLEAR %08x\n", reg_val);
+	TOPAZCORE_READ32(0, 0x14, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZHP_INT_COMB_SEL %08x\n", reg_val);
+	TOPAZCORE_READ32(0, 0x18, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZHP_BUSY %08x\n", reg_val);
+	TOPAZCORE_READ32(0, 0x24, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZHP_AUTO_CLOCK_GATING %08x\n", reg_val);
+	TOPAZCORE_READ32(0, 0x28, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZHP_MAN_CLOCK_GATING %08x\n", reg_val);
+	TOPAZCORE_READ32(0, 0x30, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZHP_RTM %08x\n", reg_val);
+	TOPAZCORE_READ32(0, 0x34, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZHP_RTM_VALUE %08x\n", reg_val);
+	TOPAZCORE_READ32(0, 0x38, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZHP_MB_PERFORMANCE_RESULT %08x\n", reg_val);
+	TOPAZCORE_READ32(0, 0x3c, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZHP_MB_PERFORMANCE_MB_NUMBER %08x\n", reg_val);
+	TOPAZCORE_READ32(0, 0x188, &reg_val);
+	PSB_DEBUG_TOPAZ("FIELD_PARITY %08x\n", reg_val);
+	TOPAZCORE_READ32(0, 0x3d0, &reg_val);
+	PSB_DEBUG_TOPAZ("WEIGHTED_PRED_CONTROL %08x\n", reg_val);
+	TOPAZCORE_READ32(0, 0x3d4, &reg_val);
+	PSB_DEBUG_TOPAZ("WEIGHTED_PRED_COEFFS %08x\n", reg_val);
+	TOPAZCORE_READ32(0, 0x3e0, &reg_val);
+	PSB_DEBUG_TOPAZ("WEIGHTED_PRED_INV_WEIGHT %08x\n", reg_val);
+	TOPAZCORE_READ32(0, 0x3f0, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZHP_RSVD0 %08x\n", reg_val);
+	TOPAZCORE_READ32(0, 0x3f4, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZHP_CRC_CLEAR %08x\n", reg_val);
+
+
+	DRM_ERROR("MTX Registers:\n");
+	MTX_READ32(0x00, &reg_val);
+	PSB_DEBUG_TOPAZ("MTX_ENABLE %08x\n", reg_val);
+	MTX_READ32(0x08, &reg_val);
+	PSB_DEBUG_TOPAZ("MTX_STATUS %08x\n", reg_val);
+	MTX_READ32(0x80, &reg_val);
+	PSB_DEBUG_TOPAZ("MTX_KICK %08x\n", reg_val);
+	MTX_READ32(0x88, &reg_val);
+	PSB_DEBUG_TOPAZ("MTX_KICKI %08x\n", reg_val);
+	MTX_READ32(0x90, &reg_val);
+	PSB_DEBUG_TOPAZ("MTX_FAULT0 %08x\n", reg_val);
+	MTX_READ32(0xf8, &reg_val);
+	PSB_DEBUG_TOPAZ("MTX_REGISTER_READ_WRITE_DATA %08x\n", reg_val);
+	MTX_READ32(0xfc, &reg_val);
+	PSB_DEBUG_TOPAZ("MTX_REGISTER_READ_WRITE_REQUEST %08x\n", reg_val);
+	MTX_READ32(0x100, &reg_val);
+	PSB_DEBUG_TOPAZ("MTX_RAM_ACCESS_DATA_EXCHANGE %08x\n", reg_val);
+	MTX_READ32(0x104, &reg_val);
+	PSB_DEBUG_TOPAZ("MTX_RAM_ACCESS_DATA_TRANSFER %08x\n", reg_val);
+	MTX_READ32(0x108, &reg_val);
+	PSB_DEBUG_TOPAZ("MTX_RAM_ACCESS_CONTROL %08x\n", reg_val);
+	MTX_READ32(0x10c, &reg_val);
+	PSB_DEBUG_TOPAZ("MTX_RAM_ACCESS_STATUS %08x\n", reg_val);
+	MTX_READ32(0x200, &reg_val);
+	PSB_DEBUG_TOPAZ("MTX_SOFT_RESET %08x\n", reg_val);
+	MTX_READ32(0x340, &reg_val);
+	PSB_DEBUG_TOPAZ("MTX_SYSC_CDMAC %08x\n", reg_val);
+	MTX_READ32(0x344, &reg_val);
+	PSB_DEBUG_TOPAZ("MTX_SYSC_CDMAA %08x\n", reg_val);
+	MTX_READ32(0x348, &reg_val);
+	PSB_DEBUG_TOPAZ("MTX_SYSC_CDMAS0 %08x\n", reg_val);
+	MTX_READ32(0x34c, &reg_val);
+	PSB_DEBUG_TOPAZ("MTX_SYSC_CDMAS1 %08x\n", reg_val);
+	MTX_READ32(0x350, &reg_val);
+	PSB_DEBUG_TOPAZ("MTX_SYSC_CDMAT %08x\n", reg_val);
+
+	DRM_ERROR("DMA Registers:\n");
+	DMAC_READ32(0x00, &reg_val);
+	PSB_DEBUG_TOPAZ("DMA_Setup_n %08x\n", reg_val);
+	DMAC_READ32(0x04, &reg_val);
+	PSB_DEBUG_TOPAZ("DMA_Count_n %08x\n", reg_val);
+	DMAC_READ32(0x08, &reg_val);
+	PSB_DEBUG_TOPAZ(" DMA_Peripheral_param_n %08x\n", reg_val);
+	DMAC_READ32(0x0C, &reg_val);
+	PSB_DEBUG_TOPAZ("DMA_IRQ_Stat_n %08x\n", reg_val);
+	DMAC_READ32(0x10, &reg_val);
+	PSB_DEBUG_TOPAZ("DMA_2D_Mode_n %08x\n", reg_val);
+	DMAC_READ32(0x14, &reg_val);
+	PSB_DEBUG_TOPAZ("DMA_Peripheral_addr_n %08x\n", reg_val);
+	DMAC_READ32(0x18, &reg_val);
+	PSB_DEBUG_TOPAZ("DMA_Per_hold %08x\n", reg_val);
+	return 0;
+}
+#endif
+
+int equal_func(uint32_t reqval, uint32_t regval, uint32_t mask)
+{
+	return ((reqval & mask) == (regval & mask));
+}
+
+int notequal_func(uint32_t reqval, uint32_t regval, uint32_t mask)
+{
+	return ((reqval & mask) != (regval & mask));
+}
+
+int tng_topaz_wait_for_register(
+	struct drm_psb_private *dev_priv,
+	uint32_t check_func,
+	uint32_t addr,
+	uint32_t value,
+	uint32_t mask)
+{
+#ifdef KPDUMP
+	printk(KERN_ERR "POL :REG_TOPAZHP_MULTICORE:0x%x 0x%x 0x%x\n",
+		addr, value, mask);
+#endif
+	uint32_t tmp;
+	uint32_t count = 10000;
+	int (*func)(uint32_t reqval, uint32_t regval, uint32_t mask);
+
+	switch (check_func) {
+	case CHECKFUNC_ISEQUAL:
+		func = &equal_func;
+		break;
+	case CHECKFUNC_NOTEQUAL:
+		func = &notequal_func;
+		break;
+	default:
+		func = &equal_func;
+		break;
+	}
+
+	/* # poll topaz register for certain times */
+	while (count) {
+		MM_READ32(addr, 0, &tmp);
+
+		if (func(value, tmp, mask))
+			return 0;
+
+		/* FIXME: use cpu_relax instead */
+		PSB_UDELAY(1000);/* derive from reference driver */
+		--count;
+	}
+
+	/* # now waiting is timeout, return 1 indicat failed */
+	/* XXX: testsuit means a timeout 10000 */
+
+	if (check_func == CHECKFUNC_ISEQUAL) {
+		DRM_ERROR("TOPAZ: Time out to poll addr(0x%x) " \
+			"expected value(0x%08x), " \
+			"actual 0x%08x = (0x%08x & 0x%08x)\n",
+			addr, value & mask, tmp & mask, tmp, mask);
+	} else if (check_func == CHECKFUNC_NOTEQUAL) {
+		DRM_ERROR("ERROR time out to poll addr(0x%x) expected " \
+			"0x%08x != (0x%08x & 0x%08x)\n",
+			addr, value & mask, tmp, mask);
+	}
+
+	return -EBUSY;
+
+}
+
+static ssize_t psb_topaz_pmstate_show(
+	struct device *dev,
+	struct device_attribute *attr,
+	char *buf)
+{
+	struct drm_device *drm_dev = dev_get_drvdata(dev);
+	struct drm_psb_private *dev_priv;
+	struct tng_topaz_private *topaz_priv;
+	unsigned int pmstate;
+	unsigned long flags;
+	int ret = -EINVAL;
+
+	if (drm_dev == NULL)
+		return 0;
+
+	dev_priv = drm_dev->dev_private;
+	topaz_priv = dev_priv->topaz_private;
+	pmstate = topaz_priv->pmstate;
+
+	pmstate = topaz_priv->pmstate;
+	spin_lock_irqsave(&topaz_priv->topaz_lock, flags);
+	ret = snprintf(buf, 64, "%s\n",
+		       (pmstate == PSB_PMSTATE_POWERUP) ?
+		       "powerup" : "powerdown");
+	spin_unlock_irqrestore(&topaz_priv->topaz_lock, flags);
+
+	return ret;
+}
+
+static DEVICE_ATTR(topaz_pmstate, 0444, psb_topaz_pmstate_show, NULL);
+
+#ifdef TOPAZHP_ENCODE_FPGA
+#define DEFAULT_TILE_STRIDE 0
+static void tng_topaz_mmu_configure(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_MMU_TILE(0),
+		F_ENCODE(1, TOPAZHP_TOP_CR_TILE_ENABLE) |
+		F_ENCODE(DEFAULT_TILE_STRIDE, TOPAZHP_TOP_CR_TILE_STRIDE) |
+			F_ENCODE((PSB_MEM_VRAM_START + 0x800000 +
+			pci_resource_len(dev->pdev, 2) - 0x800000) >> 20,
+		TOPAZHP_TOP_CR_TILE_MAX_ADDR) |
+			F_ENCODE((PSB_MEM_VRAM_START + 0x800000) >> 20,
+		TOPAZHP_TOP_CR_TILE_MIN_ADDR));
+}
+#endif
+
+void tng_topaz_mmu_enable_tiling(
+	struct drm_psb_private *dev_priv)
+{
+	uint32_t reg_val;
+	uint32_t min_addr = dev_priv->bdev.man[DRM_PSB_MEM_MMU_TILING].gpu_offset;
+	uint32_t max_addr = dev_priv->bdev.man[DRM_PSB_MEM_MMU_TILING].gpu_offset +
+			(dev_priv->bdev.man[DRM_PSB_MEM_MMU_TILING].size<<PAGE_SHIFT);
+	struct tng_topaz_private *topaz_priv = dev_priv->topaz_private;
+
+	if ((topaz_priv->frame_w>PSB_TOPAZ_TILING_THRESHOLD) ||
+		(topaz_priv->frame_h>PSB_TOPAZ_TILING_THRESHOLD))
+		min_addr = dev_priv->bdev.man[TTM_PL_TT].gpu_offset;
+
+	PSB_DEBUG_TOPAZ("TOPAZ: Enable tiled memory from %08x ~ %08x\n",
+			min_addr, max_addr);
+
+	reg_val = F_ENCODE(1, TOPAZHP_TOP_CR_TILE_ENABLE) | /* Enable tiling */
+		F_ENCODE(2, TOPAZHP_TOP_CR_TILE_STRIDE) | /* Set stride to 2048 as tiling is only used for 1080p */
+		F_ENCODE((max_addr>>20), TOPAZHP_TOP_CR_TILE_MAX_ADDR) | /* Set max address */
+		F_ENCODE((min_addr>>20), TOPAZHP_TOP_CR_TILE_MIN_ADDR); /* Set min address */
+
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_MMU_TILE_0, reg_val);
+
+}
+
+static int tng_topaz_query_queue(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct tng_topaz_private *topaz_priv = dev_priv->topaz_private;
+	int32_t ret = -1;
+
+	mutex_lock(&topaz_priv->topaz_mutex);
+	if (list_empty(&topaz_priv->topaz_queue)) {
+		/* topaz_priv->topaz_busy = 0; */
+		PSB_DEBUG_TOPAZ("TOPAZ: empty command queue\n");
+		ret = 0;
+	}
+	mutex_unlock(&topaz_priv->topaz_mutex);
+	return ret;
+}
+
+void tng_powerdown_topaz(struct work_struct *work)
+{
+	struct tng_topaz_private *topaz_priv =
+		container_of(to_delayed_work(work), struct tng_topaz_private,
+			     topaz_suspend_work);
+	struct drm_device *dev = (struct drm_device *)topaz_priv->dev;
+	int32_t ret = 0;
+
+	PSB_DEBUG_TOPAZ("TOPAZ: Task start\n");
+	if (Is_Secure_Fw()) {
+		ret = tng_topaz_query_queue(dev);
+		if (ret == 0) {
+			ret = tng_topaz_power_off(dev);
+			if (ret) {
+				DRM_ERROR("TOPAZ: Failed to power off");
+				return;
+			}
+		}
+		tng_topaz_dequeue_send(dev);
+	} else {
+		tng_topaz_dequeue_send(dev);
+		ret = tng_topaz_power_off(dev);
+		if (ret) {
+			DRM_ERROR("TOPAZ: Failed to power off");
+			return;
+		}
+	}
+
+	if (drm_topaz_cmdpolicy != PSB_CMDPOLICY_PARALLEL) {
+		atomic_set(&topaz_priv->cmd_wq_free, 1);
+		wake_up_interruptible(&topaz_priv->cmd_wq);
+	}
+
+	PSB_DEBUG_TOPAZ("TOPAZ: Task finish\n");
+	return;
+}
+
+/* this function finish the first part of initialization, the rest
+ * should be done in pnw_topaz_setup_fw
+ */
+int tng_topaz_init(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct ttm_bo_device *bdev = &dev_priv->bdev;
+	int ret = 0, n;
+	bool is_iomem;
+	struct tng_topaz_private *topaz_priv;
+	void *topaz_bo_virt;
+
+	PSB_DEBUG_TOPAZ("TOPAZ: init topazsc data structures\n");
+	topaz_priv = kmalloc(sizeof(struct tng_topaz_private), GFP_KERNEL);
+	if (topaz_priv == NULL)
+		return -1;
+	dev_priv->topaz_private = topaz_priv;
+	memset(topaz_priv, 0, sizeof(struct tng_topaz_private));
+
+	/* get device --> drm_device --> drm_psb_private --> topaz_priv
+	* for psb_topaz_pmstate_show: topaz_pmpolicy
+	* if not pci_set_drvdata, can't get drm_device from device
+	*/
+	pci_set_drvdata(dev->pdev, dev);
+
+	if (device_create_file(&dev->pdev->dev, &dev_attr_topaz_pmstate))
+		DRM_ERROR("TOPAZ: could not create sysfs file\n");
+	topaz_priv->sysfs_pmstate = sysfs_get_dirent(dev->pdev->dev.kobj.sd,
+						     NULL,
+						     "topaz_pmstate");
+
+	topaz_priv = dev_priv->topaz_private;
+	topaz_priv->dev = dev;
+	INIT_DELAYED_WORK(&topaz_priv->topaz_suspend_work,
+		&tng_powerdown_topaz);
+
+	INIT_LIST_HEAD(&topaz_priv->topaz_queue);
+	mutex_init(&topaz_priv->topaz_mutex);
+	spin_lock_init(&topaz_priv->topaz_lock);
+	spin_lock_init(&topaz_priv->ctx_spinlock);
+
+	topaz_priv->topaz_busy = 0;
+	topaz_priv->topaz_fw_loaded = 0;
+	topaz_priv->cur_codec = 0;
+	topaz_priv->topaz_hw_busy = 1;
+	topaz_priv->power_down_by_release = 0;
+	atomic_set(&topaz_priv->cmd_wq_free, 1);
+	atomic_set(&topaz_priv->vec_ref_count, 0);
+	init_waitqueue_head(&topaz_priv->cmd_wq);
+
+	topaz_priv->saved_queue = kzalloc(\
+			sizeof(struct tng_topaz_cmd_queue), \
+			GFP_KERNEL);
+	if (!topaz_priv->saved_queue) {
+		DRM_ERROR("TOPAZ: Failed to alloc memory for saved queue\n");
+		return -1;
+	}
+
+	/* Allocate a large enough buffer to save command */
+	topaz_priv->saved_cmd = kzalloc(MAX_CMD_SIZE, GFP_KERNEL);
+	if (!topaz_priv->saved_cmd) {
+		DRM_ERROR("TOPAZ: Failed to alloc memory for saved cmd\n");
+		return -1;
+	}
+
+	/* # gain write back structure,we may only need 32+4=40DW */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+	ret = ttm_buffer_object_create(bdev, 4096, ttm_bo_type_kernel,
+		DRM_PSB_FLAG_MEM_MMU | TTM_PL_FLAG_NO_EVICT,
+		0, 0, 0, NULL, &(topaz_priv->topaz_bo));
+#else
+	ret = ttm_buffer_object_create(bdev, 4096, ttm_bo_type_kernel,
+		DRM_PSB_FLAG_MEM_MMU | TTM_PL_FLAG_NO_EVICT,
+		0, 0, NULL, &(topaz_priv->topaz_bo));
+#endif
+	if (ret || (NULL==topaz_priv->topaz_bo)) {
+		DRM_ERROR("TOPAZ: failed to allocate topaz BO.\n");
+		if (topaz_priv->topaz_bo)
+		{
+			ttm_bo_unref(&topaz_priv->topaz_bo);
+		}
+		return ret;
+	}
+
+	ret = ttm_bo_kmap(topaz_priv->topaz_bo, 0,
+			  topaz_priv->topaz_bo->num_pages,
+			  &topaz_priv->topaz_bo_kmap);
+	if (ret) {
+		DRM_ERROR("TOPAZ: map topaz BO bo failed......\n");
+		ttm_bo_unref(&topaz_priv->topaz_bo);
+		return ret;
+	}
+
+	topaz_bo_virt = ttm_kmap_obj_virtual(&topaz_priv->topaz_bo_kmap,
+					     &is_iomem);
+
+	topaz_priv->topaz_sync_addr = (uint32_t *)topaz_bo_virt;
+	*topaz_priv->topaz_sync_addr = ~0;
+
+	topaz_priv->cur_context = NULL;
+
+	PSB_DEBUG_TOPAZ("TOPAZ: Create fiwmware text/data storage");
+	/* create firmware storage */
+	for (n = 0; n < IMG_CODEC_NUM; ++n) {
+		/* FIXME: 12*4096 wast mem? */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+		ret = ttm_buffer_object_create(bdev, 12 * 4096,
+			ttm_bo_type_kernel,
+			DRM_PSB_FLAG_MEM_MMU | TTM_PL_FLAG_NO_EVICT,
+			0, 0, 0, NULL, &topaz_priv->topaz_fw[n].text);
+#else
+		ret = ttm_buffer_object_create(bdev, 12 * 4096,
+			ttm_bo_type_kernel,
+			DRM_PSB_FLAG_MEM_MMU | TTM_PL_FLAG_NO_EVICT,
+			0, 0, NULL, &topaz_priv->topaz_fw[n].text);
+#endif
+
+		if (ret) {
+			DRM_ERROR("Failed to allocate memory " \
+				"for firmware text section.\n");
+			goto out;
+		}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+		ret = ttm_buffer_object_create(bdev, 12 * 4096,
+			ttm_bo_type_kernel,
+			DRM_PSB_FLAG_MEM_MMU | TTM_PL_FLAG_NO_EVICT,
+			0, 0, 0, NULL, &topaz_priv->topaz_fw[n].data);
+#else
+		ret = ttm_buffer_object_create(bdev, 12 * 4096,
+			ttm_bo_type_kernel,
+			DRM_PSB_FLAG_MEM_MMU | TTM_PL_FLAG_NO_EVICT,
+			0, 0, NULL, &topaz_priv->topaz_fw[n].data);
+#endif
+		if (ret) {
+			DRM_ERROR("Failed to allocate memory " \
+				"for firmware data section.\n");
+			goto out;
+		}
+	}
+
+	for (n = 0; n < MAX_TOPAZHP_CORES; n++)
+		topaz_priv->topaz_mtx_reg_state[n] = NULL;
+
+	topaz_priv->topaz_mtx_reg_state[0] = kzalloc(2 * PAGE_SIZE, GFP_KERNEL);
+	if (topaz_priv->topaz_mtx_reg_state[0] == NULL) {
+		DRM_ERROR("Failed to kzalloc mtx reg, OOM\n");
+		ret = -1;
+		goto out;
+	}
+
+	return ret;
+out:
+	for (n = 0; n < IMG_CODEC_NUM; ++n) {
+		if (topaz_priv->topaz_fw[n].text)
+			ttm_bo_unref(&topaz_priv->topaz_fw[n].text);
+		if (topaz_priv->topaz_fw[n].data)
+			ttm_bo_unref(&topaz_priv->topaz_fw[n].data);
+	}
+
+	ttm_bo_kunmap(&topaz_priv->topaz_bo_kmap);
+	ttm_bo_unref(&topaz_priv->topaz_bo);
+
+	kfree(topaz_priv);
+	dev_priv->topaz_private = NULL;
+
+	return ret;
+}
+
+int tng_topaz_reset(struct drm_psb_private *dev_priv)
+{
+	struct tng_topaz_private *topaz_priv;
+	uint32_t i;
+	uint32_t reg_val;
+
+	topaz_priv = dev_priv->topaz_private;
+	/* topaz_priv->topaz_busy = 0; */
+
+	topaz_priv->topaz_needs_reset = 0;
+
+	for (i = 0; i < topaz_priv->topaz_num_pipes; i++) {
+		/* # reset topaz */
+		PSB_DEBUG_TOPAZ("TOPAZ: reset pipe %d", i);
+		reg_val = F_ENCODE(1, TOPAZHP_CR_TOPAZHP_IPE_SOFT_RESET) |
+			F_ENCODE(1, TOPAZHP_CR_TOPAZHP_SPE_SOFT_RESET) |
+			F_ENCODE(1, TOPAZHP_CR_TOPAZHP_PC_SOFT_RESET) |
+			F_ENCODE(1, TOPAZHP_CR_TOPAZHP_H264COMP_SOFT_RESET) |
+			F_ENCODE(1, TOPAZHP_CR_TOPAZHP_JMCOMP_SOFT_RESET) |
+			F_ENCODE(1, TOPAZHP_CR_TOPAZHP_PREFETCH_SOFT_RESET) |
+			F_ENCODE(1, TOPAZHP_CR_TOPAZHP_VLC_SOFT_RESET) |
+			F_ENCODE(1, TOPAZHP_CR_TOPAZHP_DB_SOFT_RESET) |
+			F_ENCODE(1, TOPAZHP_CR_TOPAZHP_LTRITC_SOFT_RESET) ;
+
+		TOPAZCORE_WRITE32(i, TOPAZHP_CR_TOPAZHP_SRST, reg_val);
+		TOPAZCORE_READ32(i, TOPAZHP_CR_TOPAZHP_SRST, &reg_val);
+
+		TOPAZCORE_WRITE32(i, TOPAZHP_CR_TOPAZHP_SRST, 0);
+		TOPAZCORE_READ32(i, TOPAZHP_CR_TOPAZHP_SRST, &reg_val);
+	}
+
+	tng_topaz_mmu_hwsetup(dev_priv);
+
+	return 0;
+}
+
+/* FIXME: When D0i3 enabled,
+flush mmu and reset may have issue because hw power down */
+int tng_topaz_uninit(struct drm_device *dev)
+{
+	int n;
+	struct drm_psb_private *dev_priv;
+	struct tng_topaz_private *topaz_priv;
+	dev_priv = dev->dev_private;
+
+	if (NULL == dev_priv) {
+		PSB_DEBUG_TOPAZ("TOPAZ: dev_priv is NULL!\n");
+		return -1;
+	}
+
+	topaz_priv = dev_priv->topaz_private;
+
+	if (NULL == topaz_priv) {
+		PSB_DEBUG_TOPAZ("TOPAZ: topaz_priv is NULL!\n");
+		return -1;
+	}
+
+	tng_topaz_reset(dev_priv);
+
+	PSB_DEBUG_TOPAZ("TOPAZ: Release fiwmware text/data storage");
+
+	for (n = 0; n < IMG_CODEC_NUM; ++n) {
+		if (topaz_priv->topaz_fw[n].text != NULL)
+			ttm_bo_unref(&topaz_priv->topaz_fw[n].text);
+
+		if (topaz_priv->topaz_fw[n].data != NULL)
+			ttm_bo_unref(&topaz_priv->topaz_fw[n].data);
+	}
+
+	if (topaz_priv->topaz_bo) {
+		PSB_DEBUG_TOPAZ("TOPAZ: Release fiwmware topaz_bo storage");
+		ttm_bo_kunmap(&topaz_priv->topaz_bo_kmap);
+		ttm_bo_unref(&topaz_priv->topaz_bo);
+	}
+
+	if (topaz_priv) {
+		for(n = 0; n < MAX_TOPAZHP_CORES; n++) {
+			if (topaz_priv->topaz_mtx_reg_state[n] != NULL) {
+				PSB_DEBUG_TOPAZ("TOPAZ: Free mtx reg\n");
+				kfree(topaz_priv->topaz_mtx_reg_state[n]);
+				topaz_priv->topaz_mtx_reg_state[n] = NULL;
+			}
+		}
+
+		pci_set_drvdata(dev->pdev, NULL);
+		device_remove_file(&dev->pdev->dev,
+				   &dev_attr_topaz_pmstate);
+		sysfs_put(topaz_priv->sysfs_pmstate);
+		topaz_priv->sysfs_pmstate = NULL;
+
+		kfree(topaz_priv);
+		dev_priv->topaz_private = NULL;
+	}
+
+	return 0;
+}
+
+
+static void tng_set_auto_clk_gating(
+	struct drm_device *dev,
+	enum drm_tng_topaz_codec codec,
+	uint32_t gating)
+{
+	uint32_t reg_val;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	PSB_DEBUG_TOPAZ("TOPAZ: Setting automatic clock gating on ");
+	PSB_DEBUG_TOPAZ("codec %d to %d\n", codec, gating);
+
+	reg_val = F_ENCODE(1, TOPAZHP_TOP_CR_WRITES_CORE_ALL);
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_MULTICORE_CORE_SEL_0,
+			  reg_val);
+
+	/* enable auto clock is essential for this driver */
+	reg_val = F_ENCODE(1, TOPAZHP_CR_TOPAZHP_VLC_AUTO_CLK_GATE) |
+		  F_ENCODE(1, TOPAZHP_CR_TOPAZHP_DEB_AUTO_CLK_GATE) |
+		  F_ENCODE(1, TOPAZHP_CR_TOPAZHP_IPE0_AUTO_CLK_GATE) |
+		  F_ENCODE(1, TOPAZHP_CR_TOPAZHP_IPE1_AUTO_CLK_GATE) |
+		  F_ENCODE(1, TOPAZHP_CR_TOPAZHP_SPE0_AUTO_CLK_GATE) |
+		  F_ENCODE(1, TOPAZHP_CR_TOPAZHP_SPE1_AUTO_CLK_GATE) |
+		  F_ENCODE(1, TOPAZHP_CR_TOPAZHP_H264COMP4X4_AUTO_CLK_GATE) |
+		  F_ENCODE(1, TOPAZHP_CR_TOPAZHP_H264COMP8X8_AUTO_CLK_GATE)|
+		  F_ENCODE(1, TOPAZHP_CR_TOPAZHP_H264COMP16X16_AUTO_CLK_GATE) |
+		  F_ENCODE(1, TOPAZHP_CR_TOPAZHP_JMCOMP_AUTO_CLK_GATE)|
+		  F_ENCODE(1, TOPAZHP_CR_TOPAZHP_PC_DM_AUTO_CLK_GATE) |
+		  F_ENCODE(1, TOPAZHP_CR_TOPAZHP_PC_DMS_AUTO_CLK_GATE)|
+		  F_ENCODE(1, TOPAZHP_CR_TOPAZHP_CABAC_AUTO_CLK_GATE);
+
+	TOPAZCORE_WRITE32(0, TOPAZHP_CR_TOPAZHP_AUTO_CLOCK_GATING, reg_val);
+
+	if (codec != IMG_CODEC_JPEG) {
+		reg_val = 0;
+		TOPAZCORE_READ32(0, TOPAZHP_CR_TOPAZHP_MAN_CLOCK_GATING,
+			&reg_val);
+
+		/* Disable LRITC clocks */
+		reg_val = F_INSERT(reg_val, 1,
+			TOPAZHP_CR_TOPAZHP_LRITC_MAN_CLK_GATE);
+
+#if  (defined(ENABLE_PREFETCH_GATING))
+		/* Disable PREFETCH clocks */
+		reg_val = F_INSERT(reg_val , 1,
+			TOPAZHP_CR_TOPAZHP_PREFETCH_MAN_CLK_GATE);
+#endif
+		TOPAZCORE_WRITE32(0,
+			TOPAZHP_CR_TOPAZHP_MAN_CLOCK_GATING, reg_val);
+	}
+
+	reg_val = F_ENCODE(0, TOPAZHP_TOP_CR_WRITES_CORE_ALL);
+
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_MULTICORE_CORE_SEL_0, reg_val);
+}
+
+static void tng_get_bank_size(
+	struct drm_device *dev,
+	struct psb_video_ctx *video_ctx,
+	enum drm_tng_topaz_codec codec)
+{
+	uint32_t last_bank_ram_size;
+	struct drm_psb_private *dev_priv;
+	struct tng_topaz_private *topaz_priv;
+
+	dev_priv = dev->dev_private;
+	topaz_priv = dev_priv->topaz_private;
+
+	MULTICORE_READ32(TOPAZHP_TOP_CR_MTX_DEBUG_MSTR,
+			 &video_ctx->mtx_debug_val);
+
+	last_bank_ram_size = 0x1 << (F_EXTRACT(video_ctx->mtx_debug_val,
+			TOPAZHP_TOP_CR_MTX_MSTR_LAST_RAM_BANK_SIZE) + 2);
+
+	video_ctx->mtx_bank_size = 0x1 << (F_EXTRACT(video_ctx->mtx_debug_val,
+			TOPAZHP_TOP_CR_MTX_MSTR_RAM_BANK_SIZE) + 2);
+
+	video_ctx->mtx_ram_size = last_bank_ram_size +
+			(video_ctx->mtx_bank_size *
+			(F_EXTRACT(video_ctx->mtx_debug_val,
+			TOPAZHP_TOP_CR_MTX_MSTR_RAM_BANKS) - 1));
+
+	PSB_DEBUG_TOPAZ("TOPAZ: Last bank ram size: %08x\n",
+		last_bank_ram_size);
+	PSB_DEBUG_TOPAZ("TOPAZ: mtx bank size: %08x, mtx ram size: %08x\n",
+		video_ctx->mtx_bank_size, video_ctx->mtx_ram_size);
+}
+
+/* setup fw when start a new context */
+int tng_topaz_init_board(
+	struct drm_device *dev,
+	struct psb_video_ctx *video_ctx,
+	enum drm_tng_topaz_codec codec)
+{
+	struct drm_psb_private *dev_priv;
+	struct tng_topaz_private *topaz_priv;
+	int32_t i;
+	uint32_t reg_val = 0;
+
+	dev_priv = dev->dev_private;
+	topaz_priv = dev_priv->topaz_private;
+
+	/*psb_irq_uninstall_islands(dev, OSPM_VIDEO_ENC_ISLAND);*/
+
+	PSB_DEBUG_TOPAZ("TOPAZ: Init board\n");
+
+	MULTICORE_READ32(TOPAZHP_TOP_CR_MULTICORE_HW_CFG, &reg_val);
+
+	MULTICORE_READ32(TOPAZHP_TOP_CR_MULTICORE_HW_CFG,
+		&topaz_priv->topaz_num_pipes);
+
+	topaz_priv->topaz_num_pipes =
+		F_EXTRACT(topaz_priv->topaz_num_pipes,
+			TOPAZHP_TOP_CR_NUM_CORES_SUPPORTED);
+
+	PSB_DEBUG_TOPAZ("TOPAZ: Number of pipes: %d\n", \
+			topaz_priv->topaz_num_pipes);
+
+	if (topaz_priv->topaz_num_pipes > TOPAZHP_PIPE_NUM) {
+		DRM_ERROR("TOPAZ: Number of pipes: %d\n",
+			topaz_priv->topaz_num_pipes);
+		topaz_priv->topaz_num_pipes = TOPAZHP_PIPE_NUM;
+	}
+
+	reg_val = F_ENCODE(1, TOPAZHP_TOP_CR_IMG_TOPAZ_MTX_SOFT_RESET) |
+		F_ENCODE(1, TOPAZHP_TOP_CR_IMG_TOPAZ_CORE_SOFT_RESET) |
+		F_ENCODE(1, TOPAZHP_TOP_CR_IMG_TOPAZ_IO_SOFT_RESET);
+
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_MULTICORE_SRST, reg_val);
+
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_MULTICORE_SRST, 0x0);
+
+	for (i = 0; i < topaz_priv->topaz_num_pipes; i++) {
+		PSB_DEBUG_TOPAZ("TOPAZ: Reset topaz registers for pipe %d",
+			i);
+		reg_val = F_ENCODE(1, TOPAZHP_CR_TOPAZHP_IPE_SOFT_RESET) |
+			F_ENCODE(1, TOPAZHP_CR_TOPAZHP_SPE_SOFT_RESET) |
+			F_ENCODE(1, TOPAZHP_CR_TOPAZHP_PC_SOFT_RESET) |
+			F_ENCODE(1, TOPAZHP_CR_TOPAZHP_H264COMP_SOFT_RESET) |
+			F_ENCODE(1, TOPAZHP_CR_TOPAZHP_JMCOMP_SOFT_RESET) |
+			F_ENCODE(1, TOPAZHP_CR_TOPAZHP_PREFETCH_SOFT_RESET) |
+			F_ENCODE(1, TOPAZHP_CR_TOPAZHP_VLC_SOFT_RESET) |
+			F_ENCODE(1, TOPAZHP_CR_TOPAZHP_DB_SOFT_RESET) |
+			F_ENCODE(1, TOPAZHP_CR_TOPAZHP_LTRITC_SOFT_RESET);
+
+		TOPAZCORE_WRITE32(i, TOPAZHP_CR_TOPAZHP_SRST, reg_val);
+
+		TOPAZCORE_WRITE32(i, TOPAZHP_CR_TOPAZHP_SRST, 0);
+	}
+
+	tng_topaz_mmu_hwsetup(dev_priv);
+
+	tng_set_producer(dev, 0);
+	tng_set_consumer(dev, 0);
+	return 0;
+}
+
+int tng_topaz_setup_fw(
+	struct drm_device *dev,
+	struct psb_video_ctx *video_ctx,
+	enum drm_tng_topaz_codec codec)
+{
+	struct drm_psb_private *dev_priv;
+	struct tng_topaz_private *topaz_priv;
+	int32_t ret = 0;
+	uint32_t reg_val = 0;
+
+	dev_priv = dev->dev_private;
+	topaz_priv = dev_priv->topaz_private;
+
+	/*psb_irq_uninstall_islands(dev, OSPM_VIDEO_ENC_ISLAND);*/
+
+	PSB_DEBUG_TOPAZ("TOPAZ: Setup firmware\n");
+
+	tng_get_bank_size(dev, video_ctx, codec);
+
+	/* Soft reset of MTX */
+	reg_val = F_ENCODE(1, TOPAZHP_TOP_CR_IMG_TOPAZ_MTX_SOFT_RESET) |
+		  F_ENCODE(1, TOPAZHP_TOP_CR_IMG_TOPAZ_CORE_SOFT_RESET);
+
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_MULTICORE_SRST, reg_val);
+
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_MULTICORE_SRST, 0x0);
+
+	/*
+	 * clear TOHOST register now so that our ISR doesn't see any
+	 * intermediate value before the FW has output anything
+	*/
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_FIRMWARE_REG_1 +
+			 (MTX_SCRATCHREG_TOHOST << 2), 0);
+	/*
+	 * clear BOOTSTATUS register.  Firmware will write to this
+	 * to indicate firmware boot progress
+	*/
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_FIRMWARE_REG_1 +
+			 (MTX_SCRATCHREG_BOOTSTATUS << 2), 0);
+
+	tng_set_auto_clk_gating(dev, codec, 1);
+
+	PSB_DEBUG_TOPAZ("TOPAZ: will upload firmware to %d pipes\n",
+			  topaz_priv->topaz_num_pipes);
+
+	ret = mtx_upload_fw(dev, codec, video_ctx);
+	if (ret) {
+		DRM_ERROR("Failed to upload firmware for codec %s\n",
+				codec_to_string(codec));
+		/*tng_error_dump_reg(dev_priv);*/
+		return ret;
+	}
+
+	/* flush the command FIFO - only has effect on master MTX */
+	reg_val = F_ENCODE(1, TOPAZHP_TOP_CR_CMD_FIFO_FLUSH);
+
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_TOPAZ_CMD_FIFO_FLUSH, reg_val);
+
+	/*
+	 * we do not want to run in secre FW mode so write a place
+	 * holder to the FIFO that the firmware will know to ignore
+	*/
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_MULTICORE_CMD_FIFO_WRITE,
+			  TOPAZHP_NON_SECURE_FW_MARKER);
+
+	/* Clear FW_IDLE_STATUS register */
+	MULTICORE_WRITE32(MTX_SCRATCHREG_IDLE, 0);
+
+	/* turn on MTX */
+	mtx_start(dev);
+	mtx_kick(dev);
+
+	/* While we aren't using comm serialization, we do still need to ensure
+	 * that the firmware has completed it's setup before continuing
+	 */
+	PSB_DEBUG_TOPAZ("TOPAZ: Polling to ensure " \
+		"firmware has completed its setup before continuing\n");
+	ret = tng_topaz_wait_for_register(
+		dev_priv, CHECKFUNC_ISEQUAL,
+		TOPAZHP_TOP_CR_FIRMWARE_REG_1 + (MTX_SCRATCHREG_BOOTSTATUS << 2),
+		TOPAZHP_FW_BOOT_SIGNAL, 0xffffffff);
+	if (ret) {
+		DRM_ERROR("Firmware failed to complete its setup\n");
+		return ret;
+	}
+
+	PSB_DEBUG_TOPAZ("TOPAZ: Firmware uploaded successfully.\n");
+
+	/* tng_topaz_enableirq(dev); */
+#ifdef TOPAZHP_IRQ_ENABLED
+	psb_irq_preinstall_islands(dev, OSPM_VIDEO_ENC_ISLAND);
+	psb_irq_postinstall_islands(dev, OSPM_VIDEO_ENC_ISLAND);
+
+	tng_topaz_enableirq(dev);
+#endif
+
+	return ret;
+}
+
+#ifdef MRFLD_B0_DEBUG
+static int pm_cmd_wait(u32 reg_addr, u32 reg_mask)
+{
+	int tcount;
+	u32 reg_val;
+
+	for (tcount = 0; ; tcount++) {
+		reg_val = intel_mid_msgbus_read32(PUNIT_PORT, reg_addr);
+		if ((reg_val & reg_mask) != 0)
+			break;
+		if (tcount > 500) {
+			DRM_ERROR(1, "%s: wait 0x%08x from 0x%08x timeout",
+			__func__, reg_val, reg_addr);
+			return -EBUSY;
+		}
+		udelay(1);
+	}
+
+	return 0;
+}
+#endif
+
+int tng_topaz_fw_run(
+	struct drm_device *dev,
+	struct psb_video_ctx *video_ctx,
+	enum drm_tng_topaz_codec codec)
+{
+	struct drm_psb_private *dev_priv;
+	struct tng_topaz_private *topaz_priv;
+	int32_t ret = 0;
+	uint32_t reg_val = 0;
+
+	dev_priv = dev->dev_private;
+	topaz_priv = dev_priv->topaz_private;
+
+	reg_val = codec;
+
+	PSB_DEBUG_TOPAZ("TOPAZ: check PUnit load %d fw status\n", codec);
+
+	reg_val = intel_mid_msgbus_read32(PUNIT_PORT, VEC_SS_PM0);
+	PSB_DEBUG_TOPAZ("Read R(0x%08x) V(0x%08x)\n",
+		VEC_SS_PM0, reg_val);
+
+	tng_get_bank_size(dev, video_ctx, codec);
+
+	/* clock gating */
+	reg_val = F_ENCODE(1, TOPAZHP_CR_TOPAZHP_VLC_AUTO_CLK_GATE) |
+		  F_ENCODE(1, TOPAZHP_CR_TOPAZHP_DEB_AUTO_CLK_GATE) |
+		  F_ENCODE(1, TOPAZHP_CR_TOPAZHP_IPE0_AUTO_CLK_GATE) |
+		  F_ENCODE(1, TOPAZHP_CR_TOPAZHP_IPE1_AUTO_CLK_GATE) |
+		  F_ENCODE(1, TOPAZHP_CR_TOPAZHP_SPE0_AUTO_CLK_GATE) |
+		  F_ENCODE(1, TOPAZHP_CR_TOPAZHP_SPE1_AUTO_CLK_GATE) |
+		  F_ENCODE(1, TOPAZHP_CR_TOPAZHP_H264COMP4X4_AUTO_CLK_GATE) |
+		  F_ENCODE(1, TOPAZHP_CR_TOPAZHP_H264COMP8X8_AUTO_CLK_GATE)|
+		  F_ENCODE(1, TOPAZHP_CR_TOPAZHP_H264COMP16X16_AUTO_CLK_GATE) |
+		  F_ENCODE(1, TOPAZHP_CR_TOPAZHP_JMCOMP_AUTO_CLK_GATE)|
+		  F_ENCODE(1, TOPAZHP_CR_TOPAZHP_PC_DM_AUTO_CLK_GATE) |
+		  F_ENCODE(1, TOPAZHP_CR_TOPAZHP_PC_DMS_AUTO_CLK_GATE)|
+		  F_ENCODE(1, TOPAZHP_CR_TOPAZHP_CABAC_AUTO_CLK_GATE);
+
+	TOPAZCORE_WRITE32(0, TOPAZHP_CR_TOPAZHP_AUTO_CLOCK_GATING, reg_val);
+
+
+	/* FIFO_FLUSH */
+	/* write 0x78, 0x60, 0x300 */
+	reg_val = F_ENCODE(1, TOPAZHP_TOP_CR_CMD_FIFO_FLUSH);
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_TOPAZ_CMD_FIFO_FLUSH, reg_val);
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_MULTICORE_CMD_FIFO_WRITE,
+		0);
+		/* TOPAZHP_NON_SECURE_FW_MARKER); */
+		/* reg_val); */
+
+	if (codec != IMG_CODEC_JPEG) /* Not JPEG */
+		MULTICORE_WRITE32(MTX_SCRATCHREG_IDLE, 0);
+
+	/* set up mmu */
+	tng_topaz_mmu_hwsetup(dev_priv);
+
+	/* write 50 */
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_MULTICORE_CORE_SEL_0, 0);
+	mtx_start(dev);
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_MULTICORE_CORE_SEL_0, 0);
+	mtx_kick(dev);
+
+#ifdef MRFLD_B0_DEBUG
+	udelay(10);
+#endif
+
+	ret = tng_topaz_wait_for_register(
+		dev_priv, CHECKFUNC_ISEQUAL,
+		TOPAZHP_TOP_CR_FIRMWARE_REG_1 + (MTX_SCRATCHREG_BOOTSTATUS << 2),
+		TOPAZHP_FW_BOOT_SIGNAL, 0xffffffff);
+
+#ifdef MRFLD_B0_DEBUG
+	/* read 104 */
+	MULTICORE_READ32(TOPAZHP_TOP_CR_FIRMWARE_REG_1, &reg_val);
+	PSB_DEBUG_TOPAZ("TOPAZ: read 100 (0x%08x)\n", reg_val);
+#endif
+
+	if (ret) {
+		DRM_ERROR("Firmware failed to run on B0\n");
+		return ret;
+	}
+
+	/* flush cache */
+	tng_topaz_mmu_flushcache(dev_priv);
+
+#ifdef TOPAZHP_IRQ_ENABLED
+	psb_irq_preinstall_islands(dev, OSPM_VIDEO_ENC_ISLAND);
+	psb_irq_postinstall_islands(dev, OSPM_VIDEO_ENC_ISLAND);
+
+	tng_topaz_enableirq(dev);
+#endif
+
+	PSB_DEBUG_TOPAZ("TOPAZ: Firmware uploaded successfully.\n");
+
+	return ret;
+
+}
+
+
+int mtx_upload_fw(struct drm_device *dev,
+		  enum drm_tng_topaz_codec codec,
+		  struct psb_video_ctx *video_ctx)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	const struct tng_secure_fw *cur_codec_fw;
+	uint32_t text_size, data_size;
+	uint32_t data_location;
+	struct tng_topaz_private *topaz_priv = dev_priv->topaz_private;
+	int ret = 0;
+	uint32_t verify_pc;
+	int i;
+
+	if (codec >= IMG_CODEC_NUM) {
+		DRM_ERROR("TOPAZ: Invalid codec %d\n", codec);
+		return -1;
+	}
+
+	/* set target to current or all MTXs */
+	mtx_set_target(dev_priv);
+
+	/* MTX reset */
+	MTX_WRITE32(MTX_CR_MTX_SOFT_RESET,
+		    MASK_MTX_MTX_RESET);
+
+	/* upload the master and slave firmware by DMA */
+	cur_codec_fw = &topaz_priv->topaz_fw[codec];
+
+	PSB_DEBUG_TOPAZ("TOPAZ: Upload firmware");
+	PSB_DEBUG_TOPAZ("text_location = %08x, text_size = %d\n",
+		MTX_DMA_MEMORY_BASE, cur_codec_fw->text_size);
+	PSB_DEBUG_TOPAZ("data_location = %08x, data_size = %d\n",
+		cur_codec_fw->data_loca, cur_codec_fw->data_size);
+
+	/* upload text. text_size is byte size*/
+	text_size = cur_codec_fw->text_size / 4;
+	/* adjust transfer sizes of text and data sections to match burst size*/
+	text_size = ((text_size * 4 + (MTX_DMA_BURSTSIZE_BYTES - 1)) &
+		    ~(MTX_DMA_BURSTSIZE_BYTES - 1)) / 4;
+
+	PSB_DEBUG_TOPAZ("TOPAZ: Text size round up to %d\n", text_size);
+	/* setup the MTX to start recieving data:
+	   use a register for the transfer which will point to the source
+	   (MTX_CR_MTX_SYSC_CDMAT) */
+	/*MTX burst size (4 * 2 * 32bits = 32bytes) should match DMA burst
+	  size (2 * 128bits = 32bytes) */
+	/* #.# fill the dst addr */
+
+	/* Transfer the text section */
+
+	PSB_DEBUG_TOPAZ("TOPAZ: Text offset %08x\n",
+		(unsigned int)cur_codec_fw->text->offset);
+
+	ret = mtx_dmac_transfer(dev_priv, 0, cur_codec_fw->text->offset, 0,
+		MTX_DMA_MEMORY_BASE, text_size, 1);
+	if (ret) {
+		DRM_ERROR("Failed to transfer text by DMA\n");
+		/* tng_error_dump_reg(dev_priv); */
+		return ret;
+	}
+
+	/* wait for it to finish */
+	ret = tng_topaz_wait_for_register(dev_priv, CHECKFUNC_ISEQUAL,
+			REG_OFFSET_TOPAZ_DMAC + IMG_SOC_DMAC_IRQ_STAT(0),
+			F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
+			F_ENCODE(1, IMG_SOC_TRANSFER_FIN));
+	if (ret) {
+		DRM_ERROR("Transfer text by DMA timeout\n");
+		/* tng_error_dump_reg(dev_priv); */
+		return ret;
+	}
+
+	/* clear the interrupt */
+	DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
+
+	PSB_DEBUG_TOPAZ("TOPAZ: Firmware text upload complete.\n");
+
+	/* # upload data */
+	data_size = cur_codec_fw->data_size / 4;
+	data_size = ((data_size * 4 + (MTX_DMA_BURSTSIZE_BYTES - 1)) &
+			~(MTX_DMA_BURSTSIZE_BYTES - 1)) / 4;
+
+	data_location = cur_codec_fw->data_loca;
+
+	ret = mtx_dmac_transfer(dev_priv, 0,
+		cur_codec_fw->data->offset,
+		0, /*offset + 0 = source address*/
+		data_location, data_size, 1);
+	if (ret) {
+		DRM_ERROR("Failed to transfer data by DMA\n");
+		/* tng_error_dump_reg(dev_priv); */
+		return ret;
+	}
+
+	/* wait for it to finish */
+	ret = tng_topaz_wait_for_register(dev_priv, CHECKFUNC_ISEQUAL,
+			REG_OFFSET_TOPAZ_DMAC + IMG_SOC_DMAC_IRQ_STAT(0),
+			F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
+			F_ENCODE(1, IMG_SOC_TRANSFER_FIN));
+	if (ret) {
+		DRM_ERROR("Transfer data by DMA timeout\n");
+		/* tng_error_dump_reg(dev_priv); */
+		return ret;
+	}
+
+	/* clear the interrupt */
+	DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
+
+	tng_topaz_mmu_flushcache(dev_priv);
+
+	/* D0.5, D0.6 and D0.7 */
+	for (i = 5; i < 8; i++)  {
+		ret = mtx_write_core_reg(dev_priv, 0x1 | (i << 4), 0);
+		if (ret) {
+			DRM_ERROR("Failed to write core reg");
+			return ret;
+		}
+	}
+
+	/* Restore 8 Registers of D1 Bank, D1Re0,
+	D1Ar5, D1Ar3, D1Ar1, D1RtP, D1.5, D1.6 and D1.7 */
+	for (i = 5; i < 8; i++) {
+		ret = mtx_write_core_reg(dev_priv, 0x2 | (i << 4), 0);
+		if (ret) {
+			DRM_ERROR("Failed to read core reg");
+			return ret;
+		}
+	}
+
+	PSB_DEBUG_TOPAZ("TOPAZ: setting up pc address 0x%08x\n",
+		PC_START_ADDRESS);
+
+	/* Set Starting PC address */
+	ret = mtx_write_core_reg(dev_priv, MTX_PC, PC_START_ADDRESS);
+	if (ret) {
+		DRM_ERROR("Failed to write core reg");
+		return ret;
+	}
+
+	ret = mtx_read_core_reg(dev_priv, MTX_PC, &verify_pc);
+	if (ret) {
+		DRM_ERROR("Failed to read core reg");
+		return ret;
+	}
+
+	if (verify_pc != PC_START_ADDRESS) {
+		DRM_ERROR("TOPAZ: Wrong starting PC address");
+		return -1;
+	} else {
+		PSB_DEBUG_TOPAZ("TOPAZ: verify pc address = 0x%08x\n",
+				  verify_pc);
+	}
+
+	PSB_DEBUG_TOPAZ("TOPAZ: Firmware data upload complete.\n");
+
+	return 0;
+}
+
+int mtx_dmac_transfer(struct drm_psb_private *dev_priv, uint32_t channel,
+		     uint32_t src_phy_addr, uint32_t offset,
+		     uint32_t mtx_addr, uint32_t byte_num,
+		     uint32_t is_write)
+{
+	uint32_t dmac_count;
+	uint32_t irq_stat;
+	uint32_t count;
+
+	/* check that no transfer is currently in progress */
+	DMAC_READ32(IMG_SOC_DMAC_COUNT(channel), &dmac_count);
+	if (0 != (dmac_count & (MASK_IMG_SOC_EN | MASK_IMG_SOC_LIST_EN))) {
+		DRM_ERROR("TOPAZ: there is tranfer in progress (0x%08x)\n",
+			dmac_count);
+		return -1;
+	}
+
+	/* clear status of any previous interrupts */
+	DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(channel), 0);
+
+	/* and that no interrupts are outstanding */
+	DMAC_READ32(IMG_SOC_DMAC_IRQ_STAT(channel), &irq_stat);
+	if (0 != irq_stat) {
+		DRM_ERROR("TOPAZ: there is hold up\n");
+		return -1;
+	}
+
+	/* Transfer the data section */
+	/* MTX Address */
+	MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA, mtx_addr);
+
+	MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC,
+		    F_ENCODE(4, MTX_BURSTSIZE) |
+		    F_ENCODE(is_write ? 0 : 1, MTX_RNW) |
+		    F_ENCODE(1, MTX_ENABLE) |
+		    F_ENCODE(byte_num, MTX_LENGTH));
+
+	/* Write System DMAC registers */
+	/* per hold - allow HW to sort itself out */
+	DMAC_WRITE32(IMG_SOC_DMAC_PER_HOLD(channel), 16);
+
+	/* clear previous interrupts */
+	/* DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(channel), 0);*/
+
+	/* Set start address */
+	DMAC_WRITE32(IMG_SOC_DMAC_SETUP(channel), (src_phy_addr + offset));
+
+	/* count reg */
+	count = DMAC_VALUE_COUNT(DMAC_BSWAP_NO_SWAP, DMAC_PWIDTH_32_BIT,
+			(is_write ? 0 : 1), DMAC_PWIDTH_32_BIT, byte_num);
+	/* generate an interrupt at the end of transfer */
+	count |= MASK_IMG_SOC_TRANSFER_IEN;
+
+	/*count |= F_ENCODE(is_write, IMG_SOC_DIR);*/
+	DMAC_WRITE32(IMG_SOC_DMAC_COUNT(channel), count);
+
+	/* don't inc address, set burst size */
+	DMAC_WRITE32(IMG_SOC_DMAC_PERIPH(channel),
+		DMAC_VALUE_PERIPH_PARAM(DMAC_ACC_DEL_0, 0, DMAC_BURST_2));
+
+	/* Target correct MTX DMAC port */
+	DMAC_WRITE32(IMG_SOC_DMAC_PERIPHERAL_ADDR(channel),
+		MTX_CR_MTX_SYSC_CDMAT + REG_START_TOPAZ_MTX_HOST);
+
+	/* Finally, rewrite the count register
+	with the enable bit set to kick off the transfer */
+	DMAC_WRITE32(IMG_SOC_DMAC_COUNT(channel),
+		count | MASK_IMG_SOC_EN);
+
+	/* DMAC_READ32(IMG_SOC_DMAC_COUNT(channel), &tmp);*/
+
+	return 0;
+}
+
+int32_t mtx_write_core_reg(
+	struct drm_psb_private *dev_priv,
+	uint32_t reg,
+	const uint32_t val)
+{
+	int32_t ret = 0;
+	ret = get_mtx_control_from_dash(dev_priv);
+	if (ret) {
+		DRM_ERROR("Failed to get control from dash");
+		return ret;
+	}
+
+	/* put data into MTX_RW_DATA */
+	MTX_WRITE32(MTX_CR_MTX_REGISTER_READ_WRITE_DATA, val);
+
+	/* DREADY is set to 0 and request a write*/
+	MTX_WRITE32(MTX_CR_MTX_REGISTER_READ_WRITE_REQUEST,
+		reg & ~MASK_MTX_MTX_DREADY);
+
+	/* wait for operation finished */
+	ret = tng_topaz_wait_for_register(dev_priv, CHECKFUNC_ISEQUAL,
+		REG_OFFSET_TOPAZ_MTX + MTX_CR_MTX_REGISTER_READ_WRITE_REQUEST,
+		MASK_MTX_MTX_DREADY, MASK_MTX_MTX_DREADY);
+	if (ret) {
+		DRM_ERROR("Wait for register timeout");
+		return ret;
+	}
+
+	release_mtx_control_from_dash(dev_priv);
+
+	return ret;
+}
+
+int32_t mtx_read_core_reg(
+	struct drm_psb_private *dev_priv,
+	uint32_t reg,
+	uint32_t *ret_val)
+{
+	int32_t ret = 0;
+
+	ret = get_mtx_control_from_dash(dev_priv);
+	if (ret) {
+		DRM_ERROR("Failed to get control from dash");
+		return ret;
+	}
+
+	/* Issue read request */
+	MTX_WRITE32(MTX_CR_MTX_REGISTER_READ_WRITE_REQUEST,
+		    (MASK_MTX_MTX_RNW | reg) & ~MASK_MTX_MTX_DREADY);
+
+	/* Wait for done */
+	ret = tng_topaz_wait_for_register(dev_priv, CHECKFUNC_ISEQUAL,
+		REG_OFFSET_TOPAZ_MTX + MTX_CR_MTX_REGISTER_READ_WRITE_REQUEST,
+		MASK_MTX_MTX_DREADY, MASK_MTX_MTX_DREADY);
+	if (ret) {
+		DRM_ERROR("Wait for register timeout");
+		return ret;
+	}
+
+	/* Read */
+	MTX_READ32(MTX_CR_MTX_REGISTER_READ_WRITE_DATA, ret_val);
+
+	release_mtx_control_from_dash(dev_priv);
+
+	return ret;
+}
+
+static int32_t get_mtx_control_from_dash(struct drm_psb_private *dev_priv)
+{
+	struct tng_topaz_private *topaz_priv = dev_priv->topaz_private;
+	uint32_t count = 0;
+	uint32_t reg_val = 0;
+
+	/* Request the bus from the Dash...*/
+	reg_val = F_ENCODE(1, TOPAZHP_TOP_CR_MTX_MSTR_DBG_IS_SLAVE) |
+		  F_ENCODE(0x2, TOPAZHP_TOP_CR_MTX_MSTR_DBG_GPIO_IN);
+
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_MTX_DEBUG_MSTR, reg_val);
+
+	do {
+		MULTICORE_READ32(TOPAZHP_TOP_CR_MTX_DEBUG_MSTR, &reg_val);
+		count++;
+	} while (((reg_val & 0x18) != 0) && count < 50000);
+
+	if (count >= 50000) {
+		DRM_ERROR("TOPAZ: timeout in getting control from dash");
+		return -1;
+	}
+
+	/* Save the access control register...*/
+	MTX_READ32(MTX_CR_MTX_RAM_ACCESS_CONTROL,
+		&topaz_priv->topaz_dash_access_ctrl);
+
+	return 0;
+}
+
+static void release_mtx_control_from_dash(struct drm_psb_private *dev_priv)
+{
+	struct tng_topaz_private *topaz_priv = dev_priv->topaz_private;
+	uint32_t reg_val;
+
+	/* Restore the access control register...*/
+	MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_CONTROL,
+		topaz_priv->topaz_dash_access_ctrl);
+
+	/* Release the bus...*/
+	reg_val = F_ENCODE(1, TOPAZHP_TOP_CR_MTX_MSTR_DBG_IS_SLAVE);
+
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_MTX_DEBUG_MSTR, reg_val);
+}
+
+void tng_topaz_mmu_hwsetup(struct drm_psb_private *dev_priv)
+{
+	uint32_t reg_val = 0, pd_addr = 0;
+
+	PSB_DEBUG_TOPAZ("TOPAZ: Setup MMU\n");
+
+	/* pd_addr = TOPAZHP_MMU_BASE; */
+	pd_addr = psb_get_default_pd_addr(dev_priv->mmu);
+	/* bypass all request while MMU is being configured */
+	reg_val = F_ENCODE(1, TOPAZHP_TOP_CR_MMU_BYPASS_TOPAZ);
+
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_MMU_CONTROL0, reg_val);
+
+	/* set MMU hardware at the page table directory */
+	PSB_DEBUG_TOPAZ("TOPAZ: write PD phyaddr=0x%08x " \
+		"into MMU_DIR_LIST0/1\n", pd_addr);
+
+	/*There's two of these (0) and (1).. only 0 is currently used*/
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_MMU_DIR_LIST_BASE(0), pd_addr);
+	/* setup index register, all pointing to directory bank 0 */
+
+	/* Enable tiling */
+	if (drm_psb_msvdx_tiling && dev_priv->have_mem_mmu_tiling)
+		tng_topaz_mmu_enable_tiling(dev_priv);
+
+	/* now enable MMU access for all requestors */
+	reg_val = F_ENCODE(0, TOPAZHP_TOP_CR_MMU_BYPASS_TOPAZ);
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_MMU_CONTROL0, reg_val);
+
+	/* This register does not get reset between encoder runs,
+	so we need to ensure we always set it up one way or another here */
+	/* 36-bit actually means "not 32-bit" */
+	reg_val = F_ENCODE(0, TOPAZHP_TOP_CR_MMU_ENABLE_36BIT_ADDRESSING);
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_MMU_CONTROL2, reg_val);
+}
+
+void tng_topaz_mmu_flushcache(struct drm_psb_private *dev_priv)
+{
+	uint32_t mmu_control;
+
+	if (dev_priv->topaz_disabled) {
+		printk(KERN_ERR "topazhp disabled\n");
+		return;
+	}
+
+	MULTICORE_READ32(TOPAZHP_TOP_CR_MMU_CONTROL0, &mmu_control);
+
+	/* Set Invalid flag (this causes a flush with MMU still
+	operating afterwards even if not cleared,
+	but may want to replace with MMU_FLUSH? */
+	mmu_control |= F_ENCODE(1, TOPAZHP_TOP_CR_MMU_INVALDC);
+
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_MMU_CONTROL0, mmu_control);
+
+	/* Clear Invalid flag */
+	mmu_control |= F_ENCODE(0, TOPAZHP_TOP_CR_MMU_INVALDC);
+	MULTICORE_WRITE32(TOPAZHP_TOP_CR_MMU_CONTROL0, mmu_control);
+	/*
+	psb_gl3_global_invalidation(dev_priv->dev);
+	*/
+}
+
+int32_t mtx_dma_read(struct drm_device *dev, struct ttm_buffer_object *dst_bo,
+		 uint32_t src_ram_addr, uint32_t size)
+{
+	uint32_t irq_state;
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *)dev->dev_private;
+	struct ttm_buffer_object *target;
+	int32_t ret = 0;
+
+	/* clear status of any previous interrupts */
+	DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
+	DMAC_READ32(IMG_SOC_DMAC_IRQ_STAT(0), &irq_state);
+	if (irq_state != 0) {
+		DRM_ERROR("irq state is not 0");
+		return -1;
+	}
+
+	target = dst_bo;
+	/* transfer the data */
+	ret = mtx_dmac_transfer(dev_priv, 0, (uint32_t)target->offset, 0,
+			       src_ram_addr,
+			       size, 0);
+	if (ret) {
+		/* tng_error_dump_reg(dev_priv); */
+		DRM_ERROR("DMA transfer failed");
+		return ret;
+	}
+
+	/* wait for it transfer */
+	ret = tng_topaz_wait_for_register(dev_priv, CHECKFUNC_ISEQUAL,
+		REG_OFFSET_TOPAZ_DMAC + IMG_SOC_DMAC_IRQ_STAT(0),
+		F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
+		F_ENCODE(1, IMG_SOC_TRANSFER_FIN));
+	if (ret) {
+		/* tng_error_dump_reg(dev_priv); */
+		DRM_ERROR("Waiting register timeout");
+		return ret;
+	}
+
+	/* clear interrupt */
+	DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
+
+	return ret;
+}
+
+#if 0
+static int mtx_dma_write(
+	struct drm_device *dev,
+	struct ttm_buffer_object *src_bo,
+	uint32_t dst_ram_addr, uint32_t size)
+{
+	uint32_t irq_state;
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *)dev->dev_private;
+	struct ttm_buffer_object *target;
+	int32_t ret = 0;
+
+	/* clear status of any previous interrupts */
+	DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
+	DMAC_READ32(IMG_SOC_DMAC_IRQ_STAT(0), &irq_state);
+	if (irq_state != 0) {
+		DRM_ERROR("irq state is not 0");
+		return -1;
+	}
+
+	/* give the DMAC access to the host memory via BIF */
+	/*FPGA_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1, 0);*/
+
+	target = src_bo;
+	/* transfert the data */
+	ret = mtx_dmac_transfer(dev_priv, 0, target->offset, 0,
+			       dst_ram_addr,
+			       size, 1);
+
+
+	if (ret) {
+		/* tng_error_dump_reg(dev_priv); */
+		DRM_ERROR("DMA transfer failed");
+		return ret;
+	}
+
+	/* wait for it transfer */
+	ret = tng_topaz_wait_for_register(dev_priv, CHECKFUNC_ISEQUAL,
+		REG_OFFSET_TOPAZ_DMAC + IMG_SOC_DMAC_IRQ_STAT(0),
+		F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
+		F_ENCODE(1, IMG_SOC_TRANSFER_FIN));
+	if (ret) {
+		/* tng_error_dump_reg(dev_priv); */
+		DRM_ERROR("Waiting register timeout");
+		return ret;
+	}
+
+	/* clear interrupt */
+	DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
+
+	return ret;
+}
+#endif
+
+
diff --git a/drivers/external_drivers/intel_media/video/vsp/Makefile b/drivers/external_drivers/intel_media/video/vsp/Makefile
new file mode 100644
index 0000000..687fdd8
--- /dev/null
+++ b/drivers/external_drivers/intel_media/video/vsp/Makefile
@@ -0,0 +1,23 @@
+# Makefile for the drm device driver. This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+INCDIR=drivers/staging/mrfl
+MEDIA_INCDIR=drivers/staging/intel_media
+ccflags-y += \
+	-I$(INCDIR)/ \
+	-I$(INCDIR)/rgx/include \
+	-I$(INCDIR)/interface \
+	-I$(INCDIR)/drv \
+	-I$(INCDIR)/drv/ospm \
+	-I$(INCDIR)/../intel_media/video/common \
+	-I$(INCDIR)/../intel_media/video/vsp \
+	-I$(INCDIR)/../../../include/linux \
+	-I$(INCDIR)/../../../include/drm \
+	-I$(INCDIR)/../../../include/drm/ttm
+
+ccflags-y += -DANDROID -D_linux_ -DLINUX -D__KERNEL__ -DSUPPORT_VSP -DMERRIFIELD -DCONFIG_VIDEO_MRFLD
+
+obj-y += \
+	vsp.o \
+	vsp_init.o
+
diff --git a/drivers/external_drivers/intel_media/video/vsp/vsp.c b/drivers/external_drivers/intel_media/video/vsp/vsp.c
new file mode 100755
index 0000000..15b5404
--- /dev/null
+++ b/drivers/external_drivers/intel_media/video/vsp/vsp.c
@@ -0,0 +1,1789 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#include <drm/drmP.h>
+#include <linux/math64.h>
+
+#include "psb_drv.h"
+#include "psb_drm.h"
+#include "vsp.h"
+#include "ttm/ttm_execbuf_util.h"
+#include "vsp_fw.h"
+#include "pwr_mgmt.h"
+
+#define PARTITIONS_MAX 9
+
+#define REF_FRAME_LAST	0
+#define REF_FRAME_ALT	1
+#define REF_FRAME_GOLD	2
+
+static int vsp_submit_cmdbuf(struct drm_device *dev,
+			     struct file *filp,
+			     unsigned char *cmd_start,
+			     unsigned long cmd_size);
+static int vsp_send_command(struct drm_device *dev,
+			    struct file *filp,
+			    unsigned char *cmd_start,
+			    unsigned long cmd_size);
+static int vsp_prehandle_command(struct drm_file *priv,
+			    struct list_head *validate_list,
+			    uint32_t fence_type,
+			    struct drm_psb_cmdbuf_arg *arg,
+			    unsigned char *cmd_start,
+			    struct psb_ttm_fence_rep *fence_arg);
+static int vsp_fence_vpp_surfaces(struct drm_file *priv,
+			      struct list_head *validate_list,
+			      uint32_t fence_type,
+			      struct drm_psb_cmdbuf_arg *arg,
+			      struct psb_ttm_fence_rep *fence_arg,
+			      struct ttm_buffer_object *pic_param_bo);
+static void handle_error_response(unsigned int error_type,
+				unsigned int cmd_type);
+static int vsp_fence_vp8enc_surfaces(struct drm_file *priv,
+				struct list_head *validate_list,
+				uint32_t fence_type,
+				struct drm_psb_cmdbuf_arg *arg,
+				struct psb_ttm_fence_rep *fence_arg,
+				struct ttm_buffer_object *pic_param_bo);
+static int vsp_fence_compose_surfaces(struct drm_file *priv,
+				struct list_head *validate_list,
+				uint32_t fence_type,
+				struct drm_psb_cmdbuf_arg *arg,
+				struct psb_ttm_fence_rep *fence_arg,
+				struct ttm_buffer_object *pic_param_bo);
+
+static inline void psb_clflush(void *addr)
+{
+	__asm__ __volatile__("wbinvd ");
+}
+
+
+static inline void force_power_down_vsp(void)
+{
+	int count = 0;
+	VSP_DEBUG("Force to power down VSP\n");
+	while (is_island_on(OSPM_VIDEO_VPP_ISLAND) && (count < 255)) {
+		count++;
+		VSP_DEBUG("The VSP is on, power down it, tries %d\n", count);
+		power_island_put(OSPM_VIDEO_VPP_ISLAND);
+	}
+	VSP_DEBUG("The VSP is off now (tried %d times)\n", count);
+}
+
+static inline void power_down_vsp(void)
+{
+	VSP_DEBUG("Try to power down VSP\n");
+
+	if (is_island_on(OSPM_VIDEO_VPP_ISLAND)) {
+		VSP_DEBUG("The VSP is on, power down it\n");
+		power_island_put(OSPM_VIDEO_VPP_ISLAND);
+	} else
+		VSP_DEBUG("The VSP is already off\n");
+}
+
+static inline void power_up_vsp(void)
+{
+	VSP_DEBUG("Try to power up VSP\n");
+
+	if (is_island_on(OSPM_VIDEO_VPP_ISLAND))
+		VSP_DEBUG("The VSP is alraedy on\n");
+	else {
+		VSP_DEBUG("The VSP is off, power up it\n");
+		power_island_get(OSPM_VIDEO_VPP_ISLAND);
+	}
+}
+
+
+int vsp_handle_response(struct drm_psb_private *dev_priv)
+{
+	struct vsp_private *vsp_priv = dev_priv->vsp_private;
+
+	int ret = 0;
+	unsigned int rd, wr;
+	unsigned int idx;
+	unsigned int msg_num;
+	struct vss_response_t *msg;
+	uint32_t sequence;
+	uint32_t status;
+	unsigned int cmd_rd, cmd_wr;
+
+	idx = 0;
+	sequence = vsp_priv->current_sequence;
+	while (1) {
+		rd = vsp_priv->ctrl->ack_rd;
+		wr = vsp_priv->ctrl->ack_wr;
+		msg_num = wr > rd ? wr - rd : wr == rd ? 0 :
+			VSP_ACK_QUEUE_SIZE - (rd - wr);
+		VSP_DEBUG("ack rd %d wr %d, msg_num %d, size %d\n",
+			  rd, wr, msg_num, VSP_ACK_QUEUE_SIZE);
+
+		if (msg_num == 0)
+			break;
+		else if ( msg_num < 0) {
+			DRM_ERROR("invalid msg num, exit\n");
+			break;
+		}
+
+		msg = vsp_priv->ack_queue + (idx + rd) % VSP_ACK_QUEUE_SIZE;
+		VSP_DEBUG("ack[%d]->type = %x\n", idx, msg->type);
+
+		switch (msg->type) {
+		case VssErrorResponse:
+			DRM_ERROR("error response:%.8x %.8x %.8x %.8x %.8x\n",
+				  msg->context, msg->type, msg->buffer,
+				  msg->size, msg->vss_cc);
+			handle_error_response(msg->buffer & 0xFFFF,
+					msg->buffer >> 16);
+
+			cmd_rd = vsp_priv->ctrl->cmd_rd;
+			cmd_wr = vsp_priv->ctrl->cmd_wr;
+
+			if (msg->context != 0 && cmd_wr == cmd_rd) {
+				vsp_priv->vp8_cmd_num = 0;
+				sequence = vsp_priv->last_sequence;
+			}
+
+			ret = false;
+
+			/* For VPP component, wouldn't receive any command
+			 * from user space. Release the fence.
+			 */
+			if (msg->context == CONTEXT_VPP_ID)
+				vsp_priv->vsp_state = VSP_STATE_HANG;
+			else if (msg->context == CONTEXT_COMPOSE_ID) {
+				vsp_priv->vsp_state = VSP_STATE_HANG;
+				sequence = vsp_priv->compose_fence;
+			}
+			break;
+
+		case VssEndOfSequenceResponse:
+			PSB_DEBUG_GENERAL("end of the sequence received\n");
+			VSP_DEBUG("VSP clock cycles from pre response %x\n",
+				  msg->vss_cc);
+			vsp_priv->vss_cc_acc += msg->vss_cc;
+
+			break;
+
+		case VssOutputSurfaceReadyResponse:
+			VSP_DEBUG("sequence %x is done!!\n", msg->buffer);
+			VSP_DEBUG("VSP clock cycles from pre response %x\n",
+				  msg->vss_cc);
+			vsp_priv->vss_cc_acc += msg->vss_cc;
+			sequence = msg->buffer;
+
+			break;
+
+		case VssOutputSurfaceFreeResponse:
+			VSP_DEBUG("sequence surface %x should be freed\n",
+				  msg->buffer);
+			VSP_DEBUG("VSP clock cycles from pre response %x\n",
+				  msg->vss_cc);
+			vsp_priv->vss_cc_acc += msg->vss_cc;
+			break;
+
+		case VssOutputSurfaceCrcResponse:
+			VSP_DEBUG("Crc of sequence %x is %x\n", msg->buffer,
+				  msg->vss_cc);
+			vsp_priv->vss_cc_acc += msg->vss_cc;
+			break;
+
+		case VssInputSurfaceReadyResponse:
+			VSP_DEBUG("input surface ready\n");
+			VSP_DEBUG("VSP clock cycles from pre response %x\n",
+				  msg->vss_cc);
+			vsp_priv->vss_cc_acc += msg->vss_cc;
+			break;
+
+		case VssCommandBufferReadyResponse:
+			VSP_DEBUG("command buffer ready\n");
+			VSP_DEBUG("VSP clock cycles from pre response %x\n",
+				  msg->vss_cc);
+			vsp_priv->vss_cc_acc += msg->vss_cc;
+			break;
+
+		case VssIdleResponse:
+		{
+			unsigned int cmd_rd, cmd_wr;
+
+			VSP_DEBUG("VSP is idle\n");
+			VSP_DEBUG("VSP clock cycles from pre response %x\n",
+				  msg->vss_cc);
+			vsp_priv->vss_cc_acc += msg->vss_cc;
+
+			cmd_rd = vsp_priv->ctrl->cmd_rd;
+			cmd_wr = vsp_priv->ctrl->cmd_wr;
+			VSP_DEBUG("cmd_rd=%d, cmd_wr=%d\n", cmd_rd, cmd_wr);
+
+			if (vsp_priv->vsp_state == VSP_STATE_ACTIVE)
+				vsp_priv->vsp_state = VSP_STATE_IDLE;
+			break;
+		}
+		case VssVp8encSetSequenceParametersResponse:
+			VSP_DEBUG("VSP clock cycles from pre response %x\n",
+				  msg->vss_cc);
+			vsp_priv->vss_cc_acc += msg->vss_cc;
+			status = msg->buffer;
+			switch (status) {
+			case VssOK:
+				VSP_DEBUG("vp8 sequence response received\n");
+				break;
+			default:
+				VSP_DEBUG("Unknown VssStatus %x\n", status);
+				DRM_ERROR("Invalid vp8 sequence response\n");
+				break;
+			}
+
+			break;
+		case VssVp8encEncodeFrameResponse:
+		{
+			sequence = msg->buffer;
+			/* received VssVp8encEncodeFrameResponse indicates cmd has been handled */
+			vsp_priv->vp8_cmd_num--;
+
+			VSP_DEBUG("sequence %d\n", sequence);
+			VSP_DEBUG("receive vp8 encoded frame response\n");
+
+			break;
+		}
+		case VssWiDi_ComposeSetSequenceParametersResponse:
+		{
+			VSP_DEBUG("The Compose respose value is %d\n", msg->buffer);
+			break;
+		}
+		case VssWiDi_ComposeFrameResponse:
+		{
+			VSP_DEBUG("Compose sequence %x is done!!\n", msg->buffer);
+			sequence = msg->buffer;
+			break;
+		}
+		default:
+			DRM_ERROR("VSP: Unknown response type %x\n",
+				  msg->type);
+			DRM_ERROR("VSP: there're %d response remaining\n",
+				  msg_num - idx - 1);
+			ret = false;
+			break;
+		}
+
+		vsp_priv->ctrl->ack_rd = (vsp_priv->ctrl->ack_rd + 1) %
+			VSP_ACK_QUEUE_SIZE;
+	}
+
+	return sequence;
+}
+
+bool vsp_interrupt(void *pvData)
+{
+	struct drm_device *dev;
+	struct drm_psb_private *dev_priv;
+	struct vsp_private *vsp_priv;
+	unsigned long status;
+	bool ret = true;
+
+	VSP_DEBUG("got vsp interrupt\n");
+
+	if (pvData == NULL) {
+		DRM_ERROR("VSP: vsp %s, Invalid params\n", __func__);
+		return false;
+	}
+
+	dev = (struct drm_device *)pvData;
+	dev_priv = (struct drm_psb_private *) dev->dev_private;
+	vsp_priv = dev_priv->vsp_private;
+
+	/* read interrupt status */
+	IRQ_REG_READ32(VSP_IRQ_CTRL_IRQ_STATUS, &status);
+	VSP_DEBUG("irq status %lx\n", status);
+
+	/* clear interrupt status */
+	if (!(status & (1 << VSP_SP0_IRQ_SHIFT))) {
+		DRM_ERROR("VSP: invalid irq\n");
+		return false;
+	} else {
+		IRQ_REG_WRITE32(VSP_IRQ_CTRL_IRQ_CLR, (1 << VSP_SP0_IRQ_SHIFT));
+		/* we need to clear IIR VSP bit */
+		PSB_WVDC32(_TNG_IRQ_VSP_FLAG, PSB_INT_IDENTITY_R);
+		(void)PSB_RVDC32(PSB_INT_IDENTITY_R);
+	}
+
+	schedule_delayed_work(&vsp_priv->vsp_irq_wq, 0);
+
+	VSP_DEBUG("will leave interrupt\n");
+	return ret;
+}
+
+int vsp_cmdbuf_vpp(struct drm_file *priv,
+		    struct list_head *validate_list,
+		    uint32_t fence_type,
+		    struct drm_psb_cmdbuf_arg *arg,
+		    struct ttm_buffer_object *cmd_buffer,
+		    struct psb_ttm_fence_rep *fence_arg)
+{
+	struct drm_device *dev = priv->minor->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct vsp_private *vsp_priv = dev_priv->vsp_private;
+	int ret = 0;
+	unsigned char *cmd_start;
+	unsigned long cmd_page_offset = arg->cmdbuf_offset & ~PAGE_MASK;
+	struct ttm_bo_kmap_obj cmd_kmap;
+	bool is_iomem;
+	struct file *filp = priv->filp;
+	bool need_power_put = 0;
+
+	/* If VSP timeout, don't send cmd to hardware anymore */
+	if (vsp_priv->vsp_state == VSP_STATE_HANG) {
+		DRM_ERROR("The VSP is hang abnormally, try to reset vsp hardware!\n");
+
+		VSP_DEBUG("Force state to DOWN to force power down\n");
+		vsp_priv->ctrl->entry_kind = vsp_exit;
+		vsp_priv->vsp_state = VSP_STATE_DOWN;
+		force_power_down_vsp();
+		//return -EFAULT;
+	}
+
+	memset(&cmd_kmap, 0, sizeof(cmd_kmap));
+	vsp_priv->vsp_cmd_num = 1;
+
+	/* check command buffer parameter */
+	if ((arg->cmdbuf_offset > cmd_buffer->acc_size) ||
+	    (arg->cmdbuf_size > cmd_buffer->acc_size) ||
+	    (arg->cmdbuf_size + arg->cmdbuf_offset) > cmd_buffer->acc_size) {
+		DRM_ERROR("VSP: the size of cmdbuf is invalid!");
+		DRM_ERROR("VSP: offset=%x, size=%x,cmd_buffer size=%zx\n",
+			  arg->cmdbuf_offset, arg->cmdbuf_size,
+			  cmd_buffer->acc_size);
+		vsp_priv->vsp_cmd_num = 0;
+		ret = -EFAULT;
+		goto out_err;
+	}
+
+	VSP_DEBUG("map command first\n");
+	ret = ttm_bo_kmap(cmd_buffer, arg->cmdbuf_offset >> PAGE_SHIFT, 2,
+			  &cmd_kmap);
+	if (ret) {
+		DRM_ERROR("VSP: ttm_bo_kmap failed: %d\n", ret);
+		vsp_priv->vsp_cmd_num = 0;
+		goto out_err;
+	}
+
+	cmd_start = (unsigned char *) ttm_kmap_obj_virtual(&cmd_kmap,
+			&is_iomem) + cmd_page_offset;
+
+	/* handle the Context and Fence command */
+	VSP_DEBUG("handle Context and Fence commands\n");
+	ret = vsp_prehandle_command(priv, validate_list, fence_type, arg,
+			       cmd_start, fence_arg);
+	if (ret)
+		goto out;
+
+	if (time_after(jiffies, vsp_priv->cmd_submit_time + HZ * 50 / 1000)) {
+		VSP_DEBUG(" will be force to flush cmd due to jiffies\n");
+		vsp_priv->force_flush_cmd = 1;
+	}
+
+	if (drm_vsp_vpp_batch_cmd == 0)
+		vsp_priv->force_flush_cmd = 1;
+
+        if ((drm_vsp_pmpolicy != PSB_PMPOLICY_NOPM) &&
+	    (vsp_priv->vsp_state == VSP_STATE_IDLE))
+		power_down_vsp();
+
+	if (vsp_priv->acc_num_cmd >= 1 || vsp_priv->force_flush_cmd != 0
+	    || vsp_priv->delayed_burst_cnt > 0) {
+		if (power_island_get(OSPM_VIDEO_VPP_ISLAND) == false) {
+			ret = -EBUSY;
+			goto out_err1;
+		}
+		need_power_put = 1;
+	}
+
+	VSP_DEBUG("will submit command\n");
+	ret = vsp_submit_cmdbuf(dev, filp, cmd_start, arg->cmdbuf_size);
+	if (ret)
+		goto out_err1;
+
+out_err1:
+	if (need_power_put)
+		power_island_put(OSPM_VIDEO_VPP_ISLAND);
+out:
+	ttm_bo_kunmap(&cmd_kmap);
+
+	spin_lock(&cmd_buffer->bdev->fence_lock);
+	if (cmd_buffer->sync_obj != NULL)
+		ttm_fence_sync_obj_unref(&cmd_buffer->sync_obj);
+	spin_unlock(&cmd_buffer->bdev->fence_lock);
+
+	vsp_priv->vsp_cmd_num = 0;
+out_err:
+	return ret;
+}
+
+int vsp_submit_cmdbuf(struct drm_device *dev,
+		      struct file *filp,
+		      unsigned char *cmd_start,
+		      unsigned long cmd_size)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct vsp_private *vsp_priv = dev_priv->vsp_private;
+	int ret;
+
+	if (vsp_priv->acc_num_cmd >= 1 || vsp_priv->force_flush_cmd != 0
+	    || vsp_priv->delayed_burst_cnt > 0) {
+		/* consider to invalidate/flush MMU */
+		if (vsp_priv->vsp_state == VSP_STATE_DOWN) {
+			VSP_DEBUG("needs reset\n");
+
+			if (vsp_reset(dev_priv)) {
+				ret = -EBUSY;
+				DRM_ERROR("VSP: failed to reset\n");
+				return ret;
+			}
+		}
+
+		if (vsp_priv->vsp_state == VSP_STATE_SUSPEND) {
+			ret = vsp_resume_function(dev_priv);
+			VSP_DEBUG("The VSP is on suspend, send resume!\n");
+		}
+	}
+
+	/* submit command to HW */
+	ret = vsp_send_command(dev, filp, cmd_start, cmd_size);
+	if (ret != 0) {
+		DRM_ERROR("VSP: failed to send command\n");
+		return ret;
+	}
+
+#if 0
+	/* If the VSP is in Suspend, need to send "Resume" */
+	if (vsp_priv->vsp_state == VSP_STATE_SUSPEND) {
+		ret = vsp_resume_function(dev_priv);
+		VSP_DEBUG("The VSP is on suspend, send resume!\n");
+	}
+#endif
+	return ret;
+}
+
+int vsp_send_command(struct drm_device *dev,
+		     struct file *filp,
+		     unsigned char *cmd_start,
+		     unsigned long cmd_size)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct vsp_private *vsp_priv = dev_priv->vsp_private;
+	unsigned int rd, wr;
+	unsigned int remaining_space;
+	unsigned int cmd_idx, num_cmd = 0;
+	struct vss_command_t *cur_cmd, *cur_cell_cmd;
+
+	VSP_DEBUG("will send command here: cmd_start %p, cmd_size %ld\n",
+		  cmd_start, cmd_size);
+
+	cur_cmd = (struct vss_command_t *)cmd_start;
+
+
+	/* if the VSP in suspend, update the saved config info */
+	if (vsp_priv->vsp_state == VSP_STATE_SUSPEND) {
+		VSP_DEBUG("In suspend, need update saved cmd_wr!\n");
+		vsp_priv->ctrl = (struct vsp_ctrl_reg *)
+				 &(vsp_priv->saved_config_regs[2]);
+	}
+
+	while (cmd_size) {
+		rd = vsp_priv->ctrl->cmd_rd;
+		wr = vsp_priv->ctrl->cmd_wr;
+
+		remaining_space = rd >= wr + 1 ? rd - wr - 1 :
+			VSP_CMD_QUEUE_SIZE - (wr + 1 - rd) ;
+		remaining_space -= vsp_priv->acc_num_cmd;
+
+		VSP_DEBUG("VSP: rd %d, wr %d, remaining_space %d, ",
+			  rd, wr, remaining_space);
+		VSP_DEBUG("cmd_size %ld sizeof(*cur_cmd) %zu\n",
+			  cmd_size, sizeof(*cur_cmd));
+
+		if (remaining_space < vsp_priv->vsp_cmd_num) {
+			DRM_ERROR("no enough space for cmd queue\n");
+			DRM_ERROR("VSP: rd %d, wr %d, remaining_space %d\n",
+				  rd, wr, remaining_space);
+			/* VP handle the data very slowly,
+			* so we have to delay longer
+			*/
+#ifdef CONFIG_BOARD_MRFLD_VP
+			udelay(1000);
+#else
+			udelay(10);
+#endif
+			continue;
+		}
+
+		for (cmd_idx = vsp_priv->acc_num_cmd; cmd_idx < remaining_space;) {
+			VSP_DEBUG("current cmd type %x\n", cur_cmd->type);
+			if (cur_cmd->type == VspFencePictureParamCommand) {
+				VSP_DEBUG("skip VspFencePictureParamCommand");
+				cur_cmd++;
+				cmd_size -= sizeof(*cur_cmd);
+				VSP_DEBUG("first cmd_size %ld\n", cmd_size);
+				if (cmd_size == 0)
+					goto out;
+				else
+					continue;
+			} else if (cur_cmd->type == VspSetContextCommand ||
+					cur_cmd->type == Vss_Sys_Ref_Frame_COMMAND) {
+				VSP_DEBUG("skip Vss_Sys_Ref_Frame_COMMAND\n");
+				cur_cmd++;
+
+				cmd_size -= sizeof(*cur_cmd);
+				if (cmd_size == 0)
+					goto out;
+				else
+					continue;
+			} else if (cur_cmd->type == VspFenceComposeCommand) {
+				VSP_DEBUG("skip VspFenceComposeCommand\n");
+				cur_cmd++;
+				cmd_size -= sizeof(*cur_cmd);
+				VSP_DEBUG("first cmd_size %ld\n", cmd_size);
+				if (cmd_size == 0)
+					goto out;
+				else
+					continue;
+			} else if (cur_cmd->type == VssWiDi_ComposeFrameCommand) {
+				/* save the fence value in buffer_id */
+				cur_cmd->buffer_id = vsp_priv->compose_fence;
+			}
+
+			/* FIXME: could remove cmd_idx here */
+			cur_cell_cmd = vsp_priv->cmd_queue +
+				(wr + cmd_idx) % VSP_CMD_QUEUE_SIZE;
+			++cmd_idx;
+
+			memcpy(cur_cell_cmd, cur_cmd, sizeof(*cur_cmd));
+			VSP_DEBUG("cmd: %.8x %.8x %.8x %.8x %.8x %.8x\n",
+				cur_cell_cmd->context, cur_cell_cmd->type,
+				cur_cell_cmd->buffer, cur_cell_cmd->size,
+				cur_cell_cmd->buffer_id, cur_cell_cmd->irq);
+			VSP_DEBUG("send %.8x cmd to VSP\n",
+					cur_cell_cmd->type);
+
+			num_cmd++;
+			cur_cmd++;
+			cmd_size -= sizeof(*cur_cmd);
+			if (cmd_size == 0)
+				goto out;
+			else if (cmd_size < sizeof(*cur_cmd)) {
+				DRM_ERROR("invalid command size %ld\n",
+					  cmd_size);
+				goto out;
+			}
+		}
+	}
+out:
+	/* update write index */
+	VSP_DEBUG("%d cmd will send to VSP!\n", num_cmd);
+	if (vsp_priv->delayed_burst_cnt > 0)
+		--vsp_priv->delayed_burst_cnt;
+
+	vsp_priv->cmd_submit_time = jiffies;
+
+	vsp_priv->acc_num_cmd += num_cmd;
+	if (vsp_priv->acc_num_cmd > 1 || vsp_priv->force_flush_cmd != 0 ||
+	    vsp_priv->delayed_burst_cnt > 0) {
+		vsp_priv->ctrl->cmd_wr =
+			(vsp_priv->ctrl->cmd_wr + vsp_priv->acc_num_cmd) %
+			VSP_CMD_QUEUE_SIZE;
+		vsp_priv->acc_num_cmd = 0;
+		vsp_priv->force_flush_cmd = 0;
+		cancel_delayed_work(&vsp_priv->vsp_cmd_submit_check_wq);
+	} else {
+		schedule_delayed_work(&vsp_priv->vsp_cmd_submit_check_wq, HZ);
+	}
+
+	return 0;
+}
+
+static int vsp_prehandle_command(struct drm_file *priv,
+			    struct list_head *validate_list,
+			    uint32_t fence_type,
+			    struct drm_psb_cmdbuf_arg *arg,
+			    unsigned char *cmd_start,
+			    struct psb_ttm_fence_rep *fence_arg)
+{
+	struct ttm_object_file *tfile = BCVideoGetPriv(priv)->tfile;
+	struct vss_command_t *cur_cmd;
+	unsigned int cmd_size = arg->cmdbuf_size;
+	int ret = 0;
+	struct ttm_buffer_object *pic_param_bo = NULL;
+	int pic_param_num, vsp_cmd_num = 0;
+	struct ttm_validate_buffer *pos, *next;
+	struct drm_device *dev = priv->minor->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct vsp_private *vsp_priv = dev_priv->vsp_private;
+	struct ttm_buffer_object *pic_bo_vp8 = NULL;
+	int vp8_pic_num = 0;
+	struct ttm_buffer_object *compose_param_bo = NULL;
+	int compose_param_num = 0;
+
+
+	cur_cmd = (struct vss_command_t *)cmd_start;
+
+	pic_param_num = 0;
+	VSP_DEBUG("cmd size %d\n", cmd_size);
+	while (cmd_size) {
+		VSP_DEBUG("cmd type %x, buffer offset %x\n", cur_cmd->type,
+			  cur_cmd->buffer);
+		if (cur_cmd->type == VspFencePictureParamCommand) {
+			pic_param_bo =
+				ttm_buffer_object_lookup(tfile,
+							 cur_cmd->buffer);
+			if (pic_param_bo == NULL) {
+				DRM_ERROR("VSP: failed to find %x bo\n",
+					  cur_cmd->buffer);
+				ret = -1;
+				goto out;
+			}
+			pic_param_num++;
+			VSP_DEBUG("find pic param buffer: id %x, offset %lx\n",
+				  cur_cmd->buffer, pic_param_bo->offset);
+			VSP_DEBUG("pic param placement %x bus.add %p\n",
+				  pic_param_bo->mem.placement,
+				  pic_param_bo->mem.bus.addr);
+			if (pic_param_num > 1) {
+				DRM_ERROR("pic_param_num invalid(%d)!\n",
+					  pic_param_num);
+				ret = -1;
+				goto out;
+			}
+		} else if (cur_cmd->type == VspFenceComposeCommand) {
+			compose_param_bo =
+				ttm_buffer_object_lookup(tfile,
+							 cur_cmd->buffer);
+			if (compose_param_bo == NULL) {
+				DRM_ERROR("VSP: failed to find %x bo\n",
+					  cur_cmd->buffer);
+				ret = -1;
+				goto out;
+			}
+			compose_param_num++;
+			VSP_DEBUG("find compose param buffer: id %x, offset %lx\n",
+				  cur_cmd->buffer, compose_param_bo->offset);
+			VSP_DEBUG("compose param placement %x bus.add %p\n",
+				  compose_param_bo->mem.placement,
+				  compose_param_bo->mem.bus.addr);
+			if (compose_param_num > 1) {
+				DRM_ERROR("compose_param_num invalid(%d)!\n",
+					  compose_param_num);
+				ret = -1;
+				goto out;
+			}
+		} else if (cur_cmd->type == VspSetContextCommand) {
+			VSP_DEBUG("set context and new vsp FRC context\n");
+		} else if (cur_cmd->type == Vss_Sys_STATE_BUF_COMMAND) {
+			VSP_DEBUG("set context and new vsp VP8 context\n");
+
+			cur_cmd->context = VSP_API_GENERIC_CONTEXT_ID;
+			cur_cmd->type = VssGenInitializeContext;
+			if (priv->filp == vsp_priv->vp8_filp[0]) {
+				cur_cmd->buffer = 1;
+			} else if (priv->filp == vsp_priv->vp8_filp[1]) {
+				cur_cmd->buffer = 2;
+			} else if (priv->filp == vsp_priv->vp8_filp[2]) {
+				cur_cmd->buffer = 3;
+			} else {
+				DRM_ERROR("got the wrong context_id and exit\n");
+				return -1;
+			}
+
+			cur_cmd->size = VSP_APP_ID_VP8_ENC;
+			cur_cmd->buffer_id = 0;
+			cur_cmd->irq = 0;
+			cur_cmd->reserved6 = 0;
+			cur_cmd->reserved7 = 0;
+		} else if (cur_cmd->type == VssGenInitializeContext) {
+			/* Init them just get InitContext command */
+			vsp_priv->force_flush_cmd = 0;
+			vsp_priv->acc_num_cmd = 0;
+			vsp_priv->delayed_burst_cnt = 90;
+
+			vsp_cmd_num++;
+
+		} else if (cur_cmd->type == VssGenDestroyContext) {
+			vsp_cmd_num++;
+		} else
+			/* calculate the numbers of cmd send to VSP */
+			vsp_cmd_num++;
+
+		if (cur_cmd->type == VssVp8encEncodeFrameCommand) {
+			/* calculate VssVp8encEncodeFrameCommand cmd numbers */
+			vsp_priv->vp8_cmd_num++;
+
+			/* set 1st VP8 process context_vp8_id=1 *
+			 * set 2nd VP8 process context_vp8_id=2 *
+			 * */
+			if (priv->filp == vsp_priv->vp8_filp[0]) {
+				cur_cmd->context = 1;
+			} else if (priv->filp == vsp_priv->vp8_filp[1]) {
+				cur_cmd->context = 2;
+			} else if (priv->filp == vsp_priv->vp8_filp[2]) {
+				cur_cmd->context = 3;
+			} else {
+				DRM_ERROR("got the wrong context_id and exit\n");
+				return -1;
+			}
+
+			pic_bo_vp8 =
+				ttm_buffer_object_lookup(tfile,
+						cur_cmd->reserved7);
+
+			if (pic_bo_vp8 == NULL) {
+				DRM_ERROR("VSP: failed to find %x bo\n",
+					cur_cmd->reserved7);
+				ret = -1;
+				goto out;
+			}
+
+			vp8_pic_num++;
+			VSP_DEBUG("find pic param buffer: id %x, offset %lx\n",
+				cur_cmd->reserved7, pic_bo_vp8->offset);
+			VSP_DEBUG("pic param placement %x bus.add %p\n",
+				pic_bo_vp8->mem.placement,
+				pic_bo_vp8->mem.bus.addr);
+			if (vp8_pic_num > 1) {
+				DRM_ERROR("should be only 1 pic param cmd\n");
+				ret = -1;
+				goto out;
+			}
+		}
+
+		if (cur_cmd->type == VssVp8encSetSequenceParametersCommand) {
+			if (priv->filp == vsp_priv->vp8_filp[0]) {
+				cur_cmd->context = 1;
+			} else if (priv->filp == vsp_priv->vp8_filp[1]) {
+				cur_cmd->context = 2;
+			} else if (priv->filp == vsp_priv->vp8_filp[2]) {
+				cur_cmd->context = 3;
+			} else {
+				DRM_ERROR("got the wrong context_id and exit\n");
+				return -1;
+			}
+
+			memcpy(&vsp_priv->seq_cmd, cur_cmd, sizeof(struct vss_command_t));
+		}
+
+		/* for VP8, directly submit without delay */
+		if (cur_cmd->context != 0)
+			vsp_priv->force_flush_cmd = 1;
+
+		cmd_size -= sizeof(*cur_cmd);
+		cur_cmd++;
+	}
+
+	if (vsp_cmd_num)
+		vsp_priv->vsp_cmd_num = vsp_cmd_num;
+
+	if (pic_param_num > 0) {
+		ret = vsp_fence_vpp_surfaces(priv, validate_list, fence_type, arg,
+					 fence_arg, pic_param_bo);
+	} else if (vp8_pic_num > 0) {
+		ret = vsp_fence_vp8enc_surfaces(priv, validate_list,
+					fence_type, arg,
+					fence_arg, pic_bo_vp8);
+	} else if (compose_param_num) {
+		vsp_priv->force_flush_cmd = 1;
+		ret = vsp_fence_compose_surfaces(priv, validate_list,
+					fence_type, arg,
+					fence_arg, compose_param_bo);
+	} else {
+		/* unreserve these buffer */
+		list_for_each_entry_safe(pos, next, validate_list, head) {
+			ttm_bo_unreserve(pos->bo);
+		}
+
+		vsp_priv->force_flush_cmd = 1;
+
+		VSP_DEBUG("no fence for this command\n");
+		goto out;
+	}
+
+
+	VSP_DEBUG("finished fencing\n");
+out:
+
+	return ret;
+}
+
+int vsp_fence_vpp_surfaces(struct drm_file *priv,
+		       struct list_head *validate_list,
+		       uint32_t fence_type,
+		       struct drm_psb_cmdbuf_arg *arg,
+		       struct psb_ttm_fence_rep *fence_arg,
+		       struct ttm_buffer_object *pic_param_bo)
+{
+	struct ttm_bo_kmap_obj pic_param_kmap;
+	struct psb_ttm_fence_rep local_fence_arg;
+	bool is_iomem;
+	int ret = 0;
+	struct VssProcPictureParameterBuffer *pic_param;
+	int output_surf_num;
+	int idx;
+	int found;
+	uint32_t surf_handler;
+	struct ttm_buffer_object *surf_bo;
+	struct ttm_fence_object *fence = NULL;
+	struct list_head surf_list, tmp_list;
+	struct ttm_validate_buffer *pos, *next, *cur_valid_buf = NULL;
+	struct ttm_object_file *tfile = BCVideoGetPriv(priv)->tfile;
+	struct drm_device *dev = priv->minor->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct vsp_private *vsp_priv = dev_priv->vsp_private;
+
+	INIT_LIST_HEAD(&surf_list);
+	INIT_LIST_HEAD(&tmp_list);
+
+	/* map pic param */
+	ret = ttm_bo_kmap(pic_param_bo, 0, pic_param_bo->num_pages,
+			  &pic_param_kmap);
+	if (ret) {
+		DRM_ERROR("VSP: ttm_bo_kmap failed: %d\n", ret);
+		ttm_bo_unref(&pic_param_bo);
+		goto out;
+	}
+
+	pic_param = (struct VssProcPictureParameterBuffer *)
+		ttm_kmap_obj_virtual(&pic_param_kmap, &is_iomem);
+
+	output_surf_num = pic_param->num_output_pictures;
+	VSP_DEBUG("output surf num %d\n", output_surf_num);
+
+	if (output_surf_num == 0)
+		vsp_priv->force_flush_cmd = 1;
+
+	/* create surface fence*/
+	for (idx = 0; idx < output_surf_num - 1; ++idx) {
+		found = 0;
+
+		surf_handler = pic_param->output_picture[idx].surface_id;
+		VSP_DEBUG("handling surface id %x\n", surf_handler);
+
+		if (drm_vsp_single_int) {
+			pic_param->output_picture[idx].irq = 0;
+			continue;
+		}
+
+		surf_bo = ttm_buffer_object_lookup(tfile, surf_handler);
+		if (surf_bo == NULL) {
+			DRM_ERROR("VSP: failed to find %x surface\n",
+				  surf_handler);
+			ret = -1;
+			goto out;
+		}
+		VSP_DEBUG("find target surf_bo %lx\n", surf_bo->offset);
+
+		/* remove from original validate list */
+		list_for_each_entry_safe(pos, next,
+					 validate_list, head) {
+			if (surf_bo->offset ==  pos->bo->offset) {
+				cur_valid_buf = pos;
+				list_del_init(&pos->head);
+				found = 1;
+				break;
+			}
+		}
+
+		ttm_bo_unref(&surf_bo);
+
+		BUG_ON(!list_empty(&surf_list));
+		/* create fence */
+		if (found == 1) {
+			/* create right list */
+			list_add_tail(&cur_valid_buf->head, &surf_list);
+			psb_fence_or_sync(priv, VSP_ENGINE_VPP,
+					  fence_type, arg->fence_flags,
+					  &surf_list, &local_fence_arg,
+					  &fence);
+			list_del_init(&pos->head);
+			/* reserve it */
+			list_add_tail(&pos->head, &tmp_list);
+		} else {
+			DRM_ERROR("VSP: failed to find %d bo: %x\n",
+				  idx, surf_handler);
+			ret = -1;
+			goto out;
+		}
+
+		/* assign sequence number
+		 * FIXME: do we need fc lock for sequence read?
+		 */
+		if (fence) {
+			VSP_DEBUG("fence sequence %x,pic_idx %d,surf %x\n",
+				  fence->sequence, idx,
+				  pic_param->output_picture[idx].surface_id);
+
+			pic_param->output_picture[idx].surface_id =
+				fence->sequence;
+			ttm_fence_object_unref(&fence);
+		}
+	}
+
+	/* just fence pic param if this is not end command */
+	/* only send last output fence_arg back */
+	psb_fence_or_sync(priv, VSP_ENGINE_VPP, fence_type,
+			  arg->fence_flags, validate_list,
+			  fence_arg, &fence);
+	if (fence) {
+		VSP_DEBUG("fence sequence %x at output pic %d\n",
+			  fence->sequence, idx);
+		pic_param->output_picture[idx].surface_id = fence->sequence;
+
+		if (drm_vsp_single_int)
+			for (idx = 0; idx < output_surf_num - 1; ++idx)
+				pic_param->output_picture[idx].surface_id = 0;
+
+		ttm_fence_object_unref(&fence);
+	}
+
+	/* add surface back into validate_list */
+	list_for_each_entry_safe(pos, next, &tmp_list, head) {
+		list_add_tail(&pos->head, validate_list);
+	}
+out:
+	ttm_bo_kunmap(&pic_param_kmap);
+	ttm_bo_unref(&pic_param_bo);
+
+	return ret;
+}
+
+static int vsp_fence_vp8enc_surfaces(struct drm_file *priv,
+				struct list_head *validate_list,
+				uint32_t fence_type,
+				struct drm_psb_cmdbuf_arg *arg,
+				struct psb_ttm_fence_rep *fence_arg,
+				struct ttm_buffer_object *pic_param_bo)
+{
+	bool is_iomem;
+	int ret = 0;
+	struct VssVp8encPictureParameterBuffer *pic_param;
+	struct ttm_fence_object *fence = NULL;
+	struct list_head surf_list;
+	struct drm_device *dev = priv->minor->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct vsp_private *vsp_priv = dev_priv->vsp_private;
+	struct ttm_bo_kmap_obj vp8_encode_frame__kmap;
+
+	INIT_LIST_HEAD(&surf_list);
+
+	/* map pic param */
+	ret = ttm_bo_kmap(pic_param_bo, 0, pic_param_bo->num_pages,
+			  &vp8_encode_frame__kmap);
+	if (ret) {
+		DRM_ERROR("VSP: ttm_bo_kmap failed: %d\n", ret);
+		ttm_bo_unref(&pic_param_bo);
+		goto out;
+	}
+
+	pic_param = (struct VssVp8encPictureParameterBuffer *)
+		ttm_kmap_obj_virtual(
+				&vp8_encode_frame__kmap,
+				&is_iomem);
+
+	VSP_DEBUG("save vp8 pic param address %p\n", pic_param);
+
+	VSP_DEBUG("bo addr %p kernel addr %p surfaceid %x base %x base_uv %x\n",
+			pic_param_bo,
+			pic_param,
+			pic_param->input_frame.surface_id,
+			pic_param->input_frame.base,
+			pic_param->input_frame.base_uv);
+
+	VSP_DEBUG("pic_param->encoded_frame_base = %x\n",
+			pic_param->encoded_frame_base);
+
+	vsp_priv->vp8_encode_frame_cmd = (void *)pic_param;
+
+	/* just fence pic param if this is not end command */
+	/* only send last output fence_arg back */
+	psb_fence_or_sync(priv, VSP_ENGINE_VPP, fence_type,
+			  arg->fence_flags, validate_list,
+			  fence_arg, &fence);
+	if (fence) {
+		VSP_DEBUG("vp8 fence sequence %x\n", fence->sequence);
+		pic_param->input_frame.surface_id = fence->sequence;
+		vsp_priv->last_sequence = fence->sequence;
+
+		ttm_fence_object_unref(&fence);
+	} else {
+		VSP_DEBUG("NO fence?????\n");
+	}
+
+out:
+#ifndef VP8_ENC_DEBUG
+	ttm_bo_kunmap(&vp8_encode_frame__kmap);
+	ttm_bo_unref(&pic_param_bo);
+#endif
+	return ret;
+}
+
+static int vsp_fence_compose_surfaces(struct drm_file *priv,
+				struct list_head *validate_list,
+				uint32_t fence_type,
+				struct drm_psb_cmdbuf_arg *arg,
+				struct psb_ttm_fence_rep *fence_arg,
+				struct ttm_buffer_object *compose_param_bo)
+{
+	int ret = 0;
+	struct ttm_fence_object *fence = NULL;
+	struct drm_device *dev = priv->minor->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct vsp_private *vsp_priv = dev_priv->vsp_private;
+
+	psb_fence_or_sync(priv, VSP_ENGINE_VPP, fence_type,
+			  arg->fence_flags, validate_list,
+			  fence_arg, &fence);
+	if (fence) {
+		VSP_DEBUG("compose fence sequence %x\n",
+			  fence->sequence);
+		vsp_priv->compose_fence = fence->sequence;
+
+		ttm_fence_object_unref(&fence);
+	} else {
+		VSP_DEBUG("NO fence?????\n");
+		ret = -1;
+	}
+	ttm_bo_unref(&compose_param_bo);
+	return ret;
+}
+
+
+
+
+bool vsp_fence_poll(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct vsp_private *vsp_priv = dev_priv->vsp_private;
+	uint32_t sequence;
+	unsigned long irq_flags;
+
+	VSP_DEBUG("polling vsp msg\n");
+
+	sequence = vsp_priv->current_sequence;
+
+	spin_lock_irqsave(&vsp_priv->lock, irq_flags);
+
+	/* handle the response message */
+	sequence = vsp_handle_response(dev_priv);
+
+	spin_unlock_irqrestore(&vsp_priv->lock, irq_flags);
+
+	if (sequence != vsp_priv->current_sequence) {
+		vsp_priv->current_sequence = sequence;
+		psb_fence_handler(dev, VSP_ENGINE_VPP);
+		return true;
+	}
+
+	return false;
+}
+
+int vsp_new_context(struct drm_device *dev, struct file *filp, int ctx_type)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct vsp_private *vsp_priv = dev_priv->vsp_private;
+	int ret = 0;
+
+	dev_priv = dev->dev_private;
+	if (dev_priv == NULL) {
+		DRM_ERROR("VSP: drm driver is not initialized correctly\n");
+		return -1;
+	}
+
+	vsp_priv = dev_priv->vsp_private;
+	if (vsp_priv == NULL) {
+		DRM_ERROR("VSP: vsp driver is not initialized correctly\n");
+		return -1;
+	}
+
+	mutex_lock(&vsp_priv->vsp_mutex);
+	if (VAEntrypointEncSlice == ctx_type) {
+		vsp_priv->context_vp8_num++;
+		if (vsp_priv->context_vp8_num > MAX_VP8_CONTEXT_NUM) {
+			DRM_ERROR("VSP: Only support 3 vp8 encoding!\n");
+			/* store the 4th vp8 encoding fd for remove context use */
+			vsp_priv->vp8_filp[3] = filp;
+			mutex_unlock(&vsp_priv->vsp_mutex);
+			return -1;
+		}
+
+		/* store the fd of 3 vp8 encoding processes */
+		if (vsp_priv->vp8_filp[0] == NULL) {
+			vsp_priv->vp8_filp[0] = filp;
+		} else if (vsp_priv->vp8_filp[1] == NULL) {
+			vsp_priv->vp8_filp[1] = filp;
+		} else if (vsp_priv->vp8_filp[2] == NULL) {
+			vsp_priv->vp8_filp[2] = filp;
+		} else {
+			DRM_ERROR("VSP: The current 3 vp8 contexts have not been removed\n");
+		}
+	} else if (ctx_type == VAEntrypointVideoProc) {
+		vsp_priv->context_vpp_num++;
+#ifdef MOOREFIELD
+		if (vsp_priv->context_vpp_num > MAX_VPP_CONTEXT_NUM) {
+			DRM_ERROR("VSP: Only support one VPP stream!\n");
+			ret = -1;
+		}
+#endif
+	} else {
+		DRM_ERROR("VSP: couldn't support the context %x\n", ctx_type);
+		ret = -1;
+	}
+	mutex_unlock(&vsp_priv->vsp_mutex);
+
+	VSP_DEBUG("context_vp8_num %d, context_vpp_num %d\n",
+		  vsp_priv->context_vp8_num, vsp_priv->context_vpp_num);
+	return ret;
+}
+
+void vsp_rm_context(struct drm_device *dev, struct file *filp, int ctx_type)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct vsp_private *vsp_priv = dev_priv->vsp_private;
+	int count = 0;
+	struct vss_command_t *cur_cmd;
+	bool tmp = true;
+	int i = 0;
+
+	dev_priv = dev->dev_private;
+	if (dev_priv == NULL) {
+		DRM_ERROR("RM context, but dev_priv is NULL");
+		return;
+	}
+
+	vsp_priv = dev_priv->vsp_private;
+	if (vsp_priv == NULL) {
+		DRM_ERROR("RM context, but vsp_priv is NULL");
+		return;
+	}
+
+	mutex_lock(&vsp_priv->vsp_mutex);
+	if (vsp_priv->ctrl == NULL) {
+		for (i = 0; i < MAX_VP8_CONTEXT_NUM + 1; i++) {
+			if (filp == vsp_priv->vp8_filp[i])
+				vsp_priv->vp8_filp[i] = NULL;
+		}
+
+		if (VAEntrypointEncSlice == ctx_type) {
+			if (vsp_priv->context_vp8_num > 0)
+				vsp_priv->context_vp8_num--;
+		} else if (ctx_type == VAEntrypointVideoProc)
+			if (vsp_priv->context_vpp_num > 0)
+				vsp_priv->context_vpp_num--;
+		mutex_unlock(&vsp_priv->vsp_mutex);
+		return;
+	}
+
+	VSP_DEBUG("ctx_type=%d\n", ctx_type);
+
+	/* power on the VSP hardware to write registers */
+	power_up_vsp();
+
+	if (VAEntrypointEncSlice == ctx_type && filp != vsp_priv->vp8_filp[3]) {
+		if (vsp_priv->vsp_state == VSP_STATE_SUSPEND) {
+			tmp = vsp_resume_function(dev_priv);
+			VSP_DEBUG("The VSP is on suspend, send resume!\n");
+		}
+
+		VSP_DEBUG("VP8 send the last command here to destroy context buffer\n");
+		/* Update cmd_wr for VP8 and FRC/VPP switch context case */
+		if (vsp_priv->acc_num_cmd >= 1) {
+			vsp_priv->ctrl->cmd_wr = (vsp_priv->ctrl->cmd_wr + 1) % VSP_CMD_QUEUE_SIZE;
+			vsp_priv->acc_num_cmd = 0;
+		}
+
+		cur_cmd = vsp_priv->cmd_queue + vsp_priv->ctrl->cmd_wr % VSP_CMD_QUEUE_SIZE;
+
+		cur_cmd->context = VSP_API_GENERIC_CONTEXT_ID;
+		cur_cmd->type = VssGenDestroyContext;
+		cur_cmd->size = 0;
+		cur_cmd->buffer_id = 0;
+		cur_cmd->irq = 0;
+		cur_cmd->reserved6 = 0;
+		cur_cmd->reserved7 = 0;
+
+		/* judge which vp8 process should be remove context */
+		for (i = 0; i< MAX_VP8_CONTEXT_NUM; i++) {
+			/* context_id=1 for filp[0] */
+			/* context_id=2 for filp[1] */
+			/* context_id=3 for filp[2] */
+			if (filp == vsp_priv->vp8_filp[i]) {
+				cur_cmd->buffer = i + 1;
+				vsp_priv->vp8_filp[i] = NULL;
+			}
+		}
+
+		vsp_priv->ctrl->cmd_wr =
+			(vsp_priv->ctrl->cmd_wr + 1) % VSP_CMD_QUEUE_SIZE;
+		mutex_unlock(&vsp_priv->vsp_mutex);
+
+		/* Wait all the cmd be finished */
+		while (vsp_priv->vp8_cmd_num > 0 && count++ < 20000) {
+			PSB_UDELAY(6);
+		}
+
+		mutex_lock(&vsp_priv->vsp_mutex);
+		vsp_priv->context_vp8_num--;
+		if (count == 20000) {
+			DRM_ERROR("Failed to handle sigint event\n");
+		}
+	} else if(VAEntrypointEncSlice == ctx_type && filp == vsp_priv->vp8_filp[3]) {
+		/* driver support 3 vp8 encoding simultaneously at most */
+		/* clear the 4th vp8 encoding fd */
+		vsp_priv->context_vp8_num--;
+		vsp_priv->vp8_filp[3] = NULL;
+	} else if (ctx_type == VAEntrypointVideoProc)
+		vsp_priv->context_vpp_num--;
+
+	/* Return if there is any context is running */
+	if (vsp_priv->context_vp8_num > 0 || vsp_priv->context_vpp_num > 0) {
+		VSP_DEBUG("context_vp8_num %d, context_vpp_num %d\n",
+			vsp_priv->context_vp8_num, vsp_priv->context_vpp_num);
+
+		power_down_vsp();
+		mutex_unlock(&vsp_priv->vsp_mutex);
+		return;
+	}
+
+	vsp_priv->ctrl->entry_kind = vsp_exit;
+
+	VSP_DEBUG("No context now, set state to DOWN to force power down\n");
+	PSB_UDELAY(800);
+	
+	/* in case of power mode 0, HW always active,
+	 * * in case got no response from FW, vsp_state=hang but could not be powered off,
+	 * * force state to down */
+	vsp_priv->vsp_state = VSP_STATE_DOWN;
+	force_power_down_vsp();
+	vsp_priv->vsp_state = VSP_STATE_DOWN;
+
+	mutex_unlock(&vsp_priv->vsp_mutex);
+	VSP_DEBUG("vsp_rm_context is successful\n");
+	/* FIXME: frequency should change */
+	VSP_PERF("the total time spend on VSP is %llu ms\n",
+		 div_u64(vsp_priv->vss_cc_acc, 200 * 1000));
+
+	return;
+}
+
+int psb_vsp_save_context(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct vsp_private *vsp_priv = dev_priv->vsp_private;
+	int i;
+
+	if (vsp_priv->fw_loaded == VSP_FW_NONE)
+		return 0;
+
+	/* save the VSP config registers */
+	for (i = 2; i < VSP_CONFIG_SIZE; i++)
+		CONFIG_REG_READ32(i, &(vsp_priv->saved_config_regs[i]));
+
+	/* set VSP PM/entry status */
+	vsp_priv->ctrl->entry_kind = vsp_entry_booted;
+	vsp_priv->vsp_state = VSP_STATE_SUSPEND;
+
+	return 0;
+}
+
+int psb_vsp_restore_context(struct drm_device *dev)
+{
+	/* restore the VSP info */
+	return 0;
+}
+
+int psb_check_vsp_idle(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct vsp_private *vsp_priv = dev_priv->vsp_private;
+	int cmd_rd, cmd_wr;
+	unsigned int reg, mode;
+
+	if (vsp_priv->fw_loaded == 0
+	    || vsp_priv->vsp_state == VSP_STATE_DOWN
+	    || vsp_priv->vsp_state == VSP_STATE_SUSPEND)
+		return 0;
+
+	cmd_rd = vsp_priv->ctrl->cmd_rd;
+	cmd_wr = vsp_priv->ctrl->cmd_wr;
+	if ((cmd_rd != cmd_wr && vsp_priv->vsp_state != VSP_STATE_IDLE)
+	    || vsp_priv->vsp_state == VSP_STATE_ACTIVE) {
+		PSB_DEBUG_PM("VSP: there is command need to handle!\n");
+		return -EBUSY;
+	}
+
+	/* make sure VSP system has really been idle
+	 * vsp-api runs on sp0 or sp1, but we don't know which one when booting
+	 * securely. So wait for both.
+	 */
+	if (!vsp_is_idle(dev_priv, vsp_sp0)) {
+		PSB_DEBUG_PM("VSP: sp0 return busy!\n");
+		goto out;
+	}
+	if (!vsp_is_idle(dev_priv, vsp_sp1)) {
+		PSB_DEBUG_PM("VSP: sp1 return busy!\n");
+		goto out;
+	}
+
+	if (!vsp_is_idle(dev_priv, vsp_vp0)) {
+		PSB_DEBUG_PM("VSP: vp0 return busy!\n");
+		goto out;
+	}
+	if (!vsp_is_idle(dev_priv, vsp_vp1)) {
+		PSB_DEBUG_PM("VSP: vp1 return busy!\n");
+		goto out;
+	}
+
+	return 0;
+out:
+	/* For suspend_and_hw_idle power-mode, sometimes hw couldn't handle
+	 * the hw_idle signal correctly. So driver still need power off
+	 * the VSP with error log to trace this situation.
+	 */
+	CONFIG_REG_READ32(1, &reg);
+	mode = vsp_priv->ctrl->power_saving_mode;
+	if (reg == 1 &&
+	    mode == vsp_suspend_and_hw_idle_on_empty_queue) {
+		PSB_DEBUG_PM("VSP core is active, but config_reg_d1 is 1\n");
+		return 0;
+	} else
+		return -EBUSY;
+}
+
+/* The tasklet function to power down VSP */
+void psb_powerdown_vsp(struct work_struct *work)
+{
+	struct vsp_private *vsp_priv =
+		container_of(work, struct vsp_private, vsp_suspend_wq.work);
+	bool ret;
+
+	if (!vsp_priv)
+		return;
+
+	ret = power_island_put(OSPM_VIDEO_VPP_ISLAND);
+
+	if (ret == false)
+		PSB_DEBUG_PM("The VSP could NOT be powered off!\n");
+	else
+		PSB_DEBUG_PM("The VSP has been powered off!\n");
+
+	return;
+}
+
+/* vsp irq tasklet function */
+void vsp_irq_task(struct work_struct *work)
+{
+	struct vsp_private *vsp_priv =
+		container_of(work, struct vsp_private, vsp_irq_wq.work);
+	struct drm_device *dev;
+	struct drm_psb_private *dev_priv;
+	uint32_t sequence;
+
+	if (!vsp_priv)
+		return;
+
+	dev = vsp_priv->dev;
+	dev_priv = dev->dev_private;
+
+	mutex_lock(&vsp_priv->vsp_mutex);
+	/* handle the response message */
+	sequence = vsp_handle_response(dev_priv);
+
+	/* handle fence info */
+	if (sequence != vsp_priv->current_sequence) {
+		vsp_priv->current_sequence = sequence;
+		psb_fence_handler(dev, VSP_ENGINE_VPP);
+	} else {
+		VSP_DEBUG("will not handle fence for %x vs current %x\n",
+			  sequence, vsp_priv->current_sequence);
+	}
+
+	if (drm_vsp_pmpolicy != PSB_PMPOLICY_NOPM){
+		if (vsp_priv->vsp_state == VSP_STATE_IDLE) {
+			if (vsp_priv->ctrl->cmd_rd == vsp_priv->ctrl->cmd_wr)
+				power_down_vsp();
+			else {
+				force_power_down_vsp();
+
+				VSP_DEBUG("Now power up VSP again to resume\n");
+				power_up_vsp();
+				vsp_resume_function(dev_priv);
+			}
+		}
+	}
+	mutex_unlock(&vsp_priv->vsp_mutex);
+
+	return;
+}
+
+void vsp_cmd_submit_check(struct work_struct *work)
+{
+	struct vsp_private *vsp_priv =
+		container_of(work, struct vsp_private, vsp_cmd_submit_check_wq.work);
+	struct drm_device *dev;
+	struct drm_psb_private *dev_priv;
+	uint32_t power_up_try_count;
+
+	if (!vsp_priv)
+		return;
+
+	dev = vsp_priv->dev;
+	dev_priv = dev->dev_private;
+
+	mutex_lock(&vsp_priv->vsp_mutex);
+
+	if (vsp_priv->acc_num_cmd > 0) {
+		power_up_try_count = 10;
+		while (power_up_try_count--)
+			if (power_island_get(OSPM_VIDEO_VPP_ISLAND) == true)
+				break;
+		if (power_up_try_count <= 0) {
+			DRM_ERROR("failed to send remaining command");
+			goto out;
+		}
+
+		vsp_resume_function(dev_priv);
+
+		vsp_priv->ctrl->cmd_wr =
+			(vsp_priv->ctrl->cmd_wr + vsp_priv->acc_num_cmd) % VSP_CMD_QUEUE_SIZE;
+		vsp_priv->acc_num_cmd = 0;
+		vsp_priv->force_flush_cmd = 0;
+
+		power_island_put(OSPM_VIDEO_VPP_ISLAND);
+	}
+
+out:
+	mutex_unlock(&vsp_priv->vsp_mutex);
+	return;
+}
+
+int psb_vsp_dump_info(struct drm_psb_private *dev_priv)
+{
+	struct vsp_private *vsp_priv = dev_priv->vsp_private;
+	unsigned int reg, i;
+
+	/* config info */
+	for (i = 0; i < VSP_CONFIG_SIZE; i++) {
+		CONFIG_REG_READ32(i, &reg);
+		VSP_DEBUG("partition1_config_reg_d%d=%x\n", i, reg);
+	}
+
+	/* ma_header_reg */
+	MM_READ32(vsp_priv->boot_header.ma_header_reg, 0, &reg);
+	VSP_DEBUG("ma_header_reg:%x\n", reg);
+
+	/* The setting-struct */
+	VSP_DEBUG("setting addr:%lu\n", vsp_priv->setting_bo->offset);
+	VSP_DEBUG("setting->command_queue_size:0x%x\n",
+			vsp_priv->setting->command_queue_size);
+	VSP_DEBUG("setting->command_queue_addr:%x\n",
+			vsp_priv->setting->command_queue_addr);
+	VSP_DEBUG("setting->response_queue_size:0x%x\n",
+			vsp_priv->setting->response_queue_size);
+	VSP_DEBUG("setting->response_queue_addr:%x\n",
+			vsp_priv->setting->response_queue_addr);
+
+	/* dump dma register */
+	VSP_DEBUG("partition1_dma_external_ch[0..23]_pending_req_cnt\n");
+	for (i=0; i <= 23; i++) {
+		MM_READ32(0x150010, i * 0x20, &reg);
+		if (reg != 0)
+			VSP_DEBUG("partition1_dma_external_ch%d_pending_req_cnt = 0x%x\n",
+					i, reg);
+	}
+
+	VSP_DEBUG("partition1_dma_external_dim[0..31]_pending_req_cnt\n");
+	for (i=0; i <= 31; i++) {
+		MM_READ32(0x151008, i * 0x20, &reg);
+		if (reg != 0)
+			VSP_DEBUG("partition1_dma_external_dim%d_pending_req_cnt = 0x%x\n",
+					i, reg);
+	}
+
+	VSP_DEBUG("partition1_dma_internal_ch[0..7]_pending_req_cnt\n");
+	for (i=0; i <= 7; i++) {
+		MM_READ32(0x160010, i * 0x20, &reg);
+		if (reg != 0)
+			VSP_DEBUG("partition1_dma_internal_ch%d_pending_req_cnt = 0x%x\n",
+					i, reg);
+	}
+	VSP_DEBUG("partition1_dma_internal_dim[0..7]_pending_req_cnt\n");
+	for (i=0; i <= 7; i++) {
+		MM_READ32(0x160408, i * 0x20, &reg);
+		if (reg != 0)
+			VSP_DEBUG("partition1_dma_internal_dim%d_pending_req_cnt = 0x%x\n",
+					i, reg);
+	}
+
+	/* IRQ registers */
+	for (i = 0; i < 6; i++) {
+		MM_READ32(0x180000, i * 4, &reg);
+		VSP_DEBUG("partition1_gp_ireg_IRQ%d:%x", i, reg);
+	}
+	IRQ_REG_READ32(VSP_IRQ_CTRL_IRQ_EDGE, &reg);
+	VSP_DEBUG("partition1_irq_control_irq_edge:%x\n", reg);
+	IRQ_REG_READ32(VSP_IRQ_CTRL_IRQ_MASK, &reg);
+	VSP_DEBUG("partition1_irq_control_irq_mask:%x\n", reg);
+	IRQ_REG_READ32(VSP_IRQ_CTRL_IRQ_STATUS, &reg);
+	VSP_DEBUG("partition1_irq_control_irq_status:%x\n", reg);
+	IRQ_REG_READ32(VSP_IRQ_CTRL_IRQ_CLR, &reg);
+	VSP_DEBUG("partition1_irq_control_irq_clear:%x\n", reg);
+	IRQ_REG_READ32(VSP_IRQ_CTRL_IRQ_ENB, &reg);
+	VSP_DEBUG("partition1_irq_control_irq_enable:%x\n", reg);
+	IRQ_REG_READ32(VSP_IRQ_CTRL_IRQ_LEVEL_PULSE, &reg);
+	VSP_DEBUG("partition1_irq_control_irq_pulse:%x\n", reg);
+
+	/* MMU table address */
+	MM_READ32(MMU_TABLE_ADDR, 0x0, &reg);
+	VSP_DEBUG("mmu_page_table_address:%x\n", reg);
+
+	/* SP0 info */
+	VSP_DEBUG("sp0_processor:%d\n", vsp_sp0);
+	SP_REG_READ32(0x0, &reg, vsp_sp0);
+	VSP_DEBUG("sp0_stat_and_ctrl:%x\n", reg);
+	SP_REG_READ32(0x4, &reg, vsp_sp0);
+	VSP_DEBUG("sp0_base_address:%x\n", reg);
+	SP_REG_READ32(0x24, &reg, vsp_sp0);
+	VSP_DEBUG("sp0_debug_pc:%x\n", reg);
+	SP_REG_READ32(0x28, &reg, vsp_sp0);
+	VSP_DEBUG("sp0_cfg_pmem_iam_op0:%x\n", reg);
+	SP_REG_READ32(0x10, &reg, vsp_sp0);
+	VSP_DEBUG("sp0_cfg_pmem_master:%x\n", reg);
+
+	/* SP1 info */
+	VSP_DEBUG("sp1_processor:%d\n", vsp_sp1);
+	SP_REG_READ32(0x0, &reg, vsp_sp1);
+	VSP_DEBUG("sp1_stat_and_ctrl:%x\n", reg);
+	SP_REG_READ32(0x4, &reg, vsp_sp1);
+	VSP_DEBUG("sp1_base_address:%x\n", reg);
+	SP_REG_READ32(0x24, &reg, vsp_sp1);
+	VSP_DEBUG("sp1_debug_pc:%x\n", reg);
+	SP_REG_READ32(0x28, &reg, vsp_sp1);
+	VSP_DEBUG("sp1_cfg_pmem_iam_op0:%x\n", reg);
+	SP_REG_READ32(0x10, &reg, vsp_sp1);
+	VSP_DEBUG("sp1_cfg_pmem_master:%x\n", reg);
+
+	/* VP0 info */
+	VSP_DEBUG("vp0_processor:%d\n", vsp_vp0);
+	SP_REG_READ32(0x0, &reg, vsp_vp0);
+	VSP_DEBUG("partition2_vp0_tile_vp_stat_and_ctrl:%x\n", reg);
+	SP_REG_READ32(0x4, &reg, vsp_vp0);
+	VSP_DEBUG("partition2_vp0_tile_vp_base_address:%x\n", reg);
+	SP_REG_READ32(0x34, &reg, vsp_vp0);
+	VSP_DEBUG("partition2_vp0_tile_vp_debug_pc:%x\n", reg);
+	SP_REG_READ32(0x38, &reg, vsp_vp0);
+	VSP_DEBUG("partition2_vp0_tile_vp_stall_stat_cfg_pmem_iam_op0:%x\n",
+			reg);
+	SP_REG_READ32(0x10, &reg, vsp_vp0);
+	VSP_DEBUG("partition2_vp0_tile_vp_base_addr_MI_cfg_pmem_master:%x\n",
+			reg);
+
+	/* VP1 info */
+	VSP_DEBUG("vp1_processor:%d\n", vsp_vp1);
+	SP_REG_READ32(0x0, &reg, vsp_vp1);
+	VSP_DEBUG("partition2_vp1_tile_vp_stat_and_ctrl:%x\n", reg);
+	SP_REG_READ32(0x4, &reg, vsp_vp1);
+	VSP_DEBUG("partition2_vp1_tile_vp_base_address:%x\n", reg);
+	SP_REG_READ32(0x34, &reg, vsp_vp1);
+	VSP_DEBUG("partition2_vp1_tile_vp_debug_pc:%x\n", reg);
+	SP_REG_READ32(0x38, &reg, vsp_vp1);
+	VSP_DEBUG("partition2_vp1_tile_vp_stall_stat_cfg_pmem_iam_op0:%x\n",
+			reg);
+	SP_REG_READ32(0x10, &reg, vsp_vp1);
+	VSP_DEBUG("partition2_vp1_tile_vp_base_addr_MI_cfg_pmem_master:%x\n",
+			reg);
+
+	/* MEA info */
+	VSP_DEBUG("mea_processor:%d\n", vsp_mea);
+	SP_REG_READ32(0x0, &reg, vsp_mea);
+	VSP_DEBUG("partition3_mea_tile_mea_stat_and_ctrl:%x\n", reg);
+	SP_REG_READ32(0x4, &reg, vsp_mea);
+	VSP_DEBUG("partition3_mea_tile_mea_base_address:%x\n", reg);
+	SP_REG_READ32(0x2C, &reg, vsp_mea);
+	VSP_DEBUG("partition3_mea_tile_mea_debug_pc:%x\n", reg);
+	SP_REG_READ32(0x30, &reg, vsp_mea);
+	VSP_DEBUG("partition3_mea_tile_mea_stall_stat_cfg_pmem_iam_op0:%x\n",
+			reg);
+	SP_REG_READ32(0x10, &reg, vsp_mea);
+	VSP_DEBUG("partition3_mea_tile_mea_base_addr_MI_cfg_pmem_master:%x\n",
+			reg);
+
+	/* ECA info */
+	VSP_DEBUG("ECA info\n");
+	MM_READ32(0x30000, 0x0, &reg);
+	VSP_DEBUG("partition1_sp0_tile_eca_stat_and_ctrl:%x\n", reg);
+	MM_READ32(0x30000, 0x4, &reg);
+	VSP_DEBUG("partition1_sp0_tile_eca_base_address:%x\n", reg);
+	MM_READ32(0x30000, 0x2C, &reg);
+	VSP_DEBUG("partition1_sp0_tile_eca_debug_pc:%x\n", reg);
+	MM_READ32(0x30000, 0x30, &reg);
+	VSP_DEBUG("partition1_sp0_tile_eca_stall_stat_cfg_pmem_loc_op0:%x\n",
+			reg);
+
+	/* WDT info */
+	for (i = 0; i < 14; i++) {
+		MM_READ32(0x170000, i * 4, &reg);
+		VSP_DEBUG("partition1_wdt_reg%d:%x\n", i, reg);
+	}
+
+	/* command queue */
+	VSP_DEBUG("command queue:\n");
+	for (i = 0; i < VSP_CMD_QUEUE_SIZE; i++) {
+		VSP_DEBUG("cmd[%d]:%.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x\n", i,
+			vsp_priv->cmd_queue[i].context,
+			vsp_priv->cmd_queue[i].type,
+			vsp_priv->cmd_queue[i].buffer,
+			vsp_priv->cmd_queue[i].size,
+			vsp_priv->cmd_queue[i].buffer_id,
+			vsp_priv->cmd_queue[i].irq,
+			vsp_priv->cmd_queue[i].reserved6,
+			vsp_priv->cmd_queue[i].reserved7);
+	}
+
+	/* response queue */
+	VSP_DEBUG("ack queue:\n");
+	for (i = 0; i < VSP_ACK_QUEUE_SIZE; i++) {
+		VSP_DEBUG("ack[%d]:%.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x\n", i,
+			vsp_priv->ack_queue[i].context,
+			vsp_priv->ack_queue[i].type,
+			vsp_priv->ack_queue[i].buffer,
+			vsp_priv->ack_queue[i].size,
+			vsp_priv->ack_queue[i].vss_cc,
+			vsp_priv->ack_queue[i].reserved5,
+			vsp_priv->ack_queue[i].reserved6,
+			vsp_priv->ack_queue[i].reserved7);
+	}
+
+	return 0;
+}
+
+void check_invalid_cmd_type(unsigned int cmd_type)
+{
+	switch (cmd_type) {
+	case VssProcSharpenParameterCommand:
+		DRM_ERROR("VSP: Sharpen parameter command is received ");
+		DRM_ERROR("before pipeline command %x\n", cmd_type);
+		break;
+
+	case VssProcDenoiseParameterCommand:
+		DRM_ERROR("VSP: Denoise parameter command is received ");
+		DRM_ERROR("before pipeline command %x\n", cmd_type);
+		break;
+
+	case VssProcColorEnhancementParameterCommand:
+		DRM_ERROR("VSP: color enhancer parameter command is received");
+		DRM_ERROR("before pipeline command %x\n", cmd_type);
+		break;
+
+	case VssProcFrcParameterCommand:
+		DRM_ERROR("VSP: Frc parameter command is received ");
+		DRM_ERROR("before pipeline command %x\n", cmd_type);
+		break;
+
+	case VssProcPictureCommand:
+		DRM_ERROR("VSP: Picture parameter command is received ");
+		DRM_ERROR("before pipeline command %x\n", cmd_type);
+		break;
+
+	case VssVp8encSetSequenceParametersCommand:
+		DRM_ERROR("VSP: VP8 sequence parameter command is received\n");
+		DRM_ERROR("before pipeline command %x\n", cmd_type);
+		break;
+
+	case VssVp8encEncodeFrameCommand:
+		DRM_ERROR("VSP: VP8 picture parameter command is received\n");
+		DRM_ERROR("before pipeline command %x\n", cmd_type);
+		break;
+
+	case VssVp8encInit:
+		DRM_ERROR("Firmware initialization failure\n");
+		DRM_ERROR("state buffer size too small\n");
+		break;
+
+	default:
+		DRM_ERROR("VSP: Unknown command type %x\n", cmd_type);
+		break;
+	}
+
+	return;
+}
+
+void check_invalid_cmd_arg(unsigned int cmd_type)
+{
+	switch (cmd_type) {
+	case VssProcDenoiseParameterCommand:
+		DRM_ERROR("VSP: unsupport value for denoise parameter\n");
+		break;
+	default:
+		DRM_ERROR("VSP: input frame resolution is different");
+		DRM_ERROR("from previous command\n");
+		break;
+	}
+
+	return;
+}
+
+void handle_error_response(unsigned int error_type, unsigned int cmd_type)
+{
+
+	switch (error_type) {
+	case VssInvalidCommandType:
+		check_invalid_cmd_type(cmd_type);
+		DRM_ERROR("VSP: Invalid command\n");
+		break;
+	case VssInvalidCommandArgument:
+		check_invalid_cmd_arg(cmd_type);
+		DRM_ERROR("VSP: Invalid command\n");
+		break;
+	case VssInvalidProcPictureCommand:
+		DRM_ERROR("VSP: wrong num of input/output\n");
+		break;
+	case VssInvalidDdrAddress:
+		DRM_ERROR("VSP: DDR address isn't in allowed 1GB range\n");
+		break;
+	case VssInvalidSequenceParameters_VP8:
+		check_invalid_cmd_type(cmd_type);
+		DRM_ERROR("VSP: Invalid sequence parameter\n");
+		break;
+	case VssInvalidPictureParameters_VP8:
+		check_invalid_cmd_type(cmd_type);
+		DRM_ERROR("VSP: Invalid picture parameter\n");
+		break;
+	case VssInitFailure_VP8:
+		check_invalid_cmd_type(cmd_type);
+		DRM_ERROR("VSP: Init Failure\n");
+		break;
+	case VssCorruptFrame:
+		DRM_ERROR("VSP: Coded Frame is corrupted\n");
+		break;
+	case VssCorruptFramecontinue_VP8:
+		DRM_ERROR("VSP: not need to re-init context\n");
+		break;
+	case VssContextMustBeDestroyed_VP8:
+		DRM_ERROR("VSP: context must be destroyed and new context is created\n");
+		break;
+	default:
+		DRM_ERROR("VSP: Unknown error, code %x\n", error_type);
+		break;
+	}
+}
+
diff --git a/drivers/external_drivers/intel_media/video/vsp/vsp.h b/drivers/external_drivers/intel_media/video/vsp/vsp.h
new file mode 100644
index 0000000..5356d0c
--- /dev/null
+++ b/drivers/external_drivers/intel_media/video/vsp/vsp.h
@@ -0,0 +1,368 @@
+/**
+ * file vsp.h
+ * Author: Binglin Chen <binglin.chen@intel.com>
+ *
+ */
+
+/**************************************************************************
+ *
+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#ifndef _VSP_H_
+#define _VSP_H_
+
+#include "psb_drv.h"
+#include "vsp_fw.h"
+
+/* reg define */
+#define SP1_SP_DMEM_IP 0x70000
+
+/* processor */
+#define SP0_SP_REG_BASE 0x000000
+#define SP1_SP_REG_BASE 0x050000
+#define VP0_SP_REG_BASE 0x080000
+#define VP1_SP_REG_BASE 0x0C0000
+#define MEA_SP_REG_BASE 0x100000
+
+/* SP stat_ctrl */
+#define SP_STAT_AND_CTRL_REG 0x0
+#define SP_STAT_AND_CTRL_REG_RESET_FLAG           0
+#define SP_STAT_AND_CTRL_REG_START_FLAG           1
+#define SP_STAT_AND_CTRL_REG_BREAK_FLAG           2
+#define SP_STAT_AND_CTRL_REG_RUN_FLAG             3
+#define SP_STAT_AND_CTRL_REG_BROKEN_FLAG          4
+#define SP_STAT_AND_CTRL_REG_READY_FLAG           5
+#define SP_STAT_AND_CTRL_REG_SLEEP_FLAG           6
+#define SP_STAT_AND_CTRL_REG_ICACHE_INVALID_FLAG  0xC
+#define SP_STAT_AND_CTRL_REG_ICACHE_PREFETCH_FLAG 0xD
+
+/* offsets of registers in processors */
+#define VSP_STAT_CTRL_REG_OFFSET             0x00000
+#define VSP_START_PC_REG_OFFSET              0x00004
+#define VSP_ICACHE_BASE_REG_OFFSET           0x00010
+
+#define SP_BASE_ADDR_REG (0x1 * 4)
+
+#define SP_CFG_PMEM_MASTER 0x10
+
+/* MMU */
+#define MMU_INVALID         0x1B0000
+#define MMU_TABLE_ADDR      0x1B0004
+
+/* IRQ controller */
+#define VSP_IRQ_REG_BASE 0x190000
+#define VSP_IRQ_CTRL_IRQ_EDGE            0x0
+#define VSP_IRQ_CTRL_IRQ_MASK            0x4
+#define VSP_IRQ_CTRL_IRQ_STATUS          0x8
+#define VSP_IRQ_CTRL_IRQ_CLR             0xC
+#define VSP_IRQ_CTRL_IRQ_ENB             0x10
+#define VSP_IRQ_CTRL_IRQ_LEVEL_PULSE     0x14
+
+#define VSP_SP0_IRQ_SHIFT 0x7
+#define VSP_SP1_IRQ_SHIFT 0x8
+
+#define VSP_CONFIG_REG_SDRAM_BASE 0x1A0000
+#define VSP_CONFIG_REG_START 0x8
+
+#define VSP_FIRMWARE_MEM_ALIGNMENT 4096
+/* #define VP8_ENC_DEBUG 1 */
+
+#define MAX_VP8_CONTEXT_NUM 3
+#define MAX_VPP_CONTEXT_NUM 1
+
+#define CONTEXT_VPP_ID      0
+#define CONTEXT_VP8_ID      1
+#define CONTEXT_COMPOSE_ID  5
+
+static const unsigned int vsp_processor_base[] = {
+				SP0_SP_REG_BASE,
+				SP1_SP_REG_BASE,
+				VP0_SP_REG_BASE,
+				VP1_SP_REG_BASE,
+				MEA_SP_REG_BASE
+				};
+
+/* help macro */
+#ifdef MM_WRITE32
+#undef MM_WRITE32
+#endif
+#define MM_WRITE32(base, offset, value)				\
+	do {								\
+		*((uint32_t *)((unsigned char *)(dev_priv->vsp_reg)	\
+			       + base + offset)) = value;		\
+	} while (0)
+
+#ifdef MM_READ32
+#undef MM_READ32
+#endif
+#define MM_READ32(base, offset, pointer)				\
+	do {								\
+		*(pointer) =						\
+			*((uint32_t *)((unsigned char *)		\
+				       (dev_priv->vsp_reg)		\
+				       + base + offset));		\
+	} while (0)
+
+#define SP1_DMEM_WRITE32(offset, value)		\
+	MM_WRITE32(SP1_SP_DMEM_IP, offset, value)
+#define SP1_DMEM_READ32(offset, pointer)	\
+	MM_READ32(SP1_SP_DMEM_IP, offset, pointer)
+
+#define SP_REG_WRITE32(offset, value, processor)			 \
+	do {								 \
+		MM_WRITE32(vsp_processor_base[processor], offset, value); \
+	} while (0)
+
+#define SP_REG_READ32(offset, pointer, processor)		\
+	do {							\
+		MM_READ32(vsp_processor_base[processor], offset, pointer); \
+	} while (0)
+
+
+#define SP0_REG_WRITE32(offset, value)		\
+	MM_WRITE32(SP0_SP_REG_BASE, offset, value)
+#define SP0_REG_READ32(offset, pointer)		\
+	MM_READ32(SP0_SP_REG_BASE, offset, pointer)
+
+#define SP1_REG_WRITE32(offset, value)		\
+	MM_WRITE32(SP1_SP_REG_BASE, offset, value)
+#define SP1_REG_READ32(offset, pointer)		\
+	MM_READ32(SP1_SP_REG_BASE, offset, pointer)
+
+#define CONFIG_REG_WRITE32(offset, value)			\
+	MM_WRITE32(VSP_CONFIG_REG_SDRAM_BASE, ((offset) * 4), value)
+#define CONFIG_REG_READ32(offset, pointer)			\
+	MM_READ32(VSP_CONFIG_REG_SDRAM_BASE, ((offset) * 4), pointer)
+
+#define PAGE_TABLE_SHIFT PAGE_SHIFT
+#define INVALID_MMU MM_WRITE32(0, MMU_INVALID, 0x1)
+#define SET_MMU_PTD(address)						\
+	do {								\
+		MM_WRITE32(0, MMU_TABLE_ADDR, address);			\
+	} while (0)
+
+#define VSP_SET_FLAG(val, offset) \
+	((val) = ((val) | (0x1 << (offset))))
+#define VSP_CLEAR_FLAG(val, offset) \
+	((val) = ((val) & (~(0x1 << (offset)))))
+#define VSP_READ_FLAG(val, offset) \
+	(((val) & (0x1 << (offset))) >> (offset))
+#define VSP_REVERT_FLAG(val, offset) \
+	((val) = (val ^ (0x1 << (offset))))
+
+#define IRQ_REG_WRITE32(offset, value)		\
+	MM_WRITE32(VSP_IRQ_REG_BASE, offset, value)
+#define IRQ_REG_READ32(offset, pointer)		\
+	MM_READ32(VSP_IRQ_REG_BASE, offset, pointer)
+
+#define VSP_NEW_PMSTATE(drm_dev, vsp_priv, new_state)			\
+do {									\
+	vsp_priv->pmstate = new_state;					\
+	sysfs_notify_dirent(vsp_priv->sysfs_pmstate);			\
+	PSB_DEBUG_PM("VSP: %s\n",					\
+		(new_state == PSB_PMSTATE_POWERUP) ? "powerup"		\
+		: ((new_state == PSB_PMSTATE_POWERDOWN) ? "powerdown"	\
+		: "clockgated"));					\
+} while (0)
+
+extern int drm_vsp_pmpolicy;
+
+/* The status of vsp hardware */
+enum vsp_power_state {
+	VSP_STATE_HANG = -1,
+	VSP_STATE_DOWN = 0,
+	VSP_STATE_SUSPEND,
+	VSP_STATE_IDLE,
+	VSP_STATE_ACTIVE
+};
+
+/* The status of firmware */
+enum vsp_firmware_state {
+	VSP_FW_NONE = 0,
+	VSP_FW_LOADED
+};
+
+#define VSP_CONFIG_SIZE 16
+
+enum vsp_irq_reg {
+	VSP_IRQ_REG_EDGE   = 0,
+	VSP_IRQ_REG_MASK   = 1,
+	VSP_IRQ_REG_STATUS = 2,
+	VSP_IRQ_REG_CLR    = 3,
+	VSP_IRQ_REG_ENB    = 4,
+	VSP_IRQ_REG_PULSE  = 5,
+	VSP_IRQ_REG_SIZE
+};
+
+enum vsp_context_num {
+	VSP_CONTEXT_NUM_VPP = 0,
+	VSP_CONTEXT_NUM_VP8 = 1,
+	VSP_CONTEXT_NUM_MAX = 3
+};
+
+enum vsp_fw_type {
+	VSP_FW_TYPE_VPP,
+	VSP_FW_TYPE_VP8
+};
+
+struct vsp_private {
+	struct drm_device *dev;
+	uint32_t current_sequence;
+
+	int fw_loaded;
+	int vsp_state;
+
+	spinlock_t lock;
+
+	unsigned int cmd_queue_size;
+	unsigned int ack_queue_size;
+
+	struct ttm_buffer_object *cmd_queue_bo;
+	unsigned int cmd_queue_sz;
+	struct ttm_bo_kmap_obj cmd_kmap;
+	struct vss_command_t *cmd_queue;
+
+	struct ttm_buffer_object *ack_queue_bo;
+	unsigned int ack_queue_sz;
+	struct ttm_bo_kmap_obj ack_kmap;
+	struct vss_response_t *ack_queue;
+
+	struct ttm_buffer_object *setting_bo;
+	struct ttm_bo_kmap_obj setting_kmap;
+	struct vsp_settings_t *setting;
+
+	struct vsp_secure_boot_header boot_header;
+	struct vsp_multi_app_blob_data ma_header;
+
+	struct vsp_ctrl_reg *ctrl;
+
+	unsigned int pmstate;
+	struct sysfs_dirent *sysfs_pmstate;
+
+	uint64_t vss_cc_acc;
+
+	unsigned int saved_config_regs[VSP_CONFIG_SIZE];
+
+	/* lock for vsp command */
+	struct mutex vsp_mutex;
+
+	/* pm suspend wq */
+	struct delayed_work vsp_suspend_wq;
+
+	/* irq tasklet */
+	struct delayed_work vsp_irq_wq;
+
+	/* the number of cmd will send to VSP */
+	int vsp_cmd_num;
+
+	/* save the address of vp8 cmd_buffer for now */
+	struct VssVp8encPictureParameterBuffer *vp8_encode_frame_cmd;
+	struct ttm_bo_kmap_obj vp8_encode_frame__kmap;
+
+	/* For VP8 dual encoding */
+	struct file *vp8_filp[4];
+	int context_vp8_num;
+
+	/* The context number of VPP */
+	int context_vpp_num;
+
+	/*
+	 * to fix problem when CTRL+C vp8 encoding *
+	 * save VssVp8encEncodeFrameCommand cmd numbers *
+	 * */
+	int vp8_cmd_num;
+
+	struct vss_command_t seq_cmd;
+
+	/* to save the last sequence */
+	uint32_t last_sequence;
+
+	/* VPP pnp usage */
+	unsigned long cmd_submit_time;
+	int acc_num_cmd;
+	int force_flush_cmd;
+	int delayed_burst_cnt;
+	struct delayed_work vsp_cmd_submit_check_wq;
+
+	/* Composer related */
+	uint32_t compose_fence;
+};
+
+extern int vsp_init(struct drm_device *dev);
+extern int vsp_deinit(struct drm_device *dev);
+
+extern int vsp_reset(struct drm_psb_private *dev_priv);
+
+extern int vsp_init_fw(struct drm_device *dev);
+extern int vsp_setup_fw(struct drm_psb_private *dev_priv);
+
+extern void vsp_enableirq(struct drm_device *dev);
+extern void vsp_disableirq(struct drm_device *dev);
+
+extern bool vsp_interrupt(void *pvData);
+
+extern int vsp_cmdbuf_vpp(struct drm_file *priv,
+			  struct list_head *validate_list,
+			  uint32_t fence_type,
+			  struct drm_psb_cmdbuf_arg *arg,
+			  struct ttm_buffer_object *cmd_buffer,
+			  struct psb_ttm_fence_rep *fence_arg);
+
+extern bool vsp_fence_poll(struct drm_device *dev);
+
+extern int vsp_new_context(struct drm_device *dev, struct file *filp, int ctx_type);
+extern void vsp_rm_context(struct drm_device *dev, struct file *filp, int ctx_type);
+extern uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver);
+
+extern int psb_vsp_save_context(struct drm_device *dev);
+extern int psb_vsp_restore_context(struct drm_device *dev);
+extern int psb_check_vsp_idle(struct drm_device *dev);
+
+void vsp_init_function(struct drm_psb_private *dev_priv);
+void vsp_continue_function(struct drm_psb_private *dev_priv);
+int vsp_resume_function(struct drm_psb_private *dev_priv);
+
+extern int psb_vsp_dump_info(struct drm_psb_private *dev_priv);
+
+extern void psb_powerdown_vsp(struct work_struct *work);
+extern void vsp_irq_task(struct work_struct *work);
+extern void vsp_cmd_submit_check(struct work_struct *work);
+
+static inline
+unsigned int vsp_is_idle(struct drm_psb_private *dev_priv,
+			 unsigned int processor)
+{
+	unsigned int reg, start_bit, idle_bit;
+
+	SP_REG_READ32(SP_STAT_AND_CTRL_REG, &reg, processor);
+	start_bit = VSP_READ_FLAG(reg, SP_STAT_AND_CTRL_REG_START_FLAG);
+	idle_bit = VSP_READ_FLAG(reg, SP_STAT_AND_CTRL_REG_READY_FLAG);
+
+	return !start_bit && idle_bit;
+}
+
+static inline
+unsigned int vsp_is_sleeping(struct drm_psb_private *dev_priv,
+			     unsigned int processor)
+{
+	unsigned int reg;
+
+	SP_REG_READ32(SP_STAT_AND_CTRL_REG, &reg, processor);
+	return VSP_READ_FLAG(reg, SP_STAT_AND_CTRL_REG_SLEEP_FLAG);
+}
+#endif	/* _VSP_H_ */
diff --git a/drivers/external_drivers/intel_media/video/vsp/vsp_fw.h b/drivers/external_drivers/intel_media/video/vsp/vsp_fw.h
new file mode 100644
index 0000000..89b8889
--- /dev/null
+++ b/drivers/external_drivers/intel_media/video/vsp/vsp_fw.h
@@ -0,0 +1,740 @@
+/**
+ * file vsp.h
+ * Author: Binglin Chen <binglin.chen@intel.com>
+ *
+ */
+
+/**************************************************************************
+ * Copyright (c) 2007, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#ifndef _VSP_FW_H_
+#define _VSP_FW_H_
+
+#pragma pack(4)
+#define VssProcPipelineMaxNumFilters 5
+#define VSS_PROC_MAX_INPUT_PICTURES  1
+#define VSS_PROC_MAX_OUTPUT_PICTURES 4
+
+/* Application IDs for applications that use secure boot
+ * and context-switching
+ * */
+#define VSP_APP_ID_NONE 0
+#define VSP_APP_ID_FRC_VPP 1
+#define VSP_APP_ID_VP8_ENC 2
+#define VSP_APP_ID_WIDI_ENC 3
+
+enum VssProcFilterType {
+	VssProcFilterDenoise,
+	VssProcFilterSharpening,
+	VssProcFilterColorEnhancement,
+	VssProcFilterFrameRateConversion
+};
+
+enum VssDenoiseType {
+	VssProcDegrain,
+	VssProcDeblock
+};
+
+enum VssFrcQuality {
+	/* VssFrcLowQuality, */
+	VssFrcMediumQuality,
+	VssFrcHighQuality
+};
+
+enum VssFrcConversionRate {
+	VssFrc2xConversionRate,
+	VssFrc2_5xConversionRate,
+	VssFrc4xConversionRate,
+	VssFrc1_25xConversionRate
+};
+
+struct VssProcPipelineParameterBuffer {
+	unsigned int      num_filters;
+	enum VssProcFilterType filter_pipeline[VssProcPipelineMaxNumFilters];
+	unsigned int intermediate_buffer_base;
+	unsigned int intermediate_buffer_size;
+};
+
+struct VssProcSharpenParameterBuffer {
+	int quality;
+	/* to make multiple of 32 bytes*/
+	unsigned int _pad[7];
+};
+
+struct VssProcDenoiseParameterBuffer {
+	enum VssDenoiseType     type;
+	int                value_thr;
+	int                cnt_thr;
+	int                coef;
+	int                temp_thr1;
+	int                temp_thr2;
+	/* to make multiple of 32 bytes*/
+	int                _pad[2];
+};
+
+struct VssProcColorEnhancementParameterBuffer {
+	int                temp_detect;
+	int                temp_correct;
+	int                clip_thr;
+	int                mid_thr;
+	int                luma_amm;
+	int                chroma_amm;
+	/* to make multiple of 32 bytes*/
+	int                _pad[2];
+};
+
+struct VssProcFrcParameterBuffer {
+	enum VssFrcQuality quality;
+	enum VssFrcConversionRate conversion_rate;
+	/* to make multiple of 32 bytes*/
+	int  _pad[6];
+};
+
+/* Set the rotation angle */
+#define VSP_ROTATION_NONE 0
+#define VSP_ROTATION_90   90
+#define VSP_ROTATION_180  180
+#define VSP_ROTATION_270  270
+
+struct VssProcPicture {
+	unsigned int surface_id;
+	/* send interupt when input or output surface is ready */
+	unsigned int irq;
+	unsigned int base;
+	unsigned int height;
+	unsigned int width;
+	unsigned int rot_angle;
+	unsigned int stride;
+	/* frame raw format */
+	unsigned int format;
+	/* flag indicating if frame is stored in tiled format */
+	unsigned int tiled;
+	/* to make multiple of 32 bytes*/
+	int _pad[7];
+};
+
+struct VssProcPictureParameterBuffer {
+	unsigned int num_input_pictures;
+	unsigned int num_output_pictures;
+	/* to make multiple of 32 bytes*/
+	int          _pad[6];
+	struct VssProcPicture input_picture[VSS_PROC_MAX_INPUT_PICTURES];
+	struct VssProcPicture output_picture[VSS_PROC_MAX_OUTPUT_PICTURES];
+};
+
+union VssProcBuffer {
+	struct VssProcPipelineParameterBuffer         pipeline;
+	struct VssProcSharpenParameterBuffer          sharpen_base;
+	struct VssProcDenoiseParameterBuffer          denoiser_base;
+	struct VssProcColorEnhancementParameterBuffer enhancer_base;
+	struct VssProcFrcParameterBuffer              frc;
+	struct VssProcPictureParameterBuffer          picture;
+};
+
+enum VssProcCommandType {
+	VssProcPipelineParameterCommand =         0xFFFE,
+	VssProcSharpenParameterCommand =          0xFFFD,
+	VssProcDenoiseParameterCommand =          0xFFFC,
+	VssProcColorEnhancementParameterCommand = 0xFFFB,
+	VssProcFrcParameterCommand =              0xFFFA,
+	VssProcPictureCommand =                   0xFFF9,
+	VspFencePictureParamCommand =             0xEBEC,
+	VspSetContextCommand =                    0xEBED,
+	Vss_Sys_STATE_BUF_COMMAND =		  0xEBEE,
+	VspFenceComposeCommand =		  0xEBEF
+};
+
+#define VSP_CMD_QUEUE_SIZE (64)
+#define VSP_ACK_QUEUE_SIZE (64)
+
+/*
+ * Command types and data structure.
+ * Each command has a type. Depending on the type there is some kind
+ * of data in external memory,
+ * The VSS will use its DMA to load data from the buffer into local memory.
+ */
+struct vss_command_t {
+	unsigned int       context;
+	unsigned int       type;
+	unsigned int       buffer;
+	unsigned int       size;
+	unsigned int       buffer_id;
+	unsigned int       irq;
+	unsigned int       reserved6;
+	unsigned int       reserved7;
+};
+
+struct vss_response_t {
+	unsigned int       context;
+	unsigned int       type;
+	unsigned int       buffer;
+	unsigned int       size;
+	unsigned int       vss_cc;
+	unsigned int       reserved5;
+	unsigned int       reserved6;
+	unsigned int       reserved7;
+};
+
+/* Default initial values for vsp-command and vsp-response
+* Using those avoids the risk of uninitialized warnings when
+* the definition changes.
+*/
+#define VSP_COMMAND_INITIALIZER {0, 0, 0, 0, 0, 0, 0, 0}
+#define VSP_RESPONSE_INITIALIZER {0, 0, 0, 0, 0, 0, 0, 0}
+
+/*
+ * Response types
+ */
+enum VssResponseType {
+	VssIdleResponse               = 0x80010000,
+	VssErrorResponse              = 0x80020000,
+	VssEndOfSequenceResponse      = 0x80030000,
+	VssCommandBufferReadyResponse = 0x80040000,
+	VssInputSurfaceReadyResponse  = 0x80050000,
+	VssOutputSurfaceReadyResponse = 0x80060000,
+	VssVp8encSetSequenceParametersResponse = 150,
+	VssVp8encEncodeFrameResponse
+};
+
+enum VssStatus {
+	VssOK                         = 0x8001,
+	VssInvalidCommandType         = 0x8002,
+	VssInvalidCommandArgument     = 0x8003,
+	VssInvalidProcPictureCommand  = 0x8004,
+	VssInvalidDdrAddress          = 0x8005,
+	VssInvalidSequenceParameters_VP8 = 0x1,
+	VssInvalidPictureParameters_VP8  = 0x2,
+	VssContextMustBeDestroyed_VP8    = 0x3,
+	VssInitFailure_VP8               = 0x5,
+	VssCorruptFrame                  = 0x6,
+	VssCorruptFramecontinue_VP8      = 0x7
+};
+
+enum FrcResponseType {
+	VssOutputSurfaceFreeResponse = 0x0000F001,
+	VssOutputSurfaceCrcResponse  = 0x0000F002
+};
+
+enum vsp_format {
+	VSP_NV12,
+	VSP_YV12,
+	VSP_UYVY,
+	VSP_YUY2,
+	VSP_NV11,
+	VSP_NV16,
+	VSP_IYUV,
+	VSP_TYPE_ERROR
+};
+
+struct vsp_data {
+	unsigned int fw_state;
+	unsigned int uninit_req;
+};
+
+#define VSP_SECURE_BOOT_MAGIC_NR 0xb0070001
+
+enum vsp_processor {
+	vsp_sp0 = 0,
+	vsp_sp1 = 1,
+	vsp_vp0 = 2,
+	vsp_vp1 = 3,
+	vsp_mea = 4
+};
+
+/**
+* Header-data/struct by PUnit to start VSP boot-processor
+* This struct is mapped directly into the header of the multi-application-blob
+*
+* For each value that is to be written to the VSP, the register-address to
+* write to is listed directly after the value to be written.
+*
+* Entries that contain values can be written directly into the VSP-system.
+* Offsets need to have the secure-boot-header-address added and then be written
+* into the VSP
+*
+* boot_start_value should always be the last value written. (Since it starts
+* the VSP)
+*/
+struct vsp_secure_boot_header {
+	/* Magic number to identify header version */
+	unsigned int magic_number;
+
+	/* Offset to text section of boot-program in blob */
+	unsigned int boot_text_offset;
+	/* iCache base-address of boot-processor */
+	unsigned int boot_text_reg;
+
+	/* Value of icache-control-bits to write to boot-processor */
+	unsigned int boot_icache_value;
+	/* status&control register of boot-processor */
+	unsigned int boot_icache_reg;
+
+	/* Value of program counter to write to boot-processor */
+	/* address of main-function in boot-program */
+	unsigned int boot_pc_value;
+	/* pc-start-register of boot-processor */
+	unsigned int boot_pc_reg;
+
+	/* Offset of multi-application-header in blob */
+	unsigned int ma_header_offset;
+	unsigned int ma_header_reg;
+
+	/* Value to write to start the boot-processor */
+	unsigned int boot_start_value;
+	/* status&control register of boot-processor */
+	unsigned int boot_start_reg;
+};
+
+#define VSP_MULTI_APP_MAGIC_NR 0xb10b0005
+/*
+ * Note: application index/id 0 is reserved.
+ * So the maximum number of applications is one less than listed here.
+ * */
+#define VSP_MULTI_APP_MAX_APPS 16
+
+/*
+ * With a 1MB state-buffer in IMR and a 50k context-buffer-size, we could run
+ * * max 20 apps. Using 32 as a nice round number of maximum nr of contexts.
+ * * Actual maximum allowed contexts is currently less, since context-buffer-size
+ * * is larger than 50k.
+ * */
+#define VSP_MULTI_APP_MAX_CONTEXTS 32
+#define VSP_API_GENERIC_CONTEXT_ID (0xffffffff)
+/*
+ * Struct used by VSP-boot-processor to start the correct application
+ * Read from header in firmware ma-blob.
+ * Address of the header is communicated by p-unit.
+ *
+ * Note: this is a VIED internal header
+ */
+struct vsp_multi_app_blob_data {
+	unsigned int magic_number;
+	unsigned int offset_from_start;
+	/** State buffer address in virtual memory, default location on TNG B0 and ANN
+	 * * is 0xA0000000 (2.5GB memory offset, master port 2, 2nd IMR region) */
+	unsigned int imr_state_buffer_addr;
+	/** Size of state-buffer in IMR (in bytes). Default state buffer size for TNG
+	 * * B0 and ANN is 1 MB */
+	unsigned int imr_state_buffer_size;
+	/** default context-buffer size of apps in this blob (each app also has it's
+	 * context-size in it's header. */
+	unsigned int apps_default_context_buffer_size;
+	/**
+	* Address of genboot-helper-program in blob (relative to start of this header)
+	*/
+	unsigned int genboot_helper_prog_offset;
+	/*
+	 * * This table contains a zero (offset of zero) for unused entries
+	 * * Offsets here are relative to the start-address of this header.
+	 */
+	unsigned int application_blob_offsets[VSP_MULTI_APP_MAX_APPS];
+};
+
+/*
+ * Struct for the settings of a single context. Normally placed in an array in
+ * the multi-app header in IMR
+ *
+ * Context-id is determined by the position in the array, so it is not stored in
+ * the struct itself.
+ *
+ * State_buffer_size and state_buffer_addr are currently not stored, since they
+ * can/will be determined automatically based on generic IMR parameters.
+ *
+ * Usage field is the last field, so that it gets written last during a memory
+ * transfer.
+ */
+struct vsp_multi_app_context_settings {
+	unsigned int app_id;  /* Which app this context belongs to */
+	unsigned int usage; /* Indicates if this context is in use */
+};
+
+/*
+ * Datastructure placed at the beginning of the VSP IMR state-save region.
+ * */
+struct vsp_multi_app_imr_header {
+	/*
+	 * Usage field (32-bit), set to 0 by Chaabi during system bootup, set to 1
+	 * by VSP if it is safe for PUnit to perform a restart without power-cycle.
+	 * Set to any other value by VSP if VSP is running.
+	 * */
+	unsigned int vsp_and_imr_state;
+	/* Reserved field for 256-bit alignment of header */
+	unsigned int reserved_1;
+	/* Reserved field for 256-bit alignment of header */
+	unsigned int reserved_2;
+	/* Reserved field for 256-bit alignment of header */
+	unsigned int reserved_3;
+	/* Reserved field for 256-bit alignment of header */
+	unsigned int reserved_4;
+	/* Reserved field for 256-bit alignment of header */
+	unsigned int reserved_5;
+	/* Reserved field for 256-bit alignment of header */
+	unsigned int reserved_6;
+	/* Reserved field for 256-bit alignment of header */
+	unsigned int reserved_7;
+	/* Settings of all active/available contexts */
+	struct vsp_multi_app_context_settings context_settings[VSP_MULTI_APP_MAX_CONTEXTS];
+};
+
+enum vsp_imr_state{
+	/** State when no data for VSP is initialized */
+	vsp_imr_uninitialized = 0,
+	/** State where datastructures are initialized, but no VSP is running */
+	vsp_imr_safe_to_resume = 1,
+	/** State where datastructures are initialized and VSP(-API) is running */
+	vsp_imr_initialized = 2,
+	/** State where datastructures are initialized and VSP(-APP) is running */
+	vsp_imr_app_is_running = 3
+};
+
+enum vsp_ctrl_reg_addr {
+	VSP_SETTING_ADDR_REG      = 3,
+	VSP_SECBOOT_DEBUG_REG     = 4,
+	VSP_ENTRY_KIND_REG        = 5,
+	VSP_POWER_SAVING_MODE_REG = 6,
+	VSP_MMU_TLB_SOFT_INVALIDATE_REG = 7,
+	VSP_CMD_QUEUE_RD_REG      = 12,
+	VSP_CMD_QUEUE_WR_REG      = 13,
+	VSP_ACK_QUEUE_RD_REG      = 14,
+	VSP_ACK_QUEUE_WR_REG      = 15
+};
+
+struct vsp_ctrl_reg {
+	unsigned int reserved_2;
+
+	/* setting address from host to firmware */
+	unsigned int setting_addr;
+
+	/* used for sending debug-status from firmware to host */
+	unsigned int secboot_debug;
+
+	/* entry type from host to firmware
+	 * If it contains vsp_exit, uninitialize the firmware
+	 */
+	unsigned int entry_kind;
+
+	/* set the power-saving-mode setting */
+	unsigned int power_saving_mode;
+
+	/* config reg to request firmware to perform an MMU TLB invalidate.
+	* MMU TLB invalidation for VSP on TNG needs to be done through firmware
+	* due to a hardware bug that could trigger if TLB invalidation is done
+	* while VSP DMA is not idle.
+	*/
+	unsigned int mmu_tlb_soft_invalidate;
+
+	unsigned int reserved_8;
+	unsigned int reserved_9;
+	unsigned int reserved_10;
+	unsigned int reserved_11;
+
+	/* used for the command and response queues */
+	unsigned int cmd_rd;
+	unsigned int cmd_wr;
+	unsigned int ack_rd;
+	unsigned int ack_wr;
+};
+
+/* constant parameters passed from host to firmware,
+ * address of this struct is passed via config reg
+ * struct is written to ddr in vsp_init call, destroyed upon uninit
+ */
+struct vsp_settings_t {
+	/* Extra field to align to 256 bit (for DMA) */
+	unsigned int reserved0;
+	unsigned int command_queue_size;
+	unsigned int command_queue_addr;
+	unsigned int response_queue_size;
+	unsigned int response_queue_addr;
+	/* Extra field to align to 256 bit (for DMA) */
+	unsigned int reserved5;
+	/* Extra field to align to 256 bit (for DMA) */
+	unsigned int reserved6;
+	unsigned int reserved7;
+};
+
+/**
+* The host should only modify the vsp_context_settings_entry when the usage
+* field is vsp_context_unused or vsp_context_uninit. The host can do the
+* following state-transitions for the usage field:
+* 1) vsp_context_unused->vsp_context_starting: start a new stream/context.
+* After this transition, the host can submit commands into the command-queue
+* for the context-id associated with this vsp_context_settings entry.
+* 2) vsp_context_deinit->vsp_context_unused: destroy resources (free state
+* buffer) from the no longer needed context and mark the context as being
+* unused.
+*
+* The VSP will only modify the vsp_context_settings_entry when the usage
+* field is vsp_context_starting or vsp_context_in_use. The VSP will do the
+* following state-transitions for the usage field:
+* 3) vsp_context_starting->vsp_context_in_use: Perform initialisation of
+* state-buffers and other VSP-side initialisation required to start a new
+* stream/context. This is typically done when the first command for this
+* context is received from the host.
+* 4) vsp_context_in_use->vsp_context_deinit: Mark a context as being no longer
+* used by the VSP. The VSP will no longer access any resource used by this
+* context after this transition. This transition is done after an
+* end-of-stream response or similar response to the host to indicate that an
+* application finished for a specific context.
+*/
+enum vsp_context_usage {
+	vsp_context_unused   = 0,
+	vsp_context_deinit   = 1,
+	vsp_context_starting = 16,
+	vsp_context_in_use   = 17
+};
+
+/* default initializer to initialize vsp_settings struct
+ * (including the extra alignment fields)
+ */
+#define VSP_SETTINGS_INITIALIZER {0, 0, 0, 0, 0, 0, 0, 0}
+
+/* values passed via VSP_ENTRY_TYPE_REG
+ * vsp_entry_booted is the default value, it means no init or resume has been
+ * communicated by the host yet.
+ * vsp_entry_init and vsp_entry_resume are used for initial starting of the
+ * system and for resuming after a suspend/power-down.
+*/
+enum vsp_entry_kind {
+	vsp_entry_booted   = 0,
+	vsp_entry_init     = 1,
+	vsp_entry_resume   = 2,
+	vsp_exit           = 3
+};
+
+/* values passed via VSP_POWER_SAVING_MODE_REG */
+enum vsp_power_saving_mode {
+	vsp_always_on              = 0,
+	vsp_suspend_on_empty_queue = 1,
+	vsp_hw_idle_on_empty_queue = 2,
+	vsp_suspend_and_hw_idle_on_empty_queue
+};
+
+/****************************
+ * VP8enc data structures
+ ****************************/
+
+/**
+ * Picture data structure. Currently the same as frc
+ */
+struct VssProcPictureVP8 {
+	uint32_t surface_id;
+	uint32_t irq;  /* send interupt when input or output surface is ready */
+	uint32_t base; /* pointer to luma picture in DDR */
+	uint32_t base_uv; /* pointer to chroma picture in DDR */
+	uint32_t height;
+	uint32_t width;
+	uint32_t stride;
+	uint32_t format; /* frame raw format */
+};
+
+/**
+ * Enumeration for recon_buffer_mode
+ */
+typedef enum {
+	vss_vp8enc_seq_param_recon_buffer_mode_per_seq = 0, /* send 4 ref/recon frame buffers at seq lvl */
+	vss_vp8enc_seq_param_recon_buffer_mode_per_pic,     /* send 1 recon frame buffer per picture */
+	vss_vp8enc_seq_param_recon_buffer_mode_cnt          /* nr of modes */
+} vss_vp8enc_seq_param_recon_buffer_mode_t;
+
+/**
+ * Sequence parameter data structure.
+ */
+struct VssVp8encSequenceParameterBuffer {
+	uint32_t frame_width;
+	uint32_t frame_height;
+	uint32_t frame_rate;
+	uint32_t error_resilient;
+	uint32_t num_token_partitions;
+	uint32_t kf_mode;
+	uint32_t kf_min_dist;
+	uint32_t kf_max_dist;
+	uint32_t rc_target_bitrate;
+	uint32_t rc_min_quantizer;
+	uint32_t rc_max_quantizer;
+	uint32_t rc_undershoot_pct;
+	uint32_t rc_overshoot_pct;
+	uint32_t rc_end_usage;
+	uint32_t rc_buf_sz;
+	uint32_t rc_buf_initial_sz;
+	uint32_t rc_buf_optimal_sz;
+	uint32_t max_intra_rate;
+	uint32_t cyclic_intra_refresh;
+	uint32_t concatenate_partitions;
+	uint32_t recon_buffer_mode;
+	uint32_t generate_skip_frames;
+	uint32_t max_num_dropped_frames;
+	uint32_t ts_number_layers;
+	uint32_t ts_target_bitrate[3];
+	uint32_t ts_rate_decimator[3];
+	uint32_t ts_periodicity;
+	uint8_t ts_layer_id[32];
+	struct VssProcPictureVP8 ref_frame_buffers[4];
+};
+
+struct VssVp8encEncodedFrame {
+	uint32_t frame_size;
+	uint32_t status;
+	uint32_t partitions;
+	uint32_t partition_size[9];
+	uint32_t partition_start[9];
+	uint32_t segments;
+	uint32_t quantizer[4];
+	uint32_t frame_flags;
+	uint32_t partition_id;
+	uint32_t buffer_level[3];
+	uint32_t quality;
+	uint32_t overflow_bytes;
+	uint32_t surfaceId_of_ref_frame[4];
+	uint32_t reserved[15];
+	uint32_t coded_data[1];
+};
+
+/**
+ * Encode frame command buffer
+ */
+struct VssVp8encPictureParameterBuffer {
+	struct VssProcPictureVP8 input_frame;
+	struct VssProcPictureVP8 recon_frame;
+
+	uint32_t version;
+	uint32_t pic_flags;
+	uint32_t prev_frame_dropped;
+	uint32_t cpuused;
+	uint32_t sharpness;
+	uint32_t num_token_partitions;
+	uint32_t encoded_frame_size;
+	uint32_t encoded_frame_base;
+};
+
+/**
+ * Command enumeration
+ */
+enum VssVp8encCommandType {
+	VssVp8encSetSequenceParametersCommand = 123,
+	VssVp8encEncodeFrameCommand,
+	VssVp8encEndOfSequenceCommand,
+	VssVp8encInit,
+	Vss_Sys_Ref_Frame_COMMAND
+};
+
+/*
+ * Generic VSP commands
+ *
+ * Generic VSP commands should be sent with the context field set to
+ * VSP_API_GENERIC_CONTEXT_ID.
+ */
+enum VssGenCommandType {
+	/** Generic command to instruct the VSP to (create and) initialize a context.
+	 * * The buffer field contains the context-id of the new context to initialize.
+	 * The size-field contains the app-id for the new context to initialize
+	 */
+	VssGenInitializeContext       = 0xab01,
+	/** Generic command to instruct the VSP to de-initialize and destroy a
+	 * context. The buffer field contains the context-id of the context to
+	 * de-initialize and destroy. The size-field should always be set to 0.
+	 */
+	VssGenDestroyContext          = 0xab02
+};
+
+/****************************
+ * WiDi Compose data structures
+ ****************************/
+enum VssWiDi_ComposeCommandType {
+	VssWiDi_ComposeSetSequenceParametersCommand = 200,
+	VssWiDi_ComposeFrameCommand,
+	VssWiDi_ComposeEndOfSequenceCommand
+};
+
+enum VssWiDi_ComposeResponseType {
+	VssWiDi_ComposeSetSequenceParametersResponse = 250,
+	VssWiDi_ComposeFrameResponse,
+};
+
+enum VssWiDi_ColorFormat {
+	MonoChrome = 0,
+	YUV_4_2_0,
+	YUV_4_2_0_NV12,
+	YUV_4_2_2,
+	YUV_4_4_4
+};
+/**
+ * WiDi Compose sequence parameter data structure.
+ */
+struct VssWiDi_ComposeSequenceParameterBuffer {
+	unsigned int R_Buffer;
+	unsigned int G_Buffer;
+	unsigned int B_Buffer;
+	unsigned int RGBA_Buffer;
+	unsigned int Y_Buffer;
+	unsigned int UV_Buffer;
+	unsigned int U_Buffer;
+	unsigned int V_Buffer;
+	unsigned int A_Buffer;
+	int ActualWidth;
+	int ActualHeight;
+	int ProcessedWidth;
+	int ProcessedHeight;
+	int TotalMBCount;
+	int Stride;
+	/*Video related*/
+	int Video_IN_xsize;
+	int Video_IN_ysize;
+	int Video_IN_stride;
+	int Video_IN_yuv_format; 
+
+	unsigned int Video_IN_Y_Buffer;
+	unsigned int Video_IN_UV_Buffer;
+        unsigned int Video_IN_U_Buffer;
+	unsigned int Video_IN_V_Buffer;
+	int Video_OUT_xsize;
+	int Video_OUT_ysize;
+	int Video_OUT_stride;
+	int Video_OUT_yuv_format; 
+
+	unsigned int Video_OUT_Y_Buffer;
+	unsigned int Video_OUT_UV_Buffer;
+	unsigned int Video_OUT_V_Buffer;
+
+	unsigned int scaled_width;
+	unsigned int scaled_height;
+	unsigned int scalefactor_dx;
+	unsigned int scalefactor_dy;
+
+	/*Blending related params*/
+	int Is_Blending_Enabled;
+	int ROI_width;
+	int ROI_height;
+	int ROI_x1;
+	int ROI_y1;
+	int ROI_x2;
+	int ROI_y2;
+	int alpha1;
+	int alpha2;
+	int Is_video_the_back_ground;
+	int Is_source_1_image_available;
+	int Is_source_2_image_available;
+	int Is_alpha_channel_available;
+	int Video_TotalMBCount;
+	int CSC_FormatSelect; /* 0: YUV420NV12; 1: YUV444; */
+	int CSC_InputFormatSelect; // 0: RGB Planar; 1: RGBA Interleaved
+};
+
+#pragma pack()
+#endif
diff --git a/drivers/external_drivers/intel_media/video/vsp/vsp_init.c b/drivers/external_drivers/intel_media/video/vsp/vsp_init.c
new file mode 100644
index 0000000..2be105e
--- /dev/null
+++ b/drivers/external_drivers/intel_media/video/vsp/vsp_init.c
@@ -0,0 +1,474 @@
+/**
+ * file vsp_init.c
+ * VSP initialization and firmware upload
+ * Author: Binglin Chen <binglin.chen@intel.com>
+ *
+ */
+
+/**************************************************************************
+ *
+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#include <linux/firmware.h>
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+
+#include "vsp.h"
+
+
+static ssize_t psb_vsp_pmstate_show(struct device *dev,
+				      struct device_attribute *attr, char *buf)
+{
+	struct drm_device *drm_dev = dev_get_drvdata(dev);
+	int ret = -EINVAL;
+
+	if (drm_dev == NULL)
+		return 0;
+
+	ret = snprintf(buf, 64, "VSP Power state 0x%s\n",
+			ospm_power_is_hw_on(OSPM_VIDEO_VPP_ISLAND)
+			? "ON" : "OFF");
+
+	return ret;
+}
+static DEVICE_ATTR(vsp_pmstate, 0444, psb_vsp_pmstate_show, NULL);
+
+int vsp_init(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct ttm_bo_device *bdev = &dev_priv->bdev;
+	struct vsp_private *vsp_priv;
+	bool is_iomem;
+	int ret;
+	int i = 0;
+
+	VSP_DEBUG("init vsp private data structure\n");
+	vsp_priv = kmalloc(sizeof(struct vsp_private), GFP_KERNEL);
+	if (vsp_priv == NULL)
+		return -1;
+
+	memset(vsp_priv, 0, sizeof(*vsp_priv));
+
+	/* get device --> drm_device --> drm_psb_private --> vsp_priv
+	 * for psb_vsp_pmstate_show: vsp_pmpolicy
+	 * if not pci_set_drvdata, can't get drm_device from device
+	 */
+	/* pci_set_drvdata(dev->pdev, dev); */
+	if (device_create_file(&dev->pdev->dev,
+			       &dev_attr_vsp_pmstate))
+		DRM_ERROR("TOPAZ: could not create sysfs file\n");
+
+	vsp_priv->sysfs_pmstate = sysfs_get_dirent(
+		dev->pdev->dev.kobj.sd, NULL,
+		"vsp_pmstate");
+
+	vsp_priv->vsp_cmd_num = 0;
+	vsp_priv->fw_loaded = VSP_FW_NONE;
+	vsp_priv->current_sequence = 0;
+	vsp_priv->vsp_state = VSP_STATE_DOWN;
+	vsp_priv->dev = dev;
+
+	atomic_set(&dev_priv->vsp_mmu_invaldc, 0);
+
+	dev_priv->vsp_private = vsp_priv;
+
+	vsp_priv->cmd_queue_sz = VSP_CMD_QUEUE_SIZE *
+		sizeof(struct vss_command_t);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+	ret = ttm_buffer_object_create(bdev,
+				       vsp_priv->cmd_queue_sz,
+				       ttm_bo_type_kernel,
+				       DRM_PSB_FLAG_MEM_MMU |
+				       TTM_PL_FLAG_NO_EVICT,
+				       0, 0, 0, NULL, &vsp_priv->cmd_queue_bo);
+#else
+	ret = ttm_buffer_object_create(bdev,
+				       vsp_priv->cmd_queue_sz,
+				       ttm_bo_type_kernel,
+				       DRM_PSB_FLAG_MEM_MMU |
+				       TTM_PL_FLAG_NO_EVICT,
+				       0, 0, NULL, &vsp_priv->cmd_queue_bo);
+#endif
+	if (ret != 0) {
+		DRM_ERROR("VSP: failed to allocate VSP cmd queue\n");
+		goto out_clean;
+	}
+
+	vsp_priv->ack_queue_sz = VSP_ACK_QUEUE_SIZE *
+		sizeof(struct vss_response_t);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+	ret = ttm_buffer_object_create(bdev,
+				       vsp_priv->ack_queue_sz,
+				       ttm_bo_type_kernel,
+				       DRM_PSB_FLAG_MEM_MMU |
+				       TTM_PL_FLAG_NO_EVICT,
+				       0, 0, 0, NULL, &vsp_priv->ack_queue_bo);
+#else
+	ret = ttm_buffer_object_create(bdev,
+				       vsp_priv->ack_queue_sz,
+				       ttm_bo_type_kernel,
+				       DRM_PSB_FLAG_MEM_MMU |
+				       TTM_PL_FLAG_NO_EVICT,
+				       0, 0, NULL, &vsp_priv->ack_queue_bo);
+#endif
+	if (ret != 0) {
+		DRM_ERROR("VSP: failed to allocate VSP cmd ack queue\n");
+		goto out_clean;
+	}
+
+	/* Create setting buffer */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+	ret =  ttm_buffer_object_create(bdev,
+				       sizeof(struct vsp_settings_t),
+				       ttm_bo_type_kernel,
+				       DRM_PSB_FLAG_MEM_MMU |
+				       TTM_PL_FLAG_NO_EVICT,
+				       0, 0, 0, NULL, &vsp_priv->setting_bo);
+#else
+	ret =  ttm_buffer_object_create(bdev,
+				       sizeof(struct vsp_settings_t),
+				       ttm_bo_type_kernel,
+				       DRM_PSB_FLAG_MEM_MMU |
+				       TTM_PL_FLAG_NO_EVICT,
+				       0, 0, NULL, &vsp_priv->setting_bo);
+#endif
+	if (ret != 0) {
+		DRM_ERROR("VSP: failed to allocate VSP setting buffer\n");
+		goto out_clean;
+	}
+
+	/* map cmd queue */
+	ret = ttm_bo_kmap(vsp_priv->cmd_queue_bo, 0,
+			  vsp_priv->cmd_queue_bo->num_pages,
+			  &vsp_priv->cmd_kmap);
+	if (ret) {
+		DRM_ERROR("drm_bo_kmap failed: %d\n", ret);
+		ttm_bo_unref(&vsp_priv->cmd_queue_bo);
+		ttm_bo_kunmap(&vsp_priv->cmd_kmap);
+		goto out_clean;
+	}
+
+	vsp_priv->cmd_queue = ttm_kmap_obj_virtual(&vsp_priv->cmd_kmap,
+						   &is_iomem);
+
+
+	/* map ack queue */
+	ret = ttm_bo_kmap(vsp_priv->ack_queue_bo, 0,
+			  vsp_priv->ack_queue_bo->num_pages,
+			  &vsp_priv->ack_kmap);
+	if (ret) {
+		DRM_ERROR("drm_bo_kmap failed: %d\n", ret);
+		ttm_bo_unref(&vsp_priv->ack_queue_bo);
+		ttm_bo_kunmap(&vsp_priv->ack_kmap);
+		goto out_clean;
+	}
+
+	vsp_priv->ack_queue = ttm_kmap_obj_virtual(&vsp_priv->ack_kmap,
+						   &is_iomem);
+
+	/* map vsp setting */
+	ret = ttm_bo_kmap(vsp_priv->setting_bo, 0,
+			  vsp_priv->setting_bo->num_pages,
+			  &vsp_priv->setting_kmap);
+	if (ret) {
+		DRM_ERROR("drm_bo_kmap setting_bo failed: %d\n", ret);
+		ttm_bo_unref(&vsp_priv->setting_bo);
+		ttm_bo_kunmap(&vsp_priv->setting_kmap);
+		goto out_clean;
+	}
+	vsp_priv->setting = ttm_kmap_obj_virtual(&vsp_priv->setting_kmap,
+						 &is_iomem);
+
+	for (i = 0; i < MAX_VP8_CONTEXT_NUM + 1; i++)
+		vsp_priv->vp8_filp[i] = NULL;
+	vsp_priv->context_vp8_num = 0;
+	vsp_priv->context_vpp_num = 0;
+
+	vsp_priv->vp8_cmd_num = 0;
+
+	spin_lock_init(&vsp_priv->lock);
+	mutex_init(&vsp_priv->vsp_mutex);
+
+	INIT_DELAYED_WORK(&vsp_priv->vsp_suspend_wq,
+			&psb_powerdown_vsp);
+	INIT_DELAYED_WORK(&vsp_priv->vsp_irq_wq,
+			&vsp_irq_task);
+	INIT_DELAYED_WORK(&vsp_priv->vsp_cmd_submit_check_wq,
+			&vsp_cmd_submit_check);
+
+	return 0;
+out_clean:
+	vsp_deinit(dev);
+	return -1;
+}
+
+int vsp_deinit(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct vsp_private *vsp_priv = dev_priv->vsp_private;
+
+	VSP_DEBUG("free VSP firmware/context buffer\n");
+
+	if (vsp_priv->cmd_queue) {
+		ttm_bo_kunmap(&vsp_priv->cmd_kmap);
+		vsp_priv->cmd_queue = NULL;
+	}
+
+	if (vsp_priv->ack_queue) {
+		ttm_bo_kunmap(&vsp_priv->ack_kmap);
+		vsp_priv->ack_queue = NULL;
+	}
+	if (vsp_priv->setting) {
+		ttm_bo_kunmap(&vsp_priv->setting_kmap);
+		vsp_priv->setting = NULL;
+	}
+
+	if (vsp_priv->ack_queue_bo)
+		ttm_bo_unref(&vsp_priv->ack_queue_bo);
+	if (vsp_priv->cmd_queue_bo)
+		ttm_bo_unref(&vsp_priv->cmd_queue_bo);
+	if (vsp_priv->setting_bo)
+		ttm_bo_unref(&vsp_priv->setting_bo);
+
+	device_remove_file(&dev->pdev->dev, &dev_attr_vsp_pmstate);
+	sysfs_put(vsp_priv->sysfs_pmstate);
+
+	VSP_DEBUG("free VSP private structure\n");
+	kfree(dev_priv->vsp_private);
+
+	return 0;
+}
+
+void vsp_enableirq(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	unsigned int mask;
+	unsigned int enable;
+	unsigned int clear;
+
+	VSP_DEBUG("will enable irq\n");
+
+	IRQ_REG_READ32(VSP_IRQ_CTRL_IRQ_MASK, &mask);
+	IRQ_REG_READ32(VSP_IRQ_CTRL_IRQ_ENB, &enable);
+	clear = 0;
+
+	VSP_SET_FLAG(mask, VSP_SP0_IRQ_SHIFT);
+	VSP_SET_FLAG(enable, VSP_SP0_IRQ_SHIFT);
+	VSP_SET_FLAG(clear, VSP_SP0_IRQ_SHIFT);
+
+	IRQ_REG_WRITE32(VSP_IRQ_CTRL_IRQ_EDGE, mask);
+	IRQ_REG_WRITE32(VSP_IRQ_CTRL_IRQ_CLR, clear);
+	IRQ_REG_WRITE32(VSP_IRQ_CTRL_IRQ_ENB, enable);
+	IRQ_REG_WRITE32(VSP_IRQ_CTRL_IRQ_MASK, mask);
+	/* use the Level type interrupt */
+	IRQ_REG_WRITE32(VSP_IRQ_CTRL_IRQ_LEVEL_PULSE, 0x80);
+}
+
+void vsp_disableirq(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	unsigned int mask, enable;
+
+	VSP_DEBUG("will disable irq\n");
+
+	IRQ_REG_READ32(VSP_IRQ_CTRL_IRQ_MASK, &mask);
+	IRQ_REG_READ32(VSP_IRQ_CTRL_IRQ_ENB, &enable);
+
+	VSP_CLEAR_FLAG(mask, VSP_SP0_IRQ_SHIFT);
+	VSP_CLEAR_FLAG(enable, VSP_SP0_IRQ_SHIFT);
+
+	IRQ_REG_WRITE32(VSP_IRQ_CTRL_IRQ_MASK, mask);
+	IRQ_REG_WRITE32(VSP_IRQ_CTRL_IRQ_ENB, enable);
+	return;
+}
+
+
+int vsp_reset(struct drm_psb_private *dev_priv)
+{
+	int ret;
+
+	ret = vsp_setup_fw(dev_priv);
+
+	return ret;
+}
+
+int vsp_setup_fw(struct drm_psb_private *dev_priv)
+{
+	struct vsp_private *vsp_priv = dev_priv->vsp_private;
+	uint32_t pd_addr;
+
+	/* set MMU */
+	pd_addr = psb_get_default_pd_addr(dev_priv->vsp_mmu);
+	SET_MMU_PTD(pd_addr >> PAGE_TABLE_SHIFT);
+	SET_MMU_PTD(pd_addr >> PAGE_SHIFT);
+
+	/* vsp setting */
+	vsp_priv->setting->command_queue_size = VSP_CMD_QUEUE_SIZE;
+	vsp_priv->setting->command_queue_addr = vsp_priv->cmd_queue_bo->offset;
+	vsp_priv->setting->response_queue_size = VSP_ACK_QUEUE_SIZE;
+	vsp_priv->setting->response_queue_addr = vsp_priv->ack_queue_bo->offset;
+
+	vsp_priv->ctrl->setting_addr = vsp_priv->setting_bo->offset;
+	vsp_priv->ctrl->mmu_tlb_soft_invalidate = 0;
+	vsp_priv->ctrl->cmd_rd = 0;
+	vsp_priv->ctrl->cmd_wr = 0;
+	vsp_priv->ctrl->ack_rd = 0;
+	vsp_priv->ctrl->ack_wr = 0;
+
+	VSP_DEBUG("setup firmware\n");
+
+	/* Set power-saving mode */
+	if (drm_vsp_pmpolicy == PSB_PMPOLICY_NOPM)
+		vsp_priv->ctrl->power_saving_mode = vsp_always_on;
+	else if (drm_vsp_pmpolicy == PSB_PMPOLICY_POWERDOWN ||
+			drm_vsp_pmpolicy == PSB_PMPOLICY_CLOCKGATING)
+		vsp_priv->ctrl->power_saving_mode = vsp_suspend_on_empty_queue;
+	else
+		vsp_priv->ctrl->power_saving_mode =
+			vsp_suspend_and_hw_idle_on_empty_queue;
+
+	/* communicate the type of init
+	 * this is the last value to write
+	 * it will cause the VSP to read all other settings as wll
+	 */
+	vsp_priv->ctrl->entry_kind = vsp_entry_init;
+
+	vsp_priv->vsp_state = VSP_STATE_ACTIVE;
+
+	/* enable irq */
+	psb_irq_preinstall_islands(dev_priv->dev, OSPM_VIDEO_VPP_ISLAND);
+	psb_irq_postinstall_islands(dev_priv->dev, OSPM_VIDEO_VPP_ISLAND);
+
+	return 0;
+}
+
+void vsp_start_function(struct drm_psb_private *dev_priv, unsigned int pc,
+		    unsigned int processor)
+{
+	unsigned int reg;
+
+	/* set the start addr */
+	SP_REG_WRITE32(VSP_START_PC_REG_OFFSET, pc, processor);
+
+	/* set start command */
+	SP_REG_READ32(SP_STAT_AND_CTRL_REG, &reg, processor);
+	VSP_SET_FLAG(reg, SP_STAT_AND_CTRL_REG_RUN_FLAG);
+	VSP_SET_FLAG(reg, SP_STAT_AND_CTRL_REG_START_FLAG);
+	SP_REG_WRITE32(SP_STAT_AND_CTRL_REG, reg, processor);
+	return;
+}
+#if 0
+unsigned int vsp_set_firmware(struct drm_psb_private *dev_priv,
+			      unsigned int processor)
+{
+	struct vsp_private *vsp_priv = dev_priv->vsp_private;
+	unsigned int reg = 0;
+
+	/* config icache */
+	VSP_SET_FLAG(reg, SP_STAT_AND_CTRL_REG_ICACHE_INVALID_FLAG);
+	/* disable ICACHE_PREFETCH_FLAG from v2.3 */
+	/* VSP_SET_FLAG(reg, SP_STAT_AND_CTRL_REG_ICACHE_PREFETCH_FLAG); */
+	SP_REG_WRITE32(SP_STAT_AND_CTRL_REG, reg, processor);
+
+	/* set icache base address: point to instructions in DDR */
+	SP_REG_WRITE32(VSP_ICACHE_BASE_REG_OFFSET,
+		       vsp_priv->firmware->offset +
+		       vsp_priv->boot_header.boot_text_offset,
+		       processor);
+
+	/* write ma_header_address to the variable allocated for it*/
+	MM_WRITE32(vsp_priv->boot_header.ma_header_reg,
+		   0,
+		   vsp_priv->firmware->offset +
+		   vsp_priv->boot_header.ma_header_offset);
+
+	/* start the secure boot program */
+	vsp_start_function(dev_priv,
+			   vsp_priv->boot_header.boot_pc_value,
+			   processor);
+
+	return 0;
+}
+#endif
+
+void vsp_init_function(struct drm_psb_private *dev_priv)
+{
+	struct vsp_private *vsp_priv = dev_priv->vsp_private;
+
+	vsp_priv->ctrl->entry_kind = vsp_entry_init;
+}
+
+void vsp_continue_function(struct drm_psb_private *dev_priv)
+{
+	struct vsp_private *vsp_priv = dev_priv->vsp_private;
+
+	vsp_priv->ctrl->entry_kind = vsp_entry_booted;
+
+	vsp_priv->ctrl->entry_kind = vsp_entry_resume;
+
+	vsp_priv->vsp_state = VSP_STATE_ACTIVE;
+}
+
+int vsp_resume_function(struct drm_psb_private *dev_priv)
+{
+	struct vsp_private *vsp_priv = dev_priv->vsp_private;
+	struct pci_dev *pdev = vsp_priv->dev->pdev;
+	uint32_t pd_addr, mmadr;
+
+	/* FIXME, change should be removed once bz 120324 is fixed */
+	pci_read_config_dword(pdev, 0x10, &mmadr);
+	if (mmadr == 0) {
+		DRM_ERROR("Bad PCI config!\n");
+		return -1;
+	}
+
+	vsp_priv->ctrl = (struct vsp_ctrl_reg *) (dev_priv->vsp_reg +
+						  VSP_CONFIG_REG_SDRAM_BASE +
+						  VSP_CONFIG_REG_START);
+
+	/* Set MMU */
+	pd_addr = psb_get_default_pd_addr(dev_priv->vsp_mmu);
+	SET_MMU_PTD(pd_addr >> PAGE_TABLE_SHIFT);
+	SET_MMU_PTD(pd_addr >> PAGE_SHIFT);
+
+	/* enable irq */
+	psb_irq_preinstall_islands(dev_priv->dev, OSPM_VIDEO_VPP_ISLAND);
+	psb_irq_postinstall_islands(dev_priv->dev, OSPM_VIDEO_VPP_ISLAND);
+
+	/* restore the config regs */
+	CONFIG_REG_WRITE32(VSP_SETTING_ADDR_REG,
+			vsp_priv->saved_config_regs[VSP_SETTING_ADDR_REG]);
+	CONFIG_REG_WRITE32(VSP_POWER_SAVING_MODE_REG,
+			vsp_priv->saved_config_regs[VSP_POWER_SAVING_MODE_REG]);
+	CONFIG_REG_WRITE32(VSP_CMD_QUEUE_RD_REG,
+			vsp_priv->saved_config_regs[VSP_CMD_QUEUE_RD_REG]);
+	CONFIG_REG_WRITE32(VSP_CMD_QUEUE_WR_REG,
+			vsp_priv->saved_config_regs[VSP_CMD_QUEUE_WR_REG]);
+	CONFIG_REG_WRITE32(VSP_ACK_QUEUE_RD_REG,
+			vsp_priv->saved_config_regs[VSP_ACK_QUEUE_RD_REG]);
+	CONFIG_REG_WRITE32(VSP_ACK_QUEUE_WR_REG,
+			vsp_priv->saved_config_regs[VSP_ACK_QUEUE_WR_REG]);
+
+	vsp_priv->ctrl->entry_kind = vsp_entry_resume;
+
+	vsp_priv->vsp_state = VSP_STATE_ACTIVE;
+
+	return 0;
+}
+
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 7ef316f..ac1b43a 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -54,6 +54,7 @@
 #define FW_CDEV_KERNEL_VERSION			5
 #define FW_CDEV_VERSION_EVENT_REQUEST2		4
 #define FW_CDEV_VERSION_ALLOCATE_REGION_END	4
+#define FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW	5
 
 struct client {
 	u32 version;
@@ -1005,6 +1006,8 @@
 			a->channel, a->speed, a->header_size, cb, client);
 	if (IS_ERR(context))
 		return PTR_ERR(context);
+	if (client->version < FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW)
+		context->drop_overflow_headers = true;
 
 	/* We only support one context at this time. */
 	spin_lock_irq(&client->lock);
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 9e1db64..afb701e 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -2749,8 +2749,11 @@
 {
 	u32 *ctx_hdr;
 
-	if (ctx->header_length + ctx->base.header_size > PAGE_SIZE)
+	if (ctx->header_length + ctx->base.header_size > PAGE_SIZE) {
+		if (ctx->base.drop_overflow_headers)
+			return;
 		flush_iso_completions(ctx);
+	}
 
 	ctx_hdr = ctx->header + ctx->header_length;
 	ctx->last_timestamp = (u16)le32_to_cpu((__force __le32)dma_hdr[0]);
@@ -2910,8 +2913,11 @@
 
 	sync_it_packet_for_cpu(context, d);
 
-	if (ctx->header_length + 4 > PAGE_SIZE)
+	if (ctx->header_length + 4 > PAGE_SIZE) {
+		if (ctx->base.drop_overflow_headers)
+			return 1;
 		flush_iso_completions(ctx);
+	}
 
 	ctx_hdr = ctx->header + ctx->header_length;
 	ctx->last_timestamp = le16_to_cpu(last->res_count);
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 573c449..aed9283 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -81,6 +81,17 @@
 	  Kernel drivers may also request that a particular GPIO be
 	  exported to userspace; this can be useful when debugging.
 
+config GPIODEBUG
+	tristate "GPIO Setting DEBUG"
+	depends on DEBUG_FS
+	help
+	  Say yes here to support GPIO/FLIS Setting Debug.
+
+	  This is mostly useful to dump and set gpio/flis conguration.
+
+	  Kernel drivers may also request that a particular GPIO be
+	  exported to userspace; this can be useful when debugging.
+
 config GPIO_GENERIC
 	tristate
 
@@ -106,6 +117,14 @@
 config GPIO_MAX730X
 	tristate
 
+config GPIO_VIRTUAL
+	tristate "Virtual GPIO Controller"
+	help
+	  Say yes here to enable the Virtual GPIO Controller driver. This
+	  driver enables USH wakeup from D0i3.
+
+	  If driver is built as a module it will be called gpio-virtual.
+
 comment "Memory mapped GPIO drivers:"
 
 config GPIO_CLPS711X
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 0cb2d65..fe2c496 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -6,6 +6,7 @@
 obj-$(CONFIG_GPIOLIB)		+= gpiolib.o
 obj-$(CONFIG_OF_GPIO)		+= gpiolib-of.o
 obj-$(CONFIG_GPIO_ACPI)		+= gpiolib-acpi.o
+obj-$(CONFIG_GPIODEBUG)		+= gpiodebug.o
 
 # Device drivers. Generally keep list sorted alphabetically
 obj-$(CONFIG_GPIO_GENERIC)	+= gpio-generic.o
@@ -81,6 +82,7 @@
 obj-$(CONFIG_GPIO_TWL6040)	+= gpio-twl6040.o
 obj-$(CONFIG_GPIO_UCB1400)	+= gpio-ucb1400.o
 obj-$(CONFIG_GPIO_VIPERBOARD)	+= gpio-viperboard.o
+obj-$(CONFIG_GPIO_VIRTUAL)	+= gpio-virtual.o
 obj-$(CONFIG_GPIO_VR41XX)	+= gpio-vr41xx.o
 obj-$(CONFIG_GPIO_VX855)	+= gpio-vx855.o
 obj-$(CONFIG_GPIO_WM831X)	+= gpio-wm831x.o
diff --git a/drivers/gpio/gpio-langwell.c b/drivers/gpio/gpio-langwell.c
index 62ef10a..0dcbc1f 100644
--- a/drivers/gpio/gpio-langwell.c
+++ b/drivers/gpio/gpio-langwell.c
@@ -1,7 +1,5 @@
-/*
- * Moorestown platform Langwell chip GPIO driver
- *
- * Copyright (c) 2008 - 2009,  Intel Corporation.
+/* gpio-langwell.c Moorestown platform Langwell chip GPIO driver
+ * Copyright (c) 2008 - 2013,  Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -35,8 +33,16 @@
 #include <linux/io.h>
 #include <linux/gpio.h>
 #include <linux/slab.h>
+#include <linux/lnw_gpio.h>
 #include <linux/pm_runtime.h>
+#include <asm/intel-mid.h>
 #include <linux/irqdomain.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_scu_flis.h>
+#include "gpiodebug.h"
+
+#define IRQ_TYPE_EDGE	(1 << 0)
+#define IRQ_TYPE_LEVEL	(1 << 1)
 
 /*
  * Langwell chip has 64 pins and thus there are 2 32bit registers to control
@@ -61,14 +67,193 @@
 	GFER,		/* falling edge detect */
 	GEDR,		/* edge detect result */
 	GAFR,		/* alt function */
+	GFBR = 9,	/* glitch filter bypas */
+	GPIT,		/* interrupt type */
+	GPIP = GFER,	/* level interrupt polarity */
+	GPIM = GRER,	/* level interrupt mask */
+
+	/* the following registers only exist on MRFLD */
+	GFBR_TNG = 6,
+	GIMR,		/* interrupt mask */
+	GISR,		/* interrupt source */
+	GITR = 32,	/* interrupt type */
+	GLPR = 33,	/* level-input polarity */
+};
+
+enum GPIO_CONTROLLERS {
+	LINCROFT_GPIO,
+	PENWELL_GPIO_AON,
+	PENWELL_GPIO_CORE,
+	CLOVERVIEW_GPIO_AON,
+	CLOVERVIEW_GPIO_CORE,
+	TANGIER_GPIO,
+};
+
+/* langwell gpio driver data */
+struct lnw_gpio_ddata_t {
+	u16 ngpio;		/* number of gpio pins */
+	u32 gplr_offset;	/* offset of first GPLR register from base */
+	u32 (*get_flis_offset)(int gpio);
+	u32 chip_irq_type;	/* chip interrupt type */
+};
+
+struct gpio_flis_pair {
+	int gpio;	/* gpio number */
+	int offset;	/* register offset from FLIS base */
+};
+
+/*
+ * The following mapping table lists the pin and flis offset pair
+ * of some key gpio pins, the offset of other gpios can be calculated
+ * from the table.
+ */
+static struct gpio_flis_pair gpio_flis_tng_mapping_table[] = {
+	{ 0,	0x2900 },
+	{ 12,	0x2544 },
+	{ 14,	0x0958 },
+	{ 16,	0x2D18 },
+	{ 17,	0x1D10 },
+	{ 19,	0x1D00 },
+	{ 23,	0x1D18 },
+	{ 31,	-EINVAL }, /* No GPIO 31 in pin list */
+	{ 32,	0x1508 },
+	{ 44,	0x3500 },
+	{ 64,	0x2534 },
+	{ 68,	0x2D1C },
+	{ 70,	0x1500 },
+	{ 72,	0x3D00 },
+	{ 77,	0x0D00 },
+	{ 97,	0x0954 },
+	{ 98,	-EINVAL }, /* No GPIO 98-101 in pin list */
+	{ 102,	0x1910 },
+	{ 120,	0x1900 },
+	{ 124,	0x2100 },
+	{ 136,	-EINVAL }, /* No GPIO 136 in pin list */
+	{ 137,	0x2D00 },
+	{ 143,	-EINVAL }, /* No GPIO 143-153 in pin list */
+	{ 154,	0x092C },
+	{ 164,	0x3900 },
+	{ 177,	0x2500 },
+	{ 190,	0x2D50 },
+};
+
+static struct gpio_flis_pair gpio_flis_ann_mapping_table[] = {
+	{ 0,	0x2900 },
+	{ 12,	0x2154 },
+	{ 14,	0x2540 },
+	{ 16,	0x2930 },
+	{ 17,	0x1D18 },
+	{ 19,	0x1D08 },
+	{ 23,	0x1D20 },
+	{ 31,	0x111C },
+	{ 32,	0x1508 },
+	{ 44,	0x3500 },
+	{ 64,	0x312C },
+	{ 68,	0x2934 },
+	{ 70,	0x1500 },
+	{ 72,	0x3D00 },
+	{ 77,	0x0D00 },
+	{ 87,   0x0D2C },
+	{ 88,   0x0D28 },
+	{ 89,   0x0D30 },
+	{ 97,	0x2130 },
+	{ 98,	0x2D18 },
+	{ 99,	-EINVAL }, /* No GPIO 99-100 in pin list */
+	{ 101,	0x0500 },
+	{ 102,	0x1910 },
+	{ 120,	0x1900 },
+	{ 124,	0x2100 },
+	{ 136,	0x0504 },
+	{ 137,  0x2D00 },
+	{ 143,  0x0508 },
+	{ 154,	0x2134 },
+	{ 162,	0x2548 },
+	{ 164,	0x3D14 },
+	{ 176,	0x2500 },
+
+};
+
+/*
+ * In new version of FW for Merrifield, I2C FLIS register can not
+ * be written directly but go though a IPC way which is sleepable,
+ * so we shouldn't use spin_lock_irq to protect the access when
+ * is_merr_i2c_flis() return true.
+ */
+static inline bool is_merr_i2c_flis(u32 offset)
+{
+	return ((offset >= I2C_FLIS_START)
+		&& (offset <= I2C_FLIS_END));
+}
+
+static u32 get_flis_offset_by_gpio(int gpio)
+{
+	int i;
+	int start;
+	u32 offset = -EINVAL, size;
+	struct gpio_flis_pair *gpio_flis_map;
+
+	if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER) {
+		size = ARRAY_SIZE(gpio_flis_tng_mapping_table);
+		gpio_flis_map = gpio_flis_tng_mapping_table;
+	} else if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_ANNIEDALE) {
+		size = ARRAY_SIZE(gpio_flis_ann_mapping_table);
+		gpio_flis_map = gpio_flis_ann_mapping_table;
+	} else
+		return -EINVAL;
+
+	for (i = 0; i < size - 1; i++) {
+		if (gpio >= gpio_flis_map[i].gpio
+			&& gpio < gpio_flis_map[i + 1].gpio)
+			break;
+	}
+
+	start = gpio_flis_map[i].gpio;
+
+	if (gpio_flis_map[i].offset != -EINVAL)
+		offset = gpio_flis_map[i].offset + (gpio - start) * 4;
+
+	return offset;
+}
+
+static struct lnw_gpio_ddata_t lnw_gpio_ddata[] = {
+	[LINCROFT_GPIO] = {
+		.ngpio = 64,
+	},
+	[PENWELL_GPIO_AON] = {
+		.ngpio = 96,
+		.chip_irq_type = IRQ_TYPE_EDGE,
+	},
+	[PENWELL_GPIO_CORE] = {
+		.ngpio = 96,
+		.chip_irq_type = IRQ_TYPE_EDGE,
+	},
+	[CLOVERVIEW_GPIO_AON] = {
+		.ngpio = 96,
+		.chip_irq_type = IRQ_TYPE_EDGE | IRQ_TYPE_LEVEL,
+	},
+	[CLOVERVIEW_GPIO_CORE] = {
+		.ngpio = 96,
+		.chip_irq_type = IRQ_TYPE_EDGE,
+	},
+	[TANGIER_GPIO] = {
+		.ngpio = 192,
+		.gplr_offset = 4,
+		.get_flis_offset = get_flis_offset_by_gpio,
+		.chip_irq_type = IRQ_TYPE_EDGE | IRQ_TYPE_LEVEL,
+	},
 };
 
 struct lnw_gpio {
-	struct gpio_chip		chip;
-	void				*reg_base;
-	spinlock_t			lock;
-	struct pci_dev			*pdev;
-	struct irq_domain		*domain;
+	struct gpio_chip	chip;
+	void			*reg_base;
+	void			*reg_gplr;
+	spinlock_t		lock;
+	struct pci_dev		*pdev;
+	struct irq_domain	*domain;
+	u32			(*get_flis_offset)(int gpio);
+	u32			chip_irq_type;
+	int			type;
+	struct gpio_debug	*debug;
 };
 
 #define to_lnw_priv(chip)	container_of(chip, struct lnw_gpio, chip)
@@ -80,11 +265,162 @@
 	unsigned nreg = chip->ngpio / 32;
 	u8 reg = offset / 32;
 	void __iomem *ptr;
+	void *base;
 
-	ptr = (void __iomem *)(lnw->reg_base + reg_type * nreg * 4 + reg * 4);
+	/**
+	 * On TNG B0, GITR[0]'s address is 0xFF008300, while GPLR[0]'s address
+	 * is 0xFF008004. To count GITR[0]'s address, it's easier to count
+	 * from 0xFF008000. So for GITR,GLPR... we switch the base to reg_base.
+	 * This does not affect PNW/CLV, since the reg_gplr is the reg_base,
+	 * while on TNG, the reg_gplr has an offset of 0x4.
+	 */
+	base = reg_type < GITR ? lnw->reg_gplr : lnw->reg_base;
+	ptr = (void __iomem *)(base + reg_type * nreg * 4 + reg * 4);
 	return ptr;
 }
 
+void lnw_gpio_set_alt(int gpio, int alt)
+{
+	struct lnw_gpio *lnw;
+	u32 __iomem *mem;
+	int reg;
+	int bit;
+	u32 offset;
+	u32 value;
+	unsigned long flags;
+
+	/* use this trick to get memio */
+	lnw = irq_get_chip_data(gpio_to_irq(gpio));
+	if (!lnw) {
+		pr_err("langwell_gpio: can not find pin %d\n", gpio);
+		return;
+	}
+	if (gpio < lnw->chip.base || gpio >= lnw->chip.base + lnw->chip.ngpio) {
+		dev_err(lnw->chip.dev, "langwell_gpio: wrong pin %d to config alt\n", gpio);
+		return;
+	}
+#if 0
+	if (lnw->irq_base + gpio - lnw->chip.base != gpio_to_irq(gpio)) {
+		dev_err(lnw->chip.dev, "langwell_gpio: wrong chip data for pin %d\n", gpio);
+		return;
+	}
+#endif
+	gpio -= lnw->chip.base;
+
+	if (lnw->type != TANGIER_GPIO) {
+		reg = gpio / 16;
+		bit = gpio % 16;
+
+		mem = gpio_reg(&lnw->chip, 0, GAFR);
+		spin_lock_irqsave(&lnw->lock, flags);
+		value = readl(mem + reg);
+		value &= ~(3 << (bit * 2));
+		value |= (alt & 3) << (bit * 2);
+		writel(value, mem + reg);
+		spin_unlock_irqrestore(&lnw->lock, flags);
+		dev_dbg(lnw->chip.dev, "ALT: writing 0x%x to %p\n",
+			value, mem + reg);
+	} else {
+		offset = lnw->get_flis_offset(gpio);
+		if (WARN(offset == -EINVAL, "invalid pin %d\n", gpio))
+			return;
+
+		if (!is_merr_i2c_flis(offset))
+			spin_lock_irqsave(&lnw->lock, flags);
+
+		value = get_flis_value(offset);
+		value &= ~7;
+		value |= (alt & 7);
+		set_flis_value(value, offset);
+
+		if (!is_merr_i2c_flis(offset))
+			spin_unlock_irqrestore(&lnw->lock, flags);
+	}
+}
+EXPORT_SYMBOL_GPL(lnw_gpio_set_alt);
+
+int gpio_get_alt(int gpio)
+{
+	struct lnw_gpio *lnw;
+	u32 __iomem *mem;
+	int reg;
+	int bit;
+	u32 value;
+	u32 offset;
+
+	 /* use this trick to get memio */
+	lnw = irq_get_chip_data(gpio_to_irq(gpio));
+	if (!lnw) {
+		pr_err("langwell_gpio: can not find pin %d\n", gpio);
+		return -1;
+	}
+	if (gpio < lnw->chip.base || gpio >= lnw->chip.base + lnw->chip.ngpio) {
+		dev_err(lnw->chip.dev,
+			"langwell_gpio: wrong pin %d to config alt\n", gpio);
+		return -1;
+	}
+#if 0
+	if (lnw->irq_base + gpio - lnw->chip.base != gpio_to_irq(gpio)) {
+		dev_err(lnw->chip.dev,
+			"langwell_gpio: wrong chip data for pin %d\n", gpio);
+		return -1;
+	}
+#endif
+	gpio -= lnw->chip.base;
+
+	if (lnw->type != TANGIER_GPIO) {
+		reg = gpio / 16;
+		bit = gpio % 16;
+
+		mem = gpio_reg(&lnw->chip, 0, GAFR);
+		value = readl(mem + reg);
+		value &= (3 << (bit * 2));
+		value >>= (bit * 2);
+	} else {
+		offset = lnw->get_flis_offset(gpio);
+		if (WARN(offset == -EINVAL, "invalid pin %d\n", gpio))
+			return -EINVAL;
+
+		value = get_flis_value(offset) & 7;
+	}
+
+	return value;
+}
+EXPORT_SYMBOL_GPL(gpio_get_alt);
+
+static int lnw_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
+				 unsigned debounce)
+{
+	struct lnw_gpio *lnw = to_lnw_priv(chip);
+	void __iomem *gfbr;
+	unsigned long flags;
+	u32 value;
+	enum GPIO_REG reg_type;
+
+	reg_type = (lnw->type == TANGIER_GPIO) ? GFBR_TNG : GFBR;
+	gfbr = gpio_reg(chip, offset, reg_type);
+
+	if (lnw->pdev)
+		pm_runtime_get(&lnw->pdev->dev);
+
+	spin_lock_irqsave(&lnw->lock, flags);
+	value = readl(gfbr);
+	if (debounce) {
+		/* debounce bypass disable */
+		value &= ~BIT(offset % 32);
+	} else {
+		/* debounce bypass enable */
+		value |= BIT(offset % 32);
+	}
+	writel(value, gfbr);
+	spin_unlock_irqrestore(&lnw->lock, flags);
+
+	if (lnw->pdev)
+		pm_runtime_put(&lnw->pdev->dev);
+
+	return 0;
+}
+
 static void __iomem *gpio_reg_2bit(struct gpio_chip *chip, unsigned offset,
 				   enum GPIO_REG reg_type)
 {
@@ -99,9 +435,18 @@
 
 static int lnw_gpio_request(struct gpio_chip *chip, unsigned offset)
 {
-	void __iomem *gafr = gpio_reg_2bit(chip, offset, GAFR);
-	u32 value = readl(gafr);
-	int shift = (offset % 16) << 1, af = (value >> shift) & 3;
+	struct lnw_gpio *lnw = to_lnw_priv(chip);
+	u32 value;
+	void __iomem *gafr;
+	int shift, af;
+
+	if (lnw->type > CLOVERVIEW_GPIO_CORE)
+		return 0;
+
+	gafr = gpio_reg_2bit(chip, offset, GAFR);
+	value = readl(gafr);
+	shift = (offset % 16) << 1;
+	af = (value >> shift) & 3;
 
 	if (af) {
 		value &= ~(3 << shift);
@@ -188,8 +533,10 @@
 	u32 gpio = irqd_to_hwirq(d);
 	unsigned long flags;
 	u32 value;
+	int ret = 0;
 	void __iomem *grer = gpio_reg(&lnw->chip, gpio, GRER);
 	void __iomem *gfer = gpio_reg(&lnw->chip, gpio, GFER);
+	void __iomem *gpit, *gpip;
 
 	if (gpio >= lnw->chip.ngpio)
 		return -EINVAL;
@@ -197,47 +544,224 @@
 	if (lnw->pdev)
 		pm_runtime_get(&lnw->pdev->dev);
 
-	spin_lock_irqsave(&lnw->lock, flags);
-	if (type & IRQ_TYPE_EDGE_RISING)
-		value = readl(grer) | BIT(gpio % 32);
-	else
-		value = readl(grer) & (~BIT(gpio % 32));
-	writel(value, grer);
+	/* Chip that supports level interrupt has extra GPIT registers */
+	if (lnw->chip_irq_type & IRQ_TYPE_LEVEL) {
+		switch (lnw->type) {
+		case CLOVERVIEW_GPIO_AON:
+			gpit = gpio_reg(&lnw->chip, gpio, GPIT);
+			gpip = gpio_reg(&lnw->chip, gpio, GPIP);
+			break;
+		case TANGIER_GPIO:
+			gpit = gpio_reg(&lnw->chip, gpio, GITR);
+			gpip = gpio_reg(&lnw->chip, gpio, GLPR);
+			break;
+		default:
+			ret = -EINVAL;
+			goto out;
+		}
 
-	if (type & IRQ_TYPE_EDGE_FALLING)
-		value = readl(gfer) | BIT(gpio % 32);
-	else
-		value = readl(gfer) & (~BIT(gpio % 32));
-	writel(value, gfer);
-	spin_unlock_irqrestore(&lnw->lock, flags);
+		spin_lock_irqsave(&lnw->lock, flags);
+		if (type & IRQ_TYPE_LEVEL_MASK) {
+			/* To prevent glitches from triggering an unintended
+			 * level interrupt, configure GLPR register first
+			 * and then configure GITR.
+			 */
+			if (type & IRQ_TYPE_LEVEL_LOW)
+				value = readl(gpip) | BIT(gpio % 32);
+			else
+				value = readl(gpip) & (~BIT(gpio % 32));
+			writel(value, gpip);
 
+			value = readl(gpit) | BIT(gpio % 32);
+			writel(value, gpit);
+
+			__irq_set_handler_locked(d->irq, handle_level_irq);
+		} else if (type & IRQ_TYPE_EDGE_BOTH) {
+			value = readl(gpit) & (~BIT(gpio % 32));
+			writel(value, gpit);
+
+			if (type & IRQ_TYPE_EDGE_RISING)
+				value = readl(grer) | BIT(gpio % 32);
+			else
+				value = readl(grer) & (~BIT(gpio % 32));
+			writel(value, grer);
+
+			if (type & IRQ_TYPE_EDGE_FALLING)
+				value = readl(gfer) | BIT(gpio % 32);
+			else
+				value = readl(gfer) & (~BIT(gpio % 32));
+			writel(value, gfer);
+
+			__irq_set_handler_locked(d->irq, handle_edge_irq);
+		}
+		spin_unlock_irqrestore(&lnw->lock, flags);
+	} else {
+		if (type & IRQ_TYPE_LEVEL_MASK) {
+			ret = -EINVAL;
+		} else if (type & IRQ_TYPE_EDGE_BOTH) {
+			spin_lock_irqsave(&lnw->lock, flags);
+
+			if (type & IRQ_TYPE_EDGE_RISING)
+				value = readl(grer) | BIT(gpio % 32);
+			else
+				value = readl(grer) & (~BIT(gpio % 32));
+			writel(value, grer);
+
+			if (type & IRQ_TYPE_EDGE_FALLING)
+				value = readl(gfer) | BIT(gpio % 32);
+			else
+				value = readl(gfer) & (~BIT(gpio % 32));
+			writel(value, gfer);
+
+			spin_unlock_irqrestore(&lnw->lock, flags);
+		}
+	}
+
+out:
 	if (lnw->pdev)
 		pm_runtime_put(&lnw->pdev->dev);
 
+	return ret;
+}
+
+static int lnw_set_maskunmask(struct irq_data *d, enum GPIO_REG reg_type,
+				unsigned unmask)
+{
+	struct lnw_gpio *lnw = irq_data_get_irq_chip_data(d);
+	u32 gpio = irqd_to_hwirq(d);
+	unsigned long flags;
+	u32 value;
+	void __iomem *gp_reg;
+
+	gp_reg = gpio_reg(&lnw->chip, gpio, reg_type);
+
+	spin_lock_irqsave(&lnw->lock, flags);
+
+	if (unmask) {
+		/* enable interrupt from GPIO input pin */
+		value = readl(gp_reg) | BIT(gpio % 32);
+	} else {
+		/* disable interrupt from GPIO input pin */
+		value = readl(gp_reg) & (~BIT(gpio % 32));
+	}
+
+	writel(value, gp_reg);
+
+	spin_unlock_irqrestore(&lnw->lock, flags);
+
 	return 0;
 }
 
 static void lnw_irq_unmask(struct irq_data *d)
 {
+	struct lnw_gpio *lnw = irq_data_get_irq_chip_data(d);
+	u32 gpio = irqd_to_hwirq(d);
+	void __iomem *gpit;
+	void __iomem *gisr;
+
+	if (gpio >= lnw->chip.ngpio)
+		return;
+
+	switch (lnw->type) {
+	case CLOVERVIEW_GPIO_AON:
+		gpit = gpio_reg(&lnw->chip, gpio, GPIT);
+
+		/* if it's level trigger, unmask GPIM */
+		if (readl(gpit) & BIT(gpio % 32))
+			lnw_set_maskunmask(d, GPIM, 1);
+
+		break;
+	case TANGIER_GPIO:
+		gpit = gpio_reg(&lnw->chip, gpio, GITR);
+		gisr = gpio_reg(&lnw->chip, gpio, GISR);
+
+		if (readl(gpit) & BIT(gpio % 32))
+			writel(BIT(gpio % 32), gisr);
+
+		lnw_set_maskunmask(d, GIMR, 1);
+		break;
+	default:
+		break;
+	}
 }
 
 static void lnw_irq_mask(struct irq_data *d)
 {
+	struct lnw_gpio *lnw = irq_data_get_irq_chip_data(d);
+	u32 gpio = irqd_to_hwirq(d);
+	void __iomem *gpit;
+
+	if (gpio >= lnw->chip.ngpio)
+		return;
+
+	switch (lnw->type) {
+	case CLOVERVIEW_GPIO_AON:
+		gpit = gpio_reg(&lnw->chip, gpio, GPIT);
+
+		/* if it's level trigger, mask GPIM */
+		if (readl(gpit) & BIT(gpio % 32))
+			lnw_set_maskunmask(d, GPIM, 0);
+
+		break;
+	case TANGIER_GPIO:
+		lnw_set_maskunmask(d, GIMR, 0);
+		break;
+	default:
+		break;
+	}
 }
 
+static int lwn_irq_set_wake(struct irq_data *d, unsigned on)
+{
+	return 0;
+}
+
+static void lnw_irq_ack(struct irq_data *d)
+{
+}
+
+static void lnw_irq_shutdown(struct irq_data *d)
+{
+	struct lnw_gpio *lnw = irq_data_get_irq_chip_data(d);
+	u32 gpio = irqd_to_hwirq(d);
+	unsigned long flags;
+	u32 value;
+	void __iomem *grer = gpio_reg(&lnw->chip, gpio, GRER);
+	void __iomem *gfer = gpio_reg(&lnw->chip, gpio, GFER);
+
+	spin_lock_irqsave(&lnw->lock, flags);
+	value = readl(grer) & (~BIT(gpio % 32));
+	writel(value, grer);
+	value = readl(gfer) & (~BIT(gpio % 32));
+	writel(value, gfer);
+	spin_unlock_irqrestore(&lnw->lock, flags);
+};
+
+
 static struct irq_chip lnw_irqchip = {
 	.name		= "LNW-GPIO",
+	.flags		= IRQCHIP_SET_TYPE_MASKED,
 	.irq_mask	= lnw_irq_mask,
 	.irq_unmask	= lnw_irq_unmask,
 	.irq_set_type	= lnw_irq_type,
+	.irq_set_wake	= lwn_irq_set_wake,
+	.irq_ack	= lnw_irq_ack,
+	.irq_shutdown	= lnw_irq_shutdown,
 };
 
 static DEFINE_PCI_DEVICE_TABLE(lnw_gpio_ids) = {   /* pin number */
-	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080f), .driver_data = 64 },
-	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081f), .driver_data = 96 },
-	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081a), .driver_data = 96 },
-	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08eb), .driver_data = 96 },
-	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08f7), .driver_data = 96 },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080f),
+		.driver_data = LINCROFT_GPIO },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081f),
+		.driver_data = PENWELL_GPIO_AON },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081a),
+		.driver_data = PENWELL_GPIO_CORE },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08eb),
+		.driver_data = CLOVERVIEW_GPIO_AON },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08f7),
+		.driver_data = CLOVERVIEW_GPIO_CORE },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1199),
+		.driver_data = TANGIER_GPIO },
 	{ 0, }
 };
 MODULE_DEVICE_TABLE(pci, lnw_gpio_ids);
@@ -245,28 +769,446 @@
 static void lnw_irq_handler(unsigned irq, struct irq_desc *desc)
 {
 	struct irq_data *data = irq_desc_get_irq_data(desc);
-	struct lnw_gpio *lnw = irq_data_get_irq_handler_data(data);
 	struct irq_chip *chip = irq_data_get_irq_chip(data);
+	struct lnw_gpio *lnw;
+	struct gpio_debug *debug;
 	u32 base, gpio, mask;
 	unsigned long pending;
-	void __iomem *gedr;
+	void __iomem *gp_reg;
+	enum GPIO_REG reg_type;
+	struct irq_desc *lnw_irq_desc;
+	unsigned int lnw_irq;
+
+	lnw = irq_data_get_irq_handler_data(data);
+
+	debug = lnw->debug;
+
+	reg_type = (lnw->type == TANGIER_GPIO) ? GISR : GEDR;
 
 	/* check GPIO controller to check which pin triggered the interrupt */
 	for (base = 0; base < lnw->chip.ngpio; base += 32) {
-		gedr = gpio_reg(&lnw->chip, base, GEDR);
-		while ((pending = readl(gedr))) {
+		gp_reg = gpio_reg(&lnw->chip, base, reg_type);
+		while ((pending = (lnw->type != TANGIER_GPIO) ?
+			readl(gp_reg) :
+			(readl(gp_reg) &
+			readl(gpio_reg(&lnw->chip, base, GIMR))))) {
 			gpio = __ffs(pending);
+			DEFINE_DEBUG_IRQ_CONUNT_INCREASE(lnw->chip.base +
+				base + gpio);
+			/* Mask irq if not requested in kernel */
+			lnw_irq = irq_find_mapping(lnw->domain, base + gpio);
+			lnw_irq_desc = irq_to_desc(lnw_irq);
+			if (lnw_irq_desc && unlikely(!lnw_irq_desc->action)) {
+				lnw_irq_mask(&lnw_irq_desc->irq_data);
+				continue;
+			}
+
 			mask = BIT(gpio);
 			/* Clear before handling so we can't lose an edge */
-			writel(mask, gedr);
-			generic_handle_irq(irq_find_mapping(lnw->domain,
-							    base + gpio));
+			writel(mask, gp_reg);
+			generic_handle_irq(lnw_irq);
 		}
 	}
 
 	chip->irq_eoi(data);
 }
 
+static char conf_reg_msg[] =
+	"\nGPIO configuration register:\n"
+	"\t[ 2: 0]\tpinmux\n"
+	"\t[ 6: 4]\tpull strength\n"
+	"\t[ 8: 8]\tpullup enable\n"
+	"\t[ 9: 9]\tpulldown enable\n"
+	"\t[10:10]\tslew A, B setting\n"
+	"\t[12:12]\toverride input enable\n"
+	"\t[13:13]\toverride input enable enable\n"
+	"\t[14:14]\toverride output enable\n"
+	"\t[15:15]\toverride output enable enable\n"
+	"\t[16:16]\toverride input value\n"
+	"\t[17:17]\tenable input data override\n"
+	"\t[18:18]\toverride output value\n"
+	"\t[19:19]\tenable output data override\n"
+	"\t[21:21]\topen drain enable\n"
+	"\t[22:22]\tenable OVR_IOSTBY_VAL\n"
+	"\t[23:23]\tOVR_IOSTBY_VAL\n"
+	"\t[24:24]\tSBY_OUTDATAOV_EN\n"
+	"\t[25:25]\tSBY_INDATAOV_EN\n"
+	"\t[26:26]\tSBY_OVOUTEN_EN\n"
+	"\t[27:27]\tSBY_OVINEN_EN\n"
+	"\t[29:28]\tstandby pullmode\n"
+	"\t[30:30]\tstandby open drain mode\n";
+
+static char *pinvalue[] = {"low", "high"};
+static char *pindirection[] = {"in", "out"};
+static char *irqtype[] = {"irq_none", "edge_rising", "edge_falling",
+			"edge_both"};
+static char *pinmux[] = {"mode0", "mode1", "mode2", "mode3", "mode4", "mode5",
+			"mode6", "mode7"};
+static char *pullmode[] = {"nopull", "pullup", "pulldown"};
+static char *pullstrength[] = {"2k", "20k", "50k", "910ohms"};
+static char *enable[] = {"disable", "enable"};
+static char *override_direction[] = {"no-override", "override-enable",
+			"override-disable"};
+static char *override_value[] = {"no-override", "override-high",
+			"override-low"};
+static char *standby_trigger[] = {"no-override", "override-trigger",
+			"override-notrigger"};
+static char *standby_pupd_state[] = {"keep", "pulldown", "pullup", "nopull"};
+
+static int gpio_get_pinvalue(struct gpio_control *control, void *private_data,
+		unsigned gpio)
+{
+	struct lnw_gpio *lnw = private_data;
+	u32 value;
+
+	value = lnw_gpio_get(&lnw->chip, gpio);
+
+	return value ? 1 : 0;
+}
+
+static int gpio_set_pinvalue(struct gpio_control *control, void *private_data,
+		unsigned gpio, unsigned int num)
+{
+	struct lnw_gpio *lnw = private_data;
+
+	lnw_gpio_set(&lnw->chip, gpio, num);
+	return 0;
+}
+
+static int gpio_get_normal(struct gpio_control *control, void *private_data,
+		unsigned gpio)
+{
+	struct lnw_gpio *lnw = private_data;
+	u32 __iomem *mem;
+	u32 value;
+
+	mem = gpio_reg(&lnw->chip, gpio, control->reg);
+
+	value = readl(mem);
+	value &= BIT(gpio % 32);
+
+	if (control->invert)
+		return value ? 0 : 1;
+	else
+		return value ? 1 : 0;
+}
+
+static int gpio_set_normal(struct gpio_control *control, void *private_data,
+		unsigned gpio, unsigned int num)
+{
+	struct lnw_gpio *lnw = private_data;
+	u32 __iomem *mem;
+	u32 value;
+	unsigned long flags;
+
+	mem = gpio_reg(&lnw->chip, gpio, control->reg);
+
+	spin_lock_irqsave(&lnw->lock, flags);
+	value = readl(mem);
+	value &= ~BIT(gpio % 32);
+	if (control->invert) {
+		if (num)
+			value &= ~BIT(gpio % 32);
+		else
+			value |= BIT(gpio % 32);
+	} else {
+		if (num)
+			value |= BIT(gpio % 32);
+		else
+			value &= ~BIT(gpio % 32);
+	}
+	writel(value, mem);
+	spin_unlock_irqrestore(&lnw->lock, flags);
+
+	return 0;
+}
+
+static int gpio_get_irqtype(struct gpio_control *control, void *private_data,
+		unsigned gpio)
+{
+	struct lnw_gpio *lnw = private_data;
+	void __iomem *grer = gpio_reg(&lnw->chip, gpio, GRER);
+	void __iomem *gfer = gpio_reg(&lnw->chip, gpio, GFER);
+	u32 value;
+	int num;
+
+	value = readl(grer) & BIT(gpio % 32);
+	num = value ? 1 : 0;
+	value = readl(gfer) & BIT(gpio % 32);
+	if (num)
+		num = value ? 3 : 1;
+	else
+		num = value ? 2 : 0;
+
+	return num;
+}
+
+static int flis_get_normal(struct gpio_control *control, void *private_data,
+		unsigned gpio)
+{
+	struct lnw_gpio *lnw = private_data;
+	u32 offset, value;
+	int num;
+
+	if (lnw->type == TANGIER_GPIO) {
+		offset = lnw->get_flis_offset(gpio);
+		if (WARN(offset == -EINVAL, "invalid pin %d\n", gpio))
+			return -1;
+
+		value = get_flis_value(offset);
+		num = (value >> control->shift) & control->mask;
+		if (num < control->num)
+			return num;
+	}
+
+	return -1;
+}
+
+static int flis_set_normal(struct gpio_control *control, void *private_data,
+		unsigned gpio, unsigned int num)
+{
+	struct lnw_gpio *lnw = private_data;
+	u32 shift = control->shift;
+	u32 mask = control->mask;
+	u32 offset, value;
+	unsigned long flags;
+
+	if (lnw->type == TANGIER_GPIO) {
+		offset = lnw->get_flis_offset(gpio);
+		if (WARN(offset == -EINVAL, "invalid pin %d\n", gpio))
+			return -1;
+
+		if (!is_merr_i2c_flis(offset))
+			spin_lock_irqsave(&lnw->lock, flags);
+		value = get_flis_value(offset);
+		value &= ~(mask << shift);
+		value |= ((num & mask) << shift);
+		set_flis_value(value, offset);
+		if (!is_merr_i2c_flis(offset))
+			spin_unlock_irqrestore(&lnw->lock, flags);
+		return 0;
+	}
+
+	return -1;
+}
+
+static int flis_get_override(struct gpio_control *control, void *private_data,
+		unsigned gpio)
+{
+	struct lnw_gpio *lnw = private_data;
+	u32 offset, value;
+	u32 val_bit, en_bit;
+	int num;
+
+	if (lnw->type == TANGIER_GPIO) {
+		offset = lnw->get_flis_offset(gpio);
+		if (WARN(offset == -EINVAL, "invalid pin %d\n", gpio))
+			return -1;
+
+		val_bit = 1 << control->shift;
+		en_bit = 1 << control->rshift;
+
+		value = get_flis_value(offset);
+
+		if (value & en_bit)
+			if (value & val_bit)
+				num = 1;
+			else
+				num = 2;
+		else
+			num = 0;
+
+		return num;
+	}
+
+	return -1;
+}
+
+static int flis_set_override(struct gpio_control *control, void *private_data,
+		unsigned gpio, unsigned int num)
+{
+	struct lnw_gpio *lnw = private_data;
+	u32 offset, value;
+	u32 val_bit, en_bit;
+	unsigned long flags;
+
+	if (lnw->type == TANGIER_GPIO) {
+		offset = lnw->get_flis_offset(gpio);
+		if (WARN(offset == -EINVAL, "invalid pin %d\n", gpio))
+			return -1;
+
+		val_bit = 1 << control->shift;
+		en_bit = 1 << control->rshift;
+
+		if (!is_merr_i2c_flis(offset))
+			spin_lock_irqsave(&lnw->lock, flags);
+		value = get_flis_value(offset);
+		switch (num) {
+		case 0:
+			value &= ~(en_bit | val_bit);
+			break;
+		case 1:
+			value |= (en_bit | val_bit);
+			break;
+		case 2:
+			value |= en_bit;
+			value &= ~val_bit;
+			break;
+		default:
+			break;
+		}
+		set_flis_value(value, offset);
+		if (!is_merr_i2c_flis(offset))
+			spin_unlock_irqrestore(&lnw->lock, flags);
+
+		return 0;
+	}
+
+	return -1;
+}
+
+#define GPIO_VALUE_CONTROL(xtype, xinfo, xnum) \
+{	.type = xtype, .pininfo = xinfo, .num = xnum, \
+	.get = gpio_get_pinvalue, .set = gpio_set_pinvalue}
+#define GPIO_NORMAL_CONTROL(xtype, xinfo, xnum, xreg, xinvert) \
+{	.type = xtype, .pininfo = xinfo, .num = xnum, .reg = xreg, \
+	.invert = xinvert, .get = gpio_get_normal, .set = gpio_set_normal}
+#define GPIO_IRQTYPE_CONTROL(xtype, xinfo, xnum) \
+{	.type = xtype, .pininfo = xinfo, .num = xnum, \
+	.get = gpio_get_irqtype, .set = NULL}
+#define FLIS_NORMAL_CONTROL(xtype, xinfo, xnum, xshift, xmask) \
+{	.type = xtype, .pininfo = xinfo, .num = xnum, .shift = xshift, \
+	.mask = xmask, .get = flis_get_normal, .set = flis_set_normal}
+#define FLIS_OVERRIDE_CONTROL(xtype, xinfo, xnum, xshift, xrshift) \
+{	.type = xtype, .pininfo = xinfo, .num = xnum, .shift = xshift, \
+	.rshift = xrshift, .get = flis_get_override, .set = flis_set_override}
+
+static struct gpio_control lnw_gpio_controls[] = {
+GPIO_VALUE_CONTROL(TYPE_PIN_VALUE, pinvalue, 2),
+GPIO_NORMAL_CONTROL(TYPE_DIRECTION, pindirection, 2, GPDR, 0),
+GPIO_IRQTYPE_CONTROL(TYPE_IRQ_TYPE, irqtype, 4),
+GPIO_NORMAL_CONTROL(TYPE_DEBOUNCE, enable, 2, GFBR_TNG, 1),
+FLIS_NORMAL_CONTROL(TYPE_PINMUX, pinmux, 8, 0, 0x7),
+FLIS_NORMAL_CONTROL(TYPE_PULLSTRENGTH, pullstrength, 4, 4, 0x7),
+FLIS_NORMAL_CONTROL(TYPE_PULLMODE, pullmode, 3, 8, 0x3),
+FLIS_NORMAL_CONTROL(TYPE_OPEN_DRAIN, enable, 2, 21, 0x1),
+FLIS_OVERRIDE_CONTROL(TYPE_OVERRIDE_INDIR, override_direction, 3, 12, 13),
+FLIS_OVERRIDE_CONTROL(TYPE_OVERRIDE_OUTDIR, override_direction, 3, 14, 15),
+FLIS_OVERRIDE_CONTROL(TYPE_OVERRIDE_INVAL, override_value, 3, 16, 17),
+FLIS_OVERRIDE_CONTROL(TYPE_OVERRIDE_OUTVAL, override_value, 3, 18, 19),
+FLIS_OVERRIDE_CONTROL(TYPE_SBY_OVR_IO, standby_trigger, 3, 23, 22),
+FLIS_OVERRIDE_CONTROL(TYPE_SBY_OVR_OUTVAL, override_value, 3, 18, 24),
+FLIS_OVERRIDE_CONTROL(TYPE_SBY_OVR_INVAL, override_value, 3, 16, 25),
+FLIS_OVERRIDE_CONTROL(TYPE_SBY_OVR_OUTDIR, override_direction, 3, 14, 26),
+FLIS_OVERRIDE_CONTROL(TYPE_SBY_OVR_INDIR, override_direction, 3, 12, 27),
+FLIS_NORMAL_CONTROL(TYPE_SBY_PUPD_STATE, standby_pupd_state, 4, 28, 0x3),
+FLIS_NORMAL_CONTROL(TYPE_SBY_OD_DIS, enable, 2, 30, 0x1),
+};
+
+static unsigned int lnw_get_conf_reg(struct gpio_debug *debug, unsigned gpio)
+{
+	struct lnw_gpio *lnw = debug->private_data;
+	u32 offset, value = 0;
+
+	if (lnw->type == TANGIER_GPIO) {
+		offset = lnw->get_flis_offset(gpio);
+		if (WARN(offset == -EINVAL, "invalid pin %d\n", gpio))
+			return -EINVAL;
+
+		value = get_flis_value(offset);
+	}
+
+	return value;
+}
+
+static void lnw_set_conf_reg(struct gpio_debug *debug, unsigned gpio,
+		unsigned int value)
+{
+	struct lnw_gpio *lnw = debug->private_data;
+	u32 offset;
+
+	if (lnw->type == TANGIER_GPIO) {
+		offset = lnw->get_flis_offset(gpio);
+		if (WARN(offset == -EINVAL, "invalid pin %d\n", gpio))
+			return;
+
+		set_flis_value(value, offset);
+	}
+
+	return;
+}
+
+static char **lnw_get_avl_pininfo(struct gpio_debug *debug, unsigned gpio,
+		unsigned int type, unsigned *num)
+{
+	struct gpio_control *control;
+
+	control = find_gpio_control(lnw_gpio_controls,
+			ARRAY_SIZE(lnw_gpio_controls), type);
+	if (control == NULL)
+		return NULL;
+
+	*num = control->num;
+
+	return control->pininfo;
+}
+
+static char *lnw_get_cul_pininfo(struct gpio_debug *debug, unsigned gpio,
+		unsigned int type)
+{
+	struct lnw_gpio *lnw = debug->private_data;
+	struct gpio_control *control;
+	int num;
+
+	control = find_gpio_control(lnw_gpio_controls,
+			ARRAY_SIZE(lnw_gpio_controls), type);
+	if (control == NULL)
+		return NULL;
+
+	num = control->get(control, lnw, gpio);
+	if (num == -1)
+		return NULL;
+
+	return *(control->pininfo + num);
+}
+
+static void lnw_set_pininfo(struct gpio_debug *debug, unsigned gpio,
+		unsigned int type, const char *info)
+{
+	struct lnw_gpio *lnw = debug->private_data;
+	struct gpio_control *control;
+	int num;
+
+	control = find_gpio_control(lnw_gpio_controls,
+			ARRAY_SIZE(lnw_gpio_controls), type);
+	if (control == NULL)
+		return;
+
+	num = find_pininfo_num(control, info);
+	if (num == -1)
+		return;
+
+	if (control->set)
+		control->set(control, lnw, gpio, num);
+}
+
+static int lnw_get_register_msg(char **buf, unsigned long *size)
+{
+	*buf = conf_reg_msg;
+	*size = strlen(conf_reg_msg);
+
+	return 0;
+}
+
+static struct gpio_debug_ops lnw_gpio_debug_ops = {
+	.get_conf_reg = lnw_get_conf_reg,
+	.set_conf_reg = lnw_set_conf_reg,
+	.get_avl_pininfo = lnw_get_avl_pininfo,
+	.get_cul_pininfo = lnw_get_cul_pininfo,
+	.set_pininfo = lnw_set_pininfo,
+	.get_register_msg = lnw_get_register_msg,
+};
+
 static void lnw_irq_init_hw(struct lnw_gpio *lnw)
 {
 	void __iomem *reg;
@@ -303,6 +1245,16 @@
 	.xlate = irq_domain_xlate_twocell,
 };
 
+static int lnw_gpio_runtime_resume(struct device *dev)
+{
+	return 0;
+}
+
+static int lnw_gpio_runtime_suspend(struct device *dev)
+{
+	return 0;
+}
+
 static int lnw_gpio_runtime_idle(struct device *dev)
 {
 	int err = pm_schedule_suspend(dev, 500);
@@ -314,7 +1266,9 @@
 }
 
 static const struct dev_pm_ops lnw_gpio_pm_ops = {
-	SET_RUNTIME_PM_OPS(NULL, NULL, lnw_gpio_runtime_idle)
+	SET_RUNTIME_PM_OPS(lnw_gpio_runtime_suspend,
+			   lnw_gpio_runtime_resume,
+			   lnw_gpio_runtime_idle)
 };
 
 static int lnw_gpio_probe(struct pci_dev *pdev,
@@ -323,10 +1277,15 @@
 	void *base;
 	resource_size_t start, len;
 	struct lnw_gpio *lnw;
+	struct gpio_debug *debug;
 	u32 gpio_base;
 	u32 irq_base;
 	int retval;
-	int ngpio = id->driver_data;
+	struct lnw_gpio_ddata_t *ddata;
+	int pid;
+
+	pid = id->driver_data;
+	ddata = &lnw_gpio_ddata[pid];
 
 	retval = pci_enable_device(pdev);
 	if (retval)
@@ -367,20 +1326,29 @@
 		goto err_ioremap;
 	}
 
+	lnw->type = pid;
 	lnw->reg_base = base;
+	lnw->reg_gplr = lnw->reg_base + ddata->gplr_offset;
+	lnw->get_flis_offset = ddata->get_flis_offset;
+	lnw->chip_irq_type = ddata->chip_irq_type;
 	lnw->chip.label = dev_name(&pdev->dev);
 	lnw->chip.request = lnw_gpio_request;
 	lnw->chip.direction_input = lnw_gpio_direction_input;
 	lnw->chip.direction_output = lnw_gpio_direction_output;
+	lnw->chip.set_pinmux = lnw_gpio_set_alt;
+	lnw->chip.get_pinmux = gpio_get_alt;
 	lnw->chip.get = lnw_gpio_get;
 	lnw->chip.set = lnw_gpio_set;
 	lnw->chip.to_irq = lnw_gpio_to_irq;
 	lnw->chip.base = gpio_base;
-	lnw->chip.ngpio = ngpio;
+	lnw->chip.ngpio = ddata->ngpio;
 	lnw->chip.can_sleep = 0;
+	lnw->chip.set_debounce = lnw_gpio_set_debounce;
+	lnw->chip.dev = &pdev->dev;
 	lnw->pdev = pdev;
-
-	lnw->domain = irq_domain_add_simple(pdev->dev.of_node, ngpio, irq_base,
+	spin_lock_init(&lnw->lock);
+	lnw->domain = irq_domain_add_simple(pdev->dev.of_node,
+					    lnw->chip.ngpio, irq_base,
 					    &lnw_gpio_irq_ops, lnw);
 	if (!lnw->domain) {
 		retval = -ENOMEM;
@@ -395,15 +1363,40 @@
 	}
 
 	lnw_irq_init_hw(lnw);
-
 	irq_set_handler_data(pdev->irq, lnw);
 	irq_set_chained_handler(pdev->irq, lnw_irq_handler);
 
-	spin_lock_init(&lnw->lock);
-
 	pm_runtime_put_noidle(&pdev->dev);
 	pm_runtime_allow(&pdev->dev);
 
+	/* add for gpiodebug */
+	debug = gpio_debug_alloc();
+	if (debug) {
+		__set_bit(TYPE_OVERRIDE_OUTDIR, debug->typebit);
+		__set_bit(TYPE_OVERRIDE_OUTVAL, debug->typebit);
+		__set_bit(TYPE_OVERRIDE_INDIR, debug->typebit);
+		__set_bit(TYPE_OVERRIDE_INVAL, debug->typebit);
+		__set_bit(TYPE_SBY_OVR_IO, debug->typebit);
+		__set_bit(TYPE_SBY_OVR_OUTVAL, debug->typebit);
+		__set_bit(TYPE_SBY_OVR_INVAL, debug->typebit);
+		__set_bit(TYPE_SBY_OVR_OUTDIR, debug->typebit);
+		__set_bit(TYPE_SBY_OVR_INDIR, debug->typebit);
+		__set_bit(TYPE_SBY_PUPD_STATE, debug->typebit);
+		__set_bit(TYPE_SBY_OD_DIS, debug->typebit);
+
+		debug->chip = &lnw->chip;
+		debug->ops = &lnw_gpio_debug_ops;
+		debug->private_data = lnw;
+		lnw->debug = debug;
+
+		retval = gpio_debug_register(debug);
+		if (retval) {
+			dev_err(&pdev->dev, "langwell gpio_debug_register failed %d\n",
+				retval);
+			gpio_debug_remove(debug);
+		}
+	}
+
 	return 0;
 
 err_ioremap:
@@ -506,4 +1499,4 @@
 	return ret;
 }
 
-device_initcall(lnw_gpio_init);
+fs_initcall(lnw_gpio_init);
diff --git a/drivers/gpio/gpio-lynxpoint.c b/drivers/gpio/gpio-lynxpoint.c
index 86c17de..71d8614 100644
--- a/drivers/gpio/gpio-lynxpoint.c
+++ b/drivers/gpio/gpio-lynxpoint.c
@@ -248,14 +248,15 @@
 	struct lp_gpio *lg = irq_data_get_irq_handler_data(data);
 	struct irq_chip *chip = irq_data_get_irq_chip(data);
 	u32 base, pin, mask;
-	unsigned long reg, pending;
+	unsigned long reg, ena, pending;
 	unsigned virq;
 
 	/* check from GPIO controller which pin triggered the interrupt */
 	for (base = 0; base < lg->chip.ngpio; base += 32) {
 		reg = lp_gpio_reg(&lg->chip, base, LP_INT_STAT);
+		ena = lp_gpio_reg(&lg->chip, base, LP_INT_ENABLE);
 
-		while ((pending = inl(reg))) {
+		while ((pending = (inl(reg) & inl(ena)))) {
 			pin = __ffs(pending);
 			mask = BIT(pin);
 			/* Clear before handling so we don't lose an edge */
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 4a43036..5405212 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -63,6 +63,7 @@
 	struct gpio_chip chip;
 	struct clk *dbck;
 	u32 mod_usage;
+	u32 irq_usage;
 	u32 dbck_enable_mask;
 	bool dbck_enabled;
 	struct device *dev;
@@ -86,6 +87,9 @@
 #define GPIO_BIT(bank, gpio) (1 << GPIO_INDEX(bank, gpio))
 #define GPIO_MOD_CTRL_BIT	BIT(0)
 
+#define BANK_USED(bank) (bank->mod_usage || bank->irq_usage)
+#define LINE_USED(line, offset) (line & (1 << offset))
+
 static int irq_to_gpio(struct gpio_bank *bank, unsigned int gpio_irq)
 {
 	return bank->chip.base + gpio_irq;
@@ -420,15 +424,69 @@
 	return 0;
 }
 
+static void _enable_gpio_module(struct gpio_bank *bank, unsigned offset)
+{
+	if (bank->regs->pinctrl) {
+		void __iomem *reg = bank->base + bank->regs->pinctrl;
+
+		/* Claim the pin for MPU */
+		__raw_writel(__raw_readl(reg) | (1 << offset), reg);
+	}
+
+	if (bank->regs->ctrl && !BANK_USED(bank)) {
+		void __iomem *reg = bank->base + bank->regs->ctrl;
+		u32 ctrl;
+
+		ctrl = __raw_readl(reg);
+		/* Module is enabled, clocks are not gated */
+		ctrl &= ~GPIO_MOD_CTRL_BIT;
+		__raw_writel(ctrl, reg);
+		bank->context.ctrl = ctrl;
+	}
+}
+
+static void _disable_gpio_module(struct gpio_bank *bank, unsigned offset)
+{
+	void __iomem *base = bank->base;
+
+	if (bank->regs->wkup_en &&
+	    !LINE_USED(bank->mod_usage, offset) &&
+	    !LINE_USED(bank->irq_usage, offset)) {
+		/* Disable wake-up during idle for dynamic tick */
+		_gpio_rmw(base, bank->regs->wkup_en, 1 << offset, 0);
+		bank->context.wake_en =
+			__raw_readl(bank->base + bank->regs->wkup_en);
+	}
+
+	if (bank->regs->ctrl && !BANK_USED(bank)) {
+		void __iomem *reg = bank->base + bank->regs->ctrl;
+		u32 ctrl;
+
+		ctrl = __raw_readl(reg);
+		/* Module is disabled, clocks are gated */
+		ctrl |= GPIO_MOD_CTRL_BIT;
+		__raw_writel(ctrl, reg);
+		bank->context.ctrl = ctrl;
+	}
+}
+
+static int gpio_is_input(struct gpio_bank *bank, int mask)
+{
+	void __iomem *reg = bank->base + bank->regs->direction;
+
+	return __raw_readl(reg) & mask;
+}
+
 static int gpio_irq_type(struct irq_data *d, unsigned type)
 {
 	struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
 	unsigned gpio = 0;
 	int retval;
 	unsigned long flags;
+	unsigned offset;
 
-	if (WARN_ON(!bank->mod_usage))
-		return -EINVAL;
+	if (!BANK_USED(bank))
+		pm_runtime_get_sync(bank->dev);
 
 #ifdef CONFIG_ARCH_OMAP1
 	if (d->irq > IH_MPUIO_BASE)
@@ -446,7 +504,17 @@
 		return -EINVAL;
 
 	spin_lock_irqsave(&bank->lock, flags);
-	retval = _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), type);
+	offset = GPIO_INDEX(bank, gpio);
+	retval = _set_gpio_triggering(bank, offset, type);
+	if (!LINE_USED(bank->mod_usage, offset)) {
+		_enable_gpio_module(bank, offset);
+		_set_gpio_direction(bank, offset, 1);
+	} else if (!gpio_is_input(bank, 1 << offset)) {
+		spin_unlock_irqrestore(&bank->lock, flags);
+		return -EINVAL;
+	}
+
+	bank->irq_usage |= 1 << GPIO_INDEX(bank, gpio);
 	spin_unlock_irqrestore(&bank->lock, flags);
 
 	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
@@ -603,35 +671,19 @@
 	 * If this is the first gpio_request for the bank,
 	 * enable the bank module.
 	 */
-	if (!bank->mod_usage)
+	if (!BANK_USED(bank))
 		pm_runtime_get_sync(bank->dev);
 
 	spin_lock_irqsave(&bank->lock, flags);
 	/* Set trigger to none. You need to enable the desired trigger with
-	 * request_irq() or set_irq_type().
+	 * request_irq() or set_irq_type(). Only do this if the IRQ line has
+	 * not already been requested.
 	 */
-	_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
-
-	if (bank->regs->pinctrl) {
-		void __iomem *reg = bank->base + bank->regs->pinctrl;
-
-		/* Claim the pin for MPU */
-		__raw_writel(__raw_readl(reg) | (1 << offset), reg);
+	if (!LINE_USED(bank->irq_usage, offset)) {
+		_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
+		_enable_gpio_module(bank, offset);
 	}
-
-	if (bank->regs->ctrl && !bank->mod_usage) {
-		void __iomem *reg = bank->base + bank->regs->ctrl;
-		u32 ctrl;
-
-		ctrl = __raw_readl(reg);
-		/* Module is enabled, clocks are not gated */
-		ctrl &= ~GPIO_MOD_CTRL_BIT;
-		__raw_writel(ctrl, reg);
-		bank->context.ctrl = ctrl;
-	}
-
 	bank->mod_usage |= 1 << offset;
-
 	spin_unlock_irqrestore(&bank->lock, flags);
 
 	return 0;
@@ -640,31 +692,11 @@
 static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
 {
 	struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
-	void __iomem *base = bank->base;
 	unsigned long flags;
 
 	spin_lock_irqsave(&bank->lock, flags);
-
-	if (bank->regs->wkup_en) {
-		/* Disable wake-up during idle for dynamic tick */
-		_gpio_rmw(base, bank->regs->wkup_en, 1 << offset, 0);
-		bank->context.wake_en =
-			__raw_readl(bank->base + bank->regs->wkup_en);
-	}
-
 	bank->mod_usage &= ~(1 << offset);
-
-	if (bank->regs->ctrl && !bank->mod_usage) {
-		void __iomem *reg = bank->base + bank->regs->ctrl;
-		u32 ctrl;
-
-		ctrl = __raw_readl(reg);
-		/* Module is disabled, clocks are gated */
-		ctrl |= GPIO_MOD_CTRL_BIT;
-		__raw_writel(ctrl, reg);
-		bank->context.ctrl = ctrl;
-	}
-
+	_disable_gpio_module(bank, offset);
 	_reset_gpio(bank, bank->chip.base + offset);
 	spin_unlock_irqrestore(&bank->lock, flags);
 
@@ -672,7 +704,7 @@
 	 * If this is the last gpio to be freed in the bank,
 	 * disable the bank module.
 	 */
-	if (!bank->mod_usage)
+	if (!BANK_USED(bank))
 		pm_runtime_put(bank->dev);
 }
 
@@ -762,10 +794,20 @@
 	struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
 	unsigned int gpio = irq_to_gpio(bank, d->hwirq);
 	unsigned long flags;
+	unsigned offset = GPIO_INDEX(bank, gpio);
 
 	spin_lock_irqsave(&bank->lock, flags);
+	bank->irq_usage &= ~(1 << offset);
+	_disable_gpio_module(bank, offset);
 	_reset_gpio(bank, gpio);
 	spin_unlock_irqrestore(&bank->lock, flags);
+
+	/*
+	 * If this is the last IRQ to be freed in the bank,
+	 * disable the bank module.
+	 */
+	if (!BANK_USED(bank))
+		pm_runtime_put(bank->dev);
 }
 
 static void gpio_ack_irq(struct irq_data *d)
@@ -897,13 +939,6 @@
 	return 0;
 }
 
-static int gpio_is_input(struct gpio_bank *bank, int mask)
-{
-	void __iomem *reg = bank->base + bank->regs->direction;
-
-	return __raw_readl(reg) & mask;
-}
-
 static int gpio_get(struct gpio_chip *chip, unsigned offset)
 {
 	struct gpio_bank *bank;
@@ -922,13 +957,22 @@
 {
 	struct gpio_bank *bank;
 	unsigned long flags;
+	int retval = 0;
 
 	bank = container_of(chip, struct gpio_bank, chip);
 	spin_lock_irqsave(&bank->lock, flags);
+
+	if (LINE_USED(bank->irq_usage, offset)) {
+			retval = -EINVAL;
+			goto exit;
+	}
+
 	bank->set_dataout(bank, offset, value);
 	_set_gpio_direction(bank, offset, 0);
+
+exit:
 	spin_unlock_irqrestore(&bank->lock, flags);
-	return 0;
+	return retval;
 }
 
 static int gpio_debounce(struct gpio_chip *chip, unsigned offset,
@@ -1400,7 +1444,7 @@
 	struct gpio_bank *bank;
 
 	list_for_each_entry(bank, &omap_gpio_list, node) {
-		if (!bank->mod_usage || !bank->loses_context)
+		if (!BANK_USED(bank) || !bank->loses_context)
 			continue;
 
 		bank->power_mode = pwr_mode;
@@ -1414,7 +1458,7 @@
 	struct gpio_bank *bank;
 
 	list_for_each_entry(bank, &omap_gpio_list, node) {
-		if (!bank->mod_usage || !bank->loses_context)
+		if (!BANK_USED(bank) || !bank->loses_context)
 			continue;
 
 		pm_runtime_get_sync(bank->dev);
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 426c51d..2506178 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -59,6 +59,7 @@
 	{ "pca9557", 8  | PCA953X_TYPE, },
 	{ "pca9574", 8  | PCA957X_TYPE | PCA_INT, },
 	{ "pca9575", 16 | PCA957X_TYPE | PCA_INT, },
+	{ "pca953x", 8  | PCA957X_TYPE | PCA_INT, },
 
 	{ "max7310", 8  | PCA953X_TYPE, },
 	{ "max7312", 16 | PCA953X_TYPE | PCA_INT, },
@@ -778,7 +779,6 @@
 		if (ret < 0)
 			dev_warn(&client->dev, "setup failed, %d\n", ret);
 	}
-
 	i2c_set_clientdata(client, chip);
 	return 0;
 }
@@ -838,6 +838,17 @@
 
 MODULE_DEVICE_TABLE(of, pca953x_dt_ids);
 
+int pca953x_command(struct i2c_client *client, unsigned int cmd, void *arg)
+{
+	u8 *buff = (u8 *)arg;
+	struct pca953x_chip *chip;
+	if (cmd == 1) {
+		chip = i2c_get_clientdata(client);
+		pca953x_write_single(chip, buff[0], buff[1], 0);
+	}
+	return 0;
+}
+
 static struct i2c_driver pca953x_driver = {
 	.driver = {
 		.name	= "pca953x",
@@ -846,8 +857,8 @@
 	.probe		= pca953x_probe,
 	.remove		= pca953x_remove,
 	.id_table	= pca953x_id,
+	.command	= pca953x_command,
 };
-
 static int __init pca953x_init(void)
 {
 	return i2c_add_driver(&pca953x_driver);
diff --git a/drivers/gpio/gpiodebug.c b/drivers/gpio/gpiodebug.c
new file mode 100644
index 0000000..c14592c
--- /dev/null
+++ b/drivers/gpio/gpiodebug.c
@@ -0,0 +1,543 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include<linux/slab.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/ctype.h>
+#include "gpiodebug.h"
+
+struct gpiodebug_data {
+	struct gpio_debug *debug;
+	int gpio;
+	unsigned int type;
+};
+
+enum {
+	REGISTER_FOPS = 0,
+	NORMAL_FOPS,
+	COUNT_FOPS,
+};
+
+static struct {
+	unsigned	fops_type;
+	unsigned	type;
+	char		*available_name;
+	char		*current_name;
+} global_array[] = {
+	{REGISTER_FOPS, TYPE_CONF_REG, "conf_reg", "conf_reg"},
+	{NORMAL_FOPS, TYPE_PIN_VALUE, "available_value",
+		"current_value"},
+	{NORMAL_FOPS, TYPE_DIRECTION, "available_direction",
+		"current_direction"},
+	{NORMAL_FOPS, TYPE_IRQ_TYPE, "available_irqtype",
+		"current_irqtype"},
+	{NORMAL_FOPS, TYPE_PINMUX, "available_pinmux",
+		"current_pinmux"},
+	{NORMAL_FOPS, TYPE_PULLMODE, "available_pullmode",
+		"current_pullmode"},
+	{NORMAL_FOPS, TYPE_PULLSTRENGTH, "available_pullstrength",
+		"current_pullstrength"},
+	{NORMAL_FOPS, TYPE_OPEN_DRAIN, "available_opendrain",
+		"current_opendrain"},
+	{COUNT_FOPS, TYPE_IRQ_COUNT, "irq_count", "irq_count"},
+	{NORMAL_FOPS, TYPE_WAKEUP, "available_wakeup", "current_wakeup"},
+	{COUNT_FOPS, TYPE_WAKEUP_COUNT, "wakeup_count", "wakeup_count"},
+	{NORMAL_FOPS, TYPE_DEBOUNCE, "available_debounce",
+		"current_debounce"},
+	{NORMAL_FOPS, TYPE_OVERRIDE_OUTDIR, "available_override_outdir",
+		"current_override_outdir"},
+	{NORMAL_FOPS, TYPE_OVERRIDE_OUTVAL, "available_override_outval",
+		"current_override_outval"},
+	{NORMAL_FOPS, TYPE_OVERRIDE_INDIR, "available_override_indir",
+		"current_override_indir"},
+	{NORMAL_FOPS, TYPE_OVERRIDE_INVAL, "available_override_inval",
+		"current_override_inval"},
+	{NORMAL_FOPS, TYPE_SBY_OVR_IO, "available_standby_trigger",
+		"current_standby_trigger"},
+	{NORMAL_FOPS, TYPE_SBY_OVR_OUTVAL, "available_standby_outval",
+		"current_standby_outval"},
+	{NORMAL_FOPS, TYPE_SBY_OVR_INVAL, "available_standby_inval",
+		"current_standby_inval"},
+	{NORMAL_FOPS, TYPE_SBY_OVR_OUTDIR, "available_standby_outdir",
+		"current_standby_outdir"},
+	{NORMAL_FOPS, TYPE_SBY_OVR_INDIR, "available_standby_indir",
+		"current_standby_indir"},
+	{NORMAL_FOPS, TYPE_SBY_PUPD_STATE, "available_standby_pullmode",
+		"current_standby_pullmode"},
+	{NORMAL_FOPS, TYPE_SBY_OD_DIS, "available_standby_opendrain",
+		"current_standby_opendrain"},
+	{NORMAL_FOPS, TYPE_IRQ_LINE, "available_irq_line",
+		"current_irq_line"},
+
+};
+
+static struct dentry *gpio_root[ARCH_NR_GPIOS];
+static struct gpiodebug_data global_data[ARCH_NR_GPIOS][TYPE_MAX];
+
+static struct dentry *gpiodebug_debugfs_root;
+
+struct gpio_control *find_gpio_control(struct gpio_control *control, int num,
+			unsigned type)
+{
+	int i;
+
+	for (i = 0; i < num; i++) {
+		if ((control+i)->type == type)
+			break;
+	}
+
+	if (i < num)
+		return control+i;
+
+	return NULL;
+}
+
+int find_pininfo_num(struct gpio_control *control, const char *info)
+{
+	int num = 0;
+
+	while (num < control->num) {
+		if (!strcmp(*(control->pininfo+num), info))
+			break;
+		num++;
+	}
+
+	if (num < control->num)
+		return num;
+
+	return -1;
+}
+
+static struct dentry *gpiodebug_create_file(const char *name,
+			umode_t mode, struct dentry *parent,
+			void *data, const struct file_operations *fops)
+{
+	struct dentry *ret;
+
+	ret = debugfs_create_file(name, mode, parent, data, fops);
+	if (!ret)
+		pr_warn("Could not create debugfs '%s' entry\n", name);
+
+	return ret;
+}
+
+static int gpiodebug_open_file(struct inode *inode, struct file *filp)
+{
+	filp->private_data = inode->i_private;
+	return 0;
+}
+
+static const char readme_msg[] =
+	"\n GPIO Debug Tool-HOWTO (Example):\n\n"
+	"# mount -t debugfs nodev /sys/kernel/debug\n\n"
+	"# cat /sys/kernel/debug/gpio_debug/gpio0/available_pullmode\n"
+	"nopull	pullup	pulldown\n\n"
+	"# cat /sys/kernel/debug/gpio_debug/gpio0/current_pullmode\n"
+	"nopull\n"
+	"# echo pullup > /sys/kernel/debug/gpio_debug/gpio0/current_pullmode\n"
+	"# cat /sys/kernel/debug/gpio_debug/gpio0/current_pullmode\n"
+	"pullup\n\n"
+	"# cat conf_reg\n"
+	"0x00003120\n"
+	"# echo 0x00003121 > conf_reg\n"
+	"0x00003121\n\n"
+	"# cat irq_count\n"
+	"1\n";
+
+/* gpio_readme_fops */
+static ssize_t show_gpio_readme(struct file *filp, char __user *ubuf,
+		size_t cnt, loff_t *ppos)
+{
+	ssize_t ret = 0;
+
+	if (*ppos < 0 || !cnt)
+		return -EINVAL;
+
+	ret = simple_read_from_buffer(ubuf, cnt, ppos, readme_msg,
+		strlen(readme_msg));
+
+	return ret;
+}
+
+static const struct file_operations gpio_readme_fops = {
+	.open		= gpiodebug_open_file,
+	.read		= show_gpio_readme,
+	.llseek		= generic_file_llseek,
+};
+
+/* gpio_reginfo_fops */
+static ssize_t show_gpio_reginfo(struct file *filp, char __user *ubuf,
+		size_t cnt, loff_t *ppos)
+{
+	ssize_t ret = 0;
+	struct gpio_debug *debug = filp->private_data;
+	unsigned long size;
+	char *buf;
+
+	if (*ppos < 0 || !cnt)
+		return -EINVAL;
+
+	if (debug->ops->get_register_msg) {
+		debug->ops->get_register_msg(&buf, &size);
+		ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, size);
+	}
+
+	return ret;
+}
+
+static const struct file_operations gpio_reginfo_fops = {
+	.open		= gpiodebug_open_file,
+	.read		= show_gpio_reginfo,
+	.llseek		= generic_file_llseek,
+};
+
+
+/* gpio_conf_fops */
+static ssize_t gpio_conf_read(struct file *filp, char __user *ubuf,
+		size_t cnt, loff_t *ppos)
+{
+	ssize_t ret = 0;
+	struct gpiodebug_data *data = filp->private_data;
+	struct gpio_debug *debug = data->debug;
+	int gpio = data->gpio;
+	char *buf;
+	unsigned int value = 0;
+
+	if (*ppos < 0 || !cnt)
+		return -EINVAL;
+
+	buf = kzalloc(cnt, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	if (debug->ops->get_conf_reg)
+		value = debug->ops->get_conf_reg(debug, gpio);
+
+	if (value == -EINVAL)
+		ret = sprintf(buf, "Invalid pin\n");
+	else
+		ret = sprintf(buf, "0x%08x\n", value);
+
+	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, ret);
+
+	kfree(buf);
+	return ret;
+}
+
+static ssize_t gpio_conf_write(struct file *filp, const char __user *ubuf,
+		size_t cnt, loff_t *ppos)
+{
+	ssize_t ret = 0;
+	struct gpiodebug_data *data = filp->private_data;
+	struct gpio_debug *debug = data->debug;
+	int i, gpio = data->gpio;
+	char *buf, *start;
+	unsigned int value;
+
+	ret = cnt;
+
+	if (*ppos < 0 || !cnt)
+		return -EINVAL;
+
+	buf = kzalloc(cnt, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	if (copy_from_user(buf, ubuf, cnt))
+		return -EFAULT;
+
+	start = buf;
+
+	while (*start == ' ')
+		start++;
+
+	/* strip ending whitespace. */
+	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
+		buf[i] = 0;
+
+	if (kstrtouint(start, 16, &value))
+		return -EINVAL;
+
+	if (debug->ops->set_conf_reg)
+		debug->ops->set_conf_reg(debug, gpio, value);
+
+	*ppos += ret;
+
+	return ret;
+}
+
+static const struct file_operations gpio_conf_fops = {
+	.open		= gpiodebug_open_file,
+	.read		= gpio_conf_read,
+	.write		= gpio_conf_write,
+	.llseek		= generic_file_llseek,
+};
+
+/* show_gpiodebug_fops */
+static ssize_t gpiodebug_show_read(struct file *filp, char __user *ubuf,
+		size_t cnt, loff_t *ppos)
+{
+	ssize_t ret = 0;
+	struct gpiodebug_data *data = filp->private_data;
+	struct gpio_debug *debug = data->debug;
+	unsigned int type = data->type;
+	int i, num = 0;
+	int gpio = data->gpio;
+	char *buf, **avl_buf = NULL;
+
+	if (*ppos < 0 || !cnt)
+		return -EINVAL;
+
+	buf = kzalloc(cnt, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	/* debug->ops->get_avl_info */
+	if (debug->ops->get_avl_pininfo) {
+		avl_buf = debug->ops->get_avl_pininfo(debug, gpio, type, &num);
+
+		for (i = 0; i < num; i++)
+			sprintf(buf, "%s%s\t", buf, *(avl_buf+i));
+	}
+
+	ret = sprintf(buf, "%s\n", buf);
+
+	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, ret);
+
+	kfree(buf);
+
+	return ret;
+}
+
+static const struct file_operations show_gpiodebug_fops = {
+	.open		= gpiodebug_open_file,
+	.read		= gpiodebug_show_read,
+	.llseek		= generic_file_llseek,
+};
+
+/* set_gpiodebug_fops */
+static ssize_t gpiodebug_set_gpio_read(struct file *filp, char __user *ubuf,
+		size_t cnt, loff_t *ppos)
+{
+	ssize_t ret = 0;
+	struct gpiodebug_data *data = filp->private_data;
+	struct gpio_debug *debug = data->debug;
+	unsigned int type = data->type;
+	int gpio = data->gpio;
+	char *buf, *cur_info = NULL;
+
+	if (*ppos < 0 || !cnt)
+		return -EINVAL;
+
+	buf = kzalloc(cnt, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	if (debug->ops->get_cul_pininfo)
+		cur_info = debug->ops->get_cul_pininfo(debug, gpio, type);
+
+	if (cur_info)
+		ret = sprintf(buf, "%s\n", cur_info);
+	else
+		ret = sprintf(buf, "\n");
+
+	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, ret);
+
+	kfree(buf);
+
+	return ret;
+}
+
+static ssize_t gpiodebug_set_gpio_write(struct file *filp,
+		const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	ssize_t ret = 0;
+	struct gpiodebug_data *data = filp->private_data;
+	struct gpio_debug *debug = data->debug;
+	unsigned int type = data->type;
+	int i, gpio = data->gpio;
+	char *buf;
+
+	ret = cnt;
+
+	if (*ppos < 0 || !cnt)
+		return -EINVAL;
+
+	buf = kzalloc(cnt, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	if (copy_from_user(buf, ubuf, cnt))
+		return -EFAULT;
+
+	/* strip ending whitespace. */
+	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
+		buf[i] = 0;
+
+	if (debug->ops->set_pininfo)
+		debug->ops->set_pininfo(debug, gpio, type, buf);
+
+	*ppos += ret;
+
+	return ret;
+}
+
+static const struct file_operations set_gpiodebug_fops = {
+	.open		= gpiodebug_open_file,
+	.read		= gpiodebug_set_gpio_read,
+	.write		= gpiodebug_set_gpio_write,
+	.llseek		= generic_file_llseek,
+};
+
+/* show_count_fops */
+static ssize_t show_count_read(struct file *filp, char __user *ubuf,
+		size_t cnt, loff_t *ppos)
+{
+	ssize_t ret = 0;
+	struct gpiodebug_data *data = filp->private_data;
+	struct gpio_debug *debug = data->debug;
+	unsigned int type = data->type;
+	unsigned long count = 0;
+	int gpio = data->gpio;
+	char *buf;
+
+	if (*ppos < 0 || !cnt)
+		return -EINVAL;
+
+	buf = kzalloc(cnt, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	if (type == TYPE_IRQ_COUNT)
+		count = debug->irq_count[gpio];
+	else if (type == TYPE_WAKEUP_COUNT)
+		count = debug->wakeup_count[gpio];
+
+	ret = sprintf(buf, "%ld\n", count);
+
+	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, ret);
+
+	kfree(buf);
+
+	return ret;
+}
+
+static const struct file_operations show_count_fops = {
+	.open		= gpiodebug_open_file,
+	.read		= show_count_read,
+	.llseek		= generic_file_llseek,
+};
+
+/******************************************************************************/
+struct gpio_debug *gpio_debug_alloc(void)
+{
+	struct gpio_debug *debug;
+
+	debug = kzalloc(sizeof(struct gpio_debug), GFP_KERNEL);
+	if (debug) {
+		__set_bit(TYPE_CONF_REG, debug->typebit);
+		__set_bit(TYPE_PIN_VALUE, debug->typebit);
+		__set_bit(TYPE_DIRECTION, debug->typebit);
+		__set_bit(TYPE_IRQ_TYPE, debug->typebit);
+		__set_bit(TYPE_PINMUX, debug->typebit);
+		__set_bit(TYPE_PULLMODE, debug->typebit);
+		__set_bit(TYPE_PULLSTRENGTH, debug->typebit);
+		__set_bit(TYPE_OPEN_DRAIN, debug->typebit);
+		__set_bit(TYPE_IRQ_COUNT, debug->typebit);
+		__set_bit(TYPE_DEBOUNCE, debug->typebit);
+	}
+
+	return debug;
+}
+
+void gpio_debug_remove(struct gpio_debug *debug)
+{
+	struct gpio_chip *chip = debug->chip;
+	int base = chip->base;
+	unsigned ngpio = chip->ngpio;
+	int i;
+
+	for (i = base; i < base+ngpio; i++)
+		debugfs_remove_recursive(gpio_root[i]);
+
+	kfree(debug);
+}
+
+int gpio_debug_register(struct gpio_debug *debug)
+{
+	struct gpio_chip *chip = debug->chip;
+	int base = chip->base;
+	unsigned ngpio = chip->ngpio;
+	int i, j;
+	char gpioname[32];
+
+	for (i = base; i < base+ngpio; i++) {
+		sprintf(gpioname, "gpio%d", i);
+		gpio_root[i] = debugfs_create_dir(gpioname,
+				gpiodebug_debugfs_root);
+		if (!gpio_root[i]) {
+			pr_warn("gpiodebug: Failed to create debugfs directory\n");
+			return -ENOMEM;
+		}
+
+		/* register info */
+		gpiodebug_create_file("register_info", 0400, gpio_root[i],
+			debug, &gpio_reginfo_fops);
+
+		for (j = 0; j < ARRAY_SIZE(global_array); j++) {
+			if (test_bit(global_array[j].type, debug->typebit)) {
+				global_data[i][j].gpio = i;
+				global_data[i][j].debug = debug;
+				global_data[i][j].type = global_array[j].type;
+
+				switch (global_array[j].fops_type) {
+				case REGISTER_FOPS:
+					gpiodebug_create_file(
+					  global_array[j].current_name, 0600,
+					  gpio_root[i], &global_data[i][j],
+					  &gpio_conf_fops);
+					break;
+				case NORMAL_FOPS:
+					gpiodebug_create_file(
+					  global_array[j].available_name, 0400,
+					  gpio_root[i], &global_data[i][j],
+					  &show_gpiodebug_fops);
+
+					gpiodebug_create_file(
+					  global_array[j].current_name, 0600,
+					  gpio_root[i], &global_data[i][j],
+					  &set_gpiodebug_fops);
+					break;
+				case COUNT_FOPS:
+					gpiodebug_create_file(
+					  global_array[j].current_name, 0400,
+					  gpio_root[i], &global_data[i][j],
+					  &show_count_fops);
+					break;
+				default:
+					break;
+				}
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int __init gpio_debug_init(void)
+{
+	gpiodebug_debugfs_root = debugfs_create_dir("gpio_debug", NULL);
+	if (IS_ERR(gpiodebug_debugfs_root) || !gpiodebug_debugfs_root) {
+		pr_warn("gpiodebug: Failed to create debugfs directory\n");
+		gpiodebug_debugfs_root = NULL;
+	}
+
+	/* readme */
+	gpiodebug_create_file("readme", 0400, gpiodebug_debugfs_root,
+		NULL, &gpio_readme_fops);
+
+	return 0;
+}
+
+subsys_initcall(gpio_debug_init);
diff --git a/drivers/gpio/gpiodebug.h b/drivers/gpio/gpiodebug.h
new file mode 100644
index 0000000..12cf550
--- /dev/null
+++ b/drivers/gpio/gpiodebug.h
@@ -0,0 +1,111 @@
+#ifndef __GPIO_DEBUG_H_
+#define __GPIO_DEBUG_H_
+
+#include <linux/gpio.h>
+
+struct gpio_debug;
+
+#define TYPE_CONF_REG			0x00
+#define TYPE_PIN_VALUE			0x01
+#define TYPE_DIRECTION			0x02
+#define TYPE_IRQ_TYPE			0x03
+#define TYPE_PINMUX			0x04
+#define TYPE_PULLMODE			0x05
+#define TYPE_PULLSTRENGTH		0x06
+#define TYPE_OPEN_DRAIN			0x07
+
+#define TYPE_IRQ_COUNT			0x08
+#define TYPE_WAKEUP			0x09
+#define TYPE_WAKEUP_COUNT		0x0A
+#define TYPE_OVERRIDE_OUTDIR		0x0B
+#define TYPE_OVERRIDE_OUTVAL		0x0C
+#define TYPE_OVERRIDE_INDIR		0x0D
+#define TYPE_OVERRIDE_INVAL		0x0E
+#define TYPE_DEBOUNCE			0x0F
+
+#define TYPE_SBY_OVR_IO			0x10
+#define TYPE_SBY_OVR_OUTVAL		0x11
+#define TYPE_SBY_OVR_INVAL		0x12
+#define TYPE_SBY_OVR_OUTDIR		0x13
+#define TYPE_SBY_OVR_INDIR		0x14
+#define TYPE_SBY_PUPD_STATE		0x15
+#define TYPE_SBY_OD_DIS			0x16
+#define TYPE_IRQ_LINE			0x17
+#define TYPE_MAX			0x18
+
+struct gpio_control {
+	unsigned type, num;
+	char	 **pininfo;
+	u32	reg, invert;
+	u32 shift, rshift;
+	u32	mask;
+	int (*get)(struct gpio_control *control, void *private_data,
+		unsigned gpio);
+	int (*set)(struct gpio_control *control, void *private_data,
+		unsigned gpio, unsigned int num);
+	int (*get_handle)(int val);
+	void (*set_handle)(unsigned int num, int *val);
+};
+
+struct gpio_debug_ops {
+	unsigned int (*get_conf_reg)(struct gpio_debug *debug, unsigned gpio);
+	void	(*set_conf_reg)(struct gpio_debug *debug, unsigned gpio,
+			unsigned int value);
+	char	**(*get_avl_pininfo)(struct gpio_debug *debug, unsigned gpio,
+			unsigned int type, unsigned *num);
+	char	*(*get_cul_pininfo)(struct gpio_debug *debug, unsigned gpio,
+			unsigned int type);
+	void	(*set_pininfo)(struct gpio_debug *debug, unsigned gpio,
+			unsigned int type, const char *info);
+	int	(*get_register_msg)(char **buf, unsigned long *size);
+};
+
+struct gpio_debug {
+	unsigned long		typebit[BITS_TO_LONGS(TYPE_MAX)];
+	struct gpio_chip	*chip;
+	struct gpio_debug_ops	*ops;
+	unsigned long		irq_count[ARCH_NR_GPIOS];
+	unsigned long		wakeup_count[ARCH_NR_GPIOS];
+	void			*private_data;
+};
+
+#ifdef CONFIG_GPIODEBUG
+
+#define DEFINE_DEBUG_IRQ_CONUNT_INCREASE(gpio) (debug->irq_count[gpio]++)
+
+struct gpio_control *find_gpio_control(struct gpio_control *control, int num,
+			unsigned type);
+int find_pininfo_num(struct gpio_control *control, const char *info);
+
+struct gpio_debug *gpio_debug_alloc(void);
+void gpio_debug_remove(struct gpio_debug *debug);
+int gpio_debug_register(struct gpio_debug *debug);
+#else
+
+#define DEFINE_DEBUG_IRQ_CONUNT_INCREASE(gpio)
+
+static inline struct gpio_control *find_gpio_control(
+			struct gpio_control *control, int num, unsigned type)
+{
+	return NULL;
+}
+static inline int find_pininfo_num(struct gpio_control *control,
+			const char *info)
+{
+	return 0;
+}
+static inline struct gpio_debug *gpio_debug_alloc(void)
+{
+	return NULL;
+}
+
+static inline void gpio_debug_remove(struct gpio_debug *debug)
+{
+	return NULL;
+}
+static inline int gpio_debug_register(struct gpio_debug *debug)
+{
+	return 0;
+}
+#endif
+#endif
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index a35c5b9..0c51425 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -244,6 +244,9 @@
 
 /*
  * /sys/class/gpio/gpioN... only for GPIOs that are exported
+ *   /pinmux
+ *      * configures GPIO or alternate function
+ *      * r/w as zero (normal GPIO) or alternate function number
  *   /direction
  *      * MAY BE OMITTED if kernel won't allow direction changes
  *      * is read/write as "in" or "out"
@@ -262,6 +265,54 @@
  *      * also affects existing and subsequent "falling" and "rising"
  *        /edge configuration
  */
+static ssize_t gpio_pinmux_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	const struct gpio_desc	*desc = dev_get_drvdata(dev);
+	unsigned		gpio = desc - gpio_desc;
+	struct gpio_chip	*chip;
+	ssize_t			status = -EINVAL;
+
+	mutex_lock(&sysfs_lock);
+
+	chip = desc->chip;
+
+	if (!test_bit(FLAG_EXPORT, &desc->flags))
+		status = -EIO;
+	else if (chip->get_pinmux != NULL)
+		status = sprintf(buf, "%d\n", chip->get_pinmux(gpio));
+
+	mutex_unlock(&sysfs_lock);
+	return status;
+}
+
+static ssize_t gpio_pinmux_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	const struct gpio_desc	*desc = dev_get_drvdata(dev);
+	unsigned		gpio = desc - gpio_desc;
+	ssize_t			status = -EINVAL;
+	struct gpio_chip	*chip;
+	long	mux;
+
+	mutex_lock(&sysfs_lock);
+
+	chip = desc->chip;
+
+	if (!test_bit(FLAG_EXPORT, &desc->flags))
+		status = -EIO;
+	else if (chip->set_pinmux != NULL) {
+		status = kstrtol(buf, 0, &mux);
+		if (status == 0)
+			chip->set_pinmux(gpio, mux);
+	}
+
+	mutex_unlock(&sysfs_lock);
+	return status ? : size;
+}
+
+static DEVICE_ATTR(pinmux, 0644,
+		gpio_pinmux_show, gpio_pinmux_store);
 
 static ssize_t gpio_direction_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
@@ -796,6 +847,10 @@
 			    desc_to_gpio(desc));
 	if (IS_ERR(dev)) {
 		status = PTR_ERR(dev);
+			if (!status)
+				status = device_create_file(dev,
+						&dev_attr_pinmux);
+
 		goto fail_unlock;
 	}
 
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index b16c50e..5cc8976 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -26,6 +26,14 @@
 	depends on USB_SUPPORT && USB_ARCH_HAS_HCD
 	select USB
 
+config INTEL_NO_FB_PANIC_NOTIFY
+	bool "FB PANIC NOTIFY disable"
+	depends on DRM
+	default n
+	help
+	  For embeded android system, there is no need to use FB Panic notifier function.
+	  Disable it will make system more efficient.
+
 config DRM_KMS_HELPER
 	tristate
 	depends on DRM
@@ -167,6 +175,26 @@
 	  the driver to bind to PCI devices, which precludes loading things
 	  like intelfb.
 
+config DRM_I915_PRELIMINARY_HW_SUPPORT
+	bool "Enable preliminary support for prerelease Intel hardware by default"
+	depends on DRM_I915
+	help
+	  Choose this option if you have prerelease Intel hardware and want the
+	  i915 driver to support it by default.  You can enable such support at
+	  runtime with the module option i915.preliminary_hw_support=1; this
+	  option changes the default for that module option.
+
+	  If in doubt, say "N".
+
+config DRM_I915_SYNC
+	bool "Enable native sync support"
+	depends on SYNC && DRM_I915
+	default y
+	help
+	  Choose this option if you require native sync support within the i915
+	  driver. This requires userspace support to actually make use of this
+	  functionality.
+
 config DRM_MGA
 	tristate "Matrox g200/g400"
 	depends on DRM && PCI
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 1c9f243..1ecbe5b 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -12,7 +12,8 @@
 		drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
 		drm_crtc.o drm_modes.o drm_edid.o \
 		drm_info.o drm_debugfs.o drm_encoder_slave.o \
-		drm_trace_points.o drm_global.o drm_prime.o
+		drm_trace_points.o drm_global.o drm_prime.o \
+		drm_rect.o
 
 drm-$(CONFIG_COMPAT) += drm_ioc32.o
 drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 02e52d5..b6b7d70 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -177,7 +177,7 @@
 
 static inline void ast_open_key(struct ast_private *ast)
 {
-	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xA1, 0xFF, 0x04);
+	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x80, 0xA8);
 }
 
 #define AST_VIDMEM_SIZE_8M    0x00800000
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 09da339..d5902e2 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -348,6 +348,7 @@
 
 	astbo->gem.driver_private = NULL;
 	astbo->bo.bdev = &ast->ttm.bdev;
+	astbo->bo.bdev->dev_mapping = dev->dev_mapping;
 
 	ast_ttm_placement(astbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
 
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 2ed8cfc..c18faff 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -353,6 +353,7 @@
 
 	cirrusbo->gem.driver_private = NULL;
 	cirrusbo->bo.bdev = &cirrus->ttm.bdev;
+	cirrusbo->bo.bdev->dev_mapping = dev->dev_mapping;
 
 	cirrus_ttm_placement(cirrusbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
 
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 0128147..5a4dbb4 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -210,12 +210,16 @@
 		if (drm_core_has_MTRR(dev)) {
 			if (map->type == _DRM_FRAME_BUFFER ||
 			    (map->flags & _DRM_WRITE_COMBINING)) {
-				map->mtrr = mtrr_add(map->offset, map->size,
-						     MTRR_TYPE_WRCOMB, 1);
+				map->mtrr =
+					arch_phys_wc_add(map->offset, map->size);
 			}
 		}
 		if (map->type == _DRM_REGISTERS) {
-			map->handle = ioremap(map->offset, map->size);
+			if (map->flags & _DRM_WRITE_COMBINING)
+				map->handle = ioremap_wc(map->offset,
+							 map->size);
+			else
+				map->handle = ioremap(map->offset, map->size);
 			if (!map->handle) {
 				kfree(map);
 				return -ENOMEM;
@@ -410,6 +414,15 @@
 
 	/* avoid a warning on 64-bit, this casting isn't very nice, but the API is set so too late */
 	map->handle = (void *)(unsigned long)maplist->user_token;
+
+	/*
+	 * It appears that there are no users of this value whatsoever --
+	 * drmAddMap just discards it.  Let's not encourage its use.
+	 * (Keeping drm_addmap_core's returned mtrr value would be wrong --
+	 *  it's not a real mtrr index anymore.)
+	 */
+	map->mtrr = -1;
+
 	return 0;
 }
 
@@ -451,11 +464,8 @@
 		iounmap(map->handle);
 		/* FALLTHROUGH */
 	case _DRM_FRAME_BUFFER:
-		if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
-			int retcode;
-			retcode = mtrr_del(map->mtrr, map->offset, map->size);
-			DRM_DEBUG("mtrr_del=%d\n", retcode);
-		}
+		if (drm_core_has_MTRR(dev))
+			arch_phys_wc_del(map->mtrr);
 		break;
 	case _DRM_SHM:
 		vfree(map->handle);
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index e7e9242..e84754c 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -29,6 +29,7 @@
  *      Dave Airlie <airlied@linux.ie>
  *      Jesse Barnes <jesse.barnes@intel.com>
  */
+#include <linux/ctype.h>
 #include <linux/list.h>
 #include <linux/slab.h>
 #include <linux/export.h>
@@ -91,7 +92,7 @@
 
 /* Avoid boilerplate.  I'm tired of typing. */
 #define DRM_ENUM_NAME_FN(fnname, list)				\
-	char *fnname(int val)					\
+	const char *fnname(int val)				\
 	{							\
 		int i;						\
 		for (i = 0; i < ARRAY_SIZE(list); i++) {	\
@@ -104,11 +105,13 @@
 /*
  * Global properties
  */
-static struct drm_prop_enum_list drm_dpms_enum_list[] =
+static const struct drm_prop_enum_list drm_dpms_enum_list[] =
 {	{ DRM_MODE_DPMS_ON, "On" },
 	{ DRM_MODE_DPMS_STANDBY, "Standby" },
 	{ DRM_MODE_DPMS_SUSPEND, "Suspend" },
-	{ DRM_MODE_DPMS_OFF, "Off" }
+	{ DRM_MODE_DPMS_OFF, "Off" },
+	{ DRM_MODE_DPMS_ASYNC_ON, "AsyncOn" },
+	{ DRM_MODE_DPMS_ASYNC_OFF, "AsyncOff" }
 };
 
 DRM_ENUM_NAME_FN(drm_get_dpms_name, drm_dpms_enum_list)
@@ -116,7 +119,7 @@
 /*
  * Optional properties
  */
-static struct drm_prop_enum_list drm_scaling_mode_enum_list[] =
+static const struct drm_prop_enum_list drm_scaling_mode_enum_list[] =
 {
 	{ DRM_MODE_SCALE_NONE, "None" },
 	{ DRM_MODE_SCALE_FULLSCREEN, "Full" },
@@ -124,7 +127,7 @@
 	{ DRM_MODE_SCALE_ASPECT, "Full aspect" },
 };
 
-static struct drm_prop_enum_list drm_dithering_mode_enum_list[] =
+static const struct drm_prop_enum_list drm_dithering_mode_enum_list[] =
 {
 	{ DRM_MODE_DITHERING_OFF, "Off" },
 	{ DRM_MODE_DITHERING_ON, "On" },
@@ -134,7 +137,7 @@
 /*
  * Non-global properties, but "required" for certain connectors.
  */
-static struct drm_prop_enum_list drm_dvi_i_select_enum_list[] =
+static const struct drm_prop_enum_list drm_dvi_i_select_enum_list[] =
 {
 	{ DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
 	{ DRM_MODE_SUBCONNECTOR_DVID,      "DVI-D"     }, /* DVI-I  */
@@ -143,7 +146,7 @@
 
 DRM_ENUM_NAME_FN(drm_get_dvi_i_select_name, drm_dvi_i_select_enum_list)
 
-static struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] =
+static const struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] =
 {
 	{ DRM_MODE_SUBCONNECTOR_Unknown,   "Unknown"   }, /* DVI-I and TV-out */
 	{ DRM_MODE_SUBCONNECTOR_DVID,      "DVI-D"     }, /* DVI-I  */
@@ -153,7 +156,7 @@
 DRM_ENUM_NAME_FN(drm_get_dvi_i_subconnector_name,
 		 drm_dvi_i_subconnector_enum_list)
 
-static struct drm_prop_enum_list drm_tv_select_enum_list[] =
+static const struct drm_prop_enum_list drm_tv_select_enum_list[] =
 {
 	{ DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
 	{ DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
@@ -164,7 +167,7 @@
 
 DRM_ENUM_NAME_FN(drm_get_tv_select_name, drm_tv_select_enum_list)
 
-static struct drm_prop_enum_list drm_tv_subconnector_enum_list[] =
+static const struct drm_prop_enum_list drm_tv_subconnector_enum_list[] =
 {
 	{ DRM_MODE_SUBCONNECTOR_Unknown,   "Unknown"   }, /* DVI-I and TV-out */
 	{ DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
@@ -176,7 +179,7 @@
 DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
 		 drm_tv_subconnector_enum_list)
 
-static struct drm_prop_enum_list drm_dirty_info_enum_list[] = {
+static const struct drm_prop_enum_list drm_dirty_info_enum_list[] = {
 	{ DRM_MODE_DIRTY_OFF,      "Off"      },
 	{ DRM_MODE_DIRTY_ON,       "On"       },
 	{ DRM_MODE_DIRTY_ANNOTATE, "Annotate" },
@@ -184,7 +187,7 @@
 
 struct drm_conn_prop_enum_list {
 	int type;
-	char *name;
+	const char *name;
 	int count;
 };
 
@@ -192,34 +195,48 @@
  * Connector and encoder types.
  */
 static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
-{	{ DRM_MODE_CONNECTOR_Unknown, "Unknown", 0 },
-	{ DRM_MODE_CONNECTOR_VGA, "VGA", 0 },
-	{ DRM_MODE_CONNECTOR_DVII, "DVI-I", 0 },
-	{ DRM_MODE_CONNECTOR_DVID, "DVI-D", 0 },
-	{ DRM_MODE_CONNECTOR_DVIA, "DVI-A", 0 },
-	{ DRM_MODE_CONNECTOR_Composite, "Composite", 0 },
-	{ DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO", 0 },
-	{ DRM_MODE_CONNECTOR_LVDS, "LVDS", 0 },
-	{ DRM_MODE_CONNECTOR_Component, "Component", 0 },
-	{ DRM_MODE_CONNECTOR_9PinDIN, "DIN", 0 },
-	{ DRM_MODE_CONNECTOR_DisplayPort, "DP", 0 },
-	{ DRM_MODE_CONNECTOR_HDMIA, "HDMI-A", 0 },
-	{ DRM_MODE_CONNECTOR_HDMIB, "HDMI-B", 0 },
-	{ DRM_MODE_CONNECTOR_TV, "TV", 0 },
-	{ DRM_MODE_CONNECTOR_eDP, "eDP", 0 },
-	{ DRM_MODE_CONNECTOR_VIRTUAL, "Virtual", 0},
+{	{ DRM_MODE_CONNECTOR_Unknown, "Unknown" },
+	{ DRM_MODE_CONNECTOR_VGA, "VGA" },
+	{ DRM_MODE_CONNECTOR_DVII, "DVI-I" },
+	{ DRM_MODE_CONNECTOR_DVID, "DVI-D" },
+	{ DRM_MODE_CONNECTOR_DVIA, "DVI-A" },
+	{ DRM_MODE_CONNECTOR_Composite, "Composite" },
+	{ DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO" },
+	{ DRM_MODE_CONNECTOR_LVDS, "LVDS" },
+	{ DRM_MODE_CONNECTOR_Component, "Component" },
+	{ DRM_MODE_CONNECTOR_9PinDIN, "DIN" },
+	{ DRM_MODE_CONNECTOR_DisplayPort, "DP" },
+	{ DRM_MODE_CONNECTOR_HDMIA, "HDMI-A" },
+	{ DRM_MODE_CONNECTOR_HDMIB, "HDMI-B" },
+	{ DRM_MODE_CONNECTOR_TV, "TV" },
+	{ DRM_MODE_CONNECTOR_eDP, "eDP" },
+	{ DRM_MODE_CONNECTOR_DSI, "DSI" },
+	{ DRM_MODE_CONNECTOR_VIRTUAL, "Virtual" },
 };
 
-static struct drm_prop_enum_list drm_encoder_enum_list[] =
+static const struct drm_prop_enum_list drm_encoder_enum_list[] =
 {	{ DRM_MODE_ENCODER_NONE, "None" },
 	{ DRM_MODE_ENCODER_DAC, "DAC" },
 	{ DRM_MODE_ENCODER_TMDS, "TMDS" },
 	{ DRM_MODE_ENCODER_LVDS, "LVDS" },
 	{ DRM_MODE_ENCODER_TVDAC, "TV" },
+	{ DRM_MODE_ENCODER_DSI, "DSI" },
 	{ DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
 };
 
-char *drm_get_encoder_name(struct drm_encoder *encoder)
+struct drm_mode_object *gobj;
+uint64_t gvalue;
+
+static void drm_dpms_execute(struct work_struct *work)
+{
+	struct drm_connector *connector = obj_to_connector(gobj);
+	(*connector->funcs->dpms)(connector, (int)gvalue);
+	drm_object_property_set_value(&connector->base,
+		connector->dev->mode_config.dpms_property, gvalue);
+}
+EXPORT_SYMBOL(drm_dpms_execute);
+
+const char *drm_get_encoder_name(const struct drm_encoder *encoder)
 {
 	static char buf[32];
 
@@ -230,7 +247,7 @@
 }
 EXPORT_SYMBOL(drm_get_encoder_name);
 
-char *drm_get_connector_name(struct drm_connector *connector)
+const char *drm_get_connector_name(const struct drm_connector *connector)
 {
 	static char buf[32];
 
@@ -241,7 +258,7 @@
 }
 EXPORT_SYMBOL(drm_get_connector_name);
 
-char *drm_get_connector_status_name(enum drm_connector_status status)
+const char *drm_get_connector_status_name(enum drm_connector_status status)
 {
 	if (status == connector_status_connected)
 		return "connected";
@@ -252,6 +269,28 @@
 }
 EXPORT_SYMBOL(drm_get_connector_status_name);
 
+static char printable_char(int c)
+{
+	return isascii(c) && isprint(c) ? c : '?';
+}
+
+const char *drm_get_format_name(uint32_t format)
+{
+	static char buf[32];
+
+	snprintf(buf, sizeof(buf),
+		 "%c%c%c%c %s-endian (0x%08x)",
+		 printable_char(format & 0xff),
+		 printable_char((format >> 8) & 0xff),
+		 printable_char((format >> 16) & 0xff),
+		 printable_char((format >> 24) & 0x7f),
+		 format & DRM_FORMAT_BIG_ENDIAN ? "big" : "little",
+		 format);
+
+	return buf;
+}
+EXPORT_SYMBOL(drm_get_format_name);
+
 /**
  * drm_mode_object_get - allocate a new modeset identifier
  * @dev: DRM device
@@ -569,16 +608,8 @@
 		}
 
 		list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
-			if (plane->fb == fb) {
-				/* should turn off the crtc */
-				ret = plane->funcs->disable_plane(plane);
-				if (ret)
-					DRM_ERROR("failed to disable plane with busy fb\n");
-				/* disconnect the plane from the fb and crtc: */
-				__drm_framebuffer_unreference(plane->fb);
-				plane->fb = NULL;
-				plane->crtc = NULL;
-			}
+			if (plane->fb == fb)
+				drm_plane_force_disable(plane);
 		}
 		drm_modeset_unlock_all(dev);
 	}
@@ -593,7 +624,7 @@
  * @crtc: CRTC object to init
  * @funcs: callbacks for the new CRTC
  *
- * Inits a new object created as base part of an driver crtc object.
+ * Inits a new object created as base part of a driver crtc object.
  *
  * RETURNS:
  * Zero on success, error code on failure.
@@ -608,6 +639,7 @@
 	crtc->invert_dimensions = false;
 
 	drm_modeset_lock_all(dev);
+	INIT_DELAYED_WORK(&dev->mode_config.dpms_work, drm_dpms_execute);
 	mutex_init(&crtc->mutex);
 	mutex_lock_nest_lock(&crtc->mutex, &dev->mode_config.mutex);
 
@@ -628,11 +660,12 @@
 EXPORT_SYMBOL(drm_crtc_init);
 
 /**
- * drm_crtc_cleanup - Cleans up the core crtc usage.
+ * drm_crtc_cleanup - Clean up the core crtc usage
  * @crtc: CRTC to cleanup
  *
- * Cleanup @crtc. Removes from drm modesetting space
- * does NOT free object, caller does that.
+ * This function cleans up @crtc and removes it from the DRM mode setting
+ * core. Note that the function does *not* free the crtc structure itself,
+ * this is the responsibility of the caller.
  */
 void drm_crtc_cleanup(struct drm_crtc *crtc)
 {
@@ -657,7 +690,7 @@
 void drm_mode_probed_add(struct drm_connector *connector,
 			 struct drm_display_mode *mode)
 {
-	list_add(&mode->head, &connector->probed_modes);
+	list_add_tail(&mode->head, &connector->probed_modes);
 }
 EXPORT_SYMBOL(drm_mode_probed_add);
 
@@ -803,6 +836,21 @@
 }
 EXPORT_SYMBOL(drm_encoder_cleanup);
 
+/**
+ * drm_plane_init - Initialise a new plane object
+ * @dev: DRM device
+ * @plane: plane object to init
+ * @possible_crtcs: bitmask of possible CRTCs
+ * @funcs: callbacks for the new plane
+ * @formats: array of supported formats (%DRM_FORMAT_*)
+ * @format_count: number of elements in @formats
+ * @priv: plane is private (hidden from userspace)?
+ *
+ * Inits a new object created as base part of a driver plane object.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure.
+ */
 int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
 		   unsigned long possible_crtcs,
 		   const struct drm_plane_funcs *funcs,
@@ -851,6 +899,14 @@
 }
 EXPORT_SYMBOL(drm_plane_init);
 
+/**
+ * drm_plane_cleanup - Clean up the core plane usage
+ * @plane: plane to cleanup
+ *
+ * This function cleans up @plane and removes it from the DRM mode setting
+ * core. Note that the function does *not* free the plane structure itself,
+ * this is the responsibility of the caller.
+ */
 void drm_plane_cleanup(struct drm_plane *plane)
 {
 	struct drm_device *dev = plane->dev;
@@ -868,6 +924,32 @@
 EXPORT_SYMBOL(drm_plane_cleanup);
 
 /**
+ * drm_plane_force_disable - Forcibly disable a plane
+ * @plane: plane to disable
+ *
+ * Forces the plane to be disabled.
+ *
+ * Used when the plane's current framebuffer is destroyed,
+ * and when restoring fbdev mode.
+ */
+void drm_plane_force_disable(struct drm_plane *plane)
+{
+	int ret;
+
+	if (!plane->fb)
+		return;
+
+	ret = plane->funcs->disable_plane(plane);
+	if (ret)
+		DRM_ERROR("failed to disable plane with busy fb\n");
+	/* disconnect the plane from the fb and crtc: */
+	__drm_framebuffer_unreference(plane->fb);
+	plane->fb = NULL;
+	plane->crtc = NULL;
+}
+EXPORT_SYMBOL(drm_plane_force_disable);
+
+/**
  * drm_mode_create - create a new display mode
  * @dev: DRM device
  *
@@ -1194,6 +1276,9 @@
 	out->vrefresh = in->vrefresh;
 	out->flags = in->flags;
 	out->type = in->type;
+#if defined(CONFIG_DRM_I915)
+	out->picture_aspect_ratio = in->picture_aspect_ratio;
+#endif
 	strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
 	out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
 }
@@ -1229,6 +1314,9 @@
 	out->vrefresh = in->vrefresh;
 	out->flags = in->flags;
 	out->type = in->type;
+#if defined(CONFIG_DRM_I915)
+	out->picture_aspect_ratio = in->picture_aspect_ratio;
+#endif
 	strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
 	out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
 
@@ -1740,7 +1828,7 @@
 
 	plane_resp->plane_id = plane->base.id;
 	plane_resp->possible_crtcs = plane->possible_crtcs;
-	plane_resp->gamma_size = plane->gamma_size;
+	plane_resp->gamma_size = 0;
 
 	/*
 	 * This ioctl is called twice, once to determine how much space is
@@ -1780,8 +1868,10 @@
 	struct drm_plane *plane;
 	struct drm_crtc *crtc;
 	struct drm_framebuffer *fb = NULL, *old_fb = NULL;
+	struct drm_pending_vblank_event *e = NULL;
 	int ret = 0;
 	unsigned int fb_width, fb_height;
+	unsigned long flags;
 	int i;
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
@@ -1834,7 +1924,8 @@
 		if (fb->pixel_format == plane->format_types[i])
 			break;
 	if (i == plane->format_count) {
-		DRM_DEBUG_KMS("Invalid pixel format 0x%08x\n", fb->pixel_format);
+		DRM_DEBUG_KMS("Invalid pixel format %s\n",
+			      drm_get_format_name(fb->pixel_format));
 		ret = -EINVAL;
 		goto out;
 	}
@@ -1874,16 +1965,48 @@
 	}
 
 	drm_modeset_lock_all(dev);
+
+	if (plane_req->flags & DRM_MODE_PAGE_FLIP_EVENT) {
+		ret = -ENOMEM;
+		spin_lock_irqsave(&dev->event_lock, flags);
+		if (file_priv->event_space < sizeof(e->event)) {
+			spin_unlock_irqrestore(&dev->event_lock, flags);
+			goto out;
+		}
+		file_priv->event_space -= sizeof(e->event);
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+		e = kzalloc(sizeof(*e), GFP_KERNEL);
+		if (e == NULL) {
+			spin_lock_irqsave(&dev->event_lock, flags);
+			file_priv->event_space += sizeof(e->event);
+			spin_unlock_irqrestore(&dev->event_lock, flags);
+			goto out;
+		}
+
+		e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
+		e->event.base.length = sizeof(e->event);
+		e->event.user_data = plane_req->user_data;
+		e->base.event = &e->event.base;
+		e->base.file_priv = file_priv;
+		e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
+	}
+
 	ret = plane->funcs->update_plane(plane, crtc, fb,
 					 plane_req->crtc_x, plane_req->crtc_y,
 					 plane_req->crtc_w, plane_req->crtc_h,
 					 plane_req->src_x, plane_req->src_y,
-					 plane_req->src_w, plane_req->src_h);
+					 plane_req->src_w, plane_req->src_h, e);
 	if (!ret) {
 		old_fb = plane->fb;
 		plane->crtc = crtc;
 		plane->fb = fb;
 		fb = NULL;
+	} else if (plane_req->flags & DRM_MODE_PAGE_FLIP_EVENT) {
+		spin_lock_irqsave(&dev->event_lock, flags);
+		file_priv->event_space += sizeof(e->event);
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+		kfree(e);
+		old_fb = NULL;
 	}
 	drm_modeset_unlock_all(dev);
 
@@ -1906,18 +2029,31 @@
 int drm_mode_set_config_internal(struct drm_mode_set *set)
 {
 	struct drm_crtc *crtc = set->crtc;
-	struct drm_framebuffer *fb, *old_fb;
+	struct drm_framebuffer *fb;
+	struct drm_crtc *tmp;
 	int ret;
 
-	old_fb = crtc->fb;
+	/*
+	 * NOTE: ->set_config can also disable other crtcs (if we steal all
+	 * connectors from it), hence we need to refcount the fbs across all
+	 * crtcs. Atomic modeset will have saner semantics ...
+	 */
+	list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head)
+		tmp->old_fb = tmp->fb;
+
 	fb = set->fb;
 
 	ret = crtc->funcs->set_config(set);
 	if (ret == 0) {
-		if (old_fb)
-			drm_framebuffer_unreference(old_fb);
-		if (fb)
-			drm_framebuffer_reference(fb);
+		/* crtc->fb must be updated by ->set_config, enforces this. */
+		WARN_ON(fb != crtc->fb);
+	}
+
+	list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head) {
+		if (tmp->fb)
+			drm_framebuffer_reference(tmp->fb);
+		if (tmp->old_fb)
+			drm_framebuffer_unreference(tmp->old_fb);
 	}
 
 	return ret;
@@ -2099,10 +2235,10 @@
 	return ret;
 }
 
-int drm_mode_cursor_ioctl(struct drm_device *dev,
-			void *data, struct drm_file *file_priv)
+static int drm_mode_cursor_common(struct drm_device *dev,
+				  struct drm_mode_cursor2 *req,
+				  struct drm_file *file_priv)
 {
-	struct drm_mode_cursor *req = data;
 	struct drm_mode_object *obj;
 	struct drm_crtc *crtc;
 	int ret = 0;
@@ -2122,13 +2258,17 @@
 
 	mutex_lock(&crtc->mutex);
 	if (req->flags & DRM_MODE_CURSOR_BO) {
-		if (!crtc->funcs->cursor_set) {
+		if (!crtc->funcs->cursor_set && !crtc->funcs->cursor_set2) {
 			ret = -ENXIO;
 			goto out;
 		}
 		/* Turns off the cursor if handle is 0 */
-		ret = crtc->funcs->cursor_set(crtc, file_priv, req->handle,
-					      req->width, req->height);
+		if (crtc->funcs->cursor_set2)
+			ret = crtc->funcs->cursor_set2(crtc, file_priv, req->handle,
+						      req->width, req->height, req->hot_x, req->hot_y);
+		else
+			ret = crtc->funcs->cursor_set(crtc, file_priv, req->handle,
+						      req->width, req->height);
 	}
 
 	if (req->flags & DRM_MODE_CURSOR_MOVE) {
@@ -2143,6 +2283,25 @@
 	mutex_unlock(&crtc->mutex);
 
 	return ret;
+
+}
+int drm_mode_cursor_ioctl(struct drm_device *dev,
+			void *data, struct drm_file *file_priv)
+{
+	struct drm_mode_cursor *req = data;
+	struct drm_mode_cursor2 new_req;
+
+	memcpy(&new_req, req, sizeof(struct drm_mode_cursor));
+	new_req.hot_x = new_req.hot_y = 0;
+
+	return drm_mode_cursor_common(dev, &new_req, file_priv);
+}
+
+int drm_mode_cursor2_ioctl(struct drm_device *dev,
+			   void *data, struct drm_file *file_priv)
+{
+	struct drm_mode_cursor2 *req = data;
+	return drm_mode_cursor_common(dev, req, file_priv);
 }
 
 /* Original addfb only supported RGB formats, so figure out which one */
@@ -2312,7 +2471,8 @@
 
 	ret = format_check(r);
 	if (ret) {
-		DRM_DEBUG_KMS("bad framebuffer format 0x%08x\n", r->pixel_format);
+		DRM_DEBUG_KMS("bad framebuffer format %s\n",
+			      drm_get_format_name(r->pixel_format));
 		return ret;
 	}
 
@@ -2501,10 +2661,22 @@
 	r->depth = fb->depth;
 	r->bpp = fb->bits_per_pixel;
 	r->pitch = fb->pitches[0];
-	if (fb->funcs->create_handle)
-		ret = fb->funcs->create_handle(fb, file_priv, &r->handle);
-	else
+	if (fb->funcs->create_handle) {
+		if (file_priv->is_master || capable(CAP_SYS_ADMIN)) {
+			ret = fb->funcs->create_handle(fb, file_priv,
+						       &r->handle);
+		} else {
+			/* GET_FB() is an unprivileged ioctl so we must not
+			 * return a buffer-handle to non-master processes! For
+			 * backwards-compatibility reasons, we cannot make
+			 * GET_FB() privileged, so just return an invalid handle
+			 * for non-masters. */
+			r->handle = 0;
+			ret = 0;
+		}
+	} else {
 		ret = -ENODEV;
+	}
 
 	drm_framebuffer_unreference(fb);
 
@@ -3084,12 +3256,27 @@
 					   uint64_t value)
 {
 	int ret = -EINVAL;
+
 	struct drm_connector *connector = obj_to_connector(obj);
+	struct drm_device *dev = connector->dev;
+	gobj = obj;
+	if (value == DRM_MODE_DPMS_ASYNC_ON)
+		gvalue = DRM_MODE_DPMS_ON;
+	else if (value == DRM_MODE_DPMS_ASYNC_OFF)
+		gvalue = DRM_MODE_DPMS_OFF;
 
 	/* Do DPMS ourselves */
 	if (property == connector->dev->mode_config.dpms_property) {
-		if (connector->funcs->dpms)
-			(*connector->funcs->dpms)(connector, (int)value);
+		if (connector->funcs->dpms) {
+			if ((value == DRM_MODE_DPMS_ASYNC_ON) ||
+				(value == DRM_MODE_DPMS_ASYNC_OFF)) {
+				DRM_ERROR("ASYNC DPMS flag ON\n");
+				queue_delayed_work(system_nrt_wq,
+					&dev->mode_config.dpms_work, 0);
+			} else
+				(*connector->funcs->dpms)(connector,
+					(int)value);
+		}
 		ret = 0;
 	} else if (connector->funcs->set_property)
 		ret = connector->funcs->set_property(connector, property, value);
@@ -3097,6 +3284,7 @@
 	/* store the property value if successful */
 	if (!ret)
 		drm_object_property_set_value(&connector->base, property, value);
+
 	return ret;
 }
 
@@ -3226,7 +3414,7 @@
 	switch (arg_obj->type) {
 	case DRM_MODE_OBJECT_CONNECTOR:
 		ret = drm_mode_connector_set_obj_prop(arg_obj, property,
-						      arg->value);
+				arg->value);
 		break;
 	case DRM_MODE_OBJECT_CRTC:
 		ret = drm_mode_crtc_set_obj_prop(arg_obj, property, arg->value);
@@ -3422,6 +3610,7 @@
 		 * due to a hotplug event, that userspace has not
 		 * yet discovered.
 		 */
+		DRM_ERROR("fb = NULL");
 		ret = -EBUSY;
 		goto out;
 	}
@@ -3439,10 +3628,10 @@
 	if (crtc->invert_dimensions)
 		swap(hdisplay, vdisplay);
 
-	if (hdisplay > fb->width ||
+	if ((hdisplay > fb->width ||
 	    vdisplay > fb->height ||
 	    crtc->x > fb->width - hdisplay ||
-	    crtc->y > fb->height - vdisplay) {
+	    crtc->y > fb->height - vdisplay) && !crtc->panning_en) {
 		DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
 			      fb->width, fb->height, hdisplay, vdisplay, crtc->x, crtc->y,
 			      crtc->invert_dimensions ? " (inverted)" : "");
@@ -3450,11 +3639,8 @@
 		goto out;
 	}
 
-	if (crtc->fb->pixel_format != fb->pixel_format) {
-		DRM_DEBUG_KMS("Page flip is not allowed to change frame buffer format.\n");
-		ret = -EINVAL;
-		goto out;
-	}
+	if (crtc->fb->pixel_format != fb->pixel_format)
+		DRM_DEBUG_KMS(" Allow Page flip to change fb format.\n");
 
 	if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
 		ret = -ENOMEM;
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
old mode 100644
new mode 100755
index ed1334e..c111826
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -189,13 +189,14 @@
 	if (list_empty(&connector->modes))
 		return 0;
 
+	list_for_each_entry(mode, &connector->modes, head)
+		mode->vrefresh = drm_mode_vrefresh(mode);
+
 	drm_mode_sort(&connector->modes);
 
 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] probed modes :\n", connector->base.id,
 			drm_get_connector_name(connector));
 	list_for_each_entry(mode, &connector->modes, head) {
-		mode->vrefresh = drm_mode_vrefresh(mode);
-
 		drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
 		drm_mode_debug_printmodeline(mode);
 	}
@@ -220,6 +221,13 @@
 {
 	struct drm_connector *connector;
 	struct drm_device *dev = encoder->dev;
+	struct drm_encoder_helper_funcs *encoder_funcs =
+						encoder->helper_private;
+
+	if (encoder_funcs->inuse)
+		if (encoder_funcs->inuse(encoder))
+			return true;
+
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
 		if (connector->encoder == encoder)
 			return true;
@@ -424,6 +432,14 @@
 		if (encoder->crtc != crtc)
 			continue;
 		encoder_funcs = encoder->helper_private;
+
+		/* HDMI Encoder actually being used as of now for Audio */
+		if (encoder_funcs->inuse)
+			if (encoder_funcs->inuse(encoder)) {
+				ret = true;
+				goto done;
+			}
+
 		if (!(ret = encoder_funcs->mode_fixup(encoder, mode,
 						      adjusted_mode))) {
 			DRM_DEBUG_KMS("Encoder fixup failed\n");
@@ -564,14 +580,13 @@
 
 	DRM_DEBUG_KMS("\n");
 
-	if (!set)
-		return -EINVAL;
+	BUG_ON(!set);
+	BUG_ON(!set->crtc);
+	BUG_ON(!set->crtc->helper_private);
 
-	if (!set->crtc)
-		return -EINVAL;
-
-	if (!set->crtc->helper_private)
-		return -EINVAL;
+	/* Enforce sane interface api - has been abused by the fb helper. */
+	BUG_ON(!set->mode && set->fb);
+	BUG_ON(set->fb && set->num_connectors == 0);
 
 	crtc_funcs = set->crtc->helper_private;
 
@@ -645,11 +660,6 @@
 			mode_changed = true;
 		} else if (set->fb == NULL) {
 			mode_changed = true;
-		} else if (set->fb->depth != set->crtc->fb->depth) {
-			mode_changed = true;
-		} else if (set->fb->bits_per_pixel !=
-			   set->crtc->fb->bits_per_pixel) {
-			mode_changed = true;
 		} else if (set->fb->pixel_format !=
 			   set->crtc->fb->pixel_format) {
 			mode_changed = true;
@@ -682,6 +692,11 @@
 					/* don't break so fail path works correct */
 					fail = 1;
 				break;
+
+				if (connector->dpms != DRM_MODE_DPMS_ON) {
+					DRM_DEBUG_KMS("connector dpms not on, full mode switch\n");
+					mode_changed = true;
+				}
 			}
 		}
 
@@ -896,7 +911,6 @@
 						     drm_helper_choose_crtc_dpms(crtc));
 		}
 	}
-
 	return;
 }
 EXPORT_SYMBOL(drm_helper_connector_dpms);
@@ -966,10 +980,11 @@
 
 void drm_kms_helper_hotplug_event(struct drm_device *dev)
 {
-	/* send a uevent + call fbdev */
-	drm_sysfs_hotplug_event(dev);
+	/* call fbdev then send a uevent*/
 	if (dev->mode_config.funcs->output_poll_changed)
 		dev->mode_config.funcs->output_poll_changed(dev);
+
+	drm_sysfs_hotplug_event(dev);
 }
 EXPORT_SYMBOL(drm_kms_helper_hotplug_event);
 
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 9cc247f..55ccf01 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -166,6 +166,7 @@
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 };
 
 #define DRM_CORE_IOCTL_COUNT	ARRAY_SIZE( drm_ioctls )
@@ -358,6 +359,61 @@
 	return err;
 }
 
+
+
+/** Prevent new IOCTLs from starting.
+ */
+void drm_halt(struct drm_device *dev)
+{
+	DRM_DEBUG("Halt request\n");
+	/* Hold the mutex to prevent the ioctl_count incrementing
+	* while halt_count == 0 in drm_ioctl */
+	mutex_lock(&dev->halt_mutex);
+	atomic_inc(&dev->halt_count);
+	mutex_unlock(&dev->halt_mutex);
+}
+EXPORT_SYMBOL(drm_halt);
+
+
+/** Wait up to timeout milliseconds for active IOCTLs to complete.
+ * Note: drm_continue() must be called to allow new
+ *       IOCTLs even if this call timeout.
+ */
+int drm_wait_idle(struct drm_device *dev, unsigned timeout)
+{
+	int rc;
+
+	/* Wait for all active IOCTLs to exit */
+	rc = wait_event_interruptible_timeout(dev->halt_queue,
+		(atomic_read(&dev->ioctl_count) == 0),
+		msecs_to_jiffies(timeout));
+
+	if (rc == 0)
+		return -ETIMEDOUT;
+	else if (rc < 0)
+		return rc;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_wait_idle);
+
+
+/** Release a previous halt request.
+ * Notifies sleeping IOCTLs that they can continue
+ * once the refcount reaches 0.
+ */
+void drm_continue(struct drm_device *dev)
+{
+	mutex_lock(&dev->halt_mutex);
+	WARN_ON(atomic_read(&dev->halt_count) == 0);
+	if (atomic_dec_return(&dev->halt_count) == 0)
+		wake_up_all(&dev->ioctl_queue);
+	mutex_unlock(&dev->halt_mutex);
+	DRM_DEBUG("Continue\n");
+}
+EXPORT_SYMBOL(drm_continue);
+
+
 /**
  * Called whenever a process performs an ioctl on /dev/drm.
  *
@@ -382,14 +438,50 @@
 	char stack_kdata[128];
 	char *kdata = NULL;
 	unsigned int usize, asize;
+	unsigned ready = 0;
 
 	dev = file_priv->minor->dev;
 
 	if (drm_device_is_unplugged(dev))
 		return -ENODEV;
 
-	atomic_inc(&dev->ioctl_count);
-	atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
+	while (!ready) {
+		/* halt_mutex ensures that ioctl_count can only increment
+		* whilst halt_count == 0. Without this we could get
+		* the following scenario:
+		*
+		*	drm_ioctl:	halt_count == 0 ? --> YES
+		*	    drm_halt:	    halt_count++
+		*	    drm_wait_idle:  ioctl_count == 0 ? --> YES
+		*	drm_ioctl:	ioctl_count++
+		*	    drm_wait_idle:  return "idle" to caller
+		*	drm_ioctl:	ioctl continues executing
+		*
+		* In the above scenario drm_wait_idle thinks we are
+		* halted with no active ioctls but drm_ioctl
+		* thinks we are not halted so it allows the current
+		* ioctl to execute! The mutex protects against this
+		* concurrency problem.
+		*/
+		mutex_lock(&dev->halt_mutex);
+		if (atomic_read(&dev->halt_count) == 0) {
+			atomic_inc(&dev->ioctl_count);
+			ready = 1;
+		}
+		mutex_unlock(&dev->halt_mutex);
+
+		if (!ready) {
+			retcode = wait_event_interruptible(dev->ioctl_queue,
+					(atomic_read(&dev->halt_count) == 0));
+
+			if (retcode != 0)
+				return retcode;
+
+			/* OK to proceed. Set retcode back to default */
+			retcode = -EINVAL;
+		}
+	}
+
 	++file_priv->ioctl_count;
 
 	if ((nr >= DRM_CORE_IOCTL_COUNT) &&
@@ -406,9 +498,16 @@
 		cmd = ioctl->cmd_drv;
 	}
 	else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
+		u32 drv_size;
+
 		ioctl = &drm_ioctls[nr];
-		cmd = ioctl->cmd;
+
+		drv_size = _IOC_SIZE(ioctl->cmd);
 		usize = asize = _IOC_SIZE(cmd);
+		if (drv_size > asize)
+			asize = drv_size;
+
+		cmd = ioctl->cmd;
 	} else
 		goto err_i1;
 
@@ -423,14 +522,24 @@
 	if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl)
 		func = dev->driver->dma_ioctl;
 
+	/* workaround drm authentification issue on Android
+	* don't need following check for DRM_AUTH
+	* otherwise maybe it will be reset before the check
+	*/
+	file_priv->authenticated = 1;
+
 	if (!func) {
 		DRM_DEBUG("no function\n");
 		retcode = -EINVAL;
-	} else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) ||
-		   ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) ||
-		   ((ioctl->flags & DRM_MASTER) && !file_priv->is_master) ||
-		   (!(ioctl->flags & DRM_CONTROL_ALLOW) && (file_priv->minor->type == DRM_MINOR_CONTROL))) {
+	} else if (((ioctl->flags & DRM_ROOT_ONLY) &&
+			   !capable(CAP_SYS_ADMIN)) ||
+		    (!(ioctl->flags & DRM_CONTROL_ALLOW) &&
+			   (file_priv->minor->type == DRM_MINOR_CONTROL))) {
 		retcode = -EACCES;
+		DRM_ERROR("Ioctl check failed for ioctl nr 0x%x"\
+				"for process %s with pid=%d & is root=%d\n",
+				nr, current->comm,
+				task_pid_nr(current), capable(CAP_SYS_ADMIN));
 	} else {
 		if (cmd & (IOC_IN | IOC_OUT)) {
 			if (asize <= sizeof(stack_kdata)) {
@@ -452,7 +561,7 @@
 				retcode = -EFAULT;
 				goto err_i1;
 			}
-		} else
+		} else if (cmd & IOC_OUT)
 			memset(kdata, 0, usize);
 
 		if (ioctl->flags & DRM_UNLOCKED)
@@ -479,7 +588,8 @@
 
 	if (kdata != stack_kdata)
 		kfree(kdata);
-	atomic_dec(&dev->ioctl_count);
+	if (atomic_dec_return(&dev->ioctl_count) == 0)
+		wake_up_all(&dev->halt_queue);
 	if (retcode)
 		DRM_DEBUG("ret = %d\n", retcode);
 	return retcode;
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 9e62bbe..ddafe62 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -34,6 +34,7 @@
 #include <linux/module.h>
 #include <drm/drmP.h>
 #include <drm/drm_edid.h>
+#include <linux/hdmi.h>
 
 #define version_greater(edid, maj, min) \
 	(((edid)->version > (maj)) || \
@@ -82,6 +83,8 @@
 #define LEVEL_GTF2	2
 #define LEVEL_CVT	3
 
+#define DEBUG_RAW_EDID
+
 static struct edid_quirk {
 	char vendor[4];
 	int product_id;
@@ -125,6 +128,9 @@
 
 	/* ViewSonic VA2026w */
 	{ "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING },
+
+	/* Medion MD 30217 PG */
+	{ "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
 };
 
 /*
@@ -135,378 +141,378 @@
 	/* 640x350@85Hz */
 	{ DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
 		   736, 832, 0, 350, 382, 385, 445, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC, 0) },
 	/* 640x400@85Hz */
 	{ DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
 		   736, 832, 0, 400, 401, 404, 445, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC, 0) },
 	/* 720x400@85Hz */
 	{ DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 756,
 		   828, 936, 0, 400, 401, 404, 446, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC, 0) },
 	/* 640x480@60Hz */
 	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
 		   752, 800, 0, 480, 489, 492, 525, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, 0) },
 	/* 640x480@72Hz */
 	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
 		   704, 832, 0, 480, 489, 492, 520, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, 0) },
 	/* 640x480@75Hz */
 	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
 		   720, 840, 0, 480, 481, 484, 500, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, 0) },
 	/* 640x480@85Hz */
 	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 36000, 640, 696,
 		   752, 832, 0, 480, 481, 484, 509, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, 0) },
 	/* 800x600@56Hz */
 	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
 		   896, 1024, 0, 600, 601, 603, 625, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, 0) },
 	/* 800x600@60Hz */
 	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
 		   968, 1056, 0, 600, 601, 605, 628, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, 0) },
 	/* 800x600@72Hz */
 	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
 		   976, 1040, 0, 600, 637, 643, 666, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, 0) },
 	/* 800x600@75Hz */
 	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
 		   896, 1056, 0, 600, 601, 604, 625, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, 0) },
 	/* 800x600@85Hz */
 	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 56250, 800, 832,
 		   896, 1048, 0, 600, 601, 604, 631, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, 0) },
 	/* 800x600@120Hz RB */
 	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 73250, 800, 848,
 		   880, 960, 0, 600, 603, 607, 636, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC, 0) },
 	/* 848x480@60Hz */
 	{ DRM_MODE("848x480", DRM_MODE_TYPE_DRIVER, 33750, 848, 864,
 		   976, 1088, 0, 480, 486, 494, 517, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, 0) },
 	/* 1024x768@43Hz, interlace */
 	{ DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
 		   1208, 1264, 0, 768, 768, 772, 817, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
-			DRM_MODE_FLAG_INTERLACE) },
+			DRM_MODE_FLAG_INTERLACE, 0) },
 	/* 1024x768@60Hz */
 	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
 		   1184, 1344, 0, 768, 771, 777, 806, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, 0) },
 	/* 1024x768@70Hz */
 	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
 		   1184, 1328, 0, 768, 771, 777, 806, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, 0) },
 	/* 1024x768@75Hz */
 	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78750, 1024, 1040,
 		   1136, 1312, 0, 768, 769, 772, 800, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, 0) },
 	/* 1024x768@85Hz */
 	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072,
 		   1168, 1376, 0, 768, 769, 772, 808, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, 0) },
 	/* 1024x768@120Hz RB */
 	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 115500, 1024, 1072,
 		   1104, 1184, 0, 768, 771, 775, 813, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC, 0) },
 	/* 1152x864@75Hz */
 	{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
 		   1344, 1600, 0, 864, 865, 868, 900, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, 0) },
 	/* 1280x768@60Hz RB */
 	{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 68250, 1280, 1328,
 		   1360, 1440, 0, 768, 771, 778, 790, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC, 0) },
 	/* 1280x768@60Hz */
 	{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
 		   1472, 1664, 0, 768, 771, 778, 798, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC, 0) },
 	/* 1280x768@75Hz */
 	{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 102250, 1280, 1360,
 		   1488, 1696, 0, 768, 771, 778, 805, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC, 0) },
 	/* 1280x768@85Hz */
 	{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 117500, 1280, 1360,
 		   1496, 1712, 0, 768, 771, 778, 809, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC, 0) },
 	/* 1280x768@120Hz RB */
 	{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 140250, 1280, 1328,
 		   1360, 1440, 0, 768, 771, 778, 813, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC, 0) },
 	/* 1280x800@60Hz RB */
 	{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 71000, 1280, 1328,
 		   1360, 1440, 0, 800, 803, 809, 823, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC, 0) },
 	/* 1280x800@60Hz */
 	{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
 		   1480, 1680, 0, 800, 803, 809, 831, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC, 0) },
 	/* 1280x800@75Hz */
 	{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 106500, 1280, 1360,
 		   1488, 1696, 0, 800, 803, 809, 838, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC, 0) },
 	/* 1280x800@85Hz */
 	{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 122500, 1280, 1360,
 		   1496, 1712, 0, 800, 803, 809, 843, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC, 0) },
 	/* 1280x800@120Hz RB */
 	{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 146250, 1280, 1328,
 		   1360, 1440, 0, 800, 803, 809, 847, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC, 0) },
 	/* 1280x960@60Hz */
 	{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
 		   1488, 1800, 0, 960, 961, 964, 1000, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, 0) },
 	/* 1280x960@85Hz */
 	{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1344,
 		   1504, 1728, 0, 960, 961, 964, 1011, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, 0) },
 	/* 1280x960@120Hz RB */
 	{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 175500, 1280, 1328,
 		   1360, 1440, 0, 960, 963, 967, 1017, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC, 0) },
 	/* 1280x1024@60Hz */
 	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
 		   1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, 0) },
 	/* 1280x1024@75Hz */
 	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
 		   1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, 0) },
 	/* 1280x1024@85Hz */
 	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 157500, 1280, 1344,
 		   1504, 1728, 0, 1024, 1025, 1028, 1072, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, 0) },
 	/* 1280x1024@120Hz RB */
 	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 187250, 1280, 1328,
 		   1360, 1440, 0, 1024, 1027, 1034, 1084, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC, 0) },
 	/* 1360x768@60Hz */
 	{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
 		   1536, 1792, 0, 768, 771, 777, 795, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, 0) },
 	/* 1360x768@120Hz RB */
 	{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 148250, 1360, 1408,
 		   1440, 1520, 0, 768, 771, 776, 813, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC, 0) },
 	/* 1400x1050@60Hz RB */
 	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 101000, 1400, 1448,
 		   1480, 1560, 0, 1050, 1053, 1057, 1080, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC, 0) },
 	/* 1400x1050@60Hz */
 	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
 		   1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC, 0) },
 	/* 1400x1050@75Hz */
 	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 156000, 1400, 1504,
 		   1648, 1896, 0, 1050, 1053, 1057, 1099, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC, 0) },
 	/* 1400x1050@85Hz */
 	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 179500, 1400, 1504,
 		   1656, 1912, 0, 1050, 1053, 1057, 1105, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC, 0) },
 	/* 1400x1050@120Hz RB */
 	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 208000, 1400, 1448,
 		   1480, 1560, 0, 1050, 1053, 1057, 1112, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC, 0) },
 	/* 1440x900@60Hz RB */
 	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 88750, 1440, 1488,
 		   1520, 1600, 0, 900, 903, 909, 926, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC, 0) },
 	/* 1440x900@60Hz */
 	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
 		   1672, 1904, 0, 900, 903, 909, 934, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC, 0) },
 	/* 1440x900@75Hz */
 	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 136750, 1440, 1536,
 		   1688, 1936, 0, 900, 903, 909, 942, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC, 0) },
 	/* 1440x900@85Hz */
 	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 157000, 1440, 1544,
 		   1696, 1952, 0, 900, 903, 909, 948, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC, 0) },
 	/* 1440x900@120Hz RB */
 	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 182750, 1440, 1488,
 		   1520, 1600, 0, 900, 903, 909, 953, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC, 0) },
 	/* 1600x1200@60Hz */
 	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
 		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, 0) },
 	/* 1600x1200@65Hz */
 	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 175500, 1600, 1664,
 		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, 0) },
 	/* 1600x1200@70Hz */
 	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 189000, 1600, 1664,
 		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, 0) },
 	/* 1600x1200@75Hz */
 	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 202500, 1600, 1664,
 		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, 0) },
 	/* 1600x1200@85Hz */
 	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 229500, 1600, 1664,
 		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, 0) },
 	/* 1600x1200@120Hz RB */
 	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 268250, 1600, 1648,
 		   1680, 1760, 0, 1200, 1203, 1207, 1271, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC, 0) },
 	/* 1680x1050@60Hz RB */
 	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 119000, 1680, 1728,
 		   1760, 1840, 0, 1050, 1053, 1059, 1080, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC, 0) },
 	/* 1680x1050@60Hz */
 	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
 		   1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC, 0) },
 	/* 1680x1050@75Hz */
 	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 187000, 1680, 1800,
 		   1976, 2272, 0, 1050, 1053, 1059, 1099, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC, 0) },
 	/* 1680x1050@85Hz */
 	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 214750, 1680, 1808,
 		   1984, 2288, 0, 1050, 1053, 1059, 1105, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC, 0) },
 	/* 1680x1050@120Hz RB */
 	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 245500, 1680, 1728,
 		   1760, 1840, 0, 1050, 1053, 1059, 1112, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC, 0) },
 	/* 1792x1344@60Hz */
 	{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
 		   2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC, 0) },
 	/* 1792x1344@75Hz */
 	{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 261000, 1792, 1888,
 		   2104, 2456, 0, 1344, 1345, 1348, 1417, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC, 0) },
 	/* 1792x1344@120Hz RB */
 	{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 333250, 1792, 1840,
 		   1872, 1952, 0, 1344, 1347, 1351, 1423, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC, 0) },
 	/* 1856x1392@60Hz */
 	{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
 		   2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC, 0) },
 	/* 1856x1392@75Hz */
 	{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 288000, 1856, 1984,
 		   2208, 2560, 0, 1392, 1395, 1399, 1500, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC, 0) },
 	/* 1856x1392@120Hz RB */
 	{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 356500, 1856, 1904,
 		   1936, 2016, 0, 1392, 1395, 1399, 1474, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC, 0) },
 	/* 1920x1200@60Hz RB */
 	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 154000, 1920, 1968,
 		   2000, 2080, 0, 1200, 1203, 1209, 1235, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC, 0) },
 	/* 1920x1200@60Hz */
 	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
 		   2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC, 0) },
 	/* 1920x1200@75Hz */
 	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 245250, 1920, 2056,
 		   2264, 2608, 0, 1200, 1203, 1209, 1255, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC, 0) },
 	/* 1920x1200@85Hz */
 	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 281250, 1920, 2064,
 		   2272, 2624, 0, 1200, 1203, 1209, 1262, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC, 0) },
 	/* 1920x1200@120Hz RB */
 	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 317000, 1920, 1968,
 		   2000, 2080, 0, 1200, 1203, 1209, 1271, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC, 0) },
 	/* 1920x1440@60Hz */
 	{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
 		   2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC, 0) },
 	/* 1920x1440@75Hz */
 	{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2064,
 		   2288, 2640, 0, 1440, 1441, 1444, 1500, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC, 0) },
 	/* 1920x1440@120Hz RB */
 	{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 380500, 1920, 1968,
 		   2000, 2080, 0, 1440, 1443, 1447, 1525, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC, 0) },
 	/* 2560x1600@60Hz RB */
 	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 268500, 2560, 2608,
 		   2640, 2720, 0, 1600, 1603, 1609, 1646, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC, 0) },
 	/* 2560x1600@60Hz */
 	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
 		   3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC, 0) },
 	/* 2560x1600@75HZ */
 	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 443250, 2560, 2768,
 		   3048, 3536, 0, 1600, 1603, 1609, 1672, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC, 0) },
 	/* 2560x1600@85HZ */
 	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 505250, 2560, 2768,
 		   3048, 3536, 0, 1600, 1603, 1609, 1682, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC, 0) },
 	/* 2560x1600@120Hz RB */
 	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 552750, 2560, 2608,
 		   2640, 2720, 0, 1600, 1603, 1609, 1694, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC, 0) },
 };
 
 static const struct drm_display_mode edid_est_modes[] = {
 	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
 		   968, 1056, 0, 600, 601, 605, 628, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@60Hz */
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, 0) }, /* 800x600@60Hz */
 	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
 		   896, 1024, 0, 600, 601, 603,  625, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@56Hz */
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, 0) }, /* 800x600@56Hz */
 	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
 		   720, 840, 0, 480, 481, 484, 500, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, 0) }, /* 640x480@75Hz */
 	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
 		   704,  832, 0, 480, 489, 491, 520, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, 0) }, /* 640x480@72Hz */
 	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704,
 		   768,  864, 0, 480, 483, 486, 525, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, 0) }, /* 640x480@67Hz */
 	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656,
 		   752, 800, 0, 480, 490, 492, 525, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@60Hz */
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, 0) }, /* 640x480@60Hz */
 	{ DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738,
 		   846, 900, 0, 400, 421, 423,  449, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 720x400@88Hz */
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, 0) }, /* 720x400@88Hz */
 	{ DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 28320, 720, 738,
 		   846,  900, 0, 400, 412, 414, 449, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 720x400@70Hz */
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC, 0) }, /* 720x400@70Hz */
 	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
 		   1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, 0) }, /* 1280x1024@75Hz */
 	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78800, 1024, 1040,
 		   1136, 1312, 0,  768, 769, 772, 800, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@75Hz */
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, 0) }, /* 1024x768@75Hz */
 	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
 		   1184, 1328, 0,  768, 771, 777, 806, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@70Hz */
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, 0) }, /* 1024x768@70Hz */
 	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
 		   1184, 1344, 0,  768, 771, 777, 806, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@60Hz */
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, 0) }, /* 1024x768@60Hz */
 	{ DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032,
 		   1208, 1264, 0, 768, 768, 776, 817, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 1024x768@43Hz */
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE, 0) }, /* 1024x768@43Hz */
 	{ DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 57284, 832, 864,
 		   928, 1152, 0, 624, 625, 628, 667, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 832x624@75Hz */
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, 0) }, /* 832x624@75Hz */
 	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
 		   896, 1056, 0, 600, 601, 604,  625, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@75Hz */
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, 0) }, /* 800x600@75Hz */
 	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
 		   976, 1040, 0, 600, 637, 643, 666, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@72Hz */
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, 0) }, /* 800x600@72Hz */
 	{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
 		   1344, 1600, 0,  864, 865, 868, 900, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, 0) }, /* 1152x864@75Hz */
 };
 
 struct minimode {
@@ -585,349 +591,349 @@
  */
 static const struct drm_display_mode edid_cea_modes[] = {
 	/* 1 - 640x480@60Hz */
-	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
+	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656,
 		   752, 800, 0, 480, 490, 492, 525, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, HDMI_PICTURE_ASPECT_4_3),
 	  .vrefresh = 60, },
 	/* 2 - 720x480@60Hz */
 	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
 		   798, 858, 0, 480, 489, 495, 525, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, HDMI_PICTURE_ASPECT_4_3),
 	  .vrefresh = 60, },
 	/* 3 - 720x480@60Hz */
 	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
 		   798, 858, 0, 480, 489, 495, 525, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, HDMI_PICTURE_ASPECT_16_9),
 	  .vrefresh = 60, },
 	/* 4 - 1280x720@60Hz */
 	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
 		   1430, 1650, 0, 720, 725, 730, 750, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, HDMI_PICTURE_ASPECT_16_9),
 	  .vrefresh = 60, },
 	/* 5 - 1920x1080i@60Hz */
 	{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
 		   2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
-			DRM_MODE_FLAG_INTERLACE),
+			DRM_MODE_FLAG_INTERLACE, HDMI_PICTURE_ASPECT_16_9),
 	  .vrefresh = 60, },
 	/* 6 - 1440x480i@60Hz */
 	{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
 		   1602, 1716, 0, 480, 488, 494, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK, HDMI_PICTURE_ASPECT_4_3),
 	  .vrefresh = 60, },
 	/* 7 - 1440x480i@60Hz */
 	{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
 		   1602, 1716, 0, 480, 488, 494, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK, HDMI_PICTURE_ASPECT_16_9),
 	  .vrefresh = 60, },
 	/* 8 - 1440x240@60Hz */
 	{ DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
 		   1602, 1716, 0, 240, 244, 247, 262, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-			DRM_MODE_FLAG_DBLCLK),
-	  .vrefresh = 60, },
+			DRM_MODE_FLAG_DBLCLK, HDMI_PICTURE_ASPECT_4_3),
+	  .vrefresh = 60,},
 	/* 9 - 1440x240@60Hz */
 	{ DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
 		   1602, 1716, 0, 240, 244, 247, 262, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-			DRM_MODE_FLAG_DBLCLK),
+			DRM_MODE_FLAG_DBLCLK, HDMI_PICTURE_ASPECT_16_9),
 	  .vrefresh = 60, },
 	/* 10 - 2880x480i@60Hz */
 	{ DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
 		   3204, 3432, 0, 480, 488, 494, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-			DRM_MODE_FLAG_INTERLACE),
+			DRM_MODE_FLAG_INTERLACE, HDMI_PICTURE_ASPECT_4_3),
 	  .vrefresh = 60, },
 	/* 11 - 2880x480i@60Hz */
 	{ DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
 		   3204, 3432, 0, 480, 488, 494, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-			DRM_MODE_FLAG_INTERLACE),
+			DRM_MODE_FLAG_INTERLACE, HDMI_PICTURE_ASPECT_16_9),
 	  .vrefresh = 60, },
 	/* 12 - 2880x240@60Hz */
 	{ DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
 		   3204, 3432, 0, 240, 244, 247, 262, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, HDMI_PICTURE_ASPECT_4_3),
 	  .vrefresh = 60, },
 	/* 13 - 2880x240@60Hz */
 	{ DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
 		   3204, 3432, 0, 240, 244, 247, 262, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, HDMI_PICTURE_ASPECT_16_9),
 	  .vrefresh = 60, },
 	/* 14 - 1440x480@60Hz */
 	{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
 		   1596, 1716, 0, 480, 489, 495, 525, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, HDMI_PICTURE_ASPECT_4_3),
 	  .vrefresh = 60, },
 	/* 15 - 1440x480@60Hz */
 	{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
 		   1596, 1716, 0, 480, 489, 495, 525, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, HDMI_PICTURE_ASPECT_16_9),
 	  .vrefresh = 60, },
 	/* 16 - 1920x1080@60Hz */
 	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
 		   2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, HDMI_PICTURE_ASPECT_16_9),
 	  .vrefresh = 60, },
 	/* 17 - 720x576@50Hz */
 	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
 		   796, 864, 0, 576, 581, 586, 625, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, HDMI_PICTURE_ASPECT_4_3),
 	  .vrefresh = 50, },
 	/* 18 - 720x576@50Hz */
 	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
 		   796, 864, 0, 576, 581, 586, 625, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, HDMI_PICTURE_ASPECT_16_9),
 	  .vrefresh = 50, },
 	/* 19 - 1280x720@50Hz */
 	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
 		   1760, 1980, 0, 720, 725, 730, 750, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, HDMI_PICTURE_ASPECT_16_9),
 	  .vrefresh = 50, },
 	/* 20 - 1920x1080i@50Hz */
 	{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
 		   2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
-			DRM_MODE_FLAG_INTERLACE),
+			DRM_MODE_FLAG_INTERLACE, HDMI_PICTURE_ASPECT_16_9),
 	  .vrefresh = 50, },
 	/* 21 - 1440x576i@50Hz */
 	{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
 		   1590, 1728, 0, 576, 580, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK, HDMI_PICTURE_ASPECT_4_3),
 	  .vrefresh = 50, },
 	/* 22 - 1440x576i@50Hz */
 	{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
 		   1590, 1728, 0, 576, 580, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK, HDMI_PICTURE_ASPECT_16_9),
 	  .vrefresh = 50, },
 	/* 23 - 1440x288@50Hz */
 	{ DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
 		   1590, 1728, 0, 288, 290, 293, 312, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-			DRM_MODE_FLAG_DBLCLK),
+			DRM_MODE_FLAG_DBLCLK, HDMI_PICTURE_ASPECT_4_3),
 	  .vrefresh = 50, },
 	/* 24 - 1440x288@50Hz */
 	{ DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
 		   1590, 1728, 0, 288, 290, 293, 312, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-			DRM_MODE_FLAG_DBLCLK),
+			DRM_MODE_FLAG_DBLCLK, HDMI_PICTURE_ASPECT_16_9),
 	  .vrefresh = 50, },
 	/* 25 - 2880x576i@50Hz */
 	{ DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
 		   3180, 3456, 0, 576, 580, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-			DRM_MODE_FLAG_INTERLACE),
+			DRM_MODE_FLAG_INTERLACE, HDMI_PICTURE_ASPECT_4_3),
 	  .vrefresh = 50, },
 	/* 26 - 2880x576i@50Hz */
 	{ DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
 		   3180, 3456, 0, 576, 580, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-			DRM_MODE_FLAG_INTERLACE),
+			DRM_MODE_FLAG_INTERLACE, HDMI_PICTURE_ASPECT_16_9),
 	  .vrefresh = 50, },
 	/* 27 - 2880x288@50Hz */
 	{ DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
 		   3180, 3456, 0, 288, 290, 293, 312, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, HDMI_PICTURE_ASPECT_4_3),
 	  .vrefresh = 50, },
 	/* 28 - 2880x288@50Hz */
 	{ DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
 		   3180, 3456, 0, 288, 290, 293, 312, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, HDMI_PICTURE_ASPECT_16_9),
 	  .vrefresh = 50, },
 	/* 29 - 1440x576@50Hz */
 	{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
 		   1592, 1728, 0, 576, 581, 586, 625, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, HDMI_PICTURE_ASPECT_16_9),
 	  .vrefresh = 50, },
 	/* 30 - 1440x576@50Hz */
 	{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
 		   1592, 1728, 0, 576, 581, 586, 625, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, HDMI_PICTURE_ASPECT_4_3),
 	  .vrefresh = 50, },
 	/* 31 - 1920x1080@50Hz */
 	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
 		   2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, HDMI_PICTURE_ASPECT_16_9),
 	  .vrefresh = 50, },
 	/* 32 - 1920x1080@24Hz */
 	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
 		   2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, HDMI_PICTURE_ASPECT_16_9),
 	  .vrefresh = 24, },
 	/* 33 - 1920x1080@25Hz */
 	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
 		   2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, HDMI_PICTURE_ASPECT_16_9),
 	  .vrefresh = 25, },
 	/* 34 - 1920x1080@30Hz */
 	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
 		   2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, HDMI_PICTURE_ASPECT_16_9),
 	  .vrefresh = 30, },
 	/* 35 - 2880x480@60Hz */
 	{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
 		   3192, 3432, 0, 480, 489, 495, 525, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, HDMI_PICTURE_ASPECT_4_3),
 	  .vrefresh = 60, },
 	/* 36 - 2880x480@60Hz */
 	{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
 		   3192, 3432, 0, 480, 489, 495, 525, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, HDMI_PICTURE_ASPECT_16_9),
 	  .vrefresh = 60, },
 	/* 37 - 2880x576@50Hz */
 	{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
 		   3184, 3456, 0, 576, 581, 586, 625, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, HDMI_PICTURE_ASPECT_4_3),
 	  .vrefresh = 50, },
 	/* 38 - 2880x576@50Hz */
 	{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
 		   3184, 3456, 0, 576, 581, 586, 625, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, HDMI_PICTURE_ASPECT_16_9),
 	  .vrefresh = 50, },
 	/* 39 - 1920x1080i@50Hz */
 	{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
 		   2120, 2304, 0, 1080, 1126, 1136, 1250, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC |
-			DRM_MODE_FLAG_INTERLACE),
+			DRM_MODE_FLAG_INTERLACE, HDMI_PICTURE_ASPECT_16_9),
 	  .vrefresh = 50, },
 	/* 40 - 1920x1080i@100Hz */
 	{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
 		   2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
-			DRM_MODE_FLAG_INTERLACE),
+			DRM_MODE_FLAG_INTERLACE, HDMI_PICTURE_ASPECT_16_9),
 	  .vrefresh = 100, },
 	/* 41 - 1280x720@100Hz */
 	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
 		   1760, 1980, 0, 720, 725, 730, 750, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, HDMI_PICTURE_ASPECT_16_9),
 	  .vrefresh = 100, },
 	/* 42 - 720x576@100Hz */
 	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
 		   796, 864, 0, 576, 581, 586, 625, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, HDMI_PICTURE_ASPECT_4_3),
 	  .vrefresh = 100, },
 	/* 43 - 720x576@100Hz */
 	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
 		   796, 864, 0, 576, 581, 586, 625, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, HDMI_PICTURE_ASPECT_16_9),
 	  .vrefresh = 100, },
 	/* 44 - 1440x576i@100Hz */
 	{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
 		   1590, 1728, 0, 576, 580, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-			DRM_MODE_FLAG_DBLCLK),
+			DRM_MODE_FLAG_DBLCLK, HDMI_PICTURE_ASPECT_16_9),
 	  .vrefresh = 100, },
 	/* 45 - 1440x576i@100Hz */
 	{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
 		   1590, 1728, 0, 576, 580, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-			DRM_MODE_FLAG_DBLCLK),
+			DRM_MODE_FLAG_DBLCLK, HDMI_PICTURE_ASPECT_4_3),
 	  .vrefresh = 100, },
 	/* 46 - 1920x1080i@120Hz */
 	{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
 		   2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
-			DRM_MODE_FLAG_INTERLACE),
+			DRM_MODE_FLAG_INTERLACE, HDMI_PICTURE_ASPECT_16_9),
 	  .vrefresh = 120, },
 	/* 47 - 1280x720@120Hz */
 	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
 		   1430, 1650, 0, 720, 725, 730, 750, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, HDMI_PICTURE_ASPECT_16_9),
 	  .vrefresh = 120, },
 	/* 48 - 720x480@120Hz */
 	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
 		   798, 858, 0, 480, 489, 495, 525, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, HDMI_PICTURE_ASPECT_4_3),
 	  .vrefresh = 120, },
 	/* 49 - 720x480@120Hz */
 	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
 		   798, 858, 0, 480, 489, 495, 525, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, HDMI_PICTURE_ASPECT_16_9),
 	  .vrefresh = 120, },
 	/* 50 - 1440x480i@120Hz */
 	{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
 		   1602, 1716, 0, 480, 488, 494, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK, HDMI_PICTURE_ASPECT_4_3),
 	  .vrefresh = 120, },
 	/* 51 - 1440x480i@120Hz */
 	{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
 		   1602, 1716, 0, 480, 488, 494, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK, HDMI_PICTURE_ASPECT_16_9),
 	  .vrefresh = 120, },
 	/* 52 - 720x576@200Hz */
 	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
 		   796, 864, 0, 576, 581, 586, 625, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, HDMI_PICTURE_ASPECT_4_3),
 	  .vrefresh = 200, },
 	/* 53 - 720x576@200Hz */
 	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
 		   796, 864, 0, 576, 581, 586, 625, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, HDMI_PICTURE_ASPECT_16_9),
 	  .vrefresh = 200, },
 	/* 54 - 1440x576i@200Hz */
 	{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
 		   1590, 1728, 0, 576, 580, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK, HDMI_PICTURE_ASPECT_4_3),
 	  .vrefresh = 200, },
 	/* 55 - 1440x576i@200Hz */
 	{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
 		   1590, 1728, 0, 576, 580, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK, HDMI_PICTURE_ASPECT_16_9),
 	  .vrefresh = 200, },
 	/* 56 - 720x480@240Hz */
 	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
 		   798, 858, 0, 480, 489, 495, 525, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, HDMI_PICTURE_ASPECT_4_3),
 	  .vrefresh = 240, },
 	/* 57 - 720x480@240Hz */
 	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
 		   798, 858, 0, 480, 489, 495, 525, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, HDMI_PICTURE_ASPECT_16_9),
 	  .vrefresh = 240, },
 	/* 58 - 1440x480i@240 */
 	{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
 		   1602, 1716, 0, 480, 488, 494, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK, HDMI_PICTURE_ASPECT_4_3),
 	  .vrefresh = 240, },
 	/* 59 - 1440x480i@240 */
 	{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
 		   1602, 1716, 0, 480, 488, 494, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK, HDMI_PICTURE_ASPECT_16_9),
 	  .vrefresh = 240, },
 	/* 60 - 1280x720@24Hz */
 	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
 		   3080, 3300, 0, 720, 725, 730, 750, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, HDMI_PICTURE_ASPECT_16_9),
 	  .vrefresh = 24, },
 	/* 61 - 1280x720@25Hz */
 	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
 		   3740, 3960, 0, 720, 725, 730, 750, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, HDMI_PICTURE_ASPECT_16_9),
 	  .vrefresh = 25, },
 	/* 62 - 1280x720@30Hz */
 	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
 		   3080, 3300, 0, 720, 725, 730, 750, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, HDMI_PICTURE_ASPECT_16_9),
 	  .vrefresh = 30, },
 	/* 63 - 1920x1080@120Hz */
 	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
 		   2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, HDMI_PICTURE_ASPECT_16_9),
 	 .vrefresh = 120, },
 	/* 64 - 1920x1080@100Hz */
 	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
 		   2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, HDMI_PICTURE_ASPECT_16_9),
 	 .vrefresh = 100, },
 };
 
@@ -968,6 +974,16 @@
 	u8 csum = 0;
 	struct edid *edid = (struct edid *)raw_edid;
 
+	if (WARN_ON(!raw_edid))
+		return false;
+
+#ifdef DEBUG_RAW_EDID
+	pr_info("*********** Print EDID block %d start **********\n", block);
+	print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1,
+			raw_edid, EDID_LENGTH, false);
+	pr_info("*********** Print EDID block %d end ************\n", block);
+#endif
+
 	if (edid_fixup > 8 || edid_fixup < 0)
 		edid_fixup = 6;
 
@@ -1010,15 +1026,15 @@
 		break;
 	}
 
-	return 1;
+	return true;
 
 bad:
-	if (raw_edid && print_bad_edid) {
+	if (print_bad_edid) {
 		printk(KERN_ERR "Raw EDID:\n");
 		print_hex_dump(KERN_ERR, " \t", DUMP_PREFIX_NONE, 16, 1,
 			       raw_edid, EDID_LENGTH, false);
 	}
-	return 0;
+	return false;
 }
 EXPORT_SYMBOL(drm_edid_block_valid);
 
@@ -1675,6 +1691,16 @@
 	mode->flags |= DRM_MODE_FLAG_INTERLACE;
 }
 
+enum hdmi_picture_aspect drm_get_cea_aspect_ratio(u8 vic)
+
+{
+/*return Aspect Ratio for VIC-1 to access the right array element*/
+
+	return edid_cea_modes[vic-1].picture_aspect_ratio;
+}
+EXPORT_SYMBOL(drm_get_cea_aspect_ratio);
+
+
 /**
  * drm_mode_detailed - create a new mode from an EDID detailed timing section
  * @dev: DRM device (needed to create new mode)
@@ -1706,11 +1732,11 @@
 		return NULL;
 
 	if (pt->misc & DRM_EDID_PT_STEREO) {
-		printk(KERN_WARNING "stereo mode not supported\n");
+		DRM_DEBUG_KMS("stereo mode not supported\n");
 		return NULL;
 	}
 	if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) {
-		printk(KERN_WARNING "composite sync not supported\n");
+		DRM_DEBUG_KMS("composite sync not supported\n");
 	}
 
 	/* it is incorrect if hsync/vsync width is zero */
@@ -1778,6 +1804,14 @@
 		mode->height_mm = edid->height_cm * 10;
 	}
 
+#if defined(CONFIG_DRM_I915)
+#else
+	if (mode->width_mm/4 == mode->height_mm/3)
+		mode->flags |= DRM_MODE_FLAG_PAR4_3;
+	if (mode->width_mm/16 == mode->height_mm/9)
+		mode->flags |= DRM_MODE_FLAG_PAR16_9;
+#endif
+
 	mode->type = DRM_MODE_TYPE_DRIVER;
 	mode->vrefresh = drm_mode_vrefresh(mode);
 	drm_mode_set_name(mode);
@@ -2321,6 +2355,73 @@
 }
 EXPORT_SYMBOL(drm_find_cea_extension);
 
+#if defined(CONFIG_DRM_I915)
+/*
+ * helper function to check whether two clocks can fall into the same VIC.
+ *
+ * Returns: true if possible, false otherwise.
+*/
+static bool drm_check_clock_match(int target, int reference)
+{
+	/* check target clock is in range of 60Hz or 59.94
+	 * (reference * 1000/1001) with (-0.5%, +0.5%) tolerance.
+	 * Based on CEA spec, when determining whether two video timings
+	 * are identical, clock frequencey within (-0.5%, +0.5%) tolerance
+	 * should be considered as the same.
+	*/
+
+	if (target >= DIV_ROUND_UP(reference * 995, 1001) &&
+		target <= DIV_ROUND_UP(reference * 1005, 1000))
+		return true;
+	return false;
+}
+
+/*
+ * helper function to add aspect ratio from the short video desriptor
+ * of cea_mode.
+ *
+*/
+static void drm_add_aspect_cea_mode(struct drm_display_mode *cur_mode,
+		struct drm_display_mode *cea_mode)
+{
+	if (drm_check_clock_match(cur_mode->clock, cea_mode->clock)
+	&& drm_mode_equal_no_clocks(cur_mode, cea_mode)) {
+		if (cea_mode->picture_aspect_ratio
+		== HDMI_PICTURE_ASPECT_4_3)
+			cur_mode->picture_aspect_ratio =
+			HDMI_PICTURE_ASPECT_4_3;
+		else
+			cur_mode->picture_aspect_ratio =
+			HDMI_PICTURE_ASPECT_16_9;
+	}
+}
+#endif
+
+/*
+ * Calculate the alternate clock for the CEA mode
+ * (60Hz vs. 59.94Hz etc.)
+ */
+static unsigned int
+cea_mode_alternate_clock(const struct drm_display_mode *cea_mode)
+{
+	unsigned int clock = cea_mode->clock;
+
+	if (cea_mode->vrefresh % 6 != 0)
+		return clock;
+
+	/*
+	 * edid_cea_modes contains the 59.94Hz
+	 * variant for 240 and 480 line modes,
+	 * and the 60Hz variant otherwise.
+	 */
+	if (cea_mode->vdisplay == 240 || cea_mode->vdisplay == 480)
+		clock = clock * 1001 / 1000;
+	else
+		clock = DIV_ROUND_UP(clock * 1000, 1001);
+
+	return clock;
+}
+
 /**
  * drm_match_cea_mode - look for a CEA mode matching given mode
  * @to_match: display mode
@@ -2337,38 +2438,98 @@
 
 	for (mode = 0; mode < ARRAY_SIZE(edid_cea_modes); mode++) {
 		const struct drm_display_mode *cea_mode = &edid_cea_modes[mode];
+#if defined(CONFIG_DRM_I915)
+		if (drm_check_clock_match(to_match->clock, cea_mode->clock) &&
+			drm_mode_equal_no_clocks(to_match, cea_mode) &&
+			to_match->picture_aspect_ratio ==
+			cea_mode->picture_aspect_ratio)
+			return mode + 1;
+#else
 		unsigned int clock1, clock2;
 
-		clock1 = clock2 = cea_mode->clock;
-
 		/* Check both 60Hz and 59.94Hz */
-		if (cea_mode->vrefresh % 6 == 0) {
-			/*
-			 * edid_cea_modes contains the 59.94Hz
-			 * variant for 240 and 480 line modes,
-			 * and the 60Hz variant otherwise.
-			 */
-			if (cea_mode->vdisplay == 240 ||
-			    cea_mode->vdisplay == 480)
-				clock1 = clock1 * 1001 / 1000;
-			else
-				clock2 = DIV_ROUND_UP(clock2 * 1000, 1001);
-		}
+		clock1 = cea_mode->clock;
+		clock2 = cea_mode_alternate_clock(cea_mode);
 
 		if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
-		     KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
-		    drm_mode_equal_no_clocks(to_match, cea_mode))
-			return mode + 1;
+			KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
+			drm_mode_equal_no_clocks(to_match, cea_mode)) {
+				return mode + 1;
+		}
+#endif
 	}
 	return 0;
 }
 EXPORT_SYMBOL(drm_match_cea_mode);
 
+static int
+add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_display_mode *mode, *tmp;
+	LIST_HEAD(list);
+	int modes = 0;
+
+	/* Don't add CEA modes if the CEA extension block is missing */
+	if (!drm_find_cea_extension(edid))
+		return 0;
+
+	/*
+	 * Go through all probed modes and create a new mode
+	 * with the alternate clock for certain CEA modes.
+	 */
+	list_for_each_entry(mode, &connector->probed_modes, head) {
+		const struct drm_display_mode *cea_mode;
+		struct drm_display_mode *newmode;
+		u8 cea_mode_idx = drm_match_cea_mode(mode) - 1;
+		unsigned int clock1, clock2;
+
+		if (cea_mode_idx >= ARRAY_SIZE(edid_cea_modes))
+			continue;
+
+		cea_mode = &edid_cea_modes[cea_mode_idx];
+
+		clock1 = cea_mode->clock;
+		clock2 = cea_mode_alternate_clock(cea_mode);
+
+		if (clock1 == clock2)
+			continue;
+
+		if (mode->clock != clock1 && mode->clock != clock2)
+			continue;
+
+		newmode = drm_mode_duplicate(dev, cea_mode);
+		if (!newmode)
+			continue;
+
+		/*
+		 * The current mode could be either variant. Make
+		 * sure to pick the "other" clock for the new mode.
+		 */
+		if (mode->clock != clock1)
+			newmode->clock = clock1;
+		else
+			newmode->clock = clock2;
+
+		list_add_tail(&newmode->head, &list);
+	}
+
+	list_for_each_entry_safe(mode, tmp, &list, head) {
+		list_del(&mode->head);
+		drm_mode_probed_add(connector, mode);
+		modes++;
+	}
+
+	return modes;
+}
 
 static int
 do_cea_modes (struct drm_connector *connector, u8 *db, u8 len)
 {
 	struct drm_device *dev = connector->dev;
+#if defined(CONFIG_DRM_I915)
+	struct drm_display_mode *cur_mode, *t;
+#endif
 	u8 * mode, cea_mode;
 	int modes = 0;
 
@@ -2377,7 +2538,18 @@
 		if (cea_mode < ARRAY_SIZE(edid_cea_modes)) {
 			struct drm_display_mode *newmode;
 			newmode = drm_mode_duplicate(dev,
-						     &edid_cea_modes[cea_mode]);
+					&edid_cea_modes[cea_mode]);
+#if defined(CONFIG_DRM_I915)
+			if (newmode) {
+				list_for_each_entry_safe(cur_mode, t,
+					&connector->probed_modes, head) {
+					if (cur_mode->picture_aspect_ratio)
+						continue;
+					drm_add_aspect_cea_mode(cur_mode,
+						newmode);
+				}
+			}
+#endif
 			if (newmode) {
 				newmode->vrefresh = 0;
 				drm_mode_probed_add(connector, newmode);
@@ -2946,6 +3118,7 @@
 	if (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)
 		num_modes += add_inferred_modes(connector, edid);
 	num_modes += add_cea_modes(connector, edid);
+	num_modes += add_alternate_cea_modes(connector, edid);
 
 	if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
 		edid_fixup_preferred(connector, quirks);
@@ -3025,11 +3198,13 @@
 	if (err < 0)
 		return err;
 
+	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+		frame->pixel_repeat = 1;
+
 	frame->video_code = drm_match_cea_mode(mode);
-	if (!frame->video_code)
-		return 0;
 
 	frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE;
+	frame->active_info_valid = 1;
 	frame->active_aspect = HDMI_ACTIVE_ASPECT_PICTURE;
 
 	return 0;
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index b78cbe7..d3275c9 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -124,7 +124,8 @@
 		mode = &fb_helper_conn->cmdline_mode;
 
 		/* do something on return - turn off connector maybe */
-		if (fb_get_options(drm_get_connector_name(connector), &option))
+		if (fb_get_options((char *) drm_get_connector_name(connector),
+				   &option))
 			continue;
 
 		if (drm_mode_parse_command_line_for_connector(option,
@@ -168,6 +169,9 @@
 	uint16_t *r_base, *g_base, *b_base;
 	int i;
 
+	if (helper->funcs->gamma_get == NULL)
+		return;
+
 	r_base = crtc->gamma_store;
 	g_base = r_base + crtc->gamma_size;
 	b_base = g_base + crtc->gamma_size;
@@ -284,13 +288,27 @@
  */
 bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper)
 {
+	struct drm_device *dev = fb_helper->dev;
+	struct drm_plane *plane;
 	bool error = false;
-	int i, ret;
+	int i;
 
-	drm_warn_on_modeset_not_all_locked(fb_helper->dev);
+	drm_warn_on_modeset_not_all_locked(dev);
+
+	list_for_each_entry(plane, &dev->mode_config.plane_list, head)
+		drm_plane_force_disable(plane);
 
 	for (i = 0; i < fb_helper->crtc_count; i++) {
 		struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set;
+		struct drm_crtc *crtc = mode_set->crtc;
+		int ret;
+
+		if (crtc->funcs->cursor_set) {
+			ret = crtc->funcs->cursor_set(crtc, NULL, 0, 0, 0);
+			if (ret)
+				error = true;
+		}
+
 		ret = drm_mode_set_config_internal(mode_set);
 		if (ret)
 			error = true;
@@ -299,6 +317,7 @@
 }
 EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode);
 
+#if !defined(CONFIG_INTEL_NO_FB_PANIC_NOTIFY)
 /*
  * restore fbcon display for all kms driver's using this helper, used for sysrq
  * and panic handling.
@@ -339,6 +358,8 @@
 static struct notifier_block paniced = {
 	.notifier_call = drm_fb_helper_panic,
 };
+#endif
+
 
 static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper)
 {
@@ -358,6 +379,7 @@
 	return true;
 }
 
+#if !defined(CONFIG_INTEL_NO_FB_PANIC_NOTIFY)
 #ifdef CONFIG_MAGIC_SYSRQ
 static void drm_fb_helper_restore_work_fn(struct work_struct *ignored)
 {
@@ -382,6 +404,7 @@
 static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { };
 #endif
 
+#endif
 static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
 {
 	struct drm_fb_helper *fb_helper = info->par;
@@ -541,12 +564,15 @@
 {
 	if (!list_empty(&fb_helper->kernel_fb_list)) {
 		list_del(&fb_helper->kernel_fb_list);
+#if !defined(CONFIG_INTEL_NO_FB_PANIC_NOTIFY)
 		if (list_empty(&kernel_fb_helper_list)) {
 			pr_info("drm: unregistered panic notifier\n");
 			atomic_notifier_chain_unregister(&panic_notifier_list,
 							 &paniced);
 			unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
 		}
+#endif
+
 	}
 
 	drm_fb_helper_crtc_free(fb_helper);
@@ -583,6 +609,14 @@
 		return 0;
 	}
 
+	/*
+	 * The driver really shouldn't advertise pseudo/directcolor
+	 * visuals if it can't deal with the palette.
+	 */
+	if (WARN_ON(!fb_helper->funcs->gamma_set ||
+		    !fb_helper->funcs->gamma_get))
+		return -EINVAL;
+
 	pindex = regno;
 
 	if (fb->bits_per_pixel == 16) {
@@ -626,12 +660,19 @@
 int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
 {
 	struct drm_fb_helper *fb_helper = info->par;
+	struct drm_device *dev = fb_helper->dev;
 	struct drm_crtc_helper_funcs *crtc_funcs;
 	u16 *red, *green, *blue, *transp;
 	struct drm_crtc *crtc;
 	int i, j, rc = 0;
 	int start;
 
+	drm_modeset_lock_all(dev);
+	if (!drm_fb_helper_is_bound(fb_helper)) {
+		drm_modeset_unlock_all(dev);
+		return -EBUSY;
+	}
+
 	for (i = 0; i < fb_helper->crtc_count; i++) {
 		crtc = fb_helper->crtc_info[i].mode_set.crtc;
 		crtc_funcs = crtc->helper_private;
@@ -654,10 +695,13 @@
 
 			rc = setcolreg(crtc, hred, hgreen, hblue, start++, info);
 			if (rc)
-				return rc;
+				goto out;
 		}
-		crtc_funcs->load_lut(crtc);
+		if (crtc_funcs->load_lut)
+			crtc_funcs->load_lut(crtc);
 	}
+ out:
+	drm_modeset_unlock_all(dev);
 	return rc;
 }
 EXPORT_SYMBOL(drm_fb_helper_setcmap);
@@ -951,6 +995,7 @@
 	dev_info(fb_helper->dev->dev, "fb%d: %s frame buffer device\n",
 			info->node, info->fix.id);
 
+#if !defined(CONFIG_INTEL_NO_FB_PANIC_NOTIFY)
 	/* Switch back to kernel console on panic */
 	/* multi card linked list maybe */
 	if (list_empty(&kernel_fb_helper_list)) {
@@ -959,7 +1004,7 @@
 					       &paniced);
 		register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
 	}
-
+#endif
 	list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list);
 
 	return 0;
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 429e07d..5d12a1f 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -271,9 +271,22 @@
 	priv->uid = current_euid();
 	priv->pid = get_pid(task_pid(current));
 	priv->minor = idr_find(&drm_minors_idr, minor_id);
+	if (!priv->minor) {
+		ret = -ENODEV;
+		goto out_put_pid;
+	}
+
 	priv->ioctl_count = 0;
 	/* for compatibility root is always authenticated */
-	priv->authenticated = capable(CAP_SYS_ADMIN);
+
+	/*
+	 * HACK: For Android compatibility we don't require CAP_SYS_ADMIN.
+	 *
+	 * Commented line:
+	 * priv->authenticated = capable(CAP_SYS_ADMIN);
+	 */
+	priv->authenticated = 1;
+
 	priv->lock_count = 0;
 
 	INIT_LIST_HEAD(&priv->lhead);
@@ -292,7 +305,7 @@
 	if (dev->driver->open) {
 		ret = dev->driver->open(dev, priv);
 		if (ret < 0)
-			goto out_free;
+			goto out_prime_destroy;
 	}
 
 
@@ -304,7 +317,7 @@
 		if (!priv->minor->master) {
 			mutex_unlock(&dev->struct_mutex);
 			ret = -ENOMEM;
-			goto out_free;
+			goto out_close;
 		}
 
 		priv->is_master = 1;
@@ -322,7 +335,7 @@
 				drm_master_put(&priv->minor->master);
 				drm_master_put(&priv->master);
 				mutex_unlock(&dev->struct_mutex);
-				goto out_free;
+				goto out_close;
 			}
 		}
 		mutex_lock(&dev->struct_mutex);
@@ -333,7 +346,7 @@
 				drm_master_put(&priv->minor->master);
 				drm_master_put(&priv->master);
 				mutex_unlock(&dev->struct_mutex);
-				goto out_free;
+				goto out_close;
 			}
 		}
 		mutex_unlock(&dev->struct_mutex);
@@ -367,7 +380,17 @@
 #endif
 
 	return 0;
-      out_free:
+
+out_close:
+	if (dev->driver->postclose)
+		dev->driver->postclose(dev, priv);
+out_prime_destroy:
+	if (drm_core_check_feature(dev, DRIVER_PRIME))
+		drm_prime_destroy_file_private(&priv->prime);
+	if (dev->driver->driver_features & DRIVER_GEM)
+		drm_gem_release(dev, priv);
+out_put_pid:
+	put_pid(priv->pid);
 	kfree(priv);
 	filp->private_data = NULL;
 	return ret;
@@ -501,10 +524,12 @@
 	if (file_priv->is_master) {
 		struct drm_master *master = file_priv->master;
 		struct drm_file *temp;
-		list_for_each_entry(temp, &dev->filelist, lhead) {
-			if ((temp->master == file_priv->master) &&
-			    (temp != file_priv))
-				temp->authenticated = 0;
+		if (!list_empty(&dev->filelist)) {
+			list_for_each_entry(temp, &dev->filelist, lhead) {
+				if ((temp->master == file_priv->master) &&
+				    (temp != file_priv))
+					temp->authenticated = 0;
+			}
 		}
 
 		/**
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index cf919e3..603f256 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -108,12 +108,8 @@
 		return -ENOMEM;
 	}
 
-	if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
-			DRM_FILE_PAGE_OFFSET_SIZE)) {
-		drm_ht_remove(&mm->offset_hash);
-		kfree(mm);
-		return -ENOMEM;
-	}
+	drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
+		    DRM_FILE_PAGE_OFFSET_SIZE);
 
 	return 0;
 }
@@ -453,25 +449,21 @@
 	spin_lock(&dev->object_name_lock);
 	if (!obj->name) {
 		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
-		obj->name = ret;
-		args->name = (uint64_t) obj->name;
-		spin_unlock(&dev->object_name_lock);
-		idr_preload_end();
-
 		if (ret < 0)
 			goto err;
-		ret = 0;
+
+		obj->name = ret;
 
 		/* Allocate a reference for the name table.  */
 		drm_gem_object_reference(obj);
-	} else {
-		args->name = (uint64_t) obj->name;
-		spin_unlock(&dev->object_name_lock);
-		idr_preload_end();
-		ret = 0;
 	}
 
+	args->name = (uint64_t) obj->name;
+	ret = 0;
+
 err:
+	spin_unlock(&dev->object_name_lock);
+	idr_preload_end();
 	drm_gem_object_unreference_unlocked(obj);
 	return ret;
 }
@@ -644,6 +636,59 @@
 }
 EXPORT_SYMBOL(drm_gem_vm_close);
 
+/**
+ * drm_gem_mmap_obj - memory map a GEM object
+ * @obj: the GEM object to map
+ * @obj_size: the object size to be mapped, in bytes
+ * @vma: VMA for the area to be mapped
+ *
+ * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
+ * provided by the driver. Depending on their requirements, drivers can either
+ * provide a fault handler in their gem_vm_ops (in which case any accesses to
+ * the object will be trapped, to perform migration, GTT binding, surface
+ * register allocation, or performance monitoring), or mmap the buffer memory
+ * synchronously after calling drm_gem_mmap_obj.
+ *
+ * This function is mainly intended to implement the DMABUF mmap operation, when
+ * the GEM object is not looked up based on its fake offset. To implement the
+ * DRM mmap operation, drivers should use the drm_gem_mmap() function.
+ *
+ * NOTE: This function has to be protected with dev->struct_mutex
+ *
+ * Return 0 or success or -EINVAL if the object size is smaller than the VMA
+ * size, or if no gem_vm_ops are provided.
+ */
+int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
+		     struct vm_area_struct *vma)
+{
+	struct drm_device *dev = obj->dev;
+
+	lockdep_assert_held(&dev->struct_mutex);
+
+	/* Check for valid size. */
+	if (obj_size < vma->vm_end - vma->vm_start)
+		return -EINVAL;
+
+	if (!dev->driver->gem_vm_ops)
+		return -EINVAL;
+
+	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+	vma->vm_ops = dev->driver->gem_vm_ops;
+	vma->vm_private_data = obj;
+	vma->vm_page_prot =  pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+
+	/* Take a ref for this mapping of the object, so that the fault
+	 * handler can dereference the mmap offset's pointer to the object.
+	 * This reference is cleaned up by the corresponding vm_close
+	 * (which should happen whether the vma was created by this call, or
+	 * by a vm_open due to mremap or partial unmap or whatever).
+	 */
+	drm_gem_object_reference(obj);
+
+	drm_vm_open_locked(dev, vma);
+	return 0;
+}
+EXPORT_SYMBOL(drm_gem_mmap_obj);
 
 /**
  * drm_gem_mmap - memory map routine for GEM objects
@@ -653,11 +698,9 @@
  * If a driver supports GEM object mapping, mmap calls on the DRM file
  * descriptor will end up here.
  *
- * If we find the object based on the offset passed in (vma->vm_pgoff will
+ * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
  * contain the fake offset we created when the GTT map ioctl was called on
- * the object), we set up the driver fault handler so that any accesses
- * to the object can be trapped, to perform migration, GTT binding, surface
- * register allocation, or performance monitoring.
+ * the object) and map it with a call to drm_gem_mmap_obj().
  */
 int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 {
@@ -665,7 +708,6 @@
 	struct drm_device *dev = priv->minor->dev;
 	struct drm_gem_mm *mm = dev->mm_private;
 	struct drm_local_map *map = NULL;
-	struct drm_gem_object *obj;
 	struct drm_hash_item *hash;
 	int ret = 0;
 
@@ -686,32 +728,7 @@
 		goto out_unlock;
 	}
 
-	/* Check for valid size. */
-	if (map->size < vma->vm_end - vma->vm_start) {
-		ret = -EINVAL;
-		goto out_unlock;
-	}
-
-	obj = map->handle;
-	if (!obj->dev->driver->gem_vm_ops) {
-		ret = -EINVAL;
-		goto out_unlock;
-	}
-
-	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
-	vma->vm_ops = obj->dev->driver->gem_vm_ops;
-	vma->vm_private_data = map->handle;
-	vma->vm_page_prot =  pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
-
-	/* Take a ref for this mapping of the object, so that the fault
-	 * handler can dereference the mmap offset's pointer to the object.
-	 * This reference is cleaned up by the corresponding vm_close
-	 * (which should happen whether the vma was created by this call, or
-	 * by a vm_open due to mremap or partial unmap or whatever).
-	 */
-	drm_gem_object_reference(obj);
-
-	drm_vm_open_locked(dev, vma);
+	ret = drm_gem_mmap_obj(map->handle, map->size, vma);
 
 out_unlock:
 	mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
index f731116..52215ed 100644
--- a/drivers/gpu/drm/drm_global.c
+++ b/drivers/gpu/drm/drm_global.c
@@ -66,9 +66,13 @@
 int drm_global_item_ref(struct drm_global_reference *ref)
 {
 	int ret;
-	struct drm_global_item *item = &glob[ref->global_type];
+	struct drm_global_item *item;
 	void *object;
 
+	if (ref->global_type >= DRM_GLOBAL_NUM)
+		return -EINVAL;
+
+	item = &glob[ref->global_type];
 	mutex_lock(&item->mutex);
 	if (item->refcount == 0) {
 		item->object = kzalloc(ref->size, GFP_KERNEL);
@@ -97,8 +101,12 @@
 
 void drm_global_item_unref(struct drm_global_reference *ref)
 {
-	struct drm_global_item *item = &glob[ref->global_type];
+	struct drm_global_item *item;
 
+	if (ref->global_type >= DRM_GLOBAL_NUM)
+		return;
+
+	item = &glob[ref->global_type];
 	mutex_lock(&item->mutex);
 	BUG_ON(item->refcount == 0);
 	BUG_ON(ref->object != item->object);
diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c
index 7e4bae7..a981188 100644
--- a/drivers/gpu/drm/drm_hashtab.c
+++ b/drivers/gpu/drm/drm_hashtab.c
@@ -206,3 +206,34 @@
 	}
 }
 EXPORT_SYMBOL(drm_ht_remove);
+
+
+static struct hlist_node *drm_ht_find_anyused(struct drm_open_hash *ht)
+{
+	struct hlist_head *h_list;
+	struct hlist_node *list;
+	int i;
+
+	for (i = 0; i < (1 << ht->order); i++) {
+		h_list = &ht->table[i];
+		hlist_for_each(list, h_list) {
+			return list;
+		}
+	}
+	return NULL;
+}
+
+
+int drm_ht_find_item_anyused(struct drm_open_hash *ht,
+		     struct drm_hash_item **item)
+{
+	struct hlist_node *list;
+
+	list = drm_ht_find_anyused(ht);
+	if (!list)
+		return -EINVAL;
+
+	*item = hlist_entry(list, struct drm_hash_item, head);
+	return 0;
+}
+EXPORT_SYMBOL(drm_ht_find_item_anyused);
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index d4b20ce..ac3aef8 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -235,9 +235,9 @@
 #endif
 
 	mutex_lock(&dev->struct_mutex);
-	seq_printf(m, "vma use count: %d, high_memory = %pK, 0x%pK\n",
+	seq_printf(m, "vma use count: %d, high_memory = %pK, 0x%llxK\n",
 		   atomic_read(&dev->vma_count),
-		   high_memory, (void *)(unsigned long)virt_to_phys(high_memory));
+		   high_memory, virt_to_phys(high_memory));
 
 	list_for_each_entry(pt, &dev->vmalist, head) {
 		vma = pt->vma;
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index e77bd8b..ffd7a7b 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -38,6 +38,9 @@
 
 #include <linux/pci.h>
 #include <linux/export.h>
+#ifdef CONFIG_X86
+#include <asm/mtrr.h>
+#endif
 
 /**
  * Get the bus id.
@@ -181,7 +184,17 @@
 	map->type = r_list->map->type;
 	map->flags = r_list->map->flags;
 	map->handle = (void *)(unsigned long) r_list->user_token;
-	map->mtrr = r_list->map->mtrr;
+
+#ifdef CONFIG_X86
+	/*
+	 * There appears to be exactly one user of the mtrr index: dritest.
+	 * It's easy enough to keep it working on non-PAT systems.
+	 */
+	map->mtrr = phys_wc_to_mtrr_index(r_list->map->mtrr);
+#else
+	map->mtrr = -1;
+#endif
+
 	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 8bcce78..4c07cba 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -312,7 +312,7 @@
  * \c irq_preinstall() and \c irq_postinstall() functions
  * before and after the installation.
  */
-int drm_irq_install(struct drm_device *dev)
+int drm_irq_install_locked(struct drm_device *dev, int locked)
 {
 	int ret;
 	unsigned long sh_flags = 0;
@@ -324,20 +324,24 @@
 	if (drm_dev_to_irq(dev) == 0)
 		return -EINVAL;
 
-	mutex_lock(&dev->struct_mutex);
+	if (!locked)
+		mutex_lock(&dev->struct_mutex);
 
 	/* Driver must have been initialized */
 	if (!dev->dev_private) {
-		mutex_unlock(&dev->struct_mutex);
+		if (!locked)
+			mutex_unlock(&dev->struct_mutex);
 		return -EINVAL;
 	}
 
 	if (dev->irq_enabled) {
-		mutex_unlock(&dev->struct_mutex);
+		if (!locked)
+			mutex_unlock(&dev->struct_mutex);
 		return -EBUSY;
 	}
 	dev->irq_enabled = 1;
-	mutex_unlock(&dev->struct_mutex);
+	if (!locked)
+		mutex_unlock(&dev->struct_mutex);
 
 	DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev));
 
@@ -358,9 +362,11 @@
 			  sh_flags, irqname, dev);
 
 	if (ret < 0) {
-		mutex_lock(&dev->struct_mutex);
+		if (!locked)
+			mutex_lock(&dev->struct_mutex);
 		dev->irq_enabled = 0;
-		mutex_unlock(&dev->struct_mutex);
+		if (!locked)
+			mutex_unlock(&dev->struct_mutex);
 		return ret;
 	}
 
@@ -372,9 +378,11 @@
 		ret = dev->driver->irq_postinstall(dev);
 
 	if (ret < 0) {
-		mutex_lock(&dev->struct_mutex);
+		if (!locked)
+			mutex_lock(&dev->struct_mutex);
 		dev->irq_enabled = 0;
-		mutex_unlock(&dev->struct_mutex);
+		if (!locked)
+			mutex_unlock(&dev->struct_mutex);
 		if (!drm_core_check_feature(dev, DRIVER_MODESET))
 			vga_client_register(dev->pdev, NULL, NULL, NULL);
 		free_irq(drm_dev_to_irq(dev), dev);
@@ -382,6 +390,13 @@
 
 	return ret;
 }
+EXPORT_SYMBOL(drm_irq_install_locked);
+
+
+int drm_irq_install(struct drm_device *dev)
+{
+	return drm_irq_install_locked(dev, 0);
+}
 EXPORT_SYMBOL(drm_irq_install);
 
 /**
@@ -391,7 +406,7 @@
  *
  * Calls the driver's \c irq_uninstall() function, and stops the irq.
  */
-int drm_irq_uninstall(struct drm_device *dev)
+int drm_irq_uninstall_locked(struct drm_device *dev, int locked)
 {
 	unsigned long irqflags;
 	int irq_enabled, i;
@@ -399,10 +414,12 @@
 	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
 		return -EINVAL;
 
-	mutex_lock(&dev->struct_mutex);
+	if (!locked)
+		mutex_lock(&dev->struct_mutex);
 	irq_enabled = dev->irq_enabled;
 	dev->irq_enabled = 0;
-	mutex_unlock(&dev->struct_mutex);
+	if (!locked)
+		mutex_unlock(&dev->struct_mutex);
 
 	/*
 	 * Wake up any waiters so they don't hang.
@@ -433,8 +450,51 @@
 
 	return 0;
 }
+EXPORT_SYMBOL(drm_irq_uninstall_locked);
+
+
+int drm_irq_uninstall(struct drm_device *dev)
+{
+	return drm_irq_uninstall_locked(dev, 0);
+}
 EXPORT_SYMBOL(drm_irq_uninstall);
 
+
+/**
+ * Release unhandled vblank events
+ */
+void drm_clean_pending_vblanks(struct drm_device *dev)
+{
+	int i;
+	unsigned int seq;
+	unsigned long evflags;
+	struct timeval now;
+	struct drm_pending_vblank_event *e, *t;
+
+	spin_lock_irqsave(&dev->event_lock, evflags);
+	for (i = 0; i < dev->num_crtcs; i++) {
+		seq = drm_vblank_count_and_time(dev, i, &now);
+		list_for_each_entry_safe(e, t,
+					  &dev->vblank_event_list,
+					  base.link) {
+			if (e->pipe != i)
+				continue;
+			DRM_DEBUG("Send vblank event\n");
+			e->event.sequence = seq;
+			e->event.tv_sec = now.tv_sec;
+			e->event.tv_usec = now.tv_usec;
+			drm_vblank_put(dev, e->pipe);
+			list_move_tail(&e->base.link,
+				&e->base.file_priv->event_list);
+			wake_up_interruptible(&e->base.file_priv->event_wait);
+			trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
+							  e->event.sequence);
+		}
+	}
+	spin_unlock_irqrestore(&dev->event_lock, evflags);
+}
+EXPORT_SYMBOL(drm_clean_pending_vblanks);
+
 /**
  * IRQ control ioctl.
  *
@@ -708,7 +768,10 @@
 	/* Subtract time delta from raw timestamp to get final
 	 * vblank_time timestamp for end of vblank.
 	 */
-	etime = ktime_sub_ns(etime, delta_ns);
+	if (delta_ns < 0)
+		etime = ktime_add_ns(etime, -delta_ns);
+	else
+		etime = ktime_sub_ns(etime, delta_ns);
 	*vblank_time = ktime_to_timeval(etime);
 
 	DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
@@ -1289,7 +1352,8 @@
 	DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
 		    (((drm_vblank_count(dev, crtc) -
 		       vblwait->request.sequence) <= (1 << 23)) ||
-		     !dev->irq_enabled));
+		     !dev->irq_enabled ||
+			atomic_read(&dev->halt_count)));
 
 	if (ret != -EINTR) {
 		struct timeval now;
@@ -1308,6 +1372,7 @@
 	drm_vblank_put(dev, crtc);
 	return ret;
 }
+EXPORT_SYMBOL(drm_wait_vblank);
 
 static void drm_handle_vblank_events(struct drm_device *dev, int crtc)
 {
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 07cf99c..feb267f 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -147,33 +147,27 @@
 	}
 }
 
-struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
-					unsigned long start,
-					unsigned long size,
-					bool atomic)
+int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
 {
-	struct drm_mm_node *hole, *node;
-	unsigned long end = start + size;
+	struct drm_mm_node *hole;
+	unsigned long end = node->start + node->size;
 	unsigned long hole_start;
 	unsigned long hole_end;
 
+	BUG_ON(node == NULL);
+
+	/* Find the relevant hole to add our node to */
 	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
-		if (hole_start > start || hole_end < end)
+		if (hole_start > node->start || hole_end < end)
 			continue;
 
-		node = drm_mm_kmalloc(mm, atomic);
-		if (unlikely(node == NULL))
-			return NULL;
-
-		node->start = start;
-		node->size = size;
 		node->mm = mm;
 		node->allocated = 1;
 
 		INIT_LIST_HEAD(&node->hole_stack);
 		list_add(&node->node_list, &hole->node_list);
 
-		if (start == hole_start) {
+		if (node->start == hole_start) {
 			hole->hole_follows = 0;
 			list_del_init(&hole->hole_stack);
 		}
@@ -184,13 +178,14 @@
 			node->hole_follows = 1;
 		}
 
-		return node;
+		return 0;
 	}
 
-	WARN(1, "no hole found for block 0x%lx + 0x%lx\n", start, size);
-	return NULL;
+	WARN(1, "no hole found for node 0x%lx + 0x%lx\n",
+	     node->start, node->size);
+	return -ENOSPC;
 }
-EXPORT_SYMBOL(drm_mm_create_block);
+EXPORT_SYMBOL(drm_mm_reserve_node);
 
 struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
 					     unsigned long size,
@@ -351,6 +346,9 @@
 	struct drm_mm *mm = node->mm;
 	struct drm_mm_node *prev_node;
 
+	if (WARN_ON(!node->allocated))
+		return;
+
 	BUG_ON(node->scanned_block || node->scanned_prev_free
 				   || node->scanned_next_free);
 
@@ -669,7 +667,7 @@
 }
 EXPORT_SYMBOL(drm_mm_clean);
 
-int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
+void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
 {
 	INIT_LIST_HEAD(&mm->hole_stack);
 	INIT_LIST_HEAD(&mm->unused_nodes);
@@ -690,8 +688,6 @@
 	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);
 
 	mm->color_adjust = NULL;
-
-	return 0;
 }
 EXPORT_SYMBOL(drm_mm_init);
 
@@ -699,8 +695,8 @@
 {
 	struct drm_mm_node *entry, *next;
 
-	if (!list_empty(&mm->head_node.node_list)) {
-		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
+	if (WARN(!list_empty(&mm->head_node.node_list),
+		 "Memory manager not clean. Delaying takedown\n")) {
 		return;
 	}
 
@@ -716,36 +712,37 @@
 }
 EXPORT_SYMBOL(drm_mm_takedown);
 
+static unsigned long drm_mm_debug_hole(struct drm_mm_node *entry,
+				       const char *prefix)
+{
+	unsigned long hole_start, hole_end, hole_size;
+
+	if (entry->hole_follows) {
+		hole_start = drm_mm_hole_node_start(entry);
+		hole_end = drm_mm_hole_node_end(entry);
+		hole_size = hole_end - hole_start;
+		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
+			prefix, hole_start, hole_end,
+			hole_size);
+		return hole_size;
+	}
+
+	return 0;
+}
+
 void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
 {
 	struct drm_mm_node *entry;
 	unsigned long total_used = 0, total_free = 0, total = 0;
-	unsigned long hole_start, hole_end, hole_size;
 
-	hole_start = drm_mm_hole_node_start(&mm->head_node);
-	hole_end = drm_mm_hole_node_end(&mm->head_node);
-	hole_size = hole_end - hole_start;
-	if (hole_size)
-		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
-			prefix, hole_start, hole_end,
-			hole_size);
-	total_free += hole_size;
+	total_free += drm_mm_debug_hole(&mm->head_node, prefix);
 
 	drm_mm_for_each_node(entry, mm) {
 		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
 			prefix, entry->start, entry->start + entry->size,
 			entry->size);
 		total_used += entry->size;
-
-		if (entry->hole_follows) {
-			hole_start = drm_mm_hole_node_start(entry);
-			hole_end = drm_mm_hole_node_end(entry);
-			hole_size = hole_end - hole_start;
-			printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
-				prefix, hole_start, hole_end,
-				hole_size);
-			total_free += hole_size;
-		}
+		total_free += drm_mm_debug_hole(entry, prefix);
 	}
 	total = total_free + total_used;
 
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index a371ff8..2d1116f 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -535,6 +535,8 @@
 		dmode->flags |= DRM_MODE_FLAG_INTERLACE;
 	if (vm->flags & DISPLAY_FLAGS_DOUBLESCAN)
 		dmode->flags |= DRM_MODE_FLAG_DBLSCAN;
+	if (vm->flags & DISPLAY_FLAGS_DOUBLECLK)
+		dmode->flags |= DRM_MODE_FLAG_DBLCLK;
 	drm_mode_set_name(dmode);
 
 	return 0;
@@ -787,16 +789,17 @@
  * LOCKING:
  * None.
  *
- * Copy an existing mode into another mode, preserving the object id
- * of the destination mode.
+ * Copy an existing mode into another mode, preserving the object id and
+ * list head of the destination mode.
  */
 void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src)
 {
 	int id = dst->base.id;
+	struct list_head head = dst->head;
 
 	*dst = *src;
 	dst->base.id = id;
-	INIT_LIST_HEAD(&dst->head);
+	dst->head = head;
 }
 EXPORT_SYMBOL(drm_mode_copy);
 
@@ -848,6 +851,11 @@
 	} else if (mode1->clock != mode2->clock)
 		return false;
 
+#if defined(CONFIG_DRM_I915)
+	if (mode1->picture_aspect_ratio != mode2->picture_aspect_ratio)
+		return false;
+#endif
+
 	return drm_mode_equal_no_clocks(mode1, mode2);
 }
 EXPORT_SYMBOL(drm_mode_equal);
@@ -1017,6 +1025,11 @@
 	diff = b->hdisplay * b->vdisplay - a->hdisplay * a->vdisplay;
 	if (diff)
 		return diff;
+
+	diff = b->vrefresh - a->vrefresh;
+	if (diff)
+		return diff;
+
 	diff = b->clock - a->clock;
 	return diff;
 }
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 14194b6..80c0b2b 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -278,10 +278,10 @@
 		}
 		if (drm_core_has_MTRR(dev)) {
 			if (dev->agp)
-				dev->agp->agp_mtrr =
-					mtrr_add(dev->agp->agp_info.aper_base,
-						 dev->agp->agp_info.aper_size *
-						 1024 * 1024, MTRR_TYPE_WRCOMB, 1);
+				dev->agp->agp_mtrr = arch_phys_wc_add(
+					dev->agp->agp_info.aper_base,
+					dev->agp->agp_info.aper_size *
+					1024 * 1024);
 		}
 	}
 	return 0;
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 5b7b911..23696b5 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -62,20 +62,125 @@
 	struct dma_buf *dma_buf;
 	uint32_t handle;
 };
-static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle);
+
+struct drm_prime_attachment {
+	struct sg_table *sgt;
+	enum dma_data_direction dir;
+};
+
+static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle)
+{
+	struct drm_prime_member *member;
+
+	member = kmalloc(sizeof(*member), GFP_KERNEL);
+	if (!member)
+		return -ENOMEM;
+
+	get_dma_buf(dma_buf);
+	member->dma_buf = dma_buf;
+	member->handle = handle;
+	list_add(&member->entry, &prime_fpriv->head);
+	return 0;
+}
+
+static int drm_gem_map_attach(struct dma_buf *dma_buf,
+			      struct device *target_dev,
+			      struct dma_buf_attachment *attach)
+{
+	struct drm_prime_attachment *prime_attach;
+	struct drm_gem_object *obj = dma_buf->priv;
+	struct drm_device *dev = obj->dev;
+
+	prime_attach = kzalloc(sizeof(*prime_attach), GFP_KERNEL);
+	if (!prime_attach)
+		return -ENOMEM;
+
+	prime_attach->dir = DMA_NONE;
+	attach->priv = prime_attach;
+
+	if (!dev->driver->gem_prime_pin)
+		return 0;
+
+	return dev->driver->gem_prime_pin(obj);
+}
+
+static void drm_gem_map_detach(struct dma_buf *dma_buf,
+			       struct dma_buf_attachment *attach)
+{
+	struct drm_prime_attachment *prime_attach = attach->priv;
+	struct drm_gem_object *obj = dma_buf->priv;
+	struct drm_device *dev = obj->dev;
+	struct sg_table *sgt;
+
+	if (dev->driver->gem_prime_unpin)
+		dev->driver->gem_prime_unpin(obj);
+
+	if (!prime_attach)
+		return;
+
+	sgt = prime_attach->sgt;
+	if (sgt) {
+		if (prime_attach->dir != DMA_NONE)
+			dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
+					prime_attach->dir);
+		sg_free_table(sgt);
+	}
+
+	kfree(sgt);
+	kfree(prime_attach);
+	attach->priv = NULL;
+}
+
+static void drm_prime_remove_buf_handle_locked(
+		struct drm_prime_file_private *prime_fpriv,
+		struct dma_buf *dma_buf)
+{
+	struct drm_prime_member *member, *safe;
+
+	list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
+		if (member->dma_buf == dma_buf) {
+			dma_buf_put(dma_buf);
+			list_del(&member->entry);
+			kfree(member);
+		}
+	}
+}
 
 static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
 		enum dma_data_direction dir)
 {
+	struct drm_prime_attachment *prime_attach = attach->priv;
 	struct drm_gem_object *obj = attach->dmabuf->priv;
 	struct sg_table *sgt;
 
+	if (WARN_ON(dir == DMA_NONE || !prime_attach))
+		return ERR_PTR(-EINVAL);
+
+	/* return the cached mapping when possible */
+	if (prime_attach->dir == dir)
+		return prime_attach->sgt;
+
+	/*
+	 * two mappings with different directions for the same attachment are
+	 * not allowed
+	 */
+	if (WARN_ON(prime_attach->dir != DMA_NONE))
+		return ERR_PTR(-EBUSY);
+
 	mutex_lock(&obj->dev->struct_mutex);
 
 	sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
 
-	if (!IS_ERR_OR_NULL(sgt))
-		dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir);
+	if (!IS_ERR(sgt)) {
+		if (!dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir)) {
+			sg_free_table(sgt);
+			kfree(sgt);
+			sgt = ERR_PTR(-ENOMEM);
+		} else {
+			prime_attach->sgt = sgt;
+			prime_attach->dir = dir;
+		}
+	}
 
 	mutex_unlock(&obj->dev->struct_mutex);
 	return sgt;
@@ -84,9 +189,7 @@
 static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
 		struct sg_table *sgt, enum dma_data_direction dir)
 {
-	dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
-	sg_free_table(sgt);
-	kfree(sgt);
+	/* nothing to be done here */
 }
 
 static void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
@@ -142,10 +245,18 @@
 static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
 		struct vm_area_struct *vma)
 {
-	return -EINVAL;
+	struct drm_gem_object *obj = dma_buf->priv;
+	struct drm_device *dev = obj->dev;
+
+	if (!dev->driver->gem_prime_mmap)
+		return -ENOSYS;
+
+	return dev->driver->gem_prime_mmap(obj, vma);
 }
 
 static const struct dma_buf_ops drm_gem_prime_dmabuf_ops =  {
+	.attach = drm_gem_map_attach,
+	.detach = drm_gem_map_detach,
 	.map_dma_buf = drm_gem_map_dma_buf,
 	.unmap_dma_buf = drm_gem_unmap_dma_buf,
 	.release = drm_gem_dmabuf_release,
@@ -185,12 +296,8 @@
 struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
 				     struct drm_gem_object *obj, int flags)
 {
-	if (dev->driver->gem_prime_pin) {
-		int ret = dev->driver->gem_prime_pin(obj);
-		if (ret)
-			return ERR_PTR(ret);
-	}
-	return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size, flags);
+	return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size,
+		600);
 }
 EXPORT_SYMBOL(drm_gem_prime_export);
 
@@ -235,15 +342,34 @@
 	ret = drm_prime_add_buf_handle(&file_priv->prime,
 				       obj->export_dma_buf, handle);
 	if (ret)
-		goto out;
+		goto fail_put_dmabuf;
 
-	*prime_fd = dma_buf_fd(buf, flags);
+	ret = dma_buf_fd(buf, flags);
+	if (ret < 0)
+		goto fail_rm_handle;
+
+	*prime_fd = ret;
 	mutex_unlock(&file_priv->prime.lock);
 	return 0;
 
 out_have_obj:
 	get_dma_buf(dmabuf);
-	*prime_fd = dma_buf_fd(dmabuf, flags);
+	ret = dma_buf_fd(dmabuf, flags);
+	if (ret < 0) {
+		dma_buf_put(dmabuf);
+	} else {
+		*prime_fd = ret;
+		ret = 0;
+	}
+
+	goto out;
+
+fail_rm_handle:
+	drm_prime_remove_buf_handle_locked(&file_priv->prime, buf);
+fail_put_dmabuf:
+	/* clear NOT to be checked when releasing dma_buf */
+	obj->export_dma_buf = NULL;
+	dma_buf_put(buf);
 out:
 	drm_gem_object_unreference_unlocked(obj);
 	mutex_unlock(&file_priv->prime.lock);
@@ -276,7 +402,7 @@
 
 	attach = dma_buf_attach(dma_buf, dev->dev);
 	if (IS_ERR(attach))
-		return ERR_PTR(PTR_ERR(attach));
+		return ERR_CAST(attach);
 
 	get_dma_buf(dma_buf);
 
@@ -412,8 +538,10 @@
 	int ret;
 
 	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
-	if (!sg)
+	if (!sg) {
+		ret = -ENOMEM;
 		goto out;
+	}
 
 	ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
 				nr_pages << PAGE_SHIFT, GFP_KERNEL);
@@ -423,7 +551,7 @@
 	return sg;
 out:
 	kfree(sg);
-	return NULL;
+	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL(drm_prime_pages_to_sg);
 
@@ -492,21 +620,6 @@
 }
 EXPORT_SYMBOL(drm_prime_destroy_file_private);
 
-static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle)
-{
-	struct drm_prime_member *member;
-
-	member = kmalloc(sizeof(*member), GFP_KERNEL);
-	if (!member)
-		return -ENOMEM;
-
-	get_dma_buf(dma_buf);
-	member->dma_buf = dma_buf;
-	member->handle = handle;
-	list_add(&member->entry, &prime_fpriv->head);
-	return 0;
-}
-
 int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle)
 {
 	struct drm_prime_member *member;
@@ -523,16 +636,8 @@
 
 void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf)
 {
-	struct drm_prime_member *member, *safe;
-
 	mutex_lock(&prime_fpriv->lock);
-	list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
-		if (member->dma_buf == dma_buf) {
-			dma_buf_put(dma_buf);
-			list_del(&member->entry);
-			kfree(member);
-		}
-	}
+	drm_prime_remove_buf_handle_locked(prime_fpriv, dma_buf);
 	mutex_unlock(&prime_fpriv->lock);
 }
 EXPORT_SYMBOL(drm_prime_remove_buf_handle);
diff --git a/drivers/gpu/drm/drm_rect.c b/drivers/gpu/drm/drm_rect.c
new file mode 100644
index 0000000..7047ca0
--- /dev/null
+++ b/drivers/gpu/drm/drm_rect.c
@@ -0,0 +1,295 @@
+/*
+ * Copyright (C) 2011-2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <drm/drmP.h>
+#include <drm/drm_rect.h>
+
+/**
+ * drm_rect_intersect - intersect two rectangles
+ * @r1: first rectangle
+ * @r2: second rectangle
+ *
+ * Calculate the intersection of rectangles @r1 and @r2.
+ * @r1 will be overwritten with the intersection.
+ *
+ * RETURNS:
+ * %true if rectangle @r1 is still visible after the operation,
+ * %false otherwise.
+ */
+bool drm_rect_intersect(struct drm_rect *r1, const struct drm_rect *r2)
+{
+	r1->x1 = max(r1->x1, r2->x1);
+	r1->y1 = max(r1->y1, r2->y1);
+	r1->x2 = min(r1->x2, r2->x2);
+	r1->y2 = min(r1->y2, r2->y2);
+
+	return drm_rect_visible(r1);
+}
+EXPORT_SYMBOL(drm_rect_intersect);
+
+/**
+ * drm_rect_clip_scaled - perform a scaled clip operation
+ * @src: source window rectangle
+ * @dst: destination window rectangle
+ * @clip: clip rectangle
+ * @hscale: horizontal scaling factor
+ * @vscale: vertical scaling factor
+ *
+ * Clip rectangle @dst by rectangle @clip. Clip rectangle @src by the
+ * same amounts multiplied by @hscale and @vscale.
+ *
+ * RETURNS:
+ * %true if rectangle @dst is still visible after being clipped,
+ * %false otherwise
+ */
+bool drm_rect_clip_scaled(struct drm_rect *src, struct drm_rect *dst,
+			  const struct drm_rect *clip,
+			  int hscale, int vscale)
+{
+	int diff;
+
+	diff = clip->x1 - dst->x1;
+	if (diff > 0) {
+		int64_t tmp = src->x1 + (int64_t) diff * hscale;
+		src->x1 = clamp_t(int64_t, tmp, INT_MIN, INT_MAX);
+	}
+	diff = clip->y1 - dst->y1;
+	if (diff > 0) {
+		int64_t tmp = src->y1 + (int64_t) diff * vscale;
+		src->y1 = clamp_t(int64_t, tmp, INT_MIN, INT_MAX);
+	}
+	diff = dst->x2 - clip->x2;
+	if (diff > 0) {
+		int64_t tmp = src->x2 - (int64_t) diff * hscale;
+		src->x2 = clamp_t(int64_t, tmp, INT_MIN, INT_MAX);
+	}
+	diff = dst->y2 - clip->y2;
+	if (diff > 0) {
+		int64_t tmp = src->y2 - (int64_t) diff * vscale;
+		src->y2 = clamp_t(int64_t, tmp, INT_MIN, INT_MAX);
+	}
+
+	return drm_rect_intersect(dst, clip);
+}
+EXPORT_SYMBOL(drm_rect_clip_scaled);
+
+static int drm_calc_scale(int src, int dst)
+{
+	int scale = 0;
+
+	if (src < 0 || dst < 0)
+		return -EINVAL;
+
+	if (dst == 0)
+		return 0;
+
+	scale = src / dst;
+
+	return scale;
+}
+
+/**
+ * drm_rect_calc_hscale - calculate the horizontal scaling factor
+ * @src: source window rectangle
+ * @dst: destination window rectangle
+ * @min_hscale: minimum allowed horizontal scaling factor
+ * @max_hscale: maximum allowed horizontal scaling factor
+ *
+ * Calculate the horizontal scaling factor as
+ * (@src width) / (@dst width).
+ *
+ * RETURNS:
+ * The horizontal scaling factor, or errno of out of limits.
+ */
+int drm_rect_calc_hscale(const struct drm_rect *src,
+			 const struct drm_rect *dst,
+			 int min_hscale, int max_hscale)
+{
+	int src_w = drm_rect_width(src);
+	int dst_w = drm_rect_width(dst);
+	int hscale = drm_calc_scale(src_w, dst_w);
+
+	if (hscale < 0 || dst_w == 0)
+		return hscale;
+
+	if (hscale < min_hscale || hscale > max_hscale)
+		return -ERANGE;
+
+	return hscale;
+}
+EXPORT_SYMBOL(drm_rect_calc_hscale);
+
+/**
+ * drm_rect_calc_vscale - calculate the vertical scaling factor
+ * @src: source window rectangle
+ * @dst: destination window rectangle
+ * @min_vscale: minimum allowed vertical scaling factor
+ * @max_vscale: maximum allowed vertical scaling factor
+ *
+ * Calculate the vertical scaling factor as
+ * (@src height) / (@dst height).
+ *
+ * RETURNS:
+ * The vertical scaling factor, or errno of out of limits.
+ */
+int drm_rect_calc_vscale(const struct drm_rect *src,
+			 const struct drm_rect *dst,
+			 int min_vscale, int max_vscale)
+{
+	int src_h = drm_rect_height(src);
+	int dst_h = drm_rect_height(dst);
+	int vscale = drm_calc_scale(src_h, dst_h);
+
+	if (vscale < 0 || dst_h == 0)
+		return vscale;
+
+	if (vscale < min_vscale || vscale > max_vscale)
+		return -ERANGE;
+
+	return vscale;
+}
+EXPORT_SYMBOL(drm_rect_calc_vscale);
+
+/**
+ * drm_calc_hscale_relaxed - calculate the horizontal scaling factor
+ * @src: source window rectangle
+ * @dst: destination window rectangle
+ * @min_hscale: minimum allowed horizontal scaling factor
+ * @max_hscale: maximum allowed horizontal scaling factor
+ *
+ * Calculate the horizontal scaling factor as
+ * (@src width) / (@dst width).
+ *
+ * If the calculated scaling factor is below @min_vscale,
+ * decrease the height of rectangle @dst to compensate.
+ *
+ * If the calculated scaling factor is above @max_vscale,
+ * decrease the height of rectangle @src to compensate.
+ *
+ * RETURNS:
+ * The horizontal scaling factor.
+ */
+int drm_rect_calc_hscale_relaxed(struct drm_rect *src,
+				 struct drm_rect *dst,
+				 int min_hscale, int max_hscale)
+{
+	int src_w = drm_rect_width(src);
+	int dst_w = drm_rect_width(dst);
+	int hscale = drm_calc_scale(src_w, dst_w);
+
+	if (hscale < 0 || dst_w == 0)
+		return hscale;
+
+	if (hscale < min_hscale) {
+		int max_dst_w = src_w / min_hscale;
+
+		drm_rect_adjust_size(dst, max_dst_w - dst_w, 0);
+
+		return min_hscale;
+	}
+
+	if (hscale > max_hscale) {
+		int max_src_w = dst_w * max_hscale;
+
+		drm_rect_adjust_size(src, max_src_w - src_w, 0);
+
+		return max_hscale;
+	}
+
+	return hscale;
+}
+EXPORT_SYMBOL(drm_rect_calc_hscale_relaxed);
+
+/**
+ * drm_rect_calc_vscale_relaxed - calculate the vertical scaling factor
+ * @src: source window rectangle
+ * @dst: destination window rectangle
+ * @min_vscale: minimum allowed vertical scaling factor
+ * @max_vscale: maximum allowed vertical scaling factor
+ *
+ * Calculate the vertical scaling factor as
+ * (@src height) / (@dst height).
+ *
+ * If the calculated scaling factor is below @min_vscale,
+ * decrease the height of rectangle @dst to compensate.
+ *
+ * If the calculated scaling factor is above @max_vscale,
+ * decrease the height of rectangle @src to compensate.
+ *
+ * RETURNS:
+ * The vertical scaling factor.
+ */
+int drm_rect_calc_vscale_relaxed(struct drm_rect *src,
+				 struct drm_rect *dst,
+				 int min_vscale, int max_vscale)
+{
+	int src_h = drm_rect_height(src);
+	int dst_h = drm_rect_height(dst);
+	int vscale = drm_calc_scale(src_h, dst_h);
+
+	if (vscale < 0 || dst_h == 0)
+		return vscale;
+
+	if (vscale < min_vscale) {
+		int max_dst_h = src_h / min_vscale;
+
+		drm_rect_adjust_size(dst, 0, max_dst_h - dst_h);
+
+		return min_vscale;
+	}
+
+	if (vscale > max_vscale) {
+		int max_src_h = dst_h * max_vscale;
+
+		drm_rect_adjust_size(src, 0, max_src_h - src_h);
+
+		return max_vscale;
+	}
+
+	return vscale;
+}
+EXPORT_SYMBOL(drm_rect_calc_vscale_relaxed);
+
+/**
+ * drm_rect_debug_print - print the rectangle information
+ * @r: rectangle to print
+ * @fixed_point: rectangle is in 16.16 fixed point format
+ */
+void drm_rect_debug_print(const struct drm_rect *r, bool fixed_point)
+{
+	int w = drm_rect_width(r);
+	int h = drm_rect_height(r);
+
+	if (fixed_point)
+		DRM_DEBUG_KMS("%d.%06ux%d.%06u%+d.%06u%+d.%06u\n",
+			      w >> 16, ((w & 0xffff) * 15625) >> 10,
+			      h >> 16, ((h & 0xffff) * 15625) >> 10,
+			      r->x1 >> 16, ((r->x1 & 0xffff) * 15625) >> 10,
+			      r->y1 >> 16, ((r->y1 & 0xffff) * 15625) >> 10);
+	else
+		DRM_DEBUG_KMS("%dx%d%+d%+d\n", w, h, r->x1, r->y1);
+}
+EXPORT_SYMBOL(drm_rect_debug_print);
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 16f3ec5..8f62c38 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -203,7 +203,7 @@
 int drm_setmaster_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv)
 {
-	int ret;
+	int ret = 0;
 
 	if (file_priv->is_master)
 		return 0;
@@ -229,7 +229,7 @@
 	}
 	mutex_unlock(&dev->struct_mutex);
 
-	return 0;
+	return ret;
 }
 
 int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
@@ -266,6 +266,9 @@
 	spin_lock_init(&dev->event_lock);
 	mutex_init(&dev->struct_mutex);
 	mutex_init(&dev->ctxlist_mutex);
+	mutex_init(&dev->halt_mutex);
+	init_waitqueue_head(&dev->ioctl_queue);
+	init_waitqueue_head(&dev->halt_queue);
 
 	if (drm_ht_create(&dev->map_hash, 12)) {
 		return -ENOMEM;
@@ -451,14 +454,8 @@
 
 	drm_lastclose(dev);
 
-	if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) &&
-	    dev->agp && dev->agp->agp_mtrr >= 0) {
-		int retval;
-		retval = mtrr_del(dev->agp->agp_mtrr,
-				  dev->agp->agp_info.aper_base,
-				  dev->agp->agp_info.aper_size * 1024 * 1024);
-		DRM_DEBUG("mtrr_del=%d\n", retval);
-	}
+	if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) && dev->agp)
+		arch_phys_wc_del(dev->agp->agp_mtrr);
 
 	if (dev->driver->unload)
 		dev->driver->unload(dev);
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 0229665..2290b3b 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -30,14 +30,14 @@
 };
 
 /**
- * drm_class_suspend - DRM class suspend hook
+ * __drm_class_suspend - internal DRM class suspend routine
  * @dev: Linux device to suspend
  * @state: power state to enter
  *
  * Just figures out what the actual struct drm_device associated with
  * @dev is and calls its suspend hook, if present.
  */
-static int drm_class_suspend(struct device *dev, pm_message_t state)
+static int __drm_class_suspend(struct device *dev, pm_message_t state)
 {
 	if (dev->type == &drm_sysfs_device_minor) {
 		struct drm_minor *drm_minor = to_drm_minor(dev);
@@ -52,6 +52,26 @@
 }
 
 /**
+ * drm_class_suspend - internal DRM class suspend hook. Simply calls
+ * __drm_class_suspend() with the correct pm state.
+ * @dev: Linux device to suspend
+ */
+static int drm_class_suspend(struct device *dev)
+{
+	return __drm_class_suspend(dev, PMSG_SUSPEND);
+}
+
+/**
+ * drm_class_freeze - internal DRM class freeze hook. Simply calls
+ * __drm_class_suspend() with the correct pm state.
+ * @dev: Linux device to freeze
+ */
+static int drm_class_freeze(struct device *dev)
+{
+	return __drm_class_suspend(dev, PMSG_FREEZE);
+}
+
+/**
  * drm_class_resume - DRM class resume hook
  * @dev: Linux device to resume
  *
@@ -72,6 +92,12 @@
 	return 0;
 }
 
+static const struct dev_pm_ops drm_class_dev_pm_ops = {
+	.suspend	= drm_class_suspend,
+	.resume		= drm_class_resume,
+	.freeze		= drm_class_freeze,
+};
+
 static char *drm_devnode(struct device *dev, umode_t *mode)
 {
 	return kasprintf(GFP_KERNEL, "dri/%s", dev_name(dev));
@@ -106,8 +132,7 @@
 		goto err_out;
 	}
 
-	class->suspend = drm_class_suspend;
-	class->resume = drm_class_resume;
+	class->pm = &drm_class_dev_pm_ops;
 
 	err = class_create_file(class, &class_attr_version.attr);
 	if (err)
diff --git a/drivers/gpu/drm/drm_trace.h b/drivers/gpu/drm/drm_trace.h
index 03ea964..27cc95f 100644
--- a/drivers/gpu/drm/drm_trace.h
+++ b/drivers/gpu/drm/drm_trace.h
@@ -21,7 +21,7 @@
 		    __entry->crtc = crtc;
 		    __entry->seq = seq;
 		    ),
-	    TP_printk("crtc=%d, seq=%d", __entry->crtc, __entry->seq)
+	    TP_printk("crtc=%d, seq=%u", __entry->crtc, __entry->seq)
 );
 
 TRACE_EVENT(drm_vblank_event_queued,
@@ -37,7 +37,7 @@
 		    __entry->crtc = crtc;
 		    __entry->seq = seq;
 		    ),
-	    TP_printk("pid=%d, crtc=%d, seq=%d", __entry->pid, __entry->crtc, \
+	    TP_printk("pid=%d, crtc=%d, seq=%u", __entry->pid, __entry->crtc, \
 		      __entry->seq)
 );
 
@@ -54,7 +54,7 @@
 		    __entry->crtc = crtc;
 		    __entry->seq = seq;
 		    ),
-	    TP_printk("pid=%d, crtc=%d, seq=%d", __entry->pid, __entry->crtc, \
+	    TP_printk("pid=%d, crtc=%d, seq=%u", __entry->pid, __entry->crtc, \
 		      __entry->seq)
 );
 
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 1d4f7c9..b3ddc12 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -43,18 +43,19 @@
 static void drm_vm_open(struct vm_area_struct *vma);
 static void drm_vm_close(struct vm_area_struct *vma);
 
-static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
+static pgprot_t drm_io_prot(struct drm_local_map *map,
+			    struct vm_area_struct *vma)
 {
 	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
 
 #if defined(__i386__) || defined(__x86_64__)
-	if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
-		pgprot_val(tmp) |= _PAGE_PCD;
-		pgprot_val(tmp) &= ~_PAGE_PWT;
-	}
+	if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING))
+		tmp = pgprot_noncached(tmp);
+	else
+		tmp = pgprot_writecombine(tmp);
 #elif defined(__powerpc__)
 	pgprot_val(tmp) |= _PAGE_NO_CACHE;
-	if (map_type == _DRM_REGISTERS)
+	if (map->type == _DRM_REGISTERS)
 		pgprot_val(tmp) |= _PAGE_GUARDED;
 #elif defined(__ia64__)
 	if (efi_range_is_wc(vma->vm_start, vma->vm_end -
@@ -250,13 +251,8 @@
 			switch (map->type) {
 			case _DRM_REGISTERS:
 			case _DRM_FRAME_BUFFER:
-				if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
-					int retcode;
-					retcode = mtrr_del(map->mtrr,
-							   map->offset,
-							   map->size);
-					DRM_DEBUG("mtrr_del = %d\n", retcode);
-				}
+				if (drm_core_has_MTRR(dev))
+					arch_phys_wc_del(map->mtrr);
 				iounmap(map->handle);
 				break;
 			case _DRM_SHM:
@@ -493,9 +489,12 @@
 		  vma->vm_start, vma->vm_end, vma->vm_pgoff);
 
 	/* Length must match exact page count */
-	if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
+	if (!dma) {
+		DRM_DEBUG("return EINVAL\n");
 		return -EINVAL;
 	}
+	if (!dma || (length >> PAGE_SHIFT) != dma->page_count)
+		return -EINVAL;
 
 	if (!capable(CAP_SYS_ADMIN) &&
 	    (dma->flags & _DRM_DMA_USE_PCI_RO)) {
@@ -617,8 +616,7 @@
 	case _DRM_FRAME_BUFFER:
 	case _DRM_REGISTERS:
 		offset = drm_core_get_reg_ofs(dev);
-		vma->vm_flags |= VM_IO;	/* not in core dump */
-		vma->vm_page_prot = drm_io_prot(map->type, vma);
+		vma->vm_page_prot = drm_io_prot(map, vma);
 		if (io_remap_pfn_range(vma, vma->vm_start,
 				       (map->offset + offset) >> PAGE_SHIFT,
 				       vma->vm_end - vma->vm_start,
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 3b315ba..17d9b0b 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1511,6 +1511,14 @@
 	dev_priv->dev = dev;
 	dev_priv->info = info;
 
+	spin_lock_init(&dev_priv->irq_lock);
+	spin_lock_init(&dev_priv->gpu_error.lock);
+	spin_lock_init(&dev_priv->rps.lock);
+	spin_lock_init(&dev_priv->gt_lock);
+	mutex_init(&dev_priv->dpio_lock);
+	mutex_init(&dev_priv->rps.hw_lock);
+	mutex_init(&dev_priv->modeset_restore_lock);
+
 	i915_dump_device_info(dev_priv);
 
 	if (i915_get_bridge_dev(dev)) {
@@ -1601,6 +1609,8 @@
 	intel_detect_pch(dev);
 
 	intel_irq_init(dev);
+	intel_pm_init(dev);
+	intel_gt_sanitize(dev);
 	intel_gt_init(dev);
 
 	/* Try to make sure MCHBAR is enabled before poking at it */
@@ -1626,14 +1636,6 @@
 	if (!IS_I945G(dev) && !IS_I945GM(dev))
 		pci_enable_msi(dev->pdev);
 
-	spin_lock_init(&dev_priv->irq_lock);
-	spin_lock_init(&dev_priv->gpu_error.lock);
-	spin_lock_init(&dev_priv->rps.lock);
-	mutex_init(&dev_priv->dpio_lock);
-
-	mutex_init(&dev_priv->rps.hw_lock);
-	mutex_init(&dev_priv->modeset_restore_lock);
-
 	dev_priv->num_plane = 1;
 	if (IS_VALLEYVIEW(dev))
 		dev_priv->num_plane = 2;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index a2e4953..bc6cd31 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -685,7 +685,7 @@
 {
 	int error = 0;
 
-	intel_gt_reset(dev);
+	intel_gt_sanitize(dev);
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 		mutex_lock(&dev->struct_mutex);
@@ -711,7 +711,7 @@
 
 	pci_set_master(dev->pdev);
 
-	intel_gt_reset(dev);
+	intel_gt_sanitize(dev);
 
 	/*
 	 * Platforms with opregion should have sane BIOS, older ones (gen3 and
@@ -1247,21 +1247,21 @@
 
 #define __i915_read(x, y) \
 u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
+	unsigned long irqflags; \
 	u##x val = 0; \
+	spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
 	if (IS_GEN5(dev_priv->dev)) \
 		ilk_dummy_write(dev_priv); \
 	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
-		unsigned long irqflags; \
-		spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
 		if (dev_priv->forcewake_count == 0) \
 			dev_priv->gt.force_wake_get(dev_priv); \
 		val = read##y(dev_priv->regs + reg); \
 		if (dev_priv->forcewake_count == 0) \
 			dev_priv->gt.force_wake_put(dev_priv); \
-		spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
 	} else { \
 		val = read##y(dev_priv->regs + reg); \
 	} \
+	spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
 	trace_i915_reg_rw(false, reg, val, sizeof(val)); \
 	return val; \
 }
@@ -1274,8 +1274,10 @@
 
 #define __i915_write(x, y) \
 void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
+	unsigned long irqflags; \
 	u32 __fifo_ret = 0; \
 	trace_i915_reg_rw(true, reg, val, sizeof(val)); \
+	spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
 	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
 		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
 	} \
@@ -1287,6 +1289,7 @@
 		gen6_gt_check_fifodbg(dev_priv); \
 	} \
 	hsw_unclaimed_reg_check(dev_priv, reg); \
+	spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
 }
 __i915_write(8, b)
 __i915_write(16, w)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 9669a0b..47d8b68 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -491,6 +491,7 @@
 #define QUIRK_PIPEA_FORCE (1<<0)
 #define QUIRK_LVDS_SSC_DISABLE (1<<1)
 #define QUIRK_INVERT_BRIGHTNESS (1<<2)
+#define QUIRK_NO_PCH_PWM_ENABLE (1<<3)
 
 struct intel_fbdev;
 struct intel_fbc_work;
@@ -1474,9 +1475,10 @@
 void i915_handle_error(struct drm_device *dev, bool wedged);
 
 extern void intel_irq_init(struct drm_device *dev);
+extern void intel_pm_init(struct drm_device *dev);
 extern void intel_hpd_init(struct drm_device *dev);
 extern void intel_gt_init(struct drm_device *dev);
-extern void intel_gt_reset(struct drm_device *dev);
+extern void intel_gt_sanitize(struct drm_device *dev);
 
 void i915_error_state_free(struct kref *error_ref);
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 9e35daf..0a30088 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1160,7 +1160,8 @@
 	/* Manually manage the write flush as we may have not yet
 	 * retired the buffer.
 	 */
-	if (obj->last_write_seqno &&
+	if (ret == 0 &&
+	    obj->last_write_seqno &&
 	    i915_seqno_passed(seqno, obj->last_write_seqno)) {
 		obj->last_write_seqno = 0;
 		obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
@@ -1880,6 +1881,10 @@
 	u32 seqno = intel_ring_get_seqno(ring);
 
 	BUG_ON(ring == NULL);
+	if (obj->ring != ring && obj->last_write_seqno) {
+		/* Keep the seqno relative to the current ring */
+		obj->last_write_seqno = seqno;
+	}
 	obj->ring = ring;
 
 	/* Add a reference if we're newly entering the active list. */
@@ -2133,7 +2138,17 @@
 
 	for (i = 0; i < dev_priv->num_fence_regs; i++) {
 		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
-		i915_gem_write_fence(dev, i, reg->obj);
+
+		/*
+		 * Commit delayed tiling changes if we have an object still
+		 * attached to the fence, otherwise just clear the fence.
+		 */
+		if (reg->obj) {
+			i915_gem_object_update_fence(reg->obj, reg,
+						     reg->obj->tiling_mode);
+		} else {
+			i915_gem_write_fence(dev, i, NULL);
+		}
 	}
 }
 
@@ -2533,7 +2548,6 @@
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int fence_reg;
 	int fence_pitch_shift;
-	uint64_t val;
 
 	if (INTEL_INFO(dev)->gen >= 6) {
 		fence_reg = FENCE_REG_SANDYBRIDGE_0;
@@ -2543,8 +2557,23 @@
 		fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
 	}
 
+	fence_reg += reg * 8;
+
+	/* To w/a incoherency with non-atomic 64-bit register updates,
+	 * we split the 64-bit update into two 32-bit writes. In order
+	 * for a partial fence not to be evaluated between writes, we
+	 * precede the update with write to turn off the fence register,
+	 * and only enable the fence as the last step.
+	 *
+	 * For extra levels of paranoia, we make sure each step lands
+	 * before applying the next step.
+	 */
+	I915_WRITE(fence_reg, 0);
+	POSTING_READ(fence_reg);
+
 	if (obj) {
 		u32 size = obj->gtt_space->size;
+		uint64_t val;
 
 		val = (uint64_t)((obj->gtt_offset + size - 4096) &
 				 0xfffff000) << 32;
@@ -2553,12 +2582,16 @@
 		if (obj->tiling_mode == I915_TILING_Y)
 			val |= 1 << I965_FENCE_TILING_Y_SHIFT;
 		val |= I965_FENCE_REG_VALID;
-	} else
-		val = 0;
 
-	fence_reg += reg * 8;
-	I915_WRITE64(fence_reg, val);
-	POSTING_READ(fence_reg);
+		I915_WRITE(fence_reg + 4, val >> 32);
+		POSTING_READ(fence_reg + 4);
+
+		I915_WRITE(fence_reg + 0, val);
+		POSTING_READ(fence_reg);
+	} else {
+		I915_WRITE(fence_reg + 4, 0);
+		POSTING_READ(fence_reg + 4);
+	}
 }
 
 static void i915_write_fence_reg(struct drm_device *dev, int reg,
@@ -2653,6 +2686,10 @@
 	if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
 		mb();
 
+	WARN(obj && (!obj->stride || !obj->tiling_mode),
+	     "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
+	     obj->stride, obj->tiling_mode);
+
 	switch (INTEL_INFO(dev)->gen) {
 	case 7:
 	case 6:
@@ -2712,6 +2749,7 @@
 		fence->obj = NULL;
 		list_del_init(&fence->lru_list);
 	}
+	obj->fence_dirty = false;
 }
 
 static int
@@ -2841,7 +2879,6 @@
 		return 0;
 
 	i915_gem_object_update_fence(obj, reg, enable);
-	obj->fence_dirty = false;
 
 	return 0;
 }
@@ -4456,7 +4493,7 @@
 	list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list)
 		if (obj->pages_pin_count == 0)
 			cnt += obj->base.size >> PAGE_SHIFT;
-	list_for_each_entry(obj, &dev_priv->mm.inactive_list, gtt_list)
+	list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list)
 		if (obj->pin_count == 0 && obj->pages_pin_count == 0)
 			cnt += obj->base.size >> PAGE_SHIFT;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index a1e8ecb..3bc8a58 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -113,7 +113,7 @@
 	case 7:
 		reg = I915_READ(GEN7_CXT_SIZE);
 		if (IS_HASWELL(dev))
-			ret = HSW_CXT_TOTAL_SIZE(reg) * 64;
+			ret = HSW_CXT_TOTAL_SIZE;
 		else
 			ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
 		break;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 0aa2ef0..c8d16a6 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -70,15 +70,6 @@
 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
 };
 
-static const u32 hpd_status_i965[] = {
-	 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
-	 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I965,
-	 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I965,
-	 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
-	 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
-	 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
-};
-
 static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
@@ -1018,6 +1009,34 @@
 	return ret;
 }
 
+static void i915_error_wake_up(struct drm_i915_private *dev_priv,
+			       bool reset_completed)
+{
+	struct intel_ring_buffer *ring;
+	int i;
+
+	/*
+	 * Notify all waiters for GPU completion events that reset state has
+	 * been changed, and that they need to restart their wait after
+	 * checking for potential errors (and bail out to drop locks if there is
+	 * a gpu reset pending so that i915_error_work_func can acquire them).
+	 */
+
+	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
+	for_each_ring(ring, dev_priv, i)
+		wake_up_all(&ring->irq_queue);
+
+	/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
+	wake_up_all(&dev_priv->pending_flip_queue);
+
+	/*
+	 * Signal tasks blocked in i915_gem_wait_for_error that the pending
+	 * reset state is cleared.
+	 */
+	if (reset_completed)
+		wake_up_all(&dev_priv->gpu_error.reset_queue);
+}
+
 /**
  * i915_error_work_func - do process context error handling work
  * @work: work struct
@@ -1032,11 +1051,10 @@
 	drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
 						    gpu_error);
 	struct drm_device *dev = dev_priv->dev;
-	struct intel_ring_buffer *ring;
 	char *error_event[] = { "ERROR=1", NULL };
 	char *reset_event[] = { "RESET=1", NULL };
 	char *reset_done_event[] = { "ERROR=0", NULL };
-	int i, ret;
+	int ret;
 
 	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
 
@@ -1055,8 +1073,16 @@
 		kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
 				   reset_event);
 
+		/*
+		 * All state reset _must_ be completed before we update the
+		 * reset counter, for otherwise waiters might miss the reset
+		 * pending state and not properly drop locks, resulting in
+		 * deadlocks with the reset work.
+		 */
 		ret = i915_reset(dev);
 
+		intel_display_handle_reset(dev);
+
 		if (ret == 0) {
 			/*
 			 * After all the gem state is reset, increment the reset
@@ -1077,12 +1103,11 @@
 			atomic_set(&error->reset_counter, I915_WEDGED);
 		}
 
-		for_each_ring(ring, dev_priv, i)
-			wake_up_all(&ring->irq_queue);
-
-		intel_display_handle_reset(dev);
-
-		wake_up_all(&dev_priv->gpu_error.reset_queue);
+		/*
+		 * Note: The wake_up also serves as a memory barrier so that
+		 * waiters see the update value of the reset counter atomic_t.
+		 */
+		i915_error_wake_up(dev_priv, true);
 	}
 }
 
@@ -1718,8 +1743,6 @@
 void i915_handle_error(struct drm_device *dev, bool wedged)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_ring_buffer *ring;
-	int i;
 
 	i915_capture_error_state(dev);
 	i915_report_and_clear_eir(dev);
@@ -1729,14 +1752,28 @@
 				&dev_priv->gpu_error.reset_counter);
 
 		/*
-		 * Wakeup waiting processes so that the reset work item
-		 * doesn't deadlock trying to grab various locks.
+		 * Wakeup waiting processes so that the reset work function
+		 * i915_error_work_func doesn't deadlock trying to grab various
+		 * locks. By bumping the reset counter first, the woken
+		 * processes will see a reset in progress and back off,
+		 * releasing their locks and then wait for the reset completion.
+		 * We must do this for _all_ gpu waiters that might hold locks
+		 * that the reset work needs to acquire.
+		 *
+		 * Note: The wake_up serves as the required memory barrier to
+		 * ensure that the waiters see the updated value of the reset
+		 * counter atomic_t.
 		 */
-		for_each_ring(ring, dev_priv, i)
-			wake_up_all(&ring->irq_queue);
+		i915_error_wake_up(dev_priv, false);
 	}
 
-	queue_work(dev_priv->wq, &dev_priv->gpu_error.work);
+	/*
+	 * Our reset work can grab modeset locks (since it needs to reset the
+	 * state of outstanding pagelips). Hence it must not be run on our own
+	 * dev-priv->wq work queue for otherwise the flush_work in the pageflip
+	 * code will deadlock.
+	 */
+	schedule_work(&dev_priv->gpu_error.work);
 }
 
 static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
@@ -2952,13 +2989,13 @@
 			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
 			u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
 								  HOTPLUG_INT_STATUS_G4X :
-								  HOTPLUG_INT_STATUS_I965);
+								  HOTPLUG_INT_STATUS_I915);
 
 			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
 				  hotplug_status);
 			if (hotplug_trigger) {
 				if (hotplug_irq_storm_detect(dev, hotplug_trigger,
-							    IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i965))
+							    IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915))
 					i915_hpd_irq_setup(dev);
 				queue_work(dev_priv->wq,
 					   &dev_priv->hotplug_work);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 2d6b62e..b1a0cdb 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -617,6 +617,8 @@
 					will not assert AGPBUSY# and will only
 					be delivered when out of C3. */
 #define   INSTPM_FORCE_ORDERING				(1<<7) /* GEN6+ */
+#define   INSTPM_TLB_INVALIDATE	(1<<9)
+#define   INSTPM_SYNC_FLUSH	(1<<5)
 #define ACTHD	        0x020c8
 #define FW_BLC		0x020d8
 #define FW_BLC2		0x020dc
@@ -1535,14 +1537,13 @@
 					 GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \
 					 GEN7_CXT_GT1_SIZE(ctx_reg) + \
 					 GEN7_CXT_VFSTATE_SIZE(ctx_reg))
-#define HSW_CXT_POWER_SIZE(ctx_reg)	((ctx_reg >> 26) & 0x3f)
-#define HSW_CXT_RING_SIZE(ctx_reg)	((ctx_reg >> 23) & 0x7)
-#define HSW_CXT_RENDER_SIZE(ctx_reg)	((ctx_reg >> 15) & 0xff)
-#define HSW_CXT_TOTAL_SIZE(ctx_reg)	(HSW_CXT_POWER_SIZE(ctx_reg) + \
-					 HSW_CXT_RING_SIZE(ctx_reg) + \
-					 HSW_CXT_RENDER_SIZE(ctx_reg) + \
-					 GEN7_CXT_VFSTATE_SIZE(ctx_reg))
-
+/* Haswell does have the CXT_SIZE register however it does not appear to be
+ * valid. Now, docs explain in dwords what is in the context object. The full
+ * size is 70720 bytes, however, the power context and execlist context will
+ * never be saved (power context is stored elsewhere, and execlists don't work
+ * on HSW) - so the final size is 66944 bytes, which rounds to 17 pages.
+ */
+#define HSW_CXT_TOTAL_SIZE		(17 * PAGE_SIZE)
 
 /*
  * Overlay regs
@@ -1674,10 +1675,16 @@
 #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV	(1 << 2)
 
 #define PORT_HOTPLUG_STAT	(dev_priv->info->display_mmio_offset + 0x61114)
-/* HDMI/DP bits are gen4+ */
-#define   PORTB_HOTPLUG_LIVE_STATUS               (1 << 29)
+/*
+ * HDMI/DP bits are gen4+
+ *
+ * WARNING: Bspec for hpd status bits on gen4 seems to be completely confused.
+ * Please check the detailed lore in the commit message for for experimental
+ * evidence.
+ */
+#define   PORTD_HOTPLUG_LIVE_STATUS               (1 << 29)
 #define   PORTC_HOTPLUG_LIVE_STATUS               (1 << 28)
-#define   PORTD_HOTPLUG_LIVE_STATUS               (1 << 27)
+#define   PORTB_HOTPLUG_LIVE_STATUS               (1 << 27)
 #define   PORTD_HOTPLUG_INT_STATUS		(3 << 21)
 #define   PORTC_HOTPLUG_INT_STATUS		(3 << 19)
 #define   PORTB_HOTPLUG_INT_STATUS		(3 << 17)
@@ -1691,6 +1698,12 @@
 /* SDVO is different across gen3/4 */
 #define   SDVOC_HOTPLUG_INT_STATUS_G4X		(1 << 3)
 #define   SDVOB_HOTPLUG_INT_STATUS_G4X		(1 << 2)
+/*
+ * Bspec seems to be seriously misleaded about the SDVO hpd bits on i965g/gm,
+ * since reality corrobates that they're the same as on gen3. But keep these
+ * bits here (and the comment!) to help any other lost wanderers back onto the
+ * right tracks.
+ */
 #define   SDVOC_HOTPLUG_INT_STATUS_I965		(3 << 4)
 #define   SDVOB_HOTPLUG_INT_STATUS_I965		(3 << 2)
 #define   SDVOC_HOTPLUG_INT_STATUS_I915		(1 << 7)
@@ -1702,13 +1715,6 @@
 						 PORTC_HOTPLUG_INT_STATUS | \
 						 PORTD_HOTPLUG_INT_STATUS)
 
-#define HOTPLUG_INT_STATUS_I965			(CRT_HOTPLUG_INT_STATUS | \
-						 SDVOB_HOTPLUG_INT_STATUS_I965 | \
-						 SDVOC_HOTPLUG_INT_STATUS_I965 | \
-						 PORTB_HOTPLUG_INT_STATUS | \
-						 PORTC_HOTPLUG_INT_STATUS | \
-						 PORTD_HOTPLUG_INT_STATUS)
-
 #define HOTPLUG_INT_STATUS_I915			(CRT_HOTPLUG_INT_STATUS | \
 						 SDVOB_HOTPLUG_INT_STATUS_I915 | \
 						 SDVOC_HOTPLUG_INT_STATUS_I915 | \
@@ -4246,7 +4252,7 @@
 #define EDP_LINK_TRAIN_600MV_0DB_IVB		(0x30 <<22)
 #define EDP_LINK_TRAIN_600MV_3_5DB_IVB		(0x36 <<22)
 #define EDP_LINK_TRAIN_800MV_0DB_IVB		(0x38 <<22)
-#define EDP_LINK_TRAIN_800MV_3_5DB_IVB		(0x33 <<22)
+#define EDP_LINK_TRAIN_800MV_3_5DB_IVB		(0x3e <<22)
 
 /* legacy values */
 #define EDP_LINK_TRAIN_500MV_0DB_IVB		(0x00 <<22)
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index fb961bb..16e674a 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -684,7 +684,7 @@
 		struct intel_digital_port *intel_dig_port =
 			enc_to_dig_port(encoder);
 
-		intel_dp->DP = intel_dig_port->port_reversal |
+		intel_dp->DP = intel_dig_port->saved_port_bits |
 			       DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
 		switch (intel_dp->lane_count) {
 		case 1:
@@ -1324,7 +1324,8 @@
 		 * enabling the port.
 		 */
 		I915_WRITE(DDI_BUF_CTL(port),
-			   intel_dig_port->port_reversal | DDI_BUF_CTL_ENABLE);
+			   intel_dig_port->saved_port_bits |
+			   DDI_BUF_CTL_ENABLE);
 	} else if (type == INTEL_OUTPUT_EDP) {
 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
@@ -1543,8 +1544,9 @@
 	intel_encoder->get_hw_state = intel_ddi_get_hw_state;
 
 	intel_dig_port->port = port;
-	intel_dig_port->port_reversal = I915_READ(DDI_BUF_CTL(port)) &
-					DDI_BUF_PORT_REVERSAL;
+	intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
+					  (DDI_BUF_PORT_REVERSAL |
+					   DDI_A_4_LANES);
 	if (hdmi_connector)
 		intel_dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port);
 	intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 56746dc..ab95259 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3946,8 +3946,6 @@
  * consider. */
 void intel_connector_dpms(struct drm_connector *connector, int mode)
 {
-	struct intel_encoder *encoder = intel_attached_encoder(connector);
-
 	/* All the simple cases only support two dpms states. */
 	if (mode != DRM_MODE_DPMS_ON)
 		mode = DRM_MODE_DPMS_OFF;
@@ -3958,10 +3956,8 @@
 	connector->dpms = mode;
 
 	/* Only need to change hw state when actually enabled */
-	if (encoder->base.crtc)
-		intel_encoder_dpms(encoder, mode);
-	else
-		WARN_ON(encoder->connectors_active != false);
+	if (connector->encoder)
+		intel_encoder_dpms(to_intel_encoder(connector->encoder), mode);
 
 	intel_modeset_check_state(connector->dev);
 }
@@ -4333,7 +4329,8 @@
 
 static void i9xx_update_pll(struct intel_crtc *crtc,
 			    intel_clock_t *reduced_clock,
-			    int num_connectors)
+			    int num_connectors,
+			    bool needs_tv_clock)
 {
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4391,7 +4388,7 @@
 	if (INTEL_INFO(dev)->gen >= 4)
 		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
 
-	if (is_sdvo && intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_TVOUT))
+	if (is_sdvo && needs_tv_clock)
 		dpll |= PLL_REF_INPUT_TVCLKINBC;
 	else if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_TVOUT))
 		/* XXX: just matching BIOS for now */
@@ -4563,6 +4560,10 @@
 
 	pipeconf = I915_READ(PIPECONF(intel_crtc->pipe));
 
+	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
+	    I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE)
+		pipeconf |= PIPECONF_ENABLE;
+
 	if (intel_crtc->pipe == 0 && INTEL_INFO(dev)->gen < 4) {
 		/* Enable pixel doubling when the dot clock is > 90% of the (display)
 		 * core speed.
@@ -4716,7 +4717,8 @@
 	else
 		i9xx_update_pll(intel_crtc,
 				has_reduced_clock ? &reduced_clock : NULL,
-				num_connectors);
+				num_connectors,
+				is_sdvo && is_tv);
 
 	/* Set up the display plane register */
 	dspcntr = DISPPLANE_GAMMA_ENABLE;
@@ -8146,15 +8148,20 @@
 }
 
 static bool
-is_crtc_connector_off(struct drm_crtc *crtc, struct drm_connector *connectors,
-		      int num_connectors)
+is_crtc_connector_off(struct drm_mode_set *set)
 {
 	int i;
 
-	for (i = 0; i < num_connectors; i++)
-		if (connectors[i].encoder &&
-		    connectors[i].encoder->crtc == crtc &&
-		    connectors[i].dpms != DRM_MODE_DPMS_ON)
+	if (set->num_connectors == 0)
+		return false;
+
+	if (WARN_ON(set->connectors == NULL))
+		return false;
+
+	for (i = 0; i < set->num_connectors; i++)
+		if (set->connectors[i]->encoder &&
+		    set->connectors[i]->encoder->crtc == set->crtc &&
+		    set->connectors[i]->dpms != DRM_MODE_DPMS_ON)
 			return true;
 
 	return false;
@@ -8167,10 +8174,8 @@
 
 	/* We should be able to check here if the fb has the same properties
 	 * and then just flip_or_move it */
-	if (set->connectors != NULL &&
-	    is_crtc_connector_off(set->crtc, *set->connectors,
-				  set->num_connectors)) {
-			config->mode_changed = true;
+	if (is_crtc_connector_off(set)) {
+		config->mode_changed = true;
 	} else if (set->crtc->fb != set->fb) {
 		/* If we have no fb then treat it as a full mode set */
 		if (set->crtc->fb == NULL) {
@@ -8914,6 +8919,17 @@
 	DRM_INFO("applying inverted panel brightness quirk\n");
 }
 
+/*
+ * Some machines (Dell XPS13) suffer broken backlight controls if
+ * BLM_PCH_PWM_ENABLE is set.
+ */
+static void quirk_no_pcm_pwm_enable(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	dev_priv->quirks |= QUIRK_NO_PCH_PWM_ENABLE;
+	DRM_INFO("applying no-PCH_PWM_ENABLE quirk\n");
+}
+
 struct intel_quirk {
 	int device;
 	int subsystem_vendor;
@@ -8983,6 +8999,11 @@
 
 	/* Acer Aspire 4736Z */
 	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
+
+	/* Dell XPS13 HD Sandy Bridge */
+	{ 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable },
+	/* Dell XPS13 HD and XPS13 FHD Ivy Bridge */
+	{ 0x0166, 0x1028, 0x058b, quirk_no_pcm_pwm_enable },
 };
 
 static void intel_init_quirks(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 70789b1..80feaec 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -604,7 +604,18 @@
 			DRM_DEBUG_KMS("aux_ch native nack\n");
 			return -EREMOTEIO;
 		case AUX_NATIVE_REPLY_DEFER:
-			udelay(100);
+			/*
+			 * For now, just give more slack to branch devices. We
+			 * could check the DPCD for I2C bit rate capabilities,
+			 * and if available, adjust the interval. We could also
+			 * be more careful with DP-to-Legacy adapters where a
+			 * long legacy cable may force very low I2C bit rates.
+			 */
+			if (intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+			    DP_DWN_STRM_PORT_PRESENT)
+				usleep_range(500, 600);
+			else
+				usleep_range(300, 400);
 			continue;
 		default:
 			DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 624a9e6..7cd5584 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -426,7 +426,7 @@
 struct intel_digital_port {
 	struct intel_encoder base;
 	enum port port;
-	u32 port_reversal;
+	u32 saved_port_bits;
 	struct intel_dp dp;
 	struct intel_hdmi hdmi;
 };
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index eb5e6e9..33cb87f 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -354,7 +354,8 @@
 		POSTING_READ(reg);
 		I915_WRITE(reg, tmp | BLM_PWM_ENABLE);
 
-		if (HAS_PCH_SPLIT(dev)) {
+		if (HAS_PCH_SPLIT(dev) &&
+		    !(dev_priv->quirks & QUIRK_NO_PCH_PWM_ENABLE)) {
 			tmp = I915_READ(BLC_PWM_PCH_CTL1);
 			tmp |= BLM_PCH_PWM_ENABLE;
 			tmp &= ~BLM_PCH_OVERRIDE_ENABLE;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index aa01128..94ad6bc 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4486,7 +4486,7 @@
 	gen6_gt_check_fifodbg(dev_priv);
 }
 
-void intel_gt_reset(struct drm_device *dev)
+void intel_gt_sanitize(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -4497,26 +4497,61 @@
 		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
 			__gen6_gt_force_wake_mt_reset(dev_priv);
 	}
+
+	/* BIOS often leaves RC6 enabled, but disable it for hw init */
+	if (INTEL_INFO(dev)->gen >= 6)
+		intel_disable_gt_powersave(dev);
 }
 
 void intel_gt_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	spin_lock_init(&dev_priv->gt_lock);
-
-	intel_gt_reset(dev);
-
 	if (IS_VALLEYVIEW(dev)) {
 		dev_priv->gt.force_wake_get = vlv_force_wake_get;
 		dev_priv->gt.force_wake_put = vlv_force_wake_put;
-	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
+	} else if (IS_HASWELL(dev)) {
 		dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get;
 		dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put;
+	} else if (IS_IVYBRIDGE(dev)) {
+		u32 ecobus;
+
+		/* IVB configs may use multi-threaded forcewake */
+
+		/* A small trick here - if the bios hasn't configured
+		 * MT forcewake, and if the device is in RC6, then
+		 * force_wake_mt_get will not wake the device and the
+		 * ECOBUS read will return zero. Which will be
+		 * (correctly) interpreted by the test below as MT
+		 * forcewake being disabled.
+		 */
+		mutex_lock(&dev->struct_mutex);
+		__gen6_gt_force_wake_mt_get(dev_priv);
+		ecobus = I915_READ_NOTRACE(ECOBUS);
+		__gen6_gt_force_wake_mt_put(dev_priv);
+		mutex_unlock(&dev->struct_mutex);
+
+		if (ecobus & FORCEWAKE_MT_ENABLE) {
+			dev_priv->gt.force_wake_get =
+						__gen6_gt_force_wake_mt_get;
+			dev_priv->gt.force_wake_put =
+						__gen6_gt_force_wake_mt_put;
+		} else {
+			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
+			DRM_INFO("when using vblank-synced partial screen updates.\n");
+			dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
+			dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
+		}
 	} else if (IS_GEN6(dev)) {
 		dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
 		dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
 	}
+}
+
+void intel_pm_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
 	INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
 			  intel_gen6_powersave_work);
 }
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 1d5d613..48fe23e 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -490,9 +490,6 @@
 	struct pipe_control *pc = ring->private;
 	struct drm_i915_gem_object *obj;
 
-	if (!ring->private)
-		return;
-
 	obj = pc->obj;
 
 	kunmap(sg_page(obj->pages->sgl));
@@ -500,7 +497,6 @@
 	drm_gem_object_unreference(&obj->base);
 
 	kfree(pc);
-	ring->private = NULL;
 }
 
 static int init_render_ring(struct intel_ring_buffer *ring)
@@ -571,7 +567,10 @@
 	if (HAS_BROKEN_CS_TLB(dev))
 		drm_gem_object_unreference(to_gem_object(ring->private));
 
-	cleanup_pipe_control(ring);
+	if (INTEL_INFO(dev)->gen >= 5)
+		cleanup_pipe_control(ring);
+
+	ring->private = NULL;
 }
 
 static void
@@ -908,6 +907,18 @@
 
 	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
 	POSTING_READ(mmio);
+
+	/* Flush the TLB for this page */
+	if (INTEL_INFO(dev)->gen >= 6) {
+		u32 reg = RING_INSTPM(ring->mmio_base);
+		I915_WRITE(reg,
+			   _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
+					      INSTPM_SYNC_FLUSH));
+		if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
+			     1000))
+			DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
+				  ring->name);
+	}
 }
 
 static int
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index b945bc5..a202d8d 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -921,6 +921,14 @@
 	DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
 	pipe_config->pipe_bpp = 8*3;
 
+	/* TV has it's own notion of sync and other mode flags, so clear them. */
+	pipe_config->adjusted_mode.flags = 0;
+
+	/*
+	 * FIXME: We don't check whether the input mode is actually what we want
+	 * or whether userspace is doing something stupid.
+	 */
+
 	return true;
 }
 
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index bf29b2f..988911a 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -198,7 +198,8 @@
 		struct ttm_bo_device bdev;
 	} ttm;
 
-	u32 reg_1e24; /* SE model number */
+	/* SE model number stored in reg 0x1e24 */
+	u32 unique_rev_id;
 };
 
 
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index 9905923..dafe049 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -176,7 +176,7 @@
 
 	/* stash G200 SE model number for later use */
 	if (IS_G200_SE(mdev))
-		mdev->reg_1e24 = RREG32(0x1e24);
+		mdev->unique_rev_id = RREG32(0x1e24);
 
 	ret = mga_vram_init(mdev);
 	if (ret)
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index ee66bad..99e07b6 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1008,7 +1008,7 @@
 
 
 	if (IS_G200_SE(mdev)) {
-		if (mdev->reg_1e24 >= 0x02) {
+		if (mdev->unique_rev_id >= 0x02) {
 			u8 hi_pri_lvl;
 			u32 bpp;
 			u32 mb;
@@ -1038,7 +1038,7 @@
 			WREG8(MGAREG_CRTCEXT_DATA, hi_pri_lvl);
 		} else {
 			WREG8(MGAREG_CRTCEXT_INDEX, 0x06);
-			if (mdev->reg_1e24 >= 0x01)
+			if (mdev->unique_rev_id >= 0x01)
 				WREG8(MGAREG_CRTCEXT_DATA, 0x03);
 			else
 				WREG8(MGAREG_CRTCEXT_DATA, 0x04);
@@ -1410,6 +1410,32 @@
 	return ret;
 }
 
+static uint32_t mga_vga_calculate_mode_bandwidth(struct drm_display_mode *mode,
+							int bits_per_pixel)
+{
+	uint32_t total_area, divisor;
+	int64_t active_area, pixels_per_second, bandwidth;
+	uint64_t bytes_per_pixel = (bits_per_pixel + 7) / 8;
+
+	divisor = 1024;
+
+	if (!mode->htotal || !mode->vtotal || !mode->clock)
+		return 0;
+
+	active_area = mode->hdisplay * mode->vdisplay;
+	total_area = mode->htotal * mode->vtotal;
+
+	pixels_per_second = active_area * mode->clock * 1000;
+	do_div(pixels_per_second, total_area);
+
+	bandwidth = pixels_per_second * bytes_per_pixel * 100;
+	do_div(bandwidth, divisor);
+
+	return (uint32_t)(bandwidth);
+}
+
+#define MODE_BANDWIDTH	MODE_BAD
+
 static int mga_vga_mode_valid(struct drm_connector *connector,
 				 struct drm_display_mode *mode)
 {
@@ -1421,7 +1447,45 @@
 	int bpp = 32;
 	int i = 0;
 
-	/* FIXME: Add bandwidth and g200se limitations */
+	if (IS_G200_SE(mdev)) {
+		if (mdev->unique_rev_id == 0x01) {
+			if (mode->hdisplay > 1600)
+				return MODE_VIRTUAL_X;
+			if (mode->vdisplay > 1200)
+				return MODE_VIRTUAL_Y;
+			if (mga_vga_calculate_mode_bandwidth(mode, bpp)
+				> (24400 * 1024))
+				return MODE_BANDWIDTH;
+		} else if (mdev->unique_rev_id >= 0x02) {
+			if (mode->hdisplay > 1920)
+				return MODE_VIRTUAL_X;
+			if (mode->vdisplay > 1200)
+				return MODE_VIRTUAL_Y;
+			if (mga_vga_calculate_mode_bandwidth(mode, bpp)
+				> (30100 * 1024))
+				return MODE_BANDWIDTH;
+		}
+	} else if (mdev->type == G200_WB) {
+		if (mode->hdisplay > 1280)
+			return MODE_VIRTUAL_X;
+		if (mode->vdisplay > 1024)
+			return MODE_VIRTUAL_Y;
+		if (mga_vga_calculate_mode_bandwidth(mode,
+			bpp > (31877 * 1024)))
+			return MODE_BANDWIDTH;
+	} else if (mdev->type == G200_EV &&
+		(mga_vga_calculate_mode_bandwidth(mode, bpp)
+			> (32700 * 1024))) {
+		return MODE_BANDWIDTH;
+	} else if (mode->type == G200_EH &&
+		(mga_vga_calculate_mode_bandwidth(mode, bpp)
+			> (37500 * 1024))) {
+		return MODE_BANDWIDTH;
+	} else if (mode->type == G200_ER &&
+		(mga_vga_calculate_mode_bandwidth(mode,
+			bpp) > (55000 * 1024))) {
+		return MODE_BANDWIDTH;
+	}
 
 	if (mode->crtc_hdisplay > 2048 || mode->crtc_hsync_start > 4096 ||
 	    mode->crtc_hsync_end > 4096 || mode->crtc_htotal > 4096 ||
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 401c989..d2cb32f 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -347,6 +347,7 @@
 
 	mgabo->gem.driver_private = NULL;
 	mgabo->bo.bdev = &mdev->ttm.bdev;
+	mgabo->bo.bdev->dev_mapping = dev->dev_mapping;
 
 	mgag200_ttm_placement(mgabo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
 
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
index f02fd9f..a66b27c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
@@ -49,18 +49,23 @@
 nv50_dac_sense(struct nv50_disp_priv *priv, int or, u32 loadval)
 {
 	const u32 doff = (or * 0x800);
-	int load = -EINVAL;
+
 	nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80150000);
 	nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
+
 	nv_wr32(priv, 0x61a00c + doff, 0x00100000 | loadval);
 	mdelay(9);
 	udelay(500);
-	nv_wr32(priv, 0x61a00c + doff, 0x80000000);
-	load = (nv_rd32(priv, 0x61a00c + doff) & 0x38000000) >> 27;
-	nv_wr32(priv, 0x61a00c + doff, 0x00000000);
+	loadval = nv_mask(priv, 0x61a00c + doff, 0xffffffff, 0x00000000);
+
 	nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80550000);
 	nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
-	return load;
+
+	nv_debug(priv, "DAC%d sense: 0x%08x\n", or, loadval);
+	if (!(loadval & 0x80000000))
+		return -ETIMEDOUT;
+
+	return (loadval & 0x38000000) >> 27;
 }
 
 int
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c
index f065fc2..db8c6fd 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c
@@ -55,6 +55,10 @@
 	nv_wr32(priv, 0x61c510 + soff, 0x00000000);
 	nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000001);
 
+	nv_mask(priv, 0x61c5d0 + soff, 0x00070001, 0x00010001); /* SPARE, HW_CTS */
+	nv_mask(priv, 0x61c568 + soff, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */
+	nv_mask(priv, 0x61c578 + soff, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */
+
 	/* ??? */
 	nv_mask(priv, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
 	nv_mask(priv, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
index 6a38402..5680d3e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -1107,6 +1107,7 @@
 	u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
 	u32 hval, hreg = 0x614200 + (head * 0x800);
 	u32 oval, oreg;
+	u32 mask;
 	u32 conf = exec_clkcmp(priv, head, 0xff, pclk, &outp);
 	if (conf != ~0) {
 		if (outp.location == 0 && outp.type == DCB_OUTPUT_DP) {
@@ -1133,6 +1134,7 @@
 			oreg = 0x614280 + (ffs(outp.or) - 1) * 0x800;
 			oval = 0x00000000;
 			hval = 0x00000000;
+			mask = 0xffffffff;
 		} else
 		if (!outp.location) {
 			if (outp.type == DCB_OUTPUT_DP)
@@ -1140,14 +1142,16 @@
 			oreg = 0x614300 + (ffs(outp.or) - 1) * 0x800;
 			oval = (conf & 0x0100) ? 0x00000101 : 0x00000000;
 			hval = 0x00000000;
+			mask = 0x00000707;
 		} else {
 			oreg = 0x614380 + (ffs(outp.or) - 1) * 0x800;
 			oval = 0x00000001;
 			hval = 0x00000001;
+			mask = 0x00000707;
 		}
 
 		nv_mask(priv, hreg, 0x0000000f, hval);
-		nv_mask(priv, oreg, 0x00000707, oval);
+		nv_mask(priv, oreg, mask, oval);
 	}
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
index d550226..9d2cd20 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
@@ -20,8 +20,8 @@
 	return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_MC];
 }
 
-#define nouveau_mc_create(p,e,o,d)                                             \
-	nouveau_mc_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_mc_create(p,e,o,m,d)                                           \
+	nouveau_mc_create_((p), (e), (o), (m), sizeof(**d), (void **)d)
 #define nouveau_mc_destroy(p) ({                                               \
 	struct nouveau_mc *pmc = (p); _nouveau_mc_dtor(nv_object(pmc));        \
 })
@@ -33,7 +33,8 @@
 })
 
 int  nouveau_mc_create_(struct nouveau_object *, struct nouveau_object *,
-			struct nouveau_oclass *, int, void **);
+			struct nouveau_oclass *, const struct nouveau_mc_intr *,
+			int, void **);
 void _nouveau_mc_dtor(struct nouveau_object *);
 int  _nouveau_mc_init(struct nouveau_object *);
 int  _nouveau_mc_fini(struct nouveau_object *, bool);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index c434d39..e2d7f38 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -580,8 +580,22 @@
 init_reserved(struct nvbios_init *init)
 {
 	u8 opcode = nv_ro08(init->bios, init->offset);
-	trace("RESERVED\t0x%02x\n", opcode);
-	init->offset += 1;
+	u8 length, i;
+
+	switch (opcode) {
+	case 0xaa:
+		length = 4;
+		break;
+	default:
+		length = 1;
+		break;
+	}
+
+	trace("RESERVED 0x%02x\t", opcode);
+	for (i = 1; i < length; i++)
+		cont(" 0x%02x", nv_ro08(init->bios, init->offset + i));
+	cont("\n");
+	init->offset += length;
 }
 
 /**
@@ -2136,6 +2150,7 @@
 	[0x99] = { init_zm_auxch },
 	[0x9a] = { init_i2c_long_if },
 	[0xa9] = { init_gpio_ne },
+	[0xaa] = { init_reserved },
 };
 
 #define init_opcode_nr (sizeof(init_opcode) / sizeof(init_opcode[0]))
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
index 1c0330b..ec9cd6f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
@@ -80,7 +80,9 @@
 
 int
 nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine,
-		   struct nouveau_oclass *oclass, int length, void **pobject)
+		   struct nouveau_oclass *oclass,
+		   const struct nouveau_mc_intr *intr_map,
+		   int length, void **pobject)
 {
 	struct nouveau_device *device = nv_device(parent);
 	struct nouveau_mc *pmc;
@@ -92,6 +94,8 @@
 	if (ret)
 		return ret;
 
+	pmc->intr_map = intr_map;
+
 	ret = request_irq(device->pdev->irq, nouveau_mc_intr,
 			  IRQF_SHARED, "nouveau", pmc);
 	if (ret < 0)
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
index 8c76971..64aa4ed 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
@@ -50,12 +50,11 @@
 	struct nv04_mc_priv *priv;
 	int ret;
 
-	ret = nouveau_mc_create(parent, engine, oclass, &priv);
+	ret = nouveau_mc_create(parent, engine, oclass, nv04_mc_intr, &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
 
-	priv->base.intr_map = nv04_mc_intr;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
index 5191937..d989178 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
@@ -36,12 +36,11 @@
 	struct nv44_mc_priv *priv;
 	int ret;
 
-	ret = nouveau_mc_create(parent, engine, oclass, &priv);
+	ret = nouveau_mc_create(parent, engine, oclass, nv04_mc_intr, &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
 
-	priv->base.intr_map = nv04_mc_intr;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
index d796924..732d810 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
@@ -52,12 +52,11 @@
 	struct nv50_mc_priv *priv;
 	int ret;
 
-	ret = nouveau_mc_create(parent, engine, oclass, &priv);
+	ret = nouveau_mc_create(parent, engine, oclass, nv50_mc_intr, &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
 
-	priv->base.intr_map = nv50_mc_intr;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
index e82fd21..0d57b4d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
@@ -54,12 +54,11 @@
 	struct nv98_mc_priv *priv;
 	int ret;
 
-	ret = nouveau_mc_create(parent, engine, oclass, &priv);
+	ret = nouveau_mc_create(parent, engine, oclass, nv98_mc_intr, &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
 
-	priv->base.intr_map = nv98_mc_intr;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
index 737bd4b..4c97cd2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
@@ -56,12 +56,11 @@
 	struct nvc0_mc_priv *priv;
 	int ret;
 
-	ret = nouveau_mc_create(parent, engine, oclass, &priv);
+	ret = nouveau_mc_create(parent, engine, oclass, nvc0_mc_intr, &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
 
-	priv->base.intr_map = nvc0_mc_intr;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
index 77c67fc..e66fb77 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
@@ -362,7 +362,7 @@
 	vm->fpde = offset >> (vmm->pgt_bits + 12);
 	vm->lpde = (offset + length - 1) >> (vmm->pgt_bits + 12);
 
-	vm->pgt  = kcalloc(vm->lpde - vm->fpde + 1, sizeof(*vm->pgt), GFP_KERNEL);
+	vm->pgt  = vzalloc((vm->lpde - vm->fpde + 1) * sizeof(*vm->pgt));
 	if (!vm->pgt) {
 		kfree(vm);
 		return -ENOMEM;
@@ -371,7 +371,7 @@
 	ret = nouveau_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
 			      block >> 12);
 	if (ret) {
-		kfree(vm->pgt);
+		vfree(vm->pgt);
 		kfree(vm);
 		return ret;
 	}
@@ -446,7 +446,7 @@
 	}
 
 	nouveau_mm_fini(&vm->mm);
-	kfree(vm->pgt);
+	vfree(vm->pgt);
 	kfree(vm);
 }
 
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index 8e47a9b..22aa996 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -76,7 +76,7 @@
 	struct ttm_mem_reg *mem = &priv->bo->bo.mem;
 	struct nouveau_object *object;
 	u32 start = mem->start * PAGE_SIZE;
-	u32 limit = mem->start + mem->size - 1;
+	u32 limit = start + mem->size - 1;
 	int ret = 0;
 
 	fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index f9701e5..0ee3638 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -39,6 +39,8 @@
 	struct nv10_fence_chan *fctx;
 	struct ttm_mem_reg *mem = &priv->bo->bo.mem;
 	struct nouveau_object *object;
+	u32 start = mem->start * PAGE_SIZE;
+	u32 limit = start + mem->size - 1;
 	int ret, i;
 
 	fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
@@ -51,26 +53,28 @@
 	fctx->base.sync = nv17_fence_sync;
 
 	ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
-				 NvSema, 0x0002,
+				 NvSema, 0x003d,
 				 &(struct nv_dma_class) {
 					.flags = NV_DMA_TARGET_VRAM |
 						 NV_DMA_ACCESS_RDWR,
-					.start = mem->start * PAGE_SIZE,
-					.limit = mem->size - 1,
+					.start = start,
+					.limit = limit,
 				 }, sizeof(struct nv_dma_class),
 				 &object);
 
 	/* dma objects for display sync channel semaphore blocks */
 	for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) {
 		struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
+		u32 start = bo->bo.mem.start * PAGE_SIZE;
+		u32 limit = start + bo->bo.mem.size - 1;
 
 		ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
 					 NvEvoSema0 + i, 0x003d,
 					 &(struct nv_dma_class) {
 						.flags = NV_DMA_TARGET_VRAM |
 							 NV_DMA_ACCESS_RDWR,
-						.start = bo->bo.offset,
-						.limit = bo->bo.offset + 0xfff,
+						.start = start,
+						.limit = limit,
 					 }, sizeof(struct nv_dma_class),
 					 &object);
 	}
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index fb441a7..15da7ef 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -1222,12 +1222,17 @@
 	int r;
 
 	mutex_lock(&ctx->mutex);
+	/* reset data block */
+	ctx->data_block = 0;
 	/* reset reg block */
 	ctx->reg_block = 0;
 	/* reset fb window */
 	ctx->fb_base = 0;
 	/* reset io mode */
 	ctx->io_mode = ATOM_IO_MM;
+	/* reset divmul */
+	ctx->divmul[0] = 0;
+	ctx->divmul[1] = 0;
 	r = atom_execute_table_locked(ctx, index, params);
 	mutex_unlock(&ctx->mutex);
 	return r;
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 064023b..1602398 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -44,6 +44,41 @@
 };
 
 /***** radeon AUX functions *****/
+
+/* Atom needs data in little endian format
+ * so swap as appropriate when copying data to
+ * or from atom. Note that atom operates on
+ * dw units.
+ */
+void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
+{
+#ifdef __BIG_ENDIAN
+	u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */
+	u32 *dst32, *src32;
+	int i;
+
+	memcpy(src_tmp, src, num_bytes);
+	src32 = (u32 *)src_tmp;
+	dst32 = (u32 *)dst_tmp;
+	if (to_le) {
+		for (i = 0; i < ((num_bytes + 3) / 4); i++)
+			dst32[i] = cpu_to_le32(src32[i]);
+		memcpy(dst, dst_tmp, num_bytes);
+	} else {
+		u8 dws = num_bytes & ~3;
+		for (i = 0; i < ((num_bytes + 3) / 4); i++)
+			dst32[i] = le32_to_cpu(src32[i]);
+		memcpy(dst, dst_tmp, dws);
+		if (num_bytes % 4) {
+			for (i = 0; i < (num_bytes % 4); i++)
+				dst[dws+i] = dst_tmp[dws+i];
+		}
+	}
+#else
+	memcpy(dst, src, num_bytes);
+#endif
+}
+
 union aux_channel_transaction {
 	PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1;
 	PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2;
@@ -65,10 +100,10 @@
 
 	base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1);
 
-	memcpy(base, send, send_bytes);
+	radeon_atom_copy_swap(base, send, send_bytes, true);
 
-	args.v1.lpAuxRequest = 0 + 4;
-	args.v1.lpDataOut = 16 + 4;
+	args.v1.lpAuxRequest = cpu_to_le16((u16)(0 + 4));
+	args.v1.lpDataOut = cpu_to_le16((u16)(16 + 4));
 	args.v1.ucDataOutLen = 0;
 	args.v1.ucChannelID = chan->rec.i2c_id;
 	args.v1.ucDelay = delay / 10;
@@ -102,7 +137,7 @@
 		recv_bytes = recv_size;
 
 	if (recv && recv_size)
-		memcpy(recv, base + 16, recv_bytes);
+		radeon_atom_copy_swap(recv, base + 16, recv_bytes, false);
 
 	return recv_bytes;
 }
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 8406c82..4c81e9f 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -186,6 +186,13 @@
 	u8 backlight_level;
 	char bl_name[16];
 
+	/* Mac laptops with multiple GPUs use the gmux driver for backlight
+	 * so don't register a backlight device
+	 */
+	if ((rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
+	    (rdev->pdev->device == 0x6741))
+		return;
+
 	if (!radeon_encoder->enc_priv)
 		return;
 
@@ -1629,8 +1636,12 @@
 			atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
 			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
 			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
-			/* some early dce3.2 boards have a bug in their transmitter control table */
-			if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730))
+			/* some dce3.x boards have a bug in their transmitter control table.
+			 * ACTION_ENABLE_OUTPUT can probably be dropped since ACTION_ENABLE
+			 * does the same thing and more.
+			 */
+			if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730) &&
+			    (rdev->family != CHIP_RS780) && (rdev->family != CHIP_RS880))
 				atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
 		}
 		if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
diff --git a/drivers/gpu/drm/radeon/atombios_i2c.c b/drivers/gpu/drm/radeon/atombios_i2c.c
index 082338d..2ca389d 100644
--- a/drivers/gpu/drm/radeon/atombios_i2c.c
+++ b/drivers/gpu/drm/radeon/atombios_i2c.c
@@ -27,6 +27,8 @@
 #include "radeon.h"
 #include "atom.h"
 
+extern void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);
+
 #define TARGET_HW_I2C_CLOCK 50
 
 /* these are a limitation of ProcessI2cChannelTransaction not the hw */
@@ -77,7 +79,7 @@
 	}
 
 	if (!(flags & HW_I2C_WRITE))
-		memcpy(buf, base, num);
+		radeon_atom_copy_swap(buf, base, num, false);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 0f89ce3..8b6b0ba 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1718,7 +1718,8 @@
 					struct drm_display_mode *mode,
 					struct drm_display_mode *other_mode)
 {
-	u32 tmp;
+	u32 tmp, buffer_alloc, i;
+	u32 pipe_offset = radeon_crtc->crtc_id * 0x20;
 	/*
 	 * Line Buffer Setup
 	 * There are 3 line buffers, each one shared by 2 display controllers.
@@ -1741,18 +1742,34 @@
 	 * non-linked crtcs for maximum line buffer allocation.
 	 */
 	if (radeon_crtc->base.enabled && mode) {
-		if (other_mode)
+		if (other_mode) {
 			tmp = 0; /* 1/2 */
-		else
+			buffer_alloc = 1;
+		} else {
 			tmp = 2; /* whole */
-	} else
+			buffer_alloc = 2;
+		}
+	} else {
 		tmp = 0;
+		buffer_alloc = 0;
+	}
 
 	/* second controller of the pair uses second half of the lb */
 	if (radeon_crtc->crtc_id % 2)
 		tmp += 4;
 	WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp);
 
+	if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
+		WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
+		       DMIF_BUFFERS_ALLOCATED(buffer_alloc));
+		for (i = 0; i < rdev->usec_timeout; i++) {
+			if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
+			    DMIF_BUFFERS_ALLOCATED_COMPLETED)
+				break;
+			udelay(1);
+		}
+	}
+
 	if (radeon_crtc->base.enabled && mode) {
 		switch (tmp) {
 		case 0:
@@ -2973,7 +2990,7 @@
 		rdev->config.evergreen.sx_max_export_size = 256;
 		rdev->config.evergreen.sx_max_export_pos_size = 64;
 		rdev->config.evergreen.sx_max_export_smx_size = 192;
-		rdev->config.evergreen.max_hw_contexts = 8;
+		rdev->config.evergreen.max_hw_contexts = 4;
 		rdev->config.evergreen.sq_num_cf_insts = 2;
 
 		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
@@ -4681,6 +4698,8 @@
 	/* enable pcie gen2 link */
 	evergreen_pcie_gen2_enable(rdev);
 
+	evergreen_mc_program(rdev);
+
 	if (ASIC_IS_DCE5(rdev)) {
 		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
 			r = ni_init_microcode(rdev);
@@ -4708,7 +4727,6 @@
 	if (r)
 		return r;
 
-	evergreen_mc_program(rdev);
 	if (rdev->flags & RADEON_IS_AGP) {
 		evergreen_agp_enable(rdev);
 	} else {
@@ -4854,10 +4872,10 @@
 int evergreen_suspend(struct radeon_device *rdev)
 {
 	r600_audio_fini(rdev);
+	r600_uvd_stop(rdev);
 	radeon_uvd_suspend(rdev);
 	r700_cp_stop(rdev);
 	r600_dma_stop(rdev);
-	r600_uvd_rbc_stop(rdev);
 	evergreen_irq_suspend(rdev);
 	radeon_wb_disable(rdev);
 	evergreen_pcie_gart_disable(rdev);
@@ -4988,6 +5006,7 @@
 	radeon_ib_pool_fini(rdev);
 	radeon_irq_kms_fini(rdev);
 	evergreen_pcie_gart_fini(rdev);
+	r600_uvd_stop(rdev);
 	radeon_uvd_fini(rdev);
 	r600_vram_scratch_fini(rdev);
 	radeon_gem_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index ed7c8a7..bb9ea36 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -128,14 +128,7 @@
 	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
 	uint32_t offset = dig->afmt->offset;
 	uint8_t *frame = buffer + 3;
-
-	/* Our header values (type, version, length) should be alright, Intel
-	 * is using the same. Checksum function also seems to be OK, it works
-	 * fine for audio infoframe. However calculated value is always lower
-	 * by 2 in comparison to fglrx. It breaks displaying anything in case
-	 * of TVs that strictly check the checksum. Hack it manually here to
-	 * workaround this issue. */
-	frame[0x0] += 2;
+	uint8_t *header = buffer;
 
 	WREG32(AFMT_AVI_INFO0 + offset,
 		frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
@@ -144,7 +137,7 @@
 	WREG32(AFMT_AVI_INFO2 + offset,
 		frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
 	WREG32(AFMT_AVI_INFO3 + offset,
-		frame[0xC] | (frame[0xD] << 8));
+		frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
 }
 
 static void evergreen_audio_set_dto(struct drm_encoder *encoder, u32 clock)
@@ -164,9 +157,9 @@
 	 * number (coefficient of two integer numbers.  DCCG_AUDIO_DTOx_PHASE
 	 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
 	 */
+	WREG32(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL(radeon_crtc->crtc_id));
 	WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
 	WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
-	WREG32(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL(radeon_crtc->crtc_id));
 }
 
 
@@ -184,6 +177,9 @@
 	uint32_t offset;
 	ssize_t err;
 
+	if (!dig || !dig->afmt)
+		return;
+
 	/* Silent, r600_hdmi_enable will raise WARN for us */
 	if (!dig->afmt->enabled)
 		return;
@@ -287,6 +283,9 @@
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
 
+	if (!dig || !dig->afmt)
+		return;
+
 	/* Silent, r600_hdmi_enable will raise WARN for us */
 	if (enable && dig->afmt->enabled)
 		return;
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 75c0563..150e318 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -810,6 +810,10 @@
 #       define LATENCY_LOW_WATERMARK(x)                   ((x) << 0)
 #       define LATENCY_HIGH_WATERMARK(x)                  ((x) << 16)
 
+#define	PIPE0_DMIF_BUFFER_CONTROL			  0x0ca0
+#       define DMIF_BUFFERS_ALLOCATED(x)                  ((x) << 0)
+#       define DMIF_BUFFERS_ALLOCATED_COMPLETED           (1 << 4)
+
 #define IH_RB_CNTL                                        0x3e00
 #       define IH_RB_ENABLE                               (1 << 0)
 #       define IH_IB_SIZE(x)                              ((x) << 1) /* log2 */
@@ -1100,7 +1104,7 @@
  * 6. COMMAND [29:22] | BYTE_COUNT [20:0]
  */
 #              define PACKET3_CP_DMA_DST_SEL(x)    ((x) << 20)
-                /* 0 - SRC_ADDR
+                /* 0 - DST_ADDR
 		 * 1 - GDS
 		 */
 #              define PACKET3_CP_DMA_ENGINE(x)     ((x) << 27)
@@ -1115,7 +1119,7 @@
 #              define PACKET3_CP_DMA_CP_SYNC       (1 << 31)
 /* COMMAND */
 #              define PACKET3_CP_DMA_DIS_WC        (1 << 21)
-#              define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
+#              define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
                 /* 0 - none
 		 * 1 - 8 in 16
 		 * 2 - 8 in 32
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 8458330..3bf43a1 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1929,6 +1929,8 @@
 	/* enable pcie gen2 link */
 	evergreen_pcie_gen2_enable(rdev);
 
+	evergreen_mc_program(rdev);
+
 	if (rdev->flags & RADEON_IS_IGP) {
 		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
 			r = ni_init_microcode(rdev);
@@ -1957,7 +1959,6 @@
 	if (r)
 		return r;
 
-	evergreen_mc_program(rdev);
 	r = cayman_pcie_gart_enable(rdev);
 	if (r)
 		return r;
@@ -2133,7 +2134,7 @@
 	radeon_vm_manager_fini(rdev);
 	cayman_cp_enable(rdev, false);
 	cayman_dma_stop(rdev);
-	r600_uvd_rbc_stop(rdev);
+	r600_uvd_stop(rdev);
 	radeon_uvd_suspend(rdev);
 	evergreen_irq_suspend(rdev);
 	radeon_wb_disable(rdev);
@@ -2265,6 +2266,7 @@
 	radeon_vm_manager_fini(rdev);
 	radeon_ib_pool_fini(rdev);
 	radeon_irq_kms_fini(rdev);
+	r600_uvd_stop(rdev);
 	radeon_uvd_fini(rdev);
 	cayman_pcie_gart_fini(rdev);
 	r600_vram_scratch_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index d0314ec..46470dd 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -2935,9 +2935,11 @@
 	seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
 	seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
 	seq_printf(m, "%u dwords in ring\n", count);
-	for (j = 0; j <= count; j++) {
-		i = (rdp + j) & ring->ptr_mask;
-		seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
+	if (ring->ready) {
+		for (j = 0; j <= count; j++) {
+			i = (rdp + j) & ring->ptr_mask;
+			seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
+		}
 	}
 	return 0;
 }
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 6948eb8..f19620b 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2675,12 +2675,29 @@
 	return 0;
 }
 
-void r600_uvd_rbc_stop(struct radeon_device *rdev)
+void r600_uvd_stop(struct radeon_device *rdev)
 {
 	struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
 
 	/* force RBC into idle state */
 	WREG32(UVD_RBC_RB_CNTL, 0x11010101);
+
+	/* Stall UMC and register bus before resetting VCPU */
+	WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
+	WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
+	mdelay(1);
+
+	/* put VCPU into reset */
+	WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET);
+	mdelay(5);
+
+	/* disable VCPU clock */
+	WREG32(UVD_VCPU_CNTL, 0x0);
+
+	/* Unstall UMC and register bus */
+	WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
+	WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3));
+
 	ring->ready = false;
 }
 
@@ -2700,6 +2717,11 @@
 	/* disable interupt */
 	WREG32_P(UVD_MASTINT_EN, 0, ~(1 << 1));
 
+	/* Stall UMC and register bus before resetting VCPU */
+	WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
+	WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
+	mdelay(1);
+
 	/* put LMI, VCPU, RBC etc... into reset */
 	WREG32(UVD_SOFT_RESET, LMI_SOFT_RESET | VCPU_SOFT_RESET |
 	       LBSI_SOFT_RESET | RBC_SOFT_RESET | CSM_SOFT_RESET |
@@ -2729,10 +2751,6 @@
 	WREG32(UVD_MPC_SET_ALU, 0);
 	WREG32(UVD_MPC_SET_MUX, 0x88);
 
-	/* Stall UMC */
-	WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
-	WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
-
 	/* take all subblocks out of reset, except VCPU */
 	WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET);
 	mdelay(5);
@@ -2986,7 +3004,7 @@
 			 struct radeon_fence *fence)
 {
 	struct radeon_ring *ring = &rdev->ring[fence->ring];
-	uint32_t addr = rdev->fence_drv[fence->ring].gpu_addr;
+	uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr;
 
 	radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
 	radeon_ring_write(ring, fence->seq);
@@ -3206,6 +3224,8 @@
 	/* enable pcie gen2 link */
 	r600_pcie_gen2_enable(rdev);
 
+	r600_mc_program(rdev);
+
 	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
 		r = r600_init_microcode(rdev);
 		if (r) {
@@ -3218,7 +3238,6 @@
 	if (r)
 		return r;
 
-	r600_mc_program(rdev);
 	if (rdev->flags & RADEON_IS_AGP) {
 		r600_agp_enable(rdev);
 	} else {
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 456750a..0efe2a9 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -133,14 +133,7 @@
 	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
 	uint32_t offset = dig->afmt->offset;
 	uint8_t *frame = buffer + 3;
-
-	/* Our header values (type, version, length) should be alright, Intel
-	 * is using the same. Checksum function also seems to be OK, it works
-	 * fine for audio infoframe. However calculated value is always lower
-	 * by 2 in comparison to fglrx. It breaks displaying anything in case
-	 * of TVs that strictly check the checksum. Hack it manually here to
-	 * workaround this issue. */
-	frame[0x0] += 2;
+	uint8_t *header = buffer;
 
 	WREG32(HDMI0_AVI_INFO0 + offset,
 		frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
@@ -149,7 +142,7 @@
 	WREG32(HDMI0_AVI_INFO2 + offset,
 		frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
 	WREG32(HDMI0_AVI_INFO3 + offset,
-		frame[0xC] | (frame[0xD] << 8));
+		frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
 }
 
 /*
@@ -245,15 +238,31 @@
 	 * number (coefficient of two integer numbers.  DCCG_AUDIO_DTOx_PHASE
 	 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
 	 */
-	if (ASIC_IS_DCE3(rdev)) {
+	if (ASIC_IS_DCE32(rdev)) {
+		if (dig->dig_encoder == 0) {
+			WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
+			WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
+			WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */
+		} else {
+			WREG32(DCCG_AUDIO_DTO1_PHASE, base_rate * 100);
+			WREG32(DCCG_AUDIO_DTO1_MODULE, clock * 100);
+			WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
+		}
+	} else if (ASIC_IS_DCE3(rdev)) {
 		/* according to the reg specs, this should DCE3.2 only, but in
-		 * practice it seems to cover DCE3.0 as well.
+		 * practice it seems to cover DCE3.0/3.1 as well.
 		 */
-		WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
-		WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
-		WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */
+		if (dig->dig_encoder == 0) {
+			WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
+			WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
+			WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */
+		} else {
+			WREG32(DCCG_AUDIO_DTO1_PHASE, base_rate * 100);
+			WREG32(DCCG_AUDIO_DTO1_MODULE, clock * 100);
+			WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
+		}
 	} else {
-		/* according to the reg specs, this should be DCE2.0 and DCE3.0 */
+		/* according to the reg specs, this should be DCE2.0 and DCE3.0/3.1 */
 		WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) |
 		       AUDIO_DTO_MODULE(clock / 10));
 	}
@@ -273,6 +282,9 @@
 	uint32_t offset;
 	ssize_t err;
 
+	if (!dig || !dig->afmt)
+		return;
+
 	/* Silent, r600_hdmi_enable will raise WARN for us */
 	if (!dig->afmt->enabled)
 		return;
@@ -455,6 +467,9 @@
 	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
 	u32 hdmi = HDMI0_ERROR_ACK;
 
+	if (!dig || !dig->afmt)
+		return;
+
 	/* Silent, r600_hdmi_enable will raise WARN for us */
 	if (enable && dig->afmt->enabled)
 		return;
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 79df558..2fd2241 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -1259,7 +1259,7 @@
  */
 #              define PACKET3_CP_DMA_CP_SYNC       (1 << 31)
 /* COMMAND */
-#              define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
+#              define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
                 /* 0 - none
 		 * 1 - 8 in 16
 		 * 2 - 8 in 32
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 142ce6c..d4ff48c 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -408,6 +408,7 @@
 	uint64_t		gpu_addr;
 	void			*cpu_ptr;
 	uint32_t		domain;
+	uint32_t		align;
 };
 
 struct radeon_sa_bo;
@@ -1144,6 +1145,7 @@
 	struct radeon_bo	*vcpu_bo;
 	void			*cpu_addr;
 	uint64_t		gpu_addr;
+	void			*saved_bo;
 	atomic_t		handles[RADEON_MAX_UVD_HANDLES];
 	struct drm_file		*filp[RADEON_MAX_UVD_HANDLES];
 	struct delayed_work	idle_work;
@@ -1762,7 +1764,7 @@
 		WREG32(reg, tmp_);				\
 	} while (0)
 #define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
-#define WREG32_OR(reg, or) WREG32_P(reg, or, ~or)
+#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
 #define WREG32_PLL_P(reg, val, mask)				\
 	do {							\
 		uint32_t tmp_ = RREG32_PLL(reg);		\
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index a2802b47..de36c47 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -986,8 +986,8 @@
 		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
 		.dma = &r600_copy_dma,
 		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
-		.copy = &r600_copy_dma,
-		.copy_ring_index = R600_RING_TYPE_DMA_INDEX,
+		.copy = &r600_copy_blit,
+		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
 	},
 	.surface = {
 		.set_reg = r600_set_surface_reg,
@@ -1074,8 +1074,8 @@
 		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
 		.dma = &r600_copy_dma,
 		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
-		.copy = &r600_copy_dma,
-		.copy_ring_index = R600_RING_TYPE_DMA_INDEX,
+		.copy = &r600_copy_blit,
+		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
 	},
 	.surface = {
 		.set_reg = r600_set_surface_reg,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index a72759e..34223fc 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -399,7 +399,7 @@
 /* uvd */
 int r600_uvd_init(struct radeon_device *rdev);
 int r600_uvd_rbc_start(struct radeon_device *rdev);
-void r600_uvd_rbc_stop(struct radeon_device *rdev);
+void r600_uvd_stop(struct radeon_device *rdev);
 int r600_uvd_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
 void r600_uvd_fence_emit(struct radeon_device *rdev,
 			 struct radeon_fence *fence);
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index dea6f63c..239a4074 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -715,13 +715,16 @@
 								(ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *)
 								(ctx->bios + data_offset +
 								 le16_to_cpu(router_obj->asObjects[k].usSrcDstTableOffset));
+							u8 *num_dst_objs = (u8 *)
+								((u8 *)router_src_dst_table + 1 +
+								 (router_src_dst_table->ucNumberOfSrc * 2));
+							u16 *dst_objs = (u16 *)(num_dst_objs + 1);
 							int enum_id;
 
 							router.router_id = router_obj_id;
-							for (enum_id = 0; enum_id < router_src_dst_table->ucNumberOfDst;
-							     enum_id++) {
+							for (enum_id = 0; enum_id < (*num_dst_objs); enum_id++) {
 								if (le16_to_cpu(path->usConnObjectId) ==
-								    le16_to_cpu(router_src_dst_table->usDstObjectID[enum_id]))
+								    le16_to_cpu(dst_objs[enum_id]))
 									break;
 							}
 
@@ -1651,7 +1654,9 @@
 								kfree(edid);
 						}
 					}
-					record += sizeof(ATOM_FAKE_EDID_PATCH_RECORD);
+					record += fake_edid_record->ucFakeEDIDLength ?
+						fake_edid_record->ucFakeEDIDLength + 2 :
+						sizeof(ATOM_FAKE_EDID_PATCH_RECORD);
 					break;
 				case LCD_PANEL_RESOLUTION_RECORD_TYPE:
 					panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record;
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 78edadc..68ce360 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -147,7 +147,7 @@
 					 enum radeon_combios_table_offset table)
 {
 	struct radeon_device *rdev = dev->dev_private;
-	int rev;
+	int rev, size;
 	uint16_t offset = 0, check_offset;
 
 	if (!rdev->bios)
@@ -156,174 +156,106 @@
 	switch (table) {
 		/* absolute offset tables */
 	case COMBIOS_ASIC_INIT_1_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0xc);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0xc;
 		break;
 	case COMBIOS_BIOS_SUPPORT_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x14);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x14;
 		break;
 	case COMBIOS_DAC_PROGRAMMING_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x2a);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x2a;
 		break;
 	case COMBIOS_MAX_COLOR_DEPTH_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x2c);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x2c;
 		break;
 	case COMBIOS_CRTC_INFO_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x2e);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x2e;
 		break;
 	case COMBIOS_PLL_INFO_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x30);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x30;
 		break;
 	case COMBIOS_TV_INFO_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x32);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x32;
 		break;
 	case COMBIOS_DFP_INFO_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x34);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x34;
 		break;
 	case COMBIOS_HW_CONFIG_INFO_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x36);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x36;
 		break;
 	case COMBIOS_MULTIMEDIA_INFO_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x38);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x38;
 		break;
 	case COMBIOS_TV_STD_PATCH_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x3e);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x3e;
 		break;
 	case COMBIOS_LCD_INFO_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x40);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x40;
 		break;
 	case COMBIOS_MOBILE_INFO_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x42);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x42;
 		break;
 	case COMBIOS_PLL_INIT_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x46);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x46;
 		break;
 	case COMBIOS_MEM_CONFIG_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x48);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x48;
 		break;
 	case COMBIOS_SAVE_MASK_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x4a);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x4a;
 		break;
 	case COMBIOS_HARDCODED_EDID_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x4c);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x4c;
 		break;
 	case COMBIOS_ASIC_INIT_2_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x4e);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x4e;
 		break;
 	case COMBIOS_CONNECTOR_INFO_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x50);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x50;
 		break;
 	case COMBIOS_DYN_CLK_1_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x52);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x52;
 		break;
 	case COMBIOS_RESERVED_MEM_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x54);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x54;
 		break;
 	case COMBIOS_EXT_TMDS_INFO_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x58);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x58;
 		break;
 	case COMBIOS_MEM_CLK_INFO_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x5a);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x5a;
 		break;
 	case COMBIOS_EXT_DAC_INFO_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x5c);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x5c;
 		break;
 	case COMBIOS_MISC_INFO_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x5e);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x5e;
 		break;
 	case COMBIOS_CRT_INFO_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x60);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x60;
 		break;
 	case COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x62);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x62;
 		break;
 	case COMBIOS_COMPONENT_VIDEO_INFO_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x64);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x64;
 		break;
 	case COMBIOS_FAN_SPEED_INFO_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x66);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x66;
 		break;
 	case COMBIOS_OVERDRIVE_INFO_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x68);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x68;
 		break;
 	case COMBIOS_OEM_INFO_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x6a);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x6a;
 		break;
 	case COMBIOS_DYN_CLK_2_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x6c);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x6c;
 		break;
 	case COMBIOS_POWER_CONNECTOR_INFO_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x6e);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x6e;
 		break;
 	case COMBIOS_I2C_INFO_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x70);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x70;
 		break;
 		/* relative offset tables */
 	case COMBIOS_ASIC_INIT_3_TABLE:	/* offset from misc info */
@@ -439,11 +371,16 @@
 		}
 		break;
 	default:
+		check_offset = 0;
 		break;
 	}
 
-	return offset;
+	size = RBIOS8(rdev->bios_header_start + 0x6);
+	/* check absolute offset tables */
+	if (table < COMBIOS_ASIC_INIT_3_TABLE && check_offset && check_offset < size)
+		offset = RBIOS16(rdev->bios_header_start + check_offset);
 
+	return offset;
 }
 
 bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
@@ -965,16 +902,22 @@
 			dac = RBIOS8(dac_info + 0x3) & 0xf;
 			p_dac->ps2_pdac_adj = (bg << 8) | (dac);
 		}
-		/* if the values are all zeros, use the table */
-		if (p_dac->ps2_pdac_adj)
+		/* if the values are zeros, use the table */
+		if ((dac == 0) || (bg == 0))
+			found = 0;
+		else
 			found = 1;
 	}
 
 	/* quirks */
-	/* Radeon 9100 (R200) */
-	if ((dev->pdev->device == 0x514D) &&
+	/* Radeon 7000 (RV100) */
+	if (((dev->pdev->device == 0x5159) &&
 	    (dev->pdev->subsystem_vendor == 0x174B) &&
-	    (dev->pdev->subsystem_device == 0x7149)) {
+	    (dev->pdev->subsystem_device == 0x7c28)) ||
+	/* Radeon 9100 (R200) */
+	   ((dev->pdev->device == 0x514D) &&
+	    (dev->pdev->subsystem_vendor == 0x174B) &&
+	    (dev->pdev->subsystem_device == 0x7149))) {
 		/* vbios value is bad, use the default */
 		found = 0;
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 2399f25..5a87c9f 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1489,6 +1489,24 @@
 	.force = radeon_dvi_force,
 };
 
+static const struct drm_connector_funcs radeon_edp_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = radeon_dp_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.set_property = radeon_lvds_set_property,
+	.destroy = radeon_dp_connector_destroy,
+	.force = radeon_dvi_force,
+};
+
+static const struct drm_connector_funcs radeon_lvds_bridge_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = radeon_dp_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.set_property = radeon_lvds_set_property,
+	.destroy = radeon_dp_connector_destroy,
+	.force = radeon_dvi_force,
+};
+
 void
 radeon_add_atom_connector(struct drm_device *dev,
 			  uint32_t connector_id,
@@ -1580,8 +1598,6 @@
 			goto failed;
 		radeon_dig_connector->igp_lane_info = igp_lane_info;
 		radeon_connector->con_priv = radeon_dig_connector;
-		drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
-		drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
 		if (i2c_bus->valid) {
 			/* add DP i2c bus */
 			if (connector_type == DRM_MODE_CONNECTOR_eDP)
@@ -1598,6 +1614,10 @@
 		case DRM_MODE_CONNECTOR_VGA:
 		case DRM_MODE_CONNECTOR_DVIA:
 		default:
+			drm_connector_init(dev, &radeon_connector->base,
+					   &radeon_dp_connector_funcs, connector_type);
+			drm_connector_helper_add(&radeon_connector->base,
+						 &radeon_dp_connector_helper_funcs);
 			connector->interlace_allowed = true;
 			connector->doublescan_allowed = true;
 			radeon_connector->dac_load_detect = true;
@@ -1610,6 +1630,10 @@
 		case DRM_MODE_CONNECTOR_HDMIA:
 		case DRM_MODE_CONNECTOR_HDMIB:
 		case DRM_MODE_CONNECTOR_DisplayPort:
+			drm_connector_init(dev, &radeon_connector->base,
+					   &radeon_dp_connector_funcs, connector_type);
+			drm_connector_helper_add(&radeon_connector->base,
+						 &radeon_dp_connector_helper_funcs);
 			drm_object_attach_property(&radeon_connector->base.base,
 						      rdev->mode_info.underscan_property,
 						      UNDERSCAN_OFF);
@@ -1634,6 +1658,10 @@
 			break;
 		case DRM_MODE_CONNECTOR_LVDS:
 		case DRM_MODE_CONNECTOR_eDP:
+			drm_connector_init(dev, &radeon_connector->base,
+					   &radeon_lvds_bridge_connector_funcs, connector_type);
+			drm_connector_helper_add(&radeon_connector->base,
+						 &radeon_dp_connector_helper_funcs);
 			drm_object_attach_property(&radeon_connector->base.base,
 						      dev->mode_config.scaling_mode_property,
 						      DRM_MODE_SCALE_FULLSCREEN);
@@ -1797,7 +1825,7 @@
 				goto failed;
 			radeon_dig_connector->igp_lane_info = igp_lane_info;
 			radeon_connector->con_priv = radeon_dig_connector;
-			drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
+			drm_connector_init(dev, &radeon_connector->base, &radeon_edp_connector_funcs, connector_type);
 			drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
 			if (i2c_bus->valid) {
 				/* add DP i2c bus */
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 7e265a5..fe36f1d 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -80,9 +80,11 @@
 		p->relocs[i].lobj.bo = p->relocs[i].robj;
 		p->relocs[i].lobj.written = !!r->write_domain;
 
-		/* the first reloc of an UVD job is the
-		   msg and that must be in VRAM */
-		if (p->ring == R600_RING_TYPE_UVD_INDEX && i == 0) {
+		/* the first reloc of an UVD job is the msg and that must be in
+		   VRAM, also but everything into VRAM on AGP cards to avoid
+		   image corruptions */
+		if (p->ring == R600_RING_TYPE_UVD_INDEX &&
+		    (i == 0 || drm_pci_device_is_agp(p->rdev->ddev))) {
 			/* TODO: is this still needed for NI+ ? */
 			p->relocs[i].lobj.domain =
 				RADEON_GEM_DOMAIN_VRAM;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index b0dc0b6..8df1525 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1196,13 +1196,22 @@
 			return r;
 	}
 	if ((radeon_testing & 1)) {
-		radeon_test_moves(rdev);
+		if (rdev->accel_working)
+			radeon_test_moves(rdev);
+		else
+			DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
 	}
 	if ((radeon_testing & 2)) {
-		radeon_test_syncing(rdev);
+		if (rdev->accel_working)
+			radeon_test_syncing(rdev);
+		else
+			DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
 	}
 	if (radeon_benchmarking) {
-		radeon_benchmark(rdev, radeon_benchmarking);
+		if (rdev->accel_working)
+			radeon_benchmark(rdev, radeon_benchmarking);
+		else
+			DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
 	}
 	return 0;
 }
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 43ec4a4..5ce190b 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -467,6 +467,7 @@
 		size *= 2;
 		r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
 					      RADEON_GPU_PAGE_ALIGN(size),
+					      RADEON_GPU_PAGE_SIZE,
 					      RADEON_GEM_DOMAIN_VRAM);
 		if (r) {
 			dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 5a99d43..1fe12ab 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -241,9 +241,6 @@
 {
 	int r = 0;
 
-	INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
-	INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
-
 	spin_lock_init(&rdev->irq.lock);
 	r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
 	if (r) {
@@ -265,6 +262,10 @@
 		rdev->irq.installed = false;
 		return r;
 	}
+
+	INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
+	INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
+
 	DRM_INFO("radeon: irq initialized.\n");
 	return 0;
 }
@@ -284,8 +285,8 @@
 		rdev->irq.installed = false;
 		if (rdev->msi_enabled)
 			pci_disable_msi(rdev->pdev);
+		flush_work(&rdev->hotplug_work);
 	}
-	flush_work(&rdev->hotplug_work);
 }
 
 /**
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 4f2d4f4..7e292d8 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -414,6 +414,9 @@
 		value = rdev->config.si.tile_mode_array;
 		value_size = sizeof(uint32_t)*32;
 		break;
+	case RADEON_INFO_SI_CP_DMA_COMPUTE:
+		*value = 1;
+		break;
 	default:
 		DRM_DEBUG_KMS("Invalid request %d\n", info->request);
 		return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index e2cb80a..2943823 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -158,7 +158,7 @@
 
 extern int radeon_sa_bo_manager_init(struct radeon_device *rdev,
 				     struct radeon_sa_manager *sa_manager,
-				     unsigned size, u32 domain);
+				     unsigned size, u32 align, u32 domain);
 extern void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
 				      struct radeon_sa_manager *sa_manager);
 extern int radeon_sa_bo_manager_start(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 82434018..6e0f480 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -224,6 +224,7 @@
 	}
 	r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
 				      RADEON_IB_POOL_SIZE*64*1024,
+				      RADEON_GPU_PAGE_SIZE,
 				      RADEON_GEM_DOMAIN_GTT);
 	if (r) {
 		return r;
@@ -822,9 +823,11 @@
 	 * packet that is the root issue
 	 */
 	i = (ring->rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
-	for (j = 0; j <= (count + 32); j++) {
-		seq_printf(m, "r[%5d]=0x%08x\n", i, ring->ring[i]);
-		i = (i + 1) & ring->ptr_mask;
+	if (ring->ready) {
+		for (j = 0; j <= (count + 32); j++) {
+			seq_printf(m, "r[%5d]=0x%08x\n", i, ring->ring[i]);
+			i = (i + 1) & ring->ptr_mask;
+		}
 	}
 	return 0;
 }
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
index 0abe5a9..f0bac68 100644
--- a/drivers/gpu/drm/radeon/radeon_sa.c
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -49,7 +49,7 @@
 
 int radeon_sa_bo_manager_init(struct radeon_device *rdev,
 			      struct radeon_sa_manager *sa_manager,
-			      unsigned size, u32 domain)
+			      unsigned size, u32 align, u32 domain)
 {
 	int i, r;
 
@@ -57,13 +57,14 @@
 	sa_manager->bo = NULL;
 	sa_manager->size = size;
 	sa_manager->domain = domain;
+	sa_manager->align = align;
 	sa_manager->hole = &sa_manager->olist;
 	INIT_LIST_HEAD(&sa_manager->olist);
 	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
 		INIT_LIST_HEAD(&sa_manager->flist[i]);
 	}
 
-	r = radeon_bo_create(rdev, size, RADEON_GPU_PAGE_SIZE, true,
+	r = radeon_bo_create(rdev, size, align, true,
 			     domain, NULL, &sa_manager->bo);
 	if (r) {
 		dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
@@ -317,7 +318,7 @@
 	unsigned tries[RADEON_NUM_RINGS];
 	int i, r;
 
-	BUG_ON(align > RADEON_GPU_PAGE_SIZE);
+	BUG_ON(align > sa_manager->align);
 	BUG_ON(size > sa_manager->size);
 
 	*sa_bo = kmalloc(sizeof(struct radeon_sa_bo), GFP_KERNEL);
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index bbed4af..f9ebf2b 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -37,8 +37,8 @@
 	struct radeon_bo **gtt_obj = NULL;
 	struct radeon_fence *fence = NULL;
 	uint64_t gtt_addr, vram_addr;
-	unsigned i, n, size;
-	int r, ring;
+	unsigned n, size;
+	int i, r, ring;
 
 	switch (flag) {
 	case RADEON_TEST_COPY_DMA:
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index cad735d..f3ccf6d 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -123,64 +123,6 @@
 		return r;
 	}
 
-	r = radeon_uvd_resume(rdev);
-	if (r)
-		return r;
-
-	memset(rdev->uvd.cpu_addr, 0, bo_size);
-	memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);
-
-	r = radeon_uvd_suspend(rdev);
-	if (r)
-		return r;
-
-	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
-		atomic_set(&rdev->uvd.handles[i], 0);
-		rdev->uvd.filp[i] = NULL;
-	}
-
-	return 0;
-}
-
-void radeon_uvd_fini(struct radeon_device *rdev)
-{
-	radeon_uvd_suspend(rdev);
-	radeon_bo_unref(&rdev->uvd.vcpu_bo);
-}
-
-int radeon_uvd_suspend(struct radeon_device *rdev)
-{
-	int r;
-
-	if (rdev->uvd.vcpu_bo == NULL)
-		return 0;
-
-	r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
-	if (!r) {
-		radeon_bo_kunmap(rdev->uvd.vcpu_bo);
-		radeon_bo_unpin(rdev->uvd.vcpu_bo);
-		rdev->uvd.cpu_addr = NULL;
-		if (!radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_CPU, NULL)) {
-			radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
-		}
-		radeon_bo_unreserve(rdev->uvd.vcpu_bo);
-
-		if (rdev->uvd.cpu_addr) {
-			radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
-		} else {
-			rdev->fence_drv[R600_RING_TYPE_UVD_INDEX].cpu_addr = NULL;
-		}
-	}
-	return r;
-}
-
-int radeon_uvd_resume(struct radeon_device *rdev)
-{
-	int r;
-
-	if (rdev->uvd.vcpu_bo == NULL)
-		return -EINVAL;
-
 	r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
 	if (r) {
 		radeon_bo_unref(&rdev->uvd.vcpu_bo);
@@ -188,10 +130,6 @@
 		return r;
 	}
 
-	/* Have been pin in cpu unmap unpin */
-	radeon_bo_kunmap(rdev->uvd.vcpu_bo);
-	radeon_bo_unpin(rdev->uvd.vcpu_bo);
-
 	r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
 			  &rdev->uvd.gpu_addr);
 	if (r) {
@@ -209,6 +147,84 @@
 
 	radeon_bo_unreserve(rdev->uvd.vcpu_bo);
 
+	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+		atomic_set(&rdev->uvd.handles[i], 0);
+		rdev->uvd.filp[i] = NULL;
+	}
+
+	return 0;
+}
+
+void radeon_uvd_fini(struct radeon_device *rdev)
+{
+	int r;
+
+	if (rdev->uvd.vcpu_bo == NULL)
+		return;
+
+	r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
+	if (!r) {
+		radeon_bo_kunmap(rdev->uvd.vcpu_bo);
+		radeon_bo_unpin(rdev->uvd.vcpu_bo);
+		radeon_bo_unreserve(rdev->uvd.vcpu_bo);
+	}
+
+	radeon_bo_unref(&rdev->uvd.vcpu_bo);
+
+	release_firmware(rdev->uvd_fw);
+}
+
+int radeon_uvd_suspend(struct radeon_device *rdev)
+{
+	unsigned size;
+	void *ptr;
+	int i;
+
+	if (rdev->uvd.vcpu_bo == NULL)
+		return 0;
+
+	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
+		if (atomic_read(&rdev->uvd.handles[i]))
+			break;
+
+	if (i == RADEON_MAX_UVD_HANDLES)
+		return 0;
+
+	size = radeon_bo_size(rdev->uvd.vcpu_bo);
+	size -= rdev->uvd_fw->size;
+
+	ptr = rdev->uvd.cpu_addr;
+	ptr += rdev->uvd_fw->size;
+
+	rdev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
+	memcpy(rdev->uvd.saved_bo, ptr, size);
+
+	return 0;
+}
+
+int radeon_uvd_resume(struct radeon_device *rdev)
+{
+	unsigned size;
+	void *ptr;
+
+	if (rdev->uvd.vcpu_bo == NULL)
+		return -EINVAL;
+
+	memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);
+
+	size = radeon_bo_size(rdev->uvd.vcpu_bo);
+	size -= rdev->uvd_fw->size;
+
+	ptr = rdev->uvd.cpu_addr;
+	ptr += rdev->uvd_fw->size;
+
+	if (rdev->uvd.saved_bo != NULL) {
+		memcpy(ptr, rdev->uvd.saved_bo, size);
+		kfree(rdev->uvd.saved_bo);
+		rdev->uvd.saved_bo = NULL;
+	} else
+		memset(ptr, 0, size);
+
 	return 0;
 }
 
@@ -222,8 +238,8 @@
 {
 	int i, r;
 	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
-		if (rdev->uvd.filp[i] == filp) {
-			uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
+		uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
+		if (handle != 0 && rdev->uvd.filp[i] == filp) {
 			struct radeon_fence *fence;
 
 			r = radeon_uvd_get_destroy_msg(rdev,
@@ -343,6 +359,14 @@
 		return -EINVAL;
 	}
 
+	if (bo->tbo.sync_obj) {
+		r = radeon_fence_wait(bo->tbo.sync_obj, false);
+		if (r) {
+			DRM_ERROR("Failed waiting for UVD message (%d)!\n", r);
+			return r;
+		}
+	}
+
 	r = radeon_bo_kmap(bo, &ptr);
 	if (r)
 		return r;
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 233a9b9..b8074a8 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -174,10 +174,13 @@
 	/* FIXME: according to doc we should set HIDE_MMCFG_BAR=0,
 	 * AGPMODE30=0 & AGP30ENHANCED=0 in NB_CNTL */
 	if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
-		WREG32_MC(RS480_MC_MISC_CNTL,
-			  (RS480_GART_INDEX_REG_EN | RS690_BLOCK_GFX_D3_EN));
+		tmp = RREG32_MC(RS480_MC_MISC_CNTL);
+		tmp |= RS480_GART_INDEX_REG_EN | RS690_BLOCK_GFX_D3_EN;
+		WREG32_MC(RS480_MC_MISC_CNTL, tmp);
 	} else {
-		WREG32_MC(RS480_MC_MISC_CNTL, RS480_GART_INDEX_REG_EN);
+		tmp = RREG32_MC(RS480_MC_MISC_CNTL);
+		tmp |= RS480_GART_INDEX_REG_EN;
+		WREG32_MC(RS480_MC_MISC_CNTL, tmp);
 	}
 	/* Enable gart */
 	WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | size_reg));
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 4a62ad2..f5e92cf 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -744,10 +744,10 @@
 						 (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
 		radeon_program_register_sequence(rdev,
 						 rv730_golden_registers,
-						 (const u32)ARRAY_SIZE(rv770_golden_registers));
+						 (const u32)ARRAY_SIZE(rv730_golden_registers));
 		radeon_program_register_sequence(rdev,
 						 rv730_mgcg_init,
-						 (const u32)ARRAY_SIZE(rv770_mgcg_init));
+						 (const u32)ARRAY_SIZE(rv730_mgcg_init));
 		break;
 	case CHIP_RV710:
 		radeon_program_register_sequence(rdev,
@@ -758,18 +758,18 @@
 						 (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
 		radeon_program_register_sequence(rdev,
 						 rv710_golden_registers,
-						 (const u32)ARRAY_SIZE(rv770_golden_registers));
+						 (const u32)ARRAY_SIZE(rv710_golden_registers));
 		radeon_program_register_sequence(rdev,
 						 rv710_mgcg_init,
-						 (const u32)ARRAY_SIZE(rv770_mgcg_init));
+						 (const u32)ARRAY_SIZE(rv710_mgcg_init));
 		break;
 	case CHIP_RV740:
 		radeon_program_register_sequence(rdev,
 						 rv740_golden_registers,
-						 (const u32)ARRAY_SIZE(rv770_golden_registers));
+						 (const u32)ARRAY_SIZE(rv740_golden_registers));
 		radeon_program_register_sequence(rdev,
 						 rv740_mgcg_init,
-						 (const u32)ARRAY_SIZE(rv770_mgcg_init));
+						 (const u32)ARRAY_SIZE(rv740_mgcg_init));
 		break;
 	default:
 		break;
@@ -1829,6 +1829,8 @@
 	/* enable pcie gen2 link */
 	rv770_pcie_gen2_enable(rdev);
 
+	rv770_mc_program(rdev);
+
 	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
 		r = r600_init_microcode(rdev);
 		if (r) {
@@ -1841,7 +1843,6 @@
 	if (r)
 		return r;
 
-	rv770_mc_program(rdev);
 	if (rdev->flags & RADEON_IS_AGP) {
 		rv770_agp_enable(rdev);
 	} else {
@@ -1983,6 +1984,7 @@
 int rv770_suspend(struct radeon_device *rdev)
 {
 	r600_audio_fini(rdev);
+	r600_uvd_stop(rdev);
 	radeon_uvd_suspend(rdev);
 	r700_cp_stop(rdev);
 	r600_dma_stop(rdev);
@@ -2098,6 +2100,7 @@
 	radeon_ib_pool_fini(rdev);
 	radeon_irq_kms_fini(rdev);
 	rv770_pcie_gart_fini(rdev);
+	r600_uvd_stop(rdev);
 	radeon_uvd_fini(rdev);
 	r600_vram_scratch_fini(rdev);
 	radeon_gem_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index a1b0da6..f054a3b 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -1467,7 +1467,8 @@
 				   struct drm_display_mode *mode,
 				   struct drm_display_mode *other_mode)
 {
-	u32 tmp;
+	u32 tmp, buffer_alloc, i;
+	u32 pipe_offset = radeon_crtc->crtc_id * 0x20;
 	/*
 	 * Line Buffer Setup
 	 * There are 3 line buffers, each one shared by 2 display controllers.
@@ -1482,16 +1483,30 @@
 	 * non-linked crtcs for maximum line buffer allocation.
 	 */
 	if (radeon_crtc->base.enabled && mode) {
-		if (other_mode)
+		if (other_mode) {
 			tmp = 0; /* 1/2 */
-		else
+			buffer_alloc = 1;
+		} else {
 			tmp = 2; /* whole */
-	} else
+			buffer_alloc = 2;
+		}
+	} else {
 		tmp = 0;
+		buffer_alloc = 0;
+	}
 
 	WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset,
 	       DC_LB_MEMORY_CONFIG(tmp));
 
+	WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
+	       DMIF_BUFFERS_ALLOCATED(buffer_alloc));
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
+		    DMIF_BUFFERS_ALLOCATED_COMPLETED)
+			break;
+		udelay(1);
+	}
+
 	if (radeon_crtc->base.enabled && mode) {
 		switch (tmp) {
 		case 0:
@@ -3796,13 +3811,64 @@
 	return 0;
 }
 
+static int si_vm_packet3_cp_dma_check(u32 *ib, u32 idx)
+{
+	u32 start_reg, reg, i;
+	u32 command = ib[idx + 4];
+	u32 info = ib[idx + 1];
+	u32 idx_value = ib[idx];
+	if (command & PACKET3_CP_DMA_CMD_SAS) {
+		/* src address space is register */
+		if (((info & 0x60000000) >> 29) == 0) {
+			start_reg = idx_value << 2;
+			if (command & PACKET3_CP_DMA_CMD_SAIC) {
+				reg = start_reg;
+				if (!si_vm_reg_valid(reg)) {
+					DRM_ERROR("CP DMA Bad SRC register\n");
+					return -EINVAL;
+				}
+			} else {
+				for (i = 0; i < (command & 0x1fffff); i++) {
+					reg = start_reg + (4 * i);
+					if (!si_vm_reg_valid(reg)) {
+						DRM_ERROR("CP DMA Bad SRC register\n");
+						return -EINVAL;
+					}
+				}
+			}
+		}
+	}
+	if (command & PACKET3_CP_DMA_CMD_DAS) {
+		/* dst address space is register */
+		if (((info & 0x00300000) >> 20) == 0) {
+			start_reg = ib[idx + 2];
+			if (command & PACKET3_CP_DMA_CMD_DAIC) {
+				reg = start_reg;
+				if (!si_vm_reg_valid(reg)) {
+					DRM_ERROR("CP DMA Bad DST register\n");
+					return -EINVAL;
+				}
+			} else {
+				for (i = 0; i < (command & 0x1fffff); i++) {
+					reg = start_reg + (4 * i);
+				if (!si_vm_reg_valid(reg)) {
+						DRM_ERROR("CP DMA Bad DST register\n");
+						return -EINVAL;
+					}
+				}
+			}
+		}
+	}
+	return 0;
+}
+
 static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
 				   u32 *ib, struct radeon_cs_packet *pkt)
 {
+	int r;
 	u32 idx = pkt->idx + 1;
 	u32 idx_value = ib[idx];
 	u32 start_reg, end_reg, reg, i;
-	u32 command, info;
 
 	switch (pkt->opcode) {
 	case PACKET3_NOP:
@@ -3903,50 +3969,9 @@
 		}
 		break;
 	case PACKET3_CP_DMA:
-		command = ib[idx + 4];
-		info = ib[idx + 1];
-		if (command & PACKET3_CP_DMA_CMD_SAS) {
-			/* src address space is register */
-			if (((info & 0x60000000) >> 29) == 0) {
-				start_reg = idx_value << 2;
-				if (command & PACKET3_CP_DMA_CMD_SAIC) {
-					reg = start_reg;
-					if (!si_vm_reg_valid(reg)) {
-						DRM_ERROR("CP DMA Bad SRC register\n");
-						return -EINVAL;
-					}
-				} else {
-					for (i = 0; i < (command & 0x1fffff); i++) {
-						reg = start_reg + (4 * i);
-						if (!si_vm_reg_valid(reg)) {
-							DRM_ERROR("CP DMA Bad SRC register\n");
-							return -EINVAL;
-						}
-					}
-				}
-			}
-		}
-		if (command & PACKET3_CP_DMA_CMD_DAS) {
-			/* dst address space is register */
-			if (((info & 0x00300000) >> 20) == 0) {
-				start_reg = ib[idx + 2];
-				if (command & PACKET3_CP_DMA_CMD_DAIC) {
-					reg = start_reg;
-					if (!si_vm_reg_valid(reg)) {
-						DRM_ERROR("CP DMA Bad DST register\n");
-						return -EINVAL;
-					}
-				} else {
-					for (i = 0; i < (command & 0x1fffff); i++) {
-						reg = start_reg + (4 * i);
-						if (!si_vm_reg_valid(reg)) {
-							DRM_ERROR("CP DMA Bad DST register\n");
-							return -EINVAL;
-						}
-					}
-				}
-			}
-		}
+		r = si_vm_packet3_cp_dma_check(ib, idx);
+		if (r)
+			return r;
 		break;
 	default:
 		DRM_ERROR("Invalid GFX packet3: 0x%x\n", pkt->opcode);
@@ -3958,6 +3983,7 @@
 static int si_vm_packet3_compute_check(struct radeon_device *rdev,
 				       u32 *ib, struct radeon_cs_packet *pkt)
 {
+	int r;
 	u32 idx = pkt->idx + 1;
 	u32 idx_value = ib[idx];
 	u32 start_reg, reg, i;
@@ -4030,6 +4056,11 @@
 				return -EINVAL;
 		}
 		break;
+	case PACKET3_CP_DMA:
+		r = si_vm_packet3_cp_dma_check(ib, idx);
+		if (r)
+			return r;
+		break;
 	default:
 		DRM_ERROR("Invalid Compute packet3: 0x%x\n", pkt->opcode);
 		return -EINVAL;
@@ -5270,6 +5301,8 @@
 	struct radeon_ring *ring;
 	int r;
 
+	si_mc_program(rdev);
+
 	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
 	    !rdev->rlc_fw || !rdev->mc_fw) {
 		r = si_init_microcode(rdev);
@@ -5289,7 +5322,6 @@
 	if (r)
 		return r;
 
-	si_mc_program(rdev);
 	r = si_pcie_gart_enable(rdev);
 	if (r)
 		return r;
@@ -5473,7 +5505,7 @@
 	si_cp_enable(rdev, false);
 	cayman_dma_stop(rdev);
 	if (rdev->has_uvd) {
-		r600_uvd_rbc_stop(rdev);
+		r600_uvd_stop(rdev);
 		radeon_uvd_suspend(rdev);
 	}
 	si_irq_suspend(rdev);
@@ -5613,8 +5645,10 @@
 	radeon_vm_manager_fini(rdev);
 	radeon_ib_pool_fini(rdev);
 	radeon_irq_kms_fini(rdev);
-	if (rdev->has_uvd)
+	if (rdev->has_uvd) {
+		r600_uvd_stop(rdev);
 		radeon_uvd_fini(rdev);
+	}
 	si_pcie_gart_fini(rdev);
 	r600_vram_scratch_fini(rdev);
 	radeon_gem_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 8f2d7d4..495f41f 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -97,6 +97,10 @@
 
 #define DMIF_ADDR_CALC  				0xC00
 
+#define	PIPE0_DMIF_BUFFER_CONTROL			  0x0ca0
+#       define DMIF_BUFFERS_ALLOCATED(x)                  ((x) << 0)
+#       define DMIF_BUFFERS_ALLOCATED_COMPLETED           (1 << 4)
+
 #define	SRBM_STATUS				        0xE50
 #define		GRBM_RQ_PENDING 			(1 << 5)
 #define		VMC_BUSY 				(1 << 8)
@@ -924,7 +928,7 @@
  * 6. COMMAND [30:21] | BYTE_COUNT [20:0]
  */
 #              define PACKET3_CP_DMA_DST_SEL(x)    ((x) << 20)
-                /* 0 - SRC_ADDR
+                /* 0 - DST_ADDR
 		 * 1 - GDS
 		 */
 #              define PACKET3_CP_DMA_ENGINE(x)     ((x) << 27)
@@ -939,7 +943,7 @@
 #              define PACKET3_CP_DMA_CP_SYNC       (1 << 31)
 /* COMMAND */
 #              define PACKET3_CP_DMA_DIS_WC        (1 << 21)
-#              define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
+#              define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
                 /* 0 - none
 		 * 1 - 8 in 16
 		 * 2 - 8 in 32
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 9b07b7d..461505b 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -363,6 +363,7 @@
 	atomic_set(&bo->reserved, 0);
 	wake_up_all(&bo->event_queue);
 }
+EXPORT_SYMBOL(ttm_bo_unreserve_locked);
 
 void ttm_bo_unreserve(struct ttm_buffer_object *bo)
 {
@@ -446,8 +447,7 @@
 
 	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
 		if (bo->ttm == NULL) {
-			bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
-			ret = ttm_bo_add_ttm(bo, zero);
+			ret = ttm_bo_add_ttm(bo, false);
 			if (ret)
 				goto out_err;
 		}
@@ -1619,9 +1619,7 @@
 		goto out_no_sys;
 
 	bdev->addr_space_rb = RB_ROOT;
-	ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
-	if (unlikely(ret != 0))
-		goto out_no_addr_mm;
+	drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
 
 	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
 	INIT_LIST_HEAD(&bdev->ddestroy);
@@ -1635,8 +1633,6 @@
 	mutex_unlock(&glob->device_list_mutex);
 
 	return 0;
-out_no_addr_mm:
-	ttm_bo_clean_mm(bdev, 0);
 out_no_sys:
 	return ret;
 }
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
index 9212494..e4367f9 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -103,18 +103,12 @@
 			   unsigned long p_size)
 {
 	struct ttm_range_manager *rman;
-	int ret;
 
 	rman = kzalloc(sizeof(*rman), GFP_KERNEL);
 	if (!rman)
 		return -ENOMEM;
 
-	ret = drm_mm_init(&rman->mm, 0, p_size);
-	if (ret) {
-		kfree(rman);
-		return ret;
-	}
-
+	drm_mm_init(&rman->mm, 0, p_size);
 	spin_lock_init(&rman->lock);
 	man->priv = rman;
 	return 0;
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 5e93a52..210d503 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -170,7 +170,7 @@
 		ttm_tt_unbind(ttm);
 	}
 
-	if (likely(ttm->pages != NULL)) {
+	if (ttm->state == tt_unbound) {
 		ttm->bdev->driver->ttm_tt_unpopulate(ttm);
 	}
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 07dfd82..6c44c69 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -740,9 +740,17 @@
 	struct vmw_fpriv *vmw_fp;
 
 	vmw_fp = vmw_fpriv(file_priv);
-	ttm_object_file_release(&vmw_fp->tfile);
-	if (vmw_fp->locked_master)
+
+	if (vmw_fp->locked_master) {
+		struct vmw_master *vmaster =
+			vmw_master(vmw_fp->locked_master);
+
+		ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
+		ttm_vt_unlock(&vmaster->lock);
 		drm_master_put(&vmw_fp->locked_master);
+	}
+
+	ttm_object_file_release(&vmw_fp->tfile);
 	kfree(vmw_fp);
 }
 
@@ -942,14 +950,13 @@
 
 	vmw_fp->locked_master = drm_master_get(file_priv->master);
 	ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
-	vmw_execbuf_release_pinned_bo(dev_priv);
-
 	if (unlikely((ret != 0))) {
 		DRM_ERROR("Unable to lock TTM at VT switch.\n");
 		drm_master_put(&vmw_fp->locked_master);
 	}
 
-	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
+	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
+	vmw_execbuf_release_pinned_bo(dev_priv);
 
 	if (!dev_priv->enable_fb) {
 		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index 3751730..1a0bf07 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -29,7 +29,9 @@
 #include <drm/drmP.h>
 #include <drm/ttm/ttm_bo_driver.h>
 
-#define VMW_PPN_SIZE sizeof(unsigned long)
+#define VMW_PPN_SIZE (sizeof(unsigned long))
+/* A future safe maximum remap size. */
+#define VMW_PPN_PER_REMAP ((31 * 1024) / VMW_PPN_SIZE)
 
 static int vmw_gmr2_bind(struct vmw_private *dev_priv,
 			 struct page *pages[],
@@ -38,43 +40,61 @@
 {
 	SVGAFifoCmdDefineGMR2 define_cmd;
 	SVGAFifoCmdRemapGMR2 remap_cmd;
-	uint32_t define_size = sizeof(define_cmd) + 4;
-	uint32_t remap_size = VMW_PPN_SIZE * num_pages + sizeof(remap_cmd) + 4;
 	uint32_t *cmd;
 	uint32_t *cmd_orig;
+	uint32_t define_size = sizeof(define_cmd) + sizeof(*cmd);
+	uint32_t remap_num = num_pages / VMW_PPN_PER_REMAP + ((num_pages % VMW_PPN_PER_REMAP) > 0);
+	uint32_t remap_size = VMW_PPN_SIZE * num_pages + (sizeof(remap_cmd) + sizeof(*cmd)) * remap_num;
+	uint32_t remap_pos = 0;
+	uint32_t cmd_size = define_size + remap_size;
 	uint32_t i;
 
-	cmd_orig = cmd = vmw_fifo_reserve(dev_priv, define_size + remap_size);
+	cmd_orig = cmd = vmw_fifo_reserve(dev_priv, cmd_size);
 	if (unlikely(cmd == NULL))
 		return -ENOMEM;
 
 	define_cmd.gmrId = gmr_id;
 	define_cmd.numPages = num_pages;
 
+	*cmd++ = SVGA_CMD_DEFINE_GMR2;
+	memcpy(cmd, &define_cmd, sizeof(define_cmd));
+	cmd += sizeof(define_cmd) / sizeof(*cmd);
+
+	/*
+	 * Need to split the command if there are too many
+	 * pages that goes into the gmr.
+	 */
+
 	remap_cmd.gmrId = gmr_id;
 	remap_cmd.flags = (VMW_PPN_SIZE > sizeof(*cmd)) ?
 		SVGA_REMAP_GMR2_PPN64 : SVGA_REMAP_GMR2_PPN32;
-	remap_cmd.offsetPages = 0;
-	remap_cmd.numPages = num_pages;
 
-	*cmd++ = SVGA_CMD_DEFINE_GMR2;
-	memcpy(cmd, &define_cmd, sizeof(define_cmd));
-	cmd += sizeof(define_cmd) / sizeof(uint32);
+	while (num_pages > 0) {
+		unsigned long nr = min(num_pages, (unsigned long)VMW_PPN_PER_REMAP);
 
-	*cmd++ = SVGA_CMD_REMAP_GMR2;
-	memcpy(cmd, &remap_cmd, sizeof(remap_cmd));
-	cmd += sizeof(remap_cmd) / sizeof(uint32);
+		remap_cmd.offsetPages = remap_pos;
+		remap_cmd.numPages = nr;
 
-	for (i = 0; i < num_pages; ++i) {
-		if (VMW_PPN_SIZE <= 4)
-			*cmd = page_to_pfn(*pages++);
-		else
-			*((uint64_t *)cmd) = page_to_pfn(*pages++);
+		*cmd++ = SVGA_CMD_REMAP_GMR2;
+		memcpy(cmd, &remap_cmd, sizeof(remap_cmd));
+		cmd += sizeof(remap_cmd) / sizeof(*cmd);
 
-		cmd += VMW_PPN_SIZE / sizeof(*cmd);
+		for (i = 0; i < nr; ++i) {
+			if (VMW_PPN_SIZE <= 4)
+				*cmd = page_to_pfn(*pages++);
+			else
+				*((uint64_t *)cmd) = page_to_pfn(*pages++);
+
+			cmd += VMW_PPN_SIZE / sizeof(*cmd);
+		}
+
+		num_pages -= nr;
+		remap_pos += nr;
 	}
 
-	vmw_fifo_commit(dev_priv, define_size + remap_size);
+	BUG_ON(cmd != cmd_orig + cmd_size / sizeof(*cmd));
+
+	vmw_fifo_commit(dev_priv, cmd_size);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index bc78425..407d7f9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -970,7 +970,7 @@
 	if (new_backup)
 		res->backup_offset = new_backup_offset;
 
-	if (!res->func->may_evict)
+	if (!res->func->may_evict || res->id == -1)
 		return;
 
 	write_lock(&dev_priv->resource_lock);
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index fb52f3f..58041bd 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -758,6 +758,12 @@
 	  for events and handle data streams. Each sensor driver can format
 	  data and present to user mode using input or IIO interface.
 
+config HID_ANDROIDTV_REMOTE
+	tristate "AndroidTV remote control support"
+	depends on USB_HID
+	---help---
+	Support for AndroidTV remote control.
+
 endmenu
 
 endif # HID
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 2065694..b95ee1a 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -117,6 +117,7 @@
 obj-$(CONFIG_HID_WALTOP)	+= hid-waltop.o
 obj-$(CONFIG_HID_WIIMOTE)	+= hid-wiimote.o
 obj-$(CONFIG_HID_SENSOR_HUB)	+= hid-sensor-hub.o
+obj-$(CONFIG_HID_ANDROIDTV_REMOTE)	+= hid-atv-remote.o sbcdec.o
 
 obj-$(CONFIG_USB_HID)		+= usbhid/
 obj-$(CONFIG_USB_MOUSE)		+= usbhid/
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index feae88b..c7710b5 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -524,6 +524,12 @@
 		.driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS),
 		.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI),
+		.driver_data = APPLE_HAS_FN },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO),
+		.driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS),
+		.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
 		.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO),
diff --git a/drivers/hid/hid-atv-remote.c b/drivers/hid/hid-atv-remote.c
new file mode 100644
index 0000000..e08086b
--- /dev/null
+++ b/drivers/hid/hid-atv-remote.c
@@ -0,0 +1,1520 @@
+/*
+ *  HID driver for the Android TV remote
+ *  providing keys and microphone audio functionality
+ *
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/hid.h>
+#include <linux/hiddev.h>
+#include <linux/hardirq.h>
+#include <linux/jiffies.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/wait.h>
+#include <linux/timer.h>
+#include <linux/spinlock.h>
+#include <linux/vmalloc.h>
+#include <linux/switch.h>
+#include <sound/core.h>
+#include <sound/control.h>
+#include <sound/info.h>
+#include <sound/initval.h>
+#include <sound/pcm.h>
+
+#include "hid-ids.h"
+#include "sbcdec.h"
+
+MODULE_LICENSE("GPL v2");
+
+#define snd_atvr_log(...) pr_info("snd_atvr: " __VA_ARGS__)
+
+/* These values are copied from Android WiredAccessoryObserver */
+enum headset_state {
+	BIT_NO_HEADSET = 0,
+	BIT_HEADSET = (1 << 0),
+	BIT_HEADSET_NO_MIC = (1 << 1),
+};
+
+/* This has to be static and created at init/boot, or else Android
+ * WiredAccessoryManager won't watch for it since it only checks
+ * for it's existence once on boot.
+ */
+static struct switch_dev h2w_switch = {
+	.name = "h2w",
+};
+
+#define ADPCM_AUDIO_REPORT_ID 30
+
+#define MSBC_AUDIO1_REPORT_ID 0xF7
+#define MSBC_AUDIO2_REPORT_ID 0xFA
+#define MSBC_AUDIO3_REPORT_ID 0xFB
+
+#define INPUT_REPORT_ID 2
+
+#define KEYCODE_PRESENT_IN_AUDIO_PACKET_FLAG 0x80
+
+/* defaults */
+#define MAX_PCM_DEVICES     1
+#define MAX_PCM_SUBSTREAMS  4
+#define MAX_MIDI_DEVICES    0
+
+/* Define these all in one place so they stay in sync. */
+#define USE_RATE_MIN          8000
+#define USE_RATE_MAX          8000
+#define USE_RATES_ARRAY      {USE_RATE_MIN}
+#define USE_RATES_MASK       (SNDRV_PCM_RATE_8000)
+
+#define MAX_FRAMES_PER_BUFFER  (8192)
+
+#define USE_CHANNELS_MIN   1
+#define USE_CHANNELS_MAX   1
+#define USE_PERIODS_MIN    1
+#define USE_PERIODS_MAX    1024
+
+#define MAX_PCM_BUFFER_SIZE  (MAX_FRAMES_PER_BUFFER * sizeof(int16_t))
+#define MIN_PERIOD_SIZE      64
+#define MAX_PERIOD_SIZE      (MAX_PCM_BUFFER_SIZE / 8)
+#define USE_FORMATS          (SNDRV_PCM_FMTBIT_S16_LE)
+
+#define PACKET_TYPE_ADPCM 0
+#define PACKET_TYPE_MSBC  1
+
+
+/* Normally SBC has a H2 header but because we want
+ * to embed keycode support while audio is active without
+ * incuring an additional packet in the connection interval,
+ * we only use half the H2 header.  A normal H2 header has
+ * a 12-bit synchronization word and a 2-bit sequence number
+ * (SN0, SN1).  The sequence number is duplicated, so each
+ * pair of bits in the sequence number shall be always 00
+ * or 11 (see 5.7.2 of HFP_SPEC_V16).  We only receive
+ * the second byte of the H2 header that has the latter part
+ * of the sync word and the entire sequence number.
+ *
+ *  0      70      7
+ * b100000000001XXYY - where X is SN0 repeated and Y is SN1 repeated
+ *
+ * So the sequence numbers are:
+ * b1000000000010000 - 0x01 0x08  - only the 0x08 is received
+ * b1000000000011100 - 0x01 0x38  - only the 0x38 is received
+ * b1000000000010011 - 0x01 0xc8  - only the 0xc8 is received
+ * b1000000000011111 - 0x01 0xf8  - only the 0xf8 is received
+ *
+ * Each mSBC frame is split over 3 BLE frames, where each BLE packet has
+ * a 20 byte payload.
+ * The first BLE packet has the format:
+ * byte 0: keycode LSB
+ * byte 1: keycode MSB, with most significant bit 0 for no key
+ *         code active and 1 if keycode is active
+ * byte 2: Second byte of H2
+ * bytes 3-19: then four byte SBC header, then 13 bytes of audio data
+ *
+ * The second and third packet are purely 20 bytes of audio
+ * data.  Second packet arrives on report 0xFA and third packet
+ * arrives on report 0xFB.
+ *
+ * The mSBC decoder works on a mSBC frame, including the four byte SBC header,
+ * so we have to accumulate 3 BLE packets before sending it to the decoder.
+ */
+#define NUM_SEQUENCES 4
+const uint8_t mSBC_sequence_table[NUM_SEQUENCES] = {0x08, 0x38, 0xc8, 0xf8};
+#define BLE_PACKETS_PER_MSBC_FRAME 3
+#define MSBC_PACKET1_BYTES 17
+#define MSBC_PACKET2_BYTES 20
+#define MSBC_PACKET3_BYTES 20
+
+#define BYTES_PER_MSBC_FRAME \
+      (MSBC_PACKET1_BYTES + MSBC_PACKET2_BYTES + MSBC_PACKET3_BYTES)
+
+const uint8_t mSBC_start_offset_in_packet[BLE_PACKETS_PER_MSBC_FRAME] = {
+	1, /* SBC header starts after 1 byte sequence num portion of H2 */
+	0,
+	0
+};
+const uint8_t mSBC_start_offset_in_buffer[BLE_PACKETS_PER_MSBC_FRAME] = {
+	0,
+	MSBC_PACKET1_BYTES,
+	MSBC_PACKET1_BYTES + MSBC_PACKET2_BYTES
+};
+const uint8_t mSBC_bytes_in_packet[BLE_PACKETS_PER_MSBC_FRAME] = {
+	MSBC_PACKET1_BYTES, /* includes the SBC header but not the sequence num or keycode */
+	MSBC_PACKET2_BYTES,
+	MSBC_PACKET3_BYTES
+};
+
+struct fifo_packet {
+	uint8_t  type;
+	uint8_t  num_bytes;
+	/* Expect no more than 20 bytes. But align struct size to power of 2. */
+	uint8_t  raw_data[30];
+};
+
+#define MAX_SAMPLES_PER_PACKET 128
+#define MIN_SAMPLES_PER_PACKET_P2  32
+#define MAX_PACKETS_PER_BUFFER  \
+		(MAX_FRAMES_PER_BUFFER / MIN_SAMPLES_PER_PACKET_P2)
+#define MAX_BUFFER_SIZE  \
+		(MAX_PACKETS_PER_BUFFER * sizeof(struct fifo_packet))
+
+#define SND_ATVR_RUNNING_TIMEOUT_MSEC    (500)
+
+
+#define TIMER_STATE_BEFORE_DECODE    0
+#define TIMER_STATE_DURING_DECODE    1
+#define TIMER_STATE_AFTER_DECODE     2
+
+static int packet_counter;
+static int num_remotes;
+static bool card_created = false;
+static int dev;
+static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;  /* Index 0-MAX */
+static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;   /* ID for this card */
+static bool enable[SNDRV_CARDS] = {true, false};
+/* Linux does not like NULL initialization. */
+static char *model[SNDRV_CARDS]; /* = {[0 ... (SNDRV_CARDS - 1)] = NULL}; */
+static int pcm_devs[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 1};
+static int pcm_substreams[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 1};
+
+module_param_array(index, int, NULL, 0444);
+MODULE_PARM_DESC(index, "Index value for AndroidTV Remote soundcard.");
+module_param_array(id, charp, NULL, 0444);
+MODULE_PARM_DESC(id, "ID string for AndroidTV Remote soundcard.");
+module_param_array(enable, bool, NULL, 0444);
+MODULE_PARM_DESC(enable, "Enable this AndroidTV Remote soundcard.");
+module_param_array(model, charp, NULL, 0444);
+MODULE_PARM_DESC(model, "Soundcard model.");
+module_param_array(pcm_devs, int, NULL, 0444);
+MODULE_PARM_DESC(pcm_devs, "PCM devices # (0-4) for AndroidTV Remote driver.");
+module_param_array(pcm_substreams, int, NULL, 0444);
+MODULE_PARM_DESC(pcm_substreams,
+	"PCM substreams # (1-128) for AndroidTV Remote driver?");
+
+/* Debug feature to save captured raw and decoded audio into buffers
+ * and make them available for reading from misc devices.
+ * It will record the last session only and only up to the buffer size.
+ * The recording is cleared on read.
+ */
+#define DEBUG_WITH_MISC_DEVICE 0
+
+/* Debug feature to trace audio packets being received */
+#define DEBUG_AUDIO_RECEPTION 1
+
+/* Debug feature to trace HID reports we see */
+#define DEBUG_HID_RAW_INPUT 0
+
+#if (DEBUG_WITH_MISC_DEVICE == 1)
+static int16_t large_pcm_buffer[1280*1024];
+static int large_pcm_index;
+
+static struct miscdevice pcm_dev_node;
+static int pcm_dev_open(struct inode *inode, struct file *file)
+{
+	/* nothing special to do here right now. */
+	return 0;
+}
+
+static ssize_t pcm_dev_read(struct file *file, char __user *buffer,
+			    size_t count, loff_t *ppos)
+{
+	const uint8_t *data = (const uint8_t *)large_pcm_buffer;
+	size_t bytes_left = large_pcm_index * sizeof(int16_t) - *ppos;
+	if (count > bytes_left)
+		count = bytes_left;
+	if (copy_to_user(buffer, &data[*ppos], count))
+		return -EFAULT;
+
+	*ppos += count;
+	return count;
+}
+
+static const struct file_operations pcm_fops = {
+	.owner = THIS_MODULE,
+	.open = pcm_dev_open,
+	.llseek = no_llseek,
+	.read = pcm_dev_read,
+};
+
+static uint8_t raw_adpcm_buffer[640*1024];
+static int raw_adpcm_index;
+static struct miscdevice adpcm_dev_node;
+static int adpcm_dev_open(struct inode *inode, struct file *file)
+{
+	/* nothing special to do here right now. */
+	return 0;
+}
+
+static ssize_t adpcm_dev_read(struct file *file, char __user *buffer,
+			  size_t count, loff_t *ppos)
+{
+	size_t bytes_left = raw_adpcm_index - *ppos;
+	if (count > bytes_left)
+		count = bytes_left;
+	if (copy_to_user(buffer, &raw_adpcm_buffer[*ppos], count))
+		return -EFAULT;
+
+	*ppos += count;
+	return count;
+}
+
+static const struct file_operations adpcm_fops = {
+	.owner = THIS_MODULE,
+	.open = adpcm_dev_open,
+	.llseek = no_llseek,
+	.read = adpcm_dev_read,
+};
+
+static uint8_t raw_mSBC_buffer[640*1024];
+static int raw_mSBC_index;
+static struct miscdevice mSBC_dev_node;
+static int mSBC_dev_open(struct inode *inode, struct file *file)
+{
+	/* nothing special to do here right now. */
+	return 0;
+}
+
+static ssize_t mSBC_dev_read(struct file *file, char __user *buffer,
+			  size_t count, loff_t *ppos)
+{
+	size_t bytes_left = raw_mSBC_index - *ppos;
+	if (count > bytes_left)
+		count = bytes_left;
+	if (copy_to_user(buffer, &raw_mSBC_buffer[*ppos], count))
+		return -EFAULT;
+
+	*ppos += count;
+	return count;
+}
+
+static const struct file_operations mSBC_fops = {
+	.owner = THIS_MODULE,
+	.open = mSBC_dev_open,
+	.llseek = no_llseek,
+	.read = mSBC_dev_read,
+};
+
+#endif
+
+/*
+ * Static substream is needed so Bluetooth can pass encoded audio
+ * to a running stream.
+ * This also serves to enable or disable the decoding of audio in the callback.
+ */
+static struct snd_pcm_substream *s_substream_for_btle;
+static DEFINE_SPINLOCK(s_substream_lock);
+
+struct simple_atomic_fifo {
+	/* Read and write cursors are modified by different threads. */
+	uint read_cursor;
+	uint write_cursor;
+	/* Size must be a power of two. */
+	uint size;
+	/* internal mask is 2*size - 1
+	 * This allows us to tell the difference between full and empty. */
+	uint internal_mask;
+	uint external_mask;
+};
+
+struct snd_atvr {
+	struct snd_card *card;
+	struct snd_pcm *pcm;
+	struct snd_pcm_hardware pcm_hw;
+
+	uint32_t sample_rate;
+
+	uint previous_jiffies; /* Used to detect underflows. */
+	uint timeout_jiffies;
+	struct timer_list decoding_timer;
+	uint timer_state;
+	bool timer_enabled;
+	uint timer_callback_count;
+
+	int16_t peak_level;
+	struct simple_atomic_fifo fifo_controller;
+	struct fifo_packet *fifo_packet_buffer;
+
+	/* IMA/DVI ADPCM Decoder */
+	int pcm_value;
+	int step_index;
+	bool first_packet;
+
+	/* mSBC decoder */
+	uint8_t mSBC_frame_data[BYTES_PER_MSBC_FRAME];
+	int16_t audio_output[MAX_SAMPLES_PER_PACKET];
+	uint8_t packet_in_frame;
+	uint8_t seq_index;
+
+	/*
+	 * Write_index is the circular buffer position.
+	 * It is advanced by the BTLE thread after decoding.
+	 * It is read by ALSA in snd_atvr_pcm_pointer().
+	 * It is not declared volatile because that is not
+	 * allowed in the Linux kernel.
+	 */
+	uint32_t write_index;
+	uint32_t frames_per_buffer;
+	/* count frames generated so far in this period */
+	uint32_t frames_in_period;
+	int16_t *pcm_buffer;
+
+};
+
+/***************************************************************************/
+/************* Atomic FIFO *************************************************/
+/***************************************************************************/
+/*
+ * This FIFO is atomic if used by no more than 2 threads.
+ * One thread modifies the read cursor and the other
+ * thread modifies the write_cursor.
+ * Size and mask are not modified while being used.
+ *
+ * The read and write cursors range internally from 0 to (2*size)-1.
+ * This allows us to tell the difference between full and empty.
+ * When we get the cursors for external use we mask with size-1.
+ *
+ * Memory barriers required on SMP platforms.
+ */
+static int atomic_fifo_init(struct simple_atomic_fifo *fifo_ptr, uint size)
+{
+	/* Make sure size is a power of 2. */
+	if ((size & (size-1)) != 0) {
+		pr_err("%s:%d - ERROR FIFO size = %d, not power of 2!\n",
+			__func__, __LINE__, size);
+		return -EINVAL;
+	}
+	fifo_ptr->read_cursor = 0;
+	fifo_ptr->write_cursor = 0;
+	fifo_ptr->size = size;
+	fifo_ptr->internal_mask = (size * 2) - 1;
+	fifo_ptr->external_mask = size - 1;
+	smp_wmb();
+	return 0;
+}
+
+
+static uint atomic_fifo_available_to_read(struct simple_atomic_fifo *fifo_ptr)
+{
+	smp_rmb();
+	return (fifo_ptr->write_cursor - fifo_ptr->read_cursor)
+			& fifo_ptr->internal_mask;
+}
+
+static uint atomic_fifo_available_to_write(struct simple_atomic_fifo *fifo_ptr)
+{
+	smp_rmb();
+	return fifo_ptr->size - atomic_fifo_available_to_read(fifo_ptr);
+}
+
+static void atomic_fifo_advance_read(
+		struct simple_atomic_fifo *fifo_ptr,
+		uint frames)
+{
+	smp_rmb();
+	BUG_ON(frames > atomic_fifo_available_to_read(fifo_ptr));
+	fifo_ptr->read_cursor = (fifo_ptr->read_cursor + frames)
+			& fifo_ptr->internal_mask;
+	smp_wmb();
+}
+
+static void atomic_fifo_advance_write(
+		struct simple_atomic_fifo *fifo_ptr,
+		uint frames)
+{
+	smp_rmb();
+	BUG_ON(frames > atomic_fifo_available_to_write(fifo_ptr));
+	fifo_ptr->write_cursor = (fifo_ptr->write_cursor + frames)
+		& fifo_ptr->internal_mask;
+	smp_wmb();
+}
+
+static uint atomic_fifo_get_read_index(struct simple_atomic_fifo *fifo_ptr)
+{
+	smp_rmb();
+	return fifo_ptr->read_cursor & fifo_ptr->external_mask;
+}
+
+static uint atomic_fifo_get_write_index(struct simple_atomic_fifo *fifo_ptr)
+{
+	smp_rmb();
+	return fifo_ptr->write_cursor & fifo_ptr->external_mask;
+}
+
+/****************************************************************************/
+static void snd_atvr_handle_frame_advance(
+		struct snd_pcm_substream *substream, uint num_frames)
+{
+	struct snd_atvr *atvr_snd = snd_pcm_substream_chip(substream);
+	atvr_snd->frames_in_period += num_frames;
+	/* Tell ALSA if we have advanced by one or more periods. */
+	if (atvr_snd->frames_in_period >= substream->runtime->period_size) {
+		snd_pcm_period_elapsed(substream);
+		atvr_snd->frames_in_period %= substream->runtime->period_size;
+	}
+}
+
+static uint32_t snd_atvr_bump_write_index(
+			struct snd_pcm_substream *substream,
+			uint32_t num_samples)
+{
+	struct snd_atvr *atvr_snd = snd_pcm_substream_chip(substream);
+	uint32_t pos = atvr_snd->write_index;
+
+	/* Advance write position. */
+	pos += num_samples;
+	/* Wrap around at end of the circular buffer. */
+	pos %= atvr_snd->frames_per_buffer;
+	atvr_snd->write_index = pos;
+
+	snd_atvr_handle_frame_advance(substream, num_samples);
+
+	return pos;
+}
+
+/*
+ * Decode an IMA/DVI ADPCM packet and write the PCM data into a circular buffer.
+ * ADPCM is 4:1 16kHz@256kbps -> 16kHz@64kbps.
+ * ADPCM is 4:1 8kHz@128kbps -> 8kHz@32kbps.
+ */
+static const int ima_index_table[16] = {
+	-1, -1, -1, -1, /* +0 - +3, decrease the step size */
+	2, 4, 6, 8,     /* +4 - +7, increase the step size */
+	-1, -1, -1, -1, /* -0 - -3, decrease the step size */
+	2, 4, 6, 8      /* -4 - -7, increase the step size */
+};
+static const int16_t ima_step_table[89] = {
+	7, 8, 9, 10, 11, 12, 13, 14, 16, 17,
+	19, 21, 23, 25, 28, 31, 34, 37, 41, 45,
+	50, 55, 60, 66, 73, 80, 88, 97, 107, 118,
+	130, 143, 157, 173, 190, 209, 230, 253, 279, 307,
+	337, 371, 408, 449, 494, 544, 598, 658, 724, 796,
+	876, 963, 1060, 1166, 1282, 1411, 1552, 1707, 1878, 2066,
+	2272, 2499, 2749, 3024, 3327, 3660, 4026, 4428, 4871, 5358,
+	5894, 6484, 7132, 7845, 8630, 9493, 10442, 11487, 12635, 13899,
+	15289, 16818, 18500, 20350, 22385, 24623, 27086, 29794, 32767
+};
+
+static void decode_adpcm_nibble(uint8_t nibble, struct snd_atvr *atvr_snd,
+				struct snd_pcm_substream *substream)
+{
+	int step_index = atvr_snd->step_index;
+	int value = atvr_snd->pcm_value;
+	int step = ima_step_table[step_index];
+	int diff;
+
+	diff = step >> 3;
+	if (nibble & 1)
+		diff += (step >> 2);
+	if (nibble & 2)
+		diff += (step >> 1);
+	if (nibble & 4)
+		diff += step;
+
+	if (nibble & 8) {
+		value -= diff;
+		if (value < -32768)
+			value = -32768;
+	} else {
+		value += diff;
+		if (value > 32767)
+			value = 32767;
+	}
+	atvr_snd->pcm_value = value;
+
+	/* copy to stream */
+	atvr_snd->pcm_buffer[atvr_snd->write_index] = value;
+#if (DEBUG_WITH_MISC_DEVICE == 1)
+	if (large_pcm_index < ARRAY_SIZE(large_pcm_buffer))
+		large_pcm_buffer[large_pcm_index++] = value;
+#endif
+	snd_atvr_bump_write_index(substream, 1);
+	if (value > atvr_snd->peak_level)
+		atvr_snd->peak_level = value;
+
+	/* update step_index */
+	step_index += ima_index_table[nibble];
+	/* clamp step_index */
+	if (step_index < 0)
+		step_index = 0;
+	else if (step_index >= ARRAY_SIZE(ima_step_table))
+		step_index = ARRAY_SIZE(ima_step_table) - 1;
+	atvr_snd->step_index = step_index;
+}
+
+static int snd_atvr_decode_adpcm_packet(
+			struct snd_pcm_substream *substream,
+			const uint8_t *adpcm_input,
+			size_t num_bytes
+			)
+{
+	uint i;
+	struct snd_atvr *atvr_snd = snd_pcm_substream_chip(substream);
+
+	/* Decode IMA ADPCM data to PCM. */
+	if (atvr_snd->first_packet) {
+		/* the first two bytes of the first packet
+		 * is the unencoded first 16-bit sample, high
+		 * byte first.
+		 */
+		int value = ((int)adpcm_input[0] << 8) | adpcm_input[1];
+		pr_info("%s: first packet, initial value is %d (0x%x, 0x%x)\n",
+			__func__, value, adpcm_input[0], adpcm_input[1]);
+		atvr_snd->pcm_value = value;
+		atvr_snd->pcm_buffer[atvr_snd->write_index] = value;
+#if (DEBUG_WITH_MISC_DEVICE == 1)
+		if (raw_adpcm_index < ARRAY_SIZE(raw_adpcm_buffer))
+			raw_adpcm_buffer[raw_adpcm_index++] = adpcm_input[0];
+		if (raw_adpcm_index < ARRAY_SIZE(raw_adpcm_buffer))
+			raw_adpcm_buffer[raw_adpcm_index++] = adpcm_input[1];
+		if (large_pcm_index < ARRAY_SIZE(large_pcm_buffer))
+			large_pcm_buffer[large_pcm_index++] = value;
+#endif
+		snd_atvr_bump_write_index(substream, 1);
+		atvr_snd->peak_level = value;
+		atvr_snd->first_packet = false;
+		i = 2;
+	} else {
+		i = 0;
+	}
+
+	for (; i < num_bytes; i++) {
+		uint8_t raw = adpcm_input[i];
+		uint8_t nibble;
+
+#if (DEBUG_WITH_MISC_DEVICE == 1)
+		if (raw_adpcm_index < ARRAY_SIZE(raw_adpcm_buffer))
+			raw_adpcm_buffer[raw_adpcm_index++] = raw;
+#endif
+
+		/* process first nibble */
+		nibble = (raw >> 4) & 0x0f;
+		decode_adpcm_nibble(nibble, atvr_snd, substream);
+
+		/* process second nibble */
+		nibble = raw & 0x0f;
+		decode_adpcm_nibble(nibble, atvr_snd, substream);
+	}
+
+	return num_bytes * 2;
+}
+
+/*
+ * Decode an mSBC packet and write the PCM data into a circular buffer.
+ * mSBC is supposed to be 16KHz but this is a 8KHz variant version.
+ */
+#define BLOCKS_PER_PACKET 15
+#define NUM_BITS 26
+
+static int snd_atvr_decode_8KHz_mSBC_packet(
+			struct snd_pcm_substream *substream,
+			const uint8_t *sbc_input,
+			size_t num_bytes
+			)
+{
+	uint num_samples = 0;
+	uint remaining;
+	uint i;
+	uint32_t pos;
+	uint read_index;
+	uint write_index;
+	struct snd_atvr *atvr_snd = snd_pcm_substream_chip(substream);
+
+	/* Decode mSBC data to PCM. */
+#if (DEBUG_WITH_MISC_DEVICE == 1)
+	for (i = 0; i < num_bytes; i++) {
+		if (raw_mSBC_index < ARRAY_SIZE(raw_mSBC_buffer))
+			raw_mSBC_buffer[raw_mSBC_index++] = sbc_input[i];
+		else
+			break;
+	}
+#endif
+	if (atvr_snd->packet_in_frame == 0) {
+		if (sbc_input[0] != mSBC_sequence_table[atvr_snd->seq_index]) {
+			snd_atvr_log("sequence_num err, 0x%02x != 0x%02x\n",
+				     sbc_input[1],
+				     mSBC_sequence_table[atvr_snd->seq_index]);
+			return 0;
+		}
+		atvr_snd->seq_index++;
+		if (atvr_snd->seq_index == NUM_SEQUENCES)
+			atvr_snd->seq_index = 0;
+
+		/* subtract the sequence number */
+		num_bytes--;
+	}
+	if (num_bytes != mSBC_bytes_in_packet[atvr_snd->packet_in_frame]) {
+		pr_err("%s: received %zd audio bytes but expected %d bytes\n",
+		       __func__, num_bytes,
+		       mSBC_bytes_in_packet[atvr_snd->packet_in_frame]);
+		return 0;
+	}
+	write_index = mSBC_start_offset_in_buffer[atvr_snd->packet_in_frame];
+	read_index = mSBC_start_offset_in_packet[atvr_snd->packet_in_frame];
+	memcpy(&atvr_snd->mSBC_frame_data[write_index],
+	       &sbc_input[read_index],
+	       mSBC_bytes_in_packet[atvr_snd->packet_in_frame]);
+	atvr_snd->packet_in_frame++;
+	if (atvr_snd->packet_in_frame < BLE_PACKETS_PER_MSBC_FRAME) {
+		/* we don't have a complete mSBC frame yet, just return */
+		return 0;
+	}
+	/* reset for next mSBC frame */
+	atvr_snd->packet_in_frame = 0;
+
+	/* we have a complete mSBC frame, send it to the decoder */
+	num_samples = sbc_decode(BLOCKS_PER_PACKET, NUM_BITS,
+				 atvr_snd->mSBC_frame_data,
+				 BYTES_PER_MSBC_FRAME,
+				 &atvr_snd->audio_output[0]);
+
+	/* Write PCM data to the buffer. */
+	pos = atvr_snd->write_index;
+	read_index = 0;
+	if ((pos + num_samples) > atvr_snd->frames_per_buffer) {
+		for (i = pos; i < atvr_snd->frames_per_buffer; i++) {
+			int16_t sample = atvr_snd->audio_output[read_index++];
+			if (sample > atvr_snd->peak_level)
+				atvr_snd->peak_level = sample;
+#if (DEBUG_WITH_MISC_DEVICE == 1)
+			if (large_pcm_index < ARRAY_SIZE(large_pcm_buffer))
+				large_pcm_buffer[large_pcm_index++] = sample;
+#endif
+			atvr_snd->pcm_buffer[i] = sample;
+		}
+
+		remaining = (pos + num_samples) - atvr_snd->frames_per_buffer;
+		for (i = 0; i < remaining; i++) {
+			int16_t sample = atvr_snd->audio_output[read_index++];
+			if (sample > atvr_snd->peak_level)
+				atvr_snd->peak_level = sample;
+#if (DEBUG_WITH_MISC_DEVICE == 1)
+			if (large_pcm_index < ARRAY_SIZE(large_pcm_buffer))
+				large_pcm_buffer[large_pcm_index++] = sample;
+#endif
+
+			atvr_snd->pcm_buffer[i] = sample;
+		}
+
+	} else {
+		for (i = 0; i < num_samples; i++) {
+			int16_t sample = atvr_snd->audio_output[read_index++];
+			if (sample > atvr_snd->peak_level)
+				atvr_snd->peak_level = sample;
+#if (DEBUG_WITH_MISC_DEVICE == 1)
+			if (large_pcm_index < ARRAY_SIZE(large_pcm_buffer))
+				large_pcm_buffer[large_pcm_index++] = sample;
+#endif
+			atvr_snd->pcm_buffer[i + pos] = sample;
+		}
+	}
+
+	snd_atvr_bump_write_index(substream, num_samples);
+
+	return num_samples;
+}
+
+/**
+ * This is called by the event filter when it gets an audio packet
+ * from the AndroidTV remote.  It writes the packet into a FIFO
+ * which is then read and decoded by the timer task.
+ * @param input pointer to data to be decoded
+ * @param num_bytes how many bytes in raw_input
+ * @return number of samples decoded or negative error.
+ */
+static void audio_dec(const uint8_t *raw_input, int type, size_t num_bytes)
+{
+	bool dropped_packet = false;
+	struct snd_pcm_substream *substream;
+
+	spin_lock(&s_substream_lock);
+	substream = s_substream_for_btle;
+	if (substream != NULL) {
+		struct snd_atvr *atvr_snd = snd_pcm_substream_chip(substream);
+		/* Write data to a FIFO for decoding by the timer task. */
+		uint writable = atomic_fifo_available_to_write(
+			&atvr_snd->fifo_controller);
+		if (writable > 0) {
+			uint fifo_index = atomic_fifo_get_write_index(
+				&atvr_snd->fifo_controller);
+			struct fifo_packet *packet =
+				&atvr_snd->fifo_packet_buffer[fifo_index];
+			packet->type = type;
+			packet->num_bytes = (uint8_t)num_bytes;
+			memcpy(packet->raw_data, raw_input, num_bytes);
+			atomic_fifo_advance_write(
+				&atvr_snd->fifo_controller, 1);
+		} else {
+			dropped_packet = true;
+			s_substream_for_btle = NULL; /* Stop decoding. */
+		}
+	}
+	packet_counter++;
+	spin_unlock(&s_substream_lock);
+
+	if (dropped_packet)
+		snd_atvr_log("WARNING, raw audio packet dropped, FIFO full\n");
+}
+
+/*
+ * Note that smp_rmb() is called by snd_atvr_timer_callback()
+ * before calling this function.
+ *
+ * Reads:
+ *    jiffies
+ *    atvr_snd->previous_jiffies
+ * Writes:
+ *    atvr_snd->previous_jiffies
+ * Returns:
+ *    num_frames needed to catch up to the current time
+ */
+static uint snd_atvr_calc_frame_advance(struct snd_atvr *atvr_snd)
+{
+	/* Determine how much time passed. */
+	uint now_jiffies = jiffies;
+	uint elapsed_jiffies = now_jiffies - atvr_snd->previous_jiffies;
+	/* Convert jiffies to frames. */
+	uint frames_by_time = jiffies_to_msecs(elapsed_jiffies)
+		* atvr_snd->sample_rate / 1000;
+	atvr_snd->previous_jiffies = now_jiffies;
+
+	/* Don't write more than one buffer full. */
+	if (frames_by_time > (atvr_snd->frames_per_buffer - 4))
+		frames_by_time  = atvr_snd->frames_per_buffer - 4;
+
+	return frames_by_time;
+}
+
+/* Write zeros into the PCM buffer. */
+static uint32_t snd_atvr_write_silence(struct snd_atvr *atvr_snd,
+			uint32_t pos,
+			int frames_to_advance)
+{
+	/* Does it wrap? */
+	if ((pos + frames_to_advance) > atvr_snd->frames_per_buffer) {
+		/* Write to end of buffer. */
+		int16_t *destination = &atvr_snd->pcm_buffer[pos];
+		size_t num_frames = atvr_snd->frames_per_buffer - pos;
+		size_t num_bytes = num_frames * sizeof(int16_t);
+		memset(destination, 0, num_bytes);
+		/* Write from start of buffer to new pos. */
+		destination = &atvr_snd->pcm_buffer[0];
+		num_frames = frames_to_advance - num_frames;
+		num_bytes = num_frames * sizeof(int16_t);
+		memset(destination, 0, num_bytes);
+	} else {
+		/* Write within the buffer. */
+		int16_t *destination = &atvr_snd->pcm_buffer[pos];
+		size_t num_bytes = frames_to_advance * sizeof(int16_t);
+		memset(destination, 0, num_bytes);
+	}
+	/* Advance and wrap write_index */
+	pos += frames_to_advance;
+	pos %= atvr_snd->frames_per_buffer;
+	return pos;
+}
+
+/*
+ * Called by timer task to decode raw audio data from the FIFO into the PCM
+ * buffer.  Returns the number of packets decoded.
+ */
+static uint snd_atvr_decode_from_fifo(struct snd_pcm_substream *substream)
+{
+	uint i;
+	struct snd_atvr *atvr_snd = snd_pcm_substream_chip(substream);
+	uint readable = atomic_fifo_available_to_read(
+		&atvr_snd->fifo_controller);
+	for (i = 0; i < readable; i++) {
+		uint fifo_index = atomic_fifo_get_read_index(
+			&atvr_snd->fifo_controller);
+		struct fifo_packet *packet =
+			&atvr_snd->fifo_packet_buffer[fifo_index];
+		if (packet->type == PACKET_TYPE_ADPCM) {
+			snd_atvr_decode_adpcm_packet(substream,
+						     packet->raw_data,
+						     packet->num_bytes);
+		} else if (packet->type == PACKET_TYPE_MSBC) {
+			snd_atvr_decode_8KHz_mSBC_packet(substream,
+							 packet->raw_data,
+							 packet->num_bytes);
+		} else {
+			pr_err("Unknown packet type %d\n", packet->type);
+		}
+
+		atomic_fifo_advance_read(&atvr_snd->fifo_controller, 1);
+	}
+	return readable;
+}
+
+static int snd_atvr_schedule_timer(struct snd_pcm_substream *substream)
+{
+	int ret;
+	struct snd_atvr *atvr_snd = snd_pcm_substream_chip(substream);
+	uint msec_to_sleep = (substream->runtime->period_size * 1000)
+			/ atvr_snd->sample_rate;
+	uint jiffies_to_sleep = msecs_to_jiffies(msec_to_sleep);
+	if (jiffies_to_sleep < 2)
+		jiffies_to_sleep = 2;
+	ret = mod_timer(&atvr_snd->decoding_timer, jiffies + jiffies_to_sleep);
+	if (ret < 0)
+		pr_err("%s:%d - ERROR in mod_timer, ret = %d\n",
+			   __func__, __LINE__, ret);
+	return ret;
+}
+
+static void snd_atvr_timer_callback(unsigned long data)
+{
+	uint readable;
+	uint packets_read;
+	bool need_silence = false;
+	struct snd_pcm_substream *substream = (struct snd_pcm_substream *)data;
+	struct snd_atvr *atvr_snd = snd_pcm_substream_chip(substream);
+
+	/* timer_enabled will be false when stopping a stream. */
+	smp_rmb();
+	if (!atvr_snd->timer_enabled)
+		return;
+	atvr_snd->timer_callback_count++;
+
+	switch (atvr_snd->timer_state) {
+	case TIMER_STATE_BEFORE_DECODE:
+		readable = atomic_fifo_available_to_read(
+				&atvr_snd->fifo_controller);
+		if (readable > 0) {
+			atvr_snd->timer_state = TIMER_STATE_DURING_DECODE;
+			/* Fall through into next state. */
+		} else {
+			need_silence = true;
+			break;
+		}
+
+	case TIMER_STATE_DURING_DECODE:
+		packets_read = snd_atvr_decode_from_fifo(substream);
+		if (packets_read > 0) {
+			/* Defer timeout */
+			atvr_snd->previous_jiffies = jiffies;
+			break;
+		}
+		if (s_substream_for_btle == NULL) {
+			atvr_snd->timer_state = TIMER_STATE_AFTER_DECODE;
+			/* Decoder died. Overflowed?
+			 * Fall through into next state. */
+		} else if ((jiffies - atvr_snd->previous_jiffies) >
+			   atvr_snd->timeout_jiffies) {
+			snd_atvr_log("audio UNDERFLOW detected\n");
+			/*  Not fatal.  Reset timeout. */
+			atvr_snd->previous_jiffies = jiffies;
+			break;
+		} else
+			break;
+
+	case TIMER_STATE_AFTER_DECODE:
+		need_silence = true;
+		break;
+	}
+
+	/* Write silence before and after decoding. */
+	if (need_silence) {
+		uint frames_to_silence = snd_atvr_calc_frame_advance(atvr_snd);
+		atvr_snd->write_index = snd_atvr_write_silence(
+				atvr_snd,
+				atvr_snd->write_index,
+				frames_to_silence);
+		/* This can cause snd_atvr_pcm_trigger() to be called, which
+		 * may try to stop the timer. */
+		snd_atvr_handle_frame_advance(substream, frames_to_silence);
+	}
+
+	smp_rmb();
+	if (atvr_snd->timer_enabled)
+		snd_atvr_schedule_timer(substream);
+}
+
+static void snd_atvr_timer_start(struct snd_pcm_substream *substream)
+{
+	struct snd_atvr *atvr_snd = snd_pcm_substream_chip(substream);
+	atvr_snd->timer_enabled = true;
+	atvr_snd->previous_jiffies = jiffies;
+	atvr_snd->timeout_jiffies =
+		msecs_to_jiffies(SND_ATVR_RUNNING_TIMEOUT_MSEC);
+	atvr_snd->timer_callback_count = 0;
+	smp_wmb();
+	setup_timer(&atvr_snd->decoding_timer,
+		snd_atvr_timer_callback,
+		(unsigned long)substream);
+
+	snd_atvr_schedule_timer(substream);
+}
+
+static void snd_atvr_timer_stop(struct snd_pcm_substream *substream)
+{
+	int ret;
+	struct snd_atvr *atvr_snd = snd_pcm_substream_chip(substream);
+
+	/* Tell timer function not to reschedule itself if it runs. */
+	atvr_snd->timer_enabled = false;
+	smp_wmb();
+	if (!in_interrupt()) {
+		/* del_timer_sync will hang if called in the timer callback. */
+		ret = del_timer_sync(&atvr_snd->decoding_timer);
+		if (ret < 0)
+			pr_err("%s:%d - ERROR del_timer_sync failed, %d\n",
+				__func__, __LINE__, ret);
+	}
+	/*
+	 * Else if we are in an interrupt then we are being called from the
+	 * middle of the snd_atvr_timer_callback(). The timer will not get
+	 * rescheduled because atvr_snd->timer_enabled will be false
+	 * at the end of snd_atvr_timer_callback().
+	 * We do not need to "delete" the timer.
+	 * The del_timer functions just cancel pending timers.
+	 * There are no resources that need to be cleaned up.
+	 */
+}
+
+/* ===================================================================== */
+/*
+ * PCM interface
+ */
+
+static int snd_atvr_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+	struct snd_atvr *atvr_snd = snd_pcm_substream_chip(substream);
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_RESUME:
+		snd_atvr_log("%s starting audio\n", __func__);
+
+#if (DEBUG_WITH_MISC_DEVICE == 1)
+		large_pcm_index = 0;
+		raw_adpcm_index = 0;
+		raw_mSBC_index = 0;
+#endif
+		packet_counter = 0;
+		atvr_snd->peak_level = -32768;
+		atvr_snd->previous_jiffies = jiffies;
+		atvr_snd->timer_state = TIMER_STATE_BEFORE_DECODE;
+
+		/* ADPCM decoder state */
+		atvr_snd->step_index = 0;
+		atvr_snd->pcm_value = 0;
+		atvr_snd->first_packet = true;
+
+		/* mSBC decoder */
+		atvr_snd->packet_in_frame = 0;
+		atvr_snd->seq_index = 0;
+
+		snd_atvr_timer_start(substream);
+		 /* Enables callback from BTLE driver. */
+		s_substream_for_btle = substream;
+		smp_wmb(); /* so other thread will see s_substream_for_btle */
+		return 0;
+
+	case SNDRV_PCM_TRIGGER_STOP:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+		snd_atvr_log("%s stopping audio, peak = %d, # packets = %d\n",
+			__func__, atvr_snd->peak_level, packet_counter);
+
+		s_substream_for_btle = NULL;
+		smp_wmb(); /* so other thread will see s_substream_for_btle */
+		snd_atvr_timer_stop(substream);
+		return 0;
+	}
+	return -EINVAL;
+}
+
+static int snd_atvr_pcm_prepare(struct snd_pcm_substream *substream)
+{
+	struct snd_atvr *atvr_snd = snd_pcm_substream_chip(substream);
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	snd_atvr_log("%s, rate = %d, period_size = %d, buffer_size = %d\n",
+		__func__, (int) runtime->rate,
+		(int) runtime->period_size,
+		(int) runtime->buffer_size);
+
+	if (runtime->buffer_size > MAX_FRAMES_PER_BUFFER)
+		return -EINVAL;
+
+	atvr_snd->sample_rate = runtime->rate;
+	atvr_snd->frames_per_buffer = runtime->buffer_size;
+
+	return 0; /* TODO - review */
+}
+
+static struct snd_pcm_hardware atvr_pcm_hardware = {
+	.info =			(SNDRV_PCM_INFO_MMAP |
+				 SNDRV_PCM_INFO_INTERLEAVED |
+				 SNDRV_PCM_INFO_RESUME |
+				 SNDRV_PCM_INFO_MMAP_VALID),
+	.formats =		USE_FORMATS,
+	.rates =		USE_RATES_MASK,
+	.rate_min =		USE_RATE_MIN,
+	.rate_max =		USE_RATE_MAX,
+	.channels_min =		USE_CHANNELS_MIN,
+	.channels_max =		USE_CHANNELS_MAX,
+	.buffer_bytes_max =	MAX_PCM_BUFFER_SIZE,
+	.period_bytes_min =	MIN_PERIOD_SIZE,
+	.period_bytes_max =	MAX_PERIOD_SIZE,
+	.periods_min =		USE_PERIODS_MIN,
+	.periods_max =		USE_PERIODS_MAX,
+	.fifo_size =		0,
+};
+
+static int snd_atvr_pcm_hw_params(struct snd_pcm_substream *substream,
+					struct snd_pcm_hw_params *hw_params)
+{
+	int ret = 0;
+	struct snd_atvr *atvr_snd = snd_pcm_substream_chip(substream);
+
+	atvr_snd->write_index = 0;
+	smp_wmb();
+
+	return ret;
+}
+
+static int snd_atvr_pcm_hw_free(struct snd_pcm_substream *substream)
+{
+	return 0;
+}
+
+static int snd_atvr_pcm_open(struct snd_pcm_substream *substream)
+{
+	struct snd_atvr *atvr_snd = snd_pcm_substream_chip(substream);
+	struct snd_pcm_runtime *runtime = substream->runtime;
+
+	int ret = atomic_fifo_init(&atvr_snd->fifo_controller,
+				   MAX_PACKETS_PER_BUFFER);
+	if (ret)
+		return ret;
+
+	runtime->hw = atvr_snd->pcm_hw;
+	if (substream->pcm->device & 1) {
+		runtime->hw.info &= ~SNDRV_PCM_INFO_INTERLEAVED;
+		runtime->hw.info |= SNDRV_PCM_INFO_NONINTERLEAVED;
+	}
+	if (substream->pcm->device & 2)
+		runtime->hw.info &= ~(SNDRV_PCM_INFO_MMAP
+			| SNDRV_PCM_INFO_MMAP_VALID);
+
+	snd_atvr_log("%s, built %s %s\n", __func__, __DATE__, __TIME__);
+
+	/*
+	 * Allocate the maximum buffer now and then just use part of it when
+	 * the substream starts. We don't need DMA because it will just
+	 * get written to by the BTLE code.
+	 */
+	/* We only use this buffer in the kernel and we do not do
+	 * DMA so vmalloc should be OK. */
+	atvr_snd->pcm_buffer = vmalloc(MAX_PCM_BUFFER_SIZE);
+	if (atvr_snd->pcm_buffer == NULL) {
+		pr_err("%s:%d - ERROR PCM buffer allocation failed\n",
+			__func__, __LINE__);
+		return -ENOMEM;
+	}
+
+	/* We only use this buffer in the kernel and we do not do
+	 * DMA so vmalloc should be OK.
+	 */
+	atvr_snd->fifo_packet_buffer = vmalloc(MAX_BUFFER_SIZE);
+	if (atvr_snd->fifo_packet_buffer == NULL) {
+		pr_err("%s:%d - ERROR buffer allocation failed\n",
+			__func__, __LINE__);
+		vfree(atvr_snd->pcm_buffer);
+		atvr_snd->pcm_buffer = NULL;
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int snd_atvr_pcm_close(struct snd_pcm_substream *substream)
+{
+	struct snd_atvr *atvr_snd = snd_pcm_substream_chip(substream);
+
+	/* Make sure the timer is not running */
+	if (atvr_snd->timer_enabled)
+		snd_atvr_timer_stop(substream);
+
+	if (atvr_snd->timer_callback_count > 0)
+		snd_atvr_log("processed %d packets in %d timer callbacks\n",
+			packet_counter, atvr_snd->timer_callback_count);
+
+	if (atvr_snd->pcm_buffer) {
+		vfree(atvr_snd->pcm_buffer);
+		atvr_snd->pcm_buffer = NULL;
+	}
+
+	/*
+	 * Use spinlock so we don't free the FIFO when the
+	 * driver is writing to it.
+	 * The s_substream_for_btle should already be NULL by now.
+	 */
+	spin_lock(&s_substream_lock);
+	if (atvr_snd->fifo_packet_buffer) {
+		vfree(atvr_snd->fifo_packet_buffer);
+		atvr_snd->fifo_packet_buffer = NULL;
+	}
+	spin_unlock(&s_substream_lock);
+	return 0;
+}
+
+static snd_pcm_uframes_t snd_atvr_pcm_pointer(
+		struct snd_pcm_substream *substream)
+{
+	struct snd_atvr *atvr_snd = snd_pcm_substream_chip(substream);
+	/* write_index is written by another driver thread */
+	smp_rmb();
+	return atvr_snd->write_index;
+}
+
+static int snd_atvr_pcm_copy(struct snd_pcm_substream *substream,
+			  int channel, snd_pcm_uframes_t pos,
+			  void __user *dst, snd_pcm_uframes_t count)
+{
+	struct snd_atvr *atvr_snd = snd_pcm_substream_chip(substream);
+	short *output = (short *)dst;
+
+	/* TODO Needs to be modified if we support more than 1 channel. */
+	/*
+	 * Copy from PCM buffer to user memory.
+	 * Are we reading past the end of the buffer?
+	 */
+	if ((pos + count) > atvr_snd->frames_per_buffer) {
+		const int16_t *source = &atvr_snd->pcm_buffer[pos];
+		int16_t *destination = output;
+		size_t num_frames = atvr_snd->frames_per_buffer - pos;
+		size_t num_bytes = num_frames * sizeof(int16_t);
+		memcpy(destination, source, num_bytes);
+
+		source = &atvr_snd->pcm_buffer[0];
+		destination += num_frames;
+		num_frames = count - num_frames;
+		num_bytes = num_frames * sizeof(int16_t);
+		memcpy(destination, source, num_bytes);
+	} else {
+		const int16_t *source = &atvr_snd->pcm_buffer[pos];
+		int16_t *destination = output;
+		size_t num_bytes = count * sizeof(int16_t);
+		memcpy(destination, source, num_bytes);
+	}
+
+	return 0;
+}
+
+static int snd_atvr_pcm_silence(struct snd_pcm_substream *substream,
+				int channel, snd_pcm_uframes_t pos,
+				snd_pcm_uframes_t count)
+{
+	return 0; /* Do nothing. Only used by output? */
+}
+
+static struct snd_pcm_ops snd_atvr_pcm_ops_no_buf = {
+	.open =		snd_atvr_pcm_open,
+	.close =	snd_atvr_pcm_close,
+	.ioctl =	snd_pcm_lib_ioctl,
+	.hw_params =	snd_atvr_pcm_hw_params,
+	.hw_free =	snd_atvr_pcm_hw_free,
+	.prepare =	snd_atvr_pcm_prepare,
+	.trigger =	snd_atvr_pcm_trigger,
+	.pointer =	snd_atvr_pcm_pointer,
+	.copy =		snd_atvr_pcm_copy,
+	.silence =	snd_atvr_pcm_silence,
+};
+
+static int snd_card_atvr_pcm(struct snd_atvr *atvr_snd,
+			     int device,
+			     int substreams)
+{
+	struct snd_pcm *pcm;
+	struct snd_pcm_ops *ops;
+	int err;
+
+	err = snd_pcm_new(atvr_snd->card, "ATVR PCM", device,
+			  0, /* no playback substreams */
+			  1, /* 1 capture substream */
+			  &pcm);
+	if (err < 0)
+		return err;
+	atvr_snd->pcm = pcm;
+	ops = &snd_atvr_pcm_ops_no_buf;
+	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, ops);
+	pcm->private_data = atvr_snd;
+	pcm->info_flags = 0;
+	strcpy(pcm->name, "ATVR PCM");
+
+	return 0;
+}
+
+static int atvr_snd_initialize(struct hid_device *hdev)
+{
+	struct snd_card *card;
+	struct snd_atvr *atvr_snd;
+	int err;
+	int i;
+
+	if (dev >= SNDRV_CARDS)
+		return -ENODEV;
+	if (!enable[dev]) {
+		dev++;
+		return -ENOENT;
+	}
+	err = snd_card_create(index[dev], id[dev], THIS_MODULE,
+			      sizeof(struct snd_atvr), &card);
+	if (err < 0) {
+		pr_err("%s: snd_card_create() returned err %d\n",
+		       __func__, err);
+		return err;
+	}
+	hid_set_drvdata(hdev, card);
+	atvr_snd = card->private_data;
+	atvr_snd->card = card;
+	for (i = 0; i < MAX_PCM_DEVICES && i < pcm_devs[dev]; i++) {
+		if (pcm_substreams[dev] < 1)
+			pcm_substreams[dev] = 1;
+		if (pcm_substreams[dev] > MAX_PCM_SUBSTREAMS)
+			pcm_substreams[dev] = MAX_PCM_SUBSTREAMS;
+		err = snd_card_atvr_pcm(atvr_snd, i, pcm_substreams[dev]);
+		if (err < 0) {
+			pr_err("%s: snd_card_atvr_pcm() returned err %d\n",
+			       __func__, err);
+			goto __nodev;
+		}
+	}
+
+
+	atvr_snd->pcm_hw = atvr_pcm_hardware;
+
+	strcpy(card->driver, "AndroidTV Remote Audio");
+	strcpy(card->shortname, "ATVRAudio");
+	sprintf(card->longname, "AndroidTV Remote %i audio", dev + 1);
+
+	snd_card_set_dev(card, &hdev->dev);
+
+	err = snd_card_register(card);
+	if (!err)
+		return 0;
+
+__nodev:
+	snd_card_free(card);
+	return err;
+}
+
+static int atvr_raw_event(struct hid_device *hdev, struct hid_report *report,
+	u8 *data, int size)
+{
+#if (DEBUG_HID_RAW_INPUT == 1)
+	pr_info("%s: report->id = 0x%x, size = %d\n",
+		__func__, report->id, size);
+	if (size < 20) {
+		int i;
+		for (i = 1; i < size; i++) {
+			pr_info("data[%d] = 0x%02x\n", i, data[i]);
+		}
+	}
+#endif
+	if (report->id == ADPCM_AUDIO_REPORT_ID) {
+		/* send the data, minus the report-id in data[0], to the
+		 * alsa audio decoder driver for ADPCM
+		 */
+#if (DEBUG_AUDIO_RECEPTION == 1)
+		if (packet_counter == 0) {
+			snd_atvr_log("first ADPCM packet received\n");
+		}
+#endif
+		audio_dec(&data[1], PACKET_TYPE_ADPCM, size - 1);
+		/* we've handled the event */
+		return 1;
+	} else if (report->id == MSBC_AUDIO1_REPORT_ID) {
+		/* first do special case check if there is any
+		 * keyCode active in this report.  if so, we
+		 * generate the same keyCode but on report 2, which
+		 * is where normal keys are reported.  the keycode
+		 * is being sent in the audio packet to save packets
+		 * and over the air bandwidth.
+		 */
+		if (data[2] & KEYCODE_PRESENT_IN_AUDIO_PACKET_FLAG) {
+			u8 key_data[3];
+			key_data[0] = INPUT_REPORT_ID;
+			key_data[1] = data[1]; /* low byte */
+			key_data[2] = data[2]; /* high byte */
+			key_data[2] &= ~KEYCODE_PRESENT_IN_AUDIO_PACKET_FLAG;
+			hid_report_raw_event(hdev, 0, key_data,
+					     sizeof(key_data), 0);
+			pr_info("%s: generated hid keycode 0x%02x%02x\n",
+				__func__, key_data[2], key_data[1]);
+		}
+
+		/* send the audio part to the alsa audio decoder for mSBC */
+#if (DEBUG_AUDIO_RECEPTION == 1)
+		if (packet_counter == 0) {
+			snd_atvr_log("first MSBC packet received\n");
+		}
+#endif
+		/* strip the one byte report id and two byte keycode field */
+		audio_dec(&data[1 + 2], PACKET_TYPE_MSBC, size - 1 - 2);
+		/* we've handled the event */
+		return 1;
+	} else if ((report->id == MSBC_AUDIO2_REPORT_ID) ||
+		   (report->id == MSBC_AUDIO3_REPORT_ID)) {
+		/* strip the one byte report id */
+		audio_dec(&data[1], PACKET_TYPE_MSBC, size - 1);
+		/* we've handled the event */
+		return 1;
+	}
+	/* let the event through for regular input processing */
+	return 0;
+}
+
+static int atvr_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+	int ret;
+
+	/* since vendor/product id filter doesn't work yet, because
+	 * Bluedroid is unable to get the vendor/product id, we
+	 * have to filter on name
+	 */
+	pr_info("%s: hdev->name = %s, vendor_id = %d, product_id = %d, num %d\n",
+		__func__, hdev->name, hdev->vendor, hdev->product, num_remotes);
+	if (strcmp(hdev->name, "ADT-1_Remote") &&
+	    strcmp(hdev->name, "Spike") &&
+	    strcmp(hdev->name, "Nexus Remote")) {
+		ret = -ENODEV;
+		goto err_match;
+	}
+	pr_info("%s: Found target remote %s\n", __func__, hdev->name);
+
+	ret = hid_parse(hdev);
+	if (ret) {
+		hid_err(hdev, "hid parse failed\n");
+		goto err_parse;
+	}
+
+	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+	if (ret) {
+		hid_err(hdev, "hw start failed\n");
+		goto err_start;
+	}
+
+	// Lazy-creation of the soundcard, and enable the wired headset only then
+	// to avoid race conditions on subsequent connections.
+	// AudioService.java delays enabling the output
+	if (!card_created) {
+		ret = atvr_snd_initialize(hdev);
+		if (ret)
+			goto err_stop;
+		card_created = true;
+		switch_set_state(&h2w_switch, BIT_HEADSET);
+	}
+	pr_info("%s: num_remotes %d->%d\n", __func__, num_remotes, num_remotes + 1);
+	num_remotes++;
+
+	return 0;
+err_stop:
+	hid_hw_stop(hdev);
+err_start:
+err_parse:
+err_match:
+	return ret;
+}
+
+static void atvr_remove(struct hid_device *hdev)
+{
+	pr_info("%s: hdev->name = %s removed, num %d->%d\n",
+		__func__, hdev->name, num_remotes, num_remotes - 1);
+	num_remotes--;
+	hid_hw_stop(hdev);
+}
+
+static const struct hid_device_id atvr_devices[] = {
+	{HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_GOOGLE,
+			      USB_DEVICE_ID_ADT1_REMOTE)},
+	{HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_GOOGLE,
+			      USB_DEVICE_ID_NEXUS_REMOTE)},
+	{ }
+};
+MODULE_DEVICE_TABLE(hid, atvr_devices);
+
+static struct hid_driver atvr_driver = {
+	.name = "AndroidTV remote",
+	.id_table = atvr_devices,
+	.raw_event = atvr_raw_event,
+	.probe = atvr_probe,
+	.remove = atvr_remove,
+};
+
+static int atvr_init(void)
+{
+	int ret;
+
+	ret = switch_dev_register(&h2w_switch);
+	if (ret)
+		pr_err("%s: failed to create h2w switch\n", __func__);
+
+	ret = hid_register_driver(&atvr_driver);
+	if (ret)
+		pr_err("%s: can't register AndroidTV Remote driver\n", __func__);
+
+#if (DEBUG_WITH_MISC_DEVICE == 1)
+	pcm_dev_node.minor = MISC_DYNAMIC_MINOR;
+	pcm_dev_node.name = "snd_atvr_pcm";
+	pcm_dev_node.fops = &pcm_fops;
+	ret = misc_register(&pcm_dev_node);
+	if (ret)
+		pr_err("%s: failed to create pcm misc device %d\n",
+		       __func__, ret);
+	else
+		pr_info("%s: succeeded creating misc device %s\n",
+			__func__, pcm_dev_node.name);
+
+	adpcm_dev_node.minor = MISC_DYNAMIC_MINOR;
+	adpcm_dev_node.name = "snd_atvr_adpcm";
+	adpcm_dev_node.fops = &adpcm_fops;
+	ret = misc_register(&adpcm_dev_node);
+	if (ret)
+		pr_err("%s: failed to create adpcm misc device %d\n",
+		       __func__, ret);
+	else
+		pr_info("%s: succeeded creating misc device %s\n",
+			__func__, adpcm_dev_node.name);
+
+	mSBC_dev_node.minor = MISC_DYNAMIC_MINOR;
+	mSBC_dev_node.name = "snd_atvr_mSBC";
+	mSBC_dev_node.fops = &mSBC_fops;
+	ret = misc_register(&mSBC_dev_node);
+	if (ret)
+		pr_err("%s: failed to create mSBC misc device %d\n",
+		       __func__, ret);
+	else
+		pr_info("%s: succeeded creating misc device %s\n",
+			__func__, mSBC_dev_node.name);
+#endif
+
+	return ret;
+}
+
+static void atvr_exit(void)
+{
+#if (DEBUG_WITH_MISC_DEVICE == 1)
+	misc_deregister(&mSBC_dev_node);
+	misc_deregister(&adpcm_dev_node);
+	misc_deregister(&pcm_dev_node);
+#endif
+
+	hid_unregister_driver(&atvr_driver);
+
+	switch_set_state(&h2w_switch, BIT_NO_HEADSET);
+	switch_dev_unregister(&h2w_switch);
+}
+
+module_init(atvr_init);
+module_exit(atvr_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 5b3e4cf..fe738aa 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1188,7 +1188,8 @@
 }
 
 /*
- * Create a report.
+ * Create a report. 'data' has to be allocated using
+ * hid_alloc_report_buf() so that it has proper size.
  */
 
 void hid_output_report(struct hid_report *report, __u8 *data)
@@ -1205,6 +1206,22 @@
 EXPORT_SYMBOL_GPL(hid_output_report);
 
 /*
+ * Allocator for buffer that is going to be passed to hid_output_report()
+ */
+u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags)
+{
+	/*
+	 * 7 extra bytes are necessary to achieve proper functionality
+	 * of implement() working on 8 byte chunks
+	 */
+
+	int len = ((report->size - 1) >> 3) + 1 + (report->id > 0) + 7;
+
+	return kmalloc(len, flags);
+}
+EXPORT_SYMBOL_GPL(hid_alloc_report_buf);
+
+/*
  * Set a field value. The report this field belongs to has to be
  * created and transferred to the device, to set this value in the
  * device.
@@ -1612,6 +1629,9 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) },
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
@@ -1735,6 +1755,7 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKU) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPLUS) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPURE) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPURE_OPTICAL) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KOVAPLUS) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_LUA) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRED) },
@@ -1797,6 +1818,8 @@
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) },
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) },
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE2) },
+	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_ADT1_REMOTE) },
+	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_NEXUS_REMOTE) },
 	{ }
 };
 
@@ -2244,6 +2267,9 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
 	{ }
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 38535c9..917856b 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -135,6 +135,9 @@
 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS   0x023b
 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI  0x0255
 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO   0x0256
+#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI	0x0290
+#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO	0x0291
+#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS	0x0292
 #define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY	0x030a
 #define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY	0x030b
 #define USB_DEVICE_ID_APPLE_IRCONTROL	0x8240
@@ -341,6 +344,10 @@
 #define USB_VENDOR_ID_GOODTOUCH		0x1aad
 #define USB_DEVICE_ID_GOODTOUCH_000f	0x000f
 
+#define USB_VENDOR_ID_GOOGLE		0x18d1
+#define USB_DEVICE_ID_ADT1_REMOTE	0x2c41
+#define USB_DEVICE_ID_NEXUS_REMOTE	0x2c42
+
 #define USB_VENDOR_ID_GOTOP		0x08f2
 #define USB_DEVICE_ID_SUPER_Q2		0x007f
 #define USB_DEVICE_ID_GOGOPEN		0x00ce
@@ -643,6 +650,7 @@
 #define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_16   0x0012
 #define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_17   0x0013
 #define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_18   0x0014
+#define USB_DEVICE_ID_NTRIG_DUOSENSE 0x1500
 
 #define USB_VENDOR_ID_ONTRAK		0x0a07
 #define USB_DEVICE_ID_ONTRAK_ADU100	0x0064
@@ -701,6 +709,7 @@
 #define USB_DEVICE_ID_ROCCAT_KONE	0x2ced
 #define USB_DEVICE_ID_ROCCAT_KONEPLUS	0x2d51
 #define USB_DEVICE_ID_ROCCAT_KONEPURE	0x2dbe
+#define USB_DEVICE_ID_ROCCAT_KONEPURE_OPTICAL	0x2db4
 #define USB_DEVICE_ID_ROCCAT_KONEXTD	0x2e22
 #define USB_DEVICE_ID_ROCCAT_KOVAPLUS	0x2d50
 #define USB_DEVICE_ID_ROCCAT_LUA	0x2c2e
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 762b2cf..c02f435 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -340,7 +340,7 @@
 {
 	struct hid_device *dev = container_of(psy, struct hid_device, battery);
 	int ret = 0;
-	__u8 buf[2] = {};
+	__u8 *buf;
 
 	switch (prop) {
 	case POWER_SUPPLY_PROP_PRESENT:
@@ -349,13 +349,20 @@
 		break;
 
 	case POWER_SUPPLY_PROP_CAPACITY:
+
+		buf = kmalloc(2 * sizeof(__u8), GFP_KERNEL);
+		if (!buf) {
+			ret = -ENOMEM;
+			break;
+		}
 		ret = dev->hid_get_raw_report(dev, dev->battery_report_id,
-					      buf, sizeof(buf),
+					      buf, 2,
 					      dev->battery_report_type);
 
 		if (ret != 2) {
 			if (ret >= 0)
 				ret = -EINVAL;
+			kfree(buf);
 			break;
 		}
 
@@ -364,6 +371,7 @@
 		    buf[1] <= dev->battery_max)
 			val->intval = (100 * (buf[1] - dev->battery_min)) /
 				(dev->battery_max - dev->battery_min);
+		kfree(buf);
 		break;
 
 	case POWER_SUPPLY_PROP_MODEL_NAME:
@@ -1153,6 +1161,69 @@
 }
 EXPORT_SYMBOL_GPL(hidinput_count_leds);
 
+static void hidinput_led_worker(struct work_struct *work)
+{
+	struct hid_device *hid = container_of(work, struct hid_device,
+					      led_work);
+	struct hid_field *field;
+	struct hid_report *report;
+	int len;
+	__u8 *buf;
+
+	field = hidinput_get_led_field(hid);
+	if (!field)
+		return;
+
+	/*
+	 * field->report is accessed unlocked regarding HID core. So there might
+	 * be another incoming SET-LED request from user-space, which changes
+	 * the LED state while we assemble our outgoing buffer. However, this
+	 * doesn't matter as hid_output_report() correctly converts it into a
+	 * boolean value no matter what information is currently set on the LED
+	 * field (even garbage). So the remote device will always get a valid
+	 * request.
+	 * And in case we send a wrong value, a next led worker is spawned
+	 * for every SET-LED request so the following worker will send the
+	 * correct value, guaranteed!
+	 */
+
+	report = field->report;
+
+	len = ((report->size - 1) >> 3) + 1 + (report->id > 0);
+	buf = kmalloc(len, GFP_KERNEL);
+	if (!buf)
+		return;
+
+	hid_output_report(report, buf);
+	/* synchronous output report */
+	hid->hid_output_raw_report(hid, buf, len, HID_OUTPUT_REPORT);
+	kfree(buf);
+}
+
+static int hidinput_input_event(struct input_dev *dev, unsigned int type,
+				unsigned int code, int value)
+{
+	struct hid_device *hid = input_get_drvdata(dev);
+	struct hid_field *field;
+	int offset;
+
+	if (type == EV_FF)
+		return input_ff_event(dev, type, code, value);
+
+	if (type != EV_LED)
+		return -1;
+
+	if ((offset = hidinput_find_field(hid, type, code, &field)) == -1) {
+		hid_warn(dev, "event field not found\n");
+		return -1;
+	}
+
+	hid_set_field(field, offset, value);
+
+	schedule_work(&hid->led_work);
+	return 0;
+}
+
 static int hidinput_open(struct input_dev *dev)
 {
 	struct hid_device *hid = input_get_drvdata(dev);
@@ -1204,7 +1275,12 @@
 	}
 
 	input_set_drvdata(input_dev, hid);
-	input_dev->event = hid->ll_driver->hidinput_input_event;
+	if(hid->ll_driver->hidinput_input_event) {
+		input_dev->event =
+				hid->ll_driver->hidinput_input_event;
+	} else if (hid->hid_output_raw_report) {
+		input_dev->event = hidinput_input_event;
+	}
 	input_dev->open = hidinput_open;
 	input_dev->close = hidinput_close;
 	input_dev->setkeycode = hidinput_setkeycode;
@@ -1299,6 +1375,7 @@
 	int i, j, k;
 
 	INIT_LIST_HEAD(&hid->inputs);
+	INIT_WORK(&hid->led_work, hidinput_led_worker);
 
 	if (!force) {
 		for (i = 0; i < hid->maxcollection; i++) {
@@ -1402,6 +1479,12 @@
 		input_unregister_device(hidinput->input);
 		kfree(hidinput);
 	}
+
+	/* led_work is spawned by input_dev callbacks, but doesn't access the
+	 * parent input_dev at all. Once all input devices are removed, we
+	 * know that led_work will never get restarted, so we can cancel it
+	 * synchronously and are safe. */
+	cancel_work_sync(&hid->led_work);
 }
 EXPORT_SYMBOL_GPL(hidinput_disconnect);
 
diff --git a/drivers/hid/hid-lenovo-tpkbd.c b/drivers/hid/hid-lenovo-tpkbd.c
index 762d988..31cf29a 100644
--- a/drivers/hid/hid-lenovo-tpkbd.c
+++ b/drivers/hid/hid-lenovo-tpkbd.c
@@ -414,22 +414,27 @@
 	ret = hid_parse(hdev);
 	if (ret) {
 		hid_err(hdev, "hid_parse failed\n");
-		goto err_free;
+		goto err;
 	}
 
 	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
 	if (ret) {
 		hid_err(hdev, "hid_hw_start failed\n");
-		goto err_free;
+		goto err;
 	}
 
 	uhdev = (struct usbhid_device *) hdev->driver_data;
 
-	if (uhdev->ifnum == 1)
-		return tpkbd_probe_tp(hdev);
+	if (uhdev->ifnum == 1) {
+		ret = tpkbd_probe_tp(hdev);
+		if (ret)
+			goto err_hid;
+	}
 
 	return 0;
-err_free:
+err_hid:
+	hid_hw_stop(hdev);
+err:
 	return ret;
 }
 
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index 0522b80..e495ec1 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -471,6 +471,14 @@
 	dj_report->report_params[CMD_SWITCH_PARAM_TIMEOUT_SECONDS] = (u8)timeout;
 	retval = logi_dj_recv_send_report(djrcv_dev, dj_report);
 	kfree(dj_report);
+
+	/*
+	 * Ugly sleep to work around a USB 3.0 bug when the receiver is still
+	 * processing the "switch-to-dj" command while we send an other command.
+	 * 50 msec should gives enough time to the receiver to be ready.
+	 */
+	msleep(50);
+
 	return retval;
 }
 
@@ -574,7 +582,7 @@
 
 	struct hid_field *field;
 	struct hid_report *report;
-	unsigned char data[8];
+	unsigned char *data;
 	int offset;
 
 	dbg_hid("%s: %s, type:%d | code:%d | value:%d\n",
@@ -590,6 +598,13 @@
 		return -1;
 	}
 	hid_set_field(field, offset, value);
+
+	data = hid_alloc_report_buf(field->report, GFP_KERNEL);
+	if (!data) {
+		dev_warn(&dev->dev, "failed to allocate report buf memory\n");
+		return -1;
+	}
+
 	hid_output_report(field->report, &data[0]);
 
 	output_report_enum = &dj_rcv_hiddev->report_enum[HID_OUTPUT_REPORT];
@@ -600,8 +615,9 @@
 
 	hid_hw_request(dj_rcv_hiddev, report, HID_REQ_SET_REPORT);
 
-	return 0;
+	kfree(data);
 
+	return 0;
 }
 
 static int logi_dj_ll_start(struct hid_device *hid)
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index 5bc3734..4b1e506 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -461,7 +461,7 @@
 	return 0;
 }
 
-static void magicmouse_input_configured(struct hid_device *hdev,
+static int magicmouse_input_configured(struct hid_device *hdev,
 		struct hid_input *hi)
 
 {
@@ -473,6 +473,7 @@
 		/* clean msc->input to notify probe() of the failure */
 		msc->input = NULL;
 	}
+	return ret;
 }
 
 
diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c
index 5482156..048c83a 100644
--- a/drivers/hid/hid-ntrig.c
+++ b/drivers/hid/hid-ntrig.c
@@ -859,14 +859,14 @@
 	return 1;
 }
 
-static void ntrig_input_configured(struct hid_device *hid,
+static int ntrig_input_configured(struct hid_device *hid,
 		struct hid_input *hidinput)
 
 {
 	struct input_dev *input = hidinput->input;
 
 	if (hidinput->report->maxfield < 1)
-		return;
+		return 0;
 
 	switch (hidinput->report->field[0]->application) {
 	case HID_DG_PEN:
@@ -890,6 +890,7 @@
 							"N-Trig MultiTouch";
 		break;
 	}
+	return 0;
 }
 
 static int ntrig_probe(struct hid_device *hdev, const struct hid_device_id *id)
diff --git a/drivers/hid/hid-picolcd_cir.c b/drivers/hid/hid-picolcd_cir.c
index e346038..59d5eb1 100644
--- a/drivers/hid/hid-picolcd_cir.c
+++ b/drivers/hid/hid-picolcd_cir.c
@@ -145,6 +145,7 @@
 	struct rc_dev *rdev = data->rc_dev;
 
 	data->rc_dev = NULL;
-	rc_unregister_device(rdev);
+	if (rdev)
+		rc_unregister_device(rdev);
 }
 
diff --git a/drivers/hid/hid-picolcd_debugfs.c b/drivers/hid/hid-picolcd_debugfs.c
index 59ab8e1..024cdf3 100644
--- a/drivers/hid/hid-picolcd_debugfs.c
+++ b/drivers/hid/hid-picolcd_debugfs.c
@@ -394,7 +394,7 @@
 void picolcd_debug_out_report(struct picolcd_data *data,
 		struct hid_device *hdev, struct hid_report *report)
 {
-	u8 raw_data[70];
+	u8 *raw_data;
 	int raw_size = (report->size >> 3) + 1;
 	char *buff;
 #define BUFF_SZ 256
@@ -407,19 +407,19 @@
 	if (!buff)
 		return;
 
+	raw_data = hid_alloc_report_buf(report, GFP_ATOMIC);
+	if (!raw_data) {
+		kfree(buff);
+		return;
+	}
+
 	snprintf(buff, BUFF_SZ, "\nout report %d (size %d) =  ",
 			report->id, raw_size);
 	hid_debug_event(hdev, buff);
-	if (raw_size + 5 > sizeof(raw_data)) {
-		kfree(buff);
-		hid_debug_event(hdev, " TOO BIG\n");
-		return;
-	} else {
-		raw_data[0] = report->id;
-		hid_output_report(report, raw_data);
-		dump_buff_as_hex(buff, BUFF_SZ, raw_data, raw_size);
-		hid_debug_event(hdev, buff);
-	}
+	raw_data[0] = report->id;
+	hid_output_report(report, raw_data);
+	dump_buff_as_hex(buff, BUFF_SZ, raw_data, raw_size);
+	hid_debug_event(hdev, buff);
 
 	switch (report->id) {
 	case REPORT_LED_STATE:
@@ -644,6 +644,7 @@
 		break;
 	}
 	wake_up_interruptible(&hdev->debug_wait);
+	kfree(raw_data);
 	kfree(buff);
 }
 
diff --git a/drivers/hid/hid-picolcd_fb.c b/drivers/hid/hid-picolcd_fb.c
index 591f6b2..c930ab8 100644
--- a/drivers/hid/hid-picolcd_fb.c
+++ b/drivers/hid/hid-picolcd_fb.c
@@ -593,10 +593,14 @@
 void picolcd_exit_framebuffer(struct picolcd_data *data)
 {
 	struct fb_info *info = data->fb_info;
-	struct picolcd_fb_data *fbdata = info->par;
+	struct picolcd_fb_data *fbdata;
 	unsigned long flags;
 
+	if (!info)
+		return;
+
 	device_remove_file(&data->hdev->dev, &dev_attr_fb_update_rate);
+	fbdata = info->par;
 
 	/* disconnect framebuffer from HID dev */
 	spin_lock_irqsave(&fbdata->lock, flags);
diff --git a/drivers/hid/hid-roccat-konepure.c b/drivers/hid/hid-roccat-konepure.c
index c79d0b0..5850959 100644
--- a/drivers/hid/hid-roccat-konepure.c
+++ b/drivers/hid/hid-roccat-konepure.c
@@ -262,6 +262,7 @@
 
 static const struct hid_device_id konepure_devices[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPURE) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPURE_OPTICAL) },
 	{ }
 };
 
@@ -300,5 +301,5 @@
 module_exit(konepure_exit);
 
 MODULE_AUTHOR("Stefan Achatz");
-MODULE_DESCRIPTION("USB Roccat KonePure driver");
+MODULE_DESCRIPTION("USB Roccat KonePure/Optical driver");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/hid/hid-speedlink.c b/drivers/hid/hid-speedlink.c
index a2f587d..7112f3e 100644
--- a/drivers/hid/hid-speedlink.c
+++ b/drivers/hid/hid-speedlink.c
@@ -3,7 +3,7 @@
  *  Fixes "jumpy" cursor and removes nonexistent keyboard LEDS from
  *  the HID descriptor.
  *
- *  Copyright (c) 2011 Stefan Kriwanek <mail@stefankriwanek.de>
+ *  Copyright (c) 2011, 2013 Stefan Kriwanek <dev@stefankriwanek.de>
  */
 
 /*
@@ -46,8 +46,13 @@
 		struct hid_usage *usage, __s32 value)
 {
 	/* No other conditions due to usage_table. */
-	/* Fix "jumpy" cursor (invalid events sent by device). */
-	if (value == 256)
+
+	/* This fixes the "jumpy" cursor occuring due to invalid events sent
+	 * by the device. Some devices only send them with value==+256, others
+	 * don't. However, catching abs(value)>=256 is restrictive enough not
+	 * to interfere with devices that were bug-free (has been tested).
+	 */
+	if (abs(value) >= 256)
 		return 1;
 	/* Drop useless distance 0 events (on button clicks etc.) as well */
 	if (value == 0)
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index a745163..612a655 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -113,7 +113,7 @@
 	__u8 *buf;
 	int ret = 0;
 
-	if (!hidraw_table[minor]) {
+	if (!hidraw_table[minor] || !hidraw_table[minor]->exist) {
 		ret = -ENODEV;
 		goto out;
 	}
@@ -261,7 +261,7 @@
 	}
 
 	mutex_lock(&minors_lock);
-	if (!hidraw_table[minor]) {
+	if (!hidraw_table[minor] || !hidraw_table[minor]->exist) {
 		err = -ENODEV;
 		goto out_unlock;
 	}
@@ -302,39 +302,38 @@
 	return fasync_helper(fd, file, on, &list->fasync);
 }
 
+static void drop_ref(struct hidraw *hidraw, int exists_bit)
+{
+	if (exists_bit) {
+		hid_hw_close(hidraw->hid);
+		hidraw->exist = 0;
+		if (hidraw->open)
+			wake_up_interruptible(&hidraw->wait);
+	} else {
+		--hidraw->open;
+	}
+
+	if (!hidraw->open && !hidraw->exist) {
+		device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor));
+		hidraw_table[hidraw->minor] = NULL;
+		kfree(hidraw);
+	}
+}
+
 static int hidraw_release(struct inode * inode, struct file * file)
 {
 	unsigned int minor = iminor(inode);
-	struct hidraw *dev;
 	struct hidraw_list *list = file->private_data;
-	int ret;
-	int i;
 
 	mutex_lock(&minors_lock);
-	if (!hidraw_table[minor]) {
-		ret = -ENODEV;
-		goto unlock;
-	}
 
 	list_del(&list->node);
-	dev = hidraw_table[minor];
-	if (!--dev->open) {
-		if (list->hidraw->exist) {
-			hid_hw_power(dev->hid, PM_HINT_NORMAL);
-			hid_hw_close(dev->hid);
-		} else {
-			kfree(list->hidraw);
-		}
-	}
-
-	for (i = 0; i < HIDRAW_BUFFER_SIZE; ++i)
-		kfree(list->buffer[i].value);
 	kfree(list);
-	ret = 0;
-unlock:
-	mutex_unlock(&minors_lock);
 
-	return ret;
+	drop_ref(hidraw_table[minor], 0);
+
+	mutex_unlock(&minors_lock);
+	return 0;
 }
 
 static long hidraw_ioctl(struct file *file, unsigned int cmd,
@@ -539,18 +538,9 @@
 	struct hidraw *hidraw = hid->hidraw;
 
 	mutex_lock(&minors_lock);
-	hidraw->exist = 0;
 
-	device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor));
+	drop_ref(hidraw, 1);
 
-	hidraw_table[hidraw->minor] = NULL;
-
-	if (hidraw->open) {
-		hid_hw_close(hid);
-		wake_up_interruptible(&hidraw->wait);
-	} else {
-		kfree(hidraw);
-	}
 	mutex_unlock(&minors_lock);
 }
 EXPORT_SYMBOL_GPL(hidraw_disconnect);
diff --git a/drivers/hid/sbcdec.c b/drivers/hid/sbcdec.c
new file mode 100644
index 0000000..f0129b6
--- /dev/null
+++ b/drivers/hid/sbcdec.c
@@ -0,0 +1,560 @@
+/*
+
+Copyright (c) 2012, Dmitry Grinberg (dmitrygr@gmail.com / http://dmitrygr.com)
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+Redistributions of source code must retain the above copyright notice, this
+list of conditions and the following disclaimer.
+Redistributions in binary form must reproduce the above copyright notice,
+this list of conditions and the following disclaimer in the documentation
+and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#include "sbcdec.h"
+#include <linux/kernel.h>
+
+#define QUALITY_MEDIUM  1   /* pretty good */
+#define QUALITY_GREAT   2   /* as good as it will get without an FPU */
+
+/* /config options begin */
+
+#define QUALITY QUALITY_MEDIUM
+/* set to cheat a bit with shifts (saves a divide per sample) */
+#define SPEED_OVER_ACCURACY
+/* iterator up to 180 use fastest type for your platform */
+#define ITER   uint32_t
+
+/* /config options end */
+
+#define DEBUG_DECODING 0
+
+#if QUALITY == QUALITY_MEDIUM
+
+	#define CONST(x)       (x >> 16)
+	#define SAMPLE_CVT(x)  (x)
+	#define INSAMPLE       int16_t
+	#define OUTSAMPLE      uint16_t
+	#define FIXED          int16_t
+	#define FIXED_S        int32_t
+	#define OUT_CLIP_MAX   0x7FFF
+	#define OUT_CLIP_MIN   -0x8000
+
+	#define NUM_FRAC_BITS_PROTO 16
+	#define NUM_FRAC_BITS_COS   14
+
+#elif QUALITY == QUALITY_GREAT
+
+	#define CONST(x)       (x)
+	#define SAMPLE_CVT(x)  (x)
+	#define INSAMPLE       int16_t
+	#define OUTSAMPLE      uint16_t
+	#define FIXED          int32_t
+	#define FIXED_S        int64_t
+	#define OUT_CLIP_MAX   0x7FFF
+	#define OUT_CLIP_MIN   -0x8000
+
+	#define NUM_FRAC_BITS_PROTO 32
+	#define NUM_FRAC_BITS_COS   30
+
+#else
+
+	#error "You did not define SBC decoder synthesizer quality to use"
+
+#endif
+
+static FIXED gV[160];
+
+
+static const FIXED proto_4_40[] =
+{
+	CONST(0x00000000), CONST(0x00FB7991), CONST(0x02CB3E8B), CONST(0x069FDC59),
+	CONST(0x22B63DA5), CONST(0x4B583FE6), CONST(0xDD49C25B), CONST(0x069FDC59),
+	CONST(0xFD34C175), CONST(0x00FB7991), CONST(0x002329CC), CONST(0x00FF11CA),
+	CONST(0x053B7546), CONST(0x0191E578), CONST(0x31EAB920), CONST(0x4825E4A3),
+	CONST(0xEC1F5E6D), CONST(0x083DDC80), CONST(0xFF3773A8), CONST(0x00B32807),
+	CONST(0x0061C5A7), CONST(0x007A4737), CONST(0x07646684), CONST(0xF89F23A7),
+	CONST(0x3F23948D), CONST(0x3F23948D), CONST(0xF89F23A7), CONST(0x07646684),
+	CONST(0x007A4737), CONST(0x0061C5A7), CONST(0x00B32807), CONST(0xFF3773A8),
+	CONST(0x083DDC80), CONST(0xEC1F5E6D), CONST(0x4825E4A3), CONST(0x31EAB920),
+	CONST(0x0191E578), CONST(0x053B7546), CONST(0x00FF11CA), CONST(0x002329CC)
+};
+
+static const FIXED proto_8_80[] =
+{
+	CONST(0x00000000), CONST(0x0083D8D4), CONST(0x0172E691), CONST(0x034FD9E0),
+	CONST(0x116860F5), CONST(0x259ED8EB), CONST(0xEE979F0B), CONST(0x034FD9E0),
+	CONST(0xFE8D196F), CONST(0x0083D8D4), CONST(0x000A42E6), CONST(0x0089DE90),
+	CONST(0x020E372C), CONST(0x02447D75), CONST(0x153E7D35), CONST(0x253844DE),
+	CONST(0xF2625120), CONST(0x03EBE849), CONST(0xFF1ACF26), CONST(0x0074E5CF),
+	CONST(0x00167EE3), CONST(0x0082B6EC), CONST(0x02AD6794), CONST(0x00BFA1FF),
+	CONST(0x18FAB36D), CONST(0x24086BF5), CONST(0xF5FF2BF8), CONST(0x04270CA8),
+	CONST(0xFF93E21B), CONST(0x0060C1E9), CONST(0x002458FC), CONST(0x0069F16C),
+	CONST(0x03436717), CONST(0xFEBDD6E5), CONST(0x1C7762DF), CONST(0x221D9DE0),
+	CONST(0xF950DCFC), CONST(0x0412523E), CONST(0xFFF44825), CONST(0x004AB4C5),
+	CONST(0x0035FF13), CONST(0x003B1FA4), CONST(0x03C04499), CONST(0xFC4086B8),
+	CONST(0x1F8E43F2), CONST(0x1F8E43F2), CONST(0xFC4086B8), CONST(0x03C04499),
+	CONST(0x003B1FA4), CONST(0x0035FF13), CONST(0x004AB4C5), CONST(0xFFF44825),
+	CONST(0x0412523E), CONST(0xF950DCFC), CONST(0x221D9DE0), CONST(0x1C7762DF),
+	CONST(0xFEBDD6E5), CONST(0x03436717), CONST(0x0069F16C), CONST(0x002458FC),
+	CONST(0x0060C1E9), CONST(0xFF93E21B), CONST(0x04270CA8), CONST(0xF5FF2BF8),
+	CONST(0x24086BF5), CONST(0x18FAB36D), CONST(0x00BFA1FF), CONST(0x02AD6794),
+	CONST(0x0082B6EC), CONST(0x00167EE3), CONST(0x0074E5CF), CONST(0xFF1ACF26),
+	CONST(0x03EBE849), CONST(0xF2625120), CONST(0x253844DE), CONST(0x153E7D35),
+	CONST(0x02447D75), CONST(0x020E372C), CONST(0x0089DE90), CONST(0x000A42E6)
+};
+
+static const FIXED costab_4[] =
+{
+	CONST(0x2D413CCD), CONST(0xD2BEC333), CONST(0xD2BEC333), CONST(0x2D413CCD),
+	CONST(0x187DE2A7), CONST(0xC4DF2862), CONST(0x3B20D79E), CONST(0xE7821D59),
+	CONST(0x00000000), CONST(0x00000000), CONST(0x00000000), CONST(0x00000000),
+	CONST(0xE7821D59), CONST(0x3B20D79E), CONST(0xC4DF2862), CONST(0x187DE2A7),
+	CONST(0xD2BEC333), CONST(0x2D413CCD), CONST(0x2D413CCD), CONST(0xD2BEC333),
+	CONST(0xC4DF2862), CONST(0xE7821D59), CONST(0x187DE2A7), CONST(0x3B20D79E),
+	CONST(0xC0000000), CONST(0xC0000000), CONST(0xC0000000), CONST(0xC0000000),
+	CONST(0xC4DF2862), CONST(0xE7821D59), CONST(0x187DE2A7), CONST(0x3B20D79E)
+};
+
+static const FIXED costab_8[] =
+{
+	CONST(0x2D413CCD), CONST(0xD2BEC333), CONST(0xD2BEC333), CONST(0x2D413CCD),
+	CONST(0x2D413CCD), CONST(0xD2BEC333), CONST(0xD2BEC333), CONST(0x2D413CCD),
+	CONST(0x238E7673), CONST(0xC13AD060), CONST(0x0C7C5C1E), CONST(0x3536CC52),
+	CONST(0xCAC933AE), CONST(0xF383A3E2), CONST(0x3EC52FA0), CONST(0xDC71898D),
+	CONST(0x187DE2A7), CONST(0xC4DF2862), CONST(0x3B20D79E), CONST(0xE7821D59),
+	CONST(0xE7821D59), CONST(0x3B20D79E), CONST(0xC4DF2862), CONST(0x187DE2A7),
+	CONST(0x0C7C5C1E), CONST(0xDC71898D), CONST(0x3536CC52), CONST(0xC13AD060),
+	CONST(0x3EC52FA0), CONST(0xCAC933AE), CONST(0x238E7673), CONST(0xF383A3E2),
+	CONST(0x00000000), CONST(0x00000000), CONST(0x00000000), CONST(0x00000000),
+	CONST(0x00000000), CONST(0x00000000), CONST(0x00000000), CONST(0x00000000),
+	CONST(0xF383A3E2), CONST(0x238E7673), CONST(0xCAC933AE), CONST(0x3EC52FA0),
+	CONST(0xC13AD060), CONST(0x3536CC52), CONST(0xDC71898D), CONST(0x0C7C5C1E),
+	CONST(0xE7821D59), CONST(0x3B20D79E), CONST(0xC4DF2862), CONST(0x187DE2A7),
+	CONST(0x187DE2A7), CONST(0xC4DF2862), CONST(0x3B20D79E), CONST(0xE7821D59),
+	CONST(0xDC71898D), CONST(0x3EC52FA0), CONST(0xF383A3E2), CONST(0xCAC933AE),
+	CONST(0x3536CC52), CONST(0x0C7C5C1E), CONST(0xC13AD060), CONST(0x238E7673),
+	CONST(0xD2BEC333), CONST(0x2D413CCD), CONST(0x2D413CCD), CONST(0xD2BEC333),
+	CONST(0xD2BEC333), CONST(0x2D413CCD), CONST(0x2D413CCD), CONST(0xD2BEC333),
+	CONST(0xCAC933AE), CONST(0x0C7C5C1E), CONST(0x3EC52FA0), CONST(0x238E7673),
+	CONST(0xDC71898D), CONST(0xC13AD060), CONST(0xF383A3E2), CONST(0x3536CC52),
+	CONST(0xC4DF2862), CONST(0xE7821D59), CONST(0x187DE2A7), CONST(0x3B20D79E),
+	CONST(0x3B20D79E), CONST(0x187DE2A7), CONST(0xE7821D59), CONST(0xC4DF2862),
+	CONST(0xC13AD060), CONST(0xCAC933AE), CONST(0xDC71898D), CONST(0xF383A3E2),
+	CONST(0x0C7C5C1E), CONST(0x238E7673), CONST(0x3536CC52), CONST(0x3EC52FA0),
+	CONST(0xC0000000), CONST(0xC0000000), CONST(0xC0000000), CONST(0xC0000000),
+	CONST(0xC0000000), CONST(0xC0000000), CONST(0xC0000000), CONST(0xC0000000),
+	CONST(0xC13AD060), CONST(0xCAC933AE), CONST(0xDC71898D), CONST(0xF383A3E2),
+	CONST(0x0C7C5C1E), CONST(0x238E7673), CONST(0x3536CC52), CONST(0x3EC52FA0),
+	CONST(0xC4DF2862), CONST(0xE7821D59), CONST(0x187DE2A7), CONST(0x3B20D79E),
+	CONST(0x3B20D79E), CONST(0x187DE2A7), CONST(0xE7821D59), CONST(0xC4DF2862),
+	CONST(0xCAC933AE), CONST(0x0C7C5C1E), CONST(0x3EC52FA0), CONST(0x238E7673),
+	CONST(0xDC71898D), CONST(0xC13AD060), CONST(0xF383A3E2), CONST(0x3536CC52)
+};
+
+static const int8_t loudness_4[4][4] =
+{
+	{ -1, 0, 0, 0 }, { -2, 0, 0, 1 },
+	{ -2, 0, 0, 1 }, { -2, 0, 0, 1 }
+};
+
+static const int8_t loudness_8[4][8] =
+{
+	{ -2, 0, 0, 0, 0, 0, 0, 1 }, { -3, 0, 0, 0, 0, 0, 1, 2 },
+	{ -4, 0, 0, 0, 0, 0, 1, 2 }, { -4, 0, 0, 0, 0, 0, 1, 2 }
+};
+
+static void synth_4(OUTSAMPLE* dst, const INSAMPLE* src, FIXED* V){  /* A2DP figure 12.3 */
+
+	ITER i, j;
+	const FIXED* tabl = proto_4_40;
+	const FIXED* costab = costab_4;
+
+	/* shift */
+	for(i = 79; i >= 8; i--) V[i] = V[i - 8];
+
+	/* matrix */
+	i = 8;
+	do{
+		FIXED_S t;
+		t  = (FIXED_S)costab[0] * (FIXED_S)src[0];
+		t += (FIXED_S)costab[1] * (FIXED_S)src[1];
+		t += (FIXED_S)costab[2] * (FIXED_S)src[2];
+		t += (FIXED_S)costab[3] * (FIXED_S)src[3];
+		costab += 4;
+		*V++ = t >> NUM_FRAC_BITS_COS;
+	}while(--i);
+	V -= 8;
+
+
+	/* calculate audio samples */
+	j = 4;
+	do{
+
+		OUTSAMPLE s;
+		FIXED_S sample;
+		sample  = (FIXED_S)V[  0] * (FIXED_S)tabl[0];
+		sample += (FIXED_S)V[ 12] * (FIXED_S)tabl[1];
+		sample += (FIXED_S)V[ 16] * (FIXED_S)tabl[2];
+		sample += (FIXED_S)V[ 28] * (FIXED_S)tabl[3];
+		sample += (FIXED_S)V[ 32] * (FIXED_S)tabl[4];
+		sample += (FIXED_S)V[ 44] * (FIXED_S)tabl[5];
+		sample += (FIXED_S)V[ 48] * (FIXED_S)tabl[6];
+		sample += (FIXED_S)V[ 60] * (FIXED_S)tabl[7];
+		sample += (FIXED_S)V[ 64] * (FIXED_S)tabl[8];
+		sample += (FIXED_S)V[ 76] * (FIXED_S)tabl[9];
+		tabl += 10;
+		V++;
+
+		sample >>= (NUM_FRAC_BITS_PROTO - 1 - 2); /* -2 is for the -4 we need to multiply by :) */
+		sample = -sample;
+
+		if(sample >= OUT_CLIP_MAX) sample = OUT_CLIP_MAX;
+		if(sample <= OUT_CLIP_MIN) sample = OUT_CLIP_MIN;
+		s = sample;
+
+		*dst++ = s;
+
+	}while(--j);
+}
+
+static void synth_8(OUTSAMPLE* dst, const INSAMPLE* src, FIXED* V){  /* A2DP figure 12.3 */
+
+	ITER i, j;
+	const FIXED* tabl = proto_8_80;
+	const FIXED* costab = costab_8;
+
+	/* shift */
+	for(i = 159; i >= 16; i--) V[i] = V[i - 16];
+
+	/* matrix */
+	i = 16;
+	do{
+		FIXED_S t;
+		t  = (FIXED_S)costab[0] * (FIXED_S)src[0];
+		t += (FIXED_S)costab[1] * (FIXED_S)src[1];
+		t += (FIXED_S)costab[2] * (FIXED_S)src[2];
+		t += (FIXED_S)costab[3] * (FIXED_S)src[3];
+		t += (FIXED_S)costab[4] * (FIXED_S)src[4];
+		t += (FIXED_S)costab[5] * (FIXED_S)src[5];
+		t += (FIXED_S)costab[6] * (FIXED_S)src[6];
+		t += (FIXED_S)costab[7] * (FIXED_S)src[7];
+		costab += 8;
+		*V++ = t >> NUM_FRAC_BITS_COS;
+
+	}while(--i);
+
+	V -= 16;
+
+	/* calculate audio samples */
+	j = 8;
+	do{
+
+		OUTSAMPLE s;
+		FIXED_S sample;
+
+		sample  = (FIXED_S)V[  0] * (FIXED_S)tabl[0];
+		sample += (FIXED_S)V[ 24] * (FIXED_S)tabl[1];
+		sample += (FIXED_S)V[ 32] * (FIXED_S)tabl[2];
+		sample += (FIXED_S)V[ 56] * (FIXED_S)tabl[3];
+		sample += (FIXED_S)V[ 64] * (FIXED_S)tabl[4];
+		sample += (FIXED_S)V[ 88] * (FIXED_S)tabl[5];
+		sample += (FIXED_S)V[ 96] * (FIXED_S)tabl[6];
+		sample += (FIXED_S)V[120] * (FIXED_S)tabl[7];
+		sample += (FIXED_S)V[128] * (FIXED_S)tabl[8];
+		sample += (FIXED_S)V[152] * (FIXED_S)tabl[9];
+		tabl += 10;
+		V++;
+
+		sample >>= (NUM_FRAC_BITS_PROTO - 1 - 3); /* -3 is for the -8 we need to multiply by :) */
+		sample = -sample;
+
+		if(sample > OUT_CLIP_MAX) sample = OUT_CLIP_MAX;
+		if(sample < OUT_CLIP_MIN) sample = OUT_CLIP_MIN;
+		s = sample;
+
+		*dst++ = s;
+
+	}while(--j);
+}
+
+static void synth(OUTSAMPLE* dst, const INSAMPLE* src, uint8_t nBands, FIXED* V) {
+	/* A2DP sigure 12.3  */
+
+	if(nBands == 4) synth_4(dst, src, V);
+	else synth_8(dst, src, V);
+}
+
+#ifdef SPEED_OVER_ACCURACY
+	static inline int32_t mulshift(int32_t val, uint32_t bits) {
+		/* return approximately  val / ((2^bits) - 1)  */
+
+		static const uint32_t cooltable[] = {0, 0, 0x55555555, 0x24924925, 0x11111111, 0x08421084,
+			0x04104104, 0x02040810, 0x01010101, 0x00804020, 0x00401004, 0x00200400,
+			0x00100100, 0x00080040, 0x00040010, 0x00020004, 0x00010001};
+
+		if(bits != 1) val = ((uint64_t)(uint32_t)val * (uint64_t)cooltable[bits]) >> 32;
+
+		return val;
+	}
+#endif
+
+void sbc_decoder_reset(void) {
+	unsigned i;
+	for(i = 0; i < sizeof(gV) / sizeof(*gV); i++) {
+		gV[i] = 0;
+	}
+}
+
+/**
+ * We expect a mSBC header at the start of the buffer.
+ */
+uint32_t sbc_decode(uint8_t blocks_per_packet, uint8_t num_bits,
+		    const uint8_t* buf, uint16_t len, int16_t* outPCM){
+
+	/* convenience  */
+	const uint8_t* end = buf + len;
+	int16_t* outBufPtr = outPCM;
+	#define left (end - buf)
+
+	/* workspace */
+	static INSAMPLE samples[16][8]; /*  We blow the stack if this is not static. */
+	ITER i, j, k;
+	uint32_t scaleFactors[8];
+	int32_t bitneed[8];
+	uint32_t bits[8];
+	int32_t bitcount, slicecount, bitslice;
+	uint8_t samplingRate, blocks, snr, numSubbands, bitpoolSz, bitpos = 0x80;
+	int8_t max_bitneed = 0;
+#ifndef SPEED_OVER_ACCURACY
+	int32_t levels[8];
+#endif
+
+#if (DEBUG_DECODING == 1)
+	const uint8_t *start_buf = buf;
+	pr_info("%s: blocks_per_packet = %d, num_bits = %d, buf = %p, len = %d\n",
+		__func__, blocks_per_packet, num_bits, buf, len);
+	for (i = 0; i < len; i++) {
+		pr_info("buf[%d] = 0x%02x\n", i, buf[i]);
+	}
+#endif
+
+	/* look into the frame header */
+	if(left < 2) goto out;      /* too short a frame header  */
+
+	/*  use Bemote specific constants  */
+	samplingRate = 0; /*  always 16000 Hz */
+	blocks = blocks_per_packet;
+	snr = 0;
+	numSubbands = 8;
+	bitpoolSz = num_bits;
+
+	if (buf[0] != 0xAD) {
+		pr_err("mSBC syncword not found\n");
+		goto out;
+	}
+	/* skip header and process scale factors */
+	buf += 4;
+
+	/* read scale factors */
+	/* pr_info("sbc_decode: read scale factors, numSubbands = %d\n", numSubbands); */
+	for(i = 0; i < numSubbands; i++){
+
+		if(bitpos == 0x80){
+
+			scaleFactors[i] = (*buf) >> 4;
+			bitpos = 0x08;
+		}
+		else{
+
+			scaleFactors[i] = (*buf++) & 0x0F;
+			bitpos = 0x80;
+		}
+	}
+
+	/* calculate bitneed table and max_bitneed value (A2DP 12.6.3.1)  */
+	if(snr){
+
+		for(i = 0; i < numSubbands; i++){
+
+			bitneed[i] = scaleFactors[i];
+			if(bitneed[i] > max_bitneed) max_bitneed = bitneed[i];
+		}
+	}
+	else{
+
+		const signed char* tbl;
+
+		if(numSubbands == 4) tbl = loudness_4[samplingRate];
+		else tbl = loudness_8[samplingRate];
+
+		for(i = 0; i < numSubbands; i++){
+
+			if(scaleFactors[i]){
+
+				int loudness = scaleFactors[i] - tbl[i];
+
+				if(loudness > 0) loudness /= 2;
+				bitneed[i] = loudness;
+			}
+			else bitneed[i] = -5;
+			if(bitneed[i] > max_bitneed) max_bitneed = bitneed[i];
+		}
+	}
+
+	/* fit bitslices into the bitpool */
+	bitcount = 0;
+	slicecount = 0;
+	bitslice = max_bitneed + 1;
+	/* pr_info("sbc_decode: fit bitslices into the bitpool, bitslice = %d\n", bitslice ); */
+	do{
+		bitslice--;
+		bitcount += slicecount;
+		slicecount = 0;
+		for(i = 0; i < numSubbands; i++){
+
+			if(bitneed[i] > bitslice + 1 && bitneed[i] < bitslice + 16) slicecount++;
+			else if(bitneed[i] == bitslice + 1) slicecount += 2;
+		}
+
+	}while(bitcount + slicecount < bitpoolSz);
+
+	/* distribute bits */
+	for(i = 0; i < numSubbands; i++){
+
+		if(bitneed[i] < bitslice + 2) bits[i] = 0;
+		else{
+
+			int8_t v = bitneed[i] - bitslice;
+			if(v > 16) v = 16;
+			bits[i] = v;
+		}
+	}
+
+	/* allocate remaining bits */
+	for(i = 0; i < numSubbands && bitcount < bitpoolSz; i++){
+
+		if(bits[i] >= 2 && bits[i] < 16){
+
+			bits[i]++;
+			bitcount++;
+		}
+		else if(bitneed[i] == bitslice + 1 && bitpoolSz > bitcount + 1){
+
+			bits[i] = 2;
+			bitcount += 2;
+		}
+	}
+	for(i = 0; i < numSubbands && bitcount < bitpoolSz; i++){
+
+		if(bits[i] < 16){
+
+			bits[i]++;
+			bitcount++;
+		}
+	}
+
+	/* reconstruct subband samples (A2DP 12.6.4) */
+	#ifndef SPEED_OVER_ACCURACY
+		for(i = 0; i < numSubbands; i++) levels[i] = (1 << bits[i]) - 1;
+	#endif
+
+	/* pr_info("sbc_decode: reconstruct subband samples, blocks = %d\n", blocks );  */
+	for(j = 0; j < blocks; j++){
+
+		for(i = 0; i < numSubbands; i++){
+
+			if(bits[i]){
+
+				uint32_t val = 0;
+				k = bits[i];
+				do{
+
+					val <<= 1;
+#if (DEBUG_DECODING == 1)
+					pr_info("%s: buf = %p, offset %d\n",
+						__func__, buf, buf-start_buf);
+#endif
+					if(*buf & bitpos) val++;
+					if(!(bitpos >>= 1)){
+						bitpos = 0x80;
+						buf++;
+					}
+				}while(--k);
+
+				val = (val << 1) | 1;
+				val <<= scaleFactors[i];
+
+				#ifdef SPEED_OVER_ACCURACY
+					val = mulshift(val, bits[i]);
+				#else
+					val /= levels[i];
+				#endif
+
+				val -= (1 << scaleFactors[i]);
+
+				samples[j][i] = SAMPLE_CVT(val);
+			}
+			else samples[j][i] = SAMPLE_CVT(0);
+		}
+	}
+
+	/* synthesis  */
+#if (DEBUG_DECODING == 1)
+	pr_info("sbc_decode: synthesis, blocks = %d\n", blocks );
+#endif
+	for(j = 0; j < blocks; j++){
+		synth((OUTSAMPLE*)outPCM, samples[j], numSubbands, gV);
+		outPCM += numSubbands;
+	}
+	/* if we used a byte partially, skip the rest of it, it is "padding"  */
+	if(bitpos != 0x80) buf++;
+out:
+#if (DEBUG_DECODING == 1)
+	if(left < 0)
+		pr_err("SBC: buffer over-read by %d bytes.\n", -left);
+	if(left > 0)
+		pr_err("SBC: buffer under-read by %d bytes.\n", left);
+#endif
+
+	return outPCM - outBufPtr;
+}
+
+uint32_t sbcDecGetNumSamples(const uint8_t bufFirstByte){
+
+	uint8_t blocks = (bufFirstByte >> 4) & 3; /* see A2DP table 12.17  */
+	uint8_t numSubbands = bufFirstByte & 1; /* see A2DP table 12.20  */
+
+	numSubbands = numSubbands ? 8 : 4;
+	blocks = (blocks + 1) << 2;
+
+	return numSubbands * blocks;
+}
+
+uint32_t sbcDecGetPacketSize(const uint8_t bufFirstByte, uint8_t bufsecondByte){
+
+	uint8_t blocks = (bufFirstByte >> 4) & 3; /* see A2DP table 12.17 */
+	uint8_t numSubbands = bufFirstByte & 1; /* see A2DP table 12.20 */
+
+	numSubbands = numSubbands ? 8 : 4;
+	blocks = (blocks + 1) << 2;
+
+
+	return 2 + /* header */
+		numSubbands / 2 + /* bit allocations */
+		(blocks * bufsecondByte + 7) / 8; /* data */
+}
diff --git a/drivers/hid/sbcdec.h b/drivers/hid/sbcdec.h
new file mode 100644
index 0000000..11dee3e
--- /dev/null
+++ b/drivers/hid/sbcdec.h
@@ -0,0 +1,51 @@
+/*
+
+Copyright (c) 2012, Dmitry Grinberg (dmitrygr@gmail.com / http://dmitrygr.com)
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+Redistributions of source code must retain the above copyright notice, this
+list of conditions and the following disclaimer.
+Redistributions in binary form must reproduce the above copyright notice,
+this list of conditions and the following disclaimer in the documentation
+and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#ifndef _SBC_DEC_H_
+#define _SBC_DEC_H_
+
+#include <linux/types.h>
+
+#define SBC_MAX_SAMPLES_PER_PACKET   128
+#define SBC_MAX_PACKET_SIZE          262  /* 2 + 8 / 2 + 16 * 8 * 16 / 8 */
+
+/**
+ * Reset the SBC audio decoder state.
+ */
+void sbc_decoder_reset(void);
+
+/**
+ * Decode a packet of SBC audio data to PCM.
+ */
+uint32_t sbc_decode(uint8_t blocks_per_packet, uint8_t num_bits,
+		    const uint8_t* buf, uint16_t len,
+		    int16_t* outbuf);	/*return num SAMPLES produced */
+
+
+#endif
+
+
diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
index fc307e0..93b00d7 100644
--- a/drivers/hid/uhid.c
+++ b/drivers/hid/uhid.c
@@ -116,30 +116,6 @@
 	uhid_queue_event(uhid, UHID_CLOSE);
 }
 
-static int uhid_hid_input(struct input_dev *input, unsigned int type,
-			  unsigned int code, int value)
-{
-	struct hid_device *hid = input_get_drvdata(input);
-	struct uhid_device *uhid = hid->driver_data;
-	unsigned long flags;
-	struct uhid_event *ev;
-
-	ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
-	if (!ev)
-		return -ENOMEM;
-
-	ev->type = UHID_OUTPUT_EV;
-	ev->u.output_ev.type = type;
-	ev->u.output_ev.code = code;
-	ev->u.output_ev.value = value;
-
-	spin_lock_irqsave(&uhid->qlock, flags);
-	uhid_queue(uhid, ev);
-	spin_unlock_irqrestore(&uhid->qlock, flags);
-
-	return 0;
-}
-
 static int uhid_hid_parse(struct hid_device *hid)
 {
 	struct uhid_device *uhid = hid->driver_data;
@@ -273,7 +249,6 @@
 	.stop = uhid_hid_stop,
 	.open = uhid_hid_open,
 	.close = uhid_hid_close,
-	.hidinput_input_event = uhid_hid_input,
 	.parse = uhid_hid_parse,
 };
 
@@ -640,7 +615,7 @@
 
 static struct miscdevice uhid_misc = {
 	.fops		= &uhid_fops,
-	.minor		= MISC_DYNAMIC_MINOR,
+	.minor		= UHID_MINOR,
 	.name		= UHID_NAME,
 };
 
@@ -659,3 +634,5 @@
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("David Herrmann <dh.herrmann@gmail.com>");
 MODULE_DESCRIPTION("User-space I/O driver support for HID subsystem");
+MODULE_ALIAS_MISCDEV(UHID_MINOR);
+MODULE_ALIAS("devname:" UHID_NAME);
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 9941828..a347aaa 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -535,7 +535,6 @@
 {
 	int head;
 	struct usbhid_device *usbhid = hid->driver_data;
-	int len = ((report->size - 1) >> 3) + 1 + (report->id > 0);
 
 	if ((hid->quirks & HID_QUIRK_NOGET) && dir == USB_DIR_IN)
 		return;
@@ -546,7 +545,7 @@
 			return;
 		}
 
-		usbhid->out[usbhid->outhead].raw_report = kmalloc(len, GFP_ATOMIC);
+		usbhid->out[usbhid->outhead].raw_report = hid_alloc_report_buf(report, GFP_ATOMIC);
 		if (!usbhid->out[usbhid->outhead].raw_report) {
 			hid_warn(hid, "output queueing failed\n");
 			return;
@@ -595,7 +594,7 @@
 	}
 
 	if (dir == USB_DIR_OUT) {
-		usbhid->ctrl[usbhid->ctrlhead].raw_report = kmalloc(len, GFP_ATOMIC);
+		usbhid->ctrl[usbhid->ctrlhead].raw_report = hid_alloc_report_buf(report, GFP_ATOMIC);
 		if (!usbhid->ctrl[usbhid->ctrlhead].raw_report) {
 			hid_warn(hid, "control queueing failed\n");
 			return;
@@ -1184,6 +1183,12 @@
 		usbhid_set_leds(hid);
 		device_set_wakeup_enable(&dev->dev, 1);
 	}
+
+	if (interface->desc.bInterfaceSubClass == USB_INTERFACE_SUBCLASS_BOOT &&
+			interface->desc.bInterfaceProtocol ==
+			USB_INTERFACE_PROTOCOL_MOUSE)
+		device_set_wakeup_enable(&dev->dev, 1);
+
 	return 0;
 
 fail:
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 19b8360..0734552 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -109,6 +109,8 @@
 	{ USB_VENDOR_ID_SIGMA_MICRO, USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD, HID_QUIRK_NO_INIT_REPORTS },
 	{ USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X, HID_QUIRK_MULTI_INPUT },
 	{ USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X, HID_QUIRK_MULTI_INPUT },
+	{ USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS },
+
 	{ 0, 0 }
 };
 
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index 4c605c7..deb5c25 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -562,7 +562,7 @@
 				struct hv_hotadd_state *has)
 {
 	int ret = 0;
-	int i, nid, t;
+	int i, nid;
 	unsigned long start_pfn;
 	unsigned long processed_pfn;
 	unsigned long total_pfn = pfn_count;
@@ -607,14 +607,11 @@
 
 		/*
 		 * Wait for the memory block to be onlined.
+		 * Since the hot add has succeeded, it is ok to
+		 * proceed even if the pages in the hot added region
+		 * have not been "onlined" within the allowed time.
 		 */
-		t = wait_for_completion_timeout(&dm_device.ol_waitevent, 5*HZ);
-		if (t == 0) {
-			pr_info("hot_add memory timedout\n");
-			has->ha_end_pfn -= HA_CHUNK;
-			has->covered_end_pfn -=  processed_pfn;
-			break;
-		}
+		wait_for_completion_timeout(&dm_device.ol_waitevent, 5*HZ);
 
 	}
 
@@ -978,6 +975,14 @@
 				dm->num_pages_ballooned +
 				compute_balloon_floor();
 
+	/*
+	 * If our transaction ID is no longer current, just don't
+	 * send the status. This can happen if we were interrupted
+	 * after we picked our transaction ID.
+	 */
+	if (status.hdr.trans_id != atomic_read(&trans_id))
+		return;
+
 	vmbus_sendpacket(dm->dev->channel, &status,
 				sizeof(struct dm_status),
 				(unsigned long)NULL,
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index d6fbb577..791f45d 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -32,7 +32,7 @@
 void hv_begin_read(struct hv_ring_buffer_info *rbi)
 {
 	rbi->ring_buffer->interrupt_mask = 1;
-	smp_mb();
+	mb();
 }
 
 u32 hv_end_read(struct hv_ring_buffer_info *rbi)
@@ -41,7 +41,7 @@
 	u32 write;
 
 	rbi->ring_buffer->interrupt_mask = 0;
-	smp_mb();
+	mb();
 
 	/*
 	 * Now check to see if the ring buffer is still empty.
@@ -71,7 +71,7 @@
 
 static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
 {
-	smp_mb();
+	mb();
 	if (rbi->ring_buffer->interrupt_mask)
 		return false;
 
@@ -442,7 +442,7 @@
 					     sizeof(u64));
 
 	/* Issue a full memory barrier before updating the write index */
-	smp_mb();
+	mb();
 
 	/* Now, update the write location */
 	hv_set_next_write_location(outring_info, next_write_location);
@@ -549,7 +549,7 @@
 	/* Make sure all reads are done before we update the read index since */
 	/* the writer may start writing to the read area once the read index */
 	/*is updated */
-	smp_mb();
+	mb();
 
 	/* Update the read index */
 	hv_set_next_read_location(inring_info, next_read_location);
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index bf421e0..4004e54 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -434,7 +434,7 @@
 		 * will not deliver any more messages since there is
 		 * no empty slot
 		 */
-		smp_mb();
+		mb();
 
 		if (msg->header.message_flags.msg_pending) {
 			/*
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 0428e8a..9d5d5d8 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -296,8 +296,8 @@
 	  If you say yes here you get support for the temperature
 	  sensor(s) inside your CPU. Supported are later revisions of
 	  the AMD Family 10h and all revisions of the AMD Family 11h,
-	  12h (Llano), 14h (Brazos) and 15h (Bulldozer/Trinity)
-	  microarchitectures.
+	  12h (Llano), 14h (Brazos), 15h (Bulldozer/Trinity) and
+	  16h (Kabini) microarchitectures.
 
 	  This driver can also be built as a module.  If so, the module
 	  will be called k10temp.
@@ -504,6 +504,16 @@
 	  sensor inside your CPU. Most of the family 6 CPUs
 	  are supported. Check Documentation/hwmon/coretemp for details.
 
+config SENSORS_CORETEMP_INTERRUPT
+	tristate "Intel Core/Core2/Atom temperature sensor Interrupts"
+	depends on SENSORS_CORETEMP
+	help
+	  If you say yes here you get support for interrupts when the
+	  CPU temperature crosses the programmed threshold.
+
+	  This is tested only for specific platforms(e.g Atom). If you
+	  are not sure, say N here.
+
 config SENSORS_IBMAEM
 	tristate "IBM Active Energy Manager temperature/power sensors and control"
 	select IPMI_SI
@@ -822,6 +832,12 @@
 	  This driver can also be built as a module.  If so, the module
 	  will be called lm95245.
 
+config MSIC_GPADC
+	tristate "MSIC GPADC driver for Intel Medfield platform"
+	depends on INTEL_SCU_IPC
+	help
+	  Say Y here to enable MSIC GPADC driver on Intel Medfield Platform
+
 config SENSORS_MAX1111
 	tristate "Maxim MAX1111 Serial 8-bit ADC chip and compatibles"
 	depends on SPI_MASTER
@@ -1074,16 +1090,6 @@
 	  This driver can also be built as a module.  If so, the module
 	  will be called dme1737.
 
-config SENSORS_EMC1403
-	tristate "SMSC EMC1403/23 thermal sensor"
-	depends on I2C
-	help
-	  If you say yes here you get support for the SMSC EMC1403/23
-	  temperature monitoring chip.
-
-	  Threshold values can be configured using sysfs.
-	  Data from the different diodes are accessible via sysfs.
-
 config SENSORS_EMC2103
 	tristate "SMSC EMC2103"
 	depends on I2C
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index d17d3e6..6b368dd 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -51,7 +51,6 @@
 obj-$(CONFIG_SENSORS_DME1737)	+= dme1737.o
 obj-$(CONFIG_SENSORS_DS620)	+= ds620.o
 obj-$(CONFIG_SENSORS_DS1621)	+= ds1621.o
-obj-$(CONFIG_SENSORS_EMC1403)	+= emc1403.o
 obj-$(CONFIG_SENSORS_EMC2103)	+= emc2103.o
 obj-$(CONFIG_SENSORS_EMC6W201)	+= emc6w201.o
 obj-$(CONFIG_SENSORS_F71805F)	+= f71805f.o
@@ -140,6 +139,7 @@
 obj-$(CONFIG_SENSORS_W83L786NG)	+= w83l786ng.o
 obj-$(CONFIG_SENSORS_WM831X)	+= wm831x-hwmon.o
 obj-$(CONFIG_SENSORS_WM8350)	+= wm8350-hwmon.o
+obj-$(CONFIG_MSIC_GPADC)        += intel_mid_gpadc.o
 
 obj-$(CONFIG_PMBUS)		+= pmbus/
 
diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
index b83bf4b..5863735 100644
--- a/drivers/hwmon/adt7470.c
+++ b/drivers/hwmon/adt7470.c
@@ -215,7 +215,7 @@
 					  u16 value)
 {
 	return i2c_smbus_write_byte_data(client, reg, value & 0xFF)
-	       && i2c_smbus_write_byte_data(client, reg + 1, value >> 8);
+	       || i2c_smbus_write_byte_data(client, reg + 1, value >> 8);
 }
 
 static void adt7470_init_client(struct i2c_client *client)
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index 62c2e32..3288f13 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -230,6 +230,7 @@
 
 static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
 {
+	u8 status, data = 0;
 	int i;
 
 	if (send_command(cmd) || send_argument(key)) {
@@ -237,6 +238,7 @@
 		return -EIO;
 	}
 
+	/* This has no effect on newer (2012) SMCs */
 	if (send_byte(len, APPLESMC_DATA_PORT)) {
 		pr_warn("%.4s: read len fail\n", key);
 		return -EIO;
@@ -250,6 +252,17 @@
 		buffer[i] = inb(APPLESMC_DATA_PORT);
 	}
 
+	/* Read the data port until bit0 is cleared */
+	for (i = 0; i < 16; i++) {
+		udelay(APPLESMC_MIN_WAIT);
+		status = inb(APPLESMC_CMD_PORT);
+		if (!(status & 0x01))
+			break;
+		data = inb(APPLESMC_DATA_PORT);
+	}
+	if (i)
+		pr_warn("flushed %d bytes, last value is: %d\n", i, data);
+
 	return 0;
 }
 
@@ -525,16 +538,25 @@
 {
 	struct applesmc_registers *s = &smcreg;
 	bool left_light_sensor, right_light_sensor;
+	unsigned int count;
 	u8 tmp[1];
 	int ret;
 
 	if (s->init_complete)
 		return 0;
 
-	ret = read_register_count(&s->key_count);
+	ret = read_register_count(&count);
 	if (ret)
 		return ret;
 
+	if (s->cache && s->key_count != count) {
+		pr_warn("key count changed from %d to %d\n",
+			s->key_count, count);
+		kfree(s->cache);
+		s->cache = NULL;
+	}
+	s->key_count = count;
+
 	if (!s->cache)
 		s->cache = kcalloc(s->key_count, sizeof(*s->cache), GFP_KERNEL);
 	if (!s->cache)
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 658ce3a..1d6bfbd3 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -37,6 +37,7 @@
 #include <linux/smp.h>
 #include <linux/moduleparam.h>
 #include <asm/msr.h>
+#include <asm/mce.h>
 #include <asm/processor.h>
 #include <asm/cpu_device_id.h>
 
@@ -52,9 +53,10 @@
 
 #define BASE_SYSFS_ATTR_NO	2	/* Sysfs Base attr no for coretemp */
 #define NUM_REAL_CORES		32	/* Number of Real cores per cpu */
-#define CORETEMP_NAME_LENGTH	17	/* String Length of attrs */
-#define MAX_CORE_ATTRS		4	/* Maximum no of basic attrs */
-#define TOTAL_ATTRS		(MAX_CORE_ATTRS + 1)
+#define CORETEMP_NAME_LENGTH	33	/* String Length of attrs */
+#define MAX_CORE_ATTRS		5	/* Maximum no of basic attrs */
+#define MAX_THRESH_ATTRS	4	/* Maximum no of threshold attrs */
+#define TOTAL_ATTRS		(MAX_CORE_ATTRS + MAX_THRESH_ATTRS)
 #define MAX_CORE_DATA		(NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)
 
 #define TO_PHYS_ID(cpu)		(cpu_data(cpu).phys_proc_id)
@@ -75,6 +77,8 @@
  *		This value is passed as "id" field to rdmsr/wrmsr functions.
  * @status_reg: One of IA32_THERM_STATUS or IA32_PACKAGE_THERM_STATUS,
  *		from where the temperature values should be read.
+ * @intrpt_reg: One of IA32_THERM_INTERRUPT or IA32_PACKAGE_THERM_INTERRUPT,
+ *		from where the thresholds are read.
  * @attr_size:  Total number of pre-core attrs displayed in the sysfs.
  * @is_pkg_data: If this is 1, the temp_data holds pkgtemp data.
  *		Otherwise, temp_data holds coretemp data.
@@ -88,6 +92,7 @@
 	unsigned int cpu;
 	u32 cpu_core_id;
 	u32 status_reg;
+	u32 intrpt_reg;
 	int attr_size;
 	bool is_pkg_data;
 	bool valid;
@@ -102,6 +107,7 @@
 	u16 phys_proc_id;
 	struct temp_data *core_data[MAX_CORE_DATA];
 	struct device_attribute name_attr;
+
 };
 
 struct pdev_entry {
@@ -113,12 +119,119 @@
 static LIST_HEAD(pdev_list);
 static DEFINE_MUTEX(pdev_list_mutex);
 
+#ifdef CONFIG_SENSORS_CORETEMP_INTERRUPT
+static DEFINE_PER_CPU(struct delayed_work, core_threshold_work);
+#endif
 static ssize_t show_name(struct device *dev,
 			struct device_attribute *devattr, char *buf)
 {
 	return sprintf(buf, "%s\n", DRVNAME);
 }
 
+static ssize_t show_tx_triggered(struct device *dev,
+				 struct device_attribute *devattr, char *buf,
+				 u32 mask)
+{
+	u32 eax, edx;
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+	struct platform_data *pdata = dev_get_drvdata(dev);
+	struct temp_data *tdata = pdata->core_data[attr->index];
+
+	rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx);
+
+	return sprintf(buf, "%d\n", !!(eax & mask));
+}
+
+static ssize_t show_t0_triggered(struct device *dev,
+				 struct device_attribute *devattr, char *buf)
+{
+	return show_tx_triggered(dev, devattr, buf, THERM_STATUS_THRESHOLD0);
+}
+
+static ssize_t show_t1_triggered(struct device *dev,
+				 struct device_attribute *devattr, char *buf)
+{
+	return show_tx_triggered(dev, devattr, buf, THERM_STATUS_THRESHOLD1);
+}
+
+static ssize_t show_tx(struct device *dev,
+		       struct device_attribute *devattr, char *buf,
+		       u32 mask, int shift)
+{
+	struct platform_data *pdata = dev_get_drvdata(dev);
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+	struct temp_data *tdata = pdata->core_data[attr->index];
+	u32 eax, edx;
+	int t;
+
+	rdmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, &eax, &edx);
+	t = tdata->tjmax - ((eax & mask) >> shift) * 1000;
+	return sprintf(buf, "%d\n", t);
+}
+
+static ssize_t store_tx(struct device *dev,
+			struct device_attribute *devattr,
+			const char *buf, size_t count,
+			u32 mask, int shift)
+{
+	struct platform_data *pdata = dev_get_drvdata(dev);
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+	struct temp_data *tdata = pdata->core_data[attr->index];
+	u32 eax, edx;
+	unsigned long val;
+	int diff;
+
+	if (kstrtoul(buf, 10, &val))
+		return -EINVAL;
+
+	/*
+	 * Thermal threshold mask is 7 bits wide. Values are entered in terms
+	 * of milli degree celsius. Hence don't accept val > (127 * 1000)
+	 */
+	if (val > tdata->tjmax || val > 127000)
+		return -EINVAL;
+
+	diff = (tdata->tjmax - val) / 1000;
+
+	mutex_lock(&tdata->update_lock);
+	rdmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, &eax, &edx);
+	eax = (eax & ~mask) | (diff << shift);
+	wrmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, eax, edx);
+	mutex_unlock(&tdata->update_lock);
+
+	return count;
+}
+
+static ssize_t show_t0(struct device *dev,
+		       struct device_attribute *devattr, char *buf)
+{
+	return show_tx(dev, devattr, buf, THERM_MASK_THRESHOLD0,
+		       THERM_SHIFT_THRESHOLD0);
+}
+
+static ssize_t store_t0(struct device *dev,
+			struct device_attribute *devattr,
+			const char *buf, size_t count)
+{
+	return store_tx(dev, devattr, buf, count, THERM_MASK_THRESHOLD0,
+			THERM_SHIFT_THRESHOLD0);
+}
+
+static ssize_t show_t1(struct device *dev,
+		       struct device_attribute *devattr, char *buf)
+{
+	return show_tx(dev, devattr, buf, THERM_MASK_THRESHOLD1,
+		       THERM_SHIFT_THRESHOLD1);
+}
+
+static ssize_t store_t1(struct device *dev,
+			struct device_attribute *devattr,
+			const char *buf, size_t count)
+{
+	return store_tx(dev, devattr, buf, count, THERM_MASK_THRESHOLD1,
+			THERM_SHIFT_THRESHOLD1);
+}
+
 static ssize_t show_label(struct device *dev,
 				struct device_attribute *devattr, char *buf)
 {
@@ -187,6 +300,7 @@
 	}
 
 	mutex_unlock(&tdata->update_lock);
+
 	return tdata->valid ? sprintf(buf, "%d\n", tdata->temp) : -EAGAIN;
 }
 
@@ -198,7 +312,7 @@
 static const struct tjmax __cpuinitconst tjmax_table[] = {
 	{ "CPU  230", 100000 },		/* Model 0x1c, stepping 2	*/
 	{ "CPU  330", 125000 },		/* Model 0x1c, stepping 2	*/
-	{ "CPU CE4110", 110000 },	/* Model 0x1c, stepping 10 Sodaville */
+	{ "CPU CE4110", 110000 },	/* Model 0x1c, stepping 10	*/
 	{ "CPU CE4150", 110000 },	/* Model 0x1c, stepping 10	*/
 	{ "CPU CE4170", 110000 },	/* Model 0x1c, stepping 10	*/
 };
@@ -212,7 +326,7 @@
 #define ANY 0xff
 
 static const struct tjmax_model __cpuinitconst tjmax_model_table[] = {
-	{ 0x1c, 10, 100000 },	/* D4xx, K4xx, N4xx, D5xx, K5xx, N5xx */
+	{ 0x1c, 10, 100000 },	/* D4xx, N4xx, D5xx, N5xx */
 	{ 0x1c, ANY, 90000 },	/* Z5xx, N2xx, possibly others
 				 * Note: Also matches 230 and 330,
 				 * which are covered by tjmax_table
@@ -222,7 +336,7 @@
 				 * is undetectable by software
 				 */
 	{ 0x27, ANY, 90000 },	/* Atom Medfield (Z2460) */
-	{ 0x35, ANY, 90000 },	/* Atom Clover Trail/Cloverview (Z2760) */
+	{ 0x35, ANY, 90000 },	/* Atom Clovertrail */
 	{ 0x36, ANY, 100000 },	/* Atom Cedar Trail/Cedarview (N2xxx, D2xxx) */
 };
 
@@ -357,67 +471,7 @@
 	return adjust_tjmax(c, id, dev);
 }
 
-static int create_name_attr(struct platform_data *pdata,
-				      struct device *dev)
-{
-	sysfs_attr_init(&pdata->name_attr.attr);
-	pdata->name_attr.attr.name = "name";
-	pdata->name_attr.attr.mode = S_IRUGO;
-	pdata->name_attr.show = show_name;
-	return device_create_file(dev, &pdata->name_attr);
-}
-
-static int __cpuinit create_core_attrs(struct temp_data *tdata,
-				       struct device *dev, int attr_no)
-{
-	int err, i;
-	static ssize_t (*const rd_ptr[TOTAL_ATTRS]) (struct device *dev,
-			struct device_attribute *devattr, char *buf) = {
-			show_label, show_crit_alarm, show_temp, show_tjmax,
-			show_ttarget };
-	static const char *const names[TOTAL_ATTRS] = {
-					"temp%d_label", "temp%d_crit_alarm",
-					"temp%d_input", "temp%d_crit",
-					"temp%d_max" };
-
-	for (i = 0; i < tdata->attr_size; i++) {
-		snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH, names[i],
-			attr_no);
-		sysfs_attr_init(&tdata->sd_attrs[i].dev_attr.attr);
-		tdata->sd_attrs[i].dev_attr.attr.name = tdata->attr_name[i];
-		tdata->sd_attrs[i].dev_attr.attr.mode = S_IRUGO;
-		tdata->sd_attrs[i].dev_attr.show = rd_ptr[i];
-		tdata->sd_attrs[i].index = attr_no;
-		err = device_create_file(dev, &tdata->sd_attrs[i].dev_attr);
-		if (err)
-			goto exit_free;
-	}
-	return 0;
-
-exit_free:
-	while (--i >= 0)
-		device_remove_file(dev, &tdata->sd_attrs[i].dev_attr);
-	return err;
-}
-
-
-static int __cpuinit chk_ucode_version(unsigned int cpu)
-{
-	struct cpuinfo_x86 *c = &cpu_data(cpu);
-
-	/*
-	 * Check if we have problem with errata AE18 of Core processors:
-	 * Readings might stop update when processor visited too deep sleep,
-	 * fixed for stepping D0 (6EC).
-	 */
-	if (c->x86_model == 0xe && c->x86_mask < 0xc && c->microcode < 0x39) {
-		pr_err("Errata AE18 not fixed, update BIOS or microcode of the CPU!\n");
-		return -ENODEV;
-	}
-	return 0;
-}
-
-static struct platform_device __cpuinit *coretemp_get_pdev(unsigned int cpu)
+static struct platform_device *coretemp_get_pdev(unsigned int cpu)
 {
 	u16 phys_proc_id = TO_PHYS_ID(cpu);
 	struct pdev_entry *p;
@@ -434,6 +488,228 @@
 	return NULL;
 }
 
+#ifdef CONFIG_SENSORS_CORETEMP_INTERRUPT
+/* Interrupt Handler for Core Threshold Events */
+static int coretemp_interrupt(__u64 msr_val)
+{
+	unsigned int cpu = smp_processor_id();
+
+	schedule_delayed_work_on(cpu, &per_cpu(core_threshold_work, cpu), 0);
+	return 0;
+}
+
+static void core_threshold_work_fn(struct work_struct *work)
+{
+	u32 eax, edx;
+	int t0, t1, temp;
+	int event = -1, thresh = -1;
+	char *thermal_event[5];
+	bool notify = false;
+	unsigned int cpu = smp_processor_id();
+	int indx = TO_ATTR_NO(cpu);
+	struct platform_device *pdev = coretemp_get_pdev(cpu);
+	struct platform_data *pdata = platform_get_drvdata(pdev);
+	struct temp_data *tdata = pdata->core_data[indx];
+
+	if (!tdata) {
+		pr_err("Could not retrieve temp_data\n");
+		return;
+	}
+
+	rdmsr_on_cpu(cpu, MSR_IA32_THERM_STATUS, &eax, &edx);
+	if (eax & THERM_LOG_THRESHOLD0) {
+		thresh = 0;
+		event = !!(eax & THERM_STATUS_THRESHOLD0);
+
+		/* Reset the Threshold0 interrupt */
+		eax = eax & ~THERM_LOG_THRESHOLD0;
+		wrmsr_on_cpu(cpu, MSR_IA32_THERM_STATUS, eax, edx);
+
+		/* Notify only when we go below the lower threshold */
+		if (event != 1)
+			notify = true;
+
+	} else if (eax & THERM_LOG_THRESHOLD1) {
+		thresh = 1;
+		event = !!(eax & THERM_STATUS_THRESHOLD1);
+
+		/* Reset the Threshold1 interrupt */
+		eax = eax & ~THERM_LOG_THRESHOLD1;
+		wrmsr_on_cpu(cpu, MSR_IA32_THERM_STATUS, eax, edx);
+
+		/* Notify only when we go above the upper threshold */
+		if (event != 0)
+			notify = true;
+	}
+
+	/*
+	 * Read the current Temperature and send it to user land;
+	 * so that the user space can avoid a sysfs read.
+	 */
+	temp = tdata->tjmax - ((eax >> 16) & 0x7f) * 1000;
+
+	/* Read the threshold registers (only) to print threshold values. */
+	rdmsr_on_cpu(cpu, MSR_IA32_THERM_INTERRUPT, &eax, &edx);
+	t0 = tdata->tjmax - ((eax & THERM_MASK_THRESHOLD0) >> THERM_SHIFT_THRESHOLD0) * 1000;
+	t1 = tdata->tjmax - ((eax & THERM_MASK_THRESHOLD1) >> THERM_SHIFT_THRESHOLD1) * 1000;
+
+	if (!notify) {
+		pr_debug("Thermal Event: Sensor: Core %u, cur_temp: %d,\
+			event: %d, level: %d, t0: %d, t1: %d\n",
+			tdata->cpu_core_id, temp, event, thresh, t0, t1);
+		return;
+	} else {
+		pr_info("Thermal Event: Sensor: Core %u, cur_temp: %d,\
+			event: %d, level: %d, t0: %d, t1: %d\n",
+			tdata->cpu_core_id, temp, event, thresh, t0, t1);
+	}
+
+	thermal_event[0] = kasprintf(GFP_KERNEL, "NAME=Core %u",
+						tdata->cpu_core_id);
+	thermal_event[1] = kasprintf(GFP_KERNEL, "TEMP=%d", temp);
+	thermal_event[2] = kasprintf(GFP_KERNEL, "EVENT=%d", event);
+	thermal_event[3] = kasprintf(GFP_KERNEL, "LEVEL=%d", thresh);
+	thermal_event[4] = NULL;
+
+	kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, thermal_event);
+
+	kfree(thermal_event[3]);
+	kfree(thermal_event[2]);
+	kfree(thermal_event[1]);
+	kfree(thermal_event[0]);
+}
+
+static void configure_apic(void *info)
+{
+	u32 l;
+	int *flag = (int *)info;
+
+	l = apic_read(APIC_LVTTHMR);
+
+	if (*flag)	/* Non-Zero flag Masks the APIC */
+		apic_write(APIC_LVTTHMR, l | APIC_LVT_MASKED);
+	else		/* Zero flag UnMasks the APIC */
+		apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
+}
+
+static int config_thresh_intrpt(struct temp_data *data, int enable)
+{
+	u32 eax, edx;
+	unsigned int cpu = data->cpu;
+	int flag = 1; /* Non-Zero Flag masks the apic */
+
+	smp_call_function_single(cpu, &configure_apic, &flag, 1);
+
+	rdmsr_on_cpu(cpu, MSR_IA32_THERM_INTERRUPT, &eax, &edx);
+
+	if (enable) {
+		INIT_DELAYED_WORK(&per_cpu(core_threshold_work, cpu),
+					core_threshold_work_fn);
+
+		eax |= (THERM_INT_THRESHOLD0_ENABLE |
+						THERM_INT_THRESHOLD1_ENABLE);
+		platform_thermal_notify = coretemp_interrupt;
+
+		pr_info("Enabled Aux0/Aux1 interrupts for coretemp\n");
+	} else {
+		eax &= (~(THERM_INT_THRESHOLD0_ENABLE |
+						THERM_INT_THRESHOLD1_ENABLE));
+		platform_thermal_notify = NULL;
+
+		cancel_delayed_work_sync(&per_cpu(core_threshold_work, cpu));
+	}
+
+	wrmsr_on_cpu(cpu, MSR_IA32_THERM_INTERRUPT, eax, edx);
+
+	flag = 0; /* Flag should be zero to unmask the apic */
+	smp_call_function_single(cpu, &configure_apic, &flag, 1);
+
+	return 0;
+}
+#else
+static inline int config_thresh_intrpt(struct temp_data *data, int enable)
+{
+	return 0;
+}
+#endif
+
+static int create_name_attr(struct platform_data *pdata,
+				      struct device *dev)
+{
+	sysfs_attr_init(&pdata->name_attr.attr);
+	pdata->name_attr.attr.name = "name";
+	pdata->name_attr.attr.mode = S_IRUGO;
+	pdata->name_attr.show = show_name;
+	return device_create_file(dev, &pdata->name_attr);
+}
+
+static int __cpuinit create_core_attrs(struct temp_data *tdata,
+			struct device *dev, int attr_no, bool have_ttarget)
+{
+	int err, i;
+	static ssize_t (*const rd_ptr[TOTAL_ATTRS]) (struct device *dev,
+			struct device_attribute *devattr, char *buf) = {
+			show_label, show_crit_alarm, show_temp, show_tjmax,
+			show_ttarget, show_t0, show_t0_triggered,
+			show_t1, show_t1_triggered };
+	static ssize_t (*rw_ptr[TOTAL_ATTRS]) (struct device *dev,
+			struct device_attribute *devattr, const char *buf,
+			size_t count) = { NULL, NULL, NULL, NULL, NULL,
+					store_t0, NULL, store_t1, NULL };
+	static const char *const names[TOTAL_ATTRS] = {
+					"temp%d_label", "temp%d_crit_alarm",
+					"temp%d_input", "temp%d_crit",
+					"temp%d_max",
+					"temp%d_threshold1",
+					"temp%d_threshold1_triggered",
+					"temp%d_threshold2",
+					"temp%d_threshold2_triggered" };
+
+	for (i = 0; i < tdata->attr_size; i++) {
+		snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH,
+				names[i], attr_no);
+		sysfs_attr_init(&tdata->sd_attrs[i].dev_attr.attr);
+		tdata->sd_attrs[i].dev_attr.attr.name = tdata->attr_name[i];
+		tdata->sd_attrs[i].dev_attr.attr.mode = S_IRUGO;
+		if (rw_ptr[i]) {
+			tdata->sd_attrs[i].dev_attr.attr.mode |= S_IWUSR;
+			tdata->sd_attrs[i].dev_attr.store = rw_ptr[i];
+		}
+		tdata->sd_attrs[i].dev_attr.show = rd_ptr[i];
+		tdata->sd_attrs[i].index = attr_no;
+		err = device_create_file(dev, &tdata->sd_attrs[i].dev_attr);
+		if (err)
+			goto exit_free;
+	}
+	return 0;
+
+exit_free:
+	while (--i >= 0) {
+		if (!tdata->sd_attrs[i].dev_attr.attr.name)
+			continue;
+		device_remove_file(dev, &tdata->sd_attrs[i].dev_attr);
+	}
+	return err;
+}
+
+
+static int __cpuinit chk_ucode_version(unsigned int cpu)
+{
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
+
+	/*
+	 * Check if we have problem with errata AE18 of Core processors:
+	 * Readings might stop update when processor visited too deep sleep,
+	 * fixed for stepping D0 (6EC).
+	 */
+	if (c->x86_model == 0xe && c->x86_mask < 0xc && c->microcode < 0x39) {
+		pr_err("Errata AE18 not fixed, update BIOS or "
+		       "microcode of the CPU!\n");
+		return -ENODEV;
+	}
+	return 0;
+}
+
 static struct temp_data __cpuinit *init_temp_data(unsigned int cpu,
 						  int pkg_flag)
 {
@@ -445,6 +721,8 @@
 
 	tdata->status_reg = pkg_flag ? MSR_IA32_PACKAGE_THERM_STATUS :
 							MSR_IA32_THERM_STATUS;
+	tdata->intrpt_reg = pkg_flag ? MSR_IA32_PACKAGE_THERM_INTERRUPT :
+						MSR_IA32_THERM_INTERRUPT;
 	tdata->is_pkg_data = pkg_flag;
 	tdata->cpu = cpu;
 	tdata->cpu_core_id = TO_CORE_ID(cpu);
@@ -461,6 +739,7 @@
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 	u32 eax, edx;
 	int err, attr_no;
+	bool have_ttarget = false;
 
 	/*
 	 * Find attr number for sysfs:
@@ -506,17 +785,28 @@
 		if (!err) {
 			tdata->ttarget
 			  = tdata->tjmax - ((eax >> 8) & 0xff) * 1000;
-			tdata->attr_size++;
+			have_ttarget = true;
 		}
 	}
 
+	/*
+	 * Test if we can access the intrpt register. If so, increase
+	 * 'size' enough to support t0 and t1 attributes.
+	 */
+	err = rdmsr_safe_on_cpu(cpu, tdata->intrpt_reg, &eax, &edx);
+	if (!err)
+		tdata->attr_size += MAX_THRESH_ATTRS;
+
 	pdata->core_data[attr_no] = tdata;
 
 	/* Create sysfs interfaces */
-	err = create_core_attrs(tdata, &pdev->dev, attr_no);
+	err = create_core_attrs(tdata, &pdev->dev, attr_no, have_ttarget);
 	if (err)
 		goto exit_free;
 
+	/* Enable threshold interrupt support */
+	config_thresh_intrpt(tdata, 1);
+
 	return 0;
 exit_free:
 	pdata->core_data[attr_no] = NULL;
@@ -544,8 +834,14 @@
 	struct temp_data *tdata = pdata->core_data[indx];
 
 	/* Remove the sysfs attributes */
-	for (i = 0; i < tdata->attr_size; i++)
+	for (i = 0; i < tdata->attr_size; i++) {
+		if (!tdata->sd_attrs[i].dev_attr.attr.name)
+			continue;
 		device_remove_file(dev, &tdata->sd_attrs[i].dev_attr);
+	}
+
+	/* Enable threshold interrupt support */
+	config_thresh_intrpt(tdata, 0);
 
 	kfree(pdata->core_data[indx]);
 	pdata->core_data[indx] = NULL;
diff --git a/drivers/hwmon/intel_mid_gpadc.c b/drivers/hwmon/intel_mid_gpadc.c
new file mode 100644
index 0000000..f4d0d2f
--- /dev/null
+++ b/drivers/hwmon/intel_mid_gpadc.c
@@ -0,0 +1,1212 @@
+/*
+ * intel_mdf_msic_gpadc.c - Intel Medfield MSIC GPADC Driver
+ *
+ * Copyright (C) 2010 Intel Corporation
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * Author: Jenny TC <jenny.tc@intel.com>
+ * Author: Bin Yang <bin.yang@intel.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/pm_qos.h>
+#include <linux/intel_mid_pm.h>
+#include <linux/workqueue.h>
+#include <linux/fs.h>
+#include <linux/rpmsg.h>
+
+#include <asm/intel_scu_pmic.h>
+#include <asm/intel_mid_rpmsg.h>
+#include <asm/intel_mid_remoteproc.h>
+#include <asm/intel_mid_gpadc.h>
+
+#define VAUDACNT		0x0DB
+#define MCCINT			0x013
+#define IRQLVL1			0x002
+#define IRQLVL1MSK		0x021
+#define ADC1INT			0x003
+#define ADC1ADDR0		0x1C5
+#define ADC1SNS0H		0x1D4
+#define ADC1OFFSETH		0x1C3
+#define ADC1OFFSETL		0x1C4
+#define ADC1CNTL1		0x1C0
+#define ADC1CNTL2		0x1C1
+#define ADC1CNTL3		0x1C2
+#define	ADC1BV0H		0x1F2
+#define ADC1BI0H		0x1FA
+
+#ifdef CONFIG_BOARD_CTP
+#define EEPROMCAL1		0x309
+#define EEPROMCAL2		0x30A
+#else
+#define EEPROMCAL1		0x317
+#define EEPROMCAL2		0x318
+#endif
+
+#define MCCINT_MCCCAL		(1 << 1)
+#define MCCINT_MOVERFLOW	(1 << 0)
+
+#define IRQLVL1MSK_ADCM		(1 << 1)
+
+#define ADC1CNTL1_AD1OFFSETEN	(1 << 6)
+#define ADC1CNTL1_AD1CALEN	(1 << 5)
+#define ADC1CNTL1_ADEN		(1 << 4)
+#define ADC1CNTL1_ADSTRT	(1 << 3)
+#define ADC1CNTL1_ADSLP		7
+#define ADC1CNTL1_ADSLP_DEF	1
+
+#define ADC1INT_ADC1CAL		(1 << 2)
+#define ADC1INT_GSM		(1 << 1)
+#define ADC1INT_RND		(1 << 0)
+
+#define ADC1CNTL3_ADCTHERM	(1 << 2)
+#define ADC1CNTL3_GSMDATARD	(1 << 1)
+#define ADC1CNTL3_RRDATARD	(1 << 0)
+
+#define ADC1CNTL2_DEF		0x7
+#define ADC1CNTL2_ADCGSMEN	(1 << 7)
+
+#define MSIC_STOPCH		(1 << 4)
+
+#define GPADC_CH_MAX		15
+
+#define GPADC_POWERON_DELAY	1
+
+#define SAMPLE_CH_MAX		2
+
+static void *adc_handle[GPADC_CH_MAX] = {};
+static int sample_result[GPADC_CH_MAX][SAMPLE_CH_MAX];
+static struct completion gsmadc_complete;
+static int vol_val;
+static int cur_val;
+
+struct gpadc_info {
+	int initialized;
+	int depth;
+
+	struct workqueue_struct *workq;
+	wait_queue_head_t trimming_wait;
+	struct work_struct trimming_work;
+	struct work_struct gsmpulse_work;
+	int trimming_start;
+
+	/* This mutex protects gpadc sample/config from concurrent conflict.
+	   Any function, which does the sample or config, needs to
+	   hold this lock.
+	   If it is locked, it also means the gpadc is in active mode.
+	   GSM mode sample does not need to hold this lock. It can be used with
+	   normal sample concurrent without poweron.
+	*/
+	struct mutex lock;
+	struct device *dev;
+	int irq;
+	void __iomem *intr;
+	int irq_status;
+
+	int vzse;
+	int vge;
+	int izse;
+	int ige;
+	int addr_mask;
+
+	wait_queue_head_t wait;
+	int rnd_done;
+	int conv_done;
+	int gsmpulse_done;
+
+	struct pm_qos_request pm_qos_request;
+	void (*gsmadc_notify)(int vol, int cur);
+
+	int pmic_ipc_status;
+};
+
+struct gpadc_request {
+	int count;
+	int vref;
+	int ch[GPADC_CH_MAX];
+	int addr[GPADC_CH_MAX];
+};
+
+static struct gpadc_info gpadc_info;
+
+static inline int gpadc_clear_bits(u16 addr, u8 mask)
+{
+	struct gpadc_info *mgi = &gpadc_info;
+	int ret;
+
+	if (mgi->pmic_ipc_status)
+		return -EINVAL;
+
+	ret = intel_scu_ipc_update_register(addr, 0, mask);
+	if (ret)
+		mgi->pmic_ipc_status = -EINVAL;
+
+	return ret;
+}
+
+static inline int gpadc_set_bits(u16 addr, u8 mask)
+{
+	struct gpadc_info *mgi = &gpadc_info;
+	int ret;
+
+	if (mgi->pmic_ipc_status)
+		return -EINVAL;
+
+	ret = intel_scu_ipc_update_register(addr, 0xff, mask);
+	if (ret)
+		mgi->pmic_ipc_status = -EINVAL;
+
+	return ret;
+}
+
+static inline int gpadc_write(u16 addr, u8 data)
+{
+	struct gpadc_info *mgi = &gpadc_info;
+	int ret;
+
+	if (mgi->pmic_ipc_status)
+		return -EINVAL;
+
+	ret = intel_scu_ipc_iowrite8(addr, data);
+	if (ret)
+		mgi->pmic_ipc_status = -EINVAL;
+
+	return ret;
+}
+
+static inline int gpadc_read(u16 addr, u8 *data)
+{
+	struct gpadc_info *mgi = &gpadc_info;
+	int ret;
+
+	if (mgi->pmic_ipc_status)
+		return -EINVAL;
+
+	ret = intel_scu_ipc_ioread8(addr, data);
+	if (ret)
+		mgi->pmic_ipc_status = -EINVAL;
+
+	return ret;
+}
+
+static void gpadc_dump(struct gpadc_info *mgi)
+{
+	u8 data;
+	int i;
+
+	dev_err(mgi->dev, "pmic ipc status: %s\n",
+			mgi->pmic_ipc_status ? "bad" : "good");
+	gpadc_read(VAUDACNT, &data);
+	dev_err(mgi->dev, "VAUDACNT: 0x%x\n", data);
+	gpadc_read(IRQLVL1MSK, &data);
+	dev_err(mgi->dev, "IRQLVL1MSK: 0x%x\n", data);
+	gpadc_read(IRQLVL1, &data);
+	dev_err(mgi->dev, "IRQLVL1: 0x%x\n", data);
+	gpadc_read(ADC1INT, &data);
+	dev_err(mgi->dev, "ADC1INT: 0x%x\n", data);
+	gpadc_read(ADC1CNTL1, &data);
+	dev_err(mgi->dev, "ADC1CNTL1: 0x%x\n", data);
+	gpadc_read(ADC1CNTL2, &data);
+	dev_err(mgi->dev, "ADC1CNTL2: 0x%x\n", data);
+	gpadc_read(ADC1CNTL3, &data);
+	dev_err(mgi->dev, "ADC1CNTL3: 0x%x\n", data);
+	for (i = 0; i < GPADC_CH_MAX; i++) {
+		gpadc_read(ADC1ADDR0+i, &data);
+		dev_err(mgi->dev, "ADC1ADDR[%d]: 0x%x\n", i, data);
+	}
+}
+
+static int gpadc_poweron(struct gpadc_info *mgi, int vref)
+{
+	if (!mgi->depth++) {
+		if (gpadc_set_bits(ADC1CNTL1, ADC1CNTL1_ADEN) != 0)
+			return -EIO;
+		msleep(GPADC_POWERON_DELAY);
+	}
+	if (vref) {
+		if (gpadc_set_bits(ADC1CNTL3, ADC1CNTL3_ADCTHERM) != 0)
+			return -EIO;
+		msleep(GPADC_POWERON_DELAY);
+	}
+	return 0;
+}
+
+static int gpadc_poweroff(struct gpadc_info *mgi)
+{
+	if (!--mgi->depth) {
+		if (gpadc_clear_bits(ADC1CNTL1, ADC1CNTL1_ADEN) != 0)
+			return -EIO;
+		if (gpadc_clear_bits(ADC1CNTL3, ADC1CNTL3_ADCTHERM) != 0)
+			return -EIO;
+	}
+	return 0;
+}
+
+static int gpadc_calib(int rc, int zse, int ge)
+{
+	struct gpadc_info *mgi = &gpadc_info;
+	int tmp;
+
+	if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_CLOVERVIEW) {
+		if (ge == 0) {
+			dev_err(mgi->dev, "calibration divider is zero\n");
+			return 0;
+		}
+
+		/**
+		 * For Cloverview, using the calibration data, we have the
+		 * voltage and current after calibration correction as below:
+		 * V_CAL_CODE = 213.33 * (V_RAW_CODE - VZSE) / VGE
+		 * I_CAL_CODE = 213.33 * (I_RAW_CODE - IZSE) / IGE
+		 */
+
+		/* note: the input zse is multipled by 10,
+		 * input ge is multipled by 100, need to handle them here
+		 */
+		tmp = 21333 * (10 * rc - zse) / ge;
+	} else {
+		/**
+		 * For Medfield, using the calibration data, we have the
+		 * voltage and current after calibration correction as below:
+		 * V_CAL_CODE = V_RAW_CODE - (VZSE + (VGE)* VRAW_CODE/1023)
+		 * I_CAL_CODE = I_RAW_CODE - (IZSE + (IGE)* IRAW_CODE/1023)
+		 */
+		tmp = (10230 * rc - (10230 * zse + 10 * ge * rc)) / 1023;
+	}
+
+	/* tmp is 10 times of result value,
+	 * and it's used to obtain result's closest integer
+	 */
+	return DIV_ROUND_CLOSEST(tmp, 10);
+
+}
+
+static void gpadc_calc_zse_ge(struct gpadc_info *mgi)
+{
+	u8 data;
+	int fse, zse, fse_sign, zse_sign, ge, ge_sign;
+
+	if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_CLOVERVIEW) {
+		gpadc_read(EEPROMCAL1, &data);
+		zse = data & 0xf;
+		ge = (data >> 4) & 0xf;
+		gpadc_read(EEPROMCAL2, &data);
+		zse_sign = (data & (1 << 6)) ? -1 : 1;
+		ge_sign = (data & (1 << 7)) ? -1 : 1;
+		zse *= zse_sign;
+		ge *= ge_sign;
+		/* vzse divided by 2 may cause 0.5, x10 to avoid float */
+		mgi->vzse = mgi->izse = zse * 10 / 2;
+		/* vge multiple 100 to avoid float */
+		mgi->vge = mgi->ige = 21333 - (ge * 100 / 4);
+	} else {
+		/* voltage trim */
+		gpadc_read(EEPROMCAL1, &data);
+		zse = (data & 0xf)/2;
+		fse = ((data >> 4) & 0xf)/2;
+		gpadc_read(EEPROMCAL2, &data);
+		zse_sign = (data & (1 << 6)) ? 1 : 0;
+		fse_sign = (data & (1 << 7)) ? 1 : 0;
+		zse *= zse_sign;
+		fse *= fse_sign;
+		mgi->vzse = zse;
+		mgi->vge = fse - zse;
+
+		/* current trim */
+		fse = (data & 0xf)/2;
+		fse_sign = (data & (1 << 5)) ? 1 : 0;
+		fse = ~(fse_sign * fse) + 1;
+		gpadc_read(ADC1OFFSETH, &data);
+		zse = data << 2;
+		gpadc_read(ADC1OFFSETL, &data);
+		zse += data & 0x3;
+		mgi->izse = zse;
+		mgi->ige = fse + zse;
+	}
+}
+
+static void gpadc_trimming(struct work_struct *work)
+{
+	u8 data;
+	struct gpadc_info *mgi =
+		container_of(work, struct gpadc_info, trimming_work);
+
+	mutex_lock(&mgi->lock);
+	mgi->trimming_start = 1;
+	wake_up(&mgi->trimming_wait);
+	if (gpadc_poweron(mgi, 1)) {
+		dev_err(mgi->dev, "power on failed\n");
+		goto failed;
+	}
+	/* calibration */
+	gpadc_read(ADC1CNTL1, &data);
+	data &= ~ADC1CNTL1_AD1OFFSETEN;
+	data |= ADC1CNTL1_AD1CALEN;
+	gpadc_write(ADC1CNTL1, data);
+	gpadc_read(ADC1INT, &data);
+
+	/*workarround: no calib int */
+	msleep(300);
+	gpadc_set_bits(ADC1INT, ADC1INT_ADC1CAL);
+	gpadc_clear_bits(ADC1CNTL1, ADC1CNTL1_AD1CALEN);
+
+	gpadc_calc_zse_ge(mgi);
+
+	if (gpadc_poweroff(mgi)) {
+		dev_err(mgi->dev, "power off failed\n");
+		goto failed;
+	}
+
+failed:
+	mutex_unlock(&mgi->lock);
+}
+
+static irqreturn_t msic_gpadc_isr(int irq, void *data)
+{
+	struct gpadc_info *mgi = data;
+
+	if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_CLOVERVIEW)
+		mgi->irq_status = ADC1INT_RND;
+	else
+		mgi->irq_status = readl(mgi->intr) >> 8 & 0xff;
+
+	return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t msic_gpadc_irq(int irq, void *data)
+{
+	struct gpadc_info *mgi = data;
+
+	if (mgi->irq_status & ADC1INT_GSM) {
+		mgi->gsmpulse_done = 1;
+		queue_work(mgi->workq, &mgi->gsmpulse_work);
+	} else if (mgi->irq_status & ADC1INT_RND) {
+		mgi->rnd_done = 1;
+		wake_up(&mgi->wait);
+	} else if (mgi->irq_status & ADC1INT_ADC1CAL) {
+		mgi->conv_done = 1;
+		wake_up(&mgi->wait);
+	} else {
+		/* coulomb counter should be handled by firmware. Ignore it */
+		dev_dbg(mgi->dev, "coulomb counter is not support\n");
+	}
+	return IRQ_HANDLED;
+}
+
+static int alloc_channel_addr(struct gpadc_info *mgi, int ch)
+{
+	int i;
+	int addr = -EBUSY;
+	int last = 0;
+
+	for (i = 0; i < GPADC_CH_MAX; i++)
+		if (mgi->addr_mask & (1 << i))
+			last = i;
+
+	for (i = 0; i < GPADC_CH_MAX; i++) {
+		if (!(mgi->addr_mask & (1 << i))) {
+			addr = i;
+			mgi->addr_mask |= 1 << i;
+			if (addr > last) {
+				gpadc_clear_bits(ADC1ADDR0+last, MSIC_STOPCH);
+				gpadc_write(ADC1ADDR0+addr, ch|MSIC_STOPCH);
+			} else {
+				gpadc_write(ADC1ADDR0+addr, ch);
+			}
+			break;
+		}
+	}
+	return addr;
+}
+
+static void free_channel_addr(struct gpadc_info *mgi, int addr)
+{
+	int last = 0;
+	int i;
+
+	mgi->addr_mask &= ~(1 << addr);
+	for (i = 0; i < GPADC_CH_MAX; i++)
+		if (mgi->addr_mask & (1 << i))
+			last = i;
+	if (addr > last)
+		gpadc_set_bits(ADC1ADDR0+last, MSIC_STOPCH);
+}
+
+static void gpadc_gsmpulse_work(struct work_struct *work)
+{
+	int i;
+	u8 data;
+	int tmp;
+	int vol, cur;
+	struct gpadc_info *mgi =
+		container_of(work, struct gpadc_info, gsmpulse_work);
+
+	mutex_lock(&mgi->lock);
+	gpadc_set_bits(ADC1CNTL3, ADC1CNTL3_GSMDATARD);
+
+	vol = 0;
+	cur = 0;
+	for (i = 0; i < 4; i++) {
+		gpadc_read(ADC1BV0H + i * 2, &data);
+		tmp = data << 2;
+		gpadc_read(ADC1BV0H + i * 2 + 1, &data);
+		tmp += data & 0x3;
+		if (tmp > vol)
+			vol = tmp;
+
+		gpadc_read(ADC1BI0H + i * 2, &data);
+		tmp = data << 2;
+		gpadc_read(ADC1BI0H + i * 2 + 1, &data);
+		tmp += data & 0x3;
+		if (tmp > cur)
+			cur = tmp;
+	}
+
+	vol = gpadc_calib(vol, mgi->vzse, mgi->vge);
+	cur = gpadc_calib(cur, mgi->izse, mgi->ige);
+
+	gpadc_set_bits(ADC1INT, ADC1INT_GSM);
+	gpadc_clear_bits(ADC1CNTL3, ADC1CNTL3_GSMDATARD);
+	if (mgi->gsmadc_notify)
+		mgi->gsmadc_notify(vol, cur);
+	mutex_unlock(&mgi->lock);
+}
+
+/**
+ * intel_mid_gpadc_gsmpulse_register - power on gsm adc and register a callback
+ * @fn: callback function after gsm adc conversion is completed
+ *
+ * Returns 0 on success or an error code.
+ *
+ * This function may sleep.
+ */
+int intel_mid_gpadc_gsmpulse_register(void(*fn)(int vol, int cur))
+{
+	int ret = 0;
+	struct gpadc_info *mgi = &gpadc_info;
+
+	if (!mgi->initialized)
+		return -ENODEV;
+	mutex_lock(&mgi->lock);
+	if (!mgi->gsmadc_notify) {
+		gpadc_write(ADC1CNTL2, ADC1CNTL2_DEF);
+		gpadc_set_bits(ADC1CNTL2, ADC1CNTL2_ADCGSMEN);
+		mgi->gsmadc_notify = fn;
+	} else {
+		ret = -EBUSY;
+	}
+	mutex_unlock(&mgi->lock);
+	return ret;
+}
+EXPORT_SYMBOL(intel_mid_gpadc_gsmpulse_register);
+
+/**
+ * intel_mid_gpadc_gsmpulse_unregister - power off gsm adc and unregister
+ *					the callback
+ * @fn: callback function after gsm adc conversion is completed
+ *
+ * Returns 0 on success or an error code.
+ *
+ * This function may sleep.
+ */
+int intel_mid_gpadc_gsmpulse_unregister(void(*fn)(int vol, int cur))
+{
+	int ret = 0;
+	struct gpadc_info *mgi = &gpadc_info;
+
+	if (!mgi->initialized)
+		return -ENODEV;
+	mutex_lock(&mgi->lock);
+	if (mgi->gsmadc_notify == fn) {
+		mgi->gsmadc_notify = NULL;
+		gpadc_clear_bits(ADC1CNTL2, ADC1CNTL2_ADCGSMEN);
+	}
+	mutex_unlock(&mgi->lock);
+	return ret;
+}
+EXPORT_SYMBOL(intel_mid_gpadc_gsmpulse_unregister);
+
+/**
+ * intel_mid_gpadc_sample - do gpadc sample.
+ * @handle: the gpadc handle
+ * @sample_count: do sample serveral times and get the average value.
+ * @...: sampling resulting arguments of all channels. refer to sscanf.
+ *       caller should not access it before return.
+ *
+ * Returns 0 on success or an error code.
+ *
+ * This function may sleep.
+ */
+int intel_mid_gpadc_sample(void *handle, int sample_count, ...)
+{
+
+	struct gpadc_request *rq = handle;
+	struct gpadc_info *mgi = &gpadc_info;
+	int i;
+	u8 data;
+	int ret = 0;
+	int count;
+	int tmp;
+	int *val[GPADC_CH_MAX];
+	va_list args;
+
+	if (!mgi->initialized)
+		return -ENODEV;
+
+	mutex_lock(&mgi->lock);
+	mgi->pmic_ipc_status = 0;
+
+	va_start(args, sample_count);
+	for (i = 0; i < rq->count; i++) {
+		val[i] = va_arg(args, int*);
+		*val[i] = 0;
+	}
+	va_end(args);
+
+	pm_qos_add_request(&mgi->pm_qos_request,
+			PM_QOS_CPU_DMA_LATENCY,	CSTATE_EXIT_LATENCY_S0i1-1);
+	gpadc_poweron(mgi, rq->vref);
+	gpadc_clear_bits(ADC1CNTL1, ADC1CNTL1_AD1OFFSETEN);
+	gpadc_read(ADC1CNTL1, &data);
+	data = (data & ~ADC1CNTL1_ADSLP) + ADC1CNTL1_ADSLP_DEF;
+	gpadc_write(ADC1CNTL1, data);
+	mgi->rnd_done = 0;
+	gpadc_set_bits(ADC1CNTL1, ADC1CNTL1_ADSTRT);
+	for (count = 0; count < sample_count; count++) {
+		if (wait_event_timeout(mgi->wait, mgi->rnd_done, HZ) == 0) {
+			gpadc_dump(mgi);
+			dev_err(mgi->dev, "sample timeout\n");
+			ret = -ETIMEDOUT;
+			goto fail;
+		}
+		gpadc_set_bits(ADC1CNTL3, ADC1CNTL3_RRDATARD);
+		for (i = 0; i < rq->count; ++i) {
+			tmp = 0;
+			gpadc_read(ADC1SNS0H + 2 * rq->addr[i], &data);
+			tmp += data << 2;
+			gpadc_read(ADC1SNS0H + 2 * rq->addr[i] + 1, &data);
+			tmp += data & 0x3;
+
+			if (rq->ch[i] & CH_NEED_VCALIB)
+				tmp = gpadc_calib(tmp, mgi->vzse, mgi->vge);
+			if (rq->ch[i] & CH_NEED_ICALIB)
+				tmp = gpadc_calib(tmp, mgi->izse, mgi->ige);
+
+			*val[i] += tmp;
+		}
+		gpadc_clear_bits(ADC1CNTL3, ADC1CNTL3_RRDATARD);
+		mgi->rnd_done = 0;
+	}
+
+	for (i = 0; i < rq->count; ++i)
+		*val[i] /= sample_count;
+
+fail:
+	gpadc_clear_bits(ADC1CNTL1, ADC1CNTL1_ADSTRT);
+	gpadc_poweroff(mgi);
+	pm_qos_remove_request(&mgi->pm_qos_request);
+
+	if (mgi->pmic_ipc_status) {
+		dev_err(mgi->dev, "sample broken\n");
+		ret = mgi->pmic_ipc_status;
+	}
+	mutex_unlock(&mgi->lock);
+	return ret;
+}
+EXPORT_SYMBOL(intel_mid_gpadc_sample);
+
+/**
+ * get_gpadc_sample() - get gpadc sample.
+ * @handle: the gpadc handle
+ * @sample_count: do sample serveral times and get the average value.
+ * @buffer: sampling resulting arguments of all channels.
+ *
+ * Returns 0 on success or an error code.
+ *
+ * This function may sleep.
+ */
+int get_gpadc_sample(void *handle, int sample_count, int *buffer)
+{
+
+	struct gpadc_request *rq = handle;
+	struct gpadc_info *mgi = &gpadc_info;
+	int i;
+	u8 data;
+	int ret = 0;
+	int count;
+	int tmp;
+
+	if (!mgi->initialized)
+		return -ENODEV;
+
+	mutex_lock(&mgi->lock);
+	mgi->pmic_ipc_status = 0;
+
+	for (i = 0; i < rq->count; i++)
+		buffer[i] = 0;
+
+	pm_qos_add_request(&mgi->pm_qos_request,
+			PM_QOS_CPU_DMA_LATENCY,	CSTATE_EXIT_LATENCY_S0i1-1);
+	gpadc_poweron(mgi, rq->vref);
+	gpadc_clear_bits(ADC1CNTL1, ADC1CNTL1_AD1OFFSETEN);
+	gpadc_read(ADC1CNTL1, &data);
+	data = (data & ~ADC1CNTL1_ADSLP) + ADC1CNTL1_ADSLP_DEF;
+	gpadc_write(ADC1CNTL1, data);
+	mgi->rnd_done = 0;
+	gpadc_set_bits(ADC1CNTL1, ADC1CNTL1_ADSTRT);
+	for (count = 0; count < sample_count; count++) {
+		if (wait_event_timeout(mgi->wait, mgi->rnd_done, HZ) == 0) {
+			gpadc_dump(mgi);
+			dev_err(mgi->dev, "sample timeout\n");
+			ret = -ETIMEDOUT;
+			goto fail;
+		}
+		gpadc_set_bits(ADC1CNTL3, ADC1CNTL3_RRDATARD);
+		for (i = 0; i < rq->count; ++i) {
+			tmp = 0;
+			gpadc_read(ADC1SNS0H + 2 * rq->addr[i], &data);
+			tmp += data << 2;
+			gpadc_read(ADC1SNS0H + 2 * rq->addr[i] + 1, &data);
+			tmp += data & 0x3;
+
+			if (rq->ch[i] & CH_NEED_VCALIB)
+				tmp = gpadc_calib(tmp, mgi->vzse, mgi->vge);
+			if (rq->ch[i] & CH_NEED_ICALIB)
+				tmp = gpadc_calib(tmp, mgi->izse, mgi->ige);
+			buffer[i] += tmp;
+		}
+		gpadc_clear_bits(ADC1CNTL3, ADC1CNTL3_RRDATARD);
+		mgi->rnd_done = 0;
+	}
+
+	for (i = 0; i < rq->count; ++i)
+		buffer[i] /= sample_count;
+
+fail:
+	gpadc_clear_bits(ADC1CNTL1, ADC1CNTL1_ADSTRT);
+	gpadc_poweroff(mgi);
+	pm_qos_remove_request(&mgi->pm_qos_request);
+	if (mgi->pmic_ipc_status) {
+		dev_err(mgi->dev, "sample broken\n");
+		ret = mgi->pmic_ipc_status;
+	}
+	mutex_unlock(&mgi->lock);
+	return ret;
+}
+EXPORT_SYMBOL(get_gpadc_sample);
+
+/**
+ * intel_mid_gpadc_free - free gpadc
+ * @handle: the gpadc handle
+ *
+ * This function may sleep.
+ */
+void intel_mid_gpadc_free(void *handle)
+{
+	struct gpadc_request *rq = handle;
+	struct gpadc_info *mgi = &gpadc_info;
+	int i;
+
+	mutex_lock(&mgi->lock);
+	mgi->pmic_ipc_status = 0;
+	for (i = 0; i < rq->count; i++)
+		free_channel_addr(mgi, rq->addr[i]);
+
+	if (mgi->pmic_ipc_status)
+		dev_err(mgi->dev, "gpadc free broken\n");
+
+	mutex_unlock(&mgi->lock);
+	kfree(rq);
+}
+EXPORT_SYMBOL(intel_mid_gpadc_free);
+
+/**
+ * intel_mid_gpadc_alloc - allocate gpadc for channels
+ * @count: the count of channels
+ * @...: the channel parameters. (channel idx | flags)
+ *       flags:
+ *             CH_NEED_VCALIB   it needs voltage calibration
+ *             CH_NEED_ICALIB   it needs current calibration
+ *
+ * Returns gpadc handle on success or NULL on fail.
+ *
+ * This function may sleep.
+ */
+void *intel_mid_gpadc_alloc(int count, ...)
+{
+	struct gpadc_request *rq;
+	struct gpadc_info *mgi = &gpadc_info;
+	va_list args;
+	int ch;
+	int i;
+
+	if (!mgi->initialized)
+		return NULL;
+
+	rq = kzalloc(sizeof(struct gpadc_request), GFP_KERNEL);
+	if (rq == NULL)
+		return NULL;
+
+	va_start(args, count);
+	mutex_lock(&mgi->lock);
+	mgi->pmic_ipc_status = 0;
+
+	rq->count = count;
+	for (i = 0; i < count; i++) {
+		ch = va_arg(args, int);
+		rq->ch[i] = ch;
+		if (ch & CH_NEED_VREF)
+			rq->vref = 1;
+		ch &= 0xf;
+		rq->addr[i] = alloc_channel_addr(mgi, ch);
+		if (rq->addr[i] < 0) {
+			dev_err(mgi->dev, "alloc addr failed\n");
+			while (i-- > 0)
+				free_channel_addr(mgi, rq->addr[i]);
+			kfree(rq);
+			rq = NULL;
+			break;
+		}
+	}
+	if (mgi->pmic_ipc_status)
+		dev_err(mgi->dev, "gpadc alloc broken\n");
+
+	mutex_unlock(&mgi->lock);
+	va_end(args);
+
+	return rq;
+}
+EXPORT_SYMBOL(intel_mid_gpadc_alloc);
+
+ /**
+ * gpadc_alloc_channels - allocate gpadc for channels
+ * @count: the count of channels
+ * @...: the channel parameters. (channel idx | flags)
+ *       flags:
+ *             CH_NEED_VCALIB   it needs voltage calibration
+ *             CH_NEED_ICALIB   it needs current calibration
+ *
+ * Returns gpadc handle on success or NULL on fail.
+ *
+ * This function may sleep.
+ *
+ * TODO: Cleanup intel_mid_gpadc_alloc() once all its users
+ *       are moved to gpadc_alloc_channels()
+ *
+ */
+
+void *gpadc_alloc_channels(int n, int *channel_info)
+{
+	struct gpadc_request *rq;
+	struct gpadc_info *mgi = &gpadc_info;
+	int ch;
+	int i;
+
+	if (!mgi->initialized)
+		return NULL;
+
+	rq = kzalloc(sizeof(struct gpadc_request), GFP_KERNEL);
+	if (rq == NULL)
+		return NULL;
+
+	mutex_lock(&mgi->lock);
+	mgi->pmic_ipc_status = 0;
+
+	rq->count = n;
+	for (i = 0; i < n; i++) {
+		ch = channel_info[i];
+		rq->ch[i] = ch;
+		if (ch & CH_NEED_VREF)
+			rq->vref = 1;
+		ch &= 0xf;
+		rq->addr[i] = alloc_channel_addr(mgi, ch);
+		if (rq->addr[i] < 0) {
+			dev_err(mgi->dev, "alloc addr failed\n");
+			while (i-- > 0)
+				free_channel_addr(mgi, rq->addr[i]);
+			kfree(rq);
+			rq = NULL;
+			break;
+		}
+	}
+	if (mgi->pmic_ipc_status)
+		dev_err(mgi->dev, "gpadc alloc broken\n");
+
+	mutex_unlock(&mgi->lock);
+
+	return rq;
+}
+EXPORT_SYMBOL(gpadc_alloc_channels);
+
+static ssize_t intel_mid_gpadc_store_alloc_channel(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t size)
+{
+	int val, hdn;
+	int ch[SAMPLE_CH_MAX];
+
+	val = sscanf(buf, "%d %x %x", &hdn, &ch[0], &ch[1]);
+
+	if (val < 2 || val > 3) {
+		dev_err(dev, "invalid number of arguments");
+		return -EINVAL;
+	}
+
+	if (hdn < 1 || hdn > GPADC_CH_MAX) {
+		dev_err(dev, "invalid handle value");
+		return -EINVAL;
+	}
+
+	if (adc_handle[hdn - 1]) {
+		dev_err(dev, "adc handle %d has been occupied", hdn);
+		return -EBUSY;
+	}
+
+	if (val == 2)
+		adc_handle[hdn - 1] = intel_mid_gpadc_alloc(1, ch[0]);
+	else
+		adc_handle[hdn - 1] = intel_mid_gpadc_alloc(2, ch[0], ch[1]);
+
+	if (!adc_handle[hdn - 1]) {
+		dev_err(dev, "allocating adc handle %d failed", hdn);
+		return -ENOMEM;
+	}
+
+	return size;
+}
+
+static ssize_t intel_mid_gpadc_store_free_channel(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t size)
+{
+	int hdn;
+
+	if (sscanf(buf, "%d", &hdn) != 1) {
+		dev_err(dev, "invalid number of argument");
+		return -EINVAL;
+	}
+
+	if (hdn < 1 || hdn > GPADC_CH_MAX) {
+		dev_err(dev, "invalid handle value");
+		return -EINVAL;
+	}
+
+	if (adc_handle[hdn - 1]) {
+		intel_mid_gpadc_free(adc_handle[hdn - 1]);
+		adc_handle[hdn - 1] = NULL;
+	}
+
+	return size;
+}
+
+static ssize_t intel_mid_gpadc_store_sample(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t size)
+{
+	int hdn, spc;
+	int ret;
+	struct gpadc_request *rq;
+
+	if (sscanf(buf, "%d %d", &hdn, &spc) != 2) {
+		dev_err(dev, "invalid number of arguments");
+		return -EINVAL;
+	}
+
+	if (hdn < 1 || hdn > GPADC_CH_MAX) {
+		dev_err(dev, "invalid handle value");
+		return -EINVAL;
+	}
+
+	rq = adc_handle[hdn - 1];
+	if (!rq) {
+		dev_err(dev, "null handle");
+		return -EINVAL;
+	}
+
+	if (rq->count == 1)
+		ret = intel_mid_gpadc_sample(adc_handle[hdn-1],
+			spc, &sample_result[hdn - 1][0]);
+	else
+		ret = intel_mid_gpadc_sample(adc_handle[hdn - 1],
+			spc, &sample_result[hdn - 1][0],
+			&sample_result[hdn - 1][1]);
+
+	if (ret) {
+		dev_err(dev, "sampling failed. adc handle: %d", hdn);
+		return -EINVAL;
+	}
+
+	return size;
+}
+
+static ssize_t intel_mid_gpadc_show_sample(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	int hdc;
+	int used = 0;
+	struct gpadc_request *rq;
+
+	for (hdc = 0; hdc < GPADC_CH_MAX; hdc++) {
+		if (adc_handle[hdc]) {
+			rq = adc_handle[hdc];
+			if (rq->count == 1)
+				used += snprintf(buf + used, PAGE_SIZE - used,
+					  "%d ", sample_result[hdc][0]);
+			else
+				used += snprintf(buf + used, PAGE_SIZE - used,
+					  "%d %d ", sample_result[hdc][0],
+					  sample_result[hdc][1]);
+		}
+	}
+
+	return used;
+}
+
+
+static void gsmpulse_sysfs_callback(int vol, int cur)
+{
+	vol_val = vol;
+	cur_val = cur;
+	complete(&gsmadc_complete);
+}
+
+static ssize_t intel_mid_gpadc_show_gsmpulse_sample(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	int ret;
+
+	INIT_COMPLETION(gsmadc_complete);
+	intel_mid_gpadc_gsmpulse_register(gsmpulse_sysfs_callback);
+	ret = wait_for_completion_interruptible(&gsmadc_complete);
+	intel_mid_gpadc_gsmpulse_unregister(gsmpulse_sysfs_callback);
+	if (ret)
+		return 0;
+	else
+		return snprintf(buf, PAGE_SIZE, "%d %d", vol_val, cur_val);
+}
+
+static DEVICE_ATTR(alloc_channel, S_IWUSR, NULL,
+		intel_mid_gpadc_store_alloc_channel);
+static DEVICE_ATTR(free_channel, S_IWUSR, NULL,
+		intel_mid_gpadc_store_free_channel);
+static DEVICE_ATTR(sample, S_IRUGO | S_IWUSR,
+		intel_mid_gpadc_show_sample, intel_mid_gpadc_store_sample);
+static DEVICE_ATTR(gsmpulse_sample, S_IRUGO,
+		intel_mid_gpadc_show_gsmpulse_sample, NULL);
+
+static struct attribute *intel_mid_gpadc_attrs[] = {
+	&dev_attr_alloc_channel.attr,
+	&dev_attr_free_channel.attr,
+	&dev_attr_sample.attr,
+	&dev_attr_gsmpulse_sample.attr,
+	NULL,
+};
+
+static struct attribute_group intel_mid_gpadc_attr_group = {
+	.name = "mid_gpadc",
+	.attrs = intel_mid_gpadc_attrs,
+};
+
+static int msic_gpadc_probe(struct platform_device *pdev)
+{
+	struct gpadc_info *mgi = &gpadc_info;
+	struct intel_mid_gpadc_platform_data *pdata = pdev->dev.platform_data;
+	int err = 0;
+
+	mutex_init(&mgi->lock);
+	init_waitqueue_head(&mgi->wait);
+	init_waitqueue_head(&mgi->trimming_wait);
+	mgi->workq = create_singlethread_workqueue(dev_name(&pdev->dev));
+	if (mgi->workq == NULL)
+		return -ENOMEM;
+
+	mgi->dev = &pdev->dev;
+	mgi->intr = ioremap_nocache(pdata->intr, 4);
+	mgi->irq = platform_get_irq(pdev, 0);
+
+	gpadc_clear_bits(IRQLVL1MSK, IRQLVL1MSK_ADCM);
+	if (request_threaded_irq(mgi->irq, msic_gpadc_isr, msic_gpadc_irq,
+					IRQF_ONESHOT, "msic_adc", mgi)) {
+		dev_err(&pdev->dev, "unable to register irq %d\n", mgi->irq);
+		err = -ENODEV;
+		goto err_exit;
+	}
+
+	gpadc_write(ADC1ADDR0, MSIC_STOPCH);
+	INIT_WORK(&mgi->trimming_work, gpadc_trimming);
+	INIT_WORK(&mgi->gsmpulse_work, gpadc_gsmpulse_work);
+	queue_work(mgi->workq, &mgi->trimming_work);
+	wait_event(mgi->trimming_wait, mgi->trimming_start);
+	mgi->initialized = 1;
+
+	init_completion(&gsmadc_complete);
+
+	err = sysfs_create_group(&pdev->dev.kobj,
+			&intel_mid_gpadc_attr_group);
+	if (err) {
+		dev_err(&pdev->dev, "Unable to export sysfs interface, error: %d\n",
+			err);
+		goto err_release_irq;
+	}
+
+	return 0;
+
+err_release_irq:
+	free_irq(mgi->irq, mgi);
+err_exit:
+	if (mgi->intr)
+		iounmap(mgi->intr);
+	return err;
+}
+
+static int msic_gpadc_remove(struct platform_device *pdev)
+{
+	struct gpadc_info *mgi = &gpadc_info;
+
+	sysfs_remove_group(&pdev->dev.kobj, &intel_mid_gpadc_attr_group);
+	free_irq(mgi->irq, mgi);
+	iounmap(mgi->intr);
+	flush_workqueue(mgi->workq);
+	destroy_workqueue(mgi->workq);
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int msic_gpadc_suspend_noirq(struct device *dev)
+{
+	struct gpadc_info *mgi = &gpadc_info;
+
+	/* If the gpadc is locked, it means gpadc is still in active mode. */
+	if (mutex_trylock(&mgi->lock))
+		return 0;
+	else
+		return -EBUSY;
+}
+
+static int msic_gpadc_resume_noirq(struct device *dev)
+{
+	struct gpadc_info *mgi = &gpadc_info;
+
+	mutex_unlock(&mgi->lock);
+	return 0;
+}
+#else
+#define msic_gpadc_suspend_noirq    NULL
+#define msic_gpadc_resume_noirq     NULL
+#endif
+
+static const struct dev_pm_ops msic_gpadc_driver_pm_ops = {
+	.suspend_noirq	= msic_gpadc_suspend_noirq,
+	.resume_noirq	= msic_gpadc_resume_noirq,
+};
+
+static struct platform_driver msic_gpadc_driver = {
+	.driver = {
+		   .name = "msic_adc",
+		   .owner = THIS_MODULE,
+		   .pm = &msic_gpadc_driver_pm_ops,
+		   },
+	.probe = msic_gpadc_probe,
+	.remove = msic_gpadc_remove,
+};
+
+static int msic_gpadc_module_init(void)
+{
+	return platform_driver_register(&msic_gpadc_driver);
+}
+
+static void msic_gpadc_module_exit(void)
+{
+	platform_driver_unregister(&msic_gpadc_driver);
+}
+
+static int msic_adc_rpmsg_probe(struct rpmsg_channel *rpdev)
+{
+	int ret = 0;
+
+	if (rpdev == NULL) {
+		pr_err("rpmsg channel not created\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	dev_info(&rpdev->dev, "Probed msic_gpadc rpmsg device\n");
+
+	ret = msic_gpadc_module_init();
+
+out:
+	return ret;
+}
+
+static void msic_adc_rpmsg_remove(struct rpmsg_channel *rpdev)
+{
+	msic_gpadc_module_exit();
+	dev_info(&rpdev->dev, "Removed msic_gpadc rpmsg device\n");
+}
+
+static void msic_adc_rpmsg_cb(struct rpmsg_channel *rpdev, void *data,
+					int len, void *priv, u32 src)
+{
+	dev_warn(&rpdev->dev, "unexpected, message\n");
+
+	print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+		       data, len,  true);
+}
+
+static struct rpmsg_device_id msic_adc_rpmsg_id_table[] = {
+	{ .name	= "rpmsg_msic_adc" },
+	{ },
+};
+MODULE_DEVICE_TABLE(rpmsg, msic_adc_rpmsg_id_table);
+
+static struct rpmsg_driver msic_adc_rpmsg = {
+	.drv.name	= KBUILD_MODNAME,
+	.drv.owner	= THIS_MODULE,
+	.id_table	= msic_adc_rpmsg_id_table,
+	.probe		= msic_adc_rpmsg_probe,
+	.callback	= msic_adc_rpmsg_cb,
+	.remove		= msic_adc_rpmsg_remove,
+};
+
+static int __init msic_adc_rpmsg_init(void)
+{
+	return register_rpmsg_driver(&msic_adc_rpmsg);
+}
+
+#ifdef MODULE
+module_init(msic_adc_rpmsg_init);
+#else
+rootfs_initcall(msic_adc_rpmsg_init);
+#endif
+
+static void __exit msic_adc_rpmsg_exit(void)
+{
+	return unregister_rpmsg_driver(&msic_adc_rpmsg);
+}
+module_exit(msic_adc_rpmsg_exit);
+
+MODULE_AUTHOR("Jenny TC <jenny.tc@intel.com>");
+MODULE_DESCRIPTION("Intel Medfield MSIC GPADC Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index e3b037c..e633856 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -1,5 +1,5 @@
 /*
- * k10temp.c - AMD Family 10h/11h/12h/14h/15h processor hardware monitoring
+ * k10temp.c - AMD Family 10h/11h/12h/14h/15h/16h processor hardware monitoring
  *
  * Copyright (c) 2009 Clemens Ladisch <clemens@ladisch.de>
  *
@@ -211,6 +211,7 @@
 	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
 	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
 	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
+	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
 	{}
 };
 MODULE_DEVICE_TABLE(pci, k10temp_id_table);
diff --git a/drivers/hwmon/max6697.c b/drivers/hwmon/max6697.c
index 328fb03..a41b5f3 100644
--- a/drivers/hwmon/max6697.c
+++ b/drivers/hwmon/max6697.c
@@ -605,12 +605,12 @@
 		if (ret < 0)
 			return ret;
 		ret = i2c_smbus_write_byte_data(client, MAX6581_REG_IDEALITY,
-						pdata->ideality_mask >> 1);
+						pdata->ideality_value);
 		if (ret < 0)
 			return ret;
 		ret = i2c_smbus_write_byte_data(client,
 						MAX6581_REG_IDEALITY_SELECT,
-						pdata->ideality_value);
+						pdata->ideality_mask >> 1);
 		if (ret < 0)
 			return ret;
 	}
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index 04638ae..99cec18 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -199,7 +199,7 @@
 	0, 1, 2, 3, 8, 21, 20, 16,	/* in0.. in7 */
 	17, -1, -1, -1, -1, -1, -1,	/* in8..in14 */
 	-1,				/* unused */
-	6, 7, 11, 10, 23,		/* fan1..fan5 */
+	6, 7, 11, -1, -1,		/* fan1..fan5 */
 	-1, -1, -1,			/* unused */
 	4, 5, 13, -1, -1, -1,		/* temp1..temp6 */
 	12, -1 };			/* intrusion0, intrusion1 */
@@ -625,6 +625,7 @@
 	u8 has_fan_min;		/* some fans don't have min register */
 	bool has_fan_div;
 
+	u8 num_temp_alarms;	/* 2 or 3 */
 	u8 temp_fixed_num;	/* 3 or 6 */
 	u8 temp_type[NUM_TEMP_FIXED];
 	s8 temp_offset[NUM_TEMP_FIXED];
@@ -1193,6 +1194,42 @@
 		       (unsigned int)((data->alarms >> nr) & 0x01));
 }
 
+static int find_temp_source(struct nct6775_data *data, int index, int count)
+{
+	int source = data->temp_src[index];
+	int nr;
+
+	for (nr = 0; nr < count; nr++) {
+		int src;
+
+		src = nct6775_read_value(data,
+					 data->REG_TEMP_SOURCE[nr]) & 0x1f;
+		if (src == source)
+			return nr;
+	}
+	return -1;
+}
+
+static ssize_t
+show_temp_alarm(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr);
+	struct nct6775_data *data = nct6775_update_device(dev);
+	unsigned int alarm = 0;
+	int nr;
+
+	/*
+	 * For temperatures, there is no fixed mapping from registers to alarm
+	 * bits. Alarm bits are determined by the temperature source mapping.
+	 */
+	nr = find_temp_source(data, sattr->index, data->num_temp_alarms);
+	if (nr >= 0) {
+		int bit = data->ALARM_BITS[nr + TEMP_ALARM_BASE];
+		alarm = (data->alarms >> bit) & 0x01;
+	}
+	return sprintf(buf, "%u\n", alarm);
+}
+
 static SENSOR_DEVICE_ATTR_2(in0_input, S_IRUGO, show_in_reg, NULL, 0, 0);
 static SENSOR_DEVICE_ATTR_2(in1_input, S_IRUGO, show_in_reg, NULL, 1, 0);
 static SENSOR_DEVICE_ATTR_2(in2_input, S_IRUGO, show_in_reg, NULL, 2, 0);
@@ -1874,22 +1911,18 @@
 };
 
 static struct sensor_device_attribute sda_temp_alarm[] = {
-	SENSOR_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL,
-		    TEMP_ALARM_BASE),
-	SENSOR_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL,
-		    TEMP_ALARM_BASE + 1),
-	SENSOR_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL,
-		    TEMP_ALARM_BASE + 2),
-	SENSOR_ATTR(temp4_alarm, S_IRUGO, show_alarm, NULL,
-		    TEMP_ALARM_BASE + 3),
-	SENSOR_ATTR(temp5_alarm, S_IRUGO, show_alarm, NULL,
-		    TEMP_ALARM_BASE + 4),
-	SENSOR_ATTR(temp6_alarm, S_IRUGO, show_alarm, NULL,
-		    TEMP_ALARM_BASE + 5),
+	SENSOR_ATTR(temp1_alarm, S_IRUGO, show_temp_alarm, NULL, 0),
+	SENSOR_ATTR(temp2_alarm, S_IRUGO, show_temp_alarm, NULL, 1),
+	SENSOR_ATTR(temp3_alarm, S_IRUGO, show_temp_alarm, NULL, 2),
+	SENSOR_ATTR(temp4_alarm, S_IRUGO, show_temp_alarm, NULL, 3),
+	SENSOR_ATTR(temp5_alarm, S_IRUGO, show_temp_alarm, NULL, 4),
+	SENSOR_ATTR(temp6_alarm, S_IRUGO, show_temp_alarm, NULL, 5),
+	SENSOR_ATTR(temp7_alarm, S_IRUGO, show_temp_alarm, NULL, 6),
+	SENSOR_ATTR(temp8_alarm, S_IRUGO, show_temp_alarm, NULL, 7),
+	SENSOR_ATTR(temp9_alarm, S_IRUGO, show_temp_alarm, NULL, 8),
+	SENSOR_ATTR(temp10_alarm, S_IRUGO, show_temp_alarm, NULL, 9),
 };
 
-#define NUM_TEMP_ALARM	ARRAY_SIZE(sda_temp_alarm)
-
 static ssize_t
 show_pwm_mode(struct device *dev, struct device_attribute *attr, char *buf)
 {
@@ -3215,13 +3248,11 @@
 		device_remove_file(dev, &sda_temp_max[i].dev_attr);
 		device_remove_file(dev, &sda_temp_max_hyst[i].dev_attr);
 		device_remove_file(dev, &sda_temp_crit[i].dev_attr);
+		device_remove_file(dev, &sda_temp_alarm[i].dev_attr);
 		if (!(data->have_temp_fixed & (1 << i)))
 			continue;
 		device_remove_file(dev, &sda_temp_type[i].dev_attr);
 		device_remove_file(dev, &sda_temp_offset[i].dev_attr);
-		if (i >= NUM_TEMP_ALARM)
-			continue;
-		device_remove_file(dev, &sda_temp_alarm[i].dev_attr);
 	}
 
 	device_remove_file(dev, &sda_caseopen[0].dev_attr);
@@ -3419,6 +3450,7 @@
 		data->auto_pwm_num = 6;
 		data->has_fan_div = true;
 		data->temp_fixed_num = 3;
+		data->num_temp_alarms = 3;
 
 		data->ALARM_BITS = NCT6775_ALARM_BITS;
 
@@ -3483,6 +3515,7 @@
 		data->auto_pwm_num = 4;
 		data->has_fan_div = false;
 		data->temp_fixed_num = 3;
+		data->num_temp_alarms = 3;
 
 		data->ALARM_BITS = NCT6776_ALARM_BITS;
 
@@ -3547,6 +3580,7 @@
 		data->auto_pwm_num = 4;
 		data->has_fan_div = false;
 		data->temp_fixed_num = 6;
+		data->num_temp_alarms = 2;
 
 		data->ALARM_BITS = NCT6779_ALARM_BITS;
 
@@ -3843,10 +3877,12 @@
 						 &sda_fan_input[i].dev_attr);
 			if (err)
 				goto exit_remove;
-			err = device_create_file(dev,
-						 &sda_fan_alarm[i].dev_attr);
-			if (err)
-				goto exit_remove;
+			if (data->ALARM_BITS[FAN_ALARM_BASE + i] >= 0) {
+				err = device_create_file(dev,
+						&sda_fan_alarm[i].dev_attr);
+				if (err)
+					goto exit_remove;
+			}
 			if (data->kind != nct6776 &&
 			    data->kind != nct6779) {
 				err = device_create_file(dev,
@@ -3897,6 +3933,12 @@
 			if (err)
 				goto exit_remove;
 		}
+		if (find_temp_source(data, i, data->num_temp_alarms) >= 0) {
+			err = device_create_file(dev,
+						 &sda_temp_alarm[i].dev_attr);
+			if (err)
+				goto exit_remove;
+		}
 		if (!(data->have_temp_fixed & (1 << i)))
 			continue;
 		err = device_create_file(dev, &sda_temp_type[i].dev_attr);
@@ -3905,12 +3947,6 @@
 		err = device_create_file(dev, &sda_temp_offset[i].dev_attr);
 		if (err)
 			goto exit_remove;
-		if (i >= NUM_TEMP_ALARM ||
-		    data->ALARM_BITS[TEMP_ALARM_BASE + i] < 0)
-			continue;
-		err = device_create_file(dev, &sda_temp_alarm[i].dev_attr);
-		if (err)
-			goto exit_remove;
 	}
 
 	for (i = 0; i < ARRAY_SIZE(sda_caseopen); i++) {
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 631736e..4faf02b 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -150,6 +150,7 @@
 	    ATI SB700/SP5100
 	    ATI SB800
 	    AMD Hudson-2
+	    AMD CZ
 	    Serverworks OSB4
 	    Serverworks CSB5
 	    Serverworks CSB6
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index cd82eb4..7c9f053 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -393,6 +393,9 @@
 
 	desc = &priv->hw[priv->head];
 
+	/* Initialize the DMA buffer */
+	memset(priv->dma_buffer, 0, sizeof(priv->dma_buffer));
+
 	/* Initialize the descriptor */
 	memset(desc, 0, sizeof(struct ismt_desc));
 	desc->tgtaddr_rw = ISMT_DESC_ADDR_RW(addr, read_write);
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index 2039f23..6d8094d 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -494,7 +494,7 @@
 	 * based on this empirical measurement and a lot of previous frobbing.
 	 */
 	i2c->cmd_err = 0;
-	if (msg->len < 8) {
+	if (0) {	/* disable PIO mode until a proper fix is made */
 		ret = mxs_i2c_pio_setup_xfer(adap, msg, flags);
 		if (ret)
 			mxs_i2c_reset(i2c);
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index e02f9e3..b06be8e 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -941,6 +941,9 @@
 		/*
 		 * ProDB0017052: Clear ARDY bit twice
 		 */
+		if (stat & OMAP_I2C_STAT_ARDY)
+			omap_i2c_ack_stat(dev, OMAP_I2C_STAT_ARDY);
+
 		if (stat & (OMAP_I2C_STAT_ARDY | OMAP_I2C_STAT_NACK |
 					OMAP_I2C_STAT_AL)) {
 			omap_i2c_ack_stat(dev, (OMAP_I2C_STAT_RRDY |
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index 39ab78c..d05ad59 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -22,7 +22,7 @@
 	Intel PIIX4, 440MX
 	Serverworks OSB4, CSB5, CSB6, HT-1000, HT-1100
 	ATI IXP200, IXP300, IXP400, SB600, SB700/SP5100, SB800
-	AMD Hudson-2
+	AMD Hudson-2, CZ
 	SMSC Victory66
 
    Note: we assume there can only be one device, with one or more
@@ -522,6 +522,7 @@
 	{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_SMBUS) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SMBUS) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x790b) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS,
 		     PCI_DEVICE_ID_SERVERWORKS_OSB4) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS,
diff --git a/drivers/i2c/i2c-boardinfo.c b/drivers/i2c/i2c-boardinfo.c
index f24cc64..7b39bbc 100644
--- a/drivers/i2c/i2c-boardinfo.c
+++ b/drivers/i2c/i2c-boardinfo.c
@@ -61,7 +61,7 @@
  * The board info passed can safely be __initdata, but be careful of embedded
  * pointers (for platform_data, functions, etc) since that won't be copied.
  */
-int __init
+int
 i2c_register_board_info(int busnum,
 	struct i2c_board_info const *info, unsigned len)
 {
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index fa6964d..334489c 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -61,13 +61,21 @@
 #include <linux/notifier.h>
 #include <linux/cpu.h>
 #include <linux/module.h>
+#include <linux/intel_mid_pm.h>
 #include <asm/cpu_device_id.h>
 #include <asm/mwait.h>
 #include <asm/msr.h>
+#include <asm/io_apic.h>
+
 
 #define INTEL_IDLE_VERSION "0.4"
 #define PREFIX "intel_idle: "
 
+#define CLPU_CR_C6_POLICY_CONFIG	0x668
+#define CLPU_MD_C6_POLICY_CONFIG	0x669
+#define DISABLE_CORE_C6_DEMOTION	0x0
+#define DISABLE_MODULE_C6_DEMOTION	0x0
+
 static struct cpuidle_driver intel_idle_driver = {
 	.name = "intel_idle",
 	.owner = THIS_MODULE,
@@ -330,6 +338,227 @@
 		.enter = NULL }
 };
 
+static struct cpuidle_state vlv_cstates[CPUIDLE_STATE_MAX] = {
+	{ /* MWAIT C1 */
+		.name = "C1-ATM",
+		.desc = "MWAIT 0x00",
+		.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID,
+		.exit_latency = 1,
+		.target_residency = 4,
+		.enter = &intel_idle },
+	{ /* MWAIT C4 */
+		.name = "C4-ATM",
+		.desc = "MWAIT 0x30",
+		.flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 100,
+		.target_residency = 400,
+		.enter = &intel_idle },
+	{ /* MWAIT C6 */
+		.name = "C6-ATM",
+		.desc = "MWAIT 0x52",
+		.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 140,
+		.target_residency = 560,
+		.enter = &intel_idle },
+	{ /* MWAIT C7-S0i1 */
+		.name = "S0i1-ATM",
+		.desc = "MWAIT 0x60",
+		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 1200,
+		.target_residency = 4000,
+		.enter = &intel_idle },
+	{ /* MWAIT C9-S0i3 */
+		.name = "S0i3-ATM",
+		.desc = "MWAIT 0x64",
+		.flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 10000,
+		.target_residency = 20000,
+		.enter = &intel_idle },
+	{
+		.enter = NULL }
+};
+
+static struct cpuidle_state chv_cstates[CPUIDLE_STATE_MAX] = {
+	{ /* MWAIT C1 */
+		.name = "C1-ATM",
+		.desc = "MWAIT 0x00",
+		.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID,
+		.exit_latency = 1,
+		.target_residency = 4,
+		.enter = &intel_idle },
+	{ /* MWAIT C4 */
+		.name = "C4-ATM",
+		.desc = "MWAIT 0x30",
+		.flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TIME_VALID
+						| CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 100,
+		.target_residency = 400,
+		.enter = &intel_idle },
+	{ /* MWAIT C6 */
+		.name = "C6-ATM",
+		.desc = "MWAIT 0x52",
+		.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TIME_VALID
+						| CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 140,
+		.target_residency = 560,
+		.enter = &intel_idle },
+	{ /* MWAIT C7-S0i1 */
+		.name = "S0i1-ATM",
+		.desc = "MWAIT 0x60",
+		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TIME_VALID
+						| CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 1200,
+		.target_residency = 4000,
+		.enter = &intel_idle },
+	{ /* MWAIT C8-S0i2 */
+		.name = "S0i2-ATM",
+		.desc = "MWAIT 0x62",
+		.flags = MWAIT2flg(0x62) | CPUIDLE_FLAG_TIME_VALID
+						| CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 2000,
+		.target_residency = 8000,
+		.enter = &intel_idle },
+	{ /* MWAIT C9-S0i3 */
+		.name = "S0i3-ATM",
+		.desc = "MWAIT 0x64",
+		.flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TIME_VALID
+						| CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 10000,
+		.target_residency = 20000,
+		.enter = &intel_idle },
+	{
+		.enter = NULL }
+};
+
+#if defined(CONFIG_ATOM_SOC_POWER)
+static struct cpuidle_state mrfld_cstates[CPUIDLE_STATE_MAX] = {
+	{ /* MWAIT C1 */
+		.name = "C1-ATM",
+		.desc = "MWAIT 0x00",
+		.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID,
+		.exit_latency = 1,
+		.target_residency = 4,
+		.enter = &intel_idle },
+	{ /* MWAIT C4 */
+		.name = "C4-ATM",
+		.desc = "MWAIT 0x30",
+		.flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 100,
+		.target_residency = 400,
+		.enter = &intel_idle },
+	{ /* MWAIT C6 */
+		.name = "C6-ATM",
+		.desc = "MWAIT 0x52",
+		.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 140,
+		.target_residency = 560,
+		.enter = &intel_idle },
+	{ /* MWAIT C7-S0i1 */
+		.name = "S0i1-ATM",
+		.desc = "MWAIT 0x60",
+		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 1200,
+		.target_residency = 4000,
+		.enter = &intel_idle },
+	{ /* MWAIT C9-S0i3 */
+		.name = "S0i3-ATM",
+		.desc = "MWAIT 0x64",
+		.flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 10000,
+		.target_residency = 20000,
+		.enter = &intel_idle },
+	{
+		.enter = NULL }
+};
+#else
+#define mrfld_cstates atom_cstates
+#endif
+
+static struct cpuidle_state moorfld_cstates[CPUIDLE_STATE_MAX] = {
+	{ /* MWAIT C1 */
+		.name = "C1-ATM",
+		.desc = "MWAIT 0x00",
+		.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID,
+		.exit_latency = 1,
+		.target_residency = 4,
+		.enter = &intel_idle },
+	{ /* MWAIT C6 */
+		.name = "C6-ATM",
+		.desc = "MWAIT 0x52",
+		.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TIME_VALID
+					 | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 140,
+		.target_residency = 560,
+		.enter = &intel_idle },
+	{ /* MWAIT C7-S0i1 */
+		.name = "S0i1-ATM",
+		.desc = "MWAIT 0x60",
+		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TIME_VALID
+					 | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 1200,
+		.target_residency = 4000,
+		.enter = &intel_idle },
+	{ /* MWAIT C9-S0i3 */
+		.name = "S0i3-ATM",
+		.desc = "MWAIT 0x64",
+		.flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TIME_VALID
+					 | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 10000,
+		.target_residency = 20000,
+		.enter = &intel_idle },
+	{
+		.enter = NULL }
+};
+
+#ifdef CONFIG_ATOM_SOC_POWER
+static unsigned int get_target_residency(unsigned int cstate)
+{
+	unsigned int t_sleep = cpuidle_state_table[cstate].target_residency;
+	unsigned int prev_idx;
+
+	/* get the previous lower sleep state */
+	if ((cstate == 5) || (cstate == 6))
+		prev_idx = cstate - 2;
+	else
+		prev_idx = cstate - 1;
+
+	/* calculate target_residency only if not defined already */
+	if (!t_sleep) {
+		/* Use C0 power usage to calculate the target residency */
+		unsigned int p_active = C0_POWER_USAGE;
+		unsigned int prev_state_power = cpuidle_state_table
+							[prev_idx].power_usage;
+		unsigned int curr_state_power = cpuidle_state_table
+							[cstate].power_usage;
+		unsigned int prev_state_lat = cpuidle_state_table
+							[prev_idx].exit_latency;
+		unsigned int curr_state_lat = cpuidle_state_table
+							[cstate].exit_latency;
+
+		if (curr_state_power && prev_state_power && p_active &&
+		    prev_state_lat && curr_state_lat &&
+		    (curr_state_lat > prev_state_lat) &&
+		    (prev_state_power > curr_state_power)) {
+
+			t_sleep = ((p_active * (curr_state_lat - prev_state_lat))
+					+ (prev_state_lat * prev_state_power)
+					- (curr_state_lat * curr_state_power)) /
+				  (prev_state_power - curr_state_power);
+
+			/* round-up target_residency */
+			t_sleep++;
+
+		}
+	}
+
+	WARN_ON(!t_sleep);
+
+	pr_debug(PREFIX "cpuidle: target_residency[%d]= %d\n", cstate, t_sleep);
+
+	return t_sleep;
+}
+#endif
+
 /**
  * intel_idle
  * @dev: cpuidle_device
@@ -347,6 +576,17 @@
 	unsigned int cstate;
 	int cpu = smp_processor_id();
 
+#if (defined(CONFIG_ATOM_SOC_POWER) && \
+	defined(CONFIG_PM_DEBUG))
+	{
+		/* Get Cstate based on ignore table from PMU driver */
+		unsigned int ncstate;
+		cstate =
+		(((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1;
+		ncstate = pmu_get_new_cstate(cstate, &index);
+		eax	= flg2MWAIT(drv->states[index].flags);
+	}
+#endif
 	cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1;
 
 	/*
@@ -360,7 +600,6 @@
 		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
 
 	if (!need_resched()) {
-
 		__monitor((void *)&current_thread_info()->flags, 0, 0);
 		smp_mb();
 		if (!need_resched())
@@ -462,6 +701,22 @@
 	.disable_promotion_to_c1e = true,
 };
 
+static const struct idle_cpu idle_cpu_mrfld = {
+	.state_table = mrfld_cstates,
+};
+
+static const struct idle_cpu idle_cpu_vlv = {
+	.state_table = vlv_cstates,
+};
+
+static const struct idle_cpu idle_cpu_moorfld = {
+	.state_table = moorfld_cstates,
+};
+
+static const struct idle_cpu idle_cpu_chv = {
+	.state_table = chv_cstates,
+};
+
 #define ICPU(model, cpu) \
 	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu }
 
@@ -477,12 +732,16 @@
 	ICPU(0x2f, idle_cpu_nehalem),
 	ICPU(0x2a, idle_cpu_snb),
 	ICPU(0x2d, idle_cpu_snb),
+	ICPU(0x4c, idle_cpu_chv),
+	ICPU(0x37, idle_cpu_vlv),
 	ICPU(0x3a, idle_cpu_ivb),
 	ICPU(0x3e, idle_cpu_ivb),
 	ICPU(0x3c, idle_cpu_hsw),
 	ICPU(0x3f, idle_cpu_hsw),
 	ICPU(0x45, idle_cpu_hsw),
 	ICPU(0x46, idle_cpu_hsw),
+	ICPU(0x4a, idle_cpu_mrfld),	/* Tangier SoC */
+	ICPU(0x5a, idle_cpu_moorfld),	/* Anniedale SoC */
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
@@ -582,18 +841,35 @@
 		mwait_substate = MWAIT_HINT2SUBSTATE(mwait_hint);
 
 		/* does the state exist in CPUID.MWAIT? */
-		num_substates = (mwait_substates >> ((mwait_cstate + 1) * 4))
+
+		/* FIXME: Do not check number of substates for any states above C6
+		 * as these are not real C states supported by the CPU, they
+		 * are emulated c states for s0ix support.
+		*/
+		if ((mwait_cstate + 1) <= 6) {
+			num_substates = (mwait_substates >> ((mwait_cstate + 1) * 4))
 					& MWAIT_SUBSTATE_MASK;
+			if (num_substates == 0)
+				continue;
+		}
 
-		/* if sub-state in table is not enumerated by CPUID */
-		if ((mwait_substate + 1) > num_substates)
-			continue;
-
+#if !defined(CONFIG_ATOM_SOC_POWER)
+		if ((boot_cpu_data.x86_model != 0x37) && (boot_cpu_data.x86_model != 0x4c)) {
+			/* if sub-state in table is not enumerated by CPUID */
+			if ((mwait_substate + 1) > num_substates)
+				continue;
+		}
+#endif
 		if (((mwait_cstate + 1) > 2) &&
 			!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
 			mark_tsc_unstable("TSC halts in idle"
 					" states deeper than C2");
 
+#ifdef CONFIG_ATOM_SOC_POWER
+		/* Calculate target_residency if power_usage is given */
+		cpuidle_state_table[cstate].target_residency =
+			get_target_residency(cstate);
+#endif
 		drv->states[drv->state_count] =	/* structure copy */
 			cpuidle_state_table[cstate];
 
@@ -625,7 +901,7 @@
 	dev->state_count = 1;
 
 	for (cstate = 0; cstate < CPUIDLE_STATE_MAX; ++cstate) {
-		int num_substates, mwait_hint, mwait_cstate, mwait_substate;
+		int num_substates = 0, mwait_hint, mwait_cstate, mwait_substate;
 
 		if (cpuidle_state_table[cstate].enter == NULL)
 			continue;
@@ -640,13 +916,25 @@
 		mwait_substate = MWAIT_HINT2SUBSTATE(mwait_hint);
 
 		/* does the state exist in CPUID.MWAIT? */
-		num_substates = (mwait_substates >> ((mwait_cstate + 1) * 4))
+
+		/* FIXME: Do not check number of substates for any states above C6
+		 * as these are not real C states supported by the CPU, they
+		 * are emulated c states for s0ix support.
+		 */
+		if ((mwait_cstate + 1) <= 6) {
+			num_substates = (mwait_substates >> ((mwait_cstate + 1) * 4))
 					& MWAIT_SUBSTATE_MASK;
+			if (num_substates == 0)
+				continue;
+		}
 
-		/* if sub-state in table is not enumerated by CPUID */
-		if ((mwait_substate + 1) > num_substates)
-			continue;
-
+#if !defined(CONFIG_ATOM_SOC_POWER)
+		if ((boot_cpu_data.x86_model != 0x37) && (boot_cpu_data.x86_model != 0x4c)) {
+			/* if sub-state in table is not enumerated by CPUID */
+			if ((mwait_substate + 1) > num_substates)
+				continue;
+		}
+#endif
 		dev->state_count += 1;
 	}
 
@@ -661,6 +949,8 @@
 	if (icpu->auto_demotion_disable_flags)
 		smp_call_function_single(cpu, auto_demotion_disable, NULL, 1);
 
+	__get_cpu_var(update_buckets) = 1;
+
 	return 0;
 }
 
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index ab0767e6..797ab0f 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -157,4 +157,10 @@
 	  Say yes here to access the ADC part of the Nano River
 	  Technologies Viperboard.
 
+config IIO_BASINCOVE_GPADC
+	tristate "IIO Basincove GPADC driver"
+	depends on IIO
+	help
+	  Say yes here to build support for the IIO basincove GPADC driver.
+
 endmenu
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index 0a825be..88c3e1a 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -17,3 +17,6 @@
 obj-$(CONFIG_TI_ADC081C) += ti-adc081c.o
 obj-$(CONFIG_TI_AM335X_ADC) += ti_am335x_adc.o
 obj-$(CONFIG_VIPERBOARD_ADC) += viperboard_adc.o
+obj-$(CONFIG_IIO_BASINCOVE_GPADC) += iio_basincove_gpadc.o
+obj-$(CONFIG_CRYSTAL_COVE) += iio_crystalcove_gpadc.o
+obj-$(CONFIG_INTEL_MID_PMIC) += iio_dc_xpwr_gpadc.o
diff --git a/drivers/iio/adc/iio_basincove_gpadc.c b/drivers/iio/adc/iio_basincove_gpadc.c
new file mode 100644
index 0000000..4315b58
--- /dev/null
+++ b/drivers/iio/adc/iio_basincove_gpadc.c
@@ -0,0 +1,677 @@
+/*
+ * iio_basincove_gpadc.c - Intel Merrifield Basin Cove GPADC Driver
+ *
+ * Copyright (C) 2012 Intel Corporation
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * Author: Bin Yang <bin.yang@intel.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/sched.h>
+#include <linux/intel_mid_pm.h>
+#include <linux/rpmsg.h>
+
+#include <asm/intel_mid_rpmsg.h>
+#include <asm/intel_mid_remoteproc.h>
+#include <asm/intel_scu_pmic.h>
+#include <asm/intel_basincove_gpadc.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/machine.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/driver.h>
+#include <linux/iio/types.h>
+#include <linux/iio/consumer.h>
+
+struct gpadc_info {
+	int initialized;
+	/* This mutex protects gpadc sample/config from concurrent conflict.
+	   Any function, which does the sample or config, needs to
+	   hold this lock.
+	   If it is locked, it also means the gpadc is in active mode.
+	*/
+	struct mutex lock;
+	struct device *dev;
+	int irq;
+	u8 irq_status;
+	wait_queue_head_t wait;
+	int sample_done;
+	void __iomem *intr;
+	u8 intr_mask;
+	int channel_num;
+	struct gpadc_regmap_t *gpadc_regmaps;
+	struct gpadc_regs_t *gpadc_regs;
+	u8 pmic_id;
+	bool is_pmic_provisioned;
+};
+
+static inline int gpadc_clear_bits(u16 addr, u8 mask)
+{
+	return intel_scu_ipc_update_register(addr, 0, mask);
+}
+
+static inline int gpadc_set_bits(u16 addr, u8 mask)
+{
+	return intel_scu_ipc_update_register(addr, 0xff, mask);
+}
+
+static inline int gpadc_write(u16 addr, u8 data)
+{
+	return intel_scu_ipc_iowrite8(addr, data);
+}
+
+static inline int gpadc_read(u16 addr, u8 *data)
+{
+	return intel_scu_ipc_ioread8(addr, data);
+}
+
+static int gpadc_busy_wait(struct gpadc_regs_t *regs)
+{
+	u8 tmp;
+	int timeout = 0;
+
+	gpadc_read(regs->gpadcreq, &tmp);
+	while (tmp & regs->gpadcreq_busy && timeout < 500) {
+		gpadc_read(regs->gpadcreq, &tmp);
+		usleep_range(1800, 2000);
+		timeout++;
+	}
+
+	if (tmp & regs->gpadcreq_busy)
+		return -EBUSY;
+	else
+		return 0;
+}
+
+static void gpadc_dump(struct gpadc_info *info)
+{
+	u8 tmp;
+	struct gpadc_regs_t *regs = info->gpadc_regs;
+
+	dev_err(info->dev, "GPADC registers dump:\n");
+	gpadc_read(regs->adcirq, &tmp);
+	dev_err(info->dev, "ADCIRQ: 0x%x\n", tmp);
+	gpadc_read(regs->madcirq, &tmp);
+	dev_err(info->dev, "MADCIRQ: 0x%x\n", tmp);
+	gpadc_read(regs->gpadcreq, &tmp);
+	dev_err(info->dev, "GPADCREQ: 0x%x\n", tmp);
+	gpadc_read(regs->adc1cntl, &tmp);
+	dev_err(info->dev, "ADC1CNTL: 0x%x\n", tmp);
+}
+
+static irqreturn_t gpadc_isr(int irq, void *data)
+{
+	struct gpadc_info *info = iio_priv(data);
+
+	info->irq_status = ioread8(info->intr);
+	info->sample_done = 1;
+	wake_up(&info->wait);
+	return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t gpadc_threaded_isr(int irq, void *data)
+{
+	struct gpadc_info *info = iio_priv(data);
+	struct gpadc_regs_t *regs = info->gpadc_regs;
+
+	/* Clear IRQLVL1MASK */
+	gpadc_clear_bits(regs->mirqlvl1, regs->mirqlvl1_adc);
+
+	return IRQ_HANDLED;
+}
+
+
+/**
+ * iio_basincove_gpadc_sample - do gpadc sample.
+ * @indio_dev: industrial IO GPADC device handle
+ * @ch: gpadc bit set of channels to sample, for example, set ch = (1<<0)|(1<<2)
+ *	means you are going to sample both channel 0 and 2 at the same time.
+ * @res:gpadc sampling result
+ *
+ * Returns 0 on success or an error code.
+ *
+ * This function may sleep.
+ */
+
+int iio_basincove_gpadc_sample(struct iio_dev *indio_dev,
+				int ch, struct gpadc_result *res)
+{
+	struct gpadc_info *info = iio_priv(indio_dev);
+	int i, ret, reg_val;
+	u8 tmp, th, tl;
+	u8 mask, cursrc;
+	unsigned long rlsb;
+	unsigned long rlsb_array[] = {
+		0, 260420, 130210, 65100, 32550, 16280,
+		8140, 4070, 2030, 0, 260420, 130210};
+
+	struct gpadc_regs_t *regs = info->gpadc_regs;
+	bool pmic_a0 = false;
+
+	if (!info->initialized)
+		return -ENODEV;
+
+	pmic_a0 = ((info->pmic_id & PMIC_MAJOR_REV_MASK) == PMIC_MAJOR_REV_A0)
+		&& ((info->pmic_id & PMIC_MINOR_REV_MASK) == PMIC_MINOR_REV_X0);
+
+	mutex_lock(&info->lock);
+
+	mask = info->intr_mask;
+	gpadc_clear_bits(regs->madcirq, mask);
+	gpadc_clear_bits(regs->mirqlvl1, regs->mirqlvl1_adc);
+
+	tmp = regs->gpadcreq_irqen;
+
+	for (i = 0; i < info->channel_num; i++) {
+		if (ch & (1 << i))
+			tmp |= (1 << info->gpadc_regmaps[i].cntl);
+	}
+
+	info->sample_done = 0;
+
+	ret = gpadc_busy_wait(regs);
+	if (ret) {
+		dev_err(info->dev, "GPADC is busy\n");
+		goto done;
+	}
+
+	gpadc_write(regs->gpadcreq, tmp);
+
+	ret = wait_event_timeout(info->wait, info->sample_done, HZ);
+	if (ret == 0) {
+		gpadc_dump(info);
+		ret = -ETIMEDOUT;
+		dev_err(info->dev, "sample timeout, return %d\n", ret);
+		goto done;
+	} else {
+		ret = 0;
+	}
+
+	for (i = 0; i < info->channel_num; i++) {
+		if (ch & (1 << i)) {
+			gpadc_read(info->gpadc_regmaps[i].rsltl, &tl);
+			gpadc_read(info->gpadc_regmaps[i].rslth, &th);
+
+			reg_val = ((th & 0xF) << 8) + tl;
+
+			if ((info->pmic_id & PMIC_VENDOR_ID_MASK)
+					== SHADYCOVE_VENDORID) {
+				switch (i) {
+				case PMIC_GPADC_CHANNEL_VBUS:
+				case PMIC_GPADC_CHANNEL_PMICTEMP:
+				case PMIC_GPADC_CHANNEL_PEAK:
+				case PMIC_GPADC_CHANNEL_AGND:
+				case PMIC_GPADC_CHANNEL_VREF:
+					/* Auto mode not applicable */
+					res->data[i] = reg_val;
+					break;
+				case PMIC_GPADC_CHANNEL_BATID:
+				case PMIC_GPADC_CHANNEL_BATTEMP0:
+				case PMIC_GPADC_CHANNEL_BATTEMP1:
+				case PMIC_GPADC_CHANNEL_SYSTEMP0:
+				case PMIC_GPADC_CHANNEL_SYSTEMP1:
+				case PMIC_GPADC_CHANNEL_SYSTEMP2:
+					if (pmic_a0 &&
+						!info->is_pmic_provisioned) {
+						/* Auto mode with Scaling 4
+						 * for non-provisioned A0 */
+						rlsb = 32550;
+						res->data[i] =
+							(reg_val * rlsb)/10000;
+						break;
+					}
+				/* Case fall-through for PMIC-A1 onwards.
+				 * For USBID, Auto-mode-without-scaling always
+				 */
+				case PMIC_GPADC_CHANNEL_USBID:
+					/* Auto mode without Scaling */
+					cursrc = (th & 0xF0) >> 4;
+					rlsb = rlsb_array[cursrc];
+					res->data[i] = (reg_val * rlsb)/10000;
+					break;
+				}
+			} else {
+				res->data[i] = reg_val;
+			}
+		}
+	}
+
+done:
+	gpadc_set_bits(regs->mirqlvl1, regs->mirqlvl1_adc);
+	gpadc_set_bits(regs->madcirq, mask);
+	mutex_unlock(&info->lock);
+	return ret;
+}
+EXPORT_SYMBOL(iio_basincove_gpadc_sample);
+
+static struct gpadc_result sample_result;
+static int chs;
+
+static ssize_t intel_basincove_gpadc_store_channel(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t size)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct gpadc_info *info = iio_priv(indio_dev);
+
+	if (sscanf(buf, "%x", &chs) != 1) {
+		dev_err(dev, "one channel argument is needed\n");
+		return -EINVAL;
+	}
+
+	if (chs < (1 << 0) || chs >= (1 << info->channel_num)) {
+		dev_err(dev, "invalid channel, should be in [0x1 - 0x1FF]\n");
+		return -EINVAL;
+	}
+
+	return size;
+}
+
+static ssize_t intel_basincove_gpadc_show_channel(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%x\n", chs);
+}
+
+static ssize_t intel_basincove_gpadc_store_sample(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t size)
+{
+	int value, ret;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+
+	memset(sample_result.data, 0, sizeof(sample_result.data));
+
+	if (sscanf(buf, "%d", &value) != 1) {
+		dev_err(dev, "one argument is needed\n");
+		return -EINVAL;
+	}
+
+	if (value == 1) {
+		ret = iio_basincove_gpadc_sample(indio_dev, chs,
+						&sample_result);
+		if (ret) {
+			dev_err(dev, "sample failed\n");
+			return ret;
+		}
+	} else {
+		dev_err(dev, "input '1' to sample\n");
+		return -EINVAL;
+	}
+
+	return size;
+}
+
+static ssize_t intel_basincove_gpadc_show_result(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	int i;
+	int used = 0;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct gpadc_info *info = iio_priv(indio_dev);
+
+	for (i = 0; i < info->channel_num; i++) {
+		used += snprintf(buf + used, PAGE_SIZE - used,
+				"sample_result[%s] = %x\n",
+				info->gpadc_regmaps[i].name,
+				sample_result.data[i]);
+	}
+
+	return used;
+}
+
+
+static DEVICE_ATTR(channel, S_IWUSR | S_IRUGO,
+		intel_basincove_gpadc_show_channel,
+		intel_basincove_gpadc_store_channel);
+static DEVICE_ATTR(sample, S_IWUSR, NULL, intel_basincove_gpadc_store_sample);
+static DEVICE_ATTR(result, S_IRUGO, intel_basincove_gpadc_show_result, NULL);
+
+static struct attribute *intel_basincove_gpadc_attrs[] = {
+	&dev_attr_channel.attr,
+	&dev_attr_sample.attr,
+	&dev_attr_result.attr,
+	NULL,
+};
+static struct attribute_group intel_basincove_gpadc_attr_group = {
+	.name = "basincove_gpadc",
+	.attrs = intel_basincove_gpadc_attrs,
+};
+
+static int basincove_adc_read_raw(struct iio_dev *indio_dev,
+			struct iio_chan_spec const *chan,
+			int *val, int *val2, long m)
+{
+	int ret;
+	int ch = chan->channel;
+	struct gpadc_info *info = iio_priv(indio_dev);
+	struct gpadc_result res;
+
+	ret = iio_basincove_gpadc_sample(indio_dev, (1 << ch), &res);
+	if (ret) {
+		dev_err(info->dev, "sample failed\n");
+		return -EINVAL;
+	}
+
+	*val = res.data[ch];
+
+	return ret;
+}
+
+static int basincove_adc_read_all_raw(struct iio_channel *chan,
+					int *val)
+{
+	int ret;
+	int i, num = 0;
+	int ch = 0;
+	int *channels;
+	struct gpadc_info *info = iio_priv(chan->indio_dev);
+	struct gpadc_result res;
+
+	while (chan[num].indio_dev)
+		num++;
+
+	channels = kzalloc(sizeof(int) * num, GFP_KERNEL);
+	if (channels == NULL)
+		return -ENOMEM;
+
+	for (i = 0; i < num; i++) {
+		channels[i] = chan[i].channel->channel;
+		ch |= (1 << channels[i]);
+	}
+
+	ret = iio_basincove_gpadc_sample(chan->indio_dev, ch, &res);
+	if (ret) {
+		dev_err(info->dev, "sample failed\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	for (i = 0; i < num; i++)
+		val[i] = res.data[channels[i]];
+
+end:
+	kfree(channels);
+	return ret;
+}
+
+static const struct iio_info basincove_adc_info = {
+	.read_raw = &basincove_adc_read_raw,
+	.read_all_raw = &basincove_adc_read_all_raw,
+	.driver_module = THIS_MODULE,
+};
+
+static int bcove_gpadc_probe(struct platform_device *pdev)
+{
+	int err;
+	u8 pmic_prov;
+	struct gpadc_info *info;
+	struct iio_dev *indio_dev;
+	struct intel_basincove_gpadc_platform_data *pdata =
+			pdev->dev.platform_data;
+
+	if (!pdata) {
+		dev_err(&pdev->dev, "no platform data supplied\n");
+		err = -EINVAL;
+		goto out;
+	}
+
+	indio_dev = iio_device_alloc(sizeof(struct gpadc_info));
+	if (indio_dev == NULL) {
+		dev_err(&pdev->dev, "allocating iio device failed\n");
+		err = -ENOMEM;
+		goto out;
+	}
+
+	info = iio_priv(indio_dev);
+
+	mutex_init(&info->lock);
+	init_waitqueue_head(&info->wait);
+	info->dev = &pdev->dev;
+	info->irq = platform_get_irq(pdev, 0);
+	info->intr = ioremap_nocache(pdata->intr, 1);
+	if (!info->intr) {
+		dev_err(&pdev->dev, "ioremap of ADCIRQ failed\n");
+		err = -ENOMEM;
+		goto err_free;
+	}
+	info->intr_mask = pdata->intr_mask;
+	info->channel_num = pdata->channel_num;
+	info->gpadc_regmaps = pdata->gpadc_regmaps;
+	info->gpadc_regs = pdata->gpadc_regs;
+
+	err = request_threaded_irq(info->irq, gpadc_isr, gpadc_threaded_isr,
+			IRQF_ONESHOT, "adc", indio_dev);
+	if (err) {
+		gpadc_dump(info);
+		dev_err(&pdev->dev, "unable to register irq %d\n", info->irq);
+		goto err_iounmap;
+	}
+
+	platform_set_drvdata(pdev, indio_dev);
+
+	indio_dev->dev.parent = &pdev->dev;
+	indio_dev->name = pdev->name;
+
+	indio_dev->channels = pdata->gpadc_channels;
+	indio_dev->num_channels = pdata->channel_num;
+	indio_dev->info = &basincove_adc_info;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+
+	err = iio_map_array_register(indio_dev, pdata->gpadc_iio_maps);
+	if (err)
+		goto err_release_irq;
+
+	err = iio_device_register(indio_dev);
+	if (err < 0)
+		goto err_array_unregister;
+
+	err = gpadc_read(PMIC_ID_ADDR, &info->pmic_id);
+	if (err) {
+		dev_err(&pdev->dev, "Error reading PMIC ID register\n");
+		goto err_iio_device_unregister;
+	}
+
+	dev_info(&pdev->dev, "PMIC-ID: %x\n", info->pmic_id);
+	if ((info->pmic_id & PMIC_VENDOR_ID_MASK) == SHADYCOVE_VENDORID) {
+		/* Check if PMIC is provisioned */
+		err = gpadc_read(PMIC_SPARE03_ADDR, &pmic_prov);
+		if (err) {
+			dev_err(&pdev->dev,
+					"Error reading PMIC SPARE03 REG\n");
+			goto err_iio_device_unregister;
+		}
+
+		if ((pmic_prov & PMIC_PROV_MASK) == PMIC_PROVISIONED) {
+			dev_info(&pdev->dev, "ShadyCove PMIC provisioned\n");
+			info->is_pmic_provisioned = true;
+		} else
+			dev_info(info->dev,
+					"ShadyCove PMIC not provisioned\n");
+	}
+
+	err = sysfs_create_group(&pdev->dev.kobj,
+			&intel_basincove_gpadc_attr_group);
+	if (err) {
+		dev_err(&pdev->dev, "Unable to export sysfs interface, error: %d\n",
+			err);
+		goto err_iio_device_unregister;
+	}
+
+	info->initialized = 1;
+
+	dev_info(&pdev->dev, "bcove adc probed\n");
+
+	return 0;
+
+err_iio_device_unregister:
+	iio_device_unregister(indio_dev);
+err_array_unregister:
+	iio_map_array_unregister(indio_dev);
+err_release_irq:
+	free_irq(info->irq, info);
+err_iounmap:
+	iounmap(info->intr);
+err_free:
+	iio_device_free(indio_dev);
+out:
+	return err;
+}
+
+static int bcove_gpadc_remove(struct platform_device *pdev)
+{
+	struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+	struct gpadc_info *info = iio_priv(indio_dev);
+
+	sysfs_remove_group(&pdev->dev.kobj,
+			&intel_basincove_gpadc_attr_group);
+
+	iio_device_unregister(indio_dev);
+	iio_map_array_unregister(indio_dev);
+	free_irq(info->irq, info);
+	iounmap(info->intr);
+	iio_device_free(indio_dev);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int bcove_gpadc_suspend(struct device *dev)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct gpadc_info *info = iio_priv(indio_dev);
+
+	if (!mutex_trylock(&info->lock))
+		return -EBUSY;
+
+	return 0;
+}
+
+static int bcove_gpadc_resume(struct device *dev)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct gpadc_info *info = iio_priv(indio_dev);
+
+	mutex_unlock(&info->lock);
+	return 0;
+}
+#else
+#define bcove_gpadc_suspend		NULL
+#define bcove_gpadc_resume		NULL
+#endif
+
+static const struct dev_pm_ops bcove_gpadc_driver_pm_ops = {
+	.suspend	= bcove_gpadc_suspend,
+	.resume		= bcove_gpadc_resume,
+};
+
+static struct platform_driver bcove_gpadc_driver = {
+	.driver = {
+		   .name = "bcove_adc",
+		   .owner = THIS_MODULE,
+		   .pm = &bcove_gpadc_driver_pm_ops,
+		   },
+	.probe = bcove_gpadc_probe,
+	.remove = bcove_gpadc_remove,
+};
+
+static int bcove_gpadc_module_init(void)
+{
+	return platform_driver_register(&bcove_gpadc_driver);
+}
+
+static void bcove_gpadc_module_exit(void)
+{
+	platform_driver_unregister(&bcove_gpadc_driver);
+}
+
+static int bcove_adc_rpmsg_probe(struct rpmsg_channel *rpdev)
+{
+	int ret = 0;
+
+	if (rpdev == NULL) {
+		pr_err("rpmsg channel not created\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	dev_info(&rpdev->dev, "Probed bcove_gpadc rpmsg device\n");
+
+	ret = bcove_gpadc_module_init();
+
+out:
+	return ret;
+}
+
+static void bcove_adc_rpmsg_remove(struct rpmsg_channel *rpdev)
+{
+	bcove_gpadc_module_exit();
+	dev_info(&rpdev->dev, "Removed bcove_gpadc rpmsg device\n");
+}
+
+static void bcove_adc_rpmsg_cb(struct rpmsg_channel *rpdev, void *data,
+					int len, void *priv, u32 src)
+{
+	dev_warn(&rpdev->dev, "unexpected, message\n");
+
+	print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+		       data, len,  true);
+}
+
+static struct rpmsg_device_id bcove_adc_rpmsg_id_table[] = {
+	{ .name	= "rpmsg_bcove_adc" },
+	{ },
+};
+MODULE_DEVICE_TABLE(rpmsg, bcove_adc_rpmsg_id_table);
+
+static struct rpmsg_driver bcove_adc_rpmsg = {
+	.drv.name	= KBUILD_MODNAME,
+	.drv.owner	= THIS_MODULE,
+	.id_table	= bcove_adc_rpmsg_id_table,
+	.probe		= bcove_adc_rpmsg_probe,
+	.callback	= bcove_adc_rpmsg_cb,
+	.remove		= bcove_adc_rpmsg_remove,
+};
+
+static int __init bcove_adc_rpmsg_init(void)
+{
+	return register_rpmsg_driver(&bcove_adc_rpmsg);
+}
+
+#ifdef MODULE
+module_init(bcove_adc_rpmsg_init);
+#else
+rootfs_initcall(bcove_adc_rpmsg_init);
+#endif
+
+static void __exit bcove_adc_rpmsg_exit(void)
+{
+	return unregister_rpmsg_driver(&bcove_adc_rpmsg);
+}
+module_exit(bcove_adc_rpmsg_exit);
+
+MODULE_AUTHOR("Yang Bin<bin.yang@intel.com>");
+MODULE_DESCRIPTION("Intel Merrifield Basin Cove GPADC Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index e145931..424dea1 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -66,6 +66,7 @@
 	[IIO_ALTVOLTAGE] = "altvoltage",
 	[IIO_CCT] = "cct",
 	[IIO_PRESSURE] = "pressure",
+	[IIO_RESISTANCE] = "resistance",
 };
 
 static const char * const iio_modifier_names[] = {
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index 98ddc32..5bd1c15 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -44,7 +44,7 @@
 		}
 		mapi->map = &maps[i];
 		mapi->indio_dev = indio_dev;
-		list_add(&mapi->l, &iio_map_list);
+		list_add_tail(&mapi->l, &iio_map_list);
 		i++;
 	}
 error_ret:
@@ -413,6 +413,48 @@
 }
 EXPORT_SYMBOL_GPL(iio_channel_release_all);
 
+int iio_channel_get_num(const struct iio_channel *chan)
+{
+	int num = 0;
+
+	if (chan == NULL)
+		return -ENODEV;
+
+	while (chan[num].indio_dev)
+		num++;
+
+	return num;
+}
+EXPORT_SYMBOL_GPL(iio_channel_get_num);
+
+int iio_channel_get_name(const struct iio_channel *chan, char **chan_name)
+{
+	int i = 0;
+	struct iio_map_internal *c = NULL;
+
+	if (chan == NULL)
+		return -ENODEV;
+
+	if (chan_name == NULL)
+		return -EINVAL;
+
+	while (chan[i].indio_dev) {
+		mutex_lock(&iio_map_list_lock);
+		list_for_each_entry(c, &iio_map_list, l) {
+			if (strcmp(chan[i].channel->datasheet_name,
+				c->map->adc_channel_label) != 0)
+				continue;
+			strcpy(chan_name[i], c->map->consumer_channel);
+			break;
+		}
+		mutex_unlock(&iio_map_list_lock);
+		i++;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(iio_channel_get_name);
+
 static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
 	enum iio_chan_info_enum info)
 {
@@ -443,6 +485,24 @@
 }
 EXPORT_SYMBOL_GPL(iio_read_channel_raw);
 
+int iio_read_channel_all_raw(struct iio_channel *chan, int *val)
+{
+	int ret;
+
+	mutex_lock(&chan->indio_dev->info_exist_lock);
+	if (chan->indio_dev->info == NULL) {
+		ret = -ENODEV;
+		goto err_unlock;
+	}
+
+	ret = chan->indio_dev->info->read_all_raw(chan, val);
+err_unlock:
+	mutex_unlock(&chan->indio_dev->info_exist_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(iio_read_channel_all_raw);
+
 static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
 	int raw, int *processed, unsigned int scale)
 {
@@ -451,7 +511,7 @@
 	int ret;
 
 	ret = iio_channel_read(chan, &offset, NULL, IIO_CHAN_INFO_OFFSET);
-	if (ret == 0)
+	if (ret >= 0)
 		raw64 += offset;
 
 	scale_type = iio_channel_read(chan, &scale_val, &scale_val2,
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 41712f0..5849dc0 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -388,6 +388,7 @@
 	init_waitqueue_head(&isert_conn->conn_wait_comp_err);
 	kref_init(&isert_conn->conn_kref);
 	kref_get(&isert_conn->conn_kref);
+	mutex_init(&isert_conn->conn_mutex);
 
 	cma_id->context = isert_conn;
 	isert_conn->conn_cm_id = cma_id;
@@ -540,15 +541,32 @@
 				struct isert_conn, conn_logout_work);
 
 	pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
-
+	mutex_lock(&isert_conn->conn_mutex);
 	isert_conn->state = ISER_CONN_DOWN;
 
 	if (isert_conn->post_recv_buf_count == 0 &&
 	    atomic_read(&isert_conn->post_send_buf_count) == 0) {
 		pr_debug("Calling wake_up(&isert_conn->conn_wait);\n");
-		wake_up(&isert_conn->conn_wait);
+		mutex_unlock(&isert_conn->conn_mutex);
+		goto wake_up;
 	}
+	if (!isert_conn->conn_cm_id) {
+		mutex_unlock(&isert_conn->conn_mutex);
+		isert_put_conn(isert_conn);
+		return;
+	}
+	if (!isert_conn->logout_posted) {
+		pr_debug("Calling rdma_disconnect for !logout_posted from"
+			 " isert_disconnect_work\n");
+		rdma_disconnect(isert_conn->conn_cm_id);
+		mutex_unlock(&isert_conn->conn_mutex);
+		iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
+		goto wake_up;
+	}
+	mutex_unlock(&isert_conn->conn_mutex);
 
+wake_up:
+	wake_up(&isert_conn->conn_wait);
 	isert_put_conn(isert_conn);
 }
 
@@ -934,16 +952,11 @@
 	}
 
 sequence_cmd:
-	rc = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
+	rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
 
 	if (!rc && dump_payload == false && unsol_data)
 		iscsit_set_unsoliticed_dataout(cmd);
 
-	if (rc == CMDSN_ERROR_CANNOT_RECOVER)
-		return iscsit_add_reject_from_cmd(
-			   ISCSI_REASON_PROTOCOL_ERROR,
-			   1, 0, (unsigned char *)hdr, cmd);
-
 	return 0;
 }
 
@@ -1184,14 +1197,12 @@
 {
 	struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
 	struct isert_conn *isert_conn = isert_cmd->conn;
-	struct iscsi_conn *conn;
+	struct iscsi_conn *conn = isert_conn->conn;
 
 	pr_debug("Entering isert_put_cmd: %p\n", isert_cmd);
 
 	switch (cmd->iscsi_opcode) {
 	case ISCSI_OP_SCSI_CMD:
-		conn = isert_conn->conn;
-
 		spin_lock_bh(&conn->cmd_lock);
 		if (!list_empty(&cmd->i_conn_node))
 			list_del(&cmd->i_conn_node);
@@ -1201,16 +1212,18 @@
 			iscsit_stop_dataout_timer(cmd);
 
 		isert_unmap_cmd(isert_cmd, isert_conn);
-		/*
-		 * Fall-through
-		 */
+		transport_generic_free_cmd(&cmd->se_cmd, 0);
+		break;
 	case ISCSI_OP_SCSI_TMFUNC:
+		spin_lock_bh(&conn->cmd_lock);
+		if (!list_empty(&cmd->i_conn_node))
+			list_del(&cmd->i_conn_node);
+		spin_unlock_bh(&conn->cmd_lock);
+
 		transport_generic_free_cmd(&cmd->se_cmd, 0);
 		break;
 	case ISCSI_OP_REJECT:
 	case ISCSI_OP_NOOP_OUT:
-		conn = isert_conn->conn;
-
 		spin_lock_bh(&conn->cmd_lock);
 		if (!list_empty(&cmd->i_conn_node))
 			list_del(&cmd->i_conn_node);
@@ -1222,6 +1235,9 @@
 		 * associated cmd->se_cmd needs to be released.
 		 */
 		if (cmd->se_cmd.se_tfo != NULL) {
+			pr_debug("Calling transport_generic_free_cmd from"
+				 " isert_put_cmd for 0x%02x\n",
+				 cmd->iscsi_opcode);
 			transport_generic_free_cmd(&cmd->se_cmd, 0);
 			break;
 		}
@@ -1318,8 +1334,8 @@
 		atomic_dec(&isert_conn->post_send_buf_count);
 
 		cmd->i_state = ISTATE_SENT_STATUS;
-		complete(&cmd->reject_comp);
 		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
+		break;
 	case ISTATE_SEND_LOGOUTRSP:
 		pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
 		/*
@@ -1345,7 +1361,8 @@
 	struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
 
 	if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
-	    cmd->i_state == ISTATE_SEND_LOGOUTRSP) {
+	    cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
+	    cmd->i_state == ISTATE_SEND_REJECT) {
 		isert_unmap_tx_desc(tx_desc, ib_dev);
 
 		INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
@@ -1419,7 +1436,11 @@
 		pr_debug("isert_cq_comp_err >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
 		pr_debug("Calling wake_up from isert_cq_comp_err\n");
 
-		isert_conn->state = ISER_CONN_TERMINATING;
+		mutex_lock(&isert_conn->conn_mutex);
+		if (isert_conn->state != ISER_CONN_DOWN)
+			isert_conn->state = ISER_CONN_TERMINATING;
+		mutex_unlock(&isert_conn->conn_mutex);
+
 		wake_up(&isert_conn->conn_wait_comp_err);
 	}
 }
@@ -1637,11 +1658,25 @@
 				struct isert_cmd, iscsi_cmd);
 	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
 	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
+	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+	struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
+	struct iscsi_reject *hdr =
+		(struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;
 
 	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
-	iscsit_build_reject(cmd, conn, (struct iscsi_reject *)
-				&isert_cmd->tx_desc.iscsi_header);
+	iscsit_build_reject(cmd, conn, hdr);
 	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
+
+	hton24(hdr->dlength, ISCSI_HDR_LEN);
+	isert_cmd->sense_buf_dma = ib_dma_map_single(ib_dev,
+			(void *)cmd->buf_ptr, ISCSI_HDR_LEN,
+			DMA_TO_DEVICE);
+	isert_cmd->sense_buf_len = ISCSI_HDR_LEN;
+	tx_dsg->addr	= isert_cmd->sense_buf_dma;
+	tx_dsg->length	= ISCSI_HDR_LEN;
+	tx_dsg->lkey	= isert_conn->conn_mr->lkey;
+	isert_cmd->tx_desc.num_sge = 2;
+
 	isert_init_send_wr(isert_cmd, send_wr);
 
 	pr_debug("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
@@ -2175,6 +2210,17 @@
 	kfree(isert_np);
 }
 
+static int isert_check_state(struct isert_conn *isert_conn, int state)
+{
+	int ret;
+
+	mutex_lock(&isert_conn->conn_mutex);
+	ret = (isert_conn->state == state);
+	mutex_unlock(&isert_conn->conn_mutex);
+
+	return ret;
+}
+
 static void isert_free_conn(struct iscsi_conn *conn)
 {
 	struct isert_conn *isert_conn = conn->context;
@@ -2184,26 +2230,43 @@
 	 * Decrement post_send_buf_count for special case when called
 	 * from isert_do_control_comp() -> iscsit_logout_post_handler()
 	 */
+	mutex_lock(&isert_conn->conn_mutex);
 	if (isert_conn->logout_posted)
 		atomic_dec(&isert_conn->post_send_buf_count);
 
-	if (isert_conn->conn_cm_id)
+	if (isert_conn->conn_cm_id && isert_conn->state != ISER_CONN_DOWN) {
+		pr_debug("Calling rdma_disconnect from isert_free_conn\n");
 		rdma_disconnect(isert_conn->conn_cm_id);
+	}
 	/*
 	 * Only wait for conn_wait_comp_err if the isert_conn made it
 	 * into full feature phase..
 	 */
-	if (isert_conn->state > ISER_CONN_INIT) {
+	if (isert_conn->state == ISER_CONN_UP) {
 		pr_debug("isert_free_conn: Before wait_event comp_err %d\n",
 			 isert_conn->state);
-		wait_event(isert_conn->conn_wait_comp_err,
-			   isert_conn->state == ISER_CONN_TERMINATING);
-		pr_debug("isert_free_conn: After wait_event #1 >>>>>>>>>>>>\n");
-	}
+		mutex_unlock(&isert_conn->conn_mutex);
 
-	pr_debug("isert_free_conn: wait_event conn_wait %d\n", isert_conn->state);
-	wait_event(isert_conn->conn_wait, isert_conn->state == ISER_CONN_DOWN);
-	pr_debug("isert_free_conn: After wait_event #2 >>>>>>>>>>>>>>>>>>>>\n");
+		wait_event(isert_conn->conn_wait_comp_err,
+			  (isert_check_state(isert_conn, ISER_CONN_TERMINATING)));
+
+		wait_event(isert_conn->conn_wait,
+			  (isert_check_state(isert_conn, ISER_CONN_DOWN)));
+
+		isert_put_conn(isert_conn);
+		return;
+	}
+	if (isert_conn->state == ISER_CONN_INIT) {
+		mutex_unlock(&isert_conn->conn_mutex);
+		isert_put_conn(isert_conn);
+		return;
+	}
+	pr_debug("isert_free_conn: wait_event conn_wait %d\n",
+		 isert_conn->state);
+	mutex_unlock(&isert_conn->conn_mutex);
+
+	wait_event(isert_conn->conn_wait,
+		  (isert_check_state(isert_conn, ISER_CONN_DOWN)));
 
 	isert_put_conn(isert_conn);
 }
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index b104f4c..5795c82 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -102,6 +102,7 @@
 	struct ib_qp		*conn_qp;
 	struct isert_device	*conn_device;
 	struct work_struct	conn_logout_work;
+	struct mutex		conn_mutex;
 	wait_queue_head_t	conn_wait;
 	wait_queue_head_t	conn_wait_comp_err;
 	struct kref		conn_kref;
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 3f3f041..6c66a72 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1588,7 +1588,7 @@
 	int resp_data_len;
 	int resp_len;
 
-	resp_data_len = (rsp_code == SRP_TSK_MGMT_SUCCESS) ? 0 : 4;
+	resp_data_len = 4;
 	resp_len = sizeof(*srp_rsp) + resp_data_len;
 
 	srp_rsp = ioctx->ioctx.buf;
@@ -1600,11 +1600,9 @@
 				    + atomic_xchg(&ch->req_lim_delta, 0));
 	srp_rsp->tag = tag;
 
-	if (rsp_code != SRP_TSK_MGMT_SUCCESS) {
-		srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID;
-		srp_rsp->resp_data_len = cpu_to_be32(resp_data_len);
-		srp_rsp->data[3] = rsp_code;
-	}
+	srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID;
+	srp_rsp->resp_data_len = cpu_to_be32(resp_data_len);
+	srp_rsp->data[3] = rsp_code;
 
 	return resp_len;
 }
@@ -2358,6 +2356,8 @@
 	transport_deregister_session(se_sess);
 	ch->sess = NULL;
 
+	ib_destroy_cm_id(ch->cm_id);
+
 	srpt_destroy_ch_ib(ch);
 
 	srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
@@ -2368,8 +2368,6 @@
 	list_del(&ch->list);
 	spin_unlock_irq(&sdev->spinlock);
 
-	ib_destroy_cm_id(ch->cm_id);
-
 	if (ch->release_done)
 		complete(ch->release_done);
 
diff --git a/drivers/input/input.c b/drivers/input/input.c
index c044699..e03f856 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -679,6 +679,31 @@
 }
 
 /*
+ * Simulate keyup events for all keys that were marked as
+ * pressed during input device suspend.
+ * The function must be called with dev->event_lock held.
+ */
+static void input_dev_resume_keys(struct input_dev *dev)
+{
+	int code;
+	bool sync = false;
+
+	if (!is_event_supported(EV_KEY, dev->evbit, EV_MAX))
+		return;
+
+	for (code = 0; code <= KEY_MAX; code++) {
+		if (is_event_supported(code, dev->keybit, KEY_MAX) &&
+		    test_bit(code, dev->key_suspend) &&
+		    __test_and_clear_bit(code, dev->key)) {
+			sync = true;
+			input_pass_event(dev, EV_KEY, code, 0);
+		}
+	}
+	if (sync)
+		input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
+}
+
+/*
  * Prepare device for unregistering
  */
 static void input_disconnect_device(struct input_dev *dev)
@@ -1658,10 +1683,6 @@
 	if (dev->users) {
 		input_dev_toggle(dev, true);
 
-		/*
-		 * Keys that have been pressed at suspend time are unlikely
-		 * to be still pressed when we resume.
-		 */
 		spin_lock_irq(&dev->event_lock);
 		input_dev_release_keys(dev);
 		spin_unlock_irq(&dev->event_lock);
@@ -1678,8 +1699,11 @@
 
 	mutex_lock(&input_dev->mutex);
 
-	if (input_dev->users)
+	if (input_dev->users) {
 		input_dev_toggle(input_dev, false);
+		memcpy(input_dev->key_suspend, input_dev->key,
+			sizeof(input_dev->key_suspend));
+	}
 
 	mutex_unlock(&input_dev->mutex);
 
@@ -1690,7 +1714,22 @@
 {
 	struct input_dev *input_dev = to_input_dev(dev);
 
-	input_reset_device(input_dev);
+	mutex_lock(&input_dev->mutex);
+
+	if (input_dev->users) {
+		input_dev_toggle(input_dev, true);
+
+		/*
+		 * For keys that have been pressed at suspend time
+		 * and are seen as released at resume time, simulate
+		 * a key release event for upper layers.
+		 */
+		spin_lock_irq(&input_dev->event_lock);
+		input_dev_resume_keys(input_dev);
+		spin_unlock_irq(&input_dev->event_lock);
+	}
+
+	mutex_unlock(&input_dev->mutex);
 
 	return 0;
 }
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index b29ca65..c573c9e 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -30,7 +30,9 @@
 #include <linux/of_gpio.h>
 #include <linux/spinlock.h>
 
+struct gpio_keys_drvdata;
 struct gpio_button_data {
+	struct gpio_keys_drvdata *ddata;
 	const struct gpio_keys_button *button;
 	struct input_dev *input;
 	struct timer_list timer;
@@ -43,12 +45,39 @@
 };
 
 struct gpio_keys_drvdata {
-	const struct gpio_keys_platform_data *pdata;
 	struct input_dev *input;
 	struct mutex disable_lock;
+	unsigned int n_buttons;
+	int force_trigger;
+	int (*enable)(struct device *dev);
+	void (*disable)(struct device *dev);
 	struct gpio_button_data data[0];
 };
 
+static int gpio_keys_request_irq(int gpio, irq_handler_t isr,
+		unsigned long flags, const char *name, void *data)
+{
+	int ret;
+
+	if (gpio_cansleep(gpio))
+		ret = request_threaded_irq(gpio_to_irq(gpio), NULL, isr,
+				flags | IRQF_ONESHOT, name, data);
+	else
+		ret = request_irq(gpio_to_irq(gpio), isr, flags, name, data);
+	return ret;
+}
+
+static int gpio_keys_getval(int gpio)
+{
+	int ret;
+
+	if (gpio_cansleep(gpio))
+		ret = gpio_get_value_cansleep(gpio);
+	else
+		ret = gpio_get_value(gpio);
+	return ret;
+}
+
 /*
  * SYSFS interface for enabling/disabling keys and switches:
  *
@@ -169,7 +198,7 @@
 	if (!bits)
 		return -ENOMEM;
 
-	for (i = 0; i < ddata->pdata->nbuttons; i++) {
+	for (i = 0; i < ddata->n_buttons; i++) {
 		struct gpio_button_data *bdata = &ddata->data[i];
 
 		if (bdata->button->type != type)
@@ -217,7 +246,7 @@
 		goto out;
 
 	/* First validate */
-	for (i = 0; i < ddata->pdata->nbuttons; i++) {
+	for (i = 0; i < ddata->n_buttons; i++) {
 		struct gpio_button_data *bdata = &ddata->data[i];
 
 		if (bdata->button->type != type)
@@ -232,7 +261,7 @@
 
 	mutex_lock(&ddata->disable_lock);
 
-	for (i = 0; i < ddata->pdata->nbuttons; i++) {
+	for (i = 0; i < ddata->n_buttons; i++) {
 		struct gpio_button_data *bdata = &ddata->data[i];
 
 		if (bdata->button->type != type)
@@ -310,11 +339,59 @@
 		   gpio_keys_show_disabled_switches,
 		   gpio_keys_store_disabled_switches);
 
+static ssize_t gpio_keys_wakeup_enable(struct device *dev,
+		struct device_attribute *attr, const char *buf,
+			size_t size, int enable_wakeup)
+{
+	int i, wakeup = 0, ret = -EINVAL;
+	long code;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct gpio_keys_platform_data *pdata = pdev->dev.platform_data;
+
+	ret = kstrtol(buf, 10, &code);
+	if (ret != 0) {
+		dev_err(dev, "Invalid input.\n");
+		return ret;
+	}
+
+	for (i = 0; i < pdata->nbuttons; i++) {
+		struct gpio_keys_button *button = &pdata->buttons[i];
+		if ((int)code == button->code)
+			button->wakeup = enable_wakeup;
+		if (button->wakeup)
+			wakeup = button->wakeup;
+	}
+
+	device_init_wakeup(dev, wakeup);
+
+	return size;
+}
+
+static ssize_t gpio_keys_store_enabled_wakeup(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	return gpio_keys_wakeup_enable(dev, attr, buf, size, 1);
+}
+
+static ssize_t gpio_keys_store_disabled_wakeup(struct device *dev,
+	       struct device_attribute *attr, const char *buf, size_t size)
+{
+	return gpio_keys_wakeup_enable(dev, attr, buf, size, 0);
+}
+static DEVICE_ATTR(enabled_wakeup, S_IWUSR | S_IRUGO,
+		   NULL,
+		   gpio_keys_store_enabled_wakeup);
+static DEVICE_ATTR(disabled_wakeup, S_IWUSR | S_IRUGO,
+		   NULL,
+		   gpio_keys_store_disabled_wakeup);
+
 static struct attribute *gpio_keys_attrs[] = {
 	&dev_attr_keys.attr,
 	&dev_attr_switches.attr,
 	&dev_attr_disabled_keys.attr,
 	&dev_attr_disabled_switches.attr,
+	&dev_attr_enabled_wakeup.attr,
+	&dev_attr_disabled_wakeup.attr,
 	NULL,
 };
 
@@ -327,7 +404,8 @@
 	const struct gpio_keys_button *button = bdata->button;
 	struct input_dev *input = bdata->input;
 	unsigned int type = button->type ?: EV_KEY;
-	int state = (gpio_get_value_cansleep(button->gpio) ? 1 : 0) ^ button->active_low;
+	int state =
+		(gpio_keys_getval(button->gpio) ? 1 : 0) ^ button->active_low;
 
 	if (type == EV_ABS) {
 		if (state)
@@ -344,9 +422,6 @@
 		container_of(work, struct gpio_button_data, work);
 
 	gpio_keys_gpio_report_event(bdata);
-
-	if (bdata->button->wakeup)
-		pm_relax(bdata->input->dev.parent);
 }
 
 static void gpio_keys_gpio_timer(unsigned long _data)
@@ -359,11 +434,25 @@
 static irqreturn_t gpio_keys_gpio_isr(int irq, void *dev_id)
 {
 	struct gpio_button_data *bdata = dev_id;
+	const struct gpio_keys_button *button;
+	struct input_dev *input;
+	unsigned int type;
+	int state;
 
+	BUG_ON(!bdata);
+	BUG_ON(!bdata->button);
 	BUG_ON(irq != bdata->irq);
 
-	if (bdata->button->wakeup)
-		pm_stay_awake(bdata->input->dev.parent);
+	button = bdata->button;
+	input = bdata->input;
+	state = (gpio_keys_getval(button->gpio) ? 1 : 0) ^ button->active_low;
+
+	if (bdata->ddata->force_trigger && !state) {
+		type = button->type ?: EV_KEY;
+		input_event(input, type, button->code, !state);
+		bdata->ddata->force_trigger = 0;
+	}
+
 	if (bdata->timer_debounce)
 		mod_timer(&bdata->timer,
 			jiffies + msecs_to_jiffies(bdata->timer_debounce));
@@ -400,9 +489,6 @@
 	spin_lock_irqsave(&bdata->lock, flags);
 
 	if (!bdata->key_pressed) {
-		if (bdata->button->wakeup)
-			pm_wakeup_event(bdata->input->dev.parent, 0);
-
 		input_event(input, EV_KEY, button->code, 1);
 		input_sync(input);
 
@@ -440,13 +526,21 @@
 
 	if (gpio_is_valid(button->gpio)) {
 
-		error = gpio_request_one(button->gpio, GPIOF_IN, desc);
+		error = gpio_request(button->gpio, desc);
 		if (error < 0) {
 			dev_err(dev, "Failed to request GPIO %d, error %d\n",
 				button->gpio, error);
 			return error;
 		}
 
+		error = gpio_direction_input(button->gpio);
+		if (error < 0) {
+			dev_err(dev,
+			"Failed to configure direction for GPIO %d, error %d\n",
+				button->gpio, error);
+			goto fail;
+		}
+
 		if (button->debounce_interval) {
 			error = gpio_set_debounce(button->gpio,
 					button->debounce_interval * 1000);
@@ -502,7 +596,7 @@
 	if (!button->can_disable)
 		irqflags |= IRQF_SHARED;
 
-	error = request_any_context_irq(bdata->irq, isr, irqflags, desc, bdata);
+	error = gpio_keys_request_irq(button->gpio, isr, irqflags, desc, bdata);
 	if (error < 0) {
 		dev_err(dev, "Unable to claim irq %d; error %d\n",
 			bdata->irq, error);
@@ -518,91 +612,60 @@
 	return error;
 }
 
-static void gpio_keys_report_state(struct gpio_keys_drvdata *ddata)
-{
-	struct input_dev *input = ddata->input;
-	int i;
-
-	for (i = 0; i < ddata->pdata->nbuttons; i++) {
-		struct gpio_button_data *bdata = &ddata->data[i];
-		if (gpio_is_valid(bdata->button->gpio))
-			gpio_keys_gpio_report_event(bdata);
-	}
-	input_sync(input);
-}
-
 static int gpio_keys_open(struct input_dev *input)
 {
 	struct gpio_keys_drvdata *ddata = input_get_drvdata(input);
-	const struct gpio_keys_platform_data *pdata = ddata->pdata;
-	int error;
 
-	if (pdata->enable) {
-		error = pdata->enable(input->dev.parent);
-		if (error)
-			return error;
-	}
-
-	/* Report current state of buttons that are connected to GPIOs */
-	gpio_keys_report_state(ddata);
-
-	return 0;
+	return ddata->enable ? ddata->enable(input->dev.parent) : 0;
 }
 
 static void gpio_keys_close(struct input_dev *input)
 {
 	struct gpio_keys_drvdata *ddata = input_get_drvdata(input);
-	const struct gpio_keys_platform_data *pdata = ddata->pdata;
 
-	if (pdata->disable)
-		pdata->disable(input->dev.parent);
+	if (ddata->disable)
+		ddata->disable(input->dev.parent);
 }
 
 /*
  * Handlers for alternative sources of platform_data
  */
-
 #ifdef CONFIG_OF
 /*
  * Translate OpenFirmware node properties into platform_data
  */
-static struct gpio_keys_platform_data *
-gpio_keys_get_devtree_pdata(struct device *dev)
+static int gpio_keys_get_devtree_pdata(struct device *dev,
+			    struct gpio_keys_platform_data *pdata)
 {
 	struct device_node *node, *pp;
-	struct gpio_keys_platform_data *pdata;
-	struct gpio_keys_button *button;
-	int error;
-	int nbuttons;
 	int i;
+	struct gpio_keys_button *buttons;
+	u32 reg;
 
 	node = dev->of_node;
-	if (!node) {
-		error = -ENODEV;
-		goto err_out;
-	}
+	if (node == NULL)
+		return -ENODEV;
 
-	nbuttons = of_get_child_count(node);
-	if (nbuttons == 0) {
-		error = -ENODEV;
-		goto err_out;
-	}
-
-	pdata = kzalloc(sizeof(*pdata) + nbuttons * (sizeof *button),
-			GFP_KERNEL);
-	if (!pdata) {
-		error = -ENOMEM;
-		goto err_out;
-	}
-
-	pdata->buttons = (struct gpio_keys_button *)(pdata + 1);
-	pdata->nbuttons = nbuttons;
+	memset(pdata, 0, sizeof(*pdata));
 
 	pdata->rep = !!of_get_property(node, "autorepeat", NULL);
 
+	/* First count the subnodes */
+	pdata->nbuttons = 0;
+	pp = NULL;
+	while ((pp = of_get_next_child(node, pp)))
+		pdata->nbuttons++;
+
+	if (pdata->nbuttons == 0)
+		return -ENODEV;
+
+	buttons = kzalloc(pdata->nbuttons * sizeof(*buttons), GFP_KERNEL);
+	if (!buttons)
+		return -ENOMEM;
+
+	pp = NULL;
 	i = 0;
-	for_each_child_of_node(node, pp) {
-		int gpio;
+	while ((pp = of_get_next_child(node, pp))) {
 		enum of_gpio_flags flags;
 
 		if (!of_find_property(pp, "gpios", NULL)) {
@@ -610,52 +673,41 @@
 			dev_warn(dev, "Found button without gpios\n");
 			continue;
 		}
+		buttons[i].gpio = of_get_gpio_flags(pp, 0, &flags);
+		buttons[i].active_low = flags & OF_GPIO_ACTIVE_LOW;
 
-		gpio = of_get_gpio_flags(pp, 0, &flags);
-		if (gpio < 0) {
-			error = gpio;
-			if (error != -EPROBE_DEFER)
-				dev_err(dev,
-					"Failed to get gpio flags, error: %d\n",
-					error);
-			goto err_free_pdata;
-		}
-
-		button = &pdata->buttons[i++];
-
-		button->gpio = gpio;
-		button->active_low = flags & OF_GPIO_ACTIVE_LOW;
-
-		if (of_property_read_u32(pp, "linux,code", &button->code)) {
+		if (of_property_read_u32(pp, "linux,code", &reg)) {
 			dev_err(dev, "Button without keycode: 0x%x\n",
-				button->gpio);
-			error = -EINVAL;
-			goto err_free_pdata;
+				buttons[i].gpio);
+			goto out_fail;
 		}
+		buttons[i].code = reg;
 
-		button->desc = of_get_property(pp, "label", NULL);
+		buttons[i].desc = of_get_property(pp, "label", NULL);
 
-		if (of_property_read_u32(pp, "linux,input-type", &button->type))
-			button->type = EV_KEY;
+		if (of_property_read_u32(pp, "linux,input-type", &reg) == 0)
+			buttons[i].type = reg;
+		else
+			buttons[i].type = EV_KEY;
 
-		button->wakeup = !!of_get_property(pp, "gpio-key,wakeup", NULL);
+		buttons[i].wakeup =
+			!!of_get_property(pp, "gpio-key,wakeup", NULL);
 
-		if (of_property_read_u32(pp, "debounce-interval",
-					 &button->debounce_interval))
-			button->debounce_interval = 5;
+		if (of_property_read_u32(pp, "debounce-interval", &reg) == 0)
+			buttons[i].debounce_interval = reg;
+		else
+			buttons[i].debounce_interval = 5;
+
+		i++;
 	}
 
-	if (pdata->nbuttons == 0) {
-		error = -EINVAL;
-		goto err_free_pdata;
-	}
+	pdata->buttons = buttons;
 
-	return pdata;
+	return 0;
 
-err_free_pdata:
-	kfree(pdata);
-err_out:
-	return ERR_PTR(error);
+out_fail:
+	kfree(buttons);
+	return -ENODEV;
 }
 
 static struct of_device_id gpio_keys_of_match[] = {
@@ -666,12 +718,14 @@
 
 #else
 
-static inline struct gpio_keys_platform_data *
-gpio_keys_get_devtree_pdata(struct device *dev)
+static int gpio_keys_get_devtree_pdata(struct device *dev,
+			    struct gpio_keys_platform_data *altp)
 {
-	return ERR_PTR(-ENODEV);
+	return -ENODEV;
 }
 
+#define gpio_keys_of_match NULL
+
 #endif
 
 static void gpio_remove_key(struct gpio_button_data *bdata)
@@ -686,17 +740,19 @@
 
 static int gpio_keys_probe(struct platform_device *pdev)
 {
-	struct device *dev = &pdev->dev;
-	const struct gpio_keys_platform_data *pdata = dev_get_platdata(dev);
+	const struct gpio_keys_platform_data *pdata = pdev->dev.platform_data;
 	struct gpio_keys_drvdata *ddata;
+	struct device *dev = &pdev->dev;
+	struct gpio_keys_platform_data alt_pdata;
 	struct input_dev *input;
 	int i, error;
 	int wakeup = 0;
 
 	if (!pdata) {
-		pdata = gpio_keys_get_devtree_pdata(dev);
-		if (IS_ERR(pdata))
-			return PTR_ERR(pdata);
+		error = gpio_keys_get_devtree_pdata(dev, &alt_pdata);
+		if (error)
+			return error;
+		pdata = &alt_pdata;
 	}
 
 	ddata = kzalloc(sizeof(struct gpio_keys_drvdata) +
@@ -709,8 +765,10 @@
 		goto fail1;
 	}
 
-	ddata->pdata = pdata;
 	ddata->input = input;
+	ddata->n_buttons = pdata->nbuttons;
+	ddata->enable = pdata->enable;
+	ddata->disable = pdata->disable;
 	mutex_init(&ddata->disable_lock);
 
 	platform_set_drvdata(pdev, ddata);
@@ -735,6 +793,7 @@
 		const struct gpio_keys_button *button = &pdata->buttons[i];
 		struct gpio_button_data *bdata = &ddata->data[i];
 
+		bdata->ddata = ddata;
 		error = gpio_keys_setup_key(pdev, input, bdata, button);
 		if (error)
 			goto fail2;
@@ -757,6 +816,14 @@
 		goto fail3;
 	}
 
+	/* get current state of buttons that are connected to GPIOs */
+	for (i = 0; i < pdata->nbuttons; i++) {
+		struct gpio_button_data *bdata = &ddata->data[i];
+		if (gpio_is_valid(bdata->button->gpio))
+			gpio_keys_gpio_report_event(bdata);
+	}
+	input_sync(input);
+
 	device_init_wakeup(&pdev->dev, wakeup);
 
 	return 0;
@@ -771,9 +838,9 @@
  fail1:
 	input_free_device(input);
 	kfree(ddata);
-	/* If we have no platform data, we allocated pdata dynamically. */
-	if (!dev_get_platdata(&pdev->dev))
-		kfree(pdata);
+	/* If we have no platform_data, we allocated buttons dynamically. */
+	if (!pdev->dev.platform_data)
+		kfree(pdata->buttons);
 
 	return error;
 }
@@ -788,14 +855,18 @@
 
 	device_init_wakeup(&pdev->dev, 0);
 
-	for (i = 0; i < ddata->pdata->nbuttons; i++)
+	for (i = 0; i < ddata->n_buttons; i++)
 		gpio_remove_key(&ddata->data[i]);
 
 	input_unregister_device(input);
 
-	/* If we have no platform data, we allocated pdata dynamically. */
-	if (!dev_get_platdata(&pdev->dev))
-		kfree(ddata->pdata);
+	/*
+	 * If we had no platform_data, we allocated buttons dynamically, and
+	 * must free them here. ddata->data[0].button is the pointer to the
+	 * beginning of the allocated array.
+	 */
+	if (!pdev->dev.platform_data)
+		kfree(ddata->data[0].button);
 
 	kfree(ddata);
 
@@ -806,20 +877,14 @@
 static int gpio_keys_suspend(struct device *dev)
 {
 	struct gpio_keys_drvdata *ddata = dev_get_drvdata(dev);
-	struct input_dev *input = ddata->input;
 	int i;
 
-	if (device_may_wakeup(dev)) {
-		for (i = 0; i < ddata->pdata->nbuttons; i++) {
-			struct gpio_button_data *bdata = &ddata->data[i];
-			if (bdata->button->wakeup)
-				enable_irq_wake(bdata->irq);
-		}
-	} else {
-		mutex_lock(&input->mutex);
-		if (input->users)
-			gpio_keys_close(input);
-		mutex_unlock(&input->mutex);
+	for (i = 0; i < ddata->n_buttons; i++) {
+		struct gpio_button_data *bdata = &ddata->data[i];
+		if (bdata->button->wakeup && device_may_wakeup(dev))
+			enable_irq_wake(bdata->irq);
+		else if (gpio_is_valid(bdata->button->gpio))
+			free_irq(bdata->irq, bdata);
 	}
 
 	return 0;
@@ -828,41 +893,76 @@
 static int gpio_keys_resume(struct device *dev)
 {
 	struct gpio_keys_drvdata *ddata = dev_get_drvdata(dev);
-	struct input_dev *input = ddata->input;
-	int error = 0;
-	int i;
+	unsigned long irqflags;
+	const char *desc;
+	int i, error;
 
-	if (device_may_wakeup(dev)) {
-		for (i = 0; i < ddata->pdata->nbuttons; i++) {
-			struct gpio_button_data *bdata = &ddata->data[i];
-			if (bdata->button->wakeup)
-				disable_irq_wake(bdata->irq);
+	ddata->force_trigger = 0;
+
+	for (i = 0; i < ddata->n_buttons; i++) {
+		struct gpio_button_data *bdata = &ddata->data[i];
+		if (bdata->button->wakeup && device_may_wakeup(dev))
+			disable_irq_wake(bdata->irq);
+		else if (gpio_is_valid(bdata->button->gpio)) {
+			irqflags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING;
+			desc = bdata->button->desc ?
+				bdata->button->desc : "gpio_keys";
+			if (!bdata->button->can_disable)
+				irqflags |= IRQF_SHARED;
+			error = gpio_keys_request_irq(bdata->button->gpio,
+					gpio_keys_gpio_isr, irqflags,
+					desc, bdata);
+			if (error) {
+				dev_err(dev, "Unable to claim irq %d; error %d\n",
+						bdata->irq, error);
+				return error;
+			}
 		}
-	} else {
-		mutex_lock(&input->mutex);
-		if (input->users)
-			error = gpio_keys_open(input);
-		mutex_unlock(&input->mutex);
+
+		if (gpio_is_valid(bdata->button->gpio))
+			gpio_keys_gpio_report_event(bdata);
 	}
+	input_sync(ddata->input);
 
-	if (error)
-		return error;
-
-	gpio_keys_report_state(ddata);
 	return 0;
 }
+
+static int gpio_keys_resume_noirq(struct device *dev)
+{
+	struct gpio_keys_drvdata *ddata = dev_get_drvdata(dev);
+
+	ddata->force_trigger = 1;
+	return 0;
+}
+
+static const struct dev_pm_ops gpio_keys_pm_ops = {
+	.suspend	= gpio_keys_suspend,
+	.resume		= gpio_keys_resume,
+	.resume_noirq	= gpio_keys_resume_noirq,
+};
 #endif
 
-static SIMPLE_DEV_PM_OPS(gpio_keys_pm_ops, gpio_keys_suspend, gpio_keys_resume);
+static struct platform_device_id gpio_keys_ids[] = {
+	{
+		.name = "gpio-keys",
+	}, {
+		.name = "gpio-lesskey",
+	}, {
+	},
+};
+MODULE_DEVICE_TABLE(platform, gpio_keys_ids);
 
 static struct platform_driver gpio_keys_device_driver = {
 	.probe		= gpio_keys_probe,
 	.remove		= gpio_keys_remove,
+	.id_table	= gpio_keys_ids,
 	.driver		= {
 		.name	= "gpio-keys",
 		.owner	= THIS_MODULE,
+#ifdef CONFIG_PM_SLEEP
 		.pm	= &gpio_keys_pm_ops,
-		.of_match_table = of_match_ptr(gpio_keys_of_match),
+#endif
+		.of_match_table = gpio_keys_of_match,
 	}
 };
 
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index 2baff1b..a73f961 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -88,6 +88,10 @@
 #define USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI	0x0259
 #define USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO	0x025a
 #define USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS	0x025b
+/* MacbookAir6,2 (unibody, June 2013) */
+#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI	0x0290
+#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO	0x0291
+#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS	0x0292
 
 #define BCM5974_DEVICE(prod) {					\
 	.match_flags = (USB_DEVICE_ID_MATCH_DEVICE |		\
@@ -145,6 +149,10 @@
 	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI),
 	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO),
 	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS),
+	/* MacbookAir6,2 */
+	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI),
+	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING8_ISO),
+	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING8_JIS),
 	/* Terminating entry */
 	{}
 };
@@ -172,15 +180,18 @@
 /* trackpad header types */
 enum tp_type {
 	TYPE1,			/* plain trackpad */
-	TYPE2			/* button integrated in trackpad */
+	TYPE2,			/* button integrated in trackpad */
+	TYPE3			/* additional header fields since June 2013 */
 };
 
 /* trackpad finger data offsets, le16-aligned */
 #define FINGER_TYPE1		(13 * sizeof(__le16))
 #define FINGER_TYPE2		(15 * sizeof(__le16))
+#define FINGER_TYPE3		(19 * sizeof(__le16))
 
 /* trackpad button data offsets */
 #define BUTTON_TYPE2		15
+#define BUTTON_TYPE3		23
 
 /* list of device capability bits */
 #define HAS_INTEGRATED_BUTTON	1
@@ -400,6 +411,19 @@
 		{ SN_COORD, -150, 6730 },
 		{ SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION }
 	},
+	{
+		USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI,
+		USB_DEVICE_ID_APPLE_WELLSPRING8_ISO,
+		USB_DEVICE_ID_APPLE_WELLSPRING8_JIS,
+		HAS_INTEGRATED_BUTTON,
+		0, sizeof(struct bt_data),
+		0x83, TYPE3, FINGER_TYPE3, FINGER_TYPE3 + SIZEOF_ALL_FINGERS,
+		{ SN_PRESSURE, 0, 300 },
+		{ SN_WIDTH, 0, 2048 },
+		{ SN_COORD, -4620, 5140 },
+		{ SN_COORD, -150, 6600 },
+		{ SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION }
+	},
 	{}
 };
 
@@ -557,6 +581,9 @@
 		input_report_key(input, BTN_LEFT, ibt);
 	}
 
+	if (c->tp_type == TYPE3)
+		input_report_key(input, BTN_LEFT, dev->tp_data[BUTTON_TYPE3]);
+
 	input_sync(input);
 
 	return 0;
@@ -572,9 +599,14 @@
 
 static int bcm5974_wellspring_mode(struct bcm5974 *dev, bool on)
 {
-	char *data = kmalloc(8, GFP_KERNEL);
 	int retval = 0, size;
+	char *data;
 
+	/* Type 3 does not require a mode switch */
+	if (dev->cfg.tp_type == TYPE3)
+		return 0;
+
+	data = kmalloc(8, GFP_KERNEL);
 	if (!data) {
 		dev_err(&dev->intf->dev, "out of memory\n");
 		retval = -ENOMEM;
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 21d02b0..a3c3389 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -1484,6 +1484,10 @@
 
 			/* Large PTE found which maps this address */
 			unmap_size = PTE_PAGE_SIZE(*pte);
+
+			/* Only unmap from the first pte in the page */
+			if ((unmap_size - 1) & bus_addr)
+				break;
 			count      = PAGE_SIZE_PTE_COUNT(unmap_size);
 			for (i = 0; i < count; i++)
 				pte[i] = 0ULL;
@@ -1493,7 +1497,7 @@
 		unmapped += unmap_size;
 	}
 
-	BUG_ON(!is_power_of_2(unmapped));
+	BUG_ON(unmapped && !is_power_of_2(unmapped));
 
 	return unmapped;
 }
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index b4f0e28..fa004b1 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -890,56 +890,54 @@
 	return order;
 }
 
+static void dma_pte_free_level(struct dmar_domain *domain, int level,
+			       struct dma_pte *pte, unsigned long pfn,
+			       unsigned long start_pfn, unsigned long last_pfn)
+{
+	pfn = max(start_pfn, pfn);
+	pte = &pte[pfn_level_offset(pfn, level)];
+
+	do {
+		unsigned long level_pfn;
+		struct dma_pte *level_pte;
+
+		if (!dma_pte_present(pte) || dma_pte_superpage(pte))
+			goto next;
+
+		level_pfn = pfn & level_mask(level - 1);
+		level_pte = phys_to_virt(dma_pte_addr(pte));
+
+		if (level > 2)
+			dma_pte_free_level(domain, level - 1, level_pte,
+					   level_pfn, start_pfn, last_pfn);
+
+		/* If range covers entire pagetable, free it */
+		if (!(start_pfn > level_pfn ||
+		      last_pfn < level_pfn + level_size(level))) {
+			dma_clear_pte(pte);
+			domain_flush_cache(domain, pte, sizeof(*pte));
+			free_pgtable_page(level_pte);
+		}
+next:
+		pfn += level_size(level);
+	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);
+}
+
 /* free page table pages. last level pte should already be cleared */
 static void dma_pte_free_pagetable(struct dmar_domain *domain,
 				   unsigned long start_pfn,
 				   unsigned long last_pfn)
 {
 	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
-	struct dma_pte *first_pte, *pte;
-	int total = agaw_to_level(domain->agaw);
-	int level;
-	unsigned long tmp;
-	int large_page = 2;
 
 	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
 	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
 	BUG_ON(start_pfn > last_pfn);
 
 	/* We don't need lock here; nobody else touches the iova range */
-	level = 2;
-	while (level <= total) {
-		tmp = align_to_level(start_pfn, level);
+	dma_pte_free_level(domain, agaw_to_level(domain->agaw),
+			   domain->pgd, 0, start_pfn, last_pfn);
 
-		/* If we can't even clear one PTE at this level, we're done */
-		if (tmp + level_size(level) - 1 > last_pfn)
-			return;
-
-		do {
-			large_page = level;
-			first_pte = pte = dma_pfn_level_pte(domain, tmp, level, &large_page);
-			if (large_page > level)
-				level = large_page + 1;
-			if (!pte) {
-				tmp = align_to_level(tmp + 1, level + 1);
-				continue;
-			}
-			do {
-				if (dma_pte_present(pte)) {
-					free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
-					dma_clear_pte(pte);
-				}
-				pte++;
-				tmp += level_size(level);
-			} while (!first_pte_in_page(pte) &&
-				 tmp + level_size(level) - 1 <= last_pfn);
-
-			domain_flush_cache(domain, first_pte,
-					   (void *)pte - (void *)first_pte);
-			
-		} while (tmp && tmp + level_size(level) - 1 <= last_pfn);
-		level++;
-	}
 	/* free pgd */
 	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
 		free_pgtable_page(domain->pgd);
diff --git a/drivers/leds/leds-wm831x-status.c b/drivers/leds/leds-wm831x-status.c
index 6bd5c67..b7d83d6 100644
--- a/drivers/leds/leds-wm831x-status.c
+++ b/drivers/leds/leds-wm831x-status.c
@@ -230,9 +230,9 @@
 	int id = pdev->id % ARRAY_SIZE(chip_pdata->status);
 	int ret;
 
-	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+	res = platform_get_resource(pdev, IORESOURCE_REG, 0);
 	if (res == NULL) {
-		dev_err(&pdev->dev, "No I/O resource\n");
+		dev_err(&pdev->dev, "No register resource\n");
 		ret = -EINVAL;
 		goto err;
 	}
diff --git a/drivers/macintosh/windfarm_rm31.c b/drivers/macintosh/windfarm_rm31.c
index 0b9a79b..82fc86a 100644
--- a/drivers/macintosh/windfarm_rm31.c
+++ b/drivers/macintosh/windfarm_rm31.c
@@ -439,15 +439,15 @@
 
 /* Slots fan */
 static const struct wf_pid_param slots_param = {
-	.interval	= 5,
-	.history_len	= 2,
-	.gd		= 30 << 20,
-	.gp		= 5 << 20,
-	.gr		= 0,
-	.itarget	= 40 << 16,
-	.additive	= 1,
-	.min		= 300,
-	.max		= 4000,
+	.interval	= 1,
+	.history_len	= 20,
+	.gd		= 0,
+	.gp		= 0,
+	.gr		= 0x00100000,
+	.itarget	= 3200000,
+	.additive	= 0,
+	.min		= 20,
+	.max		= 100,
 };
 
 static void slots_fan_tick(void)
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index d3e15b4..6bc016e 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -437,6 +437,7 @@
 
 	/* If nonzero, we're detaching/unregistering from cache set */
 	atomic_t		detaching;
+	int			flush_done;
 
 	atomic_long_t		sectors_dirty;
 	unsigned long		sectors_dirty_gc;
@@ -498,7 +499,7 @@
 	 */
 	atomic_t		has_dirty;
 
-	struct ratelimit	writeback_rate;
+	struct bch_ratelimit	writeback_rate;
 	struct delayed_work	writeback_rate_update;
 
 	/*
@@ -507,10 +508,9 @@
 	 */
 	sector_t		last_read;
 
-	/* Number of writeback bios in flight */
-	atomic_t		in_flight;
+	/* Limit number of writeback bios in flight */
+	struct semaphore	in_flight;
 	struct closure_with_timer writeback;
-	struct closure_waitlist	writeback_wait;
 
 	struct keybuf		writeback_keys;
 
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index cb4578a..14032e8 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -918,28 +918,45 @@
 
 /* Mergesort */
 
+static void sort_key_next(struct btree_iter *iter,
+			  struct btree_iter_set *i)
+{
+	i->k = bkey_next(i->k);
+
+	if (i->k == i->end)
+		*i = iter->data[--iter->used];
+}
+
 static void btree_sort_fixup(struct btree_iter *iter)
 {
 	while (iter->used > 1) {
 		struct btree_iter_set *top = iter->data, *i = top + 1;
-		struct bkey *k;
 
 		if (iter->used > 2 &&
 		    btree_iter_cmp(i[0], i[1]))
 			i++;
 
-		for (k = i->k;
-		     k != i->end && bkey_cmp(top->k, &START_KEY(k)) > 0;
-		     k = bkey_next(k))
-			if (top->k > i->k)
-				__bch_cut_front(top->k, k);
-			else if (KEY_SIZE(k))
-				bch_cut_back(&START_KEY(k), top->k);
-
-		if (top->k < i->k || k == i->k)
+		if (bkey_cmp(top->k, &START_KEY(i->k)) <= 0)
 			break;
 
-		heap_sift(iter, i - top, btree_iter_cmp);
+		if (!KEY_SIZE(i->k)) {
+			sort_key_next(iter, i);
+			heap_sift(iter, i - top, btree_iter_cmp);
+			continue;
+		}
+
+		if (top->k > i->k) {
+			if (bkey_cmp(top->k, i->k) >= 0)
+				sort_key_next(iter, i);
+			else
+				bch_cut_front(top->k, i->k);
+
+			heap_sift(iter, i - top, btree_iter_cmp);
+		} else {
+			/* can't happen because of comparison func */
+			BUG_ON(!bkey_cmp(&START_KEY(top->k), &START_KEY(i->k)));
+			bch_cut_back(&START_KEY(i->k), top->k);
+		}
 	}
 }
 
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 7a5658f..7d3deab 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -326,10 +326,25 @@
 	i->csum		= btree_csum_set(b, i);
 
 	btree_bio_init(b);
-	b->bio->bi_rw	= REQ_META|WRITE_SYNC;
+	b->bio->bi_rw	= REQ_META|WRITE_SYNC|REQ_FUA;
 	b->bio->bi_size	= set_blocks(i, b->c) * block_bytes(b->c);
 	bch_bio_map(b->bio, i);
 
+	/*
+	 * If we're appending to a leaf node, we don't technically need FUA -
+	 * this write just needs to be persisted before the next journal write,
+	 * which will be marked FLUSH|FUA.
+	 *
+	 * Similarly if we're writing a new btree root - the pointer is going to
+	 * be in the next journal entry.
+	 *
+	 * But if we're writing a new btree node (that isn't a root) or
+	 * appending to a non leaf btree node, we need either FUA or a flush
+	 * when we write the parent with the new pointer. FUA is cheaper than a
+	 * flush, and writes appending to leaf nodes aren't blocking anything so
+	 * just make all btree node writes FUA to keep things sane.
+	 */
+
 	bkey_copy(&k.key, &b->key);
 	SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) + bset_offset(b, i));
 
@@ -618,7 +633,7 @@
 		return mca_can_free(c) * c->btree_pages;
 
 	/* Return -1 if we can't do anything right now */
-	if (sc->gfp_mask & __GFP_WAIT)
+	if (sc->gfp_mask & __GFP_IO)
 		mutex_lock(&c->bucket_lock);
 	else if (!mutex_trylock(&c->bucket_lock))
 		return -1;
@@ -1419,8 +1434,10 @@
 	for_each_cache(ca, c, i)
 		for_each_bucket(b, ca) {
 			b->gc_gen = b->gen;
-			if (!atomic_read(&b->pin))
+			if (!atomic_read(&b->pin)) {
 				SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
+				SET_GC_SECTORS_USED(b, 0);
+			}
 		}
 
 	for (d = c->devices;
@@ -2140,6 +2157,9 @@
 void bch_btree_set_root(struct btree *b)
 {
 	unsigned i;
+	struct closure cl;
+
+	closure_init_stack(&cl);
 
 	BUG_ON(!b->written);
 
@@ -2153,8 +2173,9 @@
 	b->c->root = b;
 	__bkey_put(b->c, &b->key);
 
-	bch_journal_meta(b->c, NULL);
+	bch_journal_meta(b->c, &cl);
 	pr_debug("%s for %pf", pbtree(b), __builtin_return_address(0));
+	closure_sync(&cl);
 }
 
 /* Cache lookup */
diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c
index bd05a9a..9aba201 100644
--- a/drivers/md/bcache/closure.c
+++ b/drivers/md/bcache/closure.c
@@ -66,16 +66,18 @@
 		} else {
 			struct closure *parent = cl->parent;
 			struct closure_waitlist *wait = closure_waitlist(cl);
+			closure_fn *destructor = cl->fn;
 
 			closure_debug_destroy(cl);
 
+			smp_mb();
 			atomic_set(&cl->remaining, -1);
 
 			if (wait)
 				closure_wake_up(wait);
 
-			if (cl->fn)
-				cl->fn(cl);
+			if (destructor)
+				destructor(cl);
 
 			if (parent)
 				closure_put(parent);
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index 48efd4d..d285cd4 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -97,6 +97,8 @@
 
 	if (bio->bi_rw & REQ_DISCARD) {
 		ret = bio_alloc_bioset(gfp, 1, bs);
+		if (!ret)
+			return NULL;
 		idx = 0;
 		goto out;
 	}
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 8c8dfdc..151a4ab 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -151,7 +151,8 @@
 		bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
 		pr_debug("%u journal buckets", ca->sb.njournal_buckets);
 
-		/* Read journal buckets ordered by golden ratio hash to quickly
+		/*
+		 * Read journal buckets ordered by golden ratio hash to quickly
 		 * find a sequence of buckets with valid journal entries
 		 */
 		for (i = 0; i < ca->sb.njournal_buckets; i++) {
@@ -164,36 +165,45 @@
 				goto bsearch;
 		}
 
-		/* If that fails, check all the buckets we haven't checked
+		/*
+		 * If that fails, check all the buckets we haven't checked
 		 * already
 		 */
 		pr_debug("falling back to linear search");
 
-		for (l = 0; l < ca->sb.njournal_buckets; l++) {
-			if (test_bit(l, bitmap))
-				continue;
-
+		for (l = find_first_zero_bit(bitmap, ca->sb.njournal_buckets);
+		     l < ca->sb.njournal_buckets;
+		     l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets, l + 1))
 			if (read_bucket(l))
 				goto bsearch;
-		}
+
+		if (list_empty(list))
+			continue;
 bsearch:
 		/* Binary search */
 		m = r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
 		pr_debug("starting binary search, l %u r %u", l, r);
 
 		while (l + 1 < r) {
-			m = (l + r) >> 1;
+			seq = list_entry(list->prev, struct journal_replay,
+					 list)->j.seq;
 
-			if (read_bucket(m))
+			m = (l + r) >> 1;
+			read_bucket(m);
+
+			if (seq != list_entry(list->prev, struct journal_replay,
+					      list)->j.seq)
 				l = m;
 			else
 				r = m;
 		}
 
-		/* Read buckets in reverse order until we stop finding more
+		/*
+		 * Read buckets in reverse order until we stop finding more
 		 * journal entries
 		 */
-		pr_debug("finishing up");
+		pr_debug("finishing up: m %u njournal_buckets %u",
+			 m, ca->sb.njournal_buckets);
 		l = m;
 
 		while (1) {
@@ -221,9 +231,10 @@
 			}
 	}
 
-	c->journal.seq = list_entry(list->prev,
-				    struct journal_replay,
-				    list)->j.seq;
+	if (!list_empty(list))
+		c->journal.seq = list_entry(list->prev,
+					    struct journal_replay,
+					    list)->j.seq;
 
 	return 0;
 #undef read_bucket
@@ -420,7 +431,7 @@
 		return;
 	}
 
-	switch (atomic_read(&ja->discard_in_flight) == DISCARD_IN_FLIGHT) {
+	switch (atomic_read(&ja->discard_in_flight)) {
 	case DISCARD_IN_FLIGHT:
 		return;
 
@@ -617,7 +628,7 @@
 		bio_reset(bio);
 		bio->bi_sector	= PTR_OFFSET(k, i);
 		bio->bi_bdev	= ca->bdev;
-		bio->bi_rw	= REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH;
+		bio->bi_rw	= REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA;
 		bio->bi_size	= sectors << 9;
 
 		bio->bi_end_io	= journal_write_endio;
@@ -681,6 +692,7 @@
 		if (cl)
 			BUG_ON(!closure_wait(&w->wait, cl));
 
+		closure_flush(&c->journal.io);
 		__journal_try_write(c, true);
 	}
 }
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index e5ff12e5..a30a0f8 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -489,6 +489,12 @@
 		bch_queue_gc(op->c);
 	}
 
+	/*
+	 * Journal writes are marked REQ_FLUSH; if the original write was a
+	 * flush, it'll wait on the journal write.
+	 */
+	bio->bi_rw &= ~(REQ_FLUSH|REQ_FUA);
+
 	do {
 		unsigned i;
 		struct bkey *k;
@@ -716,7 +722,7 @@
 	s->task			= current;
 	s->orig_bio		= bio;
 	s->write		= (bio->bi_rw & REQ_WRITE) != 0;
-	s->op.flush_journal	= (bio->bi_rw & REQ_FLUSH) != 0;
+	s->op.flush_journal	= (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
 	s->op.skip		= (bio->bi_rw & REQ_DISCARD) != 0;
 	s->recoverable		= 1;
 	s->start_time		= jiffies;
@@ -1047,9 +1053,22 @@
 		trace_bcache_writethrough(s->orig_bio);
 		closure_bio_submit(bio, cl, s->d);
 	} else {
-		s->op.cache_bio = bio;
 		trace_bcache_writeback(s->orig_bio);
 		bch_writeback_add(dc, bio_sectors(bio));
+		s->op.cache_bio = bio;
+
+		if (bio->bi_rw & REQ_FLUSH) {
+			/* Also need to send a flush to the backing device */
+			struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
+							     dc->disk.bio_split);
+
+			flush->bi_rw	= WRITE_FLUSH;
+			flush->bi_bdev	= bio->bi_bdev;
+			flush->bi_end_io = request_endio;
+			flush->bi_private = cl;
+
+			closure_bio_submit(flush, cl, s->d);
+		}
 	}
 out:
 	closure_call(&s->op.cl, bch_insert_data, NULL, cl);
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index f88e2b6..b4713ce 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -704,7 +704,8 @@
 		atomic_set(&d->detaching, 0);
 	}
 
-	bcache_device_unlink(d);
+	if (!d->flush_done)
+		bcache_device_unlink(d);
 
 	d->c->devices[d->id] = NULL;
 	closure_put(&d->c->caching);
@@ -781,6 +782,8 @@
 	set_bit(QUEUE_FLAG_NONROT,	&d->disk->queue->queue_flags);
 	set_bit(QUEUE_FLAG_DISCARD,	&d->disk->queue->queue_flags);
 
+	blk_queue_flush(q, REQ_FLUSH|REQ_FUA);
+
 	return 0;
 }
 
@@ -1014,6 +1017,14 @@
 	struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
 	struct bcache_device *d = &dc->disk;
 
+	mutex_lock(&bch_register_lock);
+	d->flush_done = 1;
+
+	if (d->c)
+		bcache_device_unlink(d);
+
+	mutex_unlock(&bch_register_lock);
+
 	bch_cache_accounting_destroy(&dc->accounting);
 	kobject_del(&d->kobj);
 
@@ -1303,18 +1314,22 @@
 static void __cache_set_unregister(struct closure *cl)
 {
 	struct cache_set *c = container_of(cl, struct cache_set, caching);
-	struct cached_dev *dc, *t;
+	struct cached_dev *dc;
 	size_t i;
 
 	mutex_lock(&bch_register_lock);
 
-	if (test_bit(CACHE_SET_UNREGISTERING, &c->flags))
-		list_for_each_entry_safe(dc, t, &c->cached_devs, list)
-			bch_cached_dev_detach(dc);
-
 	for (i = 0; i < c->nr_uuids; i++)
-		if (c->devices[i] && UUID_FLASH_ONLY(&c->uuids[i]))
-			bcache_device_stop(c->devices[i]);
+		if (c->devices[i]) {
+			if (!UUID_FLASH_ONLY(&c->uuids[i]) &&
+			    test_bit(CACHE_SET_UNREGISTERING, &c->flags)) {
+				dc = container_of(c->devices[i],
+						  struct cached_dev, disk);
+				bch_cached_dev_detach(dc);
+			} else {
+				bcache_device_stop(c->devices[i]);
+			}
+		}
 
 	mutex_unlock(&bch_register_lock);
 
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 4d9cca4..e9bd6c0 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -214,7 +214,13 @@
 	}
 
 	if (attr == &sysfs_label) {
-		memcpy(dc->sb.label, buf, SB_LABEL_SIZE);
+		if (size > SB_LABEL_SIZE)
+			return -EINVAL;
+		memcpy(dc->sb.label, buf, size);
+		if (size < SB_LABEL_SIZE)
+			dc->sb.label[size] = '\0';
+		if (size && dc->sb.label[size - 1] == '\n')
+			dc->sb.label[size - 1] = '\0';
 		bch_write_bdev_super(dc, NULL);
 		if (dc->disk.c) {
 			memcpy(dc->disk.c->uuids[dc->disk.id].label,
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
index da3a99e..38a43f8 100644
--- a/drivers/md/bcache/util.c
+++ b/drivers/md/bcache/util.c
@@ -190,7 +190,16 @@
 	stats->last = now ?: 1;
 }
 
-unsigned bch_next_delay(struct ratelimit *d, uint64_t done)
+/**
+ * bch_next_delay() - increment @d by the amount of work done, and return how
+ * long to delay until the next time to do some work.
+ *
+ * @d - the struct bch_ratelimit to update
+ * @done - the amount of work done, in arbitrary units
+ *
+ * Returns the amount of time to delay by, in jiffies
+ */
+uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done)
 {
 	uint64_t now = local_clock();
 
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index 577393e..43fd78a 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -452,17 +452,23 @@
 	(ewma) >> factor;						\
 })
 
-struct ratelimit {
+struct bch_ratelimit {
+	/* Next time we want to do some work, in nanoseconds */
 	uint64_t		next;
+
+	/*
+	 * Rate at which we want to do work, in units per nanosecond
+	 * The units here correspond to the units passed to bch_next_delay()
+	 */
 	unsigned		rate;
 };
 
-static inline void ratelimit_reset(struct ratelimit *d)
+static inline void bch_ratelimit_reset(struct bch_ratelimit *d)
 {
 	d->next = local_clock();
 }
 
-unsigned bch_next_delay(struct ratelimit *d, uint64_t done);
+uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done);
 
 #define __DIV_SAFE(n, d, zero)						\
 ({									\
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 2714ed3..841f049 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -91,11 +91,15 @@
 
 static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
 {
+	uint64_t ret;
+
 	if (atomic_read(&dc->disk.detaching) ||
 	    !dc->writeback_percent)
 		return 0;
 
-	return bch_next_delay(&dc->writeback_rate, sectors * 10000000ULL);
+	ret = bch_next_delay(&dc->writeback_rate, sectors * 10000000ULL);
+
+	return min_t(uint64_t, ret, HZ);
 }
 
 /* Background writeback */
@@ -165,7 +169,7 @@
 
 	up_write(&dc->writeback_lock);
 
-	ratelimit_reset(&dc->writeback_rate);
+	bch_ratelimit_reset(&dc->writeback_rate);
 
 	/* Punt to workqueue only so we don't recurse and blow the stack */
 	continue_at(cl, read_dirty, dirty_wq);
@@ -246,9 +250,7 @@
 	}
 
 	bch_keybuf_del(&dc->writeback_keys, w);
-	atomic_dec_bug(&dc->in_flight);
-
-	closure_wake_up(&dc->writeback_wait);
+	up(&dc->in_flight);
 
 	closure_return_with_destructor(cl, dirty_io_destructor);
 }
@@ -278,7 +280,7 @@
 	trace_bcache_write_dirty(&io->bio);
 	closure_bio_submit(&io->bio, cl, &io->dc->disk);
 
-	continue_at(cl, write_dirty_finish, dirty_wq);
+	continue_at(cl, write_dirty_finish, system_wq);
 }
 
 static void read_dirty_endio(struct bio *bio, int error)
@@ -299,7 +301,7 @@
 	trace_bcache_read_dirty(&io->bio);
 	closure_bio_submit(&io->bio, cl, &io->dc->disk);
 
-	continue_at(cl, write_dirty, dirty_wq);
+	continue_at(cl, write_dirty, system_wq);
 }
 
 static void read_dirty(struct closure *cl)
@@ -324,12 +326,8 @@
 
 		if (delay > 0 &&
 		    (KEY_START(&w->key) != dc->last_read ||
-		     jiffies_to_msecs(delay) > 50)) {
-			w->private = NULL;
-
-			closure_delay(&dc->writeback, delay);
-			continue_at(cl, read_dirty, dirty_wq);
-		}
+		     jiffies_to_msecs(delay) > 50))
+			delay = schedule_timeout_uninterruptible(delay);
 
 		dc->last_read	= KEY_OFFSET(&w->key);
 
@@ -354,15 +352,10 @@
 
 		pr_debug("%s", pkey(&w->key));
 
-		closure_call(&io->cl, read_dirty_submit, NULL, &dc->disk.cl);
+		down(&dc->in_flight);
+		closure_call(&io->cl, read_dirty_submit, NULL, cl);
 
 		delay = writeback_delay(dc, KEY_SIZE(&w->key));
-
-		atomic_inc(&dc->in_flight);
-
-		if (!closure_wait_event(&dc->writeback_wait, cl,
-					atomic_read(&dc->in_flight) < 64))
-			continue_at(cl, read_dirty, dirty_wq);
 	}
 
 	if (0) {
@@ -372,11 +365,16 @@
 		bch_keybuf_del(&dc->writeback_keys, w);
 	}
 
-	refill_dirty(cl);
+	/*
+	 * Wait for outstanding writeback IOs to finish (and keybuf slots to be
+	 * freed) before refilling again
+	 */
+	continue_at(cl, refill_dirty, dirty_wq);
 }
 
 void bch_cached_dev_writeback_init(struct cached_dev *dc)
 {
+	sema_init(&dc->in_flight, 64);
 	closure_init_unlocked(&dc->writeback);
 	init_rwsem(&dc->writeback_lock);
 
@@ -406,7 +404,7 @@
 
 int __init bch_writeback_init(void)
 {
-	dirty_wq = create_singlethread_workqueue("bcache_writeback");
+	dirty_wq = create_workqueue("bcache_writeback");
 	if (!dirty_wq)
 		return -ENOMEM;
 
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index aa04f02..81a79b7 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1644,7 +1644,10 @@
 	}
 
 	if (!dmi) {
+		unsigned noio_flag;
+		noio_flag = memalloc_noio_save();
 		dmi = __vmalloc(param_kernel->data_size, GFP_NOIO | __GFP_REPEAT | __GFP_HIGH, PAGE_KERNEL);
+		memalloc_noio_restore(noio_flag);
 		if (dmi)
 			*param_flags |= DM_PARAMS_VMALLOC;
 	}
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index bdf26f5..c5b7b3d 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -1284,8 +1284,17 @@
 	if (!error && !clone->errors)
 		return 0;	/* I/O complete */
 
-	if (error == -EOPNOTSUPP || error == -EREMOTEIO || error == -EILSEQ)
+	if (error == -EOPNOTSUPP || error == -EREMOTEIO || error == -EILSEQ) {
+		if ((clone->cmd_flags & REQ_WRITE_SAME) &&
+		    !clone->q->limits.max_write_same_sectors) {
+			struct queue_limits *limits;
+
+			/* device doesn't really support WRITE SAME, disable it */
+			limits = dm_get_queue_limits(dm_table_get_md(m->ti->table));
+			limits->max_write_same_sectors = 0;
+		}
 		return error;
+	}
 
 	if (mpio->pgpath)
 		fail_path(mpio->pgpath);
@@ -1561,7 +1570,6 @@
 	unsigned long flags;
 	int r;
 
-again:
 	bdev = NULL;
 	mode = 0;
 	r = 0;
@@ -1579,7 +1587,7 @@
 	}
 
 	if ((pgpath && m->queue_io) || (!pgpath && m->queue_if_no_path))
-		r = -EAGAIN;
+		r = -ENOTCONN;
 	else if (!bdev)
 		r = -EIO;
 
@@ -1591,11 +1599,8 @@
 	if (!r && ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT)
 		r = scsi_verify_blk_ioctl(NULL, cmd);
 
-	if (r == -EAGAIN && !fatal_signal_pending(current)) {
+	if (r == -ENOTCONN && !fatal_signal_pending(current))
 		queue_work(kmultipathd, &m->process_queued_ios);
-		msleep(10);
-		goto again;
-	}
 
 	return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
 }
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 1d3fe1a..2dea49c 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -380,7 +380,7 @@
 static int validate_raid_redundancy(struct raid_set *rs)
 {
 	unsigned i, rebuild_cnt = 0;
-	unsigned rebuilds_per_group, copies, d;
+	unsigned rebuilds_per_group = 0, copies, d;
 	unsigned group_size, last_group_start;
 
 	for (i = 0; i < rs->md.raid_disks; i++)
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 3ac4156..2d2b1b7 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -256,7 +256,7 @@
 	 */
 	INIT_WORK_ONSTACK(&req.work, do_metadata);
 	queue_work(ps->metadata_wq, &req.work);
-	flush_work(&req.work);
+	flush_workqueue(ps->metadata_wq);
 
 	return req.result;
 }
@@ -269,6 +269,14 @@
 	return NUM_SNAPSHOT_HDR_CHUNKS + ((ps->exceptions_per_area + 1) * area);
 }
 
+static void skip_metadata(struct pstore *ps)
+{
+	uint32_t stride = ps->exceptions_per_area + 1;
+	chunk_t next_free = ps->next_free;
+	if (sector_div(next_free, stride) == NUM_SNAPSHOT_HDR_CHUNKS)
+		ps->next_free++;
+}
+
 /*
  * Read or write a metadata area.  Remembering to skip the first
  * chunk which holds the header.
@@ -502,6 +510,8 @@
 
 	ps->current_area--;
 
+	skip_metadata(ps);
+
 	return 0;
 }
 
@@ -616,8 +626,6 @@
 					struct dm_exception *e)
 {
 	struct pstore *ps = get_info(store);
-	uint32_t stride;
-	chunk_t next_free;
 	sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev);
 
 	/* Is there enough room ? */
@@ -630,10 +638,8 @@
 	 * Move onto the next free pending, making sure to take
 	 * into account the location of the metadata chunks.
 	 */
-	stride = (ps->exceptions_per_area + 1);
-	next_free = ++ps->next_free;
-	if (sector_div(next_free, stride) == 1)
-		ps->next_free++;
+	ps->next_free++;
+	skip_metadata(ps);
 
 	atomic_inc(&ps->pending_count);
 	return 0;
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index c434e5a..aec57d7 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -725,17 +725,16 @@
  */
 static int init_hash_tables(struct dm_snapshot *s)
 {
-	sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;
+	sector_t hash_size, cow_dev_size, max_buckets;
 
 	/*
 	 * Calculate based on the size of the original volume or
 	 * the COW volume...
 	 */
 	cow_dev_size = get_dev_size(s->cow->bdev);
-	origin_dev_size = get_dev_size(s->origin->bdev);
 	max_buckets = calc_max_buckets();
 
-	hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift;
+	hash_size = cow_dev_size >> s->store->chunk_shift;
 	hash_size = min(hash_size, max_buckets);
 
 	if (hash_size < 64)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index d5370a9..1c13071 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -386,10 +386,12 @@
 			unsigned int cmd, unsigned long arg)
 {
 	struct mapped_device *md = bdev->bd_disk->private_data;
-	struct dm_table *map = dm_get_live_table(md);
+	struct dm_table *map;
 	struct dm_target *tgt;
 	int r = -ENOTTY;
 
+retry:
+	map = dm_get_live_table(md);
 	if (!map || !dm_table_get_size(map))
 		goto out;
 
@@ -410,6 +412,11 @@
 out:
 	dm_table_put(map);
 
+	if (r == -ENOTCONN) {
+		msleep(10);
+		goto retry;
+	}
+
 	return r;
 }
 
@@ -2212,6 +2219,17 @@
 }
 
 /*
+ * The queue_limits are only valid as long as you have a reference
+ * count on 'md'.
+ */
+struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
+{
+	BUG_ON(!atomic_read(&md->holders));
+	return &md->queue->limits;
+}
+EXPORT_SYMBOL_GPL(dm_get_queue_limits);
+
+/*
  * Fully initialize a request-based queue (->elevator, ->request_fn, etc).
  */
 static int dm_init_request_based_queue(struct mapped_device *md)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 9b82377..d78f1ff 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -7697,20 +7697,6 @@
 			continue;
 
 		rdev->recovery_offset = 0;
-		if (rdev->saved_raid_disk >= 0 && mddev->in_sync) {
-			spin_lock_irq(&mddev->write_lock);
-			if (mddev->in_sync)
-				/* OK, this device, which is in_sync,
-				 * will definitely be noticed before
-				 * the next write, so recovery isn't
-				 * needed.
-				 */
-				rdev->recovery_offset = mddev->recovery_cp;
-			spin_unlock_irq(&mddev->write_lock);
-		}
-		if (mddev->ro && rdev->recovery_offset != MaxSector)
-			/* not safe to add this disk now */
-			continue;
 		if (mddev->pers->
 		    hot_add_disk(mddev, rdev) == 0) {
 			if (sysfs_link_rdev(mddev, rdev))
@@ -8086,6 +8072,7 @@
 	u64 *p;
 	int lo, hi;
 	int rv = 1;
+	unsigned long flags;
 
 	if (bb->shift < 0)
 		/* badblocks are disabled */
@@ -8100,7 +8087,7 @@
 		sectors = next - s;
 	}
 
-	write_seqlock_irq(&bb->lock);
+	write_seqlock_irqsave(&bb->lock, flags);
 
 	p = bb->page;
 	lo = 0;
@@ -8216,7 +8203,7 @@
 	bb->changed = 1;
 	if (!acknowledged)
 		bb->unacked_exist = 1;
-	write_sequnlock_irq(&bb->lock);
+	write_sequnlock_irqrestore(&bb->lock, flags);
 
 	return rv;
 }
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 6e17f81..afaa5d4 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1479,6 +1479,7 @@
 			}
 		}
 		if (rdev
+		    && rdev->recovery_offset == MaxSector
 		    && !test_bit(Faulty, &rdev->flags)
 		    && !test_and_set_bit(In_sync, &rdev->flags)) {
 			count++;
@@ -1848,6 +1849,36 @@
 	int i;
 	int vcnt;
 
+	/* Fix variable parts of all bios */
+	vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
+	for (i = 0; i < conf->raid_disks * 2; i++) {
+		int j;
+		int size;
+		struct bio *b = r1_bio->bios[i];
+		if (b->bi_end_io != end_sync_read)
+			continue;
+		/* fixup the bio for reuse */
+		bio_reset(b);
+		b->bi_vcnt = vcnt;
+		b->bi_size = r1_bio->sectors << 9;
+		b->bi_sector = r1_bio->sector +
+			conf->mirrors[i].rdev->data_offset;
+		b->bi_bdev = conf->mirrors[i].rdev->bdev;
+		b->bi_end_io = end_sync_read;
+		b->bi_private = r1_bio;
+
+		size = b->bi_size;
+		for (j = 0; j < vcnt ; j++) {
+			struct bio_vec *bi;
+			bi = &b->bi_io_vec[j];
+			bi->bv_offset = 0;
+			if (size > PAGE_SIZE)
+				bi->bv_len = PAGE_SIZE;
+			else
+				bi->bv_len = size;
+			size -= PAGE_SIZE;
+		}
+	}
 	for (primary = 0; primary < conf->raid_disks * 2; primary++)
 		if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
 		    test_bit(BIO_UPTODATE, &r1_bio->bios[primary]->bi_flags)) {
@@ -1856,12 +1887,10 @@
 			break;
 		}
 	r1_bio->read_disk = primary;
-	vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
 	for (i = 0; i < conf->raid_disks * 2; i++) {
 		int j;
 		struct bio *pbio = r1_bio->bios[primary];
 		struct bio *sbio = r1_bio->bios[i];
-		int size;
 
 		if (sbio->bi_end_io != end_sync_read)
 			continue;
@@ -1887,27 +1916,6 @@
 			rdev_dec_pending(conf->mirrors[i].rdev, mddev);
 			continue;
 		}
-		/* fixup the bio for reuse */
-		bio_reset(sbio);
-		sbio->bi_vcnt = vcnt;
-		sbio->bi_size = r1_bio->sectors << 9;
-		sbio->bi_sector = r1_bio->sector +
-			conf->mirrors[i].rdev->data_offset;
-		sbio->bi_bdev = conf->mirrors[i].rdev->bdev;
-		sbio->bi_end_io = end_sync_read;
-		sbio->bi_private = r1_bio;
-
-		size = sbio->bi_size;
-		for (j = 0; j < vcnt ; j++) {
-			struct bio_vec *bi;
-			bi = &sbio->bi_io_vec[j];
-			bi->bv_offset = 0;
-			if (size > PAGE_SIZE)
-				bi->bv_len = PAGE_SIZE;
-			else
-				bi->bv_len = size;
-			size -= PAGE_SIZE;
-		}
 
 		bio_copy_data(sbio, pbio);
 	}
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 6ddae25..0add868 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1762,6 +1762,7 @@
 			}
 			sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
 		} else if (tmp->rdev
+			   && tmp->rdev->recovery_offset == MaxSector
 			   && !test_bit(Faulty, &tmp->rdev->flags)
 			   && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
 			count++;
@@ -2075,11 +2076,17 @@
 			 * both 'first' and 'i', so we just compare them.
 			 * All vec entries are PAGE_SIZE;
 			 */
-			for (j = 0; j < vcnt; j++)
+			int sectors = r10_bio->sectors;
+			for (j = 0; j < vcnt; j++) {
+				int len = PAGE_SIZE;
+				if (sectors < (len / 512))
+					len = sectors * 512;
 				if (memcmp(page_address(fbio->bi_io_vec[j].bv_page),
 					   page_address(tbio->bi_io_vec[j].bv_page),
-					   fbio->bi_io_vec[j].bv_len))
+					   len))
 					break;
+				sectors -= len/512;
+			}
 			if (j == vcnt)
 				continue;
 			atomic64_add(r10_bio->sectors, &mddev->resync_mismatches);
@@ -2262,12 +2269,18 @@
 	d = r10_bio->devs[1].devnum;
 	wbio = r10_bio->devs[1].bio;
 	wbio2 = r10_bio->devs[1].repl_bio;
+	/* Need to test wbio2->bi_end_io before we call
+	 * generic_make_request as if the former is NULL,
+	 * the latter is free to free wbio2.
+	 */
+	if (wbio2 && !wbio2->bi_end_io)
+		wbio2 = NULL;
 	if (wbio->bi_end_io) {
 		atomic_inc(&conf->mirrors[d].rdev->nr_pending);
 		md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
 		generic_make_request(wbio);
 	}
-	if (wbio2 && wbio2->bi_end_io) {
+	if (wbio2) {
 		atomic_inc(&conf->mirrors[d].replacement->nr_pending);
 		md_sync_acct(conf->mirrors[d].replacement->bdev,
 			     bio_sectors(wbio2));
@@ -2909,14 +2922,13 @@
 	 */
 	if (mddev->bitmap == NULL &&
 	    mddev->recovery_cp == MaxSector &&
+	    mddev->reshape_position == MaxSector &&
+	    !test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
 	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
+	    !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
 	    conf->fullsync == 0) {
 		*skipped = 1;
-		max_sector = mddev->dev_sectors;
-		if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
-		    test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
-			max_sector = mddev->resync_max_sectors;
-		return max_sector - sector_nr;
+		return mddev->dev_sectors - sector_nr;
 	}
 
  skipped:
@@ -3386,6 +3398,7 @@
 
 		if (bio->bi_end_io == end_sync_read) {
 			md_sync_acct(bio->bi_bdev, nr_sectors);
+			set_bit(BIO_UPTODATE, &bio->bi_flags);
 			generic_make_request(bio);
 		}
 	}
@@ -3532,7 +3545,7 @@
 
 	/* FIXME calc properly */
 	conf->mirrors = kzalloc(sizeof(struct raid10_info)*(mddev->raid_disks +
-							    max(0,mddev->delta_disks)),
+							    max(0,-mddev->delta_disks)),
 				GFP_KERNEL);
 	if (!conf->mirrors)
 		goto out;
@@ -3691,7 +3704,7 @@
 		    conf->geo.far_offset == 0)
 			goto out_free_conf;
 		if (conf->prev.far_copies != 1 &&
-		    conf->geo.far_offset == 0)
+		    conf->prev.far_offset == 0)
 			goto out_free_conf;
 	}
 
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 05e4a10..4bed545 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -668,6 +668,12 @@
 			bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
 			bi->bi_io_vec[0].bv_offset = 0;
 			bi->bi_size = STRIPE_SIZE;
+			/*
+			 * If this is discard request, set bi_vcnt 0. We don't
+			 * want to confuse SCSI because SCSI will replace payload
+			 */
+			if (rw & REQ_DISCARD)
+				bi->bi_vcnt = 0;
 			if (rrdev)
 				set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
 
@@ -706,6 +712,12 @@
 			rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
 			rbi->bi_io_vec[0].bv_offset = 0;
 			rbi->bi_size = STRIPE_SIZE;
+			/*
+			 * If this is discard request, set bi_vcnt 0. We don't
+			 * want to confuse SCSI because SCSI will replace payload
+			 */
+			if (rw & REQ_DISCARD)
+				rbi->bi_vcnt = 0;
 			if (conf->mddev->gendisk)
 				trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
 						      rbi, disk_devt(conf->mddev->gendisk),
@@ -2800,6 +2812,14 @@
 		}
 		/* now that discard is done we can proceed with any sync */
 		clear_bit(STRIPE_DISCARD, &sh->state);
+		/*
+		 * SCSI discard will change some bio fields and the stripe has
+		 * no updated data, so remove it from hash list and the stripe
+		 * will be reinitialized
+		 */
+		spin_lock_irq(&conf->device_lock);
+		remove_hash(sh);
+		spin_unlock_irq(&conf->device_lock);
 		if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state))
 			set_bit(STRIPE_HANDLE, &sh->state);
 
@@ -3462,6 +3482,7 @@
 		    test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
 			set_bit(STRIPE_SYNCING, &sh->state);
 			clear_bit(STRIPE_INSYNC, &sh->state);
+			clear_bit(STRIPE_REPLACED, &sh->state);
 		}
 		spin_unlock(&sh->stripe_lock);
 	}
@@ -3607,19 +3628,23 @@
 			handle_parity_checks5(conf, sh, &s, disks);
 	}
 
-	if (s.replacing && s.locked == 0
-	    && !test_bit(STRIPE_INSYNC, &sh->state)) {
+	if ((s.replacing || s.syncing) && s.locked == 0
+	    && !test_bit(STRIPE_COMPUTE_RUN, &sh->state)
+	    && !test_bit(STRIPE_REPLACED, &sh->state)) {
 		/* Write out to replacement devices where possible */
 		for (i = 0; i < conf->raid_disks; i++)
-			if (test_bit(R5_UPTODATE, &sh->dev[i].flags) &&
-			    test_bit(R5_NeedReplace, &sh->dev[i].flags)) {
+			if (test_bit(R5_NeedReplace, &sh->dev[i].flags)) {
+				WARN_ON(!test_bit(R5_UPTODATE, &sh->dev[i].flags));
 				set_bit(R5_WantReplace, &sh->dev[i].flags);
 				set_bit(R5_LOCKED, &sh->dev[i].flags);
 				s.locked++;
 			}
-		set_bit(STRIPE_INSYNC, &sh->state);
+		if (s.replacing)
+			set_bit(STRIPE_INSYNC, &sh->state);
+		set_bit(STRIPE_REPLACED, &sh->state);
 	}
 	if ((s.syncing || s.replacing) && s.locked == 0 &&
+	    !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
 	    test_bit(STRIPE_INSYNC, &sh->state)) {
 		md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
 		clear_bit(STRIPE_SYNCING, &sh->state);
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index b0b663b..70c4932 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -306,6 +306,7 @@
 	STRIPE_SYNC_REQUESTED,
 	STRIPE_SYNCING,
 	STRIPE_INSYNC,
+	STRIPE_REPLACED,
 	STRIPE_PREREAD_ACTIVE,
 	STRIPE_DELAYED,
 	STRIPE_DEGRADED,
diff --git a/drivers/media/common/siano/smsdvb-main.c b/drivers/media/common/siano/smsdvb-main.c
index 297f1b2..8df1aea 100644
--- a/drivers/media/common/siano/smsdvb-main.c
+++ b/drivers/media/common/siano/smsdvb-main.c
@@ -275,7 +275,8 @@
 
 	/* Legacy PER/BER */
 	tmp = p->ets_packets * 65535;
-	do_div(tmp, p->ts_packets + p->ets_packets);
+	if (p->ts_packets + p->ets_packets)
+		do_div(tmp, p->ts_packets + p->ets_packets);
 	client->legacy_per = tmp;
 }
 
diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c
index a1a3a51..0b4616b 100644
--- a/drivers/media/dvb-core/dmxdev.c
+++ b/drivers/media/dvb-core/dmxdev.c
@@ -377,10 +377,8 @@
 		ret = dvb_dmxdev_buffer_write(&dmxdevfilter->buffer, buffer2,
 					      buffer2_len);
 	}
-	if (ret < 0) {
-		dvb_ringbuffer_flush(&dmxdevfilter->buffer);
+	if (ret < 0)
 		dmxdevfilter->buffer.error = ret;
-	}
 	if (dmxdevfilter->params.sec.flags & DMX_ONESHOT)
 		dmxdevfilter->state = DMXDEV_STATE_DONE;
 	spin_unlock(&dmxdevfilter->dev->lock);
@@ -416,10 +414,8 @@
 	ret = dvb_dmxdev_buffer_write(buffer, buffer1, buffer1_len);
 	if (ret == buffer1_len)
 		ret = dvb_dmxdev_buffer_write(buffer, buffer2, buffer2_len);
-	if (ret < 0) {
-		dvb_ringbuffer_flush(buffer);
+	if (ret < 0)
 		buffer->error = ret;
-	}
 	spin_unlock(&dmxdevfilter->dev->lock);
 	wake_up(&buffer->queue);
 	return 0;
diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig
index 0e2ec6f..dea17b0c 100644
--- a/drivers/media/dvb-frontends/Kconfig
+++ b/drivers/media/dvb-frontends/Kconfig
@@ -531,6 +531,14 @@
 	  An ATSC 8VSB and QAM64/256 tuner module. Say Y when you want
 	  to support this frontend.
 
+config DVB_LGDT3306A
+	tristate "LG Electronics LGDT3306A based"
+	depends on DVB_CORE && I2C
+	default m if !MEDIA_SUBDRV_AUTOSELECT
+	help
+	  An ATSC 8VSB and QAM-B 64/256 demodulator module. Say Y when you want
+	  to support this frontend.
+
 config DVB_LG2160
 	tristate "LG Electronics LG216x based"
 	depends on DVB_CORE && I2C
diff --git a/drivers/media/dvb-frontends/Makefile b/drivers/media/dvb-frontends/Makefile
index cebc0fa..3b79d38 100644
--- a/drivers/media/dvb-frontends/Makefile
+++ b/drivers/media/dvb-frontends/Makefile
@@ -49,6 +49,7 @@
 obj-$(CONFIG_DVB_S5H1420) += s5h1420.o
 obj-$(CONFIG_DVB_LGDT330X) += lgdt330x.o
 obj-$(CONFIG_DVB_LGDT3305) += lgdt3305.o
+obj-$(CONFIG_DVB_LGDT3306A) += lgdt3306a.o
 obj-$(CONFIG_DVB_LG2160) += lg2160.o
 obj-$(CONFIG_DVB_CX24123) += cx24123.o
 obj-$(CONFIG_DVB_LNBP21) += lnbp21.o
diff --git a/drivers/media/dvb-frontends/lgdt3306a.c b/drivers/media/dvb-frontends/lgdt3306a.c
new file mode 100644
index 0000000..656a38d
--- /dev/null
+++ b/drivers/media/dvb-frontends/lgdt3306a.c
@@ -0,0 +1,2145 @@
+/*
+ *    Support for LGDT3306A - 8VSB/QAM-B
+ *
+ *    Copyright (C) 2013 Fred Richter <frichter@hauppauge.com>
+ *    - driver structure based on lgdt3305.[ch] by Michael Krufky
+ *    - code based on LG3306_V0.35 API by LG Electronics Inc.
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2 of the License, or
+ *    (at your option) any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ */
+
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <asm/div64.h>
+#include <linux/dvb/frontend.h>
+#include "dvb_math.h"
+#include "lgdt3306a.h"
+
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "set debug level (info=1, reg=2 (or-able))");
+
+#define DBG_INFO 1
+#define DBG_REG  2
+#define DBG_DUMP 4 /* FGR - comment out to remove dump code */
+
+#define lg_debug(fmt, arg...) \
+	printk(KERN_DEBUG pr_fmt(fmt), ## arg)
+
+#define dbg_info(fmt, arg...)					\
+	do {							\
+		if (debug & DBG_INFO)				\
+			lg_debug(fmt, ## arg);			\
+	} while (0)
+
+#define dbg_reg(fmt, arg...)					\
+	do {							\
+		if (debug & DBG_REG)				\
+			lg_debug(fmt, ## arg);			\
+	} while (0)
+
+#define lg_chkerr(ret)							\
+({									\
+	int __ret;							\
+	__ret = (ret < 0);						\
+	if (__ret)							\
+		pr_err("error %d on line %d\n",	ret, __LINE__);		\
+	__ret;								\
+})
+
+struct lgdt3306a_state {
+	struct i2c_adapter *i2c_adap;
+	const struct lgdt3306a_config *cfg;
+
+	struct dvb_frontend frontend;
+
+	fe_modulation_t current_modulation;
+	u32 current_frequency;
+	u32 snr;
+};
+
+/*
+ * LG3306A Register Usage
+ *  (LG does not really name the registers, so this code does not either)
+ *
+ * 0000 -> 00FF Common control and status
+ * 1000 -> 10FF Synchronizer control and status
+ * 1F00 -> 1FFF Smart Antenna control and status
+ * 2100 -> 21FF VSB Equalizer control and status
+ * 2800 -> 28FF QAM Equalizer control and status
+ * 3000 -> 30FF FEC control and status
+ */
+
+enum lgdt3306a_lock_status {
+	LG3306_UNLOCK       = 0x00,
+	LG3306_LOCK         = 0x01,
+	LG3306_UNKNOWN_LOCK = 0xff
+};
+
+enum lgdt3306a_neverlock_status {
+	LG3306_NL_INIT    = 0x00,
+	LG3306_NL_PROCESS = 0x01,
+	LG3306_NL_LOCK    = 0x02,
+	LG3306_NL_FAIL    = 0x03,
+	LG3306_NL_UNKNOWN = 0xff
+};
+
+enum lgdt3306a_modulation {
+	LG3306_VSB          = 0x00,
+	LG3306_QAM64        = 0x01,
+	LG3306_QAM256       = 0x02,
+	LG3306_UNKNOWN_MODE = 0xff
+};
+
+enum lgdt3306a_lock_check {
+	LG3306_SYNC_LOCK,
+	LG3306_FEC_LOCK,
+	LG3306_TR_LOCK,
+	LG3306_AGC_LOCK,
+};
+
+
+#ifdef DBG_DUMP
+static void lgdt3306a_DumpAllRegs(struct lgdt3306a_state *state);
+static void lgdt3306a_DumpRegs(struct lgdt3306a_state *state);
+#endif
+
+
+static int lgdt3306a_write_reg(struct lgdt3306a_state *state, u16 reg, u8 val)
+{
+	int ret;
+	u8 buf[] = { reg >> 8, reg & 0xff, val };
+	struct i2c_msg msg = {
+		.addr = state->cfg->i2c_addr, .flags = 0,
+		.buf = buf, .len = 3,
+	};
+
+	dbg_reg("reg: 0x%04x, val: 0x%02x\n", reg, val);
+
+	ret = i2c_transfer(state->i2c_adap, &msg, 1);
+
+	if (ret != 1) {
+		pr_err("error (addr %02x %02x <- %02x, err = %i)\n",
+		       msg.buf[0], msg.buf[1], msg.buf[2], ret);
+		if (ret < 0)
+			return ret;
+		else
+			return -EREMOTEIO;
+	}
+	return 0;
+}
+
+static int lgdt3306a_read_reg(struct lgdt3306a_state *state, u16 reg, u8 *val)
+{
+	int ret;
+	u8 reg_buf[] = { reg >> 8, reg & 0xff };
+	struct i2c_msg msg[] = {
+		{ .addr = state->cfg->i2c_addr,
+		  .flags = 0, .buf = reg_buf, .len = 2 },
+		{ .addr = state->cfg->i2c_addr,
+		  .flags = I2C_M_RD, .buf = val, .len = 1 },
+	};
+
+	ret = i2c_transfer(state->i2c_adap, msg, 2);
+
+	if (ret != 2) {
+		pr_err("error (addr %02x reg %04x error (ret == %i)\n",
+		       state->cfg->i2c_addr, reg, ret);
+		if (ret < 0)
+			return ret;
+		else
+			return -EREMOTEIO;
+	}
+	dbg_reg("reg: 0x%04x, val: 0x%02x\n", reg, *val);
+
+	return 0;
+}
+
+#define read_reg(state, reg)						\
+({									\
+	u8 __val;							\
+	int ret = lgdt3306a_read_reg(state, reg, &__val);		\
+	if (lg_chkerr(ret))						\
+		__val = 0;						\
+	__val;								\
+})
+
+static int lgdt3306a_set_reg_bit(struct lgdt3306a_state *state,
+				u16 reg, int bit, int onoff)
+{
+	u8 val;
+	int ret;
+
+	dbg_reg("reg: 0x%04x, bit: %d, level: %d\n", reg, bit, onoff);
+
+	ret = lgdt3306a_read_reg(state, reg, &val);
+	if (lg_chkerr(ret))
+		goto fail;
+
+	val &= ~(1 << bit);
+	val |= (onoff & 1) << bit;
+
+	ret = lgdt3306a_write_reg(state, reg, val);
+	lg_chkerr(ret);
+fail:
+	return ret;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static int lgdt3306a_soft_reset(struct lgdt3306a_state *state)
+{
+	int ret;
+
+	dbg_info("\n");
+
+	ret = lgdt3306a_set_reg_bit(state, 0x0000, 7, 0);
+	if (lg_chkerr(ret))
+		goto fail;
+
+	msleep(20);
+	ret = lgdt3306a_set_reg_bit(state, 0x0000, 7, 1);
+	lg_chkerr(ret);
+
+fail:
+	return ret;
+}
+
+static int lgdt3306a_mpeg_mode(struct lgdt3306a_state *state,
+				     enum lgdt3306a_mpeg_mode mode)
+{
+	u8 val;
+	int ret;
+
+	dbg_info("(%d)\n", mode);
+	/* transport packet format - TPSENB=0x80 */
+	ret = lgdt3306a_set_reg_bit(state, 0x0071, 7,
+				     mode == LGDT3306A_MPEG_PARALLEL ? 1 : 0);
+	if (lg_chkerr(ret))
+		goto fail;
+
+	/*
+	 * start of packet signal duration
+	 * TPSSOPBITEN=0x40; 0=byte duration, 1=bit duration
+	 */
+	ret = lgdt3306a_set_reg_bit(state, 0x0071, 6, 0);
+	if (lg_chkerr(ret))
+		goto fail;
+
+	ret = lgdt3306a_read_reg(state, 0x0070, &val);
+	if (lg_chkerr(ret))
+		goto fail;
+
+	val |= 0x10; /* TPCLKSUPB=0x10 */
+
+	if (mode == LGDT3306A_MPEG_PARALLEL)
+		val &= ~0x10;
+
+	ret = lgdt3306a_write_reg(state, 0x0070, val);
+	lg_chkerr(ret);
+
+fail:
+	return ret;
+}
+
+static int lgdt3306a_mpeg_mode_polarity(struct lgdt3306a_state *state,
+				       enum lgdt3306a_tp_clock_edge edge,
+				       enum lgdt3306a_tp_valid_polarity valid)
+{
+	u8 val;
+	int ret;
+
+	dbg_info("edge=%d, valid=%d\n", edge, valid);
+
+	ret = lgdt3306a_read_reg(state, 0x0070, &val);
+	if (lg_chkerr(ret))
+		goto fail;
+
+	val &= ~0x06; /* TPCLKPOL=0x04, TPVALPOL=0x02 */
+
+	if (edge == LGDT3306A_TPCLK_RISING_EDGE)
+		val |= 0x04;
+	if (valid == LGDT3306A_TP_VALID_HIGH)
+		val |= 0x02;
+
+	ret = lgdt3306a_write_reg(state, 0x0070, val);
+	lg_chkerr(ret);
+
+fail:
+	return ret;
+}
+
+static int lgdt3306a_mpeg_tristate(struct lgdt3306a_state *state,
+				     int mode)
+{
+	u8 val;
+	int ret;
+
+	dbg_info("(%d)\n", mode);
+
+	if (mode) {
+		ret = lgdt3306a_read_reg(state, 0x0070, &val);
+		if (lg_chkerr(ret))
+			goto fail;
+		/*
+		 * Tristate bus; TPOUTEN=0x80, TPCLKOUTEN=0x20,
+		 * TPDATAOUTEN=0x08
+		 */
+		val &= ~0xa8;
+		ret = lgdt3306a_write_reg(state, 0x0070, val);
+		if (lg_chkerr(ret))
+			goto fail;
+
+		/* AGCIFOUTENB=0x40; 1=Disable IFAGC pin */
+		ret = lgdt3306a_set_reg_bit(state, 0x0003, 6, 1);
+		if (lg_chkerr(ret))
+			goto fail;
+
+	} else {
+		/* enable IFAGC pin */
+		ret = lgdt3306a_set_reg_bit(state, 0x0003, 6, 0);
+		if (lg_chkerr(ret))
+			goto fail;
+
+		ret = lgdt3306a_read_reg(state, 0x0070, &val);
+		if (lg_chkerr(ret))
+			goto fail;
+
+		val |= 0xa8; /* enable bus */
+		ret = lgdt3306a_write_reg(state, 0x0070, val);
+		if (lg_chkerr(ret))
+			goto fail;
+	}
+
+fail:
+	return ret;
+}
+
+static int lgdt3306a_ts_bus_ctrl(struct dvb_frontend *fe, int acquire)
+{
+	struct lgdt3306a_state *state = fe->demodulator_priv;
+
+	dbg_info("acquire=%d\n", acquire);
+
+	return lgdt3306a_mpeg_tristate(state, acquire ? 0 : 1);
+
+}
+
+static int lgdt3306a_power(struct lgdt3306a_state *state,
+				     int mode)
+{
+	int ret;
+
+	dbg_info("(%d)\n", mode);
+
+	if (mode == 0) {
+		/* into reset */
+		ret = lgdt3306a_set_reg_bit(state, 0x0000, 7, 0);
+		if (lg_chkerr(ret))
+			goto fail;
+
+		/* power down */
+		ret = lgdt3306a_set_reg_bit(state, 0x0000, 0, 0);
+		if (lg_chkerr(ret))
+			goto fail;
+
+	} else {
+		/* out of reset */
+		ret = lgdt3306a_set_reg_bit(state, 0x0000, 7, 1);
+		if (lg_chkerr(ret))
+			goto fail;
+
+		/* power up */
+		ret = lgdt3306a_set_reg_bit(state, 0x0000, 0, 1);
+		if (lg_chkerr(ret))
+			goto fail;
+	}
+
+#ifdef DBG_DUMP
+	lgdt3306a_DumpAllRegs(state);
+#endif
+fail:
+	return ret;
+}
+
+
+static int lgdt3306a_set_vsb(struct lgdt3306a_state *state)
+{
+	u8 val;
+	int ret;
+
+	dbg_info("\n");
+
+	/* 0. Spectrum inversion detection manual; spectrum inverted */
+	ret = lgdt3306a_read_reg(state, 0x0002, &val);
+	val &= 0xf7; /* SPECINVAUTO Off */
+	val |= 0x04; /* SPECINV On */
+	ret = lgdt3306a_write_reg(state, 0x0002, val);
+	if (lg_chkerr(ret))
+		goto fail;
+
+	/* 1. Selection of standard mode(0x08=QAM, 0x80=VSB) */
+	ret = lgdt3306a_write_reg(state, 0x0008, 0x80);
+	if (lg_chkerr(ret))
+		goto fail;
+
+	/* 2. Bandwidth mode for VSB(6MHz) */
+	ret = lgdt3306a_read_reg(state, 0x0009, &val);
+	val &= 0xe3;
+	val |= 0x0c; /* STDOPDETTMODE[2:0]=3 */
+	ret = lgdt3306a_write_reg(state, 0x0009, val);
+	if (lg_chkerr(ret))
+		goto fail;
+
+	/* 3. QAM mode detection mode(None) */
+	ret = lgdt3306a_read_reg(state, 0x0009, &val);
+	val &= 0xfc; /* STDOPDETCMODE[1:0]=0 */
+	ret = lgdt3306a_write_reg(state, 0x0009, val);
+	if (lg_chkerr(ret))
+		goto fail;
+
+	/* 4. ADC sampling frequency rate(2x sampling) */
+	ret = lgdt3306a_read_reg(state, 0x000d, &val);
+	val &= 0xbf; /* SAMPLING4XFEN=0 */
+	ret = lgdt3306a_write_reg(state, 0x000d, val);
+	if (lg_chkerr(ret))
+		goto fail;
+
+#if 0
+	/* FGR - disable any AICC filtering, testing only */
+
+	ret = lgdt3306a_write_reg(state, 0x0024, 0x00);
+	if (lg_chkerr(ret))
+		goto fail;
+
+	/* AICCFIXFREQ0 NT N-1(Video rejection) */
+	ret = lgdt3306a_write_reg(state, 0x002e, 0x00);
+	ret = lgdt3306a_write_reg(state, 0x002f, 0x00);
+	ret = lgdt3306a_write_reg(state, 0x0030, 0x00);
+
+	/* AICCFIXFREQ1 NT N-1(Audio rejection) */
+	ret = lgdt3306a_write_reg(state, 0x002b, 0x00);
+	ret = lgdt3306a_write_reg(state, 0x002c, 0x00);
+	ret = lgdt3306a_write_reg(state, 0x002d, 0x00);
+
+	/* AICCFIXFREQ2 NT Co-Channel(Video rejection) */
+	ret = lgdt3306a_write_reg(state, 0x0028, 0x00);
+	ret = lgdt3306a_write_reg(state, 0x0029, 0x00);
+	ret = lgdt3306a_write_reg(state, 0x002a, 0x00);
+
+	/* AICCFIXFREQ3 NT Co-Channel(Audio rejection) */
+	ret = lgdt3306a_write_reg(state, 0x0025, 0x00);
+	ret = lgdt3306a_write_reg(state, 0x0026, 0x00);
+	ret = lgdt3306a_write_reg(state, 0x0027, 0x00);
+
+#else
+	/* FGR - this works well for HVR-1955,1975 */
+
+	/* 5. AICCOPMODE  NT N-1 Adj. */
+	ret = lgdt3306a_write_reg(state, 0x0024, 0x5A);
+	if (lg_chkerr(ret))
+		goto fail;
+
+	/* AICCFIXFREQ0 NT N-1(Video rejection) */
+	ret = lgdt3306a_write_reg(state, 0x002e, 0x5A);
+	ret = lgdt3306a_write_reg(state, 0x002f, 0x00);
+	ret = lgdt3306a_write_reg(state, 0x0030, 0x00);
+
+	/* AICCFIXFREQ1 NT N-1(Audio rejection) */
+	ret = lgdt3306a_write_reg(state, 0x002b, 0x36);
+	ret = lgdt3306a_write_reg(state, 0x002c, 0x00);
+	ret = lgdt3306a_write_reg(state, 0x002d, 0x00);
+
+	/* AICCFIXFREQ2 NT Co-Channel(Video rejection) */
+	ret = lgdt3306a_write_reg(state, 0x0028, 0x2A);
+	ret = lgdt3306a_write_reg(state, 0x0029, 0x00);
+	ret = lgdt3306a_write_reg(state, 0x002a, 0x00);
+
+	/* AICCFIXFREQ3 NT Co-Channel(Audio rejection) */
+	ret = lgdt3306a_write_reg(state, 0x0025, 0x06);
+	ret = lgdt3306a_write_reg(state, 0x0026, 0x00);
+	ret = lgdt3306a_write_reg(state, 0x0027, 0x00);
+#endif
+
+	ret = lgdt3306a_read_reg(state, 0x001e, &val);
+	val &= 0x0f;
+	val |= 0xa0;
+	ret = lgdt3306a_write_reg(state, 0x001e, val);
+
+	ret = lgdt3306a_write_reg(state, 0x0022, 0x08);
+
+	ret = lgdt3306a_write_reg(state, 0x0023, 0xFF);
+
+	ret = lgdt3306a_read_reg(state, 0x211f, &val);
+	val &= 0xef;
+	ret = lgdt3306a_write_reg(state, 0x211f, val);
+
+	ret = lgdt3306a_write_reg(state, 0x2173, 0x01);
+
+	ret = lgdt3306a_read_reg(state, 0x1061, &val);
+	val &= 0xf8;
+	val |= 0x04;
+	ret = lgdt3306a_write_reg(state, 0x1061, val);
+
+	ret = lgdt3306a_read_reg(state, 0x103d, &val);
+	val &= 0xcf;
+	ret = lgdt3306a_write_reg(state, 0x103d, val);
+
+	ret = lgdt3306a_write_reg(state, 0x2122, 0x40);
+
+	ret = lgdt3306a_read_reg(state, 0x2141, &val);
+	val &= 0x3f;
+	ret = lgdt3306a_write_reg(state, 0x2141, val);
+
+	ret = lgdt3306a_read_reg(state, 0x2135, &val);
+	val &= 0x0f;
+	val |= 0x70;
+	ret = lgdt3306a_write_reg(state, 0x2135, val);
+
+	ret = lgdt3306a_read_reg(state, 0x0003, &val);
+	val &= 0xf7;
+	ret = lgdt3306a_write_reg(state, 0x0003, val);
+
+	ret = lgdt3306a_read_reg(state, 0x001c, &val);
+	val &= 0x7f;
+	ret = lgdt3306a_write_reg(state, 0x001c, val);
+
+	/* 6. EQ step size */
+	ret = lgdt3306a_read_reg(state, 0x2179, &val);
+	val &= 0xf8;
+	ret = lgdt3306a_write_reg(state, 0x2179, val);
+
+	ret = lgdt3306a_read_reg(state, 0x217a, &val);
+	val &= 0xf8;
+	ret = lgdt3306a_write_reg(state, 0x217a, val);
+
+	/* 7. Reset */
+	ret = lgdt3306a_soft_reset(state);
+	if (lg_chkerr(ret))
+		goto fail;
+
+	dbg_info("complete\n");
+fail:
+	return ret;
+}
+
+static int lgdt3306a_set_qam(struct lgdt3306a_state *state, int modulation)
+{
+	u8 val;
+	int ret;
+
+	dbg_info("modulation=%d\n", modulation);
+
+	/* 1. Selection of standard mode(0x08=QAM, 0x80=VSB) */
+	ret = lgdt3306a_write_reg(state, 0x0008, 0x08);
+	if (lg_chkerr(ret))
+		goto fail;
+
+	/* 1a. Spectrum inversion detection to Auto */
+	ret = lgdt3306a_read_reg(state, 0x0002, &val);
+	val &= 0xfb; /* SPECINV Off */
+	val |= 0x08; /* SPECINVAUTO On */
+	ret = lgdt3306a_write_reg(state, 0x0002, val);
+	if (lg_chkerr(ret))
+		goto fail;
+
+	/* 2. Bandwidth mode for QAM */
+	ret = lgdt3306a_read_reg(state, 0x0009, &val);
+	val &= 0xe3; /* STDOPDETTMODE[2:0]=0 VSB Off */
+	ret = lgdt3306a_write_reg(state, 0x0009, val);
+	if (lg_chkerr(ret))
+		goto fail;
+
+	/* 3. : 64QAM/256QAM detection(manual, auto) */
+	ret = lgdt3306a_read_reg(state, 0x0009, &val);
+	val &= 0xfc;
+	val |= 0x02; /* STDOPDETCMODE[1:0]=1=Manual 2=Auto */
+	ret = lgdt3306a_write_reg(state, 0x0009, val);
+	if (lg_chkerr(ret))
+		goto fail;
+
+	/* 3a. : 64QAM/256QAM selection for manual */
+	ret = lgdt3306a_read_reg(state, 0x101a, &val);
+	val &= 0xf8;
+	if (modulation == QAM_64)
+		val |= 0x02; /* QMDQMODE[2:0]=2=QAM64 */
+	else
+		val |= 0x04; /* QMDQMODE[2:0]=4=QAM256 */
+
+	ret = lgdt3306a_write_reg(state, 0x101a, val);
+	if (lg_chkerr(ret))
+		goto fail;
+
+	/* 4. ADC sampling frequency rate(4x sampling) */
+	ret = lgdt3306a_read_reg(state, 0x000d, &val);
+	val &= 0xbf;
+	val |= 0x40; /* SAMPLING4XFEN=1 */
+	ret = lgdt3306a_write_reg(state, 0x000d, val);
+	if (lg_chkerr(ret))
+		goto fail;
+
+	/* 5. No AICC operation in QAM mode */
+	ret = lgdt3306a_read_reg(state, 0x0024, &val);
+	val &= 0x00;
+	ret = lgdt3306a_write_reg(state, 0x0024, val);
+	if (lg_chkerr(ret))
+		goto fail;
+
+	/* 6. Reset */
+	ret = lgdt3306a_soft_reset(state);
+	if (lg_chkerr(ret))
+		goto fail;
+
+	dbg_info("complete\n");
+fail:
+	return ret;
+}
+
+static int lgdt3306a_set_modulation(struct lgdt3306a_state *state,
+				   struct dtv_frontend_properties *p)
+{
+	int ret;
+
+	dbg_info("\n");
+
+	switch (p->modulation) {
+	case VSB_8:
+		ret = lgdt3306a_set_vsb(state);
+		break;
+	case QAM_64:
+		ret = lgdt3306a_set_qam(state, QAM_64);
+		break;
+	case QAM_256:
+		ret = lgdt3306a_set_qam(state, QAM_256);
+		break;
+	default:
+		return -EINVAL;
+	}
+	if (lg_chkerr(ret))
+		goto fail;
+
+	state->current_modulation = p->modulation;
+
+fail:
+	return ret;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static int lgdt3306a_agc_setup(struct lgdt3306a_state *state,
+			      struct dtv_frontend_properties *p)
+{
+	/* TODO: anything we want to do here??? */
+	dbg_info("\n");
+
+	switch (p->modulation) {
+	case VSB_8:
+		break;
+	case QAM_64:
+	case QAM_256:
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static int lgdt3306a_set_inversion(struct lgdt3306a_state *state,
+				       int inversion)
+{
+	int ret;
+
+	dbg_info("(%d)\n", inversion);
+
+	ret = lgdt3306a_set_reg_bit(state, 0x0002, 2, inversion ? 1 : 0);
+	return ret;
+}
+
+static int lgdt3306a_set_inversion_auto(struct lgdt3306a_state *state,
+				       int enabled)
+{
+	int ret;
+
+	dbg_info("(%d)\n", enabled);
+
+	/* 0=Manual 1=Auto(QAM only) - SPECINVAUTO=0x04 */
+	ret = lgdt3306a_set_reg_bit(state, 0x0002, 3, enabled);
+	return ret;
+}
+
+static int lgdt3306a_spectral_inversion(struct lgdt3306a_state *state,
+				       struct dtv_frontend_properties *p,
+				       int inversion)
+{
+	int ret = 0;
+
+	dbg_info("(%d)\n", inversion);
+#if 0
+	/*
+	 * FGR - spectral_inversion defaults already set for VSB and QAM;
+	 * can enable later if desired
+	 */
+
+	ret = lgdt3306a_set_inversion(state, inversion);
+
+	switch (p->modulation) {
+	case VSB_8:
+		/* Manual only for VSB */
+		ret = lgdt3306a_set_inversion_auto(state, 0);
+		break;
+	case QAM_64:
+	case QAM_256:
+		/* Auto ok for QAM */
+		ret = lgdt3306a_set_inversion_auto(state, 1);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+#endif
+	return ret;
+}
+
+static int lgdt3306a_set_if(struct lgdt3306a_state *state,
+			   struct dtv_frontend_properties *p)
+{
+	int ret;
+	u16 if_freq_khz;
+	u8 nco1, nco2;
+
+	switch (p->modulation) {
+	case VSB_8:
+		if_freq_khz = state->cfg->vsb_if_khz;
+		break;
+	case QAM_64:
+	case QAM_256:
+		if_freq_khz = state->cfg->qam_if_khz;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (if_freq_khz) {
+	default:
+		pr_warn("IF=%d KHz is not supportted, 3250 assumed\n",
+			if_freq_khz);
+		/* fallthrough */
+	case 3250: /* 3.25Mhz */
+		nco1 = 0x34;
+		nco2 = 0x00;
+		break;
+	case 3500: /* 3.50Mhz */
+		nco1 = 0x38;
+		nco2 = 0x00;
+		break;
+	case 4000: /* 4.00Mhz */
+		nco1 = 0x40;
+		nco2 = 0x00;
+		break;
+	case 5000: /* 5.00Mhz */
+		nco1 = 0x50;
+		nco2 = 0x00;
+		break;
+	case 5380: /* 5.38Mhz */
+		nco1 = 0x56;
+		nco2 = 0x14;
+		break;
+	}
+	ret = lgdt3306a_write_reg(state, 0x0010, nco1);
+	if (ret)
+		return ret;
+	ret = lgdt3306a_write_reg(state, 0x0011, nco2);
+	if (ret)
+		return ret;
+
+	dbg_info("if_freq=%d KHz->[%04x]\n", if_freq_khz, nco1<<8 | nco2);
+
+	return 0;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static int lgdt3306a_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
+{
+	struct lgdt3306a_state *state = fe->demodulator_priv;
+
+	if (state->cfg->deny_i2c_rptr) {
+		dbg_info("deny_i2c_rptr=%d\n", state->cfg->deny_i2c_rptr);
+		return 0;
+	}
+	dbg_info("(%d)\n", enable);
+
+	/* NI2CRPTEN=0x80 */
+	return lgdt3306a_set_reg_bit(state, 0x0002, 7, enable ? 0 : 1);
+}
+
+static int lgdt3306a_sleep(struct lgdt3306a_state *state)
+{
+	int ret;
+
+	dbg_info("\n");
+	state->current_frequency = -1; /* force re-tune, when we wake */
+
+	ret = lgdt3306a_mpeg_tristate(state, 1); /* disable data bus */
+	if (lg_chkerr(ret))
+		goto fail;
+
+	ret = lgdt3306a_power(state, 0); /* power down */
+	lg_chkerr(ret);
+
+fail:
+	return 0;
+}
+
+static int lgdt3306a_fe_sleep(struct dvb_frontend *fe)
+{
+	struct lgdt3306a_state *state = fe->demodulator_priv;
+
+	return lgdt3306a_sleep(state);
+}
+
+static int lgdt3306a_init(struct dvb_frontend *fe)
+{
+	struct lgdt3306a_state *state = fe->demodulator_priv;
+	u8 val;
+	int ret;
+
+	dbg_info("\n");
+
+	/* 1. Normal operation mode */
+	ret = lgdt3306a_set_reg_bit(state, 0x0001, 0, 1); /* SIMFASTENB=0x01 */
+	if (lg_chkerr(ret))
+		goto fail;
+
+	/* 2. Spectrum inversion auto detection (Not valid for VSB) */
+	ret = lgdt3306a_set_inversion_auto(state, 0);
+	if (lg_chkerr(ret))
+		goto fail;
+
+	/* 3. Spectrum inversion(According to the tuner configuration) */
+	ret = lgdt3306a_set_inversion(state, 1);
+	if (lg_chkerr(ret))
+		goto fail;
+
+	/* 4. Peak-to-peak voltage of ADC input signal */
+
+	/* ADCSEL1V=0x80=1Vpp; 0x00=2Vpp */
+	ret = lgdt3306a_set_reg_bit(state, 0x0004, 7, 1);
+	if (lg_chkerr(ret))
+		goto fail;
+
+	/* 5. ADC output data capture clock phase */
+
+	/* 0=same phase as ADC clock */
+	ret = lgdt3306a_set_reg_bit(state, 0x0004, 2, 0);
+	if (lg_chkerr(ret))
+		goto fail;
+
+	/* 5a. ADC sampling clock source */
+
+	/* ADCCLKPLLSEL=0x08; 0=use ext clock, not PLL */
+	ret = lgdt3306a_set_reg_bit(state, 0x0004, 3, 0);
+	if (lg_chkerr(ret))
+		goto fail;
+
+	/* 6. Automatic PLL set */
+
+	/* PLLSETAUTO=0x40; 0=off */
+	ret = lgdt3306a_set_reg_bit(state, 0x0005, 6, 0);
+	if (lg_chkerr(ret))
+		goto fail;
+
+	if (state->cfg->xtalMHz == 24) {	/* 24MHz */
+		/* 7. Frequency for PLL output(0x2564 for 192MHz for 24MHz) */
+		ret = lgdt3306a_read_reg(state, 0x0005, &val);
+		if (lg_chkerr(ret))
+			goto fail;
+		val &= 0xc0;
+		val |= 0x25;
+		ret = lgdt3306a_write_reg(state, 0x0005, val);
+		if (lg_chkerr(ret))
+			goto fail;
+		ret = lgdt3306a_write_reg(state, 0x0006, 0x64);
+		if (lg_chkerr(ret))
+			goto fail;
+
+		/* 8. ADC sampling frequency(0x180000 for 24MHz sampling) */
+		ret = lgdt3306a_read_reg(state, 0x000d, &val);
+		if (lg_chkerr(ret))
+			goto fail;
+		val &= 0xc0;
+		val |= 0x18;
+		ret = lgdt3306a_write_reg(state, 0x000d, val);
+		if (lg_chkerr(ret))
+			goto fail;
+
+	} else if (state->cfg->xtalMHz == 25) { /* 25MHz */
+		/* 7. Frequency for PLL output */
+		ret = lgdt3306a_read_reg(state, 0x0005, &val);
+		if (lg_chkerr(ret))
+			goto fail;
+		val &= 0xc0;
+		val |= 0x25;
+		ret = lgdt3306a_write_reg(state, 0x0005, val);
+		if (lg_chkerr(ret))
+			goto fail;
+		ret = lgdt3306a_write_reg(state, 0x0006, 0x64);
+		if (lg_chkerr(ret))
+			goto fail;
+
+		/* 8. ADC sampling frequency(0x190000 for 25MHz sampling) */
+		ret = lgdt3306a_read_reg(state, 0x000d, &val);
+		if (lg_chkerr(ret))
+			goto fail;
+		val &= 0xc0;
+		val |= 0x19;
+		ret = lgdt3306a_write_reg(state, 0x000d, val);
+		if (lg_chkerr(ret))
+			goto fail;
+	} else {
+		pr_err("Bad xtalMHz=%d\n", state->cfg->xtalMHz);
+	}
+#if 0
+	ret = lgdt3306a_write_reg(state, 0x000e, 0x00);
+	ret = lgdt3306a_write_reg(state, 0x000f, 0x00);
+#endif
+
+	/* 9. Center frequency of input signal of ADC */
+	ret = lgdt3306a_write_reg(state, 0x0010, 0x34); /* 3.25MHz */
+	ret = lgdt3306a_write_reg(state, 0x0011, 0x00);
+
+	/* 10. Fixed gain error value */
+	ret = lgdt3306a_write_reg(state, 0x0014, 0); /* gain error=0 */
+
+	/* 10a. VSB TR BW gear shift initial step */
+	ret = lgdt3306a_read_reg(state, 0x103c, &val);
+	val &= 0x0f;
+	val |= 0x20; /* SAMGSAUTOSTL_V[3:0] = 2 */
+	ret = lgdt3306a_write_reg(state, 0x103c, val);
+
+	/* 10b. Timing offset calibration in low temperature for VSB */
+	ret = lgdt3306a_read_reg(state, 0x103d, &val);
+	val &= 0xfc;
+	val |= 0x03;
+	ret = lgdt3306a_write_reg(state, 0x103d, val);
+
+	/* 10c. Timing offset calibration in low temperature for QAM */
+	ret = lgdt3306a_read_reg(state, 0x1036, &val);
+	val &= 0xf0;
+	val |= 0x0c;
+	ret = lgdt3306a_write_reg(state, 0x1036, val);
+
+	/* 11. Using the imaginary part of CIR in CIR loading */
+	ret = lgdt3306a_read_reg(state, 0x211f, &val);
+	val &= 0xef; /* do not use imaginary of CIR */
+	ret = lgdt3306a_write_reg(state, 0x211f, val);
+
+	/* 12. Control of no signal detector function */
+	ret = lgdt3306a_read_reg(state, 0x2849, &val);
+	val &= 0xef; /* NOUSENOSIGDET=0, enable no signal detector */
+	ret = lgdt3306a_write_reg(state, 0x2849, val);
+
+	/* FGR - put demod in some known mode */
+	ret = lgdt3306a_set_vsb(state);
+
+	/* 13. TP stream format */
+	ret = lgdt3306a_mpeg_mode(state, state->cfg->mpeg_mode);
+
+	/* 14. disable output buses */
+	ret = lgdt3306a_mpeg_tristate(state, 1);
+
+	/* 15. Sleep (in reset) */
+	ret = lgdt3306a_sleep(state);
+	lg_chkerr(ret);
+
+fail:
+	return ret;
+}
+
+static int lgdt3306a_set_parameters(struct dvb_frontend *fe)
+{
+	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+	struct lgdt3306a_state *state = fe->demodulator_priv;
+	int ret;
+
+	dbg_info("(%d, %d)\n", p->frequency, p->modulation);
+
+	if (state->current_frequency  == p->frequency &&
+	   state->current_modulation == p->modulation) {
+		dbg_info(" (already set, skipping ...)\n");
+		return 0;
+	}
+	state->current_frequency = -1;
+	state->current_modulation = -1;
+
+	ret = lgdt3306a_power(state, 1); /* power up */
+	if (lg_chkerr(ret))
+		goto fail;
+
+	if (fe->ops.tuner_ops.set_params) {
+		ret = fe->ops.tuner_ops.set_params(fe);
+		if (fe->ops.i2c_gate_ctrl)
+			fe->ops.i2c_gate_ctrl(fe, 0);
+#if 0
+		if (lg_chkerr(ret))
+			goto fail;
+		state->current_frequency = p->frequency;
+#endif
+	}
+
+	ret = lgdt3306a_set_modulation(state, p);
+	if (lg_chkerr(ret))
+		goto fail;
+
+	ret = lgdt3306a_agc_setup(state, p);
+	if (lg_chkerr(ret))
+		goto fail;
+
+	ret = lgdt3306a_set_if(state, p);
+	if (lg_chkerr(ret))
+		goto fail;
+
+	ret = lgdt3306a_spectral_inversion(state, p,
+					state->cfg->spectral_inversion ? 1 : 0);
+	if (lg_chkerr(ret))
+		goto fail;
+
+	ret = lgdt3306a_mpeg_mode(state, state->cfg->mpeg_mode);
+	if (lg_chkerr(ret))
+		goto fail;
+
+	ret = lgdt3306a_mpeg_mode_polarity(state,
+					  state->cfg->tpclk_edge,
+					  state->cfg->tpvalid_polarity);
+	if (lg_chkerr(ret))
+		goto fail;
+
+	ret = lgdt3306a_mpeg_tristate(state, 0); /* enable data bus */
+	if (lg_chkerr(ret))
+		goto fail;
+
+	ret = lgdt3306a_soft_reset(state);
+	if (lg_chkerr(ret))
+		goto fail;
+
+#ifdef DBG_DUMP
+	lgdt3306a_DumpAllRegs(state);
+#endif
+	state->current_frequency = p->frequency;
+fail:
+	return ret;
+}
+
+static int lgdt3306a_get_frontend(struct dvb_frontend *fe)
+{
+	struct lgdt3306a_state *state = fe->demodulator_priv;
+	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+
+	dbg_info("(%u, %d)\n",
+		 state->current_frequency, state->current_modulation);
+
+	p->modulation = state->current_modulation;
+	p->frequency = state->current_frequency;
+	return 0;
+}
+
+static enum dvbfe_algo lgdt3306a_get_frontend_algo(struct dvb_frontend *fe)
+{
+#if 1
+	return DVBFE_ALGO_CUSTOM;
+#else
+	return DVBFE_ALGO_HW;
+#endif
+}
+
+/* ------------------------------------------------------------------------ */
+static int lgdt3306a_monitor_vsb(struct lgdt3306a_state *state)
+{
+	u8 val;
+	int ret;
+	u8 snrRef, maxPowerMan, nCombDet;
+	u16 fbDlyCir;
+
+	ret = lgdt3306a_read_reg(state, 0x21a1, &val);
+	if (ret)
+		return ret;
+	snrRef = val & 0x3f;
+
+	ret = lgdt3306a_read_reg(state, 0x2185, &maxPowerMan);
+	if (ret)
+		return ret;
+
+	ret = lgdt3306a_read_reg(state, 0x2191, &val);
+	if (ret)
+		return ret;
+	nCombDet = (val & 0x80) >> 7;
+
+	ret = lgdt3306a_read_reg(state, 0x2180, &val);
+	if (ret)
+		return ret;
+	fbDlyCir = (val & 0x03) << 8;
+
+	ret = lgdt3306a_read_reg(state, 0x2181, &val);
+	if (ret)
+		return ret;
+	fbDlyCir |= val;
+
+	dbg_info("snrRef=%d maxPowerMan=0x%x nCombDet=%d fbDlyCir=0x%x\n",
+		snrRef, maxPowerMan, nCombDet, fbDlyCir);
+
+	/* Carrier offset sub loop bandwidth */
+	ret = lgdt3306a_read_reg(state, 0x1061, &val);
+	if (ret)
+		return ret;
+	val &= 0xf8;
+	if ((snrRef > 18) && (maxPowerMan > 0x68)
+	    && (nCombDet == 0x01)
+	    && ((fbDlyCir == 0x03FF) || (fbDlyCir < 0x6C))) {
+		/* SNR is over 18dB and no ghosting */
+		val |= 0x00; /* final bandwidth = 0 */
+	} else {
+		val |= 0x04; /* final bandwidth = 4 */
+	}
+	ret = lgdt3306a_write_reg(state, 0x1061, val);
+	if (ret)
+		return ret;
+
+	/* Adjust Notch Filter */
+	ret = lgdt3306a_read_reg(state, 0x0024, &val);
+	if (ret)
+		return ret;
+	val &= 0x0f;
+	if (nCombDet == 0) { /* Turn on the Notch Filter */
+		val |= 0x50;
+	}
+	ret = lgdt3306a_write_reg(state, 0x0024, val);
+	if (ret)
+		return ret;
+
+	/* VSB Timing Recovery output normalization */
+	ret = lgdt3306a_read_reg(state, 0x103d, &val);
+	if (ret)
+		return ret;
+	val &= 0xcf;
+	val |= 0x20;
+	ret = lgdt3306a_write_reg(state, 0x103d, val);
+
+	return ret;
+}
+
+static enum lgdt3306a_modulation
+lgdt3306a_check_oper_mode(struct lgdt3306a_state *state)
+{
+	u8 val = 0;
+	int ret;
+
+	ret = lgdt3306a_read_reg(state, 0x0081, &val);
+	if (ret)
+		goto err;
+
+	if (val & 0x80)	{
+		dbg_info("VSB\n");
+		return LG3306_VSB;
+	}
+	if (val & 0x08) {
+		ret = lgdt3306a_read_reg(state, 0x00a6, &val);
+		if (ret)
+			goto err;
+		val = val >> 2;
+		if (val & 0x01) {
+			dbg_info("QAM256\n");
+			return LG3306_QAM256;
+		}
+		dbg_info("QAM64\n");
+		return LG3306_QAM64;
+	}
+err:
+	pr_warn("UNKNOWN\n");
+	return LG3306_UNKNOWN_MODE;
+}
+
+static enum lgdt3306a_lock_status
+lgdt3306a_check_lock_status(struct lgdt3306a_state *state,
+			    enum lgdt3306a_lock_check whatLock)
+{
+	u8 val = 0;
+	int ret;
+	enum lgdt3306a_modulation	modeOper;
+	enum lgdt3306a_lock_status lockStatus;
+
+	modeOper = LG3306_UNKNOWN_MODE;
+
+	switch (whatLock) {
+	case LG3306_SYNC_LOCK:
+	{
+		ret = lgdt3306a_read_reg(state, 0x00a6, &val);
+		if (ret)
+			return ret;
+
+		if ((val & 0x80) == 0x80)
+			lockStatus = LG3306_LOCK;
+		else
+			lockStatus = LG3306_UNLOCK;
+
+		dbg_info("SYNC_LOCK=%x\n", lockStatus);
+		break;
+	}
+	case LG3306_AGC_LOCK:
+	{
+		ret = lgdt3306a_read_reg(state, 0x0080, &val);
+		if (ret)
+			return ret;
+
+		if ((val & 0x40) == 0x40)
+			lockStatus = LG3306_LOCK;
+		else
+			lockStatus = LG3306_UNLOCK;
+
+		dbg_info("AGC_LOCK=%x\n", lockStatus);
+		break;
+	}
+	case LG3306_TR_LOCK:
+	{
+		modeOper = lgdt3306a_check_oper_mode(state);
+		if ((modeOper == LG3306_QAM64) || (modeOper == LG3306_QAM256)) {
+			ret = lgdt3306a_read_reg(state, 0x1094, &val);
+			if (ret)
+				return ret;
+
+			if ((val & 0x80) == 0x80)
+				lockStatus = LG3306_LOCK;
+			else
+				lockStatus = LG3306_UNLOCK;
+		} else
+			lockStatus = LG3306_UNKNOWN_LOCK;
+
+		dbg_info("TR_LOCK=%x\n", lockStatus);
+		break;
+	}
+	case LG3306_FEC_LOCK:
+	{
+		modeOper = lgdt3306a_check_oper_mode(state);
+		if ((modeOper == LG3306_QAM64) || (modeOper == LG3306_QAM256)) {
+			ret = lgdt3306a_read_reg(state, 0x0080, &val);
+			if (ret)
+				return ret;
+
+			if ((val & 0x10) == 0x10)
+				lockStatus = LG3306_LOCK;
+			else
+				lockStatus = LG3306_UNLOCK;
+		} else
+			lockStatus = LG3306_UNKNOWN_LOCK;
+
+		dbg_info("FEC_LOCK=%x\n", lockStatus);
+		break;
+	}
+
+	default:
+		lockStatus = LG3306_UNKNOWN_LOCK;
+		pr_warn("UNKNOWN whatLock=%d\n", whatLock);
+		break;
+	}
+
+	return lockStatus;
+}
+
+static enum lgdt3306a_neverlock_status
+lgdt3306a_check_neverlock_status(struct lgdt3306a_state *state)
+{
+	u8 val = 0;
+	int ret;
+	enum lgdt3306a_neverlock_status lockStatus;
+
+	ret = lgdt3306a_read_reg(state, 0x0080, &val);
+	if (ret)
+		return ret;
+	lockStatus = (enum lgdt3306a_neverlock_status)(val & 0x03);
+
+	dbg_info("NeverLock=%d", lockStatus);
+
+	return lockStatus;
+}
+
+static int lgdt3306a_pre_monitoring(struct lgdt3306a_state *state)
+{
+	u8 val = 0;
+	int ret;
+	u8 currChDiffACQ, snrRef, mainStrong, aiccrejStatus;
+
+	/* Channel variation */
+	ret = lgdt3306a_read_reg(state, 0x21bc, &currChDiffACQ);
+	if (ret)
+		return ret;
+
+	/* SNR of Frame sync */
+	ret = lgdt3306a_read_reg(state, 0x21a1, &val);
+	if (ret)
+		return ret;
+	snrRef = val & 0x3f;
+
+	/* Strong Main CIR */
+	ret = lgdt3306a_read_reg(state, 0x2199, &val);
+	if (ret)
+		return ret;
+	mainStrong = (val & 0x40) >> 6;
+
+	ret = lgdt3306a_read_reg(state, 0x0090, &val);
+	if (ret)
+		return ret;
+	aiccrejStatus = (val & 0xf0) >> 4;
+
+	dbg_info("snrRef=%d mainStrong=%d aiccrejStatus=%d currChDiffACQ=0x%x\n",
+		snrRef, mainStrong, aiccrejStatus, currChDiffACQ);
+
+#if 0
+	/* Dynamic ghost exists */
+	if ((mainStrong == 0) && (currChDiffACQ > 0x70))
+#endif
+	if (mainStrong == 0) {
+		ret = lgdt3306a_read_reg(state, 0x2135, &val);
+		if (ret)
+			return ret;
+		val &= 0x0f;
+		val |= 0xa0;
+		ret = lgdt3306a_write_reg(state, 0x2135, val);
+		if (ret)
+			return ret;
+
+		ret = lgdt3306a_read_reg(state, 0x2141, &val);
+		if (ret)
+			return ret;
+		val &= 0x3f;
+		val |= 0x80;
+		ret = lgdt3306a_write_reg(state, 0x2141, val);
+		if (ret)
+			return ret;
+
+		ret = lgdt3306a_write_reg(state, 0x2122, 0x70);
+		if (ret)
+			return ret;
+	} else { /* Weak ghost or static channel */
+		ret = lgdt3306a_read_reg(state, 0x2135, &val);
+		if (ret)
+			return ret;
+		val &= 0x0f;
+		val |= 0x70;
+		ret = lgdt3306a_write_reg(state, 0x2135, val);
+		if (ret)
+			return ret;
+
+		ret = lgdt3306a_read_reg(state, 0x2141, &val);
+		if (ret)
+			return ret;
+		val &= 0x3f;
+		val |= 0x40;
+		ret = lgdt3306a_write_reg(state, 0x2141, val);
+		if (ret)
+			return ret;
+
+		ret = lgdt3306a_write_reg(state, 0x2122, 0x40);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
+static enum lgdt3306a_lock_status
+lgdt3306a_sync_lock_poll(struct lgdt3306a_state *state)
+{
+	enum lgdt3306a_lock_status syncLockStatus = LG3306_UNLOCK;
+	int	i;
+
+	for (i = 0; i < 2; i++)	{
+		msleep(30);
+
+		syncLockStatus = lgdt3306a_check_lock_status(state,
+							     LG3306_SYNC_LOCK);
+
+		if (syncLockStatus == LG3306_LOCK) {
+			dbg_info("locked(%d)\n", i);
+			return LG3306_LOCK;
+		}
+	}
+	dbg_info("not locked\n");
+	return LG3306_UNLOCK;
+}
+
+static enum lgdt3306a_lock_status
+lgdt3306a_fec_lock_poll(struct lgdt3306a_state *state)
+{
+	enum lgdt3306a_lock_status FECLockStatus = LG3306_UNLOCK;
+	int	i;
+
+	for (i = 0; i < 2; i++)	{
+		msleep(30);
+
+		FECLockStatus = lgdt3306a_check_lock_status(state,
+							    LG3306_FEC_LOCK);
+
+		if (FECLockStatus == LG3306_LOCK) {
+			dbg_info("locked(%d)\n", i);
+			return FECLockStatus;
+		}
+	}
+	dbg_info("not locked\n");
+	return FECLockStatus;
+}
+
+static enum lgdt3306a_neverlock_status
+lgdt3306a_neverlock_poll(struct lgdt3306a_state *state)
+{
+	enum lgdt3306a_neverlock_status NLLockStatus = LG3306_NL_FAIL;
+	int	i;
+
+	for (i = 0; i < 5; i++) {
+		msleep(30);
+
+		NLLockStatus = lgdt3306a_check_neverlock_status(state);
+
+		if (NLLockStatus == LG3306_NL_LOCK) {
+			dbg_info("NL_LOCK(%d)\n", i);
+			return NLLockStatus;
+		}
+	}
+	dbg_info("NLLockStatus=%d\n", NLLockStatus);
+	return NLLockStatus;
+}
+
+static u8 lgdt3306a_get_packet_error(struct lgdt3306a_state *state)
+{
+	u8 val;
+	int ret;
+
+	ret = lgdt3306a_read_reg(state, 0x00fa, &val);
+	if (ret)
+		return ret;
+
+	return val;
+}
+
+static const u32 valx_x10[] = {
+	10,  11,  13,  15,  17,  20,  25,  33,  41,  50,  59,  73,  87,  100
+};
+static const u32 log10x_x1000[] = {
+	0,   41, 114, 176, 230, 301, 398, 518, 613, 699, 771, 863, 939, 1000
+};
+
+static u32 log10_x1000(u32 x)
+{
+	u32 diff_val, step_val, step_log10;
+	u32 log_val = 0;
+	u32 i;
+
+	if (x <= 0)
+		return -1000000; /* signal error */
+
+	if (x == 10)
+		return 0; /* log(1)=0 */
+
+	if (x < 10) {
+		while (x < 10) {
+			x = x * 10;
+			log_val--;
+		}
+	} else {	/* x > 10 */
+		while (x >= 100) {
+			x = x / 10;
+			log_val++;
+		}
+	}
+	log_val *= 1000;
+
+	if (x == 10) /* was our input an exact multiple of 10 */
+		return log_val;	/* don't need to interpolate */
+
+	/* find our place on the log curve */
+	for (i = 1; i < ARRAY_SIZE(valx_x10); i++) {
+		if (valx_x10[i] >= x)
+			break;
+	}
+	if (i == ARRAY_SIZE(valx_x10))
+		return log_val + log10x_x1000[i - 1];
+
+	diff_val   = x - valx_x10[i-1];
+	step_val   = valx_x10[i] - valx_x10[i - 1];
+	step_log10 = log10x_x1000[i] - log10x_x1000[i - 1];
+
+	/* do a linear interpolation to get in-between values */
+	return log_val + log10x_x1000[i - 1] +
+		((diff_val*step_log10) / step_val);
+}
+
+static u32 lgdt3306a_calculate_snr_x100(struct lgdt3306a_state *state)
+{
+	u32 mse; /* Mean-Square Error */
+	u32 pwr; /* Constelation power */
+	u32 snr_x100;
+
+	mse = (read_reg(state, 0x00ec) << 8) |
+	      (read_reg(state, 0x00ed));
+	pwr = (read_reg(state, 0x00e8) << 8) |
+	      (read_reg(state, 0x00e9));
+
+	if (mse == 0) /* no signal */
+		return 0;
+
+	snr_x100 = log10_x1000((pwr * 10000) / mse) - 3000;
+	dbg_info("mse=%u, pwr=%u, snr_x100=%d\n", mse, pwr, snr_x100);
+
+	return snr_x100;
+}
+
+static enum lgdt3306a_lock_status
+lgdt3306a_vsb_lock_poll(struct lgdt3306a_state *state)
+{
+	int ret;
+	u8 cnt = 0;
+	u8 packet_error;
+	u32 snr;
+
+	for (cnt = 0; cnt < 10; cnt++) {
+		if (lgdt3306a_sync_lock_poll(state) == LG3306_UNLOCK) {
+			dbg_info("no sync lock!\n");
+			return LG3306_UNLOCK;
+		}
+
+		msleep(20);
+		ret = lgdt3306a_pre_monitoring(state);
+		if (ret)
+			break;
+
+		packet_error = lgdt3306a_get_packet_error(state);
+		snr = lgdt3306a_calculate_snr_x100(state);
+		dbg_info("cnt=%d errors=%d snr=%d\n", cnt, packet_error, snr);
+
+		if ((snr >= 1500) && (packet_error < 0xff))
+			return LG3306_LOCK;
+	}
+
+	dbg_info("not locked!\n");
+	return LG3306_UNLOCK;
+}
+
+static enum lgdt3306a_lock_status
+lgdt3306a_qam_lock_poll(struct lgdt3306a_state *state)
+{
+	u8 cnt;
+	u8 packet_error;
+	u32	snr;
+
+	for (cnt = 0; cnt < 10; cnt++) {
+		if (lgdt3306a_fec_lock_poll(state) == LG3306_UNLOCK) {
+			dbg_info("no fec lock!\n");
+			return LG3306_UNLOCK;
+		}
+
+		msleep(20);
+
+		packet_error = lgdt3306a_get_packet_error(state);
+		snr = lgdt3306a_calculate_snr_x100(state);
+		dbg_info("cnt=%d errors=%d snr=%d\n", cnt, packet_error, snr);
+
+		if ((snr >= 1500) && (packet_error < 0xff))
+			return LG3306_LOCK;
+	}
+
+	dbg_info("not locked!\n");
+	return LG3306_UNLOCK;
+}
+
+static int lgdt3306a_read_status(struct dvb_frontend *fe, fe_status_t *status)
+{
+	struct lgdt3306a_state *state = fe->demodulator_priv;
+	u16 strength = 0;
+	int ret = 0;
+
+	if (fe->ops.tuner_ops.get_rf_strength) {
+		ret = fe->ops.tuner_ops.get_rf_strength(fe, &strength);
+		if (ret == 0)
+			dbg_info("strength=%d\n", strength);
+		else
+			dbg_info("fe->ops.tuner_ops.get_rf_strength() failed\n");
+	}
+
+	*status = 0;
+	if (lgdt3306a_neverlock_poll(state) == LG3306_NL_LOCK) {
+		*status |= FE_HAS_SIGNAL;
+		*status |= FE_HAS_CARRIER;
+
+		switch (state->current_modulation) {
+		case QAM_256:
+		case QAM_64:
+			if (lgdt3306a_qam_lock_poll(state) == LG3306_LOCK) {
+				*status |= FE_HAS_VITERBI;
+				*status |= FE_HAS_SYNC;
+
+				*status |= FE_HAS_LOCK;
+			}
+			break;
+		case VSB_8:
+			if (lgdt3306a_vsb_lock_poll(state) == LG3306_LOCK) {
+				*status |= FE_HAS_VITERBI;
+				*status |= FE_HAS_SYNC;
+
+				*status |= FE_HAS_LOCK;
+
+				ret = lgdt3306a_monitor_vsb(state);
+			}
+			break;
+		default:
+			ret = -EINVAL;
+		}
+	}
+	return ret;
+}
+
+
+static int lgdt3306a_read_snr(struct dvb_frontend *fe, u16 *snr)
+{
+	struct lgdt3306a_state *state = fe->demodulator_priv;
+
+	state->snr = lgdt3306a_calculate_snr_x100(state);
+	/* report SNR in dB * 10 */
+	*snr = state->snr/10;
+
+	return 0;
+}
+
+static int lgdt3306a_read_signal_strength(struct dvb_frontend *fe,
+					 u16 *strength)
+{
+	/*
+	 * Calculate some sort of "strength" from SNR
+	 */
+	struct lgdt3306a_state *state = fe->demodulator_priv;
+	u16 snr; /* snr_x10 */
+	int ret;
+	u32 ref_snr; /* snr*100 */
+	u32 str;
+
+	*strength = 0;
+
+	switch (state->current_modulation) {
+	case VSB_8:
+		 ref_snr = 1600; /* 16dB */
+		 break;
+	case QAM_64:
+		 ref_snr = 2200; /* 22dB */
+		 break;
+	case QAM_256:
+		 ref_snr = 2800; /* 28dB */
+		 break;
+	default:
+		return -EINVAL;
+	}
+
+	ret = fe->ops.read_snr(fe, &snr);
+	if (lg_chkerr(ret))
+		goto fail;
+
+	if (state->snr <= (ref_snr - 100))
+		str = 0;
+	else if (state->snr <= ref_snr)
+		str = (0xffff * 65) / 100; /* 65% */
+	else {
+		str = state->snr - ref_snr;
+		str /= 50;
+		str += 78; /* 78%-100% */
+		if (str > 100)
+			str = 100;
+		str = (0xffff * str) / 100;
+	}
+	*strength = (u16)str;
+	dbg_info("strength=%u\n", *strength);
+
+fail:
+	return ret;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static int lgdt3306a_read_ber(struct dvb_frontend *fe, u32 *ber)
+{
+	struct lgdt3306a_state *state = fe->demodulator_priv;
+	u32 tmp;
+
+	*ber = 0;
+#if 1
+	/* FGR - FIXME - I don't know what value is expected by dvb_core
+	 * what is the scale of the value?? */
+	tmp =              read_reg(state, 0x00fc); /* NBERVALUE[24-31] */
+	tmp = (tmp << 8) | read_reg(state, 0x00fd); /* NBERVALUE[16-23] */
+	tmp = (tmp << 8) | read_reg(state, 0x00fe); /* NBERVALUE[8-15] */
+	tmp = (tmp << 8) | read_reg(state, 0x00ff); /* NBERVALUE[0-7] */
+	*ber = tmp;
+	dbg_info("ber=%u\n", tmp);
+#endif
+	return 0;
+}
+
+static int lgdt3306a_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
+{
+	struct lgdt3306a_state *state = fe->demodulator_priv;
+
+	*ucblocks = 0;
+#if 1
+	/* FGR - FIXME - I don't know what value is expected by dvb_core
+	 * what happens when value wraps? */
+	*ucblocks = read_reg(state, 0x00f4); /* TPIFTPERRCNT[0-7] */
+	dbg_info("ucblocks=%u\n", *ucblocks);
+#endif
+
+	return 0;
+}
+
+static int lgdt3306a_tune(struct dvb_frontend *fe, bool re_tune,
+			  unsigned int mode_flags, unsigned int *delay,
+			  fe_status_t *status)
+{
+	int ret = 0;
+	struct lgdt3306a_state *state = fe->demodulator_priv;
+
+	dbg_info("re_tune=%u\n", re_tune);
+
+	if (re_tune) {
+		state->current_frequency = -1; /* force re-tune */
+		ret = lgdt3306a_set_parameters(fe);
+		if (ret != 0)
+			return ret;
+	}
+	*delay = 125;
+	ret = lgdt3306a_read_status(fe, status);
+
+	return ret;
+}
+
+static int lgdt3306a_get_tune_settings(struct dvb_frontend *fe,
+				       struct dvb_frontend_tune_settings
+				       *fe_tune_settings)
+{
+	fe_tune_settings->min_delay_ms = 100;
+	dbg_info("\n");
+	return 0;
+}
+
+static int lgdt3306a_search(struct dvb_frontend *fe)
+{
+	fe_status_t status = 0;
+	int i, ret;
+
+	/* set frontend */
+	ret = lgdt3306a_set_parameters(fe);
+	if (ret)
+		goto error;
+
+	/* wait frontend lock */
+	for (i = 20; i > 0; i--) {
+		dbg_info(": loop=%d\n", i);
+		msleep(50);
+		ret = lgdt3306a_read_status(fe, &status);
+		if (ret)
+			goto error;
+
+		if (status & FE_HAS_LOCK)
+			break;
+	}
+
+	/* check if we have a valid signal */
+	if (status & FE_HAS_LOCK)
+		return DVBFE_ALGO_SEARCH_SUCCESS;
+	else
+		return DVBFE_ALGO_SEARCH_AGAIN;
+
+error:
+	dbg_info("failed (%d)\n", ret);
+	return DVBFE_ALGO_SEARCH_ERROR;
+}
+
+static void lgdt3306a_release(struct dvb_frontend *fe)
+{
+	struct lgdt3306a_state *state = fe->demodulator_priv;
+
+	dbg_info("\n");
+	kfree(state);
+}
+
+static struct dvb_frontend_ops lgdt3306a_ops;
+
+struct dvb_frontend *lgdt3306a_attach(const struct lgdt3306a_config *config,
+				      struct i2c_adapter *i2c_adap)
+{
+	struct lgdt3306a_state *state = NULL;
+	int ret;
+	u8 val;
+
+	dbg_info("(%d-%04x)\n",
+	       i2c_adap ? i2c_adapter_id(i2c_adap) : 0,
+	       config ? config->i2c_addr : 0);
+
+	state = kzalloc(sizeof(struct lgdt3306a_state), GFP_KERNEL);
+	if (state == NULL)
+		goto fail;
+
+	state->cfg = config;
+	state->i2c_adap = i2c_adap;
+
+	memcpy(&state->frontend.ops, &lgdt3306a_ops,
+	       sizeof(struct dvb_frontend_ops));
+	state->frontend.demodulator_priv = state;
+
+	/* verify that we're talking to a lg3306a */
+	/* FGR - NOTE - there is no obvious ChipId to check; we check
+	 * some "known" bits after reset, but it's still just a guess */
+	ret = lgdt3306a_read_reg(state, 0x0000, &val);
+	if (lg_chkerr(ret))
+		goto fail;
+	if ((val & 0x74) != 0x74) {
+		pr_warn("expected 0x74, got 0x%x\n", (val & 0x74));
+#if 0
+		/* FIXME - re-enable when we know this is right */
+		goto fail;
+#endif
+	}
+	ret = lgdt3306a_read_reg(state, 0x0001, &val);
+	if (lg_chkerr(ret))
+		goto fail;
+	if ((val & 0xf6) != 0xc6) {
+		pr_warn("expected 0xc6, got 0x%x\n", (val & 0xf6));
+#if 0
+		/* FIXME - re-enable when we know this is right */
+		goto fail;
+#endif
+	}
+	ret = lgdt3306a_read_reg(state, 0x0002, &val);
+	if (lg_chkerr(ret))
+		goto fail;
+	if ((val & 0x73) != 0x03) {
+		pr_warn("expected 0x03, got 0x%x\n", (val & 0x73));
+#if 0
+		/* FIXME - re-enable when we know this is right */
+		goto fail;
+#endif
+	}
+
+	state->current_frequency = -1;
+	state->current_modulation = -1;
+
+	lgdt3306a_sleep(state);
+
+	return &state->frontend;
+
+fail:
+	pr_warn("unable to detect LGDT3306A hardware\n");
+	kfree(state);
+	return NULL;
+}
+EXPORT_SYMBOL(lgdt3306a_attach);
+
+#ifdef DBG_DUMP
+
+static const short regtab[] = {
+	0x0000, /* SOFTRSTB 1'b1 1'b1 1'b1 ADCPDB 1'b1 PLLPDB GBBPDB 11111111 */
+	0x0001, /* 1'b1 1'b1 1'b0 1'b0 AUTORPTRS */
+	0x0002, /* NI2CRPTEN 1'b0 1'b0 1'b0 SPECINVAUT */
+	0x0003, /* AGCRFOUT */
+	0x0004, /* ADCSEL1V ADCCNT ADCCNF ADCCNS ADCCLKPLL */
+	0x0005, /* PLLINDIVSE */
+	0x0006, /* PLLCTRL[7:0] 11100001 */
+	0x0007, /* SYSINITWAITTIME[7:0] (msec) 00001000 */
+	0x0008, /* STDOPMODE[7:0] 10000000 */
+	0x0009, /* 1'b0 1'b0 1'b0 STDOPDETTMODE[2:0] STDOPDETCMODE[1:0] 00011110 */
+	0x000a, /* DAFTEN 1'b1 x x SCSYSLOCK */
+	0x000b, /* SCSYSLOCKCHKTIME[7:0] (10msec) 01100100 */
+	0x000d, /* x SAMPLING4 */
+	0x000e, /* SAMFREQ[15:8] 00000000 */
+	0x000f, /* SAMFREQ[7:0] 00000000 */
+	0x0010, /* IFFREQ[15:8] 01100000 */
+	0x0011, /* IFFREQ[7:0] 00000000 */
+	0x0012, /* AGCEN AGCREFMO */
+	0x0013, /* AGCRFFIXB AGCIFFIXB AGCLOCKDETRNGSEL[1:0] 1'b1 1'b0 1'b0 1'b0 11101000 */
+	0x0014, /* AGCFIXVALUE[7:0] 01111111 */
+	0x0015, /* AGCREF[15:8] 00001010 */
+	0x0016, /* AGCREF[7:0] 11100100 */
+	0x0017, /* AGCDELAY[7:0] 00100000 */
+	0x0018, /* AGCRFBW[3:0] AGCIFBW[3:0] 10001000 */
+	0x0019, /* AGCUDOUTMODE[1:0] AGCUDCTRLLEN[1:0] AGCUDCTRL */
+	0x001c, /* 1'b1 PFEN MFEN AICCVSYNC */
+	0x001d, /* 1'b0 1'b1 1'b0 1'b1 AICCVSYNC */
+	0x001e, /* AICCALPHA[3:0] 1'b1 1'b0 1'b1 1'b0 01111010 */
+	0x001f, /* AICCDETTH[19:16] AICCOFFTH[19:16] 00000000 */
+	0x0020, /* AICCDETTH[15:8] 01111100 */
+	0x0021, /* AICCDETTH[7:0] 00000000 */
+	0x0022, /* AICCOFFTH[15:8] 00000101 */
+	0x0023, /* AICCOFFTH[7:0] 11100000 */
+	0x0024, /* AICCOPMODE3[1:0] AICCOPMODE2[1:0] AICCOPMODE1[1:0] AICCOPMODE0[1:0] 00000000 */
+	0x0025, /* AICCFIXFREQ3[23:16] 00000000 */
+	0x0026, /* AICCFIXFREQ3[15:8] 00000000 */
+	0x0027, /* AICCFIXFREQ3[7:0] 00000000 */
+	0x0028, /* AICCFIXFREQ2[23:16] 00000000 */
+	0x0029, /* AICCFIXFREQ2[15:8] 00000000 */
+	0x002a, /* AICCFIXFREQ2[7:0] 00000000 */
+	0x002b, /* AICCFIXFREQ1[23:16] 00000000 */
+	0x002c, /* AICCFIXFREQ1[15:8] 00000000 */
+	0x002d, /* AICCFIXFREQ1[7:0] 00000000 */
+	0x002e, /* AICCFIXFREQ0[23:16] 00000000 */
+	0x002f, /* AICCFIXFREQ0[15:8] 00000000 */
+	0x0030, /* AICCFIXFREQ0[7:0] 00000000 */
+	0x0031, /* 1'b0 1'b1 1'b0 1'b0 x DAGC1STER */
+	0x0032, /* DAGC1STEN DAGC1STER */
+	0x0033, /* DAGC1STREF[15:8] 00001010 */
+	0x0034, /* DAGC1STREF[7:0] 11100100 */
+	0x0035, /* DAGC2NDE */
+	0x0036, /* DAGC2NDREF[15:8] 00001010 */
+	0x0037, /* DAGC2NDREF[7:0] 10000000 */
+	0x0038, /* DAGC2NDLOCKDETRNGSEL[1:0] */
+	0x003d, /* 1'b1 SAMGEARS */
+	0x0040, /* SAMLFGMA */
+	0x0041, /* SAMLFBWM */
+	0x0044, /* 1'b1 CRGEARSHE */
+	0x0045, /* CRLFGMAN */
+	0x0046, /* CFLFBWMA */
+	0x0047, /* CRLFGMAN */
+	0x0048, /* x x x x CRLFGSTEP_VS[3:0] xxxx1001 */
+	0x0049, /* CRLFBWMA */
+	0x004a, /* CRLFBWMA */
+	0x0050, /* 1'b0 1'b1 1'b1 1'b0 MSECALCDA */
+	0x0070, /* TPOUTEN TPIFEN TPCLKOUTE */
+	0x0071, /* TPSENB TPSSOPBITE */
+	0x0073, /* TP47HINS x x CHBERINT PERMODE[1:0] PERINT[1:0] 1xx11100 */
+	0x0075, /* x x x x x IQSWAPCTRL[2:0] xxxxx000 */
+	0x0076, /* NBERCON NBERST NBERPOL NBERWSYN */
+	0x0077, /* x NBERLOSTTH[2:0] NBERACQTH[3:0] x0000000 */
+	0x0078, /* NBERPOLY[31:24] 00000000 */
+	0x0079, /* NBERPOLY[23:16] 00000000 */
+	0x007a, /* NBERPOLY[15:8] 00000000 */
+	0x007b, /* NBERPOLY[7:0] 00000000 */
+	0x007c, /* NBERPED[31:24] 00000000 */
+	0x007d, /* NBERPED[23:16] 00000000 */
+	0x007e, /* NBERPED[15:8] 00000000 */
+	0x007f, /* NBERPED[7:0] 00000000 */
+	0x0080, /* x AGCLOCK DAGCLOCK SYSLOCK x x NEVERLOCK[1:0] */
+	0x0085, /* SPECINVST */
+	0x0088, /* SYSLOCKTIME[15:8] */
+	0x0089, /* SYSLOCKTIME[7:0] */
+	0x008c, /* FECLOCKTIME[15:8] */
+	0x008d, /* FECLOCKTIME[7:0] */
+	0x008e, /* AGCACCOUT[15:8] */
+	0x008f, /* AGCACCOUT[7:0] */
+	0x0090, /* AICCREJSTATUS[3:0] AICCREJBUSY[3:0] */
+	0x0091, /* AICCVSYNC */
+	0x009c, /* CARRFREQOFFSET[15:8] */
+	0x009d, /* CARRFREQOFFSET[7:0] */
+	0x00a1, /* SAMFREQOFFSET[23:16] */
+	0x00a2, /* SAMFREQOFFSET[15:8] */
+	0x00a3, /* SAMFREQOFFSET[7:0] */
+	0x00a6, /* SYNCLOCK SYNCLOCKH */
+#if 0 /* covered elsewhere */
+	0x00e8, /* CONSTPWR[15:8] */
+	0x00e9, /* CONSTPWR[7:0] */
+	0x00ea, /* BMSE[15:8] */
+	0x00eb, /* BMSE[7:0] */
+	0x00ec, /* MSE[15:8] */
+	0x00ed, /* MSE[7:0] */
+	0x00ee, /* CONSTI[7:0] */
+	0x00ef, /* CONSTQ[7:0] */
+#endif
+	0x00f4, /* TPIFTPERRCNT[7:0] */
+	0x00f5, /* TPCORREC */
+	0x00f6, /* VBBER[15:8] */
+	0x00f7, /* VBBER[7:0] */
+	0x00f8, /* VABER[15:8] */
+	0x00f9, /* VABER[7:0] */
+	0x00fa, /* TPERRCNT[7:0] */
+	0x00fb, /* NBERLOCK x x x x x x x */
+	0x00fc, /* NBERVALUE[31:24] */
+	0x00fd, /* NBERVALUE[23:16] */
+	0x00fe, /* NBERVALUE[15:8] */
+	0x00ff, /* NBERVALUE[7:0] */
+	0x1000, /* 1'b0 WODAGCOU */
+	0x1005, /* x x 1'b1 1'b1 x SRD_Q_QM */
+	0x1009, /* SRDWAITTIME[7:0] (10msec) 00100011 */
+	0x100a, /* SRDWAITTIME_CQS[7:0] (msec) 01100100 */
+	0x101a, /* x 1'b1 1'b0 1'b0 x QMDQAMMODE[2:0] x100x010 */
+	0x1036, /* 1'b0 1'b1 1'b0 1'b0 SAMGSEND_CQS[3:0] 01001110 */
+	0x103c, /* SAMGSAUTOSTL_V[3:0] SAMGSAUTOEDL_V[3:0] 01000110 */
+	0x103d, /* 1'b1 1'b1 SAMCNORMBP_V[1:0] 1'b0 1'b0 SAMMODESEL_V[1:0] 11100001 */
+	0x103f, /* SAMZTEDSE */
+	0x105d, /* EQSTATUSE */
+	0x105f, /* x PMAPG2_V[2:0] x DMAPG2_V[2:0] x001x011 */
+	0x1060, /* 1'b1 EQSTATUSE */
+	0x1061, /* CRMAPBWSTL_V[3:0] CRMAPBWEDL_V[3:0] 00000100 */
+	0x1065, /* 1'b0 x CRMODE_V[1:0] 1'b1 x 1'b1 x 0x111x1x */
+	0x1066, /* 1'b0 1'b0 1'b1 1'b0 1'b1 PNBOOSTSE */
+	0x1068, /* CREPHNGAIN2_V[3:0] CREPHNPBW_V[3:0] 10010001 */
+	0x106e, /* x x x x x CREPHNEN_ */
+	0x106f, /* CREPHNTH_V[7:0] 00010101 */
+	0x1072, /* CRSWEEPN */
+	0x1073, /* CRPGAIN_V[3:0] x x 1'b1 1'b1 1001xx11 */
+	0x1074, /* CRPBW_V[3:0] x x 1'b1 1'b1 0001xx11 */
+	0x1080, /* DAFTSTATUS[1:0] x x x x x x */
+	0x1081, /* SRDSTATUS[1:0] x x x x x SRDLOCK */
+	0x10a9, /* EQSTATUS_CQS[1:0] x x x x x x */
+	0x10b7, /* EQSTATUS_V[1:0] x x x x x x */
+#if 0 /* SMART_ANT */
+	0x1f00, /* MODEDETE */
+	0x1f01, /* x x x x x x x SFNRST xxxxxxx0 */
+	0x1f03, /* NUMOFANT[7:0] 10000000 */
+	0x1f04, /* x SELMASK[6:0] x0000000 */
+	0x1f05, /* x SETMASK[6:0] x0000000 */
+	0x1f06, /* x TXDATA[6:0] x0000000 */
+	0x1f07, /* x CHNUMBER[6:0] x0000000 */
+	0x1f09, /* AGCTIME[23:16] 10011000 */
+	0x1f0a, /* AGCTIME[15:8] 10010110 */
+	0x1f0b, /* AGCTIME[7:0] 10000000 */
+	0x1f0c, /* ANTTIME[31:24] 00000000 */
+	0x1f0d, /* ANTTIME[23:16] 00000011 */
+	0x1f0e, /* ANTTIME[15:8] 10010000 */
+	0x1f0f, /* ANTTIME[7:0] 10010000 */
+	0x1f11, /* SYNCTIME[23:16] 10011000 */
+	0x1f12, /* SYNCTIME[15:8] 10010110 */
+	0x1f13, /* SYNCTIME[7:0] 10000000 */
+	0x1f14, /* SNRTIME[31:24] 00000001 */
+	0x1f15, /* SNRTIME[23:16] 01111101 */
+	0x1f16, /* SNRTIME[15:8] 01111000 */
+	0x1f17, /* SNRTIME[7:0] 01000000 */
+	0x1f19, /* FECTIME[23:16] 00000000 */
+	0x1f1a, /* FECTIME[15:8] 01110010 */
+	0x1f1b, /* FECTIME[7:0] 01110000 */
+	0x1f1d, /* FECTHD[7:0] 00000011 */
+	0x1f1f, /* SNRTHD[23:16] 00001000 */
+	0x1f20, /* SNRTHD[15:8] 01111111 */
+	0x1f21, /* SNRTHD[7:0] 10000101 */
+	0x1f80, /* IRQFLG x x SFSDRFLG MODEBFLG SAVEFLG SCANFLG TRACKFLG */
+	0x1f81, /* x SYNCCON SNRCON FECCON x STDBUSY SYNCRST AGCFZCO */
+	0x1f82, /* x x x SCANOPCD[4:0] */
+	0x1f83, /* x x x x MAINOPCD[3:0] */
+	0x1f84, /* x x RXDATA[13:8] */
+	0x1f85, /* RXDATA[7:0] */
+	0x1f86, /* x x SDTDATA[13:8] */
+	0x1f87, /* SDTDATA[7:0] */
+	0x1f89, /* ANTSNR[23:16] */
+	0x1f8a, /* ANTSNR[15:8] */
+	0x1f8b, /* ANTSNR[7:0] */
+	0x1f8c, /* x x x x ANTFEC[13:8] */
+	0x1f8d, /* ANTFEC[7:0] */
+	0x1f8e, /* MAXCNT[7:0] */
+	0x1f8f, /* SCANCNT[7:0] */
+	0x1f91, /* MAXPW[23:16] */
+	0x1f92, /* MAXPW[15:8] */
+	0x1f93, /* MAXPW[7:0] */
+	0x1f95, /* CURPWMSE[23:16] */
+	0x1f96, /* CURPWMSE[15:8] */
+	0x1f97, /* CURPWMSE[7:0] */
+#endif /* SMART_ANT */
+	0x211f, /* 1'b1 1'b1 1'b1 CIRQEN x x 1'b0 1'b0 1111xx00 */
+	0x212a, /* EQAUTOST */
+	0x2122, /* CHFAST[7:0] 01100000 */
+	0x212b, /* FFFSTEP_V[3:0] x FBFSTEP_V[2:0] 0001x001 */
+	0x212c, /* PHDEROTBWSEL[3:0] 1'b1 1'b1 1'b1 1'b0 10001110 */
+	0x212d, /* 1'b1 1'b1 1'b1 1'b1 x x TPIFLOCKS */
+	0x2135, /* DYNTRACKFDEQ[3:0] x 1'b0 1'b0 1'b0 1010x000 */
+	0x2141, /* TRMODE[1:0] 1'b1 1'b1 1'b0 1'b1 1'b1 1'b1 01110111 */
+	0x2162, /* AICCCTRLE */
+	0x2173, /* PHNCNFCNT[7:0] 00000100 */
+	0x2179, /* 1'b0 1'b0 1'b0 1'b1 x BADSINGLEDYNTRACKFBF[2:0] 0001x001 */
+	0x217a, /* 1'b0 1'b0 1'b0 1'b1 x BADSLOWSINGLEDYNTRACKFBF[2:0] 0001x001 */
+	0x217e, /* CNFCNTTPIF[7:0] 00001000 */
+	0x217f, /* TPERRCNTTPIF[7:0] 00000001 */
+	0x2180, /* x x x x x x FBDLYCIR[9:8] */
+	0x2181, /* FBDLYCIR[7:0] */
+	0x2185, /* MAXPWRMAIN[7:0] */
+	0x2191, /* NCOMBDET x x x x x x x */
+	0x2199, /* x MAINSTRON */
+	0x219a, /* FFFEQSTEPOUT_V[3:0] FBFSTEPOUT_V[2:0] */
+	0x21a1, /* x x SNRREF[5:0] */
+	0x2845, /* 1'b0 1'b1 x x FFFSTEP_CQS[1:0] FFFCENTERTAP[1:0] 01xx1110 */
+	0x2846, /* 1'b0 x 1'b0 1'b1 FBFSTEP_CQS[1:0] 1'b1 1'b0 0x011110 */
+	0x2847, /* ENNOSIGDE */
+	0x2849, /* 1'b1 1'b1 NOUSENOSI */
+	0x284a, /* EQINITWAITTIME[7:0] 01100100 */
+	0x3000, /* 1'b1 1'b1 1'b1 x x x 1'b0 RPTRSTM */
+	0x3001, /* RPTRSTWAITTIME[7:0] (100msec) 00110010 */
+	0x3031, /* FRAMELOC */
+	0x3032, /* 1'b1 1'b0 1'b0 1'b0 x x FRAMELOCKMODE_CQS[1:0] 1000xx11 */
+	0x30a9, /* VDLOCK_Q FRAMELOCK */
+	0x30aa, /* MPEGLOCK */
+};
+
+#define numDumpRegs (sizeof(regtab)/sizeof(regtab[0]))
+static u8 regval1[numDumpRegs] = {0, };
+static u8 regval2[numDumpRegs] = {0, };
+
+static void lgdt3306a_DumpAllRegs(struct lgdt3306a_state *state)
+{
+		memset(regval2, 0xff, sizeof(regval2));
+		lgdt3306a_DumpRegs(state);
+}
+
+static void lgdt3306a_DumpRegs(struct lgdt3306a_state *state)
+{
+	int i;
+	int sav_debug = debug;
+
+	if ((debug & DBG_DUMP) == 0)
+		return;
+	debug &= ~DBG_REG; /* suppress DBG_REG during reg dump */
+
+	lg_debug("\n");
+
+	for (i = 0; i < numDumpRegs; i++) {
+		lgdt3306a_read_reg(state, regtab[i], &regval1[i]);
+		if (regval1[i] != regval2[i]) {
+			lg_debug(" %04X = %02X\n", regtab[i], regval1[i]);
+			regval2[i] = regval1[i];
+		}
+	}
+	debug = sav_debug;
+}
+#endif /* DBG_DUMP */
+
+
+
+static struct dvb_frontend_ops lgdt3306a_ops = {
+	.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
+	.info = {
+		.name = "LG Electronics LGDT3306A VSB/QAM Frontend",
+		.frequency_min      = 54000000,
+		.frequency_max      = 858000000,
+		.frequency_stepsize = 62500,
+		.caps = FE_CAN_QAM_64 | FE_CAN_QAM_256 | FE_CAN_8VSB
+	},
+	.i2c_gate_ctrl        = lgdt3306a_i2c_gate_ctrl,
+	.init                 = lgdt3306a_init,
+	.sleep                = lgdt3306a_fe_sleep,
+	/* if this is set, it overrides the default swzigzag */
+	.tune                 = lgdt3306a_tune,
+	.set_frontend         = lgdt3306a_set_parameters,
+	.get_frontend         = lgdt3306a_get_frontend,
+	.get_frontend_algo    = lgdt3306a_get_frontend_algo,
+	.get_tune_settings    = lgdt3306a_get_tune_settings,
+	.read_status          = lgdt3306a_read_status,
+	.read_ber             = lgdt3306a_read_ber,
+	.read_signal_strength = lgdt3306a_read_signal_strength,
+	.read_snr             = lgdt3306a_read_snr,
+	.read_ucblocks        = lgdt3306a_read_ucblocks,
+	.release              = lgdt3306a_release,
+	.ts_bus_ctrl          = lgdt3306a_ts_bus_ctrl,
+	.search               = lgdt3306a_search,
+};
+
+MODULE_DESCRIPTION("LG Electronics LGDT3306A ATSC/QAM-B Demodulator Driver");
+MODULE_AUTHOR("Fred Richter <frichter@hauppauge.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("0.2");
diff --git a/drivers/media/dvb-frontends/lgdt3306a.h b/drivers/media/dvb-frontends/lgdt3306a.h
new file mode 100644
index 0000000..ed8aa3e
--- /dev/null
+++ b/drivers/media/dvb-frontends/lgdt3306a.h
@@ -0,0 +1,74 @@
+/*
+ *    Support for LGDT3306A - 8VSB/QAM-B
+ *
+ *    Copyright (C) 2013,2014 Fred Richter <frichter@hauppauge.com>
+ *      based on lgdt3305.[ch] by Michael Krufky
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2 of the License, or
+ *    (at your option) any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ */
+
+#ifndef _LGDT3306A_H_
+#define _LGDT3306A_H_
+
+#include <linux/i2c.h>
+#include "dvb_frontend.h"
+
+
+enum lgdt3306a_mpeg_mode {
+	LGDT3306A_MPEG_PARALLEL = 0,
+	LGDT3306A_MPEG_SERIAL = 1,
+};
+
+enum lgdt3306a_tp_clock_edge {
+	LGDT3306A_TPCLK_RISING_EDGE = 0,
+	LGDT3306A_TPCLK_FALLING_EDGE = 1,
+};
+
+enum lgdt3306a_tp_valid_polarity {
+	LGDT3306A_TP_VALID_LOW = 0,
+	LGDT3306A_TP_VALID_HIGH = 1,
+};
+
+struct lgdt3306a_config {
+	u8 i2c_addr;
+
+	/* user defined IF frequency in KHz */
+	u16 qam_if_khz;
+	u16 vsb_if_khz;
+
+	/* disable i2c repeater - 0:repeater enabled 1:repeater disabled */
+	unsigned int deny_i2c_rptr:1;
+
+	/* spectral inversion - 0:disabled 1:enabled */
+	unsigned int spectral_inversion:1;
+
+	enum lgdt3306a_mpeg_mode mpeg_mode;
+	enum lgdt3306a_tp_clock_edge tpclk_edge;
+	enum lgdt3306a_tp_valid_polarity tpvalid_polarity;
+
+	/* demod clock freq in MHz; 24 or 25 supported */
+	int  xtalMHz;
+};
+
+#if IS_ENABLED(CONFIG_DVB_LGDT3306A)
+struct dvb_frontend *lgdt3306a_attach(const struct lgdt3306a_config *config,
+				      struct i2c_adapter *i2c_adap);
+#else
+static inline
+struct dvb_frontend *lgdt3306a_attach(const struct lgdt3306a_config *config,
+				      struct i2c_adapter *i2c_adap)
+{
+	printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+	return NULL;
+}
+#endif /* CONFIG_DVB_LGDT3306A */
+
+#endif /* _LGDT3306A_H_ */
diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c
index 856374b..2c7217f 100644
--- a/drivers/media/dvb-frontends/mb86a20s.c
+++ b/drivers/media/dvb-frontends/mb86a20s.c
@@ -157,7 +157,6 @@
 	{ 0x45, 0x04 },				/* CN symbol 4 */
 	{ 0x48, 0x04 },				/* CN manual mode */
 
-	{ 0x50, 0xd5 }, { 0x51, 0x01 },		/* Serial */
 	{ 0x50, 0xd6 }, { 0x51, 0x1f },
 	{ 0x50, 0xd2 }, { 0x51, 0x03 },
 	{ 0x50, 0xd7 }, { 0x51, 0xbf },
@@ -1860,16 +1859,15 @@
 	dev_dbg(&state->i2c->dev, "%s: IF=%d, IF reg=0x%06llx\n",
 		__func__, state->if_freq, (long long)pll);
 
-	if (!state->config->is_serial) {
+	if (!state->config->is_serial)
 		regD5 &= ~1;
 
-		rc = mb86a20s_writereg(state, 0x50, 0xd5);
-		if (rc < 0)
-			goto err;
-		rc = mb86a20s_writereg(state, 0x51, regD5);
-		if (rc < 0)
-			goto err;
-	}
+	rc = mb86a20s_writereg(state, 0x50, 0xd5);
+	if (rc < 0)
+		goto err;
+	rc = mb86a20s_writereg(state, 0x51, regD5);
+	if (rc < 0)
+		goto err;
 
 	rc = mb86a20s_writeregdata(state, mb86a20s_init2);
 	if (rc < 0)
diff --git a/drivers/media/media-entity.c b/drivers/media/media-entity.c
index e1cd132..7004cb0 100644
--- a/drivers/media/media-entity.c
+++ b/drivers/media/media-entity.c
@@ -496,25 +496,17 @@
 
 	mdev = source->parent;
 
-	if ((flags & MEDIA_LNK_FL_ENABLED) && mdev->link_notify) {
-		ret = mdev->link_notify(link->source, link->sink,
-					MEDIA_LNK_FL_ENABLED);
+	if (mdev->link_notify) {
+		ret = mdev->link_notify(link, flags,
+					MEDIA_DEV_NOTIFY_PRE_LINK_CH);
 		if (ret < 0)
 			return ret;
 	}
 
 	ret = __media_entity_setup_link_notify(link, flags);
-	if (ret < 0)
-		goto err;
 
-	if (!(flags & MEDIA_LNK_FL_ENABLED) && mdev->link_notify)
-		mdev->link_notify(link->source, link->sink, 0);
-
-	return 0;
-
-err:
-	if ((flags & MEDIA_LNK_FL_ENABLED) && mdev->link_notify)
-		mdev->link_notify(link->source, link->sink, 0);
+	if (mdev->link_notify)
+		mdev->link_notify(link, flags, MEDIA_DEV_NOTIFY_POST_LINK_CH);
 
 	return ret;
 }
diff --git a/drivers/media/pci/Kconfig b/drivers/media/pci/Kconfig
index d4e2ed3..53196f1 100644
--- a/drivers/media/pci/Kconfig
+++ b/drivers/media/pci/Kconfig
@@ -1,6 +1,7 @@
+if PCI && MEDIA_SUPPORT
+
 menuconfig MEDIA_PCI_SUPPORT
 	bool "Media PCI Adapters"
-	depends on PCI && MEDIA_SUPPORT
 	help
 	  Enable media drivers for PCI/PCIe bus.
 	  If you have such devices, say Y.
@@ -45,3 +46,4 @@
 endif
 
 endif #MEDIA_PCI_SUPPORT
+endif #PCI
diff --git a/drivers/media/pci/saa7134/saa7134-alsa.c b/drivers/media/pci/saa7134/saa7134-alsa.c
index 10460fd..dbcdfbf8 100644
--- a/drivers/media/pci/saa7134/saa7134-alsa.c
+++ b/drivers/media/pci/saa7134/saa7134-alsa.c
@@ -172,7 +172,9 @@
 		dprintk("irq: overrun [full=%d/%d] - Blocks in %d\n",dev->dmasound.read_count,
 			dev->dmasound.bufsize, dev->dmasound.blocks);
 		spin_unlock(&dev->slock);
+		snd_pcm_stream_lock(dev->dmasound.substream);
 		snd_pcm_stop(dev->dmasound.substream,SNDRV_PCM_STATE_XRUN);
+		snd_pcm_stream_unlock(dev->dmasound.substream);
 		return;
 	}
 
diff --git a/drivers/media/platform/coda.c b/drivers/media/platform/coda.c
index 9d1481a..c504f70 100644
--- a/drivers/media/platform/coda.c
+++ b/drivers/media/platform/coda.c
@@ -1933,7 +1933,7 @@
 
 #ifdef CONFIG_OF
 static const struct of_device_id coda_dt_ids[] = {
-	{ .compatible = "fsl,imx27-vpu", .data = &coda_platform_ids[CODA_IMX27] },
+	{ .compatible = "fsl,imx27-vpu", .data = &coda_devdata[CODA_IMX27] },
 	{ .compatible = "fsl,imx53-vpu", .data = &coda_devdata[CODA_IMX53] },
 	{ /* sentinel */ }
 };
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.c b/drivers/media/platform/exynos-gsc/gsc-core.c
index 33b5ffc..f45b940 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.c
+++ b/drivers/media/platform/exynos-gsc/gsc-core.c
@@ -1122,10 +1122,14 @@
 		goto err_clk;
 	}
 
-	ret = gsc_register_m2m_device(gsc);
+	ret = v4l2_device_register(dev, &gsc->v4l2_dev);
 	if (ret)
 		goto err_clk;
 
+	ret = gsc_register_m2m_device(gsc);
+	if (ret)
+		goto err_v4l2;
+
 	platform_set_drvdata(pdev, gsc);
 	pm_runtime_enable(dev);
 	ret = pm_runtime_get_sync(&pdev->dev);
@@ -1147,6 +1151,8 @@
 	pm_runtime_put(dev);
 err_m2m:
 	gsc_unregister_m2m_device(gsc);
+err_v4l2:
+	v4l2_device_unregister(&gsc->v4l2_dev);
 err_clk:
 	gsc_clk_put(gsc);
 	return ret;
@@ -1157,6 +1163,7 @@
 	struct gsc_dev *gsc = platform_get_drvdata(pdev);
 
 	gsc_unregister_m2m_device(gsc);
+	v4l2_device_unregister(&gsc->v4l2_dev);
 
 	vb2_dma_contig_cleanup_ctx(gsc->alloc_ctx);
 	pm_runtime_disable(&pdev->dev);
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.h b/drivers/media/platform/exynos-gsc/gsc-core.h
index cc19bba..76435d3 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.h
+++ b/drivers/media/platform/exynos-gsc/gsc-core.h
@@ -343,6 +343,7 @@
 	unsigned long			state;
 	struct vb2_alloc_ctx		*alloc_ctx;
 	struct video_device		vdev;
+	struct v4l2_device		v4l2_dev;
 };
 
 /**
diff --git a/drivers/media/platform/exynos-gsc/gsc-m2m.c b/drivers/media/platform/exynos-gsc/gsc-m2m.c
index 40a73f7..e576ff2 100644
--- a/drivers/media/platform/exynos-gsc/gsc-m2m.c
+++ b/drivers/media/platform/exynos-gsc/gsc-m2m.c
@@ -751,6 +751,7 @@
 	gsc->vdev.release	= video_device_release_empty;
 	gsc->vdev.lock		= &gsc->lock;
 	gsc->vdev.vfl_dir	= VFL_DIR_M2M;
+	gsc->vdev.v4l2_dev	= &gsc->v4l2_dev;
 	snprintf(gsc->vdev.name, sizeof(gsc->vdev.name), "%s.%d:m2m",
 					GSC_MODULE_NAME, gsc->id);
 
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
index 15ef8f2..b5b480b 100644
--- a/drivers/media/platform/exynos4-is/media-dev.c
+++ b/drivers/media/platform/exynos4-is/media-dev.c
@@ -1441,9 +1441,9 @@
 err_unlock:
 	mutex_unlock(&fmd->media_dev.graph_mutex);
 err_clk:
-	media_device_unregister(&fmd->media_dev);
 	fimc_md_put_clocks(fmd);
 	fimc_md_unregister_entities(fmd);
+	media_device_unregister(&fmd->media_dev);
 err_md:
 	v4l2_device_unregister(&fmd->v4l2_dev);
 	return ret;
diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c
index 553d87e..fd6289d 100644
--- a/drivers/media/platform/s5p-g2d/g2d.c
+++ b/drivers/media/platform/s5p-g2d/g2d.c
@@ -784,6 +784,7 @@
 	}
 	*vfd = g2d_videodev;
 	vfd->lock = &dev->mutex;
+	vfd->v4l2_dev = &dev->v4l2_dev;
 	ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
 	if (ret) {
 		v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
diff --git a/drivers/media/platform/sh_vou.c b/drivers/media/platform/sh_vou.c
index 7d02350..5d538e7 100644
--- a/drivers/media/platform/sh_vou.c
+++ b/drivers/media/platform/sh_vou.c
@@ -776,7 +776,7 @@
 	v4l_bound_align_image(&pix->width, 0, VOU_MAX_IMAGE_WIDTH, 1,
 			      &pix->height, 0, VOU_MAX_IMAGE_HEIGHT, 1, 0);
 
-	for (i = 0; ARRAY_SIZE(vou_fmt); i++)
+	for (i = 0; i < ARRAY_SIZE(vou_fmt); i++)
 		if (vou_fmt[i].pfmt == pix->pixelformat)
 			return 0;
 
diff --git a/drivers/media/tuners/Kconfig b/drivers/media/tuners/Kconfig
index 15665de..e5150b6 100644
--- a/drivers/media/tuners/Kconfig
+++ b/drivers/media/tuners/Kconfig
@@ -1,7 +1,7 @@
 # Analog TV tuners, auto-loaded via tuner.ko
 config MEDIA_TUNER
 	tristate
-	depends on (MEDIA_ANALOG_TV_SUPPORT || MEDIA_RADIO_SUPPORT) && I2C
+	depends on (MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT || MEDIA_RADIO_SUPPORT) && I2C
 	default y
 	select MEDIA_TUNER_XC2028 if MEDIA_SUBDRV_AUTOSELECT
 	select MEDIA_TUNER_XC5000 if MEDIA_SUBDRV_AUTOSELECT
@@ -49,6 +49,13 @@
 	help
 	  A silicon tuner module. Say Y when you want to support this tuner.
 
+config MEDIA_TUNER_TDA18272
+	tristate "NXP TDA18272 silicon tuner"
+	depends on MEDIA_SUPPORT && I2C
+	default m if !MEDIA_SUBDRV_AUTOSELECT
+	help
+	  A silicon tuner module. Say Y when you want to support this tuner.
+
 config MEDIA_TUNER_TDA9887
 	tristate "TDA 9885/6/7 analog IF demodulator"
 	depends on MEDIA_SUPPORT && I2C
@@ -222,6 +229,13 @@
 	help
 	  Infineon TUA 9001 silicon tuner driver.
 
+config MEDIA_TUNER_SI2157
+	tristate "Silicon Labs Si2157 silicon tuner"
+	depends on MEDIA_SUPPORT && I2C
+	default m if !MEDIA_SUBDRV_AUTOSELECT
+	help
+	  Silicon Labs Si2157 silicon tuner driver.
+
 config MEDIA_TUNER_IT913X
 	tristate "ITE Tech IT913x silicon tuner"
 	depends on MEDIA_SUPPORT && I2C
diff --git a/drivers/media/tuners/Makefile b/drivers/media/tuners/Makefile
index 308f108..e35349e 100644
--- a/drivers/media/tuners/Makefile
+++ b/drivers/media/tuners/Makefile
@@ -15,6 +15,7 @@
 obj-$(CONFIG_MEDIA_TUNER_TDA9887) += tda9887.o
 obj-$(CONFIG_MEDIA_TUNER_TDA827X) += tda827x.o
 obj-$(CONFIG_MEDIA_TUNER_TDA18271) += tda18271.o
+obj-$(CONFIG_MEDIA_TUNER_TDA18272) += tda18272.o
 obj-$(CONFIG_MEDIA_TUNER_XC5000) += xc5000.o
 obj-$(CONFIG_MEDIA_TUNER_XC4000) += xc4000.o
 obj-$(CONFIG_MEDIA_TUNER_MT2060) += mt2060.o
@@ -31,6 +32,7 @@
 obj-$(CONFIG_MEDIA_TUNER_E4000) += e4000.o
 obj-$(CONFIG_MEDIA_TUNER_FC2580) += fc2580.o
 obj-$(CONFIG_MEDIA_TUNER_TUA9001) += tua9001.o
+obj-$(CONFIG_MEDIA_TUNER_SI2157) += si2157.o
 obj-$(CONFIG_MEDIA_TUNER_FC0011) += fc0011.o
 obj-$(CONFIG_MEDIA_TUNER_FC0012) += fc0012.o
 obj-$(CONFIG_MEDIA_TUNER_FC0013) += fc0013.o
diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c
new file mode 100644
index 0000000..7c2f621
--- /dev/null
+++ b/drivers/media/tuners/si2157.c
@@ -0,0 +1,467 @@
+/*
+ * Silicon Labs Si2146/2147/2148/2157/2158 silicon tuner driver
+ *
+ * Copyright (C) 2014 Antti Palosaari <crope@iki.fi>
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2 of the License, or
+ *    (at your option) any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ */
+
+#include "si2157_priv.h"
+
+static const struct dvb_tuner_ops si2157_ops;
+
+/* si2157_i2c_master_send - issue a single I2C message in master transmit mode */
+static int si2157_i2c_master_send(struct i2c_adapter *adap, u8 i2c_addr,
+				  const char *buf, int count)
+{
+	int ret;
+	struct i2c_msg msg;
+
+	msg.addr = i2c_addr;
+	msg.flags = 0;
+	msg.len = count;
+	msg.buf = (char *)buf;
+
+	ret = i2c_transfer(adap, &msg, 1);
+
+	/*
+	 * If everything went ok (i.e. 1 msg transmitted), return #bytes
+	 * transmitted, else error code.
+	 */
+	return (ret == 1) ? count : ret;
+}
+
+/* si2157_i2c_master_recv - issue a single I2C message in master receive mode */
+static int si2157_i2c_master_recv(struct i2c_adapter *adap, u8 i2c_addr,
+				  char *buf, int count)
+{
+	struct i2c_msg msg;
+	int ret;
+
+	msg.addr = i2c_addr;
+	msg.flags = I2C_M_RD;
+	msg.len = count;
+	msg.buf = buf;
+
+	ret = i2c_transfer(adap, &msg, 1);
+
+	/*
+	 * If everything went ok (i.e. 1 msg received), return #bytes received,
+	 * else error code.
+	 */
+	return (ret == 1) ? count : ret;
+}
+
+/* execute firmware command */
+static int si2157_cmd_execute(struct si2157_dev *dev, struct si2157_cmd *cmd)
+{
+	int ret;
+	unsigned long timeout;
+
+	mutex_lock(&dev->i2c_mutex);
+
+	if (cmd->wlen) {
+		/* write cmd and args for firmware */
+		ret = si2157_i2c_master_send(dev->i2c_adap, dev->i2c_addr,
+									 cmd->args, cmd->wlen);
+		if (ret < 0) {
+			goto err_mutex_unlock;
+		} else if (ret != cmd->wlen) {
+			ret = -EREMOTEIO;
+			goto err_mutex_unlock;
+		}
+	}
+
+	if (cmd->rlen) {
+		/* wait cmd execution terminate */
+		#define TIMEOUT 80
+		timeout = jiffies + msecs_to_jiffies(TIMEOUT);
+		while (!time_after(jiffies, timeout)) {
+			ret = si2157_i2c_master_recv(dev->i2c_adap, dev->i2c_addr,
+										 cmd->args, cmd->rlen);
+			if (ret < 0) {
+				goto err_mutex_unlock;
+			} else if (ret != cmd->rlen) {
+				ret = -EREMOTEIO;
+				goto err_mutex_unlock;
+			}
+
+			/* firmware ready? */
+			if ((cmd->args[0] >> 7) & 0x01)
+				break;
+		}
+
+		dev_dbg(&dev->i2c_adap->dev, "cmd execution took %d ms\n",
+				jiffies_to_msecs(jiffies) -
+				(jiffies_to_msecs(timeout) - TIMEOUT));
+
+		if (!((cmd->args[0] >> 7) & 0x01)) {
+			ret = -ETIMEDOUT;
+			goto err_mutex_unlock;
+		}
+	}
+
+	mutex_unlock(&dev->i2c_mutex);
+	return 0;
+
+err_mutex_unlock:
+	mutex_unlock(&dev->i2c_mutex);
+	dev_dbg(&dev->i2c_adap->dev, "failed=%d\n", ret);
+	return ret;
+}
+
+static int si2157_init(struct dvb_frontend *fe)
+{
+	struct si2157_dev *dev = fe->tuner_priv;
+	int ret, len, remaining;
+	struct si2157_cmd cmd;
+	const struct firmware *fw;
+	const char *fw_name;
+	unsigned int chip_id;
+
+	dev_dbg(&dev->i2c_adap->dev, "\n");
+
+	if (dev->fw_loaded)
+		goto warm;
+
+	/* power up */
+	if (dev->chiptype == SI2157_CHIPTYPE_SI2146) {
+		memcpy(cmd.args, "\xc0\x05\x01\x00\x00\x0b\x00\x00\x01", 9);
+		cmd.wlen = 9;
+	} else {
+		memcpy(cmd.args, "\xc0\x00\x0c\x00\x00\x01\x01\x01\x01\x01\x01\x02\x00\x00\x01", 15);
+		cmd.wlen = 15;
+	}
+	cmd.rlen = 1;
+	ret = si2157_cmd_execute(dev, &cmd);
+	if (ret)
+		goto err;
+
+	/* query chip revision */
+	memcpy(cmd.args, "\x02", 1);
+	cmd.wlen = 1;
+	cmd.rlen = 13;
+	ret = si2157_cmd_execute(dev, &cmd);
+	if (ret)
+		goto err;
+
+	chip_id = cmd.args[1] << 24 | cmd.args[2] << 16 | cmd.args[3] << 8 |
+			cmd.args[4] << 0;
+
+	#define SI2158_A20 ('A' << 24 | 58 << 16 | '2' << 8 | '0' << 0)
+	#define SI2148_A20 ('A' << 24 | 48 << 16 | '2' << 8 | '0' << 0)
+	#define SI2157_A30 ('A' << 24 | 57 << 16 | '3' << 8 | '0' << 0)
+	#define SI2147_A30 ('A' << 24 | 47 << 16 | '3' << 8 | '0' << 0)
+	#define SI2146_A10 ('A' << 24 | 46 << 16 | '1' << 8 | '0' << 0)
+
+	switch (chip_id) {
+	case SI2158_A20:
+	case SI2148_A20:
+		fw_name = SI2158_A20_FIRMWARE;
+		break;
+	case SI2157_A30:
+	case SI2147_A30:
+	case SI2146_A10:
+		fw_name = NULL;
+		break;
+	default:
+		dev_err(&dev->i2c_adap->dev,
+			"unknown chip version Si21%d-%c%c%c\n", cmd.args[2],
+			cmd.args[1], cmd.args[3], cmd.args[4]);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	dev_info(&dev->i2c_adap->dev, "found a 'Silicon Labs Si21%d-%c%c%c'\n",
+			cmd.args[2], cmd.args[1], cmd.args[3], cmd.args[4]);
+
+	if (fw_name == NULL)
+		goto skip_fw_download;
+
+	/* request the firmware, this will block and timeout */
+	ret = request_firmware(&fw, fw_name, &dev->i2c_adap->dev);
+	if (ret) {
+		dev_err(&dev->i2c_adap->dev, "firmware file '%s' not found\n",
+				fw_name);
+		goto err;
+	}
+
+	/* firmware should be n chunks of 17 bytes */
+	if (fw->size % 17 != 0) {
+		dev_err(&dev->i2c_adap->dev, "firmware file '%s' is invalid\n",
+				fw_name);
+		ret = -EINVAL;
+		goto err_release_firmware;
+	}
+
+	dev_info(&dev->i2c_adap->dev, "downloading firmware from file '%s'\n",
+			fw_name);
+
+	for (remaining = fw->size; remaining > 0; remaining -= 17) {
+		len = fw->data[fw->size - remaining];
+		memcpy(cmd.args, &fw->data[(fw->size - remaining) + 1], len);
+		cmd.wlen = len;
+		cmd.rlen = 1;
+		ret = si2157_cmd_execute(dev, &cmd);
+		if (ret) {
+			dev_err(&dev->i2c_adap->dev, "firmware download failed %d\n",
+					ret);
+			goto err_release_firmware;
+		}
+	}
+
+	release_firmware(fw);
+
+skip_fw_download:
+	/* reboot the tuner with new firmware? */
+	memcpy(cmd.args, "\x01\x01", 2);
+	cmd.wlen = 2;
+	cmd.rlen = 1;
+	ret = si2157_cmd_execute(dev, &cmd);
+	if (ret)
+		goto err;
+
+	/* query firmware version */
+	memcpy(cmd.args, "\x11", 1);
+	cmd.wlen = 1;
+	cmd.rlen = 10;
+	ret = si2157_cmd_execute(dev, &cmd);
+	if (ret)
+		goto err;
+
+	dev_info(&dev->i2c_adap->dev, "firmware version: %c.%c.%d\n",
+			cmd.args[6], cmd.args[7], cmd.args[8]);
+
+	dev->fw_loaded = true;
+
+warm:
+	dev->active = true;
+	return 0;
+
+err_release_firmware:
+	release_firmware(fw);
+err:
+	dev_dbg(&dev->i2c_adap->dev, "failed=%d\n", ret);
+	return ret;
+}
+
+static int si2157_sleep(struct dvb_frontend *fe)
+{
+	struct si2157_dev *dev = fe->tuner_priv;
+	int ret;
+	struct si2157_cmd cmd;
+
+	dev_dbg(&dev->i2c_adap->dev, "\n");
+
+	dev->active = false;
+
+	/* standby */
+	memcpy(cmd.args, "\x16\x00", 2);
+	cmd.wlen = 2;
+	cmd.rlen = 1;
+	ret = si2157_cmd_execute(dev, &cmd);
+	if (ret)
+		goto err;
+
+	return 0;
+err:
+	dev_dbg(&dev->i2c_adap->dev, "failed=%d\n", ret);
+	return ret;
+}
+
+static int si2157_set_params(struct dvb_frontend *fe)
+{
+	struct si2157_dev *dev = fe->tuner_priv;
+	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+	int ret;
+	struct si2157_cmd cmd;
+	u8 bandwidth, delivery_system;
+	u32 if_frequency = 5000000;
+
+	dev_dbg(&dev->i2c_adap->dev,
+			"delivery_system=%d frequency=%u bandwidth_hz=%u\n",
+			c->delivery_system, c->frequency, c->bandwidth_hz);
+
+	if (!dev->active) {
+		ret = -EAGAIN;
+		goto err;
+	}
+
+	if (c->bandwidth_hz <= 6000000)
+		bandwidth = 0x06;
+	else if (c->bandwidth_hz <= 7000000)
+		bandwidth = 0x07;
+	else if (c->bandwidth_hz <= 8000000)
+		bandwidth = 0x08;
+	else
+		bandwidth = 0x0f;
+
+	switch (c->delivery_system) {
+	case SYS_ATSC:
+			delivery_system = 0x00;
+			if_frequency = 3250000;
+			break;
+	case SYS_DVBC_ANNEX_B:
+			delivery_system = 0x10;
+			if_frequency = 4000000;
+			break;
+	case SYS_DVBT:
+	case SYS_DVBT2: /* it seems DVB-T and DVB-T2 both are 0x20 here */
+			delivery_system = 0x20;
+			break;
+	case SYS_DVBC_ANNEX_A:
+			delivery_system = 0x30;
+			break;
+	default:
+			ret = -EINVAL;
+			goto err;
+	}
+
+	memcpy(cmd.args, "\x14\x00\x03\x07\x00\x00", 6);
+	cmd.args[4] = delivery_system | bandwidth;
+	if (dev->inversion)
+		cmd.args[5] = 0x01;
+	cmd.wlen = 6;
+	cmd.rlen = 4;
+	ret = si2157_cmd_execute(dev, &cmd);
+	if (ret)
+		goto err;
+
+	if (dev->chiptype == SI2157_CHIPTYPE_SI2146)
+		memcpy(cmd.args, "\x14\x00\x02\x07\x00\x01", 6);
+	else
+		memcpy(cmd.args, "\x14\x00\x02\x07\x01\x00", 6);
+	cmd.wlen = 6;
+	cmd.rlen = 4;
+	ret = si2157_cmd_execute(dev, &cmd);
+	if (ret)
+		goto err;
+
+	/* set if frequency if needed */
+	if (if_frequency != dev->if_frequency) {
+		memcpy(cmd.args, "\x14\x00\x06\x07", 4);
+		cmd.args[4] = (if_frequency / 1000) & 0xff;
+		cmd.args[5] = ((if_frequency / 1000) >> 8) & 0xff;
+		cmd.wlen = 6;
+		cmd.rlen = 4;
+		ret = si2157_cmd_execute(dev, &cmd);
+		if (ret)
+			goto err;
+
+		dev->if_frequency = if_frequency;
+	}
+
+	/* set frequency */
+	memcpy(cmd.args, "\x41\x00\x00\x00\x00\x00\x00\x00", 8);
+	cmd.args[4] = (c->frequency >>  0) & 0xff;
+	cmd.args[5] = (c->frequency >>  8) & 0xff;
+	cmd.args[6] = (c->frequency >> 16) & 0xff;
+	cmd.args[7] = (c->frequency >> 24) & 0xff;
+	cmd.wlen = 8;
+	cmd.rlen = 1;
+	ret = si2157_cmd_execute(dev, &cmd);
+	if (ret)
+		goto err;
+
+	return 0;
+err:
+	dev_dbg(&dev->i2c_adap->dev, "failed=%d\n", ret);
+	return ret;
+}
+
+static int si2157_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
+{
+	struct si2157_dev *dev = fe->tuner_priv;
+
+	*frequency = dev->if_frequency;
+	return 0;
+}
+
+static int si2157_release(struct dvb_frontend *fe)
+{
+	struct si2157_dev *dev = fe->tuner_priv;
+
+	dev_dbg(&dev->i2c_adap->dev, "%s:\n", __func__);
+
+	kfree(fe->tuner_priv);
+
+	return 0;
+}
+
+static const struct dvb_tuner_ops si2157_ops = {
+	.info = {
+		.name           = "Silicon Labs Si2146/2147/2148/2157/2158",
+		.frequency_min  = 55000000,
+		.frequency_max  = 862000000,
+	},
+
+	.release = si2157_release,
+	.init = si2157_init,
+	.sleep = si2157_sleep,
+	.set_params = si2157_set_params,
+	.get_if_frequency = si2157_get_if_frequency,
+};
+
+struct dvb_frontend *si2157_attach(struct dvb_frontend *fe,
+		struct i2c_adapter *i2c, const struct si2157_config *cfg)
+{
+	struct si2157_dev *dev;
+	struct si2157_cmd cmd;
+	int ret;
+
+	if (!cfg) {
+		dev_err(&i2c->dev, "no configuration submitted\n");
+		goto err;
+	}
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev) {
+		ret = -ENOMEM;
+		dev_err(&i2c->dev, "%s: kzalloc() failed\n", KBUILD_MODNAME);
+		goto err;
+	}
+
+	dev->i2c_adap = i2c;
+	dev->i2c_addr = cfg->i2c_addr;
+	dev->inversion = cfg->inversion;
+	dev->fw_loaded = false;
+	dev->chiptype = SI2157_CHIPTYPE_SI2157; /* (u8)id->driver_data; */
+	dev->if_frequency = 5000000; /* default value of property 0x0706 */
+	mutex_init(&dev->i2c_mutex);
+
+	/* check if the tuner is there */
+	cmd.wlen = 0;
+	cmd.rlen = 1;
+	ret = si2157_cmd_execute(dev, &cmd);
+	if (ret)
+		goto err_kfree;
+
+	memcpy(&fe->ops.tuner_ops, &si2157_ops, sizeof(struct dvb_tuner_ops));
+	fe->tuner_priv = dev;
+
+	dev_info(&i2c->dev, "%s: Silicon Labs %s successfully attached\n",
+			KBUILD_MODNAME, dev->chiptype == SI2157_CHIPTYPE_SI2146 ?
+			"Si2146" : "Si2147/2148/2157/2158");
+
+	return fe;
+
+err_kfree:
+	kfree(dev);
+err:
+	dev_dbg(&i2c->dev, "failed=%d\n", ret);
+	return NULL;
+}
+EXPORT_SYMBOL(si2157_attach);
+
+MODULE_DESCRIPTION("Silicon Labs Si2146/2147/2148/2157/2158 silicon tuner driver");
+MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_LICENSE("GPL");
+MODULE_FIRMWARE(SI2158_A20_FIRMWARE);
diff --git a/drivers/media/tuners/si2157.h b/drivers/media/tuners/si2157.h
new file mode 100644
index 0000000..1755ba3
--- /dev/null
+++ b/drivers/media/tuners/si2157.h
@@ -0,0 +1,47 @@
+/*
+ * Silicon Labs Si2146/2147/2148/2157/2158 silicon tuner driver
+ *
+ * Copyright (C) 2014 Antti Palosaari <crope@iki.fi>
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2 of the License, or
+ *    (at your option) any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ */
+
+#ifndef SI2157_H
+#define SI2157_H
+
+#include <linux/kconfig.h>
+#include "dvb_frontend.h"
+
+struct si2157_config {
+	/*
+	 * I2C address
+	 */
+	u8 i2c_addr;
+
+	/*
+	 * Spectral Inversion
+	 */
+	bool inversion;
+};
+
+#if IS_ENABLED(CONFIG_MEDIA_TUNER_SI2157)
+extern struct dvb_frontend *si2157_attach(struct dvb_frontend *fe,
+		struct i2c_adapter *i2c, const struct si2157_config *cfg);
+#else
+static inline struct dvb_frontend *si2157_attach(struct dvb_frontend *fe,
+		struct i2c_adapter *i2c, const struct si2157_config *cfg)
+{
+	pr_warn("%s: driver disabled by Kconfig\n", __func__);
+	return NULL;
+}
+#endif
+
+#endif
diff --git a/drivers/media/tuners/si2157_priv.h b/drivers/media/tuners/si2157_priv.h
new file mode 100644
index 0000000..7830ccd
--- /dev/null
+++ b/drivers/media/tuners/si2157_priv.h
@@ -0,0 +1,48 @@
+/*
+ * Silicon Labs Si2146/2147/2148/2157/2158 silicon tuner driver
+ *
+ * Copyright (C) 2014 Antti Palosaari <crope@iki.fi>
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2 of the License, or
+ *    (at your option) any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ */
+
+#ifndef SI2157_PRIV_H
+#define SI2157_PRIV_H
+
+#include <linux/firmware.h>
+#include "si2157.h"
+
+/* state struct */
+struct si2157_dev {
+	struct mutex i2c_mutex;
+	struct i2c_adapter *i2c_adap;
+	u8 i2c_addr;
+	bool active;
+	bool fw_loaded;
+	bool inversion;
+	u8 chiptype;
+	u32 if_frequency;
+};
+
+#define SI2157_CHIPTYPE_SI2157 0
+#define SI2157_CHIPTYPE_SI2146 1
+
+/* firmware command struct */
+#define SI2157_ARGLEN      30
+struct si2157_cmd {
+	u8 args[SI2157_ARGLEN];
+	unsigned wlen;
+	unsigned rlen;
+};
+
+#define SI2158_A20_FIRMWARE "dvb-tuner-si2158-a20-01.fw"
+
+#endif
diff --git a/drivers/media/tuners/tda18272.c b/drivers/media/tuners/tda18272.c
new file mode 100644
index 0000000..644d70c
--- /dev/null
+++ b/drivers/media/tuners/tda18272.c
@@ -0,0 +1,1602 @@
+/*
+	TDA18272 Silicon tuner driver
+	Copyright (C) Manu Abraham <abraham.manu@gmail.com>
+
+	This program is free software; you can redistribute it and/or modify
+	it under the terms of the GNU General Public License as published by
+	the Free Software Foundation; either version 2 of the License, or
+	(at your option) any later version.
+
+	This program is distributed in the hope that it will be useful,
+	but WITHOUT ANY WARRANTY; without even the implied warranty of
+	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+	GNU General Public License for more details.
+
+	You should have received a copy of the GNU General Public License
+	along with this program; if not, write to the Free Software
+	Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include "dvb_frontend.h"
+
+#include "tda18272.h"
+#include "tda18272_reg.h"
+
+static unsigned int verbose;
+module_param(verbose, int, 0644);
+MODULE_PARM_DESC(verbose, "Set Verbosity level");
+
+#define FE_ERROR				0
+#define FE_NOTICE				1
+#define FE_INFO					2
+#define FE_DEBUG				3
+#define FE_DEBUGREG				4
+
+#define dprintk(__y, __z, format, arg...) do {						\
+	if (__z) {									\
+		if	((verbose > FE_ERROR) && (verbose > __y))			\
+			printk(KERN_ERR "%s: " format "\n", __func__ , ##arg);		\
+		else if	((verbose > FE_NOTICE) && (verbose > __y))			\
+			printk(KERN_NOTICE "%s: " format "\n", __func__ , ##arg);	\
+		else if ((verbose > FE_INFO) && (verbose > __y))			\
+			printk(KERN_INFO "%s: " format "\n", __func__ , ##arg);		\
+		else if ((verbose > FE_DEBUG) && (verbose > __y))			\
+			printk(KERN_DEBUG "%s: " format "\n", __func__ , ##arg);	\
+	} else {									\
+		if (verbose > __y)							\
+			printk(format, ##arg);						\
+	}										\
+} while (0)
+
+#define TDA18272_SETFIELD(mask, bitf, val)						\
+	(mask = (mask & (~(((1 << TDA18272_WIDTH_##bitf) - 1) <<			\
+				  TDA18272_OFFST_##bitf))) | 				\
+			  (val << TDA18272_OFFST_##bitf))
+
+#define TDA18272_GETFIELD(bitf, val)							\
+	((val >> TDA18272_OFFST_##bitf) & 						\
+	((1 << TDA18272_WIDTH_##bitf) - 1))
+
+
+enum tda18272_lpf {
+	TDA18272_LPF_6MHz	= 0,
+	TDA18272_LPF_7MHz,
+	TDA18272_LPF_8MHz,
+	TDA18272_LPF_9MHz,
+	TDA18272_LPF_1_5MHz
+};
+
+enum tda18272_lpf_offset {
+	TDA18272_LPFOFFSET_0PC	= 0,
+	TDA18272_LPFOFFSET_4PC,
+	TDA18272_LPFOFFSET_8PC,
+	TDA18272_LPFOFFSET_12PC
+};
+
+enum tda18272_agcgain {
+	TDA18272_AGCGAIN_2VPP	= 0,
+	TDA18272_AGCGAIN_1_25VPP,
+	TDA18272_AGCGAIN_1VPP,
+	TDA18272_AGCGAIN_0_8VPP,
+	TDA18272_AGCGAIN_0_85VPP,
+	TDA18272_AGCGAIN_0_7VPP,
+	TDA18272_AGCGAIN_0_6VPP,
+	TDA18272_AGCGAIN_0_5VPP
+};
+
+enum tda18272_notch {
+	TDA18272_NOTCH_DISABLED	= 0,
+	TDA18272_NOTCH_ENABLED,
+};
+
+enum tda18272_hpf {
+	TDA18272_HPF_DISABLED	= 0,
+	TDA18272_HPF_0_4MHz,
+	TDA18272_HPF_0_85MHz,
+	TDA18272_HPF_1MHz,
+	TDA18272_HPF_1_5Mhz
+};
+
+enum tda18272_lnatop {
+	TDA18272_LNATOP_95_89 = 0,
+	TDA18272_LNATOP_95_93, /* unused */
+	TDA18272_LNATOP_95_94, /* unused */
+	TDA18272_LNATOP_95_95, /* unused */
+	TDA18272_LNATOP_99_89,
+	TDA18272_LNATOP_99_93,
+	TDA18272_LNATOP_99_94,
+	TDA18272_LNATOP_99_95,
+	TDA18272_LNATOP_99_95s,
+	TDA18272_LNATOP_100_93,
+	TDA18272_LNATOP_100_94,
+	TDA18272_LNATOP_100_95,
+	TDA18272_LNATOP_100_95s,
+	TDA18272_LNATOP_101_93d,
+	TDA18272_LNATOP_101_94d,
+	TDA18272_LNATOP_101_95,
+	TDA18272_LNATOP_101_95s,
+};
+
+enum tda18272_rfatttop {
+	TDA18272_RFATTTOP_89_81	= 0,
+	TDA18272_RFATTTOP_91_83,
+	TDA18272_RFATTTOP_93_85,
+	TDA18272_RFATTTOP_95_87,
+	TDA18272_RFATTTOP_88_88,
+	TDA18272_RFATTTOP_89_82,
+	TDA18272_RFATTTOP_90_83,
+	TDA18272_RFATTTOP_91_84,
+	TDA18272_RFATTTOP_92_85,
+	TDA18272_RFATTTOP_93_86,
+	TDA18272_RFATTTOP_94_87,
+	TDA18272_RFATTTOP_95_88,
+	TDA18272_RFATTTOP_87_81,
+	TDA18272_RFATTTOP_88_82,
+	TDA18272_RFATTTOP_89_83,
+	TDA18272_RFATTTOP_90_84,
+	TDA18272_RFATTTOP_91_85,
+	TDA18272_RFATTTOP_92_86,
+	TDA18272_RFATTTOP_93_87,
+	TDA18272_RFATTTOP_94_88,
+	TDA18272_RFATTTOP_95_89,
+};
+
+
+#define TDA18272_AGC3_RF_AGC_TOP_FREQ_LIM	291000000
+
+enum tda18272_rfagctop {
+	TDA18272_RFAGCTOP_94 = 0,
+	TDA18272_RFAGCTOP_96,
+	TDA18272_RFAGCTOP_98,
+	TDA18272_RFAGCTOP_100,
+	TDA18272_RFAGCTOP_102,
+	TDA18272_RFAGCTOP_104,
+	TDA18272_RFAGCTOP_106,
+	TDA18272_RFAGCTOP_107,
+};
+
+enum tda18272_irmixtop {
+	TDA18272_IRMIXTOP_105_99	= 0,
+	TDA18272_IRMIXTOP_105_100,
+	TDA18272_IRMIXTOP_105_101,
+	TDA18272_IRMIXTOP_107_101,
+	TDA18272_IRMIXTOP_107_102,
+	TDA18272_IRMIXTOP_107_103,
+	TDA18272_IRMIXTOP_108_103,
+	TDA18272_IRMIXTOP_109_103,
+	TDA18272_IRMIXTOP_109_104,
+	TDA18272_IRMIXTOP_109_105,
+	TDA18272_IRMIXTOP_110_104,
+	TDA18272_IRMIXTOP_110_105,
+	TDA18272_IRMIXTOP_110_106,
+	TDA18272_IRMIXTOP_112_106,
+	TDA18272_IRMIXTOP_112_107,
+	TDA18272_IRMIXTOP_112_108,
+};
+
+enum tda18272_ifagctop {
+	TDA18272_IFAGCTOP_105_99	= 0,
+	TDA18272_IFAGCTOP_105_100,
+	TDA18272_IFAGCTOP_105_101,
+	TDA18272_IFAGCTOP_107_101,
+	TDA18272_IFAGCTOP_107_102,
+	TDA18272_IFAGCTOP_107_103,
+	TDA18272_IFAGCTOP_108_103,
+	TDA18272_IFAGCTOP_109_103,
+	TDA18272_IFAGCTOP_109_104,
+	TDA18272_IFAGCTOP_109_105,
+	TDA18272_IFAGCTOP_110_104,
+	TDA18272_IFAGCTOP_110_105,
+	TDA18272_IFAGCTOP_110_106,
+	TDA18272_IFAGCTOP_112_106,
+	TDA18272_IFAGCTOP_112_107,
+	TDA18272_IFAGCTOP_112_108,
+};
+
+enum tda18272_dethpf {
+	TDA18272_DETHPF_DISABLED	= 0,
+	TDA18272_DETHPF_ENABLED
+};
+
+enum tda18272_agc3adapt {
+	TDA18272_AGC3ADAPT_ENABLED	= 0,
+	TDA18272_AGC3ADAPT_DISABLED,
+};
+
+enum tda18272_agc3adapt_top {
+	TDA18272_AGC3ADAPT_TOP_0	= 0,
+	TDA18272_AGC3ADAPT_TOP_1,
+	TDA18272_AGC3ADAPT_TOP_2,
+	TDA18272_AGC3ADAPT_TOP_3
+};
+
+enum tda18272_3dbatt {
+	TDA18272_3DBATT_DISABLED	= 0,
+	TDA18272_3DBATT_ENABLED,
+};
+
+
+enum tda18272_vhffilt6 {
+	TDA18272_VHFFILT6_DISABLED	= 0,
+	TDA18272_VHFFILT6_ENABLED,
+};
+
+enum tda18272_lpfgain {
+	TDA18272_LPFGAIN_UNKNOWN	= 0,
+	TDA18272_LPFGAIN_FROZEN,
+	TDA18272_LPFGAIN_FREE
+};
+
+
+enum tda18272_stdmode {
+	TDA18272_DVBT_6MHz = 0,
+	TDA18272_DVBT_7MHz,
+	TDA18272_DVBT_8MHz,
+	TDA18272_QAM_6MHz,
+	TDA18272_QAM_8MHz,
+	TDA18272_ISDBT_6MHz,
+	TDA18272_ATSC_6MHz,
+	TDA18272_DMBT_8MHz,
+	TDA18272_ANLG_MN,
+	TDA18272_ANLG_B,
+	TDA18272_ANLG_GH,
+	TDA18272_ANLG_I,
+	TDA18272_ANLG_DK,
+	TDA18272_ANLG_L,
+	TDA18272_ANLG_LL,
+	TDA18272_FM_RADIO,
+	TDA18272_Scanning,
+	TDA18272_ScanXpress,
+};
+
+static struct tda18272_coeff {
+	u8				desc[16];
+	u32				if_val;
+	s32				cf_off;
+	enum tda18272_lpf		lpf;
+	enum tda18272_lpf_offset	lpf_off;
+	enum tda18272_agcgain		if_gain;
+	enum tda18272_notch		if_notch;
+	enum tda18272_hpf		if_hpf;
+	enum tda18272_notch		dc_notch;
+	enum tda18272_lnatop		lna_top;
+	enum tda18272_rfatttop		rfatt_top;
+	enum tda18272_rfagctop		loband_rfagc_top;
+	enum tda18272_rfagctop		hiband_rfagc_top;
+	enum tda18272_irmixtop		irmix_top;
+	enum tda18272_ifagctop		ifagc_top;
+	enum tda18272_dethpf		det_hpf;
+	enum tda18272_agc3adapt		agc3_adapt;
+	enum tda18272_agc3adapt_top	agc3_adapt_top;
+
+	enum tda18272_3dbatt		att3db;
+	u8				gsk;
+	enum tda18272_vhffilt6		filter;
+	enum tda18272_lpfgain		lpf_gain;
+	int				agc1_freeze;
+	int				ltosto_immune;
+} coeft[] = {
+	{
+		.desc			= "DVB-T 6MHz",
+		.if_val			= 3250000,
+		.cf_off			= 0,
+		.lpf			= TDA18272_LPF_6MHz,
+		.lpf_off		= TDA18272_LPFOFFSET_0PC,
+		.if_gain		= TDA18272_AGCGAIN_1VPP,
+		.if_notch		= TDA18272_NOTCH_ENABLED,
+		.if_hpf			= TDA18272_HPF_0_4MHz,
+		.dc_notch		= TDA18272_NOTCH_ENABLED,
+		.lna_top		= TDA18272_LNATOP_95_89,
+		.rfatt_top		= TDA18272_RFATTTOP_90_84,
+		.loband_rfagc_top	= TDA18272_RFAGCTOP_100,
+		.hiband_rfagc_top	= TDA18272_RFAGCTOP_102,
+		.irmix_top		= TDA18272_IRMIXTOP_110_105,
+		.ifagc_top		= TDA18272_IFAGCTOP_110_105,
+		.det_hpf		= TDA18272_DETHPF_DISABLED,
+		.agc3_adapt		= TDA18272_AGC3ADAPT_ENABLED,
+		.agc3_adapt_top		= TDA18272_AGC3ADAPT_TOP_2,
+		.att3db			= TDA18272_3DBATT_ENABLED,
+		.gsk			= 0x02,
+		.filter			= TDA18272_VHFFILT6_ENABLED,
+		.lpf_gain		= TDA18272_LPFGAIN_FREE,
+		.agc1_freeze		= 0,
+		.ltosto_immune		= 0
+	}, {
+		.desc			= "DVB-T 7MHz",
+		.if_val			= 3500000,
+		.cf_off			= 0,
+		.lpf			= TDA18272_LPF_7MHz,
+		.lpf_off		= TDA18272_LPFOFFSET_8PC,
+		.if_gain		= TDA18272_AGCGAIN_1VPP,
+		.if_notch		= TDA18272_NOTCH_ENABLED,
+		.if_hpf			= TDA18272_HPF_DISABLED,
+		.dc_notch		= TDA18272_NOTCH_ENABLED,
+		.lna_top		= TDA18272_LNATOP_95_89,
+		.rfatt_top		= TDA18272_RFATTTOP_90_84,
+		.loband_rfagc_top	= TDA18272_RFAGCTOP_100,
+		.hiband_rfagc_top	= TDA18272_RFAGCTOP_102,
+		.irmix_top		= TDA18272_IRMIXTOP_110_105,
+		.ifagc_top		= TDA18272_IFAGCTOP_110_105,
+		.det_hpf		= TDA18272_DETHPF_DISABLED,
+		.agc3_adapt		= TDA18272_AGC3ADAPT_ENABLED,
+		.agc3_adapt_top		= TDA18272_AGC3ADAPT_TOP_2,
+		.att3db			= TDA18272_3DBATT_ENABLED,
+		.gsk			= 0x02,
+		.filter			= TDA18272_VHFFILT6_ENABLED,
+		.lpf_gain		= TDA18272_LPFGAIN_FREE,
+		.agc1_freeze		= 0,
+		.ltosto_immune		= 0
+	}, {
+		.desc			= "DVB-T 8MHz",
+		.if_val			= 4000000,
+		.cf_off			= 0,
+		.lpf			= TDA18272_LPF_8MHz,
+		.lpf_off		= TDA18272_LPFOFFSET_0PC,
+		.if_gain		= TDA18272_AGCGAIN_1VPP,
+		.if_notch		= TDA18272_NOTCH_ENABLED,
+		.if_hpf			= TDA18272_HPF_DISABLED,
+		.dc_notch		= TDA18272_NOTCH_ENABLED,
+		.lna_top		= TDA18272_LNATOP_95_89,
+		.rfatt_top		= TDA18272_RFATTTOP_90_84,
+		.loband_rfagc_top	= TDA18272_RFAGCTOP_100,
+		.hiband_rfagc_top	= TDA18272_RFAGCTOP_102,
+		.irmix_top		= TDA18272_IRMIXTOP_110_105,
+		.ifagc_top		= TDA18272_IFAGCTOP_110_105,
+		.det_hpf		= TDA18272_DETHPF_DISABLED,
+		.agc3_adapt		= TDA18272_AGC3ADAPT_ENABLED,
+		.agc3_adapt_top		= TDA18272_AGC3ADAPT_TOP_2,
+		.att3db			= TDA18272_3DBATT_ENABLED,
+		.gsk			= 0x02,
+		.filter			= TDA18272_VHFFILT6_ENABLED,
+		.lpf_gain		= TDA18272_LPFGAIN_FREE,
+		.agc1_freeze		= 0,
+		.ltosto_immune		= 0
+	}, {
+		.desc			= "QAM 6MHz",
+		.if_val			= 3600000,
+		.cf_off			= 0,
+		.lpf			= TDA18272_LPF_6MHz,
+		.lpf_off		= TDA18272_LPFOFFSET_8PC,
+		.if_gain		= TDA18272_AGCGAIN_1VPP,
+		.if_notch		= TDA18272_NOTCH_DISABLED,
+		.if_hpf			= TDA18272_HPF_DISABLED,
+		.dc_notch		= TDA18272_NOTCH_ENABLED,
+		.lna_top		= TDA18272_LNATOP_95_89,
+		.rfatt_top		= TDA18272_RFATTTOP_90_84,
+		.loband_rfagc_top	= TDA18272_RFAGCTOP_100,
+		.hiband_rfagc_top	= TDA18272_RFAGCTOP_100,
+		.irmix_top		= TDA18272_IRMIXTOP_110_105,
+		.ifagc_top		= TDA18272_IFAGCTOP_110_105,
+		.det_hpf		= TDA18272_DETHPF_DISABLED,
+		.agc3_adapt		= TDA18272_AGC3ADAPT_DISABLED,
+		.agc3_adapt_top		= TDA18272_AGC3ADAPT_TOP_0,
+		.att3db			= TDA18272_3DBATT_DISABLED,
+		.gsk			= 0x02,
+		.filter			= TDA18272_VHFFILT6_DISABLED,
+		.lpf_gain		= TDA18272_LPFGAIN_FREE,
+		.agc1_freeze		= 1,
+		.ltosto_immune		= 1
+	}, {
+		.desc			= "QAM 8MHz",
+		.if_val			= 5000000,
+		.cf_off			= 0,
+		.lpf			= TDA18272_LPF_9MHz,
+		.lpf_off		= TDA18272_LPFOFFSET_8PC,
+		.if_gain		= TDA18272_AGCGAIN_1VPP,
+		.if_notch		= TDA18272_NOTCH_DISABLED,
+		.if_hpf			= TDA18272_HPF_0_85MHz,
+		.dc_notch		= TDA18272_NOTCH_ENABLED,
+		.lna_top		= TDA18272_LNATOP_95_89,
+		.rfatt_top		= TDA18272_RFATTTOP_90_84,
+		.loband_rfagc_top	= TDA18272_RFAGCTOP_100,
+		.hiband_rfagc_top	= TDA18272_RFAGCTOP_100,
+		.irmix_top		= TDA18272_IRMIXTOP_110_105,
+		.ifagc_top		= TDA18272_IFAGCTOP_110_105,
+		.det_hpf		= TDA18272_DETHPF_DISABLED,
+		.agc3_adapt		= TDA18272_AGC3ADAPT_DISABLED,
+		.agc3_adapt_top		= TDA18272_AGC3ADAPT_TOP_0,
+		.att3db			= TDA18272_3DBATT_DISABLED,
+		.gsk			= 0x02,
+		.filter			= TDA18272_VHFFILT6_DISABLED,
+		.lpf_gain		= TDA18272_LPFGAIN_FREE,
+		.agc1_freeze		= 1,
+		.ltosto_immune		= 1
+	}, {
+		.desc			= "ISDB-T 6MHz",
+		.if_val			= 3250000,
+		.cf_off			= 0,
+		.lpf			= TDA18272_LPF_6MHz,
+		.lpf_off		= TDA18272_LPFOFFSET_0PC,
+		.if_gain		= TDA18272_AGCGAIN_0_6VPP,
+		.if_notch		= TDA18272_NOTCH_ENABLED,
+		.if_hpf			= TDA18272_HPF_0_4MHz,
+		.dc_notch		= TDA18272_NOTCH_ENABLED,
+		.lna_top		= TDA18272_LNATOP_95_89,
+		.rfatt_top		= TDA18272_RFATTTOP_90_84,
+		.loband_rfagc_top	= TDA18272_RFAGCTOP_100,
+		.hiband_rfagc_top	= TDA18272_RFAGCTOP_102,
+		.irmix_top		= TDA18272_IRMIXTOP_110_105,
+		.ifagc_top		= TDA18272_IFAGCTOP_110_105,
+		.det_hpf		= TDA18272_DETHPF_DISABLED,
+		.agc3_adapt		= TDA18272_AGC3ADAPT_ENABLED,
+		.agc3_adapt_top		= TDA18272_AGC3ADAPT_TOP_2,
+		.att3db			= TDA18272_3DBATT_ENABLED,
+		.gsk			= 0x02,
+		.filter			= TDA18272_VHFFILT6_ENABLED,
+		.lpf_gain		= TDA18272_LPFGAIN_FREE,
+		.agc1_freeze		= 0,
+		.ltosto_immune		= 0
+	}, {
+		.desc			= "ATSC 6MHz",
+		.if_val			= 3250000,
+		.cf_off			= 0,
+		.lpf			= TDA18272_LPF_6MHz,
+		.lpf_off		= TDA18272_LPFOFFSET_0PC,
+		.if_gain		= TDA18272_AGCGAIN_0_6VPP,
+		.if_notch		= TDA18272_NOTCH_ENABLED,
+		.if_hpf			= TDA18272_HPF_0_4MHz,
+		.dc_notch		= TDA18272_NOTCH_ENABLED,
+		.lna_top		= TDA18272_LNATOP_100_94,
+		.rfatt_top		= TDA18272_RFATTTOP_90_84,
+		.loband_rfagc_top	= TDA18272_RFAGCTOP_104,
+		.hiband_rfagc_top	= TDA18272_RFAGCTOP_104,
+		.irmix_top		= TDA18272_IRMIXTOP_112_107,
+		.ifagc_top		= TDA18272_IFAGCTOP_112_107,
+		.det_hpf		= TDA18272_DETHPF_DISABLED,
+		.agc3_adapt		= TDA18272_AGC3ADAPT_ENABLED,
+		.agc3_adapt_top		= TDA18272_AGC3ADAPT_TOP_3,
+		.att3db			= TDA18272_3DBATT_ENABLED,
+		.gsk			= 0x02,
+		.filter			= TDA18272_VHFFILT6_ENABLED,
+		.lpf_gain		= TDA18272_LPFGAIN_FREE,
+		.agc1_freeze		= 0,
+		.ltosto_immune		= 0
+	}, {
+		.desc			= "DMB-T 8MHz",
+		.if_val			= 4000000,
+		.cf_off			= 0,
+		.lpf			= TDA18272_LPF_8MHz,
+		.lpf_off		= TDA18272_LPFOFFSET_0PC,
+		.if_gain		= TDA18272_AGCGAIN_1VPP,
+		.if_notch		= TDA18272_NOTCH_ENABLED,
+		.if_hpf			= TDA18272_HPF_DISABLED,
+		.dc_notch		= TDA18272_NOTCH_ENABLED,
+		.lna_top		= TDA18272_LNATOP_95_89,
+		.rfatt_top		= TDA18272_RFATTTOP_90_84,
+		.loband_rfagc_top	= TDA18272_RFAGCTOP_100,
+		.hiband_rfagc_top	= TDA18272_RFAGCTOP_102,
+		.irmix_top		= TDA18272_IRMIXTOP_110_105,
+		.ifagc_top		= TDA18272_IFAGCTOP_110_105,
+		.det_hpf		= TDA18272_DETHPF_DISABLED,
+		.agc3_adapt		= TDA18272_AGC3ADAPT_ENABLED,
+		.agc3_adapt_top		= TDA18272_AGC3ADAPT_TOP_2,
+		.att3db			= TDA18272_3DBATT_ENABLED,
+		.gsk			= 0x02,
+		.filter			= TDA18272_VHFFILT6_ENABLED,
+		.lpf_gain		= TDA18272_LPFGAIN_FREE,
+		.agc1_freeze		= 0,
+		.ltosto_immune		= 0
+	}, {
+		.desc			= "ATV M/N",
+		.if_val			= 5400000,
+		.cf_off			= 1750000,
+		.lpf			= TDA18272_LPF_6MHz,
+		.lpf_off		= TDA18272_LPFOFFSET_0PC,
+		.if_gain		= TDA18272_AGCGAIN_0_7VPP,
+		.if_notch		= TDA18272_NOTCH_DISABLED,
+		.if_hpf			= TDA18272_HPF_DISABLED,
+		.dc_notch		= TDA18272_NOTCH_DISABLED,
+		.lna_top		= TDA18272_LNATOP_95_89,
+		.rfatt_top		= TDA18272_RFATTTOP_90_84,
+		.loband_rfagc_top	= TDA18272_RFAGCTOP_96,
+		.hiband_rfagc_top	= TDA18272_RFAGCTOP_96,
+		.irmix_top		= TDA18272_IRMIXTOP_105_100,
+		.ifagc_top		= TDA18272_IFAGCTOP_105_100,
+		.det_hpf		= TDA18272_DETHPF_ENABLED,
+		.agc3_adapt		= TDA18272_AGC3ADAPT_DISABLED,
+		.agc3_adapt_top		= TDA18272_AGC3ADAPT_TOP_0,
+		.att3db			= TDA18272_3DBATT_DISABLED,
+		.gsk			= 0x01,
+		.filter			= TDA18272_VHFFILT6_DISABLED,
+		.lpf_gain		= TDA18272_LPFGAIN_FROZEN,
+		.agc1_freeze		= 0,
+		.ltosto_immune		= 0
+	}, {
+		.desc			= "ATV B",
+		.if_val			= 6400000,
+		.cf_off			= 2250000,
+		.lpf			= TDA18272_LPF_7MHz,
+		.lpf_off		= TDA18272_LPFOFFSET_0PC,
+		.if_gain		= TDA18272_AGCGAIN_0_7VPP,
+		.if_notch		= TDA18272_NOTCH_DISABLED,
+		.if_hpf			= TDA18272_HPF_DISABLED,
+		.dc_notch		= TDA18272_NOTCH_DISABLED,
+		.lna_top		= TDA18272_LNATOP_95_89,
+		.rfatt_top		= TDA18272_RFATTTOP_90_84,
+		.loband_rfagc_top	= TDA18272_RFAGCTOP_96,
+		.hiband_rfagc_top	= TDA18272_RFAGCTOP_96,
+		.irmix_top		= TDA18272_IRMIXTOP_105_100,
+		.ifagc_top		= TDA18272_IFAGCTOP_105_100,
+		.det_hpf		= TDA18272_DETHPF_ENABLED,
+		.agc3_adapt		= TDA18272_AGC3ADAPT_DISABLED,
+		.agc3_adapt_top		= TDA18272_AGC3ADAPT_TOP_0,
+		.att3db			= TDA18272_3DBATT_DISABLED,
+		.gsk			= 0x01,
+		.filter			= TDA18272_VHFFILT6_DISABLED,
+		.lpf_gain		= TDA18272_LPFGAIN_FROZEN,
+		.agc1_freeze		= 0,
+		.ltosto_immune		= 0
+	}, {
+		.desc			= "ATV G/H",
+		.if_val			= 6750000,
+		.cf_off			= 2750000,
+		.lpf			= TDA18272_LPF_8MHz,
+		.lpf_off		= TDA18272_LPFOFFSET_0PC,
+		.if_gain		= TDA18272_AGCGAIN_0_7VPP,
+		.if_notch		= TDA18272_NOTCH_DISABLED,
+		.if_hpf			= TDA18272_HPF_DISABLED,
+		.dc_notch		= TDA18272_NOTCH_DISABLED,
+		.lna_top		= TDA18272_LNATOP_95_89,
+		.rfatt_top		= TDA18272_RFATTTOP_90_84,
+		.loband_rfagc_top	= TDA18272_RFAGCTOP_96,
+		.hiband_rfagc_top	= TDA18272_RFAGCTOP_96,
+		.irmix_top		= TDA18272_IRMIXTOP_105_100,
+		.ifagc_top		= TDA18272_IFAGCTOP_105_100,
+		.det_hpf		= TDA18272_DETHPF_ENABLED,
+		.agc3_adapt		= TDA18272_AGC3ADAPT_DISABLED,
+		.agc3_adapt_top		= TDA18272_AGC3ADAPT_TOP_0,
+		.att3db			= TDA18272_3DBATT_DISABLED,
+		.gsk			= 0x01,
+		.filter			= TDA18272_VHFFILT6_DISABLED,
+		.lpf_gain		= TDA18272_LPFGAIN_FROZEN,
+		.agc1_freeze		= 0,
+		.ltosto_immune		= 0
+	}, {
+		.desc			= "ATV I",
+		.if_val			= 7250000,
+		.cf_off			= 2750000,
+		.lpf			= TDA18272_LPF_8MHz,
+		.lpf_off		= TDA18272_LPFOFFSET_0PC,
+		.if_gain		= TDA18272_AGCGAIN_0_7VPP,
+		.if_notch		= TDA18272_NOTCH_DISABLED,
+		.if_hpf			= TDA18272_HPF_DISABLED,
+		.dc_notch		= TDA18272_NOTCH_DISABLED,
+		.lna_top		= TDA18272_LNATOP_95_89,
+		.rfatt_top		= TDA18272_RFATTTOP_90_84,
+		.loband_rfagc_top	= TDA18272_RFAGCTOP_96,
+		.hiband_rfagc_top	= TDA18272_RFAGCTOP_96,
+		.irmix_top		= TDA18272_IRMIXTOP_105_100,
+		.ifagc_top		= TDA18272_IFAGCTOP_105_100,
+		.det_hpf		= TDA18272_DETHPF_ENABLED,
+		.agc3_adapt		= TDA18272_AGC3ADAPT_DISABLED,
+		.agc3_adapt_top		= TDA18272_AGC3ADAPT_TOP_0,
+		.att3db			= TDA18272_3DBATT_DISABLED,
+		.gsk			= 0x01,
+		.filter			= TDA18272_VHFFILT6_DISABLED,
+		.lpf_gain		= TDA18272_LPFGAIN_FROZEN,
+		.agc1_freeze		= 0,
+		.ltosto_immune		= 0
+	}, {
+		.desc			= "ATV DK",
+		.if_val			= 6850000,
+		.cf_off			= 2750000,
+		.lpf			= TDA18272_LPF_8MHz,
+		.lpf_off		= TDA18272_LPFOFFSET_0PC,
+		.if_gain		= TDA18272_AGCGAIN_0_7VPP,
+		.if_notch		= TDA18272_NOTCH_ENABLED,
+		.if_hpf			= TDA18272_HPF_DISABLED,
+		.dc_notch		= TDA18272_NOTCH_DISABLED,
+		.lna_top		= TDA18272_LNATOP_95_89,
+		.rfatt_top		= TDA18272_RFATTTOP_90_84,
+		.loband_rfagc_top	= TDA18272_RFAGCTOP_96,
+		.hiband_rfagc_top	= TDA18272_RFAGCTOP_96,
+		.irmix_top		= TDA18272_IRMIXTOP_105_100,
+		.ifagc_top		= TDA18272_IFAGCTOP_105_100,
+		.det_hpf		= TDA18272_DETHPF_ENABLED,
+		.agc3_adapt		= TDA18272_AGC3ADAPT_DISABLED,
+		.agc3_adapt_top		= TDA18272_AGC3ADAPT_TOP_0,
+		.att3db			= TDA18272_3DBATT_DISABLED,
+		.gsk			= 0x01,
+		.filter			= TDA18272_VHFFILT6_DISABLED,
+		.lpf_gain		= TDA18272_LPFGAIN_FROZEN,
+		.agc1_freeze		= 0,
+		.ltosto_immune		= 0
+	}, {
+		.desc			= "ATV L",
+		.if_val			= 6750000,
+		.cf_off			= 2750000,
+		.lpf			= TDA18272_LPF_8MHz,
+		.lpf_off		= TDA18272_LPFOFFSET_0PC,
+		.if_gain		= TDA18272_AGCGAIN_0_7VPP,
+		.if_notch		= TDA18272_NOTCH_ENABLED,
+		.if_hpf			= TDA18272_HPF_DISABLED,
+		.dc_notch		= TDA18272_NOTCH_DISABLED,
+		.lna_top		= TDA18272_LNATOP_95_89,
+		.rfatt_top		= TDA18272_RFATTTOP_90_84,
+		.loband_rfagc_top	= TDA18272_RFAGCTOP_96,
+		.hiband_rfagc_top	= TDA18272_RFAGCTOP_96,
+		.irmix_top		= TDA18272_IRMIXTOP_105_100,
+		.ifagc_top		= TDA18272_IFAGCTOP_105_100,
+		.det_hpf		= TDA18272_DETHPF_ENABLED,
+		.agc3_adapt		= TDA18272_AGC3ADAPT_DISABLED,
+		.agc3_adapt_top		= TDA18272_AGC3ADAPT_TOP_0,
+		.att3db			= TDA18272_3DBATT_DISABLED,
+		.gsk			= 0x01,
+		.filter			= TDA18272_VHFFILT6_DISABLED,
+		.lpf_gain		= TDA18272_LPFGAIN_FROZEN,
+		.agc1_freeze		= 0,
+		.ltosto_immune		= 0
+	}, {
+		.desc			= "ATV Lc",
+		.if_val			= 1250000,
+		.cf_off			= -2750000,
+		.lpf			= TDA18272_LPF_8MHz,
+		.lpf_off		= TDA18272_LPFOFFSET_0PC,
+		.if_gain		= TDA18272_AGCGAIN_0_7VPP,
+		.if_notch		= TDA18272_NOTCH_DISABLED,
+		.if_hpf			= TDA18272_HPF_DISABLED,
+		.dc_notch		= TDA18272_NOTCH_DISABLED,
+		.lna_top		= TDA18272_LNATOP_95_89,
+		.rfatt_top		= TDA18272_RFATTTOP_90_84,
+		.loband_rfagc_top	= TDA18272_RFAGCTOP_96,
+		.hiband_rfagc_top	= TDA18272_RFAGCTOP_96,
+		.irmix_top		= TDA18272_IRMIXTOP_105_100,
+		.ifagc_top		= TDA18272_IFAGCTOP_105_100,
+		.det_hpf		= TDA18272_DETHPF_DISABLED,
+		.agc3_adapt		= TDA18272_AGC3ADAPT_DISABLED,
+		.agc3_adapt_top		= TDA18272_AGC3ADAPT_TOP_0,
+		.att3db			= TDA18272_3DBATT_DISABLED,
+		.gsk			= 0x01,
+		.filter			= TDA18272_VHFFILT6_DISABLED,
+		.lpf_gain		= TDA18272_LPFGAIN_FROZEN,
+		.agc1_freeze		= 0,
+		.ltosto_immune		= 0
+	}, {
+		.desc			= "FM Radio",
+		.if_val			= 1250000,
+		.cf_off			= 0,
+		.lpf			= TDA18272_LPF_1_5MHz,
+		.lpf_off		= TDA18272_LPFOFFSET_0PC,
+		.if_gain		= TDA18272_AGCGAIN_0_7VPP,
+		.if_notch		= TDA18272_NOTCH_DISABLED,
+		.if_hpf			= TDA18272_HPF_0_85MHz,
+		.dc_notch		= TDA18272_NOTCH_ENABLED,
+		.lna_top		= TDA18272_LNATOP_95_89,
+		.rfatt_top		= TDA18272_RFATTTOP_90_84,
+		.loband_rfagc_top	= TDA18272_RFAGCTOP_96,
+		.hiband_rfagc_top	= TDA18272_RFAGCTOP_96,
+		.irmix_top		= TDA18272_IRMIXTOP_105_100,
+		.ifagc_top		= TDA18272_IFAGCTOP_105_100,
+		.det_hpf		= TDA18272_DETHPF_DISABLED,
+		.agc3_adapt		= TDA18272_AGC3ADAPT_DISABLED,
+		.agc3_adapt_top		= TDA18272_AGC3ADAPT_TOP_0,
+		.att3db			= TDA18272_3DBATT_DISABLED,
+		.gsk			= 0x02,
+		.filter			= TDA18272_VHFFILT6_DISABLED,
+		.lpf_gain		= TDA18272_LPFGAIN_FROZEN,
+		.agc1_freeze		= 0,
+		.ltosto_immune		= 0
+	}, {
+		.desc			= "PAL I Blindscan",
+		.if_val			= 7250000,
+		.cf_off			= 2750000,
+		.lpf			= TDA18272_LPF_8MHz,
+		.lpf_off		= TDA18272_LPFOFFSET_0PC,
+		.if_gain		= TDA18272_AGCGAIN_0_7VPP,
+		.if_notch		= TDA18272_NOTCH_DISABLED,
+		.if_hpf			= TDA18272_HPF_DISABLED,
+		.dc_notch		= TDA18272_NOTCH_DISABLED,
+		.lna_top		= TDA18272_LNATOP_95_89,
+		.rfatt_top		= TDA18272_RFATTTOP_90_84,
+		.loband_rfagc_top	= TDA18272_RFAGCTOP_96,
+		.hiband_rfagc_top	= TDA18272_RFAGCTOP_96,
+		.irmix_top		= TDA18272_IRMIXTOP_105_100,
+		.ifagc_top		= TDA18272_IFAGCTOP_105_100,
+		.det_hpf		= TDA18272_DETHPF_ENABLED,
+		.agc3_adapt		= TDA18272_AGC3ADAPT_DISABLED,
+		.agc3_adapt_top		= TDA18272_AGC3ADAPT_TOP_0,
+		.att3db			= TDA18272_3DBATT_DISABLED,
+		.gsk			= 0x01,
+		.filter			= TDA18272_VHFFILT6_DISABLED,
+		.lpf_gain		= TDA18272_LPFGAIN_FROZEN,
+		.agc1_freeze		= 0,
+		.ltosto_immune		= 0
+	}, {
+		.desc			= "XpressScan",
+		.if_val			= 5000000,
+		.cf_off			= 0,
+		.lpf			= TDA18272_LPF_9MHz,
+		.lpf_off		= TDA18272_LPFOFFSET_0PC,
+		.if_gain		= TDA18272_AGCGAIN_1VPP,
+		.if_notch		= TDA18272_NOTCH_ENABLED,
+		.if_hpf			= TDA18272_HPF_DISABLED,
+		.dc_notch		= TDA18272_NOTCH_ENABLED,
+		.lna_top		= TDA18272_LNATOP_95_89,
+		.rfatt_top		= TDA18272_RFATTTOP_90_84,
+		.loband_rfagc_top	= TDA18272_RFAGCTOP_100,
+		.hiband_rfagc_top	= TDA18272_RFAGCTOP_102,
+		.irmix_top		= TDA18272_IRMIXTOP_110_105,
+		.ifagc_top		= TDA18272_IFAGCTOP_110_105,
+		.det_hpf		= TDA18272_DETHPF_DISABLED,
+		.agc3_adapt		= TDA18272_AGC3ADAPT_ENABLED,
+		.agc3_adapt_top		= TDA18272_AGC3ADAPT_TOP_2,
+		.att3db			= TDA18272_3DBATT_ENABLED,
+		.gsk			= 0x0e,
+		.filter			= TDA18272_VHFFILT6_ENABLED,
+		.lpf_gain		= TDA18272_LPFGAIN_FREE,
+		.agc1_freeze		= 0,
+		.ltosto_immune		= 0
+	}, { }
+};
+
+#define TDA18272_REGMAPSIZ	68
+
+struct tda18272_state {
+	const struct tda18272_coeff	*coe;
+	u8				lna_top;
+	u8				psm_agc;
+	u8				agc1;
+	u8				mode;
+
+	u8				ms;
+
+	u32				bandwidth;
+	u32				frequency;
+
+	u8				regs[TDA18272_REGMAPSIZ];
+	struct dvb_frontend		*fe;
+	struct i2c_adapter		*i2c;
+	const struct tda18272_config	*config;
+};
+
+static int tda18272_rd_regs(struct tda18272_state *tda18272, u8 reg, u8 *data, int count)
+{
+	int ret;
+	const struct tda18272_config *config	= tda18272->config;
+	struct dvb_frontend *fe			= tda18272->fe;
+	struct i2c_msg msg[]			= {
+		{ .addr = config->addr, .flags = 0, 	   .buf = &reg, .len = 1 },
+		{ .addr = config->addr, .flags = I2C_M_RD, .buf = data, .len = count }
+	};
+
+	BUG_ON(count >= 255);
+	if (fe->ops.i2c_gate_ctrl)
+		fe->ops.i2c_gate_ctrl(fe, 1);
+
+	ret = i2c_transfer(tda18272->i2c, msg, 2);
+	if (ret != 2) {
+		dprintk(FE_ERROR, 1, "I/O Error");
+		ret = -EREMOTEIO;
+	} else {
+		ret = 0;
+	}
+
+	if (fe->ops.i2c_gate_ctrl)
+		fe->ops.i2c_gate_ctrl(fe, 0);
+
+	return ret;
+}
+
+static int tda18272_wr_regs(struct tda18272_state *tda18272, u8 start, u8 *data, u8 count)
+{
+	int ret;
+	const struct tda18272_config *config	= tda18272->config;
+	struct dvb_frontend *fe			= tda18272->fe;
+	u8 buf[0x45];
+	struct i2c_msg msg = { .addr = config->addr, .flags = 0, .buf = buf, .len = count + 1 };
+
+	BUG_ON(count >= 0x44);
+	BUG_ON(start >= 0x43);
+	BUG_ON(start + count > 0x44);
+
+	buf[0] = start;
+	memcpy(&buf[1], data, count);
+	if (fe->ops.i2c_gate_ctrl)
+		fe->ops.i2c_gate_ctrl(fe, 1);
+
+	ret = i2c_transfer(tda18272->i2c, &msg, 1);
+	if (ret != 1) {
+		dprintk(FE_ERROR, 1, "I/O Error");
+		ret = -EREMOTEIO;
+	} else {
+		ret = 0;
+	}
+
+	if (fe->ops.i2c_gate_ctrl)
+		fe->ops.i2c_gate_ctrl(fe, 0);
+
+	return ret;
+}
+
+static int tda18272_wr(struct tda18272_state *tda18272, u8 reg, u8 data)
+{
+	return tda18272_wr_regs(tda18272, reg, &data, 1);
+}
+
+static int tda18272_rd(struct tda18272_state *tda18272, u8 reg, u8 *data)
+{
+	return tda18272_rd_regs(tda18272, reg, data, 1);
+}
+
+static int tda18272_cal_wait(struct tda18272_state *tda18272)
+{
+	int ret = 0;
+	u8 xtal_cal, count = 20;
+
+	while (count > 0) {
+		ret = tda18272_rd(tda18272, TDA18272_IRQ_STATUS, &tda18272->regs[TDA18272_IRQ_STATUS]);
+		xtal_cal = TDA18272_GETFIELD(IRQ_STATUS_XTALCAL_STATUS, tda18272->regs[TDA18272_IRQ_STATUS]);
+		if (ret)
+			break;
+
+		if (xtal_cal)
+			break;
+
+		msleep(5);
+		--count;
+		if (!count) {
+			ret = -1;
+			break;
+		}
+	}
+	dprintk(FE_DEBUG, 1, "ret=%d", ret);
+	return ret;
+}
+
+enum tda18272_power {
+	TDA18272_NORMAL = 0,
+	TDA18272_STDBY_1,
+	TDA18272_STDBY_2,
+	TDA18272_STDBY
+};
+
+static int tda18272_pstate(struct tda18272_state *tda18272, enum tda18272_power pstate)
+{
+	int ret;
+
+	ret = tda18272_rd_regs(tda18272, TDA18272_POWERSTATE_BYTE_2, &tda18272->regs[TDA18272_POWERSTATE_BYTE_2], 15);
+	if (ret)
+		goto err;
+
+	if (pstate != TDA18272_NORMAL) {
+		TDA18272_SETFIELD(tda18272->regs[TDA18272_REFERENCE], REFERENCE_DIGITAL_CLOCK, 0);
+		ret = tda18272_wr(tda18272, TDA18272_REFERENCE, tda18272->regs[TDA18272_REFERENCE]);
+		if (ret)
+			goto err;
+	}
+
+	switch (pstate) {
+	case TDA18272_NORMAL:
+		TDA18272_SETFIELD(tda18272->regs[TDA18272_POWERSTATE_BYTE_2], POWERSTATE_BYTE_2_SM, 0x00);
+		TDA18272_SETFIELD(tda18272->regs[TDA18272_POWERSTATE_BYTE_2], POWERSTATE_BYTE_2_SM_PLL, 0x00);
+		TDA18272_SETFIELD(tda18272->regs[TDA18272_POWERSTATE_BYTE_2], POWERSTATE_BYTE_2_SM_LNA, 0x00);
+		break;
+	case TDA18272_STDBY_1:
+		TDA18272_SETFIELD(tda18272->regs[TDA18272_POWERSTATE_BYTE_2], POWERSTATE_BYTE_2_SM, 0x01);
+		TDA18272_SETFIELD(tda18272->regs[TDA18272_POWERSTATE_BYTE_2], POWERSTATE_BYTE_2_SM_PLL, 0x00);
+		TDA18272_SETFIELD(tda18272->regs[TDA18272_POWERSTATE_BYTE_2], POWERSTATE_BYTE_2_SM_LNA, 0x00);
+		break;
+	case TDA18272_STDBY_2:
+		TDA18272_SETFIELD(tda18272->regs[TDA18272_POWERSTATE_BYTE_2], POWERSTATE_BYTE_2_SM, 0x01);
+		TDA18272_SETFIELD(tda18272->regs[TDA18272_POWERSTATE_BYTE_2], POWERSTATE_BYTE_2_SM_PLL, 0x01);
+		TDA18272_SETFIELD(tda18272->regs[TDA18272_POWERSTATE_BYTE_2], POWERSTATE_BYTE_2_SM_LNA, 0x00);
+		break;
+	case TDA18272_STDBY:
+		TDA18272_SETFIELD(tda18272->regs[TDA18272_POWERSTATE_BYTE_2], POWERSTATE_BYTE_2_SM, 0x01);
+		TDA18272_SETFIELD(tda18272->regs[TDA18272_POWERSTATE_BYTE_2], POWERSTATE_BYTE_2_SM_PLL, 0x01);
+		TDA18272_SETFIELD(tda18272->regs[TDA18272_POWERSTATE_BYTE_2], POWERSTATE_BYTE_2_SM_LNA, 0x01);
+		break;
+	}
+	ret = tda18272_wr(tda18272, TDA18272_POWERSTATE_BYTE_2, tda18272->regs[TDA18272_POWERSTATE_BYTE_2]);
+	if (ret)
+		goto err;
+
+	if (pstate == TDA18272_NORMAL) {
+		if (tda18272->ms)
+			TDA18272_SETFIELD(tda18272->regs[TDA18272_REFERENCE], REFERENCE_XTOUT, 0x03);
+
+		TDA18272_SETFIELD(tda18272->regs[TDA18272_REFERENCE], REFERENCE_DIGITAL_CLOCK, 0x01);
+		ret = tda18272_wr(tda18272, TDA18272_REFERENCE, tda18272->regs[TDA18272_REFERENCE]);
+		if (ret)
+			goto err;
+	}
+err:
+	dprintk(FE_DEBUG, 1, "ret=%d", ret);
+	return ret;
+}
+
+static int tda18272_wait_irq(struct tda18272_state *tda18272, u32 timeout, u32 step, u8 status)
+{
+	int ret;
+	u8 irq_status;
+	u32 count = timeout / step;
+
+	BUG_ON(!count);
+	do {
+		ret = tda18272_rd(tda18272, TDA18272_IRQ_STATUS, &tda18272->regs[TDA18272_IRQ_STATUS]);
+		if (ret)
+			break;
+
+		if (TDA18272_GETFIELD(IRQ_STATUS_IRQ_STATUS, tda18272->regs[TDA18272_IRQ_STATUS]))
+			break;
+
+		if (status) {
+			irq_status = tda18272->regs[TDA18272_IRQ_STATUS] & 0x1f;
+			if (status == irq_status)
+				break;
+		}
+		msleep(step);
+		--count;
+		if (!count) {
+			ret = -1;
+			break;
+		}
+	} while (count);
+	dprintk(FE_DEBUG, 1, "ret=%d", ret);
+	return ret;
+}
+
+static int tda18272_reset(struct tda18272_state *tda18272)
+{
+	int ret;
+
+	ret = tda18272_rd_regs(tda18272, TDA18272_ID_BYTE_1, tda18272->regs, TDA18272_REGMAPSIZ);
+	if (ret)
+		goto err;
+	TDA18272_SETFIELD(tda18272->regs[TDA18272_POWER_BYTE_2], POWER_BYTE_2_RSSI_CK_SPEED, 0x00);
+	ret = tda18272_wr(tda18272, TDA18272_POWER_BYTE_2, tda18272->regs[TDA18272_POWER_BYTE_2]);
+	if (ret)
+		goto err;
+	TDA18272_SETFIELD(tda18272->regs[TDA18272_AGC1_BYTE_2], AGC1_BYTE_2_AGC1_DO_STEP, 0x02);
+	ret = tda18272_wr(tda18272, TDA18272_AGC1_BYTE_2, tda18272->regs[TDA18272_AGC1_BYTE_2]);
+	if (ret)
+		goto err;
+	TDA18272_SETFIELD(tda18272->regs[TDA18272_RF_FILTER_BYTE_3], RF_FILTER_BYTE_3_AGC2_DO_STEP, 0x01);
+	ret = tda18272_wr(tda18272, TDA18272_RF_FILTER_BYTE_3, tda18272->regs[TDA18272_RF_FILTER_BYTE_3]);
+	if (ret)
+		goto err;
+	TDA18272_SETFIELD(tda18272->regs[TDA18272_AGCK_BYTE_1], AGC1_BYTE_1_AGCs_UP_STEP_ASYM, 0x03);
+	ret = tda18272_wr(tda18272, TDA18272_AGCK_BYTE_1, tda18272->regs[TDA18272_AGCK_BYTE_1]);
+	if (ret)
+		goto err;
+	TDA18272_SETFIELD(tda18272->regs[TDA18272_AGC5_BYTE_1], AGC5_BYTE_1_AGCs_DO_STEP_ASYM, 0x02);
+	ret = tda18272_wr(tda18272, TDA18272_AGC5_BYTE_1, tda18272->regs[TDA18272_AGC5_BYTE_1]);
+	if (ret)
+		goto err;
+	ret = tda18272_wr(tda18272, TDA18272_IRQ_CLEAR, 0x9f);
+	if (ret)
+		goto err;
+	ret = tda18272_pstate(tda18272, TDA18272_NORMAL);
+	if (ret) {
+		dprintk(FE_ERROR, 1, "Power state switch failed, ret=%d", ret);
+		goto err;
+	}
+	tda18272->regs[TDA18272_MSM_BYTE_1] = 0x38;
+	tda18272->regs[TDA18272_MSM_BYTE_2] = 0x01;
+	ret = tda18272_wr_regs(tda18272, TDA18272_MSM_BYTE_1, &tda18272->regs[TDA18272_MSM_BYTE_1], 2);
+	if (ret)
+		goto err;
+
+	ret = tda18272_wait_irq(tda18272, 1500, 50, 0x1f);
+	if (ret)
+		goto err;
+err:
+	dprintk(FE_DEBUG, 1, "ret=%d", ret);
+	return ret;
+}
+
+static int tda18272_init(struct dvb_frontend *fe)
+{
+	struct tda18272_state *tda18272 = fe->tuner_priv;
+	int ret;
+
+	if (tda18272->mode) {
+		dprintk(FE_DEBUG, 1, "Initializing Master ..");
+		ret = tda18272_cal_wait(tda18272);
+		if (ret)
+			goto err;
+	} else {
+		dprintk(FE_DEBUG, 1, "Initializing Slave ..");
+		TDA18272_SETFIELD(tda18272->regs[TDA18272_FLO_MAX_BYTE], FLO_MAX_BYTE_FMAX_LO, 0x00);
+		ret = tda18272_wr(tda18272, TDA18272_FLO_MAX_BYTE, tda18272->regs[TDA18272_FLO_MAX_BYTE]);
+		if (ret)
+			goto err;
+		TDA18272_SETFIELD(tda18272->regs[TDA18272_CP_CURRENT], CP_CURRENT_N_CP_CURRENT, 0x68);
+		ret = tda18272_wr(tda18272, TDA18272_CP_CURRENT, tda18272->regs[TDA18272_CP_CURRENT]);
+	}
+	ret = tda18272_reset(tda18272);
+	if (ret)
+		goto err;
+
+	TDA18272_SETFIELD(tda18272->regs[TDA18272_FLO_MAX_BYTE], FLO_MAX_BYTE_FMAX_LO, 0x0a);
+	ret = tda18272_wr(tda18272, TDA18272_FLO_MAX_BYTE, tda18272->regs[TDA18272_FLO_MAX_BYTE]);
+	if (ret)
+		goto err;
+
+	TDA18272_SETFIELD(tda18272->regs[TDA18272_AGC1_BYTE_1], AGC1_BYTE_1_LT_ENABLE, tda18272->lna_top);
+	ret = tda18272_wr(tda18272, TDA18272_AGC1_BYTE_1, tda18272->regs[TDA18272_AGC1_BYTE_1]);
+	if (ret)
+		goto err;
+
+	TDA18272_SETFIELD(tda18272->regs[TDA18272_PSM_BYTE_1], PSM_BYTE_1_PSM_AGC1, tda18272->psm_agc);
+	ret = tda18272_wr(tda18272, TDA18272_PSM_BYTE_1, tda18272->regs[TDA18272_PSM_BYTE_1]);
+	if (ret)
+		goto err;
+
+	TDA18272_SETFIELD(tda18272->regs[TDA18272_AGC1_BYTE_1], AGC1_BYTE_1_AGC1_6_15DB, tda18272->agc1);
+	ret = tda18272_wr(tda18272, TDA18272_AGC1_BYTE_1, tda18272->regs[TDA18272_AGC1_BYTE_1]);
+	if (ret)
+		goto err;
+err:
+	dprintk(FE_DEBUG, 1, "ret=%d", ret);
+	return ret;
+}
+
+static int tda18272_clear_irq(struct tda18272_state *tda18272, u8 status)
+{
+	tda18272->regs[TDA18272_IRQ_CLEAR] = status & 0x1f;
+	TDA18272_SETFIELD(tda18272->regs[TDA18272_IRQ_CLEAR], IRQ_CLEAR_IRQ_CLEAR, 0x80);
+	return tda18272_wr(tda18272, TDA18272_IRQ_CLEAR, tda18272->regs[TDA18272_IRQ_CLEAR]);
+}
+
+static int tda18272_set_rf(struct tda18272_state *tda18272, u32 freq)
+{
+	u32 tmp;
+	int ret;
+
+	ret = tda18272_clear_irq(tda18272, 0x0c);
+	if (ret)
+		goto err;
+
+	ret = tda18272_pstate(tda18272, TDA18272_NORMAL);
+	if (ret)
+		goto err;
+
+	tmp = freq / 1000;
+	tda18272->regs[TDA18272_RF_FREQUENCY_BYTE_1] = (u8) ((tmp & 0xff0000) >> 16);
+	tda18272->regs[TDA18272_RF_FREQUENCY_BYTE_2] = (u8) ((tmp & 0x00ff00) >>  8);
+	tda18272->regs[TDA18272_RF_FREQUENCY_BYTE_3] = (u8)  (tmp & 0x0000ff);
+	ret = tda18272_wr_regs(tda18272, TDA18272_RF_FREQUENCY_BYTE_1, &tda18272->regs[TDA18272_RF_FREQUENCY_BYTE_1], 3);
+	if (ret)
+		goto err;
+
+	tda18272->regs[TDA18272_MSM_BYTE_1] = 0x41;
+	tda18272->regs[TDA18272_MSM_BYTE_2] = 0x01;
+	ret = tda18272_wr_regs(tda18272, TDA18272_MSM_BYTE_1, &tda18272->regs[TDA18272_MSM_BYTE_1], 2);
+	if (ret)
+		goto err;
+
+	ret = tda18272_wait_irq(tda18272, 50, 5, 0x0c);
+	if (ret)
+		goto err;
+err:
+	dprintk(FE_DEBUG, 1, "ret=%d", ret);
+	return ret;
+}
+
+static int tda18272_set_frequency(struct tda18272_state *tda18272, u32 frequency)
+{
+	int ret;
+
+	u8 ratio_l, ratio_h;
+	u32 delta_l, delta_h;
+	u8 loop_off, rffilt_gv = 0;
+
+	u8 g1, count, agc1, agc1_steps, done = 0;
+	s16 steps_up, steps_down;
+
+	const struct tda18272_coeff *coe = tda18272->coe;
+
+	dprintk(FE_DEBUG, 1, "set freq=%d", frequency);
+
+	TDA18272_SETFIELD(tda18272->regs[TDA18272_IF_BYTE_1], IF_BYTE_1_LP_FC, coe->lpf); /* LPF */
+	ret = tda18272_wr(tda18272, TDA18272_IF_BYTE_1, tda18272->regs[TDA18272_IF_BYTE_1]);
+	if (ret)
+		goto err;
+
+	TDA18272_SETFIELD(tda18272->regs[TDA18272_IF_BYTE_1], IF_BYTE_1_LP_FC_OFFSET, coe->lpf_off);
+	ret = tda18272_wr(tda18272, TDA18272_IF_BYTE_1, tda18272->regs[TDA18272_IF_BYTE_1]);
+	if (ret)
+		goto err;
+
+	TDA18272_SETFIELD(tda18272->regs[TDA18272_IFAGC], IFAGC_IF_LEVEL, coe->if_gain);
+	ret = tda18272_wr(tda18272, TDA18272_IFAGC, tda18272->regs[TDA18272_IFAGC]);
+	if (ret)
+		goto err;
+
+	TDA18272_SETFIELD(tda18272->regs[TDA18272_IF_BYTE_1], IF_BYTE_1_IF_NOTCH, coe->if_notch);
+	ret = tda18272_wr(tda18272, TDA18272_IF_BYTE_1, tda18272->regs[TDA18272_IF_BYTE_1]);
+	if (ret)
+		goto err;
+
+	if (coe->if_hpf == TDA18272_HPF_DISABLED) {
+		TDA18272_SETFIELD(tda18272->regs[TDA18272_IRMIXER_BYTE_2], IRMIXER_BYTE_2_HI_PASS, 0x0);
+		ret = tda18272_wr(tda18272, TDA18272_IRMIXER_BYTE_2, tda18272->regs[TDA18272_IRMIXER_BYTE_2]);
+		if (ret)
+			goto err;
+	} else {
+		TDA18272_SETFIELD(tda18272->regs[TDA18272_IRMIXER_BYTE_2], IRMIXER_BYTE_2_HI_PASS, 0x1);
+		ret = tda18272_wr(tda18272, TDA18272_IRMIXER_BYTE_2, tda18272->regs[TDA18272_IRMIXER_BYTE_2]);
+		if (ret)
+			goto err;
+		TDA18272_SETFIELD(tda18272->regs[TDA18272_IF_BYTE_1], IF_BYTE_1_IF_HP_FC, (coe->if_hpf - 1));
+		ret = tda18272_wr(tda18272, TDA18272_IF_BYTE_1, tda18272->regs[TDA18272_IF_BYTE_1]);
+		if (ret)
+			goto err;
+	}
+
+	TDA18272_SETFIELD(tda18272->regs[TDA18272_IRMIXER_BYTE_2], IRMIXER_BYTE_2_DC_NOTCH, coe->dc_notch);
+	ret = tda18272_wr(tda18272, TDA18272_IRMIXER_BYTE_2, tda18272->regs[TDA18272_IRMIXER_BYTE_2]);
+	if (ret)
+		goto err;
+
+	TDA18272_SETFIELD(tda18272->regs[TDA18272_AGC1_BYTE_1], AGC1_BYTE_1_AGC1_TOP, coe->lna_top);
+	ret = tda18272_wr(tda18272, TDA18272_AGC1_BYTE_1, tda18272->regs[TDA18272_AGC1_BYTE_1]);
+	if (ret)
+		goto err;
+
+	TDA18272_SETFIELD(tda18272->regs[TDA18272_AGC2_BYTE_1], AGC2_BYTE_1_AGC2_TOP, coe->rfatt_top);
+	ret = tda18272_wr(tda18272, TDA18272_AGC2_BYTE_1, tda18272->regs[TDA18272_AGC2_BYTE_1]);
+	if (ret)
+		goto err;
+
+	if (frequency < TDA18272_AGC3_RF_AGC_TOP_FREQ_LIM)
+		TDA18272_SETFIELD(tda18272->regs[TDA18272_RFAGC_BYTE_1], RFAGC_BYTE_1_AGC3_TOP, coe->loband_rfagc_top);
+	else
+		TDA18272_SETFIELD(tda18272->regs[TDA18272_RFAGC_BYTE_1], RFAGC_BYTE_1_AGC3_TOP, coe->hiband_rfagc_top);
+	ret= tda18272_wr(tda18272, TDA18272_RFAGC_BYTE_1, tda18272->regs[TDA18272_RFAGC_BYTE_1]);
+	if (ret)
+		goto err;
+
+	TDA18272_SETFIELD(tda18272->regs[TDA18272_IRMIXER_BYTE_1], IRMIXER_BYTE_1_AGC4_TOP, coe->irmix_top);
+	ret = tda18272_wr(tda18272, TDA18272_IRMIXER_BYTE_1, tda18272->regs[TDA18272_IRMIXER_BYTE_1]);
+	if (ret)
+		goto err;
+
+	TDA18272_SETFIELD(tda18272->regs[TDA18272_AGC5_BYTE_1], AGC5_BYTE_1_AGC5_TOP, coe->ifagc_top);
+	ret = tda18272_wr(tda18272, TDA18272_AGC5_BYTE_1, tda18272->regs[TDA18272_AGC5_BYTE_1]);
+	if (ret)
+		goto err;
+
+	TDA18272_SETFIELD(tda18272->regs[TDA18272_RFAGC_BYTE_1], RFAGC_BYTE_1_PD_RFAGC_ADAPT, coe->agc3_adapt);
+	ret = tda18272_wr(tda18272, TDA18272_RFAGC_BYTE_1, tda18272->regs[TDA18272_RFAGC_BYTE_1]);
+	if (ret)
+		goto err;
+
+	TDA18272_SETFIELD(tda18272->regs[TDA18272_RFAGC_BYTE_1], RFAGC_BYTE_1_RFAGC_ADAPT_TOP, coe->agc3_adapt_top);
+	ret = tda18272_wr(tda18272, TDA18272_RFAGC_BYTE_1, tda18272->regs[TDA18272_RFAGC_BYTE_1]);
+	if (ret)
+		goto err;
+
+	TDA18272_SETFIELD(tda18272->regs[TDA18272_RFAGC_BYTE_1], RFAGC_BYTE_1_RF_ATTEN_3DB, coe->att3db);
+	ret = tda18272_wr(tda18272, TDA18272_RFAGC_BYTE_1, tda18272->regs[TDA18272_RFAGC_BYTE_1]);
+	if (ret)
+		goto err;
+
+	TDA18272_SETFIELD(tda18272->regs[TDA18272_AGC5_BYTE_1], AGC5_BYTE_1_AGC5_HPF, coe->det_hpf);
+	ret = tda18272_wr(tda18272, TDA18272_AGC5_BYTE_1, tda18272->regs[TDA18272_AGC5_BYTE_1]);
+	if (ret)
+		goto err;
+
+	TDA18272_SETFIELD(tda18272->regs[TDA18272_AGCK_BYTE_1], AGCK_BYTE_1_AGCK_MODE, coe->gsk & 0x03);
+	ret = tda18272_wr(tda18272, TDA18272_AGCK_BYTE_1, tda18272->regs[TDA18272_AGCK_BYTE_1]);
+	if (ret)
+		goto err;
+
+	TDA18272_SETFIELD(tda18272->regs[TDA18272_AGCK_BYTE_1], AGCK_BYTE_1_AGCK_STEP, (coe->gsk & 0x0c) >> 2);
+	ret = tda18272_wr(tda18272, TDA18272_AGCK_BYTE_1, tda18272->regs[TDA18272_AGCK_BYTE_1]);
+	if (ret)
+		goto err;
+
+	TDA18272_SETFIELD(tda18272->regs[TDA18272_PSM_BYTE_1], PSM_BYTE_1_PSM_STOB, coe->filter);
+	ret = tda18272_wr(tda18272, TDA18272_PSM_BYTE_1, tda18272->regs[TDA18272_PSM_BYTE_1]);
+	if (ret)
+		goto err;
+
+	TDA18272_SETFIELD(tda18272->regs[TDA18272_IF_FREQUENCY], IF_FREQUENCY_IF_FREQ, (coe->if_val - coe->cf_off) / 50000);
+	ret = tda18272_wr(tda18272, TDA18272_IF_FREQUENCY, tda18272->regs[TDA18272_IF_FREQUENCY]);
+	if (ret)
+		goto err;
+
+	if (coe->ltosto_immune && tda18272->mode) {
+		ret = tda18272_rd(tda18272, TDA18272_RF_AGC_GAIN_BYTE_1, &tda18272->regs[TDA18272_RF_AGC_GAIN_BYTE_1]);
+		if (ret)
+			goto err;
+		rffilt_gv = TDA18272_GETFIELD(RF_AGC_GAIN_BYTE_1_RF_FILTER_GAIN, tda18272->regs[TDA18272_RF_AGC_GAIN_BYTE_1]);
+
+		TDA18272_SETFIELD(tda18272->regs[TDA18272_RF_FILTER_BYTE_1], RF_FILTER_BYTE_1_RF_FILTER_GV, rffilt_gv);
+		ret = tda18272_wr(tda18272, TDA18272_RF_FILTER_BYTE_1, tda18272->regs[TDA18272_RF_FILTER_BYTE_1]);
+		if (ret)
+			goto err;
+
+		TDA18272_SETFIELD(tda18272->regs[TDA18272_RF_FILTER_BYTE_1], RF_FILTER_BYTE_1_FORCE_AGC2_GAIN, 0x1);
+		ret = tda18272_wr(tda18272, TDA18272_RF_FILTER_BYTE_1, tda18272->regs[TDA18272_RF_FILTER_BYTE_1]);
+		if (ret)
+			goto err;
+
+		if (rffilt_gv) {
+			do {
+				TDA18272_SETFIELD(tda18272->regs[TDA18272_RF_FILTER_BYTE_1], RF_FILTER_BYTE_1_RF_FILTER_GV, (rffilt_gv - 1));
+				ret = tda18272_wr(tda18272, TDA18272_RF_FILTER_BYTE_1, tda18272->regs[TDA18272_RF_FILTER_BYTE_1]);
+				if (ret)
+					goto err;
+
+				msleep(10);
+				rffilt_gv -= 1;
+			} while (rffilt_gv > 0);
+		}
+		TDA18272_SETFIELD(tda18272->regs[TDA18272_RFAGC_BYTE_1], RFAGC_BYTE_1_RF_ATTEN_3DB, 0x01);
+		ret = tda18272_wr(tda18272, TDA18272_RFAGC_BYTE_1, tda18272->regs[TDA18272_RFAGC_BYTE_1]);
+		if (ret)
+			goto err;
+	}
+	ret = tda18272_set_rf(tda18272, frequency + coe->cf_off);
+	if (ret)
+		goto err;
+
+	if (coe->ltosto_immune && tda18272->mode) {
+		TDA18272_SETFIELD(tda18272->regs[TDA18272_RFAGC_BYTE_1], RFAGC_BYTE_1_RF_ATTEN_3DB, 0x00);
+		ret = tda18272_wr(tda18272, TDA18272_RFAGC_BYTE_1, tda18272->regs[TDA18272_RFAGC_BYTE_1]);
+		if (ret)
+			goto err;
+
+		msleep(50);
+		TDA18272_SETFIELD(tda18272->regs[TDA18272_RF_FILTER_BYTE_1], RF_FILTER_BYTE_1_FORCE_AGC2_GAIN, 0x1);
+		ret = tda18272_wr(tda18272, TDA18272_RF_FILTER_BYTE_1, tda18272->regs[TDA18272_RF_FILTER_BYTE_1]);
+		if (ret)
+			goto err;
+	}
+	ratio_l = (u8)(frequency / 16000000);
+	ratio_h = (u8)(frequency / 16000000) + 1;
+	delta_l = (frequency - (ratio_l * 16000000));
+	delta_h = ((ratio_h * 16000000) - frequency);
+
+	if (frequency < 72000000 ) {
+		TDA18272_SETFIELD(tda18272->regs[TDA18272_REFERENCE], REFERENCE_DIGITAL_CLOCK, 0x1);
+	} else if (frequency < 104000000 ) {
+		TDA18272_SETFIELD(tda18272->regs[TDA18272_REFERENCE], REFERENCE_DIGITAL_CLOCK, 0x0);
+	} else if (frequency <= 120000000) {
+		TDA18272_SETFIELD(tda18272->regs[TDA18272_REFERENCE], REFERENCE_DIGITAL_CLOCK, 0x1);
+	} else {
+		if (delta_l <= delta_h) {
+			if (ratio_l & 0x000001)
+				TDA18272_SETFIELD(tda18272->regs[TDA18272_REFERENCE], REFERENCE_DIGITAL_CLOCK, 0x0);
+			else
+				TDA18272_SETFIELD(tda18272->regs[TDA18272_REFERENCE], REFERENCE_DIGITAL_CLOCK, 0x1);
+		} else {
+			if (ratio_l & 0x000001)
+				TDA18272_SETFIELD(tda18272->regs[TDA18272_REFERENCE], REFERENCE_DIGITAL_CLOCK, 0x1);
+			else
+				TDA18272_SETFIELD(tda18272->regs[TDA18272_REFERENCE], REFERENCE_DIGITAL_CLOCK, 0x0);
+		}
+	}
+	ret = tda18272_wr(tda18272, TDA18272_REFERENCE, tda18272->regs[TDA18272_REFERENCE]);
+	if (ret)
+		goto err;
+
+	if (coe->agc1_freeze) {
+		tda18272_rd(tda18272, TDA18272_AGC1_BYTE_2, &tda18272->regs[TDA18272_AGC1_BYTE_2]);
+		loop_off = TDA18272_GETFIELD(AGC1_BYTE_2_AGC1_LOOP_OFF, tda18272->regs[TDA18272_AGC1_BYTE_2]);
+		if (!loop_off) {
+			TDA18272_SETFIELD(tda18272->regs[TDA18272_AGC1_BYTE_2], AGC1_BYTE_2_AGC1_LOOP_OFF, 0x1);
+			ret = tda18272_wr(tda18272, TDA18272_AGC1_BYTE_2, tda18272->regs[TDA18272_AGC1_BYTE_2]);
+			if (ret)
+				goto err;
+			TDA18272_SETFIELD(tda18272->regs[TDA18272_AGC1_BYTE_2], AGC1_BYTE_2_FORCE_AGC1_GAIN, 0x01);
+			ret = tda18272_wr(tda18272, TDA18272_AGC1_BYTE_2, tda18272->regs[TDA18272_AGC1_BYTE_2]);
+			if (ret)
+				goto err;
+		}
+		if (!TDA18272_GETFIELD(AGC1_BYTE_1_AGC1_6_15DB, tda18272->regs[TDA18272_AGC1_BYTE_1])) {
+			agc1 = 0;
+			agc1_steps = 10;
+		} else {
+			agc1 = 6;
+			agc1_steps = 4;
+		}
+
+		while (done < agc1_steps) {
+			count		 = 0;
+			steps_up	 = 0;
+			steps_down	 = 0;
+			done		+= 1;
+
+			while ((count++) < 40) {
+				ret = tda18272_rd(tda18272, TDA18272_AGC_DET_OUT, &tda18272->regs[TDA18272_AGC_DET_OUT]);
+				if (ret)
+					goto err;
+				steps_down += (TDA18272_GETFIELD(AGC_DET_OUT_DO_AGC1, tda18272->regs[TDA18272_AGC_DET_OUT]) ? 14 : -1 );
+				steps_up += (TDA18272_GETFIELD(AGC_DET_OUT_UP_AGC1, tda18272->regs[TDA18272_AGC_DET_OUT]) ? 1 : -4 );
+				msleep(1);
+			} if (steps_up >= 15 && (TDA18272_GETFIELD(AGC1_BYTE_2_AGC1_GAIN, tda18272->regs[TDA18272_AGC1_BYTE_2]) != 9)) {
+				g1 = TDA18272_GETFIELD(AGC1_BYTE_2_AGC1_GAIN, tda18272->regs[TDA18272_AGC1_BYTE_2]) + 1;
+				TDA18272_SETFIELD(tda18272->regs[TDA18272_AGC1_BYTE_2], AGC1_BYTE_2_AGC1_GAIN, g1);
+				ret = tda18272_wr(tda18272, TDA18272_AGC1_BYTE_2, tda18272->regs[TDA18272_AGC1_BYTE_2]);
+				if (ret)
+					goto err;
+			} else if (steps_down >= 10 && TDA18272_GETFIELD(AGC1_BYTE_2_AGC1_GAIN, tda18272->regs[TDA18272_AGC1_BYTE_2]) != agc1) {
+				g1 = TDA18272_GETFIELD(AGC1_BYTE_2_AGC1_GAIN, tda18272->regs[TDA18272_AGC1_BYTE_2]) - 1;
+				TDA18272_SETFIELD(tda18272->regs[TDA18272_AGC1_BYTE_2], AGC1_BYTE_2_AGC1_GAIN, g1);
+				ret = tda18272_wr(tda18272, TDA18272_AGC1_BYTE_2, tda18272->regs[TDA18272_AGC1_BYTE_2]);
+				if (ret)
+					goto err;
+			} else {
+				done = agc1_steps;
+			}
+		}
+	} else {
+		TDA18272_SETFIELD(tda18272->regs[TDA18272_AGC1_BYTE_2], AGC1_BYTE_2_FORCE_AGC1_GAIN, 0x00);
+		ret = tda18272_wr(tda18272, TDA18272_AGC1_BYTE_2, tda18272->regs[TDA18272_AGC1_BYTE_2]);
+		if (ret)
+			goto err;
+
+		TDA18272_SETFIELD(tda18272->regs[TDA18272_AGC1_BYTE_2], AGC1_BYTE_2_AGC1_LOOP_OFF, 0x00);
+		ret = tda18272_wr(tda18272, TDA18272_AGC1_BYTE_2, tda18272->regs[TDA18272_AGC1_BYTE_2]);
+		if (ret)
+			goto err;
+	}
+err:
+	dprintk(FE_DEBUG, 1, "ret=%d", ret);
+	return ret;
+}
+
+static int tda18272_get_status(struct dvb_frontend *fe, u32 *status)
+{
+	struct tda18272_state *tda18272 = fe->tuner_priv;
+	int ret = 0;
+
+	*status = 0;
+
+	ret = tda18272_wr(tda18272, TDA18272_THERMO_BYTE_2, 0x01);
+	if (ret)
+		goto err;
+
+	ret = tda18272_rd(tda18272, TDA18272_THERMO_BYTE_1, &tda18272->regs[TDA18272_THERMO_BYTE_1]);
+	if (ret)
+		goto err;
+
+	ret = tda18272_rd_regs(tda18272, TDA18272_POWERSTATE_BYTE_1, &tda18272->regs[TDA18272_POWERSTATE_BYTE_1], 3);
+	if (ret)
+		goto err;
+
+	if (TDA18272_GETFIELD(POWERSTATE_BYTE_1_LO_LOCK, tda18272->regs[TDA18272_POWERSTATE_BYTE_1])) {
+		dprintk(FE_ERROR, 1, "PLL Locked");
+		*status |= 0x01;
+	}
+	if ((tda18272->regs[TDA18272_POWERSTATE_BYTE_2] >> 1) == 0)
+		dprintk(FE_ERROR, 1, "Normal MODE");
+	if ((tda18272->regs[TDA18272_POWERSTATE_BYTE_2] >> 1) == 7)
+		dprintk(FE_ERROR, 1, "Standby MODE, LNA=ON, PLL=OFF");
+	if ((tda18272->regs[TDA18272_POWERSTATE_BYTE_2] >> 1) == 6)
+		dprintk(FE_ERROR, 1, "Standby MODE, LNA=ON, PLL=OFF");
+	if ((tda18272->regs[TDA18272_POWERSTATE_BYTE_2] >> 1) == 4)
+		dprintk(FE_ERROR, 1, "Standby MODE, LNA=ON, PLL=ON");
+
+	dprintk(FE_ERROR, 1, "Junction Temperature:%d Power level:%d",
+		tda18272->regs[TDA18272_THERMO_BYTE_1],
+		tda18272->regs[TDA18272_INPUT_POWERLEVEL]);
+
+err:
+	dprintk(FE_DEBUG, 1, "ret=%d", ret);
+	return ret;
+}
+
+static int tda18272_set_state(struct dvb_frontend *fe, enum tuner_param param, struct tuner_state *state)
+{
+	return -EINVAL;
+}
+
+static int tda18272_get_state(struct dvb_frontend *fe, enum tuner_param param, struct tuner_state *state)
+{
+	struct tda18272_state *tda18272		= fe->tuner_priv;
+	const struct tda18272_coeff *coe	= tda18272->coe;
+	int ret;
+
+	switch (param) {
+	case DVBFE_TUNER_FREQUENCY:
+		state->frequency = tda18272->frequency;
+		ret = 0;
+		break;
+	case DVBFE_TUNER_TUNERSTEP:
+		state->tunerstep = fe->ops.tuner_ops.info.frequency_step;
+		ret = 0;
+		break;
+	case DVBFE_TUNER_IFFREQ:
+		state->ifreq = coe->if_val;
+		ret = 0;
+		break;
+	case DVBFE_TUNER_BANDWIDTH:
+		if (fe->ops.info.type == FE_OFDM)
+			state->bandwidth = tda18272->bandwidth;
+		ret = 0;
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+static int tda18272_set_params(struct dvb_frontend *fe)
+{
+	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+	struct tda18272_state *tda18272 = fe->tuner_priv;
+	struct tda18272_coeff *coe	= NULL;
+	u32 status;
+	u32 delsys = c->delivery_system;
+	u32 bw = c->bandwidth_hz;
+	u32 freq = c->frequency;
+	int ret;
+
+	BUG_ON(!tda18272);
+
+	dprintk(FE_DEBUG, 1, "freq=%d, bw=%d", freq, bw);
+	switch (delsys) {
+	case SYS_ATSC:
+		coe = coeft + TDA18272_ATSC_6MHz;
+		break;
+	case SYS_DVBT:
+	case SYS_DVBT2:
+		switch (bw) {
+		case 6000000:
+			coe = coeft + TDA18272_DVBT_6MHz;
+			break;
+		case 7000000:
+			coe = coeft + TDA18272_DVBT_7MHz;
+			break;
+		case 8000000:
+			coe = coeft + TDA18272_DVBT_8MHz;
+			break;
+		default:
+			coe = NULL;
+			ret = -EINVAL;
+			goto err;
+		}
+		break;
+	case SYS_DVBC_ANNEX_A:
+	case SYS_DVBC_ANNEX_C:
+		coe = coeft + TDA18272_QAM_8MHz;
+		break;
+	case SYS_DVBC_ANNEX_B:
+		coe = coeft + TDA18272_QAM_6MHz;
+		break;
+	}
+	BUG_ON(!coe);
+	tda18272->coe = coe;
+	dprintk(FE_DEBUG, 1, "Loading %s coeffecients...", coe->desc);
+	ret = tda18272_set_frequency(tda18272, freq);
+	if (ret)
+		goto err;
+	msleep(100);
+	ret = tda18272_get_status(fe, &status);
+	if (ret)
+		goto err;
+
+	if (status == 0x01) {
+		tda18272->frequency = freq;
+		if (fe->ops.info.type == FE_OFDM)
+			tda18272->bandwidth = bw;
+	}
+err:
+	dprintk(FE_DEBUG, 1, "ret=%d", ret);
+	return ret;
+}
+
+static int tda18272_get_ifreq(struct dvb_frontend *fe, u32 *frequency)
+{
+	struct tda18272_state *tda18272	= fe->tuner_priv;
+	const struct tda18272_coeff *coe = tda18272->coe;
+
+	*frequency = coe->if_val;
+	return 0;
+}
+
+static int tda18272_release(struct dvb_frontend *fe)
+{
+	struct tda18272_state *tda18272 = fe->tuner_priv;
+
+	BUG_ON(!tda18272);
+	fe->tuner_priv = NULL;
+	kfree(tda18272);
+	return 0;
+}
+
+static struct dvb_tuner_ops tda18272_ops = {
+	.info = {
+		.name		= "TDA18272 Silicon Tuner",
+		.frequency_min  =  42000000,
+		.frequency_max  = 870000000,
+		.frequency_step	= 50000,
+	},
+	.init			= tda18272_init,
+//	.sleep			= tda18272_sleep,
+	.get_status		= tda18272_get_status,
+	.set_params		= tda18272_set_params,
+	.set_state		= tda18272_set_state,
+	.get_state		= tda18272_get_state,
+	.get_frequency		= tda18272_get_ifreq,
+	.release		= tda18272_release
+};
+
+
+#define TDA18272_CHIP_ID	18272
+#define TDA18272_MAJOR_REV	1
+#define TDA18272_MINOR_REV	1
+
+struct dvb_frontend *tda18272_attach(struct dvb_frontend *fe,
+				     struct i2c_adapter *i2c,
+				     const struct tda18272_config *config)
+{
+	struct tda18272_state *tda18272;
+	u8 major = 0, minor = 0, mode = 0;
+	int id = 0, ret;
+
+	tda18272 = kzalloc(sizeof (struct tda18272_state), GFP_KERNEL);
+	if (!tda18272)
+		goto err;
+
+	BUG_ON(!i2c);
+	BUG_ON(!config);
+
+	tda18272->i2c		= i2c;
+	tda18272->config	= config;
+	tda18272->fe		= fe;
+
+	fe->tuner_priv		= tda18272;
+	fe->ops.tuner_ops	= tda18272_ops;
+
+	ret = tda18272_rd_regs(tda18272, TDA18272_ID_BYTE_1, &tda18272->regs[TDA18272_ID_BYTE_1], 3);
+	if (ret)
+		goto err;
+
+	id    = (TDA18272_GETFIELD(ID_BYTE_1_IDENT, tda18272->regs[TDA18272_ID_BYTE_1]) << 8) |
+		 TDA18272_GETFIELD(ID_BYTE_2_IDENT, tda18272->regs[TDA18272_ID_BYTE_2]);
+
+	major = TDA18272_GETFIELD(ID_BYTE_3_MAJOR_REV, tda18272->regs[TDA18272_ID_BYTE_3]);
+	minor = TDA18272_GETFIELD(ID_BYTE_3_MINOR_REV, tda18272->regs[TDA18272_ID_BYTE_3]);
+	mode  = TDA18272_GETFIELD(ID_BYTE_1_MASTER_SLAVE, tda18272->regs[TDA18272_ID_BYTE_1]);
+
+	if (id == TDA18272_CHIP_ID) {
+		dprintk(FE_ERROR, 1, "Found TDA%d %s Rev:%d.%d", id, mode ? "Master" : "Slave", major, minor);
+		if ((major != TDA18272_MAJOR_REV) || (minor != TDA18272_MINOR_REV))
+			dprintk(FE_ERROR, 1, "Unknown Version:%d.%d, trying anyway ..", major, minor);
+
+		tda18272->mode	  = mode;
+		if (config->mode == TDA18272_SLAVE && tda18272->mode == 1)
+			dprintk(FE_ERROR, 1, "Config as TDA18272 Slave, but TDA18272 Master found ???");
+
+		if (config->mode == TDA18272_MASTER)
+			tda18272->ms = 1;
+		else
+			tda18272->ms = 0;
+
+		tda18272->lna_top = 0;
+		tda18272->psm_agc = 1;
+		tda18272->agc1    = 0;
+
+		ret = tda18272_init(fe);
+		if (ret) {
+			dprintk(FE_ERROR, 1, "Error Initializing!");
+			goto err1;
+		}
+
+		dprintk(FE_DEBUG, 1, "Done");
+		return tda18272->fe;
+	}
+err:
+	dprintk(FE_ERROR, 1, "TDA18272 not found!, ID=0x%02x exiting..", id);
+err1:
+	kfree(tda18272);
+	return NULL;
+}
+EXPORT_SYMBOL(tda18272_attach);
+
+MODULE_AUTHOR("Manu Abraham");
+MODULE_DESCRIPTION("TDA18272 Silicon tuner");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/tuners/tda18272.h b/drivers/media/tuners/tda18272.h
new file mode 100644
index 0000000..179428d
--- /dev/null
+++ b/drivers/media/tuners/tda18272.h
@@ -0,0 +1,51 @@
+/*
+	TDA18272 Silicon tuner driver
+	Copyright (C) Manu Abraham <abraham.manu@gmail.com>
+
+	This program is free software; you can redistribute it and/or modify
+	it under the terms of the GNU General Public License as published by
+	the Free Software Foundation; either version 2 of the License, or
+	(at your option) any later version.
+
+	This program is distributed in the hope that it will be useful,
+	but WITHOUT ANY WARRANTY; without even the implied warranty of
+	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+	GNU General Public License for more details.
+
+	You should have received a copy of the GNU General Public License
+	along with this program; if not, write to the Free Software
+	Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __TDA18272_H
+#define __TDA18272_H
+
+enum tda18272_mode {
+	TDA18272_SINGLE	= 0,
+	TDA18272_MASTER,
+	TDA18272_SLAVE,
+};
+
+struct tda18272_config {
+	u8			addr;
+	enum tda18272_mode	mode;
+};
+
+#if IS_ENABLED(CONFIG_MEDIA_TUNER_TDA18272)
+
+extern struct dvb_frontend *tda18272_attach(struct dvb_frontend *fe,
+					    struct i2c_adapter *i2c,
+					    const struct tda18272_config *config);
+
+#else
+static inline struct dvb_frontend *tda18272_attach(struct dvb_frontend *fe,
+						   struct i2c_adapter *i2c,
+						   const struct tda18272_config *config)
+{
+	printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+	return NULL;
+}
+
+#endif /* CONFIG_MEDIA_TUNER_TDA18272 */
+
+#endif /* __TDA18272_H */
diff --git a/drivers/media/tuners/tda18272_reg.h b/drivers/media/tuners/tda18272_reg.h
new file mode 100644
index 0000000..fbcb97b
--- /dev/null
+++ b/drivers/media/tuners/tda18272_reg.h
@@ -0,0 +1,532 @@
+/*
+	TDA18272 Silicon tuner driver
+	Copyright (C) Manu Abraham <abraham.manu@gmail.com>
+
+	This program is free software; you can redistribute it and/or modify
+	it under the terms of the GNU General Public License as published by
+	the Free Software Foundation; either version 2 of the License, or
+	(at your option) any later version.
+
+	This program is distributed in the hope that it will be useful,
+	but WITHOUT ANY WARRANTY; without even the implied warranty of
+	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+	GNU General Public License for more details.
+
+	You should have received a copy of the GNU General Public License
+	along with this program; if not, write to the Free Software
+	Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __TDA18272_REG_H
+#define __TDA18272_REG_H
+
+#define TDA18272_ID_BYTE_1					0x00
+#define TDA18272_OFFST_ID_BYTE_1_MASTER_SLAVE			7
+#define TDA18272_WIDTH_ID_BYTE_1_MASTER_SLAVE			1
+#define TDA18272_OFFST_ID_BYTE_1_IDENT				0
+#define TDA18272_WIDTH_ID_BYTE_1_IDENT				7
+
+#define TDA18272_ID_BYTE_2					0x01
+#define TDA18272_OFFST_ID_BYTE_2_IDENT				0
+#define TDA18272_WIDTH_ID_BYTE_2_IDENT				8
+
+#define TDA18272_ID_BYTE_3					0x02
+#define TDA18272_OFFST_ID_BYTE_3_MAJOR_REV			4
+#define TDA18272_WIDTH_ID_BYTE_3_MAJOR_REV			4
+#define TDA18272_OFFST_ID_BYTE_3_MINOR_REV			0
+#define TDA18272_WIDTH_ID_BYTE_3_MINOR_REV			4
+
+#define TDA18272_THERMO_BYTE_1					0x03
+#define TDA18272_OFFST_THERMO_BYTE_1_TM_D			0
+#define TDA18272_WIDTH_THERMO_BYTE_1_TM_D			7
+
+#define TDA18272_THERMO_BYTE_2					0x04
+#define TDA18272_OFFST_THERMO_BYTE_2_TM_ON			O
+#define TDA18272_WIDTH_THERMO_BYTE_2_TM_ON			1
+
+#define TDA18272_POWERSTATE_BYTE_1				0x05
+#define TDA18272_OFFST_POWERSTATE_BYTE_1_POR			1
+#define TDA18272_WIDTH_POWERSTATE_BYTE_1_POR			1
+#define TDA18272_OFFST_POWERSTATE_BYTE_1_LO_LOCK		0
+#define TDA18272_WIDTH_POWERSTATE_BYTE_1_LO_LOCK		1
+
+#define TDA18272_POWERSTATE_BYTE_2				0x06
+#define TDA18272_OFFST_POWERSTATE_BYTE_2_SM_LNA			1
+#define TDA18272_WIDTH_POWERSTATE_BYTE_2_SM_LNA			1
+#define TDA18272_OFFST_POWERSTATE_BYTE_2_SM_PLL			2
+#define TDA18272_WIDTH_POWERSTATE_BYTE_2_SM_PLL			1
+#define TDA18272_OFFST_POWERSTATE_BYTE_2_SM			3
+#define TDA18272_WIDTH_POWERSTATE_BYTE_2_SM			1
+
+#define TDA18272_INPUT_POWERLEVEL				0x07
+#define TDA18272_OFFST_INPUT_POWERLEVEL_POWER_LEVEL		0
+#define TDA18272_WIDTH_INPUT_POWERLEVEL_POWER_LEVEL		7
+
+#define TDA18272_IRQ_STATUS					0x08
+#define TDA18272_OFFST_IRQ_STATUS_IRQ_STATUS			7
+#define TDA18272_WIDTH_IRQ_STATUS_IRQ_STATUS			1
+#define TDA18272_OFFST_IRQ_STATUS_XTALCAL_STATUS		5
+#define TDA18272_WIDTH_IRQ_STATUS_XTALCAL_STATUS		1
+#define TDA18272_OFFST_IRQ_STATUS_MSM_RSSI_STATUS		4
+#define TDA18272_WIDTH_IRQ_STATUS_MSM_RSSI_STATUS		1
+#define TDA18272_OFFST_IRQ_STATUS_MSM_LOCALC_STATUS		3
+#define TDA18272_WIDTH_IRQ_STATUS_MSM_LOCALC_STATUS		1
+#define TDA18272_OFFST_IRQ_STATUS_MSM_RFCAL_STATUS		2
+#define TDA18272_WIDTH_IRQ_STATUS_MSM_RFCAL_STATUS		1
+#define TDA18272_OFFST_IRQ_STATUS_MSM_IRCAL_STATUS		1
+#define TDA18272_WIDTH_IRQ_STATUS_MSM_IRCAL_STATUS		1
+#define TDA18272_OFFST_IRQ_STATUS_MSM_RCCAL_STATUS		0
+#define TDA18272_WIDTH_IRQ_STATUS_MSM_RCCAL_STATUS		1
+
+#define TDA18272_IRQ_ENABLE					0x09
+#define TDA18272_OFFST_IRQ_ENABLE_IRQ_ENABLE			7
+#define TDA18272_WIDTH_IRQ_ENABLE_IRQ_ENABLE			1
+#define TDA18272_OFFST_IRQ_ENABLE_UNUSED_I0_D0			6
+#define TDA18272_WIDTH_IRQ_ENABLE_UNUSED_I0_D0			1
+#define TDA18272_OFFST_IRQ_ENABLE_XTALCAL_ENABLE		5
+#define TDA18272_WIDTH_IRQ_ENABLE_XTALCAL_ENABLE		1
+#define TDA18272_OFFST_IRQ_ENABLE_MSM_RSSI_ENABLE		4
+#define TDA18272_WIDTH_IRQ_ENABLE_MSM_RSSI_ENABLE		1
+#define TDA18272_OFFST_IRQ_ENABLE_MSM_LOCALC_ENABLE		3
+#define TDA18272_WIDTH_IRQ_ENABLE_MSM_LOCALC_ENABLE		1
+#define TDA18272_OFFST_IRQ_ENABLE_MSM_RFCAL_ENABLE		2
+#define TDA18272_WIDTH_IRQ_ENABLE_MSM_RFCAL_ENABLE		1
+#define TDA18272_OFFST_IRQ_ENABLE_MSM_IRCAL_ENABLE		1
+#define TDA18272_WIDTH_IRQ_ENABLE_MSM_IRCAL_ENABLE		1
+#define TDA18272_OFFST_IRQ_ENABLE_MSM_RCCAL_ENABLE		0
+#define TDA18272_WIDTH_IRQ_ENABLE_MSM_RCCAL_ENABLE		1
+
+#define TDA18272_IRQ_CLEAR					0x0a
+#define TDA18272_OFFST_IRQ_CLEAR_IRQ_CLEAR			7
+#define TDA18272_WIDTH_IRQ_CLEAR_IRQ_CLEAR			1
+
+#define TDA18272_IRQ_SET					0x0b
+#define TDA18272_OFFST_IRQ_SET_IRQ_SET				7
+#define TDA18272_WIDTH_IRQ_SET_IRQ_SET				1
+#define TDA18272_OFFST_IRQ_SET_UNUSED_I0_D0			6
+#define TDA18272_WIDTH_IRQ_SET_UNUSED_I0_D0			1
+#define TDA18272_OFFST_IRQ_SET_XTALCAL_SET			5
+#define TDA18272_WIDTH_IRQ_SET_XTALCAL_SET			1
+#define TDA18272_OFFST_IRQ_SET_MSM_RSSI_SET			4
+#define TDA18272_WIDTH_IRQ_SET_MSM_RSSI_SET			1
+#define TDA18272_OFFST_IRQ_SET_MSM_LOCALC_SET			3
+#define TDA18272_WIDTH_IRQ_SET_MSM_LOCALC_SET			1
+#define TDA18272_OFFST_IRQ_SET_MSM_RFCAL_SET			2
+#define TDA18272_WIDTH_IRQ_SET_MSM_RFCAL_SET			1
+#define TDA18272_OFFST_IRQ_SET_MSM_IRCAL_SET			1
+#define TDA18272_WIDTH_IRQ_SET_MSM_IRCAL_SET			1
+#define TDA18272_OFFST_IRQ_SET_MSM_RCCAL_SET			0
+#define TDA18272_WIDTH_IRQ_SET_MSM_RCCAL_SET			1
+
+#define TDA18272_AGC1_BYTE_1					0x0c
+#define TDA18272_OFFST_AGC1_BYTE_1_LT_ENABLE			7
+#define TDA18272_WIDTH_AGC1_BYTE_1_LT_ENABLE			1
+#define TDA18272_OFFST_AGC1_BYTE_1_AGC1_6_15DB			6
+#define TDA18272_WIDTH_AGC1_BYTE_1_AGC1_6_15DB			1
+#define TDA18272_OFFST_AGC1_BYTE_1_AGC1_TOP			0
+#define TDA18272_WIDTH_AGC1_BYTE_1_AGC1_TOP			4
+
+#define TDA18272_AGC2_BYTE_1					0x0d
+#define TDA18272_OFFST_AGC2_BYTE_1_UNUSED_I0_D0			5
+#define TDA18272_WIDTH_AGC2_BYTE_1_UNUSED_I0_D0			3
+#define TDA18272_OFFST_AGC2_BYTE_1_AGC2_TOP			0
+#define TDA18272_WIDTH_AGC2_BYTE_1_AGC2_TOP			5
+
+#define TDA18272_AGCK_BYTE_1					0x0e
+#define TDA18272_OFFST_AGC1_BYTE_1_AGCs_UP_STEP_ASYM		6
+#define TDA18272_WIDTH_AGC1_BYTE_1_AGCs_UP_STEP_ASYM		2
+#define TDA18272_OFFST_AGCK_BYTE_1_AGCs_UP_STEP			5
+#define TDA18272_WIDTH_AGCK_BYTE_1_AGCs_UP_STEP			1
+#define TDA18272_OFFST_AGCK_BYTE_1_PULSE_SHAPER_DISABLE		4
+#define TDA18272_WIDTH_AGCK_BYTE_1_PULSE_SHAPER_DISABLE		1
+#define TDA18272_OFFST_AGCK_BYTE_1_AGCK_STEP			2
+#define TDA18272_WIDTH_AGCK_BYTE_1_AGCK_STEP			2
+#define TDA18272_OFFST_AGCK_BYTE_1_AGCK_MODE			0
+#define TDA18272_WIDTH_AGCK_BYTE_1_AGCK_MODE			2
+
+#define TDA18272_RFAGC_BYTE_1					0x0f
+#define TDA18272_OFFST_RFAGC_BYTE_1_PD_RFAGC_ADAPT		7
+#define TDA18272_WIDTH_RFAGC_BYTE_1_PD_RFAGC_ADAPT		1
+#define TDA18272_OFFST_RFAGC_BYTE_1_RFAGC_ADAPT_TOP		5
+#define TDA18272_WIDTH_RFAGC_BYTE_1_RFAGC_ADAPT_TOP		2
+#define TDA18272_OFFST_RFAGC_BYTE_1_RF_ATTEN_3DB		3
+#define TDA18272_WIDTH_RFAGC_BYTE_1_RF_ATTEN_3DB		1
+#define TDA18272_OFFST_RFAGC_BYTE_1_AGC3_TOP			0
+#define TDA18272_WIDTH_RFAGC_BYTE_1_AGC3_TOP			3
+
+#define TDA18272_IRMIXER_BYTE_1					0x10
+#define TDA18272_OFFST_IRMIXER_BYTE_1_AGC4_TOP			0
+#define TDA18272_WIDTH_IRMIXER_BYTE_1_AGC4_TOP			4
+
+#define TDA18272_AGC5_BYTE_1					0x11
+#define TDA18272_OFFST_AGC5_BYTE_1_AGC5_TOP			0
+#define TDA18272_WIDTH_AGC5_BYTE_1_AGC5_TOP			4
+#define TDA18272_OFFST_AGC5_BYTE_1_AGC5_HPF			4
+#define TDA18272_WIDTH_AGC5_BYTE_1_AGC5_HPF			1
+#define TDA18272_OFFST_AGC5_BYTE_1_AGCs_DO_STEP_ASYM		5
+#define TDA18272_WIDTH_AGC5_BYTE_1_AGCs_DO_STEP_ASYM		2
+
+
+#define TDA18272_IFAGC						0x12
+#define TDA18272_OFFST_IFAGC_IF_LEVEL				0
+#define TDA18272_WIDTH_IFAGC_IF_LEVEL				3
+
+#define TDA18272_IF_BYTE_1					0x13
+#define TDA18272_OFFST_IF_BYTE_1_IF_HP_FC			6
+#define TDA18272_WIDTH_IF_BYTE_1_IF_HP_FC			2
+#define TDA18272_OFFST_IF_BYTE_1_IF_NOTCH			5
+#define TDA18272_WIDTH_IF_BYTE_1_IF_NOTCH			1
+#define TDA18272_OFFST_IF_BYTE_1_LP_FC_OFFSET			3
+#define TDA18272_WIDTH_IF_BYTE_1_LP_FC_OFFSET			2
+#define TDA18272_OFFST_IF_BYTE_1_LP_FC				0
+#define TDA18272_WIDTH_IF_BYTE_1_LP_FC				3
+
+#define TDA18272_REFERENCE					0x14
+#define TDA18272_OFFST_REFERENCE_XTOUT				0
+#define TDA18272_WIDTH_REFERENCE_XTOUT				2
+#define TDA18272_OFFST_REFERENCE_DIGITAL_CLOCK			6
+#define TDA18272_WIDTH_REFERENCE_DIGITAL_CLOCK			1
+
+#define TDA18272_IF_FREQUENCY					0x15
+#define TDA18272_OFFST_IF_FREQUENCY_IF_FREQ			0
+#define TDA18272_WIDTH_IF_FREQUENCY_IF_FREQ			8
+
+#define TDA18272_RF_FREQUENCY_BYTE_1				0x16
+#define TDA18272_OFFST_RF_FREQUENCY_BYTE_1_RF_FREQ		0
+#define TDA18272_WIDTH_RF_FREQUENCY_BYTE_1_RF_FREQ		4
+
+#define TDA18272_RF_FREQUENCY_BYTE_2				0x17
+#define TDA18272_OFFST_RF_FREQUENCY_BYTE_2_RF_FREQ		0
+#define TDA18272_WIDTH_RF_FREQUENCY_BYTE_2_RF_FREQ		8
+
+#define TDA18272_RF_FREQUENCY_BYTE_3				0x18
+#define TDA18272_OFFST_RF_FREQUENCY_BYTE_3_RF_FREQ		0
+#define TDA18272_WIDTH_RF_FREQUENCY_BYTE_3_RF_FREQ		8
+
+#define TDA18272_MSM_BYTE_1					0x19
+#define TDA18272_OFFST_MSM_BYTE_1_POWER_MEAS			7
+#define TDA18272_WIDTH_MSM_BYTE_1_POWER_MEAS			1
+#define TDA18272_OFFST_MSM_BYTE_1_RF_CAL_AV			6
+#define TDA18272_WIDTH_MSM_BYTE_1_RF_CAL_AV			1
+#define TDA18272_OFFST_MSM_BYTE_1_RF_CAL			5
+#define TDA18272_WIDTH_MSM_BYTE_1_RF_CAL			1
+#define TDA18272_OFFST_MSM_BYTE_1_IR_CAL			3
+#define TDA18272_WIDTH_MSM_BYTE_1_IR_CAL			2
+#define TDA18272_OFFST_MSM_BYTE_1_RC_CAL			1
+#define TDA18272_WIDTH_MSM_BYTE_1_RC_CAL			1
+#define TDA18272_OFFST_MSM_BYTE_1_CALC_PLL			0
+#define TDA18272_WIDTH_MSM_BYTE_1_CALC_PLL			1
+
+#define TDA18272_MSM_BYTE_2					0x1a
+#define TDA18272_OFFST_MSM_BYTE_2_MSM_LAUNCH			0
+#define TDA18272_WIDTH_MSM_BYTE_2_MSM_LAUNCH			1
+
+#define TDA18272_PSM_BYTE_1					0x1b
+#define TDA18272_OFFST_PSM_BYTE_1_PSM_AGC1			6
+#define TDA18272_WIDTH_PSM_BYTE_1_PSM_AGC1			2
+#define TDA18272_OFFST_PSM_BYTE_1_PSM_STOB			5
+#define TDA18272_WIDTH_PSM_BYTE_1_PSM_STOB			1
+#define TDA18272_OFFST_PSM_BYTE_1_PSMRFPOLY			4
+#define TDA18272_WIDTH_PSM_BYTE_1_PSMRFPOLY			1
+#define TDA18272_OFFST_PSM_BYTE_1_PSM_MIXER			3
+#define TDA18272_WIDTH_PSM_BYTE_1_PSM_MIXER			1
+#define TDA18272_OFFST_PSM_BYTE_1_PSM_IFPOLY			2
+#define TDA18272_WIDTH_PSM_BYTE_1_PSM_IFPOLY			1
+#define TDA18272_OFFST_PSM_BYTE_1_PSM_LODRIVER			0
+#define TDA18272_WIDTH_PSM_BYTE_1_PSM_LODRIVER			2
+
+#define TDA18272_DCC_BYTE_1					0x1c
+#define TDA18272_OFFST_DCC_BYTE_1_DCC_BYPASS			7
+#define TDA18272_WIDTH_DCC_BYTE_1_DCC_BYPASS			1
+#define TDA18272_OFFST_DCC_BYTE_1_DCC_SLOW			6
+#define TDA18272_WIDTH_DCC_BYTE_1_DCC_SLOW			1
+#define TDA18272_OFFST_DCC_BYTE_1_DCC_PSM			5
+#define TDA18272_WIDTH_DCC_BYTE_1_DCC_PSM			1
+#define TDA18272_OFFST_DCC_BYTE_1_UNUSED_I0_D0			0
+#define TDA18272_WIDTH_DCC_BYTE_1_UNUSED_I0_D0			5
+
+#define TDA18272_FLO_MAX_BYTE					0x1d
+#define TDA18272_OFFST_FLO_MAX_BYTE_UNUSED_I0_D0		6
+#define TDA18272_WIDTH_FLO_MAX_BYTE_UNUSED_I0_D0		2
+#define TDA18272_OFFST_FLO_MAX_BYTE_FMAX_LO			0
+#define TDA18272_WIDTH_FLO_MAX_BYTE_FMAX_LO			6
+
+#define TDA18272_IR_CAL_BYTE_1					0x1e
+#define TDA18272_OFFST_IR_CAL_BYTE_1_IR_LOOP			6
+#define TDA18272_WIDTH_IR_CAL_BYTE_1_IR_LOOP			2
+#define TDA18272_OFFST_IR_CAL_BYTE_1_IR_TARGET			3
+#define TDA18272_WIDTH_IR_CAL_BYTE_1_IR_TARGET			3
+#define TDA18272_OFFST_IR_CAL_BYTE_1_IR_GSTEP			0
+#define TDA18272_WIDTH_IR_CAL_BYTE_1_IR_GSTEP			3
+
+#define TDA18272_IR_CAL_BYTE_2					0x1f
+#define TDA18272_OFFST_IR_CAL_BYTE_2_IR_CORR_BOOST		7
+#define TDA18272_WIDTH_IR_CAL_BYTE_2_IR_CORR_BOOST		1
+#define TDA18272_OFFST_IR_CAL_BYTE_2_IR_FREQLOW_SEL		6
+#define TDA18272_WIDTH_IR_CAL_BYTE_2_IR_FREQLOW_SEL		1
+#define TDA18272_OFFST_IR_CAL_BYTE_2_IR_MODE_RAM_STORE		5
+#define TDA18272_WIDTH_IR_CAL_BYTE_2_IR_MODE_RAM_STORE		1
+#define TDA18272_OFFST_IR_CAL_BYTE_2_IR_FREQLOW			0
+#define TDA18272_WIDTH_IR_CAL_BYTE_2_IR_FREQLOW			5
+
+#define TDA18272_IR_CAL_BYTE_3					0x20
+#define TDA18272_OFFST_IR_CAL_BYTE_3_UNUSED_I0_D0		5
+#define TDA18272_WIDTH_IR_CAL_BYTE_3_UNUSED_I0_D0		3
+#define TDA18272_OFFST_IR_CAL_BYTE_3_IR_FREQMID			0
+#define TDA18272_WIDTH_IR_CAL_BYTE_3_IR_FREQMID			5
+
+#define TDA18272_IR_CAL_BYTE_4					0x21
+#define TDA18272_OFFST_IR_CAL_BYTE_4_UNUSED_I0_D0		6
+#define TDA18272_WIDTH_IR_CAL_BYTE_4_UNUSED_I0_D0		2
+#define TDA18272_OFFST_IR_CAL_BYTE_4_COARSE_IR_FREQHIGH		5
+#define TDA18272_WIDTH_IR_CAL_BYTE_4_COARSE_IR_FREQHIGH		1
+#define TDA18272_OFFST_IR_CAL_BYTE_4_IR_FREQHIGH		0
+#define TDA18272_WIDTH_IR_CAL_BYTE_4_IR_FREQHIGH		5
+
+#define TDA18272_VSYNC_MGT					0x22
+#define TDA18272_OFFST_VSYNC_MGT_PD_VSYNC_MGT			7
+#define TDA18272_WIDTH_VSYNC_MGT_PD_VSYNC_MGT			1
+#define TDA18272_OFFST_VSYNC_MGT_PD_OVLD			6
+#define TDA18272_WIDTH_VSYNC_MGT_PD_OVLD			1
+#define TDA18272_OFFST_VSYNC_MGT_PD_UDLD			5
+#define TDA18272_WIDTH_VSYNC_MGT_PD_UDLD			1
+#define TDA18272_OFFST_VSYNC_MGT_AGC_OVLD_TOP			2
+#define TDA18272_WIDTH_VSYNC_MGT_AGC_OVLD_TOP			3
+#define TDA18272_OFFST_VSYNC_MGT_AGC_OVLD_TIMER			0
+#define TDA18272_WIDTH_VSYNC_MGT_AGC_OVLD_TIMER			2
+
+#define TDA18272_IRMIXER_BYTE_2					0x23
+#define TDA18272_OFFST_IRMIXER_BYTE_2_HI_PASS			1
+#define TDA18272_WIDTH_IRMIXER_BYTE_2_HI_PASS			1
+#define TDA18272_OFFST_IRMIXER_BYTE_2_DC_NOTCH			0
+#define TDA18272_WIDTH_IRMIXER_BYTE_2_DC_NOTCH			1
+
+#define TDA18272_AGC1_BYTE_2					0x24
+#define TDA18272_OFFST_AGC1_BYTE_2_AGC1_LOOP_OFF		7
+#define TDA18272_WIDTH_AGC1_BYTE_2_AGC1_LOOP_OFF		1
+#define TDA18272_OFFST_AGC1_BYTE_2_AGC1_DO_STEP			5
+#define TDA18272_WIDTH_AGC1_BYTE_2_AGC1_DO_STEP			2
+#define TDA18272_OFFST_AGC1_BYTE_2_FORCE_AGC1_GAIN		4
+#define TDA18272_WIDTH_AGC1_BYTE_2_FORCE_AGC1_GAIN		1
+#define TDA18272_OFFST_AGC1_BYTE_2_AGC1_GAIN			0
+#define TDA18272_WIDTH_AGC1_BYTE_2_AGC1_GAIN			4
+
+#define TDA18272_AGC5_BYTE_2					0x25
+#define TDA18272_OFFST_AGC5_BYTE_2_AGC5_LOOP_OFF		7
+#define TDA18272_WIDTH_AGC5_BYTE_2_AGC5_LOOP_OFF		1
+#define TDA18272_OFFST_AGC5_BYTE_2_AGC5_DO_STEP			5
+#define TDA18272_WIDTH_AGC5_BYTE_2_AGC5_DO_STEP			2
+#define TDA18272_OFFST_AGC5_BYTE_2_UNUSED_I1_D0			4
+#define TDA18272_WIDTH_AGC5_BYTE_2_UNUSED_I1_D0			1
+#define TDA18272_OFFST_AGC5_BYTE_2_FORCE_AGC5_GAIN		3
+#define TDA18272_WIDTH_AGC5_BYTE_2_FORCE_AGC5_GAIN		1
+#define TDA18272_OFFST_AGC5_BYTE_2_UNUSED_I0_D0			2
+#define TDA18272_WIDTH_AGC5_BYTE_2_UNUSED_I0_D0			1
+#define TDA18272_OFFST_AGC5_BYTE_2_AGC5_GAIN			0
+#define TDA18272_WIDTH_AGC5_BYTE_2_AGC5_GAIN			2
+
+#define TDA18272_RF_CAL_BYTE_1					0x26
+#define TDA18272_OFFST_RF_CAL_BYTE_1_RFCAL_OFFSET_CPROG0	6
+#define TDA18272_WIDTH_RF_CAL_BYTE_1_RFCAL_OFFSET_CPROG0	2
+#define TDA18272_OFFST_RF_CAL_BYTE_1_RFCAL_FREQ0		4
+#define TDA18272_WIDTH_RF_CAL_BYTE_1_RFCAL_FREQ0		2
+#define TDA18272_OFFST_RF_CAL_BYTE_1_RFCAL_OFFSET_CPROG1	2
+#define TDA18272_WIDTH_RF_CAL_BYTE_1_RFCAL_OFFSET_CPROG1	2
+#define TDA18272_OFFST_RF_CAL_BYTE_1_RFCAL_FREQ1		0
+#define TDA18272_WIDTH_RF_CAL_BYTE_1_RFCAL_FREQ1		2
+
+#define TDA18272_RF_CAL_BYTE_2		0x27
+#define TDA18272_OFFST_RF_CAL_BYTE_2_RFCAL_OFFSET_CPROG2	6
+#define TDA18272_WIDTH_RF_CAL_BYTE_2_RFCAL_OFFSET_CPROG2	2
+#define TDA18272_OFFST_RF_CAL_BYTE_2_RFCAL_FREQ2		4
+#define TDA18272_WIDTH_RF_CAL_BYTE_2_RFCAL_FREQ2		2
+#define TDA18272_OFFST_RF_CAL_BYTE_2_RFCAL_OFFSET_CPROG3	2
+#define TDA18272_WIDTH_RF_CAL_BYTE_2_RFCAL_OFFSET_CPROG3	2
+#define TDA18272_OFFST_RF_CAL_BYTE_2_RFCAL_FREQ3		0
+#define TDA18272_WIDTH_RF_CAL_BYTE_2_RFCAL_FREQ3		2
+
+#define TDA18272_RF_CAL_BYTE_3					0x28
+#define TDA18272_OFFST_RF_CAL_BYTE_3_RFCAL_OFFSET_CPROG4	6
+#define TDA18272_WIDTH_RF_CAL_BYTE_3_RFCAL_OFFSET_CPROG4	2
+#define TDA18272_OFFST_RF_CAL_BYTE_3_RFCAL_FREQ4		4
+#define TDA18272_WIDTH_RF_CAL_BYTE_3_RFCAL_FREQ4		2
+#define TDA18272_OFFST_RF_CAL_BYTE_3_RFCAL_OFFSET_CPROG5	2
+#define TDA18272_WIDTH_RF_CAL_BYTE_3_RFCAL_OFFSET_CPROG5	2
+#define TDA18272_OFFST_RF_CAL_BYTE_3_RFCAL_FREQ5		0
+#define TDA18272_WIDTH_RF_CAL_BYTE_3_RFCAL_FREQ5		2
+
+#define TDA18272_RF_CAL_BYTE_4					0x29
+#define TDA18272_OFFST_RF_CAL_BYTE_4_RFCAL_OFFSET_CPROG6	6
+#define TDA18272_WIDTH_RF_CAL_BYTE_4_RFCAL_OFFSET_CPROG6	2
+#define TDA18272_OFFST_RF_CAL_BYTE_4_RFCAL_FREQ6		4
+#define TDA18272_WIDTH_RF_CAL_BYTE_4_RFCAL_FREQ6		2
+#define TDA18272_OFFST_RF_CAL_BYTE_4_RFCAL_OFFSET_CPROG7	2
+#define TDA18272_WIDTH_RF_CAL_BYTE_4_RFCAL_OFFSET_CPROG7	2
+#define TDA18272_OFFST_RF_CAL_BYTE_4_RFCAL_FREQ7		0
+#define TDA18272_WIDTH_RF_CAL_BYTE_4_RFCAL_FREQ7		2
+
+#define TDA18272_RF_CAL_BYTE_5					0x2a
+#define TDA18272_OFFST_RF_CAL_BYTE_5_RFCAL_OFFSET_CPROG		6
+#define TDA18272_WIDTH_RF_CAL_BYTE_5_RFCAL_OFFSET_CPROG8	2
+#define TDA18272_OFFST_RF_CAL_BYTE_5_RFCAL_FREQ8		4
+#define TDA18272_WIDTH_RF_CAL_BYTE_5_RFCAL_FREQ8		2
+#define TDA18272_OFFST_RF_CAL_BYTE_5_RFCAL_OFFSET_CPROG9	2
+#define TDA18272_WIDTH_RF_CAL_BYTE_5_RFCAL_OFFSET_CPROG9	2
+#define TDA18272_OFFST_RF_CAL_BYTE_5_RFCAL_FREQ9		0
+#define TDA18272_WIDTH_RF_CAL_BYTE_5_RFCAL_FREQ9		2
+
+#define TDA18272_RF_CAL_BYTE_6					0x2b
+#define TDA18272_OFFST_RF_CAL_BYTE_6_RFCAL_OFFSET_CPROG10	6
+#define TDA18272_WIDTH_RF_CAL_BYTE_6_RFCAL_OFFSET_CPROG10	2
+#define TDA18272_OFFST_RF_CAL_BYTE_6_RFCAL_FREQ10		4
+#define TDA18272_WIDTH_RF_CAL_BYTE_6_RFCAL_FREQ10		2
+#define TDA18272_OFFST_RF_CAL_BYTE_6_RFCAL_OFFSET_CPROG11	2
+#define TDA18272_WIDTH_RF_CAL_BYTE_6_RFCAL_OFFSET_CPROG11	2
+#define TDA18272_OFFST_RF_CAL_BYTE_6_RFCAL_FREQ11		0
+#define TDA18272_WIDTH_RF_CAL_BYTE_6_RFCAL_FREQ11		2
+
+#define TDA18272_RF_FILTER_BYTE_1				0x2c
+#define TDA18272_OFFST_RF_FILTER_BYTE_1_RF_FILTER_BYPASS	7
+#define TDA18272_WIDTH_RF_FILTER_BYTE_1_RF_FILTER_BYPASS	1
+#define TDA18272_OFFST_RF_FILTER_BYTE_1_UNUSED_I0_D0		6
+#define TDA18272_WIDTH_RF_FILTER_BYTE_1_UNUSED_I0_D0		1
+#define TDA18272_OFFST_RF_FILTER_BYTE_1_AGC2_LOOP_OFF		5
+#define TDA18272_WIDTH_RF_FILTER_BYTE_1_AGC2_LOOP_OFF		1
+#define TDA18272_OFFST_RF_FILTER_BYTE_1_FORCE_AGC2_GAIN		4
+#define TDA18272_WIDTH_RF_FILTER_BYTE_1_FORCE_AGC2_GAIN		1
+#define TDA18272_OFFST_RF_FILTER_BYTE_1_RF_FILTER_GV		2
+#define TDA18272_WIDTH_RF_FILTER_BYTE_1_RF_FILTER_GV		2
+#define TDA18272_OFFST_RF_FILTER_BYTE_1_RF_FILTER_BAND		0
+#define TDA18272_WIDTH_RF_FILTER_BYTE_1_RF_FILTER_BAND		2
+
+#define TDA18272_RF_FILTER_BYTE_2				0x2d
+#define TDA18272_OFFST_RF_FILTER_BYTE_2_RF_FILTER_CAP		0
+#define TDA18272_WIDTH_RF_FILTER_BYTE_2_RF_FILTER_CAP		8
+
+#define TDA18272_RF_FILTER_BYTE_3				0x2e
+#define TDA18272_OFFST_RF_FILTER_BYTE_3_AGC2_DO_STEP		6
+#define TDA18272_WIDTH_RF_FILTER_BYTE_3_AGC2_DO_STEP		2
+#define TDA18272_OFFST_RF_FILTER_BYTE_3_GAIN_TAPER		0
+#define TDA18272_WIDTH_RF_FILTER_BYTE_3_GAIN_TAPER		6
+
+#define TDA18272_RF_BANDPASS_FILTER				0x2f
+#define TDA18272_OFFST_RF_BANDPASS_FILTER_RF_BPF_BYPASS		7
+#define TDA18272_WIDTH_RF_BANDPASS_FILTER_RF_BPF_BYPASS		1
+#define TDA18272_OFFST_RF_BANDPASS_FILTER_UNUSED_I0_D0		3
+#define TDA18272_WIDTH_RF_BANDPASS_FILTER_UNUSED_I0_D0		4
+#define TDA18272_OFFST_RF_BANDPASS_FILTER_RF_BPF		0
+#define TDA18272_WIDTH_RF_BANDPASS_FILTER_RF_BPF		3
+
+#define TDA18272_CP_CURRENT					0x30
+#define TDA18272_OFFST_CP_CURRENT_UNUSED_I0_D0			7
+#define TDA18272_WIDTH_CP_CURRENT_UNUSED_I0_D0			1
+#define TDA18272_OFFST_CP_CURRENT_N_CP_CURRENT			0
+#define TDA18272_WIDTH_CP_CURRENT_N_CP_CURRENT			7
+
+#define TDA18272_AGC_DET_OUT					0x31
+#define TDA18272_OFFST_AGC_DET_OUT_UP_AGC5			7
+#define TDA18272_WIDTH_AGC_DET_OUT_UP_AGC5			1
+#define TDA18272_OFFST_AGC_DET_OUT_DO_AGC5			6
+#define TDA18272_WIDTH_AGC_DET_OUT_DO_AGC5			1
+#define TDA18272_OFFST_AGC_DET_OUT_UP_AGC4			5
+#define TDA18272_WIDTH_AGC_DET_OUT_UP_AGC4			1
+#define TDA18272_OFFST_AGC_DET_OUT_DO_AGC4			4
+#define TDA18272_WIDTH_AGC_DET_OUT_DO_AGC4			1
+#define TDA18272_OFFST_AGC_DET_OUT_UP_AGC2			3
+#define TDA18272_WIDTH_AGC_DET_OUT_UP_AGC2			1
+#define TDA18272_OFFST_AGC_DET_OUT_DO_AGC2			2
+#define TDA18272_WIDTH_AGC_DET_OUT_DO_AGC2			1
+#define TDA18272_OFFST_AGC_DET_OUT_UP_AGC1			1
+#define TDA18272_WIDTH_AGC_DET_OUT_UP_AGC1			1
+#define TDA18272_OFFST_AGC_DET_OUT_DO_AGC1			0
+#define TDA18272_WIDTH_AGC_DET_OUT_DO_AGC1			1
+
+#define TDA18272_RF_AGC_GAIN_BYTE_1				0x32
+#define TDA18272_OFFST_RF_AGC_GAIN_BYTE_1_RF_FILTER_GAIN	4
+#define TDA18272_WIDTH_RF_AGC_GAIN_BYTE_1_RF_FILTER_GAIN	2
+#define TDA18272_OFFST_RF_AGC_GAIN_BYTE_1_LNA_GAIN		0
+#define TDA18272_WIDTH_RF_AGC_GAIN_BYTE_1_LNA_GAIN		4
+
+#define TDA18272_RF_AGC_GAIN_BYTE_2				0x33
+#define TDA18272_OFFST_RF_AGC_GAIN_BYTE_2_TOP_AGC3_READ		0
+#define TDA18272_WIDTH_RF_AGC_GAIN_BYTE_2_TOP_AGC3_READ		3
+
+#define TDA18272_IF_AGC_GAIN					0x34
+#define TDA18272_OFFST_IF_AGC_GAIN_LPF_GAIN			3
+#define TDA18272_WIDTH_IF_AGC_GAIN_LPF_GAIN			2
+#define TDA18272_OFFST_IF_AGC_GAIN_IR_MIXER			0
+#define TDA18272_WIDTH_IF_AGC_GAIN_IR_MIXER			3
+
+#define TDA18272_POWER_BYTE_1					0x35
+#define TDA18272_OFFST_POWER_BYTE_1_RSSI			0
+#define TDA18272_WIDTH_POWER_BYTE_1_RSSI			8
+
+#define TDA18272_POWER_BYTE_2					0x36
+#define TDA18272_OFFST_POWER_BYTE_2_UNUSED_I1_D0		6
+#define TDA18272_WIDTH_POWER_BYTE_2_UNUSED_I1_D0		2
+#define TDA18272_OFFST_POWER_BYTE_2_RSSI_AV			5
+#define TDA18272_WIDTH_POWER_BYTE_2_RSSI_AV			1
+#define TDA18272_OFFST_POWER_BYTE_2_UNUSED_I0_D0		4
+#define TDA18272_WIDTH_POWER_BYTE_2_UNUSED_I0_D0		1
+#define TDA18272_OFFST_POWER_BYTE_2_RSSI_CAP_RESET_EN		3
+#define TDA18272_WIDTH_POWER_BYTE_2_RSSI_CAP_RESET_EN		1
+#define TDA18272_OFFST_POWER_BYTE_2_RSSI_CAP_VAL		2
+#define TDA18272_WIDTH_POWER_BYTE_2_RSSI_CAP_VAL		1
+#define TDA18272_OFFST_POWER_BYTE_2_RSSI_CK_SPEED		1
+#define TDA18272_WIDTH_POWER_BYTE_2_RSSI_CK_SPEED		1
+#define TDA18272_OFFST_POWER_BYTE_2_RSSI_DICHO_NOT		0
+#define TDA18272_WIDTH_POWER_BYTE_2_RSSI_DICHO_NOT		1
+
+#define TDA18272_MISC_BYTE_1					0x37
+#define TDA18272_OFFST_MISC_BYTE_1_IRQ_POLARITY			0
+#define TDA18272_WIDTH_MISC_BYTE_1_IRQ_POLARITY			1
+
+#define TDA18272_RF_CAL_LOG_1					0x38
+#define TDA18272_OFFST_RF_CAL_LOG_1				0
+#define TDA18272_WIDTH_RF_CAL_LOG_1				8
+
+#define TDA18272_RF_CAL_LOG_2					0x39
+#define TDA18272_OFFST_RF_CAL_LOG_2				0
+#define TDA18272_WIDTH_RF_CAL_LOG_2				8
+
+#define TDA18272_RF_CAL_LOG_3					0x3a
+#define TDA18272_OFFST_RF_CAL_LOG_3				0
+#define TDA18272_WIDTH_RF_CAL_LOG_3				8
+
+#define TDA18272_RF_CAL_LOG_4					0x3b
+#define TDA18272_OFFST_RF_CAL_LOG_4				0
+#define TDA18272_WIDTH_RF_CAL_LOG_4				8
+
+#define TDA18272_RF_CAL_LOG_5					0x3c
+#define TDA18272_OFFST_RF_CAL_LOG_5				0
+#define TDA18272_WIDTH_RF_CAL_LOG_5				8
+
+#define TDA18272_RF_CAL_LOG_6					0x3d
+#define TDA18272_OFFST_RF_CAL_LOG_6				0
+#define TDA18272_WIDTH_RF_CAL_LOG_6				8
+
+#define TDA18272_RF_CAL_LOG_7					0x3e
+#define TDA18272_OFFST_RF_CAL_LOG_7				0
+#define TDA18272_WIDTH_RF_CAL_LOG_7				8
+
+#define TDA18272_RF_CAL_LOG_8					0x3f
+#define TDA18272_OFFST_RF_CAL_LOG_8				0
+#define TDA18272_WIDTH_RF_CAL_LOG_8				8
+
+#define TDA18272_RF_CAL_LOG_9					0x40
+#define TDA18272_OFFST_RF_CAL_LOG_9				0
+#define TDA18272_WIDTH_RF_CAL_LOG_9				8
+
+#define TDA18272_RF_CAL_LOG_10					0x41
+#define TDA18272_OFFST_RF_CAL_LOG_10				0
+#define TDA18272_WIDTH_RF_CAL_LOG_10				8
+
+#define TDA18272_RF_CAL_LOG_11					0x42
+#define TDA18272_OFFST_RF_CAL_LOG_11				0
+#define TDA18272_WIDTH_RF_CAL_LOG_11				8
+
+#define TDA18272_RF_CAL_LOG_12					0x43
+#define TDA18272_OFFST_RF_CAL_LOG_12				0
+#define TDA18272_WIDTH_RF_CAL_LOG_12				8
+
+#endif /* __TDA18272_REG_H */
diff --git a/drivers/media/usb/Kconfig b/drivers/media/usb/Kconfig
index 0a7d520..7cac453 100644
--- a/drivers/media/usb/Kconfig
+++ b/drivers/media/usb/Kconfig
@@ -1,6 +1,7 @@
+if USB && MEDIA_SUPPORT
+
 menuconfig MEDIA_USB_SUPPORT
 	bool "Media USB Adapters"
-	depends on USB && MEDIA_SUPPORT
 	help
 	  Enable media drivers for USB bus.
 	  If you have such devices, say Y.
@@ -52,3 +53,4 @@
 endif
 
 endif #MEDIA_USB_SUPPORT
+endif #USB
diff --git a/drivers/media/usb/cx231xx/Kconfig b/drivers/media/usb/cx231xx/Kconfig
index 86feeea..3d55bdf 100644
--- a/drivers/media/usb/cx231xx/Kconfig
+++ b/drivers/media/usb/cx231xx/Kconfig
@@ -44,7 +44,11 @@
 	select VIDEOBUF_DVB
 	select MEDIA_TUNER_XC5000 if MEDIA_SUBDRV_AUTOSELECT
 	select MEDIA_TUNER_TDA18271 if MEDIA_SUBDRV_AUTOSELECT
+	select MEDIA_TUNER_TDA18272 if MEDIA_SUBDRV_AUTOSELECT
 	select DVB_MB86A20S if MEDIA_SUBDRV_AUTOSELECT
+	select DVB_LGDT3305 if MEDIA_SUBDRV_AUTOSELECT
+	select DVB_LGDT3306A if MEDIA_SUBDRV_AUTOSELECT
+	select MEDIA_TUNER_SI2157 if MEDIA_SUBDRV_AUTOSELECT
 
 	---help---
 	  This adds support for DVB cards based on the
diff --git a/drivers/media/usb/cx231xx/cx231xx-avcore.c b/drivers/media/usb/cx231xx/cx231xx-avcore.c
index 235ba65..9034d24 100644
--- a/drivers/media/usb/cx231xx/cx231xx-avcore.c
+++ b/drivers/media/usb/cx231xx/cx231xx-avcore.c
@@ -2264,6 +2264,8 @@
 	case POLARIS_AVMODE_ANALOGT_TV:
 
 		tmp |= PWR_DEMOD_EN;
+		if (CX231XX_BOARD_AVERMEDIA_H837B == dev->model)
+			tmp &= ~PWR_DEMOD_EN;
 		tmp |= (I2C_DEMOD_EN);
 		value[0] = (u8) tmp;
 		value[1] = (u8) (tmp >> 8);
@@ -2371,8 +2373,19 @@
 		status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
 						PWR_CTL_EN, value, 4);
 		msleep(PWR_SLEEP_INTERVAL);
-
-		if (!(tmp & PWR_DEMOD_EN)) {
+		if (is_model_avermedia_h837_series(dev->model)) {
+			if (CX231XX_BOARD_AVERMEDIA_H837B == dev->model)
+				tmp |= PWR_DEMOD_EN;
+			else
+				tmp &= ~PWR_DEMOD_EN;
+			value[0] = (u8) tmp;
+			value[1] = (u8) (tmp >> 8);
+			value[2] = (u8) (tmp >> 16);
+			value[3] = (u8) (tmp >> 24);
+			status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
+							PWR_CTL_EN, value, 4);
+			msleep(5 * PWR_SLEEP_INTERVAL);
+		} else if (!(tmp & PWR_DEMOD_EN)) {
 			tmp |= PWR_DEMOD_EN;
 			value[0] = (u8) tmp;
 			value[1] = (u8) (tmp >> 8);
@@ -2402,6 +2415,21 @@
 		}
 		break;
 
+	case POLARIS_AVMODE_DEFAULT:
+		if (is_model_avermedia_h837_series(dev->model)) {
+			tmp &= ~PWR_MODE_MASK;
+			if( CX231XX_BOARD_AVERMEDIA_H837A==dev->model ||
+			    CX231XX_BOARD_AVERMEDIA_H837M==dev->model )
+				tmp |= PWR_DEMOD_EN;
+			value[0] = (u8) tmp;
+			value[1] = (u8) (tmp >> 8);
+			value[2] = (u8) (tmp >> 16);
+			value[3] = (u8) (tmp >> 24);
+			cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4);
+			msleep(PWR_SLEEP_INTERVAL);
+			return 0;
+		}
+
 	default:
 		break;
 	}
@@ -2567,7 +2595,11 @@
 		} else {
 			cx231xx_info(" BDA\n");
 			status = cx231xx_mode_register(dev, TS_MODE_REG, 0x101);
-			status = cx231xx_mode_register(dev, TS1_CFG_REG, 0x010);
+			if (is_model_avermedia_h837_series(dev->model)) {
+				status = cx231xx_mode_register(dev, TS1_CFG_REG, 0x408);
+			} else {
+				status = cx231xx_mode_register(dev, TS1_CFG_REG, 0x010);
+			}
 		}
 			break;
 
diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
index 13249e5..e416e94 100644
--- a/drivers/media/usb/cx231xx/cx231xx-cards.c
+++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
@@ -42,7 +42,7 @@
 module_param(tuner, int, 0444);
 MODULE_PARM_DESC(tuner, "tuner type");
 
-static int transfer_mode = 1;
+static int transfer_mode = 0;
 module_param(transfer_mode, int, 0444);
 MODULE_PARM_DESC(transfer_mode, "transfer mode (1-ISO or 0-BULK)");
 
@@ -667,6 +667,147 @@
 			}
 		},
 	},
+	[CX231XX_BOARD_HAUPPAUGE_955Q] = {
+		.name = "Hauppauge WinTV-HVR-955Q (111401)",
+		.tuner_type = TUNER_ABSENT,
+		.tuner_addr = 0x60,
+		.tuner_gpio = RDE250_XCV_TUNER,
+		.tuner_sif_gpio = 0x05,
+		.tuner_scl_gpio = 0x1a,
+		.tuner_sda_gpio = 0x1b,
+		.decoder = CX231XX_AVDECODER,
+		.output_mode = OUT_MODE_VIP11,
+		.demod_xfer_mode = 0,
+		.ctl_pin_status_mask = 0xFFFFFFC4,
+		.agc_analog_digital_select_gpio = 0x0c,
+		.gpio_pin_status_mask = 0x4001000,
+		.tuner_i2c_master = 1,
+		.demod_i2c_master = 2,
+		.has_dvb = 1,
+		.demod_addr = 0x0e,
+		.norm = V4L2_STD_NTSC,
+
+		.input = {{
+			.type = CX231XX_VMUX_TELEVISION,
+			.vmux = CX231XX_VIN_3_1,
+			.amux = CX231XX_AMUX_VIDEO,
+			.gpio = NULL,
+		}, {
+			.type = CX231XX_VMUX_COMPOSITE1,
+			.vmux = CX231XX_VIN_2_1,
+			.amux = CX231XX_AMUX_LINE_IN,
+			.gpio = NULL,
+		}, {
+			.type = CX231XX_VMUX_SVIDEO,
+			.vmux = CX231XX_VIN_1_1 |
+				(CX231XX_VIN_1_2 << 8) |
+				CX25840_SVIDEO_ON,
+			.amux = CX231XX_AMUX_LINE_IN,
+			.gpio = NULL,
+		} },
+	},
+	[CX231XX_BOARD_AVERMEDIA_H837A] = {
+		.name = "AVerMedia H837-A USB Hybrid ATSC/QAM",
+		.tuner_type = TUNER_ABSENT,
+		.tuner_addr = 0x60,
+		.tuner_sif_gpio = 0x05,
+		.demod_xfer_mode = 0,
+		.ctl_pin_status_mask = 0xFFFFFFC4,
+		.agc_analog_digital_select_gpio = 0x1c,
+		.gpio_pin_status_mask = 0x4001000,
+		.tuner_i2c_master = 2,
+		.demod_i2c_master = 1,
+		.has_dvb = 1,
+		.norm = V4L2_STD_NTSC,
+
+		.input = {{
+			.type = CX231XX_VMUX_TELEVISION,
+			.vmux = CX231XX_VIN_3_1,
+			.amux = CX231XX_AMUX_VIDEO,
+			.gpio = 0,
+		}, {
+			.type = CX231XX_VMUX_COMPOSITE1,
+			.vmux = CX231XX_VIN_2_1,
+			.amux = CX231XX_AMUX_LINE_IN,
+			.gpio = 0,
+		}, {
+			.type = CX231XX_VMUX_SVIDEO,
+			.vmux = CX231XX_VIN_1_1 |
+			        (CX231XX_VIN_1_2 << 8) |
+				CX25840_SVIDEO_ON,
+			.amux = CX231XX_AMUX_LINE_IN,
+			.gpio = 0,
+		}
+		},
+	},
+	[CX231XX_BOARD_AVERMEDIA_H837B] = {
+		.name = "AVerMedia H837-B USB Hybrid ATSC/QAM",
+		.tuner_type = TUNER_ABSENT,
+		.tuner_addr = 0x60,
+		.tuner_sif_gpio = 0x05,
+		.demod_xfer_mode = 0,
+		.ctl_pin_status_mask = 0xFFFFFFC4,
+		.agc_analog_digital_select_gpio = 0x1c,
+		.gpio_pin_status_mask = 0x4001000,
+		.tuner_i2c_master = 2,
+		.demod_i2c_master = 1,
+		.has_dvb = 1,
+		.norm = V4L2_STD_NTSC,
+
+		.input = {{
+			.type = CX231XX_VMUX_TELEVISION,
+			.vmux = CX231XX_VIN_3_1,
+			.amux = CX231XX_AMUX_VIDEO,
+			.gpio = 0,
+		}, {
+			.type = CX231XX_VMUX_COMPOSITE1,
+			.vmux = CX231XX_VIN_2_1,
+			.amux = CX231XX_AMUX_LINE_IN,
+			.gpio = 0,
+		}, {
+			.type = CX231XX_VMUX_SVIDEO,
+			.vmux = CX231XX_VIN_1_1 |
+			        (CX231XX_VIN_1_2 << 8) |
+				CX25840_SVIDEO_ON,
+			.amux = CX231XX_AMUX_LINE_IN,
+			.gpio = 0,
+		}
+		},
+	},
+	[CX231XX_BOARD_AVERMEDIA_H837M] = {
+		.name = "AVerMedia H837-M USB Hybrid ATSC/QAM",
+		.tuner_type = TUNER_ABSENT,
+		.tuner_addr = 0x60,
+		.tuner_sif_gpio = 0x05,
+		.demod_xfer_mode = 0,
+		.ctl_pin_status_mask = 0xFFFFFFC4,
+		.agc_analog_digital_select_gpio = 0x1c,
+		.gpio_pin_status_mask = 0x4001000,
+		.tuner_i2c_master = 2,
+		.demod_i2c_master = 1,
+		.has_dvb = 1,
+		.norm = V4L2_STD_NTSC,
+
+		.input = {{
+			.type = CX231XX_VMUX_TELEVISION,
+			.vmux = CX231XX_VIN_3_1,
+			.amux = CX231XX_AMUX_VIDEO,
+			.gpio = 0,
+		}, {
+			.type = CX231XX_VMUX_COMPOSITE1,
+			.vmux = CX231XX_VIN_2_1,
+			.amux = CX231XX_AMUX_LINE_IN,
+			.gpio = 0,
+		}, {
+			.type = CX231XX_VMUX_SVIDEO,
+			.vmux = CX231XX_VIN_1_1 |
+			        (CX231XX_VIN_1_2 << 8) |
+				CX25840_SVIDEO_ON,
+			.amux = CX231XX_AMUX_LINE_IN,
+			.gpio = 0,
+		}
+		},
+	},
 };
 const unsigned int cx231xx_bcount = ARRAY_SIZE(cx231xx_boards);
 
@@ -694,6 +835,8 @@
 	 .driver_info = CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC},
 	{USB_DEVICE(0x2040, 0xb120),
 	 .driver_info = CX231XX_BOARD_HAUPPAUGE_EXETER},
+	{USB_DEVICE(0x2040, 0xb123),
+	 .driver_info = CX231XX_BOARD_HAUPPAUGE_955Q},
 	{USB_DEVICE(0x2040, 0xb140),
 	 .driver_info = CX231XX_BOARD_HAUPPAUGE_EXETER},
 	{USB_DEVICE(0x2040, 0xc200),
@@ -710,6 +853,12 @@
 	 .driver_info = CX231XX_BOARD_ELGATO_VIDEO_CAPTURE_V2},
 	{USB_DEVICE(0x1f4d, 0x0102),
 	 .driver_info = CX231XX_BOARD_OTG102},
+	{USB_DEVICE(0x07ca, 0x0837),
+	 .driver_info = CX231XX_BOARD_AVERMEDIA_H837A},
+	{USB_DEVICE(0x07ca, 0x0837),
+	 .driver_info = CX231XX_BOARD_AVERMEDIA_H837B},
+	{USB_DEVICE(0x07ca, 0x1837),
+	 .driver_info = CX231XX_BOARD_AVERMEDIA_H837M},
 	{},
 };
 
@@ -1132,7 +1281,9 @@
 	dev->gpio_dir = 0;
 	dev->gpio_val = 0;
 	dev->xc_fw_load_done = 0;
-	dev->has_alsa_audio = 1;
+	if (!is_model_avermedia_h837_series(dev->model)) {
+		dev->has_alsa_audio = 1;
+	}
 	dev->power_mode = -1;
 	atomic_set(&dev->devlist_count, 0);
 
diff --git a/drivers/media/usb/cx231xx/cx231xx-core.c b/drivers/media/usb/cx231xx/cx231xx-core.c
index 4ba3ce0..1a509ee 100644
--- a/drivers/media/usb/cx231xx/cx231xx-core.c
+++ b/drivers/media/usb/cx231xx/cx231xx-core.c
@@ -340,6 +340,76 @@
 	return ret;
 }
 
+int cx231xx_send_h837_vendor_cmd(struct cx231xx *dev,
+				struct VENDOR_REQUEST_IN *ven_req)
+{
+	int ret;
+	int pipe = 0;
+	int unsend_size = 0;
+	u8 *pdata;
+
+	if (dev->state & DEV_DISCONNECTED)
+		return -ENODEV;
+
+	if ((ven_req->wLength > URB_MAX_CTRL_SIZE))
+		return -EINVAL;
+
+	if (ven_req->direction)
+		pipe = usb_rcvctrlpipe(dev->udev, 0);
+	else
+		pipe = usb_sndctrlpipe(dev->udev, 0);
+
+	/*
+	 * If the cx23102 read more than 4 bytes with i2c bus,
+	 * need chop to 4 byte per request
+	 */
+	if ((ven_req->wLength > 4) && (ven_req->bRequest <= VRT_GET_I2C2)) {
+		unsend_size = 0;
+		pdata = ven_req->pBuff;
+
+
+		unsend_size = ven_req->wLength;
+
+		/* the first package */
+		ven_req->wValue = ven_req->wValue & 0xFFFB;
+		ven_req->wValue = (ven_req->wValue & 0xFFBD) | 0x2;
+		ret = __usb_control_msg(dev, pipe, ven_req->bRequest,
+			ven_req->direction | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+			ven_req->wValue, ven_req->wIndex, pdata,
+			0x0004, HZ);
+		unsend_size = unsend_size - 4;
+
+		/* the middle package */
+		ven_req->wValue = (ven_req->wValue & 0xFFBD) | 0x42;
+		while (unsend_size - 4 > 0) {
+			pdata = pdata + 4;
+			ret = __usb_control_msg(dev, pipe,
+				ven_req->bRequest,
+				ven_req->direction | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+				ven_req->wValue, ven_req->wIndex, pdata,
+				0x0004, HZ);
+			unsend_size = unsend_size - 4;
+		}
+
+		/* the last package */
+		ven_req->wValue = (ven_req->wValue & 0xFFBD) | 0x40;
+		pdata = pdata + 4;
+		ret = __usb_control_msg(dev, pipe, ven_req->bRequest,
+			ven_req->direction | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+			ven_req->wValue, ven_req->wIndex, pdata,
+			unsend_size, HZ);
+	} else {
+		if (ven_req->bRequest <= VRT_GET_I2C2)
+			ven_req->wValue &= ~0x42;
+		ret = __usb_control_msg(dev, pipe, ven_req->bRequest,
+				ven_req->direction | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+				ven_req->wValue, ven_req->wIndex,
+				ven_req->pBuff, ven_req->wLength, HZ);
+	}
+
+	return ret;
+}
+
 int cx231xx_send_vendor_cmd(struct cx231xx *dev,
 				struct VENDOR_REQUEST_IN *ven_req)
 {
@@ -348,6 +418,9 @@
 	int unsend_size = 0;
 	u8 *pdata;
 
+	if (is_model_avermedia_h837_series(dev->model))
+		return cx231xx_send_h837_vendor_cmd(dev, ven_req);
+
 	if (dev->state & DEV_DISCONNECTED)
 		return -ENODEV;
 
@@ -719,7 +792,19 @@
 		case CX231XX_BOARD_CNXT_RDE_250:
 		case CX231XX_BOARD_CNXT_SHELBY:
 		case CX231XX_BOARD_CNXT_RDU_250:
-		errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 0);
+			errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 0);
+			break;
+		case CX231XX_BOARD_AVERMEDIA_H837M:
+		case CX231XX_BOARD_AVERMEDIA_H837B:
+		case CX231XX_BOARD_AVERMEDIA_H837A: {
+			cx231xx_set_power_mode(dev, POLARIS_AVMODE_DEFAULT);
+			msleep(20);
+			cx231xx_set_agc_analog_digital_mux_select(dev, 0);
+			cx231xx_set_power_mode(dev, POLARIS_AVMODE_DIGITAL);
+			msleep(50);
+			cx231xx_set_gpio_value(dev, AVERMEDIA_H837_LED_PIN, 0);
+			return 0;
+			}
 			break;
 		case CX231XX_BOARD_CNXT_RDE_253S:
 		case CX231XX_BOARD_CNXT_RDU_253S:
@@ -735,6 +820,13 @@
 	} else/* Set Analog Power mode */ {
 	/* set AGC mode to Analog */
 		switch (dev->model) {
+		case CX231XX_BOARD_AVERMEDIA_H837A:
+		case CX231XX_BOARD_AVERMEDIA_H837B:
+		case CX231XX_BOARD_AVERMEDIA_H837M:
+			cx231xx_set_agc_analog_digital_mux_select(dev, 1);
+			cx231xx_set_power_mode(dev, POLARIS_AVMODE_DEFAULT);
+			cx231xx_set_gpio_value(dev, AVERMEDIA_H837_LED_PIN, 1);
+			return 0;
 		case CX231XX_BOARD_CNXT_CARRAERA:
 		case CX231XX_BOARD_CNXT_RDE_250:
 		case CX231XX_BOARD_CNXT_SHELBY:
@@ -808,7 +900,7 @@
 	case -ESHUTDOWN:
 		return;
 	default:		/* error */
-		cx231xx_isocdbg("urb completition error %d.\n", urb->status);
+		cx231xx_isocdbg("urb completion error %d.\n", urb->status);
 		break;
 	}
 
@@ -851,8 +943,11 @@
 	case -ENOENT:
 	case -ESHUTDOWN:
 		return;
+	case -EPIPE:		/* stall */
+		cx231xx_isocdbg("urb completion error - device is stalled.\n");
+		return;
 	default:		/* error */
-		cx231xx_isocdbg("urb completition error %d.\n", urb->status);
+		cx231xx_isocdbg("urb completion error %d.\n", urb->status);
 		break;
 	}
 
@@ -924,8 +1019,10 @@
  */
 void cx231xx_uninit_bulk(struct cx231xx *dev)
 {
+	struct cx231xx_dmaqueue *dma_q = &dev->video_mode.vidq;
 	struct urb *urb;
 	int i;
+	bool broken_pipe = false;
 
 	cx231xx_isocdbg("cx231xx: called cx231xx_uninit_bulk\n");
 
@@ -941,22 +1038,31 @@
 			if (dev->video_mode.bulk_ctl.transfer_buffer[i]) {
 				usb_free_coherent(dev->udev,
 						urb->transfer_buffer_length,
-						dev->video_mode.isoc_ctl.
+						dev->video_mode.bulk_ctl.
 						transfer_buffer[i],
 						urb->transfer_dma);
 			}
+			if (urb->status == -EPIPE) {
+				broken_pipe = true;
+			}
 			usb_free_urb(urb);
 			dev->video_mode.bulk_ctl.urb[i] = NULL;
 		}
 		dev->video_mode.bulk_ctl.transfer_buffer[i] = NULL;
 	}
+	if (broken_pipe) {
+		cx231xx_err("Reset endpoint to recover broken pipe.");
+		usb_reset_endpoint(dev->udev, dev->video_mode.end_point_addr);
+	}
 
 	kfree(dev->video_mode.bulk_ctl.urb);
 	kfree(dev->video_mode.bulk_ctl.transfer_buffer);
+	kfree(dma_q->p_left_data);
 
 	dev->video_mode.bulk_ctl.urb = NULL;
 	dev->video_mode.bulk_ctl.transfer_buffer = NULL;
 	dev->video_mode.bulk_ctl.num_bufs = 0;
+	dma_q->p_left_data = NULL;
 
 	if (dev->mode_tv == 0)
 		cx231xx_capture_start(dev, 0, Raw_Video);
@@ -1203,6 +1309,15 @@
 				  sb_size, cx231xx_bulk_irq_callback, dma_q);
 	}
 
+	/* clear halt */
+	rc = usb_clear_halt(dev->udev, dev->video_mode.bulk_ctl.urb[0]->pipe);
+	if (rc < 0) {
+		cx231xx_err("failed to clear USB bulk endpoint stall/halt condition (error=%i)\n",
+					rc);
+		cx231xx_uninit_bulk(dev);
+		return rc;
+	}
+
 	init_waitqueue_head(&dma_q->wq);
 
 	/* submit urbs and enables IRQ */
@@ -1266,6 +1381,47 @@
 /*****************************************************************
 *             Device Init/UnInit functions                       *
 ******************************************************************/
+static void cx231xx_check_model(struct cx231xx *dev)
+{
+	if (is_model_avermedia_h837_series(dev->model)) {
+		struct i2c_msg msg[2];
+		unsigned char offset = 255, value = 0;
+
+		dev->i2c_bus[0].i2c_period =
+		dev->i2c_bus[1].i2c_period =
+		dev->i2c_bus[2].i2c_period = I2C_SPEED_400K;
+		// first a write message to write EE offset
+		msg[0].addr = 0x50;
+		msg[0].flags = 0;
+		msg[0].len = 1;
+		msg[0].buf = &offset;
+
+		// then a read message to read EE content, maximum read length is 4 bytes
+		msg[1].addr = 0x50;
+		msg[1].flags = I2C_M_RD;
+		msg[1].len = 1;
+		msg[1].buf = &value;
+
+		if (i2c_transfer(&dev->i2c_bus[1].i2c_adap, msg, 2) < 0) {
+			cx231xx_warn("Failed to check EEPROM");
+			return;
+		}
+
+		if( 0x01 == value) {
+			if (CX231XX_BOARD_AVERMEDIA_H837B==dev->model)
+				return;
+			dev->model = CX231XX_BOARD_AVERMEDIA_H837B;
+		} else {
+			if (CX231XX_BOARD_AVERMEDIA_H837A==dev->model ||
+				CX231XX_BOARD_AVERMEDIA_H837M==dev->model)
+				return;
+			dev->model = CX231XX_BOARD_AVERMEDIA_H837A;
+		}
+		dev->board = cx231xx_boards[dev->model];
+		cx231xx_info("Correct device model as %s\n", dev->board.name);
+	}
+}
+
 int cx231xx_dev_init(struct cx231xx *dev)
 {
 	int errCode = 0;
@@ -1298,6 +1454,9 @@
 	cx231xx_i2c_register(&dev->i2c_bus[1]);
 	cx231xx_i2c_register(&dev->i2c_bus[2]);
 
+	/* model check */
+	cx231xx_check_model(dev);
+
 	/* init hardware */
 	/* Note : with out calling set power mode function,
 	afe can not be set up correctly */
diff --git a/drivers/media/usb/cx231xx/cx231xx-dvb.c b/drivers/media/usb/cx231xx/cx231xx-dvb.c
index 14e2610..66340b5 100644
--- a/drivers/media/usb/cx231xx/cx231xx-dvb.c
+++ b/drivers/media/usb/cx231xx/cx231xx-dvb.c
@@ -33,6 +33,8 @@
 #include "s5h1411.h"
 #include "lgdt3305.h"
 #include "mb86a20s.h"
+#include "si2157.h"
+#include "lgdt3306a.h"
 
 MODULE_DESCRIPTION("driver for cx231xx based DVB cards");
 MODULE_AUTHOR("Srinivasa Deevi <srinivasa.deevi@conexant.com>");
@@ -67,6 +69,13 @@
 	struct dmx_frontend fe_hw;
 	struct dmx_frontend fe_mem;
 	struct dvb_net net;
+	int    power_on;
+};
+
+#include "tda18272.h"
+static struct tda18272_config h837_tda18272_config = {
+	  0x60                  //  dev->board.tuner_addr
+	, TDA18272_SINGLE
 };
 
 static struct s5h1432_config dvico_s5h1432_config = {
@@ -128,6 +137,17 @@
 	.vsb_if_khz         = 3250,
 };
 
+static struct lgdt3305_config h837_lgdt3305_config = {
+	.i2c_addr           = 0xB2 >> 1,
+	.mpeg_mode          = LGDT3305_MPEG_SERIAL,
+	.tpclk_edge         = LGDT3305_TPCLK_FALLING_EDGE,
+	.tpvalid_polarity   = LGDT3305_TP_VALID_HIGH,
+	.deny_i2c_rptr      = 1,
+	.spectral_inversion = 1,
+	.qam_if_khz         = 3600,
+	.vsb_if_khz         = 3250,
+};
+
 static struct tda18271_std_map hauppauge_tda18271_std_map = {
 	.atsc_6   = { .if_freq = 3250, .agc_mode = 3, .std = 4,
 		      .if_lvl = 1, .rfagc_top = 0x58, },
@@ -151,6 +171,23 @@
 	.small_i2c = TDA18271_03_BYTE_CHUNK_INIT,
 };
 
+static struct lgdt3306a_config hauppauge_955q_lgdt3306a_config = {
+	.i2c_addr           = 0x59,
+	.qam_if_khz         = 4000,
+	.vsb_if_khz         = 3250,
+	.deny_i2c_rptr      = 1,
+	.spectral_inversion = 1,
+	.mpeg_mode          = LGDT3306A_MPEG_SERIAL,
+	.tpclk_edge         = LGDT3306A_TPCLK_RISING_EDGE,
+	.tpvalid_polarity   = LGDT3306A_TP_VALID_HIGH,
+	.xtalMHz            = 25,
+};
+
+static struct si2157_config si2157_config = {
+	.i2c_addr           = 0x60,
+	.inversion			= true,
+};
+
 static inline void print_err_status(struct cx231xx *dev, int packet, int status)
 {
 	char *errmsg = "Unknown";
@@ -252,9 +289,14 @@
 	if (dev->USE_ISO) {
 		cx231xx_info("DVB transfer mode is ISO.\n");
 		mutex_lock(&dev->i2c_lock);
-		cx231xx_enable_i2c_port_3(dev, false);
-		cx231xx_set_alt_setting(dev, INDEX_TS1, 4);
-		cx231xx_enable_i2c_port_3(dev, true);
+		if (is_model_avermedia_h837_series(dev->model)) {
+			cx231xx_set_alt_setting(dev, INDEX_TS1, 4);
+			++dvb->power_on;
+		} else {
+			cx231xx_enable_i2c_port_3(dev, false);
+			cx231xx_set_alt_setting(dev, INDEX_TS1, 4);
+			cx231xx_enable_i2c_port_3(dev, true);
+		}
 		mutex_unlock(&dev->i2c_lock);
 		rc = cx231xx_set_mode(dev, CX231XX_DIGITAL_MODE);
 		if (rc < 0)
@@ -270,6 +312,9 @@
 		rc = cx231xx_set_mode(dev, CX231XX_DIGITAL_MODE);
 		if (rc < 0)
 			return rc;
+		if (is_model_avermedia_h837_series(dev->model)) {
+			++dvb->power_on;
+		}
 		dev->mode_tv = 1;
 		return cx231xx_init_bulk(dev, CX231XX_DVB_MAX_PACKETS,
 					CX231XX_DVB_NUM_BUFS,
@@ -288,6 +333,11 @@
 	else
 		cx231xx_uninit_bulk(dev);
 
+	if (-1 != dvb->power_on) {
+		--dvb->power_on;
+		if (dvb->power_on)
+			return 0;
+	}
 	cx231xx_set_mode(dev, CX231XX_SUSPEND);
 
 	return 0;
@@ -336,11 +386,20 @@
 static int cx231xx_dvb_bus_ctrl(struct dvb_frontend *fe, int acquire)
 {
 	struct cx231xx *dev = fe->dvb->priv;
+	struct cx231xx_dvb *dvb = dev->dvb;
 
-	if (acquire)
+	if (acquire) {
+		if (dvb != NULL && -1 != dvb->power_on)
+			++dvb->power_on;
 		return cx231xx_set_mode(dev, CX231XX_DIGITAL_MODE);
-	else
+	} else {
+		if (dvb != NULL && -1 != dvb->power_on) {
+			--dvb->power_on;
+			if (dvb->power_on)
+				return 0;
+		}
 		return cx231xx_set_mode(dev, CX231XX_SUSPEND);
+	}
 }
 
 /* ------------------------------------------------------------------ */
@@ -576,12 +635,19 @@
 		return -ENOMEM;
 	}
 	dev->dvb = dvb;
+	dvb->power_on = -1;
 	dev->cx231xx_set_analog_freq = cx231xx_set_analog_freq;
 	dev->cx231xx_reset_analog_tuner = cx231xx_reset_analog_tuner;
 
 	mutex_lock(&dev->lock);
-	cx231xx_set_mode(dev, CX231XX_DIGITAL_MODE);
-	cx231xx_demod_reset(dev);
+	if (is_model_avermedia_h837_series(dev->model)) {
+		cx231xx_set_mode(dev, CX231XX_SUSPEND);
+		cx231xx_set_mode(dev, CX231XX_DIGITAL_MODE);
+		dvb->power_on = 0;
+	} else {
+		cx231xx_set_mode(dev, CX231XX_DIGITAL_MODE);
+		cx231xx_demod_reset(dev);
+	}
 	/* init frontend */
 	switch (dev->model) {
 	case CX231XX_BOARD_CNXT_CARRAERA:
@@ -703,6 +769,30 @@
 			   &hcw_tda18271_config);
 		break;
 
+	case CX231XX_BOARD_HAUPPAUGE_955Q:
+
+		printk(KERN_INFO "%s: looking for tuner / demod on i2c bus: %d\n",
+		       __func__, i2c_adapter_id(&dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap));
+
+		dev->dvb->frontend = dvb_attach(lgdt3306a_attach,
+						&hauppauge_955q_lgdt3306a_config,
+						&dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap);
+
+		if (dev->dvb->frontend == NULL) {
+			printk(DRIVER_NAME
+			       ": Failed to attach LG3306A front end\n");
+			result = -EINVAL;
+			goto out_free;
+		}
+
+		/* define general-purpose callback pointer */
+		dvb->frontend->callback = cx231xx_tuner_callback;
+
+		dvb_attach(si2157_attach, dev->dvb->frontend,
+			   &dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap,
+			   &si2157_config);
+		break;
+
 	case CX231XX_BOARD_PV_PLAYTV_USB_HYBRID:
 	case CX231XX_BOARD_KWORLD_UB430_USB_HYBRID:
 
@@ -728,6 +818,29 @@
 			   &pv_tda18271_config);
 		break;
 
+	case CX231XX_BOARD_AVERMEDIA_H837A:
+	case CX231XX_BOARD_AVERMEDIA_H837B:
+	case CX231XX_BOARD_AVERMEDIA_H837M:
+		dev->dvb->frontend = dvb_attach(lgdt3305_attach,
+						&h837_lgdt3305_config,
+						&dev->i2c_bus[dev->board.demod_i2c_master].i2c_adap);
+
+		if (dev->dvb->frontend == NULL) {
+			printk(DRIVER_NAME
+			       ": Failed to attach LG3305 front end\n");
+			result = -EINVAL;
+			goto out_free;
+		}
+
+		/* define general-purpose callback pointer */
+		dvb->frontend->callback = cx231xx_tuner_callback;
+		{
+			dvb_attach(tda18272_attach, dev->dvb->frontend,
+				&dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap,
+				&h837_tda18272_config);
+		}
+		break;
+
 	default:
 		printk(KERN_ERR "%s/2: The frontend of your DVB/ATSC card"
 		       " isn't supported yet\n", dev->name);
diff --git a/drivers/media/usb/cx231xx/cx231xx-video.c b/drivers/media/usb/cx231xx/cx231xx-video.c
index cd22147..792ee54 100644
--- a/drivers/media/usb/cx231xx/cx231xx-video.c
+++ b/drivers/media/usb/cx231xx/cx231xx-video.c
@@ -802,6 +802,8 @@
 
 void video_mux(struct cx231xx *dev, int index)
 {
+	if (is_model_avermedia_h837_series(dev->model))
+		return;
 	dev->video_input = index;
 	dev->ctl_ainput = INPUT(index)->amux;
 
diff --git a/drivers/media/usb/cx231xx/cx231xx.h b/drivers/media/usb/cx231xx/cx231xx.h
index 5ad9fd6..c97cfb3 100644
--- a/drivers/media/usb/cx231xx/cx231xx.h
+++ b/drivers/media/usb/cx231xx/cx231xx.h
@@ -72,6 +72,10 @@
 #define CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC 15
 #define CX231XX_BOARD_ELGATO_VIDEO_CAPTURE_V2 16
 #define CX231XX_BOARD_OTG102 17
+#define CX231XX_BOARD_HAUPPAUGE_955Q 18
+#define CX231XX_BOARD_AVERMEDIA_H837A 19
+#define CX231XX_BOARD_AVERMEDIA_H837B 20
+#define CX231XX_BOARD_AVERMEDIA_H837M 21
 
 /* Limits minimum and default number of buffers */
 #define CX231XX_MIN_BUF                 4
@@ -121,6 +125,7 @@
 #define SLEEP_S5H1432    30
 #define CX23417_OSC_EN   8
 #define CX23417_RESET    9
+#define AVERMEDIA_H837_LED_PIN 27
 
 struct cx23417_fmt {
 	char  *name;
@@ -969,8 +974,8 @@
 int cx231xx_ir_init(struct cx231xx *dev);
 void cx231xx_ir_exit(struct cx231xx *dev);
 #else
-#define cx231xx_ir_init(dev)	(0)
-#define cx231xx_ir_exit(dev)	(0)
+#define cx231xx_ir_init(dev)	while (0)
+#define cx231xx_ir_exit(dev)	while (0)
 #endif
 
 
@@ -1005,4 +1010,16 @@
 	else
 		return (dev->norm & V4L2_STD_625_50) ? 576 : 480;
 }
+
+static inline bool is_model_avermedia_h837_series(int model)
+{
+	switch (model) {
+		case CX231XX_BOARD_AVERMEDIA_H837A:
+		case CX231XX_BOARD_AVERMEDIA_H837B:
+		case CX231XX_BOARD_AVERMEDIA_H837M:
+			return true;
+	}
+	return false;
+}
+
 #endif
diff --git a/drivers/media/usb/em28xx/em28xx-i2c.c b/drivers/media/usb/em28xx/em28xx-i2c.c
index 4851cc2..c4ff973 100644
--- a/drivers/media/usb/em28xx/em28xx-i2c.c
+++ b/drivers/media/usb/em28xx/em28xx-i2c.c
@@ -726,7 +726,7 @@
 
 	*eedata = data;
 	*eedata_len = len;
-	dev_config = (void *)eedata;
+	dev_config = (void *)*eedata;
 
 	switch (le16_to_cpu(dev_config->chip_conf) >> 4 & 0x3) {
 	case 0:
diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c b/drivers/media/usb/hdpvr/hdpvr-core.c
index 8247c19..77d7b7f 100644
--- a/drivers/media/usb/hdpvr/hdpvr-core.c
+++ b/drivers/media/usb/hdpvr/hdpvr-core.c
@@ -311,6 +311,11 @@
 
 	dev->workqueue = 0;
 
+	/* init video transfer queues first of all */
+	/* to prevent oops in hdpvr_delete() on error paths */
+	INIT_LIST_HEAD(&dev->free_buff_list);
+	INIT_LIST_HEAD(&dev->rec_buff_list);
+
 	/* register v4l2_device early so it can be used for printks */
 	if (v4l2_device_register(&interface->dev, &dev->v4l2_dev)) {
 		dev_err(&interface->dev, "v4l2_device_register failed\n");
@@ -333,10 +338,6 @@
 	if (!dev->workqueue)
 		goto error;
 
-	/* init video transfer queues */
-	INIT_LIST_HEAD(&dev->free_buff_list);
-	INIT_LIST_HEAD(&dev->rec_buff_list);
-
 	dev->options = hdpvr_default_options;
 
 	if (default_video_input < HDPVR_VIDEO_INPUTS)
@@ -413,7 +414,7 @@
 				    video_nr[atomic_inc_return(&dev_nr)]);
 	if (retval < 0) {
 		v4l2_err(&dev->v4l2_dev, "registering videodev failed\n");
-		goto error;
+		goto reg_fail;
 	}
 
 	/* let the user know what node this device is now attached to */
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index f129551..1b18616 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -1006,104 +1006,14 @@
 	if (!file->f_op->unlocked_ioctl)
 		return ret;
 
-	switch (cmd) {
-	case VIDIOC_QUERYCAP:
-	case VIDIOC_RESERVED:
-	case VIDIOC_ENUM_FMT:
-	case VIDIOC_G_FMT32:
-	case VIDIOC_S_FMT32:
-	case VIDIOC_REQBUFS:
-	case VIDIOC_QUERYBUF32:
-	case VIDIOC_G_FBUF32:
-	case VIDIOC_S_FBUF32:
-	case VIDIOC_OVERLAY32:
-	case VIDIOC_QBUF32:
-	case VIDIOC_EXPBUF:
-	case VIDIOC_DQBUF32:
-	case VIDIOC_STREAMON32:
-	case VIDIOC_STREAMOFF32:
-	case VIDIOC_G_PARM:
-	case VIDIOC_S_PARM:
-	case VIDIOC_G_STD:
-	case VIDIOC_S_STD:
-	case VIDIOC_ENUMSTD32:
-	case VIDIOC_ENUMINPUT32:
-	case VIDIOC_G_CTRL:
-	case VIDIOC_S_CTRL:
-	case VIDIOC_G_TUNER:
-	case VIDIOC_S_TUNER:
-	case VIDIOC_G_AUDIO:
-	case VIDIOC_S_AUDIO:
-	case VIDIOC_QUERYCTRL:
-	case VIDIOC_QUERYMENU:
-	case VIDIOC_G_INPUT32:
-	case VIDIOC_S_INPUT32:
-	case VIDIOC_G_OUTPUT32:
-	case VIDIOC_S_OUTPUT32:
-	case VIDIOC_ENUMOUTPUT:
-	case VIDIOC_G_AUDOUT:
-	case VIDIOC_S_AUDOUT:
-	case VIDIOC_G_MODULATOR:
-	case VIDIOC_S_MODULATOR:
-	case VIDIOC_S_FREQUENCY:
-	case VIDIOC_G_FREQUENCY:
-	case VIDIOC_CROPCAP:
-	case VIDIOC_G_CROP:
-	case VIDIOC_S_CROP:
-	case VIDIOC_G_SELECTION:
-	case VIDIOC_S_SELECTION:
-	case VIDIOC_G_JPEGCOMP:
-	case VIDIOC_S_JPEGCOMP:
-	case VIDIOC_QUERYSTD:
-	case VIDIOC_TRY_FMT32:
-	case VIDIOC_ENUMAUDIO:
-	case VIDIOC_ENUMAUDOUT:
-	case VIDIOC_G_PRIORITY:
-	case VIDIOC_S_PRIORITY:
-	case VIDIOC_G_SLICED_VBI_CAP:
-	case VIDIOC_LOG_STATUS:
-	case VIDIOC_G_EXT_CTRLS32:
-	case VIDIOC_S_EXT_CTRLS32:
-	case VIDIOC_TRY_EXT_CTRLS32:
-	case VIDIOC_ENUM_FRAMESIZES:
-	case VIDIOC_ENUM_FRAMEINTERVALS:
-	case VIDIOC_G_ENC_INDEX:
-	case VIDIOC_ENCODER_CMD:
-	case VIDIOC_TRY_ENCODER_CMD:
-	case VIDIOC_DECODER_CMD:
-	case VIDIOC_TRY_DECODER_CMD:
-	case VIDIOC_DBG_S_REGISTER:
-	case VIDIOC_DBG_G_REGISTER:
-	case VIDIOC_DBG_G_CHIP_IDENT:
-	case VIDIOC_S_HW_FREQ_SEEK:
-	case VIDIOC_S_DV_TIMINGS:
-	case VIDIOC_G_DV_TIMINGS:
-	case VIDIOC_DQEVENT:
-	case VIDIOC_DQEVENT32:
-	case VIDIOC_SUBSCRIBE_EVENT:
-	case VIDIOC_UNSUBSCRIBE_EVENT:
-	case VIDIOC_CREATE_BUFS32:
-	case VIDIOC_PREPARE_BUF32:
-	case VIDIOC_ENUM_DV_TIMINGS:
-	case VIDIOC_QUERY_DV_TIMINGS:
-	case VIDIOC_DV_TIMINGS_CAP:
-	case VIDIOC_ENUM_FREQ_BANDS:
-	case VIDIOC_SUBDEV_G_EDID32:
-	case VIDIOC_SUBDEV_S_EDID32:
+	if (_IOC_TYPE(cmd) == 'V' && _IOC_NR(cmd) < BASE_VIDIOC_PRIVATE)
 		ret = do_video_ioctl(file, cmd, arg);
-		break;
+	else if (vdev->fops->compat_ioctl32)
+		ret = vdev->fops->compat_ioctl32(file, cmd, arg);
 
-	default:
-		if (vdev->fops->compat_ioctl32)
-			ret = vdev->fops->compat_ioctl32(file, cmd, arg);
-
-		if (ret == -ENOIOCTLCMD)
-			printk(KERN_WARNING "compat_ioctl32: "
-				"unknown ioctl '%c', dir=%d, #%d (0x%08x)\n",
-				_IOC_TYPE(cmd), _IOC_DIR(cmd), _IOC_NR(cmd),
-				cmd);
-		break;
-	}
+	if (ret == -ENOIOCTLCMD)
+		pr_warn("compat_ioctl32: unknown ioctl '%c', dir=%d, #%d (0x%08x)\n",
+			_IOC_TYPE(cmd), _IOC_DIR(cmd), _IOC_NR(cmd), cmd);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(v4l2_compat_ioctl32);
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index 996c248..60d2550 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -368,6 +368,17 @@
 	return video_usercopy(file, cmd, arg, subdev_do_ioctl);
 }
 
+#ifdef CONFIG_COMPAT
+static long subdev_compat_ioctl32(struct file *file, unsigned int cmd,
+	unsigned long arg)
+{
+	struct video_device *vdev = video_devdata(file);
+	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+
+	return v4l2_subdev_call(sd, core, compat_ioctl32, cmd, arg);
+}
+#endif
+
 static unsigned int subdev_poll(struct file *file, poll_table *wait)
 {
 	struct video_device *vdev = video_devdata(file);
@@ -389,6 +400,9 @@
 	.owner = THIS_MODULE,
 	.open = subdev_open,
 	.unlocked_ioctl = subdev_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl32 = subdev_compat_ioctl32,
+#endif
 	.release = subdev_close,
 	.poll = subdev_poll,
 };
diff --git a/drivers/mfd/intel_msic.c b/drivers/mfd/intel_msic.c
index d8d5137..44b60c9 100644
--- a/drivers/mfd/intel_msic.c
+++ b/drivers/mfd/intel_msic.c
@@ -18,7 +18,7 @@
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 
-#include <asm/intel_scu_ipc.h>
+#include <asm/intel_scu_pmic.h>
 
 #define MSIC_VENDOR(id)		((id >> 6) & 3)
 #define MSIC_VERSION(id)	(id & 0x3f)
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index 00e4fe2..ffea6f7 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -759,7 +759,7 @@
 	if (i2c->dev.of_node) {
 		of_id = of_match_device(wm8994_of_match, &i2c->dev);
 		if (of_id)
-			wm8994->type = (int)of_id->data;
+			wm8994->type = (uintptr_t) of_id->data;
 	} else {
 		wm8994->type = id->driver_data;
 	}
diff --git a/drivers/mfd/wm8994-regmap.c b/drivers/mfd/wm8994-regmap.c
index 2fbce9c..1d2cb50 100644
--- a/drivers/mfd/wm8994-regmap.c
+++ b/drivers/mfd/wm8994-regmap.c
@@ -1107,6 +1107,7 @@
 	case WM8958_MBC_LPF_2:
 	case WM8958_MBC_RMS_LIMIT_1:
 	case WM8958_MBC_RMS_LIMIT_2:
+	case 0xCB:
 		return true;
 	default:
 		return wm8994_readable_register(dev, reg);
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 181fc59..833b3d2 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -147,6 +147,18 @@
 	  an Intel Atom (non-netbook) mobile device containing a MIPI
 	  P1149.7 standard implementation.
 
+config INTEL_PTI_STM
+	tristate "MIPI Sytem Trace Macro (STM) for Intel"
+	default n
+	depends on INTEL_MID_PTI
+	help
+	  The STM (Sytem Trace Monitor) driver control trace data
+	  route through an Intel Tangier PTI port or through USB xDCI
+	  interface with Debug-Class DvC.Trace support.
+
+	  It provide the ability to PTI driver to setup the output and
+	  to user to change the output with sysfs and exported header.
+
 config SGI_IOC4
 	tristate "SGI IOC4 Base IO support"
 	depends on PCI
@@ -381,6 +393,15 @@
 	  This driver provides support for the Honeywell HMC6352 compass,
 	  providing configuration and heading data via sysfs.
 
+config MONZA_X
+	tristate "Impinj Monza-x-2K RFID Chip"
+	depends on I2C
+	help
+	  This driver provides support for the Impinj Monza-x-2K RFID Chip,
+	  Monza x-Dura is UHF Gen2 RFID IC product with 2176bits user NVM,
+	  it can be accessed by i2c interface and UHF Gen2 RFID protocol,
+	  this driver provide access interface via sysfs for userland.
+
 config EP93XX_PWM
 	tristate "EP93xx PWM support"
 	depends on ARCH_EP93XX
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 89435c9..92ca219 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -7,6 +7,7 @@
 obj-$(CONFIG_AD525X_DPOT_I2C)	+= ad525x_dpot-i2c.o
 obj-$(CONFIG_AD525X_DPOT_SPI)	+= ad525x_dpot-spi.o
 obj-$(CONFIG_INTEL_MID_PTI)	+= pti.o
+obj-$(CONFIG_INTEL_PTI_STM)	+= stm.o
 obj-$(CONFIG_ATMEL_PWM)		+= atmel_pwm.o
 obj-$(CONFIG_ATMEL_SSC)		+= atmel-ssc.o
 obj-$(CONFIG_ATMEL_TCLIB)	+= atmel_tclib.o
@@ -39,6 +40,7 @@
 obj-$(CONFIG_UID_STAT)		+= uid_stat.o
 obj-$(CONFIG_C2PORT)		+= c2port/
 obj-$(CONFIG_HMC6352)		+= hmc6352.o
+obj-$(CONFIG_MONZA_X)		+= monza_x.o
 obj-y				+= eeprom/
 obj-y				+= cb710/
 obj-$(CONFIG_SPEAR13XX_PCIE_GADGET)	+= spear13xx_pcie_gadget.o
diff --git a/drivers/misc/atmel_pwm.c b/drivers/misc/atmel_pwm.c
index 494d050..a6dc56e 100644
--- a/drivers/misc/atmel_pwm.c
+++ b/drivers/misc/atmel_pwm.c
@@ -90,8 +90,10 @@
 	unsigned long	flags;
 	int		status = 0;
 
-	/* insist on PWM init, with this signal pinned out */
-	if (!pwm || !(pwm->mask & 1 << index))
+	if (!pwm)
+		return -EPROBE_DEFER;
+
+	if (!(pwm->mask & 1 << index))
 		return -ENODEV;
 
 	if (index < 0 || index >= PWM_NCHAN || !ch)
diff --git a/drivers/misc/hpilo.c b/drivers/misc/hpilo.c
index 621c7a3..b83e3ca 100644
--- a/drivers/misc/hpilo.c
+++ b/drivers/misc/hpilo.c
@@ -759,7 +759,7 @@
 
 	/* Ignore subsystem_device = 0x1979 (set by BIOS)  */
 	if (pdev->subsystem_device == 0x1979)
-		goto out;
+		return 0;
 
 	if (max_ccb > MAX_CCB)
 		max_ccb = MAX_CCB;
@@ -899,7 +899,7 @@
 	class_destroy(ilo_class);
 }
 
-MODULE_VERSION("1.4");
+MODULE_VERSION("1.4.1");
 MODULE_ALIAS(ILO_NAME);
 MODULE_DESCRIPTION(ILO_NAME);
 MODULE_AUTHOR("David Altobelli <david.altobelli@hp.com>");
diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c
index b3e5098..3db9291 100644
--- a/drivers/misc/mei/amthif.c
+++ b/drivers/misc/mei/amthif.c
@@ -57,6 +57,7 @@
 	dev->iamthif_ioctl = false;
 	dev->iamthif_state = MEI_IAMTHIF_IDLE;
 	dev->iamthif_timer = 0;
+	dev->iamthif_stall_timer = 0;
 }
 
 /**
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 9ecd49a..99cc0b0 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -295,10 +295,13 @@
 
 	if (cl->reading_state != MEI_READ_COMPLETE &&
 	    !waitqueue_active(&cl->rx_wait)) {
+
 		mutex_unlock(&dev->device_lock);
 
 		if (wait_event_interruptible(cl->rx_wait,
-				(MEI_READ_COMPLETE == cl->reading_state))) {
+				cl->reading_state == MEI_READ_COMPLETE  ||
+				mei_cl_is_transitioning(cl))) {
+
 			if (signal_pending(current))
 				return -EINTR;
 			return -ERESTARTSYS;
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
index cfdb144..467d9dd 100644
--- a/drivers/misc/mei/client.h
+++ b/drivers/misc/mei/client.h
@@ -76,6 +76,12 @@
 		(cl1->host_client_id == cl2->host_client_id) &&
 		(cl1->me_client_id == cl2->me_client_id);
 }
+static inline bool mei_cl_is_transitioning(struct mei_cl *cl)
+{
+	return (MEI_FILE_INITIALIZING == cl->state ||
+		MEI_FILE_DISCONNECTED == cl->state ||
+		MEI_FILE_DISCONNECTING == cl->state);
+}
 
 
 int mei_cl_flow_ctrl_creds(struct mei_cl *cl);
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index 6916045..23b5b7b 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -35,11 +35,15 @@
 	struct mei_me_client *clients;
 	int b;
 
+	dev->me_clients_num = 0;
+	dev->me_client_presentation_num = 0;
+	dev->me_client_index = 0;
+
 	/* count how many ME clients we have */
 	for_each_set_bit(b, dev->me_clients_map, MEI_CLIENTS_MAX)
 		dev->me_clients_num++;
 
-	if (dev->me_clients_num <= 0)
+	if (dev->me_clients_num == 0)
 		return;
 
 	kfree(dev->me_clients);
@@ -221,7 +225,7 @@
 	struct hbm_props_request *prop_req;
 	const size_t len = sizeof(struct hbm_props_request);
 	unsigned long next_client_index;
-	u8 client_num;
+	unsigned long client_num;
 
 
 	client_num = dev->me_client_presentation_num;
@@ -650,8 +654,6 @@
 		if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
 		    dev->hbm_state == MEI_HBM_ENUM_CLIENTS) {
 				dev->init_clients_timer = 0;
-				dev->me_client_presentation_num = 0;
-				dev->me_client_index = 0;
 				mei_hbm_me_cl_allocate(dev);
 				dev->hbm_state = MEI_HBM_CLIENT_PROPERTIES;
 
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index 822170f..1bf3f8b 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -176,16 +176,14 @@
 	struct mei_me_hw *hw = to_me_hw(dev);
 	u32 hcsr = mei_hcsr_read(hw);
 
-	dev_dbg(&dev->pdev->dev, "before reset HCSR = 0x%08x.\n", hcsr);
-
-	hcsr |= (H_RST | H_IG);
+	hcsr |= H_RST | H_IG | H_IS;
 
 	if (intr_enable)
 		hcsr |= H_IE;
 	else
-		hcsr |= ~H_IE;
+		hcsr &= ~H_IE;
 
-	mei_hcsr_set(hw, hcsr);
+	mei_me_reg_write(hw, H_CSR, hcsr);
 
 	if (dev->dev_state == MEI_DEV_POWER_DOWN)
 		mei_me_hw_reset_release(dev);
@@ -238,14 +236,18 @@
 	if (mei_me_hw_is_ready(dev))
 		return 0;
 
+	dev->recvd_hw_ready = false;
 	mutex_unlock(&dev->device_lock);
 	err = wait_event_interruptible_timeout(dev->wait_hw_ready,
-			dev->recvd_hw_ready, MEI_INTEROP_TIMEOUT);
+			dev->recvd_hw_ready,
+			mei_secs_to_jiffies(MEI_INTEROP_TIMEOUT));
 	mutex_lock(&dev->device_lock);
 	if (!err && !dev->recvd_hw_ready) {
+		if (!err)
+			err = -ETIMEDOUT;
 		dev_err(&dev->pdev->dev,
-			"wait hw ready failed. status = 0x%x\n", err);
-		return -ETIMEDOUT;
+			"wait hw ready failed. status = %d\n", err);
+		return err;
 	}
 
 	dev->recvd_hw_ready = false;
@@ -482,7 +484,9 @@
 	/* check if ME wants a reset */
 	if (!mei_hw_is_ready(dev) &&
 	    dev->dev_state != MEI_DEV_RESETTING &&
-	    dev->dev_state != MEI_DEV_INITIALIZING) {
+	    dev->dev_state != MEI_DEV_INITIALIZING &&
+	    dev->dev_state != MEI_DEV_POWER_DOWN &&
+	    dev->dev_state != MEI_DEV_POWER_UP) {
 		dev_dbg(&dev->pdev->dev, "FW not ready.\n");
 		mei_reset(dev, 1);
 		mutex_unlock(&dev->device_lock);
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index f580d30..878bc1c 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -143,7 +143,8 @@
 
 	dev->hbm_state = MEI_HBM_IDLE;
 
-	if (dev->dev_state != MEI_DEV_INITIALIZING) {
+	if (dev->dev_state != MEI_DEV_INITIALIZING &&
+	    dev->dev_state != MEI_DEV_POWER_UP) {
 		if (dev->dev_state != MEI_DEV_DISABLED &&
 		    dev->dev_state != MEI_DEV_POWER_DOWN)
 			dev->dev_state = MEI_DEV_RESETTING;
@@ -163,6 +164,9 @@
 		memset(&dev->wr_ext_msg, 0, sizeof(dev->wr_ext_msg));
 	}
 
+	/* we're already in reset, cancel the init timer */
+	dev->init_clients_timer = 0;
+
 	dev->me_clients_num = 0;
 	dev->rd_msg_hdr = 0;
 	dev->wd_pending = false;
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 053139f..701698d 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -262,19 +262,16 @@
 		mutex_unlock(&dev->device_lock);
 
 		if (wait_event_interruptible(cl->rx_wait,
-			(MEI_READ_COMPLETE == cl->reading_state ||
-			 MEI_FILE_INITIALIZING == cl->state ||
-			 MEI_FILE_DISCONNECTED == cl->state ||
-			 MEI_FILE_DISCONNECTING == cl->state))) {
+				MEI_READ_COMPLETE == cl->reading_state ||
+				mei_cl_is_transitioning(cl))) {
+
 			if (signal_pending(current))
 				return -EINTR;
 			return -ERESTARTSYS;
 		}
 
 		mutex_lock(&dev->device_lock);
-		if (MEI_FILE_INITIALIZING == cl->state ||
-		    MEI_FILE_DISCONNECTED == cl->state ||
-		    MEI_FILE_DISCONNECTING == cl->state) {
+		if (mei_cl_is_transitioning(cl)) {
 			rets = -EBUSY;
 			goto out;
 		}
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 4de5140..73c7700 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -402,9 +402,9 @@
 	struct mei_me_client *me_clients; /* Note: memory has to be allocated */
 	DECLARE_BITMAP(me_clients_map, MEI_CLIENTS_MAX);
 	DECLARE_BITMAP(host_clients_map, MEI_CLIENTS_MAX);
-	u8 me_clients_num;
-	u8 me_client_presentation_num;
-	u8 me_client_index;
+	unsigned long me_clients_num;
+	unsigned long me_client_presentation_num;
+	unsigned long me_client_index;
 
 	struct mei_cl wd_cl;
 	enum mei_wd_states wd_state;
diff --git a/drivers/misc/monza_x.c b/drivers/misc/monza_x.c
new file mode 100644
index 0000000..4e3ab34
--- /dev/null
+++ b/drivers/misc/monza_x.c
@@ -0,0 +1,523 @@
+/*
+ * monza_x.c: driver for Impinj RFID chip
+ *
+ * (c) copyright 2013 intel corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/sysfs.h>
+#include <linux/log2.h>
+#include <linux/i2c.h>
+#include <linux/miscdevice.h>
+#include <linux/acpi.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+
+#define MONZAX_2K_BYTE_LEN 336
+#define MONZAX_8K_BYTE_LEN 1088
+#define MONZAX_KBUF_MAX 1088
+
+#define MONZAX_2K_CLASSID_OFF 328
+#define MONZAX_8K_CLASSID_OFF 40
+#define MONZAX_GEN2_CLASSID 0xE2
+
+enum slave_addr_num {
+	MONZAX_8K_ADDR_NUM = 1,
+	MONZAX_2K_ADDR_NUM
+};
+/*
+ * word/2word write will take time before next write,
+ * set 100ms threshold for safe.
+ */
+#define  WRITE_TIMEOUT 100
+
+struct monza_data {
+	struct mutex lock;
+	struct bin_attribute bin;
+
+	u8 *writebuf;
+	unsigned write_max;
+	unsigned num_addr;
+
+	struct miscdevice miscdev;
+	/* monzax_2k has 2 i2c slave addr */
+	struct i2c_client *client[2];
+};
+
+static struct i2c_client *monza_translate_offset(struct monza_data *monza,
+		unsigned *offset)
+{
+	unsigned i = 0;
+
+	if (monza->num_addr == MONZAX_2K_ADDR_NUM) {
+		i = *offset >> 8;
+		*offset &= 0xff;
+	}
+
+	return monza->client[i];
+}
+
+static ssize_t monza_eeprom_read(struct monza_data *monza, char *buf,
+		unsigned offset, size_t count)
+{
+	struct i2c_client *client;
+	struct i2c_msg msg[2];
+	u8 msgbuf[2];
+	int status, i = 0;
+
+	memset(msg, 0, sizeof(msg));
+	client = monza_translate_offset(monza, &offset);
+
+	/* for monzax 8k, eeprom offset is 16bit/2byt mode */
+	if (monza->num_addr == MONZAX_8K_ADDR_NUM)
+		msgbuf[i++] = offset >> 8;
+	msgbuf[i++] = offset;
+
+	msg[0].addr = client->addr;
+	msg[0].buf = msgbuf;
+	msg[0].len = i;
+
+	msg[1].addr = client->addr;
+	msg[1].flags = I2C_M_RD;
+	msg[1].buf = buf;
+	msg[1].len = count;
+
+	status = i2c_transfer(client->adapter, msg, 2);
+	if (status == 2)
+		status = count;
+	dev_dbg(&client->dev, "read %zd@%d --> %d\n",
+			count, offset, status);
+	return status;
+}
+
+static ssize_t monza_read(struct monza_data *monza,
+		char *buf, loff_t off, size_t count)
+{
+	ssize_t retval = 0;
+	unsigned long timeout, read_time;
+	/*
+	 * Read data from chip, protecting against concurrent updates
+	 * from this host, but not from other I2C masters.
+	 */
+	mutex_lock(&monza->lock);
+
+	while (count) {
+		ssize_t	status;
+
+		/*
+		 * Reads fail if the previous write didn't complete yet. We may
+		 * loop a few times until this one succeeds.
+		 */
+		timeout = jiffies + msecs_to_jiffies(WRITE_TIMEOUT);
+		do {
+			read_time = jiffies;
+			status = monza_eeprom_read(monza, buf, off, count);
+			if (status == count)
+				break;
+			usleep_range(2000, 2050);
+		} while (time_before(read_time, timeout));
+
+		/* exception handle */
+		if (status < 0) {
+			if (retval == 0)
+				retval = status;
+			break;
+		}
+
+		buf += status;
+		off += status;
+		count -= status;
+		retval += status;
+	}
+
+	mutex_unlock(&monza->lock);
+
+	return retval;
+}
+
+static ssize_t monza_bin_read(struct file *filp, struct kobject *kobj,
+		struct bin_attribute *attr,
+		char *buf, loff_t off, size_t count)
+{
+	struct monza_data *monza;
+
+	monza = dev_get_drvdata(container_of(kobj, struct device, kobj));
+	return monza_read(monza, buf, off, count);
+}
+
+static ssize_t monza_eeprom_write(struct monza_data *monza, const char *buf,
+		unsigned offset, size_t count)
+{
+	struct i2c_client *client;
+	struct i2c_msg msg;
+	int status, i = 0;
+
+	/* Get corresponding I2C address and adjust offset */
+	client = monza_translate_offset(monza, &offset);
+
+	msg.addr = client->addr;
+	msg.flags = 0;
+	msg.buf = monza->writebuf;
+	/* for monzax 8k, eeprom offset is 16bit/2byt mode */
+	if (monza->num_addr == MONZAX_8K_ADDR_NUM)
+		msg.buf[i++] = offset >> 8;
+	msg.buf[i++] = offset;
+	memcpy(&msg.buf[i], buf, count);
+	msg.len = i + count;
+
+	status = i2c_transfer(client->adapter, &msg, 1);
+	dev_dbg(&client->dev, "write %zd@%d --> %d\n",
+			count, offset, status);
+	if (status == 1)
+		status = count;
+	return status;
+}
+
+static ssize_t monza_write(struct monza_data *monza, const char *buf,
+				loff_t off, size_t count)
+{
+	ssize_t retval = 0;
+	unsigned long timeout, write_time;
+
+	if ((off % 2 != 0) || (count % 2 != 0)) {
+		dev_err(&monza->client[0]->dev, "word boundary error\n");
+		return 0;
+	}
+	/*
+	 * Write data to chip, protecting against concurrent updates
+	 * from this host, but not from other I2C masters.
+	 */
+	mutex_lock(&monza->lock);
+
+	while (count) {
+		ssize_t	status;
+		size_t cnt;
+		/* write_max is at most a 2word/4byte */
+		if (count > monza->write_max)
+			cnt = monza->write_max;
+		else
+			cnt = count;
+		/*
+		 * Writes fail if the previous one didn't complete yet. We may
+		 * loop a few times until this one succeeds.
+		 */
+		timeout = jiffies + msecs_to_jiffies(WRITE_TIMEOUT);
+		do {
+			write_time = jiffies;
+			status = monza_eeprom_write(monza, buf, off, cnt);
+			if (status == cnt)
+				break;
+			usleep_range(2000, 2050);
+		} while (time_before(write_time, timeout));
+
+		/* exception handle */
+		if (status < 0) {
+			if (retval == 0)
+				retval = status;
+			break;
+		}
+
+		buf += status;
+		off += status;
+		count -= status;
+		retval += status;
+	}
+
+	mutex_unlock(&monza->lock);
+
+	return retval;
+}
+
+static ssize_t monza_bin_write(struct file *filp, struct kobject *kobj,
+		struct bin_attribute *attr,
+		char *buf, loff_t off, size_t count)
+{
+	struct monza_data *monza;
+
+	monza = dev_get_drvdata(container_of(kobj, struct device, kobj));
+	return monza_write(monza, buf, off, count);
+}
+
+static int monza_check_ids(struct monza_data *monza)
+{
+	int status, off = MONZAX_2K_CLASSID_OFF;
+	unsigned char buf[2] = { 0 };
+
+	if (monza->num_addr == MONZAX_2K_ADDR_NUM)
+		off = MONZAX_2K_CLASSID_OFF;
+	else if (monza->num_addr == MONZAX_8K_ADDR_NUM)
+		off = MONZAX_8K_CLASSID_OFF;
+
+	status = monza_read(monza, buf, off, 1);
+	if (status > 0 && buf[0] == MONZAX_GEN2_CLASSID)
+		return 0;
+	else
+		return -ENODEV;
+}
+
+static int monza_misc_open(struct inode *inode, struct file *filp)
+{
+	struct monza_data *monza = container_of(filp->private_data,
+					      struct monza_data, miscdev);
+	filp->private_data = monza;
+	return 0;
+}
+
+static int monza_misc_release(struct inode *inode, struct file *filp)
+{
+	return 0;
+}
+
+static ssize_t monza_misc_read(struct file *filp, char __user *ubuf,
+				size_t count, loff_t *pos)
+{
+	struct monza_data *monza = filp->private_data;
+	u8 *kbuf;
+	ssize_t cnt;
+
+	kbuf = kmalloc(MONZAX_KBUF_MAX, GFP_KERNEL);
+	if (kbuf == NULL)  {
+		dev_err(&monza->client[0]->dev, "%s(%d): buf allocation failed\n",
+			__func__, __LINE__);
+		return -ENOMEM;
+	}
+
+	count = min_t(size_t, MONZAX_KBUF_MAX - *pos, count);
+	cnt = monza_read(monza, kbuf, *pos, count);
+	if (cnt <= 0)
+		goto out;
+
+	if (copy_to_user(ubuf, kbuf, cnt)) {
+		cnt = -EFAULT;
+		goto out;
+	}
+	*pos += cnt;
+out:
+	kfree(kbuf);
+	return cnt;
+}
+
+static ssize_t monza_misc_write(struct file *filp, const char __user *ubuf,
+			 size_t count, loff_t *pos)
+{
+	struct monza_data *monza = filp->private_data;
+	u8 *kbuf;
+	ssize_t cnt;
+
+	kbuf = kmalloc(MONZAX_KBUF_MAX, GFP_KERNEL);
+	if (kbuf == NULL)  {
+		dev_err(&monza->client[0]->dev, "%s(%d): buf allocation failed\n",
+			__func__, __LINE__);
+		return -ENOMEM;
+	}
+
+	count = min_t(size_t, MONZAX_KBUF_MAX - *pos, count);
+	if (copy_from_user(kbuf, ubuf, count)) {
+		cnt = -EFAULT;
+		goto out;
+	}
+
+	cnt = monza_write(monza, kbuf, *pos, count);
+	if (cnt <= 0)
+		goto out;
+
+	*pos += count;
+out:
+	kfree(kbuf);
+	return cnt;
+}
+
+static const struct file_operations monza_misc_fops = {
+	.owner   = THIS_MODULE,
+	.read    = monza_misc_read,
+	.write	 = monza_misc_write,
+	.llseek	 = generic_file_llseek,
+	.open    = monza_misc_open,
+	.release = monza_misc_release,
+};
+
+static const struct i2c_device_id i2c_monza_ids[] = {
+	{ "MNZX2000", MONZAX_2K_ADDR_NUM },
+	{ "MNZX8000", MONZAX_8K_ADDR_NUM },
+	{ "IMPJ0003", MONZAX_8K_ADDR_NUM },
+	{ /* END OF LIST */ }
+};
+MODULE_DEVICE_TABLE(i2c, i2c_monza_ids);
+
+static const struct acpi_device_id acpi_monza_ids[] = {
+	{ "MNZX2000", MONZAX_2K_ADDR_NUM },
+	{ "MNZX8000", MONZAX_8K_ADDR_NUM },
+	{ "IMPJ0003", MONZAX_8K_ADDR_NUM },
+	{}
+};
+MODULE_DEVICE_TABLE(acpi, acpi_monza_ids);
+
+static int monza_probe(struct i2c_client *client,
+		const struct i2c_device_id *id)
+{
+	struct monza_data *monza;
+	const struct acpi_device_id *aid;
+	int err;
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		dev_err(&client->dev, "client not i2c capable\n");
+		err = -ENODEV;
+		goto err_out;
+	}
+
+	monza = kzalloc(sizeof(struct monza_data), GFP_KERNEL);
+	if (!monza) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+	mutex_init(&monza->lock);
+
+	if (id)
+		monza->num_addr = id->driver_data;
+	else {
+		/* acpi id detect */
+		for (aid = acpi_monza_ids; aid->id[0]; aid++)
+			if (!strncmp(aid->id, client->name, strlen(aid->id))) {
+				monza->num_addr = aid->driver_data;
+				dev_info(&client->dev, "acpi id: %s\n", client->name);
+			}
+	}
+	if (!monza->num_addr) {
+		dev_err(&client->dev, "Invalid id driver data error.\n");
+		err = -ENODEV;
+		goto err_struct;
+	}
+
+	monza->client[0] = client;
+	/* use dummy device, since monzax-2k has 2 slave address */
+	if (monza->num_addr == MONZAX_2K_ADDR_NUM) {
+		monza->client[1] = i2c_new_dummy(client->adapter,
+					client->addr + 1);
+		if (!monza->client[1]) {
+			dev_err(&client->dev, "address 0x%02x unavailable\n",
+					client->addr + 1);
+			err = -EADDRINUSE;
+			goto err_struct;
+		}
+	}
+
+	/* identify the real chip and address */
+	err = monza_check_ids(monza);
+	if (err) {
+		dev_err(&client->dev, " detect chip failure.\n");
+		goto err_clients;
+	}
+
+	/* buffer (data + address at the beginning) */
+	monza->write_max = 4;
+	monza->writebuf = kmalloc(monza->write_max + 2, GFP_KERNEL);
+	if (!monza->writebuf) {
+		err = -ENOMEM;
+		goto err_clients;
+	}
+
+	/*
+	 * Export the EEPROM bytes through sysfs, since that's convenient.
+	 * By default, only root should see the data (maybe passwords etc)
+	 */
+	sysfs_bin_attr_init(&monza->bin);
+	monza->bin.attr.name = "monzax_data";
+	monza->bin.attr.mode = S_IRUSR | S_IWUSR;
+	monza->bin.read = monza_bin_read;
+	monza->bin.write = monza_bin_write;
+	if (monza->num_addr == MONZAX_2K_ADDR_NUM)
+		monza->bin.size = MONZAX_2K_BYTE_LEN;
+	else if (monza->num_addr == MONZAX_8K_ADDR_NUM)
+		monza->bin.size = MONZAX_8K_BYTE_LEN;
+	else {
+		err = -ENODEV;
+		goto err_bin;
+	}
+
+	err = sysfs_create_bin_file(&client->dev.kobj, &monza->bin);
+	if (err)
+		goto err_bin;
+
+	i2c_set_clientdata(client, monza);
+
+	monza->miscdev.minor	= MISC_DYNAMIC_MINOR;
+	monza->miscdev.name	= "monzax";
+	monza->miscdev.fops	= &monza_misc_fops;
+
+	if (misc_register(&monza->miscdev)) {
+		dev_err(&client->dev, "misc_register failed\n");
+		goto err_miscdev;
+	}
+
+	dev_info(&client->dev, "%zu byte %s EEPROM, %u bytes/write\n",
+		monza->bin.size, client->name, monza->write_max);
+
+	return 0;
+
+err_miscdev:
+	sysfs_remove_bin_file(&client->dev.kobj, &monza->bin);
+err_bin:
+	kfree(monza->writebuf);
+err_clients:
+	if (monza->client[1])
+		i2c_unregister_device(monza->client[1]);
+err_struct:
+	kfree(monza);
+err_out:
+	dev_err(&client->dev, "probe error %d\n", err);
+	return err;
+}
+
+static int monza_remove(struct i2c_client *client)
+{
+	struct monza_data *monza;
+
+	monza = i2c_get_clientdata(client);
+	misc_deregister(&monza->miscdev);
+	sysfs_remove_bin_file(&client->dev.kobj, &monza->bin);
+	kfree(monza->writebuf);
+
+	if (monza->client[1])
+		i2c_unregister_device(monza->client[1]);
+
+	kfree(monza);
+	return 0;
+}
+
+static struct i2c_driver monza_driver = {
+	.driver = {
+		.name = "monzax",
+		.owner = THIS_MODULE,
+		.acpi_match_table = ACPI_PTR(acpi_monza_ids),
+	},
+	.probe = monza_probe,
+	.remove = monza_remove,
+	.id_table = i2c_monza_ids,
+};
+
+static int __init monza_init(void)
+{
+	return i2c_add_driver(&monza_driver);
+}
+module_init(monza_init);
+
+static void __exit monza_exit(void)
+{
+	i2c_del_driver(&monza_driver);
+}
+module_exit(monza_exit);
+
+MODULE_AUTHOR("Jiantao Zhou<jiantao.zhou@intel.com>");
+MODULE_DESCRIPTION("MONZA-X-2K RFID chip driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/pti.c b/drivers/misc/pti.c
index f84ff0c..48c4227 100644
--- a/drivers/misc/pti.c
+++ b/drivers/misc/pti.c
@@ -21,6 +21,8 @@
  * compact JTAG, standard.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/interrupt.h>
@@ -30,12 +32,18 @@
 #include <linux/tty.h>
 #include <linux/tty_driver.h>
 #include <linux/pci.h>
-#include <linux/mutex.h>
+#include <linux/spinlock.h>
 #include <linux/miscdevice.h>
 #include <linux/pti.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
 
+#include <asm/intel_scu_ipc.h>
+
+#ifdef CONFIG_INTEL_PTI_STM
+#include "stm.h"
+#endif
+
 #define DRIVERNAME		"pti"
 #define PCINAME			"pciPTI"
 #define TTYNAME			"ttyPTI"
@@ -55,6 +63,55 @@
 #define APERTURE_14		0x3800000 /* offset to first OS write addr */
 #define APERTURE_LEN		0x400000  /* address length */
 
+#define SMIP_PTI_OFFSET	0x30C  /* offset to PTI config in MIP header */
+#define SMIP_PTI_EN	(1<<7) /* PTI enable bit in PTI configuration */
+
+#define PTI_PNW_PCI_ID			0x082B
+#define PTI_CLV_PCI_ID			0x0900
+#define PTI_TNG_PCI_ID			0x119F
+
+#define INTEL_PTI_PCI_DEVICE(dev, info) {	\
+	.vendor = PCI_VENDOR_ID_INTEL,		\
+	.device = dev,				\
+	.subvendor = PCI_ANY_ID,		\
+	.subdevice = PCI_ANY_ID,		\
+	.driver_data = (unsigned long) info }
+
+struct pti_device_info {
+	u8 pci_bar;
+	u8 scu_secure_mode:1;
+	u8 has_d8_d16_support:1;
+};
+
+static const struct pti_device_info intel_pti_pnw_info = {
+	.pci_bar = 1,
+	.scu_secure_mode = 0,
+	.has_d8_d16_support = 0,
+};
+
+static const struct pti_device_info intel_pti_clv_info = {
+	.pci_bar = 1,
+	.scu_secure_mode = 1,
+	.has_d8_d16_support = 0,
+};
+
+static const struct pti_device_info intel_pti_tng_info = {
+	.pci_bar = 2,
+	.scu_secure_mode = 0,
+	.has_d8_d16_support = 1,
+};
+
+static DEFINE_PCI_DEVICE_TABLE(pci_ids) = {
+	INTEL_PTI_PCI_DEVICE(PTI_PNW_PCI_ID, &intel_pti_pnw_info),
+	INTEL_PTI_PCI_DEVICE(PTI_CLV_PCI_ID, &intel_pti_clv_info),
+	INTEL_PTI_PCI_DEVICE(PTI_TNG_PCI_ID, &intel_pti_tng_info),
+	{0}
+};
+
+#define GET_PCI_BAR(pti_dev) (pti_dev->pti_dev_info->pci_bar)
+#define HAS_SCU_SECURE_MODE(pti_dev) (pti_dev->pti_dev_info->scu_secure_mode)
+#define HAS_D8_D16_SUPPORT(pti_dev) (pti_dev->pti_dev_info->has_d8_d16_support)
+
 struct pti_tty {
 	struct pti_masterchannel *mc;
 };
@@ -67,19 +124,22 @@
 	u8 ia_app[MAX_APP_IDS];
 	u8 ia_os[MAX_OS_IDS];
 	u8 ia_modem[MAX_MODEM_IDS];
+	struct pti_device_info *pti_dev_info;
+#ifdef CONFIG_INTEL_PTI_STM
+	struct stm_dev stm;
+#endif
 };
 
+static unsigned int stm_enabled;
+module_param(stm_enabled, uint, 0600);
+MODULE_PARM_DESC(stm_enabled, "set to 1 to enable stm");
+
 /*
  * This protects access to ia_app, ia_os, and ia_modem,
  * which keeps track of channels allocated in
  * an aperture write id.
  */
-static DEFINE_MUTEX(alloclock);
-
-static const struct pci_device_id pci_ids[] = {
-		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x82B)},
-		{0}
-};
+static DEFINE_SPINLOCK(cid_lock);
 
 static struct tty_driver *pti_tty_driver;
 static struct pti_dev *drv_data;
@@ -95,6 +155,8 @@
  *  @buf: Data being written to the HW that will ultimately be seen
  *        in a debugging tool (Fido, Lauterbach).
  *  @len: Size of buffer.
+ *  @eom: End Of Message indication. If true, DTS shall be used for
+ *        the last bytes of the message.
  *
  *  Since each aperture is specified by a unique
  *  master/channel ID, no two processes will be writing
@@ -106,12 +168,15 @@
  */
 static void pti_write_to_aperture(struct pti_masterchannel *mc,
 				  u8 *buf,
-				  int len)
+				  int len,
+				  bool eom)
 {
 	int dwordcnt;
 	int final;
 	int i;
 	u32 ptiword;
+	u16 ptishort;
+	u8  ptibyte;
 	u32 __iomem *aperture;
 	u8 *p = buf;
 
@@ -135,13 +200,40 @@
 		iowrite32(ptiword, aperture);
 	}
 
-	aperture += PTI_LASTDWORD_DTS;	/* adding DTS signals that is EOM */
+	if (!HAS_D8_D16_SUPPORT(drv_data)) {
+		aperture += eom ? PTI_LASTDWORD_DTS : 0; /* DTS signals EOM */
+		ptiword = 0;
+		for (i = 0; i < final; i++)
+			ptiword |= *p++ << (24-(8*i));
+		iowrite32(ptiword, aperture);
+	} else {
+		switch (final) {
 
-	ptiword = 0;
-	for (i = 0; i < final; i++)
-		ptiword |= *p++ << (24-(8*i));
+		case 3:
+			ptishort = be16_to_cpu(*(u16 *)p);
+			p += 2;
+			iowrite16(ptishort, aperture);
+			/* fall-through */
+		case 1:
+			ptibyte = *(u8 *)p;
+			aperture += eom ? PTI_LASTDWORD_DTS : 0;
+			iowrite8(ptibyte, aperture);
+			break;
+		case 2:
+			ptishort = be16_to_cpu(*(u16 *)p);
+			aperture += eom ? PTI_LASTDWORD_DTS : 0;
+			iowrite16(ptishort, aperture);
+			break;
+		case 4:
+			ptiword = be32_to_cpu(*(u32 *)p);
+			aperture += eom ? PTI_LASTDWORD_DTS : 0;
+			iowrite32(ptiword, aperture);
+			break;
+		default:
+			break;
+		}
+	}
 
-	iowrite32(ptiword, aperture);
 	return;
 }
 
@@ -177,10 +269,12 @@
 	u8 control_frame[CONTROL_FRAME_LEN];
 
 	if (!thread_name) {
-		if (!in_interrupt())
-			get_task_comm(comm, current);
+		if (in_irq())
+			strncpy(comm, "hardirq", sizeof(comm));
+		else if (in_softirq())
+			strncpy(comm, "softirq", sizeof(comm));
 		else
-			strncpy(comm, "Interrupt", TASK_COMM_LEN);
+			strncpy(comm, current->comm, sizeof(comm));
 
 		/* Absolutely ensure our buffer is zero terminated. */
 		comm[TASK_COMM_LEN-1] = 0;
@@ -194,7 +288,8 @@
 
 	snprintf(control_frame, CONTROL_FRAME_LEN, control_format, mc->master,
 		mc->channel, thread_name_p);
-	pti_write_to_aperture(&mccontrol, control_frame, strlen(control_frame));
+	pti_write_to_aperture(&mccontrol, control_frame,
+			      strlen(control_frame), true);
 }
 
 /**
@@ -216,7 +311,7 @@
 						int len)
 {
 	pti_control_frame_built_and_sent(mc, NULL);
-	pti_write_to_aperture(mc, (u8 *)buf, len);
+	pti_write_to_aperture(mc, (u8 *)buf, len, true);
 }
 
 /**
@@ -224,7 +319,7 @@
  *
  * @id_array:    an array of bits representing what channel
  *               id's are allocated for writing.
- * @max_ids:     The max amount of available write IDs to use.
+ * @array_size:  array size in bytes
  * @base_id:     The starting SW channel ID, based on the Intel
  *               PTI arch.
  * @thread_name: The thread name associated with the master / channel or
@@ -239,37 +334,40 @@
  * every master there are 128 channel id's.
  */
 static struct pti_masterchannel *get_id(u8 *id_array,
-					int max_ids,
+					int array_size,
 					int base_id,
 					const char *thread_name)
 {
 	struct pti_masterchannel *mc;
-	int i, j, mask;
+	unsigned long flags;
+	unsigned long *addr = (unsigned long *)id_array;
+	unsigned long num_bits = array_size*8, n;
 
-	mc = kmalloc(sizeof(struct pti_masterchannel), GFP_KERNEL);
+	/* Allocate memory with GFP_ATOMIC flag because this API
+	 * can be called in interrupt context.
+	 */
+	mc = kmalloc(sizeof(struct pti_masterchannel), GFP_ATOMIC);
 	if (mc == NULL)
 		return NULL;
 
-	/* look for a byte with a free bit */
-	for (i = 0; i < max_ids; i++)
-		if (id_array[i] != 0xff)
-			break;
-	if (i == max_ids) {
+	/* Find the first available channel ID (first zero bit) in the
+	 * bitdfield and toggle the corresponding bit to reserve it.
+	 * This must be done under spinlock with interrupts disabled
+	 * to ensure there is no concurrent access to the bitfield.
+	 */
+	spin_lock_irqsave(&cid_lock, flags);
+	n = find_first_zero_bit(addr, num_bits);
+	if (n >= num_bits) {
 		kfree(mc);
+		spin_unlock_irqrestore(&cid_lock, flags);
 		return NULL;
 	}
-	/* find the bit in the 128 possible channel opportunities */
-	mask = 0x80;
-	for (j = 0; j < 8; j++) {
-		if ((id_array[i] & mask) == 0)
-			break;
-		mask >>= 1;
-	}
+	change_bit(n, addr);
+	spin_unlock_irqrestore(&cid_lock, flags);
 
-	/* grab it */
-	id_array[i] |= mask;
 	mc->master  = base_id;
-	mc->channel = ((i & 0xf)<<3) + j;
+	mc->channel = n;
+
 	/* write new master Id / channel Id allocation to channel control */
 	pti_control_frame_built_and_sent(mc, thread_name);
 	return mc;
@@ -306,7 +404,8 @@
 {
 	struct pti_masterchannel *mc;
 
-	mutex_lock(&alloclock);
+	if (drv_data == NULL)
+		return NULL;
 
 	switch (type) {
 
@@ -328,7 +427,6 @@
 		mc = NULL;
 	}
 
-	mutex_unlock(&alloclock);
 	return mc;
 }
 EXPORT_SYMBOL_GPL(pti_request_masterchannel);
@@ -343,29 +441,41 @@
  */
 void pti_release_masterchannel(struct pti_masterchannel *mc)
 {
-	u8 master, channel, i;
-
-	mutex_lock(&alloclock);
+	u8 master, channel;
 
 	if (mc) {
 		master = mc->master;
 		channel = mc->channel;
 
-		if (master == APP_BASE_ID) {
-			i = channel >> 3;
-			drv_data->ia_app[i] &=  ~(0x80>>(channel & 0x7));
-		} else if (master == OS_BASE_ID) {
-			i = channel >> 3;
-			drv_data->ia_os[i] &= ~(0x80>>(channel & 0x7));
-		} else {
-			i = channel >> 3;
-			drv_data->ia_modem[i] &= ~(0x80>>(channel & 0x7));
+		switch (master) {
+
+		/* Note that clear_bit is atomic, so there is no need
+		 * to use cid_lock here to protect the bitfield
+		 */
+
+		case APP_BASE_ID:
+			clear_bit(mc->channel,
+				  (unsigned long *)drv_data->ia_app);
+			break;
+
+		case OS_BASE_ID:
+			clear_bit(mc->channel,
+				  (unsigned long *)drv_data->ia_os);
+			break;
+
+		case MODEM_BASE_ID:
+			clear_bit(mc->channel,
+				  (unsigned long *)drv_data->ia_modem);
+			break;
+
+		default:
+			pr_err("%s(%d) : Invalid master ID!\n",
+			       __func__, __LINE__);
+			break;
 		}
 
 		kfree(mc);
 	}
-
-	mutex_unlock(&alloclock);
 }
 EXPORT_SYMBOL_GPL(pti_release_masterchannel);
 
@@ -379,8 +489,10 @@
  *         Null value will return with no write occurring.
  * @count: Size of buf. Value of 0 or a negative number will
  *         return with no write occuring.
+ * @eom:   End Of Message indication. If true, DTS shall be used
+ *         for last bytes of the message.
  */
-void pti_writedata(struct pti_masterchannel *mc, u8 *buf, int count)
+void pti_writedata(struct pti_masterchannel *mc, u8 *buf, int count, bool eom)
 {
 	/*
 	 * since this function is exported, this is treated like an
@@ -388,7 +500,7 @@
 	 * be checked for validity.
 	 */
 	if ((mc != NULL) && (buf != NULL) && (count > 0))
-		pti_write_to_aperture(mc, buf, count);
+		pti_write_to_aperture(mc, buf, count, eom);
 	return;
 }
 EXPORT_SYMBOL_GPL(pti_writedata);
@@ -518,7 +630,7 @@
 {
 	struct pti_tty *pti_tty_data = tty->driver_data;
 	if ((pti_tty_data != NULL) && (pti_tty_data->mc != NULL)) {
-		pti_write_to_aperture(pti_tty_data->mc, (u8 *)buf, len);
+		pti_write_to_aperture(pti_tty_data->mc, (u8 *)buf, len, true);
 		return len;
 	}
 	/*
@@ -636,7 +748,7 @@
 			return n ? n : -EFAULT;
 		}
 
-		pti_write_to_aperture(mc, kbuf, size);
+		pti_write_to_aperture(mc, kbuf, size, true);
 		n  += size;
 		tmp += size;
 
@@ -780,6 +892,36 @@
 	.shutdown = pti_port_shutdown,
 };
 
+
+#ifdef CONFIG_INTEL_SCU_IPC
+/**
+ * pti_scu_check()- Used to check whether the PTI is enabled on SCU
+ *
+ * Returns:
+ *	0 if PTI is enabled
+ *	otherwise, error value
+ */
+static int pti_scu_check(void)
+{
+	int retval;
+	u8 smip_pti;
+
+	retval = intel_scu_ipc_read_mip(&smip_pti, 1, SMIP_PTI_OFFSET, 1);
+	if (retval) {
+		pr_err("%s(%d): Mip read failed (retval = %d)\n",
+		       __func__, __LINE__, retval);
+		return retval;
+	}
+	if (!(smip_pti & SMIP_PTI_EN)) {
+		pr_info("%s(%d): PTI disabled in MIP header\n",
+			__func__, __LINE__);
+		return -EPERM;
+	}
+
+	return 0;
+}
+#endif /* CONFIG_INTEL_SCU_IPC */
+
 /*
  * Note the _probe() call sets everything up and ties the char and tty
  * to successfully detecting the PTI device on the pci bus.
@@ -801,7 +943,6 @@
 {
 	unsigned int a;
 	int retval = -EINVAL;
-	int pci_bar = 1;
 
 	dev_dbg(&pdev->dev, "%s %s(%d): PTI PCI ID %04x:%04x\n", __FILE__,
 			__func__, __LINE__, pdev->vendor, pdev->device);
@@ -831,9 +972,21 @@
 			__func__, __LINE__);
 		goto err_disable_pci;
 	}
-	drv_data->pti_addr = pci_resource_start(pdev, pci_bar);
 
-	retval = pci_request_region(pdev, pci_bar, dev_name(&pdev->dev));
+	drv_data->pti_dev_info = (struct pti_device_info *)ent->driver_data;
+
+#ifdef CONFIG_INTEL_SCU_IPC
+	if (HAS_SCU_SECURE_MODE(drv_data)) {
+		retval = pti_scu_check();
+		if (retval != 0)
+			goto err_free_dd;
+	}
+#endif /* CONFIG_INTEL_SCU_IPC */
+
+	drv_data->pti_addr = pci_resource_start(pdev, GET_PCI_BAR(drv_data));
+
+	retval = pci_request_region(pdev, GET_PCI_BAR(drv_data),
+				    dev_name(&pdev->dev));
 	if (retval != 0) {
 		dev_err(&pdev->dev,
 			"%s(%d): pci_request_region() returned error %d\n",
@@ -849,6 +1002,14 @@
 		goto err_rel_reg;
 	}
 
+#ifdef CONFIG_INTEL_PTI_STM
+	/* Initialize STM resources */
+	if ((stm_enabled) && (stm_dev_init(pdev, &drv_data->stm) != 0)) {
+		retval = -ENOMEM;
+		goto err_rel_reg;
+	}
+#endif
+
 	pci_set_drvdata(pdev, drv_data);
 
 	for (a = 0; a < PTITTY_MINOR_NUM; a++) {
@@ -863,9 +1024,10 @@
 
 	return 0;
 err_rel_reg:
-	pci_release_region(pdev, pci_bar);
+	pci_release_region(pdev, GET_PCI_BAR(drv_data));
 err_free_dd:
 	kfree(drv_data);
+	drv_data = NULL;
 err_disable_pci:
 	pci_disable_device(pdev);
 err_unreg_misc:
@@ -891,10 +1053,14 @@
 		tty_port_destroy(&drv_data->port[a]);
 	}
 
+#ifdef CONFIG_INTEL_PTI_STM
+	if (stm_enabled)
+		stm_dev_clean(pdev, &drv_data->stm);
+#endif
 	iounmap(drv_data->pti_ioaddr);
+	pci_release_region(pdev, GET_PCI_BAR(drv_data));
 	pci_set_drvdata(pdev, NULL);
 	kfree(drv_data);
-	pci_release_region(pdev, 1);
 	pci_disable_device(pdev);
 
 	misc_deregister(&pti_char_driver);
diff --git a/drivers/misc/stm.c b/drivers/misc/stm.c
new file mode 100644
index 0000000..69edc1e
--- /dev/null
+++ b/drivers/misc/stm.c
@@ -0,0 +1,487 @@
+/*
+ *  stm.c - MIPI STM Debug Unit driver
+ *
+ *  Copyright (C) Intel 2013
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * The STM (Sytem Trace Macro) Unit driver configure trace output
+ * to the Intel Tangier PTI port and DWC3 USB xHCI controller
+ * out of the mobile device for analysis with a debugging tool
+ * (Lauterbach, Fido). This is part of a solution for the MIPI P1149.7,
+ * compact JTAG, standard and USB Debug-Class
+ *
+ * This header file will allow other parts of the OS to use the
+ * interface to write out it's contents for debugging a mobile system.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/sdm.h>
+
+#include "stm.h"
+#include <asm/intel-mid.h>
+#include <asm/intel_soc_debug.h>
+#include "../usb/dwc3/core.h"
+
+/* STM Registers */
+#define STM_CTRL		0x0000
+#define STM_USB3DBGGTHR		0x0008
+#define STM_MASMSK0		0x0010
+#define STM_MASMSK1		0x0018
+#define STM_USBTO		0x0020
+#define STM_CHMSK		0x0080
+#define STM_AGTBAR0		0x00C0
+#define STM_AGTBAR1		0x0140
+#define STM_AGTBAR2		0x01C0
+#define STM_AGTBAR3		0x0240
+#define STM_AGTBAR4		0x02C0
+#define STM_AGTBAR5		0x0340
+#define STM_AGTBAR6		0x03C0
+#define STM_AGTBAR7		0x0440
+#define STM_AGTBAR8		0x04C0
+#define STM_AGTBAR9		0x0540
+#define STM_AGTBAR10		0x05C0
+#define STM_AGTBAR11		0x0640
+
+/*
+ * STM registers
+ */
+#define STM_REG_BASE		0x0        /* registers base offset */
+#define STM_REG_LEN		0x20       /* address length */
+/*
+ * TRB buffers
+ */
+#define STM_TRB_BASE		0x400      /* TRB base offset */
+#define STM_TRB_LEN		0x100	   /* address length */
+#define STM_TRB_NUM		16         /* number of TRBs */
+
+/*
+ * This protects R/W to stm registers
+ */
+static DEFINE_MUTEX(stmlock);
+
+static struct stm_dev *_dev_stm;
+
+static inline u32 stm_readl(void __iomem *base, u32 offset)
+{
+	return readl(base + offset);
+}
+
+static inline void stm_writel(void __iomem *base, u32 offset, u32 value)
+{
+	writel(value, base + offset);
+}
+
+/**
+ * stm_kernel_set_out()-
+ * Kernel API function used to
+ * set STM output configuration to PTI or USB.
+ *
+ * @bus_type:
+ *	0 = PTI 4-bits legacy end user
+ *	1 = PTI 4-bits NiDnT
+ *	2 = PTI 16-bits
+ *	3 = PTI 12-bits
+ *	4 = PTI 8-bits
+ *	15 = USB Debug-Class (DvC.Trace)
+ *
+ */
+int stm_kernel_set_out(int bus_type)
+{
+
+	struct stm_dev *drv_stm = _dev_stm;
+
+	/*
+	 * since this function is exported, this is treated like an
+	 * API function, thus, all parameters should
+	 * be checked for validity.
+	 */
+	if (drv_stm == NULL)
+		return 0;
+
+	mutex_lock(&stmlock);
+
+	drv_stm->stm_ctrl_hwreg.reg_word =
+		stm_readl(drv_stm->stm_ioaddr, (u32)STM_CTRL);
+
+	switch (bus_type) {
+	case STM_PTI_4BIT_LEGACY:
+	case STM_PTI_4BIT_NIDNT:
+	case STM_PTI_16BIT:
+	case STM_PTI_12BIT:
+	case STM_PTI_8BIT:
+		drv_stm->stm_ctrl_hwreg.pti_out_en = true;
+		drv_stm->stm_ctrl_hwreg.usb_debug_en = false;
+		drv_stm->stm_ctrl_hwreg.pti_out_mode_sel = bus_type;
+		stm_writel(drv_stm->stm_ioaddr, (u32)STM_CTRL,
+			   drv_stm->stm_ctrl_hwreg.reg_word);
+		break;
+	case STM_USB:
+		drv_stm->stm_ctrl_hwreg.pti_out_en = false;
+		drv_stm->stm_ctrl_hwreg.usb_debug_en = true;
+		stm_writel(drv_stm->stm_ioaddr, (u32)STM_CTRL,
+			   drv_stm->stm_ctrl_hwreg.reg_word);
+		break;
+	default:
+		/* N/A */
+		break;
+	}
+	mutex_unlock(&stmlock);
+
+	return 1;
+}
+EXPORT_SYMBOL_GPL(stm_kernel_set_out);
+
+/**
+ * stm_kernel_get_out()-
+ * Kernel API function used to get
+ * STM output cofiguration PTI or USB.
+ *
+ */
+int stm_kernel_get_out(void)
+{
+	struct stm_dev *drv_stm = _dev_stm;
+	int ret = -EOPNOTSUPP;
+
+	if (drv_stm == NULL)
+		return -EOPNOTSUPP;
+
+	mutex_lock(&stmlock);
+
+	drv_stm->stm_ctrl_hwreg.reg_word =
+		stm_readl(drv_stm->stm_ioaddr, (u32)STM_CTRL);
+
+	if (!drv_stm->stm_ctrl_hwreg.usb_debug_en) {
+		if (drv_stm->stm_ctrl_hwreg.pti_out_en)
+			ret = (int)drv_stm->stm_ctrl_hwreg.pti_out_mode_sel;
+	} else {
+		ret = (int)STM_USB;
+	}
+	mutex_unlock(&stmlock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(stm_kernel_get_out);
+
+/**
+ * stm_set_out() - 'out' parameter set function from 'STM' module
+ *
+ * called when writing to 'out' parameter from 'STM' module in sysfs
+ */
+static int stm_set_out(const char *val, struct kernel_param *kp)
+{
+	int bus_type_value;
+	int ret = -EINVAL;
+
+	if (sscanf(val, "%2d", &bus_type_value) != 1)
+		return ret;
+
+	return stm_kernel_set_out(bus_type_value);
+}
+
+/**
+ * stm_get_out() - 'out' parameter get function from 'STM' module
+ *
+ * called when reading 'out' parameter from 'STM' module in sysfs
+ */
+static int stm_get_out(char *buffer, struct kernel_param *kp)
+{
+	int i;
+
+	i = stm_kernel_get_out();
+	if (i == -EOPNOTSUPP) {
+		buffer[0] = '\0';
+		return 0;
+	}
+
+	return sprintf(buffer, "%2d", i);
+}
+
+/**
+ * stm_init() - initialize stmsub3dbgthr register
+ *
+ * @return - 0 on Success
+ */
+static int stm_init(void)
+{
+	struct stm_dev *stm = _dev_stm;
+	struct stm_usb3_ctrl *usb3dbg;
+
+	if (!stm)
+		return -ENODEV;
+
+	usb3dbg = &stm->stm_usb3_hwreg;
+	usb3dbg->reg_word = stm_readl(stm->stm_ioaddr, (u32)STM_USB3DBGGTHR);
+
+	usb3dbg->reg_word = 0xFF;
+
+	stm_writel(stm->stm_ioaddr, (u32)STM_USB3DBGGTHR, usb3dbg->reg_word);
+
+	return 0;
+}
+
+/**
+ * stm_alloc_static_trb_pool() - set stm trb pool dma_addr and return
+ * trb_pool
+ *
+ * @dma_addr - trb pool dma physical address to set
+ * @return - trb pool address ioremaped pointer
+ */
+static void *stm_alloc_static_trb_pool(dma_addr_t *dma_addr)
+{
+	struct stm_dev *stm = _dev_stm;
+	if (!stm)
+		return NULL;
+
+	*dma_addr = stm->stm_trb_base;
+	return stm->trb_ioaddr;
+}
+
+static void ebc_io_free_static_trb_pool(void)
+{
+	/* Nothing to do, HW TRB */
+}
+
+static int stm_xfer_start(void)
+{
+	struct stm_dev *stm = _dev_stm;
+	struct stm_ctrl *stm_ctrl;
+	u32 reg_word;
+
+	if (!stm)
+		return -ENODEV;
+
+	/* REVERTME : filter PUNIT and SCU MasterID when switching to USB */
+	if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_ANNIEDALE) {
+		pr_info("%s\n REVERTME : filter PUNIT and SCU MasterID\n", __func__);
+		reg_word = stm_readl(stm->stm_ioaddr, (u32)STM_MASMSK1);
+		reg_word |= 0x28;
+		stm_writel(stm->stm_ioaddr, (u32)STM_MASMSK1, reg_word);
+
+		pr_info("%s\n REVERTME : USBTO\n", __func__);
+		reg_word = stm_readl(stm->stm_ioaddr, (u32)STM_USBTO);
+		reg_word |= 0x01;
+		stm_writel(stm->stm_ioaddr, (u32)STM_USBTO, reg_word);
+	}
+
+	stm_ctrl = &stm->stm_ctrl_hwreg;
+	stm_ctrl->reg_word = stm_readl(stm->stm_ioaddr, (u32)STM_CTRL);
+
+	stm_ctrl->usb_debug_en = true;
+	stm_ctrl->pti_out_en = false;
+
+	stm_writel(stm->stm_ioaddr, (u32)STM_CTRL, stm_ctrl->reg_word);
+	pr_info("%s\n switch STM output to DvC.Trace ", __func__);
+
+	return 0;
+}
+
+static int stm_xfer_stop(void)
+{
+	struct stm_dev *stm = _dev_stm;
+	struct stm_ctrl *stm_ctrl;
+
+	if (!stm)
+		return -ENODEV;
+
+	stm_ctrl = &stm->stm_ctrl_hwreg;
+	stm_ctrl->reg_word = stm_readl(stm->stm_ioaddr, (u32)STM_CTRL);
+
+	stm_ctrl->usb_debug_en = false;
+	stm_ctrl->pti_out_en = true;
+
+	stm_writel(stm->stm_ioaddr, (u32)STM_CTRL, stm_ctrl->reg_word);
+	pr_info("%s\n switch STM to 4bits MIPI PTI (default)", __func__);
+
+	return 0;
+}
+
+static struct ebc_io stm_ebc_io_ops = {
+	.name = "stmbuf4kB",
+	.epname = "ep1in",
+	.epnum = 3,
+	.is_ondemand = 1,
+	.static_trb_pool_size = 4,
+	.init = stm_init,
+	.alloc_static_trb_pool = stm_alloc_static_trb_pool,
+	.free_static_trb_pool = ebc_io_free_static_trb_pool,
+	.xfer_start = stm_xfer_start,
+	.xfer_stop = stm_xfer_stop,
+};
+
+#define EXI_IN_TRB_POOL_OFFSET (4*16)
+static void *exi_inbound_alloc_static_trb_pool(dma_addr_t *dma_addr)
+{
+	struct stm_dev *stm = _dev_stm;
+	if (!stm)
+		return NULL;
+
+	*dma_addr = stm->stm_trb_base + EXI_IN_TRB_POOL_OFFSET;
+	return stm->trb_ioaddr + EXI_IN_TRB_POOL_OFFSET;
+}
+
+static struct ebc_io exi_in_ebc_io_ops = {
+	.name = "exi-inbound",
+	.epname = "ep8in",
+	.epnum = 17,
+	.is_ondemand = 0,
+	.static_trb_pool_size = 4,
+	.alloc_static_trb_pool = exi_inbound_alloc_static_trb_pool,
+	.free_static_trb_pool = ebc_io_free_static_trb_pool,
+};
+
+#define EXI_OUT_TRB_POOL_OFFSET (8*16)
+static void *exi_outbound_alloc_static_trb_pool(dma_addr_t *dma_addr)
+{
+	struct stm_dev *stm = _dev_stm;
+	if (!stm)
+		return NULL;
+
+	*dma_addr = stm->stm_trb_base + EXI_OUT_TRB_POOL_OFFSET;
+	return stm->trb_ioaddr + EXI_OUT_TRB_POOL_OFFSET;
+}
+
+static struct ebc_io exi_out_ebc_io_ops = {
+	.name = "exi-outbound",
+	.epname = "ep8out",
+	.epnum = 16,
+	.is_ondemand = 0,
+	.static_trb_pool_size = 2,
+	.alloc_static_trb_pool = exi_outbound_alloc_static_trb_pool,
+	.free_static_trb_pool = ebc_io_free_static_trb_pool,
+};
+
+int stm_is_enabled()
+{
+	return (_dev_stm != NULL);
+}
+EXPORT_SYMBOL_GPL(stm_is_enabled);
+
+/**
+ * stm_dev_init()- Used to setup STM resources on the pci bus.
+ *
+ * @pdev- pci_dev struct values for pti device.
+ * @stm- stm_dev struct managing stm resources
+ *
+ * Returns:
+ *	0 for success
+ *	otherwise, error
+ */
+int stm_dev_init(struct pci_dev *pdev,
+		 struct stm_dev *stm)
+{
+	int retval = 0;
+	int pci_bar = 0;
+
+	if (!cpu_has_debug_feature(DEBUG_FEATURE_PTI))
+		return -ENODEV;
+
+	dev_dbg(&pdev->dev, "%s %s(%d): STM PCI ID %04x:%04x\n", __FILE__,
+		__func__, __LINE__, pdev->vendor, pdev->device);
+
+	stm->stm_addr = pci_resource_start(pdev, pci_bar);
+
+	retval = pci_request_region(pdev, pci_bar, dev_name(&pdev->dev));
+	if (retval != 0) {
+		dev_err(&pdev->dev,
+			"%s(%d): pci_request_region() returned error %d\n",
+			__func__, __LINE__, retval);
+		return retval;
+	}
+	pr_info("stm add %lx\n", stm->stm_addr);
+
+	stm->stm_reg_base = stm->stm_addr+STM_REG_BASE;
+	stm->stm_ioaddr = ioremap_nocache((u32)stm->stm_reg_base,
+					  STM_REG_LEN);
+	if (!stm->stm_ioaddr) {
+		retval = -ENOMEM;
+		goto out_release_region;
+	}
+
+	stm->stm_trb_base = stm->stm_addr+STM_TRB_BASE;
+	stm->trb_ioaddr = ioremap_nocache((u32)stm->stm_trb_base,
+					  STM_TRB_LEN);
+	if (!stm->trb_ioaddr) {
+		retval = -ENOMEM;
+		goto out_iounmap_stm_ioaddr;
+	}
+
+	stm->stm_ctrl_hwreg.reg_word = stm_readl(stm->stm_ioaddr,
+						 (u32)STM_CTRL);
+	stm->stm_usb3_hwreg.reg_word = stm_readl(stm->stm_ioaddr,
+						 (u32)STM_USB3DBGGTHR);
+
+	_dev_stm = stm;
+
+	dwc3_register_io_ebc(&stm_ebc_io_ops);
+	dwc3_register_io_ebc(&exi_in_ebc_io_ops);
+	dwc3_register_io_ebc(&exi_out_ebc_io_ops);
+
+	pr_info("successfully registered ebc io ops\n");
+
+	return retval;
+
+out_iounmap_stm_ioaddr:
+	pci_iounmap(pdev, stm->stm_ioaddr);
+
+out_release_region:
+	pci_release_region(pdev, pci_bar);
+
+	_dev_stm = NULL;
+	return retval;
+
+}
+EXPORT_SYMBOL_GPL(stm_dev_init);
+
+/**
+ * stm_dev_clean()- Driver exit method to free STM resources from
+ *		   PCI bus.
+ * @pdev: variable containing pci info of STM.
+ * @dev_stm: stm_dev resources to clean.
+ */
+void stm_dev_clean(struct pci_dev *pdev,
+		   struct stm_dev *dev_stm)
+{
+	int pci_bar = 0;
+
+	/* If STM driver was not initialized properly,
+	 * there is nothing to do.
+	 */
+	if (_dev_stm == NULL)
+		return;
+
+	dwc3_unregister_io_ebc(&stm_ebc_io_ops);
+	dwc3_unregister_io_ebc(&exi_in_ebc_io_ops);
+	dwc3_unregister_io_ebc(&exi_out_ebc_io_ops);
+
+	if (dev_stm != NULL) {
+		pci_iounmap(pdev, dev_stm->stm_ioaddr);
+		pci_iounmap(pdev, dev_stm->trb_ioaddr);
+	}
+
+	pci_release_region(pdev, pci_bar);
+
+	_dev_stm = NULL;
+}
+EXPORT_SYMBOL_GPL(stm_dev_clean);
+
+module_param_call(stm_out, stm_set_out, stm_get_out, NULL, 0644);
+MODULE_PARM_DESC(stm_out, "configure System Trace Macro output");
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Florent Pirou");
+MODULE_DESCRIPTION("STM Driver");
diff --git a/drivers/misc/stm.h b/drivers/misc/stm.h
new file mode 100644
index 0000000..1fb2d2e
--- /dev/null
+++ b/drivers/misc/stm.h
@@ -0,0 +1,114 @@
+/*
+ * stm.h
+ *
+ *  Copyright (C) Intel 2011
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * The STM (Sytem Trace Macro) Unit driver configure trace output
+ * to the Intel Tangier PTI port and DWC3 USB xHCI controller
+ * out of the mobile device for analysis with a debugging tool
+ * (Lauterbach, Fido). This is part of a solution for the MIPI P1149.7,
+ * compact JTAG, standard and USB Debug-Class
+ *
+ * This header file will allow other parts of the OS to use the
+ * interface to write out it's contents for debugging a mobile system.
+ */
+
+#ifndef _STM_H
+#define _STM_H
+
+#include <linux/pci.h>
+
+/* STM_CTRL register bitmap */
+/**
+ * struct stm_ctrl - STM control block
+ * @usb_debug_en : STM needs to redirece the trace packet to the USB3
+ * @pti_io_idle_threshold : threshold for disabling the IO clock.
+ * @pkt_transfer_size : asserts the *buff_avail signal after it has
+ * 1 or 2 KB of data in buffer
+ * @dis_dcu7_use : disables the useage of DCU7 instead of PTI_Disable
+ * @en_sw_ms : enables software master usage
+ * @mst_id_en : enables the PTI unit to suppress sending the Master Command
+ * @d64_cmd_en : PTI unit to use the D64 commands
+ * @pti_out_mode_sel
+ *	0 = PTI 4-bits legacy end user
+ *	1 = PTI 4-bits NiDnT
+ *	2 = PTI 16-bits
+ *	3 = PTI 12-bits
+ *	4 = PTI 8-bits
+ * @pti_out_en : PTI output enable muxselects that propagate
+ * to the FLIS to be enabled
+ * @lossy_mode_enable : Output Agent will continue to accept writes,
+ * even if the queuese are full. The data will be dropped and the
+ * dropped packet indicator will be incremented
+ * @time_stamp_enable : Enable time stamping the final packet in trace record.
+ */
+struct stm_ctrl {
+	union {
+		struct {
+			u32             time_stamp_enable:1;
+			u32             lossy_mode_enable:1;
+			u32             pti_out_en:1;
+			u32             reserved:1;
+			u32             pti_out_mode_sel:4;
+			u32             d64_cmd_en:1;
+			u32             mst_id_en:1;
+			u32             en_sw_ms:1;
+			u32             dis_dcu7_use:1;
+			u32             pkt_transfer_size:1;
+			u32             pti_io_idle_threshold:5;
+			u32             usb_debug_en:1;
+			u32             reserved31_19:13;
+		};
+		u32 reg_word;
+	};
+} __packed;
+
+/**
+ * struct stm_usb3_ctrl - STM buffer USB3 hardware EBC
+ * @region_closure_threshold : This is the threshold for closing
+ * the 1KB region in the debug trace buffer. STM will wait for the
+ * configured time as specified in this field and then closes the region.
+ * The unit of this field is in 64 us. Eg when this field value is set
+ * to 0xffff, then it indicates 2 ms
+ * @empty_packets_threshold : When STM does not have data to send,
+ * it can send empty packets to keep the USB3 alive. This is useful
+ * in case of ISOC traffic, because in this mode the wake up latency
+ * is high. STM will send the configured number of empty packets as
+ * specified in this field.
+ */
+struct stm_usb3_ctrl {
+	union {
+		struct {
+			u32             region_closure_threshold:15;
+			u32             empty_packets_threshold:6;
+			u32             reserved31_21:11;
+		};
+		u32 reg_word;
+	};
+} __packed;
+
+struct stm_dev {
+	unsigned long stm_addr;
+	unsigned long stm_reg_base;
+	unsigned long stm_trb_base;
+	void __iomem *stm_ioaddr;
+	void __iomem *trb_ioaddr;
+	struct stm_ctrl stm_ctrl_hwreg;
+	struct stm_usb3_ctrl stm_usb3_hwreg;
+};
+
+int stm_dev_init(struct pci_dev *pdev, struct stm_dev *dev_stm);
+void stm_dev_clean(struct pci_dev *pdev, struct stm_dev *dev_stm);
+
+#endif /* _STM_H */
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index de22a00..0c59ee5 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -68,6 +68,8 @@
 #define PACKED_CMD_VER	0x01
 #define PACKED_CMD_WR	0x02
 
+#define MAX_DTR_DDR50	52000000
+
 static DEFINE_MUTEX(block_mutex);
 
 /*
@@ -99,6 +101,7 @@
 #define MMC_BLK_CMD23	(1 << 0)	/* Can do SET_BLOCK_COUNT for multiblock */
 #define MMC_BLK_REL_WR	(1 << 1)	/* MMC Reliable write support */
 #define MMC_BLK_PACKED_CMD	(1 << 2)	/* MMC packed command support */
+#define MMC_BLK_SUSPENDED	(1 << 3)	/* MMC block device suspended */
 
 	unsigned int	usage;
 	unsigned int	read_only;
@@ -109,7 +112,8 @@
 #define MMC_BLK_WRITE		BIT(1)
 #define MMC_BLK_DISCARD		BIT(2)
 #define MMC_BLK_SECDISCARD	BIT(3)
-
+#define MMC_BLK_RPMB		BIT(4)
+#define MMC_BLK_USER		BIT(5)
 	/*
 	 * Only set in main mmc_blk_data associated
 	 * with mmc_card with mmc_set_drvdata, and keeps
@@ -149,6 +153,9 @@
 	packed->blocks = 0;
 }
 
+static int mmc_rpmb_req_process(struct mmc_blk_data *,
+		struct mmc_ioc_rpmb_req *);
+
 static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
 {
 	struct mmc_blk_data *md;
@@ -256,6 +263,10 @@
 	int ret;
 	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
 
+	if (!md)
+		return -EINVAL;
+
+
 	ret = snprintf(buf, PAGE_SIZE, "%d",
 		       get_disk_ro(dev_to_disk(dev)) ^
 		       md->read_only);
@@ -270,6 +281,9 @@
 	char *end;
 	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
 	unsigned long set = simple_strtoul(buf, &end, 0);
+	if (!md)
+		return -EINVAL;
+
 	if (end == buf) {
 		ret = -EINVAL;
 		goto out;
@@ -417,7 +431,7 @@
 	struct mmc_data data = {0};
 	struct mmc_request mrq = {NULL};
 	struct scatterlist sg;
-	int err;
+	int err = 0;
 	int is_rpmb = false;
 	u32 status = 0;
 
@@ -560,19 +574,61 @@
 	mmc_release_host(card->host);
 
 cmd_done:
-	mmc_blk_put(md);
+	if (md)
+		mmc_blk_put(md);
 cmd_err:
 	kfree(idata->buf);
 	kfree(idata);
 	return err;
 }
 
+static int mmc_blk_ioctl_rpmb_req(struct block_device *bdev,
+		struct mmc_ioc_rpmb_req __user *ptr)
+{
+	struct mmc_ioc_rpmb_req req;
+	struct mmc_blk_data *md = NULL;
+	int err = 0;
+
+	/* The caller must have CAP_SYS_RAWIO */
+	if (!capable(CAP_SYS_RAWIO))
+		return -EPERM;
+
+	memset(&req, 0, sizeof(req));
+
+	if (copy_from_user(&req, ptr, sizeof(req)))
+		return -EFAULT;
+
+	md = mmc_blk_get(bdev->bd_disk);
+	if (!md) {
+		pr_err("%s: NO eMMC block data. Try it later\n",
+				__func__);
+		return -ENODEV;
+	}
+	/* handle RPMB request event */
+	err = mmc_rpmb_req_process(md, &req);
+	if (err) {
+		mmc_blk_put(md);
+		return err;
+	}
+	/*
+	 * feedback to user space
+	 */
+	if (copy_to_user(ptr, &req, sizeof(req)))
+		return -EFAULT;
+
+	mmc_blk_put(md);
+	return 0;
+}
+
 static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
 	unsigned int cmd, unsigned long arg)
 {
 	int ret = -EINVAL;
 	if (cmd == MMC_IOC_CMD)
 		ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg);
+	else if (cmd == MMC_IOC_RPMB_REQ)
+		ret = mmc_blk_ioctl_rpmb_req(bdev,
+				(struct mmc_ioc_rpmb_req __user *)arg);
 	return ret;
 }
 
@@ -595,6 +651,30 @@
 #endif
 };
 
+static int mmc_rpmb_reset(struct mmc_host *host, u8 part_config)
+{
+	int err = 0;
+
+	if (!mmc_card_mmc(host->card))
+		return err;
+
+	if ((part_config & 0x07) == EXT_CSD_PART_CONFIG_ACC_RPMB &&
+	    mmc_card_hs200(host->card)) {
+		pr_info("%s: disable eMMC HS200 on rpmb part\n", __func__);
+		host->card->last_max_dtr = host->card->ext_csd.hs_max_dtr;
+		host->card->ext_csd.hs_max_dtr = MAX_DTR_DDR50;
+		err = mmc_hw_reset(host);
+	} else if ((part_config & 0x07) != EXT_CSD_PART_CONFIG_ACC_RPMB &&
+	    host->card->last_max_dtr > MAX_DTR_DDR50) {
+		pr_info("%s: enable eMMC HS200 on non-rpmb part\n", __func__);
+		host->card->ext_csd.hs_max_dtr = host->card->last_max_dtr;
+		host->card->last_max_dtr = 0;
+		err = mmc_hw_reset(host);
+	}
+
+	return err;
+}
+
 static inline int mmc_blk_part_switch(struct mmc_card *card,
 				      struct mmc_blk_data *md)
 {
@@ -610,6 +690,10 @@
 		part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
 		part_config |= md->part_type;
 
+		if (mmc_rpmb_reset(card->host, part_config))
+			pr_warn("%s: eMMC rpmb reset failed\n",
+				mmc_hostname(card->host));
+
 		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 				 EXT_CSD_PART_CONFIG, part_config,
 				 card->ext_csd.part_time);
@@ -858,9 +942,23 @@
 		return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
 				prev_cmd_status_valid, status);
 
+	/* Check for stop cmd errors */
+	if (mmc_card_sd(card) && brq->stop.error == -ETIMEDOUT)
+		return mmc_blk_cmd_error(req, "stop cmd", brq->stop.error,
+				prev_cmd_status_valid, status);
+
 	/* Data errors */
-	if (!brq->stop.error)
-		return ERR_CONTINUE;
+	if (!brq->stop.error) {
+		/*
+		 * Didn't re-send stop command, and if card status
+		 * is already in transfer state, let's have
+		 * a retry.
+		 */
+		if (R1_CURRENT_STATE(status) == R1_STATE_TRAN)
+			return ERR_RETRY;
+		else
+			return ERR_CONTINUE;
+	}
 
 	/* Now for stop errors.  These aren't fatal to the transfer. */
 	pr_err("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
@@ -883,10 +981,25 @@
 {
 	int err;
 
+	pr_err("%s: mmc_blk_reset: md->reset_done (0x%x), type (0x%x)\n",
+		md->disk->disk_name, md->reset_done, type);
+
 	if (md->reset_done & type)
 		return -EEXIST;
 
 	md->reset_done |= type;
+	/*
+	 * It was observed that some kind of eMMC device may fail to response
+	 * to CMD suddenly during normal usage. And the issue dispeared if
+	 * the same eMMC device working in DDR50. So disabling HS200 and force
+	 * the eMMC device working in DDR50 before reset the eMMC device.
+	 */
+	if ((host->caps2 & MMC_CAP2_HS200_1_8V_SDR) &&
+	    (host->caps2 & MMC_CAP2_HS200_DIS)) {
+		pr_warn("%s: disable eMMC HS200 due to error\n", __func__);
+		host->caps2 &= ~MMC_CAP2_HS200_1_8V_SDR;
+		host->card->last_max_dtr = 0;
+	}
 	err = mmc_hw_reset(host);
 	/* Ensure we switch back to the correct partition */
 	if (err != -EOPNOTSUPP) {
@@ -911,6 +1024,118 @@
 	md->reset_done &= ~type;
 }
 
+static int mmc_rpmb_req_process(struct mmc_blk_data *md,
+		struct mmc_ioc_rpmb_req *req)
+{
+	struct mmc_core_rpmb_req rpmb_req;
+	struct mmc_card *card = NULL;
+	int ret;
+
+	if (!md || !req)
+		return -EINVAL;
+
+	if (!(md->flags & MMC_BLK_CMD23) ||
+			(md->part_type != EXT_CSD_PART_CONFIG_ACC_RPMB))
+		return -EOPNOTSUPP;
+
+	card = md->queue.card;
+	if (!card || !mmc_card_mmc(card) || !card->ext_csd.rpmb_size)
+		return -ENODEV;
+
+	memset(&rpmb_req, 0, sizeof(struct mmc_core_rpmb_req));
+	rpmb_req.req = req;
+	/* check request */
+	ret = mmc_rpmb_pre_frame(&rpmb_req, card);
+	if (ret) {
+		pr_err("%s: prepare frame failed\n", mmc_hostname(card->host));
+		return ret;
+	}
+
+	mmc_claim_host(card->host);
+
+	if (md->flags & MMC_BLK_SUSPENDED) {
+		pr_warn("%s: MMC block device is already suspended\n",
+				mmc_hostname(card->host));
+		ret = -EPERM;
+		goto out;
+	}
+
+	/* wait for background operation finished */
+	mmc_stop_bkops(card);
+
+	/*
+	 * before start, let's change to RPMB partition first
+	 */
+	ret = mmc_blk_part_switch(card, md);
+	if (ret) {
+		pr_err("%s: Invalid RPMB partition switch (%d)!\n",
+				mmc_hostname(card->host), ret);
+		/*
+		 * In case partition is not in user data area, make
+		 * a force partition switch.
+		 * we need reset eMMC card at here
+		 */
+		ret = mmc_blk_reset(md, card->host, MMC_BLK_RPMB);
+		if (!ret)
+			mmc_blk_reset_success(md, MMC_BLK_RPMB);
+		else
+			pr_err("%s: eMMC card reset failed (%d)\n",
+					mmc_hostname(card->host), ret);
+		goto out;
+	}
+
+	ret = mmc_rpmb_partition_ops(&rpmb_req, card);
+	if (ret)
+		pr_err("%s: failed (%d) to handle RPMB request type (%d)!\n",
+				mmc_hostname(card->host), ret, req->type);
+out:
+	mmc_release_host(card->host);
+	mmc_rpmb_post_frame(&rpmb_req);
+	return ret;
+}
+
+int mmc_access_rpmb(struct mmc_queue *mq)
+{
+	struct mmc_blk_data *md = mq->data;
+	/*
+	 * If this is a RPMB partition access, return ture
+	 */
+	if (md && md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
+		return true;
+
+	return false;
+}
+EXPORT_SYMBOL_GPL(mmc_access_rpmb);
+
+int mmc_rpmb_req_handle(struct device *emmc, struct mmc_ioc_rpmb_req *req)
+{
+	int ret = 0;
+	struct gendisk *disk    = NULL;
+	struct mmc_blk_data *md = NULL;
+
+	if (!emmc || !req)
+		return -EINVAL;
+
+	disk = dev_to_disk(emmc);
+	if (!disk) {
+		pr_err("%s: NO eMMC disk found. Try it later\n",
+				__func__);
+		return -ENODEV;
+	}
+
+	md = mmc_blk_get(disk);
+	if (!md) {
+		pr_err("%s: NO eMMC block data. Try it later\n",
+				__func__);
+		return -ENODEV;
+	}
+	ret = mmc_rpmb_req_process(md, req);
+	mmc_blk_put(md);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(mmc_rpmb_req_handle);
+
 static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
 {
 	struct mmc_blk_data *md = mq->data;
@@ -1024,8 +1249,11 @@
 
 	if (mmc_can_sanitize(card)) {
 		trace_mmc_blk_erase_start(EXT_CSD_SANITIZE_START, 0, 0);
-		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
-				 EXT_CSD_SANITIZE_START, 1, 0);
+		err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+				 EXT_CSD_SANITIZE_START, 1, 0, false);
+		/* send status cmd to check */
+		if (!err)
+			err = mmc_busy_wait(card->host);
 		trace_mmc_blk_erase_end(EXT_CSD_SANITIZE_START, 0, 0);
 	}
 out_retry:
@@ -1937,6 +2165,7 @@
 	struct mmc_card *card = md->queue.card;
 	struct mmc_host *host = card->host;
 	unsigned long flags;
+	unsigned int cmd_flags = 0;
 
 #ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
 	if (mmc_bus_needs_resume(card->host))
@@ -1947,13 +2176,21 @@
 		/* claim host only for the first request */
 		mmc_claim_host(card->host);
 
+	if (req)
+		cmd_flags = req->cmd_flags;
+
 	ret = mmc_blk_part_switch(card, md);
 	if (ret) {
-		if (req) {
-			blk_end_request_all(req, -EIO);
+		pr_err("%s: switch part failed. Try to reset eMMC\n",
+				mmc_hostname(card->host));
+		if (mmc_blk_reset(md, card->host, MMC_BLK_USER)) {
+			if (req)
+				blk_end_request_all(req, -EIO);
+			ret = 0;
+			goto out;
 		}
-		ret = 0;
-		goto out;
+		pr_info("%s: Reset eMMC success\n", mmc_hostname(card->host));
+		mmc_blk_reset_success(md, MMC_BLK_USER);
 	}
 
 	mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
@@ -1982,7 +2219,7 @@
 
 out:
 	if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) ||
-	     (req && (req->cmd_flags & MMC_REQ_SPECIAL_MASK)))
+	     (req && (cmd_flags & MMC_REQ_SPECIAL_MASK)))
 		/*
 		 * Release host when there are no more requests
 		 * and after special request(discard, flush) is done.
@@ -2007,7 +2244,7 @@
 					      int area_type)
 {
 	struct mmc_blk_data *md;
-	int devidx, ret;
+	int devidx, ret, name_idx;
 
 	devidx = find_first_zero_bit(dev_use, max_devices);
 	if (devidx >= max_devices)
@@ -2027,7 +2264,13 @@
 	 * index anymore so we keep track of a name index.
 	 */
 	if (!subname) {
-		md->name_idx = find_first_zero_bit(name_use, max_devices);
+		name_idx = find_first_zero_bit(name_use, max_devices);
+		if (name_idx == 0 && !mmc_card_mmc(card)) {
+			__set_bit(0, name_use);
+			name_idx = find_first_zero_bit(name_use, max_devices);
+			__clear_bit(0, name_use);
+		}
+		md->name_idx = name_idx;
 		__set_bit(md->name_idx, name_use);
 	} else
 		md->name_idx = ((struct mmc_blk_data *)
@@ -2292,6 +2535,7 @@
 #define CID_MANFID_TOSHIBA	0x11
 #define CID_MANFID_MICRON	0x13
 #define CID_MANFID_SAMSUNG	0x15
+#define CID_MANFID_HYNIX	0x90
 
 static const struct mmc_fixup blk_fixups[] =
 {
@@ -2350,6 +2594,9 @@
 	MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
 		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
 
+	MMC_FIXUP("HBG4e", CID_MANFID_HYNIX, CID_OEMID_ANY, dis_cache_mmc,
+		  0),
+
 	END_FIXUP
 };
 
@@ -2422,6 +2669,19 @@
 		mmc_queue_suspend(&md->queue);
 		list_for_each_entry(part_md, &md->part, part) {
 			mmc_queue_suspend(&part_md->queue);
+			if (part_md->part_type ==
+				EXT_CSD_PART_CONFIG_ACC_RPMB) {
+				/*
+				 * RPMB partition is accessed by API directly.
+				 * Driver need to set a flag when suspending
+				 * MMC block device to notify API that the
+				 * accessing of RPMB partition needs to be
+				 * stopped
+				 */
+				mmc_claim_host(card->host);
+				part_md->flags |= MMC_BLK_SUSPENDED;
+				mmc_release_host(card->host);
+			}
 		}
 	}
 	return 0;
@@ -2452,6 +2712,18 @@
 		mmc_queue_resume(&md->queue);
 		list_for_each_entry(part_md, &md->part, part) {
 			mmc_queue_resume(&part_md->queue);
+			if (part_md->part_type ==
+					EXT_CSD_PART_CONFIG_ACC_RPMB) {
+				/*
+				 * RPMB partition is accessed by API directly.
+				 * Driver need to clear MMC_BLK_SUSPENDED flag
+				 * to make sure the next RPMB partition access
+				 * request won't be blocked
+				 */
+				mmc_claim_host(card->host);
+				part_md->flags &= ~MMC_BLK_SUSPENDED;
+				mmc_release_host(card->host);
+			}
 		}
 	}
 	return 0;
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 9447a0e..645519f 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -37,7 +37,7 @@
 		return BLKPREP_KILL;
 	}
 
-	if (mq && mmc_card_removed(mq->card))
+	if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
 		return BLKPREP_KILL;
 
 	req->cmd_flags |= REQ_DONTPREP;
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index 5752d50..3bbd4e6 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -73,4 +73,5 @@
 extern int mmc_packed_init(struct mmc_queue *, struct mmc_card *);
 extern void mmc_packed_clean(struct mmc_queue *);
 
+extern int mmc_access_rpmb(struct mmc_queue *);
 #endif
diff --git a/drivers/mmc/core/Makefile b/drivers/mmc/core/Makefile
index 38ed210..d55cc5d 100644
--- a/drivers/mmc/core/Makefile
+++ b/drivers/mmc/core/Makefile
@@ -7,6 +7,6 @@
 				   mmc.o mmc_ops.o sd.o sd_ops.o \
 				   sdio.o sdio_ops.o sdio_bus.o \
 				   sdio_cis.o sdio_io.o sdio_irq.o \
-				   quirks.o slot-gpio.o
+				   quirks.o slot-gpio.o mmc_panic_ops.o
 
 mmc_core-$(CONFIG_DEBUG_FS)	+= debugfs.o
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index d0b980c..1921e37 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -328,11 +328,12 @@
 			mmc_card_ddr_mode(card) ? "DDR " : "",
 			type);
 	} else {
-		pr_info("%s: new %s%s%s%s%s card at address %04x\n",
+		pr_info("%s: new %s%s%s%s%s%s card at address %04x\n",
 			mmc_hostname(card->host),
 			mmc_card_uhs(card) ? "ultra high speed " :
 			(mmc_card_highspeed(card) ? "high speed " : ""),
 			(mmc_card_hs200(card) ? "HS200 " : ""),
+			(mmc_card_hs400(card) ? "HS400 " : ""),
 			mmc_card_ddr_mode(card) ? "DDR " : "",
 			uhs_bus_speed_mode, type, card->rca);
 	}
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 8742ca0..37b095a 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -28,6 +28,7 @@
 #include <linux/random.h>
 #include <linux/slab.h>
 #include <linux/wakelock.h>
+#include <linux/intel_mid_pm.h>
 
 #include <trace/events/mmc.h>
 
@@ -136,6 +137,47 @@
 
 #endif /* CONFIG_FAIL_MMC_REQUEST */
 
+int mmc_busy_wait(struct mmc_host *host)
+{
+	unsigned long timeout;
+	struct mmc_command cmd = {0};
+	struct mmc_card *card;
+	int err;
+
+	if (!host || !host->card)
+		return 0;
+
+	card = host->card;
+	timeout = jiffies + msecs_to_jiffies(MMC_CORE_TIMEOUT_MS);
+	do {
+		memset(&cmd, 0, sizeof(struct mmc_command));
+		cmd.opcode = MMC_SEND_STATUS;
+		cmd.arg = card->rca << 16;
+		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
+		/* Do not retry else we can't see errors */
+		err = mmc_wait_for_cmd(host, &cmd, 0);
+		if (err || (cmd.resp[0] & 0xFDF92000)) {
+			pr_err("error %d requesting status %#x\n",
+				err, cmd.resp[0]);
+			return -EIO;
+		}
+
+		/* Timeout if the device never becomes ready for data and
+		 * never leaves the program state.
+		 */
+		if (time_after(jiffies, timeout)) {
+			pr_err("%s: Card stuck in programming state! %s\n",
+				mmc_hostname(host), __func__);
+			return -EIO;
+		}
+
+	} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
+		 (R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG));
+
+	return 0;
+}
+EXPORT_SYMBOL(mmc_busy_wait);
+
 /**
  *	mmc_request_done - finish processing an MMC request
  *	@host: MMC host which completed request
@@ -195,6 +237,19 @@
 
 EXPORT_SYMBOL(mmc_request_done);
 
+static void mmc_qos_update(struct mmc_host *host, struct mmc_request *mrq,
+		s32 new_value)
+{
+	if (!host || !host->qos || !mrq)
+		return;
+
+	if (host->card && mmc_card_mmc(host->card) && mrq->data) {
+		if (mrq->data->flags & MMC_DATA_WRITE)
+			pm_qos_update_request(host->qos, new_value);
+	} else
+		pm_qos_update_request(host->qos, new_value);
+}
+
 static void
 mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
 {
@@ -256,6 +311,7 @@
 	}
 	mmc_host_clk_hold(host);
 	led_trigger_event(host->led, LED_FULL);
+	mmc_qos_update(host, mrq, CSTATE_EXIT_LATENCY_C2);
 	host->ops->request(host, mrq);
 }
 
@@ -331,8 +387,13 @@
  */
 static void mmc_wait_data_done(struct mmc_request *mrq)
 {
+	unsigned long flags;
+	struct mmc_context_info *context_info = &mrq->host->context_info;
+
+	spin_lock_irqsave(&context_info->lock, flags);
 	mrq->host->context_info.is_done_rcv = true;
 	wake_up_interruptible(&mrq->host->context_info.wait);
+	spin_unlock_irqrestore(&context_info->lock, flags);
 }
 
 static void mmc_wait_done(struct mmc_request *mrq)
@@ -393,6 +454,7 @@
 	struct mmc_command *cmd;
 	struct mmc_context_info *context_info = &host->context_info;
 	int err;
+	bool is_done_rcv = false;
 	unsigned long flags;
 
 	while (1) {
@@ -401,8 +463,9 @@
 				 context_info->is_new_req));
 		spin_lock_irqsave(&context_info->lock, flags);
 		context_info->is_waiting_last_req = false;
+		is_done_rcv = context_info->is_done_rcv;
 		spin_unlock_irqrestore(&context_info->lock, flags);
-		if (context_info->is_done_rcv) {
+		if (is_done_rcv) {
 			context_info->is_done_rcv = false;
 			context_info->is_new_req = false;
 			cmd = mrq->cmd;
@@ -410,6 +473,7 @@
 			    mmc_card_removed(host->card)) {
 				err = host->areq->err_check(host->card,
 							    host->areq);
+				mmc_qos_update(host, mrq, PM_QOS_DEFAULT_VALUE);
 				break; /* return err */
 			} else {
 				pr_info("%s: req failed (CMD%u): %d, retrying...\n",
@@ -441,8 +505,10 @@
 
 		cmd = mrq->cmd;
 		if (!cmd->error || !cmd->retries ||
-		    mmc_card_removed(host->card))
+		    mmc_card_removed(host->card)) {
+			mmc_qos_update(host, mrq, PM_QOS_DEFAULT_VALUE);
 			break;
+		}
 
 		pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
 			 mmc_hostname(host), cmd->opcode, cmd->error);
@@ -1413,7 +1479,10 @@
 	mmc_set_ios(host);
 
 	/* Wait for at least 1 ms according to spec */
-	mmc_delay(1);
+	if (host->ops->busy_wait)
+		host->ops->busy_wait(host, 1000);
+	else
+		mmc_delay(1);
 
 	/*
 	 * Failure to switch is indicated by the card holding
@@ -1476,6 +1545,9 @@
 
 	mmc_host_clk_hold(host);
 
+	if (host->ops->set_dev_power)
+		host->ops->set_dev_power(host, true);
+
 	/* If ocr is set, we use it */
 	if (host->ocr)
 		bit = ffs(host->ocr) - 1;
@@ -1500,7 +1572,7 @@
 	 * This delay should be sufficient to allow the power supply
 	 * to reach the minimum voltage.
 	 */
-	mmc_delay(10);
+	usleep_range(10000, 11000);
 
 	host->ios.clock = host->f_init;
 
@@ -1511,7 +1583,7 @@
 	 * This delay must be at least 74 clock sizes, or 1 ms, or the
 	 * time required to reach a stable voltage.
 	 */
-	mmc_delay(10);
+	usleep_range(5000, 6000);
 
 	mmc_host_clk_release(host);
 }
@@ -1542,6 +1614,9 @@
 	host->ios.timing = MMC_TIMING_LEGACY;
 	mmc_set_ios(host);
 
+	if (host->ops->set_dev_power)
+		host->ops->set_dev_power(host, false);
+
 	/*
 	 * Some configurations, such as the 802.11 SDIO card in the OLPC
 	 * XO-1.5, require a short delay after poweroff before the card
@@ -1619,7 +1694,7 @@
 		host->bus_ops->resume(host);
 	}
 
-	if (host->bus_ops->detect && !host->bus_dead)
+	if (host->bus_ops && host->bus_ops->detect && !host->bus_dead)
 		host->bus_ops->detect(host);
 
 	mmc_bus_put(host);
@@ -1730,7 +1805,7 @@
 		card->erase_shift = ffs(card->ssr.au) - 1;
 	} else if (card->ext_csd.hc_erase_size) {
 		card->pref_erase = card->ext_csd.hc_erase_size;
-	} else {
+	} else if (card->erase_size) {
 		sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
 		if (sz < 128)
 			card->pref_erase = 512 * 1024 / 512;
@@ -1747,7 +1822,8 @@
 			if (sz)
 				card->pref_erase += card->erase_size - sz;
 		}
-	}
+	} else
+		card->pref_erase = 0;
 }
 
 static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
@@ -1854,7 +1930,6 @@
 {
 	struct mmc_command cmd = {0};
 	unsigned int qty = 0;
-	unsigned long timeout;
 	unsigned int fr, nr;
 	int err;
 
@@ -1924,8 +1999,16 @@
 	memset(&cmd, 0, sizeof(struct mmc_command));
 	cmd.opcode = MMC_ERASE;
 	cmd.arg = arg;
-	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
-	cmd.cmd_timeout_ms = mmc_erase_timeout(card, arg, qty);
+	if (card->host->caps2 & MMC_CAP2_POLL_R1B_BUSY) {
+		cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
+		if (card->host->max_discard_to)
+			cmd.cmd_timeout_ms = card->host->max_discard_to - 1;
+		else
+			cmd.cmd_timeout_ms = mmc_erase_timeout(card, arg, qty);
+	} else {
+		cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+		cmd.cmd_timeout_ms = mmc_erase_timeout(card, arg, qty);
+	}
 	err = mmc_wait_for_cmd(card->host, &cmd, 0);
 	if (err) {
 		pr_err("mmc_erase: erase error %d, status %#x\n",
@@ -1937,33 +2020,7 @@
 	if (mmc_host_is_spi(card->host))
 		goto out;
 
-	timeout = jiffies + msecs_to_jiffies(MMC_CORE_TIMEOUT_MS);
-	do {
-		memset(&cmd, 0, sizeof(struct mmc_command));
-		cmd.opcode = MMC_SEND_STATUS;
-		cmd.arg = card->rca << 16;
-		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
-		/* Do not retry else we can't see errors */
-		err = mmc_wait_for_cmd(card->host, &cmd, 0);
-		if (err || (cmd.resp[0] & 0xFDF92000)) {
-			pr_err("error %d requesting status %#x\n",
-				err, cmd.resp[0]);
-			err = -EIO;
-			goto out;
-		}
-
-		/* Timeout if the device never becomes ready for data and
-		 * never leaves the program state.
-		 */
-		if (time_after(jiffies, timeout)) {
-			pr_err("%s: Card stuck in programming state! %s\n",
-				mmc_hostname(card->host), __func__);
-			err =  -EIO;
-			goto out;
-		}
-
-	} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
-		 (R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG));
+	err = mmc_busy_wait(card->host);
 out:
 
 	trace_mmc_blk_erase_end(arg, fr, nr);
@@ -2100,7 +2157,7 @@
 {
 	struct mmc_host *host = card->host;
 	unsigned int max_discard, x, y, qty = 0, max_qty, timeout;
-	unsigned int last_timeout = 0;
+	unsigned int last_timeout = 0, aligned_qty;
 
 	if (card->erase_shift)
 		max_qty = UINT_MAX >> card->erase_shift;
@@ -2127,16 +2184,24 @@
 	if (!qty)
 		return 0;
 
-	if (qty == 1)
-		return 1;
+	if (arg & MMC_TRIM_ARGS) {
+		if (qty == 1)
+			aligned_qty = 1;
+		else
+			aligned_qty = qty - 1;
+	} else if (mmc_card_sd(card) &&
+			(card->erase_size != 1 << card->erase_shift))
+		aligned_qty = qty - 1;
+	else
+		aligned_qty = qty;
 
 	/* Convert qty to sectors */
 	if (card->erase_shift)
-		max_discard = --qty << card->erase_shift;
+		max_discard = aligned_qty << card->erase_shift;
 	else if (mmc_card_sd(card))
 		max_discard = qty;
 	else
-		max_discard = --qty * card->erase_size;
+		max_discard = aligned_qty * card->erase_size;
 
 	return max_discard;
 }
@@ -2149,13 +2214,8 @@
 	if (!host->max_discard_to)
 		return UINT_MAX;
 
-	/*
-	 * Without erase_group_def set, MMC erase timeout depends on clock
-	 * frequence which can change.  In that case, the best choice is
-	 * just the preferred erase size.
-	 */
-	if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
-		return card->pref_erase;
+	if (host->caps2 & MMC_CAP2_POLL_R1B_BUSY)
+		return UINT_MAX;
 
 	max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
 	if (mmc_can_trim(card)) {
@@ -2165,7 +2225,7 @@
 	} else if (max_discard < card->erase_size) {
 		max_discard = 0;
 	}
-	pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
+	pr_info("%s: calculated max. discard sectors %u for timeout %u ms\n",
 		 mmc_hostname(host), max_discard, host->max_discard_to);
 	return max_discard;
 }
@@ -2228,36 +2288,59 @@
 	if (!host->bus_ops->power_restore)
 		return -EOPNOTSUPP;
 
-	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
-		return -EOPNOTSUPP;
-
 	if (!card)
 		return -EINVAL;
 
-	if (!mmc_can_reset(card))
-		return -EOPNOTSUPP;
+	/*
+	 * before HW reset card, cache needs to be flushed. Otherwise
+	 * the data in cache can be lost. But this flush may be failed
+	 * because card may be not in a good state
+	 */
+	if (mmc_cache_ctrl(host, 0)) {
+		pr_err("%s: flushing cache before HW reset failed, ",
+				mmc_hostname(host));
+		pr_err("this maybe cause file system unexpected error!\n");
+	}
 
 	mmc_host_clk_hold(host);
 	mmc_set_clock(host, host->f_init);
 
-	host->ops->hw_reset(host);
+	/*
+	 * if host has HW reset cap, use HW reset first before re-init card
+	 */
+	if (mmc_can_reset(card) && (host->caps & MMC_CAP_HW_RESET) &&
+			host->ops->hw_reset) {
+		host->ops->hw_reset(host);
+		/* If the reset has happened, then a status command will fail */
+		if (check) {
+			struct mmc_command cmd = {0};
+			int err;
 
-	/* If the reset has happened, then a status command will fail */
-	if (check) {
-		struct mmc_command cmd = {0};
-		int err;
+			cmd.opcode = MMC_SEND_STATUS;
+			if (!mmc_host_is_spi(card->host))
+				cmd.arg = card->rca << 16;
+			cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
+			err = mmc_wait_for_cmd(card->host, &cmd, 0);
+			if (!err) {
+				mmc_host_clk_release(host);
+				return -ENOSYS;
+			}
 
-		cmd.opcode = MMC_SEND_STATUS;
-		if (!mmc_host_is_spi(card->host))
-			cmd.arg = card->rca << 16;
-		cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
-		err = mmc_wait_for_cmd(card->host, &cmd, 0);
-		if (!err) {
-			mmc_host_clk_release(host);
-			return -ENOSYS;
 		}
 	}
 
+	if (card && mmc_card_sd(card) &&
+			(card->host->caps2 & MMC_CAP2_FIXED_NCRC) &&
+			(card->scr.sda_spec3) &&
+			(card->sw_caps.sd3_bus_mode & (SD_MODE_UHS_DDR50 |
+				SD_MODE_UHS_SDR104))) {
+		pr_warn("%s: SD card disable DDR50 and SDR104\n", __func__);
+		mmc_card_set_noddr50(card);
+	}
+
+	mmc_power_off(host);
+	mmc_power_up(host);
+
 	host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_DDR);
 	if (mmc_host_is_spi(host)) {
 		host->ios.chip_select = MMC_CS_HIGH;
@@ -2384,7 +2467,8 @@
 			 * Schedule a detect work as soon as possible to let a
 			 * rescan handle the card removal.
 			 */
-			cancel_delayed_work(&host->detect);
+			if (cancel_delayed_work(&host->detect))
+				wake_unlock(&host->detect_wake_lock);
 			mmc_detect_change(host, 0);
 		}
 	}
@@ -2400,12 +2484,16 @@
 	int i;
 	bool extend_wakelock = false;
 
-	if (host->rescan_disable)
+	if (host->rescan_disable) {
+		wake_unlock(&host->detect_wake_lock);
 		return;
+	}
 
 	/* If there is a non-removable card registered, only scan once */
-	if ((host->caps & MMC_CAP_NONREMOVABLE) && host->rescan_entered)
+	if ((host->caps & MMC_CAP_NONREMOVABLE) && host->rescan_entered) {
+		wake_unlock(&host->detect_wake_lock);
 		return;
+	}
 	host->rescan_entered = 1;
 
 	mmc_bus_get(host);
@@ -2464,6 +2552,7 @@
 	mmc_release_host(host);
 
  out:
+	mmc_emergency_setup(host);
 	if (extend_wakelock)
 		wake_lock_timeout(&host->detect_wake_lock, HZ / 2);
 	else
@@ -2483,6 +2572,8 @@
 	else
 		mmc_power_up(host);
 	mmc_detect_change(host, 0);
+	if (host->caps2 & MMC_CAP2_INIT_CARD_SYNC)
+		flush_work_sync(&host->detect.work);
 }
 
 void mmc_stop_host(struct mmc_host *host)
@@ -2504,6 +2595,10 @@
 
 	mmc_bus_get(host);
 	if (host->bus_ops && !host->bus_dead) {
+		/*
+		 * disable cache before remove card
+		 */
+		mmc_cache_ctrl(host, 0);
 		/* Calling bus_ops->remove() with a claimed host can deadlock */
 		if (host->bus_ops->remove)
 			host->bus_ops->remove(host);
@@ -2542,6 +2637,11 @@
 
 	mmc_bus_put(host);
 
+	/*
+	 * disable cache before remove card
+	 */
+	mmc_cache_ctrl(host, 0);
+
 	mmc_power_off(host);
 
 	return ret;
@@ -2643,6 +2743,56 @@
 }
 EXPORT_SYMBOL(mmc_flush_cache);
 
+/*
+ * Turn the cache ON/OFF.
+ * Turning the cache OFF shall trigger flushing of the data
+ * to the non-volatile storage.
+ * This function should be called with host claimed
+ */
+int mmc_cache_ctrl(struct mmc_host *host, u8 enable)
+{
+	struct mmc_card *card = host->card;
+	int err = 0;
+
+	if (!(host->caps2 & MMC_CAP2_CACHE_CTRL) ||
+			mmc_card_is_removable(host))
+		return err;
+
+	if (card && mmc_card_mmc(card) &&
+			(card->ext_csd.cache_size > 0)) {
+		enable = !!enable;
+
+		if (card->ext_csd.cache_ctrl ^ enable) {
+			if (enable)
+				err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+					EXT_CSD_CACHE_CTRL, enable,
+					card->ext_csd.generic_cmd6_time);
+			else {
+				/*
+				 * disable cache will cause flushing data to
+				 * non-volatile storage, so we may need to
+				 * check busy state here by polling card status
+				 */
+				err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+						EXT_CSD_CACHE_CTRL, enable,
+						0, false);
+				if (!err)
+					err = mmc_busy_wait(host);
+			}
+			if (err)
+				pr_err("%s: cache %s error %d\n",
+						mmc_hostname(card->host),
+						enable ? "on" : "off",
+						err);
+			else
+				card->ext_csd.cache_ctrl = enable;
+		}
+	}
+
+	return err;
+}
+EXPORT_SYMBOL(mmc_cache_ctrl);
+
 #ifdef CONFIG_PM
 
 /**
@@ -2786,6 +2936,11 @@
 		if (!host->bus_ops || host->bus_ops->suspend)
 			break;
 
+		/*
+		 * disable cache before remove card
+		 */
+		mmc_cache_ctrl(host, 0);
+
 		/* Calling bus_ops->remove() with a claimed host can deadlock */
 		if (host->bus_ops->remove)
 			host->bus_ops->remove(host);
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index 35c2f85..dd90836 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -126,6 +126,9 @@
 	case MMC_TIMING_SD_HS:
 		str = "sd high-speed";
 		break;
+	case MMC_TIMING_UHS_SDR25:
+		str = "sd uhs SDR25";
+		break;
 	case MMC_TIMING_UHS_SDR50:
 		str = "sd uhs SDR50";
 		break;
@@ -138,6 +141,9 @@
 	case MMC_TIMING_MMC_HS200:
 		str = "mmc high-speed SDR200";
 		break;
+	case MMC_TIMING_MMC_HS400:
+		str = "mmc high-speed DDR200";
+		break;
 	default:
 		str = "invalid";
 		break;
@@ -158,7 +164,7 @@
 		str = "invalid";
 		break;
 	}
-	seq_printf(s, "signal voltage:\t%u (%s)\n", ios->chip_select, str);
+	seq_printf(s, "signal voltage:\t%u (%s)\n", ios->signal_voltage, str);
 
 	return 0;
 }
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 6b0d943..446e80e 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -239,7 +239,8 @@
 static void mmc_select_card_type(struct mmc_card *card)
 {
 	struct mmc_host *host = card->host;
-	u8 card_type = card->ext_csd.raw_card_type & EXT_CSD_CARD_TYPE_MASK;
+	u8 card_type = card->ext_csd.raw_card_type &
+			EXT_CSD_CARD_TYPE_MASK_FULL;
 	u32 caps = host->caps, caps2 = host->caps2;
 	unsigned int hs_max_dtr = 0;
 
@@ -262,6 +263,12 @@
 			card_type & EXT_CSD_CARD_TYPE_SDR_1_2V))
 		hs_max_dtr = MMC_HS200_MAX_DTR;
 
+	if ((caps2 & MMC_CAP2_HS400_1_8V_DDR &&
+			card_type & EXT_CSD_CARD_TYPE_HS400_1_8V) ||
+	    (caps2 & MMC_CAP2_HS400_1_2V_DDR &&
+			card_type & EXT_CSD_CARD_TYPE_HS400_1_2V))
+		hs_max_dtr = MMC_HS400_MAX_DTR;
+
 	card->ext_csd.hs_max_dtr = hs_max_dtr;
 	card->ext_csd.card_type = card_type;
 }
@@ -293,7 +300,7 @@
 	}
 
 	card->ext_csd.rev = ext_csd[EXT_CSD_REV];
-	if (card->ext_csd.rev > 6) {
+	if (card->ext_csd.rev > 7) {
 		pr_err("%s: unrecognised EXT_CSD revision %d\n",
 			mmc_hostname(card->host), card->ext_csd.rev);
 		err = -EINVAL;
@@ -324,6 +331,8 @@
 		ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
 	card->ext_csd.raw_hc_erase_grp_size =
 		ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
+	card->ext_csd.part_set_complete =
+		ext_csd[EXT_CSD_PART_SET_COMPLETE];
 	if (card->ext_csd.rev >= 3) {
 		u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];
 		card->ext_csd.part_config = ext_csd[EXT_CSD_PART_CONFIG];
@@ -438,12 +447,15 @@
 					<< 8) +
 				ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3];
 				part_size *= (size_t)(hc_erase_grp_sz *
-					hc_wp_grp_sz);
+						hc_wp_grp_sz);
+				card->ext_csd.gpp_sz[idx] = part_size << 10;
 				mmc_part_add(card, part_size << 19,
 					EXT_CSD_PART_CONFIG_ACC_GP0 + idx,
 					"gp%d", idx, false,
 					MMC_BLK_DATA_AREA_GP);
 			}
+			card->ext_csd.wpg_sz = (size_t)(hc_erase_grp_sz *
+					hc_wp_grp_sz);
 		}
 		card->ext_csd.sec_trim_mult =
 			ext_csd[EXT_CSD_SEC_TRIM_MULT];
@@ -497,7 +509,11 @@
 		 * RPMB regions are defined in multiples of 128K.
 		 */
 		card->ext_csd.raw_rpmb_size_mult = ext_csd[EXT_CSD_RPMB_MULT];
-		if (ext_csd[EXT_CSD_RPMB_MULT] && mmc_host_cmd23(card->host)) {
+		card->ext_csd.rpmb_size = 128 *
+			card->ext_csd.raw_rpmb_size_mult;
+		card->ext_csd.rpmb_size <<= 2; /* Unit: half sector */
+		if (ext_csd[EXT_CSD_RPMB_MULT] && mmc_host_cmd23(card->host)
+			&& mmc_rpmb_partition_access(card->host)) {
 			mmc_part_add(card, ext_csd[EXT_CSD_RPMB_MULT] << 17,
 				EXT_CSD_PART_CONFIG_ACC_RPMB,
 				"rpmb", 0, false,
@@ -548,6 +564,17 @@
 		card->ext_csd.data_sector_size = 512;
 	}
 
+	/*
+	 * If use legacy relaible write, then the blk counts must not
+	 * big than the relaible write sectors
+	 */
+	if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
+		if (card->ext_csd.rel_sectors < RPMB_AVALIABLE_SECTORS)
+			card->rpmb_max_req = card->ext_csd.rel_sectors;
+		else
+			card->rpmb_max_req = RPMB_AVALIABLE_SECTORS;
+	} else
+		card->rpmb_max_req = RPMB_AVALIABLE_SECTORS;
 out:
 	return err;
 }
@@ -630,11 +657,153 @@
 MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
 MMC_DEV_ATTR(prv, "0x%x\n", card->cid.prv);
 MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial);
-MMC_DEV_ATTR(enhanced_area_offset, "%llu\n",
+MMC_DEV_ATTR(enhanced_area_offset, "%lld\n",
 		card->ext_csd.enhanced_area_offset);
-MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size);
+MMC_DEV_ATTR(enhanced_area_size, "%d KBytes\n",
+		card->ext_csd.enhanced_area_size);
 MMC_DEV_ATTR(raw_rpmb_size_mult, "%#x\n", card->ext_csd.raw_rpmb_size_mult);
 MMC_DEV_ATTR(rel_sectors, "%#x\n", card->ext_csd.rel_sectors);
+MMC_DEV_ATTR(hpi_support, "%d\n", card->ext_csd.hpi);
+MMC_DEV_ATTR(hpi_enable, "%d\n", card->ext_csd.hpi_en);
+MMC_DEV_ATTR(hpi_command, "%d\n", card->ext_csd.hpi_cmd);
+MMC_DEV_ATTR(hw_reset_support, "%d\n", card->ext_csd.rst_n_function);
+MMC_DEV_ATTR(bkops_support, "%d\n", card->ext_csd.bkops);
+MMC_DEV_ATTR(bkops_enable, "%d\n", card->ext_csd.bkops_en);
+MMC_DEV_ATTR(rpmb_size, "%d\n", card->ext_csd.rpmb_size);
+
+/* init gpp_wppart as an invalide GPP */
+static unsigned int gpp_wppart = EXT_CSD_PART_CONFIG_ACC_GP0 - 1;
+static ssize_t gpp_wppart_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	/* make GPP number readable */
+	return sprintf(buf, "%d\n", gpp_wppart -
+			EXT_CSD_PART_CONFIG_ACC_GP0 + 1);
+}
+
+static ssize_t gpp_wppart_set(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t n)
+{
+	long part;
+	struct mmc_card *card = mmc_dev_to_card(dev);
+
+	if (card == NULL)
+		return -ENODEV;
+	if (kstrtol(buf, 10, &part) != 0 || part != (u32)part)
+		return -EINVAL;
+	if (part > EXT_CSD_GPP_NUM || part <= 0)
+		return -EINVAL;
+	if (!card->ext_csd.gpp_sz[part - 1])
+		return -EINVAL;
+	device_lock(dev);
+	/* make GPP number recognized by eMMC device */
+	gpp_wppart = part + EXT_CSD_PART_CONFIG_ACC_GP0 - 1;
+	device_unlock(dev);
+	return n;
+}
+static DEVICE_ATTR(gpp_wppart, 0644, gpp_wppart_show, gpp_wppart_set);
+
+static unsigned int gpp_wpgroup;
+static ssize_t gpp_wpgroup_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", gpp_wpgroup);
+}
+
+static ssize_t gpp_wpgroup_set(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t n)
+{
+	long group;
+	struct mmc_card *card = mmc_dev_to_card(dev);
+
+	if (card == NULL)
+		return -ENODEV;
+
+	if (kstrtol(buf, 10, &group) != 0 || group != (u32)group)
+		return -EINVAL;
+
+	if (group < 0 || gpp_wppart < EXT_CSD_PART_CONFIG_ACC_GP0 ||
+			gpp_wppart >
+			EXT_CSD_PART_CONFIG_ACC_GP0 + EXT_CSD_GPP_NUM - 1)
+		return -EINVAL;
+
+	if (group > card->ext_csd.gpp_sz[gpp_wppart -
+			EXT_CSD_PART_CONFIG_ACC_GP0] - 1)
+		return -EINVAL;
+
+	device_lock(dev);
+	gpp_wpgroup = group;
+	device_unlock(dev);
+	return n;
+}
+static DEVICE_ATTR(gpp_wpgroup, 0644, gpp_wpgroup_show, gpp_wpgroup_set);
+
+static ssize_t gpp_wp_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct mmc_card *card = mmc_dev_to_card(dev);
+	int err;
+	u8 wp_status = 0;
+
+	if (card == NULL)
+		return -ENODEV;
+
+	device_lock(dev);
+	if (gpp_wppart < EXT_CSD_PART_CONFIG_ACC_GP0) {
+		device_unlock(dev);
+		return -EINVAL;
+	}
+
+	err = mmc_wp_status(card, gpp_wppart, gpp_wpgroup, &wp_status);
+	if (err) {
+		device_unlock(dev);
+		return err;
+	}
+
+	device_unlock(dev);
+
+	return sprintf(buf, "%d\n", wp_status);
+}
+
+#define PERMANENT_PROTECT      1
+#define GPP_WPG0               0
+/*
+ * protect: 1 means permanent write protect. Right now only allow this
+ * protection method
+ */
+static ssize_t gpp_wp_set(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t n)
+{
+	long protect;
+	struct mmc_card *card = mmc_dev_to_card(dev);
+	int err;
+
+	if (card == NULL)
+		return -ENODEV;
+
+	if (kstrtol(buf, 10, &protect) != 0 || protect != (u32)protect)
+		return -EINVAL;
+
+	if (protect != PERMANENT_PROTECT)
+		return -EINVAL;
+
+	device_lock(dev);
+
+	if (gpp_wppart != EXT_CSD_PART_CONFIG_ACC_GP0 ||
+			gpp_wpgroup != GPP_WPG0) {
+		device_unlock(dev);
+		return -EINVAL;
+	}
+
+	err = mmc_set_user_wp(card, gpp_wppart, gpp_wpgroup);
+	if (err) {
+		pr_err("%s: err to set write protect\n", __func__);
+		n = err;
+	}
+	device_unlock(dev);
+	return n;
+}
+static DEVICE_ATTR(gpp_wp, 0644, gpp_wp_show, gpp_wp_set);
 
 static struct attribute *mmc_std_attrs[] = {
 	&dev_attr_cid.attr,
@@ -653,6 +822,16 @@
 	&dev_attr_enhanced_area_size.attr,
 	&dev_attr_raw_rpmb_size_mult.attr,
 	&dev_attr_rel_sectors.attr,
+	&dev_attr_hpi_support.attr,
+	&dev_attr_hpi_enable.attr,
+	&dev_attr_hpi_command.attr,
+	&dev_attr_hw_reset_support.attr,
+	&dev_attr_bkops_support.attr,
+	&dev_attr_bkops_enable.attr,
+	&dev_attr_rpmb_size.attr,
+	&dev_attr_gpp_wppart.attr,
+	&dev_attr_gpp_wpgroup.attr,
+	&dev_attr_gpp_wp.attr,
 	NULL,
 };
 
@@ -707,8 +886,12 @@
 			index = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
 				EXT_CSD_PWR_CL_52_195 :
 				EXT_CSD_PWR_CL_DDR_52_195;
-		else if (host->ios.clock <= 200000000)
-			index = EXT_CSD_PWR_CL_200_195;
+		else if (host->ios.clock <= 200000000) {
+			if (mmc_card_hs400(card))
+				index = EXT_CSD_PWR_CL_200_DDR_195;
+			else
+				index = EXT_CSD_PWR_CL_200_195;
+		}
 		break;
 	case MMC_VDD_27_28:
 	case MMC_VDD_28_29:
@@ -755,6 +938,81 @@
 }
 
 /*
+ * Support HS400:
+ * This function should be called after HS200 tuning.
+ */
+static int mmc_select_hs400_start(struct mmc_card *card)
+{
+	int err = -EINVAL;
+	struct mmc_host *host;
+	static unsigned ext_csd_bit = EXT_CSD_DDR_BUS_WIDTH_8;
+	static unsigned bus_width = MMC_BUS_WIDTH_8;
+
+	BUG_ON(!card);
+
+	host = card->host;
+	/* HS400 mode only supports 8bit bus.*/
+	if (!(host->caps & MMC_CAP_8_BIT_DATA)) {
+		pr_err("HS400: MMC host does not support 8bit bus, error!\n");
+		goto err;
+	}
+
+	/* Must set HS_TIMING to 1 after tuning completion. */
+	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+			EXT_CSD_HS_TIMING, 1, 0);
+	if (!err) {
+		/* Set timing to DDR50 first */
+		mmc_set_timing(card->host, MMC_TIMING_UHS_DDR50);
+		/* Then, set clock to 50MHz */
+		mmc_set_clock(host, MMC_HIGH_DDR_MAX_DTR);
+	} else {
+		goto err;
+	}
+
+	/*
+	 * Host is capable of 8bit transfer, switch
+	 * the device to work in 8bit transfer mode.
+	 * On success set 8bit bus width on the host.
+	 */
+	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+			EXT_CSD_BUS_WIDTH,
+			ext_csd_bit,
+			card->ext_csd.generic_cmd6_time);
+	if (err)
+		goto err;
+
+	/* Bus test */
+	mmc_set_bus_width(card->host, bus_width);
+	if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
+		err = mmc_compare_ext_csds(card, bus_width);
+	else
+		err = mmc_bus_test(card, bus_width);
+	if (err)
+		goto err;
+
+err:
+	return err;
+}
+
+static int mmc_select_hs400_end(struct mmc_card *card, unsigned int max_dtr)
+{
+	int err = -EINVAL;
+
+	/* Switch timing to HS400 now. */
+	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+			EXT_CSD_HS_TIMING, 3, 0);
+	if (!err) {
+		mmc_set_timing(card->host, MMC_TIMING_MMC_HS400);
+		/*
+		 * After enablig HS400 mode, we should restore
+		 * frequency to 200MHz.
+		 */
+		mmc_set_clock(card->host, max_dtr);
+	}
+	return err;
+}
+
+/*
  * Selects the desired buswidth and switch to the HS200 mode
  * if bus width set without error
  */
@@ -775,12 +1033,16 @@
 
 	host = card->host;
 
-	if (card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_2V &&
-			host->caps2 & MMC_CAP2_HS200_1_2V_SDR)
+	if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_2V &&
+			host->caps2 & MMC_CAP2_HS200_1_2V_SDR) ||
+	    (card->ext_csd.card_type & EXT_CSD_CARD_TYPE_HS400_1_2V &&
+			host->caps2 & MMC_CAP2_HS400_1_2V_DDR))
 		err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
 
-	if (err && card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_8V &&
-			host->caps2 & MMC_CAP2_HS200_1_8V_SDR)
+	if (err && ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_8V &&
+			host->caps2 & MMC_CAP2_HS200_1_8V_SDR) ||
+	    (card->ext_csd.card_type & EXT_CSD_CARD_TYPE_HS400_1_8V &&
+			host->caps2 & MMC_CAP2_HS400_1_8V_DDR)))
 		err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
 
 	/* If fails try again during next card power cycle */
@@ -826,6 +1088,12 @@
 		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 				 EXT_CSD_HS_TIMING, 2, 0);
 err:
+	if (err) {
+		host->caps2 &= ~MMC_CAP2_HS200;
+		pr_warn("%s: failed to init eMMC in HS200 retry other mode\n",
+				mmc_hostname(card->host));
+	}
+
 	return err;
 }
 
@@ -971,25 +1239,20 @@
 	 * If enhanced_area_en is TRUE, host needs to enable ERASE_GRP_DEF
 	 * bit.  This bit will be lost every time after a reset or power off.
 	 */
-	if (card->ext_csd.enhanced_area_en ||
+	if (card->ext_csd.enhanced_area_en || card->ext_csd.part_set_complete ||
 	    (card->ext_csd.rev >= 3 && (host->caps2 & MMC_CAP2_HC_ERASE_SZ))) {
 		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 				 EXT_CSD_ERASE_GROUP_DEF, 1,
 				 card->ext_csd.generic_cmd6_time);
 
-		if (err && err != -EBADMSG)
+		/*
+		 * GPP partition write protection is set when
+		 * ERASE_GROUP_DEF is 1, if driver failed to set
+		 * this bit to 1, report error
+		 */
+		if (err)
 			goto free_card;
-
-		if (err) {
-			err = 0;
-			/*
-			 * Just disable enhanced area off & sz
-			 * will try to enable ERASE_GROUP_DEF
-			 * during next time reinit
-			 */
-			card->ext_csd.enhanced_area_offset = -EINVAL;
-			card->ext_csd.enhanced_area_size = -EINVAL;
-		} else {
+		else {
 			card->ext_csd.erase_group_def = 1;
 			/*
 			 * enable ERASE_GRP_DEF successfully.
@@ -1036,8 +1299,10 @@
 	 */
 	if (card->ext_csd.hs_max_dtr != 0) {
 		err = 0;
+		/* Support HS400: set to HS200 before tuning complete. */
 		if (card->ext_csd.hs_max_dtr > 52000000 &&
-		    host->caps2 & MMC_CAP2_HS200)
+		    (host->caps2 & MMC_CAP2_HS200 ||
+		    host->caps2 & MMC_CAP2_HS400))
 			err = mmc_select_hs200(card);
 		else if	(host->caps & MMC_CAP_MMC_HIGHSPEED)
 			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
@@ -1053,11 +1318,20 @@
 			err = 0;
 		} else {
 			if (card->ext_csd.hs_max_dtr > 52000000 &&
+			    host->caps2 & MMC_CAP2_HS400 &&
+			    (card->ext_csd.card_type &
+				EXT_CSD_CARD_TYPE_HS400_1_8V ||
+			    card->ext_csd.card_type &
+				EXT_CSD_CARD_TYPE_HS400_1_2V)) {
+				mmc_card_set_hs400(card);
+				mmc_set_timing(card->host,
+						MMC_TIMING_MMC_HS200);
+			} else if (card->ext_csd.hs_max_dtr > 52000000 &&
 			    host->caps2 & MMC_CAP2_HS200) {
 				mmc_card_set_hs200(card);
 				mmc_set_timing(card->host,
 					       MMC_TIMING_MMC_HS200);
-			} else {
+			} else if (host->caps & MMC_CAP_MMC_HIGHSPEED) {
 				mmc_card_set_highspeed(card);
 				mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
 			}
@@ -1069,7 +1343,9 @@
 	 */
 	max_dtr = (unsigned int)-1;
 
-	if (mmc_card_highspeed(card) || mmc_card_hs200(card)) {
+	if (mmc_card_highspeed(card) ||
+	    mmc_card_hs200(card) ||
+	    mmc_card_hs400(card)) {
 		if (max_dtr > card->ext_csd.hs_max_dtr)
 			max_dtr = card->ext_csd.hs_max_dtr;
 		if (mmc_card_highspeed(card) && (max_dtr > 52000000))
@@ -1097,9 +1373,9 @@
 	}
 
 	/*
-	 * Indicate HS200 SDR mode (if supported).
+	 * Indicate HS200 SDR mode or HS400 DDR mode (if supported).
 	 */
-	if (mmc_card_hs200(card)) {
+	if (mmc_card_hs200(card) || mmc_card_hs400(card)) {
 		u32 ext_csd_bits;
 		u32 bus_width = card->host->ios.bus_width;
 
@@ -1114,7 +1390,9 @@
 		 * 3. set the clock to > 52Mhz <=200MHz and
 		 * 4. execute tuning for HS200
 		 */
-		if ((host->caps2 & MMC_CAP2_HS200) &&
+		/* Support HS400: tuning under HS200 mode. */
+		if ((host->caps2 & MMC_CAP2_HS200 ||
+		    host->caps2 & MMC_CAP2_HS400) &&
 		    card->host->ops->execute_tuning) {
 			mmc_host_clk_hold(card->host);
 			err = card->host->ops->execute_tuning(card->host,
@@ -1127,19 +1405,45 @@
 			goto err;
 		}
 
-		ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
+		/* Support HS400 */
+		if (mmc_card_hs400(card)) {
+			/*
+			 * Per spec 5.0, follow below sequence to enable HS400:
+			 * 1. Set HS_TIMING to 1 after HS200 tuning.
+			 * 2. Set frequency below 52MHz.
+			 * 3. Set bus width to DDR 8bit.
+			 * 4. Set HS_TIMING to 3 as HS400.
+			 */
+			err = mmc_select_hs400_start(card);
+			if (err) {
+				pr_warn("%s: hs400_start err=0x%x.\n",
+					mmc_hostname(card->host), err);
+				goto free_card;
+			}
+			ext_csd_bits = EXT_CSD_DDR_BUS_WIDTH_8;
+		} else {
+			ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
 				EXT_CSD_BUS_WIDTH_8 : EXT_CSD_BUS_WIDTH_4;
+		}
 		err = mmc_select_powerclass(card, ext_csd_bits, ext_csd);
 		if (err)
-			pr_warning("%s: power class selection to bus width %d"
+			pr_warn("%s: power class selection to bus width %d"
 				   " failed\n", mmc_hostname(card->host),
 				   1 << bus_width);
+		if (mmc_card_hs400(card)) {
+			err = mmc_select_hs400_end(card, max_dtr);
+			if (err) {
+				pr_warn("%s: hs400_end err=0x%x.\n",
+					mmc_hostname(card->host), err);
+				goto free_card;
+			}
+		}
 	}
 
 	/*
 	 * Activate wide bus and DDR (if supported).
 	 */
-	if (!mmc_card_hs200(card) &&
+	if ((!mmc_card_hs200(card) && !mmc_card_hs400(card)) &&
 	    (card->csd.mmca_vsn >= CSD_SPEC_VER_4) &&
 	    (host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) {
 		static unsigned ext_csd_bits[][2] = {
@@ -1231,6 +1535,15 @@
 					MMC_SIGNAL_VOLTAGE_120);
 				if (err)
 					goto err;
+			} else {
+				/*
+				 * for SDHC host controller, 1.8v signaling is
+				 * required for DDR mode
+				 */
+				err = __mmc_set_signal_voltage(host,
+					MMC_SIGNAL_VOLTAGE_180);
+				if (err)
+					goto err;
 			}
 			mmc_card_set_ddr_mode(card);
 			mmc_set_timing(card->host, MMC_TIMING_UHS_DDR50);
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 49f04bc..91200fc 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -141,7 +141,7 @@
 	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;
 
-	for (i = 100; i; i--) {
+	for (i = 200; i; i--) {
 		err = mmc_wait_for_cmd(host, &cmd, 0);
 		if (err)
 			break;
@@ -161,7 +161,7 @@
 
 		err = -ETIMEDOUT;
 
-		mmc_delay(10);
+		usleep_range(5000, 5500);
 	}
 
 	if (rocr && !mmc_host_is_spi(host))
@@ -444,8 +444,16 @@
 	timeout = jiffies + msecs_to_jiffies(MMC_OPS_TIMEOUT_MS);
 	do {
 		err = mmc_send_status(card, &status);
-		if (err)
-			return err;
+		if (err) {
+			if (err == -EILSEQ && index == EXT_CSD_HS_TIMING) {
+				pr_warn("%s: CMD13 error after switching timing\n"
+					"%s: this error can be ignored...\n",
+					mmc_hostname(card->host),
+					mmc_hostname(card->host));
+				return 0;
+			} else
+				return err;
+		}
 		if (card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
 			break;
 		if (mmc_host_is_spi(card->host))
@@ -622,6 +630,8 @@
 		cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
 	else if (opcode == MMC_SEND_STATUS)
 		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
+	else
+		return -EINVAL;
 
 	cmd.opcode = opcode;
 	cmd.arg = card->rca << 16 | 1;
@@ -638,3 +648,711 @@
 
 	return 0;
 }
+
+static int mmc_rpmb_send_command(struct mmc_card *card, u8 *buf, __u16 blks,
+		__u16 type, u8 req_type)
+{
+	struct mmc_request mrq = {NULL};
+	struct mmc_command cmd = {0};
+	struct mmc_command sbc = {0};
+	struct mmc_data data = {0};
+	struct scatterlist sg;
+	u8 *transfer_buf = NULL;
+
+	mrq.sbc = &sbc;
+	mrq.cmd = &cmd;
+	mrq.data = &data;
+	mrq.stop = NULL;
+	transfer_buf = kzalloc(512 * blks, GFP_KERNEL);
+	if (!transfer_buf)
+		return -ENOMEM;
+
+	/*
+	 * set CMD23
+	 */
+	sbc.opcode = MMC_SET_BLOCK_COUNT;
+	sbc.arg = blks;
+	if ((req_type == RPMB_REQ) && type == RPMB_WRITE_DATA)
+		sbc.arg |= 1 << 31;
+	sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
+
+	/*
+	 * set CMD25/18
+	 */
+	sg_init_one(&sg, transfer_buf, 512 * blks);
+	if (req_type == RPMB_REQ) {
+		cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
+		sg_copy_from_buffer(&sg, 1, buf, 512 * blks);
+		data.flags |= MMC_DATA_WRITE;
+	} else {
+		cmd.opcode = MMC_READ_MULTIPLE_BLOCK;
+		data.flags |= MMC_DATA_READ;
+	}
+	cmd.arg = 0;
+	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+	data.blksz = 512;
+	data.blocks = blks;
+	data.sg = &sg;
+	data.sg_len = 1;
+
+	mmc_set_data_timeout(&data, card);
+
+	mmc_wait_for_req(card->host, &mrq);
+
+	if (req_type != RPMB_REQ)
+		sg_copy_to_buffer(&sg, 1, buf, 512 * blks);
+
+	kfree(transfer_buf);
+
+	if (cmd.error)
+		return cmd.error;
+	if (data.error)
+		return data.error;
+	return 0;
+}
+
+void mmc_rpmb_post_frame(struct mmc_core_rpmb_req *rpmb_req)
+{
+	int i;
+	struct mmc_ioc_rpmb_req *p_req;
+	__u8 *buf_frame;
+
+	if (!rpmb_req || !rpmb_req->ready)
+		return;
+
+	p_req = rpmb_req->req;
+	buf_frame = rpmb_req->frame;
+
+	if (!p_req || !buf_frame)
+		return;
+	/*
+	 * Regarding to the check rules, here is the post
+	 * rules
+	 * All will return result.
+	 * GET_WRITE_COUNTER:
+	 *              must: write counter, nonce
+	 *              optional: MAC
+	 * WRITE_DATA:
+	 *              must: MAC, write counter
+	 * READ_DATA:
+	 *              must: nonce, data
+	 *              optional: MAC
+	 * PROGRAM_KEY:
+	 *              must: Nothing
+	 *
+	 * Except READ_DATA, all of these operations only need to parse
+	 * one frame. READ_DATA needs blks frames to get DATA
+	 */
+
+	memcpy(p_req->result, buf_frame + RPMB_RES_BEG, 2);
+	*p_req->result = be16_to_cpup(p_req->result);
+
+	if (p_req->type == RPMB_PROGRAM_KEY)
+		goto out;
+
+	if (p_req->type == RPMB_GET_WRITE_COUNTER ||
+			p_req->type == RPMB_WRITE_DATA) {
+		memcpy(p_req->wc, buf_frame + RPMB_WCOUNTER_BEG, 4);
+		*p_req->wc = be32_to_cpup(p_req->wc);
+	}
+
+	if (p_req->type == RPMB_GET_WRITE_COUNTER ||
+			p_req->type == RPMB_READ_DATA) {
+		/* nonce copy */
+		memcpy(p_req->nonce, buf_frame + RPMB_NONCE_BEG, 16);
+	}
+	/*
+	 * Take MAC within the last package
+	 */
+	if (p_req->type == RPMB_READ_DATA) {
+		__u8 *data = p_req->data;
+		for (i = 0; i < p_req->blk_cnt; i++) {
+			memcpy(data, buf_frame + i * 512 + RPMB_DATA_BEG, 256);
+			data += 256;
+		}
+		/*
+		 * MAC stored in the last package
+		 */
+		if (p_req->mac) {
+			i--;
+			memcpy(p_req->mac, buf_frame + i * 512 + RPMB_MAC_BEG,
+					32);
+		}
+	} else if (p_req->mac)
+		memcpy(p_req->mac, buf_frame + RPMB_MAC_BEG, 32);
+out:
+	kfree(buf_frame);
+	rpmb_req->frame = NULL;
+	return;
+}
+EXPORT_SYMBOL_GPL(mmc_rpmb_post_frame);
+
+static int mmc_rpmb_request_check(struct mmc_card *card,
+		struct mmc_ioc_rpmb_req *p_req)
+{
+	/*
+	 * Some paramter is a must for the operation. Different
+	 * operation expect different paramters. Below code is
+	 * used for checking this.
+	 *
+	 * All operations will need result.
+	 * GET_WRITE_COUNTER:
+	 *              must: write counter, nonce
+	 *              optional: MAC
+	 * WRITE_DATA:
+	 *              must: MAC, data, write counter
+	 * READ_DATA:
+	 *              must: nonce, data
+	 *              optional: MAC
+	 * PROGRAM_KEY:
+	 *              must: MAC
+	 *
+	 * So here, we only check the 'must' paramters
+	 */
+	if (!p_req->result) {
+		pr_err("%s: Type %d has NULL pointer for result\n",
+				mmc_hostname(card->host), p_req->type);
+		return -EINVAL;
+	}
+
+	if (p_req->type == RPMB_GET_WRITE_COUNTER) {
+		if (!p_req->nonce || !p_req->wc) {
+			pr_err("%s: Type %d has NULL pointer for nonce/wc\n",
+					mmc_hostname(card->host), p_req->type);
+			return -EINVAL;
+		}
+		/*
+		 * used to allocate frame
+		 */
+		p_req->blk_cnt = 1;
+	} else if (p_req->type == RPMB_WRITE_DATA ||
+			p_req->type == RPMB_READ_DATA) {
+		if ((__u32)(p_req->addr + p_req->blk_cnt) >
+				card->ext_csd.rpmb_size) {
+			pr_err("%s Type %d: beyond the RPMB partition rang addr %d, blk_cnt %d, rpmb_size %d\n",
+					mmc_hostname(card->host),
+					p_req->type,
+					p_req->addr,
+					p_req->blk_cnt,
+					card->ext_csd.rpmb_size);
+			/*
+			 * Not return error here since we want device to handle
+			 * such errors
+			 */
+		}
+		if (p_req->blk_cnt == 0) {
+			pr_err("%s: Type %d has zero block count\n",
+					mmc_hostname(card->host),
+					p_req->blk_cnt);
+			return -EINVAL;
+		} else if (p_req->blk_cnt > card->rpmb_max_req) {
+			pr_err("%s: Type %d has invalid block count, cannot large than %d\n",
+					mmc_hostname(card->host),
+					p_req->blk_cnt,
+					card->rpmb_max_req);
+			return -EINVAL;
+		}
+		if (!p_req->data) {
+			pr_err("%s: Type %d has NULL pointer for data\n",
+					mmc_hostname(card->host), p_req->type);
+			return -EINVAL;
+		}
+		if (p_req->type == RPMB_WRITE_DATA) {
+			if (!p_req->wc || !p_req->mac) {
+				pr_err("%s: Type %d has NULL pointer for write counter/MAC\n",
+						mmc_hostname(card->host),
+						p_req->type);
+				return -EINVAL;
+			}
+		} else {
+			if (!p_req->nonce) {
+				pr_err("%s: Type %d has NULL pointer for nonce\n",
+						mmc_hostname(card->host),
+						p_req->type);
+				return -EINVAL;
+			}
+		}
+	} else
+		return -EOPNOTSUPP;
+
+	return 0;
+}
+
+/*
+ * prepare the request of RPMB frame
+ * RPMB frame is MSB first
+ * convert needed bytes
+ * return how many frames will be prepared
+ */
+int mmc_rpmb_pre_frame(struct mmc_core_rpmb_req *rpmb_req,
+		struct mmc_card *card)
+{
+	int i, ret;
+	struct mmc_ioc_rpmb_req *p_req;
+	__u8 *buf_frame;
+	__u16 blk_cnt, addr, type;
+	__u32 w_counter;
+
+	if (!rpmb_req || !card)
+		return -EINVAL;
+
+	p_req = rpmb_req->req;
+	if (!p_req) {
+		pr_err("%s: mmc_ioc_rpmb_req is NULL. Wrong parameter\n",
+				mmc_hostname(card->host));
+		return -EINVAL;
+	}
+
+	/*
+	 * make sure these two items are clear
+	 */
+	rpmb_req->ready = 0;
+	rpmb_req->frame = NULL;
+
+	ret = mmc_rpmb_request_check(card, p_req);
+	if (ret)
+		return ret;
+
+	buf_frame = kzalloc(512 * p_req->blk_cnt, GFP_KERNEL);
+	if (!buf_frame) {
+		pr_err("%s: cannot allocate frame for type %d\n",
+				mmc_hostname(card->host), p_req->type);
+		return -ENOMEM;
+	}
+
+	type = cpu_to_be16p(&p_req->type);
+	if (p_req->type == RPMB_GET_WRITE_COUNTER ||
+			p_req->type == RPMB_READ_DATA) {
+		/*
+		 * One package prepared
+		 * This request needs Nonce and type
+		 * If is data read, then also need addr
+		 */
+		memcpy(buf_frame + RPMB_TYPE_BEG, &type, 2);
+		if (p_req->type == RPMB_READ_DATA) {
+			addr = cpu_to_be16p(&p_req->addr);
+			memcpy(buf_frame + RPMB_ADDR_BEG, &addr, 2);
+		}
+		/* convert Nonce code */
+		memcpy(buf_frame + RPMB_NONCE_BEG, p_req->nonce, 16);
+	} else if (p_req->type == RPMB_WRITE_DATA) {
+		__u8 *data = p_req->data;
+		/*
+		 * multiple package prepared
+		 * This request nees blk_cnt, addr, write_counter,
+		 * data and mac
+		 */
+		blk_cnt = cpu_to_be16p(&p_req->blk_cnt);
+		addr = cpu_to_be16p(&p_req->addr);
+		w_counter = cpu_to_be32p(p_req->wc);
+		for (i = 0; i < p_req->blk_cnt; i++) {
+			memcpy(buf_frame + i * 512 + RPMB_TYPE_BEG,
+					&type, 2);
+			memcpy(buf_frame + i * 512 + RPMB_BLKS_BEG,
+					&blk_cnt, 2);
+			memcpy(buf_frame + i * 512 + RPMB_ADDR_BEG,
+					&addr, 2);
+			memcpy(buf_frame + i * 512 + RPMB_WCOUNTER_BEG,
+					&w_counter, 4);
+			memcpy(buf_frame + i * 512 + RPMB_DATA_BEG,
+					data, 256);
+			data += 256;
+		}
+		/* convert MAC code */
+		memcpy(buf_frame + 512 * (i - 1) + RPMB_MAC_BEG,
+				p_req->mac, 32);
+	} else {
+		pr_err("%s: We shouldn't be here\n", mmc_hostname(card->host));
+		kfree(buf_frame);
+		return -EINVAL;
+	}
+	rpmb_req->ready = 1;
+	rpmb_req->frame = buf_frame;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mmc_rpmb_pre_frame);
+
+int mmc_rpmb_partition_ops(struct mmc_core_rpmb_req *rpmb_req,
+		struct mmc_card *card)
+{
+	int err = 0;
+	struct mmc_ioc_rpmb_req *p_req;
+	__u16 type, blks;
+	__u8 *buf_frame;
+
+	if (!rpmb_req || !card)
+		return -EINVAL;
+
+	p_req = rpmb_req->req;
+	buf_frame = rpmb_req->frame;
+
+	if (!p_req || !rpmb_req->ready || !buf_frame) {
+		pr_err("%s: mmc_ioc_rpmb_req is not prepared\n",
+				mmc_hostname(card->host));
+		return -EINVAL;
+	}
+
+	type = p_req->type;
+	blks = p_req->blk_cnt;
+
+	/*
+	 * STEP 1: send request to RPMB partition
+	 */
+	if (type == RPMB_WRITE_DATA)
+		err = mmc_rpmb_send_command(card, buf_frame, blks,
+				type, RPMB_REQ);
+	else
+		err = mmc_rpmb_send_command(card, buf_frame, 1, type, RPMB_REQ);
+
+	if (err) {
+		pr_err("%s: request write counter failed (%d)\n",
+				mmc_hostname(card->host), err);
+		goto out;
+	}
+
+	memset(buf_frame, 0, 512 * blks);
+	/*
+	 * STEP 2: check write result
+	 * Only for WRITE_DATA or Program key
+	 */
+	if (type == RPMB_WRITE_DATA) {
+		buf_frame[RPMB_TYPE_BEG + 1] = RPMB_RESULT_READ;
+		err = mmc_rpmb_send_command(card, buf_frame, 1,
+				RPMB_RESULT_READ, RPMB_REQ);
+		if (err) {
+			pr_err("%s: request write counter failed (%d)\n",
+					mmc_hostname(card->host), err);
+			goto out;
+		}
+	}
+
+	/*
+	 * STEP 3: get response from RPMB partition
+	 */
+
+	if (type == RPMB_READ_DATA)
+		err = mmc_rpmb_send_command(card, buf_frame,
+				blks, type, RPMB_RESP);
+	else
+		err = mmc_rpmb_send_command(card, buf_frame,
+				1, type, RPMB_RESP);
+	if (err) {
+		pr_err("%s: response write counter failed (%d)\n",
+				mmc_hostname(card->host), err);
+	}
+out:
+	return err;
+}
+EXPORT_SYMBOL_GPL(mmc_rpmb_partition_ops);
+
+static int mmc_switch_part(struct mmc_card *card, u8 part)
+{
+	int ret;
+
+	ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+			EXT_CSD_PART_CONFIG, part,
+			card->ext_csd.part_time);
+	if (ret)
+		pr_err("%s: switch failed with %d, part %d\n",
+				__func__, ret, part);
+
+	return ret;
+}
+/*
+ * @part: GPP partition part number
+ * @addr: GPP write group
+ */
+int mmc_wp_status(struct mmc_card *card, unsigned int part,
+		unsigned int addr, u8 *wp_status)
+{
+	struct mmc_command cmd = {0};
+	struct mmc_data data = {0};
+	struct mmc_request mrq = {0};
+	struct scatterlist sg;
+	u32 status = 0;
+	int err = 0;
+	u8 *rbuf = NULL;
+
+	if (!card)
+		return -ENODEV;
+
+	if (!card->ext_csd.gpp_sz[part - EXT_CSD_PART_CONFIG_ACC_GP0]) {
+		pr_err("%s: doesn't have GPP%d\n", __func__,
+				part - 3);
+		return -ENODEV;
+	}
+
+	rbuf = kzalloc(8, GFP_KERNEL);
+	if (rbuf == NULL) {
+		pr_err("%s: no memory\n", __func__);
+		return -ENOMEM;
+	}
+
+	cmd.opcode = MMC_SEND_WRITE_PROT_TYPE;
+	cmd.arg = addr * card->ext_csd.wpg_sz;
+	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+
+	data.sg = &sg;
+	data.sg_len = 1;
+	data.blksz = 8;
+	data.blocks = 1;
+	data.flags = MMC_DATA_READ;
+	sg_init_one(data.sg, rbuf, 8);
+	mrq.data = &data;
+	mrq.cmd = &cmd;
+
+	mmc_claim_host(card->host);
+
+	mmc_set_data_timeout(&data, card);
+
+	err = mmc_switch_part(card, part);
+	if (err) {
+		mmc_release_host(card->host);
+		dev_err(mmc_dev(card->host), "%s: swith error %d\n",
+				__func__, err);
+		goto out;
+	}
+
+	mmc_wait_for_req(card->host, &mrq);
+	if (cmd.error) {
+		dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
+				__func__, cmd.error);
+	}
+	if (data.error) {
+		dev_err(mmc_dev(card->host), "%s: data error %d\n",
+				__func__, data.error);
+	}
+
+	/* Must check status to be sure of no errors */
+	do {
+		err = mmc_send_status(card, &status);
+		if (err) {
+			pr_err("%s: get card status err %d, status 0x%x\n",
+					__func__, err, status);
+			goto out;
+		}
+		if (card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
+			break;
+		if (mmc_host_is_spi(card->host))
+			break;
+	} while (R1_CURRENT_STATE(status) == R1_STATE_PRG);
+
+	if (mmc_host_is_spi(card->host)) {
+		if (status & R1_SPI_ILLEGAL_COMMAND) {
+			pr_err("%s: error card status 0x%x\n",
+					__func__, status);
+			goto out;
+		}
+	} else {
+		if (status & 0xFDFFA000)
+			pr_warn("%s: unexpected status %#x after switch",
+					__func__, status);
+		if (status & R1_SWITCH_ERROR) {
+			pr_err("%s: card switch error, status 0x%x\n",
+					__func__, status);
+		}
+		if (status & R1_OUT_OF_RANGE) {
+			pr_err("%s: addr out of range, status 0x%x\n",
+					__func__, status);
+			goto out;
+		}
+	}
+
+	mmc_switch_part(card, EXT_CSD_PART_CONFIG_ACC_USER);
+
+	mmc_release_host(card->host);
+
+	sg_copy_from_buffer(data.sg, 1, rbuf, 8);
+
+	/*
+	 * the first write protect group type is in the last two
+	 * bits in the last byte read from the device.
+	 */
+	*wp_status = rbuf[7] & 0x3;
+
+	kfree(rbuf);
+
+	return 0;
+out:
+	kfree(rbuf);
+
+	return -EPERM;
+}
+EXPORT_SYMBOL_GPL(mmc_wp_status);
+
+/**
+ *     mmc_switch_bits - modify EXT_CSD register
+ *     @card: the MMC card associated with the data transfer
+ *     @set: cmd set values
+ *     @index: EXT_CSD register index
+ *     @value: value to program into EXT_CSD register
+ *     @timeout_ms: timeout (ms) for operation performed by register write,
+ *                   timeout of zero implies maximum possible timeout
+ *     @check_busy: Set the 'R1B' flag or not. Some operations, such as
+ *                   Sanitize, may need long time to finish. And some
+ *                   host controller, such as the SDHCI host controller,
+ *                   only allows limited max timeout value. So, introduce
+ *                   this to skip the busy check for those operations.
+ *     @set: true when want to set value; false when want to clear value
+ *
+ *     Modifies the EXT_CSD register for selected card.
+ */
+static int mmc_switch_bits(struct mmc_card *card, u8 cmdset, u8 index, u8 value,
+		unsigned int timeout_ms, int check_busy, bool set)
+{
+	int err;
+	struct mmc_command cmd = {0};
+	u32 status;
+	u8 access = set ? MMC_SWITCH_MODE_SET_BITS :
+		MMC_SWITCH_MODE_CLEAR_BITS;
+
+	BUG_ON(!card);
+	BUG_ON(!card->host);
+
+	cmd.opcode = MMC_SWITCH;
+	cmd.arg = (access << 24) |
+		(index << 16) |
+		(value << 8) |
+		cmdset;
+	if (check_busy)
+		cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+	else
+		cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
+
+	cmd.cmd_timeout_ms = timeout_ms;
+
+	err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
+	if (err)
+		return err;
+
+	/* Must check status to be sure of no errors */
+	do {
+		err = mmc_send_status(card, &status);
+		if (err)
+			return err;
+		if (card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
+			break;
+		if (mmc_host_is_spi(card->host))
+			break;
+	} while (R1_CURRENT_STATE(status) == R1_STATE_PRG);
+
+	if (mmc_host_is_spi(card->host)) {
+		if (status & R1_SPI_ILLEGAL_COMMAND)
+			return -EBADMSG;
+	} else {
+		if (status & 0xFDFFA000)
+			pr_warn("%s: unexpected status %#x\n",
+					mmc_hostname(card->host), status);
+		if (status & R1_SWITCH_ERROR)
+			return -EBADMSG;
+	}
+
+	return 0;
+}
+/*
+ * This needs to be called with host claimed
+ * @part: GPP partition part ID, should be 1/2/3/4.
+ * @addr: GPP write group unit
+ */
+int mmc_set_user_wp(struct mmc_card *card, unsigned int part,
+		unsigned int wpg)
+{
+	struct mmc_command cmd = {0};
+	int err = 0;
+	u32 status = 0;
+
+	if (!card)
+		return -ENODEV;
+
+	mmc_claim_host(card->host);
+
+	/*
+	 * enable WP to partitions
+	 * set bit2 of ext_csd[171], permanent write protect
+	 */
+	err = mmc_switch_bits(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_USER_WP,
+			EXT_CSD_PERMANENT_WP, card->ext_csd.generic_cmd6_time,
+			true, true);
+	if (err) {
+		pr_err("%s: enable permanent write protect err %d!\n",
+				__func__, err);
+		mmc_release_host(card->host);
+		return err;
+	}
+
+	err = mmc_switch_part(card, part);
+	if (err)
+		goto switchback;
+
+	cmd.opcode = MMC_SET_WRITE_PROT;
+	cmd.arg = wpg * card->ext_csd.wpg_sz;
+	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+	err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
+	if (err) {
+		pr_err("%s: failed to set addr 0x%x write protected, err %d\n",
+				__func__, cmd.arg, err);
+		goto out;
+	}
+
+	/* Must check status to be sure of no errors */
+	do {
+		err = mmc_send_status(card, &status);
+		if (err) {
+			pr_err("%s: card status get err %d, status 0x%x\n",
+					__func__, err, status);
+			goto out;
+		}
+		if (card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
+			break;
+		if (mmc_host_is_spi(card->host))
+			break;
+	} while (R1_CURRENT_STATE(status) == R1_STATE_PRG);
+
+	if (mmc_host_is_spi(card->host)) {
+		if (status & R1_SPI_ILLEGAL_COMMAND) {
+			pr_err("%s: error card status 0x%x\n",
+					__func__, status);
+			err = -EILSEQ;
+			goto out;
+		}
+	} else {
+		if (status & 0xFDFFA000)
+			pr_warn("%s: unexpected status %#x after switch",
+					__func__, status);
+		if (status & R1_SWITCH_ERROR) {
+			pr_err("%s: card switch error, status 0x%x\n",
+					__func__, status);
+			err = -EIO;
+			goto out;
+		}
+		if (status & R1_OUT_OF_RANGE) {
+			pr_err("%s: addr out of range, status 0x%x\n",
+					__func__, status);
+			err = -EINVAL;
+		}
+	}
+
+out:
+	err = mmc_switch_part(card, EXT_CSD_PART_CONFIG_ACC_USER);
+	if (err) {
+		pr_warn("%s: switch to USER partition failed!\n", __func__);
+		WARN_ON(err);
+	}
+
+switchback:
+	/*
+	 * clear bit2 of ext_csd[171], permanent write protect
+	 */
+	err = mmc_switch_bits(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_USER_WP,
+			EXT_CSD_PERMANENT_WP, card->ext_csd.generic_cmd6_time,
+			true, false);
+	if (err) {
+		pr_err("%s: clear write protect err %d!\n",
+				__func__, err);
+	}
+
+	mmc_release_host(card->host);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(mmc_set_user_wp);
diff --git a/drivers/mmc/core/mmc_panic_ops.c b/drivers/mmc/core/mmc_panic_ops.c
new file mode 100644
index 0000000..4b52691
--- /dev/null
+++ b/drivers/mmc/core/mmc_panic_ops.c
@@ -0,0 +1,885 @@
+/*
+ * linux/drivers/mmc/core/mmc_panic_ops.c
+ *
+ * Copyright (C) 2011 Intel Corp
+ * Author: dongxing.zhang@intel.com
+ * Author: jun.zhang@intel.com
+ * Author: chuansheng.liu@intel.com
+ * Author: chuanxiao.dong@intel.com
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ */
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sd.h>
+
+#include "core.h"
+#include "bus.h"
+#include "host.h"
+
+#include "mmc_ops.h"
+
+
+static struct mmc_panic_host *panic_host;
+
+static int mmc_emergency_prepare(void)
+{
+	struct mmc_host *mmc = panic_host->mmc;
+
+	if (mmc == NULL) {
+		pr_err("%s: panic host was not setup\n", __func__);
+		return -ENODEV;
+	}
+
+	/*
+	 * once panic happened, we monopolize the host controller.
+	 * so claim host without relase any more.
+	 */
+	mmc->claimed = 1;
+	mmc->claimer = current;
+	mmc->claim_cnt += 1;
+#ifdef CONFIG_MMC_CLKGATE
+	/*
+	 * disable the clock gating
+	 */
+	mmc->clk_gated = false;
+	mmc->clk_requests++;
+	mmc->ios.clock = mmc->clk_old;
+#endif
+	return 0;
+}
+
+static void mmc_emergency_ready(void)
+{
+	panic_host->panic_ready = 1;
+}
+
+/*
+ * Return the card size in sectors.
+ *
+ * return value:
+ * the sector number
+ */
+static unsigned int mmc_get_capacity(struct mmc_card *card)
+{
+	if (!mmc_card_sd(card) && mmc_card_blockaddr(card))
+		return card->ext_csd.sectors;
+	else
+		return card->csd.capacity << (card->csd.read_blkbits - 9);
+}
+
+static void mmc_emergency_send_req(struct mmc_request *mrq)
+{
+	struct mmc_panic_host *host = panic_host;
+
+	mrq->cmd->error = 0;
+	mrq->cmd->mrq = mrq;
+
+	if (mrq->data) {
+		BUG_ON(mrq->data->blksz > host->max_blk_size);
+		BUG_ON(mrq->data->blocks > host->max_blk_count);
+		BUG_ON(mrq->data->blocks * mrq->data->blksz >
+				host->max_req_size);
+
+		mrq->cmd->data = mrq->data;
+		mrq->data->error = 0;
+		mrq->data->mrq = mrq;
+		if (mrq->stop) {
+			mrq->data->stop = mrq->stop;
+			mrq->stop->error = 0;
+			mrq->stop->mrq = mrq;
+		}
+	}
+
+	/*
+	 * Send the request to host
+	 *
+	 * if request handling is successful, return.
+	 * If request handling is failed and has rety, resend request.
+	 * During retry, if request handling is still failed, core layer
+	 * will keep on retry untill cmd->retries is 0.
+	 *
+	 * So in this way, makes retry blind to host driver. Totally
+	 * controlled by core driver
+	 */
+	host->panic_ops->request(host, mrq);
+
+	while ((mrq->cmd->error || (mrq->data && (mrq->data->error ||
+			(mrq->data->stop && mrq->data->stop->error)))) &&
+			mrq->cmd->retries > 0) {
+		/* clear errors */
+		mrq->cmd->error = 0;
+		if (mrq->data) {
+			mrq->data->error = 0;
+			if (mrq->stop)
+				mrq->stop->error = 0;
+		}
+		host->panic_ops->request(host, mrq);
+		mrq->cmd->retries--;
+	}
+}
+
+static int mmc_emergency_send_cmd(struct mmc_command *cmd, int retries)
+{
+	struct mmc_request mrq;
+
+	memset(&mrq, 0, sizeof(struct mmc_request));
+
+	memset(cmd->resp, 0, sizeof(cmd->resp));
+	cmd->retries = retries;
+
+	mrq.cmd = cmd;
+	cmd->data = NULL;
+
+	mmc_emergency_send_req(&mrq);
+
+	return cmd->error;
+}
+
+static int __mmc_emergency_write(unsigned int blk_id)
+{
+	struct mmc_request mrq;
+	struct mmc_command cmd;
+	struct mmc_data data;
+
+	memset(&mrq, 0, sizeof(struct mmc_request));
+	memset(&cmd, 0, sizeof(struct mmc_command));
+	memset(&data, 0, sizeof(struct mmc_data));
+
+	mrq.cmd = &cmd;
+	mrq.data = &data;
+	cmd.opcode = MMC_WRITE_BLOCK;
+	cmd.arg = blk_id;
+	if (!panic_host->blkaddr)
+		cmd.arg <<= 9;
+	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+	/*
+	 * Fix these values;
+	 */
+	data.blksz = 512;
+	data.blocks = 1;
+	data.dmabuf = panic_host->dmabuf;
+
+	mmc_emergency_send_req(&mrq);
+
+	return cmd.error;
+}
+
+
+static int mmc_emergency_go_idle(struct mmc_panic_host *host)
+{
+	int err;
+	struct mmc_command cmd;
+
+	/*
+	 * Non-SPI hosts need to prevent chipselect going active during
+	 * GO_IDLE; that would put chips into SPI mode.  Remind them of
+	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
+	 *
+	 * SPI hosts ignore ios.chip_select; it's managed according to
+	 * rules that must accomodate non-MMC slaves which this layer
+	 * won't even know about.
+	 */
+	if (!mmc_host_is_spi(host)) {
+		host->ios.chip_select = MMC_CS_HIGH;
+		host->panic_ops->set_ios(host);
+		mdelay(1);
+	}
+
+	memset(&cmd, 0, sizeof(struct mmc_command));
+
+	cmd.opcode = MMC_GO_IDLE_STATE;
+	cmd.arg = 0;
+	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;
+
+	err = mmc_emergency_send_cmd(&cmd, 0);
+
+	mdelay(1);
+
+	if (!mmc_host_is_spi(host)) {
+		host->ios.chip_select = MMC_CS_DONTCARE;
+		host->panic_ops->set_ios(host);
+		mdelay(1);
+	}
+
+	return err;
+}
+static int mmc_emergency_send_op_cond(struct mmc_panic_host *host,
+		u32 ocr, u32 *rocr)
+{
+	struct mmc_command cmd;
+	int i, err = 0;
+
+	memset(&cmd, 0, sizeof(struct mmc_command));
+
+	cmd.opcode = MMC_SEND_OP_COND;
+	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
+	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;
+
+	for (i = 100; i; i--) {
+		err = mmc_emergency_send_cmd(&cmd, 0);
+		if (err)
+			break;
+
+		/* if we're just probing, do a single pass */
+		if (ocr == 0)
+			break;
+
+		/* otherwise wait until reset completes */
+		if (mmc_host_is_spi(host)) {
+			if (!(cmd.resp[0] & R1_SPI_IDLE))
+				break;
+		} else {
+			if (cmd.resp[0] & MMC_CARD_BUSY)
+				break;
+		}
+
+		err = -ETIMEDOUT;
+
+		/*
+		 * If command 1 is failed, wait 10ms and then
+		 * have a retry. Card may need time to prepare
+		 * for the next command 1
+		 */
+		mdelay(10);
+	}
+
+	if (rocr && !mmc_host_is_spi(host))
+		*rocr = cmd.resp[0];
+
+	return err;
+}
+
+static int mmc_emergency_all_send_cid(u32 *cid)
+{
+	int err;
+	struct mmc_command cmd;
+
+	memset(&cmd, 0, sizeof(struct mmc_command));
+
+	cmd.opcode = MMC_ALL_SEND_CID;
+	cmd.arg = 0;
+	cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR;
+
+	err = mmc_emergency_send_cmd(&cmd, MMC_CMD_RETRIES);
+	if (err)
+		return err;
+
+	memcpy(cid, cmd.resp, sizeof(u32) * 4);
+
+	return 0;
+}
+
+static int mmc_emergency_set_relative_addr(struct mmc_card *card)
+{
+	int err;
+	struct mmc_command cmd;
+
+	memset(&cmd, 0, sizeof(struct mmc_command));
+
+	cmd.opcode = MMC_SET_RELATIVE_ADDR;
+	cmd.arg = card->rca << 16;
+	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
+
+	err = mmc_emergency_send_cmd(&cmd, MMC_CMD_RETRIES);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+static int mmc_emergency_select_card(struct mmc_card *card)
+{
+	int err;
+	struct mmc_command cmd;
+
+	memset(&cmd, 0, sizeof(struct mmc_command));
+
+	cmd.opcode = MMC_SELECT_CARD;
+
+	if (card) {
+		cmd.arg = card->rca << 16;
+		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
+	} else {
+		cmd.arg = 0;
+		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
+	}
+
+	err = mmc_emergency_send_cmd(&cmd, MMC_CMD_RETRIES);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+static int mmc_emergency_send_status(struct mmc_panic_host *host, u32 *status)
+{
+	struct mmc_card *card = host->card;
+	int err;
+	struct mmc_command cmd;
+
+	memset(&cmd, 0, sizeof(struct mmc_command));
+
+	cmd.opcode = MMC_SEND_STATUS;
+	if (!mmc_host_is_spi(host))
+		cmd.arg = card->rca << 16;
+	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
+
+	err = mmc_emergency_send_cmd(&cmd, MMC_CMD_RETRIES);
+	if (err)
+		return err;
+
+	/* NOTE: callers are required to understand the difference
+	 * between "native" and SPI format status words!
+	 */
+	if (status)
+		*status = cmd.resp[0];
+
+	return 0;
+}
+static int mmc_emergency_switch(struct mmc_panic_host *host,
+		u8 set, u8 index, u8 value, u8 check_busy)
+{
+	struct mmc_card *card = host->card;
+	int err;
+	struct mmc_command cmd;
+	u32 status;
+
+	memset(&cmd, 0, sizeof(struct mmc_command));
+
+	cmd.opcode = MMC_SWITCH;
+	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
+		(index << 16) |
+		(value << 8) |
+		set;
+	if (check_busy)
+		cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+	else
+		cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
+
+	err = mmc_emergency_send_cmd(&cmd, MMC_CMD_RETRIES);
+	if (err)
+		return err;
+
+	/* Must check status to be sure of no errors */
+	do {
+		err = mmc_emergency_send_status(host, &status);
+		if (err)
+			return err;
+		if (host->caps & MMC_CAP_WAIT_WHILE_BUSY)
+			break;
+		if (mmc_host_is_spi(host))
+			break;
+	} while (R1_CURRENT_STATE(status) == 7);
+
+	if (mmc_host_is_spi(host)) {
+		if (status & R1_SPI_ILLEGAL_COMMAND)
+			return -EBADMSG;
+	} else {
+		if (status & 0xFDFFA000)
+			pr_warn("%s: unexpected status %#x after switch",
+				mmc_hostname(card->host), status);
+		if (status & R1_SWITCH_ERROR)
+			return -EBADMSG;
+	}
+
+	return 0;
+}
+
+static int mmc_emergency_cache_disable(struct mmc_panic_host *host)
+{
+	struct mmc_card *card = host->card;
+	int err = 0;
+
+	if (!(host->caps2 & MMC_CAP2_CACHE_CTRL))
+		return err;
+
+	if (card && mmc_card_mmc(card) && card->ext_csd.cache_ctrl &&
+			(card->ext_csd.cache_size > 0)) {
+		/*
+		 * disable cache will cause flushing data to
+		 * non-volatile storage, so we may need to
+		 * check busy state here by polling card status
+		 */
+		err = mmc_emergency_switch(host,
+				EXT_CSD_CMD_SET_NORMAL,
+				EXT_CSD_CACHE_CTRL, 0,
+				0);
+
+		if (err)
+			pr_err("%s: disable cache error %d in panic mode\n",
+					mmc_hostname(card->host), err);
+		else
+			card->ext_csd.cache_ctrl = 0;
+	}
+
+	return err;
+}
+
+static int mmc_emergency_spi_set_crc(struct mmc_panic_host *host, int use)
+{
+	return -1;
+}
+
+static int mmc_emergency_send_cid(struct mmc_panic_host *host, u32 *cid)
+{
+	return -1;
+}
+/*
+ * reinit card:
+ * should also consider about the SPI host
+ */
+static int mmc_emergency_reinit_card(void)
+{
+	struct mmc_panic_host *host = panic_host;
+	struct mmc_card *card = host->card;
+	u32 ocr = host->ocr;
+	int err, ddr = 0;
+	u32 cid[4];
+	unsigned int max_dtr;
+
+	if (mmc_card_sd(card))
+		return 0;
+
+	/*
+	 * before re-init card, flush cache first
+	 * if there is.
+	 * flush may be failed. So just ignore the failure
+	 * here
+	 */
+	mmc_emergency_cache_disable(host);
+	/*
+	 * low the clock to be init clock
+	 */
+	if (mmc_host_is_spi(host)) {
+		host->ios.chip_select = MMC_CS_HIGH;
+		host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
+	} else {
+		host->ios.chip_select = MMC_CS_DONTCARE;
+		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
+	}
+	host->ios.bus_width = MMC_BUS_WIDTH_1;
+	host->ios.timing = MMC_TIMING_LEGACY;
+	/*
+	 * AS eMMC spec said, card init frequency cannot higher
+	 * then 400Khz. But a good card should support for 400Khz
+	 * frequence in initialize process.
+	 */
+	host->ios.clock = 400000;
+	host->panic_ops->set_ios(host);
+
+	/*
+	 * Since we're changing the OCR value, we seem to
+	 * need to tell some cards to go back to the idle
+	 * state.  We wait 1ms to give cards time to
+	 * respond.
+	 */
+	mmc_emergency_go_idle(host);
+
+	/* The extra bit indicates that we support high capacity */
+	err = mmc_emergency_send_op_cond(host, ocr | (1 << 30), NULL);
+	if (err)
+		goto err;
+
+	/*
+	 * For SPI, enable CRC as appropriate.
+	 */
+	if (mmc_host_is_spi(host)) {
+		err = mmc_emergency_spi_set_crc(host, 1);
+		if (err)
+			goto err;
+	}
+
+	/*
+	 * Fetch CID from card.
+	 */
+	if (mmc_host_is_spi(host))
+		err = mmc_emergency_send_cid(host, cid);
+	else
+		err = mmc_emergency_all_send_cid(cid);
+	if (err)
+		goto err;
+
+	if (memcmp(cid, card->raw_cid, sizeof(cid)) != 0) {
+		err = -ENOENT;
+		goto err;
+	}
+
+	/*
+	 * For native busses:  set card RCA and quit open drain mode.
+	 */
+	if (!mmc_host_is_spi(host)) {
+		err = mmc_emergency_set_relative_addr(card);
+		if (err)
+			goto err;
+
+		host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
+		host->panic_ops->set_ios(host);
+	}
+	/*
+	 * Select card, as all following commands rely on that.
+	 */
+	if (!mmc_host_is_spi(host)) {
+		err = mmc_emergency_select_card(card);
+		if (err)
+			goto err;
+	}
+
+	/*
+	 * Activate high speed (if supported)
+	 */
+	if ((card->ext_csd.hs_max_dtr != 0) &&
+			(host->caps & MMC_CAP_MMC_HIGHSPEED)) {
+		err = mmc_emergency_switch(host, EXT_CSD_CMD_SET_NORMAL,
+				EXT_CSD_HS_TIMING, 1, true);
+		if (err && err != -EBADMSG)
+			goto err;
+
+		if (err) {
+			pr_warn("%s: switch to highspeed failed\n",
+					__func__);
+			err = 0;
+		} else {
+			mmc_card_set_highspeed(card);
+			host->ios.timing = MMC_TIMING_MMC_HS;
+			host->panic_ops->set_ios(host);
+		}
+	}
+
+	/*
+	 * Compute bus speed.
+	 */
+	max_dtr = (unsigned int)-1;
+
+	if (mmc_card_highspeed(card)) {
+		if (max_dtr > card->ext_csd.hs_max_dtr)
+			max_dtr = card->ext_csd.hs_max_dtr;
+		if (max_dtr > MMC_HIGH_52_MAX_DTR)
+			max_dtr = MMC_HIGH_52_MAX_DTR;
+	} else if (max_dtr > card->csd.max_dtr) {
+		max_dtr = card->csd.max_dtr;
+	}
+
+	host->ios.clock = max_dtr;
+	host->panic_ops->set_ios(host);
+
+	/*
+	 * Activate wide bus.
+	 * By default use SDR mode for panic write
+	 */
+	if ((card->csd.mmca_vsn >= CSD_SPEC_VER_4) &&
+		(host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) {
+		unsigned ext_csd_bit, bus_width;
+
+		if (host->caps & MMC_CAP_8_BIT_DATA) {
+			ext_csd_bit = EXT_CSD_BUS_WIDTH_8;
+			bus_width = MMC_BUS_WIDTH_8;
+		} else {
+			ext_csd_bit = EXT_CSD_BUS_WIDTH_4;
+			bus_width = MMC_BUS_WIDTH_4;
+		}
+
+		err = mmc_emergency_switch(host, EXT_CSD_CMD_SET_NORMAL,
+				EXT_CSD_BUS_WIDTH, ext_csd_bit, true);
+
+		if (err && err != -EBADMSG)
+			goto err;
+
+		if (err) {
+			pr_warn("%s: switch to bus %dbit failed\n",
+				__func__, 1 << bus_width);
+			err = 0;
+		} else {
+			ddr = MMC_SDR_MODE;
+			host->ios.bus_width = bus_width;
+			host->panic_ops->set_ios(host);
+		}
+	}
+
+	return 0;
+err:
+	return err;
+}
+
+/*
+ * mmc_emergency_write - write 512Bytes to card in panic mode
+ * @data: data pointer which should pointed to an area no more than
+ * 512Bytes
+ * @blk_id: the block id need to write this 512B data
+ *
+ * This function is supplied to ipanic driver to write 512B data
+ * in panic mode. Please also make sure the data size should not be
+ * larger than 512B, otherwise data lossing.
+ */
+int mmc_emergency_write(char *data, unsigned int blk_id)
+{
+	struct mmc_panic_host *host = panic_host;
+	int ret;
+	if (host == NULL) {
+		pr_err("%s: no device for panic record\n", __func__);
+		return -ENODEV;
+	}
+
+	if (!host->panic_ready) {
+		pr_err("%s: device is not ready for panic record\n", __func__);
+		return -EPERM;
+	}
+
+	if (!data) {
+		pr_err("%s: invalided writing data\n", __func__);
+		return -EINVAL;
+	}
+
+	if (blk_id > host->totalsecs || blk_id < 0) {
+		pr_err("%s: invalided writing blk_id\n", __func__);
+		return -EINVAL;
+	}
+	/*
+	 * everything is OK. So, let's start panic record.
+	 *
+	 * Copy the message data to logbuf
+	 */
+	memcpy(host->logbuf, data, SECTOR_SIZE);
+
+	/* hold Dekker mutex first */
+	if (host->panic_ops->hold_mutex && host->panic_ops->release_mutex) {
+		ret = host->panic_ops->hold_mutex(host);
+		if (ret) {
+			pr_err("%s: hold Dekker mutex failed\n", __func__);
+			return ret;
+		}
+	}
+
+	ret = __mmc_emergency_write(blk_id);
+
+	/* release Dekker mutex */
+	if (host->panic_ops->hold_mutex && host->panic_ops->release_mutex)
+		host->panic_ops->release_mutex(host);
+
+	return ret;
+}
+EXPORT_SYMBOL(mmc_emergency_write);
+
+/*
+ * mmc_emergency_init: init host controller and emmc card
+ * when kernel panic occures
+ *
+ * return value:
+ * 0 - init successfully
+ * negative value - Failed during init
+ * -ENODEV - emmc card was removed by driver
+ */
+int mmc_emergency_init(void)
+{
+	struct mmc_panic_host *host = panic_host;
+	int ret;
+	if (host == NULL || !host->mmc || !host->mmc->card) {
+		pr_err("%s: no device for panic record\n", __func__);
+		return -ENODEV;
+	}
+
+	ret = mmc_emergency_prepare();
+	if (ret) {
+		pr_err("%s: prepare host controller failed\n", __func__);
+		return ret;
+	}
+
+	if (!host->panic_ops) {
+		pr_err("%s: no panic_ops for panic host\n", __func__);
+		return -EPERM;
+	}
+
+	/*
+	 * prepare host controller
+	 */
+	if (host->panic_ops->prepare)
+		host->panic_ops->prepare(host);
+
+	/*
+	 * during init eMMC card, don't want to be interrupted by SCU FW
+	 */
+	if (host->panic_ops->hold_mutex && host->panic_ops->release_mutex) {
+		ret = host->panic_ops->hold_mutex(host);
+		if (ret) {
+			pr_err("%s: hold Dekker mutex failed\n", __func__);
+			return ret;
+		}
+	} else if (host->panic_ops->power_on)
+		/* don't have Dekker mutex, just power on host controller */
+		host->panic_ops->power_on(host);
+
+	/*
+	 * reset card since we are not sure whether card is in a good status
+	 *
+	 * Since in panic mode, we init a old card, so all the command to be
+	 * used has no data. So we can reuse the sdhci ops
+	 */
+	ret = mmc_emergency_reinit_card();
+	if (ret) {
+		pr_info("%s: reinit card failed\n", __func__);
+		goto out;
+	}
+
+	/*
+	 * OK. we are ready
+	 */
+	mmc_emergency_ready();
+out:
+	/* release Dekker mutex */
+	if (host->panic_ops->hold_mutex && host->panic_ops->release_mutex)
+		host->panic_ops->release_mutex(host);
+
+	return ret;
+}
+EXPORT_SYMBOL(mmc_emergency_init);
+
+/*
+ * mmc_emergency_setup - init panic_host which is used for panic writing
+ * @host: mmc host
+ *
+ * This function can sample some important value for panic_host use to init
+ * host controller and card. It only works for the driver which has already
+ * called mmc_alloc_panic_host in its probing process
+ */
+void mmc_emergency_setup(struct mmc_host *mmc)
+{
+	struct mmc_panic_host *host = panic_host;
+
+	/*
+	 * mmc host has no panic host
+	 */
+	if (!mmc->phost)
+		return;
+
+	/*
+	 * before setup panic host, make sure panic host is
+	 * allocated
+	 */
+	if (host == NULL)
+		return;
+
+	/*
+	 * panic host has already been setup
+	 */
+	if (host->mmc)
+		return;
+
+	/*
+	 * mmc host didn't init card
+	 */
+	if (!mmc->card)
+		return;
+	/*
+	 * if is SDIO card or SD card, by pass
+	 */
+	if (mmc_card_sdio(mmc->card))
+		return;
+
+	kfree(host->card);
+
+	host->card = kzalloc(sizeof(struct mmc_card), GFP_KERNEL);
+	if (!host->card) {
+		pr_err("%s: cannot alloc mmc_card for panic host\n",
+				__func__);
+		return;
+	}
+
+	memcpy(host->card, mmc->card, sizeof(struct mmc_card));
+	host->caps = mmc->caps;
+	host->caps2 = mmc->caps2;
+	host->mmc = mmc;
+	host->ocr = mmc->ocr;
+	host->totalsecs = mmc_get_capacity(mmc->card);
+	host->max_blk_size = mmc->max_blk_size;
+	host->max_blk_count = mmc->max_blk_count;
+	host->max_req_size = mmc->max_req_size;
+	if (mmc_card_blockaddr(mmc->card))
+		host->blkaddr = 1;
+	/*
+	 * sample ios values
+	 */
+	memset(&host->ios, 0, sizeof(struct mmc_ios));
+	memcpy(&host->ios, &mmc->ios, sizeof(struct mmc_ios));
+#ifdef CONFIG_MMC_CLKGATE
+	if (mmc->ios.clock == 0)
+		host->ios.clock = mmc->clk_old;
+#endif
+	if (host->panic_ops && host->panic_ops->setup)
+		host->panic_ops->setup(host);
+
+	return;
+}
+EXPORT_SYMBOL(mmc_emergency_setup);
+/*
+ * mmc_alloc_panic_host - used for host layer driver to alloc mmc_panic_host.
+ * @host: mmc host
+ * @ops: this is a pointer which points to mmc_host_panic_ops. This ops should
+ * be defined in host layer driver
+ *
+ * This function need to know the mmc_host_panic_ops, host layer driver should
+ * call this function during probing.
+ *
+ */
+void mmc_alloc_panic_host(struct mmc_host *host,
+		const struct mmc_host_panic_ops *ops)
+{
+	if (panic_host) {
+		pr_info("%s: already allocate panic host\n", __func__);
+		return;
+	}
+
+	panic_host = kzalloc(sizeof(struct mmc_panic_host), GFP_KERNEL);
+	if (!panic_host) {
+		pr_err("%s %s: panic structure allocate error\n",
+				__func__, mmc_hostname(host));
+		return;
+	}
+	/*
+	 * allocate log buffer and DMA buffer
+	 * log buffer size is 512
+	 */
+	panic_host->logbuf = kzalloc(SECTOR_SIZE, GFP_KERNEL);
+	if (!panic_host->logbuf) {
+		pr_err("%s %s: log buf allocate error\n",
+				__func__, mmc_hostname(host));
+		goto free_panic_host;
+	}
+
+	panic_host->dmabuf = dma_map_single(host->parent, panic_host->logbuf,
+			SECTOR_SIZE, DMA_TO_DEVICE);
+	if (!panic_host->dmabuf) {
+		pr_err("%s %s: DMA buf allocate error\n",
+				__func__, mmc_hostname(host));
+		goto free_logbuf;
+	}
+
+	panic_host->panic_ops = ops;
+	panic_host->mmc = NULL;
+	host->phost = panic_host;
+
+	return;
+
+free_logbuf:
+	kfree(panic_host->logbuf);
+free_panic_host:
+	kfree(panic_host);
+}
+EXPORT_SYMBOL(mmc_alloc_panic_host);
diff --git a/drivers/mmc/core/quirks.c b/drivers/mmc/core/quirks.c
index 06ee1ae..7c5baa3 100644
--- a/drivers/mmc/core/quirks.c
+++ b/drivers/mmc/core/quirks.c
@@ -13,6 +13,7 @@
 #include <linux/kernel.h>
 #include <linux/export.h>
 #include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
 
 #ifndef SDIO_VENDOR_ID_TI
 #define SDIO_VENDOR_ID_TI		0x0097
@@ -61,6 +62,25 @@
 	END_FIXUP
 };
 
+/*
+ * Some product name in CID may contain '\n' or other non-name
+ * character. So use below func to compare name.
+ */
+static int str_cmp(const char *dst, const char *src)
+{
+	int i = 0;
+
+	if (NULL == dst || NULL == src)
+		return 0;
+
+	while (dst[i] != '\0' && src[i] != '\0') {
+		if (dst[i] != src[i])
+			return 0;
+		i++;
+	}
+	return 1;
+}
+
 void mmc_fixup_device(struct mmc_card *card, const struct mmc_fixup *table)
 {
 	const struct mmc_fixup *f;
@@ -76,8 +96,8 @@
 		    (f->oemid == CID_OEMID_ANY ||
 		     f->oemid == card->cid.oemid) &&
 		    (f->name == CID_NAME_ANY ||
-		     !strncmp(f->name, card->cid.prod_name,
-			      sizeof(card->cid.prod_name))) &&
+		     str_cmp((const char *)f->name,
+			(const char *)card->cid.prod_name)) &&
 		    (f->cis_vendor == card->cis.vendor ||
 		     f->cis_vendor == (u16) SDIO_ANY_ID) &&
 		    (f->cis_device == card->cis.device ||
@@ -89,3 +109,16 @@
 	}
 }
 EXPORT_SYMBOL(mmc_fixup_device);
+
+void dis_cache_mmc(struct mmc_card *card, int data)
+{
+	if (mmc_card_mmc(card)) {
+		mmc_claim_host(card->host);
+		pr_warn("%s: enther dis_cache_mmc.\n",
+			mmc_hostname(card->host));
+		mmc_cache_ctrl(card->host, 0);
+		card->host->caps2 &= ~MMC_CAP2_CACHE_CTRL;
+		mmc_release_host(card->host);
+	}
+}
+EXPORT_SYMBOL(dis_cache_mmc);
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 6d945e0..cc05f9e 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -215,7 +215,7 @@
 static int mmc_read_ssr(struct mmc_card *card)
 {
 	unsigned int au, es, et, eo;
-	int err, i;
+	int err, i, max_au;
 	u32 *ssr;
 
 	if (!(card->csd.cmdclass & CCC_APP_SPEC)) {
@@ -239,12 +239,15 @@
 	for (i = 0; i < 16; i++)
 		ssr[i] = be32_to_cpu(ssr[i]);
 
+	/* SD3.0 increases max AU size to 64MB (0xF) from 4MB (0x9) */
+	max_au = card->scr.sda_spec3 ? 0xF : 0x9;
+
 	/*
 	 * UNSTUFF_BITS only works with four u32s so we have to offset the
 	 * bitfield positions accordingly.
 	 */
 	au = UNSTUFF_BITS(ssr, 428 - 384, 4);
-	if (au > 0 && au <= 9) {
+	if (au > 0 && au <= max_au) {
 		card->ssr.au = 1 << (au + 4);
 		es = UNSTUFF_BITS(ssr, 408 - 384, 16);
 		et = UNSTUFF_BITS(ssr, 402 - 384, 6);
@@ -983,6 +986,26 @@
 	if (err)
 		goto free_card;
 
+	if (!(rocr & SD_ROCR_S18A) && mmc_sd_card_uhs(card)) {
+		/*
+		 * SD card which has DDR50/SDR104 flag and noddr50 flag has
+		 * already in 1.8v IO voltage without a power loss
+		 */
+		err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
+		if (err) {
+			pr_err("%s: swith to 1.8v for re-init failed\n",
+					mmc_hostname(host));
+			goto free_card;
+		}
+		rocr |= SD_ROCR_S18A;
+	}
+
+	if (mmc_card_noddr50(card)) {
+		card->sw_caps.sd3_bus_mode &= ~(SD_MODE_UHS_DDR50 |
+				SD_MODE_UHS_SDR104);
+		pr_info("%s: disable DDR50/SDR104\n", __func__);
+	}
+
 	/* Initialization sequence for UHS-I cards */
 	if (rocr & SD_ROCR_S18A) {
 		err = mmc_sd_init_uhs_card(card);
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 46e68f1..d3023a3 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -929,41 +929,18 @@
  */
 static int mmc_sdio_suspend(struct mmc_host *host)
 {
-	int i, err = 0;
-
-	for (i = 0; i < host->card->sdio_funcs; i++) {
-		struct sdio_func *func = host->card->sdio_func[i];
-		if (func && sdio_func_present(func) && func->dev.driver) {
-			const struct dev_pm_ops *pmops = func->dev.driver->pm;
-			if (!pmops || !pmops->suspend || !pmops->resume) {
-				/* force removal of entire card in that case */
-				err = -ENOSYS;
-			} else
-				err = pmops->suspend(&func->dev);
-			if (err)
-				break;
-		}
-	}
-	while (err && --i >= 0) {
-		struct sdio_func *func = host->card->sdio_func[i];
-		if (func && sdio_func_present(func) && func->dev.driver) {
-			const struct dev_pm_ops *pmops = func->dev.driver->pm;
-			pmops->resume(&func->dev);
-		}
-	}
-
-	if (!err && mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) {
+	if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) {
 		mmc_claim_host(host);
 		sdio_disable_wide(host->card);
 		mmc_release_host(host);
 	}
 
-	return err;
+	return 0;
 }
 
 static int mmc_sdio_resume(struct mmc_host *host)
 {
-	int i, err = 0;
+	int err = 0;
 
 	BUG_ON(!host);
 	BUG_ON(!host->card);
@@ -990,24 +967,6 @@
 		wake_up_process(host->sdio_irq_thread);
 	mmc_release_host(host);
 
-	/*
-	 * If the card looked to be the same as before suspending, then
-	 * we proceed to resume all card functions.  If one of them returns
-	 * an error then we simply return that error to the core and the
-	 * card will be redetected as new.  It is the responsibility of
-	 * the function driver to perform further tests with the extra
-	 * knowledge it has of the card to confirm the card is indeed the
-	 * same as before suspending (same MAC address for network cards,
-	 * etc.) and return an error otherwise.
-	 */
-	for (i = 0; !err && i < host->card->sdio_funcs; i++) {
-		struct sdio_func *func = host->card->sdio_func[i];
-		if (func && sdio_func_present(func) && func->dev.driver) {
-			const struct dev_pm_ops *pmops = func->dev.driver->pm;
-			err = pmops->resume(&func->dev);
-		}
-	}
-
 	return err;
 }
 
@@ -1256,7 +1215,7 @@
 
 	mmc_go_idle(host);
 
-	mmc_set_clock(host, host->f_min);
+	mmc_set_clock(host, host->f_init);
 
 	err = mmc_send_io_op_cond(host, 0, &ocr);
 	if (err)
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index c012cf5..de10f48 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -198,20 +198,8 @@
 
 #ifdef CONFIG_PM
 
-#ifdef CONFIG_PM_SLEEP
-static int pm_no_operation(struct device *dev)
-{
-	/*
-	 * Prevent the PM core from calling SDIO device drivers' suspend
-	 * callback routines, which it is not supposed to do, by using this
-	 * empty function as the bus type suspend callaback for SDIO.
-	 */
-	return 0;
-}
-#endif
-
 static const struct dev_pm_ops sdio_bus_pm_ops = {
-	SET_SYSTEM_SLEEP_PM_OPS(pm_no_operation, pm_no_operation)
+	SET_SYSTEM_SLEEP_PM_OPS(pm_generic_suspend, pm_generic_resume)
 	SET_RUNTIME_PM_OPS(
 		pm_generic_runtime_suspend,
 		pm_generic_runtime_resume,
diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c
index 8e94e55..8b7539f 100644
--- a/drivers/mmc/core/sdio_cis.c
+++ b/drivers/mmc/core/sdio_cis.c
@@ -97,8 +97,6 @@
 	{ 0, 10, 12, 13, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 70, 80 };
 static const unsigned int speed_unit[8] =
 	{ 10000, 100000, 1000000, 10000000, 0, 0, 0, 0 };
-
-
 typedef int (tpl_parse_t)(struct mmc_card *, struct sdio_func *,
 			   const unsigned char *, unsigned);
 
@@ -225,12 +223,77 @@
 	{	0x22,	0,	cistpl_funce		},
 };
 
+/***************************** WP B0 WA *******************************/
+
+unsigned char wp_tpl_codes[] = {
+	0x21, 0x22, 0x20, 0x21, 0x22, 0x91, 0x15,
+};
+
+unsigned char wp_tpl_links[] = {
+	0x2, 0x4, 0x4, 0x2, 0x2a, 0x2, 0x19,
+};
+
+unsigned char wp_tuple_data[7][42] = {
+	{
+	  12, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	  0, 0
+	},
+	{
+	  0, 0, 2, 11, 0, 0, 0, 0, 0, 0,
+	  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	  0, 0
+	},
+	{
+	  137, 0, 96, 114, 0, 0, 0, 0, 0, 0,
+	  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	  0, 0
+	},
+	{
+	  12, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	  0, 0
+	},
+	{
+	  1, 1, 48, 0, 0, 3, 0, 2, 0, 128,
+	  255, 0, 7, 0, 0, 7, 7, 255, 0, 16,
+	  0, 200, 100, 0, 0, 0, 0, 0, 16, 1,
+	  33, 2, 0, 0, 0, 0, 32, 4, 137, 0,
+	  96, 114
+	},
+	{
+	  7, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	  0, 0
+	},
+	{
+	  8, 0, 73, 110, 116, 101, 108, 40, 82, 41,
+	  32, 87, 105, 114, 101, 108, 101, 115, 115, 32,
+	  67, 111, 114, 101, 0, 0, 0, 0, 0, 0,
+	  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	  0, 0
+	},
+};
+
+/**********************************************************************/
+
 static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
 {
 	int ret;
 	struct sdio_func_tuple *this, **prev;
 	unsigned i, ptr = 0;
-
+	int count = 0;
+	bool replace = false;
 	/*
 	 * Note that this works for the common CIS (function number 0) as
 	 * well as a function's CIS * since SDIO_CCCR_CIS and SDIO_FBR_CIS
@@ -245,7 +308,8 @@
 			fn = 0;
 
 		ret = mmc_io_rw_direct(card, 0, 0,
-			SDIO_FBR_BASE(fn) + SDIO_FBR_CIS + i, 0, &x);
+				       SDIO_FBR_BASE(fn) + SDIO_FBR_CIS + i,
+				       0, &x);
 		if (ret)
 			return ret;
 		ptr |= x << (i * 8);
@@ -258,20 +322,45 @@
 
 	BUG_ON(*prev);
 
+	if (card->quirks & MMC_QUIRK_NON_STD_CIS)
+		count = (func) ? 2 : -1;
+
 	do {
 		unsigned char tpl_code, tpl_link;
+		if (card->quirks & MMC_QUIRK_NON_STD_CIS) {
+			count++;
+			if ((func && (count > 6)) || (!func && (count > 2))) {
+				pr_debug("%s: break: count %d\n",
+					 __func__, count);
+				break;
+			}
+		}
 
 		ret = mmc_io_rw_direct(card, 0, 0, ptr++, 0, &tpl_code);
 		if (ret)
 			break;
-
+		if (card->quirks & MMC_QUIRK_NON_STD_CIS) {
+			/* if the first tuple is 0 - then it's b0, so replace */
+			if ((count < 4) && (tpl_code == 0)) {
+				pr_info("%s card with non std CIS",
+					mmc_hostname(card->host));
+				/* disable UHS on buggy cards */
+				card->sw_caps.sd3_bus_mode = 0;
+				replace = true;
+			}
+		}
 		/* 0xff means we're done */
 		if (tpl_code == 0xff)
 			break;
 
 		/* null entries have no link field or data */
-		if (tpl_code == 0x00)
-			continue;
+		if (card->quirks & MMC_QUIRK_NON_STD_CIS) {
+			if ((tpl_code == 0x00) && (!replace))
+				continue;
+		} else {
+			if (tpl_code == 0x00)
+				continue;
+		}
 
 		ret = mmc_io_rw_direct(card, 0, 0, ptr++, 0, &tpl_link);
 		if (ret)
@@ -290,6 +379,7 @@
 					       ptr + i, 0, &this->data[i]);
 			if (ret)
 				break;
+			pr_debug("%d, ", this->data[i]);
 		}
 		if (ret) {
 			kfree(this);
@@ -297,9 +387,28 @@
 		}
 
 		/* Try to parse the CIS tuple */
-		ret = cis_tpl_parse(card, func, "CIS",
-				    cis_tpl_list, ARRAY_SIZE(cis_tpl_list),
-				    tpl_code, this->data, tpl_link);
+		if (card->quirks & MMC_QUIRK_NON_STD_CIS) {
+			if (!replace)
+				ret = cis_tpl_parse(card, func, "CIS",
+						    cis_tpl_list,
+						    ARRAY_SIZE(cis_tpl_list),
+						    tpl_code, this->data,
+						    tpl_link);
+			else
+				ret = cis_tpl_parse(card, func, "CIS",
+						    cis_tpl_list,
+						    ARRAY_SIZE(cis_tpl_list),
+						    wp_tpl_codes[count],
+						    wp_tuple_data[count],
+						    wp_tpl_links[count]);
+		} else {
+			ret = cis_tpl_parse(card, func, "CIS",
+					    cis_tpl_list,
+					    ARRAY_SIZE(cis_tpl_list),
+					    tpl_code, this->data,
+					    tpl_link);
+		}
+
 		if (ret == -EILSEQ || ret == -ENOENT) {
 			/*
 			 * The tuple is unknown or known but not parsed.
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 9ab8f8d..55cc96b 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -81,18 +81,6 @@
 
 	  If unsure, say Y.
 
-config MMC_SDHCI_ACPI
-	tristate "SDHCI support for ACPI enumerated SDHCI controllers"
-	depends on MMC_SDHCI && ACPI
-	help
-	  This selects support for ACPI enumerated SDHCI controllers,
-	  identified by ACPI Compatibility ID PNP0D40 or specific
-	  ACPI Hardware IDs.
-
-	  If you have a controller with this interface, say Y or M here.
-
-	  If unsure, say N.
-
 config MMC_SDHCI_PLTFM
 	tristate "SDHCI platform and OF driver helper"
 	depends on MMC_SDHCI
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index cd32280..776282d 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -9,7 +9,6 @@
 obj-$(CONFIG_MMC_SDHCI)		+= sdhci.o
 obj-$(CONFIG_MMC_SDHCI_PCI)	+= sdhci-pci.o
 obj-$(subst m,y,$(CONFIG_MMC_SDHCI_PCI))	+= sdhci-pci-data.o
-obj-$(CONFIG_MMC_SDHCI_ACPI)	+= sdhci-acpi.o
 obj-$(CONFIG_MMC_SDHCI_PXAV3)	+= sdhci-pxav3.o
 obj-$(CONFIG_MMC_SDHCI_PXAV2)	+= sdhci-pxav2.o
 obj-$(CONFIG_MMC_SDHCI_S3C)	+= sdhci-s3c.o
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 701d06d..f2b9c58 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -25,17 +25,24 @@
 #include <linux/gpio.h>
 #include <linux/pm_runtime.h>
 #include <linux/mmc/sdhci-pci-data.h>
+#include <linux/lnw_gpio.h>
+#include <linux/acpi_gpio.h>
+
+#include <asm/intel_mid_rpmsg.h>
+#include <asm/intel_scu_flis.h>
+#include <asm/intel_scu_pmic.h>
 
 #include "sdhci.h"
 
+/* Settle down values copied from broadcom reference design. */
+#define DELAY_CARD_INSERTED	200
+#define DELAY_CARD_REMOVED	50
+
 /*
  * PCI device IDs
  */
 #define PCI_DEVICE_ID_INTEL_PCH_SDIO0	0x8809
 #define PCI_DEVICE_ID_INTEL_PCH_SDIO1	0x880a
-#define PCI_DEVICE_ID_INTEL_BYT_EMMC	0x0f14
-#define PCI_DEVICE_ID_INTEL_BYT_SDIO	0x0f15
-#define PCI_DEVICE_ID_INTEL_BYT_SD	0x0f16
 
 /*
  * PCI registers
@@ -50,6 +57,18 @@
 #define  PCI_SLOT_INFO_FIRST_BAR_MASK	0x07
 
 #define MAX_SLOTS			8
+#define IPC_EMMC_MUTEX_CMD             0xEE
+
+/* CLV SD card power resource */
+
+#define VCCSDIO_ADDR		0xd5
+#define VCCSDIO_OFF		0x4
+#define VCCSDIO_NORMAL		0x7
+#define ENCTRL0_ISOLATE		0x55555557
+#define ENCTRL1_ISOLATE		0x5555
+#define STORAGESTIO_FLISNUM	0x8
+#define ENCTRL0_OFF		0x10
+#define ENCTRL1_OFF		0x11
 
 struct sdhci_pci_chip;
 struct sdhci_pci_slot;
@@ -77,6 +96,10 @@
 	int			rst_n_gpio;
 	int			cd_gpio;
 	int			cd_irq;
+	bool			dev_power;
+	struct mutex		power_lock;
+	bool			dma_enabled;
+	unsigned int		tuning_count;
 };
 
 struct sdhci_pci_chip {
@@ -85,10 +108,14 @@
 	unsigned int		quirks;
 	unsigned int		quirks2;
 	bool			allow_runtime_pm;
+	unsigned int		autosuspend_delay;
 	const struct sdhci_pci_fixes *fixes;
 
 	int			num_slots;	/* Slots on controller */
 	struct sdhci_pci_slot	*slots[MAX_SLOTS]; /* Pointers to host slots */
+
+	unsigned int		enctrl0_orig;
+	unsigned int		enctrl1_orig;
 };
 
 
@@ -263,8 +290,7 @@
 static int mfd_emmc_probe_slot(struct sdhci_pci_slot *slot)
 {
 	slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE;
-	slot->host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC |
-				  MMC_CAP2_HC_ERASE_SZ;
+	slot->host->mmc->caps2 |= MMC_CAP2_HC_ERASE_SZ | MMC_CAP2_POLL_R1B_BUSY;
 	return 0;
 }
 
@@ -307,31 +333,193 @@
 	.probe_slot	= pch_hc_probe_slot,
 };
 
-static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
+#define TNG_IOAPIC_IDX	0xfec00000
+static void mrfl_ioapic_rte_reg_addr_map(struct sdhci_pci_slot *slot)
 {
-	slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE;
-	slot->host->mmc->caps2 |= MMC_CAP2_HC_ERASE_SZ;
+	slot->host->rte_addr = ioremap_nocache(TNG_IOAPIC_IDX, 256);
+	if (!slot->host->rte_addr)
+		dev_err(&slot->chip->pdev->dev, "rte_addr ioremap fail!\n");
+	else
+		dev_info(&slot->chip->pdev->dev, "rte_addr mapped addr: %p\n",
+			slot->host->rte_addr);
+}
+
+/* Define Host controllers for Intel Merrifield platform */
+#define INTEL_MRFL_EMMC_0	0
+#define INTEL_MRFL_EMMC_1	1
+#define INTEL_MRFL_SD		2
+#define INTEL_MRFL_SDIO		3
+
+static int intel_mrfl_mmc_probe_slot(struct sdhci_pci_slot *slot)
+{
+	int ret = 0;
+
+	switch (PCI_FUNC(slot->chip->pdev->devfn)) {
+	case INTEL_MRFL_EMMC_0:
+		sdhci_alloc_panic_host(slot->host);
+		slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA |
+					MMC_CAP_NONREMOVABLE |
+					MMC_CAP_1_8V_DDR;
+		slot->host->mmc->caps2 |= MMC_CAP2_POLL_R1B_BUSY |
+					MMC_CAP2_INIT_CARD_SYNC |
+					MMC_CAP2_CACHE_CTRL;
+		if (slot->chip->pdev->revision == 0x1) { /* B0 stepping */
+			slot->host->mmc->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
+			/* WA for async abort silicon issue */
+			slot->host->quirks2 |= SDHCI_QUIRK2_CARD_CD_DELAY |
+					SDHCI_QUIRK2_WAIT_FOR_IDLE |
+					SDHCI_QUIRK2_TUNING_POLL;
+		}
+		mrfl_ioapic_rte_reg_addr_map(slot);
+		slot->tuning_count = 8;
+		break;
+	case INTEL_MRFL_SD:
+		slot->host->quirks2 |= SDHCI_QUIRK2_WAIT_FOR_IDLE;
+		slot->host->mmc->caps2 |= MMC_CAP2_FIXED_NCRC;
+		break;
+	case INTEL_MRFL_SDIO:
+		slot->host->mmc->caps |= MMC_CAP_NONREMOVABLE;
+		slot->host->quirks2 |= SDHCI_QUIRK2_FAKE_VDD;
+		break;
+	}
+
+	if (slot->data->platform_quirks & PLFM_QUIRK_NO_HIGH_SPEED) {
+		slot->host->quirks2 |= SDHCI_QUIRK2_DISABLE_HIGH_SPEED;
+		slot->host->mmc->caps &= ~MMC_CAP_1_8V_DDR;
+	}
+
+	if (slot->data->platform_quirks & PLFM_QUIRK_NO_EMMC_BOOT_PART)
+		slot->host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC;
+
+	if (slot->data->platform_quirks & PLFM_QUIRK_NO_HOST_CTRL_HW) {
+		dev_info(&slot->chip->pdev->dev, "Disable MMC Func %d.\n",
+			PCI_FUNC(slot->chip->pdev->devfn));
+		ret = -ENODEV;
+	}
+
+	return ret;
+}
+
+static void intel_mrfl_mmc_remove_slot(struct sdhci_pci_slot *slot, int dead)
+{
+	if (PCI_FUNC(slot->chip->pdev->devfn) == INTEL_MRFL_EMMC_0)
+		if (slot->host->rte_addr)
+			iounmap(slot->host->rte_addr);
+}
+
+static const struct sdhci_pci_fixes sdhci_intel_mrfl_mmc = {
+	.quirks		= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+	.quirks2	= SDHCI_QUIRK2_BROKEN_AUTO_CMD23 |
+				SDHCI_QUIRK2_HIGH_SPEED_SET_LATE |
+				SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+	.allow_runtime_pm = true,
+	.probe_slot	= intel_mrfl_mmc_probe_slot,
+	.remove_slot	= intel_mrfl_mmc_remove_slot,
+};
+
+static int intel_moor_emmc_probe_slot(struct sdhci_pci_slot *slot)
+{
+	slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA |
+				MMC_CAP_NONREMOVABLE |
+				MMC_CAP_1_8V_DDR;
+
+	sdhci_alloc_panic_host(slot->host);
+
+	slot->host->mmc->caps2 |= MMC_CAP2_POLL_R1B_BUSY |
+				MMC_CAP2_INIT_CARD_SYNC;
+
+	/* Enable HS200 and HS400 */
+	slot->host->mmc->caps2 |= MMC_CAP2_HS200_1_8V_SDR |
+				MMC_CAP2_HS200_DIS;
+
+	if (slot->chip->pdev->revision == 0x1) { /* B0 stepping */
+		slot->host->mmc->caps2 |= MMC_CAP2_HS400_1_8V_DDR;
+	}
+
+	slot->host->quirks2 |= SDHCI_QUIRK2_TUNING_POLL;
+
+	if (slot->data)
+		if (slot->data->platform_quirks & PLFM_QUIRK_NO_HIGH_SPEED) {
+			slot->host->quirks2 |= SDHCI_QUIRK2_DISABLE_HIGH_SPEED;
+			slot->host->mmc->caps &= ~MMC_CAP_1_8V_DDR;
+			slot->host->mmc->caps2 &= ~MMC_CAP2_HS200_1_8V_SDR;
+			if (slot->chip->pdev->revision == 0x1) {
+				slot->host->mmc->caps2 &=
+					~MMC_CAP2_HS400_1_8V_DDR;
+			}
+		}
+
+	if (slot->data)
+		if (slot->data->platform_quirks & PLFM_QUIRK_NO_EMMC_BOOT_PART)
+			slot->host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC;
+
 	return 0;
 }
 
-static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
+static void intel_moor_emmc_remove_slot(struct sdhci_pci_slot *slot, int dead)
 {
-	slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE;
-	return 0;
 }
 
-static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = {
+static int intel_moor_sd_probe_slot(struct sdhci_pci_slot *slot)
+{
+	int ret = 0;
+
+	slot->host->mmc->caps2 |= MMC_CAP2_FIXED_NCRC;
+	if (slot->data)
+		if (slot->data->platform_quirks & PLFM_QUIRK_NO_HOST_CTRL_HW)
+			ret = -ENODEV;
+
+	return ret;
+}
+
+static void intel_moor_sd_remove_slot(struct sdhci_pci_slot *slot, int dead)
+{
+}
+
+static int intel_moor_sdio_probe_slot(struct sdhci_pci_slot *slot)
+{
+	int ret = 0;
+
+	slot->host->mmc->caps |= MMC_CAP_NONREMOVABLE;
+
+	if (slot->data)
+		if (slot->data->platform_quirks & PLFM_QUIRK_NO_HOST_CTRL_HW)
+			ret = -ENODEV;
+
+	return ret;
+}
+
+static void intel_moor_sdio_remove_slot(struct sdhci_pci_slot *slot, int dead)
+{
+}
+
+static const struct sdhci_pci_fixes sdhci_intel_moor_emmc = {
+	.quirks		= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+	.quirks2	= SDHCI_QUIRK2_BROKEN_AUTO_CMD23 |
+				SDHCI_QUIRK2_HIGH_SPEED_SET_LATE,
 	.allow_runtime_pm = true,
-	.probe_slot	= byt_emmc_probe_slot,
+	.probe_slot	= intel_moor_emmc_probe_slot,
+	.remove_slot	= intel_moor_emmc_remove_slot,
 };
 
-static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = {
-	.quirks2	= SDHCI_QUIRK2_HOST_OFF_CARD_ON,
+static const struct sdhci_pci_fixes sdhci_intel_moor_sd = {
+	.quirks		= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+	.quirks2	= SDHCI_QUIRK2_BROKEN_AUTO_CMD23 |
+				SDHCI_QUIRK2_HIGH_SPEED_SET_LATE,
 	.allow_runtime_pm = true,
-	.probe_slot	= byt_sdio_probe_slot,
+	.probe_slot	= intel_moor_sd_probe_slot,
+	.remove_slot	= intel_moor_sd_remove_slot,
 };
 
-static const struct sdhci_pci_fixes sdhci_intel_byt_sd = {
+static const struct sdhci_pci_fixes sdhci_intel_moor_sdio = {
+	.quirks		= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+	.quirks2	= SDHCI_QUIRK2_BROKEN_AUTO_CMD23 |
+				SDHCI_QUIRK2_HIGH_SPEED_SET_LATE |
+				SDHCI_QUIRK2_FAKE_VDD |
+				SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+	.allow_runtime_pm = true,
+	.probe_slot	= intel_moor_sdio_probe_slot,
+	.remove_slot	= intel_moor_sdio_remove_slot,
 };
 
 /* O2Micro extra registers */
@@ -887,26 +1075,34 @@
 
 	{
 		.vendor		= PCI_VENDOR_ID_INTEL,
-		.device		= PCI_DEVICE_ID_INTEL_BYT_EMMC,
+		.device		= PCI_DEVICE_ID_INTEL_MRFL_MMC,
 		.subvendor	= PCI_ANY_ID,
 		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_intel_byt_emmc,
+		.driver_data	= (kernel_ulong_t)&sdhci_intel_mrfl_mmc,
 	},
 
 	{
 		.vendor		= PCI_VENDOR_ID_INTEL,
-		.device		= PCI_DEVICE_ID_INTEL_BYT_SDIO,
+		.device		= PCI_DEVICE_ID_INTEL_MOOR_EMMC,
 		.subvendor	= PCI_ANY_ID,
 		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_intel_byt_sdio,
+		.driver_data	= (kernel_ulong_t)&sdhci_intel_moor_emmc,
 	},
 
 	{
 		.vendor		= PCI_VENDOR_ID_INTEL,
-		.device		= PCI_DEVICE_ID_INTEL_BYT_SD,
+		.device		= PCI_DEVICE_ID_INTEL_MOOR_SD,
 		.subvendor	= PCI_ANY_ID,
 		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_intel_byt_sd,
+		.driver_data	= (kernel_ulong_t)&sdhci_intel_moor_sd,
+	},
+
+	{
+		.vendor		= PCI_VENDOR_ID_INTEL,
+		.device		= PCI_DEVICE_ID_INTEL_MOOR_SDIO,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+		.driver_data	= (kernel_ulong_t)&sdhci_intel_moor_sdio,
 	},
 
 	{
@@ -964,6 +1160,53 @@
  *                                                                           *
 \*****************************************************************************/
 
+static int try_request_regulator(struct device *dev, void *data)
+{
+	struct pci_dev        *pdev = container_of(dev, struct pci_dev, dev);
+	struct sdhci_pci_chip *chip;
+	struct sdhci_pci_slot *slot;
+	struct sdhci_host     *host;
+	int i;
+
+	chip = pci_get_drvdata(pdev);
+	if (!chip)
+		return 0;
+
+	for (i = 0; i < chip->num_slots; i++) {
+		slot = chip->slots[i];
+		if (!slot)
+			continue;
+		host = slot->host;
+		if (!host)
+			continue;
+		if (sdhci_try_get_regulator(host) == 0)
+			mmc_detect_change(host->mmc, 0);
+	}
+	return 0;
+}
+
+static struct pci_driver sdhci_driver;
+
+/**
+ *      sdhci_pci_request_regulators - retry requesting regulators of
+ *                                     all sdhci-pci devices
+ *
+ *      One some platforms, the regulators associated to the mmc are available
+ *      late in the boot.
+ *      sdhci_pci_request_regulators() is called by platform code to retry
+ *      getting the regulators associated to pci sdhcis
+ */
+
+int sdhci_pci_request_regulators(void)
+{
+	/* driver not yet registered */
+	if (!sdhci_driver.driver.p)
+		return 0;
+	return driver_for_each_device(&sdhci_driver.driver,
+				      NULL, NULL, try_request_regulator);
+}
+EXPORT_SYMBOL_GPL(sdhci_pci_request_regulators);
+
 static int sdhci_pci_enable_dma(struct sdhci_host *host)
 {
 	struct sdhci_pci_slot *slot;
@@ -971,6 +1214,9 @@
 	int ret;
 
 	slot = sdhci_priv(host);
+	if (slot->dma_enabled)
+		return 0;
+
 	pdev = slot->chip->pdev;
 
 	if (((pdev->class & 0xFFFF00) == (PCI_CLASS_SYSTEM_SDHCI << 8)) &&
@@ -986,6 +1232,8 @@
 
 	pci_set_master(pdev);
 
+	slot->dma_enabled = true;
+
 	return 0;
 }
 
@@ -1018,21 +1266,139 @@
 {
 	struct sdhci_pci_slot *slot = sdhci_priv(host);
 	int rst_n_gpio = slot->rst_n_gpio;
+	u8 pwr;
 
-	if (!gpio_is_valid(rst_n_gpio))
-		return;
-	gpio_set_value_cansleep(rst_n_gpio, 0);
-	/* For eMMC, minimum is 1us but give it 10us for good measure */
-	udelay(10);
-	gpio_set_value_cansleep(rst_n_gpio, 1);
-	/* For eMMC, minimum is 200us but give it 300us for good measure */
-	usleep_range(300, 1000);
+	if (gpio_is_valid(rst_n_gpio)) {
+		gpio_set_value_cansleep(rst_n_gpio, 0);
+		/* For eMMC, minimum is 1us but give it 10us for good measure */
+		udelay(10);
+		gpio_set_value_cansleep(rst_n_gpio, 1);
+		/*
+		 * For eMMC, minimum is 200us,
+		 * but give it 300us for good measure
+		 */
+		usleep_range(300, 1000);
+	} else if (slot->host->mmc->caps & MMC_CAP_HW_RESET) {
+		/* first set bit4 of power control register */
+		pwr = sdhci_readb(host, SDHCI_POWER_CONTROL);
+		pwr |= SDHCI_HW_RESET;
+		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
+		/* keep the same delay for safe */
+		usleep_range(300, 1000);
+		/* then clear bit4 of power control register */
+		pwr &= ~SDHCI_HW_RESET;
+		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
+		/* keep the same delay for safe */
+		usleep_range(300, 1000);
+	}
 }
 
+static int sdhci_pci_power_up_host(struct sdhci_host *host)
+{
+	int ret = -ENOSYS;
+	struct sdhci_pci_slot *slot = sdhci_priv(host);
+
+	if (slot->data && slot->data->power_up)
+		ret = slot->data->power_up(host);
+	else {
+		/*
+		 * use standard PCI power up function
+		 */
+		ret = pci_set_power_state(slot->chip->pdev, PCI_D0);
+		mdelay(50);
+	}
+	/*
+	 * If there is no power_up callbacks in platform data,
+	 * return -ENOSYS;
+	 */
+	if (ret)
+		return ret;
+
+	/*
+	 * after power up host, let's have a little test
+	 */
+
+	if (sdhci_readl(host, SDHCI_HOST_VERSION) ==
+			0xffffffff) {
+		pr_err("%s: power up sdhci host failed\n",
+				__func__);
+		return -EPERM;
+	}
+
+	pr_info("%s: host controller power up is done\n", __func__);
+
+	return 0;
+}
+
+static int sdhci_pci_get_cd(struct sdhci_host *host)
+{
+	bool present;
+	struct sdhci_pci_slot *slot = sdhci_priv(host);
+
+	if (gpio_is_valid(slot->cd_gpio))
+		return gpio_get_value(slot->cd_gpio) ? 0 : 1;
+
+	/* If nonremovable or polling, assume that the card is always present */
+	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
+		present = true;
+	else
+		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
+			SDHCI_CARD_PRESENT;
+
+	return present;
+}
+
+static void  sdhci_platform_reset_exit(struct sdhci_host *host, u8 mask)
+{
+	if (host->quirks2 & SDHCI_QUIRK2_POWER_PIN_GPIO_MODE) {
+		if (mask & SDHCI_RESET_ALL) {
+			/* reset back to 3.3v signaling */
+			gpio_set_value(host->gpio_1p8_en, 0);
+			/* disable the VDD power */
+			gpio_set_value(host->gpio_pwr_en, 1);
+		}
+	}
+}
+
+static int sdhci_pci_get_tuning_count(struct sdhci_host *host)
+{
+	struct sdhci_pci_slot *slot = sdhci_priv(host);
+
+	return slot->tuning_count;
+}
+
+static int sdhci_gpio_buf_check(struct sdhci_host *host, unsigned int clk)
+{
+	int ret = -ENOSYS;
+	struct sdhci_pci_slot *slot = sdhci_priv(host);
+
+	if (slot->data && slot->data->flis_check)
+		ret = slot->data->flis_check(slot->data->flis_addr,
+					host->clock, clk);
+
+	return ret;
+}
+
+static int sdhci_gpio_buf_dump(struct sdhci_host *host)
+{
+	int ret = -ENOSYS;
+	struct sdhci_pci_slot *slot = sdhci_priv(host);
+
+	if (slot->data && slot->data->flis_dump)
+		ret = slot->data->flis_dump(slot->data->flis_addr);
+
+	return ret;
+}
 static const struct sdhci_ops sdhci_pci_ops = {
 	.enable_dma	= sdhci_pci_enable_dma,
 	.platform_bus_width	= sdhci_pci_bus_width,
 	.hw_reset		= sdhci_pci_hw_reset,
+	.power_up_host		= sdhci_pci_power_up_host,
+	.get_cd		= sdhci_pci_get_cd,
+	.platform_reset_exit = sdhci_platform_reset_exit,
+	.get_tuning_count = sdhci_pci_get_tuning_count,
+	.gpio_buf_check = sdhci_gpio_buf_check,
+	.gpio_buf_dump = sdhci_gpio_buf_dump,
 };
 
 /*****************************************************************************\
@@ -1071,6 +1437,7 @@
 			sdhci_enable_irq_wakeups(slot->host);
 
 		pm_flags |= slot_pm_flags;
+		slot->dma_enabled = false;
 	}
 
 	if (chip->fixes && chip->fixes->suspend) {
@@ -1079,19 +1446,6 @@
 			goto err_pci_suspend;
 	}
 
-	pci_save_state(pdev);
-	if (pm_flags & MMC_PM_KEEP_POWER) {
-		if (pm_flags & MMC_PM_WAKE_SDIO_IRQ) {
-			pci_pme_active(pdev, true);
-			pci_enable_wake(pdev, PCI_D3hot, 1);
-		}
-		pci_set_power_state(pdev, PCI_D3hot);
-	} else {
-		pci_enable_wake(pdev, PCI_D3hot, 0);
-		pci_disable_device(pdev);
-		pci_set_power_state(pdev, PCI_D3hot);
-	}
-
 	return 0;
 
 err_pci_suspend:
@@ -1111,12 +1465,6 @@
 	if (!chip)
 		return 0;
 
-	pci_set_power_state(pdev, PCI_D0);
-	pci_restore_state(pdev);
-	ret = pci_enable_device(pdev);
-	if (ret)
-		return ret;
-
 	if (chip->fixes && chip->fixes->resume) {
 		ret = chip->fixes->resume(chip);
 		if (ret)
@@ -1232,6 +1580,19 @@
 	.runtime_idle = sdhci_pci_runtime_idle,
 };
 
+static void sdhci_hsmmc_virtual_detect(void *dev_id, int carddetect)
+{
+	struct sdhci_host *host = dev_id;
+
+	if (carddetect)
+		mmc_detect_change(host->mmc,
+			msecs_to_jiffies(DELAY_CARD_INSERTED));
+	else
+		mmc_detect_change(host->mmc,
+			msecs_to_jiffies(DELAY_CARD_REMOVED));
+}
+
+
 /*****************************************************************************\
  *                                                                           *
  * Device probing/removal                                                    *
@@ -1280,11 +1641,17 @@
 	slot->rst_n_gpio = -EINVAL;
 	slot->cd_gpio = -EINVAL;
 
+	host->hw_name = "PCI";
+	host->ops = &sdhci_pci_ops;
+	host->quirks = chip->quirks;
+	host->quirks2 = chip->quirks2;
+
 	/* Retrieve platform data if there is any */
 	if (*sdhci_pci_get_data)
 		slot->data = sdhci_pci_get_data(pdev, slotno);
 
 	if (slot->data) {
+		slot->data->pdev = pdev;
 		if (slot->data->setup) {
 			ret = slot->data->setup(slot->data);
 			if (ret) {
@@ -1294,12 +1661,15 @@
 		}
 		slot->rst_n_gpio = slot->data->rst_n_gpio;
 		slot->cd_gpio = slot->data->cd_gpio;
+
+		if (slot->data->quirks)
+			host->quirks2 |= slot->data->quirks;
+
+		if (slot->data->register_embedded_control)
+			slot->data->register_embedded_control(host,
+					sdhci_hsmmc_virtual_detect);
 	}
 
-	host->hw_name = "PCI";
-	host->ops = &sdhci_pci_ops;
-	host->quirks = chip->quirks;
-	host->quirks2 = chip->quirks2;
 
 	host->irq = pdev->irq;
 
@@ -1334,8 +1704,14 @@
 
 	host->mmc->pm_caps = MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ;
 	host->mmc->slotno = slotno;
+
+	if (host->quirks2 & SDHCI_QUIRK2_DISABLE_MMC_CAP_NONREMOVABLE)
+		host->mmc->caps &= ~MMC_CAP_NONREMOVABLE;
 	host->mmc->caps2 |= MMC_CAP2_NO_PRESCAN_POWERUP;
 
+	if (host->quirks2 & SDHCI_QUIRK2_ENABLE_MMC_PM_IGNORE_PM_NOTIFY)
+		host->mmc->pm_flags |= MMC_PM_IGNORE_PM_NOTIFY;
+
 	ret = sdhci_add_host(host);
 	if (ret)
 		goto remove;
@@ -1395,11 +1771,20 @@
 	sdhci_free_host(slot->host);
 }
 
-static void sdhci_pci_runtime_pm_allow(struct device *dev)
+static void sdhci_pci_runtime_pm_allow(struct sdhci_pci_chip *chip)
 {
+	struct device *dev;
+
+	if (!chip || !chip->pdev)
+		return;
+
+	dev = &chip->pdev->dev;
 	pm_runtime_put_noidle(dev);
 	pm_runtime_allow(dev);
-	pm_runtime_set_autosuspend_delay(dev, 50);
+	if (chip->autosuspend_delay)
+		pm_runtime_set_autosuspend_delay(dev, chip->autosuspend_delay);
+	else
+		pm_runtime_set_autosuspend_delay(dev, 50);
 	pm_runtime_use_autosuspend(dev);
 	pm_suspend_ignore_children(dev, 1);
 }
@@ -1434,7 +1819,10 @@
 	if (slots == 0)
 		return -ENODEV;
 
-	BUG_ON(slots > MAX_SLOTS);
+	if (slots > MAX_SLOTS) {
+		dev_err(&pdev->dev, "Invalid number of the slots. Aborting.\n");
+		return -ENODEV;
+	}
 
 	ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &first_bar);
 	if (ret)
@@ -1442,7 +1830,7 @@
 
 	first_bar &= PCI_SLOT_INFO_FIRST_BAR_MASK;
 
-	if (first_bar > 5) {
+	if (first_bar > 4) {
 		dev_err(&pdev->dev, "Invalid first BAR. Aborting.\n");
 		return -ENODEV;
 	}
@@ -1475,6 +1863,11 @@
 	}
 
 	slots = chip->num_slots;	/* Quirk may have changed this */
+	/* slots maybe changed again, so check again */
+	if (slots > MAX_SLOTS) {
+		dev_err(&pdev->dev, "Invalid number of the slots. Aborting.\n");
+		goto free;
+	}
 
 	for (i = 0; i < slots; i++) {
 		slot = sdhci_pci_probe_slot(pdev, chip, first_bar, i);
@@ -1489,7 +1882,7 @@
 	}
 
 	if (chip->allow_runtime_pm)
-		sdhci_pci_runtime_pm_allow(&pdev->dev);
+		sdhci_pci_runtime_pm_allow(chip);
 
 	return 0;
 
@@ -1523,11 +1916,43 @@
 	pci_disable_device(pdev);
 }
 
+static void sdhci_pci_shutdown(struct pci_dev *pdev)
+{
+	struct sdhci_pci_chip *chip;
+	struct sdhci_pci_slot *slot;
+	int i;
+
+	chip = pci_get_drvdata(pdev);
+
+	if (!chip || !chip->pdev)
+		return;
+
+	for (i = 0; i < chip->num_slots; i++) {
+		slot = chip->slots[i];
+		if (slot && slot->data)
+			if (slot->data->cleanup)
+				slot->data->cleanup(slot->data);
+	}
+
+	switch (chip->pdev->device) {
+	case PCI_DEVICE_ID_INTEL_MRFL_MMC:
+		if (chip->allow_runtime_pm) {
+			pm_runtime_get_sync(&pdev->dev);
+			pm_runtime_disable(&pdev->dev);
+			pm_runtime_put_noidle(&pdev->dev);
+		}
+		break;
+	default:
+		break;
+	}
+}
+
 static struct pci_driver sdhci_driver = {
 	.name =		"sdhci-pci",
 	.id_table =	pci_ids,
 	.probe =	sdhci_pci_probe,
 	.remove =	sdhci_pci_remove,
+	.shutdown =	sdhci_pci_shutdown,
 	.driver =	{
 		.pm =   &sdhci_pci_pm_ops
 	},
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 2ea429c..d15ab14 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -25,6 +25,7 @@
 
 #include <linux/leds.h>
 
+#include <linux/mmc/core.h>
 #include <linux/mmc/mmc.h>
 #include <linux/mmc/host.h>
 #include <linux/mmc/card.h>
@@ -54,6 +55,7 @@
 static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
 static void sdhci_tuning_timer(unsigned long data);
 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
+static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios);
 
 #ifdef CONFIG_PM_RUNTIME
 static int sdhci_runtime_pm_get(struct sdhci_host *host);
@@ -71,51 +73,79 @@
 
 static void sdhci_dumpregs(struct sdhci_host *host)
 {
-	pr_debug(DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
+	pr_err(DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
 		mmc_hostname(host->mmc));
 
-	pr_debug(DRIVER_NAME ": Sys addr: 0x%08x | Version:  0x%08x\n",
+	pr_err(DRIVER_NAME ": Sys addr: 0x%08x | Version:  0x%08x\n",
 		sdhci_readl(host, SDHCI_DMA_ADDRESS),
 		sdhci_readw(host, SDHCI_HOST_VERSION));
-	pr_debug(DRIVER_NAME ": Blk size: 0x%08x | Blk cnt:  0x%08x\n",
+	pr_err(DRIVER_NAME ": Blk size: 0x%08x | Blk cnt:  0x%08x\n",
 		sdhci_readw(host, SDHCI_BLOCK_SIZE),
 		sdhci_readw(host, SDHCI_BLOCK_COUNT));
-	pr_debug(DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
+	pr_err(DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
 		sdhci_readl(host, SDHCI_ARGUMENT),
 		sdhci_readw(host, SDHCI_TRANSFER_MODE));
-	pr_debug(DRIVER_NAME ": Present:  0x%08x | Host ctl: 0x%08x\n",
+	pr_err(DRIVER_NAME ": Present:  0x%08x | Host ctl: 0x%08x\n",
 		sdhci_readl(host, SDHCI_PRESENT_STATE),
 		sdhci_readb(host, SDHCI_HOST_CONTROL));
-	pr_debug(DRIVER_NAME ": Power:    0x%08x | Blk gap:  0x%08x\n",
+	pr_err(DRIVER_NAME ": Power:    0x%08x | Blk gap:  0x%08x\n",
 		sdhci_readb(host, SDHCI_POWER_CONTROL),
 		sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
-	pr_debug(DRIVER_NAME ": Wake-up:  0x%08x | Clock:    0x%08x\n",
+	pr_err(DRIVER_NAME ": Wake-up:  0x%08x | Clock:    0x%08x\n",
 		sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
 		sdhci_readw(host, SDHCI_CLOCK_CONTROL));
-	pr_debug(DRIVER_NAME ": Timeout:  0x%08x | Int stat: 0x%08x\n",
+	pr_err(DRIVER_NAME ": Timeout:  0x%08x | Int stat: 0x%08x\n",
 		sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
 		sdhci_readl(host, SDHCI_INT_STATUS));
-	pr_debug(DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
+	pr_err(DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
 		sdhci_readl(host, SDHCI_INT_ENABLE),
 		sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
-	pr_debug(DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
+	pr_err(DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
 		sdhci_readw(host, SDHCI_ACMD12_ERR),
 		sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
-	pr_debug(DRIVER_NAME ": Caps:     0x%08x | Caps_1:   0x%08x\n",
+	pr_err(DRIVER_NAME ": Caps:     0x%08x | Caps_1:   0x%08x\n",
 		sdhci_readl(host, SDHCI_CAPABILITIES),
 		sdhci_readl(host, SDHCI_CAPABILITIES_1));
-	pr_debug(DRIVER_NAME ": Cmd:      0x%08x | Max curr: 0x%08x\n",
+	pr_err(DRIVER_NAME ": Cmd:      0x%08x | Max curr: 0x%08x\n",
 		sdhci_readw(host, SDHCI_COMMAND),
 		sdhci_readl(host, SDHCI_MAX_CURRENT));
-	pr_debug(DRIVER_NAME ": Host ctl2: 0x%08x\n",
+	pr_err(DRIVER_NAME ": Host ctl2: 0x%08x\n",
 		sdhci_readw(host, SDHCI_HOST_CONTROL2));
 
 	if (host->flags & SDHCI_USE_ADMA)
-		pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
+		pr_err(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
 		       readl(host->ioaddr + SDHCI_ADMA_ERROR),
 		       readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
 
-	pr_debug(DRIVER_NAME ": ===========================================\n");
+	if (host->cmd)
+		pr_err(DRIVER_NAME
+				": command pending, Cmdcode: %d\n",
+				host->cmd->opcode);
+	else
+		pr_err(DRIVER_NAME ": No command pending\n");
+
+	if (host->data)
+		pr_err(DRIVER_NAME ": data pending\n");
+	else
+		pr_err(DRIVER_NAME ": No data pending\n");
+
+	pr_err(DRIVER_NAME ": pwr:     0x%x | clock:   %d\n",
+			host->pwr, host->clock);
+#ifdef CONFIG_PM_RUNTIME
+	pr_err(DRIVER_NAME ": usage_count %d | Runtime_status %d\n",
+			atomic_read(&host->mmc->parent->power.usage_count),
+			host->mmc->parent->power.runtime_status);
+#endif
+	if (test_bit(TASKLET_STATE_SCHED, &host->finish_tasklet.state))
+		pr_err(DRIVER_NAME
+				": finish_tasklet pending running, state %ld\n",
+				host->finish_tasklet.state);
+	else
+		pr_err(DRIVER_NAME
+				": finish_tasklet NOT start, state %ld\n",
+				host->finish_tasklet.state);
+
+	pr_err(DRIVER_NAME ": ===========================================\n");
 }
 
 /*****************************************************************************\
@@ -150,7 +180,8 @@
 	u32 present, irqs;
 
 	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
-	    (host->mmc->caps & MMC_CAP_NONREMOVABLE))
+	    (host->mmc->caps & MMC_CAP_NONREMOVABLE) ||
+	    (host->quirks2 & SDHCI_QUIRK2_BAD_SD_CD))
 		return;
 
 	present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
@@ -173,6 +204,21 @@
 	sdhci_set_card_detection(host, false);
 }
 
+static void sdhci_busy_wait(struct mmc_host *mmc, u32 delay)
+{
+	struct sdhci_host *host = mmc_priv(mmc);
+
+	/* totally 'delay' us, each loop 4us */
+	u32 loop = delay / 4;
+	while (loop) {
+		/* have a delay here */
+		udelay(4);
+		/* read register to make sure host won't be clock gated */
+		sdhci_readw(host, SDHCI_HOST_VERSION);
+		loop--;
+	}
+}
+
 static void sdhci_reset(struct sdhci_host *host, u8 mask)
 {
 	unsigned long timeout;
@@ -196,7 +242,7 @@
 		host->clock = 0;
 
 	/* Wait max 100 ms */
-	timeout = 100;
+	timeout = 10000;
 
 	/* hw clears the bit when it's done */
 	while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
@@ -207,7 +253,7 @@
 			return;
 		}
 		timeout--;
-		mdelay(1);
+		udelay(10);
 	}
 
 	if (host->ops->platform_reset_exit)
@@ -267,6 +313,9 @@
 {
 	u8 ctrl;
 
+	if (!(host->mmc->caps2 & MMC_CAP2_LED_SUPPORT))
+		return;
+
 	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
 	ctrl |= SDHCI_CTRL_LED;
 	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
@@ -276,6 +325,9 @@
 {
 	u8 ctrl;
 
+	if (!(host->mmc->caps2 & MMC_CAP2_LED_SUPPORT))
+		return;
+
 	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
 	ctrl &= ~SDHCI_CTRL_LED;
 	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
@@ -288,6 +340,9 @@
 	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
 	unsigned long flags;
 
+	if (!(host->mmc->caps2 & MMC_CAP2_LED_SUPPORT))
+		return;
+
 	spin_lock_irqsave(&host->lock, flags);
 
 	if (host->runtime_suspended)
@@ -707,7 +762,8 @@
 static void sdhci_set_transfer_irqs(struct sdhci_host *host)
 {
 	u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
-	u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;
+	u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR |
+		SDHCI_INT_TAR_RSP_ERR;
 
 	if (host->flags & SDHCI_REQ_USE_DMA)
 		sdhci_clear_set_irqs(host, pio_irqs, dma_irqs);
@@ -960,6 +1016,8 @@
 		 * upon error conditions.
 		 */
 		if (data->error) {
+			if (host->quirks2 & SDHCI_QUIRK2_WAIT_FOR_IDLE)
+				sdhci_busy_wait(host->mmc, 1000);
 			sdhci_reset(host, SDHCI_RESET_CMD);
 			sdhci_reset(host, SDHCI_RESET_DATA);
 		}
@@ -978,7 +1036,7 @@
 	WARN_ON(host->cmd);
 
 	/* Wait max 10 ms */
-	timeout = 10;
+	timeout = 1000;
 
 	mask = SDHCI_CMD_INHIBIT;
 	if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY))
@@ -999,12 +1057,13 @@
 			return;
 		}
 		timeout--;
-		mdelay(1);
+		udelay(10);
 	}
 
 	mod_timer(&host->timer, jiffies + 10 * HZ);
 
 	host->cmd = cmd;
+	host->r1b_busy_end = 0;
 
 	sdhci_prepare_data(host, cmd);
 
@@ -1105,6 +1164,9 @@
 	case SDHCI_CTRL_UHS_DDR50:
 		preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
 		break;
+	case SDHCI_CTRL_HS_DDR200:
+		preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
+		break;
 	default:
 		pr_warn("%s: Invalid UHS-I mode selected\n",
 			mmc_hostname(host->mmc));
@@ -1137,6 +1199,23 @@
 	if (clock == 0)
 		goto out;
 
+	/*
+	 * Check and change Host Controller pin GPIO buffer setting
+	 * according to the new clock will be used.
+	 * For example, when the SD bus frequency is 50MHz or 200MHz,
+	 * the controller SD bus CLK/CMD/DAT pin may need different
+	 * driving strength and slew settings.
+	 * So we add check here. And this API will also change the pin
+	 * gpio buffer settings if needed after the check. Of course,
+	 * it's platform specific behaviours.
+	 * To ensure that the clock signal does not change when gpio
+	 * buffer setting modified, we'd better disable SD bus clock
+	 * first before changing any gpio pin buffer settings and
+	 * enable the SD bus clock again after the changing.
+	 */
+	if (host->ops->gpio_buf_check)
+		host->ops->gpio_buf_check(host, clock);
+
 	if (host->version >= SDHCI_SPEC_300) {
 		if (sdhci_readw(host, SDHCI_HOST_CONTROL2) &
 			SDHCI_CTRL_PRESET_VAL_ENABLE) {
@@ -1210,7 +1289,7 @@
 	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
 
 	/* Wait max 20 ms */
-	timeout = 20;
+	timeout = 2000;
 	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
 		& SDHCI_CLOCK_INT_STABLE)) {
 		if (timeout == 0) {
@@ -1220,7 +1299,7 @@
 			return;
 		}
 		timeout--;
-		mdelay(1);
+		udelay(10);
 	}
 
 	clk |= SDHCI_CLOCK_CARD_EN;
@@ -1268,6 +1347,9 @@
 
 	if (pwr == 0) {
 		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
+		/* disable the power by setting GPIO pin */
+		if (host->quirks2 & SDHCI_QUIRK2_POWER_PIN_GPIO_MODE)
+			gpio_set_value(host->gpio_pwr_en, 1);
 		return 0;
 	}
 
@@ -1289,6 +1371,10 @@
 
 	sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
 
+	/* enable the power by setting GPIO pin */
+	if (host->quirks2 & SDHCI_QUIRK2_POWER_PIN_GPIO_MODE)
+		gpio_set_value(host->gpio_pwr_en, 0);
+
 	/*
 	 * Some controllers need an extra 10ms delay of 10ms before they
 	 * can apply clock after applying power
@@ -1299,6 +1385,216 @@
 	return power;
 }
 
+/*
+ * One of the Medfield eMMC controller (PCI device id 0x0823, SDIO3) is
+ * a shared resource used by the SCU and the IA processors. SCU primarily
+ * uses the eMMC host controller to access the eMMC device's Boot Partition,
+ * while the IA CPU uses the eMMC host controller to access the eMMC device's
+ * User Partition.
+ *
+ * After the SCU hands off the system to the IA processor, the IA processor
+ * assumes ownership to the eMMC host controller. Due to absence of any
+ * arbitration at the eMMC host controller, this could result in concurrent
+ * eMMC host accesses resulting in bus contention and garbage data ending up
+ * in either of the partitions.
+ * To circumvent this from happening, eMMC host controller locking mechanism
+ * is employed, where at any one given time, only one agent, SCU or IA, may be
+ * allowed to access the host. This is achieved by implementing Dekker's
+ * Algorithm (http://en.wikipedia.org/wiki/Dekker's_algorithm) between the
+ * two processors.
+ *
+ * Before handing off the system to the IA processor, SCU must set up three
+ * housekeeping mutex variables allocated in the shared SRAM as follows:
+ *
+ * eMMC_Owner = IA (SCU and IA processors - RW, 32bit)
+ * IA_Req = FALSE (IA -RW, SCU - RO, 32bit)
+ * SCU_Req = FALSE (IA - RO, SCU - R/W, 32bit)
+ *
+ * There is no hardware based access control to these variables and so code
+ * executing on SCU and IA processors must follow below access rules
+ * (Dekker's algorithm):
+ *
+ * -----------------------------------------
+ * SCU Processor Implementation
+ * -----------------------------------------
+ * SCU_Req = TRUE;
+ * while (IA_Req == TRUE) {
+ *     if (eMMC_Owner != SCU){
+ *         SCU_Req = FALSE;
+ *         while (eMMC_Owner != SCU);
+ *         SCU_Req = TRUE;
+ *     }
+ * }
+ * // SCU now performs eMMC transactions here
+ * ...
+ * // When done, relinquish control to IA
+ * eMMC_Owner = IA;
+ * SCU_Req = FALSE;
+ *
+ * -----------------------------------------
+ * IA Processor Implementation
+ * -----------------------------------------
+ * IA_Req = TRUE;
+ * while (SCU_Req == TRUE) {
+ *     if (eMMC_Owner != IA){
+ *         IA_Req = FALSE;
+ *         while (eMMC_Owner != IA);
+ *         IA_Req = TRUE;
+ *     }
+ * }
+ * //IA now performs eMMC transactions here
+ * ...
+ * //When done, relinquish control to SCU
+ * eMMC_Owner = SCU;
+ * IA_Req = FALSE;
+ *
+ * ----------------------------------------
+ *
+ * sdhci_do_acquire_ownership- implement the Dekker's algorithm on IA side
+ * This function is only used for acquire ownership, not to re-cofnig host
+ * controller. Since in some scenarios, re-config is not useless. We can
+ * save some unused expenses.
+ * @mmc: mmc host
+ *
+ * @return return value:
+ * 0 - Acquried the ownership successfully. The last owner is IA
+ * 1 - Acquried the ownership successfully. The last owenr is SCU
+ * -EBUSY - failed to acquire ownership within the timeout period
+ */
+static int sdhci_do_acquire_ownership(struct mmc_host *mmc)
+{
+	struct sdhci_host *host;
+	unsigned long t1, t2;
+	unsigned long flags;
+
+	host = mmc_priv(mmc);
+
+	if (!host->sram_addr)
+		return 0;
+
+	/* if host has sram_addr, dekker_lock is initialized */
+	spin_lock_irqsave(&host->dekker_lock, flags);
+
+	host->usage_cnt++;
+
+	/* If IA has already hold the eMMC mutex, then just exit */
+	if (readl(host->sram_addr + DEKKER_IA_REQ_OFFSET)) {
+		spin_unlock_irqrestore(&host->dekker_lock, flags);
+		return 0;
+	}
+
+	DBG("Acquire ownership - eMMC owner: %d, IA req: %d, SCU req: %d\n",
+			readl(host->sram_addr + DEKKER_EMMC_OWNER_OFFSET),
+			readl(host->sram_addr + DEKKER_IA_REQ_OFFSET),
+			readl(host->sram_addr + DEKKER_SCU_REQ_OFFSET));
+
+	writel(1, host->sram_addr + DEKKER_IA_REQ_OFFSET);
+
+	t1 = jiffies + 10 * HZ;
+	t2 = 500;
+
+	while (readl(host->sram_addr + DEKKER_SCU_REQ_OFFSET)) {
+		if (readl(host->sram_addr + DEKKER_EMMC_OWNER_OFFSET) !=
+				DEKKER_OWNER_IA) {
+			writel(0, host->sram_addr + DEKKER_IA_REQ_OFFSET);
+			while (t2) {
+				if (readl(host->sram_addr +
+					DEKKER_EMMC_OWNER_OFFSET) ==
+						DEKKER_OWNER_IA)
+					break;
+				spin_unlock_irqrestore(&host->dekker_lock,
+						flags);
+				usleep_range(8000, 12000);
+				spin_lock_irqsave(&host->dekker_lock, flags);
+				t2--;
+			}
+			if (t2)
+				writel(1, host->sram_addr +
+						DEKKER_IA_REQ_OFFSET);
+			else
+				goto timeout;
+		}
+		if (time_after(jiffies, t1))
+			goto timeout;
+
+		cpu_relax();
+	}
+
+	spin_unlock_irqrestore(&host->dekker_lock, flags);
+	/*
+	 * if the last owner is SCU, will do the re-config host controller
+	 * in the next
+	 */
+	return (readl(host->sram_addr + DEKKER_EMMC_OWNER_OFFSET) ==
+			DEKKER_OWNER_IA) ? 1 : 0;
+
+timeout:
+	pr_err(KERN_ERR "eMMC mutex timeout!\n"
+			"Dump Dekker's house keeping variables -"
+			"eMMC owner: %d, IA req: %d, SCU req: %d\n",
+			readl(host->sram_addr + DEKKER_EMMC_OWNER_OFFSET),
+			readl(host->sram_addr + DEKKER_IA_REQ_OFFSET),
+			readl(host->sram_addr + DEKKER_SCU_REQ_OFFSET));
+
+	/* Release eMMC mutex anyway */
+	writel(DEKKER_OWNER_SCU, host->sram_addr + DEKKER_EMMC_OWNER_OFFSET);
+	writel(0, host->sram_addr + DEKKER_IA_REQ_OFFSET);
+
+	spin_unlock_irqrestore(&host->dekker_lock, flags);
+
+	return -EBUSY;
+}
+
+static int sdhci_acquire_ownership(struct mmc_host *mmc)
+{
+	int ret;
+
+	ret = sdhci_do_acquire_ownership(mmc);
+	if (ret) {
+		struct sdhci_host *host;
+		host = mmc_priv(mmc);
+		/* Re-config HC in case SCU has changed HC reg already */
+		pm_runtime_get_sync(mmc->parent);
+		/*
+		 * reinit host registers.
+		 * include reset host controller all,
+		 * reconfigure clock, pwr and other registers.
+		 */
+		sdhci_init(host, 0);
+		host->clock = 0;
+		host->pwr = 0;
+		sdhci_do_set_ios(host, &host->mmc->ios);
+		pm_runtime_put(mmc->parent);
+	}
+
+	return ret;
+}
+
+static void sdhci_release_ownership(struct mmc_host *mmc)
+{
+	struct sdhci_host *host;
+	unsigned long flags;
+
+	host = mmc_priv(mmc);
+
+	if (!host->sram_addr)
+		return;
+
+	spin_lock_irqsave(&host->dekker_lock, flags);
+	BUG_ON(host->usage_cnt == 0);
+	host->usage_cnt--;
+	if (host->usage_cnt == 0) {
+		writel(DEKKER_OWNER_SCU,
+				host->sram_addr + DEKKER_EMMC_OWNER_OFFSET);
+		writel(0, host->sram_addr + DEKKER_IA_REQ_OFFSET);
+		DBG("Exit ownership-eMMC owner: %d,IA req: %d,SCU req: %d\n",
+			readl(host->sram_addr + DEKKER_EMMC_OWNER_OFFSET),
+			readl(host->sram_addr + DEKKER_IA_REQ_OFFSET),
+			readl(host->sram_addr + DEKKER_SCU_REQ_OFFSET));
+	}
+	spin_unlock_irqrestore(&host->dekker_lock, flags);
+}
+
 /*****************************************************************************\
  *                                                                           *
  * MMC callbacks                                                             *
@@ -1316,8 +1612,16 @@
 
 	sdhci_runtime_pm_get(host);
 
+	sdhci_acquire_ownership(host->mmc);
+
 	spin_lock_irqsave(&host->lock, flags);
 
+	if (host->suspended) {
+		pr_err("%s: %s: host is in suspend state\n",
+				__func__, mmc_hostname(mmc));
+		BUG_ON(1);
+	}
+
 	WARN_ON(host->mrq != NULL);
 
 #ifndef SDHCI_USE_LEDS_CLASS
@@ -1347,8 +1651,12 @@
 	present = mmc_gpio_get_cd(host->mmc);
 	if (present < 0) {
 		/* If polling, assume that the card is always present. */
-		if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
+		if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
+				(host->mmc->caps & MMC_CAP_NONREMOVABLE))
 			present = 1;
+		else if ((host->quirks2 & SDHCI_QUIRK2_BAD_SD_CD) &&
+				host->ops->get_cd)
+			present = host->ops->get_cd(host);
 		else
 			present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
 					SDHCI_CARD_PRESENT;
@@ -1367,28 +1675,58 @@
 		 * tuning procedure before sending command.
 		 */
 		if ((host->flags & SDHCI_NEEDS_RETUNING) &&
-		    !(present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ))) {
+		    !(present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ)) &&
+		    (present_state & SDHCI_DATA_0_LVL_MASK)) {
 			if (mmc->card) {
+				if (mmc_card_sdio(mmc->card) &&
+				    (mmc_cmd_type(mrq->cmd) != MMC_CMD_ADTC))
+					goto end_tuning;
+				if ((mmc->card->ext_csd.part_config & 0x07) ==
+					EXT_CSD_PART_CONFIG_ACC_RPMB)
+					goto end_tuning;
 				/* eMMC uses cmd21 but sd and sdio use cmd19 */
 				tuning_opcode =
 					mmc->card->type == MMC_TYPE_MMC ?
 					MMC_SEND_TUNING_BLOCK_HS200 :
 					MMC_SEND_TUNING_BLOCK;
+				host->mrq = NULL;
 				spin_unlock_irqrestore(&host->lock, flags);
 				sdhci_execute_tuning(mmc, tuning_opcode);
 				spin_lock_irqsave(&host->lock, flags);
-
+end_tuning:
 				/* Restore original mmc_request structure */
 				host->mrq = mrq;
 			}
 		}
 
+		if (!(sdhci_readw(host, SDHCI_CLOCK_CONTROL) &
+					SDHCI_CLOCK_CARD_EN)) {
+			/*
+			 * SD bus clock is stopped. no interrupts will be
+			 * generate in this case.
+			 */
+			pr_warn("%s:%s: SD bus clock not enabled for CMD %d\n",
+					__func__, mmc_hostname(mmc),
+					host->mrq->cmd->opcode);
+			pr_warn("%s:%s: host->pwr 0x%x, host->clock %d\n",
+					__func__, mmc_hostname(mmc),
+					host->pwr, host->clock);
+			host->mrq->cmd->error = -EIO;
+			tasklet_schedule(&host->finish_tasklet);
+			goto out;
+		}
+
+		/* Clear the flag for Samsung emmc APS prepartion bug WA */
+		if (host->flags & SDHCI_EXIT_RPM_RESUME)
+			host->flags &= ~SDHCI_EXIT_RPM_RESUME;
+
 		if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
 			sdhci_send_command(host, mrq->sbc);
 		else
 			sdhci_send_command(host, mrq->cmd);
 	}
 
+out:
 	mmiowb();
 	spin_unlock_irqrestore(&host->lock, flags);
 }
@@ -1401,6 +1739,9 @@
 
 	spin_lock_irqsave(&host->lock, flags);
 
+	if (host->quirks2 & SDHCI_QUIRK2_FAKE_VDD)
+		ios->vdd = 7;
+
 	if (host->flags & SDHCI_DEVICE_DEAD) {
 		spin_unlock_irqrestore(&host->lock, flags);
 		if (host->vmmc && ios->power_mode == MMC_POWER_OFF)
@@ -1470,11 +1811,14 @@
 	else
 		ctrl &= ~SDHCI_CTRL_HISPD;
 
-	if (host->version >= SDHCI_SPEC_300) {
+	if (((host->version >= SDHCI_SPEC_300) ||
+			(host->quirks2 & SDHCI_QUIRK2_V2_0_SUPPORT_DDR50)) &&
+			ios->timing != MMC_TIMING_LEGACY) {
 		u16 clk, ctrl_2;
 
 		/* In case of UHS-I modes, set High Speed Enable */
 		if ((ios->timing == MMC_TIMING_MMC_HS200) ||
+		    (ios->timing == MMC_TIMING_MMC_HS400) ||
 		    (ios->timing == MMC_TIMING_UHS_SDR50) ||
 		    (ios->timing == MMC_TIMING_UHS_SDR104) ||
 		    (ios->timing == MMC_TIMING_UHS_DDR50) ||
@@ -1526,7 +1870,9 @@
 			ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
 			/* Select Bus Speed Mode for host */
 			ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
-			if (ios->timing == MMC_TIMING_MMC_HS200)
+			if (ios->timing == MMC_TIMING_MMC_HS400)
+				ctrl_2 |= SDHCI_CTRL_HS_DDR200;
+			else if (ios->timing == MMC_TIMING_MMC_HS200)
 				ctrl_2 |= SDHCI_CTRL_HS_SDR200;
 			else if (ios->timing == MMC_TIMING_UHS_SDR12)
 				ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
@@ -1555,6 +1901,18 @@
 				>> SDHCI_PRESET_DRV_SHIFT;
 		}
 
+		/*
+		 * Some buggy SDHC Host Controller requires the
+		 * Host Control Register High Speed Enable bit
+		 * must be set after Host Control 2 Register
+		 * 1.8V Signaling Enable bit set.
+		 * Otherwise it will fail to work on High Speed.
+		 * So, here we just write the Host control Register
+		 * with the same value again to workaround the issue.
+		 */
+		if (host->quirks2 & SDHCI_QUIRK2_HIGH_SPEED_SET_LATE)
+			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+
 		/* Re-enable SD Clock */
 		sdhci_update_clock(host);
 	} else
@@ -1568,6 +1926,12 @@
 	if(host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
 		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
 
+	if (host->quirks2 & SDHCI_QUIRK2_BAD_SD_CD) {
+		ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+		ctrl |= SDHCI_CTRL_CD_SD | SDHCI_CTRL_CD_TL;
+		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+	}
+
 	mmiowb();
 	spin_unlock_irqrestore(&host->lock, flags);
 }
@@ -1577,7 +1941,9 @@
 	struct sdhci_host *host = mmc_priv(mmc);
 
 	sdhci_runtime_pm_get(host);
+	sdhci_acquire_ownership(mmc);
 	sdhci_do_set_ios(host, ios);
+	sdhci_release_ownership(mmc);
 	sdhci_runtime_pm_put(host);
 }
 
@@ -1658,8 +2024,13 @@
 {
 	struct sdhci_host *host = mmc_priv(mmc);
 
-	if (host->ops && host->ops->hw_reset)
+	if (host->ops && host->ops->hw_reset) {
+		sdhci_runtime_pm_get(host);
+		sdhci_acquire_ownership(mmc);
 		host->ops->hw_reset(host);
+		sdhci_release_ownership(mmc);
+		sdhci_runtime_pm_put(host);
+	}
 }
 
 static int sdhci_get_ro(struct mmc_host *mmc)
@@ -1725,6 +2096,9 @@
 		/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
 		ctrl &= ~SDHCI_CTRL_VDD_180;
 		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+		/* 3.3v by setting GPIO pin */
+		if (host->quirks2 & SDHCI_QUIRK2_POWER_PIN_GPIO_MODE)
+			gpio_set_value(host->gpio_1p8_en, 0);
 
 		if (host->vqmmc) {
 			ret = regulator_set_voltage(host->vqmmc, 2700000, 3600000);
@@ -1736,6 +2110,11 @@
 		}
 		/* Wait for 5ms */
 		usleep_range(5000, 5500);
+		if (host->ops->set_io_voltage) {
+			ret = host->ops->set_io_voltage(host, false);
+			if (ret)
+				return ret;
+		}
 
 		/* 3.3V regulator output should be stable within 5 ms */
 		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
@@ -1763,9 +2142,17 @@
 		 */
 		ctrl |= SDHCI_CTRL_VDD_180;
 		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+		/* 1.8v by setting GPIO pin */
+		if (host->quirks2 & SDHCI_QUIRK2_POWER_PIN_GPIO_MODE)
+			gpio_set_value(host->gpio_1p8_en, 1);
 
 		/* Wait for 5ms */
 		usleep_range(5000, 5500);
+		if (host->ops->set_io_voltage) {
+			ret = host->ops->set_io_voltage(host, true);
+			if (ret)
+				return ret;
+		}
 
 		/* 1.8V regulator output should be stable within 5 ms */
 		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
@@ -1825,15 +2212,28 @@
 	u16 ctrl;
 	u32 ier;
 	int tuning_loop_counter = MAX_TUNING_LOOP;
-	unsigned long timeout;
 	int err = 0;
 	bool requires_tuning_nonuhs = false;
+	unsigned long flags, loop;
 
 	host = mmc_priv(mmc);
 
 	sdhci_runtime_pm_get(host);
-	disable_irq(host->irq);
-	spin_lock(&host->lock);
+	spin_lock_irqsave(&host->lock, flags);
+
+	/*
+	 * Workaround for Samsung eMMC device -
+	 * Samsung eMMC device may fail to response to CMD21
+	 * if it in the APS preparation mode.
+	 * If just resume from runtime pm, then CMD21 should
+	 * be the first CMD and will not trigger the APS preparation
+	 * mode. But in other case, such as tuning timer expired,
+	 * we have to delay at least 2ms to avoid trigger this issue.
+	 */
+	if (host->flags & SDHCI_EXIT_RPM_RESUME)
+		host->flags &= ~SDHCI_EXIT_RPM_RESUME;
+	else
+		mdelay(2);
 
 	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
 
@@ -1844,17 +2244,19 @@
 	 * If the Host Controller supports the HS200 mode then the
 	 * tuning function has to be executed.
 	 */
-	if (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR50) &&
-	    (host->flags & SDHCI_SDR50_NEEDS_TUNING ||
-	     host->flags & SDHCI_HS200_NEEDS_TUNING))
+	if ((((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR50) &&
+	    (host->flags & SDHCI_SDR50_NEEDS_TUNING) &&
+	    (mmc->ios.timing == MMC_TIMING_UHS_SDR50)) ||
+	     ((host->flags & SDHCI_HS200_NEEDS_TUNING) &&
+	      mmc->ios.timing == MMC_TIMING_MMC_HS200) ||
+	       mmc->ios.timing == MMC_TIMING_UHS_SDR104)
 		requires_tuning_nonuhs = true;
 
 	if (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR104) ||
 	    requires_tuning_nonuhs)
 		ctrl |= SDHCI_CTRL_EXEC_TUNING;
 	else {
-		spin_unlock(&host->lock);
-		enable_irq(host->irq);
+		spin_unlock_irqrestore(&host->lock, flags);
 		sdhci_runtime_pm_put(host);
 		return 0;
 	}
@@ -1875,15 +2277,20 @@
 	sdhci_clear_set_irqs(host, ier, SDHCI_INT_DATA_AVAIL);
 
 	/*
-	 * Issue CMD19 repeatedly till Execute Tuning is set to 0 or the number
-	 * of loops reaches 40 times or a timeout of 150ms occurs.
+	 * set the data timeout register to be max value
 	 */
-	timeout = 150;
+	sdhci_writeb(host, 0xe, SDHCI_TIMEOUT_CONTROL);
+
+	/*
+	 * Issue CMD19 repeatedly till Execute Tuning is set to 0 or the number
+	 * of loops reaches 40 times.
+	 */
 	do {
 		struct mmc_command cmd = {0};
 		struct mmc_request mrq = {NULL};
+		unsigned int intmask;
 
-		if (!tuning_loop_counter && !timeout)
+		if (!tuning_loop_counter)
 			break;
 
 		cmd.opcode = opcode;
@@ -1922,59 +2329,93 @@
 		sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
 
 		sdhci_send_command(host, &cmd);
+		mmiowb();
 
 		host->cmd = NULL;
 		host->mrq = NULL;
 
-		spin_unlock(&host->lock);
-		enable_irq(host->irq);
+		/* delete the timer created by send command */
+		del_timer(&host->timer);
 
-		/* Wait for Buffer Read Ready interrupt */
-		wait_event_interruptible_timeout(host->buf_ready_int,
-					(host->tuning_done == 1),
-					msecs_to_jiffies(50));
-		disable_irq(host->irq);
-		spin_lock(&host->lock);
+		if (host->quirks2 & SDHCI_QUIRK2_TUNING_POLL) {
+			/* wait for 150ms for each tuning cmd */
+			loop = 150 * 1000;
+			while (loop--) {
+				intmask = sdhci_readl(host, SDHCI_INT_STATUS);
+				if (intmask & SDHCI_INT_DATA_AVAIL) {
+					host->tuning_done = 1;
+					sdhci_writel(host,
+						intmask & SDHCI_INT_DATA_AVAIL,
+						SDHCI_INT_STATUS);
+					break;
+				}
+				udelay(1);
+			}
+		} else {
+			intmask = sdhci_readl(host, SDHCI_INT_STATUS);
+			if (intmask & SDHCI_INT_DATA_AVAIL) {
+				host->tuning_done = 1;
+				sdhci_writel(host,
+					intmask & SDHCI_INT_DATA_AVAIL,
+					SDHCI_INT_STATUS);
+			}
+			spin_unlock_irqrestore(&host->lock, flags);
+
+			if (!host->tuning_done)
+				/* Wait for Buffer Read Ready interrupt */
+				wait_event_interruptible_timeout(
+						host->buf_ready_int,
+						(host->tuning_done == 1),
+						msecs_to_jiffies(50));
+			spin_lock_irqsave(&host->lock, flags);
+
+			intmask = sdhci_readl(host, SDHCI_INT_STATUS);
+			if (intmask & SDHCI_INT_DATA_AVAIL) {
+				host->tuning_done = 1;
+				sdhci_writel(host,
+					intmask & SDHCI_INT_DATA_AVAIL,
+					SDHCI_INT_STATUS);
+			}
+		}
 
 		if (!host->tuning_done) {
-			pr_info(DRIVER_NAME ": Timeout waiting for "
+			pr_warn(DRIVER_NAME ": Timeout waiting for "
 				"Buffer Read Ready interrupt during tuning "
-				"procedure, falling back to fixed sampling "
-				"clock\n");
-			ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
-			ctrl &= ~SDHCI_CTRL_TUNED_CLK;
-			ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
-			sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
-
-			err = -EIO;
-			goto out;
+				"procedure\n");
+			pr_warn("%s: present %08x, ctrl2 %08x, irq %08x\n"
+				"%s: loop %d, retry....\n",
+				mmc_hostname(host->mmc),
+				sdhci_readl(host, SDHCI_PRESENT_STATE),
+				sdhci_readw(host, SDHCI_HOST_CONTROL2),
+				sdhci_readl(host, SDHCI_INT_STATUS),
+				mmc_hostname(host->mmc),
+				tuning_loop_counter);
 		}
 
 		host->tuning_done = 0;
 
 		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
-		tuning_loop_counter--;
-		timeout--;
-		mdelay(1);
+		if (tuning_loop_counter)
+			tuning_loop_counter--;
 	} while (ctrl & SDHCI_CTRL_EXEC_TUNING);
 
 	/*
 	 * The Host Driver has exhausted the maximum number of loops allowed,
 	 * so use fixed sampling frequency.
 	 */
-	if (!tuning_loop_counter || !timeout) {
+	if (!tuning_loop_counter) {
+		ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
 		ctrl &= ~SDHCI_CTRL_TUNED_CLK;
 		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
 	} else {
 		if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) {
-			pr_info(DRIVER_NAME ": Tuning procedure"
+			pr_err(DRIVER_NAME ": Tuning procedure"
 				" failed, falling back to fixed sampling"
 				" clock\n");
 			err = -EIO;
 		}
 	}
 
-out:
 	/*
 	 * If this is the very first time we are here, we start the retuning
 	 * timer. Since only during the first time, SDHCI_NEEDS_RETUNING
@@ -1989,9 +2430,11 @@
 		/* Tuning mode 1 limits the maximum data length to 4MB */
 		mmc->max_blk_count = (4 * 1024 * 1024) / mmc->max_blk_size;
 	} else {
-		host->flags &= ~SDHCI_NEEDS_RETUNING;
+		if (tuning_loop_counter)
+			host->flags &= ~SDHCI_NEEDS_RETUNING;
 		/* Reload the new initial value for timer */
-		if (host->tuning_mode == SDHCI_TUNING_MODE_1)
+		if ((host->tuning_mode == SDHCI_TUNING_MODE_1) &&
+				host->tuning_count)
 			mod_timer(&host->tuning_timer, jiffies +
 				host->tuning_count * HZ);
 	}
@@ -2008,14 +2451,12 @@
 		err = 0;
 
 	sdhci_clear_set_irqs(host, SDHCI_INT_DATA_AVAIL, ier);
-	spin_unlock(&host->lock);
-	enable_irq(host->irq);
+	spin_unlock_irqrestore(&host->lock, flags);
 	sdhci_runtime_pm_put(host);
 
 	return err;
 }
 
-
 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
 {
 	u16 ctrl;
@@ -2056,6 +2497,8 @@
 		pr_err("%s: Resetting controller.\n",
 			mmc_hostname(host->mmc));
 
+		if (host->quirks2 & SDHCI_QUIRK2_WAIT_FOR_IDLE)
+			sdhci_busy_wait(mmc, 1000);
 		sdhci_reset(host, SDHCI_RESET_CMD);
 		sdhci_reset(host, SDHCI_RESET_DATA);
 
@@ -2066,6 +2509,23 @@
 	spin_unlock_irqrestore(&host->lock, flags);
 }
 
+static void sdhci_set_dev_power(struct mmc_host *mmc, bool poweron)
+{
+	struct sdhci_host *host = mmc_priv(mmc);
+	if (host->ops->set_dev_power)
+		host->ops->set_dev_power(host, poweron);
+}
+
+static void sdhci_init_card(struct mmc_host *mmc, struct mmc_card *card)
+{
+	struct sdhci_host *host = mmc_priv(mmc);
+
+	if (host->quirks2 & SDHCI_QUIRK2_NON_STD_CIS) {
+		card->quirks |= MMC_QUIRK_NON_STD_CIS;
+	}
+}
+
+
 static const struct mmc_host_ops sdhci_ops = {
 	.request	= sdhci_request,
 	.set_ios	= sdhci_set_ios,
@@ -2077,6 +2537,9 @@
 	.execute_tuning			= sdhci_execute_tuning,
 	.card_event			= sdhci_card_event,
 	.card_busy	= sdhci_card_busy,
+	.set_dev_power = sdhci_set_dev_power,
+	.init_card = sdhci_init_card,
+	.busy_wait	= sdhci_busy_wait,
 };
 
 /*****************************************************************************\
@@ -2089,9 +2552,11 @@
 {
 	struct sdhci_host *host = (struct sdhci_host*)param;
 
+	cancel_delayed_work(&host->mmc->detect);
+
 	sdhci_card_event(host->mmc);
 
-	mmc_detect_change(host->mmc, msecs_to_jiffies(200));
+	mmc_detect_change(host->mmc, msecs_to_jiffies(500));
 }
 
 static void sdhci_tasklet_finish(unsigned long param)
@@ -2122,7 +2587,8 @@
 	 * upon error conditions.
 	 */
 	if (!(host->flags & SDHCI_DEVICE_DEAD) &&
-	    ((mrq->cmd && mrq->cmd->error) ||
+		((mrq->cmd && mrq->cmd->error &&
+		mrq->cmd->error != -ENOMEDIUM) ||
 		 (mrq->data && (mrq->data->error ||
 		  (mrq->data->stop && mrq->data->stop->error))) ||
 		   (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {
@@ -2134,6 +2600,8 @@
 
 		/* Spec says we should do both at the same time, but Ricoh
 		   controllers do not like that. */
+		if (host->quirks2 & SDHCI_QUIRK2_WAIT_FOR_IDLE)
+			sdhci_busy_wait(host->mmc, 1000);
 		sdhci_reset(host, SDHCI_RESET_CMD);
 		sdhci_reset(host, SDHCI_RESET_DATA);
 	}
@@ -2149,10 +2617,25 @@
 	mmiowb();
 	spin_unlock_irqrestore(&host->lock, flags);
 
+	sdhci_release_ownership(host->mmc);
 	mmc_request_done(host->mmc, mrq);
 	sdhci_runtime_pm_put(host);
 }
 
+static void dump_rte_apic_reg(struct sdhci_host *host, void __iomem *idx_addr)
+{
+	unsigned int rte_lo, rte_hi;
+
+	writeb(0x10 + 2 * host->irq, idx_addr);
+	rte_lo = readl(host->rte_addr + 0x10);
+
+	writeb(0x10 + 2 * host->irq + 1, idx_addr);
+	rte_hi = readl(host->rte_addr + 0x10);
+
+	pr_err("%s: dump APIC RTE reg - L32: 0x%08x, H32: 0x%08x\n",
+		mmc_hostname(host->mmc), rte_lo, rte_hi);
+}
+
 static void sdhci_timeout_timer(unsigned long data)
 {
 	struct sdhci_host *host;
@@ -2167,6 +2650,12 @@
 			"interrupt.\n", mmc_hostname(host->mmc));
 		sdhci_dumpregs(host);
 
+		if (host->ops->gpio_buf_dump)
+			host->ops->gpio_buf_dump(host);
+
+		if (host->rte_addr)
+			dump_rte_apic_reg(host, host->rte_addr);
+
 		if (host->data) {
 			host->data->error = -ETIMEDOUT;
 			sdhci_finish_data(host);
@@ -2243,7 +2732,10 @@
 			DBG("Cannot wait for busy signal when also "
 				"doing a data transfer");
 		else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ))
-			return;
+			if (!host->r1b_busy_end) {
+				host->r1b_busy_end = 1;
+				return;
+			}
 
 		/* The controller does not support the end-of-busy IRQ,
 		 * fall through and take the SDHCI_INT_RESPONSE */
@@ -2253,7 +2745,6 @@
 		sdhci_finish_command(host);
 }
 
-#ifdef CONFIG_MMC_DEBUG
 static void sdhci_show_adma_error(struct sdhci_host *host)
 {
 	const char *name = mmc_hostname(host->mmc);
@@ -2269,7 +2760,7 @@
 		len = (__le16 *)(desc + 2);
 		attr = *desc;
 
-		DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
+		pr_err("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
 		    name, desc, le32_to_cpu(*dma), le16_to_cpu(*len), attr);
 
 		desc += 8;
@@ -2278,9 +2769,6 @@
 			break;
 	}
 }
-#else
-static void sdhci_show_adma_error(struct sdhci_host *host) { }
-#endif
 
 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
 {
@@ -2303,10 +2791,23 @@
 		 * The "data complete" interrupt is also used to
 		 * indicate that a busy state has ended. See comment
 		 * above in sdhci_cmd_irq().
+		 *
+		 * "data timeout" interrupt may also happen
 		 */
 		if (host->cmd && (host->cmd->flags & MMC_RSP_BUSY)) {
 			if (intmask & SDHCI_INT_DATA_END) {
-				sdhci_finish_command(host);
+				if (host->r1b_busy_end)
+					sdhci_finish_command(host);
+				else
+					host->r1b_busy_end = 1;
+				return;
+			} else if (intmask & SDHCI_INT_DATA_TIMEOUT) {
+				pr_err("%s: Got data interrupt 0x%08x for busy cmd %d\n",
+						mmc_hostname(host->mmc),
+						(unsigned)intmask,
+						host->cmd->opcode);
+				host->cmd->error = -ETIMEDOUT;
+				tasklet_schedule(&host->finish_tasklet);
 				return;
 			}
 		}
@@ -2328,13 +2829,26 @@
 			!= MMC_BUS_TEST_R)
 		host->data->error = -EILSEQ;
 	else if (intmask & SDHCI_INT_ADMA_ERROR) {
-		pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
+		pr_err("%s: ADMA error, int 0x%08x\n",
+				mmc_hostname(host->mmc), intmask);
 		sdhci_show_adma_error(host);
 		host->data->error = -EIO;
 		if (host->ops->adma_workaround)
 			host->ops->adma_workaround(host, intmask);
 	}
 
+	if (intmask & SDHCI_INT_TAR_RSP_ERR) {
+		pr_err("%s: Target response error, int 0x%08x\n",
+				mmc_hostname(host->mmc), intmask);
+		sdhci_show_adma_error(host);
+		if (!host->data->error) {
+			host->data->error = -EIO;
+		} else
+			pr_err("%s: data error is set already, error %d\n",
+					mmc_hostname(host->mmc),
+					host->data->error);
+	}
+
 	if (host->data->error)
 		sdhci_finish_data(host);
 	else {
@@ -2394,7 +2908,7 @@
 
 	if (host->runtime_suspended) {
 		spin_unlock(&host->lock);
-		pr_warning("%s: got irq while runtime suspended\n",
+		DBG("%s: got irq while runtime suspended\n",
 		       mmc_hostname(host->mmc));
 		return IRQ_HANDLED;
 	}
@@ -2436,6 +2950,21 @@
 	}
 
 	if (intmask & SDHCI_INT_CMD_MASK) {
+		/*
+		 * If encounter command conflict interrupts,
+		 * before clearing it, delay 64 clocks, otherwise the interrupts
+		 * will be generated again.
+		 * This is just experience. SDHC spec doesn't
+		 * say the command conflict interrupts will be generated
+		 * again without a delay before clearing them.
+		 */
+		if ((intmask & SDHCI_INT_CMD_CONFLICT) ==
+				SDHCI_INT_CMD_CONFLICT) {
+			if (host->clock)
+				udelay(64 * 1000000 / host->clock);
+			else
+				udelay(500);
+		}
 		sdhci_writel(host, intmask & SDHCI_INT_CMD_MASK,
 			SDHCI_INT_STATUS);
 		sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
@@ -2491,12 +3020,797 @@
 	return result;
 }
 
+/************************************************************************\
+ *                                                                      *
+ * APIs for panic record use                                           *
+ * Note:                                                               *
+ * For panic use, please take care of sdhci_read/write.                        *
+ *                                                                     *
+ * sdhci_read/write function are defined by sdhci host layer which     *
+ * warpped the read/write function.                                    *
+ * But before calling read/write, sdhci_read/write will try to see if  *
+ * some host drivers defined special register reading/writing functions.*
+ * If do, that is means read/write function defined by kernel cannot be        *
+ * used, have to use the special ones.                                 *
+ * So, if host driver are using special ones, please make sure when in *
+ * panic mode, the special ones are still good to use                  *
+ * So, if not, read/write defined by kernel is safe for panic using    *
+ *                                                                     *
+ * @For MFLD sdhci host controller driver, no special reading/writing  *
+ * funtion are used                                                    *
+ *                                                                      *
+ \************************************************************************/
+
+static int panic_irq_done;
+
+static void sdhci_panic_irq_wait(struct sdhci_host *host);
+
+static inline void sdhci_panic_finish_req(struct sdhci_host *host)
+{
+	host->mrq = NULL;
+	host->cmd = NULL;
+	host->data = NULL;
+	panic_irq_done = 1;
+}
+
+/*
+ * assuming only use SDMA write and data length is 512Bytes
+ */
+static void sdhci_panic_send_cmd(struct sdhci_host *host,
+		struct mmc_command *cmd)
+{
+	unsigned long timeout;
+	u32 mask;
+	int flags;
+
+	WARN_ON(host->cmd);
+	/* Wait max 10 ms */
+	timeout = 10;
+	mask = SDHCI_CMD_INHIBIT;
+	if ((cmd->data != 0) || (cmd->flags & MMC_RSP_BUSY))
+		mask |= SDHCI_DATA_INHIBIT;
+
+	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
+		if (timeout == 0) {
+			pr_err("%s %s: Controller never released inhibit bit(s).\n",
+					__func__, mmc_hostname(host->mmc));
+			sdhci_dumpregs(host);
+			sdhci_panic_finish_req(host);
+			return;
+		}
+		timeout--;
+		/*
+		 * seems card is not ready for the next command.
+		 * We can wait for 1ms and then to have a retry
+		 */
+		mdelay(1);
+	}
+
+	host->cmd = cmd;
+	host->r1b_busy_end = 0;
+
+	/*
+	 * set the data timeout register to be max value
+	 */
+	sdhci_writeb(host, 0xe, SDHCI_TIMEOUT_CONTROL);
+	/*
+	 * prepare data
+	 */
+	if (cmd->data) {
+		unsigned int mode;
+		struct mmc_data *data = cmd->data;
+		u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
+		u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;
+
+		host->data = data;
+		host->data_early = 0;
+		/*
+		 * update DMA address
+		 */
+		sdhci_writel(host, data->dmabuf, SDHCI_DMA_ADDRESS);
+
+		if (host->version >= SDHCI_SPEC_200) {
+			u8 ctrl;
+			ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+			ctrl &= ~SDHCI_CTRL_DMA_MASK;
+			if ((host->flags & SDHCI_REQ_USE_DMA) &&
+					(host->flags & SDHCI_USE_ADMA))
+				ctrl |= SDHCI_CTRL_ADMA32;
+			else
+				ctrl |= SDHCI_CTRL_SDMA;
+			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+		}
+
+		if (host->flags & SDHCI_REQ_USE_DMA)
+			sdhci_clear_set_irqs(host, pio_irqs, dma_irqs);
+		else
+			sdhci_clear_set_irqs(host, dma_irqs, pio_irqs);
+
+		/*
+		 * We do not handle DMA boundaries,
+		 * so set it to max (512 KiB)
+		 */
+		sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, data->blksz),
+				SDHCI_BLOCK_SIZE);
+		sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
+
+		/*
+		 * set transfer mode
+		 */
+		mode = SDHCI_TRNS_BLK_CNT_EN;
+		if (data->blocks > 1) {
+			if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
+				mode |= SDHCI_TRNS_MULTI |
+					SDHCI_TRNS_AUTO_CMD12;
+			else
+				mode |= SDHCI_TRNS_MULTI;
+		}
+		if (host->flags & SDHCI_REQ_USE_DMA)
+			mode |= SDHCI_TRNS_DMA;
+
+		sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
+	}
+
+	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);
+
+	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
+		pr_err("%s %s: Unsupported response type!\n",
+				__func__, mmc_hostname(host->mmc));
+		sdhci_panic_finish_req(host);
+		return;
+	}
+
+	if (!(cmd->flags & MMC_RSP_PRESENT))
+		flags = SDHCI_CMD_RESP_NONE;
+	else if (cmd->flags & MMC_RSP_136)
+		flags = SDHCI_CMD_RESP_LONG;
+	else if (cmd->flags & MMC_RSP_BUSY)
+		flags = SDHCI_CMD_RESP_SHORT_BUSY;
+	else
+		flags = SDHCI_CMD_RESP_SHORT;
+
+	if (cmd->flags & MMC_RSP_CRC)
+		flags |= SDHCI_CMD_CRC;
+	if (cmd->flags & MMC_RSP_OPCODE)
+		flags |= SDHCI_CMD_INDEX;
+	if (cmd->data)
+		flags |= SDHCI_CMD_DATA;
+
+	/*
+	 * send command
+	 */
+	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
+
+	mmiowb();
+
+	/*
+	 * polling interrupt
+	 */
+	sdhci_panic_irq_wait(host);
+}
+
+static void sdhci_panic_finish_data(struct sdhci_host *host)
+{
+	struct mmc_data *data;
+
+	BUG_ON(!host->data);
+
+	data = host->data;
+	host->data = NULL;
+
+	/*
+	 * panic use, will not unmap anything here
+	 */
+
+	/*
+	 * The specification states that the block count register must
+	 * be updated, but it does not specify at what point in the
+	 * data flow. That makes the register entirely useless to read
+	 * back so we have to assume that nothing made it to the card
+	 * in the event of an error.
+	 */
+	if (data->error)
+		data->bytes_xfered = 0;
+	else
+		data->bytes_xfered = data->blksz * data->blocks;
+
+	if (data->stop) {
+		/*
+		 * we will not be here since we use single block
+		 * transfer when panic occured
+		 */
+		sdhci_panic_send_cmd(host, data->stop);
+	} else
+		sdhci_panic_finish_req(host);
+}
+
+static void sdhci_panic_finish_command(struct sdhci_host *host)
+{
+	int i;
+
+	BUG_ON(host->cmd == NULL);
+
+	if (host->cmd->flags & MMC_RSP_PRESENT) {
+		if (host->cmd->flags & MMC_RSP_136) {
+			/* CRC is stripped so we need to do some shifting. */
+			for (i = 0; i < 4; i++) {
+				host->cmd->resp[i] = sdhci_readl(host,
+						SDHCI_RESPONSE + (3-i)*4) << 8;
+				if (i != 3)
+					host->cmd->resp[i] |=
+						sdhci_readb(host,
+						SDHCI_RESPONSE + (3-i)*4-1);
+			}
+		} else {
+			host->cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
+		}
+	}
+
+	host->cmd->error = 0;
+
+	if (host->data && host->data_early)
+		sdhci_panic_finish_data(host);
+
+	if (!host->cmd->data)
+		sdhci_panic_finish_req(host);
+
+	host->cmd = NULL;
+}
+
+/*
+ * sdhci_panic_data_irq: handle data irq in panic mode
+ *
+ * When host is in panic mode, host driver need to poll its interrupt
+ * status register. Once looked up some cmd irqs, call this function
+ * to handle.
+ */
+static void sdhci_panic_cmd_irq(struct sdhci_host *host, u32 intmask)
+{
+	BUG_ON(intmask == 0);
+
+	if (intmask & SDHCI_INT_TIMEOUT)
+		host->cmd->error = -ETIMEDOUT;
+	else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT |
+				SDHCI_INT_INDEX))
+		host->cmd->error = -EILSEQ;
+
+	if (host->cmd->error) {
+		sdhci_panic_finish_req(host);
+		return;
+	}
+
+	if (host->cmd->flags & MMC_RSP_BUSY) {
+		if (host->cmd->data)
+			pr_debug("Cannot wait for busy signal when also doing a data transfer\n");
+		else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ))
+			if (!host->r1b_busy_end) {
+				host->r1b_busy_end = 1;
+				return;
+			}
+	}
+
+	if (intmask & SDHCI_INT_RESPONSE)
+		sdhci_panic_finish_command(host);
+}
+
+/*
+ * sdhci_panic_data_irq: handle data irq in panic mode
+ *
+ * When host is in panic mode, host driver need to poll its interrupt
+ * status register. Once looked up some data irqs, call this function
+ * to handle.
+ */
+static void sdhci_panic_data_irq(struct sdhci_host *host, u32 intmask)
+{
+	BUG_ON(intmask == 0);
+
+	if (!host->data) {
+		/*
+		 * The "data complete" interrupt is also used to
+		 * indicate that a busy state has ended. See comment
+		 * above in sdhci_cmd_irq().
+		 */
+		if (host->cmd && (host->cmd->flags & MMC_RSP_BUSY)) {
+			if (intmask & SDHCI_INT_DATA_END) {
+				if (host->r1b_busy_end)
+					sdhci_panic_finish_command(host);
+				else
+					host->r1b_busy_end = 1;
+				return;
+			}
+		}
+
+		pr_err("%s %s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
+			__func__, mmc_hostname(host->mmc), (unsigned)intmask);
+		sdhci_dumpregs(host);
+
+		return;
+	}
+
+	if (intmask & SDHCI_INT_DATA_TIMEOUT)
+		host->data->error = -ETIMEDOUT;
+	else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT))
+		host->data->error = -EILSEQ;
+	else if (intmask & SDHCI_INT_ADMA_ERROR) {
+		pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
+		host->data->error = -EIO;
+	}
+
+	if (host->data->error)
+		sdhci_panic_finish_data(host);
+	else {
+		if (intmask & SDHCI_INT_DMA_END)
+			sdhci_writel(host, sdhci_readl(host, SDHCI_DMA_ADDRESS),
+					SDHCI_DMA_ADDRESS);
+
+		if (intmask & SDHCI_INT_DATA_END) {
+			if (host->cmd)
+				host->data_early = 1;
+			else
+				sdhci_panic_finish_data(host);
+		}
+	}
+}
+
+/*
+ * sdhci_panic_irq_wait: irq handler for panic record
+ */
+static void sdhci_panic_irq_wait(struct sdhci_host *host)
+{
+	u32 intmask;
+	panic_irq_done = 0;
+retry:
+	intmask = sdhci_readl(host, SDHCI_INT_STATUS);
+
+	if (!intmask || intmask == 0xffffffff)
+		goto retry;
+
+	DBG("***%s got interrupt: 0x%08x\n",
+			__func__, intmask);
+
+	if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
+		sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
+				SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
+		/*
+		 * do nothing for card detect
+		 */
+	}
+
+	intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
+
+	if (intmask & SDHCI_INT_CMD_MASK) {
+		sdhci_writel(host, intmask & SDHCI_INT_CMD_MASK,
+				SDHCI_INT_STATUS);
+		sdhci_panic_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
+	}
+
+	if (intmask & SDHCI_INT_DATA_MASK) {
+		sdhci_writel(host, intmask & SDHCI_INT_DATA_MASK,
+				SDHCI_INT_STATUS);
+		sdhci_panic_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
+	}
+
+	intmask &= ~(SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK);
+
+	intmask &= ~SDHCI_INT_ERROR;
+
+	if (intmask & SDHCI_INT_BUS_POWER) {
+		pr_err("%s %s: Card is consuming too much power!\n",
+				__func__, mmc_hostname(host->mmc));
+		sdhci_writel(host, SDHCI_INT_BUS_POWER, SDHCI_INT_STATUS);
+	}
+
+	intmask &= ~SDHCI_INT_BUS_POWER;
+
+	if (intmask & SDHCI_INT_CARD_INT) {
+		sdhci_writel(host, intmask & SDHCI_INT_CARD_INT,
+				SDHCI_INT_STATUS);
+		/*
+		 * do nothing for this irq
+		 */
+		intmask &= ~SDHCI_INT_CARD_INT;
+	}
+
+	if (intmask) {
+		pr_err("%s %s: Unexpected interrupt 0x%08x.\n",
+				__func__, mmc_hostname(host->mmc), intmask);
+		sdhci_dumpregs(host);
+
+		sdhci_writel(host, intmask, SDHCI_INT_STATUS);
+	}
+
+	mmiowb();
+	if (!panic_irq_done)
+		goto retry;
+}
+
+static void sdhci_mfld_panic_set_ios(struct mmc_panic_host *mmc)
+{
+	struct sdhci_host *host;
+	struct mmc_ios *ios;
+	u8 ctrl;
+	u16 clk, ctrl_2;
+
+	if (!mmc)
+		return;
+	ios = &mmc->ios;
+	host = (struct sdhci_host *)mmc->priv;
+
+	/*
+	 * Reset the chip on each power off.
+	 * Should clear out any weird states.
+	 */
+	if (ios->power_mode == MMC_POWER_OFF)
+		pr_info("%s: we are in panic, why need power off?\n", __func__);
+
+	sdhci_set_clock(host, ios->clock);
+
+	if (ios->power_mode == MMC_POWER_OFF)
+		sdhci_set_power(host, -1);
+	else
+		sdhci_set_power(host, ios->vdd);
+
+	if (host->ops->platform_send_init_74_clocks)
+		host->ops->platform_send_init_74_clocks(host, ios->power_mode);
+
+	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+
+	if (ios->bus_width == MMC_BUS_WIDTH_8)
+		ctrl |= SDHCI_CTRL_8BITBUS;
+	else
+		ctrl &= ~SDHCI_CTRL_8BITBUS;
+
+	if (ios->bus_width == MMC_BUS_WIDTH_4)
+		ctrl |= SDHCI_CTRL_4BITBUS;
+	else
+		ctrl &= ~SDHCI_CTRL_4BITBUS;
+
+	if (ios->timing && !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
+		ctrl |= SDHCI_CTRL_HISPD;
+	else
+		ctrl &= ~SDHCI_CTRL_HISPD;
+
+	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+
+	if (((host->version >= SDHCI_SPEC_300) ||
+			(host->quirks2 & SDHCI_QUIRK2_V2_0_SUPPORT_DDR50)) &&
+			ios->timing != MMC_TIMING_LEGACY) {
+		ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+		ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
+		if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
+			ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
+		else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
+			ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
+		sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
+
+		/* Reset SD Clock Enable */
+		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+		clk &= ~SDHCI_CLOCK_CARD_EN;
+		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+		if (host->ops->set_uhs_signaling)
+			host->ops->set_uhs_signaling(host, ios->timing);
+		else {
+			ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+			/* Select Bus Speed Mode for host */
+			ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
+			if (ios->timing == MMC_TIMING_MMC_HS400)
+				ctrl_2 |= SDHCI_CTRL_HS_DDR200;
+			else if (ios->timing == MMC_TIMING_MMC_HS200)
+				ctrl_2 |= SDHCI_CTRL_HS_SDR200;
+			else if (ios->timing == MMC_TIMING_UHS_SDR12)
+				ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
+			else if (ios->timing == MMC_TIMING_UHS_SDR25)
+				ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
+			else if (ios->timing == MMC_TIMING_UHS_SDR50)
+				ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
+			else if (ios->timing == MMC_TIMING_UHS_SDR104)
+				ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
+			else if (ios->timing == MMC_TIMING_UHS_DDR50)
+				ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
+			sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
+		}
+		if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
+				((ios->timing == MMC_TIMING_UHS_SDR12) ||
+				 (ios->timing == MMC_TIMING_UHS_SDR25) ||
+				 (ios->timing == MMC_TIMING_UHS_SDR50) ||
+				 (ios->timing == MMC_TIMING_UHS_SDR104) ||
+				 (ios->timing == MMC_TIMING_UHS_DDR50))) {
+			u16 preset;
+
+			sdhci_enable_preset_value(host, true);
+			preset = sdhci_get_preset_value(host);
+			ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
+				>> SDHCI_PRESET_DRV_SHIFT;
+		}
+		/* Re-enable SD Clock */
+		sdhci_update_clock(host);
+	}
+	/*
+	 * Some (ENE) controllers go apeshit on some ios operation,
+	 * signalling timeout and CRC errors even on CMD0. Resetting
+	 * it on each ios seems to solve the problem.
+	 */
+	if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
+		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
+
+	mmiowb();
+}
+
+static void sdhci_panic_reinit_host(struct mmc_panic_host *mmc)
+{
+	struct sdhci_host *host = mmc->priv;
+	sdhci_init(host, 0);
+	host->pwr = 0; /* force power reprogram */
+	host->clock = 0; /* force clock reprogram */
+	sdhci_mfld_panic_set_ios(mmc);
+	mmiowb();
+}
+
+static void sdhci_mfld_panic_request(struct mmc_panic_host *panic_mmc,
+		struct mmc_request *mrq)
+{
+	struct sdhci_host *host;
+	bool present;
+
+	if (!panic_mmc || !mrq)
+		return;
+
+	host = (struct sdhci_host *)panic_mmc->priv;
+
+	/*
+	 * only support single block data DMA write
+	 */
+	if (mrq->cmd->data) {
+		if (mrq->cmd->data->blocks != 1 ||
+				mrq->cmd->data->flags & MMC_DATA_READ)
+			mrq->cmd->error = -EINVAL;
+	}
+
+	if (host->flags & SDHCI_USE_ADMA)
+		host->flags &= ~SDHCI_USE_ADMA;
+
+	if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12) {
+		if (mrq->stop) {
+			mrq->data->stop = NULL;
+			mrq->stop = NULL;
+		}
+	}
+
+	host->mrq = mrq;
+
+	/* If polling, assume that the card is always present. */
+	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
+		present = true;
+	else
+		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
+			SDHCI_CARD_PRESENT;
+
+	if (!present) {
+		host->mrq->cmd->error = -ENOMEDIUM;
+		sdhci_panic_finish_req(host);
+	} else
+		sdhci_panic_send_cmd(host, mrq->cmd);
+
+	/*
+	 * The controller needs a reset of internal state machines
+	 * upon error conditions.
+	 */
+	if (mrq->cmd->error || (mrq->data && (mrq->data->error ||
+			(mrq->data->stop && mrq->data->stop->error))) ||
+			(host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)) {
+		pr_err("%s: request handle failed\n", __func__);
+		sdhci_dumpregs(host);
+		sdhci_panic_reinit_host(panic_mmc);
+	}
+}
+
+/*
+ * The same like sdhci_acquire_ownership, used for IA to get the ownership
+ * before using host controller. Since this function is called in panic mode,
+ * so we can not use msleep() like sdhci_acquire_ownership does, use mdelay()
+ * instead.
+ */
+static int sdhci_mfld_panic_acquire_ownership(struct sdhci_host *host)
+{
+	unsigned long t1, t2;
+
+	if (!host->sram_addr)
+		return DEKKER_OWNER_IA;
+
+	/* If IA has already hold the eMMC mutex, then just exit */
+	if (readl(host->sram_addr + DEKKER_IA_REQ_OFFSET))
+		return 0;
+
+	writel(1, host->sram_addr + DEKKER_IA_REQ_OFFSET);
+
+	t1 = 100;
+	t2 = 500;
+
+	while (readl(host->sram_addr + DEKKER_SCU_REQ_OFFSET)) {
+		if (readl(host->sram_addr + DEKKER_EMMC_OWNER_OFFSET) !=
+				DEKKER_OWNER_IA) {
+			writel(0, host->sram_addr + DEKKER_IA_REQ_OFFSET);
+			while (t2) {
+				if (readl(host->sram_addr +
+						DEKKER_EMMC_OWNER_OFFSET) ==
+						DEKKER_OWNER_IA)
+					break;
+				mdelay(10);
+				t2--;
+			}
+			if (t2)
+				writel(1, host->sram_addr +
+						DEKKER_IA_REQ_OFFSET);
+			else
+				goto timeout;
+		}
+		/*
+		 * if we go to here, that means SCU FW is releasing the
+		 * ownership, so we just wait for a short time here.
+		 */
+		if (t1) {
+			mdelay(10);
+			t1--;
+		} else
+			goto timeout;
+	}
+
+	pr_debug("Acquire ownership - eMMC owner: %d, IA req: %d, SCU req: %d\n",
+			readl(host->sram_addr + DEKKER_EMMC_OWNER_OFFSET),
+			readl(host->sram_addr + DEKKER_IA_REQ_OFFSET),
+			readl(host->sram_addr + DEKKER_SCU_REQ_OFFSET));
+
+	return (readl(host->sram_addr + DEKKER_EMMC_OWNER_OFFSET) ==
+		DEKKER_OWNER_IA) ? DEKKER_OWNER_SCU : DEKKER_OWNER_IA;
+timeout:
+
+	pr_warn("%s: Timeout to hold eMMC mutex\n", __func__);
+	return -EBUSY;
+}
+
+static int sdhci_mfld_panic_power_on(struct mmc_panic_host *panic_host)
+{
+	int ret;
+	struct mmc_host *mmc;
+	struct sdhci_host *host;
+
+	if (!panic_host)
+		return -ENODEV;
+	mmc = panic_host->mmc;
+	host = panic_host->priv;
+
+	if (host->runtime_suspended) {
+		/*
+		 * power host controller
+		 */
+		pm_runtime_get_noresume(mmc->parent);
+
+		if (host->ops->power_up_host) {
+			ret = host->ops->power_up_host(host);
+			if (ret)
+				return ret;
+		}
+		sdhci_panic_reinit_host(panic_host);
+		host->runtime_suspended = false;
+	}
+
+	return 0;
+}
+
+static int sdhci_mfld_panic_hold_mutex(struct mmc_panic_host *panic_host)
+{
+	struct sdhci_host *host;
+	int ret;
+
+	if (!panic_host)
+		return -ENODEV;
+
+	host = panic_host->priv;
+
+	ret = sdhci_mfld_panic_acquire_ownership(host);
+
+	if (ret == DEKKER_OWNER_SCU) {
+		if (host->ops->power_up_host) {
+			ret = host->ops->power_up_host(host);
+			if (ret)
+				return ret;
+		}
+		sdhci_panic_reinit_host(panic_host);
+		return 0;
+	} else if (ret == DEKKER_OWNER_IA)
+		return sdhci_mfld_panic_power_on(panic_host);
+
+	return ret;
+}
+
+static void sdhci_mfld_panic_release_mutex(struct mmc_panic_host *panic_host)
+{
+	struct sdhci_host *host;
+
+	if (!panic_host)
+		return;
+	host = panic_host->priv;
+
+	if (!host->sram_addr)
+		return;
+
+	writel(DEKKER_OWNER_SCU,
+			host->sram_addr + DEKKER_EMMC_OWNER_OFFSET);
+	writel(0, host->sram_addr + DEKKER_IA_REQ_OFFSET);
+	DBG("Exit ownership - eMMC owner: %d, IA req: %d, SCU req: %d\n",
+			readl(host->sram_addr + DEKKER_EMMC_OWNER_OFFSET),
+			readl(host->sram_addr + DEKKER_IA_REQ_OFFSET),
+			readl(host->sram_addr + DEKKER_SCU_REQ_OFFSET));
+}
+
+static void sdhci_mfld_panic_prepare(struct mmc_panic_host *panic_host)
+{
+	struct sdhci_host *host;
+
+	if (!panic_host)
+		return;
+	host = panic_host->priv;
+
+	/*
+	 * assume host is powered off
+	 */
+	host->runtime_suspended = true;
+
+#ifdef CONFIG_PM_RUNTIME
+	/*
+	 * disable runtime pm directly
+	 */
+	panic_host->mmc->parent->power.disable_depth = 1;
+#endif
+}
+
+static int sdhci_mfld_panic_setup(struct mmc_panic_host *panic_host)
+{
+	struct sdhci_host *host;
+
+	if (!panic_host)
+		return 0;
+
+	host = mmc_priv(panic_host->mmc);
+	panic_host->priv = (void *)host;
+
+	return 0;
+}
+
+const struct mmc_host_panic_ops sdhci_panic_ops = {
+	.request        = sdhci_mfld_panic_request,
+	.prepare        = sdhci_mfld_panic_prepare,
+	.setup          = sdhci_mfld_panic_setup,
+	.set_ios        = sdhci_mfld_panic_set_ios,
+	.power_on       = sdhci_mfld_panic_power_on,
+	.hold_mutex     = sdhci_mfld_panic_hold_mutex,
+	.release_mutex  = sdhci_mfld_panic_release_mutex,
+};
+
+void sdhci_alloc_panic_host(struct sdhci_host *host)
+{
+	if (!host->mmc)
+		return;
+	mmc_alloc_panic_host(host->mmc, &sdhci_panic_ops);
+}
+EXPORT_SYMBOL_GPL(sdhci_alloc_panic_host);
+
+
 /*****************************************************************************\
  *                                                                           *
  * Suspend/resume                                                            *
  *                                                                           *
 \*****************************************************************************/
 
+static void sdhci_set_emmc_state(struct sdhci_host *host, uint32_t state)
+{
+	/* Only if there is dekker mutex available */
+	if (!host->sram_addr)
+		return;
+	writel(state, host->sram_addr + DEKKER_EMMC_STATE);
+}
+
 #ifdef CONFIG_PM
 void sdhci_enable_irq_wakeups(struct sdhci_host *host)
 {
@@ -2528,31 +3842,27 @@
 int sdhci_suspend_host(struct sdhci_host *host)
 {
 	int ret;
+	unsigned long flags;
 
 	if (host->ops->platform_suspend)
 		host->ops->platform_suspend(host);
 
+	sdhci_acquire_ownership(host->mmc);
+
 	sdhci_disable_card_detection(host);
 
+	ret = mmc_suspend_host(host->mmc);
+	if (ret) {
+		sdhci_enable_card_detection(host);
+		goto out;
+	}
+
 	/* Disable tuning since we are suspending */
 	if (host->flags & SDHCI_USING_RETUNING_TIMER) {
 		del_timer_sync(&host->tuning_timer);
 		host->flags &= ~SDHCI_NEEDS_RETUNING;
 	}
 
-	ret = mmc_suspend_host(host->mmc);
-	if (ret) {
-		if (host->flags & SDHCI_USING_RETUNING_TIMER) {
-			host->flags |= SDHCI_NEEDS_RETUNING;
-			mod_timer(&host->tuning_timer, jiffies +
-					host->tuning_count * HZ);
-		}
-
-		sdhci_enable_card_detection(host);
-
-		return ret;
-	}
-
 	if (!device_may_wakeup(mmc_dev(host->mmc))) {
 		sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
 		free_irq(host->irq, host);
@@ -2560,6 +3870,15 @@
 		sdhci_enable_irq_wakeups(host);
 		enable_irq_wake(host->irq);
 	}
+
+	/* Card succesfully suspended. Tell information to SCU */
+	sdhci_set_emmc_state(host, DEKKER_EMMC_CHIP_SUSPENDED);
+
+	spin_lock_irqsave(&host->lock, flags);
+	host->suspended = true;
+	spin_unlock_irqrestore(&host->lock, flags);
+out:
+	sdhci_release_ownership(host->mmc);
 	return ret;
 }
 
@@ -2568,6 +3887,32 @@
 int sdhci_resume_host(struct sdhci_host *host)
 {
 	int ret;
+	unsigned long flags;
+
+	if (host->quirks2 & SDHCI_QUIRK2_CARD_CD_DELAY) {
+		int loop = 0;
+		unsigned int present;
+		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
+			SDHCI_CARD_PRESENT;
+		/*
+		 * delay 10ms to wait for present register stable
+		 * try 5 loops, and each loops will wait 2ms
+		 */
+		while (!present && loop < 5) {
+			/* BYT eMMC4.5 silicon issue workaround: 4599639 */
+			mdelay(2);
+			present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
+				SDHCI_CARD_PRESENT;
+			loop++;
+		}
+		if (loop == 5) {
+			WARN_ON(1);
+			pr_warn("%s %s: PRESENT bit16 is not recover\n",
+					__func__, mmc_hostname(host->mmc));
+		}
+	}
+
+	sdhci_acquire_ownership(host->mmc);
 
 	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
 		if (host->ops->enable_dma)
@@ -2578,7 +3923,7 @@
 		ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
 				  mmc_hostname(host->mmc), host);
 		if (ret)
-			return ret;
+			goto out;
 	} else {
 		sdhci_disable_irq_wakeups(host);
 		disable_irq_wake(host->irq);
@@ -2596,6 +3941,10 @@
 		mmiowb();
 	}
 
+	spin_lock_irqsave(&host->lock, flags);
+	host->suspended = false;
+	spin_unlock_irqrestore(&host->lock, flags);
+
 	ret = mmc_resume_host(host->mmc);
 	sdhci_enable_card_detection(host);
 
@@ -2606,6 +3955,10 @@
 	if (host->flags & SDHCI_USING_RETUNING_TIMER)
 		host->flags |= SDHCI_NEEDS_RETUNING;
 
+	/* Card back in active state */
+	sdhci_set_emmc_state(host, DEKKER_EMMC_CHIP_ACTIVE);
+out:
+	sdhci_release_ownership(host->mmc);
 	return ret;
 }
 
@@ -2630,6 +3983,7 @@
 	unsigned long flags;
 	int ret = 0;
 
+	sdhci_do_acquire_ownership(host->mmc);
 	/* Disable tuning since we are suspending */
 	if (host->flags & SDHCI_USING_RETUNING_TIMER) {
 		del_timer_sync(&host->tuning_timer);
@@ -2646,6 +4000,7 @@
 	host->runtime_suspended = true;
 	spin_unlock_irqrestore(&host->lock, flags);
 
+	sdhci_release_ownership(host->mmc);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
@@ -2655,12 +4010,46 @@
 	unsigned long flags;
 	int ret = 0, host_flags = host->flags;
 
+	if (host->quirks2 & SDHCI_QUIRK2_CARD_CD_DELAY) {
+		int loop = 0;
+		unsigned int present;
+		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
+			SDHCI_CARD_PRESENT;
+		/*
+		 * delay 10ms to wait for present register stable
+		 * try 5 loops, and each loops will wait 2ms
+		 */
+		while (!present && loop < 5) {
+			/* BYT eMMC4.5 silicon issue workaround: 4599639 */
+			mdelay(2);
+			present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
+				SDHCI_CARD_PRESENT;
+			loop++;
+		}
+		if (loop == 5) {
+			WARN_ON(1);
+			pr_warn("%s %s: PRESENT bit16 is not recover\n",
+					__func__, mmc_hostname(host->mmc));
+
+		}
+	}
+
+	sdhci_do_acquire_ownership(host->mmc);
+
 	if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
 		if (host->ops->enable_dma)
 			host->ops->enable_dma(host);
 	}
 
-	sdhci_init(host, 0);
+	if (host->mmc->caps2 & MMC_CAP2_PWCTRL_POWER)
+		sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK,
+			SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
+			SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
+			SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
+			SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
+			SDHCI_INT_RESPONSE);
+	else
+		sdhci_init(host, 0);
 
 	/* Force clock and power re-program */
 	host->pwr = 0;
@@ -2676,8 +4065,10 @@
 	}
 
 	/* Set the re-tuning expiration flag */
-	if (host->flags & SDHCI_USING_RETUNING_TIMER)
+	if (host->flags & SDHCI_USING_RETUNING_TIMER) {
 		host->flags |= SDHCI_NEEDS_RETUNING;
+		host->flags |= SDHCI_EXIT_RPM_RESUME;
+	}
 
 	spin_lock_irqsave(&host->lock, flags);
 
@@ -2692,6 +4083,7 @@
 
 	spin_unlock_irqrestore(&host->lock, flags);
 
+	sdhci_release_ownership(host->mmc);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
@@ -2724,6 +4116,36 @@
 
 EXPORT_SYMBOL_GPL(sdhci_alloc_host);
 
+/**
+ *      sdhci_pci_request_regulators - try requesting regulator of
+ *                                     a sdhci device
+ *
+ *      We take care of race conditions here between sdhci_add_host() (probe)
+ *      and platform code that may kick a retry at anytime during boot.
+ */
+int sdhci_try_get_regulator(struct sdhci_host *host)
+{
+	struct regulator *vmmc;
+	unsigned long flags;
+	if (!host->vmmc) {
+		vmmc = regulator_get(mmc_dev(host->mmc), "vmmc");
+		if (!IS_ERR(vmmc)) {
+			spin_lock_irqsave(&host->lock, flags);
+			if (!host->vmmc) {
+				host->vmmc = vmmc;
+				spin_unlock_irqrestore(&host->lock, flags);
+				return 0;
+			} else {
+			  /* race! we got the regulator twice */
+				spin_unlock_irqrestore(&host->lock, flags);
+				regulator_put(vmmc);
+			}
+		}
+	}
+	return -EAGAIN;
+}
+EXPORT_SYMBOL_GPL(sdhci_try_get_regulator);
+
 int sdhci_add_host(struct sdhci_host *host)
 {
 	struct mmc_host *mmc;
@@ -2757,6 +4179,7 @@
 	caps[0] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps :
 		sdhci_readl(host, SDHCI_CAPABILITIES);
 
+
 	if (host->version >= SDHCI_SPEC_300)
 		caps[1] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ?
 			host->caps1 :
@@ -2897,13 +4320,16 @@
 
 	mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
 
+	mmc->caps |= MMC_CAP_POWER_OFF_CARD;
+
 	if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
 		host->flags |= SDHCI_AUTO_CMD12;
 
 	/* Auto-CMD23 stuff only works in ADMA or PIO. */
 	if ((host->version >= SDHCI_SPEC_300) &&
-	    ((host->flags & SDHCI_USE_ADMA) ||
-	     !(host->flags & SDHCI_USE_SDMA))) {
+		!(host->quirks2 & SDHCI_QUIRK2_BROKEN_AUTO_CMD23) &&
+		((host->flags & SDHCI_USE_ADMA) ||
+		!(host->flags & SDHCI_USE_SDMA))) {
 		host->flags |= SDHCI_AUTO_CMD23;
 		DBG("%s: Auto-CMD23 available\n", mmc_hostname(mmc));
 	} else {
@@ -2923,7 +4349,8 @@
 	if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
 		mmc->caps &= ~MMC_CAP_CMD23;
 
-	if (caps[0] & SDHCI_CAN_DO_HISPD)
+	if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HIGH_SPEED) &&
+			(caps[0] & SDHCI_CAN_DO_HISPD))
 		mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
 
 	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
@@ -2962,12 +4389,14 @@
 		mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
 
 	/* SDR104 supports also implies SDR50 support */
-	if (caps[1] & SDHCI_SUPPORT_SDR104)
+	if ((caps[1] & SDHCI_SUPPORT_SDR104) &&
+			!(host->quirks2 & SDHCI_QUIRK2_SDR104_BROKEN))
 		mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
 	else if (caps[1] & SDHCI_SUPPORT_SDR50)
 		mmc->caps |= MMC_CAP_UHS_SDR50;
 
-	if (caps[1] & SDHCI_SUPPORT_DDR50)
+	if ((caps[1] & SDHCI_SUPPORT_DDR50) ||
+			(host->quirks2 & SDHCI_QUIRK2_V2_0_SUPPORT_DDR50))
 		mmc->caps |= MMC_CAP_UHS_DDR50;
 
 	/* Does the host need tuning for SDR50? */
@@ -2989,30 +4418,30 @@
 	/* Initial value for re-tuning timer count */
 	host->tuning_count = (caps[1] & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
 			      SDHCI_RETUNING_TIMER_COUNT_SHIFT;
+	if ((host->tuning_count == 0 || host->tuning_count ==
+				SDHCI_OTHER_TUNING_SOURCE) &&
+			host->ops->get_tuning_count)
+		host->tuning_count = host->ops->get_tuning_count(host);
 
 	/*
 	 * In case Re-tuning Timer is not disabled, the actual value of
 	 * re-tuning timer will be 2 ^ (n - 1).
 	 */
-	if (host->tuning_count)
+	if (host->tuning_count && host->tuning_count <= SDHCI_MAX_TUNING_TIMER)
 		host->tuning_count = 1 << (host->tuning_count - 1);
+	else
+		/* disable tuning timer */
+		host->tuning_count = 0;
 
 	/* Re-tuning mode supported by the Host Controller */
 	host->tuning_mode = (caps[1] & SDHCI_RETUNING_MODE_MASK) >>
 			     SDHCI_RETUNING_MODE_SHIFT;
 
 	ocr_avail = 0;
-
-	host->vmmc = regulator_get(mmc_dev(mmc), "vmmc");
-	if (IS_ERR_OR_NULL(host->vmmc)) {
-		if (PTR_ERR(host->vmmc) < 0) {
-			pr_info("%s: no vmmc regulator found\n",
-				mmc_hostname(mmc));
-			host->vmmc = NULL;
-		}
-	}
+	spin_lock_init(&host->lock);
 
 #ifdef CONFIG_REGULATOR
+	sdhci_try_get_regulator(host);
 	/*
 	 * Voltage range check makes sense only if regulator reports
 	 * any voltage value.
@@ -3055,6 +4484,10 @@
 		}
 	}
 
+	if (host->quirks2 & SDHCI_QUIRK2_FAKE_VDD)
+		caps[0] |= SDHCI_CAN_VDD_330 | SDHCI_CAN_VDD_300 |
+			SDHCI_CAN_VDD_180;
+
 	if (caps[0] & SDHCI_CAN_VDD_330) {
 		ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
 
@@ -3099,8 +4532,6 @@
 		return -ENODEV;
 	}
 
-	spin_lock_init(&host->lock);
-
 	/*
 	 * Maximum number of segments. Depends on if the hardware
 	 * can do scatter/gather or not.
@@ -3181,6 +4612,7 @@
 		       mmc_hostname(mmc), host->irq, ret);
 		goto untasklet;
 	}
+	sdhci_do_acquire_ownership(mmc);
 
 	sdhci_init(host, 0);
 
@@ -3188,19 +4620,25 @@
 	sdhci_dumpregs(host);
 #endif
 
-#ifdef SDHCI_USE_LEDS_CLASS
-	snprintf(host->led_name, sizeof(host->led_name),
-		"%s::", mmc_hostname(mmc));
-	host->led.name = host->led_name;
-	host->led.brightness = LED_OFF;
-	host->led.default_trigger = mmc_hostname(mmc);
-	host->led.brightness_set = sdhci_led_control;
+	/* dump eMMC flis setting for debug */
+	if (host->ops->gpio_buf_dump)
+		host->ops->gpio_buf_dump(host);
 
-	ret = led_classdev_register(mmc_dev(mmc), &host->led);
-	if (ret) {
-		pr_err("%s: Failed to register LED device: %d\n",
-		       mmc_hostname(mmc), ret);
-		goto reset;
+#ifdef SDHCI_USE_LEDS_CLASS
+	if (mmc->caps2 & MMC_CAP2_LED_SUPPORT) {
+		snprintf(host->led_name, sizeof(host->led_name),
+				"%s::", mmc_hostname(mmc));
+		host->led.name = host->led_name;
+		host->led.brightness = LED_OFF;
+		host->led.default_trigger = mmc_hostname(mmc);
+		host->led.brightness_set = sdhci_led_control;
+
+		ret = led_classdev_register(mmc_dev(mmc), &host->led);
+		if (ret) {
+			pr_err("%s: Failed to register LED device: %d\n",
+					mmc_hostname(mmc), ret);
+			goto reset;
+		}
 	}
 #endif
 
@@ -3215,13 +4653,18 @@
 
 	sdhci_enable_card_detection(host);
 
+	sdhci_release_ownership(mmc);
+
 	return 0;
 
 #ifdef SDHCI_USE_LEDS_CLASS
 reset:
-	sdhci_reset(host, SDHCI_RESET_ALL);
-	sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
-	free_irq(host->irq, host);
+	if (mmc->caps2 & MMC_CAP2_LED_SUPPORT) {
+		sdhci_reset(host, SDHCI_RESET_ALL);
+		sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
+		free_irq(host->irq, host);
+	}
+	sdhci_release_ownership(mmc);
 #endif
 untasklet:
 	tasklet_kill(&host->card_tasklet);
@@ -3257,7 +4700,8 @@
 	mmc_remove_host(host->mmc);
 
 #ifdef SDHCI_USE_LEDS_CLASS
-	led_classdev_unregister(&host->led);
+	if (host->mmc->caps2 & MMC_CAP2_LED_SUPPORT)
+		led_classdev_unregister(&host->led);
 #endif
 
 	if (!dead)
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 379e09d..954dc11 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -17,6 +17,7 @@
 #include <linux/compiler.h>
 #include <linux/types.h>
 #include <linux/io.h>
+#include <linux/gpio.h>
 
 #include <linux/mmc/sdhci.h>
 
@@ -72,6 +73,7 @@
 #define  SDHCI_WRITE_PROTECT	0x00080000
 #define  SDHCI_DATA_LVL_MASK	0x00F00000
 #define   SDHCI_DATA_LVL_SHIFT	20
+#define   SDHCI_DATA_0_LVL_MASK	0x00100000
 
 #define SDHCI_HOST_CONTROL	0x28
 #define  SDHCI_CTRL_LED		0x01
@@ -83,12 +85,15 @@
 #define   SDHCI_CTRL_ADMA32	0x10
 #define   SDHCI_CTRL_ADMA64	0x18
 #define   SDHCI_CTRL_8BITBUS	0x20
+#define	SDHCI_CTRL_CD_SD	0x80
+#define	SDHCI_CTRL_CD_TL	0x40
 
 #define SDHCI_POWER_CONTROL	0x29
 #define  SDHCI_POWER_ON		0x01
 #define  SDHCI_POWER_180	0x0A
 #define  SDHCI_POWER_300	0x0C
 #define  SDHCI_POWER_330	0x0E
+#define	 SDHCI_HW_RESET		0x10
 
 #define SDHCI_BLOCK_GAP_CONTROL	0x2A
 
@@ -130,6 +135,7 @@
 #define  SDHCI_INT_ERROR	0x00008000
 #define  SDHCI_INT_TIMEOUT	0x00010000
 #define  SDHCI_INT_CRC		0x00020000
+#define  SDHCI_INT_CMD_CONFLICT	0x00030000
 #define  SDHCI_INT_END_BIT	0x00040000
 #define  SDHCI_INT_INDEX	0x00080000
 #define  SDHCI_INT_DATA_TIMEOUT	0x00100000
@@ -138,6 +144,7 @@
 #define  SDHCI_INT_BUS_POWER	0x00800000
 #define  SDHCI_INT_ACMD12ERR	0x01000000
 #define  SDHCI_INT_ADMA_ERROR	0x02000000
+#define  SDHCI_INT_TAR_RSP_ERR	0x10000000
 
 #define  SDHCI_INT_NORMAL_MASK	0x00007FFF
 #define  SDHCI_INT_ERROR_MASK	0xFFFF8000
@@ -148,7 +155,7 @@
 		SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | \
 		SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_DATA_CRC | \
 		SDHCI_INT_DATA_END_BIT | SDHCI_INT_ADMA_ERROR | \
-		SDHCI_INT_BLK_GAP)
+		SDHCI_INT_BLK_GAP | SDHCI_INT_TAR_RSP_ERR)
 #define SDHCI_INT_ALL_MASK	((unsigned int)-1)
 
 #define SDHCI_ACMD12_ERR	0x3C
@@ -160,7 +167,8 @@
 #define   SDHCI_CTRL_UHS_SDR50		0x0002
 #define   SDHCI_CTRL_UHS_SDR104		0x0003
 #define   SDHCI_CTRL_UHS_DDR50		0x0004
-#define   SDHCI_CTRL_HS_SDR200		0x0005 /* reserved value in SDIO spec */
+#define   SDHCI_CTRL_HS_SDR200		SDHCI_CTRL_UHS_SDR104
+#define   SDHCI_CTRL_HS_DDR200		0x0005
 #define  SDHCI_CTRL_VDD_180		0x0008
 #define  SDHCI_CTRL_DRV_TYPE_MASK	0x0030
 #define   SDHCI_CTRL_DRV_TYPE_B		0x0000
@@ -198,6 +206,8 @@
 #define  SDHCI_DRIVER_TYPE_D	0x00000040
 #define  SDHCI_RETUNING_TIMER_COUNT_MASK	0x00000F00
 #define  SDHCI_RETUNING_TIMER_COUNT_SHIFT	8
+#define  SDHCI_MAX_TUNING_TIMER			0xb
+#define  SDHCI_OTHER_TUNING_SOURCE		0xf
 #define  SDHCI_USE_SDR50_TUNING			0x00002000
 #define  SDHCI_RETUNING_MODE_MASK		0x0000C000
 #define  SDHCI_RETUNING_MODE_SHIFT		14
@@ -234,6 +244,7 @@
 #define SDHCI_PRESET_FOR_SDR50 0x6A
 #define SDHCI_PRESET_FOR_SDR104        0x6C
 #define SDHCI_PRESET_FOR_DDR50 0x6E
+#define SDHCI_PRESET_FOR_HS400 0x74
 #define SDHCI_PRESET_DRV_MASK  0xC000
 #define SDHCI_PRESET_DRV_SHIFT  14
 #define SDHCI_PRESET_CLKGEN_SEL_MASK   0x400
@@ -294,6 +305,13 @@
 	void	(*platform_resume)(struct sdhci_host *host);
 	void    (*adma_workaround)(struct sdhci_host *host, u32 intmask);
 	void	(*platform_init)(struct sdhci_host *host);
+	int	(*power_up_host)(struct sdhci_host *host);
+	void	(*set_dev_power)(struct sdhci_host *, bool);
+	int	(*get_cd)(struct sdhci_host *host);
+	int	(*get_tuning_count)(struct sdhci_host *host);
+	int	(*gpio_buf_check)(struct sdhci_host *host, unsigned int clk);
+	int	(*gpio_buf_dump)(struct sdhci_host *host);
+	int	(*set_io_voltage)(struct sdhci_host *, bool);
 };
 
 #ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
@@ -392,6 +410,7 @@
 extern void sdhci_card_detect(struct sdhci_host *host);
 extern int sdhci_add_host(struct sdhci_host *host);
 extern void sdhci_remove_host(struct sdhci_host *host, int dead);
+extern int sdhci_try_get_regulator(struct sdhci_host *host);
 
 #ifdef CONFIG_PM
 extern int sdhci_suspend_host(struct sdhci_host *host);
@@ -404,4 +423,5 @@
 extern int sdhci_runtime_resume_host(struct sdhci_host *host);
 #endif
 
+extern void sdhci_alloc_panic_host(struct sdhci_host *host);
 #endif /* __SDHCI_HW_H */
diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c
index fff9286..491e9ec 100644
--- a/drivers/mmc/host/tmio_mmc_dma.c
+++ b/drivers/mmc/host/tmio_mmc_dma.c
@@ -104,6 +104,7 @@
 pio:
 	if (!desc) {
 		/* DMA failed, fall back to PIO */
+		tmio_mmc_enable_dma(host, false);
 		if (ret >= 0)
 			ret = -EIO;
 		host->chan_rx = NULL;
@@ -116,7 +117,6 @@
 		}
 		dev_warn(&host->pdev->dev,
 			 "DMA failed: %d, falling back to PIO\n", ret);
-		tmio_mmc_enable_dma(host, false);
 	}
 
 	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
@@ -185,6 +185,7 @@
 pio:
 	if (!desc) {
 		/* DMA failed, fall back to PIO */
+		tmio_mmc_enable_dma(host, false);
 		if (ret >= 0)
 			ret = -EIO;
 		host->chan_tx = NULL;
@@ -197,7 +198,6 @@
 		}
 		dev_warn(&host->pdev->dev,
 			 "DMA failed: %d, falling back to PIO\n", ret);
-		tmio_mmc_enable_dma(host, false);
 	}
 
 	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 257579d..f2ab08c 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -102,7 +102,7 @@
 
 config MTD_NAND_OMAP_BCH
 	depends on MTD_NAND && MTD_NAND_OMAP2 && ARCH_OMAP3
-	bool "Enable support for hardware BCH error correction"
+	tristate "Enable support for hardware BCH error correction"
 	default n
 	select BCH
 	select BCH_CONST_PARAMS
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index dfcd0a5..fb8c4de 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -2793,7 +2793,9 @@
 
 	if (!chip->select_chip)
 		chip->select_chip = nand_select_chip;
-	if (!chip->read_byte)
+
+	/* If called twice, pointers that depend on busw may need to be reset */
+	if (!chip->read_byte || chip->read_byte == nand_read_byte)
 		chip->read_byte = busw ? nand_read_byte16 : nand_read_byte;
 	if (!chip->read_word)
 		chip->read_word = nand_read_word;
@@ -2801,9 +2803,9 @@
 		chip->block_bad = nand_block_bad;
 	if (!chip->block_markbad)
 		chip->block_markbad = nand_default_block_markbad;
-	if (!chip->write_buf)
+	if (!chip->write_buf || chip->write_buf == nand_write_buf)
 		chip->write_buf = busw ? nand_write_buf16 : nand_write_buf;
-	if (!chip->read_buf)
+	if (!chip->read_buf || chip->read_buf == nand_read_buf)
 		chip->read_buf = busw ? nand_read_buf16 : nand_read_buf;
 	if (!chip->scan_bbt)
 		chip->scan_bbt = nand_default_bbt;
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 5df49d3..c95bfb1 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1069,6 +1069,9 @@
 		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
 			dbg_wl("no WL needed: min used EC %d, max free EC %d",
 			       e1->ec, e2->ec);
+
+			/* Give the unused PEB back */
+			wl_tree_add(e2, &ubi->free);
 			goto out_cancel;
 		}
 		self_check_in_wl_tree(ubi, e1, &ubi->used);
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index a746ba2..a956053 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -1007,7 +1007,7 @@
 
 	soft = &pkt.soft.rfc1201;
 
-	lp->hw.copy_from_card(dev, bufnum, 0, &pkt, sizeof(ARC_HDR_SIZE));
+	lp->hw.copy_from_card(dev, bufnum, 0, &pkt, ARC_HDR_SIZE);
 	if (pkt.hard.offset[0]) {
 		ofs = pkt.hard.offset[0];
 		length = 256 - ofs;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index f975696..8395b09 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1991,6 +1991,7 @@
 	struct bonding *bond = netdev_priv(bond_dev);
 	struct slave *slave, *oldcurrent;
 	struct sockaddr addr;
+	int old_flags = bond_dev->flags;
 	netdev_features_t old_features = bond_dev->features;
 
 	/* slave is not a slave or master is not master of this slave */
@@ -2123,12 +2124,18 @@
 	 * already taken care of above when we detached the slave
 	 */
 	if (!USES_PRIMARY(bond->params.mode)) {
-		/* unset promiscuity level from slave */
-		if (bond_dev->flags & IFF_PROMISC)
+		/* unset promiscuity level from slave
+		 * NOTE: The NETDEV_CHANGEADDR call above may change the value
+		 * of the IFF_PROMISC flag in the bond_dev, but we need the
+		 * value of that flag before that change, as that was the value
+		 * when this slave was attached, so we cache at the start of the
+		 * function and use it here. Same goes for ALLMULTI below
+		 */
+		if (old_flags & IFF_PROMISC)
 			dev_set_promiscuity(slave_dev, -1);
 
 		/* unset allmulti level from slave */
-		if (bond_dev->flags & IFF_ALLMULTI)
+		if (old_flags & IFF_ALLMULTI)
 			dev_set_allmulti(slave_dev, -1);
 
 		/* flush master's mc_list from slave */
@@ -3770,11 +3777,17 @@
  * The bonding ndo_neigh_setup is called at init time beofre any
  * slave exists. So we must declare proxy setup function which will
  * be used at run time to resolve the actual slave neigh param setup.
+ *
+ * It's also called by master devices (such as vlans) to setup their
+ * underlying devices. In that case - do nothing, we're already set up from
+ * our init.
  */
 static int bond_neigh_setup(struct net_device *dev,
 			    struct neigh_parms *parms)
 {
-	parms->neigh_setup   = bond_neigh_init;
+	/* modify only our neigh_parms */
+	if (parms->dev == dev)
+		parms->neigh_setup = bond_neigh_init;
 
 	return 0;
 }
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index db52f441..535d5dd 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -1409,10 +1409,10 @@
 
 static const struct platform_device_id at91_can_id_table[] = {
 	{
-		.name = "at91_can",
+		.name = "at91sam9x5_can",
 		.driver_data = (kernel_ulong_t)&at91_at91sam9x5_data,
 	}, {
-		.name = "at91sam9x5_can",
+		.name = "at91_can",
 		.driver_data = (kernel_ulong_t)&at91_at91sam9263_data,
 	}, {
 		/* sentinel */
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index f9cba41..1870c47 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -705,14 +705,14 @@
 	size_t size;
 
 	size = nla_total_size(sizeof(u32));   /* IFLA_CAN_STATE */
-	size += sizeof(struct can_ctrlmode);  /* IFLA_CAN_CTRLMODE */
+	size += nla_total_size(sizeof(struct can_ctrlmode));  /* IFLA_CAN_CTRLMODE */
 	size += nla_total_size(sizeof(u32));  /* IFLA_CAN_RESTART_MS */
-	size += sizeof(struct can_bittiming); /* IFLA_CAN_BITTIMING */
-	size += sizeof(struct can_clock);     /* IFLA_CAN_CLOCK */
+	size += nla_total_size(sizeof(struct can_bittiming)); /* IFLA_CAN_BITTIMING */
+	size += nla_total_size(sizeof(struct can_clock));     /* IFLA_CAN_CLOCK */
 	if (priv->do_get_berr_counter)        /* IFLA_CAN_BERR_COUNTER */
-		size += sizeof(struct can_berr_counter);
+		size += nla_total_size(sizeof(struct can_berr_counter));
 	if (priv->bittiming_const)	      /* IFLA_CAN_BITTIMING_CONST */
-		size += sizeof(struct can_bittiming_const);
+		size += nla_total_size(sizeof(struct can_bittiming_const));
 
 	return size;
 }
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 769d29e..a8f33a5 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -63,7 +63,7 @@
 #define FLEXCAN_MCR_BCC			BIT(16)
 #define FLEXCAN_MCR_LPRIO_EN		BIT(13)
 #define FLEXCAN_MCR_AEN			BIT(12)
-#define FLEXCAN_MCR_MAXMB(x)		((x) & 0xf)
+#define FLEXCAN_MCR_MAXMB(x)		((x) & 0x1f)
 #define FLEXCAN_MCR_IDAM_A		(0 << 8)
 #define FLEXCAN_MCR_IDAM_B		(1 << 8)
 #define FLEXCAN_MCR_IDAM_C		(2 << 8)
@@ -745,9 +745,11 @@
 	 *
 	 */
 	reg_mcr = flexcan_read(&regs->mcr);
+	reg_mcr &= ~FLEXCAN_MCR_MAXMB(0xff);
 	reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_FEN | FLEXCAN_MCR_HALT |
 		FLEXCAN_MCR_SUPV | FLEXCAN_MCR_WRN_EN |
-		FLEXCAN_MCR_IDAM_C | FLEXCAN_MCR_SRX_DIS;
+		FLEXCAN_MCR_IDAM_C | FLEXCAN_MCR_SRX_DIS |
+		FLEXCAN_MCR_MAXMB(FLEXCAN_TX_BUF_ID);
 	netdev_dbg(dev, "%s: writing mcr=0x%08x", __func__, reg_mcr);
 	flexcan_write(reg_mcr, &regs->mcr);
 
@@ -792,6 +794,10 @@
 			&regs->cantxfg[i].can_ctrl);
 	}
 
+	/* Abort any pending TX, mark Mailbox as INACTIVE */
+	flexcan_write(FLEXCAN_MB_CNT_CODE(0x4),
+		      &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl);
+
 	/* acceptance mask/acceptance code (accept everything) */
 	flexcan_write(0x0, &regs->rxgmask);
 	flexcan_write(0x0, &regs->rx14mask);
@@ -983,9 +989,9 @@
 }
 
 static const struct of_device_id flexcan_of_match[] = {
-	{ .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, },
-	{ .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, },
 	{ .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, },
+	{ .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, },
+	{ .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, },
 	{ /* sentinel */ },
 };
 MODULE_DEVICE_TABLE(of, flexcan_of_match);
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index 25723d8..925ab8e 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -649,7 +649,7 @@
 		if ((mc->ptr + rec_len) > mc->end)
 			goto decode_failed;
 
-		memcpy(cf->data, mc->ptr, rec_len);
+		memcpy(cf->data, mc->ptr, cf->can_dlc);
 		mc->ptr += rec_len;
 	}
 
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 42aa54a..b710c6b 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -185,6 +185,8 @@
 
 	rtnl_lock();
 	err = __rtnl_link_register(&dummy_link_ops);
+	if (err < 0)
+		goto out;
 
 	for (i = 0; i < numdummies && !err; i++) {
 		err = dummy_init_one();
@@ -192,6 +194,8 @@
 	}
 	if (err < 0)
 		__rtnl_link_unregister(&dummy_link_ops);
+
+out:
 	rtnl_unlock();
 
 	return err;
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 418de8b..d30085c 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1303,6 +1303,8 @@
 
 	SET_NETDEV_DEV(netdev, &pdev->dev);
 	alx = netdev_priv(netdev);
+	spin_lock_init(&alx->hw.mdio_lock);
+	spin_lock_init(&alx->irq_lock);
 	alx->dev = netdev;
 	alx->hw.pdev = pdev;
 	alx->msg_enable = NETIF_MSG_LINK | NETIF_MSG_HW | NETIF_MSG_IFUP |
@@ -1385,9 +1387,6 @@
 
 	INIT_WORK(&alx->link_check_wk, alx_link_check);
 	INIT_WORK(&alx->reset_wk, alx_reset);
-	spin_lock_init(&alx->hw.mdio_lock);
-	spin_lock_init(&alx->irq_lock);
-
 	netif_carrier_off(netdev);
 
 	err = register_netdev(netdev);
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c.h b/drivers/net/ethernet/atheros/atl1c/atl1c.h
index b2bf324..0f05565 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c.h
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c.h
@@ -520,6 +520,9 @@
 	struct net_device   *netdev;
 	struct pci_dev      *pdev;
 	struct napi_struct  napi;
+	struct page         *rx_page;
+	unsigned int	    rx_page_offset;
+	unsigned int	    rx_frag_size;
 	struct atl1c_hw        hw;
 	struct atl1c_hw_stats  hw_stats;
 	struct mii_if_info  mii;    /* MII interface info */
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 0ba9007..11cdf1d 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -481,10 +481,15 @@
 static void atl1c_set_rxbufsize(struct atl1c_adapter *adapter,
 				struct net_device *dev)
 {
+	unsigned int head_size;
 	int mtu = dev->mtu;
 
 	adapter->rx_buffer_len = mtu > AT_RX_BUF_SIZE ?
 		roundup(mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN, 8) : AT_RX_BUF_SIZE;
+
+	head_size = SKB_DATA_ALIGN(adapter->rx_buffer_len + NET_SKB_PAD) +
+		    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	adapter->rx_frag_size = roundup_pow_of_two(head_size);
 }
 
 static netdev_features_t atl1c_fix_features(struct net_device *netdev,
@@ -952,6 +957,10 @@
 		kfree(adapter->tpd_ring[0].buffer_info);
 		adapter->tpd_ring[0].buffer_info = NULL;
 	}
+	if (adapter->rx_page) {
+		put_page(adapter->rx_page);
+		adapter->rx_page = NULL;
+	}
 }
 
 /**
@@ -1639,6 +1648,35 @@
 	skb_checksum_none_assert(skb);
 }
 
+static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter)
+{
+	struct sk_buff *skb;
+	struct page *page;
+
+	if (adapter->rx_frag_size > PAGE_SIZE)
+		return netdev_alloc_skb(adapter->netdev,
+					adapter->rx_buffer_len);
+
+	page = adapter->rx_page;
+	if (!page) {
+		adapter->rx_page = page = alloc_page(GFP_ATOMIC);
+		if (unlikely(!page))
+			return NULL;
+		adapter->rx_page_offset = 0;
+	}
+
+	skb = build_skb(page_address(page) + adapter->rx_page_offset,
+			adapter->rx_frag_size);
+	if (likely(skb)) {
+		adapter->rx_page_offset += adapter->rx_frag_size;
+		if (adapter->rx_page_offset >= PAGE_SIZE)
+			adapter->rx_page = NULL;
+		else
+			get_page(page);
+	}
+	return skb;
+}
+
 static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter)
 {
 	struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
@@ -1660,7 +1698,7 @@
 	while (next_info->flags & ATL1C_BUFFER_FREE) {
 		rfd_desc = ATL1C_RFD_DESC(rfd_ring, rfd_next_to_use);
 
-		skb = netdev_alloc_skb(adapter->netdev, adapter->rx_buffer_len);
+		skb = atl1c_alloc_skb(adapter);
 		if (unlikely(!skb)) {
 			if (netif_msg_rx_err(adapter))
 				dev_warn(&pdev->dev, "alloc rx buffer failed\n");
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 0688bb8..c23bb02 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -1665,8 +1665,8 @@
 	return 0;
 }
 
-static void atl1e_tx_map(struct atl1e_adapter *adapter,
-		      struct sk_buff *skb, struct atl1e_tpd_desc *tpd)
+static int atl1e_tx_map(struct atl1e_adapter *adapter,
+			struct sk_buff *skb, struct atl1e_tpd_desc *tpd)
 {
 	struct atl1e_tpd_desc *use_tpd = NULL;
 	struct atl1e_tx_buffer *tx_buffer = NULL;
@@ -1677,6 +1677,8 @@
 	u16 nr_frags;
 	u16 f;
 	int segment;
+	int ring_start = adapter->tx_ring.next_to_use;
+	int ring_end;
 
 	nr_frags = skb_shinfo(skb)->nr_frags;
 	segment = (tpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK;
@@ -1689,6 +1691,9 @@
 		tx_buffer->length = map_len;
 		tx_buffer->dma = pci_map_single(adapter->pdev,
 					skb->data, hdr_len, PCI_DMA_TODEVICE);
+		if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma))
+			return -ENOSPC;
+
 		ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_SINGLE);
 		mapped_len += map_len;
 		use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma);
@@ -1715,6 +1720,22 @@
 		tx_buffer->dma =
 			pci_map_single(adapter->pdev, skb->data + mapped_len,
 					map_len, PCI_DMA_TODEVICE);
+
+		if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma)) {
+			/* We need to unwind the mappings we've done */
+			ring_end = adapter->tx_ring.next_to_use;
+			adapter->tx_ring.next_to_use = ring_start;
+			while (adapter->tx_ring.next_to_use != ring_end) {
+				tpd = atl1e_get_tpd(adapter);
+				tx_buffer = atl1e_get_tx_buffer(adapter, tpd);
+				pci_unmap_single(adapter->pdev, tx_buffer->dma,
+						 tx_buffer->length, PCI_DMA_TODEVICE);
+			}
+			/* Reset the tx rings next pointer */
+			adapter->tx_ring.next_to_use = ring_start;
+			return -ENOSPC;
+		}
+
 		ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_SINGLE);
 		mapped_len  += map_len;
 		use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma);
@@ -1750,6 +1771,23 @@
 							  (i * MAX_TX_BUF_LEN),
 							  tx_buffer->length,
 							  DMA_TO_DEVICE);
+
+			if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma)) {
+				/* We need to unwind the mappings we've done */
+				ring_end = adapter->tx_ring.next_to_use;
+				adapter->tx_ring.next_to_use = ring_start;
+				while (adapter->tx_ring.next_to_use != ring_end) {
+					tpd = atl1e_get_tpd(adapter);
+					tx_buffer = atl1e_get_tx_buffer(adapter, tpd);
+					dma_unmap_page(&adapter->pdev->dev, tx_buffer->dma,
+						       tx_buffer->length, DMA_TO_DEVICE);
+				}
+
+				/* Reset the ring next to use pointer */
+				adapter->tx_ring.next_to_use = ring_start;
+				return -ENOSPC;
+			}
+
 			ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_PAGE);
 			use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma);
 			use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) |
@@ -1767,6 +1805,7 @@
 	/* The last buffer info contain the skb address,
 	   so it will be free after unmap */
 	tx_buffer->skb = skb;
+	return 0;
 }
 
 static void atl1e_tx_queue(struct atl1e_adapter *adapter, u16 count,
@@ -1834,10 +1873,15 @@
 		return NETDEV_TX_OK;
 	}
 
-	atl1e_tx_map(adapter, skb, tpd);
+	if (atl1e_tx_map(adapter, skb, tpd)) {
+		dev_kfree_skb_any(skb);
+		goto out;
+	}
+
 	atl1e_tx_queue(adapter, tpd_req, tpd);
 
 	netdev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
+out:
 	spin_unlock_irqrestore(&adapter->tx_lock, flags);
 	return NETDEV_TX_OK;
 }
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index eec0af4..1c6bc96 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -908,7 +908,7 @@
 		struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
 		u8 et_swtype = 0;
 		u8 sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHY |
-			     BGMAC_CHIPCTL_1_IF_TYPE_RMII;
+			     BGMAC_CHIPCTL_1_IF_TYPE_MII;
 		char buf[2];
 
 		if (bcm47xx_nvram_getenv("et_swtype", buf, 1) > 0) {
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 98d4b5f..12a35cf 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -333,7 +333,7 @@
 
 #define BGMAC_CHIPCTL_1_IF_TYPE_MASK		0x00000030
 #define BGMAC_CHIPCTL_1_IF_TYPE_RMII		0x00000000
-#define BGMAC_CHIPCTL_1_IF_TYPE_MI		0x00000010
+#define BGMAC_CHIPCTL_1_IF_TYPE_MII		0x00000010
 #define BGMAC_CHIPCTL_1_IF_TYPE_RGMII		0x00000020
 #define BGMAC_CHIPCTL_1_SW_TYPE_MASK		0x000000C0
 #define BGMAC_CHIPCTL_1_SW_TYPE_EPHY		0x00000000
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 638e554..8c4babc 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -670,6 +670,7 @@
 		}
 	}
 #endif
+	skb_record_rx_queue(skb, fp->rx_queue);
 	napi_gro_receive(&fp->napi, skb);
 }
 
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index a13463e..0877a05 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -3003,6 +3003,19 @@
 	return false;
 }
 
+static bool tg3_phy_led_bug(struct tg3 *tp)
+{
+	switch (tg3_asic_rev(tp)) {
+	case ASIC_REV_5719:
+		if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
+		    !tp->pci_fn)
+			return true;
+		return false;
+	}
+
+	return false;
+}
+
 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
 {
 	u32 val;
@@ -3050,8 +3063,9 @@
 		}
 		return;
 	} else if (do_low_power) {
-		tg3_writephy(tp, MII_TG3_EXT_CTRL,
-			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
+		if (!tg3_phy_led_bug(tp))
+			tg3_writephy(tp, MII_TG3_EXT_CTRL,
+				     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
 
 		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
 		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index c89aa41..b4e0dc8 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -1070,7 +1070,7 @@
 static void macb_configure_caps(struct macb *bp)
 {
 	if (macb_is_gem(bp)) {
-		if (GEM_BF(IRQCOR, gem_readl(bp, DCFG1)) == 0)
+		if (GEM_BFEXT(IRQCOR, gem_readl(bp, DCFG1)) == 0)
 			bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
 	}
 }
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index f12e6b8..f057a18 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -1600,7 +1600,8 @@
 	flits = skb_transport_offset(skb) / 8;
 	sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
 	sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
-			     skb->tail - skb->transport_header,
+			     skb_tail_pointer(skb) -
+			     skb_transport_header(skb),
 			     adap->pdev);
 	if (need_skb_unmap()) {
 		setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 1db2df6..696674e 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1150,7 +1150,6 @@
 
 	if (lancer_chip(adapter)) {
 		req->hdr.version = 1;
-		req->if_id = cpu_to_le16(adapter->if_handle);
 	} else if (BEx_chip(adapter)) {
 		if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC)
 			req->hdr.version = 2;
@@ -1158,6 +1157,8 @@
 		req->hdr.version = 2;
 	}
 
+	if (req->hdr.version > 0)
+		req->if_id = cpu_to_le16(adapter->if_handle);
 	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
 	req->ulp_num = BE_ULP1_NUM;
 	req->type = BE_ETH_TX_RING_TYPE_STANDARD;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index a0b4be5..7371626 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -782,16 +782,22 @@
 
 	if (vlan_tx_tag_present(skb))
 		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
-	else if (qnq_async_evt_rcvd(adapter) && adapter->pvid)
-		vlan_tag = adapter->pvid;
+
+	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
+		if (!vlan_tag)
+			vlan_tag = adapter->pvid;
+		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
+		 * skip VLAN insertion
+		 */
+		if (skip_hw_vlan)
+			*skip_hw_vlan = true;
+	}
 
 	if (vlan_tag) {
 		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
 		if (unlikely(!skb))
 			return skb;
 		skb->vlan_tci = 0;
-		if (skip_hw_vlan)
-			*skip_hw_vlan = true;
 	}
 
 	/* Insert the outer VLAN, if any */
@@ -2555,8 +2561,8 @@
 	/* Wait for all pending tx completions to arrive so that
 	 * all tx skbs are freed.
 	 */
-	be_tx_compl_clean(adapter);
 	netif_tx_disable(netdev);
+	be_tx_compl_clean(adapter);
 
 	be_rx_qs_destroy(adapter);
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
index ac78077..7a77f37 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
@@ -108,9 +108,8 @@
 
 	/* Enable arbiter */
 	reg &= ~IXGBE_DPMCS_ARBDIS;
-	/* Enable DFP and Recycle mode */
-	reg |= (IXGBE_DPMCS_TDPAC | IXGBE_DPMCS_TRM);
 	reg |= IXGBE_DPMCS_TSOEF;
+
 	/* Configure Max TSO packet size 34KB including payload and headers */
 	reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT);
 
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index d1cbfb1..4be11ff 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1125,15 +1125,13 @@
 	p->rx_discard += rdlp(mp, RX_DISCARD_FRAME_CNT);
 	p->rx_overrun += rdlp(mp, RX_OVERRUN_FRAME_CNT);
 	spin_unlock_bh(&mp->mib_counters_lock);
-
-	mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
 }
 
 static void mib_counters_timer_wrapper(unsigned long _mp)
 {
 	struct mv643xx_eth_private *mp = (void *)_mp;
-
 	mib_counters_update(mp);
+	mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
 }
 
 
@@ -2231,6 +2229,7 @@
 		mp->int_mask |= INT_TX_END_0 << i;
 	}
 
+	add_timer(&mp->mib_counters_timer);
 	port_start(mp);
 
 	wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX);
@@ -2739,7 +2738,6 @@
 	mp->mib_counters_timer.data = (unsigned long)mp;
 	mp->mib_counters_timer.function = mib_counters_timer_wrapper;
 	mp->mib_counters_timer.expires = jiffies + 30 * HZ;
-	add_timer(&mp->mib_counters_timer);
 
 	spin_lock_init(&mp->mib_counters_lock);
 
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index c966785..254f255 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -136,7 +136,9 @@
 #define      MVNETA_GMAC_FORCE_LINK_PASS         BIT(1)
 #define      MVNETA_GMAC_CONFIG_MII_SPEED        BIT(5)
 #define      MVNETA_GMAC_CONFIG_GMII_SPEED       BIT(6)
+#define      MVNETA_GMAC_AN_SPEED_EN             BIT(7)
 #define      MVNETA_GMAC_CONFIG_FULL_DUPLEX      BIT(12)
+#define      MVNETA_GMAC_AN_DUPLEX_EN            BIT(13)
 #define MVNETA_MIB_COUNTERS_BASE                 0x3080
 #define      MVNETA_MIB_LATE_COLLISION           0x7c
 #define MVNETA_DA_FILT_SPEC_MCAST                0x3400
@@ -911,6 +913,13 @@
 	/* Assign port SDMA configuration */
 	mvreg_write(pp, MVNETA_SDMA_CONFIG, val);
 
+	/* Disable PHY polling in hardware, since we're using the
+	 * kernel phylib to do this.
+	 */
+	val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
+	val &= ~MVNETA_PHY_POLLING_ENABLE;
+	mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
+
 	mvneta_set_ucast_table(pp, -1);
 	mvneta_set_special_mcast_table(pp, -1);
 	mvneta_set_other_mcast_table(pp, -1);
@@ -2288,7 +2297,9 @@
 			val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
 			val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
 				 MVNETA_GMAC_CONFIG_GMII_SPEED |
-				 MVNETA_GMAC_CONFIG_FULL_DUPLEX);
+				 MVNETA_GMAC_CONFIG_FULL_DUPLEX |
+				 MVNETA_GMAC_AN_SPEED_EN |
+				 MVNETA_GMAC_AN_DUPLEX_EN);
 
 			if (phydev->duplex)
 				val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 0e572a5..28d706b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1544,7 +1544,7 @@
 			vp_oper->vlan_idx = NO_INDX;
 		}
 		if (NO_INDX != vp_oper->mac_idx) {
-			__mlx4_unregister_mac(&priv->dev, port, vp_oper->mac_idx);
+			__mlx4_unregister_mac(&priv->dev, port, vp_oper->state.mac);
 			vp_oper->mac_idx = NO_INDX;
 		}
 	}
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 2c97901..593177d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -840,16 +840,7 @@
 			   MLX4_CMD_NATIVE);
 
 	if (!err && dev->caps.function != slave) {
-		/* if config MAC in DB use it */
-		if (priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac)
-			def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac;
-		else {
-			/* set slave default_mac address */
-			MLX4_GET(def_mac, outbox->buf, QUERY_PORT_MAC_OFFSET);
-			def_mac += slave << 8;
-			priv->mfunc.master.vf_admin[slave].vport[vhcr->in_modifier].mac = def_mac;
-		}
-
+		def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac;
 		MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET);
 
 		/* get port type - currently only eth is enabled */
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 8a43499..1b195fc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -371,7 +371,7 @@
 
 	dev->caps.sqp_demux = (mlx4_is_master(dev)) ? MLX4_MAX_NUM_SLAVES : 0;
 
-	if (!enable_64b_cqe_eqe) {
+	if (!enable_64b_cqe_eqe && !mlx4_is_slave(dev)) {
 		if (dev_cap->flags &
 		    (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) {
 			mlx4_warn(dev, "64B EQEs/CQEs supported by the device but not enabled\n");
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 0352345..9095ff9 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -478,7 +478,7 @@
 
 	while (1) {
 		u32 status, len;
-		dma_addr_t mapping;
+		dma_addr_t mapping, new_mapping;
 		struct sk_buff *skb, *new_skb;
 		struct cp_desc *desc;
 		const unsigned buflen = cp->rx_buf_sz;
@@ -520,6 +520,14 @@
 			goto rx_next;
 		}
 
+		new_mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
+					 PCI_DMA_FROMDEVICE);
+		if (dma_mapping_error(&cp->pdev->dev, new_mapping)) {
+			dev->stats.rx_dropped++;
+			kfree_skb(new_skb);
+			goto rx_next;
+		}
+
 		dma_unmap_single(&cp->pdev->dev, mapping,
 				 buflen, PCI_DMA_FROMDEVICE);
 
@@ -531,12 +539,11 @@
 
 		skb_put(skb, len);
 
-		mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
-					 PCI_DMA_FROMDEVICE);
 		cp->rx_skb[rx_tail] = new_skb;
 
 		cp_rx_skb(cp, skb, desc);
 		rx++;
+		mapping = new_mapping;
 
 rx_next:
 		cp->rx_ring[rx_tail].opts2 = 0;
@@ -716,6 +723,22 @@
 		TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
 }
 
+static void unwind_tx_frag_mapping(struct cp_private *cp, struct sk_buff *skb,
+				   int first, int entry_last)
+{
+	int frag, index;
+	struct cp_desc *txd;
+	skb_frag_t *this_frag;
+	for (frag = 0; frag+first < entry_last; frag++) {
+		index = first+frag;
+		cp->tx_skb[index] = NULL;
+		txd = &cp->tx_ring[index];
+		this_frag = &skb_shinfo(skb)->frags[frag];
+		dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
+				 skb_frag_size(this_frag), PCI_DMA_TODEVICE);
+	}
+}
+
 static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
 					struct net_device *dev)
 {
@@ -749,6 +772,9 @@
 
 		len = skb->len;
 		mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
+		if (dma_mapping_error(&cp->pdev->dev, mapping))
+			goto out_dma_error;
+
 		txd->opts2 = opts2;
 		txd->addr = cpu_to_le64(mapping);
 		wmb();
@@ -786,6 +812,9 @@
 		first_len = skb_headlen(skb);
 		first_mapping = dma_map_single(&cp->pdev->dev, skb->data,
 					       first_len, PCI_DMA_TODEVICE);
+		if (dma_mapping_error(&cp->pdev->dev, first_mapping))
+			goto out_dma_error;
+
 		cp->tx_skb[entry] = skb;
 		entry = NEXT_TX(entry);
 
@@ -799,6 +828,11 @@
 			mapping = dma_map_single(&cp->pdev->dev,
 						 skb_frag_address(this_frag),
 						 len, PCI_DMA_TODEVICE);
+			if (dma_mapping_error(&cp->pdev->dev, mapping)) {
+				unwind_tx_frag_mapping(cp, skb, first_entry, entry);
+				goto out_dma_error;
+			}
+
 			eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
 
 			ctrl = eor | len | DescOwn;
@@ -859,11 +893,16 @@
 	if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
 		netif_stop_queue(dev);
 
+out_unlock:
 	spin_unlock_irqrestore(&cp->lock, intr_flags);
 
 	cpw8(TxPoll, NormalTxPoll);
 
 	return NETDEV_TX_OK;
+out_dma_error:
+	kfree_skb(skb);
+	cp->dev->stats.tx_dropped++;
+	goto out_unlock;
 }
 
 /* Set or clear the multicast filter for this adaptor.
@@ -1054,6 +1093,10 @@
 
 		mapping = dma_map_single(&cp->pdev->dev, skb->data,
 					 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+		if (dma_mapping_error(&cp->pdev->dev, mapping)) {
+			kfree_skb(skb);
+			goto err_out;
+		}
 		cp->rx_skb[i] = skb;
 
 		cp->rx_ring[i].opts2 = 0;
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 393f961..7199d2a 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -4218,6 +4218,7 @@
 	case RTL_GIGA_MAC_VER_23:
 	case RTL_GIGA_MAC_VER_24:
 	case RTL_GIGA_MAC_VER_34:
+	case RTL_GIGA_MAC_VER_35:
 		RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
 		break;
 	case RTL_GIGA_MAC_VER_40:
diff --git a/drivers/net/ethernet/sfc/filter.c b/drivers/net/ethernet/sfc/filter.c
index 2397f0e..a520465 100644
--- a/drivers/net/ethernet/sfc/filter.c
+++ b/drivers/net/ethernet/sfc/filter.c
@@ -675,7 +675,7 @@
 		BUILD_BUG_ON(EFX_FILTER_INDEX_UC_DEF != 0);
 		BUILD_BUG_ON(EFX_FILTER_INDEX_MC_DEF !=
 			     EFX_FILTER_MC_DEF - EFX_FILTER_UC_DEF);
-		rep_index = spec->type - EFX_FILTER_INDEX_UC_DEF;
+		rep_index = spec->type - EFX_FILTER_UC_DEF;
 		ins_index = rep_index;
 
 		spin_lock_bh(&state->lock);
@@ -1196,7 +1196,9 @@
 	EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
 	ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
 
-	efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT, 0, rxq_index);
+	efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
+			   efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
+			   rxq_index);
 	rc = efx_filter_set_ipv4_full(&spec, ip->protocol,
 				      ip->daddr, ports[1], ip->saddr, ports[0]);
 	if (rc)
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index a7dfe36..5173eaa 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -282,9 +282,9 @@
 }
 
 /* Recycle the pages that are used by buffers that have just been received. */
-static void efx_recycle_rx_buffers(struct efx_channel *channel,
-				   struct efx_rx_buffer *rx_buf,
-				   unsigned int n_frags)
+static void efx_recycle_rx_pages(struct efx_channel *channel,
+				 struct efx_rx_buffer *rx_buf,
+				 unsigned int n_frags)
 {
 	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
 
@@ -294,6 +294,20 @@
 	} while (--n_frags);
 }
 
+static void efx_discard_rx_packet(struct efx_channel *channel,
+				  struct efx_rx_buffer *rx_buf,
+				  unsigned int n_frags)
+{
+	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
+
+	efx_recycle_rx_pages(channel, rx_buf, n_frags);
+
+	do {
+		efx_free_rx_buffer(rx_buf);
+		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
+	} while (--n_frags);
+}
+
 /**
  * efx_fast_push_rx_descriptors - push new RX descriptors quickly
  * @rx_queue:		RX descriptor queue
@@ -533,8 +547,7 @@
 	 */
 	if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
 		efx_rx_flush_packet(channel);
-		put_page(rx_buf->page);
-		efx_recycle_rx_buffers(channel, rx_buf, n_frags);
+		efx_discard_rx_packet(channel, rx_buf, n_frags);
 		return;
 	}
 
@@ -570,9 +583,9 @@
 		efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
 	}
 
-	/* All fragments have been DMA-synced, so recycle buffers and pages. */
+	/* All fragments have been DMA-synced, so recycle pages. */
 	rx_buf = efx_rx_buffer(rx_queue, index);
-	efx_recycle_rx_buffers(channel, rx_buf, n_frags);
+	efx_recycle_rx_pages(channel, rx_buf, n_frags);
 
 	/* Pipeline receives so that we give time for packet headers to be
 	 * prefetched into cache.
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 1df0ff3..3df5684 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -1239,6 +1239,8 @@
 		dev_set_drvdata(&vdev->dev, NULL);
 
 		kfree(port);
+
+		unregister_netdev(vp->dev);
 	}
 	return 0;
 }
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 860e15d..7233610 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -876,8 +876,7 @@
 		    netdev_mc_count(ndev) > EMAC_DEF_MAX_MULTICAST_ADDRESSES) {
 			mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
 			emac_add_mcast(priv, EMAC_ALL_MULTI_SET, NULL);
-		}
-		if (!netdev_mc_empty(ndev)) {
+		} else if (!netdev_mc_empty(ndev)) {
 			struct netdev_hw_addr *ha;
 
 			mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index ca98aca..75b82b6 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -32,7 +32,7 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #define DRV_NAME	"via-rhine"
-#define DRV_VERSION	"1.5.0"
+#define DRV_VERSION	"1.5.1"
 #define DRV_RELDATE	"2010-10-09"
 
 #include <linux/types.h>
@@ -1694,7 +1694,12 @@
 		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
 
 	if (unlikely(vlan_tx_tag_present(skb))) {
-		rp->tx_ring[entry].tx_status = cpu_to_le32((vlan_tx_tag_get(skb)) << 16);
+		u16 vid_pcp = vlan_tx_tag_get(skb);
+
+		/* drop CFI/DEI bit, register needs VID and PCP */
+		vid_pcp = (vid_pcp & VLAN_VID_MASK) |
+			  ((vid_pcp & VLAN_PRIO_MASK) >> 1);
+		rp->tx_ring[entry].tx_status = cpu_to_le32((vid_pcp) << 16);
 		/* request tagging */
 		rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
 	}
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 57c2e5e..5444f2b 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -297,6 +297,12 @@
 		       lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
 	lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);
 
+	/* Init descriptor indexes */
+	lp->tx_bd_ci = 0;
+	lp->tx_bd_next = 0;
+	lp->tx_bd_tail = 0;
+	lp->rx_bd_ci = 0;
+
 	return 0;
 
 out:
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 4dccead..23a0fff 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -431,8 +431,8 @@
 	net->netdev_ops = &device_ops;
 
 	/* TODO: Add GSO and Checksum offload */
-	net->hw_features = NETIF_F_SG;
-	net->features = NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_TX;
+	net->hw_features = 0;
+	net->features = NETIF_F_HW_VLAN_CTAG_TX;
 
 	SET_ETHTOOL_OPS(net, &ethtool_ops);
 	SET_NETDEV_DEV(net, &dev->device);
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index dc9f6a4..a3bed28 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -291,11 +291,17 @@
 
 	rtnl_lock();
 	err = __rtnl_link_register(&ifb_link_ops);
+	if (err < 0)
+		goto out;
 
-	for (i = 0; i < numifbs && !err; i++)
+	for (i = 0; i < numifbs && !err; i++) {
 		err = ifb_init_one(i);
+		cond_resched();
+	}
 	if (err)
 		__rtnl_link_unregister(&ifb_link_ops);
+
+out:
 	rtnl_unlock();
 
 	return err;
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 6e91931..06eba6e 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -727,6 +727,10 @@
 			return -EADDRNOTAVAIL;
 	}
 
+	if (data && data[IFLA_MACVLAN_FLAGS] &&
+	    nla_get_u16(data[IFLA_MACVLAN_FLAGS]) & ~MACVLAN_FLAG_NOPROMISC)
+		return -EINVAL;
+
 	if (data && data[IFLA_MACVLAN_MODE]) {
 		switch (nla_get_u32(data[IFLA_MACVLAN_MODE])) {
 		case MACVLAN_MODE_PRIVATE:
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index b6dd6a7..523d6b2 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -633,6 +633,28 @@
 	return 0;
 }
 
+static unsigned long iov_pages(const struct iovec *iv, int offset,
+			       unsigned long nr_segs)
+{
+	unsigned long seg, base;
+	int pages = 0, len, size;
+
+	while (nr_segs && (offset >= iv->iov_len)) {
+		offset -= iv->iov_len;
+		++iv;
+		--nr_segs;
+	}
+
+	for (seg = 0; seg < nr_segs; seg++) {
+		base = (unsigned long)iv[seg].iov_base + offset;
+		len = iv[seg].iov_len - offset;
+		size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
+		pages += size;
+		offset = 0;
+	}
+
+	return pages;
+}
 
 /* Get packet from user space buffer */
 static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
@@ -647,6 +669,7 @@
 	int vnet_hdr_len = 0;
 	int copylen = 0;
 	bool zerocopy = false;
+	size_t linear;
 
 	if (q->flags & IFF_VNET_HDR) {
 		vnet_hdr_len = q->vnet_hdr_sz;
@@ -678,42 +701,35 @@
 	if (unlikely(count > UIO_MAXIOV))
 		goto err;
 
-	if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY))
-		zerocopy = true;
+	if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
+		copylen = vnet_hdr.hdr_len ? vnet_hdr.hdr_len : GOODCOPY_LEN;
+		linear = copylen;
+		if (iov_pages(iv, vnet_hdr_len + copylen, count)
+		    <= MAX_SKB_FRAGS)
+			zerocopy = true;
+	}
 
-	if (zerocopy) {
-		/* Userspace may produce vectors with count greater than
-		 * MAX_SKB_FRAGS, so we need to linearize parts of the skb
-		 * to let the rest of data to be fit in the frags.
-		 */
-		if (count > MAX_SKB_FRAGS) {
-			copylen = iov_length(iv, count - MAX_SKB_FRAGS);
-			if (copylen < vnet_hdr_len)
-				copylen = 0;
-			else
-				copylen -= vnet_hdr_len;
-		}
-		/* There are 256 bytes to be copied in skb, so there is enough
-		 * room for skb expand head in case it is used.
-		 * The rest buffer is mapped from userspace.
-		 */
-		if (copylen < vnet_hdr.hdr_len)
-			copylen = vnet_hdr.hdr_len;
-		if (!copylen)
-			copylen = GOODCOPY_LEN;
-	} else
+	if (!zerocopy) {
 		copylen = len;
+		linear = vnet_hdr.hdr_len;
+	}
 
 	skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen,
-				vnet_hdr.hdr_len, noblock, &err);
+				linear, noblock, &err);
 	if (!skb)
 		goto err;
 
 	if (zerocopy)
 		err = zerocopy_sg_from_iovec(skb, iv, vnet_hdr_len, count);
-	else
+	else {
 		err = skb_copy_datagram_from_iovec(skb, 0, iv, vnet_hdr_len,
 						   len);
+		if (!err && m && m->msg_control) {
+			struct ubuf_info *uarg = m->msg_control;
+			uarg->callback(uarg, false);
+		}
+	}
+
 	if (err)
 		goto err_kfree;
 
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 162464f..7f10588 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -281,7 +281,7 @@
 	nf_reset(skb);
 
 	skb->ip_summed = CHECKSUM_NONE;
-	ip_select_ident(iph, &rt->dst, NULL);
+	ip_select_ident(skb, &rt->dst, NULL);
 	ip_send_check(iph);
 
 	ip_local_out(skb);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index eb5609b..a4f35b0 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1037,6 +1037,29 @@
 	return 0;
 }
 
+static unsigned long iov_pages(const struct iovec *iv, int offset,
+			       unsigned long nr_segs)
+{
+	unsigned long seg, base;
+	int pages = 0, len, size;
+
+	while (nr_segs && (offset >= iv->iov_len)) {
+		offset -= iv->iov_len;
+		++iv;
+		--nr_segs;
+	}
+
+	for (seg = 0; seg < nr_segs; seg++) {
+		base = (unsigned long)iv[seg].iov_base + offset;
+		len = iv[seg].iov_len - offset;
+		size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
+		pages += size;
+		offset = 0;
+	}
+
+	return pages;
+}
+
 /* Get packet from user space buffer */
 static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 			    void *msg_control, const struct iovec *iv,
@@ -1044,7 +1067,7 @@
 {
 	struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
 	struct sk_buff *skb;
-	size_t len = total_len, align = NET_SKB_PAD;
+	size_t len = total_len, align = NET_SKB_PAD, linear;
 	struct virtio_net_hdr gso = { 0 };
 	int offset = 0;
 	int copylen;
@@ -1053,8 +1076,9 @@
 	u32 rxhash;
 
 	if (!(tun->flags & TUN_NO_PI)) {
-		if ((len -= sizeof(pi)) > total_len)
+		if (len < sizeof(pi))
 			return -EINVAL;
+		len -= sizeof(pi);
 
 		if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi)))
 			return -EFAULT;
@@ -1062,8 +1086,9 @@
 	}
 
 	if (tun->flags & TUN_VNET_HDR) {
-		if ((len -= tun->vnet_hdr_sz) > total_len)
+		if (len < tun->vnet_hdr_sz)
 			return -EINVAL;
+		len -= tun->vnet_hdr_sz;
 
 		if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso)))
 			return -EFAULT;
@@ -1084,34 +1109,23 @@
 			return -EINVAL;
 	}
 
-	if (msg_control)
-		zerocopy = true;
-
-	if (zerocopy) {
-		/* Userspace may produce vectors with count greater than
-		 * MAX_SKB_FRAGS, so we need to linearize parts of the skb
-		 * to let the rest of data to be fit in the frags.
-		 */
-		if (count > MAX_SKB_FRAGS) {
-			copylen = iov_length(iv, count - MAX_SKB_FRAGS);
-			if (copylen < offset)
-				copylen = 0;
-			else
-				copylen -= offset;
-		} else
-				copylen = 0;
-		/* There are 256 bytes to be copied in skb, so there is enough
-		 * room for skb expand head in case it is used.
+	if (msg_control) {
+		/* There are 256 bytes to be copied in skb, so there is
+		 * enough room for skb expand head in case it is used.
 		 * The rest of the buffer is mapped from userspace.
 		 */
-		if (copylen < gso.hdr_len)
-			copylen = gso.hdr_len;
-		if (!copylen)
-			copylen = GOODCOPY_LEN;
-	} else
-		copylen = len;
+		copylen = gso.hdr_len ? gso.hdr_len : GOODCOPY_LEN;
+		linear = copylen;
+		if (iov_pages(iv, offset + copylen, count) <= MAX_SKB_FRAGS)
+			zerocopy = true;
+	}
 
-	skb = tun_alloc_skb(tfile, align, copylen, gso.hdr_len, noblock);
+	if (!zerocopy) {
+		copylen = len;
+		linear = gso.hdr_len;
+	}
+
+	skb = tun_alloc_skb(tfile, align, copylen, linear, noblock);
 	if (IS_ERR(skb)) {
 		if (PTR_ERR(skb) != -EAGAIN)
 			tun->dev->stats.rx_dropped++;
@@ -1120,8 +1134,13 @@
 
 	if (zerocopy)
 		err = zerocopy_sg_from_iovec(skb, iv, offset, count);
-	else
+	else {
 		err = skb_copy_datagram_from_iovec(skb, 0, iv, offset, len);
+		if (!err && msg_control) {
+			struct ubuf_info *uarg = msg_control;
+			uarg->callback(uarg, false);
+		}
+	}
 
 	if (err) {
 		tun->dev->stats.rx_dropped++;
@@ -1674,11 +1693,11 @@
 		INIT_LIST_HEAD(&tun->disabled);
 		err = tun_attach(tun, file);
 		if (err < 0)
-			goto err_free_dev;
+			goto err_free_flow;
 
 		err = register_netdevice(tun->dev);
 		if (err < 0)
-			goto err_free_dev;
+			goto err_detach;
 
 		if (device_create_file(&tun->dev->dev, &dev_attr_tun_flags) ||
 		    device_create_file(&tun->dev->dev, &dev_attr_owner) ||
@@ -1722,7 +1741,12 @@
 	strcpy(ifr->ifr_name, tun->dev->name);
 	return 0;
 
- err_free_dev:
+err_detach:
+	tun_detach_all(dev);
+err_free_flow:
+	tun_flow_uninit(tun);
+	security_tun_dev_free_security(tun->security);
+err_free_dev:
 	free_netdev(dev);
 	return err;
 }
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index bd8758f..cea1f3d 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -1029,10 +1029,10 @@
 	dev->mii.supports_gmii = 1;
 
 	dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
-			      NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO;
+			      NETIF_F_RXCSUM;
 
 	dev->net->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
-				 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO;
+				 NETIF_F_RXCSUM;
 
 	/* Enable checksum offload */
 	*tmp = AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP |
@@ -1173,7 +1173,6 @@
 	if (((skb->len + 8) % frame_size) == 0)
 		tx_hdr2 |= 0x80008000;	/* Enable padding */
 
-	skb_linearize(skb);
 	headroom = skb_headroom(skb);
 	tailroom = skb_tailroom(skb);
 
@@ -1317,10 +1316,10 @@
 			  1, 1, tmp);
 
 	dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
-			      NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO;
+			      NETIF_F_RXCSUM;
 
 	dev->net->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
-				 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO;
+				 NETIF_F_RXCSUM;
 
 	/* Enable checksum offload */
 	*tmp = AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP |
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 04ee044..b1897c7 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -709,6 +709,11 @@
 	.bInterfaceProtocol	= USB_CDC_PROTO_NONE,
 	.driver_info = (unsigned long)&wwan_info,
 }, {
+	/* Telit modules */
+	USB_VENDOR_AND_INTERFACE_INFO(0x1bc7, USB_CLASS_COMM,
+			USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+	.driver_info = (kernel_ulong_t) &wwan_info,
+}, {
 	USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
 			USB_CDC_PROTO_NONE),
 	.driver_info = (unsigned long) &cdc_info,
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index 8728198..25ba7ec 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -400,6 +400,10 @@
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68a2, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
 	  .driver_info = (unsigned long)&cdc_mbim_info_zlp,
 	},
+	/* HP hs2434 Mobile Broadband Module needs ZLPs */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x3f0, 0x4b1d, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
+	  .driver_info = (unsigned long)&cdc_mbim_info_zlp,
+	},
 	{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
 	  .driver_info = (unsigned long)&cdc_mbim_info,
 	},
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 43afde8..eec6f18 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -66,7 +66,9 @@
 static void cdc_ncm_txpath_bh(unsigned long param);
 static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx);
 static enum hrtimer_restart cdc_ncm_tx_timer_cb(struct hrtimer *hr_timer);
+static const struct driver_info cdc_ncm_info, cdc_ncm_info_alt;
 static struct usb_driver cdc_ncm_driver;
+static const struct ethtool_ops cdc_ncm_ethtool_ops;
 
 static void
 cdc_ncm_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
@@ -603,17 +605,25 @@
 
 	/* NCM data altsetting is always 1 */
 	ret = cdc_ncm_bind_common(dev, intf, 1);
-
-	/*
-	 * We should get an event when network connection is "connected" or
-	 * "disconnected". Set network connection in "disconnected" state
-	 * (carrier is OFF) during attach, so the IP network stack does not
-	 * start IPv6 negotiation and more.
-	 */
-	usbnet_link_change(dev, 0, 0);
+	if (!ret) {
+		/*
+		 * We should get an event when network connection is "connected"
+		 * or "disconnected". Set network connection in "disconnected"
+		 * state (carrier is OFF) during attach, so the IP network stack
+		 * does not start IPv6 negotiation and more.
+		 */
+		usbnet_link_change(dev, 0, 0);
+	}
 	return ret;
 }
 
+static int cdc_ncm_bind_alt(struct usbnet *dev, struct usb_interface *intf)
+{
+	dev_info(&dev->udev->dev, "Use of alternate settings\n");
+	dev->net->addr_len = 1;
+	return cdc_ncm_bind(dev, intf);
+}
+
 static void cdc_ncm_align_tail(struct sk_buff *skb, size_t modulus, size_t remainder, size_t max)
 {
 	size_t align = ALIGN(skb->len, modulus) - skb->len + remainder;
@@ -917,6 +927,12 @@
 		goto error;
 	}
 
+	if (len != skb_in->len) {
+		pr_debug("invalid NTB block size %u vs %u\n", skb_in->len, len);
+
+		goto error;
+	}
+
 	if ((ctx->rx_seq + 1) != le16_to_cpu(nth16->wSequence) &&
 		(ctx->rx_seq || le16_to_cpu(nth16->wSequence)) &&
 		!((ctx->rx_seq == 0xffff) && !le16_to_cpu(nth16->wSequence))) {
@@ -965,6 +981,115 @@
 }
 EXPORT_SYMBOL_GPL(cdc_ncm_rx_verify_ndp16);
 
+/* handle NTB fragments recombination if needed (limited to 2 fragments)      */
+/* return:                                                                    */
+/*  0: valid NTB packet to be processed                                       */
+/*  1: invalid NTB packet                                                     */
+static inline int cdc_ncm_handle_fragments_recombination(
+					struct cdc_ncm_ctx *ctx,
+					struct sk_buff *skb_in,
+					int *ndpoffset)
+{
+	int len;
+	int i;
+	char *pc, *pcd;
+
+	if (*ndpoffset >= 0) {
+
+		/* valid NTD packet */
+		/* delete saved fragment if existing since flow back to normal*/
+		if (ctx->fragment_size) {
+
+			kfree(ctx->fragment);
+			ctx->fragment_size = 0;
+			pr_debug("frag deleted (%d) due to valid flow\n",
+				(int)(++ctx->fragment_deleted));
+		}
+		return 0;
+	}
+
+	/* invalid NTD packet */
+
+	if (ctx->fragment_size == 0) {
+
+		/* Save the current fragment */
+		ctx->fragment = kmalloc(skb_in->len, GFP_ATOMIC);
+		if (ctx->fragment == NULL) {
+			pr_debug("frag deleted (%d) due to kmalloc error\n",
+			(int)(++ctx->fragment_deleted));
+			return 1;
+		}
+		memcpy(ctx->fragment,
+			(unsigned char *)skb_in->data, skb_in->len);
+		ctx->fragment_size = skb_in->len;
+		pr_debug("frag saved\n");
+		return 1;
+	}
+
+	/* Try to recombinate current fragment with saved one (in skbuff) */
+
+	/* If skbuff is too small for the 2 fragments */
+	/* then delete previous fragment and save the current one */
+	len = skb_in->len;
+	if (ctx->fragment_size > skb_tailroom(skb_in)) {
+
+		kfree(ctx->fragment);
+		ctx->fragment_size = 0;
+		pr_debug("frag deleted (%d) due to size\n",
+			(int)(++ctx->fragment_deleted));
+
+		ctx->fragment = kmalloc(len, GFP_ATOMIC);
+		if (ctx->fragment == NULL) {
+			pr_debug("frag deleted (%d) due to kmalloc error\n",
+			(int)(++ctx->fragment_deleted));
+			return 1;
+		}
+		memcpy(ctx->fragment, skb_in->data, len);
+		ctx->fragment_size = len;
+		pr_debug("frag saved\n");
+
+		return 1;
+	}
+
+	/* recombinate current fragment with saved one (in skbuff) */
+	skb_put(skb_in, ctx->fragment_size);
+
+	pc = (unsigned char *)(skb_in->data); /* need to memcpy by the end */
+	pcd = pc + ctx->fragment_size;
+	pc = pc + len - 1;
+	pcd = pcd + len - 1;
+	for (i = 0; i < len; i++)
+		*pcd-- = *pc--;
+
+	memcpy(skb_in->data, ctx->fragment, ctx->fragment_size);
+
+	kfree(ctx->fragment);
+	ctx->fragment_size = 0;
+
+	/* test the recombination and deliver it if ok */
+	*ndpoffset = cdc_ncm_rx_verify_nth16(ctx, skb_in);
+	if (*ndpoffset >= 0) {
+		ctx->fragment_recombinated += 2;
+		pr_debug("frag successfully recombinated (%d)\n",
+			(int)ctx->fragment_recombinated);
+		return 0;
+	}
+
+	/* Else delete previous fragment and save the current one */
+	pr_debug("frag deleted (%d) due to recombination error\n",
+			(int)(++ctx->fragment_deleted));
+	ctx->fragment = kmalloc(len, GFP_ATOMIC);
+	if (ctx->fragment == NULL) {
+		pr_debug("frag deleted (%d) due to kmalloc error\n",
+			(int)(++ctx->fragment_deleted));
+		return 1;
+	}
+	memcpy(ctx->fragment, ++pcd, len);
+	ctx->fragment_size = len;
+	pr_debug("frag saved\n");
+	return 1;
+}
+
 static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
 {
 	struct sk_buff *skb;
@@ -978,10 +1103,14 @@
 	int ndpoffset;
 	int loopcount = 50; /* arbitrary max preventing infinite loop */
 
-	ndpoffset = cdc_ncm_rx_verify_nth16(ctx, skb_in);
-	if (ndpoffset < 0)
+	if (ctx == NULL)
 		goto error;
 
+	ndpoffset = cdc_ncm_rx_verify_nth16(ctx, skb_in);
+
+	if (cdc_ncm_handle_fragments_recombination(ctx, skb_in, &ndpoffset))
+		return 1;
+
 next_ndp:
 	nframes = cdc_ncm_rx_verify_ndp16(skb_in, ndpoffset);
 	if (nframes < 0)
@@ -1025,6 +1154,7 @@
 			if (!skb)
 				goto error;
 			skb->len = len;
+			skb->truesize = len + sizeof(struct sk_buff);
 			skb->data = ((u8 *)skb_in->data) + offset;
 			skb_set_tail_pointer(skb, len);
 			usbnet_skb_return(dev, skb);
@@ -1155,6 +1285,24 @@
 	usbnet_disconnect(intf);
 }
 
+static int cdc_ncm_manage_power(struct usbnet *dev, int status)
+{
+	dev->intf->needs_remote_wakeup = status;
+	return 0;
+}
+
+static const struct driver_info cdc_ncm_info_alt = {
+	.description = "CDC NCM",
+	.flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET,
+	.bind = cdc_ncm_bind_alt,
+	.unbind = cdc_ncm_unbind,
+	.check_connect = cdc_ncm_check_connect,
+	.manage_power = cdc_ncm_manage_power,
+	.status = cdc_ncm_status,
+	.rx_fixup = cdc_ncm_rx_fixup,
+	.tx_fixup = cdc_ncm_tx_fixup,
+};
+
 static const struct driver_info cdc_ncm_info = {
 	.description = "CDC NCM",
 	.flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET,
@@ -1196,6 +1344,21 @@
 };
 
 static const struct usb_device_id cdc_devs[] = {
+	{ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
+			| USB_DEVICE_ID_MATCH_VENDOR
+			| USB_DEVICE_ID_MATCH_PRODUCT,
+	  .bInterfaceClass = USB_CLASS_COMM,
+	  .bInterfaceSubClass = USB_CDC_SUBCLASS_NCM,
+	  .bInterfaceProtocol = (USB_CDC_PROTO_NONE),
+	  .idVendor = 0x1519,
+	  .idProduct = 0x0452,
+	  .driver_info = (unsigned long)&cdc_ncm_info_alt
+	},
+	{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_NCM,
+				USB_CDC_PROTO_NONE),
+		  .driver_info = (unsigned long)&cdc_ncm_info
+	},
+
 	/* Ericsson MBM devices like F5521gw */
 	{ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
 		| USB_DEVICE_ID_MATCH_VENDOR,
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 2dbb946..c6867f9 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -303,7 +303,7 @@
 		rx_ctl |= 0x02;
 	} else if (net->flags & IFF_ALLMULTI ||
 		   netdev_mc_count(net) > DM_MAX_MCAST) {
-		rx_ctl |= 0x04;
+		rx_ctl |= 0x08;
 	} else if (!netdev_mc_empty(net)) {
 		struct netdev_hw_addr *ha;
 
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 5645921..34a081f 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -518,6 +518,135 @@
 
 	/* 3. Combined interface devices matching on interface number */
 	{QMI_FIXED_INTF(0x0408, 0xea42, 4)},	/* Yota / Megafon M100-1 */
+	{QMI_FIXED_INTF(0x05c6, 0x7000, 0)},
+	{QMI_FIXED_INTF(0x05c6, 0x7001, 1)},
+	{QMI_FIXED_INTF(0x05c6, 0x7002, 1)},
+	{QMI_FIXED_INTF(0x05c6, 0x7101, 1)},
+	{QMI_FIXED_INTF(0x05c6, 0x7101, 2)},
+	{QMI_FIXED_INTF(0x05c6, 0x7101, 3)},
+	{QMI_FIXED_INTF(0x05c6, 0x7102, 1)},
+	{QMI_FIXED_INTF(0x05c6, 0x7102, 2)},
+	{QMI_FIXED_INTF(0x05c6, 0x7102, 3)},
+	{QMI_FIXED_INTF(0x05c6, 0x8000, 7)},
+	{QMI_FIXED_INTF(0x05c6, 0x8001, 6)},
+	{QMI_FIXED_INTF(0x05c6, 0x9000, 4)},
+	{QMI_FIXED_INTF(0x05c6, 0x9003, 4)},
+	{QMI_FIXED_INTF(0x05c6, 0x9005, 2)},
+	{QMI_FIXED_INTF(0x05c6, 0x900a, 4)},
+	{QMI_FIXED_INTF(0x05c6, 0x900b, 2)},
+	{QMI_FIXED_INTF(0x05c6, 0x900c, 4)},
+	{QMI_FIXED_INTF(0x05c6, 0x900c, 5)},
+	{QMI_FIXED_INTF(0x05c6, 0x900c, 6)},
+	{QMI_FIXED_INTF(0x05c6, 0x900d, 5)},
+	{QMI_FIXED_INTF(0x05c6, 0x900f, 3)},
+	{QMI_FIXED_INTF(0x05c6, 0x900f, 4)},
+	{QMI_FIXED_INTF(0x05c6, 0x900f, 5)},
+	{QMI_FIXED_INTF(0x05c6, 0x9010, 4)},
+	{QMI_FIXED_INTF(0x05c6, 0x9010, 5)},
+	{QMI_FIXED_INTF(0x05c6, 0x9011, 3)},
+	{QMI_FIXED_INTF(0x05c6, 0x9011, 4)},
+	{QMI_FIXED_INTF(0x05c6, 0x9021, 1)},
+	{QMI_FIXED_INTF(0x05c6, 0x9022, 2)},
+	{QMI_FIXED_INTF(0x05c6, 0x9025, 4)},	/* Alcatel-sbell ASB TL131 TDD LTE  (China Mobile) */
+	{QMI_FIXED_INTF(0x05c6, 0x9026, 3)},
+	{QMI_FIXED_INTF(0x05c6, 0x902e, 5)},
+	{QMI_FIXED_INTF(0x05c6, 0x9031, 5)},
+	{QMI_FIXED_INTF(0x05c6, 0x9032, 4)},
+	{QMI_FIXED_INTF(0x05c6, 0x9033, 3)},
+	{QMI_FIXED_INTF(0x05c6, 0x9033, 4)},
+	{QMI_FIXED_INTF(0x05c6, 0x9033, 5)},
+	{QMI_FIXED_INTF(0x05c6, 0x9033, 6)},
+	{QMI_FIXED_INTF(0x05c6, 0x9034, 3)},
+	{QMI_FIXED_INTF(0x05c6, 0x9034, 4)},
+	{QMI_FIXED_INTF(0x05c6, 0x9034, 5)},
+	{QMI_FIXED_INTF(0x05c6, 0x9034, 6)},
+	{QMI_FIXED_INTF(0x05c6, 0x9034, 7)},
+	{QMI_FIXED_INTF(0x05c6, 0x9035, 4)},
+	{QMI_FIXED_INTF(0x05c6, 0x9036, 3)},
+	{QMI_FIXED_INTF(0x05c6, 0x9037, 5)},
+	{QMI_FIXED_INTF(0x05c6, 0x9038, 4)},
+	{QMI_FIXED_INTF(0x05c6, 0x903b, 7)},
+	{QMI_FIXED_INTF(0x05c6, 0x903c, 6)},
+	{QMI_FIXED_INTF(0x05c6, 0x903d, 6)},
+	{QMI_FIXED_INTF(0x05c6, 0x903e, 5)},
+	{QMI_FIXED_INTF(0x05c6, 0x9043, 3)},
+	{QMI_FIXED_INTF(0x05c6, 0x9046, 3)},
+	{QMI_FIXED_INTF(0x05c6, 0x9046, 4)},
+	{QMI_FIXED_INTF(0x05c6, 0x9046, 5)},
+	{QMI_FIXED_INTF(0x05c6, 0x9047, 2)},
+	{QMI_FIXED_INTF(0x05c6, 0x9047, 3)},
+	{QMI_FIXED_INTF(0x05c6, 0x9047, 4)},
+	{QMI_FIXED_INTF(0x05c6, 0x9048, 4)},
+	{QMI_FIXED_INTF(0x05c6, 0x9048, 5)},
+	{QMI_FIXED_INTF(0x05c6, 0x9048, 6)},
+	{QMI_FIXED_INTF(0x05c6, 0x9048, 7)},
+	{QMI_FIXED_INTF(0x05c6, 0x9048, 8)},
+	{QMI_FIXED_INTF(0x05c6, 0x904c, 5)},
+	{QMI_FIXED_INTF(0x05c6, 0x904c, 6)},
+	{QMI_FIXED_INTF(0x05c6, 0x904c, 7)},
+	{QMI_FIXED_INTF(0x05c6, 0x904c, 8)},
+	{QMI_FIXED_INTF(0x05c6, 0x9050, 3)},
+	{QMI_FIXED_INTF(0x05c6, 0x9052, 4)},
+	{QMI_FIXED_INTF(0x05c6, 0x9053, 6)},
+	{QMI_FIXED_INTF(0x05c6, 0x9053, 7)},
+	{QMI_FIXED_INTF(0x05c6, 0x9054, 5)},
+	{QMI_FIXED_INTF(0x05c6, 0x9054, 6)},
+	{QMI_FIXED_INTF(0x05c6, 0x9055, 3)},
+	{QMI_FIXED_INTF(0x05c6, 0x9055, 4)},
+	{QMI_FIXED_INTF(0x05c6, 0x9055, 5)},
+	{QMI_FIXED_INTF(0x05c6, 0x9055, 6)},
+	{QMI_FIXED_INTF(0x05c6, 0x9055, 7)},
+	{QMI_FIXED_INTF(0x05c6, 0x9056, 3)},
+	{QMI_FIXED_INTF(0x05c6, 0x9062, 2)},
+	{QMI_FIXED_INTF(0x05c6, 0x9062, 3)},
+	{QMI_FIXED_INTF(0x05c6, 0x9062, 4)},
+	{QMI_FIXED_INTF(0x05c6, 0x9062, 5)},
+	{QMI_FIXED_INTF(0x05c6, 0x9062, 6)},
+	{QMI_FIXED_INTF(0x05c6, 0x9062, 7)},
+	{QMI_FIXED_INTF(0x05c6, 0x9062, 8)},
+	{QMI_FIXED_INTF(0x05c6, 0x9062, 9)},
+	{QMI_FIXED_INTF(0x05c6, 0x9064, 3)},
+	{QMI_FIXED_INTF(0x05c6, 0x9065, 6)},
+	{QMI_FIXED_INTF(0x05c6, 0x9065, 7)},
+	{QMI_FIXED_INTF(0x05c6, 0x9066, 5)},
+	{QMI_FIXED_INTF(0x05c6, 0x9066, 6)},
+	{QMI_FIXED_INTF(0x05c6, 0x9067, 1)},
+	{QMI_FIXED_INTF(0x05c6, 0x9068, 2)},
+	{QMI_FIXED_INTF(0x05c6, 0x9068, 3)},
+	{QMI_FIXED_INTF(0x05c6, 0x9068, 4)},
+	{QMI_FIXED_INTF(0x05c6, 0x9068, 5)},
+	{QMI_FIXED_INTF(0x05c6, 0x9068, 6)},
+	{QMI_FIXED_INTF(0x05c6, 0x9068, 7)},
+	{QMI_FIXED_INTF(0x05c6, 0x9069, 5)},
+	{QMI_FIXED_INTF(0x05c6, 0x9069, 6)},
+	{QMI_FIXED_INTF(0x05c6, 0x9069, 7)},
+	{QMI_FIXED_INTF(0x05c6, 0x9069, 8)},
+	{QMI_FIXED_INTF(0x05c6, 0x9070, 4)},
+	{QMI_FIXED_INTF(0x05c6, 0x9070, 5)},
+	{QMI_FIXED_INTF(0x05c6, 0x9075, 5)},
+	{QMI_FIXED_INTF(0x05c6, 0x9076, 4)},
+	{QMI_FIXED_INTF(0x05c6, 0x9076, 5)},
+	{QMI_FIXED_INTF(0x05c6, 0x9076, 6)},
+	{QMI_FIXED_INTF(0x05c6, 0x9076, 7)},
+	{QMI_FIXED_INTF(0x05c6, 0x9076, 8)},
+	{QMI_FIXED_INTF(0x05c6, 0x9077, 3)},
+	{QMI_FIXED_INTF(0x05c6, 0x9077, 4)},
+	{QMI_FIXED_INTF(0x05c6, 0x9077, 5)},
+	{QMI_FIXED_INTF(0x05c6, 0x9077, 6)},
+	{QMI_FIXED_INTF(0x05c6, 0x9078, 3)},
+	{QMI_FIXED_INTF(0x05c6, 0x9079, 4)},
+	{QMI_FIXED_INTF(0x05c6, 0x9079, 5)},
+	{QMI_FIXED_INTF(0x05c6, 0x9079, 6)},
+	{QMI_FIXED_INTF(0x05c6, 0x9079, 7)},
+	{QMI_FIXED_INTF(0x05c6, 0x9079, 8)},
+	{QMI_FIXED_INTF(0x05c6, 0x9080, 5)},
+	{QMI_FIXED_INTF(0x05c6, 0x9080, 6)},
+	{QMI_FIXED_INTF(0x05c6, 0x9080, 7)},
+	{QMI_FIXED_INTF(0x05c6, 0x9080, 8)},
+	{QMI_FIXED_INTF(0x05c6, 0x9083, 3)},
+	{QMI_FIXED_INTF(0x05c6, 0x9084, 4)},
+	{QMI_FIXED_INTF(0x05c6, 0x920d, 0)},
+	{QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
 	{QMI_FIXED_INTF(0x12d1, 0x140c, 1)},	/* Huawei E173 */
 	{QMI_FIXED_INTF(0x12d1, 0x14ac, 1)},	/* Huawei E1820 */
 	{QMI_FIXED_INTF(0x19d2, 0x0002, 1)},
@@ -610,7 +739,6 @@
 	{QMI_GOBI_DEVICE(0x413c, 0x8186)},	/* Dell Gobi 2000 Modem device (N0218, VU936) */
 	{QMI_GOBI_DEVICE(0x413c, 0x8194)},	/* Dell Gobi 3000 Composite */
 	{QMI_GOBI_DEVICE(0x05c6, 0x920b)},	/* Generic Gobi 2000 Modem device */
-	{QMI_GOBI_DEVICE(0x05c6, 0x920d)},	/* Gobi 3000 Composite */
 	{QMI_GOBI_DEVICE(0x05c6, 0x9225)},	/* Sony Gobi 2000 Modem device (N0279, VU730) */
 	{QMI_GOBI_DEVICE(0x05c6, 0x9245)},	/* Samsung Gobi 2000 Modem device (VL176) */
 	{QMI_GOBI_DEVICE(0x03f0, 0x251d)},	/* HP Gobi 2000 Modem device (VP412) */
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 75409748c..66ebbac 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -45,7 +45,6 @@
 #define EEPROM_MAC_OFFSET		(0x01)
 #define DEFAULT_TX_CSUM_ENABLE		(true)
 #define DEFAULT_RX_CSUM_ENABLE		(true)
-#define DEFAULT_TSO_ENABLE		(true)
 #define SMSC75XX_INTERNAL_PHY_ID	(1)
 #define SMSC75XX_TX_OVERHEAD		(8)
 #define MAX_RX_FIFO_SIZE		(20 * 1024)
@@ -1410,17 +1409,14 @@
 
 	INIT_WORK(&pdata->set_multicast, smsc75xx_deferred_multicast_write);
 
-	if (DEFAULT_TX_CSUM_ENABLE) {
+	if (DEFAULT_TX_CSUM_ENABLE)
 		dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
-		if (DEFAULT_TSO_ENABLE)
-			dev->net->features |= NETIF_F_SG |
-				NETIF_F_TSO | NETIF_F_TSO6;
-	}
+
 	if (DEFAULT_RX_CSUM_ENABLE)
 		dev->net->features |= NETIF_F_RXCSUM;
 
 	dev->net->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
-		NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_RXCSUM;
+				NETIF_F_RXCSUM;
 
 	ret = smsc75xx_wait_ready(dev, 0);
 	if (ret < 0) {
@@ -2200,8 +2196,6 @@
 {
 	u32 tx_cmd_a, tx_cmd_b;
 
-	skb_linearize(skb);
-
 	if (skb_headroom(skb) < SMSC75XX_TX_OVERHEAD) {
 		struct sk_buff *skb2 =
 			skb_copy_expand(skb, SMSC75XX_TX_OVERHEAD, 0, flags);
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 06ee82f..05dbd11 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -46,6 +46,7 @@
 #include <linux/slab.h>
 #include <linux/kernel.h>
 #include <linux/pm_runtime.h>
+#include <linux/debugfs.h>
 
 #define DRIVER_VERSION		"22-Aug-2005"
 
@@ -76,9 +77,6 @@
 // us (it polls at HZ/4 usually) before we report too many false errors.
 #define THROTTLE_JIFFIES	(HZ/8)
 
-// between wakeups
-#define UNLINK_TIMEOUT_MS	3
-
 /*-------------------------------------------------------------------------*/
 
 // randomly generated ethernet address
@@ -88,9 +86,50 @@
 
 /* use ethtool to change the level for any given device */
 static int msg_level = -1;
+static wait_queue_head_t unlink_wakeup;
+
+static struct dentry *usbnet_debug_root;
+static struct dentry *usbnet_debug_data_dump_enable;
+static struct dentry *usbnet_debug_partial_dump_len;
+static u32 usbnet_data_dump_enable;
+static u32 usbnet_partial_dump_len = 10;
+
 module_param (msg_level, int, 0);
 MODULE_PARM_DESC (msg_level, "Override default message level");
 
+static inline int is_hsic_modem(struct usb_device *udev)
+{
+	/* Check if it is Infenion (now intel) HSIC device */
+	if (udev->descriptor.idVendor == 0x1519 &&
+		udev->descriptor.idProduct == 0x0452)
+		return 1;
+
+	return 0;
+}
+
+static void usbnet_dump(struct usbnet *dev, u8 is_out,
+	const void *buf, size_t len)
+{
+	const u8 *ptr = buf;
+	int i, linelen, remaining = len;
+	unsigned char linebuf[32 * 3 + 2 + 32 + 1];
+	int rowsize = 16;
+	int groupsize = 1;
+	bool ascii = true;
+
+	for (i = 0; i < len; i += rowsize) {
+		linelen = min(remaining, rowsize);
+		remaining -= rowsize;
+
+		hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize,
+				   linebuf, sizeof(linebuf), ascii);
+
+		trace_printk("[%s %s] %.4x: %s\n", dev->net->name,
+			is_out == 1 ? "-->" : "<--", i, linebuf);
+	}
+
+}
+
 /*-------------------------------------------------------------------------*/
 
 /* handles CDC Ethernet and many other network "bulk data" interfaces */
@@ -193,6 +232,14 @@
 
 	/* software-driven interface shutdown */
 	case -ENOENT:		/* urb killed */
+		if (urb->actual_length) {
+			netdev_dbg(dev->net,
+				"intr status %d\n, length: %dn",
+				status, urb->actual_length);
+			dev->driver_info->status(dev, urb);
+		}
+		break;
+
 	case -ESHUTDOWN:	/* hardware gone */
 		netif_dbg(dev, ifdown, dev->net,
 			  "intr shutdown, code %d\n", status);
@@ -559,6 +606,23 @@
 			netif_dbg(dev, rx_err, dev->net,
 				  "rx length %d\n", skb->len);
 		}
+		/* In the bug analysis, in general, the error frame is
+		 * more valuable. So here we ignore the NCM fixup,
+		 * and directly dump all the data if there is no URB
+		 * error.
+		 */
+		if (is_hsic_modem(urb->dev)) {
+			if (usbnet_data_dump_enable == 1)
+				usbnet_dump(dev, 0, urb->transfer_buffer,
+					urb->actual_length);
+			else if (usbnet_data_dump_enable == 2)
+				usbnet_dump(dev, 0, urb->transfer_buffer,
+					min(urb->actual_length,
+					usbnet_partial_dump_len));
+		}
+
+		usb_autopm_get_interface_async(dev->intf);
+		usb_autopm_put_interface_async(dev->intf);
 		break;
 
 	/* stalls need manual reset. this is rare ... except that
@@ -572,7 +636,27 @@
 		// FALLTHROUGH
 
 	/* software-driven interface shutdown */
+	case -ENOENT:		/* urb killed */
 	case -ECONNRESET:		/* async unlink */
+		if (urb->actual_length) {
+			if (skb->len < dev->net->hard_header_len) {
+				state = rx_cleanup;
+				dev->net->stats.rx_errors++;
+				dev->net->stats.rx_length_errors++;
+				netif_dbg(dev, rx_err, dev->net,
+					  "rx length %d\n", skb->len);
+			}
+			netif_dbg(dev, ifdown, dev->net,
+				  "rx length in async unlink: %d\n",
+					urb->actual_length);
+		} else {
+			netif_dbg(dev, ifdown, dev->net,
+				  "rx async unlink, code %d\n",
+					urb_status);
+			goto block;
+		}
+		break;
+
 	case -ESHUTDOWN:		/* hardware gone */
 		netif_dbg(dev, ifdown, dev->net,
 			  "rx shutdown, code %d\n", urb_status);
@@ -735,7 +819,6 @@
 // precondition: never called in_interrupt
 static void usbnet_terminate_urbs(struct usbnet *dev)
 {
-	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
 	DECLARE_WAITQUEUE(wait, current);
 	int temp;
 
@@ -750,7 +833,7 @@
 	while (!skb_queue_empty(&dev->rxq)
 		&& !skb_queue_empty(&dev->txq)
 		&& !skb_queue_empty(&dev->done)) {
-			schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
+			schedule();
 			set_current_state(TASK_UNINTERRUPTIBLE);
 			netif_dbg(dev, ifdown, dev->net,
 				  "waited for %d urb completions\n", temp);
@@ -1144,6 +1227,17 @@
 		if (!(dev->driver_info->flags & FLAG_MULTI_PACKET))
 			dev->net->stats.tx_packets++;
 		dev->net->stats.tx_bytes += entry->length;
+
+		if (is_hsic_modem(urb->dev)) {
+			if (usbnet_data_dump_enable == 1)
+				usbnet_dump(dev, 1, urb->transfer_buffer,
+					urb->actual_length);
+			else if (usbnet_data_dump_enable == 2)
+				usbnet_dump(dev, 1, urb->transfer_buffer,
+					min(urb->actual_length,
+					usbnet_partial_dump_len));
+		}
+
 	} else {
 		dev->net->stats.tx_errors++;
 
@@ -1370,8 +1464,10 @@
 
 	// waiting for all pending urbs to complete?
 	if (dev->wait) {
+		wait_queue_head_t *wait_d = dev->wait;
 		if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
-			wake_up (dev->wait);
+			if (wait_d)
+				wake_up(wait_d);
 		}
 
 	// or are we maybe short a few urbs?
@@ -1438,6 +1534,9 @@
 	usb_free_urb(dev->interrupt);
 
 	free_netdev(net);
+
+	debugfs_remove_recursive(usbnet_debug_root);
+	usbnet_debug_root = NULL;
 }
 EXPORT_SYMBOL_GPL(usbnet_disconnect);
 
@@ -1523,6 +1622,7 @@
 	init_timer (&dev->delay);
 	mutex_init (&dev->phy_mutex);
 	mutex_init(&dev->interrupt_mutex);
+	init_waitqueue_head(&unlink_wakeup);
 	dev->interrupt_count = 0;
 
 	dev->net = net;
@@ -1617,6 +1717,37 @@
 	if (dev->driver_info->flags & FLAG_LINK_INTR)
 		usbnet_link_change(dev, 0, 0);
 
+	if (is_hsic_modem(xdev)) {
+		if (!usbnet_debug_root) {
+			usbnet_debug_root = debugfs_create_dir("net",
+				usb_debug_root);
+
+			if (!usbnet_debug_root)
+				goto out;
+
+			usbnet_debug_data_dump_enable = debugfs_create_u32(
+					"dump_enable",	0644, usbnet_debug_root,
+					&usbnet_data_dump_enable);
+
+			if (!usbnet_debug_data_dump_enable) {
+				debugfs_remove_recursive(usbnet_debug_root);
+				usbnet_debug_root = NULL;
+				goto out;
+			}
+
+			usbnet_debug_partial_dump_len = debugfs_create_u32(
+					"partial_dump_max_len",	0644,
+					usbnet_debug_root,
+					&usbnet_partial_dump_len);
+
+			if (!usbnet_debug_partial_dump_len) {
+				debugfs_remove_recursive(usbnet_debug_root);
+				usbnet_debug_root = NULL;
+				goto out;
+			}
+		}
+	}
+
 	return 0;
 
 out4:
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index c9e0038..64cf702 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -602,7 +602,7 @@
 		container_of(napi, struct receive_queue, napi);
 	struct virtnet_info *vi = rq->vq->vdev->priv;
 	void *buf;
-	unsigned int len, received = 0;
+	unsigned int r, len, received = 0;
 
 again:
 	while (received < budget &&
@@ -619,8 +619,9 @@
 
 	/* Out of packets? */
 	if (received < budget) {
+		r = virtqueue_enable_cb_prepare(rq->vq);
 		napi_complete(napi);
-		if (unlikely(!virtqueue_enable_cb(rq->vq)) &&
+		if (unlikely(virtqueue_poll(rq->vq, r)) &&
 		    napi_schedule_prep(napi)) {
 			virtqueue_disable_cb(rq->vq);
 			__napi_schedule(napi);
@@ -901,7 +902,6 @@
 	struct scatterlist sg;
 	struct virtio_net_ctrl_mq s;
 	struct net_device *dev = vi->dev;
-	int i;
 
 	if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
 		return 0;
@@ -915,10 +915,10 @@
 			 queue_pairs);
 		return -EINVAL;
 	} else {
-		for (i = vi->curr_queue_pairs; i < queue_pairs; i++)
-			if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
-				schedule_delayed_work(&vi->refill, 0);
 		vi->curr_queue_pairs = queue_pairs;
+		/* virtnet_open() will refill when device is going to up. */
+		if (dev->flags & IFF_UP)
+			schedule_delayed_work(&vi->refill, 0);
 	}
 
 	return 0;
@@ -1108,6 +1108,7 @@
 	default:
 		break;
 	}
+
 	return NOTIFY_OK;
 }
 
@@ -1664,6 +1665,8 @@
 	struct virtnet_info *vi = vdev->priv;
 	int i;
 
+	unregister_hotcpu_notifier(&vi->nb);
+
 	/* Prevent config work handler from accessing the device */
 	mutex_lock(&vi->config_lock);
 	vi->config_enable = false;
@@ -1708,7 +1711,13 @@
 	vi->config_enable = true;
 	mutex_unlock(&vi->config_lock);
 
+	rtnl_lock();
 	virtnet_set_queues(vi, vi->curr_queue_pairs);
+	rtnl_unlock();
+
+	err = register_hotcpu_notifier(&vi->nb);
+	if (err)
+		return err;
 
 	return 0;
 }
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 57325f3..054489f 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1090,7 +1090,7 @@
 	iph->daddr	= dst;
 	iph->saddr	= fl4.saddr;
 	iph->ttl	= ttl ? : ip4_dst_hoplimit(&rt->dst);
-	tunnel_ip_select_ident(skb, old_iph, &rt->dst);
+	__ip_select_ident(iph, &rt->dst, (skb_shinfo(skb)->gso_segs ?: 1) - 1);
 
 	nf_reset(skb);
 
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 3f0c4f2..bcfff0d 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -1972,6 +1972,7 @@
 	}
 
 	i = port->index;
+	memset(&sync, 0, sizeof(sync));
 	sync.clock_rate = FST_RDL(card, portConfig[i].lineSpeed);
 	/* Lucky card and linux use same encoding here */
 	sync.clock_type = FST_RDB(card, portConfig[i].internalClock) ==
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
index 6a24a5a..4c0a697 100644
--- a/drivers/net/wan/wanxl.c
+++ b/drivers/net/wan/wanxl.c
@@ -355,6 +355,7 @@
 			ifr->ifr_settings.size = size; /* data size wanted */
 			return -ENOBUFS;
 		}
+		memset(&line, 0, sizeof(line));
 		line.clock_type = get_status(port)->clocking;
 		line.clock_rate = 0;
 		line.loopback = 0;
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 5b0a49c..73816cb 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -269,6 +269,11 @@
 	help
 	  Enables Power/Reset/Carddetect function abstraction
 
+config WIFI_PLATFORM_DATA
+	bool "Enable WiFi platform data"
+	---help---
+	 Enables platform_wifi
+
 source "drivers/net/wireless/ath/Kconfig"
 source "drivers/net/wireless/b43/Kconfig"
 source "drivers/net/wireless/b43legacy/Kconfig"
@@ -285,5 +290,6 @@
 source "drivers/net/wireless/ti/Kconfig"
 source "drivers/net/wireless/zd1211rw/Kconfig"
 source "drivers/net/wireless/mwifiex/Kconfig"
+source "drivers/net/wireless/bcmdhd/Kconfig"
 
 endif # WLAN
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 67156ef..3e4f6f6 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -57,3 +57,4 @@
 
 obj-$(CONFIG_BRCMFMAC)	+= brcm80211/
 obj-$(CONFIG_BRCMSMAC)	+= brcm80211/
+obj-$(CONFIG_BCMDHD)	+= bcmdhd/
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index e6b92ff..25b8bbb 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -3563,14 +3563,18 @@
 {
 	struct ath9k_hw_capabilities *pCap = &ah->caps;
 	int chain;
-	u32 regval;
+	u32 regval, value;
 	static const u32 switch_chain_reg[AR9300_MAX_CHAINS] = {
 			AR_PHY_SWITCH_CHAIN_0,
 			AR_PHY_SWITCH_CHAIN_1,
 			AR_PHY_SWITCH_CHAIN_2,
 	};
 
-	u32 value = ar9003_hw_ant_ctrl_common_get(ah, is2ghz);
+	if (AR_SREV_9485(ah) && (ar9003_hw_get_rx_gain_idx(ah) == 0))
+		ath9k_hw_cfg_output(ah, AR9300_EXT_LNA_CTL_GPIO_AR9485,
+				    AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED);
+
+	value = ar9003_hw_ant_ctrl_common_get(ah, is2ghz);
 
 	if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
 		REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM,
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index e1714d7..3457ca5 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -1076,6 +1076,10 @@
 		 * is_on == 0 means MRC CCK is OFF (more noise imm)
 		 */
 		bool is_on = param ? 1 : 0;
+
+		if (ah->caps.rx_chainmask == 1)
+			break;
+
 		REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL,
 			      AR_PHY_MRC_CCK_ENABLE, is_on);
 		REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL,
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index e717741..5013c73 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -351,6 +351,8 @@
 
 #define AR_PHY_CCA_NOM_VAL_9330_2GHZ          -118
 
+#define AR9300_EXT_LNA_CTL_GPIO_AR9485 9
+
 /*
  * AGC Field Definitions
  */
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 42b03dc..4ebd9fd 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -79,10 +79,6 @@
 		       sizeof(struct ath_buf_state));		\
 	} while (0)
 
-#define ATH_RXBUF_RESET(_bf) do {		\
-		(_bf)->bf_stale = false;	\
-	} while (0)
-
 /**
  * enum buffer_type - Buffer type flags
  *
@@ -316,6 +312,7 @@
 	struct ath_descdma rxdma;
 	struct ath_rx_edma rx_edma[ATH9K_RX_QUEUE_MAX];
 
+	struct ath_buf *buf_hold;
 	struct sk_buff *frag;
 
 	u32 ampdu_ref;
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 7304e75..5e8219a 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -387,7 +387,6 @@
 
 	if (!caldata) {
 		chan->noisefloor = nf;
-		ah->noise = ath9k_hw_getchan_noise(ah, chan);
 		return false;
 	}
 
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index f5dda84..75a6376 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -1289,7 +1289,9 @@
 
 	usb_set_intfdata(interface, NULL);
 
-	if (!unplugged && (hif_dev->flags & HIF_USB_START))
+	/* If firmware was loaded we should drop it
+	 * go back to first stage bootloader. */
+	if (!unplugged && (hif_dev->flags & HIF_USB_READY))
 		ath9k_hif_usb_reboot(udev);
 
 	kfree(hif_dev);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index a47f5e0..3b202ff 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -846,6 +846,7 @@
 	if (error != 0)
 		goto err_rx;
 
+	ath9k_hw_disable(priv->ah);
 #ifdef CONFIG_MAC80211_LEDS
 	/* must be initialized before ieee80211_register_hw */
 	priv->led_cdev.default_trigger = ieee80211_create_tpt_led_trigger(priv->hw,
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 6bd0e92..417a089 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -448,6 +448,7 @@
 	struct ieee80211_conf *cur_conf = &priv->hw->conf;
 	bool txok;
 	int slot;
+	int hdrlen, padsize;
 
 	slot = strip_drv_header(priv, skb);
 	if (slot < 0) {
@@ -504,6 +505,15 @@
 
 	ath9k_htc_tx_clear_slot(priv, slot);
 
+	/* Remove padding before handing frame back to mac80211 */
+	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+
+	padsize = hdrlen & 3;
+	if (padsize && skb->len > hdrlen + padsize) {
+		memmove(skb->data + padsize, skb->data, hdrlen);
+		skb_pull(skb, padsize);
+	}
+
 	/* Send status to mac80211 */
 	ieee80211_tx_status(priv->hw, skb);
 }
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 15dfefc..b1d5037 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1872,7 +1872,8 @@
 
 	ah->caldata = caldata;
 	if (caldata && (chan->channel != caldata->channel ||
-			chan->channelFlags != caldata->channelFlags)) {
+			chan->channelFlags != caldata->channelFlags ||
+			chan->chanmode != caldata->chanmode)) {
 		/* Operating channel changed, reset channel calibration data */
 		memset(caldata, 0, sizeof(*caldata));
 		ath9k_init_nfcal_hist_buffer(ah, chan);
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 2ba4945..bd126c2 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -767,7 +767,8 @@
 		IEEE80211_HW_PS_NULLFUNC_STACK |
 		IEEE80211_HW_SPECTRUM_MGMT |
 		IEEE80211_HW_REPORTS_TX_ACK_STATUS |
-		IEEE80211_HW_SUPPORTS_RC_TABLE;
+		IEEE80211_HW_SUPPORTS_RC_TABLE |
+		IEEE80211_HW_SUPPORTS_HT_CCK_RATES;
 
 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
 		 hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 5092eca..a8fee08 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -173,8 +173,7 @@
 {
 	ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
 
-	if (AR_SREV_9340(sc->sc_ah) || AR_SREV_9485(sc->sc_ah) ||
-	    AR_SREV_9550(sc->sc_ah))
+	if (AR_SREV_9340(sc->sc_ah) || AR_SREV_9330(sc->sc_ah))
 		ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work,
 				     msecs_to_jiffies(ATH_PLL_WORK_INTERVAL));
 
@@ -210,6 +209,7 @@
 	struct ath_hw *ah = sc->sc_ah;
 	struct ath_common *common = ath9k_hw_common(ah);
 	unsigned long flags;
+	int i;
 
 	if (ath_startrecv(sc) != 0) {
 		ath_err(common, "Unable to restart recv logic\n");
@@ -237,6 +237,15 @@
 		}
 	work:
 		ath_restart_work(sc);
+
+		for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
+			if (!ATH_TXQ_SETUP(sc, i))
+				continue;
+
+			spin_lock_bh(&sc->tx.txq[i].axq_lock);
+			ath_txq_schedule(sc, &sc->tx.txq[i]);
+			spin_unlock_bh(&sc->tx.txq[i].axq_lock);
+		}
 	}
 
 	if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx != 3)
@@ -544,21 +553,10 @@
 
 static int ath_reset(struct ath_softc *sc)
 {
-	int i, r;
+	int r;
 
 	ath9k_ps_wakeup(sc);
-
 	r = ath_reset_internal(sc, NULL);
-
-	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
-		if (!ATH_TXQ_SETUP(sc, i))
-			continue;
-
-		spin_lock_bh(&sc->tx.txq[i].axq_lock);
-		ath_txq_schedule(sc, &sc->tx.txq[i]);
-		spin_unlock_bh(&sc->tx.txq[i].axq_lock);
-	}
-
 	ath9k_ps_restore(sc);
 
 	return r;
@@ -1211,13 +1209,6 @@
 		ath_update_survey_stats(sc);
 		spin_unlock_irqrestore(&common->cc_lock, flags);
 
-		/*
-		 * Preserve the current channel values, before updating
-		 * the same channel
-		 */
-		if (ah->curchan && (old_pos == pos))
-			ath9k_hw_getnf(ah, ah->curchan);
-
 		ath9k_cmn_update_ichannel(&sc->sc_ah->channels[pos],
 					  curchan, channel_type);
 
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 8be2b5d..f53dbd1 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -42,8 +42,6 @@
 	struct ath_desc *ds;
 	struct sk_buff *skb;
 
-	ATH_RXBUF_RESET(bf);
-
 	ds = bf->bf_desc;
 	ds->ds_link = 0; /* link to null */
 	ds->ds_data = bf->bf_buf_addr;
@@ -70,6 +68,14 @@
 	sc->rx.rxlink = &ds->ds_link;
 }
 
+static void ath_rx_buf_relink(struct ath_softc *sc, struct ath_buf *bf)
+{
+	if (sc->rx.buf_hold)
+		ath_rx_buf_link(sc, sc->rx.buf_hold);
+
+	sc->rx.buf_hold = bf;
+}
+
 static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
 {
 	/* XXX block beacon interrupts */
@@ -117,7 +123,6 @@
 
 	skb = bf->bf_mpdu;
 
-	ATH_RXBUF_RESET(bf);
 	memset(skb->data, 0, ah->caps.rx_status_len);
 	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
 				ah->caps.rx_status_len, DMA_TO_DEVICE);
@@ -432,6 +437,7 @@
 	if (list_empty(&sc->rx.rxbuf))
 		goto start_recv;
 
+	sc->rx.buf_hold = NULL;
 	sc->rx.rxlink = NULL;
 	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
 		ath_rx_buf_link(sc, bf);
@@ -677,6 +683,9 @@
 	}
 
 	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
+	if (bf == sc->rx.buf_hold)
+		return NULL;
+
 	ds = bf->bf_desc;
 
 	/*
@@ -1378,7 +1387,7 @@
 		if (edma) {
 			ath_rx_edma_buf_link(sc, qtype);
 		} else {
-			ath_rx_buf_link(sc, bf);
+			ath_rx_buf_relink(sc, bf);
 			ath9k_hw_rxena(ah);
 		}
 	} while (1);
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 83ab6be..e752f5d 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -2387,6 +2387,7 @@
 	for (acno = 0, ac = &an->ac[acno];
 	     acno < IEEE80211_NUM_ACS; acno++, ac++) {
 		ac->sched    = false;
+		ac->clear_ps_filter = true;
 		ac->txq = sc->tx.txq_map[acno];
 		INIT_LIST_HEAD(&ac->tid_q);
 	}
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index e9010a4..0686375 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -1857,7 +1857,8 @@
 		     IEEE80211_HW_SUPPORTS_PS |
 		     IEEE80211_HW_PS_NULLFUNC_STACK |
 		     IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC |
-		     IEEE80211_HW_SIGNAL_DBM;
+		     IEEE80211_HW_SIGNAL_DBM |
+		     IEEE80211_HW_SUPPORTS_HT_CCK_RATES;
 
 	if (!modparam_noht) {
 		/*
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 727b1f5..d57e5be 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -145,7 +145,7 @@
 				   le16_to_cpu(hdr.type), hdr.flags);
 			if (len <= MAX_MBOXITEM_SIZE) {
 				int n = 0;
-				unsigned char printbuf[16 * 3 + 2];
+				char printbuf[16 * 3 + 2];
 				unsigned char databuf[MAX_MBOXITEM_SIZE];
 				void __iomem *src = wmi_buffer(wil, d.addr) +
 					sizeof(struct wil6210_mbox_hdr);
@@ -416,7 +416,7 @@
 		seq_printf(s, "  SKB = %p\n", skb);
 
 		if (skb) {
-			unsigned char printbuf[16 * 3 + 2];
+			char printbuf[16 * 3 + 2];
 			int i = 0;
 			int len = skb_headlen(skb);
 			void *p = skb->data;
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
index 078e6f3..13f91ac 100644
--- a/drivers/net/wireless/b43/Kconfig
+++ b/drivers/net/wireless/b43/Kconfig
@@ -28,7 +28,7 @@
 
 config B43_BCMA
 	bool "Support for BCMA bus"
-	depends on B43 && BCMA
+	depends on B43 && (BCMA = y || BCMA = B43)
 	default y
 
 config B43_BCMA_EXTRA
@@ -39,7 +39,7 @@
 
 config B43_SSB
 	bool
-	depends on B43 && SSB
+	depends on B43 && (SSB = y || SSB = B43)
 	default y
 
 # Auto-select SSB PCI-HOST support, if possible
diff --git a/drivers/net/wireless/bcmdhd/Kconfig b/drivers/net/wireless/bcmdhd/Kconfig
new file mode 100644
index 0000000..d9e52bb
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/Kconfig
@@ -0,0 +1,68 @@
+config BCMDHD
+	tristate "Broadcom FullMAC wireless cards support"
+	---help---
+	  This module adds support for wireless adapters based on
+	  Broadcom FullMAC chipset.
+
+	  If you choose to build a module, it'll be called dhd. Say M if
+	  unsure.
+
+config BCMDHD_SDIO
+	bool "SDIO bus interface support"
+	depends on BCMDHD && MMC
+
+config BCMDHD_PCIE
+	bool "PCIe bus interface support"
+	depends on BCMDHD && PCI && !BCMDHD_SDIO
+
+config BCM4354
+	tristate "BCM4354 support"
+	depends on BCMDHD
+
+config BCM4356
+	tristate "BCM4356 support"
+	depends on BCMDHD
+	default n
+
+config BCMDHD_FW_PATH
+	depends on BCMDHD
+	string "Firmware path"
+	default "/system/vendor/firmware/fw_bcmdhd.bin"
+	---help---
+	  Path to the firmware file.
+
+config BCMDHD_NVRAM_PATH
+	depends on BCMDHD
+	string "NVRAM path"
+	default "/system/etc/wifi/bcmdhd.cal"
+	---help---
+	  Path to the calibration file.
+
+config BCMDHD_WEXT
+	bool "Enable WEXT support"
+	depends on BCMDHD && CFG80211 = n
+	select WIRELESS_EXT
+	select WEXT_PRIV
+	help
+	  Enables WEXT support
+
+config DHD_USE_STATIC_BUF
+	bool "Enable memory preallocation"
+	depends on BCMDHD
+	default n
+	---help---
+	  Use memory preallocated in platform
+
+config DHD_USE_SCHED_SCAN
+	bool "Use CFG80211 sched scan"
+	depends on BCMDHD && CFG80211
+	default n
+	---help---
+	  Use CFG80211 sched scan
+
+config DHD_SET_RANDOM_MAC_VAL
+	hex "Vendor OUI"
+	depends on BCMDHD
+	default 0x001A11
+	---help---
+	  Set vendor OUI for SoftAP
diff --git a/drivers/net/wireless/bcmdhd/Makefile b/drivers/net/wireless/bcmdhd/Makefile
new file mode 100644
index 0000000..4079316
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/Makefile
@@ -0,0 +1,247 @@
+# bcmdhd
+#####################
+# SDIO Basic feature
+#####################
+
+DHDCFLAGS += -Wall -Wstrict-prototypes -Dlinux -DLINUX -DBCMDRIVER            \
+	-DBCMDONGLEHOST -DUNRELEASEDCHIP -DBCMDMA32 -DBCMFILEIMAGE            \
+	-DDHDTHREAD -DSHOW_EVENTS -DBCMDBG -DWLP2P                            \
+	-DWIFI_ACT_FRAME -DARP_OFFLOAD_SUPPORT                                \
+	-DKEEP_ALIVE -DCSCAN -DPKT_FILTER_SUPPORT                             \
+	-DEMBEDDED_PLATFORM -DPNO_SUPPORT  -DSHOW_LOGTRACE                    \
+	-DDHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT                           \
+	-DCUSTOMER_HW2 -DGET_CUSTOM_MAC_ENABLE
+
+#################
+# Common feature
+#################
+DHDCFLAGS += -DWL_CFG80211
+# Print out kernel panic point of file and line info when assertion happened
+DHDCFLAGS += -DBCMASSERT_LOG
+
+# keepalive
+DHDCFLAGS += -DCUSTOM_KEEP_ALIVE_SETTING=28000
+
+DHDCFLAGS += -DVSDB
+
+# For p2p connection issue
+DHDCFLAGS += -DWL_SCB_TIMEOUT=10
+
+
+# TDLS enable
+DHDCFLAGS += -DWLTDLS -DWLTDLS_AUTO_ENABLE
+# For TDLS tear down inactive time 40 sec
+DHDCFLAGS += -DCUSTOM_TDLS_IDLE_MODE_SETTING=40000
+# for TDLS RSSI HIGH for establishing TDLS link
+DHDCFLAGS += -DCUSTOM_TDLS_RSSI_THRESHOLD_HIGH=-60
+# for TDLS RSSI HIGH for tearing down TDLS link
+DHDCFLAGS += -DCUSTOM_TDLS_RSSI_THRESHOLD_LOW=-70
+
+# Roaming
+DHDCFLAGS += -DROAM_AP_ENV_DETECTION
+DHDCFLAGS += -DROAM_ENABLE -DROAM_CHANNEL_CACHE -DROAM_API
+DHDCFLAGS += -DENABLE_FW_ROAM_SUSPEND
+# Roaming trigger
+DHDCFLAGS += -DCUSTOM_ROAM_TRIGGER_SETTING=-75
+DHDCFLAGS += -DCUSTOM_ROAM_DELTA_SETTING=10
+# Set PM 2 always regardless suspend/resume
+DHDCFLAGS += -DSUPPORT_PM2_ONLY
+
+# For special PNO Event keep wake lock for 10sec
+DHDCFLAGS += -DCUSTOM_PNO_EVENT_LOCK_xTIME=10
+DHDCFLAGS += -DMIRACAST_AMPDU_SIZE=8
+
+#Gscan
+#DHDCFLAGS += -DGSCAN_SUPPORT
+DHDCFLAGS += -DRTT_SUPPORT
+DHDCFLAGS += -DWL_VENDOR_EXT_SUPPORT
+#Link Statistics
+DHDCFLAGS += -DLINKSTAT_SUPPORT
+
+
+# Early suspend
+DHDCFLAGS += -DDHD_USE_EARLYSUSPEND
+
+# For Scan result patch
+DHDCFLAGS += -DESCAN_RESULT_PATCH
+
+# For Static Buffer
+ifeq ($(CONFIG_DHD_USE_STATIC_BUF),y)
+  DHDCFLAGS += -DENHANCED_STATIC_BUF
+  DHDCFLAGS += -DSTATIC_WL_PRIV_STRUCT
+endif
+ifneq ($(CONFIG_DHD_USE_SCHED_SCAN),)
+DHDCFLAGS += -DWL_SCHED_SCAN
+endif
+
+# Ioctl timeout 5000ms
+DHDCFLAGS += -DIOCTL_RESP_TIMEOUT=5000
+
+# Prevent rx thread monopolize
+DHDCFLAGS += -DWAIT_DEQUEUE
+
+# Config PM Control
+DHDCFLAGS += -DCONFIG_CONTROL_PM
+
+# idle count
+DHDCFLAGS += -DDHD_USE_IDLECOUNT
+
+# SKB TAILPAD to avoid out of boundary memory access
+DHDCFLAGS += -DDHDENABLE_TAILPAD
+
+# Wi-Fi Direct
+DHDCFLAGS += -DWL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+DHDCFLAGS += -DWL_CFG80211_STA_EVENT
+DHDCFLAGS += -DWL_IFACE_COMB_NUM_CHANNELS
+DHDCFLAGS += -DWL_ENABLE_P2P_IF
+
+DHDCFLAGS += -DWL_CFG80211_ACL
+DHDCFLAGS += -DDISABLE_11H_SOFTAP
+DHDCFLAGS += -DSET_RANDOM_MAC_SOFTAP
+DHDCFLAGS += -DCUSTOM_FORCE_NODFS_FLAG
+DHDCFLAGS += -DCUSTOM_SET_SHORT_DWELL_TIME
+
+##########################
+# driver type
+# m: module type driver
+# y: built-in type driver
+##########################
+DRIVER_TYPE ?= y
+
+#########################
+# Chip dependent feature
+#########################
+
+ifneq ($(filter y, $(CONFIG_BCM4354) $(CONFIG_BCM4356)),)
+  DHDCFLAGS += -DUSE_WL_TXBF
+  DHDCFLAGS += -DUSE_WL_FRAMEBURST
+  DHDCFLAGS += -DCUSTOM_DPC_CPUCORE=0
+  DHDCFLAGS += -DMAX_AP_CLIENT_CNT=10
+  DHDCFLAGS += -DMAX_GO_CLIENT_CNT=5
+
+# New Features
+  DHDCFLAGS += -DWL11U
+  DHDCFLAGS += -DMFP
+  DHDCFLAGS += -DDHD_ENABLE_LPC
+  DHDCFLAGS += -DCUSTOM_COUNTRY_CODE
+#  DHDCFLAGS += -DSAR_SUPPORT
+
+# debug info
+  DHDCFLAGS += -DDHD_WAKE_STATUS
+
+ifneq ($(CONFIG_BCMDHD_SDIO),)
+  DHDCFLAGS += -DBDC -DOOB_INTR_ONLY -DHW_OOB -DDHD_BCMEVENTS -DMMC_SDIO_ABORT
+  DHDCFLAGS += -DBCMSDIO -DBCMLXSDMMC -DUSE_SDIOFIFO_IOVAR
+  DHDCFLAGS += -DPROP_TXSTATUS
+  DHDCFLAGS += -DCUSTOM_AMPDU_MPDU=16
+  DHDCFLAGS += -DCUSTOM_AMPDU_BA_WSIZE=64
+# tput enhancement
+  DHDCFLAGS += -DCUSTOM_GLOM_SETTING=8 -DCUSTOM_RXCHAIN=1
+  DHDCFLAGS += -DUSE_DYNAMIC_F2_BLKSIZE -DDYNAMIC_F2_BLKSIZE_FOR_NONLEGACY=128
+  DHDCFLAGS += -DBCMSDIOH_TXGLOM -DCUSTOM_TXGLOM=1 -DBCMSDIOH_TXGLOM_HIGHSPEED
+  DHDCFLAGS += -DDHDTCPACK_SUPPRESS
+  DHDCFLAGS += -DRXFRAME_THREAD
+  DHDCFLAGS += -DREPEAT_READFRAME
+  DHDCFLAGS += -DCUSTOM_MAX_TXGLOM_SIZE=40
+  DHDCFLAGS += -DMAX_HDR_READ=128
+  DHDCFLAGS += -DDHD_FIRSTREAD=128
+
+# bcn_timeout
+  DHDCFLAGS += -DCUSTOM_BCN_TIMEOUT_SETTING=5
+
+  DHDCFLAGS += -DWLFC_STATE_PREALLOC
+endif
+
+ifneq ($(CONFIG_BCMDHD_PCIE),)
+  DHDCFLAGS += -DPCIE_FULL_DONGLE -DBCMPCIE -DCUSTOM_DPC_PRIO_SETTING=-1
+# tput enhancement
+  DHDCFLAGS += -DCUSTOM_AMPDU_BA_WSIZE=64
+  DHDCFLAGS += -DCUSTOM_AMPDU_MPDU=32
+  DHDCFLAGS += -DCUSTOM_AMPDU_RELEASE=16
+  DHDCFLAGS += -DPROP_TXSTATUS_VSDB
+# Disable watchdog thread
+  DHDCFLAGS += -DCUSTOM_DHD_WATCHDOG_MS=0
+
+  DHDCFLAGS += -DMAX_CNTL_TX_TIMEOUT=1
+ifneq ($(CONFIG_ARCH_MSM),)
+  DHDCFLAGS += -DMSM_PCIE_LINKDOWN_RECOVERY
+endif
+ifeq ($(CONFIG_DHD_USE_STATIC_BUF),y)
+  DHDCFLAGS += -DDHD_USE_STATIC_IOCTLBUF
+endif
+
+  DHDCFLAGS += -DDONGLE_ENABLE_ISOLATION
+endif
+
+# Print 802.1X packets
+  DHDCFLAGS += -DDHD_8021X_DUMP
+# Print DHCP packets
+  DHDCFLAGS += -DDHD_DHCP_DUMP
+endif
+
+ifneq ($(CONFIG_BCM4339),)
+  DHDCFLAGS += -DBCM4339_CHIP -DHW_OOB
+
+  # tput enhancement
+  DHDCFLAGS += -DCUSTOM_GLOM_SETTING=8 -DCUSTOM_RXCHAIN=1
+  DHDCFLAGS += -DUSE_DYNAMIC_F2_BLKSIZE -DDYNAMIC_F2_BLKSIZE_FOR_NONLEGACY=128
+  DHDCFLAGS += -DBCMSDIOH_TXGLOM -DCUSTOM_TXGLOM=1 -DBCMSDIOH_TXGLOM_HIGHSPEED
+  DHDCFLAGS += -DDHDTCPACK_SUPPRESS
+  DHDCFLAGS += -DUSE_WL_TXBF
+  DHDCFLAGS += -DUSE_WL_FRAMEBURST
+  DHDCFLAGS += -DRXFRAME_THREAD
+  DHDCFLAGS += -DCUSTOM_AMPDU_BA_WSIZE=64
+  DHDCFLAGS += -DCUSTOM_DPC_CPUCORE=0
+  DHDCFLAGS += -DPROP_TXSTATUS_VSDB
+  DHDCFLAGS += -DCUSTOM_MAX_TXGLOM_SIZE=32
+
+  # New Features
+  DHDCFLAGS += -DWL11U
+  DHDCFLAGS += -DDHD_ENABLE_LPC
+  DHDCFLAGS += -DCUSTOM_PSPRETEND_THR=30
+endif
+
+
+#EXTRA_LDFLAGS += --strip-debug
+
+ifeq ($(DRIVER_TYPE),y)
+  DHDCFLAGS += -DENABLE_INSMOD_NO_FW_LOAD
+#  DHDCFLAGS += -DUSE_LATE_INITCALL_SYNC
+endif
+
+EXTRA_CFLAGS += $(DHDCFLAGS) -DDHD_DEBUG
+EXTRA_CFLAGS += -DSRCBASE=\"$(src)\"
+EXTRA_CFLAGS += -I$(src)/include/ -I$(src)/
+KBUILD_CFLAGS += -I$(LINUXDIR)/include -I$(shell pwd)
+
+DHDOFILES := dhd_pno.o dhd_common.o dhd_ip.o dhd_custom_gpio.o \
+	dhd_linux.o dhd_linux_sched.o dhd_cfg80211.o dhd_linux_wq.o aiutils.o bcmevent.o \
+	bcmutils.o bcmwifi_channels.o hndpmu.o linux_osl.o sbutils.o siutils.o \
+	wl_android.o wl_roam.o wl_cfg80211.o wl_cfgp2p.o wl_cfg_btcoex.o wldev_common.o wl_linux_mon.o  \
+	dhd_linux_platdev.o dhd_pno.o dhd_rtt.o dhd_linux_wq.o wl_cfg_btcoex.o \
+	hnd_pktq.o hnd_pktpool.o wl_cfgvendor.o
+
+ifneq ($(CONFIG_BCMDHD_SDIO),)
+  DHDOFILES += bcmsdh.o bcmsdh_linux.o bcmsdh_sdmmc.o bcmsdh_sdmmc_linux.o
+  DHDOFILES += dhd_cdc.o dhd_wlfc.o dhd_sdio.o
+endif
+
+ifneq ($(CONFIG_BCMDHD_PCIE),)
+  DHDOFILES += dhd_pcie.o dhd_pcie_linux.o dhd_msgbuf.o dhd_flowring.o
+  DHDOFILES += pcie_core.o
+endif
+
+bcmdhd-objs := $(DHDOFILES)
+obj-$(DRIVER_TYPE)   += bcmdhd.o
+
+all:
+	@echo "$(MAKE) --no-print-directory -C $(KDIR) SUBDIRS=$(CURDIR) modules"
+	@$(MAKE) --no-print-directory -C $(KDIR) SUBDIRS=$(CURDIR) modules
+
+clean:
+	rm -rf *.o *.ko *.mod.c *~ .*.cmd *.o.cmd .*.o.cmd \
+	Module.symvers modules.order .tmp_versions modules.builtin
+
+install:
+	@$(MAKE) --no-print-directory -C $(KDIR) \
+		SUBDIRS=$(CURDIR) modules_install
diff --git a/drivers/net/wireless/bcmdhd/aiutils.c b/drivers/net/wireless/bcmdhd/aiutils.c
new file mode 100644
index 0000000..9095894
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/aiutils.c
@@ -0,0 +1,1115 @@
+/*
+ * Misc utility routines for accessing chip-specific features
+ * of the SiliconBackplane-based Broadcom chips.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: aiutils.c 467150 2014-04-02 17:30:43Z $
+ */
+#include <bcm_cfg.h>
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <siutils.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+#include <pcicfg.h>
+
+#include "siutils_priv.h"
+
+#define BCM47162_DMP() (0)
+#define BCM5357_DMP() (0)
+#define BCM4707_DMP() (0)
+#define PMU_DMP() (0)
+#define remap_coreid(sih, coreid)	(coreid)
+#define remap_corerev(sih, corerev)	(corerev)
+
+/* EROM parsing */
+
+static uint32
+get_erom_ent(si_t *sih, uint32 **eromptr, uint32 mask, uint32 match)
+{
+	uint32 ent;
+	uint inv = 0, nom = 0;
+
+	while (TRUE) {
+		ent = R_REG(si_osh(sih), *eromptr);
+		(*eromptr)++;
+
+		if (mask == 0)
+			break;
+
+		if ((ent & ER_VALID) == 0) {
+			inv++;
+			continue;
+		}
+
+		if (ent == (ER_END | ER_VALID))
+			break;
+
+		if ((ent & mask) == match)
+			break;
+
+		nom++;
+	}
+
+	SI_VMSG(("%s: Returning ent 0x%08x\n", __FUNCTION__, ent));
+	if (inv + nom) {
+		SI_VMSG(("  after %d invalid and %d non-matching entries\n", inv, nom));
+	}
+	return ent;
+}
+
+static uint32
+get_asd(si_t *sih, uint32 **eromptr, uint sp, uint ad, uint st, uint32 *addrl, uint32 *addrh,
+        uint32 *sizel, uint32 *sizeh)
+{
+	uint32 asd, sz, szd;
+
+	asd = get_erom_ent(sih, eromptr, ER_VALID, ER_VALID);
+	if (((asd & ER_TAG1) != ER_ADD) ||
+	    (((asd & AD_SP_MASK) >> AD_SP_SHIFT) != sp) ||
+	    ((asd & AD_ST_MASK) != st)) {
+		/* This is not what we want, "push" it back */
+		(*eromptr)--;
+		return 0;
+	}
+	*addrl = asd & AD_ADDR_MASK;
+	if (asd & AD_AG32)
+		*addrh = get_erom_ent(sih, eromptr, 0, 0);
+	else
+		*addrh = 0;
+	*sizeh = 0;
+	sz = asd & AD_SZ_MASK;
+	if (sz == AD_SZ_SZD) {
+		szd = get_erom_ent(sih, eromptr, 0, 0);
+		*sizel = szd & SD_SZ_MASK;
+		if (szd & SD_SG32)
+			*sizeh = get_erom_ent(sih, eromptr, 0, 0);
+	} else
+		*sizel = AD_SZ_BASE << (sz >> AD_SZ_SHIFT);
+
+	SI_VMSG(("  SP %d, ad %d: st = %d, 0x%08x_0x%08x @ 0x%08x_0x%08x\n",
+	        sp, ad, st, *sizeh, *sizel, *addrh, *addrl));
+
+	return asd;
+}
+
+static void
+ai_hwfixup(si_info_t *sii)
+{
+}
+
+
+/* parse the enumeration rom to identify all cores */
+void
+ai_scan(si_t *sih, void *regs, uint devid)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	chipcregs_t *cc = (chipcregs_t *)regs;
+	uint32 erombase, *eromptr, *eromlim;
+
+	erombase = R_REG(sii->osh, &cc->eromptr);
+
+	switch (BUSTYPE(sih->bustype)) {
+	case SI_BUS:
+		eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE);
+		break;
+
+	case PCI_BUS:
+		/* Set wrappers address */
+		sii->curwrap = (void *)((uintptr)regs + SI_CORE_SIZE);
+
+		/* Now point the window at the erom */
+		OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, erombase);
+		eromptr = regs;
+		break;
+
+#ifdef BCMSDIO
+	case SPI_BUS:
+	case SDIO_BUS:
+		eromptr = (uint32 *)(uintptr)erombase;
+		break;
+#endif	/* BCMSDIO */
+
+	case PCMCIA_BUS:
+	default:
+		SI_ERROR(("Don't know how to do AXI enumertion on bus %d\n", sih->bustype));
+		ASSERT(0);
+		return;
+	}
+	eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32));
+
+	SI_VMSG(("ai_scan: regs = 0x%p, erombase = 0x%08x, eromptr = 0x%p, eromlim = 0x%p\n",
+	         regs, erombase, eromptr, eromlim));
+	while (eromptr < eromlim) {
+		uint32 cia, cib, cid, mfg, crev, nmw, nsw, nmp, nsp;
+		uint32 mpd, asd, addrl, addrh, sizel, sizeh;
+		uint i, j, idx;
+		bool br;
+
+		br = FALSE;
+
+		/* Grok a component */
+		cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI);
+		if (cia == (ER_END | ER_VALID)) {
+			SI_VMSG(("Found END of erom after %d cores\n", sii->numcores));
+			ai_hwfixup(sii);
+			return;
+		}
+
+		cib = get_erom_ent(sih, &eromptr, 0, 0);
+
+		if ((cib & ER_TAG) != ER_CI) {
+			SI_ERROR(("CIA not followed by CIB\n"));
+			goto error;
+		}
+
+		cid = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT;
+		mfg = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
+		crev = (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
+		nmw = (cib & CIB_NMW_MASK) >> CIB_NMW_SHIFT;
+		nsw = (cib & CIB_NSW_MASK) >> CIB_NSW_SHIFT;
+		nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
+		nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;
+
+#ifdef BCMDBG_SI
+		SI_VMSG(("Found component 0x%04x/0x%04x rev %d at erom addr 0x%p, with nmw = %d, "
+		         "nsw = %d, nmp = %d & nsp = %d\n",
+		         mfg, cid, crev, eromptr - 1, nmw, nsw, nmp, nsp));
+#else
+		BCM_REFERENCE(crev);
+#endif
+
+		if (((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) || (nsp == 0))
+			continue;
+		if ((nmw + nsw == 0)) {
+			/* A component which is not a core */
+			if (cid == OOB_ROUTER_CORE_ID) {
+				asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE,
+					&addrl, &addrh, &sizel, &sizeh);
+				if (asd != 0) {
+					sii->oob_router = addrl;
+				}
+			}
+			if (cid != GMAC_COMMON_4706_CORE_ID && cid != NS_CCB_CORE_ID &&
+				cid != PMU_CORE_ID && cid != GCI_CORE_ID)
+				continue;
+		}
+
+		idx = sii->numcores;
+
+		cores_info->cia[idx] = cia;
+		cores_info->cib[idx] = cib;
+		cores_info->coreid[idx] = remap_coreid(sih, cid);
+
+		for (i = 0; i < nmp; i++) {
+			mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
+			if ((mpd & ER_TAG) != ER_MP) {
+				SI_ERROR(("Not enough MP entries for component 0x%x\n", cid));
+				goto error;
+			}
+			SI_VMSG(("  Master port %d, mp: %d id: %d\n", i,
+			         (mpd & MPD_MP_MASK) >> MPD_MP_SHIFT,
+			         (mpd & MPD_MUI_MASK) >> MPD_MUI_SHIFT));
+		}
+
+		/* First Slave Address Descriptor should be port 0:
+		 * the main register space for the core
+		 */
+		asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh);
+		if (asd == 0) {
+			do {
+			/* Try again to see if it is a bridge */
+			asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh,
+			              &sizel, &sizeh);
+			if (asd != 0)
+				br = TRUE;
+			else {
+					if (br == TRUE) {
+						break;
+					}
+					else if ((addrh != 0) || (sizeh != 0) ||
+						(sizel != SI_CORE_SIZE)) {
+						SI_ERROR(("addrh = 0x%x\t sizeh = 0x%x\t size1 ="
+							"0x%x\n", addrh, sizeh, sizel));
+						SI_ERROR(("First Slave ASD for"
+							"core 0x%04x malformed "
+							"(0x%08x)\n", cid, asd));
+						goto error;
+					}
+				}
+			} while (1);
+		}
+		cores_info->coresba[idx] = addrl;
+		cores_info->coresba_size[idx] = sizel;
+		/* Get any more ASDs in port 0 */
+		j = 1;
+		do {
+			asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh,
+			              &sizel, &sizeh);
+			if ((asd != 0) && (j == 1) && (sizel == SI_CORE_SIZE)) {
+				cores_info->coresba2[idx] = addrl;
+				cores_info->coresba2_size[idx] = sizel;
+			}
+			j++;
+		} while (asd != 0);
+
+		/* Go through the ASDs for other slave ports */
+		for (i = 1; i < nsp; i++) {
+			j = 0;
+			do {
+				asd = get_asd(sih, &eromptr, i, j, AD_ST_SLAVE, &addrl, &addrh,
+				              &sizel, &sizeh);
+
+				if (asd == 0)
+					break;
+				j++;
+			} while (1);
+			if (j == 0) {
+				SI_ERROR((" SP %d has no address descriptors\n", i));
+				goto error;
+			}
+		}
+
+		/* Now get master wrappers */
+		for (i = 0; i < nmw; i++) {
+			asd = get_asd(sih, &eromptr, i, 0, AD_ST_MWRAP, &addrl, &addrh,
+			              &sizel, &sizeh);
+			if (asd == 0) {
+				SI_ERROR(("Missing descriptor for MW %d\n", i));
+				goto error;
+			}
+			if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
+				SI_ERROR(("Master wrapper %d is not 4KB\n", i));
+				goto error;
+			}
+			if (i == 0)
+				cores_info->wrapba[idx] = addrl;
+		}
+
+		/* And finally slave wrappers */
+		for (i = 0; i < nsw; i++) {
+			uint fwp = (nsp == 1) ? 0 : 1;
+			asd = get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP, &addrl, &addrh,
+			              &sizel, &sizeh);
+			if (asd == 0) {
+				SI_ERROR(("Missing descriptor for SW %d\n", i));
+				goto error;
+			}
+			if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
+				SI_ERROR(("Slave wrapper %d is not 4KB\n", i));
+				goto error;
+			}
+			if ((nmw == 0) && (i == 0))
+				cores_info->wrapba[idx] = addrl;
+		}
+
+
+		/* Don't record bridges */
+		if (br)
+			continue;
+
+		/* Done with core */
+		sii->numcores++;
+	}
+
+	SI_ERROR(("Reached end of erom without finding END"));
+
+error:
+	sii->numcores = 0;
+	return;
+}
+
+#define AI_SETCOREIDX_MAPSIZE(coreid) \
+	(((coreid) == NS_CCB_CORE_ID) ? 15 * SI_CORE_SIZE : SI_CORE_SIZE)
+
+/* This function changes the logical "focus" to the indicated core.
+ * Return the current core's virtual address.
+ */
+void *
+ai_setcoreidx(si_t *sih, uint coreidx)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint32 addr, wrap;
+	void *regs;
+
+	if (coreidx >= MIN(sii->numcores, SI_MAXCORES))
+		return (NULL);
+
+	addr = cores_info->coresba[coreidx];
+	wrap = cores_info->wrapba[coreidx];
+
+	/*
+	 * If the user has provided an interrupt mask enabled function,
+	 * then assert interrupts are disabled before switching the core.
+	 */
+	ASSERT((sii->intrsenabled_fn == NULL) || !(*(sii)->intrsenabled_fn)((sii)->intr_arg));
+
+	switch (BUSTYPE(sih->bustype)) {
+	case SI_BUS:
+		/* map new one */
+		if (!cores_info->regs[coreidx]) {
+			cores_info->regs[coreidx] = REG_MAP(addr,
+				AI_SETCOREIDX_MAPSIZE(cores_info->coreid[coreidx]));
+			ASSERT(GOODREGS(cores_info->regs[coreidx]));
+		}
+		sii->curmap = regs = cores_info->regs[coreidx];
+		if (!cores_info->wrappers[coreidx] && (wrap != 0)) {
+			cores_info->wrappers[coreidx] = REG_MAP(wrap, SI_CORE_SIZE);
+			ASSERT(GOODREGS(cores_info->wrappers[coreidx]));
+		}
+		sii->curwrap = cores_info->wrappers[coreidx];
+		break;
+
+	case PCI_BUS:
+		/* point bar0 window */
+		OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, addr);
+		regs = sii->curmap;
+		/* point bar0 2nd 4KB window to the primary wrapper */
+		if (PCIE_GEN2(sii))
+			OSL_PCI_WRITE_CONFIG(sii->osh, PCIE2_BAR0_WIN2, 4, wrap);
+		else
+			OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN2, 4, wrap);
+		break;
+
+#ifdef BCMSDIO
+	case SPI_BUS:
+	case SDIO_BUS:
+		sii->curmap = regs = (void *)((uintptr)addr);
+		sii->curwrap = (void *)((uintptr)wrap);
+		break;
+#endif	/* BCMSDIO */
+
+	case PCMCIA_BUS:
+	default:
+		ASSERT(0);
+		regs = NULL;
+		break;
+	}
+
+	sii->curmap = regs;
+	sii->curidx = coreidx;
+
+	return regs;
+}
+
+
+void
+ai_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	chipcregs_t *cc = NULL;
+	uint32 erombase, *eromptr, *eromlim;
+	uint i, j, cidx;
+	uint32 cia, cib, nmp, nsp;
+	uint32 asd, addrl, addrh, sizel, sizeh;
+
+	for (i = 0; i < sii->numcores; i++) {
+		if (cores_info->coreid[i] == CC_CORE_ID) {
+			cc = (chipcregs_t *)cores_info->regs[i];
+			break;
+		}
+	}
+	if (cc == NULL)
+		goto error;
+
+	erombase = R_REG(sii->osh, &cc->eromptr);
+	eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE);
+	eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32));
+
+	cidx = sii->curidx;
+	cia = cores_info->cia[cidx];
+	cib = cores_info->cib[cidx];
+
+	nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
+	nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;
+
+	/* scan for cores */
+	while (eromptr < eromlim) {
+		if ((get_erom_ent(sih, &eromptr, ER_TAG, ER_CI) == cia) &&
+			(get_erom_ent(sih, &eromptr, 0, 0) == cib)) {
+			break;
+		}
+	}
+
+	/* skip master ports */
+	for (i = 0; i < nmp; i++)
+		get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
+
+	/* Skip ASDs in port 0 */
+	asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh);
+	if (asd == 0) {
+		/* Try again to see if it is a bridge */
+		asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh,
+		              &sizel, &sizeh);
+	}
+
+	j = 1;
+	do {
+		asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh,
+		              &sizel, &sizeh);
+		j++;
+	} while (asd != 0);
+
+	/* Go through the ASDs for other slave ports */
+	for (i = 1; i < nsp; i++) {
+		j = 0;
+		do {
+			asd = get_asd(sih, &eromptr, i, j, AD_ST_SLAVE, &addrl, &addrh,
+				&sizel, &sizeh);
+			if (asd == 0)
+				break;
+
+			if (!asidx--) {
+				*addr = addrl;
+				*size = sizel;
+				return;
+			}
+			j++;
+		} while (1);
+
+		if (j == 0) {
+			SI_ERROR((" SP %d has no address descriptors\n", i));
+			break;
+		}
+	}
+
+error:
+	*size = 0;
+	return;
+}
+
+/* Return the number of address spaces in current core */
+int
+ai_numaddrspaces(si_t *sih)
+{
+	return 2;
+}
+
+/* Return the address of the nth address space in the current core */
+uint32
+ai_addrspace(si_t *sih, uint asidx)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint cidx;
+
+	cidx = sii->curidx;
+
+	if (asidx == 0)
+		return cores_info->coresba[cidx];
+	else if (asidx == 1)
+		return cores_info->coresba2[cidx];
+	else {
+		SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n",
+		          __FUNCTION__, asidx));
+		return 0;
+	}
+}
+
+/* Return the size of the nth address space in the current core */
+uint32
+ai_addrspacesize(si_t *sih, uint asidx)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint cidx;
+
+	cidx = sii->curidx;
+
+	if (asidx == 0)
+		return cores_info->coresba_size[cidx];
+	else if (asidx == 1)
+		return cores_info->coresba2_size[cidx];
+	else {
+		SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n",
+		          __FUNCTION__, asidx));
+		return 0;
+	}
+}
+
+uint
+ai_flag(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	aidmp_t *ai;
+
+	if (BCM47162_DMP()) {
+		SI_ERROR(("%s: Attempting to read MIPS DMP registers on 47162a0", __FUNCTION__));
+		return sii->curidx;
+	}
+	if (BCM5357_DMP()) {
+		SI_ERROR(("%s: Attempting to read USB20H DMP registers on 5357b0\n", __FUNCTION__));
+		return sii->curidx;
+	}
+	if (BCM4707_DMP()) {
+		SI_ERROR(("%s: Attempting to read CHIPCOMMONB DMP registers on 4707\n",
+			__FUNCTION__));
+		return sii->curidx;
+	}
+
+#ifdef REROUTE_OOBINT
+	if (PMU_DMP()) {
+		SI_ERROR(("%s: Attempting to read PMU DMP registers\n",
+			__FUNCTION__));
+		return PMU_OOB_BIT;
+	}
+#endif /* REROUTE_OOBINT */
+
+	ai = sii->curwrap;
+	ASSERT(ai != NULL);
+
+	return (R_REG(sii->osh, &ai->oobselouta30) & 0x1f);
+}
+
+uint
+ai_flag_alt(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	aidmp_t *ai;
+
+	if (BCM47162_DMP()) {
+		SI_ERROR(("%s: Attempting to read MIPS DMP registers on 47162a0", __FUNCTION__));
+		return sii->curidx;
+	}
+	if (BCM5357_DMP()) {
+		SI_ERROR(("%s: Attempting to read USB20H DMP registers on 5357b0\n", __FUNCTION__));
+		return sii->curidx;
+	}
+	if (BCM4707_DMP()) {
+		SI_ERROR(("%s: Attempting to read CHIPCOMMONB DMP registers on 4707\n",
+			__FUNCTION__));
+		return sii->curidx;
+	}
+#ifdef REROUTE_OOBINT
+	if (PMU_DMP()) {
+		SI_ERROR(("%s: Attempting to read PMU DMP registers\n",
+			__FUNCTION__));
+		return PMU_OOB_BIT;
+	}
+#endif /* REROUTE_OOBINT */
+
+	ai = sii->curwrap;
+
+	return ((R_REG(sii->osh, &ai->oobselouta30) >> AI_OOBSEL_1_SHIFT) & AI_OOBSEL_MASK);
+}
+
+void
+ai_setint(si_t *sih, int siflag)
+{
+}
+
+uint
+ai_wrap_reg(si_t *sih, uint32 offset, uint32 mask, uint32 val)
+{
+	si_info_t *sii = SI_INFO(sih);
+	uint32 *map = (uint32 *) sii->curwrap;
+
+	if (mask || val) {
+		uint32 w = R_REG(sii->osh, map+(offset/4));
+		w &= ~mask;
+		w |= val;
+		W_REG(sii->osh, map+(offset/4), w);
+	}
+
+	return (R_REG(sii->osh, map+(offset/4)));
+}
+
+uint
+ai_corevendor(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint32 cia;
+
+	cia = cores_info->cia[sii->curidx];
+	return ((cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT);
+}
+
+uint
+ai_corerev(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint32 cib;
+
+
+	cib = cores_info->cib[sii->curidx];
+	return remap_corerev(sih, (cib & CIB_REV_MASK) >> CIB_REV_SHIFT);
+}
+
+bool
+ai_iscoreup(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	aidmp_t *ai;
+
+	ai = sii->curwrap;
+
+	return (((R_REG(sii->osh, &ai->ioctrl) & (SICF_FGC | SICF_CLOCK_EN)) == SICF_CLOCK_EN) &&
+	        ((R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) == 0));
+}
+
+/*
+ * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
+ * switch back to the original core, and return the new value.
+ *
+ * When using the silicon backplane, no fiddling with interrupts or core switches is needed.
+ *
+ * Also, when using pci/pcie, we can optimize away the core switching for pci registers
+ * and (on newer pci cores) chipcommon registers.
+ */
+uint
+ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
+{
+	uint origidx = 0;
+	uint32 *r = NULL;
+	uint w;
+	uint intr_val = 0;
+	bool fast = FALSE;
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+
+	ASSERT(GOODIDX(coreidx));
+	ASSERT(regoff < SI_CORE_SIZE);
+	ASSERT((val & ~mask) == 0);
+
+	if (coreidx >= SI_MAXCORES)
+		return 0;
+
+	if (BUSTYPE(sih->bustype) == SI_BUS) {
+		/* If internal bus, we can always get at everything */
+		fast = TRUE;
+		/* map if does not exist */
+		if (!cores_info->regs[coreidx]) {
+			cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
+			                            SI_CORE_SIZE);
+			ASSERT(GOODREGS(cores_info->regs[coreidx]));
+		}
+		r = (uint32 *)((uchar *)cores_info->regs[coreidx] + regoff);
+	} else if (BUSTYPE(sih->bustype) == PCI_BUS) {
+		/* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
+
+		if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
+			/* Chipc registers are mapped at 12KB */
+
+			fast = TRUE;
+			r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff);
+		} else if (sii->pub.buscoreidx == coreidx) {
+			/* pci registers are at either in the last 2KB of an 8KB window
+			 * or, in pcie and pci rev 13 at 8KB
+			 */
+			fast = TRUE;
+			if (SI_FAST(sii))
+				r = (uint32 *)((char *)sii->curmap +
+				               PCI_16KB0_PCIREGS_OFFSET + regoff);
+			else
+				r = (uint32 *)((char *)sii->curmap +
+				               ((regoff >= SBCONFIGOFF) ?
+				                PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
+				               regoff);
+		}
+	}
+
+	if (!fast) {
+		INTR_OFF(sii, intr_val);
+
+		/* save current core index */
+		origidx = si_coreidx(&sii->pub);
+
+		/* switch core */
+		r = (uint32*) ((uchar*) ai_setcoreidx(&sii->pub, coreidx) + regoff);
+	}
+	ASSERT(r != NULL);
+
+	/* mask and set */
+	if (mask || val) {
+		w = (R_REG(sii->osh, r) & ~mask) | val;
+		W_REG(sii->osh, r, w);
+	}
+
+	/* readback */
+	w = R_REG(sii->osh, r);
+
+	if (!fast) {
+		/* restore core index */
+		if (origidx != coreidx)
+			ai_setcoreidx(&sii->pub, origidx);
+
+		INTR_RESTORE(sii, intr_val);
+	}
+
+	return (w);
+}
+
+/*
+ * If there is no need for fiddling with interrupts or core switches (typically silicon
+ * back plane registers, pci registers and chipcommon registers), this function
+ * returns the register offset on this core to a mapped address. This address can
+ * be used for W_REG/R_REG directly.
+ *
+ * For accessing registers that would need a core switch, this function will return
+ * NULL.
+ */
+uint32 *
+ai_corereg_addr(si_t *sih, uint coreidx, uint regoff)
+{
+	uint32 *r = NULL;
+	bool fast = FALSE;
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+
+	ASSERT(GOODIDX(coreidx));
+	ASSERT(regoff < SI_CORE_SIZE);
+
+	if (coreidx >= SI_MAXCORES)
+		return 0;
+
+	if (BUSTYPE(sih->bustype) == SI_BUS) {
+		/* If internal bus, we can always get at everything */
+		fast = TRUE;
+		/* map if does not exist */
+		if (!cores_info->regs[coreidx]) {
+			cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
+			                            SI_CORE_SIZE);
+			ASSERT(GOODREGS(cores_info->regs[coreidx]));
+		}
+		r = (uint32 *)((uchar *)cores_info->regs[coreidx] + regoff);
+	} else if (BUSTYPE(sih->bustype) == PCI_BUS) {
+		/* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
+
+		if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
+			/* Chipc registers are mapped at 12KB */
+
+			fast = TRUE;
+			r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff);
+		} else if (sii->pub.buscoreidx == coreidx) {
+			/* pci registers are at either in the last 2KB of an 8KB window
+			 * or, in pcie and pci rev 13 at 8KB
+			 */
+			fast = TRUE;
+			if (SI_FAST(sii))
+				r = (uint32 *)((char *)sii->curmap +
+				               PCI_16KB0_PCIREGS_OFFSET + regoff);
+			else
+				r = (uint32 *)((char *)sii->curmap +
+				               ((regoff >= SBCONFIGOFF) ?
+				                PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
+				               regoff);
+		}
+	}
+
+	if (!fast)
+		return 0;
+
+	return (r);
+}
+
+void
+ai_core_disable(si_t *sih, uint32 bits)
+{
+	si_info_t *sii = SI_INFO(sih);
+	volatile uint32 dummy;
+	uint32 status;
+	aidmp_t *ai;
+
+
+	ASSERT(GOODREGS(sii->curwrap));
+	ai = sii->curwrap;
+
+	/* if core is already in reset, just return */
+	if (R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET)
+		return;
+
+	/* ensure there are no pending backplane operations */
+	SPINWAIT(((status = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);
+
+	/* if pending backplane ops still, try waiting longer */
+	if (status != 0) {
+		/* 300usecs was sufficient to allow backplane ops to clear for big hammer */
+		/* during driver load we may need more time */
+		SPINWAIT(((status = R_REG(sii->osh, &ai->resetstatus)) != 0), 10000);
+		/* if still pending ops, continue on and try disable anyway */
+		/* this is in big hammer path, so don't call wl_reinit in this case... */
+	}
+
+	W_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
+	dummy = R_REG(sii->osh, &ai->resetctrl);
+	BCM_REFERENCE(dummy);
+	OSL_DELAY(1);
+
+	W_REG(sii->osh, &ai->ioctrl, bits);
+	dummy = R_REG(sii->osh, &ai->ioctrl);
+	BCM_REFERENCE(dummy);
+	OSL_DELAY(10);
+}
+
+/* reset and re-enable a core
+ * inputs:
+ * bits - core specific bits that are set during and after reset sequence
+ * resetbits - core specific bits that are set only during reset sequence
+ */
+void
+ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
+{
+	si_info_t *sii = SI_INFO(sih);
+	aidmp_t *ai;
+	volatile uint32 dummy;
+	uint loop_counter = 10;
+
+	ASSERT(GOODREGS(sii->curwrap));
+	ai = sii->curwrap;
+
+	/* ensure there are no pending backplane operations */
+	SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);
+
+
+	/* put core into reset state */
+	W_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
+	OSL_DELAY(10);
+
+	/* ensure there are no pending backplane operations */
+	SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);
+
+	W_REG(sii->osh, &ai->ioctrl, (bits | resetbits | SICF_FGC | SICF_CLOCK_EN));
+	dummy = R_REG(sii->osh, &ai->ioctrl);
+	BCM_REFERENCE(dummy);
+
+	/* ensure there are no pending backplane operations */
+	SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);
+
+
+	while (R_REG(sii->osh, &ai->resetctrl) != 0 && --loop_counter != 0) {
+		/* ensure there are no pending backplane operations */
+		SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);
+
+
+		/* take core out of reset */
+		W_REG(sii->osh, &ai->resetctrl, 0);
+
+		/* ensure there are no pending backplane operations */
+		SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);
+	}
+
+
+	W_REG(sii->osh, &ai->ioctrl, (bits | SICF_CLOCK_EN));
+	dummy = R_REG(sii->osh, &ai->ioctrl);
+	BCM_REFERENCE(dummy);
+	OSL_DELAY(1);
+}
+
+void
+ai_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
+{
+	si_info_t *sii = SI_INFO(sih);
+	aidmp_t *ai;
+	uint32 w;
+
+
+	if (BCM47162_DMP()) {
+		SI_ERROR(("%s: Accessing MIPS DMP register (ioctrl) on 47162a0",
+		          __FUNCTION__));
+		return;
+	}
+	if (BCM5357_DMP()) {
+		SI_ERROR(("%s: Accessing USB20H DMP register (ioctrl) on 5357\n",
+		          __FUNCTION__));
+		return;
+	}
+	if (BCM4707_DMP()) {
+		SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n",
+			__FUNCTION__));
+		return;
+	}
+	if (PMU_DMP()) {
+		SI_ERROR(("%s: Accessing PMU DMP register (ioctrl)\n",
+			__FUNCTION__));
+		return;
+	}
+
+	ASSERT(GOODREGS(sii->curwrap));
+	ai = sii->curwrap;
+
+	ASSERT((val & ~mask) == 0);
+
+	if (mask || val) {
+		w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
+		W_REG(sii->osh, &ai->ioctrl, w);
+	}
+}
+
+uint32
+ai_core_cflags(si_t *sih, uint32 mask, uint32 val)
+{
+	si_info_t *sii = SI_INFO(sih);
+	aidmp_t *ai;
+	uint32 w;
+
+	if (BCM47162_DMP()) {
+		SI_ERROR(("%s: Accessing MIPS DMP register (ioctrl) on 47162a0",
+		          __FUNCTION__));
+		return 0;
+	}
+	if (BCM5357_DMP()) {
+		SI_ERROR(("%s: Accessing USB20H DMP register (ioctrl) on 5357\n",
+		          __FUNCTION__));
+		return 0;
+	}
+	if (BCM4707_DMP()) {
+		SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n",
+			__FUNCTION__));
+		return 0;
+	}
+
+	if (PMU_DMP()) {
+		SI_ERROR(("%s: Accessing PMU DMP register (ioctrl)\n",
+			__FUNCTION__));
+		return 0;
+	}
+	ASSERT(GOODREGS(sii->curwrap));
+	ai = sii->curwrap;
+
+	ASSERT((val & ~mask) == 0);
+
+	if (mask || val) {
+		w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
+		W_REG(sii->osh, &ai->ioctrl, w);
+	}
+
+	return R_REG(sii->osh, &ai->ioctrl);
+}
+
+uint32
+ai_core_sflags(si_t *sih, uint32 mask, uint32 val)
+{
+	si_info_t *sii = SI_INFO(sih);
+	aidmp_t *ai;
+	uint32 w;
+
+	if (BCM47162_DMP()) {
+		SI_ERROR(("%s: Accessing MIPS DMP register (iostatus) on 47162a0",
+		          __FUNCTION__));
+		return 0;
+	}
+	if (BCM5357_DMP()) {
+		SI_ERROR(("%s: Accessing USB20H DMP register (iostatus) on 5357\n",
+		          __FUNCTION__));
+		return 0;
+	}
+	if (BCM4707_DMP()) {
+		SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n",
+			__FUNCTION__));
+		return 0;
+	}
+	if (PMU_DMP()) {
+		SI_ERROR(("%s: Accessing PMU DMP register (ioctrl)\n",
+			__FUNCTION__));
+		return 0;
+	}
+
+	ASSERT(GOODREGS(sii->curwrap));
+	ai = sii->curwrap;
+
+	ASSERT((val & ~mask) == 0);
+	ASSERT((mask & ~SISF_CORE_BITS) == 0);
+
+	if (mask || val) {
+		w = ((R_REG(sii->osh, &ai->iostatus) & ~mask) | val);
+		W_REG(sii->osh, &ai->iostatus, w);
+	}
+
+	return R_REG(sii->osh, &ai->iostatus);
+}
+
+#if defined(BCMDBG_PHYDUMP)
+/* print interesting aidmp registers */
+void
+ai_dumpregs(si_t *sih, struct bcmstrbuf *b)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	osl_t *osh;
+	aidmp_t *ai;
+	uint i;
+
+	osh = sii->osh;
+
+	for (i = 0; i < sii->numcores; i++) {
+		si_setcoreidx(&sii->pub, i);
+		ai = sii->curwrap;
+
+		bcm_bprintf(b, "core 0x%x: \n", cores_info->coreid[i]);
+		if (BCM47162_DMP()) {
+			bcm_bprintf(b, "Skipping mips74k in 47162a0\n");
+			continue;
+		}
+		if (BCM5357_DMP()) {
+			bcm_bprintf(b, "Skipping usb20h in 5357\n");
+			continue;
+		}
+		if (BCM4707_DMP()) {
+			bcm_bprintf(b, "Skipping chipcommonb in 4707\n");
+			continue;
+		}
+
+		if (PMU_DMP()) {
+			bcm_bprintf(b, "Skipping pmu core\n");
+			continue;
+		}
+
+		bcm_bprintf(b, "ioctrlset 0x%x ioctrlclear 0x%x ioctrl 0x%x iostatus 0x%x"
+			    "ioctrlwidth 0x%x iostatuswidth 0x%x\n"
+			    "resetctrl 0x%x resetstatus 0x%x resetreadid 0x%x resetwriteid 0x%x\n"
+			    "errlogctrl 0x%x errlogdone 0x%x errlogstatus 0x%x"
+			    "errlogaddrlo 0x%x errlogaddrhi 0x%x\n"
+			    "errlogid 0x%x errloguser 0x%x errlogflags 0x%x\n"
+			    "intstatus 0x%x config 0x%x itcr 0x%x\n",
+			    R_REG(osh, &ai->ioctrlset),
+			    R_REG(osh, &ai->ioctrlclear),
+			    R_REG(osh, &ai->ioctrl),
+			    R_REG(osh, &ai->iostatus),
+			    R_REG(osh, &ai->ioctrlwidth),
+			    R_REG(osh, &ai->iostatuswidth),
+			    R_REG(osh, &ai->resetctrl),
+			    R_REG(osh, &ai->resetstatus),
+			    R_REG(osh, &ai->resetreadid),
+			    R_REG(osh, &ai->resetwriteid),
+			    R_REG(osh, &ai->errlogctrl),
+			    R_REG(osh, &ai->errlogdone),
+			    R_REG(osh, &ai->errlogstatus),
+			    R_REG(osh, &ai->errlogaddrlo),
+			    R_REG(osh, &ai->errlogaddrhi),
+			    R_REG(osh, &ai->errlogid),
+			    R_REG(osh, &ai->errloguser),
+			    R_REG(osh, &ai->errlogflags),
+			    R_REG(osh, &ai->intstatus),
+			    R_REG(osh, &ai->config),
+			    R_REG(osh, &ai->itcr));
+	}
+}
+#endif	
diff --git a/drivers/net/wireless/bcmdhd/bcmevent.c b/drivers/net/wireless/bcmdhd/bcmevent.c
new file mode 100644
index 0000000..f55bb68
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmevent.c
@@ -0,0 +1,189 @@
+/*
+ * bcmevent read-only data shared by kernel or app layers
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: bcmevent.c 470794 2014-04-16 12:01:41Z $
+ */
+
+#include <typedefs.h>
+#include <bcmutils.h>
+#include <proto/ethernet.h>
+#include <proto/bcmeth.h>
+#include <proto/bcmevent.h>
+
+
+/* Table of event name strings for UIs and debugging dumps */
+typedef struct {
+	uint event;
+	const char *name;
+} bcmevent_name_str_t;
+
+/* Use the actual name for event tracing */
+#define BCMEVENT_NAME(_event) {(_event), #_event}
+
+static const bcmevent_name_str_t bcmevent_names[] = {
+	BCMEVENT_NAME(WLC_E_SET_SSID),
+	BCMEVENT_NAME(WLC_E_JOIN),
+	BCMEVENT_NAME(WLC_E_START),
+	BCMEVENT_NAME(WLC_E_AUTH),
+	BCMEVENT_NAME(WLC_E_AUTH_IND),
+	BCMEVENT_NAME(WLC_E_DEAUTH),
+	BCMEVENT_NAME(WLC_E_DEAUTH_IND),
+	BCMEVENT_NAME(WLC_E_ASSOC),
+	BCMEVENT_NAME(WLC_E_ASSOC_IND),
+	BCMEVENT_NAME(WLC_E_REASSOC),
+	BCMEVENT_NAME(WLC_E_REASSOC_IND),
+	BCMEVENT_NAME(WLC_E_DISASSOC),
+	BCMEVENT_NAME(WLC_E_DISASSOC_IND),
+	BCMEVENT_NAME(WLC_E_QUIET_START),
+	BCMEVENT_NAME(WLC_E_QUIET_END),
+	BCMEVENT_NAME(WLC_E_BEACON_RX),
+	BCMEVENT_NAME(WLC_E_LINK),
+	BCMEVENT_NAME(WLC_E_MIC_ERROR),
+	BCMEVENT_NAME(WLC_E_NDIS_LINK),
+	BCMEVENT_NAME(WLC_E_ROAM),
+	BCMEVENT_NAME(WLC_E_TXFAIL),
+	BCMEVENT_NAME(WLC_E_PMKID_CACHE),
+	BCMEVENT_NAME(WLC_E_RETROGRADE_TSF),
+	BCMEVENT_NAME(WLC_E_PRUNE),
+	BCMEVENT_NAME(WLC_E_AUTOAUTH),
+	BCMEVENT_NAME(WLC_E_EAPOL_MSG),
+	BCMEVENT_NAME(WLC_E_SCAN_COMPLETE),
+	BCMEVENT_NAME(WLC_E_ADDTS_IND),
+	BCMEVENT_NAME(WLC_E_DELTS_IND),
+	BCMEVENT_NAME(WLC_E_BCNSENT_IND),
+	BCMEVENT_NAME(WLC_E_BCNRX_MSG),
+	BCMEVENT_NAME(WLC_E_BCNLOST_MSG),
+	BCMEVENT_NAME(WLC_E_ROAM_PREP),
+	BCMEVENT_NAME(WLC_E_PFN_NET_FOUND),
+	BCMEVENT_NAME(WLC_E_PFN_NET_LOST),
+#if defined(IBSS_PEER_DISCOVERY_EVENT)
+	BCMEVENT_NAME(WLC_E_IBSS_ASSOC),
+#endif /* defined(IBSS_PEER_DISCOVERY_EVENT) */
+	BCMEVENT_NAME(WLC_E_RADIO),
+	BCMEVENT_NAME(WLC_E_PSM_WATCHDOG),
+	BCMEVENT_NAME(WLC_E_PROBREQ_MSG),
+	BCMEVENT_NAME(WLC_E_SCAN_CONFIRM_IND),
+	BCMEVENT_NAME(WLC_E_PSK_SUP),
+	BCMEVENT_NAME(WLC_E_COUNTRY_CODE_CHANGED),
+	BCMEVENT_NAME(WLC_E_EXCEEDED_MEDIUM_TIME),
+	BCMEVENT_NAME(WLC_E_ICV_ERROR),
+	BCMEVENT_NAME(WLC_E_UNICAST_DECODE_ERROR),
+	BCMEVENT_NAME(WLC_E_MULTICAST_DECODE_ERROR),
+	BCMEVENT_NAME(WLC_E_TRACE),
+	BCMEVENT_NAME(WLC_E_IF),
+#ifdef WLP2P
+	BCMEVENT_NAME(WLC_E_P2P_DISC_LISTEN_COMPLETE),
+#endif
+	BCMEVENT_NAME(WLC_E_RSSI),
+	BCMEVENT_NAME(WLC_E_PFN_SCAN_COMPLETE),
+	BCMEVENT_NAME(WLC_E_EXTLOG_MSG),
+#ifdef WIFI_ACT_FRAME
+	BCMEVENT_NAME(WLC_E_ACTION_FRAME),
+	BCMEVENT_NAME(WLC_E_ACTION_FRAME_RX),
+	BCMEVENT_NAME(WLC_E_ACTION_FRAME_COMPLETE),
+#endif
+	BCMEVENT_NAME(WLC_E_ESCAN_RESULT),
+	BCMEVENT_NAME(WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE),
+#ifdef WLP2P
+	BCMEVENT_NAME(WLC_E_PROBRESP_MSG),
+	BCMEVENT_NAME(WLC_E_P2P_PROBREQ_MSG),
+#endif
+#ifdef PROP_TXSTATUS
+	BCMEVENT_NAME(WLC_E_FIFO_CREDIT_MAP),
+#endif
+	BCMEVENT_NAME(WLC_E_WAKE_EVENT),
+	BCMEVENT_NAME(WLC_E_DCS_REQUEST),
+	BCMEVENT_NAME(WLC_E_RM_COMPLETE),
+#ifdef WLMEDIA_HTSF
+	BCMEVENT_NAME(WLC_E_HTSFSYNC),
+#endif
+	BCMEVENT_NAME(WLC_E_OVERLAY_REQ),
+	BCMEVENT_NAME(WLC_E_CSA_COMPLETE_IND),
+	BCMEVENT_NAME(WLC_E_EXCESS_PM_WAKE_EVENT),
+	BCMEVENT_NAME(WLC_E_PFN_SCAN_NONE),
+	BCMEVENT_NAME(WLC_E_PFN_SCAN_ALLGONE),
+#ifdef SOFTAP
+	BCMEVENT_NAME(WLC_E_GTK_PLUMBED),
+#endif
+	BCMEVENT_NAME(WLC_E_ASSOC_REQ_IE),
+	BCMEVENT_NAME(WLC_E_ASSOC_RESP_IE),
+	BCMEVENT_NAME(WLC_E_BEACON_FRAME_RX),
+#ifdef WLTDLS
+	BCMEVENT_NAME(WLC_E_TDLS_PEER_EVENT),
+#endif /* WLTDLS */
+	BCMEVENT_NAME(WLC_E_NATIVE),
+#ifdef WLPKTDLYSTAT
+	BCMEVENT_NAME(WLC_E_PKTDELAY_IND),
+#endif /* WLPKTDLYSTAT */
+	BCMEVENT_NAME(WLC_E_SERVICE_FOUND),
+	BCMEVENT_NAME(WLC_E_GAS_FRAGMENT_RX),
+	BCMEVENT_NAME(WLC_E_GAS_COMPLETE),
+	BCMEVENT_NAME(WLC_E_P2PO_ADD_DEVICE),
+	BCMEVENT_NAME(WLC_E_P2PO_DEL_DEVICE),
+#ifdef WLWNM
+	BCMEVENT_NAME(WLC_E_WNM_STA_SLEEP),
+#endif /* WLWNM */
+#if defined(WL_PROXDETECT)
+	BCMEVENT_NAME(WLC_E_PROXD),
+#endif
+	BCMEVENT_NAME(WLC_E_CCA_CHAN_QUAL),
+	BCMEVENT_NAME(WLC_E_BSSID),
+#ifdef PROP_TXSTATUS
+	BCMEVENT_NAME(WLC_E_BCMC_CREDIT_SUPPORT),
+#endif
+	BCMEVENT_NAME(WLC_E_TXFAIL_THRESH),
+#ifdef GSCAN_SUPPORT
+	{ WLC_E_PFN_GSCAN_FULL_RESULT, "PFN_GSCAN_FULL_RESULT"},
+	{ WLC_E_PFN_SWC, "PFN_SIGNIFICANT_WIFI_CHANGE"}
+#endif /* GSCAN_SUPPORT */
+#ifdef WLBSSLOAD_REPORT
+	BCMEVENT_NAME(WLC_E_BSS_LOAD),
+#endif
+#if defined(BT_WIFI_HANDOVER) || defined(WL_TBOW)
+	BCMEVENT_NAME(WLC_E_BT_WIFI_HANDOVER_REQ),
+#endif
+};
+
+
+const char *bcmevent_get_name(uint event_type)
+{
+	/* note:  first coded this as a static const but some
+	 * ROMs already have something called event_name so
+	 * changed it so we don't have a variable for the
+	 * 'unknown string
+	 */
+	const char *event_name = NULL;
+
+	uint idx;
+	for (idx = 0; idx < (uint)ARRAYSIZE(bcmevent_names); idx++) {
+
+		if (bcmevent_names[idx].event == event_type) {
+			event_name = bcmevent_names[idx].name;
+			break;
+		}
+	}
+
+	/* if we find an event name in the array, return it.
+	 * otherwise return unknown string.
+	 */
+	return ((event_name) ? event_name : "Unknown Event");
+}
diff --git a/drivers/net/wireless/bcmdhd/bcmsdh.c b/drivers/net/wireless/bcmdhd/bcmsdh.c
new file mode 100644
index 0000000..5ee526b
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmsdh.c
@@ -0,0 +1,705 @@
+/*
+ *  BCMSDH interface glue
+ *  implement bcmsdh API for SDIOH driver
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdh.c 450676 2014-01-22 22:45:13Z $
+ */
+
+/**
+ * @file bcmsdh.c
+ */
+
+/* ****************** BCMSDH Interface Functions *************************** */
+
+#include <typedefs.h>
+#include <bcmdevs.h>
+#include <bcmendian.h>
+#include <bcmutils.h>
+#include <hndsoc.h>
+#include <siutils.h>
+#include <osl.h>
+
+#include <bcmsdh.h>	/* BRCM API for SDIO clients (such as wl, dhd) */
+#include <bcmsdbus.h>	/* common SDIO/controller interface */
+#include <sbsdio.h>	/* SDIO device core hardware definitions. */
+#include <sdio.h>	/* SDIO Device and Protocol Specs */
+
+#define SDIOH_API_ACCESS_RETRY_LIMIT	2
+const uint bcmsdh_msglevel = BCMSDH_ERROR_VAL;
+
+/* local copy of bcm sd handler */
+bcmsdh_info_t * l_bcmsdh = NULL;
+
+
+#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
+extern int
+sdioh_enable_hw_oob_intr(void *sdioh, bool enable);
+
+void
+bcmsdh_enable_hw_oob_intr(bcmsdh_info_t *sdh, bool enable)
+{
+	sdioh_enable_hw_oob_intr(sdh->sdioh, enable);
+}
+#endif
+
+/* Attach BCMSDH layer to SDIO Host Controller Driver
+ *
+ * @param osh OSL Handle.
+ * @param cfghdl Configuration Handle.
+ * @param regsva Virtual address of controller registers.
+ * @param irq Interrupt number of SDIO controller.
+ *
+ * @return bcmsdh_info_t Handle to BCMSDH context.
+ */
+bcmsdh_info_t *
+bcmsdh_attach(osl_t *osh, void *sdioh, ulong *regsva)
+{
+	bcmsdh_info_t *bcmsdh;
+
+	if ((bcmsdh = (bcmsdh_info_t *)MALLOC(osh, sizeof(bcmsdh_info_t))) == NULL) {
+		BCMSDH_ERROR(("bcmsdh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh)));
+		return NULL;
+	}
+	bzero((char *)bcmsdh, sizeof(bcmsdh_info_t));
+	bcmsdh->sdioh = sdioh;
+	bcmsdh->osh = osh;
+	bcmsdh->init_success = TRUE;
+	*regsva = SI_ENUM_BASE;
+
+	/* Report the BAR, to fix if needed */
+	bcmsdh->sbwad = SI_ENUM_BASE;
+
+	/* save the handler locally */
+	l_bcmsdh = bcmsdh;
+
+	return bcmsdh;
+}
+
+int
+bcmsdh_detach(osl_t *osh, void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	if (bcmsdh != NULL) {
+		MFREE(osh, bcmsdh, sizeof(bcmsdh_info_t));
+	}
+
+	l_bcmsdh = NULL;
+
+	return 0;
+}
+
+int
+bcmsdh_iovar_op(void *sdh, const char *name,
+                void *params, int plen, void *arg, int len, bool set)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	return sdioh_iovar_op(bcmsdh->sdioh, name, params, plen, arg, len, set);
+}
+
+bool
+bcmsdh_intr_query(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	bool on;
+
+	ASSERT(bcmsdh);
+	status = sdioh_interrupt_query(bcmsdh->sdioh, &on);
+	if (SDIOH_API_SUCCESS(status))
+		return FALSE;
+	else
+		return on;
+}
+
+int
+bcmsdh_intr_enable(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	ASSERT(bcmsdh);
+
+	status = sdioh_interrupt_set(bcmsdh->sdioh, TRUE);
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+int
+bcmsdh_intr_disable(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	ASSERT(bcmsdh);
+
+	status = sdioh_interrupt_set(bcmsdh->sdioh, FALSE);
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+int
+bcmsdh_intr_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	ASSERT(bcmsdh);
+
+	status = sdioh_interrupt_register(bcmsdh->sdioh, fn, argh);
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+int
+bcmsdh_intr_dereg(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	ASSERT(bcmsdh);
+
+	status = sdioh_interrupt_deregister(bcmsdh->sdioh);
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+#if defined(DHD_DEBUG)
+bool
+bcmsdh_intr_pending(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	ASSERT(sdh);
+	return sdioh_interrupt_pending(bcmsdh->sdioh);
+}
+#endif
+
+
+int
+bcmsdh_devremove_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh)
+{
+	ASSERT(sdh);
+
+	/* don't support yet */
+	return BCME_UNSUPPORTED;
+}
+
+/**
+ * Read from SDIO Configuration Space
+ * @param sdh SDIO Host context.
+ * @param func_num Function number to read from.
+ * @param addr Address to read from.
+ * @param err Error return.
+ * @return value read from SDIO configuration space.
+ */
+uint8
+bcmsdh_cfg_read(void *sdh, uint fnc_num, uint32 addr, int *err)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+	int32 retry = 0;
+#endif
+	uint8 data = 0;
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	ASSERT(bcmsdh->init_success);
+
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+	do {
+		if (retry)	/* wait for 1 ms till bus get settled down */
+			OSL_DELAY(1000);
+#endif
+	status = sdioh_cfg_read(bcmsdh->sdioh, fnc_num, addr, (uint8 *)&data);
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+	} while (!SDIOH_API_SUCCESS(status) && (retry++ < SDIOH_API_ACCESS_RETRY_LIMIT));
+#endif
+	if (err)
+		*err = (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR);
+
+	BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint8data = 0x%x\n", __FUNCTION__,
+	            fnc_num, addr, data));
+
+	return data;
+}
+
+void
+bcmsdh_cfg_write(void *sdh, uint fnc_num, uint32 addr, uint8 data, int *err)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+	int32 retry = 0;
+#endif
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	ASSERT(bcmsdh->init_success);
+
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+	do {
+		if (retry)	/* wait for 1 ms till bus get settled down */
+			OSL_DELAY(1000);
+#endif
+	status = sdioh_cfg_write(bcmsdh->sdioh, fnc_num, addr, (uint8 *)&data);
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+	} while (!SDIOH_API_SUCCESS(status) && (retry++ < SDIOH_API_ACCESS_RETRY_LIMIT));
+#endif
+	if (err)
+		*err = SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR;
+
+	BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint8data = 0x%x\n", __FUNCTION__,
+	            fnc_num, addr, data));
+}
+
+uint32
+bcmsdh_cfg_read_word(void *sdh, uint fnc_num, uint32 addr, int *err)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	uint32 data = 0;
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	ASSERT(bcmsdh->init_success);
+
+	status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_READ, fnc_num,
+	                            addr, &data, 4);
+
+	if (err)
+		*err = (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR);
+
+	BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint32data = 0x%x\n", __FUNCTION__,
+	            fnc_num, addr, data));
+
+	return data;
+}
+
+void
+bcmsdh_cfg_write_word(void *sdh, uint fnc_num, uint32 addr, uint32 data, int *err)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	ASSERT(bcmsdh->init_success);
+
+	status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_WRITE, fnc_num,
+	                            addr, &data, 4);
+
+	if (err)
+		*err = (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR);
+
+	BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint32data = 0x%x\n", __FUNCTION__, fnc_num,
+	             addr, data));
+}
+
+
+int
+bcmsdh_cis_read(void *sdh, uint func, uint8 *cis, uint length)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+
+	uint8 *tmp_buf, *tmp_ptr;
+	uint8 *ptr;
+	bool ascii = func & ~0xf;
+	func &= 0x7;
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	ASSERT(bcmsdh->init_success);
+	ASSERT(cis);
+	ASSERT(length <= SBSDIO_CIS_SIZE_LIMIT);
+
+	status = sdioh_cis_read(bcmsdh->sdioh, func, cis, length);
+
+	if (ascii) {
+		/* Move binary bits to tmp and format them into the provided buffer. */
+		if ((tmp_buf = (uint8 *)MALLOC(bcmsdh->osh, length)) == NULL) {
+			BCMSDH_ERROR(("%s: out of memory\n", __FUNCTION__));
+			return BCME_NOMEM;
+		}
+		bcopy(cis, tmp_buf, length);
+		for (tmp_ptr = tmp_buf, ptr = cis; ptr < (cis + length - 4); tmp_ptr++) {
+			ptr += snprintf((char*)ptr, (cis + length - ptr - 4),
+				"%.2x ", *tmp_ptr & 0xff);
+			if ((((tmp_ptr - tmp_buf) + 1) & 0xf) == 0)
+				ptr += snprintf((char *)ptr, (cis + length - ptr -4), "\n");
+		}
+		MFREE(bcmsdh->osh, tmp_buf, length);
+	}
+
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+
+int
+bcmsdhsdio_set_sbaddr_window(void *sdh, uint32 address, bool force_set)
+{
+	int err = 0;
+	uint bar0 = address & ~SBSDIO_SB_OFT_ADDR_MASK;
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	if (bar0 != bcmsdh->sbwad || force_set) {
+		bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW,
+			(address >> 8) & SBSDIO_SBADDRLOW_MASK, &err);
+		if (!err)
+			bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRMID,
+				(address >> 16) & SBSDIO_SBADDRMID_MASK, &err);
+		if (!err)
+			bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRHIGH,
+				(address >> 24) & SBSDIO_SBADDRHIGH_MASK, &err);
+
+		if (!err)
+			bcmsdh->sbwad = bar0;
+		else
+			/* invalidate cached window var */
+			bcmsdh->sbwad = 0;
+
+	}
+
+	return err;
+}
+
+uint32
+bcmsdh_reg_read(void *sdh, uint32 addr, uint size)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	uint32 word = 0;
+
+	BCMSDH_INFO(("%s:fun = 1, addr = 0x%x, ", __FUNCTION__, addr));
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	ASSERT(bcmsdh->init_success);
+
+	if (bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE))
+		return 0xFFFFFFFF;
+
+	addr &= SBSDIO_SB_OFT_ADDR_MASK;
+	if (size == 4)
+		addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+	status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL,
+		SDIOH_READ, SDIO_FUNC_1, addr, &word, size);
+
+	bcmsdh->regfail = !(SDIOH_API_SUCCESS(status));
+
+	BCMSDH_INFO(("uint32data = 0x%x\n", word));
+
+	/* if ok, return appropriately masked word */
+	if (SDIOH_API_SUCCESS(status)) {
+		switch (size) {
+			case sizeof(uint8):
+				return (word & 0xff);
+			case sizeof(uint16):
+				return (word & 0xffff);
+			case sizeof(uint32):
+				return word;
+			default:
+				bcmsdh->regfail = TRUE;
+
+		}
+	}
+
+	/* otherwise, bad sdio access or invalid size */
+	BCMSDH_ERROR(("%s: error reading addr 0x%04x size %d\n", __FUNCTION__, addr, size));
+	return 0xFFFFFFFF;
+}
+
+uint32
+bcmsdh_reg_write(void *sdh, uint32 addr, uint size, uint32 data)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	int err = 0;
+
+	BCMSDH_INFO(("%s:fun = 1, addr = 0x%x, uint%ddata = 0x%x\n",
+	             __FUNCTION__, addr, size*8, data));
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	ASSERT(bcmsdh->init_success);
+
+	if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE)))
+		return err;
+
+	addr &= SBSDIO_SB_OFT_ADDR_MASK;
+	if (size == 4)
+		addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+	status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_WRITE, SDIO_FUNC_1,
+	                            addr, &data, size);
+	bcmsdh->regfail = !(SDIOH_API_SUCCESS(status));
+
+	if (SDIOH_API_SUCCESS(status))
+		return 0;
+
+	BCMSDH_ERROR(("%s: error writing 0x%08x to addr 0x%04x size %d\n",
+	              __FUNCTION__, data, addr, size));
+	return 0xFFFFFFFF;
+}
+
+bool
+bcmsdh_regfail(void *sdh)
+{
+	return ((bcmsdh_info_t *)sdh)->regfail;
+}
+
+int
+bcmsdh_recv_buf(void *sdh, uint32 addr, uint fn, uint flags,
+                uint8 *buf, uint nbytes, void *pkt,
+                bcmsdh_cmplt_fn_t complete_fn, void *handle)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	uint incr_fix;
+	uint width;
+	int err = 0;
+
+	ASSERT(bcmsdh);
+	ASSERT(bcmsdh->init_success);
+
+	BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, size = %d\n",
+	             __FUNCTION__, fn, addr, nbytes));
+
+	/* Async not implemented yet */
+	ASSERT(!(flags & SDIO_REQ_ASYNC));
+	if (flags & SDIO_REQ_ASYNC)
+		return BCME_UNSUPPORTED;
+
+	if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE)))
+		return err;
+
+	addr &= SBSDIO_SB_OFT_ADDR_MASK;
+
+	incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
+	width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
+	if (width == 4)
+		addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+	status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, incr_fix,
+	                              SDIOH_READ, fn, addr, width, nbytes, buf, pkt);
+
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR);
+}
+
+int
+bcmsdh_send_buf(void *sdh, uint32 addr, uint fn, uint flags,
+                uint8 *buf, uint nbytes, void *pkt,
+                bcmsdh_cmplt_fn_t complete_fn, void *handle)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	uint incr_fix;
+	uint width;
+	int err = 0;
+
+	ASSERT(bcmsdh);
+	ASSERT(bcmsdh->init_success);
+
+	BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, size = %d\n",
+	            __FUNCTION__, fn, addr, nbytes));
+
+	/* Async not implemented yet */
+	ASSERT(!(flags & SDIO_REQ_ASYNC));
+	if (flags & SDIO_REQ_ASYNC)
+		return BCME_UNSUPPORTED;
+
+	if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE)))
+		return err;
+
+	addr &= SBSDIO_SB_OFT_ADDR_MASK;
+
+	incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
+	width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
+	if (width == 4)
+		addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+	status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, incr_fix,
+	                              SDIOH_WRITE, fn, addr, width, nbytes, buf, pkt);
+
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+int
+bcmsdh_rwdata(void *sdh, uint rw, uint32 addr, uint8 *buf, uint nbytes)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+
+	ASSERT(bcmsdh);
+	ASSERT(bcmsdh->init_success);
+	ASSERT((addr & SBSDIO_SBWINDOW_MASK) == 0);
+
+	addr &= SBSDIO_SB_OFT_ADDR_MASK;
+	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+	status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, SDIOH_DATA_INC,
+	                              (rw ? SDIOH_WRITE : SDIOH_READ), SDIO_FUNC_1,
+	                              addr, 4, nbytes, buf, NULL);
+
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+int
+bcmsdh_abort(void *sdh, uint fn)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	return sdioh_abort(bcmsdh->sdioh, fn);
+}
+
+int
+bcmsdh_start(void *sdh, int stage)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	return sdioh_start(bcmsdh->sdioh, stage);
+}
+
+int
+bcmsdh_stop(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	return sdioh_stop(bcmsdh->sdioh);
+}
+
+int
+bcmsdh_waitlockfree(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	return sdioh_waitlockfree(bcmsdh->sdioh);
+}
+
+
+int
+bcmsdh_query_device(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	bcmsdh->vendevid = (VENDOR_BROADCOM << 16) | 0;
+	return (bcmsdh->vendevid);
+}
+
+uint
+bcmsdh_query_iofnum(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	return (sdioh_query_iofnum(bcmsdh->sdioh));
+}
+
+int
+bcmsdh_reset(bcmsdh_info_t *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	return sdioh_sdio_reset(bcmsdh->sdioh);
+}
+
+void *bcmsdh_get_sdioh(bcmsdh_info_t *sdh)
+{
+	ASSERT(sdh);
+	return sdh->sdioh;
+}
+
+/* Function to pass device-status bits to DHD. */
+uint32
+bcmsdh_get_dstatus(void *sdh)
+{
+	return 0;
+}
+uint32
+bcmsdh_cur_sbwad(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	return (bcmsdh->sbwad);
+}
+
+void
+bcmsdh_chipinfo(void *sdh, uint32 chip, uint32 chiprev)
+{
+	return;
+}
+
+
+int
+bcmsdh_sleep(void *sdh, bool enab)
+{
+#ifdef SDIOH_SLEEP_ENABLED
+	bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;
+	sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh);
+
+	return sdioh_sleep(sd, enab);
+#else
+	return BCME_UNSUPPORTED;
+#endif
+}
+
+int
+bcmsdh_gpio_init(void *sdh)
+{
+	bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;
+	sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh);
+
+	return sdioh_gpio_init(sd);
+}
+
+bool
+bcmsdh_gpioin(void *sdh, uint32 gpio)
+{
+	bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;
+	sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh);
+
+	return sdioh_gpioin(sd, gpio);
+}
+
+int
+bcmsdh_gpioouten(void *sdh, uint32 gpio)
+{
+	bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;
+	sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh);
+
+	return sdioh_gpioouten(sd, gpio);
+}
+
+int
+bcmsdh_gpioout(void *sdh, uint32 gpio, bool enab)
+{
+	bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;
+	sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh);
+
+	return sdioh_gpioout(sd, gpio, enab);
+}
diff --git a/drivers/net/wireless/bcmdhd/bcmsdh_linux.c b/drivers/net/wireless/bcmdhd/bcmsdh_linux.c
new file mode 100644
index 0000000..76783c5
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmsdh_linux.c
@@ -0,0 +1,501 @@
+/*
+ * SDIO access interface for drivers - linux specific (pci only)
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdh_linux.c 461444 2014-03-12 02:55:28Z $
+ */
+
+/**
+ * @file bcmsdh_linux.c
+ */
+
+#define __UNDEF_NO_VERSION__
+
+#include <typedefs.h>
+#include <linuxver.h>
+#include <linux/pci.h>
+#include <linux/completion.h>
+#ifdef DHD_WAKE_STATUS
+#include <linux/wakeup_reason.h>
+#endif
+
+#include <osl.h>
+#include <pcicfg.h>
+#include <bcmdefs.h>
+#include <bcmdevs.h>
+#include <linux/irq.h>
+extern void dhdsdio_isr(void * args);
+#include <bcmutils.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#if defined(CONFIG_ARCH_ODIN)
+#include <linux/platform_data/gpio-odin.h>
+#endif /* defined(CONFIG_ARCH_ODIN) */
+#include <dhd_linux.h>
+
+/* driver info, initialized when bcmsdh_register is called */
+static bcmsdh_driver_t drvinfo = {NULL, NULL, NULL, NULL};
+
+typedef enum {
+	DHD_INTR_INVALID = 0,
+	DHD_INTR_INBAND,
+	DHD_INTR_HWOOB,
+	DHD_INTR_SWOOB
+} DHD_HOST_INTR_TYPE;
+
+/* the BCMSDH module comprises the generic part (bcmsdh.c) and OS specific layer (e.g.
+ * bcmsdh_linux.c). Put all OS specific variables (e.g. irq number and flags) here rather
+ * than in the common structure bcmsdh_info. bcmsdh_info only keeps a handle (os_ctx) to this
+ * structure.
+ */
+typedef struct bcmsdh_os_info {
+	DHD_HOST_INTR_TYPE	intr_type;
+	int			oob_irq_num;	/* valid when hardware or software oob in use */
+	unsigned long		oob_irq_flags;	/* valid when hardware or software oob in use */
+	bool			oob_irq_registered;
+	bool			oob_irq_enabled;
+	bool			oob_irq_wake_enabled;
+	spinlock_t		oob_irq_spinlock;
+	bcmsdh_cb_fn_t		oob_irq_handler;
+	void			*oob_irq_handler_context;
+	void			*context;	/* context returned from upper layer */
+	void			*sdioh;		/* handle to lower layer (sdioh) */
+	void			*dev;		/* handle to the underlying device */
+	bool			dev_wake_enabled;
+} bcmsdh_os_info_t;
+
+/* debugging macros */
+#define SDLX_MSG(x)
+
+/**
+ * Checks to see if vendor and device IDs match a supported SDIO Host Controller.
+ */
+bool
+bcmsdh_chipmatch(uint16 vendor, uint16 device)
+{
+	/* Add other vendors and devices as required */
+
+#ifdef BCMSDIOH_STD
+	/* Check for Arasan host controller */
+	if (vendor == VENDOR_SI_IMAGE) {
+		return (TRUE);
+	}
+	/* Check for BRCM 27XX Standard host controller */
+	if (device == BCM27XX_SDIOH_ID && vendor == VENDOR_BROADCOM) {
+		return (TRUE);
+	}
+	/* Check for BRCM Standard host controller */
+	if (device == SDIOH_FPGA_ID && vendor == VENDOR_BROADCOM) {
+		return (TRUE);
+	}
+	/* Check for TI PCIxx21 Standard host controller */
+	if (device == PCIXX21_SDIOH_ID && vendor == VENDOR_TI) {
+		return (TRUE);
+	}
+	if (device == PCIXX21_SDIOH0_ID && vendor == VENDOR_TI) {
+		return (TRUE);
+	}
+	/* Ricoh R5C822 Standard SDIO Host */
+	if (device == R5C822_SDIOH_ID && vendor == VENDOR_RICOH) {
+		return (TRUE);
+	}
+	/* JMicron Standard SDIO Host */
+	if (device == JMICRON_SDIOH_ID && vendor == VENDOR_JMICRON) {
+		return (TRUE);
+	}
+
+#endif /* BCMSDIOH_STD */
+#ifdef BCMSDIOH_SPI
+	/* This is the PciSpiHost. */
+	if (device == SPIH_FPGA_ID && vendor == VENDOR_BROADCOM) {
+		printf("Found PCI SPI Host Controller\n");
+		return (TRUE);
+	}
+
+#endif /* BCMSDIOH_SPI */
+
+	return (FALSE);
+}
+
+void* bcmsdh_probe(osl_t *osh, void *dev, void *sdioh, void *adapter_info, uint bus_type,
+	uint bus_num, uint slot_num)
+{
+	ulong regs;
+	bcmsdh_info_t *bcmsdh;
+	uint32 vendevid;
+	bcmsdh_os_info_t *bcmsdh_osinfo = NULL;
+
+	bcmsdh = bcmsdh_attach(osh, sdioh, &regs);
+	if (bcmsdh == NULL) {
+		SDLX_MSG(("%s: bcmsdh_attach failed\n", __FUNCTION__));
+		goto err;
+	}
+	bcmsdh_osinfo = MALLOC(osh, sizeof(bcmsdh_os_info_t));
+	if (bcmsdh_osinfo == NULL) {
+		SDLX_MSG(("%s: failed to allocate bcmsdh_os_info_t\n", __FUNCTION__));
+		goto err;
+	}
+	bzero((char *)bcmsdh_osinfo, sizeof(bcmsdh_os_info_t));
+	bcmsdh->os_cxt = bcmsdh_osinfo;
+	bcmsdh_osinfo->sdioh = sdioh;
+	bcmsdh_osinfo->dev = dev;
+	osl_set_bus_handle(osh, bcmsdh);
+
+#if !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+	if (dev && device_init_wakeup(dev, true) == 0)
+		bcmsdh_osinfo->dev_wake_enabled = TRUE;
+#endif /* !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */
+
+#if defined(OOB_INTR_ONLY)
+	spin_lock_init(&bcmsdh_osinfo->oob_irq_spinlock);
+	/* Get customer specific OOB IRQ parametres: IRQ number as IRQ type */
+	bcmsdh_osinfo->oob_irq_num = wifi_platform_get_irq_number(adapter_info,
+		&bcmsdh_osinfo->oob_irq_flags);
+	if  (bcmsdh_osinfo->oob_irq_num < 0) {
+		SDLX_MSG(("%s: Host OOB irq is not defined\n", __FUNCTION__));
+		goto err;
+	}
+#endif /* defined(BCMLXSDMMC) */
+
+	/* Read the vendor/device ID from the CIS */
+	vendevid = bcmsdh_query_device(bcmsdh);
+	/* try to attach to the target device */
+	bcmsdh_osinfo->context = drvinfo.probe((vendevid >> 16), (vendevid & 0xFFFF), bus_num,
+		slot_num, 0, bus_type, (void *)regs, osh, bcmsdh);
+	if (bcmsdh_osinfo->context == NULL) {
+		SDLX_MSG(("%s: device attach failed\n", __FUNCTION__));
+		goto err;
+	}
+
+#ifdef DHD_WAKE_STATUS
+	bcmsdh->wake_irq = wifi_platform_get_wake_irq(adapter_info);
+	if (bcmsdh->wake_irq == -1)
+		bcmsdh->wake_irq = bcmsdh_osinfo->oob_irq_num;
+#endif
+	return bcmsdh;
+
+	/* error handling */
+err:
+	if (bcmsdh != NULL)
+		bcmsdh_detach(osh, bcmsdh);
+	if (bcmsdh_osinfo != NULL)
+		MFREE(osh, bcmsdh_osinfo, sizeof(bcmsdh_os_info_t));
+	return NULL;
+}
+
+int bcmsdh_remove(bcmsdh_info_t *bcmsdh)
+{
+	bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+
+#if !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+	if (bcmsdh_osinfo->dev)
+		device_init_wakeup(bcmsdh_osinfo->dev, false);
+	bcmsdh_osinfo->dev_wake_enabled = FALSE;
+#endif /* !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */
+
+	drvinfo.remove(bcmsdh_osinfo->context);
+	MFREE(bcmsdh->osh, bcmsdh->os_cxt, sizeof(bcmsdh_os_info_t));
+	bcmsdh_detach(bcmsdh->osh, bcmsdh);
+
+	return 0;
+}
+
+#ifdef DHD_WAKE_STATUS
+int bcmsdh_get_total_wake(bcmsdh_info_t *bcmsdh)
+{
+	return bcmsdh->total_wake_count;
+}
+
+int bcmsdh_set_get_wake(bcmsdh_info_t *bcmsdh, int flag)
+{
+	bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&bcmsdh_osinfo->oob_irq_spinlock, flags);
+
+	ret = bcmsdh->pkt_wake;
+	bcmsdh->total_wake_count += flag;
+	bcmsdh->pkt_wake = flag;
+
+	spin_unlock_irqrestore(&bcmsdh_osinfo->oob_irq_spinlock, flags);
+	return ret;
+}
+#endif
+
+int bcmsdh_suspend(bcmsdh_info_t *bcmsdh)
+{
+	bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+
+	if (drvinfo.suspend && drvinfo.suspend(bcmsdh_osinfo->context))
+		return -EBUSY;
+	return 0;
+}
+
+int bcmsdh_resume(bcmsdh_info_t *bcmsdh)
+{
+	bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+
+#ifdef DHD_WAKE_STATUS
+	if (check_wakeup_reason(bcmsdh->wake_irq))
+		bcmsdh_set_get_wake(bcmsdh, 1);
+#endif
+
+	if (drvinfo.resume)
+		return drvinfo.resume(bcmsdh_osinfo->context);
+	return 0;
+}
+
+extern int bcmsdh_register_client_driver(void);
+extern void bcmsdh_unregister_client_driver(void);
+extern int sdio_func_reg_notify(void* semaphore);
+extern void sdio_func_unreg_notify(void);
+
+#if defined(BCMLXSDMMC)
+int bcmsdh_reg_sdio_notify(void* semaphore)
+{
+	return sdio_func_reg_notify(semaphore);
+}
+
+void bcmsdh_unreg_sdio_notify(void)
+{
+	sdio_func_unreg_notify();
+}
+#endif /* defined(BCMLXSDMMC) */
+
+int
+bcmsdh_register(bcmsdh_driver_t *driver)
+{
+	int error = 0;
+
+	drvinfo = *driver;
+	SDLX_MSG(("%s: register client driver\n", __FUNCTION__));
+	error = bcmsdh_register_client_driver();
+	if (error)
+		SDLX_MSG(("%s: failed %d\n", __FUNCTION__, error));
+
+	return error;
+}
+
+void
+bcmsdh_unregister(void)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+		if (bcmsdh_pci_driver.node.next == NULL)
+			return;
+#endif
+
+	bcmsdh_unregister_client_driver();
+}
+
+void bcmsdh_dev_pm_stay_awake(bcmsdh_info_t *bcmsdh)
+{
+#if !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+	bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+	pm_stay_awake(bcmsdh_osinfo->dev);
+#endif /* !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */
+}
+
+void bcmsdh_dev_relax(bcmsdh_info_t *bcmsdh)
+{
+#if !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+	bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+	pm_relax(bcmsdh_osinfo->dev);
+#endif /* !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */
+}
+
+bool bcmsdh_dev_pm_enabled(bcmsdh_info_t *bcmsdh)
+{
+	bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+
+	return bcmsdh_osinfo->dev_wake_enabled;
+}
+
+#if defined(OOB_INTR_ONLY)
+void bcmsdh_oob_intr_set(bcmsdh_info_t *bcmsdh, bool enable)
+{
+	unsigned long flags;
+	bcmsdh_os_info_t *bcmsdh_osinfo;
+
+	if (!bcmsdh)
+		return;
+
+	bcmsdh_osinfo = bcmsdh->os_cxt;
+	spin_lock_irqsave(&bcmsdh_osinfo->oob_irq_spinlock, flags);
+	if (bcmsdh_osinfo->oob_irq_enabled != enable) {
+		if (enable)
+			enable_irq(bcmsdh_osinfo->oob_irq_num);
+		else
+			disable_irq_nosync(bcmsdh_osinfo->oob_irq_num);
+		bcmsdh_osinfo->oob_irq_enabled = enable;
+	}
+	spin_unlock_irqrestore(&bcmsdh_osinfo->oob_irq_spinlock, flags);
+}
+
+static irqreturn_t wlan_oob_irq(int irq, void *dev_id)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)dev_id;
+	bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+
+	bcmsdh_oob_intr_set(bcmsdh, FALSE);
+	bcmsdh_osinfo->oob_irq_handler(bcmsdh_osinfo->oob_irq_handler_context);
+
+	return IRQ_HANDLED;
+}
+
+int bcmsdh_oob_intr_register(bcmsdh_info_t *bcmsdh, bcmsdh_cb_fn_t oob_irq_handler,
+	void* oob_irq_handler_context)
+{
+	int err = 0;
+	bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+
+	SDLX_MSG(("%s: Enter\n", __FUNCTION__));
+	if (bcmsdh_osinfo->oob_irq_registered) {
+		SDLX_MSG(("%s: irq is already registered\n", __FUNCTION__));
+		return -EBUSY;
+	}
+	SDLX_MSG(("%s OOB irq=%d flags=%X \n", __FUNCTION__,
+		(int)bcmsdh_osinfo->oob_irq_num, (int)bcmsdh_osinfo->oob_irq_flags));
+	bcmsdh_osinfo->oob_irq_handler = oob_irq_handler;
+	bcmsdh_osinfo->oob_irq_handler_context = oob_irq_handler_context;
+#if defined(CONFIG_ARCH_ODIN)
+	err = odin_gpio_sms_request_irq(bcmsdh_osinfo->oob_irq_num, wlan_oob_irq,
+		bcmsdh_osinfo->oob_irq_flags, "bcmsdh_sdmmc", bcmsdh);
+#else
+	err = request_irq(bcmsdh_osinfo->oob_irq_num, wlan_oob_irq,
+		bcmsdh_osinfo->oob_irq_flags, "bcmsdh_sdmmc", bcmsdh);
+#endif /* defined(CONFIG_ARCH_ODIN) */
+	if (err) {
+		SDLX_MSG(("%s: request_irq failed with %d\n", __FUNCTION__, err));
+		return err;
+	}
+
+		err = enable_irq_wake(bcmsdh_osinfo->oob_irq_num);
+		if (!err)
+			bcmsdh_osinfo->oob_irq_wake_enabled = TRUE;
+	bcmsdh_osinfo->oob_irq_enabled = TRUE;
+	bcmsdh_osinfo->oob_irq_registered = TRUE;
+	return err;
+}
+
+void bcmsdh_oob_intr_unregister(bcmsdh_info_t *bcmsdh)
+{
+	int err = 0;
+	bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+
+	SDLX_MSG(("%s: Enter\n", __FUNCTION__));
+	if (!bcmsdh_osinfo->oob_irq_registered) {
+		SDLX_MSG(("%s: irq is not registered\n", __FUNCTION__));
+		return;
+	}
+	if (bcmsdh_osinfo->oob_irq_wake_enabled) {
+			err = disable_irq_wake(bcmsdh_osinfo->oob_irq_num);
+			if (!err)
+				bcmsdh_osinfo->oob_irq_wake_enabled = FALSE;
+	}
+	if (bcmsdh_osinfo->oob_irq_enabled) {
+		disable_irq(bcmsdh_osinfo->oob_irq_num);
+		bcmsdh_osinfo->oob_irq_enabled = FALSE;
+	}
+	free_irq(bcmsdh_osinfo->oob_irq_num, bcmsdh);
+	bcmsdh_osinfo->oob_irq_registered = FALSE;
+}
+#endif 
+
+/* Module parameters specific to each host-controller driver */
+
+extern uint sd_msglevel;	/* Debug message level */
+module_param(sd_msglevel, uint, 0);
+
+extern uint sd_power;	/* 0 = SD Power OFF, 1 = SD Power ON. */
+module_param(sd_power, uint, 0);
+
+extern uint sd_clock;	/* SD Clock Control, 0 = SD Clock OFF, 1 = SD Clock ON */
+module_param(sd_clock, uint, 0);
+
+extern uint sd_divisor;	/* Divisor (-1 means external clock) */
+module_param(sd_divisor, uint, 0);
+
+extern uint sd_sdmode;	/* Default is SD4, 0=SPI, 1=SD1, 2=SD4 */
+module_param(sd_sdmode, uint, 0);
+
+extern uint sd_hiok;	/* Ok to use hi-speed mode */
+module_param(sd_hiok, uint, 0);
+
+extern uint sd_f2_blocksize;
+module_param(sd_f2_blocksize, int, 0);
+
+#ifdef BCMSDIOH_STD
+extern int sd_uhsimode;
+module_param(sd_uhsimode, int, 0);
+extern uint sd_tuning_period;
+module_param(sd_tuning_period, uint, 0);
+extern int sd_delay_value;
+module_param(sd_delay_value, uint, 0);
+
+/* SDIO Drive Strength for UHSI mode specific to SDIO3.0 */
+extern char dhd_sdiod_uhsi_ds_override[2];
+module_param_string(dhd_sdiod_uhsi_ds_override, dhd_sdiod_uhsi_ds_override, 2, 0);
+
+#endif
+
+#ifdef BCMSDH_MODULE
+EXPORT_SYMBOL(bcmsdh_attach);
+EXPORT_SYMBOL(bcmsdh_detach);
+EXPORT_SYMBOL(bcmsdh_intr_query);
+EXPORT_SYMBOL(bcmsdh_intr_enable);
+EXPORT_SYMBOL(bcmsdh_intr_disable);
+EXPORT_SYMBOL(bcmsdh_intr_reg);
+EXPORT_SYMBOL(bcmsdh_intr_dereg);
+
+#if defined(DHD_DEBUG)
+EXPORT_SYMBOL(bcmsdh_intr_pending);
+#endif
+
+EXPORT_SYMBOL(bcmsdh_devremove_reg);
+EXPORT_SYMBOL(bcmsdh_cfg_read);
+EXPORT_SYMBOL(bcmsdh_cfg_write);
+EXPORT_SYMBOL(bcmsdh_cis_read);
+EXPORT_SYMBOL(bcmsdh_reg_read);
+EXPORT_SYMBOL(bcmsdh_reg_write);
+EXPORT_SYMBOL(bcmsdh_regfail);
+EXPORT_SYMBOL(bcmsdh_send_buf);
+EXPORT_SYMBOL(bcmsdh_recv_buf);
+
+EXPORT_SYMBOL(bcmsdh_rwdata);
+EXPORT_SYMBOL(bcmsdh_abort);
+EXPORT_SYMBOL(bcmsdh_query_device);
+EXPORT_SYMBOL(bcmsdh_query_iofnum);
+EXPORT_SYMBOL(bcmsdh_iovar_op);
+EXPORT_SYMBOL(bcmsdh_register);
+EXPORT_SYMBOL(bcmsdh_unregister);
+EXPORT_SYMBOL(bcmsdh_chipmatch);
+EXPORT_SYMBOL(bcmsdh_reset);
+EXPORT_SYMBOL(bcmsdh_waitlockfree);
+
+EXPORT_SYMBOL(bcmsdh_get_dstatus);
+EXPORT_SYMBOL(bcmsdh_cfg_read_word);
+EXPORT_SYMBOL(bcmsdh_cfg_write_word);
+EXPORT_SYMBOL(bcmsdh_cur_sbwad);
+EXPORT_SYMBOL(bcmsdh_chipinfo);
+
+#endif /* BCMSDH_MODULE */
diff --git a/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc.c b/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc.c
new file mode 100644
index 0000000..118079a
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc.c
@@ -0,0 +1,1458 @@
+/*
+ * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdh_sdmmc.c 459285 2014-03-03 02:54:39Z $
+ */
+#include <typedefs.h>
+
+#include <bcmdevs.h>
+#include <bcmendian.h>
+#include <bcmutils.h>
+#include <osl.h>
+#include <sdio.h>	/* SDIO Device and Protocol Specs */
+#include <sdioh.h>	/* Standard SDIO Host Controller Specification */
+#include <bcmsdbus.h>	/* bcmsdh to/from specific controller APIs */
+#include <sdiovar.h>	/* ioctl/iovars */
+
+#include <linux/mmc/core.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
+#include <linux/suspend.h>
+extern volatile bool dhd_mmc_suspend;
+#endif
+#include "bcmsdh_sdmmc.h"
+
+#ifndef BCMSDH_MODULE
+extern int sdio_function_init(void);
+extern void sdio_function_cleanup(void);
+#endif /* BCMSDH_MODULE */
+
+#if !defined(OOB_INTR_ONLY)
+static void IRQHandler(struct sdio_func *func);
+static void IRQHandlerF2(struct sdio_func *func);
+#endif /* !defined(OOB_INTR_ONLY) */
+static int sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr);
+extern int sdio_reset_comm(struct mmc_card *card);
+
+#define DEFAULT_SDIO_F2_BLKSIZE		512
+#ifndef CUSTOM_SDIO_F2_BLKSIZE
+#define CUSTOM_SDIO_F2_BLKSIZE		DEFAULT_SDIO_F2_BLKSIZE
+#endif
+
+#define MAX_IO_RW_EXTENDED_BLK		511
+
+uint sd_sdmode = SDIOH_MODE_SD4;	/* Use SD4 mode by default */
+uint sd_f2_blocksize = CUSTOM_SDIO_F2_BLKSIZE;
+uint sd_divisor = 2;			/* Default 48MHz/2 = 24MHz */
+
+uint sd_power = 1;		/* Default to SD Slot powered ON */
+uint sd_clock = 1;		/* Default to SD Clock turned ON */
+uint sd_hiok = FALSE;	/* Don't use hi-speed mode by default */
+uint sd_msglevel = 0x01;
+uint sd_use_dma = TRUE;
+
+#ifndef CUSTOM_RXCHAIN
+#define CUSTOM_RXCHAIN 0
+#endif
+
+DHD_PM_RESUME_WAIT_INIT(sdioh_request_byte_wait);
+DHD_PM_RESUME_WAIT_INIT(sdioh_request_word_wait);
+DHD_PM_RESUME_WAIT_INIT(sdioh_request_packet_wait);
+DHD_PM_RESUME_WAIT_INIT(sdioh_request_buffer_wait);
+
+#define DMA_ALIGN_MASK	0x03
+#define MMC_SDIO_ABORT_RETRY_LIMIT 5
+
+int sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data);
+
+static int
+sdioh_sdmmc_card_enablefuncs(sdioh_info_t *sd)
+{
+	int err_ret;
+	uint32 fbraddr;
+	uint8 func;
+
+	sd_trace(("%s\n", __FUNCTION__));
+
+	/* Get the Card's common CIS address */
+	sd->com_cis_ptr = sdioh_sdmmc_get_cisaddr(sd, SDIOD_CCCR_CISPTR_0);
+	sd->func_cis_ptr[0] = sd->com_cis_ptr;
+	sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr));
+
+	/* Get the Card's function CIS (for each function) */
+	for (fbraddr = SDIOD_FBR_STARTADDR, func = 1;
+	     func <= sd->num_funcs; func++, fbraddr += SDIOD_FBR_SIZE) {
+		sd->func_cis_ptr[func] = sdioh_sdmmc_get_cisaddr(sd, SDIOD_FBR_CISPTR_0 + fbraddr);
+		sd_info(("%s: Function %d CIS Ptr = 0x%x\n",
+		         __FUNCTION__, func, sd->func_cis_ptr[func]));
+	}
+
+	sd->func_cis_ptr[0] = sd->com_cis_ptr;
+	sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr));
+
+	/* Enable Function 1 */
+	sdio_claim_host(sd->func[1]);
+	err_ret = sdio_enable_func(sd->func[1]);
+	sdio_release_host(sd->func[1]);
+	if (err_ret) {
+		sd_err(("bcmsdh_sdmmc: Failed to enable F1 Err: 0x%08x", err_ret));
+	}
+
+	return FALSE;
+}
+
+/*
+ *	Public entry points & extern's
+ */
+extern sdioh_info_t *
+sdioh_attach(osl_t *osh, struct sdio_func *func)
+{
+	sdioh_info_t *sd = NULL;
+	int err_ret;
+
+	sd_trace(("%s\n", __FUNCTION__));
+
+	if (func == NULL) {
+		sd_err(("%s: sdio function device is NULL\n", __FUNCTION__));
+		return NULL;
+	}
+
+	if ((sd = (sdioh_info_t *)MALLOC(osh, sizeof(sdioh_info_t))) == NULL) {
+		sd_err(("sdioh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh)));
+		return NULL;
+	}
+	bzero((char *)sd, sizeof(sdioh_info_t));
+	sd->osh = osh;
+	sd->fake_func0.num = 0;
+	sd->fake_func0.card = func->card;
+	sd->func[0] = &sd->fake_func0;
+	sd->func[1] = func->card->sdio_func[0];
+	sd->func[2] = func->card->sdio_func[1];
+	sd->num_funcs = 2;
+	sd->sd_blockmode = TRUE;
+	sd->use_client_ints = TRUE;
+	sd->client_block_size[0] = 64;
+	sd->use_rxchain = CUSTOM_RXCHAIN;
+	if (sd->func[1] == NULL || sd->func[2] == NULL) {
+		sd_err(("%s: func 1 or 2 is null \n", __FUNCTION__));
+		goto fail;
+	}
+	sdio_set_drvdata(sd->func[1], sd);
+
+	sdio_claim_host(sd->func[1]);
+	sd->client_block_size[1] = 64;
+	err_ret = sdio_set_block_size(sd->func[1], 64);
+	sdio_release_host(sd->func[1]);
+	if (err_ret) {
+		sd_err(("bcmsdh_sdmmc: Failed to set F1 blocksize(%d)\n", err_ret));
+		goto fail;
+	}
+
+	sdio_claim_host(sd->func[2]);
+	sd->client_block_size[2] = sd_f2_blocksize;
+	err_ret = sdio_set_block_size(sd->func[2], sd_f2_blocksize);
+	sdio_release_host(sd->func[2]);
+	if (err_ret) {
+		sd_err(("bcmsdh_sdmmc: Failed to set F2 blocksize to %d(%d)\n",
+			sd_f2_blocksize, err_ret));
+		goto fail;
+	}
+
+	sdioh_sdmmc_card_enablefuncs(sd);
+
+	sd_trace(("%s: Done\n", __FUNCTION__));
+	return sd;
+
+fail:
+	MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+	return NULL;
+}
+
+
+extern SDIOH_API_RC
+sdioh_detach(osl_t *osh, sdioh_info_t *sd)
+{
+	sd_trace(("%s\n", __FUNCTION__));
+
+	if (sd) {
+
+		/* Disable Function 2 */
+		if (sd->func[2]) {
+			sdio_claim_host(sd->func[2]);
+			sdio_disable_func(sd->func[2]);
+			sdio_release_host(sd->func[2]);
+		}
+
+		/* Disable Function 1 */
+		if (sd->func[1]) {
+			sdio_claim_host(sd->func[1]);
+			sdio_disable_func(sd->func[1]);
+			sdio_release_host(sd->func[1]);
+		}
+
+		sd->func[1] = NULL;
+		sd->func[2] = NULL;
+
+		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+	}
+	return SDIOH_API_RC_SUCCESS;
+}
+
+#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
+
+extern SDIOH_API_RC
+sdioh_enable_func_intr(sdioh_info_t *sd)
+{
+	uint8 reg;
+	int err;
+
+	if (sd->func[0] == NULL) {
+		sd_err(("%s: function 0 pointer is NULL\n", __FUNCTION__));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	sdio_claim_host(sd->func[0]);
+	reg = sdio_readb(sd->func[0], SDIOD_CCCR_INTEN, &err);
+	if (err) {
+		sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+		sdio_release_host(sd->func[0]);
+		return SDIOH_API_RC_FAIL;
+	}
+	/* Enable F1 and F2 interrupts, clear master enable */
+	reg &= ~INTR_CTL_MASTER_EN;
+	reg |= (INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN);
+	sdio_writeb(sd->func[0], reg, SDIOD_CCCR_INTEN, &err);
+	sdio_release_host(sd->func[0]);
+
+	if (err) {
+		sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_disable_func_intr(sdioh_info_t *sd)
+{
+	uint8 reg;
+	int err;
+
+	if (sd->func[0] == NULL) {
+		sd_err(("%s: function 0 pointer is NULL\n", __FUNCTION__));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	sdio_claim_host(sd->func[0]);
+	reg = sdio_readb(sd->func[0], SDIOD_CCCR_INTEN, &err);
+	if (err) {
+		sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+		sdio_release_host(sd->func[0]);
+		return SDIOH_API_RC_FAIL;
+	}
+	reg &= ~(INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN);
+	/* Disable master interrupt with the last function interrupt */
+	if (!(reg & 0xFE))
+		reg = 0;
+	sdio_writeb(sd->func[0], reg, SDIOD_CCCR_INTEN, &err);
+	sdio_release_host(sd->func[0]);
+
+	if (err) {
+		sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	return SDIOH_API_RC_SUCCESS;
+}
+#endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */
+
+/* Configure callback to client when we recieve client interrupt */
+extern SDIOH_API_RC
+sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh)
+{
+	sd_trace(("%s: Entering\n", __FUNCTION__));
+	if (fn == NULL) {
+		sd_err(("%s: interrupt handler is NULL, not registering\n", __FUNCTION__));
+		return SDIOH_API_RC_FAIL;
+	}
+#if !defined(OOB_INTR_ONLY)
+	sd->intr_handler = fn;
+	sd->intr_handler_arg = argh;
+	sd->intr_handler_valid = TRUE;
+
+	/* register and unmask irq */
+	if (sd->func[2]) {
+		sdio_claim_host(sd->func[2]);
+		sdio_claim_irq(sd->func[2], IRQHandlerF2);
+		sdio_release_host(sd->func[2]);
+	}
+
+	if (sd->func[1]) {
+		sdio_claim_host(sd->func[1]);
+		sdio_claim_irq(sd->func[1], IRQHandler);
+		sdio_release_host(sd->func[1]);
+	}
+#elif defined(HW_OOB)
+	sdioh_enable_func_intr(sd);
+#endif /* !defined(OOB_INTR_ONLY) */
+
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_interrupt_deregister(sdioh_info_t *sd)
+{
+	sd_trace(("%s: Entering\n", __FUNCTION__));
+
+#if !defined(OOB_INTR_ONLY)
+	if (sd->func[1]) {
+		/* register and unmask irq */
+		sdio_claim_host(sd->func[1]);
+		sdio_release_irq(sd->func[1]);
+		sdio_release_host(sd->func[1]);
+	}
+
+	if (sd->func[2]) {
+		/* Claim host controller F2 */
+		sdio_claim_host(sd->func[2]);
+		sdio_release_irq(sd->func[2]);
+		/* Release host controller F2 */
+		sdio_release_host(sd->func[2]);
+	}
+
+	sd->intr_handler_valid = FALSE;
+	sd->intr_handler = NULL;
+	sd->intr_handler_arg = NULL;
+#elif defined(HW_OOB)
+	sdioh_disable_func_intr(sd);
+#endif /* !defined(OOB_INTR_ONLY) */
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff)
+{
+	sd_trace(("%s: Entering\n", __FUNCTION__));
+	*onoff = sd->client_intr_enabled;
+	return SDIOH_API_RC_SUCCESS;
+}
+
+#if defined(DHD_DEBUG)
+extern bool
+sdioh_interrupt_pending(sdioh_info_t *sd)
+{
+	return (0);
+}
+#endif
+
+uint
+sdioh_query_iofnum(sdioh_info_t *sd)
+{
+	return sd->num_funcs;
+}
+
+/* IOVar table */
+enum {
+	IOV_MSGLEVEL = 1,
+	IOV_BLOCKMODE,
+	IOV_BLOCKSIZE,
+	IOV_DMA,
+	IOV_USEINTS,
+	IOV_NUMINTS,
+	IOV_NUMLOCALINTS,
+	IOV_HOSTREG,
+	IOV_DEVREG,
+	IOV_DIVISOR,
+	IOV_SDMODE,
+	IOV_HISPEED,
+	IOV_HCIREGS,
+	IOV_POWER,
+	IOV_CLOCK,
+	IOV_RXCHAIN
+};
+
+const bcm_iovar_t sdioh_iovars[] = {
+	{"sd_msglevel", IOV_MSGLEVEL,	0,	IOVT_UINT32,	0 },
+	{"sd_blockmode", IOV_BLOCKMODE, 0,	IOVT_BOOL,	0 },
+	{"sd_blocksize", IOV_BLOCKSIZE, 0,	IOVT_UINT32,	0 }, /* ((fn << 16) | size) */
+	{"sd_dma",	IOV_DMA,	0,	IOVT_BOOL,	0 },
+	{"sd_ints", 	IOV_USEINTS,	0,	IOVT_BOOL,	0 },
+	{"sd_numints",	IOV_NUMINTS,	0,	IOVT_UINT32,	0 },
+	{"sd_numlocalints", IOV_NUMLOCALINTS, 0, IOVT_UINT32,	0 },
+	{"sd_hostreg",	IOV_HOSTREG,	0,	IOVT_BUFFER,	sizeof(sdreg_t) },
+	{"sd_devreg",	IOV_DEVREG, 	0,	IOVT_BUFFER,	sizeof(sdreg_t) },
+	{"sd_divisor",	IOV_DIVISOR,	0,	IOVT_UINT32,	0 },
+	{"sd_power",	IOV_POWER,	0,	IOVT_UINT32,	0 },
+	{"sd_clock",	IOV_CLOCK,	0,	IOVT_UINT32,	0 },
+	{"sd_mode", 	IOV_SDMODE, 	0,	IOVT_UINT32,	100},
+	{"sd_highspeed", IOV_HISPEED,	0,	IOVT_UINT32,	0 },
+	{"sd_rxchain",  IOV_RXCHAIN,    0, 	IOVT_BOOL,	0 },
+	{NULL, 0, 0, 0, 0 }
+};
+
+int
+sdioh_iovar_op(sdioh_info_t *si, const char *name,
+                           void *params, int plen, void *arg, int len, bool set)
+{
+	const bcm_iovar_t *vi = NULL;
+	int bcmerror = 0;
+	int val_size;
+	int32 int_val = 0;
+	bool bool_val;
+	uint32 actionid;
+
+	ASSERT(name);
+	ASSERT(len >= 0);
+
+	/* Get must have return space; Set does not take qualifiers */
+	ASSERT(set || (arg && len));
+	ASSERT(!set || (!params && !plen));
+
+	sd_trace(("%s: Enter (%s %s)\n", __FUNCTION__, (set ? "set" : "get"), name));
+
+	if ((vi = bcm_iovar_lookup(sdioh_iovars, name)) == NULL) {
+		bcmerror = BCME_UNSUPPORTED;
+		goto exit;
+	}
+
+	if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, set)) != 0)
+		goto exit;
+
+	/* Set up params so get and set can share the convenience variables */
+	if (params == NULL) {
+		params = arg;
+		plen = len;
+	}
+
+	if (vi->type == IOVT_VOID)
+		val_size = 0;
+	else if (vi->type == IOVT_BUFFER)
+		val_size = len;
+	else
+		val_size = sizeof(int);
+
+	if (plen >= (int)sizeof(int_val))
+		bcopy(params, &int_val, sizeof(int_val));
+
+	bool_val = (int_val != 0) ? TRUE : FALSE;
+	BCM_REFERENCE(bool_val);
+
+	actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+	switch (actionid) {
+	case IOV_GVAL(IOV_MSGLEVEL):
+		int_val = (int32)sd_msglevel;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_SVAL(IOV_MSGLEVEL):
+		sd_msglevel = int_val;
+		break;
+
+	case IOV_GVAL(IOV_BLOCKMODE):
+		int_val = (int32)si->sd_blockmode;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_SVAL(IOV_BLOCKMODE):
+		si->sd_blockmode = (bool)int_val;
+		/* Haven't figured out how to make non-block mode with DMA */
+		break;
+
+	case IOV_GVAL(IOV_BLOCKSIZE):
+		if ((uint32)int_val > si->num_funcs) {
+			bcmerror = BCME_BADARG;
+			break;
+		}
+		int_val = (int32)si->client_block_size[int_val];
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_SVAL(IOV_BLOCKSIZE):
+	{
+		uint func = ((uint32)int_val >> 16);
+		uint blksize = (uint16)int_val;
+		uint maxsize;
+
+		if (func > si->num_funcs) {
+			bcmerror = BCME_BADARG;
+			break;
+		}
+
+		switch (func) {
+		case 0: maxsize = 32; break;
+		case 1: maxsize = BLOCK_SIZE_4318; break;
+		case 2: maxsize = BLOCK_SIZE_4328; break;
+		default: maxsize = 0;
+		}
+		if (blksize > maxsize) {
+			bcmerror = BCME_BADARG;
+			break;
+		}
+		if (!blksize) {
+			blksize = maxsize;
+		}
+
+		/* Now set it */
+		si->client_block_size[func] = blksize;
+
+		break;
+	}
+
+	case IOV_GVAL(IOV_RXCHAIN):
+		int_val = (int32)si->use_rxchain;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_GVAL(IOV_DMA):
+		int_val = (int32)si->sd_use_dma;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_SVAL(IOV_DMA):
+		si->sd_use_dma = (bool)int_val;
+		break;
+
+	case IOV_GVAL(IOV_USEINTS):
+		int_val = (int32)si->use_client_ints;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_SVAL(IOV_USEINTS):
+		si->use_client_ints = (bool)int_val;
+		if (si->use_client_ints)
+			si->intmask |= CLIENT_INTR;
+		else
+			si->intmask &= ~CLIENT_INTR;
+
+		break;
+
+	case IOV_GVAL(IOV_DIVISOR):
+		int_val = (uint32)sd_divisor;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_SVAL(IOV_DIVISOR):
+		sd_divisor = int_val;
+		break;
+
+	case IOV_GVAL(IOV_POWER):
+		int_val = (uint32)sd_power;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_SVAL(IOV_POWER):
+		sd_power = int_val;
+		break;
+
+	case IOV_GVAL(IOV_CLOCK):
+		int_val = (uint32)sd_clock;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_SVAL(IOV_CLOCK):
+		sd_clock = int_val;
+		break;
+
+	case IOV_GVAL(IOV_SDMODE):
+		int_val = (uint32)sd_sdmode;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_SVAL(IOV_SDMODE):
+		sd_sdmode = int_val;
+		break;
+
+	case IOV_GVAL(IOV_HISPEED):
+		int_val = (uint32)sd_hiok;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_SVAL(IOV_HISPEED):
+		sd_hiok = int_val;
+		break;
+
+	case IOV_GVAL(IOV_NUMINTS):
+		int_val = (int32)si->intrcount;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_GVAL(IOV_NUMLOCALINTS):
+		int_val = (int32)0;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_GVAL(IOV_HOSTREG):
+	{
+		sdreg_t *sd_ptr = (sdreg_t *)params;
+
+		if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) {
+			sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset));
+			bcmerror = BCME_BADARG;
+			break;
+		}
+
+		sd_trace(("%s: rreg%d at offset %d\n", __FUNCTION__,
+		                  (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32),
+		                  sd_ptr->offset));
+		if (sd_ptr->offset & 1)
+			int_val = 8; /* sdioh_sdmmc_rreg8(si, sd_ptr->offset); */
+		else if (sd_ptr->offset & 2)
+			int_val = 16; /* sdioh_sdmmc_rreg16(si, sd_ptr->offset); */
+		else
+			int_val = 32; /* sdioh_sdmmc_rreg(si, sd_ptr->offset); */
+
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+	}
+
+	case IOV_SVAL(IOV_HOSTREG):
+	{
+		sdreg_t *sd_ptr = (sdreg_t *)params;
+
+		if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) {
+			sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset));
+			bcmerror = BCME_BADARG;
+			break;
+		}
+
+		sd_trace(("%s: wreg%d value 0x%08x at offset %d\n", __FUNCTION__, sd_ptr->value,
+		                  (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32),
+		                  sd_ptr->offset));
+		break;
+	}
+
+	case IOV_GVAL(IOV_DEVREG):
+	{
+		sdreg_t *sd_ptr = (sdreg_t *)params;
+		uint8 data = 0;
+
+		if (sdioh_cfg_read(si, sd_ptr->func, sd_ptr->offset, &data)) {
+			bcmerror = BCME_SDIO_ERROR;
+			break;
+		}
+
+		int_val = (int)data;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+	}
+
+	case IOV_SVAL(IOV_DEVREG):
+	{
+		sdreg_t *sd_ptr = (sdreg_t *)params;
+		uint8 data = (uint8)sd_ptr->value;
+
+		if (sdioh_cfg_write(si, sd_ptr->func, sd_ptr->offset, &data)) {
+			bcmerror = BCME_SDIO_ERROR;
+			break;
+		}
+		break;
+	}
+
+	default:
+		bcmerror = BCME_UNSUPPORTED;
+		break;
+	}
+exit:
+
+	return bcmerror;
+}
+
+#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
+
+SDIOH_API_RC
+sdioh_enable_hw_oob_intr(sdioh_info_t *sd, bool enable)
+{
+	SDIOH_API_RC status;
+	uint8 data;
+
+	if (enable)
+		data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE | SDIO_SEPINT_ACT_HI;
+	else
+		data = SDIO_SEPINT_ACT_HI;	/* disable hw oob interrupt */
+
+	status = sdioh_request_byte(sd, SDIOH_WRITE, 0, SDIOD_CCCR_BRCM_SEPINT, &data);
+	return status;
+}
+#endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */
+
+extern SDIOH_API_RC
+sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
+{
+	SDIOH_API_RC status;
+	/* No lock needed since sdioh_request_byte does locking */
+	status = sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data);
+	return status;
+}
+
+extern SDIOH_API_RC
+sdioh_cfg_write(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
+{
+	/* No lock needed since sdioh_request_byte does locking */
+	SDIOH_API_RC status;
+	status = sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data);
+	return status;
+}
+
+static int
+sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr)
+{
+	/* read 24 bits and return valid 17 bit addr */
+	int i;
+	uint32 scratch, regdata;
+	uint8 *ptr = (uint8 *)&scratch;
+	for (i = 0; i < 3; i++) {
+		if ((sdioh_sdmmc_card_regread (sd, 0, regaddr, 1, &regdata)) != SUCCESS)
+			sd_err(("%s: Can't read!\n", __FUNCTION__));
+
+		*ptr++ = (uint8) regdata;
+		regaddr++;
+	}
+
+	/* Only the lower 17-bits are valid */
+	scratch = ltoh32(scratch);
+	scratch &= 0x0001FFFF;
+	return (scratch);
+}
+
+extern SDIOH_API_RC
+sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length)
+{
+	uint32 count;
+	int offset;
+	uint32 foo;
+	uint8 *cis = cisd;
+
+	sd_trace(("%s: Func = %d\n", __FUNCTION__, func));
+
+	if (!sd->func_cis_ptr[func]) {
+		bzero(cis, length);
+		sd_err(("%s: no func_cis_ptr[%d]\n", __FUNCTION__, func));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	sd_err(("%s: func_cis_ptr[%d]=0x%04x\n", __FUNCTION__, func, sd->func_cis_ptr[func]));
+
+	for (count = 0; count < length; count++) {
+		offset =  sd->func_cis_ptr[func] + count;
+		if (sdioh_sdmmc_card_regread (sd, 0, offset, 1, &foo) < 0) {
+			sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__));
+			return SDIOH_API_RC_FAIL;
+		}
+
+		*cis = (uint8)(foo & 0xff);
+		cis++;
+	}
+
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte)
+{
+	int err_ret = 0;
+#if defined(MMC_SDIO_ABORT)
+	int sdio_abort_retry = MMC_SDIO_ABORT_RETRY_LIMIT;
+#endif
+
+	sd_info(("%s: rw=%d, func=%d, addr=0x%05x\n", __FUNCTION__, rw, func, regaddr));
+
+	DHD_PM_RESUME_WAIT(sdioh_request_byte_wait);
+	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+	if(rw) { /* CMD52 Write */
+		if (func == 0) {
+			/* Can only directly write to some F0 registers.  Handle F2 enable
+			 * as a special case.
+			 */
+			if (regaddr == SDIOD_CCCR_IOEN) {
+				if (sd->func[2]) {
+					sdio_claim_host(sd->func[2]);
+					if (*byte & SDIO_FUNC_ENABLE_2) {
+						/* Enable Function 2 */
+						err_ret = sdio_enable_func(sd->func[2]);
+						if (err_ret) {
+							sd_err(("bcmsdh_sdmmc: enable F2 failed:%d",
+								err_ret));
+						}
+					} else {
+						/* Disable Function 2 */
+						err_ret = sdio_disable_func(sd->func[2]);
+						if (err_ret) {
+							sd_err(("bcmsdh_sdmmc: Disab F2 failed:%d",
+								err_ret));
+						}
+					}
+					sdio_release_host(sd->func[2]);
+				}
+			}
+#if defined(MMC_SDIO_ABORT)
+			/* to allow abort command through F1 */
+			else if (regaddr == SDIOD_CCCR_IOABORT) {
+				while (sdio_abort_retry--) {
+					if (sd->func[func]) {
+						sdio_claim_host(sd->func[func]);
+						/*
+						 * this sdio_f0_writeb() can be replaced with
+						 * another api depending upon MMC driver change.
+						 * As of this time, this is temporaray one
+						 */
+						sdio_writeb(sd->func[func],
+							*byte, regaddr, &err_ret);
+						sdio_release_host(sd->func[func]);
+					}
+					if (!err_ret)
+						break;
+				}
+			}
+#endif /* MMC_SDIO_ABORT */
+			else if (regaddr < 0xF0) {
+				sd_err(("bcmsdh_sdmmc: F0 Wr:0x%02x: write disallowed\n", regaddr));
+			} else {
+				/* Claim host controller, perform F0 write, and release */
+				if (sd->func[func]) {
+					sdio_claim_host(sd->func[func]);
+					sdio_f0_writeb(sd->func[func],
+						*byte, regaddr, &err_ret);
+					sdio_release_host(sd->func[func]);
+				}
+			}
+		} else {
+			/* Claim host controller, perform Fn write, and release */
+			if (sd->func[func]) {
+				sdio_claim_host(sd->func[func]);
+				sdio_writeb(sd->func[func], *byte, regaddr, &err_ret);
+				sdio_release_host(sd->func[func]);
+			}
+		}
+	} else { /* CMD52 Read */
+		/* Claim host controller, perform Fn read, and release */
+		if (sd->func[func]) {
+			sdio_claim_host(sd->func[func]);
+			if (func == 0) {
+				*byte = sdio_f0_readb(sd->func[func], regaddr, &err_ret);
+			} else {
+				*byte = sdio_readb(sd->func[func], regaddr, &err_ret);
+			}
+			sdio_release_host(sd->func[func]);
+		}
+	}
+
+	if (err_ret) {
+		if ((regaddr == 0x1001F) && ((err_ret == -ETIMEDOUT) || (err_ret == -EILSEQ))) {
+		} else {
+			sd_err(("bcmsdh_sdmmc: Failed to %s byte F%d:@0x%05x=%02x, Err: %d\n",
+				rw ? "Write" : "Read", func, regaddr, *byte, err_ret));
+		}
+	}
+
+	return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+}
+
+extern SDIOH_API_RC
+sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr,
+                                   uint32 *word, uint nbytes)
+{
+	int err_ret = SDIOH_API_RC_FAIL;
+#if defined(MMC_SDIO_ABORT)
+	int sdio_abort_retry = MMC_SDIO_ABORT_RETRY_LIMIT;
+#endif
+
+	if (func == 0) {
+		sd_err(("%s: Only CMD52 allowed to F0.\n", __FUNCTION__));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	sd_info(("%s: cmd_type=%d, rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
+	         __FUNCTION__, cmd_type, rw, func, addr, nbytes));
+
+	DHD_PM_RESUME_WAIT(sdioh_request_word_wait);
+	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+	/* Claim host controller */
+	sdio_claim_host(sd->func[func]);
+
+	if(rw) { /* CMD52 Write */
+		if (nbytes == 4) {
+			sdio_writel(sd->func[func], *word, addr, &err_ret);
+		} else if (nbytes == 2) {
+			sdio_writew(sd->func[func], (*word & 0xFFFF), addr, &err_ret);
+		} else {
+			sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes));
+		}
+	} else { /* CMD52 Read */
+		if (nbytes == 4) {
+			*word = sdio_readl(sd->func[func], addr, &err_ret);
+		} else if (nbytes == 2) {
+			*word = sdio_readw(sd->func[func], addr, &err_ret) & 0xFFFF;
+		} else {
+			sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes));
+		}
+	}
+
+	/* Release host controller */
+	sdio_release_host(sd->func[func]);
+
+	if (err_ret) {
+#if defined(MMC_SDIO_ABORT)
+		/* Any error on CMD53 transaction should abort that function using function 0. */
+		while (sdio_abort_retry--) {
+			if (sd->func[0]) {
+				sdio_claim_host(sd->func[0]);
+				/*
+				 * this sdio_f0_writeb() can be replaced with another api
+				 * depending upon MMC driver change.
+				 * As of this time, this is temporaray one
+				 */
+				sdio_writeb(sd->func[0],
+					func, SDIOD_CCCR_IOABORT, &err_ret);
+				sdio_release_host(sd->func[0]);
+			}
+			if (!err_ret)
+				break;
+		}
+		if (err_ret)
+#endif /* MMC_SDIO_ABORT */
+		{
+			sd_err(("bcmsdh_sdmmc: Failed to %s word, Err: 0x%08x",
+				rw ? "Write" : "Read", err_ret));
+		}
+	}
+
+	return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+}
+
+static SDIOH_API_RC
+sdioh_request_packet_chain(sdioh_info_t *sd, uint fix_inc, uint write, uint func,
+                     uint addr, void *pkt)
+{
+	bool fifo = (fix_inc == SDIOH_DATA_FIX);
+	int err_ret = 0;
+	void *pnext;
+	uint ttl_len, pkt_offset;
+	uint blk_num;
+	uint blk_size;
+	uint max_blk_count;
+	uint max_req_size;
+	struct mmc_request mmc_req;
+	struct mmc_command mmc_cmd;
+	struct mmc_data mmc_dat;
+	uint32 sg_count;
+	struct sdio_func *sdio_func = sd->func[func];
+	struct mmc_host *host = sdio_func->card->host;
+
+	sd_trace(("%s: Enter\n", __FUNCTION__));
+	ASSERT(pkt);
+	DHD_PM_RESUME_WAIT(sdioh_request_packet_wait);
+	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+
+	blk_size = sd->client_block_size[func];
+	max_blk_count = min(host->max_blk_count, (uint)MAX_IO_RW_EXTENDED_BLK);
+	max_req_size = min(max_blk_count * blk_size, host->max_req_size);
+
+	pkt_offset = 0;
+	pnext = pkt;
+
+	while (pnext != NULL) {
+		ttl_len = 0;
+		sg_count = 0;
+		memset(&mmc_req, 0, sizeof(struct mmc_request));
+		memset(&mmc_cmd, 0, sizeof(struct mmc_command));
+		memset(&mmc_dat, 0, sizeof(struct mmc_data));
+		sg_init_table(sd->sg_list, ARRAYSIZE(sd->sg_list));
+
+		/* Set up scatter-gather DMA descriptors. this loop is to find out the max
+		 * data we can transfer with one command 53. blocks per command is limited by
+		 * host max_req_size and 9-bit max block number. when the total length of this
+		 * packet chain is bigger than max_req_size, use multiple SD_IO_RW_EXTENDED
+		 * commands (each transfer is still block aligned)
+		 */
+		while (pnext != NULL && ttl_len < max_req_size) {
+			int pkt_len;
+			int sg_data_size;
+			uint8 *pdata = (uint8*)PKTDATA(sd->osh, pnext);
+
+			ASSERT(pdata != NULL);
+			pkt_len = PKTLEN(sd->osh, pnext);
+			sd_trace(("%s[%d] data=%p, len=%d\n", __FUNCTION__, write, pdata, pkt_len));
+			/* sg_count is unlikely larger than the array size, and this is
+			 * NOT something we can handle here, but in case it happens, PLEASE put
+			 * a restriction on max tx/glom count (based on host->max_segs).
+			 */
+			if (sg_count >= ARRAYSIZE(sd->sg_list)) {
+				sd_err(("%s: sg list entries exceed limit\n", __FUNCTION__));
+				return (SDIOH_API_RC_FAIL);
+			}
+			pdata += pkt_offset;
+
+			sg_data_size = pkt_len - pkt_offset;
+			if (sg_data_size > max_req_size - ttl_len)
+				sg_data_size = max_req_size - ttl_len;
+			/* some platforms put a restriction on the data size of each scatter-gather
+			 * DMA descriptor, use multiple sg buffers when xfer_size is bigger than
+			 * max_seg_size
+			 */
+			if (sg_data_size > host->max_seg_size)
+				sg_data_size = host->max_seg_size;
+			sg_set_buf(&sd->sg_list[sg_count++], pdata, sg_data_size);
+
+			ttl_len += sg_data_size;
+			pkt_offset += sg_data_size;
+			if (pkt_offset == pkt_len) {
+				pnext = PKTNEXT(sd->osh, pnext);
+				pkt_offset = 0;
+			}
+		}
+
+		if (ttl_len % blk_size != 0) {
+			sd_err(("%s, data length %d not aligned to block size %d\n",
+				__FUNCTION__,  ttl_len, blk_size));
+			return SDIOH_API_RC_FAIL;
+		}
+		blk_num = ttl_len / blk_size;
+		mmc_dat.sg = sd->sg_list;
+		mmc_dat.sg_len = sg_count;
+		mmc_dat.blksz = blk_size;
+		mmc_dat.blocks = blk_num;
+		mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
+		mmc_cmd.opcode = 53; /* SD_IO_RW_EXTENDED */
+		mmc_cmd.arg = write ? 1<<31 : 0;
+		mmc_cmd.arg |= (func & 0x7) << 28;
+		mmc_cmd.arg |= 1<<27;
+		mmc_cmd.arg |= fifo ? 0 : 1<<26;
+		mmc_cmd.arg |= (addr & 0x1FFFF) << 9;
+		mmc_cmd.arg |= blk_num & 0x1FF;
+		mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
+		mmc_req.cmd = &mmc_cmd;
+		mmc_req.data = &mmc_dat;
+		if (!fifo)
+			addr += ttl_len;
+
+		sdio_claim_host(sdio_func);
+		mmc_set_data_timeout(&mmc_dat, sdio_func->card);
+		mmc_wait_for_req(host, &mmc_req);
+		sdio_release_host(sdio_func);
+
+		err_ret = mmc_cmd.error? mmc_cmd.error : mmc_dat.error;
+		if (0 != err_ret) {
+			sd_err(("%s:CMD53 %s failed with code %d\n",
+				__FUNCTION__, write ? "write" : "read", err_ret));
+			return SDIOH_API_RC_FAIL;
+		}
+	}
+
+	sd_trace(("%s: Exit\n", __FUNCTION__));
+	return SDIOH_API_RC_SUCCESS;
+}
+
+static SDIOH_API_RC
+sdioh_buffer_tofrom_bus(sdioh_info_t *sd, uint fix_inc, uint write, uint func,
+                     uint addr, uint8 *buf, uint len)
+{
+	bool fifo = (fix_inc == SDIOH_DATA_FIX);
+	int err_ret = 0;
+
+	sd_trace(("%s: Enter\n", __FUNCTION__));
+	ASSERT(buf);
+
+	/* NOTE:
+	 * For all writes, each packet length is aligned to 32 (or 4)
+	 * bytes in dhdsdio_txpkt_preprocess, and for glom the last packet length
+	 * is aligned to block boundary. If you want to align each packet to
+	 * a custom size, please do it in dhdsdio_txpkt_preprocess, NOT here
+	 *
+	 * For reads, the alignment is doen in sdioh_request_buffer.
+	 *
+	 */
+	sdio_claim_host(sd->func[func]);
+
+	if ((write) && (!fifo))
+		err_ret = sdio_memcpy_toio(sd->func[func], addr, buf, len);
+	else if (write)
+		err_ret = sdio_memcpy_toio(sd->func[func], addr, buf, len);
+	else if (fifo)
+		err_ret = sdio_readsb(sd->func[func], buf, addr, len);
+	else
+		err_ret = sdio_memcpy_fromio(sd->func[func], buf, addr, len);
+
+	sdio_release_host(sd->func[func]);
+
+	if (err_ret)
+		sd_err(("%s: %s FAILED %p, addr=0x%05x, pkt_len=%d, ERR=%d\n", __FUNCTION__,
+		       (write) ? "TX" : "RX", buf, addr, len, err_ret));
+	else
+		sd_trace(("%s: %s xfr'd %p, addr=0x%05x, len=%d\n", __FUNCTION__,
+			(write) ? "TX" : "RX", buf, addr, len));
+
+	sd_trace(("%s: Exit\n", __FUNCTION__));
+	return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+}
+
+
+/*
+ * This function takes a buffer or packet, and fixes everything up so that in the
+ * end, a DMA-able packet is created.
+ *
+ * A buffer does not have an associated packet pointer, and may or may not be aligned.
+ * A packet may consist of a single packet, or a packet chain.  If it is a packet chain,
+ * then all the packets in the chain must be properly aligned.  If the packet data is not
+ * aligned, then there may only be one packet, and in this case, it is copied to a new
+ * aligned packet.
+ *
+ */
+extern SDIOH_API_RC
+sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint write, uint func,
+	uint addr, uint reg_width, uint buf_len, uint8 *buffer, void *pkt)
+{
+	SDIOH_API_RC status;
+	void *tmppkt;
+
+	sd_trace(("%s: Enter\n", __FUNCTION__));
+	DHD_PM_RESUME_WAIT(sdioh_request_buffer_wait);
+	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+
+	if (pkt) {
+		/* packet chain, only used for tx/rx glom, all packets length
+		 * are aligned, total length is a block multiple
+		 */
+		if (PKTNEXT(sd->osh, pkt))
+			return sdioh_request_packet_chain(sd, fix_inc, write, func, addr, pkt);
+
+		/* non-glom mode, ignore the buffer parameter and use the packet pointer
+		 * (this shouldn't happen)
+		 */
+		buffer = PKTDATA(sd->osh, pkt);
+		buf_len = PKTLEN(sd->osh, pkt);
+	}
+
+	ASSERT(buffer);
+
+	/* buffer and length are aligned, use it directly so we can avoid memory copy */
+	if (((ulong)buffer & DMA_ALIGN_MASK) == 0 && (buf_len & DMA_ALIGN_MASK) == 0)
+		return sdioh_buffer_tofrom_bus(sd, fix_inc, write, func, addr, buffer, buf_len);
+
+	sd_err(("%s: [%d] doing memory copy buf=%p, len=%d\n",
+		__FUNCTION__, write, buffer, buf_len));
+
+	/* otherwise, a memory copy is needed as the input buffer is not aligned */
+	tmppkt = PKTGET_STATIC(sd->osh, buf_len + DEFAULT_SDIO_F2_BLKSIZE, write ? TRUE : FALSE);
+	if (tmppkt == NULL) {
+		sd_err(("%s: PKTGET failed: len %d\n", __FUNCTION__, buf_len));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	if (write)
+		bcopy(buffer, PKTDATA(sd->osh, tmppkt), buf_len);
+
+	status = sdioh_buffer_tofrom_bus(sd, fix_inc, write, func, addr,
+		PKTDATA(sd->osh, tmppkt), ROUNDUP(buf_len, (DMA_ALIGN_MASK+1)));
+
+	if (!write)
+		bcopy(PKTDATA(sd->osh, tmppkt), buffer, buf_len);
+
+	PKTFREE_STATIC(sd->osh, tmppkt, write ? TRUE : FALSE);
+
+	return status;
+}
+
+/* this function performs "abort" for both of host & device */
+extern int
+sdioh_abort(sdioh_info_t *sd, uint func)
+{
+#if defined(MMC_SDIO_ABORT)
+	char t_func = (char) func;
+#endif /* defined(MMC_SDIO_ABORT) */
+	sd_trace(("%s: Enter\n", __FUNCTION__));
+
+#if defined(MMC_SDIO_ABORT)
+	/* issue abort cmd52 command through F1 */
+	sdioh_request_byte(sd, SD_IO_OP_WRITE, SDIO_FUNC_0, SDIOD_CCCR_IOABORT, &t_func);
+#endif /* defined(MMC_SDIO_ABORT) */
+
+	sd_trace(("%s: Exit\n", __FUNCTION__));
+	return SDIOH_API_RC_SUCCESS;
+}
+
+/* Reset and re-initialize the device */
+int sdioh_sdio_reset(sdioh_info_t *si)
+{
+	sd_trace(("%s: Enter\n", __FUNCTION__));
+	sd_trace(("%s: Exit\n", __FUNCTION__));
+	return SDIOH_API_RC_SUCCESS;
+}
+
+/* Disable device interrupt */
+void
+sdioh_sdmmc_devintr_off(sdioh_info_t *sd)
+{
+	sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
+	sd->intmask &= ~CLIENT_INTR;
+}
+
+/* Enable device interrupt */
+void
+sdioh_sdmmc_devintr_on(sdioh_info_t *sd)
+{
+	sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
+	sd->intmask |= CLIENT_INTR;
+}
+
+/* Read client card reg */
+int
+sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data)
+{
+
+	if ((func == 0) || (regsize == 1)) {
+		uint8 temp = 0;
+
+		sdioh_request_byte(sd, SDIOH_READ, func, regaddr, &temp);
+		*data = temp;
+		*data &= 0xff;
+		sd_data(("%s: byte read data=0x%02x\n",
+		         __FUNCTION__, *data));
+	} else {
+		sdioh_request_word(sd, 0, SDIOH_READ, func, regaddr, data, regsize);
+		if (regsize == 2)
+			*data &= 0xffff;
+
+		sd_data(("%s: word read data=0x%08x\n",
+		         __FUNCTION__, *data));
+	}
+
+	return SUCCESS;
+}
+
+#if !defined(OOB_INTR_ONLY)
+/* bcmsdh_sdmmc interrupt handler */
+static void IRQHandler(struct sdio_func *func)
+{
+	sdioh_info_t *sd;
+
+	sd = sdio_get_drvdata(func);
+
+	ASSERT(sd != NULL);
+	sdio_release_host(sd->func[0]);
+
+	if (sd->use_client_ints) {
+		sd->intrcount++;
+		ASSERT(sd->intr_handler);
+		ASSERT(sd->intr_handler_arg);
+		(sd->intr_handler)(sd->intr_handler_arg);
+	} else {
+		sd_err(("bcmsdh_sdmmc: ***IRQHandler\n"));
+
+		sd_err(("%s: Not ready for intr: enabled %d, handler %p\n",
+		        __FUNCTION__, sd->client_intr_enabled, sd->intr_handler));
+	}
+
+	sdio_claim_host(sd->func[0]);
+}
+
+/* bcmsdh_sdmmc interrupt handler for F2 (dummy handler) */
+static void IRQHandlerF2(struct sdio_func *func)
+{
+	sd_trace(("bcmsdh_sdmmc: ***IRQHandlerF2\n"));
+}
+#endif /* !defined(OOB_INTR_ONLY) */
+
+#ifdef NOTUSED
+/* Write client card reg */
+static int
+sdioh_sdmmc_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data)
+{
+
+	if ((func == 0) || (regsize == 1)) {
+		uint8 temp;
+
+		temp = data & 0xff;
+		sdioh_request_byte(sd, SDIOH_READ, func, regaddr, &temp);
+		sd_data(("%s: byte write data=0x%02x\n",
+		         __FUNCTION__, data));
+	} else {
+		if (regsize == 2)
+			data &= 0xffff;
+
+		sdioh_request_word(sd, 0, SDIOH_READ, func, regaddr, &data, regsize);
+
+		sd_data(("%s: word write data=0x%08x\n",
+		         __FUNCTION__, data));
+	}
+
+	return SUCCESS;
+}
+#endif /* NOTUSED */
+
+int
+sdioh_start(sdioh_info_t *sd, int stage)
+{
+	int ret;
+
+	if (!sd) {
+		sd_err(("%s Failed, sd is NULL\n", __FUNCTION__));
+		return (0);
+	}
+
+	/* Need to do this stages as we can't enable the interrupt till
+		downloading of the firmware is complete, other wise polling
+		sdio access will come in way
+	*/
+	if (sd->func[0]) {
+			if (stage == 0) {
+		/* Since the power to the chip is killed, we will have
+			re enumerate the device again. Set the block size
+			and enable the fucntion 1 for in preparation for
+			downloading the code
+		*/
+		/* sdio_reset_comm() - has been fixed in latest kernel/msm.git for Linux
+		   2.6.27. The implementation prior to that is buggy, and needs broadcom's
+		   patch for it
+		*/
+		/*
+		if ((ret = sdio_reset_comm(sd->func[0]->card))) {
+			sd_err(("%s Failed, error = %d\n", __FUNCTION__, ret));
+			return ret;
+		}
+		else {
+		*/
+			sd->num_funcs = 2;
+			sd->sd_blockmode = TRUE;
+			sd->use_client_ints = TRUE;
+			sd->client_block_size[0] = 64;
+
+			if (sd->func[1]) {
+				/* Claim host controller */
+				sdio_claim_host(sd->func[1]);
+
+				sd->client_block_size[1] = 64;
+				ret = sdio_set_block_size(sd->func[1], 64);
+				if (ret) {
+					sd_err(("bcmsdh_sdmmc: Failed to set F1 "
+						"blocksize(%d)\n", ret));
+				}
+
+				/* Release host controller F1 */
+				sdio_release_host(sd->func[1]);
+			}
+
+			if (sd->func[2]) {
+				/* Claim host controller F2 */
+				sdio_claim_host(sd->func[2]);
+
+				sd->client_block_size[2] = sd_f2_blocksize;
+				ret = sdio_set_block_size(sd->func[2], sd_f2_blocksize);
+				if (ret) {
+					sd_err(("bcmsdh_sdmmc: Failed to set F2 "
+						"blocksize to %d(%d)\n", sd_f2_blocksize, ret));
+				}
+
+				/* Release host controller F2 */
+				sdio_release_host(sd->func[2]);
+			}
+
+			sdioh_sdmmc_card_enablefuncs(sd);
+			/*	}*/
+		} else {
+#if !defined(OOB_INTR_ONLY)
+			sdio_claim_host(sd->func[0]);
+			if (sd->func[2])
+				sdio_claim_irq(sd->func[2], IRQHandlerF2);
+			if (sd->func[1])
+				sdio_claim_irq(sd->func[1], IRQHandler);
+			sdio_release_host(sd->func[0]);
+#else /* defined(OOB_INTR_ONLY) */
+#if defined(HW_OOB)
+			sdioh_enable_func_intr(sd);
+#endif
+			bcmsdh_oob_intr_set(sd->bcmsdh, TRUE);
+#endif /* !defined(OOB_INTR_ONLY) */
+		}
+	}
+	else
+		sd_err(("%s Failed\n", __FUNCTION__));
+
+	return (0);
+}
+
+int
+sdioh_stop(sdioh_info_t *sd)
+{
+	/* MSM7201A Android sdio stack has bug with interrupt
+		So internaly within SDIO stack they are polling
+		which cause issue when device is turned off. So
+		unregister interrupt with SDIO stack to stop the
+		polling
+	*/
+	if (sd->func[0]) {
+#if !defined(OOB_INTR_ONLY)
+		sdio_claim_host(sd->func[0]);
+		if (sd->func[1])
+			sdio_release_irq(sd->func[1]);
+		if (sd->func[2])
+			sdio_release_irq(sd->func[2]);
+		sdio_release_host(sd->func[0]);
+#else /* defined(OOB_INTR_ONLY) */
+#if defined(HW_OOB)
+		sdioh_disable_func_intr(sd);
+#endif
+		bcmsdh_oob_intr_set(sd->bcmsdh, FALSE);
+#endif /* !defined(OOB_INTR_ONLY) */
+	}
+	else
+		sd_err(("%s Failed\n", __FUNCTION__));
+	return (0);
+}
+
+int
+sdioh_waitlockfree(sdioh_info_t *sd)
+{
+	return (1);
+}
+
+
+SDIOH_API_RC
+sdioh_gpioouten(sdioh_info_t *sd, uint32 gpio)
+{
+	return SDIOH_API_RC_FAIL;
+}
+
+SDIOH_API_RC
+sdioh_gpioout(sdioh_info_t *sd, uint32 gpio, bool enab)
+{
+	return SDIOH_API_RC_FAIL;
+}
+
+bool
+sdioh_gpioin(sdioh_info_t *sd, uint32 gpio)
+{
+	return FALSE;
+}
+
+SDIOH_API_RC
+sdioh_gpio_init(sdioh_info_t *sd)
+{
+	return SDIOH_API_RC_FAIL;
+}
diff --git a/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc_linux.c b/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc_linux.c
new file mode 100644
index 0000000..0a991c3
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc_linux.c
@@ -0,0 +1,420 @@
+/*
+ * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdh_sdmmc_linux.c 434777 2013-11-07 09:30:27Z $
+ */
+
+#include <typedefs.h>
+#include <bcmutils.h>
+#include <sdio.h>	/* SDIO Device and Protocol Specs */
+#include <bcmsdbus.h>	/* bcmsdh to/from specific controller APIs */
+#include <sdiovar.h>	/* to get msglevel bit values */
+
+#include <linux/sched.h>	/* request_irq() */
+
+#include <linux/mmc/core.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+#include <linux/mmc/sdhci.h>
+#include <dhd_linux.h>
+#include <bcmsdh_sdmmc.h>
+#include <dhd_dbg.h>
+
+#if !defined(SDIO_VENDOR_ID_BROADCOM)
+#define SDIO_VENDOR_ID_BROADCOM		0x02d0
+#endif /* !defined(SDIO_VENDOR_ID_BROADCOM) */
+
+#define SDIO_DEVICE_ID_BROADCOM_DEFAULT	0x0000
+
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4325_SDGWB)
+#define SDIO_DEVICE_ID_BROADCOM_4325_SDGWB	0x0492	/* BCM94325SDGWB */
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4325_SDGWB) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4325)
+#define SDIO_DEVICE_ID_BROADCOM_4325	0x0493
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4325) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4329)
+#define SDIO_DEVICE_ID_BROADCOM_4329	0x4329
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4329) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4319)
+#define SDIO_DEVICE_ID_BROADCOM_4319	0x4319
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4319) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4330)
+#define SDIO_DEVICE_ID_BROADCOM_4330	0x4330
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4330) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4334)
+#define SDIO_DEVICE_ID_BROADCOM_4334    0x4334
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4334) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_43340)
+#define SDIO_DEVICE_ID_BROADCOM_43340    0xa94d
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_43340) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4335)
+#define SDIO_DEVICE_ID_BROADCOM_4335    0x4335
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4335) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4324)
+#define SDIO_DEVICE_ID_BROADCOM_4324    0x4324
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4324) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_43239)
+#define SDIO_DEVICE_ID_BROADCOM_43239    43239
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_43239) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4354)
+#define SDIO_DEVICE_ID_BROADCOM_4354    0x4354
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4354) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_43430)
+#define SDIO_DEVICE_ID_BROADCOM_43430    0xa9a6
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_43430) */
+
+
+extern void wl_cfg80211_set_parent_dev(void *dev);
+extern void sdioh_sdmmc_devintr_off(sdioh_info_t *sd);
+extern void sdioh_sdmmc_devintr_on(sdioh_info_t *sd);
+extern void* bcmsdh_probe(osl_t *osh, void *dev, void *sdioh, void *adapter_info, uint bus_type,
+	uint bus_num, uint slot_num);
+extern int bcmsdh_remove(bcmsdh_info_t *bcmsdh);
+
+int sdio_function_init(void);
+void sdio_function_cleanup(void);
+
+#define DESCRIPTION "bcmsdh_sdmmc Driver"
+#define AUTHOR "Broadcom Corporation"
+
+/* module param defaults */
+static int clockoverride = 0;
+
+static struct sdio_func * gfunc = NULL;
+module_param(clockoverride, int, 0644);
+MODULE_PARM_DESC(clockoverride, "SDIO card clock override");
+
+/* Maximum number of bcmsdh_sdmmc devices supported by driver */
+#define BCMSDH_SDMMC_MAX_DEVICES 1
+
+extern volatile bool dhd_mmc_suspend;
+
+
+
+int bcmsdh_sdmmc_set_power(int on)
+{
+	static struct sdio_func *sdio_func;
+	struct sdhci_host *host;
+
+	if (gfunc) {
+		sdio_func = gfunc;
+
+		host = (struct sdhci_host *)sdio_func->card->host;
+
+		if (on)
+			mmc_power_restore_host(sdio_func->card->host);
+		else
+			mmc_power_save_host(sdio_func->card->host);
+	}
+	return 0;
+}
+
+static int sdioh_probe(struct sdio_func *func)
+{
+	int host_idx = func->card->host->index;
+	uint32 rca = func->card->rca;
+	wifi_adapter_info_t *adapter;
+	osl_t *osh = NULL;
+	sdioh_info_t *sdioh = NULL;
+
+	sd_err(("bus num (host idx)=%d, slot num (rca)=%d\n", host_idx, rca));
+	adapter = dhd_wifi_platform_get_adapter(SDIO_BUS, host_idx, rca);
+	if (adapter  != NULL)
+		sd_err(("found adapter info '%s'\n", adapter->name));
+	else
+		sd_err(("can't find adapter info for this chip\n"));
+
+#ifdef WL_CFG80211
+	wl_cfg80211_set_parent_dev(&func->dev);
+#endif
+
+	 /* allocate SDIO Host Controller state info */
+	 osh = osl_attach(&func->dev, SDIO_BUS, TRUE);
+	 if (osh == NULL) {
+		 sd_err(("%s: osl_attach failed\n", __FUNCTION__));
+		 goto fail;
+	 }
+	 osl_static_mem_init(osh, adapter);
+	 sdioh = sdioh_attach(osh, func);
+	 if (sdioh == NULL) {
+		 sd_err(("%s: sdioh_attach failed\n", __FUNCTION__));
+		 goto fail;
+	 }
+	 sdioh->bcmsdh = bcmsdh_probe(osh, &func->dev, sdioh, adapter, SDIO_BUS, host_idx, rca);
+	 if (sdioh->bcmsdh == NULL) {
+		 sd_err(("%s: bcmsdh_probe failed\n", __FUNCTION__));
+		 goto fail;
+	 }
+
+	sdio_set_drvdata(func, sdioh);
+	return 0;
+
+fail:
+	if (sdioh != NULL)
+		sdioh_detach(osh, sdioh);
+	if (osh != NULL)
+		osl_detach(osh);
+	return -ENOMEM;
+}
+
+static void sdioh_remove(struct sdio_func *func)
+{
+	sdioh_info_t *sdioh;
+	osl_t *osh;
+
+	sdioh = sdio_get_drvdata(func);
+	if (sdioh == NULL) {
+		sd_err(("%s: error, no sdioh handler found\n", __FUNCTION__));
+		return;
+	}
+
+	osh = sdioh->osh;
+	bcmsdh_remove(sdioh->bcmsdh);
+	sdioh_detach(osh, sdioh);
+	osl_detach(osh);
+}
+
+static int bcmsdh_sdmmc_probe(struct sdio_func *func,
+                              const struct sdio_device_id *id)
+{
+	int ret = 0;
+
+	if (func == NULL)
+		return -EINVAL;
+
+	sd_err(("bcmsdh_sdmmc: %s Enter\n", __FUNCTION__));
+	sd_info(("sdio_bcmsdh: func->class=%x\n", func->class));
+	sd_info(("sdio_vendor: 0x%04x\n", func->vendor));
+	sd_info(("sdio_device: 0x%04x\n", func->device));
+	sd_info(("Function#: 0x%04x\n", func->num));
+
+	/* 4318 doesn't have function 2 */
+	if ((func->num == 2) || (func->num == 1 && func->device == 0x4)) {
+		gfunc = func;
+		ret = sdioh_probe(func);
+	}
+	return ret;
+}
+
+static void bcmsdh_sdmmc_remove(struct sdio_func *func)
+{
+	if (func == NULL) {
+		sd_err(("%s is called with NULL SDIO function pointer\n", __FUNCTION__));
+		return;
+	}
+
+	sd_trace(("bcmsdh_sdmmc: %s Enter\n", __FUNCTION__));
+	sd_info(("sdio_bcmsdh: func->class=%x\n", func->class));
+	sd_info(("sdio_vendor: 0x%04x\n", func->vendor));
+	sd_info(("sdio_device: 0x%04x\n", func->device));
+	sd_info(("Function#: 0x%04x\n", func->num));
+
+	if ((func->num == 2) || (func->num == 1 && func->device == 0x4))
+		sdioh_remove(func);
+}
+
+/* devices we support, null terminated */
+static const struct sdio_device_id bcmsdh_sdmmc_ids[] = {
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4324) },
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4335) },
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4354) },
+	{ /* end: all zeroes */				},
+};
+
+MODULE_DEVICE_TABLE(sdio, bcmsdh_sdmmc_ids);
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM)
+static int bcmsdh_sdmmc_suspend(struct device *pdev)
+{
+	int err;
+	sdioh_info_t *sdioh;
+	struct sdio_func *func = dev_to_sdio_func(pdev);
+	mmc_pm_flag_t sdio_flags;
+
+	sd_err(("%s Enter\n", __FUNCTION__));
+	if (func->num != 2)
+		return 0;
+
+	sdioh = sdio_get_drvdata(func);
+	err = bcmsdh_suspend(sdioh->bcmsdh);
+	if (err)
+		return err;
+
+	sdio_flags = sdio_get_host_pm_caps(func);
+	if (!(sdio_flags & MMC_PM_KEEP_POWER)) {
+		sd_err(("%s: can't keep power while host is suspended\n", __FUNCTION__));
+		return  -EINVAL;
+	}
+
+	/* keep power while host suspended */
+	err = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
+	if (err) {
+		sd_err(("%s: error while trying to keep power\n", __FUNCTION__));
+		return err;
+	}
+#if defined(OOB_INTR_ONLY)
+	bcmsdh_oob_intr_set(sdioh->bcmsdh, FALSE);
+#endif 
+	dhd_mmc_suspend = TRUE;
+	smp_mb();
+
+	return 0;
+}
+
+static int bcmsdh_sdmmc_resume(struct device *pdev)
+{
+	sdioh_info_t *sdioh;
+	struct sdio_func *func = dev_to_sdio_func(pdev);
+	struct mmc_host *host;
+
+	sd_err(("%s Enter\n", __FUNCTION__));
+	if (func->num != 2)
+		return 0;
+
+	host = func->card->host;
+	host->pm_flags &= ~MMC_PM_KEEP_POWER;
+
+	sdioh = sdio_get_drvdata(func);
+	dhd_mmc_suspend = FALSE;
+#if defined(OOB_INTR_ONLY)
+	bcmsdh_resume(sdioh->bcmsdh);
+#endif 
+
+	smp_mb();
+	return 0;
+}
+
+static const struct dev_pm_ops bcmsdh_sdmmc_pm_ops = {
+	.suspend	= bcmsdh_sdmmc_suspend,
+	.resume		= bcmsdh_sdmmc_resume,
+};
+#endif  /* (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM) */
+
+#if defined(BCMLXSDMMC)
+static struct semaphore *notify_semaphore = NULL;
+
+static int dummy_probe(struct sdio_func *func,
+                              const struct sdio_device_id *id)
+{
+	if (func && (func->num != 2)) {
+		return 0;
+	}
+
+	if (notify_semaphore)
+		up(notify_semaphore);
+	return 0;
+}
+
+static void dummy_remove(struct sdio_func *func)
+{
+}
+
+static struct sdio_driver dummy_sdmmc_driver = {
+	.probe		= dummy_probe,
+	.remove		= dummy_remove,
+	.name		= "dummy_sdmmc",
+	.id_table	= bcmsdh_sdmmc_ids,
+	};
+
+int sdio_func_reg_notify(void* semaphore)
+{
+	notify_semaphore = semaphore;
+	return sdio_register_driver(&dummy_sdmmc_driver);
+}
+
+void sdio_func_unreg_notify(void)
+{
+	OSL_SLEEP(15);
+	sdio_unregister_driver(&dummy_sdmmc_driver);
+}
+
+#endif /* defined(BCMLXSDMMC) */
+
+static struct sdio_driver bcmsdh_sdmmc_driver = {
+	.probe		= bcmsdh_sdmmc_probe,
+	.remove		= bcmsdh_sdmmc_remove,
+	.name		= "bcmsdh_sdmmc",
+	.id_table	= bcmsdh_sdmmc_ids,
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM)
+	.drv = {
+	.pm	= &bcmsdh_sdmmc_pm_ops,
+	},
+#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM) */
+	};
+
+struct sdos_info {
+	sdioh_info_t *sd;
+	spinlock_t lock;
+};
+
+/* Interrupt enable/disable */
+SDIOH_API_RC
+sdioh_interrupt_set(sdioh_info_t *sd, bool enable)
+{
+	if (!sd)
+		return BCME_BADARG;
+
+	sd_trace(("%s: %s\n", __FUNCTION__, enable ? "Enabling" : "Disabling"));
+	return SDIOH_API_RC_SUCCESS;
+}
+
+#ifdef BCMSDH_MODULE
+static int __init
+bcmsdh_module_init(void)
+{
+	int error = 0;
+	error = sdio_function_init();
+	return error;
+}
+
+static void __exit
+bcmsdh_module_cleanup(void)
+{
+	sdio_function_cleanup();
+}
+
+module_init(bcmsdh_module_init);
+module_exit(bcmsdh_module_cleanup);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION(DESCRIPTION);
+MODULE_AUTHOR(AUTHOR);
+
+#endif /* BCMSDH_MODULE */
+/*
+ * module init
+*/
+int bcmsdh_register_client_driver(void)
+{
+	return sdio_register_driver(&bcmsdh_sdmmc_driver);
+}
+
+/*
+ * module cleanup
+*/
+void bcmsdh_unregister_client_driver(void)
+{
+	sdio_unregister_driver(&bcmsdh_sdmmc_driver);
+}
diff --git a/drivers/net/wireless/bcmdhd/bcmsdspi_linux.c b/drivers/net/wireless/bcmdhd/bcmsdspi_linux.c
new file mode 100644
index 0000000..0f15102
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmsdspi_linux.c
@@ -0,0 +1,249 @@
+/*
+ * Broadcom SPI Host Controller Driver - Linux Per-port
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdspi_linux.c 406045 2013-06-05 22:09:52Z $
+ */
+
+#include <typedefs.h>
+#include <bcmutils.h>
+
+#include <bcmsdbus.h>		/* bcmsdh to/from specific controller APIs */
+#include <sdiovar.h>		/* to get msglevel bit values */
+
+#include <pcicfg.h>
+#include <sdio.h>		/* SDIO Device and Protocol Specs */
+#include <linux/sched.h>	/* request_irq(), free_irq() */
+#include <bcmsdspi.h>
+#include <bcmspi.h>
+
+extern uint sd_crc;
+module_param(sd_crc, uint, 0);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#define KERNEL26
+#endif
+
+struct sdos_info {
+	sdioh_info_t *sd;
+	spinlock_t lock;
+	wait_queue_head_t intr_wait_queue;
+};
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#define BLOCKABLE()	(!in_atomic())
+#else
+#define BLOCKABLE()	(!in_interrupt())
+#endif
+
+/* Interrupt handler */
+static irqreturn_t
+sdspi_isr(int irq, void *dev_id
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
+, struct pt_regs *ptregs
+#endif
+)
+{
+	sdioh_info_t *sd;
+	struct sdos_info *sdos;
+	bool ours;
+
+	sd = (sdioh_info_t *)dev_id;
+	sd->local_intrcount++;
+
+	if (!sd->card_init_done) {
+		sd_err(("%s: Hey Bogus intr...not even initted: irq %d\n", __FUNCTION__, irq));
+		return IRQ_RETVAL(FALSE);
+	} else {
+		ours = spi_check_client_intr(sd, NULL);
+
+		/* For local interrupts, wake the waiting process */
+		if (ours && sd->got_hcint) {
+			sdos = (struct sdos_info *)sd->sdos_info;
+			wake_up_interruptible(&sdos->intr_wait_queue);
+		}
+
+		return IRQ_RETVAL(ours);
+	}
+}
+
+
+/* Register with Linux for interrupts */
+int
+spi_register_irq(sdioh_info_t *sd, uint irq)
+{
+	sd_trace(("Entering %s: irq == %d\n", __FUNCTION__, irq));
+	if (request_irq(irq, sdspi_isr, IRQF_SHARED, "bcmsdspi", sd) < 0) {
+		sd_err(("%s: request_irq() failed\n", __FUNCTION__));
+		return ERROR;
+	}
+	return SUCCESS;
+}
+
+/* Free Linux irq */
+void
+spi_free_irq(uint irq, sdioh_info_t *sd)
+{
+	free_irq(irq, sd);
+}
+
+/* Map Host controller registers */
+uint32 *
+spi_reg_map(osl_t *osh, uintptr addr, int size)
+{
+	return (uint32 *)REG_MAP(addr, size);
+}
+
+void
+spi_reg_unmap(osl_t *osh, uintptr addr, int size)
+{
+	REG_UNMAP((void*)(uintptr)addr);
+}
+
+int
+spi_osinit(sdioh_info_t *sd)
+{
+	struct sdos_info *sdos;
+
+	sdos = (struct sdos_info*)MALLOC(sd->osh, sizeof(struct sdos_info));
+	sd->sdos_info = (void*)sdos;
+	if (sdos == NULL)
+		return BCME_NOMEM;
+
+	sdos->sd = sd;
+	spin_lock_init(&sdos->lock);
+	init_waitqueue_head(&sdos->intr_wait_queue);
+	return BCME_OK;
+}
+
+void
+spi_osfree(sdioh_info_t *sd)
+{
+	struct sdos_info *sdos;
+	ASSERT(sd && sd->sdos_info);
+
+	sdos = (struct sdos_info *)sd->sdos_info;
+	MFREE(sd->osh, sdos, sizeof(struct sdos_info));
+}
+
+/* Interrupt enable/disable */
+SDIOH_API_RC
+sdioh_interrupt_set(sdioh_info_t *sd, bool enable)
+{
+	ulong flags;
+	struct sdos_info *sdos;
+
+	sd_trace(("%s: %s\n", __FUNCTION__, enable ? "Enabling" : "Disabling"));
+
+	sdos = (struct sdos_info *)sd->sdos_info;
+	ASSERT(sdos);
+
+	if (!(sd->host_init_done && sd->card_init_done)) {
+		sd_err(("%s: Card & Host are not initted - bailing\n", __FUNCTION__));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	if (enable && !(sd->intr_handler && sd->intr_handler_arg)) {
+		sd_err(("%s: no handler registered, will not enable\n", __FUNCTION__));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	/* Ensure atomicity for enable/disable calls */
+	spin_lock_irqsave(&sdos->lock, flags);
+
+	sd->client_intr_enabled = enable;
+	if (enable && !sd->lockcount)
+		spi_devintr_on(sd);
+	else
+		spi_devintr_off(sd);
+
+	spin_unlock_irqrestore(&sdos->lock, flags);
+
+	return SDIOH_API_RC_SUCCESS;
+}
+
+/* Protect against reentrancy (disable device interrupts while executing) */
+void
+spi_lock(sdioh_info_t *sd)
+{
+	ulong flags;
+	struct sdos_info *sdos;
+
+	sdos = (struct sdos_info *)sd->sdos_info;
+	ASSERT(sdos);
+
+	sd_trace(("%s: %d\n", __FUNCTION__, sd->lockcount));
+
+	spin_lock_irqsave(&sdos->lock, flags);
+	if (sd->lockcount) {
+		sd_err(("%s: Already locked!\n", __FUNCTION__));
+		ASSERT(sd->lockcount == 0);
+	}
+	spi_devintr_off(sd);
+	sd->lockcount++;
+	spin_unlock_irqrestore(&sdos->lock, flags);
+}
+
+/* Enable client interrupt */
+void
+spi_unlock(sdioh_info_t *sd)
+{
+	ulong flags;
+	struct sdos_info *sdos;
+
+	sd_trace(("%s: %d, %d\n", __FUNCTION__, sd->lockcount, sd->client_intr_enabled));
+	ASSERT(sd->lockcount > 0);
+
+	sdos = (struct sdos_info *)sd->sdos_info;
+	ASSERT(sdos);
+
+	spin_lock_irqsave(&sdos->lock, flags);
+	if (--sd->lockcount == 0 && sd->client_intr_enabled) {
+		spi_devintr_on(sd);
+	}
+	spin_unlock_irqrestore(&sdos->lock, flags);
+}
+
+void spi_waitbits(sdioh_info_t *sd, bool yield)
+{
+#ifndef BCMSDYIELD
+	ASSERT(!yield);
+#endif
+	sd_trace(("%s: yield %d canblock %d\n",
+	          __FUNCTION__, yield, BLOCKABLE()));
+
+	/* Clear the "interrupt happened" flag and last intrstatus */
+	sd->got_hcint = FALSE;
+
+#ifdef BCMSDYIELD
+	if (yield && BLOCKABLE()) {
+		struct sdos_info *sdos;
+		sdos = (struct sdos_info *)sd->sdos_info;
+		/* Wait for the indication, the interrupt will be masked when the ISR fires. */
+		wait_event_interruptible(sdos->intr_wait_queue, (sd->got_hcint));
+	} else
+#endif /* BCMSDYIELD */
+	{
+		spi_spinbits(sd);
+	}
+
+}
diff --git a/drivers/net/wireless/bcmdhd/bcmspibrcm.c b/drivers/net/wireless/bcmdhd/bcmspibrcm.c
new file mode 100644
index 0000000..f0a6102
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmspibrcm.c
@@ -0,0 +1,1810 @@
+/*
+ * Broadcom BCMSDH to gSPI Protocol Conversion Layer
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmspibrcm.c 373331 2012-12-07 04:46:22Z $
+ */
+
+#define HSMODE
+
+#include <typedefs.h>
+
+#include <bcmdevs.h>
+#include <bcmendian.h>
+#include <bcmutils.h>
+#include <osl.h>
+#include <hndsoc.h>
+#include <siutils.h>
+#include <sbchipc.h>
+#include <sbsdio.h>	/* SDIO device core hardware definitions. */
+#include <spid.h>
+
+#include <bcmsdbus.h>	/* bcmsdh to/from specific controller APIs */
+#include <sdiovar.h>	/* ioctl/iovars */
+#include <sdio.h>	/* SDIO Device and Protocol Specs */
+
+#include <pcicfg.h>
+
+
+#include <bcmspibrcm.h>
+#include <bcmspi.h>
+
+/* these are for the older cores... for newer cores we have control for each of them */
+#define F0_RESPONSE_DELAY	16
+#define F1_RESPONSE_DELAY	16
+#define F2_RESPONSE_DELAY	F0_RESPONSE_DELAY
+
+
+#define GSPI_F0_RESP_DELAY		0
+#define GSPI_F1_RESP_DELAY		F1_RESPONSE_DELAY
+#define GSPI_F2_RESP_DELAY		0
+#define GSPI_F3_RESP_DELAY		0
+
+#define CMDLEN		4
+
+#define DWORDMODE_ON (sd->chip == BCM4329_CHIP_ID) && (sd->chiprev == 2) && (sd->dwordmode == TRUE)
+
+/* Globals */
+#if defined(DHD_DEBUG)
+uint sd_msglevel = SDH_ERROR_VAL;
+#else
+uint sd_msglevel = 0;
+#endif 
+
+uint sd_hiok = FALSE;		/* Use hi-speed mode if available? */
+uint sd_sdmode = SDIOH_MODE_SPI;		/* Use SD4 mode by default */
+uint sd_f2_blocksize = 64;		/* Default blocksize */
+
+
+uint sd_divisor = 2;
+uint sd_power = 1;		/* Default to SD Slot powered ON */
+uint sd_clock = 1;		/* Default to SD Clock turned ON */
+uint sd_crc = 0;		/* Default to SPI CRC Check turned OFF */
+uint sd_pci_slot = 0xFFFFffff; /* Used to force selection of a particular PCI slot */
+
+uint8	spi_outbuf[SPI_MAX_PKT_LEN];
+uint8	spi_inbuf[SPI_MAX_PKT_LEN];
+
+/* 128bytes buffer is enough to clear data-not-available and program response-delay F0 bits
+ * assuming we will not exceed F0 response delay > 100 bytes at 48MHz.
+ */
+#define BUF2_PKT_LEN	128
+uint8	spi_outbuf2[BUF2_PKT_LEN];
+uint8	spi_inbuf2[BUF2_PKT_LEN];
+
+#define SPISWAP_WD4(x) bcmswap32(x);
+#define SPISWAP_WD2(x) (bcmswap16(x & 0xffff)) | \
+						(bcmswap16((x & 0xffff0000) >> 16) << 16);
+
+/* Prototypes */
+static bool bcmspi_test_card(sdioh_info_t *sd);
+static bool bcmspi_host_device_init_adapt(sdioh_info_t *sd);
+static int bcmspi_set_highspeed_mode(sdioh_info_t *sd, bool hsmode);
+static int bcmspi_cmd_issue(sdioh_info_t *sd, bool use_dma, uint32 cmd_arg,
+                           uint32 *data, uint32 datalen);
+static int bcmspi_card_regread(sdioh_info_t *sd, int func, uint32 regaddr,
+                              int regsize, uint32 *data);
+static int bcmspi_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr,
+                               int regsize, uint32 data);
+static int bcmspi_card_bytewrite(sdioh_info_t *sd, int func, uint32 regaddr,
+                               uint8 *data);
+static int bcmspi_driver_init(sdioh_info_t *sd);
+static int bcmspi_card_buf(sdioh_info_t *sd, int rw, int func, bool fifo,
+                          uint32 addr, int nbytes, uint32 *data);
+static int bcmspi_card_regread_fixedaddr(sdioh_info_t *sd, int func, uint32 regaddr, int regsize,
+                                 uint32 *data);
+static void bcmspi_cmd_getdstatus(sdioh_info_t *sd, uint32 *dstatus_buffer);
+static int bcmspi_update_stats(sdioh_info_t *sd, uint32 cmd_arg);
+
+/*
+ *  Public entry points & extern's
+ */
+extern sdioh_info_t *
+sdioh_attach(osl_t *osh, void *bar0, uint irq)
+{
+	sdioh_info_t *sd;
+
+	sd_trace(("%s\n", __FUNCTION__));
+	if ((sd = (sdioh_info_t *)MALLOC(osh, sizeof(sdioh_info_t))) == NULL) {
+		sd_err(("%s: out of memory, malloced %d bytes\n", __FUNCTION__, MALLOCED(osh)));
+		return NULL;
+	}
+	bzero((char *)sd, sizeof(sdioh_info_t));
+	sd->osh = osh;
+	if (spi_osinit(sd) != 0) {
+		sd_err(("%s: spi_osinit() failed\n", __FUNCTION__));
+		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+		return NULL;
+	}
+
+	sd->bar0 = bar0;
+	sd->irq = irq;
+	sd->intr_handler = NULL;
+	sd->intr_handler_arg = NULL;
+	sd->intr_handler_valid = FALSE;
+
+	/* Set defaults */
+	sd->use_client_ints = TRUE;
+	sd->sd_use_dma = FALSE;	/* DMA Not supported */
+
+	/* Spi device default is 16bit mode, change to 4 when device is changed to 32bit
+	 * mode
+	 */
+	sd->wordlen = 2;
+
+
+	if (!spi_hw_attach(sd)) {
+		sd_err(("%s: spi_hw_attach() failed\n", __FUNCTION__));
+		spi_osfree(sd);
+		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+		return (NULL);
+	}
+
+	if (bcmspi_driver_init(sd) != SUCCESS) {
+		sd_err(("%s: bcmspi_driver_init() failed()\n", __FUNCTION__));
+		spi_hw_detach(sd);
+		spi_osfree(sd);
+		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+		return (NULL);
+	}
+
+	if (spi_register_irq(sd, irq) != SUCCESS) {
+		sd_err(("%s: spi_register_irq() failed for irq = %d\n", __FUNCTION__, irq));
+		spi_hw_detach(sd);
+		spi_osfree(sd);
+		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+		return (NULL);
+	}
+
+	sd_trace(("%s: Done\n", __FUNCTION__));
+
+	return sd;
+}
+
+extern SDIOH_API_RC
+sdioh_detach(osl_t *osh, sdioh_info_t *sd)
+{
+	sd_trace(("%s\n", __FUNCTION__));
+	if (sd) {
+		sd_err(("%s: detaching from hardware\n", __FUNCTION__));
+		spi_free_irq(sd->irq, sd);
+		spi_hw_detach(sd);
+		spi_osfree(sd);
+		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+	}
+	return SDIOH_API_RC_SUCCESS;
+}
+
+/* Configure callback to client when we recieve client interrupt */
+extern SDIOH_API_RC
+sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh)
+{
+	sd_trace(("%s: Entering\n", __FUNCTION__));
+#if !defined(OOB_INTR_ONLY)
+	sd->intr_handler = fn;
+	sd->intr_handler_arg = argh;
+	sd->intr_handler_valid = TRUE;
+#endif /* !defined(OOB_INTR_ONLY) */
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_interrupt_deregister(sdioh_info_t *sd)
+{
+	sd_trace(("%s: Entering\n", __FUNCTION__));
+#if !defined(OOB_INTR_ONLY)
+	sd->intr_handler_valid = FALSE;
+	sd->intr_handler = NULL;
+	sd->intr_handler_arg = NULL;
+#endif /* !defined(OOB_INTR_ONLY) */
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff)
+{
+	sd_trace(("%s: Entering\n", __FUNCTION__));
+	*onoff = sd->client_intr_enabled;
+	return SDIOH_API_RC_SUCCESS;
+}
+
+#if defined(DHD_DEBUG)
+extern bool
+sdioh_interrupt_pending(sdioh_info_t *sd)
+{
+	return 0;
+}
+#endif
+
+extern SDIOH_API_RC
+sdioh_query_device(sdioh_info_t *sd)
+{
+	/* Return a BRCM ID appropriate to the dongle class */
+	return (sd->num_funcs > 1) ? BCM4329_D11N_ID : BCM4318_D11G_ID;
+}
+
+/* Provide dstatus bits of spi-transaction for dhd layers. */
+extern uint32
+sdioh_get_dstatus(sdioh_info_t *sd)
+{
+	return sd->card_dstatus;
+}
+
+extern void
+sdioh_chipinfo(sdioh_info_t *sd, uint32 chip, uint32 chiprev)
+{
+	sd->chip = chip;
+	sd->chiprev = chiprev;
+}
+
+extern void
+sdioh_dwordmode(sdioh_info_t *sd, bool set)
+{
+	uint8 reg = 0;
+	int status;
+
+	if ((status = sdioh_request_byte(sd, SDIOH_READ, SPI_FUNC_0, SPID_STATUS_ENABLE, &reg)) !=
+	     SUCCESS) {
+		sd_err(("%s: Failed to set dwordmode in gSPI\n", __FUNCTION__));
+		return;
+	}
+
+	if (set) {
+		reg |= DWORD_PKT_LEN_EN;
+		sd->dwordmode = TRUE;
+		sd->client_block_size[SPI_FUNC_2] = 4096; /* h2spi's limit is 4KB, we support 8KB */
+	} else {
+		reg &= ~DWORD_PKT_LEN_EN;
+		sd->dwordmode = FALSE;
+		sd->client_block_size[SPI_FUNC_2] = 2048;
+	}
+
+	if ((status = sdioh_request_byte(sd, SDIOH_WRITE, SPI_FUNC_0, SPID_STATUS_ENABLE, &reg)) !=
+	     SUCCESS) {
+		sd_err(("%s: Failed to set dwordmode in gSPI\n", __FUNCTION__));
+		return;
+	}
+}
+
+
+uint
+sdioh_query_iofnum(sdioh_info_t *sd)
+{
+	return sd->num_funcs;
+}
+
+/* IOVar table */
+enum {
+	IOV_MSGLEVEL = 1,
+	IOV_BLOCKMODE,
+	IOV_BLOCKSIZE,
+	IOV_DMA,
+	IOV_USEINTS,
+	IOV_NUMINTS,
+	IOV_NUMLOCALINTS,
+	IOV_HOSTREG,
+	IOV_DEVREG,
+	IOV_DIVISOR,
+	IOV_SDMODE,
+	IOV_HISPEED,
+	IOV_HCIREGS,
+	IOV_POWER,
+	IOV_CLOCK,
+	IOV_SPIERRSTATS,
+	IOV_RESP_DELAY_ALL
+};
+
+const bcm_iovar_t sdioh_iovars[] = {
+	{"sd_msglevel",	IOV_MSGLEVEL, 	0,	IOVT_UINT32,	0 },
+	{"sd_blocksize", IOV_BLOCKSIZE, 0,	IOVT_UINT32,	0 }, /* ((fn << 16) | size) */
+	{"sd_dma",	IOV_DMA,	0,	IOVT_BOOL,	0 },
+	{"sd_ints",	IOV_USEINTS,	0,	IOVT_BOOL,	0 },
+	{"sd_numints",	IOV_NUMINTS,	0,	IOVT_UINT32,	0 },
+	{"sd_numlocalints", IOV_NUMLOCALINTS, 0, IOVT_UINT32,	0 },
+	{"sd_hostreg",	IOV_HOSTREG,	0,	IOVT_BUFFER,	sizeof(sdreg_t) },
+	{"sd_devreg",	IOV_DEVREG,	0,	IOVT_BUFFER,	sizeof(sdreg_t)	},
+	{"sd_divisor",	IOV_DIVISOR,	0,	IOVT_UINT32,	0 },
+	{"sd_power",	IOV_POWER,	0,	IOVT_UINT32,	0 },
+	{"sd_clock",	IOV_CLOCK,	0,	IOVT_UINT32,	0 },
+	{"sd_mode",	IOV_SDMODE,	0,	IOVT_UINT32,	100},
+	{"sd_highspeed",	IOV_HISPEED,	0,	IOVT_UINT32,	0},
+	{"spi_errstats", IOV_SPIERRSTATS, 0, IOVT_BUFFER, sizeof(struct spierrstats_t) },
+	{"spi_respdelay",	IOV_RESP_DELAY_ALL,	0,	IOVT_BOOL,	0 },
+	{NULL, 0, 0, 0, 0 }
+};
+
+int
+sdioh_iovar_op(sdioh_info_t *si, const char *name,
+               void *params, int plen, void *arg, int len, bool set)
+{
+	const bcm_iovar_t *vi = NULL;
+	int bcmerror = 0;
+	int val_size;
+	int32 int_val = 0;
+	bool bool_val;
+	uint32 actionid;
+/*
+	sdioh_regs_t *regs;
+*/
+
+	ASSERT(name);
+	ASSERT(len >= 0);
+
+	/* Get must have return space; Set does not take qualifiers */
+	ASSERT(set || (arg && len));
+	ASSERT(!set || (!params && !plen));
+
+	sd_trace(("%s: Enter (%s %s)\n", __FUNCTION__, (set ? "set" : "get"), name));
+
+	if ((vi = bcm_iovar_lookup(sdioh_iovars, name)) == NULL) {
+		bcmerror = BCME_UNSUPPORTED;
+		goto exit;
+	}
+
+	if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, set)) != 0)
+		goto exit;
+
+	/* Set up params so get and set can share the convenience variables */
+	if (params == NULL) {
+		params = arg;
+		plen = len;
+	}
+
+	if (vi->type == IOVT_VOID)
+		val_size = 0;
+	else if (vi->type == IOVT_BUFFER)
+		val_size = len;
+	else
+		val_size = sizeof(int);
+
+	if (plen >= (int)sizeof(int_val))
+		bcopy(params, &int_val, sizeof(int_val));
+
+	bool_val = (int_val != 0) ? TRUE : FALSE;
+
+	actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+	switch (actionid) {
+	case IOV_GVAL(IOV_MSGLEVEL):
+		int_val = (int32)sd_msglevel;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_SVAL(IOV_MSGLEVEL):
+		sd_msglevel = int_val;
+		break;
+
+	case IOV_GVAL(IOV_BLOCKSIZE):
+		if ((uint32)int_val > si->num_funcs) {
+			bcmerror = BCME_BADARG;
+			break;
+		}
+		int_val = (int32)si->client_block_size[int_val];
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_GVAL(IOV_DMA):
+		int_val = (int32)si->sd_use_dma;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_SVAL(IOV_DMA):
+		si->sd_use_dma = (bool)int_val;
+		break;
+
+	case IOV_GVAL(IOV_USEINTS):
+		int_val = (int32)si->use_client_ints;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_SVAL(IOV_USEINTS):
+		break;
+
+	case IOV_GVAL(IOV_DIVISOR):
+		int_val = (uint32)sd_divisor;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_SVAL(IOV_DIVISOR):
+		sd_divisor = int_val;
+		if (!spi_start_clock(si, (uint16)sd_divisor)) {
+			sd_err(("%s: set clock failed\n", __FUNCTION__));
+			bcmerror = BCME_ERROR;
+		}
+		break;
+
+	case IOV_GVAL(IOV_POWER):
+		int_val = (uint32)sd_power;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_SVAL(IOV_POWER):
+		sd_power = int_val;
+		break;
+
+	case IOV_GVAL(IOV_CLOCK):
+		int_val = (uint32)sd_clock;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_SVAL(IOV_CLOCK):
+		sd_clock = int_val;
+		break;
+
+	case IOV_GVAL(IOV_SDMODE):
+		int_val = (uint32)sd_sdmode;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_SVAL(IOV_SDMODE):
+		sd_sdmode = int_val;
+		break;
+
+	case IOV_GVAL(IOV_HISPEED):
+		int_val = (uint32)sd_hiok;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_SVAL(IOV_HISPEED):
+		sd_hiok = int_val;
+
+		if (!bcmspi_set_highspeed_mode(si, (bool)sd_hiok)) {
+			sd_err(("%s: Failed changing highspeed mode to %d.\n",
+			        __FUNCTION__, sd_hiok));
+			bcmerror = BCME_ERROR;
+			return ERROR;
+		}
+		break;
+
+	case IOV_GVAL(IOV_NUMINTS):
+		int_val = (int32)si->intrcount;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_GVAL(IOV_NUMLOCALINTS):
+		int_val = (int32)si->local_intrcount;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+	case IOV_GVAL(IOV_DEVREG):
+	{
+		sdreg_t *sd_ptr = (sdreg_t *)params;
+		uint8 data;
+
+		if (sdioh_cfg_read(si, sd_ptr->func, sd_ptr->offset, &data)) {
+			bcmerror = BCME_SDIO_ERROR;
+			break;
+		}
+
+		int_val = (int)data;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+	}
+
+	case IOV_SVAL(IOV_DEVREG):
+	{
+		sdreg_t *sd_ptr = (sdreg_t *)params;
+		uint8 data = (uint8)sd_ptr->value;
+
+		if (sdioh_cfg_write(si, sd_ptr->func, sd_ptr->offset, &data)) {
+			bcmerror = BCME_SDIO_ERROR;
+			break;
+		}
+		break;
+	}
+
+
+	case IOV_GVAL(IOV_SPIERRSTATS):
+	{
+		bcopy(&si->spierrstats, arg, sizeof(struct spierrstats_t));
+		break;
+	}
+
+	case IOV_SVAL(IOV_SPIERRSTATS):
+	{
+		bzero(&si->spierrstats, sizeof(struct spierrstats_t));
+		break;
+	}
+
+	case IOV_GVAL(IOV_RESP_DELAY_ALL):
+		int_val = (int32)si->resp_delay_all;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_SVAL(IOV_RESP_DELAY_ALL):
+		si->resp_delay_all = (bool)int_val;
+		int_val = STATUS_ENABLE|INTR_WITH_STATUS;
+		if (si->resp_delay_all)
+			int_val |= RESP_DELAY_ALL;
+		else {
+			if (bcmspi_card_regwrite(si, SPI_FUNC_0, SPID_RESPONSE_DELAY, 1,
+			     F1_RESPONSE_DELAY) != SUCCESS) {
+				sd_err(("%s: Unable to set response delay.\n", __FUNCTION__));
+				bcmerror = BCME_SDIO_ERROR;
+				break;
+			}
+		}
+
+		if (bcmspi_card_regwrite(si, SPI_FUNC_0, SPID_STATUS_ENABLE, 1, int_val)
+		     != SUCCESS) {
+			sd_err(("%s: Unable to set response delay.\n", __FUNCTION__));
+			bcmerror = BCME_SDIO_ERROR;
+			break;
+		}
+		break;
+
+	default:
+		bcmerror = BCME_UNSUPPORTED;
+		break;
+	}
+exit:
+
+	return bcmerror;
+}
+
+extern SDIOH_API_RC
+sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
+{
+	SDIOH_API_RC status;
+	/* No lock needed since sdioh_request_byte does locking */
+	status = sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data);
+	return status;
+}
+
+extern SDIOH_API_RC
+sdioh_cfg_write(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
+{
+	/* No lock needed since sdioh_request_byte does locking */
+	SDIOH_API_RC status;
+
+	if ((fnc_num == SPI_FUNC_1) && (addr == SBSDIO_FUNC1_FRAMECTRL)) {
+		uint8 dummy_data;
+		status = sdioh_cfg_read(sd, fnc_num, addr, &dummy_data);
+		if (status) {
+			sd_err(("sdioh_cfg_read() failed.\n"));
+			return status;
+		}
+	}
+
+	status = sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data);
+	return status;
+}
+
+extern SDIOH_API_RC
+sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length)
+{
+	uint32 count;
+	int offset;
+	uint32 cis_byte;
+	uint16 *cis = (uint16 *)cisd;
+	uint bar0 = SI_ENUM_BASE;
+	int status;
+	uint8 data;
+
+	sd_trace(("%s: Func %d\n", __FUNCTION__, func));
+
+	spi_lock(sd);
+
+	/* Set sb window address to 0x18000000 */
+	data = (bar0 >> 8) & SBSDIO_SBADDRLOW_MASK;
+	status = bcmspi_card_bytewrite(sd, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW, &data);
+	if (status == SUCCESS) {
+		data = (bar0 >> 16) & SBSDIO_SBADDRMID_MASK;
+		status = bcmspi_card_bytewrite(sd, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRMID, &data);
+	} else {
+		sd_err(("%s: Unable to set sb-addr-windows\n", __FUNCTION__));
+		spi_unlock(sd);
+		return (BCME_ERROR);
+	}
+	if (status == SUCCESS) {
+		data = (bar0 >> 24) & SBSDIO_SBADDRHIGH_MASK;
+		status = bcmspi_card_bytewrite(sd, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRHIGH, &data);
+	} else {
+		sd_err(("%s: Unable to set sb-addr-windows\n", __FUNCTION__));
+		spi_unlock(sd);
+		return (BCME_ERROR);
+	}
+
+	offset =  CC_SROM_OTP; /* OTP offset in chipcommon. */
+	for (count = 0; count < length/2; count++) {
+		if (bcmspi_card_regread (sd, SDIO_FUNC_1, offset, 2, &cis_byte) < 0) {
+			sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__));
+			spi_unlock(sd);
+			return (BCME_ERROR);
+		}
+
+		*cis = (uint16)cis_byte;
+		cis++;
+		offset += 2;
+	}
+
+	spi_unlock(sd);
+
+	return (BCME_OK);
+}
+
+extern SDIOH_API_RC
+sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte)
+{
+	int status;
+	uint32 cmd_arg;
+	uint32 dstatus;
+	uint32 data = (uint32)(*byte);
+
+	spi_lock(sd);
+
+	cmd_arg = 0;
+	cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func);
+	cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1);	/* Incremental access */
+	cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr);
+	cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, rw == SDIOH_READ ? 0 : 1);
+	cmd_arg = SFIELD(cmd_arg, SPI_LEN, 1);
+
+	if (rw == SDIOH_READ) {
+		sd_trace(("%s: RD cmd_arg=0x%x func=%d regaddr=0x%x\n",
+		          __FUNCTION__, cmd_arg, func, regaddr));
+	} else {
+		sd_trace(("%s: WR cmd_arg=0x%x func=%d regaddr=0x%x data=0x%x\n",
+		          __FUNCTION__, cmd_arg, func, regaddr, data));
+	}
+
+	if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, &data, 1)) != SUCCESS) {
+		spi_unlock(sd);
+		return status;
+	}
+
+	if (rw == SDIOH_READ) {
+		*byte = (uint8)data;
+		sd_trace(("%s: RD result=0x%x\n", __FUNCTION__, *byte));
+	}
+
+	bcmspi_cmd_getdstatus(sd, &dstatus);
+	if (dstatus)
+		sd_trace(("dstatus=0x%x\n", dstatus));
+
+	spi_unlock(sd);
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr,
+                   uint32 *word, uint nbytes)
+{
+	int status;
+
+	spi_lock(sd);
+
+	if (rw == SDIOH_READ)
+		status = bcmspi_card_regread(sd, func, addr, nbytes, word);
+	else
+		status = bcmspi_card_regwrite(sd, func, addr, nbytes, *word);
+
+	spi_unlock(sd);
+	return (status == SUCCESS ?  SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+}
+
+extern SDIOH_API_RC
+sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint rw, uint func,
+                     uint addr, uint reg_width, uint buflen_u, uint8 *buffer, void *pkt)
+{
+	int len;
+	int buflen = (int)buflen_u;
+	bool fifo = (fix_inc == SDIOH_DATA_FIX);
+
+	spi_lock(sd);
+
+	ASSERT(reg_width == 4);
+	ASSERT(buflen_u < (1 << 30));
+	ASSERT(sd->client_block_size[func]);
+
+	sd_data(("%s: %c len %d r_cnt %d t_cnt %d, pkt @0x%p\n",
+	         __FUNCTION__, rw == SDIOH_READ ? 'R' : 'W',
+	         buflen_u, sd->r_cnt, sd->t_cnt, pkt));
+
+	/* Break buffer down into blocksize chunks. */
+	while (buflen > 0) {
+		len = MIN(sd->client_block_size[func], buflen);
+		if (bcmspi_card_buf(sd, rw, func, fifo, addr, len, (uint32 *)buffer) != SUCCESS) {
+			sd_err(("%s: bcmspi_card_buf %s failed\n",
+				__FUNCTION__, rw == SDIOH_READ ? "Read" : "Write"));
+			spi_unlock(sd);
+			return SDIOH_API_RC_FAIL;
+		}
+		buffer += len;
+		buflen -= len;
+		if (!fifo)
+			addr += len;
+	}
+	spi_unlock(sd);
+	return SDIOH_API_RC_SUCCESS;
+}
+
+/* This function allows write to gspi bus when another rd/wr function is deep down the call stack.
+ * Its main aim is to have simpler spi writes rather than recursive writes.
+ * e.g. When there is a need to program response delay on the fly after detecting the SPI-func
+ * this call will allow to program the response delay.
+ */
+static int
+bcmspi_card_byterewrite(sdioh_info_t *sd, int func, uint32 regaddr, uint8 byte)
+{
+	uint32 cmd_arg;
+	uint32 datalen = 1;
+	uint32 hostlen;
+
+	cmd_arg = 0;
+
+	cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 1);
+	cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1);	/* Incremental access */
+	cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func);
+	cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr);
+	cmd_arg = SFIELD(cmd_arg, SPI_LEN, datalen);
+
+	sd_trace(("%s cmd_arg = 0x%x\n", __FUNCTION__, cmd_arg));
+
+
+	/* Set up and issue the SPI command.  MSByte goes out on bus first.  Increase datalen
+	 * according to the wordlen mode(16/32bit) the device is in.
+	 */
+	ASSERT(sd->wordlen == 4 || sd->wordlen == 2);
+	datalen = ROUNDUP(datalen, sd->wordlen);
+
+	/* Start by copying command in the spi-outbuffer */
+	if (sd->wordlen == 4) { /* 32bit spid */
+		*(uint32 *)spi_outbuf2 = SPISWAP_WD4(cmd_arg);
+		if (datalen & 0x3)
+			datalen += (4 - (datalen & 0x3));
+	} else if (sd->wordlen == 2) { /* 16bit spid */
+		*(uint32 *)spi_outbuf2 = SPISWAP_WD2(cmd_arg);
+		if (datalen & 0x1)
+			datalen++;
+	} else {
+		sd_err(("%s: Host is %d bit spid, could not create SPI command.\n",
+		        __FUNCTION__, 8 * sd->wordlen));
+		return ERROR;
+	}
+
+	/* for Write, put the data into the output buffer  */
+	if (datalen != 0) {
+			if (sd->wordlen == 4) { /* 32bit spid */
+				*(uint32 *)&spi_outbuf2[CMDLEN] = SPISWAP_WD4(byte);
+			} else if (sd->wordlen == 2) { /* 16bit spid */
+				*(uint32 *)&spi_outbuf2[CMDLEN] = SPISWAP_WD2(byte);
+			}
+	}
+
+	/* +4 for cmd, +4 for dstatus */
+	hostlen = datalen + 8;
+	hostlen += (4 - (hostlen & 0x3));
+	spi_sendrecv(sd, spi_outbuf2, spi_inbuf2, hostlen);
+
+	/* Last 4bytes are dstatus.  Device is configured to return status bits. */
+	if (sd->wordlen == 4) { /* 32bit spid */
+		sd->card_dstatus = SPISWAP_WD4(*(uint32 *)&spi_inbuf2[datalen + CMDLEN ]);
+	} else if (sd->wordlen == 2) { /* 16bit spid */
+		sd->card_dstatus = SPISWAP_WD2(*(uint32 *)&spi_inbuf2[datalen + CMDLEN ]);
+	} else {
+		sd_err(("%s: Host is %d bit machine, could not read SPI dstatus.\n",
+		        __FUNCTION__, 8 * sd->wordlen));
+		return ERROR;
+	}
+
+	if (sd->card_dstatus)
+		sd_trace(("dstatus after byte rewrite = 0x%x\n", sd->card_dstatus));
+
+	return (BCME_OK);
+}
+
+/* Program the response delay corresponding to the spi function */
+static int
+bcmspi_prog_resp_delay(sdioh_info_t *sd, int func, uint8 resp_delay)
+{
+	if (sd->resp_delay_all == FALSE)
+		return (BCME_OK);
+
+	if (sd->prev_fun == func)
+		return (BCME_OK);
+
+	if (F0_RESPONSE_DELAY == F1_RESPONSE_DELAY)
+		return (BCME_OK);
+
+	bcmspi_card_byterewrite(sd, SPI_FUNC_0, SPID_RESPONSE_DELAY, resp_delay);
+
+	/* Remember function for which to avoid reprogramming resp-delay in next iteration */
+	sd->prev_fun = func;
+
+	return (BCME_OK);
+
+}
+
+#define GSPI_RESYNC_PATTERN	0x0
+
+/* A resync pattern is a 32bit MOSI line with all zeros. Its a special command in gSPI.
+ * It resets the spi-bkplane logic so that all F1 related ping-pong buffer logic is
+ * synchronised and all queued resuests are cancelled.
+ */
+static int
+bcmspi_resync_f1(sdioh_info_t *sd)
+{
+	uint32 cmd_arg = GSPI_RESYNC_PATTERN, data = 0, datalen = 0;
+
+
+	/* Set up and issue the SPI command.  MSByte goes out on bus first.  Increase datalen
+	 * according to the wordlen mode(16/32bit) the device is in.
+	 */
+	ASSERT(sd->wordlen == 4 || sd->wordlen == 2);
+	datalen = ROUNDUP(datalen, sd->wordlen);
+
+	/* Start by copying command in the spi-outbuffer */
+	*(uint32 *)spi_outbuf2 = cmd_arg;
+
+	/* for Write, put the data into the output buffer  */
+	*(uint32 *)&spi_outbuf2[CMDLEN] = data;
+
+	/* +4 for cmd, +4 for dstatus */
+	spi_sendrecv(sd, spi_outbuf2, spi_inbuf2, datalen + 8);
+
+	/* Last 4bytes are dstatus.  Device is configured to return status bits. */
+	if (sd->wordlen == 4) { /* 32bit spid */
+		sd->card_dstatus = SPISWAP_WD4(*(uint32 *)&spi_inbuf2[datalen + CMDLEN ]);
+	} else if (sd->wordlen == 2) { /* 16bit spid */
+		sd->card_dstatus = SPISWAP_WD2(*(uint32 *)&spi_inbuf2[datalen + CMDLEN ]);
+	} else {
+		sd_err(("%s: Host is %d bit machine, could not read SPI dstatus.\n",
+		        __FUNCTION__, 8 * sd->wordlen));
+		return ERROR;
+	}
+
+	if (sd->card_dstatus)
+		sd_trace(("dstatus after resync pattern write = 0x%x\n", sd->card_dstatus));
+
+	return (BCME_OK);
+}
+
+uint32 dstatus_count = 0;
+
+static int
+bcmspi_update_stats(sdioh_info_t *sd, uint32 cmd_arg)
+{
+	uint32 dstatus = sd->card_dstatus;
+	struct spierrstats_t *spierrstats = &sd->spierrstats;
+	int err = SUCCESS;
+
+	sd_trace(("cmd = 0x%x, dstatus = 0x%x\n", cmd_arg, dstatus));
+
+	/* Store dstatus of last few gSPI transactions */
+	spierrstats->dstatus[dstatus_count % NUM_PREV_TRANSACTIONS] = dstatus;
+	spierrstats->spicmd[dstatus_count % NUM_PREV_TRANSACTIONS] = cmd_arg;
+	dstatus_count++;
+
+	if (sd->card_init_done == FALSE)
+		return err;
+
+	if (dstatus & STATUS_DATA_NOT_AVAILABLE) {
+		spierrstats->dna++;
+		sd_trace(("Read data not available on F1 addr = 0x%x\n",
+		        GFIELD(cmd_arg, SPI_REG_ADDR)));
+		/* Clear dna bit */
+		bcmspi_card_byterewrite(sd, SPI_FUNC_0, SPID_INTR_REG, DATA_UNAVAILABLE);
+	}
+
+	if (dstatus & STATUS_UNDERFLOW) {
+		spierrstats->rdunderflow++;
+		sd_err(("FIFO underflow happened due to current F2 read command.\n"));
+	}
+
+	if (dstatus & STATUS_OVERFLOW) {
+		spierrstats->wroverflow++;
+		sd_err(("FIFO overflow happened due to current (F1/F2) write command.\n"));
+		bcmspi_card_byterewrite(sd, SPI_FUNC_0, SPID_INTR_REG, F1_OVERFLOW);
+		bcmspi_resync_f1(sd);
+		sd_err(("Recovering from F1 FIFO overflow.\n"));
+	}
+
+	if (dstatus & STATUS_F2_INTR) {
+		spierrstats->f2interrupt++;
+		sd_trace(("Interrupt from F2.  SW should clear corresponding IntStatus bits\n"));
+	}
+
+	if (dstatus & STATUS_F3_INTR) {
+		spierrstats->f3interrupt++;
+		sd_err(("Interrupt from F3.  SW should clear corresponding IntStatus bits\n"));
+	}
+
+	if (dstatus & STATUS_HOST_CMD_DATA_ERR) {
+		spierrstats->hostcmddataerr++;
+		sd_err(("Error in CMD or Host data, detected by CRC/Checksum (optional)\n"));
+	}
+
+	if (dstatus & STATUS_F2_PKT_AVAILABLE) {
+		spierrstats->f2pktavailable++;
+		sd_trace(("Packet is available/ready in F2 TX FIFO\n"));
+		sd_trace(("Packet length = %d\n", sd->dwordmode ?
+		         ((dstatus & STATUS_F2_PKT_LEN_MASK) >> (STATUS_F2_PKT_LEN_SHIFT - 2)) :
+		         ((dstatus & STATUS_F2_PKT_LEN_MASK) >> STATUS_F2_PKT_LEN_SHIFT)));
+	}
+
+	if (dstatus & STATUS_F3_PKT_AVAILABLE) {
+		spierrstats->f3pktavailable++;
+		sd_err(("Packet is available/ready in F3 TX FIFO\n"));
+		sd_err(("Packet length = %d\n",
+		        (dstatus & STATUS_F3_PKT_LEN_MASK) >> STATUS_F3_PKT_LEN_SHIFT));
+	}
+
+	return err;
+}
+
+extern int
+sdioh_abort(sdioh_info_t *sd, uint func)
+{
+	return 0;
+}
+
+int
+sdioh_start(sdioh_info_t *sd, int stage)
+{
+	return SUCCESS;
+}
+
+int
+sdioh_stop(sdioh_info_t *sd)
+{
+	return SUCCESS;
+}
+
+int
+sdioh_waitlockfree(sdioh_info_t *sd)
+{
+	return SUCCESS;
+}
+
+
+/*
+ * Private/Static work routines
+ */
+static int
+bcmspi_host_init(sdioh_info_t *sd)
+{
+
+	/* Default power on mode */
+	sd->sd_mode = SDIOH_MODE_SPI;
+	sd->polled_mode = TRUE;
+	sd->host_init_done = TRUE;
+	sd->card_init_done = FALSE;
+	sd->adapter_slot = 1;
+
+	return (SUCCESS);
+}
+
+static int
+get_client_blocksize(sdioh_info_t *sd)
+{
+	uint32 regdata[2];
+	int status;
+
+	/* Find F1/F2/F3 max packet size */
+	if ((status = bcmspi_card_regread(sd, 0, SPID_F1_INFO_REG,
+	                                 8, regdata)) != SUCCESS) {
+		return status;
+	}
+
+	sd_trace(("pkt_size regdata[0] = 0x%x, regdata[1] = 0x%x\n",
+	        regdata[0], regdata[1]));
+
+	sd->client_block_size[1] = (regdata[0] & F1_MAX_PKT_SIZE) >> 2;
+	sd_trace(("Func1 blocksize = %d\n", sd->client_block_size[1]));
+	ASSERT(sd->client_block_size[1] == BLOCK_SIZE_F1);
+
+	sd->client_block_size[2] = ((regdata[0] >> 16) & F2_MAX_PKT_SIZE) >> 2;
+	sd_trace(("Func2 blocksize = %d\n", sd->client_block_size[2]));
+	ASSERT(sd->client_block_size[2] == BLOCK_SIZE_F2);
+
+	sd->client_block_size[3] = (regdata[1] & F3_MAX_PKT_SIZE) >> 2;
+	sd_trace(("Func3 blocksize = %d\n", sd->client_block_size[3]));
+	ASSERT(sd->client_block_size[3] == BLOCK_SIZE_F3);
+
+	return 0;
+}
+
+static int
+bcmspi_client_init(sdioh_info_t *sd)
+{
+	uint32	status_en_reg = 0;
+	sd_trace(("%s: Powering up slot %d\n", __FUNCTION__, sd->adapter_slot));
+
+#ifdef HSMODE
+	if (!spi_start_clock(sd, (uint16)sd_divisor)) {
+		sd_err(("spi_start_clock failed\n"));
+		return ERROR;
+	}
+#else
+	/* Start at ~400KHz clock rate for initialization */
+	if (!spi_start_clock(sd, 128)) {
+		sd_err(("spi_start_clock failed\n"));
+		return ERROR;
+	}
+#endif /* HSMODE */
+
+	if (!bcmspi_host_device_init_adapt(sd)) {
+		sd_err(("bcmspi_host_device_init_adapt failed\n"));
+		return ERROR;
+	}
+
+	if (!bcmspi_test_card(sd)) {
+		sd_err(("bcmspi_test_card failed\n"));
+		return ERROR;
+	}
+
+	sd->num_funcs = SPI_MAX_IOFUNCS;
+
+	get_client_blocksize(sd);
+
+	/* Apply resync pattern cmd with all zeros to reset spi-bkplane F1 logic */
+	bcmspi_resync_f1(sd);
+
+	sd->dwordmode = FALSE;
+
+	bcmspi_card_regread(sd, 0, SPID_STATUS_ENABLE, 1, &status_en_reg);
+
+	sd_trace(("%s: Enabling interrupt with dstatus \n", __FUNCTION__));
+	status_en_reg |= INTR_WITH_STATUS;
+
+	if (bcmspi_card_regwrite(sd, SPI_FUNC_0, SPID_STATUS_ENABLE, 1,
+	    status_en_reg & 0xff) != SUCCESS) {
+		sd_err(("%s: Unable to set response delay for all fun's.\n", __FUNCTION__));
+		return ERROR;
+	}
+
+#ifndef HSMODE
+	/* After configuring for High-Speed mode, set the desired clock rate. */
+	if (!spi_start_clock(sd, 4)) {
+		sd_err(("spi_start_clock failed\n"));
+		return ERROR;
+	}
+#endif /* HSMODE */
+
+	/* check to see if the response delay needs to be programmed properly */
+	{
+		uint32 f1_respdelay = 0;
+		bcmspi_card_regread(sd, 0, SPID_RESP_DELAY_F1, 1, &f1_respdelay);
+		if ((f1_respdelay == 0) || (f1_respdelay == 0xFF)) {
+			/* older sdiodevice core and has no separte resp delay for each of */
+			sd_err(("older corerev < 4 so use the same resp delay for all funcs\n"));
+			sd->resp_delay_new = FALSE;
+		}
+		else {
+			/* older sdiodevice core and has no separte resp delay for each of */
+			int ret_val;
+			sd->resp_delay_new = TRUE;
+			sd_err(("new corerev >= 4 so set the resp delay for each of the funcs\n"));
+			sd_trace(("resp delay for funcs f0(%d), f1(%d), f2(%d), f3(%d)\n",
+				GSPI_F0_RESP_DELAY, GSPI_F1_RESP_DELAY,
+				GSPI_F2_RESP_DELAY, GSPI_F3_RESP_DELAY));
+			ret_val = bcmspi_card_regwrite(sd, SPI_FUNC_0, SPID_RESP_DELAY_F0, 1,
+				GSPI_F0_RESP_DELAY);
+			if (ret_val != SUCCESS) {
+				sd_err(("%s: Unable to set response delay for F0\n", __FUNCTION__));
+				return ERROR;
+			}
+			ret_val = bcmspi_card_regwrite(sd, SPI_FUNC_0, SPID_RESP_DELAY_F1, 1,
+				GSPI_F1_RESP_DELAY);
+			if (ret_val != SUCCESS) {
+				sd_err(("%s: Unable to set response delay for F1\n", __FUNCTION__));
+				return ERROR;
+			}
+			ret_val = bcmspi_card_regwrite(sd, SPI_FUNC_0, SPID_RESP_DELAY_F2, 1,
+				GSPI_F2_RESP_DELAY);
+			if (ret_val != SUCCESS) {
+				sd_err(("%s: Unable to set response delay for F2\n", __FUNCTION__));
+				return ERROR;
+			}
+			ret_val = bcmspi_card_regwrite(sd, SPI_FUNC_0, SPID_RESP_DELAY_F3, 1,
+				GSPI_F3_RESP_DELAY);
+			if (ret_val != SUCCESS) {
+				sd_err(("%s: Unable to set response delay for F2\n", __FUNCTION__));
+				return ERROR;
+			}
+		}
+	}
+
+
+	sd->card_init_done = TRUE;
+
+	/* get the device rev to program the prop respdelays */
+
+	return SUCCESS;
+}
+
+static int
+bcmspi_set_highspeed_mode(sdioh_info_t *sd, bool hsmode)
+{
+	uint32 regdata;
+	int status;
+
+	if ((status = bcmspi_card_regread(sd, 0, SPID_CONFIG,
+	                                 4, &regdata)) != SUCCESS)
+		return status;
+
+	sd_trace(("In %s spih-ctrl = 0x%x \n", __FUNCTION__, regdata));
+
+
+	if (hsmode == TRUE) {
+		sd_trace(("Attempting to enable High-Speed mode.\n"));
+
+		if (regdata & HIGH_SPEED_MODE) {
+			sd_trace(("Device is already in High-Speed mode.\n"));
+			return status;
+		} else {
+			regdata |= HIGH_SPEED_MODE;
+			sd_trace(("Writing %08x to device at %08x\n", regdata, SPID_CONFIG));
+			if ((status = bcmspi_card_regwrite(sd, 0, SPID_CONFIG,
+			                                  4, regdata)) != SUCCESS) {
+				return status;
+			}
+		}
+	} else {
+		sd_trace(("Attempting to disable High-Speed mode.\n"));
+
+		if (regdata & HIGH_SPEED_MODE) {
+			regdata &= ~HIGH_SPEED_MODE;
+			sd_trace(("Writing %08x to device at %08x\n", regdata, SPID_CONFIG));
+			if ((status = bcmspi_card_regwrite(sd, 0, SPID_CONFIG,
+			                                  4, regdata)) != SUCCESS)
+				return status;
+		}
+		 else {
+			sd_trace(("Device is already in Low-Speed mode.\n"));
+			return status;
+		}
+	}
+	spi_controller_highspeed_mode(sd, hsmode);
+
+	return TRUE;
+}
+
+#define bcmspi_find_curr_mode(sd) { \
+	sd->wordlen = 2; \
+	status = bcmspi_card_regread_fixedaddr(sd, 0, SPID_TEST_READ, 4, &regdata); \
+	regdata &= 0xff; \
+	if ((regdata == 0xad) || (regdata == 0x5b) || \
+	    (regdata == 0x5d) || (regdata == 0x5a)) \
+		break; \
+	sd->wordlen = 4; \
+	status = bcmspi_card_regread_fixedaddr(sd, 0, SPID_TEST_READ, 4, &regdata); \
+	regdata &= 0xff; \
+	if ((regdata == 0xad) || (regdata == 0x5b) || \
+	    (regdata == 0x5d) || (regdata == 0x5a)) \
+		break; \
+	sd_trace(("Silicon testability issue: regdata = 0x%x." \
+		" Expected 0xad, 0x5a, 0x5b or 0x5d.\n", regdata)); \
+	OSL_DELAY(100000); \
+}
+
+#define INIT_ADAPT_LOOP		100
+
+/* Adapt clock-phase-speed-bitwidth between host and device */
+static bool
+bcmspi_host_device_init_adapt(sdioh_info_t *sd)
+{
+	uint32 wrregdata, regdata = 0;
+	int status;
+	int i;
+
+	/* Due to a silicon testability issue, the first command from the Host
+	 * to the device will get corrupted (first bit will be lost). So the
+	 * Host should poll the device with a safe read request. ie: The Host
+	 * should try to read F0 addr 0x14 using the Fixed address mode
+	 * (This will prevent a unintended write command to be detected by device)
+	 */
+	for (i = 0; i < INIT_ADAPT_LOOP; i++) {
+	/* If device was not power-cycled it will stay in 32bit mode with
+	 * response-delay-all bit set.  Alternate the iteration so that
+	 * read either with or without response-delay for F0 to succeed.
+	 */
+		bcmspi_find_curr_mode(sd);
+		sd->resp_delay_all = (i & 0x1) ? TRUE : FALSE;
+
+		bcmspi_find_curr_mode(sd);
+		sd->dwordmode = TRUE;
+
+		bcmspi_find_curr_mode(sd);
+		sd->dwordmode = FALSE;
+	}
+
+	/* Bail out, device not detected */
+	if (i == INIT_ADAPT_LOOP)
+		return FALSE;
+
+	/* Softreset the spid logic */
+	if ((sd->dwordmode) || (sd->wordlen == 4)) {
+		bcmspi_card_regwrite(sd, 0, SPID_RESET_BP, 1, RESET_ON_WLAN_BP_RESET|RESET_SPI);
+		bcmspi_card_regread(sd, 0, SPID_RESET_BP, 1, &regdata);
+		sd_trace(("reset reg read = 0x%x\n", regdata));
+		sd_trace(("dwordmode = %d, wordlen = %d, resp_delay_all = %d\n", sd->dwordmode,
+		       sd->wordlen, sd->resp_delay_all));
+		/* Restore default state after softreset */
+		sd->wordlen = 2;
+		sd->dwordmode = FALSE;
+	}
+
+	if (sd->wordlen == 4) {
+		if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, &regdata)) !=
+		     SUCCESS)
+				return FALSE;
+		if (regdata == TEST_RO_DATA_32BIT_LE) {
+			sd_trace(("Spid is already in 32bit LE mode. Value read = 0x%x\n",
+			          regdata));
+			sd_trace(("Spid power was left on.\n"));
+		} else {
+			sd_err(("Spid power was left on but signature read failed."
+			        " Value read = 0x%x\n", regdata));
+			return FALSE;
+		}
+	} else {
+		sd->wordlen = 2;
+
+#define CTRL_REG_DEFAULT	0x00010430 /* according to the host m/c */
+
+		wrregdata = (CTRL_REG_DEFAULT);
+
+		if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, &regdata)) != SUCCESS)
+			return FALSE;
+		sd_trace(("(we are still in 16bit mode) 32bit READ LE regdata = 0x%x\n", regdata));
+
+#ifndef HSMODE
+		wrregdata |= (CLOCK_PHASE | CLOCK_POLARITY);
+		wrregdata &= ~HIGH_SPEED_MODE;
+		bcmspi_card_regwrite(sd, 0, SPID_CONFIG, 4, wrregdata);
+#endif /* HSMODE */
+
+		for (i = 0; i < INIT_ADAPT_LOOP; i++) {
+			if ((regdata == 0xfdda7d5b) || (regdata == 0xfdda7d5a)) {
+				sd_trace(("0xfeedbead was leftshifted by 1-bit.\n"));
+				if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4,
+				     &regdata)) != SUCCESS)
+					return FALSE;
+			}
+			OSL_DELAY(1000);
+		}
+
+		/* Change to host controller intr-polarity of active-low */
+		wrregdata &= ~INTR_POLARITY;
+		sd_trace(("(we are still in 16bit mode) 32bit Write LE reg-ctrl-data = 0x%x\n",
+		        wrregdata));
+		/* Change to 32bit mode */
+		wrregdata |= WORD_LENGTH_32;
+		bcmspi_card_regwrite(sd, 0, SPID_CONFIG, 4, wrregdata);
+
+		/* Change command/data packaging in 32bit LE mode */
+		sd->wordlen = 4;
+
+		if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, &regdata)) != SUCCESS)
+			return FALSE;
+
+		if (regdata == TEST_RO_DATA_32BIT_LE) {
+			sd_trace(("Read spid passed. Value read = 0x%x\n", regdata));
+			sd_trace(("Spid had power-on cycle OR spi was soft-resetted \n"));
+		} else {
+			sd_err(("Stale spid reg values read as it was kept powered. Value read ="
+			  "0x%x\n", regdata));
+			return FALSE;
+		}
+	}
+
+
+	return TRUE;
+}
+
+static bool
+bcmspi_test_card(sdioh_info_t *sd)
+{
+	uint32 regdata;
+	int status;
+
+	if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, &regdata)) != SUCCESS)
+		return FALSE;
+
+	if (regdata == (TEST_RO_DATA_32BIT_LE))
+		sd_trace(("32bit LE regdata = 0x%x\n", regdata));
+	else {
+		sd_trace(("Incorrect 32bit LE regdata = 0x%x\n", regdata));
+		return FALSE;
+	}
+
+
+#define RW_PATTERN1	0xA0A1A2A3
+#define RW_PATTERN2	0x4B5B6B7B
+
+	regdata = RW_PATTERN1;
+	if ((status = bcmspi_card_regwrite(sd, 0, SPID_TEST_RW, 4, regdata)) != SUCCESS)
+		return FALSE;
+	regdata = 0;
+	if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_RW, 4, &regdata)) != SUCCESS)
+		return FALSE;
+	if (regdata != RW_PATTERN1) {
+		sd_err(("Write-Read spid failed. Value wrote = 0x%x, Value read = 0x%x\n",
+			RW_PATTERN1, regdata));
+		return FALSE;
+	} else
+		sd_trace(("R/W spid passed. Value read = 0x%x\n", regdata));
+
+	regdata = RW_PATTERN2;
+	if ((status = bcmspi_card_regwrite(sd, 0, SPID_TEST_RW, 4, regdata)) != SUCCESS)
+		return FALSE;
+	regdata = 0;
+	if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_RW, 4, &regdata)) != SUCCESS)
+		return FALSE;
+	if (regdata != RW_PATTERN2) {
+		sd_err(("Write-Read spid failed. Value wrote = 0x%x, Value read = 0x%x\n",
+			RW_PATTERN2, regdata));
+		return FALSE;
+	} else
+		sd_trace(("R/W spid passed. Value read = 0x%x\n", regdata));
+
+	return TRUE;
+}
+
+static int
+bcmspi_driver_init(sdioh_info_t *sd)
+{
+	sd_trace(("%s\n", __FUNCTION__));
+	if ((bcmspi_host_init(sd)) != SUCCESS) {
+		return ERROR;
+	}
+
+	if (bcmspi_client_init(sd) != SUCCESS) {
+		return ERROR;
+	}
+
+	return SUCCESS;
+}
+
+/* Read device reg */
+static int
+bcmspi_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data)
+{
+	int status;
+	uint32 cmd_arg, dstatus;
+
+	ASSERT(regsize);
+
+	if (func == 2)
+		sd_trace(("Reg access on F2 will generate error indication in dstatus bits.\n"));
+
+	cmd_arg = 0;
+	cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 0);
+	cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1);	/* Incremental access */
+	cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func);
+	cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr);
+	cmd_arg = SFIELD(cmd_arg, SPI_LEN, regsize == BLOCK_SIZE_F2 ? 0 : regsize);
+
+	sd_trace(("%s: RD cmd_arg=0x%x func=%d regaddr=0x%x regsize=%d\n",
+	          __FUNCTION__, cmd_arg, func, regaddr, regsize));
+
+	if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, data, regsize)) != SUCCESS)
+		return status;
+
+	bcmspi_cmd_getdstatus(sd, &dstatus);
+	if (dstatus)
+		sd_trace(("dstatus =0x%x\n", dstatus));
+
+	return SUCCESS;
+}
+
+static int
+bcmspi_card_regread_fixedaddr(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data)
+{
+
+	int status;
+	uint32 cmd_arg;
+	uint32 dstatus;
+
+	ASSERT(regsize);
+
+	if (func == 2)
+		sd_trace(("Reg access on F2 will generate error indication in dstatus bits.\n"));
+
+	cmd_arg = 0;
+	cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 0);
+	cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 0);	/* Fixed access */
+	cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func);
+	cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr);
+	cmd_arg = SFIELD(cmd_arg, SPI_LEN, regsize);
+
+	sd_trace(("%s: RD cmd_arg=0x%x func=%d regaddr=0x%x regsize=%d\n",
+	          __FUNCTION__, cmd_arg, func, regaddr, regsize));
+
+	if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, data, regsize)) != SUCCESS)
+		return status;
+
+	sd_trace(("%s: RD result=0x%x\n", __FUNCTION__, *data));
+
+	bcmspi_cmd_getdstatus(sd, &dstatus);
+	sd_trace(("dstatus =0x%x\n", dstatus));
+	return SUCCESS;
+}
+
+/* write a device register */
+static int
+bcmspi_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data)
+{
+	int status;
+	uint32 cmd_arg, dstatus;
+
+	ASSERT(regsize);
+
+	cmd_arg = 0;
+
+	cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 1);
+	cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1);	/* Incremental access */
+	cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func);
+	cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr);
+	cmd_arg = SFIELD(cmd_arg, SPI_LEN, regsize == BLOCK_SIZE_F2 ? 0 : regsize);
+
+	sd_trace(("%s: WR cmd_arg=0x%x func=%d regaddr=0x%x regsize=%d data=0x%x\n",
+	          __FUNCTION__, cmd_arg, func, regaddr, regsize, data));
+
+	if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, &data, regsize)) != SUCCESS)
+		return status;
+
+	bcmspi_cmd_getdstatus(sd, &dstatus);
+	if (dstatus)
+		sd_trace(("dstatus=0x%x\n", dstatus));
+
+	return SUCCESS;
+}
+
+/* write a device register - 1 byte */
+static int
+bcmspi_card_bytewrite(sdioh_info_t *sd, int func, uint32 regaddr, uint8 *byte)
+{
+	int status;
+	uint32 cmd_arg;
+	uint32 dstatus;
+	uint32 data = (uint32)(*byte);
+
+	cmd_arg = 0;
+	cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func);
+	cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1);	/* Incremental access */
+	cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr);
+	cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 1);
+	cmd_arg = SFIELD(cmd_arg, SPI_LEN, 1);
+
+	sd_trace(("%s: WR cmd_arg=0x%x func=%d regaddr=0x%x data=0x%x\n",
+	          __FUNCTION__, cmd_arg, func, regaddr, data));
+
+	if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, &data, 1)) != SUCCESS)
+		return status;
+
+	bcmspi_cmd_getdstatus(sd, &dstatus);
+	if (dstatus)
+		sd_trace(("dstatus =0x%x\n", dstatus));
+
+	return SUCCESS;
+}
+
+void
+bcmspi_cmd_getdstatus(sdioh_info_t *sd, uint32 *dstatus_buffer)
+{
+	*dstatus_buffer = sd->card_dstatus;
+}
+
+/* 'data' is of type uint32 whereas other buffers are of type uint8 */
+static int
+bcmspi_cmd_issue(sdioh_info_t *sd, bool use_dma, uint32 cmd_arg,
+                uint32 *data, uint32 datalen)
+{
+	uint32	i, j;
+	uint8	resp_delay = 0;
+	int	err = SUCCESS;
+	uint32	hostlen;
+	uint32 spilen = 0;
+	uint32 dstatus_idx = 0;
+	uint16 templen, buslen, len, *ptr = NULL;
+
+	sd_trace(("spi cmd = 0x%x\n", cmd_arg));
+
+	if (DWORDMODE_ON) {
+		spilen = GFIELD(cmd_arg, SPI_LEN);
+		if ((GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_0) ||
+		    (GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_1))
+			dstatus_idx = spilen * 3;
+
+		if ((GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2) &&
+		    (GFIELD(cmd_arg, SPI_RW_FLAG) == 1)) {
+			spilen = spilen << 2;
+			dstatus_idx = (spilen % 16) ? (16 - (spilen % 16)) : 0;
+			/* convert len to mod16 size */
+			spilen = ROUNDUP(spilen, 16);
+			cmd_arg = SFIELD(cmd_arg, SPI_LEN, (spilen >> 2));
+		}
+	}
+
+	/* Set up and issue the SPI command.  MSByte goes out on bus first.  Increase datalen
+	 * according to the wordlen mode(16/32bit) the device is in.
+	 */
+	if (sd->wordlen == 4) { /* 32bit spid */
+		*(uint32 *)spi_outbuf = SPISWAP_WD4(cmd_arg);
+		if (datalen & 0x3)
+			datalen += (4 - (datalen & 0x3));
+	} else if (sd->wordlen == 2) { /* 16bit spid */
+		*(uint32 *)spi_outbuf = SPISWAP_WD2(cmd_arg);
+		if (datalen & 0x1)
+			datalen++;
+		if (datalen < 4)
+			datalen = ROUNDUP(datalen, 4);
+	} else {
+		sd_err(("Host is %d bit spid, could not create SPI command.\n",
+			8 * sd->wordlen));
+		return ERROR;
+	}
+
+	/* for Write, put the data into the output buffer */
+	if (GFIELD(cmd_arg, SPI_RW_FLAG) == 1) {
+		/* We send len field of hw-header always a mod16 size, both from host and dongle */
+		if (DWORDMODE_ON) {
+			if (GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2) {
+				ptr = (uint16 *)&data[0];
+				templen = *ptr;
+				/* ASSERT(*ptr == ~*(ptr + 1)); */
+				templen = ROUNDUP(templen, 16);
+				*ptr = templen;
+				sd_trace(("actual tx len = %d\n", (uint16)(~*(ptr+1))));
+			}
+		}
+
+		if (datalen != 0) {
+			for (i = 0; i < datalen/4; i++) {
+				if (sd->wordlen == 4) { /* 32bit spid */
+					*(uint32 *)&spi_outbuf[i * 4 + CMDLEN] =
+						SPISWAP_WD4(data[i]);
+				} else if (sd->wordlen == 2) { /* 16bit spid */
+					*(uint32 *)&spi_outbuf[i * 4 + CMDLEN] =
+						SPISWAP_WD2(data[i]);
+				}
+			}
+		}
+	}
+
+	/* Append resp-delay number of bytes and clock them out for F0/1/2 reads. */
+	if ((GFIELD(cmd_arg, SPI_RW_FLAG) == 0)) {
+		int func = GFIELD(cmd_arg, SPI_FUNCTION);
+		switch (func) {
+			case 0:
+				if (sd->resp_delay_new)
+					resp_delay = GSPI_F0_RESP_DELAY;
+				else
+					resp_delay = sd->resp_delay_all ? F0_RESPONSE_DELAY : 0;
+				break;
+			case 1:
+				if (sd->resp_delay_new)
+					resp_delay = GSPI_F1_RESP_DELAY;
+				else
+					resp_delay = F1_RESPONSE_DELAY;
+				break;
+			case 2:
+				if (sd->resp_delay_new)
+					resp_delay = GSPI_F2_RESP_DELAY;
+				else
+					resp_delay = sd->resp_delay_all ? F2_RESPONSE_DELAY : 0;
+				break;
+			default:
+				ASSERT(0);
+				break;
+		}
+		/* Program response delay */
+		if (sd->resp_delay_new == FALSE)
+			bcmspi_prog_resp_delay(sd, func, resp_delay);
+	}
+
+	/* +4 for cmd and +4 for dstatus */
+	hostlen = datalen + 8 + resp_delay;
+	hostlen += dstatus_idx;
+	hostlen += (4 - (hostlen & 0x3));
+	spi_sendrecv(sd, spi_outbuf, spi_inbuf, hostlen);
+
+	/* for Read, get the data into the input buffer */
+	if (datalen != 0) {
+		if (GFIELD(cmd_arg, SPI_RW_FLAG) == 0) { /* if read cmd */
+			for (j = 0; j < datalen/4; j++) {
+				if (sd->wordlen == 4) { /* 32bit spid */
+					data[j] = SPISWAP_WD4(*(uint32 *)&spi_inbuf[j * 4 +
+					            CMDLEN + resp_delay]);
+				} else if (sd->wordlen == 2) { /* 16bit spid */
+					data[j] = SPISWAP_WD2(*(uint32 *)&spi_inbuf[j * 4 +
+					            CMDLEN + resp_delay]);
+				}
+			}
+
+			if ((DWORDMODE_ON) && (GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2)) {
+				ptr = (uint16 *)&data[0];
+				templen = *ptr;
+				buslen = len = ~(*(ptr + 1));
+				buslen = ROUNDUP(buslen, 16);
+				/* populate actual len in hw-header */
+				if (templen == buslen)
+					*ptr = len;
+			}
+		}
+	}
+
+	/* Restore back the len field of the hw header */
+	if (DWORDMODE_ON) {
+		if ((GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2) &&
+		    (GFIELD(cmd_arg, SPI_RW_FLAG) == 1)) {
+			ptr = (uint16 *)&data[0];
+			*ptr = (uint16)(~*(ptr+1));
+		}
+	}
+
+	dstatus_idx += (datalen + CMDLEN + resp_delay);
+	/* Last 4bytes are dstatus.  Device is configured to return status bits. */
+	if (sd->wordlen == 4) { /* 32bit spid */
+		sd->card_dstatus = SPISWAP_WD4(*(uint32 *)&spi_inbuf[dstatus_idx]);
+	} else if (sd->wordlen == 2) { /* 16bit spid */
+		sd->card_dstatus = SPISWAP_WD2(*(uint32 *)&spi_inbuf[dstatus_idx]);
+	} else {
+		sd_err(("Host is %d bit machine, could not read SPI dstatus.\n",
+			8 * sd->wordlen));
+		return ERROR;
+	}
+	if (sd->card_dstatus == 0xffffffff) {
+		sd_err(("looks like not a GSPI device or device is not powered.\n"));
+	}
+
+	err = bcmspi_update_stats(sd, cmd_arg);
+
+	return err;
+
+}
+
+static int
+bcmspi_card_buf(sdioh_info_t *sd, int rw, int func, bool fifo,
+                uint32 addr, int nbytes, uint32 *data)
+{
+	int status;
+	uint32 cmd_arg;
+	bool write = rw == SDIOH_READ ? 0 : 1;
+	uint retries = 0;
+
+	bool enable;
+	uint32	spilen;
+
+	cmd_arg = 0;
+
+	ASSERT(nbytes);
+	ASSERT(nbytes <= sd->client_block_size[func]);
+
+	if (write) sd->t_cnt++; else sd->r_cnt++;
+
+	if (func == 2) {
+		/* Frame len check limited by gSPI. */
+		if ((nbytes > 2000) && write) {
+			sd_trace((">2KB write: F2 wr of %d bytes\n", nbytes));
+		}
+		/* ASSERT(nbytes <= 2048); Fix bigger len gspi issue and uncomment. */
+		/* If F2 fifo on device is not ready to receive data, don't do F2 transfer */
+		if (write) {
+			uint32 dstatus;
+			/* check F2 ready with cached one */
+			bcmspi_cmd_getdstatus(sd, &dstatus);
+			if ((dstatus & STATUS_F2_RX_READY) == 0) {
+				retries = WAIT_F2RXFIFORDY;
+				enable = 0;
+				while (retries-- && !enable) {
+					OSL_DELAY(WAIT_F2RXFIFORDY_DELAY * 1000);
+					bcmspi_card_regread(sd, SPI_FUNC_0, SPID_STATUS_REG, 4,
+					                   &dstatus);
+					if (dstatus & STATUS_F2_RX_READY)
+						enable = TRUE;
+				}
+				if (!enable) {
+					struct spierrstats_t *spierrstats = &sd->spierrstats;
+					spierrstats->f2rxnotready++;
+					sd_err(("F2 FIFO is not ready to receive data.\n"));
+					return ERROR;
+				}
+				sd_trace(("No of retries on F2 ready %d\n",
+					(WAIT_F2RXFIFORDY - retries)));
+			}
+		}
+	}
+
+	/* F2 transfers happen on 0 addr */
+	addr = (func == 2) ? 0 : addr;
+
+	/* In pio mode buffer is read using fixed address fifo in func 1 */
+	if ((func == 1) && (fifo))
+		cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 0);
+	else
+		cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1);
+
+	cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func);
+	cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, addr);
+	cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, write);
+	spilen = sd->data_xfer_count = MIN(sd->client_block_size[func], nbytes);
+	if ((sd->dwordmode == TRUE) && (GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2)) {
+		/* convert len to mod4 size */
+		spilen = spilen + ((spilen & 0x3) ? (4 - (spilen & 0x3)): 0);
+		cmd_arg = SFIELD(cmd_arg, SPI_LEN, (spilen >> 2));
+	} else
+		cmd_arg = SFIELD(cmd_arg, SPI_LEN, spilen);
+
+	if ((func == 2) && (fifo == 1)) {
+		sd_data(("%s: %s func %d, %s, addr 0x%x, len %d bytes, r_cnt %d t_cnt %d\n",
+		          __FUNCTION__, write ? "Wr" : "Rd", func, "INCR",
+		          addr, nbytes, sd->r_cnt, sd->t_cnt));
+	}
+
+	sd_trace(("%s cmd_arg = 0x%x\n", __FUNCTION__, cmd_arg));
+	sd_data(("%s: %s func %d, %s, addr 0x%x, len %d bytes, r_cnt %d t_cnt %d\n",
+	         __FUNCTION__, write ? "Wd" : "Rd", func, "INCR",
+	         addr, nbytes, sd->r_cnt, sd->t_cnt));
+
+
+	if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, data, nbytes)) != SUCCESS) {
+		sd_err(("%s: cmd_issue failed for %s\n", __FUNCTION__,
+			(write ? "write" : "read")));
+		return status;
+	}
+
+	/* gSPI expects that hw-header-len is equal to spi-command-len */
+	if ((func == 2) && (rw == SDIOH_WRITE) && (sd->dwordmode == FALSE)) {
+		ASSERT((uint16)sd->data_xfer_count == (uint16)(*data & 0xffff));
+		ASSERT((uint16)sd->data_xfer_count == (uint16)(~((*data & 0xffff0000) >> 16)));
+	}
+
+	if ((nbytes > 2000) && !write) {
+		sd_trace((">2KB read: F2 rd of %d bytes\n", nbytes));
+	}
+
+	return SUCCESS;
+}
+
+/* Reset and re-initialize the device */
+int
+sdioh_sdio_reset(sdioh_info_t *si)
+{
+	si->card_init_done = FALSE;
+	return bcmspi_client_init(si);
+}
+
+SDIOH_API_RC
+sdioh_gpioouten(sdioh_info_t *sd, uint32 gpio)
+{
+	return SDIOH_API_RC_FAIL;
+}
+
+SDIOH_API_RC
+sdioh_gpioout(sdioh_info_t *sd, uint32 gpio, bool enab)
+{
+	return SDIOH_API_RC_FAIL;
+}
+
+bool
+sdioh_gpioin(sdioh_info_t *sd, uint32 gpio)
+{
+	return FALSE;
+}
+
+SDIOH_API_RC
+sdioh_gpio_init(sdioh_info_t *sd)
+{
+	return SDIOH_API_RC_FAIL;
+}
diff --git a/drivers/net/wireless/bcmdhd/bcmutils.c b/drivers/net/wireless/bcmdhd/bcmutils.c
new file mode 100644
index 0000000..1e94084
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmutils.c
@@ -0,0 +1,3053 @@
+/*
+ * Driver O/S-independent utility routines
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ *
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ *
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: bcmutils.c 473326 2014-04-29 00:37:35Z $
+ */
+
+#include <bcm_cfg.h>
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <stdarg.h>
+#ifdef BCMDRIVER
+
+#include <osl.h>
+#include <bcmutils.h>
+#if defined(BCMNVRAM)
+#include <siutils.h>
+#include <bcmnvram.h>
+#endif
+
+#else /* !BCMDRIVER */
+
+#include <stdio.h>
+#include <string.h>
+#include <bcmutils.h>
+
+#if defined(BCMEXTSUP)
+#include <bcm_osl.h>
+#endif
+
+#ifndef ASSERT
+#define ASSERT(exp)
+#endif
+
+#endif /* !BCMDRIVER */
+
+#include <bcmendian.h>
+#include <bcmdevs.h>
+#include <proto/ethernet.h>
+#include <proto/vlan.h>
+#include <proto/bcmip.h>
+#include <proto/802.1d.h>
+#include <proto/802.11.h>
+
+
+void *_bcmutils_dummy_fn = NULL;
+
+
+
+
+#ifdef BCMDRIVER
+
+
+
+/* copy a pkt buffer chain into a buffer */
+uint
+pktcopy(osl_t *osh, void *p, uint offset, int len, uchar *buf)
+{
+	uint n, ret = 0;
+
+	if (len < 0)
+		len = 4096;	/* "infinite" */
+
+	/* skip 'offset' bytes */
+	for (; p && offset; p = PKTNEXT(osh, p)) {
+		if (offset < (uint)PKTLEN(osh, p))
+			break;
+		offset -= PKTLEN(osh, p);
+	}
+
+	if (!p)
+		return 0;
+
+	/* copy the data */
+	for (; p && len; p = PKTNEXT(osh, p)) {
+		n = MIN((uint)PKTLEN(osh, p) - offset, (uint)len);
+		bcopy(PKTDATA(osh, p) + offset, buf, n);
+		buf += n;
+		len -= n;
+		ret += n;
+		offset = 0;
+	}
+
+	return ret;
+}
+
+/* copy a buffer into a pkt buffer chain */
+uint
+pktfrombuf(osl_t *osh, void *p, uint offset, int len, uchar *buf)
+{
+	uint n, ret = 0;
+
+
+	/* skip 'offset' bytes */
+	for (; p && offset; p = PKTNEXT(osh, p)) {
+		if (offset < (uint)PKTLEN(osh, p))
+			break;
+		offset -= PKTLEN(osh, p);
+	}
+
+	if (!p)
+		return 0;
+
+	/* copy the data */
+	for (; p && len; p = PKTNEXT(osh, p)) {
+		n = MIN((uint)PKTLEN(osh, p) - offset, (uint)len);
+		bcopy(buf, PKTDATA(osh, p) + offset, n);
+		buf += n;
+		len -= n;
+		ret += n;
+		offset = 0;
+	}
+
+	return ret;
+}
+
+
+
+/* return total length of buffer chain */
+uint BCMFASTPATH
+pkttotlen(osl_t *osh, void *p)
+{
+	uint total;
+	int len;
+
+	total = 0;
+	for (; p; p = PKTNEXT(osh, p)) {
+		len = PKTLEN(osh, p);
+		total += len;
+#ifdef BCMLFRAG
+		if (BCMLFRAG_ENAB()) {
+			if (PKTISFRAG(osh, p)) {
+				total += PKTFRAGTOTLEN(osh, p);
+			}
+		}
+#endif
+	}
+
+	return (total);
+}
+
+/* return the last buffer of chained pkt */
+void *
+pktlast(osl_t *osh, void *p)
+{
+	for (; PKTNEXT(osh, p); p = PKTNEXT(osh, p))
+		;
+
+	return (p);
+}
+
+/* count segments of a chained packet */
+uint BCMFASTPATH
+pktsegcnt(osl_t *osh, void *p)
+{
+	uint cnt;
+
+	for (cnt = 0; p; p = PKTNEXT(osh, p)) {
+		cnt++;
+#ifdef BCMLFRAG
+		if (BCMLFRAG_ENAB()) {
+			if (PKTISFRAG(osh, p)) {
+				cnt += PKTFRAGTOTNUM(osh, p);
+			}
+		}
+#endif
+	}
+
+	return cnt;
+}
+
+
+/* count segments of a chained packet */
+uint BCMFASTPATH
+pktsegcnt_war(osl_t *osh, void *p)
+{
+	uint cnt;
+	uint8 *pktdata;
+	uint len, remain, align64;
+
+	for (cnt = 0; p; p = PKTNEXT(osh, p)) {
+		cnt++;
+		len = PKTLEN(osh, p);
+		if (len > 128) {
+			pktdata = (uint8 *)PKTDATA(osh, p);	/* starting address of data */
+			/* Check for page boundary straddle (2048B) */
+			if (((uintptr)pktdata & ~0x7ff) != ((uintptr)(pktdata+len) & ~0x7ff))
+				cnt++;
+
+			align64 = (uint)((uintptr)pktdata & 0x3f);	/* aligned to 64B */
+			align64 = (64 - align64) & 0x3f;
+			len -= align64;		/* bytes from aligned 64B to end */
+			/* if aligned to 128B, check for MOD 128 between 1 to 4B */
+			remain = len % 128;
+			if (remain > 0 && remain <= 4)
+				cnt++;		/* add extra seg */
+		}
+	}
+
+	return cnt;
+}
+
+uint8 * BCMFASTPATH
+pktdataoffset(osl_t *osh, void *p,  uint offset)
+{
+	uint total = pkttotlen(osh, p);
+	uint pkt_off = 0, len = 0;
+	uint8 *pdata = (uint8 *) PKTDATA(osh, p);
+
+	if (offset > total)
+		return NULL;
+
+	for (; p; p = PKTNEXT(osh, p)) {
+		pdata = (uint8 *) PKTDATA(osh, p);
+		pkt_off = offset - len;
+		len += PKTLEN(osh, p);
+		if (len > offset)
+			break;
+	}
+	return (uint8*) (pdata+pkt_off);
+}
+
+
+/* given a offset in pdata, find the pkt seg hdr */
+void *
+pktoffset(osl_t *osh, void *p,  uint offset)
+{
+	uint total = pkttotlen(osh, p);
+	uint len = 0;
+
+	if (offset > total)
+		return NULL;
+
+	for (; p; p = PKTNEXT(osh, p)) {
+		len += PKTLEN(osh, p);
+		if (len > offset)
+			break;
+	}
+	return p;
+}
+
+#endif /* BCMDRIVER */
+
+#if !defined(BCMROMOFFLOAD_EXCLUDE_BCMUTILS_FUNCS)
+const unsigned char bcm_ctype[] = {
+
+	_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,			/* 0-7 */
+	_BCM_C, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C,
+	_BCM_C,	/* 8-15 */
+	_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,			/* 16-23 */
+	_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,			/* 24-31 */
+	_BCM_S|_BCM_SP,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,		/* 32-39 */
+	_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,			/* 40-47 */
+	_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,			/* 48-55 */
+	_BCM_D,_BCM_D,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,			/* 56-63 */
+	_BCM_P, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X,
+	_BCM_U|_BCM_X, _BCM_U, /* 64-71 */
+	_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,			/* 72-79 */
+	_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,			/* 80-87 */
+	_BCM_U,_BCM_U,_BCM_U,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,			/* 88-95 */
+	_BCM_P, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X,
+	_BCM_L|_BCM_X, _BCM_L, /* 96-103 */
+	_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L, /* 104-111 */
+	_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L, /* 112-119 */
+	_BCM_L,_BCM_L,_BCM_L,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_C, /* 120-127 */
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,		/* 128-143 */
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,		/* 144-159 */
+	_BCM_S|_BCM_SP, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P,
+	_BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P,	/* 160-175 */
+	_BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P,
+	_BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P,	/* 176-191 */
+	_BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U,
+	_BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U,	/* 192-207 */
+	_BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_P, _BCM_U, _BCM_U, _BCM_U,
+	_BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_L,	/* 208-223 */
+	_BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L,
+	_BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L,	/* 224-239 */
+	_BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_P, _BCM_L, _BCM_L, _BCM_L,
+	_BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L /* 240-255 */
+};
+
+ulong
+bcm_strtoul(const char *cp, char **endp, uint base)
+{
+	ulong result, last_result = 0, value;
+	bool minus;
+
+	minus = FALSE;
+
+	while (bcm_isspace(*cp))
+		cp++;
+
+	if (cp[0] == '+')
+		cp++;
+	else if (cp[0] == '-') {
+		minus = TRUE;
+		cp++;
+	}
+
+	if (base == 0) {
+		if (cp[0] == '0') {
+			if ((cp[1] == 'x') || (cp[1] == 'X')) {
+				base = 16;
+				cp = &cp[2];
+			} else {
+				base = 8;
+				cp = &cp[1];
+			}
+		} else
+			base = 10;
+	} else if (base == 16 && (cp[0] == '0') && ((cp[1] == 'x') || (cp[1] == 'X'))) {
+		cp = &cp[2];
+	}
+
+	result = 0;
+
+	while (bcm_isxdigit(*cp) &&
+	       (value = bcm_isdigit(*cp) ? *cp-'0' : bcm_toupper(*cp)-'A'+10) < base) {
+		result = result*base + value;
+		/* Detected overflow */
+		if (result < last_result && !minus)
+			return (ulong)-1;
+		last_result = result;
+		cp++;
+	}
+
+	if (minus)
+		result = (ulong)(-(long)result);
+
+	if (endp)
+		*endp = DISCARD_QUAL(cp, char);
+
+	return (result);
+}
+
+int
+bcm_atoi(const char *s)
+{
+	return (int)bcm_strtoul(s, NULL, 10);
+}
+
+/* return pointer to location of substring 'needle' in 'haystack' */
+char *
+bcmstrstr(const char *haystack, const char *needle)
+{
+	int len, nlen;
+	int i;
+
+	if ((haystack == NULL) || (needle == NULL))
+		return DISCARD_QUAL(haystack, char);
+
+	nlen = (int)strlen(needle);
+	len = (int)strlen(haystack) - nlen + 1;
+
+	for (i = 0; i < len; i++)
+		if (memcmp(needle, &haystack[i], nlen) == 0)
+			return DISCARD_QUAL(&haystack[i], char);
+	return (NULL);
+}
+
+char *
+bcmstrnstr(const char *s, uint s_len, const char *substr, uint substr_len)
+{
+	for (; s_len >= substr_len; s++, s_len--)
+		if (strncmp(s, substr, substr_len) == 0)
+			return DISCARD_QUAL(s, char);
+
+	return NULL;
+}
+
+char *
+bcmstrcat(char *dest, const char *src)
+{
+	char *p;
+
+	p = dest + strlen(dest);
+
+	while ((*p++ = *src++) != '\0')
+		;
+
+	return (dest);
+}
+
+char *
+bcmstrncat(char *dest, const char *src, uint size)
+{
+	char *endp;
+	char *p;
+
+	p = dest + strlen(dest);
+	endp = p + size;
+
+	while (p != endp && (*p++ = *src++) != '\0')
+		;
+
+	return (dest);
+}
+
+
+/****************************************************************************
+* Function:   bcmstrtok
+*
+* Purpose:
+*  Tokenizes a string. This function is conceptually similiar to ANSI C strtok(),
+*  but allows strToken() to be used by different strings or callers at the same
+*  time. Each call modifies '*string' by substituting a NULL character for the
+*  first delimiter that is encountered, and updates 'string' to point to the char
+*  after the delimiter. Leading delimiters are skipped.
+*
+* Parameters:
+*  string      (mod) Ptr to string ptr, updated by token.
+*  delimiters  (in)  Set of delimiter characters.
+*  tokdelim    (out) Character that delimits the returned token. (May
+*                    be set to NULL if token delimiter is not required).
+*
+* Returns:  Pointer to the next token found. NULL when no more tokens are found.
+*****************************************************************************
+*/
+char *
+bcmstrtok(char **string, const char *delimiters, char *tokdelim)
+{
+	unsigned char *str;
+	unsigned long map[8];
+	int count;
+	char *nextoken;
+
+	if (tokdelim != NULL) {
+		/* Prime the token delimiter */
+		*tokdelim = '\0';
+	}
+
+	/* Clear control map */
+	for (count = 0; count < 8; count++) {
+		map[count] = 0;
+	}
+
+	/* Set bits in delimiter table */
+	do {
+		map[*delimiters >> 5] |= (1 << (*delimiters & 31));
+	}
+	while (*delimiters++);
+
+	str = (unsigned char*)*string;
+
+	/* Find beginning of token (skip over leading delimiters). Note that
+	 * there is no token iff this loop sets str to point to the terminal
+	 * null (*str == '\0')
+	 */
+	while (((map[*str >> 5] & (1 << (*str & 31))) && *str) || (*str == ' ')) {
+		str++;
+	}
+
+	nextoken = (char*)str;
+
+	/* Find the end of the token. If it is not the end of the string,
+	 * put a null there.
+	 */
+	for (; *str; str++) {
+		if (map[*str >> 5] & (1 << (*str & 31))) {
+			if (tokdelim != NULL) {
+				*tokdelim = *str;
+			}
+
+			*str++ = '\0';
+			break;
+		}
+	}
+
+	*string = (char*)str;
+
+	/* Determine if a token has been found. */
+	if (nextoken == (char *) str) {
+		return NULL;
+	}
+	else {
+		return nextoken;
+	}
+}
+
+
+#define xToLower(C) \
+	((C >= 'A' && C <= 'Z') ? (char)((int)C - (int)'A' + (int)'a') : C)
+
+
+/****************************************************************************
+* Function:   bcmstricmp
+*
+* Purpose:    Compare to strings case insensitively.
+*
+* Parameters: s1 (in) First string to compare.
+*             s2 (in) Second string to compare.
+*
+* Returns:    Return 0 if the two strings are equal, -1 if t1 < t2 and 1 if
+*             t1 > t2, when ignoring case sensitivity.
+*****************************************************************************
+*/
+int
+bcmstricmp(const char *s1, const char *s2)
+{
+	char dc, sc;
+
+	while (*s2 && *s1) {
+		dc = xToLower(*s1);
+		sc = xToLower(*s2);
+		if (dc < sc) return -1;
+		if (dc > sc) return 1;
+		s1++;
+		s2++;
+	}
+
+	if (*s1 && !*s2) return 1;
+	if (!*s1 && *s2) return -1;
+	return 0;
+}
+
+
+/****************************************************************************
+* Function:   bcmstrnicmp
+*
+* Purpose:    Compare to strings case insensitively, upto a max of 'cnt'
+*             characters.
+*
+* Parameters: s1  (in) First string to compare.
+*             s2  (in) Second string to compare.
+*             cnt (in) Max characters to compare.
+*
+* Returns:    Return 0 if the two strings are equal, -1 if t1 < t2 and 1 if
+*             t1 > t2, when ignoring case sensitivity.
+*****************************************************************************
+*/
+int
+bcmstrnicmp(const char* s1, const char* s2, int cnt)
+{
+	char dc, sc;
+
+	while (*s2 && *s1 && cnt) {
+		dc = xToLower(*s1);
+		sc = xToLower(*s2);
+		if (dc < sc) return -1;
+		if (dc > sc) return 1;
+		s1++;
+		s2++;
+		cnt--;
+	}
+
+	if (!cnt) return 0;
+	if (*s1 && !*s2) return 1;
+	if (!*s1 && *s2) return -1;
+	return 0;
+}
+
+/* parse a xx:xx:xx:xx:xx:xx format ethernet address */
+int
+bcm_ether_atoe(const char *p, struct ether_addr *ea)
+{
+	int i = 0;
+	char *ep;
+
+	for (;;) {
+		ea->octet[i++] = (char) bcm_strtoul(p, &ep, 16);
+		p = ep;
+		if (!*p++ || i == 6)
+			break;
+	}
+
+	return (i == 6);
+}
+
+int
+bcm_atoipv4(const char *p, struct ipv4_addr *ip)
+{
+
+	int i = 0;
+	char *c;
+	for (;;) {
+		ip->addr[i++] = (uint8)bcm_strtoul(p, &c, 0);
+		if (*c++ != '.' || i == IPV4_ADDR_LEN)
+			break;
+		p = c;
+	}
+	return (i == IPV4_ADDR_LEN);
+}
+#endif	/* !BCMROMOFFLOAD_EXCLUDE_BCMUTILS_FUNCS */
+
+
+#if defined(CONFIG_USBRNDIS_RETAIL) || defined(NDIS_MINIPORT_DRIVER)
+/* registry routine buffer preparation utility functions:
+ * parameter order is like strncpy, but returns count
+ * of bytes copied. Minimum bytes copied is null char(1)/wchar(2)
+ */
+ulong
+wchar2ascii(char *abuf, ushort *wbuf, ushort wbuflen, ulong abuflen)
+{
+	ulong copyct = 1;
+	ushort i;
+
+	if (abuflen == 0)
+		return 0;
+
+	/* wbuflen is in bytes */
+	wbuflen /= sizeof(ushort);
+
+	for (i = 0; i < wbuflen; ++i) {
+		if (--abuflen == 0)
+			break;
+		*abuf++ = (char) *wbuf++;
+		++copyct;
+	}
+	*abuf = '\0';
+
+	return copyct;
+}
+#endif /* CONFIG_USBRNDIS_RETAIL || NDIS_MINIPORT_DRIVER */
+
+char *
+bcm_ether_ntoa(const struct ether_addr *ea, char *buf)
+{
+	static const char hex[] =
+	  {
+		  '0', '1', '2', '3', '4', '5', '6', '7',
+		  '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'
+	  };
+	const uint8 *octet = ea->octet;
+	char *p = buf;
+	int i;
+
+	for (i = 0; i < 6; i++, octet++) {
+		*p++ = hex[(*octet >> 4) & 0xf];
+		*p++ = hex[*octet & 0xf];
+		*p++ = ':';
+	}
+
+	*(p-1) = '\0';
+
+	return (buf);
+}
+
+char *
+bcm_ip_ntoa(struct ipv4_addr *ia, char *buf)
+{
+	snprintf(buf, 16, "%d.%d.%d.%d",
+	         ia->addr[0], ia->addr[1], ia->addr[2], ia->addr[3]);
+	return (buf);
+}
+
+char *
+bcm_ipv6_ntoa(void *ipv6, char *buf)
+{
+	/* Implementing RFC 5952 Sections 4 + 5 */
+	/* Not thoroughly tested */
+	uint16 tmp[8];
+	uint16 *a = &tmp[0];
+	char *p = buf;
+	int i, i_max = -1, cnt = 0, cnt_max = 1;
+	uint8 *a4 = NULL;
+	memcpy((uint8 *)&tmp[0], (uint8 *)ipv6, IPV6_ADDR_LEN);
+
+	for (i = 0; i < IPV6_ADDR_LEN/2; i++) {
+		if (a[i]) {
+			if (cnt > cnt_max) {
+				cnt_max = cnt;
+				i_max = i - cnt;
+			}
+			cnt = 0;
+		} else
+			cnt++;
+	}
+	if (cnt > cnt_max) {
+		cnt_max = cnt;
+		i_max = i - cnt;
+	}
+	if (i_max == 0 &&
+		/* IPv4-translated: ::ffff:0:a.b.c.d */
+		((cnt_max == 4 && a[4] == 0xffff && a[5] == 0) ||
+		/* IPv4-mapped: ::ffff:a.b.c.d */
+		(cnt_max == 5 && a[5] == 0xffff)))
+		a4 = (uint8*) (a + 6);
+
+	for (i = 0; i < IPV6_ADDR_LEN/2; i++) {
+		if ((uint8*) (a + i) == a4) {
+			snprintf(p, 16, ":%u.%u.%u.%u", a4[0], a4[1], a4[2], a4[3]);
+			break;
+		} else if (i == i_max) {
+			*p++ = ':';
+			i += cnt_max - 1;
+			p[0] = ':';
+			p[1] = '\0';
+		} else {
+			if (i)
+				*p++ = ':';
+			p += snprintf(p, 8, "%x", ntoh16(a[i]));
+		}
+	}
+
+	return buf;
+}
+#ifdef BCMDRIVER
+
+void
+bcm_mdelay(uint ms)
+{
+	uint i;
+
+	for (i = 0; i < ms; i++) {
+		OSL_DELAY(1000);
+	}
+}
+
+
+
+
+
+#if defined(DHD_DEBUG)
+/* pretty hex print a pkt buffer chain */
+void
+prpkt(const char *msg, osl_t *osh, void *p0)
+{
+	void *p;
+
+	if (msg && (msg[0] != '\0'))
+		printf("%s:\n", msg);
+
+	for (p = p0; p; p = PKTNEXT(osh, p))
+		prhex(NULL, PKTDATA(osh, p), PKTLEN(osh, p));
+}
+#endif
+
+/* Takes an Ethernet frame and sets out-of-bound PKTPRIO.
+ * Also updates the inplace vlan tag if requested.
+ * For debugging, it returns an indication of what it did.
+ */
+uint BCMFASTPATH
+pktsetprio(void *pkt, bool update_vtag)
+{
+	struct ether_header *eh;
+	struct ethervlan_header *evh;
+	uint8 *pktdata;
+	int priority = 0;
+	int rc = 0;
+
+	pktdata = (uint8 *)PKTDATA(OSH_NULL, pkt);
+	ASSERT(ISALIGNED((uintptr)pktdata, sizeof(uint16)));
+
+	eh = (struct ether_header *) pktdata;
+
+	if (eh->ether_type == hton16(ETHER_TYPE_8021Q)) {
+		uint16 vlan_tag;
+		int vlan_prio, dscp_prio = 0;
+
+		evh = (struct ethervlan_header *)eh;
+
+		vlan_tag = ntoh16(evh->vlan_tag);
+		vlan_prio = (int) (vlan_tag >> VLAN_PRI_SHIFT) & VLAN_PRI_MASK;
+
+		if ((evh->ether_type == hton16(ETHER_TYPE_IP)) ||
+			(evh->ether_type == hton16(ETHER_TYPE_IPV6))) {
+			uint8 *ip_body = pktdata + sizeof(struct ethervlan_header);
+			uint8 tos_tc = IP_TOS46(ip_body);
+			dscp_prio = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT);
+		}
+
+		/* DSCP priority gets precedence over 802.1P (vlan tag) */
+		if (dscp_prio != 0) {
+			priority = dscp_prio;
+			rc |= PKTPRIO_VDSCP;
+		} else {
+			priority = vlan_prio;
+			rc |= PKTPRIO_VLAN;
+		}
+		/*
+		 * If the DSCP priority is not the same as the VLAN priority,
+		 * then overwrite the priority field in the vlan tag, with the
+		 * DSCP priority value. This is required for Linux APs because
+		 * the VLAN driver on Linux, overwrites the skb->priority field
+		 * with the priority value in the vlan tag
+		 */
+		if (update_vtag && (priority != vlan_prio)) {
+			vlan_tag &= ~(VLAN_PRI_MASK << VLAN_PRI_SHIFT);
+			vlan_tag |= (uint16)priority << VLAN_PRI_SHIFT;
+			evh->vlan_tag = hton16(vlan_tag);
+			rc |= PKTPRIO_UPD;
+		}
+	} else if ((eh->ether_type == hton16(ETHER_TYPE_IP)) ||
+		(eh->ether_type == hton16(ETHER_TYPE_IPV6))) {
+		uint8 *ip_body = pktdata + sizeof(struct ether_header);
+		uint8 tos_tc = IP_TOS46(ip_body);
+		uint8 dscp = tos_tc >> IPV4_TOS_DSCP_SHIFT;
+		switch (dscp) {
+		case DSCP_EF:
+			priority = PRIO_8021D_VO;
+			break;
+		case DSCP_AF31:
+		case DSCP_AF32:
+		case DSCP_AF33:
+			priority = PRIO_8021D_CL;
+			break;
+		case DSCP_AF21:
+		case DSCP_AF22:
+		case DSCP_AF23:
+		case DSCP_AF11:
+		case DSCP_AF12:
+		case DSCP_AF13:
+			priority = PRIO_8021D_EE;
+			break;
+		default:
+			priority = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT);
+			break;
+		}
+
+		rc |= PKTPRIO_DSCP;
+	}
+
+	ASSERT(priority >= 0 && priority <= MAXPRIO);
+	PKTSETPRIO(pkt, priority);
+	return (rc | priority);
+}
+
+/* Returns TRUE and DSCP if IP header found, FALSE otherwise.
+ */
+bool BCMFASTPATH
+pktgetdscp(uint8 *pktdata, uint pktlen, uint8 *dscp)
+{
+	struct ether_header *eh;
+	struct ethervlan_header *evh;
+	uint8 *ip_body;
+	bool rc = FALSE;
+
+	/* minimum length is ether header and IP header */
+	if (pktlen < sizeof(struct ether_header) + IPV4_MIN_HEADER_LEN)
+		return FALSE;
+
+	eh = (struct ether_header *) pktdata;
+
+	if (eh->ether_type == HTON16(ETHER_TYPE_IP)) {
+		ip_body = pktdata + sizeof(struct ether_header);
+		*dscp = IP_DSCP46(ip_body);
+		rc = TRUE;
+	}
+	else if (eh->ether_type == HTON16(ETHER_TYPE_8021Q)) {
+		evh = (struct ethervlan_header *)eh;
+
+		/* minimum length is ethervlan header and IP header */
+		if (pktlen >= sizeof(struct ethervlan_header) + IPV4_MIN_HEADER_LEN &&
+			evh->ether_type == HTON16(ETHER_TYPE_IP)) {
+			ip_body = pktdata + sizeof(struct ethervlan_header);
+			*dscp = IP_DSCP46(ip_body);
+			rc = TRUE;
+		}
+	}
+
+	return rc;
+}
+
+/* The 0.5KB string table is not removed by compiler even though it's unused */
+
+static char bcm_undeferrstr[32];
+static const char *bcmerrorstrtable[] = BCMERRSTRINGTABLE;
+
+/* Convert the error codes into related error strings  */
+const char *
+bcmerrorstr(int bcmerror)
+{
+	/* check if someone added a bcmerror code but forgot to add errorstring */
+	ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(bcmerrorstrtable) - 1));
+
+	if (bcmerror > 0 || bcmerror < BCME_LAST) {
+		snprintf(bcm_undeferrstr, sizeof(bcm_undeferrstr), "Undefined error %d", bcmerror);
+		return bcm_undeferrstr;
+	}
+
+	ASSERT(strlen(bcmerrorstrtable[-bcmerror]) < BCME_STRLEN);
+
+	return bcmerrorstrtable[-bcmerror];
+}
+
+
+
+/* iovar table lookup */
+/* could mandate sorted tables and do a binary search */
+const bcm_iovar_t*
+bcm_iovar_lookup(const bcm_iovar_t *table, const char *name)
+{
+	const bcm_iovar_t *vi;
+	const char *lookup_name;
+
+	/* skip any ':' delimited option prefixes */
+	lookup_name = strrchr(name, ':');
+	if (lookup_name != NULL)
+		lookup_name++;
+	else
+		lookup_name = name;
+
+	ASSERT(table != NULL);
+
+	for (vi = table; vi->name; vi++) {
+		if (!strcmp(vi->name, lookup_name))
+			return vi;
+	}
+	/* ran to end of table */
+
+	return NULL; /* var name not found */
+}
+
+int
+bcm_iovar_lencheck(const bcm_iovar_t *vi, void *arg, int len, bool set)
+{
+	int bcmerror = 0;
+
+	/* length check on io buf */
+	switch (vi->type) {
+	case IOVT_BOOL:
+	case IOVT_INT8:
+	case IOVT_INT16:
+	case IOVT_INT32:
+	case IOVT_UINT8:
+	case IOVT_UINT16:
+	case IOVT_UINT32:
+		/* all integers are int32 sized args at the ioctl interface */
+		if (len < (int)sizeof(int)) {
+			bcmerror = BCME_BUFTOOSHORT;
+		}
+		break;
+
+	case IOVT_BUFFER:
+		/* buffer must meet minimum length requirement */
+		if (len < vi->minlen) {
+			bcmerror = BCME_BUFTOOSHORT;
+		}
+		break;
+
+	case IOVT_VOID:
+		if (!set) {
+			/* Cannot return nil... */
+			bcmerror = BCME_UNSUPPORTED;
+		} else if (len) {
+			/* Set is an action w/o parameters */
+			bcmerror = BCME_BUFTOOLONG;
+		}
+		break;
+
+	default:
+		/* unknown type for length check in iovar info */
+		ASSERT(0);
+		bcmerror = BCME_UNSUPPORTED;
+	}
+
+	return bcmerror;
+}
+
+#endif	/* BCMDRIVER */
+
+
+uint8 *
+bcm_write_tlv(int type, const void *data, int datalen, uint8 *dst)
+{
+	uint8 *new_dst = dst;
+	bcm_tlv_t *dst_tlv = (bcm_tlv_t *)dst;
+
+	/* dst buffer should always be valid */
+	ASSERT(dst);
+
+	/* data len must be within valid range */
+	ASSERT((datalen >= 0) && (datalen <= BCM_TLV_MAX_DATA_SIZE));
+
+	/* source data buffer pointer should be valid, unless datalen is 0
+	 * meaning no data with this TLV
+	 */
+	ASSERT((data != NULL) || (datalen == 0));
+
+	/* only do work if the inputs are valid
+	 * - must have a dst to write to AND
+	 * - datalen must be within range AND
+	 * - the source data pointer must be non-NULL if datalen is non-zero
+	 * (this last condition detects datalen > 0 with a NULL data pointer)
+	 */
+	if ((dst != NULL) &&
+	    ((datalen >= 0) && (datalen <= BCM_TLV_MAX_DATA_SIZE)) &&
+	    ((data != NULL) || (datalen == 0))) {
+
+	        /* write type, len fields */
+		dst_tlv->id = (uint8)type;
+	        dst_tlv->len = (uint8)datalen;
+
+		/* if data is present, copy to the output buffer and update
+		 * pointer to output buffer
+		 */
+		if (datalen > 0) {
+
+			memcpy(dst_tlv->data, data, datalen);
+		}
+
+		/* update the output destination poitner to point past
+		 * the TLV written
+		 */
+		new_dst = dst + BCM_TLV_HDR_SIZE + datalen;
+	}
+
+	return (new_dst);
+}
+
+uint8 *
+bcm_write_tlv_safe(int type, const void *data, int datalen, uint8 *dst, int dst_maxlen)
+{
+	uint8 *new_dst = dst;
+
+	if ((datalen >= 0) && (datalen <= BCM_TLV_MAX_DATA_SIZE)) {
+
+		/* if len + tlv hdr len is more than destlen, don't do anything
+		 * just return the buffer untouched
+		 */
+		if ((int)(datalen + BCM_TLV_HDR_SIZE) <= dst_maxlen) {
+
+			new_dst = bcm_write_tlv(type, data, datalen, dst);
+		}
+	}
+
+	return (new_dst);
+}
+
+uint8 *
+bcm_copy_tlv(const void *src, uint8 *dst)
+{
+	uint8 *new_dst = dst;
+	const bcm_tlv_t *src_tlv = (const bcm_tlv_t *)src;
+	uint totlen;
+
+	ASSERT(dst && src);
+	if (dst && src) {
+
+		totlen = BCM_TLV_HDR_SIZE + src_tlv->len;
+		memcpy(dst, src_tlv, totlen);
+		new_dst = dst + totlen;
+	}
+
+	return (new_dst);
+}
+
+
+uint8 *bcm_copy_tlv_safe(const void *src, uint8 *dst, int dst_maxlen)
+{
+	uint8 *new_dst = dst;
+	const bcm_tlv_t *src_tlv = (const bcm_tlv_t *)src;
+
+	ASSERT(src);
+	if (src) {
+		if (bcm_valid_tlv(src_tlv, dst_maxlen)) {
+			new_dst = bcm_copy_tlv(src, dst);
+		}
+	}
+
+	return (new_dst);
+}
+
+
+#if !defined(BCMROMOFFLOAD_EXCLUDE_BCMUTILS_FUNCS)
+/*******************************************************************************
+ * crc8
+ *
+ * Computes a crc8 over the input data using the polynomial:
+ *
+ *       x^8 + x^7 +x^6 + x^4 + x^2 + 1
+ *
+ * The caller provides the initial value (either CRC8_INIT_VALUE
+ * or the previous returned value) to allow for processing of
+ * discontiguous blocks of data.  When generating the CRC the
+ * caller is responsible for complementing the final return value
+ * and inserting it into the byte stream.  When checking, a final
+ * return value of CRC8_GOOD_VALUE indicates a valid CRC.
+ *
+ * Reference: Dallas Semiconductor Application Note 27
+ *   Williams, Ross N., "A Painless Guide to CRC Error Detection Algorithms",
+ *     ver 3, Aug 1993, ross@guest.adelaide.edu.au, Rocksoft Pty Ltd.,
+ *     ftp://ftp.rocksoft.com/clients/rocksoft/papers/crc_v3.txt
+ *
+ * ****************************************************************************
+ */
+
+static const uint8 crc8_table[256] = {
+    0x00, 0xF7, 0xB9, 0x4E, 0x25, 0xD2, 0x9C, 0x6B,
+    0x4A, 0xBD, 0xF3, 0x04, 0x6F, 0x98, 0xD6, 0x21,
+    0x94, 0x63, 0x2D, 0xDA, 0xB1, 0x46, 0x08, 0xFF,
+    0xDE, 0x29, 0x67, 0x90, 0xFB, 0x0C, 0x42, 0xB5,
+    0x7F, 0x88, 0xC6, 0x31, 0x5A, 0xAD, 0xE3, 0x14,
+    0x35, 0xC2, 0x8C, 0x7B, 0x10, 0xE7, 0xA9, 0x5E,
+    0xEB, 0x1C, 0x52, 0xA5, 0xCE, 0x39, 0x77, 0x80,
+    0xA1, 0x56, 0x18, 0xEF, 0x84, 0x73, 0x3D, 0xCA,
+    0xFE, 0x09, 0x47, 0xB0, 0xDB, 0x2C, 0x62, 0x95,
+    0xB4, 0x43, 0x0D, 0xFA, 0x91, 0x66, 0x28, 0xDF,
+    0x6A, 0x9D, 0xD3, 0x24, 0x4F, 0xB8, 0xF6, 0x01,
+    0x20, 0xD7, 0x99, 0x6E, 0x05, 0xF2, 0xBC, 0x4B,
+    0x81, 0x76, 0x38, 0xCF, 0xA4, 0x53, 0x1D, 0xEA,
+    0xCB, 0x3C, 0x72, 0x85, 0xEE, 0x19, 0x57, 0xA0,
+    0x15, 0xE2, 0xAC, 0x5B, 0x30, 0xC7, 0x89, 0x7E,
+    0x5F, 0xA8, 0xE6, 0x11, 0x7A, 0x8D, 0xC3, 0x34,
+    0xAB, 0x5C, 0x12, 0xE5, 0x8E, 0x79, 0x37, 0xC0,
+    0xE1, 0x16, 0x58, 0xAF, 0xC4, 0x33, 0x7D, 0x8A,
+    0x3F, 0xC8, 0x86, 0x71, 0x1A, 0xED, 0xA3, 0x54,
+    0x75, 0x82, 0xCC, 0x3B, 0x50, 0xA7, 0xE9, 0x1E,
+    0xD4, 0x23, 0x6D, 0x9A, 0xF1, 0x06, 0x48, 0xBF,
+    0x9E, 0x69, 0x27, 0xD0, 0xBB, 0x4C, 0x02, 0xF5,
+    0x40, 0xB7, 0xF9, 0x0E, 0x65, 0x92, 0xDC, 0x2B,
+    0x0A, 0xFD, 0xB3, 0x44, 0x2F, 0xD8, 0x96, 0x61,
+    0x55, 0xA2, 0xEC, 0x1B, 0x70, 0x87, 0xC9, 0x3E,
+    0x1F, 0xE8, 0xA6, 0x51, 0x3A, 0xCD, 0x83, 0x74,
+    0xC1, 0x36, 0x78, 0x8F, 0xE4, 0x13, 0x5D, 0xAA,
+    0x8B, 0x7C, 0x32, 0xC5, 0xAE, 0x59, 0x17, 0xE0,
+    0x2A, 0xDD, 0x93, 0x64, 0x0F, 0xF8, 0xB6, 0x41,
+    0x60, 0x97, 0xD9, 0x2E, 0x45, 0xB2, 0xFC, 0x0B,
+    0xBE, 0x49, 0x07, 0xF0, 0x9B, 0x6C, 0x22, 0xD5,
+    0xF4, 0x03, 0x4D, 0xBA, 0xD1, 0x26, 0x68, 0x9F
+};
+
+#define CRC_INNER_LOOP(n, c, x) \
+	(c) = ((c) >> 8) ^ crc##n##_table[((c) ^ (x)) & 0xff]
+
+uint8
+hndcrc8(
+	uint8 *pdata,	/* pointer to array of data to process */
+	uint  nbytes,	/* number of input data bytes to process */
+	uint8 crc	/* either CRC8_INIT_VALUE or previous return value */
+)
+{
+	/* hard code the crc loop instead of using CRC_INNER_LOOP macro
+	 * to avoid the undefined and unnecessary (uint8 >> 8) operation.
+	 */
+	while (nbytes-- > 0)
+		crc = crc8_table[(crc ^ *pdata++) & 0xff];
+
+	return crc;
+}
+
+/*******************************************************************************
+ * crc16
+ *
+ * Computes a crc16 over the input data using the polynomial:
+ *
+ *       x^16 + x^12 +x^5 + 1
+ *
+ * The caller provides the initial value (either CRC16_INIT_VALUE
+ * or the previous returned value) to allow for processing of
+ * discontiguous blocks of data.  When generating the CRC the
+ * caller is responsible for complementing the final return value
+ * and inserting it into the byte stream.  When checking, a final
+ * return value of CRC16_GOOD_VALUE indicates a valid CRC.
+ *
+ * Reference: Dallas Semiconductor Application Note 27
+ *   Williams, Ross N., "A Painless Guide to CRC Error Detection Algorithms",
+ *     ver 3, Aug 1993, ross@guest.adelaide.edu.au, Rocksoft Pty Ltd.,
+ *     ftp://ftp.rocksoft.com/clients/rocksoft/papers/crc_v3.txt
+ *
+ * ****************************************************************************
+ */
+
+static const uint16 crc16_table[256] = {
+    0x0000, 0x1189, 0x2312, 0x329B, 0x4624, 0x57AD, 0x6536, 0x74BF,
+    0x8C48, 0x9DC1, 0xAF5A, 0xBED3, 0xCA6C, 0xDBE5, 0xE97E, 0xF8F7,
+    0x1081, 0x0108, 0x3393, 0x221A, 0x56A5, 0x472C, 0x75B7, 0x643E,
+    0x9CC9, 0x8D40, 0xBFDB, 0xAE52, 0xDAED, 0xCB64, 0xF9FF, 0xE876,
+    0x2102, 0x308B, 0x0210, 0x1399, 0x6726, 0x76AF, 0x4434, 0x55BD,
+    0xAD4A, 0xBCC3, 0x8E58, 0x9FD1, 0xEB6E, 0xFAE7, 0xC87C, 0xD9F5,
+    0x3183, 0x200A, 0x1291, 0x0318, 0x77A7, 0x662E, 0x54B5, 0x453C,
+    0xBDCB, 0xAC42, 0x9ED9, 0x8F50, 0xFBEF, 0xEA66, 0xD8FD, 0xC974,
+    0x4204, 0x538D, 0x6116, 0x709F, 0x0420, 0x15A9, 0x2732, 0x36BB,
+    0xCE4C, 0xDFC5, 0xED5E, 0xFCD7, 0x8868, 0x99E1, 0xAB7A, 0xBAF3,
+    0x5285, 0x430C, 0x7197, 0x601E, 0x14A1, 0x0528, 0x37B3, 0x263A,
+    0xDECD, 0xCF44, 0xFDDF, 0xEC56, 0x98E9, 0x8960, 0xBBFB, 0xAA72,
+    0x6306, 0x728F, 0x4014, 0x519D, 0x2522, 0x34AB, 0x0630, 0x17B9,
+    0xEF4E, 0xFEC7, 0xCC5C, 0xDDD5, 0xA96A, 0xB8E3, 0x8A78, 0x9BF1,
+    0x7387, 0x620E, 0x5095, 0x411C, 0x35A3, 0x242A, 0x16B1, 0x0738,
+    0xFFCF, 0xEE46, 0xDCDD, 0xCD54, 0xB9EB, 0xA862, 0x9AF9, 0x8B70,
+    0x8408, 0x9581, 0xA71A, 0xB693, 0xC22C, 0xD3A5, 0xE13E, 0xF0B7,
+    0x0840, 0x19C9, 0x2B52, 0x3ADB, 0x4E64, 0x5FED, 0x6D76, 0x7CFF,
+    0x9489, 0x8500, 0xB79B, 0xA612, 0xD2AD, 0xC324, 0xF1BF, 0xE036,
+    0x18C1, 0x0948, 0x3BD3, 0x2A5A, 0x5EE5, 0x4F6C, 0x7DF7, 0x6C7E,
+    0xA50A, 0xB483, 0x8618, 0x9791, 0xE32E, 0xF2A7, 0xC03C, 0xD1B5,
+    0x2942, 0x38CB, 0x0A50, 0x1BD9, 0x6F66, 0x7EEF, 0x4C74, 0x5DFD,
+    0xB58B, 0xA402, 0x9699, 0x8710, 0xF3AF, 0xE226, 0xD0BD, 0xC134,
+    0x39C3, 0x284A, 0x1AD1, 0x0B58, 0x7FE7, 0x6E6E, 0x5CF5, 0x4D7C,
+    0xC60C, 0xD785, 0xE51E, 0xF497, 0x8028, 0x91A1, 0xA33A, 0xB2B3,
+    0x4A44, 0x5BCD, 0x6956, 0x78DF, 0x0C60, 0x1DE9, 0x2F72, 0x3EFB,
+    0xD68D, 0xC704, 0xF59F, 0xE416, 0x90A9, 0x8120, 0xB3BB, 0xA232,
+    0x5AC5, 0x4B4C, 0x79D7, 0x685E, 0x1CE1, 0x0D68, 0x3FF3, 0x2E7A,
+    0xE70E, 0xF687, 0xC41C, 0xD595, 0xA12A, 0xB0A3, 0x8238, 0x93B1,
+    0x6B46, 0x7ACF, 0x4854, 0x59DD, 0x2D62, 0x3CEB, 0x0E70, 0x1FF9,
+    0xF78F, 0xE606, 0xD49D, 0xC514, 0xB1AB, 0xA022, 0x92B9, 0x8330,
+    0x7BC7, 0x6A4E, 0x58D5, 0x495C, 0x3DE3, 0x2C6A, 0x1EF1, 0x0F78
+};
+
+uint16
+hndcrc16(
+    uint8 *pdata,  /* pointer to array of data to process */
+    uint nbytes, /* number of input data bytes to process */
+    uint16 crc     /* either CRC16_INIT_VALUE or previous return value */
+)
+{
+	while (nbytes-- > 0)
+		CRC_INNER_LOOP(16, crc, *pdata++);
+	return crc;
+}
+
+static const uint32 crc32_table[256] = {
+    0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA,
+    0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3,
+    0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988,
+    0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91,
+    0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE,
+    0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7,
+    0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC,
+    0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5,
+    0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172,
+    0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B,
+    0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940,
+    0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59,
+    0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116,
+    0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F,
+    0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924,
+    0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D,
+    0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A,
+    0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433,
+    0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818,
+    0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01,
+    0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E,
+    0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457,
+    0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C,
+    0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65,
+    0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2,
+    0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB,
+    0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0,
+    0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9,
+    0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086,
+    0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F,
+    0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4,
+    0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD,
+    0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A,
+    0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683,
+    0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8,
+    0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1,
+    0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE,
+    0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7,
+    0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC,
+    0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5,
+    0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252,
+    0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B,
+    0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60,
+    0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79,
+    0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236,
+    0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F,
+    0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04,
+    0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D,
+    0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A,
+    0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713,
+    0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38,
+    0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21,
+    0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E,
+    0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777,
+    0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C,
+    0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45,
+    0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2,
+    0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB,
+    0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0,
+    0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9,
+    0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6,
+    0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF,
+    0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94,
+    0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D
+};
+
+/*
+ * crc input is CRC32_INIT_VALUE for a fresh start, or previous return value if
+ * accumulating over multiple pieces.
+ */
+uint32
+hndcrc32(uint8 *pdata, uint nbytes, uint32 crc)
+{
+	uint8 *pend;
+	pend = pdata + nbytes;
+	while (pdata < pend)
+		CRC_INNER_LOOP(32, crc, *pdata++);
+
+	return crc;
+}
+
+#ifdef notdef
+#define CLEN 	1499 	/*  CRC Length */
+#define CBUFSIZ 	(CLEN+4)
+#define CNBUFS		5 /* # of bufs */
+
+void
+testcrc32(void)
+{
+	uint j, k, l;
+	uint8 *buf;
+	uint len[CNBUFS];
+	uint32 crcr;
+	uint32 crc32tv[CNBUFS] =
+		{0xd2cb1faa, 0xd385c8fa, 0xf5b4f3f3, 0x55789e20, 0x00343110};
+
+	ASSERT((buf = MALLOC(CBUFSIZ*CNBUFS)) != NULL);
+
+	/* step through all possible alignments */
+	for (l = 0; l <= 4; l++) {
+		for (j = 0; j < CNBUFS; j++) {
+			len[j] = CLEN;
+			for (k = 0; k < len[j]; k++)
+				*(buf + j*CBUFSIZ + (k+l)) = (j+k) & 0xff;
+		}
+
+		for (j = 0; j < CNBUFS; j++) {
+			crcr = crc32(buf + j*CBUFSIZ + l, len[j], CRC32_INIT_VALUE);
+			ASSERT(crcr == crc32tv[j]);
+		}
+	}
+
+	MFREE(buf, CBUFSIZ*CNBUFS);
+	return;
+}
+#endif /* notdef */
+
+/*
+ * Advance from the current 1-byte tag/1-byte length/variable-length value
+ * triple, to the next, returning a pointer to the next.
+ * If the current or next TLV is invalid (does not fit in given buffer length),
+ * NULL is returned.
+ * *buflen is not modified if the TLV elt parameter is invalid, or is decremented
+ * by the TLV parameter's length if it is valid.
+ */
+bcm_tlv_t *
+bcm_next_tlv(bcm_tlv_t *elt, int *buflen)
+{
+	int len;
+
+	/* validate current elt */
+	if (!bcm_valid_tlv(elt, *buflen)) {
+		return NULL;
+	}
+
+	/* advance to next elt */
+	len = elt->len;
+	elt = (bcm_tlv_t*)(elt->data + len);
+	*buflen -= (TLV_HDR_LEN + len);
+
+	/* validate next elt */
+	if (!bcm_valid_tlv(elt, *buflen)) {
+		return NULL;
+	}
+
+	return elt;
+}
+
+/*
+ * Traverse a string of 1-byte tag/1-byte length/variable-length value
+ * triples, returning a pointer to the substring whose first element
+ * matches tag
+ */
+bcm_tlv_t *
+bcm_parse_tlvs(void *buf, int buflen, uint key)
+{
+	bcm_tlv_t *elt;
+	int totlen;
+
+	elt = (bcm_tlv_t*)buf;
+	totlen = buflen;
+
+	/* find tagged parameter */
+	while (totlen >= TLV_HDR_LEN) {
+		int len = elt->len;
+
+		/* validate remaining totlen */
+		if ((elt->id == key) && (totlen >= (int)(len + TLV_HDR_LEN))) {
+
+			return (elt);
+		}
+
+		elt = (bcm_tlv_t*)((uint8*)elt + (len + TLV_HDR_LEN));
+		totlen -= (len + TLV_HDR_LEN);
+	}
+
+	return NULL;
+}
+
+/*
+ * Traverse a string of 1-byte tag/1-byte length/variable-length value
+ * triples, returning a pointer to the substring whose first element
+ * matches tag
+ * return NULL if not found or length field < min_varlen
+ */
+bcm_tlv_t *
+bcm_parse_tlvs_min_bodylen(void *buf, int buflen, uint key, int min_bodylen)
+{
+	bcm_tlv_t * ret = bcm_parse_tlvs(buf, buflen, key);
+	if (ret == NULL || ret->len < min_bodylen) {
+		return NULL;
+	}
+	return ret;
+}
+
+/*
+ * Traverse a string of 1-byte tag/1-byte length/variable-length value
+ * triples, returning a pointer to the substring whose first element
+ * matches tag.  Stop parsing when we see an element whose ID is greater
+ * than the target key.
+ */
+bcm_tlv_t *
+bcm_parse_ordered_tlvs(void *buf, int buflen, uint key)
+{
+	bcm_tlv_t *elt;
+	int totlen;
+
+	elt = (bcm_tlv_t*)buf;
+	totlen = buflen;
+
+	/* find tagged parameter */
+	while (totlen >= TLV_HDR_LEN) {
+		uint id = elt->id;
+		int len = elt->len;
+
+		/* Punt if we start seeing IDs > than target key */
+		if (id > key) {
+			return (NULL);
+		}
+
+		/* validate remaining totlen */
+		if ((id == key) && (totlen >= (int)(len + TLV_HDR_LEN))) {
+			return (elt);
+		}
+
+		elt = (bcm_tlv_t*)((uint8*)elt + (len + TLV_HDR_LEN));
+		totlen -= (len + TLV_HDR_LEN);
+	}
+	return NULL;
+}
+#endif	/* !BCMROMOFFLOAD_EXCLUDE_BCMUTILS_FUNCS */
+
+#if defined(WLMSG_PRHDRS) || defined(WLMSG_PRPKT) || defined(WLMSG_ASSOC) || \
+	defined(DHD_DEBUG)
+int
+bcm_format_field(const bcm_bit_desc_ex_t *bd, uint32 flags, char* buf, int len)
+{
+	int i, slen = 0;
+	uint32 bit, mask;
+	const char *name;
+	mask = bd->mask;
+	if (len < 2 || !buf)
+		return 0;
+
+	buf[0] = '\0';
+
+	for (i = 0;  (name = bd->bitfield[i].name) != NULL; i++) {
+		bit = bd->bitfield[i].bit;
+		if ((flags & mask) == bit) {
+			if (len > (int)strlen(name)) {
+				slen = strlen(name);
+				strncpy(buf, name, slen+1);
+			}
+			break;
+		}
+	}
+	return slen;
+}
+
+int
+bcm_format_flags(const bcm_bit_desc_t *bd, uint32 flags, char* buf, int len)
+{
+	int i;
+	char* p = buf;
+	char hexstr[16];
+	int slen = 0, nlen = 0;
+	uint32 bit;
+	const char* name;
+
+	if (len < 2 || !buf)
+		return 0;
+
+	buf[0] = '\0';
+
+	for (i = 0; flags != 0; i++) {
+		bit = bd[i].bit;
+		name = bd[i].name;
+		if (bit == 0 && flags != 0) {
+			/* print any unnamed bits */
+			snprintf(hexstr, 16, "0x%X", flags);
+			name = hexstr;
+			flags = 0;	/* exit loop */
+		} else if ((flags & bit) == 0)
+			continue;
+		flags &= ~bit;
+		nlen = strlen(name);
+		slen += nlen;
+		/* count btwn flag space */
+		if (flags != 0)
+			slen += 1;
+		/* need NULL char as well */
+		if (len <= slen)
+			break;
+		/* copy NULL char but don't count it */
+		strncpy(p, name, nlen + 1);
+		p += nlen;
+		/* copy btwn flag space and NULL char */
+		if (flags != 0)
+			p += snprintf(p, 2, " ");
+	}
+
+	/* indicate the str was too short */
+	if (flags != 0) {
+		if (len < 2)
+			p -= 2 - len;	/* overwrite last char */
+		p += snprintf(p, 2, ">");
+	}
+
+	return (int)(p - buf);
+}
+
+/* print bytes formatted as hex to a string. return the resulting string length */
+int
+bcm_format_hex(char *str, const void *bytes, int len)
+{
+	int i;
+	char *p = str;
+	const uint8 *src = (const uint8*)bytes;
+
+	for (i = 0; i < len; i++) {
+		p += snprintf(p, 3, "%02X", *src);
+		src++;
+	}
+	return (int)(p - str);
+}
+#endif
+
+/* pretty hex print a contiguous buffer */
+void
+prhex(const char *msg, uchar *buf, uint nbytes)
+{
+	char line[128], *p;
+	int len = sizeof(line);
+	int nchar;
+	uint i;
+
+	if (msg && (msg[0] != '\0'))
+		printf("%s:\n", msg);
+
+	p = line;
+	for (i = 0; i < nbytes; i++) {
+		if (i % 16 == 0) {
+			nchar = snprintf(p, len, "  %04d: ", i);	/* line prefix */
+			p += nchar;
+			len -= nchar;
+		}
+		if (len > 0) {
+			nchar = snprintf(p, len, "%02x ", buf[i]);
+			p += nchar;
+			len -= nchar;
+		}
+
+		if (i % 16 == 15) {
+			printf("%s\n", line);		/* flush line */
+			p = line;
+			len = sizeof(line);
+		}
+	}
+
+	/* flush last partial line */
+	if (p != line)
+		printf("%s\n", line);
+}
+
+static const char *crypto_algo_names[] = {
+	"NONE",
+	"WEP1",
+	"TKIP",
+	"WEP128",
+	"AES_CCM",
+	"AES_OCB_MSDU",
+	"AES_OCB_MPDU",
+	"NALG",
+	"UNDEF",
+	"UNDEF",
+	"UNDEF",
+	"WAPI",
+	"PMK",
+	"BIP",
+	"AES_GCM",
+	"AES_CCM256",
+	"AES_GCM256",
+	"BIP_CMAC256",
+	"BIP_GMAC",
+	"BIP_GMAC256",
+	"UNDEF"
+};
+
+const char *
+bcm_crypto_algo_name(uint algo)
+{
+	return (algo < ARRAYSIZE(crypto_algo_names)) ? crypto_algo_names[algo] : "ERR";
+}
+
+
+char *
+bcm_chipname(uint chipid, char *buf, uint len)
+{
+	const char *fmt;
+
+	fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? "%d" : "%x";
+	snprintf(buf, len, fmt, chipid);
+	return buf;
+}
+
+/* Produce a human-readable string for boardrev */
+char *
+bcm_brev_str(uint32 brev, char *buf)
+{
+	if (brev < 0x100)
+		snprintf(buf, 8, "%d.%d", (brev & 0xf0) >> 4, brev & 0xf);
+	else
+		snprintf(buf, 8, "%c%03x", ((brev & 0xf000) == 0x1000) ? 'P' : 'A', brev & 0xfff);
+
+	return (buf);
+}
+
+#define BUFSIZE_TODUMP_ATONCE 512 /* Buffer size */
+
+/* dump large strings to console */
+void
+printbig(char *buf)
+{
+	uint len, max_len;
+	char c;
+
+	len = (uint)strlen(buf);
+
+	max_len = BUFSIZE_TODUMP_ATONCE;
+
+	while (len > max_len) {
+		c = buf[max_len];
+		buf[max_len] = '\0';
+		printf("%s", buf);
+		buf[max_len] = c;
+
+		buf += max_len;
+		len -= max_len;
+	}
+	/* print the remaining string */
+	printf("%s\n", buf);
+	return;
+}
+
+/* routine to dump fields in a fileddesc structure */
+uint
+bcmdumpfields(bcmutl_rdreg_rtn read_rtn, void *arg0, uint arg1, struct fielddesc *fielddesc_array,
+	char *buf, uint32 bufsize)
+{
+	uint  filled_len;
+	int len;
+	struct fielddesc *cur_ptr;
+
+	filled_len = 0;
+	cur_ptr = fielddesc_array;
+
+	while (bufsize > 1) {
+		if (cur_ptr->nameandfmt == NULL)
+			break;
+		len = snprintf(buf, bufsize, cur_ptr->nameandfmt,
+		               read_rtn(arg0, arg1, cur_ptr->offset));
+		/* check for snprintf overflow or error */
+		if (len < 0 || (uint32)len >= bufsize)
+			len = bufsize - 1;
+		buf += len;
+		bufsize -= len;
+		filled_len += len;
+		cur_ptr++;
+	}
+	return filled_len;
+}
+
+uint
+bcm_mkiovar(char *name, char *data, uint datalen, char *buf, uint buflen)
+{
+	uint len;
+
+	len = (uint)strlen(name) + 1;
+
+	if ((len + datalen) > buflen)
+		return 0;
+
+	strncpy(buf, name, buflen);
+
+	/* append data onto the end of the name string */
+	memcpy(&buf[len], data, datalen);
+	len += datalen;
+
+	return len;
+}
+
+/* Quarter dBm units to mW
+ * Table starts at QDBM_OFFSET, so the first entry is mW for qdBm=153
+ * Table is offset so the last entry is largest mW value that fits in
+ * a uint16.
+ */
+
+#define QDBM_OFFSET 153		/* Offset for first entry */
+#define QDBM_TABLE_LEN 40	/* Table size */
+
+/* Smallest mW value that will round up to the first table entry, QDBM_OFFSET.
+ * Value is ( mW(QDBM_OFFSET - 1) + mW(QDBM_OFFSET) ) / 2
+ */
+#define QDBM_TABLE_LOW_BOUND 6493 /* Low bound */
+
+/* Largest mW value that will round down to the last table entry,
+ * QDBM_OFFSET + QDBM_TABLE_LEN-1.
+ * Value is ( mW(QDBM_OFFSET + QDBM_TABLE_LEN - 1) + mW(QDBM_OFFSET + QDBM_TABLE_LEN) ) / 2.
+ */
+#define QDBM_TABLE_HIGH_BOUND 64938 /* High bound */
+
+static const uint16 nqdBm_to_mW_map[QDBM_TABLE_LEN] = {
+/* qdBm: 	+0 	+1 	+2 	+3 	+4 	+5 	+6 	+7 */
+/* 153: */      6683,	7079,	7499,	7943,	8414,	8913,	9441,	10000,
+/* 161: */      10593,	11220,	11885,	12589,	13335,	14125,	14962,	15849,
+/* 169: */      16788,	17783,	18836,	19953,	21135,	22387,	23714,	25119,
+/* 177: */      26607,	28184,	29854,	31623,	33497,	35481,	37584,	39811,
+/* 185: */      42170,	44668,	47315,	50119,	53088,	56234,	59566,	63096
+};
+
+uint16
+bcm_qdbm_to_mw(uint8 qdbm)
+{
+	uint factor = 1;
+	int idx = qdbm - QDBM_OFFSET;
+
+	if (idx >= QDBM_TABLE_LEN) {
+		/* clamp to max uint16 mW value */
+		return 0xFFFF;
+	}
+
+	/* scale the qdBm index up to the range of the table 0-40
+	 * where an offset of 40 qdBm equals a factor of 10 mW.
+	 */
+	while (idx < 0) {
+		idx += 40;
+		factor *= 10;
+	}
+
+	/* return the mW value scaled down to the correct factor of 10,
+	 * adding in factor/2 to get proper rounding.
+	 */
+	return ((nqdBm_to_mW_map[idx] + factor/2) / factor);
+}
+
+uint8
+bcm_mw_to_qdbm(uint16 mw)
+{
+	uint8 qdbm;
+	int offset;
+	uint mw_uint = mw;
+	uint boundary;
+
+	/* handle boundary case */
+	if (mw_uint <= 1)
+		return 0;
+
+	offset = QDBM_OFFSET;
+
+	/* move mw into the range of the table */
+	while (mw_uint < QDBM_TABLE_LOW_BOUND) {
+		mw_uint *= 10;
+		offset -= 40;
+	}
+
+	for (qdbm = 0; qdbm < QDBM_TABLE_LEN-1; qdbm++) {
+		boundary = nqdBm_to_mW_map[qdbm] + (nqdBm_to_mW_map[qdbm+1] -
+		                                    nqdBm_to_mW_map[qdbm])/2;
+		if (mw_uint < boundary) break;
+	}
+
+	qdbm += (uint8)offset;
+
+	return (qdbm);
+}
+
+
+uint
+bcm_bitcount(uint8 *bitmap, uint length)
+{
+	uint bitcount = 0, i;
+	uint8 tmp;
+	for (i = 0; i < length; i++) {
+		tmp = bitmap[i];
+		while (tmp) {
+			bitcount++;
+			tmp &= (tmp - 1);
+		}
+	}
+	return bitcount;
+}
+
+#ifdef BCMDRIVER
+
+/* Initialization of bcmstrbuf structure */
+void
+bcm_binit(struct bcmstrbuf *b, char *buf, uint size)
+{
+	b->origsize = b->size = size;
+	b->origbuf = b->buf = buf;
+}
+
+/* Buffer sprintf wrapper to guard against buffer overflow */
+int
+bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...)
+{
+	va_list ap;
+	int r;
+
+	va_start(ap, fmt);
+
+	r = vsnprintf(b->buf, b->size, fmt, ap);
+
+	/* Non Ansi C99 compliant returns -1,
+	 * Ansi compliant return r >= b->size,
+	 * bcmstdlib returns 0, handle all
+	 */
+	/* r == 0 is also the case when strlen(fmt) is zero.
+	 * typically the case when "" is passed as argument.
+	 */
+	if ((r == -1) || (r >= (int)b->size)) {
+		b->size = 0;
+	} else {
+		b->size -= r;
+		b->buf += r;
+	}
+
+	va_end(ap);
+
+	return r;
+}
+
+void
+bcm_bprhex(struct bcmstrbuf *b, const char *msg, bool newline, uint8 *buf, int len)
+{
+	int i;
+
+	if (msg != NULL && msg[0] != '\0')
+		bcm_bprintf(b, "%s", msg);
+	for (i = 0; i < len; i ++)
+		bcm_bprintf(b, "%02X", buf[i]);
+	if (newline)
+		bcm_bprintf(b, "\n");
+}
+
+void
+bcm_inc_bytes(uchar *num, int num_bytes, uint8 amount)
+{
+	int i;
+
+	for (i = 0; i < num_bytes; i++) {
+		num[i] += amount;
+		if (num[i] >= amount)
+			break;
+		amount = 1;
+	}
+}
+
+int
+bcm_cmp_bytes(const uchar *arg1, const uchar *arg2, uint8 nbytes)
+{
+	int i;
+
+	for (i = nbytes - 1; i >= 0; i--) {
+		if (arg1[i] != arg2[i])
+			return (arg1[i] - arg2[i]);
+	}
+	return 0;
+}
+
+void
+bcm_print_bytes(const char *name, const uchar *data, int len)
+{
+	int i;
+	int per_line = 0;
+
+	printf("%s: %d \n", name ? name : "", len);
+	for (i = 0; i < len; i++) {
+		printf("%02x ", *data++);
+		per_line++;
+		if (per_line == 16) {
+			per_line = 0;
+			printf("\n");
+		}
+	}
+	printf("\n");
+}
+
+/* Look for vendor-specific IE with specified OUI and optional type */
+bcm_tlv_t *
+bcm_find_vendor_ie(void *tlvs, int tlvs_len, const char *voui, uint8 *type, int type_len)
+{
+	bcm_tlv_t *ie;
+	uint8 ie_len;
+
+	ie = (bcm_tlv_t*)tlvs;
+
+	/* make sure we are looking at a valid IE */
+	if (ie == NULL || !bcm_valid_tlv(ie, tlvs_len)) {
+		return NULL;
+	}
+
+	/* Walk through the IEs looking for an OUI match */
+	do {
+		ie_len = ie->len;
+		if ((ie->id == DOT11_MNG_PROPR_ID) &&
+		    (ie_len >= (DOT11_OUI_LEN + type_len)) &&
+		    !bcmp(ie->data, voui, DOT11_OUI_LEN))
+		{
+			/* compare optional type */
+			if (type_len == 0 ||
+			    !bcmp(&ie->data[DOT11_OUI_LEN], type, type_len)) {
+				return (ie);		/* a match */
+			}
+		}
+	} while ((ie = bcm_next_tlv(ie, &tlvs_len)) != NULL);
+
+	return NULL;
+}
+
+#if defined(WLTINYDUMP) || defined(WLMSG_INFORM) || defined(WLMSG_ASSOC) || \
+	defined(WLMSG_PRPKT) || defined(WLMSG_WSEC)
+#define SSID_FMT_BUF_LEN	((4 * DOT11_MAX_SSID_LEN) + 1)
+
+int
+bcm_format_ssid(char* buf, const uchar ssid[], uint ssid_len)
+{
+	uint i, c;
+	char *p = buf;
+	char *endp = buf + SSID_FMT_BUF_LEN;
+
+	if (ssid_len > DOT11_MAX_SSID_LEN) ssid_len = DOT11_MAX_SSID_LEN;
+
+	for (i = 0; i < ssid_len; i++) {
+		c = (uint)ssid[i];
+		if (c == '\\') {
+			*p++ = '\\';
+			*p++ = '\\';
+		} else if (bcm_isprint((uchar)c)) {
+			*p++ = (char)c;
+		} else {
+			p += snprintf(p, (endp - p), "\\x%02X", c);
+		}
+	}
+	*p = '\0';
+	ASSERT(p < endp);
+
+	return (int)(p - buf);
+}
+#endif
+
+#endif /* BCMDRIVER */
+
+/*
+ * ProcessVars:Takes a buffer of "<var>=<value>\n" lines read from a file and ending in a NUL.
+ * also accepts nvram files which are already in the format of <var1>=<value>\0\<var2>=<value2>\0
+ * Removes carriage returns, empty lines, comment lines, and converts newlines to NULs.
+ * Shortens buffer as needed and pads with NULs.  End of buffer is marked by two NULs.
+*/
+
+unsigned int
+process_nvram_vars(char *varbuf, unsigned int len)
+{
+	char *dp;
+	bool findNewline;
+	int column;
+	unsigned int buf_len, n;
+	unsigned int pad = 0;
+
+	dp = varbuf;
+
+	findNewline = FALSE;
+	column = 0;
+
+	for (n = 0; n < len; n++) {
+		if (varbuf[n] == '\r')
+			continue;
+		if (findNewline && varbuf[n] != '\n')
+			continue;
+		findNewline = FALSE;
+		if (varbuf[n] == '#') {
+			findNewline = TRUE;
+			continue;
+		}
+		if (varbuf[n] == '\n') {
+			if (column == 0)
+				continue;
+			*dp++ = 0;
+			column = 0;
+			continue;
+		}
+		*dp++ = varbuf[n];
+		column++;
+	}
+	buf_len = (unsigned int)(dp - varbuf);
+	if (buf_len % 4) {
+		pad = 4 - buf_len % 4;
+		if (pad && (buf_len + pad <= len)) {
+			buf_len += pad;
+		}
+	}
+
+	while (dp < varbuf + n)
+		*dp++ = 0;
+
+	return buf_len;
+}
+
+/* calculate a * b + c */
+void
+bcm_uint64_multiple_add(uint32* r_high, uint32* r_low, uint32 a, uint32 b, uint32 c)
+{
+#define FORMALIZE(var) {cc += (var & 0x80000000) ? 1 : 0; var &= 0x7fffffff;}
+	uint32 r1, r0;
+	uint32 a1, a0, b1, b0, t, cc = 0;
+
+	a1 = a >> 16;
+	a0 = a & 0xffff;
+	b1 = b >> 16;
+	b0 = b & 0xffff;
+
+	r0 = a0 * b0;
+	FORMALIZE(r0);
+
+	t = (a1 * b0) << 16;
+	FORMALIZE(t);
+
+	r0 += t;
+	FORMALIZE(r0);
+
+	t = (a0 * b1) << 16;
+	FORMALIZE(t);
+
+	r0 += t;
+	FORMALIZE(r0);
+
+	FORMALIZE(c);
+
+	r0 += c;
+	FORMALIZE(r0);
+
+	r0 |= (cc % 2) ? 0x80000000 : 0;
+	r1 = a1 * b1 + ((a1 * b0) >> 16) + ((b1 * a0) >> 16) + (cc / 2);
+
+	*r_high = r1;
+	*r_low = r0;
+}
+
+/* calculate a / b */
+void
+bcm_uint64_divide(uint32* r, uint32 a_high, uint32 a_low, uint32 b)
+{
+	uint32 a1 = a_high, a0 = a_low, r0 = 0;
+
+	if (b < 2)
+		return;
+
+	while (a1 != 0) {
+		r0 += (0xffffffff / b) * a1;
+		bcm_uint64_multiple_add(&a1, &a0, ((0xffffffff % b) + 1) % b, a1, a0);
+	}
+
+	r0 += a0 / b;
+	*r = r0;
+}
+
+#ifndef setbit /* As in the header file */
+#ifdef BCMUTILS_BIT_MACROS_USE_FUNCS
+/* Set bit in byte array. */
+void
+setbit(void *array, uint bit)
+{
+	((uint8 *)array)[bit / NBBY] |= 1 << (bit % NBBY);
+}
+
+/* Clear bit in byte array. */
+void
+clrbit(void *array, uint bit)
+{
+	((uint8 *)array)[bit / NBBY] &= ~(1 << (bit % NBBY));
+}
+
+/* Test if bit is set in byte array. */
+bool
+isset(const void *array, uint bit)
+{
+	return (((const uint8 *)array)[bit / NBBY] & (1 << (bit % NBBY)));
+}
+
+/* Test if bit is clear in byte array. */
+bool
+isclr(const void *array, uint bit)
+{
+	return ((((const uint8 *)array)[bit / NBBY] & (1 << (bit % NBBY))) == 0);
+}
+#endif /* BCMUTILS_BIT_MACROS_USE_FUNCS */
+#endif /* setbit */
+
+void
+set_bitrange(void *array, uint start, uint end, uint maxbit)
+{
+	uint startbyte = start/NBBY;
+	uint endbyte = end/NBBY;
+	uint i, startbytelastbit, endbytestartbit;
+
+	if (end >= start) {
+		if (endbyte - startbyte > 1)
+		{
+			startbytelastbit = (startbyte+1)*NBBY - 1;
+			endbytestartbit = endbyte*NBBY;
+			for (i = startbyte+1; i < endbyte; i++)
+				((uint8 *)array)[i] = 0xFF;
+			for (i = start; i <= startbytelastbit; i++)
+				setbit(array, i);
+			for (i = endbytestartbit; i <= end; i++)
+				setbit(array, i);
+		} else {
+			for (i = start; i <= end; i++)
+				setbit(array, i);
+		}
+	}
+	else {
+		set_bitrange(array, start, maxbit, maxbit);
+		set_bitrange(array, 0, end, maxbit);
+	}
+}
+
+void
+bcm_bitprint32(const uint32 u32)
+{
+	int i;
+	for (i = NBITS(uint32) - 1; i >= 0; i--) {
+		isbitset(u32, i) ? printf("1") : printf("0");
+		if ((i % NBBY) == 0) printf(" ");
+	}
+	printf("\n");
+}
+
+/* calculate checksum for ip header, tcp / udp header / data */
+uint16
+bcm_ip_cksum(uint8 *buf, uint32 len, uint32 sum)
+{
+	while (len > 1) {
+		sum += (buf[0] << 8) | buf[1];
+		buf += 2;
+		len -= 2;
+	}
+
+	if (len > 0) {
+		sum += (*buf) << 8;
+	}
+
+	while (sum >> 16) {
+		sum = (sum & 0xffff) + (sum >> 16);
+	}
+
+	return ((uint16)~sum);
+}
+
+#ifdef BCMDRIVER
+/*
+ * Hierarchical Multiword bitmap based small id allocator.
+ *
+ * Multilevel hierarchy bitmap. (maximum 2 levels)
+ * First hierarchy uses a multiword bitmap to identify 32bit words in the
+ * second hierarchy that have at least a single bit set. Each bit in a word of
+ * the second hierarchy represents a unique ID that may be allocated.
+ *
+ * BCM_MWBMAP_ITEMS_MAX: Maximum number of IDs managed.
+ * BCM_MWBMAP_BITS_WORD: Number of bits in a bitmap word word
+ * BCM_MWBMAP_WORDS_MAX: Maximum number of bitmap words needed for free IDs.
+ * BCM_MWBMAP_WDMAP_MAX: Maximum number of bitmap wordss identifying first non
+ *                       non-zero bitmap word carrying at least one free ID.
+ * BCM_MWBMAP_SHIFT_OP:  Used in MOD, DIV and MUL operations.
+ * BCM_MWBMAP_INVALID_IDX: Value ~0U is treated as an invalid ID
+ *
+ * Design Notes:
+ * BCM_MWBMAP_USE_CNTSETBITS trades CPU for memory. A runtime count of how many
+ * bits are computed each time on allocation and deallocation, requiring 4
+ * array indexed access and 3 arithmetic operations. When not defined, a runtime
+ * count of set bits state is maintained. Upto 32 Bytes per 1024 IDs is needed.
+ * In a 4K max ID allocator, up to 128Bytes are hence used per instantiation.
+ * In a memory limited system e.g. dongle builds, a CPU for memory tradeoff may
+ * be used by defining BCM_MWBMAP_USE_CNTSETBITS.
+ *
+ * Note: wd_bitmap[] is statically declared and is not ROM friendly ... array
+ * size is fixed. No intention to support larger than 4K indice allocation. ID
+ * allocators for ranges smaller than 4K will have a wastage of only 12Bytes
+ * with savings in not having to use an indirect access, had it been dynamically
+ * allocated.
+ */
+#define BCM_MWBMAP_ITEMS_MAX    (4 * 1024)  /* May increase to 16K */
+
+#define BCM_MWBMAP_BITS_WORD    (NBITS(uint32))
+#define BCM_MWBMAP_WORDS_MAX    (BCM_MWBMAP_ITEMS_MAX / BCM_MWBMAP_BITS_WORD)
+#define BCM_MWBMAP_WDMAP_MAX    (BCM_MWBMAP_WORDS_MAX / BCM_MWBMAP_BITS_WORD)
+#define BCM_MWBMAP_SHIFT_OP     (5)
+#define BCM_MWBMAP_MODOP(ix)    ((ix) & (BCM_MWBMAP_BITS_WORD - 1))
+#define BCM_MWBMAP_DIVOP(ix)    ((ix) >> BCM_MWBMAP_SHIFT_OP)
+#define BCM_MWBMAP_MULOP(ix)    ((ix) << BCM_MWBMAP_SHIFT_OP)
+
+/* Redefine PTR() and/or HDL() conversion to invoke audit for debugging */
+#define BCM_MWBMAP_PTR(hdl)		((struct bcm_mwbmap *)(hdl))
+#define BCM_MWBMAP_HDL(ptr)		((void *)(ptr))
+
+#if defined(BCM_MWBMAP_DEBUG)
+#define BCM_MWBMAP_AUDIT(mwb) \
+	do { \
+		ASSERT((mwb != NULL) && \
+		       (((struct bcm_mwbmap *)(mwb))->magic == (void *)(mwb))); \
+		bcm_mwbmap_audit(mwb); \
+	} while (0)
+#define MWBMAP_ASSERT(exp)		ASSERT(exp)
+#define MWBMAP_DBG(x)           printf x
+#else   /* !BCM_MWBMAP_DEBUG */
+#define BCM_MWBMAP_AUDIT(mwb)   do {} while (0)
+#define MWBMAP_ASSERT(exp)		do {} while (0)
+#define MWBMAP_DBG(x)
+#endif  /* !BCM_MWBMAP_DEBUG */
+
+
+typedef struct bcm_mwbmap {     /* Hierarchical multiword bitmap allocator    */
+	uint16 wmaps;               /* Total number of words in free wd bitmap    */
+	uint16 imaps;               /* Total number of words in free id bitmap    */
+	int16  ifree;               /* Count of free indices. Used only in audits */
+	uint16 total;               /* Total indices managed by multiword bitmap  */
+
+	void * magic;               /* Audit handle parameter from user           */
+
+	uint32 wd_bitmap[BCM_MWBMAP_WDMAP_MAX]; /* 1st level bitmap of            */
+#if !defined(BCM_MWBMAP_USE_CNTSETBITS)
+	int8   wd_count[BCM_MWBMAP_WORDS_MAX];  /* free id running count, 1st lvl */
+#endif /*  ! BCM_MWBMAP_USE_CNTSETBITS */
+
+	uint32 id_bitmap[0];        /* Second level bitmap                        */
+} bcm_mwbmap_t;
+
+/* Incarnate a hierarchical multiword bitmap based small index allocator. */
+struct bcm_mwbmap *
+bcm_mwbmap_init(osl_t *osh, uint32 items_max)
+{
+	struct bcm_mwbmap * mwbmap_p;
+	uint32 wordix, size, words, extra;
+
+	/* Implementation Constraint: Uses 32bit word bitmap */
+	MWBMAP_ASSERT(BCM_MWBMAP_BITS_WORD == 32U);
+	MWBMAP_ASSERT(BCM_MWBMAP_SHIFT_OP == 5U);
+	MWBMAP_ASSERT(ISPOWEROF2(BCM_MWBMAP_ITEMS_MAX));
+	MWBMAP_ASSERT((BCM_MWBMAP_ITEMS_MAX % BCM_MWBMAP_BITS_WORD) == 0U);
+
+	ASSERT(items_max <= BCM_MWBMAP_ITEMS_MAX);
+
+	/* Determine the number of words needed in the multiword bitmap */
+	extra = BCM_MWBMAP_MODOP(items_max);
+	words = BCM_MWBMAP_DIVOP(items_max) + ((extra != 0U) ? 1U : 0U);
+
+	/* Allocate runtime state of multiword bitmap */
+	/* Note: wd_count[] or wd_bitmap[] are not dynamically allocated */
+	size = sizeof(bcm_mwbmap_t) + (sizeof(uint32) * words);
+	mwbmap_p = (bcm_mwbmap_t *)MALLOC(osh, size);
+	if (mwbmap_p == (bcm_mwbmap_t *)NULL) {
+		ASSERT(0);
+		goto error1;
+	}
+	memset(mwbmap_p, 0, size);
+
+	/* Initialize runtime multiword bitmap state */
+	mwbmap_p->imaps = (uint16)words;
+	mwbmap_p->ifree = (int16)items_max;
+	mwbmap_p->total = (uint16)items_max;
+
+	/* Setup magic, for use in audit of handle */
+	mwbmap_p->magic = BCM_MWBMAP_HDL(mwbmap_p);
+
+	/* Setup the second level bitmap of free indices */
+	/* Mark all indices as available */
+	for (wordix = 0U; wordix < mwbmap_p->imaps; wordix++) {
+		mwbmap_p->id_bitmap[wordix] = (uint32)(~0U);
+#if !defined(BCM_MWBMAP_USE_CNTSETBITS)
+		mwbmap_p->wd_count[wordix] = BCM_MWBMAP_BITS_WORD;
+#endif /*  ! BCM_MWBMAP_USE_CNTSETBITS */
+	}
+
+	/* Ensure that extra indices are tagged as un-available */
+	if (extra) { /* fixup the free ids in last bitmap and wd_count */
+		uint32 * bmap_p = &mwbmap_p->id_bitmap[mwbmap_p->imaps - 1];
+		*bmap_p ^= (uint32)(~0U << extra); /* fixup bitmap */
+#if !defined(BCM_MWBMAP_USE_CNTSETBITS)
+		mwbmap_p->wd_count[mwbmap_p->imaps - 1] = (int8)extra; /* fixup count */
+#endif /*  ! BCM_MWBMAP_USE_CNTSETBITS */
+	}
+
+	/* Setup the first level bitmap hierarchy */
+	extra = BCM_MWBMAP_MODOP(mwbmap_p->imaps);
+	words = BCM_MWBMAP_DIVOP(mwbmap_p->imaps) + ((extra != 0U) ? 1U : 0U);
+
+	mwbmap_p->wmaps = (uint16)words;
+
+	for (wordix = 0U; wordix < mwbmap_p->wmaps; wordix++)
+		mwbmap_p->wd_bitmap[wordix] = (uint32)(~0U);
+	if (extra) {
+		uint32 * bmap_p = &mwbmap_p->wd_bitmap[mwbmap_p->wmaps - 1];
+		*bmap_p ^= (uint32)(~0U << extra); /* fixup bitmap */
+	}
+
+	return mwbmap_p;
+
+error1:
+	return BCM_MWBMAP_INVALID_HDL;
+}
+
+/* Release resources used by multiword bitmap based small index allocator. */
+void
+bcm_mwbmap_fini(osl_t * osh, struct bcm_mwbmap * mwbmap_hdl)
+{
+	bcm_mwbmap_t * mwbmap_p;
+
+	BCM_MWBMAP_AUDIT(mwbmap_hdl);
+	mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+	MFREE(osh, mwbmap_p, sizeof(struct bcm_mwbmap)
+	                     + (sizeof(uint32) * mwbmap_p->imaps));
+	return;
+}
+
+/* Allocate a unique small index using a multiword bitmap index allocator.    */
+uint32 BCMFASTPATH
+bcm_mwbmap_alloc(struct bcm_mwbmap * mwbmap_hdl)
+{
+	bcm_mwbmap_t * mwbmap_p;
+	uint32 wordix, bitmap;
+
+	BCM_MWBMAP_AUDIT(mwbmap_hdl);
+	mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+	/* Start with the first hierarchy */
+	for (wordix = 0; wordix < mwbmap_p->wmaps; ++wordix) {
+
+		bitmap = mwbmap_p->wd_bitmap[wordix]; /* get the word bitmap */
+
+		if (bitmap != 0U) {
+
+			uint32 count, bitix, *bitmap_p;
+
+			bitmap_p = &mwbmap_p->wd_bitmap[wordix];
+
+			/* clear all except trailing 1 */
+			bitmap   = (uint32)(((int)(bitmap)) & (-((int)(bitmap))));
+			MWBMAP_ASSERT(C_bcm_count_leading_zeros(bitmap) ==
+			              bcm_count_leading_zeros(bitmap));
+			bitix    = (BCM_MWBMAP_BITS_WORD - 1)
+			         - bcm_count_leading_zeros(bitmap); /* use asm clz */
+			wordix   = BCM_MWBMAP_MULOP(wordix) + bitix;
+
+			/* Clear bit if wd count is 0, without conditional branch */
+#if defined(BCM_MWBMAP_USE_CNTSETBITS)
+			count = bcm_cntsetbits(mwbmap_p->id_bitmap[wordix]) - 1;
+#else  /* ! BCM_MWBMAP_USE_CNTSETBITS */
+			mwbmap_p->wd_count[wordix]--;
+			count = mwbmap_p->wd_count[wordix];
+			MWBMAP_ASSERT(count ==
+			              (bcm_cntsetbits(mwbmap_p->id_bitmap[wordix]) - 1));
+#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
+			MWBMAP_ASSERT(count >= 0);
+
+			/* clear wd_bitmap bit if id_map count is 0 */
+			bitmap = (count == 0) << bitix;
+
+			MWBMAP_DBG((
+			    "Lvl1: bitix<%02u> wordix<%02u>: %08x ^ %08x = %08x wfree %d",
+			    bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) ^ bitmap, count));
+
+			*bitmap_p ^= bitmap;
+
+			/* Use bitix in the second hierarchy */
+			bitmap_p = &mwbmap_p->id_bitmap[wordix];
+
+			bitmap = mwbmap_p->id_bitmap[wordix]; /* get the id bitmap */
+			MWBMAP_ASSERT(bitmap != 0U);
+
+			/* clear all except trailing 1 */
+			bitmap   = (uint32)(((int)(bitmap)) & (-((int)(bitmap))));
+			MWBMAP_ASSERT(C_bcm_count_leading_zeros(bitmap) ==
+			              bcm_count_leading_zeros(bitmap));
+			bitix    = BCM_MWBMAP_MULOP(wordix)
+			         + (BCM_MWBMAP_BITS_WORD - 1)
+			         - bcm_count_leading_zeros(bitmap); /* use asm clz */
+
+			mwbmap_p->ifree--; /* decrement system wide free count */
+			MWBMAP_ASSERT(mwbmap_p->ifree >= 0);
+
+			MWBMAP_DBG((
+			    "Lvl2: bitix<%02u> wordix<%02u>: %08x ^ %08x = %08x ifree %d",
+			    bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) ^ bitmap,
+			    mwbmap_p->ifree));
+
+			*bitmap_p ^= bitmap; /* mark as allocated = 1b0 */
+
+			return bitix;
+		}
+	}
+
+	ASSERT(mwbmap_p->ifree == 0);
+
+	return BCM_MWBMAP_INVALID_IDX;
+}
+
+/* Force an index at a specified position to be in use */
+void
+bcm_mwbmap_force(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix)
+{
+	bcm_mwbmap_t * mwbmap_p;
+	uint32 count, wordix, bitmap, *bitmap_p;
+
+	BCM_MWBMAP_AUDIT(mwbmap_hdl);
+	mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+	ASSERT(bitix < mwbmap_p->total);
+
+	/* Start with second hierarchy */
+	wordix   = BCM_MWBMAP_DIVOP(bitix);
+	bitmap   = (uint32)(1U << BCM_MWBMAP_MODOP(bitix));
+	bitmap_p = &mwbmap_p->id_bitmap[wordix];
+
+	ASSERT((*bitmap_p & bitmap) == bitmap);
+
+	mwbmap_p->ifree--; /* update free count */
+	ASSERT(mwbmap_p->ifree >= 0);
+
+	MWBMAP_DBG(("Lvl2: bitix<%u> wordix<%u>: %08x ^ %08x = %08x ifree %d",
+	           bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) ^ bitmap,
+	           mwbmap_p->ifree));
+
+	*bitmap_p ^= bitmap; /* mark as in use */
+
+	/* Update first hierarchy */
+	bitix    = wordix;
+
+	wordix   = BCM_MWBMAP_DIVOP(bitix);
+	bitmap_p = &mwbmap_p->wd_bitmap[wordix];
+
+#if defined(BCM_MWBMAP_USE_CNTSETBITS)
+	count = bcm_cntsetbits(mwbmap_p->id_bitmap[bitix]);
+#else  /* ! BCM_MWBMAP_USE_CNTSETBITS */
+	mwbmap_p->wd_count[bitix]--;
+	count = mwbmap_p->wd_count[bitix];
+	MWBMAP_ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[bitix]));
+#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
+	MWBMAP_ASSERT(count >= 0);
+
+	bitmap   = (count == 0) << BCM_MWBMAP_MODOP(bitix);
+
+	MWBMAP_DBG(("Lvl1: bitix<%02lu> wordix<%02u>: %08x ^ %08x = %08x wfree %d",
+	           BCM_MWBMAP_MODOP(bitix), wordix, *bitmap_p, bitmap,
+	           (*bitmap_p) ^ bitmap, count));
+
+	*bitmap_p ^= bitmap; /* mark as in use */
+
+	return;
+}
+
+/* Free a previously allocated index back into the multiword bitmap allocator */
+void BCMFASTPATH
+bcm_mwbmap_free(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix)
+{
+	bcm_mwbmap_t * mwbmap_p;
+	uint32 wordix, bitmap, *bitmap_p;
+
+	BCM_MWBMAP_AUDIT(mwbmap_hdl);
+	mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+	ASSERT(bitix < mwbmap_p->total);
+
+	/* Start with second level hierarchy */
+	wordix   = BCM_MWBMAP_DIVOP(bitix);
+	bitmap   = (1U << BCM_MWBMAP_MODOP(bitix));
+	bitmap_p = &mwbmap_p->id_bitmap[wordix];
+
+	ASSERT((*bitmap_p & bitmap) == 0U);	/* ASSERT not a double free */
+
+	mwbmap_p->ifree++; /* update free count */
+	ASSERT(mwbmap_p->ifree <= mwbmap_p->total);
+
+	MWBMAP_DBG(("Lvl2: bitix<%02u> wordix<%02u>: %08x | %08x = %08x ifree %d",
+	           bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) | bitmap,
+	           mwbmap_p->ifree));
+
+	*bitmap_p |= bitmap; /* mark as available */
+
+	/* Now update first level hierarchy */
+
+	bitix    = wordix;
+
+	wordix   = BCM_MWBMAP_DIVOP(bitix); /* first level's word index */
+	bitmap   = (1U << BCM_MWBMAP_MODOP(bitix));
+	bitmap_p = &mwbmap_p->wd_bitmap[wordix];
+
+#if !defined(BCM_MWBMAP_USE_CNTSETBITS)
+	mwbmap_p->wd_count[bitix]++;
+#endif
+
+#if defined(BCM_MWBMAP_DEBUG)
+	{
+		uint32 count;
+#if defined(BCM_MWBMAP_USE_CNTSETBITS)
+		count = bcm_cntsetbits(mwbmap_p->id_bitmap[bitix]);
+#else  /*  ! BCM_MWBMAP_USE_CNTSETBITS */
+		count = mwbmap_p->wd_count[bitix];
+		MWBMAP_ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[bitix]));
+#endif /*  ! BCM_MWBMAP_USE_CNTSETBITS */
+
+		MWBMAP_ASSERT(count <= BCM_MWBMAP_BITS_WORD);
+
+		MWBMAP_DBG(("Lvl1: bitix<%02u> wordix<%02u>: %08x | %08x = %08x wfree %d",
+		            bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) | bitmap, count));
+	}
+#endif /* BCM_MWBMAP_DEBUG */
+
+	*bitmap_p |= bitmap;
+
+	return;
+}
+
+/* Fetch the toal number of free indices in the multiword bitmap allocator */
+uint32
+bcm_mwbmap_free_cnt(struct bcm_mwbmap * mwbmap_hdl)
+{
+	bcm_mwbmap_t * mwbmap_p;
+
+	BCM_MWBMAP_AUDIT(mwbmap_hdl);
+	mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+	ASSERT(mwbmap_p->ifree >= 0);
+
+	return mwbmap_p->ifree;
+}
+
+/* Determine whether an index is inuse or free */
+bool
+bcm_mwbmap_isfree(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix)
+{
+	bcm_mwbmap_t * mwbmap_p;
+	uint32 wordix, bitmap;
+
+	BCM_MWBMAP_AUDIT(mwbmap_hdl);
+	mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+	ASSERT(bitix < mwbmap_p->total);
+
+	wordix   = BCM_MWBMAP_DIVOP(bitix);
+	bitmap   = (1U << BCM_MWBMAP_MODOP(bitix));
+
+	return ((mwbmap_p->id_bitmap[wordix] & bitmap) != 0U);
+}
+
+/* Debug dump a multiword bitmap allocator */
+void
+bcm_mwbmap_show(struct bcm_mwbmap * mwbmap_hdl)
+{
+	uint32 ix, count;
+	bcm_mwbmap_t * mwbmap_p;
+
+	BCM_MWBMAP_AUDIT(mwbmap_hdl);
+	mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+	printf("mwbmap_p %p wmaps %u imaps %u ifree %d total %u\n", mwbmap_p,
+	       mwbmap_p->wmaps, mwbmap_p->imaps, mwbmap_p->ifree, mwbmap_p->total);
+	for (ix = 0U; ix < mwbmap_p->wmaps; ix++) {
+		printf("\tWDMAP:%2u. 0x%08x\t", ix, mwbmap_p->wd_bitmap[ix]);
+		bcm_bitprint32(mwbmap_p->wd_bitmap[ix]);
+		printf("\n");
+	}
+	for (ix = 0U; ix < mwbmap_p->imaps; ix++) {
+#if defined(BCM_MWBMAP_USE_CNTSETBITS)
+		count = bcm_cntsetbits(mwbmap_p->id_bitmap[ix]);
+#else  /* ! BCM_MWBMAP_USE_CNTSETBITS */
+		count = mwbmap_p->wd_count[ix];
+		MWBMAP_ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[ix]));
+#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
+		printf("\tIDMAP:%2u. 0x%08x %02u\t", ix, mwbmap_p->id_bitmap[ix], count);
+		bcm_bitprint32(mwbmap_p->id_bitmap[ix]);
+		printf("\n");
+	}
+
+	return;
+}
+
+/* Audit a hierarchical multiword bitmap */
+void
+bcm_mwbmap_audit(struct bcm_mwbmap * mwbmap_hdl)
+{
+	bcm_mwbmap_t * mwbmap_p;
+	uint32 count, free_cnt = 0U, wordix, idmap_ix, bitix, *bitmap_p;
+
+	mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+	for (wordix = 0U; wordix < mwbmap_p->wmaps; ++wordix) {
+
+		bitmap_p = &mwbmap_p->wd_bitmap[wordix];
+
+		for (bitix = 0U; bitix < BCM_MWBMAP_BITS_WORD; bitix++) {
+			if ((*bitmap_p) & (1 << bitix)) {
+				idmap_ix = BCM_MWBMAP_MULOP(wordix) + bitix;
+#if defined(BCM_MWBMAP_USE_CNTSETBITS)
+				count = bcm_cntsetbits(mwbmap_p->id_bitmap[idmap_ix]);
+#else  /* ! BCM_MWBMAP_USE_CNTSETBITS */
+				count = mwbmap_p->wd_count[idmap_ix];
+				ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[idmap_ix]));
+#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
+				ASSERT(count != 0U);
+				free_cnt += count;
+			}
+		}
+	}
+
+	ASSERT((int)free_cnt == mwbmap_p->ifree);
+}
+/* END : Multiword bitmap based 64bit to Unique 32bit Id allocator. */
+
+/* Simple 16bit Id allocator using a stack implementation. */
+typedef struct id16_map {
+	uint16  total;     /* total number of ids managed by allocator */
+	uint16  start;     /* start value of 16bit ids to be managed */
+	uint32  failures;  /* count of failures */
+	void    *dbg;      /* debug placeholder */
+	int     stack_idx; /* index into stack of available ids */
+	uint16  stack[0];  /* stack of 16 bit ids */
+} id16_map_t;
+
+#define ID16_MAP_SZ(items)      (sizeof(id16_map_t) + \
+	                             (sizeof(uint16) * (items)))
+
+#if defined(BCM_DBG)
+
+/* Uncomment BCM_DBG_ID16 to debug double free */
+/* #define BCM_DBG_ID16 */
+
+typedef struct id16_map_dbg {
+	uint16  total;
+	bool    avail[0];
+} id16_map_dbg_t;
+#define ID16_MAP_DBG_SZ(items)  (sizeof(id16_map_dbg_t) + \
+	                             (sizeof(bool) * (items)))
+#define ID16_MAP_MSG(x)         print x
+#else
+#define ID16_MAP_MSG(x)
+#endif /* BCM_DBG */
+
+void * /* Construct an id16 allocator: [start_val16 .. start_val16+total_ids) */
+id16_map_init(osl_t *osh, uint16 total_ids, uint16 start_val16)
+{
+	uint16 idx, val16;
+	id16_map_t * id16_map;
+
+	ASSERT(total_ids > 0);
+	ASSERT((start_val16 + total_ids) < ID16_INVALID);
+
+	id16_map = (id16_map_t *) MALLOC(osh, ID16_MAP_SZ(total_ids));
+	if (id16_map == NULL) {
+		return NULL;
+	}
+
+	id16_map->total = total_ids;
+	id16_map->start = start_val16;
+	id16_map->failures = 0;
+	id16_map->dbg = NULL;
+
+	/* Populate stack with 16bit id values, commencing with start_val16 */
+	id16_map->stack_idx = 0;
+	val16 = start_val16;
+
+	for (idx = 0; idx < total_ids; idx++, val16++) {
+		id16_map->stack_idx = idx;
+		id16_map->stack[id16_map->stack_idx] = val16;
+	}
+
+#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
+	id16_map->dbg = MALLOC(osh, ID16_MAP_DBG_SZ(total_ids));
+
+	if (id16_map->dbg) {
+		id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg;
+
+		id16_map_dbg->total = total_ids;
+		for (idx = 0; idx < total_ids; idx++) {
+			id16_map_dbg->avail[idx] = TRUE;
+		}
+	}
+#endif /* BCM_DBG && BCM_DBG_ID16 */
+
+	return (void *)id16_map;
+}
+
+void * /* Destruct an id16 allocator instance */
+id16_map_fini(osl_t *osh, void * id16_map_hndl)
+{
+	uint16 total_ids;
+	id16_map_t * id16_map;
+
+	if (id16_map_hndl == NULL)
+		return NULL;
+
+	id16_map = (id16_map_t *)id16_map_hndl;
+
+	total_ids = id16_map->total;
+	ASSERT(total_ids > 0);
+
+#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
+	if (id16_map->dbg) {
+		MFREE(osh, id16_map->dbg, ID16_MAP_DBG_SZ(total_ids));
+		id16_map->dbg = NULL;
+	}
+#endif /* BCM_DBG && BCM_DBG_ID16 */
+
+	id16_map->total = 0;
+	MFREE(osh, id16_map, ID16_MAP_SZ(total_ids));
+
+	return NULL;
+}
+
+void
+id16_map_clear(void * id16_map_hndl, uint16 total_ids, uint16 start_val16)
+{
+	uint16 idx, val16;
+	id16_map_t * id16_map;
+
+	ASSERT(total_ids > 0);
+	ASSERT((start_val16 + total_ids) < ID16_INVALID);
+
+	id16_map = (id16_map_t *)id16_map_hndl;
+	if (id16_map == NULL) {
+		return;
+	}
+
+	id16_map->total = total_ids;
+	id16_map->start = start_val16;
+	id16_map->failures = 0;
+
+	/* Populate stack with 16bit id values, commencing with start_val16 */
+	id16_map->stack_idx = 0;
+	val16 = start_val16;
+
+	for (idx = 0; idx < total_ids; idx++, val16++) {
+		id16_map->stack_idx = idx;
+		id16_map->stack[id16_map->stack_idx] = val16;
+	}
+
+#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
+	if (id16_map->dbg) {
+		id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg;
+
+		id16_map_dbg->total = total_ids;
+		for (idx = 0; idx < total_ids; idx++) {
+			id16_map_dbg->avail[idx] = TRUE;
+		}
+	}
+#endif /* BCM_DBG && BCM_DBG_ID16 */
+}
+
+
+uint16 BCMFASTPATH /* Allocate a unique 16bit id */
+id16_map_alloc(void * id16_map_hndl)
+{
+	uint16 val16;
+	id16_map_t * id16_map;
+
+	ASSERT(id16_map_hndl != NULL);
+
+	id16_map = (id16_map_t *)id16_map_hndl;
+
+	ASSERT(id16_map->total > 0);
+
+	if (id16_map->stack_idx < 0) {
+		id16_map->failures++;
+		return ID16_INVALID;
+	}
+
+	val16 = id16_map->stack[id16_map->stack_idx];
+	id16_map->stack_idx--;
+
+#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
+
+	ASSERT(val16 < (id16_map->start + id16_map->total));
+
+	if (id16_map->dbg) { /* Validate val16 */
+		id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg;
+
+		ASSERT(id16_map_dbg->avail[val16 - id16_map->start] == TRUE);
+		id16_map_dbg->avail[val16 - id16_map->start] = FALSE;
+	}
+#endif /* BCM_DBG && BCM_DBG_ID16 */
+
+	return val16;
+}
+
+
+void BCMFASTPATH /* Free a 16bit id value into the id16 allocator */
+id16_map_free(void * id16_map_hndl, uint16 val16)
+{
+	id16_map_t * id16_map;
+
+	ASSERT(id16_map_hndl != NULL);
+
+	id16_map = (id16_map_t *)id16_map_hndl;
+
+#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
+
+	ASSERT(val16 < (id16_map->start + id16_map->total));
+
+	if (id16_map->dbg) { /* Validate val16 */
+		id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg;
+
+		ASSERT(id16_map_dbg->avail[val16 - id16_map->start] == FALSE);
+		id16_map_dbg->avail[val16 - id16_map->start] = TRUE;
+	}
+#endif /* BCM_DBG && BCM_DBG_ID16 */
+
+	id16_map->stack_idx++;
+	id16_map->stack[id16_map->stack_idx] = val16;
+}
+
+uint32 /* Returns number of failures to allocate an unique id16 */
+id16_map_failures(void * id16_map_hndl)
+{
+	ASSERT(id16_map_hndl != NULL);
+	return ((id16_map_t *)id16_map_hndl)->failures;
+}
+
+bool
+id16_map_audit(void * id16_map_hndl)
+{
+	int idx;
+	int insane = 0;
+	id16_map_t * id16_map;
+
+	ASSERT(id16_map_hndl != NULL);
+
+	id16_map = (id16_map_t *)id16_map_hndl;
+
+	ASSERT((id16_map->stack_idx > 0) && (id16_map->stack_idx < id16_map->total));
+	for (idx = 0; idx <= id16_map->stack_idx; idx++) {
+		ASSERT(id16_map->stack[idx] >= id16_map->start);
+		ASSERT(id16_map->stack[idx] < (id16_map->start + id16_map->total));
+
+#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
+		if (id16_map->dbg) {
+			uint16 val16 = id16_map->stack[idx];
+			if (((id16_map_dbg_t *)(id16_map->dbg))->avail[val16] != TRUE) {
+				insane |= 1;
+				ID16_MAP_MSG(("id16_map<%p>: stack_idx %u invalid val16 %u\n",
+				              id16_map_hndl, idx, val16));
+			}
+		}
+#endif /* BCM_DBG && BCM_DBG_ID16 */
+	}
+
+#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
+	if (id16_map->dbg) {
+		uint16 avail = 0; /* Audit available ids counts */
+		for (idx = 0; idx < id16_map_dbg->total; idx++) {
+			if (((id16_map_dbg_t *)(id16_map->dbg))->avail[idx16] == TRUE)
+				avail++;
+		}
+		if (avail && (avail != (id16_map->stack_idx + 1))) {
+			insane |= 1;
+			ID16_MAP_MSG(("id16_map<%p>: avail %u stack_idx %u\n",
+			              id16_map_hndl, avail, id16_map->stack_idx));
+		}
+	}
+#endif /* BCM_DBG && BCM_DBG_ID16 */
+
+	return (!!insane);
+}
+/* END: Simple id16 allocator */
+
+
+#endif /* BCMDRIVER */
+
+/* calculate a >> b; and returns only lower 32 bits */
+void
+bcm_uint64_right_shift(uint32* r, uint32 a_high, uint32 a_low, uint32 b)
+{
+	uint32 a1 = a_high, a0 = a_low, r0 = 0;
+
+	if (b == 0) {
+		r0 = a_low;
+		*r = r0;
+		return;
+	}
+
+	if (b < 32) {
+		a0 = a0 >> b;
+		a1 = a1 & ((1 << b) - 1);
+		a1 = a1 << (32 - b);
+		r0 = a0 | a1;
+		*r = r0;
+		return;
+	} else {
+		r0 = a1 >> (b - 32);
+		*r = r0;
+		return;
+	}
+
+}
+
+/* calculate a + b where a is a 64 bit number and b is a 32 bit number */
+void
+bcm_add_64(uint32* r_hi, uint32* r_lo, uint32 offset)
+{
+	uint32 r1_lo = *r_lo;
+	(*r_lo) += offset;
+	if (*r_lo < r1_lo)
+		(*r_hi) ++;
+}
+
+/* calculate a - b where a is a 64 bit number and b is a 32 bit number */
+void
+bcm_sub_64(uint32* r_hi, uint32* r_lo, uint32 offset)
+{
+	uint32 r1_lo = *r_lo;
+	(*r_lo) -= offset;
+	if (*r_lo > r1_lo)
+		(*r_hi) --;
+}
+
+#ifdef DEBUG_COUNTER
+#if (OSL_SYSUPTIME_SUPPORT == TRUE)
+void counter_printlog(counter_tbl_t *ctr_tbl)
+{
+	uint32 now;
+
+	if (!ctr_tbl->enabled)
+		return;
+
+	now = OSL_SYSUPTIME();
+
+	if (now - ctr_tbl->prev_log_print > ctr_tbl->log_print_interval) {
+		uint8 i = 0;
+		printf("counter_print(%s %d):", ctr_tbl->name, now - ctr_tbl->prev_log_print);
+
+		for (i = 0; i < ctr_tbl->needed_cnt; i++) {
+			printf(" %u", ctr_tbl->cnt[i]);
+		}
+		printf("\n");
+
+		ctr_tbl->prev_log_print = now;
+		bzero(ctr_tbl->cnt, CNTR_TBL_MAX * sizeof(uint));
+	}
+}
+#else
+/* OSL_SYSUPTIME is not supported so no way to get time */
+#define counter_printlog(a) do {} while (0)
+#endif /* OSL_SYSUPTIME_SUPPORT == TRUE */
+#endif /* DEBUG_COUNTER */
+
+#ifdef BCMDRIVER
+void
+dll_pool_detach(void * osh, dll_pool_t * pool, uint16 elems_max, uint16 elem_size)
+{
+	uint32 mem_size;
+	mem_size = sizeof(dll_pool_t) + (elems_max * elem_size);
+	if (pool)
+		MFREE(osh, pool, mem_size);
+}
+dll_pool_t *
+dll_pool_init(void * osh, uint16 elems_max, uint16 elem_size)
+{
+	uint32 mem_size, i;
+	dll_pool_t * dll_pool_p;
+	dll_t * elem_p;
+
+	ASSERT(elem_size > sizeof(dll_t));
+
+	mem_size = sizeof(dll_pool_t) + (elems_max * elem_size);
+
+	if ((dll_pool_p = (dll_pool_t *)MALLOC(osh, mem_size)) == NULL) {
+		printf("dll_pool_init: elems_max<%u> elem_size<%u> malloc failure\n",
+			elems_max, elem_size);
+		ASSERT(0);
+		return dll_pool_p;
+	}
+
+	bzero(dll_pool_p, mem_size);
+
+	dll_init(&dll_pool_p->free_list);
+	dll_pool_p->elems_max = elems_max;
+	dll_pool_p->elem_size = elem_size;
+
+	elem_p = dll_pool_p->elements;
+	for (i = 0; i < elems_max; i++) {
+		dll_append(&dll_pool_p->free_list, elem_p);
+		elem_p = (dll_t *)((uintptr)elem_p + elem_size);
+	}
+
+	dll_pool_p->free_count = elems_max;
+
+	return dll_pool_p;
+}
+
+
+void *
+dll_pool_alloc(dll_pool_t * dll_pool_p)
+{
+	dll_t * elem_p;
+
+	if (dll_pool_p->free_count == 0) {
+		ASSERT(dll_empty(&dll_pool_p->free_list));
+		return NULL;
+	}
+
+	elem_p = dll_head_p(&dll_pool_p->free_list);
+	dll_delete(elem_p);
+	dll_pool_p->free_count -= 1;
+
+	return (void *)elem_p;
+}
+
+void
+dll_pool_free(dll_pool_t * dll_pool_p, void * elem_p)
+{
+	dll_t * node_p = (dll_t *)elem_p;
+	dll_prepend(&dll_pool_p->free_list, node_p);
+	dll_pool_p->free_count += 1;
+}
+
+
+void
+dll_pool_free_tail(dll_pool_t * dll_pool_p, void * elem_p)
+{
+	dll_t * node_p = (dll_t *)elem_p;
+	dll_append(&dll_pool_p->free_list, node_p);
+	dll_pool_p->free_count += 1;
+}
+
+#endif /* BCMDRIVER */
diff --git a/drivers/net/wireless/bcmdhd/bcmwifi_channels.c b/drivers/net/wireless/bcmdhd/bcmwifi_channels.c
new file mode 100644
index 0000000..8655937
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmwifi_channels.c
@@ -0,0 +1,1229 @@
+/*
+ * Misc utility routines used by kernel or app-level.
+ * Contents are wifi-specific, used by any kernel or app-level
+ * software that might want wifi things as it grows.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ *
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ *
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: bcmwifi_channels.c 309193 2012-01-19 00:03:57Z $
+ */
+
+#include <bcm_cfg.h>
+#include <typedefs.h>
+#include <bcmutils.h>
+
+#ifdef BCMDRIVER
+#include <osl.h>
+#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
+#define tolower(c) (bcm_isupper((c)) ? ((c) + 'a' - 'A') : (c))
+#else
+#include <stdio.h>
+#include <stdlib.h>
+#include <ctype.h>
+#ifndef ASSERT
+#define ASSERT(exp)
+#endif
+#endif /* BCMDRIVER */
+
+#include <bcmwifi_channels.h>
+
+#if defined(WIN32) && (defined(BCMDLL) || defined(WLMDLL))
+#include <bcmstdlib.h> 	/* For wl/exe/GNUmakefile.brcm_wlu and GNUmakefile.wlm_dll */
+#endif
+
+/* Definitions for D11AC capable Chanspec type */
+
+/* Chanspec ASCII representation with 802.11ac capability:
+ * [<band> 'g'] <channel> ['/'<bandwidth> [<ctl-sideband>]['/'<1st80channel>'-'<2nd80channel>]]
+ *
+ * <band>:
+ *      (optional) 2, 3, 4, 5 for 2.4GHz, 3GHz, 4GHz, and 5GHz respectively.
+ *      Default value is 2g if channel <= 14, otherwise 5g.
+ * <channel>:
+ *      channel number of the 5MHz, 10MHz, 20MHz channel,
+ *      or primary channel of 40MHz, 80MHz, 160MHz, or 80+80MHz channel.
+ * <bandwidth>:
+ *      (optional) 5, 10, 20, 40, 80, 160, or 80+80. Default value is 20.
+ * <primary-sideband>:
+ *      (only for 2.4GHz band 40MHz) U for upper sideband primary, L for lower.
+ *
+ *      For 2.4GHz band 40MHz channels, the same primary channel may be the
+ *      upper sideband for one 40MHz channel, and the lower sideband for an
+ *      overlapping 40MHz channel.  The U/L disambiguates which 40MHz channel
+ *      is being specified.
+ *
+ *      For 40MHz in the 5GHz band and all channel bandwidths greater than
+ *      40MHz, the U/L specificaion is not allowed since the channels are
+ *      non-overlapping and the primary sub-band is derived from its
+ *      position in the wide bandwidth channel.
+ *
+ * <1st80Channel>:
+ * <2nd80Channel>:
+ *      Required for 80+80, otherwise not allowed.
+ *      Specifies the center channel of the first and second 80MHz band.
+ *
+ * In its simplest form, it is a 20MHz channel number, with the implied band
+ * of 2.4GHz if channel number <= 14, and 5GHz otherwise.
+ *
+ * To allow for backward compatibility with scripts, the old form for
+ * 40MHz channels is also allowed: <channel><ctl-sideband>
+ *
+ * <channel>:
+ *	primary channel of 40MHz, channel <= 14 is 2GHz, otherwise 5GHz
+ * <ctl-sideband>:
+ * 	"U" for upper, "L" for lower (or lower case "u" "l")
+ *
+ * 5 GHz Examples:
+ *      Chanspec        BW        Center Ch  Channel Range  Primary Ch
+ *      5g8             20MHz     8          -              -
+ *      52              20MHz     52         -              -
+ *      52/40           40MHz     54         52-56          52
+ *      56/40           40MHz     54         52-56          56
+ *      52/80           80MHz     58         52-64          52
+ *      56/80           80MHz     58         52-64          56
+ *      60/80           80MHz     58         52-64          60
+ *      64/80           80MHz     58         52-64          64
+ *      52/160          160MHz    50         36-64          52
+ *      36/160          160MGz    50         36-64          36
+ *      36/80+80/42-106 80+80MHz  42,106     36-48,100-112  36
+ *
+ * 2 GHz Examples:
+ *      Chanspec        BW        Center Ch  Channel Range  Primary Ch
+ *      2g8             20MHz     8          -              -
+ *      8               20MHz     8          -              -
+ *      6               20MHz     6          -              -
+ *      6/40l           40MHz     8          6-10           6
+ *      6l              40MHz     8          6-10           6
+ *      6/40u           40MHz     4          2-6            6
+ *      6u              40MHz     4          2-6            6
+ */
+
+/* bandwidth ASCII string */
+static const char *wf_chspec_bw_str[] =
+{
+	"5",
+	"10",
+	"20",
+	"40",
+	"80",
+	"160",
+	"80+80",
+	"na"
+};
+
+static const uint8 wf_chspec_bw_mhz[] =
+{5, 10, 20, 40, 80, 160, 160};
+
+#define WF_NUM_BW \
+	(sizeof(wf_chspec_bw_mhz)/sizeof(uint8))
+
+/* 40MHz channels in 5GHz band */
+static const uint8 wf_5g_40m_chans[] =
+{38, 46, 54, 62, 102, 110, 118, 126, 134, 142, 151, 159};
+#define WF_NUM_5G_40M_CHANS \
+	(sizeof(wf_5g_40m_chans)/sizeof(uint8))
+
+/* 80MHz channels in 5GHz band */
+static const uint8 wf_5g_80m_chans[] =
+{42, 58, 106, 122, 138, 155};
+#define WF_NUM_5G_80M_CHANS \
+	(sizeof(wf_5g_80m_chans)/sizeof(uint8))
+
+/* 160MHz channels in 5GHz band */
+static const uint8 wf_5g_160m_chans[] =
+{50, 114};
+#define WF_NUM_5G_160M_CHANS \
+	(sizeof(wf_5g_160m_chans)/sizeof(uint8))
+
+
+/* convert bandwidth from chanspec to MHz */
+static uint
+bw_chspec_to_mhz(chanspec_t chspec)
+{
+	uint bw;
+
+	bw = (chspec & WL_CHANSPEC_BW_MASK) >> WL_CHANSPEC_BW_SHIFT;
+	return (bw >= WF_NUM_BW ? 0 : wf_chspec_bw_mhz[bw]);
+}
+
+/* bw in MHz, return the channel count from the center channel to the
+ * the channel at the edge of the band
+ */
+static uint8
+center_chan_to_edge(uint bw)
+{
+	/* edge channels separated by BW - 10MHz on each side
+	 * delta from cf to edge is half of that,
+	 * MHz to channel num conversion is 5MHz/channel
+	 */
+	return (uint8)(((bw - 20) / 2) / 5);
+}
+
+/* return channel number of the low edge of the band
+ * given the center channel and BW
+ */
+static uint8
+channel_low_edge(uint center_ch, uint bw)
+{
+	return (uint8)(center_ch - center_chan_to_edge(bw));
+}
+
+/* return side band number given center channel and control channel
+ * return -1 on error
+ */
+static int
+channel_to_sb(uint center_ch, uint ctl_ch, uint bw)
+{
+	uint lowest = channel_low_edge(center_ch, bw);
+	uint sb;
+
+	if ((ctl_ch - lowest) % 4) {
+		/* bad ctl channel, not mult 4 */
+		return -1;
+	}
+
+	sb = ((ctl_ch - lowest) / 4);
+
+	/* sb must be a index to a 20MHz channel in range */
+	if (sb >= (bw / 20)) {
+		/* ctl_ch must have been too high for the center_ch */
+		return -1;
+	}
+
+	return sb;
+}
+
+/* return control channel given center channel and side band */
+static uint8
+channel_to_ctl_chan(uint center_ch, uint bw, uint sb)
+{
+	return (uint8)(channel_low_edge(center_ch, bw) + sb * 4);
+}
+
+/* return index of 80MHz channel from channel number
+ * return -1 on error
+ */
+static int
+channel_80mhz_to_id(uint ch)
+{
+	uint i;
+	for (i = 0; i < WF_NUM_5G_80M_CHANS; i ++) {
+		if (ch == wf_5g_80m_chans[i])
+			return i;
+	}
+
+	return -1;
+}
+
+/* wrapper function for wf_chspec_ntoa. In case of an error it puts
+ * the original chanspec in the output buffer, prepended with "invalid".
+ * Can be directly used in print routines as it takes care of null
+ */
+char *
+wf_chspec_ntoa_ex(chanspec_t chspec, char *buf)
+{
+	if (wf_chspec_ntoa(chspec, buf) == NULL)
+		snprintf(buf, CHANSPEC_STR_LEN, "invalid 0x%04x", chspec);
+	return buf;
+}
+
+/* given a chanspec and a string buffer, format the chanspec as a
+ * string, and return the original pointer a.
+ * Min buffer length must be CHANSPEC_STR_LEN.
+ * On error return NULL
+ */
+char *
+wf_chspec_ntoa(chanspec_t chspec, char *buf)
+{
+	const char *band;
+	uint ctl_chan;
+
+	if (wf_chspec_malformed(chspec))
+		return NULL;
+
+	band = "";
+
+	/* check for non-default band spec */
+	if ((CHSPEC_IS2G(chspec) && CHSPEC_CHANNEL(chspec) > CH_MAX_2G_CHANNEL) ||
+	    (CHSPEC_IS5G(chspec) && CHSPEC_CHANNEL(chspec) <= CH_MAX_2G_CHANNEL))
+		band = (CHSPEC_IS2G(chspec)) ? "2g" : "5g";
+
+	/* ctl channel */
+	ctl_chan = wf_chspec_ctlchan(chspec);
+
+	/* bandwidth and ctl sideband */
+	if (CHSPEC_IS20(chspec)) {
+		snprintf(buf, CHANSPEC_STR_LEN, "%s%d", band, ctl_chan);
+	} else if (!CHSPEC_IS8080(chspec)) {
+		const char *bw;
+		const char *sb = "";
+
+		bw = wf_chspec_bw_str[(chspec & WL_CHANSPEC_BW_MASK) >> WL_CHANSPEC_BW_SHIFT];
+
+#ifdef CHANSPEC_NEW_40MHZ_FORMAT
+		/* ctl sideband string if needed for 2g 40MHz */
+		if (CHSPEC_IS40(chspec) && CHSPEC_IS2G(chspec)) {
+			sb = CHSPEC_SB_UPPER(chspec) ? "u" : "l";
+		}
+
+		snprintf(buf, CHANSPEC_STR_LEN, "%s%d/%s%s", band, ctl_chan, bw, sb);
+#else
+		/* ctl sideband string instead of BW for 40MHz */
+		if (CHSPEC_IS40(chspec)) {
+			sb = CHSPEC_SB_UPPER(chspec) ? "u" : "l";
+			snprintf(buf, CHANSPEC_STR_LEN, "%s%d%s", band, ctl_chan, sb);
+		} else {
+			snprintf(buf, CHANSPEC_STR_LEN, "%s%d/%s", band, ctl_chan, bw);
+		}
+#endif /* CHANSPEC_NEW_40MHZ_FORMAT */
+
+	} else {
+		/* 80+80 */
+		uint chan1 = (chspec & WL_CHANSPEC_CHAN1_MASK) >> WL_CHANSPEC_CHAN1_SHIFT;
+		uint chan2 = (chspec & WL_CHANSPEC_CHAN2_MASK) >> WL_CHANSPEC_CHAN2_SHIFT;
+
+		/* convert to channel number */
+		chan1 = (chan1 < WF_NUM_5G_80M_CHANS) ? wf_5g_80m_chans[chan1] : 0;
+		chan2 = (chan2 < WF_NUM_5G_80M_CHANS) ? wf_5g_80m_chans[chan2] : 0;
+
+		/* Outputs a max of CHANSPEC_STR_LEN chars including '\0'  */
+		snprintf(buf, CHANSPEC_STR_LEN, "%d/80+80/%d-%d", ctl_chan, chan1, chan2);
+	}
+
+	return (buf);
+}
+
+static int
+read_uint(const char **p, unsigned int *num)
+{
+	unsigned long val;
+	char *endp = NULL;
+
+	val = strtoul(*p, &endp, 10);
+	/* if endp is the initial pointer value, then a number was not read */
+	if (endp == *p)
+		return 0;
+
+	/* advance the buffer pointer to the end of the integer string */
+	*p = endp;
+	/* return the parsed integer */
+	*num = (unsigned int)val;
+
+	return 1;
+}
+
+/* given a chanspec string, convert to a chanspec.
+ * On error return 0
+ */
+chanspec_t
+wf_chspec_aton(const char *a)
+{
+	chanspec_t chspec;
+	uint chspec_ch, chspec_band, bw, chspec_bw, chspec_sb;
+	uint num, ctl_ch;
+	uint ch1, ch2;
+	char c, sb_ul = '\0';
+	int i;
+
+	bw = 20;
+	chspec_sb = 0;
+	chspec_ch = ch1 = ch2 = 0;
+
+	/* parse channel num or band */
+	if (!read_uint(&a, &num))
+		return 0;
+
+	/* if we are looking at a 'g', then the first number was a band */
+	c = tolower((int)a[0]);
+	if (c == 'g') {
+		a ++; /* consume the char */
+
+		/* band must be "2" or "5" */
+		if (num == 2)
+			chspec_band = WL_CHANSPEC_BAND_2G;
+		else if (num == 5)
+			chspec_band = WL_CHANSPEC_BAND_5G;
+		else
+			return 0;
+
+		/* read the channel number */
+		if (!read_uint(&a, &ctl_ch))
+			return 0;
+
+		c = tolower((int)a[0]);
+	}
+	else {
+		/* first number is channel, use default for band */
+		ctl_ch = num;
+		chspec_band = ((ctl_ch <= CH_MAX_2G_CHANNEL) ?
+		               WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G);
+	}
+
+	if (c == '\0') {
+		/* default BW of 20MHz */
+		chspec_bw = WL_CHANSPEC_BW_20;
+		goto done_read;
+	}
+
+	a ++; /* consume the 'u','l', or '/' */
+
+	/* check 'u'/'l' */
+	if (c == 'u' || c == 'l') {
+		sb_ul = c;
+		chspec_bw = WL_CHANSPEC_BW_40;
+		goto done_read;
+	}
+
+	/* next letter must be '/' */
+	if (c != '/')
+		return 0;
+
+	/* read bandwidth */
+	if (!read_uint(&a, &bw))
+		return 0;
+
+	/* convert to chspec value */
+	if (bw == 20) {
+		chspec_bw = WL_CHANSPEC_BW_20;
+	} else if (bw == 40) {
+		chspec_bw = WL_CHANSPEC_BW_40;
+	} else if (bw == 80) {
+		chspec_bw = WL_CHANSPEC_BW_80;
+	} else if (bw == 160) {
+		chspec_bw = WL_CHANSPEC_BW_160;
+	} else {
+		return 0;
+	}
+
+	/* So far we have <band>g<chan>/<bw>
+	 * Can now be followed by u/l if bw = 40,
+	 * or '+80' if bw = 80, to make '80+80' bw.
+	 */
+
+	c = tolower((int)a[0]);
+
+	/* if we have a 2g/40 channel, we should have a l/u spec now */
+	if (chspec_band == WL_CHANSPEC_BAND_2G && bw == 40) {
+		if (c == 'u' || c == 'l') {
+			a ++; /* consume the u/l char */
+			sb_ul = c;
+			goto done_read;
+		}
+	}
+
+	/* check for 80+80 */
+	if (c == '+') {
+		/* 80+80 */
+		static const char *plus80 = "80/";
+
+		/* must be looking at '+80/'
+		 * check and consume this string.
+		 */
+		chspec_bw = WL_CHANSPEC_BW_8080;
+
+		a ++; /* consume the char '+' */
+
+		/* consume the '80/' string */
+		for (i = 0; i < 3; i++) {
+			if (*a++ != *plus80++) {
+				return 0;
+			}
+		}
+
+		/* read primary 80MHz channel */
+		if (!read_uint(&a, &ch1))
+			return 0;
+
+		/* must followed by '-' */
+		if (a[0] != '-')
+			return 0;
+		a ++; /* consume the char */
+
+		/* read secondary 80MHz channel */
+		if (!read_uint(&a, &ch2))
+			return 0;
+	}
+
+done_read:
+	/* skip trailing white space */
+	while (a[0] == ' ') {
+		a ++;
+	}
+
+	/* must be end of string */
+	if (a[0] != '\0')
+		return 0;
+
+	/* Now have all the chanspec string parts read;
+	 * chspec_band, ctl_ch, chspec_bw, sb_ul, ch1, ch2.
+	 * chspec_band and chspec_bw are chanspec values.
+	 * Need to convert ctl_ch, sb_ul, and ch1,ch2 into
+	 * a center channel (or two) and sideband.
+	 */
+
+	/* if a sb u/l string was given, just use that,
+	 * guaranteed to be bw = 40 by sting parse.
+	 */
+	if (sb_ul != '\0') {
+		if (sb_ul == 'l') {
+			chspec_ch = UPPER_20_SB(ctl_ch);
+			chspec_sb = WL_CHANSPEC_CTL_SB_LLL;
+		} else if (sb_ul == 'u') {
+			chspec_ch = LOWER_20_SB(ctl_ch);
+			chspec_sb = WL_CHANSPEC_CTL_SB_LLU;
+		}
+	}
+	/* if the bw is 20, center and sideband are trivial */
+	else if (chspec_bw == WL_CHANSPEC_BW_20) {
+		chspec_ch = ctl_ch;
+		chspec_sb = WL_CHANSPEC_CTL_SB_NONE;
+	}
+	/* if the bw is 40/80/160, not 80+80, a single method
+	 * can be used to to find the center and sideband
+	 */
+	else if (chspec_bw != WL_CHANSPEC_BW_8080) {
+		/* figure out ctl sideband based on ctl channel and bandwidth */
+		const uint8 *center_ch = NULL;
+		int num_ch = 0;
+		int sb = -1;
+
+		if (chspec_bw == WL_CHANSPEC_BW_40) {
+			center_ch = wf_5g_40m_chans;
+			num_ch = WF_NUM_5G_40M_CHANS;
+		} else if (chspec_bw == WL_CHANSPEC_BW_80) {
+			center_ch = wf_5g_80m_chans;
+			num_ch = WF_NUM_5G_80M_CHANS;
+		} else if (chspec_bw == WL_CHANSPEC_BW_160) {
+			center_ch = wf_5g_160m_chans;
+			num_ch = WF_NUM_5G_160M_CHANS;
+		} else {
+			return 0;
+		}
+
+		for (i = 0; i < num_ch; i ++) {
+			sb = channel_to_sb(center_ch[i], ctl_ch, bw);
+			if (sb >= 0) {
+				chspec_ch = center_ch[i];
+				chspec_sb = sb << WL_CHANSPEC_CTL_SB_SHIFT;
+				break;
+			}
+		}
+
+		/* check for no matching sb/center */
+		if (sb < 0) {
+			return 0;
+		}
+	}
+	/* Otherwise, bw is 80+80. Figure out channel pair and sb */
+	else {
+		int ch1_id = 0, ch2_id = 0;
+		int sb;
+
+		/* look up the channel ID for the specified channel numbers */
+		ch1_id = channel_80mhz_to_id(ch1);
+		ch2_id = channel_80mhz_to_id(ch2);
+
+		/* validate channels */
+		if (ch1_id < 0 || ch2_id < 0)
+			return 0;
+
+		/* combine 2 channel IDs in channel field of chspec */
+		chspec_ch = (((uint)ch1_id << WL_CHANSPEC_CHAN1_SHIFT) |
+		             ((uint)ch2_id << WL_CHANSPEC_CHAN2_SHIFT));
+
+		/* figure out primary 20 MHz sideband */
+
+		/* is the primary channel contained in the 1st 80MHz channel? */
+		sb = channel_to_sb(ch1, ctl_ch, bw);
+		if (sb < 0) {
+			/* no match for primary channel 'ctl_ch' in segment0 80MHz channel */
+			return 0;
+		}
+
+		chspec_sb = sb << WL_CHANSPEC_CTL_SB_SHIFT;
+	}
+
+	chspec = (chspec_ch | chspec_band | chspec_bw | chspec_sb);
+
+	if (wf_chspec_malformed(chspec))
+		return 0;
+
+	return chspec;
+}
+
+/*
+ * Verify the chanspec is using a legal set of parameters, i.e. that the
+ * chanspec specified a band, bw, ctl_sb and channel and that the
+ * combination could be legal given any set of circumstances.
+ * RETURNS: TRUE is the chanspec is malformed, false if it looks good.
+ */
+bool
+wf_chspec_malformed(chanspec_t chanspec)
+{
+	uint chspec_bw = CHSPEC_BW(chanspec);
+	uint chspec_ch = CHSPEC_CHANNEL(chanspec);
+
+	/* must be 2G or 5G band */
+	if (CHSPEC_IS2G(chanspec)) {
+		/* must be valid bandwidth */
+		if (chspec_bw != WL_CHANSPEC_BW_20 &&
+		    chspec_bw != WL_CHANSPEC_BW_40) {
+			return TRUE;
+		}
+	} else if (CHSPEC_IS5G(chanspec)) {
+		if (chspec_bw == WL_CHANSPEC_BW_8080) {
+			uint ch1_id, ch2_id;
+
+			/* channel IDs in 80+80 must be in range */
+			ch1_id = CHSPEC_CHAN1(chanspec);
+			ch2_id = CHSPEC_CHAN2(chanspec);
+			if (ch1_id >= WF_NUM_5G_80M_CHANS || ch2_id >= WF_NUM_5G_80M_CHANS)
+				return TRUE;
+
+		} else if (chspec_bw == WL_CHANSPEC_BW_20 || chspec_bw == WL_CHANSPEC_BW_40 ||
+		           chspec_bw == WL_CHANSPEC_BW_80 || chspec_bw == WL_CHANSPEC_BW_160) {
+
+			if (chspec_ch > MAXCHANNEL) {
+				return TRUE;
+			}
+		} else {
+			/* invalid bandwidth */
+			return TRUE;
+		}
+	} else {
+		/* must be 2G or 5G band */
+		return TRUE;
+	}
+
+	/* side band needs to be consistent with bandwidth */
+	if (chspec_bw == WL_CHANSPEC_BW_20) {
+		if (CHSPEC_CTL_SB(chanspec) != WL_CHANSPEC_CTL_SB_LLL)
+			return TRUE;
+	} else if (chspec_bw == WL_CHANSPEC_BW_40) {
+		if (CHSPEC_CTL_SB(chanspec) > WL_CHANSPEC_CTL_SB_LLU)
+			return TRUE;
+	} else if (chspec_bw == WL_CHANSPEC_BW_80 ||
+	           chspec_bw == WL_CHANSPEC_BW_8080) {
+		if (CHSPEC_CTL_SB(chanspec) > WL_CHANSPEC_CTL_SB_LUU)
+			return TRUE;
+	}
+	else if (chspec_bw == WL_CHANSPEC_BW_160) {
+		ASSERT(CHSPEC_CTL_SB(chanspec) <= WL_CHANSPEC_CTL_SB_UUU);
+	}
+	return FALSE;
+}
+
+/*
+ * Verify the chanspec specifies a valid channel according to 802.11.
+ * RETURNS: TRUE if the chanspec is a valid 802.11 channel
+ */
+bool
+wf_chspec_valid(chanspec_t chanspec)
+{
+	uint chspec_bw = CHSPEC_BW(chanspec);
+	uint chspec_ch = CHSPEC_CHANNEL(chanspec);
+
+	if (wf_chspec_malformed(chanspec))
+		return FALSE;
+
+	if (CHSPEC_IS2G(chanspec)) {
+		/* must be valid bandwidth and channel range */
+		if (chspec_bw == WL_CHANSPEC_BW_20) {
+			if (chspec_ch >= 1 && chspec_ch <= 14)
+				return TRUE;
+		} else if (chspec_bw == WL_CHANSPEC_BW_40) {
+			if (chspec_ch >= 3 && chspec_ch <= 11)
+				return TRUE;
+		}
+	} else if (CHSPEC_IS5G(chanspec)) {
+		if (chspec_bw == WL_CHANSPEC_BW_8080) {
+			uint16 ch1, ch2;
+
+			ch1 = wf_5g_80m_chans[CHSPEC_CHAN1(chanspec)];
+			ch2 = wf_5g_80m_chans[CHSPEC_CHAN2(chanspec)];
+
+			/* the two channels must be separated by more than 80MHz by VHT req */
+			if ((ch2 > ch1 + CH_80MHZ_APART) ||
+			    (ch1 > ch2 + CH_80MHZ_APART))
+				return TRUE;
+		} else {
+			const uint8 *center_ch;
+			uint num_ch, i;
+
+			if (chspec_bw == WL_CHANSPEC_BW_20 || chspec_bw == WL_CHANSPEC_BW_40) {
+				center_ch = wf_5g_40m_chans;
+				num_ch = WF_NUM_5G_40M_CHANS;
+			} else if (chspec_bw == WL_CHANSPEC_BW_80) {
+				center_ch = wf_5g_80m_chans;
+				num_ch = WF_NUM_5G_80M_CHANS;
+			} else if (chspec_bw == WL_CHANSPEC_BW_160) {
+				center_ch = wf_5g_160m_chans;
+				num_ch = WF_NUM_5G_160M_CHANS;
+			} else {
+				/* invalid bandwidth */
+				return FALSE;
+			}
+
+			/* check for a valid center channel */
+			if (chspec_bw == WL_CHANSPEC_BW_20) {
+				/* We don't have an array of legal 20MHz 5G channels, but they are
+				 * each side of the legal 40MHz channels.  Check the chanspec
+				 * channel against either side of the 40MHz channels.
+				 */
+				for (i = 0; i < num_ch; i ++) {
+					if (chspec_ch == (uint)LOWER_20_SB(center_ch[i]) ||
+					    chspec_ch == (uint)UPPER_20_SB(center_ch[i]))
+						break; /* match found */
+				}
+
+				if (i == num_ch) {
+					/* check for channel 165 which is not the side band
+					 * of 40MHz 5G channel
+					 */
+					if (chspec_ch == 165)
+						i = 0;
+
+					/* check for legacy JP channels on failure */
+					if (chspec_ch == 34 || chspec_ch == 38 ||
+					    chspec_ch == 42 || chspec_ch == 46)
+						i = 0;
+				}
+			} else {
+				/* check the chanspec channel to each legal channel */
+				for (i = 0; i < num_ch; i ++) {
+					if (chspec_ch == center_ch[i])
+						break; /* match found */
+				}
+			}
+
+			if (i < num_ch) {
+				/* match found */
+				return TRUE;
+			}
+		}
+	}
+
+	return FALSE;
+}
+
+/*
+ * This function returns the channel number that control traffic is being sent on, for 20MHz
+ * channels this is just the channel number, for 40MHZ, 80MHz, 160MHz channels it is the 20MHZ
+ * sideband depending on the chanspec selected
+ */
+uint8
+wf_chspec_ctlchan(chanspec_t chspec)
+{
+	uint center_chan;
+	uint bw_mhz;
+	uint sb;
+
+	ASSERT(!wf_chspec_malformed(chspec));
+
+	/* Is there a sideband ? */
+	if (CHSPEC_IS20(chspec)) {
+		return CHSPEC_CHANNEL(chspec);
+	} else {
+		sb = CHSPEC_CTL_SB(chspec) >> WL_CHANSPEC_CTL_SB_SHIFT;
+
+		if (CHSPEC_IS8080(chspec)) {
+			/* For an 80+80 MHz channel, the sideband 'sb' field is an 80 MHz sideband
+			 * (LL, LU, UL, LU) for the 80 MHz frequency segment 0.
+			 */
+			uint chan_id = CHSPEC_CHAN1(chspec);
+
+			bw_mhz = 80;
+
+			/* convert from channel index to channel number */
+			center_chan = wf_5g_80m_chans[chan_id];
+		}
+		else {
+			bw_mhz = bw_chspec_to_mhz(chspec);
+			center_chan = CHSPEC_CHANNEL(chspec) >> WL_CHANSPEC_CHAN_SHIFT;
+		}
+
+		return (channel_to_ctl_chan(center_chan, bw_mhz, sb));
+	}
+}
+
+/* given a chanspec, return the bandwidth string */
+char *
+wf_chspec_to_bw_str(chanspec_t chspec)
+{
+	return (char *)wf_chspec_bw_str[(CHSPEC_BW(chspec) >> WL_CHANSPEC_BW_SHIFT)];
+}
+
+/*
+ * This function returns the chanspec of the control channel of a given chanspec
+ */
+chanspec_t
+wf_chspec_ctlchspec(chanspec_t chspec)
+{
+	chanspec_t ctl_chspec = chspec;
+	uint8 ctl_chan;
+
+	ASSERT(!wf_chspec_malformed(chspec));
+
+	/* Is there a sideband ? */
+	if (!CHSPEC_IS20(chspec)) {
+		ctl_chan = wf_chspec_ctlchan(chspec);
+		ctl_chspec = ctl_chan | WL_CHANSPEC_BW_20;
+		ctl_chspec |= CHSPEC_BAND(chspec);
+	}
+	return ctl_chspec;
+}
+
+/* return chanspec given control channel and bandwidth
+ * return 0 on error
+ */
+uint16
+wf_channel2chspec(uint ctl_ch, uint bw)
+{
+	uint16 chspec;
+	const uint8 *center_ch = NULL;
+	int num_ch = 0;
+	int sb = -1;
+	int i = 0;
+
+	chspec = ((ctl_ch <= CH_MAX_2G_CHANNEL) ? WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G);
+
+	chspec |= bw;
+
+	if (bw == WL_CHANSPEC_BW_40) {
+		center_ch = wf_5g_40m_chans;
+		num_ch = WF_NUM_5G_40M_CHANS;
+		bw = 40;
+	} else if (bw == WL_CHANSPEC_BW_80) {
+		center_ch = wf_5g_80m_chans;
+		num_ch = WF_NUM_5G_80M_CHANS;
+		bw = 80;
+	} else if (bw == WL_CHANSPEC_BW_160) {
+		center_ch = wf_5g_160m_chans;
+		num_ch = WF_NUM_5G_160M_CHANS;
+		bw = 160;
+	} else if (bw == WL_CHANSPEC_BW_20) {
+		chspec |= ctl_ch;
+		return chspec;
+	} else {
+		return 0;
+	}
+
+	for (i = 0; i < num_ch; i ++) {
+		sb = channel_to_sb(center_ch[i], ctl_ch, bw);
+		if (sb >= 0) {
+			chspec |= center_ch[i];
+			chspec |= (sb << WL_CHANSPEC_CTL_SB_SHIFT);
+			break;
+		}
+	}
+
+	/* check for no matching sb/center */
+	if (sb < 0) {
+		return 0;
+	}
+
+	return chspec;
+}
+
+/*
+ * This function returns the chanspec for the primary 40MHz of an 80MHz channel.
+ * The control sideband specifies the same 20MHz channel that the 80MHz channel is using
+ * as the primary 20MHz channel.
+ */
+extern chanspec_t wf_chspec_primary40_chspec(chanspec_t chspec)
+{
+	chanspec_t chspec40 = chspec;
+	uint center_chan;
+	uint sb;
+
+	ASSERT(!wf_chspec_malformed(chspec));
+
+	/* if the chanspec is > 80MHz, use the helper routine to find the primary 80 MHz channel */
+	if (CHSPEC_IS8080(chspec) || CHSPEC_IS160(chspec)) {
+		chspec = wf_chspec_primary80_chspec(chspec);
+	}
+
+	/* determine primary 40 MHz sub-channel of an 80 MHz chanspec */
+	if (CHSPEC_IS80(chspec)) {
+		center_chan = CHSPEC_CHANNEL(chspec);
+		sb = CHSPEC_CTL_SB(chspec);
+
+		if (sb < WL_CHANSPEC_CTL_SB_UL) {
+			/* Primary 40MHz is on lower side */
+			center_chan -= CH_20MHZ_APART;
+			/* sideband bits are the same for LL/LU and L/U */
+		} else {
+			/* Primary 40MHz is on upper side */
+			center_chan += CH_20MHZ_APART;
+			/* sideband bits need to be adjusted by UL offset */
+			sb -= WL_CHANSPEC_CTL_SB_UL;
+		}
+
+		/* Create primary 40MHz chanspec */
+		chspec40 = (WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_40 |
+		            sb | center_chan);
+	}
+
+	return chspec40;
+}
+
+/*
+ * Return the channel number for a given frequency and base frequency.
+ * The returned channel number is relative to the given base frequency.
+ * If the given base frequency is zero, a base frequency of 5 GHz is assumed for
+ * frequencies from 5 - 6 GHz, and 2.407 GHz is assumed for 2.4 - 2.5 GHz.
+ *
+ * Frequency is specified in MHz.
+ * The base frequency is specified as (start_factor * 500 kHz).
+ * Constants WF_CHAN_FACTOR_2_4_G, WF_CHAN_FACTOR_5_G are defined for
+ * 2.4 GHz and 5 GHz bands.
+ *
+ * The returned channel will be in the range [1, 14] in the 2.4 GHz band
+ * and [0, 200] otherwise.
+ * -1 is returned if the start_factor is WF_CHAN_FACTOR_2_4_G and the
+ * frequency is not a 2.4 GHz channel, or if the frequency is not and even
+ * multiple of 5 MHz from the base frequency to the base plus 1 GHz.
+ *
+ * Reference 802.11 REVma, section 17.3.8.3, and 802.11B section 18.4.6.2
+ */
+int
+wf_mhz2channel(uint freq, uint start_factor)
+{
+	int ch = -1;
+	uint base;
+	int offset;
+
+	/* take the default channel start frequency */
+	if (start_factor == 0) {
+		if (freq >= 2400 && freq <= 2500)
+			start_factor = WF_CHAN_FACTOR_2_4_G;
+		else if (freq >= 5000 && freq <= 6000)
+			start_factor = WF_CHAN_FACTOR_5_G;
+	}
+
+	if (freq == 2484 && start_factor == WF_CHAN_FACTOR_2_4_G)
+		return 14;
+
+	base = start_factor / 2;
+
+	/* check that the frequency is in 1GHz range of the base */
+	if ((freq < base) || (freq > base + 1000))
+		return -1;
+
+	offset = freq - base;
+	ch = offset / 5;
+
+	/* check that frequency is a 5MHz multiple from the base */
+	if (offset != (ch * 5))
+		return -1;
+
+	/* restricted channel range check for 2.4G */
+	if (start_factor == WF_CHAN_FACTOR_2_4_G && (ch < 1 || ch > 13))
+		return -1;
+
+	return ch;
+}
+
+/*
+ * Return the center frequency in MHz of the given channel and base frequency.
+ * The channel number is interpreted relative to the given base frequency.
+ *
+ * The valid channel range is [1, 14] in the 2.4 GHz band and [0, 200] otherwise.
+ * The base frequency is specified as (start_factor * 500 kHz).
+ * Constants WF_CHAN_FACTOR_2_4_G, WF_CHAN_FACTOR_4_G, and WF_CHAN_FACTOR_5_G
+ * are defined for 2.4 GHz, 4 GHz, and 5 GHz bands.
+ * The channel range of [1, 14] is only checked for a start_factor of
+ * WF_CHAN_FACTOR_2_4_G (4814 = 2407 * 2).
+ * Odd start_factors produce channels on .5 MHz boundaries, in which case
+ * the answer is rounded down to an integral MHz.
+ * -1 is returned for an out of range channel.
+ *
+ * Reference 802.11 REVma, section 17.3.8.3, and 802.11B section 18.4.6.2
+ */
+int
+wf_channel2mhz(uint ch, uint start_factor)
+{
+	int freq;
+
+	if ((start_factor == WF_CHAN_FACTOR_2_4_G && (ch < 1 || ch > 14)) ||
+	    (ch > 200))
+		freq = -1;
+	else if ((start_factor == WF_CHAN_FACTOR_2_4_G) && (ch == 14))
+		freq = 2484;
+	else
+		freq = ch * 5 + start_factor / 2;
+
+	return freq;
+}
+
+static const uint16 sidebands[] = {
+	WL_CHANSPEC_CTL_SB_LLL, WL_CHANSPEC_CTL_SB_LLU,
+	WL_CHANSPEC_CTL_SB_LUL, WL_CHANSPEC_CTL_SB_LUU,
+	WL_CHANSPEC_CTL_SB_ULL, WL_CHANSPEC_CTL_SB_ULU,
+	WL_CHANSPEC_CTL_SB_UUL, WL_CHANSPEC_CTL_SB_UUU
+};
+
+/*
+ * Returns the chanspec 80Mhz channel corresponding to the following input
+ * parameters
+ *
+ *	primary_channel - primary 20Mhz channel
+ *	center_channel   - center frequecny of the 80Mhz channel
+ *
+ * The center_channel can be one of {42, 58, 106, 122, 138, 155}
+ *
+ * returns INVCHANSPEC in case of error
+ */
+chanspec_t
+wf_chspec_80(uint8 center_channel, uint8 primary_channel)
+{
+
+	chanspec_t chanspec = INVCHANSPEC;
+	chanspec_t chanspec_cur;
+	uint i;
+
+	for (i = 0; i < WF_NUM_SIDEBANDS_80MHZ; i++) {
+		chanspec_cur = CH80MHZ_CHSPEC(center_channel, sidebands[i]);
+		if (primary_channel == wf_chspec_ctlchan(chanspec_cur)) {
+			chanspec = chanspec_cur;
+			break;
+		}
+	}
+	/* If the loop ended early, we are good, otherwise we did not
+	* find a 80MHz chanspec with the given center_channel that had a primary channel
+	*matching the given primary_channel.
+	*/
+	return chanspec;
+}
+
+/*
+ * Returns the 80+80 chanspec corresponding to the following input parameters
+ *
+ *    primary_20mhz - Primary 20 MHz channel
+ *    chan0 - center channel number of one frequency segment
+ *    chan1 - center channel number of the other frequency segment
+ *
+ * Parameters chan0 and chan1 are channel numbers in {42, 58, 106, 122, 138, 155}.
+ * The primary channel must be contained in one of the 80MHz channels. This routine
+ * will determine which frequency segment is the primary 80 MHz segment.
+ *
+ * Returns INVCHANSPEC in case of error.
+ *
+ * Refer to IEEE802.11ac section 22.3.14 "Channelization".
+ */
+chanspec_t
+wf_chspec_get8080_chspec(uint8 primary_20mhz, uint8 chan0, uint8 chan1)
+{
+	int sb = 0;
+	uint16 chanspec = 0;
+	int chan0_id = 0, chan1_id = 0;
+	int seg0, seg1;
+
+	chan0_id = channel_80mhz_to_id(chan0);
+	chan1_id = channel_80mhz_to_id(chan1);
+
+	/* make sure the channel numbers were valid */
+	if (chan0_id == -1 || chan1_id == -1)
+		return INVCHANSPEC;
+
+	/* does the primary channel fit with the 1st 80MHz channel ? */
+	sb = channel_to_sb(chan0, primary_20mhz, 80);
+	if (sb >= 0) {
+		/* yes, so chan0 is frequency segment 0, and chan1 is seg 1 */
+		seg0 = chan0_id;
+		seg1 = chan1_id;
+	} else {
+		/* no, so does the primary channel fit with the 2nd 80MHz channel ? */
+		sb = channel_to_sb(chan1, primary_20mhz, 80);
+		if (sb < 0) {
+			/* no match for ctl_ch to either 80MHz center channel */
+			return INVCHANSPEC;
+		}
+		/* swapped, so chan1 is frequency segment 0, and chan0 is seg 1 */
+		seg0 = chan1_id;
+		seg1 = chan0_id;
+	}
+
+	chanspec = ((seg0 << WL_CHANSPEC_CHAN1_SHIFT) |
+	            (seg1 << WL_CHANSPEC_CHAN2_SHIFT) |
+	            (sb << WL_CHANSPEC_CTL_SB_SHIFT) |
+	            WL_CHANSPEC_BW_8080 |
+	            WL_CHANSPEC_BAND_5G);
+
+	return chanspec;
+}
+
+/*
+ * This function returns the 80Mhz channel for the given id.
+ */
+static uint8
+wf_chspec_get80Mhz_ch(uint8 chan_80Mhz_id)
+{
+	if (chan_80Mhz_id < WF_NUM_5G_80M_CHANS)
+		return wf_5g_80m_chans[chan_80Mhz_id];
+
+	return 0;
+}
+
+/*
+ * Returns the primary 80 Mhz channel for the provided chanspec
+ *
+ *    chanspec - Input chanspec for which the 80MHz primary channel has to be retrieved
+ *
+ *  returns -1 in case the provided channel is 20/40 Mhz chanspec
+ */
+
+uint8
+wf_chspec_primary80_channel(chanspec_t chanspec)
+{
+	uint8 primary80_chan;
+
+	if (CHSPEC_IS80(chanspec))	{
+		primary80_chan = CHSPEC_CHANNEL(chanspec);
+	}
+	else if (CHSPEC_IS8080(chanspec)) {
+		/* Channel ID 1 corresponds to frequency segment 0, the primary 80 MHz segment */
+		primary80_chan = wf_chspec_get80Mhz_ch(CHSPEC_CHAN1(chanspec));
+	}
+	else if (CHSPEC_IS160(chanspec)) {
+		uint8 center_chan = CHSPEC_CHANNEL(chanspec);
+		uint sb = CHSPEC_CTL_SB(chanspec) >> WL_CHANSPEC_CTL_SB_SHIFT;
+
+		/* based on the sb value primary 80 channel can be retrieved
+		 * if sb is in range 0 to 3 the lower band is the 80Mhz primary band
+		 */
+		if (sb < 4) {
+			primary80_chan = center_chan - CH_40MHZ_APART;
+		}
+		/* if sb is in range 4 to 7 the upper band is the 80Mhz primary band */
+		else
+		{
+			primary80_chan = center_chan + CH_40MHZ_APART;
+		}
+	}
+	else {
+		/* for 20 and 40 Mhz */
+		primary80_chan = -1;
+	}
+	return primary80_chan;
+}
+
+/*
+ * Returns the secondary 80 Mhz channel for the provided chanspec
+ *
+ *    chanspec - Input chanspec for which the 80MHz secondary channel has to be retrieved
+ *
+ *  returns -1 in case the provided channel is 20/40/80 Mhz chanspec
+ */
+uint8
+wf_chspec_secondary80_channel(chanspec_t chanspec)
+{
+	uint8 secondary80_chan;
+
+	if (CHSPEC_IS8080(chanspec)) {
+		secondary80_chan = wf_chspec_get80Mhz_ch(CHSPEC_CHAN2(chanspec));
+	}
+	else if (CHSPEC_IS160(chanspec)) {
+		uint8 center_chan = CHSPEC_CHANNEL(chanspec);
+		uint sb = CHSPEC_CTL_SB(chanspec) >> WL_CHANSPEC_CTL_SB_SHIFT;
+
+		/* based on the sb value  secondary 80 channel can be retrieved
+		 * if sb is in range 0 to 3 upper band is the secondary 80Mhz band
+		 */
+		if (sb < 4) {
+			secondary80_chan = center_chan + CH_40MHZ_APART;
+		}
+		/* if sb is in range 4 to 7 the lower band is the secondary 80Mhz band */
+		else
+		{
+			secondary80_chan = center_chan - CH_40MHZ_APART;
+		}
+	}
+	else {
+		/* for 20, 40, and 80 Mhz */
+		secondary80_chan = -1;
+	}
+	return secondary80_chan;
+}
+
+/*
+ * This function returns the chanspec for the primary 80MHz of an 160MHz or 80+80 channel.
+ *
+ *    chanspec - Input chanspec for which the primary 80Mhz chanspec has to be retreived
+ *
+ *  returns the input chanspec in case the provided chanspec is an 80 MHz chanspec
+ *  returns INVCHANSPEC in case the provided channel is 20/40 MHz chanspec
+ */
+chanspec_t
+wf_chspec_primary80_chspec(chanspec_t chspec)
+{
+	chanspec_t chspec80;
+	uint center_chan;
+	uint sb;
+
+	ASSERT(!wf_chspec_malformed(chspec));
+	if (CHSPEC_IS80(chspec)) {
+		chspec80 = chspec;
+	}
+	else if (CHSPEC_IS8080(chspec)) {
+
+		/* Channel ID 1 corresponds to frequency segment 0, the primary 80 MHz segment */
+		center_chan = wf_chspec_get80Mhz_ch(CHSPEC_CHAN1(chspec));
+
+		sb = CHSPEC_CTL_SB(chspec);
+
+		/* Create primary 80MHz chanspec */
+		chspec80 = (WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_80 | sb | center_chan);
+	}
+	else if (CHSPEC_IS160(chspec)) {
+		center_chan = CHSPEC_CHANNEL(chspec);
+		sb = CHSPEC_CTL_SB(chspec);
+
+		if (sb < WL_CHANSPEC_CTL_SB_ULL) {
+			/* Primary 80MHz is on lower side */
+			center_chan -= CH_40MHZ_APART;
+		}
+		else {
+			/* Primary 80MHz is on upper side */
+			center_chan += CH_40MHZ_APART;
+			sb -= WL_CHANSPEC_CTL_SB_ULL;
+		}
+		/* Create primary 80MHz chanspec */
+		chspec80 = (WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_80 | sb | center_chan);
+	}
+	else {
+		chspec80 = INVCHANSPEC;
+	}
+
+	return chspec80;
+}
+
+#ifdef WL11AC_80P80
+uint8
+wf_chspec_channel(chanspec_t chspec)
+{
+	if (CHSPEC_IS8080(chspec)) {
+		return wf_chspec_primary80_channel(chspec);
+	}
+	else {
+		return ((uint8)((chspec) & WL_CHANSPEC_CHAN_MASK));
+	}
+}
+#endif /* WL11AC_80P80 */
diff --git a/drivers/net/wireless/bcmdhd/bcmwifi_channels.h b/drivers/net/wireless/bcmdhd/bcmwifi_channels.h
new file mode 100644
index 0000000..b3a446e
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmwifi_channels.h
@@ -0,0 +1,548 @@
+/*
+ * Misc utility routines for WL and Apps
+ * This header file housing the define and function prototype use by
+ * both the wl driver, tools & Apps.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ *
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ *
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmwifi_channels.h 309193 2012-01-19 00:03:57Z $
+ */
+
+#ifndef	_bcmwifi_channels_h_
+#define	_bcmwifi_channels_h_
+
+
+/* A chanspec holds the channel number, band, bandwidth and control sideband */
+typedef uint16 chanspec_t;
+
+/* channel defines */
+#define CH_UPPER_SB			0x01
+#define CH_LOWER_SB			0x02
+#define CH_EWA_VALID			0x04
+#define CH_80MHZ_APART			16
+#define CH_40MHZ_APART			8
+#define CH_20MHZ_APART			4
+#define CH_10MHZ_APART			2
+#define CH_5MHZ_APART			1	/* 2G band channels are 5 Mhz apart */
+#define CH_MAX_2G_CHANNEL		14	/* Max channel in 2G band */
+#define MAXCHANNEL		224	/* max # supported channels. The max channel no is above,
+					 * this is that + 1 rounded up to a multiple of NBBY (8).
+					 * DO NOT MAKE it > 255: channels are uint8's all over
+					 */
+#define MAXCHANNEL_NUM	(MAXCHANNEL - 1)	/* max channel number */
+
+/* make sure channel num is within valid range */
+#define CH_NUM_VALID_RANGE(ch_num) ((ch_num) > 0 && (ch_num) <= MAXCHANNEL_NUM)
+
+#define CHSPEC_CTLOVLP(sp1, sp2, sep)	(ABS(wf_chspec_ctlchan(sp1) - wf_chspec_ctlchan(sp2)) < \
+				  (sep))
+
+/* All builds use the new 11ac ratespec/chanspec */
+#undef  D11AC_IOTYPES
+#define D11AC_IOTYPES
+
+#define WL_CHANSPEC_CHAN_MASK		0x00ff
+#define WL_CHANSPEC_CHAN_SHIFT		0
+#define WL_CHANSPEC_CHAN1_MASK		0x000f
+#define WL_CHANSPEC_CHAN1_SHIFT		0
+#define WL_CHANSPEC_CHAN2_MASK		0x00f0
+#define WL_CHANSPEC_CHAN2_SHIFT		4
+
+#define WL_CHANSPEC_CTL_SB_MASK		0x0700
+#define WL_CHANSPEC_CTL_SB_SHIFT	8
+#define WL_CHANSPEC_CTL_SB_LLL		0x0000
+#define WL_CHANSPEC_CTL_SB_LLU		0x0100
+#define WL_CHANSPEC_CTL_SB_LUL		0x0200
+#define WL_CHANSPEC_CTL_SB_LUU		0x0300
+#define WL_CHANSPEC_CTL_SB_ULL		0x0400
+#define WL_CHANSPEC_CTL_SB_ULU		0x0500
+#define WL_CHANSPEC_CTL_SB_UUL		0x0600
+#define WL_CHANSPEC_CTL_SB_UUU		0x0700
+#define WL_CHANSPEC_CTL_SB_LL		WL_CHANSPEC_CTL_SB_LLL
+#define WL_CHANSPEC_CTL_SB_LU		WL_CHANSPEC_CTL_SB_LLU
+#define WL_CHANSPEC_CTL_SB_UL		WL_CHANSPEC_CTL_SB_LUL
+#define WL_CHANSPEC_CTL_SB_UU		WL_CHANSPEC_CTL_SB_LUU
+#define WL_CHANSPEC_CTL_SB_L		WL_CHANSPEC_CTL_SB_LLL
+#define WL_CHANSPEC_CTL_SB_U		WL_CHANSPEC_CTL_SB_LLU
+#define WL_CHANSPEC_CTL_SB_LOWER	WL_CHANSPEC_CTL_SB_LLL
+#define WL_CHANSPEC_CTL_SB_UPPER	WL_CHANSPEC_CTL_SB_LLU
+#define WL_CHANSPEC_CTL_SB_NONE		WL_CHANSPEC_CTL_SB_LLL
+
+#define WL_CHANSPEC_BW_MASK		0x3800
+#define WL_CHANSPEC_BW_SHIFT		11
+#define WL_CHANSPEC_BW_5		0x0000
+#define WL_CHANSPEC_BW_10		0x0800
+#define WL_CHANSPEC_BW_20		0x1000
+#define WL_CHANSPEC_BW_40		0x1800
+#define WL_CHANSPEC_BW_80		0x2000
+#define WL_CHANSPEC_BW_160		0x2800
+#define WL_CHANSPEC_BW_8080		0x3000
+
+#define WL_CHANSPEC_BAND_MASK		0xc000
+#define WL_CHANSPEC_BAND_SHIFT		14
+#define WL_CHANSPEC_BAND_2G		0x0000
+#define WL_CHANSPEC_BAND_3G		0x4000
+#define WL_CHANSPEC_BAND_4G		0x8000
+#define WL_CHANSPEC_BAND_5G		0xc000
+#define INVCHANSPEC			255
+
+/* channel defines */
+#define LOWER_20_SB(channel)		(((channel) > CH_10MHZ_APART) ? \
+					((channel) - CH_10MHZ_APART) : 0)
+#define UPPER_20_SB(channel)		(((channel) < (MAXCHANNEL - CH_10MHZ_APART)) ? \
+					((channel) + CH_10MHZ_APART) : 0)
+
+#define LL_20_SB(channel) (((channel) > 3 * CH_10MHZ_APART) ? ((channel) - 3 * CH_10MHZ_APART) : 0)
+#define UU_20_SB(channel) 	(((channel) < (MAXCHANNEL - 3 * CH_10MHZ_APART)) ? \
+				((channel) + 3 * CH_10MHZ_APART) : 0)
+#define LU_20_SB(channel) LOWER_20_SB(channel)
+#define UL_20_SB(channel) UPPER_20_SB(channel)
+
+#define LOWER_40_SB(channel)		((channel) - CH_20MHZ_APART)
+#define UPPER_40_SB(channel)		((channel) + CH_20MHZ_APART)
+#define CHSPEC_WLCBANDUNIT(chspec)	(CHSPEC_IS5G(chspec) ? BAND_5G_INDEX : BAND_2G_INDEX)
+#define CH20MHZ_CHSPEC(channel)		(chanspec_t)((chanspec_t)(channel) | WL_CHANSPEC_BW_20 | \
+					(((channel) <= CH_MAX_2G_CHANNEL) ? \
+					WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G))
+#define NEXT_20MHZ_CHAN(channel)	(((channel) < (MAXCHANNEL - CH_20MHZ_APART)) ? \
+					((channel) + CH_20MHZ_APART) : 0)
+#define CH40MHZ_CHSPEC(channel, ctlsb)	(chanspec_t) \
+					((channel) | (ctlsb) | WL_CHANSPEC_BW_40 | \
+					((channel) <= CH_MAX_2G_CHANNEL ? WL_CHANSPEC_BAND_2G : \
+					WL_CHANSPEC_BAND_5G))
+#define CH80MHZ_CHSPEC(channel, ctlsb)	(chanspec_t) \
+					((channel) | (ctlsb) | \
+					 WL_CHANSPEC_BW_80 | WL_CHANSPEC_BAND_5G)
+#define CH160MHZ_CHSPEC(channel, ctlsb)	(chanspec_t) \
+					((channel) | (ctlsb) | \
+					 WL_CHANSPEC_BW_160 | WL_CHANSPEC_BAND_5G)
+
+/* simple MACROs to get different fields of chanspec */
+#ifdef WL11AC_80P80
+#define CHSPEC_CHANNEL(chspec)	wf_chspec_channel(chspec)
+#else
+#define CHSPEC_CHANNEL(chspec)	((uint8)((chspec) & WL_CHANSPEC_CHAN_MASK))
+#endif
+#define CHSPEC_CHAN1(chspec)	((chspec) & WL_CHANSPEC_CHAN1_MASK) >> WL_CHANSPEC_CHAN1_SHIFT
+#define CHSPEC_CHAN2(chspec)	((chspec) & WL_CHANSPEC_CHAN2_MASK) >> WL_CHANSPEC_CHAN2_SHIFT
+#define CHSPEC_BAND(chspec)		((chspec) & WL_CHANSPEC_BAND_MASK)
+#define CHSPEC_CTL_SB(chspec)	((chspec) & WL_CHANSPEC_CTL_SB_MASK)
+#define CHSPEC_BW(chspec)		((chspec) & WL_CHANSPEC_BW_MASK)
+
+#ifdef WL11N_20MHZONLY
+
+#define CHSPEC_IS10(chspec)	0
+#define CHSPEC_IS20(chspec)	1
+#ifndef CHSPEC_IS40
+#define CHSPEC_IS40(chspec)	0
+#endif
+#ifndef CHSPEC_IS80
+#define CHSPEC_IS80(chspec)	0
+#endif
+#ifndef CHSPEC_IS160
+#define CHSPEC_IS160(chspec)	0
+#endif
+#ifndef CHSPEC_IS8080
+#define CHSPEC_IS8080(chspec)	0
+#endif
+
+#else /* !WL11N_20MHZONLY */
+
+#define CHSPEC_IS10(chspec)	(((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_10)
+#define CHSPEC_IS20(chspec)	(((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_20)
+#ifndef CHSPEC_IS40
+#define CHSPEC_IS40(chspec)	(((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_40)
+#endif
+#ifndef CHSPEC_IS80
+#define CHSPEC_IS80(chspec)	(((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_80)
+#endif
+#ifndef CHSPEC_IS160
+#define CHSPEC_IS160(chspec)	(((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_160)
+#endif
+#ifndef CHSPEC_IS8080
+#define CHSPEC_IS8080(chspec)	(((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_8080)
+#endif
+
+#endif /* !WL11N_20MHZONLY */
+
+#define CHSPEC_IS5G(chspec)	(((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_5G)
+#define CHSPEC_IS2G(chspec)	(((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_2G)
+#define CHSPEC_SB_UPPER(chspec)	\
+	((((chspec) & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_UPPER) && \
+	(((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_40))
+#define CHSPEC_SB_LOWER(chspec)	\
+	((((chspec) & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_LOWER) && \
+	(((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_40))
+#define CHSPEC2WLC_BAND(chspec) (CHSPEC_IS5G(chspec) ? WLC_BAND_5G : WLC_BAND_2G)
+
+/**
+ * Number of chars needed for wf_chspec_ntoa() destination character buffer.
+ */
+#define CHANSPEC_STR_LEN    20
+
+
+#define CHSPEC_IS_BW_160_WIDE(chspec) (CHSPEC_BW(chspec) == WL_CHANSPEC_BW_160 ||\
+	CHSPEC_BW(chspec) == WL_CHANSPEC_BW_8080)
+
+/* BW inequality comparisons, LE (<=), GE (>=), LT (<), GT (>), comparisons can be made
+* as simple numeric comparisons, with the exception that 160 is the same BW as 80+80,
+* but have different numeric values; (WL_CHANSPEC_BW_160 < WL_CHANSPEC_BW_8080).
+*
+* The LT/LE/GT/GE macros check first checks whether both chspec bandwidth and bw are 160 wide.
+* If both chspec bandwidth and bw is not 160 wide, then the comparison is made.
+*/
+#define CHSPEC_BW_GE(chspec, bw) \
+	((CHSPEC_IS_BW_160_WIDE(chspec) &&\
+	(bw == WL_CHANSPEC_BW_160 || bw == WL_CHANSPEC_BW_8080)) ||\
+	(CHSPEC_BW(chspec) >= bw))
+
+#define CHSPEC_BW_LE(chspec, bw) \
+	((CHSPEC_IS_BW_160_WIDE(chspec) &&\
+	(bw == WL_CHANSPEC_BW_160 || bw == WL_CHANSPEC_BW_8080)) ||\
+	(CHSPEC_BW(chspec) <= bw))
+
+#define CHSPEC_BW_GT(chspec, bw) \
+	(!(CHSPEC_IS_BW_160_WIDE(chspec) &&\
+	(bw == WL_CHANSPEC_BW_160 || bw == WL_CHANSPEC_BW_8080)) &&\
+	(CHSPEC_BW(chspec) > bw))
+
+#define CHSPEC_BW_LT(chspec, bw) \
+	(!(CHSPEC_IS_BW_160_WIDE(chspec) &&\
+	(bw == WL_CHANSPEC_BW_160 || bw == WL_CHANSPEC_BW_8080)) &&\
+	(CHSPEC_BW(chspec) < bw))
+
+/* Legacy Chanspec defines
+ * These are the defines for the previous format of the chanspec_t
+ */
+#define WL_LCHANSPEC_CHAN_MASK		0x00ff
+#define WL_LCHANSPEC_CHAN_SHIFT		     0
+
+#define WL_LCHANSPEC_CTL_SB_MASK	0x0300
+#define WL_LCHANSPEC_CTL_SB_SHIFT	     8
+#define WL_LCHANSPEC_CTL_SB_LOWER	0x0100
+#define WL_LCHANSPEC_CTL_SB_UPPER	0x0200
+#define WL_LCHANSPEC_CTL_SB_NONE	0x0300
+
+#define WL_LCHANSPEC_BW_MASK		0x0C00
+#define WL_LCHANSPEC_BW_SHIFT		    10
+#define WL_LCHANSPEC_BW_10		0x0400
+#define WL_LCHANSPEC_BW_20		0x0800
+#define WL_LCHANSPEC_BW_40		0x0C00
+
+#define WL_LCHANSPEC_BAND_MASK		0xf000
+#define WL_LCHANSPEC_BAND_SHIFT		    12
+#define WL_LCHANSPEC_BAND_5G		0x1000
+#define WL_LCHANSPEC_BAND_2G		0x2000
+
+#define LCHSPEC_CHANNEL(chspec)	((uint8)((chspec) & WL_LCHANSPEC_CHAN_MASK))
+#define LCHSPEC_BAND(chspec)	((chspec) & WL_LCHANSPEC_BAND_MASK)
+#define LCHSPEC_CTL_SB(chspec)	((chspec) & WL_LCHANSPEC_CTL_SB_MASK)
+#define LCHSPEC_BW(chspec)	((chspec) & WL_LCHANSPEC_BW_MASK)
+#define LCHSPEC_IS10(chspec)	(((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_10)
+#define LCHSPEC_IS20(chspec)	(((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_20)
+#define LCHSPEC_IS40(chspec)	(((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_40)
+#define LCHSPEC_IS5G(chspec)	(((chspec) & WL_LCHANSPEC_BAND_MASK) == WL_LCHANSPEC_BAND_5G)
+#define LCHSPEC_IS2G(chspec)	(((chspec) & WL_LCHANSPEC_BAND_MASK) == WL_LCHANSPEC_BAND_2G)
+
+#define LCHSPEC_SB_UPPER(chspec)	\
+	((((chspec) & WL_LCHANSPEC_CTL_SB_MASK) == WL_LCHANSPEC_CTL_SB_UPPER) && \
+	(((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_40))
+#define LCHSPEC_SB_LOWER(chspec)	\
+	((((chspec) & WL_LCHANSPEC_CTL_SB_MASK) == WL_LCHANSPEC_CTL_SB_LOWER) && \
+	(((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_40))
+
+#define LCHSPEC_CREATE(chan, band, bw, sb)  ((uint16)((chan) | (sb) | (bw) | (band)))
+
+#define CH20MHZ_LCHSPEC(channel) \
+	(chanspec_t)((chanspec_t)(channel) | WL_LCHANSPEC_BW_20 | \
+	WL_LCHANSPEC_CTL_SB_NONE | (((channel) <= CH_MAX_2G_CHANNEL) ? \
+	WL_LCHANSPEC_BAND_2G : WL_LCHANSPEC_BAND_5G))
+
+/*
+ * WF_CHAN_FACTOR_* constants are used to calculate channel frequency
+ * given a channel number.
+ * chan_freq = chan_factor * 500Mhz + chan_number * 5
+ */
+
+/**
+ * Channel Factor for the starting frequence of 2.4 GHz channels.
+ * The value corresponds to 2407 MHz.
+ */
+#define WF_CHAN_FACTOR_2_4_G		4814	/* 2.4 GHz band, 2407 MHz */
+
+/**
+ * Channel Factor for the starting frequence of 5 GHz channels.
+ * The value corresponds to 5000 MHz.
+ */
+#define WF_CHAN_FACTOR_5_G		10000	/* 5   GHz band, 5000 MHz */
+
+/**
+ * Channel Factor for the starting frequence of 4.9 GHz channels.
+ * The value corresponds to 4000 MHz.
+ */
+#define WF_CHAN_FACTOR_4_G		8000	/* 4.9 GHz band for Japan */
+
+#define WLC_2G_25MHZ_OFFSET		5	/* 2.4GHz band channel offset */
+
+/**
+ *  No of sub-band vlaue of the specified Mhz chanspec
+ */
+#define WF_NUM_SIDEBANDS_40MHZ   2
+#define WF_NUM_SIDEBANDS_80MHZ   4
+#define WF_NUM_SIDEBANDS_8080MHZ 4
+#define WF_NUM_SIDEBANDS_160MHZ  8
+
+/**
+ * Convert chanspec to ascii string
+ *
+ * @param	chspec		chanspec format
+ * @param	buf		ascii string of chanspec
+ *
+ * @return	pointer to buf with room for at least CHANSPEC_STR_LEN bytes
+ *		Original chanspec in case of error
+ *
+ * @see		CHANSPEC_STR_LEN
+ */
+extern char * wf_chspec_ntoa_ex(chanspec_t chspec, char *buf);
+
+/**
+ * Convert chanspec to ascii string
+ *
+ * @param	chspec		chanspec format
+ * @param	buf		ascii string of chanspec
+ *
+ * @return	pointer to buf with room for at least CHANSPEC_STR_LEN bytes
+ *		NULL in case of error
+ *
+ * @see		CHANSPEC_STR_LEN
+ */
+extern char * wf_chspec_ntoa(chanspec_t chspec, char *buf);
+
+/**
+ * Convert ascii string to chanspec
+ *
+ * @param	a     pointer to input string
+ *
+ * @return	>= 0 if successful or 0 otherwise
+ */
+extern chanspec_t wf_chspec_aton(const char *a);
+
+/**
+ * Verify the chanspec fields are valid.
+ *
+ * Verify the chanspec is using a legal set field values, i.e. that the chanspec
+ * specified a band, bw, ctl_sb and channel and that the combination could be
+ * legal given some set of circumstances.
+ *
+ * @param	chanspec   input chanspec to verify
+ *
+ * @return TRUE if the chanspec is malformed, FALSE if it looks good.
+ */
+extern bool wf_chspec_malformed(chanspec_t chanspec);
+
+/**
+ * Verify the chanspec specifies a valid channel according to 802.11.
+ *
+ * @param	chanspec   input chanspec to verify
+ *
+ * @return TRUE if the chanspec is a valid 802.11 channel
+ */
+extern bool wf_chspec_valid(chanspec_t chanspec);
+
+/**
+ * Return the primary (control) channel.
+ *
+ * This function returns the channel number of the primary 20MHz channel. For
+ * 20MHz channels this is just the channel number. For 40MHz or wider channels
+ * it is the primary 20MHz channel specified by the chanspec.
+ *
+ * @param	chspec    input chanspec
+ *
+ * @return Returns the channel number of the primary 20MHz channel
+ */
+extern uint8 wf_chspec_ctlchan(chanspec_t chspec);
+
+/**
+ * Return the bandwidth string.
+ *
+ * This function returns the bandwidth string for the passed chanspec.
+ *
+ * @param	chspec    input chanspec
+ *
+ * @return Returns the bandwidth string
+ */
+extern char * wf_chspec_to_bw_str(chanspec_t chspec);
+
+/**
+ * Return the primary (control) chanspec.
+ *
+ * This function returns the chanspec of the primary 20MHz channel. For 20MHz
+ * channels this is just the chanspec. For 40MHz or wider channels it is the
+ * chanspec of the primary 20MHZ channel specified by the chanspec.
+ *
+ * @param	chspec    input chanspec
+ *
+ * @return Returns the chanspec of the primary 20MHz channel
+ */
+extern chanspec_t wf_chspec_ctlchspec(chanspec_t chspec);
+
+/**
+ * Return a channel number corresponding to a frequency.
+ *
+ * This function returns the chanspec for the primary 40MHz of an 80MHz channel.
+ * The control sideband specifies the same 20MHz channel that the 80MHz channel is using
+ * as the primary 20MHz channel.
+ */
+extern chanspec_t wf_chspec_primary40_chspec(chanspec_t chspec);
+
+/*
+ * Return the channel number for a given frequency and base frequency.
+ * The returned channel number is relative to the given base frequency.
+ * If the given base frequency is zero, a base frequency of 5 GHz is assumed for
+ * frequencies from 5 - 6 GHz, and 2.407 GHz is assumed for 2.4 - 2.5 GHz.
+ *
+ * Frequency is specified in MHz.
+ * The base frequency is specified as (start_factor * 500 kHz).
+ * Constants WF_CHAN_FACTOR_2_4_G, WF_CHAN_FACTOR_5_G are defined for
+ * 2.4 GHz and 5 GHz bands.
+ *
+ * The returned channel will be in the range [1, 14] in the 2.4 GHz band
+ * and [0, 200] otherwise.
+ * -1 is returned if the start_factor is WF_CHAN_FACTOR_2_4_G and the
+ * frequency is not a 2.4 GHz channel, or if the frequency is not and even
+ * multiple of 5 MHz from the base frequency to the base plus 1 GHz.
+ *
+ * Reference 802.11 REVma, section 17.3.8.3, and 802.11B section 18.4.6.2
+ *
+ * @param	freq          frequency in MHz
+ * @param	start_factor  base frequency in 500 kHz units, e.g. 10000 for 5 GHz
+ *
+ * @return Returns a channel number
+ *
+ * @see  WF_CHAN_FACTOR_2_4_G
+ * @see  WF_CHAN_FACTOR_5_G
+ */
+extern int wf_mhz2channel(uint freq, uint start_factor);
+
+/**
+ * Return the center frequency in MHz of the given channel and base frequency.
+ *
+ * Return the center frequency in MHz of the given channel and base frequency.
+ * The channel number is interpreted relative to the given base frequency.
+ *
+ * The valid channel range is [1, 14] in the 2.4 GHz band and [0, 200] otherwise.
+ * The base frequency is specified as (start_factor * 500 kHz).
+ * Constants WF_CHAN_FACTOR_2_4_G, WF_CHAN_FACTOR_5_G are defined for
+ * 2.4 GHz and 5 GHz bands.
+ * The channel range of [1, 14] is only checked for a start_factor of
+ * WF_CHAN_FACTOR_2_4_G (4814).
+ * Odd start_factors produce channels on .5 MHz boundaries, in which case
+ * the answer is rounded down to an integral MHz.
+ * -1 is returned for an out of range channel.
+ *
+ * Reference 802.11 REVma, section 17.3.8.3, and 802.11B section 18.4.6.2
+ *
+ * @param	channel       input channel number
+ * @param	start_factor  base frequency in 500 kHz units, e.g. 10000 for 5 GHz
+ *
+ * @return Returns a frequency in MHz
+ *
+ * @see  WF_CHAN_FACTOR_2_4_G
+ * @see  WF_CHAN_FACTOR_5_G
+ */
+extern int wf_channel2mhz(uint channel, uint start_factor);
+
+/**
+ * Returns the chanspec 80Mhz channel corresponding to the following input
+ * parameters
+ *
+ *	primary_channel - primary 20Mhz channel
+ *	center_channel   - center frequecny of the 80Mhz channel
+ *
+ * The center_channel can be one of {42, 58, 106, 122, 138, 155}
+ *
+ * returns INVCHANSPEC in case of error
+ */
+extern chanspec_t wf_chspec_80(uint8 center_channel, uint8 primary_channel);
+
+/**
+ * Convert ctl chan and bw to chanspec
+ *
+ * @param	ctl_ch		channel
+ * @param	bw	        bandwidth
+ *
+ * @return	> 0 if successful or 0 otherwise
+ *
+ */
+extern uint16 wf_channel2chspec(uint ctl_ch, uint bw);
+
+extern uint wf_channel2freq(uint channel);
+extern uint wf_freq2channel(uint freq);
+
+/*
+ * Returns the 80+80 MHz chanspec corresponding to the following input parameters
+ *
+ *    primary_20mhz - Primary 20 MHz channel
+ *    chan0_80MHz - center channel number of one frequency segment
+ *    chan1_80MHz - center channel number of the other frequency segment
+ *
+ * Parameters chan0_80MHz and chan1_80MHz are channel numbers in {42, 58, 106, 122, 138, 155}.
+ * The primary channel must be contained in one of the 80MHz channels. This routine
+ * will determine which frequency segment is the primary 80 MHz segment.
+ *
+ * Returns INVCHANSPEC in case of error.
+ *
+ * Refer to IEEE802.11ac section 22.3.14 "Channelization".
+ */
+extern chanspec_t wf_chspec_get8080_chspec(uint8 primary_20mhz,
+	uint8 chan0_80Mhz, uint8 chan1_80Mhz);
+
+/*
+ * Returns the primary 80 Mhz channel for the provided chanspec
+ *
+ *    chanspec - Input chanspec for which the 80MHz primary channel has to be retrieved
+ *
+ *  returns -1 in case the provided channel is 20/40 Mhz chanspec
+ */
+extern uint8 wf_chspec_primary80_channel(chanspec_t chanspec);
+
+/*
+ * Returns the secondary 80 Mhz channel for the provided chanspec
+ *
+ *    chanspec - Input chanspec for which the 80MHz secondary channel has to be retrieved
+ *
+ *  returns -1 in case the provided channel is 20/40 Mhz chanspec
+ */
+extern uint8 wf_chspec_secondary80_channel(chanspec_t chanspec);
+
+/*
+ * This function returns the chanspec for the primary 80MHz of an 160MHz or 80+80 channel.
+ */
+extern chanspec_t wf_chspec_primary80_chspec(chanspec_t chspec);
+
+#ifdef WL11AC_80P80
+/*
+ * This function returns the centre chanel for the given chanspec.
+ * In case of 80+80 chanspec it returns the primary 80 Mhz centre channel
+ */
+extern uint8 wf_chspec_channel(chanspec_t chspec);
+#endif
+#endif	/* _bcmwifi_channels_h_ */
diff --git a/drivers/net/wireless/bcmdhd/bcmwifi_rates.h b/drivers/net/wireless/bcmdhd/bcmwifi_rates.h
new file mode 100644
index 0000000..f8983a1
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmwifi_rates.h
@@ -0,0 +1,470 @@
+/*
+ * Indices for 802.11 a/b/g/n/ac 1-3 chain symmetric transmit rates
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmwifi_rates.h 5187 2012-06-29 06:17:50Z $
+ */
+
+#ifndef _bcmwifi_rates_h_
+#define _bcmwifi_rates_h_
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+
+#define WL_RATESET_SZ_DSSS		4
+#define WL_RATESET_SZ_OFDM		8
+#define WL_RATESET_SZ_VHT_MCS	10
+
+#if defined(WLPROPRIETARY_11N_RATES)
+#define WL_RATESET_SZ_HT_MCS	WL_RATESET_SZ_VHT_MCS
+#else
+#define WL_RATESET_SZ_HT_MCS	8
+#endif
+
+#define WL_RATESET_SZ_HT_IOCTL	8	/* MAC histogram, compatibility with wl utility */
+
+#define WL_TX_CHAINS_MAX	3
+
+#define WL_RATE_DISABLED		(-128) /* Power value corresponding to unsupported rate */
+
+/* Transmit channel bandwidths */
+typedef enum wl_tx_bw {
+	WL_TX_BW_20,
+	WL_TX_BW_40,
+	WL_TX_BW_80,
+	WL_TX_BW_20IN40,
+	WL_TX_BW_20IN80,
+	WL_TX_BW_40IN80,
+	WL_TX_BW_160,
+	WL_TX_BW_20IN160,
+	WL_TX_BW_40IN160,
+	WL_TX_BW_80IN160,
+	WL_TX_BW_ALL,
+	WL_TX_BW_8080,
+	WL_TX_BW_8080CHAN2,
+	WL_TX_BW_20IN8080,
+	WL_TX_BW_40IN8080,
+	WL_TX_BW_80IN8080
+} wl_tx_bw_t;
+
+
+/*
+ * Transmit modes.
+ * Not all modes are listed here, only those required for disambiguation. e.g. SPEXP is not listed
+ */
+typedef enum wl_tx_mode {
+	WL_TX_MODE_NONE,
+	WL_TX_MODE_STBC,
+	WL_TX_MODE_CDD,
+	WL_TX_MODE_TXBF,
+	WL_NUM_TX_MODES
+} wl_tx_mode_t;
+
+
+/* Number of transmit chains */
+typedef enum wl_tx_chains {
+	WL_TX_CHAINS_1 = 1,
+	WL_TX_CHAINS_2,
+	WL_TX_CHAINS_3
+} wl_tx_chains_t;
+
+
+/* Number of transmit streams */
+typedef enum wl_tx_nss {
+	WL_TX_NSS_1 = 1,
+	WL_TX_NSS_2,
+	WL_TX_NSS_3
+} wl_tx_nss_t;
+
+
+typedef enum clm_rates {
+	/************
+	* 1 chain  *
+	************
+	*/
+
+	/* 1 Stream */
+	WL_RATE_1X1_DSSS_1         = 0,
+	WL_RATE_1X1_DSSS_2         = 1,
+	WL_RATE_1X1_DSSS_5_5       = 2,
+	WL_RATE_1X1_DSSS_11        = 3,
+
+	WL_RATE_1X1_OFDM_6         = 4,
+	WL_RATE_1X1_OFDM_9         = 5,
+	WL_RATE_1X1_OFDM_12        = 6,
+	WL_RATE_1X1_OFDM_18        = 7,
+	WL_RATE_1X1_OFDM_24        = 8,
+	WL_RATE_1X1_OFDM_36        = 9,
+	WL_RATE_1X1_OFDM_48        = 10,
+	WL_RATE_1X1_OFDM_54        = 11,
+
+	WL_RATE_1X1_MCS0           = 12,
+	WL_RATE_1X1_MCS1           = 13,
+	WL_RATE_1X1_MCS2           = 14,
+	WL_RATE_1X1_MCS3           = 15,
+	WL_RATE_1X1_MCS4           = 16,
+	WL_RATE_1X1_MCS5           = 17,
+	WL_RATE_1X1_MCS6           = 18,
+	WL_RATE_1X1_MCS7           = 19,
+
+	WL_RATE_1X1_VHT0SS1        = 12,
+	WL_RATE_1X1_VHT1SS1        = 13,
+	WL_RATE_1X1_VHT2SS1        = 14,
+	WL_RATE_1X1_VHT3SS1        = 15,
+	WL_RATE_1X1_VHT4SS1        = 16,
+	WL_RATE_1X1_VHT5SS1        = 17,
+	WL_RATE_1X1_VHT6SS1        = 18,
+	WL_RATE_1X1_VHT7SS1        = 19,
+	WL_RATE_1X1_VHT8SS1        = 20,
+	WL_RATE_1X1_VHT9SS1        = 21,
+
+
+	/************
+	* 2 chains *
+	************
+	*/
+
+	/* 1 Stream expanded + 1 */
+	WL_RATE_1X2_DSSS_1         = 22,
+	WL_RATE_1X2_DSSS_2         = 23,
+	WL_RATE_1X2_DSSS_5_5       = 24,
+	WL_RATE_1X2_DSSS_11        = 25,
+
+	WL_RATE_1X2_CDD_OFDM_6     = 26,
+	WL_RATE_1X2_CDD_OFDM_9     = 27,
+	WL_RATE_1X2_CDD_OFDM_12    = 28,
+	WL_RATE_1X2_CDD_OFDM_18    = 29,
+	WL_RATE_1X2_CDD_OFDM_24    = 30,
+	WL_RATE_1X2_CDD_OFDM_36    = 31,
+	WL_RATE_1X2_CDD_OFDM_48    = 32,
+	WL_RATE_1X2_CDD_OFDM_54    = 33,
+
+	WL_RATE_1X2_CDD_MCS0       = 34,
+	WL_RATE_1X2_CDD_MCS1       = 35,
+	WL_RATE_1X2_CDD_MCS2       = 36,
+	WL_RATE_1X2_CDD_MCS3       = 37,
+	WL_RATE_1X2_CDD_MCS4       = 38,
+	WL_RATE_1X2_CDD_MCS5       = 39,
+	WL_RATE_1X2_CDD_MCS6       = 40,
+	WL_RATE_1X2_CDD_MCS7       = 41,
+
+	WL_RATE_1X2_VHT0SS1        = 34,
+	WL_RATE_1X2_VHT1SS1        = 35,
+	WL_RATE_1X2_VHT2SS1        = 36,
+	WL_RATE_1X2_VHT3SS1        = 37,
+	WL_RATE_1X2_VHT4SS1        = 38,
+	WL_RATE_1X2_VHT5SS1        = 39,
+	WL_RATE_1X2_VHT6SS1        = 40,
+	WL_RATE_1X2_VHT7SS1        = 41,
+	WL_RATE_1X2_VHT8SS1        = 42,
+	WL_RATE_1X2_VHT9SS1        = 43,
+
+	/* 2 Streams */
+	WL_RATE_2X2_STBC_MCS0      = 44,
+	WL_RATE_2X2_STBC_MCS1      = 45,
+	WL_RATE_2X2_STBC_MCS2      = 46,
+	WL_RATE_2X2_STBC_MCS3      = 47,
+	WL_RATE_2X2_STBC_MCS4      = 48,
+	WL_RATE_2X2_STBC_MCS5      = 49,
+	WL_RATE_2X2_STBC_MCS6      = 50,
+	WL_RATE_2X2_STBC_MCS7      = 51,
+
+	WL_RATE_2X2_STBC_VHT0SS1   = 44,
+	WL_RATE_2X2_STBC_VHT1SS1   = 45,
+	WL_RATE_2X2_STBC_VHT2SS1   = 46,
+	WL_RATE_2X2_STBC_VHT3SS1   = 47,
+	WL_RATE_2X2_STBC_VHT4SS1   = 48,
+	WL_RATE_2X2_STBC_VHT5SS1   = 49,
+	WL_RATE_2X2_STBC_VHT6SS1   = 50,
+	WL_RATE_2X2_STBC_VHT7SS1   = 51,
+	WL_RATE_2X2_STBC_VHT8SS1   = 52,
+	WL_RATE_2X2_STBC_VHT9SS1   = 53,
+
+	WL_RATE_2X2_SDM_MCS8       = 54,
+	WL_RATE_2X2_SDM_MCS9       = 55,
+	WL_RATE_2X2_SDM_MCS10      = 56,
+	WL_RATE_2X2_SDM_MCS11      = 57,
+	WL_RATE_2X2_SDM_MCS12      = 58,
+	WL_RATE_2X2_SDM_MCS13      = 59,
+	WL_RATE_2X2_SDM_MCS14      = 60,
+	WL_RATE_2X2_SDM_MCS15      = 61,
+
+	WL_RATE_2X2_VHT0SS2        = 54,
+	WL_RATE_2X2_VHT1SS2        = 55,
+	WL_RATE_2X2_VHT2SS2        = 56,
+	WL_RATE_2X2_VHT3SS2        = 57,
+	WL_RATE_2X2_VHT4SS2        = 58,
+	WL_RATE_2X2_VHT5SS2        = 59,
+	WL_RATE_2X2_VHT6SS2        = 60,
+	WL_RATE_2X2_VHT7SS2        = 61,
+	WL_RATE_2X2_VHT8SS2        = 62,
+	WL_RATE_2X2_VHT9SS2        = 63,
+
+	/************
+	* 3 chains *
+	************
+	*/
+
+	/* 1 Stream expanded + 2 */
+	WL_RATE_1X3_DSSS_1         = 64,
+	WL_RATE_1X3_DSSS_2         = 65,
+	WL_RATE_1X3_DSSS_5_5       = 66,
+	WL_RATE_1X3_DSSS_11        = 67,
+
+	WL_RATE_1X3_CDD_OFDM_6     = 68,
+	WL_RATE_1X3_CDD_OFDM_9     = 69,
+	WL_RATE_1X3_CDD_OFDM_12    = 70,
+	WL_RATE_1X3_CDD_OFDM_18    = 71,
+	WL_RATE_1X3_CDD_OFDM_24    = 72,
+	WL_RATE_1X3_CDD_OFDM_36    = 73,
+	WL_RATE_1X3_CDD_OFDM_48    = 74,
+	WL_RATE_1X3_CDD_OFDM_54    = 75,
+
+	WL_RATE_1X3_CDD_MCS0       = 76,
+	WL_RATE_1X3_CDD_MCS1       = 77,
+	WL_RATE_1X3_CDD_MCS2       = 78,
+	WL_RATE_1X3_CDD_MCS3       = 79,
+	WL_RATE_1X3_CDD_MCS4       = 80,
+	WL_RATE_1X3_CDD_MCS5       = 81,
+	WL_RATE_1X3_CDD_MCS6       = 82,
+	WL_RATE_1X3_CDD_MCS7       = 83,
+
+	WL_RATE_1X3_VHT0SS1        = 76,
+	WL_RATE_1X3_VHT1SS1        = 77,
+	WL_RATE_1X3_VHT2SS1        = 78,
+	WL_RATE_1X3_VHT3SS1        = 79,
+	WL_RATE_1X3_VHT4SS1        = 80,
+	WL_RATE_1X3_VHT5SS1        = 81,
+	WL_RATE_1X3_VHT6SS1        = 82,
+	WL_RATE_1X3_VHT7SS1        = 83,
+	WL_RATE_1X3_VHT8SS1        = 84,
+	WL_RATE_1X3_VHT9SS1        = 85,
+
+	/* 2 Streams expanded + 1 */
+	WL_RATE_2X3_STBC_MCS0      = 86,
+	WL_RATE_2X3_STBC_MCS1      = 87,
+	WL_RATE_2X3_STBC_MCS2      = 88,
+	WL_RATE_2X3_STBC_MCS3      = 89,
+	WL_RATE_2X3_STBC_MCS4      = 90,
+	WL_RATE_2X3_STBC_MCS5      = 91,
+	WL_RATE_2X3_STBC_MCS6      = 92,
+	WL_RATE_2X3_STBC_MCS7      = 93,
+
+	WL_RATE_2X3_STBC_VHT0SS1   = 86,
+	WL_RATE_2X3_STBC_VHT1SS1   = 87,
+	WL_RATE_2X3_STBC_VHT2SS1   = 88,
+	WL_RATE_2X3_STBC_VHT3SS1   = 89,
+	WL_RATE_2X3_STBC_VHT4SS1   = 90,
+	WL_RATE_2X3_STBC_VHT5SS1   = 91,
+	WL_RATE_2X3_STBC_VHT6SS1   = 92,
+	WL_RATE_2X3_STBC_VHT7SS1   = 93,
+	WL_RATE_2X3_STBC_VHT8SS1   = 94,
+	WL_RATE_2X3_STBC_VHT9SS1   = 95,
+
+	WL_RATE_2X3_SDM_MCS8       = 96,
+	WL_RATE_2X3_SDM_MCS9       = 97,
+	WL_RATE_2X3_SDM_MCS10      = 98,
+	WL_RATE_2X3_SDM_MCS11      = 99,
+	WL_RATE_2X3_SDM_MCS12      = 100,
+	WL_RATE_2X3_SDM_MCS13      = 101,
+	WL_RATE_2X3_SDM_MCS14      = 102,
+	WL_RATE_2X3_SDM_MCS15      = 103,
+
+	WL_RATE_2X3_VHT0SS2        = 96,
+	WL_RATE_2X3_VHT1SS2        = 97,
+	WL_RATE_2X3_VHT2SS2        = 98,
+	WL_RATE_2X3_VHT3SS2        = 99,
+	WL_RATE_2X3_VHT4SS2        = 100,
+	WL_RATE_2X3_VHT5SS2        = 101,
+	WL_RATE_2X3_VHT6SS2        = 102,
+	WL_RATE_2X3_VHT7SS2        = 103,
+	WL_RATE_2X3_VHT8SS2        = 104,
+	WL_RATE_2X3_VHT9SS2        = 105,
+
+	/* 3 Streams */
+	WL_RATE_3X3_SDM_MCS16      = 106,
+	WL_RATE_3X3_SDM_MCS17      = 107,
+	WL_RATE_3X3_SDM_MCS18      = 108,
+	WL_RATE_3X3_SDM_MCS19      = 109,
+	WL_RATE_3X3_SDM_MCS20      = 110,
+	WL_RATE_3X3_SDM_MCS21      = 111,
+	WL_RATE_3X3_SDM_MCS22      = 112,
+	WL_RATE_3X3_SDM_MCS23      = 113,
+
+	WL_RATE_3X3_VHT0SS3        = 106,
+	WL_RATE_3X3_VHT1SS3        = 107,
+	WL_RATE_3X3_VHT2SS3        = 108,
+	WL_RATE_3X3_VHT3SS3        = 109,
+	WL_RATE_3X3_VHT4SS3        = 110,
+	WL_RATE_3X3_VHT5SS3        = 111,
+	WL_RATE_3X3_VHT6SS3        = 112,
+	WL_RATE_3X3_VHT7SS3        = 113,
+	WL_RATE_3X3_VHT8SS3        = 114,
+	WL_RATE_3X3_VHT9SS3        = 115,
+
+
+	/****************************
+	 * TX Beamforming, 2 chains *
+	 ****************************
+	 */
+
+	/* 1 Stream expanded + 1 */
+
+	WL_RATE_1X2_TXBF_OFDM_6    = 116,
+	WL_RATE_1X2_TXBF_OFDM_9    = 117,
+	WL_RATE_1X2_TXBF_OFDM_12   = 118,
+	WL_RATE_1X2_TXBF_OFDM_18   = 119,
+	WL_RATE_1X2_TXBF_OFDM_24   = 120,
+	WL_RATE_1X2_TXBF_OFDM_36   = 121,
+	WL_RATE_1X2_TXBF_OFDM_48   = 122,
+	WL_RATE_1X2_TXBF_OFDM_54   = 123,
+
+	WL_RATE_1X2_TXBF_MCS0      = 124,
+	WL_RATE_1X2_TXBF_MCS1      = 125,
+	WL_RATE_1X2_TXBF_MCS2      = 126,
+	WL_RATE_1X2_TXBF_MCS3      = 127,
+	WL_RATE_1X2_TXBF_MCS4      = 128,
+	WL_RATE_1X2_TXBF_MCS5      = 129,
+	WL_RATE_1X2_TXBF_MCS6      = 130,
+	WL_RATE_1X2_TXBF_MCS7      = 131,
+
+	WL_RATE_1X2_TXBF_VHT0SS1   = 124,
+	WL_RATE_1X2_TXBF_VHT1SS1   = 125,
+	WL_RATE_1X2_TXBF_VHT2SS1   = 126,
+	WL_RATE_1X2_TXBF_VHT3SS1   = 127,
+	WL_RATE_1X2_TXBF_VHT4SS1   = 128,
+	WL_RATE_1X2_TXBF_VHT5SS1   = 129,
+	WL_RATE_1X2_TXBF_VHT6SS1   = 130,
+	WL_RATE_1X2_TXBF_VHT7SS1   = 131,
+	WL_RATE_1X2_TXBF_VHT8SS1   = 132,
+	WL_RATE_1X2_TXBF_VHT9SS1   = 133,
+
+	/* 2 Streams */
+
+	WL_RATE_2X2_TXBF_SDM_MCS8  = 134,
+	WL_RATE_2X2_TXBF_SDM_MCS9  = 135,
+	WL_RATE_2X2_TXBF_SDM_MCS10 = 136,
+	WL_RATE_2X2_TXBF_SDM_MCS11 = 137,
+	WL_RATE_2X2_TXBF_SDM_MCS12 = 138,
+	WL_RATE_2X2_TXBF_SDM_MCS13 = 139,
+	WL_RATE_2X2_TXBF_SDM_MCS14 = 140,
+	WL_RATE_2X2_TXBF_SDM_MCS15 = 141,
+
+	WL_RATE_2X2_TXBF_VHT0SS2   = 134,
+	WL_RATE_2X2_TXBF_VHT1SS2   = 135,
+	WL_RATE_2X2_TXBF_VHT2SS2   = 136,
+	WL_RATE_2X2_TXBF_VHT3SS2   = 137,
+	WL_RATE_2X2_TXBF_VHT4SS2   = 138,
+	WL_RATE_2X2_TXBF_VHT5SS2   = 139,
+	WL_RATE_2X2_TXBF_VHT6SS2   = 140,
+	WL_RATE_2X2_TXBF_VHT7SS2   = 141,
+
+
+	/****************************
+	 * TX Beamforming, 3 chains *
+	 ****************************
+	 */
+
+	/* 1 Stream expanded + 2 */
+
+	WL_RATE_1X3_TXBF_OFDM_6    = 142,
+	WL_RATE_1X3_TXBF_OFDM_9    = 143,
+	WL_RATE_1X3_TXBF_OFDM_12   = 144,
+	WL_RATE_1X3_TXBF_OFDM_18   = 145,
+	WL_RATE_1X3_TXBF_OFDM_24   = 146,
+	WL_RATE_1X3_TXBF_OFDM_36   = 147,
+	WL_RATE_1X3_TXBF_OFDM_48   = 148,
+	WL_RATE_1X3_TXBF_OFDM_54   = 149,
+
+	WL_RATE_1X3_TXBF_MCS0      = 150,
+	WL_RATE_1X3_TXBF_MCS1      = 151,
+	WL_RATE_1X3_TXBF_MCS2      = 152,
+	WL_RATE_1X3_TXBF_MCS3      = 153,
+	WL_RATE_1X3_TXBF_MCS4      = 154,
+	WL_RATE_1X3_TXBF_MCS5      = 155,
+	WL_RATE_1X3_TXBF_MCS6      = 156,
+	WL_RATE_1X3_TXBF_MCS7      = 157,
+
+	WL_RATE_1X3_TXBF_VHT0SS1   = 150,
+	WL_RATE_1X3_TXBF_VHT1SS1   = 151,
+	WL_RATE_1X3_TXBF_VHT2SS1   = 152,
+	WL_RATE_1X3_TXBF_VHT3SS1   = 153,
+	WL_RATE_1X3_TXBF_VHT4SS1   = 154,
+	WL_RATE_1X3_TXBF_VHT5SS1   = 155,
+	WL_RATE_1X3_TXBF_VHT6SS1   = 156,
+	WL_RATE_1X3_TXBF_VHT7SS1   = 157,
+	WL_RATE_1X3_TXBF_VHT8SS1   = 158,
+	WL_RATE_1X3_TXBF_VHT9SS1   = 159,
+
+	/* 2 Streams expanded + 1 */
+
+	WL_RATE_2X3_TXBF_SDM_MCS8  = 160,
+	WL_RATE_2X3_TXBF_SDM_MCS9  = 161,
+	WL_RATE_2X3_TXBF_SDM_MCS10 = 162,
+	WL_RATE_2X3_TXBF_SDM_MCS11 = 163,
+	WL_RATE_2X3_TXBF_SDM_MCS12 = 164,
+	WL_RATE_2X3_TXBF_SDM_MCS13 = 165,
+	WL_RATE_2X3_TXBF_SDM_MCS14 = 166,
+	WL_RATE_2X3_TXBF_SDM_MCS15 = 167,
+
+	WL_RATE_2X3_TXBF_VHT0SS2   = 160,
+	WL_RATE_2X3_TXBF_VHT1SS2   = 161,
+	WL_RATE_2X3_TXBF_VHT2SS2   = 162,
+	WL_RATE_2X3_TXBF_VHT3SS2   = 163,
+	WL_RATE_2X3_TXBF_VHT4SS2   = 164,
+	WL_RATE_2X3_TXBF_VHT5SS2   = 165,
+	WL_RATE_2X3_TXBF_VHT6SS2   = 166,
+	WL_RATE_2X3_TXBF_VHT7SS2   = 167,
+	WL_RATE_2X3_TXBF_VHT8SS2   = 168,
+	WL_RATE_2X3_TXBF_VHT9SS2   = 169,
+
+	/* 3 Streams */
+
+	WL_RATE_3X3_TXBF_SDM_MCS16 = 170,
+	WL_RATE_3X3_TXBF_SDM_MCS17 = 171,
+	WL_RATE_3X3_TXBF_SDM_MCS18 = 172,
+	WL_RATE_3X3_TXBF_SDM_MCS19 = 173,
+	WL_RATE_3X3_TXBF_SDM_MCS20 = 174,
+	WL_RATE_3X3_TXBF_SDM_MCS21 = 175,
+	WL_RATE_3X3_TXBF_SDM_MCS22 = 176,
+	WL_RATE_3X3_TXBF_SDM_MCS23 = 177,
+
+	WL_RATE_3X3_TXBF_VHT0SS3   = 170,
+	WL_RATE_3X3_TXBF_VHT1SS3   = 171,
+	WL_RATE_3X3_TXBF_VHT2SS3   = 172,
+	WL_RATE_3X3_TXBF_VHT3SS3   = 173,
+	WL_RATE_3X3_TXBF_VHT4SS3   = 174,
+	WL_RATE_3X3_TXBF_VHT5SS3   = 175,
+	WL_RATE_3X3_TXBF_VHT6SS3   = 176,
+	WL_RATE_3X3_TXBF_VHT7SS3   = 177
+} clm_rates_t;
+
+/* Number of rate codes */
+#define WL_NUMRATES 178
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _bcmwifi_rates_h_ */
diff --git a/drivers/net/wireless/bcmdhd/dhd.h b/drivers/net/wireless/bcmdhd/dhd.h
new file mode 100644
index 0000000..de485e7
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd.h
@@ -0,0 +1,1085 @@
+/*
+ * Header file describing the internal (inter-module) DHD interfaces.
+ *
+ * Provides type definitions and function prototypes used to link the
+ * DHD OS, bus, and protocol modules.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ *
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ *
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd.h 474409 2014-05-01 04:27:15Z $
+ */
+
+/****************
+ * Common types *
+ */
+
+#ifndef _dhd_h_
+#define _dhd_h_
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/random.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <asm/uaccess.h>
+#include <asm/unaligned.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_HAS_WAKELOCK)
+#include <linux/wakelock.h>
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined (CONFIG_HAS_WAKELOCK) */
+/* The kernel threading is sdio-specific */
+struct task_struct;
+struct sched_param;
+int setScheduler(struct task_struct *p, int policy, struct sched_param *param);
+int get_scheduler_policy(struct task_struct *p);
+#define MAX_EVENT	16
+
+#define ALL_INTERFACES	0xff
+
+#include <wlioctl.h>
+#include <wlfc_proto.h>
+
+#if defined(BCMWDF)
+#include <wdf.h>
+#include <WdfMiniport.h>
+#endif /* (BCMWDF)  */
+
+#if defined(WL11U)
+#ifndef MFP
+#define MFP /* Applying interaction with MFP by spec HS2.0 REL2 */
+#endif /* MFP */
+#endif /* WL11U */
+
+#if defined(KEEP_ALIVE)
+/* Default KEEP_ALIVE Period is 55 sec to prevent AP from sending Keep Alive probe frame */
+#define KEEP_ALIVE_PERIOD 55000
+#define NULL_PKT_STR	"null_pkt"
+#endif /* KEEP_ALIVE */
+/* Forward decls */
+struct dhd_bus;
+struct dhd_prot;
+struct dhd_info;
+struct dhd_ioctl;
+
+/* The level of bus communication with the dongle */
+enum dhd_bus_state {
+	DHD_BUS_DOWN,		/* Not ready for frame transfers */
+	DHD_BUS_LOAD,		/* Download access only (CPU reset) */
+	DHD_BUS_DATA,		/* Ready for frame transfers */
+	DHD_BUS_SUSPEND,	/* Bus has been suspended */
+};
+
+#define DHD_IF_ROLE_STA(role)	(role == WLC_E_IF_ROLE_STA ||\
+				role == WLC_E_IF_ROLE_P2P_CLIENT)
+
+/* For supporting multiple interfaces */
+#define DHD_MAX_IFS	16
+#define DHD_DEL_IF	-0xE
+#define DHD_BAD_IF	-0xF
+
+enum dhd_op_flags {
+/* Firmware requested operation mode */
+	DHD_FLAG_STA_MODE				= (1 << (0)), /* STA only */
+	DHD_FLAG_HOSTAP_MODE				= (1 << (1)), /* SOFTAP only */
+	DHD_FLAG_P2P_MODE				= (1 << (2)), /* P2P Only */
+	/* STA + P2P */
+	DHD_FLAG_CONCURR_SINGLE_CHAN_MODE = (DHD_FLAG_STA_MODE | DHD_FLAG_P2P_MODE),
+	DHD_FLAG_CONCURR_MULTI_CHAN_MODE		= (1 << (4)), /* STA + P2P */
+	/* Current P2P mode for P2P connection */
+	DHD_FLAG_P2P_GC_MODE				= (1 << (5)),
+	DHD_FLAG_P2P_GO_MODE				= (1 << (6)),
+	DHD_FLAG_MBSS_MODE				= (1 << (7)), /* MBSS in future */
+	DHD_FLAG_IBSS_MODE				= (1 << (8)),
+	DHD_FLAG_MFG_MODE				= (1 << (9))
+};
+
+/* Max sequential TX/RX Control timeouts to set HANG event */
+#ifndef MAX_CNTL_TX_TIMEOUT
+#define MAX_CNTL_TX_TIMEOUT 2
+#endif /* MAX_CNTL_TX_TIMEOUT */
+#ifndef MAX_CNTL_RX_TIMEOUT
+#define MAX_CNTL_RX_TIMEOUT 1
+#endif /* MAX_CNTL_RX_TIMEOUT */
+
+#define DHD_SCAN_ASSOC_ACTIVE_TIME	20 /* ms: Embedded default Active setting from DHD */
+#define DHD_SCAN_UNASSOC_ACTIVE_TIME	40 /* ms: Embedded def. Unassoc Active setting from DHD */
+#define DHD_SCAN_UNASSOC_ACTIVE_TIME_PS	30
+#define DHD_SCAN_PASSIVE_TIME		130 /* ms: Embedded default Passive setting from DHD */
+
+#ifndef POWERUP_MAX_RETRY
+#define POWERUP_MAX_RETRY	3 /* how many times we retry to power up the chip */
+#endif
+#ifndef POWERUP_WAIT_MS
+#define POWERUP_WAIT_MS		2000 /* ms: time out in waiting wifi to come up */
+#endif
+
+enum dhd_bus_wake_state {
+	WAKE_LOCK_OFF,
+	WAKE_LOCK_PRIV,
+	WAKE_LOCK_DPC,
+	WAKE_LOCK_IOCTL,
+	WAKE_LOCK_DOWNLOAD,
+	WAKE_LOCK_TMOUT,
+	WAKE_LOCK_WATCHDOG,
+	WAKE_LOCK_LINK_DOWN_TMOUT,
+	WAKE_LOCK_PNO_FIND_TMOUT,
+	WAKE_LOCK_SOFTAP_SET,
+	WAKE_LOCK_SOFTAP_STOP,
+	WAKE_LOCK_SOFTAP_START,
+	WAKE_LOCK_SOFTAP_THREAD
+};
+
+enum dhd_prealloc_index {
+	DHD_PREALLOC_PROT = 0,
+	DHD_PREALLOC_RXBUF,
+	DHD_PREALLOC_DATABUF,
+	DHD_PREALLOC_OSL_BUF,
+#if defined(STATIC_WL_PRIV_STRUCT)
+	DHD_PREALLOC_WIPHY_ESCAN0 = 5,
+#endif /* STATIC_WL_PRIV_STRUCT */
+	DHD_PREALLOC_DHD_INFO = 7,
+	DHD_PREALLOC_IF_FLOW_LKUP = 9
+};
+
+/* Packet alignment for most efficient SDIO (can change based on platform) */
+#ifndef DHD_SDALIGN
+#define DHD_SDALIGN	32
+#endif
+
+/* host reordering packts logic */
+/* followed the structure to hold the reorder buffers (void **p) */
+typedef struct reorder_info {
+	void **p;
+	uint8 flow_id;
+	uint8 cur_idx;
+	uint8 exp_idx;
+	uint8 max_idx;
+	uint8 pend_pkts;
+} reorder_info_t;
+
+#ifdef DHDTCPACK_SUPPRESS
+
+enum {
+	/* TCPACK suppress off */
+	TCPACK_SUP_OFF,
+	/* Replace TCPACK in txq when new coming one has higher ACK number. */
+	TCPACK_SUP_REPLACE,
+	/* TCPACK_SUP_REPLACE + delayed TCPACK TX unless ACK to PSH DATA.
+	 * This will give benefits to Half-Duplex bus interface(e.g. SDIO) that
+	 * 1. we are able to read TCP DATA packets first from the bus
+	 * 2. TCPACKs that don't need to hurry delivered remains longer in TXQ so can be suppressed.
+	 */
+	TCPACK_SUP_DELAYTX,
+	TCPACK_SUP_LAST_MODE
+};
+#endif /* DHDTCPACK_SUPPRESS */
+
+
+/* DMA'ing r/w indices for rings supported */
+#ifdef BCM_INDX_TCM /* FW gets r/w indices in TCM */
+#define DMA_INDX_ENAB(dma_indxsup)	0
+#elif defined BCM_INDX_DMA  /* FW gets r/w indices from Host memory */
+#define DMA_INDX_ENAB(dma_indxsup)	1
+#else	/* r/w indices in TCM or host memory based on FW/Host agreement */
+#define DMA_INDX_ENAB(dma_indxsup)	dma_indxsup
+#endif	/* BCM_INDX_TCM */
+
+#if defined(WLTDLS) && defined(PCIE_FULL_DONGLE)
+struct tdls_peer_node {
+	uint8 addr[ETHER_ADDR_LEN];
+	struct tdls_peer_node *next;
+};
+typedef struct tdls_peer_node tdls_peer_node_t;
+typedef struct {
+	tdls_peer_node_t *node;
+	uint8 tdls_peer_count;
+} tdls_peer_tbl_t;
+#endif /* defined(WLTDLS) && defined(PCIE_FULL_DONGLE) */
+
+/* Common structure for module and instance linkage */
+typedef struct dhd_pub {
+	/* Linkage ponters */
+	osl_t *osh;		/* OSL handle */
+	struct dhd_bus *bus;	/* Bus module handle */
+	struct dhd_prot *prot;	/* Protocol module handle */
+	struct dhd_info  *info; /* Info module handle */
+
+	/* to NDIS developer, the structure dhd_common is redundant,
+	 * please do NOT merge it back from other branches !!!
+	 */
+
+
+	/* Internal dhd items */
+	bool up;		/* Driver up/down (to OS) */
+	bool txoff;		/* Transmit flow-controlled */
+	bool dongle_reset;  /* TRUE = DEVRESET put dongle into reset */
+	enum dhd_bus_state busstate;
+	uint hdrlen;		/* Total DHD header length (proto + bus) */
+	uint maxctl;		/* Max size rxctl request from proto to bus */
+	uint rxsz;		/* Rx buffer size bus module should use */
+	uint8 wme_dp;	/* wme discard priority */
+
+	/* Dongle media info */
+	bool iswl;		/* Dongle-resident driver is wl */
+	ulong drv_version;	/* Version of dongle-resident driver */
+	struct ether_addr mac;	/* MAC address obtained from dongle */
+	dngl_stats_t dstats;	/* Stats for dongle-based data */
+
+	/* Additional stats for the bus level */
+	ulong tx_packets;	/* Data packets sent to dongle */
+	ulong tx_dropped;	/* Data packets dropped in dhd */
+	ulong tx_multicast;	/* Multicast data packets sent to dongle */
+	ulong tx_errors;	/* Errors in sending data to dongle */
+	ulong tx_ctlpkts;	/* Control packets sent to dongle */
+	ulong tx_ctlerrs;	/* Errors sending control frames to dongle */
+	ulong rx_packets;	/* Packets sent up the network interface */
+	ulong rx_multicast;	/* Multicast packets sent up the network interface */
+	ulong rx_errors;	/* Errors processing rx data packets */
+	ulong rx_ctlpkts;	/* Control frames processed from dongle */
+	ulong rx_ctlerrs;	/* Errors in processing rx control frames */
+	ulong rx_dropped;	/* Packets dropped locally (no memory) */
+	ulong rx_flushed;  /* Packets flushed due to unscheduled sendup thread */
+	ulong wd_dpc_sched;   /* Number of times dhd dpc scheduled by watchdog timer */
+
+	ulong rx_readahead_cnt;	/* Number of packets where header read-ahead was used. */
+	ulong tx_realloc;	/* Number of tx packets we had to realloc for headroom */
+	ulong fc_packets;       /* Number of flow control pkts recvd */
+
+	/* Last error return */
+	int bcmerror;
+	uint tickcnt;
+
+	/* Last error from dongle */
+	int dongle_error;
+
+	uint8 country_code[WLC_CNTRY_BUF_SZ];
+
+	/* Suspend disable flag and "in suspend" flag */
+	int suspend_disable_flag; /* "1" to disable all extra powersaving during suspend */
+	int in_suspend;			/* flag set to 1 when early suspend called */
+#ifdef PNO_SUPPORT
+	int pno_enable;			/* pno status : "1" is pno enable */
+	int pno_suspend;		/* pno suspend status : "1" is pno suspended */
+#endif /* PNO_SUPPORT */
+	/* DTIM skip value, default 0(or 1) means wake each DTIM
+	 * 3 means skip 2 DTIMs and wake up 3rd DTIM(9th beacon when AP DTIM is 3)
+	 */
+	int suspend_bcn_li_dtim;         /* bcn_li_dtim value in suspend mode */
+#ifdef PKT_FILTER_SUPPORT
+	int early_suspended;	/* Early suspend status */
+	int dhcp_in_progress;	/* DHCP period */
+#endif
+
+	/* Pkt filter defination */
+	char * pktfilter[100];
+	int pktfilter_count;
+
+	wl_country_t dhd_cspec;		/* Current Locale info */
+	u32 dhd_cflags;
+	bool force_country_change;
+	char eventmask[WL_EVENTING_MASK_LEN];
+	int	op_mode;				/* STA, HostAPD, WFD, SoftAP */
+
+/* Set this to 1 to use a seperate interface (p2p0) for p2p operations.
+ *  For ICS MR1 releases it should be disable to be compatable with ICS MR1 Framework
+ *  see target dhd-cdc-sdmmc-panda-cfg80211-icsmr1-gpl-debug in Makefile
+ */
+/* #define WL_ENABLE_P2P_IF		1 */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	struct mutex 	wl_start_stop_lock; /* lock/unlock for Android start/stop */
+	struct mutex 	wl_softap_lock;		 /* lock/unlock for any SoftAP/STA settings */
+#endif
+
+#ifdef PROP_TXSTATUS
+	bool	wlfc_enabled;
+	int	wlfc_mode;
+	void*	wlfc_state;
+	/*
+	Mode in which the dhd flow control shall operate. Must be set before
+	traffic starts to the device.
+	0 - Do not do any proptxtstatus flow control
+	1 - Use implied credit from a packet status
+	2 - Use explicit credit
+	3 - Only AMPDU hostreorder used. no wlfc.
+	*/
+	uint8	proptxstatus_mode;
+	bool	proptxstatus_txoff;
+	bool	proptxstatus_module_ignore;
+	bool	proptxstatus_credit_ignore;
+	bool	proptxstatus_txstatus_ignore;
+
+	bool	wlfc_rxpkt_chk;
+	/*
+	 * implement below functions in each platform if needed.
+	 */
+	/* platform specific function whether to skip flow control */
+	bool (*skip_fc)(void);
+	/* platform specific function for wlfc_enable and wlfc_deinit */
+	void (*plat_init)(void *dhd);
+	void (*plat_deinit)(void *dhd);
+#endif /* PROP_TXSTATUS */
+#ifdef PNO_SUPPORT
+	void *pno_state;
+#endif
+#ifdef RTT_SUPPORT
+	void *rtt_state;
+#endif
+#ifdef ROAM_AP_ENV_DETECTION
+	bool	roam_env_detection;
+#endif
+	bool	dongle_isolation;
+	bool	dongle_trap_occured;	/* flag for sending HANG event to upper layer */
+	int   hang_was_sent;
+	int   rxcnt_timeout;		/* counter rxcnt timeout to send HANG */
+	int   txcnt_timeout;		/* counter txcnt timeout to send HANG */
+	bool hang_report;		/* enable hang report by default */
+#ifdef WLMEDIA_HTSF
+	uint8 htsfdlystat_sz; /* Size of delay stats, max 255B */
+#endif
+#ifdef WLTDLS
+	bool tdls_enable;
+#endif
+	struct reorder_info *reorder_bufs[WLHOST_REORDERDATA_MAXFLOWS];
+	char  fw_capabilities[WLC_IOCTL_SMLEN];
+	#define MAXSKBPEND 1024
+	void *skbbuf[MAXSKBPEND];
+	uint32 store_idx;
+	uint32 sent_idx;
+#ifdef DHDTCPACK_SUPPRESS
+	uint8 tcpack_sup_mode;		/* TCPACK suppress mode */
+	void *tcpack_sup_module;	/* TCPACK suppress module */
+#endif /* DHDTCPACK_SUPPRESS */
+#if defined(ARP_OFFLOAD_SUPPORT)
+	uint32 arp_version;
+#endif
+#ifdef CUSTOM_SET_CPUCORE
+	struct task_struct * current_dpc;
+	struct task_struct * current_rxf;
+	int chan_isvht80;
+#endif /* CUSTOM_SET_CPUCORE */
+
+
+	void    *sta_pool;          /* pre-allocated pool of sta objects */
+	void    *staid_allocator;   /* allocator of sta indexes */
+
+	void    *flowid_allocator;  /* unique flowid allocator */
+	void	*flow_ring_table;   /* flow ring table, include prot and bus info */
+	void	*if_flow_lkup;      /* per interface flowid lkup hash table */
+	void	*flowid_lock;		/* per os lock for flowid info protection */
+	uint32  num_flow_rings;
+	uint8  flow_prio_map[NUMPRIO];
+	uint8	flow_prio_map_type;
+	char enable_log[MAX_EVENT];
+	bool dma_d2h_ring_upd_support;
+	bool dma_h2d_ring_upd_support;
+	int  short_dwell_time;
+#ifdef DHD_WMF
+	bool wmf_ucast_igmp;
+#ifdef DHD_IGMP_UCQUERY
+	bool wmf_ucast_igmp_query;
+#endif
+#ifdef DHD_UCAST_UPNP
+	bool wmf_ucast_upnp;
+#endif
+#endif /* DHD_WMF */
+#ifdef DHD_UNICAST_DHCP
+	bool dhcp_unicast;
+#endif /* DHD_UNICAST_DHCP */
+#ifdef DHD_L2_FILTER
+	bool block_ping;
+#endif
+#if defined(WLTDLS) && defined(PCIE_FULL_DONGLE)
+	tdls_peer_tbl_t peer_tbl;
+#endif
+} dhd_pub_t;
+
+#if defined(BCMWDF)
+typedef struct {
+	dhd_pub_t *dhd_pub;
+} dhd_workitem_context_t;
+
+WDF_DECLARE_CONTEXT_TYPE_WITH_NAME(dhd_workitem_context_t, dhd_get_dhd_workitem_context)
+#endif /* (BCMWDF)  */
+
+	#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
+
+	#define DHD_PM_RESUME_WAIT_INIT(a) DECLARE_WAIT_QUEUE_HEAD(a);
+	#define _DHD_PM_RESUME_WAIT(a, b) do {\
+			int retry = 0; \
+			SMP_RD_BARRIER_DEPENDS(); \
+			while (dhd_mmc_suspend && retry++ != b) { \
+				SMP_RD_BARRIER_DEPENDS(); \
+				wait_event_interruptible_timeout(a, !dhd_mmc_suspend, 1); \
+			} \
+		} 	while (0)
+	#define DHD_PM_RESUME_WAIT(a) 		_DHD_PM_RESUME_WAIT(a, 200)
+	#define DHD_PM_RESUME_WAIT_FOREVER(a) 	_DHD_PM_RESUME_WAIT(a, ~0)
+	#ifdef CUSTOMER_HW4
+		#define DHD_PM_RESUME_RETURN_ERROR(a)   do { \
+				if (dhd_mmc_suspend) { \
+					printf("%s[%d]: mmc is still in suspend state!!!\n", \
+							__FUNCTION__, __LINE__); \
+					return a; \
+				} \
+			} while (0)
+	#else
+		#define DHD_PM_RESUME_RETURN_ERROR(a)	do { \
+			if (dhd_mmc_suspend) return a; } while (0)
+	#endif
+	#define DHD_PM_RESUME_RETURN		do { if (dhd_mmc_suspend) return; } while (0)
+
+	#define DHD_SPINWAIT_SLEEP_INIT(a) DECLARE_WAIT_QUEUE_HEAD(a);
+	#define SPINWAIT_SLEEP(a, exp, us) do { \
+		uint countdown = (us) + 9999; \
+		while ((exp) && (countdown >= 10000)) { \
+			wait_event_interruptible_timeout(a, FALSE, 1); \
+			countdown -= 10000; \
+		} \
+	} while (0)
+
+	#else
+
+	#define DHD_PM_RESUME_WAIT_INIT(a)
+	#define DHD_PM_RESUME_WAIT(a)
+	#define DHD_PM_RESUME_WAIT_FOREVER(a)
+	#define DHD_PM_RESUME_RETURN_ERROR(a)
+	#define DHD_PM_RESUME_RETURN
+
+	#define DHD_SPINWAIT_SLEEP_INIT(a)
+	#define SPINWAIT_SLEEP(a, exp, us)  do { \
+		uint countdown = (us) + 9; \
+		while ((exp) && (countdown >= 10)) { \
+			OSL_DELAY(10);  \
+			countdown -= 10;  \
+		} \
+	} while (0)
+
+	#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
+
+#ifndef OSL_SLEEP
+#define OSL_SLEEP(ms)		OSL_DELAY(ms*1000)
+#endif /* OSL_SLEEP */
+
+#define DHD_IF_VIF	0x01	/* Virtual IF (Hidden from user) */
+
+#ifdef PNO_SUPPORT
+int dhd_pno_clean(dhd_pub_t *dhd);
+#endif /* PNO_SUPPORT */
+/*
+ *  Wake locks are an Android power management concept. They are used by applications and services
+ *  to request CPU resources.
+ */
+extern int dhd_os_wake_lock(dhd_pub_t *pub);
+extern int dhd_os_wake_unlock(dhd_pub_t *pub);
+extern int dhd_os_wake_lock_timeout(dhd_pub_t *pub);
+extern int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t *pub, int val);
+extern int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t *pub, int val);
+extern int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t *pub);
+extern int dhd_os_wd_wake_lock(dhd_pub_t *pub);
+extern int dhd_os_wd_wake_unlock(dhd_pub_t *pub);
+extern int dhd_os_wake_lock_waive(dhd_pub_t *pub);
+extern int dhd_os_wake_lock_restore(dhd_pub_t *pub);
+int dhd_os_get_wake_irq(dhd_pub_t *pub);
+
+inline static void MUTEX_LOCK_SOFTAP_SET_INIT(dhd_pub_t * dhdp)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	mutex_init(&dhdp->wl_softap_lock);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
+}
+
+inline static void MUTEX_LOCK_SOFTAP_SET(dhd_pub_t * dhdp)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	mutex_lock(&dhdp->wl_softap_lock);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
+}
+
+inline static void MUTEX_UNLOCK_SOFTAP_SET(dhd_pub_t * dhdp)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	mutex_unlock(&dhdp->wl_softap_lock);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
+}
+
+#define DHD_OS_WAKE_LOCK(pub)			dhd_os_wake_lock(pub)
+#define DHD_OS_WAKE_UNLOCK(pub)		dhd_os_wake_unlock(pub)
+#define DHD_OS_WAKE_LOCK_TIMEOUT(pub)		dhd_os_wake_lock_timeout(pub)
+#define DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(pub, val) \
+	dhd_os_wake_lock_rx_timeout_enable(pub, val)
+#define DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(pub, val) \
+	dhd_os_wake_lock_ctrl_timeout_enable(pub, val)
+#define DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_CANCEL(pub) \
+	dhd_os_wake_lock_ctrl_timeout_cancel(pub)
+#define DHD_OS_WAKE_LOCK_WAIVE(pub)             dhd_os_wake_lock_waive(pub)
+#define DHD_OS_WAKE_LOCK_RESTORE(pub)           dhd_os_wake_lock_restore(pub)
+
+#define DHD_OS_WD_WAKE_LOCK(pub)		dhd_os_wd_wake_lock(pub)
+#define DHD_OS_WD_WAKE_UNLOCK(pub)		dhd_os_wd_wake_unlock(pub)
+#define DHD_PACKET_TIMEOUT_MS	500
+#define DHD_EVENT_TIMEOUT_MS	1500
+
+
+/* interface operations (register, remove) should be atomic, use this lock to prevent race
+ * condition among wifi on/off and interface operation functions
+ */
+void dhd_net_if_lock(struct net_device *dev);
+void dhd_net_if_unlock(struct net_device *dev);
+
+
+typedef enum dhd_attach_states
+{
+	DHD_ATTACH_STATE_INIT = 0x0,
+	DHD_ATTACH_STATE_NET_ALLOC = 0x1,
+	DHD_ATTACH_STATE_DHD_ALLOC = 0x2,
+	DHD_ATTACH_STATE_ADD_IF = 0x4,
+	DHD_ATTACH_STATE_PROT_ATTACH = 0x8,
+	DHD_ATTACH_STATE_WL_ATTACH = 0x10,
+	DHD_ATTACH_STATE_THREADS_CREATED = 0x20,
+	DHD_ATTACH_STATE_WAKELOCKS_INIT = 0x40,
+	DHD_ATTACH_STATE_CFG80211 = 0x80,
+	DHD_ATTACH_STATE_EARLYSUSPEND_DONE = 0x100,
+	DHD_ATTACH_STATE_DONE = 0x200
+} dhd_attach_states_t;
+
+/* Value -1 means we are unsuccessful in creating the kthread. */
+#define DHD_PID_KT_INVALID 	-1
+/* Value -2 means we are unsuccessful in both creating the kthread and tasklet */
+#define DHD_PID_KT_TL_INVALID	-2
+
+/*
+ * Exported from dhd OS modules (dhd_linux/dhd_ndis)
+ */
+
+/* Indication from bus module regarding presence/insertion of dongle.
+ * Return dhd_pub_t pointer, used as handle to OS module in later calls.
+ * Returned structure should have bus and prot pointers filled in.
+ * bus_hdrlen specifies required headroom for bus module header.
+ */
+extern dhd_pub_t *dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen);
+#if defined(WLP2P) && defined(WL_CFG80211)
+/* To allow attach/detach calls corresponding to p2p0 interface  */
+extern int dhd_attach_p2p(dhd_pub_t *);
+extern int dhd_detach_p2p(dhd_pub_t *);
+#endif /* WLP2P && WL_CFG80211 */
+extern int dhd_register_if(dhd_pub_t *dhdp, int idx, bool need_rtnl_lock);
+
+/* Indication from bus module regarding removal/absence of dongle */
+extern void dhd_detach(dhd_pub_t *dhdp);
+extern void dhd_free(dhd_pub_t *dhdp);
+extern void dhd_clear(dhd_pub_t *dhdp);
+
+
+/* Indication from bus module to change flow-control state */
+extern void dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool on);
+
+/* Store the status of a connection attempt for later retrieval by an iovar */
+extern void dhd_store_conn_status(uint32 event, uint32 status, uint32 reason);
+
+extern bool dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q, void *pkt, int prec);
+
+/* Receive frame for delivery to OS.  Callee disposes of rxp. */
+extern void dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *rxp, int numpkt, uint8 chan);
+
+/* Return pointer to interface name */
+extern char *dhd_ifname(dhd_pub_t *dhdp, int idx);
+
+/* Request scheduling of the bus dpc */
+extern void dhd_sched_dpc(dhd_pub_t *dhdp);
+
+/* Notify tx completion */
+extern void dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success);
+
+#define WIFI_FEATURE_INFRA              0x0001      /* Basic infrastructure mode        */
+#define WIFI_FEATURE_INFRA_5G           0x0002      /* Support for 5 GHz Band           */
+#define WIFI_FEATURE_HOTSPOT            0x0004      /* Support for GAS/ANQP             */
+#define WIFI_FEATURE_P2P                0x0008      /* Wifi-Direct                      */
+#define WIFI_FEATURE_SOFT_AP            0x0010      /* Soft AP                          */
+#define WIFI_FEATURE_GSCAN              0x0020      /* Google-Scan APIs                 */
+#define WIFI_FEATURE_NAN                0x0040      /* Neighbor Awareness Networking    */
+#define WIFI_FEATURE_D2D_RTT            0x0080      /* Device-to-device RTT             */
+#define WIFI_FEATURE_D2AP_RTT           0x0100      /* Device-to-AP RTT                 */
+#define WIFI_FEATURE_BATCH_SCAN         0x0200      /* Batched Scan (legacy)            */
+#define WIFI_FEATURE_PNO                0x0400      /* Preferred network offload        */
+#define WIFI_FEATURE_ADDITIONAL_STA     0x0800      /* Support for two STAs             */
+#define WIFI_FEATURE_TDLS               0x1000      /* Tunnel directed link setup       */
+#define WIFI_FEATURE_TDLS_OFFCHANNEL    0x2000      /* Support for TDLS off channel     */
+#define WIFI_FEATURE_EPR                0x4000      /* Enhanced power reporting         */
+#define WIFI_FEATURE_AP_STA             0x8000      /* Support for AP STA Concurrency   */
+#define WIFI_FEATURE_LINKSTAT           0x10000     /* Support for Linkstats            */
+
+#define MAX_FEATURE_SET_CONCURRRENT_GROUPS  3
+
+extern int dhd_dev_get_feature_set(struct net_device *dev);
+extern int *dhd_dev_get_feature_set_matrix(struct net_device *dev, int *num);
+extern int dhd_dev_set_nodfs(struct net_device *dev, u32 nodfs);
+
+/* OS independent layer functions */
+extern int dhd_os_proto_block(dhd_pub_t * pub);
+extern int dhd_os_proto_unblock(dhd_pub_t * pub);
+extern int dhd_os_ioctl_resp_wait(dhd_pub_t * pub, uint * condition, bool * pending);
+extern int dhd_os_ioctl_resp_wake(dhd_pub_t * pub);
+extern int dhd_os_d3ack_wait(dhd_pub_t * pub, uint * condition, bool * pending);
+extern int dhd_os_d3ack_wake(dhd_pub_t * pub);
+extern unsigned int dhd_os_get_ioctl_resp_timeout(void);
+extern void dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec);
+
+extern int dhd_os_get_image_block(char * buf, int len, void * image);
+extern void * dhd_os_open_image(char * filename);
+extern void dhd_os_close_image(void * image);
+extern void dhd_os_wd_timer(void *bus, uint wdtick);
+extern void dhd_os_sdlock(dhd_pub_t * pub);
+extern void dhd_os_sdunlock(dhd_pub_t * pub);
+extern void dhd_os_sdlock_txq(dhd_pub_t * pub);
+extern void dhd_os_sdunlock_txq(dhd_pub_t * pub);
+extern void dhd_os_sdlock_rxq(dhd_pub_t * pub);
+extern void dhd_os_sdunlock_rxq(dhd_pub_t * pub);
+extern void dhd_os_sdlock_sndup_rxq(dhd_pub_t * pub);
+#ifdef DHDTCPACK_SUPPRESS
+extern void dhd_os_tcpacklock(dhd_pub_t *pub);
+extern void dhd_os_tcpackunlock(dhd_pub_t *pub);
+#endif /* DHDTCPACK_SUPPRESS */
+
+extern int dhd_customer_oob_irq_map(void *adapter, unsigned long *irq_flags_ptr);
+extern int dhd_customer_gpio_wlan_ctrl(void *adapter, int onoff);
+extern int dhd_custom_get_mac_address(void *adapter, unsigned char *buf);
+extern void get_customized_country_code(void *adapter, char *country_iso_code,
+	wl_country_t *cspec, u32 flags);
+extern void dhd_os_sdunlock_sndup_rxq(dhd_pub_t * pub);
+extern void dhd_os_sdlock_eventq(dhd_pub_t * pub);
+extern void dhd_os_sdunlock_eventq(dhd_pub_t * pub);
+extern bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret);
+extern int dhd_os_send_hang_message(dhd_pub_t *dhdp);
+extern void dhd_set_version_info(dhd_pub_t *pub, char *fw);
+extern void dhd_set_short_dwell_time(dhd_pub_t *dhd, int set);
+#ifdef CUSTOM_SET_SHORT_DWELL_TIME
+extern void net_set_short_dwell_time(struct net_device *dev, int set);
+#endif
+extern bool dhd_os_check_if_up(dhd_pub_t *pub);
+extern int dhd_os_check_wakelock(dhd_pub_t *pub);
+extern int dhd_os_check_wakelock_all(dhd_pub_t *pub);
+extern int dhd_get_instance(dhd_pub_t *pub);
+#ifdef CUSTOM_SET_CPUCORE
+extern void dhd_set_cpucore(dhd_pub_t *dhd, int set);
+#endif /* CUSTOM_SET_CPUCORE */
+
+#if defined(KEEP_ALIVE)
+extern int dhd_keep_alive_onoff(dhd_pub_t *dhd);
+#endif /* KEEP_ALIVE */
+
+
+#ifdef PKT_FILTER_SUPPORT
+#define DHD_UNICAST_FILTER_NUM		0
+#define DHD_BROADCAST_FILTER_NUM	1
+#define DHD_MULTICAST4_FILTER_NUM	2
+#define DHD_MULTICAST6_FILTER_NUM	3
+#define DHD_MDNS_FILTER_NUM		4
+#define DHD_ARP_FILTER_NUM		5
+extern int 	dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val);
+extern void dhd_enable_packet_filter(int value, dhd_pub_t *dhd);
+extern int net_os_enable_packet_filter(struct net_device *dev, int val);
+extern int net_os_rxfilter_add_remove(struct net_device *dev, int val, int num);
+#endif /* PKT_FILTER_SUPPORT */
+
+extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd);
+extern bool dhd_support_sta_mode(dhd_pub_t *dhd);
+
+#ifdef DHD_DEBUG
+extern int write_to_file(dhd_pub_t *dhd, uint8 *buf, int size);
+#endif /* DHD_DEBUG */
+
+typedef struct {
+	uint32 limit;		/* Expiration time (usec) */
+	uint32 increment;	/* Current expiration increment (usec) */
+	uint32 elapsed;		/* Current elapsed time (usec) */
+	uint32 tick;		/* O/S tick time (usec) */
+} dhd_timeout_t;
+
+#ifdef SHOW_LOGTRACE
+typedef struct {
+	int  num_fmts;
+	char **fmts;
+	char *raw_fmts;
+} dhd_event_log_t;
+#endif /* SHOW_LOGTRACE */
+
+extern void dhd_timeout_start(dhd_timeout_t *tmo, uint usec);
+extern int dhd_timeout_expired(dhd_timeout_t *tmo);
+
+extern int dhd_ifname2idx(struct dhd_info *dhd, char *name);
+extern int dhd_ifidx2hostidx(struct dhd_info *dhd, int ifidx);
+extern int dhd_net2idx(struct dhd_info *dhd, struct net_device *net);
+extern struct net_device * dhd_idx2net(void *pub, int ifidx);
+extern int net_os_send_hang_message(struct net_device *dev);
+extern int wl_host_event(dhd_pub_t *dhd_pub, int *idx, void *pktdata, size_t pktlen,
+                         wl_event_msg_t *, void **data_ptr,  void *);
+extern void wl_event_to_host_order(wl_event_msg_t * evt);
+
+extern int dhd_wl_ioctl(dhd_pub_t *dhd_pub, int ifindex, wl_ioctl_t *ioc, void *buf, int len);
+extern int dhd_wl_ioctl_cmd(dhd_pub_t *dhd_pub, int cmd, void *arg, int len, uint8 set,
+                            int ifindex);
+extern void dhd_common_init(osl_t *osh);
+
+extern int dhd_do_driver_init(struct net_device *net);
+extern int dhd_event_ifadd(struct dhd_info *dhd, struct wl_event_data_if *ifevent,
+	char *name, uint8 *mac);
+extern int dhd_event_ifdel(struct dhd_info *dhd, struct wl_event_data_if *ifevent,
+	char *name, uint8 *mac);
+extern struct net_device* dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, char *name,
+	uint8 *mac, uint8 bssidx, bool need_rtnl_lock);
+extern int dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock);
+extern void dhd_vif_add(struct dhd_info *dhd, int ifidx, char * name);
+extern void dhd_vif_del(struct dhd_info *dhd, int ifidx);
+extern void dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx);
+extern void dhd_vif_sendup(struct dhd_info *dhd, int ifidx, uchar *cp, int len);
+
+/* Send packet to dongle via data channel */
+extern int dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pkt);
+
+/* send up locally generated event */
+extern void dhd_sendup_event_common(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data);
+/* Send event to host */
+extern void dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data);
+#ifdef LOG_INTO_TCPDUMP
+extern void dhd_sendup_log(dhd_pub_t *dhdp, void *data, int len);
+#endif /* LOG_INTO_TCPDUMP */
+extern int dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag);
+extern uint dhd_bus_status(dhd_pub_t *dhdp);
+extern int  dhd_bus_start(dhd_pub_t *dhdp);
+extern int dhd_bus_suspend(dhd_pub_t *dhdpub);
+extern int dhd_bus_resume(dhd_pub_t *dhdpub, int stage);
+extern int dhd_bus_membytes(dhd_pub_t *dhdp, bool set, uint32 address, uint8 *data, uint size);
+extern void dhd_print_buf(void *pbuf, int len, int bytes_per_line);
+extern bool dhd_is_associated(dhd_pub_t *dhd, void *bss_buf, int *retval);
+#if defined(BCMSDIO) || defined(BCMPCIE)
+extern uint dhd_bus_chip_id(dhd_pub_t *dhdp);
+extern uint dhd_bus_chiprev_id(dhd_pub_t *dhdp);
+extern uint dhd_bus_chippkg_id(dhd_pub_t *dhdp);
+#endif /* defined(BCMSDIO) || defined(BCMPCIE) */
+
+#if defined(KEEP_ALIVE)
+extern int dhd_keep_alive_onoff(dhd_pub_t *dhd);
+#endif /* KEEP_ALIVE */
+
+/* OS spin lock API */
+extern void *dhd_os_spin_lock_init(osl_t *osh);
+extern void dhd_os_spin_lock_deinit(osl_t *osh, void *lock);
+extern unsigned long dhd_os_spin_lock(void *lock);
+void dhd_os_spin_unlock(void *lock, unsigned long flags);
+
+/*
+ * Manage sta objects in an interface. Interface is identified by an ifindex and
+ * sta(s) within an interfaces are managed using a MacAddress of the sta.
+ */
+struct dhd_sta;
+extern struct dhd_sta *dhd_findadd_sta(void *pub, int ifidx, void *ea);
+extern void dhd_del_sta(void *pub, int ifidx, void *ea);
+extern int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx);
+extern int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val);
+extern int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx);
+
+extern bool dhd_is_concurrent_mode(dhd_pub_t *dhd);
+extern int dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf, uint cmd_len, int set);
+typedef enum cust_gpio_modes {
+	WLAN_RESET_ON,
+	WLAN_RESET_OFF,
+	WLAN_POWER_ON,
+	WLAN_POWER_OFF
+} cust_gpio_modes_t;
+
+extern int wl_iw_iscan_set_scan_broadcast_prep(struct net_device *dev, uint flag);
+extern int wl_iw_send_priv_event(struct net_device *dev, char *flag);
+/*
+ * Insmod parameters for debug/test
+ */
+
+/* Watchdog timer interval */
+extern uint dhd_watchdog_ms;
+
+#if defined(DHD_DEBUG)
+/* Console output poll interval */
+extern uint dhd_console_ms;
+extern uint wl_msg_level;
+#endif /* defined(DHD_DEBUG) */
+
+extern uint dhd_slpauto;
+
+/* Use interrupts */
+extern uint dhd_intr;
+
+/* Use polling */
+extern uint dhd_poll;
+
+/* ARP offload agent mode */
+extern uint dhd_arp_mode;
+
+/* ARP offload enable */
+extern uint dhd_arp_enable;
+
+/* Pkt filte enable control */
+extern uint dhd_pkt_filter_enable;
+
+/*  Pkt filter init setup */
+extern uint dhd_pkt_filter_init;
+
+/* Pkt filter mode control */
+extern uint dhd_master_mode;
+
+/* Roaming mode control */
+extern uint dhd_roam_disable;
+
+/* Roaming mode control */
+extern uint dhd_radio_up;
+
+/* Initial idletime ticks (may be -1 for immediate idle, 0 for no idle) */
+extern int dhd_idletime;
+#ifdef DHD_USE_IDLECOUNT
+#define DHD_IDLETIME_TICKS 5
+#else
+#define DHD_IDLETIME_TICKS 1
+#endif /* DHD_USE_IDLECOUNT */
+
+/* SDIO Drive Strength */
+extern uint dhd_sdiod_drive_strength;
+
+/* Override to force tx queueing all the time */
+extern uint dhd_force_tx_queueing;
+
+/* Default bcn_timeout value is 4 */
+#define DEFAULT_BCN_TIMEOUT_VALUE        4
+#ifndef CUSTOM_BCN_TIMEOUT_SETTING
+#define CUSTOM_BCN_TIMEOUT_SETTING	DEFAULT_BCN_TIMEOUT_VALUE
+#endif
+
+/* Default KEEP_ALIVE Period is 55 sec to prevent AP from sending Keep Alive probe frame */
+#define DEFAULT_KEEP_ALIVE_VALUE 	55000 /* msec */
+#ifndef CUSTOM_KEEP_ALIVE_SETTING
+#define CUSTOM_KEEP_ALIVE_SETTING 	DEFAULT_KEEP_ALIVE_VALUE
+#endif /* DEFAULT_KEEP_ALIVE_VALUE */
+
+#define NULL_PKT_STR	"null_pkt"
+
+/* hooks for custom glom setting option via Makefile */
+#define DEFAULT_GLOM_VALUE 	-1
+#ifndef CUSTOM_GLOM_SETTING
+#define CUSTOM_GLOM_SETTING 	DEFAULT_GLOM_VALUE
+#endif
+#define WL_AUTO_ROAM_TRIGGER -75
+/* hooks for custom Roaming Trigger  setting via Makefile */
+#define DEFAULT_ROAM_TRIGGER_VALUE -75 /* dBm default roam trigger all band */
+#define DEFAULT_ROAM_TRIGGER_SETTING 	-1
+#ifndef CUSTOM_ROAM_TRIGGER_SETTING
+#define CUSTOM_ROAM_TRIGGER_SETTING 	DEFAULT_ROAM_TRIGGER_VALUE
+#endif
+
+/* hooks for custom Roaming Romaing  setting via Makefile */
+#define DEFAULT_ROAM_DELTA_VALUE  10 /* dBm default roam delta all band */
+#define DEFAULT_ROAM_DELTA_SETTING 	-1
+#ifndef CUSTOM_ROAM_DELTA_SETTING
+#define CUSTOM_ROAM_DELTA_SETTING 	DEFAULT_ROAM_DELTA_VALUE
+#endif
+
+/* hooks for custom PNO Event wake lock to guarantee enough time
+	for the Platform to detect Event before system suspended
+*/
+#define DEFAULT_PNO_EVENT_LOCK_xTIME 	2 	/* multiplier of DHD_PACKET_TIMEOUT_MS */
+#ifndef CUSTOM_PNO_EVENT_LOCK_xTIME
+#define CUSTOM_PNO_EVENT_LOCK_xTIME	 DEFAULT_PNO_EVENT_LOCK_xTIME
+#endif
+
+#define DEFAULT_DHCP_LOCK_xTIME		2 	/* multiplier of DHD_PACKET_TIMEOUT_MS */
+#ifndef CUSTOM_DHCP_LOCK_xTIME
+#define CUSTOM_DHCP_LOCK_xTIME		DEFAULT_DHCP_LOCK_xTIME
+#endif
+
+/* hooks for custom dhd_dpc_prio setting option via Makefile */
+#define DEFAULT_DHP_DPC_PRIO  1
+#ifndef CUSTOM_DPC_PRIO_SETTING
+#define CUSTOM_DPC_PRIO_SETTING 	DEFAULT_DHP_DPC_PRIO
+#endif
+
+#ifndef CUSTOM_LISTEN_INTERVAL
+#define CUSTOM_LISTEN_INTERVAL 		LISTEN_INTERVAL
+#endif /* CUSTOM_LISTEN_INTERVAL */
+
+#define DEFAULT_SUSPEND_BCN_LI_DTIM		3
+#ifndef CUSTOM_SUSPEND_BCN_LI_DTIM
+#define CUSTOM_SUSPEND_BCN_LI_DTIM		DEFAULT_SUSPEND_BCN_LI_DTIM
+#endif
+
+#ifndef CUSTOM_RXF_PRIO_SETTING
+#define CUSTOM_RXF_PRIO_SETTING		MAX((CUSTOM_DPC_PRIO_SETTING - 1), 1)
+#endif
+
+#define DEFAULT_WIFI_TURNOFF_DELAY		0
+#define WIFI_TURNOFF_DELAY		DEFAULT_WIFI_TURNOFF_DELAY
+
+#define DEFAULT_WIFI_TURNON_DELAY		200
+#ifndef WIFI_TURNON_DELAY
+#define WIFI_TURNON_DELAY		DEFAULT_WIFI_TURNON_DELAY
+#endif /* WIFI_TURNON_DELAY */
+
+#define DEFAULT_DHD_WATCHDOG_INTERVAL_MS	10 /* msec */
+#ifndef CUSTOM_DHD_WATCHDOG_MS
+#define CUSTOM_DHD_WATCHDOG_MS			DEFAULT_DHD_WATCHDOG_INTERVAL_MS
+#endif /* DEFAULT_DHD_WATCHDOG_INTERVAL_MS */
+
+#ifdef WLTDLS
+#ifndef CUSTOM_TDLS_IDLE_MODE_SETTING
+#define CUSTOM_TDLS_IDLE_MODE_SETTING  60000 /* 60sec to tear down TDLS of not active */
+#endif
+#ifndef CUSTOM_TDLS_RSSI_THRESHOLD_HIGH
+#define CUSTOM_TDLS_RSSI_THRESHOLD_HIGH -70 /* rssi threshold for establishing TDLS link */
+#endif
+#ifndef CUSTOM_TDLS_RSSI_THRESHOLD_LOW
+#define CUSTOM_TDLS_RSSI_THRESHOLD_LOW -80 /* rssi threshold for tearing down TDLS link */
+#endif
+#endif /* WLTDLS */
+
+
+#define MAX_DTIM_SKIP_BEACON_INTERVAL	100 /* max allowed associated AP beacon for DTIM skip */
+#ifndef MAX_DTIM_ALLOWED_INTERVAL
+#define MAX_DTIM_ALLOWED_INTERVAL 600 /* max allowed total beacon interval for DTIM skip */
+#endif
+#define NO_DTIM_SKIP 1
+#ifdef SDTEST
+/* Echo packet generator (SDIO), pkts/s */
+extern uint dhd_pktgen;
+
+/* Echo packet len (0 => sawtooth, max 1800) */
+extern uint dhd_pktgen_len;
+#define MAX_PKTGEN_LEN 1800
+#endif
+
+
+/* optionally set by a module_param_string() */
+#define MOD_PARAM_PATHLEN	2048
+#define MOD_PARAM_INFOLEN	512
+
+#ifdef SOFTAP
+extern char fw_path2[MOD_PARAM_PATHLEN];
+#endif
+
+/* Flag to indicate if we should download firmware on driver load */
+extern uint dhd_download_fw_on_driverload;
+
+
+extern void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar);
+extern void dhd_wait_event_wakeup(dhd_pub_t*dhd);
+
+#define IFLOCK_INIT(lock)       *lock = 0
+#define IFLOCK(lock)    while (InterlockedCompareExchange((lock), 1, 0))	\
+	NdisStallExecution(1);
+#define IFUNLOCK(lock)  InterlockedExchange((lock), 0)
+#define IFLOCK_FREE(lock)
+#define FW_SUPPORTED(dhd, capa) ((strstr(dhd->fw_capabilities, #capa) != NULL))
+#ifdef ARP_OFFLOAD_SUPPORT
+#define MAX_IPV4_ENTRIES	8
+void dhd_arp_offload_set(dhd_pub_t * dhd, int arp_mode);
+void dhd_arp_offload_enable(dhd_pub_t * dhd, int arp_enable);
+
+/* dhd_commn arp offload wrapers */
+void dhd_aoe_hostip_clr(dhd_pub_t *dhd, int idx);
+void dhd_aoe_arp_clr(dhd_pub_t *dhd, int idx);
+int dhd_arp_get_arp_hostip_table(dhd_pub_t *dhd, void *buf, int buflen, int idx);
+void dhd_arp_offload_add_ip(dhd_pub_t *dhd, uint32 ipaddr, int idx);
+#endif /* ARP_OFFLOAD_SUPPORT */
+#ifdef WLTDLS
+int dhd_tdls_enable(struct net_device *dev, bool tdls_on, bool auto_on, struct ether_addr *mac);
+#ifdef PCIE_FULL_DONGLE
+void dhd_tdls_update_peer_info(struct net_device *dev, bool connect_disconnect, uint8 *addr);
+#endif /* PCIE_FULL_DONGLE */
+#endif /* WLTDLS */
+/* Neighbor Discovery Offload Support */
+int dhd_ndo_enable(dhd_pub_t * dhd, int ndo_enable);
+int dhd_ndo_add_ip(dhd_pub_t *dhd, char* ipaddr, int idx);
+int dhd_ndo_remove_ip(dhd_pub_t *dhd, int idx);
+/* ioctl processing for nl80211 */
+int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, struct dhd_ioctl *ioc, void *data_buf);
+
+void dhd_bus_update_fw_nv_path(struct dhd_bus *bus, char *pfw_path, char *pnv_path);
+void dhd_set_bus_state(void *bus, uint32 state);
+
+/* Remove proper pkts(either one no-frag pkt or whole fragmented pkts) */
+typedef int (*f_droppkt_t)(dhd_pub_t *dhdp, int prec, void* p, bool bPktInQ);
+extern bool dhd_prec_drop_pkts(dhd_pub_t *dhdp, struct pktq *pq, int prec, f_droppkt_t fn);
+
+#ifdef PROP_TXSTATUS
+int dhd_os_wlfc_block(dhd_pub_t *pub);
+int dhd_os_wlfc_unblock(dhd_pub_t *pub);
+extern const uint8 prio2fifo[];
+#endif /* PROP_TXSTATUS */
+
+uint8* dhd_os_prealloc(dhd_pub_t *dhdpub, int section, uint size, bool kmalloc_if_fail);
+void dhd_os_prefree(dhd_pub_t *dhdpub, void *addr, uint size);
+
+int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost);
+
+#if defined(CONFIG_DHD_USE_STATIC_BUF)
+#define DHD_OS_PREALLOC(dhdpub, section, size) dhd_os_prealloc(dhdpub, section, size, FALSE)
+#define DHD_OS_PREFREE(dhdpub, addr, size) dhd_os_prefree(dhdpub, addr, size)
+#else
+#define DHD_OS_PREALLOC(dhdpub, section, size) MALLOC(dhdpub->osh, size)
+#define DHD_OS_PREFREE(dhdpub, addr, size) MFREE(dhdpub->osh, addr, size)
+#endif /* defined(CONFIG_DHD_USE_STATIC_BUF) */
+
+
+#define dhd_add_flowid(pub, ifidx, ac_prio, ea, flowid)  do {} while (0)
+#define dhd_del_flowid(pub, ifidx, flowid)               do {} while (0)
+
+extern unsigned long dhd_os_general_spin_lock(dhd_pub_t *pub);
+extern void dhd_os_general_spin_unlock(dhd_pub_t *pub, unsigned long flags);
+
+/** Miscellaenous DHD Spin Locks */
+
+/* Disable router 3GMAC bypass path perimeter lock */
+#define DHD_PERIM_LOCK(dhdp)              do {} while (0)
+#define DHD_PERIM_UNLOCK(dhdp)            do {} while (0)
+
+/* Enable DHD general spin lock/unlock */
+#define DHD_GENERAL_LOCK(dhdp, flags) \
+	(flags) = dhd_os_general_spin_lock(dhdp)
+#define DHD_GENERAL_UNLOCK(dhdp, flags) \
+	dhd_os_general_spin_unlock((dhdp), (flags))
+
+/* Enable DHD flowring spin lock/unlock */
+#define DHD_FLOWRING_LOCK(lock, flags)     (flags) = dhd_os_spin_lock(lock)
+#define DHD_FLOWRING_UNLOCK(lock, flags)   dhd_os_spin_unlock((lock), (flags))
+
+/* Enable DHD common flowring info spin lock/unlock */
+#define DHD_FLOWID_LOCK(lock, flags)       (flags) = dhd_os_spin_lock(lock)
+#define DHD_FLOWID_UNLOCK(lock, flags)     dhd_os_spin_unlock((lock), (flags))
+
+
+
+typedef struct wl_io_pport {
+	dhd_pub_t *dhd_pub;
+	uint ifidx;
+} wl_io_pport_t;
+
+extern void *dhd_pub_wlinfo(dhd_pub_t *dhd_pub);
+
+
+#endif /* _dhd_h_ */
diff --git a/drivers/net/wireless/bcmdhd/dhd_bta.c b/drivers/net/wireless/bcmdhd/dhd_bta.c
new file mode 100644
index 0000000..d82d6d2
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_bta.c
@@ -0,0 +1,337 @@
+/*
+ * BT-AMP support routines
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_bta.c 434434 2013-11-06 07:16:02Z $
+ */
+#error "WLBTAMP is not defined"
+
+#include <typedefs.h>
+#include <osl.h>
+#include <bcmcdc.h>
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <proto/802.11.h>
+#include <proto/802.11_bta.h>
+#include <proto/bt_amp_hci.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_bus.h>
+#include <dhd_proto.h>
+#include <dhdioctl.h>
+#include <dhd_dbg.h>
+
+#include <dhd_bta.h>
+
+
+#ifdef SEND_HCI_CMD_VIA_IOCTL
+#define BTA_HCI_CMD_MAX_LEN HCI_CMD_PREAMBLE_SIZE + HCI_CMD_DATA_SIZE
+
+/* Send HCI cmd via wl iovar HCI_cmd to the dongle. */
+int
+dhd_bta_docmd(dhd_pub_t *pub, void *cmd_buf, uint cmd_len)
+{
+	amp_hci_cmd_t *cmd = (amp_hci_cmd_t *)cmd_buf;
+	uint8 buf[BTA_HCI_CMD_MAX_LEN + 16];
+	uint len = sizeof(buf);
+	wl_ioctl_t ioc;
+
+	if (cmd_len < HCI_CMD_PREAMBLE_SIZE)
+		return BCME_BADLEN;
+
+	if ((uint)cmd->plen + HCI_CMD_PREAMBLE_SIZE > cmd_len)
+		return BCME_BADLEN;
+
+	len = bcm_mkiovar("HCI_cmd",
+		(char *)cmd, (uint)cmd->plen + HCI_CMD_PREAMBLE_SIZE, (char *)buf, len);
+
+
+	memset(&ioc, 0, sizeof(ioc));
+
+	ioc.cmd = WLC_SET_VAR;
+	ioc.buf = buf;
+	ioc.len = len;
+	ioc.set = TRUE;
+
+	return dhd_wl_ioctl(pub, &ioc, ioc.buf, ioc.len);
+}
+#else /* !SEND_HCI_CMD_VIA_IOCTL */
+
+static void
+dhd_bta_flush_hcidata(dhd_pub_t *pub, uint16 llh)
+{
+	int prec;
+	struct pktq *q;
+	uint count = 0;
+
+	q = dhd_bus_txq(pub->bus);
+	if (q == NULL)
+		return;
+
+	DHD_BTA(("dhd: flushing HCI ACL data for logical link %u...\n", llh));
+
+	dhd_os_sdlock_txq(pub);
+
+	/* Walk through the txq and toss all HCI ACL data packets */
+	PKTQ_PREC_ITER(q, prec) {
+		void *head_pkt = NULL;
+
+		while (pktq_ppeek(q, prec) != head_pkt) {
+			void *pkt = pktq_pdeq(q, prec);
+			int ifidx;
+
+			dhd_prot_hdrpull(pub, &ifidx, pkt, NULL, NULL);
+
+			if (PKTLEN(pub->osh, pkt) >= RFC1042_HDR_LEN) {
+				struct ether_header *eh =
+				        (struct ether_header *)PKTDATA(pub->osh, pkt);
+
+				if (ntoh16(eh->ether_type) < ETHER_TYPE_MIN) {
+					struct dot11_llc_snap_header *lsh =
+					        (struct dot11_llc_snap_header *)&eh[1];
+
+					if (bcmp(lsh, BT_SIG_SNAP_MPROT,
+					         DOT11_LLC_SNAP_HDR_LEN - 2) == 0 &&
+					    ntoh16(lsh->type) == BTA_PROT_L2CAP) {
+						amp_hci_ACL_data_t *ACL_data =
+						        (amp_hci_ACL_data_t *)&lsh[1];
+						uint16 handle = ltoh16(ACL_data->handle);
+
+						if (HCI_ACL_DATA_HANDLE(handle) == llh) {
+							PKTFREE(pub->osh, pkt, TRUE);
+							count ++;
+							continue;
+						}
+					}
+				}
+			}
+
+			dhd_prot_hdrpush(pub, ifidx, pkt);
+
+			if (head_pkt == NULL)
+				head_pkt = pkt;
+			pktq_penq(q, prec, pkt);
+		}
+	}
+
+	dhd_os_sdunlock_txq(pub);
+
+	DHD_BTA(("dhd: flushed %u packet(s) for logical link %u...\n", count, llh));
+}
+
+/* Handle HCI cmd locally.
+ * Return 0: continue to send the cmd across SDIO
+ *        < 0: stop, fail
+ *        > 0: stop, succuess
+ */
+static int
+_dhd_bta_docmd(dhd_pub_t *pub, amp_hci_cmd_t *cmd)
+{
+	int status = 0;
+
+	switch (ltoh16_ua((uint8 *)&cmd->opcode)) {
+	case HCI_Enhanced_Flush: {
+		eflush_cmd_parms_t *cmdparms = (eflush_cmd_parms_t *)cmd->parms;
+		dhd_bta_flush_hcidata(pub, ltoh16_ua(cmdparms->llh));
+		break;
+	}
+	default:
+		break;
+	}
+
+	return status;
+}
+
+/* Send HCI cmd encapsulated in BT-SIG frame via data channel to the dongle. */
+int
+dhd_bta_docmd(dhd_pub_t *pub, void *cmd_buf, uint cmd_len)
+{
+	amp_hci_cmd_t *cmd = (amp_hci_cmd_t *)cmd_buf;
+	struct ether_header *eh;
+	struct dot11_llc_snap_header *lsh;
+	osl_t *osh = pub->osh;
+	uint len;
+	void *p;
+	int status;
+
+	if (cmd_len < HCI_CMD_PREAMBLE_SIZE) {
+		DHD_ERROR(("dhd_bta_docmd: short command, cmd_len %u\n", cmd_len));
+		return BCME_BADLEN;
+	}
+
+	if ((len = (uint)cmd->plen + HCI_CMD_PREAMBLE_SIZE) > cmd_len) {
+		DHD_ERROR(("dhd_bta_docmd: malformed command, len %u cmd_len %u\n",
+		           len, cmd_len));
+		/* return BCME_BADLEN; */
+	}
+
+	p = PKTGET(osh, pub->hdrlen + RFC1042_HDR_LEN + len, TRUE);
+	if (p == NULL) {
+		DHD_ERROR(("dhd_bta_docmd: out of memory\n"));
+		return BCME_NOMEM;
+	}
+
+
+	/* intercept and handle the HCI cmd locally */
+	if ((status = _dhd_bta_docmd(pub, cmd)) > 0)
+		return 0;
+	else if (status < 0)
+		return status;
+
+	/* copy in HCI cmd */
+	PKTPULL(osh, p, pub->hdrlen + RFC1042_HDR_LEN);
+	bcopy(cmd, PKTDATA(osh, p), len);
+
+	/* copy in partial Ethernet header with BT-SIG LLC/SNAP header */
+	PKTPUSH(osh, p, RFC1042_HDR_LEN);
+	eh = (struct ether_header *)PKTDATA(osh, p);
+	bzero(eh->ether_dhost, ETHER_ADDR_LEN);
+	ETHER_SET_LOCALADDR(eh->ether_dhost);
+	bcopy(&pub->mac, eh->ether_shost, ETHER_ADDR_LEN);
+	eh->ether_type = hton16(len + DOT11_LLC_SNAP_HDR_LEN);
+	lsh = (struct dot11_llc_snap_header *)&eh[1];
+	bcopy(BT_SIG_SNAP_MPROT, lsh, DOT11_LLC_SNAP_HDR_LEN - 2);
+	lsh->type = 0;
+
+	return dhd_sendpkt(pub, 0, p);
+}
+#endif /* !SEND_HCI_CMD_VIA_IOCTL */
+
+/* Send HCI ACL data to dongle via data channel */
+int
+dhd_bta_tx_hcidata(dhd_pub_t *pub, void *data_buf, uint data_len)
+{
+	amp_hci_ACL_data_t *data = (amp_hci_ACL_data_t *)data_buf;
+	struct ether_header *eh;
+	struct dot11_llc_snap_header *lsh;
+	osl_t *osh = pub->osh;
+	uint len;
+	void *p;
+
+	if (data_len < HCI_ACL_DATA_PREAMBLE_SIZE) {
+		DHD_ERROR(("dhd_bta_tx_hcidata: short data_buf, data_len %u\n", data_len));
+		return BCME_BADLEN;
+	}
+
+	if ((len = (uint)ltoh16(data->dlen) + HCI_ACL_DATA_PREAMBLE_SIZE) > data_len) {
+		DHD_ERROR(("dhd_bta_tx_hcidata: malformed hci data, len %u data_len %u\n",
+		           len, data_len));
+		/* return BCME_BADLEN; */
+	}
+
+	p = PKTGET(osh, pub->hdrlen + RFC1042_HDR_LEN + len, TRUE);
+	if (p == NULL) {
+		DHD_ERROR(("dhd_bta_tx_hcidata: out of memory\n"));
+		return BCME_NOMEM;
+	}
+
+
+	/* copy in HCI ACL data header and HCI ACL data */
+	PKTPULL(osh, p, pub->hdrlen + RFC1042_HDR_LEN);
+	bcopy(data, PKTDATA(osh, p), len);
+
+	/* copy in partial Ethernet header with BT-SIG LLC/SNAP header */
+	PKTPUSH(osh, p, RFC1042_HDR_LEN);
+	eh = (struct ether_header *)PKTDATA(osh, p);
+	bzero(eh->ether_dhost, ETHER_ADDR_LEN);
+	bcopy(&pub->mac, eh->ether_shost, ETHER_ADDR_LEN);
+	eh->ether_type = hton16(len + DOT11_LLC_SNAP_HDR_LEN);
+	lsh = (struct dot11_llc_snap_header *)&eh[1];
+	bcopy(BT_SIG_SNAP_MPROT, lsh, DOT11_LLC_SNAP_HDR_LEN - 2);
+	lsh->type = HTON16(BTA_PROT_L2CAP);
+
+	return dhd_sendpkt(pub, 0, p);
+}
+
+/* txcomplete callback */
+void
+dhd_bta_tx_hcidata_complete(dhd_pub_t *dhdp, void *txp, bool success)
+{
+	uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, txp);
+	amp_hci_ACL_data_t *ACL_data = (amp_hci_ACL_data_t *)(pktdata + RFC1042_HDR_LEN);
+	uint16 handle = ltoh16(ACL_data->handle);
+	uint16 llh = HCI_ACL_DATA_HANDLE(handle);
+
+	wl_event_msg_t event;
+	uint8 data[HCI_EVT_PREAMBLE_SIZE + sizeof(num_completed_data_blocks_evt_parms_t)];
+	amp_hci_event_t *evt;
+	num_completed_data_blocks_evt_parms_t *parms;
+
+	uint16 len = HCI_EVT_PREAMBLE_SIZE + sizeof(num_completed_data_blocks_evt_parms_t);
+
+	/* update the event struct */
+	memset(&event, 0, sizeof(event));
+	event.version = hton16(BCM_EVENT_MSG_VERSION);
+	event.event_type = hton32(WLC_E_BTA_HCI_EVENT);
+	event.status = 0;
+	event.reason = 0;
+	event.auth_type = 0;
+	event.datalen = hton32(len);
+	event.flags = 0;
+
+	/* generate Number of Completed Blocks event */
+	evt = (amp_hci_event_t *)data;
+	evt->ecode = HCI_Number_of_Completed_Data_Blocks;
+	evt->plen = sizeof(num_completed_data_blocks_evt_parms_t);
+
+	parms = (num_completed_data_blocks_evt_parms_t *)evt->parms;
+	htol16_ua_store(dhdp->maxdatablks, (uint8 *)&parms->num_blocks);
+	parms->num_handles = 1;
+	htol16_ua_store(llh, (uint8 *)&parms->completed[0].handle);
+	parms->completed[0].pkts = 1;
+	parms->completed[0].blocks = 1;
+
+	dhd_sendup_event_common(dhdp, &event, data);
+}
+
+/* event callback */
+void
+dhd_bta_doevt(dhd_pub_t *dhdp, void *data_buf, uint data_len)
+{
+	amp_hci_event_t *evt = (amp_hci_event_t *)data_buf;
+
+	ASSERT(dhdp);
+	ASSERT(evt);
+
+	switch (evt->ecode) {
+	case HCI_Command_Complete: {
+		cmd_complete_parms_t *parms = (cmd_complete_parms_t *)evt->parms;
+		switch (ltoh16_ua((uint8 *)&parms->opcode)) {
+		case HCI_Read_Data_Block_Size: {
+			read_data_block_size_evt_parms_t *parms2 =
+			        (read_data_block_size_evt_parms_t *)parms->parms;
+			dhdp->maxdatablks = ltoh16_ua((uint8 *)&parms2->data_block_num);
+			break;
+		}
+		}
+		break;
+	}
+
+	case HCI_Flush_Occurred: {
+		flush_occurred_evt_parms_t *evt_parms = (flush_occurred_evt_parms_t *)evt->parms;
+		dhd_bta_flush_hcidata(dhdp, ltoh16_ua((uint8 *)&evt_parms->handle));
+		break;
+	}
+	default:
+		break;
+	}
+}
diff --git a/drivers/net/wireless/bcmdhd/dhd_bta.h b/drivers/net/wireless/bcmdhd/dhd_bta.h
new file mode 100644
index 0000000..db636a8
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_bta.h
@@ -0,0 +1,39 @@
+/*
+ * BT-AMP support routines
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_bta.h 291086 2011-10-21 01:17:24Z $
+ */
+#ifndef __dhd_bta_h__
+#define __dhd_bta_h__
+
+struct dhd_pub;
+
+extern int dhd_bta_docmd(struct dhd_pub *pub, void *cmd_buf, uint cmd_len);
+
+extern void dhd_bta_doevt(struct dhd_pub *pub, void *data_buf, uint data_len);
+
+extern int dhd_bta_tx_hcidata(struct dhd_pub *pub, void *data_buf, uint data_len);
+extern void dhd_bta_tx_hcidata_complete(struct dhd_pub *dhdp, void *txp, bool success);
+
+
+#endif /* __dhd_bta_h__ */
diff --git a/drivers/net/wireless/bcmdhd/dhd_bus.h b/drivers/net/wireless/bcmdhd/dhd_bus.h
new file mode 100644
index 0000000..bc7c869
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_bus.h
@@ -0,0 +1,190 @@
+/*
+ * Header file describing the internal (inter-module) DHD interfaces.
+ *
+ * Provides type definitions and function prototypes used to link the
+ * DHD OS, bus, and protocol modules.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ *
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ *
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_bus.h 469959 2014-04-11 23:07:39Z $
+ */
+
+#ifndef _dhd_bus_h_
+#define _dhd_bus_h_
+
+/*
+ * Exported from dhd bus module (dhd_usb, dhd_sdio)
+ */
+
+/* Indicate (dis)interest in finding dongles. */
+extern int dhd_bus_register(void);
+extern void dhd_bus_unregister(void);
+
+/* Download firmware image and nvram image */
+extern int dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh, char *fw_path, char *nv_path);
+
+/* Stop bus module: clear pending frames, disable data flow */
+extern void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex);
+
+/* Initialize bus module: prepare for communication w/dongle */
+extern int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex);
+
+/* Get the Bus Idle Time */
+extern void dhd_bus_getidletime(dhd_pub_t *dhdp, int *idletime);
+
+/* Set the Bus Idle Time */
+extern void dhd_bus_setidletime(dhd_pub_t *dhdp, int idle_time);
+
+/* Send a data frame to the dongle.  Callee disposes of txp. */
+#ifdef BCMPCIE
+extern int dhd_bus_txdata(struct dhd_bus *bus, void *txp, uint8 ifidx);
+#else
+extern int dhd_bus_txdata(struct dhd_bus *bus, void *txp);
+#endif
+
+
+/* Send/receive a control message to/from the dongle.
+ * Expects caller to enforce a single outstanding transaction.
+ */
+extern int dhd_bus_txctl(struct dhd_bus *bus, uchar *msg, uint msglen);
+extern int dhd_bus_rxctl(struct dhd_bus *bus, uchar *msg, uint msglen);
+
+/* Watchdog timer function */
+extern bool dhd_bus_watchdog(dhd_pub_t *dhd);
+
+extern int dhd_bus_oob_intr_register(dhd_pub_t *dhdp);
+extern void dhd_bus_oob_intr_unregister(dhd_pub_t *dhdp);
+extern void dhd_bus_oob_intr_set(dhd_pub_t *dhdp, bool enable);
+extern void dhd_bus_dev_pm_stay_awake(dhd_pub_t *dhdpub);
+extern void dhd_bus_dev_pm_relax(dhd_pub_t *dhdpub);
+extern bool dhd_bus_dev_pm_enabled(dhd_pub_t *dhdpub);
+
+#if defined(DHD_DEBUG)
+/* Device console input function */
+extern int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen);
+#endif /* defined(DHD_DEBUG) */
+
+/* Deferred processing for the bus, return TRUE requests reschedule */
+extern bool dhd_bus_dpc(struct dhd_bus *bus);
+extern void dhd_bus_isr(bool * InterruptRecognized, bool * QueueMiniportHandleInterrupt, void *arg);
+
+
+/* Check for and handle local prot-specific iovar commands */
+extern int dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name,
+                            void *params, int plen, void *arg, int len, bool set);
+
+/* Add bus dump output to a buffer */
+extern void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf);
+
+/* Clear any bus counters */
+extern void dhd_bus_clearcounts(dhd_pub_t *dhdp);
+
+/* return the dongle chipid */
+extern uint dhd_bus_chip(struct dhd_bus *bus);
+
+/* return the dongle chiprev */
+extern uint dhd_bus_chiprev(struct dhd_bus *bus);
+
+/* Set user-specified nvram parameters. */
+extern void dhd_bus_set_nvram_params(struct dhd_bus * bus, const char *nvram_params);
+
+extern void *dhd_bus_pub(struct dhd_bus *bus);
+extern void *dhd_bus_txq(struct dhd_bus *bus);
+extern void *dhd_bus_sih(struct dhd_bus *bus);
+extern uint dhd_bus_hdrlen(struct dhd_bus *bus);
+#ifdef BCMSDIO
+extern void dhd_bus_set_dotxinrx(struct dhd_bus *bus, bool val);
+#else
+#define dhd_bus_set_dotxinrx(a, b) do {} while (0)
+#endif
+
+#define DHD_SET_BUS_STATE_DOWN(_bus)  do { \
+	(_bus)->dhd->busstate = DHD_BUS_DOWN; \
+} while (0)
+
+/* Register a dummy SDIO client driver in order to be notified of new SDIO device */
+extern int dhd_bus_reg_sdio_notify(void* semaphore);
+extern void dhd_bus_unreg_sdio_notify(void);
+extern void dhd_txglom_enable(dhd_pub_t *dhdp, bool enable);
+extern int dhd_bus_get_ids(struct dhd_bus *bus, uint32 *bus_type, uint32 *bus_num,
+	uint32 *slot_num);
+
+#ifdef BCMPCIE
+enum {
+	DNGL_TO_HOST_BUF_IOCT,
+	DNGL_TO_HOST_DMA_SCRATCH_BUFFER,
+	DNGL_TO_HOST_DMA_SCRATCH_BUFFER_LEN,
+	HOST_TO_DNGL_DMA_WRITEINDX_BUFFER,
+	HOST_TO_DNGL_DMA_READINDX_BUFFER,
+	DNGL_TO_HOST_DMA_WRITEINDX_BUFFER,
+	DNGL_TO_HOST_DMA_READINDX_BUFFER,
+	TOTAL_LFRAG_PACKET_CNT,
+	HTOD_MB_DATA,
+	DTOH_MB_DATA,
+	RING_BUF_ADDR,
+	H2D_DMA_WRITEINDX,
+	H2D_DMA_READINDX,
+	D2H_DMA_WRITEINDX,
+	D2H_DMA_READINDX,
+	RING_READ_PTR,
+	RING_WRITE_PTR,
+	RING_LEN_ITEMS,
+	RING_MAX_ITEM,
+	MAX_HOST_RXBUFS
+};
+typedef void (*dhd_mb_ring_t) (struct dhd_bus *, uint32);
+extern void dhd_bus_cmn_writeshared(struct dhd_bus *bus, void * data, uint32 len, uint8 type,
+	uint16 ringid);
+extern void dhd_bus_ringbell(struct dhd_bus *bus, uint32 value);
+extern void dhd_bus_cmn_readshared(struct dhd_bus *bus, void* data, uint8 type, uint16 ringid);
+extern uint32 dhd_bus_get_sharedflags(struct dhd_bus *bus);
+extern void dhd_bus_rx_frame(struct dhd_bus *bus, void* pkt, int ifidx, uint pkt_count);
+extern void dhd_bus_start_queue(struct dhd_bus *bus);
+extern void dhd_bus_stop_queue(struct dhd_bus *bus);
+extern void dhd_bus_update_retlen(struct dhd_bus *bus, uint32 retlen, uint32 cmd_id, uint16 status,
+	uint32 resp_len);
+extern dhd_mb_ring_t dhd_bus_get_mbintr_fn(struct dhd_bus *bus);
+extern void dhd_bus_write_flow_ring_states(struct dhd_bus *bus,
+	void * data, uint16 flowid);
+extern void dhd_bus_read_flow_ring_states(struct dhd_bus *bus,
+	void * data, uint8 flowid);
+extern int dhd_bus_flow_ring_create_request(struct dhd_bus *bus, void *flow_ring_node);
+extern void dhd_bus_clean_flow_ring(struct dhd_bus *bus, void *flow_ring_node);
+extern void dhd_bus_flow_ring_create_response(struct dhd_bus *bus, uint16 flow_id, int32 status);
+extern int dhd_bus_flow_ring_delete_request(struct dhd_bus *bus, void *flow_ring_node);
+extern void dhd_bus_flow_ring_delete_response(struct dhd_bus *bus, uint16 flowid, uint32 status);
+extern int dhd_bus_flow_ring_flush_request(struct dhd_bus *bus, void *flow_ring_node);
+extern void dhd_bus_flow_ring_flush_response(struct dhd_bus *bus, uint16 flowid, uint32 status);
+extern uint8 dhd_bus_is_txmode_push(struct dhd_bus *bus);
+extern uint32 dhd_bus_max_h2d_queues(struct dhd_bus *bus, uint8 *txpush);
+extern int dhd_bus_schedule_queue(struct dhd_bus *bus, uint16 flow_id, bool txs);
+extern int dhdpcie_bus_clock_start(struct dhd_bus *bus);
+extern int dhdpcie_bus_clock_stop(struct dhd_bus *bus);
+extern int dhdpcie_bus_enable_device(struct dhd_bus *bus);
+extern int dhdpcie_bus_disable_device(struct dhd_bus *bus);
+extern int dhdpcie_bus_alloc_resource(struct dhd_bus *bus);
+extern void dhdpcie_bus_free_resource(struct dhd_bus *bus);
+extern bool dhdpcie_bus_dongle_attach(struct dhd_bus *bus);
+extern int dhd_bus_release_dongle(struct dhd_bus *bus);
+extern int dhd_bus_request_irq(struct dhd_bus *bus);
+
+#endif /* BCMPCIE */
+#endif /* _dhd_bus_h_ */
diff --git a/drivers/net/wireless/bcmdhd/dhd_cdc.c b/drivers/net/wireless/bcmdhd/dhd_cdc.c
new file mode 100644
index 0000000..4a69389
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_cdc.c
@@ -0,0 +1,811 @@
+/*
+ * DHD Protocol Module for CDC and BDC.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_cdc.c 472193 2014-04-23 06:27:38Z $
+ *
+ * BDC is like CDC, except it includes a header for data packets to convey
+ * packet priority over the bus, and flags (e.g. to indicate checksum status
+ * for dongle offload.)
+ */
+
+#include <typedefs.h>
+#include <osl.h>
+
+#include <bcmutils.h>
+#include <bcmcdc.h>
+#include <bcmendian.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_proto.h>
+#include <dhd_bus.h>
+#include <dhd_dbg.h>
+
+
+#ifdef PROP_TXSTATUS
+#include <wlfc_proto.h>
+#include <dhd_wlfc.h>
+#endif
+
+
+#define RETRIES 2		/* # of retries to retrieve matching ioctl response */
+#define BUS_HEADER_LEN	(24+DHD_SDALIGN)	/* Must be at least SDPCM_RESERVE
+				 * defined in dhd_sdio.c (amount of header tha might be added)
+				 * plus any space that might be needed for alignment padding.
+				 */
+#define ROUND_UP_MARGIN	2048	/* Biggest SDIO block size possible for
+				 * round off at the end of buffer
+				 */
+
+typedef struct dhd_prot {
+	uint16 reqid;
+	uint8 pending;
+	uint32 lastcmd;
+	uint8 bus_header[BUS_HEADER_LEN];
+	cdc_ioctl_t msg;
+	unsigned char buf[WLC_IOCTL_MAXLEN + ROUND_UP_MARGIN];
+} dhd_prot_t;
+
+
+static int
+dhdcdc_msg(dhd_pub_t *dhd)
+{
+	int err = 0;
+	dhd_prot_t *prot = dhd->prot;
+	int len = ltoh32(prot->msg.len) + sizeof(cdc_ioctl_t);
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	DHD_OS_WAKE_LOCK(dhd);
+
+	/* NOTE : cdc->msg.len holds the desired length of the buffer to be
+	 *        returned. Only up to CDC_MAX_MSG_SIZE of this buffer area
+	 *	  is actually sent to the dongle
+	 */
+	if (len > CDC_MAX_MSG_SIZE)
+		len = CDC_MAX_MSG_SIZE;
+
+	/* Send request */
+	err = dhd_bus_txctl(dhd->bus, (uchar*)&prot->msg, len);
+
+	DHD_OS_WAKE_UNLOCK(dhd);
+	return err;
+}
+
+static int
+dhdcdc_cmplt(dhd_pub_t *dhd, uint32 id, uint32 len)
+{
+	int ret;
+	int cdc_len = len + sizeof(cdc_ioctl_t);
+	dhd_prot_t *prot = dhd->prot;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+
+	do {
+		ret = dhd_bus_rxctl(dhd->bus, (uchar*)&prot->msg, cdc_len);
+		if (ret < 0)
+			break;
+	} while (CDC_IOC_ID(ltoh32(prot->msg.flags)) != id);
+
+
+	return ret;
+}
+
+static int
+dhdcdc_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
+{
+	dhd_prot_t *prot = dhd->prot;
+	cdc_ioctl_t *msg = &prot->msg;
+	int ret = 0, retries = 0;
+	uint32 id, flags = 0;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+	DHD_CTL(("%s: cmd %d len %d\n", __FUNCTION__, cmd, len));
+
+
+	/* Respond "bcmerror" and "bcmerrorstr" with local cache */
+	if (cmd == WLC_GET_VAR && buf)
+	{
+		if (!strcmp((char *)buf, "bcmerrorstr"))
+		{
+			strncpy((char *)buf, bcmerrorstr(dhd->dongle_error), BCME_STRLEN);
+			goto done;
+		}
+		else if (!strcmp((char *)buf, "bcmerror"))
+		{
+			*(int *)buf = dhd->dongle_error;
+			goto done;
+		}
+	}
+
+	memset(msg, 0, sizeof(cdc_ioctl_t));
+
+	msg->cmd = htol32(cmd);
+	msg->len = htol32(len);
+	msg->flags = (++prot->reqid << CDCF_IOC_ID_SHIFT);
+	CDC_SET_IF_IDX(msg, ifidx);
+	/* add additional action bits */
+	action &= WL_IOCTL_ACTION_MASK;
+	msg->flags |= (action << CDCF_IOC_ACTION_SHIFT);
+	msg->flags = htol32(msg->flags);
+
+	if (buf)
+		memcpy(prot->buf, buf, len);
+
+	if ((ret = dhdcdc_msg(dhd)) < 0) {
+		if (!dhd->hang_was_sent)
+		DHD_ERROR(("dhdcdc_query_ioctl: dhdcdc_msg failed w/status %d\n", ret));
+		goto done;
+	}
+
+retry:
+	/* wait for interrupt and get first fragment */
+	if ((ret = dhdcdc_cmplt(dhd, prot->reqid, len)) < 0)
+		goto done;
+
+	flags = ltoh32(msg->flags);
+	id = (flags & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT;
+
+	if ((id < prot->reqid) && (++retries < RETRIES))
+		goto retry;
+	if (id != prot->reqid) {
+		DHD_ERROR(("%s: %s: unexpected request id %d (expected %d)\n",
+		           dhd_ifname(dhd, ifidx), __FUNCTION__, id, prot->reqid));
+		ret = -EINVAL;
+		goto done;
+	}
+
+	/* Copy info buffer */
+	if (buf)
+	{
+		if (ret < (int)len)
+			len = ret;
+		memcpy(buf, (void*) prot->buf, len);
+	}
+
+	/* Check the ERROR flag */
+	if (flags & CDCF_IOC_ERROR)
+	{
+		ret = ltoh32(msg->status);
+		/* Cache error from dongle */
+		dhd->dongle_error = ret;
+	}
+
+done:
+	return ret;
+}
+
+
+static int
+dhdcdc_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
+{
+	dhd_prot_t *prot = dhd->prot;
+	cdc_ioctl_t *msg = &prot->msg;
+	int ret = 0;
+	uint32 flags, id;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+	DHD_CTL(("%s: cmd %d len %d\n", __FUNCTION__, cmd, len));
+
+	if (dhd->busstate == DHD_BUS_DOWN) {
+		DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
+		return -EIO;
+	}
+
+	/* don't talk to the dongle if fw is about to be reloaded */
+	if (dhd->hang_was_sent) {
+		DHD_ERROR(("%s: HANG was sent up earlier. Not talking to the chip\n",
+			__FUNCTION__));
+		return -EIO;
+	}
+
+	memset(msg, 0, sizeof(cdc_ioctl_t));
+
+	msg->cmd = htol32(cmd);
+	msg->len = htol32(len);
+	msg->flags = (++prot->reqid << CDCF_IOC_ID_SHIFT);
+	CDC_SET_IF_IDX(msg, ifidx);
+	/* add additional action bits */
+	action &= WL_IOCTL_ACTION_MASK;
+	msg->flags |= (action << CDCF_IOC_ACTION_SHIFT) | CDCF_IOC_SET;
+	msg->flags = htol32(msg->flags);
+
+	if (buf)
+		memcpy(prot->buf, buf, len);
+
+	if ((ret = dhdcdc_msg(dhd)) < 0) {
+		DHD_ERROR(("%s: dhdcdc_msg failed w/status %d\n", __FUNCTION__, ret));
+		goto done;
+	}
+
+	if ((ret = dhdcdc_cmplt(dhd, prot->reqid, len)) < 0)
+		goto done;
+
+	flags = ltoh32(msg->flags);
+	id = (flags & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT;
+
+	if (id != prot->reqid) {
+		DHD_ERROR(("%s: %s: unexpected request id %d (expected %d)\n",
+		           dhd_ifname(dhd, ifidx), __FUNCTION__, id, prot->reqid));
+		ret = -EINVAL;
+		goto done;
+	}
+
+	/* Check the ERROR flag */
+	if (flags & CDCF_IOC_ERROR)
+	{
+		ret = ltoh32(msg->status);
+		/* Cache error from dongle */
+		dhd->dongle_error = ret;
+	}
+
+done:
+	return ret;
+}
+
+
+int
+dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len)
+{
+	dhd_prot_t *prot = dhd->prot;
+	int ret = -1;
+	uint8 action;
+
+	if ((dhd->busstate == DHD_BUS_DOWN) || dhd->hang_was_sent) {
+		DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
+		goto done;
+	}
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	ASSERT(len <= WLC_IOCTL_MAXLEN);
+
+	if (len > WLC_IOCTL_MAXLEN)
+		goto done;
+
+	if (prot->pending == TRUE) {
+		DHD_ERROR(("CDC packet is pending!!!! cmd=0x%x (%lu) lastcmd=0x%x (%lu)\n",
+			ioc->cmd, (unsigned long)ioc->cmd, prot->lastcmd,
+			(unsigned long)prot->lastcmd));
+		if ((ioc->cmd == WLC_SET_VAR) || (ioc->cmd == WLC_GET_VAR)) {
+			DHD_TRACE(("iovar cmd=%s\n", (char*)buf));
+		}
+		goto done;
+	}
+
+	prot->pending = TRUE;
+	prot->lastcmd = ioc->cmd;
+	action = ioc->set;
+	if (action & WL_IOCTL_ACTION_SET)
+		ret = dhdcdc_set_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
+	else {
+		ret = dhdcdc_query_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
+		if (ret > 0)
+			ioc->used = ret - sizeof(cdc_ioctl_t);
+	}
+
+	/* Too many programs assume ioctl() returns 0 on success */
+	if (ret >= 0)
+		ret = 0;
+	else {
+		cdc_ioctl_t *msg = &prot->msg;
+		ioc->needed = ltoh32(msg->len); /* len == needed when set/query fails from dongle */
+	}
+
+	/* Intercept the wme_dp ioctl here */
+	if ((!ret) && (ioc->cmd == WLC_SET_VAR) && (!strcmp(buf, "wme_dp"))) {
+		int slen, val = 0;
+
+		slen = strlen("wme_dp") + 1;
+		if (len >= (int)(slen + sizeof(int)))
+			bcopy(((char *)buf + slen), &val, sizeof(int));
+		dhd->wme_dp = (uint8) ltoh32(val);
+	}
+
+	prot->pending = FALSE;
+
+done:
+
+	return ret;
+}
+
+int
+dhd_prot_iovar_op(dhd_pub_t *dhdp, const char *name,
+                  void *params, int plen, void *arg, int len, bool set)
+{
+	return BCME_UNSUPPORTED;
+}
+
+void
+dhd_prot_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
+{
+	bcm_bprintf(strbuf, "Protocol CDC: reqid %d\n", dhdp->prot->reqid);
+#ifdef PROP_TXSTATUS
+	dhd_wlfc_dump(dhdp, strbuf);
+#endif
+}
+
+/*	The FreeBSD PKTPUSH could change the packet buf pinter
+	so we need to make it changable
+*/
+#define PKTBUF pktbuf
+void
+dhd_prot_hdrpush(dhd_pub_t *dhd, int ifidx, void *PKTBUF)
+{
+#ifdef BDC
+	struct bdc_header *h;
+#endif /* BDC */
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+#ifdef BDC
+	/* Push BDC header used to convey priority for buses that don't */
+
+	PKTPUSH(dhd->osh, PKTBUF, BDC_HEADER_LEN);
+
+	h = (struct bdc_header *)PKTDATA(dhd->osh, PKTBUF);
+
+	h->flags = (BDC_PROTO_VER << BDC_FLAG_VER_SHIFT);
+	if (PKTSUMNEEDED(PKTBUF))
+		h->flags |= BDC_FLAG_SUM_NEEDED;
+
+
+	h->priority = (PKTPRIO(PKTBUF) & BDC_PRIORITY_MASK);
+	h->flags2 = 0;
+	h->dataOffset = 0;
+#endif /* BDC */
+	BDC_SET_IF_IDX(h, ifidx);
+}
+#undef PKTBUF	/* Only defined in the above routine */
+
+uint
+dhd_prot_hdrlen(dhd_pub_t *dhd, void *PKTBUF)
+{
+	uint hdrlen = 0;
+#ifdef BDC
+	/* Length of BDC(+WLFC) headers pushed */
+	hdrlen = BDC_HEADER_LEN + (((struct bdc_header *)PKTBUF)->dataOffset * 4);
+#endif
+	return hdrlen;
+}
+
+int
+dhd_prot_hdrpull(dhd_pub_t *dhd, int *ifidx, void *pktbuf, uchar *reorder_buf_info,
+	uint *reorder_info_len)
+{
+#ifdef BDC
+	struct bdc_header *h;
+#endif
+	uint8 data_offset = 0;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+#ifdef BDC
+	if (reorder_info_len)
+		*reorder_info_len = 0;
+	/* Pop BDC header used to convey priority for buses that don't */
+
+	if (PKTLEN(dhd->osh, pktbuf) < BDC_HEADER_LEN) {
+		DHD_ERROR(("%s: rx data too short (%d < %d)\n", __FUNCTION__,
+		           PKTLEN(dhd->osh, pktbuf), BDC_HEADER_LEN));
+		return BCME_ERROR;
+	}
+
+	h = (struct bdc_header *)PKTDATA(dhd->osh, pktbuf);
+
+	if (!ifidx) {
+		/* for tx packet, skip the analysis */
+		data_offset = h->dataOffset;
+		PKTPULL(dhd->osh, pktbuf, BDC_HEADER_LEN);
+		goto exit;
+	}
+
+	if ((*ifidx = BDC_GET_IF_IDX(h)) >= DHD_MAX_IFS) {
+		DHD_ERROR(("%s: rx data ifnum out of range (%d)\n",
+		           __FUNCTION__, *ifidx));
+		return BCME_ERROR;
+	}
+
+	if (((h->flags & BDC_FLAG_VER_MASK) >> BDC_FLAG_VER_SHIFT) != BDC_PROTO_VER) {
+		DHD_ERROR(("%s: non-BDC packet received, flags = 0x%x\n",
+		           dhd_ifname(dhd, *ifidx), h->flags));
+		if (((h->flags & BDC_FLAG_VER_MASK) >> BDC_FLAG_VER_SHIFT) == BDC_PROTO_VER_1)
+			h->dataOffset = 0;
+		else
+		return BCME_ERROR;
+	}
+
+	if (h->flags & BDC_FLAG_SUM_GOOD) {
+		DHD_INFO(("%s: BDC packet received with good rx-csum, flags 0x%x\n",
+		          dhd_ifname(dhd, *ifidx), h->flags));
+		PKTSETSUMGOOD(pktbuf, TRUE);
+	}
+
+	PKTSETPRIO(pktbuf, (h->priority & BDC_PRIORITY_MASK));
+	data_offset = h->dataOffset;
+	PKTPULL(dhd->osh, pktbuf, BDC_HEADER_LEN);
+#endif /* BDC */
+
+
+#ifdef PROP_TXSTATUS
+	if (!DHD_PKTTAG_PKTDIR(PKTTAG(pktbuf))) {
+		/*
+		- parse txstatus only for packets that came from the firmware
+		*/
+		dhd_wlfc_parse_header_info(dhd, pktbuf, (data_offset << 2),
+			reorder_buf_info, reorder_info_len);
+
+	}
+#endif /* PROP_TXSTATUS */
+
+exit:
+	PKTPULL(dhd->osh, pktbuf, (data_offset << 2));
+	return 0;
+}
+
+
+int
+dhd_prot_attach(dhd_pub_t *dhd)
+{
+	dhd_prot_t *cdc;
+
+	if (!(cdc = (dhd_prot_t *)DHD_OS_PREALLOC(dhd, DHD_PREALLOC_PROT, sizeof(dhd_prot_t)))) {
+		DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
+		goto fail;
+	}
+	memset(cdc, 0, sizeof(dhd_prot_t));
+
+	/* ensure that the msg buf directly follows the cdc msg struct */
+	if ((uintptr)(&cdc->msg + 1) != (uintptr)cdc->buf) {
+		DHD_ERROR(("dhd_prot_t is not correctly defined\n"));
+		goto fail;
+	}
+
+	dhd->prot = cdc;
+#ifdef BDC
+	dhd->hdrlen += BDC_HEADER_LEN;
+#endif
+	dhd->maxctl = WLC_IOCTL_MAXLEN + sizeof(cdc_ioctl_t) + ROUND_UP_MARGIN;
+	return 0;
+
+fail:
+	if (cdc != NULL)
+		DHD_OS_PREFREE(dhd, cdc, sizeof(dhd_prot_t));
+	return BCME_NOMEM;
+}
+
+/* ~NOTE~ What if another thread is waiting on the semaphore?  Holding it? */
+void
+dhd_prot_detach(dhd_pub_t *dhd)
+{
+#ifdef PROP_TXSTATUS
+	dhd_wlfc_deinit(dhd);
+#endif
+	DHD_OS_PREFREE(dhd, dhd->prot, sizeof(dhd_prot_t));
+	dhd->prot = NULL;
+}
+
+void
+dhd_prot_dstats(dhd_pub_t *dhd)
+{
+	/*  copy bus stats */
+
+	dhd->dstats.tx_packets = dhd->tx_packets;
+	dhd->dstats.tx_errors = dhd->tx_errors;
+	dhd->dstats.rx_packets = dhd->rx_packets;
+	dhd->dstats.rx_errors = dhd->rx_errors;
+	dhd->dstats.rx_dropped = dhd->rx_dropped;
+	dhd->dstats.multicast = dhd->rx_multicast;
+	return;
+}
+
+int
+dhd_sync_with_dongle(dhd_pub_t *dhd)
+{
+	int ret = 0;
+	wlc_rev_info_t revinfo;
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+
+	/* Get the device rev info */
+	memset(&revinfo, 0, sizeof(revinfo));
+	ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_REVINFO, &revinfo, sizeof(revinfo), FALSE, 0);
+	if (ret < 0)
+		goto done;
+
+
+	dhd_process_cid_mac(dhd, TRUE);
+
+	ret = dhd_preinit_ioctls(dhd);
+
+	if (!ret)
+		dhd_process_cid_mac(dhd, FALSE);
+
+	/* Always assumes wl for now */
+	dhd->iswl = TRUE;
+
+done:
+	return ret;
+}
+
+int dhd_prot_init(dhd_pub_t *dhd)
+{
+	return TRUE;
+}
+
+void
+dhd_prot_stop(dhd_pub_t *dhd)
+{
+/* Nothing to do for CDC */
+}
+
+
+static void
+dhd_get_hostreorder_pkts(void *osh, struct reorder_info *ptr, void **pkt,
+	uint32 *pkt_count, void **pplast, uint8 start, uint8 end)
+{
+	void *plast = NULL, *p;
+	uint32 pkt_cnt = 0;
+
+	if (ptr->pend_pkts == 0) {
+		DHD_REORDER(("%s: no packets in reorder queue \n", __FUNCTION__));
+		*pplast = NULL;
+		*pkt_count = 0;
+		*pkt = NULL;
+		return;
+	}
+	do {
+		p = (void *)(ptr->p[start]);
+		ptr->p[start] = NULL;
+
+		if (p != NULL) {
+			if (plast == NULL)
+				*pkt = p;
+			else
+				PKTSETNEXT(osh, plast, p);
+
+			plast = p;
+			pkt_cnt++;
+		}
+		start++;
+		if (start > ptr->max_idx)
+			start = 0;
+	} while (start != end);
+	*pplast = plast;
+	*pkt_count = pkt_cnt;
+	ptr->pend_pkts -= (uint8)pkt_cnt;
+}
+
+int
+dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf, uint reorder_info_len,
+	void **pkt, uint32 *pkt_count)
+{
+	uint8 flow_id, max_idx, cur_idx, exp_idx;
+	struct reorder_info *ptr;
+	uint8 flags;
+	void *cur_pkt, *plast = NULL;
+	uint32 cnt = 0;
+
+	if (pkt == NULL) {
+		if (pkt_count != NULL)
+			*pkt_count = 0;
+		return 0;
+	}
+
+	flow_id = reorder_info_buf[WLHOST_REORDERDATA_FLOWID_OFFSET];
+	flags = reorder_info_buf[WLHOST_REORDERDATA_FLAGS_OFFSET];
+
+	DHD_REORDER(("flow_id %d, flags 0x%02x, idx(%d, %d, %d)\n", flow_id, flags,
+		reorder_info_buf[WLHOST_REORDERDATA_CURIDX_OFFSET],
+		reorder_info_buf[WLHOST_REORDERDATA_EXPIDX_OFFSET],
+		reorder_info_buf[WLHOST_REORDERDATA_MAXIDX_OFFSET]));
+
+	/* validate flags and flow id */
+	if (flags == 0xFF) {
+		DHD_ERROR(("%s: invalid flags...so ignore this packet\n", __FUNCTION__));
+		*pkt_count = 1;
+		return 0;
+	}
+
+	cur_pkt = *pkt;
+	*pkt = NULL;
+
+	ptr = dhd->reorder_bufs[flow_id];
+	if (flags & WLHOST_REORDERDATA_DEL_FLOW) {
+		uint32 buf_size = sizeof(struct reorder_info);
+
+		DHD_REORDER(("%s: Flags indicating to delete a flow id %d\n",
+			__FUNCTION__, flow_id));
+
+		if (ptr == NULL) {
+			DHD_REORDER(("%s: received flags to cleanup, but no flow (%d) yet\n",
+				__FUNCTION__, flow_id));
+			*pkt_count = 1;
+			*pkt = cur_pkt;
+			return 0;
+		}
+
+		dhd_get_hostreorder_pkts(dhd->osh, ptr, pkt, &cnt, &plast,
+			ptr->exp_idx, ptr->exp_idx);
+		/* set it to the last packet */
+		if (plast) {
+			PKTSETNEXT(dhd->osh, plast, cur_pkt);
+			cnt++;
+		}
+		else {
+			if (cnt != 0) {
+				DHD_ERROR(("%s: del flow: something fishy, pending packets %d\n",
+					__FUNCTION__, cnt));
+			}
+			*pkt = cur_pkt;
+			cnt = 1;
+		}
+		buf_size += ((ptr->max_idx + 1) * sizeof(void *));
+		MFREE(dhd->osh, ptr, buf_size);
+		dhd->reorder_bufs[flow_id] = NULL;
+		*pkt_count = cnt;
+		return 0;
+	}
+	/* all the other cases depend on the existance of the reorder struct for that flow id */
+	if (ptr == NULL) {
+		uint32 buf_size_alloc = sizeof(reorder_info_t);
+		max_idx = reorder_info_buf[WLHOST_REORDERDATA_MAXIDX_OFFSET];
+
+		buf_size_alloc += ((max_idx + 1) * sizeof(void*));
+		/* allocate space to hold the buffers, index etc */
+
+		DHD_REORDER(("%s: alloc buffer of size %d size, reorder info id %d, maxidx %d\n",
+			__FUNCTION__, buf_size_alloc, flow_id, max_idx));
+		ptr = (struct reorder_info *)MALLOC(dhd->osh, buf_size_alloc);
+		if (ptr == NULL) {
+			DHD_ERROR(("%s: Malloc failed to alloc buffer\n", __FUNCTION__));
+			*pkt_count = 1;
+			return 0;
+		}
+		bzero(ptr, buf_size_alloc);
+		dhd->reorder_bufs[flow_id] = ptr;
+		ptr->p = (void *)(ptr+1);
+		ptr->max_idx = max_idx;
+	}
+	if (flags & WLHOST_REORDERDATA_NEW_HOLE)  {
+		DHD_REORDER(("%s: new hole, so cleanup pending buffers\n", __FUNCTION__));
+		if (ptr->pend_pkts) {
+			dhd_get_hostreorder_pkts(dhd->osh, ptr, pkt, &cnt, &plast,
+				ptr->exp_idx, ptr->exp_idx);
+			ptr->pend_pkts = 0;
+		}
+		ptr->cur_idx = reorder_info_buf[WLHOST_REORDERDATA_CURIDX_OFFSET];
+		ptr->exp_idx = reorder_info_buf[WLHOST_REORDERDATA_EXPIDX_OFFSET];
+		ptr->max_idx = reorder_info_buf[WLHOST_REORDERDATA_MAXIDX_OFFSET];
+		ptr->p[ptr->cur_idx] = cur_pkt;
+		ptr->pend_pkts++;
+		*pkt_count = cnt;
+	}
+	else if (flags & WLHOST_REORDERDATA_CURIDX_VALID) {
+		cur_idx = reorder_info_buf[WLHOST_REORDERDATA_CURIDX_OFFSET];
+		exp_idx = reorder_info_buf[WLHOST_REORDERDATA_EXPIDX_OFFSET];
+
+
+		if ((exp_idx == ptr->exp_idx) && (cur_idx != ptr->exp_idx)) {
+			/* still in the current hole */
+			/* enqueue the current on the buffer chain */
+			if (ptr->p[cur_idx] != NULL) {
+				DHD_REORDER(("%s: HOLE: ERROR buffer pending..free it\n",
+					__FUNCTION__));
+				PKTFREE(dhd->osh, ptr->p[cur_idx], TRUE);
+				ptr->p[cur_idx] = NULL;
+			}
+			ptr->p[cur_idx] = cur_pkt;
+			ptr->pend_pkts++;
+			ptr->cur_idx = cur_idx;
+			DHD_REORDER(("%s: fill up a hole..pending packets is %d\n",
+				__FUNCTION__, ptr->pend_pkts));
+			*pkt_count = 0;
+			*pkt = NULL;
+		}
+		else if (ptr->exp_idx == cur_idx) {
+			/* got the right one ..flush from cur to exp and update exp */
+			DHD_REORDER(("%s: got the right one now, cur_idx is %d\n",
+				__FUNCTION__, cur_idx));
+			if (ptr->p[cur_idx] != NULL) {
+				DHD_REORDER(("%s: Error buffer pending..free it\n",
+					__FUNCTION__));
+				PKTFREE(dhd->osh, ptr->p[cur_idx], TRUE);
+				ptr->p[cur_idx] = NULL;
+			}
+			ptr->p[cur_idx] = cur_pkt;
+			ptr->pend_pkts++;
+
+			ptr->cur_idx = cur_idx;
+			ptr->exp_idx = exp_idx;
+
+			dhd_get_hostreorder_pkts(dhd->osh, ptr, pkt, &cnt, &plast,
+				cur_idx, exp_idx);
+			*pkt_count = cnt;
+			DHD_REORDER(("%s: freeing up buffers %d, still pending %d\n",
+				__FUNCTION__, cnt, ptr->pend_pkts));
+		}
+		else {
+			uint8 end_idx;
+			bool flush_current = FALSE;
+			/* both cur and exp are moved now .. */
+			DHD_REORDER(("%s:, flow %d, both moved, cur %d(%d), exp %d(%d)\n",
+				__FUNCTION__, flow_id, ptr->cur_idx, cur_idx,
+				ptr->exp_idx, exp_idx));
+			if (flags & WLHOST_REORDERDATA_FLUSH_ALL)
+				end_idx = ptr->exp_idx;
+			else
+				end_idx = exp_idx;
+
+			/* flush pkts first */
+			dhd_get_hostreorder_pkts(dhd->osh, ptr, pkt, &cnt, &plast,
+				ptr->exp_idx, end_idx);
+
+			if (cur_idx == ptr->max_idx) {
+				if (exp_idx == 0)
+					flush_current = TRUE;
+			} else {
+				if (exp_idx == cur_idx + 1)
+					flush_current = TRUE;
+			}
+			if (flush_current) {
+				if (plast)
+					PKTSETNEXT(dhd->osh, plast, cur_pkt);
+				else
+					*pkt = cur_pkt;
+				cnt++;
+			}
+			else {
+				ptr->p[cur_idx] = cur_pkt;
+				ptr->pend_pkts++;
+			}
+			ptr->exp_idx = exp_idx;
+			ptr->cur_idx = cur_idx;
+			*pkt_count = cnt;
+		}
+	}
+	else {
+		uint8 end_idx;
+		/* no real packet but update to exp_seq...that means explicit window move */
+		exp_idx = reorder_info_buf[WLHOST_REORDERDATA_EXPIDX_OFFSET];
+
+		DHD_REORDER(("%s: move the window, cur_idx is %d, exp is %d, new exp is %d\n",
+			__FUNCTION__, ptr->cur_idx, ptr->exp_idx, exp_idx));
+		if (flags & WLHOST_REORDERDATA_FLUSH_ALL)
+			end_idx =  ptr->exp_idx;
+		else
+			end_idx =  exp_idx;
+
+		dhd_get_hostreorder_pkts(dhd->osh, ptr, pkt, &cnt, &plast, ptr->exp_idx, end_idx);
+		if (plast)
+			PKTSETNEXT(dhd->osh, plast, cur_pkt);
+		else
+			*pkt = cur_pkt;
+		cnt++;
+		*pkt_count = cnt;
+		/* set the new expected idx */
+		ptr->exp_idx = exp_idx;
+	}
+	return 0;
+}
diff --git a/drivers/net/wireless/bcmdhd/dhd_cfg80211.c b/drivers/net/wireless/bcmdhd/dhd_cfg80211.c
new file mode 100644
index 0000000..eb98e28
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_cfg80211.c
@@ -0,0 +1,342 @@
+/*
+ * Linux cfg80211 driver - Dongle Host Driver (DHD) related
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_cfg80211.c,v 1.1.4.1.2.14 2011/02/09 01:40:07 Exp $
+ */
+
+#include <linux/vmalloc.h>
+#include <net/rtnetlink.h>
+
+#include <bcmutils.h>
+#include <wldev_common.h>
+#include <wl_cfg80211.h>
+#include <dhd_cfg80211.h>
+
+#ifdef PKT_FILTER_SUPPORT
+#include <dngl_stats.h>
+#include <dhd.h>
+#endif
+
+extern struct bcm_cfg80211 *g_bcm_cfg;
+
+#ifdef PKT_FILTER_SUPPORT
+extern uint dhd_pkt_filter_enable;
+extern uint dhd_master_mode;
+extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
+#endif
+
+static int dhd_dongle_up = FALSE;
+
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhdioctl.h>
+#include <wlioctl.h>
+#include <brcm_nl80211.h>
+#include <dhd_cfg80211.h>
+#ifdef PCIE_FULL_DONGLE
+#include <dhd_flowring.h>
+#endif
+
+static s32 wl_dongle_up(struct net_device *ndev);
+static s32 wl_dongle_down(struct net_device *ndev);
+
+/**
+ * Function implementations
+ */
+
+s32 dhd_cfg80211_init(struct bcm_cfg80211 *cfg)
+{
+	dhd_dongle_up = FALSE;
+	return 0;
+}
+
+s32 dhd_cfg80211_deinit(struct bcm_cfg80211 *cfg)
+{
+	dhd_dongle_up = FALSE;
+	return 0;
+}
+
+s32 dhd_cfg80211_down(struct bcm_cfg80211 *cfg)
+{
+	struct net_device *ndev;
+	s32 err = 0;
+
+	WL_TRACE(("In\n"));
+	if (!dhd_dongle_up) {
+		WL_ERR(("Dongle is already down\n"));
+		return err;
+	}
+
+	ndev = bcmcfg_to_prmry_ndev(cfg);
+	wl_dongle_down(ndev);
+	dhd_dongle_up = FALSE;
+	return 0;
+}
+
+s32 dhd_cfg80211_set_p2p_info(struct bcm_cfg80211 *cfg, int val)
+{
+	dhd_pub_t *dhd =  (dhd_pub_t *)(cfg->pub);
+	dhd->op_mode |= val;
+	WL_ERR(("Set : op_mode=0x%04x\n", dhd->op_mode));
+#ifdef ARP_OFFLOAD_SUPPORT
+	if (dhd->arp_version == 1) {
+		/* IF P2P is enabled, disable arpoe */
+		dhd_arp_offload_set(dhd, 0);
+		dhd_arp_offload_enable(dhd, false);
+	}
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+	return 0;
+}
+
+s32 dhd_cfg80211_clean_p2p_info(struct bcm_cfg80211 *cfg)
+{
+	dhd_pub_t *dhd =  (dhd_pub_t *)(cfg->pub);
+	dhd->op_mode &= ~(DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE);
+	WL_ERR(("Clean : op_mode=0x%04x\n", dhd->op_mode));
+
+#ifdef ARP_OFFLOAD_SUPPORT
+	if (dhd->arp_version == 1) {
+		/* IF P2P is disabled, enable arpoe back for STA mode. */
+		dhd_arp_offload_set(dhd, dhd_arp_mode);
+		dhd_arp_offload_enable(dhd, true);
+	}
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+	return 0;
+}
+
+struct net_device* wl_cfg80211_allocate_if(struct bcm_cfg80211 *cfg, int ifidx, char *name,
+	uint8 *mac, uint8 bssidx)
+{
+	return dhd_allocate_if(cfg->pub, ifidx, name, mac, bssidx, FALSE);
+}
+
+int wl_cfg80211_register_if(struct bcm_cfg80211 *cfg, int ifidx, struct net_device* ndev)
+{
+	return dhd_register_if(cfg->pub, ifidx, FALSE);
+}
+
+int wl_cfg80211_remove_if(struct bcm_cfg80211 *cfg, int ifidx, struct net_device* ndev)
+{
+	return dhd_remove_if(cfg->pub, ifidx, FALSE);
+}
+
+struct net_device * dhd_cfg80211_netdev_free(struct net_device *ndev)
+{
+	if (ndev) {
+		if (ndev->ieee80211_ptr) {
+			kfree(ndev->ieee80211_ptr);
+			ndev->ieee80211_ptr = NULL;
+		}
+		free_netdev(ndev);
+		return NULL;
+	}
+
+	return ndev;
+}
+
+void dhd_netdev_free(struct net_device *ndev)
+{
+#ifdef WL_CFG80211
+	ndev = dhd_cfg80211_netdev_free(ndev);
+#endif
+	if (ndev)
+		free_netdev(ndev);
+}
+
+static s32
+wl_dongle_up(struct net_device *ndev)
+{
+	s32 err = 0;
+	u32 up = 0;
+
+	err = wldev_ioctl(ndev, WLC_UP, &up, sizeof(up), true);
+	if (unlikely(err)) {
+		WL_ERR(("WLC_UP error (%d)\n", err));
+	}
+	return err;
+}
+
+static s32
+wl_dongle_down(struct net_device *ndev)
+{
+	s32 err = 0;
+	u32 down = 0;
+
+	err = wldev_ioctl(ndev, WLC_DOWN, &down, sizeof(down), true);
+	if (unlikely(err)) {
+		WL_ERR(("WLC_DOWN error (%d)\n", err));
+	}
+	return err;
+}
+
+
+s32 dhd_config_dongle(struct bcm_cfg80211 *cfg)
+{
+#ifndef DHD_SDALIGN
+#define DHD_SDALIGN	32
+#endif
+	struct net_device *ndev;
+	s32 err = 0;
+
+	WL_TRACE(("In\n"));
+	if (dhd_dongle_up) {
+		WL_ERR(("Dongle is already up\n"));
+		return err;
+	}
+
+	ndev = bcmcfg_to_prmry_ndev(cfg);
+
+	err = wl_dongle_up(ndev);
+	if (unlikely(err)) {
+		WL_ERR(("wl_dongle_up failed\n"));
+		goto default_conf_out;
+	}
+	dhd_dongle_up = true;
+
+default_conf_out:
+
+	return err;
+
+}
+
+#ifdef PCIE_FULL_DONGLE
+void wl_roam_flowring_cleanup(struct bcm_cfg80211 *cfg)
+{
+	int hostidx = 0;
+	dhd_pub_t *dhd_pub =  (dhd_pub_t *)(cfg->pub);
+	hostidx = dhd_ifidx2hostidx(dhd_pub->info, hostidx);
+	dhd_flow_rings_delete(dhd_pub, hostidx);
+}
+#endif
+
+#ifdef CONFIG_NL80211_TESTMODE
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
+int dhd_cfg80211_testmode_cmd(struct wiphy *wiphy, struct wireless_dev *wdev, void *data, int len)
+#else
+int dhd_cfg80211_testmode_cmd(struct wiphy *wiphy, void *data, int len)
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) */
+{
+	struct sk_buff *reply;
+	struct bcm_cfg80211 *cfg;
+	dhd_pub_t *dhd;
+	struct bcm_nlmsg_hdr *nlioc = data;
+	dhd_ioctl_t ioc = { 0 };
+	int err = 0;
+	void *buf = NULL, *cur;
+	u16 buflen;
+	u16 maxmsglen = PAGE_SIZE - 0x100;
+	bool newbuf = false;
+	int8 index = 0;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
+	struct net_device *ndev = NULL;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) */
+
+	WL_TRACE(("entry: cmd = %d\n", nlioc->cmd));
+	cfg = wiphy_priv(wiphy);
+	dhd = cfg->pub;
+
+	DHD_OS_WAKE_LOCK(dhd);
+
+	/* send to dongle only if we are not waiting for reload already */
+	if (dhd->hang_was_sent) {
+		WL_ERR(("HANG was sent up earlier\n"));
+		DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhd, DHD_EVENT_TIMEOUT_MS);
+		DHD_OS_WAKE_UNLOCK(dhd);
+		return OSL_ERROR(BCME_DONGLE_DOWN);
+	}
+
+	len -= sizeof(struct bcm_nlmsg_hdr);
+
+	if (nlioc->len > 0) {
+		if (nlioc->len <= len) {
+			buf = (void *)nlioc + nlioc->offset;
+			*(char *)(buf + nlioc->len) = '\0';
+		} else {
+			if (nlioc->len > DHD_IOCTL_MAXLEN)
+				nlioc->len = DHD_IOCTL_MAXLEN;
+			buf = vzalloc(nlioc->len);
+			if (!buf)
+				return -ENOMEM;
+			newbuf = true;
+			memcpy(buf, (void *)nlioc + nlioc->offset, len);
+			*(char *)(buf + len) = '\0';
+		}
+	}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
+	ndev = wdev_to_wlc_ndev(wdev, cfg);
+	index = dhd_net2idx(dhd->info, ndev);
+	if (index == DHD_BAD_IF) {
+		WL_ERR(("Bad ifidx from wdev:%p\n", wdev));
+		return BCME_ERROR;
+	}
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) */
+
+	ioc.cmd = nlioc->cmd;
+	ioc.len = nlioc->len;
+	ioc.set = nlioc->set;
+	ioc.driver = nlioc->magic;
+	err = dhd_ioctl_process(dhd, index, &ioc, buf);
+	if (err) {
+		WL_TRACE(("dhd_ioctl_process return err %d\n", err));
+		err = OSL_ERROR(err);
+		goto done;
+	}
+
+	cur = buf;
+	while (nlioc->len > 0) {
+		buflen = nlioc->len > maxmsglen ? maxmsglen : nlioc->len;
+		nlioc->len -= buflen;
+		reply = cfg80211_testmode_alloc_reply_skb(wiphy, buflen+4);
+		if (!reply) {
+			WL_ERR(("Failed to allocate reply msg\n"));
+			err = -ENOMEM;
+			break;
+		}
+
+		if (nla_put(reply, BCM_NLATTR_DATA, buflen, cur) ||
+			nla_put_u16(reply, BCM_NLATTR_LEN, buflen)) {
+			kfree_skb(reply);
+			err = -ENOBUFS;
+			break;
+		}
+
+		do {
+			err = cfg80211_testmode_reply(reply);
+		} while (err == -EAGAIN);
+		if (err) {
+			WL_ERR(("testmode reply failed:%d\n", err));
+			break;
+		}
+		cur += buflen;
+	}
+
+done:
+	if (newbuf)
+		vfree(buf);
+	DHD_OS_WAKE_UNLOCK(dhd);
+	return err;
+}
+#endif /* CONFIG_NL80211_TESTMODE */
diff --git a/drivers/net/wireless/bcmdhd/dhd_cfg80211.h b/drivers/net/wireless/bcmdhd/dhd_cfg80211.h
new file mode 100644
index 0000000..bf89f12
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_cfg80211.h
@@ -0,0 +1,69 @@
+/*
+ * Linux cfg80211 driver - Dongle Host Driver (DHD) related
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_cfg80211.c,v 1.1.4.1.2.14 2011/02/09 01:40:07 Exp $
+ */
+
+
+#ifndef __DHD_CFG80211__
+#define __DHD_CFG80211__
+
+#include <wl_cfg80211.h>
+#include <wl_cfgp2p.h>
+
+#ifndef WL_ERR
+#define WL_ERR CFG80211_ERR
+#endif
+#ifndef WL_TRACE
+#define WL_TRACE CFG80211_TRACE
+#endif
+
+s32 dhd_cfg80211_init(struct bcm_cfg80211 *cfg);
+s32 dhd_cfg80211_deinit(struct bcm_cfg80211 *cfg);
+s32 dhd_cfg80211_down(struct bcm_cfg80211 *cfg);
+s32 dhd_cfg80211_set_p2p_info(struct bcm_cfg80211 *cfg, int val);
+s32 dhd_cfg80211_clean_p2p_info(struct bcm_cfg80211 *cfg);
+s32 dhd_config_dongle(struct bcm_cfg80211 *cfg);
+#ifdef PCIE_FULL_DONGLE
+void wl_roam_flowring_cleanup(struct bcm_cfg80211 *cfg);
+#endif
+
+#ifdef CONFIG_NL80211_TESTMODE
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
+int dhd_cfg80211_testmode_cmd(struct wiphy *wiphy, struct wireless_dev *wdev, void *data, int len);
+#else
+int dhd_cfg80211_testmode_cmd(struct wiphy *wiphy, void *data, int len);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) */
+#else
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
+static inline int
+dhd_cfg80211_testmode_cmd(struct wiphy *wiphy, struct wireless_dev *wdev, void *data, int len)
+#else
+static inline int dhd_cfg80211_testmode_cmd(struct wiphy *wiphy, void *data, int len)
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) */
+{
+	return 0;
+}
+#endif /* CONFIG_NL80211_TESTMODE */
+
+#endif /* __DHD_CFG80211__ */
diff --git a/drivers/net/wireless/bcmdhd/dhd_common.c b/drivers/net/wireless/bcmdhd/dhd_common.c
new file mode 100644
index 0000000..b0e97d1
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_common.c
@@ -0,0 +1,2802 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), common DHD core.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ *
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ *
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_common.c 473079 2014-04-27 07:47:16Z $
+ */
+#include <typedefs.h>
+#include <osl.h>
+
+#include <epivers.h>
+#include <bcmutils.h>
+
+#include <bcmendian.h>
+#include <dngl_stats.h>
+#include <wlioctl.h>
+#include <dhd.h>
+#include <dhd_ip.h>
+#include <proto/bcmevent.h>
+
+#ifdef SHOW_LOGTRACE
+#include <event_log.h>
+#endif /* SHOW_LOGTRACE */
+
+#ifdef BCMPCIE
+#include <dhd_flowring.h>
+#endif
+
+#include <dhd_bus.h>
+#include <dhd_proto.h>
+#include <dhd_dbg.h>
+#include <msgtrace.h>
+
+#ifdef WL_CFG80211
+#include <wl_cfg80211.h>
+#endif
+#ifdef PNO_SUPPORT
+#include <dhd_pno.h>
+#endif
+#ifdef RTT_SUPPORT
+#include <dhd_rtt.h>
+#endif
+
+#define htod32(i) (i)
+#define htod16(i) (i)
+#define dtoh32(i) (i)
+#define dtoh16(i) (i)
+#define htodchanspec(i) (i)
+#define dtohchanspec(i) (i)
+
+#ifdef PROP_TXSTATUS
+#include <wlfc_proto.h>
+#include <dhd_wlfc.h>
+#endif
+
+#ifdef DHD_WMF
+#include <dhd_linux.h>
+#include <dhd_wmf_linux.h>
+#endif /* DHD_WMF */
+
+
+#ifdef WLMEDIA_HTSF
+extern void htsf_update(struct dhd_info *dhd, void *data);
+#endif
+int dhd_msg_level = DHD_ERROR_VAL;
+
+
+#include <wl_iw.h>
+
+#ifdef SOFTAP
+char fw_path2[MOD_PARAM_PATHLEN];
+extern bool softap_enabled;
+#endif
+
+/* Last connection success/failure status */
+uint32 dhd_conn_event;
+uint32 dhd_conn_status;
+uint32 dhd_conn_reason;
+
+#if defined(SHOW_EVENTS) && defined(SHOW_LOGTRACE)
+static int check_event_log_sequence_number(uint32 seq_no);
+#endif /* defined(SHOW_EVENTS) && defined(SHOW_LOGTRACE) */
+extern int dhd_iscan_request(void * dhdp, uint16 action);
+extern void dhd_ind_scan_confirm(void *h, bool status);
+extern int dhd_iscan_in_progress(void *h);
+void dhd_iscan_lock(void);
+void dhd_iscan_unlock(void);
+extern int dhd_change_mtu(dhd_pub_t *dhd, int new_mtu, int ifidx);
+#if !defined(AP) && defined(WLP2P)
+extern int dhd_get_concurrent_capabilites(dhd_pub_t *dhd);
+#endif
+bool ap_cfg_running = FALSE;
+bool ap_fw_loaded = FALSE;
+
+/* Version string to report */
+#ifdef DHD_DEBUG
+#ifndef SRCBASE
+#define SRCBASE        "drivers/net/wireless/bcmdhd"
+#endif
+#define DHD_COMPILED "\nCompiled in " SRCBASE
+#endif /* DHD_DEBUG */
+
+#if defined(DHD_DEBUG)
+const char dhd_version[] = "Dongle Host Driver, version " EPI_VERSION_STR
+	DHD_COMPILED " on " __DATE__ " at " __TIME__;
+#else
+const char dhd_version[] = "\nDongle Host Driver, version " EPI_VERSION_STR "\nCompiled from ";
+#endif
+
+void dhd_set_timer(void *bus, uint wdtick);
+
+
+
+/* IOVar table */
+enum {
+	IOV_VERSION = 1,
+	IOV_MSGLEVEL,
+	IOV_BCMERRORSTR,
+	IOV_BCMERROR,
+	IOV_WDTICK,
+	IOV_DUMP,
+	IOV_CLEARCOUNTS,
+	IOV_LOGDUMP,
+	IOV_LOGCAL,
+	IOV_LOGSTAMP,
+	IOV_GPIOOB,
+	IOV_IOCTLTIMEOUT,
+#if defined(DHD_DEBUG)
+	IOV_CONS,
+	IOV_DCONSOLE_POLL,
+#endif /* defined(DHD_DEBUG) */
+#ifdef PROP_TXSTATUS
+	IOV_PROPTXSTATUS_ENABLE,
+	IOV_PROPTXSTATUS_MODE,
+	IOV_PROPTXSTATUS_OPT,
+	IOV_PROPTXSTATUS_MODULE_IGNORE,
+	IOV_PROPTXSTATUS_CREDIT_IGNORE,
+	IOV_PROPTXSTATUS_TXSTATUS_IGNORE,
+	IOV_PROPTXSTATUS_RXPKT_CHK,
+#endif /* PROP_TXSTATUS */
+	IOV_BUS_TYPE,
+#ifdef WLMEDIA_HTSF
+	IOV_WLPKTDLYSTAT_SZ,
+#endif
+	IOV_CHANGEMTU,
+	IOV_HOSTREORDER_FLOWS,
+#ifdef DHDTCPACK_SUPPRESS
+	IOV_TCPACK_SUPPRESS,
+#endif /* DHDTCPACK_SUPPRESS */
+#ifdef DHD_WMF
+	IOV_WMF_BSS_ENAB,
+	IOV_WMF_UCAST_IGMP,
+	IOV_WMF_MCAST_DATA_SENDUP,
+#ifdef WL_IGMP_UCQUERY
+	IOV_WMF_UCAST_IGMP_QUERY,
+#endif /* WL_IGMP_UCQUERY */
+#ifdef DHD_UCAST_UPNP
+	IOV_WMF_UCAST_UPNP,
+#endif /* DHD_UCAST_UPNP */
+#endif /* DHD_WMF */
+	IOV_AP_ISOLATE,
+#ifdef DHD_UNICAST_DHCP
+	IOV_DHCP_UNICAST,
+#endif /* DHD_UNICAST_DHCP */
+#ifdef DHD_L2_FILTER
+	IOV_BLOCK_PING,
+#endif
+	IOV_LAST
+};
+
+const bcm_iovar_t dhd_iovars[] = {
+	{"version",	IOV_VERSION,	0,	IOVT_BUFFER,	sizeof(dhd_version) },
+#ifdef DHD_DEBUG
+	{"msglevel",	IOV_MSGLEVEL,	0,	IOVT_UINT32,	0 },
+#endif /* DHD_DEBUG */
+	{"bcmerrorstr", IOV_BCMERRORSTR, 0, IOVT_BUFFER,	BCME_STRLEN },
+	{"bcmerror",	IOV_BCMERROR,	0,	IOVT_INT8,	0 },
+	{"wdtick",	IOV_WDTICK, 0,	IOVT_UINT32,	0 },
+	{"dump",	IOV_DUMP,	0,	IOVT_BUFFER,	DHD_IOCTL_MAXLEN },
+#ifdef DHD_DEBUG
+	{"cons",	IOV_CONS,	0,	IOVT_BUFFER,	0 },
+	{"dconpoll",	IOV_DCONSOLE_POLL, 0,	IOVT_UINT32,	0 },
+#endif
+	{"clearcounts", IOV_CLEARCOUNTS, 0, IOVT_VOID,	0 },
+	{"gpioob",	IOV_GPIOOB,	0,	IOVT_UINT32,	0 },
+	{"ioctl_timeout",	IOV_IOCTLTIMEOUT,	0,	IOVT_UINT32,	0 },
+#ifdef PROP_TXSTATUS
+	{"proptx",	IOV_PROPTXSTATUS_ENABLE,	0,	IOVT_BOOL,	0 },
+	/*
+	set the proptxtstatus operation mode:
+	0 - Do not do any proptxtstatus flow control
+	1 - Use implied credit from a packet status
+	2 - Use explicit credit
+	*/
+	{"ptxmode",	IOV_PROPTXSTATUS_MODE,	0,	IOVT_UINT32,	0 },
+	{"proptx_opt", IOV_PROPTXSTATUS_OPT,	0,	IOVT_UINT32,	0 },
+	{"pmodule_ignore", IOV_PROPTXSTATUS_MODULE_IGNORE, 0, IOVT_BOOL, 0 },
+	{"pcredit_ignore", IOV_PROPTXSTATUS_CREDIT_IGNORE, 0, IOVT_BOOL, 0 },
+	{"ptxstatus_ignore", IOV_PROPTXSTATUS_TXSTATUS_IGNORE, 0, IOVT_BOOL, 0 },
+	{"rxpkt_chk", IOV_PROPTXSTATUS_RXPKT_CHK, 0, IOVT_BOOL, 0 },
+#endif /* PROP_TXSTATUS */
+	{"bustype", IOV_BUS_TYPE, 0, IOVT_UINT32, 0},
+#ifdef WLMEDIA_HTSF
+	{"pktdlystatsz", IOV_WLPKTDLYSTAT_SZ, 0, IOVT_UINT8, 0 },
+#endif
+	{"changemtu", IOV_CHANGEMTU, 0, IOVT_UINT32, 0 },
+	{"host_reorder_flows", IOV_HOSTREORDER_FLOWS, 0, IOVT_BUFFER,
+	(WLHOST_REORDERDATA_MAXFLOWS + 1) },
+#ifdef DHDTCPACK_SUPPRESS
+	{"tcpack_suppress",	IOV_TCPACK_SUPPRESS,	0,	IOVT_UINT8,	0 },
+#endif /* DHDTCPACK_SUPPRESS */
+#ifdef DHD_WMF
+	{"wmf_bss_enable", IOV_WMF_BSS_ENAB,	0,	IOVT_BOOL,	0 },
+	{"wmf_ucast_igmp", IOV_WMF_UCAST_IGMP,	0,	IOVT_BOOL,	0 },
+	{"wmf_mcast_data_sendup", IOV_WMF_MCAST_DATA_SENDUP,	0,	IOVT_BOOL,	0 },
+#ifdef WL_IGMP_UCQUERY
+	{"wmf_ucast_igmp_query", IOV_WMF_UCAST_IGMP_QUERY, (0), IOVT_BOOL, 0 },
+#endif /* WL_IGMP_UCQUERY */
+#ifdef DHD_UCAST_UPNP
+	{"wmf_ucast_upnp", IOV_WMF_UCAST_UPNP, (0), IOVT_BOOL, 0 },
+#endif /* DHD_UCAST_UPNP */
+#endif /* DHD_WMF */
+#ifdef DHD_UNICAST_DHCP
+	{"dhcp_unicast", IOV_DHCP_UNICAST, (0), IOVT_BOOL, 0 },
+#endif /* DHD_UNICAST_DHCP */
+	{"ap_isolate", IOV_AP_ISOLATE, (0), IOVT_BOOL, 0},
+#ifdef DHD_L2_FILTER
+	{"block_ping", IOV_BLOCK_PING, (0), IOVT_BOOL, 0},
+#endif
+	{NULL, 0, 0, 0, 0 }
+};
+
+#define DHD_IOVAR_BUF_SIZE	128
+
+/* to NDIS developer, the structure dhd_common is redundant,
+ * please do NOT merge it back from other branches !!!
+ */
+
+static int
+dhd_dump(dhd_pub_t *dhdp, char *buf, int buflen)
+{
+	char eabuf[ETHER_ADDR_STR_LEN];
+
+	struct bcmstrbuf b;
+	struct bcmstrbuf *strbuf = &b;
+
+	bcm_binit(strbuf, buf, buflen);
+
+	/* Base DHD info */
+	bcm_bprintf(strbuf, "%s\n", dhd_version);
+	bcm_bprintf(strbuf, "\n");
+	bcm_bprintf(strbuf, "pub.up %d pub.txoff %d pub.busstate %d\n",
+	            dhdp->up, dhdp->txoff, dhdp->busstate);
+	bcm_bprintf(strbuf, "pub.hdrlen %u pub.maxctl %u pub.rxsz %u\n",
+	            dhdp->hdrlen, dhdp->maxctl, dhdp->rxsz);
+	bcm_bprintf(strbuf, "pub.iswl %d pub.drv_version %ld pub.mac %s\n",
+	            dhdp->iswl, dhdp->drv_version, bcm_ether_ntoa(&dhdp->mac, eabuf));
+	bcm_bprintf(strbuf, "pub.bcmerror %d tickcnt %u\n", dhdp->bcmerror, dhdp->tickcnt);
+
+	bcm_bprintf(strbuf, "dongle stats:\n");
+	bcm_bprintf(strbuf, "tx_packets %lu tx_bytes %lu tx_errors %lu tx_dropped %lu\n",
+	            dhdp->dstats.tx_packets, dhdp->dstats.tx_bytes,
+	            dhdp->dstats.tx_errors, dhdp->dstats.tx_dropped);
+	bcm_bprintf(strbuf, "rx_packets %lu rx_bytes %lu rx_errors %lu rx_dropped %lu\n",
+	            dhdp->dstats.rx_packets, dhdp->dstats.rx_bytes,
+	            dhdp->dstats.rx_errors, dhdp->dstats.rx_dropped);
+	bcm_bprintf(strbuf, "multicast %lu\n", dhdp->dstats.multicast);
+
+	bcm_bprintf(strbuf, "bus stats:\n");
+	bcm_bprintf(strbuf, "tx_packets %lu  tx_dropped %lu tx_multicast %lu tx_errors %lu\n",
+	            dhdp->tx_packets, dhdp->tx_dropped, dhdp->tx_multicast, dhdp->tx_errors);
+	bcm_bprintf(strbuf, "tx_ctlpkts %lu tx_ctlerrs %lu\n",
+	            dhdp->tx_ctlpkts, dhdp->tx_ctlerrs);
+	bcm_bprintf(strbuf, "rx_packets %lu rx_multicast %lu rx_errors %lu \n",
+	            dhdp->rx_packets, dhdp->rx_multicast, dhdp->rx_errors);
+	bcm_bprintf(strbuf, "rx_ctlpkts %lu rx_ctlerrs %lu rx_dropped %lu\n",
+	            dhdp->rx_ctlpkts, dhdp->rx_ctlerrs, dhdp->rx_dropped);
+	bcm_bprintf(strbuf, "rx_readahead_cnt %lu tx_realloc %lu\n",
+	            dhdp->rx_readahead_cnt, dhdp->tx_realloc);
+	bcm_bprintf(strbuf, "\n");
+
+	/* Add any prot info */
+	dhd_prot_dump(dhdp, strbuf);
+	bcm_bprintf(strbuf, "\n");
+
+	/* Add any bus info */
+	dhd_bus_dump(dhdp, strbuf);
+
+
+	return (!strbuf->size ? BCME_BUFTOOSHORT : 0);
+}
+
+int
+dhd_wl_ioctl_cmd(dhd_pub_t *dhd_pub, int cmd, void *arg, int len, uint8 set, int ifidx)
+{
+	wl_ioctl_t ioc;
+
+	ioc.cmd = cmd;
+	ioc.buf = arg;
+	ioc.len = len;
+	ioc.set = set;
+
+	return dhd_wl_ioctl(dhd_pub, ifidx, &ioc, arg, len);
+}
+
+int
+dhd_wl_ioctl(dhd_pub_t *dhd_pub, int ifidx, wl_ioctl_t *ioc, void *buf, int len)
+{
+	int ret = BCME_ERROR;
+
+	if (dhd_os_proto_block(dhd_pub))
+	{
+#if defined(WL_WLC_SHIM)
+		wl_info_t *wl = dhd_pub_wlinfo(dhd_pub);
+
+		wl_io_pport_t io_pport;
+		io_pport.dhd_pub = dhd_pub;
+		io_pport.ifidx = ifidx;
+
+		ret = wl_shim_ioctl(wl->shim, ioc, &io_pport);
+		if (ret != BCME_OK) {
+			DHD_ERROR(("%s: wl_shim_ioctl(%d) ERR %d\n", __FUNCTION__, ioc->cmd, ret));
+		}
+#else
+		ret = dhd_prot_ioctl(dhd_pub, ifidx, ioc, buf, len);
+#endif /* defined(WL_WLC_SHIM) */
+
+		if (ret && dhd_pub->up) {
+			/* Send hang event only if dhd_open() was success */
+			dhd_os_check_hang(dhd_pub, ifidx, ret);
+		}
+
+		if (ret == -ETIMEDOUT && !dhd_pub->up) {
+			DHD_ERROR(("%s: 'resumed on timeout' error is "
+				"occurred before the interface does not"
+				" bring up\n", __FUNCTION__));
+			dhd_pub->busstate = DHD_BUS_DOWN;
+		}
+
+		dhd_os_proto_unblock(dhd_pub);
+
+	}
+
+	return ret;
+}
+
+uint wl_get_port_num(wl_io_pport_t *io_pport)
+{
+	return 0;
+}
+
+/* Get bssidx from iovar params
+ * Input:   dhd_pub - pointer to dhd_pub_t
+ *	    params  - IOVAR params
+ * Output:  idx	    - BSS index
+ *	    val	    - ponter to the IOVAR arguments
+ */
+static int
+dhd_iovar_parse_bssidx(dhd_pub_t *dhd_pub, char *params, int *idx, char **val)
+{
+	char *prefix = "bsscfg:";
+	uint32	bssidx;
+
+	if (!(strncmp(params, prefix, strlen(prefix)))) {
+		/* per bss setting should be prefixed with 'bsscfg:' */
+		char *p = (char *)params + strlen(prefix);
+
+		/* Skip Name */
+		while (*p != '\0')
+			p++;
+		/* consider null */
+		p = p + 1;
+		bcopy(p, &bssidx, sizeof(uint32));
+		/* Get corresponding dhd index */
+		bssidx = dhd_bssidx2idx(dhd_pub, bssidx);
+
+		if (bssidx >= DHD_MAX_IFS) {
+			DHD_ERROR(("%s Wrong bssidx provided\n", __FUNCTION__));
+			return BCME_ERROR;
+		}
+
+		/* skip bss idx */
+		p += sizeof(uint32);
+		*val = p;
+		*idx = bssidx;
+	} else {
+		DHD_ERROR(("%s: bad parameter for per bss iovar\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	return BCME_OK;
+}
+
+static int
+dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const char *name,
+            void *params, int plen, void *arg, int len, int val_size)
+{
+	int bcmerror = 0;
+	int32 int_val = 0;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+	DHD_TRACE(("%s: actionid = %d; name %s\n", __FUNCTION__, actionid, name));
+
+	if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0)
+		goto exit;
+
+	if (plen >= (int)sizeof(int_val))
+		bcopy(params, &int_val, sizeof(int_val));
+
+	switch (actionid) {
+	case IOV_GVAL(IOV_VERSION):
+		/* Need to have checked buffer length */
+		bcm_strncpy_s((char*)arg, len, dhd_version, len);
+		break;
+
+	case IOV_GVAL(IOV_MSGLEVEL):
+		int_val = (int32)dhd_msg_level;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_SVAL(IOV_MSGLEVEL):
+#ifdef WL_CFG80211
+		/* Enable DHD and WL logs in oneshot */
+		if (int_val & DHD_WL_VAL2)
+			wl_cfg80211_enable_trace(TRUE, int_val & (~DHD_WL_VAL2));
+		else if (int_val & DHD_WL_VAL)
+			wl_cfg80211_enable_trace(FALSE, WL_DBG_DBG);
+		if (!(int_val & DHD_WL_VAL2))
+#endif /* WL_CFG80211 */
+		dhd_msg_level = int_val;
+		break;
+	case IOV_GVAL(IOV_BCMERRORSTR):
+		bcm_strncpy_s((char *)arg, len, bcmerrorstr(dhd_pub->bcmerror), BCME_STRLEN);
+		((char *)arg)[BCME_STRLEN - 1] = 0x00;
+		break;
+
+	case IOV_GVAL(IOV_BCMERROR):
+		int_val = (int32)dhd_pub->bcmerror;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_GVAL(IOV_WDTICK):
+		int_val = (int32)dhd_watchdog_ms;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_SVAL(IOV_WDTICK):
+		if (!dhd_pub->up) {
+			bcmerror = BCME_NOTUP;
+			break;
+		}
+		dhd_os_wd_timer(dhd_pub, (uint)int_val);
+		break;
+
+	case IOV_GVAL(IOV_DUMP):
+		bcmerror = dhd_dump(dhd_pub, arg, len);
+		break;
+
+#ifdef DHD_DEBUG
+	case IOV_GVAL(IOV_DCONSOLE_POLL):
+		int_val = (int32)dhd_console_ms;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_SVAL(IOV_DCONSOLE_POLL):
+		dhd_console_ms = (uint)int_val;
+		break;
+
+	case IOV_SVAL(IOV_CONS):
+		if (len > 0)
+			bcmerror = dhd_bus_console_in(dhd_pub, arg, len - 1);
+		break;
+#endif /* DHD_DEBUG */
+
+	case IOV_SVAL(IOV_CLEARCOUNTS):
+		dhd_pub->tx_packets = dhd_pub->rx_packets = 0;
+		dhd_pub->tx_errors = dhd_pub->rx_errors = 0;
+		dhd_pub->tx_ctlpkts = dhd_pub->rx_ctlpkts = 0;
+		dhd_pub->tx_ctlerrs = dhd_pub->rx_ctlerrs = 0;
+		dhd_pub->tx_dropped = 0;
+		dhd_pub->rx_dropped = 0;
+		dhd_pub->rx_readahead_cnt = 0;
+		dhd_pub->tx_realloc = 0;
+		dhd_pub->wd_dpc_sched = 0;
+		memset(&dhd_pub->dstats, 0, sizeof(dhd_pub->dstats));
+		dhd_bus_clearcounts(dhd_pub);
+#ifdef PROP_TXSTATUS
+		/* clear proptxstatus related counters */
+		dhd_wlfc_clear_counts(dhd_pub);
+#endif /* PROP_TXSTATUS */
+		break;
+
+
+	case IOV_GVAL(IOV_IOCTLTIMEOUT): {
+		int_val = (int32)dhd_os_get_ioctl_resp_timeout();
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+	}
+
+	case IOV_SVAL(IOV_IOCTLTIMEOUT): {
+		if (int_val <= 0)
+			bcmerror = BCME_BADARG;
+		else
+			dhd_os_set_ioctl_resp_timeout((unsigned int)int_val);
+		break;
+	}
+
+
+#ifdef PROP_TXSTATUS
+	case IOV_GVAL(IOV_PROPTXSTATUS_ENABLE): {
+		bool wlfc_enab = FALSE;
+		bcmerror = dhd_wlfc_get_enable(dhd_pub, &wlfc_enab);
+		if (bcmerror != BCME_OK)
+			goto exit;
+		int_val = wlfc_enab ? 1 : 0;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+	}
+	case IOV_SVAL(IOV_PROPTXSTATUS_ENABLE): {
+		bool wlfc_enab = FALSE;
+		bcmerror = dhd_wlfc_get_enable(dhd_pub, &wlfc_enab);
+		if (bcmerror != BCME_OK)
+			goto exit;
+
+		/* wlfc is already set as desired */
+		if (wlfc_enab == (int_val == 0 ? FALSE : TRUE))
+			goto exit;
+
+		if (int_val == TRUE)
+			bcmerror = dhd_wlfc_init(dhd_pub);
+		else
+			bcmerror = dhd_wlfc_deinit(dhd_pub);
+
+		break;
+	}
+	case IOV_GVAL(IOV_PROPTXSTATUS_MODE):
+		bcmerror = dhd_wlfc_get_mode(dhd_pub, &int_val);
+		if (bcmerror != BCME_OK)
+			goto exit;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_SVAL(IOV_PROPTXSTATUS_MODE):
+		dhd_wlfc_set_mode(dhd_pub, int_val);
+		break;
+
+	case IOV_GVAL(IOV_PROPTXSTATUS_MODULE_IGNORE):
+		bcmerror = dhd_wlfc_get_module_ignore(dhd_pub, &int_val);
+		if (bcmerror != BCME_OK)
+			goto exit;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_SVAL(IOV_PROPTXSTATUS_MODULE_IGNORE):
+		dhd_wlfc_set_module_ignore(dhd_pub, int_val);
+		break;
+
+	case IOV_GVAL(IOV_PROPTXSTATUS_CREDIT_IGNORE):
+		bcmerror = dhd_wlfc_get_credit_ignore(dhd_pub, &int_val);
+		if (bcmerror != BCME_OK)
+			goto exit;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_SVAL(IOV_PROPTXSTATUS_CREDIT_IGNORE):
+		dhd_wlfc_set_credit_ignore(dhd_pub, int_val);
+		break;
+
+	case IOV_GVAL(IOV_PROPTXSTATUS_TXSTATUS_IGNORE):
+		bcmerror = dhd_wlfc_get_txstatus_ignore(dhd_pub, &int_val);
+		if (bcmerror != BCME_OK)
+			goto exit;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_SVAL(IOV_PROPTXSTATUS_TXSTATUS_IGNORE):
+		dhd_wlfc_set_txstatus_ignore(dhd_pub, int_val);
+		break;
+
+	case IOV_GVAL(IOV_PROPTXSTATUS_RXPKT_CHK):
+		bcmerror = dhd_wlfc_get_rxpkt_chk(dhd_pub, &int_val);
+		if (bcmerror != BCME_OK)
+			goto exit;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_SVAL(IOV_PROPTXSTATUS_RXPKT_CHK):
+		dhd_wlfc_set_rxpkt_chk(dhd_pub, int_val);
+		break;
+
+#endif /* PROP_TXSTATUS */
+
+	case IOV_GVAL(IOV_BUS_TYPE):
+		/* The dhd application queries the driver to check if its usb or sdio.  */
+#ifdef BCMDHDUSB
+		int_val = BUS_TYPE_USB;
+#endif
+#ifdef BCMSDIO
+		int_val = BUS_TYPE_SDIO;
+#endif
+#ifdef PCIE_FULL_DONGLE
+		int_val = BUS_TYPE_PCIE;
+#endif
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+
+#ifdef WLMEDIA_HTSF
+	case IOV_GVAL(IOV_WLPKTDLYSTAT_SZ):
+		int_val = dhd_pub->htsfdlystat_sz;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_SVAL(IOV_WLPKTDLYSTAT_SZ):
+		dhd_pub->htsfdlystat_sz = int_val & 0xff;
+		printf("Setting tsfdlystat_sz:%d\n", dhd_pub->htsfdlystat_sz);
+		break;
+#endif
+	case IOV_SVAL(IOV_CHANGEMTU):
+		int_val &= 0xffff;
+		bcmerror = dhd_change_mtu(dhd_pub, int_val, 0);
+		break;
+
+	case IOV_GVAL(IOV_HOSTREORDER_FLOWS):
+	{
+		uint i = 0;
+		uint8 *ptr = (uint8 *)arg;
+		uint8 count = 0;
+
+		ptr++;
+		for (i = 0; i < WLHOST_REORDERDATA_MAXFLOWS; i++) {
+			if (dhd_pub->reorder_bufs[i] != NULL) {
+				*ptr = dhd_pub->reorder_bufs[i]->flow_id;
+				ptr++;
+				count++;
+			}
+		}
+		ptr = (uint8 *)arg;
+		*ptr = count;
+		break;
+	}
+#ifdef DHDTCPACK_SUPPRESS
+	case IOV_GVAL(IOV_TCPACK_SUPPRESS): {
+		int_val = (uint32)dhd_pub->tcpack_sup_mode;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+	}
+	case IOV_SVAL(IOV_TCPACK_SUPPRESS): {
+		bcmerror = dhd_tcpack_suppress_set(dhd_pub, (uint8)int_val);
+		break;
+	}
+#endif /* DHDTCPACK_SUPPRESS */
+#ifdef DHD_WMF
+	case IOV_GVAL(IOV_WMF_BSS_ENAB): {
+		uint32	bssidx;
+		dhd_wmf_t *wmf;
+		char *val;
+
+		if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
+			DHD_ERROR(("%s: wmf_bss_enable: bad parameter\n", __FUNCTION__));
+			bcmerror = BCME_BADARG;
+			break;
+		}
+
+		wmf = dhd_wmf_conf(dhd_pub, bssidx);
+		int_val = wmf->wmf_enable ? 1 :0;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+	}
+	case IOV_SVAL(IOV_WMF_BSS_ENAB): {
+		/* Enable/Disable WMF */
+		uint32	bssidx;
+		dhd_wmf_t *wmf;
+		char *val;
+
+		if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
+			DHD_ERROR(("%s: wmf_bss_enable: bad parameter\n", __FUNCTION__));
+			bcmerror = BCME_BADARG;
+			break;
+		}
+
+		ASSERT(val);
+		bcopy(val, &int_val, sizeof(uint32));
+		wmf = dhd_wmf_conf(dhd_pub, bssidx);
+		if (wmf->wmf_enable == int_val)
+			break;
+		if (int_val) {
+			/* Enable WMF */
+			if (dhd_wmf_instance_add(dhd_pub, bssidx) != BCME_OK) {
+				DHD_ERROR(("%s: Error in creating WMF instance\n",
+				__FUNCTION__));
+				break;
+			}
+			if (dhd_wmf_start(dhd_pub, bssidx) != BCME_OK) {
+				DHD_ERROR(("%s: Failed to start WMF\n", __FUNCTION__));
+				break;
+			}
+			wmf->wmf_enable = TRUE;
+		} else {
+			/* Disable WMF */
+			wmf->wmf_enable = FALSE;
+			dhd_wmf_stop(dhd_pub, bssidx);
+			dhd_wmf_instance_del(dhd_pub, bssidx);
+		}
+		break;
+	}
+	case IOV_GVAL(IOV_WMF_UCAST_IGMP):
+		int_val = dhd_pub->wmf_ucast_igmp ? 1 : 0;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+	case IOV_SVAL(IOV_WMF_UCAST_IGMP):
+		if (dhd_pub->wmf_ucast_igmp == int_val)
+			break;
+
+		if (int_val >= OFF && int_val <= ON)
+			dhd_pub->wmf_ucast_igmp = int_val;
+		else
+			bcmerror = BCME_RANGE;
+		break;
+	case IOV_GVAL(IOV_WMF_MCAST_DATA_SENDUP):
+		int_val = dhd_wmf_mcast_data_sendup(dhd_pub, 0, FALSE, FALSE);
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+	case IOV_SVAL(IOV_WMF_MCAST_DATA_SENDUP):
+		dhd_wmf_mcast_data_sendup(dhd_pub, 0, TRUE, int_val);
+		break;
+
+#ifdef WL_IGMP_UCQUERY
+	case IOV_GVAL(IOV_WMF_UCAST_IGMP_QUERY):
+		int_val = dhd_pub->wmf_ucast_igmp_query ? 1 : 0;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+	case IOV_SVAL(IOV_WMF_UCAST_IGMP_QUERY):
+		if (dhd_pub->wmf_ucast_igmp_query == int_val)
+			break;
+
+		if (int_val >= OFF && int_val <= ON)
+			dhd_pub->wmf_ucast_igmp_query = int_val;
+		else
+			bcmerror = BCME_RANGE;
+		break;
+#endif /* WL_IGMP_UCQUERY */
+#ifdef DHD_UCAST_UPNP
+	case IOV_GVAL(IOV_WMF_UCAST_UPNP):
+		int_val = dhd_pub->wmf_ucast_upnp ? 1 : 0;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+	case IOV_SVAL(IOV_WMF_UCAST_UPNP):
+		if (dhd_pub->wmf_ucast_upnp == int_val)
+			break;
+
+		if (int_val >= OFF && int_val <= ON)
+			dhd_pub->wmf_ucast_upnp = int_val;
+		else
+			bcmerror = BCME_RANGE;
+		break;
+#endif /* DHD_UCAST_UPNP */
+#endif /* DHD_WMF */
+
+
+#ifdef DHD_UNICAST_DHCP
+	case IOV_GVAL(IOV_DHCP_UNICAST):
+		int_val = dhd_pub->dhcp_unicast;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+	case IOV_SVAL(IOV_DHCP_UNICAST):
+		if (dhd_pub->dhcp_unicast == int_val)
+			break;
+
+		if (int_val >= OFF || int_val <= ON) {
+			dhd_pub->dhcp_unicast = int_val;
+		} else {
+			bcmerror = BCME_RANGE;
+		}
+		break;
+#endif /* DHD_UNICAST_DHCP */
+#ifdef DHD_L2_FILTER
+	case IOV_GVAL(IOV_BLOCK_PING):
+		int_val = dhd_pub->block_ping;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+	case IOV_SVAL(IOV_BLOCK_PING):
+		if (dhd_pub->block_ping == int_val)
+			break;
+		if (int_val >= OFF || int_val <= ON) {
+			dhd_pub->block_ping = int_val;
+		} else {
+			bcmerror = BCME_RANGE;
+		}
+		break;
+#endif
+
+	case IOV_GVAL(IOV_AP_ISOLATE): {
+		uint32	bssidx;
+		char *val;
+
+		if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
+			DHD_ERROR(("%s: ap isoalate: bad parameter\n", __FUNCTION__));
+			bcmerror = BCME_BADARG;
+			break;
+		}
+
+		int_val = dhd_get_ap_isolate(dhd_pub, bssidx);
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+	}
+	case IOV_SVAL(IOV_AP_ISOLATE): {
+		uint32	bssidx;
+		char *val;
+
+		if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
+			DHD_ERROR(("%s: ap isolate: bad parameter\n", __FUNCTION__));
+			bcmerror = BCME_BADARG;
+			break;
+		}
+
+		ASSERT(val);
+		bcopy(val, &int_val, sizeof(uint32));
+		dhd_set_ap_isolate(dhd_pub, bssidx, int_val);
+		break;
+	}
+
+	default:
+		bcmerror = BCME_UNSUPPORTED;
+		break;
+	}
+
+exit:
+	DHD_TRACE(("%s: actionid %d, bcmerror %d\n", __FUNCTION__, actionid, bcmerror));
+	return bcmerror;
+}
+
+/* Store the status of a connection attempt for later retrieval by an iovar */
+void
+dhd_store_conn_status(uint32 event, uint32 status, uint32 reason)
+{
+	/* Do not overwrite a WLC_E_PRUNE with a WLC_E_SET_SSID
+	 * because an encryption/rsn mismatch results in both events, and
+	 * the important information is in the WLC_E_PRUNE.
+	 */
+	if (!(event == WLC_E_SET_SSID && status == WLC_E_STATUS_FAIL &&
+	      dhd_conn_event == WLC_E_PRUNE)) {
+		dhd_conn_event = event;
+		dhd_conn_status = status;
+		dhd_conn_reason = reason;
+	}
+}
+
+bool
+dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q, void *pkt, int prec)
+{
+	void *p;
+	int eprec = -1;		/* precedence to evict from */
+	bool discard_oldest;
+
+	/* Fast case, precedence queue is not full and we are also not
+	 * exceeding total queue length
+	 */
+	if (!pktq_pfull(q, prec) && !pktq_full(q)) {
+		pktq_penq(q, prec, pkt);
+		return TRUE;
+	}
+
+	/* Determine precedence from which to evict packet, if any */
+	if (pktq_pfull(q, prec))
+		eprec = prec;
+	else if (pktq_full(q)) {
+		p = pktq_peek_tail(q, &eprec);
+		ASSERT(p);
+		if (eprec > prec || eprec < 0)
+			return FALSE;
+	}
+
+	/* Evict if needed */
+	if (eprec >= 0) {
+		/* Detect queueing to unconfigured precedence */
+		ASSERT(!pktq_pempty(q, eprec));
+		discard_oldest = AC_BITMAP_TST(dhdp->wme_dp, eprec);
+		if (eprec == prec && !discard_oldest)
+			return FALSE;		/* refuse newer (incoming) packet */
+		/* Evict packet according to discard policy */
+		p = discard_oldest ? pktq_pdeq(q, eprec) : pktq_pdeq_tail(q, eprec);
+		ASSERT(p);
+#ifdef DHDTCPACK_SUPPRESS
+		if (dhd_tcpack_check_xmit(dhdp, p) == BCME_ERROR) {
+			DHD_ERROR(("%s %d: tcpack_suppress ERROR!!! Stop using it\n",
+				__FUNCTION__, __LINE__));
+			dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF);
+		}
+#endif /* DHDTCPACK_SUPPRESS */
+		PKTFREE(dhdp->osh, p, TRUE);
+	}
+
+	/* Enqueue */
+	p = pktq_penq(q, prec, pkt);
+	ASSERT(p);
+
+	return TRUE;
+}
+
+/*
+ * Functions to drop proper pkts from queue:
+ *	If one pkt in queue is non-fragmented, drop first non-fragmented pkt only
+ *	If all pkts in queue are all fragmented, find and drop one whole set fragmented pkts
+ *	If can't find pkts matching upper 2 cases, drop first pkt anyway
+ */
+bool
+dhd_prec_drop_pkts(dhd_pub_t *dhdp, struct pktq *pq, int prec, f_droppkt_t fn)
+{
+	struct pktq_prec *q = NULL;
+	void *p, *prev = NULL, *next = NULL, *first = NULL, *last = NULL, *prev_first = NULL;
+	pkt_frag_t frag_info;
+
+	ASSERT(dhdp && pq);
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+
+	q = &pq->q[prec];
+	p = q->head;
+
+	if (p == NULL)
+		return FALSE;
+
+	while (p) {
+		frag_info = pkt_frag_info(dhdp->osh, p);
+		if (frag_info == DHD_PKT_FRAG_NONE) {
+			break;
+		} else if (frag_info == DHD_PKT_FRAG_FIRST) {
+			if (first) {
+				/* No last frag pkt, use prev as last */
+				last = prev;
+				break;
+			} else {
+				first = p;
+				prev_first = prev;
+			}
+		} else if (frag_info == DHD_PKT_FRAG_LAST) {
+			if (first) {
+				last = p;
+				break;
+			}
+		}
+
+		prev = p;
+		p = PKTLINK(p);
+	}
+
+	if ((p == NULL) || ((frag_info != DHD_PKT_FRAG_NONE) && !(first && last))) {
+		/* Not found matching pkts, use oldest */
+		prev = NULL;
+		p = q->head;
+		frag_info = 0;
+	}
+
+	if (frag_info == DHD_PKT_FRAG_NONE) {
+		first = last = p;
+		prev_first = prev;
+	}
+
+	p = first;
+	while (p) {
+		next = PKTLINK(p);
+		q->len--;
+		pq->len--;
+
+		PKTSETLINK(p, NULL);
+
+		if (fn)
+			fn(dhdp, prec, p, TRUE);
+
+		if (p == last)
+			break;
+
+		p = next;
+	}
+
+	if (prev_first == NULL) {
+		if ((q->head = next) == NULL)
+			q->tail = NULL;
+	} else {
+		PKTSETLINK(prev_first, next);
+		if (!next)
+			q->tail = prev_first;
+	}
+
+	return TRUE;
+}
+
+static int
+dhd_iovar_op(dhd_pub_t *dhd_pub, const char *name,
+	void *params, int plen, void *arg, int len, bool set)
+{
+	int bcmerror = 0;
+	int val_size;
+	const bcm_iovar_t *vi = NULL;
+	uint32 actionid;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	ASSERT(name);
+	ASSERT(len >= 0);
+
+	/* Get MUST have return space */
+	ASSERT(set || (arg && len));
+
+	/* Set does NOT take qualifiers */
+	ASSERT(!set || (!params && !plen));
+
+	if ((vi = bcm_iovar_lookup(dhd_iovars, name)) == NULL) {
+		bcmerror = BCME_UNSUPPORTED;
+		goto exit;
+	}
+
+	DHD_CTL(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
+		name, (set ? "set" : "get"), len, plen));
+
+	/* set up 'params' pointer in case this is a set command so that
+	 * the convenience int and bool code can be common to set and get
+	 */
+	if (params == NULL) {
+		params = arg;
+		plen = len;
+	}
+
+	if (vi->type == IOVT_VOID)
+		val_size = 0;
+	else if (vi->type == IOVT_BUFFER)
+		val_size = len;
+	else
+		/* all other types are integer sized */
+		val_size = sizeof(int);
+
+	actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+
+	bcmerror = dhd_doiovar(dhd_pub, vi, actionid, name, params, plen, arg, len, val_size);
+
+exit:
+	return bcmerror;
+}
+
+int
+dhd_ioctl(dhd_pub_t * dhd_pub, dhd_ioctl_t *ioc, void * buf, uint buflen)
+{
+	int bcmerror = 0;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (!buf) {
+		return BCME_BADARG;
+	}
+
+	switch (ioc->cmd) {
+	case DHD_GET_MAGIC:
+		if (buflen < sizeof(int))
+			bcmerror = BCME_BUFTOOSHORT;
+		else
+			*(int*)buf = DHD_IOCTL_MAGIC;
+		break;
+
+	case DHD_GET_VERSION:
+		if (buflen < sizeof(int))
+			bcmerror = BCME_BUFTOOSHORT;
+		else
+			*(int*)buf = DHD_IOCTL_VERSION;
+		break;
+
+	case DHD_GET_VAR:
+	case DHD_SET_VAR: {
+		char *arg;
+		uint arglen;
+
+		/* scan past the name to any arguments */
+		for (arg = buf, arglen = buflen; *arg && arglen; arg++, arglen--)
+			;
+
+		if (*arg) {
+			bcmerror = BCME_BUFTOOSHORT;
+			break;
+		}
+
+		/* account for the NUL terminator */
+		arg++, arglen--;
+
+		/* call with the appropriate arguments */
+		if (ioc->cmd == DHD_GET_VAR)
+			bcmerror = dhd_iovar_op(dhd_pub, buf, arg, arglen,
+			buf, buflen, IOV_GET);
+		else
+			bcmerror = dhd_iovar_op(dhd_pub, buf, NULL, 0, arg, arglen, IOV_SET);
+		if (bcmerror != BCME_UNSUPPORTED)
+			break;
+
+		/* not in generic table, try protocol module */
+		if (ioc->cmd == DHD_GET_VAR)
+			bcmerror = dhd_prot_iovar_op(dhd_pub, buf, arg,
+				arglen, buf, buflen, IOV_GET);
+		else
+			bcmerror = dhd_prot_iovar_op(dhd_pub, buf,
+				NULL, 0, arg, arglen, IOV_SET);
+		if (bcmerror != BCME_UNSUPPORTED)
+			break;
+
+		/* if still not found, try bus module */
+		if (ioc->cmd == DHD_GET_VAR) {
+			bcmerror = dhd_bus_iovar_op(dhd_pub, buf,
+				arg, arglen, buf, buflen, IOV_GET);
+		} else {
+			bcmerror = dhd_bus_iovar_op(dhd_pub, buf,
+				NULL, 0, arg, arglen, IOV_SET);
+		}
+
+		break;
+	}
+
+	default:
+		bcmerror = BCME_UNSUPPORTED;
+	}
+
+	return bcmerror;
+}
+
+#ifdef SHOW_EVENTS
+#ifdef SHOW_LOGTRACE
+
+#define AVOID_BYTE 64
+#define MAX_NO_OF_ARG 16
+
+static int
+check_event_log_sequence_number(uint32 seq_no)
+{
+	int32 diff;
+	uint32 ret;
+	static uint32 logtrace_seqnum_prev = 0;
+
+	diff = ntoh32(seq_no)-logtrace_seqnum_prev;
+	switch (diff)
+	{
+		case 0:
+			ret = -1; /* duplicate packet . drop */
+			break;
+
+		case 1:
+			ret =0; /* in order */
+			break;
+
+		default:
+			if ((ntoh32(seq_no) == 0) &&
+				(logtrace_seqnum_prev == 0xFFFFFFFF) ) { /* in-order - Roll over */
+					ret = 0;
+			} else {
+
+				if (diff > 0) {
+					DHD_EVENT(("WLC_E_TRACE:"
+						"Event lost (log) seqnum %d nblost %d\n",
+						ntoh32(seq_no), (diff-1)));
+				} else {
+					DHD_EVENT(("WLC_E_TRACE:"
+						"Event Packets coming out of order!!\n"));
+				}
+				ret = 0;
+			}
+	}
+
+	logtrace_seqnum_prev = ntoh32(seq_no);
+
+	return ret;
+}
+#endif /* SHOW_LOGTRACE */
+
+static void
+wl_show_host_event(dhd_pub_t *dhd_pub, wl_event_msg_t *event, void *event_data,
+	void *raw_event_ptr, char *eventmask)
+{
+	uint i, status, reason;
+	bool group = FALSE, flush_txq = FALSE, link = FALSE;
+	const char *auth_str;
+	const char *event_name;
+	uchar *buf;
+	char err_msg[256], eabuf[ETHER_ADDR_STR_LEN];
+	uint event_type, flags, auth_type, datalen;
+
+	event_type = ntoh32(event->event_type);
+	flags = ntoh16(event->flags);
+	status = ntoh32(event->status);
+	reason = ntoh32(event->reason);
+	BCM_REFERENCE(reason);
+	auth_type = ntoh32(event->auth_type);
+	datalen = ntoh32(event->datalen);
+
+	/* debug dump of event messages */
+	snprintf(eabuf, sizeof(eabuf), "%02x:%02x:%02x:%02x:%02x:%02x",
+	        (uchar)event->addr.octet[0]&0xff,
+	        (uchar)event->addr.octet[1]&0xff,
+	        (uchar)event->addr.octet[2]&0xff,
+	        (uchar)event->addr.octet[3]&0xff,
+	        (uchar)event->addr.octet[4]&0xff,
+	        (uchar)event->addr.octet[5]&0xff);
+
+	event_name = bcmevent_get_name(event_type);
+	BCM_REFERENCE(event_name);
+
+	if (flags & WLC_EVENT_MSG_LINK)
+		link = TRUE;
+	if (flags & WLC_EVENT_MSG_GROUP)
+		group = TRUE;
+	if (flags & WLC_EVENT_MSG_FLUSHTXQ)
+		flush_txq = TRUE;
+
+	switch (event_type) {
+	case WLC_E_START:
+	case WLC_E_DEAUTH:
+	case WLC_E_DISASSOC:
+		DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
+		break;
+
+	case WLC_E_ASSOC_IND:
+	case WLC_E_REASSOC_IND:
+
+		DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
+		break;
+
+	case WLC_E_ASSOC:
+	case WLC_E_REASSOC:
+		if (status == WLC_E_STATUS_SUCCESS) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, SUCCESS\n", event_name, eabuf));
+		} else if (status == WLC_E_STATUS_TIMEOUT) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, TIMEOUT\n", event_name, eabuf));
+		} else if (status == WLC_E_STATUS_FAIL) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, FAILURE, reason %d\n",
+			       event_name, eabuf, (int)reason));
+		} else {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, unexpected status %d\n",
+			       event_name, eabuf, (int)status));
+		}
+		break;
+
+	case WLC_E_DEAUTH_IND:
+	case WLC_E_DISASSOC_IND:
+		DHD_EVENT(("MACEVENT: %s, MAC %s, reason %d\n", event_name, eabuf, (int)reason));
+		break;
+
+	case WLC_E_AUTH:
+	case WLC_E_AUTH_IND:
+		if (auth_type == DOT11_OPEN_SYSTEM)
+			auth_str = "Open System";
+		else if (auth_type == DOT11_SHARED_KEY)
+			auth_str = "Shared Key";
+		else {
+			snprintf(err_msg, sizeof(err_msg), "AUTH unknown: %d", (int)auth_type);
+			auth_str = err_msg;
+		}
+		if (event_type == WLC_E_AUTH_IND) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, %s\n", event_name, eabuf, auth_str));
+		} else if (status == WLC_E_STATUS_SUCCESS) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, %s, SUCCESS\n",
+				event_name, eabuf, auth_str));
+		} else if (status == WLC_E_STATUS_TIMEOUT) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, %s, TIMEOUT\n",
+				event_name, eabuf, auth_str));
+		} else if (status == WLC_E_STATUS_FAIL) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, %s, FAILURE, reason %d\n",
+			       event_name, eabuf, auth_str, (int)reason));
+		}
+		BCM_REFERENCE(auth_str);
+
+		break;
+
+	case WLC_E_JOIN:
+	case WLC_E_ROAM:
+	case WLC_E_SET_SSID:
+		if (status == WLC_E_STATUS_SUCCESS) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
+		} else if (status == WLC_E_STATUS_FAIL) {
+			DHD_EVENT(("MACEVENT: %s, failed\n", event_name));
+		} else if (status == WLC_E_STATUS_NO_NETWORKS) {
+			DHD_EVENT(("MACEVENT: %s, no networks found\n", event_name));
+		} else {
+			DHD_EVENT(("MACEVENT: %s, unexpected status %d\n",
+				event_name, (int)status));
+		}
+		break;
+
+	case WLC_E_BEACON_RX:
+		if (status == WLC_E_STATUS_SUCCESS) {
+			DHD_EVENT(("MACEVENT: %s, SUCCESS\n", event_name));
+		} else if (status == WLC_E_STATUS_FAIL) {
+			DHD_EVENT(("MACEVENT: %s, FAIL\n", event_name));
+		} else {
+			DHD_EVENT(("MACEVENT: %s, status %d\n", event_name, status));
+		}
+		break;
+
+	case WLC_E_LINK:
+		DHD_EVENT(("MACEVENT: %s %s\n", event_name, link?"UP":"DOWN"));
+		BCM_REFERENCE(link);
+		break;
+
+	case WLC_E_MIC_ERROR:
+		DHD_EVENT(("MACEVENT: %s, MAC %s, Group %d, Flush %d\n",
+		       event_name, eabuf, group, flush_txq));
+		BCM_REFERENCE(group);
+		BCM_REFERENCE(flush_txq);
+		break;
+
+	case WLC_E_ICV_ERROR:
+	case WLC_E_UNICAST_DECODE_ERROR:
+	case WLC_E_MULTICAST_DECODE_ERROR:
+		DHD_EVENT(("MACEVENT: %s, MAC %s\n",
+		       event_name, eabuf));
+		break;
+
+	case WLC_E_TXFAIL:
+		DHD_EVENT(("MACEVENT: %s, RA %s\n", event_name, eabuf));
+		break;
+
+	case WLC_E_SCAN_COMPLETE:
+	case WLC_E_ASSOC_REQ_IE:
+	case WLC_E_ASSOC_RESP_IE:
+	case WLC_E_PMKID_CACHE:
+		DHD_EVENT(("MACEVENT: %s\n", event_name));
+		break;
+
+	case WLC_E_PFN_NET_FOUND:
+	case WLC_E_PFN_NET_LOST:
+	case WLC_E_PFN_SCAN_COMPLETE:
+	case WLC_E_PFN_SCAN_NONE:
+	case WLC_E_PFN_SCAN_ALLGONE:
+	case WLC_E_PFN_GSCAN_FULL_RESULT:
+	case WLC_E_PFN_SWC:
+		DHD_EVENT(("PNOEVENT: %s\n", event_name));
+		break;
+
+	case WLC_E_PSK_SUP:
+	case WLC_E_PRUNE:
+		DHD_EVENT(("MACEVENT: %s, status %d, reason %d\n",
+		           event_name, (int)status, (int)reason));
+		break;
+
+#ifdef WIFI_ACT_FRAME
+	case WLC_E_ACTION_FRAME:
+		DHD_TRACE(("MACEVENT: %s Bssid %s\n", event_name, eabuf));
+		break;
+#endif /* WIFI_ACT_FRAME */
+
+#ifdef SHOW_LOGTRACE
+	case WLC_E_TRACE:
+	{
+		msgtrace_hdr_t hdr;
+		uint32 nblost;
+		uint8 count;
+		char *s, *p;
+		static uint32 seqnum_prev = 0;
+		uint32 *record = NULL;
+		uint32 *log_ptr =  NULL;
+		uint32 writeindex = 0;
+		event_log_hdr_t event_hdr;
+		int no_of_fmts = 0;
+		char *fmt = NULL;
+		dhd_event_log_t *raw_event = (dhd_event_log_t *) raw_event_ptr;
+
+		buf = (uchar *) event_data;
+		memcpy(&hdr, buf, MSGTRACE_HDRLEN);
+
+		if (hdr.version != MSGTRACE_VERSION) {
+			DHD_EVENT(("\nMACEVENT: %s [unsupported version --> "
+				"dhd version:%d dongle version:%d]\n",
+				event_name, MSGTRACE_VERSION, hdr.version));
+			/* Reset datalen to avoid display below */
+			datalen = 0;
+			break;
+		}
+
+		if (hdr.trace_type == MSGTRACE_HDR_TYPE_MSG) {
+			/* There are 2 bytes available at the end of data */
+			buf[MSGTRACE_HDRLEN + ntoh16(hdr.len)] = '\0';
+
+			if (ntoh32(hdr.discarded_bytes) || ntoh32(hdr.discarded_printf)) {
+				DHD_EVENT(("WLC_E_TRACE: [Discarded traces in dongle -->"
+					"discarded_bytes %d discarded_printf %d]\n",
+					ntoh32(hdr.discarded_bytes),
+					ntoh32(hdr.discarded_printf)));
+			}
+
+			nblost = ntoh32(hdr.seqnum) - seqnum_prev - 1;
+			if (nblost > 0) {
+				DHD_EVENT(("WLC_E_TRACE:"
+					"[Event lost (msg) --> seqnum %d nblost %d\n",
+					ntoh32(hdr.seqnum), nblost));
+			}
+			seqnum_prev = ntoh32(hdr.seqnum);
+
+			/* Display the trace buffer. Advance from
+			 * \n to \n to avoid display big
+			 * printf (issue with Linux printk )
+			 */
+			p = (char *)&buf[MSGTRACE_HDRLEN];
+			while (*p != '\0' && (s = strstr(p, "\n")) != NULL) {
+				*s = '\0';
+				DHD_ERROR(("%s\n", p));
+				p = s+1;
+			}
+			if (*p)
+				DHD_ERROR(("%s", p));
+
+			/* Reset datalen to avoid display below */
+			datalen = 0;
+
+		} else if (hdr.trace_type == MSGTRACE_HDR_TYPE_LOG) {
+			/* Let the standard event printing work for now */
+			uint32 timestamp, w, malloc_len;
+
+			if (check_event_log_sequence_number(hdr.seqnum)) {
+
+				DHD_EVENT(("%s: WLC_E_TRACE:"
+					"[Event duplicate (log) %d] dropping!!\n",
+					__FUNCTION__, hdr.seqnum));
+				return; /* drop duplicate events */
+			}
+
+			p = (char *)&buf[MSGTRACE_HDRLEN];
+			datalen -= MSGTRACE_HDRLEN;
+			w = ntoh32((uint32)*p);
+			p += 4;
+			datalen -= 4;
+			timestamp = ntoh32((uint32)*p);
+			BCM_REFERENCE(timestamp);
+			BCM_REFERENCE(w);
+
+			DHD_EVENT(("timestamp %x%x\n", timestamp, w));
+
+			if (raw_event->fmts) {
+				malloc_len = datalen+ AVOID_BYTE;
+				record = (uint32 *)MALLOC(dhd_pub->osh, malloc_len);
+				if (record == NULL) {
+					DHD_EVENT(("MSGTRACE_HDR_TYPE_LOG:"
+						"malloc failed\n"));
+					return;
+				}
+				log_ptr = (uint32 *) (p + datalen);
+				writeindex = datalen/4;
+
+				if (record) {
+					while (datalen > 4) {
+						log_ptr--;
+						datalen -= 4;
+						event_hdr.t = *log_ptr;
+						/*
+						 * Check for partially overriten entries
+						 */
+						if (log_ptr - (uint32 *) p < event_hdr.count) {
+								break;
+						}
+						/*
+						* Check for end of the Frame.
+						*/
+						if (event_hdr.tag ==  EVENT_LOG_TAG_NULL) {
+							continue;
+						}
+						/*
+						* Check For Special Time Stamp Packet
+						*/
+						if (event_hdr.tag == EVENT_LOG_TAG_TS) {
+							datalen -= 12;
+							log_ptr = log_ptr - 3;
+							continue;
+						}
+
+						log_ptr[0] = event_hdr.t;
+						if (event_hdr.count > MAX_NO_OF_ARG) {
+							break;
+						}
+						/* Now place the header at the front
+						* and copy back.
+						*/
+						log_ptr -= event_hdr.count;
+
+						writeindex = writeindex - event_hdr.count;
+						record[writeindex++] = event_hdr.t;
+						for (count = 0; count < (event_hdr.count-1);
+							count++) {
+							record[writeindex++] = log_ptr[count];
+						}
+						writeindex = writeindex - event_hdr.count;
+						datalen = datalen - (event_hdr.count * 4);
+						no_of_fmts++;
+					}
+				}
+
+				while (no_of_fmts--)
+				{
+					event_log_hdr_t event_hdr;
+					event_hdr.t = record[writeindex];
+
+					if ((event_hdr.fmt_num>>2) < raw_event->num_fmts) {
+						fmt = raw_event->fmts[event_hdr.fmt_num>>2];
+						DHD_EVENT((fmt,
+							record[writeindex + 1],
+							record[writeindex + 2],
+							record[writeindex + 3],
+							record[writeindex + 4],
+							record[writeindex + 5],
+							record[writeindex + 6],
+							record[writeindex + 7],
+							record[writeindex + 8],
+							record[writeindex + 9],
+							record[writeindex + 10],
+							record[writeindex + 11],
+							record[writeindex + 12],
+							record[writeindex + 13],
+							record[writeindex + 14],
+							record[writeindex + 15],
+							record[writeindex + 16]));
+
+						if (fmt[strlen(fmt) - 1] != '\n') {
+							/* Add newline if missing */
+							DHD_EVENT(("\n"));
+						}
+					}
+
+					writeindex = writeindex + event_hdr.count;
+				}
+
+				if (record) {
+					MFREE(dhd_pub->osh, record, malloc_len);
+					record = NULL;
+				}
+			} else {
+				while (datalen > 4) {
+					p += 4;
+					datalen -= 4;
+					/* Print each word.  DO NOT ntoh it.  */
+					DHD_EVENT((" %8.8x", *((uint32 *) p)));
+				}
+				DHD_EVENT(("\n"));
+			}
+			datalen = 0;
+		}
+		break;
+	}
+#endif /* SHOW_LOGTRACE */
+
+	case WLC_E_RSSI:
+		DHD_EVENT(("MACEVENT: %s %d\n", event_name, ntoh32(*((int *)event_data))));
+		break;
+
+	case WLC_E_SERVICE_FOUND:
+	case WLC_E_P2PO_ADD_DEVICE:
+	case WLC_E_P2PO_DEL_DEVICE:
+		DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
+		break;
+
+#ifdef BT_WIFI_HANDOBER
+	case WLC_E_BT_WIFI_HANDOVER_REQ:
+		DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
+		break;
+#endif
+
+	default:
+		DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d\n",
+		       event_name, event_type, eabuf, (int)status, (int)reason,
+		       (int)auth_type));
+		break;
+	}
+
+	/* show any appended data */
+	if (DHD_BYTES_ON() && DHD_EVENT_ON() && datalen) {
+		buf = (uchar *) event_data;
+		BCM_REFERENCE(buf);
+		DHD_EVENT((" data (%d) : ", datalen));
+		for (i = 0; i < datalen; i++)
+			DHD_EVENT((" 0x%02x ", *buf++));
+		DHD_EVENT(("\n"));
+	}
+}
+#endif /* SHOW_EVENTS */
+
+int
+wl_host_event(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata, size_t pktlen,
+	wl_event_msg_t *event, void **data_ptr, void *raw_event)
+{
+	/* check whether packet is a BRCM event pkt */
+	bcm_event_t *pvt_data = (bcm_event_t *)pktdata;
+	uint8 *event_data;
+	uint32 type, status, datalen;
+	uint16 flags;
+	int evlen;
+	int hostidx;
+
+	if (bcmp(BRCM_OUI, &pvt_data->bcm_hdr.oui[0], DOT11_OUI_LEN)) {
+		DHD_ERROR(("%s: mismatched OUI, bailing\n", __FUNCTION__));
+		return (BCME_ERROR);
+	}
+
+	/* BRCM event pkt may be unaligned - use xxx_ua to load user_subtype. */
+	if (ntoh16_ua((void *)&pvt_data->bcm_hdr.usr_subtype) != BCMILCP_BCM_SUBTYPE_EVENT) {
+		DHD_ERROR(("%s: mismatched subtype, bailing\n", __FUNCTION__));
+		return (BCME_ERROR);
+	}
+
+	if (pktlen < sizeof(bcm_event_t))
+		return (BCME_ERROR);
+
+	*data_ptr = &pvt_data[1];
+	event_data = *data_ptr;
+
+
+	/* memcpy since BRCM event pkt may be unaligned. */
+	memcpy(event, &pvt_data->event, sizeof(wl_event_msg_t));
+
+	type = ntoh32_ua((void *)&event->event_type);
+	flags = ntoh16_ua((void *)&event->flags);
+	status = ntoh32_ua((void *)&event->status);
+
+	datalen = ntoh32_ua((void *)&event->datalen);
+	if (datalen > pktlen)
+		return (BCME_ERROR);
+
+	evlen = datalen + sizeof(bcm_event_t);
+	if (evlen > pktlen) {
+		return (BCME_ERROR);
+	}
+
+	/* find equivalent host index for event ifidx */
+	hostidx = dhd_ifidx2hostidx(dhd_pub->info, event->ifidx);
+
+	switch (type) {
+#ifdef PROP_TXSTATUS
+	case WLC_E_FIFO_CREDIT_MAP:
+		dhd_wlfc_enable(dhd_pub);
+		dhd_wlfc_FIFOcreditmap_event(dhd_pub, event_data);
+		WLFC_DBGMESG(("WLC_E_FIFO_CREDIT_MAP:(AC0,AC1,AC2,AC3),(BC_MC),(OTHER): "
+			"(%d,%d,%d,%d),(%d),(%d)\n", event_data[0], event_data[1],
+			event_data[2],
+			event_data[3], event_data[4], event_data[5]));
+		break;
+
+	case WLC_E_BCMC_CREDIT_SUPPORT:
+		dhd_wlfc_BCMCCredit_support_event(dhd_pub);
+		break;
+#endif
+
+	case WLC_E_IF:
+		{
+		struct wl_event_data_if *ifevent = (struct wl_event_data_if *)event_data;
+
+		/* Ignore the event if NOIF is set */
+		if (ifevent->reserved & WLC_E_IF_FLAGS_BSSCFG_NOIF) {
+			DHD_ERROR(("WLC_E_IF: NO_IF set, event Ignored\n"));
+			return (BCME_UNSUPPORTED);
+		}
+#ifdef PCIE_FULL_DONGLE
+		dhd_update_interface_flow_info(dhd_pub, ifevent->ifidx,
+			ifevent->opcode, ifevent->role);
+#endif
+#ifdef PROP_TXSTATUS
+		{
+			uint8* ea = pvt_data->eth.ether_dhost;
+			WLFC_DBGMESG(("WLC_E_IF: idx:%d, action:%s, iftype:%s, "
+			              "[%02x:%02x:%02x:%02x:%02x:%02x]\n",
+			              ifevent->ifidx,
+			              ((ifevent->opcode == WLC_E_IF_ADD) ? "ADD":"DEL"),
+			              ((ifevent->role == 0) ? "STA":"AP "),
+			              ea[0], ea[1], ea[2], ea[3], ea[4], ea[5]));
+			(void)ea;
+
+			if (ifevent->opcode == WLC_E_IF_CHANGE)
+				dhd_wlfc_interface_event(dhd_pub,
+					eWLFC_MAC_ENTRY_ACTION_UPDATE,
+					ifevent->ifidx, ifevent->role, ea);
+			else
+				dhd_wlfc_interface_event(dhd_pub,
+					((ifevent->opcode == WLC_E_IF_ADD) ?
+					eWLFC_MAC_ENTRY_ACTION_ADD : eWLFC_MAC_ENTRY_ACTION_DEL),
+					ifevent->ifidx, ifevent->role, ea);
+
+			/* dhd already has created an interface by default, for 0 */
+			if (ifevent->ifidx == 0)
+				break;
+		}
+#endif /* PROP_TXSTATUS */
+
+		if (ifevent->ifidx > 0 && ifevent->ifidx < DHD_MAX_IFS) {
+			if (ifevent->opcode == WLC_E_IF_ADD) {
+				if (dhd_event_ifadd(dhd_pub->info, ifevent, event->ifname,
+					event->addr.octet)) {
+
+					DHD_ERROR(("%s: dhd_event_ifadd failed ifidx: %d  %s\n",
+						__FUNCTION__, ifevent->ifidx, event->ifname));
+					return (BCME_ERROR);
+				}
+			} else if (ifevent->opcode == WLC_E_IF_DEL) {
+				dhd_event_ifdel(dhd_pub->info, ifevent, event->ifname,
+					event->addr.octet);
+			} else if (ifevent->opcode == WLC_E_IF_CHANGE) {
+#ifdef WL_CFG80211
+				wl_cfg80211_notify_ifchange(ifevent->ifidx,
+					event->ifname, event->addr.octet, ifevent->bssidx);
+#endif /* WL_CFG80211 */
+			}
+		} else {
+#if !defined(PROP_TXSTATUS) || !defined(PCIE_FULL_DONGLE)
+			DHD_ERROR(("%s: Invalid ifidx %d for %s\n",
+			           __FUNCTION__, ifevent->ifidx, event->ifname));
+#endif /* !PROP_TXSTATUS */
+		}
+			/* send up the if event: btamp user needs it */
+			*ifidx = hostidx;
+			/* push up to external supp/auth */
+			dhd_event(dhd_pub->info, (char *)pvt_data, evlen, *ifidx);
+		break;
+	}
+
+#ifdef WLMEDIA_HTSF
+	case WLC_E_HTSFSYNC:
+		htsf_update(dhd_pub->info, event_data);
+		break;
+#endif /* WLMEDIA_HTSF */
+	case WLC_E_NDIS_LINK: {
+		uint32 temp = hton32(WLC_E_LINK);
+
+		memcpy((void *)(&pvt_data->event.event_type), &temp,
+		       sizeof(pvt_data->event.event_type));
+		break;
+	}
+	case WLC_E_PFN_NET_FOUND:
+	case WLC_E_PFN_NET_LOST:
+		break;
+#if defined(PNO_SUPPORT)
+	case WLC_E_PFN_BSSID_NET_FOUND:
+	case WLC_E_PFN_BSSID_NET_LOST:
+	case WLC_E_PFN_BEST_BATCHING:
+		dhd_pno_event_handler(dhd_pub, event, (void *)event_data);
+		break;
+#endif
+#if defined(RTT_SUPPORT)
+	case WLC_E_PROXD:
+		dhd_rtt_event_handler(dhd_pub, event, (void *)event_data);
+		break;
+#endif /* RTT_SUPPORT */
+		/* These are what external supplicant/authenticator wants */
+	case WLC_E_ASSOC_IND:
+	case WLC_E_AUTH_IND:
+	case WLC_E_REASSOC_IND:
+		dhd_findadd_sta(dhd_pub, hostidx, &event->addr.octet);
+		break;
+	case WLC_E_LINK:
+#ifdef PCIE_FULL_DONGLE
+		if (dhd_update_interface_link_status(dhd_pub, (uint8)hostidx,
+			(uint8)flags) != BCME_OK)
+			break;
+		if (!flags) {
+			dhd_flow_rings_delete(dhd_pub, hostidx);
+		}
+		/* fall through */
+#endif
+	case WLC_E_DEAUTH:
+	case WLC_E_DEAUTH_IND:
+	case WLC_E_DISASSOC:
+	case WLC_E_DISASSOC_IND:
+		if (type != WLC_E_LINK) {
+			dhd_del_sta(dhd_pub, hostidx, &event->addr.octet);
+		}
+		DHD_EVENT(("%s: Link event %d, flags %x, status %x\n",
+		           __FUNCTION__, type, flags, status));
+#ifdef PCIE_FULL_DONGLE
+		if (type != WLC_E_LINK) {
+			uint8 ifindex = (uint8)hostidx;
+			uint8 role = dhd_flow_rings_ifindex2role(dhd_pub, ifindex);
+			if (DHD_IF_ROLE_STA(role)) {
+				dhd_flow_rings_delete(dhd_pub, ifindex);
+			} else {
+				dhd_flow_rings_delete_for_peer(dhd_pub, ifindex,
+					&event->addr.octet[0]);
+			}
+		}
+#endif
+		/* fall through */
+	default:
+		*ifidx = hostidx;
+		/* push up to external supp/auth */
+		dhd_event(dhd_pub->info, (char *)pvt_data, evlen, *ifidx);
+		DHD_TRACE(("%s: MAC event %d, flags %x, status %x\n",
+		           __FUNCTION__, type, flags, status));
+		BCM_REFERENCE(flags);
+		BCM_REFERENCE(status);
+
+		break;
+	}
+
+#ifdef SHOW_EVENTS
+	wl_show_host_event(dhd_pub, event,
+		(void *)event_data, raw_event, dhd_pub->enable_log);
+#endif /* SHOW_EVENTS */
+
+	return (BCME_OK);
+}
+
+void
+wl_event_to_host_order(wl_event_msg_t * evt)
+{
+	/* Event struct members passed from dongle to host are stored in network
+	 * byte order. Convert all members to host-order.
+	 */
+	evt->event_type = ntoh32(evt->event_type);
+	evt->flags = ntoh16(evt->flags);
+	evt->status = ntoh32(evt->status);
+	evt->reason = ntoh32(evt->reason);
+	evt->auth_type = ntoh32(evt->auth_type);
+	evt->datalen = ntoh32(evt->datalen);
+	evt->version = ntoh16(evt->version);
+}
+
+void
+dhd_print_buf(void *pbuf, int len, int bytes_per_line)
+{
+#ifdef DHD_DEBUG
+	int i, j = 0;
+	unsigned char *buf = pbuf;
+
+	if (bytes_per_line == 0) {
+		bytes_per_line = len;
+	}
+
+	for (i = 0; i < len; i++) {
+		printf("%2.2x", *buf++);
+		j++;
+		if (j == bytes_per_line) {
+			printf("\n");
+			j = 0;
+		} else {
+			printf(":");
+		}
+	}
+	printf("\n");
+#endif /* DHD_DEBUG */
+}
+#ifndef strtoul
+#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
+#endif
+
+#ifdef PKT_FILTER_SUPPORT
+/* Convert user's input in hex pattern to byte-size mask */
+static int
+wl_pattern_atoh(char *src, char *dst)
+{
+	int i;
+	if (strncmp(src, "0x", 2) != 0 &&
+	    strncmp(src, "0X", 2) != 0) {
+		DHD_ERROR(("Mask invalid format. Needs to start with 0x\n"));
+		return -1;
+	}
+	src = src + 2; /* Skip past 0x */
+	if (strlen(src) % 2 != 0) {
+		DHD_ERROR(("Mask invalid format. Needs to be of even length\n"));
+		return -1;
+	}
+	for (i = 0; *src != '\0'; i++) {
+		char num[3];
+		bcm_strncpy_s(num, sizeof(num), src, 2);
+		num[2] = '\0';
+		dst[i] = (uint8)strtoul(num, NULL, 16);
+		src += 2;
+	}
+	return i;
+}
+
+void
+dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode)
+{
+	char				*argv[8];
+	int					i = 0;
+	const char			*str;
+	int					buf_len;
+	int					str_len;
+	char				*arg_save = 0, *arg_org = 0;
+	int					rc;
+	char				buf[32] = {0};
+	wl_pkt_filter_enable_t	enable_parm;
+	wl_pkt_filter_enable_t	* pkt_filterp;
+
+	if (!arg)
+		return;
+
+	if (!(arg_save = MALLOC(dhd->osh, strlen(arg) + 1))) {
+		DHD_ERROR(("%s: malloc failed\n", __FUNCTION__));
+		goto fail;
+	}
+	arg_org = arg_save;
+	memcpy(arg_save, arg, strlen(arg) + 1);
+
+	argv[i] = bcmstrtok(&arg_save, " ", 0);
+
+	i = 0;
+	if (argv[i] == NULL) {
+		DHD_ERROR(("No args provided\n"));
+		goto fail;
+	}
+
+	str = "pkt_filter_enable";
+	str_len = strlen(str);
+	bcm_strncpy_s(buf, sizeof(buf) - 1, str, sizeof(buf) - 1);
+	buf[ sizeof(buf) - 1 ] = '\0';
+	buf_len = str_len + 1;
+
+	pkt_filterp = (wl_pkt_filter_enable_t *)(buf + str_len + 1);
+
+	/* Parse packet filter id. */
+	enable_parm.id = htod32(strtoul(argv[i], NULL, 0));
+
+	/* Parse enable/disable value. */
+	enable_parm.enable = htod32(enable);
+
+	buf_len += sizeof(enable_parm);
+	memcpy((char *)pkt_filterp,
+	       &enable_parm,
+	       sizeof(enable_parm));
+
+	/* Enable/disable the specified filter. */
+	rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
+	rc = rc >= 0 ? 0 : rc;
+	if (rc)
+		DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n",
+		__FUNCTION__, arg, rc));
+	else
+		DHD_TRACE(("%s: successfully added pktfilter %s\n",
+		__FUNCTION__, arg));
+
+	/* Contorl the master mode */
+	bcm_mkiovar("pkt_filter_mode", (char *)&master_mode, 4, buf, sizeof(buf));
+	rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
+	rc = rc >= 0 ? 0 : rc;
+	if (rc)
+		DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n",
+		__FUNCTION__, arg, rc));
+
+fail:
+	if (arg_org)
+		MFREE(dhd->osh, arg_org, strlen(arg) + 1);
+}
+
+void
+dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg)
+{
+	const char 			*str;
+	wl_pkt_filter_t		pkt_filter;
+	wl_pkt_filter_t		*pkt_filterp;
+	int					buf_len;
+	int					str_len;
+	int 				rc;
+	uint32				mask_size;
+	uint32				pattern_size;
+	char				*argv[8], * buf = 0;
+	int					i = 0;
+	char				*arg_save = 0, *arg_org = 0;
+#define BUF_SIZE		2048
+
+	if (!arg)
+		return;
+
+	if (!(arg_save = MALLOC(dhd->osh, strlen(arg) + 1))) {
+		DHD_ERROR(("%s: malloc failed\n", __FUNCTION__));
+		goto fail;
+	}
+
+	arg_org = arg_save;
+
+	if (!(buf = MALLOC(dhd->osh, BUF_SIZE))) {
+		DHD_ERROR(("%s: malloc failed\n", __FUNCTION__));
+		goto fail;
+	}
+
+	memcpy(arg_save, arg, strlen(arg) + 1);
+
+	if (strlen(arg) > BUF_SIZE) {
+		DHD_ERROR(("Not enough buffer %d < %d\n", (int)strlen(arg), (int)sizeof(buf)));
+		goto fail;
+	}
+
+	argv[i] = bcmstrtok(&arg_save, " ", 0);
+	while (argv[i++])
+		argv[i] = bcmstrtok(&arg_save, " ", 0);
+
+	i = 0;
+	if (argv[i] == NULL) {
+		DHD_ERROR(("No args provided\n"));
+		goto fail;
+	}
+
+	str = "pkt_filter_add";
+	str_len = strlen(str);
+	bcm_strncpy_s(buf, BUF_SIZE, str, str_len);
+	buf[ str_len ] = '\0';
+	buf_len = str_len + 1;
+
+	pkt_filterp = (wl_pkt_filter_t *) (buf + str_len + 1);
+
+	/* Parse packet filter id. */
+	pkt_filter.id = htod32(strtoul(argv[i], NULL, 0));
+
+	if (argv[++i] == NULL) {
+		DHD_ERROR(("Polarity not provided\n"));
+		goto fail;
+	}
+
+	/* Parse filter polarity. */
+	pkt_filter.negate_match = htod32(strtoul(argv[i], NULL, 0));
+
+	if (argv[++i] == NULL) {
+		DHD_ERROR(("Filter type not provided\n"));
+		goto fail;
+	}
+
+	/* Parse filter type. */
+	pkt_filter.type = htod32(strtoul(argv[i], NULL, 0));
+
+	if (argv[++i] == NULL) {
+		DHD_ERROR(("Offset not provided\n"));
+		goto fail;
+	}
+
+	/* Parse pattern filter offset. */
+	pkt_filter.u.pattern.offset = htod32(strtoul(argv[i], NULL, 0));
+
+	if (argv[++i] == NULL) {
+		DHD_ERROR(("Bitmask not provided\n"));
+		goto fail;
+	}
+
+	/* Parse pattern filter mask. */
+	mask_size =
+		htod32(wl_pattern_atoh(argv[i], (char *) pkt_filterp->u.pattern.mask_and_pattern));
+
+	if (argv[++i] == NULL) {
+		DHD_ERROR(("Pattern not provided\n"));
+		goto fail;
+	}
+
+	/* Parse pattern filter pattern. */
+	pattern_size =
+		htod32(wl_pattern_atoh(argv[i],
+	         (char *) &pkt_filterp->u.pattern.mask_and_pattern[mask_size]));
+
+	if (mask_size != pattern_size) {
+		DHD_ERROR(("Mask and pattern not the same size\n"));
+		goto fail;
+	}
+
+	pkt_filter.u.pattern.size_bytes = mask_size;
+	buf_len += WL_PKT_FILTER_FIXED_LEN;
+	buf_len += (WL_PKT_FILTER_PATTERN_FIXED_LEN + 2 * mask_size);
+
+	/* Keep-alive attributes are set in local	variable (keep_alive_pkt), and
+	** then memcpy'ed into buffer (keep_alive_pktp) since there is no
+	** guarantee that the buffer is properly aligned.
+	*/
+	memcpy((char *)pkt_filterp,
+	       &pkt_filter,
+	       WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_FIXED_LEN);
+
+	rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
+	rc = rc >= 0 ? 0 : rc;
+
+	if (rc)
+		DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n",
+		__FUNCTION__, arg, rc));
+	else
+		DHD_TRACE(("%s: successfully added pktfilter %s\n",
+		__FUNCTION__, arg));
+
+fail:
+	if (arg_org)
+		MFREE(dhd->osh, arg_org, strlen(arg) + 1);
+
+	if (buf)
+		MFREE(dhd->osh, buf, BUF_SIZE);
+}
+
+void dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id)
+{
+	char iovbuf[32];
+	int ret;
+
+	bcm_mkiovar("pkt_filter_delete", (char *)&id, 4, iovbuf, sizeof(iovbuf));
+	ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+	if (ret < 0) {
+		DHD_ERROR(("%s: Failed to delete filter ID:%d, ret=%d\n",
+			__FUNCTION__, id, ret));
+	}
+}
+#endif /* PKT_FILTER_SUPPORT */
+
+/* ========================== */
+/* ==== ARP OFFLOAD SUPPORT = */
+/* ========================== */
+#ifdef ARP_OFFLOAD_SUPPORT
+void
+dhd_arp_offload_set(dhd_pub_t * dhd, int arp_mode)
+{
+	char iovbuf[DHD_IOVAR_BUF_SIZE];
+	int iovar_len;
+	int retcode;
+
+	iovar_len = bcm_mkiovar("arp_ol", (char *)&arp_mode, 4, iovbuf, sizeof(iovbuf));
+	if (!iovar_len) {
+		DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
+			__FUNCTION__, sizeof(iovbuf)));
+		return;
+	}
+
+	retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iovar_len, TRUE, 0);
+	retcode = retcode >= 0 ? 0 : retcode;
+	if (retcode)
+		DHD_TRACE(("%s: failed to set ARP offload mode to 0x%x, retcode = %d\n",
+			__FUNCTION__, arp_mode, retcode));
+	else
+		DHD_TRACE(("%s: successfully set ARP offload mode to 0x%x\n",
+			__FUNCTION__, arp_mode));
+}
+
+void
+dhd_arp_offload_enable(dhd_pub_t * dhd, int arp_enable)
+{
+	char iovbuf[DHD_IOVAR_BUF_SIZE];
+	int iovar_len;
+	int retcode;
+
+	iovar_len = bcm_mkiovar("arpoe", (char *)&arp_enable, 4, iovbuf, sizeof(iovbuf));
+	if (!iovar_len) {
+		DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
+			__FUNCTION__, sizeof(iovbuf)));
+		return;
+	}
+
+	retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iovar_len, TRUE, 0);
+	retcode = retcode >= 0 ? 0 : retcode;
+	if (retcode)
+		DHD_TRACE(("%s: failed to enabe ARP offload to %d, retcode = %d\n",
+			__FUNCTION__, arp_enable, retcode));
+	else
+		DHD_TRACE(("%s: successfully enabed ARP offload to %d\n",
+			__FUNCTION__, arp_enable));
+	if (arp_enable) {
+		uint32 version;
+		bcm_mkiovar("arp_version", 0, 0, iovbuf, sizeof(iovbuf));
+		retcode = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
+		if (retcode) {
+			DHD_INFO(("%s: fail to get version (maybe version 1:retcode = %d\n",
+				__FUNCTION__, retcode));
+			dhd->arp_version = 1;
+		}
+		else {
+			memcpy(&version, iovbuf, sizeof(version));
+			DHD_INFO(("%s: ARP Version= %x\n", __FUNCTION__, version));
+			dhd->arp_version = version;
+		}
+	}
+}
+
+void
+dhd_aoe_arp_clr(dhd_pub_t *dhd, int idx)
+{
+	int ret = 0;
+	int iov_len = 0;
+	char iovbuf[DHD_IOVAR_BUF_SIZE];
+
+	if (dhd == NULL) return;
+	if (dhd->arp_version == 1)
+		idx = 0;
+
+	iov_len = bcm_mkiovar("arp_table_clear", 0, 0, iovbuf, sizeof(iovbuf));
+	if (!iov_len) {
+		DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
+			__FUNCTION__, sizeof(iovbuf)));
+		return;
+	}
+	if ((ret  = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx)) < 0)
+		DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
+}
+
+void
+dhd_aoe_hostip_clr(dhd_pub_t *dhd, int idx)
+{
+	int ret = 0;
+	int iov_len = 0;
+	char iovbuf[DHD_IOVAR_BUF_SIZE];
+
+	if (dhd == NULL) return;
+	if (dhd->arp_version == 1)
+		idx = 0;
+
+	iov_len = bcm_mkiovar("arp_hostip_clear", 0, 0, iovbuf, sizeof(iovbuf));
+	if (!iov_len) {
+		DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
+			__FUNCTION__, sizeof(iovbuf)));
+		return;
+	}
+	if ((ret  = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx)) < 0)
+		DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
+}
+
+void
+dhd_arp_offload_add_ip(dhd_pub_t *dhd, uint32 ipaddr, int idx)
+{
+	int iov_len = 0;
+	char iovbuf[DHD_IOVAR_BUF_SIZE];
+	int retcode;
+
+
+	if (dhd == NULL) return;
+	if (dhd->arp_version == 1)
+		idx = 0;
+	iov_len = bcm_mkiovar("arp_hostip", (char *)&ipaddr,
+		sizeof(ipaddr), iovbuf, sizeof(iovbuf));
+	if (!iov_len) {
+		DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
+			__FUNCTION__, sizeof(iovbuf)));
+		return;
+	}
+	retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx);
+
+	if (retcode)
+		DHD_TRACE(("%s: ARP ip addr add failed, retcode = %d\n",
+		__FUNCTION__, retcode));
+	else
+		DHD_TRACE(("%s: sARP H ipaddr entry added \n",
+		__FUNCTION__));
+}
+
+int
+dhd_arp_get_arp_hostip_table(dhd_pub_t *dhd, void *buf, int buflen, int idx)
+{
+	int retcode, i;
+	int iov_len;
+	uint32 *ptr32 = buf;
+	bool clr_bottom = FALSE;
+
+	if (!buf)
+		return -1;
+	if (dhd == NULL) return -1;
+	if (dhd->arp_version == 1)
+		idx = 0;
+
+	iov_len = bcm_mkiovar("arp_hostip", 0, 0, buf, buflen);
+	BCM_REFERENCE(iov_len);
+	retcode = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, buflen, FALSE, idx);
+
+	if (retcode) {
+		DHD_TRACE(("%s: ioctl WLC_GET_VAR error %d\n",
+		__FUNCTION__, retcode));
+
+		return -1;
+	}
+
+	/* clean up the buf, ascii reminder */
+	for (i = 0; i < MAX_IPV4_ENTRIES; i++) {
+		if (!clr_bottom) {
+			if (*ptr32 == 0)
+				clr_bottom = TRUE;
+		} else {
+			*ptr32 = 0;
+		}
+		ptr32++;
+	}
+
+	return 0;
+}
+#endif /* ARP_OFFLOAD_SUPPORT  */
+
+/*
+ * Neighbor Discovery Offload: enable NDO feature
+ * Called  by ipv6 event handler when interface comes up/goes down
+ */
+int
+dhd_ndo_enable(dhd_pub_t * dhd, int ndo_enable)
+{
+	char iovbuf[DHD_IOVAR_BUF_SIZE];
+	int iov_len;
+	int retcode;
+
+	if (dhd == NULL)
+		return -1;
+
+	iov_len = bcm_mkiovar("ndoe", (char *)&ndo_enable, 4, iovbuf, sizeof(iovbuf));
+	if (!iov_len) {
+		DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
+			__FUNCTION__, sizeof(iovbuf)));
+		return -1;
+	}
+	retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, 0);
+	if (retcode)
+		DHD_ERROR(("%s: failed to enabe ndo to %d, retcode = %d\n",
+			__FUNCTION__, ndo_enable, retcode));
+	else
+		DHD_TRACE(("%s: successfully enabed ndo offload to %d\n",
+			__FUNCTION__, ndo_enable));
+
+	return retcode;
+}
+
+/*
+ * Neighbor Discover Offload: enable NDO feature
+ * Called  by ipv6 event handler when interface comes up
+ */
+int
+dhd_ndo_add_ip(dhd_pub_t *dhd, char* ipv6addr, int idx)
+{
+	int iov_len = 0;
+	char iovbuf[DHD_IOVAR_BUF_SIZE];
+	int retcode;
+
+	if (dhd == NULL)
+		return -1;
+
+	iov_len = bcm_mkiovar("nd_hostip", (char *)ipv6addr,
+		IPV6_ADDR_LEN, iovbuf, sizeof(iovbuf));
+	if (!iov_len) {
+		DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
+			__FUNCTION__, sizeof(iovbuf)));
+		return -1;
+	}
+	retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx);
+
+	if (retcode)
+		DHD_ERROR(("%s: ndo ip addr add failed, retcode = %d\n",
+		__FUNCTION__, retcode));
+	else
+		DHD_TRACE(("%s: ndo ipaddr entry added \n",
+		__FUNCTION__));
+
+	return retcode;
+}
+/*
+ * Neighbor Discover Offload: enable NDO feature
+ * Called  by ipv6 event handler when interface goes down
+ */
+int
+dhd_ndo_remove_ip(dhd_pub_t *dhd, int idx)
+{
+	int iov_len = 0;
+	char iovbuf[DHD_IOVAR_BUF_SIZE];
+	int retcode;
+
+	if (dhd == NULL)
+		return -1;
+
+	iov_len = bcm_mkiovar("nd_hostip_clear", NULL,
+		0, iovbuf, sizeof(iovbuf));
+	if (!iov_len) {
+		DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
+			__FUNCTION__, sizeof(iovbuf)));
+		return -1;
+	}
+	retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx);
+
+	if (retcode)
+		DHD_ERROR(("%s: ndo ip addr remove failed, retcode = %d\n",
+		__FUNCTION__, retcode));
+	else
+		DHD_TRACE(("%s: ndo ipaddr entry removed \n",
+		__FUNCTION__));
+
+	return retcode;
+}
+
+/* send up locally generated event */
+void
+dhd_sendup_event_common(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data)
+{
+	switch (ntoh32(event->event_type)) {
+	default:
+		break;
+	}
+
+	/* Call per-port handler. */
+	dhd_sendup_event(dhdp, event, data);
+}
+
+
+/*
+ * returns = TRUE if associated, FALSE if not associated
+ */
+bool dhd_is_associated(dhd_pub_t *dhd, void *bss_buf, int *retval)
+{
+	char bssid[6], zbuf[6];
+	int ret = -1;
+
+	bzero(bssid, 6);
+	bzero(zbuf, 6);
+
+	ret  = dhd_wl_ioctl_cmd(dhd, WLC_GET_BSSID, (char *)&bssid, ETHER_ADDR_LEN, FALSE, 0);
+	DHD_TRACE((" %s WLC_GET_BSSID ioctl res = %d\n", __FUNCTION__, ret));
+
+	if (ret == BCME_NOTASSOCIATED) {
+		DHD_TRACE(("%s: not associated! res:%d\n", __FUNCTION__, ret));
+	}
+
+	if (retval)
+		*retval = ret;
+
+	if (ret < 0)
+		return FALSE;
+
+	if ((memcmp(bssid, zbuf, ETHER_ADDR_LEN) != 0)) {
+		/*  STA is assocoated BSSID is non zero */
+
+		if (bss_buf) {
+			/* return bss if caller provided buf */
+			memcpy(bss_buf, bssid, ETHER_ADDR_LEN);
+		}
+		return TRUE;
+	} else {
+		DHD_TRACE(("%s: WLC_GET_BSSID ioctl returned zero bssid\n", __FUNCTION__));
+		return FALSE;
+	}
+}
+
+/* Function to estimate possible DTIM_SKIP value */
+int
+dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd)
+{
+	int bcn_li_dtim = 1; /* deafult no dtim skip setting */
+	int ret = -1;
+	int dtim_period = 0;
+	int ap_beacon = 0;
+	int allowed_skip_dtim_cnt = 0;
+	/* Check if associated */
+	if (dhd_is_associated(dhd, NULL, NULL) == FALSE) {
+		DHD_TRACE(("%s NOT assoc ret %d\n", __FUNCTION__, ret));
+		goto exit;
+	}
+
+	/* read associated AP beacon interval */
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_BCNPRD,
+		&ap_beacon, sizeof(ap_beacon), FALSE, 0)) < 0) {
+		DHD_ERROR(("%s get beacon failed code %d\n", __FUNCTION__, ret));
+		goto exit;
+	}
+
+	/* read associated ap's dtim setup */
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_DTIMPRD,
+		&dtim_period, sizeof(dtim_period), FALSE, 0)) < 0) {
+		DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
+		goto exit;
+	}
+
+	/* if not assocated just eixt */
+	if (dtim_period == 0) {
+		goto exit;
+	}
+
+	/* attemp to use platform defined dtim skip interval */
+	bcn_li_dtim = dhd->suspend_bcn_li_dtim;
+
+	/* check if sta listen interval fits into AP dtim */
+	if (dtim_period > CUSTOM_LISTEN_INTERVAL) {
+		/* AP DTIM to big for our Listen Interval : no dtim skiping */
+		bcn_li_dtim = NO_DTIM_SKIP;
+		DHD_ERROR(("%s DTIM=%d > Listen=%d : too big ...\n",
+			__FUNCTION__, dtim_period, CUSTOM_LISTEN_INTERVAL));
+		goto exit;
+	}
+
+	if ((dtim_period * ap_beacon * bcn_li_dtim) > MAX_DTIM_ALLOWED_INTERVAL) {
+		 allowed_skip_dtim_cnt = MAX_DTIM_ALLOWED_INTERVAL / (dtim_period * ap_beacon);
+		 bcn_li_dtim = (allowed_skip_dtim_cnt != 0) ? allowed_skip_dtim_cnt : NO_DTIM_SKIP;
+	}
+
+	if ((bcn_li_dtim * dtim_period) > CUSTOM_LISTEN_INTERVAL) {
+		/* Round up dtim_skip to fit into STAs Listen Interval */
+		bcn_li_dtim = (int)(CUSTOM_LISTEN_INTERVAL / dtim_period);
+		DHD_TRACE(("%s agjust dtim_skip as %d\n", __FUNCTION__, bcn_li_dtim));
+	}
+
+	DHD_ERROR(("%s beacon=%d bcn_li_dtim=%d DTIM=%d Listen=%d\n",
+		__FUNCTION__, ap_beacon, bcn_li_dtim, dtim_period, CUSTOM_LISTEN_INTERVAL));
+
+exit:
+	return bcn_li_dtim;
+}
+
+/* Check if the mode supports STA MODE */
+bool dhd_support_sta_mode(dhd_pub_t *dhd)
+{
+
+#ifdef  WL_CFG80211
+	if (!(dhd->op_mode & DHD_FLAG_STA_MODE))
+		return FALSE;
+	else
+#endif /* WL_CFG80211 */
+		return TRUE;
+}
+
+#if defined(KEEP_ALIVE)
+int dhd_keep_alive_onoff(dhd_pub_t *dhd)
+{
+	char				buf[32] = {0};
+	const char			*str;
+	wl_mkeep_alive_pkt_t	mkeep_alive_pkt = {0};
+	wl_mkeep_alive_pkt_t	*mkeep_alive_pktp;
+	int					buf_len;
+	int					str_len;
+	int res					= -1;
+
+	if (!dhd_support_sta_mode(dhd))
+		return res;
+
+	DHD_TRACE(("%s execution\n", __FUNCTION__));
+
+	str = "mkeep_alive";
+	str_len = strlen(str);
+	strncpy(buf, str, sizeof(buf) - 1);
+	buf[ sizeof(buf) - 1 ] = '\0';
+	mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) (buf + str_len + 1);
+	mkeep_alive_pkt.period_msec = CUSTOM_KEEP_ALIVE_SETTING;
+	buf_len = str_len + 1;
+	mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION);
+	mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN);
+	/* Setup keep alive zero for null packet generation */
+	mkeep_alive_pkt.keep_alive_id = 0;
+	mkeep_alive_pkt.len_bytes = 0;
+	buf_len += WL_MKEEP_ALIVE_FIXED_LEN;
+	bzero(mkeep_alive_pkt.data, sizeof(mkeep_alive_pkt.data));
+	/* Keep-alive attributes are set in local	variable (mkeep_alive_pkt), and
+	 * then memcpy'ed into buffer (mkeep_alive_pktp) since there is no
+	 * guarantee that the buffer is properly aligned.
+	 */
+	memcpy((char *)mkeep_alive_pktp, &mkeep_alive_pkt, WL_MKEEP_ALIVE_FIXED_LEN);
+
+	res = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
+
+	return res;
+}
+#endif /* defined(KEEP_ALIVE) */
+/* Android ComboSCAN support */
+
+/*
+ *  data parsing from ComboScan tlv list
+*/
+int
+wl_iw_parse_data_tlv(char** list_str, void *dst, int dst_size, const char token,
+                     int input_size, int *bytes_left)
+{
+	char* str;
+	uint16 short_temp;
+	uint32 int_temp;
+
+	if ((list_str == NULL) || (*list_str == NULL) ||(bytes_left == NULL) || (*bytes_left < 0)) {
+		DHD_ERROR(("%s error paramters\n", __FUNCTION__));
+		return -1;
+	}
+	str = *list_str;
+
+	/* Clean all dest bytes */
+	memset(dst, 0, dst_size);
+	while (*bytes_left > 0) {
+
+		if (str[0] != token) {
+			DHD_TRACE(("%s NOT Type=%d get=%d left_parse=%d \n",
+				__FUNCTION__, token, str[0], *bytes_left));
+			return -1;
+		}
+
+		*bytes_left -= 1;
+		str += 1;
+
+		if (input_size == 1) {
+			memcpy(dst, str, input_size);
+		}
+		else if (input_size == 2) {
+			memcpy(dst, (char *)htod16(memcpy(&short_temp, str, input_size)),
+				input_size);
+		}
+		else if (input_size == 4) {
+			memcpy(dst, (char *)htod32(memcpy(&int_temp, str, input_size)),
+				input_size);
+		}
+
+		*bytes_left -= input_size;
+		str += input_size;
+		*list_str = str;
+		return 1;
+	}
+	return 1;
+}
+
+/*
+ *  channel list parsing from cscan tlv list
+*/
+int
+wl_iw_parse_channel_list_tlv(char** list_str, uint16* channel_list,
+                             int channel_num, int *bytes_left)
+{
+	char* str;
+	int idx = 0;
+
+	if ((list_str == NULL) || (*list_str == NULL) ||(bytes_left == NULL) || (*bytes_left < 0)) {
+		DHD_ERROR(("%s error paramters\n", __FUNCTION__));
+		return -1;
+	}
+	str = *list_str;
+
+	while (*bytes_left > 0) {
+
+		if (str[0] != CSCAN_TLV_TYPE_CHANNEL_IE) {
+			*list_str = str;
+			DHD_TRACE(("End channel=%d left_parse=%d %d\n", idx, *bytes_left, str[0]));
+			return idx;
+		}
+		/* Get proper CSCAN_TLV_TYPE_CHANNEL_IE */
+		*bytes_left -= 1;
+		str += 1;
+
+		if (str[0] == 0) {
+			/* All channels */
+			channel_list[idx] = 0x0;
+		}
+		else {
+			channel_list[idx] = (uint16)str[0];
+			DHD_TRACE(("%s channel=%d \n", __FUNCTION__,  channel_list[idx]));
+		}
+		*bytes_left -= 1;
+		str += 1;
+
+		if (idx++ > 255) {
+			DHD_ERROR(("%s Too many channels \n", __FUNCTION__));
+			return -1;
+		}
+	}
+
+	*list_str = str;
+	return idx;
+}
+
+/*
+ *  SSIDs list parsing from cscan tlv list
+ */
+int
+wl_iw_parse_ssid_list_tlv(char** list_str, wlc_ssid_ext_t* ssid, int max, int *bytes_left)
+{
+	char* str;
+	int idx = 0;
+
+	if ((list_str == NULL) || (*list_str == NULL) || (*bytes_left < 0)) {
+		DHD_ERROR(("%s error paramters\n", __FUNCTION__));
+		return -1;
+	}
+	str = *list_str;
+	while (*bytes_left > 0) {
+
+		if (str[0] != CSCAN_TLV_TYPE_SSID_IE) {
+			*list_str = str;
+			DHD_TRACE(("nssid=%d left_parse=%d %d\n", idx, *bytes_left, str[0]));
+			return idx;
+		}
+
+		/* Get proper CSCAN_TLV_TYPE_SSID_IE */
+		*bytes_left -= 1;
+		str += 1;
+
+		if (str[0] == 0) {
+			/* Broadcast SSID */
+			ssid[idx].SSID_len = 0;
+			memset((char*)ssid[idx].SSID, 0x0, DOT11_MAX_SSID_LEN);
+			*bytes_left -= 1;
+			str += 1;
+
+			DHD_TRACE(("BROADCAST SCAN  left=%d\n", *bytes_left));
+		}
+		else if (str[0] <= DOT11_MAX_SSID_LEN) {
+			/* Get proper SSID size */
+			ssid[idx].SSID_len = str[0];
+			*bytes_left -= 1;
+			str += 1;
+
+			/* Get SSID */
+			if (ssid[idx].SSID_len > *bytes_left) {
+				DHD_ERROR(("%s out of memory range len=%d but left=%d\n",
+				__FUNCTION__, ssid[idx].SSID_len, *bytes_left));
+				return -1;
+			}
+
+			memcpy((char*)ssid[idx].SSID, str, ssid[idx].SSID_len);
+
+			*bytes_left -= ssid[idx].SSID_len;
+			str += ssid[idx].SSID_len;
+			ssid[idx].hidden = TRUE;
+
+			DHD_TRACE(("%s :size=%d left=%d\n",
+				(char*)ssid[idx].SSID, ssid[idx].SSID_len, *bytes_left));
+		}
+		else {
+			DHD_ERROR(("### SSID size more that %d\n", str[0]));
+			return -1;
+		}
+
+		if (idx++ >  max) {
+			DHD_ERROR(("%s number of SSIDs more that %d\n", __FUNCTION__, idx));
+			return -1;
+		}
+	}
+
+	*list_str = str;
+	return idx;
+}
+
+/* Parse a comma-separated list from list_str into ssid array, starting
+ * at index idx.  Max specifies size of the ssid array.  Parses ssids
+ * and returns updated idx; if idx >= max not all fit, the excess have
+ * not been copied.  Returns -1 on empty string, or on ssid too long.
+ */
+int
+wl_iw_parse_ssid_list(char** list_str, wlc_ssid_t* ssid, int idx, int max)
+{
+	char* str, *ptr;
+
+	if ((list_str == NULL) || (*list_str == NULL))
+		return -1;
+
+	for (str = *list_str; str != NULL; str = ptr) {
+
+		/* check for next TAG */
+		if (!strncmp(str, GET_CHANNEL, strlen(GET_CHANNEL))) {
+			*list_str	 = str + strlen(GET_CHANNEL);
+			return idx;
+		}
+
+		if ((ptr = strchr(str, ',')) != NULL) {
+			*ptr++ = '\0';
+		}
+
+		if (strlen(str) > DOT11_MAX_SSID_LEN) {
+			DHD_ERROR(("ssid <%s> exceeds %d\n", str, DOT11_MAX_SSID_LEN));
+			return -1;
+		}
+
+		if (strlen(str) == 0)
+			ssid[idx].SSID_len = 0;
+
+		if (idx < max) {
+			bzero(ssid[idx].SSID, sizeof(ssid[idx].SSID));
+			strncpy((char*)ssid[idx].SSID, str, sizeof(ssid[idx].SSID) - 1);
+			ssid[idx].SSID_len = strlen(str);
+		}
+		idx++;
+	}
+	return idx;
+}
+
+/*
+ * Parse channel list from iwpriv CSCAN
+ */
+int
+wl_iw_parse_channel_list(char** list_str, uint16* channel_list, int channel_num)
+{
+	int num;
+	int val;
+	char* str;
+	char* endptr = NULL;
+
+	if ((list_str == NULL)||(*list_str == NULL))
+		return -1;
+
+	str = *list_str;
+	num = 0;
+	while (strncmp(str, GET_NPROBE, strlen(GET_NPROBE))) {
+		val = (int)strtoul(str, &endptr, 0);
+		if (endptr == str) {
+			printf("could not parse channel number starting at"
+				" substring \"%s\" in list:\n%s\n",
+				str, *list_str);
+			return -1;
+		}
+		str = endptr + strspn(endptr, " ,");
+
+		if (num == channel_num) {
+			DHD_ERROR(("too many channels (more than %d) in channel list:\n%s\n",
+				channel_num, *list_str));
+			return -1;
+		}
+
+		channel_list[num++] = (uint16)val;
+	}
+	*list_str = str;
+	return num;
+}
diff --git a/drivers/net/wireless/bcmdhd/dhd_custom_gpio.c b/drivers/net/wireless/bcmdhd/dhd_custom_gpio.c
new file mode 100644
index 0000000..b7d162c
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_custom_gpio.c
@@ -0,0 +1,302 @@
+/*
+* Customer code to add GPIO control during WLAN start/stop
+* Copyright (C) 1999-2014, Broadcom Corporation
+* 
+*      Unless you and Broadcom execute a separate written software license
+* agreement governing use of this software, this software is licensed to you
+* under the terms of the GNU General Public License version 2 (the "GPL"),
+* available at http://www.broadcom.com/licenses/GPLv2.php, with the
+* following added to such license:
+* 
+*      As a special exception, the copyright holders of this software give you
+* permission to link this software with independent modules, and to copy and
+* distribute the resulting executable under terms of your choice, provided that
+* you also meet, for each linked independent module, the terms and conditions of
+* the license of that module.  An independent module is a module which is not
+* derived from this software.  The special exception does not apply to any
+* modifications of the software.
+* 
+*      Notwithstanding the above, under no circumstances may you combine this
+* software in any way with any other Broadcom software provided under a license
+* other than the GPL, without Broadcom's express prior written consent.
+*
+* $Id: dhd_custom_gpio.c 447105 2014-01-08 05:27:09Z $
+*/
+
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_linux.h>
+
+#include <wlioctl.h>
+#include <wl_iw.h>
+
+#define WL_ERROR(x) printf x
+#define WL_TRACE(x)
+
+#if defined(CUSTOMER_HW2)
+
+
+#endif 
+
+#if defined(OOB_INTR_ONLY)
+
+#if defined(BCMLXSDMMC)
+extern int sdioh_mmc_irq(int irq);
+#endif /* (BCMLXSDMMC)  */
+
+#if defined(CUSTOMER_HW3)
+#include <mach/gpio.h>
+#endif
+
+/* Customer specific Host GPIO defintion  */
+static int dhd_oob_gpio_num = -1;
+
+module_param(dhd_oob_gpio_num, int, 0644);
+MODULE_PARM_DESC(dhd_oob_gpio_num, "DHD oob gpio number");
+
+/* This function will return:
+ *  1) return :  Host gpio interrupt number per customer platform
+ *  2) irq_flags_ptr : Type of Host interrupt as Level or Edge
+ *
+ *  NOTE :
+ *  Customer should check his platform definitions
+ *  and his Host Interrupt spec
+ *  to figure out the proper setting for his platform.
+ *  Broadcom provides just reference settings as example.
+ *
+ */
+int dhd_customer_oob_irq_map(void *adapter, unsigned long *irq_flags_ptr)
+{
+	int  host_oob_irq = 0;
+
+#if defined(CUSTOMER_HW2)
+	host_oob_irq = wifi_platform_get_irq_number(adapter, irq_flags_ptr);
+
+#else
+#if defined(CUSTOM_OOB_GPIO_NUM)
+	if (dhd_oob_gpio_num < 0) {
+		dhd_oob_gpio_num = CUSTOM_OOB_GPIO_NUM;
+	}
+#endif /* CUSTOMER_OOB_GPIO_NUM */
+
+	if (dhd_oob_gpio_num < 0) {
+		WL_ERROR(("%s: ERROR customer specific Host GPIO is NOT defined \n",
+		__FUNCTION__));
+		return (dhd_oob_gpio_num);
+	}
+
+	WL_ERROR(("%s: customer specific Host GPIO number is (%d)\n",
+	         __FUNCTION__, dhd_oob_gpio_num));
+
+#if defined CUSTOMER_HW3
+	gpio_request(dhd_oob_gpio_num, "oob irq");
+	host_oob_irq = gpio_to_irq(dhd_oob_gpio_num);
+	gpio_direction_input(dhd_oob_gpio_num);
+#endif 
+#endif 
+
+	return (host_oob_irq);
+}
+#endif 
+
+/* Customer function to control hw specific wlan gpios */
+int
+dhd_customer_gpio_wlan_ctrl(void *adapter, int onoff)
+{
+	int err = 0;
+
+	return err;
+}
+
+#ifdef GET_CUSTOM_MAC_ENABLE
+/* Function to get custom MAC address */
+int
+dhd_custom_get_mac_address(void *adapter, unsigned char *buf)
+{
+	int ret = 0;
+
+	WL_TRACE(("%s Enter\n", __FUNCTION__));
+	if (!buf)
+		return -EINVAL;
+
+	/* Customer access to MAC address stored outside of DHD driver */
+#if defined(CUSTOMER_HW2) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
+	ret = wifi_platform_get_mac_addr(adapter, buf);
+#endif
+
+#ifdef EXAMPLE_GET_MAC
+	/* EXAMPLE code */
+	{
+		struct ether_addr ea_example = {{0x00, 0x11, 0x22, 0x33, 0x44, 0xFF}};
+		bcopy((char *)&ea_example, buf, sizeof(struct ether_addr));
+	}
+#endif /* EXAMPLE_GET_MAC */
+
+	return ret;
+}
+#endif /* GET_CUSTOM_MAC_ENABLE */
+
+/* Customized Locale table : OPTIONAL feature */
+const struct cntry_locales_custom translate_custom_table[] = {
+/* Table should be filled out based on custom platform regulatory requirement */
+#ifdef EXAMPLE_TABLE
+	{"",   "XY", 4},  /* Universal if Country code is unknown or empty */
+	{"US", "US", 69}, /* input ISO "US" to : US regrev 69 */
+	{"CA", "US", 69}, /* input ISO "CA" to : US regrev 69 */
+	{"EU", "EU", 5},  /* European union countries to : EU regrev 05 */
+	{"AT", "EU", 5},
+	{"BE", "EU", 5},
+	{"BG", "EU", 5},
+	{"CY", "EU", 5},
+	{"CZ", "EU", 5},
+	{"DK", "EU", 5},
+	{"EE", "EU", 5},
+	{"FI", "EU", 5},
+	{"FR", "EU", 5},
+	{"DE", "EU", 5},
+	{"GR", "EU", 5},
+	{"HU", "EU", 5},
+	{"IE", "EU", 5},
+	{"IT", "EU", 5},
+	{"LV", "EU", 5},
+	{"LI", "EU", 5},
+	{"LT", "EU", 5},
+	{"LU", "EU", 5},
+	{"MT", "EU", 5},
+	{"NL", "EU", 5},
+	{"PL", "EU", 5},
+	{"PT", "EU", 5},
+	{"RO", "EU", 5},
+	{"SK", "EU", 5},
+	{"SI", "EU", 5},
+	{"ES", "EU", 5},
+	{"SE", "EU", 5},
+	{"GB", "EU", 5},
+	{"KR", "XY", 3},
+	{"AU", "XY", 3},
+	{"CN", "XY", 3}, /* input ISO "CN" to : XY regrev 03 */
+	{"TW", "XY", 3},
+	{"AR", "XY", 3},
+	{"MX", "XY", 3},
+	{"IL", "IL", 0},
+	{"CH", "CH", 0},
+	{"TR", "TR", 0},
+	{"NO", "NO", 0},
+#endif /* EXMAPLE_TABLE */
+#if defined(CUSTOMER_HW2)
+#if defined(BCM4335_CHIP)
+	{"",   "XZ", 11},  /* Universal if Country code is unknown or empty */
+#endif
+	{"AE", "AE", 1},
+	{"AR", "AR", 1},
+	{"AT", "AT", 1},
+	{"AU", "AU", 2},
+	{"BE", "BE", 1},
+	{"BG", "BG", 1},
+	{"BN", "BN", 1},
+	{"CA", "CA", 2},
+	{"CH", "CH", 1},
+	{"CY", "CY", 1},
+	{"CZ", "CZ", 1},
+	{"DE", "DE", 3},
+	{"DK", "DK", 1},
+	{"EE", "EE", 1},
+	{"ES", "ES", 1},
+	{"FI", "FI", 1},
+	{"FR", "FR", 1},
+	{"GB", "GB", 1},
+	{"GR", "GR", 1},
+	{"HR", "HR", 1},
+	{"HU", "HU", 1},
+	{"IE", "IE", 1},
+	{"IS", "IS", 1},
+	{"IT", "IT", 1},
+	{"ID", "ID", 1},
+	{"JP", "JP", 8},
+	{"KR", "KR", 24},
+	{"KW", "KW", 1},
+	{"LI", "LI", 1},
+	{"LT", "LT", 1},
+	{"LU", "LU", 1},
+	{"LV", "LV", 1},
+	{"MA", "MA", 1},
+	{"MT", "MT", 1},
+	{"MX", "MX", 1},
+	{"NL", "NL", 1},
+	{"NO", "NO", 1},
+	{"PL", "PL", 1},
+	{"PT", "PT", 1},
+	{"PY", "PY", 1},
+	{"RO", "RO", 1},
+	{"SE", "SE", 1},
+	{"SI", "SI", 1},
+	{"SK", "SK", 1},
+	{"TR", "TR", 7},
+	{"TW", "TW", 1},
+	{"IR", "XZ", 11},	/* Universal if Country code is IRAN, (ISLAMIC REPUBLIC OF) */
+	{"SD", "XZ", 11},	/* Universal if Country code is SUDAN */
+	{"SY", "XZ", 11},	/* Universal if Country code is SYRIAN ARAB REPUBLIC */
+	{"GL", "XZ", 11},	/* Universal if Country code is GREENLAND */
+	{"PS", "XZ", 11},	/* Universal if Country code is PALESTINIAN TERRITORY, OCCUPIED */
+	{"TL", "XZ", 11},	/* Universal if Country code is TIMOR-LESTE (EAST TIMOR) */
+	{"MH", "XZ", 11},	/* Universal if Country code is MARSHALL ISLANDS */
+#ifdef BCM4330_CHIP
+	{"RU", "RU", 1},
+	{"US", "US", 5}
+#endif
+#endif /* CUSTOMER_HW2 */
+};
+
+
+/* Customized Locale convertor
+*  input : ISO 3166-1 country abbreviation
+*  output: customized cspec
+*/
+void get_customized_country_code(void *adapter, char *country_iso_code,
+				 wl_country_t *cspec, u32 flags)
+{
+#if defined(CUSTOMER_HW2) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+
+	struct cntry_locales_custom *cloc_ptr;
+
+	if (!cspec)
+		return;
+
+	cloc_ptr = wifi_platform_get_country_code(adapter, country_iso_code,
+						  flags);
+	if (cloc_ptr) {
+		strlcpy(cspec->ccode, cloc_ptr->custom_locale, WLC_CNTRY_BUF_SZ);
+		cspec->rev = cloc_ptr->custom_locale_rev;
+	}
+	return;
+#else
+	int size, i;
+
+	size = ARRAYSIZE(translate_custom_table);
+
+	if (cspec == 0)
+		 return;
+
+	if (size == 0)
+		 return;
+
+	for (i = 0; i < size; i++) {
+		if (strcmp(country_iso_code, translate_custom_table[i].iso_abbrev) == 0) {
+			memcpy(cspec->ccode,
+				translate_custom_table[i].custom_locale, WLC_CNTRY_BUF_SZ);
+			cspec->rev = translate_custom_table[i].custom_locale_rev;
+			return;
+		}
+	}
+#ifdef EXAMPLE_TABLE
+	/* if no country code matched return first universal code from translate_custom_table */
+	memcpy(cspec->ccode, translate_custom_table[0].custom_locale, WLC_CNTRY_BUF_SZ);
+	cspec->rev = translate_custom_table[0].custom_locale_rev;
+#endif /* EXMAPLE_TABLE */
+	return;
+#endif /* defined(CUSTOMER_HW2) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)) */
+}
diff --git a/drivers/net/wireless/bcmdhd/dhd_dbg.h b/drivers/net/wireless/bcmdhd/dhd_dbg.h
new file mode 100644
index 0000000..6ab5f60
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_dbg.h
@@ -0,0 +1,125 @@
+/*
+ * Debug/trace/assert driver definitions for Dongle Host Driver.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_dbg.h 424863 2013-09-19 20:06:14Z $
+ */
+
+#ifndef _dhd_dbg_
+#define _dhd_dbg_
+
+#define USE_NET_RATELIMIT		1
+
+#if defined(DHD_DEBUG)
+
+#define DHD_ERROR(args)		do {if ((dhd_msg_level & DHD_ERROR_VAL) && USE_NET_RATELIMIT) \
+								printf args;} while (0)
+#define DHD_TRACE(args)		do {if (dhd_msg_level & DHD_TRACE_VAL) printf args;} while (0)
+#define DHD_INFO(args)		do {if (dhd_msg_level & DHD_INFO_VAL) printf args;} while (0)
+#define DHD_DATA(args)		do {if (dhd_msg_level & DHD_DATA_VAL) printf args;} while (0)
+#define DHD_CTL(args)		do {if (dhd_msg_level & DHD_CTL_VAL) printf args;} while (0)
+#define DHD_TIMER(args)		do {if (dhd_msg_level & DHD_TIMER_VAL) printf args;} while (0)
+#define DHD_HDRS(args)		do {if (dhd_msg_level & DHD_HDRS_VAL) printf args;} while (0)
+#define DHD_BYTES(args)		do {if (dhd_msg_level & DHD_BYTES_VAL) printf args;} while (0)
+#define DHD_INTR(args)		do {if (dhd_msg_level & DHD_INTR_VAL) printf args;} while (0)
+#define DHD_GLOM(args)		do {if (dhd_msg_level & DHD_GLOM_VAL) printf args;} while (0)
+#define DHD_EVENT(args)		do {if (dhd_msg_level & DHD_EVENT_VAL) printf args;} while (0)
+#define DHD_BTA(args)		do {if (dhd_msg_level & DHD_BTA_VAL) printf args;} while (0)
+#define DHD_ISCAN(args)		do {if (dhd_msg_level & DHD_ISCAN_VAL) printf args;} while (0)
+#define DHD_ARPOE(args)		do {if (dhd_msg_level & DHD_ARPOE_VAL) printf args;} while (0)
+#define DHD_REORDER(args)	do {if (dhd_msg_level & DHD_REORDER_VAL) printf args;} while (0)
+#define DHD_PNO(args)		do {if (dhd_msg_level & DHD_PNO_VAL) printf args;} while (0)
+#define DHD_RTT(args)		do {if (dhd_msg_level & DHD_RTT_VAL) printf args;} while (0)
+
+#define DHD_TRACE_HW4	DHD_TRACE
+
+#define DHD_ERROR_ON()		(dhd_msg_level & DHD_ERROR_VAL)
+#define DHD_TRACE_ON()		(dhd_msg_level & DHD_TRACE_VAL)
+#define DHD_INFO_ON()		(dhd_msg_level & DHD_INFO_VAL)
+#define DHD_DATA_ON()		(dhd_msg_level & DHD_DATA_VAL)
+#define DHD_CTL_ON()		(dhd_msg_level & DHD_CTL_VAL)
+#define DHD_TIMER_ON()		(dhd_msg_level & DHD_TIMER_VAL)
+#define DHD_HDRS_ON()		(dhd_msg_level & DHD_HDRS_VAL)
+#define DHD_BYTES_ON()		(dhd_msg_level & DHD_BYTES_VAL)
+#define DHD_INTR_ON()		(dhd_msg_level & DHD_INTR_VAL)
+#define DHD_GLOM_ON()		(dhd_msg_level & DHD_GLOM_VAL)
+#define DHD_EVENT_ON()		(dhd_msg_level & DHD_EVENT_VAL)
+#define DHD_BTA_ON()		(dhd_msg_level & DHD_BTA_VAL)
+#define DHD_ISCAN_ON()		(dhd_msg_level & DHD_ISCAN_VAL)
+#define DHD_ARPOE_ON()		(dhd_msg_level & DHD_ARPOE_VAL)
+#define DHD_REORDER_ON()	(dhd_msg_level & DHD_REORDER_VAL)
+#define DHD_NOCHECKDIED_ON()	(dhd_msg_level & DHD_NOCHECKDIED_VAL)
+#define DHD_PNO_ON()		(dhd_msg_level & DHD_PNO_VAL)
+#define DHD_RTT_ON()		(dhd_msg_level & DHD_RTT_VAL)
+
+#else /* defined(BCMDBG) || defined(DHD_DEBUG) */
+
+#define DHD_ERROR(args)		do {if (USE_NET_RATELIMIT) printf args;} while (0)
+#define DHD_TRACE(args)
+#define DHD_INFO(args)
+#define DHD_DATA(args)
+#define DHD_CTL(args)
+#define DHD_TIMER(args)
+#define DHD_HDRS(args)
+#define DHD_BYTES(args)
+#define DHD_INTR(args)
+#define DHD_GLOM(args)
+#define DHD_EVENT(args)
+#define DHD_BTA(args)
+#define DHD_ISCAN(args)
+#define DHD_ARPOE(args)
+#define DHD_REORDER(args)
+#define DHD_PNO(args)
+
+#define DHD_TRACE_HW4	DHD_TRACE
+
+#define DHD_ERROR_ON()		0
+#define DHD_TRACE_ON()		0
+#define DHD_INFO_ON()		0
+#define DHD_DATA_ON()		0
+#define DHD_CTL_ON()		0
+#define DHD_TIMER_ON()		0
+#define DHD_HDRS_ON()		0
+#define DHD_BYTES_ON()		0
+#define DHD_INTR_ON()		0
+#define DHD_GLOM_ON()		0
+#define DHD_EVENT_ON()		0
+#define DHD_BTA_ON()		0
+#define DHD_ISCAN_ON()		0
+#define DHD_ARPOE_ON()		0
+#define DHD_REORDER_ON()	0
+#define DHD_NOCHECKDIED_ON()	0
+#define DHD_PNO_ON()		0
+#define DHD_RTT_ON()		0
+#endif 
+
+#define DHD_LOG(args)
+
+#define DHD_BLOG(cp, size)
+
+#define DHD_NONE(args)
+extern int dhd_msg_level;
+
+/* Defines msg bits */
+#include <dhdioctl.h>
+
+#endif /* _dhd_dbg_ */
diff --git a/drivers/net/wireless/bcmdhd/dhd_flowring.c b/drivers/net/wireless/bcmdhd/dhd_flowring.c
new file mode 100644
index 0000000..95d186e
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_flowring.c
@@ -0,0 +1,819 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), Flow ring specific code at top level
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ *
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ *
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_flowrings.c jaganlv $
+ */
+
+#include <typedefs.h>
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <bcmdevs.h>
+
+#include <proto/ethernet.h>
+#include <proto/bcmevent.h>
+#include <dngl_stats.h>
+
+#include <dhd.h>
+
+#include <dhd_flowring.h>
+#include <dhd_bus.h>
+#include <dhd_proto.h>
+#include <dhd_dbg.h>
+#include <proto/802.1d.h>
+#include <pcie_core.h>
+#include <bcmmsgbuf.h>
+#include <dhd_pcie.h>
+
+static INLINE uint16 dhd_flowid_find(dhd_pub_t *dhdp, uint8 ifindex,
+                                     uint8 prio, char *sa, char *da);
+
+static INLINE uint16 dhd_flowid_alloc(dhd_pub_t *dhdp, uint8 ifindex,
+                                      uint8 prio, char *sa, char *da);
+
+static INLINE int dhd_flowid_lookup(dhd_pub_t *dhdp, uint8 ifindex,
+                                uint8 prio, char *sa, char *da, uint16 *flowid);
+int BCMFASTPATH dhd_flow_queue_overflow(flow_queue_t *queue, void *pkt);
+
+#define FLOW_QUEUE_PKT_NEXT(p)          PKTLINK(p)
+#define FLOW_QUEUE_PKT_SETNEXT(p, x)    PKTSETLINK((p), (x))
+
+const uint8 prio2ac[8] = { 0, 1, 1, 0, 2, 2, 3, 3 };
+const uint8 prio2tid[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
+
+int BCMFASTPATH
+dhd_flow_queue_overflow(flow_queue_t *queue, void *pkt)
+{
+	return BCME_NORESOURCE;
+}
+
+/* Flow ring's queue management functions */
+
+void /* Initialize a flow ring's queue */
+dhd_flow_queue_init(dhd_pub_t *dhdp, flow_queue_t *queue, int max)
+{
+	ASSERT((queue != NULL) && (max > 0));
+
+	dll_init(&queue->list);
+	queue->head = queue->tail = NULL;
+	queue->len = 0;
+	queue->max = max - 1;
+	queue->failures = 0U;
+	queue->cb = &dhd_flow_queue_overflow;
+}
+
+void /* Register an enqueue overflow callback handler */
+dhd_flow_queue_register(flow_queue_t *queue, flow_queue_cb_t cb)
+{
+	ASSERT(queue != NULL);
+	queue->cb = cb;
+}
+
+
+int BCMFASTPATH /* Enqueue a packet in a flow ring's queue */
+dhd_flow_queue_enqueue(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt)
+{
+	int ret = BCME_OK;
+
+	ASSERT(queue != NULL);
+
+	if (queue->len >= queue->max) {
+		queue->failures++;
+		ret = (*queue->cb)(queue, pkt);
+		goto done;
+	}
+
+	if (queue->head) {
+		FLOW_QUEUE_PKT_SETNEXT(queue->tail, pkt);
+	} else {
+		queue->head = pkt;
+	}
+
+	FLOW_QUEUE_PKT_SETNEXT(pkt, NULL);
+
+	queue->tail = pkt; /* at tail */
+
+	queue->len++;
+
+done:
+	return ret;
+}
+
+void * BCMFASTPATH /* Dequeue a packet from a flow ring's queue, from head */
+dhd_flow_queue_dequeue(dhd_pub_t *dhdp, flow_queue_t *queue)
+{
+	void * pkt;
+
+	ASSERT(queue != NULL);
+
+	pkt = queue->head; /* from head */
+
+	if (pkt == NULL) {
+		ASSERT((queue->len == 0) && (queue->tail == NULL));
+		goto done;
+	}
+
+	queue->head = FLOW_QUEUE_PKT_NEXT(pkt);
+	if (queue->head == NULL)
+		queue->tail = NULL;
+
+	queue->len--;
+
+	FLOW_QUEUE_PKT_SETNEXT(pkt, NULL); /* dettach packet from queue */
+
+done:
+	return pkt;
+}
+
+void BCMFASTPATH /* Reinsert a dequeued packet back at the head */
+dhd_flow_queue_reinsert(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt)
+{
+	if (queue->head == NULL) {
+		queue->tail = pkt;
+	}
+
+	FLOW_QUEUE_PKT_SETNEXT(pkt, queue->head);
+	queue->head = pkt;
+	queue->len++;
+}
+
+
+/* Init Flow Ring specific data structures */
+int
+dhd_flow_rings_init(dhd_pub_t *dhdp, uint32 num_flow_rings)
+{
+	uint32 idx;
+	uint32 flow_ring_table_sz;
+	uint32 if_flow_lkup_sz;
+	void * flowid_allocator;
+	flow_ring_table_t *flow_ring_table;
+	if_flow_lkup_t *if_flow_lkup = NULL;
+	void *lock = NULL;
+	unsigned long flags;
+
+
+	DHD_INFO(("%s\n", __FUNCTION__));
+
+	/* Construct a 16bit flow1d allocator */
+	flowid_allocator = id16_map_init(dhdp->osh,
+	                       num_flow_rings - FLOW_RING_COMMON, FLOWID_RESERVED);
+	if (flowid_allocator == NULL) {
+		DHD_ERROR(("%s: flowid allocator init failure\n", __FUNCTION__));
+		return BCME_NOMEM;
+	}
+
+	/* Allocate a flow ring table, comprising of requested number of rings */
+	flow_ring_table_sz = (num_flow_rings * sizeof(flow_ring_node_t));
+	flow_ring_table = (flow_ring_table_t *)MALLOC(dhdp->osh, flow_ring_table_sz);
+	if (flow_ring_table == NULL) {
+		DHD_ERROR(("%s: flow ring table alloc failure\n", __FUNCTION__));
+		goto fail;
+	}
+
+	/* Initialize flow ring table state */
+	bzero((uchar *)flow_ring_table, flow_ring_table_sz);
+	for (idx = 0; idx < num_flow_rings; idx++) {
+		flow_ring_table[idx].status = FLOW_RING_STATUS_CLOSED;
+		flow_ring_table[idx].flowid = (uint16)idx;
+		flow_ring_table[idx].lock = dhd_os_spin_lock_init(dhdp->osh);
+		if (flow_ring_table[idx].lock == NULL) {
+			DHD_ERROR(("%s: Failed to init spinlock for queue!\n", __FUNCTION__));
+			goto fail;
+		}
+
+		dll_init(&flow_ring_table[idx].list);
+
+		/* Initialize the per flow ring backup queue */
+		dhd_flow_queue_init(dhdp, &flow_ring_table[idx].queue,
+		                    FLOW_RING_QUEUE_THRESHOLD);
+	}
+
+	/* Allocate per interface hash table */
+	if_flow_lkup_sz = sizeof(if_flow_lkup_t) * DHD_MAX_IFS;
+	if_flow_lkup = (if_flow_lkup_t *)DHD_OS_PREALLOC(dhdp,
+			DHD_PREALLOC_IF_FLOW_LKUP, if_flow_lkup_sz);
+	if (if_flow_lkup == NULL) {
+		DHD_ERROR(("%s: if flow lkup alloc failure\n", __FUNCTION__));
+		goto fail;
+	}
+
+	/* Initialize per interface hash table */
+	bzero((uchar *)if_flow_lkup, if_flow_lkup_sz);
+	for (idx = 0; idx < DHD_MAX_IFS; idx++) {
+		int hash_ix;
+		if_flow_lkup[idx].status = 0;
+		if_flow_lkup[idx].role = 0;
+		for (hash_ix = 0; hash_ix < DHD_FLOWRING_HASH_SIZE; hash_ix++)
+			if_flow_lkup[idx].fl_hash[hash_ix] = NULL;
+	}
+
+	lock = dhd_os_spin_lock_init(dhdp->osh);
+	if (lock == NULL)
+		goto fail;
+
+	dhdp->flow_prio_map_type = DHD_FLOW_PRIO_AC_MAP;
+	bcopy(prio2ac, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);
+
+	/* Now populate into dhd pub */
+	DHD_FLOWID_LOCK(lock, flags);
+	dhdp->num_flow_rings = num_flow_rings;
+	dhdp->flowid_allocator = (void *)flowid_allocator;
+	dhdp->flow_ring_table = (void *)flow_ring_table;
+	dhdp->if_flow_lkup = (void *)if_flow_lkup;
+	dhdp->flowid_lock = lock;
+	DHD_FLOWID_UNLOCK(lock, flags);
+
+	DHD_INFO(("%s done\n", __FUNCTION__));
+	return BCME_OK;
+
+fail:
+	if (lock != NULL)
+		dhd_os_spin_lock_deinit(dhdp->osh, lock);
+
+	/* Destruct the per interface flow lkup table */
+	if (dhdp->if_flow_lkup != NULL) {
+		DHD_OS_PREFREE(dhdp, if_flow_lkup, if_flow_lkup_sz);
+	}
+	if (flow_ring_table != NULL) {
+		for (idx = 0; idx < num_flow_rings; idx++) {
+			if (flow_ring_table[idx].lock != NULL)
+				dhd_os_spin_lock_deinit(dhdp->osh, flow_ring_table[idx].lock);
+		}
+		MFREE(dhdp->osh, flow_ring_table, flow_ring_table_sz);
+	}
+	id16_map_fini(dhdp->osh, flowid_allocator);
+
+	return BCME_NOMEM;
+}
+
+/* Deinit Flow Ring specific data structures */
+void dhd_flow_rings_deinit(dhd_pub_t *dhdp)
+{
+	uint16 idx;
+	uint32 flow_ring_table_sz;
+	uint32 if_flow_lkup_sz;
+	flow_ring_table_t *flow_ring_table;
+	unsigned long flags;
+	void *lock;
+
+	DHD_INFO(("dhd_flow_rings_deinit\n"));
+
+	if (dhdp->flow_ring_table != NULL) {
+
+		ASSERT(dhdp->num_flow_rings > 0);
+
+		DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
+		flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
+		dhdp->flow_ring_table = NULL;
+		DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+		for (idx = 0; idx < dhdp->num_flow_rings; idx++) {
+			if (flow_ring_table[idx].active) {
+				dhd_bus_clean_flow_ring(dhdp->bus, &flow_ring_table[idx]);
+			}
+			ASSERT(flow_queue_empty(&flow_ring_table[idx].queue));
+
+			/* Deinit flow ring queue locks before destroying flow ring table */
+			dhd_os_spin_lock_deinit(dhdp->osh, flow_ring_table[idx].lock);
+			flow_ring_table[idx].lock = NULL;
+		}
+
+		/* Destruct the flow ring table */
+		flow_ring_table_sz = dhdp->num_flow_rings * sizeof(flow_ring_table_t);
+		MFREE(dhdp->osh, flow_ring_table, flow_ring_table_sz);
+	}
+
+	DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
+
+	/* Destruct the per interface flow lkup table */
+	if (dhdp->if_flow_lkup != NULL) {
+		if_flow_lkup_sz = sizeof(if_flow_lkup_t) * DHD_MAX_IFS;
+		memset(dhdp->if_flow_lkup, 0, sizeof(if_flow_lkup_sz));
+		DHD_OS_PREFREE(dhdp, dhdp->if_flow_lkup, if_flow_lkup_sz);
+		dhdp->if_flow_lkup = NULL;
+	}
+
+	/* Destruct the flowid allocator */
+	if (dhdp->flowid_allocator != NULL)
+		dhdp->flowid_allocator = id16_map_fini(dhdp->osh, dhdp->flowid_allocator);
+
+	dhdp->num_flow_rings = 0U;
+	lock = dhdp->flowid_lock;
+	dhdp->flowid_lock = NULL;
+
+	DHD_FLOWID_UNLOCK(lock, flags);
+	dhd_os_spin_lock_deinit(dhdp->osh, lock);
+}
+
+uint8
+dhd_flow_rings_ifindex2role(dhd_pub_t *dhdp, uint8 ifindex)
+{
+	if_flow_lkup_t *if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
+	ASSERT(if_flow_lkup);
+	return if_flow_lkup[ifindex].role;
+}
+
+#ifdef WLTDLS
+bool is_tdls_destination(dhd_pub_t *dhdp, uint8 *da)
+{
+	tdls_peer_node_t *cur = dhdp->peer_tbl.node;
+	while (cur != NULL) {
+		if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
+			return TRUE;
+		}
+		cur = cur->next;
+	}
+	return FALSE;
+}
+#endif /* WLTDLS */
+
+/* For a given interface, search the hash table for a matching flow */
+static INLINE uint16
+dhd_flowid_find(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *sa, char *da)
+{
+	int hash;
+	bool ismcast = FALSE;
+	flow_hash_info_t *cur;
+	if_flow_lkup_t *if_flow_lkup;
+	unsigned long flags;
+
+	DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
+	if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
+
+	if (DHD_IF_ROLE_STA(if_flow_lkup[ifindex].role)) {
+#ifdef WLTDLS
+		if (dhdp->peer_tbl.tdls_peer_count && !(ETHER_ISMULTI(da)) &&
+			is_tdls_destination(dhdp, da)) {
+			hash = DHD_FLOWRING_HASHINDEX(da, prio);
+			cur = if_flow_lkup[ifindex].fl_hash[hash];
+			while (cur != NULL) {
+				if (!memcmp(cur->flow_info.da, da, ETHER_ADDR_LEN)) {
+					DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+					return cur->flowid;
+				}
+				cur = cur->next;
+			}
+			DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+			return FLOWID_INVALID;
+		}
+#endif /* WLTDLS */
+		cur = if_flow_lkup[ifindex].fl_hash[prio];
+		if (cur) {
+			DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+			return cur->flowid;
+		}
+
+	} else {
+
+		if (ETHER_ISMULTI(da)) {
+			ismcast = TRUE;
+			hash = 0;
+		} else {
+			hash = DHD_FLOWRING_HASHINDEX(da, prio);
+		}
+
+		cur = if_flow_lkup[ifindex].fl_hash[hash];
+
+		while (cur) {
+			if ((ismcast && ETHER_ISMULTI(cur->flow_info.da)) ||
+				(!memcmp(cur->flow_info.da, da, ETHER_ADDR_LEN) &&
+				(cur->flow_info.tid == prio))) {
+				DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+				return cur->flowid;
+			}
+			cur = cur->next;
+		}
+	}
+	DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+
+	return FLOWID_INVALID;
+}
+
+/* Allocate Flow ID */
+static INLINE uint16
+dhd_flowid_alloc(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *sa, char *da)
+{
+	flow_hash_info_t *fl_hash_node, *cur;
+	if_flow_lkup_t *if_flow_lkup;
+	int hash;
+	uint16 flowid;
+	unsigned long flags;
+
+	fl_hash_node = (flow_hash_info_t *) MALLOC(dhdp->osh, sizeof(flow_hash_info_t));
+	if (fl_hash_node == NULL) {
+		DHD_ERROR(("%s: fl_hash_node alloc failed  \n", __FUNCTION__));
+		return FLOWID_INVALID;
+	}
+	memcpy(fl_hash_node->flow_info.da, da, sizeof(fl_hash_node->flow_info.da));
+
+	DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
+	ASSERT(dhdp->flowid_allocator != NULL);
+	flowid = id16_map_alloc(dhdp->flowid_allocator);
+	DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+
+	if (flowid == FLOWID_INVALID) {
+		MFREE(dhdp->osh, fl_hash_node,  sizeof(flow_hash_info_t));
+		DHD_ERROR(("%s: cannot get free flowid \n", __FUNCTION__));
+		return FLOWID_INVALID;
+	}
+
+	fl_hash_node->flowid = flowid;
+	fl_hash_node->flow_info.tid = prio;
+	fl_hash_node->flow_info.ifindex = ifindex;
+	fl_hash_node->next = NULL;
+
+	DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
+	if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
+	if (DHD_IF_ROLE_STA(if_flow_lkup[ifindex].role)) {
+		/* For STA non TDLS dest we allocate entry based on prio only */
+#ifdef WLTDLS
+		if (dhdp->peer_tbl.tdls_peer_count &&
+			(is_tdls_destination(dhdp, da))) {
+			hash = DHD_FLOWRING_HASHINDEX(da, prio);
+			cur = if_flow_lkup[ifindex].fl_hash[hash];
+			if (cur) {
+				while (cur->next) {
+					cur = cur->next;
+				}
+				cur->next = fl_hash_node;
+			} else {
+				if_flow_lkup[ifindex].fl_hash[hash] = fl_hash_node;
+			}
+		} else
+#endif /* WLTDLS */
+			if_flow_lkup[ifindex].fl_hash[prio] = fl_hash_node;
+	} else {
+
+		/* For bcast/mcast assign first slot in in interface */
+		hash = ETHER_ISMULTI(da) ? 0 : DHD_FLOWRING_HASHINDEX(da, prio);
+		cur = if_flow_lkup[ifindex].fl_hash[hash];
+		if (cur) {
+			while (cur->next) {
+				cur = cur->next;
+			}
+			cur->next = fl_hash_node;
+		} else
+			if_flow_lkup[ifindex].fl_hash[hash] = fl_hash_node;
+	}
+	DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+
+	DHD_INFO(("%s: allocated flowid %d\n", __FUNCTION__, fl_hash_node->flowid));
+
+	return fl_hash_node->flowid;
+}
+
+/* Get flow ring ID, if not present try to create one */
+static INLINE int
+dhd_flowid_lookup(dhd_pub_t *dhdp, uint8 ifindex,
+                  uint8 prio, char *sa, char *da, uint16 *flowid)
+{
+	uint16 id;
+	flow_ring_node_t *flow_ring_node;
+	flow_ring_table_t *flow_ring_table;
+	unsigned long flags;
+
+	DHD_INFO(("%s\n", __FUNCTION__));
+
+	if (!dhdp->flow_ring_table)
+		return BCME_ERROR;
+
+	flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
+
+	id = dhd_flowid_find(dhdp, ifindex, prio, sa, da);
+
+	if (id == FLOWID_INVALID) {
+
+		if_flow_lkup_t *if_flow_lkup;
+		if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
+
+		if (!if_flow_lkup[ifindex].status)
+			return BCME_ERROR;
+
+		id = dhd_flowid_alloc(dhdp, ifindex, prio, sa, da);
+		if (id == FLOWID_INVALID) {
+			DHD_ERROR(("%s: alloc flowid ifindex %u status %u\n",
+			           __FUNCTION__, ifindex, if_flow_lkup[ifindex].status));
+			return BCME_ERROR;
+		}
+
+		/* register this flowid in dhd_pub */
+		dhd_add_flowid(dhdp, ifindex, prio, da, id);
+	}
+
+	ASSERT(id < dhdp->num_flow_rings);
+
+	flow_ring_node = (flow_ring_node_t *) &flow_ring_table[id];
+	DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
+	if (flow_ring_node->active) {
+		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+		*flowid = id;
+		return BCME_OK;
+	}
+
+	/* Init Flow info */
+	memcpy(flow_ring_node->flow_info.sa, sa, sizeof(flow_ring_node->flow_info.sa));
+	memcpy(flow_ring_node->flow_info.da, da, sizeof(flow_ring_node->flow_info.da));
+	flow_ring_node->flow_info.tid = prio;
+	flow_ring_node->flow_info.ifindex = ifindex;
+	flow_ring_node->active = TRUE;
+	flow_ring_node->status = FLOW_RING_STATUS_PENDING;
+	DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+	DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
+	dll_prepend(&dhdp->bus->const_flowring, &flow_ring_node->list);
+	DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+
+	/* Create and inform device about the new flow */
+	if (dhd_bus_flow_ring_create_request(dhdp->bus, (void *)flow_ring_node)
+	        != BCME_OK) {
+		DHD_ERROR(("%s: create error %d\n", __FUNCTION__, id));
+		return BCME_ERROR;
+	}
+
+	*flowid = id;
+	return BCME_OK;
+}
+
+/* Update flowid information on the packet */
+int BCMFASTPATH
+dhd_flowid_update(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, void *pktbuf)
+{
+	uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
+	struct ether_header *eh = (struct ether_header *)pktdata;
+	uint16 flowid;
+
+	if (dhd_bus_is_txmode_push(dhdp->bus))
+		return BCME_OK;
+
+	ASSERT(ifindex < DHD_MAX_IFS);
+	if (ifindex >= DHD_MAX_IFS) {
+		return BCME_BADARG;
+	}
+
+	if (!dhdp->flowid_allocator) {
+		DHD_ERROR(("%s: Flow ring not intited yet  \n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+	if (dhd_flowid_lookup(dhdp, ifindex, prio, eh->ether_shost, eh->ether_dhost,
+		&flowid) != BCME_OK) {
+		return BCME_ERROR;
+	}
+
+	DHD_INFO(("%s: prio %d flowid %d\n", __FUNCTION__, prio, flowid));
+
+	/* Tag the packet with flowid */
+	DHD_PKTTAG_SET_FLOWID((dhd_pkttag_fr_t *)PKTTAG(pktbuf), flowid);
+	return BCME_OK;
+}
+
+void
+dhd_flowid_free(dhd_pub_t *dhdp, uint8 ifindex, uint16 flowid)
+{
+	int hashix;
+	bool found = FALSE;
+	flow_hash_info_t *cur, *prev;
+	if_flow_lkup_t *if_flow_lkup;
+	unsigned long flags;
+
+	DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
+	if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
+
+	for (hashix = 0; hashix < DHD_FLOWRING_HASH_SIZE; hashix++) {
+
+		cur = if_flow_lkup[ifindex].fl_hash[hashix];
+
+		if (cur) {
+			if (cur->flowid == flowid) {
+				found = TRUE;
+			}
+
+			prev = NULL;
+			while (!found && cur) {
+				if (cur->flowid == flowid) {
+					found = TRUE;
+					break;
+				}
+				prev = cur;
+				cur = cur->next;
+			}
+			if (found) {
+				if (!prev) {
+					if_flow_lkup[ifindex].fl_hash[hashix] = cur->next;
+				} else {
+					prev->next = cur->next;
+				}
+
+				/* deregister flowid from dhd_pub. */
+				dhd_del_flowid(dhdp, ifindex, flowid);
+
+				id16_map_free(dhdp->flowid_allocator, flowid);
+				DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+				MFREE(dhdp->osh, cur, sizeof(flow_hash_info_t));
+
+				return;
+			}
+		}
+	}
+
+
+	DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+	DHD_ERROR(("%s: could not free flow ring hash entry flowid %d\n",
+	           __FUNCTION__, flowid));
+}
+
+
+/* Delete all Flow rings assocaited with the given Interface */
+void
+dhd_flow_rings_delete(dhd_pub_t *dhdp, uint8 ifindex)
+{
+	uint32 id;
+	flow_ring_table_t *flow_ring_table;
+
+	DHD_INFO(("%s: ifindex %u\n", __FUNCTION__, ifindex));
+
+	ASSERT(ifindex < DHD_MAX_IFS);
+	if (ifindex >= DHD_MAX_IFS)
+		return;
+
+	if (!dhdp->flow_ring_table)
+		return;
+
+	flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
+	for (id = 0; id < dhdp->num_flow_rings; id++) {
+		if (flow_ring_table[id].active &&
+		    (flow_ring_table[id].flow_info.ifindex == ifindex) &&
+		    (flow_ring_table[id].status != FLOW_RING_STATUS_DELETE_PENDING)) {
+			DHD_INFO(("%s: deleting flowid %d\n",
+			          __FUNCTION__, flow_ring_table[id].flowid));
+			dhd_bus_flow_ring_delete_request(dhdp->bus,
+			                                 (void *) &flow_ring_table[id]);
+		}
+	}
+}
+
+/* Delete flow/s for given peer address */
+void
+dhd_flow_rings_delete_for_peer(dhd_pub_t *dhdp, uint8 ifindex, char *addr)
+{
+	uint32 id;
+	flow_ring_table_t *flow_ring_table;
+
+	DHD_ERROR(("%s: ifindex %u\n", __FUNCTION__, ifindex));
+
+	ASSERT(ifindex < DHD_MAX_IFS);
+	if (ifindex >= DHD_MAX_IFS)
+		return;
+
+	if (!dhdp->flow_ring_table)
+		return;
+
+	flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
+	for (id = 0; id < dhdp->num_flow_rings; id++) {
+		if (flow_ring_table[id].active &&
+		    (flow_ring_table[id].flow_info.ifindex == ifindex) &&
+		    (!memcmp(flow_ring_table[id].flow_info.da, addr, ETHER_ADDR_LEN)) &&
+		    (flow_ring_table[id].status != FLOW_RING_STATUS_DELETE_PENDING)) {
+			DHD_INFO(("%s: deleting flowid %d\n",
+			          __FUNCTION__, flow_ring_table[id].flowid));
+			dhd_bus_flow_ring_delete_request(dhdp->bus,
+			                                 (void *) &flow_ring_table[id]);
+		}
+	}
+}
+
+/* Handle Interface ADD, DEL operations */
+void
+dhd_update_interface_flow_info(dhd_pub_t *dhdp, uint8 ifindex,
+                               uint8 op, uint8 role)
+{
+	if_flow_lkup_t *if_flow_lkup;
+	unsigned long flags;
+
+	ASSERT(ifindex < DHD_MAX_IFS);
+	if (ifindex >= DHD_MAX_IFS)
+		return;
+
+	DHD_INFO(("%s: ifindex %u op %u role is %u \n",
+	          __FUNCTION__, ifindex, op, role));
+	if (!dhdp->flowid_allocator) {
+		DHD_ERROR(("%s: Flow ring not intited yet  \n", __FUNCTION__));
+		return;
+	}
+
+	DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
+	if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
+
+	if (op == WLC_E_IF_ADD || op == WLC_E_IF_CHANGE) {
+
+		if_flow_lkup[ifindex].role = role;
+
+		if (!(DHD_IF_ROLE_STA(role))) {
+			if_flow_lkup[ifindex].status = TRUE;
+			DHD_INFO(("%s: Mcast Flow ring for ifindex %d role is %d \n",
+			          __FUNCTION__, ifindex, role));
+			/* Create Mcast Flow */
+		}
+	} else	if (op == WLC_E_IF_DEL) {
+		if_flow_lkup[ifindex].status = FALSE;
+		DHD_INFO(("%s: cleanup all Flow rings for ifindex %d role is %d \n",
+		          __FUNCTION__, ifindex, role));
+	}
+	DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+}
+
+/* Handle a STA interface link status update */
+int
+dhd_update_interface_link_status(dhd_pub_t *dhdp, uint8 ifindex, uint8 status)
+{
+	if_flow_lkup_t *if_flow_lkup;
+	unsigned long flags;
+
+	ASSERT(ifindex < DHD_MAX_IFS);
+	if (ifindex >= DHD_MAX_IFS)
+		return BCME_BADARG;
+
+	DHD_INFO(("%s: ifindex %d status %d\n", __FUNCTION__, ifindex, status));
+
+	DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
+	if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
+
+	if (DHD_IF_ROLE_STA(if_flow_lkup[ifindex].role)) {
+		if (status)
+			if_flow_lkup[ifindex].status = TRUE;
+		else
+			if_flow_lkup[ifindex].status = FALSE;
+	}
+	DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+
+	return BCME_OK;
+}
+/* Update flow priority mapping */
+int dhd_update_flow_prio_map(dhd_pub_t *dhdp, uint8 map)
+{
+	uint16 flowid;
+	flow_ring_node_t *flow_ring_node;
+
+	if (map > DHD_FLOW_PRIO_TID_MAP)
+		return BCME_BADOPTION;
+
+	/* Check if we need to change prio map */
+	if (map == dhdp->flow_prio_map_type)
+		return BCME_OK;
+
+	/* If any ring is active we cannot change priority mapping for flow rings */
+	for (flowid = 0; flowid < dhdp->num_flow_rings; flowid++) {
+		flow_ring_node = DHD_FLOW_RING(dhdp, flowid);
+		if (flow_ring_node->active)
+			return BCME_EPERM;
+	}
+	/* Infor firmware about new mapping type */
+	if (BCME_OK != dhd_flow_prio_map(dhdp, &map, TRUE))
+		return BCME_ERROR;
+
+	/* update internal structures */
+	dhdp->flow_prio_map_type = map;
+	if (dhdp->flow_prio_map_type == DHD_FLOW_PRIO_TID_MAP)
+		bcopy(prio2tid, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);
+	else
+		bcopy(prio2ac, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);
+
+	return BCME_OK;
+}
+
+/* Set/Get flwo ring priority map */
+int dhd_flow_prio_map(dhd_pub_t *dhd, uint8 *map, bool set)
+{
+	uint8 iovbuf[24];
+	if (!set) {
+		bcm_mkiovar("bus:fl_prio_map", NULL, 0, (char*)iovbuf, sizeof(iovbuf));
+		if (dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0) < 0) {
+			DHD_ERROR(("%s: failed to get fl_prio_map\n", __FUNCTION__));
+			return BCME_ERROR;
+		}
+		*map = iovbuf[0];
+		return BCME_OK;
+	}
+	bcm_mkiovar("bus:fl_prio_map", (char *)map, 4, (char*)iovbuf, sizeof(iovbuf));
+	if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0) < 0) {
+		DHD_ERROR(("%s: failed to set fl_prio_map \n",
+			__FUNCTION__));
+		return BCME_ERROR;
+	}
+	return BCME_OK;
+}
diff --git a/drivers/net/wireless/bcmdhd/dhd_flowring.h b/drivers/net/wireless/bcmdhd/dhd_flowring.h
new file mode 100644
index 0000000..9f263b3a
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_flowring.h
@@ -0,0 +1,175 @@
+/*
+ * Header file describing the flow rings DHD interfaces.
+ *
+ * Provides type definitions and function prototypes used to create, delete and manage
+ *
+ * flow rings at high level
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_flowrings.h  jaganlv $
+ */
+
+/****************
+ * Common types *
+ */
+
+#ifndef _dhd_flowrings_h_
+#define _dhd_flowrings_h_
+
+/* Max pkts held in a flow ring's backup queue */
+#define FLOW_RING_QUEUE_THRESHOLD       (2048)
+
+/* Number of H2D common rings : PCIE Spec Rev? */
+#define FLOW_RING_COMMON                2
+
+#define FLOWID_INVALID                  (ID16_INVALID)
+#define FLOWID_RESERVED                 (FLOW_RING_COMMON)
+
+#define FLOW_RING_STATUS_OPEN           0
+#define FLOW_RING_STATUS_PENDING        1
+#define FLOW_RING_STATUS_CLOSED         2
+#define FLOW_RING_STATUS_DELETE_PENDING 3
+#define FLOW_RING_STATUS_FLUSH_PENDING  4
+
+#define DHD_FLOWRING_RX_BUFPOST_PKTSZ	2048
+
+#define DHD_FLOW_PRIO_AC_MAP		0
+#define DHD_FLOW_PRIO_TID_MAP		1
+
+
+/* Pkttag not compatible with PROP_TXSTATUS or WLFC */
+typedef struct dhd_pkttag_fr {
+	uint16  flowid;
+	int     dataoff;
+} dhd_pkttag_fr_t;
+
+#define DHD_PKTTAG_SET_FLOWID(tag, flow)    ((tag)->flowid = (uint16)(flow))
+#define DHD_PKTTAG_SET_DATAOFF(tag, offset) ((tag)->dataoff = (int)(offset))
+
+#define DHD_PKTTAG_FLOWID(tag)              ((tag)->flowid)
+#define DHD_PKTTAG_DATAOFF(tag)             ((tag)->dataoff)
+
+/* Hashing a MacAddress for lkup into a per interface flow hash table */
+#define DHD_FLOWRING_HASH_SIZE    256
+#define	DHD_FLOWRING_HASHINDEX(ea, prio) \
+	       ((((uint8 *)(ea))[3] ^ ((uint8 *)(ea))[4] ^ ((uint8 *)(ea))[5] ^ ((uint8)(prio))) \
+		% DHD_FLOWRING_HASH_SIZE)
+
+#define DHD_IF_ROLE(pub, idx)		(((if_flow_lkup_t *)(pub)->if_flow_lkup)[idx].role)
+#define DHD_IF_ROLE_AP(pub, idx)	(DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_AP)
+#define DHD_IF_ROLE_P2PGO(pub, idx)	(DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_P2P_GO)
+#define DHD_FLOW_RING(dhdp, flowid) \
+	(flow_ring_node_t *)&(((flow_ring_node_t *)((dhdp)->flow_ring_table))[flowid])
+
+struct flow_queue;
+
+/* Flow Ring Queue Enqueue overflow callback */
+typedef int (*flow_queue_cb_t)(struct flow_queue * queue, void * pkt);
+
+typedef struct flow_queue {
+	dll_t  list;                /* manage a flowring queue in a dll */
+	void * head;                /* first packet in the queue */
+	void * tail;                /* last packet in the queue */
+	uint16 len;                 /* number of packets in the queue */
+	uint16 max;                 /* maximum number of packets, queue may hold */
+	uint32 failures;            /* enqueue failures due to queue overflow */
+	flow_queue_cb_t cb;         /* callback invoked on threshold crossing */
+} flow_queue_t;
+
+#define flow_queue_len(queue)   ((int)(queue)->len)
+#define flow_queue_max(queue)   ((int)(queue)->max)
+#define flow_queue_avail(queue) ((int)((queue)->max - (queue)->len))
+#define flow_queue_full(queue)  ((queue)->len >= (queue)->max)
+#define flow_queue_empty(queue) ((queue)->len == 0)
+
+typedef struct flow_info {
+	uint8		tid;
+	uint8		ifindex;
+	char		sa[ETHER_ADDR_LEN];
+	char		da[ETHER_ADDR_LEN];
+} flow_info_t;
+
+typedef struct flow_ring_node {
+	dll_t		list; /* manage a constructed flowring in a dll, must be at first place */
+	flow_queue_t	queue;
+	bool		active;
+	uint8		status;
+	uint16		flowid;
+	flow_info_t	flow_info;
+	void		*prot_info;
+	void		*lock; /* lock for flowring access protection */
+} flow_ring_node_t;
+typedef flow_ring_node_t flow_ring_table_t;
+
+typedef struct flow_hash_info {
+	uint16			flowid;
+	flow_info_t		flow_info;
+	struct flow_hash_info	*next;
+} flow_hash_info_t;
+
+typedef struct if_flow_lkup {
+	bool		status;
+	uint8		role; /* Interface role: STA/AP */
+	flow_hash_info_t *fl_hash[DHD_FLOWRING_HASH_SIZE]; /* Lkup Hash table */
+} if_flow_lkup_t;
+
+static INLINE flow_ring_node_t *
+dhd_constlist_to_flowring(dll_t *item)
+{
+	return ((flow_ring_node_t *)item);
+}
+
+/* Exported API */
+
+/* Flow ring's queue management functions */
+extern void dhd_flow_queue_init(dhd_pub_t *dhdp, flow_queue_t *queue, int max);
+extern void dhd_flow_queue_register(flow_queue_t *queue, flow_queue_cb_t cb);
+extern int  dhd_flow_queue_enqueue(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt);
+extern void * dhd_flow_queue_dequeue(dhd_pub_t *dhdp, flow_queue_t *queue);
+extern void dhd_flow_queue_reinsert(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt);
+
+extern int  dhd_flow_rings_init(dhd_pub_t *dhdp, uint32 num_flow_rings);
+
+extern void dhd_flow_rings_deinit(dhd_pub_t *dhdp);
+
+extern int dhd_flowid_update(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio,
+                void *pktbuf);
+
+extern void dhd_flowid_free(dhd_pub_t *dhdp, uint8 ifindex, uint16 flowid);
+
+extern void dhd_flow_rings_delete(dhd_pub_t *dhdp, uint8 ifindex);
+
+extern void dhd_flow_rings_delete_for_peer(dhd_pub_t *dhdp, uint8 ifindex,
+                char *addr);
+
+/* Handle Interface ADD, DEL operations */
+extern void dhd_update_interface_flow_info(dhd_pub_t *dhdp, uint8 ifindex,
+                uint8 op, uint8 role);
+
+/* Handle a STA interface link status update */
+extern int dhd_update_interface_link_status(dhd_pub_t *dhdp, uint8 ifindex,
+                uint8 status);
+extern int dhd_flow_prio_map(dhd_pub_t *dhd, uint8 *map, bool set);
+extern int dhd_update_flow_prio_map(dhd_pub_t *dhdp, uint8 map);
+
+extern uint8 dhd_flow_rings_ifindex2role(dhd_pub_t *dhdp, uint8 ifindex);
+#endif /* _dhd_flowrings_h_ */
diff --git a/drivers/net/wireless/bcmdhd/dhd_ip.c b/drivers/net/wireless/bcmdhd/dhd_ip.c
new file mode 100644
index 0000000..55657c3
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_ip.c
@@ -0,0 +1,1022 @@
+/*
+ * IP Packet Parser Module.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ *
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ *
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_ip.c 468932 2014-04-09 06:58:15Z $
+ */
+#include <typedefs.h>
+#include <osl.h>
+
+#include <proto/ethernet.h>
+#include <proto/vlan.h>
+#include <proto/802.3.h>
+#include <proto/bcmip.h>
+#include <bcmendian.h>
+#include <bcmutils.h>
+#include <dhd_dbg.h>
+
+#include <dhd_ip.h>
+
+#ifdef DHDTCPACK_SUPPRESS
+#include <dhd_bus.h>
+#include <dhd_proto.h>
+#include <proto/bcmtcp.h>
+#endif /* DHDTCPACK_SUPPRESS */
+
+/* special values */
+/* 802.3 llc/snap header */
+static const uint8 llc_snap_hdr[SNAP_HDR_LEN] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
+
+pkt_frag_t pkt_frag_info(osl_t *osh, void *p)
+{
+	uint8 *frame;
+	int length;
+	uint8 *pt;			/* Pointer to type field */
+	uint16 ethertype;
+	struct ipv4_hdr *iph;		/* IP frame pointer */
+	int ipl;			/* IP frame length */
+	uint16 iph_frag;
+
+	ASSERT(osh && p);
+
+	frame = PKTDATA(osh, p);
+	length = PKTLEN(osh, p);
+
+	/* Process Ethernet II or SNAP-encapsulated 802.3 frames */
+	if (length < ETHER_HDR_LEN) {
+		DHD_INFO(("%s: short eth frame (%d)\n", __FUNCTION__, length));
+		return DHD_PKT_FRAG_NONE;
+	} else if (ntoh16(*(uint16 *)(frame + ETHER_TYPE_OFFSET)) >= ETHER_TYPE_MIN) {
+		/* Frame is Ethernet II */
+		pt = frame + ETHER_TYPE_OFFSET;
+	} else if (length >= ETHER_HDR_LEN + SNAP_HDR_LEN + ETHER_TYPE_LEN &&
+	           !bcmp(llc_snap_hdr, frame + ETHER_HDR_LEN, SNAP_HDR_LEN)) {
+		pt = frame + ETHER_HDR_LEN + SNAP_HDR_LEN;
+	} else {
+		DHD_INFO(("%s: non-SNAP 802.3 frame\n", __FUNCTION__));
+		return DHD_PKT_FRAG_NONE;
+	}
+
+	ethertype = ntoh16(*(uint16 *)pt);
+
+	/* Skip VLAN tag, if any */
+	if (ethertype == ETHER_TYPE_8021Q) {
+		pt += VLAN_TAG_LEN;
+
+		if (pt + ETHER_TYPE_LEN > frame + length) {
+			DHD_INFO(("%s: short VLAN frame (%d)\n", __FUNCTION__, length));
+			return DHD_PKT_FRAG_NONE;
+		}
+
+		ethertype = ntoh16(*(uint16 *)pt);
+	}
+
+	if (ethertype != ETHER_TYPE_IP) {
+		DHD_INFO(("%s: non-IP frame (ethertype 0x%x, length %d)\n",
+			__FUNCTION__, ethertype, length));
+		return DHD_PKT_FRAG_NONE;
+	}
+
+	iph = (struct ipv4_hdr *)(pt + ETHER_TYPE_LEN);
+	ipl = (uint)(length - (pt + ETHER_TYPE_LEN - frame));
+
+	/* We support IPv4 only */
+	if ((ipl < IPV4_OPTIONS_OFFSET) || (IP_VER(iph) != IP_VER_4)) {
+		DHD_INFO(("%s: short frame (%d) or non-IPv4\n", __FUNCTION__, ipl));
+		return DHD_PKT_FRAG_NONE;
+	}
+
+	iph_frag = ntoh16(iph->frag);
+
+	if (iph_frag & IPV4_FRAG_DONT) {
+		return DHD_PKT_FRAG_NONE;
+	} else if ((iph_frag & IPV4_FRAG_MORE) == 0) {
+		return DHD_PKT_FRAG_LAST;
+	} else {
+		return (iph_frag & IPV4_FRAG_OFFSET_MASK)? DHD_PKT_FRAG_CONT : DHD_PKT_FRAG_FIRST;
+	}
+}
+
+bool pkt_is_dhcp(osl_t *osh, void *p)
+{
+	uint8 *frame;
+	int length;
+	uint8 *pt;			/* Pointer to type field */
+	uint16 ethertype;
+	struct ipv4_hdr *iph;		/* IP frame pointer */
+	int ipl;			/* IP frame length */
+	uint16 src_port;
+
+	frame = PKTDATA(osh, p);
+	length = PKTLEN(osh, p);
+
+	/* Process Ethernet II or SNAP-encapsulated 802.3 frames */
+	if (length < ETHER_HDR_LEN) {
+		DHD_INFO(("%s: short eth frame (%d)\n", __FUNCTION__, length));
+		return FALSE;
+	} else if (ntoh16(*(uint16 *)(frame + ETHER_TYPE_OFFSET)) >= ETHER_TYPE_MIN) {
+		/* Frame is Ethernet II */
+		pt = frame + ETHER_TYPE_OFFSET;
+	} else if (length >= ETHER_HDR_LEN + SNAP_HDR_LEN + ETHER_TYPE_LEN &&
+	           !bcmp(llc_snap_hdr, frame + ETHER_HDR_LEN, SNAP_HDR_LEN)) {
+		pt = frame + ETHER_HDR_LEN + SNAP_HDR_LEN;
+	} else {
+		DHD_INFO(("%s: non-SNAP 802.3 frame\n", __FUNCTION__));
+		return FALSE;
+	}
+
+	ethertype = ntoh16(*(uint16 *)pt);
+
+	/* Skip VLAN tag, if any */
+	if (ethertype == ETHER_TYPE_8021Q) {
+		pt += VLAN_TAG_LEN;
+
+		if (pt + ETHER_TYPE_LEN > frame + length) {
+			DHD_INFO(("%s: short VLAN frame (%d)\n", __FUNCTION__, length));
+			return FALSE;
+		}
+
+		ethertype = ntoh16(*(uint16 *)pt);
+	}
+
+	if (ethertype != ETHER_TYPE_IP) {
+		DHD_INFO(("%s: non-IP frame (ethertype 0x%x, length %d)\n",
+			__FUNCTION__, ethertype, length));
+		return FALSE;
+	}
+
+	iph = (struct ipv4_hdr *)(pt + ETHER_TYPE_LEN);
+	ipl = (uint)(length - (pt + ETHER_TYPE_LEN - frame));
+
+	/* We support IPv4 only */
+	if ((ipl < (IPV4_OPTIONS_OFFSET + 2)) || (IP_VER(iph) != IP_VER_4)) {
+		DHD_INFO(("%s: short frame (%d) or non-IPv4\n", __FUNCTION__, ipl));
+		return FALSE;
+	}
+
+	src_port = ntoh16(*(uint16 *)(pt + ETHER_TYPE_LEN + IPV4_OPTIONS_OFFSET));
+
+	return (src_port == 0x43 || src_port == 0x44);
+}
+
+#ifdef DHDTCPACK_SUPPRESS
+
+typedef struct {
+	void *pkt_in_q;			/* TCP ACK packet that is already in txq or DelayQ */
+	void *pkt_ether_hdr;	/* Ethernet header pointer of pkt_in_q */
+} tcpack_info_t;
+
+typedef struct _tdata_psh_info_t {
+	uint32 end_seq;			/* end seq# of a received TCP PSH DATA pkt */
+	struct _tdata_psh_info_t *next;	/* next pointer of the link chain */
+} tdata_psh_info_t;
+
+typedef struct {
+	uint8 src_ip_addr[IPV4_ADDR_LEN];	/* SRC ip addrs of this TCP stream */
+	uint8 dst_ip_addr[IPV4_ADDR_LEN];	/* DST ip addrs of this TCP stream */
+	uint8 src_tcp_port[TCP_PORT_LEN];	/* SRC tcp ports of this TCP stream */
+	uint8 dst_tcp_port[TCP_PORT_LEN];	/* DST tcp ports of this TCP stream */
+	tdata_psh_info_t *tdata_psh_info_head;	/* Head of received TCP PSH DATA chain */
+	tdata_psh_info_t *tdata_psh_info_tail;	/* Tail of received TCP PSH DATA chain */
+	uint32 last_used_time;	/* The last time this tcpdata_info was used(in ms) */
+} tcpdata_info_t;
+
+/* TCPACK SUPPRESS module */
+typedef struct {
+	int tcpack_info_cnt;
+	tcpack_info_t tcpack_info_tbl[TCPACK_INFO_MAXNUM];	/* Info of TCP ACK to send */
+	int tcpdata_info_cnt;
+	tcpdata_info_t tcpdata_info_tbl[TCPDATA_INFO_MAXNUM];	/* Info of received TCP DATA */
+	tdata_psh_info_t *tdata_psh_info_pool;	/* Pointer to tdata_psh_info elements pool */
+	tdata_psh_info_t *tdata_psh_info_free;	/* free tdata_psh_info elements chain in pool */
+#ifdef DHDTCPACK_SUP_DBG
+	int psh_info_enq_num;	/* Number of free TCP PSH DATA info elements in pool */
+#endif /* DHDTCPACK_SUP_DBG */
+} tcpack_sup_module_t;
+
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+counter_tbl_t tack_tbl = {"tcpACK", 0, 1000, 10, {0, }, 1};
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+
+static void
+_tdata_psh_info_pool_enq(tcpack_sup_module_t *tcpack_sup_mod,
+	tdata_psh_info_t *tdata_psh_info)
+{
+	if ((tcpack_sup_mod == NULL) || (tdata_psh_info == NULL)) {
+		DHD_ERROR(("%s %d: ERROR %p %p\n", __FUNCTION__, __LINE__,
+			tcpack_sup_mod, tdata_psh_info));
+		return;
+	}
+
+	ASSERT(tdata_psh_info->next == NULL);
+	tdata_psh_info->next = tcpack_sup_mod->tdata_psh_info_free;
+	tcpack_sup_mod->tdata_psh_info_free = tdata_psh_info;
+#ifdef DHDTCPACK_SUP_DBG
+	tcpack_sup_mod->psh_info_enq_num++;
+#endif
+}
+
+static tdata_psh_info_t*
+_tdata_psh_info_pool_deq(tcpack_sup_module_t *tcpack_sup_mod)
+{
+	tdata_psh_info_t *tdata_psh_info = NULL;
+
+	if (tcpack_sup_mod == NULL) {
+		DHD_ERROR(("%s %d: ERROR %p\n", __FUNCTION__, __LINE__,
+			tcpack_sup_mod));
+		return NULL;
+	}
+
+	tdata_psh_info = tcpack_sup_mod->tdata_psh_info_free;
+	if (tdata_psh_info == NULL)
+		DHD_ERROR(("%s %d: Out of tdata_disc_grp\n", __FUNCTION__, __LINE__));
+	else {
+		tcpack_sup_mod->tdata_psh_info_free = tdata_psh_info->next;
+		tdata_psh_info->next = NULL;
+#ifdef DHDTCPACK_SUP_DBG
+		tcpack_sup_mod->psh_info_enq_num--;
+#endif /* DHDTCPACK_SUP_DBG */
+	}
+
+	return tdata_psh_info;
+}
+
+static int _tdata_psh_info_pool_init(dhd_pub_t *dhdp,
+	tcpack_sup_module_t *tcpack_sup_mod)
+{
+	tdata_psh_info_t *tdata_psh_info_pool = NULL;
+	uint i;
+
+	DHD_TRACE(("%s %d: Enter\n", __FUNCTION__, __LINE__));
+
+	if (tcpack_sup_mod == NULL)
+		return BCME_ERROR;
+
+	ASSERT(tcpack_sup_mod->tdata_psh_info_pool == NULL);
+	ASSERT(tcpack_sup_mod->tdata_psh_info_free == NULL);
+
+	tdata_psh_info_pool =
+		MALLOC(dhdp->osh, sizeof(tdata_psh_info_t) * TCPDATA_PSH_INFO_MAXNUM);
+
+	if (tdata_psh_info_pool == NULL)
+		return BCME_NOMEM;
+	bzero(tdata_psh_info_pool, sizeof(tdata_psh_info_t) * TCPDATA_PSH_INFO_MAXNUM);
+#ifdef DHDTCPACK_SUP_DBG
+	tcpack_sup_mod->psh_info_enq_num = 0;
+#endif /* DHDTCPACK_SUP_DBG */
+
+	/* Enqueue newly allocated tcpdata psh info elements to the pool */
+	for (i = 0; i < TCPDATA_PSH_INFO_MAXNUM; i++)
+		_tdata_psh_info_pool_enq(tcpack_sup_mod, &tdata_psh_info_pool[i]);
+
+	ASSERT(tcpack_sup_mod->tdata_psh_info_free != NULL);
+	tcpack_sup_mod->tdata_psh_info_pool = tdata_psh_info_pool;
+
+	return BCME_OK;
+}
+
+static void _tdata_psh_info_pool_deinit(dhd_pub_t *dhdp,
+	tcpack_sup_module_t *tcpack_sup_mod)
+{
+	uint i;
+	tdata_psh_info_t *tdata_psh_info;
+
+	DHD_TRACE(("%s %d: Enter\n", __FUNCTION__, __LINE__));
+
+	if (tcpack_sup_mod == NULL) {
+		DHD_ERROR(("%s %d: ERROR tcpack_sup_mod NULL!\n",
+			__FUNCTION__, __LINE__));
+		return;
+	}
+
+	for (i = 0; i < tcpack_sup_mod->tcpdata_info_cnt; i++) {
+		tcpdata_info_t *tcpdata_info = &tcpack_sup_mod->tcpdata_info_tbl[i];
+		/* Return tdata_psh_info elements allocated to each tcpdata_info to the pool */
+		while ((tdata_psh_info = tcpdata_info->tdata_psh_info_head)) {
+			tcpdata_info->tdata_psh_info_head = tdata_psh_info->next;
+			tdata_psh_info->next = NULL;
+			_tdata_psh_info_pool_enq(tcpack_sup_mod, tdata_psh_info);
+		}
+		tcpdata_info->tdata_psh_info_tail = NULL;
+	}
+#ifdef DHDTCPACK_SUP_DBG
+	DHD_ERROR(("%s %d: PSH INFO ENQ %d\n",
+		__FUNCTION__, __LINE__, tcpack_sup_mod->psh_info_enq_num));
+#endif /* DHDTCPACK_SUP_DBG */
+
+	i = 0;
+	/* Be sure we recollected all tdata_psh_info elements */
+	while ((tdata_psh_info = tcpack_sup_mod->tdata_psh_info_free)) {
+		tcpack_sup_mod->tdata_psh_info_free = tdata_psh_info->next;
+		tdata_psh_info->next = NULL;
+		i++;
+	}
+	ASSERT(i == TCPDATA_PSH_INFO_MAXNUM);
+	MFREE(dhdp->osh, tcpack_sup_mod->tdata_psh_info_pool,
+		sizeof(tdata_psh_info_t) * TCPDATA_PSH_INFO_MAXNUM);
+	tcpack_sup_mod->tdata_psh_info_pool = NULL;
+
+	return;
+}
+
+int dhd_tcpack_suppress_set(dhd_pub_t *dhdp, uint8 mode)
+{
+	int ret = BCME_OK;
+
+	dhd_os_tcpacklock(dhdp);
+
+	if (dhdp->tcpack_sup_mode == mode) {
+		DHD_ERROR(("%s %d: already set to %d\n", __FUNCTION__, __LINE__, mode));
+		goto exit;
+	}
+
+	if (mode >= TCPACK_SUP_LAST_MODE ||
+#ifndef BCMSDIO
+		mode == TCPACK_SUP_DELAYTX ||
+#endif
+		FALSE) {
+		DHD_ERROR(("%s %d: Invalid mode %d\n", __FUNCTION__, __LINE__, mode));
+		ret = BCME_BADARG;
+		goto exit;
+	}
+
+	DHD_TRACE(("%s: %d -> %d\n",
+		__FUNCTION__, dhdp->tcpack_sup_mode, mode));
+
+	/* Old tcpack_sup_mode is TCPACK_SUP_DELAYTX */
+	if (dhdp->tcpack_sup_mode == TCPACK_SUP_DELAYTX) {
+		tcpack_sup_module_t *tcpack_sup_mod = dhdp->tcpack_sup_module;
+		/* We won't need tdata_psh_info pool and tcpddata_info_tbl anymore */
+		_tdata_psh_info_pool_deinit(dhdp, tcpack_sup_mod);
+		tcpack_sup_mod->tcpdata_info_cnt = 0;
+		bzero(tcpack_sup_mod->tcpdata_info_tbl,
+			sizeof(tcpdata_info_t) * TCPDATA_INFO_MAXNUM);
+		/* For half duplex bus interface, tx precedes rx by default */
+		if (dhdp->bus)
+			dhd_bus_set_dotxinrx(dhdp->bus, TRUE);
+	}
+
+	dhdp->tcpack_sup_mode = mode;
+
+	if (mode == TCPACK_SUP_OFF) {
+		ASSERT(dhdp->tcpack_sup_module != NULL);
+		MFREE(dhdp->osh, dhdp->tcpack_sup_module, sizeof(tcpack_sup_module_t));
+		dhdp->tcpack_sup_module = NULL;
+		goto exit;
+	}
+
+	if (dhdp->tcpack_sup_module == NULL) {
+		tcpack_sup_module_t *tcpack_sup_mod =
+			MALLOC(dhdp->osh, sizeof(tcpack_sup_module_t));
+		if (tcpack_sup_mod == NULL) {
+			DHD_ERROR(("%s %d: No MEM\n", __FUNCTION__, __LINE__));
+			dhdp->tcpack_sup_mode = TCPACK_SUP_OFF;
+			ret = BCME_NOMEM;
+			goto exit;
+		}
+		bzero(tcpack_sup_mod, sizeof(tcpack_sup_module_t));
+		dhdp->tcpack_sup_module = tcpack_sup_mod;
+	}
+
+	if (mode == TCPACK_SUP_DELAYTX) {
+		ret = _tdata_psh_info_pool_init(dhdp, dhdp->tcpack_sup_module);
+		if (ret != BCME_OK)
+			DHD_ERROR(("%s %d: pool init fail with %d\n", __FUNCTION__, __LINE__, ret));
+		else if (dhdp->bus)
+			dhd_bus_set_dotxinrx(dhdp->bus, FALSE);
+	}
+
+exit:
+	dhd_os_tcpackunlock(dhdp);
+	return ret;
+}
+
+void
+dhd_tcpack_info_tbl_clean(dhd_pub_t *dhdp)
+{
+	tcpack_sup_module_t *tcpack_sup_mod = dhdp->tcpack_sup_module;
+
+	if (dhdp->tcpack_sup_mode == TCPACK_SUP_OFF)
+		goto exit;
+
+	dhd_os_tcpacklock(dhdp);
+
+	if (!tcpack_sup_mod) {
+		DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n",
+			__FUNCTION__, __LINE__));
+		dhd_os_tcpackunlock(dhdp);
+		goto exit;
+	}
+
+	tcpack_sup_mod->tcpack_info_cnt = 0;
+	bzero(tcpack_sup_mod->tcpack_info_tbl, sizeof(tcpack_info_t) * TCPACK_INFO_MAXNUM);
+	dhd_os_tcpackunlock(dhdp);
+
+exit:
+	return;
+}
+
+inline int dhd_tcpack_check_xmit(dhd_pub_t *dhdp, void *pkt)
+{
+	uint8 i;
+	tcpack_sup_module_t *tcpack_sup_mod;
+	tcpack_info_t *tcpack_info_tbl;
+	int tbl_cnt;
+	int ret = BCME_OK;
+	void *pdata;
+	uint32 pktlen;
+
+	if (dhdp->tcpack_sup_mode == TCPACK_SUP_OFF)
+		goto exit;
+
+	pdata = PKTDATA(dhdp->osh, pkt);
+	pktlen = PKTLEN(dhdp->osh, pkt) - dhd_prot_hdrlen(dhdp, pdata);
+
+	if (pktlen < TCPACKSZMIN || pktlen > TCPACKSZMAX) {
+		DHD_TRACE(("%s %d: Too short or long length %d to be TCP ACK\n",
+			__FUNCTION__, __LINE__, pktlen));
+		goto exit;
+	}
+
+	dhd_os_tcpacklock(dhdp);
+	tcpack_sup_mod = dhdp->tcpack_sup_module;
+
+	if (!tcpack_sup_mod) {
+		DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", __FUNCTION__, __LINE__));
+		ret = BCME_ERROR;
+		dhd_os_tcpackunlock(dhdp);
+		goto exit;
+	}
+	tbl_cnt = tcpack_sup_mod->tcpack_info_cnt;
+	tcpack_info_tbl = tcpack_sup_mod->tcpack_info_tbl;
+
+	ASSERT(tbl_cnt <= TCPACK_INFO_MAXNUM);
+
+	for (i = 0; i < tbl_cnt; i++) {
+		if (tcpack_info_tbl[i].pkt_in_q == pkt) {
+			DHD_TRACE(("%s %d: pkt %p sent out. idx %d, tbl_cnt %d\n",
+				__FUNCTION__, __LINE__, pkt, i, tbl_cnt));
+			/* This pkt is being transmitted so remove the tcp_ack_info of it. */
+			if (i < tbl_cnt - 1) {
+				bcopy(&tcpack_info_tbl[tbl_cnt - 1],
+					&tcpack_info_tbl[i], sizeof(tcpack_info_t));
+			}
+			bzero(&tcpack_info_tbl[tbl_cnt - 1], sizeof(tcpack_info_t));
+			if (--tcpack_sup_mod->tcpack_info_cnt < 0) {
+				DHD_ERROR(("%s %d: ERROR!!! tcp_ack_info_cnt %d\n",
+					__FUNCTION__, __LINE__, tcpack_sup_mod->tcpack_info_cnt));
+				ret = BCME_ERROR;
+			}
+			break;
+		}
+	}
+	dhd_os_tcpackunlock(dhdp);
+
+exit:
+	return ret;
+}
+
+static INLINE bool dhd_tcpdata_psh_acked(dhd_pub_t *dhdp, uint8 *ip_hdr,
+	uint8 *tcp_hdr, uint32 tcp_ack_num)
+{
+	tcpack_sup_module_t *tcpack_sup_mod;
+	int i;
+	tcpdata_info_t *tcpdata_info = NULL;
+	tdata_psh_info_t *tdata_psh_info = NULL;
+	bool ret = FALSE;
+
+	if (dhdp->tcpack_sup_mode != TCPACK_SUP_DELAYTX)
+		goto exit;
+
+	tcpack_sup_mod = dhdp->tcpack_sup_module;
+
+	if (!tcpack_sup_mod) {
+		DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", __FUNCTION__, __LINE__));
+		goto exit;
+	}
+
+	DHD_TRACE(("%s %d: IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR
+		" TCP port %d %d, ack %u\n", __FUNCTION__, __LINE__,
+		IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_SRC_IP_OFFSET])),
+		IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_DEST_IP_OFFSET])),
+		ntoh16_ua(&tcp_hdr[TCP_SRC_PORT_OFFSET]),
+		ntoh16_ua(&tcp_hdr[TCP_DEST_PORT_OFFSET]),
+		tcp_ack_num));
+
+	for (i = 0; i < tcpack_sup_mod->tcpdata_info_cnt; i++) {
+		tcpdata_info_t *tcpdata_info_tmp = &tcpack_sup_mod->tcpdata_info_tbl[i];
+		DHD_TRACE(("%s %d: data info[%d], IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR
+			" TCP port %d %d\n", __FUNCTION__, __LINE__, i,
+			IPV4_ADDR_TO_STR(ntoh32_ua(tcpdata_info_tmp->src_ip_addr)),
+			IPV4_ADDR_TO_STR(ntoh32_ua(tcpdata_info_tmp->dst_ip_addr)),
+			ntoh16_ua(tcpdata_info_tmp->src_tcp_port),
+			ntoh16_ua(tcpdata_info_tmp->dst_tcp_port)));
+
+		/* If either IP address or TCP port number does not match, skip. */
+		if (memcmp(&ip_hdr[IPV4_SRC_IP_OFFSET],
+			tcpdata_info_tmp->dst_ip_addr, IPV4_ADDR_LEN) == 0 &&
+			memcmp(&ip_hdr[IPV4_DEST_IP_OFFSET],
+			tcpdata_info_tmp->src_ip_addr, IPV4_ADDR_LEN) == 0 &&
+			memcmp(&tcp_hdr[TCP_SRC_PORT_OFFSET],
+			tcpdata_info_tmp->dst_tcp_port, TCP_PORT_LEN) == 0 &&
+			memcmp(&tcp_hdr[TCP_DEST_PORT_OFFSET],
+			tcpdata_info_tmp->src_tcp_port, TCP_PORT_LEN) == 0) {
+			tcpdata_info = tcpdata_info_tmp;
+			break;
+		}
+	}
+
+	if (tcpdata_info == NULL) {
+		DHD_TRACE(("%s %d: no tcpdata_info!\n", __FUNCTION__, __LINE__));
+		goto exit;
+	}
+
+	if (tcpdata_info->tdata_psh_info_head == NULL) {
+		DHD_TRACE(("%s %d: No PSH DATA to be acked!\n", __FUNCTION__, __LINE__));
+	}
+
+	while ((tdata_psh_info = tcpdata_info->tdata_psh_info_head)) {
+		if (IS_TCPSEQ_GE(tcp_ack_num, tdata_psh_info->end_seq)) {
+			DHD_TRACE(("%s %d: PSH ACKED! %u >= %u\n",
+				__FUNCTION__, __LINE__, tcp_ack_num, tdata_psh_info->end_seq));
+			tcpdata_info->tdata_psh_info_head = tdata_psh_info->next;
+			tdata_psh_info->next = NULL;
+			_tdata_psh_info_pool_enq(tcpack_sup_mod, tdata_psh_info);
+			ret = TRUE;
+		} else
+			break;
+	}
+	if (tdata_psh_info == NULL)
+		tcpdata_info->tdata_psh_info_tail = NULL;
+
+#ifdef DHDTCPACK_SUP_DBG
+	DHD_TRACE(("%s %d: PSH INFO ENQ %d\n",
+		__FUNCTION__, __LINE__, tcpack_sup_mod->psh_info_enq_num));
+#endif /* DHDTCPACK_SUP_DBG */
+
+exit:
+	return ret;
+}
+
+bool
+dhd_tcpack_suppress(dhd_pub_t *dhdp, void *pkt)
+{
+	uint8 *new_ether_hdr;	/* Ethernet header of the new packet */
+	uint16 new_ether_type;	/* Ethernet type of the new packet */
+	uint8 *new_ip_hdr;		/* IP header of the new packet */
+	uint8 *new_tcp_hdr;		/* TCP header of the new packet */
+	uint32 new_ip_hdr_len;	/* IP header length of the new packet */
+	uint32 cur_framelen;
+	uint32 new_tcp_ack_num;		/* TCP acknowledge number of the new packet */
+	uint16 new_ip_total_len;	/* Total length of IP packet for the new packet */
+	uint32 new_tcp_hdr_len;		/* TCP header length of the new packet */
+	tcpack_sup_module_t *tcpack_sup_mod;
+	tcpack_info_t *tcpack_info_tbl;
+	int i;
+	bool ret = FALSE;
+	bool set_dotxinrx = TRUE;
+
+	if (dhdp->tcpack_sup_mode == TCPACK_SUP_OFF)
+		goto exit;
+
+	new_ether_hdr = PKTDATA(dhdp->osh, pkt);
+	cur_framelen = PKTLEN(dhdp->osh, pkt);
+
+	if (cur_framelen < TCPACKSZMIN || cur_framelen > TCPACKSZMAX) {
+		DHD_TRACE(("%s %d: Too short or long length %d to be TCP ACK\n",
+			__FUNCTION__, __LINE__, cur_framelen));
+		goto exit;
+	}
+
+	new_ether_type = new_ether_hdr[12] << 8 | new_ether_hdr[13];
+
+	if (new_ether_type != ETHER_TYPE_IP) {
+		DHD_TRACE(("%s %d: Not a IP packet 0x%x\n",
+			__FUNCTION__, __LINE__, new_ether_type));
+		goto exit;
+	}
+
+	DHD_TRACE(("%s %d: IP pkt! 0x%x\n", __FUNCTION__, __LINE__, new_ether_type));
+
+	new_ip_hdr = new_ether_hdr + ETHER_HDR_LEN;
+	cur_framelen -= ETHER_HDR_LEN;
+
+	ASSERT(cur_framelen >= IPV4_MIN_HEADER_LEN);
+
+	new_ip_hdr_len = IPV4_HLEN(new_ip_hdr);
+	if (IP_VER(new_ip_hdr) != IP_VER_4 || IPV4_PROT(new_ip_hdr) != IP_PROT_TCP) {
+		DHD_TRACE(("%s %d: Not IPv4 nor TCP! ip ver %d, prot %d\n",
+			__FUNCTION__, __LINE__, IP_VER(new_ip_hdr), IPV4_PROT(new_ip_hdr)));
+		goto exit;
+	}
+
+	new_tcp_hdr = new_ip_hdr + new_ip_hdr_len;
+	cur_framelen -= new_ip_hdr_len;
+
+	ASSERT(cur_framelen >= TCP_MIN_HEADER_LEN);
+
+	DHD_TRACE(("%s %d: TCP pkt!\n", __FUNCTION__, __LINE__));
+
+	/* is it an ack ? Allow only ACK flag, not to suppress others. */
+	if (new_tcp_hdr[TCP_FLAGS_OFFSET] != TCP_FLAG_ACK) {
+		DHD_TRACE(("%s %d: Do not touch TCP flag 0x%x\n",
+			__FUNCTION__, __LINE__, new_tcp_hdr[TCP_FLAGS_OFFSET]));
+		goto exit;
+	}
+
+	new_ip_total_len = ntoh16_ua(&new_ip_hdr[IPV4_PKTLEN_OFFSET]);
+	new_tcp_hdr_len = 4 * TCP_HDRLEN(new_tcp_hdr[TCP_HLEN_OFFSET]);
+
+	/* This packet has TCP data, so just send */
+	if (new_ip_total_len > new_ip_hdr_len + new_tcp_hdr_len) {
+		DHD_TRACE(("%s %d: Do nothing for TCP DATA\n", __FUNCTION__, __LINE__));
+		goto exit;
+	}
+
+	ASSERT(new_ip_total_len == new_ip_hdr_len + new_tcp_hdr_len);
+
+	new_tcp_ack_num = ntoh32_ua(&new_tcp_hdr[TCP_ACK_NUM_OFFSET]);
+
+	DHD_TRACE(("%s %d: TCP ACK with zero DATA length"
+		" IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR" TCP port %d %d\n",
+		__FUNCTION__, __LINE__,
+		IPV4_ADDR_TO_STR(ntoh32_ua(&new_ip_hdr[IPV4_SRC_IP_OFFSET])),
+		IPV4_ADDR_TO_STR(ntoh32_ua(&new_ip_hdr[IPV4_DEST_IP_OFFSET])),
+		ntoh16_ua(&new_tcp_hdr[TCP_SRC_PORT_OFFSET]),
+		ntoh16_ua(&new_tcp_hdr[TCP_DEST_PORT_OFFSET])));
+
+	/* Look for tcp_ack_info that has the same ip src/dst addrs and tcp src/dst ports */
+	dhd_os_tcpacklock(dhdp);
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+	counter_printlog(&tack_tbl);
+	tack_tbl.cnt[0]++;
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+
+	tcpack_sup_mod = dhdp->tcpack_sup_module;
+	tcpack_info_tbl = tcpack_sup_mod->tcpack_info_tbl;
+
+	if (!tcpack_sup_mod) {
+		DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", __FUNCTION__, __LINE__));
+		ret = BCME_ERROR;
+		dhd_os_tcpackunlock(dhdp);
+		goto exit;
+	}
+
+	if (dhd_tcpdata_psh_acked(dhdp, new_ip_hdr, new_tcp_hdr, new_tcp_ack_num)) {
+		/* This TCPACK is ACK to TCPDATA PSH pkt, so keep set_dotxinrx TRUE */
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+		tack_tbl.cnt[5]++;
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+	} else
+		set_dotxinrx = FALSE;
+
+	for (i = 0; i < tcpack_sup_mod->tcpack_info_cnt; i++) {
+		void *oldpkt;	/* TCPACK packet that is already in txq or DelayQ */
+		uint8 *old_ether_hdr, *old_ip_hdr, *old_tcp_hdr;
+		uint32 old_ip_hdr_len, old_tcp_hdr_len;
+		uint32 old_tcpack_num;	/* TCP ACK number of old TCPACK packet in Q */
+
+		if ((oldpkt = tcpack_info_tbl[i].pkt_in_q) == NULL) {
+			DHD_ERROR(("%s %d: Unexpected error!! cur idx %d, ttl cnt %d\n",
+				__FUNCTION__, __LINE__, i, tcpack_sup_mod->tcpack_info_cnt));
+			break;
+		}
+
+		if (PKTDATA(dhdp->osh, oldpkt) == NULL) {
+			DHD_ERROR(("%s %d: oldpkt data NULL!! cur idx %d, ttl cnt %d\n",
+				__FUNCTION__, __LINE__, i, tcpack_sup_mod->tcpack_info_cnt));
+			break;
+		}
+
+		old_ether_hdr = tcpack_info_tbl[i].pkt_ether_hdr;
+		old_ip_hdr = old_ether_hdr + ETHER_HDR_LEN;
+		old_ip_hdr_len = IPV4_HLEN(old_ip_hdr);
+		old_tcp_hdr = old_ip_hdr + old_ip_hdr_len;
+		old_tcp_hdr_len = 4 * TCP_HDRLEN(old_tcp_hdr[TCP_HLEN_OFFSET]);
+
+		DHD_TRACE(("%s %d: oldpkt %p[%d], IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR
+			" TCP port %d %d\n", __FUNCTION__, __LINE__, oldpkt, i,
+			IPV4_ADDR_TO_STR(ntoh32_ua(&old_ip_hdr[IPV4_SRC_IP_OFFSET])),
+			IPV4_ADDR_TO_STR(ntoh32_ua(&old_ip_hdr[IPV4_DEST_IP_OFFSET])),
+			ntoh16_ua(&old_tcp_hdr[TCP_SRC_PORT_OFFSET]),
+			ntoh16_ua(&old_tcp_hdr[TCP_DEST_PORT_OFFSET])));
+
+		/* If either of IP address or TCP port number does not match, skip. */
+		if (memcmp(&new_ip_hdr[IPV4_SRC_IP_OFFSET],
+			&old_ip_hdr[IPV4_SRC_IP_OFFSET], IPV4_ADDR_LEN * 2) ||
+			memcmp(&new_tcp_hdr[TCP_SRC_PORT_OFFSET],
+			&old_tcp_hdr[TCP_SRC_PORT_OFFSET], TCP_PORT_LEN * 2))
+			continue;
+
+		old_tcpack_num = ntoh32_ua(&old_tcp_hdr[TCP_ACK_NUM_OFFSET]);
+
+		if (IS_TCPSEQ_GT(new_tcp_ack_num, old_tcpack_num)) {
+			/* New packet has higher TCP ACK number, so it replaces the old packet */
+			if (new_ip_hdr_len == old_ip_hdr_len &&
+				new_tcp_hdr_len == old_tcp_hdr_len) {
+				ASSERT(memcmp(new_ether_hdr, old_ether_hdr, ETHER_HDR_LEN) == 0);
+				bcopy(new_ip_hdr, old_ip_hdr, new_ip_total_len);
+				PKTFREE(dhdp->osh, pkt, FALSE);
+				DHD_TRACE(("%s %d: TCP ACK replace %u -> %u\n",
+					__FUNCTION__, __LINE__, old_tcpack_num, new_tcp_ack_num));
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+				tack_tbl.cnt[2]++;
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+				ret = TRUE;
+			} else {
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+				tack_tbl.cnt[6]++;
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+				DHD_TRACE(("%s %d: lenth mismatch %d != %d || %d != %d"
+					" ACK %u -> %u\n", __FUNCTION__, __LINE__,
+					new_ip_hdr_len, old_ip_hdr_len,
+					new_tcp_hdr_len, old_tcp_hdr_len,
+					old_tcpack_num, new_tcp_ack_num));
+			}
+		} else if (new_tcp_ack_num == old_tcpack_num) {
+			set_dotxinrx = TRUE;
+			/* TCPACK retransmission */
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+			tack_tbl.cnt[3]++;
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+		} else {
+			DHD_TRACE(("%s %d: ACK number reverse old %u(0x%p) new %u(0x%p)\n",
+				__FUNCTION__, __LINE__, old_tcpack_num, oldpkt,
+				new_tcp_ack_num, pkt));
+		}
+		dhd_os_tcpackunlock(dhdp);
+		goto exit;
+	}
+
+	if (i == tcpack_sup_mod->tcpack_info_cnt && i < TCPACK_INFO_MAXNUM) {
+		/* No TCPACK packet with the same IP addr and TCP port is found
+		 * in tcp_ack_info_tbl. So add this packet to the table.
+		 */
+		DHD_TRACE(("%s %d: Add pkt 0x%p(ether_hdr 0x%p) to tbl[%d]\n",
+			__FUNCTION__, __LINE__, pkt, new_ether_hdr,
+			tcpack_sup_mod->tcpack_info_cnt));
+
+		tcpack_info_tbl[tcpack_sup_mod->tcpack_info_cnt].pkt_in_q = pkt;
+		tcpack_info_tbl[tcpack_sup_mod->tcpack_info_cnt].pkt_ether_hdr = new_ether_hdr;
+		tcpack_sup_mod->tcpack_info_cnt++;
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+		tack_tbl.cnt[1]++;
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+	} else {
+		ASSERT(i == tcpack_sup_mod->tcpack_info_cnt);
+		DHD_TRACE(("%s %d: No empty tcp ack info tbl\n",
+			__FUNCTION__, __LINE__));
+	}
+	dhd_os_tcpackunlock(dhdp);
+
+exit:
+	/* Unless TCPACK_SUP_DELAYTX, dotxinrx is alwasy TRUE, so no need to set here */
+	if (dhdp->tcpack_sup_mode == TCPACK_SUP_DELAYTX && set_dotxinrx)
+		dhd_bus_set_dotxinrx(dhdp->bus, TRUE);
+
+	return ret;
+}
+
+bool
+dhd_tcpdata_info_get(dhd_pub_t *dhdp, void *pkt)
+{
+	uint8 *ether_hdr;	/* Ethernet header of the new packet */
+	uint16 ether_type;	/* Ethernet type of the new packet */
+	uint8 *ip_hdr;		/* IP header of the new packet */
+	uint8 *tcp_hdr;		/* TCP header of the new packet */
+	uint32 ip_hdr_len;	/* IP header length of the new packet */
+	uint32 cur_framelen;
+	uint16 ip_total_len;	/* Total length of IP packet for the new packet */
+	uint32 tcp_hdr_len;		/* TCP header length of the new packet */
+	uint32 tcp_seq_num;		/* TCP sequence number of the new packet */
+	uint16 tcp_data_len;	/* TCP DATA length that excludes IP and TCP headers */
+	uint32 end_tcp_seq_num;	/* TCP seq number of the last byte in the new packet */
+	tcpack_sup_module_t *tcpack_sup_mod;
+	tcpdata_info_t *tcpdata_info = NULL;
+	tdata_psh_info_t *tdata_psh_info;
+
+	int i;
+	bool ret = FALSE;
+
+	if (dhdp->tcpack_sup_mode != TCPACK_SUP_DELAYTX)
+		goto exit;
+
+	ether_hdr = PKTDATA(dhdp->osh, pkt);
+	cur_framelen = PKTLEN(dhdp->osh, pkt);
+
+	ether_type = ether_hdr[12] << 8 | ether_hdr[13];
+
+	if (ether_type != ETHER_TYPE_IP) {
+		DHD_TRACE(("%s %d: Not a IP packet 0x%x\n",
+			__FUNCTION__, __LINE__, ether_type));
+		goto exit;
+	}
+
+	DHD_TRACE(("%s %d: IP pkt! 0x%x\n", __FUNCTION__, __LINE__, ether_type));
+
+	ip_hdr = ether_hdr + ETHER_HDR_LEN;
+	cur_framelen -= ETHER_HDR_LEN;
+
+	ASSERT(cur_framelen >= IPV4_MIN_HEADER_LEN);
+
+	ip_hdr_len = IPV4_HLEN(ip_hdr);
+	if (IP_VER(ip_hdr) != IP_VER_4 || IPV4_PROT(ip_hdr) != IP_PROT_TCP) {
+		DHD_TRACE(("%s %d: Not IPv4 nor TCP! ip ver %d, prot %d\n",
+			__FUNCTION__, __LINE__, IP_VER(ip_hdr), IPV4_PROT(ip_hdr)));
+		goto exit;
+	}
+
+	tcp_hdr = ip_hdr + ip_hdr_len;
+	cur_framelen -= ip_hdr_len;
+
+	ASSERT(cur_framelen >= TCP_MIN_HEADER_LEN);
+
+	DHD_TRACE(("%s %d: TCP pkt!\n", __FUNCTION__, __LINE__));
+
+	ip_total_len = ntoh16_ua(&ip_hdr[IPV4_PKTLEN_OFFSET]);
+	tcp_hdr_len = 4 * TCP_HDRLEN(tcp_hdr[TCP_HLEN_OFFSET]);
+
+	/* This packet is mere TCP ACK, so do nothing */
+	if (ip_total_len == ip_hdr_len + tcp_hdr_len) {
+		DHD_TRACE(("%s %d: Do nothing for no data TCP ACK\n", __FUNCTION__, __LINE__));
+		goto exit;
+	}
+
+	ASSERT(ip_total_len > ip_hdr_len + tcp_hdr_len);
+
+	if ((tcp_hdr[TCP_FLAGS_OFFSET] & TCP_FLAG_PSH) == 0) {
+		DHD_TRACE(("%s %d: Not interested TCP DATA packet\n", __FUNCTION__, __LINE__));
+		goto exit;
+	}
+
+	DHD_TRACE(("%s %d: TCP DATA with nonzero DATA length"
+		" IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR" TCP port %d %d, flag 0x%x\n",
+		__FUNCTION__, __LINE__,
+		IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_SRC_IP_OFFSET])),
+		IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_DEST_IP_OFFSET])),
+		ntoh16_ua(&tcp_hdr[TCP_SRC_PORT_OFFSET]),
+		ntoh16_ua(&tcp_hdr[TCP_DEST_PORT_OFFSET]),
+		tcp_hdr[TCP_FLAGS_OFFSET]));
+
+	dhd_os_tcpacklock(dhdp);
+	tcpack_sup_mod = dhdp->tcpack_sup_module;
+
+	if (!tcpack_sup_mod) {
+		DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", __FUNCTION__, __LINE__));
+		ret = BCME_ERROR;
+		dhd_os_tcpackunlock(dhdp);
+		goto exit;
+	}
+
+	/* Look for tcpdata_info that has the same ip src/dst addrs and tcp src/dst ports */
+	i = 0;
+	while (i < tcpack_sup_mod->tcpdata_info_cnt) {
+		tcpdata_info_t *tdata_info_tmp = &tcpack_sup_mod->tcpdata_info_tbl[i];
+		uint32 now_in_ms = OSL_SYSUPTIME();
+		DHD_TRACE(("%s %d: data info[%d], IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR
+			" TCP port %d %d\n", __FUNCTION__, __LINE__, i,
+			IPV4_ADDR_TO_STR(ntoh32_ua(tdata_info_tmp->src_ip_addr)),
+			IPV4_ADDR_TO_STR(ntoh32_ua(tdata_info_tmp->dst_ip_addr)),
+			ntoh16_ua(tdata_info_tmp->src_tcp_port),
+			ntoh16_ua(tdata_info_tmp->dst_tcp_port)));
+
+		/* If both IP address and TCP port number match, we found it so break. */
+		if (memcmp(&ip_hdr[IPV4_SRC_IP_OFFSET],
+			tdata_info_tmp->src_ip_addr, IPV4_ADDR_LEN * 2) == 0 &&
+			memcmp(&tcp_hdr[TCP_SRC_PORT_OFFSET],
+			tdata_info_tmp->src_tcp_port, TCP_PORT_LEN * 2) == 0) {
+			tcpdata_info = tdata_info_tmp;
+			tcpdata_info->last_used_time = now_in_ms;
+			break;
+		}
+
+		if (now_in_ms - tdata_info_tmp->last_used_time > TCPDATA_INFO_TIMEOUT) {
+			tdata_psh_info_t *tdata_psh_info_tmp;
+			tcpdata_info_t *last_tdata_info;
+
+			while ((tdata_psh_info_tmp = tdata_info_tmp->tdata_psh_info_head)) {
+				tdata_info_tmp->tdata_psh_info_head = tdata_psh_info_tmp->next;
+				tdata_psh_info_tmp->next = NULL;
+				DHD_TRACE(("%s %d: Clean tdata_psh_info(end_seq %u)!\n",
+					__FUNCTION__, __LINE__, tdata_psh_info_tmp->end_seq));
+				_tdata_psh_info_pool_enq(tcpack_sup_mod, tdata_psh_info_tmp);
+			}
+#ifdef DHDTCPACK_SUP_DBG
+			DHD_ERROR(("%s %d: PSH INFO ENQ %d\n",
+				__FUNCTION__, __LINE__, tcpack_sup_mod->psh_info_enq_num));
+#endif /* DHDTCPACK_SUP_DBG */
+			tcpack_sup_mod->tcpdata_info_cnt--;
+			ASSERT(tcpack_sup_mod->tcpdata_info_cnt >= 0);
+
+			last_tdata_info =
+				&tcpack_sup_mod->tcpdata_info_tbl[tcpack_sup_mod->tcpdata_info_cnt];
+			if (i < tcpack_sup_mod->tcpdata_info_cnt) {
+				ASSERT(last_tdata_info != tdata_info_tmp);
+				bcopy(last_tdata_info, tdata_info_tmp, sizeof(tcpdata_info_t));
+			}
+			bzero(last_tdata_info, sizeof(tcpdata_info_t));
+			DHD_TRACE(("%s %d: tcpdata_info(idx %d) is aged out. ttl cnt is now %d\n",
+				__FUNCTION__, __LINE__, i, tcpack_sup_mod->tcpdata_info_cnt));
+			/* Don't increase "i" here, so that the prev last tcpdata_info is checked */
+		} else
+			 i++;
+	}
+
+	tcp_seq_num = ntoh32_ua(&tcp_hdr[TCP_SEQ_NUM_OFFSET]);
+	tcp_data_len = ip_total_len - ip_hdr_len - tcp_hdr_len;
+	end_tcp_seq_num = tcp_seq_num + tcp_data_len;
+
+	if (tcpdata_info == NULL) {
+		ASSERT(i == tcpack_sup_mod->tcpdata_info_cnt);
+		if (i >= TCPDATA_INFO_MAXNUM) {
+			DHD_TRACE(("%s %d: tcp_data_info_tbl FULL! %d %d"
+				" IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR" TCP port %d %d\n",
+				__FUNCTION__, __LINE__, i, tcpack_sup_mod->tcpdata_info_cnt,
+				IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_SRC_IP_OFFSET])),
+				IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_DEST_IP_OFFSET])),
+				ntoh16_ua(&tcp_hdr[TCP_SRC_PORT_OFFSET]),
+				ntoh16_ua(&tcp_hdr[TCP_DEST_PORT_OFFSET])));
+			dhd_os_tcpackunlock(dhdp);
+			goto exit;
+		}
+		tcpdata_info = &tcpack_sup_mod->tcpdata_info_tbl[i];
+
+		/* No TCP flow with the same IP addr and TCP port is found
+		 * in tcp_data_info_tbl. So add this flow to the table.
+		 */
+		DHD_TRACE(("%s %d: Add data info to tbl[%d]: IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR
+			" TCP port %d %d\n",
+			__FUNCTION__, __LINE__, tcpack_sup_mod->tcpdata_info_cnt,
+			IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_SRC_IP_OFFSET])),
+			IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_DEST_IP_OFFSET])),
+			ntoh16_ua(&tcp_hdr[TCP_SRC_PORT_OFFSET]),
+			ntoh16_ua(&tcp_hdr[TCP_DEST_PORT_OFFSET])));
+
+		bcopy(&ip_hdr[IPV4_SRC_IP_OFFSET], tcpdata_info->src_ip_addr,
+			IPV4_ADDR_LEN * 2);
+		bcopy(&tcp_hdr[TCP_SRC_PORT_OFFSET], tcpdata_info->src_tcp_port,
+			TCP_PORT_LEN * 2);
+
+		tcpdata_info->last_used_time = OSL_SYSUPTIME();
+		tcpack_sup_mod->tcpdata_info_cnt++;
+	}
+
+	ASSERT(tcpdata_info != NULL);
+
+	tdata_psh_info = _tdata_psh_info_pool_deq(tcpack_sup_mod);
+#ifdef DHDTCPACK_SUP_DBG
+	DHD_TRACE(("%s %d: PSH INFO ENQ %d\n",
+		__FUNCTION__, __LINE__, tcpack_sup_mod->psh_info_enq_num));
+#endif /* DHDTCPACK_SUP_DBG */
+
+	if (tdata_psh_info == NULL) {
+		DHD_ERROR(("%s %d: No more free tdata_psh_info!!\n", __FUNCTION__, __LINE__));
+		ret = BCME_ERROR;
+		dhd_os_tcpackunlock(dhdp);
+		goto exit;
+	}
+	tdata_psh_info->end_seq = end_tcp_seq_num;
+
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+	tack_tbl.cnt[4]++;
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+
+	DHD_TRACE(("%s %d: TCP PSH DATA recvd! end seq %u\n",
+		__FUNCTION__, __LINE__, tdata_psh_info->end_seq));
+
+	ASSERT(tdata_psh_info->next == NULL);
+
+	if (tcpdata_info->tdata_psh_info_head == NULL)
+		tcpdata_info->tdata_psh_info_head = tdata_psh_info;
+	else {
+		ASSERT(tcpdata_info->tdata_psh_info_tail);
+		tcpdata_info->tdata_psh_info_tail->next = tdata_psh_info;
+	}
+	tcpdata_info->tdata_psh_info_tail = tdata_psh_info;
+
+	dhd_os_tcpackunlock(dhdp);
+
+exit:
+	return ret;
+}
+
+#endif /* DHDTCPACK_SUPPRESS */
diff --git a/drivers/net/wireless/bcmdhd/dhd_ip.h b/drivers/net/wireless/bcmdhd/dhd_ip.h
new file mode 100644
index 0000000..835046c
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_ip.h
@@ -0,0 +1,73 @@
+/*
+ * Header file describing the common ip parser function.
+ *
+ * Provides type definitions and function prototypes used to parse ip packet.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ *
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ *
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_ip.h 458522 2014-02-27 02:26:15Z $
+ */
+
+#ifndef _dhd_ip_h_
+#define _dhd_ip_h_
+
+#ifdef DHDTCPACK_SUPPRESS
+#include <dngl_stats.h>
+#include <bcmutils.h>
+#include <dhd.h>
+#endif /* DHDTCPACK_SUPPRESS */
+
+typedef enum pkt_frag
+{
+	DHD_PKT_FRAG_NONE = 0,
+	DHD_PKT_FRAG_FIRST,
+	DHD_PKT_FRAG_CONT,
+	DHD_PKT_FRAG_LAST
+} pkt_frag_t;
+
+extern pkt_frag_t pkt_frag_info(osl_t *osh, void *p);
+extern bool pkt_is_dhcp(osl_t *osh, void *p);
+
+#ifdef DHDTCPACK_SUPPRESS
+#define	TCPACKSZMIN	(ETHER_HDR_LEN + IPV4_MIN_HEADER_LEN + TCP_MIN_HEADER_LEN)
+/* Size of MAX possible TCP ACK packet. Extra bytes for IP/TCP option fields */
+#define	TCPACKSZMAX	(TCPACKSZMIN + 100)
+
+/* Max number of TCP streams that have own src/dst IP addrs and TCP ports */
+#define TCPACK_INFO_MAXNUM 4
+#define TCPDATA_INFO_MAXNUM 4
+#define TCPDATA_PSH_INFO_MAXNUM (8 * TCPDATA_INFO_MAXNUM)
+
+#define TCPDATA_INFO_TIMEOUT 5000	/* Remove tcpdata_info if inactive for this time (in ms) */
+
+extern int dhd_tcpack_suppress_set(dhd_pub_t *dhdp, uint8 on);
+extern void dhd_tcpack_info_tbl_clean(dhd_pub_t *dhdp);
+extern int dhd_tcpack_check_xmit(dhd_pub_t *dhdp, void *pkt);
+extern bool dhd_tcpack_suppress(dhd_pub_t *dhdp, void *pkt);
+extern bool dhd_tcpdata_info_get(dhd_pub_t *dhdp, void *pkt);
+
+/* #define DHDTCPACK_SUP_DBG */
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+extern counter_tbl_t tack_tbl;
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+#endif /* DHDTCPACK_SUPPRESS */
+
+#endif /* _dhd_ip_h_ */
diff --git a/drivers/net/wireless/bcmdhd/dhd_linux.c b/drivers/net/wireless/bcmdhd/dhd_linux.c
new file mode 100644
index 0000000..587a5df
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_linux.c
@@ -0,0 +1,9307 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), Linux-specific network interface
+ * Basically selected code segments from usb-cdc.c and usb-rndis.c
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ *
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ *
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_linux.c 477711 2014-05-14 08:45:17Z $
+ */
+
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+#ifdef SHOW_LOGTRACE
+#include <linux/syscalls.h>
+#include <event_log.h>
+#endif /* SHOW_LOGTRACE */
+
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/etherdevice.h>
+#include <linux/random.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <linux/fcntl.h>
+#include <linux/fs.h>
+#include <linux/ip.h>
+#include <linux/reboot.h>
+#include <linux/notifier.h>
+#include <net/addrconf.h>
+#ifdef ENABLE_ADAPTIVE_SCHED
+#include <linux/cpufreq.h>
+#endif /* ENABLE_ADAPTIVE_SCHED */
+
+#include <asm/uaccess.h>
+#include <asm/unaligned.h>
+
+#include <epivers.h>
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <bcmdevs.h>
+
+#include <proto/ethernet.h>
+#include <proto/bcmevent.h>
+#include <proto/vlan.h>
+#include <proto/bcmudp.h>
+#include <proto/bcmdhcp.h>
+#ifdef DHD_L2_FILTER
+#include <proto/bcmicmp.h>
+#endif
+#include <proto/802.3.h>
+
+#include <dngl_stats.h>
+#include <dhd_linux_wq.h>
+#include <dhd.h>
+#include <dhd_linux.h>
+#ifdef PCIE_FULL_DONGLE
+#include <dhd_flowring.h>
+#endif
+#include <dhd_bus.h>
+#include <dhd_proto.h>
+#include <dhd_dbg.h>
+#ifdef CONFIG_HAS_WAKELOCK
+#include <linux/wakelock.h>
+#endif
+#ifdef WL_CFG80211
+#include <wl_cfg80211.h>
+#endif
+#ifdef PNO_SUPPORT
+#include <dhd_pno.h>
+#endif
+#ifdef RTT_SUPPORT
+#include <dhd_rtt.h>
+#endif
+
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+#endif
+
+#ifdef DHD_WMF
+#include <dhd_wmf_linux.h>
+#endif /* DHD_WMF */
+
+#ifdef DHDTCPACK_SUPPRESS
+#include <dhd_ip.h>
+#endif /* DHDTCPACK_SUPPRESS */
+
+
+#ifdef WLMEDIA_HTSF
+#include <linux/time.h>
+#include <htsf.h>
+
+#define HTSF_MINLEN 200    /* min. packet length to timestamp */
+#define HTSF_BUS_DELAY 150 /* assume a fix propagation in us  */
+#define TSMAX  1000        /* max no. of timing record kept   */
+#define NUMBIN 34
+
+static uint32 tsidx = 0;
+static uint32 htsf_seqnum = 0;
+uint32 tsfsync;
+struct timeval tsync;
+static uint32 tsport = 5010;
+
+typedef struct histo_ {
+	uint32 bin[NUMBIN];
+} histo_t;
+
+#if !ISPOWEROF2(DHD_SDALIGN)
+#error DHD_SDALIGN is not a power of 2!
+#endif
+
+static histo_t vi_d1, vi_d2, vi_d3, vi_d4;
+#endif /* WLMEDIA_HTSF */
+
+
+
+#if defined(SOFTAP)
+extern bool ap_cfg_running;
+extern bool ap_fw_loaded;
+#endif
+
+#ifdef SET_RANDOM_MAC_SOFTAP
+#ifndef CONFIG_DHD_SET_RANDOM_MAC_VAL
+#define CONFIG_DHD_SET_RANDOM_MAC_VAL	0x001A11
+#endif
+static u32 vendor_oui = CONFIG_DHD_SET_RANDOM_MAC_VAL;
+#endif
+
+#ifdef ENABLE_ADAPTIVE_SCHED
+#define DEFAULT_CPUFREQ_THRESH		1000000	/* threshold frequency : 1000000 = 1GHz */
+#ifndef CUSTOM_CPUFREQ_THRESH
+#define CUSTOM_CPUFREQ_THRESH	DEFAULT_CPUFREQ_THRESH
+#endif /* CUSTOM_CPUFREQ_THRESH */
+#endif /* ENABLE_ADAPTIVE_SCHED */
+
+/* enable HOSTIP cache update from the host side when an eth0:N is up */
+#define AOE_IP_ALIAS_SUPPORT 1
+
+#ifdef BCM_FD_AGGR
+#include <bcm_rpc.h>
+#include <bcm_rpc_tp.h>
+#endif
+#ifdef PROP_TXSTATUS
+#include <wlfc_proto.h>
+#include <dhd_wlfc.h>
+#endif
+
+#include <wl_android.h>
+
+/* Maximum STA per radio */
+#define DHD_MAX_STA     32
+
+
+const uint8 wme_fifo2ac[] = { 0, 1, 2, 3, 1, 1 };
+const uint8 prio2fifo[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
+#define WME_PRIO2AC(prio)  wme_fifo2ac[prio2fifo[(prio)]]
+
+#ifdef ARP_OFFLOAD_SUPPORT
+void aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx);
+static int dhd_inetaddr_notifier_call(struct notifier_block *this,
+	unsigned long event, void *ptr);
+static struct notifier_block dhd_inetaddr_notifier = {
+	.notifier_call = dhd_inetaddr_notifier_call
+};
+/* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
+ * created in kernel notifier link list (with 'next' pointing to itself)
+ */
+static bool dhd_inetaddr_notifier_registered = FALSE;
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+#ifdef CONFIG_IPV6
+static int dhd_inet6addr_notifier_call(struct notifier_block *this,
+	unsigned long event, void *ptr);
+static struct notifier_block dhd_inet6addr_notifier = {
+	.notifier_call = dhd_inet6addr_notifier_call
+};
+/* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
+ * created in kernel notifier link list (with 'next' pointing to itself)
+ */
+static bool dhd_inet6addr_notifier_registered = FALSE;
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
+#include <linux/suspend.h>
+volatile bool dhd_mmc_suspend = FALSE;
+DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
+
+#if defined(OOB_INTR_ONLY)
+extern void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable);
+#endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+static void dhd_hang_process(void *dhd_info, void *event_data, u8 event);
+#endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+MODULE_LICENSE("GPL v2");
+#endif /* LinuxVer */
+
+#include <dhd_bus.h>
+
+#ifdef BCM_FD_AGGR
+#define DBUS_RX_BUFFER_SIZE_DHD(net)	(BCM_RPC_TP_DNGL_AGG_MAX_BYTE)
+#else
+#ifndef PROP_TXSTATUS
+#define DBUS_RX_BUFFER_SIZE_DHD(net)	(net->mtu + net->hard_header_len + dhd->pub.hdrlen)
+#else
+#define DBUS_RX_BUFFER_SIZE_DHD(net)	(net->mtu + net->hard_header_len + dhd->pub.hdrlen + 128)
+#endif
+#endif /* BCM_FD_AGGR */
+
+#ifdef PROP_TXSTATUS
+extern bool dhd_wlfc_skip_fc(void);
+extern void dhd_wlfc_plat_init(void *dhd);
+extern void dhd_wlfc_plat_deinit(void *dhd);
+#endif /* PROP_TXSTATUS */
+
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15)
+const char *
+print_tainted()
+{
+	return "";
+}
+#endif	/* LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) */
+
+/* Linux wireless extension support */
+#if defined(WL_WIRELESS_EXT)
+#include <wl_iw.h>
+extern wl_iw_extra_params_t  g_wl_iw_params;
+#endif /* defined(WL_WIRELESS_EXT) */
+
+#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
+#include <linux/earlysuspend.h>
+#endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */
+
+extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd);
+
+#ifdef PKT_FILTER_SUPPORT
+extern void dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg);
+extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
+extern void dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id);
+#endif
+
+
+#ifdef READ_MACADDR
+extern int dhd_read_macaddr(struct dhd_info *dhd);
+#else
+static inline int dhd_read_macaddr(struct dhd_info *dhd) { return 0; }
+#endif
+#ifdef WRITE_MACADDR
+extern int dhd_write_macaddr(struct ether_addr *mac);
+#else
+static inline int dhd_write_macaddr(struct ether_addr *mac) { return 0; }
+#endif
+
+
+
+static int dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused);
+static struct notifier_block dhd_reboot_notifier = {
+	.notifier_call = dhd_reboot_callback,
+	.priority = 1,
+};
+
+typedef struct dhd_if_event {
+	struct list_head	list;
+	wl_event_data_if_t	event;
+	char			name[IFNAMSIZ+1];
+	uint8			mac[ETHER_ADDR_LEN];
+} dhd_if_event_t;
+
+/* Interface control information */
+typedef struct dhd_if {
+	struct dhd_info *info;			/* back pointer to dhd_info */
+	/* OS/stack specifics */
+	struct net_device *net;
+	int				idx;			/* iface idx in dongle */
+	uint			subunit;		/* subunit */
+	uint8			mac_addr[ETHER_ADDR_LEN];	/* assigned MAC address */
+	bool			set_macaddress;
+	bool			set_multicast;
+	uint8			bssidx;			/* bsscfg index for the interface */
+	bool			attached;		/* Delayed attachment when unset */
+	bool			txflowcontrol;	/* Per interface flow control indicator */
+	char			name[IFNAMSIZ+1]; /* linux interface name */
+	struct net_device_stats stats;
+#ifdef DHD_WMF
+	dhd_wmf_t		wmf;		/* per bsscfg wmf setting */
+#endif /* DHD_WMF */
+#ifdef PCIE_FULL_DONGLE
+	struct list_head sta_list;		/* sll of associated stations */
+#if !defined(BCM_GMAC3)
+	spinlock_t	sta_list_lock;		/* lock for manipulating sll */
+#endif /* ! BCM_GMAC3 */
+#endif /* PCIE_FULL_DONGLE */
+	uint32  ap_isolate;			/* ap-isolation settings */
+} dhd_if_t;
+
+#ifdef WLMEDIA_HTSF
+typedef struct {
+	uint32 low;
+	uint32 high;
+} tsf_t;
+
+typedef struct {
+	uint32 last_cycle;
+	uint32 last_sec;
+	uint32 last_tsf;
+	uint32 coef;     /* scaling factor */
+	uint32 coefdec1; /* first decimal  */
+	uint32 coefdec2; /* second decimal */
+} htsf_t;
+
+typedef struct {
+	uint32 t1;
+	uint32 t2;
+	uint32 t3;
+	uint32 t4;
+} tstamp_t;
+
+static tstamp_t ts[TSMAX];
+static tstamp_t maxdelayts;
+static uint32 maxdelay = 0, tspktcnt = 0, maxdelaypktno = 0;
+
+#endif  /* WLMEDIA_HTSF */
+
+struct ipv6_work_info_t {
+	uint8			if_idx;
+	char			ipv6_addr[16];
+	unsigned long		event;
+};
+
+#ifdef DHD_MEMDUMP
+typedef struct dhd_dump {
+	uint8 *buf;
+	int bufsize;
+} dhd_dump_t;
+#endif /* DHD_MEMDUMP */
+
+/* When Perimeter locks are deployed, any blocking calls must be preceeded
+ * with a PERIM UNLOCK and followed by a PERIM LOCK.
+ * Examples of blocking calls are: schedule_timeout(), down_interruptible(),
+ * wait_event_timeout().
+ */
+
+/* Local private structure (extension of pub) */
+typedef struct dhd_info {
+#if defined(WL_WIRELESS_EXT)
+	wl_iw_t		iw;		/* wireless extensions state (must be first) */
+#endif /* defined(WL_WIRELESS_EXT) */
+	dhd_pub_t pub;
+	dhd_if_t *iflist[DHD_MAX_IFS]; /* for supporting multiple interfaces */
+
+	void *adapter;			/* adapter information, interrupt, fw path etc. */
+	char fw_path[PATH_MAX];		/* path to firmware image */
+	char nv_path[PATH_MAX];		/* path to nvram vars file */
+
+	struct semaphore proto_sem;
+#ifdef PROP_TXSTATUS
+	spinlock_t	wlfc_spinlock;
+
+#endif /* PROP_TXSTATUS */
+#ifdef WLMEDIA_HTSF
+	htsf_t  htsf;
+#endif
+	wait_queue_head_t ioctl_resp_wait;
+	wait_queue_head_t d3ack_wait;
+
+	uint32	default_wd_interval;
+
+	struct timer_list timer;
+	bool wd_timer_valid;
+	struct tasklet_struct tasklet;
+	spinlock_t	sdlock;
+	spinlock_t	txqlock;
+	spinlock_t	dhd_lock;
+
+	struct semaphore sdsem;
+	tsk_ctl_t	thr_dpc_ctl;
+	tsk_ctl_t	thr_wdt_ctl;
+
+	tsk_ctl_t	thr_rxf_ctl;
+	spinlock_t	rxf_lock;
+	bool		rxthread_enabled;
+
+	/* Wakelocks */
+#if defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+	struct wake_lock wl_wifi;   /* Wifi wakelock */
+	struct wake_lock wl_rxwake; /* Wifi rx wakelock */
+	struct wake_lock wl_ctrlwake; /* Wifi ctrl wakelock */
+	struct wake_lock wl_wdwake; /* Wifi wd wakelock */
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	/* net_device interface lock, prevent race conditions among net_dev interface
+	 * calls and wifi_on or wifi_off
+	 */
+	struct mutex dhd_net_if_mutex;
+	struct mutex dhd_suspend_mutex;
+#endif
+	spinlock_t wakelock_spinlock;
+	uint32 wakelock_counter;
+	int wakelock_wd_counter;
+	int wakelock_rx_timeout_enable;
+	int wakelock_ctrl_timeout_enable;
+	bool waive_wakelock;
+	uint32 wakelock_before_waive;
+
+	/* Thread to issue ioctl for multicast */
+	wait_queue_head_t ctrl_wait;
+	atomic_t pend_8021x_cnt;
+	dhd_attach_states_t dhd_state;
+#ifdef SHOW_LOGTRACE
+	dhd_event_log_t event_data;
+#endif /* SHOW_LOGTRACE */
+
+#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
+	struct early_suspend early_suspend;
+#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
+
+#ifdef ARP_OFFLOAD_SUPPORT
+	u32 pend_ipaddr;
+#endif /* ARP_OFFLOAD_SUPPORT */
+#ifdef BCM_FD_AGGR
+	void *rpc_th;
+	void *rpc_osh;
+	struct timer_list rpcth_timer;
+	bool rpcth_timer_active;
+	bool fdaggr;
+#endif
+#ifdef DHDTCPACK_SUPPRESS
+	spinlock_t	tcpack_lock;
+#endif /* DHDTCPACK_SUPPRESS */
+	void			*dhd_deferred_wq;
+#ifdef DEBUG_CPU_FREQ
+	struct notifier_block freq_trans;
+	int __percpu *new_freq;
+#endif
+	unsigned int unit;
+	struct notifier_block pm_notifier;
+#ifdef SAR_SUPPORT
+	struct notifier_block sar_notifier;
+	s32 sar_enable;
+#endif
+} dhd_info_t;
+
+#define DHDIF_FWDER(dhdif)      FALSE
+
+/* Flag to indicate if we should download firmware on driver load */
+uint dhd_download_fw_on_driverload = TRUE;
+
+/* Definitions to provide path to the firmware and nvram
+ * example nvram_path[MOD_PARAM_PATHLEN]="/projects/wlan/nvram.txt"
+ */
+char firmware_path[MOD_PARAM_PATHLEN];
+char nvram_path[MOD_PARAM_PATHLEN];
+
+/* backup buffer for firmware and nvram path */
+char fw_bak_path[MOD_PARAM_PATHLEN];
+char nv_bak_path[MOD_PARAM_PATHLEN];
+
+/* information string to keep firmware, chio, cheip version info visiable from log */
+char info_string[MOD_PARAM_INFOLEN];
+module_param_string(info_string, info_string, MOD_PARAM_INFOLEN, 0444);
+int op_mode = 0;
+int disable_proptx = 0;
+module_param(op_mode, int, 0644);
+extern int wl_control_wl_start(struct net_device *dev);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(BCMLXSDMMC)
+struct semaphore dhd_registration_sem;
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
+
+/* deferred handlers */
+static void dhd_ifadd_event_handler(void *handle, void *event_info, u8 event);
+static void dhd_ifdel_event_handler(void *handle, void *event_info, u8 event);
+static void dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event);
+static void dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event);
+#ifdef CONFIG_IPV6
+static void dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event);
+#endif
+
+#ifdef WL_CFG80211
+extern void dhd_netdev_free(struct net_device *ndev);
+#endif /* WL_CFG80211 */
+
+/* Error bits */
+module_param(dhd_msg_level, int, 0);
+
+#ifdef ARP_OFFLOAD_SUPPORT
+/* ARP offload enable */
+uint dhd_arp_enable = TRUE;
+module_param(dhd_arp_enable, uint, 0);
+
+/* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */
+
+uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY;
+
+module_param(dhd_arp_mode, uint, 0);
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+/* Disable Prop tx */
+module_param(disable_proptx, int, 0644);
+/* load firmware and/or nvram values from the filesystem */
+module_param_string(firmware_path, firmware_path, MOD_PARAM_PATHLEN, 0660);
+module_param_string(nvram_path, nvram_path, MOD_PARAM_PATHLEN, 0660);
+
+/* Watchdog interval */
+
+/* extend watchdog expiration to 2 seconds when DPC is running */
+#define WATCHDOG_EXTEND_INTERVAL (2000)
+
+uint dhd_watchdog_ms = CUSTOM_DHD_WATCHDOG_MS;
+module_param(dhd_watchdog_ms, uint, 0);
+
+#if defined(DHD_DEBUG)
+/* Console poll interval */
+uint dhd_console_ms = 0;
+module_param(dhd_console_ms, uint, 0644);
+#endif /* defined(DHD_DEBUG) */
+
+
+uint dhd_slpauto = TRUE;
+module_param(dhd_slpauto, uint, 0);
+
+#ifdef PKT_FILTER_SUPPORT
+/* Global Pkt filter enable control */
+uint dhd_pkt_filter_enable = TRUE;
+module_param(dhd_pkt_filter_enable, uint, 0);
+#endif
+
+/* Pkt filter init setup */
+uint dhd_pkt_filter_init = 0;
+module_param(dhd_pkt_filter_init, uint, 0);
+
+/* Pkt filter mode control */
+uint dhd_master_mode = TRUE;
+module_param(dhd_master_mode, uint, 0);
+
+int dhd_watchdog_prio = 0;
+module_param(dhd_watchdog_prio, int, 0);
+
+/* DPC thread priority */
+int dhd_dpc_prio = CUSTOM_DPC_PRIO_SETTING;
+module_param(dhd_dpc_prio, int, 0);
+
+/* RX frame thread priority */
+int dhd_rxf_prio = CUSTOM_RXF_PRIO_SETTING;
+module_param(dhd_rxf_prio, int, 0);
+
+#if !defined(BCMDHDUSB)
+extern int dhd_dongle_ramsize;
+module_param(dhd_dongle_ramsize, int, 0);
+#endif /* BCMDHDUSB */
+
+/* Keep track of number of instances */
+static int dhd_found = 0;
+static int instance_base = 0; /* Starting instance number */
+module_param(instance_base, int, 0644);
+
+
+/* DHD Perimiter lock only used in router with bypass forwarding. */
+#define DHD_PERIM_RADIO_INIT()              do { /* noop */ } while (0)
+#define DHD_PERIM_LOCK_TRY(unit, flag)      do { /* noop */ } while (0)
+#define DHD_PERIM_UNLOCK_TRY(unit, flag)    do { /* noop */ } while (0)
+#define DHD_PERIM_LOCK_ALL()                do { /* noop */ } while (0)
+#define DHD_PERIM_UNLOCK_ALL()              do { /* noop */ } while (0)
+
+#ifdef PCIE_FULL_DONGLE
+#if defined(BCM_GMAC3)
+#define DHD_IF_STA_LIST_LOCK_INIT(ifp)      do { /* noop */ } while (0)
+#define DHD_IF_STA_LIST_LOCK(ifp, flags)    ({ BCM_REFERENCE(flags); })
+#define DHD_IF_STA_LIST_UNLOCK(ifp, flags)  ({ BCM_REFERENCE(flags); })
+#else /* ! BCM_GMAC3 */
+#define DHD_IF_STA_LIST_LOCK_INIT(ifp) spin_lock_init(&(ifp)->sta_list_lock)
+#define DHD_IF_STA_LIST_LOCK(ifp, flags) \
+	spin_lock_irqsave(&(ifp)->sta_list_lock, (flags))
+#define DHD_IF_STA_LIST_UNLOCK(ifp, flags) \
+	spin_unlock_irqrestore(&(ifp)->sta_list_lock, (flags))
+#endif /* ! BCM_GMAC3 */
+#endif /* PCIE_FULL_DONGLE */
+
+/* Control fw roaming */
+uint dhd_roam_disable = 0;
+
+/* Control radio state */
+uint dhd_radio_up = 1;
+
+/* Network inteface name */
+char iface_name[IFNAMSIZ] = {'\0'};
+module_param_string(iface_name, iface_name, IFNAMSIZ, 0);
+
+/* The following are specific to the SDIO dongle */
+
+/* IOCTL response timeout */
+int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT;
+
+/* Idle timeout for backplane clock */
+int dhd_idletime = DHD_IDLETIME_TICKS;
+module_param(dhd_idletime, int, 0);
+
+/* Use polling */
+uint dhd_poll = FALSE;
+module_param(dhd_poll, uint, 0);
+
+/* Use interrupts */
+uint dhd_intr = TRUE;
+module_param(dhd_intr, uint, 0);
+
+/* SDIO Drive Strength (in milliamps) */
+uint dhd_sdiod_drive_strength = 6;
+module_param(dhd_sdiod_drive_strength, uint, 0);
+
+#ifdef BCMSDIO
+/* Tx/Rx bounds */
+extern uint dhd_txbound;
+extern uint dhd_rxbound;
+module_param(dhd_txbound, uint, 0);
+module_param(dhd_rxbound, uint, 0);
+
+/* Deferred transmits */
+extern uint dhd_deferred_tx;
+module_param(dhd_deferred_tx, uint, 0);
+
+#ifdef BCMDBGFS
+extern void dhd_dbg_init(dhd_pub_t *dhdp);
+extern void dhd_dbg_remove(void);
+#endif /* BCMDBGFS */
+
+#endif /* BCMSDIO */
+
+
+#ifdef SDTEST
+/* Echo packet generator (pkts/s) */
+uint dhd_pktgen = 0;
+module_param(dhd_pktgen, uint, 0);
+
+/* Echo packet len (0 => sawtooth, max 2040) */
+uint dhd_pktgen_len = 0;
+module_param(dhd_pktgen_len, uint, 0);
+#endif /* SDTEST */
+
+
+extern char dhd_version[];
+
+int dhd_net_bus_devreset(struct net_device *dev, uint8 flag);
+static void dhd_net_if_lock_local(dhd_info_t *dhd);
+static void dhd_net_if_unlock_local(dhd_info_t *dhd);
+static void dhd_suspend_lock(dhd_pub_t *dhdp);
+static void dhd_suspend_unlock(dhd_pub_t *dhdp);
+
+#ifdef WLMEDIA_HTSF
+void htsf_update(dhd_info_t *dhd, void *data);
+tsf_t prev_tsf, cur_tsf;
+
+uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx);
+static int dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx);
+static void dhd_dump_latency(void);
+static void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf);
+static void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf);
+static void dhd_dump_htsfhisto(histo_t *his, char *s);
+#endif /* WLMEDIA_HTSF */
+
+/* Monitor interface */
+int dhd_monitor_init(void *dhd_pub);
+int dhd_monitor_uninit(void);
+
+
+#if defined(WL_WIRELESS_EXT)
+struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
+#endif /* defined(WL_WIRELESS_EXT) */
+
+static void dhd_dpc(ulong data);
+/* forward decl */
+extern int dhd_wait_pend8021x(struct net_device *dev);
+void dhd_os_wd_timer_extend(void *bus, bool extend);
+
+#ifdef TOE
+#ifndef BDC
+#error TOE requires BDC
+#endif /* !BDC */
+static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol);
+static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol);
+#endif /* TOE */
+
+static int dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata, size_t pktlen,
+                             wl_event_msg_t *event_ptr, void **data_ptr);
+#ifdef DHD_UNICAST_DHCP
+static const uint8 llc_snap_hdr[SNAP_HDR_LEN] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
+static int dhd_get_pkt_ip_type(dhd_pub_t *dhd, void *skb, uint8 **data_ptr,
+	int *len_ptr, uint8 *prot_ptr);
+static int dhd_get_pkt_ether_type(dhd_pub_t *dhd, void *skb, uint8 **data_ptr,
+	int *len_ptr, uint16 *et_ptr, bool *snap_ptr);
+
+static int dhd_convert_dhcp_broadcast_ack_to_unicast(dhd_pub_t *pub, void *pktbuf, int ifidx);
+#endif /* DHD_UNICAST_DHCP */
+#ifdef DHD_L2_FILTER
+static int dhd_l2_filter_block_ping(dhd_pub_t *pub, void *pktbuf, int ifidx);
+#endif
+#if defined(CONFIG_PM_SLEEP)
+static int dhd_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored)
+{
+	int ret = NOTIFY_DONE;
+	bool suspend = FALSE;
+	dhd_info_t *dhdinfo = (dhd_info_t*)container_of(nfb, struct dhd_info, pm_notifier);
+
+	BCM_REFERENCE(dhdinfo);
+	switch (action) {
+	case PM_HIBERNATION_PREPARE:
+	case PM_SUSPEND_PREPARE:
+		suspend = TRUE;
+		break;
+	case PM_POST_HIBERNATION:
+	case PM_POST_SUSPEND:
+		suspend = FALSE;
+		break;
+	}
+
+#if defined(SUPPORT_P2P_GO_PS)
+#ifdef PROP_TXSTATUS
+	if (suspend) {
+		DHD_OS_WAKE_LOCK_WAIVE(&dhdinfo->pub);
+		dhd_wlfc_suspend(&dhdinfo->pub);
+		DHD_OS_WAKE_LOCK_RESTORE(&dhdinfo->pub);
+	} else
+		dhd_wlfc_resume(&dhdinfo->pub);
+#endif
+#endif /* defined(SUPPORT_P2P_GO_PS) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
+	KERNEL_VERSION(2, 6, 39))
+	dhd_mmc_suspend = suspend;
+	smp_mb();
+#endif
+
+	return ret;
+}
+
+static struct notifier_block dhd_pm_notifier = {
+	.notifier_call = dhd_pm_callback,
+	.priority = 10
+};
+/* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
+ * created in kernel notifier link list (with 'next' pointing to itself)
+ */
+static bool dhd_pm_notifier_registered = FALSE;
+
+extern int register_pm_notifier(struct notifier_block *nb);
+extern int unregister_pm_notifier(struct notifier_block *nb);
+#endif /* CONFIG_PM_SLEEP */
+
+/* Request scheduling of the bus rx frame */
+static void dhd_sched_rxf(dhd_pub_t *dhdp, void *skb);
+static void dhd_os_rxflock(dhd_pub_t *pub);
+static void dhd_os_rxfunlock(dhd_pub_t *pub);
+
+/** priv_link is the link between netdev and the dhdif and dhd_info structs. */
+typedef struct dhd_dev_priv {
+	dhd_info_t * dhd; /* cached pointer to dhd_info in netdevice priv */
+	dhd_if_t   * ifp; /* cached pointer to dhd_if in netdevice priv */
+	int          ifidx; /* interface index */
+} dhd_dev_priv_t;
+
+#define DHD_DEV_PRIV_SIZE       (sizeof(dhd_dev_priv_t))
+#define DHD_DEV_PRIV(dev)       ((dhd_dev_priv_t *)DEV_PRIV(dev))
+#define DHD_DEV_INFO(dev)       (((dhd_dev_priv_t *)DEV_PRIV(dev))->dhd)
+#define DHD_DEV_IFP(dev)        (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifp)
+#define DHD_DEV_IFIDX(dev)      (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifidx)
+
+/** Clear the dhd net_device's private structure. */
+static inline void
+dhd_dev_priv_clear(struct net_device * dev)
+{
+	dhd_dev_priv_t * dev_priv;
+	ASSERT(dev != (struct net_device *)NULL);
+	dev_priv = DHD_DEV_PRIV(dev);
+	dev_priv->dhd = (dhd_info_t *)NULL;
+	dev_priv->ifp = (dhd_if_t *)NULL;
+	dev_priv->ifidx = DHD_BAD_IF;
+}
+
+/** Setup the dhd net_device's private structure. */
+static inline void
+dhd_dev_priv_save(struct net_device * dev, dhd_info_t * dhd, dhd_if_t * ifp,
+                  int ifidx)
+{
+	dhd_dev_priv_t * dev_priv;
+	ASSERT(dev != (struct net_device *)NULL);
+	dev_priv = DHD_DEV_PRIV(dev);
+	dev_priv->dhd = dhd;
+	dev_priv->ifp = ifp;
+	dev_priv->ifidx = ifidx;
+}
+#ifdef SAR_SUPPORT
+static int dhd_sar_callback(struct notifier_block *nfb, unsigned long action, void *data)
+{
+	dhd_info_t *dhd = (dhd_info_t*)container_of(nfb, struct dhd_info, sar_notifier);
+	char iovbuf[32];
+	s32 sar_enable;
+	s32 txpower;
+	int ret;
+
+	if (dhd->pub.busstate == DHD_BUS_DOWN)
+		return NOTIFY_DONE;
+
+	if (data) {
+		/* if data != NULL then we expect that the notifier passed
+		 * the exact value of max tx power in quarters of dB.
+		 * qtxpower variable allows us to overwrite TX power.
+		 */
+		txpower = *(s32*)data;
+		if (txpower == -1 || txpower > 127)
+			txpower = 127; /* Max val of 127 qdbm */
+
+		txpower |= WL_TXPWR_OVERRIDE;
+		txpower = htod32(txpower);
+
+		bcm_mkiovar("qtxpower", (char *)&txpower, 4, iovbuf, sizeof(iovbuf));
+		if ((ret = dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_VAR,
+				iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
+			DHD_ERROR(("%s wl qtxpower failed %d\n", __FUNCTION__, ret));
+	} else {
+		/* '1' means activate sarlimit and '0' means back to normal
+		 *  state (deactivate sarlimit)
+		 */
+		sar_enable = action ? 1 : 0;
+		if (dhd->sar_enable == sar_enable)
+			return NOTIFY_DONE;
+		bcm_mkiovar("sar_enable", (char *)&sar_enable, 4, iovbuf, sizeof(iovbuf));
+		if ((ret = dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
+			DHD_ERROR(("%s wl sar_enable %d failed %d\n", __FUNCTION__, sar_enable, ret));
+		else
+			dhd->sar_enable = sar_enable;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static bool dhd_sar_notifier_registered = FALSE;
+
+extern int register_notifier_by_sar(struct notifier_block *nb);
+extern int unregister_notifier_by_sar(struct notifier_block *nb);
+#endif
+
+#ifdef PCIE_FULL_DONGLE
+
+/** Dummy objects are defined with state representing bad|down.
+ * Performance gains from reducing branch conditionals, instruction parallelism,
+ * dual issue, reducing load shadows, avail of larger pipelines.
+ * Use DHD_XXX_NULL instead of (dhd_xxx_t *)NULL, whenever an object pointer
+ * is accessed via the dhd_sta_t.
+ */
+
+/* Dummy dhd_info object */
+dhd_info_t dhd_info_null = {
+#if defined(BCM_GMAC3)
+	.fwdh = FWDER_NULL,
+#endif
+	.pub = {
+	         .info = &dhd_info_null,
+#ifdef DHDTCPACK_SUPPRESS
+	         .tcpack_sup_mode = TCPACK_SUP_REPLACE,
+#endif /* DHDTCPACK_SUPPRESS */
+	         .up = FALSE, .busstate = DHD_BUS_DOWN
+	}
+};
+#define DHD_INFO_NULL (&dhd_info_null)
+#define DHD_PUB_NULL  (&dhd_info_null.pub)
+
+/* Dummy netdevice object */
+struct net_device dhd_net_dev_null = {
+	.reg_state = NETREG_UNREGISTERED
+};
+#define DHD_NET_DEV_NULL (&dhd_net_dev_null)
+
+/* Dummy dhd_if object */
+dhd_if_t dhd_if_null = {
+#if defined(BCM_GMAC3)
+	.fwdh = FWDER_NULL,
+#endif
+#ifdef WMF
+	.wmf = { .wmf_enable = TRUE },
+#endif
+	.info = DHD_INFO_NULL,
+	.net = DHD_NET_DEV_NULL,
+	.idx = DHD_BAD_IF
+};
+#define DHD_IF_NULL  (&dhd_if_null)
+
+#define DHD_STA_NULL ((dhd_sta_t *)NULL)
+
+/** Interface STA list management. */
+
+/** Fetch the dhd_if object, given the interface index in the dhd. */
+static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx);
+
+/** Alloc/Free a dhd_sta object from the dhd instances' sta_pool. */
+static void dhd_sta_free(dhd_pub_t *pub, dhd_sta_t *sta);
+static dhd_sta_t * dhd_sta_alloc(dhd_pub_t * dhdp);
+
+/* Delete a dhd_sta or flush all dhd_sta in an interface's sta_list. */
+static void dhd_if_del_sta_list(dhd_if_t * ifp);
+static void	dhd_if_flush_sta(dhd_if_t * ifp);
+
+/* Construct/Destruct a sta pool. */
+static int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta);
+static void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta);
+static void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta);
+
+
+/* Return interface pointer */
+static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx)
+{
+	ASSERT(ifidx < DHD_MAX_IFS);
+	if (ifidx >= DHD_MAX_IFS) {
+		return NULL;
+	}
+	return dhdp->info->iflist[ifidx];
+}
+
+/** Reset a dhd_sta object and free into the dhd pool. */
+static void
+dhd_sta_free(dhd_pub_t * dhdp, dhd_sta_t * sta)
+{
+	int prio;
+
+	ASSERT((sta != DHD_STA_NULL) && (sta->idx != ID16_INVALID));
+
+	ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));
+	id16_map_free(dhdp->staid_allocator, sta->idx);
+	for (prio = 0; prio < (int)NUMPRIO; prio++)
+		sta->flowid[prio] = FLOWID_INVALID;
+	sta->ifp = DHD_IF_NULL; /* dummy dhd_if object */
+	sta->ifidx = DHD_BAD_IF;
+	bzero(sta->ea.octet, ETHER_ADDR_LEN);
+	INIT_LIST_HEAD(&sta->list);
+	sta->idx = ID16_INVALID; /* implying free */
+}
+
+/** Allocate a dhd_sta object from the dhd pool. */
+static dhd_sta_t *
+dhd_sta_alloc(dhd_pub_t * dhdp)
+{
+	uint16 idx;
+	dhd_sta_t * sta;
+	dhd_sta_pool_t * sta_pool;
+
+	ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));
+
+	idx = id16_map_alloc(dhdp->staid_allocator);
+	if (idx == ID16_INVALID) {
+		DHD_ERROR(("%s: cannot get free staid\n", __FUNCTION__));
+		return DHD_STA_NULL;
+	}
+
+	sta_pool = (dhd_sta_pool_t *)(dhdp->sta_pool);
+	sta = &sta_pool[idx];
+
+	ASSERT((sta->idx == ID16_INVALID) &&
+	       (sta->ifp == DHD_IF_NULL) && (sta->ifidx == DHD_BAD_IF));
+	sta->idx = idx; /* implying allocated */
+
+	return sta;
+}
+
+/** Delete all STAs in an interface's STA list. */
+static void
+dhd_if_del_sta_list(dhd_if_t *ifp)
+{
+	dhd_sta_t *sta, *next;
+	unsigned long flags;
+
+	DHD_IF_STA_LIST_LOCK(ifp, flags);
+
+	list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
+#if defined(BCM_GMAC3)
+		if (ifp->fwdh) {
+			/* Remove sta from WOFA forwarder. */
+			fwder_deassoc(ifp->fwdh, (uint16 *)(sta->ea.octet), (wofa_t)sta);
+		}
+#endif /* BCM_GMAC3 */
+		list_del(&sta->list);
+		dhd_sta_free(&ifp->info->pub, sta);
+	}
+
+	DHD_IF_STA_LIST_UNLOCK(ifp, flags);
+
+	return;
+}
+
+/** Router/GMAC3: Flush all station entries in the forwarder's WOFA database. */
+static void
+dhd_if_flush_sta(dhd_if_t * ifp)
+{
+#if defined(BCM_GMAC3)
+
+	if (ifp && (ifp->fwdh != FWDER_NULL)) {
+		dhd_sta_t *sta, *next;
+		unsigned long flags;
+
+		DHD_IF_STA_LIST_LOCK(ifp, flags);
+
+		list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
+			/* Remove any sta entry from WOFA forwarder. */
+			fwder_flush(ifp->fwdh, (wofa_t)sta);
+		}
+
+		DHD_IF_STA_LIST_UNLOCK(ifp, flags);
+	}
+#endif /* BCM_GMAC3 */
+}
+
+/** Construct a pool of dhd_sta_t objects to be used by interfaces. */
+static int
+dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta)
+{
+	int idx, sta_pool_memsz;
+	dhd_sta_t * sta;
+	dhd_sta_pool_t * sta_pool;
+	void * staid_allocator;
+
+	ASSERT(dhdp != (dhd_pub_t *)NULL);
+	ASSERT((dhdp->staid_allocator == NULL) && (dhdp->sta_pool == NULL));
+
+	/* dhd_sta objects per radio are managed in a table. id#0 reserved. */
+	staid_allocator = id16_map_init(dhdp->osh, max_sta, 1);
+	if (staid_allocator == NULL) {
+		DHD_ERROR(("%s: sta id allocator init failure\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	/* Pre allocate a pool of dhd_sta objects (one extra). */
+	sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t)); /* skip idx 0 */
+	sta_pool = (dhd_sta_pool_t *)MALLOC(dhdp->osh, sta_pool_memsz);
+	if (sta_pool == NULL) {
+		DHD_ERROR(("%s: sta table alloc failure\n", __FUNCTION__));
+		id16_map_fini(dhdp->osh, staid_allocator);
+		return BCME_ERROR;
+	}
+
+	dhdp->sta_pool = sta_pool;
+	dhdp->staid_allocator = staid_allocator;
+
+	/* Initialize all sta(s) for the pre-allocated free pool. */
+	bzero((uchar *)sta_pool, sta_pool_memsz);
+	for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
+		sta = &sta_pool[idx];
+		sta->idx = id16_map_alloc(staid_allocator);
+		ASSERT(sta->idx <= max_sta);
+	}
+	/* Now place them into the pre-allocated free pool. */
+	for (idx = 1; idx <= max_sta; idx++) {
+		sta = &sta_pool[idx];
+		dhd_sta_free(dhdp, sta);
+	}
+
+	return BCME_OK;
+}
+
+/** Destruct the pool of dhd_sta_t objects.
+ * Caller must ensure that no STA objects are currently associated with an if.
+ */
+static void
+dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta)
+{
+	dhd_sta_pool_t * sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
+
+	if (sta_pool) {
+		int idx;
+		int sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
+		for (idx = 1; idx <= max_sta; idx++) {
+			ASSERT(sta_pool[idx].ifp == DHD_IF_NULL);
+			ASSERT(sta_pool[idx].idx == ID16_INVALID);
+		}
+		MFREE(dhdp->osh, dhdp->sta_pool, sta_pool_memsz);
+		dhdp->sta_pool = NULL;
+	}
+
+	id16_map_fini(dhdp->osh, dhdp->staid_allocator);
+	dhdp->staid_allocator = NULL;
+}
+
+
+
+/* Clear the pool of dhd_sta_t objects for built-in type driver */
+static void
+dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta)
+{
+	int idx, sta_pool_memsz;
+	dhd_sta_t * sta;
+	dhd_sta_pool_t * sta_pool;
+	void *staid_allocator;
+
+	if (!dhdp) {
+		DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
+	staid_allocator = dhdp->staid_allocator;
+
+	if (!sta_pool) {
+		DHD_ERROR(("%s: sta_pool is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	if (!staid_allocator) {
+		DHD_ERROR(("%s: staid_allocator is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	/* clear free pool */
+	sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
+	bzero((uchar *)sta_pool, sta_pool_memsz);
+
+	/* dhd_sta objects per radio are managed in a table. id#0 reserved. */
+	id16_map_clear(staid_allocator, max_sta, 1);
+
+	/* Initialize all sta(s) for the pre-allocated free pool. */
+	for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
+		sta = &sta_pool[idx];
+		sta->idx = id16_map_alloc(staid_allocator);
+		ASSERT(sta->idx <= max_sta);
+	}
+	/* Now place them into the pre-allocated free pool. */
+	for (idx = 1; idx <= max_sta; idx++) {
+		sta = &sta_pool[idx];
+		dhd_sta_free(dhdp, sta);
+	}
+}
+
+
+/** Find STA with MAC address ea in an interface's STA list. */
+dhd_sta_t *
+dhd_find_sta(void *pub, int ifidx, void *ea)
+{
+	dhd_sta_t *sta, *next;
+	dhd_if_t *ifp;
+	unsigned long flags;
+
+	ASSERT(ea != NULL);
+	ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
+	if (ifp == NULL)
+		return DHD_STA_NULL;
+
+	DHD_IF_STA_LIST_LOCK(ifp, flags);
+
+	list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
+		if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
+			DHD_IF_STA_LIST_UNLOCK(ifp, flags);
+			return sta;
+		}
+	}
+
+	DHD_IF_STA_LIST_UNLOCK(ifp, flags);
+
+	return DHD_STA_NULL;
+}
+
+/** Add STA into the interface's STA list. */
+dhd_sta_t *
+dhd_add_sta(void *pub, int ifidx, void *ea)
+{
+	dhd_sta_t *sta;
+	dhd_if_t *ifp;
+	unsigned long flags;
+
+	ASSERT(ea != NULL);
+	ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
+	if (ifp == NULL)
+		return DHD_STA_NULL;
+
+	sta = dhd_sta_alloc((dhd_pub_t *)pub);
+	if (sta == DHD_STA_NULL) {
+		DHD_ERROR(("%s: Alloc failed\n", __FUNCTION__));
+		return DHD_STA_NULL;
+	}
+
+	memcpy(sta->ea.octet, ea, ETHER_ADDR_LEN);
+
+	/* link the sta and the dhd interface */
+	sta->ifp = ifp;
+	sta->ifidx = ifidx;
+	INIT_LIST_HEAD(&sta->list);
+
+	DHD_IF_STA_LIST_LOCK(ifp, flags);
+
+	list_add_tail(&sta->list, &ifp->sta_list);
+
+#if defined(BCM_GMAC3)
+	if (ifp->fwdh) {
+		ASSERT(ISALIGNED(ea, 2));
+		/* Add sta to WOFA forwarder. */
+		fwder_reassoc(ifp->fwdh, (uint16 *)ea, (wofa_t)sta);
+	}
+#endif /* BCM_GMAC3 */
+
+	DHD_IF_STA_LIST_UNLOCK(ifp, flags);
+
+	return sta;
+}
+
+/** Delete STA from the interface's STA list. */
+void
+dhd_del_sta(void *pub, int ifidx, void *ea)
+{
+	dhd_sta_t *sta, *next;
+	dhd_if_t *ifp;
+	unsigned long flags;
+
+	ASSERT(ea != NULL);
+	ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
+	if (ifp == NULL)
+		return;
+
+	DHD_IF_STA_LIST_LOCK(ifp, flags);
+
+	list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
+		if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
+#if defined(BCM_GMAC3)
+			if (ifp->fwdh) { /* Found a sta, remove from WOFA forwarder. */
+				ASSERT(ISALIGNED(ea, 2));
+				fwder_deassoc(ifp->fwdh, (uint16 *)ea, (wofa_t)sta);
+			}
+#endif /* BCM_GMAC3 */
+			list_del(&sta->list);
+			dhd_sta_free(&ifp->info->pub, sta);
+		}
+	}
+
+	DHD_IF_STA_LIST_UNLOCK(ifp, flags);
+
+	return;
+}
+
+/** Add STA if it doesn't exist. Not reentrant. */
+dhd_sta_t*
+dhd_findadd_sta(void *pub, int ifidx, void *ea)
+{
+	dhd_sta_t *sta;
+
+	sta = dhd_find_sta(pub, ifidx, ea);
+
+	if (!sta) {
+		/* Add entry */
+		sta = dhd_add_sta(pub, ifidx, ea);
+	}
+
+	return sta;
+}
+#else
+static inline void dhd_if_flush_sta(dhd_if_t * ifp) { }
+static inline void dhd_if_del_sta_list(dhd_if_t *ifp) {}
+static inline int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta) { return BCME_OK; }
+static inline void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta) {}
+dhd_sta_t *dhd_findadd_sta(void *pub, int ifidx, void *ea) { return NULL; }
+void dhd_del_sta(void *pub, int ifidx, void *ea) {}
+#endif /* PCIE_FULL_DONGLE */
+
+
+/* Returns dhd iflist index correspondig the the bssidx provided by apps */
+int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx)
+{
+	dhd_if_t *ifp;
+	dhd_info_t *dhd = dhdp->info;
+	int i;
+
+	ASSERT(bssidx < DHD_MAX_IFS);
+	ASSERT(dhdp);
+
+	for (i = 0; i < DHD_MAX_IFS; i++) {
+		ifp = dhd->iflist[i];
+		if (ifp && (ifp->bssidx == bssidx)) {
+			DHD_TRACE(("Index manipulated for %s from %d to %d\n",
+				ifp->name, bssidx, i));
+			break;
+		}
+	}
+	return i;
+}
+
+static inline int dhd_rxf_enqueue(dhd_pub_t *dhdp, void* skb)
+{
+	uint32 store_idx;
+	uint32 sent_idx;
+
+	if (!skb) {
+		DHD_ERROR(("dhd_rxf_enqueue: NULL skb!!!\n"));
+		return BCME_ERROR;
+	}
+
+	dhd_os_rxflock(dhdp);
+	store_idx = dhdp->store_idx;
+	sent_idx = dhdp->sent_idx;
+	if (dhdp->skbbuf[store_idx] != NULL) {
+		/* Make sure the previous packets are processed */
+		dhd_os_rxfunlock(dhdp);
+#ifdef RXF_DEQUEUE_ON_BUSY
+		DHD_TRACE(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
+			skb, store_idx, sent_idx));
+		return BCME_BUSY;
+#else /* RXF_DEQUEUE_ON_BUSY */
+		DHD_ERROR(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
+			skb, store_idx, sent_idx));
+		/* removed msleep here, should use wait_event_timeout if we
+		 * want to give rx frame thread a chance to run
+		 */
+#if defined(WAIT_DEQUEUE)
+		OSL_SLEEP(1);
+#endif
+		return BCME_ERROR;
+#endif /* RXF_DEQUEUE_ON_BUSY */
+	}
+	DHD_TRACE(("dhd_rxf_enqueue: Store SKB %p. idx %d -> %d\n",
+		skb, store_idx, (store_idx + 1) & (MAXSKBPEND - 1)));
+	dhdp->skbbuf[store_idx] = skb;
+	dhdp->store_idx = (store_idx + 1) & (MAXSKBPEND - 1);
+	dhd_os_rxfunlock(dhdp);
+
+	return BCME_OK;
+}
+
+static inline void* dhd_rxf_dequeue(dhd_pub_t *dhdp)
+{
+	uint32 store_idx;
+	uint32 sent_idx;
+	void *skb;
+
+	dhd_os_rxflock(dhdp);
+
+	store_idx = dhdp->store_idx;
+	sent_idx = dhdp->sent_idx;
+	skb = dhdp->skbbuf[sent_idx];
+
+	if (skb == NULL) {
+		dhd_os_rxfunlock(dhdp);
+		DHD_ERROR(("dhd_rxf_dequeue: Dequeued packet is NULL, store idx %d sent idx %d\n",
+			store_idx, sent_idx));
+		return NULL;
+	}
+
+	dhdp->skbbuf[sent_idx] = NULL;
+	dhdp->sent_idx = (sent_idx + 1) & (MAXSKBPEND - 1);
+
+	DHD_TRACE(("dhd_rxf_dequeue: netif_rx_ni(%p), sent idx %d\n",
+		skb, sent_idx));
+
+	dhd_os_rxfunlock(dhdp);
+
+	return skb;
+}
+
+int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost)
+{
+	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+
+	if (prepost) { /* pre process */
+		dhd_read_macaddr(dhd);
+	} else { /* post process */
+		dhd_write_macaddr(&dhd->pub.mac);
+	}
+
+	return 0;
+}
+
+#if defined(PKT_FILTER_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER)
+static bool
+_turn_on_arp_filter(dhd_pub_t *dhd, int op_mode)
+{
+	bool _apply = FALSE;
+	/* In case of IBSS mode, apply arp pkt filter */
+	if (op_mode & DHD_FLAG_IBSS_MODE) {
+		_apply = TRUE;
+		goto exit;
+	}
+	/* In case of P2P GO or GC, apply pkt filter to pass arp pkt to host */
+	if ((dhd->arp_version == 1) &&
+		(op_mode & (DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE))) {
+		_apply = TRUE;
+		goto exit;
+	}
+
+exit:
+	return _apply;
+}
+#endif /* PKT_FILTER_SUPPORT && !GAN_LITE_NAT_KEEPALIVE_FILTER */
+
+void dhd_set_packet_filter(dhd_pub_t *dhd)
+{
+#ifdef PKT_FILTER_SUPPORT
+	int i;
+
+	DHD_TRACE(("%s: enter\n", __FUNCTION__));
+	if (dhd_pkt_filter_enable) {
+		for (i = 0; i < dhd->pktfilter_count; i++) {
+			dhd_pktfilter_offload_set(dhd, dhd->pktfilter[i]);
+		}
+	}
+#endif /* PKT_FILTER_SUPPORT */
+}
+
+void dhd_enable_packet_filter(int value, dhd_pub_t *dhd)
+{
+#ifdef PKT_FILTER_SUPPORT
+	int i;
+
+	DHD_TRACE(("%s: enter, value = %d\n", __FUNCTION__, value));
+	/* 1 - Enable packet filter, only allow unicast packet to send up */
+	/* 0 - Disable packet filter */
+	if (dhd_pkt_filter_enable && (!value ||
+	    (dhd_support_sta_mode(dhd) && !dhd->dhcp_in_progress)))
+	    {
+		for (i = 0; i < dhd->pktfilter_count; i++) {
+#ifndef GAN_LITE_NAT_KEEPALIVE_FILTER
+			if (value && (i == DHD_ARP_FILTER_NUM) &&
+				!_turn_on_arp_filter(dhd, dhd->op_mode)) {
+				DHD_TRACE(("Do not turn on ARP white list pkt filter:"
+					"val %d, cnt %d, op_mode 0x%x\n",
+					value, i, dhd->op_mode));
+				continue;
+			}
+#endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
+			dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i],
+				value, dhd_master_mode);
+		}
+	}
+#endif /* PKT_FILTER_SUPPORT */
+}
+
+static int dhd_set_suspend(int value, dhd_pub_t *dhd)
+{
+#ifndef SUPPORT_PM2_ONLY
+	int power_mode = PM_MAX;
+#endif /* SUPPORT_PM2_ONLY */
+	/* wl_pkt_filter_enable_t	enable_parm; */
+	char iovbuf[32];
+	int bcn_li_dtim = 0; /* Default bcn_li_dtim in resume mode is 0 */
+#ifndef ENABLE_FW_ROAM_SUSPEND
+	uint roamvar = 1;
+#endif /* ENABLE_FW_ROAM_SUSPEND */
+	uint nd_ra_filter = 0;
+	int ret = 0;
+
+	if (!dhd)
+		return -ENODEV;
+
+	DHD_TRACE(("%s: enter, value = %d in_suspend=%d\n",
+		__FUNCTION__, value, dhd->in_suspend));
+
+	dhd_suspend_lock(dhd);
+
+#ifdef CUSTOM_SET_CPUCORE
+	DHD_TRACE(("%s set cpucore(suspend%d)\n", __FUNCTION__, value));
+	/* set specific cpucore */
+	dhd_set_cpucore(dhd, TRUE);
+#endif /* CUSTOM_SET_CPUCORE */
+	if (dhd->up) {
+		if (value && dhd->in_suspend) {
+#ifdef PKT_FILTER_SUPPORT
+				dhd->early_suspended = 1;
+#endif
+				/* Kernel suspended */
+				DHD_ERROR(("%s: force extra Suspend setting \n", __FUNCTION__));
+#ifdef CUSTOM_SET_SHORT_DWELL_TIME
+				dhd_set_short_dwell_time(dhd, TRUE);
+#endif
+#ifndef SUPPORT_PM2_ONLY
+				dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
+				                 sizeof(power_mode), TRUE, 0);
+#endif /* SUPPORT_PM2_ONLY */
+
+				/* Enable packet filter, only allow unicast packet to send up */
+				dhd_enable_packet_filter(1, dhd);
+
+
+				/* If DTIM skip is set up as default, force it to wake
+				 * each third DTIM for better power savings.  Note that
+				 * one side effect is a chance to miss BC/MC packet.
+				 */
+				bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd);
+				bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim,
+					4, iovbuf, sizeof(iovbuf));
+				if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf),
+					TRUE, 0) < 0)
+					DHD_ERROR(("%s: set dtim failed\n", __FUNCTION__));
+
+#ifndef ENABLE_FW_ROAM_SUSPEND
+				/* Disable firmware roaming during suspend */
+				bcm_mkiovar("roam_off", (char *)&roamvar, 4,
+					iovbuf, sizeof(iovbuf));
+				dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+#endif /* ENABLE_FW_ROAM_SUSPEND */
+				if (FW_SUPPORTED(dhd, ndoe)) {
+					/* enable IPv6 RA filter in  firmware during suspend */
+					nd_ra_filter = 1;
+					bcm_mkiovar("nd_ra_filter_enable", (char *)&nd_ra_filter, 4,
+						iovbuf, sizeof(iovbuf));
+					if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+						sizeof(iovbuf), TRUE, 0)) < 0)
+						DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
+							ret));
+				}
+			} else {
+#ifdef PKT_FILTER_SUPPORT
+				dhd->early_suspended = 0;
+#endif
+				/* Kernel resumed  */
+				DHD_ERROR(("%s: Remove extra suspend setting \n", __FUNCTION__));
+#ifdef CUSTOM_SET_SHORT_DWELL_TIME
+				dhd_set_short_dwell_time(dhd, FALSE);
+#endif
+#ifndef SUPPORT_PM2_ONLY
+				power_mode = PM_FAST;
+				dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
+				                 sizeof(power_mode), TRUE, 0);
+#endif /* SUPPORT_PM2_ONLY */
+#ifdef PKT_FILTER_SUPPORT
+				/* disable pkt filter */
+				dhd_enable_packet_filter(0, dhd);
+#endif /* PKT_FILTER_SUPPORT */
+
+				/* restore pre-suspend setting for dtim_skip */
+				bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim,
+					4, iovbuf, sizeof(iovbuf));
+
+				dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+#ifndef ENABLE_FW_ROAM_SUSPEND
+				roamvar = dhd_roam_disable;
+				bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf,
+					sizeof(iovbuf));
+				dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+#endif /* ENABLE_FW_ROAM_SUSPEND */
+				if (FW_SUPPORTED(dhd, ndoe)) {
+					/* disable IPv6 RA filter in  firmware during suspend */
+					nd_ra_filter = 0;
+					bcm_mkiovar("nd_ra_filter_enable", (char *)&nd_ra_filter, 4,
+						iovbuf, sizeof(iovbuf));
+					if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+						sizeof(iovbuf), TRUE, 0)) < 0)
+						DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
+							ret));
+				}
+			}
+	}
+	dhd_suspend_unlock(dhd);
+
+	return 0;
+}
+
+static int dhd_suspend_resume_helper(struct dhd_info *dhd, int val, int force)
+{
+	dhd_pub_t *dhdp = &dhd->pub;
+	int ret = 0;
+
+	DHD_OS_WAKE_LOCK(dhdp);
+	DHD_PERIM_LOCK(dhdp);
+
+	/* Set flag when early suspend was called */
+	dhdp->in_suspend = val;
+	if ((force || !dhdp->suspend_disable_flag) &&
+		dhd_support_sta_mode(dhdp))
+	{
+		ret = dhd_set_suspend(val, dhdp);
+	}
+
+	DHD_PERIM_UNLOCK(dhdp);
+	DHD_OS_WAKE_UNLOCK(dhdp);
+	return ret;
+}
+
+#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
+static void dhd_early_suspend(struct early_suspend *h)
+{
+	struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
+	DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
+
+	if (dhd)
+		dhd_suspend_resume_helper(dhd, 1, 0);
+}
+
+static void dhd_late_resume(struct early_suspend *h)
+{
+	struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
+	DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
+
+	if (dhd)
+		dhd_suspend_resume_helper(dhd, 0, 0);
+}
+#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
+
+/*
+ * Generalized timeout mechanism.  Uses spin sleep with exponential back-off until
+ * the sleep time reaches one jiffy, then switches over to task delay.  Usage:
+ *
+ *      dhd_timeout_start(&tmo, usec);
+ *      while (!dhd_timeout_expired(&tmo))
+ *              if (poll_something())
+ *                      break;
+ *      if (dhd_timeout_expired(&tmo))
+ *              fatal();
+ */
+
+void
+dhd_timeout_start(dhd_timeout_t *tmo, uint usec)
+{
+	tmo->limit = usec;
+	tmo->increment = 0;
+	tmo->elapsed = 0;
+	tmo->tick = jiffies_to_usecs(1);
+}
+
+int
+dhd_timeout_expired(dhd_timeout_t *tmo)
+{
+	/* Does nothing the first call */
+	if (tmo->increment == 0) {
+		tmo->increment = 1;
+		return 0;
+	}
+
+	if (tmo->elapsed >= tmo->limit)
+		return 1;
+
+	/* Add the delay that's about to take place */
+	tmo->elapsed += tmo->increment;
+
+	if ((!CAN_SLEEP()) || tmo->increment < tmo->tick) {
+		OSL_DELAY(tmo->increment);
+		tmo->increment *= 2;
+		if (tmo->increment > tmo->tick)
+			tmo->increment = tmo->tick;
+	} else {
+		wait_queue_head_t delay_wait;
+		DECLARE_WAITQUEUE(wait, current);
+		init_waitqueue_head(&delay_wait);
+		add_wait_queue(&delay_wait, &wait);
+		set_current_state(TASK_INTERRUPTIBLE);
+		(void)schedule_timeout(1);
+		remove_wait_queue(&delay_wait, &wait);
+		set_current_state(TASK_RUNNING);
+	}
+
+	return 0;
+}
+
+int
+dhd_net2idx(dhd_info_t *dhd, struct net_device *net)
+{
+	int i = 0;
+
+	ASSERT(dhd);
+	while (i < DHD_MAX_IFS) {
+		if (dhd->iflist[i] && dhd->iflist[i]->net && (dhd->iflist[i]->net == net))
+			return i;
+		i++;
+	}
+
+	return DHD_BAD_IF;
+}
+
+struct net_device * dhd_idx2net(void *pub, int ifidx)
+{
+	struct dhd_pub *dhd_pub = (struct dhd_pub *)pub;
+	struct dhd_info *dhd_info;
+
+	if (!dhd_pub || ifidx < 0 || ifidx >= DHD_MAX_IFS)
+		return NULL;
+	dhd_info = dhd_pub->info;
+	if (dhd_info && dhd_info->iflist[ifidx])
+		return dhd_info->iflist[ifidx]->net;
+	return NULL;
+}
+
+int
+dhd_ifname2idx(dhd_info_t *dhd, char *name)
+{
+	int i = DHD_MAX_IFS;
+
+	ASSERT(dhd);
+
+	if (name == NULL || *name == '\0')
+		return 0;
+
+	while (--i > 0)
+		if (dhd->iflist[i] && !strncmp(dhd->iflist[i]->name, name, IFNAMSIZ))
+				break;
+
+	DHD_TRACE(("%s: return idx %d for \"%s\"\n", __FUNCTION__, i, name));
+
+	return i;	/* default - the primary interface */
+}
+
+int
+dhd_ifidx2hostidx(dhd_info_t *dhd, int ifidx)
+{
+	int i = DHD_MAX_IFS;
+
+	ASSERT(dhd);
+
+	if (ifidx < 0 || ifidx >= DHD_MAX_IFS) {
+		DHD_TRACE(("%s: ifidx %d out of range\n", __FUNCTION__, ifidx));
+		return 0;	/* default - the primary interface */
+	}
+
+	while (--i > 0)
+		if (dhd->iflist[i] && (dhd->iflist[i]->idx == ifidx))
+				break;
+
+	DHD_TRACE(("%s: return hostidx %d for ifidx %d\n", __FUNCTION__, i, ifidx));
+
+	return i;	/* default - the primary interface */
+}
+
+char *
+dhd_ifname(dhd_pub_t *dhdp, int ifidx)
+{
+	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+
+	ASSERT(dhd);
+
+	if (ifidx < 0 || ifidx >= DHD_MAX_IFS) {
+		DHD_ERROR(("%s: ifidx %d out of range\n", __FUNCTION__, ifidx));
+		return "<if_bad>";
+	}
+
+	if (dhd->iflist[ifidx] == NULL) {
+		DHD_ERROR(("%s: null i/f %d\n", __FUNCTION__, ifidx));
+		return "<if_null>";
+	}
+
+	if (dhd->iflist[ifidx]->net)
+		return dhd->iflist[ifidx]->net->name;
+
+	return "<if_none>";
+}
+
+uint8 *
+dhd_bssidx2bssid(dhd_pub_t *dhdp, int idx)
+{
+	int i;
+	dhd_info_t *dhd = (dhd_info_t *)dhdp;
+
+	ASSERT(dhd);
+	for (i = 0; i < DHD_MAX_IFS; i++)
+	if (dhd->iflist[i] && dhd->iflist[i]->bssidx == idx)
+		return dhd->iflist[i]->mac_addr;
+
+	return NULL;
+}
+
+
+static void
+_dhd_set_multicast_list(dhd_info_t *dhd, int ifidx)
+{
+	struct net_device *dev;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
+	struct netdev_hw_addr *ha;
+#else
+	struct dev_mc_list *mclist;
+#endif
+	uint32 allmulti, cnt;
+
+	wl_ioctl_t ioc;
+	char *buf, *bufp;
+	uint buflen;
+	int ret;
+
+			ASSERT(dhd && dhd->iflist[ifidx]);
+			dev = dhd->iflist[ifidx]->net;
+			if (!dev)
+				return;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+			netif_addr_lock_bh(dev);
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
+			cnt = netdev_mc_count(dev);
+#else
+			cnt = dev->mc_count;
+#endif /* LINUX_VERSION_CODE */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+			netif_addr_unlock_bh(dev);
+#endif
+
+			/* Determine initial value of allmulti flag */
+	allmulti = (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;
+
+	/* Send down the multicast list first. */
+
+
+	buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETHER_ADDR_LEN);
+	if (!(bufp = buf = MALLOC(dhd->pub.osh, buflen))) {
+		DHD_ERROR(("%s: out of memory for mcast_list, cnt %d\n",
+		           dhd_ifname(&dhd->pub, ifidx), cnt));
+		return;
+	}
+
+	strncpy(bufp, "mcast_list", buflen - 1);
+	bufp[buflen - 1] = '\0';
+	bufp += strlen("mcast_list") + 1;
+
+	cnt = htol32(cnt);
+	memcpy(bufp, &cnt, sizeof(cnt));
+	bufp += sizeof(cnt);
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+			netif_addr_lock_bh(dev);
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
+			netdev_for_each_mc_addr(ha, dev) {
+				if (!cnt)
+					break;
+				memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
+				bufp += ETHER_ADDR_LEN;
+				cnt--;
+	}
+#else
+	for (mclist = dev->mc_list; (mclist && (cnt > 0));
+		cnt--, mclist = mclist->next) {
+				memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN);
+				bufp += ETHER_ADDR_LEN;
+			}
+#endif /* LINUX_VERSION_CODE */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+			netif_addr_unlock_bh(dev);
+#endif
+
+	memset(&ioc, 0, sizeof(ioc));
+	ioc.cmd = WLC_SET_VAR;
+	ioc.buf = buf;
+	ioc.len = buflen;
+	ioc.set = TRUE;
+
+	ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
+	if (ret < 0) {
+		DHD_ERROR(("%s: set mcast_list failed, cnt %d\n",
+			dhd_ifname(&dhd->pub, ifidx), cnt));
+		allmulti = cnt ? TRUE : allmulti;
+	}
+
+	MFREE(dhd->pub.osh, buf, buflen);
+
+	/* Now send the allmulti setting.  This is based on the setting in the
+	 * net_device flags, but might be modified above to be turned on if we
+	 * were trying to set some addresses and dongle rejected it...
+	 */
+
+	buflen = sizeof("allmulti") + sizeof(allmulti);
+	if (!(buf = MALLOC(dhd->pub.osh, buflen))) {
+		DHD_ERROR(("%s: out of memory for allmulti\n", dhd_ifname(&dhd->pub, ifidx)));
+		return;
+	}
+	allmulti = htol32(allmulti);
+
+	if (!bcm_mkiovar("allmulti", (void*)&allmulti, sizeof(allmulti), buf, buflen)) {
+		DHD_ERROR(("%s: mkiovar failed for allmulti, datalen %d buflen %u\n",
+		           dhd_ifname(&dhd->pub, ifidx), (int)sizeof(allmulti), buflen));
+		MFREE(dhd->pub.osh, buf, buflen);
+		return;
+	}
+
+
+	memset(&ioc, 0, sizeof(ioc));
+	ioc.cmd = WLC_SET_VAR;
+	ioc.buf = buf;
+	ioc.len = buflen;
+	ioc.set = TRUE;
+
+	ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
+	if (ret < 0) {
+		DHD_ERROR(("%s: set allmulti %d failed\n",
+		           dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
+	}
+
+	MFREE(dhd->pub.osh, buf, buflen);
+
+	/* Finally, pick up the PROMISC flag as well, like the NIC driver does */
+
+	allmulti = (dev->flags & IFF_PROMISC) ? TRUE : FALSE;
+
+	allmulti = htol32(allmulti);
+
+	memset(&ioc, 0, sizeof(ioc));
+	ioc.cmd = WLC_SET_PROMISC;
+	ioc.buf = &allmulti;
+	ioc.len = sizeof(allmulti);
+	ioc.set = TRUE;
+
+	ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
+	if (ret < 0) {
+		DHD_ERROR(("%s: set promisc %d failed\n",
+		           dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
+	}
+}
+
+int
+_dhd_set_mac_address(dhd_info_t *dhd, int ifidx, uint8 *addr)
+{
+	char buf[32];
+	wl_ioctl_t ioc;
+	int ret;
+
+	if (!bcm_mkiovar("cur_etheraddr", (char*)addr, ETHER_ADDR_LEN, buf, 32)) {
+		DHD_ERROR(("%s: mkiovar failed for cur_etheraddr\n", dhd_ifname(&dhd->pub, ifidx)));
+		return -1;
+	}
+	memset(&ioc, 0, sizeof(ioc));
+	ioc.cmd = WLC_SET_VAR;
+	ioc.buf = buf;
+	ioc.len = 32;
+	ioc.set = TRUE;
+
+	ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
+	if (ret < 0) {
+		DHD_ERROR(("%s: set cur_etheraddr failed\n", dhd_ifname(&dhd->pub, ifidx)));
+	} else {
+		memcpy(dhd->iflist[ifidx]->net->dev_addr, addr, ETHER_ADDR_LEN);
+		if (ifidx == 0)
+			memcpy(dhd->pub.mac.octet, addr, ETHER_ADDR_LEN);
+	}
+
+	return ret;
+}
+
+#ifdef SOFTAP
+extern struct net_device *ap_net_dev;
+extern tsk_ctl_t ap_eth_ctl; /* ap netdev heper thread ctl */
+#endif
+
+static void
+dhd_ifadd_event_handler(void *handle, void *event_info, u8 event)
+{
+	dhd_info_t *dhd = handle;
+	dhd_if_event_t *if_event = event_info;
+	struct net_device *ndev;
+	int ifidx, bssidx;
+	int ret;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
+	struct wireless_dev *vwdev, *primary_wdev;
+	struct net_device *primary_ndev;
+#endif /* OEM_ANDROID && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */
+
+	if (event != DHD_WQ_WORK_IF_ADD) {
+		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
+		return;
+	}
+
+	if (!dhd) {
+		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
+		return;
+	}
+
+	if (!if_event) {
+		DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
+		return;
+	}
+
+	dhd_net_if_lock_local(dhd);
+	DHD_OS_WAKE_LOCK(&dhd->pub);
+	DHD_PERIM_LOCK(&dhd->pub);
+
+	ifidx = if_event->event.ifidx;
+	bssidx = if_event->event.bssidx;
+	DHD_TRACE(("%s: registering if with ifidx %d\n", __FUNCTION__, ifidx));
+
+	ndev = dhd_allocate_if(&dhd->pub, ifidx, if_event->name,
+		if_event->mac, bssidx, TRUE);
+	if (!ndev) {
+		DHD_ERROR(("%s: net device alloc failed  \n", __FUNCTION__));
+		goto done;
+	}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
+	vwdev = kzalloc(sizeof(*vwdev), GFP_KERNEL);
+	if (unlikely(!vwdev)) {
+		WL_ERR(("Could not allocate wireless device\n"));
+		goto done;
+	}
+	primary_ndev = dhd->pub.info->iflist[0]->net;
+	primary_wdev = ndev_to_wdev(primary_ndev);
+	vwdev->wiphy = primary_wdev->wiphy;
+	vwdev->iftype = if_event->event.role;
+	vwdev->netdev = ndev;
+	ndev->ieee80211_ptr = vwdev;
+	SET_NETDEV_DEV(ndev, wiphy_dev(vwdev->wiphy));
+	DHD_ERROR(("virtual interface(%s) is created\n", if_event->name));
+#endif /* OEM_ANDROID && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */
+
+	DHD_PERIM_UNLOCK(&dhd->pub);
+	ret = dhd_register_if(&dhd->pub, ifidx, TRUE);
+	DHD_PERIM_LOCK(&dhd->pub);
+	if (ret != BCME_OK) {
+		DHD_ERROR(("%s: dhd_register_if failed\n", __FUNCTION__));
+			dhd_remove_if(&dhd->pub, ifidx, TRUE);
+	}
+#ifdef PCIE_FULL_DONGLE
+	/* Turn on AP isolation in the firmware for interfaces operating in AP mode */
+	if (FW_SUPPORTED((&dhd->pub), ap) && !(DHD_IF_ROLE_STA(if_event->event.role))) {
+		char iovbuf[WLC_IOCTL_SMLEN];
+		uint32 var_int =  1;
+
+		memset(iovbuf, 0, sizeof(iovbuf));
+		bcm_mkiovar("ap_isolate", (char *)&var_int, 4, iovbuf, sizeof(iovbuf));
+		dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, ifidx);
+	}
+#endif /* PCIE_FULL_DONGLE */
+done:
+	MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
+
+	DHD_PERIM_UNLOCK(&dhd->pub);
+	DHD_OS_WAKE_UNLOCK(&dhd->pub);
+	dhd_net_if_unlock_local(dhd);
+}
+
+static void
+dhd_ifdel_event_handler(void *handle, void *event_info, u8 event)
+{
+	dhd_info_t *dhd = handle;
+	int ifidx;
+	dhd_if_event_t *if_event = event_info;
+
+
+	if (event != DHD_WQ_WORK_IF_DEL) {
+		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
+		return;
+	}
+
+	if (!dhd) {
+		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
+		return;
+	}
+
+	if (!if_event) {
+		DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
+		return;
+	}
+
+	dhd_net_if_lock_local(dhd);
+	DHD_OS_WAKE_LOCK(&dhd->pub);
+	DHD_PERIM_LOCK(&dhd->pub);
+
+	ifidx = if_event->event.ifidx;
+	DHD_TRACE(("Removing interface with idx %d\n", ifidx));
+
+	dhd_remove_if(&dhd->pub, ifidx, TRUE);
+
+	MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
+
+	DHD_PERIM_UNLOCK(&dhd->pub);
+	DHD_OS_WAKE_UNLOCK(&dhd->pub);
+	dhd_net_if_unlock_local(dhd);
+}
+
+static void
+dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event)
+{
+	dhd_info_t *dhd = handle;
+	dhd_if_t *ifp = event_info;
+
+	if (event != DHD_WQ_WORK_SET_MAC) {
+		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
+	}
+
+	if (!dhd) {
+		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
+		return;
+	}
+
+	dhd_net_if_lock_local(dhd);
+	DHD_OS_WAKE_LOCK(&dhd->pub);
+	DHD_PERIM_LOCK(&dhd->pub);
+
+#ifdef SOFTAP
+	{
+		unsigned long flags;
+		bool in_ap = FALSE;
+		DHD_GENERAL_LOCK(&dhd->pub, flags);
+		in_ap = (ap_net_dev != NULL);
+		DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+
+		if (in_ap)  {
+			DHD_ERROR(("attempt to set MAC for %s in AP Mode, blocked. \n",
+			           ifp->net->name));
+			goto done;
+		}
+	}
+#endif /* SOFTAP */
+
+	if (ifp == NULL || !dhd->pub.up) {
+		DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
+		goto done;
+	}
+
+	DHD_ERROR(("%s: MACID is overwritten\n", __FUNCTION__));
+	ifp->set_macaddress = FALSE;
+	if (_dhd_set_mac_address(dhd, ifp->idx, ifp->mac_addr) == 0)
+		DHD_INFO(("%s: MACID is overwritten\n",	__FUNCTION__));
+	else
+		DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__));
+
+done:
+	DHD_PERIM_UNLOCK(&dhd->pub);
+	DHD_OS_WAKE_UNLOCK(&dhd->pub);
+	dhd_net_if_unlock_local(dhd);
+}
+
+static void
+dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event)
+{
+	dhd_info_t *dhd = handle;
+	dhd_if_t *ifp = event_info;
+	int ifidx;
+
+	if (event != DHD_WQ_WORK_SET_MCAST_LIST) {
+		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
+		return;
+	}
+
+	if (!dhd) {
+		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
+		return;
+	}
+
+	dhd_net_if_lock_local(dhd);
+	DHD_OS_WAKE_LOCK(&dhd->pub);
+	DHD_PERIM_LOCK(&dhd->pub);
+
+#ifdef SOFTAP
+	{
+		bool in_ap = FALSE;
+		unsigned long flags;
+		DHD_GENERAL_LOCK(&dhd->pub, flags);
+		in_ap = (ap_net_dev != NULL);
+		DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+
+		if (in_ap)  {
+			DHD_ERROR(("set MULTICAST list for %s in AP Mode, blocked. \n",
+			           ifp->net->name));
+			ifp->set_multicast = FALSE;
+			goto done;
+		}
+	}
+#endif /* SOFTAP */
+
+	if (ifp == NULL || !dhd->pub.up) {
+		DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
+		goto done;
+	}
+
+	ifidx = ifp->idx;
+
+
+	_dhd_set_multicast_list(dhd, ifidx);
+	DHD_INFO(("%s: set multicast list for if %d\n", __FUNCTION__, ifidx));
+
+done:
+	DHD_PERIM_UNLOCK(&dhd->pub);
+	DHD_OS_WAKE_UNLOCK(&dhd->pub);
+	dhd_net_if_unlock_local(dhd);
+}
+
+static int
+dhd_set_mac_address(struct net_device *dev, void *addr)
+{
+	int ret = 0;
+
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	struct sockaddr *sa = (struct sockaddr *)addr;
+	int ifidx;
+	dhd_if_t *dhdif;
+
+	ifidx = dhd_net2idx(dhd, dev);
+	if (ifidx == DHD_BAD_IF)
+		return -1;
+
+	dhdif = dhd->iflist[ifidx];
+
+	dhd_net_if_lock_local(dhd);
+	memcpy(dhdif->mac_addr, sa->sa_data, ETHER_ADDR_LEN);
+	dhdif->set_macaddress = TRUE;
+	dhd_net_if_unlock_local(dhd);
+	dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhdif, DHD_WQ_WORK_SET_MAC,
+		dhd_set_mac_addr_handler, DHD_WORK_PRIORITY_LOW);
+	return ret;
+}
+
+static void
+dhd_set_multicast_list(struct net_device *dev)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	int ifidx;
+
+	ifidx = dhd_net2idx(dhd, dev);
+	if (ifidx == DHD_BAD_IF)
+		return;
+
+	dhd->iflist[ifidx]->set_multicast = TRUE;
+	dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhd->iflist[ifidx],
+		DHD_WQ_WORK_SET_MCAST_LIST, dhd_set_mcast_list_handler, DHD_WORK_PRIORITY_LOW);
+}
+
+#ifdef PROP_TXSTATUS
+int
+dhd_os_wlfc_block(dhd_pub_t *pub)
+{
+	dhd_info_t *di = (dhd_info_t *)(pub->info);
+	ASSERT(di != NULL);
+	spin_lock_bh(&di->wlfc_spinlock);
+	return 1;
+}
+
+int
+dhd_os_wlfc_unblock(dhd_pub_t *pub)
+{
+	dhd_info_t *di = (dhd_info_t *)(pub->info);
+
+	ASSERT(di != NULL);
+	spin_unlock_bh(&di->wlfc_spinlock);
+	return 1;
+}
+
+#endif /* PROP_TXSTATUS */
+
+#if defined(DHD_8021X_DUMP)
+void
+dhd_tx_dump(osl_t *osh, void *pkt)
+{
+	uint8 *dump_data;
+	uint16 protocol;
+
+	dump_data = PKTDATA(osh, pkt);
+	protocol = (dump_data[12] << 8) | dump_data[13];
+
+	if (protocol == ETHER_TYPE_802_1X) {
+		DHD_ERROR(("ETHER_TYPE_802_1X [TX]: ver %d, type %d, replay %d\n",
+			dump_data[14], dump_data[15], dump_data[30]));
+	}
+}
+#endif /* DHD_8021X_DUMP */
+
+int BCMFASTPATH
+dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
+{
+	int ret = BCME_OK;
+	dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
+	struct ether_header *eh = NULL;
+
+	/* Reject if down */
+	if (!dhdp->up || (dhdp->busstate == DHD_BUS_DOWN)) {
+		/* free the packet here since the caller won't */
+		PKTFREE(dhdp->osh, pktbuf, TRUE);
+		return -ENODEV;
+	}
+
+#ifdef PCIE_FULL_DONGLE
+	if (dhdp->busstate == DHD_BUS_SUSPEND) {
+		DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__));
+		PKTFREE(dhdp->osh, pktbuf, TRUE);
+		return -EBUSY;
+	}
+#endif /* PCIE_FULL_DONGLE */
+
+#ifdef DHD_UNICAST_DHCP
+	/* if dhcp_unicast is enabled, we need to convert the */
+	/* broadcast DHCP ACK/REPLY packets to Unicast. */
+	if (dhdp->dhcp_unicast) {
+	    dhd_convert_dhcp_broadcast_ack_to_unicast(dhdp, pktbuf, ifidx);
+	}
+#endif /* DHD_UNICAST_DHCP */
+	/* Update multicast statistic */
+	if (PKTLEN(dhdp->osh, pktbuf) >= ETHER_HDR_LEN) {
+		uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
+		eh = (struct ether_header *)pktdata;
+
+		if (ETHER_ISMULTI(eh->ether_dhost))
+			dhdp->tx_multicast++;
+		if (ntoh16(eh->ether_type) == ETHER_TYPE_802_1X)
+			atomic_inc(&dhd->pend_8021x_cnt);
+#ifdef DHD_DHCP_DUMP
+		if (ntoh16(eh->ether_type) == ETHER_TYPE_IP) {
+			uint16 dump_hex;
+			uint16 source_port;
+			uint16 dest_port;
+			uint16 udp_port_pos;
+			uint8 *ptr8 = (uint8 *)&pktdata[ETHER_HDR_LEN];
+			uint8 ip_header_len = (*ptr8 & 0x0f)<<2;
+
+			udp_port_pos = ETHER_HDR_LEN + ip_header_len;
+			source_port = (pktdata[udp_port_pos] << 8) | pktdata[udp_port_pos+1];
+			dest_port = (pktdata[udp_port_pos+2] << 8) | pktdata[udp_port_pos+3];
+			if (source_port == 0x0044 || dest_port == 0x0044) {
+				dump_hex = (pktdata[udp_port_pos+249] << 8) |
+					pktdata[udp_port_pos+250];
+				if (dump_hex == 0x0101) {
+					DHD_ERROR(("DHCP - DISCOVER [TX]\n"));
+				} else if (dump_hex == 0x0102) {
+					DHD_ERROR(("DHCP - OFFER [TX]\n"));
+				} else if (dump_hex == 0x0103) {
+					DHD_ERROR(("DHCP - REQUEST [TX]\n"));
+				} else if (dump_hex == 0x0105) {
+					DHD_ERROR(("DHCP - ACK [TX]\n"));
+				} else {
+					DHD_ERROR(("DHCP - 0x%X [TX]\n", dump_hex));
+				}
+			} else if (source_port == 0x0043 || dest_port == 0x0043) {
+				DHD_ERROR(("DHCP - BOOTP [RX]\n"));
+			}
+		}
+#endif /* DHD_DHCP_DUMP */
+	} else {
+			PKTFREE(dhd->pub.osh, pktbuf, TRUE);
+			return BCME_ERROR;
+	}
+
+#ifdef DHDTCPACK_SUPPRESS
+	/* If this packet has replaced another packet and got freed, just return */
+	if (dhd_tcpack_suppress(dhdp, pktbuf))
+		return ret;
+#endif /* DHDTCPACK_SUPPRESS */
+
+	/* Look into the packet and update the packet priority */
+#ifndef PKTPRIO_OVERRIDE
+	if (PKTPRIO(pktbuf) == 0)
+#endif
+		pktsetprio(pktbuf, FALSE);
+
+
+#ifdef PCIE_FULL_DONGLE
+	/*
+	 * Lkup the per interface hash table, for a matching flowring. If one is not
+	 * available, allocate a unique flowid and add a flowring entry.
+	 * The found or newly created flowid is placed into the pktbuf's tag.
+	 */
+	ret = dhd_flowid_update(dhdp, ifidx, dhdp->flow_prio_map[(PKTPRIO(pktbuf))], pktbuf);
+	if (ret != BCME_OK) {
+		PKTCFREE(dhd->pub.osh, pktbuf, TRUE);
+		return ret;
+	}
+#endif
+
+#ifdef PROP_TXSTATUS
+	if (dhd_wlfc_is_supported(dhdp)) {
+		/* store the interface ID */
+		DHD_PKTTAG_SETIF(PKTTAG(pktbuf), ifidx);
+
+		/* store destination MAC in the tag as well */
+		DHD_PKTTAG_SETDSTN(PKTTAG(pktbuf), eh->ether_dhost);
+
+		/* decide which FIFO this packet belongs to */
+		if (ETHER_ISMULTI(eh->ether_dhost))
+			/* one additional queue index (highest AC + 1) is used for bc/mc queue */
+			DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), AC_COUNT);
+		else
+			DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), WME_PRIO2AC(PKTPRIO(pktbuf)));
+	} else
+#endif /* PROP_TXSTATUS */
+	/* If the protocol uses a data header, apply it */
+	dhd_prot_hdrpush(dhdp, ifidx, pktbuf);
+
+	/* Use bus module to send data frame */
+#ifdef WLMEDIA_HTSF
+	dhd_htsf_addtxts(dhdp, pktbuf);
+#endif
+#if defined(DHD_8021X_DUMP)
+	dhd_tx_dump(dhdp->osh, pktbuf);
+#endif
+#ifdef PROP_TXSTATUS
+	{
+		if (dhd_wlfc_commit_packets(dhdp, (f_commitpkt_t)dhd_bus_txdata,
+			dhdp->bus, pktbuf, TRUE) == WLFC_UNSUPPORTED) {
+			/* non-proptxstatus way */
+#ifdef BCMPCIE
+			ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
+#else
+			ret = dhd_bus_txdata(dhdp->bus, pktbuf);
+#endif /* BCMPCIE */
+		}
+	}
+#else
+#ifdef BCMPCIE
+	ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
+#else
+	ret = dhd_bus_txdata(dhdp->bus, pktbuf);
+#endif /* BCMPCIE */
+#endif /* PROP_TXSTATUS */
+
+	return ret;
+}
+
+int BCMFASTPATH
+dhd_start_xmit(struct sk_buff *skb, struct net_device *net)
+{
+	int ret;
+	uint datalen;
+	void *pktbuf;
+	dhd_info_t *dhd = DHD_DEV_INFO(net);
+	dhd_if_t *ifp = NULL;
+	int ifidx;
+#ifdef WLMEDIA_HTSF
+	uint8 htsfdlystat_sz = dhd->pub.htsfdlystat_sz;
+#else
+	uint8 htsfdlystat_sz = 0;
+#endif
+#ifdef DHD_WMF
+	struct ether_header *eh;
+	uint8 *iph;
+#endif /* DHD_WMF */
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	DHD_OS_WAKE_LOCK(&dhd->pub);
+	DHD_PERIM_LOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
+
+	/* Reject if down */
+	if (dhd->pub.busstate == DHD_BUS_DOWN || dhd->pub.hang_was_sent) {
+		DHD_ERROR(("%s: xmit rejected pub.up=%d busstate=%d \n",
+			__FUNCTION__, dhd->pub.up, dhd->pub.busstate));
+		netif_stop_queue(net);
+		/* Send Event when bus down detected during data session */
+		if (dhd->pub.up) {
+			DHD_ERROR(("%s: Event HANG sent up\n", __FUNCTION__));
+			net_os_send_hang_message(net);
+		}
+		DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
+		return -ENODEV;
+#else
+		return NETDEV_TX_BUSY;
+#endif
+	}
+
+	ifp = DHD_DEV_IFP(net);
+	ifidx = DHD_DEV_IFIDX(net);
+
+	ASSERT(ifidx == dhd_net2idx(dhd, net));
+	ASSERT((ifp != NULL) && (ifp == dhd->iflist[ifidx]));
+
+	if (ifidx == DHD_BAD_IF) {
+		DHD_ERROR(("%s: bad ifidx %d\n", __FUNCTION__, ifidx));
+		netif_stop_queue(net);
+		DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
+		return -ENODEV;
+#else
+		return NETDEV_TX_BUSY;
+#endif
+	}
+
+	/* re-align socket buffer if "skb->data" is odd address */
+	if (((unsigned long)(skb->data)) & 0x1) {
+		unsigned char *data = skb->data;
+		uint32 length = skb->len;
+		PKTPUSH(dhd->pub.osh, skb, 1);
+		memmove(skb->data, data, length);
+		PKTSETLEN(dhd->pub.osh, skb, length);
+	}
+
+	datalen  = PKTLEN(dhd->pub.osh, skb);
+
+	/* Make sure there's enough room for any header */
+
+	if (skb_headroom(skb) < dhd->pub.hdrlen + htsfdlystat_sz) {
+		struct sk_buff *skb2;
+
+		DHD_INFO(("%s: insufficient headroom\n",
+		          dhd_ifname(&dhd->pub, ifidx)));
+		dhd->pub.tx_realloc++;
+
+		skb2 = skb_realloc_headroom(skb, dhd->pub.hdrlen + htsfdlystat_sz);
+
+		dev_kfree_skb(skb);
+		if ((skb = skb2) == NULL) {
+			DHD_ERROR(("%s: skb_realloc_headroom failed\n",
+			           dhd_ifname(&dhd->pub, ifidx)));
+			ret = -ENOMEM;
+			goto done;
+		}
+	}
+
+	/* Convert to packet */
+	if (!(pktbuf = PKTFRMNATIVE(dhd->pub.osh, skb))) {
+		DHD_ERROR(("%s: PKTFRMNATIVE failed\n",
+		           dhd_ifname(&dhd->pub, ifidx)));
+		dev_kfree_skb_any(skb);
+		ret = -ENOMEM;
+		goto done;
+	}
+#ifdef WLMEDIA_HTSF
+	if (htsfdlystat_sz && PKTLEN(dhd->pub.osh, pktbuf) >= ETHER_ADDR_LEN) {
+		uint8 *pktdata = (uint8 *)PKTDATA(dhd->pub.osh, pktbuf);
+		struct ether_header *eh = (struct ether_header *)pktdata;
+
+		if (!ETHER_ISMULTI(eh->ether_dhost) &&
+			(ntoh16(eh->ether_type) == ETHER_TYPE_IP)) {
+			eh->ether_type = hton16(ETHER_TYPE_BRCM_PKTDLYSTATS);
+		}
+	}
+#endif
+#ifdef DHD_WMF
+	eh = (struct ether_header *)PKTDATA(dhd->pub.osh, pktbuf);
+	iph = (uint8 *)eh + ETHER_HDR_LEN;
+
+	/* WMF processing for multicast packets
+	 * Only IPv4 packets are handled
+	 */
+	if (ifp->wmf.wmf_enable && (ntoh16(eh->ether_type) == ETHER_TYPE_IP) &&
+		(IP_VER(iph) == IP_VER_4) && (ETHER_ISMULTI(eh->ether_dhost) ||
+		((IPV4_PROT(iph) == IP_PROT_IGMP) && dhd->pub.wmf_ucast_igmp))) {
+#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
+		void *sdu_clone;
+		bool ucast_convert = FALSE;
+#ifdef DHD_UCAST_UPNP
+		uint32 dest_ip;
+
+		dest_ip = ntoh32(*((uint32 *)(iph + IPV4_DEST_IP_OFFSET)));
+		ucast_convert = dhd->pub.wmf_ucast_upnp && MCAST_ADDR_UPNP_SSDP(dest_ip);
+#endif /* DHD_UCAST_UPNP */
+#ifdef DHD_IGMP_UCQUERY
+		ucast_convert |= dhd->pub.wmf_ucast_igmp_query &&
+			(IPV4_PROT(iph) == IP_PROT_IGMP) &&
+			(*(iph + IPV4_HLEN(iph)) == IGMPV2_HOST_MEMBERSHIP_QUERY);
+#endif /* DHD_IGMP_UCQUERY */
+		if (ucast_convert) {
+			dhd_sta_t *sta;
+			unsigned long flags;
+
+			DHD_IF_STA_LIST_LOCK(ifp, flags);
+
+			/* Convert upnp/igmp query to unicast for each assoc STA */
+			list_for_each_entry(sta, &ifp->sta_list, list) {
+				if ((sdu_clone = PKTDUP(dhd->pub.osh, pktbuf)) == NULL) {
+					DHD_IF_STA_LIST_UNLOCK(ifp, flags);
+					DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
+					DHD_OS_WAKE_UNLOCK(&dhd->pub);
+					return (WMF_NOP);
+				}
+				dhd_wmf_forward(ifp->wmf.wmfh, sdu_clone, 0, sta, 1);
+			}
+
+			DHD_IF_STA_LIST_UNLOCK(ifp, flags);
+			DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
+			DHD_OS_WAKE_UNLOCK(&dhd->pub);
+
+			PKTFREE(dhd->pub.osh, pktbuf, TRUE);
+			return NETDEV_TX_OK;
+		} else
+#endif /* defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP) */
+		{
+			/* There will be no STA info if the packet is coming from LAN host
+			 * Pass as NULL
+			 */
+			ret = dhd_wmf_packets_handle(&dhd->pub, pktbuf, NULL, ifidx, 0);
+			switch (ret) {
+			case WMF_TAKEN:
+			case WMF_DROP:
+				/* Either taken by WMF or we should drop it.
+				 * Exiting send path
+				 */
+				DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
+				DHD_OS_WAKE_UNLOCK(&dhd->pub);
+				return NETDEV_TX_OK;
+			default:
+				/* Continue the transmit path */
+				break;
+			}
+		}
+	}
+#endif /* DHD_WMF */
+
+	ret = dhd_sendpkt(&dhd->pub, ifidx, pktbuf);
+
+done:
+	if (ret) {
+		ifp->stats.tx_dropped++;
+		dhd->pub.tx_dropped++;
+	}
+	else {
+		dhd->pub.tx_packets++;
+		ifp->stats.tx_packets++;
+		ifp->stats.tx_bytes += datalen;
+	}
+
+	DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
+	DHD_OS_WAKE_UNLOCK(&dhd->pub);
+
+	/* Return ok: we always eat the packet */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
+	return 0;
+#else
+	return NETDEV_TX_OK;
+#endif
+}
+
+
+void
+dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool state)
+{
+	struct net_device *net;
+	dhd_info_t *dhd = dhdp->info;
+	int i;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	ASSERT(dhd);
+
+	if (ifidx == ALL_INTERFACES) {
+		/* Flow control on all active interfaces */
+		dhdp->txoff = state;
+		for (i = 0; i < DHD_MAX_IFS; i++) {
+			if (dhd->iflist[i]) {
+				net = dhd->iflist[i]->net;
+				if (state == ON)
+					netif_stop_queue(net);
+				else
+					netif_wake_queue(net);
+			}
+		}
+	}
+	else {
+		if (dhd->iflist[ifidx]) {
+			net = dhd->iflist[ifidx]->net;
+			if (state == ON)
+				netif_stop_queue(net);
+			else
+				netif_wake_queue(net);
+		}
+	}
+}
+
+#ifdef DHD_RX_DUMP
+typedef struct {
+	uint16 type;
+	const char *str;
+} PKTTYPE_INFO;
+
+static const PKTTYPE_INFO packet_type_info[] =
+{
+	{ ETHER_TYPE_IP, "IP" },
+	{ ETHER_TYPE_ARP, "ARP" },
+	{ ETHER_TYPE_BRCM, "BRCM" },
+	{ ETHER_TYPE_802_1X, "802.1X" },
+	{ ETHER_TYPE_WAI, "WAPI" },
+	{ 0, ""}
+};
+
+static const char *_get_packet_type_str(uint16 type)
+{
+	int i;
+	int n = sizeof(packet_type_info)/sizeof(packet_type_info[1]) - 1;
+
+	for (i = 0; i < n; i++) {
+		if (packet_type_info[i].type == type)
+			return packet_type_info[i].str;
+	}
+
+	return packet_type_info[n].str;
+}
+#endif /* DHD_RX_DUMP */
+
+
+#ifdef DHD_WMF
+bool
+dhd_is_rxthread_enabled(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd = dhdp->info;
+
+	return dhd->rxthread_enabled;
+}
+#endif /* DHD_WMF */
+
+void
+dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan)
+{
+	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+	struct sk_buff *skb;
+	uchar *eth;
+	uint len;
+	void *data, *pnext = NULL;
+	int i;
+	dhd_if_t *ifp;
+	wl_event_msg_t event;
+	int tout_rx = 0;
+	int tout_ctrl = 0;
+	void *skbhead = NULL;
+	void *skbprev = NULL;
+#if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP)
+	char *dump_data;
+	uint16 protocol;
+#endif /* DHD_RX_DUMP || DHD_8021X_DUMP */
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	for (i = 0; pktbuf && i < numpkt; i++, pktbuf = pnext) {
+		struct ether_header *eh;
+
+		pnext = PKTNEXT(dhdp->osh, pktbuf);
+		PKTSETNEXT(dhdp->osh, pktbuf, NULL);
+
+		ifp = dhd->iflist[ifidx];
+		if (ifp == NULL) {
+			DHD_ERROR(("%s: ifp is NULL. drop packet\n",
+				__FUNCTION__));
+			PKTCFREE(dhdp->osh, pktbuf, FALSE);
+			continue;
+		}
+
+		eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
+
+		/* Dropping only data packets before registering net device to avoid kernel panic */
+#ifndef PROP_TXSTATUS_VSDB
+		if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED) &&
+			(ntoh16(eh->ether_type) != ETHER_TYPE_BRCM)) {
+#else
+		if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED || !dhd->pub.up) &&
+			(ntoh16(eh->ether_type) != ETHER_TYPE_BRCM)) {
+#endif /* PROP_TXSTATUS_VSDB */
+			DHD_ERROR(("%s: net device is NOT registered yet. drop packet\n",
+			__FUNCTION__));
+			PKTCFREE(dhdp->osh, pktbuf, FALSE);
+			continue;
+		}
+
+
+#ifdef PROP_TXSTATUS
+		if (dhd_wlfc_is_header_only_pkt(dhdp, pktbuf)) {
+			/* WLFC may send header only packet when
+			there is an urgent message but no packet to
+			piggy-back on
+			*/
+			PKTCFREE(dhdp->osh, pktbuf, FALSE);
+			continue;
+		}
+#endif
+#ifdef DHD_L2_FILTER
+		/* If block_ping is enabled drop the ping packet */
+		if (dhdp->block_ping) {
+			if (dhd_l2_filter_block_ping(dhdp, pktbuf, ifidx) == BCME_OK) {
+				PKTFREE(dhdp->osh, pktbuf, FALSE);
+				continue;
+			}
+		}
+#endif
+#ifdef DHD_WMF
+		/* WMF processing for multicast packets */
+		if (ifp->wmf.wmf_enable && (ETHER_ISMULTI(eh->ether_dhost))) {
+			dhd_sta_t *sta;
+			int ret;
+
+			sta = dhd_find_sta(dhdp, ifidx, (void *)eh->ether_shost);
+			ret = dhd_wmf_packets_handle(dhdp, pktbuf, sta, ifidx, 1);
+			switch (ret) {
+				case WMF_TAKEN:
+					/* The packet is taken by WMF. Continue to next iteration */
+					continue;
+				case WMF_DROP:
+					/* Packet DROP decision by WMF. Toss it */
+					DHD_ERROR(("%s: WMF decides to drop packet\n",
+						__FUNCTION__));
+					PKTCFREE(dhdp->osh, pktbuf, FALSE);
+					continue;
+				default:
+					/* Continue the transmit path */
+					break;
+			}
+		}
+#endif /* DHD_WMF */
+#ifdef DHDTCPACK_SUPPRESS
+		dhd_tcpdata_info_get(dhdp, pktbuf);
+#endif
+		skb = PKTTONATIVE(dhdp->osh, pktbuf);
+
+		ifp = dhd->iflist[ifidx];
+		if (ifp == NULL)
+			ifp = dhd->iflist[0];
+
+		ASSERT(ifp);
+		skb->dev = ifp->net;
+
+#ifdef PCIE_FULL_DONGLE
+		if ((DHD_IF_ROLE_AP(dhdp, ifidx) || DHD_IF_ROLE_P2PGO(dhdp, ifidx)) &&
+			(!ifp->ap_isolate)) {
+			eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
+			if (ETHER_ISUCAST(eh->ether_dhost)) {
+				if (dhd_find_sta(dhdp, ifidx, (void *)eh->ether_dhost)) {
+						dhd_sendpkt(dhdp, ifidx, pktbuf);
+					continue;
+				}
+			} else {
+				void *npktbuf = PKTDUP(dhdp->osh, pktbuf);
+				dhd_sendpkt(dhdp, ifidx, npktbuf);
+			}
+		}
+#endif /* PCIE_FULL_DONGLE */
+
+		/* Get the protocol, maintain skb around eth_type_trans()
+		 * The main reason for this hack is for the limitation of
+		 * Linux 2.4 where 'eth_type_trans' uses the 'net->hard_header_len'
+		 * to perform skb_pull inside vs ETH_HLEN. Since to avoid
+		 * coping of the packet coming from the network stack to add
+		 * BDC, Hardware header etc, during network interface registration
+		 * we set the 'net->hard_header_len' to ETH_HLEN + extra space required
+		 * for BDC, Hardware header etc. and not just the ETH_HLEN
+		 */
+		eth = skb->data;
+		len = skb->len;
+
+#if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP) || defined(DHD_DHCP_DUMP)
+		dump_data = skb->data;
+		protocol = (dump_data[12] << 8) | dump_data[13];
+#endif /* DHD_RX_DUMP || DHD_8021X_DUMP || DHD_DHCP_DUMP */
+#ifdef DHD_8021X_DUMP
+		if (protocol == ETHER_TYPE_802_1X) {
+			DHD_ERROR(("ETHER_TYPE_802_1X [RX]: "
+				"ver %d, type %d, replay %d\n",
+				dump_data[14], dump_data[15],
+				dump_data[30]));
+		}
+#endif /* DHD_8021X_DUMP */
+#ifdef DHD_DHCP_DUMP
+		if (protocol != ETHER_TYPE_BRCM && protocol == ETHER_TYPE_IP) {
+			uint16 dump_hex;
+			uint16 source_port;
+			uint16 dest_port;
+			uint16 udp_port_pos;
+			uint8 *ptr8 = (uint8 *)&dump_data[ETHER_HDR_LEN];
+			uint8 ip_header_len = (*ptr8 & 0x0f)<<2;
+
+			udp_port_pos = ETHER_HDR_LEN + ip_header_len;
+			source_port = (dump_data[udp_port_pos] << 8) | dump_data[udp_port_pos+1];
+			dest_port = (dump_data[udp_port_pos+2] << 8) | dump_data[udp_port_pos+3];
+			if (source_port == 0x0044 || dest_port == 0x0044) {
+				dump_hex = (dump_data[udp_port_pos+249] << 8) |
+					dump_data[udp_port_pos+250];
+				if (dump_hex == 0x0101) {
+					DHD_ERROR(("DHCP - DISCOVER [RX]\n"));
+				} else if (dump_hex == 0x0102) {
+					DHD_ERROR(("DHCP - OFFER [RX]\n"));
+				} else if (dump_hex == 0x0103) {
+					DHD_ERROR(("DHCP - REQUEST [RX]\n"));
+				} else if (dump_hex == 0x0105) {
+					DHD_ERROR(("DHCP - ACK [RX]\n"));
+				} else {
+					DHD_ERROR(("DHCP - 0x%X [RX]\n", dump_hex));
+				}
+			} else if (source_port == 0x0043 || dest_port == 0x0043) {
+				DHD_ERROR(("DHCP - BOOTP [RX]\n"));
+			}
+		}
+#endif /* DHD_DHCP_DUMP */
+#if defined(DHD_RX_DUMP)
+		DHD_ERROR(("RX DUMP - %s\n", _get_packet_type_str(protocol)));
+		if (protocol != ETHER_TYPE_BRCM) {
+			if (dump_data[0] == 0xFF) {
+				DHD_ERROR(("%s: BROADCAST\n", __FUNCTION__));
+
+				if ((dump_data[12] == 8) &&
+					(dump_data[13] == 6)) {
+					DHD_ERROR(("%s: ARP %d\n",
+						__FUNCTION__, dump_data[0x15]));
+				}
+			} else if (dump_data[0] & 1) {
+				DHD_ERROR(("%s: MULTICAST: " MACDBG "\n",
+					__FUNCTION__, MAC2STRDBG(dump_data)));
+			}
+#ifdef DHD_RX_FULL_DUMP
+			{
+				int k;
+				for (k = 0; k < skb->len; k++) {
+					DHD_ERROR(("%02X ", dump_data[k]));
+					if ((k & 15) == 15)
+						DHD_ERROR(("\n"));
+				}
+				DHD_ERROR(("\n"));
+			}
+#endif /* DHD_RX_FULL_DUMP */
+		}
+#endif /* DHD_RX_DUMP */
+
+		skb->protocol = eth_type_trans(skb, skb->dev);
+
+		if (skb->pkt_type == PACKET_MULTICAST) {
+			dhd->pub.rx_multicast++;
+			ifp->stats.multicast++;
+		}
+
+		skb->data = eth;
+		skb->len = len;
+
+#ifdef WLMEDIA_HTSF
+		dhd_htsf_addrxts(dhdp, pktbuf);
+#endif
+		/* Strip header, count, deliver upward */
+		skb_pull(skb, ETH_HLEN);
+
+		/* Process special event packets and then discard them */
+		memset(&event, 0, sizeof(event));
+		if (ntoh16(skb->protocol) == ETHER_TYPE_BRCM) {
+			dhd_wl_host_event(dhd, &ifidx,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
+			skb_mac_header(skb),
+#else
+			skb->mac.raw,
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */
+			len - 2,
+			&event,
+			&data);
+
+			wl_event_to_host_order(&event);
+			if (!tout_ctrl)
+				tout_ctrl = DHD_PACKET_TIMEOUT_MS;
+
+#if defined(PNO_SUPPORT)
+			if (event.event_type == WLC_E_PFN_NET_FOUND) {
+				/* enforce custom wake lock to garantee that Kernel not suspended */
+				tout_ctrl = CUSTOM_PNO_EVENT_LOCK_xTIME * DHD_PACKET_TIMEOUT_MS;
+			}
+#endif /* PNO_SUPPORT */
+
+#ifdef DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT
+			PKTFREE(dhdp->osh, pktbuf, FALSE);
+			continue;
+#endif /* DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT */
+		} else {
+			if (skb->dev->ieee80211_ptr && skb->dev->ieee80211_ptr->ps == false)
+				tout_rx = CUSTOM_DHCP_LOCK_xTIME * DHD_PACKET_TIMEOUT_MS;
+			else
+				tout_rx = DHD_PACKET_TIMEOUT_MS;
+
+#ifdef PROP_TXSTATUS
+			dhd_wlfc_save_rxpath_ac_time(dhdp, (uint8)PKTPRIO(skb));
+#endif /* PROP_TXSTATUS */
+		}
+
+		ASSERT(ifidx < DHD_MAX_IFS && dhd->iflist[ifidx]);
+		ifp = dhd->iflist[ifidx];
+
+		if (ifp->net)
+			ifp->net->last_rx = jiffies;
+
+		if (ntoh16(skb->protocol) != ETHER_TYPE_BRCM) {
+			dhdp->dstats.rx_bytes += skb->len;
+			dhdp->rx_packets++; /* Local count */
+			ifp->stats.rx_bytes += skb->len;
+			ifp->stats.rx_packets++;
+		}
+
+		if (in_interrupt()) {
+			netif_rx(skb);
+		} else {
+			if (dhd->rxthread_enabled) {
+				if (!skbhead)
+					skbhead = skb;
+				else
+					PKTSETNEXT(dhdp->osh, skbprev, skb);
+				skbprev = skb;
+			} else {
+
+				/* If the receive is not processed inside an ISR,
+				 * the softirqd must be woken explicitly to service
+				 * the NET_RX_SOFTIRQ.	In 2.6 kernels, this is handled
+				 * by netif_rx_ni(), but in earlier kernels, we need
+				 * to do it manually.
+				 */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
+				netif_rx_ni(skb);
+#else
+				ulong flags;
+				netif_rx(skb);
+				local_irq_save(flags);
+				RAISE_RX_SOFTIRQ();
+				local_irq_restore(flags);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
+			}
+		}
+	}
+
+	if (dhd->rxthread_enabled && skbhead)
+		dhd_sched_rxf(dhdp, skbhead);
+
+	DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(dhdp, tout_rx);
+	DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhdp, tout_ctrl);
+}
+
+void
+dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx)
+{
+	/* Linux version has nothing to do */
+	return;
+}
+
+void
+dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
+	struct ether_header *eh;
+	uint16 type;
+
+	dhd_prot_hdrpull(dhdp, NULL, txp, NULL, NULL);
+
+	eh = (struct ether_header *)PKTDATA(dhdp->osh, txp);
+	type  = ntoh16(eh->ether_type);
+
+	if (type == ETHER_TYPE_802_1X)
+		atomic_dec(&dhd->pend_8021x_cnt);
+
+}
+
+static struct net_device_stats *
+dhd_get_stats(struct net_device *net)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(net);
+	dhd_if_t *ifp;
+	int ifidx;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	ifidx = dhd_net2idx(dhd, net);
+	if (ifidx == DHD_BAD_IF) {
+		DHD_ERROR(("%s: BAD_IF\n", __FUNCTION__));
+
+		memset(&net->stats, 0, sizeof(net->stats));
+		return &net->stats;
+	}
+
+	ifp = dhd->iflist[ifidx];
+	ASSERT(dhd && ifp);
+
+	if (dhd->pub.up) {
+		/* Use the protocol to get dongle stats */
+		dhd_prot_dstats(&dhd->pub);
+	}
+	return &ifp->stats;
+}
+
+static int
+dhd_watchdog_thread(void *data)
+{
+	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
+	/* This thread doesn't need any user-level access,
+	 * so get rid of all our resources
+	 */
+	if (dhd_watchdog_prio > 0) {
+		struct sched_param param;
+		param.sched_priority = (dhd_watchdog_prio < MAX_RT_PRIO)?
+			dhd_watchdog_prio:(MAX_RT_PRIO-1);
+		setScheduler(current, SCHED_FIFO, &param);
+	}
+
+	while (1)
+		if (down_interruptible (&tsk->sema) == 0) {
+			unsigned long flags;
+			unsigned long jiffies_at_start = jiffies;
+			unsigned long time_lapse;
+
+			SMP_RD_BARRIER_DEPENDS();
+			if (tsk->terminated) {
+				break;
+			}
+
+			if (dhd->pub.dongle_reset == FALSE) {
+				DHD_TIMER(("%s:\n", __FUNCTION__));
+
+				/* Call the bus module watchdog */
+				dhd_bus_watchdog(&dhd->pub);
+
+
+				DHD_GENERAL_LOCK(&dhd->pub, flags);
+				/* Count the tick for reference */
+				dhd->pub.tickcnt++;
+				time_lapse = jiffies - jiffies_at_start;
+
+				/* Reschedule the watchdog */
+				if (dhd->wd_timer_valid)
+					mod_timer(&dhd->timer,
+					    jiffies +
+					    msecs_to_jiffies(dhd_watchdog_ms) -
+					    min(msecs_to_jiffies(dhd_watchdog_ms), time_lapse));
+					DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+				}
+		} else {
+			break;
+	}
+
+	complete_and_exit(&tsk->completed, 0);
+}
+
+static void dhd_watchdog(ulong data)
+{
+	dhd_info_t *dhd = (dhd_info_t *)data;
+	unsigned long flags;
+
+	if (dhd->pub.dongle_reset) {
+		return;
+	}
+
+	if (dhd->thr_wdt_ctl.thr_pid >= 0) {
+		up(&dhd->thr_wdt_ctl.sema);
+		return;
+	}
+
+	/* Call the bus module watchdog */
+	dhd_bus_watchdog(&dhd->pub);
+
+	DHD_GENERAL_LOCK(&dhd->pub, flags);
+	/* Count the tick for reference */
+	dhd->pub.tickcnt++;
+
+	/* Reschedule the watchdog */
+	if (dhd->wd_timer_valid)
+		mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
+	DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+
+}
+
+#ifdef ENABLE_ADAPTIVE_SCHED
+static void
+dhd_sched_policy(int prio)
+{
+	struct sched_param param;
+	if (cpufreq_quick_get(0) <= CUSTOM_CPUFREQ_THRESH) {
+		param.sched_priority = 0;
+		setScheduler(current, SCHED_NORMAL, &param);
+	} else {
+		if (get_scheduler_policy(current) != SCHED_FIFO) {
+			param.sched_priority = (prio < MAX_RT_PRIO)? prio : (MAX_RT_PRIO-1);
+			setScheduler(current, SCHED_FIFO, &param);
+		}
+	}
+}
+#endif /* ENABLE_ADAPTIVE_SCHED */
+#ifdef DEBUG_CPU_FREQ
+static int dhd_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
+{
+	dhd_info_t *dhd = container_of(nb, struct dhd_info, freq_trans);
+	struct cpufreq_freqs *freq = data;
+	if (dhd) {
+		if (!dhd->new_freq)
+			goto exit;
+		if (val == CPUFREQ_POSTCHANGE) {
+			DHD_ERROR(("cpu freq is changed to %u kHZ on CPU %d\n",
+				freq->new, freq->cpu));
+			*per_cpu_ptr(dhd->new_freq, freq->cpu) = freq->new;
+		}
+	}
+exit:
+	return 0;
+}
+#endif /* DEBUG_CPU_FREQ */
+static int
+dhd_dpc_thread(void *data)
+{
+	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
+
+	/* This thread doesn't need any user-level access,
+	 * so get rid of all our resources
+	 */
+	if (dhd_dpc_prio > 0)
+	{
+		struct sched_param param;
+		param.sched_priority = (dhd_dpc_prio < MAX_RT_PRIO)?dhd_dpc_prio:(MAX_RT_PRIO-1);
+		setScheduler(current, SCHED_FIFO, &param);
+	}
+
+#ifdef CUSTOM_DPC_CPUCORE
+	set_cpus_allowed_ptr(current, cpumask_of(CUSTOM_DPC_CPUCORE));
+#endif
+#ifdef CUSTOM_SET_CPUCORE
+	dhd->pub.current_dpc = current;
+#endif /* CUSTOM_SET_CPUCORE */
+
+	/* Run until signal received */
+	while (1) {
+		if (!binary_sema_down(tsk)) {
+#ifdef ENABLE_ADAPTIVE_SCHED
+			dhd_sched_policy(dhd_dpc_prio);
+#endif /* ENABLE_ADAPTIVE_SCHED */
+			SMP_RD_BARRIER_DEPENDS();
+			if (tsk->terminated) {
+				break;
+			}
+
+			/* Call bus dpc unless it indicated down (then clean stop) */
+			if (dhd->pub.busstate != DHD_BUS_DOWN) {
+				dhd_os_wd_timer_extend(&dhd->pub, TRUE);
+				while (dhd_bus_dpc(dhd->pub.bus)) {
+					/* process all data */
+				}
+				dhd_os_wd_timer_extend(&dhd->pub, FALSE);
+				DHD_OS_WAKE_UNLOCK(&dhd->pub);
+
+			} else {
+				if (dhd->pub.up)
+					dhd_bus_stop(dhd->pub.bus, TRUE);
+				DHD_OS_WAKE_UNLOCK(&dhd->pub);
+			}
+		}
+		else
+			break;
+	}
+
+	complete_and_exit(&tsk->completed, 0);
+}
+
+static int
+dhd_rxf_thread(void *data)
+{
+	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
+#if defined(WAIT_DEQUEUE)
+#define RXF_WATCHDOG_TIME 250 /* BARK_TIME(1000) /  */
+	ulong watchdogTime = OSL_SYSUPTIME(); /* msec */
+#endif
+	dhd_pub_t *pub = &dhd->pub;
+
+	/* This thread doesn't need any user-level access,
+	 * so get rid of all our resources
+	 */
+	if (dhd_rxf_prio > 0)
+	{
+		struct sched_param param;
+		param.sched_priority = (dhd_rxf_prio < MAX_RT_PRIO)?dhd_rxf_prio:(MAX_RT_PRIO-1);
+		setScheduler(current, SCHED_FIFO, &param);
+	}
+
+	DAEMONIZE("dhd_rxf");
+	/* DHD_OS_WAKE_LOCK is called in dhd_sched_dpc[dhd_linux.c] down below  */
+
+	/*  signal: thread has started */
+	complete(&tsk->completed);
+#ifdef CUSTOM_SET_CPUCORE
+	dhd->pub.current_rxf = current;
+#endif /* CUSTOM_SET_CPUCORE */
+
+	/* Run until signal received */
+	while (1) {
+		if (down_interruptible(&tsk->sema) == 0) {
+			void *skb;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
+			ulong flags;
+#endif
+#ifdef ENABLE_ADAPTIVE_SCHED
+			dhd_sched_policy(dhd_rxf_prio);
+#endif /* ENABLE_ADAPTIVE_SCHED */
+
+			SMP_RD_BARRIER_DEPENDS();
+
+			if (tsk->terminated) {
+				break;
+			}
+			skb = dhd_rxf_dequeue(pub);
+
+			if (skb == NULL) {
+				continue;
+			}
+			while (skb) {
+				void *skbnext = PKTNEXT(pub->osh, skb);
+				PKTSETNEXT(pub->osh, skb, NULL);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
+				netif_rx_ni(skb);
+#else
+				netif_rx(skb);
+				local_irq_save(flags);
+				RAISE_RX_SOFTIRQ();
+				local_irq_restore(flags);
+
+#endif
+				skb = skbnext;
+			}
+#if defined(WAIT_DEQUEUE)
+			if (OSL_SYSUPTIME() - watchdogTime > RXF_WATCHDOG_TIME) {
+				OSL_SLEEP(1);
+				watchdogTime = OSL_SYSUPTIME();
+			}
+#endif
+
+			DHD_OS_WAKE_UNLOCK(pub);
+		}
+		else
+			break;
+	}
+
+	complete_and_exit(&tsk->completed, 0);
+}
+
+#ifdef BCMPCIE
+void dhd_dpc_kill(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd;
+
+	if (!dhdp)
+		return;
+
+	dhd = dhdp->info;
+
+	if(!dhd)
+		return;
+
+	tasklet_kill(&dhd->tasklet);
+	DHD_ERROR(("%s: tasklet disabled\n",__FUNCTION__));
+}
+#endif
+
+static int isresched = 0;
+
+static void
+dhd_dpc(ulong data)
+{
+	dhd_info_t *dhd;
+
+	dhd = (dhd_info_t *)data;
+
+	/* this (tasklet) can be scheduled in dhd_sched_dpc[dhd_linux.c]
+	 * down below , wake lock is set,
+	 * the tasklet is initialized in dhd_attach()
+	 */
+	/* Call bus dpc unless it indicated down (then clean stop) */
+	if (dhd->pub.busstate != DHD_BUS_DOWN) {
+		isresched = dhd_bus_dpc(dhd->pub.bus);
+		if (isresched)
+			tasklet_schedule(&dhd->tasklet);
+		else
+			DHD_OS_WAKE_UNLOCK(&dhd->pub);
+	} else {
+		dhd_bus_stop(dhd->pub.bus, TRUE);
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+	}
+}
+
+void
+dhd_sched_dpc(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+
+	if (dhd->thr_dpc_ctl.thr_pid >= 0) {
+		/* If the semaphore does not get up,
+		* wake unlock should be done here
+		*/
+		DHD_OS_WAKE_LOCK(dhdp);
+		if (!binary_sema_up(&dhd->thr_dpc_ctl))
+			DHD_OS_WAKE_UNLOCK(dhdp);
+		return;
+	} else {
+		if (!test_bit(TASKLET_STATE_SCHED, &dhd->tasklet.state) && !isresched) {
+			DHD_OS_WAKE_LOCK(dhdp);
+			tasklet_schedule(&dhd->tasklet);
+		}
+	}
+}
+
+static void
+dhd_sched_rxf(dhd_pub_t *dhdp, void *skb)
+{
+	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+#ifdef RXF_DEQUEUE_ON_BUSY
+	int ret = BCME_OK;
+	int retry = 2;
+#endif /* RXF_DEQUEUE_ON_BUSY */
+
+	DHD_OS_WAKE_LOCK(dhdp);
+
+	DHD_TRACE(("dhd_sched_rxf: Enter\n"));
+#ifdef RXF_DEQUEUE_ON_BUSY
+	do {
+		ret = dhd_rxf_enqueue(dhdp, skb);
+		if (ret == BCME_OK || ret == BCME_ERROR)
+			break;
+		else
+			OSL_SLEEP(50); /* waiting for dequeueing */
+	} while (retry-- > 0);
+
+	if (retry <= 0 && ret == BCME_BUSY) {
+		void *skbp = skb;
+
+		while (skbp) {
+			void *skbnext = PKTNEXT(dhdp->osh, skbp);
+			PKTSETNEXT(dhdp->osh, skbp, NULL);
+			netif_rx_ni(skbp);
+			skbp = skbnext;
+		}
+		DHD_ERROR(("send skb to kernel backlog without rxf_thread\n"));
+	}
+	else {
+		if (dhd->thr_rxf_ctl.thr_pid >= 0) {
+			up(&dhd->thr_rxf_ctl.sema);
+		}
+	}
+#else /* RXF_DEQUEUE_ON_BUSY */
+	do {
+		if (dhd_rxf_enqueue(dhdp, skb) == BCME_OK)
+			break;
+	} while (1);
+	if (dhd->thr_rxf_ctl.thr_pid >= 0) {
+		up(&dhd->thr_rxf_ctl.sema);
+	}
+	return;
+#endif /* RXF_DEQUEUE_ON_BUSY */
+}
+
+#ifdef TOE
+/* Retrieve current toe component enables, which are kept as a bitmap in toe_ol iovar */
+static int
+dhd_toe_get(dhd_info_t *dhd, int ifidx, uint32 *toe_ol)
+{
+	wl_ioctl_t ioc;
+	char buf[32];
+	int ret;
+
+	memset(&ioc, 0, sizeof(ioc));
+
+	ioc.cmd = WLC_GET_VAR;
+	ioc.buf = buf;
+	ioc.len = (uint)sizeof(buf);
+	ioc.set = FALSE;
+
+	strncpy(buf, "toe_ol", sizeof(buf) - 1);
+	buf[sizeof(buf) - 1] = '\0';
+	if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
+		/* Check for older dongle image that doesn't support toe_ol */
+		if (ret == -EIO) {
+			DHD_ERROR(("%s: toe not supported by device\n",
+				dhd_ifname(&dhd->pub, ifidx)));
+			return -EOPNOTSUPP;
+		}
+
+		DHD_INFO(("%s: could not get toe_ol: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
+		return ret;
+	}
+
+	memcpy(toe_ol, buf, sizeof(uint32));
+	return 0;
+}
+
+/* Set current toe component enables in toe_ol iovar, and set toe global enable iovar */
+static int
+dhd_toe_set(dhd_info_t *dhd, int ifidx, uint32 toe_ol)
+{
+	wl_ioctl_t ioc;
+	char buf[32];
+	int toe, ret;
+
+	memset(&ioc, 0, sizeof(ioc));
+
+	ioc.cmd = WLC_SET_VAR;
+	ioc.buf = buf;
+	ioc.len = (uint)sizeof(buf);
+	ioc.set = TRUE;
+
+	/* Set toe_ol as requested */
+
+	strncpy(buf, "toe_ol", sizeof(buf) - 1);
+	buf[sizeof(buf) - 1] = '\0';
+	memcpy(&buf[sizeof("toe_ol")], &toe_ol, sizeof(uint32));
+
+	if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
+		DHD_ERROR(("%s: could not set toe_ol: ret=%d\n",
+			dhd_ifname(&dhd->pub, ifidx), ret));
+		return ret;
+	}
+
+	/* Enable toe globally only if any components are enabled. */
+
+	toe = (toe_ol != 0);
+
+	strcpy(buf, "toe");
+	memcpy(&buf[sizeof("toe")], &toe, sizeof(uint32));
+
+	if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
+		DHD_ERROR(("%s: could not set toe: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
+		return ret;
+	}
+
+	return 0;
+}
+#endif /* TOE */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
+static void
+dhd_ethtool_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(net);
+
+	snprintf(info->driver, sizeof(info->driver), "wl");
+	snprintf(info->version, sizeof(info->version), "%lu", dhd->pub.drv_version);
+}
+
+struct ethtool_ops dhd_ethtool_ops = {
+	.get_drvinfo = dhd_ethtool_get_drvinfo
+};
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
+
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
+static int
+dhd_ethtool(dhd_info_t *dhd, void *uaddr)
+{
+	struct ethtool_drvinfo info;
+	char drvname[sizeof(info.driver)];
+	uint32 cmd;
+#ifdef TOE
+	struct ethtool_value edata;
+	uint32 toe_cmpnt, csum_dir;
+	int ret;
+#endif
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	/* all ethtool calls start with a cmd word */
+	if (copy_from_user(&cmd, uaddr, sizeof (uint32)))
+		return -EFAULT;
+
+	switch (cmd) {
+	case ETHTOOL_GDRVINFO:
+		/* Copy out any request driver name */
+		if (copy_from_user(&info, uaddr, sizeof(info)))
+			return -EFAULT;
+		strncpy(drvname, info.driver, sizeof(info.driver));
+		drvname[sizeof(info.driver)-1] = '\0';
+
+		/* clear struct for return */
+		memset(&info, 0, sizeof(info));
+		info.cmd = cmd;
+
+		/* if dhd requested, identify ourselves */
+		if (strcmp(drvname, "?dhd") == 0) {
+			snprintf(info.driver, sizeof(info.driver), "dhd");
+			strncpy(info.version, EPI_VERSION_STR, sizeof(info.version) - 1);
+			info.version[sizeof(info.version) - 1] = '\0';
+		}
+
+		/* otherwise, require dongle to be up */
+		else if (!dhd->pub.up) {
+			DHD_ERROR(("%s: dongle is not up\n", __FUNCTION__));
+			return -ENODEV;
+		}
+
+		/* finally, report dongle driver type */
+		else if (dhd->pub.iswl)
+			snprintf(info.driver, sizeof(info.driver), "wl");
+		else
+			snprintf(info.driver, sizeof(info.driver), "xx");
+
+		snprintf(info.version, sizeof(info.version), "%lu", dhd->pub.drv_version);
+		if (copy_to_user(uaddr, &info, sizeof(info)))
+			return -EFAULT;
+		DHD_CTL(("%s: given %*s, returning %s\n", __FUNCTION__,
+		         (int)sizeof(drvname), drvname, info.driver));
+		break;
+
+#ifdef TOE
+	/* Get toe offload components from dongle */
+	case ETHTOOL_GRXCSUM:
+	case ETHTOOL_GTXCSUM:
+		if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
+			return ret;
+
+		csum_dir = (cmd == ETHTOOL_GTXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
+
+		edata.cmd = cmd;
+		edata.data = (toe_cmpnt & csum_dir) ? 1 : 0;
+
+		if (copy_to_user(uaddr, &edata, sizeof(edata)))
+			return -EFAULT;
+		break;
+
+	/* Set toe offload components in dongle */
+	case ETHTOOL_SRXCSUM:
+	case ETHTOOL_STXCSUM:
+		if (copy_from_user(&edata, uaddr, sizeof(edata)))
+			return -EFAULT;
+
+		/* Read the current settings, update and write back */
+		if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
+			return ret;
+
+		csum_dir = (cmd == ETHTOOL_STXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
+
+		if (edata.data != 0)
+			toe_cmpnt |= csum_dir;
+		else
+			toe_cmpnt &= ~csum_dir;
+
+		if ((ret = dhd_toe_set(dhd, 0, toe_cmpnt)) < 0)
+			return ret;
+
+		/* If setting TX checksum mode, tell Linux the new mode */
+		if (cmd == ETHTOOL_STXCSUM) {
+			if (edata.data)
+				dhd->iflist[0]->net->features |= NETIF_F_IP_CSUM;
+			else
+				dhd->iflist[0]->net->features &= ~NETIF_F_IP_CSUM;
+		}
+
+		break;
+#endif /* TOE */
+
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
+
+static bool dhd_check_hang(struct net_device *net, dhd_pub_t *dhdp, int error)
+{
+	dhd_info_t *dhd;
+
+	if (!dhdp) {
+		DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
+		return FALSE;
+	}
+
+	if (!dhdp->up)
+		return FALSE;
+
+	dhd = (dhd_info_t *)dhdp->info;
+#if !defined(BCMPCIE)
+	if (dhd->thr_dpc_ctl.thr_pid < 0) {
+		DHD_ERROR(("%s : skipped due to negative pid - unloading?\n", __FUNCTION__));
+		return FALSE;
+	}
+#endif
+
+	if ((error == -ETIMEDOUT) || (error == -EREMOTEIO) ||
+		((dhdp->busstate == DHD_BUS_DOWN) && (!dhdp->dongle_reset))) {
+		DHD_ERROR(("%s: Event HANG send up due to  re=%d te=%d e=%d s=%d\n", __FUNCTION__,
+			dhdp->rxcnt_timeout, dhdp->txcnt_timeout, error, dhdp->busstate));
+		net_os_send_hang_message(net);
+		return TRUE;
+	}
+	return FALSE;
+}
+
+int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, dhd_ioctl_t *ioc, void *data_buf)
+{
+	int bcmerror = BCME_OK;
+	int buflen = 0;
+	struct net_device *net;
+
+	net = dhd_idx2net(pub, ifidx);
+	if (!net) {
+		bcmerror = BCME_BADARG;
+		goto done;
+	}
+
+	if (data_buf)
+		buflen = MIN(ioc->len, DHD_IOCTL_MAXLEN);
+
+	/* check for local dhd ioctl and handle it */
+	if (ioc->driver == DHD_IOCTL_MAGIC) {
+		bcmerror = dhd_ioctl((void *)pub, ioc, data_buf, buflen);
+		if (bcmerror)
+			pub->bcmerror = bcmerror;
+		goto done;
+	}
+
+	/* send to dongle (must be up, and wl). */
+	if (pub->busstate != DHD_BUS_DATA) {
+		bcmerror = BCME_DONGLE_DOWN;
+		goto done;
+	}
+
+	if (!pub->iswl) {
+		bcmerror = BCME_DONGLE_DOWN;
+		goto done;
+	}
+
+	/*
+	 * Flush the TX queue if required for proper message serialization:
+	 * Intercept WLC_SET_KEY IOCTL - serialize M4 send and set key IOCTL to
+	 * prevent M4 encryption and
+	 * intercept WLC_DISASSOC IOCTL - serialize WPS-DONE and WLC_DISASSOC IOCTL to
+	 * prevent disassoc frame being sent before WPS-DONE frame.
+	 */
+	if (ioc->cmd == WLC_SET_KEY ||
+	    (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
+	     strncmp("wsec_key", data_buf, 9) == 0) ||
+	    (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
+	     strncmp("bsscfg:wsec_key", data_buf, 15) == 0) ||
+	    ioc->cmd == WLC_DISASSOC)
+		dhd_wait_pend8021x(net);
+
+#ifdef WLMEDIA_HTSF
+	if (data_buf) {
+		/*  short cut wl ioctl calls here  */
+		if (strcmp("htsf", data_buf) == 0) {
+			dhd_ioctl_htsf_get(dhd, 0);
+			return BCME_OK;
+		}
+
+		if (strcmp("htsflate", data_buf) == 0) {
+			if (ioc->set) {
+				memset(ts, 0, sizeof(tstamp_t)*TSMAX);
+				memset(&maxdelayts, 0, sizeof(tstamp_t));
+				maxdelay = 0;
+				tspktcnt = 0;
+				maxdelaypktno = 0;
+				memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN);
+				memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN);
+				memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN);
+				memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN);
+			} else {
+				dhd_dump_latency();
+			}
+			return BCME_OK;
+		}
+		if (strcmp("htsfclear", data_buf) == 0) {
+			memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN);
+			memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN);
+			memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN);
+			memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN);
+			htsf_seqnum = 0;
+			return BCME_OK;
+		}
+		if (strcmp("htsfhis", data_buf) == 0) {
+			dhd_dump_htsfhisto(&vi_d1, "H to D");
+			dhd_dump_htsfhisto(&vi_d2, "D to D");
+			dhd_dump_htsfhisto(&vi_d3, "D to H");
+			dhd_dump_htsfhisto(&vi_d4, "H to H");
+			return BCME_OK;
+		}
+		if (strcmp("tsport", data_buf) == 0) {
+			if (ioc->set) {
+				memcpy(&tsport, data_buf + 7, 4);
+			} else {
+				DHD_ERROR(("current timestamp port: %d \n", tsport));
+			}
+			return BCME_OK;
+		}
+	}
+#endif /* WLMEDIA_HTSF */
+
+	if ((ioc->cmd == WLC_SET_VAR || ioc->cmd == WLC_GET_VAR) &&
+		data_buf != NULL && strncmp("rpc_", data_buf, 4) == 0) {
+#ifdef BCM_FD_AGGR
+		bcmerror = dhd_fdaggr_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen);
+#else
+		bcmerror = BCME_UNSUPPORTED;
+#endif
+		goto done;
+	}
+	bcmerror = dhd_wl_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen);
+
+done:
+	dhd_check_hang(net, pub, bcmerror);
+
+	return bcmerror;
+}
+
+static int
+dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(net);
+	dhd_ioctl_t ioc;
+	int bcmerror = 0;
+	int ifidx;
+	int ret;
+	void *local_buf = NULL;
+	u16 buflen = 0;
+
+	DHD_OS_WAKE_LOCK(&dhd->pub);
+	DHD_PERIM_LOCK(&dhd->pub);
+
+	/* Interface up check for built-in type */
+	if (!dhd_download_fw_on_driverload && dhd->pub.up == 0) {
+		DHD_ERROR(("%s: Interface is down \n", __FUNCTION__));
+		DHD_PERIM_UNLOCK(&dhd->pub);
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+		return BCME_NOTUP;
+	}
+
+	/* send to dongle only if we are not waiting for reload already */
+	if (dhd->pub.hang_was_sent) {
+		DHD_ERROR(("%s: HANG was sent up earlier\n", __FUNCTION__));
+		DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(&dhd->pub, DHD_EVENT_TIMEOUT_MS);
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+		return OSL_ERROR(BCME_DONGLE_DOWN);
+	}
+
+	ifidx = dhd_net2idx(dhd, net);
+	DHD_TRACE(("%s: ifidx %d, cmd 0x%04x\n", __FUNCTION__, ifidx, cmd));
+
+	if (ifidx == DHD_BAD_IF) {
+		DHD_ERROR(("%s: BAD IF\n", __FUNCTION__));
+		DHD_PERIM_UNLOCK(&dhd->pub);
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+		return -1;
+	}
+
+#if defined(WL_WIRELESS_EXT)
+	/* linux wireless extensions */
+	if ((cmd >= SIOCIWFIRST) && (cmd <= SIOCIWLAST)) {
+		/* may recurse, do NOT lock */
+		ret = wl_iw_ioctl(net, ifr, cmd);
+		DHD_PERIM_UNLOCK(&dhd->pub);
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+		return ret;
+	}
+#endif /* defined(WL_WIRELESS_EXT) */
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
+	if (cmd == SIOCETHTOOL) {
+		ret = dhd_ethtool(dhd, (void*)ifr->ifr_data);
+		DHD_PERIM_UNLOCK(&dhd->pub);
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+		return ret;
+	}
+#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
+
+	if (cmd == SIOCDEVPRIVATE+1) {
+		ret = wl_android_priv_cmd(net, ifr, cmd);
+		dhd_check_hang(net, &dhd->pub, ret);
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+		return ret;
+	}
+
+	if (cmd != SIOCDEVPRIVATE) {
+		DHD_PERIM_UNLOCK(&dhd->pub);
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+		return -EOPNOTSUPP;
+	}
+
+	memset(&ioc, 0, sizeof(ioc));
+
+#ifdef CONFIG_COMPAT
+	if (is_compat_task()) {
+		compat_wl_ioctl_t compat_ioc;
+		if (copy_from_user(&compat_ioc, ifr->ifr_data, sizeof(compat_wl_ioctl_t))) {
+			bcmerror = BCME_BADADDR;
+			goto done;
+		}
+		ioc.cmd = compat_ioc.cmd;
+		ioc.buf = compat_ptr(compat_ioc.buf);
+		ioc.len = compat_ioc.len;
+		ioc.set = compat_ioc.set;
+		ioc.used = compat_ioc.used;
+		ioc.needed = compat_ioc.needed;
+		/* To differentiate between wl and dhd read 4 more byes */
+		if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(compat_wl_ioctl_t),
+			sizeof(uint)) != 0)) {
+			bcmerror = BCME_BADADDR;
+			goto done;
+		}
+	} else
+#endif /* CONFIG_COMPAT */
+	{
+		/* Copy the ioc control structure part of ioctl request */
+		if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) {
+			bcmerror = BCME_BADADDR;
+			goto done;
+		}
+
+		/* To differentiate between wl and dhd read 4 more byes */
+		if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t),
+			sizeof(uint)) != 0)) {
+			bcmerror = BCME_BADADDR;
+			goto done;
+		}
+	}
+
+	if (!capable(CAP_NET_ADMIN)) {
+		bcmerror = BCME_EPERM;
+		goto done;
+	}
+
+	if (ioc.len > 0) {
+		buflen = MIN(ioc.len, DHD_IOCTL_MAXLEN);
+		if (!(local_buf = MALLOC(dhd->pub.osh, buflen+1))) {
+			bcmerror = BCME_NOMEM;
+			goto done;
+		}
+
+		DHD_PERIM_UNLOCK(&dhd->pub);
+		if (copy_from_user(local_buf, ioc.buf, buflen)) {
+			DHD_PERIM_LOCK(&dhd->pub);
+			bcmerror = BCME_BADADDR;
+			goto done;
+		}
+		DHD_PERIM_LOCK(&dhd->pub);
+
+		*(char *)(local_buf + buflen) = '\0';
+	}
+
+	bcmerror = dhd_ioctl_process(&dhd->pub, ifidx, &ioc, local_buf);
+
+	if (!bcmerror && buflen && local_buf && ioc.buf) {
+		DHD_PERIM_UNLOCK(&dhd->pub);
+		if (copy_to_user(ioc.buf, local_buf, buflen))
+			bcmerror = -EFAULT;
+		DHD_PERIM_LOCK(&dhd->pub);
+	}
+
+done:
+	if (local_buf)
+		MFREE(dhd->pub.osh, local_buf, buflen+1);
+
+	DHD_PERIM_UNLOCK(&dhd->pub);
+	DHD_OS_WAKE_UNLOCK(&dhd->pub);
+
+	return OSL_ERROR(bcmerror);
+}
+
+
+
+static int
+dhd_stop(struct net_device *net)
+{
+	int ifidx = 0;
+	dhd_info_t *dhd = DHD_DEV_INFO(net);
+	DHD_OS_WAKE_LOCK(&dhd->pub);
+	DHD_PERIM_LOCK(&dhd->pub);
+	DHD_TRACE(("%s: Enter %p\n", __FUNCTION__, net));
+	if (dhd->pub.up == 0) {
+		goto exit;
+	}
+
+	dhd_if_flush_sta(DHD_DEV_IFP(net));
+
+
+	ifidx = dhd_net2idx(dhd, net);
+	BCM_REFERENCE(ifidx);
+
+	/* Set state and stop OS transmissions */
+	netif_stop_queue(net);
+	dhd->pub.up = 0;
+
+#ifdef WL_CFG80211
+	if (ifidx == 0) {
+		wl_cfg80211_down(NULL);
+
+		/*
+		 * For CFG80211: Clean up all the left over virtual interfaces
+		 * when the primary Interface is brought down. [ifconfig wlan0 down]
+		 */
+		if (!dhd_download_fw_on_driverload) {
+			if ((dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) &&
+				(dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)) {
+				int i;
+				dhd_if_t *ifp;
+
+				dhd_net_if_lock_local(dhd);
+				for (i = 1; i < DHD_MAX_IFS; i++)
+					dhd_remove_if(&dhd->pub, i, FALSE);
+
+				/* remove sta list for primary interface */
+				ifp = dhd->iflist[0];
+				if (ifp && ifp->net) {
+					dhd_if_del_sta_list(ifp);
+				}
+#ifdef PCIE_FULL_DONGLE
+				/* Initialize STA info list */
+				INIT_LIST_HEAD(&ifp->sta_list);
+#endif
+				dhd_net_if_unlock_local(dhd);
+			}
+		}
+	}
+#endif /* WL_CFG80211 */
+
+#ifdef PROP_TXSTATUS
+	dhd_wlfc_cleanup(&dhd->pub, NULL, 0);
+#endif
+	/* Stop the protocol module */
+	dhd_prot_stop(&dhd->pub);
+
+	OLD_MOD_DEC_USE_COUNT;
+exit:
+#if defined(WL_CFG80211)
+	if (ifidx == 0 && !dhd_download_fw_on_driverload)
+		wl_android_wifi_off(net, TRUE);
+#endif
+	dhd->pub.rxcnt_timeout = 0;
+	dhd->pub.txcnt_timeout = 0;
+
+	dhd->pub.hang_was_sent = 0;
+
+	/* Clear country spec for for built-in type driver */
+#ifndef CUSTOM_COUNTRY_CODE
+	if (!dhd_download_fw_on_driverload) {
+		dhd->pub.dhd_cspec.country_abbrev[0] = 0x00;
+		dhd->pub.dhd_cspec.rev = 0;
+		dhd->pub.dhd_cspec.ccode[0] = 0x00;
+	}
+#endif
+	DHD_PERIM_UNLOCK(&dhd->pub);
+	DHD_OS_WAKE_UNLOCK(&dhd->pub);
+	return 0;
+}
+
+#if defined(WL_CFG80211) && (defined(USE_INITIAL_2G_SCAN) || \
+	defined(USE_INITIAL_SHORT_DWELL_TIME))
+extern bool g_first_broadcast_scan;
+#endif /* OEM_ANDROID && WL_CFG80211 && (USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME) */
+
+#ifdef WL11U
+static int dhd_interworking_enable(dhd_pub_t *dhd)
+{
+	char iovbuf[WLC_IOCTL_SMLEN];
+	uint32 enable = true;
+	int ret = BCME_OK;
+
+	bcm_mkiovar("interworking", (char *)&enable, sizeof(enable), iovbuf, sizeof(iovbuf));
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
+		DHD_ERROR(("%s: enableing interworking failed, ret=%d\n", __FUNCTION__, ret));
+	}
+
+	if (ret == BCME_OK) {
+		/* basic capabilities for HS20 REL2 */
+		uint32 cap = WL_WNM_BSSTRANS | WL_WNM_NOTIF;
+		bcm_mkiovar("wnm", (char *)&cap, sizeof(cap), iovbuf, sizeof(iovbuf));
+		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
+			iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
+			DHD_ERROR(("%s: failed to set WNM info, ret=%d\n", __FUNCTION__, ret));
+		}
+	}
+
+	return ret;
+}
+#endif /* WL11u */
+
+static int
+dhd_open(struct net_device *net)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(net);
+#ifdef TOE
+	uint32 toe_ol;
+#endif
+	int ifidx;
+	int32 ret = 0;
+
+
+
+
+	DHD_OS_WAKE_LOCK(&dhd->pub);
+	DHD_PERIM_LOCK(&dhd->pub);
+	dhd->pub.dongle_trap_occured = 0;
+	dhd->pub.hang_was_sent = 0;
+
+#if !defined(WL_CFG80211)
+	/*
+	 * Force start if ifconfig_up gets called before START command
+	 *  We keep WEXT's wl_control_wl_start to provide backward compatibility
+	 *  This should be removed in the future
+	 */
+	ret = wl_control_wl_start(net);
+	if (ret != 0) {
+		DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
+		ret = -1;
+		goto exit;
+	}
+
+#endif
+
+	ifidx = dhd_net2idx(dhd, net);
+	DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
+
+	if (ifidx < 0) {
+		DHD_ERROR(("%s: Error: called with invalid IF\n", __FUNCTION__));
+		ret = -1;
+		goto exit;
+	}
+
+	if (!dhd->iflist[ifidx]) {
+		DHD_ERROR(("%s: Error: called when IF already deleted\n", __FUNCTION__));
+		ret = -1;
+		goto exit;
+	}
+
+	if (ifidx == 0) {
+		atomic_set(&dhd->pend_8021x_cnt, 0);
+#if defined(WL_CFG80211)
+		if (!dhd_download_fw_on_driverload) {
+			DHD_ERROR(("\n%s\n", dhd_version));
+#if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
+			g_first_broadcast_scan = TRUE;
+#endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
+			ret = wl_android_wifi_on(net);
+			if (ret != 0) {
+				DHD_ERROR(("%s : wl_android_wifi_on failed (%d)\n",
+					__FUNCTION__, ret));
+				ret = -1;
+				goto exit;
+			}
+		}
+#endif
+
+		if (dhd->pub.busstate != DHD_BUS_DATA) {
+
+			/* try to bring up bus */
+			DHD_PERIM_UNLOCK(&dhd->pub);
+			ret = dhd_bus_start(&dhd->pub);
+			DHD_PERIM_LOCK(&dhd->pub);
+			if (ret) {
+				DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
+				ret = -1;
+				goto exit;
+			}
+
+		}
+
+		/* dhd_sync_with_dongle has been called in dhd_bus_start or wl_android_wifi_on */
+		memcpy(net->dev_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
+
+#ifdef TOE
+		/* Get current TOE mode from dongle */
+		if (dhd_toe_get(dhd, ifidx, &toe_ol) >= 0 && (toe_ol & TOE_TX_CSUM_OL) != 0)
+			dhd->iflist[ifidx]->net->features |= NETIF_F_IP_CSUM;
+		else
+			dhd->iflist[ifidx]->net->features &= ~NETIF_F_IP_CSUM;
+#endif /* TOE */
+
+#if defined(WL_CFG80211)
+		if (unlikely(wl_cfg80211_up(NULL))) {
+			DHD_ERROR(("%s: failed to bring up cfg80211\n", __FUNCTION__));
+			ret = -1;
+			goto exit;
+		}
+#endif /* WL_CFG80211 */
+	}
+
+	/* Allow transmit calls */
+	netif_start_queue(net);
+	dhd->pub.up = 1;
+
+#ifdef BCMDBGFS
+	dhd_dbg_init(&dhd->pub);
+#endif
+
+	OLD_MOD_INC_USE_COUNT;
+exit:
+	if (ret)
+		dhd_stop(net);
+
+	DHD_PERIM_UNLOCK(&dhd->pub);
+	DHD_OS_WAKE_UNLOCK(&dhd->pub);
+
+
+	return ret;
+}
+
+int dhd_do_driver_init(struct net_device *net)
+{
+	dhd_info_t *dhd = NULL;
+
+	if (!net) {
+		DHD_ERROR(("Primary Interface not initialized \n"));
+		return -EINVAL;
+	}
+
+
+	/*  && defined(OEM_ANDROID) && defined(BCMSDIO) */
+	dhd = DHD_DEV_INFO(net);
+
+	/* If driver is already initialized, do nothing
+	 */
+	if (dhd->pub.busstate == DHD_BUS_DATA) {
+		DHD_TRACE(("Driver already Inititalized. Nothing to do"));
+		return 0;
+	}
+
+	if (dhd_open(net) < 0) {
+		DHD_ERROR(("Driver Init Failed \n"));
+		return -1;
+	}
+
+	return 0;
+}
+
+int
+dhd_event_ifadd(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
+{
+
+#ifdef WL_CFG80211
+	if (wl_cfg80211_notify_ifadd(ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK)
+		return BCME_OK;
+#endif
+
+	/* handle IF event caused by wl commands, SoftAP, WEXT and
+	 * anything else. This has to be done asynchronously otherwise
+	 * DPC will be blocked (and iovars will timeout as DPC has no chance
+	 * to read the response back)
+	 */
+	if (ifevent->ifidx > 0) {
+		dhd_if_event_t *if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
+
+		memcpy(&if_event->event, ifevent, sizeof(if_event->event));
+		memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
+		strncpy(if_event->name, name, IFNAMSIZ);
+		if_event->name[IFNAMSIZ - 1] = '\0';
+		dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event,
+			DHD_WQ_WORK_IF_ADD, dhd_ifadd_event_handler, DHD_WORK_PRIORITY_LOW);
+	}
+
+	return BCME_OK;
+}
+
+int
+dhd_event_ifdel(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
+{
+	dhd_if_event_t *if_event;
+
+#ifdef WL_CFG80211
+	if (wl_cfg80211_notify_ifdel(ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK)
+		return BCME_OK;
+#endif /* WL_CFG80211 */
+
+	/* handle IF event caused by wl commands, SoftAP, WEXT and
+	 * anything else
+	 */
+	if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
+	memcpy(&if_event->event, ifevent, sizeof(if_event->event));
+	memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
+	strncpy(if_event->name, name, IFNAMSIZ);
+	if_event->name[IFNAMSIZ - 1] = '\0';
+	dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event, DHD_WQ_WORK_IF_DEL,
+		dhd_ifdel_event_handler, DHD_WORK_PRIORITY_LOW);
+
+	return BCME_OK;
+}
+
+/* unregister and free the existing net_device interface (if any) in iflist and
+ * allocate a new one. the slot is reused. this function does NOT register the
+ * new interface to linux kernel. dhd_register_if does the job
+ */
+struct net_device*
+dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, char *name,
+	uint8 *mac, uint8 bssidx, bool need_rtnl_lock)
+{
+	dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
+	dhd_if_t *ifp;
+
+	ASSERT(dhdinfo && (ifidx < DHD_MAX_IFS));
+	ifp = dhdinfo->iflist[ifidx];
+
+	if (ifp != NULL) {
+		if (ifp->net != NULL) {
+			DHD_ERROR(("%s: free existing IF %s\n", __FUNCTION__, ifp->net->name));
+
+			dhd_dev_priv_clear(ifp->net); /* clear net_device private */
+
+			/* in unregister_netdev case, the interface gets freed by net->destructor
+			 * (which is set to free_netdev)
+			 */
+			if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
+				free_netdev(ifp->net);
+			} else {
+				netif_stop_queue(ifp->net);
+				if (need_rtnl_lock)
+					unregister_netdev(ifp->net);
+				else
+					unregister_netdevice(ifp->net);
+			}
+			ifp->net = NULL;
+		}
+	} else {
+		ifp = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_t));
+		if (ifp == NULL) {
+			DHD_ERROR(("%s: OOM - dhd_if_t(%zu)\n", __FUNCTION__, sizeof(dhd_if_t)));
+			return NULL;
+		}
+	}
+
+	memset(ifp, 0, sizeof(dhd_if_t));
+	ifp->info = dhdinfo;
+	ifp->idx = ifidx;
+	ifp->bssidx = bssidx;
+	if (mac != NULL)
+		memcpy(&ifp->mac_addr, mac, ETHER_ADDR_LEN);
+
+	/* Allocate etherdev, including space for private structure */
+	ifp->net = alloc_etherdev(DHD_DEV_PRIV_SIZE);
+	if (ifp->net == NULL) {
+		DHD_ERROR(("%s: OOM - alloc_etherdev(%zu)\n", __FUNCTION__, sizeof(dhdinfo)));
+		goto fail;
+	}
+
+	/* Setup the dhd interface's netdevice private structure. */
+	dhd_dev_priv_save(ifp->net, dhdinfo, ifp, ifidx);
+
+	if (name && name[0]) {
+		strncpy(ifp->net->name, name, IFNAMSIZ);
+		ifp->net->name[IFNAMSIZ - 1] = '\0';
+	}
+#ifdef WL_CFG80211
+	if (ifidx == 0)
+		ifp->net->destructor = free_netdev;
+	else
+		ifp->net->destructor = dhd_netdev_free;
+#else
+	ifp->net->destructor = free_netdev;
+#endif /* WL_CFG80211 */
+	strncpy(ifp->name, ifp->net->name, IFNAMSIZ);
+	ifp->name[IFNAMSIZ - 1] = '\0';
+	dhdinfo->iflist[ifidx] = ifp;
+
+#ifdef PCIE_FULL_DONGLE
+	/* Initialize STA info list */
+	INIT_LIST_HEAD(&ifp->sta_list);
+	DHD_IF_STA_LIST_LOCK_INIT(ifp);
+#endif /* PCIE_FULL_DONGLE */
+
+	return ifp->net;
+
+fail:
+	if (ifp != NULL) {
+		if (ifp->net != NULL) {
+			dhd_dev_priv_clear(ifp->net);
+			free_netdev(ifp->net);
+			ifp->net = NULL;
+		}
+		MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
+		ifp = NULL;
+	}
+	dhdinfo->iflist[ifidx] = NULL;
+	return NULL;
+}
+
+/* unregister and free the the net_device interface associated with the indexed
+ * slot, also free the slot memory and set the slot pointer to NULL
+ */
+int
+dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock)
+{
+	dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
+	dhd_if_t *ifp;
+
+	ifp = dhdinfo->iflist[ifidx];
+	if (ifp != NULL) {
+		if (ifp->net != NULL) {
+			DHD_ERROR(("deleting interface '%s' idx %d\n", ifp->net->name, ifp->idx));
+
+			/* in unregister_netdev case, the interface gets freed by net->destructor
+			 * (which is set to free_netdev)
+			 */
+			if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
+				free_netdev(ifp->net);
+			} else {
+				netif_stop_queue(ifp->net);
+
+
+
+				if (need_rtnl_lock)
+					unregister_netdev(ifp->net);
+				else
+					unregister_netdevice(ifp->net);
+			}
+			ifp->net = NULL;
+		}
+#ifdef DHD_WMF
+		dhd_wmf_cleanup(dhdpub, ifidx);
+#endif /* DHD_WMF */
+
+		dhd_if_del_sta_list(ifp);
+
+		dhdinfo->iflist[ifidx] = NULL;
+		MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
+
+	}
+
+	return BCME_OK;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
+static struct net_device_ops dhd_ops_pri = {
+	.ndo_open = dhd_open,
+	.ndo_stop = dhd_stop,
+	.ndo_get_stats = dhd_get_stats,
+	.ndo_do_ioctl = dhd_ioctl_entry,
+	.ndo_start_xmit = dhd_start_xmit,
+	.ndo_set_mac_address = dhd_set_mac_address,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
+	.ndo_set_rx_mode = dhd_set_multicast_list,
+#else
+	.ndo_set_multicast_list = dhd_set_multicast_list,
+#endif
+};
+
+static struct net_device_ops dhd_ops_virt = {
+	.ndo_get_stats = dhd_get_stats,
+	.ndo_do_ioctl = dhd_ioctl_entry,
+	.ndo_start_xmit = dhd_start_xmit,
+	.ndo_set_mac_address = dhd_set_mac_address,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
+	.ndo_set_rx_mode = dhd_set_multicast_list,
+#else
+	.ndo_set_multicast_list = dhd_set_multicast_list,
+#endif
+};
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) */
+
+#ifdef DEBUGGER
+extern void debugger_init(void *bus_handle);
+#endif
+
+
+#ifdef SHOW_LOGTRACE
+static char *logstrs_path = "/root/logstrs.bin";
+module_param(logstrs_path, charp, S_IRUGO);
+
+int
+dhd_init_logstrs_array(dhd_event_log_t *temp)
+{
+	struct file *filep = NULL;
+	struct kstat stat;
+	mm_segment_t fs;
+	char *raw_fmts =  NULL;
+	int logstrs_size = 0;
+
+	logstr_header_t *hdr = NULL;
+	uint32 *lognums = NULL;
+	char *logstrs = NULL;
+	int ram_index = 0;
+	char **fmts;
+	int num_fmts = 0;
+	uint32 i = 0;
+	int error = 0;
+	set_fs(KERNEL_DS);
+	fs = get_fs();
+	filep = filp_open(logstrs_path, O_RDONLY, 0);
+	if (IS_ERR(filep)) {
+		DHD_ERROR(("Failed to open the file logstrs.bin in %s",  __FUNCTION__));
+		goto fail;
+	}
+	error = vfs_stat(logstrs_path, &stat);
+	if (error) {
+		DHD_ERROR(("Failed in %s to find file stat", __FUNCTION__));
+		goto fail;
+	}
+	logstrs_size = (int) stat.size;
+
+	raw_fmts = kmalloc(logstrs_size, GFP_KERNEL);
+	if (raw_fmts == NULL) {
+		DHD_ERROR(("Failed to allocate raw_fmts memory"));
+		goto fail;
+	}
+	if (vfs_read(filep, raw_fmts, logstrs_size, &filep->f_pos) !=	logstrs_size) {
+		DHD_ERROR(("Error: Log strings file read failed"));
+		goto fail;
+	}
+
+	/* Remember header from the logstrs.bin file */
+	hdr = (logstr_header_t *) (raw_fmts + logstrs_size -
+		sizeof(logstr_header_t));
+
+	if (hdr->log_magic == LOGSTRS_MAGIC) {
+		/*
+		* logstrs.bin start with header.
+		*/
+		num_fmts =	hdr->rom_logstrs_offset / sizeof(uint32);
+		ram_index = (hdr->ram_lognums_offset -
+			hdr->rom_lognums_offset) / sizeof(uint32);
+		lognums = (uint32 *) &raw_fmts[hdr->rom_lognums_offset];
+		logstrs = (char *)	 &raw_fmts[hdr->rom_logstrs_offset];
+	} else {
+		/*
+		 * Legacy logstrs.bin format without header.
+		 */
+		num_fmts = *((uint32 *) (raw_fmts)) / sizeof(uint32);
+		if (num_fmts == 0) {
+			/* Legacy ROM/RAM logstrs.bin format:
+			  *  - ROM 'lognums' section
+			  *   - RAM 'lognums' section
+			  *   - ROM 'logstrs' section.
+			  *   - RAM 'logstrs' section.
+			  *
+			  * 'lognums' is an array of indexes for the strings in the
+			  * 'logstrs' section. The first uint32 is 0 (index of first
+			  * string in ROM 'logstrs' section).
+			  *
+			  * The 4324b5 is the only ROM that uses this legacy format. Use the
+			  * fixed number of ROM fmtnums to find the start of the RAM
+			  * 'lognums' section. Use the fixed first ROM string ("Con\n") to
+			  * find the ROM 'logstrs' section.
+			  */
+			#define NUM_4324B5_ROM_FMTS	186
+			#define FIRST_4324B5_ROM_LOGSTR "Con\n"
+			ram_index = NUM_4324B5_ROM_FMTS;
+			lognums = (uint32 *) raw_fmts;
+			num_fmts =	ram_index;
+			logstrs = (char *) &raw_fmts[num_fmts << 2];
+			while (strncmp(FIRST_4324B5_ROM_LOGSTR, logstrs, 4)) {
+				num_fmts++;
+				logstrs = (char *) &raw_fmts[num_fmts << 2];
+			}
+		} else {
+				/* Legacy RAM-only logstrs.bin format:
+				 *	  - RAM 'lognums' section
+				 *	  - RAM 'logstrs' section.
+				 *
+				 * 'lognums' is an array of indexes for the strings in the
+				 * 'logstrs' section. The first uint32 is an index to the
+				 * start of 'logstrs'. Therefore, if this index is divided
+				 * by 'sizeof(uint32)' it provides the number of logstr
+				 *	entries.
+				 */
+				ram_index = 0;
+				lognums = (uint32 *) raw_fmts;
+				logstrs = (char *)	&raw_fmts[num_fmts << 2];
+			}
+	}
+	fmts = kmalloc(num_fmts  * sizeof(char *), GFP_KERNEL);
+	if (fmts == NULL) {
+		DHD_ERROR(("Failed to allocate fmts memory"));
+		goto fail;
+	}
+
+	for (i = 0; i < num_fmts; i++) {
+		/* ROM lognums index into logstrs using 'rom_logstrs_offset' as a base
+		* (they are 0-indexed relative to 'rom_logstrs_offset').
+		*
+		* RAM lognums are already indexed to point to the correct RAM logstrs (they
+		* are 0-indexed relative to the start of the logstrs.bin file).
+		*/
+		if (i == ram_index) {
+			logstrs = raw_fmts;
+		}
+		fmts[i] = &logstrs[lognums[i]];
+	}
+	temp->fmts = fmts;
+	temp->raw_fmts = raw_fmts;
+	temp->num_fmts = num_fmts;
+	filp_close(filep, NULL);
+	set_fs(fs);
+	return 0;
+fail:
+	if (raw_fmts) {
+		kfree(raw_fmts);
+		raw_fmts = NULL;
+	}
+	if (!IS_ERR(filep))
+		filp_close(filep, NULL);
+	set_fs(fs);
+	temp->fmts = NULL;
+	return -1;
+}
+#endif /* SHOW_LOGTRACE */
+
+
+dhd_pub_t *
+dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen)
+{
+	dhd_info_t *dhd = NULL;
+	struct net_device *net = NULL;
+	char if_name[IFNAMSIZ] = {'\0'};
+	uint32 bus_type = -1;
+	uint32 bus_num = -1;
+	uint32 slot_num = -1;
+	wifi_adapter_info_t *adapter = NULL;
+
+	dhd_attach_states_t dhd_state = DHD_ATTACH_STATE_INIT;
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	/* will implement get_ids for DBUS later */
+#if defined(BCMSDIO)
+	dhd_bus_get_ids(bus, &bus_type, &bus_num, &slot_num);
+#endif
+	adapter = dhd_wifi_platform_get_adapter(bus_type, bus_num, slot_num);
+
+	/* Allocate primary dhd_info */
+	dhd = wifi_platform_prealloc(adapter, DHD_PREALLOC_DHD_INFO, sizeof(dhd_info_t));
+	if (dhd == NULL) {
+		dhd = MALLOC(osh, sizeof(dhd_info_t));
+		if (dhd == NULL) {
+			DHD_ERROR(("%s: OOM - alloc dhd_info\n", __FUNCTION__));
+			goto fail;
+		}
+	}
+	memset(dhd, 0, sizeof(dhd_info_t));
+	dhd_state |= DHD_ATTACH_STATE_DHD_ALLOC;
+
+	dhd->unit = dhd_found + instance_base; /* do not increment dhd_found, yet */
+
+	dhd->pub.osh = osh;
+	dhd->adapter = adapter;
+
+#ifdef GET_CUSTOM_MAC_ENABLE
+	wifi_platform_get_mac_addr(dhd->adapter, dhd->pub.mac.octet);
+#endif /* GET_CUSTOM_MAC_ENABLE */
+#ifdef CUSTOM_FORCE_NODFS_FLAG
+	dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG;
+	dhd->pub.force_country_change = TRUE;
+#endif
+#ifdef CUSTOM_COUNTRY_CODE
+	get_customized_country_code(dhd->adapter,
+		dhd->pub.dhd_cspec.country_abbrev, &dhd->pub.dhd_cspec,
+		dhd->pub.dhd_cflags);
+#endif /* CUSTOM_COUNTRY_CODE */
+
+	dhd->pub.short_dwell_time = -1;
+
+	dhd->thr_dpc_ctl.thr_pid = DHD_PID_KT_TL_INVALID;
+	dhd->thr_wdt_ctl.thr_pid = DHD_PID_KT_INVALID;
+
+	/* Initialize thread based operation and lock */
+	sema_init(&dhd->sdsem, 1);
+
+	/* Some DHD modules (e.g. cfg80211) configures operation mode based on firmware name.
+	 * This is indeed a hack but we have to make it work properly before we have a better
+	 * solution
+	 */
+	dhd_update_fw_nv_path(dhd);
+
+	/* Link to info module */
+	dhd->pub.info = dhd;
+
+
+	/* Link to bus module */
+	dhd->pub.bus = bus;
+	dhd->pub.hdrlen = bus_hdrlen;
+
+	/* Set network interface name if it was provided as module parameter */
+	if (iface_name[0]) {
+		int len;
+		char ch;
+		strncpy(if_name, iface_name, IFNAMSIZ);
+		if_name[IFNAMSIZ - 1] = 0;
+		len = strlen(if_name);
+		ch = if_name[len - 1];
+		if ((ch > '9' || ch < '0') && (len < IFNAMSIZ - 2))
+			strcat(if_name, "%d");
+	}
+	net = dhd_allocate_if(&dhd->pub, 0, if_name, NULL, 0, TRUE);
+	if (net == NULL)
+		goto fail;
+	dhd_state |= DHD_ATTACH_STATE_ADD_IF;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
+	net->open = NULL;
+#else
+	net->netdev_ops = NULL;
+#endif
+
+	sema_init(&dhd->proto_sem, 1);
+
+#ifdef PROP_TXSTATUS
+	spin_lock_init(&dhd->wlfc_spinlock);
+
+	dhd->pub.skip_fc = dhd_wlfc_skip_fc;
+	dhd->pub.plat_init = dhd_wlfc_plat_init;
+	dhd->pub.plat_deinit = dhd_wlfc_plat_deinit;
+#ifdef WLFC_STATE_PREALLOC
+	dhd->pub.wlfc_state = MALLOC(dhd->pub.osh, sizeof(athost_wl_status_info_t));
+	if (dhd->pub.wlfc_state == NULL)
+		DHD_ERROR(("%s: wlfc_state prealloc failed\n", __FUNCTION__));
+#endif /* WLFC_STATE_PREALLOC */
+#endif /* PROP_TXSTATUS */
+
+	/* Initialize other structure content */
+	init_waitqueue_head(&dhd->ioctl_resp_wait);
+	init_waitqueue_head(&dhd->d3ack_wait);
+	init_waitqueue_head(&dhd->ctrl_wait);
+
+	/* Initialize the spinlocks */
+	spin_lock_init(&dhd->sdlock);
+	spin_lock_init(&dhd->txqlock);
+	spin_lock_init(&dhd->dhd_lock);
+	spin_lock_init(&dhd->rxf_lock);
+#if defined(RXFRAME_THREAD)
+	dhd->rxthread_enabled = TRUE;
+#endif /* defined(RXFRAME_THREAD) */
+
+#ifdef DHDTCPACK_SUPPRESS
+	spin_lock_init(&dhd->tcpack_lock);
+#endif /* DHDTCPACK_SUPPRESS */
+
+	/* Initialize Wakelock stuff */
+	spin_lock_init(&dhd->wakelock_spinlock);
+	dhd->wakelock_counter = 0;
+	dhd->wakelock_wd_counter = 0;
+	dhd->wakelock_rx_timeout_enable = 0;
+	dhd->wakelock_ctrl_timeout_enable = 0;
+#ifdef CONFIG_HAS_WAKELOCK
+	wake_lock_init(&dhd->wl_wifi, WAKE_LOCK_SUSPEND, "wlan_wake");
+	wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake");
+	wake_lock_init(&dhd->wl_ctrlwake, WAKE_LOCK_SUSPEND, "wlan_ctrl_wake");
+	wake_lock_init(&dhd->wl_wdwake, WAKE_LOCK_SUSPEND, "wlan_wd_wake");
+#endif /* CONFIG_HAS_WAKELOCK */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	mutex_init(&dhd->dhd_net_if_mutex);
+	mutex_init(&dhd->dhd_suspend_mutex);
+#endif
+	dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;
+
+	/* Attach and link in the protocol */
+	if (dhd_prot_attach(&dhd->pub) != 0) {
+		DHD_ERROR(("dhd_prot_attach failed\n"));
+		goto fail;
+	}
+	dhd_state |= DHD_ATTACH_STATE_PROT_ATTACH;
+
+#ifdef WL_CFG80211
+	/* Attach and link in the cfg80211 */
+	if (unlikely(wl_cfg80211_attach(net, &dhd->pub))) {
+		DHD_ERROR(("wl_cfg80211_attach failed\n"));
+		goto fail;
+	}
+
+	dhd_monitor_init(&dhd->pub);
+	dhd_state |= DHD_ATTACH_STATE_CFG80211;
+#endif
+#if defined(WL_WIRELESS_EXT)
+	/* Attach and link in the iw */
+	if (!(dhd_state &  DHD_ATTACH_STATE_CFG80211)) {
+		if (wl_iw_attach(net, (void *)&dhd->pub) != 0) {
+		DHD_ERROR(("wl_iw_attach failed\n"));
+		goto fail;
+	}
+	dhd_state |= DHD_ATTACH_STATE_WL_ATTACH;
+	}
+#endif /* defined(WL_WIRELESS_EXT) */
+
+#ifdef SHOW_LOGTRACE
+	dhd_init_logstrs_array(&dhd->event_data);
+#endif /* SHOW_LOGTRACE */
+
+	if (dhd_sta_pool_init(&dhd->pub, DHD_MAX_STA) != BCME_OK) {
+		DHD_ERROR(("%s: Initializing %u sta\n", __FUNCTION__, DHD_MAX_STA));
+		goto fail;
+	}
+
+
+	/* Set up the watchdog timer */
+	init_timer(&dhd->timer);
+	dhd->timer.data = (ulong)dhd;
+	dhd->timer.function = dhd_watchdog;
+	dhd->default_wd_interval = dhd_watchdog_ms;
+
+	if (dhd_watchdog_prio >= 0) {
+		/* Initialize watchdog thread */
+		PROC_START(dhd_watchdog_thread, dhd, &dhd->thr_wdt_ctl, 0, "dhd_watchdog_thread");
+
+	} else {
+		dhd->thr_wdt_ctl.thr_pid = -1;
+	}
+
+#ifdef DEBUGGER
+	debugger_init((void *) bus);
+#endif
+
+	/* Set up the bottom half handler */
+	if (dhd_dpc_prio >= 0) {
+		/* Initialize DPC thread */
+		PROC_START(dhd_dpc_thread, dhd, &dhd->thr_dpc_ctl, 0, "dhd_dpc");
+	} else {
+		/*  use tasklet for dpc */
+		tasklet_init(&dhd->tasklet, dhd_dpc, (ulong)dhd);
+		dhd->thr_dpc_ctl.thr_pid = -1;
+	}
+
+	if (dhd->rxthread_enabled) {
+		bzero(&dhd->pub.skbbuf[0], sizeof(void *) * MAXSKBPEND);
+		/* Initialize RXF thread */
+		PROC_START(dhd_rxf_thread, dhd, &dhd->thr_rxf_ctl, 0, "dhd_rxf");
+	}
+
+	dhd_state |= DHD_ATTACH_STATE_THREADS_CREATED;
+
+#if defined(CONFIG_PM_SLEEP)
+	if (!dhd_pm_notifier_registered) {
+		dhd_pm_notifier_registered = TRUE;
+		register_pm_notifier(&dhd_pm_notifier);
+	}
+#endif /* CONFIG_PM_SLEEP */
+#ifdef SAR_SUPPORT
+	dhd->sar_notifier.notifier_call = dhd_sar_callback;
+	if (!dhd_sar_notifier_registered) {
+		dhd_sar_notifier_registered = TRUE;
+		dhd->sar_enable = 1;		/* unknown state value */
+		register_notifier_by_sar(&dhd->sar_notifier);
+	}
+#endif /* SAR_SUPPORT */
+#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
+	dhd->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 20;
+	dhd->early_suspend.suspend = dhd_early_suspend;
+	dhd->early_suspend.resume = dhd_late_resume;
+	register_early_suspend(&dhd->early_suspend);
+	dhd_state |= DHD_ATTACH_STATE_EARLYSUSPEND_DONE;
+#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
+
+#ifdef ARP_OFFLOAD_SUPPORT
+	dhd->pend_ipaddr = 0;
+	if (!dhd_inetaddr_notifier_registered) {
+		dhd_inetaddr_notifier_registered = TRUE;
+		register_inetaddr_notifier(&dhd_inetaddr_notifier);
+	}
+#endif /* ARP_OFFLOAD_SUPPORT */
+#ifdef CONFIG_IPV6
+	if (!dhd_inet6addr_notifier_registered) {
+		dhd_inet6addr_notifier_registered = TRUE;
+		register_inet6addr_notifier(&dhd_inet6addr_notifier);
+	}
+#endif
+	dhd->dhd_deferred_wq = dhd_deferred_work_init((void *)dhd);
+#ifdef DEBUG_CPU_FREQ
+	dhd->new_freq = alloc_percpu(int);
+	dhd->freq_trans.notifier_call = dhd_cpufreq_notifier;
+	cpufreq_register_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
+#endif
+#ifdef DHDTCPACK_SUPPRESS
+#ifdef BCMSDIO
+	dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_REPLACE);
+#elif defined(BCMPCIE)
+	dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_REPLACE);
+#else
+	dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
+#endif /* BCMSDIO */
+#endif /* DHDTCPACK_SUPPRESS */
+
+	dhd_state |= DHD_ATTACH_STATE_DONE;
+	dhd->dhd_state = dhd_state;
+
+	dhd_found++;
+	return &dhd->pub;
+
+fail:
+	if (dhd_state >= DHD_ATTACH_STATE_DHD_ALLOC) {
+		DHD_TRACE(("%s: Calling dhd_detach dhd_state 0x%x &dhd->pub %p\n",
+			__FUNCTION__, dhd_state, &dhd->pub));
+		dhd->dhd_state = dhd_state;
+		dhd_detach(&dhd->pub);
+		dhd_free(&dhd->pub);
+	}
+
+	return NULL;
+}
+
+int dhd_get_fw_mode(dhd_info_t *dhdinfo)
+{
+	if (strstr(dhdinfo->fw_path, "_apsta") != NULL)
+		return DHD_FLAG_HOSTAP_MODE;
+	if (strstr(dhdinfo->fw_path, "_p2p") != NULL)
+		return DHD_FLAG_P2P_MODE;
+	if (strstr(dhdinfo->fw_path, "_ibss") != NULL)
+		return DHD_FLAG_IBSS_MODE;
+	if (strstr(dhdinfo->fw_path, "_mfg") != NULL)
+		return DHD_FLAG_MFG_MODE;
+
+	return DHD_FLAG_STA_MODE;
+}
+
+extern char *get_nvram_path(void);
+
+bool dhd_update_fw_nv_path(dhd_info_t *dhdinfo)
+{
+	int fw_len;
+	int nv_len;
+	const char *fw = NULL;
+	const char *nv = NULL;
+	wifi_adapter_info_t *adapter = dhdinfo->adapter;
+
+
+	/* Update firmware and nvram path. The path may be from adapter info or module parameter
+	 * The path from adapter info is used for initialization only (as it won't change).
+	 *
+	 * The firmware_path/nvram_path module parameter may be changed by the system at run
+	 * time. When it changes we need to copy it to dhdinfo->fw_path. Also Android private
+	 * command may change dhdinfo->fw_path. As such we need to clear the path info in
+	 * module parameter after it is copied. We won't update the path until the module parameter
+	 * is changed again (first character is not '\0')
+	 */
+
+	/* set default firmware and nvram path for built-in type driver */
+	if (!dhd_download_fw_on_driverload) {
+#ifdef CONFIG_BCMDHD_FW_PATH
+		fw = CONFIG_BCMDHD_FW_PATH;
+#endif /* CONFIG_BCMDHD_FW_PATH */
+#ifdef CONFIG_BCMDHD_NVRAM_PATH
+		nv = CONFIG_BCMDHD_NVRAM_PATH;
+#endif /* CONFIG_BCMDHD_NVRAM_PATH */
+	}
+
+	/* check if we need to initialize the path */
+	if (dhdinfo->fw_path[0] == '\0') {
+		if (adapter && adapter->fw_path && adapter->fw_path[0] != '\0')
+			fw = adapter->fw_path;
+
+	}
+	if (dhdinfo->nv_path[0] == '\0') {
+		if (adapter && adapter->nv_path && adapter->nv_path[0] != '\0')
+			nv = adapter->nv_path;
+	}
+
+	/* Use module parameter if it is valid, EVEN IF the path has not been initialized
+	 *
+	 * TODO: need a solution for multi-chip, can't use the same firmware for all chips
+	 */
+	if (firmware_path[0] != '\0')
+		fw = firmware_path;
+	if (nvram_path[0] != '\0')
+		nv = nvram_path;
+
+	// Update nvram patch
+	nv = get_nvram_path();
+
+	if (fw && fw[0] != '\0') {
+		fw_len = strlen(fw);
+		if (fw_len >= sizeof(dhdinfo->fw_path)) {
+			DHD_ERROR(("fw path len exceeds max len of dhdinfo->fw_path\n"));
+			return FALSE;
+		}
+		strncpy(dhdinfo->fw_path, fw, sizeof(dhdinfo->fw_path));
+		if (dhdinfo->fw_path[fw_len-1] == '\n')
+		       dhdinfo->fw_path[fw_len-1] = '\0';
+	}
+	if (nv && nv[0] != '\0') {
+		nv_len = strlen(nv);
+		if (nv_len >= sizeof(dhdinfo->nv_path)) {
+			DHD_ERROR(("nvram path len exceeds max len of dhdinfo->nv_path\n"));
+			return FALSE;
+		}
+		strncpy(dhdinfo->nv_path, nv, sizeof(dhdinfo->nv_path));
+		if (dhdinfo->nv_path[nv_len-1] == '\n')
+		       dhdinfo->nv_path[nv_len-1] = '\0';
+	}
+
+	/* clear the path in module parameter */
+	firmware_path[0] = '\0';
+
+#ifndef BCMEMBEDIMAGE
+	/* fw_path and nv_path are not mandatory for BCMEMBEDIMAGE */
+	if (dhdinfo->fw_path[0] == '\0') {
+		DHD_ERROR(("firmware path not found\n"));
+		return FALSE;
+	}
+	if (dhdinfo->nv_path[0] == '\0') {
+		DHD_ERROR(("nvram path not found\n"));
+		return FALSE;
+	}
+#endif /* BCMEMBEDIMAGE */
+
+	return TRUE;
+}
+
+
+int
+dhd_bus_start(dhd_pub_t *dhdp)
+{
+	int ret = -1;
+	dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
+	unsigned long flags;
+
+	ASSERT(dhd);
+
+	DHD_TRACE(("Enter %s:\n", __FUNCTION__));
+
+	DHD_PERIM_LOCK(dhdp);
+
+	/* try to download image and nvram to the dongle */
+	if  (dhd->pub.busstate == DHD_BUS_DOWN && dhd_update_fw_nv_path(dhd)) {
+		DHD_INFO(("%s download fw %s, nv %s\n", __FUNCTION__, dhd->fw_path, dhd->nv_path));
+		ret = dhd_bus_download_firmware(dhd->pub.bus, dhd->pub.osh,
+		                                dhd->fw_path, dhd->nv_path);
+		if (ret < 0) {
+			DHD_ERROR(("%s: failed to download firmware %s\n",
+			          __FUNCTION__, dhd->fw_path));
+			DHD_PERIM_UNLOCK(dhdp);
+			return ret;
+		}
+	}
+	if (dhd->pub.busstate != DHD_BUS_LOAD) {
+		DHD_PERIM_UNLOCK(dhdp);
+		return -ENETDOWN;
+	}
+
+	dhd_os_sdlock(dhdp);
+
+	/* Start the watchdog timer */
+	dhd->pub.tickcnt = 0;
+	dhd_os_wd_timer(&dhd->pub, dhd_watchdog_ms);
+
+	/* Bring up the bus */
+	if ((ret = dhd_bus_init(&dhd->pub, FALSE)) != 0) {
+
+		DHD_ERROR(("%s, dhd_bus_init failed %d\n", __FUNCTION__, ret));
+		dhd_os_sdunlock(dhdp);
+		DHD_PERIM_UNLOCK(dhdp);
+		return ret;
+	}
+#if defined(OOB_INTR_ONLY)
+	/* Host registration for OOB interrupt */
+	if (dhd_bus_oob_intr_register(dhdp)) {
+		/* deactivate timer and wait for the handler to finish */
+
+		DHD_GENERAL_LOCK(&dhd->pub, flags);
+		dhd->wd_timer_valid = FALSE;
+		DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+		del_timer_sync(&dhd->timer);
+
+		DHD_ERROR(("%s Host failed to register for OOB\n", __FUNCTION__));
+		dhd_os_sdunlock(dhdp);
+		DHD_PERIM_UNLOCK(dhdp);
+		DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
+		return -ENODEV;
+	}
+
+	/* Enable oob at firmware */
+	dhd_enable_oob_intr(dhd->pub.bus, TRUE);
+#endif
+#ifdef PCIE_FULL_DONGLE
+	{
+		uint8 txpush = 0;
+		uint32 num_flowrings; /* includes H2D common rings */
+		num_flowrings = dhd_bus_max_h2d_queues(dhd->pub.bus, &txpush);
+		DHD_ERROR(("%s: Initializing %u flowrings\n", __FUNCTION__,
+			num_flowrings));
+		if ((ret = dhd_flow_rings_init(&dhd->pub, num_flowrings)) != BCME_OK) {
+			DHD_PERIM_UNLOCK(dhdp);
+			return ret;
+		}
+	}
+#endif /* PCIE_FULL_DONGLE */
+
+	/* Do protocol initialization necessary for IOCTL/IOVAR */
+	dhd_prot_init(&dhd->pub);
+
+	/* If bus is not ready, can't come up */
+	if (dhd->pub.busstate != DHD_BUS_DATA) {
+		DHD_GENERAL_LOCK(&dhd->pub, flags);
+		dhd->wd_timer_valid = FALSE;
+		DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+		del_timer_sync(&dhd->timer);
+		DHD_ERROR(("%s failed bus is not ready\n", __FUNCTION__));
+		dhd_os_sdunlock(dhdp);
+		DHD_PERIM_UNLOCK(dhdp);
+		DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
+		return -ENODEV;
+	}
+
+	dhd_os_sdunlock(dhdp);
+
+	/* Bus is ready, query any dongle information */
+	if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0) {
+		DHD_PERIM_UNLOCK(dhdp);
+		return ret;
+	}
+
+#ifdef ARP_OFFLOAD_SUPPORT
+	if (dhd->pend_ipaddr) {
+#ifdef AOE_IP_ALIAS_SUPPORT
+		aoe_update_host_ipv4_table(&dhd->pub, dhd->pend_ipaddr, TRUE, 0);
+#endif /* AOE_IP_ALIAS_SUPPORT */
+		dhd->pend_ipaddr = 0;
+	}
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+	DHD_PERIM_UNLOCK(dhdp);
+	return 0;
+}
+#ifdef WLTDLS
+int _dhd_tdls_enable(dhd_pub_t *dhd, bool tdls_on, bool auto_on, struct ether_addr *mac)
+{
+	char iovbuf[WLC_IOCTL_SMLEN];
+	uint32 tdls = tdls_on;
+	int ret = 0;
+	uint32 tdls_auto_op = 0;
+	uint32 tdls_idle_time = CUSTOM_TDLS_IDLE_MODE_SETTING;
+	int32 tdls_rssi_high = CUSTOM_TDLS_RSSI_THRESHOLD_HIGH;
+	int32 tdls_rssi_low = CUSTOM_TDLS_RSSI_THRESHOLD_LOW;
+	BCM_REFERENCE(mac);
+	if (!FW_SUPPORTED(dhd, tdls))
+		return BCME_ERROR;
+
+	if (dhd->tdls_enable == tdls_on)
+		goto auto_mode;
+	bcm_mkiovar("tdls_enable", (char *)&tdls, sizeof(tdls), iovbuf, sizeof(iovbuf));
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
+		DHD_ERROR(("%s: tdls %d failed %d\n", __FUNCTION__, tdls, ret));
+		goto exit;
+	}
+	dhd->tdls_enable = tdls_on;
+auto_mode:
+
+	tdls_auto_op = auto_on;
+	bcm_mkiovar("tdls_auto_op", (char *)&tdls_auto_op, sizeof(tdls_auto_op),
+		iovbuf, sizeof(iovbuf));
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+		sizeof(iovbuf), TRUE, 0)) < 0) {
+		DHD_ERROR(("%s: tdls_auto_op failed %d\n", __FUNCTION__, ret));
+		goto exit;
+	}
+
+	if (tdls_auto_op) {
+		bcm_mkiovar("tdls_idle_time", (char *)&tdls_idle_time,
+			sizeof(tdls_idle_time),	iovbuf, sizeof(iovbuf));
+		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+			sizeof(iovbuf), TRUE, 0)) < 0) {
+			DHD_ERROR(("%s: tdls_idle_time failed %d\n", __FUNCTION__, ret));
+			goto exit;
+		}
+		bcm_mkiovar("tdls_rssi_high", (char *)&tdls_rssi_high, 4, iovbuf, sizeof(iovbuf));
+		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+			sizeof(iovbuf), TRUE, 0)) < 0) {
+			DHD_ERROR(("%s: tdls_rssi_high failed %d\n", __FUNCTION__, ret));
+			goto exit;
+		}
+		bcm_mkiovar("tdls_rssi_low", (char *)&tdls_rssi_low, 4, iovbuf, sizeof(iovbuf));
+		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+			sizeof(iovbuf), TRUE, 0)) < 0) {
+			DHD_ERROR(("%s: tdls_rssi_low failed %d\n", __FUNCTION__, ret));
+			goto exit;
+		}
+	}
+
+exit:
+	return ret;
+}
+int dhd_tdls_enable(struct net_device *dev, bool tdls_on, bool auto_on, struct ether_addr *mac)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	int ret = 0;
+	if (dhd)
+		ret = _dhd_tdls_enable(&dhd->pub, tdls_on, auto_on, mac);
+	else
+		ret = BCME_ERROR;
+	return ret;
+}
+#ifdef PCIE_FULL_DONGLE
+void dhd_tdls_update_peer_info(struct net_device *dev, bool connect, uint8 *da)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	dhd_pub_t *dhdp =  (dhd_pub_t *)&dhd->pub;
+	tdls_peer_node_t *cur = dhdp->peer_tbl.node;
+	tdls_peer_node_t *new = NULL, *prev = NULL;
+	dhd_if_t *dhdif;
+	uint8 sa[ETHER_ADDR_LEN];
+	int ifidx = dhd_net2idx(dhd, dev);
+
+	if (ifidx == DHD_BAD_IF)
+		return;
+
+	dhdif = dhd->iflist[ifidx];
+	memcpy(sa, dhdif->mac_addr, ETHER_ADDR_LEN);
+
+	if (connect) {
+		while (cur != NULL) {
+			if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
+				DHD_ERROR(("%s: TDLS Peer exist already %d\n",
+					__FUNCTION__, __LINE__));
+				return;
+			}
+			cur = cur->next;
+		}
+
+		new = MALLOC(dhdp->osh, sizeof(tdls_peer_node_t));
+		if (new == NULL) {
+			DHD_ERROR(("%s: Failed to allocate memory\n", __FUNCTION__));
+			return;
+		}
+		memcpy(new->addr, da, ETHER_ADDR_LEN);
+		new->next = dhdp->peer_tbl.node;
+		dhdp->peer_tbl.node = new;
+		dhdp->peer_tbl.tdls_peer_count++;
+
+	} else {
+		while (cur != NULL) {
+			if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
+				dhd_flow_rings_delete_for_peer(dhdp, ifidx, da);
+				if (prev)
+					prev->next = cur->next;
+				else
+					dhdp->peer_tbl.node = cur->next;
+				MFREE(dhdp->osh, cur, sizeof(tdls_peer_node_t));
+				dhdp->peer_tbl.tdls_peer_count--;
+				return;
+			}
+			prev = cur;
+			cur = cur->next;
+		}
+		DHD_ERROR(("%s: TDLS Peer Entry Not found\n", __FUNCTION__));
+	}
+}
+#endif /* PCIE_FULL_DONGLE */
+#endif
+
+bool dhd_is_concurrent_mode(dhd_pub_t *dhd)
+{
+	if (!dhd)
+		return FALSE;
+
+	if (dhd->op_mode & DHD_FLAG_CONCURR_MULTI_CHAN_MODE)
+		return TRUE;
+	else if ((dhd->op_mode & DHD_FLAG_CONCURR_SINGLE_CHAN_MODE) ==
+		DHD_FLAG_CONCURR_SINGLE_CHAN_MODE)
+		return TRUE;
+	else
+		return FALSE;
+}
+#if !defined(AP) && defined(WLP2P)
+/* From Android JerryBean release, the concurrent mode is enabled by default and the firmware
+ * name would be fw_bcmdhd.bin. So we need to determine whether P2P is enabled in the STA
+ * firmware and accordingly enable concurrent mode (Apply P2P settings). SoftAP firmware
+ * would still be named as fw_bcmdhd_apsta.
+ */
+uint32
+dhd_get_concurrent_capabilites(dhd_pub_t *dhd)
+{
+	int32 ret = 0;
+	char buf[WLC_IOCTL_SMLEN];
+	bool mchan_supported = FALSE;
+	/* if dhd->op_mode is already set for HOSTAP and Manufacturing
+	 * test mode, that means we only will use the mode as it is
+	 */
+	if (dhd->op_mode & (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))
+		return 0;
+	if (FW_SUPPORTED(dhd, vsdb)) {
+		mchan_supported = TRUE;
+	}
+	if (!FW_SUPPORTED(dhd, p2p)) {
+		DHD_TRACE(("Chip does not support p2p\n"));
+		return 0;
+	}
+	else {
+		/* Chip supports p2p but ensure that p2p is really implemented in firmware or not */
+		memset(buf, 0, sizeof(buf));
+		bcm_mkiovar("p2p", 0, 0, buf, sizeof(buf));
+		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf),
+			FALSE, 0)) < 0) {
+			DHD_ERROR(("%s: Get P2P failed (error=%d)\n", __FUNCTION__, ret));
+			return 0;
+		}
+		else {
+			if (buf[0] == 1) {
+				/* By default, chip supports single chan concurrency,
+				* now lets check for mchan
+				*/
+				ret = DHD_FLAG_CONCURR_SINGLE_CHAN_MODE;
+				if (mchan_supported)
+					ret |= DHD_FLAG_CONCURR_MULTI_CHAN_MODE;
+#if defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF)
+				/* For customer_hw4, although ICS,
+				* we still support concurrent mode
+				*/
+				return ret;
+#else
+				return 0;
+#endif
+			}
+		}
+	}
+	return 0;
+}
+#endif
+
+int
+dhd_preinit_ioctls(dhd_pub_t *dhd)
+{
+	int ret = 0;
+	char eventmask[WL_EVENTING_MASK_LEN];
+	char iovbuf[WL_EVENTING_MASK_LEN + 12];	/*  Room for "event_msgs" + '\0' + bitvec  */
+	uint32 buf_key_b4_m4 = 1;
+	uint8 msglen;
+	eventmsgs_ext_t *eventmask_msg;
+	char iov_buf[WLC_IOCTL_SMLEN];
+	int ret2 = 0;
+#if defined(CUSTOM_AMPDU_BA_WSIZE)
+	uint32 ampdu_ba_wsize = 0;
+#endif
+#if defined(CUSTOM_AMPDU_MPDU)
+	int32 ampdu_mpdu = 0;
+#endif
+#if defined(CUSTOM_AMPDU_RELEASE)
+	int32 ampdu_release = 0;
+#endif
+
+#if defined(BCMSDIO)
+#ifdef PROP_TXSTATUS
+	int wlfc_enable = TRUE;
+#ifndef DISABLE_11N
+	uint32 hostreorder = 1;
+#endif /* DISABLE_11N */
+#endif /* PROP_TXSTATUS */
+#endif
+#ifdef PCIE_FULL_DONGLE
+	uint32 wl_ap_isolate;
+#endif /* PCIE_FULL_DONGLE */
+
+#ifdef DHD_ENABLE_LPC
+	uint32 lpc = 1;
+#endif /* DHD_ENABLE_LPC */
+	uint power_mode = PM_FAST;
+	uint32 dongle_align = DHD_SDALIGN;
+#if defined(BCMSDIO)
+	uint32 glom = CUSTOM_GLOM_SETTING;
+#endif /* defined(BCMSDIO) */
+#if defined(CUSTOMER_HW2) && defined(USE_WL_CREDALL)
+	uint32 credall = 1;
+#endif
+	uint bcn_timeout = CUSTOM_BCN_TIMEOUT_SETTING;
+	uint retry_max = 3;
+#if defined(ARP_OFFLOAD_SUPPORT)
+	int arpoe = 1;
+#endif
+	char buf[WLC_IOCTL_SMLEN];
+	char *ptr;
+	uint32 listen_interval = CUSTOM_LISTEN_INTERVAL; /* Default Listen Interval in Beacons */
+#ifdef ROAM_ENABLE
+	uint roamvar = 0;
+	int roam_trigger[2] = {CUSTOM_ROAM_TRIGGER_SETTING, WLC_BAND_ALL};
+	int roam_scan_period[2] = {10, WLC_BAND_ALL};
+	int roam_delta[2] = {CUSTOM_ROAM_DELTA_SETTING, WLC_BAND_ALL};
+#ifdef ROAM_AP_ENV_DETECTION
+	int roam_env_mode = AP_ENV_INDETERMINATE;
+#endif /* ROAM_AP_ENV_DETECTION */
+#ifdef FULL_ROAMING_SCAN_PERIOD_60_SEC
+	int roam_fullscan_period = 60;
+#else /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
+	int roam_fullscan_period = 120;
+#endif /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
+#else
+#ifdef DISABLE_BUILTIN_ROAM
+	uint roamvar = 1;
+#endif /* DISABLE_BUILTIN_ROAM */
+#endif /* ROAM_ENABLE */
+
+#if defined(SOFTAP)
+	uint dtim = 1;
+#endif
+#if (defined(AP) && !defined(WLP2P)) || (!defined(AP) && defined(WL_CFG80211))
+	uint32 mpc = 0; /* Turn MPC off for AP/APSTA mode */
+	struct ether_addr p2p_ea;
+#endif
+
+#if (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC)
+	uint32 apsta = 1; /* Enable APSTA mode */
+#elif defined(SOFTAP_AND_GC)
+	uint32 apsta = 0;
+	int ap_mode = 1;
+#endif /* (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC) */
+#ifdef GET_CUSTOM_MAC_ENABLE
+	struct ether_addr ea_addr;
+#endif /* GET_CUSTOM_MAC_ENABLE */
+
+#ifdef DISABLE_11N
+	uint32 nmode = 0;
+#endif /* DISABLE_11N */
+
+#ifdef USE_WL_TXBF
+	uint32 txbf = 1;
+#endif /* USE_WL_TXBF */
+#ifdef USE_WL_FRAMEBURST
+	uint32 frameburst = 1;
+#endif /* USE_WL_FRAMEBURST */
+#ifdef CUSTOM_PSPRETEND_THR
+	uint32 pspretend_thr = CUSTOM_PSPRETEND_THR;
+#endif
+#ifdef MAX_AP_CLIENT_CNT
+	uint32 max_assoc = MAX_AP_CLIENT_CNT;
+#endif
+
+#ifdef PKT_FILTER_SUPPORT
+	dhd_pkt_filter_enable = TRUE;
+#endif /* PKT_FILTER_SUPPORT */
+#ifdef WLTDLS
+	dhd->tdls_enable = FALSE;
+#endif /* WLTDLS */
+#ifdef DONGLE_ENABLE_ISOLATION
+	dhd->dongle_isolation = TRUE;
+#endif /* DONGLE_ENABLE_ISOLATION */
+	dhd->suspend_bcn_li_dtim = CUSTOM_SUSPEND_BCN_LI_DTIM;
+	DHD_TRACE(("Enter %s\n", __FUNCTION__));
+	dhd->op_mode = 0;
+	if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
+		(op_mode == DHD_FLAG_MFG_MODE)) {
+		/* Check and adjust IOCTL response timeout for Manufactring firmware */
+		dhd_os_set_ioctl_resp_timeout(MFG_IOCTL_RESP_TIMEOUT);
+		DHD_ERROR(("%s : Set IOCTL response time for Manufactring Firmware\n",
+			__FUNCTION__));
+	}
+	else {
+		dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
+		DHD_INFO(("%s : Set IOCTL response time.\n", __FUNCTION__));
+	}
+#ifdef GET_CUSTOM_MAC_ENABLE
+	ret = wifi_platform_get_mac_addr(dhd->info->adapter, ea_addr.octet);
+	if (!ret) {
+		memset(buf, 0, sizeof(buf));
+		bcm_mkiovar("cur_etheraddr", (void *)&ea_addr, ETHER_ADDR_LEN, buf, sizeof(buf));
+		ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
+		if (ret < 0) {
+			DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
+			return BCME_NOTUP;
+		}
+		memcpy(dhd->mac.octet, ea_addr.octet, ETHER_ADDR_LEN);
+	} else {
+#endif /* GET_CUSTOM_MAC_ENABLE */
+		/* Get the default device MAC address directly from firmware */
+		memset(buf, 0, sizeof(buf));
+		bcm_mkiovar("cur_etheraddr", 0, 0, buf, sizeof(buf));
+		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf),
+			FALSE, 0)) < 0) {
+			DHD_ERROR(("%s: can't get MAC address , error=%d\n", __FUNCTION__, ret));
+			return BCME_NOTUP;
+		}
+		/* Update public MAC address after reading from Firmware */
+		memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN);
+
+#ifdef GET_CUSTOM_MAC_ENABLE
+	}
+#endif /* GET_CUSTOM_MAC_ENABLE */
+
+	/* get a capabilities from firmware */
+	memset(dhd->fw_capabilities, 0, sizeof(dhd->fw_capabilities));
+	bcm_mkiovar("cap", 0, 0, dhd->fw_capabilities, sizeof(dhd->fw_capabilities));
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, dhd->fw_capabilities,
+		sizeof(dhd->fw_capabilities), FALSE, 0)) < 0) {
+		DHD_ERROR(("%s: Get Capability failed (error=%d)\n",
+			__FUNCTION__, ret));
+		return 0;
+	}
+	if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_HOSTAP_MODE) ||
+		(op_mode == DHD_FLAG_HOSTAP_MODE)) {
+#ifdef SET_RANDOM_MAC_SOFTAP
+		uint rand_mac;
+#endif
+		dhd->op_mode = DHD_FLAG_HOSTAP_MODE;
+#if defined(ARP_OFFLOAD_SUPPORT)
+			arpoe = 0;
+#endif
+#ifdef PKT_FILTER_SUPPORT
+			dhd_pkt_filter_enable = FALSE;
+#endif
+#ifdef SET_RANDOM_MAC_SOFTAP
+		SRANDOM32((uint)jiffies);
+		rand_mac = RANDOM32();
+		iovbuf[0] = (unsigned char)(vendor_oui >> 16) | 0x02;	/* locally administered bit */
+		iovbuf[1] = (unsigned char)(vendor_oui >> 8);
+		iovbuf[2] = (unsigned char)vendor_oui;
+		iovbuf[3] = (unsigned char)(rand_mac & 0x0F) | 0xF0;
+		iovbuf[4] = (unsigned char)(rand_mac >> 8);
+		iovbuf[5] = (unsigned char)(rand_mac >> 16);
+
+		bcm_mkiovar("cur_etheraddr", (void *)iovbuf, ETHER_ADDR_LEN, buf, sizeof(buf));
+		ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
+		if (ret < 0) {
+			DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
+		} else
+			memcpy(dhd->mac.octet, iovbuf, ETHER_ADDR_LEN);
+#endif /* SET_RANDOM_MAC_SOFTAP */
+#if !defined(AP) && defined(WL_CFG80211)
+		/* Turn off MPC in AP mode */
+		bcm_mkiovar("mpc", (char *)&mpc, 4, iovbuf, sizeof(iovbuf));
+		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+			sizeof(iovbuf), TRUE, 0)) < 0) {
+			DHD_ERROR(("%s mpc for HostAPD failed  %d\n", __FUNCTION__, ret));
+		}
+#endif
+#ifdef MAX_AP_CLIENT_CNT
+		bcm_mkiovar("maxassoc", (char *)&max_assoc, 4, iovbuf, sizeof(iovbuf));
+		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+			sizeof(iovbuf), TRUE, 0)) < 0) {
+			DHD_ERROR(("%s maxassoc for HostAPD failed  %d\n", __FUNCTION__, ret));
+		}
+#endif
+	} else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
+		(op_mode == DHD_FLAG_MFG_MODE)) {
+#if defined(ARP_OFFLOAD_SUPPORT)
+		arpoe = 0;
+#endif /* ARP_OFFLOAD_SUPPORT */
+#ifdef PKT_FILTER_SUPPORT
+		dhd_pkt_filter_enable = FALSE;
+#endif /* PKT_FILTER_SUPPORT */
+		dhd->op_mode = DHD_FLAG_MFG_MODE;
+	} else {
+		uint32 concurrent_mode = 0;
+		if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_P2P_MODE) ||
+			(op_mode == DHD_FLAG_P2P_MODE)) {
+#if defined(ARP_OFFLOAD_SUPPORT)
+			arpoe = 0;
+#endif
+#ifdef PKT_FILTER_SUPPORT
+			dhd_pkt_filter_enable = FALSE;
+#endif
+			dhd->op_mode = DHD_FLAG_P2P_MODE;
+		} else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_IBSS_MODE) ||
+			(op_mode == DHD_FLAG_IBSS_MODE)) {
+			dhd->op_mode = DHD_FLAG_IBSS_MODE;
+		} else
+			dhd->op_mode = DHD_FLAG_STA_MODE;
+#if !defined(AP) && defined(WLP2P)
+		if (dhd->op_mode != DHD_FLAG_IBSS_MODE &&
+			(concurrent_mode = dhd_get_concurrent_capabilites(dhd))) {
+#if defined(ARP_OFFLOAD_SUPPORT)
+			arpoe = 1;
+#endif
+			dhd->op_mode |= concurrent_mode;
+		}
+
+		/* Check if we are enabling p2p */
+		if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
+			bcm_mkiovar("apsta", (char *)&apsta, 4, iovbuf, sizeof(iovbuf));
+			if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
+				iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
+				DHD_ERROR(("%s APSTA for P2P failed ret= %d\n", __FUNCTION__, ret));
+			}
+
+#if defined(SOFTAP_AND_GC)
+		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_AP,
+			(char *)&ap_mode, sizeof(ap_mode), TRUE, 0)) < 0) {
+				DHD_ERROR(("%s WLC_SET_AP failed %d\n", __FUNCTION__, ret));
+		}
+#endif
+			memcpy(&p2p_ea, &dhd->mac, ETHER_ADDR_LEN);
+			ETHER_SET_LOCALADDR(&p2p_ea);
+			bcm_mkiovar("p2p_da_override", (char *)&p2p_ea,
+				ETHER_ADDR_LEN, iovbuf, sizeof(iovbuf));
+			if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
+				iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
+				DHD_ERROR(("%s p2p_da_override ret= %d\n", __FUNCTION__, ret));
+			} else {
+				DHD_INFO(("dhd_preinit_ioctls: p2p_da_override succeeded\n"));
+			}
+		}
+#else
+	(void)concurrent_mode;
+#endif
+	}
+
+	DHD_ERROR(("Firmware up: op_mode=0x%04x, MAC="MACDBG"\n",
+		dhd->op_mode, MAC2STRDBG(dhd->mac.octet)));
+#ifdef CUSTOM_COUNTRY_CODE
+	get_customized_country_code(dhd->info->adapter,
+		dhd->dhd_cspec.country_abbrev, &dhd->dhd_cspec,
+		dhd->dhd_cflags);
+#endif /* CUSTOM_COUNTRY_CODE */
+	/* Set Country code  */
+	if (dhd->dhd_cspec.ccode[0] != 0) {
+		bcm_mkiovar("country", (char *)&dhd->dhd_cspec,
+			sizeof(wl_country_t), iovbuf, sizeof(iovbuf));
+		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
+			DHD_ERROR(("%s: country code setting failed\n", __FUNCTION__));
+	}
+
+
+	/* Set Listen Interval */
+	bcm_mkiovar("assoc_listen", (char *)&listen_interval, 4, iovbuf, sizeof(iovbuf));
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
+		DHD_ERROR(("%s assoc_listen failed %d\n", __FUNCTION__, ret));
+
+#if defined(ROAM_ENABLE) || defined(DISABLE_BUILTIN_ROAM)
+	/* Disable built-in roaming to allowed ext supplicant to take care of roaming */
+	bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
+	dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+#endif /* ROAM_ENABLE || DISABLE_BUILTIN_ROAM */
+#if defined(ROAM_ENABLE)
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_TRIGGER, roam_trigger,
+		sizeof(roam_trigger), TRUE, 0)) < 0)
+		DHD_ERROR(("%s: roam trigger set failed %d\n", __FUNCTION__, ret));
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_SCAN_PERIOD, roam_scan_period,
+		sizeof(roam_scan_period), TRUE, 0)) < 0)
+		DHD_ERROR(("%s: roam scan period set failed %d\n", __FUNCTION__, ret));
+	if ((dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_DELTA, roam_delta,
+		sizeof(roam_delta), TRUE, 0)) < 0)
+		DHD_ERROR(("%s: roam delta set failed %d\n", __FUNCTION__, ret));
+	bcm_mkiovar("fullroamperiod", (char *)&roam_fullscan_period, 4, iovbuf, sizeof(iovbuf));
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
+		DHD_ERROR(("%s: roam fullscan period set failed %d\n", __FUNCTION__, ret));
+#ifdef ROAM_AP_ENV_DETECTION
+	if (roam_trigger[0] == WL_AUTO_ROAM_TRIGGER) {
+		bcm_mkiovar("roam_env_detection", (char *)&roam_env_mode,
+			4, iovbuf, sizeof(iovbuf));
+		if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0) == BCME_OK)
+			dhd->roam_env_detection = TRUE;
+		else {
+			dhd->roam_env_detection = FALSE;
+		}
+	}
+#endif /* ROAM_AP_ENV_DETECTION */
+#endif /* ROAM_ENABLE */
+
+#ifdef WLTDLS
+	/* by default TDLS on and auto mode off */
+	_dhd_tdls_enable(dhd, true, false, NULL);
+#endif /* WLTDLS */
+
+#ifdef DHD_ENABLE_LPC
+	/* Set lpc 1 */
+	bcm_mkiovar("lpc", (char *)&lpc, 4, iovbuf, sizeof(iovbuf));
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+		sizeof(iovbuf), TRUE, 0)) < 0) {
+        if (ret != BCME_NOTDOWN) {
+            DHD_ERROR(("%s Set lpc failed  %d\n", __FUNCTION__, ret));
+        } else {
+            u32 wl_down = 1;
+            ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN,
+                    (char *)&wl_down, sizeof(wl_down), TRUE, 0);
+            DHD_ERROR(("%s lpc fail WL_DOWN : %d, lpc = %d\n", __FUNCTION__, ret, lpc));
+
+            bcm_mkiovar("lpc", (char *)&lpc, 4, iovbuf, sizeof(iovbuf));
+            ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+            DHD_ERROR(("%s Set lpc ret --> %d\n", __FUNCTION__, ret));
+        }
+	}
+#endif /* DHD_ENABLE_LPC */
+
+	/* Set PowerSave mode */
+	dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, sizeof(power_mode), TRUE, 0);
+
+	/* Match Host and Dongle rx alignment */
+	bcm_mkiovar("bus:txglomalign", (char *)&dongle_align, 4, iovbuf, sizeof(iovbuf));
+	dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+
+#if defined(CUSTOMER_HW2) && defined(USE_WL_CREDALL)
+	/* enable credall to reduce the chance of no bus credit happened. */
+	bcm_mkiovar("bus:credall", (char *)&credall, 4, iovbuf, sizeof(iovbuf));
+	dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+#endif
+
+#if defined(BCMSDIO)
+	if (glom != DEFAULT_GLOM_VALUE) {
+		DHD_INFO(("%s set glom=0x%X\n", __FUNCTION__, glom));
+		bcm_mkiovar("bus:txglom", (char *)&glom, 4, iovbuf, sizeof(iovbuf));
+		dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+	}
+#endif /* defined(BCMSDIO) */
+
+	/* Setup timeout if Beacons are lost and roam is off to report link down */
+	bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4, iovbuf, sizeof(iovbuf));
+	dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+	/* Setup assoc_retry_max count to reconnect target AP in dongle */
+	bcm_mkiovar("assoc_retry_max", (char *)&retry_max, 4, iovbuf, sizeof(iovbuf));
+	dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+#if defined(AP) && !defined(WLP2P)
+	/* Turn off MPC in AP mode */
+	bcm_mkiovar("mpc", (char *)&mpc, 4, iovbuf, sizeof(iovbuf));
+	dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+	bcm_mkiovar("apsta", (char *)&apsta, 4, iovbuf, sizeof(iovbuf));
+	dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+#endif /* defined(AP) && !defined(WLP2P) */
+
+
+#if defined(SOFTAP)
+	if (ap_fw_loaded == TRUE) {
+		dhd_wl_ioctl_cmd(dhd, WLC_SET_DTIMPRD, (char *)&dtim, sizeof(dtim), TRUE, 0);
+	}
+#endif
+
+#if defined(KEEP_ALIVE)
+	{
+	/* Set Keep Alive : be sure to use FW with -keepalive */
+	int res;
+
+#if defined(SOFTAP)
+	if (ap_fw_loaded == FALSE)
+#endif
+		if (!(dhd->op_mode &
+			(DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))) {
+			if ((res = dhd_keep_alive_onoff(dhd)) < 0)
+				DHD_ERROR(("%s set keeplive failed %d\n",
+				__FUNCTION__, res));
+		}
+	}
+#endif /* defined(KEEP_ALIVE) */
+
+#ifdef USE_WL_TXBF
+	bcm_mkiovar("txbf", (char *)&txbf, 4, iovbuf, sizeof(iovbuf));
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+		sizeof(iovbuf), TRUE, 0)) < 0) {
+		DHD_ERROR(("%s Set txbf failed  %d\n", __FUNCTION__, ret));
+	}
+#endif /* USE_WL_TXBF */
+#ifdef USE_WL_FRAMEBURST
+	/* Set frameburst to value */
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_FAKEFRAG, (char *)&frameburst,
+		sizeof(frameburst), TRUE, 0)) < 0) {
+		DHD_ERROR(("%s Set frameburst failed  %d\n", __FUNCTION__, ret));
+	}
+#endif /* USE_WL_FRAMEBURST */
+#if defined(CUSTOM_AMPDU_BA_WSIZE)
+	/* Set ampdu ba wsize to 64 or 16 */
+#ifdef CUSTOM_AMPDU_BA_WSIZE
+	ampdu_ba_wsize = CUSTOM_AMPDU_BA_WSIZE;
+#endif
+	if (ampdu_ba_wsize != 0) {
+		bcm_mkiovar("ampdu_ba_wsize", (char *)&ampdu_ba_wsize, 4, iovbuf, sizeof(iovbuf));
+		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+			sizeof(iovbuf), TRUE, 0)) < 0) {
+			DHD_ERROR(("%s Set ampdu_ba_wsize to %d failed  %d\n",
+				__FUNCTION__, ampdu_ba_wsize, ret));
+		}
+	}
+#endif
+
+
+#if defined(CUSTOM_AMPDU_MPDU)
+	ampdu_mpdu = CUSTOM_AMPDU_MPDU;
+	if (ampdu_mpdu != 0 && (ampdu_mpdu <= ampdu_ba_wsize)) {
+		bcm_mkiovar("ampdu_mpdu", (char *)&ampdu_mpdu, 4, iovbuf, sizeof(iovbuf));
+		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+			sizeof(iovbuf), TRUE, 0)) < 0) {
+			DHD_ERROR(("%s Set ampdu_mpdu to %d failed  %d\n",
+				__FUNCTION__, CUSTOM_AMPDU_MPDU, ret));
+		}
+	}
+#endif /* CUSTOM_AMPDU_MPDU */
+
+#if defined(CUSTOM_AMPDU_RELEASE)
+	ampdu_release = CUSTOM_AMPDU_RELEASE;
+	if (ampdu_release != 0 && (ampdu_release <= ampdu_ba_wsize)) {
+		bcm_mkiovar("ampdu_release", (char *)&ampdu_release, 4, iovbuf, sizeof(iovbuf));
+		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+			sizeof(iovbuf), TRUE, 0)) < 0) {
+			DHD_ERROR(("%s Set ampdu_release to %d failed  %d\n",
+				__FUNCTION__, CUSTOM_AMPDU_RELEASE, ret));
+		}
+	}
+#endif /* CUSTOM_AMPDU_RELEASE */
+
+#ifdef CUSTOM_PSPRETEND_THR
+	/* Turn off MPC in AP mode */
+	bcm_mkiovar("pspretend_threshold", (char *)&pspretend_thr, 4,
+		iovbuf, sizeof(iovbuf));
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+		sizeof(iovbuf), TRUE, 0)) < 0) {
+		DHD_ERROR(("%s pspretend_threshold for HostAPD failed  %d\n",
+			__FUNCTION__, ret));
+	}
+#endif
+
+	bcm_mkiovar("buf_key_b4_m4", (char *)&buf_key_b4_m4, 4, iovbuf, sizeof(iovbuf));
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+		sizeof(iovbuf), TRUE, 0)) < 0) {
+		DHD_ERROR(("%s buf_key_b4_m4 set failed %d\n", __FUNCTION__, ret));
+	}
+
+	/* Read event_msgs mask */
+	bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf));
+	if ((ret  = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) < 0) {
+		DHD_ERROR(("%s read Event mask failed %d\n", __FUNCTION__, ret));
+		goto done;
+	}
+	bcopy(iovbuf, eventmask, WL_EVENTING_MASK_LEN);
+
+	/* Setup event_msgs */
+	setbit(eventmask, WLC_E_SET_SSID);
+	setbit(eventmask, WLC_E_PRUNE);
+	setbit(eventmask, WLC_E_AUTH);
+	setbit(eventmask, WLC_E_AUTH_IND);
+	setbit(eventmask, WLC_E_ASSOC);
+	setbit(eventmask, WLC_E_REASSOC);
+	setbit(eventmask, WLC_E_REASSOC_IND);
+	setbit(eventmask, WLC_E_DEAUTH);
+	setbit(eventmask, WLC_E_DEAUTH_IND);
+	setbit(eventmask, WLC_E_DISASSOC_IND);
+	setbit(eventmask, WLC_E_DISASSOC);
+	setbit(eventmask, WLC_E_JOIN);
+	setbit(eventmask, WLC_E_START);
+	setbit(eventmask, WLC_E_ASSOC_IND);
+	setbit(eventmask, WLC_E_PSK_SUP);
+	setbit(eventmask, WLC_E_LINK);
+	setbit(eventmask, WLC_E_NDIS_LINK);
+	setbit(eventmask, WLC_E_MIC_ERROR);
+	setbit(eventmask, WLC_E_ASSOC_REQ_IE);
+	setbit(eventmask, WLC_E_ASSOC_RESP_IE);
+#ifndef WL_CFG80211
+	setbit(eventmask, WLC_E_PMKID_CACHE);
+	setbit(eventmask, WLC_E_TXFAIL);
+#endif
+	setbit(eventmask, WLC_E_JOIN_START);
+#ifdef WLMEDIA_HTSF
+	setbit(eventmask, WLC_E_HTSFSYNC);
+#endif /* WLMEDIA_HTSF */
+#ifdef PNO_SUPPORT
+	setbit(eventmask, WLC_E_PFN_NET_FOUND);
+	setbit(eventmask, WLC_E_PFN_BEST_BATCHING);
+	setbit(eventmask, WLC_E_PFN_BSSID_NET_FOUND);
+	setbit(eventmask, WLC_E_PFN_BSSID_NET_LOST);
+#endif /* PNO_SUPPORT */
+	/* enable dongle roaming event */
+	setbit(eventmask, WLC_E_ROAM);
+	setbit(eventmask, WLC_E_BSSID);
+#ifdef WLTDLS
+	setbit(eventmask, WLC_E_TDLS_PEER_EVENT);
+#endif /* WLTDLS */
+#ifdef RTT_SUPPORT
+	setbit(eventmask, WLC_E_PROXD);
+#endif /* RTT_SUPPORT */
+#ifdef WL_CFG80211
+	setbit(eventmask, WLC_E_ESCAN_RESULT);
+	if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
+		setbit(eventmask, WLC_E_ACTION_FRAME_RX);
+		setbit(eventmask, WLC_E_P2P_DISC_LISTEN_COMPLETE);
+	}
+#endif /* WL_CFG80211 */
+	setbit(eventmask, WLC_E_TRACE);
+
+	/* Write updated Event mask */
+	bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf));
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
+		DHD_ERROR(("%s Set Event mask failed %d\n", __FUNCTION__, ret));
+		goto done;
+	}
+
+	/* make up event mask ext message iovar for event larger than 128 */
+	msglen = ROUNDUP(WLC_E_LAST, NBBY)/NBBY + EVENTMSGS_EXT_STRUCT_SIZE;
+	eventmask_msg = (eventmsgs_ext_t*)kmalloc(msglen, GFP_KERNEL);
+	if (eventmask_msg == NULL) {
+		DHD_ERROR(("failed to allocate %d bytes for event_msg_ext\n", msglen));
+		return BCME_NOMEM;
+	}
+	bzero(eventmask_msg, msglen);
+	eventmask_msg->ver = EVENTMSGS_VER;
+	eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
+
+	/* Read event_msgs_ext mask */
+	bcm_mkiovar("event_msgs_ext", (char *)eventmask_msg, msglen, iov_buf, sizeof(iov_buf));
+	ret2  = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iov_buf, sizeof(iov_buf), FALSE, 0);
+	if (ret2 != BCME_UNSUPPORTED)
+		ret = ret2;
+	if (ret2 == 0) { /* event_msgs_ext must be supported */
+		bcopy(iov_buf, eventmask_msg, msglen);
+#ifdef GSCAN_SUPPORT
+		setbit(eventmask_msg->mask, WLC_E_PFN_GSCAN_FULL_RESULT);
+		setbit(eventmask_msg->mask, WLC_E_PFN_SCAN_COMPLETE);
+		setbit(eventmask_msg->mask, WLC_E_PFN_SWC);
+#endif /* GSCAN_SUPPORT */
+#ifdef BT_WIFI_HANDOVER
+		setbit(eventmask_msg->mask, WLC_E_BT_WIFI_HANDOVER_REQ);
+#endif /* BT_WIFI_HANDOVER */
+
+		/* Write updated Event mask */
+		eventmask_msg->ver = EVENTMSGS_VER;
+		eventmask_msg->command = EVENTMSGS_SET_MASK;
+		eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
+		bcm_mkiovar("event_msgs_ext", (char *)eventmask_msg,
+			msglen, iov_buf, sizeof(iov_buf));
+		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
+			iov_buf, sizeof(iov_buf), TRUE, 0)) < 0) {
+			DHD_ERROR(("%s write event mask ext failed %d\n", __FUNCTION__, ret));
+			kfree(eventmask_msg);
+			goto done;
+		}
+	} else if (ret2 < 0 && ret2 != BCME_UNSUPPORTED) {
+		DHD_ERROR(("%s read event mask ext failed %d\n", __FUNCTION__, ret2));
+		kfree(eventmask_msg);
+		goto done;
+	} /* unsupported is ok */
+	kfree(eventmask_msg);
+
+	dhd_set_short_dwell_time(dhd, FALSE);
+
+#ifdef ARP_OFFLOAD_SUPPORT
+	/* Set and enable ARP offload feature for STA only  */
+#if defined(SOFTAP)
+	if (arpoe && !ap_fw_loaded) {
+#else
+	if (arpoe) {
+#endif
+		dhd_arp_offload_enable(dhd, TRUE);
+		dhd_arp_offload_set(dhd, dhd_arp_mode);
+	} else {
+		dhd_arp_offload_enable(dhd, FALSE);
+		dhd_arp_offload_set(dhd, 0);
+	}
+	dhd_arp_enable = arpoe;
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+#ifdef PKT_FILTER_SUPPORT
+	/* Setup default defintions for pktfilter , enable in suspend */
+	dhd->pktfilter_count = 6;
+	/* Setup filter to allow only unicast */
+	dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0x01 0x00";
+	dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = NULL;
+	dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = NULL;
+	dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
+	/* Add filter to pass multicastDNS packet and NOT filter out as Broadcast */
+	dhd->pktfilter[DHD_MDNS_FILTER_NUM] = "104 0 0 0 0xFFFFFFFFFFFF 0x01005E0000FB";
+	/* apply APP pktfilter */
+	dhd->pktfilter[DHD_ARP_FILTER_NUM] = "105 0 0 12 0xFFFF 0x0806";
+
+
+#if defined(SOFTAP)
+	if (ap_fw_loaded) {
+		dhd_enable_packet_filter(0, dhd);
+	}
+#endif /* defined(SOFTAP) */
+	dhd_set_packet_filter(dhd);
+#endif /* PKT_FILTER_SUPPORT */
+#ifdef DISABLE_11N
+	bcm_mkiovar("nmode", (char *)&nmode, 4, iovbuf, sizeof(iovbuf));
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
+		DHD_ERROR(("%s wl nmode 0 failed %d\n", __FUNCTION__, ret));
+#endif /* DISABLE_11N */
+
+	/* query for 'ver' to get version info from firmware */
+	memset(buf, 0, sizeof(buf));
+	ptr = buf;
+	bcm_mkiovar("ver", (char *)&buf, 4, buf, sizeof(buf));
+	if ((ret  = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0)) < 0)
+		DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
+	else {
+		bcmstrtok(&ptr, "\n", 0);
+		/* Print fw version info */
+		DHD_ERROR(("Firmware version = %s\n", buf));
+
+		dhd_set_version_info(dhd, buf);
+	}
+
+#if defined(BCMSDIO)
+	dhd_txglom_enable(dhd, TRUE);
+#endif /* defined(BCMSDIO) */
+
+#if defined(BCMSDIO)
+#ifdef PROP_TXSTATUS
+	if (disable_proptx ||
+#ifdef PROP_TXSTATUS_VSDB
+		/* enable WLFC only if the firmware is VSDB when it is in STA mode */
+		(dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
+		 dhd->op_mode != DHD_FLAG_IBSS_MODE) ||
+#endif /* PROP_TXSTATUS_VSDB */
+		FALSE) {
+		wlfc_enable = FALSE;
+	}
+
+#ifndef DISABLE_11N
+	bcm_mkiovar("ampdu_hostreorder", (char *)&hostreorder, 4, iovbuf, sizeof(iovbuf));
+	if ((ret2 = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
+		DHD_ERROR(("%s wl ampdu_hostreorder failed %d\n", __FUNCTION__, ret2));
+		if (ret2 != BCME_UNSUPPORTED)
+			ret = ret2;
+		if (ret2 != BCME_OK)
+			hostreorder = 0;
+	}
+#endif /* DISABLE_11N */
+
+
+	if (wlfc_enable)
+		dhd_wlfc_init(dhd);
+#ifndef DISABLE_11N
+	else if (hostreorder)
+		dhd_wlfc_hostreorder_init(dhd);
+#endif /* DISABLE_11N */
+
+#endif /* PROP_TXSTATUS */
+#endif /* BCMSDIO || BCMBUS */
+#ifdef PCIE_FULL_DONGLE
+	/* For FD we need all the packets at DHD to handle intra-BSS forwarding */
+	if (FW_SUPPORTED(dhd, ap)) {
+		wl_ap_isolate = AP_ISOLATE_SENDUP_ALL;
+		bcm_mkiovar("ap_isolate", (char *)&wl_ap_isolate, 4, iovbuf, sizeof(iovbuf));
+		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
+			DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
+	}
+#endif /* PCIE_FULL_DONGLE */
+#ifdef PNO_SUPPORT
+	if (!dhd->pno_state) {
+		dhd_pno_init(dhd);
+	}
+#endif
+#ifdef RTT_SUPPORT
+	if (!dhd->rtt_state) {
+		dhd_rtt_init(dhd);
+	}
+#endif
+
+#ifdef WL11U
+	dhd_interworking_enable(dhd);
+#endif /* WL11U */
+
+done:
+	return ret;
+}
+
+
+int
+dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf, uint cmd_len, int set)
+{
+	char buf[strlen(name) + 1 + cmd_len];
+	int len = sizeof(buf);
+	wl_ioctl_t ioc;
+	int ret;
+
+	len = bcm_mkiovar(name, cmd_buf, cmd_len, buf, len);
+
+	memset(&ioc, 0, sizeof(ioc));
+
+	ioc.cmd = set? WLC_SET_VAR : WLC_GET_VAR;
+	ioc.buf = buf;
+	ioc.len = len;
+	ioc.set = set;
+
+	ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
+	if (!set && ret >= 0)
+		memcpy(cmd_buf, buf, cmd_len);
+
+	return ret;
+}
+
+int dhd_change_mtu(dhd_pub_t *dhdp, int new_mtu, int ifidx)
+{
+	struct dhd_info *dhd = dhdp->info;
+	struct net_device *dev = NULL;
+
+	ASSERT(dhd && dhd->iflist[ifidx]);
+	dev = dhd->iflist[ifidx]->net;
+	ASSERT(dev);
+
+	if (netif_running(dev)) {
+		DHD_ERROR(("%s: Must be down to change its MTU", dev->name));
+		return BCME_NOTDOWN;
+	}
+
+#define DHD_MIN_MTU 1500
+#define DHD_MAX_MTU 1752
+
+	if ((new_mtu < DHD_MIN_MTU) || (new_mtu > DHD_MAX_MTU)) {
+		DHD_ERROR(("%s: MTU size %d is invalid.\n", __FUNCTION__, new_mtu));
+		return BCME_BADARG;
+	}
+
+	dev->mtu = new_mtu;
+	return 0;
+}
+
+#ifdef ARP_OFFLOAD_SUPPORT
+/* add or remove AOE host ip(s) (up to 8 IPs on the interface)  */
+void
+aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx)
+{
+	u32 ipv4_buf[MAX_IPV4_ENTRIES]; /* temp save for AOE host_ip table */
+	int i;
+	int ret;
+
+	bzero(ipv4_buf, sizeof(ipv4_buf));
+
+	/* display what we've got */
+	ret = dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
+	DHD_ARPOE(("%s: hostip table read from Dongle:\n", __FUNCTION__));
+#ifdef AOE_DBG
+	dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
+#endif
+	/* now we saved hoste_ip table, clr it in the dongle AOE */
+	dhd_aoe_hostip_clr(dhd_pub, idx);
+
+	if (ret) {
+		DHD_ERROR(("%s failed\n", __FUNCTION__));
+		return;
+	}
+
+	for (i = 0; i < MAX_IPV4_ENTRIES; i++) {
+		if (add && (ipv4_buf[i] == 0)) {
+				ipv4_buf[i] = ipa;
+				add = FALSE; /* added ipa to local table  */
+				DHD_ARPOE(("%s: Saved new IP in temp arp_hostip[%d]\n",
+				__FUNCTION__, i));
+		} else if (ipv4_buf[i] == ipa) {
+			ipv4_buf[i]	= 0;
+			DHD_ARPOE(("%s: removed IP:%x from temp table %d\n",
+				__FUNCTION__, ipa, i));
+		}
+
+		if (ipv4_buf[i] != 0) {
+			/* add back host_ip entries from our local cache */
+			dhd_arp_offload_add_ip(dhd_pub, ipv4_buf[i], idx);
+			DHD_ARPOE(("%s: added IP:%x to dongle arp_hostip[%d]\n\n",
+				__FUNCTION__, ipv4_buf[i], i));
+		}
+	}
+#ifdef AOE_DBG
+	/* see the resulting hostip table */
+	dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
+	DHD_ARPOE(("%s: read back arp_hostip table:\n", __FUNCTION__));
+	dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
+#endif
+}
+
+/*
+ * Notification mechanism from kernel to our driver. This function is called by the Linux kernel
+ * whenever there is an event related to an IP address.
+ * ptr : kernel provided pointer to IP address that has changed
+ */
+static int dhd_inetaddr_notifier_call(struct notifier_block *this,
+	unsigned long event,
+	void *ptr)
+{
+	struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
+
+	dhd_info_t *dhd;
+	dhd_pub_t *dhd_pub;
+	int idx;
+
+	if (!dhd_arp_enable)
+		return NOTIFY_DONE;
+	if (!ifa || !(ifa->ifa_dev->dev))
+		return NOTIFY_DONE;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
+	/* Filter notifications meant for non Broadcom devices */
+	if ((ifa->ifa_dev->dev->netdev_ops != &dhd_ops_pri) &&
+	    (ifa->ifa_dev->dev->netdev_ops != &dhd_ops_virt)) {
+#if defined(WL_ENABLE_P2P_IF)
+		if (!wl_cfgp2p_is_ifops(ifa->ifa_dev->dev->netdev_ops))
+#endif /* WL_ENABLE_P2P_IF */
+			return NOTIFY_DONE;
+	}
+#endif /* LINUX_VERSION_CODE */
+
+	dhd = DHD_DEV_INFO(ifa->ifa_dev->dev);
+	if (!dhd)
+		return NOTIFY_DONE;
+
+	dhd_pub = &dhd->pub;
+
+	if (dhd_pub->arp_version == 1) {
+		idx = 0;
+	}
+	else {
+		for (idx = 0; idx < DHD_MAX_IFS; idx++) {
+			if (dhd->iflist[idx] && dhd->iflist[idx]->net == ifa->ifa_dev->dev)
+			break;
+		}
+		if (idx < DHD_MAX_IFS)
+			DHD_TRACE(("ifidx : %p %s %d\n", dhd->iflist[idx]->net,
+				dhd->iflist[idx]->name, dhd->iflist[idx]->idx));
+		else {
+			DHD_ERROR(("Cannot find ifidx for(%s) set to 0\n", ifa->ifa_label));
+			idx = 0;
+		}
+	}
+
+	switch (event) {
+		case NETDEV_UP:
+			DHD_ARPOE(("%s: [%s] Up IP: 0x%x\n",
+				__FUNCTION__, ifa->ifa_label, ifa->ifa_address));
+
+			if (dhd->pub.busstate != DHD_BUS_DATA) {
+				DHD_ERROR(("%s: bus not ready, exit\n", __FUNCTION__));
+				if (dhd->pend_ipaddr) {
+					DHD_ERROR(("%s: overwrite pending ipaddr: 0x%x\n",
+						__FUNCTION__, dhd->pend_ipaddr));
+				}
+				dhd->pend_ipaddr = ifa->ifa_address;
+				break;
+			}
+
+#ifdef AOE_IP_ALIAS_SUPPORT
+			DHD_ARPOE(("%s:add aliased IP to AOE hostip cache\n",
+				__FUNCTION__));
+			aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, TRUE, idx);
+#endif /* AOE_IP_ALIAS_SUPPORT */
+			break;
+
+		case NETDEV_DOWN:
+			DHD_ARPOE(("%s: [%s] Down IP: 0x%x\n",
+				__FUNCTION__, ifa->ifa_label, ifa->ifa_address));
+			dhd->pend_ipaddr = 0;
+#ifdef AOE_IP_ALIAS_SUPPORT
+			DHD_ARPOE(("%s:interface is down, AOE clr all for this if\n",
+				__FUNCTION__));
+			aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, FALSE, idx);
+#else
+			dhd_aoe_hostip_clr(&dhd->pub, idx);
+			dhd_aoe_arp_clr(&dhd->pub, idx);
+#endif /* AOE_IP_ALIAS_SUPPORT */
+			break;
+
+		default:
+			DHD_ARPOE(("%s: do noting for [%s] Event: %lu\n",
+				__func__, ifa->ifa_label, event));
+			break;
+	}
+	return NOTIFY_DONE;
+}
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+#ifdef CONFIG_IPV6
+/* Neighbor Discovery Offload: defered handler */
+static void
+dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event)
+{
+	struct ipv6_work_info_t *ndo_work = (struct ipv6_work_info_t *)event_data;
+	dhd_pub_t	*pub = &((dhd_info_t *)dhd_info)->pub;
+	int		ret;
+
+	if (event != DHD_WQ_WORK_IPV6_NDO) {
+		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
+		return;
+	}
+
+	if (!ndo_work) {
+		DHD_ERROR(("%s: ipv6 work info is not initialized \n", __FUNCTION__));
+		return;
+	}
+
+	if (!pub) {
+		DHD_ERROR(("%s: dhd pub is not initialized \n", __FUNCTION__));
+		return;
+	}
+
+	if (ndo_work->if_idx) {
+		DHD_ERROR(("%s: idx %d \n", __FUNCTION__, ndo_work->if_idx));
+		return;
+	}
+
+	switch (ndo_work->event) {
+		case NETDEV_UP:
+			DHD_TRACE(("%s: Enable NDO and add ipv6 into table \n ", __FUNCTION__));
+			ret = dhd_ndo_enable(pub, TRUE);
+			if (ret < 0) {
+				DHD_ERROR(("%s: Enabling NDO Failed %d\n", __FUNCTION__, ret));
+			}
+
+			ret = dhd_ndo_add_ip(pub, &ndo_work->ipv6_addr[0], ndo_work->if_idx);
+			if (ret < 0) {
+				DHD_ERROR(("%s: Adding host ip for NDO failed %d\n",
+					__FUNCTION__, ret));
+			}
+			break;
+		case NETDEV_DOWN:
+			DHD_TRACE(("%s: clear ipv6 table \n", __FUNCTION__));
+			ret = dhd_ndo_remove_ip(pub, ndo_work->if_idx);
+			if (ret < 0) {
+				DHD_ERROR(("%s: Removing host ip for NDO failed %d\n",
+					__FUNCTION__, ret));
+				goto done;
+			}
+
+			ret = dhd_ndo_enable(pub, FALSE);
+			if (ret < 0) {
+				DHD_ERROR(("%s: disabling NDO Failed %d\n", __FUNCTION__, ret));
+				goto done;
+			}
+			break;
+		default:
+			DHD_ERROR(("%s: unknown notifier event \n", __FUNCTION__));
+			break;
+	}
+done:
+	/* free ndo_work. alloced while scheduling the work */
+	kfree(ndo_work);
+
+	return;
+}
+
+/*
+ * Neighbor Discovery Offload: Called when an interface
+ * is assigned with ipv6 address.
+ * Handles only primary interface
+ */
+static int dhd_inet6addr_notifier_call(struct notifier_block *this,
+	unsigned long event,
+	void *ptr)
+{
+	dhd_info_t *dhd;
+	dhd_pub_t *dhd_pub;
+	struct inet6_ifaddr *inet6_ifa = ptr;
+	struct in6_addr *ipv6_addr = &inet6_ifa->addr;
+	struct ipv6_work_info_t *ndo_info;
+	int idx = 0; /* REVISIT */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
+	/* Filter notifications meant for non Broadcom devices */
+	if (inet6_ifa->idev->dev->netdev_ops != &dhd_ops_pri) {
+			return NOTIFY_DONE;
+	}
+#endif /* LINUX_VERSION_CODE */
+
+	dhd = DHD_DEV_INFO(inet6_ifa->idev->dev);
+	if (!dhd)
+		return NOTIFY_DONE;
+
+	if (dhd->iflist[idx] && dhd->iflist[idx]->net != inet6_ifa->idev->dev)
+		return NOTIFY_DONE;
+	dhd_pub = &dhd->pub;
+	if (!FW_SUPPORTED(dhd_pub, ndoe))
+		return NOTIFY_DONE;
+
+	ndo_info = (struct ipv6_work_info_t *)kzalloc(sizeof(struct ipv6_work_info_t), GFP_ATOMIC);
+	if (!ndo_info) {
+		DHD_ERROR(("%s: ipv6 work alloc failed\n", __FUNCTION__));
+		return NOTIFY_DONE;
+	}
+
+	ndo_info->event = event;
+	ndo_info->if_idx = idx;
+	memcpy(&ndo_info->ipv6_addr[0], ipv6_addr, IPV6_ADDR_LEN);
+
+	/* defer the work to thread as it may block kernel */
+	dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)ndo_info, DHD_WQ_WORK_IPV6_NDO,
+		dhd_inet6_work_handler, DHD_WORK_PRIORITY_LOW);
+	return NOTIFY_DONE;
+}
+#endif /* #ifdef CONFIG_IPV6 */
+
+int
+dhd_register_if(dhd_pub_t *dhdp, int ifidx, bool need_rtnl_lock)
+{
+	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+	dhd_if_t *ifp;
+	struct net_device *net = NULL;
+	int err = 0;
+	uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33 };
+
+	DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
+
+	ASSERT(dhd && dhd->iflist[ifidx]);
+	ifp = dhd->iflist[ifidx];
+	net = ifp->net;
+	ASSERT(net && (ifp->idx == ifidx));
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
+	ASSERT(!net->open);
+	net->get_stats = dhd_get_stats;
+	net->do_ioctl = dhd_ioctl_entry;
+	net->hard_start_xmit = dhd_start_xmit;
+	net->set_mac_address = dhd_set_mac_address;
+	net->set_multicast_list = dhd_set_multicast_list;
+	net->open = net->stop = NULL;
+#else
+	ASSERT(!net->netdev_ops);
+	net->netdev_ops = &dhd_ops_virt;
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
+
+	/* Ok, link into the network layer... */
+	if (ifidx == 0) {
+		/*
+		 * device functions for the primary interface only
+		 */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
+		net->open = dhd_open;
+		net->stop = dhd_stop;
+#else
+		net->netdev_ops = &dhd_ops_pri;
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
+		if (!ETHER_ISNULLADDR(dhd->pub.mac.octet))
+			memcpy(temp_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
+	} else {
+		/*
+		 * We have to use the primary MAC for virtual interfaces
+		 */
+		memcpy(temp_addr, ifp->mac_addr, ETHER_ADDR_LEN);
+		/*
+		 * Android sets the locally administered bit to indicate that this is a
+		 * portable hotspot.  This will not work in simultaneous AP/STA mode,
+		 * nor with P2P.  Need to set the Donlge's MAC address, and then use that.
+		 */
+		if (!memcmp(temp_addr, dhd->iflist[0]->mac_addr,
+			ETHER_ADDR_LEN)) {
+			DHD_ERROR(("%s interface [%s]: set locally administered bit in MAC\n",
+			__func__, net->name));
+			temp_addr[0] |= 0x02;
+		}
+	}
+
+	net->hard_header_len = ETH_HLEN + dhd->pub.hdrlen;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
+	net->ethtool_ops = &dhd_ethtool_ops;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
+
+#if defined(WL_WIRELESS_EXT)
+#if WIRELESS_EXT < 19
+	net->get_wireless_stats = dhd_get_wireless_stats;
+#endif /* WIRELESS_EXT < 19 */
+#if WIRELESS_EXT > 12
+	net->wireless_handlers = (struct iw_handler_def *)&wl_iw_handler_def;
+#endif /* WIRELESS_EXT > 12 */
+#endif /* defined(WL_WIRELESS_EXT) */
+
+	dhd->pub.rxsz = DBUS_RX_BUFFER_SIZE_DHD(net);
+
+	memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN);
+
+	if (ifidx == 0)
+		printf("%s\n", dhd_version);
+
+	if (need_rtnl_lock)
+		err = register_netdev(net);
+	else
+		err = register_netdevice(net);
+
+	if (err != 0) {
+		DHD_ERROR(("couldn't register the net device [%s], err %d\n", net->name, err));
+		goto fail;
+	}
+
+
+
+	printf("Register interface [%s]  MAC: "MACDBG"\n\n", net->name,
+		MAC2STRDBG(net->dev_addr));
+
+#if defined(SOFTAP) && defined(WL_WIRELESS_EXT) && !defined(WL_CFG80211)
+		wl_iw_iscan_set_scan_broadcast_prep(net, 1);
+#endif
+
+#if defined(BCMLXSDMMC) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+	if (ifidx == 0) {
+#ifdef BCMLXSDMMC
+		up(&dhd_registration_sem);
+#endif
+		if (!dhd_download_fw_on_driverload) {
+#ifdef BCMSDIO
+			dhd_net_bus_devreset(net, TRUE);
+			dhd_net_bus_suspend(net);
+#endif /* BCMSDIO */
+			wifi_platform_set_power(dhdp->info->adapter, FALSE, WIFI_TURNOFF_DELAY);
+		}
+	}
+#endif /* OEM_ANDROID && BCMLXSDMMC && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
+
+#if defined(BCMPCIE)
+	if (ifidx == 0) {
+		if (!dhd_download_fw_on_driverload) {
+			dhd_net_bus_devreset(net, TRUE);
+			wifi_platform_set_power(dhdp->info->adapter, FALSE, WIFI_TURNOFF_DELAY);
+		}
+	}
+#endif /* BCMPCIE */
+
+	return 0;
+
+fail:
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
+	net->open = NULL;
+#else
+	net->netdev_ops = NULL;
+#endif
+	return err;
+}
+
+void
+dhd_bus_detach(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (dhdp) {
+		dhd = (dhd_info_t *)dhdp->info;
+		if (dhd) {
+
+			/*
+			 * In case of Android cfg80211 driver, the bus is down in dhd_stop,
+			 *  calling stop again will cuase SD read/write errors.
+			 */
+			if (dhd->pub.busstate != DHD_BUS_DOWN) {
+				/* Stop the protocol module */
+				dhd_prot_stop(&dhd->pub);
+
+				/* Stop the bus module */
+				dhd_bus_stop(dhd->pub.bus, TRUE);
+			}
+
+#if defined(OOB_INTR_ONLY)
+			dhd_bus_oob_intr_unregister(dhdp);
+#endif
+		}
+	}
+}
+
+
+void dhd_detach(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd;
+	unsigned long flags;
+	int timer_valid = FALSE;
+
+	if (!dhdp)
+		return;
+
+	dhd = (dhd_info_t *)dhdp->info;
+	if (!dhd)
+		return;
+
+	DHD_TRACE(("%s: Enter state 0x%x\n", __FUNCTION__, dhd->dhd_state));
+
+	dhd->pub.up = 0;
+	if (!(dhd->dhd_state & DHD_ATTACH_STATE_DONE)) {
+		/* Give sufficient time for threads to start running in case
+		 * dhd_attach() has failed
+		 */
+		OSL_SLEEP(100);
+	}
+
+	if (dhd->dhd_state & DHD_ATTACH_STATE_PROT_ATTACH) {
+#ifdef PCIE_FULL_DONGLE
+		dhd_flow_rings_deinit(dhdp);
+#endif
+		dhd_bus_detach(dhdp);
+
+		if (dhdp->prot)
+			dhd_prot_detach(dhdp);
+	}
+#ifdef PROP_TXSTATUS
+#ifdef WLFC_STATE_PREALLOC
+	MFREE(dhd->pub.osh, dhd->pub.wlfc_state, sizeof(athost_wl_status_info_t));
+#endif /* WLFC_STATE_PREALLOC */
+#endif /* PROP_TXSTATUS */
+
+#ifdef ARP_OFFLOAD_SUPPORT
+	if (dhd_inetaddr_notifier_registered) {
+		dhd_inetaddr_notifier_registered = FALSE;
+		unregister_inetaddr_notifier(&dhd_inetaddr_notifier);
+	}
+#endif /* ARP_OFFLOAD_SUPPORT */
+#ifdef CONFIG_IPV6
+	if (dhd_inet6addr_notifier_registered) {
+		dhd_inet6addr_notifier_registered = FALSE;
+		unregister_inet6addr_notifier(&dhd_inet6addr_notifier);
+	}
+#endif
+
+#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
+	if (dhd->dhd_state & DHD_ATTACH_STATE_EARLYSUSPEND_DONE) {
+		if (dhd->early_suspend.suspend)
+			unregister_early_suspend(&dhd->early_suspend);
+	}
+#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
+
+#if defined(WL_WIRELESS_EXT)
+	if (dhd->dhd_state & DHD_ATTACH_STATE_WL_ATTACH) {
+		/* Detatch and unlink in the iw */
+		wl_iw_detach();
+	}
+#endif /* defined(WL_WIRELESS_EXT) */
+
+	/* delete all interfaces, start with virtual  */
+	if (dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) {
+		int i = 1;
+		dhd_if_t *ifp;
+
+		/* Cleanup virtual interfaces */
+		dhd_net_if_lock_local(dhd);
+		for (i = 1; i < DHD_MAX_IFS; i++) {
+			if (dhd->iflist[i])
+				dhd_remove_if(&dhd->pub, i, TRUE);
+		}
+		dhd_net_if_unlock_local(dhd);
+
+		/*  delete primary interface 0 */
+		ifp = dhd->iflist[0];
+		ASSERT(ifp);
+		ASSERT(ifp->net);
+		if (ifp && ifp->net) {
+
+
+
+			/* in unregister_netdev case, the interface gets freed by net->destructor
+			 * (which is set to free_netdev)
+			 */
+			if (ifp->net->reg_state == NETREG_UNINITIALIZED)
+				free_netdev(ifp->net);
+			else
+				unregister_netdev(ifp->net);
+			ifp->net = NULL;
+#ifdef DHD_WMF
+			dhd_wmf_cleanup(dhdp, 0);
+#endif /* DHD_WMF */
+
+			dhd_if_del_sta_list(ifp);
+
+			MFREE(dhd->pub.osh, ifp, sizeof(*ifp));
+			dhd->iflist[0] = NULL;
+		}
+	}
+
+	/* Clear the watchdog timer */
+	DHD_GENERAL_LOCK(&dhd->pub, flags);
+	timer_valid = dhd->wd_timer_valid;
+	dhd->wd_timer_valid = FALSE;
+	DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+	if (timer_valid)
+		del_timer_sync(&dhd->timer);
+
+	if (dhd->dhd_state & DHD_ATTACH_STATE_THREADS_CREATED) {
+		if (dhd->thr_wdt_ctl.thr_pid >= 0) {
+			PROC_STOP(&dhd->thr_wdt_ctl);
+		}
+
+		if (dhd->rxthread_enabled && dhd->thr_rxf_ctl.thr_pid >= 0) {
+			PROC_STOP(&dhd->thr_rxf_ctl);
+		}
+
+		if (dhd->thr_dpc_ctl.thr_pid >= 0) {
+			PROC_STOP(&dhd->thr_dpc_ctl);
+		} else
+			tasklet_kill(&dhd->tasklet);
+	}
+#ifdef WL_CFG80211
+	if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) {
+		wl_cfg80211_detach(NULL);
+		dhd_monitor_uninit();
+	}
+#endif
+	/* free deferred work queue */
+	dhd_deferred_work_deinit(dhd->dhd_deferred_wq);
+	dhd->dhd_deferred_wq = NULL;
+
+#ifdef SHOW_LOGTRACE
+	if (dhd->event_data.fmts)
+		kfree(dhd->event_data.fmts);
+	if (dhd->event_data.raw_fmts)
+		kfree(dhd->event_data.raw_fmts);
+#endif /* SHOW_LOGTRACE */
+
+#ifdef PNO_SUPPORT
+	if (dhdp->pno_state)
+		dhd_pno_deinit(dhdp);
+#endif
+#ifdef RTT_SUPPORT
+	if (dhdp->rtt_state)
+		dhd_rtt_deinit(dhdp);
+#endif
+#if defined(CONFIG_PM_SLEEP)
+	if (dhd_pm_notifier_registered) {
+		unregister_pm_notifier(&dhd_pm_notifier);
+		dhd_pm_notifier_registered = FALSE;
+	}
+#endif /* CONFIG_PM_SLEEP */
+#ifdef SAR_SUPPORT
+	if (dhd_sar_notifier_registered) {
+		unregister_notifier_by_sar(&dhd->sar_notifier);
+		dhd_sar_notifier_registered = FALSE;
+	}
+#endif /* SAR_SUPPORT */
+#ifdef DEBUG_CPU_FREQ
+		if (dhd->new_freq)
+			free_percpu(dhd->new_freq);
+		dhd->new_freq = NULL;
+		cpufreq_unregister_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
+#endif
+	if (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) {
+		DHD_TRACE(("wd wakelock count:%d\n", dhd->wakelock_wd_counter));
+#ifdef CONFIG_HAS_WAKELOCK
+		dhd->wakelock_counter = 0;
+		dhd->wakelock_wd_counter = 0;
+		dhd->wakelock_rx_timeout_enable = 0;
+		dhd->wakelock_ctrl_timeout_enable = 0;
+		wake_lock_destroy(&dhd->wl_wifi);
+		wake_lock_destroy(&dhd->wl_rxwake);
+		wake_lock_destroy(&dhd->wl_ctrlwake);
+		wake_lock_destroy(&dhd->wl_wdwake);
+#endif /* CONFIG_HAS_WAKELOCK */
+	}
+
+
+
+#ifdef DHDTCPACK_SUPPRESS
+	/* This will free all MEM allocated for TCPACK SUPPRESS */
+	dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
+#endif /* DHDTCPACK_SUPPRESS */
+}
+
+
+void
+dhd_free(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd;
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (dhdp) {
+		int i;
+		for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
+			if (dhdp->reorder_bufs[i]) {
+				reorder_info_t *ptr;
+				uint32 buf_size = sizeof(struct reorder_info);
+
+				ptr = dhdp->reorder_bufs[i];
+
+				buf_size += ((ptr->max_idx + 1) * sizeof(void*));
+				DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
+					i, ptr->max_idx, buf_size));
+
+				MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
+				dhdp->reorder_bufs[i] = NULL;
+			}
+		}
+
+		dhd_sta_pool_fini(dhdp, DHD_MAX_STA);
+
+		dhd = (dhd_info_t *)dhdp->info;
+		/* If pointer is allocated by dhd_os_prealloc then avoid MFREE */
+		if (dhd &&
+			dhd != (dhd_info_t *)dhd_os_prealloc(dhdp, DHD_PREALLOC_DHD_INFO, 0, FALSE))
+			MFREE(dhd->pub.osh, dhd, sizeof(*dhd));
+		dhd = NULL;
+	}
+}
+void
+dhd_clear(dhd_pub_t *dhdp)
+{
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+#ifdef PCIE_FULL_DONGLE
+	if (dhdp) {
+		int i;
+		for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
+			if (dhdp->reorder_bufs[i]) {
+				reorder_info_t *ptr;
+				uint32 buf_size = sizeof(struct reorder_info);
+				ptr = dhdp->reorder_bufs[i];
+				buf_size += ((ptr->max_idx + 1) * sizeof(void*));
+				DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
+					i, ptr->max_idx, buf_size));
+
+				MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
+				dhdp->reorder_bufs[i] = NULL;
+			}
+		}
+		dhd_sta_pool_clear(dhdp, DHD_MAX_STA);
+	}
+#endif
+}
+
+static void
+dhd_module_cleanup(void)
+{
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	dhd_bus_unregister();
+
+	wl_android_exit();
+
+	dhd_wifi_platform_unregister_drv();
+}
+
+static void __exit
+dhd_module_exit(void)
+{
+	dhd_module_cleanup();
+	unregister_reboot_notifier(&dhd_reboot_notifier);
+}
+
+static int __init
+dhd_module_init(void)
+{
+	int err;
+	int retry = POWERUP_MAX_RETRY;
+
+	DHD_ERROR(("%s in\n", __FUNCTION__));
+
+	DHD_PERIM_RADIO_INIT();
+
+	if (firmware_path[0] != '\0') {
+		strncpy(fw_bak_path, firmware_path, MOD_PARAM_PATHLEN);
+		fw_bak_path[MOD_PARAM_PATHLEN-1] = '\0';
+	}
+
+	if (nvram_path[0] != '\0') {
+		strncpy(nv_bak_path, nvram_path, MOD_PARAM_PATHLEN);
+		nv_bak_path[MOD_PARAM_PATHLEN-1] = '\0';
+	}
+
+	do {
+		err = dhd_wifi_platform_register_drv();
+		if (!err) {
+			register_reboot_notifier(&dhd_reboot_notifier);
+			break;
+		}
+		else {
+			DHD_ERROR(("%s: Failed to load the driver, try cnt %d\n",
+				__FUNCTION__, retry));
+			strncpy(firmware_path, fw_bak_path, MOD_PARAM_PATHLEN);
+			firmware_path[MOD_PARAM_PATHLEN-1] = '\0';
+			strncpy(nvram_path, nv_bak_path, MOD_PARAM_PATHLEN);
+			nvram_path[MOD_PARAM_PATHLEN-1] = '\0';
+		}
+	} while (retry--);
+
+	if (err)
+		DHD_ERROR(("%s: Failed to load driver max retry reached**\n", __FUNCTION__));
+
+	return err;
+}
+
+static int
+dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused)
+{
+	DHD_TRACE(("%s: code = %ld\n", __FUNCTION__, code));
+	if (code == SYS_RESTART) {
+	}
+
+	return NOTIFY_DONE;
+}
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
+#if defined(CONFIG_DEFERRED_INITCALLS)
+deferred_module_init(dhd_module_init);
+#elif defined(USE_LATE_INITCALL_SYNC)
+late_initcall_sync(dhd_module_init);
+#else
+late_initcall(dhd_module_init);
+#endif /* USE_LATE_INITCALL_SYNC */
+#else
+module_init(dhd_module_init);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
+
+module_exit(dhd_module_exit);
+
+/*
+ * OS specific functions required to implement DHD driver in OS independent way
+ */
+int
+dhd_os_proto_block(dhd_pub_t *pub)
+{
+	dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+
+	if (dhd) {
+		DHD_PERIM_UNLOCK(pub);
+
+		down(&dhd->proto_sem);
+
+		DHD_PERIM_LOCK(pub);
+		return 1;
+	}
+
+	return 0;
+}
+
+int
+dhd_os_proto_unblock(dhd_pub_t *pub)
+{
+	dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+
+	if (dhd) {
+		up(&dhd->proto_sem);
+		return 1;
+	}
+
+	return 0;
+}
+
+unsigned int
+dhd_os_get_ioctl_resp_timeout(void)
+{
+	return ((unsigned int)dhd_ioctl_timeout_msec);
+}
+
+void
+dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec)
+{
+	dhd_ioctl_timeout_msec = (int)timeout_msec;
+}
+
+int
+dhd_os_ioctl_resp_wait(dhd_pub_t *pub, uint *condition, bool *pending)
+{
+	dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+	int timeout;
+
+	/* Convert timeout in millsecond to jiffies */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+	timeout = msecs_to_jiffies(dhd_ioctl_timeout_msec);
+#else
+	timeout = dhd_ioctl_timeout_msec * HZ / 1000;
+#endif
+
+	DHD_PERIM_UNLOCK(pub);
+
+	timeout = wait_event_timeout(dhd->ioctl_resp_wait, (*condition), timeout);
+
+	DHD_PERIM_LOCK(pub);
+
+	return timeout;
+}
+
+int
+dhd_os_ioctl_resp_wake(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+	wake_up(&dhd->ioctl_resp_wait);
+	return 0;
+}
+
+int
+dhd_os_d3ack_wait(dhd_pub_t *pub, uint *condition, bool *pending)
+{
+	dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+	int timeout;
+
+	/* Convert timeout in millsecond to jiffies */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+	timeout = msecs_to_jiffies(dhd_ioctl_timeout_msec);
+#else
+	timeout = dhd_ioctl_timeout_msec * HZ / 1000;
+#endif
+
+	DHD_PERIM_UNLOCK(pub);
+	timeout = wait_event_timeout(dhd->d3ack_wait, (*condition), timeout);
+	DHD_PERIM_LOCK(pub);
+
+	return timeout;
+}
+
+int
+dhd_os_d3ack_wake(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+	wake_up(&dhd->d3ack_wait);
+	return 0;
+}
+
+
+void
+dhd_os_wd_timer_extend(void *bus, bool extend)
+{
+	dhd_pub_t *pub = bus;
+	dhd_info_t *dhd = (dhd_info_t *)pub->info;
+
+	if (extend)
+		dhd_os_wd_timer(bus, WATCHDOG_EXTEND_INTERVAL);
+	else
+		dhd_os_wd_timer(bus, dhd->default_wd_interval);
+}
+
+
+void
+dhd_os_wd_timer(void *bus, uint wdtick)
+{
+	dhd_pub_t *pub = bus;
+	dhd_info_t *dhd = (dhd_info_t *)pub->info;
+	unsigned long flags;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (!dhd) {
+		DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__));
+		return;
+	}
+
+	DHD_GENERAL_LOCK(pub, flags);
+
+	/* don't start the wd until fw is loaded */
+	if (pub->busstate == DHD_BUS_DOWN) {
+		DHD_GENERAL_UNLOCK(pub, flags);
+		if (!wdtick)
+			DHD_OS_WD_WAKE_UNLOCK(pub);
+		return;
+	}
+
+	/* Totally stop the timer */
+	if (!wdtick && dhd->wd_timer_valid == TRUE) {
+		dhd->wd_timer_valid = FALSE;
+		DHD_GENERAL_UNLOCK(pub, flags);
+		del_timer_sync(&dhd->timer);
+		DHD_OS_WD_WAKE_UNLOCK(pub);
+		return;
+	}
+
+	if (wdtick) {
+		DHD_OS_WD_WAKE_LOCK(pub);
+		dhd_watchdog_ms = (uint)wdtick;
+		/* Re arm the timer, at last watchdog period */
+		mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
+		dhd->wd_timer_valid = TRUE;
+	}
+	DHD_GENERAL_UNLOCK(pub, flags);
+}
+
+void *
+dhd_os_open_image(char *filename)
+{
+	struct file *fp;
+
+	fp = filp_open(filename, O_RDONLY, 0);
+	/*
+	 * 2.6.11 (FC4) supports filp_open() but later revs don't?
+	 * Alternative:
+	 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
+	 * ???
+	 */
+	 if (IS_ERR(fp))
+		 fp = NULL;
+
+	 return fp;
+}
+
+int
+dhd_os_get_image_block(char *buf, int len, void *image)
+{
+	struct file *fp = (struct file *)image;
+	int rdlen;
+
+	if (!image)
+		return 0;
+
+	rdlen = kernel_read(fp, fp->f_pos, buf, len);
+	if (rdlen > 0)
+		fp->f_pos += rdlen;
+
+	return rdlen;
+}
+
+void
+dhd_os_close_image(void *image)
+{
+	if (image)
+		filp_close((struct file *)image, NULL);
+}
+
+void
+dhd_os_sdlock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd;
+
+	dhd = (dhd_info_t *)(pub->info);
+
+	if (dhd_dpc_prio >= 0)
+		down(&dhd->sdsem);
+	else
+		spin_lock_bh(&dhd->sdlock);
+}
+
+void
+dhd_os_sdunlock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd;
+
+	dhd = (dhd_info_t *)(pub->info);
+
+	if (dhd_dpc_prio >= 0)
+		up(&dhd->sdsem);
+	else
+		spin_unlock_bh(&dhd->sdlock);
+}
+
+void
+dhd_os_sdlock_txq(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd;
+
+	dhd = (dhd_info_t *)(pub->info);
+	spin_lock_bh(&dhd->txqlock);
+}
+
+void
+dhd_os_sdunlock_txq(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd;
+
+	dhd = (dhd_info_t *)(pub->info);
+	spin_unlock_bh(&dhd->txqlock);
+}
+
+void
+dhd_os_sdlock_rxq(dhd_pub_t *pub)
+{
+}
+
+void
+dhd_os_sdunlock_rxq(dhd_pub_t *pub)
+{
+}
+
+static void
+dhd_os_rxflock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd;
+
+	dhd = (dhd_info_t *)(pub->info);
+	spin_lock_bh(&dhd->rxf_lock);
+
+}
+
+static void
+dhd_os_rxfunlock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd;
+
+	dhd = (dhd_info_t *)(pub->info);
+	spin_unlock_bh(&dhd->rxf_lock);
+}
+
+#ifdef DHDTCPACK_SUPPRESS
+void
+dhd_os_tcpacklock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd;
+
+	dhd = (dhd_info_t *)(pub->info);
+	spin_lock_bh(&dhd->tcpack_lock);
+
+}
+
+void
+dhd_os_tcpackunlock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd;
+
+	dhd = (dhd_info_t *)(pub->info);
+	spin_unlock_bh(&dhd->tcpack_lock);
+}
+#endif /* DHDTCPACK_SUPPRESS */
+
+uint8* dhd_os_prealloc(dhd_pub_t *dhdpub, int section, uint size, bool kmalloc_if_fail)
+{
+	uint8* buf;
+	gfp_t flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
+
+	buf = (uint8*)wifi_platform_prealloc(dhdpub->info->adapter, section, size);
+	if (buf == NULL && kmalloc_if_fail)
+		buf = kmalloc(size, flags);
+
+	return buf;
+}
+
+void dhd_os_prefree(dhd_pub_t *dhdpub, void *addr, uint size)
+{
+}
+
+#if defined(WL_WIRELESS_EXT)
+struct iw_statistics *
+dhd_get_wireless_stats(struct net_device *dev)
+{
+	int res = 0;
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+	if (!dhd->pub.up) {
+		return NULL;
+	}
+
+	res = wl_iw_get_wireless_stats(dev, &dhd->iw.wstats);
+
+	if (res == 0)
+		return &dhd->iw.wstats;
+	else
+		return NULL;
+}
+#endif /* defined(WL_WIRELESS_EXT) */
+
+static int
+dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata, size_t pktlen,
+	wl_event_msg_t *event, void **data)
+{
+	int bcmerror = 0;
+	ASSERT(dhd != NULL);
+
+#ifdef SHOW_LOGTRACE
+		bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, pktlen,
+				event, data, &dhd->event_data);
+#else
+		bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, pktlen,
+				event, data, NULL);
+#endif /* SHOW_LOGTRACE */
+
+	if (bcmerror != BCME_OK)
+		return (bcmerror);
+
+#if defined(WL_WIRELESS_EXT)
+	if (event->bsscfgidx == 0) {
+		/*
+		 * Wireless ext is on primary interface only
+		 */
+
+	ASSERT(dhd->iflist[*ifidx] != NULL);
+	ASSERT(dhd->iflist[*ifidx]->net != NULL);
+
+		if (dhd->iflist[*ifidx]->net) {
+		wl_iw_event(dhd->iflist[*ifidx]->net, event, *data);
+		}
+	}
+#endif /* defined(WL_WIRELESS_EXT)  */
+
+#ifdef WL_CFG80211
+	ASSERT(dhd->iflist[*ifidx] != NULL);
+	ASSERT(dhd->iflist[*ifidx]->net != NULL);
+	if (dhd->iflist[*ifidx]->net)
+		wl_cfg80211_event(dhd->iflist[*ifidx]->net, event, *data);
+#endif /* defined(WL_CFG80211) */
+
+	return (bcmerror);
+}
+
+/* send up locally generated event */
+void
+dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data)
+{
+	switch (ntoh32(event->event_type)) {
+
+	default:
+		break;
+	}
+}
+
+#ifdef LOG_INTO_TCPDUMP
+void
+dhd_sendup_log(dhd_pub_t *dhdp, void *data, int data_len)
+{
+	struct sk_buff *p, *skb;
+	uint32 pktlen;
+	int len;
+	dhd_if_t *ifp;
+	dhd_info_t *dhd;
+	uchar *skb_data;
+	int ifidx = 0;
+	struct ether_header eth;
+
+	pktlen = sizeof(eth) + data_len;
+	dhd = dhdp->info;
+
+	if ((p = PKTGET(dhdp->osh, pktlen, FALSE))) {
+		ASSERT(ISALIGNED((uintptr)PKTDATA(dhdp->osh, p), sizeof(uint32)));
+
+		bcopy(&dhdp->mac, &eth.ether_dhost, ETHER_ADDR_LEN);
+		bcopy(&dhdp->mac, &eth.ether_shost, ETHER_ADDR_LEN);
+		ETHER_TOGGLE_LOCALADDR(&eth.ether_shost);
+		eth.ether_type = hton16(ETHER_TYPE_BRCM);
+
+		bcopy((void *)&eth, PKTDATA(dhdp->osh, p), sizeof(eth));
+		bcopy(data, PKTDATA(dhdp->osh, p) + sizeof(eth), data_len);
+		skb = PKTTONATIVE(dhdp->osh, p);
+		skb_data = skb->data;
+		len = skb->len;
+
+		ifidx = dhd_ifname2idx(dhd, "wlan0");
+		ifp = dhd->iflist[ifidx];
+		if (ifp == NULL)
+			 ifp = dhd->iflist[0];
+
+		ASSERT(ifp);
+		skb->dev = ifp->net;
+		skb->protocol = eth_type_trans(skb, skb->dev);
+		skb->data = skb_data;
+		skb->len = len;
+
+		/* Strip header, count, deliver upward */
+		skb_pull(skb, ETH_HLEN);
+
+		/* Send the packet */
+		if (in_interrupt()) {
+			netif_rx(skb);
+		} else {
+			netif_rx_ni(skb);
+		}
+	}
+	else {
+		/* Could not allocate a sk_buf */
+		DHD_ERROR(("%s: unable to alloc sk_buf", __FUNCTION__));
+	}
+}
+#endif /* LOG_INTO_TCPDUMP */
+
+void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar)
+{
+#if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+	struct dhd_info *dhdinfo =  dhd->info;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+	int timeout = msecs_to_jiffies(IOCTL_RESP_TIMEOUT);
+#else
+	int timeout = (IOCTL_RESP_TIMEOUT / 1000) * HZ;
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
+
+	dhd_os_sdunlock(dhd);
+	wait_event_timeout(dhdinfo->ctrl_wait, (*lockvar == FALSE), timeout);
+	dhd_os_sdlock(dhd);
+#endif /* defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */
+	return;
+}
+
+void dhd_wait_event_wakeup(dhd_pub_t *dhd)
+{
+#if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+	struct dhd_info *dhdinfo =  dhd->info;
+	if (waitqueue_active(&dhdinfo->ctrl_wait))
+		wake_up(&dhdinfo->ctrl_wait);
+#endif
+	return;
+}
+
+#if defined(BCMSDIO) || defined(BCMPCIE)
+int
+dhd_net_bus_devreset(struct net_device *dev, uint8 flag)
+{
+	int ret = 0;
+
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	if (flag == TRUE) {
+		/* Issue wl down command before resetting the chip */
+		if (dhd_wl_ioctl_cmd(&dhd->pub, WLC_DOWN, NULL, 0, TRUE, 0) < 0) {
+			DHD_TRACE(("%s: wl down failed\n", __FUNCTION__));
+		}
+#ifdef PROP_TXSTATUS
+		if (dhd->pub.wlfc_enabled) {
+			dhd_wlfc_deinit(&dhd->pub);
+		}
+#endif /* PROP_TXSTATUS */
+#ifdef PNO_SUPPORT
+		if (dhd->pub.pno_state) {
+			dhd_pno_deinit(&dhd->pub);
+		}
+#endif /* PNO_SUPPORT */
+#ifdef RTT_SUPPORT
+		if (dhd->pub.rtt_state) {
+			dhd_rtt_deinit(&dhd->pub);
+		}
+#endif /* RTT_SUPPORT */
+	}
+#ifdef BCMSDIO
+	if (!flag) {
+		dhd_update_fw_nv_path(dhd);
+		/* update firmware and nvram path to sdio bus */
+		dhd_bus_update_fw_nv_path(dhd->pub.bus,
+			dhd->fw_path, dhd->nv_path);
+	}
+#endif /* BCMSDIO */
+	ret = dhd_bus_devreset(&dhd->pub, flag);
+	if (ret) {
+		DHD_ERROR(("%s: dhd_bus_devreset: %d\n", __FUNCTION__, ret));
+		return ret;
+	}
+	return ret;
+}
+
+#ifdef BCMSDIO
+int
+dhd_net_bus_suspend(struct net_device *dev)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	return dhd_bus_suspend(&dhd->pub);
+}
+
+int
+dhd_net_bus_resume(struct net_device *dev, uint8 stage)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	return dhd_bus_resume(&dhd->pub, stage);
+}
+
+#endif /* BCMSDIO */
+#endif /* BCMSDIO || BCMPCIE */
+
+int net_os_set_suspend_disable(struct net_device *dev, int val)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	int ret = 0;
+
+	if (dhd) {
+		ret = dhd->pub.suspend_disable_flag;
+		dhd->pub.suspend_disable_flag = val;
+	}
+	return ret;
+}
+
+int net_os_set_suspend(struct net_device *dev, int val, int force)
+{
+	int ret = 0;
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+	if (dhd) {
+#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
+		ret = dhd_set_suspend(val, &dhd->pub);
+#else
+		ret = dhd_suspend_resume_helper(dhd, val, force);
+#endif
+#ifdef WL_CFG80211
+		wl_cfg80211_update_power_mode(dev);
+#endif
+	}
+	return ret;
+}
+
+int net_os_set_suspend_bcn_li_dtim(struct net_device *dev, int val)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+	if (dhd)
+		dhd->pub.suspend_bcn_li_dtim = val;
+
+	return 0;
+}
+
+#ifdef PKT_FILTER_SUPPORT
+int net_os_rxfilter_add_remove(struct net_device *dev, int add_remove, int num)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	char *filterp = NULL;
+	int filter_id = 0;
+	int ret = 0;
+
+	if (!dhd || (num == DHD_UNICAST_FILTER_NUM) ||
+		(num == DHD_MDNS_FILTER_NUM))
+		return ret;
+	if (num >= dhd->pub.pktfilter_count)
+		return -EINVAL;
+	switch (num) {
+		case DHD_BROADCAST_FILTER_NUM:
+			filterp = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF";
+			filter_id = 101;
+			break;
+		case DHD_MULTICAST4_FILTER_NUM:
+			filterp = "102 0 0 0 0xFFFFFF 0x01005E";
+			filter_id = 102;
+			break;
+		case DHD_MULTICAST6_FILTER_NUM:
+			filterp = "103 0 0 0 0xFFFF 0x3333";
+			filter_id = 103;
+			break;
+		default:
+			return -EINVAL;
+	}
+
+	/* Add filter */
+	if (add_remove) {
+		dhd->pub.pktfilter[num] = filterp;
+		dhd_pktfilter_offload_set(&dhd->pub, dhd->pub.pktfilter[num]);
+	} else { /* Delete filter */
+		if (dhd->pub.pktfilter[num] != NULL) {
+			dhd_pktfilter_offload_delete(&dhd->pub, filter_id);
+			dhd->pub.pktfilter[num] = NULL;
+		}
+	}
+	return ret;
+}
+
+int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val)
+
+{
+	int ret = 0;
+
+	/* Packet filtering is set only if we still in early-suspend and
+	 * we need either to turn it ON or turn it OFF
+	 * We can always turn it OFF in case of early-suspend, but we turn it
+	 * back ON only if suspend_disable_flag was not set
+	*/
+	if (dhdp && dhdp->up) {
+		if (dhdp->in_suspend) {
+			if (!val || (val && !dhdp->suspend_disable_flag))
+				dhd_enable_packet_filter(val, dhdp);
+		}
+	}
+	return ret;
+}
+
+/* function to enable/disable packet for Network device */
+int net_os_enable_packet_filter(struct net_device *dev, int val)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+	return dhd_os_enable_packet_filter(&dhd->pub, val);
+}
+#endif /* PKT_FILTER_SUPPORT */
+
+int
+dhd_dev_init_ioctl(struct net_device *dev)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	int ret;
+
+	if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0)
+		goto done;
+
+done:
+	return ret;
+}
+int dhd_dev_get_feature_set(struct net_device *dev)
+{
+	dhd_info_t *ptr = *(dhd_info_t **)netdev_priv(dev);
+	dhd_pub_t *dhd = (&ptr->pub);
+	int feature_set = 0;
+
+	if (!dhd)
+		return feature_set;
+
+	if (FW_SUPPORTED(dhd, sta))
+		feature_set |= WIFI_FEATURE_INFRA;
+	if (FW_SUPPORTED(dhd, dualband))
+		feature_set |= WIFI_FEATURE_INFRA_5G;
+	if (FW_SUPPORTED(dhd, p2p))
+		feature_set |= WIFI_FEATURE_P2P;
+	if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE)
+		feature_set |= WIFI_FEATURE_SOFT_AP;
+	if (FW_SUPPORTED(dhd, tdls))
+		feature_set |= WIFI_FEATURE_TDLS;
+	if (FW_SUPPORTED(dhd, vsdb))
+		feature_set |= WIFI_FEATURE_TDLS_OFFCHANNEL;
+	if (FW_SUPPORTED(dhd, nan)) {
+		feature_set |= WIFI_FEATURE_NAN;
+		/* NAN is essentail for d2d rtt */
+		if (FW_SUPPORTED(dhd, rttd2d))
+			feature_set |= WIFI_FEATURE_D2D_RTT;
+	}
+#ifdef RTT_SUPPORT
+	feature_set |= WIFI_FEATURE_D2AP_RTT;
+#endif /* RTT_SUPPORT */
+#ifdef LINKSTAT_SUPPORT
+	feature_set |= WIFI_FEATURE_LINKSTAT;
+#endif /* LINKSTAT_SUPPORT */
+	/* Supports STA + STA always */
+	feature_set |= WIFI_FEATURE_ADDITIONAL_STA;
+#ifdef PNO_SUPPORT
+	if (dhd_is_pno_supported(dhd)) {
+		feature_set |= WIFI_FEATURE_PNO;
+		feature_set |= WIFI_FEATURE_BATCH_SCAN;
+#ifdef GSCAN_SUPPORT
+		feature_set |= WIFI_FEATURE_GSCAN;
+#endif /* GSCAN_SUPPORT */
+	}
+#endif /* PNO_SUPPORT */
+#ifdef WL11U
+	feature_set |= WIFI_FEATURE_HOTSPOT;
+#endif /* WL11U */
+	return feature_set;
+}
+
+int *dhd_dev_get_feature_set_matrix(struct net_device *dev, int *num)
+{
+	int feature_set_full, mem_needed;
+	int *ret;
+
+	*num = 0;
+	mem_needed = sizeof(int) * MAX_FEATURE_SET_CONCURRRENT_GROUPS;
+	ret = (int *) kmalloc(mem_needed, GFP_KERNEL);
+
+	 if (!ret) {
+		DHD_ERROR(("%s: failed to allocate %d bytes\n", __FUNCTION__,
+		mem_needed));
+		return ret;
+	 }
+
+	feature_set_full = dhd_dev_get_feature_set(dev);
+
+	ret[0] = (feature_set_full & WIFI_FEATURE_INFRA) |
+	         (feature_set_full & WIFI_FEATURE_INFRA_5G) |
+	         (feature_set_full & WIFI_FEATURE_NAN) |
+	         (feature_set_full & WIFI_FEATURE_D2D_RTT) |
+	         (feature_set_full & WIFI_FEATURE_D2AP_RTT) |
+	         (feature_set_full & WIFI_FEATURE_PNO) |
+	         (feature_set_full & WIFI_FEATURE_BATCH_SCAN) |
+	         (feature_set_full & WIFI_FEATURE_GSCAN) |
+	         (feature_set_full & WIFI_FEATURE_HOTSPOT) |
+	         (feature_set_full & WIFI_FEATURE_ADDITIONAL_STA) |
+	         (feature_set_full & WIFI_FEATURE_EPR);
+
+	ret[1] = (feature_set_full & WIFI_FEATURE_INFRA) |
+	         (feature_set_full & WIFI_FEATURE_INFRA_5G) |
+	         /* Not yet verified NAN with P2P */
+	         /* (feature_set_full & WIFI_FEATURE_NAN) | */
+	         (feature_set_full & WIFI_FEATURE_P2P) |
+	         (feature_set_full & WIFI_FEATURE_D2AP_RTT) |
+	         (feature_set_full & WIFI_FEATURE_D2D_RTT) |
+	         (feature_set_full & WIFI_FEATURE_EPR);
+
+	ret[2] = (feature_set_full & WIFI_FEATURE_INFRA) |
+	         (feature_set_full & WIFI_FEATURE_INFRA_5G) |
+	         (feature_set_full & WIFI_FEATURE_NAN) |
+	         (feature_set_full & WIFI_FEATURE_D2D_RTT) |
+	         (feature_set_full & WIFI_FEATURE_D2AP_RTT) |
+	         (feature_set_full & WIFI_FEATURE_TDLS) |
+	         (feature_set_full & WIFI_FEATURE_TDLS_OFFCHANNEL) |
+	         (feature_set_full & WIFI_FEATURE_EPR);
+	*num = MAX_FEATURE_SET_CONCURRRENT_GROUPS;
+
+	return ret;
+}
+
+int
+dhd_dev_set_nodfs(struct net_device *dev, u32 nodfs)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+	if (nodfs)
+		dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG;
+	else
+		dhd->pub.dhd_cflags &= ~WLAN_PLAT_NODFS_FLAG;
+	dhd->pub.force_country_change = TRUE;
+	return 0;
+}
+
+#ifdef PNO_SUPPORT
+/* Linux wrapper to call common dhd_pno_stop_for_ssid */
+int
+dhd_dev_pno_stop_for_ssid(struct net_device *dev)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+	return (dhd_pno_stop_for_ssid(&dhd->pub));
+}
+/* Linux wrapper to call common dhd_pno_set_for_ssid */
+int
+dhd_dev_pno_set_for_ssid(struct net_device *dev, wlc_ssid_ext_t* ssids_local, int nssid,
+	uint16  scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+	return (dhd_pno_set_for_ssid(&dhd->pub, ssids_local, nssid, scan_fr,
+		pno_repeat, pno_freq_expo_max, channel_list, nchan));
+}
+
+/* Linux wrapper to call common dhd_pno_enable */
+int
+dhd_dev_pno_enable(struct net_device *dev, int enable)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+	return (dhd_pno_enable(&dhd->pub, enable));
+}
+
+/* Linux wrapper to call common dhd_pno_set_for_hotlist */
+int
+dhd_dev_pno_set_for_hotlist(struct net_device *dev, wl_pfn_bssid_t *p_pfn_bssid,
+	struct dhd_pno_hotlist_params *hotlist_params)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	return (dhd_pno_set_for_hotlist(&dhd->pub, p_pfn_bssid, hotlist_params));
+}
+/* Linux wrapper to call common dhd_dev_pno_stop_for_batch */
+int
+dhd_dev_pno_stop_for_batch(struct net_device *dev)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	return (dhd_pno_stop_for_batch(&dhd->pub));
+}
+/* Linux wrapper to call common dhd_dev_pno_set_for_batch */
+int
+dhd_dev_pno_set_for_batch(struct net_device *dev, struct dhd_pno_batch_params *batch_params)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	return (dhd_pno_set_for_batch(&dhd->pub, batch_params));
+}
+/* Linux wrapper to call common dhd_dev_pno_get_for_batch */
+int
+dhd_dev_pno_get_for_batch(struct net_device *dev, char *buf, int bufsize)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	return (dhd_pno_get_for_batch(&dhd->pub, buf, bufsize, PNO_STATUS_NORMAL));
+}
+/* Linux wrapper to call common dhd_pno_set_mac_oui */
+int dhd_dev_pno_set_mac_oui(struct net_device *dev, uint8 *oui)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	return (dhd_pno_set_mac_oui(&dhd->pub, oui));
+}
+#endif /* PNO_SUPPORT */
+
+#ifdef GSCAN_SUPPORT
+/* Linux wrapper to call common dhd_pno_set_cfg_gscan */
+int
+dhd_dev_pno_set_cfg_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type,
+ void *buf, uint8 flush)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	return (dhd_pno_set_cfg_gscan(&dhd->pub, type, buf, flush));
+}
+
+/* Linux wrapper to call common dhd_pno_get_gscan */
+void *
+dhd_dev_pno_get_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type,
+                      void *info, uint32 *len)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	return (dhd_pno_get_gscan(&dhd->pub, type, info, len));
+}
+
+/* Linux wrapper to call common dhd_wait_batch_results_complete */
+void dhd_dev_wait_batch_results_complete(struct net_device *dev)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	return (dhd_wait_batch_results_complete(&dhd->pub));
+}
+
+/* Linux wrapper to call common dhd_pno_lock_batch_results */
+void
+dhd_dev_pno_lock_access_batch_results(struct net_device *dev)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	return (dhd_pno_lock_batch_results(&dhd->pub));
+}
+/* Linux wrapper to call common dhd_pno_unlock_batch_results */
+void
+dhd_dev_pno_unlock_access_batch_results(struct net_device *dev)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	return (dhd_pno_unlock_batch_results(&dhd->pub));
+}
+
+/* Linux wrapper to call common dhd_pno_initiate_gscan_request */
+int dhd_dev_pno_run_gscan(struct net_device *dev, bool run, bool flush)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	return (dhd_pno_initiate_gscan_request(&dhd->pub, run, flush));
+}
+
+/* Linux wrapper to call common dhd_pno_enable_full_scan_result */
+int dhd_dev_pno_enable_full_scan_result(struct net_device *dev, bool real_time_flag)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	return (dhd_pno_enable_full_scan_result(&dhd->pub, real_time_flag));
+}
+
+/* Linux wrapper to call common dhd_handle_swc_evt */
+void * dhd_dev_swc_scan_event(struct net_device *dev, const void  *data, int *send_evt_bytes)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	return (dhd_handle_swc_evt(&dhd->pub, data, send_evt_bytes));
+}
+
+/* Linux wrapper to call common dhd_handle_hotlist_scan_evt */
+void * dhd_dev_hotlist_scan_event(struct net_device *dev,
+      const void  *data, int *send_evt_bytes, hotlist_type_t type)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	return (dhd_handle_hotlist_scan_evt(&dhd->pub, data, send_evt_bytes, type));
+}
+
+/* Linux wrapper to call common dhd_process_full_gscan_result */
+void * dhd_dev_process_full_gscan_result(struct net_device *dev,
+const void  *data, int *send_evt_bytes)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	return (dhd_process_full_gscan_result(&dhd->pub, data, send_evt_bytes));
+}
+
+void dhd_dev_gscan_hotlist_cache_cleanup(struct net_device *dev, hotlist_type_t type)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	dhd_gscan_hotlist_cache_cleanup(&dhd->pub, type);
+
+	return;
+}
+
+int dhd_dev_gscan_batch_cache_cleanup(struct net_device *dev)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	return (dhd_gscan_batch_cache_cleanup(&dhd->pub));
+}
+
+/* Linux wrapper to call common dhd_retreive_batch_scan_results */
+int dhd_dev_retrieve_batch_scan(struct net_device *dev)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	return (dhd_retreive_batch_scan_results(&dhd->pub));
+}
+#endif /* GSCAN_SUPPORT */
+
+#ifdef RTT_SUPPORT
+/* Linux wrapper to call common dhd_pno_set_cfg_gscan */
+int
+dhd_dev_rtt_set_cfg(struct net_device *dev, void *buf)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	return (dhd_rtt_set_cfg(&dhd->pub, buf));
+}
+int
+dhd_dev_rtt_cancel_cfg(struct net_device *dev, struct ether_addr *mac_list, int mac_cnt)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	return (dhd_rtt_stop(&dhd->pub, mac_list, mac_cnt));
+}
+
+int
+dhd_dev_rtt_register_noti_callback(struct net_device *dev, void *ctx, dhd_rtt_compl_noti_fn noti_fn)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	return (dhd_rtt_register_noti_callback(&dhd->pub, ctx, noti_fn));
+}
+int
+dhd_dev_rtt_unregister_noti_callback(struct net_device *dev, dhd_rtt_compl_noti_fn noti_fn)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	return (dhd_rtt_unregister_noti_callback(&dhd->pub, noti_fn));
+}
+
+int
+dhd_dev_rtt_capability(struct net_device *dev, rtt_capabilities_t *capa)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	return (dhd_rtt_capability(&dhd->pub, capa));
+}
+#endif /* RTT_SUPPORT */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+static void dhd_hang_process(void *dhd_info, void *event_info, u8 event)
+{
+	dhd_info_t *dhd;
+	struct net_device *dev;
+
+	dhd = (dhd_info_t *)dhd_info;
+	dev = dhd->iflist[0]->net;
+
+	if (dev) {
+		rtnl_lock();
+		dev_close(dev);
+		rtnl_unlock();
+#if defined(WL_WIRELESS_EXT)
+		wl_iw_send_priv_event(dev, "HANG");
+#endif
+#if defined(WL_CFG80211)
+		wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
+#endif
+	}
+}
+
+int dhd_os_send_hang_message(dhd_pub_t *dhdp)
+{
+	int ret = 0;
+	if (dhdp) {
+		if (!dhdp->hang_was_sent) {
+			dhdp->hang_was_sent = 1;
+			dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dhdp,
+				DHD_WQ_WORK_HANG_MSG, dhd_hang_process, DHD_WORK_PRIORITY_HIGH);
+		}
+	}
+	return ret;
+}
+
+int net_os_send_hang_message(struct net_device *dev)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	int ret = 0;
+
+	if (dhd) {
+		/* Report FW problem when enabled */
+		if (dhd->pub.hang_report) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+			ret = dhd_os_send_hang_message(&dhd->pub);
+#else
+			ret = wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
+#endif
+		} else {
+			DHD_ERROR(("%s: FW HANG ignored (for testing purpose) and not sent up\n",
+				__FUNCTION__));
+			/* Enforce bus down to stop any future traffic */
+			dhd->pub.busstate = DHD_BUS_DOWN;
+		}
+	}
+	return ret;
+}
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) && OEM_ANDROID */
+
+
+int dhd_net_wifi_platform_set_power(struct net_device *dev, bool on, unsigned long delay_msec)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	return wifi_platform_set_power(dhd->adapter, on, delay_msec);
+}
+
+bool dhd_force_country_change(struct net_device *dev)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+	if (dhd && dhd->pub.up)
+		return dhd->pub.force_country_change;
+	return FALSE;
+}
+
+void dhd_get_customized_country_code(struct net_device *dev, char *country_iso_code,
+	wl_country_t *cspec)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	get_customized_country_code(dhd->adapter, country_iso_code, cspec,
+				    dhd->pub.dhd_cflags);
+}
+
+void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec, bool notify)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	if (dhd && dhd->pub.up) {
+		memcpy(&dhd->pub.dhd_cspec, cspec, sizeof(wl_country_t));
+		dhd->pub.force_country_change = FALSE;
+#ifdef WL_CFG80211
+		wl_update_wiphybands(NULL, notify);
+#endif
+	}
+}
+
+void dhd_bus_band_set(struct net_device *dev, uint band)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	if (dhd && dhd->pub.up) {
+#ifdef WL_CFG80211
+		wl_update_wiphybands(NULL, true);
+#endif
+	}
+}
+
+int dhd_net_set_fw_path(struct net_device *dev, char *fw)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+	if (!fw || fw[0] == '\0')
+		return -EINVAL;
+
+	strncpy(dhd->fw_path, fw, sizeof(dhd->fw_path) - 1);
+	dhd->fw_path[sizeof(dhd->fw_path)-1] = '\0';
+
+#if defined(SOFTAP)
+	if (strstr(fw, "apsta") != NULL) {
+		DHD_INFO(("GOT APSTA FIRMWARE\n"));
+		ap_fw_loaded = TRUE;
+	} else {
+		DHD_INFO(("GOT STA FIRMWARE\n"));
+		ap_fw_loaded = FALSE;
+	}
+#endif
+	return 0;
+}
+
+void dhd_net_if_lock(struct net_device *dev)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	dhd_net_if_lock_local(dhd);
+}
+
+void dhd_net_if_unlock(struct net_device *dev)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	dhd_net_if_unlock_local(dhd);
+}
+
+static void dhd_net_if_lock_local(dhd_info_t *dhd)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	if (dhd)
+		mutex_lock(&dhd->dhd_net_if_mutex);
+#endif
+}
+
+static void dhd_net_if_unlock_local(dhd_info_t *dhd)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	if (dhd)
+		mutex_unlock(&dhd->dhd_net_if_mutex);
+#endif
+}
+
+static void dhd_suspend_lock(dhd_pub_t *pub)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	if (dhd)
+		mutex_lock(&dhd->dhd_suspend_mutex);
+#endif
+}
+
+static void dhd_suspend_unlock(dhd_pub_t *pub)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	if (dhd)
+		mutex_unlock(&dhd->dhd_suspend_mutex);
+#endif
+}
+
+unsigned long dhd_os_general_spin_lock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	unsigned long flags = 0;
+
+	if (dhd)
+		spin_lock_irqsave(&dhd->dhd_lock, flags);
+
+	return flags;
+}
+
+void dhd_os_general_spin_unlock(dhd_pub_t *pub, unsigned long flags)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+	if (dhd)
+		spin_unlock_irqrestore(&dhd->dhd_lock, flags);
+}
+
+/* Linux specific multipurpose spinlock API */
+void *
+dhd_os_spin_lock_init(osl_t *osh)
+{
+	/* Adding 4 bytes since the sizeof(spinlock_t) could be 0 */
+	/* if CONFIG_SMP and CONFIG_DEBUG_SPINLOCK are not defined */
+	/* and this results in kernel asserts in internal builds */
+	spinlock_t * lock = MALLOC(osh, sizeof(spinlock_t) + 4);
+	if (lock)
+		spin_lock_init(lock);
+	return ((void *)lock);
+}
+void
+dhd_os_spin_lock_deinit(osl_t *osh, void *lock)
+{
+	MFREE(osh, lock, sizeof(spinlock_t) + 4);
+}
+unsigned long
+dhd_os_spin_lock(void *lock)
+{
+	unsigned long flags = 0;
+
+	if (lock)
+		spin_lock_irqsave((spinlock_t *)lock, flags);
+
+	return flags;
+}
+void
+dhd_os_spin_unlock(void *lock, unsigned long flags)
+{
+	if (lock)
+		spin_unlock_irqrestore((spinlock_t *)lock, flags);
+}
+
+static int
+dhd_get_pend_8021x_cnt(dhd_info_t *dhd)
+{
+	return (atomic_read(&dhd->pend_8021x_cnt));
+}
+
+#define MAX_WAIT_FOR_8021X_TX	100
+
+int
+dhd_wait_pend8021x(struct net_device *dev)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	int timeout = msecs_to_jiffies(10);
+	int ntimes = MAX_WAIT_FOR_8021X_TX;
+	int pend = dhd_get_pend_8021x_cnt(dhd);
+
+	while (ntimes && pend) {
+		if (pend) {
+			set_current_state(TASK_INTERRUPTIBLE);
+			DHD_PERIM_UNLOCK(&dhd->pub);
+			schedule_timeout(timeout);
+			DHD_PERIM_LOCK(&dhd->pub);
+			set_current_state(TASK_RUNNING);
+			ntimes--;
+		}
+		pend = dhd_get_pend_8021x_cnt(dhd);
+	}
+	if (ntimes == 0)
+	{
+		atomic_set(&dhd->pend_8021x_cnt, 0);
+		DHD_ERROR(("%s: TIMEOUT\n", __FUNCTION__));
+	}
+	return pend;
+}
+
+#ifdef DHD_DEBUG
+int
+write_to_file(dhd_pub_t *dhd, uint8 *buf, int size)
+{
+	int ret = 0;
+	struct file *fp;
+	mm_segment_t old_fs;
+	loff_t pos = 0;
+
+	/* change to KERNEL_DS address limit */
+	old_fs = get_fs();
+	set_fs(KERNEL_DS);
+
+	/* open file to write */
+	fp = filp_open("/tmp/mem_dump", O_WRONLY|O_CREAT, 0640);
+	if (!fp) {
+		printf("%s: open file error\n", __FUNCTION__);
+		ret = -1;
+		goto exit;
+	}
+
+	/* Write buf to file */
+	fp->f_op->write(fp, buf, size, &pos);
+
+exit:
+	/* free buf before return */
+	MFREE(dhd->osh, buf, size);
+	/* close file before return */
+	if (fp)
+		filp_close(fp, current->files);
+	/* restore previous address limit */
+	set_fs(old_fs);
+
+	return ret;
+}
+#endif /* DHD_DEBUG */
+
+int dhd_os_wake_lock_timeout(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	unsigned long flags;
+	int ret = 0;
+
+	if (dhd) {
+		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+		ret = dhd->wakelock_rx_timeout_enable > dhd->wakelock_ctrl_timeout_enable ?
+			dhd->wakelock_rx_timeout_enable : dhd->wakelock_ctrl_timeout_enable;
+#ifdef CONFIG_HAS_WAKELOCK
+		if (dhd->wakelock_rx_timeout_enable)
+			wake_lock_timeout(&dhd->wl_rxwake,
+				msecs_to_jiffies(dhd->wakelock_rx_timeout_enable));
+		if (dhd->wakelock_ctrl_timeout_enable)
+			wake_lock_timeout(&dhd->wl_ctrlwake,
+				msecs_to_jiffies(dhd->wakelock_ctrl_timeout_enable));
+#endif
+		dhd->wakelock_rx_timeout_enable = 0;
+		dhd->wakelock_ctrl_timeout_enable = 0;
+		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+	}
+	return ret;
+}
+
+int net_os_wake_lock_timeout(struct net_device *dev)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	int ret = 0;
+
+	if (dhd)
+		ret = dhd_os_wake_lock_timeout(&dhd->pub);
+	return ret;
+}
+
+int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t *pub, int val)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	unsigned long flags;
+
+	if (dhd) {
+		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+		if (val > dhd->wakelock_rx_timeout_enable)
+			dhd->wakelock_rx_timeout_enable = val;
+		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+	}
+	return 0;
+}
+
+int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t *pub, int val)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	unsigned long flags;
+
+	if (dhd) {
+		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+		if (val > dhd->wakelock_ctrl_timeout_enable)
+			dhd->wakelock_ctrl_timeout_enable = val;
+		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+	}
+	return 0;
+}
+
+int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	unsigned long flags;
+
+	if (dhd) {
+		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+		dhd->wakelock_ctrl_timeout_enable = 0;
+#ifdef CONFIG_HAS_WAKELOCK
+		if (wake_lock_active(&dhd->wl_ctrlwake))
+			wake_unlock(&dhd->wl_ctrlwake);
+#endif
+		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+	}
+	return 0;
+}
+
+int net_os_wake_lock_rx_timeout_enable(struct net_device *dev, int val)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	int ret = 0;
+
+	if (dhd)
+		ret = dhd_os_wake_lock_rx_timeout_enable(&dhd->pub, val);
+	return ret;
+}
+
+int net_os_wake_lock_ctrl_timeout_enable(struct net_device *dev, int val)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	int ret = 0;
+
+	if (dhd)
+		ret = dhd_os_wake_lock_ctrl_timeout_enable(&dhd->pub, val);
+	return ret;
+}
+
+int dhd_os_wake_lock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	unsigned long flags;
+	int ret = 0;
+
+	if (dhd) {
+		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+		if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
+#ifdef CONFIG_HAS_WAKELOCK
+			wake_lock(&dhd->wl_wifi);
+#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+			dhd_bus_dev_pm_stay_awake(pub);
+#endif
+		}
+		dhd->wakelock_counter++;
+		ret = dhd->wakelock_counter;
+		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+	}
+	return ret;
+}
+
+int net_os_wake_lock(struct net_device *dev)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	int ret = 0;
+
+	if (dhd)
+		ret = dhd_os_wake_lock(&dhd->pub);
+	return ret;
+}
+
+int dhd_os_wake_unlock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	unsigned long flags;
+	int ret = 0;
+
+	dhd_os_wake_lock_timeout(pub);
+	if (dhd) {
+		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+		if (dhd->wakelock_counter > 0) {
+			dhd->wakelock_counter--;
+			if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
+#ifdef CONFIG_HAS_WAKELOCK
+				wake_unlock(&dhd->wl_wifi);
+#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+				dhd_bus_dev_pm_relax(pub);
+#endif
+			}
+			ret = dhd->wakelock_counter;
+		}
+		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+	}
+	return ret;
+}
+
+int dhd_os_check_wakelock(dhd_pub_t *pub)
+{
+#if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \
+	KERNEL_VERSION(2, 6, 36)))
+	dhd_info_t *dhd;
+
+	if (!pub)
+		return 0;
+	dhd = (dhd_info_t *)(pub->info);
+#endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */
+
+#ifdef CONFIG_HAS_WAKELOCK
+	/* Indicate to the SD Host to avoid going to suspend if internal locks are up */
+	if (dhd && (wake_lock_active(&dhd->wl_wifi) ||
+		(wake_lock_active(&dhd->wl_wdwake))))
+		return 1;
+#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+	if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub))
+		return 1;
+#endif
+	return 0;
+}
+
+int dhd_os_check_wakelock_all(dhd_pub_t *pub)
+{
+#if defined(CONFIG_HAS_WAKELOCK) || \
+	(defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)))
+	dhd_info_t *dhd;
+
+	if (!pub)
+		return 0;
+	dhd = (dhd_info_t *)(pub->info);
+#endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */
+
+#ifdef CONFIG_HAS_WAKELOCK
+	/* Indicate to the SD Host to avoid going to suspend if internal locks are up */
+	if (dhd && (wake_lock_active(&dhd->wl_wifi) ||
+		wake_lock_active(&dhd->wl_wdwake) ||
+		wake_lock_active(&dhd->wl_rxwake) ||
+		wake_lock_active(&dhd->wl_ctrlwake))) {
+		return 1;
+	}
+#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+	if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub))
+		return 1;
+#endif
+	return 0;
+}
+
+int net_os_wake_unlock(struct net_device *dev)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+	int ret = 0;
+
+	if (dhd)
+		ret = dhd_os_wake_unlock(&dhd->pub);
+	return ret;
+}
+
+int dhd_os_wd_wake_lock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	unsigned long flags;
+	int ret = 0;
+
+	if (dhd) {
+		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+#ifdef CONFIG_HAS_WAKELOCK
+		/* if wakelock_wd_counter was never used : lock it at once */
+		if (!dhd->wakelock_wd_counter)
+			wake_lock(&dhd->wl_wdwake);
+#endif
+		dhd->wakelock_wd_counter++;
+		ret = dhd->wakelock_wd_counter;
+		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+	}
+	return ret;
+}
+
+int dhd_os_wd_wake_unlock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	unsigned long flags;
+	int ret = 0;
+
+	if (dhd) {
+		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+		if (dhd->wakelock_wd_counter) {
+			dhd->wakelock_wd_counter = 0;
+#ifdef CONFIG_HAS_WAKELOCK
+			wake_unlock(&dhd->wl_wdwake);
+#endif
+		}
+		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+	}
+	return ret;
+}
+
+/* waive wakelocks for operations such as IOVARs in suspend function, must be closed
+ * by a paired function call to dhd_wakelock_restore. returns current wakelock counter
+ */
+int dhd_os_wake_lock_waive(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	unsigned long flags;
+	int ret = 0;
+
+	if (dhd) {
+		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+		/* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
+		if (dhd->waive_wakelock == FALSE) {
+			/* record current lock status */
+			dhd->wakelock_before_waive = dhd->wakelock_counter;
+			dhd->waive_wakelock = TRUE;
+		}
+		ret = dhd->wakelock_wd_counter;
+		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+	}
+	return ret;
+}
+
+int dhd_os_wake_lock_restore(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	unsigned long flags;
+	int ret = 0;
+
+	if (!dhd)
+		return 0;
+
+	spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+	/* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
+	if (!dhd->waive_wakelock)
+		goto exit;
+
+	dhd->waive_wakelock = FALSE;
+	/* if somebody else acquires wakelock between dhd_wakelock_waive/dhd_wakelock_restore,
+	 * we need to make it up by calling wake_lock or pm_stay_awake. or if somebody releases
+	 * the lock in between, do the same by calling wake_unlock or pm_relax
+	 */
+	if (dhd->wakelock_before_waive == 0 && dhd->wakelock_counter > 0) {
+#ifdef CONFIG_HAS_WAKELOCK
+		wake_lock(&dhd->wl_wifi);
+#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+		dhd_bus_dev_pm_stay_awake(&dhd->pub);
+#endif
+	} else if (dhd->wakelock_before_waive > 0 && dhd->wakelock_counter == 0) {
+#ifdef CONFIG_HAS_WAKELOCK
+		wake_unlock(&dhd->wl_wifi);
+#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+		dhd_bus_dev_pm_relax(&dhd->pub);
+#endif
+	}
+	dhd->wakelock_before_waive = 0;
+exit:
+	ret = dhd->wakelock_wd_counter;
+	spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+	return ret;
+}
+
+bool dhd_os_check_if_up(dhd_pub_t *pub)
+{
+	if (!pub)
+		return FALSE;
+	return pub->up;
+}
+
+int dhd_os_get_wake_irq(dhd_pub_t *pub)
+{
+	if (!pub)
+		return -1;
+	return wifi_platform_get_wake_irq(pub->info->adapter);
+}
+
+/* function to collect firmware, chip id and chip version info */
+void dhd_set_version_info(dhd_pub_t *dhdp, char *fw)
+{
+	int i;
+
+	i = snprintf(info_string, sizeof(info_string),
+		"  Driver: %s\n  Firmware: %s ", EPI_VERSION_STR, fw);
+
+	if (!dhdp)
+		return;
+
+	i = snprintf(&info_string[i], sizeof(info_string) - i,
+		"\n  Chip: %x Rev %x Pkg %x", dhd_bus_chip_id(dhdp),
+		dhd_bus_chiprev_id(dhdp), dhd_bus_chippkg_id(dhdp));
+}
+int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd)
+{
+	int ifidx;
+	int ret = 0;
+	dhd_info_t *dhd = NULL;
+
+	if (!net || !DEV_PRIV(net)) {
+		DHD_ERROR(("%s invalid parameter\n", __FUNCTION__));
+		return -EINVAL;
+	}
+
+	dhd = DHD_DEV_INFO(net);
+	if (!dhd)
+		return -EINVAL;
+
+	ifidx = dhd_net2idx(dhd, net);
+	if (ifidx == DHD_BAD_IF) {
+		DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
+		return -ENODEV;
+	}
+
+	DHD_OS_WAKE_LOCK(&dhd->pub);
+	DHD_PERIM_LOCK(&dhd->pub);
+
+	ret = dhd_wl_ioctl(&dhd->pub, ifidx, ioc, ioc->buf, ioc->len);
+	dhd_check_hang(net, &dhd->pub, ret);
+
+	DHD_PERIM_UNLOCK(&dhd->pub);
+	DHD_OS_WAKE_UNLOCK(&dhd->pub);
+
+	return ret;
+}
+
+bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret)
+{
+	struct net_device *net;
+
+	net = dhd_idx2net(dhdp, ifidx);
+	if (!net) {
+		DHD_ERROR(("%s : Invalid index : %d\n", __FUNCTION__, ifidx));
+		return -EINVAL;
+	}
+
+	return dhd_check_hang(net, dhdp, ret);
+}
+
+/* Return instance */
+int dhd_get_instance(dhd_pub_t *dhdp)
+{
+	return dhdp->info->unit;
+}
+
+void dhd_set_short_dwell_time(dhd_pub_t *dhd, int set)
+{
+	int scan_assoc_time = DHD_SCAN_ASSOC_ACTIVE_TIME;
+	int scan_unassoc_time = DHD_SCAN_UNASSOC_ACTIVE_TIME;
+	int scan_passive_time = DHD_SCAN_PASSIVE_TIME;
+
+	DHD_TRACE(("%s: Enter: %d\n", __FUNCTION__, set));
+	if (dhd->short_dwell_time != set) {
+		if (set) {
+			scan_unassoc_time = DHD_SCAN_UNASSOC_ACTIVE_TIME_PS;
+		}
+		dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_UNASSOC_TIME,
+				(char *)&scan_unassoc_time,
+				sizeof(scan_unassoc_time), TRUE, 0);
+		if (dhd->short_dwell_time == -1) {
+			dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_CHANNEL_TIME,
+					(char *)&scan_assoc_time,
+					sizeof(scan_assoc_time), TRUE, 0);
+			dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_PASSIVE_TIME,
+					(char *)&scan_passive_time,
+					sizeof(scan_passive_time), TRUE, 0);
+		}
+		dhd->short_dwell_time = set;
+	}
+}
+
+#ifdef CUSTOM_SET_SHORT_DWELL_TIME
+void net_set_short_dwell_time(struct net_device *dev, int set)
+{
+	dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+	dhd_set_short_dwell_time(&dhd->pub, set);
+}
+#endif
+
+#ifdef PROP_TXSTATUS
+
+void dhd_wlfc_plat_init(void *dhd)
+{
+	return;
+}
+
+void dhd_wlfc_plat_deinit(void *dhd)
+{
+	return;
+}
+
+bool dhd_wlfc_skip_fc(void)
+{
+	return FALSE;
+}
+#endif /* PROP_TXSTATUS */
+
+#ifdef BCMDBGFS
+
+#include <linux/debugfs.h>
+
+extern uint32 dhd_readregl(void *bp, uint32 addr);
+extern uint32 dhd_writeregl(void *bp, uint32 addr, uint32 data);
+
+typedef struct dhd_dbgfs {
+	struct dentry	*debugfs_dir;
+	struct dentry	*debugfs_mem;
+	dhd_pub_t 	*dhdp;
+	uint32 		size;
+} dhd_dbgfs_t;
+
+dhd_dbgfs_t g_dbgfs;
+
+static int
+dhd_dbg_state_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t
+dhd_dbg_state_read(struct file *file, char __user *ubuf,
+                       size_t count, loff_t *ppos)
+{
+	ssize_t rval;
+	uint32 tmp;
+	loff_t pos = *ppos;
+	size_t ret;
+
+	if (pos < 0)
+		return -EINVAL;
+	if (pos >= g_dbgfs.size || !count)
+		return 0;
+	if (count > g_dbgfs.size - pos)
+		count = g_dbgfs.size - pos;
+
+	/* Basically enforce aligned 4 byte reads. It's up to the user to work out the details */
+	tmp = dhd_readregl(g_dbgfs.dhdp->bus, file->f_pos & (~3));
+
+	ret = copy_to_user(ubuf, &tmp, 4);
+	if (ret == count)
+		return -EFAULT;
+
+	count -= ret;
+	*ppos = pos + count;
+	rval = count;
+
+	return rval;
+}
+
+
+static ssize_t
+dhd_debugfs_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	loff_t pos = *ppos;
+	size_t ret;
+	uint32 buf;
+
+	if (pos < 0)
+		return -EINVAL;
+	if (pos >= g_dbgfs.size || !count)
+		return 0;
+	if (count > g_dbgfs.size - pos)
+		count = g_dbgfs.size - pos;
+
+	ret = copy_from_user(&buf, ubuf, sizeof(uint32));
+	if (ret == count)
+		return -EFAULT;
+
+	/* Basically enforce aligned 4 byte writes. It's up to the user to work out the details */
+	dhd_writeregl(g_dbgfs.dhdp->bus, file->f_pos & (~3), buf);
+
+	return count;
+}
+
+
+loff_t
+dhd_debugfs_lseek(struct file *file, loff_t off, int whence)
+{
+	loff_t pos = -1;
+
+	switch (whence) {
+		case 0:
+			pos = off;
+			break;
+		case 1:
+			pos = file->f_pos + off;
+			break;
+		case 2:
+			pos = g_dbgfs.size - off;
+	}
+	return (pos < 0 || pos > g_dbgfs.size) ? -EINVAL : (file->f_pos = pos);
+}
+
+static const struct file_operations dhd_dbg_state_ops = {
+	.read   = dhd_dbg_state_read,
+	.write	= dhd_debugfs_write,
+	.open   = dhd_dbg_state_open,
+	.llseek	= dhd_debugfs_lseek
+};
+
+static void dhd_dbg_create(void)
+{
+	if (g_dbgfs.debugfs_dir) {
+		g_dbgfs.debugfs_mem = debugfs_create_file("mem", 0644, g_dbgfs.debugfs_dir,
+			NULL, &dhd_dbg_state_ops);
+	}
+}
+
+void dhd_dbg_init(dhd_pub_t *dhdp)
+{
+	int err;
+
+	g_dbgfs.dhdp = dhdp;
+	g_dbgfs.size = 0x20000000; /* Allow access to various cores regs */
+
+	g_dbgfs.debugfs_dir = debugfs_create_dir("dhd", 0);
+	if (IS_ERR(g_dbgfs.debugfs_dir)) {
+		err = PTR_ERR(g_dbgfs.debugfs_dir);
+		g_dbgfs.debugfs_dir = NULL;
+		return;
+	}
+
+	dhd_dbg_create();
+
+	return;
+}
+
+void dhd_dbg_remove(void)
+{
+	debugfs_remove(g_dbgfs.debugfs_mem);
+	debugfs_remove(g_dbgfs.debugfs_dir);
+
+	bzero((unsigned char *) &g_dbgfs, sizeof(g_dbgfs));
+
+}
+#endif /* ifdef BCMDBGFS */
+
+#ifdef WLMEDIA_HTSF
+
+static
+void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
+	struct sk_buff *skb;
+	uint32 htsf = 0;
+	uint16 dport = 0, oldmagic = 0xACAC;
+	char *p1;
+	htsfts_t ts;
+
+	/*  timestamp packet  */
+
+	p1 = (char*) PKTDATA(dhdp->osh, pktbuf);
+
+	if (PKTLEN(dhdp->osh, pktbuf) > HTSF_MINLEN) {
+/*		memcpy(&proto, p1+26, 4);  	*/
+		memcpy(&dport, p1+40, 2);
+/* 	proto = ((ntoh32(proto))>> 16) & 0xFF;  */
+		dport = ntoh16(dport);
+	}
+
+	/* timestamp only if  icmp or udb iperf with port 5555 */
+/*	if (proto == 17 && dport == tsport) { */
+	if (dport >= tsport && dport <= tsport + 20) {
+
+		skb = (struct sk_buff *) pktbuf;
+
+		htsf = dhd_get_htsf(dhd, 0);
+		memset(skb->data + 44, 0, 2); /* clear checksum */
+		memcpy(skb->data+82, &oldmagic, 2);
+		memcpy(skb->data+84, &htsf, 4);
+
+		memset(&ts, 0, sizeof(htsfts_t));
+		ts.magic  = HTSFMAGIC;
+		ts.prio   = PKTPRIO(pktbuf);
+		ts.seqnum = htsf_seqnum++;
+		ts.c10    = get_cycles();
+		ts.t10    = htsf;
+		ts.endmagic = HTSFENDMAGIC;
+
+		memcpy(skb->data + HTSF_HOSTOFFSET, &ts, sizeof(ts));
+	}
+}
+
+static void dhd_dump_htsfhisto(histo_t *his, char *s)
+{
+	int pktcnt = 0, curval = 0, i;
+	for (i = 0; i < (NUMBIN-2); i++) {
+		curval += 500;
+		printf("%d ",  his->bin[i]);
+		pktcnt += his->bin[i];
+	}
+	printf(" max: %d TotPkt: %d neg: %d [%s]\n", his->bin[NUMBIN-2], pktcnt,
+		his->bin[NUMBIN-1], s);
+}
+
+static
+void sorttobin(int value, histo_t *histo)
+{
+	int i, binval = 0;
+
+	if (value < 0) {
+		histo->bin[NUMBIN-1]++;
+		return;
+	}
+	if (value > histo->bin[NUMBIN-2])  /* store the max value  */
+		histo->bin[NUMBIN-2] = value;
+
+	for (i = 0; i < (NUMBIN-2); i++) {
+		binval += 500; /* 500m s bins */
+		if (value <= binval) {
+			histo->bin[i]++;
+			return;
+		}
+	}
+	histo->bin[NUMBIN-3]++;
+}
+
+static
+void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf)
+{
+	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+	struct sk_buff *skb;
+	char *p1;
+	uint16 old_magic;
+	int d1, d2, d3, end2end;
+	htsfts_t *htsf_ts;
+	uint32 htsf;
+
+	skb = PKTTONATIVE(dhdp->osh, pktbuf);
+	p1 = (char*)PKTDATA(dhdp->osh, pktbuf);
+
+	if (PKTLEN(osh, pktbuf) > HTSF_MINLEN) {
+		memcpy(&old_magic, p1+78, 2);
+		htsf_ts = (htsfts_t*) (p1 + HTSF_HOSTOFFSET - 4);
+	}
+	else
+		return;
+
+	if (htsf_ts->magic == HTSFMAGIC) {
+		htsf_ts->tE0 = dhd_get_htsf(dhd, 0);
+		htsf_ts->cE0 = get_cycles();
+	}
+
+	if (old_magic == 0xACAC) {
+
+		tspktcnt++;
+		htsf = dhd_get_htsf(dhd, 0);
+		memcpy(skb->data+92, &htsf, sizeof(uint32));
+
+		memcpy(&ts[tsidx].t1, skb->data+80, 16);
+
+		d1 = ts[tsidx].t2 - ts[tsidx].t1;
+		d2 = ts[tsidx].t3 - ts[tsidx].t2;
+		d3 = ts[tsidx].t4 - ts[tsidx].t3;
+		end2end = ts[tsidx].t4 - ts[tsidx].t1;
+
+		sorttobin(d1, &vi_d1);
+		sorttobin(d2, &vi_d2);
+		sorttobin(d3, &vi_d3);
+		sorttobin(end2end, &vi_d4);
+
+		if (end2end > 0 && end2end >  maxdelay) {
+			maxdelay = end2end;
+			maxdelaypktno = tspktcnt;
+			memcpy(&maxdelayts, &ts[tsidx], 16);
+		}
+		if (++tsidx >= TSMAX)
+			tsidx = 0;
+	}
+}
+
+uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx)
+{
+	uint32 htsf = 0, cur_cycle, delta, delta_us;
+	uint32    factor, baseval, baseval2;
+	cycles_t t;
+
+	t = get_cycles();
+	cur_cycle = t;
+
+	if (cur_cycle >  dhd->htsf.last_cycle)
+		delta = cur_cycle -  dhd->htsf.last_cycle;
+	else {
+		delta = cur_cycle + (0xFFFFFFFF -  dhd->htsf.last_cycle);
+	}
+
+	delta = delta >> 4;
+
+	if (dhd->htsf.coef) {
+		/* times ten to get the first digit */
+	        factor = (dhd->htsf.coef*10 + dhd->htsf.coefdec1);
+		baseval  = (delta*10)/factor;
+		baseval2 = (delta*10)/(factor+1);
+		delta_us  = (baseval -  (((baseval - baseval2) * dhd->htsf.coefdec2)) / 10);
+		htsf = (delta_us << 4) +  dhd->htsf.last_tsf + HTSF_BUS_DELAY;
+	}
+	else {
+		DHD_ERROR(("-------dhd->htsf.coef = 0 -------\n"));
+	}
+
+	return htsf;
+}
+
+static void dhd_dump_latency(void)
+{
+	int i, max = 0;
+	int d1, d2, d3, d4, d5;
+
+	printf("T1       T2       T3       T4           d1  d2   t4-t1     i    \n");
+	for (i = 0; i < TSMAX; i++) {
+		d1 = ts[i].t2 - ts[i].t1;
+		d2 = ts[i].t3 - ts[i].t2;
+		d3 = ts[i].t4 - ts[i].t3;
+		d4 = ts[i].t4 - ts[i].t1;
+		d5 = ts[max].t4-ts[max].t1;
+		if (d4 > d5 && d4 > 0)  {
+			max = i;
+		}
+		printf("%08X %08X %08X %08X \t%d %d %d   %d i=%d\n",
+			ts[i].t1, ts[i].t2, ts[i].t3, ts[i].t4,
+			d1, d2, d3, d4, i);
+	}
+
+	printf("current idx = %d \n", tsidx);
+
+	printf("Highest latency %d pkt no.%d total=%d\n", maxdelay, maxdelaypktno, tspktcnt);
+	printf("%08X %08X %08X %08X \t%d %d %d   %d\n",
+	maxdelayts.t1, maxdelayts.t2, maxdelayts.t3, maxdelayts.t4,
+	maxdelayts.t2 - maxdelayts.t1,
+	maxdelayts.t3 - maxdelayts.t2,
+	maxdelayts.t4 - maxdelayts.t3,
+	maxdelayts.t4 - maxdelayts.t1);
+}
+
+
+static int
+dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx)
+{
+	wl_ioctl_t ioc;
+	char buf[32];
+	int ret;
+	uint32 s1, s2;
+
+	struct tsf {
+		uint32 low;
+		uint32 high;
+	} tsf_buf;
+
+	memset(&ioc, 0, sizeof(ioc));
+	memset(&tsf_buf, 0, sizeof(tsf_buf));
+
+	ioc.cmd = WLC_GET_VAR;
+	ioc.buf = buf;
+	ioc.len = (uint)sizeof(buf);
+	ioc.set = FALSE;
+
+	strncpy(buf, "tsf", sizeof(buf) - 1);
+	buf[sizeof(buf) - 1] = '\0';
+	s1 = dhd_get_htsf(dhd, 0);
+	if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
+		if (ret == -EIO) {
+			DHD_ERROR(("%s: tsf is not supported by device\n",
+				dhd_ifname(&dhd->pub, ifidx)));
+			return -EOPNOTSUPP;
+		}
+		return ret;
+	}
+	s2 = dhd_get_htsf(dhd, 0);
+
+	memcpy(&tsf_buf, buf, sizeof(tsf_buf));
+	printf(" TSF_h=%04X lo=%08X Calc:htsf=%08X, coef=%d.%d%d delta=%d ",
+		tsf_buf.high, tsf_buf.low, s2, dhd->htsf.coef, dhd->htsf.coefdec1,
+		dhd->htsf.coefdec2, s2-tsf_buf.low);
+	printf("lasttsf=%08X lastcycle=%08X\n", dhd->htsf.last_tsf, dhd->htsf.last_cycle);
+	return 0;
+}
+
+void htsf_update(dhd_info_t *dhd, void *data)
+{
+	static ulong  cur_cycle = 0, prev_cycle = 0;
+	uint32 htsf, tsf_delta = 0;
+	uint32 hfactor = 0, cyc_delta, dec1 = 0, dec2, dec3, tmp;
+	ulong b, a;
+	cycles_t t;
+
+	/* cycles_t in inlcude/mips/timex.h */
+
+	t = get_cycles();
+
+	prev_cycle = cur_cycle;
+	cur_cycle = t;
+
+	if (cur_cycle > prev_cycle)
+		cyc_delta = cur_cycle - prev_cycle;
+	else {
+		b = cur_cycle;
+		a = prev_cycle;
+		cyc_delta = cur_cycle + (0xFFFFFFFF - prev_cycle);
+	}
+
+	if (data == NULL)
+		printf(" tsf update ata point er is null \n");
+
+	memcpy(&prev_tsf, &cur_tsf, sizeof(tsf_t));
+	memcpy(&cur_tsf, data, sizeof(tsf_t));
+
+	if (cur_tsf.low == 0) {
+		DHD_INFO((" ---- 0 TSF, do not update, return\n"));
+		return;
+	}
+
+	if (cur_tsf.low > prev_tsf.low)
+		tsf_delta = (cur_tsf.low - prev_tsf.low);
+	else {
+		DHD_INFO((" ---- tsf low is smaller cur_tsf= %08X, prev_tsf=%08X, \n",
+		 cur_tsf.low, prev_tsf.low));
+		if (cur_tsf.high > prev_tsf.high) {
+			tsf_delta = cur_tsf.low + (0xFFFFFFFF - prev_tsf.low);
+			DHD_INFO((" ---- Wrap around tsf coutner  adjusted TSF=%08X\n", tsf_delta));
+		}
+		else
+			return; /* do not update */
+	}
+
+	if (tsf_delta)  {
+		hfactor = cyc_delta / tsf_delta;
+		tmp  = 	(cyc_delta - (hfactor * tsf_delta))*10;
+		dec1 =  tmp/tsf_delta;
+		dec2 =  ((tmp - dec1*tsf_delta)*10) / tsf_delta;
+		tmp  = 	(tmp   - (dec1*tsf_delta))*10;
+		dec3 =  ((tmp - dec2*tsf_delta)*10) / tsf_delta;
+
+		if (dec3 > 4) {
+			if (dec2 == 9) {
+				dec2 = 0;
+				if (dec1 == 9) {
+					dec1 = 0;
+					hfactor++;
+				}
+				else {
+					dec1++;
+				}
+			}
+			else
+				dec2++;
+		}
+	}
+
+	if (hfactor) {
+		htsf = ((cyc_delta * 10)  / (hfactor*10+dec1)) + prev_tsf.low;
+		dhd->htsf.coef = hfactor;
+		dhd->htsf.last_cycle = cur_cycle;
+		dhd->htsf.last_tsf = cur_tsf.low;
+		dhd->htsf.coefdec1 = dec1;
+		dhd->htsf.coefdec2 = dec2;
+	}
+	else {
+		htsf = prev_tsf.low;
+	}
+}
+
+#endif /* WLMEDIA_HTSF */
+
+#ifdef CUSTOM_SET_CPUCORE
+void dhd_set_cpucore(dhd_pub_t *dhd, int set)
+{
+	int e_dpc = 0, e_rxf = 0, retry_set = 0;
+
+	if (!(dhd->chan_isvht80)) {
+		DHD_ERROR(("%s: chan_status(%d) cpucore!!!\n", __FUNCTION__, dhd->chan_isvht80));
+		return;
+	}
+
+	if (DPC_CPUCORE) {
+		do {
+			if (set == TRUE) {
+				e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
+					cpumask_of(DPC_CPUCORE));
+			} else {
+				e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
+					cpumask_of(PRIMARY_CPUCORE));
+			}
+			if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
+				DHD_ERROR(("%s: dpc(%d) invalid cpu!\n", __FUNCTION__, e_dpc));
+				return;
+			}
+			if (e_dpc < 0)
+				OSL_SLEEP(1);
+		} while (e_dpc < 0);
+	}
+	if (RXF_CPUCORE) {
+		do {
+			if (set == TRUE) {
+				e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
+					cpumask_of(RXF_CPUCORE));
+			} else {
+				e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
+					cpumask_of(PRIMARY_CPUCORE));
+			}
+			if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
+				DHD_ERROR(("%s: rxf(%d) invalid cpu!\n", __FUNCTION__, e_rxf));
+				return;
+			}
+			if (e_rxf < 0)
+				OSL_SLEEP(1);
+		} while (e_rxf < 0);
+	}
+#ifdef DHD_OF_SUPPORT
+	interrupt_set_cpucore(set);
+#endif /* DHD_OF_SUPPORT */
+	DHD_TRACE(("%s: set(%d) cpucore success!\n", __FUNCTION__, set));
+
+	return;
+}
+#endif /* CUSTOM_SET_CPUCORE */
+
+/* Get interface specific ap_isolate configuration */
+int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx)
+{
+	dhd_info_t *dhd = dhdp->info;
+	dhd_if_t *ifp;
+
+	ASSERT(idx < DHD_MAX_IFS);
+
+	ifp = dhd->iflist[idx];
+
+	return ifp->ap_isolate;
+}
+
+/* Set interface specific ap_isolate configuration */
+int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val)
+{
+	dhd_info_t *dhd = dhdp->info;
+	dhd_if_t *ifp;
+
+	ASSERT(idx < DHD_MAX_IFS);
+
+	ifp = dhd->iflist[idx];
+
+	ifp->ap_isolate = val;
+
+	return 0;
+}
+
+#ifdef DHD_WMF
+/* Returns interface specific WMF configuration */
+dhd_wmf_t* dhd_wmf_conf(dhd_pub_t *dhdp, uint32 idx)
+{
+	dhd_info_t *dhd = dhdp->info;
+	dhd_if_t *ifp;
+
+	ASSERT(idx < DHD_MAX_IFS);
+
+	ifp = dhd->iflist[idx];
+	return &ifp->wmf;
+}
+#endif /* DHD_WMF */
+
+
+#ifdef DHD_UNICAST_DHCP
+static int
+dhd_get_pkt_ether_type(dhd_pub_t *pub, void *pktbuf,
+	uint8 **data_ptr, int *len_ptr, uint16 *et_ptr, bool *snap_ptr)
+{
+	uint8 *frame = PKTDATA(pub->osh, pktbuf);
+	int length = PKTLEN(pub->osh, pktbuf);
+	uint8 *pt;			/* Pointer to type field */
+	uint16 ethertype;
+	bool snap = FALSE;
+	/* Process Ethernet II or SNAP-encapsulated 802.3 frames */
+	if (length < ETHER_HDR_LEN) {
+		DHD_ERROR(("dhd: %s: short eth frame (%d)\n",
+		           __FUNCTION__, length));
+		return BCME_ERROR;
+	} else if (ntoh16_ua(frame + ETHER_TYPE_OFFSET) >= ETHER_TYPE_MIN) {
+		/* Frame is Ethernet II */
+		pt = frame + ETHER_TYPE_OFFSET;
+	} else if (length >= ETHER_HDR_LEN + SNAP_HDR_LEN + ETHER_TYPE_LEN &&
+	           !bcmp(llc_snap_hdr, frame + ETHER_HDR_LEN, SNAP_HDR_LEN)) {
+		pt = frame + ETHER_HDR_LEN + SNAP_HDR_LEN;
+		snap = TRUE;
+	} else {
+		DHD_INFO(("DHD: %s: non-SNAP 802.3 frame\n",
+		           __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	ethertype = ntoh16_ua(pt);
+
+	/* Skip VLAN tag, if any */
+	if (ethertype == ETHER_TYPE_8021Q) {
+		pt += VLAN_TAG_LEN;
+
+		if ((pt + ETHER_TYPE_LEN) > (frame + length)) {
+			DHD_ERROR(("dhd: %s: short VLAN frame (%d)\n",
+			          __FUNCTION__, length));
+			return BCME_ERROR;
+		}
+
+		ethertype = ntoh16_ua(pt);
+	}
+
+	*data_ptr = pt + ETHER_TYPE_LEN;
+	*len_ptr = length - (pt + ETHER_TYPE_LEN - frame);
+	*et_ptr = ethertype;
+	*snap_ptr = snap;
+	return BCME_OK;
+}
+
+static int
+dhd_get_pkt_ip_type(dhd_pub_t *pub, void *pktbuf,
+	uint8 **data_ptr, int *len_ptr, uint8 *prot_ptr)
+{
+	struct ipv4_hdr *iph;		/* IP frame pointer */
+	int iplen;			/* IP frame length */
+	uint16 ethertype, iphdrlen, ippktlen;
+	uint16 iph_frag;
+	uint8 prot;
+	bool snap;
+
+	if (dhd_get_pkt_ether_type(pub, pktbuf, (uint8 **)&iph,
+	    &iplen, &ethertype, &snap) != 0)
+		return BCME_ERROR;
+
+	if (ethertype != ETHER_TYPE_IP) {
+		return BCME_ERROR;
+	}
+
+	/* We support IPv4 only */
+	if (iplen < IPV4_OPTIONS_OFFSET || (IP_VER(iph) != IP_VER_4)) {
+		return BCME_ERROR;
+	}
+
+	/* Header length sanity */
+	iphdrlen = IPV4_HLEN(iph);
+
+	/*
+	 * Packet length sanity; sometimes we receive eth-frame size bigger
+	 * than the IP content, which results in a bad tcp chksum
+	 */
+	ippktlen = ntoh16(iph->tot_len);
+	if (ippktlen < iplen) {
+
+		DHD_INFO(("%s: extra frame length ignored\n",
+		          __FUNCTION__));
+		iplen = ippktlen;
+	} else if (ippktlen > iplen) {
+		DHD_ERROR(("dhd: %s: truncated IP packet (%d)\n",
+		           __FUNCTION__, ippktlen - iplen));
+		return BCME_ERROR;
+	}
+
+	if (iphdrlen < IPV4_OPTIONS_OFFSET || iphdrlen > iplen) {
+		DHD_ERROR(("DHD: %s: IP-header-len (%d) out of range (%d-%d)\n",
+		           __FUNCTION__, iphdrlen, IPV4_OPTIONS_OFFSET, iplen));
+		return BCME_ERROR;
+	}
+
+	/*
+	 * We don't handle fragmented IP packets.  A first frag is indicated by the MF
+	 * (more frag) bit and a subsequent frag is indicated by a non-zero frag offset.
+	 */
+	iph_frag = ntoh16(iph->frag);
+
+	if ((iph_frag & IPV4_FRAG_MORE) || (iph_frag & IPV4_FRAG_OFFSET_MASK) != 0) {
+		DHD_INFO(("DHD:%s: IP fragment not handled\n",
+		           __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	prot = IPV4_PROT(iph);
+
+	*data_ptr = (((uint8 *)iph) + iphdrlen);
+	*len_ptr = iplen - iphdrlen;
+	*prot_ptr = prot;
+	return BCME_OK;
+}
+
+/** check the packet type, if it is DHCP ACK/REPLY, convert into unicast packet	*/
+static
+int dhd_convert_dhcp_broadcast_ack_to_unicast(dhd_pub_t *pub, void *pktbuf, int ifidx)
+{
+	dhd_sta_t* stainfo;
+	uint8 *eh = PKTDATA(pub->osh, pktbuf);
+	uint8 *udph;
+	uint8 *dhcp;
+	uint8 *chaddr;
+	int udpl;
+	int dhcpl;
+	uint16 port;
+	uint8 prot;
+
+	if (!ETHER_ISMULTI(eh + ETHER_DEST_OFFSET))
+	    return BCME_ERROR;
+	if (dhd_get_pkt_ip_type(pub, pktbuf, &udph, &udpl, &prot) != 0)
+		return BCME_ERROR;
+	if (prot != IP_PROT_UDP)
+		return BCME_ERROR;
+	/* check frame length, at least UDP_HDR_LEN */
+	if (udpl < UDP_HDR_LEN) {
+		DHD_ERROR(("DHD: %s: short UDP frame, ignored\n",
+		    __FUNCTION__));
+		return BCME_ERROR;
+	}
+	port = ntoh16_ua(udph + UDP_DEST_PORT_OFFSET);
+	/* only process DHCP packets from server to client */
+	if (port != DHCP_PORT_CLIENT)
+		return BCME_ERROR;
+
+	dhcp = udph + UDP_HDR_LEN;
+	dhcpl = udpl - UDP_HDR_LEN;
+
+	if (dhcpl < DHCP_CHADDR_OFFSET + ETHER_ADDR_LEN) {
+		DHD_ERROR(("DHD: %s: short DHCP frame, ignored\n",
+		    __FUNCTION__));
+		return BCME_ERROR;
+	}
+	/* only process DHCP reply(offer/ack) packets */
+	if (*(dhcp + DHCP_TYPE_OFFSET) != DHCP_TYPE_REPLY)
+		return BCME_ERROR;
+	chaddr = dhcp + DHCP_CHADDR_OFFSET;
+	stainfo = dhd_find_sta(pub, ifidx, chaddr);
+	if (stainfo) {
+		bcopy(chaddr, eh + ETHER_DEST_OFFSET, ETHER_ADDR_LEN);
+		return BCME_OK;
+	}
+	return BCME_ERROR;
+}
+#endif /* DHD_UNICAST_DHD */
+#ifdef DHD_L2_FILTER
+/* Check if packet type is ICMP ECHO */
+static
+int dhd_l2_filter_block_ping(dhd_pub_t *pub, void *pktbuf, int ifidx)
+{
+	struct bcmicmp_hdr *icmph;
+	int udpl;
+	uint8 prot;
+
+	if (dhd_get_pkt_ip_type(pub, pktbuf, (uint8 **)&icmph, &udpl, &prot) != 0)
+		return BCME_ERROR;
+	if (prot == IP_PROT_ICMP) {
+		if (icmph->type == ICMP_TYPE_ECHO_REQUEST)
+			return BCME_OK;
+	}
+	return BCME_ERROR;
+}
+#endif /* DHD_L2_FILTER */
diff --git a/drivers/net/wireless/bcmdhd/dhd_linux.h b/drivers/net/wireless/bcmdhd/dhd_linux.h
new file mode 100644
index 0000000..d4b6045
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_linux.h
@@ -0,0 +1,117 @@
+/*
+ * DHD Linux header file (dhd_linux exports for cfg80211 and other components)
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ *
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ *
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_linux.h 399301 2013-04-29 21:41:52Z $
+ */
+
+/* wifi platform functions for power, interrupt and pre-alloc, either
+ * from Android-like platform device data, or Broadcom wifi platform
+ * device data.
+ *
+ */
+#ifndef __DHD_LINUX_H__
+#define __DHD_LINUX_H__
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#ifdef DHD_WMF
+#include <dhd_wmf_linux.h>
+#endif
+/* Linux wireless extension support */
+#if defined(WL_WIRELESS_EXT)
+#include <wl_iw.h>
+#endif /* defined(WL_WIRELESS_EXT) */
+#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
+#include <linux/earlysuspend.h>
+#endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */
+#if defined(CONFIG_WIFI_CONTROL_FUNC)
+#include <linux/wlan_plat.h>
+#endif
+
+#if !defined(CONFIG_WIFI_CONTROL_FUNC)
+#define WLAN_PLAT_NODFS_FLAG	0x01
+struct wifi_platform_data {
+	int (*set_power)(int val);
+	int (*set_reset)(int val);
+	int (*set_carddetect)(int val);
+	void *(*mem_prealloc)(int section, unsigned long size);
+	int (*get_mac_addr)(unsigned char *buf);
+	int (*get_wake_irq)(void);
+	void *(*get_country_code)(char *ccode, u32 flags);
+};
+#endif /* CONFIG_WIFI_CONTROL_FUNC */
+
+#define DHD_REGISTRATION_TIMEOUT  12000  /* msec : allowed time to finished dhd registration */
+
+typedef struct wifi_adapter_info {
+	const char	*name;
+	uint		irq_num;
+	uint		intr_flags;
+	const char	*fw_path;
+	const char	*nv_path;
+	void		*wifi_plat_data;	/* wifi ctrl func, for backward compatibility */
+	uint		bus_type;
+	uint		bus_num;
+	uint		slot_num;
+} wifi_adapter_info_t;
+
+typedef struct bcmdhd_wifi_platdata {
+	uint				num_adapters;
+	wifi_adapter_info_t	*adapters;
+} bcmdhd_wifi_platdata_t;
+
+/** Per STA params. A list of dhd_sta objects are managed in dhd_if */
+typedef struct dhd_sta {
+	uint16 flowid[NUMPRIO]; /* allocated flow ring ids (by priority) */
+	void * ifp;             /* associated dhd_if */
+	struct ether_addr ea;   /* stations ethernet mac address */
+	struct list_head list;  /* link into dhd_if::sta_list */
+	int idx;                /* index of self in dhd_pub::sta_pool[] */
+	int ifidx;              /* index of interface in dhd */
+} dhd_sta_t;
+typedef dhd_sta_t dhd_sta_pool_t;
+
+int dhd_wifi_platform_register_drv(void);
+void dhd_wifi_platform_unregister_drv(void);
+wifi_adapter_info_t* dhd_wifi_platform_get_adapter(uint32 bus_type, uint32 bus_num,
+	uint32 slot_num);
+int wifi_platform_set_power(wifi_adapter_info_t *adapter, bool on, unsigned long msec);
+int wifi_platform_bus_enumerate(wifi_adapter_info_t *adapter, bool device_present);
+int wifi_platform_get_irq_number(wifi_adapter_info_t *adapter, unsigned long *irq_flags_ptr);
+int wifi_platform_get_mac_addr(wifi_adapter_info_t *adapter, unsigned char *buf);
+void *wifi_platform_get_country_code(wifi_adapter_info_t *adapter, char *ccode,
+	u32 flags);
+void* wifi_platform_prealloc(wifi_adapter_info_t *adapter, int section, unsigned long size);
+void* wifi_platform_get_prealloc_func_ptr(wifi_adapter_info_t *adapter);
+int wifi_platform_get_wake_irq(wifi_adapter_info_t *adapter);
+
+int dhd_get_fw_mode(struct dhd_info *dhdinfo);
+bool dhd_update_fw_nv_path(struct dhd_info *dhdinfo);
+
+#ifdef DHD_WMF
+dhd_wmf_t* dhd_wmf_conf(dhd_pub_t *dhdp, uint32 idx);
+#endif /* DHD_WMF */
+#endif /* __DHD_LINUX_H__ */
diff --git a/drivers/net/wireless/bcmdhd/dhd_linux_platdev.c b/drivers/net/wireless/bcmdhd/dhd_linux_platdev.c
new file mode 100644
index 0000000..d5c76c6
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_linux_platdev.c
@@ -0,0 +1,745 @@
+/*
+ * Linux platform device for DHD WLAN adapter
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ *
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ *
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_linux_platdev.c 401742 2013-05-13 15:03:21Z $
+ */
+#include <typedefs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <bcmutils.h>
+#include <linux_osl.h>
+#include <dhd_dbg.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_bus.h>
+#include <dhd_linux.h>
+#include <wl_android.h>
+
+#define WIFI_PLAT_NAME		"wlan"
+#define WIFI_PLAT_NAME2		"bcm4329_wlan"
+#define WIFI_PLAT_EXT		"bcmdhd_wifi_platform"
+
+bool cfg_multichip = FALSE;
+bcmdhd_wifi_platdata_t *dhd_wifi_platdata = NULL;
+static int wifi_plat_dev_probe_ret = 0;
+static bool is_power_on = FALSE;
+#ifdef DHD_OF_SUPPORT
+static bool dts_enabled = TRUE;
+extern struct resource dhd_wlan_resources;
+extern struct wifi_platform_data dhd_wlan_control;
+#else
+static bool dts_enabled = FALSE;
+struct resource dhd_wlan_resources = {0};
+extern int bcmsdh_sdmmc_set_power(int on);
+struct wifi_platform_data dhd_wlan_control = {
+	.set_power = bcmsdh_sdmmc_set_power,
+};
+#endif /* CONFIG_OF && !defined(CONFIG_ARCH_MSM) */
+
+static int dhd_wifi_platform_load(void);
+
+extern void* wl_cfg80211_get_dhdp(void);
+
+#ifdef ENABLE_4335BT_WAR
+extern int bcm_bt_lock(int cookie);
+extern void bcm_bt_unlock(int cookie);
+static int lock_cookie_wifi = 'W' | 'i'<<8 | 'F'<<16 | 'i'<<24;	/* cookie is "WiFi" */
+#endif /* ENABLE_4335BT_WAR */
+
+wifi_adapter_info_t* dhd_wifi_platform_get_adapter(uint32 bus_type, uint32 bus_num, uint32 slot_num)
+{
+	int i;
+
+	if (dhd_wifi_platdata == NULL)
+		return NULL;
+
+	for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) {
+		wifi_adapter_info_t *adapter = &dhd_wifi_platdata->adapters[i];
+		if ((adapter->bus_type == -1 || adapter->bus_type == bus_type) &&
+			(adapter->bus_num == -1 || adapter->bus_num == bus_num) &&
+			(adapter->slot_num == -1 || adapter->slot_num == slot_num)) {
+			DHD_TRACE(("found adapter info '%s'\n", adapter->name));
+			return adapter;
+		}
+	}
+	return NULL;
+}
+
+void* wifi_platform_prealloc(wifi_adapter_info_t *adapter, int section, unsigned long size)
+{
+	void *alloc_ptr = NULL;
+	struct wifi_platform_data *plat_data;
+
+	if (!adapter || !adapter->wifi_plat_data)
+		return NULL;
+	plat_data = adapter->wifi_plat_data;
+	if (plat_data->mem_prealloc) {
+		alloc_ptr = plat_data->mem_prealloc(section, size);
+		if (alloc_ptr) {
+			DHD_INFO(("success alloc section %d\n", section));
+			if (size != 0L)
+				bzero(alloc_ptr, size);
+			return alloc_ptr;
+		} else {
+			DHD_ERROR(("%s: failed to alloc static mem section %d\n",
+				__FUNCTION__, section));
+		}
+	}
+
+	return NULL;
+}
+
+void* wifi_platform_get_prealloc_func_ptr(wifi_adapter_info_t *adapter)
+{
+	struct wifi_platform_data *plat_data;
+
+	if (!adapter || !adapter->wifi_plat_data)
+		return NULL;
+	plat_data = adapter->wifi_plat_data;
+	return plat_data->mem_prealloc;
+}
+
+int wifi_platform_get_irq_number(wifi_adapter_info_t *adapter, unsigned long *irq_flags_ptr)
+{
+	if (adapter == NULL)
+		return -1;
+	if (irq_flags_ptr)
+		*irq_flags_ptr = adapter->intr_flags;
+	return adapter->irq_num;
+}
+
+int wifi_platform_set_power(wifi_adapter_info_t *adapter, bool on, unsigned long msec)
+{
+	int err = 0;
+	struct wifi_platform_data *plat_data;
+
+	if (!adapter || !adapter->wifi_plat_data)
+		return -EINVAL;
+	plat_data = adapter->wifi_plat_data;
+
+	DHD_ERROR(("%s = %d\n", __FUNCTION__, on));
+	if (plat_data->set_power) {
+#ifdef ENABLE_4335BT_WAR
+		if (on) {
+			printk("WiFi: trying to acquire BT lock\n");
+			if (bcm_bt_lock(lock_cookie_wifi) != 0)
+				printk("** WiFi: timeout in acquiring bt lock**\n");
+			printk("%s: btlock acquired\n", __FUNCTION__);
+		}
+		else {
+			/* For a exceptional case, release btlock */
+			bcm_bt_unlock(lock_cookie_wifi);
+		}
+#endif /* ENABLE_4335BT_WAR */
+
+		err = plat_data->set_power(on);
+	}
+
+	if (msec && !err)
+		OSL_SLEEP(msec);
+
+	if (on && !err)
+		is_power_on = TRUE;
+	else
+		is_power_on = FALSE;
+
+	return err;
+}
+
+int wifi_platform_bus_enumerate(wifi_adapter_info_t *adapter, bool device_present)
+{
+	int err = 0;
+	struct wifi_platform_data *plat_data;
+
+	if (!adapter || !adapter->wifi_plat_data)
+		return -EINVAL;
+	plat_data = adapter->wifi_plat_data;
+
+	DHD_ERROR(("%s device present %d\n", __FUNCTION__, device_present));
+	if (plat_data->set_carddetect) {
+		err = plat_data->set_carddetect(device_present);
+	}
+	return err;
+
+}
+
+int wifi_platform_get_wake_irq(wifi_adapter_info_t *adapter)
+{
+	struct wifi_platform_data *plat_data;
+
+	if (!adapter || !adapter->wifi_plat_data)
+		return -1;
+	plat_data = adapter->wifi_plat_data;
+	if (plat_data->get_wake_irq)
+		return plat_data->get_wake_irq();
+	return -1;
+}
+
+int wifi_platform_get_mac_addr(wifi_adapter_info_t *adapter, unsigned char *buf)
+{
+	struct wifi_platform_data *plat_data;
+
+	DHD_ERROR(("%s\n", __FUNCTION__));
+	if (!buf || !adapter || !adapter->wifi_plat_data)
+		return -EINVAL;
+	plat_data = adapter->wifi_plat_data;
+	if (plat_data->get_mac_addr) {
+		return plat_data->get_mac_addr(buf);
+	}
+	return -EOPNOTSUPP;
+}
+
+void *wifi_platform_get_country_code(wifi_adapter_info_t *adapter, char *ccode,
+				     u32 flags)
+{
+	/* get_country_code was added after 2.6.39 */
+#if	(LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+	struct wifi_platform_data *plat_data;
+
+	if (!ccode || !adapter || !adapter->wifi_plat_data)
+		return NULL;
+	plat_data = adapter->wifi_plat_data;
+
+	DHD_TRACE(("%s\n", __FUNCTION__));
+	if (plat_data->get_country_code) {
+		return plat_data->get_country_code(ccode, flags);
+	}
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) */
+
+	return NULL;
+}
+
+static int wifi_plat_dev_drv_probe(struct platform_device *pdev)
+{
+	struct resource *resource;
+	wifi_adapter_info_t *adapter;
+
+	/* Android style wifi platform data device ("bcmdhd_wlan" or "bcm4329_wlan")
+	 * is kept for backward compatibility and supports only 1 adapter
+	 */
+	ASSERT(dhd_wifi_platdata != NULL);
+	ASSERT(dhd_wifi_platdata->num_adapters == 1);
+	adapter = &dhd_wifi_platdata->adapters[0];
+	adapter->wifi_plat_data = (struct wifi_platform_data *)(pdev->dev.platform_data);
+	if (!adapter->wifi_plat_data) {
+		DHD_ERROR(("%s: unable to get platform data !\n", __FUNCTION__));
+		return -ENODATA;
+	}
+	((struct wifi_platform_data *)(adapter->wifi_plat_data))->set_power = bcmsdh_sdmmc_set_power;
+
+	resource = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (resource == NULL) {
+		DHD_ERROR(("%s: unable to get IORESOURCE_IRQ !\n", __FUNCTION__));
+		return -ENODATA;
+	}
+
+	adapter->irq_num = resource->start;
+	adapter->intr_flags = resource->flags & IRQF_TRIGGER_MASK;
+
+	wifi_plat_dev_probe_ret = dhd_wifi_platform_load();
+	return wifi_plat_dev_probe_ret;
+}
+
+static int wifi_plat_dev_drv_remove(struct platform_device *pdev)
+{
+	wifi_adapter_info_t *adapter;
+
+	/* Android style wifi platform data device ("bcmdhd_wlan" or "bcm4329_wlan")
+	 * is kept for backward compatibility and supports only 1 adapter
+	 */
+	ASSERT(dhd_wifi_platdata != NULL);
+	ASSERT(dhd_wifi_platdata->num_adapters == 1);
+	adapter = &dhd_wifi_platdata->adapters[0];
+	if (is_power_on) {
+#ifdef BCMPCIE
+		wifi_platform_bus_enumerate(adapter, FALSE);
+		OSL_SLEEP(100);
+		wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY);
+#else
+		wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY);
+		wifi_platform_bus_enumerate(adapter, FALSE);
+#endif /* BCMPCIE */
+	}
+
+	return 0;
+}
+
+static int wifi_plat_dev_drv_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	DHD_TRACE(("##> %s\n", __FUNCTION__));
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 39)) && defined(OOB_INTR_ONLY) && \
+	defined(BCMSDIO)
+	bcmsdh_oob_intr_set(0);
+#endif /* (OOB_INTR_ONLY) */
+	return 0;
+}
+
+static int wifi_plat_dev_drv_resume(struct platform_device *pdev)
+{
+	DHD_TRACE(("##> %s\n", __FUNCTION__));
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 39)) && defined(OOB_INTR_ONLY) && \
+	defined(BCMSDIO)
+	if (dhd_os_check_if_up(wl_cfg80211_get_dhdp()))
+		bcmsdh_oob_intr_set(1);
+#endif /* (OOB_INTR_ONLY) */
+	return 0;
+}
+
+static struct platform_driver wifi_platform_dev_driver = {
+	.probe          = wifi_plat_dev_drv_probe,
+	.remove         = wifi_plat_dev_drv_remove,
+	.suspend        = wifi_plat_dev_drv_suspend,
+	.resume         = wifi_plat_dev_drv_resume,
+	.driver         = {
+	.name   = WIFI_PLAT_NAME,
+	}
+};
+
+static struct platform_driver wifi_platform_dev_driver_legacy = {
+	.probe          = wifi_plat_dev_drv_probe,
+	.remove         = wifi_plat_dev_drv_remove,
+	.suspend        = wifi_plat_dev_drv_suspend,
+	.resume         = wifi_plat_dev_drv_resume,
+	.driver         = {
+	.name	= WIFI_PLAT_NAME2,
+	}
+};
+
+static int wifi_platdev_match(struct device *dev, void *data)
+{
+	char *name = (char*)data;
+	struct platform_device *pdev = to_platform_device(dev);
+
+	if (strcmp(pdev->name, name) == 0) {
+		DHD_ERROR(("found wifi platform device %s\n", name));
+		return TRUE;
+	}
+
+	return FALSE;
+}
+
+static int wifi_ctrlfunc_register_drv(void)
+{
+	int err = 0;
+	struct device *dev1, *dev2;
+	wifi_adapter_info_t *adapter;
+
+	dev1 = bus_find_device(&platform_bus_type, NULL, WIFI_PLAT_NAME, wifi_platdev_match);
+	dev2 = bus_find_device(&platform_bus_type, NULL, WIFI_PLAT_NAME2, wifi_platdev_match);
+	if (!dts_enabled) {
+		if (dev1 == NULL && dev2 == NULL) {
+			DHD_ERROR(("no wifi platform data, skip\n"));
+			return -ENXIO;
+		}
+	}
+
+	/* multi-chip support not enabled, build one adapter information for
+	 * DHD (either SDIO, USB or PCIe)
+	 */
+	adapter = kzalloc(sizeof(wifi_adapter_info_t), GFP_KERNEL);
+	adapter->name = "DHD generic adapter";
+	adapter->bus_type = -1;
+	adapter->bus_num = -1;
+	adapter->slot_num = -1;
+	adapter->irq_num = -1;
+	is_power_on = FALSE;
+	wifi_plat_dev_probe_ret = 0;
+	dhd_wifi_platdata = kzalloc(sizeof(bcmdhd_wifi_platdata_t), GFP_KERNEL);
+	dhd_wifi_platdata->num_adapters = 1;
+	dhd_wifi_platdata->adapters = adapter;
+
+	if (dev1) {
+		err = platform_driver_register(&wifi_platform_dev_driver);
+		if (err) {
+			DHD_ERROR(("%s: failed to register wifi ctrl func driver\n",
+				__FUNCTION__));
+			return err;
+		}
+	}
+	if (dev2) {
+		err = platform_driver_register(&wifi_platform_dev_driver_legacy);
+		if (err) {
+			DHD_ERROR(("%s: failed to register wifi ctrl func legacy driver\n",
+				__FUNCTION__));
+			return err;
+		}
+	}
+
+	if (dts_enabled) {
+		struct resource *resource;
+		adapter->wifi_plat_data = (void *)&dhd_wlan_control;
+		resource = &dhd_wlan_resources;
+		adapter->irq_num = resource->start;
+		adapter->intr_flags = resource->flags & IRQF_TRIGGER_MASK;
+		wifi_plat_dev_probe_ret = dhd_wifi_platform_load();
+	}
+
+	/* return probe function's return value if registeration succeeded */
+	return wifi_plat_dev_probe_ret;
+}
+
+void wifi_ctrlfunc_unregister_drv(void)
+{
+	struct device *dev1, *dev2;
+
+	dev1 = bus_find_device(&platform_bus_type, NULL, WIFI_PLAT_NAME, wifi_platdev_match);
+	dev2 = bus_find_device(&platform_bus_type, NULL, WIFI_PLAT_NAME2, wifi_platdev_match);
+	if (!dts_enabled)
+		if (dev1 == NULL && dev2 == NULL)
+			return;
+
+	DHD_ERROR(("unregister wifi platform drivers\n"));
+	if (dev1)
+		platform_driver_unregister(&wifi_platform_dev_driver);
+	if (dev2)
+		platform_driver_unregister(&wifi_platform_dev_driver_legacy);
+	if (dts_enabled) {
+		wifi_adapter_info_t *adapter;
+		adapter = &dhd_wifi_platdata->adapters[0];
+		if (is_power_on) {
+			wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY);
+			wifi_platform_bus_enumerate(adapter, FALSE);
+		}
+	}
+	kfree(dhd_wifi_platdata->adapters);
+	dhd_wifi_platdata->adapters = NULL;
+	dhd_wifi_platdata->num_adapters = 0;
+	kfree(dhd_wifi_platdata);
+	dhd_wifi_platdata = NULL;
+}
+
+static int bcmdhd_wifi_plat_dev_drv_probe(struct platform_device *pdev)
+{
+	dhd_wifi_platdata = (bcmdhd_wifi_platdata_t *)(pdev->dev.platform_data);
+
+	return dhd_wifi_platform_load();
+}
+
+static int bcmdhd_wifi_plat_dev_drv_remove(struct platform_device *pdev)
+{
+	int i;
+	wifi_adapter_info_t *adapter;
+	ASSERT(dhd_wifi_platdata != NULL);
+
+	/* power down all adapters */
+	for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) {
+		adapter = &dhd_wifi_platdata->adapters[i];
+		wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY);
+		wifi_platform_bus_enumerate(adapter, FALSE);
+	}
+	return 0;
+}
+
+static struct platform_driver dhd_wifi_platform_dev_driver = {
+	.probe          = bcmdhd_wifi_plat_dev_drv_probe,
+	.remove         = bcmdhd_wifi_plat_dev_drv_remove,
+	.driver         = {
+	.name   = WIFI_PLAT_EXT,
+	}
+};
+
+int dhd_wifi_platform_register_drv(void)
+{
+	int err = 0;
+	struct device *dev;
+
+	/* register Broadcom wifi platform data driver if multi-chip is enabled,
+	 * otherwise use Android style wifi platform data (aka wifi control function)
+	 * if it exists
+	 *
+	 * to support multi-chip DHD, Broadcom wifi platform data device must
+	 * be added in kernel early boot (e.g. board config file).
+	 */
+	if (cfg_multichip) {
+		dev = bus_find_device(&platform_bus_type, NULL, WIFI_PLAT_EXT, wifi_platdev_match);
+		if (dev == NULL) {
+			DHD_ERROR(("bcmdhd wifi platform data device not found!!\n"));
+			return -ENXIO;
+		}
+		err = platform_driver_register(&dhd_wifi_platform_dev_driver);
+	} else {
+		err = wifi_ctrlfunc_register_drv();
+
+		/* no wifi ctrl func either, load bus directly and ignore this error */
+		if (err) {
+			if (err == -ENXIO) {
+				/* wifi ctrl function does not exist */
+				err = dhd_wifi_platform_load();
+			} else {
+				/* unregister driver due to initialization failure */
+				wifi_ctrlfunc_unregister_drv();
+			}
+		}
+	}
+
+	return err;
+}
+
+#ifdef BCMPCIE
+static int dhd_wifi_platform_load_pcie(void)
+{
+	int err = 0;
+	int i;
+	wifi_adapter_info_t *adapter;
+
+	BCM_REFERENCE(i);
+	BCM_REFERENCE(adapter);
+
+	if (dhd_wifi_platdata == NULL) {
+		err = dhd_bus_register();
+	} else {
+		if (dhd_download_fw_on_driverload) {
+			/* power up all adapters */
+			for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) {
+				int retry = POWERUP_MAX_RETRY;
+				adapter = &dhd_wifi_platdata->adapters[i];
+
+				DHD_ERROR(("Power-up adapter '%s'\n", adapter->name));
+				DHD_INFO((" - irq %d [flags %d], firmware: %s, nvram: %s\n",
+					adapter->irq_num, adapter->intr_flags, adapter->fw_path,
+					adapter->nv_path));
+				DHD_INFO((" - bus type %d, bus num %d, slot num %d\n\n",
+					adapter->bus_type, adapter->bus_num, adapter->slot_num));
+
+				do {
+					err = wifi_platform_set_power(adapter,
+						TRUE, WIFI_TURNON_DELAY);
+					if (err) {
+						DHD_ERROR(("failed to power up %s,"
+							" %d retry left\n",
+							adapter->name, retry));
+						/* WL_REG_ON state unknown, Power off forcely */
+						wifi_platform_set_power(adapter,
+							FALSE, WIFI_TURNOFF_DELAY);
+						continue;
+					} else {
+						err = wifi_platform_bus_enumerate(adapter, TRUE);
+						if (err) {
+							DHD_ERROR(("failed to enumerate bus %s, "
+								"%d retry left\n",
+								adapter->name, retry));
+							wifi_platform_set_power(adapter, FALSE,
+								WIFI_TURNOFF_DELAY);
+						} else {
+							break;
+						}
+					}
+				} while (retry--);
+
+				if (!retry) {
+					DHD_ERROR(("failed to power up %s, max retry reached**\n",
+						adapter->name));
+					return -ENODEV;
+				}
+			}
+		}
+
+		err = dhd_bus_register();
+
+		if (err) {
+			DHD_ERROR(("%s: pcie_register_driver failed\n", __FUNCTION__));
+			if (dhd_download_fw_on_driverload) {
+				/* power down all adapters */
+				for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) {
+					adapter = &dhd_wifi_platdata->adapters[i];
+					wifi_platform_bus_enumerate(adapter, FALSE);
+					wifi_platform_set_power(adapter,
+						FALSE, WIFI_TURNOFF_DELAY);
+				}
+			}
+		}
+	}
+
+	return err;
+}
+#else
+static int dhd_wifi_platform_load_pcie(void)
+{
+	return 0;
+}
+#endif /* BCMPCIE  */
+
+
+void dhd_wifi_platform_unregister_drv(void)
+{
+	if (cfg_multichip)
+		platform_driver_unregister(&dhd_wifi_platform_dev_driver);
+	else
+		wifi_ctrlfunc_unregister_drv();
+}
+
+extern int dhd_watchdog_prio;
+extern int dhd_dpc_prio;
+extern uint dhd_deferred_tx;
+#if defined(BCMLXSDMMC)
+extern struct semaphore dhd_registration_sem;
+#endif
+
+#ifdef BCMSDIO
+static int dhd_wifi_platform_load_sdio(void)
+{
+	int i;
+	int err = 0;
+	wifi_adapter_info_t *adapter;
+
+	BCM_REFERENCE(i);
+	BCM_REFERENCE(adapter);
+	/* Sanity check on the module parameters
+	 * - Both watchdog and DPC as tasklets are ok
+	 * - If both watchdog and DPC are threads, TX must be deferred
+	 */
+	if (!(dhd_watchdog_prio < 0 && dhd_dpc_prio < 0) &&
+		!(dhd_watchdog_prio >= 0 && dhd_dpc_prio >= 0 && dhd_deferred_tx))
+		return -EINVAL;
+
+#if defined(BCMLXSDMMC)
+	if (dhd_wifi_platdata == NULL) {
+		DHD_ERROR(("DHD wifi platform data is required for Android build\n"));
+		return -EINVAL;
+	}
+
+	sema_init(&dhd_registration_sem, 0);
+	/* power up all adapters */
+	for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) {
+		bool chip_up = FALSE;
+		int retry = POWERUP_MAX_RETRY;
+		struct semaphore dhd_chipup_sem;
+
+		adapter = &dhd_wifi_platdata->adapters[i];
+
+		DHD_INFO(("Power-up adapter '%s'\n", adapter->name));
+		DHD_INFO((" - irq %d [flags %d], firmware: %s, nvram: %s\n",
+			adapter->irq_num, adapter->intr_flags, adapter->fw_path, adapter->nv_path));
+		DHD_INFO((" - bus type %d, bus num %d, slot num %d\n\n",
+			adapter->bus_type, adapter->bus_num, adapter->slot_num));
+
+		do {
+			sema_init(&dhd_chipup_sem, 0);
+			err = dhd_bus_reg_sdio_notify(&dhd_chipup_sem);
+			if (err) {
+				DHD_ERROR(("%s dhd_bus_reg_sdio_notify fail(%d)\n\n",
+					__FUNCTION__, err));
+				return err;
+			}
+			err = wifi_platform_set_power(adapter, TRUE, WIFI_TURNON_DELAY);
+			if (err) {
+				/* WL_REG_ON state unknown, Power off forcely */
+				wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY);
+				continue;
+			} else {
+				wifi_platform_bus_enumerate(adapter, TRUE);
+				err = 0;
+			}
+
+			if (down_timeout(&dhd_chipup_sem, msecs_to_jiffies(POWERUP_WAIT_MS)) == 0) {
+				dhd_bus_unreg_sdio_notify();
+				chip_up = TRUE;
+				break;
+			}
+
+			DHD_ERROR(("failed to power up %s, %d retry left\n", adapter->name, retry));
+			dhd_bus_unreg_sdio_notify();
+			wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY);
+			wifi_platform_bus_enumerate(adapter, FALSE);
+		} while (retry--);
+
+		if (!chip_up) {
+			DHD_ERROR(("failed to power up %s, max retry reached**\n", adapter->name));
+			return -ENODEV;
+		}
+
+	}
+
+	err = dhd_bus_register();
+
+	if (err) {
+		DHD_ERROR(("%s: sdio_register_driver failed\n", __FUNCTION__));
+		goto fail;
+	}
+
+
+	/*
+	 * Wait till MMC sdio_register_driver callback called and made driver attach.
+	 * It's needed to make sync up exit from dhd insmod  and
+	 * Kernel MMC sdio device callback registration
+	 */
+	err = down_timeout(&dhd_registration_sem, msecs_to_jiffies(DHD_REGISTRATION_TIMEOUT));
+	if (err) {
+		DHD_ERROR(("%s: sdio_register_driver timeout or error \n", __FUNCTION__));
+		dhd_bus_unregister();
+		goto fail;
+	}
+
+	return err;
+
+fail:
+	/* power down all adapters */
+	for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) {
+		adapter = &dhd_wifi_platdata->adapters[i];
+		wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY);
+		wifi_platform_bus_enumerate(adapter, FALSE);
+	}
+#else
+
+	/* x86 bring-up PC needs no power-up operations */
+	err = dhd_bus_register();
+
+#endif
+
+	return err;
+}
+#else /* BCMSDIO */
+static int dhd_wifi_platform_load_sdio(void)
+{
+	return 0;
+}
+#endif /* BCMSDIO */
+
+static int dhd_wifi_platform_load_usb(void)
+{
+	return 0;
+}
+
+static int dhd_wifi_platform_load()
+{
+	int err = 0;
+
+		wl_android_init();
+
+	if ((err = dhd_wifi_platform_load_usb()))
+		goto end;
+	else if ((err = dhd_wifi_platform_load_sdio()))
+		goto end;
+	else
+		err = dhd_wifi_platform_load_pcie();
+
+end:
+	if (err)
+		wl_android_exit();
+	else
+		wl_android_post_init();
+
+	return err;
+}
diff --git a/drivers/net/wireless/bcmdhd/dhd_linux_sched.c b/drivers/net/wireless/bcmdhd/dhd_linux_sched.c
new file mode 100644
index 0000000..8fc4ff5
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_linux_sched.c
@@ -0,0 +1,48 @@
+/*
+ * Expose some of the kernel scheduler routines
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_linux_sched.c 457570 2014-02-23 13:54:46Z $
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <typedefs.h>
+#include <linuxver.h>
+
+int setScheduler(struct task_struct *p, int policy, struct sched_param *param)
+{
+	int rc = 0;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+	rc = sched_setscheduler(p, policy, param);
+#endif /* LinuxVer */
+	return rc;
+}
+
+int get_scheduler_policy(struct task_struct *p)
+{
+	int rc = SCHED_NORMAL;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+	rc = p->policy;
+#endif /* LinuxVer */
+	return rc;
+}
diff --git a/drivers/net/wireless/bcmdhd/dhd_linux_wq.c b/drivers/net/wireless/bcmdhd/dhd_linux_wq.c
new file mode 100644
index 0000000..2d01570
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_linux_wq.c
@@ -0,0 +1,317 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), Generic work queue framework
+ * Generic interface to handle dhd deferred work events
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_linux_wq.c 449578 2014-01-17 13:53:20Z $
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/fcntl.h>
+#include <linux/fs.h>
+#include <linux/ip.h>
+#include <linux/kfifo.h>
+
+#include <linuxver.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <bcmdevs.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_dbg.h>
+#include <dhd_linux_wq.h>
+
+struct dhd_deferred_event_t {
+	u8	event; /* holds the event */
+	void	*event_data; /* Holds event specific data */
+	event_handler_t event_handler;
+};
+#define DEFRD_EVT_SIZE	sizeof(struct dhd_deferred_event_t)
+
+struct dhd_deferred_wq {
+	struct work_struct	deferred_work; /* should be the first member */
+
+	/*
+	 * work events may occur simultaneously.
+	 * Can hold upto 64 low priority events and 4 high priority events
+	 */
+#define DHD_PRIO_WORK_FIFO_SIZE	(4 * sizeof(struct dhd_deferred_event_t))
+#define DHD_WORK_FIFO_SIZE	(64 * sizeof(struct dhd_deferred_event_t))
+	struct kfifo			*prio_fifo;
+	struct kfifo			*work_fifo;
+	u8				*prio_fifo_buf;
+	u8				*work_fifo_buf;
+	spinlock_t			work_lock;
+	void				*dhd_info; /* review: does it require */
+};
+
+static inline struct kfifo*
+dhd_kfifo_init(u8 *buf, int size, spinlock_t *lock)
+{
+	struct kfifo *fifo;
+	gfp_t flags = CAN_SLEEP()? GFP_KERNEL : GFP_ATOMIC;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33))
+	fifo = kfifo_init(buf, size, flags, lock);
+#else
+	fifo = (struct kfifo *)kzalloc(sizeof(struct kfifo), flags);
+	if (!fifo) {
+		return NULL;
+	}
+	kfifo_init(fifo, buf, size);
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) */
+	return fifo;
+}
+
+static inline void
+dhd_kfifo_free(struct kfifo *fifo)
+{
+	kfifo_free(fifo);
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 31))
+	/* FC11 releases the fifo memory */
+	kfree(fifo);
+#endif
+}
+
+/* deferred work functions */
+static void dhd_deferred_work_handler(struct work_struct *data);
+
+void*
+dhd_deferred_work_init(void *dhd_info)
+{
+	struct dhd_deferred_wq	*work = NULL;
+	u8*	buf;
+	unsigned long	fifo_size = 0;
+	gfp_t	flags = CAN_SLEEP()? GFP_KERNEL : GFP_ATOMIC;
+
+	if (!dhd_info) {
+		DHD_ERROR(("%s: dhd info not initialized\n", __FUNCTION__));
+		goto return_null;
+	}
+
+	work = (struct dhd_deferred_wq *)kzalloc(sizeof(struct dhd_deferred_wq),
+		flags);
+
+	if (!work) {
+		DHD_ERROR(("%s: work queue creation failed \n", __FUNCTION__));
+		goto return_null;
+	}
+
+	INIT_WORK((struct work_struct *)work, dhd_deferred_work_handler);
+
+	/* initialize event fifo */
+	spin_lock_init(&work->work_lock);
+
+	/* allocate buffer to hold prio events */
+	fifo_size = DHD_PRIO_WORK_FIFO_SIZE;
+	fifo_size = is_power_of_2(fifo_size)? fifo_size : roundup_pow_of_two(fifo_size);
+	buf = (u8*)kzalloc(fifo_size, flags);
+	if (!buf) {
+		DHD_ERROR(("%s: prio work fifo allocation failed \n", __FUNCTION__));
+		goto return_null;
+	}
+
+	/* Initialize prio event fifo */
+	work->prio_fifo = dhd_kfifo_init(buf, fifo_size, &work->work_lock);
+	if (!work->prio_fifo) {
+		kfree(buf);
+		goto return_null;
+	}
+
+	/* allocate buffer to hold work events */
+	fifo_size = DHD_WORK_FIFO_SIZE;
+	fifo_size = is_power_of_2(fifo_size)? fifo_size : roundup_pow_of_two(fifo_size);
+	buf = (u8*)kzalloc(fifo_size, flags);
+	if (!buf) {
+		DHD_ERROR(("%s: work fifo allocation failed \n", __FUNCTION__));
+		goto return_null;
+	}
+
+	/* Initialize event fifo */
+	work->work_fifo = dhd_kfifo_init(buf, fifo_size, &work->work_lock);
+	if (!work->work_fifo) {
+		kfree(buf);
+		goto return_null;
+	}
+
+	work->dhd_info = dhd_info;
+	DHD_ERROR(("%s: work queue initialized \n", __FUNCTION__));
+	return work;
+
+return_null:
+
+	if (work)
+		dhd_deferred_work_deinit(work);
+
+	return NULL;
+}
+
+void
+dhd_deferred_work_deinit(void *work)
+{
+	struct dhd_deferred_wq *deferred_work = work;
+
+
+	if (!deferred_work) {
+		DHD_ERROR(("%s: deferred work has been freed alread \n", __FUNCTION__));
+		return;
+	}
+
+	/* cancel the deferred work handling */
+	cancel_work_sync((struct work_struct *)deferred_work);
+
+	/*
+	 * free work event fifo.
+	 * kfifo_free frees locally allocated fifo buffer
+	 */
+	if (deferred_work->prio_fifo)
+		dhd_kfifo_free(deferred_work->prio_fifo);
+
+	if (deferred_work->work_fifo)
+		dhd_kfifo_free(deferred_work->work_fifo);
+
+	kfree(deferred_work);
+}
+
+/*
+ *	Prepares event to be queued
+ *	Schedules the event
+ */
+int
+dhd_deferred_schedule_work(void *workq, void *event_data, u8 event,
+	event_handler_t event_handler, u8 priority)
+{
+	struct dhd_deferred_wq *deferred_wq = (struct dhd_deferred_wq *) workq;
+	struct	dhd_deferred_event_t	deferred_event;
+	int	status;
+
+	if (!deferred_wq) {
+		DHD_ERROR(("%s: work queue not initialized \n", __FUNCTION__));
+		ASSERT(0);
+		return DHD_WQ_STS_UNINITIALIZED;
+	}
+
+	if (!event || (event >= DHD_MAX_WQ_EVENTS)) {
+		DHD_ERROR(("%s: Unknown event \n", __FUNCTION__));
+		return DHD_WQ_STS_UNKNOWN_EVENT;
+	}
+
+	/*
+	 * default element size is 1, which can be changed
+	 * using kfifo_esize(). Older kernel(FC11) doesn't support
+	 * changing element size. For compatibility changing
+	 * element size is not prefered
+	 */
+	ASSERT(kfifo_esize(deferred_wq->prio_fifo) == 1);
+	ASSERT(kfifo_esize(deferred_wq->work_fifo) == 1);
+
+	deferred_event.event = event;
+	deferred_event.event_data = event_data;
+	deferred_event.event_handler = event_handler;
+
+	if (priority == DHD_WORK_PRIORITY_HIGH) {
+		status = kfifo_in_spinlocked(deferred_wq->prio_fifo, &deferred_event,
+			DEFRD_EVT_SIZE, &deferred_wq->work_lock);
+	} else {
+		status = kfifo_in_spinlocked(deferred_wq->work_fifo, &deferred_event,
+			DEFRD_EVT_SIZE, &deferred_wq->work_lock);
+	}
+
+	if (!status) {
+		return DHD_WQ_STS_SCHED_FAILED;
+	}
+	schedule_work((struct work_struct *)deferred_wq);
+	return DHD_WQ_STS_OK;
+}
+
+static int
+dhd_get_scheduled_work(struct dhd_deferred_wq *deferred_wq, struct dhd_deferred_event_t *event)
+{
+	int	status = 0;
+
+	if (!deferred_wq) {
+		DHD_ERROR(("%s: work queue not initialized \n", __FUNCTION__));
+		return DHD_WQ_STS_UNINITIALIZED;
+	}
+
+	/*
+	 * default element size is 1 byte, which can be changed
+	 * using kfifo_esize(). Older kernel(FC11) doesn't support
+	 * changing element size. For compatibility changing
+	 * element size is not prefered
+	 */
+	ASSERT(kfifo_esize(deferred_wq->prio_fifo) == 1);
+	ASSERT(kfifo_esize(deferred_wq->work_fifo) == 1);
+
+	/* first read  priorit event fifo */
+	status = kfifo_out_spinlocked(deferred_wq->prio_fifo, event,
+		DEFRD_EVT_SIZE, &deferred_wq->work_lock);
+
+	if (!status) {
+		/* priority fifo is empty. Now read low prio work fifo */
+		status = kfifo_out_spinlocked(deferred_wq->work_fifo, event,
+			DEFRD_EVT_SIZE, &deferred_wq->work_lock);
+	}
+
+	return status;
+}
+
+/*
+ *	Called when work is scheduled
+ */
+static void
+dhd_deferred_work_handler(struct work_struct *work)
+{
+	struct dhd_deferred_wq		*deferred_work = (struct dhd_deferred_wq *)work;
+	struct dhd_deferred_event_t	work_event;
+	int				status;
+
+	if (!deferred_work) {
+		DHD_ERROR(("%s: work queue not initialized\n", __FUNCTION__));
+		return;
+	}
+
+	do {
+		status = dhd_get_scheduled_work(deferred_work, &work_event);
+		DHD_TRACE(("%s: event to handle %d \n", __FUNCTION__, status));
+		if (!status) {
+			DHD_TRACE(("%s: No event to handle %d \n", __FUNCTION__, status));
+			break;
+		}
+
+		if (work_event.event > DHD_MAX_WQ_EVENTS) {
+			DHD_TRACE(("%s: Unknown event %d \n", __FUNCTION__, work_event.event));
+			break;
+		}
+
+		if (work_event.event_handler) {
+			work_event.event_handler(deferred_work->dhd_info,
+				work_event.event_data, work_event.event);
+		} else {
+			DHD_ERROR(("%s: event not defined %d\n", __FUNCTION__, work_event.event));
+		}
+	} while (1);
+	return;
+}
diff --git a/drivers/net/wireless/bcmdhd/dhd_linux_wq.h b/drivers/net/wireless/bcmdhd/dhd_linux_wq.h
new file mode 100644
index 0000000..e8c3639
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_linux_wq.h
@@ -0,0 +1,64 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), Generic work queue framework
+ * Generic interface to handle dhd deferred work events
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_linux_wq.h 449578 2014-01-17 13:53:20Z $
+ */
+#ifndef _dhd_linux_wq_h_
+#define _dhd_linux_wq_h_
+/*
+ *	Work event definitions
+ */
+enum _wq_event {
+	DHD_WQ_WORK_IF_ADD = 1,
+	DHD_WQ_WORK_IF_DEL,
+	DHD_WQ_WORK_SET_MAC,
+	DHD_WQ_WORK_SET_MCAST_LIST,
+	DHD_WQ_WORK_IPV6_NDO,
+	DHD_WQ_WORK_HANG_MSG,
+
+	DHD_MAX_WQ_EVENTS
+};
+
+/*
+ *	Work event priority
+ */
+#define DHD_WORK_PRIORITY_LOW	0
+#define DHD_WORK_PRIORITY_HIGH	1
+
+/*
+ *	Error definitions
+ */
+#define DHD_WQ_STS_OK			 0
+#define DHD_WQ_STS_FAILED		-1	/* General failure */
+#define DHD_WQ_STS_UNINITIALIZED	-2
+#define DHD_WQ_STS_SCHED_FAILED		-3
+#define DHD_WQ_STS_UNKNOWN_EVENT	-4
+
+typedef void (*event_handler_t)(void *handle, void *event_data, u8 event);
+
+void *dhd_deferred_work_init(void *dhd);
+void dhd_deferred_work_deinit(void *workq);
+int dhd_deferred_schedule_work(void *workq, void *event_data, u8 event,
+	event_handler_t evt_handler, u8 priority);
+#endif /* _dhd_linux_wq_h_ */
diff --git a/drivers/net/wireless/bcmdhd/dhd_msgbuf.c b/drivers/net/wireless/bcmdhd/dhd_msgbuf.c
new file mode 100644
index 0000000..e1948a2
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_msgbuf.c
@@ -0,0 +1,3847 @@
+/*
+ * Header file describing the internal (inter-module) DHD interfaces.
+ *
+ * Provides type definitions and function prototypes used to link the
+ * DHD OS, bus, and protocol modules.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ *
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ *
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_msgbuf.c 474409 2014-05-01 04:27:15Z $
+ */
+#include <typedefs.h>
+#include <osl.h>
+
+#include <bcmutils.h>
+#include <bcmmsgbuf.h>
+#include <bcmendian.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_proto.h>
+#include <dhd_bus.h>
+#include <dhd_dbg.h>
+
+#include <siutils.h>
+
+
+#include <dhd_flowring.h>
+
+#ifdef PROP_TXSTATUS
+#include <wlfc_proto.h>
+#include <dhd_wlfc.h>
+#endif
+
+#include <pcie_core.h>
+#include <bcmpcie.h>
+#include <dhd_pcie.h>
+#include <dhd_ip.h>
+#define RETRIES 2		/* # of retries to retrieve matching ioctl response */
+#define IOCTL_HDR_LEN	12
+
+#define DEFAULT_RX_BUFFERS_TO_POST	256
+#define RXBUFPOST_THRESHOLD			32
+#define RX_BUF_BURST				16
+
+#define DHD_STOP_QUEUE_THRESHOLD	200
+#define DHD_START_QUEUE_THRESHOLD	100
+
+#define MODX(x, n)	((x) & ((n) -1))
+#define align(x, n)	(MODX(x, n) ? ((x) - MODX(x, n) + (n)) : ((x) - MODX(x, n)))
+#define RX_DMA_OFFSET		8
+#define IOCT_RETBUF_SIZE	(RX_DMA_OFFSET + WLC_IOCTL_MAXLEN)
+
+#define DMA_D2H_SCRATCH_BUF_LEN	8
+#define DMA_ALIGN_LEN		4
+#define DMA_XFER_LEN_LIMIT	0x400000
+
+#define DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ		8192
+
+#define DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D		1
+#define DHD_FLOWRING_MAX_EVENTBUF_POST			8
+#define DHD_FLOWRING_MAX_IOCTLRESPBUF_POST		8
+
+#define DHD_PROT_FUNCS	22
+
+typedef struct dhd_mem_map {
+	void *va;
+	dmaaddr_t pa;
+	void *dmah;
+} dhd_mem_map_t;
+
+typedef struct dhd_dmaxfer {
+	dhd_mem_map_t	srcmem;
+	dhd_mem_map_t	destmem;
+	uint32		len;
+	uint32		srcdelay;
+	uint32		destdelay;
+} dhd_dmaxfer_t;
+
+#define TXP_FLUSH_NITEMS
+#define TXP_FLUSH_MAX_ITEMS_FLUSH_CNT	48
+
+typedef struct msgbuf_ring {
+	bool		inited;
+	uint16		idx;
+	uchar		name[24];
+	dhd_mem_map_t	ring_base;
+#ifdef TXP_FLUSH_NITEMS
+	void*		start_addr;
+	uint16		pend_items_count;
+#endif /* TXP_FLUSH_NITEMS */
+	ring_mem_t	*ringmem;
+	ring_state_t	*ringstate;
+} msgbuf_ring_t;
+
+
+typedef struct dhd_prot {
+	osl_t *osh;		/* OSL handle */
+	uint32 reqid;
+	uint32 lastcmd;
+	uint32 pending;
+	uint16 rxbufpost;
+	uint16 max_rxbufpost;
+	uint16 max_eventbufpost;
+	uint16 max_ioctlrespbufpost;
+	uint16 cur_event_bufs_posted;
+	uint16 cur_ioctlresp_bufs_posted;
+	uint16 active_tx_count;
+	uint16 max_tx_count;
+	uint16 txp_threshold;
+	/* Ring info */
+	msgbuf_ring_t	*h2dring_txp_subn;
+	msgbuf_ring_t	*h2dring_rxp_subn;
+	msgbuf_ring_t	*h2dring_ctrl_subn;	/* Cbuf handle for H2D ctrl ring */
+	msgbuf_ring_t	*d2hring_tx_cpln;
+	msgbuf_ring_t	*d2hring_rx_cpln;
+	msgbuf_ring_t	*d2hring_ctrl_cpln;	/* Cbuf handle for D2H ctrl ring */
+	uint32		rx_dataoffset;
+	dhd_mem_map_t	retbuf;
+	dhd_mem_map_t	ioctbuf;	/* For holding ioct request buf */
+	dhd_mb_ring_t	mb_ring_fn;
+
+	uint32		d2h_dma_scratch_buf_len; /* For holding ioct request buf */
+	dhd_mem_map_t	d2h_dma_scratch_buf;	/* For holding ioct request buf */
+
+	uint32	h2d_dma_writeindx_buf_len; /* For holding dma ringupd buf - submission write */
+	dhd_mem_map_t 	h2d_dma_writeindx_buf;	/* For holding dma ringupd buf - submission write */
+
+	uint32	h2d_dma_readindx_buf_len; /* For holding dma ringupd buf - submission read */
+	dhd_mem_map_t	h2d_dma_readindx_buf;	/* For holding dma ringupd buf - submission read */
+
+	uint32	d2h_dma_writeindx_buf_len; /* For holding dma ringupd buf - completion write */
+	dhd_mem_map_t	d2h_dma_writeindx_buf;	/* For holding dma ringupd buf - completion write */
+
+	uint32	d2h_dma_readindx_buf_len; /* For holding dma ringupd buf - completion read */
+	dhd_mem_map_t	d2h_dma_readindx_buf;	/* For holding dma ringupd buf - completion read */
+
+	dhd_dmaxfer_t	dmaxfer;
+	bool		dmaxfer_in_progress;
+
+	uint16		ioctl_seq_no;
+	uint16		data_seq_no;
+	uint16		ioctl_trans_id;
+	void		*pktid_map_handle;
+	uint16		rx_metadata_offset;
+	uint16		tx_metadata_offset;
+	uint16		rx_cpln_early_upd_idx;
+} dhd_prot_t;
+
+static int dhdmsgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd,
+	void *buf, uint len, uint8 action);
+static int dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd,
+	void *buf, uint len, uint8 action);
+static int dhdmsgbuf_cmplt(dhd_pub_t *dhd, uint32 id, uint32 len, void* buf, void* retbuf);
+
+static int dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd);
+static int dhd_prot_rxbufpost(dhd_pub_t *dhd, uint16 count);
+static void dhd_prot_return_rxbuf(dhd_pub_t *dhd, uint16 rxcnt);
+static void dhd_prot_rxcmplt_process(dhd_pub_t *dhd, void* buf, uint16 msglen);
+static void dhd_prot_event_process(dhd_pub_t *dhd, void* buf, uint16 len);
+static int dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8* buf, uint16 len);
+static int dhd_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8* buf, uint16 len);
+
+static void dhd_prot_txstatus_process(dhd_pub_t *dhd, void * buf, uint16 msglen);
+static void dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void * buf, uint16 msglen);
+static void dhd_prot_ioctack_process(dhd_pub_t *dhd, void * buf, uint16 msglen);
+static void dhd_prot_ringstatus_process(dhd_pub_t *dhd, void * buf, uint16 msglen);
+static void dhd_prot_genstatus_process(dhd_pub_t *dhd, void * buf, uint16 msglen);
+static void* dhd_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring,
+	uint16 msglen, uint16 *alloced);
+static int dhd_fillup_ioct_reqst_ptrbased(dhd_pub_t *dhd, uint16 len, uint cmd, void* buf,
+	int ifidx);
+static INLINE void dhd_prot_packet_free(dhd_pub_t *dhd, uint32 pktid, uint8 buf_type);
+static INLINE void *dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid, uint8 buf_type);
+static void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dma);
+static int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len, uint srcdelay,
+	uint destdelay, dhd_dmaxfer_t *dma);
+static void dhdmsgbuf_dmaxfer_compare(dhd_pub_t *dhd, void *buf, uint16 msglen);
+static void dhd_prot_process_flow_ring_create_response(dhd_pub_t *dhd, void* buf, uint16 msglen);
+static void dhd_prot_process_flow_ring_delete_response(dhd_pub_t *dhd, void* buf, uint16 msglen);
+static void dhd_prot_process_flow_ring_flush_response(dhd_pub_t *dhd, void* buf, uint16 msglen);
+
+
+
+
+#ifdef DHD_RX_CHAINING
+#define PKT_CTF_CHAINABLE(dhd, ifidx, evh, prio, h_sa, h_da, h_prio) \
+	(!ETHER_ISNULLDEST(((struct ether_header *)(evh))->ether_dhost) && \
+	 !ETHER_ISMULTI(((struct ether_header *)(evh))->ether_dhost) && \
+	 !eacmp((h_da), ((struct ether_header *)(evh))->ether_dhost) && \
+	 !eacmp((h_sa), ((struct ether_header *)(evh))->ether_shost) && \
+	 ((h_prio) == (prio)) && (dhd_ctf_hotbrc_check((dhd), (evh), (ifidx))) && \
+	 ((((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IP)) || \
+	 (((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IPV6))))
+
+static INLINE void BCMFASTPATH dhd_rxchain_reset(rxchain_info_t *rxchain);
+static void BCMFASTPATH dhd_rxchain_frame(dhd_pub_t *dhd, void *pkt, uint ifidx);
+static void BCMFASTPATH dhd_rxchain_commit(dhd_pub_t *dhd);
+
+#define DHD_PKT_CTF_MAX_CHAIN_LEN	64
+#endif /* DHD_RX_CHAINING */
+
+static uint16 dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, bool event_buf, uint32 max_to_post);
+static int dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *pub);
+static int dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *pub);
+
+static void dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t * ring);
+static void dhd_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring);
+static msgbuf_ring_t* prot_ring_attach(dhd_prot_t * prot, char* name, uint16 max_item,
+	uint16 len_item, uint16 ringid);
+static void* prot_get_ring_space(msgbuf_ring_t *ring, uint16 nitems, uint16 * alloced);
+static void dhd_set_dmaed_index(dhd_pub_t *dhd, uint8 type, uint16 ringid, uint16 new_index);
+static uint16 dhd_get_dmaed_index(dhd_pub_t *dhd, uint8 type, uint16 ringid);
+static void prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p, uint16 len);
+static void prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring);
+static uint8* prot_get_src_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint16 *available_len);
+static void prot_store_rxcpln_read_idx(dhd_pub_t *dhd, msgbuf_ring_t *ring);
+static void prot_early_upd_rxcpln_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring);
+typedef void (*dhd_msgbuf_func_t)(dhd_pub_t *dhd, void * buf, uint16 msglen);
+static dhd_msgbuf_func_t table_lookup[DHD_PROT_FUNCS] = {
+	NULL,
+	dhd_prot_genstatus_process, /* MSG_TYPE_GEN_STATUS */
+	dhd_prot_ringstatus_process, /* MSG_TYPE_RING_STATUS */
+	NULL,
+	dhd_prot_process_flow_ring_create_response, /* MSG_TYPE_FLOW_RING_CREATE_CMPLT */
+	NULL,
+	dhd_prot_process_flow_ring_delete_response, /* MSG_TYPE_FLOW_RING_DELETE_CMPLT */
+	NULL,
+	dhd_prot_process_flow_ring_flush_response, /* MSG_TYPE_FLOW_RING_FLUSH_CMPLT */
+	NULL,
+	dhd_prot_ioctack_process, /* MSG_TYPE_IOCTLPTR_REQ_ACK */
+	NULL,
+	dhd_prot_ioctcmplt_process, /* MSG_TYPE_IOCTL_CMPLT */
+	NULL,
+	dhd_prot_event_process, /* MSG_TYPE_WL_EVENT */
+	NULL,
+	dhd_prot_txstatus_process, /* MSG_TYPE_TX_STATUS */
+	NULL,
+	dhd_prot_rxcmplt_process, /* MSG_TYPE_RX_CMPLT */
+	NULL,
+	dhdmsgbuf_dmaxfer_compare, /* MSG_TYPE_LPBK_DMAXFER_CMPLT */
+	NULL,
+};
+
+/*
+ * +---------------------------------------------------------------------------+
+ * PktId Map: Provides a native packet pointer to unique 32bit PktId mapping.
+ * The packet id map, also includes storage for some packet parameters that
+ * may be saved. A native packet pointer along with the parameters may be saved
+ * and a unique 32bit pkt id will be returned. Later, the saved packet pointer
+ * and the metadata may be retrieved using the previously allocated packet id.
+ * +---------------------------------------------------------------------------+
+ */
+#define MAX_PKTID_ITEMS     (3072) /* Maximum number of pktids supported */
+
+typedef void * dhd_pktid_map_handle_t; /* opaque handle to a pktid map */
+
+/* Construct a packet id mapping table, returing an opaque map handle */
+static dhd_pktid_map_handle_t *dhd_pktid_map_init(void *osh, uint32 num_items);
+
+/* Destroy a packet id mapping table, freeing all packets active in the table */
+static void dhd_pktid_map_fini(dhd_pktid_map_handle_t *map);
+
+/* Determine number of pktids that are available */
+static INLINE uint32 dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *map);
+
+/* Allocate a unique pktid against which a pkt and some metadata is saved */
+static INLINE uint32 dhd_pktid_map_reserve(dhd_pktid_map_handle_t *handle,
+                                           void *pkt);
+static INLINE void dhd_pktid_map_save(dhd_pktid_map_handle_t *handle, void *pkt,
+                       uint32 nkey, dmaaddr_t physaddr, uint32 len, uint8 dma, uint8 buf_type);
+static uint32 dhd_pktid_map_alloc(dhd_pktid_map_handle_t *map, void *pkt,
+                                  dmaaddr_t physaddr, uint32 len, uint8 dma, uint8 buf_type);
+
+/* Return an allocated pktid, retrieving previously saved pkt and metadata */
+static void *dhd_pktid_map_free(dhd_pktid_map_handle_t *map, uint32 id,
+                                dmaaddr_t *physaddr, uint32 *len, uint8 buf_type);
+
+/* Packet metadata saved in packet id mapper */
+
+typedef enum pkt_buf_type {
+	BUFF_TYPE_DATA_TX = 0,
+	BUFF_TYPE_DATA_RX,
+	BUFF_TYPE_IOCTL_RX,
+	BUFF_TYPE_EVENT_RX,
+	/* This is purely to work around the following scenario.
+	 * In the function dhd_prot_txdata, NATIVE_TO_PKTID_RSV is
+	 * called to just reserve the pkt id, later on if ring space
+	 * is not available, the pktid is freed. But note that now
+	 * dhd_prot_pkt_free will compare the the buf_type with the
+	 * buffer type and fail if they don't match. In this case
+	 * passing this flag will ensure that such a comparison is
+	 * not made. The other option I considered is to use physaddr
+	 * field itself. That is make it 0 in xxx_free and in the comparison
+	 * if this field is zero just skip the dma != buf_type comparison.
+	 * But that logic is too implicit and decided to use this logic to
+	 * explicitly skip the check only in this case.
+	 */
+	 BUFF_TYPE_NO_CHECK
+} pkt_buf_type_t;
+
+/* Packet metadata saved in packet id mapper */
+typedef struct dhd_pktid_item {
+	bool        inuse;    /* tag an item to be in use */
+	uint8       dma;      /* map direction: flush or invalidate */
+	uint8       buf_type;
+			    /* This filed is used to colour the
+			       * buffer pointers held in the locker.
+			       */
+	uint16      len;      /* length of mapped packet's buffer */
+	void        *pkt;     /* opaque native pointer to a packet */
+	dmaaddr_t   physaddr; /* physical address of mapped packet's buffer */
+} dhd_pktid_item_t;
+
+typedef struct dhd_pktid_map {
+    void        *osh;
+    int         items;    /* total items in map */
+    int         avail;    /* total available items */
+    int         failures; /* lockers unavailable count */
+    uint32      keys[MAX_PKTID_ITEMS + 1]; /* stack of unique pkt ids */
+    dhd_pktid_item_t lockers[0];           /* metadata storage */
+} dhd_pktid_map_t;
+
+/*
+ * PktId (Locker) #0 is never allocated and is considered invalid.
+ *
+ * On request for a pktid, a value DHD_PKTID_INVALID must be treated as a
+ * depleted pktid pool and must not be used by the caller.
+ *
+ * Likewise, a caller must never free a pktid of value DHD_PKTID_INVALID.
+ */
+#define DHD_PKTID_INVALID               (0U)
+
+#define DHD_PKTID_ITEM_SZ               (sizeof(dhd_pktid_item_t))
+#define DHD_PKTID_MAP_SZ(items)         (sizeof(dhd_pktid_map_t) + \
+	                                     (DHD_PKTID_ITEM_SZ * ((items) + 1)))
+
+#define NATIVE_TO_PKTID_INIT(osh, items) dhd_pktid_map_init((osh), (items))
+#define NATIVE_TO_PKTID_FINI(map)        dhd_pktid_map_fini(map)
+#define NATIVE_TO_PKTID_CLEAR(map)        dhd_pktid_map_clear(map)
+
+#define NATIVE_TO_PKTID_RSV(map, pkt)    dhd_pktid_map_reserve((map), (pkt))
+#define NATIVE_TO_PKTID_SAVE(map, pkt, nkey, pa, len, dma, buf_type) \
+	dhd_pktid_map_save((map), (void *)(pkt), (nkey), (pa), (uint32)(len), \
+		(uint8)dma, (uint8)buf_type)
+#define NATIVE_TO_PKTID(map, pkt, pa, len, dma, buf_type) \
+	dhd_pktid_map_alloc((map), (void *)(pkt), (pa), (uint32)(len), \
+		(uint8)dma, (uint8)buf_type)
+
+#define PKTID_TO_NATIVE(map, pktid, pa, len, buf_type) \
+	dhd_pktid_map_free((map), (uint32)(pktid), \
+	                   (dmaaddr_t *)&(pa), (uint32 *)&(len), (uint8)buf_type)
+
+#define PKTID_AVAIL(map)                 dhd_pktid_map_avail_cnt(map)
+
+/*
+ * +---------------------------------------------------------------------------+
+ * Packet to Packet Id mapper using a <numbered_key, locker> paradigm.
+ *
+ * dhd_pktid_map manages a set of unique Packet Ids range[1..MAX_PKTID_ITEMS].
+ *
+ * dhd_pktid_map_alloc() may be used to save some packet metadata, and a unique
+ * packet id is returned. This unique packet id may be used to retrieve the
+ * previously saved packet metadata, using dhd_pktid_map_free(). On invocation
+ * of dhd_pktid_map_free(), the unique packet id is essentially freed. A
+ * subsequent call to dhd_pktid_map_alloc() may reuse this packet id.
+ *
+ * Implementation Note:
+ * Convert this into a <key,locker> abstraction and place into bcmutils !
+ * Locker abstraction should treat contents as opaque storage, and a
+ * callback should be registered to handle inuse lockers on destructor.
+ *
+ * +---------------------------------------------------------------------------+
+ */
+
+/* Allocate and initialize a mapper of num_items <numbered_key, locker> */
+static dhd_pktid_map_handle_t *
+dhd_pktid_map_init(void *osh, uint32 num_items)
+{
+	uint32 nkey;
+	dhd_pktid_map_t *map;
+	uint32 dhd_pktid_map_sz;
+
+	ASSERT((num_items >= 1) && num_items <= MAX_PKTID_ITEMS);
+	dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(num_items);
+
+	if ((map = (dhd_pktid_map_t *)MALLOC(osh, dhd_pktid_map_sz)) == NULL) {
+		DHD_ERROR(("%s:%d: MALLOC failed for size %d\n",
+		           __FUNCTION__, __LINE__, dhd_pktid_map_sz));
+		return NULL;
+	}
+	bzero(map, dhd_pktid_map_sz);
+
+	map->osh = osh;
+	map->items = num_items;
+	map->avail = num_items;
+
+	map->lockers[DHD_PKTID_INVALID].inuse = TRUE; /* tag locker #0 as inuse */
+
+	for (nkey = 1; nkey <= num_items; nkey++) { /* locker #0 is reserved */
+		map->keys[nkey] = nkey; /* populate with unique keys */
+		map->lockers[nkey].inuse = FALSE;
+	}
+
+	return (dhd_pktid_map_handle_t *)map; /* opaque handle */
+}
+
+/*
+ * Retrieve all allocated keys and free all <numbered_key, locker>.
+ * Freeing implies: unmapping the buffers and freeing the native packet
+ * This could have been a callback registered with the pktid mapper.
+ */
+static void
+dhd_pktid_map_fini(dhd_pktid_map_handle_t *handle)
+{
+	void *osh;
+	int nkey;
+	dhd_pktid_map_t *map;
+	uint32 dhd_pktid_map_sz;
+	dhd_pktid_item_t *locker;
+
+	if (handle == NULL)
+		return;
+
+	map = (dhd_pktid_map_t *)handle;
+	osh = map->osh;
+	dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items);
+
+	nkey = 1; /* skip reserved KEY #0, and start from 1 */
+	locker = &map->lockers[nkey];
+
+	for (; nkey <= map->items; nkey++, locker++) {
+		if (locker->inuse == TRUE) { /* numbered key still in use */
+			locker->inuse = FALSE; /* force open the locker */
+
+			{   /* This could be a callback registered with dhd_pktid_map */
+				DMA_UNMAP(osh, locker->physaddr, locker->len,
+				          locker->dma, 0, 0);
+#ifdef DHD_USE_STATIC_IOCTLBUF
+				if (locker->buf_type == BUFF_TYPE_IOCTL_RX)
+					PKTFREE_STATIC(osh, (ulong*)locker->pkt, FALSE);
+				else
+					PKTFREE(osh, (ulong*)locker->pkt, FALSE);
+#else
+				PKTFREE(osh, (ulong*)locker->pkt, FALSE);
+#endif
+
+			}
+		}
+	}
+
+	MFREE(osh, handle, dhd_pktid_map_sz);
+}
+
+static void
+dhd_pktid_map_clear(dhd_pktid_map_handle_t *handle)
+{
+	void *osh;
+	int nkey;
+	dhd_pktid_map_t *map;
+	dhd_pktid_item_t *locker;
+
+	DHD_TRACE(("%s\n",__FUNCTION__));
+
+	if (handle == NULL)
+		return;
+
+	map = (dhd_pktid_map_t *)handle;
+	osh = map->osh;
+	map->failures = 0;
+
+	nkey = 1; /* skip reserved KEY #0, and start from 1 */
+	locker = &map->lockers[nkey];
+
+	for (; nkey <= map->items; nkey++, locker++) {
+		map->keys[nkey] = nkey; /* populate with unique keys */
+		if (locker->inuse == TRUE) { /* numbered key still in use */
+			locker->inuse = FALSE; /* force open the locker */
+			DHD_TRACE(("%s free id%d\n",__FUNCTION__,nkey ));
+			DMA_UNMAP(osh, (uint32)locker->physaddr, locker->len,
+				          locker->dma, 0, 0);
+#ifdef DHD_USE_STATIC_IOCTLBUF
+			if (locker->buf_type == BUFF_TYPE_IOCTL_RX)
+				PKTFREE_STATIC(osh, (ulong*)locker->pkt, FALSE);
+			else
+				PKTFREE(osh, (ulong*)locker->pkt, FALSE);
+#else
+			PKTFREE(osh, (ulong*)locker->pkt, FALSE);
+#endif
+
+		}
+	}
+	map->avail = map->items;
+}
+
+/* Get the pktid free count */
+static INLINE uint32 BCMFASTPATH
+dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *handle)
+{
+	dhd_pktid_map_t *map;
+
+	ASSERT(handle != NULL);
+	map = (dhd_pktid_map_t *)handle;
+
+	return map->avail;
+}
+
+/*
+ * Allocate locker, save pkt contents, and return the locker's numbered key.
+ * dhd_pktid_map_alloc() is not reentrant, and is the caller's responsibility.
+ * Caller must treat a returned value DHD_PKTID_INVALID as a failure case,
+ * implying a depleted pool of pktids.
+ */
+static INLINE uint32
+dhd_pktid_map_reserve(dhd_pktid_map_handle_t *handle, void *pkt)
+{
+	uint32 nkey;
+	dhd_pktid_map_t *map;
+	dhd_pktid_item_t *locker;
+
+	ASSERT(handle != NULL);
+	map = (dhd_pktid_map_t *)handle;
+
+	if (map->avail <= 0) { /* no more pktids to allocate */
+		map->failures++;
+		DHD_INFO(("%s:%d: failed, no free keys\n", __FUNCTION__, __LINE__));
+		return DHD_PKTID_INVALID; /* failed alloc request */
+	}
+	ASSERT(map->avail <= map->items);
+
+	nkey = map->keys[map->avail]; /* fetch a free locker, pop stack */
+	map->avail--;
+
+	locker = &map->lockers[nkey]; /* save packet metadata in locker */
+	locker->inuse = TRUE; /* reserve this locker */
+	locker->pkt = pkt;
+	locker->len = 0;
+	ASSERT(nkey != DHD_PKTID_INVALID);
+	return nkey; /* return locker's numbered key */
+}
+
+static INLINE void
+dhd_pktid_map_save(dhd_pktid_map_handle_t *handle, void *pkt, uint32 nkey,
+                   dmaaddr_t physaddr, uint32 len, uint8 dma, uint8 buf_type)
+{
+	dhd_pktid_map_t *map;
+	dhd_pktid_item_t *locker;
+
+	ASSERT(handle != NULL);
+	map = (dhd_pktid_map_t *)handle;
+
+	ASSERT((nkey != DHD_PKTID_INVALID) && (nkey <= (uint32)map->items));
+
+	locker = &map->lockers[nkey];
+	ASSERT(locker->pkt == pkt);
+
+	locker->dma = dma; /* store contents in locker */
+	locker->physaddr = physaddr;
+	locker->len = (uint16)len; /* 16bit len */
+	locker->buf_type = buf_type;
+}
+
+static uint32 BCMFASTPATH
+dhd_pktid_map_alloc(dhd_pktid_map_handle_t *handle, void *pkt,
+                    dmaaddr_t physaddr, uint32 len, uint8 dma, uint8 buf_type)
+{
+	uint32 nkey = dhd_pktid_map_reserve(handle, pkt);
+	if (nkey != DHD_PKTID_INVALID) {
+		dhd_pktid_map_save(handle, pkt, nkey, physaddr, len, dma, buf_type);
+	}
+	return nkey;
+}
+
+/*
+ * Given a numbered key, return the locker contents.
+ * dhd_pktid_map_free() is not reentrant, and is the caller's responsibility.
+ * Caller may not free a pktid value DHD_PKTID_INVALID or an arbitrary pktid
+ * value. Only a previously allocated pktid may be freed.
+ */
+static void * BCMFASTPATH
+dhd_pktid_map_free(dhd_pktid_map_handle_t *handle, uint32 nkey,
+                   dmaaddr_t *physaddr, uint32 *len, uint8 buf_type)
+{
+	dhd_pktid_map_t *map;
+	dhd_pktid_item_t *locker;
+	void *pkt;
+	ASSERT(handle != NULL);
+
+	map = (dhd_pktid_map_t *)handle;
+	ASSERT((nkey != DHD_PKTID_INVALID) && (nkey <= (uint32)map->items));
+
+	locker = &map->lockers[nkey];
+
+	if (locker->inuse == FALSE) { /* Debug check for cloned numbered key */
+		DHD_ERROR(("%s:%d: Error! freeing invalid pktid<%u>\n",
+		           __FUNCTION__, __LINE__, nkey));
+		ASSERT(locker->inuse != FALSE);
+		return NULL;
+	}
+	if ((buf_type != BUFF_TYPE_NO_CHECK) && (locker->buf_type != buf_type)) {
+		DHD_ERROR(("%s:%d: Error! Invalid Buffer Free for pktid<%u> \n",
+		           __FUNCTION__, __LINE__, nkey));
+		return NULL;
+	}
+
+	map->avail++;
+	map->keys[map->avail] = nkey; /* make this numbered key available */
+
+	locker->inuse = FALSE; /* open and free Locker */
+
+	*physaddr = locker->physaddr; /* return contents of locker */
+	*len = (uint32)locker->len;
+	pkt = locker->pkt;
+	locker->pkt = NULL; /* Clear pkt */
+	locker->len = 0;
+
+	return pkt;
+}
+
+/* Linkage, sets prot link and updates hdrlen in pub */
+int dhd_prot_attach(dhd_pub_t *dhd)
+{
+	uint alloced = 0;
+
+	dhd_prot_t *prot;
+
+	/* Allocate prot structure */
+	if (!(prot = (dhd_prot_t *)DHD_OS_PREALLOC(dhd, DHD_PREALLOC_PROT,
+		sizeof(dhd_prot_t)))) {
+		DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
+		goto fail;
+	}
+	memset(prot, 0, sizeof(*prot));
+
+	prot->osh = dhd->osh;
+	dhd->prot = prot;
+
+	/* DMAing ring completes supported? FALSE by default  */
+	dhd->dma_d2h_ring_upd_support = FALSE;
+	dhd->dma_h2d_ring_upd_support = FALSE;
+
+	/* Ring Allocations */
+	/* 1.0	 H2D	TXPOST ring */
+	if (!(prot->h2dring_txp_subn = prot_ring_attach(prot, "h2dtxp",
+		H2DRING_TXPOST_MAX_ITEM, H2DRING_TXPOST_ITEMSIZE,
+		BCMPCIE_H2D_TXFLOWRINGID))) {
+		DHD_ERROR(("%s: kmalloc for H2D    TXPOST ring  failed\n", __FUNCTION__));
+		goto fail;
+	}
+
+	/* 2.0	 H2D	RXPOST ring */
+	if (!(prot->h2dring_rxp_subn = prot_ring_attach(prot, "h2drxp",
+		H2DRING_RXPOST_MAX_ITEM, H2DRING_RXPOST_ITEMSIZE,
+		BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT))) {
+		DHD_ERROR(("%s: kmalloc for H2D    RXPOST ring  failed\n", __FUNCTION__));
+		goto fail;
+
+	}
+
+	/* 3.0	 H2D	CTRL_SUBMISSION ring */
+	if (!(prot->h2dring_ctrl_subn = prot_ring_attach(prot, "h2dctrl",
+		H2DRING_CTRL_SUB_MAX_ITEM, H2DRING_CTRL_SUB_ITEMSIZE,
+		BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT))) {
+		DHD_ERROR(("%s: kmalloc for H2D    CTRL_SUBMISSION ring failed\n",
+			__FUNCTION__));
+		goto fail;
+
+	}
+
+	/* 4.0	 D2H	TX_COMPLETION ring */
+	if (!(prot->d2hring_tx_cpln = prot_ring_attach(prot, "d2htxcpl",
+		D2HRING_TXCMPLT_MAX_ITEM, D2HRING_TXCMPLT_ITEMSIZE,
+		BCMPCIE_D2H_MSGRING_TX_COMPLETE))) {
+		DHD_ERROR(("%s: kmalloc for D2H    TX_COMPLETION ring failed\n",
+			__FUNCTION__));
+		goto fail;
+
+	}
+
+	/* 5.0	 D2H	RX_COMPLETION ring */
+	if (!(prot->d2hring_rx_cpln = prot_ring_attach(prot, "d2hrxcpl",
+		D2HRING_RXCMPLT_MAX_ITEM, D2HRING_RXCMPLT_ITEMSIZE,
+		BCMPCIE_D2H_MSGRING_RX_COMPLETE))) {
+		DHD_ERROR(("%s: kmalloc for D2H    RX_COMPLETION ring failed\n",
+			__FUNCTION__));
+		goto fail;
+
+	}
+
+	/* 6.0	 D2H	CTRL_COMPLETION ring */
+	if (!(prot->d2hring_ctrl_cpln = prot_ring_attach(prot, "d2hctrl",
+		D2HRING_CTRL_CMPLT_MAX_ITEM, D2HRING_CTRL_CMPLT_ITEMSIZE,
+		BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE))) {
+		DHD_ERROR(("%s: kmalloc for D2H    CTRL_COMPLETION ring failed\n",
+			__FUNCTION__));
+		goto fail;
+	}
+
+	/* Return buffer for ioctl */
+	prot->retbuf.va = DMA_ALLOC_CONSISTENT(dhd->osh, IOCT_RETBUF_SIZE, DMA_ALIGN_LEN,
+		&alloced, &prot->retbuf.pa, &prot->retbuf.dmah);
+	if (prot->retbuf.va ==  NULL) {
+		ASSERT(0);
+		return BCME_NOMEM;
+	}
+
+	ASSERT(MODX((unsigned long)prot->retbuf.va, DMA_ALIGN_LEN) == 0);
+	bzero(prot->retbuf.va, IOCT_RETBUF_SIZE);
+	OSL_CACHE_FLUSH((void *) prot->retbuf.va, IOCT_RETBUF_SIZE);
+
+	/* IOCTL request buffer */
+	prot->ioctbuf.va = DMA_ALLOC_CONSISTENT(dhd->osh, IOCT_RETBUF_SIZE, DMA_ALIGN_LEN,
+		&alloced, &prot->ioctbuf.pa, &prot->ioctbuf.dmah);
+
+	if (prot->ioctbuf.va ==  NULL) {
+		ASSERT(0);
+		return BCME_NOMEM;
+	}
+
+	ASSERT(MODX((unsigned long)prot->ioctbuf.va, DMA_ALIGN_LEN) == 0);
+	bzero(prot->ioctbuf.va, IOCT_RETBUF_SIZE);
+	OSL_CACHE_FLUSH((void *) prot->ioctbuf.va, IOCT_RETBUF_SIZE);
+
+	/* Scratch buffer for dma rx offset */
+	prot->d2h_dma_scratch_buf_len = DMA_D2H_SCRATCH_BUF_LEN;
+	prot->d2h_dma_scratch_buf.va = DMA_ALLOC_CONSISTENT(dhd->osh, DMA_D2H_SCRATCH_BUF_LEN,
+		DMA_ALIGN_LEN, &alloced, &prot->d2h_dma_scratch_buf.pa,
+		&prot->d2h_dma_scratch_buf.dmah);
+
+	if (prot->d2h_dma_scratch_buf.va == NULL) {
+		ASSERT(0);
+		return BCME_NOMEM;
+	}
+	ASSERT(MODX((unsigned long)prot->d2h_dma_scratch_buf.va, DMA_ALIGN_LEN) == 0);
+	bzero(prot->d2h_dma_scratch_buf.va, DMA_D2H_SCRATCH_BUF_LEN);
+	OSL_CACHE_FLUSH((void *)prot->d2h_dma_scratch_buf.va, DMA_D2H_SCRATCH_BUF_LEN);
+
+
+	/* PKTID handle INIT */
+	prot->pktid_map_handle = NATIVE_TO_PKTID_INIT(dhd->osh, MAX_PKTID_ITEMS);
+	if (prot->pktid_map_handle == NULL) {
+		ASSERT(0);
+		return BCME_NOMEM;
+	}
+
+	prot->dmaxfer.srcmem.va = NULL;
+	prot->dmaxfer.destmem.va = NULL;
+	prot->dmaxfer_in_progress = FALSE;
+
+	prot->rx_metadata_offset = 0;
+	prot->tx_metadata_offset = 0;
+
+#ifdef DHD_RX_CHAINING
+	dhd_rxchain_reset(&prot->rxchain);
+#endif
+
+	return 0;
+
+fail:
+#ifndef CONFIG_DHD_USE_STATIC_BUF
+	if (prot != NULL)
+		dhd_prot_detach(dhd);
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+	return BCME_NOMEM;
+}
+
+/* Init memory block on host DMA'ing indices */
+int
+dhd_prot_init_index_dma_block(dhd_pub_t *dhd, uint8 type, uint32 length)
+{
+	uint alloced = 0;
+
+	dhd_prot_t *prot = dhd->prot;
+	uint32 dma_block_size = 4 * length;
+
+	if (prot == NULL) {
+		DHD_ERROR(("prot is not inited\n"));
+		return BCME_ERROR;
+	}
+
+	switch (type) {
+		case HOST_TO_DNGL_DMA_WRITEINDX_BUFFER:
+			/* ring update dma buffer for submission write */
+			prot->h2d_dma_writeindx_buf_len = dma_block_size;
+			prot->h2d_dma_writeindx_buf.va = DMA_ALLOC_CONSISTENT(dhd->osh,
+				dma_block_size, DMA_ALIGN_LEN, &alloced,
+				&prot->h2d_dma_writeindx_buf.pa,
+				&prot->h2d_dma_writeindx_buf.dmah);
+
+			if (prot->h2d_dma_writeindx_buf.va == NULL) {
+				return BCME_NOMEM;
+			}
+
+			ASSERT(ISALIGNED(prot->h2d_dma_writeindx_buf.va, 4));
+			bzero(prot->h2d_dma_writeindx_buf.va, dma_block_size);
+			OSL_CACHE_FLUSH((void *)prot->h2d_dma_writeindx_buf.va, dma_block_size);
+			DHD_ERROR(("H2D_WRITEINDX_ARRAY_HOST: %d-bytes "
+				"inited for dma'ing h2d-w indices\n",
+				prot->h2d_dma_writeindx_buf_len));
+			break;
+
+		case HOST_TO_DNGL_DMA_READINDX_BUFFER:
+			/* ring update dma buffer for submission read */
+			prot->h2d_dma_readindx_buf_len = dma_block_size;
+			prot->h2d_dma_readindx_buf.va = DMA_ALLOC_CONSISTENT(dhd->osh,
+				dma_block_size, DMA_ALIGN_LEN, &alloced,
+				&prot->h2d_dma_readindx_buf.pa,
+				&prot->h2d_dma_readindx_buf.dmah);
+			if (prot->h2d_dma_readindx_buf.va == NULL) {
+				return BCME_NOMEM;
+			}
+
+			ASSERT(ISALIGNED(prot->h2d_dma_readindx_buf.va, 4));
+			bzero(prot->h2d_dma_readindx_buf.va, dma_block_size);
+			OSL_CACHE_FLUSH((void *)prot->h2d_dma_readindx_buf.va, dma_block_size);
+			DHD_ERROR(("H2D_READINDX_ARRAY_HOST %d-bytes "
+				"inited for dma'ing h2d-r indices\n",
+				prot->h2d_dma_readindx_buf_len));
+			break;
+
+		case DNGL_TO_HOST_DMA_WRITEINDX_BUFFER:
+			/* ring update dma buffer for completion write */
+			prot->d2h_dma_writeindx_buf_len = dma_block_size;
+			prot->d2h_dma_writeindx_buf.va = DMA_ALLOC_CONSISTENT(dhd->osh,
+				dma_block_size, DMA_ALIGN_LEN, &alloced,
+				&prot->d2h_dma_writeindx_buf.pa,
+				&prot->d2h_dma_writeindx_buf.dmah);
+
+			if (prot->d2h_dma_writeindx_buf.va == NULL) {
+				return BCME_NOMEM;
+			}
+
+			ASSERT(ISALIGNED(prot->d2h_dma_writeindx_buf.va, 4));
+			bzero(prot->d2h_dma_writeindx_buf.va, dma_block_size);
+			OSL_CACHE_FLUSH((void *)prot->d2h_dma_writeindx_buf.va, dma_block_size);
+			DHD_ERROR(("D2H_WRITEINDX_ARRAY_HOST %d-bytes "
+				"inited for dma'ing d2h-w indices\n",
+				prot->d2h_dma_writeindx_buf_len));
+			break;
+
+		case DNGL_TO_HOST_DMA_READINDX_BUFFER:
+			/* ring update dma buffer for completion read */
+			prot->d2h_dma_readindx_buf_len = dma_block_size;
+			prot->d2h_dma_readindx_buf.va = DMA_ALLOC_CONSISTENT(dhd->osh,
+				dma_block_size, DMA_ALIGN_LEN, &alloced,
+				&prot->d2h_dma_readindx_buf.pa,
+				&prot->d2h_dma_readindx_buf.dmah);
+
+			if (prot->d2h_dma_readindx_buf.va == NULL) {
+				return BCME_NOMEM;
+			}
+
+			ASSERT(ISALIGNED(prot->d2h_dma_readindx_buf.va, 4));
+			bzero(prot->d2h_dma_readindx_buf.va, dma_block_size);
+			OSL_CACHE_FLUSH((void *)prot->d2h_dma_readindx_buf.va, dma_block_size);
+			DHD_ERROR(("D2H_READINDX_ARRAY_HOST %d-bytes "
+				"inited for dma'ing d2h-r indices\n",
+				prot->d2h_dma_readindx_buf_len));
+			break;
+
+		default:
+			DHD_ERROR(("%s: Unexpected option\n", __FUNCTION__));
+			return BCME_BADOPTION;
+	}
+
+	return BCME_OK;
+
+}
+
+/* Unlink, frees allocated protocol memory (including dhd_prot) */
+void dhd_prot_detach(dhd_pub_t *dhd)
+{
+	dhd_prot_t *prot = dhd->prot;
+	/* Stop the protocol module */
+	if (dhd->prot) {
+
+		/* free up scratch buffer */
+		if (prot->d2h_dma_scratch_buf.va) {
+			DMA_FREE_CONSISTENT(dhd->osh, prot->d2h_dma_scratch_buf.va,
+			DMA_D2H_SCRATCH_BUF_LEN, prot->d2h_dma_scratch_buf.pa,
+			prot->d2h_dma_scratch_buf.dmah);
+			prot->d2h_dma_scratch_buf.va = NULL;
+		}
+		/* free up ring upd buffer for submission writes */
+		if (prot->h2d_dma_writeindx_buf.va) {
+			DMA_FREE_CONSISTENT(dhd->osh, prot->h2d_dma_writeindx_buf.va,
+			  prot->h2d_dma_writeindx_buf_len, prot->h2d_dma_writeindx_buf.pa,
+			  prot->h2d_dma_writeindx_buf.dmah);
+			prot->h2d_dma_writeindx_buf.va = NULL;
+		}
+
+		/* free up ring upd buffer for submission reads */
+		if (prot->h2d_dma_readindx_buf.va) {
+			DMA_FREE_CONSISTENT(dhd->osh, prot->h2d_dma_readindx_buf.va,
+			  prot->h2d_dma_readindx_buf_len, prot->h2d_dma_readindx_buf.pa,
+			  prot->h2d_dma_readindx_buf.dmah);
+			prot->h2d_dma_readindx_buf.va = NULL;
+		}
+
+		/* free up ring upd buffer for completion writes */
+		if (prot->d2h_dma_writeindx_buf.va) {
+			DMA_FREE_CONSISTENT(dhd->osh, prot->d2h_dma_writeindx_buf.va,
+			  prot->d2h_dma_writeindx_buf_len, prot->d2h_dma_writeindx_buf.pa,
+			  prot->d2h_dma_writeindx_buf.dmah);
+			prot->d2h_dma_writeindx_buf.va = NULL;
+		}
+
+		/* free up ring upd buffer for completion writes */
+		if (prot->d2h_dma_readindx_buf.va) {
+			DMA_FREE_CONSISTENT(dhd->osh, prot->d2h_dma_readindx_buf.va,
+			  prot->d2h_dma_readindx_buf_len, prot->d2h_dma_readindx_buf.pa,
+			  prot->d2h_dma_readindx_buf.dmah);
+			prot->d2h_dma_readindx_buf.va = NULL;
+		}
+
+		/* ioctl return buffer */
+		if (prot->retbuf.va) {
+			DMA_FREE_CONSISTENT(dhd->osh, dhd->prot->retbuf.va,
+			IOCT_RETBUF_SIZE, dhd->prot->retbuf.pa, dhd->prot->retbuf.dmah);
+			dhd->prot->retbuf.va = NULL;
+		}
+
+		/* ioctl request buffer */
+		if (prot->ioctbuf.va) {
+			DMA_FREE_CONSISTENT(dhd->osh, dhd->prot->ioctbuf.va,
+			IOCT_RETBUF_SIZE, dhd->prot->ioctbuf.pa, dhd->prot->ioctbuf.dmah);
+
+			dhd->prot->ioctbuf.va = NULL;
+		}
+
+
+		/* 1.0	 H2D	TXPOST ring */
+		dhd_prot_ring_detach(dhd, prot->h2dring_txp_subn);
+		/* 2.0	 H2D	RXPOST ring */
+		dhd_prot_ring_detach(dhd, prot->h2dring_rxp_subn);
+		/* 3.0	 H2D	CTRL_SUBMISSION ring */
+		dhd_prot_ring_detach(dhd, prot->h2dring_ctrl_subn);
+		/* 4.0	 D2H	TX_COMPLETION ring */
+		dhd_prot_ring_detach(dhd, prot->d2hring_tx_cpln);
+		/* 5.0	 D2H	RX_COMPLETION ring */
+		dhd_prot_ring_detach(dhd, prot->d2hring_rx_cpln);
+		/* 6.0	 D2H	CTRL_COMPLETION ring */
+		dhd_prot_ring_detach(dhd, prot->d2hring_ctrl_cpln);
+
+		NATIVE_TO_PKTID_FINI(dhd->prot->pktid_map_handle);
+
+#ifndef CONFIG_DHD_USE_STATIC_BUF
+		MFREE(dhd->osh, dhd->prot, sizeof(dhd_prot_t));
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+
+		dhd->prot = NULL;
+	}
+}
+
+void
+dhd_prot_rx_dataoffset(dhd_pub_t *dhd, uint32 rx_offset)
+{
+	dhd_prot_t *prot = dhd->prot;
+	prot->rx_dataoffset = rx_offset;
+}
+
+
+/* Initialize protocol: sync w/dongle state.
+ * Sets dongle media info (iswl, drv_version, mac address).
+ */
+int dhd_sync_with_dongle(dhd_pub_t *dhd)
+{
+	int ret = 0;
+	wlc_rev_info_t revinfo;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	/* Post event buffer after shim layer is attached */
+	ret = dhd_msgbuf_rxbuf_post_event_bufs(dhd);
+
+
+	/* Get the device rev info */
+	memset(&revinfo, 0, sizeof(revinfo));
+	ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_REVINFO, &revinfo, sizeof(revinfo), FALSE, 0);
+	if (ret < 0)
+		goto done;
+
+	dhd_process_cid_mac(dhd, TRUE);
+
+	ret = dhd_preinit_ioctls(dhd);
+
+	if (!ret)
+		dhd_process_cid_mac(dhd, FALSE);
+
+	/* Always assumes wl for now */
+	dhd->iswl = TRUE;
+done:
+	return ret;
+}
+
+/* This function does all necessary initialization needed
+* for IOCTL/IOVAR path
+*/
+int dhd_prot_init(dhd_pub_t *dhd)
+{
+	int ret = 0;
+	dhd_prot_t *prot = dhd->prot;
+
+	/* Max pkts in ring */
+	prot->max_tx_count = H2DRING_TXPOST_MAX_ITEM;
+
+	DHD_INFO(("%s:%d: MAX_TX_COUNT = %d\n", __FUNCTION__, __LINE__, prot->max_tx_count));
+
+	/* Read max rx packets supported by dongle */
+	dhd_bus_cmn_readshared(dhd->bus, &prot->max_rxbufpost, MAX_HOST_RXBUFS, 0);
+	if (prot->max_rxbufpost == 0) {
+		/* This would happen if the dongle firmware is not */
+		/* using the latest shared structure template */
+		prot->max_rxbufpost = DEFAULT_RX_BUFFERS_TO_POST;
+	}
+	DHD_INFO(("%s:%d: MAX_RXBUFPOST = %d\n", __FUNCTION__, __LINE__, prot->max_rxbufpost));
+
+	prot->max_eventbufpost = DHD_FLOWRING_MAX_EVENTBUF_POST;
+	prot->max_ioctlrespbufpost = DHD_FLOWRING_MAX_IOCTLRESPBUF_POST;
+
+	prot->active_tx_count = 0;
+	prot->data_seq_no = 0;
+	prot->ioctl_seq_no = 0;
+	prot->txp_threshold = TXP_FLUSH_MAX_ITEMS_FLUSH_CNT;
+
+	prot->ioctl_trans_id = 1;
+
+	/* Register the interrupt function upfront */
+	/* remove corerev checks in data path */
+	prot->mb_ring_fn = dhd_bus_get_mbintr_fn(dhd->bus);
+
+	/* Initialise rings */
+	/* 1.0	 H2D	TXPOST ring */
+	if (dhd_bus_is_txmode_push(dhd->bus)) {
+		dhd_ring_init(dhd, prot->h2dring_txp_subn);
+	}
+
+	/* 2.0	 H2D	RXPOST ring */
+	dhd_ring_init(dhd, prot->h2dring_rxp_subn);
+	/* 3.0	 H2D	CTRL_SUBMISSION ring */
+	dhd_ring_init(dhd, prot->h2dring_ctrl_subn);
+	/* 4.0	 D2H	TX_COMPLETION ring */
+	dhd_ring_init(dhd, prot->d2hring_tx_cpln);
+	/* 5.0	 D2H	RX_COMPLETION ring */
+	dhd_ring_init(dhd, prot->d2hring_rx_cpln);
+	/* 6.0	 D2H	CTRL_COMPLETION ring */
+	dhd_ring_init(dhd, prot->d2hring_ctrl_cpln);
+
+	/* init the scratch buffer */
+	dhd_bus_cmn_writeshared(dhd->bus, &prot->d2h_dma_scratch_buf.pa,
+		sizeof(prot->d2h_dma_scratch_buf.pa), DNGL_TO_HOST_DMA_SCRATCH_BUFFER, 0);
+	dhd_bus_cmn_writeshared(dhd->bus, &prot->d2h_dma_scratch_buf_len,
+		sizeof(prot->d2h_dma_scratch_buf_len), DNGL_TO_HOST_DMA_SCRATCH_BUFFER_LEN, 0);
+
+	/* If supported by the host, indicate the memory block
+	 * for comletion writes / submission reads to shared space
+	 */
+	if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) {
+		dhd_bus_cmn_writeshared(dhd->bus, &prot->d2h_dma_writeindx_buf.pa,
+			sizeof(prot->d2h_dma_writeindx_buf.pa),
+			DNGL_TO_HOST_DMA_WRITEINDX_BUFFER, 0);
+		dhd_bus_cmn_writeshared(dhd->bus, &prot->h2d_dma_readindx_buf.pa,
+			sizeof(prot->h2d_dma_readindx_buf.pa),
+			HOST_TO_DNGL_DMA_READINDX_BUFFER, 0);
+	}
+
+	if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support)) {
+		dhd_bus_cmn_writeshared(dhd->bus, &prot->h2d_dma_writeindx_buf.pa,
+			sizeof(prot->h2d_dma_writeindx_buf.pa),
+			HOST_TO_DNGL_DMA_WRITEINDX_BUFFER, 0);
+		dhd_bus_cmn_writeshared(dhd->bus, &prot->d2h_dma_readindx_buf.pa,
+			sizeof(prot->d2h_dma_readindx_buf.pa),
+			DNGL_TO_HOST_DMA_READINDX_BUFFER, 0);
+
+	}
+
+	ret = dhd_msgbuf_rxbuf_post(dhd);
+	ret = dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
+
+	return ret;
+}
+
+#define DHD_DBG_SHOW_METADATA	0
+#if DHD_DBG_SHOW_METADATA
+static void BCMFASTPATH
+dhd_prot_print_metadata(dhd_pub_t *dhd, void *ptr, int len)
+{
+	uint8 tlv_t;
+	uint8 tlv_l;
+	uint8 *tlv_v = (uint8 *)ptr;
+
+	if (len <= BCMPCIE_D2H_METADATA_HDRLEN)
+		return;
+
+	len -= BCMPCIE_D2H_METADATA_HDRLEN;
+	tlv_v += BCMPCIE_D2H_METADATA_HDRLEN;
+
+	while (len > TLV_HDR_LEN) {
+		tlv_t = tlv_v[TLV_TAG_OFF];
+		tlv_l = tlv_v[TLV_LEN_OFF];
+
+		len -= TLV_HDR_LEN;
+		tlv_v += TLV_HDR_LEN;
+		if (len < tlv_l)
+			break;
+		if ((tlv_t == 0) || (tlv_t == WLFC_CTL_TYPE_FILLER))
+			break;
+
+		switch (tlv_t) {
+		case WLFC_CTL_TYPE_TXSTATUS:
+			bcm_print_bytes("METADATA TX_STATUS", tlv_v, tlv_l);
+			break;
+
+		case WLFC_CTL_TYPE_RSSI:
+			bcm_print_bytes("METADATA RX_RSSI", tlv_v, tlv_l);
+			break;
+
+		case WLFC_CTL_TYPE_FIFO_CREDITBACK:
+			bcm_print_bytes("METADATA FIFO_CREDITBACK", tlv_v, tlv_l);
+			break;
+
+		case WLFC_CTL_TYPE_TX_ENTRY_STAMP:
+			bcm_print_bytes("METADATA TX_ENTRY", tlv_v, tlv_l);
+			break;
+
+		case WLFC_CTL_TYPE_RX_STAMP:
+			bcm_print_bytes("METADATA RX_TIMESTAMP", tlv_v, tlv_l);
+			break;
+
+		case WLFC_CTL_TYPE_TRANS_ID:
+			bcm_print_bytes("METADATA TRANS_ID", tlv_v, tlv_l);
+			break;
+
+		case WLFC_CTL_TYPE_COMP_TXSTATUS:
+			bcm_print_bytes("METADATA COMP_TXSTATUS", tlv_v, tlv_l);
+			break;
+
+		default:
+			bcm_print_bytes("METADATA UNKNOWN", tlv_v, tlv_l);
+			break;
+		}
+
+		len -= tlv_l;
+		tlv_v += tlv_l;
+	}
+}
+#endif /* DHD_DBG_SHOW_METADATA */
+
+static INLINE void BCMFASTPATH
+dhd_prot_packet_free(dhd_pub_t *dhd, uint32 pktid, uint8 buf_type)
+{
+	void *PKTBUF;
+	dmaaddr_t pa;
+	uint32 pa_len;
+	PKTBUF = PKTID_TO_NATIVE(dhd->prot->pktid_map_handle, pktid, pa,
+				pa_len, buf_type);
+
+	if (PKTBUF) {
+		DMA_UNMAP(dhd->osh, pa, (uint) pa_len, DMA_TX, 0, 0);
+#ifdef DHD_USE_STATIC_IOCTLBUF
+		if (buf_type == BUFF_TYPE_IOCTL_RX)
+			PKTFREE_STATIC(dhd->osh, PKTBUF, FALSE);
+		else
+			PKTFREE(dhd->osh, PKTBUF, FALSE);
+#else
+		PKTFREE(dhd->osh, PKTBUF, FALSE);
+#endif
+	}
+	return;
+}
+
+static INLINE void * BCMFASTPATH
+dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid, uint8 buf_type)
+{
+	void *PKTBUF;
+	dmaaddr_t pa;
+	uint32 pa_len;
+	PKTBUF = PKTID_TO_NATIVE(dhd->prot->pktid_map_handle, pktid, pa, pa_len, buf_type);
+	if (PKTBUF) {
+		DMA_UNMAP(dhd->osh, pa, (uint) pa_len, DMA_RX, 0, 0);
+	}
+
+	return PKTBUF;
+}
+
+static int BCMFASTPATH
+dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd)
+{
+	dhd_prot_t *prot = dhd->prot;
+	int16 fillbufs;
+	uint16 cnt = 64;
+	int retcount = 0;
+
+	fillbufs = prot->max_rxbufpost - prot->rxbufpost;
+	while (fillbufs > 0) {
+		cnt--;
+		if (cnt == 0) {
+			/* find a better way to reschedule rx buf post if space not available */
+			DHD_ERROR(("h2d rx post ring not available to post host buffers \n"));
+			DHD_ERROR(("Current posted host buf count %d \n", prot->rxbufpost));
+			break;
+		}
+
+		/* Post in a burst of 8 buffers ata time */
+		fillbufs = MIN(fillbufs, RX_BUF_BURST);
+
+		/* Post buffers */
+		retcount = dhd_prot_rxbufpost(dhd, fillbufs);
+
+		if (retcount > 0) {
+			prot->rxbufpost += (uint16)retcount;
+
+			/* how many more to post */
+			fillbufs = prot->max_rxbufpost - prot->rxbufpost;
+		} else {
+			/* Make sure we don't run loop any further */
+			fillbufs = 0;
+		}
+	}
+
+	return 0;
+}
+
+/* Post count no of rx buffers down to dongle */
+static int BCMFASTPATH
+dhd_prot_rxbufpost(dhd_pub_t *dhd, uint16 count)
+{
+	void *p;
+	uint16 pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
+	uint8 *rxbuf_post_tmp;
+	host_rxbuf_post_t *rxbuf_post;
+	void* msg_start;
+	dmaaddr_t physaddr;
+	uint32 pktlen;
+	dhd_prot_t *prot = dhd->prot;
+	msgbuf_ring_t * ring = prot->h2dring_rxp_subn;
+	uint8 i = 0;
+	uint16 alloced = 0;
+	unsigned long flags;
+
+	DHD_GENERAL_LOCK(dhd, flags);
+	/* Claim space for 'count' no of messages */
+	msg_start = (void *)dhd_alloc_ring_space(dhd, ring, count, &alloced);
+	DHD_GENERAL_UNLOCK(dhd, flags);
+
+	if (msg_start == NULL) {
+		DHD_INFO(("%s:%d: Rxbufpost Msgbuf Not available\n", __FUNCTION__, __LINE__));
+		return -1;
+	}
+	/* if msg_start !=  NULL, we should have alloced space for atleast 1 item */
+	ASSERT(alloced > 0);
+
+	rxbuf_post_tmp = (uint8*)msg_start;
+
+	/* loop through each message */
+	for (i = 0; i < alloced; i++) {
+		rxbuf_post = (host_rxbuf_post_t *)rxbuf_post_tmp;
+		/* Create a rx buffer */
+		if ((p = PKTGET(dhd->osh, pktsz, FALSE)) == NULL) {
+			DHD_ERROR(("%s:%d: PKTGET for rxbuf failed\n", __FUNCTION__, __LINE__));
+			return -1;
+		}
+
+		pktlen = PKTLEN(dhd->osh, p);
+		physaddr = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0);
+		if (PHYSADDRISZERO(physaddr)) {
+			if (RING_WRITE_PTR(ring) < alloced - i)
+				RING_WRITE_PTR(ring) = RING_MAX_ITEM(ring) - alloced + i;
+			else
+				RING_WRITE_PTR(ring) -= alloced - i;
+			alloced = i;
+			DMA_UNMAP(dhd->osh, physaddr, pktlen, DMA_RX, 0, 0);
+			PKTFREE(dhd->osh, p, FALSE);
+			DHD_ERROR(("Invalid phyaddr 0\n"));
+			ASSERT(0);
+			break;
+		}
+
+		PKTPULL(dhd->osh, p, prot->rx_metadata_offset);
+		pktlen = PKTLEN(dhd->osh, p);
+
+		/* CMN msg header */
+		rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_RXBUF_POST;
+		rxbuf_post->cmn_hdr.if_id = 0;
+
+		/* get the lock before calling NATIVE_TO_PKTID */
+		DHD_GENERAL_LOCK(dhd, flags);
+
+		rxbuf_post->cmn_hdr.request_id =
+			htol32(NATIVE_TO_PKTID(dhd->prot->pktid_map_handle, p, physaddr,
+			pktlen, DMA_RX, BUFF_TYPE_DATA_RX));
+
+		/* free lock */
+		DHD_GENERAL_UNLOCK(dhd, flags);
+
+		if (rxbuf_post->cmn_hdr.request_id == DHD_PKTID_INVALID) {
+			if (RING_WRITE_PTR(ring) < alloced - i)
+				RING_WRITE_PTR(ring) = RING_MAX_ITEM(ring) - alloced + i;
+			else
+				RING_WRITE_PTR(ring) -= alloced - i;
+			alloced = i;
+			DMA_UNMAP(dhd->osh, physaddr, pktlen, DMA_RX, 0, 0);
+			PKTFREE(dhd->osh, p, FALSE);
+			DHD_ERROR(("Pktid pool depleted.\n"));
+			break;
+		}
+
+		rxbuf_post->data_buf_len = htol16((uint16)pktlen);
+		rxbuf_post->data_buf_addr.high_addr = htol32(PHYSADDRHI(physaddr));
+		rxbuf_post->data_buf_addr.low_addr =
+			htol32(PHYSADDRLO(physaddr) + prot->rx_metadata_offset);
+
+		if (prot->rx_metadata_offset) {
+			rxbuf_post->metadata_buf_len = prot->rx_metadata_offset;
+			rxbuf_post->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(physaddr));
+			rxbuf_post->metadata_buf_addr.low_addr  = htol32(PHYSADDRLO(physaddr));
+		} else {
+			rxbuf_post->metadata_buf_len = 0;
+			rxbuf_post->metadata_buf_addr.high_addr = 0;
+			rxbuf_post->metadata_buf_addr.low_addr  = 0;
+		}
+
+		/* Move rxbuf_post_tmp to next item */
+		rxbuf_post_tmp = rxbuf_post_tmp + RING_LEN_ITEMS(ring);
+	}
+	/* Update the write pointer in TCM & ring bell */
+	if (alloced > 0)
+		prot_ring_write_complete(dhd, prot->h2dring_rxp_subn, msg_start, alloced);
+
+	return alloced;
+}
+
+static int
+dhd_prot_rxbufpost_ctrl(dhd_pub_t *dhd, bool event_buf)
+{
+	void *p;
+	uint16 pktsz;
+	ioctl_resp_evt_buf_post_msg_t *rxbuf_post;
+	dmaaddr_t physaddr;
+	uint32 pktlen;
+	dhd_prot_t *prot = dhd->prot;
+	uint16 alloced = 0;
+	unsigned long flags;
+	uint8 buf_type;
+
+	if (event_buf) {
+		/* Allocate packet for event buffer post */
+		pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
+		buf_type = BUFF_TYPE_EVENT_RX;
+	} else {
+		/* Allocate packet for ctrl/ioctl buffer post */
+		pktsz = DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ;
+		buf_type = BUFF_TYPE_IOCTL_RX;
+	}
+
+#ifdef DHD_USE_STATIC_IOCTLBUF
+	if (!event_buf)
+		p = PKTGET_STATIC(dhd->osh, pktsz, FALSE);
+	else
+		p = PKTGET(dhd->osh, pktsz, FALSE);
+#else
+	p = PKTGET(dhd->osh, pktsz, FALSE);
+#endif
+
+	pktlen = PKTLEN(dhd->osh, p);
+	physaddr = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0);
+	if (PHYSADDRISZERO(physaddr)) {
+
+		DHD_ERROR(("Invalid phyaddr 0\n"));
+		ASSERT(0);
+		goto free_pkt_return;
+	}
+
+	DHD_GENERAL_LOCK(dhd, flags);
+	rxbuf_post = (ioctl_resp_evt_buf_post_msg_t *)dhd_alloc_ring_space(dhd,
+		prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced);
+	if (rxbuf_post == NULL) {
+		DHD_GENERAL_UNLOCK(dhd, flags);
+		DHD_ERROR(("%s:%d: Ctrl submit Msgbuf Not available to post buffer \n",
+			__FUNCTION__, __LINE__));
+		DMA_UNMAP(dhd->osh, physaddr, pktlen, DMA_RX, 0, 0);
+		goto free_pkt_return;
+	}
+
+	/* CMN msg header */
+	if (event_buf)
+		rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_EVENT_BUF_POST;
+	else
+		rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_IOCTLRESP_BUF_POST;
+	rxbuf_post->cmn_hdr.if_id = 0;
+
+	rxbuf_post->cmn_hdr.request_id =
+		htol32(NATIVE_TO_PKTID(dhd->prot->pktid_map_handle, p, physaddr,
+			pktlen, DMA_RX, buf_type));
+
+	if (rxbuf_post->cmn_hdr.request_id == DHD_PKTID_INVALID) {
+		if (RING_WRITE_PTR(prot->h2dring_ctrl_subn) == 0)
+			RING_WRITE_PTR(prot->h2dring_ctrl_subn) =
+				RING_MAX_ITEM(prot->h2dring_ctrl_subn) - 1;
+		else
+			RING_WRITE_PTR(prot->h2dring_ctrl_subn)--;
+		DHD_GENERAL_UNLOCK(dhd, flags);
+		DMA_UNMAP(dhd->osh, physaddr, pktlen, DMA_RX, 0, 0);
+		goto free_pkt_return;
+	}
+
+	rxbuf_post->cmn_hdr.flags = 0;
+	rxbuf_post->host_buf_len = htol16((uint16)PKTLEN(dhd->osh, p));
+	rxbuf_post->host_buf_addr.high_addr = htol32(PHYSADDRHI(physaddr));
+	rxbuf_post->host_buf_addr.low_addr  = htol32(PHYSADDRLO(physaddr));
+
+	/* Update the write pointer in TCM & ring bell */
+	prot_ring_write_complete(dhd, prot->h2dring_ctrl_subn, rxbuf_post,
+		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
+	DHD_GENERAL_UNLOCK(dhd, flags);
+
+	return 1;
+
+free_pkt_return:
+#ifdef DHD_USE_STATIC_IOCTLBUF
+	if (buf_type == BUFF_TYPE_IOCTL_RX)
+		PKTFREE_STATIC(dhd->osh, p, FALSE);
+	else
+		PKTFREE(dhd->osh, p, FALSE);
+#else
+	PKTFREE(dhd->osh, p, FALSE);
+#endif
+
+	return -1;
+}
+
+static uint16
+dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, bool event_buf, uint32 max_to_post)
+{
+	uint32 i = 0;
+	int32 ret_val;
+
+	DHD_INFO(("max to post %d, event %d \n", max_to_post, event_buf));
+	while (i < max_to_post) {
+		ret_val  = dhd_prot_rxbufpost_ctrl(dhd, event_buf);
+		if (ret_val < 0)
+			break;
+		i++;
+	}
+	DHD_INFO(("posted %d buffers to event_pool/ioctl_resp_pool %d\n", i, event_buf));
+	return (uint16)i;
+}
+
+static int
+dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *dhd)
+{
+	dhd_prot_t *prot = dhd->prot;
+	uint16 retcnt = 0;
+
+	DHD_INFO(("ioctl resp buf post\n"));
+	retcnt = dhd_msgbuf_rxbuf_post_ctrlpath(dhd, FALSE,
+		prot->max_ioctlrespbufpost - prot->cur_ioctlresp_bufs_posted);
+	prot->cur_ioctlresp_bufs_posted += retcnt;
+	return 0;
+}
+
+static int
+dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *dhd)
+{
+	dhd_prot_t *prot = dhd->prot;
+	prot->cur_event_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd, TRUE,
+		prot->max_eventbufpost - prot->cur_event_bufs_posted);
+	return 0;
+}
+
+int BCMFASTPATH
+dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd)
+{
+	dhd_prot_t *prot = dhd->prot;
+
+	/* Process all the messages - DTOH direction */
+	while (TRUE) {
+		uint8 *src_addr;
+		uint16 src_len;
+		/* Store current read pointer */
+		/* Read pointer will be updated in prot_early_upd_rxcpln_read_idx */
+		prot_store_rxcpln_read_idx(dhd, prot->d2hring_rx_cpln);
+		/* Get the message from ring */
+		src_addr = prot_get_src_addr(dhd, prot->d2hring_rx_cpln, &src_len);
+		if (src_addr == NULL)
+			break;
+
+		/* Prefetch data to populate the cache */
+		OSL_PREFETCH(src_addr);
+
+		if (dhd_prot_process_msgtype(dhd, prot->d2hring_rx_cpln, src_addr,
+			src_len) != BCME_OK) {
+			prot_upd_read_idx(dhd, prot->d2hring_rx_cpln);
+			DHD_ERROR(("%s: Error at  process rxpl msgbuf of len %d\n",
+				__FUNCTION__, src_len));
+		}
+
+		/* Update read pointer */
+		prot_upd_read_idx(dhd, prot->d2hring_rx_cpln);
+	}
+
+	return 0;
+}
+
+void
+dhd_prot_update_txflowring(dhd_pub_t *dhd, uint16 flow_id, void *msgring_info)
+{
+	uint16 r_index = 0;
+	msgbuf_ring_t *ring = (msgbuf_ring_t *)msgring_info;
+
+	/* Update read pointer */
+	if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) {
+		r_index = dhd_get_dmaed_index(dhd, H2D_DMA_READINDX, ring->idx);
+		ring->ringstate->r_offset = r_index;
+	}
+
+	DHD_TRACE(("flow %d, write %d read %d \n\n", flow_id, RING_WRITE_PTR(ring),
+		RING_READ_PTR(ring)));
+
+	/* Need more logic here, but for now use it directly */
+	dhd_bus_schedule_queue(dhd->bus, flow_id, TRUE);
+}
+
+
+int BCMFASTPATH
+dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd)
+{
+	dhd_prot_t *prot = dhd->prot;
+
+	/* Process all the messages - DTOH direction */
+	while (TRUE) {
+		uint8 *src_addr;
+		uint16 src_len;
+
+		src_addr = prot_get_src_addr(dhd, prot->d2hring_tx_cpln, &src_len);
+		if (src_addr == NULL)
+			break;
+
+		/* Prefetch data to populate the cache */
+		OSL_PREFETCH(src_addr);
+
+		if (dhd_prot_process_msgtype(dhd, prot->d2hring_tx_cpln, src_addr,
+			src_len) != BCME_OK) {
+			DHD_ERROR(("%s: Error at  process txcmpl msgbuf of len %d\n",
+				__FUNCTION__, src_len));
+		}
+
+		/* Write to dngl rd ptr */
+		prot_upd_read_idx(dhd, prot->d2hring_tx_cpln);
+	}
+
+	return 0;
+}
+
+int BCMFASTPATH
+dhd_prot_process_ctrlbuf(dhd_pub_t * dhd)
+{
+	dhd_prot_t *prot = dhd->prot;
+
+	/* Process all the messages - DTOH direction */
+	while (TRUE) {
+		uint8 *src_addr;
+		uint16 src_len;
+		src_addr = prot_get_src_addr(dhd, prot->d2hring_ctrl_cpln, &src_len);
+
+		if (src_addr == NULL) {
+			break;
+		}
+
+		/* Prefetch data to populate the cache */
+		OSL_PREFETCH(src_addr);
+		if (dhd_prot_process_msgtype(dhd, prot->d2hring_ctrl_cpln, src_addr,
+			src_len) != BCME_OK) {
+			DHD_ERROR(("%s: Error at  process ctrlmsgbuf of len %d\n",
+				__FUNCTION__, src_len));
+		}
+
+		/* Write to dngl rd ptr */
+		prot_upd_read_idx(dhd, prot->d2hring_ctrl_cpln);
+	}
+
+	return 0;
+}
+
+static int BCMFASTPATH
+dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8* buf, uint16 len)
+{
+	dhd_prot_t *prot = dhd->prot;
+	uint32 cur_dma_len = 0;
+	int ret = BCME_OK;
+
+	DHD_INFO(("%s: process msgbuf of len %d\n", __FUNCTION__, len));
+
+	while (len > 0) {
+		ASSERT(len > (sizeof(cmn_msg_hdr_t) + prot->rx_dataoffset));
+		if (prot->rx_dataoffset) {
+			cur_dma_len = *(uint32 *) buf;
+			ASSERT(cur_dma_len <= len);
+			buf += prot->rx_dataoffset;
+			len -= (uint16)prot->rx_dataoffset;
+		}
+		else {
+			cur_dma_len = len;
+		}
+		if (dhd_process_msgtype(dhd, ring, buf, (uint16)cur_dma_len) != BCME_OK) {
+			DHD_ERROR(("%s: Error at  process msg of dmalen %d\n",
+				__FUNCTION__, cur_dma_len));
+			ret = BCME_ERROR;
+		}
+
+		len -= (uint16)cur_dma_len;
+		buf += cur_dma_len;
+	}
+	return ret;
+}
+
+#define PCIE_M2M_D2H_DMA_WAIT_TRIES     256
+#define PCIE_D2H_RESET_MARK             0xdeadbeef
+void dhd_msgbuf_d2h_check_cmplt(msgbuf_ring_t *ring, void *msg)
+{
+	uint32 tries;
+	uint32 *marker = (uint32 *)msg + RING_LEN_ITEMS(ring) / sizeof(uint32) - 1;
+
+	for (tries = 0; tries < PCIE_M2M_D2H_DMA_WAIT_TRIES; tries++) {
+		if (*(volatile uint32 *)marker != PCIE_D2H_RESET_MARK)
+			return;
+		OSL_CACHE_INV(msg, RING_LEN_ITEMS(ring));
+	}
+
+	/* only print error for data ring */
+	if (ring->idx == BCMPCIE_D2H_MSGRING_TX_COMPLETE ||
+		ring->idx == BCMPCIE_D2H_MSGRING_RX_COMPLETE)
+		DHD_ERROR(("%s: stale msgbuf content after %d retries\n",
+			__FUNCTION__, tries));
+}
+
+static int BCMFASTPATH
+dhd_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8* buf, uint16 len)
+{
+	uint16 pktlen = len;
+	uint16 msglen;
+	uint8 msgtype;
+	cmn_msg_hdr_t *msg = NULL;
+	int ret = BCME_OK;
+
+	ASSERT(ring && ring->ringmem);
+	msglen = RING_LEN_ITEMS(ring);
+	if (msglen == 0) {
+		DHD_ERROR(("%s: ringidx %d, msglen is %d, pktlen is %d \n",
+			__FUNCTION__, ring->idx, msglen, pktlen));
+		return BCME_ERROR;
+	}
+
+	while (pktlen > 0) {
+		msg = (cmn_msg_hdr_t *)buf;
+
+		dhd_msgbuf_d2h_check_cmplt(ring, msg);
+
+		msgtype = msg->msg_type;
+
+
+
+		DHD_INFO(("msgtype %d, msglen is %d, pktlen is %d \n",
+			msgtype, msglen, pktlen));
+		if (msgtype == MSG_TYPE_LOOPBACK) {
+			bcm_print_bytes("LPBK RESP: ", (uint8 *)msg, msglen);
+			DHD_ERROR((" MSG_TYPE_LOOPBACK, len %d\n", msglen));
+		}
+
+		ASSERT(msgtype < DHD_PROT_FUNCS);
+		if (table_lookup[msgtype]) {
+			table_lookup[msgtype](dhd, buf, msglen);
+		}
+
+		if (pktlen < msglen) {
+			ret = BCME_ERROR;
+			goto done;
+		}
+		pktlen = pktlen - msglen;
+		buf = buf + msglen;
+		if (msgtype == MSG_TYPE_RX_CMPLT)
+				prot_early_upd_rxcpln_read_idx(dhd,
+					dhd->prot->d2hring_rx_cpln);
+	}
+done:
+
+#ifdef DHD_RX_CHAINING
+	dhd_rxchain_commit(dhd);
+#endif
+
+	return ret;
+}
+
+static void
+dhd_prot_ringstatus_process(dhd_pub_t *dhd, void * buf, uint16 msglen)
+{
+	pcie_ring_status_t * ring_status = (pcie_ring_status_t *)buf;
+	DHD_ERROR(("ring status: request_id %d, status 0x%04x, flow ring %d, w_offset %d \n",
+		ring_status->cmn_hdr.request_id, ring_status->compl_hdr.status,
+		ring_status->compl_hdr.flow_ring_id, ring_status->write_idx));
+	/* How do we track this to pair it with ??? */
+	return;
+}
+
+static void
+dhd_prot_genstatus_process(dhd_pub_t *dhd, void * buf, uint16 msglen)
+{
+	pcie_gen_status_t * gen_status = (pcie_gen_status_t *)buf;
+	DHD_ERROR(("gen status: request_id %d, status 0x%04x, flow ring %d \n",
+		gen_status->cmn_hdr.request_id, gen_status->compl_hdr.status,
+		gen_status->compl_hdr.flow_ring_id));
+
+	/* How do we track this to pair it with ??? */
+	return;
+}
+
+static void
+dhd_prot_ioctack_process(dhd_pub_t *dhd, void * buf, uint16 msglen)
+{
+	ioctl_req_ack_msg_t * ioct_ack = (ioctl_req_ack_msg_t *)buf;
+
+	DHD_CTL(("ioctl req ack: request_id %d, status 0x%04x, flow ring %d \n",
+		ioct_ack->cmn_hdr.request_id, ioct_ack->compl_hdr.status,
+		ioct_ack->compl_hdr.flow_ring_id));
+	if (ioct_ack->compl_hdr.status != 0)  {
+		DHD_ERROR(("got an error status for the ioctl request...need to handle that\n"));
+	}
+
+	memset(buf, 0 , msglen);
+	ioct_ack->marker = PCIE_D2H_RESET_MARK;
+}
+static void
+dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void * buf, uint16 msglen)
+{
+	uint16 status;
+	uint32 resp_len = 0;
+	uint32 pkt_id, xt_id;
+	ioctl_comp_resp_msg_t * ioct_resp = (ioctl_comp_resp_msg_t *)buf;
+
+	resp_len = ltoh16(ioct_resp->resp_len);
+	xt_id = ltoh16(ioct_resp->trans_id);
+	pkt_id = ltoh32(ioct_resp->cmn_hdr.request_id);
+	status = ioct_resp->compl_hdr.status;
+
+	memset(buf, 0 , msglen);
+	ioct_resp->marker = PCIE_D2H_RESET_MARK;
+
+	DHD_CTL(("IOCTL_COMPLETE: pktid %x xtid %d status %x resplen %d\n",
+		pkt_id, xt_id, status, resp_len));
+
+	dhd_bus_update_retlen(dhd->bus, sizeof(ioctl_comp_resp_msg_t), pkt_id, status, resp_len);
+	dhd_os_ioctl_resp_wake(dhd);
+}
+
+static void BCMFASTPATH
+dhd_prot_txstatus_process(dhd_pub_t *dhd, void * buf, uint16 msglen)
+{
+	dhd_prot_t *prot = dhd->prot;
+	host_txbuf_cmpl_t * txstatus;
+	unsigned long flags;
+	uint32 pktid;
+	void *pkt;
+
+	/* locks required to protect circular buffer accesses */
+	DHD_GENERAL_LOCK(dhd, flags);
+
+	txstatus = (host_txbuf_cmpl_t *)buf;
+	pktid = ltoh32(txstatus->cmn_hdr.request_id);
+
+	DHD_INFO(("txstatus for pktid 0x%04x\n", pktid));
+	if (prot->active_tx_count)
+		prot->active_tx_count--;
+	else
+		DHD_ERROR(("Extra packets are freed\n"));
+
+	ASSERT(pktid != 0);
+	pkt = dhd_prot_packet_get(dhd, pktid, BUFF_TYPE_DATA_TX);
+	if (pkt) {
+#if defined(BCMPCIE)
+		dhd_txcomplete(dhd, pkt, true);
+#endif
+
+#if DHD_DBG_SHOW_METADATA
+		if (dhd->prot->tx_metadata_offset && txstatus->metadata_len) {
+			uchar *ptr;
+			/* The Ethernet header of TX frame was copied and removed.
+			 * Here, move the data pointer forward by Ethernet header size.
+			 */
+			PKTPULL(dhd->osh, pkt, ETHER_HDR_LEN);
+			ptr = PKTDATA(dhd->osh, pkt)  - (dhd->prot->tx_metadata_offset);
+			bcm_print_bytes("txmetadata", ptr, txstatus->metadata_len);
+			dhd_prot_print_metadata(dhd, ptr, txstatus->metadata_len);
+		}
+#endif /* DHD_DBG_SHOW_METADATA */
+		PKTFREE(dhd->osh, pkt, TRUE);
+	}
+
+	memset(buf, 0 , msglen);
+	txstatus->marker = PCIE_D2H_RESET_MARK;
+
+	DHD_GENERAL_UNLOCK(dhd, flags);
+
+	return;
+}
+
+static void
+dhd_prot_event_process(dhd_pub_t *dhd, void* buf, uint16 len)
+{
+	wlevent_req_msg_t *evnt;
+	uint32 bufid;
+	uint16 buflen;
+	int ifidx = 0;
+	void* pkt;
+	unsigned long flags;
+	dhd_prot_t *prot = dhd->prot;
+#ifdef DHD_WAKE_STATUS
+	int pkt_wake = bcmpcie_set_get_wake(dhd->bus, 0);
+#endif
+	/* Event complete header */
+	evnt = (wlevent_req_msg_t *)buf;
+	bufid = ltoh32(evnt->cmn_hdr.request_id);
+	buflen = ltoh16(evnt->event_data_len);
+
+	ifidx = BCMMSGBUF_API_IFIDX(&evnt->cmn_hdr);
+
+	/* Post another rxbuf to the device */
+	if (prot->cur_event_bufs_posted)
+		prot->cur_event_bufs_posted--;
+	dhd_msgbuf_rxbuf_post_event_bufs(dhd);
+
+	memset(buf, 0 , len);
+	evnt->marker = PCIE_D2H_RESET_MARK;
+
+	/* locks required to protect pktid_map */
+	DHD_GENERAL_LOCK(dhd, flags);
+	pkt = dhd_prot_packet_get(dhd, ltoh32(bufid), BUFF_TYPE_EVENT_RX);
+	DHD_GENERAL_UNLOCK(dhd, flags);
+
+	if (!pkt)
+		return;
+
+	/* DMA RX offset updated through shared area */
+	if (dhd->prot->rx_dataoffset)
+		PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset);
+
+	PKTSETLEN(dhd->osh, pkt, buflen);
+
+#ifdef DHD_WAKE_STATUS
+	dhd->bus->rcwake += pkt_wake;
+#endif
+	dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1);
+}
+
+static void BCMFASTPATH
+dhd_prot_rxcmplt_process(dhd_pub_t *dhd, void* buf, uint16 msglen)
+{
+	host_rxbuf_cmpl_t *rxcmplt_h;
+	uint16 data_offset;             /* offset at which data starts */
+	void * pkt;
+	unsigned long flags;
+	static uint8 current_phase = 0;
+	uint ifidx;
+#ifdef DHD_WAKE_STATUS
+	int pkt_wake = bcmpcie_set_get_wake(dhd->bus, 0);
+#endif
+	/* RXCMPLT HDR */
+	rxcmplt_h = (host_rxbuf_cmpl_t *)buf;
+
+	/* Post another set of rxbufs to the device */
+	dhd_prot_return_rxbuf(dhd, 1);
+
+	/* offset from which data starts is populated in rxstatus0 */
+	data_offset = ltoh16(rxcmplt_h->data_offset);
+
+	DHD_GENERAL_LOCK(dhd, flags);
+	pkt = dhd_prot_packet_get(dhd, ltoh32(rxcmplt_h->cmn_hdr.request_id), BUFF_TYPE_DATA_RX);
+	DHD_GENERAL_UNLOCK(dhd, flags);
+
+	if (!pkt) {
+		return;
+	}
+
+	DHD_INFO(("id 0x%04x, offset %d, len %d, idx %d, phase 0x%02x, pktdata %p, metalen %d\n",
+		ltoh32(rxcmplt_h->cmn_hdr.request_id), data_offset, ltoh16(rxcmplt_h->data_len),
+		rxcmplt_h->cmn_hdr.if_id, rxcmplt_h->cmn_hdr.flags, PKTDATA(dhd->osh, pkt),
+		ltoh16(rxcmplt_h->metadata_len)));
+
+#if DHD_DBG_SHOW_METADATA
+	if (dhd->prot->rx_metadata_offset && rxcmplt_h->metadata_len) {
+		uchar *ptr;
+		ptr = PKTDATA(dhd->osh, pkt) - (dhd->prot->rx_metadata_offset);
+		/* header followed by data */
+		bcm_print_bytes("rxmetadata", ptr, rxcmplt_h->metadata_len);
+		dhd_prot_print_metadata(dhd, ptr, rxcmplt_h->metadata_len);
+	}
+#endif /* DHD_DBG_SHOW_METADATA */
+
+	if (current_phase !=  rxcmplt_h->cmn_hdr.flags) {
+		current_phase = rxcmplt_h->cmn_hdr.flags;
+	}
+	if (rxcmplt_h->flags & BCMPCIE_PKT_FLAGS_FRAME_802_11)
+		DHD_INFO(("D11 frame rxed \n"));
+	/* data_offset from buf start */
+	if (data_offset) {
+		/* data offset given from dongle after split rx */
+		PKTPULL(dhd->osh, pkt, data_offset); /* data offset */
+	} else {
+		/* DMA RX offset updated through shared area */
+		if (dhd->prot->rx_dataoffset)
+			PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset);
+	}
+	/* Actual length of the packet */
+	PKTSETLEN(dhd->osh, pkt, ltoh16(rxcmplt_h->data_len));
+
+	ifidx = rxcmplt_h->cmn_hdr.if_id;
+	memset(buf, 0 , msglen);
+	rxcmplt_h->marker = PCIE_D2H_RESET_MARK;
+
+#ifdef DHD_WAKE_STATUS
+	dhd->bus->rxwake += pkt_wake;
+#endif
+#ifdef DHD_RX_CHAINING
+	/* Chain the packets */
+	dhd_rxchain_frame(dhd, pkt, ifidx);
+#else /* ! DHD_RX_CHAINING */
+	/* offset from which data starts is populated in rxstatus0 */
+	dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1);
+#endif /* ! DHD_RX_CHAINING */
+}
+
+/* Stop protocol: sync w/dongle state. */
+void dhd_prot_stop(dhd_pub_t *dhd)
+{
+	/* nothing to do for pcie */
+}
+
+/* Add any protocol-specific data header.
+ * Caller must reserve prot_hdrlen prepend space.
+ */
+void BCMFASTPATH
+dhd_prot_hdrpush(dhd_pub_t *dhd, int ifidx, void *PKTBUF)
+{
+	return;
+}
+
+uint
+dhd_prot_hdrlen(dhd_pub_t *dhd, void *PKTBUF)
+{
+	return 0;
+}
+
+
+#define PKTBUF pktbuf
+
+int BCMFASTPATH
+dhd_prot_txdata(dhd_pub_t *dhd, void *PKTBUF, uint8 ifidx)
+{
+	unsigned long flags;
+	dhd_prot_t *prot = dhd->prot;
+	host_txbuf_post_t *txdesc = NULL;
+	dmaaddr_t physaddr, meta_physaddr;
+	uint8 *pktdata;
+	uint16 pktlen;
+	uint32 pktid;
+	uint8	prio;
+	uint16 flowid = 0;
+	uint16 alloced = 0;
+	uint16	headroom;
+
+	msgbuf_ring_t *msg_ring;
+	uint8 dhcp_pkt;
+
+	if (!dhd_bus_is_txmode_push(dhd->bus)) {
+		flow_ring_table_t *flow_ring_table;
+		flow_ring_node_t *flow_ring_node;
+
+		flowid = (uint16)DHD_PKTTAG_FLOWID((dhd_pkttag_fr_t*)PKTTAG(PKTBUF));
+
+		flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
+		flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid];
+
+		msg_ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
+	} else {
+		msg_ring = prot->h2dring_txp_subn;
+	}
+
+
+
+	DHD_GENERAL_LOCK(dhd, flags);
+
+	/* Create a unique 32-bit packet id */
+	pktid = NATIVE_TO_PKTID_RSV(dhd->prot->pktid_map_handle, PKTBUF);
+	if (pktid == DHD_PKTID_INVALID) {
+		DHD_ERROR(("Pktid pool depleted.\n"));
+		/*
+		 * If we return error here, the caller would queue the packet
+		 * again. So we'll just free the skb allocated in DMA Zone.
+		 * Since we have not freed the original SKB yet the caller would
+		 * requeue the same.
+		 */
+		goto err_no_res_pktfree;
+	}
+
+	/* Reserve space in the circular buffer */
+	txdesc = (host_txbuf_post_t *)dhd_alloc_ring_space(dhd,
+		msg_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced);
+	if (txdesc == NULL) {
+		DHD_INFO(("%s:%d: HTOD Msgbuf Not available TxCount = %d\n",
+			__FUNCTION__, __LINE__, prot->active_tx_count));
+		/* Free up the PKTID */
+		PKTID_TO_NATIVE(dhd->prot->pktid_map_handle, pktid, physaddr,
+			pktlen, BUFF_TYPE_NO_CHECK);
+		goto err_no_res_pktfree;
+	}
+	/* test if dhcp pkt */
+	dhcp_pkt = pkt_is_dhcp(dhd->osh, PKTBUF);
+	txdesc->flag2 = (txdesc->flag2 & ~(BCMPCIE_PKT_FLAGS2_FORCELOWRATE_MASK <<
+		BCMPCIE_PKT_FLAGS2_FORCELOWRATE_SHIFT)) | ((dhcp_pkt &
+		BCMPCIE_PKT_FLAGS2_FORCELOWRATE_MASK) << BCMPCIE_PKT_FLAGS2_FORCELOWRATE_SHIFT);
+	/* Extract the data pointer and length information */
+	pktdata = PKTDATA(dhd->osh, PKTBUF);
+	pktlen  = (uint16)PKTLEN(dhd->osh, PKTBUF);
+
+	/* Ethernet header: Copy before we cache flush packet using DMA_MAP */
+	bcopy(pktdata, txdesc->txhdr, ETHER_HDR_LEN);
+
+	/* Extract the ethernet header and adjust the data pointer and length */
+	pktdata = PKTPULL(dhd->osh, PKTBUF, ETHER_HDR_LEN);
+	pktlen -= ETHER_HDR_LEN;
+
+	/* Map the data pointer to a DMA-able address */
+	physaddr = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen, DMA_TX, PKTBUF, 0);
+	if ((PHYSADDRHI(physaddr) == 0) && (PHYSADDRLO(physaddr) == 0)) {
+		DHD_ERROR(("Something really bad, unless 0 is a valid phyaddr\n"));
+		ASSERT(0);
+	}
+
+	/* No need to lock. Save the rest of the packet's metadata */
+	NATIVE_TO_PKTID_SAVE(dhd->prot->pktid_map_handle, PKTBUF, pktid,
+	                     physaddr, pktlen, DMA_TX, BUFF_TYPE_DATA_TX);
+
+#ifdef TXP_FLUSH_NITEMS
+	if (msg_ring->pend_items_count == 0)
+		msg_ring->start_addr = (void *)txdesc;
+	msg_ring->pend_items_count++;
+#endif
+
+	/* Form the Tx descriptor message buffer */
+
+	/* Common message hdr */
+	txdesc->cmn_hdr.msg_type = MSG_TYPE_TX_POST;
+	txdesc->cmn_hdr.request_id = htol32(pktid);
+	txdesc->cmn_hdr.if_id = ifidx;
+	txdesc->flags = BCMPCIE_PKT_FLAGS_FRAME_802_3;
+	prio = (uint8)PKTPRIO(PKTBUF);
+
+
+	txdesc->flags |= (prio & 0x7) << BCMPCIE_PKT_FLAGS_PRIO_SHIFT;
+	txdesc->seg_cnt = 1;
+
+	txdesc->data_len = htol16(pktlen);
+	txdesc->data_buf_addr.high_addr = htol32(PHYSADDRHI(physaddr));
+	txdesc->data_buf_addr.low_addr  = htol32(PHYSADDRLO(physaddr));
+
+	/* Move data pointer to keep ether header in local PKTBUF for later reference */
+	PKTPUSH(dhd->osh, PKTBUF, ETHER_HDR_LEN);
+
+	/* Handle Tx metadata */
+	headroom = (uint16)PKTHEADROOM(dhd->osh, PKTBUF);
+	if (prot->tx_metadata_offset && (headroom < prot->tx_metadata_offset))
+		DHD_ERROR(("No headroom for Metadata tx %d %d\n",
+		prot->tx_metadata_offset, headroom));
+
+	if (prot->tx_metadata_offset && (headroom >= prot->tx_metadata_offset)) {
+		DHD_TRACE(("Metadata in tx %d\n", prot->tx_metadata_offset));
+
+		/* Adjust the data pointer to account for meta data in DMA_MAP */
+		PKTPUSH(dhd->osh, PKTBUF, prot->tx_metadata_offset);
+		meta_physaddr = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF),
+			prot->tx_metadata_offset, DMA_RX, PKTBUF, 0);
+		if (PHYSADDRISZERO(meta_physaddr)) {
+			DHD_ERROR(("Something really bad, unless 0 is a valid phyaddr\n"));
+			ASSERT(0);
+		}
+
+		/* Adjust the data pointer back to original value */
+		PKTPULL(dhd->osh, PKTBUF, prot->tx_metadata_offset);
+
+		txdesc->metadata_buf_len = prot->tx_metadata_offset;
+		txdesc->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(meta_physaddr));
+		txdesc->metadata_buf_addr.low_addr = htol32(PHYSADDRLO(meta_physaddr));
+	}
+	else {
+		txdesc->metadata_buf_len = htol16(0);
+		txdesc->metadata_buf_addr.high_addr = 0;
+		txdesc->metadata_buf_addr.low_addr = 0;
+	}
+
+
+	DHD_TRACE(("txpost: data_len %d, pktid 0x%04x\n", txdesc->data_len,
+		txdesc->cmn_hdr.request_id));
+
+	/* Update the write pointer in TCM & ring bell */
+#ifdef TXP_FLUSH_NITEMS
+	/* Flush if we have either hit the txp_threshold or if this msg is */
+	/* occupying the last slot in the flow_ring - before wrap around.  */
+	if ((msg_ring->pend_items_count == prot->txp_threshold) ||
+		((uint8 *) txdesc == (uint8 *) HOST_RING_END(msg_ring))) {
+		dhd_prot_txdata_write_flush(dhd, flowid, TRUE);
+	}
+#else
+	prot_ring_write_complete(dhd, msg_ring, txdesc, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
+#endif
+
+	prot->active_tx_count++;
+
+	DHD_GENERAL_UNLOCK(dhd, flags);
+
+	return BCME_OK;
+
+err_no_res_pktfree:
+
+
+
+	DHD_GENERAL_UNLOCK(dhd, flags);
+	return BCME_NORESOURCE;
+
+}
+
+/* called with a lock */
+void BCMFASTPATH
+dhd_prot_txdata_write_flush(dhd_pub_t *dhd, uint16 flowid, bool in_lock)
+{
+#ifdef TXP_FLUSH_NITEMS
+	unsigned long flags = 0;
+	flow_ring_table_t *flow_ring_table;
+	flow_ring_node_t *flow_ring_node;
+	msgbuf_ring_t *msg_ring;
+
+
+	if (!in_lock) {
+		DHD_GENERAL_LOCK(dhd, flags);
+	}
+
+	flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
+	flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid];
+	msg_ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
+
+	/* Update the write pointer in TCM & ring bell */
+	if (msg_ring->pend_items_count) {
+		prot_ring_write_complete(dhd, msg_ring, msg_ring->start_addr,
+			msg_ring->pend_items_count);
+		msg_ring->pend_items_count = 0;
+		msg_ring->start_addr = NULL;
+	}
+
+	if (!in_lock) {
+		DHD_GENERAL_UNLOCK(dhd, flags);
+	}
+#endif /* TXP_FLUSH_NITEMS */
+}
+
+#undef PKTBUF	/* Only defined in the above routine */
+int BCMFASTPATH
+dhd_prot_hdrpull(dhd_pub_t *dhd, int *ifidx, void *pkt, uchar *buf, uint *len)
+{
+	return 0;
+}
+
+static void BCMFASTPATH
+dhd_prot_return_rxbuf(dhd_pub_t *dhd, uint16 rxcnt)
+{
+	dhd_prot_t *prot = dhd->prot;
+
+	if (prot->rxbufpost >= rxcnt) {
+		prot->rxbufpost -= rxcnt;
+	} else {
+		/* ASSERT(0); */
+		prot->rxbufpost = 0;
+	}
+
+	if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD))
+		dhd_msgbuf_rxbuf_post(dhd);
+
+	return;
+}
+
+
+/* Use protocol to issue ioctl to dongle */
+int dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len)
+{
+	dhd_prot_t *prot = dhd->prot;
+	int ret = -1;
+	uint8 action;
+
+	if ((dhd->busstate == DHD_BUS_DOWN) || dhd->hang_was_sent) {
+		DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
+		goto done;
+	}
+
+	if (dhd->busstate == DHD_BUS_SUSPEND) {
+		DHD_ERROR(("%s : bus is suspended\n", __FUNCTION__));
+		goto done;
+	}
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	ASSERT(len <= WLC_IOCTL_MAXLEN);
+
+	if (len > WLC_IOCTL_MAXLEN)
+		goto done;
+
+	if (prot->pending == TRUE) {
+		DHD_ERROR(("packet is pending!!!! cmd=0x%x (%lu) lastcmd=0x%x (%lu)\n",
+			ioc->cmd, (unsigned long)ioc->cmd, prot->lastcmd,
+			(unsigned long)prot->lastcmd));
+		if ((ioc->cmd == WLC_SET_VAR) || (ioc->cmd == WLC_GET_VAR)) {
+			DHD_TRACE(("iovar cmd=%s\n", (char*)buf));
+		}
+		goto done;
+	}
+
+	prot->pending = TRUE;
+	prot->lastcmd = ioc->cmd;
+	action = ioc->set;
+
+
+	if (action & WL_IOCTL_ACTION_SET) {
+		ret = dhd_msgbuf_set_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
+	} else {
+		ret = dhdmsgbuf_query_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
+		if (ret > 0)
+			ioc->used = ret;
+	}
+	/* Too many programs assume ioctl() returns 0 on success */
+	if (ret >= 0)
+		ret = 0;
+	else {
+		if (ret != BCME_NOTASSOCIATED) {
+			DHD_ERROR(("%s: status ret value is %d \n", __FUNCTION__, ret));
+		}
+		dhd->dongle_error = ret;
+	}
+
+	/* Intercept the wme_dp ioctl here */
+	if ((!ret) && (ioc->cmd == WLC_SET_VAR) && (!strcmp(buf, "wme_dp"))) {
+		int slen, val = 0;
+
+		slen = strlen("wme_dp") + 1;
+		if (len >= (int)(slen + sizeof(int)))
+			bcopy(((char *)buf + slen), &val, sizeof(int));
+		dhd->wme_dp = (uint8) ltoh32(val);
+	}
+
+
+	prot->pending = FALSE;
+
+done:
+	return ret;
+
+}
+
+int
+dhdmsgbuf_lpbk_req(dhd_pub_t *dhd, uint len)
+{
+	unsigned long flags;
+	dhd_prot_t *prot = dhd->prot;
+	uint16 alloced = 0;
+
+	ioct_reqst_hdr_t *ioct_rqst;
+
+	uint16 hdrlen = sizeof(ioct_reqst_hdr_t);
+	uint16 msglen = len + hdrlen;
+
+
+	if (msglen  > MSGBUF_MAX_MSG_SIZE)
+		msglen = MSGBUF_MAX_MSG_SIZE;
+
+	msglen = align(msglen, DMA_ALIGN_LEN);
+
+	DHD_GENERAL_LOCK(dhd, flags);
+	ioct_rqst = (ioct_reqst_hdr_t *)dhd_alloc_ring_space(dhd,
+		prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced);
+
+	if (ioct_rqst == NULL) {
+		DHD_GENERAL_UNLOCK(dhd, flags);
+		return 0;
+	}
+
+	{
+		uint8 *ptr;
+		uint16 i;
+
+		ptr = (uint8 *)ioct_rqst;
+		for (i = 0; i < msglen; i++) {
+			ptr[i] = i % 256;
+		}
+	}
+
+
+	/* Common msg buf hdr */
+	ioct_rqst->msg.msg_type = MSG_TYPE_LOOPBACK;
+	ioct_rqst->msg.if_id = 0;
+
+	bcm_print_bytes("LPBK REQ: ", (uint8 *)ioct_rqst, msglen);
+
+	/* Update the write pointer in TCM & ring bell */
+	prot_ring_write_complete(dhd, prot->h2dring_ctrl_subn, ioct_rqst,
+		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
+	DHD_GENERAL_UNLOCK(dhd, flags);
+
+	return 0;
+}
+
+void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dma)
+{
+	if (dma == NULL)
+		return;
+
+	if (dma->srcmem.va) {
+		DMA_FREE_CONSISTENT(dhd->osh, dma->srcmem.va,
+			dma->len, dma->srcmem.pa, dma->srcmem.dmah);
+		dma->srcmem.va = NULL;
+	}
+	if (dma->destmem.va) {
+		DMA_FREE_CONSISTENT(dhd->osh, dma->destmem.va,
+			dma->len + 8, dma->destmem.pa, dma->destmem.dmah);
+		dma->destmem.va = NULL;
+	}
+}
+
+int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len,
+	uint srcdelay, uint destdelay, dhd_dmaxfer_t *dma)
+{
+	uint i;
+
+	if (!dma)
+		return BCME_ERROR;
+
+	/* First free up exisiting buffers */
+	dmaxfer_free_dmaaddr(dhd, dma);
+
+	dma->srcmem.va = DMA_ALLOC_CONSISTENT(dhd->osh, len, DMA_ALIGN_LEN,
+	&i, &dma->srcmem.pa, &dma->srcmem.dmah);
+	if (dma->srcmem.va ==  NULL) {
+		return BCME_NOMEM;
+	}
+
+	/* Populate source with a pattern */
+	for (i = 0; i < len; i++) {
+		((uint8*)dma->srcmem.va)[i] = i % 256;
+	}
+	OSL_CACHE_FLUSH(dma->srcmem.va, len);
+
+	dma->destmem.va = DMA_ALLOC_CONSISTENT(dhd->osh, len + 8, DMA_ALIGN_LEN,
+	&i, &dma->destmem.pa, &dma->destmem.dmah);
+	if (dma->destmem.va ==  NULL) {
+		DMA_FREE_CONSISTENT(dhd->osh, dma->srcmem.va,
+			dma->len, dma->srcmem.pa, dma->srcmem.dmah);
+		dma->srcmem.va = NULL;
+		return BCME_NOMEM;
+	}
+
+
+	/* Clear the destination buffer */
+	bzero(dma->destmem.va, len +8);
+	OSL_CACHE_FLUSH(dma->destmem.va, len+8);
+
+	dma->len = len;
+	dma->srcdelay = srcdelay;
+	dma->destdelay = destdelay;
+
+	return BCME_OK;
+}
+
+static void
+dhdmsgbuf_dmaxfer_compare(dhd_pub_t *dhd, void * buf, uint16 msglen)
+{
+	dhd_prot_t *prot = dhd->prot;
+
+	OSL_CACHE_INV(prot->dmaxfer.destmem.va, prot->dmaxfer.len);
+	if (prot->dmaxfer.srcmem.va && prot->dmaxfer.destmem.va) {
+		if (memcmp(prot->dmaxfer.srcmem.va,
+			prot->dmaxfer.destmem.va,
+			prot->dmaxfer.len)) {
+			bcm_print_bytes("XFER SRC: ",
+				prot->dmaxfer.srcmem.va, prot->dmaxfer.len);
+			bcm_print_bytes("XFER DEST: ",
+				prot->dmaxfer.destmem.va, prot->dmaxfer.len);
+		}
+		else {
+			DHD_INFO(("DMA successful\n"));
+		}
+	}
+	dmaxfer_free_dmaaddr(dhd, &prot->dmaxfer);
+	dhd->prot->dmaxfer_in_progress = FALSE;
+}
+
+int
+dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd, uint len, uint srcdelay, uint destdelay)
+{
+	unsigned long flags;
+	int ret = BCME_OK;
+	dhd_prot_t *prot = dhd->prot;
+	pcie_dma_xfer_params_t *dmap;
+	uint32 xferlen = len > DMA_XFER_LEN_LIMIT ? DMA_XFER_LEN_LIMIT : len;
+	uint16 msglen = sizeof(pcie_dma_xfer_params_t);
+	uint16 alloced = 0;
+
+	if (prot->dmaxfer_in_progress) {
+		DHD_ERROR(("DMA is in progress...\n"));
+		return ret;
+	}
+	prot->dmaxfer_in_progress = TRUE;
+	if ((ret = dmaxfer_prepare_dmaaddr(dhd, xferlen, srcdelay, destdelay,
+		&prot->dmaxfer)) != BCME_OK) {
+		prot->dmaxfer_in_progress = FALSE;
+		return ret;
+	}
+
+
+	if (msglen  > MSGBUF_MAX_MSG_SIZE)
+		msglen = MSGBUF_MAX_MSG_SIZE;
+
+	msglen = align(msglen, DMA_ALIGN_LEN);
+
+	DHD_GENERAL_LOCK(dhd, flags);
+	dmap = (pcie_dma_xfer_params_t *)dhd_alloc_ring_space(dhd,
+		prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced);
+
+	if (dmap == NULL) {
+		dmaxfer_free_dmaaddr(dhd, &prot->dmaxfer);
+		prot->dmaxfer_in_progress = FALSE;
+		DHD_GENERAL_UNLOCK(dhd, flags);
+		return BCME_NOMEM;
+	}
+
+	/* Common msg buf hdr */
+	dmap->cmn_hdr.msg_type = MSG_TYPE_LPBK_DMAXFER;
+	dmap->cmn_hdr.request_id = 0x1234;
+
+	dmap->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.srcmem.pa));
+	dmap->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.srcmem.pa));
+	dmap->host_ouput_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.destmem.pa));
+	dmap->host_ouput_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.destmem.pa));
+	dmap->xfer_len = htol32(prot->dmaxfer.len);
+	dmap->srcdelay = htol32(prot->dmaxfer.srcdelay);
+	dmap->destdelay = htol32(prot->dmaxfer.destdelay);
+
+	/* Update the write pointer in TCM & ring bell */
+	prot_ring_write_complete(dhd, prot->h2dring_ctrl_subn, dmap,
+		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
+	DHD_GENERAL_UNLOCK(dhd, flags);
+
+	DHD_ERROR(("DMA Started...\n"));
+
+	return BCME_OK;
+}
+
+static int
+dhdmsgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
+{
+	dhd_prot_t *prot = dhd->prot;
+
+	int ret = 0;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	/* Respond "bcmerror" and "bcmerrorstr" with local cache */
+	if (cmd == WLC_GET_VAR && buf)
+	{
+		if (!strcmp((char *)buf, "bcmerrorstr"))
+		{
+			strncpy((char *)buf, bcmerrorstr(dhd->dongle_error), BCME_STRLEN);
+			goto done;
+		}
+		else if (!strcmp((char *)buf, "bcmerror"))
+		{
+			*(int *)buf = dhd->dongle_error;
+			goto done;
+		}
+	}
+
+	ret = dhd_fillup_ioct_reqst_ptrbased(dhd, (uint16)len, cmd, buf, ifidx);
+
+	DHD_INFO(("ACTION %d ifdix %d cmd %d len %d \n",
+		action, ifidx, cmd, len));
+
+	/* wait for interrupt and get first fragment */
+	ret = dhdmsgbuf_cmplt(dhd, prot->reqid, len, buf, prot->retbuf.va);
+
+done:
+	return ret;
+}
+static int
+dhdmsgbuf_cmplt(dhd_pub_t *dhd, uint32 id, uint32 len, void* buf, void* retbuf)
+{
+	dhd_prot_t *prot = dhd->prot;
+	ioctl_comp_resp_msg_t  ioct_resp;
+	void* pkt;
+	int retlen;
+	int msgbuf_len = 0;
+	unsigned long flags;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (prot->cur_ioctlresp_bufs_posted)
+		prot->cur_ioctlresp_bufs_posted--;
+
+	dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
+
+	retlen = dhd_bus_rxctl(dhd->bus, (uchar*)&ioct_resp, msgbuf_len);
+	if (retlen <= 0) {
+		DHD_ERROR(("IOCTL request failed with error code %d\n", retlen));
+		return retlen;
+	}
+	DHD_INFO(("ioctl resp retlen %d status %d, resp_len %d, pktid %d\n",
+		retlen, ioct_resp.compl_hdr.status, ioct_resp.resp_len,
+		ioct_resp.cmn_hdr.request_id));
+	if (ioct_resp.resp_len != 0) {
+		DHD_GENERAL_LOCK(dhd, flags);
+		pkt = dhd_prot_packet_get(dhd, ioct_resp.cmn_hdr.request_id, BUFF_TYPE_IOCTL_RX);
+		DHD_GENERAL_UNLOCK(dhd, flags);
+
+		DHD_INFO(("ioctl ret buf %p retlen %d status %x \n", pkt, retlen,
+			ioct_resp.compl_hdr.status));
+		/* get ret buf */
+		if ((buf) && (pkt)) {
+			/* bcopy(PKTDATA(dhd->osh, pkt), buf, ioct_resp.resp_len); */
+			/* ioct_resp.resp_len could have been changed to make it > 8 bytes */
+			bcopy(PKTDATA(dhd->osh, pkt), buf, len);
+		}
+		if (pkt) {
+#ifdef DHD_USE_STATIC_IOCTLBUF
+			PKTFREE_STATIC(dhd->osh, pkt, FALSE);
+#else
+			PKTFREE(dhd->osh, pkt, FALSE);
+#endif /* DHD_USE_STATIC_IOCTLBUF */
+
+		}
+	} else {
+		DHD_GENERAL_LOCK(dhd, flags);
+		dhd_prot_packet_free(dhd, ioct_resp.cmn_hdr.request_id, BUFF_TYPE_IOCTL_RX);
+		DHD_GENERAL_UNLOCK(dhd, flags);
+	}
+
+	return (int)(ioct_resp.compl_hdr.status);
+}
+static int
+dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
+{
+	dhd_prot_t *prot = dhd->prot;
+
+	int ret = 0;
+
+	DHD_TRACE(("%s: Enter \n", __FUNCTION__));
+	DHD_TRACE(("%s: cmd %d len %d\n", __FUNCTION__, cmd, len));
+
+	if (dhd->busstate == DHD_BUS_DOWN) {
+		DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
+		return -EIO;
+	}
+
+	/* don't talk to the dongle if fw is about to be reloaded */
+	if (dhd->hang_was_sent) {
+		DHD_ERROR(("%s: HANG was sent up earlier. Not talking to the chip\n",
+			__FUNCTION__));
+		return -EIO;
+	}
+
+	/* Fill up msgbuf for ioctl req */
+	ret = dhd_fillup_ioct_reqst_ptrbased(dhd, (uint16)len, cmd, buf, ifidx);
+
+	DHD_INFO(("ACTIOn %d ifdix %d cmd %d len %d \n",
+		action, ifidx, cmd, len));
+
+	ret = dhdmsgbuf_cmplt(dhd, prot->reqid, len, buf, prot->retbuf.va);
+
+	return ret;
+}
+/* Handles a protocol control response asynchronously */
+int dhd_prot_ctl_complete(dhd_pub_t *dhd)
+{
+	return 0;
+}
+
+/* Check for and handle local prot-specific iovar commands */
+int dhd_prot_iovar_op(dhd_pub_t *dhd, const char *name,
+                             void *params, int plen, void *arg, int len, bool set)
+{
+	return BCME_UNSUPPORTED;
+}
+
+/* Add prot dump output to a buffer */
+void dhd_prot_dump(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
+{
+
+}
+
+/* Update local copy of dongle statistics */
+void dhd_prot_dstats(dhd_pub_t *dhd)
+{
+		return;
+}
+
+int dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf,
+	uint reorder_info_len, void **pkt, uint32 *free_buf_count)
+{
+	return 0;
+}
+/* post a dummy message to interrupt dongle */
+/* used to process cons commands */
+int
+dhd_post_dummy_msg(dhd_pub_t *dhd)
+{
+	unsigned long flags;
+	hostevent_hdr_t *hevent = NULL;
+	uint16 alloced = 0;
+
+	dhd_prot_t *prot = dhd->prot;
+
+	DHD_GENERAL_LOCK(dhd, flags);
+	hevent = (hostevent_hdr_t *)dhd_alloc_ring_space(dhd,
+		prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced);
+
+	if (hevent == NULL) {
+		DHD_GENERAL_UNLOCK(dhd, flags);
+		return -1;
+	}
+
+	/* CMN msg header */
+	hevent->msg.msg_type = MSG_TYPE_HOST_EVNT;
+	hevent->msg.if_id = 0;
+
+	/* Event payload */
+	hevent->evnt_pyld = htol32(HOST_EVENT_CONS_CMD);
+
+	/* Since, we are filling the data directly into the bufptr obtained
+	 * from the msgbuf, we can directly call the write_complete
+	 */
+	prot_ring_write_complete(dhd, prot->h2dring_ctrl_subn, hevent,
+		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
+	DHD_GENERAL_UNLOCK(dhd, flags);
+
+	return 0;
+}
+
+static void * BCMFASTPATH
+dhd_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint16 nitems, uint16 * alloced)
+{
+	void * ret_buf;
+	uint16 r_index = 0;
+
+	/* Alloc space for nitems in the ring */
+	ret_buf = prot_get_ring_space(ring, nitems, alloced);
+
+	if (ret_buf == NULL) {
+		/* if alloc failed , invalidate cached read ptr */
+		if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) {
+			r_index = dhd_get_dmaed_index(dhd, H2D_DMA_READINDX, ring->idx);
+			ring->ringstate->r_offset = r_index;
+		} else
+			dhd_bus_cmn_readshared(dhd->bus, &(RING_READ_PTR(ring)),
+				RING_READ_PTR, ring->idx);
+
+		/* Try allocating once more */
+		ret_buf = prot_get_ring_space(ring, nitems, alloced);
+
+		if (ret_buf == NULL) {
+			DHD_INFO(("%s: Ring space not available  \n", ring->name));
+			return NULL;
+		}
+	}
+
+	/* Return alloced space */
+	return ret_buf;
+}
+
+#define DHD_IOCTL_REQ_PKTID	0xFFFE
+
+/* Non inline ioct request */
+/* Form a ioctl request first as per ioctptr_reqst_hdr_t header in the circular buffer */
+/* Form a separate request buffer where a 4 byte cmn header is added in the front */
+/* buf contents from parent function is copied to remaining section of this buffer */
+static int
+dhd_fillup_ioct_reqst_ptrbased(dhd_pub_t *dhd, uint16 len, uint cmd, void* buf, int ifidx)
+{
+	dhd_prot_t *prot = dhd->prot;
+	ioctl_req_msg_t *ioct_rqst;
+	void * ioct_buf;	/* For ioctl payload */
+	uint16  rqstlen, resplen;
+	unsigned long flags;
+	uint16 alloced = 0;
+
+	rqstlen = len;
+	resplen = len;
+
+	/* Limit ioct request to MSGBUF_MAX_MSG_SIZE bytes including hdrs */
+	/* 8K allocation of dongle buffer fails */
+	/* dhd doesnt give separate input & output buf lens */
+	/* so making the assumption that input length can never be more than 1.5k */
+	rqstlen = MIN(rqstlen, MSGBUF_MAX_MSG_SIZE);
+
+	DHD_GENERAL_LOCK(dhd, flags);
+	/* Request for cbuf space */
+	ioct_rqst = (ioctl_req_msg_t*)dhd_alloc_ring_space(dhd, prot->h2dring_ctrl_subn,
+		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,	&alloced);
+	if (ioct_rqst == NULL) {
+		DHD_ERROR(("couldn't allocate space on msgring to send ioctl request\n"));
+		DHD_GENERAL_UNLOCK(dhd, flags);
+		return -1;
+	}
+
+	/* Common msg buf hdr */
+	ioct_rqst->cmn_hdr.msg_type = MSG_TYPE_IOCTLPTR_REQ;
+	ioct_rqst->cmn_hdr.if_id = (uint8)ifidx;
+	ioct_rqst->cmn_hdr.flags = 0;
+	ioct_rqst->cmn_hdr.request_id = DHD_IOCTL_REQ_PKTID;
+
+	ioct_rqst->cmd = htol32(cmd);
+	ioct_rqst->output_buf_len = htol16(resplen);
+	ioct_rqst->trans_id = prot->ioctl_trans_id ++;
+
+	/* populate ioctl buffer info */
+	ioct_rqst->input_buf_len = htol16(rqstlen);
+	ioct_rqst->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->ioctbuf.pa));
+	ioct_rqst->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->ioctbuf.pa));
+	/* copy ioct payload */
+	ioct_buf = (void *) prot->ioctbuf.va;
+
+	if (buf)
+		memcpy(ioct_buf, buf, len);
+
+	OSL_CACHE_FLUSH((void *) prot->ioctbuf.va, len);
+
+	if ((ulong)ioct_buf % DMA_ALIGN_LEN)
+		DHD_ERROR(("host ioct address unaligned !!!!! \n"));
+
+	DHD_CTL(("submitted IOCTL request request_id %d, cmd %d, output_buf_len %d, tx_id %d\n",
+		ioct_rqst->cmn_hdr.request_id, cmd, ioct_rqst->output_buf_len,
+		ioct_rqst->trans_id));
+
+	/* upd wrt ptr and raise interrupt */
+	prot_ring_write_complete(dhd, prot->h2dring_ctrl_subn, ioct_rqst,
+		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
+	DHD_GENERAL_UNLOCK(dhd, flags);
+
+	return 0;
+}
+
+/* Packet to PacketID mapper */
+typedef struct {
+	ulong native;
+	dmaaddr_t pa;
+	uint32 pa_len;
+	uchar dma;
+} pktid_t;
+
+typedef struct {
+	void	*osh;
+	void	*mwbmap_hdl;
+	pktid_t *pktid_list;
+	uint32	count;
+} pktid_map_t;
+
+
+void *pktid_map_init(void *osh, uint32 count)
+{
+	pktid_map_t *handle;
+
+	handle = (pktid_map_t *) MALLOC(osh, sizeof(pktid_map_t));
+	if (handle == NULL) {
+		printf("%s:%d: MALLOC failed for size %d\n",
+			__FUNCTION__, __LINE__, (uint32) sizeof(pktid_map_t));
+		return NULL;
+	}
+	handle->osh = osh;
+	handle->count = count;
+	handle->mwbmap_hdl = bcm_mwbmap_init(osh, count);
+	if (handle->mwbmap_hdl == NULL) {
+		printf("%s:%d: bcm_mwbmap_init failed for count %d\n",
+			__FUNCTION__, __LINE__, count);
+		MFREE(osh, handle, sizeof(pktid_map_t));
+		return NULL;
+	}
+
+	handle->pktid_list = (pktid_t *) MALLOC(osh, sizeof(pktid_t) * (count+1));
+	if (handle->pktid_list == NULL) {
+		printf("%s:%d: MALLOC failed for count %d / total = %d\n",
+			__FUNCTION__, __LINE__, count, (uint32) sizeof(pktid_t) * count);
+		bcm_mwbmap_fini(osh, handle->mwbmap_hdl);
+		MFREE(osh, handle, sizeof(pktid_map_t));
+		return NULL;
+	}
+
+	return handle;
+}
+
+void
+pktid_map_uninit(void *pktid_map_handle)
+{
+	pktid_map_t *handle = (pktid_map_t *) pktid_map_handle;
+	uint32 ix;
+
+	if (handle != NULL) {
+		void *osh = handle->osh;
+		for (ix = 0; ix < MAX_PKTID_ITEMS; ix++)
+		{
+			if (!bcm_mwbmap_isfree(handle->mwbmap_hdl, ix)) {
+				/* Mark the slot as free */
+				bcm_mwbmap_free(handle->mwbmap_hdl, ix);
+				/*
+				Here we can do dma unmapping for 32 bit also.
+				Since this in removal path, it will not affect performance
+				*/
+				DMA_UNMAP(osh, handle->pktid_list[ix+1].pa,
+					(uint) handle->pktid_list[ix+1].pa_len,
+					handle->pktid_list[ix+1].dma, 0, 0);
+				PKTFREE(osh, (unsigned long*)handle->pktid_list[ix+1].native, TRUE);
+			}
+		}
+		bcm_mwbmap_fini(osh, handle->mwbmap_hdl);
+		MFREE(osh, handle->pktid_list, sizeof(pktid_t) * (handle->count+1));
+		MFREE(osh, handle, sizeof(pktid_map_t));
+	}
+	return;
+}
+
+uint32 BCMFASTPATH
+pktid_map_unique(void *pktid_map_handle, void *pkt, dmaaddr_t physaddr, uint32 physlen, uint32 dma)
+{
+	uint32 id;
+	pktid_map_t *handle = (pktid_map_t *) pktid_map_handle;
+
+	if (handle == NULL) {
+		printf("%s:%d: Error !!! pktid_map_unique called without initing pktid_map\n",
+			__FUNCTION__, __LINE__);
+		return 0;
+	}
+	id = bcm_mwbmap_alloc(handle->mwbmap_hdl);
+	if (id == BCM_MWBMAP_INVALID_IDX) {
+		printf("%s:%d: bcm_mwbmap_alloc failed. Free Count = %d\n",
+			__FUNCTION__, __LINE__, bcm_mwbmap_free_cnt(handle->mwbmap_hdl));
+		return 0;
+	}
+
+	/* id=0 is invalid as we use this for error checking in the dongle */
+	id += 1;
+	handle->pktid_list[id].native = (ulong) pkt;
+	handle->pktid_list[id].pa     = physaddr;
+	handle->pktid_list[id].pa_len = (uint32) physlen;
+	handle->pktid_list[id].dma = (uchar)dma;
+
+	return id;
+}
+
+void * BCMFASTPATH
+pktid_get_packet(void *pktid_map_handle, uint32 id, dmaaddr_t *physaddr, uint32 *physlen)
+{
+	void *native = NULL;
+	pktid_map_t *handle = (pktid_map_t *) pktid_map_handle;
+	if (handle == NULL) {
+		printf("%s:%d: Error !!! pktid_get_packet called without initing pktid_map\n",
+			__FUNCTION__, __LINE__);
+		return NULL;
+	}
+
+	/* Debug check */
+	if (bcm_mwbmap_isfree(handle->mwbmap_hdl, (id-1))) {
+		printf("%s:%d: Error !!!. slot (%d/0x%04x) free but the app is using it.\n",
+			__FUNCTION__, __LINE__, (id-1), (id-1));
+		return NULL;
+	}
+
+	native = (void *) handle->pktid_list[id].native;
+	*physaddr = handle->pktid_list[id].pa;
+	*physlen  = (uint32) handle->pktid_list[id].pa_len;
+
+	/* Mark the slot as free */
+	bcm_mwbmap_free(handle->mwbmap_hdl, (id-1));
+
+	return native;
+}
+static msgbuf_ring_t*
+prot_ring_attach(dhd_prot_t * prot, char* name, uint16 max_item, uint16 len_item, uint16 ringid)
+{
+	uint alloced = 0;
+	msgbuf_ring_t *ring;
+	dmaaddr_t physaddr;
+	uint16 size, cnt;
+	uint32 *marker;
+
+	ASSERT(name);
+	BCM_REFERENCE(physaddr);
+
+	/* allocate ring info */
+	ring = MALLOC(prot->osh, sizeof(msgbuf_ring_t));
+	if (ring == NULL) {
+		ASSERT(0);
+		return NULL;
+	}
+	bzero(ring, sizeof(*ring));
+
+	/* Init name */
+	strncpy(ring->name, name, sizeof(ring->name));
+
+	/* Ringid in the order given in bcmpcie.h */
+	ring->idx = ringid;
+
+	/* init ringmem */
+	ring->ringmem = MALLOC(prot->osh, sizeof(ring_mem_t));
+	if (ring->ringmem == NULL)
+		goto fail;
+	bzero(ring->ringmem, sizeof(*ring->ringmem));
+
+	ring->ringmem->max_item = max_item;
+	ring->ringmem->len_items = len_item;
+	size = max_item * len_item;
+
+	/* Ring Memmory allocation */
+	ring->ring_base.va = DMA_ALLOC_CONSISTENT(prot->osh, size, DMA_ALIGN_LEN,
+		&alloced, &ring->ring_base.pa, &ring->ring_base.dmah);
+
+	if (ring->ring_base.va == NULL)
+		goto fail;
+	ring->ringmem->base_addr.high_addr = htol32(PHYSADDRHI(ring->ring_base.pa));
+	ring->ringmem->base_addr.low_addr = htol32(PHYSADDRLO(ring->ring_base.pa));
+
+	ASSERT(MODX((unsigned long)ring->ring_base.va, DMA_ALIGN_LEN) == 0);
+	bzero(ring->ring_base.va, size);
+	for (cnt = 0; cnt < max_item; cnt++) {
+		marker = (uint32 *)ring->ring_base.va +
+			(cnt + 1) * len_item / sizeof(uint32) - 1;
+		*marker = PCIE_D2H_RESET_MARK;
+	}
+	OSL_CACHE_FLUSH((void *) ring->ring_base.va, size);
+
+	/* Ring state init */
+	ring->ringstate	= MALLOC(prot->osh, sizeof(ring_state_t));
+	if (ring->ringstate == NULL)
+		goto fail;
+	bzero(ring->ringstate, sizeof(*ring->ringstate));
+
+	DHD_INFO(("RING_ATTACH : %s Max item %d len item %d total size %d "
+		"ring start %p buf phys addr  %x:%x \n",
+		ring->name, ring->ringmem->max_item, ring->ringmem->len_items,
+		size, ring->ring_base.va, ring->ringmem->base_addr.high_addr,
+		ring->ringmem->base_addr.low_addr));
+	return ring;
+fail:
+	if (ring->ring_base.va)
+		PHYSADDRHISET(physaddr, ring->ringmem->base_addr.high_addr);
+		PHYSADDRLOSET(physaddr, ring->ringmem->base_addr.low_addr);
+		size = ring->ringmem->max_item * ring->ringmem->len_items;
+		DMA_FREE_CONSISTENT(prot->osh, ring->ring_base.va, size, ring->ring_base.pa, NULL);
+		ring->ring_base.va = NULL;
+	if (ring->ringmem)
+		MFREE(prot->osh, ring->ringmem, sizeof(ring_mem_t));
+	MFREE(prot->osh, ring, sizeof(msgbuf_ring_t));
+	ASSERT(0);
+	return NULL;
+}
+static void
+dhd_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring)
+{
+	/* update buffer address of ring */
+	dhd_bus_cmn_writeshared(dhd->bus, &ring->ringmem->base_addr,
+		sizeof(ring->ringmem->base_addr), RING_BUF_ADDR, ring->idx);
+
+	/* Update max items possible in ring */
+	dhd_bus_cmn_writeshared(dhd->bus, &ring->ringmem->max_item,
+		sizeof(ring->ringmem->max_item), RING_MAX_ITEM, ring->idx);
+
+	/* Update length of each item in the ring */
+	dhd_bus_cmn_writeshared(dhd->bus, &ring->ringmem->len_items,
+		sizeof(ring->ringmem->len_items), RING_LEN_ITEMS, ring->idx);
+
+	/* ring inited */
+	ring->inited = TRUE;
+}
+static void
+dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t * ring)
+{
+	dmaaddr_t phyaddr;
+	uint16 size;
+	dhd_prot_t *prot = dhd->prot;
+
+	BCM_REFERENCE(phyaddr);
+
+	if (ring == NULL)
+		return;
+
+	ring->inited = FALSE;
+
+	PHYSADDRHISET(phyaddr, ring->ringmem->base_addr.high_addr);
+	PHYSADDRLOSET(phyaddr, ring->ringmem->base_addr.low_addr);
+	size = ring->ringmem->max_item * ring->ringmem->len_items;
+	/* Free up ring */
+	if (ring->ring_base.va) {
+		DMA_FREE_CONSISTENT(prot->osh, ring->ring_base.va, size, ring->ring_base.pa,
+			ring->ring_base.dmah);
+		ring->ring_base.va = NULL;
+	}
+
+	/* Free up ring mem space */
+	if (ring->ringmem) {
+		MFREE(prot->osh, ring->ringmem, sizeof(ring_mem_t));
+		ring->ringmem = NULL;
+	}
+
+	/* Free up ring state info */
+	if (ring->ringstate) {
+		MFREE(prot->osh, ring->ringstate, sizeof(ring_state_t));
+		ring->ringstate = NULL;
+	}
+
+	/* free up ring info */
+	MFREE(prot->osh, ring, sizeof(msgbuf_ring_t));
+}
+/* Assumes only one index is updated ata time */
+static void *BCMFASTPATH
+prot_get_ring_space(msgbuf_ring_t *ring, uint16 nitems, uint16 * alloced)
+{
+	void *ret_ptr = NULL;
+	uint16 ring_avail_cnt;
+
+	ASSERT(nitems <= RING_MAX_ITEM(ring));
+
+	ring_avail_cnt = CHECK_WRITE_SPACE(RING_READ_PTR(ring), RING_WRITE_PTR(ring),
+		RING_MAX_ITEM(ring));
+
+	if (ring_avail_cnt == 0) {
+		DHD_INFO(("RING space not available on ring %s for %d items \n",
+			ring->name, nitems));
+		DHD_INFO(("write %d read %d \n\n", RING_WRITE_PTR(ring),
+			RING_READ_PTR(ring)));
+		return NULL;
+	}
+	*alloced = MIN(nitems, ring_avail_cnt);
+
+	/* Return next available space */
+	ret_ptr = (char*)HOST_RING_BASE(ring) + (RING_WRITE_PTR(ring) * RING_LEN_ITEMS(ring));
+
+	/* Update write pointer */
+	if ((RING_WRITE_PTR(ring) + *alloced) == RING_MAX_ITEM(ring))
+		RING_WRITE_PTR(ring) = 0;
+	else if ((RING_WRITE_PTR(ring) + *alloced) < RING_MAX_ITEM(ring))
+		RING_WRITE_PTR(ring) += *alloced;
+	else {
+		/* Should never hit this */
+		ASSERT(0);
+		return NULL;
+	}
+
+	return ret_ptr;
+}
+
+static void BCMFASTPATH
+prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p, uint16 nitems)
+{
+	dhd_prot_t *prot = dhd->prot;
+
+	/* cache flush */
+	OSL_CACHE_FLUSH(p, RING_LEN_ITEMS(ring) * nitems);
+
+	/* update write pointer */
+	/* If dma'ing h2d indices are supported
+	 * update the values in the host memory
+	 * o/w update the values in TCM
+	 */
+	if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support))
+		dhd_set_dmaed_index(dhd, H2D_DMA_WRITEINDX,
+			ring->idx, (uint16)RING_WRITE_PTR(ring));
+	else
+		dhd_bus_cmn_writeshared(dhd->bus, &(RING_WRITE_PTR(ring)),
+			sizeof(uint16), RING_WRITE_PTR, ring->idx);
+
+	/* raise h2d interrupt */
+	prot->mb_ring_fn(dhd->bus, RING_WRITE_PTR(ring));
+}
+
+/* If dma'ing h2d indices are supported
+ * this function updates the indices in
+ * the host memory
+ */
+static void
+dhd_set_dmaed_index(dhd_pub_t *dhd, uint8 type, uint16 ringid, uint16 new_index)
+{
+	dhd_prot_t *prot = dhd->prot;
+
+	uint32 *ptr = NULL;
+	uint16 offset = 0;
+
+	switch (type) {
+		case H2D_DMA_WRITEINDX:
+			ptr = (uint32 *)(prot->h2d_dma_writeindx_buf.va);
+
+			/* Flow-Rings start at Id BCMPCIE_COMMON_MSGRINGS
+			 * but in host memory their indices start
+			 * after H2D Common Rings
+			 */
+			if (ringid >= BCMPCIE_COMMON_MSGRINGS)
+				offset = ringid - BCMPCIE_COMMON_MSGRINGS +
+					BCMPCIE_H2D_COMMON_MSGRINGS;
+			else
+				offset = ringid;
+			ptr += offset;
+
+			*ptr = htol16(new_index);
+
+			/* cache flush */
+			OSL_CACHE_FLUSH((void *)prot->h2d_dma_writeindx_buf.va,
+				prot->h2d_dma_writeindx_buf_len);
+
+			break;
+
+		case D2H_DMA_READINDX:
+			ptr = (uint32 *)(prot->d2h_dma_readindx_buf.va);
+
+			/* H2D Common Righs start at Id BCMPCIE_H2D_COMMON_MSGRINGS */
+			offset = ringid - BCMPCIE_H2D_COMMON_MSGRINGS;
+			ptr += offset;
+
+			*ptr = htol16(new_index);
+			/* cache flush */
+			OSL_CACHE_FLUSH((void *)prot->d2h_dma_readindx_buf.va,
+				prot->d2h_dma_readindx_buf_len);
+
+			break;
+
+		default:
+			DHD_ERROR(("%s: Invalid option for DMAing read/write index\n",
+				__FUNCTION__));
+
+			break;
+	}
+	DHD_TRACE(("%s: Data 0x%p, ringId %d, new_index %d\n",
+		__FUNCTION__, ptr, ringid, new_index));
+}
+
+
+static uint16
+dhd_get_dmaed_index(dhd_pub_t *dhd, uint8 type, uint16 ringid)
+{
+	uint32 *ptr = NULL;
+	uint16 data = 0;
+	uint16 offset = 0;
+
+	switch (type) {
+		case H2D_DMA_WRITEINDX:
+			OSL_CACHE_INV((void *)dhd->prot->h2d_dma_writeindx_buf.va,
+				dhd->prot->h2d_dma_writeindx_buf_len);
+			ptr = (uint32 *)(dhd->prot->h2d_dma_writeindx_buf.va);
+
+			/* Flow-Rings start at Id BCMPCIE_COMMON_MSGRINGS
+			 * but in host memory their indices start
+			 * after H2D Common Rings
+			 */
+			if (ringid >= BCMPCIE_COMMON_MSGRINGS)
+				offset = ringid - BCMPCIE_COMMON_MSGRINGS +
+					BCMPCIE_H2D_COMMON_MSGRINGS;
+			else
+				offset = ringid;
+			ptr += offset;
+
+			data = LTOH16((uint16)*ptr);
+			break;
+
+		case H2D_DMA_READINDX:
+			OSL_CACHE_INV((void *)dhd->prot->h2d_dma_readindx_buf.va,
+				dhd->prot->h2d_dma_readindx_buf_len);
+			ptr = (uint32 *)(dhd->prot->h2d_dma_readindx_buf.va);
+
+			/* Flow-Rings start at Id BCMPCIE_COMMON_MSGRINGS
+			 * but in host memory their indices start
+			 * after H2D Common Rings
+			 */
+			if (ringid >= BCMPCIE_COMMON_MSGRINGS)
+				offset = ringid - BCMPCIE_COMMON_MSGRINGS +
+					BCMPCIE_H2D_COMMON_MSGRINGS;
+			else
+				offset = ringid;
+			ptr += offset;
+
+			data = LTOH16((uint16)*ptr);
+			break;
+
+		case D2H_DMA_WRITEINDX:
+			OSL_CACHE_INV((void *)dhd->prot->d2h_dma_writeindx_buf.va,
+				dhd->prot->d2h_dma_writeindx_buf_len);
+			ptr = (uint32 *)(dhd->prot->d2h_dma_writeindx_buf.va);
+
+			/* H2D Common Righs start at Id BCMPCIE_H2D_COMMON_MSGRINGS */
+			offset = ringid - BCMPCIE_H2D_COMMON_MSGRINGS;
+			ptr += offset;
+
+			data = LTOH16((uint16)*ptr);
+			break;
+
+		case D2H_DMA_READINDX:
+			OSL_CACHE_INV((void *)dhd->prot->d2h_dma_readindx_buf.va,
+				dhd->prot->d2h_dma_readindx_buf_len);
+			ptr = (uint32 *)(dhd->prot->d2h_dma_readindx_buf.va);
+
+			/* H2D Common Righs start at Id BCMPCIE_H2D_COMMON_MSGRINGS */
+			offset = ringid - BCMPCIE_H2D_COMMON_MSGRINGS;
+			ptr += offset;
+
+			data = LTOH16((uint16)*ptr);
+			break;
+
+		default:
+			DHD_ERROR(("%s: Invalid option for DMAing read/write index\n",
+				__FUNCTION__));
+
+			break;
+	}
+	DHD_TRACE(("%s: Data 0x%p, data %d\n", __FUNCTION__, ptr, data));
+	return (data);
+}
+
+/* D2H dircetion: get next space to read from */
+static uint8*
+prot_get_src_addr(dhd_pub_t *dhd, msgbuf_ring_t * ring, uint16* available_len)
+{
+	uint16 w_ptr;
+	uint16 r_ptr;
+	uint16 depth;
+	void* ret_addr = NULL;
+	uint16 d2h_w_index = 0;
+
+	DHD_TRACE(("%s: h2d_dma_readindx_buf %p, d2h_dma_writeindx_buf %p\n",
+		__FUNCTION__, (uint32 *)(dhd->prot->h2d_dma_readindx_buf.va),
+		(uint32 *)(dhd->prot->d2h_dma_writeindx_buf.va)));
+
+	/* update write pointer */
+	if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) {
+		/* DMAing write/read indices supported */
+		d2h_w_index = dhd_get_dmaed_index(dhd, D2H_DMA_WRITEINDX, ring->idx);
+		ring->ringstate->w_offset = d2h_w_index;
+	} else
+		dhd_bus_cmn_readshared(dhd->bus,
+			&(RING_WRITE_PTR(ring)), RING_WRITE_PTR, ring->idx);
+
+	w_ptr = ring->ringstate->w_offset;
+	r_ptr = ring->ringstate->r_offset;
+	depth = ring->ringmem->max_item;
+
+	/* check for avail space */
+	*available_len = READ_AVAIL_SPACE(w_ptr, r_ptr, depth);
+	if (*available_len == 0)
+		return NULL;
+
+	ASSERT(*available_len <= ring->ringmem->max_item);
+
+	/* if space available, calculate address to be read */
+	ret_addr = (char*)ring->ring_base.va + (r_ptr * ring->ringmem->len_items);
+
+	/* update read pointer */
+	if ((ring->ringstate->r_offset + *available_len) >= ring->ringmem->max_item)
+		ring->ringstate->r_offset = 0;
+	else
+		ring->ringstate->r_offset += *available_len;
+
+	ASSERT(ring->ringstate->r_offset < ring->ringmem->max_item);
+
+	/* convert index to bytes */
+	*available_len = *available_len * ring->ringmem->len_items;
+
+	/* return read address */
+	return ret_addr;
+}
+static void
+prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring)
+{
+	/* update read index */
+	/* If dma'ing h2d indices supported
+	 * update the r -indices in the
+	 * host memory o/w in TCM
+	 */
+	if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support))
+		dhd_set_dmaed_index(dhd, D2H_DMA_READINDX,
+			ring->idx, (uint16)RING_READ_PTR(ring));
+	else
+		dhd_bus_cmn_writeshared(dhd->bus, &(RING_READ_PTR(ring)),
+			sizeof(uint16), RING_READ_PTR, ring->idx);
+}
+static void
+prot_store_rxcpln_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring)
+{
+	dhd_prot_t *prot;
+	if (!dhd || !dhd->prot)
+		return;
+	prot = dhd->prot;
+	prot->rx_cpln_early_upd_idx = RING_READ_PTR(ring);
+}
+static void
+prot_early_upd_rxcpln_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring)
+{
+	dhd_prot_t *prot;
+	if (!dhd || !dhd->prot)
+		return;
+	prot = dhd->prot;
+	if (prot->rx_cpln_early_upd_idx == RING_READ_PTR(ring))
+		return;
+	if (++prot->rx_cpln_early_upd_idx >= RING_MAX_ITEM(ring))
+		prot->rx_cpln_early_upd_idx = 0;
+	if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support))
+		dhd_set_dmaed_index(dhd, D2H_DMA_READINDX,
+			ring->idx, (uint16)prot->rx_cpln_early_upd_idx);
+	else
+		dhd_bus_cmn_writeshared(dhd->bus, &(prot->rx_cpln_early_upd_idx),
+			sizeof(uint16), RING_READ_PTR, ring->idx);
+}
+
+int
+dhd_prot_flow_ring_create(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
+{
+	tx_flowring_create_request_t *flow_create_rqst;
+	msgbuf_ring_t *msgbuf_flow_info;
+	dhd_prot_t *prot = dhd->prot;
+	uint16 hdrlen = sizeof(tx_flowring_create_request_t);
+	uint16 msglen = hdrlen;
+	unsigned long flags;
+	char eabuf[ETHER_ADDR_STR_LEN];
+	uint16 alloced = 0;
+
+	if (!(msgbuf_flow_info = prot_ring_attach(prot, "h2dflr",
+		H2DRING_TXPOST_MAX_ITEM, H2DRING_TXPOST_ITEMSIZE,
+		BCMPCIE_H2D_TXFLOWRINGID +
+		(flow_ring_node->flowid - BCMPCIE_H2D_COMMON_MSGRINGS)))) {
+		DHD_ERROR(("%s: kmalloc for H2D TX Flow ring failed\n", __FUNCTION__));
+		return BCME_NOMEM;
+	}
+	/* Clear write pointer of the ring */
+	flow_ring_node->prot_info = (void *)msgbuf_flow_info;
+
+	/* align it to 4 bytes, so that all start addr form cbuf is 4 byte aligned */
+	msglen = align(msglen, DMA_ALIGN_LEN);
+
+
+	DHD_GENERAL_LOCK(dhd, flags);
+	/* Request for ring buffer space */
+	flow_create_rqst = (tx_flowring_create_request_t *)dhd_alloc_ring_space(dhd,
+		prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced);
+
+	if (flow_create_rqst == NULL) {
+		DHD_ERROR(("%s: No space in control ring for Flow create req\n", __FUNCTION__));
+		DHD_GENERAL_UNLOCK(dhd, flags);
+		return BCME_NOMEM;
+	}
+	msgbuf_flow_info->inited = TRUE;
+
+	/* Common msg buf hdr */
+	flow_create_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_CREATE;
+	flow_create_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
+	flow_create_rqst->msg.request_id = htol16(0); /* TBD */
+
+	/* Update flow create message */
+	flow_create_rqst->tid = flow_ring_node->flow_info.tid;
+	flow_create_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
+	memcpy(flow_create_rqst->sa, flow_ring_node->flow_info.sa, sizeof(flow_create_rqst->sa));
+	memcpy(flow_create_rqst->da, flow_ring_node->flow_info.da, sizeof(flow_create_rqst->da));
+	flow_create_rqst->flow_ring_ptr.low_addr = msgbuf_flow_info->ringmem->base_addr.low_addr;
+	flow_create_rqst->flow_ring_ptr.high_addr = msgbuf_flow_info->ringmem->base_addr.high_addr;
+	flow_create_rqst->max_items = htol16(H2DRING_TXPOST_MAX_ITEM);
+	flow_create_rqst->len_item = htol16(H2DRING_TXPOST_ITEMSIZE);
+	bcm_ether_ntoa((struct ether_addr *)flow_ring_node->flow_info.da, eabuf);
+	DHD_ERROR(("%s Send Flow create Req msglen flow ID %d for peer %s prio %d ifindex %d\n",
+		__FUNCTION__, flow_ring_node->flowid, eabuf, flow_ring_node->flow_info.tid,
+		flow_ring_node->flow_info.ifindex));
+
+	/* upd wrt ptr and raise interrupt */
+	prot_ring_write_complete(dhd, prot->h2dring_ctrl_subn, flow_create_rqst,
+		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
+
+	/* If dma'ing indices supported
+	 * update the w-index in host memory o/w in TCM
+	 */
+	if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support))
+		dhd_set_dmaed_index(dhd, H2D_DMA_WRITEINDX,
+			msgbuf_flow_info->idx, (uint16)RING_WRITE_PTR(msgbuf_flow_info));
+	else
+		dhd_bus_cmn_writeshared(dhd->bus, &(RING_WRITE_PTR(msgbuf_flow_info)),
+			sizeof(uint16), RING_WRITE_PTR, msgbuf_flow_info->idx);
+	DHD_GENERAL_UNLOCK(dhd, flags);
+
+	return BCME_OK;
+}
+
+static void
+dhd_prot_process_flow_ring_create_response(dhd_pub_t *dhd, void* buf, uint16 msglen)
+{
+	tx_flowring_create_response_t *flow_create_resp = (tx_flowring_create_response_t *)buf;
+
+	DHD_ERROR(("%s Flow create Response status = %d Flow %d\n", __FUNCTION__,
+		flow_create_resp->cmplt.status, flow_create_resp->cmplt.flow_ring_id));
+
+	dhd_bus_flow_ring_create_response(dhd->bus, flow_create_resp->cmplt.flow_ring_id,
+		flow_create_resp->cmplt.status);
+}
+
+void dhd_prot_clean_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info)
+{
+	msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info;
+	dhd_prot_ring_detach(dhd, flow_ring);
+	DHD_INFO(("%s Cleaning up Flow \n", __FUNCTION__));
+}
+
+void dhd_prot_print_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info,
+	struct bcmstrbuf *strbuf)
+{
+	msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info;
+	uint16 rd, wrt;
+	dhd_bus_cmn_readshared(dhd->bus, &rd, RING_READ_PTR, flow_ring->idx);
+	dhd_bus_cmn_readshared(dhd->bus, &wrt, RING_WRITE_PTR, flow_ring->idx);
+	bcm_bprintf(strbuf, "RD %d WR %d\n", rd, wrt);
+}
+
+void dhd_prot_print_info(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
+{
+	bcm_bprintf(strbuf, "CtrlPost: ");
+	dhd_prot_print_flow_ring(dhd, dhd->prot->h2dring_ctrl_subn, strbuf);
+	bcm_bprintf(strbuf, "CtrlCpl: ");
+	dhd_prot_print_flow_ring(dhd, dhd->prot->d2hring_ctrl_cpln, strbuf);
+	bcm_bprintf(strbuf, "RxPost: ");
+	bcm_bprintf(strbuf, "RBP %d ", dhd->prot->rxbufpost);
+	dhd_prot_print_flow_ring(dhd, dhd->prot->h2dring_rxp_subn, strbuf);
+	bcm_bprintf(strbuf, "RxCpl: ");
+	dhd_prot_print_flow_ring(dhd, dhd->prot->d2hring_rx_cpln, strbuf);
+	if (dhd_bus_is_txmode_push(dhd->bus)) {
+		bcm_bprintf(strbuf, "TxPost: ");
+		dhd_prot_print_flow_ring(dhd, dhd->prot->h2dring_txp_subn, strbuf);
+	}
+	bcm_bprintf(strbuf, "TxCpl: ");
+	dhd_prot_print_flow_ring(dhd, dhd->prot->d2hring_tx_cpln, strbuf);
+	bcm_bprintf(strbuf, "active_tx_count %d	 pktidmap_avail %d\n",
+		dhd->prot->active_tx_count,
+		dhd_pktid_map_avail_cnt(dhd->prot->pktid_map_handle));
+}
+
+int
+dhd_prot_flow_ring_delete(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
+{
+	tx_flowring_delete_request_t *flow_delete_rqst;
+	dhd_prot_t *prot = dhd->prot;
+	uint16 msglen = sizeof(tx_flowring_delete_request_t);
+	unsigned long flags;
+	uint16 alloced = 0;
+
+	/* align it to 4 bytes, so that all start addr form cbuf is 4 byte aligned */
+	msglen = align(msglen, DMA_ALIGN_LEN);
+
+	/* Request for ring buffer space */
+	DHD_GENERAL_LOCK(dhd, flags);
+	flow_delete_rqst = (tx_flowring_delete_request_t *)dhd_alloc_ring_space(dhd,
+		prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced);
+
+	if (flow_delete_rqst == NULL) {
+		DHD_GENERAL_UNLOCK(dhd, flags);
+		DHD_ERROR(("%s Flow Delete req failure no ring mem %d \n", __FUNCTION__, msglen));
+		return BCME_NOMEM;
+	}
+
+	/* Common msg buf hdr */
+	flow_delete_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_DELETE;
+	flow_delete_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
+	flow_delete_rqst->msg.request_id = htol16(0); /* TBD */
+
+	/* Update Delete info */
+	flow_delete_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
+	flow_delete_rqst->reason = htol16(BCME_OK);
+
+	DHD_ERROR(("%s sending FLOW RING Delete req msglen %d \n", __FUNCTION__, msglen));
+
+	/* upd wrt ptr and raise interrupt */
+	prot_ring_write_complete(dhd, prot->h2dring_ctrl_subn, flow_delete_rqst,
+		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
+	DHD_GENERAL_UNLOCK(dhd, flags);
+
+	return BCME_OK;
+}
+
+static void
+dhd_prot_process_flow_ring_delete_response(dhd_pub_t *dhd, void* buf, uint16 msglen)
+{
+	tx_flowring_delete_response_t *flow_delete_resp = (tx_flowring_delete_response_t *)buf;
+
+	DHD_INFO(("%s Flow Delete Response status = %d \n", __FUNCTION__,
+		flow_delete_resp->cmplt.status));
+
+	dhd_bus_flow_ring_delete_response(dhd->bus, flow_delete_resp->cmplt.flow_ring_id,
+		flow_delete_resp->cmplt.status);
+}
+
+int
+dhd_prot_flow_ring_flush(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
+{
+	tx_flowring_flush_request_t *flow_flush_rqst;
+	dhd_prot_t *prot = dhd->prot;
+	uint16 msglen = sizeof(tx_flowring_flush_request_t);
+	unsigned long flags;
+	uint16 alloced = 0;
+
+	/* align it to 4 bytes, so that all start addr form cbuf is 4 byte aligned */
+	msglen = align(msglen, DMA_ALIGN_LEN);
+
+	/* Request for ring buffer space */
+	DHD_GENERAL_LOCK(dhd, flags);
+	flow_flush_rqst = (tx_flowring_flush_request_t *)dhd_alloc_ring_space(dhd,
+		prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced);
+	if (flow_flush_rqst == NULL) {
+		DHD_GENERAL_UNLOCK(dhd, flags);
+		DHD_ERROR(("%s Flow Flush req failure no ring mem %d \n", __FUNCTION__, msglen));
+		return BCME_NOMEM;
+	}
+
+	/* Common msg buf hdr */
+	flow_flush_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_FLUSH;
+	flow_flush_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
+	flow_flush_rqst->msg.request_id = htol16(0); /* TBD */
+
+	flow_flush_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
+	flow_flush_rqst->reason = htol16(BCME_OK);
+
+	DHD_INFO(("%s sending FLOW RING Flush req msglen %d \n", __FUNCTION__, msglen));
+
+	/* upd wrt ptr and raise interrupt */
+	prot_ring_write_complete(dhd, prot->h2dring_ctrl_subn, flow_flush_rqst,
+		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
+	DHD_GENERAL_UNLOCK(dhd, flags);
+
+	return BCME_OK;
+}
+
+static void
+dhd_prot_process_flow_ring_flush_response(dhd_pub_t *dhd, void* buf, uint16 msglen)
+{
+	tx_flowring_flush_response_t *flow_flush_resp = (tx_flowring_flush_response_t *)buf;
+
+	DHD_INFO(("%s Flow Flush Response status = %d \n", __FUNCTION__,
+		flow_flush_resp->cmplt.status));
+
+	dhd_bus_flow_ring_flush_response(dhd->bus, flow_flush_resp->cmplt.flow_ring_id,
+		flow_flush_resp->cmplt.status);
+}
+
+int
+dhd_prot_ringupd_dump(dhd_pub_t *dhd, struct bcmstrbuf *b)
+{
+	uint32 *ptr;
+	uint32 value;
+	uint32 i;
+	uint8 txpush = 0;
+	uint32 max_h2d_queues = dhd_bus_max_h2d_queues(dhd->bus, &txpush);
+
+	OSL_CACHE_INV((void *)dhd->prot->d2h_dma_writeindx_buf.va,
+		dhd->prot->d2h_dma_writeindx_buf_len);
+
+	ptr = (uint32 *)(dhd->prot->d2h_dma_writeindx_buf.va);
+
+	bcm_bprintf(b, "\n max_tx_queues %d, txpush mode %d\n", max_h2d_queues, txpush);
+
+	bcm_bprintf(b, "\nRPTR block H2D common rings, 0x%04x\n", ptr);
+	value = ltoh32(*ptr);
+	bcm_bprintf(b, "\tH2D CTRL: value 0x%04x\n", value);
+	ptr++;
+	value = ltoh32(*ptr);
+	bcm_bprintf(b, "\tH2D RXPOST: value 0x%04x\n", value);
+
+	if (txpush) {
+		ptr++;
+		value = ltoh32(*ptr);
+		bcm_bprintf(b, "\tH2D TXPOST value 0x%04x\n", value);
+	}
+	else {
+		ptr++;
+		bcm_bprintf(b, "RPTR block Flow rings , 0x%04x\n", ptr);
+		for (i = BCMPCIE_H2D_COMMON_MSGRINGS; i < max_h2d_queues; i++) {
+			value = ltoh32(*ptr);
+			bcm_bprintf(b, "\tflowring ID %d: value 0x%04x\n", i, value);
+			ptr++;
+		}
+	}
+
+	OSL_CACHE_INV((void *)dhd->prot->h2d_dma_readindx_buf.va,
+		dhd->prot->h2d_dma_readindx_buf_len);
+
+	ptr = (uint32 *)(dhd->prot->h2d_dma_readindx_buf.va);
+
+	bcm_bprintf(b, "\nWPTR block D2H common rings, 0x%04x\n", ptr);
+	value = ltoh32(*ptr);
+	bcm_bprintf(b, "\tD2H CTRLCPLT: value 0x%04x\n", value);
+	ptr++;
+	value = ltoh32(*ptr);
+	bcm_bprintf(b, "\tD2H TXCPLT: value 0x%04x\n", value);
+	ptr++;
+	value = ltoh32(*ptr);
+	bcm_bprintf(b, "\tD2H RXCPLT: value 0x%04x\n", value);
+
+	return 0;
+}
+
+uint32
+dhd_prot_metadatalen_set(dhd_pub_t *dhd, uint32 val, bool rx)
+{
+	dhd_prot_t *prot = dhd->prot;
+	if (rx)
+		prot->rx_metadata_offset = (uint16)val;
+	else
+		prot->tx_metadata_offset = (uint16)val;
+	return dhd_prot_metadatalen_get(dhd, rx);
+}
+
+uint32
+dhd_prot_metadatalen_get(dhd_pub_t *dhd, bool rx)
+{
+	dhd_prot_t *prot = dhd->prot;
+	if (rx)
+		return prot->rx_metadata_offset;
+	else
+		return prot->tx_metadata_offset;
+}
+
+uint32
+dhd_prot_txp_threshold(dhd_pub_t *dhd, bool set, uint32 val)
+{
+	dhd_prot_t *prot = dhd->prot;
+	if (set)
+		prot->txp_threshold = (uint16)val;
+	val = prot->txp_threshold;
+	return val;
+}
+
+#ifdef DHD_RX_CHAINING
+static INLINE void BCMFASTPATH
+dhd_rxchain_reset(rxchain_info_t *rxchain)
+{
+	rxchain->pkt_count = 0;
+}
+
+static void BCMFASTPATH
+dhd_rxchain_frame(dhd_pub_t *dhd, void *pkt, uint ifidx)
+{
+	uint8 *eh;
+	uint8 prio;
+	dhd_prot_t *prot = dhd->prot;
+	rxchain_info_t *rxchain = &prot->rxchain;
+
+	eh = PKTDATA(dhd->osh, pkt);
+	prio = IP_TOS46(eh + ETHER_HDR_LEN) >> IPV4_TOS_PREC_SHIFT;
+
+	/* For routers, with HNDCTF, link the packets using PKTSETCLINK, */
+	/* so that the chain can be handed off to CTF bridge as is. */
+	if (rxchain->pkt_count == 0) {
+		/* First packet in chain */
+		rxchain->pkthead = rxchain->pkttail = pkt;
+
+		/* Keep a copy of ptr to ether_da, ether_sa and prio */
+		rxchain->h_da = ((struct ether_header *)eh)->ether_dhost;
+		rxchain->h_sa = ((struct ether_header *)eh)->ether_shost;
+		rxchain->h_prio = prio;
+		rxchain->ifidx = ifidx;
+		rxchain->pkt_count++;
+	} else {
+		if (PKT_CTF_CHAINABLE(dhd, ifidx, eh, prio, rxchain->h_sa,
+			rxchain->h_da, rxchain->h_prio)) {
+			/* Same flow - keep chaining */
+			PKTSETCLINK(rxchain->pkttail, pkt);
+			rxchain->pkttail = pkt;
+			rxchain->pkt_count++;
+		} else {
+			/* Different flow - First release the existing chain */
+			dhd_rxchain_commit(dhd);
+
+			/* Create a new chain */
+			rxchain->pkthead = rxchain->pkttail = pkt;
+
+			/* Keep a copy of ptr to ether_da, ether_sa and prio */
+			rxchain->h_da = ((struct ether_header *)eh)->ether_dhost;
+			rxchain->h_sa = ((struct ether_header *)eh)->ether_shost;
+			rxchain->h_prio = prio;
+			rxchain->ifidx = ifidx;
+			rxchain->pkt_count++;
+		}
+	}
+
+	if ((!ETHER_ISMULTI(rxchain->h_da)) &&
+		((((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IP)) ||
+		(((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IPV6)))) {
+		PKTSETCHAINED(dhd->osh, pkt);
+		PKTCINCRCNT(rxchain->pkthead);
+		PKTCADDLEN(rxchain->pkthead, PKTLEN(dhd->osh, pkt));
+	} else {
+		dhd_rxchain_commit(dhd);
+		return;
+	}
+
+	/* If we have hit the max chain length, dispatch the chain and reset */
+	if (rxchain->pkt_count >= DHD_PKT_CTF_MAX_CHAIN_LEN) {
+		dhd_rxchain_commit(dhd);
+	}
+}
+
+static void BCMFASTPATH
+dhd_rxchain_commit(dhd_pub_t *dhd)
+{
+	dhd_prot_t *prot = dhd->prot;
+	rxchain_info_t *rxchain = &prot->rxchain;
+
+	if (rxchain->pkt_count == 0)
+		return;
+
+	/* Release the packets to dhd_linux */
+	dhd_bus_rx_frame(dhd->bus, rxchain->pkthead, rxchain->ifidx, rxchain->pkt_count);
+
+	/* Reset the chain */
+	dhd_rxchain_reset(rxchain);
+}
+#endif /* DHD_RX_CHAINING */
+static void
+dhd_prot_ring_clear(msgbuf_ring_t* ring)
+{
+	uint16 size;
+	DHD_TRACE(("%s\n",__FUNCTION__));
+
+	size = ring->ringmem->max_item * ring->ringmem->len_items;
+	OSL_CACHE_INV((void *) ring->ring_base.va, size);
+	bzero(ring->ring_base.va, size);
+	OSL_CACHE_FLUSH((void *) ring->ring_base.va, size);
+
+	bzero(ring->ringstate, sizeof(*ring->ringstate));
+}
+
+void
+dhd_prot_clear(dhd_pub_t *dhd)
+{
+	struct dhd_prot *prot = dhd->prot;
+
+	DHD_TRACE(("%s\n",__FUNCTION__));
+
+	if(prot == NULL)
+		return;
+
+	if(prot->h2dring_txp_subn)
+		dhd_prot_ring_clear(prot->h2dring_txp_subn);
+	if(prot->h2dring_rxp_subn)
+		dhd_prot_ring_clear(prot->h2dring_rxp_subn);
+	if(prot->h2dring_ctrl_subn)
+		dhd_prot_ring_clear(prot->h2dring_ctrl_subn);
+	if(prot->d2hring_tx_cpln)
+		dhd_prot_ring_clear(prot->d2hring_tx_cpln);
+	if(prot->d2hring_rx_cpln)
+		dhd_prot_ring_clear(prot->d2hring_rx_cpln);
+	if(prot->d2hring_ctrl_cpln)
+		dhd_prot_ring_clear(prot->d2hring_ctrl_cpln);
+
+
+	if(prot->retbuf.va) {
+		OSL_CACHE_INV((void *) prot->retbuf.va, IOCT_RETBUF_SIZE);
+		bzero(prot->retbuf.va, IOCT_RETBUF_SIZE);
+		OSL_CACHE_FLUSH((void *) prot->retbuf.va, IOCT_RETBUF_SIZE);
+	}
+
+	if(prot->ioctbuf.va) {
+		OSL_CACHE_INV((void *) prot->ioctbuf.va, IOCT_RETBUF_SIZE);
+		bzero(prot->ioctbuf.va, IOCT_RETBUF_SIZE);
+		OSL_CACHE_FLUSH((void *) prot->ioctbuf.va, IOCT_RETBUF_SIZE);
+	}
+
+	if(prot->d2h_dma_scratch_buf.va) {
+		OSL_CACHE_INV((void *)prot->d2h_dma_scratch_buf.va, DMA_D2H_SCRATCH_BUF_LEN);
+		bzero(prot->d2h_dma_scratch_buf.va, DMA_D2H_SCRATCH_BUF_LEN);
+		OSL_CACHE_FLUSH((void *)prot->d2h_dma_scratch_buf.va, DMA_D2H_SCRATCH_BUF_LEN);
+	}
+
+	if (prot->h2d_dma_readindx_buf.va) {
+		OSL_CACHE_INV((void *)prot->h2d_dma_readindx_buf.va,
+			prot->h2d_dma_readindx_buf_len);
+		bzero(prot->h2d_dma_readindx_buf.va,
+			prot->h2d_dma_readindx_buf_len);
+		OSL_CACHE_FLUSH((void *)prot->h2d_dma_readindx_buf.va,
+			prot->h2d_dma_readindx_buf_len);
+	}
+
+	if (prot->h2d_dma_writeindx_buf.va) {
+		OSL_CACHE_INV((void *)prot->h2d_dma_writeindx_buf.va,
+			prot->h2d_dma_writeindx_buf_len);
+		bzero(prot->h2d_dma_writeindx_buf.va, prot->h2d_dma_writeindx_buf_len);
+		OSL_CACHE_FLUSH((void *)prot->h2d_dma_writeindx_buf.va,
+			prot->h2d_dma_writeindx_buf_len);
+	}
+
+	if (prot->d2h_dma_readindx_buf.va) {
+		OSL_CACHE_INV((void *)prot->d2h_dma_readindx_buf.va,
+			prot->d2h_dma_readindx_buf_len);
+		bzero(prot->d2h_dma_readindx_buf.va, prot->d2h_dma_readindx_buf_len);
+		OSL_CACHE_FLUSH((void *)prot->d2h_dma_readindx_buf.va,
+			prot->d2h_dma_readindx_buf_len);
+	}
+
+	if (prot->d2h_dma_writeindx_buf.va) {
+		OSL_CACHE_INV((void *)prot->d2h_dma_writeindx_buf.va,
+			prot->d2h_dma_writeindx_buf_len);
+		bzero(prot->d2h_dma_writeindx_buf.va, prot->d2h_dma_writeindx_buf_len);
+		OSL_CACHE_FLUSH((void *)prot->d2h_dma_writeindx_buf.va,
+			prot->d2h_dma_writeindx_buf_len);
+	}
+
+	prot->rx_metadata_offset = 0;
+	prot->tx_metadata_offset = 0;
+
+	prot->rxbufpost = 0;
+	prot->cur_event_bufs_posted = 0;
+	prot->cur_ioctlresp_bufs_posted = 0;
+
+	prot->active_tx_count = 0;
+	prot->data_seq_no = 0;
+	prot->ioctl_seq_no = 0;
+	prot->pending = 0;
+	prot->lastcmd = 0;
+
+	prot->ioctl_trans_id = 1;
+
+	/* dhd_flow_rings_init is located at dhd_bus_start,
+	 *  so when stopping bus, flowrings shall be deleted
+	 */
+	dhd_flow_rings_deinit(dhd);
+	NATIVE_TO_PKTID_CLEAR(prot->pktid_map_handle);
+}
diff --git a/drivers/net/wireless/bcmdhd/dhd_pcie.c b/drivers/net/wireless/bcmdhd/dhd_pcie.c
new file mode 100644
index 0000000..b26ae32
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_pcie.c
@@ -0,0 +1,4311 @@
+/*
+ * DHD Bus Module for PCIE
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ *
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ *
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_pcie.c 477711 2014-05-14 08:45:17Z $
+ */
+
+
+/* include files */
+#include <typedefs.h>
+#include <bcmutils.h>
+#include <bcmdevs.h>
+#include <siutils.h>
+#include <hndsoc.h>
+#include <hndpmu.h>
+#include <sbchipc.h>
+#if defined(DHD_DEBUG)
+#include <hnd_armtrap.h>
+#include <hnd_cons.h>
+#endif /* defined(DHD_DEBUG) */
+#include <dngl_stats.h>
+#include <pcie_core.h>
+#include <dhd.h>
+#include <dhd_bus.h>
+#include <dhd_flowring.h>
+#include <dhd_proto.h>
+#include <dhd_dbg.h>
+#include <dhdioctl.h>
+#include <sdiovar.h>
+#include <bcmmsgbuf.h>
+#include <pcicfg.h>
+#include <dhd_pcie.h>
+#include <bcmpcie.h>
+#include <bcmendian.h>
+#ifdef DHDTCPACK_SUPPRESS
+#include <dhd_ip.h>
+#endif /* DHDTCPACK_SUPPRESS */
+
+#ifdef BCMEMBEDIMAGE
+#include BCMEMBEDIMAGE
+#endif /* BCMEMBEDIMAGE */
+
+#define MEMBLOCK	2048		/* Block size used for downloading of dongle image */
+#define MAX_NVRAMBUF_SIZE	6144	/* max nvram buf size */
+
+#define ARMCR4REG_BANKIDX	(0x40/sizeof(uint32))
+#define ARMCR4REG_BANKPDA	(0x4C/sizeof(uint32))
+/* Temporary war to fix precommit till sync issue between trunk & precommit branch is resolved */
+#define DHD_FLOW_RING(dhdp, flowid) \
+	(flow_ring_node_t *)&(((flow_ring_node_t *)((dhdp)->flow_ring_table))[flowid])
+
+int dhd_dongle_memsize;
+int dhd_dongle_ramsize;
+#ifdef DHD_DEBUG
+static int dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size);
+static int dhdpcie_bus_readconsole(dhd_bus_t *bus);
+#endif
+static void dhdpcie_bus_report_pcie_linkdown(dhd_bus_t *bus);
+static int dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uint size);
+static int dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid,
+	const char *name, void *params,
+	int plen, void *arg, int len, int val_size);
+static int dhdpcie_bus_lpback_req(struct  dhd_bus *bus, uint32 intval);
+static int dhdpcie_bus_dmaxfer_req(struct  dhd_bus *bus,
+	uint32 len, uint32 srcdelay, uint32 destdelay);
+static int dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter);
+static int _dhdpcie_download_firmware(struct dhd_bus *bus);
+static int dhdpcie_download_firmware(dhd_bus_t *bus, osl_t *osh);
+static int dhdpcie_bus_write_vars(dhd_bus_t *bus);
+static void dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus);
+static void dhdpci_bus_read_frames(dhd_bus_t *bus);
+static int dhdpcie_readshared(dhd_bus_t *bus);
+static void dhdpcie_init_shared_addr(dhd_bus_t *bus);
+static bool dhdpcie_dongle_attach(dhd_bus_t *bus);
+static void dhdpcie_bus_intr_enable(dhd_bus_t *bus);
+static void dhdpcie_bus_dongle_setmemsize(dhd_bus_t *bus, int mem_size);
+static void dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh,
+	bool dongle_isolation, bool reset_flag);
+static void dhdpcie_bus_release_malloc(dhd_bus_t *bus, osl_t *osh);
+static int dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len);
+static uint8 dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset);
+static void dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data);
+static void dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data);
+static uint16 dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset);
+static void dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data);
+static uint32 dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset);
+static void dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data);
+static uint64 dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset);
+static void dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t *bus, uint32 data);
+static void dhdpcie_bus_reg_unmap(osl_t *osh, ulong addr, int size);
+static int dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b);
+static void dhdpcie_send_mb_data(dhd_bus_t *bus, uint32 h2d_mb_data);
+static void dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info);
+
+#ifdef BCMEMBEDIMAGE
+static int dhdpcie_download_code_array(dhd_bus_t *bus);
+#endif /* BCMEMBEDIMAGE */
+extern void dhd_dpc_kill(dhd_pub_t *dhdp);
+
+
+
+#define     PCI_VENDOR_ID_BROADCOM          0x14e4
+
+/* IOVar table */
+enum {
+	IOV_INTR = 1,
+	IOV_MEMBYTES,
+	IOV_MEMSIZE,
+	IOV_SET_DOWNLOAD_STATE,
+	IOV_DEVRESET,
+	IOV_VARS,
+	IOV_MSI_SIM,
+	IOV_PCIE_LPBK,
+	IOV_CC_NVMSHADOW,
+	IOV_RAMSIZE,
+	IOV_RAMSTART,
+	IOV_SLEEP_ALLOWED,
+	IOV_PCIE_DMAXFER,
+	IOV_PCIE_SUSPEND,
+	IOV_PCIEREG,
+	IOV_PCIECFGREG,
+	IOV_PCIECOREREG,
+	IOV_PCIESERDESREG,
+	IOV_BAR0_SECWIN_REG,
+	IOV_SBREG,
+	IOV_DONGLEISOLATION,
+	IOV_LTRSLEEPON_UNLOOAD,
+	IOV_RX_METADATALEN,
+	IOV_TX_METADATALEN,
+	IOV_TXP_THRESHOLD,
+	IOV_BUZZZ_DUMP,
+	IOV_DUMP_RINGUPD_BLOCK,
+	IOV_DMA_RINGINDICES,
+	IOV_DB1_FOR_MB,
+	IOV_FLOW_PRIO_MAP
+};
+
+
+
+const bcm_iovar_t dhdpcie_iovars[] = {
+	{"intr",	IOV_INTR,	0,	IOVT_BOOL,	0 },
+	{"membytes",	IOV_MEMBYTES,	0,	IOVT_BUFFER,	2 * sizeof(int) },
+	{"memsize",	IOV_MEMSIZE,	0,	IOVT_UINT32,	0 },
+	{"dwnldstate",	IOV_SET_DOWNLOAD_STATE,	0,	IOVT_BOOL,	0 },
+	{"vars",	IOV_VARS,	0,	IOVT_BUFFER,	0 },
+	{"devreset",	IOV_DEVRESET,	0,	IOVT_BOOL,	0 },
+	{"pcie_lpbk",	IOV_PCIE_LPBK,	0,	IOVT_UINT32,	0 },
+	{"cc_nvmshadow", IOV_CC_NVMSHADOW, 0, IOVT_BUFFER, 0 },
+	{"ramsize",	IOV_RAMSIZE,	0,	IOVT_UINT32,	0 },
+	{"ramstart",	IOV_RAMSTART,	0,	IOVT_UINT32,	0 },
+	{"pciereg",	IOV_PCIEREG,	0,	IOVT_BUFFER,	2 * sizeof(int32) },
+	{"pciecfgreg",	IOV_PCIECFGREG,	0,	IOVT_BUFFER,	2 * sizeof(int32) },
+	{"pciecorereg",	IOV_PCIECOREREG,	0,	IOVT_BUFFER,	2 * sizeof(int32) },
+	{"bar0secwinreg",	IOV_BAR0_SECWIN_REG,	0,	IOVT_BUFFER,	2 * sizeof(int32) },
+	{"sbreg",	IOV_SBREG,	0,	IOVT_BUFFER,	sizeof(sdreg_t) },
+	{"pcie_dmaxfer",	IOV_PCIE_DMAXFER,	0,	IOVT_BUFFER,	3 * sizeof(int32) },
+	{"pcie_suspend", IOV_PCIE_SUSPEND,	0,	IOVT_UINT32,	0 },
+	{"sleep_allowed",	IOV_SLEEP_ALLOWED,	0,	IOVT_BOOL,	0 },
+	{"dngl_isolation", IOV_DONGLEISOLATION,	0,	IOVT_UINT32,	0 },
+	{"ltrsleep_on_unload", IOV_LTRSLEEPON_UNLOOAD,	0,	IOVT_UINT32,	0 },
+	{"dump_ringupdblk", IOV_DUMP_RINGUPD_BLOCK,	0,	IOVT_BUFFER,	0 },
+	{"dma_ring_indices", IOV_DMA_RINGINDICES,	0,	IOVT_UINT32,	0},
+	{"rx_metadata_len", IOV_RX_METADATALEN,	0,	IOVT_UINT32,	0 },
+	{"tx_metadata_len", IOV_TX_METADATALEN,	0,	IOVT_UINT32,	0 },
+	{"txp_thresh", IOV_TXP_THRESHOLD,	0,	IOVT_UINT32,	0 },
+	{"buzzz_dump", IOV_BUZZZ_DUMP,		0,	IOVT_UINT32,	0 },
+	{"flow_prio_map", IOV_FLOW_PRIO_MAP,	0,	IOVT_UINT32,	0 },
+	{NULL, 0, 0, 0, 0 }
+};
+
+#define MAX_READ_TIMEOUT	5 * 1000 * 1000
+
+/* Register/Unregister functions are called by the main DHD entry
+ * point (e.g. module insertion) to link with the bus driver, in
+ * order to look for or await the device.
+ */
+
+int
+dhd_bus_register(void)
+{
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	return dhdpcie_bus_register();
+}
+
+void
+dhd_bus_unregister(void)
+{
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	dhdpcie_bus_unregister();
+	return;
+}
+
+
+/** returns a host virtual address */
+uint32 *
+dhdpcie_bus_reg_map(osl_t *osh, ulong addr, int size)
+{
+	return (uint32 *)REG_MAP(addr, size);
+}
+
+void
+dhdpcie_bus_reg_unmap(osl_t *osh, ulong addr, int size)
+{
+	REG_UNMAP((void*)(uintptr)addr);
+	return;
+}
+
+/**
+ * 'regs' is the host virtual address that maps to the start of the PCIe BAR0 window. The first 4096
+ * bytes in this window are mapped to the backplane address in the PCIEBAR0Window register. The
+ * precondition is that the PCIEBAR0Window register 'points' at the PCIe core.
+ *
+ * 'tcm' is the *host* virtual address at which tcm is mapped.
+ */
+dhd_bus_t* dhdpcie_bus_attach(osl_t *osh, volatile char* regs, volatile char* tcm)
+{
+	dhd_bus_t *bus;
+
+	DHD_ERROR(("%s: ENTER\n", __FUNCTION__));
+
+	do {
+		if (!(bus = MALLOC(osh, sizeof(dhd_bus_t)))) {
+			DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__));
+			break;
+		}
+		bzero(bus, sizeof(dhd_bus_t));
+		bus->regs = regs;
+		bus->tcm = tcm;
+		bus->osh = osh;
+
+		dll_init(&bus->const_flowring);
+
+		/* Attach pcie shared structure */
+		bus->pcie_sh = MALLOC(osh, sizeof(pciedev_shared_t));
+
+		/* dhd_common_init(osh); */
+
+		if (dhdpcie_dongle_attach(bus)) {
+			DHD_ERROR(("%s: dhdpcie_probe_attach failed\n", __FUNCTION__));
+			break;
+		}
+
+		/* software resources */
+		if (!(bus->dhd = dhd_attach(osh, bus, PCMSGBUF_HDRLEN))) {
+			DHD_ERROR(("%s: dhd_attach failed\n", __FUNCTION__));
+
+			break;
+		}
+		bus->dhd->busstate = DHD_BUS_DOWN;
+		bus->db1_for_mb = TRUE;
+		bus->dhd->hang_report  = TRUE;
+
+		DHD_TRACE(("%s: EXIT SUCCESS\n",
+			__FUNCTION__));
+
+		return bus;
+	} while (0);
+
+	DHD_TRACE(("%s: EXIT FAILURE\n", __FUNCTION__));
+
+	return NULL;
+}
+
+uint
+dhd_bus_chip(struct dhd_bus *bus)
+{
+	ASSERT(bus->sih != NULL);
+	return bus->sih->chip;
+}
+
+uint
+dhd_bus_chiprev(struct dhd_bus *bus)
+{
+	ASSERT(bus);
+	ASSERT(bus->sih != NULL);
+	return bus->sih->chiprev;
+}
+
+void *
+dhd_bus_pub(struct dhd_bus *bus)
+{
+	return bus->dhd;
+}
+
+void *
+dhd_bus_sih(struct dhd_bus *bus)
+{
+	return (void *)bus->sih;
+}
+
+void *
+dhd_bus_txq(struct dhd_bus *bus)
+{
+	return &bus->txq;
+}
+
+/* Get Chip ID version */
+uint dhd_bus_chip_id(dhd_pub_t *dhdp)
+{
+	dhd_bus_t *bus = dhdp->bus;
+	return  bus->sih->chip;
+}
+
+/* Get Chip Rev ID version */
+uint dhd_bus_chiprev_id(dhd_pub_t *dhdp)
+{
+	dhd_bus_t *bus = dhdp->bus;
+	return bus->sih->chiprev;
+}
+
+/* Get Chip Pkg ID version */
+uint dhd_bus_chippkg_id(dhd_pub_t *dhdp)
+{
+	dhd_bus_t *bus = dhdp->bus;
+	return bus->sih->chippkg;
+}
+
+
+/*
+
+Name:  dhdpcie_bus_isr
+
+Parametrs:
+
+1: IN int irq   -- interrupt vector
+2: IN void *arg      -- handle to private data structure
+
+Return value:
+
+Status (TRUE or FALSE)
+
+Description:
+Interrupt Service routine checks for the status register,
+disable interrupt and queue DPC if mail box interrupts are raised.
+*/
+
+
+int32
+dhdpcie_bus_isr(dhd_bus_t *bus)
+{
+
+	do {
+			DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+			/* verify argument */
+			if (!bus) {
+				DHD_ERROR(("%s : bus is null pointer , exit \n", __FUNCTION__));
+				break;
+			}
+
+			if (bus->dhd->busstate == DHD_BUS_DOWN) {
+				DHD_INFO(("%s : bus is down. we have nothing to do\n",
+					__FUNCTION__));
+				break;
+			}
+
+			/*  Overall operation:
+			 *    - Mask further interrupts
+			 *    - Read/ack intstatus
+			 *    - Take action based on bits and state
+			 *    - Reenable interrupts (as per state)
+			 */
+
+			/* Count the interrupt call */
+			bus->intrcount++;
+
+			/* read interrupt status register!! Status bits will be cleared in DPC !! */
+			bus->ipend = TRUE;
+			dhdpcie_bus_intr_disable(bus); /* Disable interrupt!! */
+			bus->intdis = TRUE;
+
+#if defined(PCIE_ISR_THREAD)
+
+			DHD_TRACE(("Calling dhd_bus_dpc() from %s\n", __FUNCTION__));
+			DHD_OS_WAKE_LOCK(bus->dhd);
+			while (dhd_bus_dpc(bus));
+			DHD_OS_WAKE_UNLOCK(bus->dhd);
+#else
+			bus->dpc_sched = TRUE;
+			dhd_sched_dpc(bus->dhd);     /* queue DPC now!! */
+#endif /* defined(SDIO_ISR_THREAD) */
+
+			DHD_TRACE(("%s: Exit Success DPC Queued\n", __FUNCTION__));
+			return TRUE;
+
+	} while (0);
+
+	DHD_TRACE(("%s: Exit Failure\n", __FUNCTION__));
+	return FALSE;
+}
+
+static bool
+dhdpcie_dongle_attach(dhd_bus_t *bus)
+{
+
+	osl_t *osh = bus->osh;
+	void *regsva = (void*)bus->regs;
+	uint16 devid = bus->cl_devid;
+	uint32 val;
+	sbpcieregs_t *sbpcieregs;
+
+	DHD_TRACE(("%s: ENTER\n",
+		__FUNCTION__));
+
+	bus->alp_only = TRUE;
+	bus->sih = NULL;
+
+	/* Set bar0 window to si_enum_base */
+	dhdpcie_bus_cfg_set_bar0_win(bus, SI_ENUM_BASE);
+
+	/* si_attach() will provide an SI handle and scan the backplane */
+	if (!(bus->sih = si_attach((uint)devid, osh, regsva, PCI_BUS, bus,
+	                           &bus->vars, &bus->varsz))) {
+		DHD_ERROR(("%s: si_attach failed!\n", __FUNCTION__));
+		goto fail;
+	}
+
+
+	si_setcore(bus->sih, PCIE2_CORE_ID, 0);
+	sbpcieregs = (sbpcieregs_t*)(bus->regs);
+
+	/* WAR where the BAR1 window may not be sized properly */
+	W_REG(osh, &sbpcieregs->configaddr, 0x4e0);
+	val = R_REG(osh, &sbpcieregs->configdata);
+	W_REG(osh, &sbpcieregs->configdata, val);
+
+	/* Get info on the ARM and SOCRAM cores... */
+	/* Should really be qualified by device id */
+	if ((si_setcore(bus->sih, ARM7S_CORE_ID, 0)) ||
+	    (si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) ||
+	    (si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) {
+		bus->armrev = si_corerev(bus->sih);
+	} else {
+		DHD_ERROR(("%s: failed to find ARM core!\n", __FUNCTION__));
+		goto fail;
+	}
+
+	if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
+		if (!(bus->orig_ramsize = si_socram_size(bus->sih))) {
+			DHD_ERROR(("%s: failed to find SOCRAM memory!\n", __FUNCTION__));
+			goto fail;
+		}
+	} else {
+		/* cr4 has a different way to find the RAM size from TCM's */
+		if (!(bus->orig_ramsize = si_tcm_size(bus->sih))) {
+			DHD_ERROR(("%s: failed to find CR4-TCM memory!\n", __FUNCTION__));
+			goto fail;
+		}
+		/* also populate base address */
+		switch ((uint16)bus->sih->chip) {
+		case BCM4339_CHIP_ID:
+		case BCM4335_CHIP_ID:
+			bus->dongle_ram_base = CR4_4335_RAM_BASE;
+			break;
+		case BCM4358_CHIP_ID:
+		case BCM4356_CHIP_ID:
+		case BCM4354_CHIP_ID:
+		case BCM43569_CHIP_ID:
+		case BCM4350_CHIP_ID:
+		case BCM43570_CHIP_ID:
+			bus->dongle_ram_base = CR4_4350_RAM_BASE;
+			break;
+		case BCM4360_CHIP_ID:
+			bus->dongle_ram_base = CR4_4360_RAM_BASE;
+			break;
+		case BCM4345_CHIP_ID:
+			bus->dongle_ram_base = CR4_4345_RAM_BASE;
+			break;
+		case BCM43602_CHIP_ID:
+			bus->dongle_ram_base = CR4_43602_RAM_BASE;
+			break;
+		case BCM4349_CHIP_GRPID:
+			bus->dongle_ram_base = CR4_4349_RAM_BASE;
+			break;
+		default:
+			bus->dongle_ram_base = 0;
+			DHD_ERROR(("%s: WARNING: Using default ram base at 0x%x\n",
+			           __FUNCTION__, bus->dongle_ram_base));
+		}
+	}
+	bus->ramsize = bus->orig_ramsize;
+	if (dhd_dongle_memsize)
+		dhdpcie_bus_dongle_setmemsize(bus, dhd_dongle_memsize);
+
+	DHD_ERROR(("DHD: dongle ram size is set to %d(orig %d) at 0x%x\n",
+	           bus->ramsize, bus->orig_ramsize, bus->dongle_ram_base));
+
+	bus->srmemsize = si_socram_srmem_size(bus->sih);
+
+
+	bus->def_intmask = PCIE_MB_D2H_MB_MASK | PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1;
+
+	/* Set the poll and/or interrupt flags */
+	bus->intr = (bool)dhd_intr;
+
+	bus->wait_for_d3_ack = 1;
+	bus->suspended = FALSE;
+	DHD_TRACE(("%s: EXIT: SUCCESS\n",
+		__FUNCTION__));
+	return 0;
+
+fail:
+	if (bus->sih != NULL)
+		si_detach(bus->sih);
+	DHD_TRACE(("%s: EXIT: FAILURE\n",
+		__FUNCTION__));
+	return -1;
+}
+
+int
+dhpcie_bus_unmask_interrupt(dhd_bus_t *bus)
+{
+	dhdpcie_bus_cfg_write_dword(bus, PCIIntmask, 4, I_MB);
+	return 0;
+}
+int
+dhpcie_bus_mask_interrupt(dhd_bus_t *bus)
+{
+	dhdpcie_bus_cfg_write_dword(bus, PCIIntmask, 4, 0x0);
+	return 0;
+}
+
+void
+dhdpcie_bus_intr_enable(dhd_bus_t *bus)
+{
+	DHD_TRACE(("enable interrupts\n"));
+	if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
+		(bus->sih->buscorerev == 4)) {
+		dhpcie_bus_unmask_interrupt(bus);
+	}
+	else if (bus->sih) {
+		si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxMask,
+			bus->def_intmask, bus->def_intmask);
+	}
+}
+
+void
+dhdpcie_bus_intr_disable(dhd_bus_t *bus)
+{
+
+	DHD_TRACE(("%s Enter\n", __FUNCTION__));
+
+	if (bus) {
+
+		if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
+			(bus->sih->buscorerev == 4)) {
+			dhpcie_bus_mask_interrupt(bus);
+		}
+		else if (bus->sih) {
+			si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxMask,
+				bus->def_intmask, 0);
+		}
+	}
+	DHD_TRACE(("%s Exit\n", __FUNCTION__));
+}
+
+
+/* Detach and free everything */
+void
+dhdpcie_bus_release(dhd_bus_t *bus)
+{
+	bool dongle_isolation = FALSE;
+	osl_t *osh = NULL;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (bus) {
+
+		osh = bus->osh;
+		ASSERT(osh);
+
+		if (bus->dhd) {
+			dongle_isolation = bus->dhd->dongle_isolation;
+
+			if (bus->intr) {
+				if (bus->dhd->dongle_reset == FALSE)
+					dhdpcie_bus_intr_disable(bus);
+				dhdpcie_free_irq(bus);
+			}
+			/* Disable tasklet, already scheduled tasklet may be executed even though dongle has been released */
+			dhd_dpc_kill(bus->dhd);
+			dhd_detach(bus->dhd);
+			dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE);
+			dhd_free(bus->dhd);
+			bus->dhd = NULL;
+		}
+
+		/* unmap the regs and tcm here!! */
+		if (bus->regs) {
+			dhdpcie_bus_reg_unmap(osh, (ulong)bus->regs, DONGLE_REG_MAP_SIZE);
+			bus->regs = NULL;
+		}
+		if (bus->tcm) {
+			dhdpcie_bus_reg_unmap(osh, (ulong)bus->tcm, DONGLE_TCM_MAP_SIZE);
+			bus->tcm = NULL;
+		}
+
+		dhdpcie_bus_release_malloc(bus, osh);
+		/* Detach pcie shared structure */
+		if (bus->pcie_sh)
+			MFREE(osh, bus->pcie_sh, sizeof(pciedev_shared_t));
+
+#ifdef DHD_DEBUG
+
+		if (bus->console.buf != NULL)
+			MFREE(osh, bus->console.buf, bus->console.bufsize);
+#endif
+
+
+		/* Finally free bus info */
+		MFREE(osh, bus, sizeof(dhd_bus_t));
+
+	}
+
+	DHD_TRACE(("%s: Exit\n", __FUNCTION__));
+
+}
+
+
+void
+dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation, bool reset_flag)
+{
+
+	DHD_TRACE(("%s Enter\n", __FUNCTION__));
+
+	DHD_TRACE(("%s: Enter bus->dhd %p bus->dhd->dongle_reset %d \n", __FUNCTION__,
+		bus->dhd, bus->dhd->dongle_reset));
+
+	if ((bus->dhd && bus->dhd->dongle_reset) && reset_flag) {
+		DHD_TRACE(("%s Exit\n", __FUNCTION__));
+		return;
+	}
+
+	if (bus->sih) {
+
+		if (!dongle_isolation)
+			pcie_watchdog_reset(bus->osh, bus->sih, (sbpcieregs_t *)(bus->regs));
+
+		if (bus->ltrsleep_on_unload) {
+			si_corereg(bus->sih, bus->sih->buscoreidx,
+				OFFSETOF(sbpcieregs_t, u.pcie2.ltr_state), ~0, 0);
+		}
+		si_detach(bus->sih);
+		if (bus->vars && bus->varsz)
+			MFREE(osh, bus->vars, bus->varsz);
+		bus->vars = NULL;
+	}
+
+	DHD_TRACE(("%s Exit\n", __FUNCTION__));
+}
+
+uint32
+dhdpcie_bus_cfg_read_dword(dhd_bus_t *bus, uint32 addr, uint32 size)
+{
+	uint32 data = OSL_PCI_READ_CONFIG(bus->osh, addr, size);
+	return data;
+}
+
+/* 32 bit config write */
+void
+dhdpcie_bus_cfg_write_dword(dhd_bus_t *bus, uint32 addr, uint32 size, uint32 data)
+{
+	OSL_PCI_WRITE_CONFIG(bus->osh, addr, size, data);
+}
+
+void
+dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t *bus, uint32 data)
+{
+	OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR0_WIN, 4, data);
+}
+
+void
+dhdpcie_bus_dongle_setmemsize(struct dhd_bus *bus, int mem_size)
+{
+	int32 min_size =  DONGLE_MIN_MEMSIZE;
+	/* Restrict the memsize to user specified limit */
+	DHD_ERROR(("user: Restrict the dongle ram size to %d, min accepted %d\n",
+		dhd_dongle_memsize, min_size));
+	if ((dhd_dongle_memsize > min_size) &&
+		(dhd_dongle_memsize < (int32)bus->orig_ramsize))
+		bus->ramsize = dhd_dongle_memsize;
+}
+
+void
+dhdpcie_bus_release_malloc(dhd_bus_t *bus, osl_t *osh)
+{
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (bus->dhd && bus->dhd->dongle_reset)
+		return;
+
+	if (bus->vars && bus->varsz) {
+		MFREE(osh, bus->vars, bus->varsz);
+		bus->vars = NULL;
+	}
+
+	DHD_TRACE(("%s: Exit\n", __FUNCTION__));
+	return;
+
+}
+
+/* Stop bus module: clear pending frames, disable data flow */
+void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex)
+{
+	uint32 status;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (!bus->dhd)
+		return;
+
+	if(bus->dhd->busstate == DHD_BUS_DOWN) {
+		DHD_ERROR(("%s: already down by net_dev_reset\n",__FUNCTION__));
+		goto done;
+	}
+	bus->dhd->busstate = DHD_BUS_DOWN;
+	dhdpcie_bus_intr_disable(bus);
+	status =  dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
+	dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, status);
+	if (!dhd_download_fw_on_driverload)
+		dhd_dpc_kill(bus->dhd);
+
+	/* Clear rx control and wake any waiters */
+	bus->rxlen = 0;
+	dhd_os_ioctl_resp_wake(bus->dhd);
+done:
+
+	return;
+}
+
+/* Watchdog timer function */
+bool dhd_bus_watchdog(dhd_pub_t *dhd)
+{
+#ifdef DHD_DEBUG
+	dhd_bus_t *bus;
+	bus = dhd->bus;
+
+
+
+	/* Poll for console output periodically */
+	if (dhd->busstate == DHD_BUS_DATA && dhd_console_ms != 0) {
+		bus->console.count += dhd_watchdog_ms;
+		if (bus->console.count >= dhd_console_ms) {
+			bus->console.count -= dhd_console_ms;
+			/* Make sure backplane clock is on */
+			if (dhdpcie_bus_readconsole(bus) < 0)
+				dhd_console_ms = 0;	/* On error, stop trying */
+		}
+	}
+#endif /* DHD_DEBUG */
+
+	return FALSE;
+}
+
+/* Download firmware image and nvram image */
+int
+dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh,
+                          char *pfw_path, char *pnv_path)
+{
+	int ret;
+
+	bus->fw_path = pfw_path;
+	bus->nv_path = pnv_path;
+
+	ret = dhdpcie_download_firmware(bus, osh);
+
+	return ret;
+}
+
+static int
+dhdpcie_download_firmware(struct dhd_bus *bus, osl_t *osh)
+{
+	int ret = 0;
+
+	DHD_OS_WAKE_LOCK(bus->dhd);
+
+	ret = _dhdpcie_download_firmware(bus);
+
+	DHD_OS_WAKE_UNLOCK(bus->dhd);
+	return ret;
+}
+
+static int
+dhdpcie_download_code_file(struct dhd_bus *bus, char *pfw_path)
+{
+	int bcmerror = -1;
+	int offset = 0;
+	int len;
+	void *image = NULL;
+	uint8 *memblock = NULL, *memptr;
+
+	DHD_ERROR(("%s: download firmware %s\n", __FUNCTION__, pfw_path));
+
+	/* Should succeed in opening image if it is actually given through registry
+	 * entry or in module param.
+	 */
+	image = dhd_os_open_image(pfw_path);
+	if (image == NULL)
+		goto err;
+
+	memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
+	if (memblock == NULL) {
+		DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
+		goto err;
+	}
+	if ((uint32)(uintptr)memblock % DHD_SDALIGN)
+		memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN));
+
+	/* Download image */
+	while ((len = dhd_os_get_image_block((char*)memptr, MEMBLOCK, image))) {
+		if (len < 0) {
+			DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n", __FUNCTION__, len));
+			bcmerror = BCME_ERROR;
+			goto err;
+		}
+		/* check if CR4 */
+		if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
+			/* if address is 0, store the reset instruction to be written in 0 */
+
+			if (offset == 0) {
+				bus->resetinstr = *(((uint32*)memptr));
+				/* Add start of RAM address to the address given by user */
+				offset += bus->dongle_ram_base;
+			}
+		}
+
+		bcmerror = dhdpcie_bus_membytes(bus, TRUE, offset, memptr, len);
+		if (bcmerror) {
+			DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+			        __FUNCTION__, bcmerror, MEMBLOCK, offset));
+			goto err;
+		}
+
+		offset += MEMBLOCK;
+	}
+
+err:
+	if (memblock)
+		MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN);
+
+	if (image)
+		dhd_os_close_image(image);
+
+	return bcmerror;
+}
+
+
+static int
+dhdpcie_download_nvram(struct dhd_bus *bus)
+{
+	int bcmerror = -1;
+	uint len;
+	void * image = NULL;
+	char * memblock = NULL;
+	char *bufp;
+	char *pnv_path;
+	bool nvram_file_exists;
+
+	pnv_path = bus->nv_path;
+
+	nvram_file_exists = ((pnv_path != NULL) && (pnv_path[0] != '\0'));
+	if (!nvram_file_exists && (bus->nvram_params == NULL))
+		return (0);
+
+	if (nvram_file_exists) {
+		image = dhd_os_open_image(pnv_path);
+		if (image == NULL)
+			goto err;
+	}
+
+	memblock = MALLOC(bus->dhd->osh, MAX_NVRAMBUF_SIZE);
+	if (memblock == NULL) {
+		DHD_ERROR(("%s: Failed to allocate memory %d bytes\n",
+		           __FUNCTION__, MAX_NVRAMBUF_SIZE));
+		goto err;
+	}
+
+	/* Download variables */
+	if (nvram_file_exists) {
+		len = dhd_os_get_image_block(memblock, MAX_NVRAMBUF_SIZE, image);
+	}
+	else {
+
+		/* nvram is string with null terminated. cannot use strlen */
+		len = bus->nvram_params_len;
+		ASSERT(len <= MAX_NVRAMBUF_SIZE);
+		memcpy(memblock, bus->nvram_params, len);
+	}
+	if (len > 0 && len < MAX_NVRAMBUF_SIZE) {
+		bufp = (char *)memblock;
+		bufp[len] = 0;
+
+		if (nvram_file_exists)
+			len = process_nvram_vars(bufp, len);
+
+		if (len % 4) {
+			len += 4 - (len % 4);
+		}
+		bufp += len;
+		*bufp++ = 0;
+		if (len)
+			bcmerror = dhdpcie_downloadvars(bus, memblock, len + 1);
+		if (bcmerror) {
+			DHD_ERROR(("%s: error downloading vars: %d\n",
+			           __FUNCTION__, bcmerror));
+		}
+	}
+	else {
+		DHD_ERROR(("%s: error reading nvram file: %d\n",
+		           __FUNCTION__, len));
+		bcmerror = BCME_ERROR;
+	}
+
+err:
+	if (memblock)
+		MFREE(bus->dhd->osh, memblock, MAX_NVRAMBUF_SIZE);
+
+	if (image)
+		dhd_os_close_image(image);
+
+	return bcmerror;
+}
+
+
+#ifdef BCMEMBEDIMAGE
+int
+dhdpcie_download_code_array(struct dhd_bus *bus)
+{
+	int bcmerror = -1;
+	int offset = 0;
+	unsigned char *p_dlarray  = NULL;
+	unsigned int dlarray_size = 0;
+	unsigned int downloded_len, remaining_len, len;
+	char *p_dlimagename, *p_dlimagever, *p_dlimagedate;
+	uint8 *memblock = NULL, *memptr;
+
+	downloded_len = 0;
+	remaining_len = 0;
+	len = 0;
+
+	p_dlarray = dlarray;
+	dlarray_size = sizeof(dlarray);
+	p_dlimagename = dlimagename;
+	p_dlimagever  = dlimagever;
+	p_dlimagedate = dlimagedate;
+
+	if ((p_dlarray == 0) ||	(dlarray_size == 0) ||(dlarray_size > bus->ramsize) ||
+		(p_dlimagename == 0) ||	(p_dlimagever  == 0) ||	(p_dlimagedate == 0))
+		goto err;
+
+	memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
+	if (memblock == NULL) {
+		DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
+		goto err;
+	}
+	if ((uint32)(uintptr)memblock % DHD_SDALIGN)
+		memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN));
+
+	while (downloded_len  < dlarray_size) {
+		remaining_len = dlarray_size - downloded_len;
+		if (remaining_len >= MEMBLOCK)
+			len = MEMBLOCK;
+		else
+			len = remaining_len;
+
+		memcpy(memptr, (p_dlarray + downloded_len), len);
+		/* check if CR4 */
+		if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
+			/* if address is 0, store the reset instruction to be written in 0 */
+			if (offset == 0) {
+				bus->resetinstr = *(((uint32*)memptr));
+				/* Add start of RAM address to the address given by user */
+				offset += bus->dongle_ram_base;
+			}
+		}
+		bcmerror = dhdpcie_bus_membytes(bus, TRUE, offset, (uint8 *)memptr, len);
+		downloded_len += len;
+		if (bcmerror) {
+			DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+				__FUNCTION__, bcmerror, MEMBLOCK, offset));
+			goto err;
+		}
+		offset += MEMBLOCK;
+	}
+
+#ifdef DHD_DEBUG
+	/* Upload and compare the downloaded code */
+	{
+		unsigned char *ularray = NULL;
+		unsigned int uploded_len;
+		uploded_len = 0;
+		bcmerror = -1;
+		ularray = MALLOC(bus->dhd->osh, dlarray_size);
+		if (ularray == NULL)
+			goto upload_err;
+		/* Upload image to verify downloaded contents. */
+		offset = bus->dongle_ram_base;
+		memset(ularray, 0xaa, dlarray_size);
+		while (uploded_len  < dlarray_size) {
+			remaining_len = dlarray_size - uploded_len;
+			if (remaining_len >= MEMBLOCK)
+				len = MEMBLOCK;
+			else
+				len = remaining_len;
+			bcmerror = dhdpcie_bus_membytes(bus, FALSE, offset,
+				(uint8 *)(ularray + uploded_len), len);
+			if (bcmerror) {
+				DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
+					__FUNCTION__, bcmerror, MEMBLOCK, offset));
+				goto upload_err;
+			}
+
+			uploded_len += len;
+			offset += MEMBLOCK;
+		}
+
+		if (memcmp(p_dlarray, ularray, dlarray_size)) {
+			DHD_ERROR(("%s: Downloaded image is corrupted (%s, %s, %s).\n",
+				__FUNCTION__, p_dlimagename, p_dlimagever, p_dlimagedate));
+			goto upload_err;
+
+		} else
+			DHD_ERROR(("%s: Download, Upload and compare succeeded (%s, %s, %s).\n",
+				__FUNCTION__, p_dlimagename, p_dlimagever, p_dlimagedate));
+upload_err:
+		if (ularray)
+			MFREE(bus->dhd->osh, ularray, dlarray_size);
+	}
+#endif /* DHD_DEBUG */
+err:
+
+	if (memblock)
+		MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN);
+
+	return bcmerror;
+}
+#endif /* BCMEMBEDIMAGE */
+
+
+static int
+_dhdpcie_download_firmware(struct dhd_bus *bus)
+{
+	int bcmerror = -1;
+
+	bool embed = FALSE;	/* download embedded firmware */
+	bool dlok = FALSE;	/* download firmware succeeded */
+
+	/* Out immediately if no image to download */
+	if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) {
+#ifdef BCMEMBEDIMAGE
+		embed = TRUE;
+#else
+		DHD_ERROR(("%s: no fimrware file\n", __FUNCTION__));
+		return 0;
+#endif
+	}
+
+	/* Keep arm in reset */
+	if (dhdpcie_bus_download_state(bus, TRUE)) {
+		DHD_ERROR(("%s: error placing ARM core in reset\n", __FUNCTION__));
+		goto err;
+	}
+
+	/* External image takes precedence if specified */
+	if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) {
+		if (dhdpcie_download_code_file(bus, bus->fw_path)) {
+			DHD_ERROR(("%s: dongle image file download failed\n", __FUNCTION__));
+#ifdef BCMEMBEDIMAGE
+			embed = TRUE;
+#else
+			goto err;
+#endif
+		}
+		else {
+			embed = FALSE;
+			dlok = TRUE;
+		}
+	}
+
+#ifdef BCMEMBEDIMAGE
+	if (embed) {
+		if (dhdpcie_download_code_array(bus)) {
+			DHD_ERROR(("%s: dongle image array download failed\n", __FUNCTION__));
+			goto err;
+		}
+		else {
+			dlok = TRUE;
+		}
+	}
+#else
+	BCM_REFERENCE(embed);
+#endif
+	if (!dlok) {
+		DHD_ERROR(("%s: dongle image download failed\n", __FUNCTION__));
+		goto err;
+	}
+
+	/* EXAMPLE: nvram_array */
+	/* If a valid nvram_arry is specified as above, it can be passed down to dongle */
+	/* dhd_bus_set_nvram_params(bus, (char *)&nvram_array); */
+
+
+	/* External nvram takes precedence if specified */
+	if (dhdpcie_download_nvram(bus)) {
+		DHD_ERROR(("%s: dongle nvram file download failed\n", __FUNCTION__));
+		goto err;
+	}
+
+	/* Take arm out of reset */
+	if (dhdpcie_bus_download_state(bus, FALSE)) {
+		DHD_ERROR(("%s: error getting out of ARM core reset\n", __FUNCTION__));
+		goto err;
+	}
+
+	bcmerror = 0;
+
+err:
+	return bcmerror;
+}
+
+int dhd_bus_rxctl(struct dhd_bus *bus, uchar *msg, uint msglen)
+{
+	int timeleft;
+	uint rxlen = 0;
+	bool pending;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (bus->dhd->dongle_reset)
+		return -EIO;
+
+	/* Wait until control frame is available */
+	timeleft = dhd_os_ioctl_resp_wait(bus->dhd, &bus->rxlen, &pending);
+	if (timeleft == 0) {
+		DHD_ERROR(("%s: resumed on timeout\n", __FUNCTION__));
+		bus->ioct_resp.cmn_hdr.request_id = 0;
+		bus->ioct_resp.compl_hdr.status = 0xffff;
+		bus->rxlen = 0;
+	}
+	rxlen = bus->rxlen;
+	bcopy(&bus->ioct_resp, msg, sizeof(ioctl_comp_resp_msg_t));
+	bus->rxlen = 0;
+
+	if (rxlen) {
+		DHD_CTL(("%s: resumed on rxctl frame, got %d\n", __FUNCTION__, rxlen));
+	} else if (timeleft == 0) {
+		DHD_ERROR(("%s: resumed on timeout\n", __FUNCTION__));
+	} else if (pending == TRUE) {
+		DHD_CTL(("%s: canceled\n", __FUNCTION__));
+		return -ERESTARTSYS;
+	} else {
+		DHD_CTL(("%s: resumed for unknown reason?\n", __FUNCTION__));
+	}
+	if (timeleft == 0) {
+		bus->dhd->rxcnt_timeout++;
+		DHD_ERROR(("%s: rxcnt_timeout=%d\n", __FUNCTION__, bus->dhd->rxcnt_timeout));
+	}
+	else
+		bus->dhd->rxcnt_timeout = 0;
+
+	if (rxlen)
+		bus->dhd->rx_ctlpkts++;
+	else
+		bus->dhd->rx_ctlerrs++;
+
+	if (bus->dhd->rxcnt_timeout >= MAX_CNTL_TX_TIMEOUT) {
+#ifdef MSM_PCIE_LINKDOWN_RECOVERY
+		bus->islinkdown = TRUE;
+		DHD_ERROR(("PCIe link down\n"));
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+		return -ETIMEDOUT;
+	}
+	if (bus->dhd->dongle_trap_occured)
+		return -EREMOTEIO;
+
+	return rxlen ? (int)rxlen : -EIO;
+
+}
+
+#define CONSOLE_LINE_MAX	192
+
+#ifdef DHD_DEBUG
+static int
+dhdpcie_bus_readconsole(dhd_bus_t *bus)
+{
+	dhd_console_t *c = &bus->console;
+	uint8 line[CONSOLE_LINE_MAX], ch;
+	uint32 n, idx, addr;
+	int rv;
+
+	/* Don't do anything until FWREADY updates console address */
+	if (bus->console_addr == 0)
+		return -1;
+
+	/* Read console log struct */
+	addr = bus->console_addr + OFFSETOF(hnd_cons_t, log);
+
+	if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)&c->log, sizeof(c->log))) < 0)
+		return rv;
+
+	/* Allocate console buffer (one time only) */
+	if (c->buf == NULL) {
+		c->bufsize = ltoh32(c->log.buf_size);
+		if ((c->buf = MALLOC(bus->dhd->osh, c->bufsize)) == NULL)
+			return BCME_NOMEM;
+	}
+	idx = ltoh32(c->log.idx);
+
+	/* Protect against corrupt value */
+	if (idx > c->bufsize)
+		return BCME_ERROR;
+
+	/* Skip reading the console buffer if the index pointer has not moved */
+	if (idx == c->last)
+		return BCME_OK;
+
+	/* Read the console buffer */
+	addr = ltoh32(c->log.buf);
+	if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, c->buf, c->bufsize)) < 0)
+		return rv;
+
+	while (c->last != idx) {
+		for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
+			if (c->last == idx) {
+				/* This would output a partial line.  Instead, back up
+				 * the buffer pointer and output this line next time around.
+				 */
+				if (c->last >= n)
+					c->last -= n;
+				else
+					c->last = c->bufsize - n;
+				goto break2;
+			}
+			ch = c->buf[c->last];
+			c->last = (c->last + 1) % c->bufsize;
+			if (ch == '\n')
+				break;
+			line[n] = ch;
+		}
+
+		if (n > 0) {
+			if (line[n - 1] == '\r')
+				n--;
+			line[n] = 0;
+			printf("CONSOLE: %s\n", line);
+		}
+	}
+break2:
+
+	return BCME_OK;
+}
+
+static int
+dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size)
+{
+	int bcmerror = 0;
+	uint msize = 512;
+	char *mbuffer = NULL;
+	char *console_buffer = NULL;
+	uint maxstrlen = 256;
+	char *str = NULL;
+	trap_t tr;
+	pciedev_shared_t *pciedev_shared = bus->pcie_sh;
+	struct bcmstrbuf strbuf;
+	uint32 console_ptr, console_size, console_index;
+	uint8 line[CONSOLE_LINE_MAX], ch;
+	uint32 n, i, addr;
+	int rv;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (DHD_NOCHECKDIED_ON())
+		return 0;
+
+	if (data == NULL) {
+		/*
+		 * Called after a rx ctrl timeout. "data" is NULL.
+		 * allocate memory to trace the trap or assert.
+		 */
+		size = msize;
+		mbuffer = data = MALLOC(bus->dhd->osh, msize);
+
+		if (mbuffer == NULL) {
+			DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, msize));
+			bcmerror = BCME_NOMEM;
+			goto done;
+		}
+	}
+
+	if ((str = MALLOC(bus->dhd->osh, maxstrlen)) == NULL) {
+		DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, maxstrlen));
+		bcmerror = BCME_NOMEM;
+		goto done;
+	}
+
+	if ((bcmerror = dhdpcie_readshared(bus)) < 0)
+		goto done;
+
+	bcm_binit(&strbuf, data, size);
+
+	bcm_bprintf(&strbuf, "msgtrace address : 0x%08X\nconsole address  : 0x%08X\n",
+	            pciedev_shared->msgtrace_addr, pciedev_shared->console_addr);
+
+	if ((pciedev_shared->flags & PCIE_SHARED_ASSERT_BUILT) == 0) {
+		/* NOTE: Misspelled assert is intentional - DO NOT FIX.
+		 * (Avoids conflict with real asserts for programmatic parsing of output.)
+		 */
+		bcm_bprintf(&strbuf, "Assrt not built in dongle\n");
+	}
+
+	if ((bus->pcie_sh->flags & (PCIE_SHARED_ASSERT|PCIE_SHARED_TRAP)) == 0) {
+		/* NOTE: Misspelled assert is intentional - DO NOT FIX.
+		 * (Avoids conflict with real asserts for programmatic parsing of output.)
+		 */
+		bcm_bprintf(&strbuf, "No trap%s in dongle",
+		          (bus->pcie_sh->flags & PCIE_SHARED_ASSERT_BUILT)
+		          ?"/assrt" :"");
+	} else {
+		if (bus->pcie_sh->flags & PCIE_SHARED_ASSERT) {
+			/* Download assert */
+			bcm_bprintf(&strbuf, "Dongle assert");
+			if (bus->pcie_sh->assert_exp_addr != 0) {
+				str[0] = '\0';
+				if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
+				                                  bus->pcie_sh->assert_exp_addr,
+				                                 (uint8 *)str, maxstrlen)) < 0)
+					goto done;
+
+				str[maxstrlen - 1] = '\0';
+				bcm_bprintf(&strbuf, " expr \"%s\"", str);
+			}
+
+			if (bus->pcie_sh->assert_file_addr != 0) {
+				str[0] = '\0';
+				if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
+				                                  bus->pcie_sh->assert_file_addr,
+				                                 (uint8 *)str, maxstrlen)) < 0)
+					goto done;
+
+				str[maxstrlen - 1] = '\0';
+				bcm_bprintf(&strbuf, " file \"%s\"", str);
+			}
+
+			bcm_bprintf(&strbuf, " line %d ",  bus->pcie_sh->assert_line);
+		}
+
+		if (bus->pcie_sh->flags & PCIE_SHARED_TRAP) {
+			bus->dhd->dongle_trap_occured = TRUE;
+			if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
+			                                  bus->pcie_sh->trap_addr,
+			                                 (uint8*)&tr, sizeof(trap_t))) < 0)
+				goto done;
+
+			bcm_bprintf(&strbuf,
+			"Dongle trap type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, sp 0x%x,"
+			            "lp 0x%x, rpc 0x%x Trap offset 0x%x, "
+			"r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, "
+			"r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x\n\n",
+			ltoh32(tr.type), ltoh32(tr.epc), ltoh32(tr.cpsr), ltoh32(tr.spsr),
+			ltoh32(tr.r13), ltoh32(tr.r14), ltoh32(tr.pc),
+			ltoh32(bus->pcie_sh->trap_addr),
+			ltoh32(tr.r0), ltoh32(tr.r1), ltoh32(tr.r2), ltoh32(tr.r3),
+			ltoh32(tr.r4), ltoh32(tr.r5), ltoh32(tr.r6), ltoh32(tr.r7));
+
+			addr =  bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log);
+			if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
+				(uint8 *)&console_ptr, sizeof(console_ptr))) < 0)
+				goto printbuf;
+
+			addr =  bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.buf_size);
+			if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
+				(uint8 *)&console_size, sizeof(console_size))) < 0)
+				goto printbuf;
+
+			addr =  bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.idx);
+			if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
+				(uint8 *)&console_index, sizeof(console_index))) < 0)
+				goto printbuf;
+
+			console_ptr = ltoh32(console_ptr);
+			console_size = ltoh32(console_size);
+			console_index = ltoh32(console_index);
+
+			if (console_size > CONSOLE_BUFFER_MAX ||
+				!(console_buffer = MALLOC(bus->dhd->osh, console_size)))
+				goto printbuf;
+
+			if ((rv = dhdpcie_bus_membytes(bus, FALSE, console_ptr,
+				(uint8 *)console_buffer, console_size)) < 0)
+				goto printbuf;
+
+			for (i = 0, n = 0; i < console_size; i += n + 1) {
+				for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
+					ch = console_buffer[(console_index + i + n) % console_size];
+					if (ch == '\n')
+						break;
+					line[n] = ch;
+				}
+
+
+				if (n > 0) {
+					if (line[n - 1] == '\r')
+						n--;
+					line[n] = 0;
+					/* Don't use DHD_ERROR macro since we print
+					 * a lot of information quickly. The macro
+					 * will truncate a lot of the printfs
+					 */
+
+					if (dhd_msg_level & DHD_ERROR_VAL)
+						printf("CONSOLE: %s\n", line);
+				}
+			}
+		}
+	}
+
+printbuf:
+	if (bus->pcie_sh->flags & (PCIE_SHARED_ASSERT | PCIE_SHARED_TRAP)) {
+		DHD_ERROR(("%s: %s\n", __FUNCTION__, strbuf.origbuf));
+	}
+
+done:
+	if (mbuffer)
+		MFREE(bus->dhd->osh, mbuffer, msize);
+	if (str)
+		MFREE(bus->dhd->osh, str, maxstrlen);
+
+	if (console_buffer)
+		MFREE(bus->dhd->osh, console_buffer, console_size);
+
+	return bcmerror;
+}
+#endif /* DHD_DEBUG */
+static void
+dhdpcie_bus_report_pcie_linkdown(dhd_bus_t *bus)
+{
+	if (bus == NULL)
+		return;
+#ifdef MSM_PCIE_LINKDOWN_RECOVERY
+	bus->islinkdown = TRUE;
+	DHD_ERROR(("PCIe link down, Device ID and Vendor ID are 0x%x\n",
+			dhdpcie_bus_cfg_read_dword(bus, PCI_VENDOR_ID, 4)));
+	dhd_os_send_hang_message(bus->dhd);
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+}
+
+/**
+ * Transfers bytes from host to dongle using pio mode.
+ * Parameter 'address' is a backplane address.
+ */
+static int
+dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uint size)
+{
+	int bcmerror = 0;
+	uint dsize;
+	int detect_endian_flag = 0x01;
+	bool little_endian;
+
+	/* Detect endianness. */
+	little_endian = *(char *)&detect_endian_flag;
+
+	/* In remap mode, adjust address beyond socram and redirect
+	 * to devram at SOCDEVRAM_BP_ADDR since remap address > orig_ramsize
+	 * is not backplane accessible
+	 */
+
+	/* Determine initial transfer parameters */
+	dsize = sizeof(uint64);
+
+	/* Do the transfer(s) */
+	if (write) {
+		while (size) {
+			if (size >= sizeof(uint64) && little_endian)
+				dhdpcie_bus_wtcm64(bus, address, *((uint64 *)data));
+			else {
+				dsize = sizeof(uint8);
+				dhdpcie_bus_wtcm8(bus, address, *data);
+			}
+
+			/* Adjust for next transfer (if any) */
+			if ((size -= dsize)) {
+				data += dsize;
+				address += dsize;
+			}
+		}
+	} else {
+		while (size) {
+			if (size >= sizeof(uint64) && little_endian)
+				*(uint64 *)data = dhdpcie_bus_rtcm64(bus, address);
+			else {
+				dsize = sizeof(uint8);
+				*data = dhdpcie_bus_rtcm8(bus, address);
+			}
+
+			/* Adjust for next transfer (if any) */
+			if ((size -= dsize) > 0) {
+				data += dsize;
+				address += dsize;
+			}
+		}
+	}
+	return bcmerror;
+}
+
+int BCMFASTPATH
+dhd_bus_schedule_queue(struct dhd_bus  *bus, uint16 flow_id, bool txs)
+{
+	flow_ring_node_t *flow_ring_node;
+	int ret = BCME_OK;
+
+	DHD_INFO(("%s: flow_id is %d\n", __FUNCTION__, flow_id));
+	/* ASSERT on flow_id */
+	if (flow_id >= bus->max_sub_queues) {
+		DHD_ERROR(("%s: flow_id is invalid %d, max %d\n", __FUNCTION__,
+			flow_id, bus->max_sub_queues));
+		return 0;
+	}
+
+	flow_ring_node = DHD_FLOW_RING(bus->dhd, flow_id);
+
+	{
+		unsigned long flags;
+		void *txp = NULL;
+		flow_queue_t *queue;
+
+		queue = &flow_ring_node->queue; /* queue associated with flow ring */
+
+		DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
+
+		if (flow_ring_node->status != FLOW_RING_STATUS_OPEN) {
+				DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+				return BCME_NOTREADY;
+		}
+
+		while ((txp = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
+#ifdef DHDTCPACK_SUPPRESS
+		dhd_tcpack_check_xmit(bus->dhd, txp);
+#endif /* DHDTCPACK_SUPPRESS */
+			/* Attempt to transfer packet over flow ring */
+
+			ret = dhd_prot_txdata(bus->dhd, txp, flow_ring_node->flow_info.ifindex);
+			if (ret != BCME_OK) { /* may not have resources in flow ring */
+				DHD_INFO(("%s: Reinserrt %d\n", __FUNCTION__, ret));
+				dhd_prot_txdata_write_flush(bus->dhd, flow_id, FALSE);
+				/* reinsert at head */
+				dhd_flow_queue_reinsert(bus->dhd, queue, txp);
+				DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+
+				/* If we are able to requeue back, return success */
+				return BCME_OK;
+			}
+		}
+
+		dhd_prot_txdata_write_flush(bus->dhd, flow_id, FALSE);
+
+		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+	}
+
+	return ret;
+}
+
+/* Send a data frame to the dongle.  Callee disposes of txp. */
+int BCMFASTPATH
+dhd_bus_txdata(struct dhd_bus *bus, void *txp, uint8 ifidx)
+{
+	unsigned long flags;
+	int ret = BCME_OK;
+	void *txp_pend = NULL;
+	if (!bus->txmode_push) {
+		uint16 flowid;
+		flow_queue_t *queue;
+		flow_ring_node_t *flow_ring_node;
+		if (!bus->dhd->flowid_allocator) {
+			DHD_ERROR(("%s: Flow ring not intited yet  \n", __FUNCTION__));
+			goto toss;
+		}
+
+		flowid = DHD_PKTTAG_FLOWID((dhd_pkttag_fr_t*)PKTTAG(txp));
+
+		flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
+
+		DHD_TRACE(("%s: pkt flowid %d, status %d active %d\n",
+			__FUNCTION__, flowid, flow_ring_node->status,
+			flow_ring_node->active));
+
+		if ((flowid >= bus->dhd->num_flow_rings) ||
+			(!flow_ring_node->active) ||
+			(flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING)) {
+			DHD_INFO(("%s: Dropping pkt flowid %d, status %d active %d\n",
+				__FUNCTION__, flowid, flow_ring_node->status,
+				flow_ring_node->active));
+			ret = BCME_ERROR;
+			goto toss;
+		}
+
+		queue = &flow_ring_node->queue; /* queue associated with flow ring */
+
+		DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
+
+		if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp)) != BCME_OK)
+			txp_pend = txp;
+
+		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+
+		if (flow_ring_node->status) {
+			DHD_INFO(("%s: Enq pkt flowid %d, status %d active %d\n",
+			    __FUNCTION__, flowid, flow_ring_node->status,
+			    flow_ring_node->active));
+			if (txp_pend) {
+				txp = txp_pend;
+				goto toss;
+			}
+			return BCME_OK;
+		}
+		ret = dhd_bus_schedule_queue(bus, flowid, FALSE);
+
+		/* If we have anything pending, try to push into q */
+		if (txp_pend) {
+			DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
+
+			if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp_pend)) != BCME_OK) {
+				DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+				txp = txp_pend;
+				goto toss;
+			}
+
+			DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+		}
+
+		return ret;
+
+	} else { /* bus->txmode_push */
+		return dhd_prot_txdata(bus->dhd, txp, ifidx);
+	}
+
+toss:
+	DHD_INFO(("%s: Toss %d\n", __FUNCTION__, ret));
+	PKTCFREE(bus->dhd->osh, txp, TRUE);
+	return ret;
+}
+
+
+void
+dhd_bus_stop_queue(struct dhd_bus *bus)
+{
+	dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON);
+	bus->bus_flowctrl = TRUE;
+}
+
+void
+dhd_bus_start_queue(struct dhd_bus *bus)
+{
+	dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF);
+	bus->bus_flowctrl = TRUE;
+}
+
+void
+dhd_bus_update_retlen(dhd_bus_t *bus, uint32 retlen, uint32 pkt_id, uint16 status,
+	uint32 resp_len)
+{
+	bus->rxlen = retlen;
+	bus->ioct_resp.cmn_hdr.request_id = pkt_id;
+	bus->ioct_resp.compl_hdr.status = status;
+	bus->ioct_resp.resp_len = (uint16)resp_len;
+}
+
+#if defined(DHD_DEBUG)
+/* Device console input function */
+int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen)
+{
+	dhd_bus_t *bus = dhd->bus;
+	uint32 addr, val;
+	int rv;
+	/* Address could be zero if CONSOLE := 0 in dongle Makefile */
+	if (bus->console_addr == 0)
+		return BCME_UNSUPPORTED;
+
+	/* Don't allow input if dongle is in reset */
+	if (bus->dhd->dongle_reset) {
+		dhd_os_sdunlock(bus->dhd);
+		return BCME_NOTREADY;
+	}
+
+	/* Zero cbuf_index */
+	addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf_idx);
+	val = htol32(0);
+	if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
+		goto done;
+
+	/* Write message into cbuf */
+	addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf);
+	if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)msg, msglen)) < 0)
+		goto done;
+
+	/* Write length into vcons_in */
+	addr = bus->console_addr + OFFSETOF(hnd_cons_t, vcons_in);
+	val = htol32(msglen);
+	if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
+		goto done;
+
+	/* generate an interurpt to dongle to indicate that it needs to process cons command */
+	dhdpcie_send_mb_data(bus, H2D_HOST_CONS_INT);
+done:
+	return rv;
+}
+#endif /* defined(DHD_DEBUG) */
+
+/* Process rx frame , Send up the layer to netif */
+void BCMFASTPATH
+dhd_bus_rx_frame(struct dhd_bus *bus, void* pkt, int ifidx, uint pkt_count)
+{
+	dhd_rx_frame(bus->dhd, ifidx, pkt, pkt_count, 0);
+}
+
+/** 'offset' is a backplane address */
+void
+dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data)
+{
+	*(volatile uint8 *)(bus->tcm + offset) = (uint8)data;
+}
+
+uint8
+dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset)
+{
+#ifdef BCM47XX_ACP_WAR
+	volatile uint8 data = R_REG(bus->dhd->osh, (volatile uint8 *)(bus->tcm + offset));
+#else
+	volatile uint8 data = *(volatile uint8 *)(bus->tcm + offset);
+#endif
+	return data;
+}
+
+void
+dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data)
+{
+	*(volatile uint32 *)(bus->tcm + offset) = (uint32)data;
+}
+void
+dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data)
+{
+	*(volatile uint16 *)(bus->tcm + offset) = (uint16)data;
+}
+void
+dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data)
+{
+	*(volatile uint64 *)(bus->tcm + offset) = (uint64)data;
+}
+
+uint16
+dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset)
+{
+#ifdef BCM47XX_ACP_WAR
+	volatile uint16 data = R_REG(bus->dhd->osh, (volatile uint16 *)(bus->tcm + offset));
+#else
+	volatile uint16 data = *(volatile uint16 *)(bus->tcm + offset);
+#endif
+	return data;
+}
+
+uint32
+dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset)
+{
+#ifdef BCM47XX_ACP_WAR
+	volatile uint32 data = R_REG(bus->dhd->osh, (volatile uint32 *)(bus->tcm + offset));
+#else
+	volatile uint32 data = *(volatile uint32 *)(bus->tcm + offset);
+#endif
+	return data;
+}
+
+uint64
+dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset)
+{
+#ifdef BCM47XX_ACP_WAR
+	volatile uint64 data = R_REG(bus->dhd->osh, (volatile uint64 *)(bus->tcm + offset));
+#else
+	volatile uint64 data = *(volatile uint64 *)(bus->tcm + offset);
+#endif
+	return data;
+}
+
+void
+dhd_bus_cmn_writeshared(dhd_bus_t *bus, void * data, uint32 len, uint8 type, uint16 ringid)
+{
+	uint64 long_data;
+	ulong tcm_offset;
+	pciedev_shared_t *sh;
+	pciedev_shared_t *shmem = NULL;
+
+	sh = (pciedev_shared_t*)bus->shared_addr;
+
+	DHD_INFO(("%s: writing to msgbuf type %d, len %d\n", __FUNCTION__, type, len));
+
+	switch (type) {
+		case DNGL_TO_HOST_DMA_SCRATCH_BUFFER:
+			long_data = HTOL64(*(uint64 *)data);
+			tcm_offset = (ulong)&(sh->host_dma_scratch_buffer);
+			dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len);
+			prhex(__FUNCTION__, data, len);
+			break;
+
+		case DNGL_TO_HOST_DMA_SCRATCH_BUFFER_LEN :
+			tcm_offset = (ulong)&(sh->host_dma_scratch_buffer_len);
+			dhdpcie_bus_wtcm32(bus, tcm_offset, (uint32) HTOL32(*(uint32 *)data));
+			prhex(__FUNCTION__, data, len);
+			break;
+
+		case HOST_TO_DNGL_DMA_WRITEINDX_BUFFER:
+			/* ring_info_ptr stored in pcie_sh */
+			shmem = (pciedev_shared_t *)bus->pcie_sh;
+
+			long_data = HTOL64(*(uint64 *)data);
+			tcm_offset = (ulong)shmem->rings_info_ptr;
+			tcm_offset += OFFSETOF(ring_info_t, h2d_w_idx_hostaddr);
+			dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len);
+			prhex(__FUNCTION__, data, len);
+			break;
+
+		case HOST_TO_DNGL_DMA_READINDX_BUFFER:
+			/* ring_info_ptr stored in pcie_sh */
+			shmem = (pciedev_shared_t *)bus->pcie_sh;
+
+			long_data = HTOL64(*(uint64 *)data);
+			tcm_offset = (ulong)shmem->rings_info_ptr;
+			tcm_offset += OFFSETOF(ring_info_t, h2d_r_idx_hostaddr);
+			dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len);
+			prhex(__FUNCTION__, data, len);
+			break;
+
+		case DNGL_TO_HOST_DMA_WRITEINDX_BUFFER:
+			/* ring_info_ptr stored in pcie_sh */
+			shmem = (pciedev_shared_t *)bus->pcie_sh;
+
+			long_data = HTOL64(*(uint64 *)data);
+			tcm_offset = (ulong)shmem->rings_info_ptr;
+			tcm_offset += OFFSETOF(ring_info_t, d2h_w_idx_hostaddr);
+			dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len);
+			prhex(__FUNCTION__, data, len);
+			break;
+
+		case DNGL_TO_HOST_DMA_READINDX_BUFFER:
+			/* ring_info_ptr stored in pcie_sh */
+			shmem = (pciedev_shared_t *)bus->pcie_sh;
+
+			long_data = HTOL64(*(uint64 *)data);
+			tcm_offset = (ulong)shmem->rings_info_ptr;
+			tcm_offset += OFFSETOF(ring_info_t, d2h_r_idx_hostaddr);
+			dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len);
+			prhex(__FUNCTION__, data, len);
+			break;
+
+		case RING_LEN_ITEMS :
+			tcm_offset = bus->ring_sh[ringid].ring_mem_addr;
+			tcm_offset += OFFSETOF(ring_mem_t, len_items);
+			dhdpcie_bus_wtcm16(bus, tcm_offset, (uint16) HTOL16(*(uint16 *)data));
+			break;
+
+		case RING_MAX_ITEM :
+			tcm_offset = bus->ring_sh[ringid].ring_mem_addr;
+			tcm_offset += OFFSETOF(ring_mem_t, max_item);
+			dhdpcie_bus_wtcm16(bus, tcm_offset, (uint16) HTOL16(*(uint16 *)data));
+			break;
+
+		case RING_BUF_ADDR :
+			long_data = HTOL64(*(uint64 *)data);
+			tcm_offset = bus->ring_sh[ringid].ring_mem_addr;
+			tcm_offset += OFFSETOF(ring_mem_t, base_addr);
+			dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8 *) &long_data, len);
+			prhex(__FUNCTION__, data, len);
+			break;
+
+		case RING_WRITE_PTR :
+			tcm_offset = bus->ring_sh[ringid].ring_state_w;
+			dhdpcie_bus_wtcm16(bus, tcm_offset, (uint16) HTOL16(*(uint16 *)data));
+			break;
+		case RING_READ_PTR :
+			tcm_offset = bus->ring_sh[ringid].ring_state_r;
+			dhdpcie_bus_wtcm16(bus, tcm_offset, (uint16) HTOL16(*(uint16 *)data));
+			break;
+
+		case DTOH_MB_DATA:
+			dhdpcie_bus_wtcm32(bus, bus->d2h_mb_data_ptr_addr,
+				(uint32) HTOL32(*(uint32 *)data));
+			break;
+
+		case HTOD_MB_DATA:
+			dhdpcie_bus_wtcm32(bus, bus->h2d_mb_data_ptr_addr,
+				(uint32) HTOL32(*(uint32 *)data));
+			break;
+		default:
+			break;
+	}
+}
+
+
+void
+dhd_bus_cmn_readshared(dhd_bus_t *bus, void* data, uint8 type, uint16 ringid)
+{
+	pciedev_shared_t *sh;
+	ulong tcm_offset;
+
+	sh = (pciedev_shared_t*)bus->shared_addr;
+
+	switch (type) {
+		case RING_WRITE_PTR :
+			tcm_offset = bus->ring_sh[ringid].ring_state_w;
+			*(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, tcm_offset));
+			break;
+		case RING_READ_PTR :
+			tcm_offset = bus->ring_sh[ringid].ring_state_r;
+			*(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, tcm_offset));
+			break;
+		case TOTAL_LFRAG_PACKET_CNT :
+			*(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus,
+				(ulong) &sh->total_lfrag_pkt_cnt));
+			break;
+		case HTOD_MB_DATA:
+			*(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, bus->h2d_mb_data_ptr_addr));
+			break;
+		case DTOH_MB_DATA:
+			*(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, bus->d2h_mb_data_ptr_addr));
+			break;
+		case MAX_HOST_RXBUFS :
+			*(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus,
+				(ulong) &sh->max_host_rxbufs));
+			break;
+		default :
+			break;
+	}
+}
+
+uint32 dhd_bus_get_sharedflags(dhd_bus_t *bus)
+{
+	return ((pciedev_shared_t*)bus->pcie_sh)->flags;
+}
+
+void
+dhd_bus_clearcounts(dhd_pub_t *dhdp)
+{
+}
+
+int
+dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name,
+                 void *params, int plen, void *arg, int len, bool set)
+{
+	dhd_bus_t *bus = dhdp->bus;
+	const bcm_iovar_t *vi = NULL;
+	int bcmerror = 0;
+	int val_size;
+	uint32 actionid;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	ASSERT(name);
+	ASSERT(len >= 0);
+
+	/* Get MUST have return space */
+	ASSERT(set || (arg && len));
+
+	/* Set does NOT take qualifiers */
+	ASSERT(!set || (!params && !plen));
+
+	DHD_INFO(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
+	         name, (set ? "set" : "get"), len, plen));
+
+	/* Look up var locally; if not found pass to host driver */
+	if ((vi = bcm_iovar_lookup(dhdpcie_iovars, name)) == NULL) {
+		goto exit;
+	}
+
+
+	/* set up 'params' pointer in case this is a set command so that
+	 * the convenience int and bool code can be common to set and get
+	 */
+	if (params == NULL) {
+		params = arg;
+		plen = len;
+	}
+
+	if (vi->type == IOVT_VOID)
+		val_size = 0;
+	else if (vi->type == IOVT_BUFFER)
+		val_size = len;
+	else
+		/* all other types are integer sized */
+		val_size = sizeof(int);
+
+	actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+	bcmerror = dhdpcie_bus_doiovar(bus, vi, actionid, name, params, plen, arg, len, val_size);
+
+exit:
+	return bcmerror;
+}
+
+#ifdef BCM_BUZZZ
+#include <bcm_buzzz.h>
+
+int dhd_buzzz_dump_cntrs3(char *p, uint32 *core, uint32 * ovhd, uint32 *log)
+{
+	int bytes = 0;
+	uint32 ctr, curr[3], prev[3], delta[3];
+
+	/* Compute elapsed counter values per counter event type */
+	for (ctr = 0U; ctr < 3; ctr++) {
+		prev[ctr] = core[ctr];
+		curr[ctr] = *log++;
+		core[ctr] = curr[ctr];  /* saved for next log */
+
+		if (curr[ctr] < prev[ctr])
+			delta[ctr] = curr[ctr] + (~0U - prev[ctr]);
+		else
+			delta[ctr] = (curr[ctr] - prev[ctr]);
+
+		/* Adjust for instrumentation overhead */
+		if (delta[ctr] >= ovhd[ctr])
+			delta[ctr] -= ovhd[ctr];
+		else
+			delta[ctr] = 0;
+
+		bytes += sprintf(p + bytes, "%12u ", delta[ctr]);
+	}
+
+	return bytes;
+}
+
+typedef union cm3_cnts { /* export this in bcm_buzzz.h */
+	uint32 u32;
+	uint8  u8[4];
+	struct {
+		uint8 cpicnt;
+		uint8 exccnt;
+		uint8 sleepcnt;
+		uint8 lsucnt;
+	};
+} cm3_cnts_t;
+
+int dhd_buzzz_dump_cntrs6(char *p, uint32 *core, uint32 * ovhd, uint32 *log)
+{
+	int bytes = 0;
+
+	uint32 cyccnt, instrcnt;
+	cm3_cnts_t cm3_cnts;
+	uint8 foldcnt;
+
+	{   /* 32bit cyccnt */
+		uint32 curr, prev, delta;
+		prev = core[0]; curr = *log++; core[0] = curr;
+		if (curr < prev)
+			delta = curr + (~0U - prev);
+		else
+			delta = (curr - prev);
+		if (delta >= ovhd[0])
+			delta -= ovhd[0];
+		else
+			delta = 0;
+
+		bytes += sprintf(p + bytes, "%12u ", delta);
+		cyccnt = delta;
+	}
+
+	{	/* Extract the 4 cnts: cpi, exc, sleep and lsu */
+		int i;
+		uint8 max8 = ~0;
+		cm3_cnts_t curr, prev, delta;
+		prev.u32 = core[1]; curr.u32 = * log++; core[1] = curr.u32;
+		for (i = 0; i < 4; i++) {
+			if (curr.u8[i] < prev.u8[i])
+				delta.u8[i] = curr.u8[i] + (max8 - prev.u8[i]);
+			else
+				delta.u8[i] = (curr.u8[i] - prev.u8[i]);
+			if (delta.u8[i] >= ovhd[i + 1])
+				delta.u8[i] -= ovhd[i + 1];
+			else
+				delta.u8[i] = 0;
+			bytes += sprintf(p + bytes, "%4u ", delta.u8[i]);
+		}
+		cm3_cnts.u32 = delta.u32;
+	}
+
+	{   /* Extract the foldcnt from arg0 */
+		uint8 curr, prev, delta, max8 = ~0;
+		buzzz_arg0_t arg0; arg0.u32 = *log;
+		prev = core[2]; curr = arg0.klog.cnt; core[2] = curr;
+		if (curr < prev)
+			delta = curr + (max8 - prev);
+		else
+			delta = (curr - prev);
+		if (delta >= ovhd[5])
+			delta -= ovhd[5];
+		else
+			delta = 0;
+		bytes += sprintf(p + bytes, "%4u ", delta);
+		foldcnt = delta;
+	}
+
+	instrcnt = cyccnt - (cm3_cnts.u8[0] + cm3_cnts.u8[1] + cm3_cnts.u8[2]
+		                 + cm3_cnts.u8[3]) + foldcnt;
+	if (instrcnt > 0xFFFFFF00)
+		bytes += sprintf(p + bytes, "[%10s] ", "~");
+	else
+		bytes += sprintf(p + bytes, "[%10u] ", instrcnt);
+	return bytes;
+}
+
+int dhd_buzzz_dump_log(char * p, uint32 * core, uint32 * log, buzzz_t * buzzz)
+{
+	int bytes = 0;
+	buzzz_arg0_t arg0;
+	static uint8 * fmt[] = BUZZZ_FMT_STRINGS;
+
+	if (buzzz->counters == 6) {
+		bytes += dhd_buzzz_dump_cntrs6(p, core, buzzz->ovhd, log);
+		log += 2; /* 32bit cyccnt + (4 x 8bit) CM3 */
+	} else {
+		bytes += dhd_buzzz_dump_cntrs3(p, core, buzzz->ovhd, log);
+		log += 3; /* (3 x 32bit) CR4 */
+	}
+
+	/* Dump the logged arguments using the registered formats */
+	arg0.u32 = *log++;
+
+	switch (arg0.klog.args) {
+		case 0:
+			bytes += sprintf(p + bytes, fmt[arg0.klog.id]);
+			break;
+		case 1:
+		{
+			uint32 arg1 = *log++;
+			bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1);
+			break;
+		}
+		default:
+			printf("Maximum one argument supported\n");
+			break;
+	}
+	bytes += sprintf(p + bytes, "\n");
+
+	return bytes;
+}
+
+void dhd_buzzz_dump(buzzz_t * buzzz_p, void * buffer_p, char * p)
+{
+	int i;
+	uint32 total, part1, part2, log_sz, core[BUZZZ_COUNTERS_MAX];
+	void * log;
+
+	for (i = 0; i < BUZZZ_COUNTERS_MAX; i++)
+		core[i] = 0;
+
+	log_sz = buzzz_p->log_sz;
+
+	part1 = ((uint32)buzzz_p->cur - (uint32)buzzz_p->log) / log_sz;
+
+	if (buzzz_p->wrap == TRUE) {
+		part2 = ((uint32)buzzz_p->end - (uint32)buzzz_p->cur) / log_sz;
+		total = (buzzz_p->buffer_sz - BUZZZ_LOGENTRY_MAXSZ) / log_sz;
+	} else {
+		part2 = 0U;
+		total = buzzz_p->count;
+	}
+
+	if (total == 0U) {
+		printf("buzzz_dump total<%u> done\n", total);
+		return;
+	} else {
+		printf("buzzz_dump total<%u> : part2<%u> + part1<%u>\n",
+		       total, part2, part1);
+	}
+
+	if (part2) {   /* with wrap */
+		log = (void*)((size_t)buffer_p + (buzzz_p->cur - buzzz_p->log));
+		while (part2--) {   /* from cur to end : part2 */
+			p[0] = '\0';
+			dhd_buzzz_dump_log(p, core, (uint32 *)log, buzzz_p);
+			printf("%s", p);
+			log = (void*)((size_t)log + buzzz_p->log_sz);
+		}
+	}
+
+	log = (void*)buffer_p;
+	while (part1--) {
+		p[0] = '\0';
+		dhd_buzzz_dump_log(p, core, (uint32 *)log, buzzz_p);
+		printf("%s", p);
+		log = (void*)((size_t)log + buzzz_p->log_sz);
+	}
+
+	printf("buzzz_dump done.\n");
+}
+
+int dhd_buzzz_dump_dngl(dhd_bus_t *bus)
+{
+	buzzz_t * buzzz_p = NULL;
+	void * buffer_p = NULL;
+	char * page_p = NULL;
+	pciedev_shared_t *sh;
+	int ret = 0;
+
+	if (bus->dhd->busstate != DHD_BUS_DATA) {
+		return BCME_UNSUPPORTED;
+	}
+	if ((page_p = (char *)MALLOC(bus->dhd->osh, 4096)) == NULL) {
+		printf("Page memory allocation failure\n");
+		goto done;
+	}
+	if ((buzzz_p = MALLOC(bus->dhd->osh, sizeof(buzzz_t))) == NULL) {
+		printf("Buzzz memory allocation failure\n");
+		goto done;
+	}
+
+	ret = dhdpcie_readshared(bus);
+	if (ret < 0) {
+		DHD_ERROR(("%s :Shared area read failed \n", __FUNCTION__));
+		goto done;
+	}
+
+	sh = bus->pcie_sh;
+
+	DHD_INFO(("%s buzzz:%08x\n", __FUNCTION__, sh->buzzz));
+
+	if (sh->buzzz != 0U) {	/* Fetch and display dongle BUZZZ Trace */
+		dhdpcie_bus_membytes(bus, FALSE, (ulong)sh->buzzz,
+		                     (uint8 *)buzzz_p, sizeof(buzzz_t));
+		if (buzzz_p->count == 0) {
+			printf("Empty dongle BUZZZ trace\n\n");
+			goto done;
+		}
+		if (buzzz_p->counters != 3) { /* 3 counters for CR4 */
+			printf("Counters<%u> mismatch\n", buzzz_p->counters);
+			goto done;
+		}
+		/* Allocate memory for trace buffer and format strings */
+		buffer_p = MALLOC(bus->dhd->osh, buzzz_p->buffer_sz);
+		if (buffer_p == NULL) {
+			printf("Buffer memory allocation failure\n");
+			goto done;
+		}
+		/* Fetch the trace and format strings */
+		dhdpcie_bus_membytes(bus, FALSE, (uint32)buzzz_p->log,   /* Trace */
+		                     (uint8 *)buffer_p, buzzz_p->buffer_sz);
+		/* Process and display the trace using formatted output */
+		printf("<#cycle> <#instruction> <#ctr3> <event information>\n");
+		dhd_buzzz_dump(buzzz_p, buffer_p, page_p);
+		printf("----- End of dongle BUZZZ Trace -----\n\n");
+		MFREE(bus->dhd->osh, buffer_p, buzzz_p->buffer_sz); buffer_p = NULL;
+	}
+
+done:
+
+	if (page_p)   MFREE(bus->dhd->osh, page_p, 4096);
+	if (buzzz_p)  MFREE(bus->dhd->osh, buzzz_p, sizeof(buzzz_t));
+	if (buffer_p) MFREE(bus->dhd->osh, buffer_p, buzzz_p->buffer_sz);
+
+	return BCME_OK;
+}
+#endif /* BCM_BUZZZ */
+int
+dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag)
+{
+	dhd_bus_t *bus = dhdp->bus;
+	int ret = 0;
+#ifdef CONFIG_ARCH_MSM
+	int retry = POWERUP_MAX_RETRY;
+#endif /* CONFIG_ARCH_MSM */
+
+	if (dhd_download_fw_on_driverload) {
+		ret = dhd_bus_start(dhdp);
+	} else {
+		if (flag == TRUE) {
+			 /* Turn off WLAN */
+			DHD_ERROR(("%s: == Power OFF ==\n", __FUNCTION__));
+			bus->dhd->up = FALSE;
+			if (bus->dhd->busstate != DHD_BUS_DOWN) {
+				if (bus->intr) {
+					dhdpcie_bus_intr_disable(bus);
+					dhdpcie_free_irq(bus);
+				}
+
+				dhd_os_wd_timer(dhdp, 0);
+				dhd_bus_stop(bus, TRUE);
+				dhd_prot_clear(dhdp);
+				dhd_clear(dhdp);
+				dhd_bus_release_dongle(bus);
+				dhdpcie_bus_free_resource(bus);
+				ret = dhdpcie_bus_disable_device(bus);
+				if (ret) {
+					DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n",
+						__FUNCTION__, ret));
+					goto done;
+				}
+
+#ifdef CONFIG_ARCH_MSM
+				ret = dhdpcie_bus_clock_stop(bus);
+				if (ret) {
+					DHD_ERROR(("%s: host clock stop failed: %d\n",
+						__FUNCTION__, ret));
+					goto done;
+				}
+#endif /* CONFIG_ARCH_MSM */
+				bus->dhd->busstate = DHD_BUS_DOWN;
+			} else {
+				if (bus->intr) {
+					dhdpcie_bus_intr_disable(bus);
+					dhdpcie_free_irq(bus);
+				}
+
+				dhd_prot_clear(dhdp);
+				dhd_clear(dhdp);
+				dhd_bus_release_dongle(bus);
+				dhdpcie_bus_free_resource(bus);
+				ret = dhdpcie_bus_disable_device(bus);
+				if (ret) {
+					DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n",
+						__FUNCTION__, ret));
+					goto done;
+				}
+#ifdef CONFIG_ARCH_MSM
+				ret = dhdpcie_bus_clock_stop(bus);
+				if (ret) {
+					DHD_ERROR(("%s: host clock stop failed: %d\n",
+						__FUNCTION__, ret));
+					goto done;
+				}
+#endif  /* CONFIG_ARCH_MSM */
+			}
+
+			bus->dhd->dongle_reset = TRUE;
+			DHD_ERROR(("%s:  WLAN OFF Done\n", __FUNCTION__));
+
+		} else {
+			if (bus->dhd->busstate == DHD_BUS_DOWN) {
+				/* Turn on WLAN */
+				DHD_ERROR(("%s: == Power ON ==\n", __FUNCTION__));
+#ifdef CONFIG_ARCH_MSM
+				while (retry--) {
+					ret = dhdpcie_bus_clock_start(bus);
+					if (!ret) {
+						DHD_ERROR(("%s: dhdpcie_bus_clock_start OK\n",
+							__FUNCTION__));
+						break;
+					}
+					else
+						OSL_SLEEP(10);
+				}
+
+				if (ret && !retry) {
+					DHD_ERROR(("%s: host pcie clock enable failed: %d\n",
+						__FUNCTION__, ret));
+					goto done;
+				}
+#endif /* CONFIG_ARCH_MSM */
+				ret = dhdpcie_bus_enable_device(bus);
+				if (ret) {
+					DHD_ERROR(("%s: host configuration restore failed: %d\n",
+						__FUNCTION__, ret));
+					goto done;
+				}
+
+				ret = dhdpcie_bus_alloc_resource(bus);
+				if (ret) {
+					DHD_ERROR(("%s: dhdpcie_bus_resource_alloc failed: %d\n",
+						__FUNCTION__, ret));
+					goto done;
+				}
+
+				ret = dhdpcie_bus_dongle_attach(bus);
+				if (ret) {
+					DHD_ERROR(("%s: dhdpcie_bus_dongle_attach failed: %d\n",
+						__FUNCTION__, ret));
+					goto done;
+				}
+
+				ret = dhd_bus_request_irq(bus);
+				if (ret) {
+					DHD_ERROR(("%s: dhd_bus_request_irq failed: %d\n",
+						__FUNCTION__, ret));
+					goto done;
+				}
+
+				bus->dhd->dongle_reset = FALSE;
+
+				ret = dhd_bus_start(dhdp);
+				if (ret) {
+					DHD_ERROR(("%s: dhd_bus_start: %d\n",
+						__FUNCTION__, ret));
+					goto done;
+				}
+
+				bus->dhd->up = TRUE;
+				DHD_ERROR(("%s: WLAN Power On Done\n", __FUNCTION__));
+			} else {
+				DHD_ERROR(("%s: what should we do here\n", __FUNCTION__));
+				goto done;
+			}
+		}
+	}
+done:
+	if (ret)
+		bus->dhd->busstate = DHD_BUS_DOWN;
+
+	return ret;
+}
+
+static int
+dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, const char *name,
+                void *params, int plen, void *arg, int len, int val_size)
+{
+	int bcmerror = 0;
+	int32 int_val = 0;
+	int32 int_val2 = 0;
+	int32 int_val3 = 0;
+	bool bool_val = 0;
+
+	DHD_TRACE(("%s: Enter, action %d name %s params %p plen %d arg %p len %d val_size %d\n",
+	           __FUNCTION__, actionid, name, params, plen, arg, len, val_size));
+
+	if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0)
+		goto exit;
+
+	if (plen >= (int)sizeof(int_val))
+		bcopy(params, &int_val, sizeof(int_val));
+
+	if (plen >= (int)sizeof(int_val) * 2)
+		bcopy((void*)((uintptr)params + sizeof(int_val)), &int_val2, sizeof(int_val2));
+
+	if (plen >= (int)sizeof(int_val) * 3)
+		bcopy((void*)((uintptr)params + 2 * sizeof(int_val)), &int_val3, sizeof(int_val3));
+
+	bool_val = (int_val != 0) ? TRUE : FALSE;
+
+	/* Check if dongle is in reset. If so, only allow DEVRESET iovars */
+	if (bus->dhd->dongle_reset && !(actionid == IOV_SVAL(IOV_DEVRESET) ||
+	                                actionid == IOV_GVAL(IOV_DEVRESET))) {
+		bcmerror = BCME_NOTREADY;
+		goto exit;
+	}
+
+	switch (actionid) {
+
+
+	case IOV_SVAL(IOV_VARS):
+		bcmerror = dhdpcie_downloadvars(bus, arg, len);
+		break;
+
+	case IOV_SVAL(IOV_PCIEREG):
+		si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0,
+			int_val);
+		si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configdata), ~0,
+			int_val2);
+		break;
+
+	case IOV_GVAL(IOV_PCIEREG):
+		si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0,
+			int_val);
+		int_val = si_corereg(bus->sih, bus->sih->buscoreidx,
+			OFFSETOF(sbpcieregs_t, configdata), 0, 0);
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_GVAL(IOV_BAR0_SECWIN_REG):
+	{
+		uint32 cur_base, base;
+		uchar *bar0;
+		volatile uint32 *offset;
+		/* set the bar0 secondary window to this */
+		/* write the register value */
+		cur_base = dhdpcie_bus_cfg_read_dword(bus, PCIE2_BAR0_CORE2_WIN, sizeof(uint));
+		base = int_val & 0xFFFFF000;
+		dhdpcie_bus_cfg_write_dword(bus, PCIE2_BAR0_CORE2_WIN,  sizeof(uint32), base);
+		bar0 = (uchar *)bus->regs;
+		offset = (uint32 *)(bar0 + 0x4000 + (int_val & 0xFFF));
+		int_val = *offset;
+		bcopy(&int_val, arg, sizeof(int_val));
+		dhdpcie_bus_cfg_write_dword(bus, PCIE2_BAR0_CORE2_WIN, sizeof(uint32), cur_base);
+	}
+		break;
+	case IOV_SVAL(IOV_BAR0_SECWIN_REG):
+	{
+		uint32 cur_base, base;
+		uchar *bar0;
+		volatile uint32 *offset;
+		/* set the bar0 secondary window to this */
+		/* write the register value */
+		cur_base = dhdpcie_bus_cfg_read_dword(bus, PCIE2_BAR0_CORE2_WIN, sizeof(uint));
+		base = int_val & 0xFFFFF000;
+		dhdpcie_bus_cfg_write_dword(bus, PCIE2_BAR0_CORE2_WIN,  sizeof(uint32), base);
+		bar0 = (uchar *)bus->regs;
+		offset = (uint32 *)(bar0 + 0x4000 + (int_val & 0xFFF));
+		*offset = int_val2;
+		bcopy(&int_val2, arg, val_size);
+		dhdpcie_bus_cfg_write_dword(bus, PCIE2_BAR0_CORE2_WIN, sizeof(uint32), cur_base);
+	}
+		break;
+
+	case IOV_SVAL(IOV_PCIECOREREG):
+		si_corereg(bus->sih, bus->sih->buscoreidx, int_val, ~0, int_val2);
+		break;
+	case IOV_GVAL(IOV_SBREG):
+	{
+		sdreg_t sdreg;
+		uint32 addr, coreidx;
+
+		bcopy(params, &sdreg, sizeof(sdreg));
+
+		addr = sdreg.offset;
+		coreidx =  (addr & 0xF000) >> 12;
+
+		int_val = si_corereg(bus->sih, coreidx, (addr & 0xFFF), 0, 0);
+		bcopy(&int_val, arg, sizeof(int32));
+		break;
+	}
+
+	case IOV_SVAL(IOV_SBREG):
+	{
+		sdreg_t sdreg;
+		uint32 addr, coreidx;
+
+		bcopy(params, &sdreg, sizeof(sdreg));
+
+		addr = sdreg.offset;
+		coreidx =  (addr & 0xF000) >> 12;
+
+		si_corereg(bus->sih, coreidx, (addr & 0xFFF), ~0, sdreg.value);
+
+		break;
+	}
+
+
+	case IOV_GVAL(IOV_PCIECOREREG):
+		int_val = si_corereg(bus->sih, bus->sih->buscoreidx, int_val, 0, 0);
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_SVAL(IOV_PCIECFGREG):
+		OSL_PCI_WRITE_CONFIG(bus->osh, int_val, 4, int_val2);
+		break;
+
+	case IOV_GVAL(IOV_PCIECFGREG):
+		int_val = OSL_PCI_READ_CONFIG(bus->osh, int_val, 4);
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_SVAL(IOV_PCIE_LPBK):
+		bcmerror = dhdpcie_bus_lpback_req(bus, int_val);
+		break;
+
+	case IOV_SVAL(IOV_PCIE_DMAXFER):
+		bcmerror = dhdpcie_bus_dmaxfer_req(bus, int_val, int_val2, int_val3);
+		break;
+
+	case IOV_GVAL(IOV_PCIE_SUSPEND):
+		int_val = (bus->dhd->busstate == DHD_BUS_SUSPEND) ? 1 : 0;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_SVAL(IOV_PCIE_SUSPEND):
+		dhdpcie_bus_suspend(bus, bool_val);
+		break;
+
+	case IOV_GVAL(IOV_MEMSIZE):
+		int_val = (int32)bus->ramsize;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+	case IOV_SVAL(IOV_MEMBYTES):
+	case IOV_GVAL(IOV_MEMBYTES):
+	{
+		uint32 address;		/* absolute backplane address */
+		uint size, dsize;
+		uint8 *data;
+
+		bool set = (actionid == IOV_SVAL(IOV_MEMBYTES));
+
+		ASSERT(plen >= 2*sizeof(int));
+
+		address = (uint32)int_val;
+		bcopy((char *)params + sizeof(int_val), &int_val, sizeof(int_val));
+		size = (uint)int_val;
+
+		/* Do some validation */
+		dsize = set ? plen - (2 * sizeof(int)) : len;
+		if (dsize < size) {
+			DHD_ERROR(("%s: error on %s membytes, addr 0x%08x size %d dsize %d\n",
+			           __FUNCTION__, (set ? "set" : "get"), address, size, dsize));
+			bcmerror = BCME_BADARG;
+			break;
+		}
+
+		DHD_INFO(("%s: Request to %s %d bytes at address 0x%08x\n dsize %d ", __FUNCTION__,
+		          (set ? "write" : "read"), size, address, dsize));
+
+		/* check if CR4 */
+		if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
+			/* if address is 0, store the reset instruction to be written in 0 */
+			if (set && address == bus->dongle_ram_base) {
+				bus->resetinstr = *(((uint32*)params) + 2);
+			}
+		} else {
+		/* If we know about SOCRAM, check for a fit */
+		if ((bus->orig_ramsize) &&
+		    ((address > bus->orig_ramsize) || (address + size > bus->orig_ramsize)))
+		{
+			uint8 enable, protect, remap;
+			si_socdevram(bus->sih, FALSE, &enable, &protect, &remap);
+			if (!enable || protect) {
+				DHD_ERROR(("%s: ramsize 0x%08x doesn't have %d bytes at 0x%08x\n",
+					__FUNCTION__, bus->orig_ramsize, size, address));
+				DHD_ERROR(("%s: socram enable %d, protect %d\n",
+					__FUNCTION__, enable, protect));
+				bcmerror = BCME_BADARG;
+				break;
+			}
+
+			if (!REMAP_ENAB(bus) && (address >= SOCDEVRAM_ARM_ADDR)) {
+				uint32 devramsize = si_socdevram_size(bus->sih);
+				if ((address < SOCDEVRAM_ARM_ADDR) ||
+					(address + size > (SOCDEVRAM_ARM_ADDR + devramsize))) {
+					DHD_ERROR(("%s: bad address 0x%08x, size 0x%08x\n",
+						__FUNCTION__, address, size));
+					DHD_ERROR(("%s: socram range 0x%08x,size 0x%08x\n",
+						__FUNCTION__, SOCDEVRAM_ARM_ADDR, devramsize));
+					bcmerror = BCME_BADARG;
+					break;
+				}
+				/* move it such that address is real now */
+				address -= SOCDEVRAM_ARM_ADDR;
+				address += SOCDEVRAM_BP_ADDR;
+				DHD_INFO(("%s: Request to %s %d bytes @ Mapped address 0x%08x\n",
+					__FUNCTION__, (set ? "write" : "read"), size, address));
+			} else if (REMAP_ENAB(bus) && REMAP_ISADDR(bus, address) && remap) {
+				/* Can not access remap region while devram remap bit is set
+				 * ROM content would be returned in this case
+				 */
+				DHD_ERROR(("%s: Need to disable remap for address 0x%08x\n",
+					__FUNCTION__, address));
+				bcmerror = BCME_ERROR;
+				break;
+			}
+		}
+		}
+
+		/* Generate the actual data pointer */
+		data = set ? (uint8*)params + 2 * sizeof(int): (uint8*)arg;
+
+		/* Call to do the transfer */
+		bcmerror = dhdpcie_bus_membytes(bus, set, address, data, size);
+
+		break;
+	}
+
+#ifdef BCM_BUZZZ
+	case IOV_GVAL(IOV_BUZZZ_DUMP):
+		bcmerror = dhd_buzzz_dump_dngl(bus);
+		break;
+#endif /* BCM_BUZZZ */
+
+	case IOV_SVAL(IOV_SET_DOWNLOAD_STATE):
+		bcmerror = dhdpcie_bus_download_state(bus, bool_val);
+		break;
+
+	case IOV_GVAL(IOV_RAMSIZE):
+		int_val = (int32)bus->ramsize;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_GVAL(IOV_RAMSTART):
+		int_val = (int32)bus->dongle_ram_base;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_GVAL(IOV_CC_NVMSHADOW):
+	{
+		struct bcmstrbuf dump_b;
+
+		bcm_binit(&dump_b, arg, len);
+		bcmerror = dhdpcie_cc_nvmshadow(bus, &dump_b);
+		break;
+	}
+
+	case IOV_GVAL(IOV_SLEEP_ALLOWED):
+		bool_val = bus->sleep_allowed;
+		bcopy(&bool_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_SLEEP_ALLOWED):
+		bus->sleep_allowed = bool_val;
+		break;
+
+	case IOV_GVAL(IOV_DONGLEISOLATION):
+		int_val = bus->dhd->dongle_isolation;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_SVAL(IOV_DONGLEISOLATION):
+		bus->dhd->dongle_isolation = bool_val;
+		break;
+
+	case IOV_GVAL(IOV_LTRSLEEPON_UNLOOAD):
+		int_val = bus->ltrsleep_on_unload;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_SVAL(IOV_LTRSLEEPON_UNLOOAD):
+		bus->ltrsleep_on_unload = bool_val;
+		break;
+
+	case IOV_GVAL(IOV_DUMP_RINGUPD_BLOCK):
+	{
+		struct bcmstrbuf dump_b;
+		bcm_binit(&dump_b, arg, len);
+		bcmerror = dhd_prot_ringupd_dump(bus->dhd, &dump_b);
+		break;
+	}
+	case IOV_GVAL(IOV_DMA_RINGINDICES):
+	{	int h2d_support, d2h_support;
+
+		d2h_support = DMA_INDX_ENAB(bus->dhd->dma_d2h_ring_upd_support) ? 1 : 0;
+		h2d_support = DMA_INDX_ENAB(bus->dhd->dma_h2d_ring_upd_support) ? 1 : 0;
+		int_val = d2h_support | (h2d_support << 1);
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+	}
+	case IOV_SVAL(IOV_DMA_RINGINDICES):
+		/* Can change it only during initialization/FW download */
+		if (bus->dhd->busstate == DHD_BUS_DOWN) {
+			if ((int_val > 3) || (int_val < 0)) {
+				DHD_ERROR(("Bad argument. Possible values: 0, 1, 2 & 3\n"));
+				bcmerror = BCME_BADARG;
+			} else {
+				bus->dhd->dma_d2h_ring_upd_support = (int_val & 1) ? TRUE : FALSE;
+				bus->dhd->dma_h2d_ring_upd_support = (int_val & 2) ? TRUE : FALSE;
+			}
+		} else {
+			DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
+				__FUNCTION__));
+			bcmerror = BCME_NOTDOWN;
+		}
+		break;
+
+	case IOV_GVAL(IOV_RX_METADATALEN):
+		int_val = dhd_prot_metadatalen_get(bus->dhd, TRUE);
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+		case IOV_SVAL(IOV_RX_METADATALEN):
+		if (int_val > 64) {
+			bcmerror = BCME_BUFTOOLONG;
+			break;
+		}
+		dhd_prot_metadatalen_set(bus->dhd, int_val, TRUE);
+		break;
+
+	case IOV_SVAL(IOV_TXP_THRESHOLD):
+		dhd_prot_txp_threshold(bus->dhd, TRUE, int_val);
+		break;
+
+	case IOV_GVAL(IOV_TXP_THRESHOLD):
+		int_val = dhd_prot_txp_threshold(bus->dhd, FALSE, int_val);
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_SVAL(IOV_DB1_FOR_MB):
+		if (int_val)
+			bus->db1_for_mb = TRUE;
+		else
+			bus->db1_for_mb = FALSE;
+		break;
+
+	case IOV_GVAL(IOV_DB1_FOR_MB):
+		if (bus->db1_for_mb)
+			int_val = 1;
+		else
+			int_val = 0;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_GVAL(IOV_TX_METADATALEN):
+		int_val = dhd_prot_metadatalen_get(bus->dhd, FALSE);
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_SVAL(IOV_TX_METADATALEN):
+		if (int_val > 64) {
+			bcmerror = BCME_BUFTOOLONG;
+			break;
+		}
+		dhd_prot_metadatalen_set(bus->dhd, int_val, FALSE);
+		break;
+
+	case IOV_GVAL(IOV_FLOW_PRIO_MAP):
+		int_val = bus->dhd->flow_prio_map_type;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_SVAL(IOV_FLOW_PRIO_MAP):
+		int_val = (int32)dhd_update_flow_prio_map(bus->dhd, (uint8)int_val);
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	default:
+		bcmerror = BCME_UNSUPPORTED;
+		break;
+	}
+
+exit:
+	return bcmerror;
+}
+/* Transfers bytes from host to dongle using pio mode */
+static int
+dhdpcie_bus_lpback_req(struct  dhd_bus *bus, uint32 len)
+{
+	if (bus->dhd == NULL) {
+		DHD_ERROR(("bus not inited\n"));
+		return 0;
+	}
+	if (bus->dhd->prot == NULL) {
+		DHD_ERROR(("prot is not inited\n"));
+		return 0;
+	}
+	if (bus->dhd->busstate != DHD_BUS_DATA) {
+		DHD_ERROR(("not in a readystate to LPBK  is not inited\n"));
+		return 0;
+	}
+	dhdmsgbuf_lpbk_req(bus->dhd, len);
+	return 0;
+}
+
+void
+dhd_bus_set_suspend_resume(dhd_pub_t *dhdp, bool state)
+{
+	struct  dhd_bus *bus = dhdp->bus;
+	if (bus) {
+		dhdpcie_bus_suspend(bus, state);
+	}
+}
+
+int
+dhdpcie_bus_suspend(struct  dhd_bus *bus, bool state)
+{
+
+	int timeleft;
+	bool pending;
+	int rc = 0;
+	DHD_INFO(("%s Enter with state :%d\n", __FUNCTION__, state));
+
+	if (bus->dhd == NULL) {
+		DHD_ERROR(("bus not inited\n"));
+		return BCME_ERROR;
+	}
+	if (bus->dhd->prot == NULL) {
+		DHD_ERROR(("prot is not inited\n"));
+		return BCME_ERROR;
+	}
+	if (bus->dhd->busstate != DHD_BUS_DATA && bus->dhd->busstate != DHD_BUS_SUSPEND) {
+		DHD_ERROR(("not in a readystate to LPBK  is not inited\n"));
+		return BCME_ERROR;
+	}
+	if (bus->dhd->dongle_reset)
+		return -EIO;
+
+
+	if (bus->suspended == state) /* Set to same state */
+		return BCME_OK;
+
+	if (state) {
+		bus->wait_for_d3_ack = 0;
+		bus->suspended = TRUE;
+		bus->dhd->busstate = DHD_BUS_SUSPEND;
+		DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
+		dhd_os_set_ioctl_resp_timeout(DEFAULT_IOCTL_RESP_TIMEOUT);
+		dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM);
+		timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack, &pending);
+		dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
+		DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
+		if (bus->wait_for_d3_ack == 1) {
+			/* Got D3 Ack. Suspend the bus */
+			if (dhd_os_check_wakelock_all(bus->dhd)) {
+				DHD_ERROR(("Suspend failed because of wakelock\n"));
+				bus->dev->current_state = PCI_D3hot;
+				pci_set_master(bus->dev);
+				rc = pci_set_power_state(bus->dev, PCI_D0);
+				if (rc) {
+					DHD_ERROR(("%s: pci_set_power_state failed:"
+						" current_state[%d], ret[%d]\n",
+						__FUNCTION__, bus->dev->current_state, rc));
+				}
+				bus->suspended = FALSE;
+				bus->dhd->busstate = DHD_BUS_DATA;
+				rc = BCME_ERROR;
+			} else {
+				dhdpcie_bus_intr_disable(bus);
+				rc = dhdpcie_pci_suspend_resume(bus->dev, state);
+			}
+		} else if (timeleft == 0) {
+			DHD_ERROR(("%s: resumed on timeout\n", __FUNCTION__));
+			bus->suspended = FALSE;
+			bus->dhd->busstate = DHD_BUS_DATA;
+			rc = -ETIMEDOUT;
+		} else if (bus->wait_for_d3_ack == DHD_INVALID) {
+			DHD_ERROR(("PCIe link down during suspend"));
+			bus->suspended = FALSE;
+			bus->dhd->busstate = DHD_BUS_DOWN;
+			rc = -ETIMEDOUT;
+			dhdpcie_bus_report_pcie_linkdown(bus);
+		}
+		bus->wait_for_d3_ack = 1;
+	} else {
+		/* Resume */
+		DHD_INFO(("dhdpcie_bus_suspend resume\n"));
+		rc = dhdpcie_pci_suspend_resume(bus->dev, state);
+		bus->suspended = FALSE;
+		if (dhdpcie_bus_cfg_read_dword(bus, PCI_VENDOR_ID, 4) == PCIE_LINK_DOWN) {
+			DHD_ERROR(("PCIe link down during resume"));
+			rc = -ETIMEDOUT;
+			bus->dhd->busstate = DHD_BUS_DOWN;
+			dhdpcie_bus_report_pcie_linkdown(bus);
+		} else {
+			bus->dhd->busstate = DHD_BUS_DATA;
+			dhdpcie_bus_intr_enable(bus);
+		}
+	}
+	return rc;
+}
+
+/* Transfers bytes from host to dongle and to host again using DMA */
+static int
+dhdpcie_bus_dmaxfer_req(struct  dhd_bus *bus, uint32 len, uint32 srcdelay, uint32 destdelay)
+{
+	if (bus->dhd == NULL) {
+		DHD_ERROR(("bus not inited\n"));
+		return BCME_ERROR;
+	}
+	if (bus->dhd->prot == NULL) {
+		DHD_ERROR(("prot is not inited\n"));
+		return BCME_ERROR;
+	}
+	if (bus->dhd->busstate != DHD_BUS_DATA) {
+		DHD_ERROR(("not in a readystate to LPBK  is not inited\n"));
+		return BCME_ERROR;
+	}
+
+	if (len < 5 || len > 4194296) {
+		DHD_ERROR(("len is too small or too large\n"));
+		return BCME_ERROR;
+	}
+	return dhdmsgbuf_dmaxfer_req(bus->dhd, len, srcdelay, destdelay);
+}
+
+
+
+static int
+dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter)
+{
+	int bcmerror = 0;
+	uint32 *cr4_regs;
+
+	if (!bus->sih)
+		return BCME_ERROR;
+	/* To enter download state, disable ARM and reset SOCRAM.
+	 * To exit download state, simply reset ARM (default is RAM boot).
+	 */
+	if (enter) {
+		bus->alp_only = TRUE;
+
+		/* some chips (e.g. 43602) have two ARM cores, the CR4 is receives the firmware. */
+		cr4_regs = si_setcore(bus->sih, ARMCR4_CORE_ID, 0);
+
+		if (cr4_regs == NULL && !(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
+		    !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
+			DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
+			bcmerror = BCME_ERROR;
+			goto fail;
+		}
+
+		if (cr4_regs == NULL) { /* no CR4 present on chip */
+			si_core_disable(bus->sih, 0);
+
+			if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
+				DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
+				bcmerror = BCME_ERROR;
+				goto fail;
+			}
+
+			si_core_reset(bus->sih, 0, 0);
+
+
+			/* Clear the top bit of memory */
+			if (bus->ramsize) {
+				uint32 zeros = 0;
+				if (dhdpcie_bus_membytes(bus, TRUE, bus->ramsize - 4,
+				                     (uint8*)&zeros, 4) < 0) {
+					bcmerror = BCME_ERROR;
+					goto fail;
+				}
+			}
+		} else {
+			/* For CR4,
+			 * Halt ARM
+			 * Remove ARM reset
+			 * Read RAM base address [0x18_0000]
+			 * [next] Download firmware
+			 * [done at else] Populate the reset vector
+			 * [done at else] Remove ARM halt
+			*/
+			/* Halt ARM & remove reset */
+			si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT);
+			if (bus->sih->chip == BCM43602_CHIP_ID) {
+				W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX, 5);
+				W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKPDA, 0);
+				W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX, 7);
+				W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKPDA, 0);
+			}
+			/* reset last 4 bytes of RAM address. to be used for shared area */
+			dhdpcie_init_shared_addr(bus);
+		}
+	} else {
+		if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
+			if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
+				DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
+				bcmerror = BCME_ERROR;
+				goto fail;
+			}
+
+			if (!si_iscoreup(bus->sih)) {
+				DHD_ERROR(("%s: SOCRAM core is down after reset?\n", __FUNCTION__));
+				bcmerror = BCME_ERROR;
+				goto fail;
+			}
+
+
+			/* Enable remap before ARM reset but after vars.
+			 * No backplane access in remap mode
+			 */
+
+			if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) &&
+			    !si_setcore(bus->sih, SDIOD_CORE_ID, 0)) {
+				DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__));
+				bcmerror = BCME_ERROR;
+				goto fail;
+			}
+
+
+			if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
+			    !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
+				DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
+				bcmerror = BCME_ERROR;
+				goto fail;
+			}
+		} else {
+			if (bus->sih->chip == BCM43602_CHIP_ID) {
+				/* Firmware crashes on SOCSRAM access when core is in reset */
+				if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
+					DHD_ERROR(("%s: Failed to find SOCRAM core!\n",
+						__FUNCTION__));
+					bcmerror = BCME_ERROR;
+					goto fail;
+				}
+				si_core_reset(bus->sih, 0, 0);
+				si_setcore(bus->sih, ARMCR4_CORE_ID, 0);
+			}
+
+			/* write vars */
+			if ((bcmerror = dhdpcie_bus_write_vars(bus))) {
+				DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
+				goto fail;
+			}
+
+
+			/* switch back to arm core again */
+			if (!(si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) {
+				DHD_ERROR(("%s: Failed to find ARM CR4 core!\n", __FUNCTION__));
+				bcmerror = BCME_ERROR;
+				goto fail;
+			}
+
+			/* write address 0 with reset instruction */
+			bcmerror = dhdpcie_bus_membytes(bus, TRUE, 0,
+				(uint8 *)&bus->resetinstr, sizeof(bus->resetinstr));
+
+			/* now remove reset and halt and continue to run CR4 */
+		}
+
+		si_core_reset(bus->sih, 0, 0);
+
+		/* Allow HT Clock now that the ARM is running. */
+		bus->alp_only = FALSE;
+
+		bus->dhd->busstate = DHD_BUS_LOAD;
+	}
+
+fail:
+	/* Always return to PCIE core */
+	si_setcore(bus->sih, PCIE2_CORE_ID, 0);
+
+	return bcmerror;
+}
+
+static int
+dhdpcie_bus_write_vars(dhd_bus_t *bus)
+{
+	int bcmerror = 0;
+	uint32 varsize, phys_size;
+	uint32 varaddr;
+	uint8 *vbuffer;
+	uint32 varsizew;
+#ifdef DHD_DEBUG
+	uint8 *nvram_ularray;
+#endif /* DHD_DEBUG */
+
+	/* Even if there are no vars are to be written, we still need to set the ramsize. */
+	varsize = bus->varsz ? ROUNDUP(bus->varsz, 4) : 0;
+	varaddr = (bus->ramsize - 4) - varsize;
+
+	varaddr += bus->dongle_ram_base;
+
+	if (bus->vars) {
+
+		vbuffer = (uint8 *)MALLOC(bus->dhd->osh, varsize);
+		if (!vbuffer)
+			return BCME_NOMEM;
+
+		bzero(vbuffer, varsize);
+		bcopy(bus->vars, vbuffer, bus->varsz);
+		/* Write the vars list */
+		bcmerror = dhdpcie_bus_membytes(bus, TRUE, varaddr, vbuffer, varsize);
+
+		/* Implement read back and verify later */
+#ifdef DHD_DEBUG
+		/* Verify NVRAM bytes */
+		DHD_INFO(("Compare NVRAM dl & ul; varsize=%d\n", varsize));
+		nvram_ularray = (uint8*)MALLOC(bus->dhd->osh, varsize);
+		if (!nvram_ularray)
+			return BCME_NOMEM;
+
+		/* Upload image to verify downloaded contents. */
+		memset(nvram_ularray, 0xaa, varsize);
+
+		/* Read the vars list to temp buffer for comparison */
+		bcmerror = dhdpcie_bus_membytes(bus, FALSE, varaddr, nvram_ularray, varsize);
+		if (bcmerror) {
+				DHD_ERROR(("%s: error %d on reading %d nvram bytes at 0x%08x\n",
+					__FUNCTION__, bcmerror, varsize, varaddr));
+		}
+
+		/* Compare the org NVRAM with the one read from RAM */
+		if (memcmp(vbuffer, nvram_ularray, varsize)) {
+			DHD_ERROR(("%s: Downloaded NVRAM image is corrupted.\n", __FUNCTION__));
+		} else
+			DHD_ERROR(("%s: Download, Upload and compare of NVRAM succeeded.\n",
+			__FUNCTION__));
+
+		MFREE(bus->dhd->osh, nvram_ularray, varsize);
+#endif /* DHD_DEBUG */
+
+		MFREE(bus->dhd->osh, vbuffer, varsize);
+	}
+
+	phys_size = REMAP_ENAB(bus) ? bus->ramsize : bus->orig_ramsize;
+
+	phys_size += bus->dongle_ram_base;
+
+	/* adjust to the user specified RAM */
+	DHD_INFO(("Physical memory size: %d, usable memory size: %d\n",
+		phys_size, bus->ramsize));
+	DHD_INFO(("Vars are at %d, orig varsize is %d\n",
+		varaddr, varsize));
+	varsize = ((phys_size - 4) - varaddr);
+
+	/*
+	 * Determine the length token:
+	 * Varsize, converted to words, in lower 16-bits, checksum in upper 16-bits.
+	 */
+	if (bcmerror) {
+		varsizew = 0;
+		bus->nvram_csm = varsizew;
+	} else {
+		varsizew = varsize / 4;
+		varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF);
+		bus->nvram_csm = varsizew;
+		varsizew = htol32(varsizew);
+	}
+
+	DHD_INFO(("New varsize is %d, length token=0x%08x\n", varsize, varsizew));
+
+	/* Write the length token to the last word */
+	bcmerror = dhdpcie_bus_membytes(bus, TRUE, (phys_size - 4),
+		(uint8*)&varsizew, 4);
+
+	return bcmerror;
+}
+
+int
+dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len)
+{
+	int bcmerror = BCME_OK;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	/* Basic sanity checks */
+	if (bus->dhd->up) {
+		bcmerror = BCME_NOTDOWN;
+		goto err;
+	}
+	if (!len) {
+		bcmerror = BCME_BUFTOOSHORT;
+		goto err;
+	}
+
+	/* Free the old ones and replace with passed variables */
+	if (bus->vars)
+		MFREE(bus->dhd->osh, bus->vars, bus->varsz);
+
+	bus->vars = MALLOC(bus->dhd->osh, len);
+	bus->varsz = bus->vars ? len : 0;
+	if (bus->vars == NULL) {
+		bcmerror = BCME_NOMEM;
+		goto err;
+	}
+
+	/* Copy the passed variables, which should include the terminating double-null */
+	bcopy(arg, bus->vars, bus->varsz);
+err:
+	return bcmerror;
+}
+
+/* Add bus dump output to a buffer */
+void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
+{
+	uint16 flowid;
+	flow_ring_node_t *flow_ring_node;
+
+#ifdef DHD_WAKE_STATUS
+	bcm_bprintf(strbuf, "wake %u rxwake %u readctrlwake %u\n",
+		    bcmpcie_get_total_wake(dhdp->bus), dhdp->bus->rxwake,
+		    dhdp->bus->rcwake);
+#endif
+	dhd_prot_print_info(dhdp, strbuf);
+	for (flowid = 0; flowid < dhdp->num_flow_rings; flowid++) {
+		flow_ring_node = DHD_FLOW_RING(dhdp, flowid);
+		if (flow_ring_node->active) {
+			bcm_bprintf(strbuf, "Flow:%d IF %d Prio %d  Qlen %d ",
+				flow_ring_node->flowid, flow_ring_node->flow_info.ifindex,
+				flow_ring_node->flow_info.tid, flow_ring_node->queue.len);
+			dhd_prot_print_flow_ring(dhdp, flow_ring_node->prot_info, strbuf);
+		}
+	}
+}
+
+static void
+dhd_update_txflowrings(dhd_pub_t *dhd)
+{
+	dll_t *item, *next;
+	flow_ring_node_t *flow_ring_node;
+	struct dhd_bus *bus = dhd->bus;
+
+	for (item = dll_head_p(&bus->const_flowring);
+	         !dll_end(&bus->const_flowring, item); item = next) {
+		next = dll_next_p(item);
+
+		flow_ring_node = dhd_constlist_to_flowring(item);
+		dhd_prot_update_txflowring(dhd, flow_ring_node->flowid, flow_ring_node->prot_info);
+	}
+}
+
+/* Mailbox ringbell Function */
+static void
+dhd_bus_gen_devmb_intr(struct dhd_bus *bus)
+{
+	if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
+		(bus->sih->buscorerev == 4)) {
+		DHD_ERROR(("mailbox communication not supported\n"));
+		return;
+	}
+	if (bus->db1_for_mb)  {
+		/* this is a pcie core register, not the config regsiter */
+		/* XXX: makesure we are on PCIE */
+		DHD_INFO(("writing a mail box interrupt to the device, through doorbell 1\n"));
+		si_corereg(bus->sih, bus->sih->buscoreidx, PCIH2D_DB1, ~0, 0x12345678);
+	}
+	else {
+		DHD_INFO(("writing a mail box interrupt to the device, through config space\n"));
+		dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0));
+		/* XXX CRWLPCIEGEN2-182 requires double write */
+		dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0));
+	}
+}
+
+/* doorbell ring Function */
+void
+dhd_bus_ringbell(struct dhd_bus *bus, uint32 value)
+{
+	if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
+		(bus->sih->buscorerev == 4)) {
+		si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, PCIE_INTB, PCIE_INTB);
+	} else {
+		/* this is a pcie core register, not the config regsiter */
+		DHD_INFO(("writing a door bell to the device\n"));
+		si_corereg(bus->sih, bus->sih->buscoreidx, PCIH2D_MailBox, ~0, 0x12345678);
+	}
+}
+
+static void
+dhd_bus_ringbell_fast(struct dhd_bus *bus, uint32 value)
+{
+	W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, value);
+}
+
+static void
+dhd_bus_ringbell_oldpcie(struct dhd_bus *bus, uint32 value)
+{
+	uint32 w;
+	w = (R_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr) & ~PCIE_INTB) | PCIE_INTB;
+	W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, w);
+}
+
+dhd_mb_ring_t
+dhd_bus_get_mbintr_fn(struct dhd_bus *bus)
+{
+	if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
+		(bus->sih->buscorerev == 4)) {
+		bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
+			PCIMailBoxInt);
+		if (bus->pcie_mb_intr_addr) {
+			bus->pcie_mb_intr_osh = si_osh(bus->sih);
+			return dhd_bus_ringbell_oldpcie;
+		}
+	} else {
+		bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
+			PCIH2D_MailBox);
+		if (bus->pcie_mb_intr_addr) {
+			bus->pcie_mb_intr_osh = si_osh(bus->sih);
+			return dhd_bus_ringbell_fast;
+		}
+	}
+	return dhd_bus_ringbell;
+}
+
+bool BCMFASTPATH
+dhd_bus_dpc(struct dhd_bus *bus)
+{
+	uint32 intstatus = 0;
+	uint32 newstatus = 0;
+	bool resched = FALSE;	  /* Flag indicating resched wanted */
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (bus->dhd->busstate == DHD_BUS_DOWN) {
+		DHD_ERROR(("%s: Bus down, ret\n", __FUNCTION__));
+		bus->intstatus = 0;
+		return 0;
+	}
+
+	intstatus = bus->intstatus;
+
+	if ((bus->sih->buscorerev == 6) || (bus->sih->buscorerev == 4) ||
+		(bus->sih->buscorerev == 2)) {
+		newstatus =  dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
+		dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, newstatus);
+		/* Merge new bits with previous */
+		intstatus |= newstatus;
+		bus->intstatus = 0;
+		if (intstatus & I_MB) {
+			dhdpcie_bus_process_mailbox_intr(bus, intstatus);
+		}
+	} else {
+		/* this is a PCIE core register..not a config register... */
+		newstatus = si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, 0, 0);
+		intstatus |= (newstatus & bus->def_intmask);
+		si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, intstatus, intstatus);
+		if (intstatus & bus->def_intmask) {
+			dhdpcie_bus_process_mailbox_intr(bus, intstatus);
+			intstatus &= ~bus->def_intmask;
+		}
+	}
+
+	dhdpcie_bus_intr_enable(bus);
+	return resched;
+
+}
+
+
+static void
+dhdpcie_send_mb_data(dhd_bus_t *bus, uint32 h2d_mb_data)
+{
+	uint32 cur_h2d_mb_data = 0;
+
+	dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, HTOD_MB_DATA, 0);
+
+	if (cur_h2d_mb_data != 0) {
+		uint32 i = 0;
+		DHD_INFO(("GRRRRRRR: MB transaction is already pending 0x%04x\n", cur_h2d_mb_data));
+		while ((i++ < 100) && cur_h2d_mb_data) {
+			OSL_DELAY(10);
+			dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, HTOD_MB_DATA, 0);
+		}
+		if (i >= 100)
+			DHD_ERROR(("waited 1ms for the dngl to ack the previous mb transaction\n"));
+	}
+
+	dhd_bus_cmn_writeshared(bus, &h2d_mb_data, sizeof(uint32), HTOD_MB_DATA, 0);
+	dhd_bus_gen_devmb_intr(bus);
+}
+
+static void
+dhdpcie_handle_mb_data(dhd_bus_t *bus)
+{
+	uint32 d2h_mb_data = 0;
+	uint32 zero = 0;
+	dhd_bus_cmn_readshared(bus, &d2h_mb_data, DTOH_MB_DATA, 0);
+	if (!d2h_mb_data)
+		return;
+
+	dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), DTOH_MB_DATA, 0);
+	if (d2h_mb_data == PCIE_LINK_DOWN) {
+		DHD_ERROR(("%s pcie linkdown, 0x%08x\n", __FUNCTION__, d2h_mb_data));
+		bus->wait_for_d3_ack = DHD_INVALID;
+		dhd_os_d3ack_wake(bus->dhd);
+	}
+	DHD_INFO(("D2H_MB_DATA: 0x%04x\n", d2h_mb_data));
+	if (d2h_mb_data & D2H_DEV_DS_ENTER_REQ)  {
+		/* what should we do */
+		DHD_INFO(("D2H_MB_DATA: DEEP SLEEP REQ\n"));
+		dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
+		DHD_INFO(("D2H_MB_DATA: sent DEEP SLEEP ACK\n"));
+	}
+	if (d2h_mb_data & D2H_DEV_DS_EXIT_NOTE)  {
+		/* what should we do */
+		DHD_INFO(("D2H_MB_DATA: DEEP SLEEP EXIT\n"));
+	}
+	if (d2h_mb_data & D2H_DEV_D3_ACK)  {
+		/* what should we do */
+		DHD_ERROR(("D2H_MB_DATA: D3 ACK\n"));
+		if (!bus->wait_for_d3_ack) {
+			bus->wait_for_d3_ack = 1;
+			dhd_os_d3ack_wake(bus->dhd);
+		}
+	}
+	if (d2h_mb_data & D2H_DEV_FWHALT)  {
+		DHD_INFO(("FW trap has happened\n"));
+#ifdef DHD_DEBUG
+		dhdpcie_checkdied(bus, NULL, 0);
+#endif
+		bus->dhd->busstate = DHD_BUS_DOWN;
+	}
+}
+
+static void
+dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus)
+{
+
+	if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
+		(bus->sih->buscorerev == 4)) {
+		/* Msg stream interrupt */
+		if (intstatus & I_BIT1) {
+			dhdpci_bus_read_frames(bus);
+		} else if (intstatus & I_BIT0) {
+			/* do nothing for Now */
+		}
+	}
+	else {
+		if (intstatus & (PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1))
+			dhdpcie_handle_mb_data(bus);
+
+		if (bus->dhd->busstate == DHD_BUS_SUSPEND) {
+			return;
+		}
+
+		if (intstatus & PCIE_MB_D2H_MB_MASK) {
+				dhdpci_bus_read_frames(bus);
+		}
+	}
+}
+
+/* Decode dongle to host message stream */
+static void
+dhdpci_bus_read_frames(dhd_bus_t *bus)
+{
+	/* There may be frames in both ctrl buf and data buf; check ctrl buf first */
+	DHD_PERIM_LOCK(bus->dhd); /* Take the perimeter lock */
+
+	dhd_prot_process_ctrlbuf(bus->dhd);
+
+	/* update the flow ring cpls */
+	dhd_update_txflowrings(bus->dhd);
+
+	dhd_prot_process_msgbuf_txcpl(bus->dhd);
+
+	dhd_prot_process_msgbuf_rxcpl(bus->dhd);
+
+	DHD_PERIM_UNLOCK(bus->dhd); /* Release the perimeter lock */
+}
+
+static int
+dhdpcie_readshared(dhd_bus_t *bus)
+{
+	uint32 addr = 0;
+	int rv, w_init, r_init;
+	uint32 shaddr = 0;
+	pciedev_shared_t *sh = bus->pcie_sh;
+	dhd_timeout_t tmo;
+
+	shaddr = bus->dongle_ram_base + bus->ramsize - 4;
+	/* start a timer for 5 seconds */
+	dhd_timeout_start(&tmo, MAX_READ_TIMEOUT);
+
+	while (((addr == 0) || (addr == bus->nvram_csm)) && !dhd_timeout_expired(&tmo)) {
+		/* Read last word in memory to determine address of sdpcm_shared structure */
+		addr = LTOH32(dhdpcie_bus_rtcm32(bus, shaddr));
+	}
+
+	if ((addr == 0) || (addr == bus->nvram_csm) || (addr < bus->dongle_ram_base) ||
+		(addr > shaddr)) {
+		DHD_ERROR(("%s: address (0x%08x) of pciedev_shared invalid\n",
+			__FUNCTION__, addr));
+		DHD_ERROR(("Waited %u usec, dongle is not ready\n", tmo.elapsed));
+		return BCME_ERROR;
+	} else {
+		bus->shared_addr = (ulong)addr;
+		DHD_ERROR(("PCIe shared addr read took %u usec "
+			"before dongle is ready\n", tmo.elapsed));
+	}
+
+	/* Read hndrte_shared structure */
+	if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)sh,
+		sizeof(pciedev_shared_t))) < 0) {
+		DHD_ERROR(("Failed to read PCIe shared struct,"
+			"size read %d < %d\n", rv, (int)sizeof(pciedev_shared_t)));
+		return rv;
+	}
+
+	/* Endianness */
+	sh->flags = ltoh32(sh->flags);
+	sh->trap_addr = ltoh32(sh->trap_addr);
+	sh->assert_exp_addr = ltoh32(sh->assert_exp_addr);
+	sh->assert_file_addr = ltoh32(sh->assert_file_addr);
+	sh->assert_line = ltoh32(sh->assert_line);
+	sh->console_addr = ltoh32(sh->console_addr);
+	sh->msgtrace_addr = ltoh32(sh->msgtrace_addr);
+	sh->dma_rxoffset = ltoh32(sh->dma_rxoffset);
+	sh->rings_info_ptr = ltoh32(sh->rings_info_ptr);
+	/* load bus console address */
+
+#ifdef DHD_DEBUG
+	bus->console_addr = sh->console_addr;
+#endif
+
+	/* Read the dma rx offset */
+	bus->dma_rxoffset = bus->pcie_sh->dma_rxoffset;
+	dhd_prot_rx_dataoffset(bus->dhd, bus->dma_rxoffset);
+
+	DHD_ERROR(("DMA RX offset from shared Area %d\n", bus->dma_rxoffset));
+
+	if ((sh->flags & PCIE_SHARED_VERSION_MASK) > PCIE_SHARED_VERSION) {
+		DHD_ERROR(("%s: pcie_shared version %d in dhd "
+		           "is older than pciedev_shared version %d in dongle\n",
+		           __FUNCTION__, PCIE_SHARED_VERSION,
+		           sh->flags & PCIE_SHARED_VERSION_MASK));
+		return BCME_ERROR;
+	}
+	if ((sh->flags & PCIE_SHARED_VERSION_MASK) >= 4) {
+		if (sh->flags & PCIE_SHARED_TXPUSH_SPRT) {
+#ifdef DHDTCPACK_SUPPRESS
+			/* Do not use tcpack suppress as packets don't stay in queue */
+			dhd_tcpack_suppress_set(bus->dhd, TCPACK_SUP_OFF);
+#endif
+			bus->txmode_push = TRUE;
+		} else
+			bus->txmode_push = FALSE;
+	}
+	DHD_ERROR(("bus->txmode_push is set to %d\n", bus->txmode_push));
+
+	/* Does the FW support DMA'ing r/w indices */
+	if (sh->flags & PCIE_SHARED_DMA_INDEX) {
+
+		DHD_ERROR(("%s: Host support DMAing indices: H2D:%d - D2H:%d. FW supports it\n",
+			__FUNCTION__,
+			(DMA_INDX_ENAB(bus->dhd->dma_h2d_ring_upd_support) ? 1 : 0),
+			(DMA_INDX_ENAB(bus->dhd->dma_d2h_ring_upd_support) ? 1 : 0)));
+
+	} else if (DMA_INDX_ENAB(bus->dhd->dma_d2h_ring_upd_support) ||
+	           DMA_INDX_ENAB(bus->dhd->dma_h2d_ring_upd_support)) {
+
+#ifdef BCM_INDX_DMA
+		DHD_ERROR(("%s: Incompatible FW. FW does not support DMAing indices\n",
+			__FUNCTION__));
+		return BCME_ERROR;
+#endif
+		DHD_ERROR(("%s: Host supports DMAing indices but FW does not\n",
+			__FUNCTION__));
+		bus->dhd->dma_d2h_ring_upd_support = FALSE;
+		bus->dhd->dma_h2d_ring_upd_support = FALSE;
+	}
+
+
+	/* get ring_info, ring_state and mb data ptrs and store the addresses in bus structure */
+	{
+		ring_info_t  ring_info;
+
+		if ((rv = dhdpcie_bus_membytes(bus, FALSE, sh->rings_info_ptr,
+			(uint8 *)&ring_info, sizeof(ring_info_t))) < 0)
+			return rv;
+
+		bus->h2d_mb_data_ptr_addr = ltoh32(sh->h2d_mb_data_ptr);
+		bus->d2h_mb_data_ptr_addr = ltoh32(sh->d2h_mb_data_ptr);
+
+
+		bus->max_sub_queues = ltoh16(ring_info.max_sub_queues);
+
+		/* If both FW and Host support DMA'ing indices, allocate memory and notify FW
+		 * The max_sub_queues is read from FW initialized ring_info
+		 */
+		if (DMA_INDX_ENAB(bus->dhd->dma_h2d_ring_upd_support)) {
+			w_init = dhd_prot_init_index_dma_block(bus->dhd,
+				HOST_TO_DNGL_DMA_WRITEINDX_BUFFER,
+				bus->max_sub_queues);
+			r_init = dhd_prot_init_index_dma_block(bus->dhd,
+				DNGL_TO_HOST_DMA_READINDX_BUFFER,
+				BCMPCIE_D2H_COMMON_MSGRINGS);
+
+			if ((w_init != BCME_OK) || (r_init != BCME_OK)) {
+				DHD_ERROR(("%s: Failed to allocate memory for dma'ing h2d indices"
+						"Host will use w/r indices in TCM\n",
+						__FUNCTION__));
+				bus->dhd->dma_h2d_ring_upd_support = FALSE;
+			}
+		}
+
+		if (DMA_INDX_ENAB(bus->dhd->dma_d2h_ring_upd_support)) {
+			w_init = dhd_prot_init_index_dma_block(bus->dhd,
+				DNGL_TO_HOST_DMA_WRITEINDX_BUFFER,
+				BCMPCIE_D2H_COMMON_MSGRINGS);
+			r_init = dhd_prot_init_index_dma_block(bus->dhd,
+				HOST_TO_DNGL_DMA_READINDX_BUFFER,
+				bus->max_sub_queues);
+
+			if ((w_init != BCME_OK) || (r_init != BCME_OK)) {
+				DHD_ERROR(("%s: Failed to allocate memory for dma'ing d2h indices"
+						"Host will use w/r indices in TCM\n",
+						__FUNCTION__));
+				bus->dhd->dma_d2h_ring_upd_support = FALSE;
+			}
+		}
+
+		/* read ringmem and ringstate ptrs from shared area and store in host variables */
+		dhd_fillup_ring_sharedptr_info(bus, &ring_info);
+
+		bcm_print_bytes("ring_info_raw", (uchar *)&ring_info, sizeof(ring_info_t));
+		DHD_INFO(("ring_info\n"));
+
+		DHD_ERROR(("max H2D queues %d\n", ltoh16(ring_info.max_sub_queues)));
+
+		DHD_INFO(("mail box address\n"));
+		DHD_INFO(("h2d_mb_data_ptr_addr 0x%04x\n", bus->h2d_mb_data_ptr_addr));
+		DHD_INFO(("d2h_mb_data_ptr_addr 0x%04x\n", bus->d2h_mb_data_ptr_addr));
+	}
+	return BCME_OK;
+}
+/* Read ring mem and ring state ptr info from shared are in TCM */
+static void
+dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info)
+{
+	uint16 i = 0;
+	uint16 j = 0;
+	uint32 tcm_memloc;
+	uint32	d2h_w_idx_ptr, d2h_r_idx_ptr, h2d_w_idx_ptr, h2d_r_idx_ptr;
+
+	/* Ring mem ptr info */
+	/* Alloated in the order
+		H2D_MSGRING_CONTROL_SUBMIT              0
+		H2D_MSGRING_RXPOST_SUBMIT               1
+		D2H_MSGRING_CONTROL_COMPLETE            2
+		D2H_MSGRING_TX_COMPLETE                 3
+		D2H_MSGRING_RX_COMPLETE                 4
+		TX_FLOW_RING				5
+	*/
+
+	{
+		/* ringmemptr holds start of the mem block address space */
+		tcm_memloc = ltoh32(ring_info->ringmem_ptr);
+
+		/* Find out ringmem ptr for each ring common  ring */
+		for (i = 0; i <= BCMPCIE_COMMON_MSGRING_MAX_ID; i++) {
+			bus->ring_sh[i].ring_mem_addr = tcm_memloc;
+			/* Update mem block */
+			tcm_memloc = tcm_memloc + sizeof(ring_mem_t);
+			DHD_INFO(("ring id %d ring mem addr 0x%04x \n",
+				i, bus->ring_sh[i].ring_mem_addr));
+		}
+
+		/* Tx flow Ring */
+		if (bus->txmode_push) {
+			bus->ring_sh[i].ring_mem_addr = tcm_memloc;
+			DHD_INFO(("TX ring ring id %d ring mem addr 0x%04x \n",
+				i, bus->ring_sh[i].ring_mem_addr));
+		}
+	}
+
+	/* Ring state mem ptr info */
+	{
+		d2h_w_idx_ptr = ltoh32(ring_info->d2h_w_idx_ptr);
+		d2h_r_idx_ptr = ltoh32(ring_info->d2h_r_idx_ptr);
+		h2d_w_idx_ptr = ltoh32(ring_info->h2d_w_idx_ptr);
+		h2d_r_idx_ptr = ltoh32(ring_info->h2d_r_idx_ptr);
+		/* Store h2d common ring write/read pointers */
+		for (i = 0; i < BCMPCIE_H2D_COMMON_MSGRINGS; i++) {
+			bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr;
+			bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr;
+
+			/* update mem block */
+			h2d_w_idx_ptr = h2d_w_idx_ptr + sizeof(uint32);
+			h2d_r_idx_ptr = h2d_r_idx_ptr + sizeof(uint32);
+
+			DHD_INFO(("h2d w/r : idx %d write %x read %x \n", i,
+				bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
+		}
+		/* Store d2h common ring write/read pointers */
+		for (j = 0; j < BCMPCIE_D2H_COMMON_MSGRINGS; j++, i++) {
+			bus->ring_sh[i].ring_state_w = d2h_w_idx_ptr;
+			bus->ring_sh[i].ring_state_r = d2h_r_idx_ptr;
+
+			/* update mem block */
+			d2h_w_idx_ptr = d2h_w_idx_ptr + sizeof(uint32);
+			d2h_r_idx_ptr = d2h_r_idx_ptr + sizeof(uint32);
+
+			DHD_INFO(("d2h w/r : idx %d write %x read %x \n", i,
+				bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
+		}
+
+		/* Store txflow ring write/read pointers */
+		if (bus->txmode_push) {
+			bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr;
+			bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr;
+
+			DHD_INFO(("txflow : idx %d write %x read %x \n", i,
+				bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
+		} else {
+			for (j = 0; j < (bus->max_sub_queues - BCMPCIE_H2D_COMMON_MSGRINGS);
+				i++, j++)
+			{
+				bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr;
+				bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr;
+
+				/* update mem block */
+				h2d_w_idx_ptr = h2d_w_idx_ptr + sizeof(uint32);
+				h2d_r_idx_ptr = h2d_r_idx_ptr + sizeof(uint32);
+
+				DHD_INFO(("FLOW Rings h2d w/r : idx %d write %x read %x \n", i,
+					bus->ring_sh[i].ring_state_w,
+					bus->ring_sh[i].ring_state_r));
+			}
+		}
+	}
+}
+/* Initialize bus module: prepare for communication w/dongle */
+int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex)
+{
+	dhd_bus_t *bus = dhdp->bus;
+	int  ret = 0;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	ASSERT(bus->dhd);
+	if (!bus->dhd)
+		return 0;
+
+	/* Make sure we're talking to the core. */
+	bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0);
+	ASSERT(bus->reg != NULL);
+
+	/* before opening up bus for data transfer, check if shared are is intact */
+	ret = dhdpcie_readshared(bus);
+	if (ret < 0) {
+		DHD_ERROR(("%s :Shared area read failed \n", __FUNCTION__));
+		return ret;
+	}
+
+
+	/* Make sure we're talking to the core. */
+	bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0);
+	ASSERT(bus->reg != NULL);
+
+	/* Set bus state according to enable result */
+	dhdp->busstate = DHD_BUS_DATA;
+
+	/* Enable the interrupt after device is up */
+	dhdpcie_bus_intr_enable(bus);
+
+	/* bcmsdh_intr_unmask(bus->sdh); */
+
+	return ret;
+
+}
+
+
+static void
+dhdpcie_init_shared_addr(dhd_bus_t *bus)
+{
+	uint32 addr = 0;
+	uint32 val = 0;
+	addr = bus->dongle_ram_base + bus->ramsize - 4;
+	dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val));
+}
+
+
+bool
+dhdpcie_chipmatch(uint16 vendor, uint16 device)
+{
+	if (vendor != PCI_VENDOR_ID_BROADCOM) {
+		DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__,
+			vendor, device));
+		return (-ENODEV);
+	}
+
+	if ((device == BCM4350_D11AC_ID) || (device == BCM4350_D11AC2G_ID) ||
+		(device == BCM4350_D11AC5G_ID) || BCM4350_CHIP(device))
+		return 0;
+
+	if ((device == BCM4354_D11AC_ID) || (device == BCM4354_D11AC2G_ID) ||
+		(device == BCM4354_D11AC5G_ID) || (device == BCM4354_CHIP_ID))
+		return 0;
+
+	if ((device == BCM4356_D11AC_ID) || (device == BCM4356_D11AC2G_ID) ||
+		(device == BCM4356_D11AC5G_ID) || (device == BCM4356_CHIP_ID))
+		return 0;
+
+	if ((device == BCM4345_D11AC_ID) || (device == BCM4345_D11AC2G_ID) ||
+		(device == BCM4345_D11AC5G_ID) || (device == BCM4345_CHIP_ID))
+		return 0;
+
+	if ((device == BCM4335_D11AC_ID) || (device == BCM4335_D11AC2G_ID) ||
+		(device == BCM4335_D11AC5G_ID) || (device == BCM4335_CHIP_ID))
+		return 0;
+
+	if ((device == BCM43602_D11AC_ID) || (device == BCM43602_D11AC2G_ID) ||
+		(device == BCM43602_D11AC5G_ID) || (device == BCM43602_CHIP_ID))
+		return 0;
+
+	if ((device == BCM43569_D11AC_ID) || (device == BCM43569_D11AC2G_ID) ||
+		(device == BCM43569_D11AC5G_ID) || (device == BCM43569_CHIP_ID))
+		return 0;
+
+	if ((device == BCM4358_D11AC_ID) || (device == BCM4358_D11AC2G_ID) ||
+		(device == BCM4358_D11AC5G_ID) || (device == BCM4358_CHIP_ID))
+		return 0;
+
+	if ((device == BCM4349_D11AC_ID) || (device == BCM4349_D11AC2G_ID) ||
+		(device == BCM4349_D11AC5G_ID) || (device == BCM4349_CHIP_ID))
+		return 0;
+	if ((device == BCM4355_D11AC_ID) || (device == BCM4355_D11AC2G_ID) ||
+		(device == BCM4355_D11AC5G_ID) || (device == BCM4355_CHIP_ID))
+		return 0;
+	if ((device == BCM4359_D11AC_ID) || (device == BCM4359_D11AC2G_ID) ||
+		(device == BCM4359_D11AC5G_ID) || (device == BCM4359_CHIP_ID))
+		return 0;
+
+
+	DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__, vendor, device));
+	return (-ENODEV);
+}
+
+
+/*
+
+Name:  dhdpcie_cc_nvmshadow
+
+Description:
+A shadow of OTP/SPROM exists in ChipCommon Region
+betw. 0x800 and 0xBFF (Backplane Addr. 0x1800_0800 and 0x1800_0BFF).
+Strapping option (SPROM vs. OTP), presence of OTP/SPROM and its size
+can also be read from ChipCommon Registers.
+*/
+
+static int
+dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b)
+{
+	uint16 dump_offset = 0;
+	uint32 dump_size = 0, otp_size = 0, sprom_size = 0;
+
+	/* Table for 65nm OTP Size (in bits) */
+	int  otp_size_65nm[8] = {0, 2048, 4096, 8192, 4096, 6144, 512, 1024};
+
+	volatile uint16 *nvm_shadow;
+
+	uint cur_coreid;
+	uint chipc_corerev;
+	chipcregs_t *chipcregs;
+
+
+	/* Save the current core */
+	cur_coreid = si_coreid(bus->sih);
+	/* Switch to ChipC */
+	chipcregs = (chipcregs_t *)si_setcore(bus->sih, CC_CORE_ID, 0);
+	chipc_corerev = si_corerev(bus->sih);
+
+	/* Check ChipcommonCore Rev */
+	if (chipc_corerev < 44) {
+		DHD_ERROR(("%s: ChipcommonCore Rev %d < 44\n", __FUNCTION__, chipc_corerev));
+		return BCME_UNSUPPORTED;
+	}
+
+	/* Check ChipID */
+	if (((uint16)bus->sih->chip != BCM4350_CHIP_ID) &&
+		((uint16)bus->sih->chip != BCM4345_CHIP_ID)) {
+		DHD_ERROR(("%s: cc_nvmdump cmd. supported for 4350/4345 only\n",
+			__FUNCTION__));
+		return BCME_UNSUPPORTED;
+	}
+
+	/* Check if SRC_PRESENT in SpromCtrl(0x190 in ChipCommon Regs) is set */
+	if (chipcregs->sromcontrol & SRC_PRESENT) {
+		/* SPROM Size: 1Kbits (0x0), 4Kbits (0x1), 16Kbits(0x2) */
+		sprom_size = (1 << (2 * ((chipcregs->sromcontrol & SRC_SIZE_MASK)
+					>> SRC_SIZE_SHIFT))) * 1024;
+		bcm_bprintf(b, "\nSPROM Present (Size %d bits)\n", sprom_size);
+	}
+
+	if (chipcregs->sromcontrol & SRC_OTPPRESENT) {
+		bcm_bprintf(b, "\nOTP Present");
+
+		if (((chipcregs->otplayout & OTPL_WRAP_TYPE_MASK) >> OTPL_WRAP_TYPE_SHIFT)
+			== OTPL_WRAP_TYPE_40NM) {
+			/* 40nm OTP: Size = (OtpSize + 1) * 1024 bits */
+			otp_size =  (((chipcregs->capabilities & CC_CAP_OTPSIZE)
+				        >> CC_CAP_OTPSIZE_SHIFT) + 1) * 1024;
+			bcm_bprintf(b, "(Size %d bits)\n", otp_size);
+		} else {
+			/* This part is untested since newer chips have 40nm OTP */
+			otp_size = otp_size_65nm[(chipcregs->capabilities & CC_CAP_OTPSIZE)
+				        >> CC_CAP_OTPSIZE_SHIFT];
+			bcm_bprintf(b, "(Size %d bits)\n", otp_size);
+			DHD_INFO(("%s: 65nm/130nm OTP Size not tested. \n",
+				__FUNCTION__));
+		}
+	}
+
+	if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) &&
+		((chipcregs->capabilities & CC_CAP_OTPSIZE) == 0)) {
+		DHD_ERROR(("%s: SPROM and OTP could not be found \n",
+			__FUNCTION__));
+		return BCME_NOTFOUND;
+	}
+
+	/* Check the strapping option in SpromCtrl: Set = OTP otherwise SPROM */
+	if ((chipcregs->sromcontrol & SRC_OTPSEL) &&
+		(chipcregs->sromcontrol & SRC_OTPPRESENT)) {
+
+		bcm_bprintf(b, "OTP Strap selected.\n"
+		               "\nOTP Shadow in ChipCommon:\n");
+
+		dump_size = otp_size / 16 ; /* 16bit words */
+
+	} else if (((chipcregs->sromcontrol & SRC_OTPSEL) == 0) &&
+		(chipcregs->sromcontrol & SRC_PRESENT)) {
+
+		bcm_bprintf(b, "SPROM Strap selected\n"
+				"\nSPROM Shadow in ChipCommon:\n");
+
+		/* If SPROM > 8K only 8Kbits is mapped to ChipCommon (0x800 - 0xBFF) */
+		/* dump_size in 16bit words */
+		dump_size = sprom_size > 8 ? (8 * 1024) / 16 : sprom_size / 16;
+	}
+	else {
+		DHD_ERROR(("%s: NVM Shadow does not exist in ChipCommon\n",
+			__FUNCTION__));
+		return BCME_NOTFOUND;
+	}
+
+	if (bus->regs == NULL) {
+		DHD_ERROR(("ChipCommon Regs. not initialized\n"));
+		return BCME_NOTREADY;
+	} else {
+	    bcm_bprintf(b, "\n OffSet:");
+
+	    /* Point to the SPROM/OTP shadow in ChipCommon */
+	    nvm_shadow = chipcregs->sromotp;
+
+	   /*
+	    * Read 16 bits / iteration.
+	    * dump_size & dump_offset in 16-bit words
+	    */
+	    while (dump_offset < dump_size) {
+		if (dump_offset % 2 == 0)
+			/* Print the offset in the shadow space in Bytes */
+			bcm_bprintf(b, "\n 0x%04x", dump_offset * 2);
+
+		bcm_bprintf(b, "\t0x%04x", *(nvm_shadow + dump_offset));
+		dump_offset += 0x1;
+	    }
+	}
+
+	/* Switch back to the original core */
+	si_setcore(bus->sih, cur_coreid, 0);
+
+	return BCME_OK;
+}
+
+
+uint8 BCMFASTPATH
+dhd_bus_is_txmode_push(dhd_bus_t *bus)
+{
+	return bus->txmode_push;
+}
+
+void dhd_bus_clean_flow_ring(dhd_bus_t *bus, void *node)
+{
+	void *pkt;
+	flow_queue_t *queue;
+	flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)node;
+	unsigned long flags;
+
+	queue = &flow_ring_node->queue;
+
+#ifdef DHDTCPACK_SUPPRESS
+	/* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
+	 * when there is a newly coming packet from network stack.
+	 */
+	dhd_tcpack_info_tbl_clean(bus->dhd);
+#endif /* DHDTCPACK_SUPPRESS */
+
+	/* clean up BUS level info */
+	DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
+
+	/* Flush all pending packets in the queue, if any */
+	while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
+		PKTFREE(bus->dhd->osh, pkt, TRUE);
+	}
+	ASSERT(flow_queue_empty(queue));
+
+	flow_ring_node->status = FLOW_RING_STATUS_CLOSED;
+
+	flow_ring_node->active = FALSE;
+
+	dll_delete(&flow_ring_node->list);
+
+	DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+
+	/* Call Flow ring clean up */
+	dhd_prot_clean_flow_ring(bus->dhd, flow_ring_node->prot_info);
+	dhd_flowid_free(bus->dhd, flow_ring_node->flow_info.ifindex,
+					flow_ring_node->flowid);
+
+}
+
+/*
+ * Allocate a Flow ring buffer,
+ * Init Ring buffer,
+ * Send Msg to device about flow ring creation
+*/
+int
+dhd_bus_flow_ring_create_request(dhd_bus_t *bus, void *arg)
+{
+	flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)arg;
+
+	DHD_INFO(("%s :Flow create\n", __FUNCTION__));
+
+	/* Send Msg to device about flow ring creation */
+	if (dhd_prot_flow_ring_create(bus->dhd, flow_ring_node) != BCME_OK)
+		return BCME_NOMEM;
+
+	return BCME_OK;
+}
+
+void
+dhd_bus_flow_ring_create_response(dhd_bus_t *bus, uint16 flowid, int32 status)
+{
+	flow_ring_node_t *flow_ring_node;
+	unsigned long flags;
+
+	DHD_INFO(("%s :Flow Response %d \n", __FUNCTION__, flowid));
+
+	flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
+	ASSERT(flow_ring_node->flowid == flowid);
+
+	if (status != BCME_OK) {
+		DHD_ERROR(("%s Flow create Response failure error status = %d \n",
+		     __FUNCTION__, status));
+		/* Call Flow clean up */
+		dhd_bus_clean_flow_ring(bus, flow_ring_node);
+		return;
+	}
+
+	DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
+	flow_ring_node->status = FLOW_RING_STATUS_OPEN;
+	DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+
+	dhd_bus_schedule_queue(bus, flowid, FALSE);
+
+	return;
+}
+
+int
+dhd_bus_flow_ring_delete_request(dhd_bus_t *bus, void *arg)
+{
+	void * pkt;
+	flow_queue_t *queue;
+	flow_ring_node_t *flow_ring_node;
+	unsigned long flags;
+
+	DHD_INFO(("%s :Flow Delete\n", __FUNCTION__));
+
+	flow_ring_node = (flow_ring_node_t *)arg;
+
+	DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
+	if (flow_ring_node->status & FLOW_RING_STATUS_DELETE_PENDING) {
+		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+		DHD_ERROR(("%s :Delete Pending\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+	flow_ring_node->status = FLOW_RING_STATUS_DELETE_PENDING;
+
+	queue = &flow_ring_node->queue; /* queue associated with flow ring */
+
+#ifdef DHDTCPACK_SUPPRESS
+	/* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
+	 * when there is a newly coming packet from network stack.
+	 */
+	dhd_tcpack_info_tbl_clean(bus->dhd);
+#endif /* DHDTCPACK_SUPPRESS */
+	/* Flush all pending packets in the queue, if any */
+	while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
+		PKTFREE(bus->dhd->osh, pkt, TRUE);
+	}
+	ASSERT(flow_queue_empty(queue));
+
+	DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+
+	/* Send Msg to device about flow ring deletion */
+	dhd_prot_flow_ring_delete(bus->dhd, flow_ring_node);
+
+	return BCME_OK;
+}
+
+void
+dhd_bus_flow_ring_delete_response(dhd_bus_t *bus, uint16 flowid, uint32 status)
+{
+	flow_ring_node_t *flow_ring_node;
+
+	DHD_ERROR(("%s :Flow Delete Response %d \n", __FUNCTION__, flowid));
+
+	flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
+	ASSERT(flow_ring_node->flowid == flowid);
+
+	if (status != BCME_OK) {
+		DHD_ERROR(("%s Flow Delete Response failure error status = %d \n",
+		    __FUNCTION__, status));
+		return;
+	}
+	/* Call Flow clean up */
+	dhd_bus_clean_flow_ring(bus, flow_ring_node);
+
+	return;
+
+}
+
+int dhd_bus_flow_ring_flush_request(dhd_bus_t *bus, void *arg)
+{
+	void *pkt;
+	flow_queue_t *queue;
+	flow_ring_node_t *flow_ring_node;
+	unsigned long flags;
+
+	DHD_INFO(("%s :Flow Delete\n", __FUNCTION__));
+
+	flow_ring_node = (flow_ring_node_t *)arg;
+	queue = &flow_ring_node->queue; /* queue associated with flow ring */
+
+	DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
+
+#ifdef DHDTCPACK_SUPPRESS
+	/* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
+	 * when there is a newly coming packet from network stack.
+	 */
+	dhd_tcpack_info_tbl_clean(bus->dhd);
+#endif /* DHDTCPACK_SUPPRESS */
+	/* Flush all pending packets in the queue, if any */
+	while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
+		PKTFREE(bus->dhd->osh, pkt, TRUE);
+	}
+	ASSERT(flow_queue_empty(queue));
+
+	DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+
+	/* Send Msg to device about flow ring flush */
+	dhd_prot_flow_ring_flush(bus->dhd, flow_ring_node);
+
+	DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
+	flow_ring_node->status = FLOW_RING_STATUS_FLUSH_PENDING;
+	DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+
+	return BCME_OK;
+}
+
+void
+dhd_bus_flow_ring_flush_response(dhd_bus_t *bus, uint16 flowid, uint32 status)
+{
+	flow_ring_node_t *flow_ring_node;
+	unsigned long flags;
+
+	if (status != BCME_OK) {
+		DHD_ERROR(("%s Flow flush Response failure error status = %d \n",
+		    __FUNCTION__, status));
+		return;
+	}
+
+	flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
+	ASSERT(flow_ring_node->flowid == flowid);
+
+	DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
+	flow_ring_node->status = FLOW_RING_STATUS_OPEN;
+	DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+
+	return;
+}
+
+uint32
+dhd_bus_max_h2d_queues(struct dhd_bus *bus, uint8 *txpush)
+{
+	if (bus->txmode_push)
+		*txpush = 1;
+	else
+		*txpush = 0;
+	return bus->max_sub_queues;
+}
+
+int
+dhdpcie_bus_clock_start(struct dhd_bus *bus)
+{
+	return dhdpcie_start_host_pcieclock(bus);
+}
+
+int
+dhdpcie_bus_clock_stop(struct dhd_bus *bus)
+{
+	return dhdpcie_stop_host_pcieclock(bus);
+}
+
+int
+dhdpcie_bus_disable_device(struct dhd_bus *bus)
+{
+	return dhdpcie_disable_device(bus);
+}
+
+int
+dhdpcie_bus_enable_device(struct dhd_bus *bus)
+{
+	return dhdpcie_enable_device(bus);
+}
+
+int
+dhdpcie_bus_alloc_resource(struct dhd_bus *bus)
+{
+	return dhdpcie_alloc_resource(bus);
+}
+
+void
+dhdpcie_bus_free_resource(struct dhd_bus *bus)
+{
+	dhdpcie_free_resource(bus);
+}
+
+int
+dhd_bus_request_irq(struct dhd_bus *bus)
+{
+	return dhdpcie_bus_request_irq(bus);
+}
+
+bool
+dhdpcie_bus_dongle_attach(struct dhd_bus *bus)
+{
+	return dhdpcie_dongle_attach(bus);
+}
+
+int
+dhd_bus_release_dongle(struct dhd_bus *bus)
+{
+	bool dongle_isolation;
+	osl_t		*osh;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (bus) {
+		osh = bus->osh;
+		ASSERT(osh);
+
+		if (bus->dhd) {
+			dongle_isolation = bus->dhd->dongle_isolation;
+			dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE);
+		}
+	}
+
+	return 0;
+}
diff --git a/drivers/net/wireless/bcmdhd/dhd_pcie.h b/drivers/net/wireless/bcmdhd/dhd_pcie.h
new file mode 100644
index 0000000..823d7a1a
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_pcie.h
@@ -0,0 +1,202 @@
+/*
+ * Linux DHD Bus Module for PCIE
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ *
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ *
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_pcie.h 473468 2014-04-29 07:30:27Z $
+ */
+
+
+#ifndef dhd_pcie_h
+#define dhd_pcie_h
+
+#include <bcmpcie.h>
+#include <hnd_cons.h>
+#ifdef MSM_PCIE_LINKDOWN_RECOVERY
+#include <mach/msm_pcie.h>
+#endif /* MSM_PCIE_LINKDOWN_RECOVERY */
+
+/* defines */
+
+#define PCMSGBUF_HDRLEN 0
+#define DONGLE_REG_MAP_SIZE (32 * 1024)
+#define DONGLE_TCM_MAP_SIZE (4096 * 1024)
+#define DONGLE_MIN_MEMSIZE (128 *1024)
+#ifdef DHD_DEBUG
+#define DHD_PCIE_SUCCESS 0
+#define DHD_PCIE_FAILURE 1
+#endif /* DHD_DEBUG */
+#define	REMAP_ENAB(bus)			((bus)->remap)
+#define	REMAP_ISADDR(bus, a)		(((a) >= ((bus)->orig_ramsize)) && ((a) < ((bus)->ramsize)))
+
+#define MAX_DHD_TX_FLOWS	256
+#define PCIE_LINK_DOWN		0xFFFFFFFF
+#define DHD_INVALID 		-1
+/* user defined data structures */
+#ifdef DHD_DEBUG
+/* Device console log buffer state */
+#define CONSOLE_LINE_MAX	192
+#define CONSOLE_BUFFER_MAX	2024
+
+
+typedef struct dhd_console {
+	 uint		count;	/* Poll interval msec counter */
+	 uint		log_addr;		 /* Log struct address (fixed) */
+	 hnd_log_t	 log;			 /* Log struct (host copy) */
+	 uint		 bufsize;		 /* Size of log buffer */
+	 uint8		 *buf;			 /* Log buffer (host copy) */
+	 uint		 last;			 /* Last buffer read index */
+} dhd_console_t;
+#endif /* DHD_DEBUG */
+typedef struct ring_sh_info {
+	uint32 ring_mem_addr;
+	uint32 ring_state_w;
+	uint32 ring_state_r;
+} ring_sh_info_t;
+
+typedef struct dhd_bus {
+	dhd_pub_t	*dhd;
+	struct pci_dev  *dev;		/* pci device handle */
+	dll_t       const_flowring; /* constructed list of tx flowring queues */
+
+	si_t		*sih;			/* Handle for SI calls */
+	char		*vars;			/* Variables (from CIS and/or other) */
+	uint		varsz;			/* Size of variables buffer */
+	uint32		sbaddr;			/* Current SB window pointer (-1, invalid) */
+	sbpcieregs_t	*reg;			/* Registers for PCIE core */
+
+	uint		armrev;			/* CPU core revision */
+	uint		ramrev;			/* SOCRAM core revision */
+	uint32		ramsize;		/* Size of RAM in SOCRAM (bytes) */
+	uint32		orig_ramsize;		/* Size of RAM in SOCRAM (bytes) */
+	uint32		srmemsize;		/* Size of SRMEM */
+
+	uint32		bus;			/* gSPI or SDIO bus */
+	uint32		intstatus;		/* Intstatus bits (events) pending */
+	bool		dpc_sched;		/* Indicates DPC schedule (intrpt rcvd) */
+	bool		fcstate;		/* State of dongle flow-control */
+
+	uint16		cl_devid;		/* cached devid for dhdsdio_probe_attach() */
+	char		*fw_path;		/* module_param: path to firmware image */
+	char		*nv_path;		/* module_param: path to nvram vars file */
+	char		*nvram_params;		/* user specified nvram params. */
+	int		nvram_params_len;
+
+	struct pktq	txq;			/* Queue length used for flow-control */
+
+	uint		rxlen;			/* Length of valid data in buffer */
+
+
+	bool		intr;			/* Use interrupts */
+	bool		ipend;			/* Device interrupt is pending */
+	bool		intdis;			/* Interrupts disabled by isr */
+	uint		intrcount;		/* Count of device interrupt callbacks */
+	uint		lastintrs;		/* Count as of last watchdog timer */
+
+#ifdef DHD_DEBUG
+	dhd_console_t	console;		/* Console output polling support */
+	uint		console_addr;		/* Console address from shared struct */
+#endif /* DHD_DEBUG */
+
+	bool		alp_only;		/* Don't use HT clock (ALP only) */
+
+	bool		remap;		/* Contiguous 1MB RAM: 512K socram + 512K devram
+					 * Available with socram rev 16
+					 * Remap region not DMA-able
+					 */
+	uint32		resetinstr;
+	uint32		dongle_ram_base;
+
+	ulong		shared_addr;
+	pciedev_shared_t	*pcie_sh;
+	bool bus_flowctrl;
+	ioctl_comp_resp_msg_t	ioct_resp;
+	uint32		dma_rxoffset;
+	volatile char	*regs;		/* pci device memory va */
+	volatile char	*tcm;		/* pci device memory va */
+	osl_t		*osh;
+	uint32		nvram_csm;	/* Nvram checksum */
+	uint16		pollrate;
+	uint16  polltick;
+
+	uint32  *pcie_mb_intr_addr;
+	void    *pcie_mb_intr_osh;
+	bool	sleep_allowed;
+#ifdef DHD_WAKE_STATUS
+	uint		rxwake;
+	uint		rcwake;
+#endif
+	/* version 3 shared struct related info start */
+	ring_sh_info_t	ring_sh[BCMPCIE_COMMON_MSGRINGS + MAX_DHD_TX_FLOWS];
+	uint8	h2d_ring_count;
+	uint8	d2h_ring_count;
+	uint32  ringmem_ptr;
+	uint32  ring_state_ptr;
+
+	uint32 d2h_dma_scratch_buffer_mem_addr;
+
+	uint32 h2d_mb_data_ptr_addr;
+	uint32 d2h_mb_data_ptr_addr;
+	/* version 3 shared struct related info end */
+
+	uint32 def_intmask;
+	bool	ltrsleep_on_unload;
+	uint	wait_for_d3_ack;
+	uint8	txmode_push;
+	uint32 max_sub_queues;
+	bool	db1_for_mb;
+	bool	suspended;
+#ifdef MSM_PCIE_LINKDOWN_RECOVERY
+	struct msm_pcie_register_event pcie_event;
+	bool islinkdown;
+#endif /* MSM_PCIE_LINKDOWN_RECOVERY */
+} dhd_bus_t;
+
+/* function declarations */
+
+extern uint32* dhdpcie_bus_reg_map(osl_t *osh, ulong addr, int size);
+extern int dhdpcie_bus_register(void);
+extern void dhdpcie_bus_unregister(void);
+extern bool dhdpcie_chipmatch(uint16 vendor, uint16 device);
+
+extern struct dhd_bus* dhdpcie_bus_attach(osl_t *osh, volatile char* regs, volatile char* tcm);
+extern uint32 dhdpcie_bus_cfg_read_dword(struct dhd_bus *bus, uint32 addr, uint32 size);
+extern void dhdpcie_bus_cfg_write_dword(struct dhd_bus *bus, uint32 addr, uint32 size, uint32 data);
+extern void dhdpcie_bus_intr_disable(struct dhd_bus *bus);
+extern void dhdpcie_bus_release(struct dhd_bus *bus);
+extern int32 dhdpcie_bus_isr(struct dhd_bus *bus);
+extern void dhdpcie_free_irq(dhd_bus_t *bus);
+extern int dhdpcie_bus_suspend(struct  dhd_bus *bus, bool state);
+extern int dhdpcie_pci_suspend_resume(struct pci_dev *dev, bool state);
+extern int dhdpcie_start_host_pcieclock(dhd_bus_t *bus);
+extern int dhdpcie_stop_host_pcieclock(dhd_bus_t *bus);
+extern int dhdpcie_disable_device(dhd_bus_t *bus);
+extern int dhdpcie_enable_device(dhd_bus_t *bus);
+extern int dhdpcie_alloc_resource(dhd_bus_t *bus);
+extern void dhdpcie_free_resource(dhd_bus_t *bus);
+extern int dhdpcie_bus_request_irq(struct dhd_bus *bus);
+extern int dhd_buzzz_dump_dngl(dhd_bus_t *bus);
+#ifdef DHD_WAKE_STATUS
+int bcmpcie_get_total_wake(struct dhd_bus *bus);
+int bcmpcie_set_get_wake(struct dhd_bus *bus, int flag);
+#endif
+
+#endif /* dhd_pcie_h */
diff --git a/drivers/net/wireless/bcmdhd/dhd_pcie_linux.c b/drivers/net/wireless/bcmdhd/dhd_pcie_linux.c
new file mode 100644
index 0000000..1528de8
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_pcie_linux.c
@@ -0,0 +1,983 @@
+/*
+ * Linux DHD Bus Module for PCIE
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ *
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ *
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_pcie_linux.c 477713 2014-05-14 08:59:12Z $
+ */
+
+
+/* include files */
+#include <typedefs.h>
+#include <bcmutils.h>
+#include <bcmdevs.h>
+#include <siutils.h>
+#include <hndsoc.h>
+#include <hndpmu.h>
+#include <sbchipc.h>
+#if defined(DHD_DEBUG)
+#include <hnd_armtrap.h>
+#include <hnd_cons.h>
+#endif /* defined(DHD_DEBUG) */
+#include <dngl_stats.h>
+#include <pcie_core.h>
+#include <dhd.h>
+#include <dhd_bus.h>
+#include <dhd_proto.h>
+#include <dhd_dbg.h>
+#include <dhdioctl.h>
+#include <bcmmsgbuf.h>
+#include <pcicfg.h>
+#include <dhd_pcie.h>
+#include <dhd_linux.h>
+#ifdef DHD_WAKE_STATUS
+#include <linux/wakeup_reason.h>
+#endif
+#if defined (CONFIG_ARCH_MSM)
+#include <mach/msm_pcie.h>
+#endif
+
+#define PCI_CFG_RETRY 		10
+#define OS_HANDLE_MAGIC		0x1234abcd	/* Magic # to recognize osh */
+#define BCM_MEM_FILENAME_LEN 	24		/* Mem. filename length */
+
+#define OSL_PKTTAG_CLEAR(p) \
+do { \
+	struct sk_buff *s = (struct sk_buff *)(p); \
+	ASSERT(OSL_PKTTAG_SZ == 32); \
+	*(uint32 *)(&s->cb[0]) = 0; *(uint32 *)(&s->cb[4]) = 0; \
+	*(uint32 *)(&s->cb[8]) = 0; *(uint32 *)(&s->cb[12]) = 0; \
+	*(uint32 *)(&s->cb[16]) = 0; *(uint32 *)(&s->cb[20]) = 0; \
+	*(uint32 *)(&s->cb[24]) = 0; *(uint32 *)(&s->cb[28]) = 0; \
+} while (0)
+
+
+/* user defined data structures  */
+
+typedef struct dhd_pc_res {
+	uint32 bar0_size;
+	void* bar0_addr;
+	uint32 bar1_size;
+	void* bar1_addr;
+} pci_config_res, *pPci_config_res;
+
+typedef bool (*dhdpcie_cb_fn_t)(void *);
+
+typedef struct dhdpcie_info
+{
+	dhd_bus_t	*bus;
+	osl_t 			*osh;
+	struct pci_dev  *dev;		/* pci device handle */
+	volatile char 	*regs;		/* pci device memory va */
+	volatile char 	*tcm;		/* pci device memory va */
+	uint32			tcm_size;	/* pci device memory size */
+	struct pcos_info *pcos_info;
+	uint16		last_intrstatus;	/* to cache intrstatus */
+	int	irq;
+	char pciname[32];
+	struct pci_saved_state* default_state;
+	struct pci_saved_state* state;
+#ifdef DHD_WAKE_STATUS
+	spinlock_t	pcie_lock;
+	unsigned int	total_wake_count;
+	int	pkt_wake;
+	int	wake_irq;
+#endif
+} dhdpcie_info_t;
+
+
+struct pcos_info {
+	dhdpcie_info_t *pc;
+	spinlock_t lock;
+	wait_queue_head_t intr_wait_queue;
+	struct timer_list tuning_timer;
+	int tuning_timer_exp;
+	atomic_t timer_enab;
+	struct tasklet_struct tuning_tasklet;
+};
+
+
+/* function declarations */
+static int __devinit
+dhdpcie_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void __devexit
+dhdpcie_pci_remove(struct pci_dev *pdev);
+static int dhdpcie_init(struct pci_dev *pdev);
+static irqreturn_t dhdpcie_isr(int irq, void *arg);
+/* OS Routine functions for PCI suspend/resume */
+
+static int dhdpcie_pci_suspend(struct pci_dev *dev, pm_message_t state);
+static int dhdpcie_set_suspend_resume(struct pci_dev *dev, bool state);
+static int dhdpcie_pci_resume(struct pci_dev *dev);
+static int dhdpcie_resume_dev(struct pci_dev *dev);
+static int dhdpcie_suspend_dev(struct pci_dev *dev);
+static struct pci_device_id dhdpcie_pci_devid[] __devinitdata = {
+	{ vendor: 0x14e4,
+	device: PCI_ANY_ID,
+	subvendor: PCI_ANY_ID,
+	subdevice: PCI_ANY_ID,
+	class: PCI_CLASS_NETWORK_OTHER << 8,
+	class_mask: 0xffff00,
+	driver_data: 0,
+	},
+	{ 0, }
+};
+MODULE_DEVICE_TABLE(pci, dhdpcie_pci_devid);
+
+static struct pci_driver dhdpcie_driver = {
+	node:		{},
+	name:		"pcieh",
+	id_table:	dhdpcie_pci_devid,
+	probe:		dhdpcie_pci_probe,
+	remove:		dhdpcie_pci_remove,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+	save_state:	NULL,
+#endif
+	suspend:	dhdpcie_pci_suspend,
+	resume:		dhdpcie_pci_resume,
+};
+
+int dhdpcie_init_succeeded = FALSE;
+
+static void dhdpcie_pme_active(struct pci_dev *pdev, bool enable)
+{
+	uint16 pmcsr;
+
+	pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
+	/* Clear PME Status by writing 1 to it and enable PME# */
+	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
+	if (!enable)
+		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
+
+	pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, pmcsr);
+}
+
+static int dhdpcie_set_suspend_resume(struct pci_dev *pdev, bool state)
+{
+	int ret = 0;
+	dhdpcie_info_t *pch = pci_get_drvdata(pdev);
+	dhd_bus_t *bus = NULL;
+	DHD_INFO(("%s Enter with state :%x\n", __FUNCTION__, state));
+	if (pch) {
+		bus = pch->bus;
+	}
+
+	/* When firmware is not loaded do the PCI bus */
+	/* suspend/resume only */
+	if (bus && (bus->dhd->busstate == DHD_BUS_DOWN) &&
+		!bus->dhd->dongle_reset) {
+		ret = dhdpcie_pci_suspend_resume(bus->dev, state);
+		return ret;
+	}
+
+	if (bus && ((bus->dhd->busstate == DHD_BUS_SUSPEND)||
+		(bus->dhd->busstate == DHD_BUS_DATA)) &&
+		(bus->suspended != state)) {
+
+		ret = dhdpcie_bus_suspend(bus, state);
+	}
+	DHD_INFO(("%s Exit with state :%d\n", __FUNCTION__, ret));
+	return ret;
+}
+
+static int dhdpcie_pci_suspend(struct pci_dev * pdev, pm_message_t state)
+{
+	BCM_REFERENCE(state);
+	DHD_INFO(("%s Enter with event %x\n", __FUNCTION__, state.event));
+	return dhdpcie_set_suspend_resume(pdev, TRUE);
+}
+
+static int dhdpcie_pci_resume(struct pci_dev *pdev)
+{
+	DHD_INFO(("%s Enter\n", __FUNCTION__));
+	return dhdpcie_set_suspend_resume(pdev, FALSE);
+}
+
+static int dhdpcie_suspend_dev(struct pci_dev *dev)
+{
+	int ret;
+	dhdpcie_info_t *pch = pci_get_drvdata(dev);
+	dhdpcie_pme_active(dev, TRUE);
+	pci_save_state(dev);
+	pch->state = pci_store_saved_state(dev);
+	pci_enable_wake(dev, PCI_D0, TRUE);
+	if (pci_is_enabled(dev))
+		pci_disable_device(dev);
+	ret = pci_set_power_state(dev, PCI_D3hot);
+	return ret;
+}
+
+int dhd_os_get_wake_irq(dhd_pub_t *pub);
+
+#ifdef DHD_WAKE_STATUS
+int bcmpcie_get_total_wake(struct dhd_bus *bus)
+{
+	dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
+
+	return pch->total_wake_count;
+}
+
+int bcmpcie_set_get_wake(struct dhd_bus *bus, int flag)
+{
+	dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&pch->pcie_lock, flags);
+
+	ret = pch->pkt_wake;
+	pch->total_wake_count += flag;
+	pch->pkt_wake = flag;
+
+	spin_unlock_irqrestore(&pch->pcie_lock, flags);
+	return ret;
+}
+#endif
+
+static int dhdpcie_resume_dev(struct pci_dev *dev)
+{
+	int err = 0;
+	dhdpcie_info_t *pch = pci_get_drvdata(dev);
+
+#ifdef DHD_WAKE_STATUS
+	if (check_wakeup_reason(pch->wake_irq))
+		bcmpcie_set_get_wake(pch->bus, 1);
+#endif
+	pci_load_and_free_saved_state(dev, &pch->state);
+	pci_restore_state(dev);
+	err = pci_enable_device(dev);
+	if (err) {
+		printf("%s:pci_enable_device error %d \n", __FUNCTION__, err);
+		return err;
+	}
+	pci_set_master(dev);
+	/*
+	 * Suspend/Resume resets the PCI configuration space, so we have to
+	 * re-disable the RETRY_TIMEOUT register (0x41) to keep
+	 * PCI Tx retries from interfering with C3 CPU state
+	 * Code taken from ipw2100 driver
+	 */
+	err = pci_set_power_state(dev, PCI_D0);
+	if (err) {
+		printf("%s:pci_set_power_state error %d \n", __FUNCTION__, err);
+		return err;
+	}
+	dhdpcie_pme_active(dev, FALSE);
+	return err;
+}
+
+int dhdpcie_pci_suspend_resume(struct pci_dev *dev, bool state)
+{
+	int rc;
+
+	if (state)
+		rc = dhdpcie_suspend_dev(dev);
+	else
+		rc = dhdpcie_resume_dev(dev);
+	return rc;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+static int dhdpcie_device_scan(struct device *dev, void *data)
+{
+	struct pci_dev *pcidev;
+	int *cnt = data;
+
+	pcidev = container_of(dev, struct pci_dev, dev);
+	if (pcidev->vendor != 0x14e4)
+		return 0;
+
+	DHD_INFO(("Found Broadcom PCI device 0x%04x\n", pcidev->device));
+	*cnt += 1;
+	if (pcidev->driver && strcmp(pcidev->driver->name, dhdpcie_driver.name))
+		DHD_ERROR(("Broadcom PCI Device 0x%04x has allocated with driver %s\n",
+			pcidev->device, pcidev->driver->name));
+
+	return 0;
+}
+#endif /* LINUX_VERSION >= 2.6.0 */
+
+int
+dhdpcie_bus_register(void)
+{
+	int error = 0;
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+	if (!(error = pci_module_init(&dhdpcie_driver)))
+		return 0;
+
+	DHD_ERROR(("%s: pci_module_init failed 0x%x\n", __FUNCTION__, error));
+#else
+	if (!(error = pci_register_driver(&dhdpcie_driver))) {
+		bus_for_each_dev(dhdpcie_driver.driver.bus, NULL, &error, dhdpcie_device_scan);
+		if (!error) {
+			DHD_ERROR(("No Broadcom PCI device enumerated!\n"));
+		} else if (!dhdpcie_init_succeeded) {
+			DHD_ERROR(("%s: dhdpcie initialize failed.\n", __FUNCTION__));
+		} else {
+			return 0;
+		}
+
+		pci_unregister_driver(&dhdpcie_driver);
+		error = BCME_ERROR;
+	}
+#endif /* LINUX_VERSION < 2.6.0 */
+
+	return error;
+}
+
+
+void
+dhdpcie_bus_unregister(void)
+{
+	pci_unregister_driver(&dhdpcie_driver);
+}
+
+int __devinit
+dhdpcie_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+
+	if (dhdpcie_chipmatch (pdev->vendor, pdev->device)) {
+		DHD_ERROR(("%s: chipmatch failed!!\n", __FUNCTION__));
+			return -ENODEV;
+	}
+	printf("PCI_PROBE:  bus %X, slot %X,vendor %X, device %X"
+		"(good PCI location)\n", pdev->bus->number,
+		PCI_SLOT(pdev->devfn), pdev->vendor, pdev->device);
+
+	if (dhdpcie_init (pdev)) {
+		DHD_ERROR(("%s: PCIe Enumeration failed\n", __FUNCTION__));
+		return -ENODEV;
+	}
+	/* disable async suspend */
+	device_disable_async_suspend(&pdev->dev);
+	DHD_TRACE(("%s: PCIe Enumeration done!!\n", __FUNCTION__));
+	return 0;
+}
+
+int
+dhdpcie_detach(dhdpcie_info_t *pch)
+{
+	osl_t *osh = pch->osh;
+	if (pch) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+		if (!dhd_download_fw_on_driverload)
+			pci_load_and_free_saved_state(pch->dev, &pch->default_state);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+		MFREE(osh, pch, sizeof(dhdpcie_info_t));
+	}
+	return 0;
+}
+
+
+void __devexit
+dhdpcie_pci_remove(struct pci_dev *pdev)
+{
+	osl_t *osh = NULL;
+	dhdpcie_info_t *pch = NULL;
+	dhd_bus_t *bus = NULL;
+
+	DHD_TRACE(("%s Enter\n", __FUNCTION__));
+	pch = pci_get_drvdata(pdev);
+	bus = pch->bus;
+	osh = pch->osh;
+
+	dhdpcie_bus_release(bus);
+	pci_disable_device(pdev);
+	/* pcie info detach */
+	dhdpcie_detach(pch);
+	/* osl detach */
+	osl_detach(osh);
+
+	dhdpcie_init_succeeded = FALSE;
+
+	DHD_TRACE(("%s Exit\n", __FUNCTION__));
+
+	return;
+}
+
+/* Free Linux irq */
+int
+dhdpcie_request_irq(dhdpcie_info_t *dhdpcie_info)
+{
+	dhd_bus_t *bus = dhdpcie_info->bus;
+	struct pci_dev *pdev = dhdpcie_info->bus->dev;
+
+	snprintf(dhdpcie_info->pciname, sizeof(dhdpcie_info->pciname),
+	    "dhdpcie:%s", pci_name(pdev));
+	if (request_irq(pdev->irq, dhdpcie_isr, IRQF_SHARED,
+	                dhdpcie_info->pciname, bus) < 0) {
+			DHD_ERROR(("%s: request_irq() failed\n", __FUNCTION__));
+			return -1;
+	}
+
+	DHD_TRACE(("%s %s\n", __FUNCTION__, dhdpcie_info->pciname));
+
+
+	return 0; /* SUCCESS */
+}
+
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+#define PRINTF_RESOURCE	"0x%016llx"
+#else
+#define PRINTF_RESOURCE	"0x%08x"
+#endif
+
+/*
+
+Name:  osl_pci_get_resource
+
+Parametrs:
+
+1: struct pci_dev *pdev   -- pci device structure
+2: pci_res                       -- structure containing pci configuration space values
+
+
+Return value:
+
+int   - Status (TRUE or FALSE)
+
+Description:
+Access PCI configuration space, retrieve  PCI allocated resources , updates in resource structure.
+
+ */
+int dhdpcie_get_resource(dhdpcie_info_t *dhdpcie_info)
+{
+	phys_addr_t  bar0_addr, bar1_addr;
+	ulong bar1_size;
+	struct pci_dev *pdev = NULL;
+	pdev = dhdpcie_info->dev;
+	do {
+		if (pci_enable_device(pdev)) {
+			printf("%s: Cannot enable PCI device\n", __FUNCTION__);
+			break;
+		}
+		pci_set_master(pdev);
+		bar0_addr = pci_resource_start(pdev, 0);	/* Bar-0 mapped address */
+		bar1_addr = pci_resource_start(pdev, 2);	/* Bar-1 mapped address */
+
+		/* read Bar-1 mapped memory range */
+		bar1_size = pci_resource_len(pdev, 2);
+
+		if ((bar1_size == 0) || (bar1_addr == 0)) {
+			printf("%s: BAR1 Not enabled for this device  size(%ld),"
+				" addr(0x"PRINTF_RESOURCE")\n",
+				__FUNCTION__, bar1_size, bar1_addr);
+			goto err;
+		}
+
+		dhdpcie_info->regs = (volatile char *) REG_MAP(bar0_addr, DONGLE_REG_MAP_SIZE);
+		dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, DONGLE_TCM_MAP_SIZE);
+		dhdpcie_info->tcm_size = DONGLE_TCM_MAP_SIZE;
+
+		if (!dhdpcie_info->regs || !dhdpcie_info->tcm) {
+			DHD_ERROR(("%s:ioremap() failed\n", __FUNCTION__));
+			break;
+		}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+		if (!dhd_download_fw_on_driverload) {
+			/* Backup PCIe configuration so as to use Wi-Fi on/off process
+			 * in case of built in driver
+			 */
+			pci_save_state(pdev);
+			dhdpcie_info->default_state = pci_store_saved_state(pdev);
+
+			if (dhdpcie_info->default_state == NULL) {
+				DHD_ERROR(("%s pci_store_saved_state returns NULL\n",
+					__FUNCTION__));
+				REG_UNMAP(dhdpcie_info->regs);
+				REG_UNMAP(dhdpcie_info->tcm);
+				pci_disable_device(pdev);
+				break;
+			}
+		}
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+
+		DHD_TRACE(("%s:Phys addr : reg space = %p base addr 0x"PRINTF_RESOURCE" \n",
+			__FUNCTION__, dhdpcie_info->regs, bar0_addr));
+		DHD_TRACE(("%s:Phys addr : tcm_space = %p base addr 0x"PRINTF_RESOURCE" \n",
+			__FUNCTION__, dhdpcie_info->tcm, bar1_addr));
+
+		return 0; /* SUCCESS  */
+	} while (0);
+err:
+	return -1;  /* FAILURE */
+}
+
+int dhdpcie_scan_resource(dhdpcie_info_t *dhdpcie_info)
+{
+
+	DHD_TRACE(("%s: ENTER\n", __FUNCTION__));
+
+	do {
+		/* define it here only!! */
+		if (dhdpcie_get_resource (dhdpcie_info)) {
+			DHD_ERROR(("%s: Failed to get PCI resources\n", __FUNCTION__));
+			break;
+		}
+		DHD_TRACE(("%s:Exit - SUCCESS \n",
+			__FUNCTION__));
+
+		return 0; /* SUCCESS */
+
+	} while (0);
+
+	DHD_TRACE(("%s:Exit - FAILURE \n", __FUNCTION__));
+
+	return -1; /* FAILURE */
+
+}
+#ifdef MSM_PCIE_LINKDOWN_RECOVERY
+void dhdpcie_linkdown_cb(struct msm_pcie_notify *noti)
+{
+	struct pci_dev *pdev = (struct pci_dev *)noti->user;
+	dhdpcie_info_t *pch;
+	dhd_bus_t *bus;
+	dhd_pub_t *dhd;
+	if (pdev && (pch = pci_get_drvdata(pdev))) {
+		if ((bus = pch->bus) && (dhd = bus->dhd)) {
+			DHD_ERROR(("%s: Event HANG send up "
+				"due to PCIe linkdown\n", __FUNCTION__));
+			bus->islinkdown = TRUE;
+			DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhd, DHD_EVENT_TIMEOUT_MS);
+			dhd_os_check_hang(dhd, 0, -ETIMEDOUT);
+		}
+	}
+}
+#endif /* MSM_PCIE_LINKDOWN_RECOVERY */
+int dhdpcie_init(struct pci_dev *pdev)
+{
+
+	osl_t 				*osh = NULL;
+	dhd_bus_t 			*bus = NULL;
+	dhdpcie_info_t		*dhdpcie_info =  NULL;
+	wifi_adapter_info_t	*adapter = NULL;
+	DHD_ERROR(("%s enter\n", __FUNCTION__));
+	do {
+		/* osl attach */
+		if (!(osh = osl_attach(pdev, PCI_BUS, FALSE))) {
+			DHD_ERROR(("%s: osl_attach failed\n", __FUNCTION__));
+			break;
+		}
+		/* initialize static buffer */
+		adapter = dhd_wifi_platform_get_adapter(PCI_BUS, pdev->bus->number,
+			PCI_SLOT(pdev->devfn));
+		if (adapter != NULL)
+			DHD_ERROR(("%s: found adapter info '%s'\n", __FUNCTION__, adapter->name));
+		else
+			DHD_ERROR(("%s: can't find adapter info for this chip\n", __FUNCTION__));
+		osl_static_mem_init(osh, adapter);
+
+		/*  allocate linux spcific pcie structure here */
+		if (!(dhdpcie_info = MALLOC(osh, sizeof(dhdpcie_info_t)))) {
+			DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__));
+			break;
+		}
+		bzero(dhdpcie_info, sizeof(dhdpcie_info_t));
+		dhdpcie_info->osh = osh;
+		dhdpcie_info->dev = pdev;
+
+		/* Find the PCI resources, verify the  */
+		/* vendor and device ID, map BAR regions and irq,  update in structures */
+		if (dhdpcie_scan_resource(dhdpcie_info)) {
+			DHD_ERROR(("%s: dhd_Scan_PCI_Res failed\n", __FUNCTION__));
+
+			break;
+		}
+
+		/* Bus initialization */
+		bus = dhdpcie_bus_attach(osh, dhdpcie_info->regs, dhdpcie_info->tcm);
+		if (!bus) {
+			DHD_ERROR(("%s:dhdpcie_bus_attach() failed\n", __FUNCTION__));
+			break;
+		}
+
+		dhdpcie_info->bus = bus;
+		dhdpcie_info->bus->dev = pdev;
+#ifdef MSM_PCIE_LINKDOWN_RECOVERY
+		bus->islinkdown = FALSE;
+		bus->pcie_event.events = MSM_PCIE_EVENT_LINKDOWN;
+		bus->pcie_event.user = pdev;
+		bus->pcie_event.mode = MSM_PCIE_TRIGGER_CALLBACK;
+		bus->pcie_event.callback = dhdpcie_linkdown_cb;
+		bus->pcie_event.options = MSM_PCIE_CONFIG_NO_RECOVERY;
+		msm_pcie_register_event(&bus->pcie_event);
+#endif /* MSM_PCIE_LINKDOWN_RECOVERY */
+
+		if (bus->intr) {
+			/* Register interrupt callback, but mask it (not operational yet). */
+			DHD_INTR(("%s: Registering and masking interrupts\n", __FUNCTION__));
+			dhdpcie_bus_intr_disable(bus);
+
+			if (dhdpcie_request_irq(dhdpcie_info)) {
+				DHD_ERROR(("%s: request_irq() failed\n", __FUNCTION__));
+				break;
+			}
+		} else {
+			bus->pollrate = 1;
+			DHD_INFO(("%s: PCIe interrupt function is NOT registered "
+				"due to polling mode\n", __FUNCTION__));
+		}
+
+
+		/* set private data for pci_dev */
+		pci_set_drvdata(pdev, dhdpcie_info);
+		/* Attach to the OS network interface */
+		DHD_TRACE(("%s(): Calling dhd_register_if() \n", __FUNCTION__));
+		if(dhd_register_if(bus->dhd, 0, TRUE)) {
+			DHD_ERROR(("%s(): ERROR.. dhd_register_if() failed\n", __FUNCTION__));
+			break;
+		}
+		if (dhd_download_fw_on_driverload) {
+			if (dhd_bus_start(bus->dhd)) {
+				DHD_ERROR(("%s: dhd_bud_start() failed\n", __FUNCTION__));
+				break;
+			}
+		}
+#ifdef DHD_WAKE_STATUS
+		spin_lock_init(&dhdpcie_info->pcie_lock);
+		dhdpcie_info->wake_irq = dhd_os_get_wake_irq(bus->dhd);
+		if (dhdpcie_info->wake_irq == -1)
+			dhdpcie_info->wake_irq = pdev->irq;
+#endif
+		dhdpcie_init_succeeded = TRUE;
+
+		DHD_ERROR(("%s:Exit - SUCCESS \n", __FUNCTION__));
+		return 0;  /* return  SUCCESS  */
+
+	} while (0);
+	/* reverse the initialization in order in case of error */
+
+	if (bus)
+		dhdpcie_bus_release(bus);
+
+	if (dhdpcie_info)
+		dhdpcie_detach(dhdpcie_info);
+	pci_disable_device(pdev);
+	if (osh)
+		osl_detach(osh);
+
+	dhdpcie_init_succeeded = FALSE;
+
+	DHD_TRACE(("%s:Exit - FAILURE \n", __FUNCTION__));
+
+	return -1; /* return FAILURE  */
+}
+
+/* Free Linux irq */
+void
+dhdpcie_free_irq(dhd_bus_t *bus)
+{
+	struct pci_dev *pdev = NULL;
+
+	DHD_TRACE(("%s: freeing up the IRQ\n", __FUNCTION__));
+	if (bus) {
+		pdev = bus->dev;
+		free_irq(pdev->irq, bus);
+	}
+	DHD_TRACE(("%s: Exit\n", __FUNCTION__));
+	return;
+}
+
+/*
+
+Name:  dhdpcie_isr
+
+Parametrs:
+
+1: IN int irq   -- interrupt vector
+2: IN void *arg      -- handle to private data structure
+
+Return value:
+
+Status (TRUE or FALSE)
+
+Description:
+Interrupt Service routine checks for the status register,
+disable interrupt and queue DPC if mail box interrupts are raised.
+*/
+
+
+irqreturn_t
+dhdpcie_isr(int irq, void *arg)
+{
+	dhd_bus_t *bus = (dhd_bus_t*)arg;
+	if (dhdpcie_bus_isr(bus))
+		return TRUE;
+	else
+		return FALSE;
+}
+
+int
+dhdpcie_start_host_pcieclock(dhd_bus_t *bus)
+{
+	int ret=0;
+	int options = 0;
+	DHD_TRACE(("%s Enter:\n", __FUNCTION__));
+
+	if(bus == NULL)
+		return BCME_ERROR;
+
+	if(bus->dev == NULL)
+		return BCME_ERROR;
+
+#ifdef CONFIG_ARCH_MSM
+#ifdef MSM_PCIE_LINKDOWN_RECOVERY
+	if (bus->islinkdown)
+		options = MSM_PCIE_CONFIG_NO_CFG_RESTORE;
+#endif /* MSM_PCIE_LINKDOWN_RECOVERY */
+
+	ret = msm_pcie_pm_control(MSM_PCIE_RESUME, bus->dev->bus->number,
+		bus->dev, NULL, options);
+#ifdef MSM_PCIE_LINKDOWN_RECOVERY
+	if (bus->islinkdown && !ret) {
+		msm_pcie_recover_config(bus->dev);
+		if (bus->dhd)
+			DHD_OS_WAKE_UNLOCK(bus->dhd);
+		bus->islinkdown = FALSE;
+	}
+#endif /* MSM_PCIE_LINKDOWN_RECOVERY */
+
+	if (ret) {
+		DHD_ERROR(("%s Failed to bring up PCIe link\n", __FUNCTION__));
+	}
+#endif /* CONFIG_ARCH_MSM */
+	DHD_TRACE(("%s Exit:\n", __FUNCTION__));
+	return ret;
+}
+
+int
+dhdpcie_stop_host_pcieclock(dhd_bus_t *bus)
+{
+	int ret = 0;
+	int options = 0;
+	DHD_TRACE(("%s Enter:\n", __FUNCTION__));
+
+	if (bus == NULL)
+		return BCME_ERROR;
+
+	if (bus->dev == NULL)
+		return BCME_ERROR;
+
+#ifdef CONFIG_ARCH_MSM
+#ifdef MSM_PCIE_LINKDOWN_RECOVERY
+	if (bus->islinkdown)
+		options = MSM_PCIE_CONFIG_NO_CFG_RESTORE | MSM_PCIE_CONFIG_LINKDOWN;
+#endif /* MSM_PCIE_LINKDOWN_RECOVERY */
+
+	ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, bus->dev->bus->number,
+		bus->dev, NULL, options);
+
+	if (ret) {
+		DHD_ERROR(("Failed to stop PCIe link\n"));
+	}
+#endif /* CONFIG_ARCH_MSM */
+	DHD_TRACE(("%s Exit:\n", __FUNCTION__));
+	return ret;
+}
+
+int
+dhdpcie_disable_device(dhd_bus_t *bus)
+{
+	if (bus == NULL)
+		return BCME_ERROR;
+
+	if (bus->dev == NULL)
+		return BCME_ERROR;
+
+	pci_disable_device(bus->dev);
+
+	return 0;
+}
+
+int
+dhdpcie_enable_device(dhd_bus_t *bus)
+{
+	int ret = BCME_ERROR;
+	dhdpcie_info_t *pch;
+
+	DHD_TRACE(("%s Enter:\n", __FUNCTION__));
+
+	if(bus == NULL)
+		return BCME_ERROR;
+
+	if(bus->dev == NULL)
+		return BCME_ERROR;
+
+	pch = pci_get_drvdata(bus->dev);
+	if(pch == NULL)
+		return BCME_ERROR;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+	if (pci_load_saved_state(bus->dev, pch->default_state))
+		pci_disable_device(bus->dev);
+	else {
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+		pci_restore_state(bus->dev);
+		ret = pci_enable_device(bus->dev);
+		if(!ret)
+			pci_set_master(bus->dev);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+	}
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+
+	if(ret)
+		pci_disable_device(bus->dev);
+
+	return ret;
+}
+int
+dhdpcie_alloc_resource(dhd_bus_t *bus)
+{
+	dhdpcie_info_t *dhdpcie_info;
+	phys_addr_t bar0_addr, bar1_addr;
+	ulong bar1_size;
+
+	do {
+		if (bus == NULL) {
+			DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
+			break;
+		}
+
+		if (bus->dev == NULL) {
+			DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
+			break;
+		}
+
+		dhdpcie_info = pci_get_drvdata(bus->dev);
+		if (dhdpcie_info == NULL) {
+			DHD_ERROR(("%s: dhdpcie_info is NULL\n", __FUNCTION__));
+			break;
+		}
+
+		bar0_addr = pci_resource_start(bus->dev, 0);	/* Bar-0 mapped address */
+		bar1_addr = pci_resource_start(bus->dev, 2);	/* Bar-1 mapped address */
+
+		/* read Bar-1 mapped memory range */
+		bar1_size = pci_resource_len(bus->dev, 2);
+
+		if ((bar1_size == 0) || (bar1_addr == 0)) {
+			printf("%s: BAR1 Not enabled for this device size(%ld),"
+				" addr(0x"PRINTF_RESOURCE")\n",
+				__FUNCTION__, bar1_size, bar1_addr);
+			break;
+		}
+
+		dhdpcie_info->regs = (volatile char *) REG_MAP(bar0_addr, DONGLE_REG_MAP_SIZE);
+		if (!dhdpcie_info->regs) {
+			DHD_ERROR(("%s: ioremap() for regs is failed\n", __FUNCTION__));
+			break;
+		}
+
+		bus->regs = dhdpcie_info->regs;
+		dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, DONGLE_TCM_MAP_SIZE);
+		dhdpcie_info->tcm_size = DONGLE_TCM_MAP_SIZE;
+		if (!dhdpcie_info->tcm) {
+			DHD_ERROR(("%s: ioremap() for regs is failed\n", __FUNCTION__));
+			REG_UNMAP(dhdpcie_info->regs);
+			bus->regs = NULL;
+			break;
+		}
+
+		bus->tcm = dhdpcie_info->tcm;
+
+		DHD_TRACE(("%s:Phys addr : reg space = %p base addr 0x"PRINTF_RESOURCE" \n",
+			__FUNCTION__, dhdpcie_info->regs, bar0_addr));
+		DHD_TRACE(("%s:Phys addr : tcm_space = %p base addr 0x"PRINTF_RESOURCE" \n",
+			__FUNCTION__, dhdpcie_info->tcm, bar1_addr));
+
+		return 0;
+	} while (0);
+
+	return BCME_ERROR;
+}
+
+void
+dhdpcie_free_resource(dhd_bus_t *bus)
+{
+	dhdpcie_info_t *dhdpcie_info;
+
+	if (bus == NULL) {
+		DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	if (bus->dev == NULL) {
+		DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	dhdpcie_info = pci_get_drvdata(bus->dev);
+	if (dhdpcie_info == NULL) {
+		DHD_ERROR(("%s: dhdpcie_info is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	if (bus->regs) {
+		REG_UNMAP(dhdpcie_info->regs);
+		bus->regs = NULL;
+	}
+
+	if (bus->tcm) {
+		REG_UNMAP(dhdpcie_info->tcm);
+		bus->tcm = NULL;
+	}
+}
+
+int
+dhdpcie_bus_request_irq(struct dhd_bus *bus)
+{
+	dhdpcie_info_t *dhdpcie_info;
+	int ret = 0;
+
+	if (bus == NULL) {
+		DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	if (bus->dev == NULL) {
+		DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	dhdpcie_info = pci_get_drvdata(bus->dev);
+	if (dhdpcie_info == NULL) {
+		DHD_ERROR(("%s: dhdpcie_info is NULL\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	if (bus->intr) {
+		/* Register interrupt callback, but mask it (not operational yet). */
+		DHD_INTR(("%s: Registering and masking interrupts\n", __FUNCTION__));
+		dhdpcie_bus_intr_disable(bus);
+		ret = dhdpcie_request_irq(dhdpcie_info);
+		if (ret) {
+			DHD_ERROR(("%s: request_irq() failed, ret=%d\n",
+				__FUNCTION__, ret));
+			return ret;
+		}
+	}
+
+	return ret;
+}
+
diff --git a/drivers/net/wireless/bcmdhd/dhd_pno.c b/drivers/net/wireless/bcmdhd/dhd_pno.c
new file mode 100644
index 0000000..3e3e3a5
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_pno.c
@@ -0,0 +1,3713 @@
+/*
+ * Broadcom Dongle Host Driver (DHD)
+ * Prefered Network Offload and Wi-Fi Location Service(WLS) code.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ *
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ *
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_pno.c 423669 2013-09-18 13:01:55Z yangj$
+ */
+#ifdef PNO_SUPPORT
+#include <typedefs.h>
+#include <osl.h>
+
+#include <epivers.h>
+#include <bcmutils.h>
+
+#include <bcmendian.h>
+#include <linuxver.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/sort.h>
+#include <dngl_stats.h>
+#include <wlioctl.h>
+
+#include <proto/bcmevent.h>
+#include <dhd.h>
+#include <dhd_pno.h>
+#include <dhd_dbg.h>
+#ifdef GSCAN_SUPPORT
+#include <linux/gcd.h>
+#endif /* GSCAN_SUPPORT */
+
+#ifdef __BIG_ENDIAN
+#include <bcmendian.h>
+#define htod32(i) (bcmswap32(i))
+#define htod16(i) (bcmswap16(i))
+#define dtoh32(i) (bcmswap32(i))
+#define dtoh16(i) (bcmswap16(i))
+#define htodchanspec(i) htod16(i)
+#define dtohchanspec(i) dtoh16(i)
+#else
+#define htod32(i) (i)
+#define htod16(i) (i)
+#define dtoh32(i) (i)
+#define dtoh16(i) (i)
+#define htodchanspec(i) (i)
+#define dtohchanspec(i) (i)
+#endif /* IL_BIGENDINA */
+
+#define NULL_CHECK(p, s, err)  \
+			do { \
+				if (!(p)) { \
+					printf("NULL POINTER (%s) : %s\n", __FUNCTION__, (s)); \
+					err = BCME_ERROR; \
+					return err; \
+				} \
+			} while (0)
+#define PNO_GET_PNOSTATE(dhd) ((dhd_pno_status_info_t *)dhd->pno_state)
+#define PNO_BESTNET_LEN 2048
+#define PNO_ON 1
+#define PNO_OFF 0
+#define CHANNEL_2G_MAX 14
+#define CHANNEL_5G_MAX 165
+#define MAX_NODE_CNT 5
+#define WLS_SUPPORTED(pno_state) (pno_state->wls_supported == TRUE)
+#define TIME_DIFF(timestamp1, timestamp2) (abs((uint32)(timestamp1/1000)  \
+						- (uint32)(timestamp2/1000)))
+#define TIME_DIFF_MS(timestamp1, timestamp2) (abs((uint32)(timestamp1)  \
+						- (uint32)(timestamp2)))
+#define TIMESPEC_TO_US(ts)  (((uint64)(ts).tv_sec * USEC_PER_SEC) + \
+							(ts).tv_nsec / NSEC_PER_USEC)
+
+#define ENTRY_OVERHEAD strlen("bssid=\nssid=\nfreq=\nlevel=\nage=\ndist=\ndistSd=\n====")
+#define TIME_MIN_DIFF 5
+static wlc_ssid_ext_t * dhd_pno_get_legacy_pno_ssid(dhd_pub_t *dhd,
+ dhd_pno_status_info_t *pno_state);
+#ifdef GSCAN_SUPPORT
+static wl_pfn_gscan_channel_bucket_t *
+dhd_pno_gscan_create_channel_list(dhd_pub_t *dhd, dhd_pno_status_info_t *pno_state,
+uint16 *chan_list, uint32 *num_buckets, uint32 *num_buckets_to_fw);
+#endif /* GSCAN_SUPPORT */
+static inline bool
+is_dfs(uint16 channel)
+{
+	if (channel >= 52 && channel <= 64)			/* class 2 */
+		return TRUE;
+	else if (channel >= 100 && channel <= 140)	/* class 4 */
+		return TRUE;
+	else
+		return FALSE;
+}
+int
+dhd_pno_clean(dhd_pub_t *dhd)
+{
+	int pfn = 0;
+	int err;
+	dhd_pno_status_info_t *_pno_state;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	/* Disable PNO */
+	err = dhd_iovar(dhd, 0, "pfn", (char *)&pfn, sizeof(pfn), 1);
+	if (err < 0) {
+		DHD_ERROR(("%s : failed to execute pfn(error : %d)\n",
+			__FUNCTION__, err));
+		goto exit;
+	}
+	_pno_state->pno_status = DHD_PNO_DISABLED;
+	err = dhd_iovar(dhd, 0, "pfnclear", NULL, 0, 1);
+	if (err < 0) {
+		DHD_ERROR(("%s : failed to execute pfnclear(error : %d)\n",
+			__FUNCTION__, err));
+	}
+exit:
+	return err;
+}
+
+bool dhd_is_pno_supported(dhd_pub_t *dhd)
+{
+	dhd_pno_status_info_t *_pno_state;
+
+	if (!dhd || !dhd->pno_state) {
+		DHD_ERROR(("NULL POINTER : %s\n",
+			__FUNCTION__));
+		return FALSE;
+	}
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	return WLS_SUPPORTED(_pno_state);
+}
+
+int dhd_pno_set_mac_oui(dhd_pub_t *dhd, uint8 *oui)
+{
+	int err = BCME_OK;
+	dhd_pno_status_info_t *_pno_state;
+
+	if (!dhd || !dhd->pno_state) {
+		DHD_ERROR(("NULL POINTER : %s\n",
+			__FUNCTION__));
+		return BCME_ERROR;
+	}
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	if (ETHER_ISMULTI(oui)) {
+		DHD_ERROR(("Expected unicast OUI\n"));
+		err = BCME_ERROR;
+	} else {
+		memcpy(_pno_state->pno_oui, oui, DOT11_OUI_LEN);
+		DHD_PNO(("PNO mac oui to be used - %02x:%02x:%02x\n", _pno_state->pno_oui[0],
+		    _pno_state->pno_oui[1], _pno_state->pno_oui[2]));
+	}
+
+	return err;
+}
+
+#ifdef GSCAN_SUPPORT
+static uint64 convert_fw_rel_time_to_systime(uint32 fw_ts_ms)
+{
+	struct timespec ts;
+
+	get_monotonic_boottime(&ts);
+	return ((uint64)(TIMESPEC_TO_US(ts)) - (uint64)(fw_ts_ms * 1000));
+}
+
+static int
+_dhd_pno_gscan_cfg(dhd_pub_t *dhd, wl_pfn_gscan_cfg_t *pfncfg_gscan_param, int size)
+{
+	int err = BCME_OK;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+
+	err = dhd_iovar(dhd, 0, "pfn_gscan_cfg", (char *)pfncfg_gscan_param, size, 1);
+	if (err < 0) {
+		DHD_ERROR(("%s : failed to execute pfncfg_gscan_param\n", __FUNCTION__));
+		goto exit;
+	}
+exit:
+	return err;
+}
+
+static bool
+is_batch_retrieval_complete(struct dhd_pno_gscan_params *gscan_params)
+{
+	smp_rmb();
+	return (gscan_params->get_batch_flag == GSCAN_BATCH_RETRIEVAL_COMPLETE);
+}
+#endif /* GSCAN_SUPPORT */
+
+static int
+dhd_pno_set_mac_addr(dhd_pub_t *dhd, struct ether_addr *macaddr)
+{
+	int err;
+	wl_pfn_macaddr_cfg_t cfg;
+
+	cfg.version = WL_PFN_MACADDR_CFG_VER;
+	if (ETHER_ISNULLADDR(macaddr))
+		cfg.flags = 0;
+	else
+		cfg.flags = (WL_PFN_MAC_OUI_ONLY_MASK | WL_PFN_SET_MAC_UNASSOC_MASK);
+	memcpy(&cfg.macaddr, macaddr, ETHER_ADDR_LEN);
+
+	err = dhd_iovar(dhd, 0, "pfn_macaddr", (char *)&cfg, sizeof(cfg), 1);
+	if (err < 0)
+		DHD_ERROR(("%s : failed to execute pfn_macaddr\n", __FUNCTION__));
+
+	return err;
+}
+
+static int
+_dhd_pno_suspend(dhd_pub_t *dhd)
+{
+	int err;
+	int suspend = 1;
+	dhd_pno_status_info_t *_pno_state;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	err = dhd_iovar(dhd, 0, "pfn_suspend", (char *)&suspend, sizeof(suspend), 1);
+	if (err < 0) {
+		DHD_ERROR(("%s : failed to suspend pfn(error :%d)\n", __FUNCTION__, err));
+		goto exit;
+
+	}
+	_pno_state->pno_status = DHD_PNO_SUSPEND;
+exit:
+	return err;
+}
+static int
+_dhd_pno_enable(dhd_pub_t *dhd, int enable)
+{
+	int err = BCME_OK;
+	dhd_pno_status_info_t *_pno_state;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+
+	if (enable & 0xfffe) {
+		DHD_ERROR(("%s invalid value\n", __FUNCTION__));
+		err = BCME_BADARG;
+		goto exit;
+	}
+	if (!dhd_support_sta_mode(dhd)) {
+		DHD_ERROR(("PNO is not allowed for non-STA mode"));
+		err = BCME_BADOPTION;
+		goto exit;
+	}
+	if (enable) {
+		if ((_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) &&
+			dhd_is_associated(dhd, NULL, NULL)) {
+			DHD_ERROR(("%s Legacy PNO mode cannot be enabled "
+				"in assoc mode , ignore it\n", __FUNCTION__));
+			err = BCME_BADOPTION;
+			goto exit;
+		}
+	}
+	/* Enable/Disable PNO */
+	err = dhd_iovar(dhd, 0, "pfn", (char *)&enable, sizeof(enable), 1);
+	if (err < 0) {
+		DHD_ERROR(("%s : failed to execute pfn_set - %d\n", __FUNCTION__, err));
+		goto exit;
+	}
+	_pno_state->pno_status = (enable)?
+		DHD_PNO_ENABLED : DHD_PNO_DISABLED;
+	if (!enable)
+		_pno_state->pno_mode = DHD_PNO_NONE_MODE;
+
+	DHD_PNO(("%s set pno as %s\n",
+		__FUNCTION__, enable ? "Enable" : "Disable"));
+exit:
+	return err;
+}
+
+static int
+_dhd_pno_set(dhd_pub_t *dhd, const dhd_pno_params_t *pno_params, dhd_pno_mode_t mode)
+{
+	int err = BCME_OK;
+	wl_pfn_param_t pfn_param;
+	dhd_pno_params_t *_params;
+	dhd_pno_status_info_t *_pno_state;
+	bool combined_scan = FALSE;
+	struct ether_addr macaddr;
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+
+	memset(&pfn_param, 0, sizeof(pfn_param));
+
+	/* set pfn parameters */
+	pfn_param.version = htod32(PFN_VERSION);
+	pfn_param.flags = ((PFN_LIST_ORDER << SORT_CRITERIA_BIT) |
+		(ENABLE << IMMEDIATE_SCAN_BIT) | (ENABLE << REPORT_SEPERATELY_BIT));
+	if (mode == DHD_PNO_LEGACY_MODE) {
+		/* check and set extra pno params */
+		if ((pno_params->params_legacy.pno_repeat != 0) ||
+			(pno_params->params_legacy.pno_freq_expo_max != 0)) {
+			pfn_param.flags |= htod16(ENABLE << ENABLE_ADAPTSCAN_BIT);
+			pfn_param.repeat = (uchar) (pno_params->params_legacy.pno_repeat);
+			pfn_param.exp = (uchar) (pno_params->params_legacy.pno_freq_expo_max);
+		}
+		/* set up pno scan fr */
+		if (pno_params->params_legacy.scan_fr != 0)
+			pfn_param.scan_freq = htod32(pno_params->params_legacy.scan_fr);
+		if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) {
+			DHD_PNO(("will enable combined scan with BATCHIG SCAN MODE\n"));
+			mode |= DHD_PNO_BATCH_MODE;
+			combined_scan = TRUE;
+		} else if (_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE) {
+			DHD_PNO(("will enable combined scan with HOTLIST SCAN MODE\n"));
+			mode |= DHD_PNO_HOTLIST_MODE;
+			combined_scan = TRUE;
+		}
+#ifdef GSCAN_SUPPORT
+		else if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) {
+			DHD_PNO(("will enable combined scan with GSCAN SCAN MODE\n"));
+			mode |= DHD_PNO_GSCAN_MODE;
+		}
+#endif /* GSCAN_SUPPORT */
+	}
+	if (mode & (DHD_PNO_BATCH_MODE | DHD_PNO_HOTLIST_MODE)) {
+		/* Scan frequency of 30 sec */
+		pfn_param.scan_freq = htod32(30);
+		/* slow adapt scan is off by default */
+		pfn_param.slow_freq = htod32(0);
+		/* RSSI margin of 30 dBm */
+		pfn_param.rssi_margin = htod16(PNO_RSSI_MARGIN_DBM);
+		/* Network timeout 60 sec */
+		pfn_param.lost_network_timeout = htod32(60);
+		/* best n = 2 by default */
+		pfn_param.bestn = DEFAULT_BESTN;
+		/* mscan m=0 by default, so not record best networks by default */
+		pfn_param.mscan = DEFAULT_MSCAN;
+		/*  default repeat = 10 */
+		pfn_param.repeat = DEFAULT_REPEAT;
+		/* by default, maximum scan interval = 2^2
+		 * scan_freq when adaptive scan is turned on
+		 */
+		pfn_param.exp = DEFAULT_EXP;
+		if (mode == DHD_PNO_BATCH_MODE) {
+			/* In case of BATCH SCAN */
+			if (pno_params->params_batch.bestn)
+				pfn_param.bestn = pno_params->params_batch.bestn;
+			if (pno_params->params_batch.scan_fr)
+				pfn_param.scan_freq = htod32(pno_params->params_batch.scan_fr);
+			if (pno_params->params_batch.mscan)
+				pfn_param.mscan = pno_params->params_batch.mscan;
+			/* enable broadcast scan */
+			pfn_param.flags |= (ENABLE << ENABLE_BD_SCAN_BIT);
+		} else if (mode == DHD_PNO_HOTLIST_MODE) {
+			/* In case of HOTLIST SCAN */
+			if (pno_params->params_hotlist.scan_fr)
+				pfn_param.scan_freq = htod32(pno_params->params_hotlist.scan_fr);
+			pfn_param.bestn = 0;
+			pfn_param.repeat = 0;
+			/* enable broadcast scan */
+			pfn_param.flags |= (ENABLE << ENABLE_BD_SCAN_BIT);
+		}
+		if (combined_scan) {
+			/* Disable Adaptive Scan */
+			pfn_param.flags &= ~(htod16(ENABLE << ENABLE_ADAPTSCAN_BIT));
+			pfn_param.flags |= (ENABLE << ENABLE_BD_SCAN_BIT);
+			pfn_param.repeat = 0;
+			pfn_param.exp = 0;
+			if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) {
+				/* In case of Legacy PNO + BATCH SCAN */
+				_params = &(_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]);
+				if (_params->params_batch.bestn)
+					pfn_param.bestn = _params->params_batch.bestn;
+				if (_params->params_batch.scan_fr)
+					pfn_param.scan_freq = htod32(_params->params_batch.scan_fr);
+				if (_params->params_batch.mscan)
+					pfn_param.mscan = _params->params_batch.mscan;
+			} else if (_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE) {
+				/* In case of Legacy PNO + HOTLIST SCAN */
+				_params = &(_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS]);
+				if (_params->params_hotlist.scan_fr)
+				pfn_param.scan_freq = htod32(_params->params_hotlist.scan_fr);
+				pfn_param.bestn = 0;
+				pfn_param.repeat = 0;
+			}
+		}
+	}
+#ifdef GSCAN_SUPPORT
+	if (mode & DHD_PNO_GSCAN_MODE) {
+		uint32 lost_network_timeout;
+
+		pfn_param.scan_freq = htod32(pno_params->params_gscan.scan_fr);
+		if (pno_params->params_gscan.mscan) {
+			pfn_param.bestn = pno_params->params_gscan.bestn;
+			pfn_param.mscan =  pno_params->params_gscan.mscan;
+			pfn_param.flags |= (ENABLE << ENABLE_BD_SCAN_BIT);
+		}
+		/* RSSI margin of 30 dBm */
+		pfn_param.rssi_margin = htod16(PNO_RSSI_MARGIN_DBM);
+		pfn_param.repeat = 0;
+		pfn_param.exp = 0;
+		pfn_param.slow_freq = 0;
+
+		if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) {
+			dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd);
+			dhd_pno_params_t *_params;
+
+			_params = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]);
+
+			pfn_param.scan_freq = gcd(pno_params->params_gscan.scan_fr,
+			                 _params->params_legacy.scan_fr);
+
+			if ((_params->params_legacy.pno_repeat != 0) ||
+				(_params->params_legacy.pno_freq_expo_max != 0)) {
+				pfn_param.flags |= htod16(ENABLE << ENABLE_ADAPTSCAN_BIT);
+				pfn_param.repeat = (uchar) (_params->params_legacy.pno_repeat);
+				pfn_param.exp = (uchar) (_params->params_legacy.pno_freq_expo_max);
+			}
+		}
+
+		lost_network_timeout = (pno_params->params_gscan.max_ch_bucket_freq *
+		                        pfn_param.scan_freq *
+		                        pno_params->params_gscan.lost_ap_window);
+		if (lost_network_timeout) {
+			pfn_param.lost_network_timeout = htod32(MIN(lost_network_timeout,
+			                                 GSCAN_MIN_BSSID_TIMEOUT));
+		} else {
+			pfn_param.lost_network_timeout = htod32(GSCAN_MIN_BSSID_TIMEOUT);
+		}
+	} else
+#endif /* GSCAN_SUPPORT */
+	{
+		if (pfn_param.scan_freq < htod32(PNO_SCAN_MIN_FW_SEC) ||
+			pfn_param.scan_freq > htod32(PNO_SCAN_MAX_FW_SEC)) {
+			DHD_ERROR(("%s pno freq(%d sec) is not valid \n",
+				__FUNCTION__, PNO_SCAN_MIN_FW_SEC));
+			err = BCME_BADARG;
+			goto exit;
+		}
+	}
+
+	memset(&macaddr, 0, ETHER_ADDR_LEN);
+	memcpy(&macaddr, _pno_state->pno_oui, DOT11_OUI_LEN);
+
+	DHD_PNO(("Setting mac oui to FW - %02x:%02x:%02x\n", _pno_state->pno_oui[0],
+	    _pno_state->pno_oui[1], _pno_state->pno_oui[2]));
+	err = dhd_pno_set_mac_addr(dhd, &macaddr);
+	if (err < 0) {
+		DHD_ERROR(("%s : failed to set pno mac address, error - %d\n", __FUNCTION__, err));
+		goto exit;
+	}
+
+#ifdef GSCAN_SUPPORT
+	if (mode == DHD_PNO_BATCH_MODE ||
+	((mode & DHD_PNO_GSCAN_MODE) && pno_params->params_gscan.mscan)) {
+#else
+	if (mode == DHD_PNO_BATCH_MODE) {
+#endif /* GSCAN_SUPPORT */
+		int _tmp = pfn_param.bestn;
+		/* set bestn to calculate the max mscan which firmware supports */
+		err = dhd_iovar(dhd, 0, "pfnmem", (char *)&_tmp, sizeof(_tmp), 1);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to set pfnmem\n", __FUNCTION__));
+			goto exit;
+		}
+		/* get max mscan which the firmware supports */
+		err = dhd_iovar(dhd, 0, "pfnmem", (char *)&_tmp, sizeof(_tmp), 0);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to get pfnmem\n", __FUNCTION__));
+			goto exit;
+		}
+		DHD_PNO((" returned mscan : %d, set bestn : %d\n", _tmp, pfn_param.bestn));
+		pfn_param.mscan = MIN(pfn_param.mscan, _tmp);
+	}
+	err = dhd_iovar(dhd, 0, "pfn_set", (char *)&pfn_param, sizeof(pfn_param), 1);
+	if (err < 0) {
+		DHD_ERROR(("%s : failed to execute pfn_set %d\n", __FUNCTION__, err));
+		goto exit;
+	}
+	/* need to return mscan if this is for batch scan instead of err */
+	err = (mode == DHD_PNO_BATCH_MODE)? pfn_param.mscan : err;
+exit:
+	return err;
+}
+static int
+_dhd_pno_add_ssid(dhd_pub_t *dhd, wlc_ssid_ext_t* ssids_list, int nssid)
+{
+	int err = BCME_OK;
+	int i = 0;
+	wl_pfn_t pfn_element;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	if (nssid) {
+		NULL_CHECK(ssids_list, "ssid list is NULL", err);
+	}
+	memset(&pfn_element, 0, sizeof(pfn_element));
+	{
+		int j;
+		for (j = 0; j < nssid; j++) {
+			DHD_PNO(("%d: scan  for  %s size = %d hidden = %d\n", j,
+				ssids_list[j].SSID, ssids_list[j].SSID_len, ssids_list[j].hidden));
+		}
+	}
+	/* Check for broadcast ssid */
+	for (i = 0; i < nssid; i++) {
+		if (!ssids_list[i].SSID_len) {
+			DHD_ERROR(("%d: Broadcast SSID is ilegal for PNO setting\n", i));
+			err = BCME_ERROR;
+			goto exit;
+		}
+	}
+	/* set all pfn ssid */
+	for (i = 0; i < nssid; i++) {
+		pfn_element.infra = htod32(DOT11_BSSTYPE_INFRASTRUCTURE);
+		pfn_element.auth = (DOT11_OPEN_SYSTEM);
+		pfn_element.wpa_auth = htod32(WPA_AUTH_PFN_ANY);
+		pfn_element.wsec = htod32(0);
+		pfn_element.infra = htod32(1);
+		if (ssids_list[i].hidden)
+			pfn_element.flags = htod32(ENABLE << WL_PFN_HIDDEN_BIT);
+		else
+			pfn_element.flags = 0;
+		memcpy((char *)pfn_element.ssid.SSID, ssids_list[i].SSID,
+			ssids_list[i].SSID_len);
+		pfn_element.ssid.SSID_len = ssids_list[i].SSID_len;
+		err = dhd_iovar(dhd, 0, "pfn_add", (char *)&pfn_element,
+			sizeof(pfn_element), 1);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to execute pfn_add\n", __FUNCTION__));
+			goto exit;
+		}
+	}
+exit:
+	return err;
+}
+/* qsort compare function */
+static int
+_dhd_pno_cmpfunc(const void *a, const void *b)
+{
+	return (*(uint16*)a - *(uint16*)b);
+}
+static int
+_dhd_pno_chan_merge(uint16 *d_chan_list, int *nchan,
+	uint16 *chan_list1, int nchan1, uint16 *chan_list2, int nchan2)
+{
+	int err = BCME_OK;
+	int i = 0, j = 0, k = 0;
+	uint16 tmp;
+	NULL_CHECK(d_chan_list, "d_chan_list is NULL", err);
+	NULL_CHECK(nchan, "nchan is NULL", err);
+	NULL_CHECK(chan_list1, "chan_list1 is NULL", err);
+	NULL_CHECK(chan_list2, "chan_list2 is NULL", err);
+	/* chan_list1 and chan_list2 should be sorted at first */
+	while (i < nchan1 && j < nchan2) {
+		tmp = chan_list1[i] < chan_list2[j]?
+			chan_list1[i++] : chan_list2[j++];
+		for (; i < nchan1 && chan_list1[i] == tmp; i++);
+		for (; j < nchan2 && chan_list2[j] == tmp; j++);
+		d_chan_list[k++] = tmp;
+	}
+
+	while (i < nchan1) {
+		tmp = chan_list1[i++];
+		for (; i < nchan1 && chan_list1[i] == tmp; i++);
+		d_chan_list[k++] = tmp;
+	}
+
+	while (j < nchan2) {
+		tmp = chan_list2[j++];
+		for (; j < nchan2 && chan_list2[j] == tmp; j++);
+		d_chan_list[k++] = tmp;
+
+	}
+	*nchan = k;
+	return err;
+}
+static int
+_dhd_pno_get_channels(dhd_pub_t *dhd, uint16 *d_chan_list,
+	int *nchan, uint8 band, bool skip_dfs)
+{
+	int err = BCME_OK;
+	int i, j;
+	uint32 chan_buf[WL_NUMCHANNELS + 1];
+	wl_uint32_list_t *list;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	if (*nchan) {
+		NULL_CHECK(d_chan_list, "d_chan_list is NULL", err);
+	}
+	list = (wl_uint32_list_t *) (void *)chan_buf;
+	list->count = htod32(WL_NUMCHANNELS);
+	err = dhd_wl_ioctl_cmd(dhd, WLC_GET_VALID_CHANNELS, chan_buf, sizeof(chan_buf), FALSE, 0);
+	if (err < 0) {
+		DHD_ERROR(("failed to get channel list (err: %d)\n", err));
+		goto exit;
+	}
+	for (i = 0, j = 0; i < dtoh32(list->count) && i < *nchan; i++) {
+		if (band == WLC_BAND_2G) {
+			if (dtoh32(list->element[i]) > CHANNEL_2G_MAX)
+				continue;
+		} else if (band == WLC_BAND_5G) {
+			if (dtoh32(list->element[i]) <= CHANNEL_2G_MAX)
+				continue;
+			if (skip_dfs && is_dfs(dtoh32(list->element[i])))
+				continue;
+
+		} else if (band == WLC_BAND_AUTO) {
+			if (skip_dfs || !is_dfs(dtoh32(list->element[i])))
+				continue;
+
+		} else { /* All channels */
+			if (skip_dfs && is_dfs(dtoh32(list->element[i])))
+				continue;
+		}
+		if (dtoh32(list->element[i]) <= CHANNEL_5G_MAX) {
+			d_chan_list[j++] = (uint16) dtoh32(list->element[i]);
+		} else {
+			err = BCME_BADCHAN;
+			goto exit;
+		}
+	}
+	*nchan = j;
+exit:
+	return err;
+}
+static int
+_dhd_pno_convert_format(dhd_pub_t *dhd, struct dhd_pno_batch_params *params_batch,
+	char *buf, int nbufsize)
+{
+	int err = BCME_OK;
+	int bytes_written = 0, nreadsize = 0;
+	int t_delta = 0;
+	int nleftsize = nbufsize;
+	uint8 cnt = 0;
+	char *bp = buf;
+	char eabuf[ETHER_ADDR_STR_LEN];
+#ifdef PNO_DEBUG
+	char *_base_bp;
+	char msg[150];
+#endif
+	dhd_pno_bestnet_entry_t *iter, *next;
+	dhd_pno_scan_results_t *siter, *snext;
+	dhd_pno_best_header_t *phead, *pprev;
+	NULL_CHECK(params_batch, "params_batch is NULL", err);
+	if (nbufsize > 0)
+		NULL_CHECK(buf, "buf is NULL", err);
+	/* initialize the buffer */
+	memset(buf, 0, nbufsize);
+	DHD_PNO(("%s enter \n", __FUNCTION__));
+	/* # of scans */
+	if (!params_batch->get_batch.batch_started) {
+		bp += nreadsize = sprintf(bp, "scancount=%d\n",
+			params_batch->get_batch.expired_tot_scan_cnt);
+		nleftsize -= nreadsize;
+		params_batch->get_batch.batch_started = TRUE;
+	}
+	DHD_PNO(("%s scancount %d\n", __FUNCTION__, params_batch->get_batch.expired_tot_scan_cnt));
+	/* preestimate scan count until which scan result this report is going to end */
+	list_for_each_entry_safe(siter, snext,
+		&params_batch->get_batch.expired_scan_results_list, list) {
+		phead = siter->bestnetheader;
+		while (phead != NULL) {
+			/* if left_size is less than bestheader total size , stop this */
+			if (nleftsize <=
+				(phead->tot_size + phead->tot_cnt * ENTRY_OVERHEAD))
+				goto exit;
+			/* increase scan count */
+			cnt++;
+			/* # best of each scan */
+			DHD_PNO(("\n<loop : %d, apcount %d>\n", cnt - 1, phead->tot_cnt));
+			/* attribute of the scan */
+			if (phead->reason & PNO_STATUS_ABORT_MASK) {
+				bp += nreadsize = sprintf(bp, "trunc\n");
+				nleftsize -= nreadsize;
+			}
+			list_for_each_entry_safe(iter, next,
+				&phead->entry_list, list) {
+				t_delta = jiffies_to_msecs(jiffies - iter->recorded_time);
+#ifdef PNO_DEBUG
+				_base_bp = bp;
+				memset(msg, 0, sizeof(msg));
+#endif
+				/* BSSID info */
+				bp += nreadsize = sprintf(bp, "bssid=%s\n",
+				bcm_ether_ntoa((const struct ether_addr *)&iter->BSSID, eabuf));
+				nleftsize -= nreadsize;
+				/* SSID */
+				bp += nreadsize = sprintf(bp, "ssid=%s\n", iter->SSID);
+				nleftsize -= nreadsize;
+				/* channel */
+				bp += nreadsize = sprintf(bp, "freq=%d\n",
+				wf_channel2mhz(iter->channel,
+				iter->channel <= CH_MAX_2G_CHANNEL?
+				WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G));
+				nleftsize -= nreadsize;
+				/* RSSI */
+				bp += nreadsize = sprintf(bp, "level=%d\n", iter->RSSI);
+				nleftsize -= nreadsize;
+				/* add the time consumed in Driver to the timestamp of firmware */
+				iter->timestamp += t_delta;
+				bp += nreadsize = sprintf(bp, "age=%d\n", iter->timestamp);
+				nleftsize -= nreadsize;
+				/* RTT0 */
+				bp += nreadsize = sprintf(bp, "dist=%d\n",
+				(iter->rtt0 == 0)? -1 : iter->rtt0);
+				nleftsize -= nreadsize;
+				/* RTT1 */
+				bp += nreadsize = sprintf(bp, "distSd=%d\n",
+				(iter->rtt0 == 0)? -1 : iter->rtt1);
+				nleftsize -= nreadsize;
+				bp += nreadsize = sprintf(bp, "%s", AP_END_MARKER);
+				nleftsize -= nreadsize;
+				list_del(&iter->list);
+				MFREE(dhd->osh, iter, BESTNET_ENTRY_SIZE);
+#ifdef PNO_DEBUG
+				memcpy(msg, _base_bp, bp - _base_bp);
+				DHD_PNO(("Entry : \n%s", msg));
+#endif
+			}
+			bp += nreadsize = sprintf(bp, "%s", SCAN_END_MARKER);
+			DHD_PNO(("%s", SCAN_END_MARKER));
+			nleftsize -= nreadsize;
+			pprev = phead;
+			/* reset the header */
+			siter->bestnetheader = phead = phead->next;
+			MFREE(dhd->osh, pprev, BEST_HEADER_SIZE);
+
+			siter->cnt_header--;
+		}
+		if (phead == NULL) {
+			/* we store all entry in this scan , so it is ok to delete */
+			list_del(&siter->list);
+			MFREE(dhd->osh, siter, SCAN_RESULTS_SIZE);
+		}
+	}
+exit:
+	if (cnt < params_batch->get_batch.expired_tot_scan_cnt) {
+		DHD_ERROR(("Buffer size is small to save all batch entry,"
+			" cnt : %d (remained_scan_cnt): %d\n",
+			cnt, params_batch->get_batch.expired_tot_scan_cnt - cnt));
+	}
+	params_batch->get_batch.expired_tot_scan_cnt -= cnt;
+	/* set FALSE only if the link list  is empty after returning the data */
+	if (list_empty(&params_batch->get_batch.expired_scan_results_list)) {
+		params_batch->get_batch.batch_started = FALSE;
+		bp += sprintf(bp, "%s", RESULTS_END_MARKER);
+		DHD_PNO(("%s", RESULTS_END_MARKER));
+		DHD_PNO(("%s : Getting the batching data is complete\n", __FUNCTION__));
+	}
+	/* return used memory in buffer */
+	bytes_written = (int32)(bp - buf);
+	return bytes_written;
+}
+static int
+_dhd_pno_clear_all_batch_results(dhd_pub_t *dhd, struct list_head *head, bool only_last)
+{
+	int err = BCME_OK;
+	int removed_scan_cnt = 0;
+	dhd_pno_scan_results_t *siter, *snext;
+	dhd_pno_best_header_t *phead, *pprev;
+	dhd_pno_bestnet_entry_t *iter, *next;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(head, "head is NULL", err);
+	NULL_CHECK(head->next, "head->next is NULL", err);
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	list_for_each_entry_safe(siter, snext,
+		head, list) {
+		if (only_last) {
+			/* in case that we need to delete only last one */
+			if (!list_is_last(&siter->list, head)) {
+				/* skip if the one is not last */
+				continue;
+			}
+		}
+		/* delete all data belong if the one is last */
+		phead = siter->bestnetheader;
+		while (phead != NULL) {
+			removed_scan_cnt++;
+			list_for_each_entry_safe(iter, next,
+			&phead->entry_list, list) {
+				list_del(&iter->list);
+				MFREE(dhd->osh, iter, BESTNET_ENTRY_SIZE);
+			}
+			pprev = phead;
+			phead = phead->next;
+			MFREE(dhd->osh, pprev, BEST_HEADER_SIZE);
+		}
+		if (phead == NULL) {
+			/* it is ok to delete top node */
+			list_del(&siter->list);
+			MFREE(dhd->osh, siter, SCAN_RESULTS_SIZE);
+		}
+	}
+	return removed_scan_cnt;
+}
+
+static int
+_dhd_pno_cfg(dhd_pub_t *dhd, uint16 *channel_list, int nchan)
+{
+	int err = BCME_OK;
+	int i = 0;
+	wl_pfn_cfg_t pfncfg_param;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	if (nchan) {
+		NULL_CHECK(channel_list, "nchan is NULL", err);
+	}
+	DHD_PNO(("%s enter :  nchan : %d\n", __FUNCTION__, nchan));
+	memset(&pfncfg_param, 0, sizeof(wl_pfn_cfg_t));
+	/* Setup default values */
+	pfncfg_param.reporttype = htod32(WL_PFN_REPORT_ALLNET);
+	pfncfg_param.channel_num = htod32(0);
+
+	for (i = 0; i < nchan && nchan < WL_NUMCHANNELS; i++)
+		pfncfg_param.channel_list[i] = channel_list[i];
+
+	pfncfg_param.channel_num = htod32(nchan);
+	err = dhd_iovar(dhd, 0, "pfn_cfg", (char *)&pfncfg_param, sizeof(pfncfg_param), 1);
+	if (err < 0) {
+		DHD_ERROR(("%s : failed to execute pfn_cfg\n", __FUNCTION__));
+		goto exit;
+	}
+exit:
+	return err;
+}
+static int
+_dhd_pno_reinitialize_prof(dhd_pub_t *dhd, dhd_pno_params_t *params, dhd_pno_mode_t mode)
+{
+	int err = BCME_OK;
+	dhd_pno_status_info_t *_pno_state;
+	NULL_CHECK(dhd, "dhd is NULL\n", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL\n", err);
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	mutex_lock(&_pno_state->pno_mutex);
+	switch (mode) {
+	case DHD_PNO_LEGACY_MODE: {
+		struct dhd_pno_ssid *iter, *next;
+		if (params->params_legacy.nssid > 0) {
+			list_for_each_entry_safe(iter, next,
+				&params->params_legacy.ssid_list, list) {
+				list_del(&iter->list);
+				kfree(iter);
+			}
+		}
+		params->params_legacy.nssid = 0;
+		params->params_legacy.scan_fr = 0;
+		params->params_legacy.pno_freq_expo_max = 0;
+		params->params_legacy.pno_repeat = 0;
+		params->params_legacy.nchan = 0;
+		memset(params->params_legacy.chan_list, 0,
+			sizeof(params->params_legacy.chan_list));
+		break;
+	}
+	case DHD_PNO_BATCH_MODE: {
+		params->params_batch.scan_fr = 0;
+		params->params_batch.mscan = 0;
+		params->params_batch.nchan = 0;
+		params->params_batch.rtt = 0;
+		params->params_batch.bestn = 0;
+		params->params_batch.nchan = 0;
+		params->params_batch.band = WLC_BAND_AUTO;
+		memset(params->params_batch.chan_list, 0,
+			sizeof(params->params_batch.chan_list));
+		params->params_batch.get_batch.batch_started = FALSE;
+		params->params_batch.get_batch.buf = NULL;
+		params->params_batch.get_batch.bufsize = 0;
+		params->params_batch.get_batch.reason = 0;
+		_dhd_pno_clear_all_batch_results(dhd,
+			&params->params_batch.get_batch.scan_results_list, FALSE);
+		_dhd_pno_clear_all_batch_results(dhd,
+			&params->params_batch.get_batch.expired_scan_results_list, FALSE);
+		params->params_batch.get_batch.tot_scan_cnt = 0;
+		params->params_batch.get_batch.expired_tot_scan_cnt = 0;
+		params->params_batch.get_batch.top_node_cnt = 0;
+		INIT_LIST_HEAD(&params->params_batch.get_batch.scan_results_list);
+		INIT_LIST_HEAD(&params->params_batch.get_batch.expired_scan_results_list);
+		break;
+	}
+	case DHD_PNO_HOTLIST_MODE: {
+		struct dhd_pno_bssid *iter, *next;
+		if (params->params_hotlist.nbssid > 0) {
+			list_for_each_entry_safe(iter, next,
+				&params->params_hotlist.bssid_list, list) {
+				list_del(&iter->list);
+				kfree(iter);
+			}
+		}
+		params->params_hotlist.scan_fr = 0;
+		params->params_hotlist.nbssid = 0;
+		params->params_hotlist.nchan = 0;
+		params->params_batch.band = WLC_BAND_AUTO;
+		memset(params->params_hotlist.chan_list, 0,
+			sizeof(params->params_hotlist.chan_list));
+		break;
+	}
+	default:
+		DHD_ERROR(("%s : unknown mode : %d\n", __FUNCTION__, mode));
+		break;
+	}
+	mutex_unlock(&_pno_state->pno_mutex);
+	return err;
+}
+static int
+_dhd_pno_add_bssid(dhd_pub_t *dhd, wl_pfn_bssid_t *p_pfn_bssid, int nbssid)
+{
+	int err = BCME_OK;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	if (nbssid) {
+		NULL_CHECK(p_pfn_bssid, "bssid list is NULL", err);
+	}
+	err = dhd_iovar(dhd, 0, "pfn_add_bssid", (char *)p_pfn_bssid,
+		sizeof(wl_pfn_bssid_t) * nbssid, 1);
+	if (err < 0) {
+		DHD_ERROR(("%s : failed to execute pfn_cfg\n", __FUNCTION__));
+		goto exit;
+	}
+exit:
+	return err;
+}
+
+#ifdef GSCAN_SUPPORT
+static int
+_dhd_pno_add_significant_bssid(dhd_pub_t *dhd,
+   wl_pfn_significant_bssid_t *p_pfn_significant_bssid, int nbssid)
+{
+	int err = BCME_OK;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+
+	if (!nbssid) {
+		err = BCME_ERROR;
+		goto exit;
+	}
+
+	NULL_CHECK(p_pfn_significant_bssid, "bssid list is NULL", err);
+
+	err = dhd_iovar(dhd, 0, "pfn_add_swc_bssid", (char *)p_pfn_significant_bssid,
+		sizeof(wl_pfn_significant_bssid_t) * nbssid, 1);
+	if (err < 0) {
+		DHD_ERROR(("%s : failed to execute pfn_significant_bssid %d\n", __FUNCTION__, err));
+		goto exit;
+	}
+exit:
+	return err;
+}
+#endif /* GSCAN_SUPPORT */
+
+int
+dhd_pno_stop_for_ssid(dhd_pub_t *dhd)
+{
+	int err = BCME_OK;
+	uint32 mode = 0;
+	dhd_pno_status_info_t *_pno_state;
+	dhd_pno_params_t *_params;
+	wl_pfn_bssid_t *p_pfn_bssid = NULL;
+	NULL_CHECK(dhd, "dev is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	if (!(_pno_state->pno_mode & DHD_PNO_LEGACY_MODE)) {
+		DHD_ERROR(("%s : LEGACY PNO MODE is not enabled\n", __FUNCTION__));
+		goto exit;
+	}
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	_pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE;
+#ifdef GSCAN_SUPPORT
+	if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) {
+		struct dhd_pno_gscan_params *gscan_params;
+
+		_params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+		gscan_params = &_params->params_gscan;
+		if (gscan_params->mscan) {
+			/* retrieve the batching data from firmware into host */
+			dhd_wait_batch_results_complete(dhd);
+		}
+		/* save current pno_mode before calling dhd_pno_clean */
+		mutex_lock(&_pno_state->pno_mutex);
+		mode = _pno_state->pno_mode;
+		err = dhd_pno_clean(dhd);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n",
+				__FUNCTION__, err));
+			mutex_unlock(&_pno_state->pno_mutex);
+			goto exit;
+		}
+		/* restore previous pno_mode */
+		_pno_state->pno_mode = mode;
+		mutex_unlock(&_pno_state->pno_mutex);
+		/* Restart gscan */
+		err = dhd_pno_initiate_gscan_request(dhd, 1, 0);
+		goto exit;
+	}
+#endif /* GSCAN_SUPPORT */
+	/* restart Batch mode  if the batch mode is on */
+	if (_pno_state->pno_mode & (DHD_PNO_BATCH_MODE | DHD_PNO_HOTLIST_MODE)) {
+		/* retrieve the batching data from firmware into host */
+		dhd_pno_get_for_batch(dhd, NULL, 0, PNO_STATUS_DISABLE);
+		/* save current pno_mode before calling dhd_pno_clean */
+		mode = _pno_state->pno_mode;
+		dhd_pno_clean(dhd);
+		/* restore previous pno_mode */
+		_pno_state->pno_mode = mode;
+		if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) {
+			_params = &(_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]);
+			/* restart BATCH SCAN */
+			err = dhd_pno_set_for_batch(dhd, &_params->params_batch);
+			if (err < 0) {
+				_pno_state->pno_mode &= ~DHD_PNO_BATCH_MODE;
+				DHD_ERROR(("%s : failed to restart batch scan(err: %d)\n",
+					__FUNCTION__, err));
+				goto exit;
+			}
+		} else if (_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE) {
+			/* restart HOTLIST SCAN */
+			struct dhd_pno_bssid *iter, *next;
+			_params = &(_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS]);
+			p_pfn_bssid = kzalloc(sizeof(wl_pfn_bssid_t) *
+			_params->params_hotlist.nbssid, GFP_KERNEL);
+			if (p_pfn_bssid == NULL) {
+				DHD_ERROR(("%s : failed to allocate wl_pfn_bssid_t array"
+				" (count: %d)",
+					__FUNCTION__, _params->params_hotlist.nbssid));
+				err = BCME_ERROR;
+				_pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE;
+				goto exit;
+			}
+			/* convert dhd_pno_bssid to wl_pfn_bssid */
+			list_for_each_entry_safe(iter, next,
+			&_params->params_hotlist.bssid_list, list) {
+				memcpy(&p_pfn_bssid->macaddr,
+				&iter->macaddr, ETHER_ADDR_LEN);
+				p_pfn_bssid->flags = iter->flags;
+				p_pfn_bssid++;
+			}
+			err = dhd_pno_set_for_hotlist(dhd, p_pfn_bssid, &_params->params_hotlist);
+			if (err < 0) {
+				_pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE;
+				DHD_ERROR(("%s : failed to restart hotlist scan(err: %d)\n",
+					__FUNCTION__, err));
+				goto exit;
+			}
+		}
+	} else {
+		err = dhd_pno_clean(dhd);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n",
+				__FUNCTION__, err));
+			goto exit;
+		}
+	}
+exit:
+	kfree(p_pfn_bssid);
+	return err;
+}
+
+int
+dhd_pno_enable(dhd_pub_t *dhd, int enable)
+{
+	int err = BCME_OK;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	return (_dhd_pno_enable(dhd, enable));
+}
+
+static wlc_ssid_ext_t * dhd_pno_get_legacy_pno_ssid(dhd_pub_t *dhd,
+            dhd_pno_status_info_t *pno_state)
+{
+	int err = BCME_OK;
+	int i;
+	struct dhd_pno_ssid *iter, *next;
+	dhd_pno_params_t	*_params1 = &pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS];
+	wlc_ssid_ext_t *p_ssid_list;
+
+	p_ssid_list = kzalloc(sizeof(wlc_ssid_ext_t) *
+	                   _params1->params_legacy.nssid, GFP_KERNEL);
+	if (p_ssid_list == NULL) {
+		DHD_ERROR(("%s : failed to allocate wlc_ssid_ext_t array (count: %d)",
+			__FUNCTION__, _params1->params_legacy.nssid));
+		err = BCME_ERROR;
+		pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE;
+		goto exit;
+	}
+	i = 0;
+	/* convert dhd_pno_ssid to wlc_ssid_ext_t */
+	list_for_each_entry_safe(iter, next, &_params1->params_legacy.ssid_list, list) {
+		p_ssid_list[i].SSID_len = iter->SSID_len;
+		p_ssid_list[i].hidden = iter->hidden;
+		memcpy(p_ssid_list[i].SSID, iter->SSID, p_ssid_list[i].SSID_len);
+		i++;
+	}
+exit:
+	return p_ssid_list;
+}
+
+static int
+dhd_pno_add_to_ssid_list(dhd_pno_params_t *params, wlc_ssid_ext_t *ssid_list,
+    int nssid)
+{
+	int ret = 0;
+	int i;
+	struct dhd_pno_ssid *_pno_ssid;
+
+	for (i = 0; i < nssid; i++) {
+		if (ssid_list[i].SSID_len > DOT11_MAX_SSID_LEN) {
+			DHD_ERROR(("%s : Invalid SSID length %d\n",
+				__FUNCTION__, ssid_list[i].SSID_len));
+			ret = BCME_ERROR;
+			goto exit;
+		}
+		_pno_ssid = kzalloc(sizeof(struct dhd_pno_ssid), GFP_KERNEL);
+		if (_pno_ssid == NULL) {
+			DHD_ERROR(("%s : failed to allocate struct dhd_pno_ssid\n",
+				__FUNCTION__));
+			ret = BCME_ERROR;
+			goto exit;
+		}
+		_pno_ssid->SSID_len = ssid_list[i].SSID_len;
+		_pno_ssid->hidden = ssid_list[i].hidden;
+		memcpy(_pno_ssid->SSID, ssid_list[i].SSID, _pno_ssid->SSID_len);
+		list_add_tail(&_pno_ssid->list, &params->params_legacy.ssid_list);
+	}
+
+exit:
+	return ret;
+}
+
+int
+dhd_pno_set_for_ssid(dhd_pub_t *dhd, wlc_ssid_ext_t* ssid_list, int nssid,
+	uint16  scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan)
+{
+	dhd_pno_params_t *_params;
+	dhd_pno_params_t *_params2;
+	dhd_pno_status_info_t *_pno_state;
+	uint16 _chan_list[WL_NUMCHANNELS];
+	int32 tot_nchan = 0;
+	int err = BCME_OK;
+	int i;
+	int mode = 0;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+
+	if (!dhd_support_sta_mode(dhd)) {
+		err = BCME_BADOPTION;
+		goto exit_no_clear;
+	}
+	DHD_PNO(("%s enter : scan_fr :%d, pno_repeat :%d,"
+			"pno_freq_expo_max: %d, nchan :%d\n", __FUNCTION__,
+			scan_fr, pno_repeat, pno_freq_expo_max, nchan));
+
+	_params = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]);
+	/* If GSCAN is also ON will handle this down below */
+#ifdef GSCAN_SUPPORT
+	if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE &&
+	 !(_pno_state->pno_mode & DHD_PNO_GSCAN_MODE)) {
+#else
+	if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) {
+#endif /* GSCAN_SUPPORT */
+		DHD_ERROR(("%s : Legacy PNO mode was already started, "
+			"will disable previous one to start new one\n", __FUNCTION__));
+		err = dhd_pno_stop_for_ssid(dhd);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to stop legacy PNO (err %d)\n",
+				__FUNCTION__, err));
+			goto exit_no_clear;
+		}
+	}
+	_pno_state->pno_mode |= DHD_PNO_LEGACY_MODE;
+	err = _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_LEGACY_MODE);
+	if (err < 0) {
+		DHD_ERROR(("%s : failed to reinitialize profile (err %d)\n",
+			__FUNCTION__, err));
+		goto exit_no_clear;
+	}
+	memset(_chan_list, 0, sizeof(_chan_list));
+	tot_nchan = MIN(nchan, WL_NUMCHANNELS);
+	if (tot_nchan > 0 && channel_list) {
+		for (i = 0; i < tot_nchan; i++)
+		_params->params_legacy.chan_list[i] = _chan_list[i] = channel_list[i];
+	}
+#ifdef GSCAN_SUPPORT
+	else {
+		tot_nchan = WL_NUMCHANNELS;
+		 err = _dhd_pno_get_channels(dhd, _chan_list, &tot_nchan,
+		         (WLC_BAND_2G | WLC_BAND_5G), FALSE);
+		if (err < 0) {
+			tot_nchan = 0;
+			DHD_PNO(("Could not get channel list for PNO SSID\n"));
+		} else {
+			for (i = 0; i < tot_nchan; i++)
+				_params->params_legacy.chan_list[i] = _chan_list[i];
+		}
+	}
+#endif /* GSCAN_SUPPORT */
+
+	if (_pno_state->pno_mode & (DHD_PNO_BATCH_MODE | DHD_PNO_HOTLIST_MODE)) {
+		DHD_PNO(("BATCH SCAN is on progress in firmware\n"));
+		/* retrieve the batching data from firmware into host */
+		dhd_pno_get_for_batch(dhd, NULL, 0, PNO_STATUS_DISABLE);
+		/* store current pno_mode before disabling pno */
+		mode = _pno_state->pno_mode;
+		err = _dhd_pno_enable(dhd, PNO_OFF);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to disable PNO\n", __FUNCTION__));
+			goto exit_no_clear;
+		}
+		/* restore the previous mode */
+		_pno_state->pno_mode = mode;
+		/* use superset of channel list between two mode */
+		if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) {
+			_params2 = &(_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]);
+			if (_params2->params_batch.nchan > 0 && tot_nchan > 0) {
+				err = _dhd_pno_chan_merge(_chan_list, &tot_nchan,
+					&_params2->params_batch.chan_list[0],
+					_params2->params_batch.nchan,
+					&channel_list[0], tot_nchan);
+				if (err < 0) {
+					DHD_ERROR(("%s : failed to merge channel list"
+					" between legacy and batch\n",
+						__FUNCTION__));
+					goto exit_no_clear;
+				}
+			}  else {
+				DHD_PNO(("superset channel will use"
+				" all channels in firmware\n"));
+			}
+		} else if (_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE) {
+			_params2 = &(_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS]);
+			if (_params2->params_hotlist.nchan > 0 && tot_nchan > 0) {
+				err = _dhd_pno_chan_merge(_chan_list, &tot_nchan,
+					&_params2->params_hotlist.chan_list[0],
+					_params2->params_hotlist.nchan,
+					&channel_list[0], tot_nchan);
+				if (err < 0) {
+					DHD_ERROR(("%s : failed to merge channel list"
+					" between legacy and hotlist\n",
+						__FUNCTION__));
+					goto exit_no_clear;
+				}
+			}
+		}
+	}
+	_params->params_legacy.scan_fr = scan_fr;
+	_params->params_legacy.pno_repeat = pno_repeat;
+	_params->params_legacy.pno_freq_expo_max = pno_freq_expo_max;
+	_params->params_legacy.nchan = tot_nchan;
+	_params->params_legacy.nssid = nssid;
+	INIT_LIST_HEAD(&_params->params_legacy.ssid_list);
+#ifdef GSCAN_SUPPORT
+	/* dhd_pno_initiate_gscan_request will handle simultaneous Legacy PNO and GSCAN */
+	if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) {
+		if (dhd_pno_add_to_ssid_list(_params, ssid_list, nssid) < 0) {
+			err = BCME_ERROR;
+			goto exit;
+		}
+		DHD_PNO(("GSCAN mode is ON! Will restart GSCAN+Legacy PNO\n"));
+		err = dhd_pno_initiate_gscan_request(dhd, 1, 0);
+		goto exit;
+	}
+#endif /* GSCAN_SUPPORT */
+	if ((err = _dhd_pno_set(dhd, _params, DHD_PNO_LEGACY_MODE)) < 0) {
+		DHD_ERROR(("failed to set call pno_set (err %d) in firmware\n", err));
+		goto exit;
+	}
+	if ((err = _dhd_pno_add_ssid(dhd, ssid_list, nssid)) < 0) {
+		DHD_ERROR(("failed to add ssid list(err %d), %d in firmware\n", err, nssid));
+		goto exit;
+	}
+	if (dhd_pno_add_to_ssid_list(_params, ssid_list, nssid) < 0) {
+		err = BCME_ERROR;
+		goto exit;
+	}
+	if (tot_nchan > 0) {
+		if ((err = _dhd_pno_cfg(dhd, _chan_list, tot_nchan)) < 0) {
+			DHD_ERROR(("%s : failed to set call pno_cfg (err %d) in firmware\n",
+				__FUNCTION__, err));
+			goto exit;
+		}
+	}
+	if (_pno_state->pno_status == DHD_PNO_DISABLED) {
+		if ((err = _dhd_pno_enable(dhd, PNO_ON)) < 0)
+			DHD_ERROR(("%s : failed to enable PNO\n", __FUNCTION__));
+	}
+exit:
+	if (err < 0)
+		_dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_LEGACY_MODE);
+exit_no_clear:
+	/* clear mode in case of error */
+	if (err < 0) {
+		int ret = dhd_pno_clean(dhd);
+
+		if (ret < 0) {
+			DHD_ERROR(("%s : dhd_pno_clean failure (err: %d)\n",
+				__FUNCTION__, ret));
+		} else {
+			_pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE;
+		}
+	}
+	return err;
+}
+int
+dhd_pno_set_for_batch(dhd_pub_t *dhd, struct dhd_pno_batch_params *batch_params)
+{
+	int err = BCME_OK;
+	uint16 _chan_list[WL_NUMCHANNELS];
+	int rem_nchan = 0, tot_nchan = 0;
+	int mode = 0, mscan = 0;
+	dhd_pno_params_t *_params;
+	dhd_pno_params_t *_params2;
+	dhd_pno_status_info_t *_pno_state;
+	wlc_ssid_ext_t *p_ssid_list = NULL;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	NULL_CHECK(batch_params, "batch_params is NULL", err);
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	if (!dhd_support_sta_mode(dhd)) {
+		err = BCME_BADOPTION;
+		goto exit;
+	}
+	if (!WLS_SUPPORTED(_pno_state)) {
+		DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__));
+		err = BCME_UNSUPPORTED;
+		goto exit;
+	}
+	_params = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS];
+	if (!(_pno_state->pno_mode & DHD_PNO_BATCH_MODE)) {
+		_pno_state->pno_mode |= DHD_PNO_BATCH_MODE;
+		err = _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_BATCH_MODE);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to call _dhd_pno_reinitialize_prof\n",
+				__FUNCTION__));
+			goto exit;
+		}
+	} else {
+		/* batch mode is already started */
+		return -EBUSY;
+	}
+	_params->params_batch.scan_fr = batch_params->scan_fr;
+	_params->params_batch.bestn = batch_params->bestn;
+	_params->params_batch.mscan = (batch_params->mscan)?
+		batch_params->mscan : DEFAULT_BATCH_MSCAN;
+	_params->params_batch.nchan = batch_params->nchan;
+	memcpy(_params->params_batch.chan_list, batch_params->chan_list,
+		sizeof(_params->params_batch.chan_list));
+
+	memset(_chan_list, 0, sizeof(_chan_list));
+
+	rem_nchan = ARRAYSIZE(batch_params->chan_list) - batch_params->nchan;
+	if (batch_params->band == WLC_BAND_2G || batch_params->band == WLC_BAND_5G) {
+		/* get a valid channel list based on band B or A */
+		err = _dhd_pno_get_channels(dhd,
+		&_params->params_batch.chan_list[batch_params->nchan],
+		&rem_nchan, batch_params->band, FALSE);
+		if (err < 0) {
+			DHD_ERROR(("%s: failed to get valid channel list(band : %d)\n",
+				__FUNCTION__, batch_params->band));
+			goto exit;
+		}
+		/* now we need to update nchan because rem_chan has valid channel count */
+		_params->params_batch.nchan += rem_nchan;
+		/* need to sort channel list */
+		sort(_params->params_batch.chan_list, _params->params_batch.nchan,
+			sizeof(_params->params_batch.chan_list[0]), _dhd_pno_cmpfunc, NULL);
+	}
+#ifdef PNO_DEBUG
+{
+		DHD_PNO(("Channel list : "));
+		for (i = 0; i < _params->params_batch.nchan; i++) {
+			DHD_PNO(("%d ", _params->params_batch.chan_list[i]));
+		}
+		DHD_PNO(("\n"));
+}
+#endif
+	if (_params->params_batch.nchan) {
+		/* copy the channel list into local array */
+		memcpy(_chan_list, _params->params_batch.chan_list, sizeof(_chan_list));
+		tot_nchan = _params->params_batch.nchan;
+	}
+	if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) {
+		DHD_PNO(("PNO SSID is on progress in firmware\n"));
+		/* store current pno_mode before disabling pno */
+		mode = _pno_state->pno_mode;
+		err = _dhd_pno_enable(dhd, PNO_OFF);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to disable PNO\n", __FUNCTION__));
+			goto exit;
+		}
+		/* restore the previous mode */
+		_pno_state->pno_mode = mode;
+		/* Use the superset for channelist between two mode */
+		_params2 = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]);
+		if (_params2->params_legacy.nchan > 0 && _params->params_batch.nchan > 0) {
+			err = _dhd_pno_chan_merge(_chan_list, &tot_nchan,
+				&_params2->params_legacy.chan_list[0],
+				_params2->params_legacy.nchan,
+				&_params->params_batch.chan_list[0], _params->params_batch.nchan);
+			if (err < 0) {
+				DHD_ERROR(("%s : failed to merge channel list"
+				" between legacy and batch\n",
+					__FUNCTION__));
+				goto exit;
+			}
+		} else {
+			DHD_PNO(("superset channel will use all channels in firmware\n"));
+		}
+		p_ssid_list = dhd_pno_get_legacy_pno_ssid(dhd, _pno_state);
+		if (!p_ssid_list) {
+			err = BCME_NOMEM;
+			DHD_ERROR(("failed to get Legacy PNO SSID list\n"));
+			goto exit;
+		}
+		if ((err = _dhd_pno_add_ssid(dhd, p_ssid_list,
+			_params2->params_legacy.nssid)) < 0) {
+			DHD_ERROR(("failed to add ssid list (err %d) in firmware\n", err));
+			goto exit;
+		}
+	}
+	if ((err = _dhd_pno_set(dhd, _params, DHD_PNO_BATCH_MODE)) < 0) {
+		DHD_ERROR(("%s : failed to set call pno_set (err %d) in firmware\n",
+			__FUNCTION__, err));
+		goto exit;
+	} else {
+		/* we need to return mscan */
+		mscan = err;
+	}
+	if (tot_nchan > 0) {
+		if ((err = _dhd_pno_cfg(dhd, _chan_list, tot_nchan)) < 0) {
+			DHD_ERROR(("%s : failed to set call pno_cfg (err %d) in firmware\n",
+				__FUNCTION__, err));
+			goto exit;
+		}
+	}
+	if (_pno_state->pno_status == DHD_PNO_DISABLED) {
+		if ((err = _dhd_pno_enable(dhd, PNO_ON)) < 0)
+			DHD_ERROR(("%s : failed to enable PNO\n", __FUNCTION__));
+	}
+exit:
+	/* clear mode in case of error */
+	if (err < 0)
+		_pno_state->pno_mode &= ~DHD_PNO_BATCH_MODE;
+	else {
+		/* return #max scan firmware can do */
+		err = mscan;
+	}
+	kfree(p_ssid_list);
+	return err;
+}
+
+
+#ifdef GSCAN_SUPPORT
+static void dhd_pno_reset_cfg_gscan(dhd_pno_params_t *_params,
+	dhd_pno_status_info_t *_pno_state, uint8 flags)
+{
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+
+	if (flags & GSCAN_FLUSH_SCAN_CFG) {
+		_params->params_gscan.bestn = 0;
+		_params->params_gscan.mscan = 0;
+		_params->params_gscan.buffer_threshold = GSCAN_BATCH_NO_THR_SET;
+		_params->params_gscan.scan_fr = 0;
+		_params->params_gscan.send_all_results_flag = 0;
+		memset(_params->params_gscan.channel_bucket, 0,
+		_params->params_gscan.nchannel_buckets *
+		 sizeof(struct dhd_pno_gscan_channel_bucket));
+		_params->params_gscan.nchannel_buckets = 0;
+		DHD_PNO(("Flush Scan config\n"));
+	}
+	if (flags & GSCAN_FLUSH_HOTLIST_CFG)
+	{
+		struct dhd_pno_bssid *iter, *next;
+		if (_params->params_gscan.nbssid_hotlist > 0) {
+			list_for_each_entry_safe(iter, next,
+				&_params->params_gscan.hotlist_bssid_list, list) {
+				list_del(&iter->list);
+				kfree(iter);
+			}
+		}
+		_params->params_gscan.nbssid_hotlist = 0;
+		DHD_PNO(("Flush Hotlist Config\n"));
+	}
+	if (flags & GSCAN_FLUSH_SIGNIFICANT_CFG)
+	{
+		dhd_pno_significant_bssid_t *iter, *next;
+
+		if (_params->params_gscan.nbssid_significant_change > 0) {
+			list_for_each_entry_safe(iter, next,
+				&_params->params_gscan.significant_bssid_list, list) {
+				list_del(&iter->list);
+				kfree(iter);
+			}
+		}
+		_params->params_gscan.nbssid_significant_change = 0;
+		DHD_PNO(("Flush Significant Change Config\n"));
+	}
+
+	return;
+}
+
+void dhd_pno_lock_batch_results(dhd_pub_t *dhd)
+{
+	dhd_pno_status_info_t *_pno_state;
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	mutex_lock(&_pno_state->pno_mutex);
+	return;
+}
+
+void dhd_pno_unlock_batch_results(dhd_pub_t *dhd)
+{
+	dhd_pno_status_info_t *_pno_state;
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	mutex_unlock(&_pno_state->pno_mutex);
+	return;
+}
+
+void dhd_wait_batch_results_complete(dhd_pub_t *dhd)
+{
+	dhd_pno_status_info_t *_pno_state;
+	dhd_pno_params_t *_params;
+
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	_params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+
+	/* Has the workqueue finished its job already?? */
+	if (_params->params_gscan.get_batch_flag == GSCAN_BATCH_RETRIEVAL_IN_PROGRESS) {
+		DHD_PNO(("%s: Waiting to complete retrieval..\n", __FUNCTION__));
+		wait_event_interruptible_timeout(_pno_state->batch_get_wait,
+		     is_batch_retrieval_complete(&_params->params_gscan),
+		     msecs_to_jiffies(GSCAN_BATCH_GET_MAX_WAIT));
+	} else { /* GSCAN_BATCH_RETRIEVAL_COMPLETE */
+		gscan_results_cache_t *iter;
+		uint16 num_results = 0;
+		int err;
+
+		mutex_lock(&_pno_state->pno_mutex);
+		iter = _params->params_gscan.gscan_batch_cache;
+		while (iter) {
+			num_results += iter->tot_count - iter->tot_consumed;
+			iter = iter->next;
+		}
+		mutex_unlock(&_pno_state->pno_mutex);
+
+		/* All results consumed/No results cached??
+		 * Get fresh results from FW
+		 */
+		if (!num_results) {
+			DHD_PNO(("%s: No results cached, getting from FW..\n", __FUNCTION__));
+			err = dhd_retreive_batch_scan_results(dhd);
+			if (err == BCME_OK) {
+				wait_event_interruptible_timeout(_pno_state->batch_get_wait,
+				  is_batch_retrieval_complete(&_params->params_gscan),
+				  msecs_to_jiffies(GSCAN_BATCH_GET_MAX_WAIT));
+			}
+		}
+	}
+	DHD_PNO(("%s: Wait complete\n", __FUNCTION__));
+
+	return;
+}
+
+static void *dhd_get_gscan_batch_results(dhd_pub_t *dhd, uint32 *len)
+{
+	gscan_results_cache_t *iter, *results;
+	dhd_pno_status_info_t *_pno_state;
+	dhd_pno_params_t *_params;
+	uint16 num_scan_ids = 0, num_results = 0;
+
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	_params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+
+	iter = results = _params->params_gscan.gscan_batch_cache;
+	while (iter) {
+		num_results += iter->tot_count - iter->tot_consumed;
+		num_scan_ids++;
+		iter = iter->next;
+	}
+
+	*len = ((num_results << 16) | (num_scan_ids));
+	return results;
+}
+
+void * dhd_pno_get_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type,
+                          void *info, uint32 *len)
+{
+	void *ret = NULL;
+	dhd_pno_gscan_capabilities_t *ptr;
+
+	if (!len) {
+		DHD_ERROR(("%s: len is NULL\n", __FUNCTION__));
+		return ret;
+	}
+
+	switch (type) {
+		case DHD_PNO_GET_CAPABILITIES:
+			ptr = (dhd_pno_gscan_capabilities_t *)
+			kmalloc(sizeof(dhd_pno_gscan_capabilities_t), GFP_KERNEL);
+			if (!ptr)
+				break;
+			/* Hardcoding these values for now, need to get
+			 * these values from FW, will change in a later check-in
+			 */
+			ptr->max_scan_cache_size = 12;
+			ptr->max_scan_buckets = GSCAN_MAX_CH_BUCKETS;
+			ptr->max_ap_cache_per_scan = 16;
+			ptr->max_rssi_sample_size = PFN_SWC_RSSI_WINDOW_MAX;
+			ptr->max_scan_reporting_threshold = 100;
+			ptr->max_hotlist_aps = PFN_HOTLIST_MAX_NUM_APS;
+			ptr->max_significant_wifi_change_aps = PFN_SWC_MAX_NUM_APS;
+			ret = (void *)ptr;
+			*len = sizeof(dhd_pno_gscan_capabilities_t);
+			break;
+
+		case DHD_PNO_GET_BATCH_RESULTS:
+			ret = dhd_get_gscan_batch_results(dhd, len);
+			break;
+		case DHD_PNO_GET_CHANNEL_LIST:
+			if (info) {
+				uint16 ch_list[WL_NUMCHANNELS];
+				uint32 *ptr, mem_needed, i;
+				int32 err, nchan = WL_NUMCHANNELS;
+				uint32 *gscan_band = (uint32 *) info;
+				uint8 band = 0;
+
+				/* No band specified?, nothing to do */
+				if ((*gscan_band & GSCAN_BAND_MASK) == 0) {
+					DHD_PNO(("No band specified\n"));
+					*len = 0;
+					break;
+				}
+
+				/* HAL and DHD use different bits for 2.4G and
+				 * 5G in bitmap. Hence translating it here...
+				 */
+				if (*gscan_band & GSCAN_BG_BAND_MASK)
+					band |= WLC_BAND_2G;
+				if (*gscan_band & GSCAN_A_BAND_MASK)
+					band |= WLC_BAND_5G;
+
+				err = _dhd_pno_get_channels(dhd, ch_list, &nchan,
+				                          (band & GSCAN_ABG_BAND_MASK),
+				                          !(*gscan_band & GSCAN_DFS_MASK));
+
+				if (err < 0) {
+					DHD_ERROR(("%s: failed to get valid channel list\n",
+						__FUNCTION__));
+					*len = 0;
+				} else {
+					mem_needed = sizeof(uint32) * nchan;
+					ptr = (uint32 *) kmalloc(mem_needed, GFP_KERNEL);
+					if (!ptr) {
+						DHD_ERROR(("%s: Unable to malloc %d bytes\n",
+							__FUNCTION__, mem_needed));
+						break;
+					}
+					for (i = 0; i < nchan; i++) {
+						ptr[i] = wf_channel2mhz(ch_list[i],
+							(ch_list[i] <= CH_MAX_2G_CHANNEL?
+							WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G));
+					}
+					ret = ptr;
+					*len = mem_needed;
+				}
+			} else {
+				*len = 0;
+				DHD_ERROR(("%s: info buffer is NULL\n", __FUNCTION__));
+			}
+			break;
+
+		default:
+			break;
+	}
+
+	return ret;
+
+}
+
+int dhd_pno_set_cfg_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type,
+    void *buf, uint8 flush)
+{
+	int err = BCME_OK;
+	dhd_pno_params_t *_params;
+	int i;
+	dhd_pno_status_info_t *_pno_state;
+
+	NULL_CHECK(dhd, "dhd is NULL", err);
+
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	_params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+	mutex_lock(&_pno_state->pno_mutex);
+
+	switch (type) {
+	case DHD_PNO_BATCH_SCAN_CFG_ID:
+		{
+			gscan_batch_params_t *ptr = (gscan_batch_params_t *)buf;
+			_params->params_gscan.bestn = ptr->bestn;
+			_params->params_gscan.mscan = ptr->mscan;
+			_params->params_gscan.buffer_threshold = ptr->buffer_threshold;
+		}
+		break;
+	case DHD_PNO_GEOFENCE_SCAN_CFG_ID:
+		{
+			gscan_hotlist_scan_params_t *ptr = (gscan_hotlist_scan_params_t *)buf;
+			struct dhd_pno_bssid *_pno_bssid;
+			struct bssid_t *bssid_ptr;
+			int8 flags;
+
+			if (flush) {
+				dhd_pno_reset_cfg_gscan(_params, _pno_state,
+				    GSCAN_FLUSH_HOTLIST_CFG);
+			}
+
+			if (!ptr->nbssid)
+				break;
+
+			if (!_params->params_gscan.nbssid_hotlist)
+				INIT_LIST_HEAD(&_params->params_gscan.hotlist_bssid_list);
+
+			if ((_params->params_gscan.nbssid_hotlist +
+			          ptr->nbssid) > PFN_SWC_MAX_NUM_APS) {
+				DHD_ERROR(("Excessive number of hotlist APs programmed %d\n",
+				     (_params->params_gscan.nbssid_hotlist +
+				      ptr->nbssid)));
+				err = BCME_RANGE;
+				goto exit;
+			}
+
+			for (i = 0, bssid_ptr = ptr->bssid; i < ptr->nbssid; i++, bssid_ptr++) {
+				_pno_bssid = kzalloc(sizeof(struct dhd_pno_bssid), GFP_KERNEL);
+
+				if (!_pno_bssid) {
+					DHD_ERROR(("_pno_bssid is NULL, cannot kalloc %zd bytes",
+					       sizeof(struct dhd_pno_bssid)));
+					err = BCME_NOMEM;
+					goto exit;
+				}
+				memcpy(&_pno_bssid->macaddr, &bssid_ptr->macaddr, ETHER_ADDR_LEN);
+
+				flags = (int8) bssid_ptr->rssi_reporting_threshold;
+				_pno_bssid->flags = flags  << WL_PFN_RSSI_SHIFT;
+				list_add_tail(&_pno_bssid->list,
+				   &_params->params_gscan.hotlist_bssid_list);
+			}
+
+			_params->params_gscan.nbssid_hotlist += ptr->nbssid;
+			_params->params_gscan.lost_ap_window = ptr->lost_ap_window;
+		}
+		break;
+	case DHD_PNO_SIGNIFICANT_SCAN_CFG_ID:
+		{
+			gscan_swc_params_t *ptr = (gscan_swc_params_t *)buf;
+			dhd_pno_significant_bssid_t *_pno_significant_change_bssid;
+			wl_pfn_significant_bssid_t *significant_bssid_ptr;
+
+			if (flush) {
+				dhd_pno_reset_cfg_gscan(_params, _pno_state,
+				   GSCAN_FLUSH_SIGNIFICANT_CFG);
+			}
+
+			if (!ptr->nbssid)
+				break;
+
+			if (!_params->params_gscan.nbssid_significant_change)
+				INIT_LIST_HEAD(&_params->params_gscan.significant_bssid_list);
+
+			if ((_params->params_gscan.nbssid_significant_change +
+			          ptr->nbssid) > PFN_SWC_MAX_NUM_APS) {
+				DHD_ERROR(("Excessive number of SWC APs programmed %d\n",
+				     (_params->params_gscan.nbssid_significant_change +
+				      ptr->nbssid)));
+				err = BCME_RANGE;
+				goto exit;
+			}
+
+			for (i = 0, significant_bssid_ptr = ptr->bssid_elem_list;
+			     i < ptr->nbssid; i++, significant_bssid_ptr++) {
+				_pno_significant_change_bssid =
+				      kzalloc(sizeof(dhd_pno_significant_bssid_t),
+				      GFP_KERNEL);
+
+				if (!_pno_significant_change_bssid) {
+					DHD_ERROR(("SWC bssidptr is NULL, cannot kalloc %zd bytes",
+					sizeof(dhd_pno_significant_bssid_t)));
+					err = BCME_NOMEM;
+					goto exit;
+				}
+				memcpy(&_pno_significant_change_bssid->BSSID,
+				    &significant_bssid_ptr->macaddr, ETHER_ADDR_LEN);
+				_pno_significant_change_bssid->rssi_low_threshold =
+				    significant_bssid_ptr->rssi_low_threshold;
+				_pno_significant_change_bssid->rssi_high_threshold =
+				    significant_bssid_ptr->rssi_high_threshold;
+				list_add_tail(&_pno_significant_change_bssid->list,
+				    &_params->params_gscan.significant_bssid_list);
+			}
+
+			_params->params_gscan.swc_nbssid_threshold = ptr->swc_threshold;
+			_params->params_gscan.swc_rssi_window_size = ptr->rssi_window;
+			_params->params_gscan.lost_ap_window = ptr->lost_ap_window;
+			_params->params_gscan.nbssid_significant_change += ptr->nbssid;
+
+		}
+		break;
+	case DHD_PNO_SCAN_CFG_ID:
+		{
+			int i, k;
+			uint16 band;
+			gscan_scan_params_t *ptr = (gscan_scan_params_t *)buf;
+			struct dhd_pno_gscan_channel_bucket *ch_bucket;
+
+			if (ptr->nchannel_buckets <= GSCAN_MAX_CH_BUCKETS) {
+				_params->params_gscan.nchannel_buckets = ptr->nchannel_buckets;
+
+				memcpy(_params->params_gscan.channel_bucket, ptr->channel_bucket,
+				    _params->params_gscan.nchannel_buckets *
+				    sizeof(struct dhd_pno_gscan_channel_bucket));
+				ch_bucket = _params->params_gscan.channel_bucket;
+
+				for (i = 0; i < ptr->nchannel_buckets; i++) {
+					band = ch_bucket[i].band;
+					for (k = 0; k < ptr->channel_bucket[i].num_channels; k++)  {
+						ch_bucket[i].chan_list[k] =
+						wf_mhz2channel(ptr->channel_bucket[i].chan_list[k],
+							0);
+					}
+					ch_bucket[i].band = 0;
+					/* HAL and DHD use different bits for 2.4G and
+					 * 5G in bitmap. Hence translating it here...
+					 */
+					if (band & GSCAN_BG_BAND_MASK)
+						ch_bucket[i].band |= WLC_BAND_2G;
+					if (band & GSCAN_A_BAND_MASK)
+						ch_bucket[i].band |= WLC_BAND_5G;
+					if (band & GSCAN_DFS_MASK)
+						ch_bucket[i].band |= GSCAN_DFS_MASK;
+
+					DHD_PNO(("band %d report_flag %d\n", ch_bucket[i].band,
+					          ch_bucket[i].report_flag));
+				}
+
+				for (i = 0; i < ptr->nchannel_buckets; i++) {
+					ch_bucket[i].bucket_freq_multiple =
+					ch_bucket[i].bucket_freq_multiple/ptr->scan_fr;
+				}
+				_params->params_gscan.scan_fr = ptr->scan_fr;
+
+				DHD_PNO(("num_buckets %d scan_fr %d\n", ptr->nchannel_buckets,
+				        _params->params_gscan.scan_fr));
+			} else {
+				err = BCME_BADARG;
+			}
+		}
+		break;
+	default:
+		err = BCME_BADARG;
+		break;
+	}
+exit:
+	mutex_unlock(&_pno_state->pno_mutex);
+	return err;
+
+}
+
+
+static bool
+validate_gscan_params(struct dhd_pno_gscan_params *gscan_params)
+{
+	unsigned int i, k;
+
+	if (!gscan_params->scan_fr || !gscan_params->nchannel_buckets) {
+		DHD_ERROR(("%s : Scan freq - %d or number of channel buckets - %d is empty\n",
+		 __FUNCTION__, gscan_params->scan_fr, gscan_params->nchannel_buckets));
+		return false;
+	}
+
+	for (i = 0; i < gscan_params->nchannel_buckets; i++) {
+		if (!gscan_params->channel_bucket[i].band) {
+			for (k = 0; k < gscan_params->channel_bucket[i].num_channels; k++) {
+				if (gscan_params->channel_bucket[i].chan_list[k] > CHANNEL_5G_MAX) {
+					DHD_ERROR(("%s : Unknown channel %d\n", __FUNCTION__,
+					 gscan_params->channel_bucket[i].chan_list[k]));
+					return false;
+				}
+			}
+		}
+	}
+
+	return true;
+}
+
+static int
+dhd_pno_set_for_gscan(dhd_pub_t *dhd, struct dhd_pno_gscan_params *gscan_params)
+{
+	int err = BCME_OK;
+	int mode, i = 0, k;
+	uint16 _chan_list[WL_NUMCHANNELS];
+	int tot_nchan = 0;
+	int num_buckets_to_fw, tot_num_buckets, gscan_param_size;
+	dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd);
+	wl_pfn_gscan_channel_bucket_t *ch_bucket = NULL;
+	wl_pfn_gscan_cfg_t *pfn_gscan_cfg_t = NULL;
+	wl_pfn_significant_bssid_t *p_pfn_significant_bssid = NULL;
+	wl_pfn_bssid_t *p_pfn_bssid = NULL;
+	wlc_ssid_ext_t *pssid_list = NULL;
+	dhd_pno_params_t	*params_legacy;
+	dhd_pno_params_t	*_params;
+
+	params_legacy = &_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS];
+	_params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	NULL_CHECK(gscan_params, "gscan_params is NULL", err);
+
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+
+	if (!dhd_support_sta_mode(dhd)) {
+		err = BCME_BADOPTION;
+		goto exit;
+	}
+	if (!WLS_SUPPORTED(_pno_state)) {
+		DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__));
+		err = BCME_UNSUPPORTED;
+		goto exit;
+	}
+
+	if (!validate_gscan_params(gscan_params)) {
+		DHD_ERROR(("%s : Cannot start gscan - bad params\n", __FUNCTION__));
+		err = BCME_BADARG;
+		goto exit;
+	}
+
+	if (!(ch_bucket = dhd_pno_gscan_create_channel_list(dhd, _pno_state,
+	    _chan_list, &tot_num_buckets, &num_buckets_to_fw)))
+		goto exit;
+
+	if (_pno_state->pno_mode & (DHD_PNO_GSCAN_MODE | DHD_PNO_LEGACY_MODE)) {
+		/* store current pno_mode before disabling pno */
+		mode = _pno_state->pno_mode;
+		err = dhd_pno_clean(dhd);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to disable PNO\n", __FUNCTION__));
+			goto exit;
+		}
+		/* restore the previous mode */
+		_pno_state->pno_mode = mode;
+	}
+
+	_pno_state->pno_mode |= DHD_PNO_GSCAN_MODE;
+
+	if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) {
+		pssid_list = dhd_pno_get_legacy_pno_ssid(dhd, _pno_state);
+
+		if (!pssid_list) {
+			err = BCME_NOMEM;
+			DHD_ERROR(("failed to get Legacy PNO SSID list\n"));
+			goto exit;
+		}
+
+		if ((err = _dhd_pno_add_ssid(dhd, pssid_list,
+			params_legacy->params_legacy.nssid)) < 0) {
+			DHD_ERROR(("failed to add ssid list (err %d) in firmware\n", err));
+			goto exit;
+		}
+	}
+
+	if ((err = _dhd_pno_set(dhd, _params, DHD_PNO_GSCAN_MODE)) < 0) {
+		DHD_ERROR(("failed to set call pno_set (err %d) in firmware\n", err));
+		goto exit;
+	}
+
+	gscan_param_size = sizeof(wl_pfn_gscan_cfg_t) +
+	          (num_buckets_to_fw - 1) * sizeof(wl_pfn_gscan_channel_bucket_t);
+	pfn_gscan_cfg_t = (wl_pfn_gscan_cfg_t *) MALLOC(dhd->osh, gscan_param_size);
+
+	if (!pfn_gscan_cfg_t) {
+		DHD_ERROR(("%s: failed to malloc memory of size %d\n",
+		   __FUNCTION__, gscan_param_size));
+		err = BCME_NOMEM;
+		goto exit;
+	}
+
+	pfn_gscan_cfg_t->version = WL_GSCAN_CFG_VERSION;
+	if (gscan_params->mscan)
+		pfn_gscan_cfg_t->buffer_threshold = gscan_params->buffer_threshold;
+	else
+		pfn_gscan_cfg_t->buffer_threshold = GSCAN_BATCH_NO_THR_SET;
+
+	if (gscan_params->nbssid_significant_change) {
+		pfn_gscan_cfg_t->swc_nbssid_threshold = gscan_params->swc_nbssid_threshold;
+		pfn_gscan_cfg_t->swc_rssi_window_size = gscan_params->swc_rssi_window_size;
+		pfn_gscan_cfg_t->lost_ap_window	= gscan_params->lost_ap_window;
+	} else {
+		pfn_gscan_cfg_t->swc_nbssid_threshold = 0;
+		pfn_gscan_cfg_t->swc_rssi_window_size = 0;
+		pfn_gscan_cfg_t->lost_ap_window	= 0;
+	}
+
+	pfn_gscan_cfg_t->flags =
+	         (gscan_params->send_all_results_flag & GSCAN_SEND_ALL_RESULTS_MASK);
+	pfn_gscan_cfg_t->count_of_channel_buckets = num_buckets_to_fw;
+	pfn_gscan_cfg_t->retry_threshold = GSCAN_RETRY_THRESHOLD;
+
+	for (i = 0, k = 0; i < tot_num_buckets; i++) {
+		if (ch_bucket[i].bucket_end_index  != CHANNEL_BUCKET_EMPTY_INDEX) {
+			pfn_gscan_cfg_t->channel_bucket[k].bucket_end_index =
+			           ch_bucket[i].bucket_end_index;
+			pfn_gscan_cfg_t->channel_bucket[k].bucket_freq_multiple =
+			           ch_bucket[i].bucket_freq_multiple;
+			pfn_gscan_cfg_t->channel_bucket[k].flag =
+			           ch_bucket[i].flag;
+			k++;
+		}
+	}
+
+	tot_nchan = pfn_gscan_cfg_t->channel_bucket[num_buckets_to_fw - 1].bucket_end_index + 1;
+	DHD_PNO(("Total channel num %d total ch_buckets  %d ch_buckets_to_fw %d \n", tot_nchan,
+	      tot_num_buckets, num_buckets_to_fw));
+
+	if ((err = _dhd_pno_cfg(dhd, _chan_list, tot_nchan)) < 0) {
+		DHD_ERROR(("%s : failed to set call pno_cfg (err %d) in firmware\n",
+			__FUNCTION__, err));
+		goto exit;
+	}
+
+	if ((err = _dhd_pno_gscan_cfg(dhd, pfn_gscan_cfg_t, gscan_param_size)) < 0) {
+		DHD_ERROR(("%s : failed to set call pno_gscan_cfg (err %d) in firmware\n",
+			__FUNCTION__, err));
+		goto exit;
+	}
+	if (gscan_params->nbssid_significant_change) {
+		dhd_pno_significant_bssid_t *iter, *next;
+
+		p_pfn_significant_bssid = kzalloc(sizeof(wl_pfn_significant_bssid_t) *
+		                   gscan_params->nbssid_significant_change, GFP_KERNEL);
+		if (p_pfn_significant_bssid == NULL) {
+			DHD_ERROR(("%s : failed to allocate memory %zd\n",
+				__FUNCTION__,
+				sizeof(wl_pfn_significant_bssid_t) *
+				gscan_params->nbssid_significant_change));
+			err = BCME_NOMEM;
+			goto exit;
+		}
+		i = 0;
+		/* convert dhd_pno_significant_bssid_t to wl_pfn_significant_bssid_t */
+		list_for_each_entry_safe(iter, next, &gscan_params->significant_bssid_list, list) {
+			p_pfn_significant_bssid[i].rssi_low_threshold = iter->rssi_low_threshold;
+			p_pfn_significant_bssid[i].rssi_high_threshold = iter->rssi_high_threshold;
+			memcpy(&p_pfn_significant_bssid[i].macaddr, &iter->BSSID, ETHER_ADDR_LEN);
+			i++;
+		}
+
+		DHD_PNO(("nbssid_significant_change %d \n",
+		    gscan_params->nbssid_significant_change));
+		err = _dhd_pno_add_significant_bssid(dhd, p_pfn_significant_bssid,
+		 gscan_params->nbssid_significant_change);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to call _dhd_pno_add_significant_bssid(err :%d)\n",
+				__FUNCTION__, err));
+			goto exit;
+		}
+	}
+
+	if (gscan_params->nbssid_hotlist) {
+		struct dhd_pno_bssid *iter, *next;
+		wl_pfn_bssid_t *ptr;
+		p_pfn_bssid = (wl_pfn_bssid_t *)kzalloc(sizeof(wl_pfn_bssid_t) *
+		       gscan_params->nbssid_hotlist, GFP_KERNEL);
+		if (p_pfn_bssid == NULL) {
+			DHD_ERROR(("%s : failed to allocate wl_pfn_bssid_t array"
+			" (count: %d)",
+				__FUNCTION__, _params->params_hotlist.nbssid));
+			err = BCME_ERROR;
+			_pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE;
+			goto exit;
+		}
+		ptr = p_pfn_bssid;
+		/* convert dhd_pno_bssid to wl_pfn_bssid */
+		DHD_PNO(("nhotlist %d\n", gscan_params->nbssid_hotlist));
+		list_for_each_entry_safe(iter, next,
+		    &gscan_params->hotlist_bssid_list, list) {
+			memcpy(&ptr->macaddr,
+			&iter->macaddr, ETHER_ADDR_LEN);
+			ptr->flags = iter->flags;
+			ptr++;
+		}
+
+		err = _dhd_pno_add_bssid(dhd, p_pfn_bssid, gscan_params->nbssid_hotlist);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to call _dhd_pno_add_bssid(err :%d)\n",
+				__FUNCTION__, err));
+			goto exit;
+		}
+	}
+
+	if ((err = _dhd_pno_enable(dhd, PNO_ON)) < 0)
+		DHD_ERROR(("%s : failed to enable PNO err %d\n", __FUNCTION__, err));
+
+exit:
+	/* clear mode in case of error */
+	if (err < 0) {
+		int ret = dhd_pno_clean(dhd);
+
+		if (ret < 0) {
+			 DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n",
+			 	__FUNCTION__, ret));
+		} else {
+			_pno_state->pno_mode &= ~DHD_PNO_GSCAN_MODE;
+		}
+	}
+	kfree(pssid_list);
+	kfree(p_pfn_significant_bssid);
+	kfree(p_pfn_bssid);
+	if (pfn_gscan_cfg_t)
+		MFREE(dhd->osh, pfn_gscan_cfg_t, gscan_param_size);
+	if (ch_bucket)
+		MFREE(dhd->osh, ch_bucket,
+		(tot_num_buckets * sizeof(wl_pfn_gscan_channel_bucket_t)));
+	return err;
+
+}
+
+static wl_pfn_gscan_channel_bucket_t *
+dhd_pno_gscan_create_channel_list(dhd_pub_t *dhd,
+                                  dhd_pno_status_info_t *_pno_state,
+                                  uint16 *chan_list,
+                                  uint32 *num_buckets,
+                                  uint32 *num_buckets_to_fw)
+{
+	int i, num_channels, err, nchan = WL_NUMCHANNELS;
+	uint16 *ptr = chan_list, max;
+	wl_pfn_gscan_channel_bucket_t *ch_bucket;
+	dhd_pno_params_t *_params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+	bool is_pno_legacy_running = _pno_state->pno_mode & DHD_PNO_LEGACY_MODE;
+	dhd_pno_gscan_channel_bucket_t *gscan_buckets = _params->params_gscan.channel_bucket;
+
+	if (is_pno_legacy_running)
+		*num_buckets = _params->params_gscan.nchannel_buckets + 1;
+	else
+		*num_buckets = _params->params_gscan.nchannel_buckets;
+
+	*num_buckets_to_fw = *num_buckets;
+
+	ch_bucket = (wl_pfn_gscan_channel_bucket_t *) MALLOC(dhd->osh,
+	   ((*num_buckets) * sizeof(wl_pfn_gscan_channel_bucket_t)));
+
+	if (!ch_bucket) {
+		DHD_ERROR(("%s: failed to malloc memory of size %zd\n",
+			__FUNCTION__, (*num_buckets) * sizeof(wl_pfn_gscan_channel_bucket_t)));
+		*num_buckets_to_fw = *num_buckets = 0;
+		return NULL;
+	}
+
+	max = gscan_buckets[0].bucket_freq_multiple;
+	num_channels = 0;
+	for (i = 0; i < _params->params_gscan.nchannel_buckets; i++) {
+		if (!gscan_buckets[i].band) {
+			num_channels += gscan_buckets[i].num_channels;
+			memcpy(ptr, gscan_buckets[i].chan_list,
+			    gscan_buckets[i].num_channels * sizeof(uint16));
+			ptr = ptr + gscan_buckets[i].num_channels;
+		} else {
+			/* get a valid channel list based on band B or A */
+			err = _dhd_pno_get_channels(dhd, ptr,
+			        &nchan, (gscan_buckets[i].band & GSCAN_ABG_BAND_MASK),
+			        !(gscan_buckets[i].band & GSCAN_DFS_MASK));
+
+			if (err < 0) {
+				DHD_ERROR(("%s: failed to get valid channel list(band : %d)\n",
+					__FUNCTION__, gscan_buckets[i].band));
+				MFREE(dhd->osh, ch_bucket,
+				      ((*num_buckets) * sizeof(wl_pfn_gscan_channel_bucket_t)));
+				*num_buckets_to_fw = *num_buckets = 0;
+				return NULL;
+			}
+
+			num_channels += nchan;
+			ptr = ptr + nchan;
+		}
+
+		ch_bucket[i].bucket_end_index = num_channels - 1;
+		ch_bucket[i].bucket_freq_multiple = gscan_buckets[i].bucket_freq_multiple;
+		ch_bucket[i].flag = gscan_buckets[i].report_flag;
+		ch_bucket[i].flag |= CH_BUCKET_GSCAN;
+		if (max < gscan_buckets[i].bucket_freq_multiple)
+			max = gscan_buckets[i].bucket_freq_multiple;
+		nchan = WL_NUMCHANNELS - num_channels;
+		DHD_PNO(("end_idx  %d freq_mult - %d\n",
+		ch_bucket[i].bucket_end_index, ch_bucket[i].bucket_freq_multiple));
+	}
+
+	_params->params_gscan.max_ch_bucket_freq = max;
+	/* Legacy PNO maybe running, which means we need to create a legacy PNO bucket
+	 * Get GCF of Legacy PNO and Gscan scanfreq
+	 */
+	if (is_pno_legacy_running) {
+		dhd_pno_params_t *_params1 = &_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS];
+		uint16 *legacy_chan_list = _params1->params_legacy.chan_list;
+		uint16 common_freq;
+		uint32 legacy_bucket_idx = _params->params_gscan.nchannel_buckets;
+
+		common_freq = gcd(_params->params_gscan.scan_fr,
+		         _params1->params_legacy.scan_fr);
+		max = gscan_buckets[0].bucket_freq_multiple;
+		/* GSCAN buckets */
+		for (i = 0; i < _params->params_gscan.nchannel_buckets; i++) {
+			ch_bucket[i].bucket_freq_multiple *= _params->params_gscan.scan_fr;
+			ch_bucket[i].bucket_freq_multiple /= common_freq;
+			if (max < gscan_buckets[i].bucket_freq_multiple)
+				max = gscan_buckets[i].bucket_freq_multiple;
+		}
+		/* Legacy PNO bucket */
+		ch_bucket[legacy_bucket_idx].bucket_freq_multiple =
+		                _params1->params_legacy.scan_fr;
+		ch_bucket[legacy_bucket_idx].bucket_freq_multiple /=
+		                common_freq;
+		_params->params_gscan.max_ch_bucket_freq = MAX(max,
+		       ch_bucket[legacy_bucket_idx].bucket_freq_multiple);
+		ch_bucket[legacy_bucket_idx].flag = CH_BUCKET_REPORT_REGULAR;
+		/* Now add channels to the legacy scan bucket */
+		for (i = 0; i < _params1->params_legacy.nchan; i++) {
+			ptr[i] = legacy_chan_list[i];
+			num_channels++;
+		}
+		ch_bucket[legacy_bucket_idx].bucket_end_index = num_channels - 1;
+		DHD_PNO(("end_idx  %d freq_mult - %d\n",
+		                   ch_bucket[legacy_bucket_idx].bucket_end_index,
+		                   ch_bucket[legacy_bucket_idx].bucket_freq_multiple));
+	}
+
+	return ch_bucket;
+}
+
+static int  dhd_pno_stop_for_gscan(dhd_pub_t *dhd)
+{
+	int err = BCME_OK;
+	int mode;
+	dhd_pno_status_info_t *_pno_state;
+	wlc_ssid_ext_t *pssid_list = NULL;
+
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+
+	if (!dhd_support_sta_mode(dhd)) {
+		err = BCME_BADOPTION;
+		goto exit;
+	}
+	if (!WLS_SUPPORTED(_pno_state)) {
+		DHD_ERROR(("%s : wifi location service is not supported\n",
+			__FUNCTION__));
+		err = BCME_UNSUPPORTED;
+		goto exit;
+	}
+
+	if (!(_pno_state->pno_mode & DHD_PNO_GSCAN_MODE)) {
+		DHD_ERROR(("%s : GSCAN is not enabled\n", __FUNCTION__));
+		goto exit;
+	}
+	if (_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan.mscan) {
+		/* retrieve the batching data from firmware into host */
+		dhd_wait_batch_results_complete(dhd);
+	}
+	mutex_lock(&_pno_state->pno_mutex);
+	mode = _pno_state->pno_mode & ~DHD_PNO_GSCAN_MODE;
+	err = dhd_pno_clean(dhd);
+	if (err < 0) {
+		DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n",
+			__FUNCTION__, err));
+		mutex_unlock(&_pno_state->pno_mutex);
+		return err;
+	}
+	_pno_state->pno_mode = mode;
+	mutex_unlock(&_pno_state->pno_mutex);
+
+	/* Reprogram Legacy PNO if it was running */
+	if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) {
+		struct dhd_pno_legacy_params *params_legacy;
+		uint16 chan_list[WL_NUMCHANNELS];
+
+		params_legacy = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS].params_legacy);
+		_pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE;
+		pssid_list = dhd_pno_get_legacy_pno_ssid(dhd, _pno_state);
+		if (!pssid_list) {
+			err = BCME_NOMEM;
+			DHD_ERROR(("failed to get Legacy PNO SSID list\n"));
+			goto exit;
+		}
+
+		DHD_PNO(("Restarting Legacy PNO SSID scan...\n"));
+		memcpy(chan_list, params_legacy->chan_list,
+		    (params_legacy->nchan * sizeof(uint16)));
+		err = dhd_pno_set_for_ssid(dhd, pssid_list, params_legacy->nssid,
+			params_legacy->scan_fr, params_legacy->pno_repeat,
+			params_legacy->pno_freq_expo_max, chan_list,
+			params_legacy->nchan);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to restart legacy PNO scan(err: %d)\n",
+				__FUNCTION__, err));
+			goto exit;
+		}
+
+	}
+
+exit:
+	kfree(pssid_list);
+	return err;
+}
+
+int
+dhd_pno_initiate_gscan_request(dhd_pub_t *dhd, bool run, bool flush)
+{
+	int err = BCME_OK;
+	dhd_pno_params_t *params;
+	dhd_pno_status_info_t *_pno_state;
+	struct dhd_pno_gscan_params *gscan_params;
+
+	NULL_CHECK(dhd, "dhd is NULL\n", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+
+	DHD_PNO(("%s enter - run %d flush %d\n", __FUNCTION__, run, flush));
+
+	params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+	gscan_params = &params->params_gscan;
+
+	if (run) {
+		err = dhd_pno_set_for_gscan(dhd, gscan_params);
+	} else {
+		if (flush) {
+			mutex_lock(&_pno_state->pno_mutex);
+			dhd_pno_reset_cfg_gscan(params, _pno_state, GSCAN_FLUSH_ALL_CFG);
+			mutex_unlock(&_pno_state->pno_mutex);
+		}
+		/* Need to stop all gscan */
+		err = dhd_pno_stop_for_gscan(dhd);
+	}
+
+	return err;
+}
+
+int dhd_pno_enable_full_scan_result(dhd_pub_t *dhd, bool real_time_flag)
+{
+	int err = BCME_OK;
+	dhd_pno_params_t *params;
+	dhd_pno_status_info_t *_pno_state;
+	struct dhd_pno_gscan_params *gscan_params;
+	uint8 old_flag;
+
+	NULL_CHECK(dhd, "dhd is NULL\n", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+
+	if (!WLS_SUPPORTED(_pno_state)) {
+		DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__));
+		err = BCME_UNSUPPORTED;
+		goto exit;
+	}
+
+	params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+	gscan_params = &params->params_gscan;
+
+	mutex_lock(&_pno_state->pno_mutex);
+
+	old_flag = gscan_params->send_all_results_flag;
+	gscan_params->send_all_results_flag = (uint8) real_time_flag;
+	if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) {
+	    if (old_flag != gscan_params->send_all_results_flag) {
+			wl_pfn_gscan_cfg_t gscan_cfg;
+
+			gscan_cfg.version = WL_GSCAN_CFG_VERSION;
+			gscan_cfg.flags = (gscan_params->send_all_results_flag &
+			                           GSCAN_SEND_ALL_RESULTS_MASK);
+			gscan_cfg.flags |= GSCAN_CFG_FLAGS_ONLY_MASK;
+
+			if ((err = _dhd_pno_gscan_cfg(dhd, &gscan_cfg,
+			            sizeof(wl_pfn_gscan_cfg_t))) < 0) {
+				DHD_ERROR(("%s : pno_gscan_cfg failed (err %d) in firmware\n",
+					__FUNCTION__, err));
+				goto exit_mutex_unlock;
+			}
+		} else {
+			DHD_PNO(("No change in flag - %d\n", old_flag));
+		}
+	} else {
+		DHD_PNO(("Gscan not started\n"));
+	}
+exit_mutex_unlock:
+	mutex_unlock(&_pno_state->pno_mutex);
+exit:
+	return err;
+}
+
+int dhd_gscan_batch_cache_cleanup(dhd_pub_t *dhd)
+{
+	int ret = 0;
+	dhd_pno_params_t *params;
+	struct dhd_pno_gscan_params *gscan_params;
+	dhd_pno_status_info_t *_pno_state;
+	gscan_results_cache_t *iter, *tmp;
+
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+	gscan_params = &params->params_gscan;
+	iter = gscan_params->gscan_batch_cache;
+
+	while (iter) {
+		if (iter->tot_consumed == iter->tot_count) {
+			tmp = iter->next;
+			kfree(iter);
+			iter = tmp;
+		} else
+			break;
+	}
+	gscan_params->gscan_batch_cache = iter;
+	ret = (iter == NULL);
+	return ret;
+}
+
+static int _dhd_pno_get_gscan_batch_from_fw(dhd_pub_t *dhd)
+{
+	int err = BCME_OK;
+	uint32 timestamp = 0, ts = 0, i, j, timediff;
+	dhd_pno_params_t *params;
+	dhd_pno_status_info_t *_pno_state;
+	wl_pfn_lnet_info_t *plnetinfo;
+	struct dhd_pno_gscan_params *gscan_params;
+	wl_pfn_lscanresults_t *plbestnet = NULL;
+	gscan_results_cache_t *iter, *tail;
+	wifi_gscan_result_t *result;
+	uint8 *nAPs_per_scan = NULL;
+	uint8 num_scans_in_cur_iter;
+	uint16 count, scan_id = 0;
+
+	NULL_CHECK(dhd, "dhd is NULL\n", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+
+	if (!WLS_SUPPORTED(_pno_state)) {
+		DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__));
+		err = BCME_UNSUPPORTED;
+		goto exit;
+	}
+
+	gscan_params = &params->params_gscan;
+	nAPs_per_scan = (uint8 *) MALLOC(dhd->osh, gscan_params->mscan);
+
+	if (!nAPs_per_scan) {
+		DHD_ERROR(("%s :Out of memory!! Cant malloc %d bytes\n", __FUNCTION__,
+		gscan_params->mscan));
+		err = BCME_NOMEM;
+		goto exit;
+	}
+
+	plbestnet = (wl_pfn_lscanresults_t *)MALLOC(dhd->osh, PNO_BESTNET_LEN);
+
+	mutex_lock(&_pno_state->pno_mutex);
+
+	iter = gscan_params->gscan_batch_cache;
+	/* If a cache has not been consumed , just delete it */
+	while (iter) {
+		iter->tot_consumed = iter->tot_count;
+		iter = iter->next;
+	}
+	dhd_gscan_batch_cache_cleanup(dhd);
+
+	if (!(_pno_state->pno_mode & DHD_PNO_GSCAN_MODE)) {
+		DHD_ERROR(("%s : GSCAN is not enabled\n", __FUNCTION__));
+		goto exit_mutex_unlock;
+	}
+
+	timediff = gscan_params->scan_fr * 1000;
+	timediff = timediff >> 1;
+
+	/* Ok, now lets start getting results from the FW */
+	plbestnet->status = PFN_INCOMPLETE;
+	tail = gscan_params->gscan_batch_cache;
+	while (plbestnet->status != PFN_COMPLETE) {
+		memset(plbestnet, 0, PNO_BESTNET_LEN);
+		err = dhd_iovar(dhd, 0, "pfnlbest", (char *)plbestnet, PNO_BESTNET_LEN, 0);
+		if (err < 0) {
+			DHD_ERROR(("%s : Cannot get all the batch results, err :%d\n",
+				__FUNCTION__, err));
+			goto exit_mutex_unlock;
+		}
+		DHD_PNO(("ver %d, status : %d, count %d\n", plbestnet->version,
+			plbestnet->status, plbestnet->count));
+		if (plbestnet->version != PFN_SCANRESULT_VERSION) {
+			err = BCME_VERSION;
+			DHD_ERROR(("bestnet version(%d) is mismatch with Driver version(%d)\n",
+				plbestnet->version, PFN_SCANRESULT_VERSION));
+			goto exit_mutex_unlock;
+		}
+
+		num_scans_in_cur_iter = 0;
+		timestamp = plbestnet->netinfo[0].timestamp;
+		/* find out how many scans' results did we get in this batch of FW results */
+		for (i = 0, count = 0; i < plbestnet->count; i++, count++) {
+			plnetinfo = &plbestnet->netinfo[i];
+			/* Unlikely to happen, but just in case the results from
+			 * FW doesnt make sense..... Assume its part of one single scan
+			 */
+			if (num_scans_in_cur_iter > gscan_params->mscan) {
+				num_scans_in_cur_iter = 0;
+				count = plbestnet->count;
+				break;
+			}
+			if (TIME_DIFF_MS(timestamp, plnetinfo->timestamp) > timediff) {
+				nAPs_per_scan[num_scans_in_cur_iter] = count;
+				count = 0;
+				num_scans_in_cur_iter++;
+			}
+			timestamp = plnetinfo->timestamp;
+		}
+		nAPs_per_scan[num_scans_in_cur_iter] = count;
+		num_scans_in_cur_iter++;
+
+		DHD_PNO(("num_scans_in_cur_iter %d\n", num_scans_in_cur_iter));
+		plnetinfo = &plbestnet->netinfo[0];
+
+		for (i = 0; i < num_scans_in_cur_iter; i++) {
+			iter = (gscan_results_cache_t *)
+			kmalloc(((nAPs_per_scan[i] - 1) * sizeof(wifi_gscan_result_t)) +
+			 sizeof(gscan_results_cache_t),
+			 GFP_KERNEL);
+			if (!iter) {
+				DHD_ERROR(("%s :Out of memory!! Cant malloc %d bytes\n",
+				 __FUNCTION__, gscan_params->mscan));
+				err = BCME_NOMEM;
+				goto exit_mutex_unlock;
+			}
+			/* Need this check because the new set of results from FW
+			 * maybe a continuation of previous sets' scan results
+			 */
+			if (TIME_DIFF_MS(ts, plnetinfo->timestamp) > timediff)
+				iter->scan_id = ++scan_id;
+			else
+				iter->scan_id = scan_id;
+
+			DHD_PNO(("scan_id %d tot_count %d\n", scan_id, nAPs_per_scan[i]));
+			iter->tot_count = nAPs_per_scan[i];
+			iter->tot_consumed = 0;
+			iter->flag = 0;
+			if (plnetinfo->flags & PFN_PARTIAL_SCAN_MASK) {
+				DHD_PNO(("This scan is aborted\n"));
+				iter->flag = (ENABLE << PNO_STATUS_ABORT);
+			} else if (gscan_params->reason) {
+				iter->flag = (ENABLE << gscan_params->reason);
+			}
+
+			if (!tail)
+				gscan_params->gscan_batch_cache = iter;
+			else
+				tail->next = iter;
+
+			tail = iter;
+			iter->next = NULL;
+			for (j = 0; j < nAPs_per_scan[i]; j++, plnetinfo++) {
+				result = &iter->results[j];
+
+				result->channel = wf_channel2mhz(plnetinfo->pfnsubnet.channel,
+					(plnetinfo->pfnsubnet.channel <= CH_MAX_2G_CHANNEL?
+					WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G));
+				result->rssi = (int32) plnetinfo->RSSI;
+				/* Info not available & not expected */
+				result->beacon_period = 0;
+				result->capability = 0;
+				result->ie_length = 0;
+				result->rtt = (uint64) plnetinfo->rtt0;
+				result->rtt_sd = (uint64) plnetinfo->rtt1;
+				result->ts = convert_fw_rel_time_to_systime(plnetinfo->timestamp);
+				ts = plnetinfo->timestamp;
+				if (plnetinfo->pfnsubnet.SSID_len > DOT11_MAX_SSID_LEN) {
+					DHD_ERROR(("%s: Invalid SSID length %d\n",
+					      __FUNCTION__, plnetinfo->pfnsubnet.SSID_len));
+					plnetinfo->pfnsubnet.SSID_len = DOT11_MAX_SSID_LEN;
+				}
+				memcpy(result->ssid, plnetinfo->pfnsubnet.SSID,
+					plnetinfo->pfnsubnet.SSID_len);
+				result->ssid[plnetinfo->pfnsubnet.SSID_len] = '\0';
+				memcpy(&result->macaddr, &plnetinfo->pfnsubnet.BSSID,
+				 ETHER_ADDR_LEN);
+
+				DHD_PNO(("\tSSID : "));
+				DHD_PNO(("\n"));
+				DHD_PNO(("\tBSSID: %02x:%02x:%02x:%02x:%02x:%02x\n",
+					result->macaddr.octet[0],
+					result->macaddr.octet[1],
+					result->macaddr.octet[2],
+					result->macaddr.octet[3],
+					result->macaddr.octet[4],
+					result->macaddr.octet[5]));
+				DHD_PNO(("\tchannel: %d, RSSI: %d, timestamp: %d ms\n",
+					plnetinfo->pfnsubnet.channel,
+					plnetinfo->RSSI, plnetinfo->timestamp));
+				DHD_PNO(("\tRTT0 : %d, RTT1: %d\n",
+				plnetinfo->rtt0, plnetinfo->rtt1));
+
+			}
+		}
+	}
+exit_mutex_unlock:
+	mutex_unlock(&_pno_state->pno_mutex);
+exit:
+	params->params_gscan.get_batch_flag = GSCAN_BATCH_RETRIEVAL_COMPLETE;
+	smp_wmb();
+	wake_up_interruptible(&_pno_state->batch_get_wait);
+	if (nAPs_per_scan)
+		MFREE(dhd->osh, nAPs_per_scan, gscan_params->mscan * sizeof(uint8));
+	if (plbestnet)
+		MFREE(dhd->osh, plbestnet, PNO_BESTNET_LEN);
+	DHD_PNO(("Batch retrieval done!\n"));
+	return err;
+}
+#endif /* GSCAN_SUPPORT */
+
+static int
+_dhd_pno_get_for_batch(dhd_pub_t *dhd, char *buf, int bufsize, int reason)
+{
+	int err = BCME_OK;
+	int i, j;
+	uint32 timestamp = 0;
+	dhd_pno_params_t *_params = NULL;
+	dhd_pno_status_info_t *_pno_state = NULL;
+	wl_pfn_lscanresults_t *plbestnet = NULL;
+	wl_pfn_lnet_info_t *plnetinfo;
+	dhd_pno_bestnet_entry_t *pbestnet_entry;
+	dhd_pno_best_header_t *pbestnetheader = NULL;
+	dhd_pno_scan_results_t *pscan_results = NULL, *siter, *snext;
+	bool allocate_header = FALSE;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	if (!dhd_support_sta_mode(dhd)) {
+		err = BCME_BADOPTION;
+		goto exit_no_unlock;
+	}
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+
+	if (!WLS_SUPPORTED(_pno_state)) {
+		DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__));
+		err = BCME_UNSUPPORTED;
+		goto exit_no_unlock;
+	}
+#ifdef GSCAN_SUPPORT
+	if (!(_pno_state->pno_mode & (DHD_PNO_BATCH_MODE | DHD_PNO_GSCAN_MODE))) {
+#else
+	if (!(_pno_state->pno_mode & DHD_PNO_BATCH_MODE)) {
+#endif /* GSCAN_SUPPORT */
+		DHD_ERROR(("%s: Batching SCAN mode is not enabled\n", __FUNCTION__));
+		goto exit_no_unlock;
+	}
+	mutex_lock(&_pno_state->pno_mutex);
+	_params = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS];
+	if (buf && bufsize) {
+		if (!list_empty(&_params->params_batch.get_batch.expired_scan_results_list)) {
+			/* need to check whether we have cashed data or not */
+			DHD_PNO(("%s: have cashed batching data in Driver\n",
+				__FUNCTION__));
+			/* convert to results format */
+			goto convert_format;
+		} else {
+			/* this is a first try to get batching results */
+			if (!list_empty(&_params->params_batch.get_batch.scan_results_list)) {
+				/* move the scan_results_list to expired_scan_results_lists */
+				list_for_each_entry_safe(siter, snext,
+					&_params->params_batch.get_batch.scan_results_list, list) {
+					list_move_tail(&siter->list,
+					&_params->params_batch.get_batch.expired_scan_results_list);
+				}
+				_params->params_batch.get_batch.top_node_cnt = 0;
+				_params->params_batch.get_batch.expired_tot_scan_cnt =
+					_params->params_batch.get_batch.tot_scan_cnt;
+				_params->params_batch.get_batch.tot_scan_cnt = 0;
+				goto convert_format;
+			}
+		}
+	}
+	/* create dhd_pno_scan_results_t whenever we got event WLC_E_PFN_BEST_BATCHING */
+	pscan_results = (dhd_pno_scan_results_t *)MALLOC(dhd->osh, SCAN_RESULTS_SIZE);
+	if (pscan_results == NULL) {
+		err = BCME_NOMEM;
+		DHD_ERROR(("failed to allocate dhd_pno_scan_results_t\n"));
+		goto exit;
+	}
+	pscan_results->bestnetheader = NULL;
+	pscan_results->cnt_header = 0;
+	/* add the element into list unless total node cnt is less than MAX_NODE_ CNT */
+	if (_params->params_batch.get_batch.top_node_cnt < MAX_NODE_CNT) {
+		list_add(&pscan_results->list, &_params->params_batch.get_batch.scan_results_list);
+		_params->params_batch.get_batch.top_node_cnt++;
+	} else {
+		int _removed_scan_cnt;
+		/* remove oldest one and add new one */
+		DHD_PNO(("%s : Remove oldest node and add new one\n", __FUNCTION__));
+		_removed_scan_cnt = _dhd_pno_clear_all_batch_results(dhd,
+			&_params->params_batch.get_batch.scan_results_list, TRUE);
+		_params->params_batch.get_batch.tot_scan_cnt -= _removed_scan_cnt;
+		list_add(&pscan_results->list, &_params->params_batch.get_batch.scan_results_list);
+
+	}
+	plbestnet = (wl_pfn_lscanresults_t *)MALLOC(dhd->osh, PNO_BESTNET_LEN);
+	NULL_CHECK(plbestnet, "failed to allocate buffer for bestnet", err);
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	memset(plbestnet, 0, PNO_BESTNET_LEN);
+	while (plbestnet->status != PFN_COMPLETE) {
+		memset(plbestnet, 0, PNO_BESTNET_LEN);
+		err = dhd_iovar(dhd, 0, "pfnlbest", (char *)plbestnet, PNO_BESTNET_LEN, 0);
+		if (err < 0) {
+			if (err == BCME_EPERM) {
+				DHD_ERROR(("we cannot get the batching data "
+					"during scanning in firmware, try again\n,"));
+				msleep(500);
+				continue;
+			} else {
+				DHD_ERROR(("%s : failed to execute pfnlbest (err :%d)\n",
+					__FUNCTION__, err));
+				goto exit;
+			}
+		}
+		DHD_PNO(("ver %d, status : %d, count %d\n", plbestnet->version,
+			plbestnet->status, plbestnet->count));
+		if (plbestnet->version != PFN_SCANRESULT_VERSION) {
+			err = BCME_VERSION;
+			DHD_ERROR(("bestnet version(%d) is mismatch with Driver version(%d)\n",
+				plbestnet->version, PFN_SCANRESULT_VERSION));
+			goto exit;
+		}
+		plnetinfo = plbestnet->netinfo;
+		for (i = 0; i < plbestnet->count; i++) {
+			pbestnet_entry = (dhd_pno_bestnet_entry_t *)
+			MALLOC(dhd->osh, BESTNET_ENTRY_SIZE);
+			if (pbestnet_entry == NULL) {
+				err = BCME_NOMEM;
+				DHD_ERROR(("failed to allocate dhd_pno_bestnet_entry\n"));
+				goto exit;
+			}
+			memset(pbestnet_entry, 0, BESTNET_ENTRY_SIZE);
+			pbestnet_entry->recorded_time = jiffies; /* record the current time */
+			/* create header for the first entry */
+			allocate_header = (i == 0)? TRUE : FALSE;
+			/* check whether the new generation is started or not */
+			if (timestamp && (TIME_DIFF(timestamp, plnetinfo->timestamp)
+				> TIME_MIN_DIFF))
+				allocate_header = TRUE;
+			timestamp = plnetinfo->timestamp;
+			if (allocate_header) {
+				pbestnetheader = (dhd_pno_best_header_t *)
+				MALLOC(dhd->osh, BEST_HEADER_SIZE);
+				if (pbestnetheader == NULL) {
+					err = BCME_NOMEM;
+					if (pbestnet_entry)
+						MFREE(dhd->osh, pbestnet_entry,
+						BESTNET_ENTRY_SIZE);
+					DHD_ERROR(("failed to allocate dhd_pno_bestnet_entry\n"));
+					goto exit;
+				}
+				/* increase total cnt of bestnet header */
+				pscan_results->cnt_header++;
+				/* need to record the reason to call dhd_pno_get_for_bach */
+				if (reason)
+					pbestnetheader->reason = (ENABLE << reason);
+				memset(pbestnetheader, 0, BEST_HEADER_SIZE);
+				/* initialize the head of linked list */
+				INIT_LIST_HEAD(&(pbestnetheader->entry_list));
+				/* link the pbestnet heaer into existed list */
+				if (pscan_results->bestnetheader == NULL)
+					/* In case of header */
+					pscan_results->bestnetheader = pbestnetheader;
+				else {
+					dhd_pno_best_header_t *head = pscan_results->bestnetheader;
+					pscan_results->bestnetheader = pbestnetheader;
+					pbestnetheader->next = head;
+				}
+			}
+			/* fills the best network info */
+			pbestnet_entry->channel = plnetinfo->pfnsubnet.channel;
+			pbestnet_entry->RSSI = plnetinfo->RSSI;
+			if (plnetinfo->flags & PFN_PARTIAL_SCAN_MASK) {
+				/* if RSSI is positive value, we assume that
+				 * this scan is aborted by other scan
+				 */
+				DHD_PNO(("This scan is aborted\n"));
+				pbestnetheader->reason = (ENABLE << PNO_STATUS_ABORT);
+			}
+			pbestnet_entry->rtt0 = plnetinfo->rtt0;
+			pbestnet_entry->rtt1 = plnetinfo->rtt1;
+			pbestnet_entry->timestamp = plnetinfo->timestamp;
+			if (plnetinfo->pfnsubnet.SSID_len > DOT11_MAX_SSID_LEN) {
+				DHD_ERROR(("%s: Invalid SSID length %d: trimming it to max\n",
+				      __FUNCTION__, plnetinfo->pfnsubnet.SSID_len));
+				plnetinfo->pfnsubnet.SSID_len = DOT11_MAX_SSID_LEN;
+			}
+			pbestnet_entry->SSID_len = plnetinfo->pfnsubnet.SSID_len;
+			memcpy(pbestnet_entry->SSID, plnetinfo->pfnsubnet.SSID,
+				pbestnet_entry->SSID_len);
+			memcpy(&pbestnet_entry->BSSID, &plnetinfo->pfnsubnet.BSSID, ETHER_ADDR_LEN);
+			/* add the element into list */
+			list_add_tail(&pbestnet_entry->list, &pbestnetheader->entry_list);
+			/* increase best entry count */
+			pbestnetheader->tot_cnt++;
+			pbestnetheader->tot_size += BESTNET_ENTRY_SIZE;
+			DHD_PNO(("Header %d\n", pscan_results->cnt_header - 1));
+			DHD_PNO(("\tSSID : "));
+			for (j = 0; j < plnetinfo->pfnsubnet.SSID_len; j++)
+				DHD_PNO(("%c", plnetinfo->pfnsubnet.SSID[j]));
+			DHD_PNO(("\n"));
+			DHD_PNO(("\tBSSID: %02x:%02x:%02x:%02x:%02x:%02x\n",
+				plnetinfo->pfnsubnet.BSSID.octet[0],
+				plnetinfo->pfnsubnet.BSSID.octet[1],
+				plnetinfo->pfnsubnet.BSSID.octet[2],
+				plnetinfo->pfnsubnet.BSSID.octet[3],
+				plnetinfo->pfnsubnet.BSSID.octet[4],
+				plnetinfo->pfnsubnet.BSSID.octet[5]));
+			DHD_PNO(("\tchannel: %d, RSSI: %d, timestamp: %d ms\n",
+				plnetinfo->pfnsubnet.channel,
+				plnetinfo->RSSI, plnetinfo->timestamp));
+			DHD_PNO(("\tRTT0 : %d, RTT1: %d\n", plnetinfo->rtt0, plnetinfo->rtt1));
+			plnetinfo++;
+		}
+	}
+	if (pscan_results->cnt_header == 0) {
+		/* In case that we didn't get any data from the firmware
+		 * Remove the current scan_result list from get_bach.scan_results_list.
+		 */
+		DHD_PNO(("NO BATCH DATA from Firmware, Delete current SCAN RESULT LIST\n"));
+		list_del(&pscan_results->list);
+		MFREE(dhd->osh, pscan_results, SCAN_RESULTS_SIZE);
+		_params->params_batch.get_batch.top_node_cnt--;
+	}
+	/* increase total scan count using current scan count */
+	_params->params_batch.get_batch.tot_scan_cnt += pscan_results->cnt_header;
+
+	if (buf && bufsize) {
+		/* This is a first try to get batching results */
+		if (!list_empty(&_params->params_batch.get_batch.scan_results_list)) {
+			/* move the scan_results_list to expired_scan_results_lists */
+			list_for_each_entry_safe(siter, snext,
+				&_params->params_batch.get_batch.scan_results_list, list) {
+				list_move_tail(&siter->list,
+					&_params->params_batch.get_batch.expired_scan_results_list);
+			}
+			/* reset gloval values after  moving to expired list */
+			_params->params_batch.get_batch.top_node_cnt = 0;
+			_params->params_batch.get_batch.expired_tot_scan_cnt =
+				_params->params_batch.get_batch.tot_scan_cnt;
+			_params->params_batch.get_batch.tot_scan_cnt = 0;
+		}
+convert_format:
+		err = _dhd_pno_convert_format(dhd, &_params->params_batch, buf, bufsize);
+		if (err < 0) {
+			DHD_ERROR(("failed to convert the data into upper layer format\n"));
+			goto exit;
+		}
+	}
+exit:
+	if (plbestnet)
+		MFREE(dhd->osh, plbestnet, PNO_BESTNET_LEN);
+	if (_params) {
+		_params->params_batch.get_batch.buf = NULL;
+		_params->params_batch.get_batch.bufsize = 0;
+		_params->params_batch.get_batch.bytes_written = err;
+	}
+	mutex_unlock(&_pno_state->pno_mutex);
+exit_no_unlock:
+	if (waitqueue_active(&_pno_state->get_batch_done.wait))
+		complete(&_pno_state->get_batch_done);
+	return err;
+}
+static void
+_dhd_pno_get_batch_handler(struct work_struct *work)
+{
+	dhd_pno_status_info_t *_pno_state;
+	dhd_pub_t *dhd;
+	struct dhd_pno_batch_params *params_batch;
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	_pno_state = container_of(work, struct dhd_pno_status_info, work);
+	dhd = _pno_state->dhd;
+	if (dhd == NULL) {
+		DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__));
+		return;
+	}
+
+#ifdef GSCAN_SUPPORT
+	if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) {
+		_dhd_pno_get_gscan_batch_from_fw(dhd);
+		return;
+	} else
+#endif /* GSCAN_SUPPORT */
+	{
+		params_batch = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS].params_batch;
+
+		_dhd_pno_get_for_batch(dhd, params_batch->get_batch.buf,
+			params_batch->get_batch.bufsize, params_batch->get_batch.reason);
+	}
+
+}
+
+int
+dhd_pno_get_for_batch(dhd_pub_t *dhd, char *buf, int bufsize, int reason)
+{
+	int err = BCME_OK;
+	char *pbuf = buf;
+	dhd_pno_status_info_t *_pno_state;
+	struct dhd_pno_batch_params *params_batch;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	if (!dhd_support_sta_mode(dhd)) {
+		err = BCME_BADOPTION;
+		goto exit;
+	}
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+
+	if (!WLS_SUPPORTED(_pno_state)) {
+		DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__));
+		err = BCME_UNSUPPORTED;
+		goto exit;
+	}
+	params_batch = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS].params_batch;
+#ifdef GSCAN_SUPPORT
+	if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) {
+		struct dhd_pno_gscan_params *gscan_params;
+		gscan_params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan;
+		gscan_params->reason = reason;
+		err = dhd_retreive_batch_scan_results(dhd);
+		if (err == BCME_OK) {
+			wait_event_interruptible_timeout(_pno_state->batch_get_wait,
+			     is_batch_retrieval_complete(gscan_params),
+			     msecs_to_jiffies(GSCAN_BATCH_GET_MAX_WAIT));
+		}
+	} else
+#endif
+	{
+		if (!(_pno_state->pno_mode & DHD_PNO_BATCH_MODE)) {
+			DHD_ERROR(("%s: Batching SCAN mode is not enabled\n", __FUNCTION__));
+			memset(pbuf, 0, bufsize);
+			pbuf += sprintf(pbuf, "scancount=%d\n", 0);
+			sprintf(pbuf, "%s", RESULTS_END_MARKER);
+			err = strlen(buf);
+			goto exit;
+		}
+		params_batch->get_batch.buf = buf;
+		params_batch->get_batch.bufsize = bufsize;
+		params_batch->get_batch.reason = reason;
+		params_batch->get_batch.bytes_written = 0;
+		schedule_work(&_pno_state->work);
+		wait_for_completion(&_pno_state->get_batch_done);
+	}
+
+#ifdef GSCAN_SUPPORT
+	if (!(_pno_state->pno_mode & DHD_PNO_GSCAN_MODE))
+#endif
+	err = params_batch->get_batch.bytes_written;
+exit:
+	return err;
+}
+
+int
+dhd_pno_stop_for_batch(dhd_pub_t *dhd)
+{
+	int err = BCME_OK;
+	int mode = 0;
+	int i = 0;
+	dhd_pno_status_info_t *_pno_state;
+	dhd_pno_params_t *_params;
+	wl_pfn_bssid_t *p_pfn_bssid = NULL;
+	wlc_ssid_ext_t *p_ssid_list = NULL;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	if (!dhd_support_sta_mode(dhd)) {
+		err = BCME_BADOPTION;
+		goto exit;
+	}
+	if (!WLS_SUPPORTED(_pno_state)) {
+		DHD_ERROR(("%s : wifi location service is not supported\n",
+			__FUNCTION__));
+		err = BCME_UNSUPPORTED;
+		goto exit;
+	}
+
+#ifdef GSCAN_SUPPORT
+	if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) {
+		DHD_PNO(("Gscan is ongoing, nothing to stop here\n"));
+		return err;
+	}
+#endif
+
+	if (!(_pno_state->pno_mode & DHD_PNO_BATCH_MODE)) {
+		DHD_ERROR(("%s : PNO BATCH MODE is not enabled\n", __FUNCTION__));
+		goto exit;
+	}
+	_pno_state->pno_mode &= ~DHD_PNO_BATCH_MODE;
+	if (_pno_state->pno_mode & (DHD_PNO_LEGACY_MODE | DHD_PNO_HOTLIST_MODE)) {
+		mode = _pno_state->pno_mode;
+		dhd_pno_clean(dhd);
+		_pno_state->pno_mode = mode;
+		/* restart Legacy PNO if the Legacy PNO is on */
+		if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) {
+			struct dhd_pno_legacy_params *_params_legacy;
+			_params_legacy =
+				&(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS].params_legacy);
+			p_ssid_list = dhd_pno_get_legacy_pno_ssid(dhd, _pno_state);
+			if (!p_ssid_list) {
+				err = BCME_NOMEM;
+				DHD_ERROR(("failed to get Legacy PNO SSID list\n"));
+				goto exit;
+			}
+			err = dhd_pno_set_for_ssid(dhd, p_ssid_list, _params_legacy->nssid,
+				_params_legacy->scan_fr, _params_legacy->pno_repeat,
+				_params_legacy->pno_freq_expo_max, _params_legacy->chan_list,
+				_params_legacy->nchan);
+			if (err < 0) {
+				DHD_ERROR(("%s : failed to restart legacy PNO scan(err: %d)\n",
+					__FUNCTION__, err));
+				goto exit;
+			}
+		} else if (_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE) {
+			struct dhd_pno_bssid *iter, *next;
+			_params = &(_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS]);
+			p_pfn_bssid = kzalloc(sizeof(wl_pfn_bssid_t) *
+				_params->params_hotlist.nbssid, GFP_KERNEL);
+			if (p_pfn_bssid == NULL) {
+				DHD_ERROR(("%s : failed to allocate wl_pfn_bssid_t array"
+					" (count: %d)",
+					__FUNCTION__, _params->params_hotlist.nbssid));
+				err = BCME_ERROR;
+				_pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE;
+				goto exit;
+			}
+			i = 0;
+			/* convert dhd_pno_bssid to wl_pfn_bssid */
+			list_for_each_entry_safe(iter, next,
+				&_params->params_hotlist.bssid_list, list) {
+				memcpy(&p_pfn_bssid[i].macaddr, &iter->macaddr, ETHER_ADDR_LEN);
+				p_pfn_bssid[i].flags = iter->flags;
+				i++;
+			}
+			err = dhd_pno_set_for_hotlist(dhd, p_pfn_bssid, &_params->params_hotlist);
+			if (err < 0) {
+				_pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE;
+				DHD_ERROR(("%s : failed to restart hotlist scan(err: %d)\n",
+					__FUNCTION__, err));
+				goto exit;
+			}
+		}
+	} else {
+		err = dhd_pno_clean(dhd);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n",
+				__FUNCTION__, err));
+			goto exit;
+		}
+	}
+exit:
+	_params = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS];
+	_dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_BATCH_MODE);
+	kfree(p_ssid_list);
+	kfree(p_pfn_bssid);
+	return err;
+}
+
+int
+dhd_pno_set_for_hotlist(dhd_pub_t *dhd, wl_pfn_bssid_t *p_pfn_bssid,
+	struct dhd_pno_hotlist_params *hotlist_params)
+{
+	int err = BCME_OK;
+	int i;
+	uint16 _chan_list[WL_NUMCHANNELS];
+	int rem_nchan = 0;
+	int tot_nchan = 0;
+	int mode = 0;
+	dhd_pno_params_t *_params;
+	dhd_pno_params_t *_params2;
+	struct dhd_pno_bssid *_pno_bssid;
+	dhd_pno_status_info_t *_pno_state;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	NULL_CHECK(hotlist_params, "hotlist_params is NULL", err);
+	NULL_CHECK(p_pfn_bssid, "p_pfn_bssid is NULL", err);
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+
+	if (!dhd_support_sta_mode(dhd)) {
+		err = BCME_BADOPTION;
+		goto exit;
+	}
+	if (!WLS_SUPPORTED(_pno_state)) {
+		DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__));
+		err = BCME_UNSUPPORTED;
+		goto exit;
+	}
+	_params = &_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS];
+	if (!(_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE)) {
+		_pno_state->pno_mode |= DHD_PNO_HOTLIST_MODE;
+		err = _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_HOTLIST_MODE);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to call _dhd_pno_reinitialize_prof\n",
+				__FUNCTION__));
+			goto exit;
+		}
+	}
+	_params->params_batch.nchan = hotlist_params->nchan;
+	_params->params_batch.scan_fr = hotlist_params->scan_fr;
+	if (hotlist_params->nchan)
+		memcpy(_params->params_hotlist.chan_list, hotlist_params->chan_list,
+			sizeof(_params->params_hotlist.chan_list));
+	memset(_chan_list, 0, sizeof(_chan_list));
+
+	rem_nchan = ARRAYSIZE(hotlist_params->chan_list) - hotlist_params->nchan;
+	if (hotlist_params->band == WLC_BAND_2G || hotlist_params->band == WLC_BAND_5G) {
+		/* get a valid channel list based on band B or A */
+		err = _dhd_pno_get_channels(dhd,
+		&_params->params_hotlist.chan_list[hotlist_params->nchan],
+		&rem_nchan, hotlist_params->band, FALSE);
+		if (err < 0) {
+			DHD_ERROR(("%s: failed to get valid channel list(band : %d)\n",
+				__FUNCTION__, hotlist_params->band));
+			goto exit;
+		}
+		/* now we need to update nchan because rem_chan has valid channel count */
+		_params->params_hotlist.nchan += rem_nchan;
+		/* need to sort channel list */
+		sort(_params->params_hotlist.chan_list, _params->params_hotlist.nchan,
+			sizeof(_params->params_hotlist.chan_list[0]), _dhd_pno_cmpfunc, NULL);
+	}
+#ifdef PNO_DEBUG
+{
+		int i;
+		DHD_PNO(("Channel list : "));
+		for (i = 0; i < _params->params_batch.nchan; i++) {
+			DHD_PNO(("%d ", _params->params_batch.chan_list[i]));
+		}
+		DHD_PNO(("\n"));
+}
+#endif
+	if (_params->params_hotlist.nchan) {
+		/* copy the channel list into local array */
+		memcpy(_chan_list, _params->params_hotlist.chan_list,
+			sizeof(_chan_list));
+		tot_nchan = _params->params_hotlist.nchan;
+	}
+	if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) {
+			DHD_PNO(("PNO SSID is on progress in firmware\n"));
+			/* store current pno_mode before disabling pno */
+			mode = _pno_state->pno_mode;
+			err = _dhd_pno_enable(dhd, PNO_OFF);
+			if (err < 0) {
+				DHD_ERROR(("%s : failed to disable PNO\n", __FUNCTION__));
+				goto exit;
+			}
+			/* restore the previous mode */
+			_pno_state->pno_mode = mode;
+			/* Use the superset for channelist between two mode */
+			_params2 = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]);
+			if (_params2->params_legacy.nchan > 0 &&
+				_params->params_hotlist.nchan > 0) {
+				err = _dhd_pno_chan_merge(_chan_list, &tot_nchan,
+					&_params2->params_legacy.chan_list[0],
+					_params2->params_legacy.nchan,
+					&_params->params_hotlist.chan_list[0],
+					_params->params_hotlist.nchan);
+				if (err < 0) {
+					DHD_ERROR(("%s : failed to merge channel list"
+						"between legacy and hotlist\n",
+						__FUNCTION__));
+					goto exit;
+				}
+			}
+
+	}
+
+	INIT_LIST_HEAD(&(_params->params_hotlist.bssid_list));
+
+	err = _dhd_pno_add_bssid(dhd, p_pfn_bssid, hotlist_params->nbssid);
+	if (err < 0) {
+		DHD_ERROR(("%s : failed to call _dhd_pno_add_bssid(err :%d)\n",
+			__FUNCTION__, err));
+		goto exit;
+	}
+	if ((err = _dhd_pno_set(dhd, _params, DHD_PNO_HOTLIST_MODE)) < 0) {
+		DHD_ERROR(("%s : failed to set call pno_set (err %d) in firmware\n",
+			__FUNCTION__, err));
+		goto exit;
+	}
+	if (tot_nchan > 0) {
+		if ((err = _dhd_pno_cfg(dhd, _chan_list, tot_nchan)) < 0) {
+			DHD_ERROR(("%s : failed to set call pno_cfg (err %d) in firmware\n",
+				__FUNCTION__, err));
+			goto exit;
+		}
+	}
+	for (i = 0; i < hotlist_params->nbssid; i++) {
+		_pno_bssid = kzalloc(sizeof(struct dhd_pno_bssid), GFP_KERNEL);
+		NULL_CHECK(_pno_bssid, "_pfn_bssid is NULL", err);
+		memcpy(&_pno_bssid->macaddr, &p_pfn_bssid[i].macaddr, ETHER_ADDR_LEN);
+		_pno_bssid->flags = p_pfn_bssid[i].flags;
+		list_add_tail(&_pno_bssid->list, &_params->params_hotlist.bssid_list);
+	}
+	_params->params_hotlist.nbssid = hotlist_params->nbssid;
+	if (_pno_state->pno_status == DHD_PNO_DISABLED) {
+		if ((err = _dhd_pno_enable(dhd, PNO_ON)) < 0)
+			DHD_ERROR(("%s : failed to enable PNO\n", __FUNCTION__));
+	}
+exit:
+	/* clear mode in case of error */
+	if (err < 0)
+		_pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE;
+	return err;
+}
+
+int
+dhd_pno_stop_for_hotlist(dhd_pub_t *dhd)
+{
+	int err = BCME_OK;
+	uint32 mode = 0;
+	dhd_pno_status_info_t *_pno_state;
+	dhd_pno_params_t *_params;
+	wlc_ssid_ext_t *p_ssid_list = NULL;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+
+	if (!WLS_SUPPORTED(_pno_state)) {
+		DHD_ERROR(("%s : wifi location service is not supported\n",
+			__FUNCTION__));
+		err = BCME_UNSUPPORTED;
+		goto exit;
+	}
+
+	if (!(_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE)) {
+		DHD_ERROR(("%s : Hotlist MODE is not enabled\n",
+			__FUNCTION__));
+		goto exit;
+	}
+	_pno_state->pno_mode &= ~DHD_PNO_BATCH_MODE;
+
+	if (_pno_state->pno_mode & (DHD_PNO_LEGACY_MODE | DHD_PNO_BATCH_MODE)) {
+		/* retrieve the batching data from firmware into host */
+		dhd_pno_get_for_batch(dhd, NULL, 0, PNO_STATUS_DISABLE);
+		/* save current pno_mode before calling dhd_pno_clean */
+		mode = _pno_state->pno_mode;
+		err = dhd_pno_clean(dhd);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n",
+				__FUNCTION__, err));
+			goto exit;
+		}
+		/* restore previos pno mode */
+		_pno_state->pno_mode = mode;
+		if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) {
+			/* restart Legacy PNO Scan */
+			struct dhd_pno_legacy_params *_params_legacy;
+			_params_legacy =
+			&(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS].params_legacy);
+			p_ssid_list = dhd_pno_get_legacy_pno_ssid(dhd, _pno_state);
+			if (!p_ssid_list) {
+				err = BCME_NOMEM;
+				DHD_ERROR(("failed to get Legacy PNO SSID list\n"));
+				goto exit;
+			}
+			err = dhd_pno_set_for_ssid(dhd, p_ssid_list, _params_legacy->nssid,
+				_params_legacy->scan_fr, _params_legacy->pno_repeat,
+				_params_legacy->pno_freq_expo_max, _params_legacy->chan_list,
+				_params_legacy->nchan);
+			if (err < 0) {
+				DHD_ERROR(("%s : failed to restart legacy PNO scan(err: %d)\n",
+					__FUNCTION__, err));
+				goto exit;
+			}
+		} else if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) {
+			/* restart Batching Scan */
+			_params = &(_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]);
+			/* restart BATCH SCAN */
+			err = dhd_pno_set_for_batch(dhd, &_params->params_batch);
+			if (err < 0) {
+				_pno_state->pno_mode &= ~DHD_PNO_BATCH_MODE;
+				DHD_ERROR(("%s : failed to restart batch scan(err: %d)\n",
+					__FUNCTION__,  err));
+				goto exit;
+			}
+		}
+	} else {
+		err = dhd_pno_clean(dhd);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n",
+				__FUNCTION__, err));
+			goto exit;
+		}
+	}
+exit:
+	kfree(p_ssid_list);
+	return err;
+}
+
+#ifdef GSCAN_SUPPORT
+int dhd_retreive_batch_scan_results(dhd_pub_t *dhd)
+{
+	int err = BCME_OK;
+	dhd_pno_status_info_t *_pno_state;
+	dhd_pno_params_t *_params;
+	struct dhd_pno_batch_params *params_batch;
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	_params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+
+	params_batch = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS].params_batch;
+	if (_params->params_gscan.get_batch_flag == GSCAN_BATCH_RETRIEVAL_COMPLETE) {
+		DHD_PNO(("Retreive batch results\n"));
+		params_batch->get_batch.buf = NULL;
+		params_batch->get_batch.bufsize = 0;
+		params_batch->get_batch.reason = PNO_STATUS_EVENT;
+		_params->params_gscan.get_batch_flag = GSCAN_BATCH_RETRIEVAL_IN_PROGRESS;
+		schedule_work(&_pno_state->work);
+	} else {
+		DHD_PNO(("%s : WLC_E_PFN_BEST_BATCHING retrieval"
+			"already in progress, will skip\n", __FUNCTION__));
+		err = BCME_ERROR;
+	}
+
+	return err;
+}
+
+/* Handle Significant WiFi Change (SWC) event from FW
+ * Send event to HAL when all results arrive from FW
+ */
+void * dhd_handle_swc_evt(dhd_pub_t *dhd, const void *event_data, int *send_evt_bytes)
+{
+	void *ptr = NULL;
+	dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd);
+	struct dhd_pno_gscan_params *gscan_params;
+	struct dhd_pno_swc_evt_param *params;
+	wl_pfn_swc_results_t *results = (wl_pfn_swc_results_t *)event_data;
+	wl_pfn_significant_net_t *change_array;
+	int i;
+
+	gscan_params = &(_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan);
+	params = &(gscan_params->param_significant);
+
+	if (!results->total_count) {
+		*send_evt_bytes = 0;
+		return ptr;
+	}
+
+	if (!params->results_rxed_so_far) {
+		if (!params->change_array) {
+			params->change_array = (wl_pfn_significant_net_t *)
+			kmalloc(sizeof(wl_pfn_significant_net_t) * results->total_count,
+			GFP_KERNEL);
+
+			if (!params->change_array) {
+				DHD_ERROR(("%s Cannot Malloc %zd bytes!!\n", __FUNCTION__,
+				sizeof(wl_pfn_significant_net_t) * results->total_count));
+				*send_evt_bytes = 0;
+				return ptr;
+			}
+		} else {
+			DHD_ERROR(("RX'ed WLC_E_PFN_SWC evt from FW, previous evt not complete!!"));
+			*send_evt_bytes = 0;
+			return ptr;
+		}
+
+	}
+
+	DHD_PNO(("%s: pkt_count %d total_count %d\n", __FUNCTION__,
+	results->pkt_count, results->total_count));
+
+	for (i = 0; i < results->pkt_count; i++) {
+		DHD_PNO(("\t %02x:%02x:%02x:%02x:%02x:%02x\n",
+		results->list[i].BSSID.octet[0],
+		results->list[i].BSSID.octet[1],
+		results->list[i].BSSID.octet[2],
+		results->list[i].BSSID.octet[3],
+		results->list[i].BSSID.octet[4],
+		results->list[i].BSSID.octet[5]));
+	}
+
+	change_array = &params->change_array[params->results_rxed_so_far];
+	memcpy(change_array, results->list, sizeof(wl_pfn_significant_net_t) * results->pkt_count);
+	params->results_rxed_so_far += results->pkt_count;
+
+	if (params->results_rxed_so_far == results->total_count) {
+		params->results_rxed_so_far = 0;
+		*send_evt_bytes = sizeof(wl_pfn_significant_net_t) * results->total_count;
+		/* Pack up change buffer to send up and reset
+		 * results_rxed_so_far, after its done.
+		 */
+		ptr = (void *) params->change_array;
+		/* expecting the callee to free this mem chunk */
+		params->change_array = NULL;
+	}
+	else {
+		*send_evt_bytes = 0;
+	}
+
+	return ptr;
+}
+
+void dhd_gscan_hotlist_cache_cleanup(dhd_pub_t *dhd, hotlist_type_t type)
+{
+	dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd);
+	struct dhd_pno_gscan_params *gscan_params;
+	gscan_results_cache_t *iter, *tmp;
+
+	if (!_pno_state)
+		return;
+	gscan_params = &(_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan);
+
+	if (type == HOTLIST_FOUND) {
+		iter = gscan_params->gscan_hotlist_found;
+		gscan_params->gscan_hotlist_found = NULL;
+	} else {
+		iter = gscan_params->gscan_hotlist_lost;
+		gscan_params->gscan_hotlist_lost = NULL;
+	}
+
+	while (iter) {
+		tmp = iter->next;
+		kfree(iter);
+		iter = tmp;
+	}
+
+	return;
+}
+
+void *
+dhd_process_full_gscan_result(dhd_pub_t *dhd, const void *data, int *size)
+{
+	wl_bss_info_t *bi = NULL;
+	wl_gscan_result_t *gscan_result;
+	wifi_gscan_result_t *result = NULL;
+	u32 bi_length = 0;
+	uint8 channel;
+	uint32 mem_needed;
+	struct timespec ts;
+
+	*size = 0;
+
+	gscan_result = (wl_gscan_result_t *)data;
+
+	if (!gscan_result) {
+		DHD_ERROR(("Invalid gscan result (NULL pointer)\n"));
+		goto exit;
+	}
+	if (!gscan_result->bss_info) {
+		DHD_ERROR(("Invalid gscan bss info (NULL pointer)\n"));
+		goto exit;
+	}
+	bi = &gscan_result->bss_info[0].info;
+	bi_length = dtoh32(bi->length);
+	if (bi_length != (dtoh32(gscan_result->buflen) -
+	       WL_GSCAN_RESULTS_FIXED_SIZE - WL_GSCAN_INFO_FIXED_FIELD_SIZE)) {
+		DHD_ERROR(("Invalid bss_info length %d: ignoring\n", bi_length));
+		goto exit;
+	}
+	if (bi->SSID_len > DOT11_MAX_SSID_LEN) {
+		DHD_ERROR(("Invalid SSID length %d: trimming it to max\n", bi->SSID_len));
+		bi->SSID_len = DOT11_MAX_SSID_LEN;
+	}
+
+	mem_needed = OFFSETOF(wifi_gscan_result_t, ie_data) + bi->ie_length;
+	result = kmalloc(mem_needed, GFP_KERNEL);
+
+	if (!result) {
+		DHD_ERROR(("%s Cannot malloc scan result buffer %d bytes\n",
+		  __FUNCTION__, mem_needed));
+		goto exit;
+	}
+
+	memcpy(result->ssid, bi->SSID, bi->SSID_len);
+	result->ssid[bi->SSID_len] = '\0';
+	channel = wf_chspec_ctlchan(bi->chanspec);
+	result->channel = wf_channel2mhz(channel,
+		(channel <= CH_MAX_2G_CHANNEL?
+		WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G));
+	result->rssi = (int32) bi->RSSI;
+	result->rtt = 0;
+	result->rtt_sd = 0;
+	get_monotonic_boottime(&ts);
+	result->ts = (uint64) TIMESPEC_TO_US(ts);
+	result->beacon_period = dtoh16(bi->beacon_period);
+	result->capability = dtoh16(bi->capability);
+	result->ie_length = dtoh32(bi->ie_length);
+	memcpy(&result->macaddr, &bi->BSSID, ETHER_ADDR_LEN);
+	memcpy(result->ie_data, ((uint8 *)bi + bi->ie_offset), bi->ie_length);
+	*size = mem_needed;
+exit:
+	return result;
+}
+
+void *dhd_handle_hotlist_scan_evt(dhd_pub_t *dhd, const void *event_data, int *send_evt_bytes,
+      hotlist_type_t type)
+{
+	void *ptr = NULL;
+	dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd);
+	struct dhd_pno_gscan_params *gscan_params;
+	wl_pfn_scanresults_t *results = (wl_pfn_scanresults_t *)event_data;
+	wifi_gscan_result_t *hotlist_found_array;
+	wl_pfn_net_info_t *plnetinfo;
+	gscan_results_cache_t *gscan_hotlist_cache;
+	int malloc_size = 0, i, total = 0;
+
+	gscan_params = &(_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan);
+
+	if (!results->count) {
+		*send_evt_bytes = 0;
+		return ptr;
+	}
+
+	malloc_size = sizeof(gscan_results_cache_t) +
+	((results->count - 1) * sizeof(wifi_gscan_result_t));
+	gscan_hotlist_cache = (gscan_results_cache_t *) kmalloc(malloc_size, GFP_KERNEL);
+
+	if (!gscan_hotlist_cache) {
+		DHD_ERROR(("%s Cannot Malloc %d bytes!!\n", __FUNCTION__, malloc_size));
+		*send_evt_bytes = 0;
+		return ptr;
+	}
+
+	if (type == HOTLIST_FOUND) {
+		gscan_hotlist_cache->next = gscan_params->gscan_hotlist_found;
+		gscan_params->gscan_hotlist_found = gscan_hotlist_cache;
+		DHD_PNO(("%s enter, FOUND results count %d\n", __FUNCTION__, results->count));
+	} else {
+		gscan_hotlist_cache->next = gscan_params->gscan_hotlist_lost;
+		gscan_params->gscan_hotlist_lost = gscan_hotlist_cache;
+		DHD_PNO(("%s enter, LOST results count %d\n", __FUNCTION__, results->count));
+	}
+
+	gscan_hotlist_cache->tot_count = results->count;
+	gscan_hotlist_cache->tot_consumed = 0;
+	plnetinfo = results->netinfo;
+
+	for (i = 0; i < results->count; i++, plnetinfo++) {
+		hotlist_found_array = &gscan_hotlist_cache->results[i];
+		hotlist_found_array->channel = wf_channel2mhz(plnetinfo->pfnsubnet.channel,
+			(plnetinfo->pfnsubnet.channel <= CH_MAX_2G_CHANNEL?
+			WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G));
+		hotlist_found_array->rssi = (int32) plnetinfo->RSSI;
+		/* Info not available & not expected */
+		hotlist_found_array->beacon_period = 0;
+		hotlist_found_array->capability = 0;
+		hotlist_found_array->ie_length = 0;
+
+		hotlist_found_array->ts = convert_fw_rel_time_to_systime(plnetinfo->timestamp);
+		if (plnetinfo->pfnsubnet.SSID_len > DOT11_MAX_SSID_LEN) {
+			DHD_ERROR(("Invalid SSID length %d: trimming it to max\n",
+			          plnetinfo->pfnsubnet.SSID_len));
+			plnetinfo->pfnsubnet.SSID_len = DOT11_MAX_SSID_LEN;
+		}
+		memcpy(hotlist_found_array->ssid, plnetinfo->pfnsubnet.SSID,
+			plnetinfo->pfnsubnet.SSID_len);
+		hotlist_found_array->ssid[plnetinfo->pfnsubnet.SSID_len] = '\0';
+
+		memcpy(&hotlist_found_array->macaddr, &plnetinfo->pfnsubnet.BSSID, ETHER_ADDR_LEN);
+		DHD_PNO(("\t%s %02x:%02x:%02x:%02x:%02x:%02x rssi %d\n", hotlist_found_array->ssid,
+		hotlist_found_array->macaddr.octet[0],
+		hotlist_found_array->macaddr.octet[1],
+		hotlist_found_array->macaddr.octet[2],
+		hotlist_found_array->macaddr.octet[3],
+		hotlist_found_array->macaddr.octet[4],
+		hotlist_found_array->macaddr.octet[5],
+		hotlist_found_array->rssi));
+	}
+
+
+	if (results->status == PFN_COMPLETE) {
+		ptr = (void *) gscan_hotlist_cache;
+		while (gscan_hotlist_cache) {
+			total += gscan_hotlist_cache->tot_count;
+			gscan_hotlist_cache = gscan_hotlist_cache->next;
+		}
+		*send_evt_bytes =  total * sizeof(wifi_gscan_result_t);
+	}
+
+	return ptr;
+}
+#endif /* GSCAN_SUPPORT */
+int
+dhd_pno_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data)
+{
+	int err = BCME_OK;
+	uint status, event_type, flags, datalen;
+	dhd_pno_status_info_t *_pno_state;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	if (!WLS_SUPPORTED(_pno_state)) {
+		DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__));
+		err = BCME_UNSUPPORTED;
+		goto exit;
+	}
+	event_type = ntoh32(event->event_type);
+	flags = ntoh16(event->flags);
+	status = ntoh32(event->status);
+	datalen = ntoh32(event->datalen);
+	DHD_PNO(("%s enter : event_type :%d\n", __FUNCTION__, event_type));
+	switch (event_type) {
+	case WLC_E_PFN_BSSID_NET_FOUND:
+	case WLC_E_PFN_BSSID_NET_LOST:
+		/* TODO : need to implement event logic using generic netlink */
+		break;
+	case WLC_E_PFN_BEST_BATCHING:
+#ifndef GSCAN_SUPPORT
+	{
+		struct dhd_pno_batch_params *params_batch;
+		params_batch = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS].params_batch;
+		if (!waitqueue_active(&_pno_state->get_batch_done.wait)) {
+			DHD_PNO(("%s : WLC_E_PFN_BEST_BATCHING\n", __FUNCTION__));
+			params_batch->get_batch.buf = NULL;
+			params_batch->get_batch.bufsize = 0;
+			params_batch->get_batch.reason = PNO_STATUS_EVENT;
+			schedule_work(&_pno_state->work);
+		} else
+			DHD_PNO(("%s : WLC_E_PFN_BEST_BATCHING"
+				"will skip this event\n", __FUNCTION__));
+		break;
+	}
+#else
+		break;
+#endif /* !GSCAN_SUPPORT */
+	default:
+		DHD_ERROR(("unknown event : %d\n", event_type));
+	}
+exit:
+	return err;
+}
+
+int dhd_pno_init(dhd_pub_t *dhd)
+{
+	int err = BCME_OK;
+	dhd_pno_status_info_t *_pno_state;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	UNUSED_PARAMETER(_dhd_pno_suspend);
+	if (dhd->pno_state)
+		goto exit;
+	dhd->pno_state = MALLOC(dhd->osh, sizeof(dhd_pno_status_info_t));
+	NULL_CHECK(dhd->pno_state, "failed to create dhd_pno_state", err);
+	memset(dhd->pno_state, 0, sizeof(dhd_pno_status_info_t));
+	/* need to check whether current firmware support batching and hotlist scan */
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	_pno_state->wls_supported = TRUE;
+	_pno_state->dhd = dhd;
+	mutex_init(&_pno_state->pno_mutex);
+	INIT_WORK(&_pno_state->work, _dhd_pno_get_batch_handler);
+	init_completion(&_pno_state->get_batch_done);
+#ifdef GSCAN_SUPPORT
+	init_waitqueue_head(&_pno_state->batch_get_wait);
+#endif /* GSCAN_SUPPORT */
+	err = dhd_iovar(dhd, 0, "pfnlbest", NULL, 0, 0);
+	if (err == BCME_UNSUPPORTED) {
+		_pno_state->wls_supported = FALSE;
+		DHD_INFO(("Current firmware doesn't support"
+			" Android Location Service\n"));
+	}
+exit:
+	return err;
+}
+int dhd_pno_deinit(dhd_pub_t *dhd)
+{
+	int err = BCME_OK;
+	dhd_pno_status_info_t *_pno_state;
+	dhd_pno_params_t *_params;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	NULL_CHECK(_pno_state, "pno_state is NULL", err);
+	/* may need to free legacy ssid_list */
+	if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) {
+		_params = &_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS];
+		_dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_LEGACY_MODE);
+	}
+
+#ifdef GSCAN_SUPPORT
+	if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) {
+		_params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+		mutex_lock(&_pno_state->pno_mutex);
+		dhd_pno_reset_cfg_gscan(_params, _pno_state, GSCAN_FLUSH_ALL_CFG);
+		mutex_unlock(&_pno_state->pno_mutex);
+	}
+#endif /* GSCAN_SUPPORT */
+
+	if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) {
+		_params = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS];
+		/* clear resource if the BATCH MODE is on */
+		_dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_BATCH_MODE);
+	}
+	cancel_work_sync(&_pno_state->work);
+	MFREE(dhd->osh, _pno_state, sizeof(dhd_pno_status_info_t));
+	dhd->pno_state = NULL;
+	return err;
+}
+#endif /* PNO_SUPPORT */
diff --git a/drivers/net/wireless/bcmdhd/dhd_pno.h b/drivers/net/wireless/bcmdhd/dhd_pno.h
new file mode 100644
index 0000000..b1357f9
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_pno.h
@@ -0,0 +1,497 @@
+/*
+ * Header file of Broadcom Dongle Host Driver (DHD)
+ * Prefered Network Offload code and Wi-Fi Location Service(WLS) code.
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ *
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ *
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_pno.h 423669 2013-09-18 13:01:55Z $
+ */
+
+#ifndef __DHD_PNO_H__
+#define __DHD_PNO_H__
+
+#if defined(PNO_SUPPORT)
+#define PNO_TLV_PREFIX			'S'
+#define PNO_TLV_VERSION			'1'
+#define PNO_TLV_SUBTYPE_LEGACY_PNO '2'
+#define PNO_TLV_RESERVED		'0'
+
+#define PNO_BATCHING_SET "SET"
+#define PNO_BATCHING_GET "GET"
+#define PNO_BATCHING_STOP "STOP"
+
+#define PNO_PARAMS_DELIMETER " "
+#define PNO_PARAM_CHANNEL_DELIMETER ","
+#define PNO_PARAM_VALUE_DELLIMETER '='
+#define PNO_PARAM_SCANFREQ "SCANFREQ"
+#define PNO_PARAM_BESTN	"BESTN"
+#define PNO_PARAM_MSCAN "MSCAN"
+#define PNO_PARAM_CHANNEL "CHANNEL"
+#define PNO_PARAM_RTT "RTT"
+
+#define PNO_TLV_TYPE_SSID_IE		'S'
+#define PNO_TLV_TYPE_TIME		'T'
+#define PNO_TLV_FREQ_REPEAT		'R'
+#define PNO_TLV_FREQ_EXPO_MAX		'M'
+
+#define MAXNUM_SSID_PER_ADD	16
+#define MAXNUM_PNO_PARAMS 2
+#define PNO_TLV_COMMON_LENGTH	1
+#define DEFAULT_BATCH_MSCAN 16
+
+#define RESULTS_END_MARKER "----\n"
+#define SCAN_END_MARKER "####\n"
+#define AP_END_MARKER "====\n"
+#define PNO_RSSI_MARGIN_DBM          30
+
+#ifdef GSCAN_SUPPORT
+
+#define GSCAN_MAX_CH_BUCKETS         8
+#define GSCAN_BG_BAND_MASK             (1 << 0)
+#define GSCAN_A_BAND_MASK              (1 << 1)
+#define GSCAN_DFS_MASK                 (1 << 2)
+#define GSCAN_ABG_BAND_MASK            (GSCAN_A_BAND_MASK | GSCAN_BG_BAND_MASK)
+#define GSCAN_BAND_MASK                (GSCAN_ABG_BAND_MASK | GSCAN_DFS_MASK)
+
+#define GSCAN_FLUSH_HOTLIST_CFG      (1 << 0)
+#define GSCAN_FLUSH_SIGNIFICANT_CFG  (1 << 1)
+#define GSCAN_FLUSH_SCAN_CFG         (1 << 2)
+#define GSCAN_FLUSH_ALL_CFG     (GSCAN_FLUSH_SCAN_CFG | \
+								GSCAN_FLUSH_SIGNIFICANT_CFG | \
+								GSCAN_FLUSH_HOTLIST_CFG)
+/* Do not change GSCAN_BATCH_RETRIEVAL_COMPLETE */
+#define GSCAN_BATCH_RETRIEVAL_COMPLETE      0
+#define GSCAN_BATCH_RETRIEVAL_IN_PROGRESS   1
+#define GSCAN_BATCH_NO_THR_SET              101
+#define GSCAN_LOST_AP_WINDOW_DEFAULT        4
+#define GSCAN_MIN_BSSID_TIMEOUT             90
+#define GSCAN_BATCH_GET_MAX_WAIT            500
+
+#define CHANNEL_BUCKET_EMPTY_INDEX                      0xFFFF
+#define GSCAN_RETRY_THRESHOLD              3
+#endif /* GSCAN_SUPPORT */
+
+enum scan_status {
+	/* SCAN ABORT by other scan */
+	PNO_STATUS_ABORT,
+	/* RTT is presence or not */
+	PNO_STATUS_RTT_PRESENCE,
+	/* Disable PNO by Driver */
+	PNO_STATUS_DISABLE,
+	/* NORMAL BATCHING GET */
+	PNO_STATUS_NORMAL,
+	/* WLC_E_PFN_BEST_BATCHING */
+	PNO_STATUS_EVENT,
+	PNO_STATUS_MAX
+};
+#define PNO_STATUS_ABORT_MASK 0x0001
+#define PNO_STATUS_RTT_MASK 0x0002
+#define PNO_STATUS_DISABLE_MASK 0x0004
+#define PNO_STATUS_OOM_MASK 0x0010
+
+enum index_mode {
+	INDEX_OF_LEGACY_PARAMS,
+	INDEX_OF_BATCH_PARAMS,
+	INDEX_OF_HOTLIST_PARAMS,
+	/* GSCAN includes hotlist scan and they do not run
+	 * independent of each other
+	 */
+#ifdef GSCAN_SUPPORT
+	INDEX_OF_GSCAN_PARAMS = INDEX_OF_HOTLIST_PARAMS,
+#endif /* GSCAN_SUPPORT */
+	INDEX_MODE_MAX
+};
+enum dhd_pno_status {
+	DHD_PNO_DISABLED,
+	DHD_PNO_ENABLED,
+	DHD_PNO_SUSPEND
+};
+typedef struct cmd_tlv {
+	char prefix;
+	char version;
+	char subtype;
+	char reserved;
+} cmd_tlv_t;
+#ifdef GSCAN_SUPPORT
+typedef enum {
+    WIFI_BAND_UNSPECIFIED,
+    WIFI_BAND_BG = 1,                       /* 2.4 GHz                   */
+    WIFI_BAND_A = 2,                        /* 5 GHz without DFS         */
+    WIFI_BAND_A_DFS = 4,                    /* 5 GHz DFS only            */
+    WIFI_BAND_A_WITH_DFS = 6,               /* 5 GHz with DFS            */
+    WIFI_BAND_ABG = 3,                      /* 2.4 GHz + 5 GHz; no DFS   */
+    WIFI_BAND_ABG_WITH_DFS = 7,             /* 2.4 GHz + 5 GHz with DFS  */
+} gscan_wifi_band_t;
+
+typedef enum {
+	HOTLIST_LOST,
+	HOTLIST_FOUND
+} hotlist_type_t;
+
+typedef enum dhd_pno_gscan_cmd_cfg {
+	DHD_PNO_BATCH_SCAN_CFG_ID,
+	DHD_PNO_GEOFENCE_SCAN_CFG_ID,
+	DHD_PNO_SIGNIFICANT_SCAN_CFG_ID,
+	DHD_PNO_SCAN_CFG_ID,
+	DHD_PNO_GET_CAPABILITIES,
+	DHD_PNO_GET_BATCH_RESULTS,
+	DHD_PNO_GET_CHANNEL_LIST
+} dhd_pno_gscan_cmd_cfg_t;
+
+typedef enum dhd_pno_mode {
+	/* Wi-Fi Legacy PNO Mode */
+	DHD_PNO_NONE_MODE   = 0,
+	DHD_PNO_LEGACY_MODE = (1 << (0)),
+	/* Wi-Fi Android BATCH SCAN Mode */
+	DHD_PNO_BATCH_MODE = (1 << (1)),
+	/* Wi-Fi Android Hotlist SCAN Mode */
+	DHD_PNO_HOTLIST_MODE = (1 << (2)),
+	/* Wi-Fi Google Android SCAN Mode */
+	DHD_PNO_GSCAN_MODE = (1 << (3))
+} dhd_pno_mode_t;
+#else
+typedef enum dhd_pno_mode {
+	/* Wi-Fi Legacy PNO Mode */
+	DHD_PNO_NONE_MODE   = 0,
+	DHD_PNO_LEGACY_MODE = (1 << (0)),
+	/* Wi-Fi Android BATCH SCAN Mode */
+	DHD_PNO_BATCH_MODE = (1 << (1)),
+	/* Wi-Fi Android Hotlist SCAN Mode */
+	DHD_PNO_HOTLIST_MODE = (1 << (2))
+} dhd_pno_mode_t;
+#endif /* GSCAN_SUPPORT */
+struct dhd_pno_ssid {
+	bool		hidden;
+	uint32		SSID_len;
+	uchar		SSID[DOT11_MAX_SSID_LEN];
+	struct list_head list;
+};
+struct dhd_pno_bssid {
+	struct ether_addr	macaddr;
+	/* Bit4: suppress_lost, Bit3: suppress_found */
+	uint16			flags;
+	struct list_head list;
+};
+typedef struct dhd_pno_bestnet_entry {
+	struct ether_addr BSSID;
+	uint8	SSID_len;
+	uint8	SSID[DOT11_MAX_SSID_LEN];
+	int8	RSSI;
+	uint8	channel;
+	uint32	timestamp;
+	uint16	rtt0; /* distance_cm based on RTT */
+	uint16	rtt1; /* distance_cm based on sample standard deviation */
+	unsigned long recorded_time;
+	struct list_head list;
+} dhd_pno_bestnet_entry_t;
+#define BESTNET_ENTRY_SIZE (sizeof(dhd_pno_bestnet_entry_t))
+
+typedef struct dhd_pno_bestnet_header {
+	struct dhd_pno_bestnet_header *next;
+	uint8 reason;
+	uint32 tot_cnt;
+	uint32 tot_size;
+	struct list_head entry_list;
+} dhd_pno_best_header_t;
+#define BEST_HEADER_SIZE (sizeof(dhd_pno_best_header_t))
+
+typedef struct dhd_pno_scan_results {
+	dhd_pno_best_header_t *bestnetheader;
+	uint8 cnt_header;
+	struct list_head list;
+} dhd_pno_scan_results_t;
+#define SCAN_RESULTS_SIZE (sizeof(dhd_pno_scan_results_t))
+
+struct dhd_pno_get_batch_info {
+	/* info related to get batch */
+	char *buf;
+	bool batch_started;
+	uint32 tot_scan_cnt;
+	uint32 expired_tot_scan_cnt;
+	uint32 top_node_cnt;
+	uint32 bufsize;
+	uint32 bytes_written;
+	int reason;
+	struct list_head scan_results_list;
+	struct list_head expired_scan_results_list;
+};
+struct dhd_pno_legacy_params {
+	uint16 scan_fr;
+	uint16 chan_list[WL_NUMCHANNELS];
+	uint16 nchan;
+	int pno_repeat;
+	int pno_freq_expo_max;
+	int nssid;
+	struct list_head ssid_list;
+};
+struct dhd_pno_batch_params {
+	int32 scan_fr;
+	uint8 bestn;
+	uint8 mscan;
+	uint8 band;
+	uint16 chan_list[WL_NUMCHANNELS];
+	uint16 nchan;
+	uint16 rtt;
+	struct dhd_pno_get_batch_info get_batch;
+};
+struct dhd_pno_hotlist_params {
+	uint8 band;
+	int32 scan_fr;
+	uint16 chan_list[WL_NUMCHANNELS];
+	uint16 nchan;
+	uint16 nbssid;
+	struct list_head bssid_list;
+};
+#ifdef GSCAN_SUPPORT
+typedef struct dhd_pno_gscan_channel_bucket {
+	uint16 bucket_freq_multiple;
+	/* band = 1 All bg band channels,
+	 * band = 2 All a band channels,
+	 * band = 0 chan_list channels
+	 */
+	uint16 band;
+	uint8 report_flag;
+	uint8 num_channels;
+	uint16 chan_list[GSCAN_MAX_CH_BUCKETS];
+} dhd_pno_gscan_channel_bucket_t;
+
+struct dhd_pno_swc_evt_param {
+	uint16 results_rxed_so_far;
+	wl_pfn_significant_net_t *change_array;
+};
+
+typedef struct wifi_gscan_result {
+    uint64 ts;                           /* Time of discovery           */
+    char ssid[DOT11_MAX_SSID_LEN+1];     /* null terminated             */
+    struct ether_addr	macaddr;         /* BSSID                      */
+    uint32 channel;                      /* channel frequency in MHz    */
+    int32 rssi;                          /* in db                       */
+    uint64 rtt;                          /* in nanoseconds              */
+    uint64 rtt_sd;                       /* standard deviation in rtt   */
+    uint16 beacon_period;                /* units are Kusec             */
+    uint16 capability;		            /* Capability information       */
+    uint32 ie_length;		            /* byte length of Information Elements */
+    char  ie_data[1];					/* IE data to follow       */
+} wifi_gscan_result_t;
+
+typedef struct gscan_results_cache {
+	struct gscan_results_cache *next;
+	uint8  scan_id;
+	uint8  flag;
+	uint8  tot_count;
+	uint8  tot_consumed;
+	wifi_gscan_result_t results[1];
+} gscan_results_cache_t;
+
+typedef struct dhd_pno_gscan_capabilities {
+    int max_scan_cache_size;
+    int max_scan_buckets;
+    int max_ap_cache_per_scan;
+    int max_rssi_sample_size;
+    int max_scan_reporting_threshold;
+    int max_hotlist_aps;
+    int max_significant_wifi_change_aps;
+} dhd_pno_gscan_capabilities_t;
+
+struct dhd_pno_gscan_params {
+	int32 scan_fr;
+	uint8 bestn;
+	uint8 mscan;
+	uint8 buffer_threshold;
+	uint8 swc_nbssid_threshold;
+	uint8 swc_rssi_window_size;
+	uint8 lost_ap_window;
+	uint8 nchannel_buckets;
+	uint8 reason;
+	uint8 get_batch_flag;
+	uint8 send_all_results_flag;
+	uint16 max_ch_bucket_freq;
+	gscan_results_cache_t *gscan_batch_cache;
+	gscan_results_cache_t *gscan_hotlist_found;
+	gscan_results_cache_t *gscan_hotlist_lost;
+	uint16 nbssid_significant_change;
+	uint16 nbssid_hotlist;
+	struct dhd_pno_swc_evt_param param_significant;
+	struct dhd_pno_gscan_channel_bucket channel_bucket[GSCAN_MAX_CH_BUCKETS];
+	struct list_head hotlist_bssid_list;
+	struct list_head significant_bssid_list;
+};
+
+typedef struct gscan_scan_params {
+	int32 scan_fr;
+	uint16 nchannel_buckets;
+	struct dhd_pno_gscan_channel_bucket channel_bucket[GSCAN_MAX_CH_BUCKETS];
+} gscan_scan_params_t;
+
+typedef struct gscan_batch_params {
+	uint8 bestn;
+	uint8 mscan;
+	uint8 buffer_threshold;
+} gscan_batch_params_t;
+
+struct bssid_t {
+	struct ether_addr	macaddr;
+	int16 rssi_reporting_threshold;  /* 0 -> no reporting threshold */
+};
+
+typedef struct gscan_hotlist_scan_params {
+	uint16 lost_ap_window; /* number of scans to declare LOST */
+	uint16 nbssid;   /* number of bssids  */
+	struct bssid_t bssid[1];  /* n bssids to follow */
+} gscan_hotlist_scan_params_t;
+
+/* SWC (Significant WiFi Change) params */
+typedef struct gscan_swc_params {
+	/* Rssi averaging window size */
+	uint8 rssi_window;
+	/* Number of scans that the AP has to be absent before
+	 * being declared LOST
+	 */
+	uint8 lost_ap_window;
+	/* if x  Aps have a significant change generate an event. */
+	uint8 swc_threshold;
+	uint8 nbssid;
+	wl_pfn_significant_bssid_t bssid_elem_list[1];
+} gscan_swc_params_t;
+
+typedef struct dhd_pno_significant_bssid {
+	struct ether_addr BSSID;
+	int8 rssi_low_threshold;
+	int8 rssi_high_threshold;
+	struct list_head list;
+} dhd_pno_significant_bssid_t;
+#endif /* GSCAN_SUPPORT */
+typedef union dhd_pno_params {
+	struct dhd_pno_legacy_params params_legacy;
+	struct dhd_pno_batch_params params_batch;
+	struct dhd_pno_hotlist_params params_hotlist;
+#ifdef GSCAN_SUPPORT
+	struct dhd_pno_gscan_params params_gscan;
+#endif /* GSCAN_SUPPORT */
+} dhd_pno_params_t;
+typedef struct dhd_pno_status_info {
+	uint8 pno_oui[DOT11_OUI_LEN];
+	dhd_pub_t *dhd;
+	struct work_struct work;
+	struct mutex pno_mutex;
+#ifdef GSCAN_SUPPORT
+	wait_queue_head_t batch_get_wait;
+#endif /* GSCAN_SUPPORT */
+	struct completion get_batch_done;
+	bool wls_supported; /* wifi location service supported or not */
+	enum dhd_pno_status pno_status;
+	enum dhd_pno_mode pno_mode;
+	dhd_pno_params_t pno_params_arr[INDEX_MODE_MAX];
+	struct list_head head_list;
+} dhd_pno_status_info_t;
+
+/* wrapper functions */
+extern int
+dhd_dev_pno_enable(struct net_device *dev, int enable);
+
+extern int
+dhd_dev_pno_stop_for_ssid(struct net_device *dev);
+
+extern int
+dhd_dev_pno_set_for_ssid(struct net_device *dev, wlc_ssid_ext_t* ssids_local, int nssid,
+	uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan);
+
+extern int
+dhd_dev_pno_set_for_batch(struct net_device *dev,
+	struct dhd_pno_batch_params *batch_params);
+
+extern int
+dhd_dev_pno_get_for_batch(struct net_device *dev, char *buf, int bufsize);
+
+extern int
+dhd_dev_pno_stop_for_batch(struct net_device *dev);
+
+extern int
+dhd_dev_pno_set_for_hotlist(struct net_device *dev, wl_pfn_bssid_t *p_pfn_bssid,
+	struct dhd_pno_hotlist_params *hotlist_params);
+extern int dhd_dev_pno_set_mac_oui(struct net_device *dev, uint8 *oui);
+#ifdef GSCAN_SUPPORT
+extern int
+dhd_dev_pno_set_cfg_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type,
+              void *buf, uint8 flush);
+extern void *
+dhd_dev_pno_get_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type, void *info,
+        uint32 *len);
+void dhd_dev_pno_lock_access_batch_results(struct net_device *dev);
+void dhd_dev_pno_unlock_access_batch_results(struct net_device *dev);
+extern int dhd_dev_pno_run_gscan(struct net_device *dev, bool run, bool flush);
+extern int dhd_dev_pno_enable_full_scan_result(struct net_device *dev, bool real_time);
+extern void * dhd_dev_swc_scan_event(struct net_device *dev, const void  *data,
+              int *send_evt_bytes);
+int dhd_retreive_batch_scan_results(dhd_pub_t *dhd);
+extern void * dhd_dev_hotlist_scan_event(struct net_device *dev,
+                         const void  *data, int *send_evt_bytes, hotlist_type_t type);
+void * dhd_dev_process_full_gscan_result(struct net_device *dev,
+                                        const void  *data, int *send_evt_bytes);
+extern int dhd_dev_gscan_batch_cache_cleanup(struct net_device *dev);
+extern void dhd_dev_gscan_hotlist_cache_cleanup(struct net_device *dev, hotlist_type_t type);
+extern void dhd_dev_wait_batch_results_complete(struct net_device *dev);
+#endif /* GSCAN_SUPPORT */
+/* dhd pno fuctions */
+extern int dhd_pno_stop_for_ssid(dhd_pub_t *dhd);
+extern int dhd_pno_enable(dhd_pub_t *dhd, int enable);
+extern int dhd_pno_set_for_ssid(dhd_pub_t *dhd, wlc_ssid_ext_t* ssid_list, int nssid,
+	uint16  scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan);
+
+extern int dhd_pno_set_for_batch(dhd_pub_t *dhd, struct dhd_pno_batch_params *batch_params);
+
+extern int dhd_pno_get_for_batch(dhd_pub_t *dhd, char *buf, int bufsize, int reason);
+
+
+extern int dhd_pno_stop_for_batch(dhd_pub_t *dhd);
+
+extern int dhd_pno_set_for_hotlist(dhd_pub_t *dhd, wl_pfn_bssid_t *p_pfn_bssid,
+	struct dhd_pno_hotlist_params *hotlist_params);
+
+extern int dhd_pno_stop_for_hotlist(dhd_pub_t *dhd);
+
+extern int dhd_pno_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data);
+extern int dhd_pno_init(dhd_pub_t *dhd);
+extern int dhd_pno_deinit(dhd_pub_t *dhd);
+extern bool dhd_is_pno_supported(dhd_pub_t *dhd);
+extern int dhd_pno_set_mac_oui(dhd_pub_t *dhd, uint8 *oui);
+#ifdef GSCAN_SUPPORT
+extern int dhd_pno_set_cfg_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type,
+                       void *buf, uint8 flush);
+extern void * dhd_pno_get_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type, void *info,
+                       uint32 *len);
+extern void dhd_pno_lock_batch_results(dhd_pub_t *dhd);
+extern void dhd_pno_unlock_batch_results(dhd_pub_t *dhd);
+extern int dhd_pno_initiate_gscan_request(dhd_pub_t *dhd, bool run, bool flush);
+extern int dhd_pno_enable_full_scan_result(dhd_pub_t *dhd, bool real_time_flag);
+extern int dhd_pno_cfg_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type, void *buf);
+extern int dhd_dev_retrieve_batch_scan(struct net_device *dev);
+extern void *dhd_handle_swc_evt(dhd_pub_t *dhd, const void *event_data, int *send_evt_bytes);
+extern void *dhd_handle_hotlist_scan_evt(dhd_pub_t *dhd, const void *event_data,
+                       int *send_evt_bytes, hotlist_type_t type);
+extern void *dhd_process_full_gscan_result(dhd_pub_t *dhd, const void *event_data,
+                       int *send_evt_bytes);
+extern int dhd_gscan_batch_cache_cleanup(dhd_pub_t *dhd);
+extern void dhd_gscan_hotlist_cache_cleanup(dhd_pub_t *dhd, hotlist_type_t type);
+extern void dhd_wait_batch_results_complete(dhd_pub_t *dhd);
+#endif /* GSCAN_SUPPORT */
+#endif
+
+#endif /* __DHD_PNO_H__ */
diff --git a/drivers/net/wireless/bcmdhd/dhd_proto.h b/drivers/net/wireless/bcmdhd/dhd_proto.h
new file mode 100644
index 0000000..87e0c83
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_proto.h
@@ -0,0 +1,150 @@
+/*
+ * Header file describing the internal (inter-module) DHD interfaces.
+ *
+ * Provides type definitions and function prototypes used to link the
+ * DHD OS, bus, and protocol modules.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_proto.h 472193 2014-04-23 06:27:38Z $
+ */
+
+#ifndef _dhd_proto_h_
+#define _dhd_proto_h_
+
+#include <dhdioctl.h>
+#include <wlioctl.h>
+#ifdef BCMPCIE
+#include <dhd_flowring.h>
+#endif
+
+#define DEFAULT_IOCTL_RESP_TIMEOUT	2000
+#ifndef IOCTL_RESP_TIMEOUT
+#ifdef BCMQT
+#define IOCTL_RESP_TIMEOUT  30000 /* In milli second */
+#else
+/* In milli second default value for Production FW */
+#define IOCTL_RESP_TIMEOUT  DEFAULT_IOCTL_RESP_TIMEOUT
+#endif /* BCMQT */
+#endif /* IOCTL_RESP_TIMEOUT */
+
+#ifndef MFG_IOCTL_RESP_TIMEOUT
+#define MFG_IOCTL_RESP_TIMEOUT  20000  /* In milli second default value for MFG FW */
+#endif /* MFG_IOCTL_RESP_TIMEOUT */
+
+/*
+ * Exported from the dhd protocol module (dhd_cdc, dhd_rndis)
+ */
+
+/* Linkage, sets prot link and updates hdrlen in pub */
+extern int dhd_prot_attach(dhd_pub_t *dhdp);
+
+/* Initilizes the index block for dma'ing indices */
+extern int dhd_prot_init_index_dma_block(dhd_pub_t *dhdp, uint8 type, uint32 length);
+
+/* Unlink, frees allocated protocol memory (including dhd_prot) */
+extern void dhd_prot_detach(dhd_pub_t *dhdp);
+
+/* Initialize protocol: sync w/dongle state.
+ * Sets dongle media info (iswl, drv_version, mac address).
+ */
+extern int dhd_sync_with_dongle(dhd_pub_t *dhdp);
+
+/* Protocol initialization needed for IOCTL/IOVAR path */
+extern int dhd_prot_init(dhd_pub_t *dhd);
+
+/* Stop protocol: sync w/dongle state. */
+extern void dhd_prot_stop(dhd_pub_t *dhdp);
+
+/* Add any protocol-specific data header.
+ * Caller must reserve prot_hdrlen prepend space.
+ */
+extern void dhd_prot_hdrpush(dhd_pub_t *, int ifidx, void *txp);
+extern uint dhd_prot_hdrlen(dhd_pub_t *, void *txp);
+
+/* Remove any protocol-specific data header. */
+extern int dhd_prot_hdrpull(dhd_pub_t *, int *ifidx, void *rxp, uchar *buf, uint *len);
+
+/* Use protocol to issue ioctl to dongle */
+extern int dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len);
+
+/* Handles a protocol control response asynchronously */
+extern int dhd_prot_ctl_complete(dhd_pub_t *dhd);
+
+/* Check for and handle local prot-specific iovar commands */
+extern int dhd_prot_iovar_op(dhd_pub_t *dhdp, const char *name,
+                             void *params, int plen, void *arg, int len, bool set);
+
+/* Add prot dump output to a buffer */
+extern void dhd_prot_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf);
+
+/* Update local copy of dongle statistics */
+extern void dhd_prot_dstats(dhd_pub_t *dhdp);
+
+extern int dhd_ioctl(dhd_pub_t * dhd_pub, dhd_ioctl_t *ioc, void * buf, uint buflen);
+
+extern int dhd_preinit_ioctls(dhd_pub_t *dhd);
+
+extern int dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf,
+	uint reorder_info_len, void **pkt, uint32 *free_buf_count);
+
+#ifdef BCMPCIE
+extern int dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd);
+extern int dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd);
+extern int dhd_prot_process_ctrlbuf(dhd_pub_t * dhd);
+extern bool dhd_prot_dtohsplit(dhd_pub_t * dhd);
+extern int dhd_post_dummy_msg(dhd_pub_t *dhd);
+extern int dhdmsgbuf_lpbk_req(dhd_pub_t *dhd, uint len);
+extern void dhd_prot_rx_dataoffset(dhd_pub_t *dhd, uint32 offset);
+extern int dhd_prot_txdata(dhd_pub_t *dhd, void *p, uint8 ifidx);
+extern int dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd, uint len, uint srcdelay, uint destdelay);
+
+extern int dhd_prot_flow_ring_create(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node);
+extern void dhd_prot_clean_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info);
+extern int dhd_post_tx_ring_item(dhd_pub_t *dhd, void *PKTBUF, uint8 ifindex);
+extern int dhd_prot_flow_ring_delete(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node);
+extern int dhd_prot_flow_ring_flush(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node);
+extern int dhd_prot_ringupd_dump(dhd_pub_t *dhd, struct bcmstrbuf *b);
+extern uint32 dhd_prot_metadatalen_set(dhd_pub_t *dhd, uint32 val, bool rx);
+extern uint32 dhd_prot_metadatalen_get(dhd_pub_t *dhd, bool rx);
+extern void dhd_prot_print_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info,
+	struct bcmstrbuf *strbuf);
+extern void dhd_prot_print_info(dhd_pub_t *dhd, struct bcmstrbuf *strbuf);
+extern void dhd_prot_update_txflowring(dhd_pub_t *dhdp, uint16 flow_id, void *msgring_info);
+extern void dhd_prot_txdata_write_flush(dhd_pub_t *dhd, uint16 flow_id, bool in_lock);
+extern uint32 dhd_prot_txp_threshold(dhd_pub_t *dhd, bool set, uint32 val);
+extern void dhd_prot_clear(dhd_pub_t *dhd);
+
+
+#endif /* BCMPCIE */
+
+/********************************
+ * For version-string expansion *
+ */
+#if defined(BDC)
+#define DHD_PROTOCOL "bdc"
+#elif defined(CDC)
+#define DHD_PROTOCOL "cdc"
+#else
+#define DHD_PROTOCOL "unknown"
+#endif /* proto */
+
+#endif /* _dhd_proto_h_ */
diff --git a/drivers/net/wireless/bcmdhd/dhd_rtt.c b/drivers/net/wireless/bcmdhd/dhd_rtt.c
new file mode 100644
index 0000000..b96bf1c
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_rtt.c
@@ -0,0 +1,693 @@
+/*
+ * Header file of Broadcom Dongle Host Driver (DHD)
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ *
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ *
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_rtt.c 423669 2014-07-01 13:01:55Z $
+ */
+#include <typedefs.h>
+#include <osl.h>
+
+#include <epivers.h>
+#include <bcmutils.h>
+
+#include <bcmendian.h>
+#include <linuxver.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/sort.h>
+#include <dngl_stats.h>
+#include <wlioctl.h>
+
+#include <proto/bcmevent.h>
+#include <dhd.h>
+#include <dhd_rtt.h>
+#include <dhd_dbg.h>
+#define GET_RTTSTATE(dhd) ((rtt_status_info_t *)dhd->rtt_state)
+static DEFINE_SPINLOCK(noti_list_lock);
+#define NULL_CHECK(p, s, err)  \
+			do { \
+				if (!(p)) { \
+					printf("NULL POINTER (%s) : %s\n", __FUNCTION__, (s)); \
+					err = BCME_ERROR; \
+					return err; \
+				} \
+			} while (0)
+
+#define RTT_TWO_SIDED(capability) \
+			do { \
+				if((capability & RTT_CAP_ONE_WAY) == (uint8) (RTT_CAP_ONE_WAY)) \
+					return FALSE; \
+				else \
+					return TRUE; \
+			} while (0)
+#define TIMESPEC_TO_US(ts)  (((uint64)(ts).tv_sec * USEC_PER_SEC) + \
+							(ts).tv_nsec / NSEC_PER_USEC)
+struct rtt_noti_callback {
+	struct list_head list;
+	void *ctx;
+	dhd_rtt_compl_noti_fn noti_fn;
+};
+
+typedef struct rtt_status_info {
+	dhd_pub_t *dhd;
+	int8 status;   /* current status for the current entry */
+	int8 cur_idx; /* current entry to do RTT */
+	int32 capability; /* rtt capability */
+	struct mutex rtt_mutex;
+	rtt_config_params_t rtt_config;
+	struct work_struct work;
+	struct list_head noti_fn_list;
+	struct list_head rtt_results_cache; /* store results for RTT */
+} rtt_status_info_t;
+static int dhd_rtt_start(dhd_pub_t *dhd);
+chanspec_t
+dhd_rtt_convert_to_chspec(wifi_channel_info_t channel)
+{
+	int bw;
+	/* set witdh to 20MHZ for 2.4G HZ */
+	if (channel.center_freq >= 2400 && channel.center_freq <= 2500) {
+		channel.width = WIFI_CHAN_WIDTH_20;
+	}
+	switch (channel.width) {
+	case WIFI_CHAN_WIDTH_20:
+		bw = WL_CHANSPEC_BW_20;
+		break;
+	case WIFI_CHAN_WIDTH_40:
+		bw = WL_CHANSPEC_BW_40;
+		break;
+	case WIFI_CHAN_WIDTH_80:
+		bw = WL_CHANSPEC_BW_80;
+		break;
+	case WIFI_CHAN_WIDTH_160:
+		bw = WL_CHANSPEC_BW_160;
+		break;
+	default:
+		DHD_ERROR(("doesn't support this bandwith : %d", channel.width));
+		bw = -1;
+		break;
+	}
+	return wf_channel2chspec(wf_mhz2channel(channel.center_freq, 0), bw);
+}
+int
+dhd_rtt_set_cfg(dhd_pub_t *dhd, rtt_config_params_t *params)
+{
+	int err = BCME_OK;
+	int idx;
+	rtt_status_info_t *rtt_status;
+	NULL_CHECK(params, "params is NULL", err);
+
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	rtt_status = GET_RTTSTATE(dhd);
+	NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+	if (rtt_status->capability == RTT_CAP_NONE) {
+		DHD_ERROR(("doesn't support RTT \n"));
+		return BCME_ERROR;
+	}
+	if (rtt_status->status == RTT_STARTED) {
+		DHD_ERROR(("rtt is already started\n"));
+		return BCME_BUSY;
+	}
+	DHD_RTT(("%s enter\n", __FUNCTION__));
+	bcopy(params, &rtt_status->rtt_config, sizeof(rtt_config_params_t));
+	rtt_status->status = RTT_STARTED;
+	/* start to measure RTT from 1th device */
+	/* find next target to trigger RTT */
+	for (idx = rtt_status->cur_idx; idx < rtt_status->rtt_config.rtt_target_cnt; idx++) {
+		/* skip the disabled device */
+		if (rtt_status->rtt_config.target_info[idx].disable)
+			continue;
+		else {
+			/*set the idx to cur_idx */
+			rtt_status->cur_idx = idx;
+			break;
+		}
+	}
+	if (idx < rtt_status->rtt_config.rtt_target_cnt) {
+		DHD_RTT(("rtt_status->cur_idx : %d\n", rtt_status->cur_idx));
+		schedule_work(&rtt_status->work);
+	}
+	return err;
+}
+int
+dhd_rtt_stop(dhd_pub_t *dhd, struct ether_addr *mac_list, int mac_cnt)
+{
+	int err = BCME_OK;
+	int i = 0, j = 0;
+	rtt_status_info_t *rtt_status;
+
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	rtt_status = GET_RTTSTATE(dhd);
+	NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+	if (rtt_status->status == RTT_STOPPED) {
+		DHD_ERROR(("rtt is not started\n"));
+		return BCME_OK;
+	}
+	DHD_RTT(("%s enter\n", __FUNCTION__));
+	mutex_lock(&rtt_status->rtt_mutex);
+	for (i = 0; i < mac_cnt; i++) {
+		for ( j = 0; j < rtt_status->rtt_config.rtt_target_cnt; j++) {
+				if (!bcmp(&mac_list[i],&rtt_status->rtt_config.target_info[j].addr,
+					ETHER_ADDR_LEN)) {
+					rtt_status->rtt_config.target_info[j].disable = TRUE;
+				}
+		}
+	}
+	mutex_unlock(&rtt_status->rtt_mutex);
+	return err;
+}
+
+static int
+dhd_rtt_start(dhd_pub_t *dhd) {
+	int err = BCME_OK;
+	int mpc = 0;
+	int nss, mcs, bw;
+	uint32 rspec = 0;
+	int8 eabuf[ETHER_ADDR_STR_LEN];
+	int8 chanbuf[CHANSPEC_STR_LEN];
+	bool set_mpc = FALSE;
+	wl_proxd_iovar_t proxd_iovar;
+	wl_proxd_params_iovar_t proxd_params;
+	wl_proxd_params_iovar_t proxd_tune;
+	wl_proxd_params_tof_method_t *tof_params = &proxd_params.u.tof_params;
+	rtt_status_info_t *rtt_status;
+	rtt_target_info_t *rtt_target;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+
+	rtt_status = GET_RTTSTATE(dhd);
+	NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+	/* turn off mpc in case of non-associted */
+	if (!dhd_is_associated(dhd, NULL, NULL)) {
+		err = dhd_iovar(dhd, 0, "mpc", (char *)&mpc, sizeof(mpc), 1);
+		if (err < 0) {
+				DHD_ERROR(("%s : failed to set proxd_tune\n", __FUNCTION__));
+				goto exit;
+		}
+		set_mpc = TRUE;
+	}
+
+	if (rtt_status->cur_idx >= rtt_status->rtt_config.rtt_target_cnt) {
+		err = BCME_RANGE;
+		goto exit;
+	}
+	DHD_RTT(("%s enter\n", __FUNCTION__));
+	bzero(&proxd_tune, sizeof(proxd_tune));
+	bzero(&proxd_params, sizeof(proxd_params));
+	mutex_lock(&rtt_status->rtt_mutex);
+	/* Get a target information */
+	rtt_target = &rtt_status->rtt_config.target_info[rtt_status->cur_idx];
+	mutex_unlock(&rtt_status->rtt_mutex);
+	/* set role */
+	proxd_iovar.method = PROXD_TOF_METHOD;
+	proxd_iovar.mode = WL_PROXD_MODE_INITIATOR;
+
+	/* make sure that proxd is stop */
+	//dhd_iovar(dhd, 0, "proxd_stop", (char *)NULL, 0, 1);
+
+	err = dhd_iovar(dhd, 0, "proxd", (char *)&proxd_iovar, sizeof(proxd_iovar), 1);
+	if (err < 0 && err != BCME_BUSY) {
+		DHD_ERROR(("%s : failed to set proxd %d\n", __FUNCTION__, err));
+		goto exit;
+	}
+	/* mac address */
+	bcopy(&rtt_target->addr, &tof_params->tgt_mac, ETHER_ADDR_LEN);
+	/* frame count */
+	if (rtt_target->ftm_cnt > RTT_MAX_FRAME_CNT)
+		rtt_target->ftm_cnt = RTT_MAX_FRAME_CNT;
+
+	if (rtt_target->ftm_cnt)
+		tof_params->ftm_cnt = htol16(rtt_target->ftm_cnt);
+	else
+		tof_params->ftm_cnt = htol16(DEFAULT_FTM_CNT);
+
+	if (rtt_target->retry_cnt > RTT_MAX_RETRY_CNT)
+		rtt_target->retry_cnt = RTT_MAX_RETRY_CNT;
+
+	/* retry count */
+	if (rtt_target->retry_cnt)
+		tof_params->retry_cnt = htol16(rtt_target->retry_cnt);
+	else
+		tof_params->retry_cnt = htol16(DEFAULT_RETRY_CNT);
+
+	/* chanspec */
+	tof_params->chanspec = htol16(rtt_target->chanspec);
+	/* set parameter */
+	DHD_RTT(("Target addr(Idx %d) %s, Channel : %s for RTT (ftm_cnt %d, rety_cnt : %d)\n",
+			rtt_status->cur_idx,
+			bcm_ether_ntoa((const struct ether_addr *)&rtt_target->addr, eabuf),
+			wf_chspec_ntoa(rtt_target->chanspec, chanbuf), rtt_target->ftm_cnt,
+			rtt_target->retry_cnt));
+
+	if (rtt_target->type == RTT_ONE_WAY) {
+		proxd_tune.u.tof_tune.flags = htol32(WL_PROXD_FLAG_ONEWAY);
+		/* report RTT results for initiator */
+		proxd_tune.u.tof_tune.flags |= htol32(WL_PROXD_FLAG_INITIATOR_RPTRTT);
+		proxd_tune.u.tof_tune.vhtack = 0;
+		tof_params->tx_rate = htol16(WL_RATE_6M);
+		tof_params->vht_rate = htol16((WL_RATE_6M >> 16));
+	} else { /* RTT TWO WAY */
+		/* initiator will send the rtt result to the target  */
+		proxd_tune.u.tof_tune.flags = htol32(WL_PROXD_FLAG_INITIATOR_REPORT);
+		tof_params->timeout = 10; /* 10ms for timeout */
+		rspec = WL_RSPEC_ENCODE_VHT;	/* 11ac VHT */
+		/* TODO : need to find a way to set nss and mcs */
+		nss = 1; /* default Nss = 1 */
+		mcs = 0; /* default MCS 0 */
+		rspec |= (nss << WL_RSPEC_VHT_NSS_SHIFT) | mcs;
+		bw = 0;
+		switch (CHSPEC_BW(rtt_target->chanspec)) {
+		case WL_CHANSPEC_BW_20:
+			bw = WL_RSPEC_BW_20MHZ;
+			break;
+		case WL_CHANSPEC_BW_40:
+			bw = WL_RSPEC_BW_40MHZ;
+			break;
+		case WL_CHANSPEC_BW_80:
+			bw = WL_RSPEC_BW_80MHZ;
+			break;
+		case WL_CHANSPEC_BW_160:
+			bw = WL_RSPEC_BW_160MHZ;
+			break;
+		}
+		rspec |= bw;
+		tof_params->tx_rate = htol16(rspec & 0xffff);
+		tof_params->vht_rate = htol16(rspec >> 16);
+	}
+
+	/* Set Method to TOF */
+	proxd_tune.method = PROXD_TOF_METHOD;
+	err = dhd_iovar(dhd, 0, "proxd_tune", (char *)&proxd_tune, sizeof(proxd_tune), 1);
+	if (err < 0) {
+		DHD_ERROR(("%s : failed to set proxd_tune %d\n", __FUNCTION__, err));
+		goto exit;
+	}
+
+	/* Set Method to TOF */
+	proxd_params.method = PROXD_TOF_METHOD;
+	err = dhd_iovar(dhd, 0, "proxd_params", (char *)&proxd_params, sizeof(proxd_params), 1);
+	if (err < 0) {
+		DHD_ERROR(("%s : failed to set proxd_params %d\n", __FUNCTION__, err));
+		goto exit;
+	}
+	err = dhd_iovar(dhd, 0, "proxd_find", (char *)NULL, 0, 1);
+	if (err < 0) {
+		DHD_ERROR(("%s : failed to set proxd_find %d\n", __FUNCTION__, err));
+		goto exit;
+	}
+exit:
+	if (err < 0) {
+		rtt_status->status = RTT_STOPPED;
+		if (set_mpc) {
+			/* enable mpc again in case of error */
+			  mpc = 1;
+			  err = dhd_iovar(dhd, 0, "mpc", (char *)&mpc, sizeof(mpc), 1);
+		}
+	}
+	return err;
+}
+int
+dhd_rtt_register_noti_callback(dhd_pub_t *dhd, void *ctx, dhd_rtt_compl_noti_fn noti_fn)
+{
+	int err = BCME_OK;
+	struct rtt_noti_callback *cb = NULL, *iter;
+	rtt_status_info_t *rtt_status;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(noti_fn, "noti_fn is NULL", err);
+
+	rtt_status = GET_RTTSTATE(dhd);
+	NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+	spin_lock_bh(&noti_list_lock);
+	list_for_each_entry(iter, &rtt_status->noti_fn_list, list)
+		if (iter->noti_fn == noti_fn) {
+			goto exit;
+		}
+	cb = kmalloc(sizeof(struct rtt_noti_callback), GFP_ATOMIC);
+	if (!cb) {
+		err = -ENOMEM;
+		goto exit;
+	}
+	cb->noti_fn = noti_fn;
+	cb->ctx = ctx;
+	list_add(&cb->list, &rtt_status->noti_fn_list);
+exit:
+	spin_unlock_bh(&noti_list_lock);
+	return err;
+}
+
+int
+dhd_rtt_unregister_noti_callback(dhd_pub_t *dhd, dhd_rtt_compl_noti_fn noti_fn)
+{
+	int err = BCME_OK;
+	struct rtt_noti_callback *cb = NULL, *iter;
+	rtt_status_info_t *rtt_status;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(noti_fn, "noti_fn is NULL", err);
+	rtt_status = GET_RTTSTATE(dhd);
+	NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+	spin_lock_bh(&noti_list_lock);
+	list_for_each_entry(iter, &rtt_status->noti_fn_list, list)
+		if (iter->noti_fn == noti_fn) {
+			cb = iter;
+			list_del(&cb->list);
+			break;
+		}
+	spin_unlock_bh(&noti_list_lock);
+	if (cb) {
+		kfree(cb);
+	}
+	return err;
+}
+static int
+dhd_rtt_convert_to_host(rtt_result_t *rtt_results, const wl_proxd_event_data_t* evp)
+{
+	int err = BCME_OK;
+	int i;
+	char eabuf[ETHER_ADDR_STR_LEN];
+	char diststr[40];
+	struct timespec ts;
+	NULL_CHECK(rtt_results, "rtt_results is NULL", err);
+	NULL_CHECK(evp, "evp is NULL", err);
+	DHD_RTT(("%s enter\n", __FUNCTION__));
+	rtt_results->distance = ntoh32(evp->distance);
+	rtt_results->sdrtt = ntoh32(evp->sdrtt);
+	rtt_results->ftm_cnt = ntoh16(evp->ftm_cnt);
+	rtt_results->avg_rssi = ntoh16(evp->avg_rssi);
+	rtt_results->validfrmcnt = ntoh16(evp->validfrmcnt);
+	rtt_results->meanrtt = ntoh32(evp->meanrtt);
+	rtt_results->modertt = ntoh32(evp->modertt);
+	rtt_results->medianrtt = ntoh32(evp->medianrtt);
+	rtt_results->err_code = evp->err_code;
+	rtt_results->tx_rate.preamble = (evp->OFDM_frame_type == TOF_FRAME_RATE_VHT)? 3 : 0;
+	rtt_results->tx_rate.nss = 0; /* 1 x 1 */
+	rtt_results->tx_rate.bw = (evp->bandwidth == TOF_BW_80MHZ)? 2 : (evp->bandwidth == TOF_BW_40MHZ)? 1 : 0;
+	rtt_results->TOF_type = evp->TOF_type;
+	if (evp->TOF_type == TOF_TYPE_ONE_WAY) {
+		/* convert to 100kbps unit */
+		rtt_results->tx_rate.bitrate = WL_RATE_6M * 5;
+		rtt_results->tx_rate.rateMcsIdx = WL_RATE_6M;
+	} else {
+		/* TODO : check tx rate for two way */
+		rtt_results->tx_rate.bitrate = WL_RATE_6M * 5;
+		rtt_results->tx_rate.rateMcsIdx = 0; /* MCS 0 */
+	}
+	memset(diststr, 0, sizeof(diststr));
+	if (rtt_results->distance == 0xffffffff || rtt_results->distance == 0)
+		sprintf(diststr, "distance=-1m\n");
+	else
+		sprintf(diststr, "distance=%d.%d m\n", rtt_results->distance>>4, ((rtt_results->distance&0xf)*125)>>1);
+
+
+	if (ntoh32(evp->mode) == WL_PROXD_MODE_INITIATOR) {
+		DHD_RTT(("Target:(%s) %s;\n", bcm_ether_ntoa((&evp->peer_mac), eabuf), diststr));
+		DHD_RTT(("RTT : mean %d mode %d median %d\n", rtt_results->meanrtt,
+			rtt_results->modertt, rtt_results->medianrtt));
+	}
+	else {
+		DHD_RTT(("Initiator:(%s) %s; ", bcm_ether_ntoa((&evp->peer_mac), eabuf), diststr));
+	}
+	if (rtt_results->sdrtt > 0)
+		DHD_RTT(("sigma:%d.%d\n", rtt_results->sdrtt/10, rtt_results->sdrtt % 10));
+	else
+		DHD_RTT(("sigma:0\n"));
+
+	DHD_RTT(("rssi:%d validfrmcnt %d, err_code : %d\n", rtt_results->avg_rssi,
+						rtt_results->validfrmcnt, evp->err_code));
+
+	switch (evp->err_code) {
+	case TOF_REASON_OK:
+		rtt_results->err_code = RTT_REASON_SUCCESS;
+		break;
+	case TOF_REASON_TIMEOUT:
+		rtt_results->err_code = RTT_REASON_TIMEOUT;
+		break;
+	case TOF_REASON_NOACK:
+		rtt_results->err_code = RTT_REASON_NO_RSP;
+		break;
+	case TOF_REASON_ABORT:
+		rtt_results->err_code = RTT_REASON_ABORT;
+		break;
+	default:
+		rtt_results->err_code = RTT_REASON_FAILURE;
+		break;
+	}
+	rtt_results->peer_mac = evp->peer_mac;
+	/* get the time elapsed from boot time */
+	get_monotonic_boottime(&ts);
+	rtt_results->ts = (uint64) TIMESPEC_TO_US(ts);
+
+	for (i = 0; i < rtt_results->ftm_cnt; i++) {
+		rtt_results->ftm_buff[i].value = ltoh32(evp->ftm_buff[i].value);
+		rtt_results->ftm_buff[i].rssi = ltoh32(evp->ftm_buff[i].rssi);
+	}
+	return err;
+}
+int
+dhd_rtt_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data)
+{
+	int err = BCME_OK;
+	int len = 0;
+	int idx;
+	uint status, event_type, flags, reason, ftm_cnt;
+	rtt_status_info_t *rtt_status;
+	wl_proxd_event_data_t* evp;
+	struct rtt_noti_callback *iter;
+	rtt_result_t *rtt_result, *entry, *next;
+	gfp_t kflags;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	rtt_status = GET_RTTSTATE(dhd);
+	NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+	event_type = ntoh32_ua((void *)&event->event_type);
+	flags = ntoh16_ua((void *)&event->flags);
+	status = ntoh32_ua((void *)&event->status);
+	reason = ntoh32_ua((void *)&event->reason);
+
+	if (event_type != WLC_E_PROXD) {
+		goto exit;
+	}
+	kflags = in_softirq()? GFP_ATOMIC : GFP_KERNEL;
+	evp = (wl_proxd_event_data_t*)event_data;
+	DHD_RTT(("%s enter : mode: %s, reason :%d \n", __FUNCTION__,
+			(ntoh16(evp->mode) == WL_PROXD_MODE_INITIATOR)?
+			"initiator":"target", reason));
+	switch (reason) {
+	case WLC_E_PROXD_STOP:
+		DHD_RTT(("WLC_E_PROXD_STOP\n"));
+		break;
+	case WLC_E_PROXD_ERROR:
+	case WLC_E_PROXD_COMPLETED:
+		if (reason == WLC_E_PROXD_ERROR) {
+			DHD_RTT(("WLC_E_PROXD_ERROR\n"));
+		} else {
+			DHD_RTT(("WLC_E_PROXD_COMPLETED\n"));
+		}
+
+		if(!in_atomic())
+			mutex_lock(&rtt_status->rtt_mutex);
+		ftm_cnt = ltoh16(evp->ftm_cnt);
+
+		if (ftm_cnt > 0)
+			len = OFFSETOF(rtt_result_t, ftm_buff);
+		else
+			len = sizeof(rtt_result_t);
+		/* check whether the results is already reported or not*/
+		list_for_each_entry(entry, &rtt_status->rtt_results_cache, list) {
+			if (!memcmp(&entry->peer_mac, &evp->peer_mac, ETHER_ADDR_LEN))	{
+				if(!in_atomic())
+					mutex_unlock(&rtt_status->rtt_mutex);
+				goto exit;
+			}
+		}
+		rtt_result = kzalloc(len + sizeof(ftm_sample_t) * ftm_cnt, kflags);
+		if (!rtt_result) {
+			if(!in_atomic())
+				mutex_unlock(&rtt_status->rtt_mutex);
+			err = -ENOMEM;
+			goto exit;
+		}
+		/* point to target_info in status struct and increase pointer */
+		rtt_result->target_info = &rtt_status->rtt_config.target_info[rtt_status->cur_idx];
+		/* find next target to trigger RTT */
+		for (idx = (rtt_status->cur_idx + 1); idx < rtt_status->rtt_config.rtt_target_cnt; idx++) {
+			/* skip the disabled device */
+			if (rtt_status->rtt_config.target_info[idx].disable)
+				continue;
+			else {
+				/*set the idx to cur_idx */
+				rtt_status->cur_idx = idx;
+				break;
+			}
+		}
+		/* convert the event results to host format */
+		dhd_rtt_convert_to_host(rtt_result, evp);
+		list_add_tail(&rtt_result->list, &rtt_status->rtt_results_cache);
+		if (idx < rtt_status->rtt_config.rtt_target_cnt) {
+			/* restart to measure RTT from next device */
+			schedule_work(&rtt_status->work);
+		} else {
+			DHD_RTT(("RTT_STOPPED\n"));
+			rtt_status->status = RTT_STOPPED;
+			/* to turn on mpc mode */
+			schedule_work(&rtt_status->work);
+			/* notify the completed information to others */
+			list_for_each_entry(iter, &rtt_status->noti_fn_list, list) {
+				iter->noti_fn(iter->ctx, &rtt_status->rtt_results_cache);
+			}
+			/* remove the rtt results in cache */
+			list_for_each_entry_safe(rtt_result, next,
+				&rtt_status->rtt_results_cache, list) {
+				list_del(&rtt_result->list);
+				kfree(rtt_result);
+			}
+			/* reinit the HEAD */
+			INIT_LIST_HEAD(&rtt_status->rtt_results_cache);
+			/* clear information for rtt_config */
+			bzero(&rtt_status->rtt_config, sizeof(rtt_config_params_t));
+			rtt_status->cur_idx = 0;
+		}
+		if(!in_atomic())
+			mutex_unlock(&rtt_status->rtt_mutex);
+
+		break;
+	case WLC_E_PROXD_GONE:
+		DHD_RTT(("WLC_E_PROXD_GONE\n"));
+		break;
+	case WLC_E_PROXD_START:
+		/* event for targets / accesspoints  */
+		DHD_RTT(("WLC_E_PROXD_START\n"));
+		break;
+	case WLC_E_PROXD_COLLECT_START:
+		DHD_RTT(("WLC_E_PROXD_COLLECT_START\n"));
+		break;
+	case WLC_E_PROXD_COLLECT_STOP:
+		DHD_RTT(("WLC_E_PROXD_COLLECT_STOP\n"));
+		break;
+	case WLC_E_PROXD_COLLECT_COMPLETED:
+		DHD_RTT(("WLC_E_PROXD_COLLECT_COMPLETED\n"));
+		break;
+	case WLC_E_PROXD_COLLECT_ERROR:
+		DHD_RTT(("WLC_E_PROXD_COLLECT_ERROR; "));
+		break;
+	default:
+		DHD_ERROR(("WLC_E_PROXD: supported EVENT reason code:%d\n", reason));
+		break;
+	}
+
+exit:
+	return err;
+}
+static void
+dhd_rtt_work(struct work_struct *work)
+{
+	rtt_status_info_t *rtt_status;
+	dhd_pub_t *dhd;
+	rtt_status = container_of(work, rtt_status_info_t, work);
+	if (rtt_status == NULL) {
+		DHD_ERROR(("%s : rtt_status is NULL\n", __FUNCTION__));
+		return;
+	}
+	dhd = rtt_status->dhd;
+	if (dhd == NULL) {
+		DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__));
+		return;
+	}
+	dhd_rtt_start(dhd);
+}
+
+int
+dhd_rtt_capability(dhd_pub_t *dhd, rtt_capabilities_t *capa)
+{
+	rtt_status_info_t *rtt_status;
+	int err = BCME_OK;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	rtt_status = GET_RTTSTATE(dhd);
+	NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+	NULL_CHECK(capa, "capa is NULL", err);
+	bzero(capa, sizeof(rtt_capabilities_t));
+
+	if (rtt_status->capability & RTT_CAP_ONE_WAY)
+		capa->rtt_one_sided_supported = 1;
+	if (rtt_status->capability & RTT_CAP_11V_WAY)
+		capa->rtt_11v_supported = 1;
+	if (rtt_status->capability & RTT_CAP_11MC_WAY)
+		capa->rtt_ftm_supported = 1;
+	if (rtt_status->capability & RTT_CAP_VS_WAY)
+		capa->rtt_vs_supported = 1;
+
+	return err;
+}
+int
+dhd_rtt_init(dhd_pub_t *dhd)
+{
+	int err = BCME_OK;
+	rtt_status_info_t *rtt_status;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	if (dhd->rtt_state)
+		goto exit;
+	dhd->rtt_state = MALLOC(dhd->osh, sizeof(rtt_status_info_t));
+	if (dhd->rtt_state == NULL) {
+		DHD_ERROR(("failed to create rtt_state\n"));
+		goto exit;
+	}
+	bzero(dhd->rtt_state, sizeof(rtt_status_info_t));
+	rtt_status = GET_RTTSTATE(dhd);
+	rtt_status->dhd = dhd;
+	err = dhd_iovar(dhd, 0, "proxd_params", NULL, 0, 1);
+	if (err != BCME_UNSUPPORTED) {
+		/* TODO :  need to find a way to check rtt capability */
+		rtt_status->capability |= RTT_CAP_ONE_WAY;
+		rtt_status->capability |= RTT_CAP_VS_WAY;
+	}
+	mutex_init(&rtt_status->rtt_mutex);
+	INIT_LIST_HEAD(&rtt_status->noti_fn_list);
+	INIT_LIST_HEAD(&rtt_status->rtt_results_cache);
+	INIT_WORK(&rtt_status->work, dhd_rtt_work);
+exit:
+	return err;
+}
+
+int dhd_rtt_deinit(dhd_pub_t *dhd)
+{
+	int err = BCME_OK;
+	rtt_status_info_t *rtt_status;
+	rtt_result_t *rtt_result, *next;
+	struct rtt_noti_callback *iter, *iter2;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	rtt_status = GET_RTTSTATE(dhd);
+	NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+	rtt_status->status = RTT_STOPPED;
+	/* clear evt callback list */
+	if (!list_empty(&rtt_status->noti_fn_list)) {
+		list_for_each_entry_safe(iter, iter2, &rtt_status->noti_fn_list, list) {
+			list_del(&iter->list);
+			kfree(iter);
+		}
+	}
+	/* remove the rtt results */
+	if (!list_empty(&rtt_status->rtt_results_cache)) {
+		list_for_each_entry_safe(rtt_result, next, &rtt_status->rtt_results_cache, list) {
+			list_del(&rtt_result->list);
+			kfree(rtt_result);
+		}
+	}
+	MFREE(dhd->osh, dhd->rtt_state, sizeof(rtt_status_info_t));
+	dhd->rtt_state = NULL;
+	return err;
+}
diff --git a/drivers/net/wireless/bcmdhd/dhd_rtt.h b/drivers/net/wireless/bcmdhd/dhd_rtt.h
new file mode 100644
index 0000000..7fb883c
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_rtt.h
@@ -0,0 +1,232 @@
+/*
+ * Header file of Broadcom Dongle Host Driver (DHD)
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ *
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ *
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_rtt.h 423669 2014-07-01 13:01:56Z $
+ */
+#ifndef __DHD_RTT_H__
+#define __DHD_RTT_H__
+
+#include "dngl_stats.h"
+
+#define RTT_MAX_TARGET_CNT 10
+#define RTT_MAX_FRAME_CNT 25
+#define RTT_MAX_RETRY_CNT 10
+#define DEFAULT_FTM_CNT 6
+#define DEFAULT_RETRY_CNT 6
+
+
+/* DSSS, CCK and 802.11n rates in [500kbps] units */
+#define WL_MAXRATE	108	/* in 500kbps units */
+#define WL_RATE_1M	2	/* in 500kbps units */
+#define WL_RATE_2M	4	/* in 500kbps units */
+#define WL_RATE_5M5	11	/* in 500kbps units */
+#define WL_RATE_11M	22	/* in 500kbps units */
+#define WL_RATE_6M	12	/* in 500kbps units */
+#define WL_RATE_9M	18	/* in 500kbps units */
+#define WL_RATE_12M	24	/* in 500kbps units */
+#define WL_RATE_18M	36	/* in 500kbps units */
+#define WL_RATE_24M	48	/* in 500kbps units */
+#define WL_RATE_36M	72	/* in 500kbps units */
+#define WL_RATE_48M	96	/* in 500kbps units */
+#define WL_RATE_54M	108	/* in 500kbps units */
+
+
+enum rtt_role {
+	RTT_INITIATOR = 0,
+	RTT_TARGET = 1
+};
+enum rtt_status {
+	RTT_STOPPED = 0,
+	RTT_STARTED = 1
+};
+typedef int64_t wifi_timestamp; /* In microseconds (us) */
+typedef int64_t wifi_timespan;
+typedef int wifi_rssi;
+
+typedef enum {
+	RTT_INVALID,
+	RTT_ONE_WAY,
+	RTT_TWO_WAY,
+	RTT_AUTO
+} rtt_type_t;
+
+typedef enum {
+	RTT_PEER_STA,
+	RTT_PEER_AP,
+	RTT_PEER_P2P,
+	RTT_PEER_NAN,
+	RTT_PEER_INVALID
+} rtt_peer_type_t;
+
+typedef enum rtt_reason {
+	RTT_REASON_SUCCESS,
+	RTT_REASON_FAILURE,
+	RTT_REASON_NO_RSP,
+	RTT_REASON_REJECTED,
+	RTT_REASON_NOT_SCHEDULED_YET,
+	RTT_REASON_TIMEOUT,
+	RTT_REASON_AP_ON_DIFF_CH,
+	RTT_REASON_AP_NO_CAP,
+	RTT_REASON_ABORT
+} rtt_reason_t;
+
+typedef enum rtt_capability {
+	RTT_CAP_NONE = 0,
+	RTT_CAP_ONE_WAY	 = (1 << (0)),
+	RTT_CAP_11V_WAY  = (1 << (1)),  /* IEEE802.11v */
+	RTT_CAP_11MC_WAY  = (1 << (2)), /* IEEE802.11mc */
+	RTT_CAP_VS_WAY = (1 << (3)) /* BRCM vendor specific */
+} rtt_capability_t ;
+
+typedef struct wifi_channel_info {
+	wifi_channel_width_t width;
+	wifi_channel center_freq; /* primary 20 MHz channel */
+	wifi_channel center_freq0; /* center freq (MHz) first segment */
+	wifi_channel center_freq1; /* center freq (MHz) second segment valid for 80 + 80 */
+} wifi_channel_info_t;
+
+typedef struct wifi_rate {
+	uint32 preamble :3; /* 0: OFDM, 1: CCK, 2 : HT, 3: VHT, 4..7 reserved */
+	uint32 nss		:2; /* 0 : 1x1, 1: 2x2, 3: 3x3, 4: 4x4 */
+	uint32 bw		:3; /* 0: 20Mhz, 1: 40Mhz, 2: 80Mhz, 3: 160Mhz */
+	/* OFDM/CCK rate code would be as per IEEE std in the unit of 0.5 mb
+	* HT/VHT it would be mcs index
+	*/
+	uint32 rateMcsIdx :8;
+	uint32 reserved :16; /* reserved */
+	uint32 bitrate; 	/* unit of 100 Kbps */
+} wifi_rate_t;
+
+typedef struct rtt_target_info {
+	struct ether_addr addr;
+	rtt_type_t type; /* rtt_type */
+	rtt_peer_type_t peer; /* peer type */
+	wifi_channel_info_t channel; /* channel information */
+	chanspec_t chanspec; /* chanspec for channel */
+	int8	continuous; /* 0 = single shot or 1 = continous raging */
+	bool	disable; /* disable for RTT measurement */
+	uint32	interval; /* interval of RTT measurement (unit ms) when continuous = true */
+	uint32	measure_cnt; /* total number of RTT measurement when continuous */
+	uint32	ftm_cnt; /* num of packets in each RTT measurement */
+	uint32	retry_cnt; /* num of retries if sampling fails */
+} rtt_target_info_t;
+
+typedef struct rtt_result {
+	struct list_head list;
+	uint16 ver;			/* version */
+	rtt_target_info_t *target_info; /* target info */
+	uint16 mode;			/* mode: target/initiator */
+	uint16 method;			/* method: rssi/TOF/AOA */
+	uint8  err_code;		/* error classification */
+	uint8  TOF_type;		/* one way or two way TOF */
+	wifi_rate_t tx_rate;           /* tx rate */
+	struct ether_addr peer_mac;	/* (e.g for tgt:initiator's */
+	int32 distance;		/* dst to tgt, units (meter * 16) */
+	uint32 meanrtt;			/* mean delta */
+	uint32 modertt;			/* Mode delta */
+	uint32 medianrtt;		/* median RTT */
+	uint32 sdrtt;			/* Standard deviation of RTT */
+	int16  avg_rssi;		/* avg rssi across the ftm frames */
+	int16  validfrmcnt;		/* Firmware's valid frame counts */
+	wifi_timestamp ts; /* the time elapsed from boot time when driver get this result */
+	uint16 ftm_cnt;			/*  num of rtd measurments/length in the ftm buffer  */
+	ftm_sample_t ftm_buff[1];	/* 1 ... ftm_cnt  */
+} rtt_result_t;
+
+typedef struct rtt_report {
+	struct ether_addr addr;
+	uint num_measurement; /* measurement number in case of continous raging */
+	rtt_reason_t status; /* raging status */
+	rtt_type_t type; /* rtt type */
+	rtt_peer_type_t peer; /* peer type */
+	wifi_channel_info_t channel; /* channel information */
+	wifi_rssi  rssi; /* avg rssi accroos the ftm frames */
+	wifi_rssi  rssi_spread; /* rssi spread in 0.5 db steps e.g. 5 implies 2.5 spread */
+	wifi_rate_t tx_rate;           /* tx rate */
+	wifi_timespan rtt;	/*  round trip time in nanoseconds */
+	wifi_timespan rtt_sd;	/* rtt standard deviation in nanoseconds */
+	wifi_timespan rtt_spread; /* difference between max and min rtt times recorded */
+	int32 distance; /* distance in cm (optional) */
+	int32 distance_sd; /* standard deviation in cm (optional) */
+	int32 distance_spread;/* difference between max and min distance recorded (optional) */
+	wifi_timestamp ts; /* time of the measurement (in microseconds since boot) */
+} rtt_report_t;
+
+/* RTT Capabilities */
+typedef struct rtt_capabilities{
+	uint8 rtt_one_sided_supported;  /* if 1-sided rtt data collection is supported */
+	uint8 rtt_11v_supported;        /* if 11v rtt data collection is supported */
+	uint8 rtt_ftm_supported;        /* if ftm rtt data collection is supported */
+	uint8 rtt_vs_supported; 		 /* if vendor specific data collection is supported */
+} rtt_capabilities_t;
+
+typedef struct rtt_config_params {
+	int8 rtt_target_cnt;
+	rtt_target_info_t target_info[RTT_MAX_TARGET_CNT];
+} rtt_config_params_t;
+
+typedef void (*dhd_rtt_compl_noti_fn)(void *ctx, void *rtt_data);
+/* Linux wrapper to call common dhd_rtt_set_cfg */
+int
+dhd_dev_rtt_set_cfg(struct net_device *dev, void *buf);
+
+int
+dhd_dev_rtt_cancel_cfg(struct net_device *dev, struct ether_addr *mac_list, int mac_cnt);
+
+int
+dhd_dev_rtt_register_noti_callback(struct net_device *dev, void *ctx, dhd_rtt_compl_noti_fn noti_fn);
+
+int
+dhd_dev_rtt_unregister_noti_callback(struct net_device *dev, dhd_rtt_compl_noti_fn noti_fn);
+
+int
+dhd_dev_rtt_capability(struct net_device *dev, rtt_capabilities_t *capa);
+
+/* export to upper layer */
+chanspec_t
+dhd_rtt_convert_to_chspec(wifi_channel_info_t channel);
+
+int
+dhd_rtt_set_cfg(dhd_pub_t *dhd, rtt_config_params_t *params);
+
+int
+dhd_rtt_stop(dhd_pub_t *dhd, struct ether_addr *mac_list, int mac_cnt);
+
+
+int
+dhd_rtt_register_noti_callback(dhd_pub_t *dhd, void *ctx, dhd_rtt_compl_noti_fn noti_fn);
+
+int
+dhd_rtt_unregister_noti_callback(dhd_pub_t *dhd, dhd_rtt_compl_noti_fn noti_fn);
+
+int
+dhd_rtt_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data);
+
+int
+dhd_rtt_capability(dhd_pub_t *dhd, rtt_capabilities_t *capa);
+
+int
+dhd_rtt_init(dhd_pub_t *dhd);
+
+int
+dhd_rtt_deinit(dhd_pub_t *dhd);
+#endif
diff --git a/drivers/net/wireless/bcmdhd/dhd_sdio.c b/drivers/net/wireless/bcmdhd/dhd_sdio.c
new file mode 100644
index 0000000..c8b970e
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_sdio.c
@@ -0,0 +1,8261 @@
+/*
+ * DHD Bus Module for SDIO
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ *
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ *
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_sdio.c 476991 2014-05-12 06:21:02Z $
+ */
+
+#include <typedefs.h>
+#include <osl.h>
+#include <bcmsdh.h>
+
+#ifdef BCMEMBEDIMAGE
+#include BCMEMBEDIMAGE
+#endif /* BCMEMBEDIMAGE */
+
+#include <bcmdefs.h>
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <bcmdevs.h>
+
+#include <siutils.h>
+#include <hndpmu.h>
+#include <hndsoc.h>
+#include <bcmsdpcm.h>
+#if defined(DHD_DEBUG)
+#include <hnd_armtrap.h>
+#include <hnd_cons.h>
+#endif /* defined(DHD_DEBUG) */
+#include <sbchipc.h>
+#include <sbhnddma.h>
+
+#include <sdio.h>
+#include <sbsdio.h>
+#include <sbsdpcmdev.h>
+#include <bcmsdpcm.h>
+#include <bcmsdbus.h>
+
+#include <proto/ethernet.h>
+#include <proto/802.1d.h>
+#include <proto/802.11.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_bus.h>
+#include <dhd_proto.h>
+#include <dhd_dbg.h>
+#include <dhdioctl.h>
+#include <sdiovar.h>
+
+#ifdef PROP_TXSTATUS
+#include <dhd_wlfc.h>
+#endif
+#ifdef DHDTCPACK_SUPPRESS
+#include <dhd_ip.h>
+#endif /* DHDTCPACK_SUPPRESS */
+
+bool dhd_mp_halting(dhd_pub_t *dhdp);
+extern void bcmsdh_waitfor_iodrain(void *sdh);
+extern void bcmsdh_reject_ioreqs(void *sdh, bool reject);
+extern bool  bcmsdh_fatal_error(void *sdh);
+
+#ifndef DHDSDIO_MEM_DUMP_FNAME
+#define DHDSDIO_MEM_DUMP_FNAME         "mem_dump"
+#endif
+
+#define QLEN		(1024) /* bulk rx and tx queue lengths */
+#define FCHI		(QLEN - 10)
+#define FCLOW		(FCHI / 2)
+#define PRIOMASK	7
+
+#define TXRETRIES	2	/* # of retries for tx frames */
+#define READ_FRM_CNT_RETRIES	3
+#ifndef DHD_RXBOUND
+#define DHD_RXBOUND	50	/* Default for max rx frames in one scheduling */
+#endif
+
+#ifndef DHD_TXBOUND
+#define DHD_TXBOUND	20	/* Default for max tx frames in one scheduling */
+#endif
+
+#define DHD_TXMINMAX	1	/* Max tx frames if rx still pending */
+
+#define MEMBLOCK	2048		/* Block size used for downloading of dongle image */
+#define MAX_NVRAMBUF_SIZE	4096	/* max nvram buf size */
+#define MAX_DATA_BUF	(64 * 1024)	/* Must be large enough to hold biggest possible glom */
+
+#ifndef DHD_FIRSTREAD
+#define DHD_FIRSTREAD   32
+#endif
+#if !ISPOWEROF2(DHD_FIRSTREAD)
+#error DHD_FIRSTREAD is not a power of 2!
+#endif
+
+/* Total length of frame header for dongle protocol */
+#define SDPCM_HDRLEN	(SDPCM_FRAMETAG_LEN + SDPCM_SWHEADER_LEN)
+#define SDPCM_HDRLEN_TXGLOM	(SDPCM_HDRLEN + SDPCM_HWEXT_LEN)
+#define MAX_TX_PKTCHAIN_CNT	SDPCM_MAXGLOM_SIZE
+
+#ifdef SDTEST
+#define SDPCM_RESERVE	(SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + DHD_SDALIGN)
+#else
+#define SDPCM_RESERVE	(SDPCM_HDRLEN + DHD_SDALIGN)
+#endif
+
+/* Space for header read, limit for data packets */
+#ifndef MAX_HDR_READ
+#define MAX_HDR_READ	32
+#endif
+#if !ISPOWEROF2(MAX_HDR_READ)
+#error MAX_HDR_READ is not a power of 2!
+#endif
+
+#define MAX_RX_DATASZ	2048
+
+/* Maximum milliseconds to wait for F2 to come up */
+#define DHD_WAIT_F2RDY	3000
+
+/* Bump up limit on waiting for HT to account for first startup;
+ * if the image is doing a CRC calculation before programming the PMU
+ * for HT availability, it could take a couple hundred ms more, so
+ * max out at a 1 second (1000000us).
+ */
+#if (PMU_MAX_TRANSITION_DLY <= 1000000)
+#undef PMU_MAX_TRANSITION_DLY
+#define PMU_MAX_TRANSITION_DLY 1000000
+#endif
+
+/* hooks for limiting threshold custom tx num in rx processing */
+#define DEFAULT_TXINRX_THRES    0
+#ifndef CUSTOM_TXINRX_THRES
+#define CUSTOM_TXINRX_THRES     DEFAULT_TXINRX_THRES
+#endif
+
+/* Value for ChipClockCSR during initial setup */
+#define DHD_INIT_CLKCTL1	(SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ)
+#define DHD_INIT_CLKCTL2	(SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP)
+
+/* Flags for SDH calls */
+#define F2SYNC	(SDIO_REQ_4BYTE | SDIO_REQ_FIXED)
+
+/* Packet free applicable unconditionally for sdio and sdspi.  Conditional if
+ * bufpool was present for gspi bus.
+ */
+#define PKTFREE2()		if ((bus->bus != SPI_BUS) || bus->usebufpool) \
+					PKTFREE(bus->dhd->osh, pkt, FALSE);
+DHD_SPINWAIT_SLEEP_INIT(sdioh_spinwait_sleep);
+
+
+#ifdef DHD_DEBUG
+/* Device console log buffer state */
+#define CONSOLE_LINE_MAX	192
+#define CONSOLE_BUFFER_MAX	2024
+typedef struct dhd_console {
+	uint		count;			/* Poll interval msec counter */
+	uint		log_addr;		/* Log struct address (fixed) */
+	hnd_log_t	log;			/* Log struct (host copy) */
+	uint		bufsize;		/* Size of log buffer */
+	uint8		*buf;			/* Log buffer (host copy) */
+	uint		last;			/* Last buffer read index */
+} dhd_console_t;
+#endif /* DHD_DEBUG */
+
+#define	REMAP_ENAB(bus)			((bus)->remap)
+#define	REMAP_ISADDR(bus, a)		(((a) >= ((bus)->orig_ramsize)) && ((a) < ((bus)->ramsize)))
+#define	KSO_ENAB(bus)			((bus)->kso)
+#define	SR_ENAB(bus)			((bus)->_srenab)
+#define	SLPAUTO_ENAB(bus)		((SR_ENAB(bus)) && ((bus)->_slpauto))
+#define	MIN_RSRC_ADDR			(SI_ENUM_BASE + 0x618)
+#define	MIN_RSRC_SR			0x3
+#define	CORE_CAPEXT_ADDR		(SI_ENUM_BASE + 0x64c)
+#define	CORE_CAPEXT_SR_SUPPORTED_MASK	(1 << 1)
+#define RCTL_MACPHY_DISABLE_MASK	(1 << 26)
+#define RCTL_LOGIC_DISABLE_MASK		(1 << 27)
+
+#define	OOB_WAKEUP_ENAB(bus)		((bus)->_oobwakeup)
+#define	GPIO_DEV_SRSTATE		16	/* Host gpio17 mapped to device gpio0 SR state */
+#define	GPIO_DEV_SRSTATE_TIMEOUT	320000	/* 320ms */
+#define	GPIO_DEV_WAKEUP			17	/* Host gpio17 mapped to device gpio1 wakeup */
+#define	CC_CHIPCTRL2_GPIO1_WAKEUP	(1  << 0)
+#define	CC_CHIPCTRL3_SR_ENG_ENABLE	(1  << 2)
+#define OVERFLOW_BLKSZ512_WM		96
+#define OVERFLOW_BLKSZ512_MES		80
+
+#define CC_PMUCC3	(0x3)
+/* Private data for SDIO bus interaction */
+typedef struct dhd_bus {
+	dhd_pub_t	*dhd;
+
+	bcmsdh_info_t	*sdh;			/* Handle for BCMSDH calls */
+	si_t		*sih;			/* Handle for SI calls */
+	char		*vars;			/* Variables (from CIS and/or other) */
+	uint		varsz;			/* Size of variables buffer */
+	uint32		sbaddr;			/* Current SB window pointer (-1, invalid) */
+
+	sdpcmd_regs_t	*regs;			/* Registers for SDIO core */
+	uint		sdpcmrev;		/* SDIO core revision */
+	uint		armrev;			/* CPU core revision */
+	uint		ramrev;			/* SOCRAM core revision */
+	uint32		ramsize;		/* Size of RAM in SOCRAM (bytes) */
+	uint32		orig_ramsize;		/* Size of RAM in SOCRAM (bytes) */
+	uint32		srmemsize;		/* Size of SRMEM */
+
+	uint32		bus;			/* gSPI or SDIO bus */
+	uint32		bus_num;		/* bus number */
+	uint32		slot_num;		/* slot ID */
+	uint32		hostintmask;	/* Copy of Host Interrupt Mask */
+	uint32		intstatus;		/* Intstatus bits (events) pending */
+	bool		dpc_sched;		/* Indicates DPC schedule (intrpt rcvd) */
+	bool		fcstate;		/* State of dongle flow-control */
+
+	uint16		cl_devid;		/* cached devid for dhdsdio_probe_attach() */
+	char		*fw_path;		/* module_param: path to firmware image */
+	char		*nv_path;		/* module_param: path to nvram vars file */
+	const char      *nvram_params;		/* user specified nvram params. */
+
+	uint		blocksize;		/* Block size of SDIO transfers */
+	uint		roundup;		/* Max roundup limit */
+
+	struct pktq	txq;			/* Queue length used for flow-control */
+	uint8		flowcontrol;		/* per prio flow control bitmask */
+	uint8		tx_seq;			/* Transmit sequence number (next) */
+	uint8		tx_max;			/* Maximum transmit sequence allowed */
+
+	uint8		hdrbuf[MAX_HDR_READ + DHD_SDALIGN];
+	uint8		*rxhdr;			/* Header of current rx frame (in hdrbuf) */
+	uint16		nextlen;		/* Next Read Len from last header */
+	uint8		rx_seq;			/* Receive sequence number (expected) */
+	bool		rxskip;			/* Skip receive (awaiting NAK ACK) */
+
+	void		*glomd;			/* Packet containing glomming descriptor */
+	void		*glom;			/* Packet chain for glommed superframe */
+	uint		glomerr;		/* Glom packet read errors */
+
+	uint8		*rxbuf;			/* Buffer for receiving control packets */
+	uint		rxblen;			/* Allocated length of rxbuf */
+	uint8		*rxctl;			/* Aligned pointer into rxbuf */
+	uint8		*databuf;		/* Buffer for receiving big glom packet */
+	uint8		*dataptr;		/* Aligned pointer into databuf */
+	uint		rxlen;			/* Length of valid data in buffer */
+
+	uint8		sdpcm_ver;		/* Bus protocol reported by dongle */
+
+	bool		intr;			/* Use interrupts */
+	bool		poll;			/* Use polling */
+	bool		ipend;			/* Device interrupt is pending */
+	bool		intdis;			/* Interrupts disabled by isr */
+	uint 		intrcount;		/* Count of device interrupt callbacks */
+	uint		lastintrs;		/* Count as of last watchdog timer */
+	uint		spurious;		/* Count of spurious interrupts */
+	uint		pollrate;		/* Ticks between device polls */
+	uint		polltick;		/* Tick counter */
+	uint		pollcnt;		/* Count of active polls */
+
+#ifdef DHD_DEBUG
+	dhd_console_t	console;		/* Console output polling support */
+	uint		console_addr;		/* Console address from shared struct */
+#endif /* DHD_DEBUG */
+
+	uint		regfails;		/* Count of R_REG/W_REG failures */
+
+	uint		clkstate;		/* State of sd and backplane clock(s) */
+	bool		activity;		/* Activity flag for clock down */
+	int32		idletime;		/* Control for activity timeout */
+	int32		idlecount;		/* Activity timeout counter */
+	int32		idleclock;		/* How to set bus driver when idle */
+	int32		sd_divisor;		/* Speed control to bus driver */
+	int32		sd_mode;		/* Mode control to bus driver */
+	int32		sd_rxchain;		/* If bcmsdh api accepts PKT chains */
+	bool		use_rxchain;		/* If dhd should use PKT chains */
+	bool		sleeping;		/* Is SDIO bus sleeping? */
+	wait_queue_head_t bus_sleep;
+	uint		rxflow_mode;		/* Rx flow control mode */
+	bool		rxflow;			/* Is rx flow control on */
+	uint		prev_rxlim_hit;		/* Is prev rx limit exceeded (per dpc schedule) */
+	bool		alp_only;		/* Don't use HT clock (ALP only) */
+	/* Field to decide if rx of control frames happen in rxbuf or lb-pool */
+	bool		usebufpool;
+	int32		txinrx_thres;	/* num of in-queued pkts */
+	int32		dotxinrx;	/* tx first in dhdsdio_readframes */
+#ifdef SDTEST
+	/* external loopback */
+	bool		ext_loop;
+	uint8		loopid;
+
+	/* pktgen configuration */
+	uint		pktgen_freq;		/* Ticks between bursts */
+	uint		pktgen_count;		/* Packets to send each burst */
+	uint		pktgen_print;		/* Bursts between count displays */
+	uint		pktgen_total;		/* Stop after this many */
+	uint		pktgen_minlen;		/* Minimum packet data len */
+	uint		pktgen_maxlen;		/* Maximum packet data len */
+	uint		pktgen_mode;		/* Configured mode: tx, rx, or echo */
+	uint		pktgen_stop;		/* Number of tx failures causing stop */
+
+	/* active pktgen fields */
+	uint		pktgen_tick;		/* Tick counter for bursts */
+	uint		pktgen_ptick;		/* Burst counter for printing */
+	uint		pktgen_sent;		/* Number of test packets generated */
+	uint		pktgen_rcvd;		/* Number of test packets received */
+	uint		pktgen_prev_time;	/* Time at which previous stats where printed */
+	uint		pktgen_prev_sent;	/* Number of test packets generated when
+						 * previous stats were printed
+						 */
+	uint		pktgen_prev_rcvd;	/* Number of test packets received when
+						 * previous stats were printed
+						 */
+	uint		pktgen_fail;		/* Number of failed send attempts */
+	uint16		pktgen_len;		/* Length of next packet to send */
+#define PKTGEN_RCV_IDLE     (0)
+#define PKTGEN_RCV_ONGOING  (1)
+	uint16		pktgen_rcv_state;		/* receive state */
+	uint		pktgen_rcvd_rcvsession;	/* test pkts rcvd per rcv session. */
+#endif /* SDTEST */
+
+	/* Some additional counters */
+	uint		tx_sderrs;		/* Count of tx attempts with sd errors */
+	uint		fcqueued;		/* Tx packets that got queued */
+	uint		rxrtx;			/* Count of rtx requests (NAK to dongle) */
+	uint		rx_toolong;		/* Receive frames too long to receive */
+	uint		rxc_errors;		/* SDIO errors when reading control frames */
+	uint		rx_hdrfail;		/* SDIO errors on header reads */
+	uint		rx_badhdr;		/* Bad received headers (roosync?) */
+	uint		rx_badseq;		/* Mismatched rx sequence number */
+	uint		fc_rcvd;		/* Number of flow-control events received */
+	uint		fc_xoff;		/* Number which turned on flow-control */
+	uint		fc_xon;			/* Number which turned off flow-control */
+	uint		rxglomfail;		/* Failed deglom attempts */
+	uint		rxglomframes;		/* Number of glom frames (superframes) */
+	uint		rxglompkts;		/* Number of packets from glom frames */
+	uint		f2rxhdrs;		/* Number of header reads */
+	uint		f2rxdata;		/* Number of frame data reads */
+	uint		f2txdata;		/* Number of f2 frame writes */
+	uint		f1regdata;		/* Number of f1 register accesses */
+#ifdef DHD_WAKE_STATUS
+	uint		rxwake;
+	uint		rcwake;
+	uint		glomwake;
+#endif
+#ifdef DHDENABLE_TAILPAD
+	uint		tx_tailpad_chain;	/* Number of tail padding by chaining pad_pkt */
+	uint		tx_tailpad_pktget;	/* Number of tail padding by new PKTGET */
+#endif /* DHDENABLE_TAILPAD */
+	uint8		*ctrl_frame_buf;
+	uint32		ctrl_frame_len;
+	bool		ctrl_frame_stat;
+	uint32		rxint_mode;	/* rx interrupt mode */
+	bool		remap;		/* Contiguous 1MB RAM: 512K socram + 512K devram
+					 * Available with socram rev 16
+					 * Remap region not DMA-able
+					 */
+	bool		kso;
+	bool		_slpauto;
+	bool		_oobwakeup;
+	bool		_srenab;
+	bool        readframes;
+	bool        reqbussleep;
+	uint32		resetinstr;
+	uint32		dongle_ram_base;
+
+	void		*glom_pkt_arr[SDPCM_MAXGLOM_SIZE];	/* Array of pkts for glomming */
+	uint32		txglom_cnt;	/* Number of pkts in the glom array */
+	uint32		txglom_total_len;	/* Total length of pkts in glom array */
+	bool		txglom_enable;	/* Flag to indicate whether tx glom is enabled/disabled */
+	uint32		txglomsize;	/* Glom size limitation */
+#ifdef DHDENABLE_TAILPAD
+	void		*pad_pkt;
+#endif /* DHDENABLE_TAILPAD */
+} dhd_bus_t;
+
+/* clkstate */
+#define CLK_NONE	0
+#define CLK_SDONLY	1
+#define CLK_PENDING	2	/* Not used yet */
+#define CLK_AVAIL	3
+
+#define DHD_NOPMU(dhd)	(FALSE)
+
+#ifdef DHD_DEBUG
+static int qcount[NUMPRIO];
+static int tx_packets[NUMPRIO];
+#endif /* DHD_DEBUG */
+
+/* Deferred transmit */
+const uint dhd_deferred_tx = 1;
+
+extern uint dhd_watchdog_ms;
+
+extern void dhd_os_wd_timer(void *bus, uint wdtick);
+
+/* Tx/Rx bounds */
+uint dhd_txbound;
+uint dhd_rxbound;
+uint dhd_txminmax = DHD_TXMINMAX;
+
+/* override the RAM size if possible */
+#define DONGLE_MIN_RAMSIZE (128 *1024)
+int dhd_dongle_ramsize;
+
+uint dhd_doflow = TRUE;
+uint dhd_dpcpoll = FALSE;
+
+module_param(dhd_doflow, uint, 0644);
+module_param(dhd_dpcpoll, uint, 0644);
+
+static bool dhd_alignctl;
+
+static bool sd1idle;
+
+static bool retrydata;
+#define RETRYCHAN(chan) (((chan) == SDPCM_EVENT_CHANNEL) || retrydata)
+
+static uint watermark = 8;
+static uint mesbusyctrl = 0;
+static const uint firstread = DHD_FIRSTREAD;
+
+/* Retry count for register access failures */
+static const uint retry_limit = 2;
+
+/* Force even SD lengths (some host controllers mess up on odd bytes) */
+static bool forcealign;
+
+#define ALIGNMENT  4
+
+#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
+extern void bcmsdh_enable_hw_oob_intr(void *sdh, bool enable);
+#endif
+
+#if defined(OOB_INTR_ONLY) && defined(SDIO_ISR_THREAD)
+#error OOB_INTR_ONLY is NOT working with SDIO_ISR_THREAD
+#endif /* defined(OOB_INTR_ONLY) && defined(SDIO_ISR_THREAD) */
+#define PKTALIGN(osh, p, len, align)					\
+	do {								\
+		uintptr datalign;						\
+		datalign = (uintptr)PKTDATA((osh), (p));		\
+		datalign = ROUNDUP(datalign, (align)) - datalign;	\
+		ASSERT(datalign < (align));				\
+		ASSERT(PKTLEN((osh), (p)) >= ((len) + datalign));	\
+		if (datalign)						\
+			PKTPULL((osh), (p), (uint)datalign);			\
+		PKTSETLEN((osh), (p), (len));				\
+	} while (0)
+
+/* Limit on rounding up frames */
+static const uint max_roundup = 512;
+
+/* Try doing readahead */
+static bool dhd_readahead;
+
+/* To check if there's window offered */
+#define DATAOK(bus) \
+	(((uint8)(bus->tx_max - bus->tx_seq) > 1) && \
+	(((uint8)(bus->tx_max - bus->tx_seq) & 0x80) == 0))
+
+/* To check if there's window offered for ctrl frame */
+#define TXCTLOK(bus) \
+	(((uint8)(bus->tx_max - bus->tx_seq) != 0) && \
+	(((uint8)(bus->tx_max - bus->tx_seq) & 0x80) == 0))
+
+/* Number of pkts available in dongle for data RX */
+#define DATABUFCNT(bus) \
+	((uint8)(bus->tx_max - bus->tx_seq) - 1)
+
+/* Macros to get register read/write status */
+/* NOTE: these assume a local dhdsdio_bus_t *bus! */
+#define R_SDREG(regvar, regaddr, retryvar) \
+do { \
+	retryvar = 0; \
+	do { \
+		regvar = R_REG(bus->dhd->osh, regaddr); \
+	} while (bcmsdh_regfail(bus->sdh) && (++retryvar <= retry_limit)); \
+	if (retryvar) { \
+		bus->regfails += (retryvar-1); \
+		if (retryvar > retry_limit) { \
+			DHD_ERROR(("%s: FAILED" #regvar "READ, LINE %d\n", \
+			           __FUNCTION__, __LINE__)); \
+			regvar = 0; \
+		} \
+	} \
+} while (0)
+
+#define W_SDREG(regval, regaddr, retryvar) \
+do { \
+	retryvar = 0; \
+	do { \
+		W_REG(bus->dhd->osh, regaddr, regval); \
+	} while (bcmsdh_regfail(bus->sdh) && (++retryvar <= retry_limit)); \
+	if (retryvar) { \
+		bus->regfails += (retryvar-1); \
+		if (retryvar > retry_limit) \
+			DHD_ERROR(("%s: FAILED REGISTER WRITE, LINE %d\n", \
+			           __FUNCTION__, __LINE__)); \
+	} \
+} while (0)
+
+#define BUS_WAKE(bus) \
+	do { \
+		bus->idlecount = 0; \
+		if ((bus)->sleeping) \
+			dhdsdio_bussleep((bus), FALSE); \
+	} while (0);
+
+/*
+ * pktavail interrupts from dongle to host can be managed in 3 different ways
+ * whenever there is a packet available in dongle to transmit to host.
+ *
+ * Mode 0:	Dongle writes the software host mailbox and host is interrupted.
+ * Mode 1:	(sdiod core rev >= 4)
+ *		Device sets a new bit in the intstatus whenever there is a packet
+ *		available in fifo.  Host can't clear this specific status bit until all the
+ *		packets are read from the FIFO.  No need to ack dongle intstatus.
+ * Mode 2:	(sdiod core rev >= 4)
+ *		Device sets a bit in the intstatus, and host acks this by writing
+ *		one to this bit.  Dongle won't generate anymore packet interrupts
+ *		until host reads all the packets from the dongle and reads a zero to
+ *		figure that there are no more packets.  No need to disable host ints.
+ *		Need to ack the intstatus.
+ */
+
+#define SDIO_DEVICE_HMB_RXINT		0	/* default old way */
+#define SDIO_DEVICE_RXDATAINT_MODE_0	1	/* from sdiod rev 4 */
+#define SDIO_DEVICE_RXDATAINT_MODE_1	2	/* from sdiod rev 4 */
+
+
+#define FRAME_AVAIL_MASK(bus) 	\
+	((bus->rxint_mode == SDIO_DEVICE_HMB_RXINT) ? I_HMB_FRAME_IND : I_XMTDATA_AVAIL)
+
+#define DHD_BUS			SDIO_BUS
+
+#define PKT_AVAILABLE(bus, intstatus)	((intstatus) & (FRAME_AVAIL_MASK(bus)))
+
+#define HOSTINTMASK		(I_HMB_SW_MASK | I_CHIPACTIVE)
+
+#define GSPI_PR55150_BAILOUT
+
+#ifdef SDTEST
+static void dhdsdio_testrcv(dhd_bus_t *bus, void *pkt, uint seq);
+static void dhdsdio_sdtest_set(dhd_bus_t *bus, uint count);
+#endif
+
+#ifdef DHD_DEBUG
+static int dhdsdio_checkdied(dhd_bus_t *bus, char *data, uint size);
+static int dhd_serialconsole(dhd_bus_t *bus, bool get, bool enable, int *bcmerror);
+#endif /* DHD_DEBUG */
+
+static int dhdsdio_devcap_set(dhd_bus_t *bus, uint8 cap);
+static int dhdsdio_download_state(dhd_bus_t *bus, bool enter);
+
+static void dhdsdio_release(dhd_bus_t *bus, osl_t *osh);
+static void dhdsdio_release_malloc(dhd_bus_t *bus, osl_t *osh);
+static void dhdsdio_disconnect(void *ptr);
+static bool dhdsdio_chipmatch(uint16 chipid);
+static bool dhdsdio_probe_attach(dhd_bus_t *bus, osl_t *osh, void *sdh,
+                                 void * regsva, uint16  devid);
+static bool dhdsdio_probe_malloc(dhd_bus_t *bus, osl_t *osh, void *sdh);
+static bool dhdsdio_probe_init(dhd_bus_t *bus, osl_t *osh, void *sdh);
+static void dhdsdio_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation,
+	bool reset_flag);
+
+static void dhd_dongle_setramsize(struct dhd_bus *bus, int mem_size);
+static int dhd_bcmsdh_recv_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags,
+	uint8 *buf, uint nbytes,
+	void *pkt, bcmsdh_cmplt_fn_t complete, void *handle);
+static int dhd_bcmsdh_send_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags,
+	uint8 *buf, uint nbytes,
+	void *pkt, bcmsdh_cmplt_fn_t complete, void *handle, int max_retry);
+static int dhdsdio_txpkt(dhd_bus_t *bus, uint chan, void** pkts, int num_pkt, bool free_pkt);
+static int dhdsdio_txpkt_preprocess(dhd_bus_t *bus, void *pkt, int chan, int txseq,
+	int prev_chain_total_len, bool last_chained_pkt,
+	int *pad_pkt_len, void **new_pkt);
+static int dhdsdio_txpkt_postprocess(dhd_bus_t *bus, void *pkt);
+
+static int dhdsdio_download_firmware(dhd_bus_t *bus, osl_t *osh, void *sdh);
+static int _dhdsdio_download_firmware(dhd_bus_t *bus);
+
+static int dhdsdio_download_code_file(dhd_bus_t *bus, char *image_path);
+static int dhdsdio_download_nvram(dhd_bus_t *bus);
+#ifdef BCMEMBEDIMAGE
+static int dhdsdio_download_code_array(dhd_bus_t *bus);
+#endif
+static int dhdsdio_bussleep(dhd_bus_t *bus, bool sleep);
+static int dhdsdio_clkctl(dhd_bus_t *bus, uint target, bool pendok);
+static uint8 dhdsdio_sleepcsr_get(dhd_bus_t *bus);
+
+#ifdef WLMEDIA_HTSF
+#include <htsf.h>
+extern uint32 dhd_get_htsf(void *dhd, int ifidx);
+#endif /* WLMEDIA_HTSF */
+
+static void
+dhdsdio_tune_fifoparam(struct dhd_bus *bus)
+{
+	int err;
+	uint8 devctl, wm, mes;
+
+	if (bus->sih->buscorerev >= 15) {
+		/* See .ppt in PR for these recommended values */
+		if (bus->blocksize == 512) {
+			wm = OVERFLOW_BLKSZ512_WM;
+			mes = OVERFLOW_BLKSZ512_MES;
+		} else {
+			mes = bus->blocksize/4;
+			wm = bus->blocksize/4;
+		}
+
+		watermark = wm;
+		mesbusyctrl = mes;
+	} else {
+		DHD_INFO(("skip fifotune: SdioRev(%d) is lower than minimal requested ver\n",
+			bus->sih->buscorerev));
+		return;
+	}
+
+	/* Update watermark */
+	if (wm > 0) {
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_WATERMARK, wm, &err);
+
+		devctl = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+		devctl |= SBSDIO_DEVCTL_F2WM_ENAB;
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
+	}
+
+	/* Update MES */
+	if (mes > 0) {
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_MESBUSYCTRL,
+			(mes | SBSDIO_MESBUSYCTRL_ENAB), &err);
+	}
+
+	DHD_INFO(("Apply overflow WAR: 0x%02x 0x%02x 0x%02x\n",
+		bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err),
+		bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_WATERMARK, &err),
+		bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_MESBUSYCTRL, &err)));
+}
+
+static void
+dhd_dongle_setramsize(struct dhd_bus *bus, int mem_size)
+{
+	int32 min_size =  DONGLE_MIN_RAMSIZE;
+	/* Restrict the ramsize to user specified limit */
+	DHD_ERROR(("user: Restrict the dongle ram size to %d, min accepted %d\n",
+		dhd_dongle_ramsize, min_size));
+	if ((dhd_dongle_ramsize > min_size) &&
+		(dhd_dongle_ramsize < (int32)bus->orig_ramsize))
+		bus->ramsize = dhd_dongle_ramsize;
+}
+
+static int
+dhdsdio_set_siaddr_window(dhd_bus_t *bus, uint32 address)
+{
+	int err = 0;
+	bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW,
+	                 (address >> 8) & SBSDIO_SBADDRLOW_MASK, &err);
+	if (!err)
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRMID,
+		                 (address >> 16) & SBSDIO_SBADDRMID_MASK, &err);
+	if (!err)
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRHIGH,
+		                 (address >> 24) & SBSDIO_SBADDRHIGH_MASK, &err);
+	return err;
+}
+
+
+#ifdef USE_OOB_GPIO1
+static int
+dhdsdio_oobwakeup_init(dhd_bus_t *bus)
+{
+	uint32 val, addr, data;
+
+	bcmsdh_gpioouten(bus->sdh, GPIO_DEV_WAKEUP);
+
+	addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_addr);
+	data = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_data);
+
+	/* Set device for gpio1 wakeup */
+	bcmsdh_reg_write(bus->sdh, addr, 4, 2);
+	val = bcmsdh_reg_read(bus->sdh, data, 4);
+	val |= CC_CHIPCTRL2_GPIO1_WAKEUP;
+	bcmsdh_reg_write(bus->sdh, data, 4, val);
+
+	bus->_oobwakeup = TRUE;
+
+	return 0;
+}
+#endif /* USE_OOB_GPIO1 */
+
+/*
+ * Query if FW is in SR mode
+ */
+static bool
+dhdsdio_sr_cap(dhd_bus_t *bus)
+{
+	bool cap = FALSE;
+	uint32  core_capext, addr, data;
+
+	if (BCM4349_CHIP(bus->sih->chip)) {
+		/* For now SR capability would not be exercised */
+		return cap;
+	}
+	if (bus->sih->chip == BCM43430_CHIP_ID) {
+		/* For now SR capability would not be exercised */
+		return cap;
+	}
+	if (bus->sih->chip == BCM4324_CHIP_ID) {
+			addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_addr);
+			data = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_data);
+			bcmsdh_reg_write(bus->sdh, addr, 4, 3);
+			core_capext = bcmsdh_reg_read(bus->sdh, data, 4);
+	} else if (bus->sih->chip == BCM4330_CHIP_ID) {
+			core_capext = FALSE;
+	} else if ((bus->sih->chip == BCM4335_CHIP_ID) ||
+		(bus->sih->chip == BCM4339_CHIP_ID) ||
+		(bus->sih->chip == BCM43349_CHIP_ID) ||
+		(bus->sih->chip == BCM4345_CHIP_ID) ||
+		(bus->sih->chip == BCM4354_CHIP_ID) ||
+		(bus->sih->chip == BCM4356_CHIP_ID) ||
+		(bus->sih->chip == BCM4358_CHIP_ID) ||
+		(bus->sih->chip == BCM4350_CHIP_ID)) {
+		core_capext = TRUE;
+	} else {
+			core_capext = bcmsdh_reg_read(bus->sdh, CORE_CAPEXT_ADDR, 4);
+			core_capext = (core_capext & CORE_CAPEXT_SR_SUPPORTED_MASK);
+	}
+	if (!(core_capext))
+		return FALSE;
+
+	if (bus->sih->chip == BCM4324_CHIP_ID) {
+		/* FIX: Should change to query SR control register instead */
+		cap = TRUE;
+	} else if ((bus->sih->chip == BCM4335_CHIP_ID) ||
+		(bus->sih->chip == BCM4339_CHIP_ID) ||
+		(bus->sih->chip == BCM43349_CHIP_ID) ||
+		(bus->sih->chip == BCM4345_CHIP_ID) ||
+		(bus->sih->chip == BCM4354_CHIP_ID) ||
+		(bus->sih->chip == BCM4356_CHIP_ID) ||
+		(bus->sih->chip == BCM4358_CHIP_ID) ||
+		(bus->sih->chip == BCM4350_CHIP_ID)) {
+		uint32 enabval = 0;
+		addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_addr);
+		data = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_data);
+		bcmsdh_reg_write(bus->sdh, addr, 4, CC_PMUCC3);
+		enabval = bcmsdh_reg_read(bus->sdh, data, 4);
+
+		if ((bus->sih->chip == BCM4350_CHIP_ID) ||
+			(bus->sih->chip == BCM4345_CHIP_ID) ||
+			(bus->sih->chip == BCM4354_CHIP_ID) ||
+			(bus->sih->chip == BCM4356_CHIP_ID) ||
+			(bus->sih->chip == BCM4358_CHIP_ID))
+			enabval &= CC_CHIPCTRL3_SR_ENG_ENABLE;
+
+		if (enabval)
+			cap = TRUE;
+	} else {
+		data = bcmsdh_reg_read(bus->sdh,
+			SI_ENUM_BASE + OFFSETOF(chipcregs_t, retention_ctl), 4);
+		if ((data & (RCTL_MACPHY_DISABLE_MASK | RCTL_LOGIC_DISABLE_MASK)) == 0)
+			cap = TRUE;
+	}
+
+	return cap;
+}
+
+static int
+dhdsdio_srwar_init(dhd_bus_t *bus)
+{
+	bcmsdh_gpio_init(bus->sdh);
+
+#ifdef USE_OOB_GPIO1
+	dhdsdio_oobwakeup_init(bus);
+#endif
+
+
+	return 0;
+}
+
+static int
+dhdsdio_sr_init(dhd_bus_t *bus)
+{
+	uint8 val;
+	int err = 0;
+
+	if ((bus->sih->chip == BCM4334_CHIP_ID) && (bus->sih->chiprev == 2))
+		dhdsdio_srwar_init(bus);
+
+	val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL, NULL);
+	val |= 1 << SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT;
+	bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL,
+		1 << SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT, &err);
+	val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL, NULL);
+
+	/* Add CMD14 Support */
+	dhdsdio_devcap_set(bus,
+		(SDIOD_CCCR_BRCM_CARDCAP_CMD14_SUPPORT | SDIOD_CCCR_BRCM_CARDCAP_CMD14_EXT));
+
+	bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1,
+		SBSDIO_FUNC1_CHIPCLKCSR, SBSDIO_FORCE_HT, &err);
+
+	bus->_slpauto = dhd_slpauto ? TRUE : FALSE;
+
+	bus->_srenab = TRUE;
+
+	return 0;
+}
+
+/*
+ * FIX: Be sure KSO bit is enabled
+ * Currently, it's defaulting to 0 which should be 1.
+ */
+static int
+dhdsdio_clk_kso_init(dhd_bus_t *bus)
+{
+	uint8 val;
+	int err = 0;
+
+	/* set flag */
+	bus->kso = TRUE;
+
+	/*
+	 * Enable KeepSdioOn (KSO) bit for normal operation
+	 * Default is 0 (4334A0) so set it. Fixed in B0.
+	 */
+	val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, NULL);
+	if (!(val & SBSDIO_FUNC1_SLEEPCSR_KSO_MASK)) {
+		val |= (SBSDIO_FUNC1_SLEEPCSR_KSO_EN << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, val, &err);
+		if (err)
+			DHD_ERROR(("%s: SBSDIO_FUNC1_SLEEPCSR err: 0x%x\n", __FUNCTION__, err));
+	}
+
+	return 0;
+}
+
+#define KSO_DBG(x)
+#define KSO_WAIT_US 50
+#define KSO_WAIT_MS 1
+#define KSO_SLEEP_RETRY_COUNT 20
+#define ERROR_BCME_NODEVICE_MAX 1
+
+#define MAX_KSO_ATTEMPTS (PMU_MAX_TRANSITION_DLY/KSO_WAIT_US)
+static int
+dhdsdio_clk_kso_enab(dhd_bus_t *bus, bool on)
+{
+	uint8 wr_val = 0, rd_val, cmp_val, bmask;
+	int err = 0;
+	int try_cnt = 0;
+
+	KSO_DBG(("%s> op:%s\n", __FUNCTION__, (on ? "KSO_SET" : "KSO_CLR")));
+
+	wr_val |= (on << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
+
+	bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, wr_val, &err);
+
+	if (on) {
+		cmp_val = SBSDIO_FUNC1_SLEEPCSR_KSO_MASK |  SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK;
+		bmask = cmp_val;
+
+		OSL_SLEEP(3);
+	} else {
+		/* Put device to sleep, turn off  KSO  */
+		cmp_val = 0;
+		bmask = SBSDIO_FUNC1_SLEEPCSR_KSO_MASK;
+	}
+
+	do {
+		rd_val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, &err);
+		if (((rd_val & bmask) == cmp_val) && !err)
+			break;
+
+		KSO_DBG(("%s> KSO wr/rd retry:%d, ERR:%x \n", __FUNCTION__, try_cnt, err));
+
+		if (((try_cnt + 1) % KSO_SLEEP_RETRY_COUNT) == 0) {
+			OSL_SLEEP(KSO_WAIT_MS);
+		} else
+			OSL_DELAY(KSO_WAIT_US);
+
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, wr_val, &err);
+	} while (try_cnt++ < MAX_KSO_ATTEMPTS);
+
+
+	if (try_cnt > 2)
+		KSO_DBG(("%s> op:%s, try_cnt:%d, rd_val:%x, ERR:%x \n",
+			__FUNCTION__, (on ? "KSO_SET" : "KSO_CLR"), try_cnt, rd_val, err));
+
+	if (try_cnt > MAX_KSO_ATTEMPTS)  {
+		DHD_ERROR(("%s> op:%s, ERROR: try_cnt:%d, rd_val:%x, ERR:%x \n",
+			__FUNCTION__, (on ? "KSO_SET" : "KSO_CLR"), try_cnt, rd_val, err));
+	}
+	return err;
+}
+
+static int
+dhdsdio_clk_kso_iovar(dhd_bus_t *bus, bool on)
+{
+	int err = 0;
+
+	if (on == FALSE) {
+
+		BUS_WAKE(bus);
+		dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+		DHD_ERROR(("%s: KSO disable clk: 0x%x\n", __FUNCTION__,
+			bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1,
+			SBSDIO_FUNC1_CHIPCLKCSR, &err)));
+		dhdsdio_clk_kso_enab(bus, FALSE);
+	} else {
+		DHD_ERROR(("%s: KSO enable\n", __FUNCTION__));
+
+		/* Make sure we have SD bus access */
+		if (bus->clkstate == CLK_NONE) {
+			DHD_ERROR(("%s: Request SD clk\n", __FUNCTION__));
+			dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+		}
+
+		dhdsdio_clk_kso_enab(bus, TRUE);
+
+		DHD_ERROR(("%s: sleepcsr: 0x%x\n", __FUNCTION__,
+			dhdsdio_sleepcsr_get(bus)));
+	}
+
+	bus->kso = on;
+	BCM_REFERENCE(err);
+
+	return 0;
+}
+
+static uint8
+dhdsdio_sleepcsr_get(dhd_bus_t *bus)
+{
+	int err = 0;
+	uint8 val = 0;
+
+	val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, &err);
+	if (err)
+		DHD_TRACE(("Failed to read SLEEPCSR: %d\n", err));
+
+	return val;
+}
+
+uint8
+dhdsdio_devcap_get(dhd_bus_t *bus)
+{
+	return bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_BRCM_CARDCAP, NULL);
+}
+
+static int
+dhdsdio_devcap_set(dhd_bus_t *bus, uint8 cap)
+{
+	int err = 0;
+
+	bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_BRCM_CARDCAP, cap, &err);
+	if (err)
+		DHD_ERROR(("%s: devcap set err: 0x%x\n", __FUNCTION__, err));
+
+	return 0;
+}
+
+static int
+dhdsdio_clk_devsleep_iovar(dhd_bus_t *bus, bool on)
+{
+	int err = 0, retry;
+	uint8 val;
+
+	retry = 0;
+	if (on == TRUE) {
+		/* Enter Sleep */
+
+		/* Be sure we request clk before going to sleep
+		 * so we can wake-up with clk request already set
+		 * else device can go back to sleep immediately
+		 */
+		if (!SLPAUTO_ENAB(bus))
+			dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+		else {
+			val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+			if ((val & SBSDIO_CSR_MASK) == 0) {
+				DHD_ERROR(("%s: No clock before enter sleep:0x%x\n",
+					__FUNCTION__, val));
+
+				/* Reset clock request */
+				bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+					SBSDIO_ALP_AVAIL_REQ, &err);
+				DHD_ERROR(("%s: clock before sleep:0x%x\n", __FUNCTION__,
+					bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1,
+					SBSDIO_FUNC1_CHIPCLKCSR, &err)));
+			}
+		}
+
+		DHD_TRACE(("%s: clk before sleep: 0x%x\n", __FUNCTION__,
+			bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1,
+			SBSDIO_FUNC1_CHIPCLKCSR, &err)));
+#ifdef USE_CMD14
+		err = bcmsdh_sleep(bus->sdh, TRUE);
+#else
+		err = dhdsdio_clk_kso_enab(bus, FALSE);
+		if (OOB_WAKEUP_ENAB(bus))
+		{
+			err = bcmsdh_gpioout(bus->sdh, GPIO_DEV_WAKEUP, FALSE);  /* GPIO_1 is off */
+		}
+#endif /* USE_CMD14 */
+	} else {
+		/* Exit Sleep */
+		/* Make sure we have SD bus access */
+		if (bus->clkstate == CLK_NONE) {
+			DHD_TRACE(("%s: Request SD clk\n", __FUNCTION__));
+			dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+		}
+
+		if ((bus->sih->chip == BCM4334_CHIP_ID) && (bus->sih->chiprev == 2)) {
+			SPINWAIT_SLEEP(sdioh_spinwait_sleep,
+				(bcmsdh_gpioin(bus->sdh, GPIO_DEV_SRSTATE) != TRUE),
+				GPIO_DEV_SRSTATE_TIMEOUT);
+
+			if (bcmsdh_gpioin(bus->sdh, GPIO_DEV_SRSTATE) == FALSE) {
+				DHD_ERROR(("ERROR: GPIO_DEV_SRSTATE still low!\n"));
+			}
+		}
+#ifdef USE_CMD14
+		err = bcmsdh_sleep(bus->sdh, FALSE);
+		if (SLPAUTO_ENAB(bus) && (err != 0)) {
+			OSL_DELAY(10000);
+			DHD_TRACE(("%s: Resync device sleep\n", __FUNCTION__));
+
+			/* Toggle sleep to resync with host and device */
+			err = bcmsdh_sleep(bus->sdh, TRUE);
+			OSL_DELAY(10000);
+			err = bcmsdh_sleep(bus->sdh, FALSE);
+
+			if (err) {
+				OSL_DELAY(10000);
+				DHD_ERROR(("%s: CMD14 exit failed again!\n", __FUNCTION__));
+
+				/* Toggle sleep to resync with host and device */
+				err = bcmsdh_sleep(bus->sdh, TRUE);
+				OSL_DELAY(10000);
+				err = bcmsdh_sleep(bus->sdh, FALSE);
+				if (err) {
+					DHD_ERROR(("%s: CMD14 exit failed twice!\n", __FUNCTION__));
+					DHD_ERROR(("%s: FATAL: Device non-response!\n",
+						__FUNCTION__));
+					err = 0;
+				}
+			}
+		}
+#else
+		if (OOB_WAKEUP_ENAB(bus))
+		{
+			err = bcmsdh_gpioout(bus->sdh, GPIO_DEV_WAKEUP, TRUE);  /* GPIO_1 is on */
+		}
+		do {
+			err = dhdsdio_clk_kso_enab(bus, TRUE);
+			if (err)
+				OSL_SLEEP(10);
+		} while ((err != 0) && (++retry < 3));
+
+		if (err != 0) {
+			DHD_ERROR(("ERROR: kso set failed retry: %d\n", retry));
+			err = 0; /* continue anyway */
+		}
+#endif /* !USE_CMD14 */
+
+		if (err == 0) {
+			uint8 csr;
+
+			/* Wait for device ready during transition to wake-up */
+			SPINWAIT_SLEEP(sdioh_spinwait_sleep,
+				(((csr = dhdsdio_sleepcsr_get(bus)) &
+				SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK) !=
+				(SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK)), (20000));
+
+			DHD_TRACE(("%s: ExitSleep sleepcsr: 0x%x\n", __FUNCTION__, csr));
+
+			if (!(csr & SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK)) {
+				DHD_ERROR(("%s:ERROR: ExitSleep device NOT Ready! 0x%x\n",
+					__FUNCTION__, csr));
+				err = BCME_NODEVICE;
+			}
+
+			SPINWAIT_SLEEP(sdioh_spinwait_sleep,
+				(((csr = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1,
+				SBSDIO_FUNC1_CHIPCLKCSR, &err)) & SBSDIO_HT_AVAIL) !=
+				(SBSDIO_HT_AVAIL)), (10000));
+
+			DHD_TRACE(("%s: SBSDIO_FUNC1_CHIPCLKCSR : 0x%x\n", __FUNCTION__, csr));
+			if (!err && ((csr & SBSDIO_HT_AVAIL) != SBSDIO_HT_AVAIL)) {
+				DHD_ERROR(("%s:ERROR: device NOT Ready! 0x%x\n",
+					__FUNCTION__, csr));
+				err = BCME_NODEVICE;
+			}
+		}
+	}
+
+	/* Update if successful */
+	if (err == 0)
+		bus->kso = on ? FALSE : TRUE;
+	else {
+		DHD_ERROR(("%s: Sleep request failed: kso:%d on:%d err:%d\n",
+			__FUNCTION__, bus->kso, on, err));
+		if (!on && retry > 2)
+			bus->kso = FALSE;
+	}
+
+	return err;
+}
+
+/* Turn backplane clock on or off */
+static int
+dhdsdio_htclk(dhd_bus_t *bus, bool on, bool pendok)
+{
+#define HT_AVAIL_ERROR_MAX 10
+	static int ht_avail_error = 0;
+	int err;
+	uint8 clkctl, clkreq, devctl;
+	bcmsdh_info_t *sdh;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	clkctl = 0;
+	sdh = bus->sdh;
+
+
+	if (!KSO_ENAB(bus))
+		return BCME_OK;
+
+	if (SLPAUTO_ENAB(bus)) {
+		bus->clkstate = (on ? CLK_AVAIL : CLK_SDONLY);
+		return BCME_OK;
+	}
+
+	if (on) {
+		/* Request HT Avail */
+		clkreq = bus->alp_only ? SBSDIO_ALP_AVAIL_REQ : SBSDIO_HT_AVAIL_REQ;
+
+
+
+		bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkreq, &err);
+		if (err) {
+			ht_avail_error++;
+			if (ht_avail_error < HT_AVAIL_ERROR_MAX) {
+				DHD_ERROR(("%s: HT Avail request error: %d\n", __FUNCTION__, err));
+			}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+			else if (ht_avail_error == HT_AVAIL_ERROR_MAX) {
+				dhd_os_send_hang_message(bus->dhd);
+			}
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
+			return BCME_ERROR;
+		} else {
+			ht_avail_error = 0;
+		}
+
+
+		/* Check current status */
+		clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+		if (err) {
+			DHD_ERROR(("%s: HT Avail read error: %d\n", __FUNCTION__, err));
+			return BCME_ERROR;
+		}
+
+#if !defined(OOB_INTR_ONLY)
+		/* Go to pending and await interrupt if appropriate */
+		if (!SBSDIO_CLKAV(clkctl, bus->alp_only) && pendok) {
+			/* Allow only clock-available interrupt */
+			devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+			if (err) {
+				DHD_ERROR(("%s: Devctl access error setting CA: %d\n",
+				           __FUNCTION__, err));
+				return BCME_ERROR;
+			}
+
+			devctl |= SBSDIO_DEVCTL_CA_INT_ONLY;
+			bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
+			DHD_INFO(("CLKCTL: set PENDING\n"));
+			bus->clkstate = CLK_PENDING;
+			return BCME_OK;
+		} else
+#endif /* !defined (OOB_INTR_ONLY) */
+		{
+			if (bus->clkstate == CLK_PENDING) {
+				/* Cancel CA-only interrupt filter */
+				devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+				devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
+				bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
+			}
+		}
+
+		/* Otherwise, wait here (polling) for HT Avail */
+		if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
+			SPINWAIT_SLEEP(sdioh_spinwait_sleep,
+				((clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+			                                    SBSDIO_FUNC1_CHIPCLKCSR, &err)),
+			          !SBSDIO_CLKAV(clkctl, bus->alp_only)), PMU_MAX_TRANSITION_DLY);
+		}
+		if (err) {
+			DHD_ERROR(("%s: HT Avail request error: %d\n", __FUNCTION__, err));
+			return BCME_ERROR;
+		}
+		if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
+			DHD_ERROR(("%s: HT Avail timeout (%d): clkctl 0x%02x\n",
+			           __FUNCTION__, PMU_MAX_TRANSITION_DLY, clkctl));
+			return BCME_ERROR;
+		}
+
+		/* Mark clock available */
+		bus->clkstate = CLK_AVAIL;
+		DHD_INFO(("CLKCTL: turned ON\n"));
+
+#if defined(DHD_DEBUG)
+		if (bus->alp_only == TRUE) {
+#if !defined(BCMLXSDMMC)
+			if (!SBSDIO_ALPONLY(clkctl)) {
+				DHD_ERROR(("%s: HT Clock, when ALP Only\n", __FUNCTION__));
+			}
+#endif /* !defined(BCMLXSDMMC) */
+		} else {
+			if (SBSDIO_ALPONLY(clkctl)) {
+				DHD_ERROR(("%s: HT Clock should be on.\n", __FUNCTION__));
+			}
+		}
+#endif /* defined (DHD_DEBUG) */
+
+		bus->activity = TRUE;
+#ifdef DHD_USE_IDLECOUNT
+		bus->idlecount = 0;
+#endif /* DHD_USE_IDLECOUNT */
+	} else {
+		clkreq = 0;
+
+		if (bus->clkstate == CLK_PENDING) {
+			/* Cancel CA-only interrupt filter */
+			devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+			devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
+			bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
+		}
+
+		bus->clkstate = CLK_SDONLY;
+		if (!SR_ENAB(bus)) {
+			bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkreq, &err);
+			DHD_INFO(("CLKCTL: turned OFF\n"));
+			if (err) {
+				DHD_ERROR(("%s: Failed access turning clock off: %d\n",
+				           __FUNCTION__, err));
+				return BCME_ERROR;
+			}
+		}
+	}
+	return BCME_OK;
+}
+
+/* Change idle/active SD state */
+static int
+dhdsdio_sdclk(dhd_bus_t *bus, bool on)
+{
+	int err;
+	int32 iovalue;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (on) {
+		if (bus->idleclock == DHD_IDLE_STOP) {
+			/* Turn on clock and restore mode */
+			iovalue = 1;
+			err = bcmsdh_iovar_op(bus->sdh, "sd_clock", NULL, 0,
+			                      &iovalue, sizeof(iovalue), TRUE);
+			if (err) {
+				DHD_ERROR(("%s: error enabling sd_clock: %d\n",
+				           __FUNCTION__, err));
+				return BCME_ERROR;
+			}
+
+			iovalue = bus->sd_mode;
+			err = bcmsdh_iovar_op(bus->sdh, "sd_mode", NULL, 0,
+			                      &iovalue, sizeof(iovalue), TRUE);
+			if (err) {
+				DHD_ERROR(("%s: error changing sd_mode: %d\n",
+				           __FUNCTION__, err));
+				return BCME_ERROR;
+			}
+		} else if (bus->idleclock != DHD_IDLE_ACTIVE) {
+			/* Restore clock speed */
+			iovalue = bus->sd_divisor;
+			err = bcmsdh_iovar_op(bus->sdh, "sd_divisor", NULL, 0,
+			                      &iovalue, sizeof(iovalue), TRUE);
+			if (err) {
+				DHD_ERROR(("%s: error restoring sd_divisor: %d\n",
+				           __FUNCTION__, err));
+				return BCME_ERROR;
+			}
+		}
+		bus->clkstate = CLK_SDONLY;
+	} else {
+		/* Stop or slow the SD clock itself */
+		if ((bus->sd_divisor == -1) || (bus->sd_mode == -1)) {
+			DHD_TRACE(("%s: can't idle clock, divisor %d mode %d\n",
+			           __FUNCTION__, bus->sd_divisor, bus->sd_mode));
+			return BCME_ERROR;
+		}
+		if (bus->idleclock == DHD_IDLE_STOP) {
+			if (sd1idle) {
+				/* Change to SD1 mode and turn off clock */
+				iovalue = 1;
+				err = bcmsdh_iovar_op(bus->sdh, "sd_mode", NULL, 0,
+				                      &iovalue, sizeof(iovalue), TRUE);
+				if (err) {
+					DHD_ERROR(("%s: error changing sd_clock: %d\n",
+					           __FUNCTION__, err));
+					return BCME_ERROR;
+				}
+			}
+
+			iovalue = 0;
+			err = bcmsdh_iovar_op(bus->sdh, "sd_clock", NULL, 0,
+			                      &iovalue, sizeof(iovalue), TRUE);
+			if (err) {
+				DHD_ERROR(("%s: error disabling sd_clock: %d\n",
+				           __FUNCTION__, err));
+				return BCME_ERROR;
+			}
+		} else if (bus->idleclock != DHD_IDLE_ACTIVE) {
+			/* Set divisor to idle value */
+			iovalue = bus->idleclock;
+			err = bcmsdh_iovar_op(bus->sdh, "sd_divisor", NULL, 0,
+			                      &iovalue, sizeof(iovalue), TRUE);
+			if (err) {
+				DHD_ERROR(("%s: error changing sd_divisor: %d\n",
+				           __FUNCTION__, err));
+				return BCME_ERROR;
+			}
+		}
+		bus->clkstate = CLK_NONE;
+	}
+
+	return BCME_OK;
+}
+
+/* Transition SD and backplane clock readiness */
+static int
+dhdsdio_clkctl(dhd_bus_t *bus, uint target, bool pendok)
+{
+	int ret = BCME_OK;
+#ifdef DHD_DEBUG
+	uint oldstate = bus->clkstate;
+#endif /* DHD_DEBUG */
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	/* Early exit if we're already there */
+	if (bus->clkstate == target) {
+		if (target == CLK_AVAIL) {
+			dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms);
+			bus->activity = TRUE;
+#ifdef DHD_USE_IDLECOUNT
+			bus->idlecount = 0;
+#endif /* DHD_USE_IDLECOUNT */
+		}
+		return ret;
+	}
+
+	switch (target) {
+	case CLK_AVAIL:
+		/* Make sure SD clock is available */
+		if (bus->clkstate == CLK_NONE)
+			dhdsdio_sdclk(bus, TRUE);
+		/* Now request HT Avail on the backplane */
+		ret = dhdsdio_htclk(bus, TRUE, pendok);
+		if (ret == BCME_OK) {
+			dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms);
+		bus->activity = TRUE;
+#ifdef DHD_USE_IDLECOUNT
+			bus->idlecount = 0;
+#endif /* DHD_USE_IDLECOUNT */
+		}
+		break;
+
+	case CLK_SDONLY:
+		/* Remove HT request, or bring up SD clock */
+		if (bus->clkstate == CLK_NONE)
+			ret = dhdsdio_sdclk(bus, TRUE);
+		else if (bus->clkstate == CLK_AVAIL)
+			ret = dhdsdio_htclk(bus, FALSE, FALSE);
+		else
+			DHD_ERROR(("dhdsdio_clkctl: request for %d -> %d\n",
+			           bus->clkstate, target));
+		if (ret == BCME_OK) {
+			dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms);
+		}
+		break;
+
+	case CLK_NONE:
+		/* Make sure to remove HT request */
+		if (bus->clkstate == CLK_AVAIL)
+			ret = dhdsdio_htclk(bus, FALSE, FALSE);
+		/* Now remove the SD clock */
+		ret = dhdsdio_sdclk(bus, FALSE);
+#ifdef DHD_DEBUG
+		if (dhd_console_ms == 0)
+#endif /* DHD_DEBUG */
+		if (bus->poll == 0)
+			dhd_os_wd_timer(bus->dhd, 0);
+		break;
+	}
+#ifdef DHD_DEBUG
+	DHD_INFO(("dhdsdio_clkctl: %d -> %d\n", oldstate, bus->clkstate));
+#endif /* DHD_DEBUG */
+
+	return ret;
+}
+
+static int
+dhdsdio_bussleep(dhd_bus_t *bus, bool sleep)
+{
+	int err = 0;
+	bcmsdh_info_t *sdh = bus->sdh;
+	sdpcmd_regs_t *regs = bus->regs;
+	uint retries = 0;
+
+	DHD_INFO(("dhdsdio_bussleep: request %s (currently %s)\n",
+	          (sleep ? "SLEEP" : "WAKE"),
+	          (bus->sleeping ? "SLEEP" : "WAKE")));
+
+	if (bus->dhd->hang_was_sent)
+		return BCME_ERROR;
+
+	/* Done if we're already in the requested state */
+	if (sleep == bus->sleeping)
+		return BCME_OK;
+
+	/* Going to sleep: set the alarm and turn off the lights... */
+	if (sleep) {
+		/* Don't sleep if something is pending */
+		if (bus->dpc_sched || bus->rxskip || pktq_len(&bus->txq))
+			return BCME_BUSY;
+
+
+		if (!SLPAUTO_ENAB(bus)) {
+			/* Disable SDIO interrupts (no longer interested) */
+			bcmsdh_intr_disable(bus->sdh);
+
+			/* Make sure the controller has the bus up */
+			dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+			/* Tell device to start using OOB wakeup */
+			W_SDREG(SMB_USE_OOB, &regs->tosbmailbox, retries);
+			if (retries > retry_limit)
+				DHD_ERROR(("CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n"));
+
+			/* Turn off our contribution to the HT clock request */
+			dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+
+			bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+				SBSDIO_FORCE_HW_CLKREQ_OFF, NULL);
+
+			/* Isolate the bus */
+			if (bus->sih->chip != BCM4329_CHIP_ID &&
+				bus->sih->chip != BCM4319_CHIP_ID) {
+				bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL,
+					SBSDIO_DEVCTL_PADS_ISO, NULL);
+			}
+		} else {
+			/* Leave interrupts enabled since device can exit sleep and
+			 * interrupt host
+			 */
+			err = dhdsdio_clk_devsleep_iovar(bus, TRUE /* sleep */);
+		}
+
+		/* Change state */
+		bus->sleeping = TRUE;
+		wake_up(&bus->bus_sleep);
+	} else {
+		/* Waking up: bus power up is ok, set local state */
+
+		if (!SLPAUTO_ENAB(bus)) {
+			bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, 0, &err);
+
+			/* Force pad isolation off if possible (in case power never toggled) */
+			bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, 0, NULL);
+
+
+			/* Make sure the controller has the bus up */
+			dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+			/* Send misc interrupt to indicate OOB not needed */
+			W_SDREG(0, &regs->tosbmailboxdata, retries);
+			if (retries <= retry_limit)
+				W_SDREG(SMB_DEV_INT, &regs->tosbmailbox, retries);
+
+			if (retries > retry_limit)
+				DHD_ERROR(("CANNOT SIGNAL CHIP TO CLEAR OOB!!\n"));
+
+			/* Make sure we have SD bus access */
+			dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+
+			/* Enable interrupts again */
+			if (bus->intr && (bus->dhd->busstate == DHD_BUS_DATA)) {
+				bus->intdis = FALSE;
+				bcmsdh_intr_enable(bus->sdh);
+			}
+		} else {
+			err = dhdsdio_clk_devsleep_iovar(bus, FALSE /* wake */);
+		}
+
+		if (err == 0) {
+			/* Change state */
+			bus->sleeping = FALSE;
+		}
+	}
+
+	return err;
+}
+
+
+#if defined(OOB_INTR_ONLY)
+void
+dhd_enable_oob_intr(struct dhd_bus *bus, bool enable)
+{
+#if defined(HW_OOB)
+	bcmsdh_enable_hw_oob_intr(bus->sdh, enable);
+#else
+	sdpcmd_regs_t *regs = bus->regs;
+	uint retries = 0;
+
+	dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+	if (enable == TRUE) {
+
+		/* Tell device to start using OOB wakeup */
+		W_SDREG(SMB_USE_OOB, &regs->tosbmailbox, retries);
+		if (retries > retry_limit)
+			DHD_ERROR(("CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n"));
+
+	} else {
+		/* Send misc interrupt to indicate OOB not needed */
+		W_SDREG(0, &regs->tosbmailboxdata, retries);
+		if (retries <= retry_limit)
+			W_SDREG(SMB_DEV_INT, &regs->tosbmailbox, retries);
+	}
+
+	/* Turn off our contribution to the HT clock request */
+	dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+#endif /* !defined(HW_OOB) */
+}
+#endif
+
+int
+dhd_bus_txdata(struct dhd_bus *bus, void *pkt)
+{
+	int ret = BCME_ERROR;
+	osl_t *osh;
+	uint datalen, prec;
+#if defined(DHD_TX_DUMP) || defined(DHD_8021X_DUMP)
+	uint8 *dump_data;
+	uint16 protocol;
+#endif /* DHD_TX_DUMP || DHD_8021X_DUMP */
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	osh = bus->dhd->osh;
+	datalen = PKTLEN(osh, pkt);
+
+#ifdef SDTEST
+	/* Push the test header if doing loopback */
+	if (bus->ext_loop) {
+		uint8* data;
+		PKTPUSH(osh, pkt, SDPCM_TEST_HDRLEN);
+		data = PKTDATA(osh, pkt);
+		*data++ = SDPCM_TEST_ECHOREQ;
+		*data++ = (uint8)bus->loopid++;
+		*data++ = (datalen >> 0);
+		*data++ = (datalen >> 8);
+		datalen += SDPCM_TEST_HDRLEN;
+	}
+#else /* SDTEST */
+	BCM_REFERENCE(datalen);
+#endif /* SDTEST */
+
+#if defined(DHD_TX_DUMP) || defined(DHD_8021X_DUMP)
+	dump_data = PKTDATA(osh, pkt);
+	dump_data += 4; /* skip 4 bytes header */
+	protocol = (dump_data[12] << 8) | dump_data[13];
+
+	if (protocol == ETHER_TYPE_802_1X) {
+		DHD_ERROR(("ETHER_TYPE_802_1X [TX]: ver %d, type %d, replay %d\n",
+			dump_data[14], dump_data[15], dump_data[30]));
+	}
+#endif /* DHD_TX_DUMP || DHD_8021X_DUMP */
+
+#if defined(DHD_TX_DUMP) && defined(DHD_TX_FULL_DUMP)
+	{
+		int i;
+		DHD_ERROR(("TX DUMP\n"));
+
+		for (i = 0; i < (datalen - 4); i++) {
+			DHD_ERROR(("%02X ", dump_data[i]));
+			if ((i & 15) == 15)
+				printk("\n");
+		}
+		DHD_ERROR(("\n"));
+	}
+#endif /* DHD_TX_DUMP && DHD_TX_FULL_DUMP */
+
+	prec = PRIO2PREC((PKTPRIO(pkt) & PRIOMASK));
+
+	/* Check for existing queue, current flow-control, pending event, or pending clock */
+	if (dhd_deferred_tx || bus->fcstate || pktq_len(&bus->txq) || bus->dpc_sched ||
+	    (!DATAOK(bus)) || (bus->flowcontrol & NBITVAL(prec)) ||
+	    (bus->clkstate != CLK_AVAIL)) {
+		bool deq_ret;
+		int pkq_len;
+
+		DHD_TRACE(("%s: deferring pktq len %d\n", __FUNCTION__, pktq_len(&bus->txq)));
+		bus->fcqueued++;
+
+		/* Priority based enq */
+		dhd_os_sdlock_txq(bus->dhd);
+		deq_ret = dhd_prec_enq(bus->dhd, &bus->txq, pkt, prec);
+		dhd_os_sdunlock_txq(bus->dhd);
+
+		if (!deq_ret) {
+#ifdef PROP_TXSTATUS
+			if (DHD_PKTTAG_WLFCPKT(PKTTAG(pkt)) == 0)
+#endif /* PROP_TXSTATUS */
+			{
+#ifdef DHDTCPACK_SUPPRESS
+				if (dhd_tcpack_check_xmit(bus->dhd, pkt) == BCME_ERROR) {
+					DHD_ERROR(("%s %d: tcpack_suppress ERROR!!! Stop using\n",
+						__FUNCTION__, __LINE__));
+					dhd_tcpack_suppress_set(bus->dhd, TCPACK_SUP_OFF);
+				}
+#endif /* DHDTCPACK_SUPPRESS */
+				dhd_txcomplete(bus->dhd, pkt, FALSE);
+				PKTFREE(osh, pkt, TRUE);
+			}
+			ret = BCME_NORESOURCE;
+		} else
+			ret = BCME_OK;
+
+		dhd_os_sdlock_txq(bus->dhd);
+		pkq_len = pktq_len(&bus->txq);
+		dhd_os_sdunlock_txq(bus->dhd);
+		if (pkq_len >= FCHI) {
+			bool wlfc_enabled = FALSE;
+#ifdef PROP_TXSTATUS
+			wlfc_enabled = (dhd_wlfc_flowcontrol(bus->dhd, ON, FALSE) !=
+				WLFC_UNSUPPORTED);
+#endif
+			if (!wlfc_enabled && dhd_doflow) {
+				dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON);
+			}
+		}
+
+#ifdef DHD_DEBUG
+		dhd_os_sdlock_txq(bus->dhd);
+		if (pktq_plen(&bus->txq, prec) > qcount[prec])
+			qcount[prec] = pktq_plen(&bus->txq, prec);
+		dhd_os_sdunlock_txq(bus->dhd);
+#endif
+
+		/* Schedule DPC if needed to send queued packet(s) */
+		if (dhd_deferred_tx && !bus->dpc_sched) {
+			bus->dpc_sched = TRUE;
+			dhd_sched_dpc(bus->dhd);
+		}
+	} else {
+		int chan = SDPCM_DATA_CHANNEL;
+
+#ifdef SDTEST
+		chan = (bus->ext_loop ? SDPCM_TEST_CHANNEL : SDPCM_DATA_CHANNEL);
+#endif
+		/* Lock: we're about to use shared data/code (and SDIO) */
+		dhd_os_sdlock(bus->dhd);
+
+		/* Otherwise, send it now */
+		BUS_WAKE(bus);
+		/* Make sure back plane ht clk is on, no pending allowed */
+		dhdsdio_clkctl(bus, CLK_AVAIL, TRUE);
+
+		ret = dhdsdio_txpkt(bus, chan, &pkt, 1, TRUE);
+
+		if (ret != BCME_OK)
+			bus->dhd->tx_errors++;
+		else
+			bus->dhd->dstats.tx_bytes += datalen;
+
+		if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+			bus->activity = FALSE;
+			dhdsdio_clkctl(bus, CLK_NONE, TRUE);
+		}
+
+		dhd_os_sdunlock(bus->dhd);
+	}
+
+	return ret;
+}
+
+/* align packet data pointer and packet length to n-byte boundary, process packet headers,
+ * a new packet may be allocated if there is not enough head and/or tail from for padding.
+ * the caller is responsible for updating the glom size in the head packet (when glom is
+ * used)
+ *
+ * pad_pkt_len: returns the length of extra padding needed from the padding packet, this parameter
+ * is taken in tx glom mode only
+ *
+ * new_pkt: out, pointer of the new packet allocated due to insufficient head room for alignment
+ * padding, NULL if not needed, the caller is responsible for freeing the new packet
+ *
+ * return: positive value - length of the packet, including head and tail padding
+ *		   negative value - errors
+ */
+static int dhdsdio_txpkt_preprocess(dhd_bus_t *bus, void *pkt, int chan, int txseq,
+	int prev_chain_total_len, bool last_chained_pkt,
+	int *pad_pkt_len, void **new_pkt)
+{
+	osl_t *osh;
+	uint8 *frame;
+	int pkt_len;
+	int modulo;
+	int head_padding;
+	int tail_padding = 0;
+	uint32 swheader;
+	uint32 swhdr_offset;
+	bool alloc_new_pkt = FALSE;
+	uint8 sdpcm_hdrlen = bus->txglom_enable ? SDPCM_HDRLEN_TXGLOM : SDPCM_HDRLEN;
+
+	*new_pkt = NULL;
+	osh = bus->dhd->osh;
+
+#ifdef DHDTCPACK_SUPPRESS
+	if (dhd_tcpack_check_xmit(bus->dhd, pkt) == BCME_ERROR) {
+		DHD_ERROR(("%s %d: tcpack_suppress ERROR!!! Stop using it\n",
+			__FUNCTION__, __LINE__));
+		dhd_tcpack_suppress_set(bus->dhd, TCPACK_SUP_OFF);
+	}
+#endif /* DHDTCPACK_SUPPRESS */
+
+	/* Add space for the SDPCM hardware/software headers */
+	PKTPUSH(osh, pkt, sdpcm_hdrlen);
+	ASSERT(ISALIGNED((uintptr)PKTDATA(osh, pkt), 2));
+
+	frame = (uint8*)PKTDATA(osh, pkt);
+	pkt_len = (uint16)PKTLEN(osh, pkt);
+
+#ifdef WLMEDIA_HTSF
+	frame = (uint8*)PKTDATA(osh, pkt);
+	if (PKTLEN(osh, pkt) >= 100) {
+		htsf_ts = (htsfts_t*) (frame + HTSF_HOSTOFFSET + 12);
+		if (htsf_ts->magic == HTSFMAGIC) {
+			htsf_ts->c20 = get_cycles();
+			htsf_ts->t20 = dhd_get_htsf(bus->dhd->info, 0);
+		}
+	}
+#endif /* WLMEDIA_HTSF */
+#ifdef DHD_DEBUG
+	if (PKTPRIO(pkt) < ARRAYSIZE(tx_packets))
+		tx_packets[PKTPRIO(pkt)]++;
+#endif /* DHD_DEBUG */
+
+	/* align the data pointer, allocate a new packet if there is not enough space (new
+	 * packet data pointer will be aligned thus no padding will be needed)
+	 */
+	head_padding = (ulong)frame % DHD_SDALIGN;
+	if (PKTHEADROOM(osh, pkt) < head_padding) {
+		head_padding = 0;
+		alloc_new_pkt = TRUE;
+	} else {
+		uint cur_chain_total_len;
+		int chain_tail_padding = 0;
+
+		/* All packets need to be aligned by DHD_SDALIGN */
+		modulo = (pkt_len + head_padding) % DHD_SDALIGN;
+		tail_padding = modulo > 0 ? (DHD_SDALIGN - modulo) : 0;
+
+		/* Total pkt chain length needs to be aligned by block size,
+		 * unless it is a single pkt chain with total length less than one block size,
+		 * which we prefer sending by byte mode.
+		 *
+		 * Do the chain alignment here if
+		 * 1. This is the last pkt of the chain of multiple pkts or a single pkt.
+		 * 2-1. This chain is of multiple pkts, or
+		 * 2-2. This is a single pkt whose size is longer than one block size.
+		 */
+		cur_chain_total_len = prev_chain_total_len +
+			(head_padding + pkt_len + tail_padding);
+		if (last_chained_pkt && bus->blocksize != 0 &&
+			(cur_chain_total_len > (int)bus->blocksize || prev_chain_total_len > 0)) {
+			modulo = cur_chain_total_len % bus->blocksize;
+			chain_tail_padding = modulo > 0 ? (bus->blocksize - modulo) : 0;
+		}
+
+#ifdef DHDENABLE_TAILPAD
+		if (PKTTAILROOM(osh, pkt) < tail_padding) {
+			/* We don't have tail room to align by DHD_SDALIGN */
+			alloc_new_pkt = TRUE;
+			bus->tx_tailpad_pktget++;
+		} else if (PKTTAILROOM(osh, pkt) < tail_padding + chain_tail_padding) {
+			/* We have tail room for tail_padding of this pkt itself, but not for
+			 * total pkt chain alignment by block size.
+			 * Use the padding packet to avoid memory copy if applicable,
+			 * otherwise, just allocate a new pkt.
+			 */
+			if (bus->pad_pkt) {
+				*pad_pkt_len = chain_tail_padding;
+				bus->tx_tailpad_chain++;
+			} else {
+				alloc_new_pkt = TRUE;
+				bus->tx_tailpad_pktget++;
+			}
+		} else
+		/* This last pkt's tailroom is sufficient to hold both tail_padding
+		 * of the pkt itself and chain_tail_padding of total pkt chain
+		 */
+#endif /* DHDENABLE_TAILPAD */
+		tail_padding += chain_tail_padding;
+	}
+
+	DHD_INFO(("%s sdhdr len + orig_pkt_len %d h_pad %d t_pad %d pad_pkt_len %d\n",
+		__FUNCTION__, pkt_len, head_padding, tail_padding, *pad_pkt_len));
+
+	if (alloc_new_pkt) {
+		void *tmp_pkt;
+		int newpkt_size;
+		int cur_total_len;
+
+		ASSERT(*pad_pkt_len == 0);
+
+		DHD_INFO(("%s allocating new packet for padding\n", __FUNCTION__));
+
+		/* head pointer is aligned now, no padding needed */
+		head_padding = 0;
+
+		/* update the tail padding as it depends on the head padding, since a new packet is
+		 * allocated, the head padding is non longer needed and packet length is chagned
+		 */
+
+		cur_total_len = prev_chain_total_len + pkt_len;
+		if (last_chained_pkt && bus->blocksize != 0 &&
+			(cur_total_len > (int)bus->blocksize || prev_chain_total_len > 0)) {
+			modulo = cur_total_len % bus->blocksize;
+			tail_padding = modulo > 0 ? (bus->blocksize - modulo) : 0;
+		}
+		else {
+			modulo = pkt_len % DHD_SDALIGN;
+			tail_padding = modulo > 0 ? (DHD_SDALIGN - modulo) : 0;
+		}
+
+		newpkt_size = PKTLEN(osh, pkt) + bus->blocksize + DHD_SDALIGN;
+		bus->dhd->tx_realloc++;
+		tmp_pkt = PKTGET(osh, newpkt_size, TRUE);
+		if (tmp_pkt == NULL) {
+			DHD_ERROR(("failed to alloc new %d byte packet\n", newpkt_size));
+			return BCME_NOMEM;
+		}
+		PKTALIGN(osh, tmp_pkt, PKTLEN(osh, pkt), DHD_SDALIGN);
+		bcopy(PKTDATA(osh, pkt), PKTDATA(osh, tmp_pkt), PKTLEN(osh, pkt));
+		*new_pkt = tmp_pkt;
+		pkt = tmp_pkt;
+	}
+
+	if (head_padding)
+		PKTPUSH(osh, pkt, head_padding);
+
+	frame = (uint8*)PKTDATA(osh, pkt);
+	bzero(frame, head_padding + sdpcm_hdrlen);
+	pkt_len = (uint16)PKTLEN(osh, pkt);
+
+	/* the header has the followming format
+	 * 4-byte HW frame tag: length, ~length (for glom this is the total length)
+	 *
+	 * 8-byte HW extesion flags (glom mode only) as the following:
+	 *			2-byte packet length, excluding HW tag and padding
+	 *			2-byte frame channel and frame flags (e.g. next frame following)
+	 *			2-byte header length
+	 *			2-byte tail padding size
+	 *
+	 * 8-byte SW frame tags as the following
+	 *			4-byte flags: host tx seq, channel, data offset
+	 *			4-byte flags: TBD
+	 */
+
+	swhdr_offset = SDPCM_FRAMETAG_LEN;
+
+	/* hardware frame tag:
+	 *
+	 * in tx-glom mode, dongle only checks the hardware frame tag in the first
+	 * packet and sees it as the total lenght of the glom (including tail padding),
+	 * for each packet in the glom, the packet length needs to be updated, (see
+	 * below PKTSETLEN)
+	 *
+	 * in non tx-glom mode, PKTLEN still need to include tail padding as to be
+	 * referred to in sdioh_request_buffer(). The tail length will be excluded in
+	 * dhdsdio_txpkt_postprocess().
+	 */
+	*(uint16*)frame = (uint16)htol16(pkt_len);
+	*(((uint16*)frame) + 1) = (uint16)htol16(~pkt_len);
+	pkt_len += tail_padding;
+
+	/* hardware extesion flags */
+	if (bus->txglom_enable) {
+		uint32 hwheader1;
+		uint32 hwheader2;
+
+		swhdr_offset += SDPCM_HWEXT_LEN;
+		hwheader1 = (pkt_len - SDPCM_FRAMETAG_LEN - tail_padding) |
+			(last_chained_pkt << 24);
+		hwheader2 = (tail_padding) << 16;
+		htol32_ua_store(hwheader1, frame + SDPCM_FRAMETAG_LEN);
+		htol32_ua_store(hwheader2, frame + SDPCM_FRAMETAG_LEN + 4);
+	}
+	PKTSETLEN((osh), (pkt), (pkt_len));
+
+	/* software frame tags */
+	swheader = ((chan << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK)
+		| (txseq % SDPCM_SEQUENCE_WRAP) |
+		(((head_padding + sdpcm_hdrlen) << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK);
+	htol32_ua_store(swheader, frame + swhdr_offset);
+	htol32_ua_store(0, frame + swhdr_offset + sizeof(swheader));
+
+	return pkt_len;
+}
+
+static int dhdsdio_txpkt_postprocess(dhd_bus_t *bus, void *pkt)
+{
+	osl_t *osh;
+	uint8 *frame;
+	int data_offset;
+	int tail_padding;
+	int swhdr_offset = SDPCM_FRAMETAG_LEN + (bus->txglom_enable ? SDPCM_HWEXT_LEN : 0);
+
+	(void)osh;
+	osh = bus->dhd->osh;
+
+	/* restore pkt buffer pointer, but keeps the header pushed by dhd_prot_hdrpush */
+	frame = (uint8*)PKTDATA(osh, pkt);
+
+	DHD_INFO(("%s PKTLEN before postprocess %d",
+		__FUNCTION__, PKTLEN(osh, pkt)));
+
+	/* PKTLEN still includes tail_padding, so exclude it.
+	 * We shall have head_padding + original pkt_len for PKTLEN afterwards.
+	 */
+	if (bus->txglom_enable) {
+		/* txglom pkts have tail_padding length in HW ext header */
+		tail_padding = ltoh32_ua(frame + SDPCM_FRAMETAG_LEN + 4) >> 16;
+		PKTSETLEN(osh, pkt, PKTLEN(osh, pkt) - tail_padding);
+		DHD_INFO((" txglom pkt: tail_padding %d PKTLEN %d\n",
+			tail_padding, PKTLEN(osh, pkt)));
+	} else {
+		/* non-txglom pkts have head_padding + original pkt length in HW frame tag.
+		 * We cannot refer to this field for txglom pkts as the first pkt of the chain will
+		 * have the field for the total length of the chain.
+		 */
+		PKTSETLEN(osh, pkt, *(uint16*)frame);
+		DHD_INFO((" non-txglom pkt: HW frame tag len %d after PKTLEN %d\n",
+			*(uint16*)frame, PKTLEN(osh, pkt)));
+	}
+
+	data_offset = ltoh32_ua(frame + swhdr_offset);
+	data_offset = (data_offset & SDPCM_DOFFSET_MASK) >> SDPCM_DOFFSET_SHIFT;
+	/* Get rid of sdpcm header + head_padding */
+	PKTPULL(osh, pkt, data_offset);
+
+	DHD_INFO(("%s data_offset %d, PKTLEN %d\n",
+		__FUNCTION__, data_offset, PKTLEN(osh, pkt)));
+
+	return BCME_OK;
+}
+
+static int dhdsdio_txpkt(dhd_bus_t *bus, uint chan, void** pkts, int num_pkt, bool free_pkt)
+{
+	int i;
+	int ret = 0;
+	osl_t *osh;
+	bcmsdh_info_t *sdh;
+	void *pkt = NULL;
+	void *pkt_chain;
+	int total_len = 0;
+	void *head_pkt = NULL;
+	void *prev_pkt = NULL;
+	int pad_pkt_len = 0;
+	int new_pkt_num = 0;
+	void *new_pkts[MAX_TX_PKTCHAIN_CNT];
+	bool wlfc_enabled = FALSE;
+
+	if (bus->dhd->dongle_reset)
+		return BCME_NOTREADY;
+
+	sdh = bus->sdh;
+	osh = bus->dhd->osh;
+	/* init new_pkts[0] to make some compiler happy, not necessary as we check new_pkt_num */
+	new_pkts[0] = NULL;
+
+	for (i = 0; i < num_pkt; i++) {
+		int pkt_len;
+		bool last_pkt;
+		void *new_pkt = NULL;
+
+		pkt = pkts[i];
+		ASSERT(pkt);
+		last_pkt = (i == num_pkt - 1);
+		pkt_len = dhdsdio_txpkt_preprocess(bus, pkt, chan, bus->tx_seq + i,
+			total_len, last_pkt, &pad_pkt_len, &new_pkt);
+		if (pkt_len <= 0)
+			goto done;
+		if (new_pkt) {
+			pkt = new_pkt;
+			new_pkts[new_pkt_num++] = new_pkt;
+		}
+		total_len += pkt_len;
+
+		PKTSETNEXT(osh, pkt, NULL);
+		/* insert the packet into the list */
+		head_pkt ? PKTSETNEXT(osh, prev_pkt, pkt) : (head_pkt = pkt);
+		prev_pkt = pkt;
+
+	}
+
+	/* Update the HW frame tag (total length) in the first pkt of the glom */
+	if (bus->txglom_enable) {
+		uint8 *frame;
+
+		total_len += pad_pkt_len;
+		frame = (uint8*)PKTDATA(osh, head_pkt);
+		*(uint16*)frame = (uint16)htol16(total_len);
+		*(((uint16*)frame) + 1) = (uint16)htol16(~total_len);
+
+	}
+
+#ifdef DHDENABLE_TAILPAD
+	/* if a padding packet if needed, insert it to the end of the link list */
+	if (pad_pkt_len) {
+		PKTSETLEN(osh, bus->pad_pkt, pad_pkt_len);
+		PKTSETNEXT(osh, pkt, bus->pad_pkt);
+	}
+#endif /* DHDENABLE_TAILPAD */
+
+	/* dhd_bcmsdh_send_buf ignores the buffer pointer if he packet
+	 * parameter is not NULL, for non packet chian we pass NULL pkt pointer
+	 * so it will take the aligned length and buffer pointer.
+	 */
+	pkt_chain = PKTNEXT(osh, head_pkt) ? head_pkt : NULL;
+	ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+		PKTDATA(osh, head_pkt), total_len, pkt_chain, NULL, NULL, TXRETRIES);
+	if (ret == BCME_OK)
+		bus->tx_seq = (bus->tx_seq + num_pkt) % SDPCM_SEQUENCE_WRAP;
+
+	/* if a padding packet was needed, remove it from the link list as it not a data pkt */
+	if (pad_pkt_len && pkt)
+		PKTSETNEXT(osh, pkt, NULL);
+
+done:
+	pkt = head_pkt;
+	while (pkt) {
+		void *pkt_next = PKTNEXT(osh, pkt);
+		PKTSETNEXT(osh, pkt, NULL);
+		dhdsdio_txpkt_postprocess(bus, pkt);
+		pkt = pkt_next;
+	}
+
+	/* new packets might be allocated due to insufficient room for padding, but we
+	 * still have to indicate the original packets to upper layer
+	 */
+	for (i = 0; i < num_pkt; i++) {
+		pkt = pkts[i];
+		wlfc_enabled = FALSE;
+#ifdef PROP_TXSTATUS
+		if (DHD_PKTTAG_WLFCPKT(PKTTAG(pkt))) {
+			wlfc_enabled = (dhd_wlfc_txcomplete(bus->dhd, pkt, ret == 0) !=
+				WLFC_UNSUPPORTED);
+		}
+#endif /* PROP_TXSTATUS */
+		if (!wlfc_enabled) {
+			PKTSETNEXT(osh, pkt, NULL);
+			dhd_txcomplete(bus->dhd, pkt, ret != 0);
+			if (free_pkt)
+				PKTFREE(osh, pkt, TRUE);
+		}
+	}
+
+	for (i = 0; i < new_pkt_num; i++)
+		PKTFREE(osh, new_pkts[i], TRUE);
+
+	return ret;
+}
+
+static uint
+dhdsdio_sendfromq(dhd_bus_t *bus, uint maxframes)
+{
+	uint cnt = 0;
+	uint8 tx_prec_map;
+	uint16 txpktqlen = 0;
+	uint32 intstatus = 0;
+	uint retries = 0;
+	osl_t *osh;
+	uint datalen = 0;
+	dhd_pub_t *dhd = bus->dhd;
+	sdpcmd_regs_t *regs = bus->regs;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (!KSO_ENAB(bus)) {
+		DHD_ERROR(("%s: Device asleep\n", __FUNCTION__));
+		return BCME_NODEVICE;
+	}
+
+	osh = dhd->osh;
+	tx_prec_map = ~bus->flowcontrol;
+	for (cnt = 0; (cnt < maxframes) && DATAOK(bus);) {
+		int i;
+		int num_pkt = 1;
+		void *pkts[MAX_TX_PKTCHAIN_CNT];
+		int prec_out;
+
+		dhd_os_sdlock_txq(bus->dhd);
+		if (bus->txglom_enable) {
+			num_pkt = MIN((uint32)DATABUFCNT(bus), (uint32)bus->txglomsize);
+			num_pkt = MIN(num_pkt, ARRAYSIZE(pkts));
+		}
+		num_pkt = MIN(num_pkt, pktq_mlen(&bus->txq, tx_prec_map));
+		for (i = 0; i < num_pkt; i++) {
+			pkts[i] = pktq_mdeq(&bus->txq, ~bus->flowcontrol, &prec_out);
+			if (!pkts[i]) {
+				DHD_ERROR(("%s: pktq_mlen non-zero when no pkt\n",
+					__FUNCTION__));
+				ASSERT(0);
+				break;
+			}
+			PKTORPHAN(pkts[i]);
+			datalen += PKTLEN(osh, pkts[i]);
+		}
+		dhd_os_sdunlock_txq(bus->dhd);
+
+		if (i == 0)
+			break;
+		if (dhdsdio_txpkt(bus, SDPCM_DATA_CHANNEL, pkts, i, TRUE) != BCME_OK)
+			dhd->tx_errors++;
+		else
+			dhd->dstats.tx_bytes += datalen;
+		cnt += i;
+
+		/* In poll mode, need to check for other events */
+		if (!bus->intr && cnt)
+		{
+			/* Check device status, signal pending interrupt */
+			R_SDREG(intstatus, &regs->intstatus, retries);
+			bus->f2txdata++;
+			if (bcmsdh_regfail(bus->sdh))
+				break;
+			if (intstatus & bus->hostintmask)
+				bus->ipend = TRUE;
+		}
+
+	}
+
+	dhd_os_sdlock_txq(bus->dhd);
+	txpktqlen = pktq_len(&bus->txq);
+	dhd_os_sdunlock_txq(bus->dhd);
+
+	/* Do flow-control if needed */
+	if (dhd->up && (dhd->busstate == DHD_BUS_DATA) && (txpktqlen < FCLOW)) {
+		bool wlfc_enabled = FALSE;
+#ifdef PROP_TXSTATUS
+		wlfc_enabled = (dhd_wlfc_flowcontrol(dhd, OFF, TRUE) != WLFC_UNSUPPORTED);
+#endif
+		if (!wlfc_enabled && dhd_doflow && dhd->txoff) {
+			dhd_txflowcontrol(dhd, ALL_INTERFACES, OFF);
+		}
+	}
+
+	return cnt;
+}
+
+static void
+dhdsdio_sendpendctl(dhd_bus_t *bus)
+{
+	bcmsdh_info_t *sdh = bus->sdh;
+	int ret;
+	uint8* frame_seq = bus->ctrl_frame_buf + SDPCM_FRAMETAG_LEN;
+
+	if (bus->txglom_enable)
+		frame_seq += SDPCM_HWEXT_LEN;
+
+	if (*frame_seq != bus->tx_seq) {
+		DHD_INFO(("%s IOCTL frame seq lag detected!"
+			" frm_seq:%d != bus->tx_seq:%d, corrected\n",
+			__FUNCTION__, *frame_seq, bus->tx_seq));
+		*frame_seq = bus->tx_seq;
+	}
+
+	ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+		(uint8 *)bus->ctrl_frame_buf, (uint32)bus->ctrl_frame_len,
+		NULL, NULL, NULL, 1);
+	if (ret == BCME_OK)
+		bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
+
+	bus->ctrl_frame_stat = FALSE;
+	dhd_wait_event_wakeup(bus->dhd);
+}
+
+int
+dhd_bus_txctl(struct dhd_bus *bus, uchar *msg, uint msglen)
+{
+	static int err_nodevice = 0;
+	uint8 *frame;
+	uint16 len;
+	uint32 swheader;
+	bcmsdh_info_t *sdh = bus->sdh;
+	uint8 doff = 0;
+	int ret = -1;
+	uint8 sdpcm_hdrlen = bus->txglom_enable ? SDPCM_HDRLEN_TXGLOM : SDPCM_HDRLEN;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (bus->dhd->dongle_reset)
+		return -EIO;
+
+	/* Back the pointer to make a room for bus header */
+	frame = msg - sdpcm_hdrlen;
+	len = (msglen += sdpcm_hdrlen);
+
+	/* Add alignment padding (optional for ctl frames) */
+	if (dhd_alignctl) {
+		if ((doff = ((uintptr)frame % DHD_SDALIGN))) {
+			frame -= doff;
+			len += doff;
+			msglen += doff;
+			bzero(frame, doff + sdpcm_hdrlen);
+		}
+		ASSERT(doff < DHD_SDALIGN);
+	}
+	doff += sdpcm_hdrlen;
+
+	/* Round send length to next SDIO block */
+	if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
+		uint16 pad = bus->blocksize - (len % bus->blocksize);
+		if ((pad <= bus->roundup) && (pad < bus->blocksize))
+			len += pad;
+	} else if (len % DHD_SDALIGN) {
+		len += DHD_SDALIGN - (len % DHD_SDALIGN);
+	}
+
+	/* Satisfy length-alignment requirements */
+	if (forcealign && (len & (ALIGNMENT - 1)))
+		len = ROUNDUP(len, ALIGNMENT);
+
+	ASSERT(ISALIGNED((uintptr)frame, 2));
+
+
+	/* Need to lock here to protect txseq and SDIO tx calls */
+	dhd_os_sdlock(bus->dhd);
+
+	BUS_WAKE(bus);
+
+	/* Make sure backplane clock is on */
+	dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+	/* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */
+	*(uint16*)frame = htol16((uint16)msglen);
+	*(((uint16*)frame) + 1) = htol16(~msglen);
+
+	if (bus->txglom_enable) {
+		uint32 hwheader1, hwheader2;
+		/* Software tag: channel, sequence number, data offset */
+		swheader = ((SDPCM_CONTROL_CHANNEL << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK)
+				| bus->tx_seq
+				| ((doff << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK);
+		htol32_ua_store(swheader, frame + SDPCM_FRAMETAG_LEN + SDPCM_HWEXT_LEN);
+		htol32_ua_store(0, frame + SDPCM_FRAMETAG_LEN
+			+ SDPCM_HWEXT_LEN + sizeof(swheader));
+
+		hwheader1 = (msglen - SDPCM_FRAMETAG_LEN) | (1 << 24);
+		hwheader2 = (len - (msglen)) << 16;
+		htol32_ua_store(hwheader1, frame + SDPCM_FRAMETAG_LEN);
+		htol32_ua_store(hwheader2, frame + SDPCM_FRAMETAG_LEN + 4);
+
+		*(uint16*)frame = htol16(len);
+		*(((uint16*)frame) + 1) = htol16(~(len));
+	} else {
+		/* Software tag: channel, sequence number, data offset */
+		swheader = ((SDPCM_CONTROL_CHANNEL << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK)
+		        | bus->tx_seq | ((doff << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK);
+		htol32_ua_store(swheader, frame + SDPCM_FRAMETAG_LEN);
+		htol32_ua_store(0, frame + SDPCM_FRAMETAG_LEN + sizeof(swheader));
+	}
+	if (!TXCTLOK(bus)) {
+		DHD_INFO(("%s: No bus credit bus->tx_max %d, bus->tx_seq %d\n",
+			__FUNCTION__, bus->tx_max, bus->tx_seq));
+		bus->ctrl_frame_stat = TRUE;
+		/* Send from dpc */
+		bus->ctrl_frame_buf = frame;
+		bus->ctrl_frame_len = len;
+
+		if (!bus->dpc_sched) {
+			bus->dpc_sched = TRUE;
+			dhd_sched_dpc(bus->dhd);
+		}
+		if (bus->ctrl_frame_stat) {
+			dhd_wait_for_event(bus->dhd, &bus->ctrl_frame_stat);
+		}
+
+		if (bus->ctrl_frame_stat == FALSE) {
+			DHD_INFO(("%s: ctrl_frame_stat == FALSE\n", __FUNCTION__));
+			ret = 0;
+		} else {
+			bus->dhd->txcnt_timeout++;
+			if (!bus->dhd->hang_was_sent) {
+				DHD_ERROR(("%s: ctrl_frame_stat == TRUE txcnt_timeout=%d\n",
+					__FUNCTION__, bus->dhd->txcnt_timeout));
+			}
+			ret = -1;
+			bus->ctrl_frame_stat = FALSE;
+			goto done;
+		}
+	}
+
+	bus->dhd->txcnt_timeout = 0;
+	bus->ctrl_frame_stat = TRUE;
+
+	if (ret == -1) {
+#ifdef DHD_DEBUG
+		if (DHD_BYTES_ON() && DHD_CTL_ON()) {
+			prhex("Tx Frame", frame, len);
+		} else if (DHD_HDRS_ON()) {
+			prhex("TxHdr", frame, MIN(len, 16));
+		}
+#endif
+		ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+		                          frame, len, NULL, NULL, NULL, TXRETRIES);
+		if (ret == BCME_OK)
+			bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
+	}
+	bus->ctrl_frame_stat = FALSE;
+
+done:
+	if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+		bus->activity = FALSE;
+		dhdsdio_clkctl(bus, CLK_NONE, TRUE);
+	}
+
+	dhd_os_sdunlock(bus->dhd);
+
+	if (ret)
+		bus->dhd->tx_ctlerrs++;
+	else
+		bus->dhd->tx_ctlpkts++;
+
+	if (bus->dhd->txcnt_timeout >= MAX_CNTL_TX_TIMEOUT)
+		return -ETIMEDOUT;
+
+	if (ret == BCME_NODEVICE)
+		err_nodevice++;
+	else
+		err_nodevice = 0;
+
+	return ret ? err_nodevice >= ERROR_BCME_NODEVICE_MAX ? -ETIMEDOUT : -EIO : 0;
+}
+
+int
+dhd_bus_rxctl(struct dhd_bus *bus, uchar *msg, uint msglen)
+{
+	int timeleft;
+	uint rxlen = 0;
+	bool pending;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (bus->dhd->dongle_reset)
+		return -EIO;
+
+	/* Wait until control frame is available */
+	timeleft = dhd_os_ioctl_resp_wait(bus->dhd, &bus->rxlen, &pending);
+
+	dhd_os_sdlock(bus->dhd);
+	rxlen = bus->rxlen;
+	bcopy(bus->rxctl, msg, MIN(msglen, rxlen));
+	bus->rxlen = 0;
+	dhd_os_sdunlock(bus->dhd);
+
+	if (rxlen) {
+		DHD_CTL(("%s: resumed on rxctl frame, got %d expected %d\n",
+			__FUNCTION__, rxlen, msglen));
+	} else if (timeleft == 0) {
+#ifdef DHD_DEBUG
+		uint32 status, retry = 0;
+		R_SDREG(status, &bus->regs->intstatus, retry);
+		DHD_ERROR(("%s: resumed on timeout, INT status=0x%08X\n",
+			__FUNCTION__, status));
+#else
+		DHD_ERROR(("%s: resumed on timeout\n", __FUNCTION__));
+#endif /* DHD_DEBUG */
+#ifdef DHD_DEBUG
+			dhd_os_sdlock(bus->dhd);
+			dhdsdio_checkdied(bus, NULL, 0);
+			dhd_os_sdunlock(bus->dhd);
+#endif /* DHD_DEBUG */
+	} else if (pending == TRUE) {
+		/* signal pending */
+		DHD_ERROR(("%s: signal pending\n", __FUNCTION__));
+		return -EINTR;
+
+	} else {
+		DHD_CTL(("%s: resumed for unknown reason?\n", __FUNCTION__));
+#ifdef DHD_DEBUG
+		dhd_os_sdlock(bus->dhd);
+		dhdsdio_checkdied(bus, NULL, 0);
+		dhd_os_sdunlock(bus->dhd);
+#endif /* DHD_DEBUG */
+	}
+	if (timeleft == 0) {
+		if (rxlen == 0)
+			bus->dhd->rxcnt_timeout++;
+		DHD_ERROR(("%s: rxcnt_timeout=%d, rxlen=%d\n", __FUNCTION__,
+			bus->dhd->rxcnt_timeout, rxlen));
+	}
+	else
+		bus->dhd->rxcnt_timeout = 0;
+
+	if (rxlen)
+		bus->dhd->rx_ctlpkts++;
+	else
+		bus->dhd->rx_ctlerrs++;
+
+	if (bus->dhd->rxcnt_timeout >= MAX_CNTL_RX_TIMEOUT)
+		return -ETIMEDOUT;
+
+	if (bus->dhd->dongle_trap_occured)
+		return -EREMOTEIO;
+
+	return rxlen ? (int)rxlen : -EIO;
+}
+
+/* IOVar table */
+enum {
+	IOV_INTR = 1,
+	IOV_POLLRATE,
+	IOV_SDREG,
+	IOV_SBREG,
+	IOV_SDCIS,
+	IOV_MEMBYTES,
+	IOV_RAMSIZE,
+	IOV_RAMSTART,
+#ifdef DHD_DEBUG
+	IOV_CHECKDIED,
+	IOV_SERIALCONS,
+#endif /* DHD_DEBUG */
+	IOV_SET_DOWNLOAD_STATE,
+	IOV_SOCRAM_STATE,
+	IOV_FORCEEVEN,
+	IOV_SDIOD_DRIVE,
+	IOV_READAHEAD,
+	IOV_SDRXCHAIN,
+	IOV_ALIGNCTL,
+	IOV_SDALIGN,
+	IOV_DEVRESET,
+	IOV_CPU,
+#if defined(USE_SDIOFIFO_IOVAR)
+	IOV_WATERMARK,
+	IOV_MESBUSYCTRL,
+#endif /* USE_SDIOFIFO_IOVAR */
+#ifdef SDTEST
+	IOV_PKTGEN,
+	IOV_EXTLOOP,
+#endif /* SDTEST */
+	IOV_SPROM,
+	IOV_TXBOUND,
+	IOV_RXBOUND,
+	IOV_TXMINMAX,
+	IOV_IDLETIME,
+	IOV_IDLECLOCK,
+	IOV_SD1IDLE,
+	IOV_SLEEP,
+	IOV_DONGLEISOLATION,
+	IOV_KSO,
+	IOV_DEVSLEEP,
+	IOV_DEVCAP,
+	IOV_VARS,
+#ifdef SOFTAP
+	IOV_FWPATH,
+#endif
+	IOV_TXGLOMSIZE,
+	IOV_TXGLOMMODE,
+	IOV_HANGREPORT,
+	IOV_TXINRX_THRES
+};
+
+const bcm_iovar_t dhdsdio_iovars[] = {
+	{"intr",	IOV_INTR,	0,	IOVT_BOOL,	0 },
+	{"sleep",	IOV_SLEEP,	0,	IOVT_BOOL,	0 },
+	{"pollrate",	IOV_POLLRATE,	0,	IOVT_UINT32,	0 },
+	{"idletime",	IOV_IDLETIME,	0,	IOVT_INT32,	0 },
+	{"idleclock",	IOV_IDLECLOCK,	0,	IOVT_INT32,	0 },
+	{"sd1idle",	IOV_SD1IDLE,	0,	IOVT_BOOL,	0 },
+	{"membytes",	IOV_MEMBYTES,	0,	IOVT_BUFFER,	2 * sizeof(int) },
+	{"ramsize",	IOV_RAMSIZE,	0,	IOVT_UINT32,	0 },
+	{"ramstart",	IOV_RAMSTART,	0,	IOVT_UINT32,	0 },
+	{"dwnldstate",	IOV_SET_DOWNLOAD_STATE,	0,	IOVT_BOOL,	0 },
+	{"socram_state",	IOV_SOCRAM_STATE,	0,	IOVT_BOOL,	0 },
+	{"vars",	IOV_VARS,	0,	IOVT_BUFFER,	0 },
+	{"sdiod_drive",	IOV_SDIOD_DRIVE, 0,	IOVT_UINT32,	0 },
+	{"readahead",	IOV_READAHEAD,	0,	IOVT_BOOL,	0 },
+	{"sdrxchain",	IOV_SDRXCHAIN,	0,	IOVT_BOOL,	0 },
+	{"alignctl",	IOV_ALIGNCTL,	0,	IOVT_BOOL,	0 },
+	{"sdalign",	IOV_SDALIGN,	0,	IOVT_BOOL,	0 },
+	{"devreset",	IOV_DEVRESET,	0,	IOVT_BOOL,	0 },
+#ifdef DHD_DEBUG
+	{"sdreg",	IOV_SDREG,	0,	IOVT_BUFFER,	sizeof(sdreg_t) },
+	{"sbreg",	IOV_SBREG,	0,	IOVT_BUFFER,	sizeof(sdreg_t) },
+	{"sd_cis",	IOV_SDCIS,	0,	IOVT_BUFFER,	DHD_IOCTL_MAXLEN },
+	{"forcealign",	IOV_FORCEEVEN,	0,	IOVT_BOOL,	0 },
+	{"txbound",	IOV_TXBOUND,	0,	IOVT_UINT32,	0 },
+	{"rxbound",	IOV_RXBOUND,	0,	IOVT_UINT32,	0 },
+	{"txminmax",	IOV_TXMINMAX,	0,	IOVT_UINT32,	0 },
+	{"cpu",		IOV_CPU,	0,	IOVT_BOOL,	0 },
+#ifdef DHD_DEBUG
+	{"checkdied",	IOV_CHECKDIED,	0,	IOVT_BUFFER,	0 },
+	{"serial",	IOV_SERIALCONS,	0,	IOVT_UINT32,	0 },
+#endif /* DHD_DEBUG  */
+#endif /* DHD_DEBUG */
+#ifdef SDTEST
+	{"extloop",	IOV_EXTLOOP,	0,	IOVT_BOOL,	0 },
+	{"pktgen",	IOV_PKTGEN,	0,	IOVT_BUFFER,	sizeof(dhd_pktgen_t) },
+#endif /* SDTEST */
+#if defined(USE_SDIOFIFO_IOVAR)
+	{"watermark",	IOV_WATERMARK,	0,	IOVT_UINT32,	0 },
+	{"mesbusyctrl",	IOV_MESBUSYCTRL,	0,	IOVT_UINT32,	0 },
+#endif /* USE_SDIOFIFO_IOVAR */
+	{"devcap", IOV_DEVCAP,	0,	IOVT_UINT32,	0 },
+	{"dngl_isolation", IOV_DONGLEISOLATION,	0,	IOVT_UINT32,	0 },
+	{"kso",	IOV_KSO,	0,	IOVT_UINT32,	0 },
+	{"devsleep", IOV_DEVSLEEP,	0,	IOVT_UINT32,	0 },
+#ifdef SOFTAP
+	{"fwpath", IOV_FWPATH, 0, IOVT_BUFFER, 0 },
+#endif
+	{"txglomsize", IOV_TXGLOMSIZE, 0, IOVT_UINT32, 0 },
+	{"fw_hang_report", IOV_HANGREPORT, 0, IOVT_BOOL, 0 },
+	{"txinrx_thres", IOV_TXINRX_THRES, 0, IOVT_INT32, 0 },
+	{NULL, 0, 0, 0, 0 }
+};
+
+static void
+dhd_dump_pct(struct bcmstrbuf *strbuf, char *desc, uint num, uint div)
+{
+	uint q1, q2;
+
+	if (!div) {
+		bcm_bprintf(strbuf, "%s N/A", desc);
+	} else {
+		q1 = num / div;
+		q2 = (100 * (num - (q1 * div))) / div;
+		bcm_bprintf(strbuf, "%s %d.%02d", desc, q1, q2);
+	}
+}
+
+void
+dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
+{
+	dhd_bus_t *bus = dhdp->bus;
+
+	bcm_bprintf(strbuf, "Bus SDIO structure:\n");
+	bcm_bprintf(strbuf, "hostintmask 0x%08x intstatus 0x%08x sdpcm_ver %d\n",
+	            bus->hostintmask, bus->intstatus, bus->sdpcm_ver);
+	bcm_bprintf(strbuf, "fcstate %d qlen %u tx_seq %d, max %d, rxskip %d rxlen %u rx_seq %d\n",
+	            bus->fcstate, pktq_len(&bus->txq), bus->tx_seq, bus->tx_max, bus->rxskip,
+	            bus->rxlen, bus->rx_seq);
+	bcm_bprintf(strbuf, "intr %d intrcount %u lastintrs %u spurious %u\n",
+	            bus->intr, bus->intrcount, bus->lastintrs, bus->spurious);
+#ifdef DHD_WAKE_STATUS
+	bcm_bprintf(strbuf, "wake %u rxwake %u readctrlwake %u glomwake %u\n",
+	            bcmsdh_get_total_wake(bus->sdh), bus->rxwake,
+	            bus->rcwake, bus->glomwake);
+#endif
+	bcm_bprintf(strbuf, "pollrate %u pollcnt %u regfails %u\n",
+	            bus->pollrate, bus->pollcnt, bus->regfails);
+
+	bcm_bprintf(strbuf, "\nAdditional counters:\n");
+#ifdef DHDENABLE_TAILPAD
+	bcm_bprintf(strbuf, "tx_tailpad_chain %u tx_tailpad_pktget %u\n",
+	            bus->tx_tailpad_chain, bus->tx_tailpad_pktget);
+#endif /* DHDENABLE_TAILPAD */
+	bcm_bprintf(strbuf, "tx_sderrs %u fcqueued %u rxrtx %u rx_toolong %u rxc_errors %u\n",
+	            bus->tx_sderrs, bus->fcqueued, bus->rxrtx, bus->rx_toolong,
+	            bus->rxc_errors);
+	bcm_bprintf(strbuf, "rx_hdrfail %u badhdr %u badseq %u\n",
+	            bus->rx_hdrfail, bus->rx_badhdr, bus->rx_badseq);
+	bcm_bprintf(strbuf, "fc_rcvd %u, fc_xoff %u, fc_xon %u\n",
+	            bus->fc_rcvd, bus->fc_xoff, bus->fc_xon);
+	bcm_bprintf(strbuf, "rxglomfail %u, rxglomframes %u, rxglompkts %u\n",
+	            bus->rxglomfail, bus->rxglomframes, bus->rxglompkts);
+	bcm_bprintf(strbuf, "f2rx (hdrs/data) %u (%u/%u), f2tx %u f1regs %u\n",
+	            (bus->f2rxhdrs + bus->f2rxdata), bus->f2rxhdrs, bus->f2rxdata,
+	            bus->f2txdata, bus->f1regdata);
+	{
+		dhd_dump_pct(strbuf, "\nRx: pkts/f2rd", bus->dhd->rx_packets,
+		             (bus->f2rxhdrs + bus->f2rxdata));
+		dhd_dump_pct(strbuf, ", pkts/f1sd", bus->dhd->rx_packets, bus->f1regdata);
+		dhd_dump_pct(strbuf, ", pkts/sd", bus->dhd->rx_packets,
+		             (bus->f2rxhdrs + bus->f2rxdata + bus->f1regdata));
+		dhd_dump_pct(strbuf, ", pkts/int", bus->dhd->rx_packets, bus->intrcount);
+		bcm_bprintf(strbuf, "\n");
+
+		dhd_dump_pct(strbuf, "Rx: glom pct", (100 * bus->rxglompkts),
+		             bus->dhd->rx_packets);
+		dhd_dump_pct(strbuf, ", pkts/glom", bus->rxglompkts, bus->rxglomframes);
+		bcm_bprintf(strbuf, "\n");
+
+		dhd_dump_pct(strbuf, "Tx: pkts/f2wr", bus->dhd->tx_packets, bus->f2txdata);
+		dhd_dump_pct(strbuf, ", pkts/f1sd", bus->dhd->tx_packets, bus->f1regdata);
+		dhd_dump_pct(strbuf, ", pkts/sd", bus->dhd->tx_packets,
+		             (bus->f2txdata + bus->f1regdata));
+		dhd_dump_pct(strbuf, ", pkts/int", bus->dhd->tx_packets, bus->intrcount);
+		bcm_bprintf(strbuf, "\n");
+
+		dhd_dump_pct(strbuf, "Total: pkts/f2rw",
+		             (bus->dhd->tx_packets + bus->dhd->rx_packets),
+		             (bus->f2txdata + bus->f2rxhdrs + bus->f2rxdata));
+		dhd_dump_pct(strbuf, ", pkts/f1sd",
+		             (bus->dhd->tx_packets + bus->dhd->rx_packets), bus->f1regdata);
+		dhd_dump_pct(strbuf, ", pkts/sd",
+		             (bus->dhd->tx_packets + bus->dhd->rx_packets),
+		             (bus->f2txdata + bus->f2rxhdrs + bus->f2rxdata + bus->f1regdata));
+		dhd_dump_pct(strbuf, ", pkts/int",
+		             (bus->dhd->tx_packets + bus->dhd->rx_packets), bus->intrcount);
+		bcm_bprintf(strbuf, "\n\n");
+	}
+
+#ifdef SDTEST
+	if (bus->pktgen_count) {
+		bcm_bprintf(strbuf, "pktgen config and count:\n");
+		bcm_bprintf(strbuf, "freq %u count %u print %u total %u min %u len %u\n",
+		            bus->pktgen_freq, bus->pktgen_count, bus->pktgen_print,
+		            bus->pktgen_total, bus->pktgen_minlen, bus->pktgen_maxlen);
+		bcm_bprintf(strbuf, "send attempts %u rcvd %u fail %u\n",
+		            bus->pktgen_sent, bus->pktgen_rcvd, bus->pktgen_fail);
+	}
+#endif /* SDTEST */
+#ifdef DHD_DEBUG
+	bcm_bprintf(strbuf, "dpc_sched %d host interrupt%spending\n",
+	            bus->dpc_sched, (bcmsdh_intr_pending(bus->sdh) ? " " : " not "));
+	bcm_bprintf(strbuf, "blocksize %u roundup %u\n", bus->blocksize, bus->roundup);
+#endif /* DHD_DEBUG */
+	bcm_bprintf(strbuf, "clkstate %d activity %d idletime %d idlecount %d sleeping %d\n",
+	            bus->clkstate, bus->activity, bus->idletime, bus->idlecount, bus->sleeping);
+}
+
+void
+dhd_bus_clearcounts(dhd_pub_t *dhdp)
+{
+	dhd_bus_t *bus = (dhd_bus_t *)dhdp->bus;
+
+	bus->intrcount = bus->lastintrs = bus->spurious = bus->regfails = 0;
+	bus->rxrtx = bus->rx_toolong = bus->rxc_errors = 0;
+	bus->rx_hdrfail = bus->rx_badhdr = bus->rx_badseq = 0;
+#ifdef DHDENABLE_TAILPAD
+	bus->tx_tailpad_chain = bus->tx_tailpad_pktget = 0;
+#endif /* DHDENABLE_TAILPAD */
+	bus->tx_sderrs = bus->fc_rcvd = bus->fc_xoff = bus->fc_xon = 0;
+	bus->rxglomfail = bus->rxglomframes = bus->rxglompkts = 0;
+	bus->f2rxhdrs = bus->f2rxdata = bus->f2txdata = bus->f1regdata = 0;
+}
+
+#ifdef SDTEST
+static int
+dhdsdio_pktgen_get(dhd_bus_t *bus, uint8 *arg)
+{
+	dhd_pktgen_t pktgen;
+
+	pktgen.version = DHD_PKTGEN_VERSION;
+	pktgen.freq = bus->pktgen_freq;
+	pktgen.count = bus->pktgen_count;
+	pktgen.print = bus->pktgen_print;
+	pktgen.total = bus->pktgen_total;
+	pktgen.minlen = bus->pktgen_minlen;
+	pktgen.maxlen = bus->pktgen_maxlen;
+	pktgen.numsent = bus->pktgen_sent;
+	pktgen.numrcvd = bus->pktgen_rcvd;
+	pktgen.numfail = bus->pktgen_fail;
+	pktgen.mode = bus->pktgen_mode;
+	pktgen.stop = bus->pktgen_stop;
+
+	bcopy(&pktgen, arg, sizeof(pktgen));
+
+	return 0;
+}
+
+static int
+dhdsdio_pktgen_set(dhd_bus_t *bus, uint8 *arg)
+{
+	dhd_pktgen_t pktgen;
+	uint oldcnt, oldmode;
+
+	bcopy(arg, &pktgen, sizeof(pktgen));
+	if (pktgen.version != DHD_PKTGEN_VERSION)
+		return BCME_BADARG;
+
+	oldcnt = bus->pktgen_count;
+	oldmode = bus->pktgen_mode;
+
+	bus->pktgen_freq = pktgen.freq;
+	bus->pktgen_count = pktgen.count;
+	bus->pktgen_print = pktgen.print;
+	bus->pktgen_total = pktgen.total;
+	bus->pktgen_minlen = pktgen.minlen;
+	bus->pktgen_maxlen = pktgen.maxlen;
+	bus->pktgen_mode = pktgen.mode;
+	bus->pktgen_stop = pktgen.stop;
+
+	bus->pktgen_tick = bus->pktgen_ptick = 0;
+	bus->pktgen_prev_time = jiffies;
+	bus->pktgen_len = MAX(bus->pktgen_len, bus->pktgen_minlen);
+	bus->pktgen_len = MIN(bus->pktgen_len, bus->pktgen_maxlen);
+
+	/* Clear counts for a new pktgen (mode change, or was stopped) */
+	if (bus->pktgen_count && (!oldcnt || oldmode != bus->pktgen_mode)) {
+		bus->pktgen_sent = bus->pktgen_prev_sent = bus->pktgen_rcvd = 0;
+		bus->pktgen_prev_rcvd = bus->pktgen_fail = 0;
+	}
+
+	return 0;
+}
+#endif /* SDTEST */
+
+static void
+dhdsdio_devram_remap(dhd_bus_t *bus, bool val)
+{
+	uint8 enable, protect, remap;
+
+	si_socdevram(bus->sih, FALSE, &enable, &protect, &remap);
+	remap = val ? TRUE : FALSE;
+	si_socdevram(bus->sih, TRUE, &enable, &protect, &remap);
+}
+
+static int
+dhdsdio_membytes(dhd_bus_t *bus, bool write, uint32 address, uint8 *data, uint size)
+{
+	int bcmerror = 0;
+	uint32 sdaddr;
+	uint dsize;
+
+	/* In remap mode, adjust address beyond socram and redirect
+	 * to devram at SOCDEVRAM_BP_ADDR since remap address > orig_ramsize
+	 * is not backplane accessible
+	 */
+	if (REMAP_ENAB(bus) && REMAP_ISADDR(bus, address)) {
+		address -= bus->orig_ramsize;
+		address += SOCDEVRAM_BP_ADDR;
+	}
+
+	/* Determine initial transfer parameters */
+	sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK;
+	if ((sdaddr + size) & SBSDIO_SBWINDOW_MASK)
+		dsize = (SBSDIO_SB_OFT_ADDR_LIMIT - sdaddr);
+	else
+		dsize = size;
+
+	/* Set the backplane window to include the start address */
+	if ((bcmerror = dhdsdio_set_siaddr_window(bus, address))) {
+		DHD_ERROR(("%s: window change failed\n", __FUNCTION__));
+		goto xfer_done;
+	}
+
+	/* Do the transfer(s) */
+	while (size) {
+		DHD_INFO(("%s: %s %d bytes at offset 0x%08x in window 0x%08x\n",
+		          __FUNCTION__, (write ? "write" : "read"), dsize, sdaddr,
+		          (address & SBSDIO_SBWINDOW_MASK)));
+		if ((bcmerror = bcmsdh_rwdata(bus->sdh, write, sdaddr, data, dsize))) {
+			DHD_ERROR(("%s: membytes transfer failed\n", __FUNCTION__));
+			break;
+		}
+
+		/* Adjust for next transfer (if any) */
+		if ((size -= dsize)) {
+			data += dsize;
+			address += dsize;
+			if ((bcmerror = dhdsdio_set_siaddr_window(bus, address))) {
+				DHD_ERROR(("%s: window change failed\n", __FUNCTION__));
+				break;
+			}
+			sdaddr = 0;
+			dsize = MIN(SBSDIO_SB_OFT_ADDR_LIMIT, size);
+		}
+
+	}
+
+xfer_done:
+	/* Return the window to backplane enumeration space for core access */
+	if (dhdsdio_set_siaddr_window(bus, bcmsdh_cur_sbwad(bus->sdh))) {
+		DHD_ERROR(("%s: FAILED to set window back to 0x%x\n", __FUNCTION__,
+			bcmsdh_cur_sbwad(bus->sdh)));
+	}
+
+	return bcmerror;
+}
+
+#ifdef DHD_DEBUG
+static int
+dhdsdio_readshared(dhd_bus_t *bus, sdpcm_shared_t *sh)
+{
+	uint32 addr;
+	int rv, i;
+	uint32 shaddr = 0;
+
+	shaddr = bus->dongle_ram_base + bus->ramsize - 4;
+	i = 0;
+	do {
+		/* Read last word in memory to determine address of sdpcm_shared structure */
+		if ((rv = dhdsdio_membytes(bus, FALSE, shaddr, (uint8 *)&addr, 4)) < 0)
+			return rv;
+
+		addr = ltoh32(addr);
+
+		DHD_INFO(("sdpcm_shared address 0x%08X\n", addr));
+
+		/*
+		 * Check if addr is valid.
+		 * NVRAM length at the end of memory should have been overwritten.
+		 */
+		if (addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff)) {
+			if ((bus->srmemsize > 0) && (i++ == 0)) {
+				shaddr -= bus->srmemsize;
+			} else {
+				DHD_ERROR(("%s: address (0x%08x) of sdpcm_shared invalid\n",
+					__FUNCTION__, addr));
+				return BCME_ERROR;
+			}
+		} else
+			break;
+	} while (i < 2);
+
+	/* Read hndrte_shared structure */
+	if ((rv = dhdsdio_membytes(bus, FALSE, addr, (uint8 *)sh, sizeof(sdpcm_shared_t))) < 0)
+		return rv;
+
+	/* Endianness */
+	sh->flags = ltoh32(sh->flags);
+	sh->trap_addr = ltoh32(sh->trap_addr);
+	sh->assert_exp_addr = ltoh32(sh->assert_exp_addr);
+	sh->assert_file_addr = ltoh32(sh->assert_file_addr);
+	sh->assert_line = ltoh32(sh->assert_line);
+	sh->console_addr = ltoh32(sh->console_addr);
+	sh->msgtrace_addr = ltoh32(sh->msgtrace_addr);
+
+	if ((sh->flags & SDPCM_SHARED_VERSION_MASK) == 3 && SDPCM_SHARED_VERSION == 1)
+		return BCME_OK;
+
+	if ((sh->flags & SDPCM_SHARED_VERSION_MASK) != SDPCM_SHARED_VERSION) {
+		DHD_ERROR(("%s: sdpcm_shared version %d in dhd "
+		           "is different than sdpcm_shared version %d in dongle\n",
+		           __FUNCTION__, SDPCM_SHARED_VERSION,
+		           sh->flags & SDPCM_SHARED_VERSION_MASK));
+		return BCME_ERROR;
+	}
+
+	return BCME_OK;
+}
+
+#define CONSOLE_LINE_MAX	192
+
+static int
+dhdsdio_readconsole(dhd_bus_t *bus)
+{
+	dhd_console_t *c = &bus->console;
+	uint8 line[CONSOLE_LINE_MAX], ch;
+	uint32 n, idx, addr;
+	int rv;
+
+	/* Don't do anything until FWREADY updates console address */
+	if (bus->console_addr == 0)
+		return 0;
+
+	if (!KSO_ENAB(bus))
+		return 0;
+
+	/* Read console log struct */
+	addr = bus->console_addr + OFFSETOF(hnd_cons_t, log);
+	if ((rv = dhdsdio_membytes(bus, FALSE, addr, (uint8 *)&c->log, sizeof(c->log))) < 0)
+		return rv;
+
+	/* Allocate console buffer (one time only) */
+	if (c->buf == NULL) {
+		c->bufsize = ltoh32(c->log.buf_size);
+		if ((c->buf = MALLOC(bus->dhd->osh, c->bufsize)) == NULL)
+			return BCME_NOMEM;
+	}
+
+	idx = ltoh32(c->log.idx);
+
+	/* Protect against corrupt value */
+	if (idx > c->bufsize)
+		return BCME_ERROR;
+
+	/* Skip reading the console buffer if the index pointer has not moved */
+	if (idx == c->last)
+		return BCME_OK;
+
+	/* Read the console buffer */
+	addr = ltoh32(c->log.buf);
+	if ((rv = dhdsdio_membytes(bus, FALSE, addr, c->buf, c->bufsize)) < 0)
+		return rv;
+
+	while (c->last != idx) {
+		for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
+			if (c->last == idx) {
+				/* This would output a partial line.  Instead, back up
+				 * the buffer pointer and output this line next time around.
+				 */
+				if (c->last >= n)
+					c->last -= n;
+				else
+					c->last = c->bufsize - n;
+				goto break2;
+			}
+			ch = c->buf[c->last];
+			c->last = (c->last + 1) % c->bufsize;
+			if (ch == '\n')
+				break;
+			line[n] = ch;
+		}
+
+		if (n > 0) {
+			if (line[n - 1] == '\r')
+				n--;
+			line[n] = 0;
+			printf("CONSOLE: %s\n", line);
+#ifdef LOG_INTO_TCPDUMP
+			dhd_sendup_log(bus->dhd, line, n);
+#endif /* LOG_INTO_TCPDUMP */
+		}
+	}
+break2:
+
+	return BCME_OK;
+}
+
+static int
+dhdsdio_checkdied(dhd_bus_t *bus, char *data, uint size)
+{
+	int bcmerror = 0;
+	uint msize = 512;
+	char *mbuffer = NULL;
+	char *console_buffer = NULL;
+	uint maxstrlen = 256;
+	char *str = NULL;
+	trap_t tr;
+	sdpcm_shared_t sdpcm_shared;
+	struct bcmstrbuf strbuf;
+	uint32 console_ptr, console_size, console_index;
+	uint8 line[CONSOLE_LINE_MAX], ch;
+	uint32 n, i, addr;
+	int rv;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (DHD_NOCHECKDIED_ON())
+		return 0;
+
+	if (data == NULL) {
+		/*
+		 * Called after a rx ctrl timeout. "data" is NULL.
+		 * allocate memory to trace the trap or assert.
+		 */
+		size = msize;
+		mbuffer = data = MALLOC(bus->dhd->osh, msize);
+		if (mbuffer == NULL) {
+			DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, msize));
+			bcmerror = BCME_NOMEM;
+			goto done;
+		}
+	}
+
+	if ((str = MALLOC(bus->dhd->osh, maxstrlen)) == NULL) {
+		DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, maxstrlen));
+		bcmerror = BCME_NOMEM;
+		goto done;
+	}
+
+	if ((bcmerror = dhdsdio_readshared(bus, &sdpcm_shared)) < 0)
+		goto done;
+
+	bcm_binit(&strbuf, data, size);
+
+	bcm_bprintf(&strbuf, "msgtrace address : 0x%08X\nconsole address  : 0x%08X\n",
+	            sdpcm_shared.msgtrace_addr, sdpcm_shared.console_addr);
+
+	if ((sdpcm_shared.flags & SDPCM_SHARED_ASSERT_BUILT) == 0) {
+		/* NOTE: Misspelled assert is intentional - DO NOT FIX.
+		 * (Avoids conflict with real asserts for programmatic parsing of output.)
+		 */
+		bcm_bprintf(&strbuf, "Assrt not built in dongle\n");
+	}
+
+	if ((sdpcm_shared.flags & (SDPCM_SHARED_ASSERT|SDPCM_SHARED_TRAP)) == 0) {
+		/* NOTE: Misspelled assert is intentional - DO NOT FIX.
+		 * (Avoids conflict with real asserts for programmatic parsing of output.)
+		 */
+		bcm_bprintf(&strbuf, "No trap%s in dongle",
+		          (sdpcm_shared.flags & SDPCM_SHARED_ASSERT_BUILT)
+		          ?"/assrt" :"");
+	} else {
+		if (sdpcm_shared.flags & SDPCM_SHARED_ASSERT) {
+			/* Download assert */
+			bcm_bprintf(&strbuf, "Dongle assert");
+			if (sdpcm_shared.assert_exp_addr != 0) {
+				str[0] = '\0';
+				if ((bcmerror = dhdsdio_membytes(bus, FALSE,
+				                                 sdpcm_shared.assert_exp_addr,
+				                                 (uint8 *)str, maxstrlen)) < 0)
+					goto done;
+
+				str[maxstrlen - 1] = '\0';
+				bcm_bprintf(&strbuf, " expr \"%s\"", str);
+			}
+
+			if (sdpcm_shared.assert_file_addr != 0) {
+				str[0] = '\0';
+				if ((bcmerror = dhdsdio_membytes(bus, FALSE,
+				                                 sdpcm_shared.assert_file_addr,
+				                                 (uint8 *)str, maxstrlen)) < 0)
+					goto done;
+
+				str[maxstrlen - 1] = '\0';
+				bcm_bprintf(&strbuf, " file \"%s\"", str);
+			}
+
+			bcm_bprintf(&strbuf, " line %d ", sdpcm_shared.assert_line);
+		}
+
+		if (sdpcm_shared.flags & SDPCM_SHARED_TRAP) {
+			bus->dhd->dongle_trap_occured = TRUE;
+			if ((bcmerror = dhdsdio_membytes(bus, FALSE,
+			                                 sdpcm_shared.trap_addr,
+			                                 (uint8*)&tr, sizeof(trap_t))) < 0)
+				goto done;
+
+			bcm_bprintf(&strbuf,
+			"Dongle trap type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, sp 0x%x,"
+			            "lp 0x%x, rpc 0x%x Trap offset 0x%x, "
+			"r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, "
+			"r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x\n\n",
+			ltoh32(tr.type), ltoh32(tr.epc), ltoh32(tr.cpsr), ltoh32(tr.spsr),
+			ltoh32(tr.r13), ltoh32(tr.r14), ltoh32(tr.pc),
+			ltoh32(sdpcm_shared.trap_addr),
+			ltoh32(tr.r0), ltoh32(tr.r1), ltoh32(tr.r2), ltoh32(tr.r3),
+			ltoh32(tr.r4), ltoh32(tr.r5), ltoh32(tr.r6), ltoh32(tr.r7));
+
+			addr = sdpcm_shared.console_addr + OFFSETOF(hnd_cons_t, log);
+			if ((rv = dhdsdio_membytes(bus, FALSE, addr,
+				(uint8 *)&console_ptr, sizeof(console_ptr))) < 0)
+				goto printbuf;
+
+			addr = sdpcm_shared.console_addr + OFFSETOF(hnd_cons_t, log.buf_size);
+			if ((rv = dhdsdio_membytes(bus, FALSE, addr,
+				(uint8 *)&console_size, sizeof(console_size))) < 0)
+				goto printbuf;
+
+			addr = sdpcm_shared.console_addr + OFFSETOF(hnd_cons_t, log.idx);
+			if ((rv = dhdsdio_membytes(bus, FALSE, addr,
+				(uint8 *)&console_index, sizeof(console_index))) < 0)
+				goto printbuf;
+
+			console_ptr = ltoh32(console_ptr);
+			console_size = ltoh32(console_size);
+			console_index = ltoh32(console_index);
+
+			if (console_size > CONSOLE_BUFFER_MAX ||
+				!(console_buffer = MALLOC(bus->dhd->osh, console_size)))
+				goto printbuf;
+
+			if ((rv = dhdsdio_membytes(bus, FALSE, console_ptr,
+				(uint8 *)console_buffer, console_size)) < 0)
+				goto printbuf;
+
+			for (i = 0, n = 0; i < console_size; i += n + 1) {
+				for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
+					ch = console_buffer[(console_index + i + n) % console_size];
+					if (ch == '\n')
+						break;
+					line[n] = ch;
+				}
+
+
+				if (n > 0) {
+					if (line[n - 1] == '\r')
+						n--;
+					line[n] = 0;
+					/* Don't use DHD_ERROR macro since we print
+					 * a lot of information quickly. The macro
+					 * will truncate a lot of the printfs
+					 */
+
+					if (dhd_msg_level & DHD_ERROR_VAL)
+						printf("CONSOLE: %s\n", line);
+				}
+			}
+		}
+	}
+
+printbuf:
+	if (sdpcm_shared.flags & (SDPCM_SHARED_ASSERT | SDPCM_SHARED_TRAP)) {
+		DHD_ERROR(("%s: %s\n", __FUNCTION__, strbuf.origbuf));
+	}
+
+
+done:
+	if (mbuffer)
+		MFREE(bus->dhd->osh, mbuffer, msize);
+	if (str)
+		MFREE(bus->dhd->osh, str, maxstrlen);
+	if (console_buffer)
+		MFREE(bus->dhd->osh, console_buffer, console_size);
+
+	return bcmerror;
+}
+#endif /* #ifdef DHD_DEBUG */
+
+
+int
+dhdsdio_downloadvars(dhd_bus_t *bus, void *arg, int len)
+{
+	int bcmerror = BCME_OK;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	/* Basic sanity checks */
+	if (bus->dhd->up) {
+		bcmerror = BCME_NOTDOWN;
+		goto err;
+	}
+	if (!len) {
+		bcmerror = BCME_BUFTOOSHORT;
+		goto err;
+	}
+
+	/* Free the old ones and replace with passed variables */
+	if (bus->vars)
+		MFREE(bus->dhd->osh, bus->vars, bus->varsz);
+
+	bus->vars = MALLOC(bus->dhd->osh, len);
+	bus->varsz = bus->vars ? len : 0;
+	if (bus->vars == NULL) {
+		bcmerror = BCME_NOMEM;
+		goto err;
+	}
+
+	/* Copy the passed variables, which should include the terminating double-null */
+	bcopy(arg, bus->vars, bus->varsz);
+err:
+	return bcmerror;
+}
+
+#ifdef DHD_DEBUG
+
+#define CC_PLL_CHIPCTRL_SERIAL_ENAB		(1  << 24)
+#define CC_CHIPCTRL_JTAG_SEL			(1  << 3)
+#define CC_CHIPCTRL_GPIO_SEL				(0x3)
+#define CC_PLL_CHIPCTRL_SERIAL_ENAB_4334	(1  << 28)
+
+static int
+dhd_serialconsole(dhd_bus_t *bus, bool set, bool enable, int *bcmerror)
+{
+	int int_val;
+	uint32 addr, data, uart_enab = 0;
+	uint32 jtag_sel = CC_CHIPCTRL_JTAG_SEL;
+	uint32 gpio_sel = CC_CHIPCTRL_GPIO_SEL;
+
+	addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_addr);
+	data = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_data);
+	*bcmerror = 0;
+
+	bcmsdh_reg_write(bus->sdh, addr, 4, 1);
+	if (bcmsdh_regfail(bus->sdh)) {
+		*bcmerror = BCME_SDIO_ERROR;
+		return -1;
+	}
+	int_val = bcmsdh_reg_read(bus->sdh, data, 4);
+	if (bcmsdh_regfail(bus->sdh)) {
+		*bcmerror = BCME_SDIO_ERROR;
+		return -1;
+	}
+	if (bus->sih->chip == BCM4330_CHIP_ID) {
+		uart_enab = CC_PLL_CHIPCTRL_SERIAL_ENAB;
+	}
+	else if (bus->sih->chip == BCM4334_CHIP_ID ||
+		bus->sih->chip == BCM43340_CHIP_ID ||
+		bus->sih->chip == BCM43341_CHIP_ID ||
+		bus->sih->chip == BCM43342_CHIP_ID ||
+		0) {
+		if (enable) {
+			/* Moved to PMU chipcontrol 1 from 4330 */
+			int_val &= ~gpio_sel;
+			int_val |= jtag_sel;
+		} else {
+			int_val |= gpio_sel;
+			int_val &= ~jtag_sel;
+		}
+		uart_enab = CC_PLL_CHIPCTRL_SERIAL_ENAB_4334;
+	}
+
+	if (!set)
+		return (int_val & uart_enab);
+	if (enable)
+		int_val |= uart_enab;
+	else
+		int_val &= ~uart_enab;
+	bcmsdh_reg_write(bus->sdh, data, 4, int_val);
+	if (bcmsdh_regfail(bus->sdh)) {
+		*bcmerror = BCME_SDIO_ERROR;
+		return -1;
+	}
+	if (bus->sih->chip == BCM4330_CHIP_ID) {
+		uint32 chipcontrol;
+		addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol);
+		chipcontrol = bcmsdh_reg_read(bus->sdh, addr, 4);
+		chipcontrol &= ~jtag_sel;
+		if (enable) {
+			chipcontrol |=  jtag_sel;
+			chipcontrol &= ~gpio_sel;
+		}
+		bcmsdh_reg_write(bus->sdh, addr, 4, chipcontrol);
+	}
+
+	return (int_val & uart_enab);
+}
+#endif
+
+static int
+dhdsdio_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, const char *name,
+                void *params, int plen, void *arg, int len, int val_size)
+{
+	int bcmerror = 0;
+	int32 int_val = 0;
+	bool bool_val = 0;
+
+	DHD_TRACE(("%s: Enter, action %d name %s params %p plen %d arg %p len %d val_size %d\n",
+	           __FUNCTION__, actionid, name, params, plen, arg, len, val_size));
+
+	if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0)
+		goto exit;
+
+	if (plen >= (int)sizeof(int_val))
+		bcopy(params, &int_val, sizeof(int_val));
+
+	bool_val = (int_val != 0) ? TRUE : FALSE;
+
+
+	/* Some ioctls use the bus */
+	dhd_os_sdlock(bus->dhd);
+
+	/* Check if dongle is in reset. If so, only allow DEVRESET iovars */
+	if (bus->dhd->dongle_reset && !(actionid == IOV_SVAL(IOV_DEVRESET) ||
+	                                actionid == IOV_GVAL(IOV_DEVRESET))) {
+		bcmerror = BCME_NOTREADY;
+		goto exit;
+	}
+
+	/*
+	 * Special handling for keepSdioOn: New SDIO Wake-up Mechanism
+	 */
+	if ((vi->varid == IOV_KSO) && (IOV_ISSET(actionid))) {
+		dhdsdio_clk_kso_iovar(bus, bool_val);
+		goto exit;
+	} else if ((vi->varid == IOV_DEVSLEEP) && (IOV_ISSET(actionid))) {
+		{
+			dhdsdio_clk_devsleep_iovar(bus, bool_val);
+			if (!SLPAUTO_ENAB(bus) && (bool_val == FALSE) && (bus->ipend)) {
+				DHD_ERROR(("INT pending in devsleep 1, dpc_sched: %d\n",
+					bus->dpc_sched));
+				if (!bus->dpc_sched) {
+					bus->dpc_sched = TRUE;
+					dhd_sched_dpc(bus->dhd);
+				}
+			}
+		}
+		goto exit;
+	}
+
+	/* Handle sleep stuff before any clock mucking */
+	if (vi->varid == IOV_SLEEP) {
+		if (IOV_ISSET(actionid)) {
+			bcmerror = dhdsdio_bussleep(bus, bool_val);
+		} else {
+			int_val = (int32)bus->sleeping;
+			bcopy(&int_val, arg, sizeof(int_val));
+		}
+		goto exit;
+	}
+
+	/* Request clock to allow SDIO accesses */
+	if (!bus->dhd->dongle_reset) {
+		BUS_WAKE(bus);
+		dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+	}
+
+	switch (actionid) {
+	case IOV_GVAL(IOV_INTR):
+		int_val = (int32)bus->intr;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_SVAL(IOV_INTR):
+		bus->intr = bool_val;
+		bus->intdis = FALSE;
+		if (bus->dhd->up) {
+			if (bus->intr) {
+				DHD_INTR(("%s: enable SDIO device interrupts\n", __FUNCTION__));
+				bcmsdh_intr_enable(bus->sdh);
+			} else {
+				DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__));
+				bcmsdh_intr_disable(bus->sdh);
+			}
+		}
+		break;
+
+	case IOV_GVAL(IOV_POLLRATE):
+		int_val = (int32)bus->pollrate;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_SVAL(IOV_POLLRATE):
+		bus->pollrate = (uint)int_val;
+		bus->poll = (bus->pollrate != 0);
+		break;
+
+	case IOV_GVAL(IOV_IDLETIME):
+		int_val = bus->idletime;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_SVAL(IOV_IDLETIME):
+		if ((int_val < 0) && (int_val != DHD_IDLE_IMMEDIATE)) {
+			bcmerror = BCME_BADARG;
+		} else {
+			bus->idletime = int_val;
+		}
+		break;
+
+	case IOV_GVAL(IOV_IDLECLOCK):
+		int_val = (int32)bus->idleclock;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_SVAL(IOV_IDLECLOCK):
+		bus->idleclock = int_val;
+		break;
+
+	case IOV_GVAL(IOV_SD1IDLE):
+		int_val = (int32)sd1idle;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_SVAL(IOV_SD1IDLE):
+		sd1idle = bool_val;
+		break;
+
+
+	case IOV_SVAL(IOV_MEMBYTES):
+	case IOV_GVAL(IOV_MEMBYTES):
+	{
+		uint32 address;
+		uint size, dsize;
+		uint8 *data;
+
+		bool set = (actionid == IOV_SVAL(IOV_MEMBYTES));
+
+		ASSERT(plen >= 2*sizeof(int));
+
+		address = (uint32)int_val;
+		bcopy((char *)params + sizeof(int_val), &int_val, sizeof(int_val));
+		size = (uint)int_val;
+
+		/* Do some validation */
+		dsize = set ? plen - (2 * sizeof(int)) : len;
+		if (dsize < size) {
+			DHD_ERROR(("%s: error on %s membytes, addr 0x%08x size %d dsize %d\n",
+			           __FUNCTION__, (set ? "set" : "get"), address, size, dsize));
+			bcmerror = BCME_BADARG;
+			break;
+		}
+
+		DHD_INFO(("%s: Request to %s %d bytes at address 0x%08x\n", __FUNCTION__,
+		          (set ? "write" : "read"), size, address));
+
+		/* check if CR4 */
+		if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
+			/*
+			 * If address is start of RAM (i.e. a downloaded image),
+			 * store the reset instruction to be written in 0
+			 */
+			if (set && address == bus->dongle_ram_base) {
+				bus->resetinstr = *(((uint32*)params) + 2);
+			}
+		} else {
+		/* If we know about SOCRAM, check for a fit */
+		if ((bus->orig_ramsize) &&
+		    ((address > bus->orig_ramsize) || (address + size > bus->orig_ramsize)))
+		{
+			uint8 enable, protect, remap;
+			si_socdevram(bus->sih, FALSE, &enable, &protect, &remap);
+			if (!enable || protect) {
+				DHD_ERROR(("%s: ramsize 0x%08x doesn't have %d bytes at 0x%08x\n",
+					__FUNCTION__, bus->orig_ramsize, size, address));
+				DHD_ERROR(("%s: socram enable %d, protect %d\n",
+					__FUNCTION__, enable, protect));
+				bcmerror = BCME_BADARG;
+				break;
+			}
+
+			if (!REMAP_ENAB(bus) && (address >= SOCDEVRAM_ARM_ADDR)) {
+				uint32 devramsize = si_socdevram_size(bus->sih);
+				if ((address < SOCDEVRAM_ARM_ADDR) ||
+					(address + size > (SOCDEVRAM_ARM_ADDR + devramsize))) {
+					DHD_ERROR(("%s: bad address 0x%08x, size 0x%08x\n",
+						__FUNCTION__, address, size));
+					DHD_ERROR(("%s: socram range 0x%08x,size 0x%08x\n",
+						__FUNCTION__, SOCDEVRAM_ARM_ADDR, devramsize));
+					bcmerror = BCME_BADARG;
+					break;
+				}
+				/* move it such that address is real now */
+				address -= SOCDEVRAM_ARM_ADDR;
+				address += SOCDEVRAM_BP_ADDR;
+				DHD_INFO(("%s: Request to %s %d bytes @ Mapped address 0x%08x\n",
+					__FUNCTION__, (set ? "write" : "read"), size, address));
+			} else if (REMAP_ENAB(bus) && REMAP_ISADDR(bus, address) && remap) {
+				/* Can not access remap region while devram remap bit is set
+				 * ROM content would be returned in this case
+				 */
+				DHD_ERROR(("%s: Need to disable remap for address 0x%08x\n",
+					__FUNCTION__, address));
+				bcmerror = BCME_ERROR;
+				break;
+			}
+		}
+		}
+
+		/* Generate the actual data pointer */
+		data = set ? (uint8*)params + 2 * sizeof(int): (uint8*)arg;
+
+		/* Call to do the transfer */
+		bcmerror = dhdsdio_membytes(bus, set, address, data, size);
+
+		break;
+	}
+
+	case IOV_GVAL(IOV_RAMSIZE):
+		int_val = (int32)bus->ramsize;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_GVAL(IOV_RAMSTART):
+		int_val = (int32)bus->dongle_ram_base;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_GVAL(IOV_SDIOD_DRIVE):
+		int_val = (int32)dhd_sdiod_drive_strength;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_SVAL(IOV_SDIOD_DRIVE):
+		dhd_sdiod_drive_strength = int_val;
+		si_sdiod_drive_strength_init(bus->sih, bus->dhd->osh, dhd_sdiod_drive_strength);
+		break;
+
+	case IOV_SVAL(IOV_SET_DOWNLOAD_STATE):
+		bcmerror = dhdsdio_download_state(bus, bool_val);
+		break;
+
+	case IOV_SVAL(IOV_SOCRAM_STATE):
+		bcmerror = dhdsdio_download_state(bus, bool_val);
+		break;
+
+	case IOV_SVAL(IOV_VARS):
+		bcmerror = dhdsdio_downloadvars(bus, arg, len);
+		break;
+
+	case IOV_GVAL(IOV_READAHEAD):
+		int_val = (int32)dhd_readahead;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_SVAL(IOV_READAHEAD):
+		if (bool_val && !dhd_readahead)
+			bus->nextlen = 0;
+		dhd_readahead = bool_val;
+		break;
+
+	case IOV_GVAL(IOV_SDRXCHAIN):
+		int_val = (int32)bus->use_rxchain;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_SVAL(IOV_SDRXCHAIN):
+		if (bool_val && !bus->sd_rxchain)
+			bcmerror = BCME_UNSUPPORTED;
+		else
+			bus->use_rxchain = bool_val;
+		break;
+	case IOV_GVAL(IOV_ALIGNCTL):
+		int_val = (int32)dhd_alignctl;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_SVAL(IOV_ALIGNCTL):
+		dhd_alignctl = bool_val;
+		break;
+
+	case IOV_GVAL(IOV_SDALIGN):
+		int_val = DHD_SDALIGN;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+#ifdef DHD_DEBUG
+	case IOV_GVAL(IOV_VARS):
+		if (bus->varsz < (uint)len)
+			bcopy(bus->vars, arg, bus->varsz);
+		else
+			bcmerror = BCME_BUFTOOSHORT;
+		break;
+#endif /* DHD_DEBUG */
+
+#ifdef DHD_DEBUG
+	case IOV_GVAL(IOV_SDREG):
+	{
+		sdreg_t *sd_ptr;
+		uint32 addr, size;
+
+		sd_ptr = (sdreg_t *)params;
+
+		addr = (uint32)((ulong)bus->regs + sd_ptr->offset);
+		size = sd_ptr->func;
+		int_val = (int32)bcmsdh_reg_read(bus->sdh, addr, size);
+		if (bcmsdh_regfail(bus->sdh))
+			bcmerror = BCME_SDIO_ERROR;
+		bcopy(&int_val, arg, sizeof(int32));
+		break;
+	}
+
+	case IOV_SVAL(IOV_SDREG):
+	{
+		sdreg_t *sd_ptr;
+		uint32 addr, size;
+
+		sd_ptr = (sdreg_t *)params;
+
+		addr = (uint32)((ulong)bus->regs + sd_ptr->offset);
+		size = sd_ptr->func;
+		bcmsdh_reg_write(bus->sdh, addr, size, sd_ptr->value);
+		if (bcmsdh_regfail(bus->sdh))
+			bcmerror = BCME_SDIO_ERROR;
+		break;
+	}
+
+	/* Same as above, but offset is not backplane (not SDIO core) */
+	case IOV_GVAL(IOV_SBREG):
+	{
+		sdreg_t sdreg;
+		uint32 addr, size;
+
+		bcopy(params, &sdreg, sizeof(sdreg));
+
+		addr = SI_ENUM_BASE + sdreg.offset;
+		size = sdreg.func;
+		int_val = (int32)bcmsdh_reg_read(bus->sdh, addr, size);
+		if (bcmsdh_regfail(bus->sdh))
+			bcmerror = BCME_SDIO_ERROR;
+		bcopy(&int_val, arg, sizeof(int32));
+		break;
+	}
+
+	case IOV_SVAL(IOV_SBREG):
+	{
+		sdreg_t sdreg;
+		uint32 addr, size;
+
+		bcopy(params, &sdreg, sizeof(sdreg));
+
+		addr = SI_ENUM_BASE + sdreg.offset;
+		size = sdreg.func;
+		bcmsdh_reg_write(bus->sdh, addr, size, sdreg.value);
+		if (bcmsdh_regfail(bus->sdh))
+			bcmerror = BCME_SDIO_ERROR;
+		break;
+	}
+
+	case IOV_GVAL(IOV_SDCIS):
+	{
+		*(char *)arg = 0;
+
+		bcmstrcat(arg, "\nFunc 0\n");
+		bcmsdh_cis_read(bus->sdh, 0x10, (uint8 *)arg + strlen(arg), SBSDIO_CIS_SIZE_LIMIT);
+		bcmstrcat(arg, "\nFunc 1\n");
+		bcmsdh_cis_read(bus->sdh, 0x11, (uint8 *)arg + strlen(arg), SBSDIO_CIS_SIZE_LIMIT);
+		bcmstrcat(arg, "\nFunc 2\n");
+		bcmsdh_cis_read(bus->sdh, 0x12, (uint8 *)arg + strlen(arg), SBSDIO_CIS_SIZE_LIMIT);
+		break;
+	}
+
+	case IOV_GVAL(IOV_FORCEEVEN):
+		int_val = (int32)forcealign;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_SVAL(IOV_FORCEEVEN):
+		forcealign = bool_val;
+		break;
+
+	case IOV_GVAL(IOV_TXBOUND):
+		int_val = (int32)dhd_txbound;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_SVAL(IOV_TXBOUND):
+		dhd_txbound = (uint)int_val;
+		break;
+
+	case IOV_GVAL(IOV_RXBOUND):
+		int_val = (int32)dhd_rxbound;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_SVAL(IOV_RXBOUND):
+		dhd_rxbound = (uint)int_val;
+		break;
+
+	case IOV_GVAL(IOV_TXMINMAX):
+		int_val = (int32)dhd_txminmax;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_SVAL(IOV_TXMINMAX):
+		dhd_txminmax = (uint)int_val;
+		break;
+
+	case IOV_GVAL(IOV_SERIALCONS):
+		int_val = dhd_serialconsole(bus, FALSE, 0, &bcmerror);
+		if (bcmerror != 0)
+			break;
+
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_SVAL(IOV_SERIALCONS):
+		dhd_serialconsole(bus, TRUE, bool_val, &bcmerror);
+		break;
+
+
+#endif /* DHD_DEBUG */
+
+
+#ifdef SDTEST
+	case IOV_GVAL(IOV_EXTLOOP):
+		int_val = (int32)bus->ext_loop;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_SVAL(IOV_EXTLOOP):
+		bus->ext_loop = bool_val;
+		break;
+
+	case IOV_GVAL(IOV_PKTGEN):
+		bcmerror = dhdsdio_pktgen_get(bus, arg);
+		break;
+
+	case IOV_SVAL(IOV_PKTGEN):
+		bcmerror = dhdsdio_pktgen_set(bus, arg);
+		break;
+#endif /* SDTEST */
+
+#if defined(USE_SDIOFIFO_IOVAR)
+	case IOV_GVAL(IOV_WATERMARK):
+		int_val = (int32)watermark;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_SVAL(IOV_WATERMARK):
+		watermark = (uint)int_val;
+		watermark = (watermark > SBSDIO_WATERMARK_MASK) ? SBSDIO_WATERMARK_MASK : watermark;
+		DHD_ERROR(("Setting watermark as 0x%x.\n", watermark));
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_WATERMARK, (uint8)watermark, NULL);
+		break;
+
+	case IOV_GVAL(IOV_MESBUSYCTRL):
+		int_val = (int32)mesbusyctrl;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_SVAL(IOV_MESBUSYCTRL):
+		mesbusyctrl = (uint)int_val;
+		mesbusyctrl = (mesbusyctrl > SBSDIO_MESBUSYCTRL_MASK)
+			? SBSDIO_MESBUSYCTRL_MASK : mesbusyctrl;
+		DHD_ERROR(("Setting mesbusyctrl as 0x%x.\n", mesbusyctrl));
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_MESBUSYCTRL,
+			((uint8)mesbusyctrl | 0x80), NULL);
+		break;
+#endif
+
+
+	case IOV_GVAL(IOV_DONGLEISOLATION):
+		int_val = bus->dhd->dongle_isolation;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_SVAL(IOV_DONGLEISOLATION):
+		bus->dhd->dongle_isolation = bool_val;
+		break;
+
+	case IOV_SVAL(IOV_DEVRESET):
+		DHD_TRACE(("%s: Called set IOV_DEVRESET=%d dongle_reset=%d busstate=%d\n",
+		           __FUNCTION__, bool_val, bus->dhd->dongle_reset,
+		           bus->dhd->busstate));
+
+		ASSERT(bus->dhd->osh);
+		/* ASSERT(bus->cl_devid); */
+
+		dhd_bus_devreset(bus->dhd, (uint8)bool_val);
+
+		break;
+	/*
+	 * softap firmware is updated through module parameter or android private command
+	 */
+
+	case IOV_GVAL(IOV_DEVRESET):
+		DHD_TRACE(("%s: Called get IOV_DEVRESET\n", __FUNCTION__));
+
+		/* Get its status */
+		int_val = (bool) bus->dhd->dongle_reset;
+		bcopy(&int_val, arg, sizeof(int_val));
+
+		break;
+
+	case IOV_GVAL(IOV_KSO):
+		int_val = dhdsdio_sleepcsr_get(bus);
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_GVAL(IOV_DEVCAP):
+		int_val = dhdsdio_devcap_get(bus);
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_SVAL(IOV_DEVCAP):
+		dhdsdio_devcap_set(bus, (uint8) int_val);
+		break;
+	case IOV_GVAL(IOV_TXGLOMSIZE):
+		int_val = (int32)bus->txglomsize;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_SVAL(IOV_TXGLOMSIZE):
+		if (int_val > SDPCM_MAXGLOM_SIZE) {
+			bcmerror = BCME_ERROR;
+		} else {
+			bus->txglomsize = (uint)int_val;
+		}
+		break;
+	case IOV_SVAL(IOV_HANGREPORT):
+		bus->dhd->hang_report = bool_val;
+		DHD_ERROR(("%s: Set hang_report as %d\n", __FUNCTION__, bus->dhd->hang_report));
+		break;
+
+	case IOV_GVAL(IOV_HANGREPORT):
+		int_val = (int32)bus->dhd->hang_report;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+
+	case IOV_GVAL(IOV_TXINRX_THRES):
+		int_val = bus->txinrx_thres;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+	case IOV_SVAL(IOV_TXINRX_THRES):
+		if (int_val < 0) {
+			bcmerror = BCME_BADARG;
+		} else {
+			bus->txinrx_thres = int_val;
+		}
+		break;
+
+	default:
+		bcmerror = BCME_UNSUPPORTED;
+		break;
+	}
+
+exit:
+	if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+		bus->activity = FALSE;
+		dhdsdio_clkctl(bus, CLK_NONE, TRUE);
+	}
+
+	dhd_os_sdunlock(bus->dhd);
+
+	return bcmerror;
+}
+
+static int
+dhdsdio_write_vars(dhd_bus_t *bus)
+{
+	int bcmerror = 0;
+	uint32 varsize, phys_size;
+	uint32 varaddr;
+	uint8 *vbuffer;
+	uint32 varsizew;
+#ifdef DHD_DEBUG
+	uint8 *nvram_ularray;
+#endif /* DHD_DEBUG */
+
+	/* Even if there are no vars are to be written, we still need to set the ramsize. */
+	varsize = bus->varsz ? ROUNDUP(bus->varsz, 4) : 0;
+	varaddr = (bus->ramsize - 4) - varsize;
+
+	varaddr += bus->dongle_ram_base;
+
+	if (bus->vars) {
+		if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev == 7)) {
+			if (((varaddr & 0x3C) == 0x3C) && (varsize > 4)) {
+				DHD_ERROR(("PR85623WAR in place\n"));
+				varsize += 4;
+				varaddr -= 4;
+			}
+		}
+
+		vbuffer = (uint8 *)MALLOC(bus->dhd->osh, varsize);
+		if (!vbuffer)
+			return BCME_NOMEM;
+
+		bzero(vbuffer, varsize);
+		bcopy(bus->vars, vbuffer, bus->varsz);
+
+		/* Write the vars list */
+		bcmerror = dhdsdio_membytes(bus, TRUE, varaddr, vbuffer, varsize);
+#ifdef DHD_DEBUG
+		/* Verify NVRAM bytes */
+		DHD_INFO(("Compare NVRAM dl & ul; varsize=%d\n", varsize));
+		nvram_ularray = (uint8*)MALLOC(bus->dhd->osh, varsize);
+		if (!nvram_ularray)
+			return BCME_NOMEM;
+
+		/* Upload image to verify downloaded contents. */
+		memset(nvram_ularray, 0xaa, varsize);
+
+		/* Read the vars list to temp buffer for comparison */
+		bcmerror = dhdsdio_membytes(bus, FALSE, varaddr, nvram_ularray, varsize);
+		if (bcmerror) {
+				DHD_ERROR(("%s: error %d on reading %d nvram bytes at 0x%08x\n",
+					__FUNCTION__, bcmerror, varsize, varaddr));
+		}
+		/* Compare the org NVRAM with the one read from RAM */
+		if (memcmp(vbuffer, nvram_ularray, varsize)) {
+			DHD_ERROR(("%s: Downloaded NVRAM image is corrupted.\n", __FUNCTION__));
+		} else
+			DHD_ERROR(("%s: Download, Upload and compare of NVRAM succeeded.\n",
+			__FUNCTION__));
+
+		MFREE(bus->dhd->osh, nvram_ularray, varsize);
+#endif /* DHD_DEBUG */
+
+		MFREE(bus->dhd->osh, vbuffer, varsize);
+	}
+
+	phys_size = REMAP_ENAB(bus) ? bus->ramsize : bus->orig_ramsize;
+
+	phys_size += bus->dongle_ram_base;
+
+	/* adjust to the user specified RAM */
+	DHD_INFO(("Physical memory size: %d, usable memory size: %d\n",
+		phys_size, bus->ramsize));
+	DHD_INFO(("Vars are at %d, orig varsize is %d\n",
+		varaddr, varsize));
+	varsize = ((phys_size - 4) - varaddr);
+
+	/*
+	 * Determine the length token:
+	 * Varsize, converted to words, in lower 16-bits, checksum in upper 16-bits.
+	 */
+	if (bcmerror) {
+		varsizew = 0;
+	} else {
+		varsizew = varsize / 4;
+		varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF);
+		varsizew = htol32(varsizew);
+	}
+
+	DHD_INFO(("New varsize is %d, length token=0x%08x\n", varsize, varsizew));
+
+	/* Write the length token to the last word */
+	bcmerror = dhdsdio_membytes(bus, TRUE, (phys_size - 4),
+		(uint8*)&varsizew, 4);
+
+	return bcmerror;
+}
+
+static int
+dhdsdio_download_state(dhd_bus_t *bus, bool enter)
+{
+	uint retries;
+	int bcmerror = 0;
+	int foundcr4 = 0;
+
+	if (!bus->sih)
+		return BCME_ERROR;
+	/* To enter download state, disable ARM and reset SOCRAM.
+	 * To exit download state, simply reset ARM (default is RAM boot).
+	 */
+	if (enter) {
+		bus->alp_only = TRUE;
+
+		if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
+		    !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
+			if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
+				foundcr4 = 1;
+			} else {
+				DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
+				bcmerror = BCME_ERROR;
+				goto fail;
+			}
+		}
+
+		if (!foundcr4) {
+			si_core_disable(bus->sih, 0);
+			if (bcmsdh_regfail(bus->sdh)) {
+				bcmerror = BCME_SDIO_ERROR;
+				goto fail;
+			}
+
+			if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
+				DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
+				bcmerror = BCME_ERROR;
+				goto fail;
+			}
+
+			si_core_reset(bus->sih, 0, 0);
+			if (bcmsdh_regfail(bus->sdh)) {
+				DHD_ERROR(("%s: Failure trying reset SOCRAM core?\n",
+				           __FUNCTION__));
+				bcmerror = BCME_SDIO_ERROR;
+				goto fail;
+			}
+
+			/* Disable remap for download */
+			if (REMAP_ENAB(bus) && si_socdevram_remap_isenb(bus->sih))
+				dhdsdio_devram_remap(bus, FALSE);
+
+			/* Clear the top bit of memory */
+			if (bus->ramsize) {
+				uint32 zeros = 0;
+				if (dhdsdio_membytes(bus, TRUE, bus->ramsize - 4,
+				                     (uint8*)&zeros, 4) < 0) {
+					bcmerror = BCME_SDIO_ERROR;
+					goto fail;
+				}
+			}
+		} else {
+			/* For CR4,
+			 * Halt ARM
+			 * Remove ARM reset
+			 * Read RAM base address [0x18_0000]
+			 * [next] Download firmware
+			 * [done at else] Populate the reset vector
+			 * [done at else] Remove ARM halt
+			*/
+			/* Halt ARM & remove reset */
+			si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT);
+		}
+	} else {
+		if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
+			if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
+				DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
+				bcmerror = BCME_ERROR;
+				goto fail;
+			}
+
+			if (!si_iscoreup(bus->sih)) {
+				DHD_ERROR(("%s: SOCRAM core is down after reset?\n", __FUNCTION__));
+				bcmerror = BCME_ERROR;
+				goto fail;
+			}
+
+			if ((bcmerror = dhdsdio_write_vars(bus))) {
+				DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
+				goto fail;
+			}
+
+			/* Enable remap before ARM reset but after vars.
+			 * No backplane access in remap mode
+			 */
+			if (REMAP_ENAB(bus) && !si_socdevram_remap_isenb(bus->sih))
+				dhdsdio_devram_remap(bus, TRUE);
+
+			if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) &&
+			    !si_setcore(bus->sih, SDIOD_CORE_ID, 0)) {
+				DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__));
+				bcmerror = BCME_ERROR;
+				goto fail;
+			}
+			W_SDREG(0xFFFFFFFF, &bus->regs->intstatus, retries);
+
+
+			if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
+			    !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
+				DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
+				bcmerror = BCME_ERROR;
+				goto fail;
+			}
+		} else {
+			/* cr4 has no socram, but tcm's */
+			/* write vars */
+			if ((bcmerror = dhdsdio_write_vars(bus))) {
+				DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
+				goto fail;
+			}
+
+			if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) &&
+			    !si_setcore(bus->sih, SDIOD_CORE_ID, 0)) {
+				DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__));
+				bcmerror = BCME_ERROR;
+				goto fail;
+			}
+			W_SDREG(0xFFFFFFFF, &bus->regs->intstatus, retries);
+
+			/* switch back to arm core again */
+			if (!(si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) {
+				DHD_ERROR(("%s: Failed to find ARM CR4 core!\n", __FUNCTION__));
+				bcmerror = BCME_ERROR;
+				goto fail;
+			}
+			/* write address 0 with reset instruction */
+			bcmerror = dhdsdio_membytes(bus, TRUE, 0,
+				(uint8 *)&bus->resetinstr, sizeof(bus->resetinstr));
+
+			/* now remove reset and halt and continue to run CR4 */
+		}
+
+		si_core_reset(bus->sih, 0, 0);
+		if (bcmsdh_regfail(bus->sdh)) {
+			DHD_ERROR(("%s: Failure trying to reset ARM core?\n", __FUNCTION__));
+			bcmerror = BCME_SDIO_ERROR;
+			goto fail;
+		}
+
+		/* Allow HT Clock now that the ARM is running. */
+		bus->alp_only = FALSE;
+
+		bus->dhd->busstate = DHD_BUS_LOAD;
+	}
+
+fail:
+	/* Always return to SDIOD core */
+	if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0))
+		si_setcore(bus->sih, SDIOD_CORE_ID, 0);
+
+	return bcmerror;
+}
+
+int
+dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name,
+                 void *params, int plen, void *arg, int len, bool set)
+{
+	dhd_bus_t *bus = dhdp->bus;
+	const bcm_iovar_t *vi = NULL;
+	int bcmerror = 0;
+	int val_size;
+	uint32 actionid;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	ASSERT(name);
+	ASSERT(len >= 0);
+
+	/* Get MUST have return space */
+	ASSERT(set || (arg && len));
+
+	/* Set does NOT take qualifiers */
+	ASSERT(!set || (!params && !plen));
+
+	/* Look up var locally; if not found pass to host driver */
+	if ((vi = bcm_iovar_lookup(dhdsdio_iovars, name)) == NULL) {
+		dhd_os_sdlock(bus->dhd);
+
+		BUS_WAKE(bus);
+
+		/* Turn on clock in case SD command needs backplane */
+		dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+		bcmerror = bcmsdh_iovar_op(bus->sdh, name, params, plen, arg, len, set);
+
+		/* Check for bus configuration changes of interest */
+
+		/* If it was divisor change, read the new one */
+		if (set && strcmp(name, "sd_divisor") == 0) {
+			if (bcmsdh_iovar_op(bus->sdh, "sd_divisor", NULL, 0,
+			                    &bus->sd_divisor, sizeof(int32), FALSE) != BCME_OK) {
+				bus->sd_divisor = -1;
+				DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, name));
+			} else {
+				DHD_INFO(("%s: noted %s update, value now %d\n",
+				          __FUNCTION__, name, bus->sd_divisor));
+			}
+		}
+		/* If it was a mode change, read the new one */
+		if (set && strcmp(name, "sd_mode") == 0) {
+			if (bcmsdh_iovar_op(bus->sdh, "sd_mode", NULL, 0,
+			                    &bus->sd_mode, sizeof(int32), FALSE) != BCME_OK) {
+				bus->sd_mode = -1;
+				DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, name));
+			} else {
+				DHD_INFO(("%s: noted %s update, value now %d\n",
+				          __FUNCTION__, name, bus->sd_mode));
+			}
+		}
+		/* Similar check for blocksize change */
+		if (set && strcmp(name, "sd_blocksize") == 0) {
+			int32 fnum = 2;
+			if (bcmsdh_iovar_op(bus->sdh, "sd_blocksize", &fnum, sizeof(int32),
+			                    &bus->blocksize, sizeof(int32), FALSE) != BCME_OK) {
+				bus->blocksize = 0;
+				DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_blocksize"));
+			} else {
+				DHD_INFO(("%s: noted %s update, value now %d\n",
+				          __FUNCTION__, "sd_blocksize", bus->blocksize));
+
+				dhdsdio_tune_fifoparam(bus);
+			}
+		}
+		bus->roundup = MIN(max_roundup, bus->blocksize);
+
+		if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+			bus->activity = FALSE;
+			dhdsdio_clkctl(bus, CLK_NONE, TRUE);
+		}
+
+		dhd_os_sdunlock(bus->dhd);
+		goto exit;
+	}
+
+	DHD_CTL(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
+	         name, (set ? "set" : "get"), len, plen));
+
+	/* set up 'params' pointer in case this is a set command so that
+	 * the convenience int and bool code can be common to set and get
+	 */
+	if (params == NULL) {
+		params = arg;
+		plen = len;
+	}
+
+	if (vi->type == IOVT_VOID)
+		val_size = 0;
+	else if (vi->type == IOVT_BUFFER)
+		val_size = len;
+	else
+		/* all other types are integer sized */
+		val_size = sizeof(int);
+
+	actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+	bcmerror = dhdsdio_doiovar(bus, vi, actionid, name, params, plen, arg, len, val_size);
+
+exit:
+	return bcmerror;
+}
+
+void
+dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex)
+{
+	osl_t *osh;
+	uint32 local_hostintmask;
+	uint8 saveclk;
+	uint retries;
+	int err;
+	bool wlfc_enabled = FALSE;
+
+	if (!bus->dhd)
+		return;
+
+	osh = bus->dhd->osh;
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	bcmsdh_waitlockfree(bus->sdh);
+
+	if (enforce_mutex)
+		dhd_os_sdlock(bus->dhd);
+
+	if ((bus->dhd->busstate == DHD_BUS_DOWN) || bus->dhd->hang_was_sent) {
+		/* if Firmware already hangs disbale any interrupt */
+		bus->dhd->busstate = DHD_BUS_DOWN;
+		bus->hostintmask = 0;
+		bcmsdh_intr_disable(bus->sdh);
+	} else {
+
+		BUS_WAKE(bus);
+
+		/* Change our idea of bus state */
+		bus->dhd->busstate = DHD_BUS_DOWN;
+
+		if (KSO_ENAB(bus)) {
+
+		/* Enable clock for device interrupts */
+		dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+		/* Disable and clear interrupts at the chip level also */
+		W_SDREG(0, &bus->regs->hostintmask, retries);
+		local_hostintmask = bus->hostintmask;
+		bus->hostintmask = 0;
+
+		/* Force clocks on backplane to be sure F2 interrupt propagates */
+		saveclk = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+		if (!err) {
+			bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+			                 (saveclk | SBSDIO_FORCE_HT), &err);
+		}
+		if (err) {
+			DHD_ERROR(("%s: Failed to force clock for F2: err %d\n",
+			            __FUNCTION__, err));
+		}
+
+		/* Turn off the bus (F2), free any pending packets */
+		DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__));
+		bcmsdh_intr_disable(bus->sdh);
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, SDIO_FUNC_ENABLE_1, NULL);
+
+		/* Clear any pending interrupts now that F2 is disabled */
+		W_SDREG(local_hostintmask, &bus->regs->intstatus, retries);
+		}
+
+		/* Turn off the backplane clock (only) */
+		dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+	}
+
+#ifdef PROP_TXSTATUS
+	wlfc_enabled = (dhd_wlfc_cleanup_txq(bus->dhd, NULL, 0) != WLFC_UNSUPPORTED);
+#endif
+	if (!wlfc_enabled) {
+#ifdef DHDTCPACK_SUPPRESS
+		/* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
+		 * when there is a newly coming packet from network stack.
+		 */
+		dhd_tcpack_info_tbl_clean(bus->dhd);
+#endif /* DHDTCPACK_SUPPRESS */
+		/* Clear the data packet queues */
+		pktq_flush(osh, &bus->txq, TRUE, NULL, 0);
+	}
+
+	/* Clear any held glomming stuff */
+	if (bus->glomd)
+		PKTFREE(osh, bus->glomd, FALSE);
+
+	if (bus->glom)
+		PKTFREE(osh, bus->glom, FALSE);
+
+	bus->glom = bus->glomd = NULL;
+
+	/* Clear rx control and wake any waiters */
+	bus->rxlen = 0;
+	dhd_os_ioctl_resp_wake(bus->dhd);
+
+	/* Reset some F2 state stuff */
+	bus->rxskip = FALSE;
+	bus->tx_seq = bus->rx_seq = 0;
+
+	bus->tx_max = 4;
+
+	if (enforce_mutex)
+		dhd_os_sdunlock(bus->dhd);
+}
+
+#if defined(BCMSDIOH_TXGLOM) && defined(BCMSDIOH_STD)
+extern uint sd_txglom;
+#endif
+void
+dhd_txglom_enable(dhd_pub_t *dhdp, bool enable)
+{
+	/* can't enable host txglom by default, some platforms have no
+	 * (or crappy) ADMA support and txglom will cause kernel assertions (e.g.
+	 * panda board)
+	 */
+	dhd_bus_t *bus = dhdp->bus;
+#ifdef BCMSDIOH_TXGLOM
+	char buf[256];
+	uint32 rxglom;
+	int32 ret;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+#ifdef BCMSDIOH_STD
+	if (enable)
+		enable = sd_txglom;
+#endif /* BCMSDIOH_STD */
+
+	if (enable) {
+		rxglom = 1;
+		memset(buf, 0, sizeof(buf));
+		bcm_mkiovar("bus:rxglom", (void *)&rxglom, 4, buf, sizeof(buf));
+		ret = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
+		if (ret >= 0)
+			bus->txglom_enable = TRUE;
+		else {
+#ifdef BCMSDIOH_STD
+			sd_txglom = 0;
+#endif /* BCMSDIOH_STD */
+			bus->txglom_enable = FALSE;
+		}
+	} else
+#endif /* BCMSDIOH_TXGLOM */
+		bus->txglom_enable = FALSE;
+}
+
+int
+dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex)
+{
+	dhd_bus_t *bus = dhdp->bus;
+	dhd_timeout_t tmo;
+	uint retries = 0;
+	uint8 ready, enable;
+	int err, ret = 0;
+	uint8 saveclk;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	ASSERT(bus->dhd);
+	if (!bus->dhd)
+		return 0;
+
+	if (enforce_mutex)
+		dhd_os_sdlock(bus->dhd);
+
+	/* Make sure backplane clock is on, needed to generate F2 interrupt */
+	dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+	if (bus->clkstate != CLK_AVAIL) {
+		DHD_ERROR(("%s: clock state is wrong. state = %d\n", __FUNCTION__, bus->clkstate));
+		ret = -1;
+		goto exit;
+	}
+
+
+	/* Force clocks on backplane to be sure F2 interrupt propagates */
+	saveclk = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+	if (!err) {
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+		                 (saveclk | SBSDIO_FORCE_HT), &err);
+	}
+	if (err) {
+		DHD_ERROR(("%s: Failed to force clock for F2: err %d\n", __FUNCTION__, err));
+		ret = -1;
+		goto exit;
+	}
+
+	/* Enable function 2 (frame transfers) */
+	W_SDREG((SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT),
+	        &bus->regs->tosbmailboxdata, retries);
+	enable = (SDIO_FUNC_ENABLE_1 | SDIO_FUNC_ENABLE_2);
+
+	bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, enable, NULL);
+
+	/* Give the dongle some time to do its thing and set IOR2 */
+	dhd_timeout_start(&tmo, DHD_WAIT_F2RDY * 1000);
+
+	ready = 0;
+	while (ready != enable && !dhd_timeout_expired(&tmo))
+	        ready = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IORDY, NULL);
+
+	DHD_ERROR(("%s: enable 0x%02x, ready 0x%02x (waited %uus)\n",
+	          __FUNCTION__, enable, ready, tmo.elapsed));
+
+
+	/* If F2 successfully enabled, set core and enable interrupts */
+	if (ready == enable) {
+		/* Make sure we're talking to the core. */
+		if (!(bus->regs = si_setcore(bus->sih, PCMCIA_CORE_ID, 0)))
+			bus->regs = si_setcore(bus->sih, SDIOD_CORE_ID, 0);
+		ASSERT(bus->regs != NULL);
+
+		/* Set up the interrupt mask and enable interrupts */
+		bus->hostintmask = HOSTINTMASK;
+		/* corerev 4 could use the newer interrupt logic to detect the frames */
+		if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev == 4) &&
+			(bus->rxint_mode != SDIO_DEVICE_HMB_RXINT)) {
+			bus->hostintmask &= ~I_HMB_FRAME_IND;
+			bus->hostintmask |= I_XMTDATA_AVAIL;
+		}
+		W_SDREG(bus->hostintmask, &bus->regs->hostintmask, retries);
+
+		if (bus->sih->buscorerev < 15) {
+			bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_WATERMARK,
+				(uint8)watermark, &err);
+		}
+
+		/* Set bus state according to enable result */
+		dhdp->busstate = DHD_BUS_DATA;
+
+		/* bcmsdh_intr_unmask(bus->sdh); */
+
+		bus->intdis = FALSE;
+		if (bus->intr) {
+			DHD_INTR(("%s: enable SDIO device interrupts\n", __FUNCTION__));
+			bcmsdh_intr_enable(bus->sdh);
+		} else {
+			DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__));
+			bcmsdh_intr_disable(bus->sdh);
+		}
+
+	}
+
+
+	else {
+		/* Disable F2 again */
+		enable = SDIO_FUNC_ENABLE_1;
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, enable, NULL);
+	}
+
+	if (dhdsdio_sr_cap(bus)) {
+		dhdsdio_sr_init(bus);
+		/* Masking the chip active interrupt  permanantly */
+		bus->hostintmask &= ~I_CHIPACTIVE;
+		W_SDREG(bus->hostintmask, &bus->regs->hostintmask, retries);
+		DHD_INFO(("%s: disable I_CHIPACTIVE in hostintmask[0x%08x]\n",
+		__FUNCTION__, bus->hostintmask));
+	}
+	else
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1,
+			SBSDIO_FUNC1_CHIPCLKCSR, saveclk, &err);
+
+	/* If we didn't come up, turn off backplane clock */
+	if (dhdp->busstate != DHD_BUS_DATA)
+		dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+
+exit:
+	if (enforce_mutex)
+		dhd_os_sdunlock(bus->dhd);
+
+	return ret;
+}
+
+static void
+dhdsdio_rxfail(dhd_bus_t *bus, bool abort, bool rtx)
+{
+	bcmsdh_info_t *sdh = bus->sdh;
+	sdpcmd_regs_t *regs = bus->regs;
+	uint retries = 0;
+	uint16 lastrbc;
+	uint8 hi, lo;
+	int err;
+
+	DHD_ERROR(("%s: %sterminate frame%s\n", __FUNCTION__,
+	           (abort ? "abort command, " : ""), (rtx ? ", send NAK" : "")));
+
+	if (!KSO_ENAB(bus)) {
+		DHD_ERROR(("%s: Device asleep\n", __FUNCTION__));
+		return;
+	}
+
+	if (abort) {
+		bcmsdh_abort(sdh, SDIO_FUNC_2);
+	}
+
+	bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL, SFC_RF_TERM, &err);
+	if (err) {
+		DHD_ERROR(("%s: SBSDIO_FUNC1_FRAMECTRL cmd err\n", __FUNCTION__));
+		goto fail;
+	}
+	bus->f1regdata++;
+
+	/* Wait until the packet has been flushed (device/FIFO stable) */
+	for (lastrbc = retries = 0xffff; retries > 0; retries--) {
+		hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_RFRAMEBCHI, NULL);
+		lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_RFRAMEBCLO, &err);
+		if (err) {
+			DHD_ERROR(("%s: SBSDIO_FUNC1_RFAMEBCLO cmd err\n", __FUNCTION__));
+			goto fail;
+		}
+
+		bus->f1regdata += 2;
+
+		if ((hi == 0) && (lo == 0))
+			break;
+
+		if ((hi > (lastrbc >> 8)) && (lo > (lastrbc & 0x00ff))) {
+			DHD_ERROR(("%s: count growing: last 0x%04x now 0x%04x\n",
+			           __FUNCTION__, lastrbc, ((hi << 8) + lo)));
+		}
+		lastrbc = (hi << 8) + lo;
+	}
+
+	if (!retries) {
+		DHD_ERROR(("%s: count never zeroed: last 0x%04x\n", __FUNCTION__, lastrbc));
+	} else {
+		DHD_INFO(("%s: flush took %d iterations\n", __FUNCTION__, (0xffff - retries)));
+	}
+
+	if (rtx) {
+		bus->rxrtx++;
+		W_SDREG(SMB_NAK, &regs->tosbmailbox, retries);
+		bus->f1regdata++;
+		if (retries <= retry_limit) {
+			bus->rxskip = TRUE;
+		}
+	}
+
+	/* Clear partial in any case */
+	bus->nextlen = 0;
+
+fail:
+	/* If we can't reach the device, signal failure */
+	if (err || bcmsdh_regfail(sdh))
+		bus->dhd->busstate = DHD_BUS_DOWN;
+}
+
+static void
+dhdsdio_read_control(dhd_bus_t *bus, uint8 *hdr, uint len, uint doff)
+{
+	bcmsdh_info_t *sdh = bus->sdh;
+	uint rdlen, pad;
+
+	int sdret;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	/* Control data already received in aligned rxctl */
+	if ((bus->bus == SPI_BUS) && (!bus->usebufpool))
+		goto gotpkt;
+
+	ASSERT(bus->rxbuf);
+	/* Set rxctl for frame (w/optional alignment) */
+	bus->rxctl = bus->rxbuf;
+	if (dhd_alignctl) {
+		bus->rxctl += firstread;
+		if ((pad = ((uintptr)bus->rxctl % DHD_SDALIGN)))
+			bus->rxctl += (DHD_SDALIGN - pad);
+		bus->rxctl -= firstread;
+	}
+	ASSERT(bus->rxctl >= bus->rxbuf);
+
+	/* Copy the already-read portion over */
+	bcopy(hdr, bus->rxctl, firstread);
+	if (len <= firstread)
+		goto gotpkt;
+
+	/* Copy the full data pkt in gSPI case and process ioctl. */
+	if (bus->bus == SPI_BUS) {
+		bcopy(hdr, bus->rxctl, len);
+		goto gotpkt;
+	}
+
+	/* Raise rdlen to next SDIO block to avoid tail command */
+	rdlen = len - firstread;
+	if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) {
+		pad = bus->blocksize - (rdlen % bus->blocksize);
+		if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
+		    ((len + pad) < bus->dhd->maxctl))
+			rdlen += pad;
+	} else if (rdlen % DHD_SDALIGN) {
+		rdlen += DHD_SDALIGN - (rdlen % DHD_SDALIGN);
+	}
+
+	/* Satisfy length-alignment requirements */
+	if (forcealign && (rdlen & (ALIGNMENT - 1)))
+		rdlen = ROUNDUP(rdlen, ALIGNMENT);
+
+	/* Drop if the read is too big or it exceeds our maximum */
+	if ((rdlen + firstread) > bus->dhd->maxctl) {
+		DHD_ERROR(("%s: %d-byte control read exceeds %d-byte buffer\n",
+		           __FUNCTION__, rdlen, bus->dhd->maxctl));
+		bus->dhd->rx_errors++;
+		dhdsdio_rxfail(bus, FALSE, FALSE);
+		goto done;
+	}
+
+	if ((len - doff) > bus->dhd->maxctl) {
+		DHD_ERROR(("%s: %d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n",
+		           __FUNCTION__, len, (len - doff), bus->dhd->maxctl));
+		bus->dhd->rx_errors++; bus->rx_toolong++;
+		dhdsdio_rxfail(bus, FALSE, FALSE);
+		goto done;
+	}
+
+
+	/* Read remainder of frame body into the rxctl buffer */
+	sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+	                            (bus->rxctl + firstread), rdlen, NULL, NULL, NULL);
+	bus->f2rxdata++;
+	ASSERT(sdret != BCME_PENDING);
+
+	/* Control frame failures need retransmission */
+	if (sdret < 0) {
+		DHD_ERROR(("%s: read %d control bytes failed: %d\n", __FUNCTION__, rdlen, sdret));
+		bus->rxc_errors++; /* dhd.rx_ctlerrs is higher level */
+		dhdsdio_rxfail(bus, TRUE, TRUE);
+		goto done;
+	}
+
+gotpkt:
+
+#ifdef DHD_DEBUG
+	if (DHD_BYTES_ON() && DHD_CTL_ON()) {
+		prhex("RxCtrl", bus->rxctl, len);
+	}
+#endif
+
+	/* Point to valid data and indicate its length */
+	bus->rxctl += doff;
+	bus->rxlen = len - doff;
+
+done:
+	/* Awake any waiters */
+	dhd_os_ioctl_resp_wake(bus->dhd);
+}
+int
+dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf, uint reorder_info_len,
+	void **pkt, uint32 *pkt_count);
+
+static uint8
+dhdsdio_rxglom(dhd_bus_t *bus, uint8 rxseq)
+{
+	uint16 dlen, totlen;
+	uint8 *dptr, num = 0;
+
+	uint16 sublen, check;
+	void *pfirst, *plast, *pnext;
+	void * list_tail[DHD_MAX_IFS] = { NULL };
+	void * list_head[DHD_MAX_IFS] = { NULL };
+	uint8 idx;
+	osl_t *osh = bus->dhd->osh;
+
+	int errcode;
+	uint8 chan, seq, doff, sfdoff;
+	uint8 txmax;
+	uchar reorder_info_buf[WLHOST_REORDERDATA_TOTLEN];
+	uint reorder_info_len;
+
+	int ifidx = 0;
+	bool usechain = bus->use_rxchain;
+
+	/* If packets, issue read(s) and send up packet chain */
+	/* Return sequence numbers consumed? */
+
+	DHD_TRACE(("dhdsdio_rxglom: start: glomd %p glom %p\n", bus->glomd, bus->glom));
+
+	/* If there's a descriptor, generate the packet chain */
+	if (bus->glomd) {
+		dhd_os_sdlock_rxq(bus->dhd);
+
+		pfirst = plast = pnext = NULL;
+		dlen = (uint16)PKTLEN(osh, bus->glomd);
+		dptr = PKTDATA(osh, bus->glomd);
+		if (!dlen || (dlen & 1)) {
+			DHD_ERROR(("%s: bad glomd len (%d), ignore descriptor\n",
+			           __FUNCTION__, dlen));
+			dlen = 0;
+		}
+
+		for (totlen = num = 0; dlen; num++) {
+			/* Get (and move past) next length */
+			sublen = ltoh16_ua(dptr);
+			dlen -= sizeof(uint16);
+			dptr += sizeof(uint16);
+			if ((sublen < SDPCM_HDRLEN) ||
+			    ((num == 0) && (sublen < (2 * SDPCM_HDRLEN)))) {
+				DHD_ERROR(("%s: descriptor len %d bad: %d\n",
+				           __FUNCTION__, num, sublen));
+				pnext = NULL;
+				break;
+			}
+			if (sublen % DHD_SDALIGN) {
+				DHD_ERROR(("%s: sublen %d not a multiple of %d\n",
+				           __FUNCTION__, sublen, DHD_SDALIGN));
+				usechain = FALSE;
+			}
+			totlen += sublen;
+
+			/* For last frame, adjust read len so total is a block multiple */
+			if (!dlen) {
+				sublen += (ROUNDUP(totlen, bus->blocksize) - totlen);
+				totlen = ROUNDUP(totlen, bus->blocksize);
+			}
+
+			/* Allocate/chain packet for next subframe */
+			if ((pnext = PKTGET(osh, sublen + DHD_SDALIGN, FALSE)) == NULL) {
+				DHD_ERROR(("%s: PKTGET failed, num %d len %d\n",
+				           __FUNCTION__, num, sublen));
+				break;
+			}
+			ASSERT(!PKTLINK(pnext));
+			if (!pfirst) {
+				ASSERT(!plast);
+				pfirst = plast = pnext;
+			} else {
+				ASSERT(plast);
+				PKTSETNEXT(osh, plast, pnext);
+				plast = pnext;
+			}
+
+			/* Adhere to start alignment requirements */
+			PKTALIGN(osh, pnext, sublen, DHD_SDALIGN);
+		}
+
+		/* If all allocations succeeded, save packet chain in bus structure */
+		if (pnext) {
+			DHD_GLOM(("%s: allocated %d-byte packet chain for %d subframes\n",
+			          __FUNCTION__, totlen, num));
+			if (DHD_GLOM_ON() && bus->nextlen) {
+				if (totlen != bus->nextlen) {
+					DHD_GLOM(("%s: glomdesc mismatch: nextlen %d glomdesc %d "
+					          "rxseq %d\n", __FUNCTION__, bus->nextlen,
+					          totlen, rxseq));
+				}
+			}
+			bus->glom = pfirst;
+			pfirst = pnext = NULL;
+		} else {
+			if (pfirst)
+				PKTFREE(osh, pfirst, FALSE);
+			bus->glom = NULL;
+			num = 0;
+		}
+
+		/* Done with descriptor packet */
+		PKTFREE(osh, bus->glomd, FALSE);
+		bus->glomd = NULL;
+		bus->nextlen = 0;
+
+		dhd_os_sdunlock_rxq(bus->dhd);
+	}
+
+	/* Ok -- either we just generated a packet chain, or had one from before */
+	if (bus->glom) {
+		if (DHD_GLOM_ON()) {
+			DHD_GLOM(("%s: attempt superframe read, packet chain:\n", __FUNCTION__));
+			for (pnext = bus->glom; pnext; pnext = PKTNEXT(osh, pnext)) {
+				DHD_GLOM(("    %p: %p len 0x%04x (%d)\n",
+				          pnext, (uint8*)PKTDATA(osh, pnext),
+				          PKTLEN(osh, pnext), PKTLEN(osh, pnext)));
+			}
+		}
+
+		pfirst = bus->glom;
+		dlen = (uint16)pkttotlen(osh, pfirst);
+
+		/* Do an SDIO read for the superframe.  Configurable iovar to
+		 * read directly into the chained packet, or allocate a large
+		 * packet and and copy into the chain.
+		 */
+		if (usechain) {
+			errcode = dhd_bcmsdh_recv_buf(bus,
+			                              bcmsdh_cur_sbwad(bus->sdh), SDIO_FUNC_2,
+			                              F2SYNC, (uint8*)PKTDATA(osh, pfirst),
+			                              dlen, pfirst, NULL, NULL);
+		} else if (bus->dataptr) {
+			errcode = dhd_bcmsdh_recv_buf(bus,
+			                              bcmsdh_cur_sbwad(bus->sdh), SDIO_FUNC_2,
+			                              F2SYNC, bus->dataptr,
+			                              dlen, NULL, NULL, NULL);
+			sublen = (uint16)pktfrombuf(osh, pfirst, 0, dlen, bus->dataptr);
+			if (sublen != dlen) {
+				DHD_ERROR(("%s: FAILED TO COPY, dlen %d sublen %d\n",
+				           __FUNCTION__, dlen, sublen));
+				errcode = -1;
+			}
+			pnext = NULL;
+		} else {
+			DHD_ERROR(("COULDN'T ALLOC %d-BYTE GLOM, FORCE FAILURE\n", dlen));
+			errcode = -1;
+		}
+		bus->f2rxdata++;
+		ASSERT(errcode != BCME_PENDING);
+
+		/* On failure, kill the superframe, allow a couple retries */
+		if (errcode < 0) {
+			DHD_ERROR(("%s: glom read of %d bytes failed: %d\n",
+			           __FUNCTION__, dlen, errcode));
+			bus->dhd->rx_errors++;
+
+			if (bus->glomerr++ < 3) {
+				dhdsdio_rxfail(bus, TRUE, TRUE);
+			} else {
+				bus->glomerr = 0;
+				dhdsdio_rxfail(bus, TRUE, FALSE);
+				dhd_os_sdlock_rxq(bus->dhd);
+				PKTFREE(osh, bus->glom, FALSE);
+				dhd_os_sdunlock_rxq(bus->dhd);
+				bus->rxglomfail++;
+				bus->glom = NULL;
+			}
+			return 0;
+		}
+
+#ifdef DHD_DEBUG
+		if (DHD_GLOM_ON()) {
+			prhex("SUPERFRAME", PKTDATA(osh, pfirst),
+			      MIN(PKTLEN(osh, pfirst), 48));
+		}
+#endif
+
+
+		/* Validate the superframe header */
+		dptr = (uint8 *)PKTDATA(osh, pfirst);
+		sublen = ltoh16_ua(dptr);
+		check = ltoh16_ua(dptr + sizeof(uint16));
+
+		chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
+		seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]);
+		bus->nextlen = dptr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
+		if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
+			DHD_INFO(("%s: got frame w/nextlen too large (%d) seq %d\n",
+			          __FUNCTION__, bus->nextlen, seq));
+			bus->nextlen = 0;
+		}
+		doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+		txmax = SDPCM_WINDOW_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+
+		errcode = 0;
+		if ((uint16)~(sublen^check)) {
+			DHD_ERROR(("%s (superframe): HW hdr error: len/check 0x%04x/0x%04x\n",
+			           __FUNCTION__, sublen, check));
+			errcode = -1;
+		} else if (ROUNDUP(sublen, bus->blocksize) != dlen) {
+			DHD_ERROR(("%s (superframe): len 0x%04x, rounded 0x%04x, expect 0x%04x\n",
+			           __FUNCTION__, sublen, ROUNDUP(sublen, bus->blocksize), dlen));
+			errcode = -1;
+		} else if (SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]) != SDPCM_GLOM_CHANNEL) {
+			DHD_ERROR(("%s (superframe): bad channel %d\n", __FUNCTION__,
+			           SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN])));
+			errcode = -1;
+		} else if (SDPCM_GLOMDESC(&dptr[SDPCM_FRAMETAG_LEN])) {
+			DHD_ERROR(("%s (superframe): got second descriptor?\n", __FUNCTION__));
+			errcode = -1;
+		} else if ((doff < SDPCM_HDRLEN) ||
+		           (doff > (PKTLEN(osh, pfirst) - SDPCM_HDRLEN))) {
+			DHD_ERROR(("%s (superframe): Bad data offset %d: HW %d pkt %d min %d\n",
+				__FUNCTION__, doff, sublen, PKTLEN(osh, pfirst),
+				SDPCM_HDRLEN));
+			errcode = -1;
+		}
+
+		/* Check sequence number of superframe SW header */
+		if (rxseq != seq) {
+			DHD_INFO(("%s: (superframe) rx_seq %d, expected %d\n",
+			          __FUNCTION__, seq, rxseq));
+			bus->rx_badseq++;
+			rxseq = seq;
+		}
+
+		/* Check window for sanity */
+		if ((uint8)(txmax - bus->tx_seq) > 0x70) {
+			DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n",
+			           __FUNCTION__, txmax, bus->tx_seq));
+			txmax = bus->tx_max;
+		}
+		bus->tx_max = txmax;
+
+		/* Remove superframe header, remember offset */
+		PKTPULL(osh, pfirst, doff);
+		sfdoff = doff;
+
+		/* Validate all the subframe headers */
+		for (num = 0, pnext = pfirst; pnext && !errcode;
+		     num++, pnext = PKTNEXT(osh, pnext)) {
+			dptr = (uint8 *)PKTDATA(osh, pnext);
+			dlen = (uint16)PKTLEN(osh, pnext);
+			sublen = ltoh16_ua(dptr);
+			check = ltoh16_ua(dptr + sizeof(uint16));
+			chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
+			doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+#ifdef DHD_DEBUG
+			if (DHD_GLOM_ON()) {
+				prhex("subframe", dptr, 32);
+			}
+#endif
+
+			if ((uint16)~(sublen^check)) {
+				DHD_ERROR(("%s (subframe %d): HW hdr error: "
+				           "len/check 0x%04x/0x%04x\n",
+				           __FUNCTION__, num, sublen, check));
+				errcode = -1;
+			} else if ((sublen > dlen) || (sublen < SDPCM_HDRLEN)) {
+				DHD_ERROR(("%s (subframe %d): length mismatch: "
+				           "len 0x%04x, expect 0x%04x\n",
+				           __FUNCTION__, num, sublen, dlen));
+				errcode = -1;
+			} else if ((chan != SDPCM_DATA_CHANNEL) &&
+			           (chan != SDPCM_EVENT_CHANNEL)) {
+				DHD_ERROR(("%s (subframe %d): bad channel %d\n",
+				           __FUNCTION__, num, chan));
+				errcode = -1;
+			} else if ((doff < SDPCM_HDRLEN) || (doff > sublen)) {
+				DHD_ERROR(("%s (subframe %d): Bad data offset %d: HW %d min %d\n",
+				           __FUNCTION__, num, doff, sublen, SDPCM_HDRLEN));
+				errcode = -1;
+			}
+		}
+
+		if (errcode) {
+			/* Terminate frame on error, request a couple retries */
+			if (bus->glomerr++ < 3) {
+				/* Restore superframe header space */
+				PKTPUSH(osh, pfirst, sfdoff);
+				dhdsdio_rxfail(bus, TRUE, TRUE);
+			} else {
+				bus->glomerr = 0;
+				dhdsdio_rxfail(bus, TRUE, FALSE);
+				dhd_os_sdlock_rxq(bus->dhd);
+				PKTFREE(osh, bus->glom, FALSE);
+				dhd_os_sdunlock_rxq(bus->dhd);
+				bus->rxglomfail++;
+				bus->glom = NULL;
+			}
+			bus->nextlen = 0;
+			return 0;
+		}
+
+		/* Basic SD framing looks ok - process each packet (header) */
+		bus->glom = NULL;
+		plast = NULL;
+
+		dhd_os_sdlock_rxq(bus->dhd);
+		for (num = 0; pfirst; rxseq++, pfirst = pnext) {
+			pnext = PKTNEXT(osh, pfirst);
+			PKTSETNEXT(osh, pfirst, NULL);
+
+			dptr = (uint8 *)PKTDATA(osh, pfirst);
+			sublen = ltoh16_ua(dptr);
+			chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
+			seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]);
+			doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+
+			DHD_GLOM(("%s: Get subframe %d, %p(%p/%d), sublen %d chan %d seq %d\n",
+			          __FUNCTION__, num, pfirst, PKTDATA(osh, pfirst),
+			          PKTLEN(osh, pfirst), sublen, chan, seq));
+
+			ASSERT((chan == SDPCM_DATA_CHANNEL) || (chan == SDPCM_EVENT_CHANNEL));
+
+			if (rxseq != seq) {
+				DHD_GLOM(("%s: rx_seq %d, expected %d\n",
+				          __FUNCTION__, seq, rxseq));
+				bus->rx_badseq++;
+				rxseq = seq;
+			}
+
+#ifdef DHD_DEBUG
+			if (DHD_BYTES_ON() && DHD_DATA_ON()) {
+				prhex("Rx Subframe Data", dptr, dlen);
+			}
+#endif
+
+			PKTSETLEN(osh, pfirst, sublen);
+			PKTPULL(osh, pfirst, doff);
+
+			reorder_info_len = sizeof(reorder_info_buf);
+
+			if (PKTLEN(osh, pfirst) == 0) {
+				PKTFREE(bus->dhd->osh, pfirst, FALSE);
+				continue;
+			} else if (dhd_prot_hdrpull(bus->dhd, &ifidx, pfirst, reorder_info_buf,
+				&reorder_info_len) != 0) {
+				DHD_ERROR(("%s: rx protocol error\n", __FUNCTION__));
+				bus->dhd->rx_errors++;
+				PKTFREE(osh, pfirst, FALSE);
+				continue;
+			}
+			if (reorder_info_len) {
+				uint32 free_buf_count;
+				void *ppfirst;
+
+				ppfirst = pfirst;
+				/* Reordering info from the firmware */
+				dhd_process_pkt_reorder_info(bus->dhd, reorder_info_buf,
+					reorder_info_len, &ppfirst, &free_buf_count);
+
+				if (free_buf_count == 0) {
+					continue;
+				}
+				else {
+					void *temp;
+
+					/*  go to the end of the chain and attach the pnext there */
+					temp = ppfirst;
+					while (PKTNEXT(osh, temp) != NULL) {
+						temp = PKTNEXT(osh, temp);
+					}
+					pfirst = temp;
+					if (list_tail[ifidx] == NULL)
+						list_head[ifidx] = ppfirst;
+					else
+						PKTSETNEXT(osh, list_tail[ifidx], ppfirst);
+					list_tail[ifidx] = pfirst;
+				}
+
+				num += (uint8)free_buf_count;
+			}
+			else {
+				/* this packet will go up, link back into chain and count it */
+
+				if (list_tail[ifidx] == NULL) {
+					list_head[ifidx] = list_tail[ifidx] = pfirst;
+				}
+				else {
+					PKTSETNEXT(osh, list_tail[ifidx], pfirst);
+					list_tail[ifidx] = pfirst;
+				}
+				num++;
+			}
+#ifdef DHD_DEBUG
+			if (DHD_GLOM_ON()) {
+				DHD_GLOM(("%s subframe %d to stack, %p(%p/%d) nxt/lnk %p/%p\n",
+				          __FUNCTION__, num, pfirst,
+				          PKTDATA(osh, pfirst), PKTLEN(osh, pfirst),
+				          PKTNEXT(osh, pfirst), PKTLINK(pfirst)));
+				prhex("", (uint8 *)PKTDATA(osh, pfirst),
+				      MIN(PKTLEN(osh, pfirst), 32));
+			}
+#endif /* DHD_DEBUG */
+		}
+		dhd_os_sdunlock_rxq(bus->dhd);
+
+		for (idx = 0; idx < DHD_MAX_IFS; idx++) {
+			if (list_head[idx]) {
+				void *temp;
+				uint8 cnt = 0;
+				temp = list_head[idx];
+				do {
+					temp = PKTNEXT(osh, temp);
+					cnt++;
+				} while (temp);
+				if (cnt) {
+					dhd_os_sdunlock(bus->dhd);
+					dhd_rx_frame(bus->dhd, idx, list_head[idx], cnt, 0);
+					dhd_os_sdlock(bus->dhd);
+				}
+			}
+		}
+		bus->rxglomframes++;
+		bus->rxglompkts += num;
+	}
+	return num;
+}
+
+
+#ifdef SDHOST3
+static bool
+dhdsdio_pr94636_WAR(dhd_bus_t *bus)
+{
+	uint cd = 0;
+	uint ld = 0;
+	int bcmerror = 0;
+	uint32 l_data[5];
+	uint32 l_addr = (0x18002200 & SBSDIO_SB_OFT_ADDR_MASK);
+
+	/* Read 20 bytes from 0x18002200
+	 * the sdiod Tx DMA registers address on AI Backplane.
+	*/
+	if ((bcmerror = bcmsdh_rwdata(bus->sdh, FALSE, l_addr, (uint8 *)&l_data[0], 20))) {
+		DHD_ERROR(("%s: bcmsdh_rwdata failed\n", __FUNCTION__));
+		return FALSE;
+	}
+	ld = l_data[1];
+	ld = ld & 0x00001fff;
+	cd = l_data[4];
+	cd = cd & 0x00001fff;
+	if (cd == ld)
+		return TRUE;
+	else
+		return FALSE;
+}
+#endif /* SDHOST3 */
+/* Return TRUE if there may be more frames to read */
+static uint
+dhdsdio_readframes(dhd_bus_t *bus, uint maxframes, bool *finished)
+{
+	osl_t *osh = bus->dhd->osh;
+	bcmsdh_info_t *sdh = bus->sdh;
+
+	uint16 len, check;	/* Extracted hardware header fields */
+	uint8 chan, seq, doff;	/* Extracted software header fields */
+	uint8 fcbits;		/* Extracted fcbits from software header */
+	uint8 delta;
+
+	void *pkt;	/* Packet for event or data frames */
+	uint16 pad;	/* Number of pad bytes to read */
+	uint16 rdlen;	/* Total number of bytes to read */
+	uint8 rxseq;	/* Next sequence number to expect */
+	uint rxleft = 0;	/* Remaining number of frames allowed */
+	int sdret;	/* Return code from bcmsdh calls */
+	uint8 txmax;	/* Maximum tx sequence offered */
+	bool len_consistent; /* Result of comparing readahead len and len from hw-hdr */
+	uint8 *rxbuf;
+	int ifidx = 0;
+	uint rxcount = 0; /* Total frames read */
+	uchar reorder_info_buf[WLHOST_REORDERDATA_TOTLEN];
+	uint reorder_info_len;
+	uint pkt_count;
+#ifdef DHD_WAKE_STATUS
+	int pkt_wake = bcmsdh_set_get_wake(bus->sdh, 0);
+#endif
+
+#if defined(DHD_DEBUG) || defined(SDTEST)
+	bool sdtest = FALSE;	/* To limit message spew from test mode */
+#endif
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	bus->readframes = TRUE;
+
+	if (!KSO_ENAB(bus)) {
+		DHD_ERROR(("%s: KSO off\n", __FUNCTION__));
+		bus->readframes = FALSE;
+		return 0;
+	}
+
+	ASSERT(maxframes);
+
+#ifdef SDTEST
+	/* Allow pktgen to override maxframes */
+	if (bus->pktgen_count && (bus->pktgen_mode == DHD_PKTGEN_RECV)) {
+		maxframes = bus->pktgen_count;
+		sdtest = TRUE;
+	}
+#endif
+
+	/* Not finished unless we encounter no more frames indication */
+	*finished = FALSE;
+
+
+	for (rxseq = bus->rx_seq, rxleft = maxframes;
+	     !bus->rxskip && rxleft && bus->dhd->busstate != DHD_BUS_DOWN;
+#ifdef DHD_WAKE_STATUS
+	     rxseq++, rxleft--, pkt_wake=0) {
+#else
+	     rxseq++, rxleft--) {
+#endif
+#ifdef DHDTCPACK_SUP_DBG
+		if (bus->dhd->tcpack_sup_mode != TCPACK_SUP_DELAYTX) {
+			if (bus->dotxinrx == FALSE)
+				DHD_ERROR(("%s %d: dotxinrx FALSE with tcpack_sub_mode %d\n",
+					__FUNCTION__, __LINE__, bus->dhd->tcpack_sup_mode));
+		}
+#ifdef DEBUG_COUNTER
+		else if (pktq_mlen(&bus->txq, ~bus->flowcontrol) > 0) {
+			tack_tbl.cnt[bus->dotxinrx ? 6 : 7]++;
+		}
+#endif /* DEBUG_COUNTER */
+#endif /* DHDTCPACK_SUP_DBG */
+		/* tx more to improve rx performance */
+		if (TXCTLOK(bus) && bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL)) {
+			dhdsdio_sendpendctl(bus);
+		} else if (bus->dotxinrx && (bus->clkstate == CLK_AVAIL) &&
+			!bus->fcstate && DATAOK(bus) &&
+			(pktq_mlen(&bus->txq, ~bus->flowcontrol) > bus->txinrx_thres)) {
+			dhdsdio_sendfromq(bus, dhd_txbound);
+#ifdef DHDTCPACK_SUPPRESS
+			/* In TCPACK_SUP_DELAYTX mode, do txinrx only if
+			 * 1. Any DATA packet to TX
+			 * 2. TCPACK to TCPDATA PSH packets.
+			 * in bus txq.
+			 */
+			bus->dotxinrx = (bus->dhd->tcpack_sup_mode == TCPACK_SUP_DELAYTX) ?
+				FALSE : TRUE;
+#endif
+		}
+
+		/* Handle glomming separately */
+		if (bus->glom || bus->glomd) {
+			uint8 cnt;
+			DHD_GLOM(("%s: calling rxglom: glomd %p, glom %p\n",
+			          __FUNCTION__, bus->glomd, bus->glom));
+#ifdef DHD_WAKE_STATUS
+			bus->glomwake += pkt_wake;
+#endif
+			cnt = dhdsdio_rxglom(bus, rxseq);
+			DHD_GLOM(("%s: rxglom returned %d\n", __FUNCTION__, cnt));
+			rxseq += cnt - 1;
+			rxleft = (rxleft > cnt) ? (rxleft - cnt) : 1;
+			continue;
+		}
+
+		/* Try doing single read if we can */
+		if (dhd_readahead && bus->nextlen) {
+			uint16 nextlen = bus->nextlen;
+			bus->nextlen = 0;
+
+			if (bus->bus == SPI_BUS) {
+				rdlen = len = nextlen;
+			}
+			else {
+				rdlen = len = nextlen << 4;
+
+				/* Pad read to blocksize for efficiency */
+				if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) {
+					pad = bus->blocksize - (rdlen % bus->blocksize);
+					if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
+						((rdlen + pad + firstread) < MAX_RX_DATASZ))
+						rdlen += pad;
+				} else if (rdlen % DHD_SDALIGN) {
+					rdlen += DHD_SDALIGN - (rdlen % DHD_SDALIGN);
+				}
+			}
+
+			/* We use bus->rxctl buffer in WinXP for initial control pkt receives.
+			 * Later we use buffer-poll for data as well as control packets.
+			 * This is required because dhd receives full frame in gSPI unlike SDIO.
+			 * After the frame is received we have to distinguish whether it is data
+			 * or non-data frame.
+			 */
+			/* Allocate a packet buffer */
+			dhd_os_sdlock_rxq(bus->dhd);
+			if (!(pkt = PKTGET(osh, rdlen + DHD_SDALIGN, FALSE))) {
+				if (bus->bus == SPI_BUS) {
+					bus->usebufpool = FALSE;
+					bus->rxctl = bus->rxbuf;
+					if (dhd_alignctl) {
+						bus->rxctl += firstread;
+						if ((pad = ((uintptr)bus->rxctl % DHD_SDALIGN)))
+							bus->rxctl += (DHD_SDALIGN - pad);
+						bus->rxctl -= firstread;
+					}
+					ASSERT(bus->rxctl >= bus->rxbuf);
+					rxbuf = bus->rxctl;
+					/* Read the entire frame */
+					sdret = dhd_bcmsdh_recv_buf(bus,
+					                            bcmsdh_cur_sbwad(sdh),
+					                            SDIO_FUNC_2,
+					                            F2SYNC, rxbuf, rdlen,
+					                            NULL, NULL, NULL);
+					bus->f2rxdata++;
+					ASSERT(sdret != BCME_PENDING);
+
+
+					/* Control frame failures need retransmission */
+					if (sdret < 0) {
+						DHD_ERROR(("%s: read %d control bytes failed: %d\n",
+						   __FUNCTION__, rdlen, sdret));
+						/* dhd.rx_ctlerrs is higher level */
+						bus->rxc_errors++;
+						dhd_os_sdunlock_rxq(bus->dhd);
+						dhdsdio_rxfail(bus, TRUE,
+						    (bus->bus == SPI_BUS) ? FALSE : TRUE);
+						continue;
+					}
+				} else {
+					/* Give up on data, request rtx of events */
+					DHD_ERROR(("%s (nextlen): PKTGET failed: len %d rdlen %d "
+					           "expected rxseq %d\n",
+					           __FUNCTION__, len, rdlen, rxseq));
+					/* Just go try again w/normal header read */
+					dhd_os_sdunlock_rxq(bus->dhd);
+					continue;
+				}
+			} else {
+				if (bus->bus == SPI_BUS)
+					bus->usebufpool = TRUE;
+
+				ASSERT(!PKTLINK(pkt));
+				PKTALIGN(osh, pkt, rdlen, DHD_SDALIGN);
+				rxbuf = (uint8 *)PKTDATA(osh, pkt);
+				/* Read the entire frame */
+				sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh),
+				                            SDIO_FUNC_2,
+				                            F2SYNC, rxbuf, rdlen,
+				                            pkt, NULL, NULL);
+				bus->f2rxdata++;
+				ASSERT(sdret != BCME_PENDING);
+
+				if (sdret < 0) {
+					DHD_ERROR(("%s (nextlen): read %d bytes failed: %d\n",
+					   __FUNCTION__, rdlen, sdret));
+					PKTFREE(bus->dhd->osh, pkt, FALSE);
+					bus->dhd->rx_errors++;
+					dhd_os_sdunlock_rxq(bus->dhd);
+					/* Force retry w/normal header read.  Don't attempt NAK for
+					 * gSPI
+					 */
+					dhdsdio_rxfail(bus, TRUE,
+					      (bus->bus == SPI_BUS) ? FALSE : TRUE);
+					continue;
+				}
+			}
+			dhd_os_sdunlock_rxq(bus->dhd);
+
+			/* Now check the header */
+			bcopy(rxbuf, bus->rxhdr, SDPCM_HDRLEN);
+
+			/* Extract hardware header fields */
+			len = ltoh16_ua(bus->rxhdr);
+			check = ltoh16_ua(bus->rxhdr + sizeof(uint16));
+
+			/* All zeros means readahead info was bad */
+			if (!(len|check)) {
+				DHD_INFO(("%s (nextlen): read zeros in HW header???\n",
+				           __FUNCTION__));
+				dhd_os_sdlock_rxq(bus->dhd);
+				PKTFREE2();
+				dhd_os_sdunlock_rxq(bus->dhd);
+				GSPI_PR55150_BAILOUT;
+				continue;
+			}
+
+			/* Validate check bytes */
+			if ((uint16)~(len^check)) {
+				DHD_ERROR(("%s (nextlen): HW hdr error: nextlen/len/check"
+				           " 0x%04x/0x%04x/0x%04x\n", __FUNCTION__, nextlen,
+				           len, check));
+				dhd_os_sdlock_rxq(bus->dhd);
+				PKTFREE2();
+				dhd_os_sdunlock_rxq(bus->dhd);
+				bus->rx_badhdr++;
+				dhdsdio_rxfail(bus, FALSE, FALSE);
+				GSPI_PR55150_BAILOUT;
+				continue;
+			}
+
+			/* Validate frame length */
+			if (len < SDPCM_HDRLEN) {
+				DHD_ERROR(("%s (nextlen): HW hdr length invalid: %d\n",
+				           __FUNCTION__, len));
+				dhd_os_sdlock_rxq(bus->dhd);
+				PKTFREE2();
+				dhd_os_sdunlock_rxq(bus->dhd);
+				GSPI_PR55150_BAILOUT;
+				continue;
+			}
+
+			/* Check for consistency with readahead info */
+				len_consistent = (nextlen != (ROUNDUP(len, 16) >> 4));
+			if (len_consistent) {
+				/* Mismatch, force retry w/normal header (may be >4K) */
+				DHD_ERROR(("%s (nextlen): mismatch, nextlen %d len %d rnd %d; "
+				           "expected rxseq %d\n",
+				           __FUNCTION__, nextlen, len, ROUNDUP(len, 16), rxseq));
+				dhd_os_sdlock_rxq(bus->dhd);
+				PKTFREE2();
+				dhd_os_sdunlock_rxq(bus->dhd);
+				dhdsdio_rxfail(bus, TRUE, (bus->bus == SPI_BUS) ? FALSE : TRUE);
+				GSPI_PR55150_BAILOUT;
+				continue;
+			}
+
+
+			/* Extract software header fields */
+			chan = SDPCM_PACKET_CHANNEL(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+			seq = SDPCM_PACKET_SEQUENCE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+			doff = SDPCM_DOFFSET_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+			txmax = SDPCM_WINDOW_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+
+				bus->nextlen =
+				         bus->rxhdr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
+				if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
+					DHD_INFO(("%s (nextlen): got frame w/nextlen too large"
+					          " (%d), seq %d\n", __FUNCTION__, bus->nextlen,
+					          seq));
+					bus->nextlen = 0;
+				}
+
+				bus->dhd->rx_readahead_cnt ++;
+			/* Handle Flow Control */
+			fcbits = SDPCM_FCMASK_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+
+			delta = 0;
+			if (~bus->flowcontrol & fcbits) {
+				bus->fc_xoff++;
+				delta = 1;
+			}
+			if (bus->flowcontrol & ~fcbits) {
+				bus->fc_xon++;
+				delta = 1;
+			}
+
+			if (delta) {
+				bus->fc_rcvd++;
+				bus->flowcontrol = fcbits;
+			}
+
+			/* Check and update sequence number */
+			if (rxseq != seq) {
+				DHD_INFO(("%s (nextlen): rx_seq %d, expected %d\n",
+				          __FUNCTION__, seq, rxseq));
+				bus->rx_badseq++;
+				rxseq = seq;
+			}
+
+			/* Check window for sanity */
+			if ((uint8)(txmax - bus->tx_seq) > 0x70) {
+					DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n",
+						__FUNCTION__, txmax, bus->tx_seq));
+					txmax = bus->tx_max;
+			}
+			bus->tx_max = txmax;
+
+#ifdef DHD_DEBUG
+			if (DHD_BYTES_ON() && DHD_DATA_ON()) {
+				prhex("Rx Data", rxbuf, len);
+			} else if (DHD_HDRS_ON()) {
+				prhex("RxHdr", bus->rxhdr, SDPCM_HDRLEN);
+			}
+#endif
+
+			if (chan == SDPCM_CONTROL_CHANNEL) {
+				if (bus->bus == SPI_BUS) {
+					dhdsdio_read_control(bus, rxbuf, len, doff);
+					if (bus->usebufpool) {
+						dhd_os_sdlock_rxq(bus->dhd);
+						PKTFREE(bus->dhd->osh, pkt, FALSE);
+						dhd_os_sdunlock_rxq(bus->dhd);
+					}
+					continue;
+				} else {
+					DHD_ERROR(("%s (nextlen): readahead on control"
+					           " packet %d?\n", __FUNCTION__, seq));
+					/* Force retry w/normal header read */
+					bus->nextlen = 0;
+					dhdsdio_rxfail(bus, FALSE, TRUE);
+					dhd_os_sdlock_rxq(bus->dhd);
+					PKTFREE2();
+					dhd_os_sdunlock_rxq(bus->dhd);
+					continue;
+				}
+			}
+
+			if ((bus->bus == SPI_BUS) && !bus->usebufpool) {
+				DHD_ERROR(("Received %d bytes on %d channel. Running out of "
+				           "rx pktbuf's or not yet malloced.\n", len, chan));
+				continue;
+			}
+
+			/* Validate data offset */
+			if ((doff < SDPCM_HDRLEN) || (doff > len)) {
+				DHD_ERROR(("%s (nextlen): bad data offset %d: HW len %d min %d\n",
+				           __FUNCTION__, doff, len, SDPCM_HDRLEN));
+				dhd_os_sdlock_rxq(bus->dhd);
+				PKTFREE2();
+				dhd_os_sdunlock_rxq(bus->dhd);
+				ASSERT(0);
+				dhdsdio_rxfail(bus, FALSE, FALSE);
+				continue;
+			}
+
+			/* All done with this one -- now deliver the packet */
+			goto deliver;
+		}
+		/* gSPI frames should not be handled in fractions */
+		if (bus->bus == SPI_BUS) {
+			break;
+		}
+#ifdef SDHOST3
+		if (((((uint16)bus->sih->chip) == BCM4324_CHIP_ID) && (bus->sih->chiprev <= 1)) ||
+			(((uint16)bus->sih->chip) == BCM43340_CHIP_ID) ||
+			(((uint16)bus->sih->chip) == BCM43341_CHIP_ID) ||
+			(((uint16)bus->sih->chip) == BCM4334_CHIP_ID)) {
+			if (dhdsdio_pr94636_WAR(bus) == TRUE) {
+				*finished = TRUE;
+				break;
+			}
+		}
+#endif /* SDHOST3 */
+
+		/* Read frame header (hardware and software) */
+		sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+		                            bus->rxhdr, firstread, NULL, NULL, NULL);
+		bus->f2rxhdrs++;
+		ASSERT(sdret != BCME_PENDING);
+
+		if (sdret < 0) {
+			DHD_ERROR(("%s: RXHEADER FAILED: %d\n", __FUNCTION__, sdret));
+			bus->rx_hdrfail++;
+			dhdsdio_rxfail(bus, TRUE, TRUE);
+			continue;
+		}
+
+#ifdef DHD_DEBUG
+		if (DHD_BYTES_ON() || DHD_HDRS_ON()) {
+			prhex("RxHdr", bus->rxhdr, SDPCM_HDRLEN);
+		}
+#endif
+
+		/* Extract hardware header fields */
+		len = ltoh16_ua(bus->rxhdr);
+		check = ltoh16_ua(bus->rxhdr + sizeof(uint16));
+
+		/* All zeros means no more frames */
+		if (!(len|check)) {
+			*finished = TRUE;
+			break;
+		}
+
+		/* Validate check bytes */
+		if ((uint16)~(len^check)) {
+			DHD_ERROR(("%s: HW hdr error: len/check 0x%04x/0x%04x\n",
+			           __FUNCTION__, len, check));
+			bus->rx_badhdr++;
+			dhdsdio_rxfail(bus, FALSE, FALSE);
+			continue;
+		}
+
+		/* Validate frame length */
+		if (len < SDPCM_HDRLEN) {
+			DHD_ERROR(("%s: HW hdr length invalid: %d\n", __FUNCTION__, len));
+			continue;
+		}
+
+		/* Extract software header fields */
+		chan = SDPCM_PACKET_CHANNEL(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+		seq = SDPCM_PACKET_SEQUENCE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+		doff = SDPCM_DOFFSET_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+		txmax = SDPCM_WINDOW_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+
+		/* Validate data offset */
+		if ((doff < SDPCM_HDRLEN) || (doff > len)) {
+			DHD_ERROR(("%s: Bad data offset %d: HW len %d, min %d seq %d\n",
+			           __FUNCTION__, doff, len, SDPCM_HDRLEN, seq));
+			bus->rx_badhdr++;
+			ASSERT(0);
+			dhdsdio_rxfail(bus, FALSE, FALSE);
+			continue;
+		}
+
+		/* Save the readahead length if there is one */
+		bus->nextlen = bus->rxhdr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
+		if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
+			DHD_INFO(("%s (nextlen): got frame w/nextlen too large (%d), seq %d\n",
+			          __FUNCTION__, bus->nextlen, seq));
+			bus->nextlen = 0;
+		}
+
+		/* Handle Flow Control */
+		fcbits = SDPCM_FCMASK_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+
+		delta = 0;
+		if (~bus->flowcontrol & fcbits) {
+			bus->fc_xoff++;
+			delta = 1;
+		}
+		if (bus->flowcontrol & ~fcbits) {
+			bus->fc_xon++;
+			delta = 1;
+		}
+
+		if (delta) {
+			bus->fc_rcvd++;
+			bus->flowcontrol = fcbits;
+		}
+
+		/* Check and update sequence number */
+		if (rxseq != seq) {
+			DHD_INFO(("%s: rx_seq %d, expected %d\n", __FUNCTION__, seq, rxseq));
+			bus->rx_badseq++;
+			rxseq = seq;
+		}
+
+		/* Check window for sanity */
+		if ((uint8)(txmax - bus->tx_seq) > 0x70) {
+			DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n",
+			           __FUNCTION__, txmax, bus->tx_seq));
+			txmax = bus->tx_max;
+		}
+		bus->tx_max = txmax;
+
+		/* Call a separate function for control frames */
+		if (chan == SDPCM_CONTROL_CHANNEL) {
+#ifdef DHD_WAKE_STATUS
+			bus->rcwake += pkt_wake;
+#endif
+			dhdsdio_read_control(bus, bus->rxhdr, len, doff);
+			continue;
+		}
+
+		ASSERT((chan == SDPCM_DATA_CHANNEL) || (chan == SDPCM_EVENT_CHANNEL) ||
+		       (chan == SDPCM_TEST_CHANNEL) || (chan == SDPCM_GLOM_CHANNEL));
+
+		/* Length to read */
+		rdlen = (len > firstread) ? (len - firstread) : 0;
+
+		/* May pad read to blocksize for efficiency */
+		if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) {
+			pad = bus->blocksize - (rdlen % bus->blocksize);
+			if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
+			    ((rdlen + pad + firstread) < MAX_RX_DATASZ))
+				rdlen += pad;
+		} else if (rdlen % DHD_SDALIGN) {
+			rdlen += DHD_SDALIGN - (rdlen % DHD_SDALIGN);
+		}
+
+		/* Satisfy length-alignment requirements */
+		if (forcealign && (rdlen & (ALIGNMENT - 1)))
+			rdlen = ROUNDUP(rdlen, ALIGNMENT);
+
+		if ((rdlen + firstread) > MAX_RX_DATASZ) {
+			/* Too long -- skip this frame */
+			DHD_ERROR(("%s: too long: len %d rdlen %d\n", __FUNCTION__, len, rdlen));
+			bus->dhd->rx_errors++; bus->rx_toolong++;
+			dhdsdio_rxfail(bus, FALSE, FALSE);
+			continue;
+		}
+
+		dhd_os_sdlock_rxq(bus->dhd);
+		if (!(pkt = PKTGET(osh, (rdlen + firstread + DHD_SDALIGN), FALSE))) {
+			/* Give up on data, request rtx of events */
+			DHD_ERROR(("%s: PKTGET failed: rdlen %d chan %d\n",
+			           __FUNCTION__, rdlen, chan));
+			bus->dhd->rx_dropped++;
+			dhd_os_sdunlock_rxq(bus->dhd);
+			dhdsdio_rxfail(bus, FALSE, RETRYCHAN(chan));
+			continue;
+		}
+		dhd_os_sdunlock_rxq(bus->dhd);
+
+		ASSERT(!PKTLINK(pkt));
+
+		/* Leave room for what we already read, and align remainder */
+		ASSERT(firstread < (PKTLEN(osh, pkt)));
+		PKTPULL(osh, pkt, firstread);
+		PKTALIGN(osh, pkt, rdlen, DHD_SDALIGN);
+
+		/* Read the remaining frame data */
+		sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+		                            ((uint8 *)PKTDATA(osh, pkt)), rdlen, pkt, NULL, NULL);
+		bus->f2rxdata++;
+		ASSERT(sdret != BCME_PENDING);
+
+		if (sdret < 0) {
+			DHD_ERROR(("%s: read %d %s bytes failed: %d\n", __FUNCTION__, rdlen,
+			           ((chan == SDPCM_EVENT_CHANNEL) ? "event" :
+			            ((chan == SDPCM_DATA_CHANNEL) ? "data" : "test")), sdret));
+			dhd_os_sdlock_rxq(bus->dhd);
+			PKTFREE(bus->dhd->osh, pkt, FALSE);
+			dhd_os_sdunlock_rxq(bus->dhd);
+			bus->dhd->rx_errors++;
+			dhdsdio_rxfail(bus, TRUE, RETRYCHAN(chan));
+			continue;
+		}
+
+		/* Copy the already-read portion */
+		PKTPUSH(osh, pkt, firstread);
+		bcopy(bus->rxhdr, PKTDATA(osh, pkt), firstread);
+
+#ifdef DHD_DEBUG
+		if (DHD_BYTES_ON() && DHD_DATA_ON()) {
+			prhex("Rx Data", PKTDATA(osh, pkt), len);
+		}
+#endif
+
+deliver:
+		/* Save superframe descriptor and allocate packet frame */
+		if (chan == SDPCM_GLOM_CHANNEL) {
+			if (SDPCM_GLOMDESC(&bus->rxhdr[SDPCM_FRAMETAG_LEN])) {
+				DHD_GLOM(("%s: got glom descriptor, %d bytes:\n",
+				          __FUNCTION__, len));
+#ifdef DHD_DEBUG
+				if (DHD_GLOM_ON()) {
+					prhex("Glom Data", PKTDATA(osh, pkt), len);
+				}
+#endif
+				PKTSETLEN(osh, pkt, len);
+				ASSERT(doff == SDPCM_HDRLEN);
+				PKTPULL(osh, pkt, SDPCM_HDRLEN);
+				bus->glomd = pkt;
+			} else {
+				DHD_ERROR(("%s: glom superframe w/o descriptor!\n", __FUNCTION__));
+				dhdsdio_rxfail(bus, FALSE, FALSE);
+			}
+			continue;
+		}
+
+		/* Fill in packet len and prio, deliver upward */
+		PKTSETLEN(osh, pkt, len);
+		PKTPULL(osh, pkt, doff);
+
+#ifdef SDTEST
+		/* Test channel packets are processed separately */
+		if (chan == SDPCM_TEST_CHANNEL) {
+			dhdsdio_testrcv(bus, pkt, seq);
+			continue;
+		}
+#endif /* SDTEST */
+
+		if (PKTLEN(osh, pkt) == 0) {
+			dhd_os_sdlock_rxq(bus->dhd);
+			PKTFREE(bus->dhd->osh, pkt, FALSE);
+			dhd_os_sdunlock_rxq(bus->dhd);
+			continue;
+		} else if (dhd_prot_hdrpull(bus->dhd, &ifidx, pkt, reorder_info_buf,
+			&reorder_info_len) != 0) {
+			DHD_ERROR(("%s: rx protocol error\n", __FUNCTION__));
+			dhd_os_sdlock_rxq(bus->dhd);
+			PKTFREE(bus->dhd->osh, pkt, FALSE);
+			dhd_os_sdunlock_rxq(bus->dhd);
+			bus->dhd->rx_errors++;
+			continue;
+		}
+		if (reorder_info_len) {
+			/* Reordering info from the firmware */
+			dhd_process_pkt_reorder_info(bus->dhd, reorder_info_buf, reorder_info_len,
+				&pkt, &pkt_count);
+			if (pkt_count == 0)
+				continue;
+		}
+		else
+			pkt_count = 1;
+
+		/* Unlock during rx call */
+#ifdef DHD_WAKE_STATUS
+		bus->rxwake += pkt_wake;
+#endif
+		dhd_os_sdunlock(bus->dhd);
+		dhd_rx_frame(bus->dhd, ifidx, pkt, pkt_count, chan);
+		dhd_os_sdlock(bus->dhd);
+	}
+	rxcount = maxframes - rxleft;
+#ifdef DHD_DEBUG
+	/* Message if we hit the limit */
+	if (!rxleft && !sdtest)
+		DHD_DATA(("%s: hit rx limit of %d frames\n", __FUNCTION__, maxframes));
+	else
+#endif /* DHD_DEBUG */
+	DHD_DATA(("%s: processed %d frames\n", __FUNCTION__, rxcount));
+	/* Back off rxseq if awaiting rtx, update rx_seq */
+	if (bus->rxskip)
+		rxseq--;
+	bus->rx_seq = rxseq;
+
+	if (bus->reqbussleep)
+	{
+	    dhdsdio_bussleep(bus, TRUE);
+		bus->reqbussleep = FALSE;
+	}
+	bus->readframes = FALSE;
+
+	return rxcount;
+}
+
+static uint32
+dhdsdio_hostmail(dhd_bus_t *bus)
+{
+	sdpcmd_regs_t *regs = bus->regs;
+	uint32 intstatus = 0;
+	uint32 hmb_data;
+	uint8 fcbits;
+	uint retries = 0;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	/* Read mailbox data and ack that we did so */
+	R_SDREG(hmb_data, &regs->tohostmailboxdata, retries);
+	if (retries <= retry_limit)
+		W_SDREG(SMB_INT_ACK, &regs->tosbmailbox, retries);
+	bus->f1regdata += 2;
+
+	/* Dongle recomposed rx frames, accept them again */
+	if (hmb_data & HMB_DATA_NAKHANDLED) {
+		DHD_INFO(("Dongle reports NAK handled, expect rtx of %d\n", bus->rx_seq));
+		if (!bus->rxskip) {
+			DHD_ERROR(("%s: unexpected NAKHANDLED!\n", __FUNCTION__));
+		}
+		bus->rxskip = FALSE;
+		intstatus |= FRAME_AVAIL_MASK(bus);
+	}
+
+	/*
+	 * DEVREADY does not occur with gSPI.
+	 */
+	if (hmb_data & (HMB_DATA_DEVREADY | HMB_DATA_FWREADY)) {
+		bus->sdpcm_ver = (hmb_data & HMB_DATA_VERSION_MASK) >> HMB_DATA_VERSION_SHIFT;
+		if (bus->sdpcm_ver != SDPCM_PROT_VERSION)
+			DHD_ERROR(("Version mismatch, dongle reports %d, expecting %d\n",
+			           bus->sdpcm_ver, SDPCM_PROT_VERSION));
+		else
+			DHD_INFO(("Dongle ready, protocol version %d\n", bus->sdpcm_ver));
+		/* make sure for the SDIO_DEVICE_RXDATAINT_MODE_1 corecontrol is proper */
+		if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev >= 4) &&
+		    (bus->rxint_mode  == SDIO_DEVICE_RXDATAINT_MODE_1)) {
+			uint32 val;
+
+			val = R_REG(bus->dhd->osh, &bus->regs->corecontrol);
+			val &= ~CC_XMTDATAAVAIL_MODE;
+			val |= CC_XMTDATAAVAIL_CTRL;
+			W_REG(bus->dhd->osh, &bus->regs->corecontrol, val);
+
+			val = R_REG(bus->dhd->osh, &bus->regs->corecontrol);
+		}
+
+#ifdef DHD_DEBUG
+		/* Retrieve console state address now that firmware should have updated it */
+		{
+			sdpcm_shared_t shared;
+			if (dhdsdio_readshared(bus, &shared) == 0)
+				bus->console_addr = shared.console_addr;
+		}
+#endif /* DHD_DEBUG */
+	}
+
+	/*
+	 * Flow Control has been moved into the RX headers and this out of band
+	 * method isn't used any more.  Leave this here for possibly remaining backward
+	 * compatible with older dongles
+	 */
+	if (hmb_data & HMB_DATA_FC) {
+		fcbits = (hmb_data & HMB_DATA_FCDATA_MASK) >> HMB_DATA_FCDATA_SHIFT;
+
+		if (fcbits & ~bus->flowcontrol)
+			bus->fc_xoff++;
+		if (bus->flowcontrol & ~fcbits)
+			bus->fc_xon++;
+
+		bus->fc_rcvd++;
+		bus->flowcontrol = fcbits;
+	}
+
+#ifdef DHD_DEBUG
+	/* At least print a message if FW halted */
+	if (hmb_data & HMB_DATA_FWHALT) {
+		DHD_ERROR(("INTERNAL ERROR: FIRMWARE HALTED : set BUS DOWN\n"));
+		dhdsdio_checkdied(bus, NULL, 0);
+		bus->dhd->busstate = DHD_BUS_DOWN;
+	}
+#endif /* DHD_DEBUG */
+
+	/* Shouldn't be any others */
+	if (hmb_data & ~(HMB_DATA_DEVREADY |
+	                 HMB_DATA_FWHALT |
+	                 HMB_DATA_NAKHANDLED |
+	                 HMB_DATA_FC |
+	                 HMB_DATA_FWREADY |
+	                 HMB_DATA_FCDATA_MASK |
+	                 HMB_DATA_VERSION_MASK)) {
+		DHD_ERROR(("Unknown mailbox data content: 0x%02x\n", hmb_data));
+	}
+
+	return intstatus;
+}
+
+static bool
+dhdsdio_dpc(dhd_bus_t *bus)
+{
+	bcmsdh_info_t *sdh = bus->sdh;
+	sdpcmd_regs_t *regs = bus->regs;
+	uint32 intstatus, newstatus = 0;
+	uint retries = 0;
+	uint rxlimit = dhd_rxbound; /* Rx frames to read before resched */
+	uint txlimit = dhd_txbound; /* Tx frames to send before resched */
+	uint framecnt = 0;		  /* Temporary counter of tx/rx frames */
+	bool rxdone = TRUE;		  /* Flag for no more read data */
+	bool resched = FALSE;	  /* Flag indicating resched wanted */
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	dhd_os_sdlock(bus->dhd);
+
+	if (bus->dhd->busstate == DHD_BUS_DOWN) {
+		DHD_ERROR(("%s: Bus down, ret\n", __FUNCTION__));
+		bus->intstatus = 0;
+		dhd_os_sdunlock(bus->dhd);
+		return 0;
+	}
+
+	/* Start with leftover status bits */
+	intstatus = bus->intstatus;
+
+	if (!SLPAUTO_ENAB(bus) && !KSO_ENAB(bus)) {
+		DHD_ERROR(("%s: Device asleep\n", __FUNCTION__));
+		goto exit;
+	}
+
+	/* If waiting for HTAVAIL, check status */
+	if (!SLPAUTO_ENAB(bus) && (bus->clkstate == CLK_PENDING)) {
+		int err;
+		uint8 clkctl, devctl = 0;
+
+#ifdef DHD_DEBUG
+		/* Check for inconsistent device control */
+		devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+		if (err) {
+			DHD_ERROR(("%s: error reading DEVCTL: %d\n", __FUNCTION__, err));
+			bus->dhd->busstate = DHD_BUS_DOWN;
+		} else {
+			ASSERT(devctl & SBSDIO_DEVCTL_CA_INT_ONLY);
+		}
+#endif /* DHD_DEBUG */
+
+		/* Read CSR, if clock on switch to AVAIL, else ignore */
+		clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+		if (err) {
+			DHD_ERROR(("%s: error reading CSR: %d\n", __FUNCTION__, err));
+			bus->dhd->busstate = DHD_BUS_DOWN;
+		}
+
+		DHD_INFO(("DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n", devctl, clkctl));
+
+		if (SBSDIO_HTAV(clkctl)) {
+			devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+			if (err) {
+				DHD_ERROR(("%s: error reading DEVCTL: %d\n",
+				           __FUNCTION__, err));
+				bus->dhd->busstate = DHD_BUS_DOWN;
+			}
+			devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
+			bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
+			if (err) {
+				DHD_ERROR(("%s: error writing DEVCTL: %d\n",
+				           __FUNCTION__, err));
+				bus->dhd->busstate = DHD_BUS_DOWN;
+			}
+			bus->clkstate = CLK_AVAIL;
+		} else {
+			goto clkwait;
+		}
+	}
+
+	BUS_WAKE(bus);
+
+	/* Make sure backplane clock is on */
+	dhdsdio_clkctl(bus, CLK_AVAIL, TRUE);
+	if (bus->clkstate != CLK_AVAIL)
+		goto clkwait;
+
+	/* Pending interrupt indicates new device status */
+	if (bus->ipend) {
+		bus->ipend = FALSE;
+		R_SDREG(newstatus, &regs->intstatus, retries);
+		bus->f1regdata++;
+		if (bcmsdh_regfail(bus->sdh))
+			newstatus = 0;
+		newstatus &= bus->hostintmask;
+		bus->fcstate = !!(newstatus & I_HMB_FC_STATE);
+		if (newstatus) {
+			bus->f1regdata++;
+			if ((bus->rxint_mode == SDIO_DEVICE_RXDATAINT_MODE_0) &&
+				(newstatus == I_XMTDATA_AVAIL)) {
+			}
+			else
+				W_SDREG(newstatus, &regs->intstatus, retries);
+		}
+	}
+
+	/* Merge new bits with previous */
+	intstatus |= newstatus;
+	bus->intstatus = 0;
+
+	/* Handle flow-control change: read new state in case our ack
+	 * crossed another change interrupt.  If change still set, assume
+	 * FC ON for safety, let next loop through do the debounce.
+	 */
+	if (intstatus & I_HMB_FC_CHANGE) {
+		intstatus &= ~I_HMB_FC_CHANGE;
+		W_SDREG(I_HMB_FC_CHANGE, &regs->intstatus, retries);
+		R_SDREG(newstatus, &regs->intstatus, retries);
+		bus->f1regdata += 2;
+		bus->fcstate = !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE));
+		intstatus |= (newstatus & bus->hostintmask);
+	}
+
+	/* Just being here means nothing more to do for chipactive */
+	if (intstatus & I_CHIPACTIVE) {
+		/* ASSERT(bus->clkstate == CLK_AVAIL); */
+		intstatus &= ~I_CHIPACTIVE;
+	}
+
+	/* Handle host mailbox indication */
+	if (intstatus & I_HMB_HOST_INT) {
+		intstatus &= ~I_HMB_HOST_INT;
+		intstatus |= dhdsdio_hostmail(bus);
+	}
+
+	/* Generally don't ask for these, can get CRC errors... */
+	if (intstatus & I_WR_OOSYNC) {
+		DHD_ERROR(("Dongle reports WR_OOSYNC\n"));
+		intstatus &= ~I_WR_OOSYNC;
+	}
+
+	if (intstatus & I_RD_OOSYNC) {
+		DHD_ERROR(("Dongle reports RD_OOSYNC\n"));
+		intstatus &= ~I_RD_OOSYNC;
+	}
+
+	if (intstatus & I_SBINT) {
+		DHD_ERROR(("Dongle reports SBINT\n"));
+		intstatus &= ~I_SBINT;
+	}
+
+	/* Would be active due to wake-wlan in gSPI */
+	if (intstatus & I_CHIPACTIVE) {
+		DHD_INFO(("Dongle reports CHIPACTIVE\n"));
+		intstatus &= ~I_CHIPACTIVE;
+	}
+
+	if (intstatus & I_HMB_FC_STATE) {
+		DHD_INFO(("Dongle reports HMB_FC_STATE\n"));
+		intstatus &= ~I_HMB_FC_STATE;
+	}
+
+	/* Ignore frame indications if rxskip is set */
+	if (bus->rxskip) {
+		intstatus &= ~FRAME_AVAIL_MASK(bus);
+	}
+
+	/* On frame indication, read available frames */
+	if (PKT_AVAILABLE(bus, intstatus)) {
+		framecnt = dhdsdio_readframes(bus, rxlimit, &rxdone);
+		if (rxdone || bus->rxskip)
+			intstatus  &= ~FRAME_AVAIL_MASK(bus);
+		rxlimit -= MIN(framecnt, rxlimit);
+	}
+
+	/* Keep still-pending events for next scheduling */
+	bus->intstatus = intstatus;
+
+clkwait:
+	/* Re-enable interrupts to detect new device events (mailbox, rx frame)
+	 * or clock availability.  (Allows tx loop to check ipend if desired.)
+	 * (Unless register access seems hosed, as we may not be able to ACK...)
+	 */
+	if (bus->intr && bus->intdis && !bcmsdh_regfail(sdh)) {
+		DHD_INTR(("%s: enable SDIO interrupts, rxdone %d framecnt %d\n",
+		          __FUNCTION__, rxdone, framecnt));
+		bus->intdis = FALSE;
+#if defined(OOB_INTR_ONLY)
+		bcmsdh_oob_intr_set(bus->sdh, TRUE);
+#endif /* defined(OOB_INTR_ONLY) */
+		bcmsdh_intr_enable(sdh);
+	}
+
+#if defined(OOB_INTR_ONLY) && !defined(HW_OOB)
+	/* In case of SW-OOB(using edge trigger),
+	 * Check interrupt status in the dongle again after enable irq on the host.
+	 * and rechedule dpc if interrupt is pended in the dongle.
+	 * There is a chance to miss OOB interrupt while irq is disabled on the host.
+	 * No need to do this with HW-OOB(level trigger)
+	 */
+	R_SDREG(newstatus, &regs->intstatus, retries);
+	if (bcmsdh_regfail(bus->sdh))
+		newstatus = 0;
+	if (newstatus & bus->hostintmask) {
+		bus->ipend = TRUE;
+		resched = TRUE;
+	}
+#endif /* defined(OOB_INTR_ONLY) && !defined(HW_OOB) */
+
+#ifdef PROP_TXSTATUS
+	dhd_wlfc_commit_packets(bus->dhd, (f_commitpkt_t)dhd_bus_txdata, (void *)bus, NULL, FALSE);
+#endif
+
+	if (TXCTLOK(bus) && bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL))
+		dhdsdio_sendpendctl(bus);
+
+	/* Send queued frames (limit 1 if rx may still be pending) */
+	else if ((bus->clkstate == CLK_AVAIL) && !bus->fcstate &&
+	    pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit && DATAOK(bus)) {
+		framecnt = rxdone ? txlimit : MIN(txlimit, dhd_txminmax);
+		framecnt = dhdsdio_sendfromq(bus, framecnt);
+		txlimit -= framecnt;
+	}
+	/* Resched the DPC if ctrl cmd is pending on bus credit */
+	if (bus->ctrl_frame_stat)
+		resched = TRUE;
+
+	/* Resched if events or tx frames are pending, else await next interrupt */
+	/* On failed register access, all bets are off: no resched or interrupts */
+	if ((bus->dhd->busstate == DHD_BUS_DOWN) || bcmsdh_regfail(sdh)) {
+		if ((bus->sih && bus->sih->buscorerev >= 12) && !(dhdsdio_sleepcsr_get(bus) &
+			SBSDIO_FUNC1_SLEEPCSR_KSO_MASK)) {
+			/* Bus failed because of KSO */
+			DHD_ERROR(("%s: Bus failed due to KSO\n", __FUNCTION__));
+			bus->kso = FALSE;
+		} else {
+			DHD_ERROR(("%s: failed backplane access over SDIO, halting operation\n",
+				__FUNCTION__));
+			bus->dhd->busstate = DHD_BUS_DOWN;
+			bus->intstatus = 0;
+		}
+	} else if (bus->clkstate == CLK_PENDING) {
+		/* Awaiting I_CHIPACTIVE; don't resched */
+	} else if (bus->intstatus || bus->ipend ||
+	           (!bus->fcstate && pktq_mlen(&bus->txq, ~bus->flowcontrol) && DATAOK(bus)) ||
+			PKT_AVAILABLE(bus, bus->intstatus)) {  /* Read multiple frames */
+		resched = TRUE;
+	}
+
+	bus->dpc_sched = resched;
+
+	/* If we're done for now, turn off clock request. */
+	if ((bus->idletime == DHD_IDLE_IMMEDIATE) && (bus->clkstate != CLK_PENDING)) {
+		bus->activity = FALSE;
+		dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+	}
+
+exit:
+
+	if (!resched && dhd_dpcpoll) {
+		if (dhdsdio_readframes(bus, dhd_rxbound, &rxdone) != 0)
+			resched = TRUE;
+	}
+
+	dhd_os_sdunlock(bus->dhd);
+	return resched;
+}
+
+bool
+dhd_bus_dpc(struct dhd_bus *bus)
+{
+	bool resched;
+
+	/* Call the DPC directly. */
+	DHD_TRACE(("Calling dhdsdio_dpc() from %s\n", __FUNCTION__));
+	resched = dhdsdio_dpc(bus);
+
+	return resched;
+}
+
+void
+dhdsdio_isr(void *arg)
+{
+	dhd_bus_t *bus = (dhd_bus_t*)arg;
+	bcmsdh_info_t *sdh;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (!bus) {
+		DHD_ERROR(("%s : bus is null pointer , exit \n", __FUNCTION__));
+		return;
+	}
+	sdh = bus->sdh;
+
+	if (bus->dhd->busstate == DHD_BUS_DOWN) {
+		DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
+		return;
+	}
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	/* Count the interrupt call */
+	bus->intrcount++;
+	bus->ipend = TRUE;
+
+	/* Shouldn't get this interrupt if we're sleeping? */
+	if (!SLPAUTO_ENAB(bus)) {
+		if (bus->sleeping) {
+			DHD_ERROR(("INTERRUPT WHILE SLEEPING??\n"));
+			return;
+		} else if (!KSO_ENAB(bus)) {
+			DHD_ERROR(("ISR in devsleep 1\n"));
+		}
+	}
+
+	/* Disable additional interrupts (is this needed now)? */
+	if (bus->intr) {
+		DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__));
+	} else {
+		DHD_ERROR(("dhdsdio_isr() w/o interrupt configured!\n"));
+	}
+
+	bcmsdh_intr_disable(sdh);
+	bus->intdis = TRUE;
+
+#if defined(SDIO_ISR_THREAD)
+	DHD_TRACE(("Calling dhdsdio_dpc() from %s\n", __FUNCTION__));
+	DHD_OS_WAKE_LOCK(bus->dhd);
+	dhdsdio_dpc(bus);
+	DHD_OS_WAKE_UNLOCK(bus->dhd);
+#else
+
+	bus->dpc_sched = TRUE;
+	dhd_sched_dpc(bus->dhd);
+
+#endif /* defined(SDIO_ISR_THREAD) */
+
+}
+
+#ifdef SDTEST
+static void
+dhdsdio_pktgen_init(dhd_bus_t *bus)
+{
+	/* Default to specified length, or full range */
+	if (dhd_pktgen_len) {
+		bus->pktgen_maxlen = MIN(dhd_pktgen_len, MAX_PKTGEN_LEN);
+		bus->pktgen_minlen = bus->pktgen_maxlen;
+	} else {
+		bus->pktgen_maxlen = MAX_PKTGEN_LEN;
+		bus->pktgen_minlen = 0;
+	}
+	bus->pktgen_len = (uint16)bus->pktgen_minlen;
+
+	/* Default to per-watchdog burst with 10s print time */
+	bus->pktgen_freq = 1;
+	bus->pktgen_print = dhd_watchdog_ms ? (10000 / dhd_watchdog_ms) : 0;
+	bus->pktgen_count = (dhd_pktgen * dhd_watchdog_ms + 999) / 1000;
+
+	/* Default to echo mode */
+	bus->pktgen_mode = DHD_PKTGEN_ECHO;
+	bus->pktgen_stop = 1;
+}
+
+static void
+dhdsdio_pktgen(dhd_bus_t *bus)
+{
+	void *pkt;
+	uint8 *data;
+	uint pktcount;
+	uint fillbyte;
+	osl_t *osh = bus->dhd->osh;
+	uint16 len;
+	ulong time_lapse;
+	uint sent_pkts;
+	uint rcvd_pkts;
+
+	/* Display current count if appropriate */
+	if (bus->pktgen_print && (++bus->pktgen_ptick >= bus->pktgen_print)) {
+		bus->pktgen_ptick = 0;
+		printf("%s: send attempts %d, rcvd %d, errors %d\n",
+		       __FUNCTION__, bus->pktgen_sent, bus->pktgen_rcvd, bus->pktgen_fail);
+
+		/* Print throughput stats only for constant length packet runs */
+		if (bus->pktgen_minlen == bus->pktgen_maxlen) {
+			time_lapse = jiffies - bus->pktgen_prev_time;
+			bus->pktgen_prev_time = jiffies;
+			sent_pkts = bus->pktgen_sent - bus->pktgen_prev_sent;
+			bus->pktgen_prev_sent = bus->pktgen_sent;
+			rcvd_pkts = bus->pktgen_rcvd - bus->pktgen_prev_rcvd;
+			bus->pktgen_prev_rcvd = bus->pktgen_rcvd;
+
+			printf("%s: Tx Throughput %d kbps, Rx Throughput %d kbps\n",
+			  __FUNCTION__,
+			  (sent_pkts * bus->pktgen_len / jiffies_to_msecs(time_lapse)) * 8,
+			  (rcvd_pkts * bus->pktgen_len  / jiffies_to_msecs(time_lapse)) * 8);
+		}
+	}
+
+	/* For recv mode, just make sure dongle has started sending */
+	if (bus->pktgen_mode == DHD_PKTGEN_RECV) {
+		if (bus->pktgen_rcv_state == PKTGEN_RCV_IDLE) {
+			bus->pktgen_rcv_state = PKTGEN_RCV_ONGOING;
+			dhdsdio_sdtest_set(bus, bus->pktgen_total);
+		}
+		return;
+	}
+
+	/* Otherwise, generate or request the specified number of packets */
+	for (pktcount = 0; pktcount < bus->pktgen_count; pktcount++) {
+		/* Stop if total has been reached */
+		if (bus->pktgen_total && (bus->pktgen_sent >= bus->pktgen_total)) {
+			bus->pktgen_count = 0;
+			break;
+		}
+
+		/* Allocate an appropriate-sized packet */
+		if (bus->pktgen_mode == DHD_PKTGEN_RXBURST) {
+			len = SDPCM_TEST_PKT_CNT_FLD_LEN;
+		} else {
+			len = bus->pktgen_len;
+		}
+		if (!(pkt = PKTGET(osh, (len + SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + DHD_SDALIGN),
+		                   TRUE))) {;
+			DHD_ERROR(("%s: PKTGET failed!\n", __FUNCTION__));
+			break;
+		}
+		PKTALIGN(osh, pkt, (len + SDPCM_HDRLEN + SDPCM_TEST_HDRLEN), DHD_SDALIGN);
+		data = (uint8*)PKTDATA(osh, pkt) + SDPCM_HDRLEN;
+
+		/* Write test header cmd and extra based on mode */
+		switch (bus->pktgen_mode) {
+		case DHD_PKTGEN_ECHO:
+			*data++ = SDPCM_TEST_ECHOREQ;
+			*data++ = (uint8)bus->pktgen_sent;
+			break;
+
+		case DHD_PKTGEN_SEND:
+			*data++ = SDPCM_TEST_DISCARD;
+			*data++ = (uint8)bus->pktgen_sent;
+			break;
+
+		case DHD_PKTGEN_RXBURST:
+			*data++ = SDPCM_TEST_BURST;
+			*data++ = (uint8)bus->pktgen_count; /* Just for backward compatability */
+			break;
+
+		default:
+			DHD_ERROR(("Unrecognized pktgen mode %d\n", bus->pktgen_mode));
+			PKTFREE(osh, pkt, TRUE);
+			bus->pktgen_count = 0;
+			return;
+		}
+
+		/* Write test header length field */
+		*data++ = (bus->pktgen_len >> 0);
+		*data++ = (bus->pktgen_len >> 8);
+
+		/* Write frame count in a 4 byte field adjucent to SDPCM test header for
+		 * burst mode
+		 */
+		if (bus->pktgen_mode == DHD_PKTGEN_RXBURST) {
+			*data++ = (uint8)(bus->pktgen_count >> 0);
+			*data++ = (uint8)(bus->pktgen_count >> 8);
+			*data++ = (uint8)(bus->pktgen_count >> 16);
+			*data++ = (uint8)(bus->pktgen_count >> 24);
+		} else {
+
+			/* Then fill in the remainder -- N/A for burst */
+			for (fillbyte = 0; fillbyte < len; fillbyte++)
+				*data++ = SDPCM_TEST_FILL(fillbyte, (uint8)bus->pktgen_sent);
+		}
+
+#ifdef DHD_DEBUG
+		if (DHD_BYTES_ON() && DHD_DATA_ON()) {
+			data = (uint8*)PKTDATA(osh, pkt) + SDPCM_HDRLEN;
+			prhex("dhdsdio_pktgen: Tx Data", data, PKTLEN(osh, pkt) - SDPCM_HDRLEN);
+		}
+#endif
+
+		/* Send it */
+		if (dhdsdio_txpkt(bus, SDPCM_TEST_CHANNEL, &pkt, 1, TRUE) != BCME_OK) {
+			bus->pktgen_fail++;
+			if (bus->pktgen_stop && bus->pktgen_stop == bus->pktgen_fail)
+				bus->pktgen_count = 0;
+		}
+		bus->pktgen_sent++;
+
+		/* Bump length if not fixed, wrap at max */
+		if (++bus->pktgen_len > bus->pktgen_maxlen)
+			bus->pktgen_len = (uint16)bus->pktgen_minlen;
+
+		/* Special case for burst mode: just send one request! */
+		if (bus->pktgen_mode == DHD_PKTGEN_RXBURST)
+			break;
+	}
+}
+
+static void
+dhdsdio_sdtest_set(dhd_bus_t *bus, uint count)
+{
+	void *pkt;
+	uint8 *data;
+	osl_t *osh = bus->dhd->osh;
+
+	/* Allocate the packet */
+	if (!(pkt = PKTGET(osh, SDPCM_HDRLEN + SDPCM_TEST_HDRLEN +
+		SDPCM_TEST_PKT_CNT_FLD_LEN + DHD_SDALIGN, TRUE))) {
+		DHD_ERROR(("%s: PKTGET failed!\n", __FUNCTION__));
+		return;
+	}
+	PKTALIGN(osh, pkt, (SDPCM_HDRLEN + SDPCM_TEST_HDRLEN +
+		SDPCM_TEST_PKT_CNT_FLD_LEN), DHD_SDALIGN);
+	data = (uint8*)PKTDATA(osh, pkt) + SDPCM_HDRLEN;
+
+	/* Fill in the test header */
+	*data++ = SDPCM_TEST_SEND;
+	*data++ = (count > 0)?TRUE:FALSE;
+	*data++ = (bus->pktgen_maxlen >> 0);
+	*data++ = (bus->pktgen_maxlen >> 8);
+	*data++ = (uint8)(count >> 0);
+	*data++ = (uint8)(count >> 8);
+	*data++ = (uint8)(count >> 16);
+	*data++ = (uint8)(count >> 24);
+
+	/* Send it */
+	if (dhdsdio_txpkt(bus, SDPCM_TEST_CHANNEL, &pkt, 1, TRUE) != BCME_OK)
+		bus->pktgen_fail++;
+}
+
+
+static void
+dhdsdio_testrcv(dhd_bus_t *bus, void *pkt, uint seq)
+{
+	osl_t *osh = bus->dhd->osh;
+	uint8 *data;
+	uint pktlen;
+
+	uint8 cmd;
+	uint8 extra;
+	uint16 len;
+	uint16 offset;
+
+	/* Check for min length */
+	if ((pktlen = PKTLEN(osh, pkt)) < SDPCM_TEST_HDRLEN) {
+		DHD_ERROR(("dhdsdio_restrcv: toss runt frame, pktlen %d\n", pktlen));
+		PKTFREE(osh, pkt, FALSE);
+		return;
+	}
+
+	/* Extract header fields */
+	data = PKTDATA(osh, pkt);
+	cmd = *data++;
+	extra = *data++;
+	len = *data++; len += *data++ << 8;
+	DHD_TRACE(("%s:cmd:%d, xtra:%d,len:%d\n", __FUNCTION__, cmd, extra, len));
+	/* Check length for relevant commands */
+	if (cmd == SDPCM_TEST_DISCARD || cmd == SDPCM_TEST_ECHOREQ || cmd == SDPCM_TEST_ECHORSP) {
+		if (pktlen != len + SDPCM_TEST_HDRLEN) {
+			DHD_ERROR(("dhdsdio_testrcv: frame length mismatch, pktlen %d seq %d"
+			           " cmd %d extra %d len %d\n", pktlen, seq, cmd, extra, len));
+			PKTFREE(osh, pkt, FALSE);
+			return;
+		}
+	}
+
+	/* Process as per command */
+	switch (cmd) {
+	case SDPCM_TEST_ECHOREQ:
+		/* Rx->Tx turnaround ok (even on NDIS w/current implementation) */
+		*(uint8 *)(PKTDATA(osh, pkt)) = SDPCM_TEST_ECHORSP;
+		if (dhdsdio_txpkt(bus, SDPCM_TEST_CHANNEL, &pkt, 1, TRUE) == BCME_OK) {
+			bus->pktgen_sent++;
+		} else {
+			bus->pktgen_fail++;
+			PKTFREE(osh, pkt, FALSE);
+		}
+		bus->pktgen_rcvd++;
+		break;
+
+	case SDPCM_TEST_ECHORSP:
+		if (bus->ext_loop) {
+			PKTFREE(osh, pkt, FALSE);
+			bus->pktgen_rcvd++;
+			break;
+		}
+
+		for (offset = 0; offset < len; offset++, data++) {
+			if (*data != SDPCM_TEST_FILL(offset, extra)) {
+				DHD_ERROR(("dhdsdio_testrcv: echo data mismatch: "
+				           "offset %d (len %d) expect 0x%02x rcvd 0x%02x\n",
+				           offset, len, SDPCM_TEST_FILL(offset, extra), *data));
+				break;
+			}
+		}
+		PKTFREE(osh, pkt, FALSE);
+		bus->pktgen_rcvd++;
+		break;
+
+	case SDPCM_TEST_DISCARD:
+		{
+			int i = 0;
+			uint8 *prn = data;
+			uint8 testval = extra;
+			for (i = 0; i < len; i++) {
+				if (*prn != testval) {
+					DHD_ERROR(("DIErr@Pkt#:%d,Ix:%d, expected:0x%x, got:0x%x\n",
+						i, bus->pktgen_rcvd_rcvsession, testval, *prn));
+					prn++; testval++;
+				}
+			}
+		}
+		PKTFREE(osh, pkt, FALSE);
+		bus->pktgen_rcvd++;
+		break;
+
+	case SDPCM_TEST_BURST:
+	case SDPCM_TEST_SEND:
+	default:
+		DHD_INFO(("dhdsdio_testrcv: unsupported or unknown command, pktlen %d seq %d"
+		          " cmd %d extra %d len %d\n", pktlen, seq, cmd, extra, len));
+		PKTFREE(osh, pkt, FALSE);
+		break;
+	}
+
+	/* For recv mode, stop at limit (and tell dongle to stop sending) */
+	if (bus->pktgen_mode == DHD_PKTGEN_RECV) {
+		if (bus->pktgen_rcv_state != PKTGEN_RCV_IDLE) {
+			bus->pktgen_rcvd_rcvsession++;
+
+			if (bus->pktgen_total &&
+				(bus->pktgen_rcvd_rcvsession >= bus->pktgen_total)) {
+			bus->pktgen_count = 0;
+			DHD_ERROR(("Pktgen:rcv test complete!\n"));
+			bus->pktgen_rcv_state = PKTGEN_RCV_IDLE;
+			dhdsdio_sdtest_set(bus, FALSE);
+				bus->pktgen_rcvd_rcvsession = 0;
+			}
+		}
+	}
+}
+#endif /* SDTEST */
+
+int dhd_bus_oob_intr_register(dhd_pub_t *dhdp)
+{
+	int err = 0;
+
+#if defined(OOB_INTR_ONLY)
+	err = bcmsdh_oob_intr_register(dhdp->bus->sdh, dhdsdio_isr, dhdp->bus);
+#endif
+	return err;
+}
+
+void dhd_bus_oob_intr_unregister(dhd_pub_t *dhdp)
+{
+#if defined(OOB_INTR_ONLY)
+	bcmsdh_oob_intr_unregister(dhdp->bus->sdh);
+#endif
+}
+
+void dhd_bus_oob_intr_set(dhd_pub_t *dhdp, bool enable)
+{
+#if defined(OOB_INTR_ONLY)
+	bcmsdh_oob_intr_set(dhdp->bus->sdh, enable);
+#endif
+}
+
+void dhd_bus_dev_pm_stay_awake(dhd_pub_t *dhdpub)
+{
+	bcmsdh_dev_pm_stay_awake(dhdpub->bus->sdh);
+}
+
+void dhd_bus_dev_pm_relax(dhd_pub_t *dhdpub)
+{
+	bcmsdh_dev_relax(dhdpub->bus->sdh);
+}
+
+bool dhd_bus_dev_pm_enabled(dhd_pub_t *dhdpub)
+{
+	bool enabled = FALSE;
+
+	enabled = bcmsdh_dev_pm_enabled(dhdpub->bus->sdh);
+	return enabled;
+}
+
+extern bool
+dhd_bus_watchdog(dhd_pub_t *dhdp)
+{
+	dhd_bus_t *bus;
+
+	DHD_TIMER(("%s: Enter\n", __FUNCTION__));
+
+	bus = dhdp->bus;
+
+	if (bus->dhd->dongle_reset)
+		return FALSE;
+
+	if (bus->dhd->hang_was_sent) {
+		dhd_os_wd_timer(bus->dhd, 0);
+		return FALSE;
+	}
+
+	/* Ignore the timer if simulating bus down */
+	if (!SLPAUTO_ENAB(bus) && bus->sleeping)
+		return FALSE;
+
+	if (dhdp->busstate == DHD_BUS_DOWN)
+		return FALSE;
+
+	dhd_os_sdlock(bus->dhd);
+
+	/* Poll period: check device if appropriate. */
+	if (!SLPAUTO_ENAB(bus) && (bus->poll && (++bus->polltick >= bus->pollrate))) {
+		uint32 intstatus = 0;
+
+		/* Reset poll tick */
+		bus->polltick = 0;
+
+		/* Check device if no interrupts */
+		if (!bus->intr || (bus->intrcount == bus->lastintrs)) {
+
+			if (!bus->dpc_sched) {
+				uint8 devpend;
+				devpend = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0,
+				                          SDIOD_CCCR_INTPEND, NULL);
+				intstatus = devpend & (INTR_STATUS_FUNC1 | INTR_STATUS_FUNC2);
+			}
+
+			/* If there is something, make like the ISR and schedule the DPC */
+			if (intstatus) {
+				bus->pollcnt++;
+				bus->ipend = TRUE;
+				if (bus->intr) {
+					bcmsdh_intr_disable(bus->sdh);
+				}
+				bus->dpc_sched = TRUE;
+				dhd_sched_dpc(bus->dhd);
+			}
+		}
+
+		/* Update interrupt tracking */
+		bus->lastintrs = bus->intrcount;
+	}
+
+#ifdef DHD_DEBUG
+	/* Poll for console output periodically */
+	if (dhdp->busstate == DHD_BUS_DATA && dhd_console_ms != 0) {
+		bus->console.count += dhd_watchdog_ms;
+		if (bus->console.count >= dhd_console_ms) {
+			bus->console.count -= dhd_console_ms;
+			/* Make sure backplane clock is on */
+			if (SLPAUTO_ENAB(bus))
+				dhdsdio_bussleep(bus, FALSE);
+			else
+			dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+			if (dhdsdio_readconsole(bus) < 0)
+				dhd_console_ms = 0;	/* On error, stop trying */
+		}
+	}
+#endif /* DHD_DEBUG */
+
+#ifdef SDTEST
+	/* Generate packets if configured */
+	if (bus->pktgen_count && (++bus->pktgen_tick >= bus->pktgen_freq)) {
+		/* Make sure backplane clock is on */
+		if (SLPAUTO_ENAB(bus))
+			dhdsdio_bussleep(bus, FALSE);
+		else
+			dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+		bus->pktgen_tick = 0;
+		dhdsdio_pktgen(bus);
+	}
+#endif
+
+	/* On idle timeout clear activity flag and/or turn off clock */
+#ifdef DHD_USE_IDLECOUNT
+	if (bus->activity)
+		bus->activity = FALSE;
+	else {
+		bus->idlecount++;
+
+		if ((bus->idletime > 0) && (bus->idlecount >= bus->idletime)) {
+			DHD_TIMER(("%s: DHD Idle state!!\n", __FUNCTION__));
+			if (SLPAUTO_ENAB(bus)) {
+				if (dhdsdio_bussleep(bus, TRUE) != BCME_BUSY)
+					dhd_os_wd_timer(bus->dhd, 0);
+			} else
+				dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+
+			bus->idlecount = 0;
+		}
+	}
+#else
+	if ((bus->idletime > 0) && (bus->clkstate == CLK_AVAIL)) {
+		if (++bus->idlecount >= bus->idletime) {
+			bus->idlecount = 0;
+			if (bus->activity) {
+				bus->activity = FALSE;
+				if (SLPAUTO_ENAB(bus)) {
+					if (!bus->readframes)
+						dhdsdio_bussleep(bus, TRUE);
+					else
+						bus->reqbussleep = TRUE;
+				}
+				else
+					dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+			}
+		}
+	}
+#endif /* DHD_USE_IDLECOUNT */
+
+	dhd_os_sdunlock(bus->dhd);
+
+	return bus->ipend;
+}
+
+#ifdef DHD_DEBUG
+extern int
+dhd_bus_console_in(dhd_pub_t *dhdp, uchar *msg, uint msglen)
+{
+	dhd_bus_t *bus = dhdp->bus;
+	uint32 addr, val;
+	int rv;
+	void *pkt;
+
+	/* Address could be zero if CONSOLE := 0 in dongle Makefile */
+	if (bus->console_addr == 0)
+		return BCME_UNSUPPORTED;
+
+	/* Exclusive bus access */
+	dhd_os_sdlock(bus->dhd);
+
+	/* Don't allow input if dongle is in reset */
+	if (bus->dhd->dongle_reset) {
+		dhd_os_sdunlock(bus->dhd);
+		return BCME_NOTREADY;
+	}
+
+	/* Request clock to allow SDIO accesses */
+	BUS_WAKE(bus);
+	/* No pend allowed since txpkt is called later, ht clk has to be on */
+	dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+	/* Zero cbuf_index */
+	addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf_idx);
+	val = htol32(0);
+	if ((rv = dhdsdio_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
+		goto done;
+
+	/* Write message into cbuf */
+	addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf);
+	if ((rv = dhdsdio_membytes(bus, TRUE, addr, (uint8 *)msg, msglen)) < 0)
+		goto done;
+
+	/* Write length into vcons_in */
+	addr = bus->console_addr + OFFSETOF(hnd_cons_t, vcons_in);
+	val = htol32(msglen);
+	if ((rv = dhdsdio_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
+		goto done;
+
+	/* Bump dongle by sending an empty packet on the event channel.
+	 * sdpcm_sendup (RX) checks for virtual console input.
+	 */
+	if ((pkt = PKTGET(bus->dhd->osh, 4 + SDPCM_RESERVE, TRUE)) != NULL)
+		rv = dhdsdio_txpkt(bus, SDPCM_EVENT_CHANNEL, &pkt, 1, TRUE);
+
+done:
+	if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+		bus->activity = FALSE;
+		dhdsdio_clkctl(bus, CLK_NONE, TRUE);
+	}
+
+	dhd_os_sdunlock(bus->dhd);
+
+	return rv;
+}
+#endif /* DHD_DEBUG */
+
+#ifdef DHD_DEBUG
+static void
+dhd_dump_cis(uint fn, uint8 *cis)
+{
+	uint byte, tag, tdata;
+	DHD_INFO(("Function %d CIS:\n", fn));
+
+	for (tdata = byte = 0; byte < SBSDIO_CIS_SIZE_LIMIT; byte++) {
+		if ((byte % 16) == 0)
+			DHD_INFO(("    "));
+		DHD_INFO(("%02x ", cis[byte]));
+		if ((byte % 16) == 15)
+			DHD_INFO(("\n"));
+		if (!tdata--) {
+			tag = cis[byte];
+			if (tag == 0xff)
+				break;
+			else if (!tag)
+				tdata = 0;
+			else if ((byte + 1) < SBSDIO_CIS_SIZE_LIMIT)
+				tdata = cis[byte + 1] + 1;
+			else
+				DHD_INFO(("]"));
+		}
+	}
+	if ((byte % 16) != 15)
+		DHD_INFO(("\n"));
+}
+#endif /* DHD_DEBUG */
+
+static bool
+dhdsdio_chipmatch(uint16 chipid)
+{
+	if (chipid == BCM4325_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM4329_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM4315_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM4319_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM4336_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM4330_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM43237_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM43362_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM4314_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM43242_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM43340_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM43341_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM43143_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM43342_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM4334_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM43239_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM4324_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM4335_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM4339_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM43349_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM4345_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM4350_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM4354_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM4356_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM4358_CHIP_ID)
+		return TRUE;
+	if (chipid == BCM43430_CHIP_ID)
+		return TRUE;
+	if (BCM4349_CHIP(chipid))
+		return TRUE;
+	return FALSE;
+}
+
+static void *
+dhdsdio_probe(uint16 venid, uint16 devid, uint16 bus_no, uint16 slot,
+	uint16 func, uint bustype, void *regsva, osl_t * osh, void *sdh)
+{
+	int ret;
+	dhd_bus_t *bus;
+
+
+	/* Init global variables at run-time, not as part of the declaration.
+	 * This is required to support init/de-init of the driver. Initialization
+	 * of globals as part of the declaration results in non-deterministic
+	 * behavior since the value of the globals may be different on the
+	 * first time that the driver is initialized vs subsequent initializations.
+	 */
+	dhd_txbound = DHD_TXBOUND;
+	dhd_rxbound = DHD_RXBOUND;
+	dhd_alignctl = TRUE;
+	sd1idle = TRUE;
+	dhd_readahead = TRUE;
+	retrydata = FALSE;
+	dhd_doflow = FALSE;
+	dhd_dongle_ramsize = 0;
+	dhd_txminmax = DHD_TXMINMAX;
+
+	forcealign = TRUE;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+	DHD_INFO(("%s: venid 0x%04x devid 0x%04x\n", __FUNCTION__, venid, devid));
+
+	/* We make assumptions about address window mappings */
+	ASSERT((uintptr)regsva == SI_ENUM_BASE);
+
+	/* BCMSDH passes venid and devid based on CIS parsing -- but low-power start
+	 * means early parse could fail, so here we should get either an ID
+	 * we recognize OR (-1) indicating we must request power first.
+	 */
+	/* Check the Vendor ID */
+	switch (venid) {
+		case 0x0000:
+		case VENDOR_BROADCOM:
+			break;
+		default:
+			DHD_ERROR(("%s: unknown vendor: 0x%04x\n",
+			           __FUNCTION__, venid));
+			goto forcereturn;
+	}
+
+	/* Check the Device ID and make sure it's one that we support */
+	switch (devid) {
+		case BCM4325_D11DUAL_ID:		/* 4325 802.11a/g id */
+		case BCM4325_D11G_ID:			/* 4325 802.11g 2.4Ghz band id */
+		case BCM4325_D11A_ID:			/* 4325 802.11a 5Ghz band id */
+			DHD_INFO(("%s: found 4325 Dongle\n", __FUNCTION__));
+			break;
+		case BCM4329_D11N_ID:		/* 4329 802.11n dualband device */
+		case BCM4329_D11N2G_ID:		/* 4329 802.11n 2.4G device */
+		case BCM4329_D11N5G_ID:		/* 4329 802.11n 5G device */
+		case 0x4329:
+			DHD_INFO(("%s: found 4329 Dongle\n", __FUNCTION__));
+			break;
+		case BCM4315_D11DUAL_ID:		/* 4315 802.11a/g id */
+		case BCM4315_D11G_ID:			/* 4315 802.11g id */
+		case BCM4315_D11A_ID:			/* 4315 802.11a id */
+			DHD_INFO(("%s: found 4315 Dongle\n", __FUNCTION__));
+			break;
+		case BCM4319_D11N_ID:			/* 4319 802.11n id */
+		case BCM4319_D11N2G_ID:			/* 4319 802.11n2g id */
+		case BCM4319_D11N5G_ID:			/* 4319 802.11n5g id */
+			DHD_INFO(("%s: found 4319 Dongle\n", __FUNCTION__));
+			break;
+		case 0:
+			DHD_INFO(("%s: allow device id 0, will check chip internals\n",
+			          __FUNCTION__));
+			break;
+
+		default:
+			DHD_ERROR(("%s: skipping 0x%04x/0x%04x, not a dongle\n",
+			           __FUNCTION__, venid, devid));
+			goto forcereturn;
+	}
+
+	if (osh == NULL) {
+		DHD_ERROR(("%s: osh is NULL!\n", __FUNCTION__));
+		goto forcereturn;
+	}
+
+	/* Allocate private bus interface state */
+	if (!(bus = MALLOC(osh, sizeof(dhd_bus_t)))) {
+		DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__));
+		goto fail;
+	}
+	bzero(bus, sizeof(dhd_bus_t));
+	bus->sdh = sdh;
+	bus->cl_devid = (uint16)devid;
+	bus->bus = DHD_BUS;
+	bus->bus_num = bus_no;
+	bus->slot_num = slot;
+	bus->tx_seq = SDPCM_SEQUENCE_WRAP - 1;
+	bus->usebufpool = FALSE; /* Use bufpool if allocated, else use locally malloced rxbuf */
+
+	/* attempt to attach to the dongle */
+	if (!(dhdsdio_probe_attach(bus, osh, sdh, regsva, devid))) {
+		DHD_ERROR(("%s: dhdsdio_probe_attach failed\n", __FUNCTION__));
+		goto fail;
+	}
+
+	/* Attach to the dhd/OS/network interface */
+	if (!(bus->dhd = dhd_attach(osh, bus, SDPCM_RESERVE))) {
+		DHD_ERROR(("%s: dhd_attach failed\n", __FUNCTION__));
+		goto fail;
+	}
+
+	/* Allocate buffers */
+	if (!(dhdsdio_probe_malloc(bus, osh, sdh))) {
+		DHD_ERROR(("%s: dhdsdio_probe_malloc failed\n", __FUNCTION__));
+		goto fail;
+	}
+
+	if (!(dhdsdio_probe_init(bus, osh, sdh))) {
+		DHD_ERROR(("%s: dhdsdio_probe_init failed\n", __FUNCTION__));
+		goto fail;
+	}
+
+	if (bus->intr) {
+		/* Register interrupt callback, but mask it (not operational yet). */
+		DHD_INTR(("%s: disable SDIO interrupts (not interested yet)\n", __FUNCTION__));
+		bcmsdh_intr_disable(sdh);
+		if ((ret = bcmsdh_intr_reg(sdh, dhdsdio_isr, bus)) != 0) {
+			DHD_ERROR(("%s: FAILED: bcmsdh_intr_reg returned %d\n",
+			           __FUNCTION__, ret));
+			goto fail;
+		}
+		DHD_INTR(("%s: registered SDIO interrupt function ok\n", __FUNCTION__));
+	} else {
+		DHD_INFO(("%s: SDIO interrupt function is NOT registered due to polling mode\n",
+		           __FUNCTION__));
+	}
+
+	DHD_INFO(("%s: completed!!\n", __FUNCTION__));
+
+	/* if firmware path present try to download and bring up bus */
+	bus->dhd->hang_report  = TRUE;
+	if (dhd_download_fw_on_driverload) {
+		if ((ret = dhd_bus_start(bus->dhd)) != 0) {
+			DHD_ERROR(("%s: dhd_bus_start failed\n", __FUNCTION__));
+				goto fail;
+		}
+	}
+	/* Ok, have the per-port tell the stack we're open for business */
+	if (dhd_register_if(bus->dhd, 0, TRUE) != 0) {
+		DHD_ERROR(("%s: Net attach failed!!\n", __FUNCTION__));
+		goto fail;
+	}
+
+
+
+	init_waitqueue_head(&bus->bus_sleep);
+
+	return bus;
+
+fail:
+	dhdsdio_release(bus, osh);
+
+forcereturn:
+
+	return NULL;
+}
+
+static bool
+dhdsdio_probe_attach(struct dhd_bus *bus, osl_t *osh, void *sdh, void *regsva,
+                     uint16 devid)
+{
+	int err = 0;
+	uint8 clkctl = 0;
+
+	bus->alp_only = TRUE;
+	bus->sih = NULL;
+
+	/* Return the window to backplane enumeration space for core access */
+	if (dhdsdio_set_siaddr_window(bus, SI_ENUM_BASE)) {
+		DHD_ERROR(("%s: FAILED to return to SI_ENUM_BASE\n", __FUNCTION__));
+	}
+
+#if defined(DHD_DEBUG)
+	DHD_ERROR(("F1 signature read @0x18000000=0x%4x\n",
+		bcmsdh_reg_read(bus->sdh, SI_ENUM_BASE, 4)));
+#endif
+
+
+	/* Force PLL off until si_attach() programs PLL control regs */
+
+
+
+	bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, DHD_INIT_CLKCTL1, &err);
+	if (!err)
+		clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+
+	if (err || ((clkctl & ~SBSDIO_AVBITS) != DHD_INIT_CLKCTL1)) {
+		DHD_ERROR(("dhdsdio_probe: ChipClkCSR access: err %d wrote 0x%02x read 0x%02x\n",
+		           err, DHD_INIT_CLKCTL1, clkctl));
+		goto fail;
+	}
+
+#ifdef DHD_DEBUG
+	if (DHD_INFO_ON()) {
+		uint fn, numfn;
+		uint8 *cis[SDIOD_MAX_IOFUNCS];
+		int err = 0;
+
+		numfn = bcmsdh_query_iofnum(sdh);
+		ASSERT(numfn <= SDIOD_MAX_IOFUNCS);
+
+		/* Make sure ALP is available before trying to read CIS */
+		SPINWAIT(((clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+		                                    SBSDIO_FUNC1_CHIPCLKCSR, NULL)),
+		          !SBSDIO_ALPAV(clkctl)), PMU_MAX_TRANSITION_DLY);
+
+		/* Now request ALP be put on the bus */
+		bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+		                 DHD_INIT_CLKCTL2, &err);
+		OSL_DELAY(65);
+
+		for (fn = 0; fn <= numfn; fn++) {
+			if (!(cis[fn] = MALLOC(osh, SBSDIO_CIS_SIZE_LIMIT))) {
+				DHD_INFO(("dhdsdio_probe: fn %d cis malloc failed\n", fn));
+				break;
+			}
+			bzero(cis[fn], SBSDIO_CIS_SIZE_LIMIT);
+
+			if ((err = bcmsdh_cis_read(sdh, fn, cis[fn], SBSDIO_CIS_SIZE_LIMIT))) {
+				DHD_INFO(("dhdsdio_probe: fn %d cis read err %d\n", fn, err));
+				MFREE(osh, cis[fn], SBSDIO_CIS_SIZE_LIMIT);
+				break;
+			}
+			dhd_dump_cis(fn, cis[fn]);
+		}
+
+		while (fn-- > 0) {
+			ASSERT(cis[fn]);
+			MFREE(osh, cis[fn], SBSDIO_CIS_SIZE_LIMIT);
+		}
+
+		if (err) {
+			DHD_ERROR(("dhdsdio_probe: failure reading or parsing CIS\n"));
+			goto fail;
+		}
+	}
+#endif /* DHD_DEBUG */
+
+	/* si_attach() will provide an SI handle and scan the backplane */
+	if (!(bus->sih = si_attach((uint)devid, osh, regsva, DHD_BUS, sdh,
+	                           &bus->vars, &bus->varsz))) {
+		DHD_ERROR(("%s: si_attach failed!\n", __FUNCTION__));
+		goto fail;
+	}
+
+#ifdef DHD_DEBUG
+	DHD_ERROR(("F1 signature OK, socitype:0x%x chip:0x%4x rev:0x%x pkg:0x%x\n",
+		bus->sih->socitype, bus->sih->chip, bus->sih->chiprev, bus->sih->chippkg));
+#endif /* DHD_DEBUG */
+
+
+	bcmsdh_chipinfo(sdh, bus->sih->chip, bus->sih->chiprev);
+
+	if (!dhdsdio_chipmatch((uint16)bus->sih->chip)) {
+		DHD_ERROR(("%s: unsupported chip: 0x%04x\n",
+		           __FUNCTION__, bus->sih->chip));
+		goto fail;
+	}
+
+	if (bus->sih->buscorerev >= 12)
+		dhdsdio_clk_kso_init(bus);
+	else
+		bus->kso = TRUE;
+
+	if (CST4330_CHIPMODE_SDIOD(bus->sih->chipst)) {
+	}
+
+	si_sdiod_drive_strength_init(bus->sih, osh, dhd_sdiod_drive_strength);
+
+
+	/* Get info on the ARM and SOCRAM cores... */
+	if (!DHD_NOPMU(bus)) {
+		if ((si_setcore(bus->sih, ARM7S_CORE_ID, 0)) ||
+		    (si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) ||
+		    (si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) {
+			bus->armrev = si_corerev(bus->sih);
+		} else {
+			DHD_ERROR(("%s: failed to find ARM core!\n", __FUNCTION__));
+			goto fail;
+		}
+
+		if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
+			if (!(bus->orig_ramsize = si_socram_size(bus->sih))) {
+				DHD_ERROR(("%s: failed to find SOCRAM memory!\n", __FUNCTION__));
+				goto fail;
+			}
+		} else {
+			/* cr4 has a different way to find the RAM size from TCM's */
+			if (!(bus->orig_ramsize = si_tcm_size(bus->sih))) {
+				DHD_ERROR(("%s: failed to find CR4-TCM memory!\n", __FUNCTION__));
+				goto fail;
+			}
+			/* also populate base address */
+			switch ((uint16)bus->sih->chip) {
+			case BCM4335_CHIP_ID:
+			case BCM4339_CHIP_ID:
+			case BCM43349_CHIP_ID:
+				bus->dongle_ram_base = CR4_4335_RAM_BASE;
+				break;
+			case BCM4350_CHIP_ID:
+			case BCM4354_CHIP_ID:
+			case BCM4356_CHIP_ID:
+			case BCM4358_CHIP_ID:
+				bus->dongle_ram_base = CR4_4350_RAM_BASE;
+				break;
+			case BCM4360_CHIP_ID:
+				bus->dongle_ram_base = CR4_4360_RAM_BASE;
+				break;
+			case BCM4345_CHIP_ID:
+				bus->dongle_ram_base = CR4_4345_RAM_BASE;
+				break;
+			case BCM4349_CHIP_GRPID:
+				bus->dongle_ram_base = CR4_4349_RAM_BASE;
+				break;
+			default:
+				bus->dongle_ram_base = 0;
+				DHD_ERROR(("%s: WARNING: Using default ram base at 0x%x\n",
+				           __FUNCTION__, bus->dongle_ram_base));
+			}
+		}
+		bus->ramsize = bus->orig_ramsize;
+		if (dhd_dongle_ramsize)
+			dhd_dongle_setramsize(bus, dhd_dongle_ramsize);
+
+		DHD_ERROR(("DHD: dongle ram size is set to %d(orig %d) at 0x%x\n",
+		           bus->ramsize, bus->orig_ramsize, bus->dongle_ram_base));
+
+		bus->srmemsize = si_socram_srmem_size(bus->sih);
+	}
+
+	/* ...but normally deal with the SDPCMDEV core */
+	if (!(bus->regs = si_setcore(bus->sih, PCMCIA_CORE_ID, 0)) &&
+	    !(bus->regs = si_setcore(bus->sih, SDIOD_CORE_ID, 0))) {
+		DHD_ERROR(("%s: failed to find SDIODEV core!\n", __FUNCTION__));
+		goto fail;
+	}
+	bus->sdpcmrev = si_corerev(bus->sih);
+
+	/* Set core control so an SDIO reset does a backplane reset */
+	OR_REG(osh, &bus->regs->corecontrol, CC_BPRESEN);
+	bus->rxint_mode = SDIO_DEVICE_HMB_RXINT;
+
+	if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev >= 4) &&
+		(bus->rxint_mode  == SDIO_DEVICE_RXDATAINT_MODE_1))
+	{
+		uint32 val;
+
+		val = R_REG(osh, &bus->regs->corecontrol);
+		val &= ~CC_XMTDATAAVAIL_MODE;
+		val |= CC_XMTDATAAVAIL_CTRL;
+		W_REG(osh, &bus->regs->corecontrol, val);
+	}
+
+
+	pktq_init(&bus->txq, (PRIOMASK + 1), QLEN);
+
+	/* Locate an appropriately-aligned portion of hdrbuf */
+	bus->rxhdr = (uint8 *)ROUNDUP((uintptr)&bus->hdrbuf[0], DHD_SDALIGN);
+
+	/* Set the poll and/or interrupt flags */
+	bus->intr = (bool)dhd_intr;
+	if ((bus->poll = (bool)dhd_poll))
+		bus->pollrate = 1;
+
+	/* Setting default Glom size */
+	bus->txglomsize = SDPCM_DEFGLOM_SIZE;
+
+	return TRUE;
+
+fail:
+	if (bus->sih != NULL) {
+		si_detach(bus->sih);
+		bus->sih = NULL;
+	}
+	return FALSE;
+}
+
+static bool
+dhdsdio_probe_malloc(dhd_bus_t *bus, osl_t *osh, void *sdh)
+{
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (bus->dhd->maxctl) {
+		bus->rxblen = ROUNDUP((bus->dhd->maxctl+SDPCM_HDRLEN), ALIGNMENT) + DHD_SDALIGN;
+		if (!(bus->rxbuf = DHD_OS_PREALLOC(bus->dhd, DHD_PREALLOC_RXBUF, bus->rxblen))) {
+			DHD_ERROR(("%s: MALLOC of %d-byte rxbuf failed\n",
+			           __FUNCTION__, bus->rxblen));
+			goto fail;
+		}
+	}
+	/* Allocate buffer to receive glomed packet */
+	if (!(bus->databuf = DHD_OS_PREALLOC(bus->dhd, DHD_PREALLOC_DATABUF, MAX_DATA_BUF))) {
+		DHD_ERROR(("%s: MALLOC of %d-byte databuf failed\n",
+			__FUNCTION__, MAX_DATA_BUF));
+		/* release rxbuf which was already located as above */
+		if (!bus->rxblen)
+			DHD_OS_PREFREE(bus->dhd, bus->rxbuf, bus->rxblen);
+		goto fail;
+	}
+
+	/* Align the buffer */
+	if ((uintptr)bus->databuf % DHD_SDALIGN)
+		bus->dataptr = bus->databuf + (DHD_SDALIGN - ((uintptr)bus->databuf % DHD_SDALIGN));
+	else
+		bus->dataptr = bus->databuf;
+
+	return TRUE;
+
+fail:
+	return FALSE;
+}
+
+static bool
+dhdsdio_probe_init(dhd_bus_t *bus, osl_t *osh, void *sdh)
+{
+	int32 fnum;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	bus->_srenab = FALSE;
+
+#ifdef SDTEST
+	dhdsdio_pktgen_init(bus);
+#endif /* SDTEST */
+
+	/* Disable F2 to clear any intermediate frame state on the dongle */
+	bcmsdh_cfg_write(sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, SDIO_FUNC_ENABLE_1, NULL);
+
+	bus->dhd->busstate = DHD_BUS_DOWN;
+	bus->sleeping = FALSE;
+	bus->rxflow = FALSE;
+	bus->prev_rxlim_hit = 0;
+
+	/* Done with backplane-dependent accesses, can drop clock... */
+	bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
+
+	/* ...and initialize clock/power states */
+	bus->clkstate = CLK_SDONLY;
+	bus->idletime = (int32)dhd_idletime;
+	bus->idleclock = DHD_IDLE_ACTIVE;
+
+	/* Query the SD clock speed */
+	if (bcmsdh_iovar_op(sdh, "sd_divisor", NULL, 0,
+	                    &bus->sd_divisor, sizeof(int32), FALSE) != BCME_OK) {
+		DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_divisor"));
+		bus->sd_divisor = -1;
+	} else {
+		DHD_INFO(("%s: Initial value for %s is %d\n",
+		          __FUNCTION__, "sd_divisor", bus->sd_divisor));
+	}
+
+	/* Query the SD bus mode */
+	if (bcmsdh_iovar_op(sdh, "sd_mode", NULL, 0,
+	                    &bus->sd_mode, sizeof(int32), FALSE) != BCME_OK) {
+		DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_mode"));
+		bus->sd_mode = -1;
+	} else {
+		DHD_INFO(("%s: Initial value for %s is %d\n",
+		          __FUNCTION__, "sd_mode", bus->sd_mode));
+	}
+
+	/* Query the F2 block size, set roundup accordingly */
+	fnum = 2;
+	if (bcmsdh_iovar_op(sdh, "sd_blocksize", &fnum, sizeof(int32),
+	                    &bus->blocksize, sizeof(int32), FALSE) != BCME_OK) {
+		bus->blocksize = 0;
+		DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_blocksize"));
+	} else {
+		DHD_INFO(("%s: Initial value for %s is %d\n",
+		          __FUNCTION__, "sd_blocksize", bus->blocksize));
+
+		dhdsdio_tune_fifoparam(bus);
+	}
+	bus->roundup = MIN(max_roundup, bus->blocksize);
+
+#ifdef DHDENABLE_TAILPAD
+	if (bus->pad_pkt)
+		PKTFREE(osh, bus->pad_pkt, FALSE);
+	bus->pad_pkt = PKTGET(osh, SDIO_MAX_BLOCK_SIZE, FALSE);
+	if (bus->pad_pkt == NULL)
+		DHD_ERROR(("failed to allocate padding packet\n"));
+	else {
+		int alignment_offset = 0;
+		uintptr pktprt = (uintptr)PKTDATA(osh, bus->pad_pkt);
+		if (!(pktprt&1) && (pktprt = (pktprt % DHD_SDALIGN)))
+			PKTPUSH(osh, bus->pad_pkt, alignment_offset);
+		PKTSETNEXT(osh, bus->pad_pkt, NULL);
+	}
+#endif /* DHDENABLE_TAILPAD */
+
+	/* Query if bus module supports packet chaining, default to use if supported */
+	if (bcmsdh_iovar_op(sdh, "sd_rxchain", NULL, 0,
+	                    &bus->sd_rxchain, sizeof(int32), FALSE) != BCME_OK) {
+		bus->sd_rxchain = FALSE;
+	} else {
+		DHD_INFO(("%s: bus module (through bcmsdh API) %s chaining\n",
+		          __FUNCTION__, (bus->sd_rxchain ? "supports" : "does not support")));
+	}
+	bus->use_rxchain = (bool)bus->sd_rxchain;
+	bus->txinrx_thres = CUSTOM_TXINRX_THRES;
+	/* TX first in dhdsdio_readframes() */
+	bus->dotxinrx = TRUE;
+
+	return TRUE;
+}
+
+int
+dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh,
+                          char *pfw_path, char *pnv_path)
+{
+	int ret;
+
+	bus->fw_path = pfw_path;
+	bus->nv_path = pnv_path;
+
+	ret = dhdsdio_download_firmware(bus, osh, bus->sdh);
+
+
+	return ret;
+}
+
+static int
+dhdsdio_download_firmware(struct dhd_bus *bus, osl_t *osh, void *sdh)
+{
+	int ret;
+
+
+	DHD_TRACE_HW4(("%s: firmware path=%s, nvram path=%s\n",
+		__FUNCTION__, bus->fw_path, bus->nv_path));
+	DHD_OS_WAKE_LOCK(bus->dhd);
+
+	/* Download the firmware */
+	dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+	ret = _dhdsdio_download_firmware(bus);
+
+	dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+
+	DHD_OS_WAKE_UNLOCK(bus->dhd);
+	return ret;
+}
+
+/* Detach and free everything */
+static void
+dhdsdio_release(dhd_bus_t *bus, osl_t *osh)
+{
+	bool dongle_isolation = FALSE;
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (bus) {
+		ASSERT(osh);
+
+		if (bus->dhd) {
+			dongle_isolation = bus->dhd->dongle_isolation;
+			dhd_detach(bus->dhd);
+		}
+
+		/* De-register interrupt handler */
+		bcmsdh_intr_disable(bus->sdh);
+		bcmsdh_intr_dereg(bus->sdh);
+
+		if (bus->dhd) {
+			dhdsdio_release_dongle(bus, osh, dongle_isolation, TRUE);
+			dhd_free(bus->dhd);
+			bus->dhd = NULL;
+		}
+
+		dhdsdio_release_malloc(bus, osh);
+
+#ifdef DHD_DEBUG
+		if (bus->console.buf != NULL)
+			MFREE(osh, bus->console.buf, bus->console.bufsize);
+#endif
+
+#ifdef DHDENABLE_TAILPAD
+		if (bus->pad_pkt)
+			PKTFREE(osh, bus->pad_pkt, FALSE);
+#endif /* DHDENABLE_TAILPAD */
+
+		MFREE(osh, bus, sizeof(dhd_bus_t));
+	}
+
+	DHD_TRACE(("%s: Disconnected\n", __FUNCTION__));
+}
+
+static void
+dhdsdio_release_malloc(dhd_bus_t *bus, osl_t *osh)
+{
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (bus->dhd && bus->dhd->dongle_reset)
+		return;
+
+	if (bus->rxbuf) {
+#ifndef CONFIG_DHD_USE_STATIC_BUF
+		MFREE(osh, bus->rxbuf, bus->rxblen);
+#endif
+		bus->rxctl = bus->rxbuf = NULL;
+		bus->rxlen = 0;
+	}
+
+	if (bus->databuf) {
+#ifndef CONFIG_DHD_USE_STATIC_BUF
+		MFREE(osh, bus->databuf, MAX_DATA_BUF);
+#endif
+		bus->databuf = NULL;
+	}
+
+	if (bus->vars && bus->varsz) {
+		MFREE(osh, bus->vars, bus->varsz);
+		bus->vars = NULL;
+	}
+
+}
+
+
+static void
+dhdsdio_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation, bool reset_flag)
+{
+	DHD_TRACE(("%s: Enter bus->dhd %p bus->dhd->dongle_reset %d \n", __FUNCTION__,
+		bus->dhd, bus->dhd->dongle_reset));
+
+	if ((bus->dhd && bus->dhd->dongle_reset) && reset_flag)
+		return;
+
+	if (bus->sih) {
+#if !defined(BCMLXSDMMC)
+		if (bus->dhd) {
+			dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+		}
+		if (KSO_ENAB(bus) && (dongle_isolation == FALSE))
+			si_watchdog(bus->sih, 4);
+#endif /* !defined(BCMLXSDMMC) */
+		if (bus->dhd) {
+			dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+		}
+		si_detach(bus->sih);
+		bus->sih = NULL;
+		if (bus->vars && bus->varsz)
+			MFREE(osh, bus->vars, bus->varsz);
+		bus->vars = NULL;
+	}
+
+	DHD_TRACE(("%s: Disconnected\n", __FUNCTION__));
+}
+
+static void
+dhdsdio_disconnect(void *ptr)
+{
+	dhd_bus_t *bus = (dhd_bus_t *)ptr;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+
+
+	if (bus) {
+		ASSERT(bus->dhd);
+		dhdsdio_release(bus, bus->dhd->osh);
+	}
+
+
+
+	DHD_TRACE(("%s: Disconnected\n", __FUNCTION__));
+}
+
+static int
+dhdsdio_suspend(void *context)
+{
+	int ret = 0;
+
+	dhd_bus_t *bus = (dhd_bus_t*)context;
+	int wait_time = 0;
+	if (bus->idletime > 0) {
+		wait_time = msecs_to_jiffies(bus->idletime * dhd_watchdog_ms);
+	}
+
+	ret = dhd_os_check_wakelock(bus->dhd);
+	if ((!ret) && (bus->dhd->up)) {
+		if (wait_event_timeout(bus->bus_sleep, bus->sleeping, wait_time) == 0) {
+			if (!bus->sleeping) {
+				return 1;
+			}
+		}
+	}
+	return ret;
+}
+
+static int
+dhdsdio_resume(void *context)
+{
+#if defined(OOB_INTR_ONLY)
+	dhd_bus_t *bus = (dhd_bus_t*)context;
+
+	if (dhd_os_check_if_up(bus->dhd))
+		bcmsdh_oob_intr_set(bus->sdh, TRUE);
+#endif
+	return 0;
+}
+
+
+/* Register/Unregister functions are called by the main DHD entry
+ * point (e.g. module insertion) to link with the bus driver, in
+ * order to look for or await the device.
+ */
+
+static bcmsdh_driver_t dhd_sdio = {
+	dhdsdio_probe,
+	dhdsdio_disconnect,
+	dhdsdio_suspend,
+	dhdsdio_resume
+};
+
+int
+dhd_bus_register(void)
+{
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	return bcmsdh_register(&dhd_sdio);
+}
+
+void
+dhd_bus_unregister(void)
+{
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	bcmsdh_unregister();
+}
+
+#if defined(BCMLXSDMMC)
+/* Register a dummy SDIO client driver in order to be notified of new SDIO device */
+int dhd_bus_reg_sdio_notify(void* semaphore)
+{
+	return bcmsdh_reg_sdio_notify(semaphore);
+}
+
+void dhd_bus_unreg_sdio_notify(void)
+{
+	bcmsdh_unreg_sdio_notify();
+}
+#endif /* defined(BCMLXSDMMC) */
+
+#ifdef BCMEMBEDIMAGE
+static int
+dhdsdio_download_code_array(struct dhd_bus *bus)
+{
+	int bcmerror = -1;
+	int offset = 0;
+	unsigned char *ularray = NULL;
+
+	DHD_INFO(("%s: download embedded firmware...\n", __FUNCTION__));
+
+	/* Download image */
+	while ((offset + MEMBLOCK) < sizeof(dlarray)) {
+		/* check if CR4 */
+		if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
+			/* if address is 0, store the reset instruction to be written in 0 */
+
+			if (offset == 0) {
+				bus->resetinstr = *(((uint32*)dlarray));
+				/* Add start of RAM address to the address given by user */
+				offset += bus->dongle_ram_base;
+			}
+		}
+
+		bcmerror = dhdsdio_membytes(bus, TRUE, offset,
+			(uint8 *) (dlarray + offset), MEMBLOCK);
+		if (bcmerror) {
+			DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+			        __FUNCTION__, bcmerror, MEMBLOCK, offset));
+			goto err;
+		}
+
+		offset += MEMBLOCK;
+	}
+
+	if (offset < sizeof(dlarray)) {
+		bcmerror = dhdsdio_membytes(bus, TRUE, offset,
+			(uint8 *) (dlarray + offset), sizeof(dlarray) - offset);
+		if (bcmerror) {
+			DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+			        __FUNCTION__, bcmerror, sizeof(dlarray) - offset, offset));
+			goto err;
+		}
+	}
+
+#ifdef DHD_DEBUG
+	/* Upload and compare the downloaded code */
+	{
+		ularray = MALLOC(bus->dhd->osh, bus->ramsize);
+		/* Upload image to verify downloaded contents. */
+		offset = 0;
+		memset(ularray, 0xaa, bus->ramsize);
+		while ((offset + MEMBLOCK) < sizeof(dlarray)) {
+			bcmerror = dhdsdio_membytes(bus, FALSE, offset, ularray + offset, MEMBLOCK);
+			if (bcmerror) {
+				DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
+					__FUNCTION__, bcmerror, MEMBLOCK, offset));
+				goto err;
+			}
+
+			offset += MEMBLOCK;
+		}
+
+		if (offset < sizeof(dlarray)) {
+			bcmerror = dhdsdio_membytes(bus, FALSE, offset,
+				ularray + offset, sizeof(dlarray) - offset);
+			if (bcmerror) {
+				DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
+					__FUNCTION__, bcmerror, sizeof(dlarray) - offset, offset));
+				goto err;
+			}
+		}
+
+		if (memcmp(dlarray, ularray, sizeof(dlarray))) {
+			DHD_ERROR(("%s: Downloaded image is corrupted (%s, %s, %s).\n",
+			           __FUNCTION__, dlimagename, dlimagever, dlimagedate));
+			goto err;
+		} else
+			DHD_ERROR(("%s: Download, Upload and compare succeeded (%s, %s, %s).\n",
+			           __FUNCTION__, dlimagename, dlimagever, dlimagedate));
+
+	}
+#endif /* DHD_DEBUG */
+
+err:
+	if (ularray)
+		MFREE(bus->dhd->osh, ularray, bus->ramsize);
+	return bcmerror;
+}
+#endif /* BCMEMBEDIMAGE */
+
+static int
+dhdsdio_download_code_file(struct dhd_bus *bus, char *pfw_path)
+{
+	int bcmerror = -1;
+	int offset = 0;
+	int len;
+	void *image = NULL;
+	uint8 *memblock = NULL, *memptr;
+
+	DHD_INFO(("%s: download firmware %s\n", __FUNCTION__, pfw_path));
+
+	image = dhd_os_open_image(pfw_path);
+	if (image == NULL)
+		goto err;
+
+	memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
+	if (memblock == NULL) {
+		DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
+		goto err;
+	}
+	if ((uint32)(uintptr)memblock % DHD_SDALIGN)
+		memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN));
+
+	/* Download image */
+	while ((len = dhd_os_get_image_block((char*)memptr, MEMBLOCK, image))) {
+		if (len < 0) {
+			DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n", __FUNCTION__, len));
+			bcmerror = BCME_ERROR;
+			goto err;
+		}
+		/* check if CR4 */
+		if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
+			/* if address is 0, store the reset instruction to be written in 0 */
+
+			if (offset == 0) {
+				bus->resetinstr = *(((uint32*)memptr));
+				/* Add start of RAM address to the address given by user */
+				offset += bus->dongle_ram_base;
+			}
+		}
+
+		bcmerror = dhdsdio_membytes(bus, TRUE, offset, memptr, len);
+		if (bcmerror) {
+			DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+			        __FUNCTION__, bcmerror, MEMBLOCK, offset));
+			goto err;
+		}
+
+		offset += MEMBLOCK;
+	}
+
+err:
+	if (memblock)
+		MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN);
+
+	if (image)
+		dhd_os_close_image(image);
+
+	return bcmerror;
+}
+
+/*
+	EXAMPLE: nvram_array
+	nvram_arry format:
+	name=value
+	Use carriage return at the end of each assignment, and an empty string with
+	carriage return at the end of array.
+
+	For example:
+	unsigned char  nvram_array[] = {"name1=value1\n", "name2=value2\n", "\n"};
+	Hex values start with 0x, and mac addr format: xx:xx:xx:xx:xx:xx.
+
+	Search "EXAMPLE: nvram_array" to see how the array is activated.
+*/
+
+void
+dhd_bus_set_nvram_params(struct dhd_bus * bus, const char *nvram_params)
+{
+	bus->nvram_params = nvram_params;
+}
+
+static int
+dhdsdio_download_nvram(struct dhd_bus *bus)
+{
+	int bcmerror = -1;
+	uint len;
+	void * image = NULL;
+	char * memblock = NULL;
+	char *bufp;
+	char *pnv_path;
+	bool nvram_file_exists;
+
+	pnv_path = bus->nv_path;
+
+	nvram_file_exists = ((pnv_path != NULL) && (pnv_path[0] != '\0'));
+	if (!nvram_file_exists && (bus->nvram_params == NULL))
+		return (0);
+
+	if (nvram_file_exists) {
+		image = dhd_os_open_image(pnv_path);
+		if (image == NULL)
+			goto err;
+	}
+
+	memblock = MALLOC(bus->dhd->osh, MAX_NVRAMBUF_SIZE);
+	if (memblock == NULL) {
+		DHD_ERROR(("%s: Failed to allocate memory %d bytes\n",
+		           __FUNCTION__, MAX_NVRAMBUF_SIZE));
+		goto err;
+	}
+
+	/* Download variables */
+	if (nvram_file_exists) {
+		len = dhd_os_get_image_block(memblock, MAX_NVRAMBUF_SIZE, image);
+	}
+	else {
+		len = strlen(bus->nvram_params);
+		ASSERT(len <= MAX_NVRAMBUF_SIZE);
+		memcpy(memblock, bus->nvram_params, len);
+	}
+	if (len > 0 && len < MAX_NVRAMBUF_SIZE) {
+		bufp = (char *)memblock;
+		bufp[len] = 0;
+		len = process_nvram_vars(bufp, len);
+		if (len % 4) {
+			len += 4 - (len % 4);
+		}
+		bufp += len;
+		*bufp++ = 0;
+		if (len)
+			bcmerror = dhdsdio_downloadvars(bus, memblock, len + 1);
+		if (bcmerror) {
+			DHD_ERROR(("%s: error downloading vars: %d\n",
+			           __FUNCTION__, bcmerror));
+		}
+	}
+	else {
+		DHD_ERROR(("%s: error reading nvram file: %d\n",
+		           __FUNCTION__, len));
+		bcmerror = BCME_SDIO_ERROR;
+	}
+
+err:
+	if (memblock)
+		MFREE(bus->dhd->osh, memblock, MAX_NVRAMBUF_SIZE);
+
+	if (image)
+		dhd_os_close_image(image);
+
+	return bcmerror;
+}
+
+static int
+_dhdsdio_download_firmware(struct dhd_bus *bus)
+{
+	int bcmerror = -1;
+
+	bool embed = FALSE;	/* download embedded firmware */
+	bool dlok = FALSE;	/* download firmware succeeded */
+
+	/* Out immediately if no image to download */
+	if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) {
+#ifdef BCMEMBEDIMAGE
+		embed = TRUE;
+#else
+		return 0;
+#endif
+	}
+
+	/* Keep arm in reset */
+	if (dhdsdio_download_state(bus, TRUE)) {
+		DHD_ERROR(("%s: error placing ARM core in reset\n", __FUNCTION__));
+		goto err;
+	}
+
+	/* External image takes precedence if specified */
+	if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) {
+		if (dhdsdio_download_code_file(bus, bus->fw_path)) {
+			DHD_ERROR(("%s: dongle image file download failed\n", __FUNCTION__));
+#ifdef BCMEMBEDIMAGE
+			embed = TRUE;
+#else
+			goto err;
+#endif
+		}
+		else {
+			embed = FALSE;
+			dlok = TRUE;
+		}
+	}
+
+#ifdef BCMEMBEDIMAGE
+	if (embed) {
+		if (dhdsdio_download_code_array(bus)) {
+			DHD_ERROR(("%s: dongle image array download failed\n", __FUNCTION__));
+			goto err;
+		}
+		else {
+			dlok = TRUE;
+		}
+	}
+#else
+	BCM_REFERENCE(embed);
+#endif
+	if (!dlok) {
+		DHD_ERROR(("%s: dongle image download failed\n", __FUNCTION__));
+		goto err;
+	}
+
+	/* EXAMPLE: nvram_array */
+	/* If a valid nvram_arry is specified as above, it can be passed down to dongle */
+	/* dhd_bus_set_nvram_params(bus, (char *)&nvram_array); */
+
+	/* External nvram takes precedence if specified */
+	if (dhdsdio_download_nvram(bus)) {
+		DHD_ERROR(("%s: dongle nvram file download failed\n", __FUNCTION__));
+		goto err;
+	}
+
+	/* Take arm out of reset */
+	if (dhdsdio_download_state(bus, FALSE)) {
+		DHD_ERROR(("%s: error getting out of ARM core reset\n", __FUNCTION__));
+		goto err;
+	}
+
+	bcmerror = 0;
+
+err:
+	return bcmerror;
+}
+
+static int
+dhd_bcmsdh_recv_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags, uint8 *buf, uint nbytes,
+	void *pkt, bcmsdh_cmplt_fn_t complete, void *handle)
+{
+	int status;
+
+	if (!KSO_ENAB(bus)) {
+		DHD_ERROR(("%s: Device asleep\n", __FUNCTION__));
+		return BCME_NODEVICE;
+	}
+
+	status = bcmsdh_recv_buf(bus->sdh, addr, fn, flags, buf, nbytes, pkt, complete, handle);
+
+	return status;
+}
+
+static int
+dhd_bcmsdh_send_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags, uint8 *buf, uint nbytes,
+	void *pkt, bcmsdh_cmplt_fn_t complete, void *handle, int max_retry)
+{
+	int ret;
+	int i = 0;
+	int retries = 0;
+	bcmsdh_info_t *sdh;
+
+	if (!KSO_ENAB(bus)) {
+		DHD_ERROR(("%s: Device asleep\n", __FUNCTION__));
+		return BCME_NODEVICE;
+	}
+
+	sdh = bus->sdh;
+	do {
+		ret = bcmsdh_send_buf(bus->sdh, addr, fn, flags, buf, nbytes,
+			pkt, complete, handle);
+
+		bus->f2txdata++;
+		ASSERT(ret != BCME_PENDING);
+
+		if (ret == BCME_NODEVICE) {
+			DHD_ERROR(("%s: Device asleep already\n", __FUNCTION__));
+		} else if (ret < 0) {
+			/* On failure, abort the command and terminate the frame */
+			DHD_ERROR(("%s: sdio error %d, abort command and terminate frame.\n",
+				__FUNCTION__, ret));
+			bus->tx_sderrs++;
+			bus->f1regdata++;
+			bus->dhd->tx_errors++;
+			bcmsdh_abort(sdh, SDIO_FUNC_2);
+			bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL,
+				SFC_WF_TERM, NULL);
+			for (i = 0; i < READ_FRM_CNT_RETRIES; i++) {
+				uint8 hi, lo;
+				hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WFRAMEBCHI,
+					NULL);
+				lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WFRAMEBCLO,
+					NULL);
+				bus->f1regdata += 2;
+				if ((hi == 0) && (lo == 0))
+					break;
+			}
+		}
+	} while ((ret < 0) && retrydata && ++retries < max_retry);
+
+	return ret;
+}
+
+uint
+dhd_bus_chip(struct dhd_bus *bus)
+{
+	ASSERT(bus->sih != NULL);
+	return bus->sih->chip;
+}
+
+uint
+dhd_bus_chiprev(struct dhd_bus *bus)
+{
+	ASSERT(bus);
+	ASSERT(bus->sih != NULL);
+	return bus->sih->chiprev;
+}
+
+void *
+dhd_bus_pub(struct dhd_bus *bus)
+{
+	return bus->dhd;
+}
+
+void *
+dhd_bus_sih(struct dhd_bus *bus)
+{
+	return (void *)bus->sih;
+}
+
+void *
+dhd_bus_txq(struct dhd_bus *bus)
+{
+	return &bus->txq;
+}
+
+uint
+dhd_bus_hdrlen(struct dhd_bus *bus)
+{
+	return (bus->txglom_enable) ? SDPCM_HDRLEN_TXGLOM : SDPCM_HDRLEN;
+}
+
+void
+dhd_bus_set_dotxinrx(struct dhd_bus *bus, bool val)
+{
+	bus->dotxinrx = val;
+}
+
+int
+dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag)
+{
+	int bcmerror = 0;
+	dhd_bus_t *bus;
+
+	bus = dhdp->bus;
+
+	if (flag == TRUE) {
+		if (!bus->dhd->dongle_reset) {
+			dhd_os_sdlock(dhdp);
+			dhd_os_wd_timer(dhdp, 0);
+#if !defined(IGNORE_ETH0_DOWN)
+			/* Force flow control as protection when stop come before ifconfig_down */
+			dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON);
+#endif /* !defined(IGNORE_ETH0_DOWN) */
+			/* Expect app to have torn down any connection before calling */
+			/* Stop the bus, disable F2 */
+			dhd_bus_stop(bus, FALSE);
+
+#if defined(OOB_INTR_ONLY)
+			/* Clean up any pending IRQ */
+			dhd_enable_oob_intr(bus, FALSE);
+			bcmsdh_oob_intr_set(bus->sdh, FALSE);
+			bcmsdh_oob_intr_unregister(bus->sdh);
+#endif
+
+			/* Clean tx/rx buffer pointers, detach from the dongle */
+			dhdsdio_release_dongle(bus, bus->dhd->osh, TRUE, TRUE);
+
+			bus->dhd->dongle_reset = TRUE;
+			bus->dhd->up = FALSE;
+			dhd_txglom_enable(dhdp, FALSE);
+			dhd_os_sdunlock(dhdp);
+
+			DHD_TRACE(("%s:  WLAN OFF DONE\n", __FUNCTION__));
+			/* App can now remove power from device */
+		} else
+			bcmerror = BCME_SDIO_ERROR;
+	} else {
+		/* App must have restored power to device before calling */
+
+		DHD_TRACE(("\n\n%s: == WLAN ON ==\n", __FUNCTION__));
+
+		if (bus->dhd->dongle_reset) {
+			/* Turn on WLAN */
+			dhd_os_sdlock(dhdp);
+			/* Reset SD client */
+			bcmsdh_reset(bus->sdh);
+
+			/* Attempt to re-attach & download */
+			if (dhdsdio_probe_attach(bus, bus->dhd->osh, bus->sdh,
+				(uint32 *)SI_ENUM_BASE,
+				bus->cl_devid)) {
+				/* Attempt to download binary to the dongle */
+				if (dhdsdio_probe_init(bus, bus->dhd->osh, bus->sdh) &&
+				    dhdsdio_download_firmware(bus, bus->dhd->osh, bus->sdh) >= 0) {
+
+					/* Re-init bus, enable F2 transfer */
+					bcmerror = dhd_bus_init((dhd_pub_t *) bus->dhd, FALSE);
+					if (bcmerror == BCME_OK) {
+#if defined(OOB_INTR_ONLY)
+						dhd_enable_oob_intr(bus, TRUE);
+						bcmsdh_oob_intr_register(bus->sdh,
+							dhdsdio_isr, bus);
+						bcmsdh_oob_intr_set(bus->sdh, TRUE);
+#endif
+
+						bus->dhd->dongle_reset = FALSE;
+						bus->dhd->up = TRUE;
+
+#if !defined(IGNORE_ETH0_DOWN)
+						/* Restore flow control  */
+						dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF);
+#endif
+						dhd_os_wd_timer(dhdp, dhd_watchdog_ms);
+
+						DHD_TRACE(("%s: WLAN ON DONE\n", __FUNCTION__));
+					} else {
+						dhd_bus_stop(bus, FALSE);
+						dhdsdio_release_dongle(bus, bus->dhd->osh,
+							TRUE, FALSE);
+					}
+				} else
+					bcmerror = BCME_SDIO_ERROR;
+			} else
+				bcmerror = BCME_SDIO_ERROR;
+
+				dhd_os_sdunlock(dhdp);
+		} else {
+			bcmerror = BCME_SDIO_ERROR;
+			DHD_INFO(("%s called when dongle is not in reset\n",
+				__FUNCTION__));
+			DHD_INFO(("Will call dhd_bus_start instead\n"));
+			dhd_bus_resume(dhdp, 1);
+			if ((bcmerror = dhd_bus_start(dhdp)) != 0)
+				DHD_ERROR(("%s: dhd_bus_start fail with %d\n",
+					__FUNCTION__, bcmerror));
+		}
+	}
+	return bcmerror;
+}
+
+int dhd_bus_suspend(dhd_pub_t *dhdpub)
+{
+	return bcmsdh_stop(dhdpub->bus->sdh);
+}
+
+int dhd_bus_resume(dhd_pub_t *dhdpub, int stage)
+{
+	return bcmsdh_start(dhdpub->bus->sdh, stage);
+}
+
+/* Get Chip ID version */
+uint dhd_bus_chip_id(dhd_pub_t *dhdp)
+{
+	dhd_bus_t *bus = dhdp->bus;
+
+	return  bus->sih->chip;
+}
+
+/* Get Chip Rev ID version */
+uint dhd_bus_chiprev_id(dhd_pub_t *dhdp)
+{
+	dhd_bus_t *bus = dhdp->bus;
+
+	return bus->sih->chiprev;
+}
+
+/* Get Chip Pkg ID version */
+uint dhd_bus_chippkg_id(dhd_pub_t *dhdp)
+{
+	dhd_bus_t *bus = dhdp->bus;
+
+	return bus->sih->chippkg;
+}
+
+int dhd_bus_get_ids(struct dhd_bus *bus, uint32 *bus_type, uint32 *bus_num, uint32 *slot_num)
+{
+	*bus_type = bus->bus;
+	*bus_num = bus->bus_num;
+	*slot_num = bus->slot_num;
+	return 0;
+}
+
+int
+dhd_bus_membytes(dhd_pub_t *dhdp, bool set, uint32 address, uint8 *data, uint size)
+{
+	dhd_bus_t *bus;
+
+	bus = dhdp->bus;
+	return dhdsdio_membytes(bus, set, address, data, size);
+}
+
+
+void
+dhd_bus_update_fw_nv_path(struct dhd_bus *bus, char *pfw_path, char *pnv_path)
+{
+	bus->fw_path = pfw_path;
+	bus->nv_path = pnv_path;
+}
+
+int
+dhd_enableOOB(dhd_pub_t *dhd, bool sleep)
+{
+	dhd_bus_t *bus = dhd->bus;
+	sdpcmd_regs_t *regs = bus->regs;
+	uint retries = 0;
+
+	if (sleep) {
+		dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+		/* Tell device to start using OOB wakeup */
+		W_SDREG(SMB_USE_OOB, &regs->tosbmailbox, retries);
+		if (retries > retry_limit) {
+			DHD_ERROR(("CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n"));
+			return BCME_BUSY;
+		}
+		/* Turn off our contribution to the HT clock request */
+		dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+	} else {
+		/* Make sure the controller has the bus up */
+		dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+		/* Send misc interrupt to indicate OOB not needed */
+		W_SDREG(0, &regs->tosbmailboxdata, retries);
+		if (retries <= retry_limit)
+			W_SDREG(SMB_DEV_INT, &regs->tosbmailbox, retries);
+
+		if (retries > retry_limit)
+			DHD_ERROR(("CANNOT SIGNAL CHIP TO CLEAR OOB!!\n"));
+
+		/* Make sure we have SD bus access */
+		dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+	}
+	return BCME_OK;
+}
+
+void
+dhd_bus_pktq_flush(dhd_pub_t *dhdp)
+{
+	dhd_bus_t *bus = dhdp->bus;
+	bool wlfc_enabled = FALSE;
+
+#ifdef PROP_TXSTATUS
+	wlfc_enabled = (dhd_wlfc_cleanup_txq(dhdp, NULL, 0) != WLFC_UNSUPPORTED);
+#endif
+	if (!wlfc_enabled) {
+#ifdef DHDTCPACK_SUPPRESS
+		/* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
+		 * when there is a newly coming packet from network stack.
+		 */
+		dhd_tcpack_info_tbl_clean(bus->dhd);
+#endif /* DHDTCPACK_SUPPRESS */
+		/* Clear the data packet queues */
+		pktq_flush(dhdp->osh, &bus->txq, TRUE, NULL, 0);
+	}
+}
+
+#ifdef BCMSDIO
+int
+dhd_sr_config(dhd_pub_t *dhd, bool on)
+{
+	dhd_bus_t *bus = dhd->bus;
+
+	if (!bus->_srenab)
+		return -1;
+
+	return dhdsdio_clk_devsleep_iovar(bus, on);
+}
+
+uint16
+dhd_get_chipid(dhd_pub_t *dhd)
+{
+	dhd_bus_t *bus = dhd->bus;
+
+	if (bus && bus->sih)
+		return (uint16)bus->sih->chip;
+	else
+		return 0;
+}
+#endif /* BCMSDIO */
+
+#ifdef DEBUGGER
+uint32 dhd_sdio_reg_read(void *h, uint32 addr)
+{
+	uint32 rval;
+	struct dhd_bus *bus = (struct dhd_bus *) h;
+
+	dhd_os_sdlock(bus->dhd);
+
+	BUS_WAKE(bus);
+
+	dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+	rval = bcmsdh_reg_read(bus->sdh, addr, 4);
+
+	dhd_os_sdunlock(bus->dhd);
+
+	return rval;
+}
+
+void dhd_sdio_reg_write(void *h, uint32 addr, uint32 val)
+{
+	struct dhd_bus *bus = (struct dhd_bus *) h;
+
+	dhd_os_sdlock(bus->dhd);
+
+	BUS_WAKE(bus);
+
+	dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+	bcmsdh_reg_write(bus->sdh, addr, 4, val);
+
+	dhd_os_sdunlock(bus->dhd);
+}
+#endif /* DEBUGGER */
diff --git a/drivers/net/wireless/bcmdhd/dhd_wlfc.c b/drivers/net/wireless/bcmdhd/dhd_wlfc.c
new file mode 100644
index 0000000..69600b8
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_wlfc.c
@@ -0,0 +1,4090 @@
+/*
+ * DHD PROP_TXSTATUS Module.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ *
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ *
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_wlfc.c 490028 2014-07-09 05:58:25Z $
+ *
+ */
+
+#include <typedefs.h>
+#include <osl.h>
+
+#include <bcmutils.h>
+#include <bcmendian.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+
+#include <dhd_bus.h>
+#include <dhd_dbg.h>
+
+#ifdef PROP_TXSTATUS
+#include <wlfc_proto.h>
+#include <dhd_wlfc.h>
+#endif
+#include <dhd_ip.h>
+
+
+/*
+ * wlfc naming and lock rules:
+ *
+ * 1. Private functions name like _dhd_wlfc_XXX, declared as static and avoid wlfc lock operation.
+ * 2. Public functions name like dhd_wlfc_XXX, use wlfc lock if needed.
+ * 3. Non-Proptxstatus module call public functions only and avoid wlfc lock operation.
+ *
+ */
+
+
+#ifdef PROP_TXSTATUS
+
+#define DHD_WLFC_QMON_COMPLETE(entry)
+
+#define LIMIT_BORROW
+
+
+static uint16
+_dhd_wlfc_adjusted_seq(void* p, uint8 current_seq)
+{
+	uint16 seq;
+
+	if (!p) {
+		return 0xffff;
+	}
+
+	seq = WL_TXSTATUS_GET_FREERUNCTR(DHD_PKTTAG_H2DTAG(PKTTAG(p)));
+	if (seq < current_seq) {
+		/* wrap around */
+		seq += 256;
+	}
+
+	return seq;
+}
+
+static void
+_dhd_wlfc_prec_enque(struct pktq *pq, int prec, void* p, bool qHead,
+	uint8 current_seq, bool reOrder)
+{
+	struct pktq_prec *q;
+	uint16 seq, seq2;
+	void *p2, *p2_prev;
+
+	if (!p)
+		return;
+
+
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+	ASSERT(PKTLINK(p) == NULL);         /* queueing chains not allowed */
+
+	ASSERT(!pktq_full(pq));
+	ASSERT(!pktq_pfull(pq, prec));
+
+	q = &pq->q[prec];
+
+	PKTSETLINK(p, NULL);
+	if (q->head == NULL) {
+		/* empty queue */
+		q->head = p;
+		q->tail = p;
+	} else {
+		if (reOrder && (prec & 1)) {
+			seq = _dhd_wlfc_adjusted_seq(p, current_seq);
+			p2 = qHead ? q->head : q->tail;
+			seq2 = _dhd_wlfc_adjusted_seq(p2, current_seq);
+
+			if ((qHead &&((seq+1) > seq2)) || (!qHead && ((seq2+1) > seq))) {
+				/* need reorder */
+				p2 = q->head;
+				p2_prev = NULL;
+				seq2 = _dhd_wlfc_adjusted_seq(p2, current_seq);
+
+				while (seq > seq2) {
+					p2_prev = p2;
+					p2 = PKTLINK(p2);
+					if (!p2) {
+						break;
+					}
+					seq2 = _dhd_wlfc_adjusted_seq(p2, current_seq);
+				}
+
+				if (p2_prev == NULL) {
+					/* insert head */
+					PKTSETLINK(p, q->head);
+					q->head = p;
+				} else if (p2 == NULL) {
+					/* insert tail */
+					PKTSETLINK(p2_prev, p);
+					q->tail = p;
+				} else {
+					/* insert after p2_prev */
+					PKTSETLINK(p, PKTLINK(p2_prev));
+					PKTSETLINK(p2_prev, p);
+				}
+				goto exit;
+			}
+		}
+
+		if (qHead) {
+			PKTSETLINK(p, q->head);
+			q->head = p;
+		} else {
+			PKTSETLINK(q->tail, p);
+			q->tail = p;
+		}
+	}
+
+exit:
+
+	q->len++;
+	pq->len++;
+
+	if (pq->hi_prec < prec)
+		pq->hi_prec = (uint8)prec;
+}
+
+/* Create a place to store all packet pointers submitted to the firmware until
+	a status comes back, suppress or otherwise.
+
+	hang-er: noun, a contrivance on which things are hung, as a hook.
+*/
+static void*
+_dhd_wlfc_hanger_create(osl_t *osh, int max_items)
+{
+	int i;
+	wlfc_hanger_t* hanger;
+
+	/* allow only up to a specific size for now */
+	ASSERT(max_items == WLFC_HANGER_MAXITEMS);
+
+	if ((hanger = (wlfc_hanger_t*)MALLOC(osh, WLFC_HANGER_SIZE(max_items))) == NULL)
+		return NULL;
+
+	memset(hanger, 0, WLFC_HANGER_SIZE(max_items));
+	hanger->max_items = max_items;
+
+	for (i = 0; i < hanger->max_items; i++) {
+		hanger->items[i].state = WLFC_HANGER_ITEM_STATE_FREE;
+	}
+	return hanger;
+}
+
+static int
+_dhd_wlfc_hanger_delete(osl_t *osh, void* hanger)
+{
+	wlfc_hanger_t* h = (wlfc_hanger_t*)hanger;
+
+	if (h) {
+		MFREE(osh, h, WLFC_HANGER_SIZE(h->max_items));
+		return BCME_OK;
+	}
+	return BCME_BADARG;
+}
+
+static uint16
+_dhd_wlfc_hanger_get_free_slot(void* hanger)
+{
+	uint32 i;
+	wlfc_hanger_t* h = (wlfc_hanger_t*)hanger;
+
+	if (h) {
+		i = h->slot_pos + 1;
+		if (i == h->max_items) {
+			i = 0;
+		}
+		while (i != h->slot_pos) {
+			if (h->items[i].state == WLFC_HANGER_ITEM_STATE_FREE) {
+				h->slot_pos = i;
+				return (uint16)i;
+			}
+			i++;
+			if (i == h->max_items)
+				i = 0;
+		}
+		h->failed_slotfind++;
+	}
+	return WLFC_HANGER_MAXITEMS;
+}
+
+static int
+_dhd_wlfc_hanger_get_genbit(void* hanger, void* pkt, uint32 slot_id, int* gen)
+{
+	int rc = BCME_OK;
+	wlfc_hanger_t* h = (wlfc_hanger_t*)hanger;
+
+	*gen = 0xff;
+
+	/* this packet was not pushed at the time it went to the firmware */
+	if (slot_id == WLFC_HANGER_MAXITEMS)
+		return BCME_NOTFOUND;
+
+	if (h) {
+		if ((h->items[slot_id].state == WLFC_HANGER_ITEM_STATE_INUSE) ||
+			(h->items[slot_id].state == WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED)) {
+			*gen = h->items[slot_id].gen;
+		}
+		else {
+			rc = BCME_NOTFOUND;
+		}
+	}
+	else
+		rc = BCME_BADARG;
+	return rc;
+}
+
+static int
+_dhd_wlfc_hanger_pushpkt(void* hanger, void* pkt, uint32 slot_id)
+{
+	int rc = BCME_OK;
+	wlfc_hanger_t* h = (wlfc_hanger_t*)hanger;
+
+	if (h && (slot_id < WLFC_HANGER_MAXITEMS)) {
+		if (h->items[slot_id].state == WLFC_HANGER_ITEM_STATE_FREE) {
+			h->items[slot_id].state = WLFC_HANGER_ITEM_STATE_INUSE;
+			h->items[slot_id].pkt = pkt;
+			h->items[slot_id].pkt_state = 0;
+			h->items[slot_id].pkt_txstatus = 0;
+			h->pushed++;
+		}
+		else {
+			h->failed_to_push++;
+			rc = BCME_NOTFOUND;
+		}
+	}
+	else
+		rc = BCME_BADARG;
+	return rc;
+}
+
+static int
+_dhd_wlfc_hanger_poppkt(void* hanger, uint32 slot_id, void** pktout, bool remove_from_hanger)
+{
+	int rc = BCME_OK;
+	wlfc_hanger_t* h = (wlfc_hanger_t*)hanger;
+
+	/* this packet was not pushed at the time it went to the firmware */
+	if (slot_id == WLFC_HANGER_MAXITEMS)
+		return BCME_NOTFOUND;
+
+	if (h) {
+		if (h->items[slot_id].state != WLFC_HANGER_ITEM_STATE_FREE) {
+			*pktout = h->items[slot_id].pkt;
+			if (remove_from_hanger) {
+				h->items[slot_id].state =
+					WLFC_HANGER_ITEM_STATE_FREE;
+				h->items[slot_id].pkt = NULL;
+				h->items[slot_id].gen = 0xff;
+				h->items[slot_id].identifier = 0;
+				h->popped++;
+			}
+		}
+		else {
+			h->failed_to_pop++;
+			rc = BCME_NOTFOUND;
+		}
+	}
+	else
+		rc = BCME_BADARG;
+	return rc;
+}
+
+static int
+_dhd_wlfc_hanger_mark_suppressed(void* hanger, uint32 slot_id, uint8 gen)
+{
+	int rc = BCME_OK;
+	wlfc_hanger_t* h = (wlfc_hanger_t*)hanger;
+
+	/* this packet was not pushed at the time it went to the firmware */
+	if (slot_id == WLFC_HANGER_MAXITEMS)
+		return BCME_NOTFOUND;
+	if (h) {
+		h->items[slot_id].gen = gen;
+		if (h->items[slot_id].state == WLFC_HANGER_ITEM_STATE_INUSE) {
+			h->items[slot_id].state = WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED;
+		}
+		else
+			rc = BCME_BADARG;
+	}
+	else
+		rc = BCME_BADARG;
+
+	return rc;
+}
+
+/* remove reference of specific packet in hanger */
+static bool
+_dhd_wlfc_hanger_remove_reference(wlfc_hanger_t* h, void* pkt)
+{
+	int i;
+
+	if (!h || !pkt) {
+		return FALSE;
+	}
+
+	for (i = 0; i < h->max_items; i++) {
+		if (pkt == h->items[i].pkt) {
+			if ((h->items[i].state == WLFC_HANGER_ITEM_STATE_INUSE) ||
+				(h->items[i].state == WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED)) {
+				h->items[i].state = WLFC_HANGER_ITEM_STATE_FREE;
+				h->items[i].pkt = NULL;
+				h->items[i].gen = 0xff;
+				h->items[i].identifier = 0;
+			}
+			return TRUE;
+		}
+	}
+
+	return FALSE;
+}
+
+
+static int
+_dhd_wlfc_enque_afq(athost_wl_status_info_t* ctx, void *p)
+{
+	wlfc_mac_descriptor_t* entry;
+	uint16 entry_idx = WL_TXSTATUS_GET_HSLOT(DHD_PKTTAG_H2DTAG(PKTTAG(p)));
+	uint8 prec = DHD_PKTTAG_FIFO(PKTTAG(p));
+
+	if (entry_idx < WLFC_MAC_DESC_TABLE_SIZE)
+		entry  = &ctx->destination_entries.nodes[entry_idx];
+	else if (entry_idx < (WLFC_MAC_DESC_TABLE_SIZE + WLFC_MAX_IFNUM))
+		entry = &ctx->destination_entries.interfaces[entry_idx - WLFC_MAC_DESC_TABLE_SIZE];
+	else
+		entry = &ctx->destination_entries.other;
+
+	pktq_penq(&entry->afq, prec, p);
+
+	return BCME_OK;
+}
+
+static int
+_dhd_wlfc_deque_afq(athost_wl_status_info_t* ctx, uint16 hslot, uint8 hcnt, uint8 prec,
+	void **pktout)
+{
+	wlfc_mac_descriptor_t *entry;
+	struct pktq *pq;
+	struct pktq_prec *q;
+	void *p, *b;
+
+	if (!ctx) {
+		DHD_ERROR(("%s: ctx(%p), pktout(%p)\n", __FUNCTION__, ctx, pktout));
+		return BCME_BADARG;
+	}
+
+	if (pktout) {
+		*pktout = NULL;
+	}
+
+	ASSERT(hslot < (WLFC_MAC_DESC_TABLE_SIZE + WLFC_MAX_IFNUM + 1));
+
+	if (hslot < WLFC_MAC_DESC_TABLE_SIZE)
+		entry  = &ctx->destination_entries.nodes[hslot];
+	else if (hslot < (WLFC_MAC_DESC_TABLE_SIZE + WLFC_MAX_IFNUM))
+		entry = &ctx->destination_entries.interfaces[hslot - WLFC_MAC_DESC_TABLE_SIZE];
+	else
+		entry = &ctx->destination_entries.other;
+
+	pq = &entry->afq;
+
+	ASSERT(prec < pq->num_prec);
+
+	q = &pq->q[prec];
+
+	b = NULL;
+	p = q->head;
+
+	while (p && (hcnt != WL_TXSTATUS_GET_FREERUNCTR(DHD_PKTTAG_H2DTAG(PKTTAG(p)))))
+	{
+		b = p;
+		p = PKTLINK(p);
+	}
+
+	if (p == NULL) {
+		/* none is matched */
+		if (b) {
+			DHD_ERROR(("%s: can't find matching seq(%d)\n", __FUNCTION__, hcnt));
+		} else {
+			DHD_ERROR(("%s: queue is empty\n", __FUNCTION__));
+		}
+
+		return BCME_ERROR;
+	}
+
+	if (!b) {
+		/* head packet is matched */
+		if ((q->head = PKTLINK(p)) == NULL) {
+			q->tail = NULL;
+		}
+	} else {
+		/* middle packet is matched */
+		DHD_INFO(("%s: out of order, seq(%d), head_seq(%d)\n", __FUNCTION__, hcnt,
+			WL_TXSTATUS_GET_FREERUNCTR(DHD_PKTTAG_H2DTAG(PKTTAG(q->head)))));
+		ctx->stats.ooo_pkts[prec]++;
+		PKTSETLINK(b, PKTLINK(p));
+		if (PKTLINK(p) == NULL) {
+			q->tail = b;
+		}
+	}
+
+	q->len--;
+	pq->len--;
+
+	PKTSETLINK(p, NULL);
+
+	if (pktout) {
+		*pktout = p;
+	}
+
+	return BCME_OK;
+}
+
+static int
+_dhd_wlfc_pushheader(athost_wl_status_info_t* ctx, void* p, bool tim_signal,
+	uint8 tim_bmp, uint8 mac_handle, uint32 htodtag, uint16 htodseq, bool skip_wlfc_hdr)
+{
+	uint32 wl_pktinfo = 0;
+	uint8* wlh;
+	uint8 dataOffset = 0;
+	uint8 fillers;
+	uint8 tim_signal_len = 0;
+	dhd_pub_t *dhdp = (dhd_pub_t *)ctx->dhdp;
+
+	struct bdc_header *h;
+
+	if (skip_wlfc_hdr)
+		goto push_bdc_hdr;
+
+	if (tim_signal) {
+		tim_signal_len = TLV_HDR_LEN + WLFC_CTL_VALUE_LEN_PENDING_TRAFFIC_BMP;
+	}
+
+	/* +2 is for Type[1] and Len[1] in TLV, plus TIM signal */
+	dataOffset = WLFC_CTL_VALUE_LEN_PKTTAG + TLV_HDR_LEN + tim_signal_len;
+	if (WLFC_GET_REUSESEQ(dhdp->wlfc_mode)) {
+		dataOffset += WLFC_CTL_VALUE_LEN_SEQ;
+	}
+
+	fillers = ROUNDUP(dataOffset, 4) - dataOffset;
+	dataOffset += fillers;
+
+	PKTPUSH(ctx->osh, p, dataOffset);
+	wlh = (uint8*) PKTDATA(ctx->osh, p);
+
+	wl_pktinfo = htol32(htodtag);
+
+	wlh[TLV_TAG_OFF] = WLFC_CTL_TYPE_PKTTAG;
+	wlh[TLV_LEN_OFF] = WLFC_CTL_VALUE_LEN_PKTTAG;
+	memcpy(&wlh[TLV_HDR_LEN], &wl_pktinfo, sizeof(uint32));
+
+	if (WLFC_GET_REUSESEQ(dhdp->wlfc_mode)) {
+		uint16 wl_seqinfo = htol16(htodseq);
+		wlh[TLV_LEN_OFF] += WLFC_CTL_VALUE_LEN_SEQ;
+		memcpy(&wlh[TLV_HDR_LEN + WLFC_CTL_VALUE_LEN_PKTTAG], &wl_seqinfo,
+			WLFC_CTL_VALUE_LEN_SEQ);
+	}
+
+	if (tim_signal_len) {
+		wlh[dataOffset - fillers - tim_signal_len ] =
+			WLFC_CTL_TYPE_PENDING_TRAFFIC_BMP;
+		wlh[dataOffset - fillers - tim_signal_len + 1] =
+			WLFC_CTL_VALUE_LEN_PENDING_TRAFFIC_BMP;
+		wlh[dataOffset - fillers - tim_signal_len + 2] = mac_handle;
+		wlh[dataOffset - fillers - tim_signal_len + 3] = tim_bmp;
+	}
+	if (fillers)
+		memset(&wlh[dataOffset - fillers], WLFC_CTL_TYPE_FILLER, fillers);
+
+push_bdc_hdr:
+
+	PKTPUSH(ctx->osh, p, BDC_HEADER_LEN);
+	h = (struct bdc_header *)PKTDATA(ctx->osh, p);
+	h->flags = (BDC_PROTO_VER << BDC_FLAG_VER_SHIFT);
+	if (PKTSUMNEEDED(p))
+		h->flags |= BDC_FLAG_SUM_NEEDED;
+
+
+	h->priority = (PKTPRIO(p) & BDC_PRIORITY_MASK);
+	h->flags2 = 0;
+	h->dataOffset = dataOffset >> 2;
+	BDC_SET_IF_IDX(h, DHD_PKTTAG_IF(PKTTAG(p)));
+	return BCME_OK;
+}
+
+static int
+_dhd_wlfc_pullheader(athost_wl_status_info_t* ctx, void* pktbuf)
+{
+	struct bdc_header *h;
+
+	if (PKTLEN(ctx->osh, pktbuf) < BDC_HEADER_LEN) {
+		DHD_ERROR(("%s: rx data too short (%d < %d)\n", __FUNCTION__,
+		           PKTLEN(ctx->osh, pktbuf), BDC_HEADER_LEN));
+		return BCME_ERROR;
+	}
+	h = (struct bdc_header *)PKTDATA(ctx->osh, pktbuf);
+
+	/* pull BDC header */
+	PKTPULL(ctx->osh, pktbuf, BDC_HEADER_LEN);
+
+	if (PKTLEN(ctx->osh, pktbuf) < (uint)(h->dataOffset << 2)) {
+		DHD_ERROR(("%s: rx data too short (%d < %d)\n", __FUNCTION__,
+		           PKTLEN(ctx->osh, pktbuf), (h->dataOffset << 2)));
+		return BCME_ERROR;
+	}
+
+	/* pull wl-header */
+	PKTPULL(ctx->osh, pktbuf, (h->dataOffset << 2));
+	return BCME_OK;
+}
+
+static wlfc_mac_descriptor_t*
+_dhd_wlfc_find_table_entry(athost_wl_status_info_t* ctx, void* p)
+{
+	int i;
+	wlfc_mac_descriptor_t* table = ctx->destination_entries.nodes;
+	uint8 ifid = DHD_PKTTAG_IF(PKTTAG(p));
+	uint8* dstn = DHD_PKTTAG_DSTN(PKTTAG(p));
+	wlfc_mac_descriptor_t* entry = DHD_PKTTAG_ENTRY(PKTTAG(p));
+	int iftype = ctx->destination_entries.interfaces[ifid].iftype;
+
+	/* saved one exists, return it */
+	if (entry)
+		return entry;
+
+	/* Multicast destination, STA and P2P clients get the interface entry.
+	 * STA/GC gets the Mac Entry for TDLS destinations, TDLS destinations
+	 * have their own entry.
+	 */
+	if ((iftype == WLC_E_IF_ROLE_STA || ETHER_ISMULTI(dstn) ||
+		iftype == WLC_E_IF_ROLE_P2P_CLIENT) &&
+		(ctx->destination_entries.interfaces[ifid].occupied)) {
+			entry = &ctx->destination_entries.interfaces[ifid];
+	}
+
+	if (entry && ETHER_ISMULTI(dstn)) {
+		DHD_PKTTAG_SET_ENTRY(PKTTAG(p), entry);
+		return entry;
+	}
+
+	for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) {
+		if (table[i].occupied) {
+			if (table[i].interface_id == ifid) {
+				if (!memcmp(table[i].ea, dstn, ETHER_ADDR_LEN)) {
+					entry = &table[i];
+					break;
+				}
+			}
+		}
+	}
+
+	if (entry == NULL)
+		entry = &ctx->destination_entries.other;
+
+	DHD_PKTTAG_SET_ENTRY(PKTTAG(p), entry);
+
+	return entry;
+}
+
+static int
+_dhd_wlfc_prec_drop(dhd_pub_t *dhdp, int prec, void* p, bool bPktInQ)
+{
+	athost_wl_status_info_t* ctx;
+	void *pout = NULL;
+
+	ASSERT(dhdp && p);
+	ASSERT(prec >= 0 && prec < WLFC_PSQ_PREC_COUNT);
+
+	ctx = (athost_wl_status_info_t*)dhdp->wlfc_state;
+
+	if (!WLFC_GET_AFQ(dhdp->wlfc_mode) && (prec & 1)) {
+		/* suppressed queue, need pop from hanger */
+		_dhd_wlfc_hanger_poppkt(ctx->hanger, WL_TXSTATUS_GET_HSLOT(DHD_PKTTAG_H2DTAG
+					(PKTTAG(p))), &pout, TRUE);
+		ASSERT(p == pout);
+	}
+
+	if (!(prec & 1)) {
+#ifdef DHDTCPACK_SUPPRESS
+		/* pkt in delayed q, so fake push BDC header for
+		 * dhd_tcpack_check_xmit() and dhd_txcomplete().
+		 */
+		_dhd_wlfc_pushheader(ctx, p, FALSE, 0, 0, 0, 0, TRUE);
+
+		/* This packet is about to be freed, so remove it from tcp_ack_info_tbl
+		 * This must be one of...
+		 * 1. A pkt already in delayQ is evicted by another pkt with higher precedence
+		 * in _dhd_wlfc_prec_enq_with_drop()
+		 * 2. A pkt could not be enqueued to delayQ because it is full,
+		 * in _dhd_wlfc_enque_delayq().
+		 * 3. A pkt could not be enqueued to delayQ because it is full,
+		 * in _dhd_wlfc_rollback_packet_toq().
+		 */
+		if (dhd_tcpack_check_xmit(dhdp, p) == BCME_ERROR) {
+			DHD_ERROR(("%s %d: tcpack_suppress ERROR!!!"
+				" Stop using it\n",
+				__FUNCTION__, __LINE__));
+			dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF);
+		}
+#endif /* DHDTCPACK_SUPPRESS */
+	}
+
+	if (bPktInQ) {
+		ctx->pkt_cnt_in_q[DHD_PKTTAG_IF(PKTTAG(p))][prec>>1]--;
+		ctx->pkt_cnt_per_ac[prec>>1]--;
+	}
+
+	ctx->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(p))][DHD_PKTTAG_FIFO(PKTTAG(p))]--;
+	ctx->stats.pktout++;
+	ctx->stats.drop_pkts[prec]++;
+
+	dhd_txcomplete(dhdp, p, FALSE);
+	PKTFREE(ctx->osh, p, TRUE);
+
+	return 0;
+}
+
+static bool
+_dhd_wlfc_prec_enq_with_drop(dhd_pub_t *dhdp, struct pktq *pq, void *pkt, int prec, bool qHead,
+	uint8 current_seq)
+{
+	void *p = NULL;
+	int eprec = -1;		/* precedence to evict from */
+	athost_wl_status_info_t* ctx;
+
+	ASSERT(dhdp && pq && pkt);
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+
+	ctx = (athost_wl_status_info_t*)dhdp->wlfc_state;
+
+	/* Fast case, precedence queue is not full and we are also not
+	 * exceeding total queue length
+	 */
+	if (!pktq_pfull(pq, prec) && !pktq_full(pq)) {
+		goto exit;
+	}
+
+	/* Determine precedence from which to evict packet, if any */
+	if (pktq_pfull(pq, prec))
+		eprec = prec;
+	else if (pktq_full(pq)) {
+		p = pktq_peek_tail(pq, &eprec);
+		if (!p) {
+			DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+			return FALSE;
+		}
+		if ((eprec > prec) || (eprec < 0)) {
+			if (!pktq_pempty(pq, prec)) {
+				eprec = prec;
+			} else {
+				return FALSE;
+			}
+		}
+	}
+
+	/* Evict if needed */
+	if (eprec >= 0) {
+		/* Detect queueing to unconfigured precedence */
+		ASSERT(!pktq_pempty(pq, eprec));
+		/* Evict all fragmented frames */
+		dhd_prec_drop_pkts(dhdp, pq, eprec, _dhd_wlfc_prec_drop);
+	}
+
+exit:
+	/* Enqueue */
+	_dhd_wlfc_prec_enque(pq, prec, pkt, qHead, current_seq,
+		WLFC_GET_REORDERSUPP(dhdp->wlfc_mode));
+	ctx->pkt_cnt_in_q[DHD_PKTTAG_IF(PKTTAG(pkt))][prec>>1]++;
+	ctx->pkt_cnt_per_ac[prec>>1]++;
+
+	return TRUE;
+}
+
+
+static int
+_dhd_wlfc_rollback_packet_toq(athost_wl_status_info_t* ctx,
+	void* p, ewlfc_packet_state_t pkt_type, uint32 hslot)
+{
+	/*
+	put the packet back to the head of queue
+
+	- suppressed packet goes back to suppress sub-queue
+	- pull out the header, if new or delayed packet
+
+	Note: hslot is used only when header removal is done.
+	*/
+	wlfc_mac_descriptor_t* entry;
+	int rc = BCME_OK;
+	int prec, fifo_id;
+
+	entry = _dhd_wlfc_find_table_entry(ctx, p);
+	prec = DHD_PKTTAG_FIFO(PKTTAG(p));
+	fifo_id = prec << 1;
+	if (pkt_type == eWLFC_PKTTYPE_SUPPRESSED)
+		fifo_id += 1;
+	if (entry != NULL) {
+		/*
+		if this packet did not count against FIFO credit, it must have
+		taken a requested_credit from the firmware (for pspoll etc.)
+		*/
+		if ((prec != AC_COUNT) && !DHD_PKTTAG_CREDITCHECK(PKTTAG(p)))
+			entry->requested_credit++;
+
+		if (pkt_type == eWLFC_PKTTYPE_DELAYED) {
+			/* decrement sequence count */
+			WLFC_DECR_SEQCOUNT(entry, prec);
+			/* remove header first */
+			rc = _dhd_wlfc_pullheader(ctx, p);
+			if (rc != BCME_OK) {
+				DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+				goto exit;
+			}
+		}
+
+		if (_dhd_wlfc_prec_enq_with_drop(ctx->dhdp, &entry->psq, p, fifo_id, TRUE,
+			WLFC_SEQCOUNT(entry, fifo_id>>1))
+			== FALSE) {
+			/* enque failed */
+			DHD_ERROR(("Error: %s():%d, fifo_id(%d)\n",
+				__FUNCTION__, __LINE__, fifo_id));
+			rc = BCME_ERROR;
+		}
+	} else {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		rc = BCME_ERROR;
+	}
+exit:
+	if (rc != BCME_OK) {
+		ctx->stats.rollback_failed++;
+		_dhd_wlfc_prec_drop(ctx->dhdp, fifo_id, p, FALSE);
+	}
+	else
+		ctx->stats.rollback++;
+
+	return rc;
+}
+
+static bool
+_dhd_wlfc_allow_fc(athost_wl_status_info_t* ctx, uint8 ifid)
+{
+	int prec, ac_traffic = WLFC_NO_TRAFFIC;
+
+	for (prec = 0; prec < AC_COUNT; prec++) {
+		if (ctx->pkt_cnt_in_drv[ifid][prec] > 0) {
+			if (ac_traffic == WLFC_NO_TRAFFIC)
+				ac_traffic = prec + 1;
+			else if (ac_traffic != (prec + 1))
+				ac_traffic = WLFC_MULTI_TRAFFIC;
+		}
+	}
+
+	if (ac_traffic >= 1 && ac_traffic <= AC_COUNT) {
+		/* single AC (BE/BK/VI/VO) in queue */
+		if (ctx->allow_fc) {
+			return TRUE;
+		} else {
+			uint32 delta;
+			uint32 curr_t = OSL_SYSUPTIME();
+
+			if (ctx->fc_defer_timestamp == 0) {
+				/* first signle ac scenario */
+				ctx->fc_defer_timestamp = curr_t;
+				return FALSE;
+			}
+
+			/* single AC duration, this handles wrap around, e.g. 1 - ~0 = 2. */
+			delta = curr_t - ctx->fc_defer_timestamp;
+			if (delta >= WLFC_FC_DEFER_PERIOD_MS) {
+				ctx->allow_fc = TRUE;
+			}
+		}
+	} else {
+		/* multiple ACs or BCMC in queue */
+		ctx->allow_fc = FALSE;
+		ctx->fc_defer_timestamp = 0;
+	}
+
+	return ctx->allow_fc;
+}
+
+static void
+_dhd_wlfc_flow_control_check(athost_wl_status_info_t* ctx, struct pktq* pq, uint8 if_id)
+{
+	dhd_pub_t *dhdp;
+
+	ASSERT(ctx);
+
+	dhdp = (dhd_pub_t *)ctx->dhdp;
+	ASSERT(dhdp);
+
+	if (dhdp->skip_fc && dhdp->skip_fc())
+		return;
+
+	if ((ctx->hostif_flow_state[if_id] == OFF) && !_dhd_wlfc_allow_fc(ctx, if_id))
+		return;
+
+	if ((pq->len <= WLFC_FLOWCONTROL_LOWATER) && (ctx->hostif_flow_state[if_id] == ON)) {
+		/* start traffic */
+		ctx->hostif_flow_state[if_id] = OFF;
+		/*
+		WLFC_DBGMESG(("qlen:%02d, if:%02d, ->OFF, start traffic %s()\n",
+		pq->len, if_id, __FUNCTION__));
+		*/
+		WLFC_DBGMESG(("F"));
+
+		dhd_txflowcontrol(dhdp, if_id, OFF);
+
+		ctx->toggle_host_if = 0;
+	}
+
+	if ((pq->len >= WLFC_FLOWCONTROL_HIWATER) && (ctx->hostif_flow_state[if_id] == OFF)) {
+		/* stop traffic */
+		ctx->hostif_flow_state[if_id] = ON;
+		/*
+		WLFC_DBGMESG(("qlen:%02d, if:%02d, ->ON, stop traffic   %s()\n",
+		pq->len, if_id, __FUNCTION__));
+		*/
+		WLFC_DBGMESG(("N"));
+
+		dhd_txflowcontrol(dhdp, if_id, ON);
+
+		ctx->host_ifidx = if_id;
+		ctx->toggle_host_if = 1;
+	}
+
+	return;
+}
+
+static int
+_dhd_wlfc_send_signalonly_packet(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* entry,
+	uint8 ta_bmp)
+{
+	int rc = BCME_OK;
+	void* p = NULL;
+	int dummylen = ((dhd_pub_t *)ctx->dhdp)->hdrlen+ 16;
+	dhd_pub_t *dhdp = (dhd_pub_t *)ctx->dhdp;
+
+	if (dhdp->proptxstatus_txoff) {
+		rc = BCME_NORESOURCE;
+		return rc;
+	}
+
+	/* allocate a dummy packet */
+	p = PKTGET(ctx->osh, dummylen, TRUE);
+	if (p) {
+		PKTPULL(ctx->osh, p, dummylen);
+		DHD_PKTTAG_SET_H2DTAG(PKTTAG(p), 0);
+		_dhd_wlfc_pushheader(ctx, p, TRUE, ta_bmp, entry->mac_handle, 0, 0, FALSE);
+		DHD_PKTTAG_SETSIGNALONLY(PKTTAG(p), 1);
+		DHD_PKTTAG_WLFCPKT_SET(PKTTAG(p), 1);
+#ifdef PROP_TXSTATUS_DEBUG
+		ctx->stats.signal_only_pkts_sent++;
+#endif
+
+#if defined(BCMPCIE)
+		rc = dhd_bus_txdata(dhdp->bus, p, ctx->host_ifidx);
+#else
+		rc = dhd_bus_txdata(dhdp->bus, p);
+#endif
+		if (rc != BCME_OK) {
+			_dhd_wlfc_pullheader(ctx, p);
+			PKTFREE(ctx->osh, p, TRUE);
+		}
+	}
+	else {
+		DHD_ERROR(("%s: couldn't allocate new %d-byte packet\n",
+		           __FUNCTION__, dummylen));
+		rc = BCME_NOMEM;
+	}
+	return rc;
+}
+
+/* Return TRUE if traffic availability changed */
+static bool
+_dhd_wlfc_traffic_pending_check(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* entry,
+	int prec)
+{
+	bool rc = FALSE;
+
+	if (entry->state == WLFC_STATE_CLOSE) {
+		if ((pktq_plen(&entry->psq, (prec << 1)) == 0) &&
+			(pktq_plen(&entry->psq, ((prec << 1) + 1)) == 0)) {
+
+			if (entry->traffic_pending_bmp & NBITVAL(prec)) {
+				rc = TRUE;
+				entry->traffic_pending_bmp =
+					entry->traffic_pending_bmp & ~ NBITVAL(prec);
+			}
+		}
+		else {
+			if (!(entry->traffic_pending_bmp & NBITVAL(prec))) {
+				rc = TRUE;
+				entry->traffic_pending_bmp =
+					entry->traffic_pending_bmp | NBITVAL(prec);
+			}
+		}
+	}
+	if (rc) {
+		/* request a TIM update to firmware at the next piggyback opportunity */
+		if (entry->traffic_lastreported_bmp != entry->traffic_pending_bmp) {
+			entry->send_tim_signal = 1;
+			_dhd_wlfc_send_signalonly_packet(ctx, entry, entry->traffic_pending_bmp);
+			entry->traffic_lastreported_bmp = entry->traffic_pending_bmp;
+			entry->send_tim_signal = 0;
+		}
+		else {
+			rc = FALSE;
+		}
+	}
+	return rc;
+}
+
+static int
+_dhd_wlfc_enque_suppressed(athost_wl_status_info_t* ctx, int prec, void* p)
+{
+	wlfc_mac_descriptor_t* entry;
+
+	entry = _dhd_wlfc_find_table_entry(ctx, p);
+	if (entry == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_NOTFOUND;
+	}
+	/*
+	- suppressed packets go to sub_queue[2*prec + 1] AND
+	- delayed packets go to sub_queue[2*prec + 0] to ensure
+	order of delivery.
+	*/
+	if (_dhd_wlfc_prec_enq_with_drop(ctx->dhdp, &entry->psq, p, ((prec << 1) + 1), FALSE,
+		WLFC_SEQCOUNT(entry, prec))
+		== FALSE) {
+		ctx->stats.delayq_full_error++;
+		/* WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__)); */
+		WLFC_DBGMESG(("s"));
+		return BCME_ERROR;
+	}
+
+	/* A packet has been pushed, update traffic availability bitmap, if applicable */
+	_dhd_wlfc_traffic_pending_check(ctx, entry, prec);
+	_dhd_wlfc_flow_control_check(ctx, &entry->psq, DHD_PKTTAG_IF(PKTTAG(p)));
+	return BCME_OK;
+}
+
+static int
+_dhd_wlfc_pretx_pktprocess(athost_wl_status_info_t* ctx,
+	wlfc_mac_descriptor_t* entry, void* p, int header_needed, uint32* slot)
+{
+	int rc = BCME_OK;
+	int hslot = WLFC_HANGER_MAXITEMS;
+	bool send_tim_update = FALSE;
+	uint32 htod = 0;
+	uint16 htodseq = 0;
+	uint8 free_ctr, flags = 0;
+	int gen = 0xff;
+	dhd_pub_t *dhdp = (dhd_pub_t *)ctx->dhdp;
+
+	*slot = hslot;
+
+	if (entry == NULL) {
+		entry = _dhd_wlfc_find_table_entry(ctx, p);
+	}
+
+	if (entry == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_ERROR;
+	}
+
+	if (entry->send_tim_signal) {
+		send_tim_update = TRUE;
+		entry->send_tim_signal = 0;
+		entry->traffic_lastreported_bmp = entry->traffic_pending_bmp;
+	}
+
+	if (header_needed) {
+		if (WLFC_GET_AFQ(dhdp->wlfc_mode)) {
+			hslot = (uint)(entry - &ctx->destination_entries.nodes[0]);
+		} else {
+			hslot = _dhd_wlfc_hanger_get_free_slot(ctx->hanger);
+		}
+		gen = entry->generation;
+		free_ctr = WLFC_SEQCOUNT(entry, DHD_PKTTAG_FIFO(PKTTAG(p)));
+	} else {
+		if (WLFC_GET_REUSESEQ(dhdp->wlfc_mode)) {
+			htodseq = DHD_PKTTAG_H2DSEQ(PKTTAG(p));
+		}
+
+		hslot = WL_TXSTATUS_GET_HSLOT(DHD_PKTTAG_H2DTAG(PKTTAG(p)));
+
+		if (WLFC_GET_REORDERSUPP(dhdp->wlfc_mode)) {
+			gen = entry->generation;
+		} else if (WLFC_GET_AFQ(dhdp->wlfc_mode)) {
+			gen = WL_TXSTATUS_GET_GENERATION(DHD_PKTTAG_H2DTAG(PKTTAG(p)));
+		} else {
+			_dhd_wlfc_hanger_get_genbit(ctx->hanger, p, hslot, &gen);
+		}
+
+		free_ctr = WL_TXSTATUS_GET_FREERUNCTR(DHD_PKTTAG_H2DTAG(PKTTAG(p)));
+		/* remove old header */
+		_dhd_wlfc_pullheader(ctx, p);
+	}
+
+	if (hslot >= WLFC_HANGER_MAXITEMS) {
+		DHD_ERROR(("Error: %s():no hanger slot available\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	flags = WLFC_PKTFLAG_PKTFROMHOST;
+	if (!DHD_PKTTAG_CREDITCHECK(PKTTAG(p))) {
+		/*
+		Indicate that this packet is being sent in response to an
+		explicit request from the firmware side.
+		*/
+		flags |= WLFC_PKTFLAG_PKT_REQUESTED;
+	}
+	if (pkt_is_dhcp(ctx->osh, p)) {
+		flags |= WLFC_PKTFLAG_PKT_FORCELOWRATE;
+	}
+
+	WL_TXSTATUS_SET_FREERUNCTR(htod, free_ctr);
+	WL_TXSTATUS_SET_HSLOT(htod, hslot);
+	WL_TXSTATUS_SET_FIFO(htod, DHD_PKTTAG_FIFO(PKTTAG(p)));
+	WL_TXSTATUS_SET_FLAGS(htod, flags);
+	WL_TXSTATUS_SET_GENERATION(htod, gen);
+	DHD_PKTTAG_SETPKTDIR(PKTTAG(p), 1);
+
+
+	rc = _dhd_wlfc_pushheader(ctx, p, send_tim_update,
+		entry->traffic_lastreported_bmp, entry->mac_handle, htod, htodseq, FALSE);
+	if (rc == BCME_OK) {
+		DHD_PKTTAG_SET_H2DTAG(PKTTAG(p), htod);
+
+		if (!WLFC_GET_AFQ(dhdp->wlfc_mode) && header_needed) {
+			/*
+			a new header was created for this packet.
+			push to hanger slot and scrub q. Since bus
+			send succeeded, increment seq number as well.
+			*/
+			rc = _dhd_wlfc_hanger_pushpkt(ctx->hanger, p, hslot);
+			if (rc == BCME_OK) {
+#ifdef PROP_TXSTATUS_DEBUG
+				((wlfc_hanger_t*)(ctx->hanger))->items[hslot].push_time =
+					OSL_SYSUPTIME();
+#endif
+			} else {
+				DHD_ERROR(("%s() hanger_pushpkt() failed, rc: %d\n",
+					__FUNCTION__, rc));
+			}
+		}
+
+		if ((rc == BCME_OK) && header_needed) {
+			/* increment free running sequence count */
+			WLFC_INCR_SEQCOUNT(entry, DHD_PKTTAG_FIFO(PKTTAG(p)));
+		}
+	}
+	*slot = hslot;
+	return rc;
+}
+
+static int
+_dhd_wlfc_is_destination_open(athost_wl_status_info_t* ctx,
+	wlfc_mac_descriptor_t* entry, int prec)
+{
+	if (entry->interface_id >= WLFC_MAX_IFNUM) {
+		ASSERT(&ctx->destination_entries.other == entry);
+		return 1;
+	}
+	if (ctx->destination_entries.interfaces[entry->interface_id].iftype ==
+		WLC_E_IF_ROLE_P2P_GO) {
+		/* - destination interface is of type p2p GO.
+		For a p2pGO interface, if the destination is OPEN but the interface is
+		CLOSEd, do not send traffic. But if the dstn is CLOSEd while there is
+		destination-specific-credit left send packets. This is because the
+		firmware storing the destination-specific-requested packet in queue.
+		*/
+		if ((entry->state == WLFC_STATE_CLOSE) && (entry->requested_credit == 0) &&
+			(entry->requested_packet == 0)) {
+			return 0;
+		}
+	}
+	/* AP, p2p_go -> unicast desc entry, STA/p2p_cl -> interface desc. entry */
+	if (((entry->state == WLFC_STATE_CLOSE) && (entry->requested_credit == 0) &&
+		(entry->requested_packet == 0)) ||
+		(!(entry->ac_bitmap & (1 << prec)))) {
+		return 0;
+	}
+
+	return 1;
+}
+
+static void*
+_dhd_wlfc_deque_delayedq(athost_wl_status_info_t* ctx, int prec,
+	uint8* ac_credit_spent, uint8* needs_hdr, wlfc_mac_descriptor_t** entry_out,
+	bool only_no_credit)
+{
+	dhd_pub_t *dhdp = (dhd_pub_t *)ctx->dhdp;
+	wlfc_mac_descriptor_t* entry;
+	int total_entries;
+	void* p = NULL;
+	int i;
+
+	*entry_out = NULL;
+	/* most cases a packet will count against FIFO credit */
+	*ac_credit_spent = ((prec == AC_COUNT) && !ctx->bcmc_credit_supported) ? 0 : 1;
+
+	/* search all entries, include nodes as well as interfaces */
+	if (only_no_credit) {
+		total_entries = ctx->requested_entry_count;
+	} else {
+		total_entries = ctx->active_entry_count;
+	}
+
+	for (i = 0; i < total_entries; i++) {
+		if (only_no_credit) {
+			entry = ctx->requested_entry[i];
+		} else {
+			entry = ctx->active_entry_head;
+			/* move head to ensure fair round-robin */
+			ctx->active_entry_head = ctx->active_entry_head->next;
+		}
+		ASSERT(entry);
+
+		if (entry->occupied && _dhd_wlfc_is_destination_open(ctx, entry, prec) &&
+			(entry->transit_count < WL_TXSTATUS_FREERUNCTR_MASK) &&
+			!(WLFC_GET_REORDERSUPP(dhdp->wlfc_mode) && entry->suppressed)) {
+			if (entry->state == WLFC_STATE_CLOSE) {
+				*ac_credit_spent = 0;
+			}
+
+			/* higher precedence will be picked up first,
+			 * i.e. suppressed packets before delayed ones
+			 */
+			p = pktq_pdeq(&entry->psq, PSQ_SUP_IDX(prec));
+			*needs_hdr = 0;
+			if (p == NULL) {
+				if (entry->suppressed == TRUE) {
+					/* skip this entry */
+					continue;
+				}
+				/* De-Q from delay Q */
+				p = pktq_pdeq(&entry->psq, PSQ_DLY_IDX(prec));
+				*needs_hdr = 1;
+			}
+
+			if (p != NULL) {
+				/* did the packet come from suppress sub-queue? */
+				if (entry->requested_credit > 0) {
+					entry->requested_credit--;
+#ifdef PROP_TXSTATUS_DEBUG
+					entry->dstncredit_sent_packets++;
+#endif
+				} else if (entry->requested_packet > 0) {
+					entry->requested_packet--;
+					DHD_PKTTAG_SETONETIMEPKTRQST(PKTTAG(p));
+				}
+
+				*entry_out = entry;
+				ctx->pkt_cnt_in_q[DHD_PKTTAG_IF(PKTTAG(p))][prec]--;
+				ctx->pkt_cnt_per_ac[prec]--;
+				_dhd_wlfc_flow_control_check(ctx, &entry->psq,
+					DHD_PKTTAG_IF(PKTTAG(p)));
+				/*
+				A packet has been picked up, update traffic
+				availability bitmap, if applicable
+				*/
+				_dhd_wlfc_traffic_pending_check(ctx, entry, prec);
+				return p;
+			}
+		}
+	}
+	return NULL;
+}
+
+static int
+_dhd_wlfc_enque_delayq(athost_wl_status_info_t* ctx, void* pktbuf, int prec)
+{
+	wlfc_mac_descriptor_t* entry;
+
+	if (pktbuf != NULL) {
+		entry = _dhd_wlfc_find_table_entry(ctx, pktbuf);
+		if (entry == NULL) {
+			DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+			return BCME_ERROR;
+		}
+
+		/*
+		- suppressed packets go to sub_queue[2*prec + 1] AND
+		- delayed packets go to sub_queue[2*prec + 0] to ensure
+		order of delivery.
+		*/
+		if (_dhd_wlfc_prec_enq_with_drop(ctx->dhdp, &entry->psq, pktbuf, (prec << 1),
+			FALSE, WLFC_SEQCOUNT(entry, prec))
+			== FALSE) {
+			WLFC_DBGMESG(("D"));
+			ctx->stats.delayq_full_error++;
+			return BCME_ERROR;
+		}
+
+
+		/*
+		A packet has been pushed, update traffic availability bitmap,
+		if applicable
+		*/
+		_dhd_wlfc_traffic_pending_check(ctx, entry, prec);
+	}
+
+	return BCME_OK;
+}
+
+static bool _dhd_wlfc_ifpkt_fn(void* p, void *p_ifid)
+{
+	if (!p || !p_ifid)
+		return FALSE;
+
+	return (DHD_PKTTAG_WLFCPKT(PKTTAG(p))&& (*((uint8 *)p_ifid) == DHD_PKTTAG_IF(PKTTAG(p))));
+}
+
+static bool _dhd_wlfc_entrypkt_fn(void* p, void *entry)
+{
+	if (!p || !entry)
+		return FALSE;
+
+	return (DHD_PKTTAG_WLFCPKT(PKTTAG(p))&& (entry == DHD_PKTTAG_ENTRY(PKTTAG(p))));
+}
+
+static void
+_dhd_wlfc_return_implied_credit(athost_wl_status_info_t* wlfc, void* pkt)
+{
+	dhd_pub_t *dhdp;
+
+	if (!wlfc || !pkt) {
+		return;
+	}
+
+	dhdp = (dhd_pub_t *)(wlfc->dhdp);
+	if (dhdp && (dhdp->proptxstatus_mode == WLFC_FCMODE_IMPLIED_CREDIT) &&
+		DHD_PKTTAG_CREDITCHECK(PKTTAG(pkt))) {
+		int lender, credit_returned = 0;
+		uint8 fifo_id = DHD_PKTTAG_FIFO(PKTTAG(pkt));
+
+		/* Note that borrower is fifo_id */
+		/* Return credits to highest priority lender first */
+		for (lender = AC_COUNT; lender >= 0; lender--) {
+			if (wlfc->credits_borrowed[fifo_id][lender] > 0) {
+				wlfc->FIFO_credit[lender]++;
+				wlfc->credits_borrowed[fifo_id][lender]--;
+				credit_returned = 1;
+				break;
+			}
+		}
+
+		if (!credit_returned) {
+			wlfc->FIFO_credit[fifo_id]++;
+		}
+	}
+}
+
+static void
+_dhd_wlfc_hanger_free_pkt(athost_wl_status_info_t* wlfc, uint32 slot_id, uint8 pkt_state,
+	int pkt_txstatus)
+{
+	wlfc_hanger_t* hanger;
+	wlfc_hanger_item_t* item;
+
+	if (!wlfc)
+		return;
+
+	hanger = (wlfc_hanger_t*)wlfc->hanger;
+	if (!hanger)
+		return;
+
+	if (slot_id == WLFC_HANGER_MAXITEMS)
+		return;
+
+	item = &hanger->items[slot_id];
+	item->pkt_state |= pkt_state;
+	if (pkt_txstatus != -1) {
+		item->pkt_txstatus = pkt_txstatus;
+	}
+
+	if (item->pkt) {
+		if ((item->pkt_state & WLFC_HANGER_PKT_STATE_TXCOMPLETE) &&
+			(item->pkt_state & (WLFC_HANGER_PKT_STATE_TXSTATUS |
+			WLFC_HANGER_PKT_STATE_CLEANUP))) {
+			void *p = NULL;
+			void *pkt = item->pkt;
+			uint8 old_state = item->state;
+			int ret = _dhd_wlfc_hanger_poppkt(wlfc->hanger, slot_id, &p, TRUE);
+			BCM_REFERENCE(ret);
+			BCM_REFERENCE(pkt);
+			ASSERT((ret == BCME_OK) && p && (pkt == p));
+
+			/* free packet */
+			if (!(item->pkt_state & WLFC_HANGER_PKT_STATE_TXSTATUS)) {
+				/* cleanup case */
+				wlfc_mac_descriptor_t *entry = _dhd_wlfc_find_table_entry(wlfc, p);
+
+				ASSERT(entry);
+				entry->transit_count--;
+				if (entry->suppressed &&
+					(--entry->suppr_transit_count == 0)) {
+					entry->suppressed = FALSE;
+				}
+				_dhd_wlfc_return_implied_credit(wlfc, p);
+				wlfc->stats.cleanup_fw_cnt++;
+				/* slot not freeable yet */
+				item->state = old_state;
+			}
+
+			wlfc->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(p))]
+				[DHD_PKTTAG_FIFO(PKTTAG(p))]--;
+			wlfc->stats.pktout++;
+			dhd_txcomplete((dhd_pub_t *)wlfc->dhdp, p, item->pkt_txstatus);
+			PKTFREE(wlfc->osh, p, TRUE);
+		}
+	} else {
+		if (item->pkt_state & WLFC_HANGER_PKT_STATE_TXSTATUS) {
+			/* free slot */
+			ASSERT(item->state != WLFC_HANGER_ITEM_STATE_FREE);
+			item->state = WLFC_HANGER_ITEM_STATE_FREE;
+		}
+	}
+}
+
+static void
+_dhd_wlfc_pktq_flush(athost_wl_status_info_t* ctx, struct pktq *pq,
+	bool dir, f_processpkt_t fn, void *arg, q_type_t q_type)
+{
+	int prec;
+	dhd_pub_t *dhdp = (dhd_pub_t *)ctx->dhdp;
+
+	ASSERT(dhdp);
+
+	/* Optimize flush, if pktq len = 0, just return.
+	 * pktq len of 0 means pktq's prec q's are all empty.
+	 */
+	if (pq->len == 0) {
+		return;
+	}
+
+
+	for (prec = 0; prec < pq->num_prec; prec++) {
+		struct pktq_prec *q;
+		void *p, *prev = NULL;
+
+		q = &pq->q[prec];
+		p = q->head;
+		while (p) {
+			if (fn == NULL || (*fn)(p, arg)) {
+				bool head = (p == q->head);
+				if (head)
+					q->head = PKTLINK(p);
+				else
+					PKTSETLINK(prev, PKTLINK(p));
+				if (q_type == Q_TYPE_PSQ) {
+					if (!WLFC_GET_AFQ(dhdp->wlfc_mode) && (prec & 1)) {
+						_dhd_wlfc_hanger_remove_reference(ctx->hanger, p);
+					}
+					ctx->pkt_cnt_in_q[DHD_PKTTAG_IF(PKTTAG(p))][prec>>1]--;
+					ctx->pkt_cnt_per_ac[prec>>1]--;
+					ctx->stats.cleanup_psq_cnt++;
+					if (!(prec & 1)) {
+						/* pkt in delayed q, so fake push BDC header for
+						 * dhd_tcpack_check_xmit() and dhd_txcomplete().
+						 */
+						_dhd_wlfc_pushheader(ctx, p, FALSE, 0, 0,
+							0, 0, TRUE);
+#ifdef DHDTCPACK_SUPPRESS
+						if (dhd_tcpack_check_xmit(dhdp, p) == BCME_ERROR) {
+							DHD_ERROR(("%s %d: tcpack_suppress ERROR!!!"
+								" Stop using it\n",
+								__FUNCTION__, __LINE__));
+							dhd_tcpack_suppress_set(dhdp,
+								TCPACK_SUP_OFF);
+						}
+#endif /* DHDTCPACK_SUPPRESS */
+					}
+				} else if (q_type == Q_TYPE_AFQ) {
+					wlfc_mac_descriptor_t* entry =
+						_dhd_wlfc_find_table_entry(ctx, p);
+					entry->transit_count--;
+					if (entry->suppressed &&
+						(--entry->suppr_transit_count == 0)) {
+						entry->suppressed = FALSE;
+					}
+					_dhd_wlfc_return_implied_credit(ctx, p);
+					ctx->stats.cleanup_fw_cnt++;
+				}
+				PKTSETLINK(p, NULL);
+				if (dir) {
+					ctx->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(p))][prec>>1]--;
+					ctx->stats.pktout++;
+					dhd_txcomplete(dhdp, p, FALSE);
+				}
+				PKTFREE(ctx->osh, p, dir);
+
+				q->len--;
+				pq->len--;
+				p = (head ? q->head : PKTLINK(prev));
+			} else {
+				prev = p;
+				p = PKTLINK(p);
+			}
+		}
+
+		if (q->head == NULL) {
+			ASSERT(q->len == 0);
+			q->tail = NULL;
+		}
+
+	}
+
+	if (fn == NULL)
+		ASSERT(pq->len == 0);
+}
+
+static void*
+_dhd_wlfc_pktq_pdeq_with_fn(struct pktq *pq, int prec, f_processpkt_t fn, void *arg)
+{
+	struct pktq_prec *q;
+	void *p, *prev = NULL;
+
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+
+	q = &pq->q[prec];
+	p = q->head;
+
+	while (p) {
+		if (fn == NULL || (*fn)(p, arg)) {
+			break;
+		} else {
+			prev = p;
+			p = PKTLINK(p);
+		}
+	}
+	if (p == NULL)
+		return NULL;
+
+	if (prev == NULL) {
+		if ((q->head = PKTLINK(p)) == NULL) {
+			q->tail = NULL;
+		}
+	} else {
+		PKTSETLINK(prev, PKTLINK(p));
+		if (q->tail == p) {
+			q->tail = prev;
+		}
+	}
+
+	q->len--;
+
+	pq->len--;
+
+	PKTSETLINK(p, NULL);
+
+	return p;
+}
+
+static void
+_dhd_wlfc_cleanup_txq(dhd_pub_t *dhd, f_processpkt_t fn, void *arg)
+{
+	int prec;
+	void *pkt = NULL, *head = NULL, *tail = NULL;
+	struct pktq *txq = (struct pktq *)dhd_bus_txq(dhd->bus);
+	athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+	wlfc_hanger_t* h = (wlfc_hanger_t*)wlfc->hanger;
+	wlfc_mac_descriptor_t* entry;
+
+	dhd_os_sdlock_txq(dhd);
+	for (prec = 0; prec < txq->num_prec; prec++) {
+		while ((pkt = _dhd_wlfc_pktq_pdeq_with_fn(txq, prec, fn, arg))) {
+#ifdef DHDTCPACK_SUPPRESS
+			if (dhd_tcpack_check_xmit(dhd, pkt) == BCME_ERROR) {
+				DHD_ERROR(("%s %d: tcpack_suppress ERROR!!! Stop using it\n",
+					__FUNCTION__, __LINE__));
+				dhd_tcpack_suppress_set(dhd, TCPACK_SUP_OFF);
+			}
+#endif /* DHDTCPACK_SUPPRESS */
+			if (!head) {
+				head = pkt;
+			}
+			if (tail) {
+				PKTSETLINK(tail, pkt);
+			}
+			tail = pkt;
+		}
+	}
+	dhd_os_sdunlock_txq(dhd);
+
+
+	while ((pkt = head)) {
+		head = PKTLINK(pkt);
+		PKTSETLINK(pkt, NULL);
+		entry = _dhd_wlfc_find_table_entry(wlfc, pkt);
+
+		if (!WLFC_GET_AFQ(dhd->wlfc_mode) &&
+			!_dhd_wlfc_hanger_remove_reference(h, pkt)) {
+			DHD_ERROR(("%s: can't find pkt(%p) in hanger, free it anyway\n",
+				__FUNCTION__, pkt));
+		}
+		entry->transit_count--;
+		if (entry->suppressed &&
+			(--entry->suppr_transit_count == 0)) {
+			entry->suppressed = FALSE;
+		}
+		_dhd_wlfc_return_implied_credit(wlfc, pkt);
+		wlfc->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(pkt))][DHD_PKTTAG_FIFO(PKTTAG(pkt))]--;
+		wlfc->stats.pktout++;
+		wlfc->stats.cleanup_txq_cnt++;
+		dhd_txcomplete(dhd, pkt, FALSE);
+		PKTFREE(wlfc->osh, pkt, TRUE);
+	}
+}
+
+void
+_dhd_wlfc_cleanup(dhd_pub_t *dhd, f_processpkt_t fn, void *arg)
+{
+	int i;
+	int total_entries;
+	athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+	wlfc_mac_descriptor_t* table;
+	wlfc_hanger_t* h = (wlfc_hanger_t*)wlfc->hanger;
+
+	wlfc->stats.cleanup_txq_cnt = 0;
+	wlfc->stats.cleanup_psq_cnt = 0;
+	wlfc->stats.cleanup_fw_cnt = 0;
+	/*
+	*  flush sequence shoulde be txq -> psq -> hanger/afq, hanger has to be last one
+	*/
+	/* flush bus->txq */
+	_dhd_wlfc_cleanup_txq(dhd, fn, arg);
+
+
+	/* flush psq, search all entries, include nodes as well as interfaces */
+	total_entries = sizeof(wlfc->destination_entries)/sizeof(wlfc_mac_descriptor_t);
+	table = (wlfc_mac_descriptor_t*)&wlfc->destination_entries;
+
+	for (i = 0; i < total_entries; i++) {
+		if (table[i].occupied) {
+			/* release packets held in PSQ (both delayed and suppressed) */
+			if (table[i].psq.len) {
+				WLFC_DBGMESG(("%s(): PSQ[%d].len = %d\n",
+					__FUNCTION__, i, table[i].psq.len));
+				_dhd_wlfc_pktq_flush(wlfc, &table[i].psq, TRUE,
+					fn, arg, Q_TYPE_PSQ);
+			}
+
+			/* free packets held in AFQ */
+			if (WLFC_GET_AFQ(dhd->wlfc_mode) && (table[i].afq.len)) {
+				_dhd_wlfc_pktq_flush(wlfc, &table[i].afq, TRUE,
+					fn, arg, Q_TYPE_AFQ);
+			}
+
+			if ((fn == NULL) && (&table[i] != &wlfc->destination_entries.other)) {
+				table[i].occupied = 0;
+				if (table[i].transit_count || table[i].suppr_transit_count) {
+					DHD_ERROR(("%s: table[%d] transit(%d), suppr_tansit(%d)\n",
+						__FUNCTION__, i,
+						table[i].transit_count,
+						table[i].suppr_transit_count));
+				}
+			}
+		}
+	}
+
+	/*
+		. flush remained pkt in hanger queue, not in bus->txq nor psq.
+		. the remained pkt was successfully downloaded to dongle already.
+		. hanger slot state cannot be set to free until receive txstatus update.
+	*/
+	if (!WLFC_GET_AFQ(dhd->wlfc_mode)) {
+		for (i = 0; i < h->max_items; i++) {
+			if ((h->items[i].state == WLFC_HANGER_ITEM_STATE_INUSE) ||
+				(h->items[i].state == WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED)) {
+				if (fn == NULL || (*fn)(h->items[i].pkt, arg)) {
+					_dhd_wlfc_hanger_free_pkt(wlfc, i,
+						WLFC_HANGER_PKT_STATE_CLEANUP, FALSE);
+				}
+			}
+		}
+	}
+
+	return;
+}
+
+static int
+_dhd_wlfc_mac_entry_update(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* entry,
+	uint8 action, uint8 ifid, uint8 iftype, uint8* ea,
+	f_processpkt_t fn, void *arg)
+{
+	int rc = BCME_OK;
+
+
+	if ((action == eWLFC_MAC_ENTRY_ACTION_ADD) || (action == eWLFC_MAC_ENTRY_ACTION_UPDATE)) {
+		entry->occupied = 1;
+		entry->state = WLFC_STATE_OPEN;
+		entry->requested_credit = 0;
+		entry->interface_id = ifid;
+		entry->iftype = iftype;
+		entry->ac_bitmap = 0xff; /* update this when handling APSD */
+		/* for an interface entry we may not care about the MAC address */
+		if (ea != NULL)
+			memcpy(&entry->ea[0], ea, ETHER_ADDR_LEN);
+
+		if (action == eWLFC_MAC_ENTRY_ACTION_ADD) {
+			dhd_pub_t *dhdp = (dhd_pub_t *)(ctx->dhdp);
+			pktq_init(&entry->psq, WLFC_PSQ_PREC_COUNT, WLFC_PSQ_LEN);
+			if (WLFC_GET_AFQ(dhdp->wlfc_mode)) {
+				pktq_init(&entry->afq, WLFC_AFQ_PREC_COUNT, WLFC_PSQ_LEN);
+			}
+
+			if (entry->next == NULL) {
+				/* not linked to anywhere, add to tail */
+				if (ctx->active_entry_head) {
+					entry->prev = ctx->active_entry_head->prev;
+					ctx->active_entry_head->prev->next = entry;
+					ctx->active_entry_head->prev = entry;
+					entry->next = ctx->active_entry_head;
+
+				} else {
+					ASSERT(ctx->active_entry_count == 0);
+					entry->prev = entry->next = entry;
+					ctx->active_entry_head = entry;
+				}
+				ctx->active_entry_count++;
+			} else {
+				DHD_ERROR(("%s():%d, entry(%d)\n", __FUNCTION__, __LINE__,
+					(int)(entry - &ctx->destination_entries.nodes[0])));
+			}
+		}
+	} else if (action == eWLFC_MAC_ENTRY_ACTION_DEL) {
+		/* When the entry is deleted, the packets that are queued in the entry must be
+		   cleanup. The cleanup action should be before the occupied is set as 0.
+		*/
+		_dhd_wlfc_cleanup(ctx->dhdp, fn, arg);
+		_dhd_wlfc_flow_control_check(ctx, &entry->psq, ifid);
+
+		entry->occupied = 0;
+		entry->suppressed = 0;
+		entry->state = WLFC_STATE_CLOSE;
+		entry->requested_credit = 0;
+		entry->transit_count = 0;
+		entry->suppr_transit_count = 0;
+		memset(&entry->ea[0], 0, ETHER_ADDR_LEN);
+
+		if (entry->next) {
+			/* not floating, remove from Q */
+			if (ctx->active_entry_count <= 1) {
+				/* last item */
+				ctx->active_entry_head = NULL;
+				ctx->active_entry_count = 0;
+			} else {
+				entry->prev->next = entry->next;
+				entry->next->prev = entry->prev;
+				if (entry == ctx->active_entry_head) {
+					ctx->active_entry_head = entry->next;
+				}
+				ctx->active_entry_count--;
+			}
+			entry->next = entry->prev = NULL;
+		} else {
+			DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		}
+	}
+	return rc;
+}
+
+#ifdef LIMIT_BORROW
+static int
+_dhd_wlfc_borrow_credit(athost_wl_status_info_t* ctx, int highest_lender_ac, int borrower_ac,
+	bool bBorrowAll)
+{
+	int lender_ac, borrow_limit = 0;
+	int rc = -1;
+
+	if (ctx == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return -1;
+	}
+
+	/* Borrow from lowest priority available AC (including BC/MC credits) */
+	for (lender_ac = 0; lender_ac <= highest_lender_ac; lender_ac++) {
+		if (!bBorrowAll) {
+			borrow_limit = ctx->Init_FIFO_credit[lender_ac]/WLFC_BORROW_LIMIT_RATIO;
+		} else {
+			borrow_limit = 0;
+		}
+
+		if (ctx->FIFO_credit[lender_ac] > borrow_limit) {
+			ctx->credits_borrowed[borrower_ac][lender_ac]++;
+			ctx->FIFO_credit[lender_ac]--;
+			rc = lender_ac;
+			break;
+		}
+	}
+
+	return rc;
+}
+
+static int _dhd_wlfc_return_credit(athost_wl_status_info_t* ctx, int lender_ac, int borrower_ac)
+{
+	if ((ctx == NULL) || (lender_ac < 0) || (lender_ac > AC_COUNT) ||
+		(borrower_ac < 0) || (borrower_ac > AC_COUNT)) {
+		DHD_ERROR(("Error: %s():%d, ctx(%p), lender_ac(%d), borrower_ac(%d)\n",
+			__FUNCTION__, __LINE__, ctx, lender_ac, borrower_ac));
+
+		return BCME_BADARG;
+	}
+
+	ctx->credits_borrowed[borrower_ac][lender_ac]--;
+	ctx->FIFO_credit[lender_ac]++;
+
+	return BCME_OK;
+}
+#endif /* LIMIT_BORROW */
+
+static int
+_dhd_wlfc_interface_entry_update(void* state,
+	uint8 action, uint8 ifid, uint8 iftype, uint8* ea)
+{
+	athost_wl_status_info_t* ctx = (athost_wl_status_info_t*)state;
+	wlfc_mac_descriptor_t* entry;
+
+	if (ifid >= WLFC_MAX_IFNUM)
+		return BCME_BADARG;
+
+	entry = &ctx->destination_entries.interfaces[ifid];
+
+	return _dhd_wlfc_mac_entry_update(ctx, entry, action, ifid, iftype, ea,
+		_dhd_wlfc_ifpkt_fn, &ifid);
+}
+
+static int
+_dhd_wlfc_BCMCCredit_support_update(void* state)
+{
+	athost_wl_status_info_t* ctx = (athost_wl_status_info_t*)state;
+
+	ctx->bcmc_credit_supported = TRUE;
+	return BCME_OK;
+}
+
+static int
+_dhd_wlfc_FIFOcreditmap_update(void* state, uint8* credits)
+{
+	athost_wl_status_info_t* ctx = (athost_wl_status_info_t*)state;
+	int i;
+
+	for (i = 0; i <= 4; i++) {
+		if (ctx->Init_FIFO_credit[i] != ctx->FIFO_credit[i]) {
+			DHD_ERROR(("%s: credit[i] is not returned, (%d %d)\n",
+				__FUNCTION__, ctx->Init_FIFO_credit[i], ctx->FIFO_credit[i]));
+		}
+	}
+
+	/* update the AC FIFO credit map */
+	ctx->FIFO_credit[0] += (credits[0] - ctx->Init_FIFO_credit[0]);
+	ctx->FIFO_credit[1] += (credits[1] - ctx->Init_FIFO_credit[1]);
+	ctx->FIFO_credit[2] += (credits[2] - ctx->Init_FIFO_credit[2]);
+	ctx->FIFO_credit[3] += (credits[3] - ctx->Init_FIFO_credit[3]);
+	ctx->FIFO_credit[4] += (credits[4] - ctx->Init_FIFO_credit[4]);
+
+	ctx->Init_FIFO_credit[0] = credits[0];
+	ctx->Init_FIFO_credit[1] = credits[1];
+	ctx->Init_FIFO_credit[2] = credits[2];
+	ctx->Init_FIFO_credit[3] = credits[3];
+	ctx->Init_FIFO_credit[4] = credits[4];
+
+	/* credit for ATIM FIFO is not used yet. */
+	ctx->Init_FIFO_credit[5] = ctx->FIFO_credit[5] = 0;
+
+	return BCME_OK;
+}
+
+static int
+_dhd_wlfc_handle_packet_commit(athost_wl_status_info_t* ctx, int ac,
+    dhd_wlfc_commit_info_t *commit_info, f_commitpkt_t fcommit, void* commit_ctx)
+{
+	uint32 hslot;
+	int	rc;
+	dhd_pub_t *dhdp = (dhd_pub_t *)(ctx->dhdp);
+
+	/*
+		if ac_fifo_credit_spent = 0
+
+		This packet will not count against the FIFO credit.
+		To ensure the txstatus corresponding to this packet
+		does not provide an implied credit (default behavior)
+		mark the packet accordingly.
+
+		if ac_fifo_credit_spent = 1
+
+		This is a normal packet and it counts against the FIFO
+		credit count.
+	*/
+	DHD_PKTTAG_SETCREDITCHECK(PKTTAG(commit_info->p), commit_info->ac_fifo_credit_spent);
+	rc = _dhd_wlfc_pretx_pktprocess(ctx, commit_info->mac_entry, commit_info->p,
+	     commit_info->needs_hdr, &hslot);
+
+	if (rc == BCME_OK) {
+		rc = fcommit(commit_ctx, commit_info->p);
+		if (rc == BCME_OK) {
+			uint8 gen = WL_TXSTATUS_GET_GENERATION(
+				DHD_PKTTAG_H2DTAG(PKTTAG(commit_info->p)));
+			ctx->stats.pkt2bus++;
+			if (commit_info->ac_fifo_credit_spent || (ac == AC_COUNT)) {
+				ctx->stats.send_pkts[ac]++;
+				WLFC_HOST_FIFO_CREDIT_INC_SENTCTRS(ctx, ac);
+			}
+
+			if (gen != commit_info->mac_entry->generation) {
+				/* will be suppressed back by design */
+				if (!commit_info->mac_entry->suppressed) {
+					commit_info->mac_entry->suppressed = TRUE;
+				}
+				commit_info->mac_entry->suppr_transit_count++;
+			}
+			commit_info->mac_entry->transit_count++;
+		} else if (commit_info->needs_hdr) {
+			if (!WLFC_GET_AFQ(dhdp->wlfc_mode)) {
+				void *pout = NULL;
+				/* pop hanger for delayed packet */
+				_dhd_wlfc_hanger_poppkt(ctx->hanger, WL_TXSTATUS_GET_HSLOT(
+					DHD_PKTTAG_H2DTAG(PKTTAG(commit_info->p))), &pout, TRUE);
+				ASSERT(commit_info->p == pout);
+			}
+		}
+	} else {
+		ctx->stats.generic_error++;
+	}
+
+	if (rc != BCME_OK) {
+		/*
+		   pretx pkt process or bus commit has failed, rollback.
+		   - remove wl-header for a delayed packet
+		   - save wl-header header for suppressed packets
+		   - reset credit check flag
+		*/
+		_dhd_wlfc_rollback_packet_toq(ctx, commit_info->p, commit_info->pkt_type, hslot);
+		DHD_PKTTAG_SETCREDITCHECK(PKTTAG(commit_info->p), 0);
+	}
+
+	return rc;
+}
+
+static uint8
+_dhd_wlfc_find_mac_desc_id_from_mac(dhd_pub_t *dhdp, uint8* ea)
+{
+	wlfc_mac_descriptor_t* table =
+		((athost_wl_status_info_t*)dhdp->wlfc_state)->destination_entries.nodes;
+	uint8 table_index;
+
+	if (ea != NULL) {
+		for (table_index = 0; table_index < WLFC_MAC_DESC_TABLE_SIZE; table_index++) {
+			if ((memcmp(ea, &table[table_index].ea[0], ETHER_ADDR_LEN) == 0) &&
+				table[table_index].occupied)
+				return table_index;
+		}
+	}
+	return WLFC_MAC_DESC_ID_INVALID;
+}
+
+static int
+_dhd_wlfc_compressed_txstatus_update(dhd_pub_t *dhd, uint8* pkt_info, uint8 len, void** p_mac)
+{
+	uint8 status_flag;
+	uint32 status;
+	int ret = BCME_OK;
+	int remove_from_hanger = 1;
+	void* pktbuf = NULL;
+	uint8 fifo_id = 0, gen = 0, count = 0, hcnt;
+	uint16 hslot;
+	wlfc_mac_descriptor_t* entry = NULL;
+	athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+	uint16 seq = 0, seq_fromfw = 0, seq_num = 0;
+
+	memcpy(&status, pkt_info, sizeof(uint32));
+	status_flag = WL_TXSTATUS_GET_FLAGS(status);
+	hcnt = WL_TXSTATUS_GET_FREERUNCTR(status);
+	hslot = WL_TXSTATUS_GET_HSLOT(status);
+	fifo_id = WL_TXSTATUS_GET_FIFO(status);
+	gen = WL_TXSTATUS_GET_GENERATION(status);
+
+	if (WLFC_GET_REUSESEQ(dhd->wlfc_mode)) {
+		memcpy(&seq, pkt_info + WLFC_CTL_VALUE_LEN_TXSTATUS, WLFC_CTL_VALUE_LEN_SEQ);
+		seq_fromfw = WL_SEQ_GET_FROMFW(seq);
+		seq_num = WL_SEQ_GET_NUM(seq);
+	}
+
+	wlfc->stats.txstatus_in += len;
+
+	if (status_flag == WLFC_CTL_PKTFLAG_DISCARD) {
+		wlfc->stats.pkt_freed += len;
+	}
+
+	else if (status_flag == WLFC_CTL_PKTFLAG_DISCARD_NOACK) {
+		wlfc->stats.pkt_freed += len;
+	}
+
+	else if (status_flag == WLFC_CTL_PKTFLAG_D11SUPPRESS) {
+		wlfc->stats.d11_suppress += len;
+		remove_from_hanger = 0;
+	}
+
+	else if (status_flag == WLFC_CTL_PKTFLAG_WLSUPPRESS) {
+		wlfc->stats.wl_suppress += len;
+		remove_from_hanger = 0;
+	}
+
+	else if (status_flag == WLFC_CTL_PKTFLAG_TOSSED_BYWLC) {
+		wlfc->stats.wlc_tossed_pkts += len;
+	}
+
+	if (dhd->proptxstatus_txstatus_ignore) {
+		if (!remove_from_hanger) {
+			DHD_ERROR(("suppress txstatus: %d\n", status_flag));
+		}
+		return BCME_OK;
+	}
+
+	while (count < len) {
+		if (WLFC_GET_AFQ(dhd->wlfc_mode)) {
+			ret = _dhd_wlfc_deque_afq(wlfc, hslot, hcnt, fifo_id, &pktbuf);
+		} else {
+			ret = _dhd_wlfc_hanger_poppkt(wlfc->hanger, hslot, &pktbuf, FALSE);
+			if (!pktbuf) {
+				_dhd_wlfc_hanger_free_pkt(wlfc, hslot,
+					WLFC_HANGER_PKT_STATE_TXSTATUS, -1);
+				goto cont;
+			}
+		}
+
+		if ((ret != BCME_OK) || !pktbuf) {
+			goto cont;
+		}
+
+		/* set fifo_id to correct value because not all FW does that */
+		fifo_id = DHD_PKTTAG_FIFO(PKTTAG(pktbuf));
+
+		entry = _dhd_wlfc_find_table_entry(wlfc, pktbuf);
+
+		if (!remove_from_hanger) {
+			/* this packet was suppressed */
+			if (!entry->suppressed || (entry->generation != gen)) {
+				if (!entry->suppressed) {
+					entry->suppr_transit_count = entry->transit_count;
+					if (p_mac) {
+						*p_mac = entry;
+					}
+				} else {
+					DHD_ERROR(("gen(%d), entry->generation(%d)\n",
+						gen, entry->generation));
+				}
+				entry->suppressed = TRUE;
+
+			}
+			entry->generation = gen;
+		}
+
+#ifdef PROP_TXSTATUS_DEBUG
+		if (!WLFC_GET_AFQ(dhd->wlfc_mode))
+		{
+			uint32 new_t = OSL_SYSUPTIME();
+			uint32 old_t;
+			uint32 delta;
+			old_t = ((wlfc_hanger_t*)(wlfc->hanger))->items[hslot].push_time;
+
+
+			wlfc->stats.latency_sample_count++;
+			if (new_t > old_t)
+				delta = new_t - old_t;
+			else
+				delta = 0xffffffff + new_t - old_t;
+			wlfc->stats.total_status_latency += delta;
+			wlfc->stats.latency_most_recent = delta;
+
+			wlfc->stats.deltas[wlfc->stats.idx_delta++] = delta;
+			if (wlfc->stats.idx_delta == sizeof(wlfc->stats.deltas)/sizeof(uint32))
+				wlfc->stats.idx_delta = 0;
+		}
+#endif /* PROP_TXSTATUS_DEBUG */
+
+		/* pick up the implicit credit from this packet */
+		if (DHD_PKTTAG_CREDITCHECK(PKTTAG(pktbuf))) {
+			_dhd_wlfc_return_implied_credit(wlfc, pktbuf);
+		} else {
+			/*
+			if this packet did not count against FIFO credit, it must have
+			taken a requested_credit from the destination entry (for pspoll etc.)
+			*/
+			if (!DHD_PKTTAG_ONETIMEPKTRQST(PKTTAG(pktbuf)))
+				entry->requested_credit++;
+#ifdef PROP_TXSTATUS_DEBUG
+			entry->dstncredit_acks++;
+#endif
+		}
+
+		if ((status_flag == WLFC_CTL_PKTFLAG_D11SUPPRESS) ||
+			(status_flag == WLFC_CTL_PKTFLAG_WLSUPPRESS)) {
+			/* save generation bit inside packet */
+			WL_TXSTATUS_SET_GENERATION(DHD_PKTTAG_H2DTAG(PKTTAG(pktbuf)), gen);
+
+			if (WLFC_GET_REUSESEQ(dhd->wlfc_mode)) {
+				WL_SEQ_SET_FROMDRV(DHD_PKTTAG_H2DSEQ(PKTTAG(pktbuf)), seq_fromfw);
+				WL_SEQ_SET_NUM(DHD_PKTTAG_H2DSEQ(PKTTAG(pktbuf)), seq_num);
+			}
+
+			ret = _dhd_wlfc_enque_suppressed(wlfc, fifo_id, pktbuf);
+			if (ret != BCME_OK) {
+				/* delay q is full, drop this packet */
+				DHD_WLFC_QMON_COMPLETE(entry);
+				_dhd_wlfc_prec_drop(dhd, (fifo_id << 1) + 1, pktbuf, FALSE);
+			} else {
+				if (!WLFC_GET_AFQ(dhd->wlfc_mode)) {
+					/* Mark suppressed to avoid a double free
+					during wlfc cleanup
+					*/
+					_dhd_wlfc_hanger_mark_suppressed(wlfc->hanger, hslot, gen);
+				}
+			}
+		} else {
+
+			DHD_WLFC_QMON_COMPLETE(entry);
+
+			if (!WLFC_GET_AFQ(dhd->wlfc_mode)) {
+				_dhd_wlfc_hanger_free_pkt(wlfc, hslot,
+					WLFC_HANGER_PKT_STATE_TXSTATUS, TRUE);
+			} else {
+				dhd_txcomplete(dhd, pktbuf, TRUE);
+				wlfc->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(pktbuf))]
+					[DHD_PKTTAG_FIFO(PKTTAG(pktbuf))]--;
+				wlfc->stats.pktout++;
+				/* free the packet */
+				PKTFREE(wlfc->osh, pktbuf, TRUE);
+			}
+		}
+		/* pkt back from firmware side */
+		entry->transit_count--;
+		if (entry->suppressed && (--entry->suppr_transit_count == 0)) {
+			entry->suppressed = FALSE;
+		}
+
+cont:
+		hcnt = (hcnt + 1) & WL_TXSTATUS_FREERUNCTR_MASK;
+		if (!WLFC_GET_AFQ(dhd->wlfc_mode)) {
+			hslot = (hslot + 1) & WL_TXSTATUS_HSLOT_MASK;
+		}
+
+		if (WLFC_GET_REUSESEQ(dhd->wlfc_mode) && seq_fromfw) {
+			seq_num = (seq_num + 1) & WL_SEQ_NUM_MASK;
+		}
+
+		count++;
+	}
+	return BCME_OK;
+}
+
+static int
+_dhd_wlfc_fifocreditback_indicate(dhd_pub_t *dhd, uint8* credits)
+{
+	int i;
+	athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+	for (i = 0; i < WLFC_CTL_VALUE_LEN_FIFO_CREDITBACK; i++) {
+#ifdef PROP_TXSTATUS_DEBUG
+		wlfc->stats.fifo_credits_back[i] += credits[i];
+#endif
+
+		/* update FIFO credits */
+		if (dhd->proptxstatus_mode == WLFC_FCMODE_EXPLICIT_CREDIT)
+		{
+			int lender; /* Note that borrower is i */
+
+			/* Return credits to highest priority lender first */
+			for (lender = AC_COUNT; (lender >= 0) && (credits[i] > 0); lender--) {
+				if (wlfc->credits_borrowed[i][lender] > 0) {
+					if (credits[i] >= wlfc->credits_borrowed[i][lender]) {
+						credits[i] -=
+							(uint8)wlfc->credits_borrowed[i][lender];
+						wlfc->FIFO_credit[lender] +=
+						    wlfc->credits_borrowed[i][lender];
+						wlfc->credits_borrowed[i][lender] = 0;
+					}
+					else {
+						wlfc->credits_borrowed[i][lender] -= credits[i];
+						wlfc->FIFO_credit[lender] += credits[i];
+						credits[i] = 0;
+					}
+				}
+			}
+
+			/* If we have more credits left over, these must belong to the AC */
+			if (credits[i] > 0) {
+				wlfc->FIFO_credit[i] += credits[i];
+			}
+
+			if (wlfc->FIFO_credit[i] > wlfc->Init_FIFO_credit[i]) {
+				wlfc->FIFO_credit[i] = wlfc->Init_FIFO_credit[i];
+			}
+		}
+	}
+
+	return BCME_OK;
+}
+
+static void
+_dhd_wlfc_suppress_txq(dhd_pub_t *dhd, f_processpkt_t fn, void *arg)
+{
+	athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+	wlfc_mac_descriptor_t* entry;
+	int prec;
+	void *pkt = NULL, *head = NULL, *tail = NULL;
+	struct pktq *txq = (struct pktq *)dhd_bus_txq(dhd->bus);
+	uint8	results[WLFC_CTL_VALUE_LEN_TXSTATUS+WLFC_CTL_VALUE_LEN_SEQ];
+	uint8 credits[WLFC_CTL_VALUE_LEN_FIFO_CREDITBACK] = {0};
+	uint32 htod = 0;
+	uint16 htodseq = 0;
+	bool bCreditUpdate = FALSE;
+
+	dhd_os_sdlock_txq(dhd);
+	for (prec = 0; prec < txq->num_prec; prec++) {
+		while ((pkt = _dhd_wlfc_pktq_pdeq_with_fn(txq, prec, fn, arg))) {
+			if (!head) {
+				head = pkt;
+			}
+			if (tail) {
+				PKTSETLINK(tail, pkt);
+			}
+			tail = pkt;
+		}
+	}
+	dhd_os_sdunlock_txq(dhd);
+
+	while ((pkt = head)) {
+		head = PKTLINK(pkt);
+		PKTSETLINK(pkt, NULL);
+
+		entry = _dhd_wlfc_find_table_entry(wlfc, pkt);
+
+		/* fake a suppression txstatus */
+		htod = DHD_PKTTAG_H2DTAG(PKTTAG(pkt));
+		WL_TXSTATUS_SET_FLAGS(htod, WLFC_CTL_PKTFLAG_WLSUPPRESS);
+		WL_TXSTATUS_SET_GENERATION(htod, entry->generation);
+		memcpy(results, &htod, WLFC_CTL_VALUE_LEN_TXSTATUS);
+		if (WLFC_GET_REUSESEQ(dhd->wlfc_mode)) {
+			htodseq = DHD_PKTTAG_H2DSEQ(PKTTAG(pkt));
+			if (WL_SEQ_GET_FROMDRV(htodseq)) {
+				WL_SEQ_SET_FROMFW(htodseq, 1);
+				WL_SEQ_SET_FROMDRV(htodseq, 0);
+			}
+			memcpy(results + WLFC_CTL_VALUE_LEN_TXSTATUS, &htodseq,
+				WLFC_CTL_VALUE_LEN_SEQ);
+		}
+		if (WLFC_GET_AFQ(dhd->wlfc_mode)) {
+			_dhd_wlfc_enque_afq(wlfc, pkt);
+		}
+		_dhd_wlfc_compressed_txstatus_update(dhd, results, 1, NULL);
+
+		/* fake a fifo credit back */
+		if (DHD_PKTTAG_CREDITCHECK(PKTTAG(pkt))) {
+			credits[DHD_PKTTAG_FIFO(PKTTAG(pkt))]++;
+			bCreditUpdate = TRUE;
+		}
+	}
+
+	if (bCreditUpdate) {
+		_dhd_wlfc_fifocreditback_indicate(dhd, credits);
+	}
+}
+
+
+static int
+_dhd_wlfc_dbg_senum_check(dhd_pub_t *dhd, uint8 *value)
+{
+	uint32 timestamp;
+
+	(void)dhd;
+
+	bcopy(&value[2], &timestamp, sizeof(uint32));
+	DHD_INFO(("RXPKT: SEQ: %d, timestamp %d\n", value[1], timestamp));
+	return BCME_OK;
+}
+
+static int
+_dhd_wlfc_rssi_indicate(dhd_pub_t *dhd, uint8* rssi)
+{
+	(void)dhd;
+	(void)rssi;
+	return BCME_OK;
+}
+
+static void
+_dhd_wlfc_add_requested_entry(athost_wl_status_info_t* wlfc, wlfc_mac_descriptor_t* entry)
+{
+	int i;
+
+	if (!wlfc || !entry) {
+		return;
+	}
+
+	for (i = 0; i < wlfc->requested_entry_count; i++) {
+		if (entry == wlfc->requested_entry[i]) {
+			break;
+		}
+	}
+
+	if (i == wlfc->requested_entry_count) {
+		/* no match entry found */
+		ASSERT(wlfc->requested_entry_count <= (WLFC_MAC_DESC_TABLE_SIZE-1));
+		wlfc->requested_entry[wlfc->requested_entry_count++] = entry;
+	}
+}
+
+static void
+_dhd_wlfc_remove_requested_entry(athost_wl_status_info_t* wlfc, wlfc_mac_descriptor_t* entry)
+{
+	int i;
+
+	if (!wlfc || !entry) {
+		return;
+	}
+
+	for (i = 0; i < wlfc->requested_entry_count; i++) {
+		if (entry == wlfc->requested_entry[i]) {
+			break;
+		}
+	}
+
+	if (i < wlfc->requested_entry_count) {
+		/* found */
+		ASSERT(wlfc->requested_entry_count > 0);
+		wlfc->requested_entry_count--;
+		if (i != wlfc->requested_entry_count) {
+			wlfc->requested_entry[i] =
+				wlfc->requested_entry[wlfc->requested_entry_count];
+		}
+		wlfc->requested_entry[wlfc->requested_entry_count] = NULL;
+	}
+}
+
+static int
+_dhd_wlfc_mac_table_update(dhd_pub_t *dhd, uint8* value, uint8 type)
+{
+	int rc;
+	athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+	wlfc_mac_descriptor_t* table;
+	uint8 existing_index;
+	uint8 table_index;
+	uint8 ifid;
+	uint8* ea;
+
+	WLFC_DBGMESG(("%s(), mac [%02x:%02x:%02x:%02x:%02x:%02x],%s,idx:%d,id:0x%02x\n",
+		__FUNCTION__, value[2], value[3], value[4], value[5], value[6], value[7],
+		((type == WLFC_CTL_TYPE_MACDESC_ADD) ? "ADD":"DEL"),
+		WLFC_MAC_DESC_GET_LOOKUP_INDEX(value[0]), value[0]));
+
+	table = wlfc->destination_entries.nodes;
+	table_index = WLFC_MAC_DESC_GET_LOOKUP_INDEX(value[0]);
+	ifid = value[1];
+	ea = &value[2];
+
+	_dhd_wlfc_remove_requested_entry(wlfc, &table[table_index]);
+	if (type == WLFC_CTL_TYPE_MACDESC_ADD) {
+		existing_index = _dhd_wlfc_find_mac_desc_id_from_mac(dhd, &value[2]);
+		if ((existing_index != WLFC_MAC_DESC_ID_INVALID) &&
+			(existing_index != table_index) && table[existing_index].occupied) {
+			/*
+			there is an existing different entry, free the old one
+			and move it to new index if necessary.
+			*/
+			rc = _dhd_wlfc_mac_entry_update(wlfc, &table[existing_index],
+				eWLFC_MAC_ENTRY_ACTION_DEL, table[existing_index].interface_id,
+				table[existing_index].iftype, NULL, _dhd_wlfc_entrypkt_fn,
+				&table[existing_index]);
+		}
+
+		if (!table[table_index].occupied) {
+			/* this new MAC entry does not exist, create one */
+			table[table_index].mac_handle = value[0];
+			rc = _dhd_wlfc_mac_entry_update(wlfc, &table[table_index],
+				eWLFC_MAC_ENTRY_ACTION_ADD, ifid,
+				wlfc->destination_entries.interfaces[ifid].iftype,
+				ea, NULL, NULL);
+		} else {
+			/* the space should have been empty, but it's not */
+			wlfc->stats.mac_update_failed++;
+		}
+	}
+
+	if (type == WLFC_CTL_TYPE_MACDESC_DEL) {
+		if (table[table_index].occupied) {
+				rc = _dhd_wlfc_mac_entry_update(wlfc, &table[table_index],
+					eWLFC_MAC_ENTRY_ACTION_DEL, ifid,
+					wlfc->destination_entries.interfaces[ifid].iftype,
+					ea, _dhd_wlfc_entrypkt_fn, &table[table_index]);
+		} else {
+			/* the space should have been occupied, but it's not */
+			wlfc->stats.mac_update_failed++;
+		}
+	}
+	BCM_REFERENCE(rc);
+	return BCME_OK;
+}
+
+static int
+_dhd_wlfc_psmode_update(dhd_pub_t *dhd, uint8* value, uint8 type)
+{
+	/* Handle PS on/off indication */
+	athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+	wlfc_mac_descriptor_t* table;
+	wlfc_mac_descriptor_t* desc;
+	uint8 mac_handle = value[0];
+	int i;
+
+	table = wlfc->destination_entries.nodes;
+	desc = &table[WLFC_MAC_DESC_GET_LOOKUP_INDEX(mac_handle)];
+	if (desc->occupied) {
+		if (type == WLFC_CTL_TYPE_MAC_OPEN) {
+			desc->state = WLFC_STATE_OPEN;
+			desc->ac_bitmap = 0xff;
+			DHD_WLFC_CTRINC_MAC_OPEN(desc);
+			desc->requested_credit = 0;
+			desc->requested_packet = 0;
+			_dhd_wlfc_remove_requested_entry(wlfc, desc);
+		}
+		else {
+			desc->state = WLFC_STATE_CLOSE;
+			DHD_WLFC_CTRINC_MAC_CLOSE(desc);
+			/*
+			Indicate to firmware if there is any traffic pending.
+			*/
+			for (i = 0; i < AC_COUNT; i++) {
+				_dhd_wlfc_traffic_pending_check(wlfc, desc, i);
+			}
+		}
+	}
+	else {
+		wlfc->stats.psmode_update_failed++;
+	}
+	return BCME_OK;
+}
+
+static int
+_dhd_wlfc_interface_update(dhd_pub_t *dhd, uint8* value, uint8 type)
+{
+	/* Handle PS on/off indication */
+	athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+	wlfc_mac_descriptor_t* table;
+	uint8 if_id = value[0];
+
+	if (if_id < WLFC_MAX_IFNUM) {
+		table = wlfc->destination_entries.interfaces;
+		if (table[if_id].occupied) {
+			if (type == WLFC_CTL_TYPE_INTERFACE_OPEN) {
+				table[if_id].state = WLFC_STATE_OPEN;
+				/* WLFC_DBGMESG(("INTERFACE[%d] OPEN\n", if_id)); */
+			}
+			else {
+				table[if_id].state = WLFC_STATE_CLOSE;
+				/* WLFC_DBGMESG(("INTERFACE[%d] CLOSE\n", if_id)); */
+			}
+			return BCME_OK;
+		}
+	}
+	wlfc->stats.interface_update_failed++;
+
+	return BCME_OK;
+}
+
+static int
+_dhd_wlfc_credit_request(dhd_pub_t *dhd, uint8* value)
+{
+	athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+	wlfc_mac_descriptor_t* table;
+	wlfc_mac_descriptor_t* desc;
+	uint8 mac_handle;
+	uint8 credit;
+
+	table = wlfc->destination_entries.nodes;
+	mac_handle = value[1];
+	credit = value[0];
+
+	desc = &table[WLFC_MAC_DESC_GET_LOOKUP_INDEX(mac_handle)];
+	if (desc->occupied) {
+		desc->requested_credit = credit;
+
+		desc->ac_bitmap = value[2] & (~(1<<AC_COUNT));
+		_dhd_wlfc_add_requested_entry(wlfc, desc);
+	}
+	else {
+		wlfc->stats.credit_request_failed++;
+	}
+	return BCME_OK;
+}
+
+static int
+_dhd_wlfc_packet_request(dhd_pub_t *dhd, uint8* value)
+{
+	athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+	wlfc_mac_descriptor_t* table;
+	wlfc_mac_descriptor_t* desc;
+	uint8 mac_handle;
+	uint8 packet_count;
+
+	table = wlfc->destination_entries.nodes;
+	mac_handle = value[1];
+	packet_count = value[0];
+
+	desc = &table[WLFC_MAC_DESC_GET_LOOKUP_INDEX(mac_handle)];
+	if (desc->occupied) {
+		desc->requested_packet = packet_count;
+
+		desc->ac_bitmap = value[2] & (~(1<<AC_COUNT));
+		_dhd_wlfc_add_requested_entry(wlfc, desc);
+	}
+	else {
+		wlfc->stats.packet_request_failed++;
+	}
+	return BCME_OK;
+}
+
+static void
+_dhd_wlfc_reorderinfo_indicate(uint8 *val, uint8 len, uchar *info_buf, uint *info_len)
+{
+	if (info_len) {
+		if (info_buf) {
+			bcopy(val, info_buf, len);
+			*info_len = len;
+		}
+		else
+			*info_len = 0;
+	}
+}
+
+/*
+ * public functions
+ */
+
+bool dhd_wlfc_is_supported(dhd_pub_t *dhd)
+{
+	bool rc = TRUE;
+
+	if (dhd == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return FALSE;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+		rc =  FALSE;
+	}
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return rc;
+}
+
+int dhd_wlfc_enable(dhd_pub_t *dhd)
+{
+	int i, rc = BCME_OK;
+	athost_wl_status_info_t* wlfc;
+
+	if (dhd == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	if (!dhd->wlfc_enabled || dhd->wlfc_state) {
+		rc = BCME_OK;
+		goto exit;
+	}
+
+	/* allocate space to track txstatus propagated from firmware */
+#ifdef WLFC_STATE_PREALLOC
+	if (!dhd->wlfc_state)
+#endif
+	dhd->wlfc_state = MALLOC(dhd->osh, sizeof(athost_wl_status_info_t));
+	if (dhd->wlfc_state == NULL) {
+		rc = BCME_NOMEM;
+		goto exit;
+	}
+
+	/* initialize state space */
+	wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+	memset(wlfc, 0, sizeof(athost_wl_status_info_t));
+
+	/* remember osh & dhdp */
+	wlfc->osh = dhd->osh;
+	wlfc->dhdp = dhd;
+
+	if (!WLFC_GET_AFQ(dhd->wlfc_mode)) {
+		wlfc->hanger = _dhd_wlfc_hanger_create(dhd->osh, WLFC_HANGER_MAXITEMS);
+		if (wlfc->hanger == NULL) {
+#ifndef WLFC_STATE_PREALLOC
+			MFREE(dhd->osh, dhd->wlfc_state, sizeof(athost_wl_status_info_t));
+			dhd->wlfc_state = NULL;
+#endif
+			rc = BCME_NOMEM;
+			goto exit;
+		}
+	}
+
+	dhd->proptxstatus_mode = WLFC_FCMODE_EXPLICIT_CREDIT;
+	/* default to check rx pkt */
+	if (dhd->op_mode & DHD_FLAG_IBSS_MODE) {
+		dhd->wlfc_rxpkt_chk = FALSE;
+	} else {
+		dhd->wlfc_rxpkt_chk = TRUE;
+	}
+
+
+	/* initialize all interfaces to accept traffic */
+	for (i = 0; i < WLFC_MAX_IFNUM; i++) {
+		wlfc->hostif_flow_state[i] = OFF;
+	}
+
+	_dhd_wlfc_mac_entry_update(wlfc, &wlfc->destination_entries.other,
+		eWLFC_MAC_ENTRY_ACTION_ADD, 0xff, 0, NULL, NULL, NULL);
+
+	wlfc->allow_credit_borrow = 0;
+	wlfc->single_ac = 0;
+	wlfc->single_ac_timestamp = 0;
+
+
+exit:
+	dhd_os_wlfc_unblock(dhd);
+
+	return rc;
+}
+#ifdef SUPPORT_P2P_GO_PS
+int
+dhd_wlfc_suspend(dhd_pub_t *dhd)
+{
+
+	uint32 iovbuf[4]; /* Room for "tlv" + '\0' + parameter */
+	uint32 tlv = 0;
+
+	DHD_TRACE(("%s: masking wlfc events\n", __FUNCTION__));
+	if (!dhd->wlfc_enabled)
+		return -1;
+
+	bcm_mkiovar("tlv", NULL, 0, (char*)iovbuf, sizeof(iovbuf));
+	if (dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0) < 0) {
+		DHD_ERROR(("%s: failed to get bdcv2 tlv signaling\n", __FUNCTION__));
+		return -1;
+	}
+	tlv = iovbuf[0];
+	if ((tlv & (WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS)) == 0)
+		return 0;
+	tlv &= ~(WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS);
+	bcm_mkiovar("tlv", (char *)&tlv, 4, (char*)iovbuf, sizeof(iovbuf));
+	if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0) < 0) {
+		DHD_ERROR(("%s: failed to set bdcv2 tlv signaling to 0x%x\n",
+			__FUNCTION__, tlv));
+		return -1;
+	}
+
+	return 0;
+}
+
+	int
+dhd_wlfc_resume(dhd_pub_t *dhd)
+{
+	uint32 iovbuf[4]; /* Room for "tlv" + '\0' + parameter */
+	uint32 tlv = 0;
+
+	DHD_TRACE(("%s: unmasking wlfc events\n", __FUNCTION__));
+	if (!dhd->wlfc_enabled)
+		return -1;
+
+	bcm_mkiovar("tlv", NULL, 0, (char*)iovbuf, sizeof(iovbuf));
+	if (dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0) < 0) {
+		DHD_ERROR(("%s: failed to get bdcv2 tlv signaling\n", __FUNCTION__));
+		return -1;
+	}
+	tlv = iovbuf[0];
+	if ((tlv & (WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS)) ==
+		(WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS))
+		return 0;
+	tlv |= (WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS);
+	bcm_mkiovar("tlv", (char *)&tlv, 4, (char*)iovbuf, sizeof(iovbuf));
+	if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, (char*)iovbuf, sizeof(iovbuf), TRUE, 0) < 0) {
+		DHD_ERROR(("%s: failed to set bdcv2 tlv signaling to 0x%x\n",
+			__FUNCTION__, tlv));
+		return -1;
+	}
+
+	return 0;
+}
+#endif /* SUPPORT_P2P_GO_PS */
+
+int
+dhd_wlfc_parse_header_info(dhd_pub_t *dhd, void* pktbuf, int tlv_hdr_len, uchar *reorder_info_buf,
+	uint *reorder_info_len)
+{
+	uint8 type, len;
+	uint8* value;
+	uint8* tmpbuf;
+	uint16 remainder = (uint16)tlv_hdr_len;
+	uint16 processed = 0;
+	athost_wl_status_info_t* wlfc = NULL;
+	void* entry;
+
+	if ((dhd == NULL) || (pktbuf == NULL)) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	if (dhd->proptxstatus_mode != WLFC_ONLY_AMPDU_HOSTREORDER) {
+		if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+			dhd_os_wlfc_unblock(dhd);
+			return WLFC_UNSUPPORTED;
+		}
+		wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+	}
+
+	tmpbuf = (uint8*)PKTDATA(dhd->osh, pktbuf);
+
+	if (remainder) {
+		while ((processed < (WLFC_MAX_PENDING_DATALEN * 2)) && (remainder > 0)) {
+			type = tmpbuf[processed];
+			if (type == WLFC_CTL_TYPE_FILLER) {
+				remainder -= 1;
+				processed += 1;
+				continue;
+			}
+
+			len  = tmpbuf[processed + 1];
+			value = &tmpbuf[processed + 2];
+
+			if (remainder < (2 + len))
+				break;
+
+			remainder -= 2 + len;
+			processed += 2 + len;
+			entry = NULL;
+
+			DHD_INFO(("%s():%d type %d remainder %d processed %d\n",
+				__FUNCTION__, __LINE__, type, remainder, processed));
+
+			if (type == WLFC_CTL_TYPE_HOST_REORDER_RXPKTS)
+				_dhd_wlfc_reorderinfo_indicate(value, len, reorder_info_buf,
+					reorder_info_len);
+
+			if (wlfc == NULL) {
+				ASSERT(dhd->proptxstatus_mode == WLFC_ONLY_AMPDU_HOSTREORDER);
+
+				if (type != WLFC_CTL_TYPE_HOST_REORDER_RXPKTS &&
+					type != WLFC_CTL_TYPE_TRANS_ID)
+					DHD_INFO(("%s():%d dhd->wlfc_state is NULL yet!"
+					" type %d remainder %d processed %d\n",
+					__FUNCTION__, __LINE__, type, remainder, processed));
+				continue;
+			}
+
+			if (type == WLFC_CTL_TYPE_TXSTATUS) {
+				_dhd_wlfc_compressed_txstatus_update(dhd, value, 1, &entry);
+			}
+			else if (type == WLFC_CTL_TYPE_COMP_TXSTATUS) {
+				uint8 compcnt_offset = WLFC_CTL_VALUE_LEN_TXSTATUS;
+
+				if (WLFC_GET_REUSESEQ(dhd->wlfc_mode)) {
+					compcnt_offset += WLFC_CTL_VALUE_LEN_SEQ;
+				}
+				_dhd_wlfc_compressed_txstatus_update(dhd, value,
+					value[compcnt_offset], &entry);
+			}
+			else if (type == WLFC_CTL_TYPE_FIFO_CREDITBACK)
+				_dhd_wlfc_fifocreditback_indicate(dhd, value);
+
+			else if (type == WLFC_CTL_TYPE_RSSI)
+				_dhd_wlfc_rssi_indicate(dhd, value);
+
+			else if (type == WLFC_CTL_TYPE_MAC_REQUEST_CREDIT)
+				_dhd_wlfc_credit_request(dhd, value);
+
+			else if (type == WLFC_CTL_TYPE_MAC_REQUEST_PACKET)
+				_dhd_wlfc_packet_request(dhd, value);
+
+			else if ((type == WLFC_CTL_TYPE_MAC_OPEN) ||
+				(type == WLFC_CTL_TYPE_MAC_CLOSE))
+				_dhd_wlfc_psmode_update(dhd, value, type);
+
+			else if ((type == WLFC_CTL_TYPE_MACDESC_ADD) ||
+				(type == WLFC_CTL_TYPE_MACDESC_DEL))
+				_dhd_wlfc_mac_table_update(dhd, value, type);
+
+			else if (type == WLFC_CTL_TYPE_TRANS_ID)
+				_dhd_wlfc_dbg_senum_check(dhd, value);
+
+			else if ((type == WLFC_CTL_TYPE_INTERFACE_OPEN) ||
+				(type == WLFC_CTL_TYPE_INTERFACE_CLOSE)) {
+				_dhd_wlfc_interface_update(dhd, value, type);
+			}
+
+			if (entry && WLFC_GET_REORDERSUPP(dhd->wlfc_mode)) {
+				/* suppress all packets for this mac entry from bus->txq */
+				_dhd_wlfc_suppress_txq(dhd, _dhd_wlfc_entrypkt_fn, entry);
+			}
+		}
+		if (remainder != 0 && wlfc) {
+			/* trouble..., something is not right */
+			wlfc->stats.tlv_parse_failed++;
+		}
+	}
+
+	if (wlfc)
+		wlfc->stats.dhd_hdrpulls++;
+
+	dhd_os_wlfc_unblock(dhd);
+	return BCME_OK;
+}
+
+int
+dhd_wlfc_commit_packets(dhd_pub_t *dhdp, f_commitpkt_t fcommit, void* commit_ctx, void *pktbuf,
+	bool need_toggle_host_if)
+{
+	int ac, single_ac = 0, rc = BCME_OK;
+	dhd_wlfc_commit_info_t  commit_info;
+	athost_wl_status_info_t* ctx;
+	int bus_retry_count = 0;
+
+	uint8 tx_map = 0; /* packets (send + in queue), Bitmask for 4 ACs + BC/MC */
+	uint8 rx_map = 0; /* received packets, Bitmask for 4 ACs + BC/MC */
+	uint8 packets_map = 0; /* packets in queue, Bitmask for 4 ACs + BC/MC */
+	bool no_credit = FALSE;
+
+	int lender;
+
+	if ((dhdp == NULL) || (fcommit == NULL)) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhdp);
+
+	if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+		if (pktbuf) {
+			DHD_PKTTAG_WLFCPKT_SET(PKTTAG(pktbuf), 0);
+		}
+		rc =  WLFC_UNSUPPORTED;
+		goto exit2;
+	}
+
+	ctx = (athost_wl_status_info_t*)dhdp->wlfc_state;
+
+
+	if (dhdp->proptxstatus_module_ignore) {
+		if (pktbuf) {
+			uint32 htod = 0;
+			WL_TXSTATUS_SET_FLAGS(htod, WLFC_PKTFLAG_PKTFROMHOST);
+			_dhd_wlfc_pushheader(ctx, pktbuf, FALSE, 0, 0, htod, 0, FALSE);
+			if (fcommit(commit_ctx, pktbuf))
+				PKTFREE(ctx->osh, pktbuf, TRUE);
+			rc = BCME_OK;
+		}
+		goto exit;
+	}
+
+	memset(&commit_info, 0, sizeof(commit_info));
+
+	/*
+	Commit packets for regular AC traffic. Higher priority first.
+	First, use up FIFO credits available to each AC. Based on distribution
+	and credits left, borrow from other ACs as applicable
+
+	-NOTE:
+	If the bus between the host and firmware is overwhelmed by the
+	traffic from host, it is possible that higher priority traffic
+	starves the lower priority queue. If that occurs often, we may
+	have to employ weighted round-robin or ucode scheme to avoid
+	low priority packet starvation.
+	*/
+
+	if (pktbuf) {
+		DHD_PKTTAG_WLFCPKT_SET(PKTTAG(pktbuf), 1);
+		ac = DHD_PKTTAG_FIFO(PKTTAG(pktbuf));
+		/* en-queue the packets to respective queue. */
+		rc = _dhd_wlfc_enque_delayq(ctx, pktbuf, ac);
+		if (rc) {
+			_dhd_wlfc_prec_drop(ctx->dhdp, (ac << 1), pktbuf, FALSE);
+		} else {
+			ctx->stats.pktin++;
+			ctx->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(pktbuf))][ac]++;
+		}
+	}
+
+	for (ac = AC_COUNT; ac >= 0; ac--) {
+		if (dhdp->wlfc_rxpkt_chk) {
+			/* check rx packet */
+			uint32 curr_t = OSL_SYSUPTIME(), delta;
+
+			delta = curr_t - ctx->rx_timestamp[ac];
+			if (delta < WLFC_RX_DETECTION_THRESHOLD_MS) {
+				rx_map |= (1 << ac);
+			}
+		}
+
+		if (ctx->pkt_cnt_per_ac[ac] == 0) {
+			continue;
+		}
+		tx_map |= (1 << ac);
+		single_ac = ac + 1;
+		while (FALSE == dhdp->proptxstatus_txoff) {
+			/* packets from delayQ with less priority are fresh and
+			 * they'd need header and have no MAC entry
+			 */
+			no_credit = (ctx->FIFO_credit[ac] < 1);
+			if (dhdp->proptxstatus_credit_ignore ||
+				((ac == AC_COUNT) && !ctx->bcmc_credit_supported)) {
+				no_credit = FALSE;
+			}
+
+			lender = -1;
+#ifdef LIMIT_BORROW
+			if (no_credit && (ac < AC_COUNT) && (tx_map >= rx_map)) {
+				/* try borrow from lower priority */
+				lender = _dhd_wlfc_borrow_credit(ctx, ac - 1, ac, FALSE);
+				if (lender != -1) {
+					no_credit = FALSE;
+				}
+			}
+#endif
+			commit_info.needs_hdr = 1;
+			commit_info.mac_entry = NULL;
+			commit_info.p = _dhd_wlfc_deque_delayedq(ctx, ac,
+				&(commit_info.ac_fifo_credit_spent),
+				&(commit_info.needs_hdr),
+				&(commit_info.mac_entry),
+				no_credit);
+			commit_info.pkt_type = (commit_info.needs_hdr) ? eWLFC_PKTTYPE_DELAYED :
+				eWLFC_PKTTYPE_SUPPRESSED;
+
+			if (commit_info.p == NULL) {
+#ifdef LIMIT_BORROW
+				if (lender != -1) {
+					_dhd_wlfc_return_credit(ctx, lender, ac);
+				}
+#endif
+				break;
+			}
+
+			if (!dhdp->proptxstatus_credit_ignore && (lender == -1)) {
+				ASSERT(ctx->FIFO_credit[ac] >= commit_info.ac_fifo_credit_spent);
+			}
+			/* here we can ensure have credit or no credit needed */
+			rc = _dhd_wlfc_handle_packet_commit(ctx, ac, &commit_info, fcommit,
+				commit_ctx);
+
+			/* Bus commits may fail (e.g. flow control); abort after retries */
+			if (rc == BCME_OK) {
+				if (commit_info.ac_fifo_credit_spent && (lender == -1)) {
+					ctx->FIFO_credit[ac]--;
+				}
+#ifdef LIMIT_BORROW
+				else if (!commit_info.ac_fifo_credit_spent && (lender != -1)) {
+					_dhd_wlfc_return_credit(ctx, lender, ac);
+				}
+#endif
+			} else {
+#ifdef LIMIT_BORROW
+				if (lender != -1) {
+					_dhd_wlfc_return_credit(ctx, lender, ac);
+				}
+#endif
+				bus_retry_count++;
+				if (bus_retry_count >= BUS_RETRIES) {
+					DHD_ERROR(("%s: bus error %d\n", __FUNCTION__, rc));
+					goto exit;
+				}
+			}
+		}
+
+		if (ctx->pkt_cnt_per_ac[ac]) {
+			packets_map |= (1 << ac);
+		}
+	}
+
+	if ((tx_map == 0) || dhdp->proptxstatus_credit_ignore) {
+		/* nothing send out or remain in queue */
+		rc = BCME_OK;
+		goto exit;
+	}
+
+	if (((tx_map & (tx_map - 1)) == 0) && (tx_map >= rx_map)) {
+		/* only one tx ac exist and no higher rx ac */
+		if ((single_ac == ctx->single_ac) && ctx->allow_credit_borrow) {
+			ac = single_ac - 1;
+		} else {
+			uint32 delta;
+			uint32 curr_t = OSL_SYSUPTIME();
+
+			if (single_ac != ctx->single_ac) {
+				/* new single ac traffic (first single ac or different single ac) */
+				ctx->allow_credit_borrow = 0;
+				ctx->single_ac_timestamp = curr_t;
+				ctx->single_ac = (uint8)single_ac;
+				rc = BCME_OK;
+				goto exit;
+			}
+			/* same ac traffic, check if it lasts enough time */
+			delta = curr_t - ctx->single_ac_timestamp;
+
+			if (delta >= WLFC_BORROW_DEFER_PERIOD_MS) {
+				/* wait enough time, can borrow now */
+				ctx->allow_credit_borrow = 1;
+				ac = single_ac - 1;
+			} else {
+				rc = BCME_OK;
+				goto exit;
+			}
+		}
+	} else {
+		/* If we have multiple AC traffic, turn off borrowing, mark time and bail out */
+		ctx->allow_credit_borrow = 0;
+		ctx->single_ac_timestamp = 0;
+		ctx->single_ac = 0;
+		rc = BCME_OK;
+		goto exit;
+	}
+
+	if (packets_map == 0) {
+		/* nothing to send, skip borrow */
+		rc = BCME_OK;
+		goto exit;
+	}
+
+	/* At this point, borrow all credits only for ac */
+	while (FALSE == dhdp->proptxstatus_txoff) {
+#ifdef LIMIT_BORROW
+		if ((lender = _dhd_wlfc_borrow_credit(ctx, AC_COUNT, ac, TRUE)) == -1) {
+			break;
+		}
+#endif
+		commit_info.p = _dhd_wlfc_deque_delayedq(ctx, ac,
+			&(commit_info.ac_fifo_credit_spent),
+			&(commit_info.needs_hdr),
+			&(commit_info.mac_entry),
+			FALSE);
+		if (commit_info.p == NULL) {
+			/* before borrow only one ac exists and now this only ac is empty */
+#ifdef LIMIT_BORROW
+			_dhd_wlfc_return_credit(ctx, lender, ac);
+#endif
+			break;
+		}
+
+		commit_info.pkt_type = (commit_info.needs_hdr) ? eWLFC_PKTTYPE_DELAYED :
+			eWLFC_PKTTYPE_SUPPRESSED;
+
+		rc = _dhd_wlfc_handle_packet_commit(ctx, ac, &commit_info,
+		     fcommit, commit_ctx);
+
+		/* Bus commits may fail (e.g. flow control); abort after retries */
+		if (rc == BCME_OK) {
+
+			if (commit_info.ac_fifo_credit_spent) {
+#ifndef LIMIT_BORROW
+				ctx->FIFO_credit[ac]--;
+#endif
+			} else {
+#ifdef LIMIT_BORROW
+				_dhd_wlfc_return_credit(ctx, lender, ac);
+#endif
+			}
+		} else {
+#ifdef LIMIT_BORROW
+			_dhd_wlfc_return_credit(ctx, lender, ac);
+#endif
+			bus_retry_count++;
+			if (bus_retry_count >= BUS_RETRIES) {
+				DHD_ERROR(("%s: bus error %d\n", __FUNCTION__, rc));
+				goto exit;
+			}
+		}
+	}
+
+exit:
+	if (need_toggle_host_if && ctx->toggle_host_if) {
+		ctx->toggle_host_if = 0;
+	}
+
+exit2:
+	dhd_os_wlfc_unblock(dhdp);
+	return rc;
+}
+
+int
+dhd_wlfc_txcomplete(dhd_pub_t *dhd, void *txp, bool success)
+{
+	athost_wl_status_info_t* wlfc;
+	void* pout = NULL;
+	int rtn = BCME_OK;
+	if ((dhd == NULL) || (txp == NULL)) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+		rtn = WLFC_UNSUPPORTED;
+		goto EXIT;
+	}
+
+	wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+	if (DHD_PKTTAG_SIGNALONLY(PKTTAG(txp))) {
+#ifdef PROP_TXSTATUS_DEBUG
+		wlfc->stats.signal_only_pkts_freed++;
+#endif
+		/* is this a signal-only packet? */
+		_dhd_wlfc_pullheader(wlfc, txp);
+		PKTFREE(wlfc->osh, txp, TRUE);
+		goto EXIT;
+	}
+
+	if (!success || dhd->proptxstatus_txstatus_ignore) {
+		wlfc_mac_descriptor_t *entry = _dhd_wlfc_find_table_entry(wlfc, txp);
+
+		WLFC_DBGMESG(("At: %s():%d, bus_complete() failure for %p, htod_tag:0x%08x\n",
+			__FUNCTION__, __LINE__, txp, DHD_PKTTAG_H2DTAG(PKTTAG(txp))));
+		if (!WLFC_GET_AFQ(dhd->wlfc_mode)) {
+			_dhd_wlfc_hanger_poppkt(wlfc->hanger, WL_TXSTATUS_GET_HSLOT(
+				DHD_PKTTAG_H2DTAG(PKTTAG(txp))), &pout, TRUE);
+			ASSERT(txp == pout);
+		}
+
+		/* indicate failure and free the packet */
+		dhd_txcomplete(dhd, txp, success);
+
+		/* return the credit, if necessary */
+		_dhd_wlfc_return_implied_credit(wlfc, txp);
+
+		entry->transit_count--;
+		if (entry->suppressed && (--entry->suppr_transit_count == 0)) {
+			entry->suppressed = FALSE;
+		}
+		wlfc->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(txp))][DHD_PKTTAG_FIFO(PKTTAG(txp))]--;
+		wlfc->stats.pktout++;
+		PKTFREE(wlfc->osh, txp, TRUE);
+	} else {
+		/* bus confirmed pkt went to firmware side */
+		if (WLFC_GET_AFQ(dhd->wlfc_mode)) {
+			_dhd_wlfc_enque_afq(wlfc, txp);
+		} else {
+			int hslot = WL_TXSTATUS_GET_HSLOT(DHD_PKTTAG_H2DTAG(PKTTAG(txp)));
+			_dhd_wlfc_hanger_free_pkt(wlfc, hslot,
+				WLFC_HANGER_PKT_STATE_TXCOMPLETE, -1);
+		}
+	}
+
+EXIT:
+	dhd_os_wlfc_unblock(dhd);
+	return rtn;
+}
+
+int
+dhd_wlfc_init(dhd_pub_t *dhd)
+{
+	char iovbuf[14]; /* Room for "tlv" + '\0' + parameter */
+	/* enable all signals & indicate host proptxstatus logic is active */
+	uint32 tlv, mode, fw_caps;
+	int ret = 0;
+
+	if (dhd == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+	if (dhd->wlfc_enabled) {
+		DHD_ERROR(("%s():%d, Already enabled!\n", __FUNCTION__, __LINE__));
+		dhd_os_wlfc_unblock(dhd);
+		return BCME_OK;
+	}
+	dhd->wlfc_enabled = TRUE;
+	dhd_os_wlfc_unblock(dhd);
+
+	tlv = WLFC_FLAGS_RSSI_SIGNALS |
+		WLFC_FLAGS_XONXOFF_SIGNALS |
+		WLFC_FLAGS_CREDIT_STATUS_SIGNALS |
+		WLFC_FLAGS_HOST_PROPTXSTATUS_ACTIVE |
+		WLFC_FLAGS_HOST_RXRERODER_ACTIVE;
+
+
+	/*
+	try to enable/disable signaling by sending "tlv" iovar. if that fails,
+	fallback to no flow control? Print a message for now.
+	*/
+
+	/* enable proptxtstatus signaling by default */
+	bcm_mkiovar("tlv", (char *)&tlv, 4, iovbuf, sizeof(iovbuf));
+	if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0) < 0) {
+		DHD_ERROR(("dhd_wlfc_init(): failed to enable/disable bdcv2 tlv signaling\n"));
+	}
+	else {
+		/*
+		Leaving the message for now, it should be removed after a while; once
+		the tlv situation is stable.
+		*/
+		DHD_ERROR(("dhd_wlfc_init(): successfully %s bdcv2 tlv signaling, %d\n",
+			dhd->wlfc_enabled?"enabled":"disabled", tlv));
+	}
+
+	/* query caps */
+	ret = bcm_mkiovar("wlfc_mode", (char *)&mode, 4, iovbuf, sizeof(iovbuf));
+	if (ret > 0) {
+		ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
+	}
+
+	if (ret >= 0) {
+		fw_caps = *((uint32 *)iovbuf);
+		mode = 0;
+		DHD_ERROR(("%s: query wlfc_mode succeed, fw_caps=0x%x\n", __FUNCTION__, fw_caps));
+
+		if (WLFC_IS_OLD_DEF(fw_caps)) {
+			/* enable proptxtstatus v2 by default */
+			mode = WLFC_MODE_AFQ;
+		} else {
+			WLFC_SET_AFQ(mode, WLFC_GET_AFQ(fw_caps));
+			WLFC_SET_REUSESEQ(mode, WLFC_GET_REUSESEQ(fw_caps));
+			WLFC_SET_REORDERSUPP(mode, WLFC_GET_REORDERSUPP(fw_caps));
+		}
+		ret = bcm_mkiovar("wlfc_mode", (char *)&mode, 4, iovbuf, sizeof(iovbuf));
+		if (ret > 0) {
+			ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+		}
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	dhd->wlfc_mode = 0;
+	if (ret >= 0) {
+		if (WLFC_IS_OLD_DEF(mode)) {
+			WLFC_SET_AFQ(dhd->wlfc_mode, (mode == WLFC_MODE_AFQ));
+		} else {
+			dhd->wlfc_mode = mode;
+		}
+	}
+	DHD_ERROR(("dhd_wlfc_init(): wlfc_mode=0x%x, ret=%d\n", dhd->wlfc_mode, ret));
+
+	dhd_os_wlfc_unblock(dhd);
+
+	if (dhd->plat_init)
+		dhd->plat_init((void *)dhd);
+
+	return BCME_OK;
+}
+
+int
+dhd_wlfc_hostreorder_init(dhd_pub_t *dhd)
+{
+	char iovbuf[14]; /* Room for "tlv" + '\0' + parameter */
+	/* enable only ampdu hostreorder here */
+	uint32 tlv;
+
+	if (dhd == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	DHD_TRACE(("%s():%d Enter\n", __FUNCTION__, __LINE__));
+
+	tlv = WLFC_FLAGS_HOST_RXRERODER_ACTIVE;
+
+	/* enable proptxtstatus signaling by default */
+	bcm_mkiovar("tlv", (char *)&tlv, 4, iovbuf, sizeof(iovbuf));
+	if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0) < 0) {
+		DHD_ERROR(("%s(): failed to enable/disable bdcv2 tlv signaling\n",
+			__FUNCTION__));
+	}
+	else {
+		/*
+		Leaving the message for now, it should be removed after a while; once
+		the tlv situation is stable.
+		*/
+		DHD_ERROR(("%s(): successful bdcv2 tlv signaling, %d\n",
+			__FUNCTION__, tlv));
+	}
+
+	dhd_os_wlfc_block(dhd);
+	dhd->proptxstatus_mode = WLFC_ONLY_AMPDU_HOSTREORDER;
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+int
+dhd_wlfc_cleanup_txq(dhd_pub_t *dhd, f_processpkt_t fn, void *arg)
+{
+	if (dhd == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+		dhd_os_wlfc_unblock(dhd);
+		return WLFC_UNSUPPORTED;
+	}
+
+	_dhd_wlfc_cleanup_txq(dhd, fn, arg);
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+/* release all packet resources */
+int
+dhd_wlfc_cleanup(dhd_pub_t *dhd, f_processpkt_t fn, void *arg)
+{
+	if (dhd == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+		dhd_os_wlfc_unblock(dhd);
+		return WLFC_UNSUPPORTED;
+	}
+
+	_dhd_wlfc_cleanup(dhd, fn, arg);
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+int
+dhd_wlfc_deinit(dhd_pub_t *dhd)
+{
+	char iovbuf[32]; /* Room for "ampdu_hostreorder" or "tlv" + '\0' + parameter */
+	/* cleanup all psq related resources */
+	athost_wl_status_info_t* wlfc;
+	uint32 tlv = 0;
+	uint32 hostreorder = 0;
+	int ret = BCME_OK;
+
+	if (dhd == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+	if (!dhd->wlfc_enabled) {
+		DHD_ERROR(("%s():%d, Already disabled!\n", __FUNCTION__, __LINE__));
+		dhd_os_wlfc_unblock(dhd);
+		return BCME_OK;
+	}
+	dhd->wlfc_enabled = FALSE;
+	dhd_os_wlfc_unblock(dhd);
+
+	/* query ampdu hostreorder */
+	bcm_mkiovar("ampdu_hostreorder", NULL, 0, iovbuf, sizeof(iovbuf));
+	ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
+	if (ret == BCME_OK)
+		hostreorder = *((uint32 *)iovbuf);
+	else {
+		hostreorder = 0;
+		DHD_ERROR(("%s():%d, ampdu_hostreorder get failed Err = %d\n",
+			__FUNCTION__, __LINE__, ret));
+	}
+
+	if (hostreorder) {
+		tlv = WLFC_FLAGS_HOST_RXRERODER_ACTIVE;
+		DHD_ERROR(("%s():%d, maintain HOST RXRERODER flag in tvl\n",
+			__FUNCTION__, __LINE__));
+	}
+
+	/* Disable proptxtstatus signaling for deinit */
+	bcm_mkiovar("tlv", (char *)&tlv, 4, iovbuf, sizeof(iovbuf));
+	ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+
+	if (ret == BCME_OK) {
+		/*
+		Leaving the message for now, it should be removed after a while; once
+		the tlv situation is stable.
+		*/
+		DHD_ERROR(("%s():%d successfully %s bdcv2 tlv signaling, %d\n",
+			__FUNCTION__, __LINE__,
+			dhd->wlfc_enabled?"enabled":"disabled", tlv));
+	} else
+		DHD_ERROR(("%s():%d failed to enable/disable bdcv2 tlv signaling Err = %d\n",
+			__FUNCTION__, __LINE__, ret));
+
+	dhd_os_wlfc_block(dhd);
+
+	if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+		dhd_os_wlfc_unblock(dhd);
+		return WLFC_UNSUPPORTED;
+	}
+
+	wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+
+#ifdef PROP_TXSTATUS_DEBUG
+	if (!WLFC_GET_AFQ(dhd->wlfc_mode))
+	{
+		int i;
+		wlfc_hanger_t* h = (wlfc_hanger_t*)wlfc->hanger;
+		for (i = 0; i < h->max_items; i++) {
+			if (h->items[i].state != WLFC_HANGER_ITEM_STATE_FREE) {
+				WLFC_DBGMESG(("%s() pkt[%d] = 0x%p, FIFO_credit_used:%d\n",
+					__FUNCTION__, i, h->items[i].pkt,
+					DHD_PKTTAG_CREDITCHECK(PKTTAG(h->items[i].pkt))));
+			}
+		}
+	}
+#endif
+
+	_dhd_wlfc_cleanup(dhd, NULL, NULL);
+
+	if (!WLFC_GET_AFQ(dhd->wlfc_mode)) {
+		/* delete hanger */
+		_dhd_wlfc_hanger_delete(dhd->osh, wlfc->hanger);
+	}
+
+
+	/* free top structure */
+#ifndef WLFC_STATE_PREALLOC
+	MFREE(dhd->osh, dhd->wlfc_state, sizeof(athost_wl_status_info_t));
+	dhd->wlfc_state = NULL;
+#endif
+	dhd->proptxstatus_mode = hostreorder ?
+		WLFC_ONLY_AMPDU_HOSTREORDER : WLFC_FCMODE_NONE;
+
+	dhd_os_wlfc_unblock(dhd);
+
+	if (dhd->plat_deinit)
+		dhd->plat_deinit((void *)dhd);
+	return BCME_OK;
+}
+
+int dhd_wlfc_interface_event(dhd_pub_t *dhdp, uint8 action, uint8 ifid, uint8 iftype, uint8* ea)
+{
+	int rc;
+
+	if (dhdp == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhdp);
+
+	if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+		dhd_os_wlfc_unblock(dhdp);
+		return WLFC_UNSUPPORTED;
+	}
+
+	rc = _dhd_wlfc_interface_entry_update(dhdp->wlfc_state, action, ifid, iftype, ea);
+
+	dhd_os_wlfc_unblock(dhdp);
+	return rc;
+}
+
+int dhd_wlfc_FIFOcreditmap_event(dhd_pub_t *dhdp, uint8* event_data)
+{
+	int rc;
+
+	if (dhdp == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhdp);
+
+	if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+		dhd_os_wlfc_unblock(dhdp);
+		return WLFC_UNSUPPORTED;
+	}
+
+	rc = _dhd_wlfc_FIFOcreditmap_update(dhdp->wlfc_state, event_data);
+
+	dhd_os_wlfc_unblock(dhdp);
+
+	return rc;
+}
+
+int dhd_wlfc_BCMCCredit_support_event(dhd_pub_t *dhdp)
+{
+	int rc;
+
+	if (dhdp == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhdp);
+
+	if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+		dhd_os_wlfc_unblock(dhdp);
+		return WLFC_UNSUPPORTED;
+	}
+
+	rc = _dhd_wlfc_BCMCCredit_support_update(dhdp->wlfc_state);
+
+	dhd_os_wlfc_unblock(dhdp);
+	return rc;
+}
+
+int
+dhd_wlfc_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
+{
+	int i;
+	uint8* ea;
+	athost_wl_status_info_t* wlfc;
+	wlfc_hanger_t* h;
+	wlfc_mac_descriptor_t* mac_table;
+	wlfc_mac_descriptor_t* interfaces;
+	char* iftypes[] = {"STA", "AP", "WDS", "p2pGO", "p2pCL"};
+
+	if (!dhdp || !strbuf) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhdp);
+
+	if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+		dhd_os_wlfc_unblock(dhdp);
+		return WLFC_UNSUPPORTED;
+	}
+
+	wlfc = (athost_wl_status_info_t*)dhdp->wlfc_state;
+
+	h = (wlfc_hanger_t*)wlfc->hanger;
+	if (h == NULL) {
+		bcm_bprintf(strbuf, "wlfc-hanger not initialized yet\n");
+	}
+
+	mac_table = wlfc->destination_entries.nodes;
+	interfaces = wlfc->destination_entries.interfaces;
+	bcm_bprintf(strbuf, "---- wlfc stats ----\n");
+
+	if (!WLFC_GET_AFQ(dhdp->wlfc_mode)) {
+		h = (wlfc_hanger_t*)wlfc->hanger;
+		if (h == NULL) {
+			bcm_bprintf(strbuf, "wlfc-hanger not initialized yet\n");
+		} else {
+			bcm_bprintf(strbuf, "wlfc hanger (pushed,popped,f_push,"
+				"f_pop,f_slot, pending) = (%d,%d,%d,%d,%d,%d)\n",
+				h->pushed,
+				h->popped,
+				h->failed_to_push,
+				h->failed_to_pop,
+				h->failed_slotfind,
+				(h->pushed - h->popped));
+		}
+	}
+
+	bcm_bprintf(strbuf, "wlfc fail(tlv,credit_rqst,mac_update,psmode_update), "
+		"(dq_full,rollback_fail) = (%d,%d,%d,%d), (%d,%d)\n",
+		wlfc->stats.tlv_parse_failed,
+		wlfc->stats.credit_request_failed,
+		wlfc->stats.mac_update_failed,
+		wlfc->stats.psmode_update_failed,
+		wlfc->stats.delayq_full_error,
+		wlfc->stats.rollback_failed);
+
+	bcm_bprintf(strbuf, "PKTS (init_credit,credit,sent,drop_d,drop_s,outoforder) "
+		"(AC0[%d,%d,%d,%d,%d,%d],AC1[%d,%d,%d,%d,%d,%d],AC2[%d,%d,%d,%d,%d,%d],"
+		"AC3[%d,%d,%d,%d,%d,%d],BC_MC[%d,%d,%d,%d,%d,%d])\n",
+		wlfc->Init_FIFO_credit[0], wlfc->FIFO_credit[0], wlfc->stats.send_pkts[0],
+		wlfc->stats.drop_pkts[0], wlfc->stats.drop_pkts[1], wlfc->stats.ooo_pkts[0],
+		wlfc->Init_FIFO_credit[1], wlfc->FIFO_credit[1], wlfc->stats.send_pkts[1],
+		wlfc->stats.drop_pkts[2], wlfc->stats.drop_pkts[3], wlfc->stats.ooo_pkts[1],
+		wlfc->Init_FIFO_credit[2], wlfc->FIFO_credit[2], wlfc->stats.send_pkts[2],
+		wlfc->stats.drop_pkts[4], wlfc->stats.drop_pkts[5], wlfc->stats.ooo_pkts[2],
+		wlfc->Init_FIFO_credit[3], wlfc->FIFO_credit[3], wlfc->stats.send_pkts[3],
+		wlfc->stats.drop_pkts[6], wlfc->stats.drop_pkts[7], wlfc->stats.ooo_pkts[3],
+		wlfc->Init_FIFO_credit[4], wlfc->FIFO_credit[4], wlfc->stats.send_pkts[4],
+		wlfc->stats.drop_pkts[8], wlfc->stats.drop_pkts[9], wlfc->stats.ooo_pkts[4]);
+
+	bcm_bprintf(strbuf, "\n");
+	for (i = 0; i < WLFC_MAX_IFNUM; i++) {
+		if (interfaces[i].occupied) {
+			char* iftype_desc;
+
+			if (interfaces[i].iftype > WLC_E_IF_ROLE_P2P_CLIENT)
+				iftype_desc = "<Unknown";
+			else
+				iftype_desc = iftypes[interfaces[i].iftype];
+
+			ea = interfaces[i].ea;
+			bcm_bprintf(strbuf, "INTERFACE[%d].ea = "
+				"[%02x:%02x:%02x:%02x:%02x:%02x], if:%d, type: %s "
+				"netif_flow_control:%s\n", i,
+				ea[0], ea[1], ea[2], ea[3], ea[4], ea[5],
+				interfaces[i].interface_id,
+				iftype_desc, ((wlfc->hostif_flow_state[i] == OFF)
+				? " OFF":" ON"));
+
+			bcm_bprintf(strbuf, "INTERFACE[%d].PSQ(len,state,credit),(trans,supp_trans)"
+				"= (%d,%s,%d),(%d,%d)\n",
+				i,
+				interfaces[i].psq.len,
+				((interfaces[i].state ==
+				WLFC_STATE_OPEN) ? "OPEN":"CLOSE"),
+				interfaces[i].requested_credit,
+				interfaces[i].transit_count, interfaces[i].suppr_transit_count);
+
+			bcm_bprintf(strbuf, "INTERFACE[%d].PSQ"
+				"(delay0,sup0,afq0),(delay1,sup1,afq1),(delay2,sup2,afq2),"
+				"(delay3,sup3,afq3),(delay4,sup4,afq4) = (%d,%d,%d),"
+				"(%d,%d,%d),(%d,%d,%d),(%d,%d,%d),(%d,%d,%d)\n",
+				i,
+				interfaces[i].psq.q[0].len,
+				interfaces[i].psq.q[1].len,
+				interfaces[i].afq.q[0].len,
+				interfaces[i].psq.q[2].len,
+				interfaces[i].psq.q[3].len,
+				interfaces[i].afq.q[1].len,
+				interfaces[i].psq.q[4].len,
+				interfaces[i].psq.q[5].len,
+				interfaces[i].afq.q[2].len,
+				interfaces[i].psq.q[6].len,
+				interfaces[i].psq.q[7].len,
+				interfaces[i].afq.q[3].len,
+				interfaces[i].psq.q[8].len,
+				interfaces[i].psq.q[9].len,
+				interfaces[i].afq.q[4].len);
+		}
+	}
+
+	bcm_bprintf(strbuf, "\n");
+	for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) {
+		if (mac_table[i].occupied) {
+			ea = mac_table[i].ea;
+			bcm_bprintf(strbuf, "MAC_table[%d].ea = "
+				"[%02x:%02x:%02x:%02x:%02x:%02x], if:%d \n", i,
+				ea[0], ea[1], ea[2], ea[3], ea[4], ea[5],
+				mac_table[i].interface_id);
+
+			bcm_bprintf(strbuf, "MAC_table[%d].PSQ(len,state,credit),(trans,supp_trans)"
+				"= (%d,%s,%d),(%d,%d)\n",
+				i,
+				mac_table[i].psq.len,
+				((mac_table[i].state ==
+				WLFC_STATE_OPEN) ? " OPEN":"CLOSE"),
+				mac_table[i].requested_credit,
+				mac_table[i].transit_count, mac_table[i].suppr_transit_count);
+#ifdef PROP_TXSTATUS_DEBUG
+			bcm_bprintf(strbuf, "MAC_table[%d]: (opened, closed) = (%d, %d)\n",
+				i, mac_table[i].opened_ct, mac_table[i].closed_ct);
+#endif
+			bcm_bprintf(strbuf, "MAC_table[%d].PSQ"
+				"(delay0,sup0,afq0),(delay1,sup1,afq1),(delay2,sup2,afq2),"
+				"(delay3,sup3,afq3),(delay4,sup4,afq4) =(%d,%d,%d),"
+				"(%d,%d,%d),(%d,%d,%d),(%d,%d,%d),(%d,%d,%d)\n",
+				i,
+				mac_table[i].psq.q[0].len,
+				mac_table[i].psq.q[1].len,
+				mac_table[i].afq.q[0].len,
+				mac_table[i].psq.q[2].len,
+				mac_table[i].psq.q[3].len,
+				mac_table[i].afq.q[1].len,
+				mac_table[i].psq.q[4].len,
+				mac_table[i].psq.q[5].len,
+				mac_table[i].afq.q[2].len,
+				mac_table[i].psq.q[6].len,
+				mac_table[i].psq.q[7].len,
+				mac_table[i].afq.q[3].len,
+				mac_table[i].psq.q[8].len,
+				mac_table[i].psq.q[9].len,
+				mac_table[i].afq.q[4].len);
+
+		}
+	}
+
+#ifdef PROP_TXSTATUS_DEBUG
+	{
+		int avg;
+		int moving_avg = 0;
+		int moving_samples;
+
+		if (wlfc->stats.latency_sample_count) {
+			moving_samples = sizeof(wlfc->stats.deltas)/sizeof(uint32);
+
+			for (i = 0; i < moving_samples; i++)
+				moving_avg += wlfc->stats.deltas[i];
+			moving_avg /= moving_samples;
+
+			avg = (100 * wlfc->stats.total_status_latency) /
+				wlfc->stats.latency_sample_count;
+			bcm_bprintf(strbuf, "txstatus latency (average, last, moving[%d]) = "
+				"(%d.%d, %03d, %03d)\n",
+				moving_samples, avg/100, (avg - (avg/100)*100),
+				wlfc->stats.latency_most_recent,
+				moving_avg);
+		}
+	}
+
+	bcm_bprintf(strbuf, "wlfc- fifo[0-5] credit stats: sent = (%d,%d,%d,%d,%d,%d), "
+		"back = (%d,%d,%d,%d,%d,%d)\n",
+		wlfc->stats.fifo_credits_sent[0],
+		wlfc->stats.fifo_credits_sent[1],
+		wlfc->stats.fifo_credits_sent[2],
+		wlfc->stats.fifo_credits_sent[3],
+		wlfc->stats.fifo_credits_sent[4],
+		wlfc->stats.fifo_credits_sent[5],
+
+		wlfc->stats.fifo_credits_back[0],
+		wlfc->stats.fifo_credits_back[1],
+		wlfc->stats.fifo_credits_back[2],
+		wlfc->stats.fifo_credits_back[3],
+		wlfc->stats.fifo_credits_back[4],
+		wlfc->stats.fifo_credits_back[5]);
+	{
+		uint32 fifo_cr_sent = 0;
+		uint32 fifo_cr_acked = 0;
+		uint32 request_cr_sent = 0;
+		uint32 request_cr_ack = 0;
+		uint32 bc_mc_cr_ack = 0;
+
+		for (i = 0; i < sizeof(wlfc->stats.fifo_credits_sent)/sizeof(uint32); i++) {
+			fifo_cr_sent += wlfc->stats.fifo_credits_sent[i];
+		}
+
+		for (i = 0; i < sizeof(wlfc->stats.fifo_credits_back)/sizeof(uint32); i++) {
+			fifo_cr_acked += wlfc->stats.fifo_credits_back[i];
+		}
+
+		for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) {
+			if (wlfc->destination_entries.nodes[i].occupied) {
+				request_cr_sent +=
+					wlfc->destination_entries.nodes[i].dstncredit_sent_packets;
+			}
+		}
+		for (i = 0; i < WLFC_MAX_IFNUM; i++) {
+			if (wlfc->destination_entries.interfaces[i].occupied) {
+				request_cr_sent +=
+				wlfc->destination_entries.interfaces[i].dstncredit_sent_packets;
+			}
+		}
+		for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) {
+			if (wlfc->destination_entries.nodes[i].occupied) {
+				request_cr_ack +=
+					wlfc->destination_entries.nodes[i].dstncredit_acks;
+			}
+		}
+		for (i = 0; i < WLFC_MAX_IFNUM; i++) {
+			if (wlfc->destination_entries.interfaces[i].occupied) {
+				request_cr_ack +=
+					wlfc->destination_entries.interfaces[i].dstncredit_acks;
+			}
+		}
+		bcm_bprintf(strbuf, "wlfc- (sent, status) => pq(%d,%d), vq(%d,%d),"
+			"other:%d, bc_mc:%d, signal-only, (sent,freed): (%d,%d)",
+			fifo_cr_sent, fifo_cr_acked,
+			request_cr_sent, request_cr_ack,
+			wlfc->destination_entries.other.dstncredit_acks,
+			bc_mc_cr_ack,
+			wlfc->stats.signal_only_pkts_sent, wlfc->stats.signal_only_pkts_freed);
+	}
+#endif /* PROP_TXSTATUS_DEBUG */
+	bcm_bprintf(strbuf, "\n");
+	bcm_bprintf(strbuf, "wlfc- pkt((in,2bus,txstats,hdrpull,out),(dropped,hdr_only,wlc_tossed)"
+		"(freed,free_err,rollback)) = "
+		"((%d,%d,%d,%d,%d),(%d,%d,%d),(%d,%d,%d))\n",
+		wlfc->stats.pktin,
+		wlfc->stats.pkt2bus,
+		wlfc->stats.txstatus_in,
+		wlfc->stats.dhd_hdrpulls,
+		wlfc->stats.pktout,
+
+		wlfc->stats.pktdropped,
+		wlfc->stats.wlfc_header_only_pkt,
+		wlfc->stats.wlc_tossed_pkts,
+
+		wlfc->stats.pkt_freed,
+		wlfc->stats.pkt_free_err, wlfc->stats.rollback);
+
+	bcm_bprintf(strbuf, "wlfc- suppress((d11,wlc,err),enq(d11,wl,hq,mac?),retx(d11,wlc,hq)) = "
+		"((%d,%d,%d),(%d,%d,%d,%d),(%d,%d,%d))\n",
+		wlfc->stats.d11_suppress,
+		wlfc->stats.wl_suppress,
+		wlfc->stats.bad_suppress,
+
+		wlfc->stats.psq_d11sup_enq,
+		wlfc->stats.psq_wlsup_enq,
+		wlfc->stats.psq_hostq_enq,
+		wlfc->stats.mac_handle_notfound,
+
+		wlfc->stats.psq_d11sup_retx,
+		wlfc->stats.psq_wlsup_retx,
+		wlfc->stats.psq_hostq_retx);
+
+	bcm_bprintf(strbuf, "wlfc- cleanup(txq,psq,fw) = (%d,%d,%d)\n",
+		wlfc->stats.cleanup_txq_cnt,
+		wlfc->stats.cleanup_psq_cnt,
+		wlfc->stats.cleanup_fw_cnt);
+
+	bcm_bprintf(strbuf, "wlfc- generic error: %d\n", wlfc->stats.generic_error);
+
+	for (i = 0; i < WLFC_MAX_IFNUM; i++) {
+		bcm_bprintf(strbuf, "wlfc- if[%d], pkt_cnt_in_q/AC[0-4] = (%d,%d,%d,%d,%d)\n", i,
+			wlfc->pkt_cnt_in_q[i][0],
+			wlfc->pkt_cnt_in_q[i][1],
+			wlfc->pkt_cnt_in_q[i][2],
+			wlfc->pkt_cnt_in_q[i][3],
+			wlfc->pkt_cnt_in_q[i][4]);
+	}
+	bcm_bprintf(strbuf, "\n");
+
+	dhd_os_wlfc_unblock(dhdp);
+	return BCME_OK;
+}
+
+int dhd_wlfc_clear_counts(dhd_pub_t *dhd)
+{
+	athost_wl_status_info_t* wlfc;
+	wlfc_hanger_t* hanger;
+
+	if (dhd == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+		dhd_os_wlfc_unblock(dhd);
+		return WLFC_UNSUPPORTED;
+	}
+
+	wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+
+	memset(&wlfc->stats, 0, sizeof(athost_wl_stat_counters_t));
+
+	if (!WLFC_GET_AFQ(dhd->wlfc_mode)) {
+		hanger = (wlfc_hanger_t*)wlfc->hanger;
+
+		hanger->pushed = 0;
+		hanger->popped = 0;
+		hanger->failed_slotfind = 0;
+		hanger->failed_to_pop = 0;
+		hanger->failed_to_push = 0;
+	}
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+int dhd_wlfc_get_enable(dhd_pub_t *dhd, bool *val)
+{
+	if (!dhd || !val) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	*val = dhd->wlfc_enabled;
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+int dhd_wlfc_get_mode(dhd_pub_t *dhd, int *val)
+{
+	if (!dhd || !val) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	*val = dhd->wlfc_state ? dhd->proptxstatus_mode : 0;
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+int dhd_wlfc_set_mode(dhd_pub_t *dhd, int val)
+{
+	if (!dhd) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	if (dhd->wlfc_state) {
+		dhd->proptxstatus_mode = val & 0xff;
+	}
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+bool dhd_wlfc_is_header_only_pkt(dhd_pub_t * dhd, void *pktbuf)
+{
+	athost_wl_status_info_t* wlfc;
+	bool rc = FALSE;
+
+	if (dhd == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return FALSE;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+		dhd_os_wlfc_unblock(dhd);
+		return FALSE;
+	}
+
+	wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+
+	if (PKTLEN(wlfc->osh, pktbuf) == 0) {
+		wlfc->stats.wlfc_header_only_pkt++;
+		rc = TRUE;
+	}
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return rc;
+}
+
+int dhd_wlfc_flowcontrol(dhd_pub_t *dhdp, bool state, bool bAcquireLock)
+{
+	if (dhdp == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	if (bAcquireLock) {
+		dhd_os_wlfc_block(dhdp);
+	}
+
+	if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE) ||
+		dhdp->proptxstatus_module_ignore) {
+		if (bAcquireLock) {
+			dhd_os_wlfc_unblock(dhdp);
+		}
+		return WLFC_UNSUPPORTED;
+	}
+
+	if (state != dhdp->proptxstatus_txoff) {
+		dhdp->proptxstatus_txoff = state;
+	}
+
+	if (bAcquireLock) {
+		dhd_os_wlfc_unblock(dhdp);
+	}
+
+	return BCME_OK;
+}
+
+int dhd_wlfc_save_rxpath_ac_time(dhd_pub_t * dhd, uint8 prio)
+{
+	athost_wl_status_info_t* wlfc;
+	int rx_path_ac = -1;
+
+	if ((dhd == NULL) || (prio >= NUMPRIO)) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	if (!dhd->wlfc_rxpkt_chk) {
+		dhd_os_wlfc_unblock(dhd);
+		return BCME_OK;
+	}
+
+	if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+		dhd_os_wlfc_unblock(dhd);
+		return WLFC_UNSUPPORTED;
+	}
+
+	wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+
+	rx_path_ac = prio2fifo[prio];
+	wlfc->rx_timestamp[rx_path_ac] = OSL_SYSUPTIME();
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+int dhd_wlfc_get_module_ignore(dhd_pub_t *dhd, int *val)
+{
+	if (!dhd || !val) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	*val = dhd->proptxstatus_module_ignore;
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+int dhd_wlfc_set_module_ignore(dhd_pub_t *dhd, int val)
+{
+	char iovbuf[14]; /* Room for "tlv" + '\0' + parameter */
+	uint32 tlv = 0;
+	bool bChanged = FALSE;
+
+	if (!dhd) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	if ((bool)val != dhd->proptxstatus_module_ignore) {
+		dhd->proptxstatus_module_ignore = (val != 0);
+		/* force txstatus_ignore sync with proptxstatus_module_ignore */
+		dhd->proptxstatus_txstatus_ignore = dhd->proptxstatus_module_ignore;
+		if (FALSE == dhd->proptxstatus_module_ignore) {
+			tlv = WLFC_FLAGS_RSSI_SIGNALS |
+				WLFC_FLAGS_XONXOFF_SIGNALS |
+				WLFC_FLAGS_CREDIT_STATUS_SIGNALS |
+				WLFC_FLAGS_HOST_PROPTXSTATUS_ACTIVE;
+		}
+		/* always enable host reorder */
+		tlv |= WLFC_FLAGS_HOST_RXRERODER_ACTIVE;
+		bChanged = TRUE;
+	}
+
+	dhd_os_wlfc_unblock(dhd);
+
+	if (bChanged) {
+		/* select enable proptxtstatus signaling */
+		bcm_mkiovar("tlv", (char *)&tlv, 4, iovbuf, sizeof(iovbuf));
+		if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0) < 0) {
+			DHD_ERROR(("%s: failed to set bdcv2 tlv signaling to 0x%x\n",
+				__FUNCTION__, tlv));
+		}
+		else {
+			DHD_ERROR(("%s: successfully set bdcv2 tlv signaling to 0x%x\n",
+				__FUNCTION__, tlv));
+		}
+	}
+	return BCME_OK;
+}
+
+int dhd_wlfc_get_credit_ignore(dhd_pub_t *dhd, int *val)
+{
+	if (!dhd || !val) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	*val = dhd->proptxstatus_credit_ignore;
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+int dhd_wlfc_set_credit_ignore(dhd_pub_t *dhd, int val)
+{
+	if (!dhd) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	dhd->proptxstatus_credit_ignore = (val != 0);
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+int dhd_wlfc_get_txstatus_ignore(dhd_pub_t *dhd, int *val)
+{
+	if (!dhd || !val) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	*val = dhd->proptxstatus_txstatus_ignore;
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+int dhd_wlfc_set_txstatus_ignore(dhd_pub_t *dhd, int val)
+{
+	if (!dhd) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	dhd->proptxstatus_txstatus_ignore = (val != 0);
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+int dhd_wlfc_get_rxpkt_chk(dhd_pub_t *dhd, int *val)
+{
+	if (!dhd || !val) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	*val = dhd->wlfc_rxpkt_chk;
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+int dhd_wlfc_set_rxpkt_chk(dhd_pub_t *dhd, int val)
+{
+	if (!dhd) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	dhd->wlfc_rxpkt_chk = (val != 0);
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+#endif /* PROP_TXSTATUS */
diff --git a/drivers/net/wireless/bcmdhd/dhd_wlfc.h b/drivers/net/wireless/bcmdhd/dhd_wlfc.h
new file mode 100644
index 0000000..1ac120c
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_wlfc.h
@@ -0,0 +1,517 @@
+/*
+* Copyright (C) 1999-2014, Broadcom Corporation
+*
+*      Unless you and Broadcom execute a separate written software license
+* agreement governing use of this software, this software is licensed to you
+* under the terms of the GNU General Public License version 2 (the "GPL"),
+* available at http://www.broadcom.com/licenses/GPLv2.php, with the
+* following added to such license:
+*
+*      As a special exception, the copyright holders of this software give you
+* permission to link this software with independent modules, and to copy and
+* distribute the resulting executable under terms of your choice, provided that
+* you also meet, for each linked independent module, the terms and conditions of
+* the license of that module.  An independent module is a module which is not
+* derived from this software.  The special exception does not apply to any
+* modifications of the software.
+*
+*      Notwithstanding the above, under no circumstances may you combine this
+* software in any way with any other Broadcom software provided under a license
+* other than the GPL, without Broadcom's express prior written consent.
+* $Id: dhd_wlfc.h 490028 2014-07-09 05:58:25Z $
+*
+*/
+#ifndef __wlfc_host_driver_definitions_h__
+#define __wlfc_host_driver_definitions_h__
+
+
+/* #define OOO_DEBUG */
+
+#define WLFC_UNSUPPORTED -9999
+
+#define WLFC_NO_TRAFFIC	-1
+#define WLFC_MULTI_TRAFFIC 0
+
+#define BUS_RETRIES 1	/* # of retries before aborting a bus tx operation */
+
+/* 16 bits will provide an absolute max of 65536 slots */
+#define WLFC_HANGER_MAXITEMS 3072
+
+#define WLFC_HANGER_ITEM_STATE_FREE			1
+#define WLFC_HANGER_ITEM_STATE_INUSE			2
+#define WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED		3
+
+#define WLFC_HANGER_PKT_STATE_TXSTATUS			1
+#define WLFC_HANGER_PKT_STATE_TXCOMPLETE		2
+#define WLFC_HANGER_PKT_STATE_CLEANUP			4
+
+typedef enum {
+	Q_TYPE_PSQ,
+	Q_TYPE_AFQ
+} q_type_t;
+
+typedef enum ewlfc_packet_state {
+	eWLFC_PKTTYPE_NEW,
+	eWLFC_PKTTYPE_DELAYED,
+	eWLFC_PKTTYPE_SUPPRESSED,
+	eWLFC_PKTTYPE_MAX
+} ewlfc_packet_state_t;
+
+typedef enum ewlfc_mac_entry_action {
+	eWLFC_MAC_ENTRY_ACTION_ADD,
+	eWLFC_MAC_ENTRY_ACTION_DEL,
+	eWLFC_MAC_ENTRY_ACTION_UPDATE,
+	eWLFC_MAC_ENTRY_ACTION_MAX
+} ewlfc_mac_entry_action_t;
+
+typedef struct wlfc_hanger_item {
+	uint8	state;
+	uint8   gen;
+	uint8	pkt_state;
+	uint8	pkt_txstatus;
+	uint32	identifier;
+	void*	pkt;
+#ifdef PROP_TXSTATUS_DEBUG
+	uint32	push_time;
+#endif
+	struct wlfc_hanger_item *next;
+} wlfc_hanger_item_t;
+
+typedef struct wlfc_hanger {
+	int max_items;
+	uint32 pushed;
+	uint32 popped;
+	uint32 failed_to_push;
+	uint32 failed_to_pop;
+	uint32 failed_slotfind;
+	uint32 slot_pos;
+	wlfc_hanger_item_t items[1];
+} wlfc_hanger_t;
+
+#define WLFC_HANGER_SIZE(n)	((sizeof(wlfc_hanger_t) - \
+	sizeof(wlfc_hanger_item_t)) + ((n)*sizeof(wlfc_hanger_item_t)))
+
+#define WLFC_STATE_OPEN		1
+#define WLFC_STATE_CLOSE	2
+
+#define WLFC_PSQ_PREC_COUNT		((AC_COUNT + 1) * 2) /* 2 for each AC traffic and bc/mc */
+#define WLFC_AFQ_PREC_COUNT		(AC_COUNT + 1)
+
+#define WLFC_PSQ_LEN			2048
+
+#define WLFC_FLOWCONTROL_HIWATER	(2048 - 256)
+#define WLFC_FLOWCONTROL_LOWATER	256
+
+#define WLFC_LOG_BUF_SIZE		(1024*1024)
+
+typedef struct wlfc_mac_descriptor {
+	uint8 occupied;
+	uint8 interface_id;
+	uint8 iftype;
+	uint8 state;
+	uint8 ac_bitmap; /* for APSD */
+	uint8 requested_credit;
+	uint8 requested_packet;
+	uint8 ea[ETHER_ADDR_LEN];
+	/*
+	maintain (MAC,AC) based seq count for
+	packets going to the device. As well as bc/mc.
+	*/
+	uint8 seq[AC_COUNT + 1];
+	uint8 generation;
+	struct pktq	psq;
+	/* packets at firmware */
+	struct pktq	afq;
+	/* The AC pending bitmap that was reported to the fw at last change */
+	uint8 traffic_lastreported_bmp;
+	/* The new AC pending bitmap */
+	uint8 traffic_pending_bmp;
+	/* 1= send on next opportunity */
+	uint8 send_tim_signal;
+	uint8 mac_handle;
+	/* Number of packets at dongle for this entry. */
+	uint transit_count;
+	/* Numbe of suppression to wait before evict from delayQ */
+	uint suppr_transit_count;
+	/* flag. TRUE when in suppress state */
+	uint8 suppressed;
+
+
+#ifdef PROP_TXSTATUS_DEBUG
+	uint32 dstncredit_sent_packets;
+	uint32 dstncredit_acks;
+	uint32 opened_ct;
+	uint32 closed_ct;
+#endif
+	struct wlfc_mac_descriptor* prev;
+	struct wlfc_mac_descriptor* next;
+} wlfc_mac_descriptor_t;
+
+typedef struct dhd_wlfc_commit_info {
+	uint8					needs_hdr;
+	uint8					ac_fifo_credit_spent;
+	ewlfc_packet_state_t	pkt_type;
+	wlfc_mac_descriptor_t*	mac_entry;
+	void*					p;
+} dhd_wlfc_commit_info_t;
+
+#define WLFC_DECR_SEQCOUNT(entry, prec) do { if (entry->seq[(prec)] == 0) {\
+	entry->seq[prec] = 0xff; } else entry->seq[prec]--;} while (0)
+
+#define WLFC_INCR_SEQCOUNT(entry, prec) entry->seq[(prec)]++
+#define WLFC_SEQCOUNT(entry, prec) entry->seq[(prec)]
+
+typedef struct athost_wl_stat_counters {
+	uint32	pktin;
+	uint32	pktout;
+	uint32	pkt2bus;
+	uint32	pktdropped;
+	uint32	tlv_parse_failed;
+	uint32	rollback;
+	uint32	rollback_failed;
+	uint32	delayq_full_error;
+	uint32	credit_request_failed;
+	uint32	packet_request_failed;
+	uint32	mac_update_failed;
+	uint32	psmode_update_failed;
+	uint32	interface_update_failed;
+	uint32	wlfc_header_only_pkt;
+	uint32	txstatus_in;
+	uint32	d11_suppress;
+	uint32	wl_suppress;
+	uint32	bad_suppress;
+	uint32	pkt_freed;
+	uint32	pkt_free_err;
+	uint32	psq_wlsup_retx;
+	uint32	psq_wlsup_enq;
+	uint32	psq_d11sup_retx;
+	uint32	psq_d11sup_enq;
+	uint32	psq_hostq_retx;
+	uint32	psq_hostq_enq;
+	uint32	mac_handle_notfound;
+	uint32	wlc_tossed_pkts;
+	uint32	dhd_hdrpulls;
+	uint32	generic_error;
+	/* an extra one for bc/mc traffic */
+	uint32	send_pkts[AC_COUNT + 1];
+	uint32	drop_pkts[WLFC_PSQ_PREC_COUNT];
+	uint32	ooo_pkts[AC_COUNT + 1];
+#ifdef PROP_TXSTATUS_DEBUG
+	/* all pkt2bus -> txstatus latency accumulated */
+	uint32	latency_sample_count;
+	uint32	total_status_latency;
+	uint32	latency_most_recent;
+	int	idx_delta;
+	uint32	deltas[10];
+	uint32	fifo_credits_sent[6];
+	uint32	fifo_credits_back[6];
+	uint32	dropped_qfull[6];
+	uint32	signal_only_pkts_sent;
+	uint32	signal_only_pkts_freed;
+#endif
+	uint32	cleanup_txq_cnt;
+	uint32	cleanup_psq_cnt;
+	uint32	cleanup_fw_cnt;
+} athost_wl_stat_counters_t;
+
+#ifdef PROP_TXSTATUS_DEBUG
+#define WLFC_HOST_FIFO_CREDIT_INC_SENTCTRS(ctx, ac) do { \
+	(ctx)->stats.fifo_credits_sent[(ac)]++;} while (0)
+#define WLFC_HOST_FIFO_CREDIT_INC_BACKCTRS(ctx, ac) do { \
+	(ctx)->stats.fifo_credits_back[(ac)]++;} while (0)
+#define WLFC_HOST_FIFO_DROPPEDCTR_INC(ctx, ac) do { \
+	(ctx)->stats.dropped_qfull[(ac)]++;} while (0)
+#else
+#define WLFC_HOST_FIFO_CREDIT_INC_SENTCTRS(ctx, ac) do {} while (0)
+#define WLFC_HOST_FIFO_CREDIT_INC_BACKCTRS(ctx, ac) do {} while (0)
+#define WLFC_HOST_FIFO_DROPPEDCTR_INC(ctx, ac) do {} while (0)
+#endif
+
+#define WLFC_FCMODE_NONE				0
+#define WLFC_FCMODE_IMPLIED_CREDIT		1
+#define WLFC_FCMODE_EXPLICIT_CREDIT		2
+#define WLFC_ONLY_AMPDU_HOSTREORDER		3
+
+/* Reserved credits ratio when borrowed by hihger priority */
+#define WLFC_BORROW_LIMIT_RATIO		4
+
+/* How long to defer borrowing in milliseconds */
+#define WLFC_BORROW_DEFER_PERIOD_MS 100
+
+/* How long to defer flow control in milliseconds */
+#define WLFC_FC_DEFER_PERIOD_MS 200
+
+/* How long to detect occurance per AC in miliseconds */
+#define WLFC_RX_DETECTION_THRESHOLD_MS	100
+
+/* Mask to represent available ACs (note: BC/MC is ignored */
+#define WLFC_AC_MASK 0xF
+
+typedef struct athost_wl_status_info {
+	uint8	last_seqid_to_wlc;
+
+	/* OSL handle */
+	osl_t*	osh;
+	/* dhd pub */
+	void*	dhdp;
+
+	/* stats */
+	athost_wl_stat_counters_t stats;
+
+	int		Init_FIFO_credit[AC_COUNT + 2];
+
+	/* the additional ones are for bc/mc and ATIM FIFO */
+	int		FIFO_credit[AC_COUNT + 2];
+
+	/* Credit borrow counts for each FIFO from each of the other FIFOs */
+	int		credits_borrowed[AC_COUNT + 2][AC_COUNT + 2];
+
+	/* packet hanger and MAC->handle lookup table */
+	void*	hanger;
+	struct {
+		/* table for individual nodes */
+		wlfc_mac_descriptor_t	nodes[WLFC_MAC_DESC_TABLE_SIZE];
+		/* table for interfaces */
+		wlfc_mac_descriptor_t	interfaces[WLFC_MAX_IFNUM];
+		/* OS may send packets to unknown (unassociated) destinations */
+		/* A place holder for bc/mc and packets to unknown destinations */
+		wlfc_mac_descriptor_t	other;
+	} destination_entries;
+
+	wlfc_mac_descriptor_t *active_entry_head;
+	int active_entry_count;
+
+	wlfc_mac_descriptor_t* requested_entry[WLFC_MAC_DESC_TABLE_SIZE];
+	int requested_entry_count;
+
+	/* pkt counts for each interface and ac */
+	int	pkt_cnt_in_q[WLFC_MAX_IFNUM][AC_COUNT+1];
+	int	pkt_cnt_per_ac[AC_COUNT+1];
+	int	pkt_cnt_in_drv[WLFC_MAX_IFNUM][AC_COUNT+1];
+	uint8	allow_fc;
+	uint32  fc_defer_timestamp;
+	uint32	rx_timestamp[AC_COUNT+1];
+	/* ON/OFF state for flow control to the host network interface */
+	uint8	hostif_flow_state[WLFC_MAX_IFNUM];
+	uint8	host_ifidx;
+	/* to flow control an OS interface */
+	uint8	toggle_host_if;
+
+	/* To borrow credits */
+	uint8   allow_credit_borrow;
+
+	/* ac number for the first single ac traffic */
+	uint8	single_ac;
+
+	/* Timestamp for the first single ac traffic */
+	uint32  single_ac_timestamp;
+
+	bool	bcmc_credit_supported;
+
+} athost_wl_status_info_t;
+
+/* Please be mindful that total pkttag space is 32 octets only */
+typedef struct dhd_pkttag {
+	/*
+	b[15]  - 1 = wlfc packet
+	b[14:13]  - encryption exemption
+	b[12 ] - 1 = event channel
+	b[11 ] - 1 = this packet was sent in response to one time packet request,
+	do not increment credit on status for this one. [WLFC_CTL_TYPE_MAC_REQUEST_PACKET].
+	b[10 ] - 1 = signal-only-packet to firmware [i.e. nothing to piggyback on]
+	b[9  ] - 1 = packet is host->firmware (transmit direction)
+	       - 0 = packet received from firmware (firmware->host)
+	b[8  ] - 1 = packet was sent due to credit_request (pspoll),
+	             packet does not count against FIFO credit.
+	       - 0 = normal transaction, packet counts against FIFO credit
+	b[7  ] - 1 = AP, 0 = STA
+	b[6:4] - AC FIFO number
+	b[3:0] - interface index
+	*/
+	uint16	if_flags;
+	/* destination MAC address for this packet so that not every
+	module needs to open the packet to find this
+	*/
+	uint8	dstn_ether[ETHER_ADDR_LEN];
+	/*
+	This 32-bit goes from host to device for every packet.
+	*/
+	uint32	htod_tag;
+
+	/*
+	This 16-bit is original seq number for every suppress packet.
+	*/
+	uint16	htod_seq;
+
+	/*
+	This address is mac entry for every packet.
+	*/
+	void*	entry;
+	/* bus specific stuff */
+	union {
+		struct {
+			void* stuff;
+			uint32 thing1;
+			uint32 thing2;
+		} sd;
+		struct {
+			void* bus;
+			void* urb;
+		} usb;
+	} bus_specific;
+} dhd_pkttag_t;
+
+#define DHD_PKTTAG_WLFCPKT_MASK			0x1
+#define DHD_PKTTAG_WLFCPKT_SHIFT		15
+#define DHD_PKTTAG_WLFCPKT_SET(tag, value)	((dhd_pkttag_t*)(tag))->if_flags = \
+	(((dhd_pkttag_t*)(tag))->if_flags & \
+	~(DHD_PKTTAG_WLFCPKT_MASK << DHD_PKTTAG_WLFCPKT_SHIFT)) | \
+	(((value) & DHD_PKTTAG_WLFCPKT_MASK) << DHD_PKTTAG_WLFCPKT_SHIFT)
+#define DHD_PKTTAG_WLFCPKT(tag)	((((dhd_pkttag_t*)(tag))->if_flags >> \
+	DHD_PKTTAG_WLFCPKT_SHIFT) & DHD_PKTTAG_WLFCPKT_MASK)
+
+#define DHD_PKTTAG_EXEMPT_MASK			0x3
+#define DHD_PKTTAG_EXEMPT_SHIFT			13
+#define DHD_PKTTAG_EXEMPT_SET(tag, value)	((dhd_pkttag_t*)(tag))->if_flags = \
+	(((dhd_pkttag_t*)(tag))->if_flags & \
+	~(DHD_PKTTAG_EXEMPT_MASK << DHD_PKTTAG_EXEMPT_SHIFT)) | \
+	(((value) & DHD_PKTTAG_EXEMPT_MASK) << DHD_PKTTAG_EXEMPT_SHIFT)
+#define DHD_PKTTAG_EXEMPT(tag)	((((dhd_pkttag_t*)(tag))->if_flags >> \
+	DHD_PKTTAG_EXEMPT_SHIFT) & DHD_PKTTAG_EXEMPT_MASK)
+
+#define DHD_PKTTAG_EVENT_MASK			0x1
+#define DHD_PKTTAG_EVENT_SHIFT			12
+#define DHD_PKTTAG_SETEVENT(tag, event)	((dhd_pkttag_t*)(tag))->if_flags = \
+	(((dhd_pkttag_t*)(tag))->if_flags & \
+	~(DHD_PKTTAG_EVENT_MASK << DHD_PKTTAG_EVENT_SHIFT)) | \
+	(((event) & DHD_PKTTAG_EVENT_MASK) << DHD_PKTTAG_EVENT_SHIFT)
+#define DHD_PKTTAG_EVENT(tag)	((((dhd_pkttag_t*)(tag))->if_flags >> \
+	DHD_PKTTAG_EVENT_SHIFT) & DHD_PKTTAG_EVENT_MASK)
+
+#define DHD_PKTTAG_ONETIMEPKTRQST_MASK		0x1
+#define DHD_PKTTAG_ONETIMEPKTRQST_SHIFT		11
+#define DHD_PKTTAG_SETONETIMEPKTRQST(tag)	((dhd_pkttag_t*)(tag))->if_flags = \
+	(((dhd_pkttag_t*)(tag))->if_flags & \
+	~(DHD_PKTTAG_ONETIMEPKTRQST_MASK << DHD_PKTTAG_ONETIMEPKTRQST_SHIFT)) | \
+	(1 << DHD_PKTTAG_ONETIMEPKTRQST_SHIFT)
+#define DHD_PKTTAG_ONETIMEPKTRQST(tag)	((((dhd_pkttag_t*)(tag))->if_flags >> \
+	DHD_PKTTAG_ONETIMEPKTRQST_SHIFT) & DHD_PKTTAG_ONETIMEPKTRQST_MASK)
+
+#define DHD_PKTTAG_SIGNALONLY_MASK		0x1
+#define DHD_PKTTAG_SIGNALONLY_SHIFT		10
+#define DHD_PKTTAG_SETSIGNALONLY(tag, signalonly)	((dhd_pkttag_t*)(tag))->if_flags = \
+	(((dhd_pkttag_t*)(tag))->if_flags & \
+	~(DHD_PKTTAG_SIGNALONLY_MASK << DHD_PKTTAG_SIGNALONLY_SHIFT)) | \
+	(((signalonly) & DHD_PKTTAG_SIGNALONLY_MASK) << DHD_PKTTAG_SIGNALONLY_SHIFT)
+#define DHD_PKTTAG_SIGNALONLY(tag)	((((dhd_pkttag_t*)(tag))->if_flags >> \
+	DHD_PKTTAG_SIGNALONLY_SHIFT) & DHD_PKTTAG_SIGNALONLY_MASK)
+
+#define DHD_PKTTAG_PKTDIR_MASK			0x1
+#define DHD_PKTTAG_PKTDIR_SHIFT			9
+#define DHD_PKTTAG_SETPKTDIR(tag, dir)	((dhd_pkttag_t*)(tag))->if_flags = \
+	(((dhd_pkttag_t*)(tag))->if_flags & \
+	~(DHD_PKTTAG_PKTDIR_MASK << DHD_PKTTAG_PKTDIR_SHIFT)) | \
+	(((dir) & DHD_PKTTAG_PKTDIR_MASK) << DHD_PKTTAG_PKTDIR_SHIFT)
+#define DHD_PKTTAG_PKTDIR(tag)	((((dhd_pkttag_t*)(tag))->if_flags >> \
+	DHD_PKTTAG_PKTDIR_SHIFT) & DHD_PKTTAG_PKTDIR_MASK)
+
+#define DHD_PKTTAG_CREDITCHECK_MASK		0x1
+#define DHD_PKTTAG_CREDITCHECK_SHIFT		8
+#define DHD_PKTTAG_SETCREDITCHECK(tag, check)	((dhd_pkttag_t*)(tag))->if_flags = \
+	(((dhd_pkttag_t*)(tag))->if_flags & \
+	~(DHD_PKTTAG_CREDITCHECK_MASK << DHD_PKTTAG_CREDITCHECK_SHIFT)) | \
+	(((check) & DHD_PKTTAG_CREDITCHECK_MASK) << DHD_PKTTAG_CREDITCHECK_SHIFT)
+#define DHD_PKTTAG_CREDITCHECK(tag)	((((dhd_pkttag_t*)(tag))->if_flags >> \
+	DHD_PKTTAG_CREDITCHECK_SHIFT) & DHD_PKTTAG_CREDITCHECK_MASK)
+
+#define DHD_PKTTAG_IFTYPE_MASK			0x1
+#define DHD_PKTTAG_IFTYPE_SHIFT			7
+#define DHD_PKTTAG_SETIFTYPE(tag, isAP)	((dhd_pkttag_t*)(tag))->if_flags = \
+	(((dhd_pkttag_t*)(tag))->if_flags & \
+	~(DHD_PKTTAG_IFTYPE_MASK << DHD_PKTTAG_IFTYPE_SHIFT)) | \
+	(((isAP) & DHD_PKTTAG_IFTYPE_MASK) << DHD_PKTTAG_IFTYPE_SHIFT)
+#define DHD_PKTTAG_IFTYPE(tag)	((((dhd_pkttag_t*)(tag))->if_flags >> \
+	DHD_PKTTAG_IFTYPE_SHIFT) & DHD_PKTTAG_IFTYPE_MASK)
+
+#define DHD_PKTTAG_FIFO_MASK			0x7
+#define DHD_PKTTAG_FIFO_SHIFT			4
+#define DHD_PKTTAG_SETFIFO(tag, fifo)	((dhd_pkttag_t*)(tag))->if_flags = \
+	(((dhd_pkttag_t*)(tag))->if_flags & ~(DHD_PKTTAG_FIFO_MASK << DHD_PKTTAG_FIFO_SHIFT)) | \
+	(((fifo) & DHD_PKTTAG_FIFO_MASK) << DHD_PKTTAG_FIFO_SHIFT)
+#define DHD_PKTTAG_FIFO(tag)		((((dhd_pkttag_t*)(tag))->if_flags >> \
+	DHD_PKTTAG_FIFO_SHIFT) & DHD_PKTTAG_FIFO_MASK)
+
+#define DHD_PKTTAG_IF_MASK			0xf
+#define DHD_PKTTAG_IF_SHIFT			0
+#define DHD_PKTTAG_SETIF(tag, if)	((dhd_pkttag_t*)(tag))->if_flags = \
+	(((dhd_pkttag_t*)(tag))->if_flags & ~(DHD_PKTTAG_IF_MASK << DHD_PKTTAG_IF_SHIFT)) | \
+	(((if) & DHD_PKTTAG_IF_MASK) << DHD_PKTTAG_IF_SHIFT)
+#define DHD_PKTTAG_IF(tag)		((((dhd_pkttag_t*)(tag))->if_flags >> \
+	DHD_PKTTAG_IF_SHIFT) & DHD_PKTTAG_IF_MASK)
+
+#define DHD_PKTTAG_SETDSTN(tag, dstn_MAC_ea)	memcpy(((dhd_pkttag_t*)((tag)))->dstn_ether, \
+	(dstn_MAC_ea), ETHER_ADDR_LEN)
+#define DHD_PKTTAG_DSTN(tag)	((dhd_pkttag_t*)(tag))->dstn_ether
+
+#define DHD_PKTTAG_SET_H2DTAG(tag, h2dvalue)	((dhd_pkttag_t*)(tag))->htod_tag = (h2dvalue)
+#define DHD_PKTTAG_H2DTAG(tag)			(((dhd_pkttag_t*)(tag))->htod_tag)
+
+#define DHD_PKTTAG_SET_H2DSEQ(tag, seq)		((dhd_pkttag_t*)(tag))->htod_seq = (seq)
+#define DHD_PKTTAG_H2DSEQ(tag)			(((dhd_pkttag_t*)(tag))->htod_seq)
+
+#define DHD_PKTTAG_SET_ENTRY(tag, entry)	((dhd_pkttag_t*)(tag))->entry = (entry)
+#define DHD_PKTTAG_ENTRY(tag)			(((dhd_pkttag_t*)(tag))->entry)
+
+#define PSQ_SUP_IDX(x) (x * 2 + 1)
+#define PSQ_DLY_IDX(x) (x * 2)
+
+typedef int (*f_commitpkt_t)(void* ctx, void* p);
+typedef bool (*f_processpkt_t)(void* p, void* arg);
+
+#ifdef PROP_TXSTATUS_DEBUG
+#define DHD_WLFC_CTRINC_MAC_CLOSE(entry)	do { (entry)->closed_ct++; } while (0)
+#define DHD_WLFC_CTRINC_MAC_OPEN(entry)		do { (entry)->opened_ct++; } while (0)
+#else
+#define DHD_WLFC_CTRINC_MAC_CLOSE(entry)	do {} while (0)
+#define DHD_WLFC_CTRINC_MAC_OPEN(entry)		do {} while (0)
+#endif
+
+/* public functions */
+int dhd_wlfc_parse_header_info(dhd_pub_t *dhd, void* pktbuf, int tlv_hdr_len,
+	uchar *reorder_info_buf, uint *reorder_info_len);
+int dhd_wlfc_commit_packets(dhd_pub_t *dhdp, f_commitpkt_t fcommit,
+	void* commit_ctx, void *pktbuf, bool need_toggle_host_if);
+int dhd_wlfc_txcomplete(dhd_pub_t *dhd, void *txp, bool success);
+int dhd_wlfc_init(dhd_pub_t *dhd);
+#ifdef SUPPORT_P2P_GO_PS
+int dhd_wlfc_suspend(dhd_pub_t *dhd);
+int dhd_wlfc_resume(dhd_pub_t *dhd);
+#endif /* SUPPORT_P2P_GO_PS */
+int dhd_wlfc_hostreorder_init(dhd_pub_t *dhd);
+int dhd_wlfc_cleanup_txq(dhd_pub_t *dhd, f_processpkt_t fn, void *arg);
+int dhd_wlfc_cleanup(dhd_pub_t *dhd, f_processpkt_t fn, void* arg);
+int dhd_wlfc_deinit(dhd_pub_t *dhd);
+int dhd_wlfc_interface_event(dhd_pub_t *dhdp, uint8 action, uint8 ifid, uint8 iftype, uint8* ea);
+int dhd_wlfc_FIFOcreditmap_event(dhd_pub_t *dhdp, uint8* event_data);
+int dhd_wlfc_BCMCCredit_support_event(dhd_pub_t *dhdp);
+int dhd_wlfc_enable(dhd_pub_t *dhdp);
+int dhd_wlfc_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf);
+int dhd_wlfc_clear_counts(dhd_pub_t *dhd);
+int dhd_wlfc_get_enable(dhd_pub_t *dhd, bool *val);
+int dhd_wlfc_get_mode(dhd_pub_t *dhd, int *val);
+int dhd_wlfc_set_mode(dhd_pub_t *dhd, int val);
+bool dhd_wlfc_is_supported(dhd_pub_t *dhd);
+bool dhd_wlfc_is_header_only_pkt(dhd_pub_t * dhd, void *pktbuf);
+int dhd_wlfc_flowcontrol(dhd_pub_t *dhdp, bool state, bool bAcquireLock);
+int dhd_wlfc_save_rxpath_ac_time(dhd_pub_t * dhd, uint8 prio);
+
+int dhd_wlfc_get_module_ignore(dhd_pub_t *dhd, int *val);
+int dhd_wlfc_set_module_ignore(dhd_pub_t *dhd, int val);
+int dhd_wlfc_get_credit_ignore(dhd_pub_t *dhd, int *val);
+int dhd_wlfc_set_credit_ignore(dhd_pub_t *dhd, int val);
+int dhd_wlfc_get_txstatus_ignore(dhd_pub_t *dhd, int *val);
+int dhd_wlfc_set_txstatus_ignore(dhd_pub_t *dhd, int val);
+
+int dhd_wlfc_get_rxpkt_chk(dhd_pub_t *dhd, int *val);
+int dhd_wlfc_set_rxpkt_chk(dhd_pub_t *dhd, int val);
+#endif /* __wlfc_host_driver_definitions_h__ */
diff --git a/drivers/net/wireless/bcmdhd/dngl_stats.h b/drivers/net/wireless/bcmdhd/dngl_stats.h
new file mode 100644
index 0000000..cd37e44
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dngl_stats.h
@@ -0,0 +1,222 @@
+/*
+ * Common stats definitions for clients of dongle
+ * ports
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dngl_stats.h 464743 2014-03-25 21:04:32Z $
+ */
+
+#ifndef _dngl_stats_h_
+#define _dngl_stats_h_
+
+typedef struct {
+	unsigned long	rx_packets;		/* total packets received */
+	unsigned long	tx_packets;		/* total packets transmitted */
+	unsigned long	rx_bytes;		/* total bytes received */
+	unsigned long	tx_bytes;		/* total bytes transmitted */
+	unsigned long	rx_errors;		/* bad packets received */
+	unsigned long	tx_errors;		/* packet transmit problems */
+	unsigned long	rx_dropped;		/* packets dropped by dongle */
+	unsigned long	tx_dropped;		/* packets dropped by dongle */
+	unsigned long   multicast;      /* multicast packets received */
+} dngl_stats_t;
+
+typedef int wifi_radio;
+typedef int wifi_channel;
+typedef int wifi_rssi;
+
+typedef enum wifi_channel_width {
+	WIFI_CHAN_WIDTH_20	  = 0,
+	WIFI_CHAN_WIDTH_40	  = 1,
+	WIFI_CHAN_WIDTH_80	  = 2,
+	WIFI_CHAN_WIDTH_160   = 3,
+	WIFI_CHAN_WIDTH_80P80 = 4,
+	WIFI_CHAN_WIDTH_5	  = 5,
+	WIFI_CHAN_WIDTH_10	  = 6,
+	WIFI_CHAN_WIDTH_INVALID = -1
+} wifi_channel_width_t;
+
+typedef enum {
+    WIFI_DISCONNECTED = 0,
+    WIFI_AUTHENTICATING = 1,
+    WIFI_ASSOCIATING = 2,
+    WIFI_ASSOCIATED = 3,
+    WIFI_EAPOL_STARTED = 4,   // if done by firmware/driver
+    WIFI_EAPOL_COMPLETED = 5, // if done by firmware/driver
+} wifi_connection_state;
+
+typedef enum {
+    WIFI_ROAMING_IDLE = 0,
+    WIFI_ROAMING_ACTIVE = 1,
+} wifi_roam_state;
+
+typedef enum {
+    WIFI_INTERFACE_STA = 0,
+    WIFI_INTERFACE_SOFTAP = 1,
+    WIFI_INTERFACE_IBSS = 2,
+    WIFI_INTERFACE_P2P_CLIENT = 3,
+    WIFI_INTERFACE_P2P_GO = 4,
+    WIFI_INTERFACE_NAN = 5,
+    WIFI_INTERFACE_MESH = 6,
+ } wifi_interface_mode;
+
+#define WIFI_CAPABILITY_QOS          0x00000001     // set for QOS association
+#define WIFI_CAPABILITY_PROTECTED    0x00000002     // set for protected association (802.11 beacon frame control protected bit set)
+#define WIFI_CAPABILITY_INTERWORKING 0x00000004     // set if 802.11 Extended Capabilities element interworking bit is set
+#define WIFI_CAPABILITY_HS20         0x00000008     // set for HS20 association
+#define WIFI_CAPABILITY_SSID_UTF8    0x00000010     // set is 802.11 Extended Capabilities element UTF-8 SSID bit is set
+#define WIFI_CAPABILITY_COUNTRY      0x00000020     // set is 802.11 Country Element is present
+
+typedef struct {
+   wifi_interface_mode mode;     // interface mode
+   u8 mac_addr[6];               // interface mac address (self)
+   wifi_connection_state state;  // connection state (valid for STA, CLI only)
+   wifi_roam_state roaming;      // roaming state
+   u32 capabilities;             // WIFI_CAPABILITY_XXX (self)
+   u8 ssid[33];                  // null terminated SSID
+   u8 bssid[6];                  // bssid
+   u8 ap_country_str[3];         // country string advertised by AP
+   u8 country_str[3];            // country string for this association
+} wifi_interface_info;
+
+typedef wifi_interface_info *wifi_interface_handle;
+
+/* channel information */
+typedef struct {
+   wifi_channel_width_t width;   // channel width (20, 40, 80, 80+80, 160)
+   wifi_channel center_freq;   // primary 20 MHz channel
+   wifi_channel center_freq0;  // center frequency (MHz) first segment
+   wifi_channel center_freq1;  // center frequency (MHz) second segment
+} wifi_channel_info;
+
+/* wifi rate */
+typedef struct {
+   u32 preamble   :3;   // 0: OFDM, 1:CCK, 2:HT 3:VHT 4..7 reserved
+   u32 nss        :2;   // 0:1x1, 1:2x2, 3:3x3, 4:4x4
+   u32 bw         :3;   // 0:20MHz, 1:40Mhz, 2:80Mhz, 3:160Mhz
+   u32 rateMcsIdx :8;   // OFDM/CCK rate code would be as per ieee std in the units of 0.5mbps
+                        // HT/VHT it would be mcs index
+   u32 reserved  :16;   // reserved
+   u32 bitrate;         // units of 100 Kbps
+} wifi_rate;
+
+/* channel statistics */
+typedef struct {
+   wifi_channel_info channel;  // channel
+   u32 on_time;                // msecs the radio is awake (32 bits number accruing over time)
+   u32 cca_busy_time;          // msecs the CCA register is busy (32 bits number accruing over time)
+} wifi_channel_stat;
+
+/* radio statistics */
+typedef struct {
+   wifi_radio radio;               // wifi radio (if multiple radio supported)
+   u32 on_time;                    // msecs the radio is awake (32 bits number accruing over time)
+   u32 tx_time;                    // msecs the radio is transmitting (32 bits number accruing over time)
+   u32 rx_time;                    // msecs the radio is in active receive (32 bits number accruing over time)
+   u32 on_time_scan;               // msecs the radio is awake due to all scan (32 bits number accruing over time)
+   u32 on_time_nbd;                // msecs the radio is awake due to NAN (32 bits number accruing over time)
+   u32 on_time_gscan;              // msecs the radio is awake due to G?scan (32 bits number accruing over time)
+   u32 on_time_roam_scan;          // msecs the radio is awake due to roam?scan (32 bits number accruing over time)
+   u32 on_time_pno_scan;           // msecs the radio is awake due to PNO scan (32 bits number accruing over time)
+   u32 on_time_hs20;               // msecs the radio is awake due to HS2.0 scans and GAS exchange (32 bits number accruing over time)
+   u32 num_channels;               // number of channels
+   wifi_channel_stat channels[];   // channel statistics
+} wifi_radio_stat;
+
+/* per rate statistics */
+typedef struct {
+   wifi_rate rate;     // rate information
+   u32 tx_mpdu;        // number of successfully transmitted data pkts (ACK rcvd)
+   u32 rx_mpdu;        // number of received data pkts
+   u32 mpdu_lost;      // number of data packet losses (no ACK)
+   u32 retries;        // total number of data pkt retries
+   u32 retries_short;  // number of short data pkt retries
+   u32 retries_long;   // number of long data pkt retries
+} wifi_rate_stat;
+
+/* access categories */
+typedef enum {
+   WIFI_AC_VO  = 0,
+   WIFI_AC_VI  = 1,
+   WIFI_AC_BE  = 2,
+   WIFI_AC_BK  = 3,
+   WIFI_AC_MAX = 4,
+} wifi_traffic_ac;
+
+/* wifi peer type */
+typedef enum
+{
+   WIFI_PEER_STA,
+   WIFI_PEER_AP,
+   WIFI_PEER_P2P_GO,
+   WIFI_PEER_P2P_CLIENT,
+   WIFI_PEER_NAN,
+   WIFI_PEER_TDLS,
+   WIFI_PEER_INVALID,
+} wifi_peer_type;
+
+/* per peer statistics */
+typedef struct {
+   wifi_peer_type type;           // peer type (AP, TDLS, GO etc.)
+   u8 peer_mac_address[6];        // mac address
+   u32 capabilities;              // peer WIFI_CAPABILITY_XXX
+   u32 num_rate;                  // number of rates
+   wifi_rate_stat rate_stats[];   // per rate statistics, number of entries  = num_rate
+} wifi_peer_info;
+
+/* per access category statistics */
+typedef struct {
+   wifi_traffic_ac ac;             // access category (VI, VO, BE, BK)
+   u32 tx_mpdu;                    // number of successfully transmitted unicast data pkts (ACK rcvd)
+   u32 rx_mpdu;                    // number of received unicast mpdus
+   u32 tx_mcast;                   // number of succesfully transmitted multicast data packets
+                                   // STA case: implies ACK received from AP for the unicast packet in which mcast pkt was sent
+   u32 rx_mcast;                   // number of received multicast data packets
+   u32 rx_ampdu;                   // number of received unicast a-mpdus
+   u32 tx_ampdu;                   // number of transmitted unicast a-mpdus
+   u32 mpdu_lost;                  // number of data pkt losses (no ACK)
+   u32 retries;                    // total number of data pkt retries
+   u32 retries_short;              // number of short data pkt retries
+   u32 retries_long;               // number of long data pkt retries
+   u32 contention_time_min;        // data pkt min contention time (usecs)
+   u32 contention_time_max;        // data pkt max contention time (usecs)
+   u32 contention_time_avg;        // data pkt avg contention time (usecs)
+   u32 contention_num_samples;     // num of data pkts used for contention statistics
+} wifi_wmm_ac_stat;
+
+/* interface statistics */
+typedef struct {
+   wifi_interface_handle iface;          // wifi interface
+   wifi_interface_info info;             // current state of the interface
+   u32 beacon_rx;                        // access point beacon received count from connected AP
+   u32 mgmt_rx;                          // access point mgmt frames received count from connected AP (including Beacon)
+   u32 mgmt_action_rx;                   // action frames received count
+   u32 mgmt_action_tx;                   // action frames transmit count
+   wifi_rssi rssi_mgmt;                  // access Point Beacon and Management frames RSSI (averaged)
+   wifi_rssi rssi_data;                  // access Point Data Frames RSSI (averaged) from connected AP
+   wifi_rssi rssi_ack;                   // access Point ACK RSSI (averaged) from connected AP
+   wifi_wmm_ac_stat ac[WIFI_AC_MAX];     // per ac data packet statistics
+   u32 num_peers;                        // number of peers
+   wifi_peer_info peer_info[];           // per peer statistics
+} wifi_iface_stat;
+
+#endif /* _dngl_stats_h_ */
diff --git a/drivers/net/wireless/bcmdhd/dngl_wlhdr.h b/drivers/net/wireless/bcmdhd/dngl_wlhdr.h
new file mode 100644
index 0000000..fbd3209
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dngl_wlhdr.h
@@ -0,0 +1,40 @@
+/*
+ * Dongle WL Header definitions
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dngl_wlhdr.h 464743 2014-03-25 21:04:32Z $
+ */
+
+#ifndef _dngl_wlhdr_h_
+#define _dngl_wlhdr_h_
+
+typedef struct wl_header {
+    uint8   type;           /* Header type */
+    uint8   version;        /* Header version */
+	int8	rssi;			/* RSSI */
+	uint8	pad;			/* Unused */
+} wl_header_t;
+
+#define WL_HEADER_LEN   sizeof(wl_header_t)
+#define WL_HEADER_TYPE  0
+#define WL_HEADER_VER   1
+#endif /* _dngl_wlhdr_h_ */
diff --git a/drivers/net/wireless/bcmdhd/hnd_pktpool.c b/drivers/net/wireless/bcmdhd/hnd_pktpool.c
new file mode 100644
index 0000000..bf48b6d
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/hnd_pktpool.c
@@ -0,0 +1,751 @@
+/*
+ * HND generic packet pool operation primitives
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: $
+ */
+
+#include <typedefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <hnd_pktpool.h>
+
+/* Registry size is one larger than max pools, as slot #0 is reserved */
+#define PKTPOOLREG_RSVD_ID				(0U)
+#define PKTPOOLREG_RSVD_PTR				(POOLPTR(0xdeaddead))
+#define PKTPOOLREG_FREE_PTR				(POOLPTR(NULL))
+
+#define PKTPOOL_REGISTRY_SET(id, pp)	(pktpool_registry_set((id), (pp)))
+#define PKTPOOL_REGISTRY_CMP(id, pp)	(pktpool_registry_cmp((id), (pp)))
+
+/* Tag a registry entry as free for use */
+#define PKTPOOL_REGISTRY_CLR(id)		\
+		PKTPOOL_REGISTRY_SET((id), PKTPOOLREG_FREE_PTR)
+#define PKTPOOL_REGISTRY_ISCLR(id)		\
+		(PKTPOOL_REGISTRY_CMP((id), PKTPOOLREG_FREE_PTR))
+
+/* Tag registry entry 0 as reserved */
+#define PKTPOOL_REGISTRY_RSV()			\
+		PKTPOOL_REGISTRY_SET(PKTPOOLREG_RSVD_ID, PKTPOOLREG_RSVD_PTR)
+#define PKTPOOL_REGISTRY_ISRSVD()		\
+		(PKTPOOL_REGISTRY_CMP(PKTPOOLREG_RSVD_ID, PKTPOOLREG_RSVD_PTR))
+
+/* Walk all un-reserved entries in registry */
+#define PKTPOOL_REGISTRY_FOREACH(id)	\
+		for ((id) = 1U; (id) <= pktpools_max; (id)++)
+
+uint32 pktpools_max = 0U; /* maximum number of pools that may be initialized */
+pktpool_t *pktpools_registry[PKTPOOL_MAXIMUM_ID + 1]; /* Pktpool registry */
+
+/* Register/Deregister a pktpool with registry during pktpool_init/deinit */
+static int pktpool_register(pktpool_t * poolptr);
+static int pktpool_deregister(pktpool_t * poolptr);
+
+/** accessor functions required when ROMming this file, forced into RAM */
+static void
+BCMRAMFN(pktpool_registry_set)(int id, pktpool_t *pp)
+{
+	pktpools_registry[id] = pp;
+}
+
+static bool
+BCMRAMFN(pktpool_registry_cmp)(int id, pktpool_t *pp)
+{
+	return pktpools_registry[id] == pp;
+}
+
+int /* Construct a pool registry to serve a maximum of total_pools */
+pktpool_attach(osl_t *osh, uint32 total_pools)
+{
+	uint32 poolid;
+
+	if (pktpools_max != 0U) {
+		return BCME_ERROR;
+	}
+
+	ASSERT(total_pools <= PKTPOOL_MAXIMUM_ID);
+
+	/* Initialize registry: reserve slot#0 and tag others as free */
+	PKTPOOL_REGISTRY_RSV();		/* reserve slot#0 */
+
+	PKTPOOL_REGISTRY_FOREACH(poolid) {	/* tag all unreserved entries as free */
+		PKTPOOL_REGISTRY_CLR(poolid);
+	}
+
+	pktpools_max = total_pools;
+
+	return (int)pktpools_max;
+}
+
+int /* Destruct the pool registry. Ascertain all pools were first de-inited */
+pktpool_dettach(osl_t *osh)
+{
+	uint32 poolid;
+
+	if (pktpools_max == 0U) {
+		return BCME_OK;
+	}
+
+	/* Ascertain that no pools are still registered */
+	ASSERT(PKTPOOL_REGISTRY_ISRSVD()); /* assert reserved slot */
+
+	PKTPOOL_REGISTRY_FOREACH(poolid) {	/* ascertain all others are free */
+		ASSERT(PKTPOOL_REGISTRY_ISCLR(poolid));
+	}
+
+	pktpools_max = 0U; /* restore boot state */
+
+	return BCME_OK;
+}
+
+static int	/* Register a pool in a free slot; return the registry slot index */
+pktpool_register(pktpool_t * poolptr)
+{
+	uint32 poolid;
+
+	if (pktpools_max == 0U) {
+		return PKTPOOL_INVALID_ID; /* registry has not yet been constructed */
+	}
+
+	ASSERT(pktpools_max != 0U);
+
+	/* find an empty slot in pktpools_registry */
+	PKTPOOL_REGISTRY_FOREACH(poolid) {
+		if (PKTPOOL_REGISTRY_ISCLR(poolid)) {
+			PKTPOOL_REGISTRY_SET(poolid, POOLPTR(poolptr)); /* register pool */
+			return (int)poolid; /* return pool ID */
+		}
+	} /* FOREACH */
+
+	return PKTPOOL_INVALID_ID;	/* error: registry is full */
+}
+
+static int	/* Deregister a pktpool, given the pool pointer; tag slot as free */
+pktpool_deregister(pktpool_t * poolptr)
+{
+	uint32 poolid;
+
+	ASSERT(POOLPTR(poolptr) != POOLPTR(NULL));
+
+	poolid = POOLID(poolptr);
+	ASSERT(poolid <= pktpools_max);
+
+	/* Asertain that a previously registered poolptr is being de-registered */
+	if (PKTPOOL_REGISTRY_CMP(poolid, POOLPTR(poolptr))) {
+		PKTPOOL_REGISTRY_CLR(poolid); /* mark as free */
+	} else {
+		ASSERT(0);
+		return BCME_ERROR; /* mismatch in registry */
+	}
+
+	return BCME_OK;
+}
+
+
+/*
+ * pktpool_init:
+ * User provides a pktpool_t sturcture and specifies the number of packets to
+ * be pre-filled into the pool (pplen). The size of all packets in a pool must
+ * be the same and is specified by plen.
+ * pktpool_init first attempts to register the pool and fetch a unique poolid.
+ * If registration fails, it is considered an BCME_ERR, caused by either the
+ * registry was not pre-created (pktpool_attach) or the registry is full.
+ * If registration succeeds, then the requested number of packets will be filled
+ * into the pool as part of initialization. In the event that there is no
+ * available memory to service the request, then BCME_NOMEM will be returned
+ * along with the count of how many packets were successfully allocated.
+ * In dongle builds, prior to memory reclaimation, one should limit the number
+ * of packets to be allocated during pktpool_init and fill the pool up after
+ * reclaim stage.
+ */
+int
+pktpool_init(osl_t *osh, pktpool_t *pktp, int *pplen, int plen, bool istx, uint8 type)
+{
+	int i, err = BCME_OK;
+	int pktplen;
+	uint8 pktp_id;
+
+	ASSERT(pktp != NULL);
+	ASSERT(osh != NULL);
+	ASSERT(pplen != NULL);
+
+	pktplen = *pplen;
+
+	bzero(pktp, sizeof(pktpool_t));
+
+	/* assign a unique pktpool id */
+	if ((pktp_id = (uint8) pktpool_register(pktp)) == PKTPOOL_INVALID_ID) {
+		return BCME_ERROR;
+	}
+	POOLSETID(pktp, pktp_id);
+
+	pktp->inited = TRUE;
+	pktp->istx = istx ? TRUE : FALSE;
+	pktp->plen = (uint16)plen;
+	pktp->type = type;
+
+	pktp->maxlen = PKTPOOL_LEN_MAX;
+	pktplen = LIMIT_TO_MAX(pktplen, pktp->maxlen);
+
+	for (i = 0; i < pktplen; i++) {
+		void *p;
+		p = PKTGET(osh, plen, TRUE);
+
+		if (p == NULL) {
+			/* Not able to allocate all requested pkts
+			 * so just return what was actually allocated
+			 * We can add to the pool later
+			 */
+			if (pktp->freelist == NULL) /* pktpool free list is empty */
+				err = BCME_NOMEM;
+
+			goto exit;
+		}
+
+		PKTSETPOOL(osh, p, TRUE, pktp); /* Tag packet with pool ID */
+
+		PKTSETFREELIST(p, pktp->freelist); /* insert p at head of free list */
+		pktp->freelist = p;
+
+		pktp->avail++;
+
+#ifdef BCMDBG_POOL
+		pktp->dbg_q[pktp->dbg_qlen++].p = p;
+#endif
+	}
+
+exit:
+	pktp->len = pktp->avail;
+
+	*pplen = pktp->len;
+	return err;
+}
+
+/*
+ * pktpool_deinit:
+ * Prior to freeing a pktpool, all packets must be first freed into the pktpool.
+ * Upon pktpool_deinit, all packets in the free pool will be freed to the heap.
+ * An assert is in place to ensure that there are no packets still lingering
+ * around. Packets freed to a pool after the deinit will cause a memory
+ * corruption as the pktpool_t structure no longer exists.
+ */
+int
+pktpool_deinit(osl_t *osh, pktpool_t *pktp)
+{
+	uint16 freed = 0;
+
+	ASSERT(osh != NULL);
+	ASSERT(pktp != NULL);
+
+#ifdef BCMDBG_POOL
+	{
+		int i;
+		for (i = 0; i <= pktp->len; i++) {
+			pktp->dbg_q[i].p = NULL;
+		}
+	}
+#endif
+
+	while (pktp->freelist != NULL) {
+		void * p = pktp->freelist;
+
+		pktp->freelist = PKTFREELIST(p); /* unlink head packet from free list */
+		PKTSETFREELIST(p, NULL);
+
+		PKTSETPOOL(osh, p, FALSE, NULL); /* clear pool ID tag in pkt */
+
+		PKTFREE(osh, p, pktp->istx); /* free the packet */
+
+		freed++;
+		ASSERT(freed <= pktp->len);
+	}
+
+	pktp->avail -= freed;
+	ASSERT(pktp->avail == 0);
+
+	pktp->len -= freed;
+
+	pktpool_deregister(pktp); /* release previously acquired unique pool id */
+	POOLSETID(pktp, PKTPOOL_INVALID_ID);
+
+	pktp->inited = FALSE;
+
+	/* Are there still pending pkts? */
+	ASSERT(pktp->len == 0);
+
+	return 0;
+}
+
+int
+pktpool_fill(osl_t *osh, pktpool_t *pktp, bool minimal)
+{
+	void *p;
+	int err = 0;
+	int len, psize, maxlen;
+
+	ASSERT(pktp->plen != 0);
+
+	maxlen = pktp->maxlen;
+	psize = minimal ? (maxlen >> 2) : maxlen;
+	for (len = (int)pktp->len; len < psize; len++) {
+
+		p = PKTGET(osh, pktp->len, TRUE);
+
+		if (p == NULL) {
+			err = BCME_NOMEM;
+			break;
+		}
+
+		if (pktpool_add(pktp, p) != BCME_OK) {
+			PKTFREE(osh, p, FALSE);
+			err = BCME_ERROR;
+			break;
+		}
+	}
+
+	return err;
+}
+
+static void *
+pktpool_deq(pktpool_t *pktp)
+{
+	void *p;
+
+	if (pktp->avail == 0)
+		return NULL;
+
+	ASSERT(pktp->freelist != NULL);
+
+	p = pktp->freelist;  /* dequeue packet from head of pktpool free list */
+	pktp->freelist = PKTFREELIST(p); /* free list points to next packet */
+	PKTSETFREELIST(p, NULL);
+
+	pktp->avail--;
+
+	return p;
+}
+
+static void
+pktpool_enq(pktpool_t *pktp, void *p)
+{
+	ASSERT(p != NULL);
+
+	PKTSETFREELIST(p, pktp->freelist); /* insert at head of pktpool free list */
+	pktp->freelist = p; /* free list points to newly inserted packet */
+
+	pktp->avail++;
+	ASSERT(pktp->avail <= pktp->len);
+}
+
+/* utility for registering host addr fill function called from pciedev */
+int
+/* BCMATTACHFN */
+(pktpool_hostaddr_fill_register)(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg)
+{
+
+	ASSERT(cb != NULL);
+
+	ASSERT(pktp->cbext.cb == NULL);
+	pktp->cbext.cb = cb;
+	pktp->cbext.arg = arg;
+	return 0;
+}
+
+int
+pktpool_rxcplid_fill_register(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg)
+{
+
+	ASSERT(cb != NULL);
+
+	ASSERT(pktp->rxcplidfn.cb == NULL);
+	pktp->rxcplidfn.cb = cb;
+	pktp->rxcplidfn.arg = arg;
+	return 0;
+}
+/* Callback functions for split rx modes */
+/* when evr host posts rxbuffer, invike dma_rxfill from pciedev layer */
+void
+pktpool_invoke_dmarxfill(pktpool_t *pktp)
+{
+	ASSERT(pktp->dmarxfill.cb);
+	ASSERT(pktp->dmarxfill.arg);
+
+	if (pktp->dmarxfill.cb)
+		pktp->dmarxfill.cb(pktp, pktp->dmarxfill.arg);
+}
+int
+pkpool_haddr_avail_register_cb(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
+{
+
+	ASSERT(cb != NULL);
+
+	pktp->dmarxfill.cb = cb;
+	pktp->dmarxfill.arg = arg;
+
+	return 0;
+}
+/* No BCMATTACHFN as it is used in xdc_enable_ep which is not an attach function */
+int
+pktpool_avail_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
+{
+	int i;
+
+	ASSERT(cb != NULL);
+
+	i = pktp->cbcnt;
+	if (i == PKTPOOL_CB_MAX)
+		return BCME_ERROR;
+
+	ASSERT(pktp->cbs[i].cb == NULL);
+	pktp->cbs[i].cb = cb;
+	pktp->cbs[i].arg = arg;
+	pktp->cbcnt++;
+
+	return 0;
+}
+
+int
+pktpool_empty_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
+{
+	int i;
+
+	ASSERT(cb != NULL);
+
+	i = pktp->ecbcnt;
+	if (i == PKTPOOL_CB_MAX)
+		return BCME_ERROR;
+
+	ASSERT(pktp->ecbs[i].cb == NULL);
+	pktp->ecbs[i].cb = cb;
+	pktp->ecbs[i].arg = arg;
+	pktp->ecbcnt++;
+
+	return 0;
+}
+
+static int
+pktpool_empty_notify(pktpool_t *pktp)
+{
+	int i;
+
+	pktp->empty = TRUE;
+	for (i = 0; i < pktp->ecbcnt; i++) {
+		ASSERT(pktp->ecbs[i].cb != NULL);
+		pktp->ecbs[i].cb(pktp, pktp->ecbs[i].arg);
+	}
+	pktp->empty = FALSE;
+
+	return 0;
+}
+
+#ifdef BCMDBG_POOL
+int
+pktpool_dbg_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
+{
+	int i;
+
+	ASSERT(cb);
+
+	i = pktp->dbg_cbcnt;
+	if (i == PKTPOOL_CB_MAX)
+		return BCME_ERROR;
+
+	ASSERT(pktp->dbg_cbs[i].cb == NULL);
+	pktp->dbg_cbs[i].cb = cb;
+	pktp->dbg_cbs[i].arg = arg;
+	pktp->dbg_cbcnt++;
+
+	return 0;
+}
+
+int pktpool_dbg_notify(pktpool_t *pktp);
+
+int
+pktpool_dbg_notify(pktpool_t *pktp)
+{
+	int i;
+
+	for (i = 0; i < pktp->dbg_cbcnt; i++) {
+		ASSERT(pktp->dbg_cbs[i].cb);
+		pktp->dbg_cbs[i].cb(pktp, pktp->dbg_cbs[i].arg);
+	}
+
+	return 0;
+}
+
+int
+pktpool_dbg_dump(pktpool_t *pktp)
+{
+	int i;
+
+	printf("pool len=%d maxlen=%d\n",  pktp->dbg_qlen, pktp->maxlen);
+	for (i = 0; i < pktp->dbg_qlen; i++) {
+		ASSERT(pktp->dbg_q[i].p);
+		printf("%d, p: 0x%x dur:%lu us state:%d\n", i,
+			pktp->dbg_q[i].p, pktp->dbg_q[i].dur/100, PKTPOOLSTATE(pktp->dbg_q[i].p));
+	}
+
+	return 0;
+}
+
+int
+pktpool_stats_dump(pktpool_t *pktp, pktpool_stats_t *stats)
+{
+	int i;
+	int state;
+
+	bzero(stats, sizeof(pktpool_stats_t));
+	for (i = 0; i < pktp->dbg_qlen; i++) {
+		ASSERT(pktp->dbg_q[i].p != NULL);
+
+		state = PKTPOOLSTATE(pktp->dbg_q[i].p);
+		switch (state) {
+			case POOL_TXENQ:
+				stats->enq++; break;
+			case POOL_TXDH:
+				stats->txdh++; break;
+			case POOL_TXD11:
+				stats->txd11++; break;
+			case POOL_RXDH:
+				stats->rxdh++; break;
+			case POOL_RXD11:
+				stats->rxd11++; break;
+			case POOL_RXFILL:
+				stats->rxfill++; break;
+			case POOL_IDLE:
+				stats->idle++; break;
+		}
+	}
+
+	return 0;
+}
+
+int
+pktpool_start_trigger(pktpool_t *pktp, void *p)
+{
+	uint32 cycles, i;
+
+	if (!PKTPOOL(OSH_NULL, p))
+		return 0;
+
+	OSL_GETCYCLES(cycles);
+
+	for (i = 0; i < pktp->dbg_qlen; i++) {
+		ASSERT(pktp->dbg_q[i].p != NULL);
+
+		if (pktp->dbg_q[i].p == p) {
+			pktp->dbg_q[i].cycles = cycles;
+			break;
+		}
+	}
+
+	return 0;
+}
+
+int pktpool_stop_trigger(pktpool_t *pktp, void *p);
+int
+pktpool_stop_trigger(pktpool_t *pktp, void *p)
+{
+	uint32 cycles, i;
+
+	if (!PKTPOOL(OSH_NULL, p))
+		return 0;
+
+	OSL_GETCYCLES(cycles);
+
+	for (i = 0; i < pktp->dbg_qlen; i++) {
+		ASSERT(pktp->dbg_q[i].p != NULL);
+
+		if (pktp->dbg_q[i].p == p) {
+			if (pktp->dbg_q[i].cycles == 0)
+				break;
+
+			if (cycles >= pktp->dbg_q[i].cycles)
+				pktp->dbg_q[i].dur = cycles - pktp->dbg_q[i].cycles;
+			else
+				pktp->dbg_q[i].dur =
+					(((uint32)-1) - pktp->dbg_q[i].cycles) + cycles + 1;
+
+			pktp->dbg_q[i].cycles = 0;
+			break;
+		}
+	}
+
+	return 0;
+}
+#endif /* BCMDBG_POOL */
+
+int
+pktpool_avail_notify_normal(osl_t *osh, pktpool_t *pktp)
+{
+	ASSERT(pktp);
+	pktp->availcb_excl = NULL;
+	return 0;
+}
+
+int
+pktpool_avail_notify_exclusive(osl_t *osh, pktpool_t *pktp, pktpool_cb_t cb)
+{
+	int i;
+
+	ASSERT(pktp);
+	ASSERT(pktp->availcb_excl == NULL);
+	for (i = 0; i < pktp->cbcnt; i++) {
+		if (cb == pktp->cbs[i].cb) {
+			pktp->availcb_excl = &pktp->cbs[i];
+			break;
+		}
+	}
+
+	if (pktp->availcb_excl == NULL)
+		return BCME_ERROR;
+	else
+		return 0;
+}
+
+static int
+pktpool_avail_notify(pktpool_t *pktp)
+{
+	int i, k, idx;
+	int avail;
+
+	ASSERT(pktp);
+	if (pktp->availcb_excl != NULL) {
+		pktp->availcb_excl->cb(pktp, pktp->availcb_excl->arg);
+		return 0;
+	}
+
+	k = pktp->cbcnt - 1;
+	for (i = 0; i < pktp->cbcnt; i++) {
+		avail = pktp->avail;
+
+		if (avail) {
+			if (pktp->cbtoggle)
+				idx = i;
+			else
+				idx = k--;
+
+			ASSERT(pktp->cbs[idx].cb != NULL);
+			pktp->cbs[idx].cb(pktp, pktp->cbs[idx].arg);
+		}
+	}
+
+	/* Alternate between filling from head or tail
+	 */
+	pktp->cbtoggle ^= 1;
+
+	return 0;
+}
+
+void *
+pktpool_get(pktpool_t *pktp)
+{
+	void *p;
+
+	p = pktpool_deq(pktp);
+
+	if (p == NULL) {
+		/* Notify and try to reclaim tx pkts */
+		if (pktp->ecbcnt)
+			pktpool_empty_notify(pktp);
+
+		p = pktpool_deq(pktp);
+		if (p == NULL)
+			return NULL;
+	}
+
+	return p;
+}
+
+void
+pktpool_free(pktpool_t *pktp, void *p)
+{
+	ASSERT(p != NULL);
+#ifdef BCMDBG_POOL
+	/* pktpool_stop_trigger(pktp, p); */
+#endif
+
+	pktpool_enq(pktp, p);
+
+	if (pktp->emptycb_disable)
+		return;
+
+	if (pktp->cbcnt) {
+		if (pktp->empty == FALSE)
+			pktpool_avail_notify(pktp);
+	}
+}
+
+int
+pktpool_add(pktpool_t *pktp, void *p)
+{
+	ASSERT(p != NULL);
+
+	if (pktp->len == pktp->maxlen)
+		return BCME_RANGE;
+
+	/* pkts in pool have same length */
+	ASSERT(pktp->plen == PKTLEN(OSH_NULL, p));
+	PKTSETPOOL(OSH_NULL, p, TRUE, pktp);
+
+	pktp->len++;
+	pktpool_enq(pktp, p);
+
+#ifdef BCMDBG_POOL
+	pktp->dbg_q[pktp->dbg_qlen++].p = p;
+#endif
+
+	return 0;
+}
+
+/* Force pktpool_setmaxlen () into RAM as it uses a constant
+ * (PKTPOOL_LEN_MAX) that may be changed post tapeout for ROM-based chips.
+ */
+int
+BCMRAMFN(pktpool_setmaxlen)(pktpool_t *pktp, uint16 maxlen)
+{
+	if (maxlen > PKTPOOL_LEN_MAX)
+		maxlen = PKTPOOL_LEN_MAX;
+
+	/* if pool is already beyond maxlen, then just cap it
+	 * since we currently do not reduce the pool len
+	 * already allocated
+	 */
+	pktp->maxlen = (pktp->len > maxlen) ? pktp->len : maxlen;
+
+	return pktp->maxlen;
+}
+
+void
+pktpool_emptycb_disable(pktpool_t *pktp, bool disable)
+{
+	ASSERT(pktp);
+
+	pktp->emptycb_disable = disable;
+}
+
+bool
+pktpool_emptycb_disabled(pktpool_t *pktp)
+{
+	ASSERT(pktp);
+	return pktp->emptycb_disable;
+}
diff --git a/drivers/net/wireless/bcmdhd/hnd_pktq.c b/drivers/net/wireless/bcmdhd/hnd_pktq.c
new file mode 100644
index 0000000..221c30c
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/hnd_pktq.c
@@ -0,0 +1,602 @@
+/*
+ * HND generic pktq operation primitives
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: $
+ */
+
+#include <typedefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <hnd_pktq.h>
+
+/*
+ * osl multiple-precedence packet queue
+ * hi_prec is always >= the number of the highest non-empty precedence
+ */
+void * BCMFASTPATH
+pktq_penq(struct pktq *pq, int prec, void *p)
+{
+	struct pktq_prec *q;
+
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+	ASSERT(PKTLINK(p) == NULL);         /* queueing chains not allowed */
+
+	ASSERT(!pktq_full(pq));
+	ASSERT(!pktq_pfull(pq, prec));
+
+	q = &pq->q[prec];
+
+	if (q->head)
+		PKTSETLINK(q->tail, p);
+	else
+		q->head = p;
+
+	q->tail = p;
+	q->len++;
+
+	pq->len++;
+
+	if (pq->hi_prec < prec)
+		pq->hi_prec = (uint8)prec;
+
+	return p;
+}
+
+void * BCMFASTPATH
+pktq_penq_head(struct pktq *pq, int prec, void *p)
+{
+	struct pktq_prec *q;
+
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+	ASSERT(PKTLINK(p) == NULL);         /* queueing chains not allowed */
+
+	ASSERT(!pktq_full(pq));
+	ASSERT(!pktq_pfull(pq, prec));
+
+	q = &pq->q[prec];
+
+	if (q->head == NULL)
+		q->tail = p;
+
+	PKTSETLINK(p, q->head);
+	q->head = p;
+	q->len++;
+
+	pq->len++;
+
+	if (pq->hi_prec < prec)
+		pq->hi_prec = (uint8)prec;
+
+	return p;
+}
+
+/*
+ * Append spktq 'list' to the tail of pktq 'pq'
+ */
+void BCMFASTPATH
+pktq_append(struct pktq *pq, int prec, struct spktq *list)
+{
+	struct pktq_prec *q;
+	struct pktq_prec *list_q;
+
+	list_q = &list->q[0];
+
+	/* empty list check */
+	if (list_q->head == NULL)
+		return;
+
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+	ASSERT(PKTLINK(list_q->tail) == NULL);         /* terminated list */
+
+	ASSERT(!pktq_full(pq));
+	ASSERT(!pktq_pfull(pq, prec));
+
+	q = &pq->q[prec];
+
+	if (q->head)
+		PKTSETLINK(q->tail, list_q->head);
+	else
+		q->head = list_q->head;
+
+	q->tail = list_q->tail;
+	q->len += list_q->len;
+	pq->len += list_q->len;
+
+	if (pq->hi_prec < prec)
+		pq->hi_prec = (uint8)prec;
+
+	list_q->head = NULL;
+	list_q->tail = NULL;
+	list_q->len = 0;
+	list->len = 0;
+}
+
+/*
+ * Prepend spktq 'list' to the head of pktq 'pq'
+ */
+void BCMFASTPATH
+pktq_prepend(struct pktq *pq, int prec, struct spktq *list)
+{
+	struct pktq_prec *q;
+	struct pktq_prec *list_q;
+
+	list_q = &list->q[0];
+
+	/* empty list check */
+	if (list_q->head == NULL)
+		return;
+
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+	ASSERT(PKTLINK(list_q->tail) == NULL);         /* terminated list */
+
+	ASSERT(!pktq_full(pq));
+	ASSERT(!pktq_pfull(pq, prec));
+
+	q = &pq->q[prec];
+
+	/* set the tail packet of list to point at the former pq head */
+	PKTSETLINK(list_q->tail, q->head);
+	/* the new q head is the head of list */
+	q->head = list_q->head;
+
+	/* If the q tail was non-null, then it stays as is.
+	 * If the q tail was null, it is now the tail of list
+	 */
+	if (q->tail == NULL) {
+		q->tail = list_q->tail;
+	}
+
+	q->len += list_q->len;
+	pq->len += list_q->len;
+
+	if (pq->hi_prec < prec)
+		pq->hi_prec = (uint8)prec;
+
+	list_q->head = NULL;
+	list_q->tail = NULL;
+	list_q->len = 0;
+	list->len = 0;
+}
+
+void * BCMFASTPATH
+pktq_pdeq(struct pktq *pq, int prec)
+{
+	struct pktq_prec *q;
+	void *p;
+
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+
+	q = &pq->q[prec];
+
+	if ((p = q->head) == NULL)
+		return NULL;
+
+	if ((q->head = PKTLINK(p)) == NULL)
+		q->tail = NULL;
+
+	q->len--;
+
+	pq->len--;
+
+	PKTSETLINK(p, NULL);
+
+	return p;
+}
+
+void * BCMFASTPATH
+pktq_pdeq_prev(struct pktq *pq, int prec, void *prev_p)
+{
+	struct pktq_prec *q;
+	void *p;
+
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+
+	q = &pq->q[prec];
+
+	if (prev_p == NULL)
+		return NULL;
+
+	if ((p = PKTLINK(prev_p)) == NULL)
+		return NULL;
+
+	q->len--;
+
+	pq->len--;
+
+	PKTSETLINK(prev_p, PKTLINK(p));
+	PKTSETLINK(p, NULL);
+
+	return p;
+}
+
+void * BCMFASTPATH
+pktq_pdeq_with_fn(struct pktq *pq, int prec, ifpkt_cb_t fn, int arg)
+{
+	struct pktq_prec *q;
+	void *p, *prev = NULL;
+
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+
+	q = &pq->q[prec];
+	p = q->head;
+
+	while (p) {
+		if (fn == NULL || (*fn)(p, arg)) {
+			break;
+		} else {
+			prev = p;
+			p = PKTLINK(p);
+		}
+	}
+	if (p == NULL)
+		return NULL;
+
+	if (prev == NULL) {
+		if ((q->head = PKTLINK(p)) == NULL) {
+			q->tail = NULL;
+		}
+	} else {
+		PKTSETLINK(prev, PKTLINK(p));
+		if (q->tail == p) {
+			q->tail = prev;
+		}
+	}
+
+	q->len--;
+
+	pq->len--;
+
+	PKTSETLINK(p, NULL);
+
+	return p;
+}
+
+void * BCMFASTPATH
+pktq_pdeq_tail(struct pktq *pq, int prec)
+{
+	struct pktq_prec *q;
+	void *p, *prev;
+
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+
+	q = &pq->q[prec];
+
+	if ((p = q->head) == NULL)
+		return NULL;
+
+	for (prev = NULL; p != q->tail; p = PKTLINK(p))
+		prev = p;
+
+	if (prev)
+		PKTSETLINK(prev, NULL);
+	else
+		q->head = NULL;
+
+	q->tail = prev;
+	q->len--;
+
+	pq->len--;
+
+	return p;
+}
+
+void
+pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir, ifpkt_cb_t fn, int arg)
+{
+	struct pktq_prec *q;
+	void *p, *prev = NULL;
+
+	q = &pq->q[prec];
+	p = q->head;
+	while (p) {
+		if (fn == NULL || (*fn)(p, arg)) {
+			bool head = (p == q->head);
+			if (head)
+				q->head = PKTLINK(p);
+			else
+				PKTSETLINK(prev, PKTLINK(p));
+			PKTSETLINK(p, NULL);
+			PKTFREE(osh, p, dir);
+			q->len--;
+			pq->len--;
+			p = (head ? q->head : PKTLINK(prev));
+		} else {
+			prev = p;
+			p = PKTLINK(p);
+		}
+	}
+
+	if (q->head == NULL) {
+		ASSERT(q->len == 0);
+		q->tail = NULL;
+	}
+}
+
+bool BCMFASTPATH
+pktq_pdel(struct pktq *pq, void *pktbuf, int prec)
+{
+	struct pktq_prec *q;
+	void *p;
+
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+
+	/* Should this just assert pktbuf? */
+	if (!pktbuf)
+		return FALSE;
+
+	q = &pq->q[prec];
+
+	if (q->head == pktbuf) {
+		if ((q->head = PKTLINK(pktbuf)) == NULL)
+			q->tail = NULL;
+	} else {
+		for (p = q->head; p && PKTLINK(p) != pktbuf; p = PKTLINK(p))
+			;
+		if (p == NULL)
+			return FALSE;
+
+		PKTSETLINK(p, PKTLINK(pktbuf));
+		if (q->tail == pktbuf)
+			q->tail = p;
+	}
+
+	q->len--;
+	pq->len--;
+	PKTSETLINK(pktbuf, NULL);
+	return TRUE;
+}
+
+void
+pktq_init(struct pktq *pq, int num_prec, int max_len)
+{
+	int prec;
+
+	ASSERT(num_prec > 0 && num_prec <= PKTQ_MAX_PREC);
+
+	/* pq is variable size; only zero out what's requested */
+	bzero(pq, OFFSETOF(struct pktq, q) + (sizeof(struct pktq_prec) * num_prec));
+
+	pq->num_prec = (uint16)num_prec;
+
+	pq->max = (uint16)max_len;
+
+	for (prec = 0; prec < num_prec; prec++)
+		pq->q[prec].max = pq->max;
+}
+
+void
+pktq_set_max_plen(struct pktq *pq, int prec, int max_len)
+{
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+
+	if (prec < pq->num_prec)
+		pq->q[prec].max = (uint16)max_len;
+}
+
+void * BCMFASTPATH
+pktq_deq(struct pktq *pq, int *prec_out)
+{
+	struct pktq_prec *q;
+	void *p;
+	int prec;
+
+	if (pq->len == 0)
+		return NULL;
+
+	while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
+		pq->hi_prec--;
+
+	q = &pq->q[prec];
+
+	if ((p = q->head) == NULL)
+		return NULL;
+
+	if ((q->head = PKTLINK(p)) == NULL)
+		q->tail = NULL;
+
+	q->len--;
+
+	pq->len--;
+
+	if (prec_out)
+		*prec_out = prec;
+
+	PKTSETLINK(p, NULL);
+
+	return p;
+}
+
+void * BCMFASTPATH
+pktq_deq_tail(struct pktq *pq, int *prec_out)
+{
+	struct pktq_prec *q;
+	void *p, *prev;
+	int prec;
+
+	if (pq->len == 0)
+		return NULL;
+
+	for (prec = 0; prec < pq->hi_prec; prec++)
+		if (pq->q[prec].head)
+			break;
+
+	q = &pq->q[prec];
+
+	if ((p = q->head) == NULL)
+		return NULL;
+
+	for (prev = NULL; p != q->tail; p = PKTLINK(p))
+		prev = p;
+
+	if (prev)
+		PKTSETLINK(prev, NULL);
+	else
+		q->head = NULL;
+
+	q->tail = prev;
+	q->len--;
+
+	pq->len--;
+
+	if (prec_out)
+		*prec_out = prec;
+
+	PKTSETLINK(p, NULL);
+
+	return p;
+}
+
+void *
+pktq_peek(struct pktq *pq, int *prec_out)
+{
+	int prec;
+
+	if (pq->len == 0)
+		return NULL;
+
+	while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
+		pq->hi_prec--;
+
+	if (prec_out)
+		*prec_out = prec;
+
+	return (pq->q[prec].head);
+}
+
+void *
+pktq_peek_tail(struct pktq *pq, int *prec_out)
+{
+	int prec;
+
+	if (pq->len == 0)
+		return NULL;
+
+	for (prec = 0; prec < pq->hi_prec; prec++)
+		if (pq->q[prec].head)
+			break;
+
+	if (prec_out)
+		*prec_out = prec;
+
+	return (pq->q[prec].tail);
+}
+
+void
+pktq_flush(osl_t *osh, struct pktq *pq, bool dir, ifpkt_cb_t fn, int arg)
+{
+	int prec;
+
+	/* Optimize flush, if pktq len = 0, just return.
+	 * pktq len of 0 means pktq's prec q's are all empty.
+	 */
+	if (pq->len == 0) {
+		return;
+	}
+
+	for (prec = 0; prec < pq->num_prec; prec++)
+		pktq_pflush(osh, pq, prec, dir, fn, arg);
+	if (fn == NULL)
+		ASSERT(pq->len == 0);
+}
+
+/* Return sum of lengths of a specific set of precedences */
+int
+pktq_mlen(struct pktq *pq, uint prec_bmp)
+{
+	int prec, len;
+
+	len = 0;
+
+	for (prec = 0; prec <= pq->hi_prec; prec++)
+		if (prec_bmp & (1 << prec))
+			len += pq->q[prec].len;
+
+	return len;
+}
+
+/* Priority peek from a specific set of precedences */
+void * BCMFASTPATH
+pktq_mpeek(struct pktq *pq, uint prec_bmp, int *prec_out)
+{
+	struct pktq_prec *q;
+	void *p;
+	int prec;
+
+	if (pq->len == 0)
+	{
+		return NULL;
+	}
+	while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
+		pq->hi_prec--;
+
+	while ((prec_bmp & (1 << prec)) == 0 || pq->q[prec].head == NULL)
+		if (prec-- == 0)
+			return NULL;
+
+	q = &pq->q[prec];
+
+	if ((p = q->head) == NULL)
+		return NULL;
+
+	if (prec_out)
+		*prec_out = prec;
+
+	return p;
+}
+/* Priority dequeue from a specific set of precedences */
+void * BCMFASTPATH
+pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out)
+{
+	struct pktq_prec *q;
+	void *p;
+	int prec;
+
+	if (pq->len == 0)
+		return NULL;
+
+	while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
+		pq->hi_prec--;
+
+	while ((pq->q[prec].head == NULL) || ((prec_bmp & (1 << prec)) == 0))
+		if (prec-- == 0)
+			return NULL;
+
+	q = &pq->q[prec];
+
+	if ((p = q->head) == NULL)
+		return NULL;
+
+	if ((q->head = PKTLINK(p)) == NULL)
+		q->tail = NULL;
+
+	q->len--;
+
+	if (prec_out)
+		*prec_out = prec;
+
+	pq->len--;
+
+	PKTSETLINK(p, NULL);
+
+	return p;
+}
diff --git a/drivers/net/wireless/bcmdhd/hndpmu.c b/drivers/net/wireless/bcmdhd/hndpmu.c
new file mode 100644
index 0000000..f0a2d9c
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/hndpmu.c
@@ -0,0 +1,274 @@
+/*
+ * Misc utility routines for accessing PMU corerev specific features
+ * of the SiliconBackplane-based Broadcom chips.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: hndpmu.c 475037 2014-05-02 23:55:49Z $
+ */
+
+
+/*
+ * Note: this file contains PLL/FLL related functions. A chip can contain multiple PLLs/FLLs.
+ * However, in the context of this file the baseband ('BB') PLL/FLL is referred to.
+ *
+ * Throughout this code, the prefixes 'pmu0_', 'pmu1_' and 'pmu2_' are used.
+ * They refer to different revisions of the PMU (which is at revision 18 @ Apr 25, 2012)
+ * pmu1_ marks the transition from PLL to ADFLL (Digital Frequency Locked Loop). It supports
+ * fractional frequency generation. pmu2_ does not support fractional frequency generation.
+ */
+
+#include <bcm_cfg.h>
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <siutils.h>
+#include <bcmdevs.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+#include <hndpmu.h>
+
+#define	PMU_ERROR(args)
+
+#define	PMU_MSG(args)
+
+/* To check in verbose debugging messages not intended
+ * to be on except on private builds.
+ */
+#define	PMU_NONE(args)
+
+/** contains resource bit positions for a specific chip */
+struct rsc_per_chip_s {
+	uint8 ht_avail;
+	uint8 macphy_clkavail;
+	uint8 ht_start;
+	uint8 otp_pu;
+};
+
+typedef struct rsc_per_chip_s rsc_per_chip_t;
+
+
+/* SDIO Pad drive strength to select value mappings.
+ * The last strength value in each table must be 0 (the tri-state value).
+ */
+typedef struct {
+	uint8 strength;			/* Pad Drive Strength in mA */
+	uint8 sel;			/* Chip-specific select value */
+} sdiod_drive_str_t;
+
+/* SDIO Drive Strength to sel value table for PMU Rev 1 */
+static const sdiod_drive_str_t sdiod_drive_strength_tab1[] = {
+	{4, 0x2},
+	{2, 0x3},
+	{1, 0x0},
+	{0, 0x0} };
+
+/* SDIO Drive Strength to sel value table for PMU Rev 2, 3 */
+static const sdiod_drive_str_t sdiod_drive_strength_tab2[] = {
+	{12, 0x7},
+	{10, 0x6},
+	{8, 0x5},
+	{6, 0x4},
+	{4, 0x2},
+	{2, 0x1},
+	{0, 0x0} };
+
+/* SDIO Drive Strength to sel value table for PMU Rev 8 (1.8V) */
+static const sdiod_drive_str_t sdiod_drive_strength_tab3[] = {
+	{32, 0x7},
+	{26, 0x6},
+	{22, 0x5},
+	{16, 0x4},
+	{12, 0x3},
+	{8, 0x2},
+	{4, 0x1},
+	{0, 0x0} };
+
+/* SDIO Drive Strength to sel value table for PMU Rev 11 (1.8v) */
+static const sdiod_drive_str_t sdiod_drive_strength_tab4_1v8[] = {
+	{32, 0x6},
+	{26, 0x7},
+	{22, 0x4},
+	{16, 0x5},
+	{12, 0x2},
+	{8, 0x3},
+	{4, 0x0},
+	{0, 0x1} };
+
+/* SDIO Drive Strength to sel value table for PMU Rev 11 (1.2v) */
+
+/* SDIO Drive Strength to sel value table for PMU Rev 11 (2.5v) */
+
+/* SDIO Drive Strength to sel value table for PMU Rev 13 (1.8v) */
+static const sdiod_drive_str_t sdiod_drive_strength_tab5_1v8[] = {
+	{6, 0x7},
+	{5, 0x6},
+	{4, 0x5},
+	{3, 0x4},
+	{2, 0x2},
+	{1, 0x1},
+	{0, 0x0} };
+
+/* SDIO Drive Strength to sel value table for PMU Rev 13 (3.3v) */
+
+/** SDIO Drive Strength to sel value table for PMU Rev 17 (1.8v) */
+static const sdiod_drive_str_t sdiod_drive_strength_tab6_1v8[] = {
+	{3, 0x3},
+	{2, 0x2},
+	{1, 0x1},
+	{0, 0x0} };
+
+
+/**
+ * SDIO Drive Strength to sel value table for 43143 PMU Rev 17, see Confluence 43143 Toplevel
+ * architecture page, section 'PMU Chip Control 1 Register definition', click link to picture
+ * BCM43143_sel_sdio_signals.jpg. Valid after PMU Chip Control 0 Register, bit31 (override) has
+ * been written '1'.
+ */
+#if !defined(BCM_SDIO_VDDIO) || BCM_SDIO_VDDIO == 33
+
+static const sdiod_drive_str_t sdiod_drive_strength_tab7_3v3[] = {
+	/* note: for 14, 10, 6 and 2mA hw timing is not met according to rtl team */
+	{16, 0x7},
+	{12, 0x5},
+	{8,  0x3},
+	{4,  0x1} }; /* note: 43143 does not support tristate */
+
+#else
+
+static const sdiod_drive_str_t sdiod_drive_strength_tab7_1v8[] = {
+	/* note: for 7, 5, 3 and 1mA hw timing is not met according to rtl team */
+	{8, 0x7},
+	{6, 0x5},
+	{4,  0x3},
+	{2,  0x1} }; /* note: 43143 does not support tristate */
+
+#endif /* BCM_SDIO_VDDIO */
+
+#define SDIOD_DRVSTR_KEY(chip, pmu)	(((chip) << 16) | (pmu))
+
+/**
+ * Balance between stable SDIO operation and power consumption is achieved using this function.
+ * Note that each drive strength table is for a specific VDDIO of the SDIO pads, ideally this
+ * function should read the VDDIO itself to select the correct table. For now it has been solved
+ * with the 'BCM_SDIO_VDDIO' preprocessor constant.
+ *
+ * 'drivestrength': desired pad drive strength in mA. Drive strength of 0 requests tri-state (if
+ *		    hardware supports this), if no hw support drive strength is not programmed.
+ */
+void
+si_sdiod_drive_strength_init(si_t *sih, osl_t *osh, uint32 drivestrength)
+{
+	sdiod_drive_str_t *str_tab = NULL;
+	uint32 str_mask = 0;	/* only alter desired bits in PMU chipcontrol 1 register */
+	uint32 str_shift = 0;
+	uint32 str_ovr_pmuctl = PMU_CHIPCTL0; /* PMU chipcontrol register containing override bit */
+	uint32 str_ovr_pmuval = 0;            /* position of bit within this register */
+
+	if (!(sih->cccaps & CC_CAP_PMU)) {
+		return;
+	}
+
+	switch (SDIOD_DRVSTR_KEY(sih->chip, sih->pmurev)) {
+	case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 1):
+		str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab1;
+		str_mask = 0x30000000;
+		str_shift = 28;
+		break;
+	case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 2):
+	case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 3):
+	case SDIOD_DRVSTR_KEY(BCM4315_CHIP_ID, 4):
+		str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab2;
+		str_mask = 0x00003800;
+		str_shift = 11;
+		break;
+	case SDIOD_DRVSTR_KEY(BCM4336_CHIP_ID, 8):
+	case SDIOD_DRVSTR_KEY(BCM4336_CHIP_ID, 11):
+		if (sih->pmurev == 8) {
+			str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab3;
+		}
+		else if (sih->pmurev == 11) {
+			str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab4_1v8;
+		}
+		str_mask = 0x00003800;
+		str_shift = 11;
+		break;
+	case SDIOD_DRVSTR_KEY(BCM4330_CHIP_ID, 12):
+		str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab4_1v8;
+		str_mask = 0x00003800;
+		str_shift = 11;
+		break;
+	case SDIOD_DRVSTR_KEY(BCM43362_CHIP_ID, 13):
+		str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab5_1v8;
+		str_mask = 0x00003800;
+		str_shift = 11;
+		break;
+	case SDIOD_DRVSTR_KEY(BCM4334_CHIP_ID, 17):
+		str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab6_1v8;
+		str_mask = 0x00001800;
+		str_shift = 11;
+		break;
+	case SDIOD_DRVSTR_KEY(BCM43143_CHIP_ID, 17):
+#if !defined(BCM_SDIO_VDDIO) || BCM_SDIO_VDDIO == 33
+		if (drivestrength >=  ARRAYLAST(sdiod_drive_strength_tab7_3v3)->strength) {
+			str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab7_3v3;
+		}
+#else
+		if (drivestrength >=  ARRAYLAST(sdiod_drive_strength_tab7_1v8)->strength) {
+			str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab7_1v8;
+		}
+#endif /* BCM_SDIO_VDDIO */
+		str_mask = 0x00000007;
+		str_ovr_pmuval = PMU43143_CC0_SDIO_DRSTR_OVR;
+		break;
+	default:
+		PMU_MSG(("No SDIO Drive strength init done for chip %s rev %d pmurev %d\n",
+		         bcm_chipname(sih->chip, chn, 8), sih->chiprev, sih->pmurev));
+		break;
+	}
+
+	if (str_tab != NULL) {
+		uint32 cc_data_temp;
+		int i;
+
+		/* Pick the lowest available drive strength equal or greater than the
+		 * requested strength.	Drive strength of 0 requests tri-state.
+		 */
+		for (i = 0; drivestrength < str_tab[i].strength; i++)
+			;
+
+		if (i > 0 && drivestrength > str_tab[i].strength)
+			i--;
+
+		W_REG(osh, PMUREG(sih, chipcontrol_addr), PMU_CHIPCTL1);
+		cc_data_temp = R_REG(osh, PMUREG(sih, chipcontrol_data));
+		cc_data_temp &= ~str_mask;
+		cc_data_temp |= str_tab[i].sel << str_shift;
+		W_REG(osh, PMUREG(sih, chipcontrol_data), cc_data_temp);
+		if (str_ovr_pmuval) { /* enables the selected drive strength */
+			W_REG(osh,  PMUREG(sih, chipcontrol_addr), str_ovr_pmuctl);
+			OR_REG(osh, PMUREG(sih, chipcontrol_data), str_ovr_pmuval);
+		}
+		PMU_MSG(("SDIO: %dmA drive strength requested; set to %dmA\n",
+		         drivestrength, str_tab[i].strength));
+	}
+} /* si_sdiod_drive_strength_init */
diff --git a/drivers/net/wireless/bcmdhd/include/Makefile b/drivers/net/wireless/bcmdhd/include/Makefile
new file mode 100644
index 0000000..bc90f3c
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/Makefile
@@ -0,0 +1,53 @@
+#!/bin/bash
+#
+# This script serves following purpose:
+#
+# 1. It generates native version information by querying
+#    automerger maintained database to see where src/include
+#    came from
+# 2. For select components, as listed in compvers.sh
+#    it generates component version files
+#
+# Copyright 2005, Broadcom, Inc.
+#
+# $Id: Makefile 347587 2012-07-27 09:13:31Z $
+#
+
+export SRCBASE:=..
+
+TARGETS := epivers.h
+
+ifdef VERBOSE
+export VERBOSE
+endif
+
+all release: epivers compvers
+
+# Generate epivers.h for native branch url
+epivers:
+	bash epivers.sh
+
+# Generate component versions based on component url
+compvers:
+	@if [ -s "compvers.sh" ]; then \
+		echo "Generating component versions, if any"; \
+		bash compvers.sh; \
+	else \
+		echo "Skipping component version generation"; \
+	fi
+
+# Generate epivers.h for native branch version
+clean_compvers:
+	@if [ -s "compvers.sh" ]; then \
+		echo "bash compvers.sh clean"; \
+		bash compvers.sh clean; \
+	else \
+		echo "Skipping component version clean"; \
+	fi
+
+clean:
+	rm -f $(TARGETS) *.prev
+
+clean_all: clean clean_compvers
+
+.PHONY: all release clean epivers compvers clean_compvers
diff --git a/drivers/net/wireless/bcmdhd/include/aidmp.h b/drivers/net/wireless/bcmdhd/include/aidmp.h
new file mode 100644
index 0000000..4e07525
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/aidmp.h
@@ -0,0 +1,386 @@
+/*
+ * Broadcom AMBA Interconnect definitions.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: aidmp.h 456346 2014-02-18 16:48:52Z $
+ */
+
+#ifndef	_AIDMP_H
+#define	_AIDMP_H
+
+/* Manufacturer Ids */
+#define	MFGID_ARM		0x43b
+#define	MFGID_BRCM		0x4bf
+#define	MFGID_MIPS		0x4a7
+
+/* Component Classes */
+#define	CC_SIM			0
+#define	CC_EROM			1
+#define	CC_CORESIGHT		9
+#define	CC_VERIF		0xb
+#define	CC_OPTIMO		0xd
+#define	CC_GEN			0xe
+#define	CC_PRIMECELL		0xf
+
+/* Enumeration ROM registers */
+#define	ER_EROMENTRY		0x000
+#define	ER_REMAPCONTROL		0xe00
+#define	ER_REMAPSELECT		0xe04
+#define	ER_MASTERSELECT		0xe10
+#define	ER_ITCR			0xf00
+#define	ER_ITIP			0xf04
+
+/* Erom entries */
+#define	ER_TAG			0xe
+#define	ER_TAG1			0x6
+#define	ER_VALID		1
+#define	ER_CI			0
+#define	ER_MP			2
+#define	ER_ADD			4
+#define	ER_END			0xe
+#define	ER_BAD			0xffffffff
+
+/* EROM CompIdentA */
+#define	CIA_MFG_MASK		0xfff00000
+#define	CIA_MFG_SHIFT		20
+#define	CIA_CID_MASK		0x000fff00
+#define	CIA_CID_SHIFT		8
+#define	CIA_CCL_MASK		0x000000f0
+#define	CIA_CCL_SHIFT		4
+
+/* EROM CompIdentB */
+#define	CIB_REV_MASK		0xff000000
+#define	CIB_REV_SHIFT		24
+#define	CIB_NSW_MASK		0x00f80000
+#define	CIB_NSW_SHIFT		19
+#define	CIB_NMW_MASK		0x0007c000
+#define	CIB_NMW_SHIFT		14
+#define	CIB_NSP_MASK		0x00003e00
+#define	CIB_NSP_SHIFT		9
+#define	CIB_NMP_MASK		0x000001f0
+#define	CIB_NMP_SHIFT		4
+
+/* EROM MasterPortDesc */
+#define	MPD_MUI_MASK		0x0000ff00
+#define	MPD_MUI_SHIFT		8
+#define	MPD_MP_MASK		0x000000f0
+#define	MPD_MP_SHIFT		4
+
+/* EROM AddrDesc */
+#define	AD_ADDR_MASK		0xfffff000
+#define	AD_SP_MASK		0x00000f00
+#define	AD_SP_SHIFT		8
+#define	AD_ST_MASK		0x000000c0
+#define	AD_ST_SHIFT		6
+#define	AD_ST_SLAVE		0x00000000
+#define	AD_ST_BRIDGE		0x00000040
+#define	AD_ST_SWRAP		0x00000080
+#define	AD_ST_MWRAP		0x000000c0
+#define	AD_SZ_MASK		0x00000030
+#define	AD_SZ_SHIFT		4
+#define	AD_SZ_4K		0x00000000
+#define	AD_SZ_8K		0x00000010
+#define	AD_SZ_16K		0x00000020
+#define	AD_SZ_SZD		0x00000030
+#define	AD_AG32			0x00000008
+#define	AD_ADDR_ALIGN		0x00000fff
+#define	AD_SZ_BASE		0x00001000	/* 4KB */
+
+/* EROM SizeDesc */
+#define	SD_SZ_MASK		0xfffff000
+#define	SD_SG32			0x00000008
+#define	SD_SZ_ALIGN		0x00000fff
+
+
+#if !defined(_LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLY__)
+
+typedef volatile struct _aidmp {
+	uint32	oobselina30;	/* 0x000 */
+	uint32	oobselina74;	/* 0x004 */
+	uint32	PAD[6];
+	uint32	oobselinb30;	/* 0x020 */
+	uint32	oobselinb74;	/* 0x024 */
+	uint32	PAD[6];
+	uint32	oobselinc30;	/* 0x040 */
+	uint32	oobselinc74;	/* 0x044 */
+	uint32	PAD[6];
+	uint32	oobselind30;	/* 0x060 */
+	uint32	oobselind74;	/* 0x064 */
+	uint32	PAD[38];
+	uint32	oobselouta30;	/* 0x100 */
+	uint32	oobselouta74;	/* 0x104 */
+	uint32	PAD[6];
+	uint32	oobseloutb30;	/* 0x120 */
+	uint32	oobseloutb74;	/* 0x124 */
+	uint32	PAD[6];
+	uint32	oobseloutc30;	/* 0x140 */
+	uint32	oobseloutc74;	/* 0x144 */
+	uint32	PAD[6];
+	uint32	oobseloutd30;	/* 0x160 */
+	uint32	oobseloutd74;	/* 0x164 */
+	uint32	PAD[38];
+	uint32	oobsynca;	/* 0x200 */
+	uint32	oobseloutaen;	/* 0x204 */
+	uint32	PAD[6];
+	uint32	oobsyncb;	/* 0x220 */
+	uint32	oobseloutben;	/* 0x224 */
+	uint32	PAD[6];
+	uint32	oobsyncc;	/* 0x240 */
+	uint32	oobseloutcen;	/* 0x244 */
+	uint32	PAD[6];
+	uint32	oobsyncd;	/* 0x260 */
+	uint32	oobseloutden;	/* 0x264 */
+	uint32	PAD[38];
+	uint32	oobaextwidth;	/* 0x300 */
+	uint32	oobainwidth;	/* 0x304 */
+	uint32	oobaoutwidth;	/* 0x308 */
+	uint32	PAD[5];
+	uint32	oobbextwidth;	/* 0x320 */
+	uint32	oobbinwidth;	/* 0x324 */
+	uint32	oobboutwidth;	/* 0x328 */
+	uint32	PAD[5];
+	uint32	oobcextwidth;	/* 0x340 */
+	uint32	oobcinwidth;	/* 0x344 */
+	uint32	oobcoutwidth;	/* 0x348 */
+	uint32	PAD[5];
+	uint32	oobdextwidth;	/* 0x360 */
+	uint32	oobdinwidth;	/* 0x364 */
+	uint32	oobdoutwidth;	/* 0x368 */
+	uint32	PAD[37];
+	uint32	ioctrlset;	/* 0x400 */
+	uint32	ioctrlclear;	/* 0x404 */
+	uint32	ioctrl;		/* 0x408 */
+	uint32	PAD[61];
+	uint32	iostatus;	/* 0x500 */
+	uint32	PAD[127];
+	uint32	ioctrlwidth;	/* 0x700 */
+	uint32	iostatuswidth;	/* 0x704 */
+	uint32	PAD[62];
+	uint32	resetctrl;	/* 0x800 */
+	uint32	resetstatus;	/* 0x804 */
+	uint32	resetreadid;	/* 0x808 */
+	uint32	resetwriteid;	/* 0x80c */
+	uint32	PAD[60];
+	uint32	errlogctrl;	/* 0x900 */
+	uint32	errlogdone;	/* 0x904 */
+	uint32	errlogstatus;	/* 0x908 */
+	uint32	errlogaddrlo;	/* 0x90c */
+	uint32	errlogaddrhi;	/* 0x910 */
+	uint32	errlogid;	/* 0x914 */
+	uint32	errloguser;	/* 0x918 */
+	uint32	errlogflags;	/* 0x91c */
+	uint32	PAD[56];
+	uint32	intstatus;	/* 0xa00 */
+	uint32	PAD[255];
+	uint32	config;		/* 0xe00 */
+	uint32	PAD[63];
+	uint32	itcr;		/* 0xf00 */
+	uint32	PAD[3];
+	uint32	itipooba;	/* 0xf10 */
+	uint32	itipoobb;	/* 0xf14 */
+	uint32	itipoobc;	/* 0xf18 */
+	uint32	itipoobd;	/* 0xf1c */
+	uint32	PAD[4];
+	uint32	itipoobaout;	/* 0xf30 */
+	uint32	itipoobbout;	/* 0xf34 */
+	uint32	itipoobcout;	/* 0xf38 */
+	uint32	itipoobdout;	/* 0xf3c */
+	uint32	PAD[4];
+	uint32	itopooba;	/* 0xf50 */
+	uint32	itopoobb;	/* 0xf54 */
+	uint32	itopoobc;	/* 0xf58 */
+	uint32	itopoobd;	/* 0xf5c */
+	uint32	PAD[4];
+	uint32	itopoobain;	/* 0xf70 */
+	uint32	itopoobbin;	/* 0xf74 */
+	uint32	itopoobcin;	/* 0xf78 */
+	uint32	itopoobdin;	/* 0xf7c */
+	uint32	PAD[4];
+	uint32	itopreset;	/* 0xf90 */
+	uint32	PAD[15];
+	uint32	peripherialid4;	/* 0xfd0 */
+	uint32	peripherialid5;	/* 0xfd4 */
+	uint32	peripherialid6;	/* 0xfd8 */
+	uint32	peripherialid7;	/* 0xfdc */
+	uint32	peripherialid0;	/* 0xfe0 */
+	uint32	peripherialid1;	/* 0xfe4 */
+	uint32	peripherialid2;	/* 0xfe8 */
+	uint32	peripherialid3;	/* 0xfec */
+	uint32	componentid0;	/* 0xff0 */
+	uint32	componentid1;	/* 0xff4 */
+	uint32	componentid2;	/* 0xff8 */
+	uint32	componentid3;	/* 0xffc */
+} aidmp_t;
+
+#endif /* !_LANGUAGE_ASSEMBLY && !__ASSEMBLY__ */
+
+/* Out-of-band Router registers */
+#define	OOB_BUSCONFIG		0x020
+#define	OOB_STATUSA		0x100
+#define	OOB_STATUSB		0x104
+#define	OOB_STATUSC		0x108
+#define	OOB_STATUSD		0x10c
+#define	OOB_ENABLEA0		0x200
+#define	OOB_ENABLEA1		0x204
+#define	OOB_ENABLEA2		0x208
+#define	OOB_ENABLEA3		0x20c
+#define	OOB_ENABLEB0		0x280
+#define	OOB_ENABLEB1		0x284
+#define	OOB_ENABLEB2		0x288
+#define	OOB_ENABLEB3		0x28c
+#define	OOB_ENABLEC0		0x300
+#define	OOB_ENABLEC1		0x304
+#define	OOB_ENABLEC2		0x308
+#define	OOB_ENABLEC3		0x30c
+#define	OOB_ENABLED0		0x380
+#define	OOB_ENABLED1		0x384
+#define	OOB_ENABLED2		0x388
+#define	OOB_ENABLED3		0x38c
+#define	OOB_ITCR		0xf00
+#define	OOB_ITIPOOBA		0xf10
+#define	OOB_ITIPOOBB		0xf14
+#define	OOB_ITIPOOBC		0xf18
+#define	OOB_ITIPOOBD		0xf1c
+#define	OOB_ITOPOOBA		0xf30
+#define	OOB_ITOPOOBB		0xf34
+#define	OOB_ITOPOOBC		0xf38
+#define	OOB_ITOPOOBD		0xf3c
+
+/* DMP wrapper registers */
+#define	AI_OOBSELINA30		0x000
+#define	AI_OOBSELINA74		0x004
+#define	AI_OOBSELINB30		0x020
+#define	AI_OOBSELINB74		0x024
+#define	AI_OOBSELINC30		0x040
+#define	AI_OOBSELINC74		0x044
+#define	AI_OOBSELIND30		0x060
+#define	AI_OOBSELIND74		0x064
+#define	AI_OOBSELOUTA30		0x100
+#define	AI_OOBSELOUTA74		0x104
+#define	AI_OOBSELOUTB30		0x120
+#define	AI_OOBSELOUTB74		0x124
+#define	AI_OOBSELOUTC30		0x140
+#define	AI_OOBSELOUTC74		0x144
+#define	AI_OOBSELOUTD30		0x160
+#define	AI_OOBSELOUTD74		0x164
+#define	AI_OOBSYNCA		0x200
+#define	AI_OOBSELOUTAEN		0x204
+#define	AI_OOBSYNCB		0x220
+#define	AI_OOBSELOUTBEN		0x224
+#define	AI_OOBSYNCC		0x240
+#define	AI_OOBSELOUTCEN		0x244
+#define	AI_OOBSYNCD		0x260
+#define	AI_OOBSELOUTDEN		0x264
+#define	AI_OOBAEXTWIDTH		0x300
+#define	AI_OOBAINWIDTH		0x304
+#define	AI_OOBAOUTWIDTH		0x308
+#define	AI_OOBBEXTWIDTH		0x320
+#define	AI_OOBBINWIDTH		0x324
+#define	AI_OOBBOUTWIDTH		0x328
+#define	AI_OOBCEXTWIDTH		0x340
+#define	AI_OOBCINWIDTH		0x344
+#define	AI_OOBCOUTWIDTH		0x348
+#define	AI_OOBDEXTWIDTH		0x360
+#define	AI_OOBDINWIDTH		0x364
+#define	AI_OOBDOUTWIDTH		0x368
+
+
+#define	AI_IOCTRLSET		0x400
+#define	AI_IOCTRLCLEAR		0x404
+#define	AI_IOCTRL		0x408
+#define	AI_IOSTATUS		0x500
+#define	AI_RESETCTRL		0x800
+#define	AI_RESETSTATUS		0x804
+
+#define	AI_IOCTRLWIDTH		0x700
+#define	AI_IOSTATUSWIDTH	0x704
+
+#define	AI_RESETREADID		0x808
+#define	AI_RESETWRITEID		0x80c
+#define	AI_ERRLOGCTRL		0xa00
+#define	AI_ERRLOGDONE		0xa04
+#define	AI_ERRLOGSTATUS		0xa08
+#define	AI_ERRLOGADDRLO		0xa0c
+#define	AI_ERRLOGADDRHI		0xa10
+#define	AI_ERRLOGID		0xa14
+#define	AI_ERRLOGUSER		0xa18
+#define	AI_ERRLOGFLAGS		0xa1c
+#define	AI_INTSTATUS		0xa00
+#define	AI_CONFIG		0xe00
+#define	AI_ITCR			0xf00
+#define	AI_ITIPOOBA		0xf10
+#define	AI_ITIPOOBB		0xf14
+#define	AI_ITIPOOBC		0xf18
+#define	AI_ITIPOOBD		0xf1c
+#define	AI_ITIPOOBAOUT		0xf30
+#define	AI_ITIPOOBBOUT		0xf34
+#define	AI_ITIPOOBCOUT		0xf38
+#define	AI_ITIPOOBDOUT		0xf3c
+#define	AI_ITOPOOBA		0xf50
+#define	AI_ITOPOOBB		0xf54
+#define	AI_ITOPOOBC		0xf58
+#define	AI_ITOPOOBD		0xf5c
+#define	AI_ITOPOOBAIN		0xf70
+#define	AI_ITOPOOBBIN		0xf74
+#define	AI_ITOPOOBCIN		0xf78
+#define	AI_ITOPOOBDIN		0xf7c
+#define	AI_ITOPRESET		0xf90
+#define	AI_PERIPHERIALID4	0xfd0
+#define	AI_PERIPHERIALID5	0xfd4
+#define	AI_PERIPHERIALID6	0xfd8
+#define	AI_PERIPHERIALID7	0xfdc
+#define	AI_PERIPHERIALID0	0xfe0
+#define	AI_PERIPHERIALID1	0xfe4
+#define	AI_PERIPHERIALID2	0xfe8
+#define	AI_PERIPHERIALID3	0xfec
+#define	AI_COMPONENTID0		0xff0
+#define	AI_COMPONENTID1		0xff4
+#define	AI_COMPONENTID2		0xff8
+#define	AI_COMPONENTID3		0xffc
+
+/* resetctrl */
+#define	AIRC_RESET		1
+
+/* config */
+#define	AICFG_OOB		0x00000020
+#define	AICFG_IOS		0x00000010
+#define	AICFG_IOC		0x00000008
+#define	AICFG_TO		0x00000004
+#define	AICFG_ERRL		0x00000002
+#define	AICFG_RST		0x00000001
+
+/* bit defines for AI_OOBSELOUTB74 reg */
+#define OOB_SEL_OUTEN_B_5	15
+#define OOB_SEL_OUTEN_B_6	23
+
+/* AI_OOBSEL for A/B/C/D, 0-7 */
+#define AI_OOBSEL_MASK		0x1F
+#define AI_OOBSEL_0_SHIFT	0
+#define AI_OOBSEL_1_SHIFT	8
+#define AI_OOBSEL_2_SHIFT	16
+#define AI_OOBSEL_3_SHIFT	24
+#define AI_OOBSEL_4_SHIFT	0
+#define AI_OOBSEL_5_SHIFT	8
+#define AI_OOBSEL_6_SHIFT	16
+#define AI_OOBSEL_7_SHIFT	24
+
+#endif	/* _AIDMP_H */
diff --git a/drivers/net/wireless/bcmdhd/include/bcm_cfg.h b/drivers/net/wireless/bcmdhd/include/bcm_cfg.h
new file mode 100644
index 0000000..fa2db7c
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcm_cfg.h
@@ -0,0 +1,29 @@
+/*
+ * BCM common config options
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcm_cfg.h 351867 2012-08-21 18:46:16Z $
+ */
+
+#ifndef _bcm_cfg_h_
+#define _bcm_cfg_h_
+#endif /* _bcm_cfg_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcm_mpool_pub.h b/drivers/net/wireless/bcmdhd/include/bcm_mpool_pub.h
new file mode 100644
index 0000000..ee06f3b
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcm_mpool_pub.h
@@ -0,0 +1,361 @@
+/*
+ * Memory pools library, Public interface
+ *
+ * API Overview
+ *
+ * This package provides a memory allocation subsystem based on pools of
+ * homogenous objects.
+ *
+ * Instrumentation is available for reporting memory utilization both
+ * on a per-data-structure basis and system wide.
+ *
+ * There are two main types defined in this API.
+ *
+ *    pool manager: A singleton object that acts as a factory for
+ *                  pool allocators. It also is used for global
+ *                  instrumentation, such as reporting all blocks
+ *                  in use across all data structures. The pool manager
+ *                  creates and provides individual memory pools
+ *                  upon request to application code.
+ *
+ *    memory pool:  An object for allocating homogenous memory blocks.
+ *
+ * Global identifiers in this module use the following prefixes:
+ *    bcm_mpm_*     Memory pool manager
+ *    bcm_mp_*      Memory pool
+ *
+ * There are two main types of memory pools:
+ *
+ *    prealloc: The contiguous memory block of objects can either be supplied
+ *              by the client or malloc'ed by the memory manager. The objects are
+ *              allocated out of a block of memory and freed back to the block.
+ *
+ *    heap:     The memory pool allocator uses the heap (malloc/free) for memory.
+ *              In this case, the pool allocator is just providing statistics
+ *              and instrumentation on top of the heap, without modifying the heap
+ *              allocation implementation.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcm_mpool_pub.h 407097 2013-06-11 18:43:16Z $
+ */
+
+#ifndef _BCM_MPOOL_PUB_H
+#define _BCM_MPOOL_PUB_H 1
+
+#include <typedefs.h> /* needed for uint16 */
+
+
+/*
+**************************************************************************
+*
+* Type definitions, handles
+*
+**************************************************************************
+*/
+
+/* Forward declaration of OSL handle. */
+struct osl_info;
+
+/* Forward declaration of string buffer. */
+struct bcmstrbuf;
+
+/*
+ * Opaque type definition for the pool manager handle. This object is used for global
+ * memory pool operations such as obtaining a new pool, deleting a pool, iterating and
+ * instrumentation/debugging.
+ */
+struct bcm_mpm_mgr;
+typedef struct bcm_mpm_mgr *bcm_mpm_mgr_h;
+
+/*
+ * Opaque type definition for an instance of a pool. This handle is used for allocating
+ * and freeing memory through the pool, as well as management/instrumentation on this
+ * specific pool.
+ */
+struct bcm_mp_pool;
+typedef struct bcm_mp_pool *bcm_mp_pool_h;
+
+
+/*
+ * To make instrumentation more readable, every memory
+ * pool must have a readable name. Pool names are up to
+ * 8 bytes including '\0' termination. (7 printable characters.)
+ */
+#define BCM_MP_NAMELEN 8
+
+
+/*
+ * Type definition for pool statistics.
+ */
+typedef struct bcm_mp_stats {
+	char name[BCM_MP_NAMELEN];  /* Name of this pool. */
+	unsigned int objsz;         /* Object size allocated in this pool */
+	uint16 nobj;                /* Total number of objects in this pool */
+	uint16 num_alloc;           /* Number of objects currently allocated */
+	uint16 high_water;          /* Max number of allocated objects. */
+	uint16 failed_alloc;        /* Failed allocations. */
+} bcm_mp_stats_t;
+
+
+/*
+**************************************************************************
+*
+* API Routines on the pool manager.
+*
+**************************************************************************
+*/
+
+/*
+ * bcm_mpm_init() - initialize the whole memory pool system.
+ *
+ * Parameters:
+ *    osh:       INPUT  Operating system handle. Needed for heap memory allocation.
+ *    max_pools: INPUT Maximum number of mempools supported.
+ *    mgr:       OUTPUT The handle is written with the new pools manager object/handle.
+ *
+ * Returns:
+ *    BCME_OK     Object initialized successfully. May be used.
+ *    BCME_NOMEM  Initialization failed due to no memory. Object must not be used.
+ */
+int bcm_mpm_init(struct osl_info *osh, int max_pools, bcm_mpm_mgr_h *mgrp);
+
+
+/*
+ * bcm_mpm_deinit() - de-initialize the whole memory pool system.
+ *
+ * Parameters:
+ *    mgr:     INPUT  Pointer to pool manager handle.
+ *
+ * Returns:
+ *    BCME_OK  Memory pool manager successfully de-initialized.
+ *    other    Indicated error occured during de-initialization.
+ */
+int bcm_mpm_deinit(bcm_mpm_mgr_h *mgrp);
+
+/*
+ * bcm_mpm_create_prealloc_pool() - Create a new pool for fixed size objects. The
+ *                                  pool uses a contiguous block of pre-alloced
+ *                                  memory. The memory block may either be provided
+ *                                  by the client or dynamically allocated by the
+ *                                  pool manager.
+ *
+ * Parameters:
+ *    mgr:      INPUT  The handle to the pool manager
+ *    obj_sz:   INPUT  Size of objects that will be allocated by the new pool
+ *                     Must be >= sizeof(void *).
+ *    nobj:     INPUT  Maximum number of concurrently existing objects to support
+ *    memstart  INPUT  Pointer to the memory to use, or NULL to malloc()
+ *    memsize   INPUT  Number of bytes referenced from memstart (for error checking).
+ *                     Must be 0 if 'memstart' is NULL.
+ *    poolname  INPUT  For instrumentation, the name of the pool
+ *    newp:     OUTPUT The handle for the new pool, if creation is successful
+ *
+ * Returns:
+ *    BCME_OK   Pool created ok.
+ *    other     Pool not created due to indicated error. newpoolp set to NULL.
+ *
+ *
+ */
+int bcm_mpm_create_prealloc_pool(bcm_mpm_mgr_h mgr,
+                                 unsigned int obj_sz,
+                                 int nobj,
+                                 void *memstart,
+                                 unsigned int memsize,
+                                 const char poolname[BCM_MP_NAMELEN],
+                                 bcm_mp_pool_h *newp);
+
+
+/*
+ * bcm_mpm_delete_prealloc_pool() - Delete a memory pool. This should only be called after
+ *                                  all memory objects have been freed back to the pool.
+ *
+ * Parameters:
+ *    mgr:     INPUT The handle to the pools manager
+ *    pool:    INPUT The handle of the  pool to delete
+ *
+ * Returns:
+ *    BCME_OK   Pool deleted ok.
+ *    other     Pool not deleted due to indicated error.
+ *
+ */
+int bcm_mpm_delete_prealloc_pool(bcm_mpm_mgr_h mgr, bcm_mp_pool_h *poolp);
+
+/*
+ * bcm_mpm_create_heap_pool() - Create a new pool for fixed size objects. The memory
+ *                              pool allocator uses the heap (malloc/free) for memory.
+ *                              In this case, the pool allocator is just providing
+ *                              statistics and instrumentation on top of the heap,
+ *                              without modifying the heap allocation implementation.
+ *
+ * Parameters:
+ *    mgr:      INPUT  The handle to the pool manager
+ *    obj_sz:   INPUT  Size of objects that will be allocated by the new pool
+ *    poolname  INPUT  For instrumentation, the name of the pool
+ *    newp:     OUTPUT The handle for the new pool, if creation is successful
+ *
+ * Returns:
+ *    BCME_OK   Pool created ok.
+ *    other     Pool not created due to indicated error. newpoolp set to NULL.
+ *
+ *
+ */
+int bcm_mpm_create_heap_pool(bcm_mpm_mgr_h mgr, unsigned int obj_sz,
+                             const char poolname[BCM_MP_NAMELEN],
+                             bcm_mp_pool_h *newp);
+
+
+/*
+ * bcm_mpm_delete_heap_pool() - Delete a memory pool. This should only be called after
+ *                              all memory objects have been freed back to the pool.
+ *
+ * Parameters:
+ *    mgr:     INPUT The handle to the pools manager
+ *    pool:    INPUT The handle of the  pool to delete
+ *
+ * Returns:
+ *    BCME_OK   Pool deleted ok.
+ *    other     Pool not deleted due to indicated error.
+ *
+ */
+int bcm_mpm_delete_heap_pool(bcm_mpm_mgr_h mgr, bcm_mp_pool_h *poolp);
+
+
+/*
+ * bcm_mpm_stats() - Return stats for all pools
+ *
+ * Parameters:
+ *    mgr:         INPUT   The handle to the pools manager
+ *    stats:       OUTPUT  Array of pool statistics.
+ *    nentries:    MOD     Max elements in 'stats' array on INPUT. Actual number
+ *                         of array elements copied to 'stats' on OUTPUT.
+ *
+ * Returns:
+ *    BCME_OK   Ok
+ *    other     Error getting stats.
+ *
+ */
+int bcm_mpm_stats(bcm_mpm_mgr_h mgr, bcm_mp_stats_t *stats, int *nentries);
+
+
+/*
+ * bcm_mpm_dump() - Display statistics on all pools
+ *
+ * Parameters:
+ *    mgr:     INPUT  The handle to the pools manager
+ *    b:       OUTPUT Output buffer.
+ *
+ * Returns:
+ *    BCME_OK   Ok
+ *    other     Error during dump.
+ *
+ */
+int bcm_mpm_dump(bcm_mpm_mgr_h mgr, struct bcmstrbuf *b);
+
+
+/*
+ * bcm_mpm_get_obj_size() - The size of memory objects may need to be padded to
+ *                          compensate for alignment requirements of the objects.
+ *                          This function provides the padded object size. If clients
+ *                          pre-allocate a memory slab for a memory pool, the
+ *                          padded object size should be used by the client to allocate
+ *                          the memory slab (in order to provide sufficent space for
+ *                          the maximum number of objects).
+ *
+ * Parameters:
+ *    mgr:            INPUT   The handle to the pools manager.
+ *    obj_sz:         INPUT   Input object size.
+ *    padded_obj_sz:  OUTPUT  Padded object size.
+ *
+ * Returns:
+ *    BCME_OK      Ok
+ *    BCME_BADARG  Bad arguments.
+ *
+ */
+int bcm_mpm_get_obj_size(bcm_mpm_mgr_h mgr, unsigned int obj_sz, unsigned int *padded_obj_sz);
+
+
+/*
+***************************************************************************
+*
+* API Routines on a specific pool.
+*
+***************************************************************************
+*/
+
+
+/*
+ * bcm_mp_alloc() - Allocate a memory pool object.
+ *
+ * Parameters:
+ *    pool:    INPUT    The handle to the pool.
+ *
+ * Returns:
+ *    A pointer to the new object. NULL on error.
+ *
+ */
+void* bcm_mp_alloc(bcm_mp_pool_h pool);
+
+/*
+ * bcm_mp_free() - Free a memory pool object.
+ *
+ * Parameters:
+ *    pool:  INPUT   The handle to the pool.
+ *    objp:  INPUT   A pointer to the object to free.
+ *
+ * Returns:
+ *    BCME_OK   Ok
+ *    other     Error during free.
+ *
+ */
+int bcm_mp_free(bcm_mp_pool_h pool, void *objp);
+
+/*
+ * bcm_mp_stats() - Return stats for this pool
+ *
+ * Parameters:
+ *    pool:     INPUT    The handle to the pool
+ *    stats:    OUTPUT   Pool statistics
+ *
+ * Returns:
+ *    BCME_OK   Ok
+ *    other     Error getting statistics.
+ *
+ */
+int bcm_mp_stats(bcm_mp_pool_h pool, bcm_mp_stats_t *stats);
+
+
+/*
+ * bcm_mp_dump() - Dump a pool
+ *
+ * Parameters:
+ *    pool:    INPUT    The handle to the pool
+ *    b        OUTPUT   Output buffer
+ *
+ * Returns:
+ *    BCME_OK   Ok
+ *    other     Error during dump.
+ *
+ */
+int bcm_mp_dump(bcm_mp_pool_h pool, struct bcmstrbuf *b);
+
+
+#endif /* _BCM_MPOOL_PUB_H */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmcdc.h b/drivers/net/wireless/bcmdhd/include/bcmcdc.h
new file mode 100644
index 0000000..1028bb3
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmcdc.h
@@ -0,0 +1,132 @@
+/*
+ * CDC network driver ioctl/indication encoding
+ * Broadcom 802.11abg Networking Device Driver
+ *
+ * Definitions subject to change without notice.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmcdc.h 318308 2012-03-02 02:23:42Z $
+ */
+#ifndef _bcmcdc_h_
+#define	_bcmcdc_h_
+#include <proto/ethernet.h>
+
+typedef struct cdc_ioctl {
+	uint32 cmd;      /* ioctl command value */
+	uint32 len;      /* lower 16: output buflen; upper 16: input buflen (excludes header) */
+	uint32 flags;    /* flag defns given below */
+	uint32 status;   /* status code returned from the device */
+} cdc_ioctl_t;
+
+/* Max valid buffer size that can be sent to the dongle */
+#define CDC_MAX_MSG_SIZE   ETHER_MAX_LEN
+
+/* len field is divided into input and output buffer lengths */
+#define CDCL_IOC_OUTLEN_MASK   0x0000FFFF  /* maximum or expected response length, */
+					   /* excluding IOCTL header */
+#define CDCL_IOC_OUTLEN_SHIFT  0
+#define CDCL_IOC_INLEN_MASK    0xFFFF0000   /* input buffer length, excluding IOCTL header */
+#define CDCL_IOC_INLEN_SHIFT   16
+
+/* CDC flag definitions */
+#define CDCF_IOC_ERROR		0x01	/* 0=success, 1=ioctl cmd failed */
+#define CDCF_IOC_SET		0x02	/* 0=get, 1=set cmd */
+#define CDCF_IOC_OVL_IDX_MASK	0x3c	/* overlay region index mask */
+#define CDCF_IOC_OVL_RSV	0x40	/* 1=reserve this overlay region */
+#define CDCF_IOC_OVL		0x80	/* 1=this ioctl corresponds to an overlay */
+#define CDCF_IOC_ACTION_MASK	0xfe	/* SET/GET, OVL_IDX, OVL_RSV, OVL mask */
+#define CDCF_IOC_ACTION_SHIFT	1	/* SET/GET, OVL_IDX, OVL_RSV, OVL shift */
+#define CDCF_IOC_IF_MASK	0xF000	/* I/F index */
+#define CDCF_IOC_IF_SHIFT	12
+#define CDCF_IOC_ID_MASK	0xFFFF0000	/* used to uniquely id an ioctl req/resp pairing */
+#define CDCF_IOC_ID_SHIFT	16		/* # of bits of shift for ID Mask */
+
+#define CDC_IOC_IF_IDX(flags)	(((flags) & CDCF_IOC_IF_MASK) >> CDCF_IOC_IF_SHIFT)
+#define CDC_IOC_ID(flags)	(((flags) & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT)
+
+#define CDC_GET_IF_IDX(hdr) \
+	((int)((((hdr)->flags) & CDCF_IOC_IF_MASK) >> CDCF_IOC_IF_SHIFT))
+#define CDC_SET_IF_IDX(hdr, idx) \
+	((hdr)->flags = (((hdr)->flags & ~CDCF_IOC_IF_MASK) | ((idx) << CDCF_IOC_IF_SHIFT)))
+
+/*
+ * BDC header
+ *
+ *   The BDC header is used on data packets to convey priority across USB.
+ */
+
+struct bdc_header {
+	uint8	flags;			/* Flags */
+	uint8	priority;		/* 802.1d Priority 0:2 bits, 4:7 USB flow control info */
+	uint8	flags2;
+	uint8	dataOffset;		/* Offset from end of BDC header to packet data, in
+					 * 4-byte words.  Leaves room for optional headers.
+					 */
+};
+
+#define	BDC_HEADER_LEN		4
+
+/* flags field bitmap */
+#define BDC_FLAG_80211_PKT	0x01	/* Packet is in 802.11 format (dongle -> host) */
+#define BDC_FLAG_SUM_GOOD	0x04	/* Dongle has verified good RX checksums */
+#define BDC_FLAG_SUM_NEEDED	0x08	/* Dongle needs to do TX checksums: host->device */
+#define BDC_FLAG_EVENT_MSG	0x08	/* Payload contains an event msg: device->host */
+#define BDC_FLAG_VER_MASK	0xf0	/* Protocol version mask */
+#define BDC_FLAG_VER_SHIFT	4	/* Protocol version shift */
+
+/* priority field bitmap */
+#define BDC_PRIORITY_MASK	0x07
+#define BDC_PRIORITY_FC_MASK	0xf0	/* flow control info mask */
+#define BDC_PRIORITY_FC_SHIFT	4	/* flow control info shift */
+
+/* flags2 field bitmap */
+#define BDC_FLAG2_IF_MASK	0x0f	/* interface index (host <-> dongle) */
+#define BDC_FLAG2_IF_SHIFT	0
+#define BDC_FLAG2_FC_FLAG	0x10	/* flag to indicate if pkt contains */
+					/* FLOW CONTROL info only */
+
+/* version numbers */
+#define BDC_PROTO_VER_1		1	/* Old Protocol version */
+#define BDC_PROTO_VER		2	/* Protocol version */
+
+/* flags2.if field access macros */
+#define BDC_GET_IF_IDX(hdr) \
+	((int)((((hdr)->flags2) & BDC_FLAG2_IF_MASK) >> BDC_FLAG2_IF_SHIFT))
+#define BDC_SET_IF_IDX(hdr, idx) \
+	((hdr)->flags2 = (((hdr)->flags2 & ~BDC_FLAG2_IF_MASK) | ((idx) << BDC_FLAG2_IF_SHIFT)))
+
+#define BDC_FLAG2_PAD_MASK		0xf0
+#define BDC_FLAG_PAD_MASK		0x03
+#define BDC_FLAG2_PAD_SHIFT		2
+#define BDC_FLAG_PAD_SHIFT		0
+#define BDC_FLAG2_PAD_IDX		0x3c
+#define BDC_FLAG_PAD_IDX		0x03
+#define BDC_GET_PAD_LEN(hdr) \
+	((int)(((((hdr)->flags2) & BDC_FLAG2_PAD_MASK) >> BDC_FLAG2_PAD_SHIFT) | \
+	((((hdr)->flags) & BDC_FLAG_PAD_MASK) >> BDC_FLAG_PAD_SHIFT)))
+#define BDC_SET_PAD_LEN(hdr, idx) \
+	((hdr)->flags2 = (((hdr)->flags2 & ~BDC_FLAG2_PAD_MASK) | \
+	(((idx) & BDC_FLAG2_PAD_IDX) << BDC_FLAG2_PAD_SHIFT))); \
+	((hdr)->flags = (((hdr)->flags & ~BDC_FLAG_PAD_MASK) | \
+	(((idx) & BDC_FLAG_PAD_IDX) << BDC_FLAG_PAD_SHIFT)))
+
+#endif /* _bcmcdc_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmdefs.h b/drivers/net/wireless/bcmdhd/include/bcmdefs.h
new file mode 100644
index 0000000..755b853
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmdefs.h
@@ -0,0 +1,338 @@
+/*
+ * Misc system wide definitions
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmdefs.h 474209 2014-04-30 12:16:47Z $
+ */
+
+#ifndef	_bcmdefs_h_
+#define	_bcmdefs_h_
+
+/*
+ * One doesn't need to include this file explicitly, gets included automatically if
+ * typedefs.h is included.
+ */
+
+/* Use BCM_REFERENCE to suppress warnings about intentionally-unused function
+ * arguments or local variables.
+ */
+#define BCM_REFERENCE(data)	((void)(data))
+
+/* Allow for suppressing unused variable warnings. */
+#ifdef __GNUC__
+#define UNUSED_VAR     __attribute__ ((unused))
+#else
+#define UNUSED_VAR
+#endif
+
+/* Compile-time assert can be used in place of ASSERT if the expression evaluates
+ * to a constant at compile time.
+ */
+#define STATIC_ASSERT(expr) { \
+	/* Make sure the expression is constant. */ \
+	typedef enum { _STATIC_ASSERT_NOT_CONSTANT = (expr) } _static_assert_e UNUSED_VAR; \
+	/* Make sure the expression is true. */ \
+	typedef char STATIC_ASSERT_FAIL[(expr) ? 1 : -1] UNUSED_VAR; \
+}
+
+/* Reclaiming text and data :
+ * The following macros specify special linker sections that can be reclaimed
+ * after a system is considered 'up'.
+ * BCMATTACHFN is also used for detach functions (it's not worth having a BCMDETACHFN,
+ * as in most cases, the attach function calls the detach function to clean up on error).
+ */
+
+#define bcmreclaimed 		0
+#define _data	_data
+#define _fn	_fn
+#define BCMPREATTACHDATA(_data)	_data
+#define BCMPREATTACHFN(_fn)	_fn
+#define _data	_data
+#define _fn		_fn
+#define _fn	_fn
+#define	BCMNMIATTACHFN(_fn)	_fn
+#define	BCMNMIATTACHDATA(_data)	_data
+#define CONST	const
+
+#undef BCM47XX_CA9
+
+#ifndef BCMFASTPATH
+#if defined(BCM47XX_CA9)
+#define BCMFASTPATH		__attribute__ ((__section__ (".text.fastpath")))
+#define BCMFASTPATH_HOST	__attribute__ ((__section__ (".text.fastpath_host")))
+#else
+#define BCMFASTPATH
+#define BCMFASTPATH_HOST
+#endif
+#endif /* BCMFASTPATH */
+
+
+/* Use the BCMRAMFN() macro to tag functions in source that must be included in RAM (excluded from
+ * ROM). This should eliminate the need to manually specify these functions in the ROM config file.
+ * It should only be used in special cases where the function must be in RAM for *all* ROM-based
+ * chips.
+ */
+	#define BCMRAMFN(_fn)	_fn
+
+#define STATIC	static
+
+/* Bus types */
+#define	SI_BUS			0	/* SOC Interconnect */
+#define	PCI_BUS			1	/* PCI target */
+#define	PCMCIA_BUS		2	/* PCMCIA target */
+#define SDIO_BUS		3	/* SDIO target */
+#define JTAG_BUS		4	/* JTAG */
+#define USB_BUS			5	/* USB (does not support R/W REG) */
+#define SPI_BUS			6	/* gSPI target */
+#define RPC_BUS			7	/* RPC target */
+
+/* Allows size optimization for single-bus image */
+#ifdef BCMBUSTYPE
+#define BUSTYPE(bus) 	(BCMBUSTYPE)
+#else
+#define BUSTYPE(bus) 	(bus)
+#endif
+
+/* Allows size optimization for single-backplane image */
+#ifdef BCMCHIPTYPE
+#define CHIPTYPE(bus) 	(BCMCHIPTYPE)
+#else
+#define CHIPTYPE(bus) 	(bus)
+#endif
+
+
+/* Allows size optimization for SPROM support */
+#if defined(BCMSPROMBUS)
+#define SPROMBUS	(BCMSPROMBUS)
+#elif defined(SI_PCMCIA_SROM)
+#define SPROMBUS	(PCMCIA_BUS)
+#else
+#define SPROMBUS	(PCI_BUS)
+#endif
+
+/* Allows size optimization for single-chip image */
+#ifdef BCMCHIPID
+#define CHIPID(chip)	(BCMCHIPID)
+#else
+#define CHIPID(chip)	(chip)
+#endif
+
+#ifdef BCMCHIPREV
+#define CHIPREV(rev)	(BCMCHIPREV)
+#else
+#define CHIPREV(rev)	(rev)
+#endif
+
+/* Defines for DMA Address Width - Shared between OSL and HNDDMA */
+#define DMADDR_MASK_32 0x0		/* Address mask for 32-bits */
+#define DMADDR_MASK_30 0xc0000000	/* Address mask for 30-bits */
+#define DMADDR_MASK_26 0xFC000000	/* Address maks for 26-bits */
+#define DMADDR_MASK_0  0xffffffff	/* Address mask for 0-bits (hi-part) */
+
+#define	DMADDRWIDTH_26  26 /* 26-bit addressing capability */
+#define	DMADDRWIDTH_30  30 /* 30-bit addressing capability */
+#define	DMADDRWIDTH_32  32 /* 32-bit addressing capability */
+#define	DMADDRWIDTH_63  63 /* 64-bit addressing capability */
+#define	DMADDRWIDTH_64  64 /* 64-bit addressing capability */
+
+typedef struct {
+	uint32 loaddr;
+	uint32 hiaddr;
+} dma64addr_t;
+
+#define PHYSADDR64HI(_pa) ((_pa).hiaddr)
+#define PHYSADDR64HISET(_pa, _val) \
+	do { \
+		(_pa).hiaddr = (_val);		\
+	} while (0)
+#define PHYSADDR64LO(_pa) ((_pa).loaddr)
+#define PHYSADDR64LOSET(_pa, _val) \
+	do { \
+		(_pa).loaddr = (_val);		\
+	} while (0)
+
+#ifdef BCMDMA64OSL
+typedef dma64addr_t dmaaddr_t;
+#define PHYSADDRHI(_pa) PHYSADDR64HI(_pa)
+#define PHYSADDRHISET(_pa, _val) PHYSADDR64HISET(_pa, _val)
+#define PHYSADDRLO(_pa)  PHYSADDR64LO(_pa)
+#define PHYSADDRLOSET(_pa, _val) PHYSADDR64LOSET(_pa, _val)
+
+#else
+typedef unsigned long dmaaddr_t;
+#define PHYSADDRHI(_pa) (0)
+#define PHYSADDRHISET(_pa, _val)
+#define PHYSADDRLO(_pa) ((_pa))
+#define PHYSADDRLOSET(_pa, _val) \
+	do { \
+		(_pa) = (_val);			\
+	} while (0)
+#endif /* BCMDMA64OSL */
+#define PHYSADDRISZERO(_pa) (PHYSADDRLO(_pa) == 0 && PHYSADDRHI(_pa) == 0)
+
+/* One physical DMA segment */
+typedef struct  {
+	dmaaddr_t addr;
+	uint32	  length;
+} hnddma_seg_t;
+
+#define MAX_DMA_SEGS 8
+
+
+typedef struct {
+	void *oshdmah; /* Opaque handle for OSL to store its information */
+	uint origsize; /* Size of the virtual packet */
+	uint nsegs;
+	hnddma_seg_t segs[MAX_DMA_SEGS];
+} hnddma_seg_map_t;
+
+
+/* packet headroom necessary to accommodate the largest header in the system, (i.e TXOFF).
+ * By doing, we avoid the need  to allocate an extra buffer for the header when bridging to WL.
+ * There is a compile time check in wlc.c which ensure that this value is at least as big
+ * as TXOFF. This value is used in dma_rxfill (hnddma.c).
+ */
+
+#if defined(BCM_RPC_NOCOPY) || defined(BCM_RCP_TXNOCOPY)
+/* add 40 bytes to allow for extra RPC header and info  */
+#define BCMEXTRAHDROOM 260
+#else /* BCM_RPC_NOCOPY || BCM_RPC_TXNOCOPY */
+#if defined(BCM47XX_CA9)
+#define BCMEXTRAHDROOM 224
+#else
+#define BCMEXTRAHDROOM 204
+#endif /* linux && BCM47XX_CA9 */
+#endif /* BCM_RPC_NOCOPY || BCM_RPC_TXNOCOPY */
+
+/* Packet alignment for most efficient SDIO (can change based on platform) */
+#ifndef SDALIGN
+#define SDALIGN	32
+#endif
+
+/* Headroom required for dongle-to-host communication.  Packets allocated
+ * locally in the dongle (e.g. for CDC ioctls or RNDIS messages) should
+ * leave this much room in front for low-level message headers which may
+ * be needed to get across the dongle bus to the host.  (These messages
+ * don't go over the network, so room for the full WL header above would
+ * be a waste.).
+*/
+#define BCMDONGLEHDRSZ 12
+#define BCMDONGLEPADSZ 16
+
+#define BCMDONGLEOVERHEAD	(BCMDONGLEHDRSZ + BCMDONGLEPADSZ)
+
+
+#if defined(NO_BCMDBG_ASSERT)
+# undef BCMDBG_ASSERT
+# undef BCMASSERT_LOG
+#endif
+
+#if defined(BCMASSERT_LOG)
+#define BCMASSERT_SUPPORT
+#endif 
+
+/* Macros for doing definition and get/set of bitfields
+ * Usage example, e.g. a three-bit field (bits 4-6):
+ *    #define <NAME>_M	BITFIELD_MASK(3)
+ *    #define <NAME>_S	4
+ * ...
+ *    regval = R_REG(osh, &regs->regfoo);
+ *    field = GFIELD(regval, <NAME>);
+ *    regval = SFIELD(regval, <NAME>, 1);
+ *    W_REG(osh, &regs->regfoo, regval);
+ */
+#define BITFIELD_MASK(width) \
+		(((unsigned)1 << (width)) - 1)
+#define GFIELD(val, field) \
+		(((val) >> field ## _S) & field ## _M)
+#define SFIELD(val, field, bits) \
+		(((val) & (~(field ## _M << field ## _S))) | \
+		 ((unsigned)(bits) << field ## _S))
+
+/* define BCMSMALL to remove misc features for memory-constrained environments */
+#ifdef BCMSMALL
+#undef	BCMSPACE
+#define bcmspace	FALSE	/* if (bcmspace) code is discarded */
+#else
+#define	BCMSPACE
+#define bcmspace	TRUE	/* if (bcmspace) code is retained */
+#endif
+
+/* Max. nvram variable table size */
+#ifndef MAXSZ_NVRAM_VARS
+#define	MAXSZ_NVRAM_VARS	4096
+#endif
+
+
+
+/* WL_ENAB_RUNTIME_CHECK may be set based upon the #define below (for ROM builds). It may also
+ * be defined via makefiles (e.g. ROM auto abandon unoptimized compiles).
+ */
+
+
+#ifdef BCMLFRAG /* BCMLFRAG support enab macros  */
+	extern bool _bcmlfrag;
+	#if defined(WL_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD)
+		#define BCMLFRAG_ENAB() (_bcmlfrag)
+	#elif defined(BCMLFRAG_DISABLED)
+		#define BCMLFRAG_ENAB()	(0)
+	#else
+		#define BCMLFRAG_ENAB()	(1)
+	#endif
+#else
+	#define BCMLFRAG_ENAB()		(0)
+#endif /* BCMLFRAG_ENAB */
+#ifdef BCMSPLITRX /* BCMLFRAG support enab macros  */
+	extern bool _bcmsplitrx;
+	#if defined(WL_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD)
+		#define BCMSPLITRX_ENAB() (_bcmsplitrx)
+	#elif defined(BCMSPLITRX_DISABLED)
+		#define BCMSPLITRX_ENAB()	(0)
+	#else
+		#define BCMSPLITRX_ENAB()	(1)
+	#endif
+#else
+	#define BCMSPLITRX_ENAB()		(0)
+#endif /* BCMSPLITRX */
+#ifdef BCM_SPLITBUF
+	extern bool _bcmsplitbuf;
+	#if defined(WL_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD)
+		#define BCM_SPLITBUF_ENAB() (_bcmsplitbuf)
+	#elif defined(BCM_SPLITBUF_DISABLED)
+		#define BCM_SPLITBUF_ENAB()	(0)
+	#else
+		#define BCM_SPLITBUF_ENAB()	(1)
+	#endif
+#else
+	#define BCM_SPLITBUF_ENAB()		(0)
+#endif	/* BCM_SPLITBUF */
+/* Max size for reclaimable NVRAM array */
+#ifdef DL_NVRAM
+#define NVRAM_ARRAY_MAXSIZE	DL_NVRAM
+#else
+#define NVRAM_ARRAY_MAXSIZE	MAXSZ_NVRAM_VARS
+#endif /* DL_NVRAM */
+
+extern uint32 gFWID;
+
+
+#endif /* _bcmdefs_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmdevs.h b/drivers/net/wireless/bcmdhd/include/bcmdevs.h
new file mode 100644
index 0000000..678b860
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmdevs.h
@@ -0,0 +1,729 @@
+/*
+ * Broadcom device-specific manifest constants.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmdevs.h 474307 2014-04-30 20:58:03Z $
+ */
+
+#ifndef	_BCMDEVS_H
+#define	_BCMDEVS_H
+
+/* PCI vendor IDs */
+#define	VENDOR_EPIGRAM		0xfeda
+#define	VENDOR_BROADCOM		0x14e4
+#define	VENDOR_3COM		0x10b7
+#define	VENDOR_NETGEAR		0x1385
+#define	VENDOR_DIAMOND		0x1092
+#define	VENDOR_INTEL		0x8086
+#define	VENDOR_DELL		0x1028
+#define	VENDOR_HP		0x103c
+#define	VENDOR_HP_COMPAQ	0x0e11
+#define	VENDOR_APPLE		0x106b
+#define VENDOR_SI_IMAGE		0x1095		/* Silicon Image, used by Arasan SDIO Host */
+#define VENDOR_BUFFALO		0x1154		/* Buffalo vendor id */
+#define VENDOR_TI		0x104c		/* Texas Instruments */
+#define VENDOR_RICOH		0x1180		/* Ricoh */
+#define VENDOR_JMICRON		0x197b
+
+
+/* PCMCIA vendor IDs */
+#define	VENDOR_BROADCOM_PCMCIA	0x02d0
+
+/* SDIO vendor IDs */
+#define	VENDOR_BROADCOM_SDIO	0x00BF
+
+/* DONGLE VID/PIDs */
+#define BCM_DNGL_VID		0x0a5c
+#define BCM_DNGL_BL_PID_4328	0xbd12
+#define BCM_DNGL_BL_PID_4322	0xbd13
+#define BCM_DNGL_BL_PID_4319    0xbd16
+#define BCM_DNGL_BL_PID_43236   0xbd17
+#define BCM_DNGL_BL_PID_4332	0xbd18
+#define BCM_DNGL_BL_PID_4330	0xbd19
+#define BCM_DNGL_BL_PID_4334	0xbd1a
+#define BCM_DNGL_BL_PID_43239   0xbd1b
+#define BCM_DNGL_BL_PID_4324	0xbd1c
+#define BCM_DNGL_BL_PID_4360	0xbd1d
+#define BCM_DNGL_BL_PID_43143	0xbd1e
+#define BCM_DNGL_BL_PID_43242	0xbd1f
+#define BCM_DNGL_BL_PID_43342	0xbd21
+#define BCM_DNGL_BL_PID_4335	0xbd20
+#define BCM_DNGL_BL_PID_43341	0xbd22
+#define BCM_DNGL_BL_PID_4350    0xbd23
+#define BCM_DNGL_BL_PID_4345    0xbd24
+#define BCM_DNGL_BL_PID_4349	0xbd25
+#define BCM_DNGL_BL_PID_4354	0xbd26
+#define BCM_DNGL_BL_PID_43569   0xbd27
+#define BCM_DNGL_BL_PID_43909	0xbd28
+
+#define BCM_DNGL_BDC_PID	0x0bdc
+#define BCM_DNGL_JTAG_PID	0x4a44
+
+/* HW USB BLOCK [CPULESS USB] PIDs */
+#define BCM_HWUSB_PID_43239     43239
+
+/* PCI Device IDs */
+#define	BCM4210_DEVICE_ID	0x1072		/* never used */
+#define	BCM4230_DEVICE_ID	0x1086		/* never used */
+#define	BCM4401_ENET_ID		0x170c		/* 4401b0 production enet cards */
+#define	BCM3352_DEVICE_ID	0x3352		/* bcm3352 device id */
+#define	BCM3360_DEVICE_ID	0x3360		/* bcm3360 device id */
+#define	BCM4211_DEVICE_ID	0x4211
+#define	BCM4231_DEVICE_ID	0x4231
+#define	BCM4303_D11B_ID		0x4303		/* 4303 802.11b */
+#define	BCM4311_D11G_ID		0x4311		/* 4311 802.11b/g id */
+#define	BCM4311_D11DUAL_ID	0x4312		/* 4311 802.11a/b/g id */
+#define	BCM4311_D11A_ID		0x4313		/* 4311 802.11a id */
+#define	BCM4328_D11DUAL_ID	0x4314		/* 4328/4312 802.11a/g id */
+#define	BCM4328_D11G_ID		0x4315		/* 4328/4312 802.11g id */
+#define	BCM4328_D11A_ID		0x4316		/* 4328/4312 802.11a id */
+#define	BCM4318_D11G_ID		0x4318		/* 4318 802.11b/g id */
+#define	BCM4318_D11DUAL_ID	0x4319		/* 4318 802.11a/b/g id */
+#define	BCM4318_D11A_ID		0x431a		/* 4318 802.11a id */
+#define	BCM4325_D11DUAL_ID	0x431b		/* 4325 802.11a/g id */
+#define	BCM4325_D11G_ID		0x431c		/* 4325 802.11g id */
+#define	BCM4325_D11A_ID		0x431d		/* 4325 802.11a id */
+#define	BCM4306_D11G_ID		0x4320		/* 4306 802.11g */
+#define	BCM4306_D11A_ID		0x4321		/* 4306 802.11a */
+#define	BCM4306_UART_ID		0x4322		/* 4306 uart */
+#define	BCM4306_V90_ID		0x4323		/* 4306 v90 codec */
+#define	BCM4306_D11DUAL_ID	0x4324		/* 4306 dual A+B */
+#define	BCM4306_D11G_ID2	0x4325		/* BCM4306_D11G_ID; INF w/loose binding war */
+#define	BCM4321_D11N_ID		0x4328		/* 4321 802.11n dualband id */
+#define	BCM4321_D11N2G_ID	0x4329		/* 4321 802.11n 2.4Ghz band id */
+#define	BCM4321_D11N5G_ID	0x432a		/* 4321 802.11n 5Ghz band id */
+#define BCM4322_D11N_ID		0x432b		/* 4322 802.11n dualband device */
+#define BCM4322_D11N2G_ID	0x432c		/* 4322 802.11n 2.4GHz device */
+#define BCM4322_D11N5G_ID	0x432d		/* 4322 802.11n 5GHz device */
+#define BCM4329_D11N_ID		0x432e		/* 4329 802.11n dualband device */
+#define BCM4329_D11N2G_ID	0x432f		/* 4329 802.11n 2.4G device */
+#define BCM4329_D11N5G_ID	0x4330		/* 4329 802.11n 5G device */
+#define	BCM4315_D11DUAL_ID	0x4334		/* 4315 802.11a/g id */
+#define	BCM4315_D11G_ID		0x4335		/* 4315 802.11g id */
+#define	BCM4315_D11A_ID		0x4336		/* 4315 802.11a id */
+#define BCM4319_D11N_ID		0x4337		/* 4319 802.11n dualband device */
+#define BCM4319_D11N2G_ID	0x4338		/* 4319 802.11n 2.4G device */
+#define BCM4319_D11N5G_ID	0x4339		/* 4319 802.11n 5G device */
+#define BCM43231_D11N2G_ID	0x4340		/* 43231 802.11n 2.4GHz device */
+#define BCM43221_D11N2G_ID	0x4341		/* 43221 802.11n 2.4GHz device */
+#define BCM43222_D11N_ID	0x4350		/* 43222 802.11n dualband device */
+#define BCM43222_D11N2G_ID	0x4351		/* 43222 802.11n 2.4GHz device */
+#define BCM43222_D11N5G_ID	0x4352		/* 43222 802.11n 5GHz device */
+#define BCM43224_D11N_ID	0x4353		/* 43224 802.11n dualband device */
+#define BCM43224_D11N_ID_VEN1	0x0576		/* Vendor specific 43224 802.11n db device */
+#define BCM43226_D11N_ID	0x4354		/* 43226 802.11n dualband device */
+#define BCM43236_D11N_ID	0x4346		/* 43236 802.11n dualband device */
+#define BCM43236_D11N2G_ID	0x4347		/* 43236 802.11n 2.4GHz device */
+#define BCM43236_D11N5G_ID	0x4348		/* 43236 802.11n 5GHz device */
+#define BCM43225_D11N2G_ID	0x4357		/* 43225 802.11n 2.4GHz device */
+#define BCM43421_D11N_ID	0xA99D		/* 43421 802.11n dualband device */
+#define BCM4313_D11N2G_ID	0x4727		/* 4313 802.11n 2.4G device */
+#define BCM4330_D11N_ID         0x4360          /* 4330 802.11n dualband device */
+#define BCM4330_D11N2G_ID       0x4361          /* 4330 802.11n 2.4G device */
+#define BCM4330_D11N5G_ID       0x4362          /* 4330 802.11n 5G device */
+#define BCM4336_D11N_ID		0x4343		/* 4336 802.11n 2.4GHz device */
+#define BCM6362_D11N_ID		0x435f		/* 6362 802.11n dualband device */
+#define BCM6362_D11N2G_ID	0x433f		/* 6362 802.11n 2.4Ghz band id */
+#define BCM6362_D11N5G_ID	0x434f		/* 6362 802.11n 5Ghz band id */
+#define BCM4331_D11N_ID		0x4331		/* 4331 802.11n dualband id */
+#define BCM4331_D11N2G_ID	0x4332		/* 4331 802.11n 2.4Ghz band id */
+#define BCM4331_D11N5G_ID	0x4333		/* 4331 802.11n 5Ghz band id */
+#define BCM43237_D11N_ID	0x4355		/* 43237 802.11n dualband device */
+#define BCM43237_D11N5G_ID	0x4356		/* 43237 802.11n 5GHz device */
+#define BCM43227_D11N2G_ID	0x4358		/* 43228 802.11n 2.4GHz device */
+#define BCM43228_D11N_ID	0x4359		/* 43228 802.11n DualBand device */
+#define BCM43228_D11N5G_ID	0x435a		/* 43228 802.11n 5GHz device */
+#define BCM43362_D11N_ID	0x4363		/* 43362 802.11n 2.4GHz device */
+#define BCM43239_D11N_ID	0x4370		/* 43239 802.11n dualband device */
+#define BCM4324_D11N_ID		0x4374		/* 4324 802.11n dualband device */
+#define BCM43217_D11N2G_ID	0x43a9		/* 43217 802.11n 2.4GHz device */
+#define BCM43131_D11N2G_ID	0x43aa		/* 43131 802.11n 2.4GHz device */
+#define BCM4314_D11N2G_ID	0x4364		/* 4314 802.11n 2.4G device */
+#define BCM43142_D11N2G_ID	0x4365		/* 43142 802.11n 2.4G device */
+#define BCM43143_D11N2G_ID	0x4366		/* 43143 802.11n 2.4G device */
+#define BCM4334_D11N_ID		0x4380		/* 4334 802.11n dualband device */
+#define BCM4334_D11N2G_ID	0x4381		/* 4334 802.11n 2.4G device */
+#define BCM4334_D11N5G_ID	0x4382		/* 4334 802.11n 5G device */
+#define BCM43342_D11N_ID	0x4383		/* 43342 802.11n dualband device */
+#define BCM43342_D11N2G_ID	0x4384		/* 43342 802.11n 2.4G device */
+#define BCM43342_D11N5G_ID	0x4385		/* 43342 802.11n 5G device */
+#define BCM43341_D11N_ID	0x4386		/* 43341 802.11n dualband device */
+#define BCM43341_D11N2G_ID	0x4387		/* 43341 802.11n 2.4G device */
+#define BCM43341_D11N5G_ID	0x4388		/* 43341 802.11n 5G device */
+#define BCM4360_D11AC_ID	0x43a0
+#define BCM4360_D11AC2G_ID	0x43a1
+#define BCM4360_D11AC5G_ID	0x43a2
+#define BCM4345_D11AC_ID	0x43ab		/* 4345 802.11ac dualband device */
+#define BCM4345_D11AC2G_ID	0x43ac		/* 4345 802.11ac 2.4G device */
+#define BCM4345_D11AC5G_ID	0x43ad		/* 4345 802.11ac 5G device */
+#define BCM4335_D11AC_ID	0x43ae
+#define BCM4335_D11AC2G_ID	0x43af
+#define BCM4335_D11AC5G_ID	0x43b0
+#define BCM4352_D11AC_ID	0x43b1		/* 4352 802.11ac dualband device */
+#define BCM4352_D11AC2G_ID	0x43b2		/* 4352 802.11ac 2.4G device */
+#define BCM4352_D11AC5G_ID	0x43b3		/* 4352 802.11ac 5G device */
+#define BCM43602_D11AC_ID	0x43ba		/* ac dualband PCI devid SPROM programmed */
+#define BCM43602_D11AC2G_ID	0x43bb		/* 43602 802.11ac 2.4G device */
+#define BCM43602_D11AC5G_ID	0x43bc		/* 43602 802.11ac 5G device */
+#define BCM4349_D11AC_ID	0x4349		/* 4349 802.11ac dualband device */
+#define BCM4349_D11AC2G_ID	0x43dd		/* 4349 802.11ac 2.4G device */
+#define BCM4349_D11AC5G_ID	0x43de		/* 4349 802.11ac 5G device */
+#define BCM4355_D11AC_ID	0x43d3		/* 4355 802.11ac dualband device */
+#define BCM4355_D11AC2G_ID	0x43d4		/* 4355 802.11ac 2.4G device */
+#define BCM4355_D11AC5G_ID	0x43d5		/* 4355 802.11ac 5G device */
+#define BCM4359_D11AC_ID	0x43d6		/* 4359 802.11ac dualband device */
+#define BCM4359_D11AC2G_ID	0x43d7		/* 4359 802.11ac 2.4G device */
+#define BCM4359_D11AC5G_ID	0x43d8		/* 4359 802.11ac 5G device */
+
+/* PCI Subsystem ID */
+#define BCM943228HMB_SSID_VEN1	0x0607
+#define BCM94313HMGBL_SSID_VEN1	0x0608
+#define BCM94313HMG_SSID_VEN1	0x0609
+#define BCM943142HM_SSID_VEN1	0x0611
+
+#define BCM43143_D11N2G_ID	0x4366		/* 43143 802.11n 2.4G device */
+
+#define BCM43242_D11N_ID	0x4367		/* 43242 802.11n dualband device */
+#define BCM43242_D11N2G_ID	0x4368		/* 43242 802.11n 2.4G device */
+#define BCM43242_D11N5G_ID	0x4369		/* 43242 802.11n 5G device */
+
+#define BCM4350_D11AC_ID	0x43a3
+#define BCM4350_D11AC2G_ID	0x43a4
+#define BCM4350_D11AC5G_ID	0x43a5
+
+#define BCM43556_D11AC_ID	0x43b7
+#define BCM43556_D11AC2G_ID	0x43b8
+#define BCM43556_D11AC5G_ID	0x43b9
+
+#define BCM43558_D11AC_ID	0x43c0
+#define BCM43558_D11AC2G_ID	0x43c1
+#define BCM43558_D11AC5G_ID	0x43c2
+
+#define BCM43566_D11AC_ID	0x43d3
+#define BCM43566_D11AC2G_ID	0x43d4
+#define BCM43566_D11AC5G_ID	0x43d5
+
+#define BCM43568_D11AC_ID	0x43d6
+#define BCM43568_D11AC2G_ID	0x43d7
+#define BCM43568_D11AC5G_ID	0x43d8
+
+#define BCM43569_D11AC_ID	0x43d9
+#define BCM43569_D11AC2G_ID	0x43da
+#define BCM43569_D11AC5G_ID	0x43db
+
+#define BCM43570_D11AC_ID	0x43d9
+#define BCM43570_D11AC2G_ID	0x43da
+#define BCM43570_D11AC5G_ID	0x43db
+
+#define BCM4354_D11AC_ID	0x43df		/* 4354 802.11ac dualband device */
+#define BCM4354_D11AC2G_ID	0x43e0		/* 4354 802.11ac 2.4G device */
+#define BCM4354_D11AC5G_ID	0x43e1		/* 4354 802.11ac 5G device */
+#define BCM43430_D11N2G_ID	0x43e2		/* 43430 802.11n 2.4G device */
+
+
+#define BCM43349_D11N_ID	0x43e6		/* 43349 802.11n dualband id */
+#define BCM43349_D11N2G_ID	0x43e7		/* 43349 802.11n 2.4Ghz band id */
+#define BCM43349_D11N5G_ID	0x43e8		/* 43349 802.11n 5Ghz band id */
+
+#define BCM4358_D11AC_ID        0x43e9          /* 4358 802.11ac dualband device */
+#define BCM4358_D11AC2G_ID      0x43ea          /* 4358 802.11ac 2.4G device */
+#define BCM4358_D11AC5G_ID      0x43eb          /* 4358 802.11ac 5G device */
+
+#define BCM4356_D11AC_ID	0x43ec		/* 4356 802.11ac dualband device */
+#define BCM4356_D11AC2G_ID	0x43ed		/* 4356 802.11ac 2.4G device */
+#define BCM4356_D11AC5G_ID	0x43ee		/* 4356 802.11ac 5G device */
+
+#define	BCMGPRS_UART_ID		0x4333		/* Uart id used by 4306/gprs card */
+#define	BCMGPRS2_UART_ID	0x4344		/* Uart id used by 4306/gprs card */
+#define FPGA_JTAGM_ID		0x43f0		/* FPGA jtagm device id */
+#define BCM_JTAGM_ID		0x43f1		/* BCM jtagm device id */
+#define SDIOH_FPGA_ID		0x43f2		/* sdio host fpga */
+#define BCM_SDIOH_ID		0x43f3		/* BCM sdio host id */
+#define SDIOD_FPGA_ID		0x43f4		/* sdio device fpga */
+#define SPIH_FPGA_ID		0x43f5		/* PCI SPI Host Controller FPGA */
+#define BCM_SPIH_ID		0x43f6		/* Synopsis SPI Host Controller */
+#define MIMO_FPGA_ID		0x43f8		/* FPGA mimo minimacphy device id */
+#define BCM_JTAGM2_ID		0x43f9		/* BCM alternate jtagm device id */
+#define SDHCI_FPGA_ID		0x43fa		/* Standard SDIO Host Controller FPGA */
+#define	BCM4402_ENET_ID		0x4402		/* 4402 enet */
+#define	BCM4402_V90_ID		0x4403		/* 4402 v90 codec */
+#define	BCM4410_DEVICE_ID	0x4410		/* bcm44xx family pci iline */
+#define	BCM4412_DEVICE_ID	0x4412		/* bcm44xx family pci enet */
+#define	BCM4430_DEVICE_ID	0x4430		/* bcm44xx family cardbus iline */
+#define	BCM4432_DEVICE_ID	0x4432		/* bcm44xx family cardbus enet */
+#define	BCM4704_ENET_ID		0x4706		/* 4704 enet (Use 47XX_ENET_ID instead!) */
+#define	BCM4710_DEVICE_ID	0x4710		/* 4710 primary function 0 */
+#define	BCM47XX_AUDIO_ID	0x4711		/* 47xx audio codec */
+#define	BCM47XX_V90_ID		0x4712		/* 47xx v90 codec */
+#define	BCM47XX_ENET_ID		0x4713		/* 47xx enet */
+#define	BCM47XX_EXT_ID		0x4714		/* 47xx external i/f */
+#define	BCM47XX_GMAC_ID		0x4715		/* 47xx Unimac based GbE */
+#define	BCM47XX_USBH_ID		0x4716		/* 47xx usb host */
+#define	BCM47XX_USBD_ID		0x4717		/* 47xx usb device */
+#define	BCM47XX_IPSEC_ID	0x4718		/* 47xx ipsec */
+#define	BCM47XX_ROBO_ID		0x4719		/* 47xx/53xx roboswitch core */
+#define	BCM47XX_USB20H_ID	0x471a		/* 47xx usb 2.0 host */
+#define	BCM47XX_USB20D_ID	0x471b		/* 47xx usb 2.0 device */
+#define	BCM47XX_ATA100_ID	0x471d		/* 47xx parallel ATA */
+#define	BCM47XX_SATAXOR_ID	0x471e		/* 47xx serial ATA & XOR DMA */
+#define	BCM47XX_GIGETH_ID	0x471f		/* 47xx GbE (5700) */
+#define	BCM4712_MIPS_ID		0x4720		/* 4712 base devid */
+#define	BCM4716_DEVICE_ID	0x4722		/* 4716 base devid */
+#define	BCM47XX_USB30H_ID	0x472a		/* 47xx usb 3.0 host */
+#define	BCM47XX_USB30D_ID	0x472b		/* 47xx usb 3.0 device */
+#define BCM47XX_SMBUS_EMU_ID	0x47fe		/* 47xx emulated SMBus device */
+#define	BCM47XX_XOR_EMU_ID	0x47ff		/* 47xx emulated XOR engine */
+#define	EPI41210_DEVICE_ID	0xa0fa		/* bcm4210 */
+#define	EPI41230_DEVICE_ID	0xa10e		/* bcm4230 */
+#define JINVANI_SDIOH_ID	0x4743		/* Jinvani SDIO Gold Host */
+#define BCM27XX_SDIOH_ID	0x2702		/* BCM27xx Standard SDIO Host */
+#define PCIXX21_FLASHMEDIA_ID	0x803b		/* TI PCI xx21 Standard Host Controller */
+#define PCIXX21_SDIOH_ID	0x803c		/* TI PCI xx21 Standard Host Controller */
+#define R5C822_SDIOH_ID		0x0822		/* Ricoh Co Ltd R5C822 SD/SDIO/MMC/MS/MSPro Host */
+#define JMICRON_SDIOH_ID	0x2381		/* JMicron Standard SDIO Host Controller */
+
+/* Chip IDs */
+#define	BCM4306_CHIP_ID		0x4306		/* 4306 chipcommon chipid */
+#define	BCM4311_CHIP_ID		0x4311		/* 4311 PCIe 802.11a/b/g */
+#define	BCM43111_CHIP_ID	43111		/* 43111 chipcommon chipid (OTP chipid) */
+#define	BCM43112_CHIP_ID	43112		/* 43112 chipcommon chipid (OTP chipid) */
+#define	BCM4312_CHIP_ID		0x4312		/* 4312 chipcommon chipid */
+#define BCM4313_CHIP_ID		0x4313		/* 4313 chip id */
+#define	BCM43131_CHIP_ID	43131		/* 43131 chip id (OTP chipid) */
+#define	BCM4315_CHIP_ID		0x4315		/* 4315 chip id */
+#define	BCM4318_CHIP_ID		0x4318		/* 4318 chipcommon chipid */
+#define	BCM4319_CHIP_ID		0x4319		/* 4319 chip id */
+#define	BCM4320_CHIP_ID		0x4320		/* 4320 chipcommon chipid */
+#define	BCM4321_CHIP_ID		0x4321		/* 4321 chipcommon chipid */
+#define	BCM43217_CHIP_ID	43217		/* 43217 chip id (OTP chipid) */
+#define	BCM4322_CHIP_ID		0x4322		/* 4322 chipcommon chipid */
+#define	BCM43221_CHIP_ID	43221		/* 43221 chipcommon chipid (OTP chipid) */
+#define	BCM43222_CHIP_ID	43222		/* 43222 chipcommon chipid */
+#define	BCM43224_CHIP_ID	43224		/* 43224 chipcommon chipid */
+#define	BCM43225_CHIP_ID	43225		/* 43225 chipcommon chipid */
+#define	BCM43227_CHIP_ID	43227		/* 43227 chipcommon chipid */
+#define	BCM43228_CHIP_ID	43228		/* 43228 chipcommon chipid */
+#define	BCM43226_CHIP_ID	43226		/* 43226 chipcommon chipid */
+#define	BCM43231_CHIP_ID	43231		/* 43231 chipcommon chipid (OTP chipid) */
+#define	BCM43234_CHIP_ID	43234		/* 43234 chipcommon chipid */
+#define	BCM43235_CHIP_ID	43235		/* 43235 chipcommon chipid */
+#define	BCM43236_CHIP_ID	43236		/* 43236 chipcommon chipid */
+#define	BCM43237_CHIP_ID	43237		/* 43237 chipcommon chipid */
+#define	BCM43238_CHIP_ID	43238		/* 43238 chipcommon chipid */
+#define	BCM43239_CHIP_ID	43239		/* 43239 chipcommon chipid */
+#define	BCM43420_CHIP_ID	43420		/* 43222 chipcommon chipid (OTP, RBBU) */
+#define	BCM43421_CHIP_ID	43421		/* 43224 chipcommon chipid (OTP, RBBU) */
+#define	BCM43428_CHIP_ID	43428		/* 43228 chipcommon chipid (OTP, RBBU) */
+#define	BCM43431_CHIP_ID	43431		/* 4331  chipcommon chipid (OTP, RBBU) */
+#define	BCM43460_CHIP_ID	43460		/* 4360  chipcommon chipid (OTP, RBBU) */
+#define	BCM4325_CHIP_ID		0x4325		/* 4325 chip id */
+#define	BCM4328_CHIP_ID		0x4328		/* 4328 chip id */
+#define	BCM4329_CHIP_ID		0x4329		/* 4329 chipcommon chipid */
+#define	BCM4331_CHIP_ID		0x4331		/* 4331 chipcommon chipid */
+#define BCM4336_CHIP_ID		0x4336		/* 4336 chipcommon chipid */
+#define BCM43362_CHIP_ID	43362		/* 43362 chipcommon chipid */
+#define BCM4330_CHIP_ID		0x4330		/* 4330 chipcommon chipid */
+#define BCM6362_CHIP_ID		0x6362		/* 6362 chipcommon chipid */
+#define BCM4314_CHIP_ID		0x4314		/* 4314 chipcommon chipid */
+#define BCM43142_CHIP_ID	43142		/* 43142 chipcommon chipid */
+#define BCM43143_CHIP_ID	43143		/* 43143 chipcommon chipid */
+#define	BCM4324_CHIP_ID		0x4324		/* 4324 chipcommon chipid */
+#define	BCM43242_CHIP_ID	43242		/* 43242 chipcommon chipid */
+#define	BCM43243_CHIP_ID	43243		/* 43243 chipcommon chipid */
+#define BCM4334_CHIP_ID		0x4334		/* 4334 chipcommon chipid */
+#define BCM4335_CHIP_ID		0x4335		/* 4335 chipcommon chipid */
+#define BCM4339_CHIP_ID		0x4339		/* 4339 chipcommon chipid */
+#define BCM43349_CHIP_ID	43349			/* 43349(0xA955) chipcommon chipid */
+#define BCM4360_CHIP_ID		0x4360          /* 4360 chipcommon chipid */
+#define BCM4352_CHIP_ID		0x4352          /* 4352 chipcommon chipid */
+#define BCM43526_CHIP_ID	0xAA06
+#define BCM43340_CHIP_ID	43340		/* 43340 chipcommon chipid */
+#define BCM43341_CHIP_ID	43341		/* 43341 chipcommon chipid */
+#define BCM43342_CHIP_ID	43342		/* 43342 chipcommon chipid */
+#define BCM4350_CHIP_ID		0x4350          /* 4350 chipcommon chipid */
+#define BCM4354_CHIP_ID		0x4354          /* 4354 chipcommon chipid */
+#define BCM4356_CHIP_ID		0x4356          /* 4356 chipcommon chipid */
+#define BCM43556_CHIP_ID	0xAA24          /* 43556 chipcommon chipid */
+#define BCM43558_CHIP_ID	0xAA26          /* 43558 chipcommon chipid */
+#define BCM43566_CHIP_ID	0xAA2E          /* 43566 chipcommon chipid */
+#define BCM43568_CHIP_ID	0xAA30          /* 43568 chipcommon chipid */
+#define BCM43569_CHIP_ID	0xAA31          /* 43569 chipcommon chipid */
+#define BCM43570_CHIP_ID	0xAA32          /* 43570 chipcommon chipid */
+#define BCM4358_CHIP_ID         0x4358          /* 4358 chipcommon chipid */
+#define BCM4350_CHIP(chipid)	((CHIPID(chipid) == BCM4350_CHIP_ID) || \
+				(CHIPID(chipid) == BCM4354_CHIP_ID) || \
+				(CHIPID(chipid) == BCM4356_CHIP_ID) || \
+				(CHIPID(chipid) == BCM43556_CHIP_ID) || \
+				(CHIPID(chipid) == BCM43558_CHIP_ID) || \
+				(CHIPID(chipid) == BCM43566_CHIP_ID) || \
+				(CHIPID(chipid) == BCM43568_CHIP_ID) || \
+				(CHIPID(chipid) == BCM43569_CHIP_ID) || \
+				(CHIPID(chipid) == BCM43570_CHIP_ID) || \
+				(CHIPID(chipid) == BCM4358_CHIP_ID)) /* 4350 variations */
+#define BCM4345_CHIP_ID		0x4345		/* 4345 chipcommon chipid */
+#define BCM43430_CHIP_ID	43430		/* 43430 chipcommon chipid */
+#define BCM4349_CHIP_ID		0x4349		/* 4349 chipcommon chipid */
+#define BCM4355_CHIP_ID		0x4355		/* 4355 chipcommon chipid */
+#define BCM4359_CHIP_ID		0x4359		/* 4359 chipcommon chipid */
+#define BCM4349_CHIP(chipid)	((CHIPID(chipid) == BCM4349_CHIP_ID) || \
+				(CHIPID(chipid) == BCM4355_CHIP_ID) || \
+				(CHIPID(chipid) == BCM4359_CHIP_ID))
+#define BCM4349_CHIP_GRPID		BCM4349_CHIP_ID: \
+					case BCM4355_CHIP_ID: \
+					case BCM4359_CHIP_ID
+
+#define BCM43602_CHIP_ID	0xaa52		/* 43602 chipcommon chipid */
+#define BCM43462_CHIP_ID	0xa9c6		/* 43462 chipcommon chipid */
+
+#define	BCM4342_CHIP_ID		4342		/* 4342 chipcommon chipid (OTP, RBBU) */
+#define	BCM4402_CHIP_ID		0x4402		/* 4402 chipid */
+#define	BCM4704_CHIP_ID		0x4704		/* 4704 chipcommon chipid */
+#define	BCM4706_CHIP_ID		0x5300		/* 4706 chipcommon chipid */
+#define BCM4707_CHIP_ID		53010		/* 4707 chipcommon chipid */
+#define BCM53018_CHIP_ID	53018		/* 53018 chipcommon chipid */
+#define BCM4707_CHIP(chipid)	(((chipid) == BCM4707_CHIP_ID) || ((chipid) == BCM53018_CHIP_ID))
+#define	BCM4710_CHIP_ID		0x4710		/* 4710 chipid */
+#define	BCM4712_CHIP_ID		0x4712		/* 4712 chipcommon chipid */
+#define	BCM4716_CHIP_ID		0x4716		/* 4716 chipcommon chipid */
+#define	BCM47162_CHIP_ID	47162		/* 47162 chipcommon chipid */
+#define	BCM4748_CHIP_ID		0x4748		/* 4716 chipcommon chipid (OTP, RBBU) */
+#define	BCM4749_CHIP_ID		0x4749		/* 5357 chipcommon chipid (OTP, RBBU) */
+#define BCM4785_CHIP_ID		0x4785		/* 4785 chipcommon chipid */
+#define	BCM5350_CHIP_ID		0x5350		/* 5350 chipcommon chipid */
+#define	BCM5352_CHIP_ID		0x5352		/* 5352 chipcommon chipid */
+#define	BCM5354_CHIP_ID		0x5354		/* 5354 chipcommon chipid */
+#define BCM5365_CHIP_ID		0x5365		/* 5365 chipcommon chipid */
+#define	BCM5356_CHIP_ID		0x5356		/* 5356 chipcommon chipid */
+#define	BCM5357_CHIP_ID		0x5357		/* 5357 chipcommon chipid */
+#define	BCM53572_CHIP_ID	53572		/* 53572 chipcommon chipid */
+
+/* Package IDs */
+#define	BCM4303_PKG_ID		2		/* 4303 package id */
+#define	BCM4309_PKG_ID		1		/* 4309 package id */
+#define	BCM4712LARGE_PKG_ID	0		/* 340pin 4712 package id */
+#define	BCM4712SMALL_PKG_ID	1		/* 200pin 4712 package id */
+#define	BCM4712MID_PKG_ID	2		/* 225pin 4712 package id */
+#define BCM4328USBD11G_PKG_ID	2		/* 4328 802.11g USB package id */
+#define BCM4328USBDUAL_PKG_ID	3		/* 4328 802.11a/g USB package id */
+#define BCM4328SDIOD11G_PKG_ID	4		/* 4328 802.11g SDIO package id */
+#define BCM4328SDIODUAL_PKG_ID	5		/* 4328 802.11a/g SDIO package id */
+#define BCM4329_289PIN_PKG_ID	0		/* 4329 289-pin package id */
+#define BCM4329_182PIN_PKG_ID	1		/* 4329N 182-pin package id */
+#define BCM5354E_PKG_ID		1		/* 5354E package id */
+#define	BCM4716_PKG_ID		8		/* 4716 package id */
+#define	BCM4717_PKG_ID		9		/* 4717 package id */
+#define	BCM4718_PKG_ID		10		/* 4718 package id */
+#define BCM5356_PKG_NONMODE	1		/* 5356 package without nmode suppport */
+#define BCM5358U_PKG_ID		8		/* 5358U package id */
+#define BCM5358_PKG_ID		9		/* 5358 package id */
+#define BCM47186_PKG_ID		10		/* 47186 package id */
+#define BCM5357_PKG_ID		11		/* 5357 package id */
+#define BCM5356U_PKG_ID		12		/* 5356U package id */
+#define BCM53572_PKG_ID		8		/* 53572 package id */
+#define BCM5357C0_PKG_ID	8		/* 5357c0 package id (the same as 53572) */
+#define BCM47188_PKG_ID		9		/* 47188 package id */
+#define BCM5358C0_PKG_ID	0xa		/* 5358c0 package id */
+#define BCM5356C0_PKG_ID	0xb		/* 5356c0 package id */
+#define BCM4331TT_PKG_ID        8		/* 4331 12x12 package id */
+#define BCM4331TN_PKG_ID        9		/* 4331 12x9 package id */
+#define BCM4331TNA0_PKG_ID     0xb		/* 4331 12x9 package id */
+#define	BCM4706L_PKG_ID		1		/* 4706L package id */
+
+#define HDLSIM5350_PKG_ID	1		/* HDL simulator package id for a 5350 */
+#define HDLSIM_PKG_ID		14		/* HDL simulator package id */
+#define HWSIM_PKG_ID		15		/* Hardware simulator package id */
+#define BCM43224_FAB_CSM	0x8		/* the chip is manufactured by CSM */
+#define BCM43224_FAB_SMIC	0xa		/* the chip is manufactured by SMIC */
+#define BCM4336_WLBGA_PKG_ID	0x8
+#define BCM4330_WLBGA_PKG_ID	0x0
+#define BCM4314PCIE_ARM_PKG_ID		(8 | 0)	/* 4314 QFN PCI package id, bit 3 tie high */
+#define BCM4314SDIO_PKG_ID		(8 | 1)	/* 4314 QFN SDIO package id */
+#define BCM4314PCIE_PKG_ID		(8 | 2)	/* 4314 QFN PCI (ARM-less) package id */
+#define BCM4314SDIO_ARM_PKG_ID		(8 | 3)	/* 4314 QFN SDIO (ARM-less) package id */
+#define BCM4314SDIO_FPBGA_PKG_ID	(8 | 4)	/* 4314 FpBGA SDIO package id */
+#define BCM4314DEV_PKG_ID		(8 | 6)	/* 4314 Developement package id */
+
+#define BCM4707_PKG_ID		1		/* 4707 package id */
+#define BCM4708_PKG_ID		2		/* 4708 package id */
+#define BCM4709_PKG_ID		0		/* 4709 package id */
+
+#define PCIXX21_FLASHMEDIA0_ID	0x8033		/* TI PCI xx21 Standard Host Controller */
+#define PCIXX21_SDIOH0_ID	0x8034		/* TI PCI xx21 Standard Host Controller */
+
+#define BCM4335_WLCSP_PKG_ID	(0x0)	/* WLCSP Module/Mobile SDIO/HSIC. */
+#define BCM4335_FCBGA_PKG_ID	(0x1)	/* FCBGA PC/Embeded/Media PCIE/SDIO */
+#define BCM4335_WLBGA_PKG_ID	(0x2)	/* WLBGA COB/Mobile SDIO/HSIC. */
+#define BCM4335_FCBGAD_PKG_ID	(0x3)	/* FCBGA Debug Debug/Dev All if's. */
+#define BCM4335_PKG_MASK	(0x3)
+
+/* boardflags */
+#define	BFL_BTC2WIRE		0x00000001  /* old 2wire Bluetooth coexistence, OBSOLETE */
+#define BFL_BTCOEX      0x00000001      /* Board supports BTCOEX */
+#define	BFL_PACTRL		0x00000002  /* Board has gpio 9 controlling the PA */
+#define BFL_AIRLINEMODE	0x00000004  /* Board implements gpio 13 radio disable indication, UNUSED */
+#define	BFL_ADCDIV		0x00000008  /* Board has the rssi ADC divider */
+#define BFL_DIS_256QAM		0x00000008
+#define	BFL_ENETROBO		0x00000010  /* Board has robo switch or core */
+#define	BFL_TSSIAVG   		0x00000010  /* TSSI averaging for ACPHY chips */
+#define	BFL_NOPLLDOWN		0x00000020  /* Not ok to power down the chip pll and oscillator */
+#define	BFL_CCKHIPWR		0x00000040  /* Can do high-power CCK transmission */
+#define	BFL_ENETADM		0x00000080  /* Board has ADMtek switch */
+#define	BFL_ENETVLAN		0x00000100  /* Board has VLAN capability */
+#define	BFL_LTECOEX		0x00000200  /* LTE Coex enabled */
+#define BFL_NOPCI		0x00000400  /* Board leaves PCI floating */
+#define BFL_FEM			0x00000800  /* Board supports the Front End Module */
+#define BFL_EXTLNA		0x00001000  /* Board has an external LNA in 2.4GHz band */
+#define BFL_HGPA		0x00002000  /* Board has a high gain PA */
+#define	BFL_BTC2WIRE_ALTGPIO	0x00004000  /* Board's BTC 2wire is in the alternate gpios */
+#define	BFL_ALTIQ		0x00008000  /* Alternate I/Q settings */
+#define BFL_NOPA		0x00010000  /* Board has no PA */
+#define BFL_RSSIINV		0x00020000  /* Board's RSSI uses positive slope(not TSSI) */
+#define BFL_PAREF		0x00040000  /* Board uses the PARef LDO */
+#define BFL_3TSWITCH		0x00080000  /* Board uses a triple throw switch shared with BT */
+#define BFL_PHASESHIFT		0x00100000  /* Board can support phase shifter */
+#define BFL_BUCKBOOST		0x00200000  /* Power topology uses BUCKBOOST */
+#define BFL_FEM_BT		0x00400000  /* Board has FEM and switch to share antenna w/ BT */
+#define BFL_NOCBUCK		0x00800000  /* Power topology doesn't use CBUCK */
+#define BFL_CCKFAVOREVM		0x01000000  /* Favor CCK EVM over spectral mask */
+#define BFL_PALDO		0x02000000  /* Power topology uses PALDO */
+#define BFL_LNLDO2_2P5		0x04000000  /* Select 2.5V as LNLDO2 output voltage */
+#define BFL_FASTPWR		0x08000000
+#define BFL_UCPWRCTL_MININDX	0x08000000  /* Enforce min power index to avoid FEM damage */
+#define BFL_EXTLNA_5GHz		0x10000000  /* Board has an external LNA in 5GHz band */
+#define BFL_TRSW_1by2		0x20000000  /* Board has 2 TRSW's in 1by2 designs */
+#define BFL_GAINBOOSTA01        0x20000000  /* 5g Gainboost for core0 and core1 */
+#define BFL_LO_TRSW_R_5GHz	0x40000000  /* In 5G do not throw TRSW to T for clipLO gain */
+#define BFL_ELNA_GAINDEF	0x80000000  /* Backoff InitGain based on elna_2g/5g field
+					     * when this flag is set
+					     */
+#define BFL_EXTLNA_TX	0x20000000	/* Temp boardflag to indicate to */
+
+/* boardflags2 */
+#define BFL2_RXBB_INT_REG_DIS	0x00000001  /* Board has an external rxbb regulator */
+#define BFL2_APLL_WAR		0x00000002  /* Flag to implement alternative A-band PLL settings */
+#define BFL2_TXPWRCTRL_EN	0x00000004  /* Board permits enabling TX Power Control */
+#define BFL2_2X4_DIV		0x00000008  /* Board supports the 2X4 diversity switch */
+#define BFL2_5G_PWRGAIN		0x00000010  /* Board supports 5G band power gain */
+#define BFL2_PCIEWAR_OVR	0x00000020  /* Board overrides ASPM and Clkreq settings */
+#define BFL2_CAESERS_BRD	0x00000040  /* Board is Caesers brd (unused by sw) */
+#define BFL2_BTC3WIRE		0x00000080  /* Board support legacy 3 wire or 4 wire */
+#define BFL2_BTCLEGACY          0x00000080  /* Board support legacy 3/4 wire, to replace
+					     * BFL2_BTC3WIRE
+					     */
+#define BFL2_SKWRKFEM_BRD	0x00000100  /* 4321mcm93 board uses Skyworks FEM */
+#define BFL2_SPUR_WAR		0x00000200  /* Board has a WAR for clock-harmonic spurs */
+#define BFL2_GPLL_WAR		0x00000400  /* Flag to narrow G-band PLL loop b/w */
+#define BFL2_TRISTATE_LED	0x00000800  /* Tri-state the LED */
+#define BFL2_SINGLEANT_CCK	0x00001000  /* Tx CCK pkts on Ant 0 only */
+#define BFL2_2G_SPUR_WAR	0x00002000  /* WAR to reduce and avoid clock-harmonic spurs in 2G */
+#define BFL2_BPHY_ALL_TXCORES	0x00004000  /* Transmit bphy frames using all tx cores */
+#define BFL2_FCC_BANDEDGE_WAR	0x00008000  /* Activates WAR to improve FCC bandedge performance */
+#define BFL2_DAC_SPUR_IMPROVEMENT 0x00008000       /* Reducing DAC Spurs */
+#define BFL2_GPLL_WAR2	        0x00010000  /* Flag to widen G-band PLL loop b/w */
+#define BFL2_REDUCED_PA_TURNONTIME 0x00010000  /* Flag to reduce PA turn on Time */
+#define BFL2_IPALVLSHIFT_3P3    0x00020000
+#define BFL2_INTERNDET_TXIQCAL  0x00040000  /* Use internal envelope detector for TX IQCAL */
+#define BFL2_XTALBUFOUTEN       0x00080000  /* Keep the buffered Xtal output from radio on */
+				/* Most drivers will turn it off without this flag */
+				/* to save power. */
+
+#define BFL2_ANAPACTRL_2G	0x00100000  /* 2G ext PAs are controlled by analog PA ctrl lines */
+#define BFL2_ANAPACTRL_5G	0x00200000  /* 5G ext PAs are controlled by analog PA ctrl lines */
+#define BFL2_ELNACTRL_TRSW_2G	0x00400000  /* AZW4329: 2G gmode_elna_gain controls TR Switch */
+#define BFL2_BT_SHARE_ANT0	0x00800000  /* share core0 antenna with BT */
+#define BFL2_TEMPSENSE_HIGHER	0x01000000  /* The tempsense threshold can sustain higher value
+					     * than programmed. The exact delta is decided by
+					     * driver per chip/boardtype. This can be used
+					     * when tempsense qualification happens after shipment
+					     */
+#define BFL2_BTC3WIREONLY       0x02000000  /* standard 3 wire btc only.  4 wire not supported */
+#define BFL2_PWR_NOMINAL	0x04000000  /* 0: power reduction on, 1: no power reduction */
+#define BFL2_EXTLNA_PWRSAVE	0x08000000  /* boardflag to enable ucode to apply power save */
+						/* ucode control of eLNA during Tx */
+#define BFL2_4313_RADIOREG	0x10000000
+									   /*  board rework */
+#define BFL2_DYNAMIC_VMID	0x10000000  /* enable dynamic Vmid in idle TSSI CAL for 4331 */
+
+#define BFL2_SDR_EN		0x20000000  /* SDR enabled or disabled */
+#define BFL2_DYNAMIC_VMID	0x10000000  /* boardflag to enable dynamic Vmid idle TSSI CAL */
+#define BFL2_LNA1BYPFORTR2G	0x40000000  /* acphy, enable lna1 bypass for clip gain, 2g */
+#define BFL2_LNA1BYPFORTR5G	0x80000000  /* acphy, enable lna1 bypass for clip gain, 5g */
+
+/* SROM 11 - 11ac boardflag definitions */
+#define BFL_SROM11_BTCOEX  0x00000001  /* Board supports BTCOEX */
+#define BFL_SROM11_WLAN_BT_SH_XTL  0x00000002  /* bluetooth and wlan share same crystal */
+#define BFL_SROM11_EXTLNA	0x00001000  /* Board has an external LNA in 2.4GHz band */
+#define BFL_SROM11_EPA_TURNON_TIME     0x00018000  /* 2 bits for different PA turn on times */
+#define BFL_SROM11_EPA_TURNON_TIME_SHIFT  15
+#define BFL_SROM11_EXTLNA_5GHz	0x10000000  /* Board has an external LNA in 5GHz band */
+#define BFL_SROM11_GAINBOOSTA01	0x20000000  /* 5g Gainboost for core0 and core1 */
+#define BFL2_SROM11_APLL_WAR	0x00000002  /* Flag to implement alternative A-band PLL settings */
+#define BFL2_SROM11_ANAPACTRL_2G  0x00100000  /* 2G ext PAs are ctrl-ed by analog PA ctrl lines */
+#define BFL2_SROM11_ANAPACTRL_5G  0x00200000  /* 5G ext PAs are ctrl-ed by analog PA ctrl lines */
+#define BFL2_SROM11_SINGLEANT_CCK	0x00001000  /* Tx CCK pkts on Ant 0 only */
+
+/* boardflags3 */
+#define BFL3_FEMCTRL_SUB	  0x00000007  /* acphy, subrevs of femctrl on top of srom_femctrl */
+#define BFL3_RCAL_WAR		  0x00000008  /* acphy, rcal war active on this board (4335a0) */
+#define BFL3_TXGAINTBLID	  0x00000070  /* acphy, txgain table id */
+#define BFL3_TXGAINTBLID_SHIFT	  0x4         /* acphy, txgain table id shift bit */
+#define BFL3_TSSI_DIV_WAR	  0x00000080  /* acphy, Seperate paparam for 20/40/80 */
+#define BFL3_TSSI_DIV_WAR_SHIFT	  0x7         /* acphy, Seperate paparam for 20/40/80 shift bit */
+#define BFL3_FEMTBL_FROM_NVRAM    0x00000100  /* acphy, femctrl table is read from nvram */
+#define BFL3_FEMTBL_FROM_NVRAM_SHIFT 0x8         /* acphy, femctrl table is read from nvram */
+#define BFL3_AGC_CFG_2G           0x00000200  /* acphy, gain control configuration for 2G */
+#define BFL3_AGC_CFG_5G           0x00000400  /* acphy, gain control configuration for 5G */
+#define BFL3_PPR_BIT_EXT          0x00000800  /* acphy, bit position for 1bit extension for ppr */
+#define BFL3_PPR_BIT_EXT_SHIFT    11          /* acphy, bit shift for 1bit extension for ppr */
+#define BFL3_BBPLL_SPR_MODE_DIS	  0x00001000  /* acphy, disables bbpll spur modes */
+#define BFL3_RCAL_OTP_VAL_EN      0x00002000  /* acphy, to read rcal_trim value from otp */
+#define BFL3_2GTXGAINTBL_BLANK	  0x00004000  /* acphy, blank the first X ticks of 2g gaintbl */
+#define BFL3_2GTXGAINTBL_BLANK_SHIFT 14       /* acphy, blank the first X ticks of 2g gaintbl */
+#define BFL3_5GTXGAINTBL_BLANK	  0x00008000  /* acphy, blank the first X ticks of 5g gaintbl */
+#define BFL3_5GTXGAINTBL_BLANK_SHIFT 15       /* acphy, blank the first X ticks of 5g gaintbl */
+#define BFL3_PHASETRACK_MAX_ALPHABETA	  0x00010000  /* acphy, to max out alpha,beta to 511 */
+#define BFL3_PHASETRACK_MAX_ALPHABETA_SHIFT 16       /* acphy, to max out alpha,beta to 511 */
+/* acphy, to use backed off gaintbl for lte-coex */
+#define BFL3_LTECOEX_GAINTBL_EN           0x00060000
+/* acphy, to use backed off gaintbl for lte-coex */
+#define BFL3_LTECOEX_GAINTBL_EN_SHIFT 17
+#define BFL3_5G_SPUR_WAR          0x00080000  /* acphy, enable spur WAR in 5G band */
+#define BFL3_1X1_RSDB_ANT	  0x01000000  /* to find if 2-ant RSDB board or 1-ant RSDB board */
+#define BFL3_1X1_RSDB_ANT_SHIFT           24
+
+/* acphy: lpmode2g and lpmode_5g related boardflags */
+#define BFL3_ACPHY_LPMODE_2G	  0x00300000  /* bits 20:21 for lpmode_2g choice */
+#define BFL3_ACPHY_LPMODE_2G_SHIFT	  20
+
+#define BFL3_ACPHY_LPMODE_5G	  0x00C00000  /* bits 22:23 for lpmode_5g choice */
+#define BFL3_ACPHY_LPMODE_5G_SHIFT	  22
+
+#define BFL3_EXT_LPO_ISCLOCK      0x02000000  /* External LPO is clock, not x-tal */
+#define BFL3_FORCE_INT_LPO_SEL    0x04000000  /* Force internal lpo */
+#define BFL3_FORCE_EXT_LPO_SEL    0x08000000  /* Force external lpo */
+
+#define BFL3_EN_BRCM_IMPBF        0x10000000  /* acphy, Allow BRCM Implicit TxBF */
+#define BFL3_AVVMID_FROM_NVRAM    0x40000000  /* Read Av Vmid from NVRAM  */
+#define BFL3_VLIN_EN_FROM_NVRAM    0x80000000  /* Read Vlin En from NVRAM  */
+
+#define BFL3_AVVMID_FROM_NVRAM_SHIFT   30   /* Read Av Vmid from NVRAM  */
+#define BFL3_VLIN_EN_FROM_NVRAM_SHIFT   31   /* Enable Vlin  from NVRAM  */
+
+
+/* board specific GPIO assignment, gpio 0-3 are also customer-configurable led */
+#define	BOARD_GPIO_BTC3W_IN	0x850	/* bit 4 is RF_ACTIVE, bit 6 is STATUS, bit 11 is PRI */
+#define	BOARD_GPIO_BTC3W_OUT	0x020	/* bit 5 is TX_CONF */
+#define	BOARD_GPIO_BTCMOD_IN	0x010	/* bit 4 is the alternate BT Coexistence Input */
+#define	BOARD_GPIO_BTCMOD_OUT	0x020	/* bit 5 is the alternate BT Coexistence Out */
+#define	BOARD_GPIO_BTC_IN	0x080	/* bit 7 is BT Coexistence Input */
+#define	BOARD_GPIO_BTC_OUT	0x100	/* bit 8 is BT Coexistence Out */
+#define	BOARD_GPIO_PACTRL	0x200	/* bit 9 controls the PA on new 4306 boards */
+#define BOARD_GPIO_12		0x1000	/* gpio 12 */
+#define BOARD_GPIO_13		0x2000	/* gpio 13 */
+#define BOARD_GPIO_BTC4_IN	0x0800	/* gpio 11, coex4, in */
+#define BOARD_GPIO_BTC4_BT	0x2000	/* gpio 12, coex4, bt active */
+#define BOARD_GPIO_BTC4_STAT	0x4000	/* gpio 14, coex4, status */
+#define BOARD_GPIO_BTC4_WLAN	0x8000	/* gpio 15, coex4, wlan active */
+#define	BOARD_GPIO_1_WLAN_PWR	0x02	/* throttle WLAN power on X21 board */
+#define	BOARD_GPIO_2_WLAN_PWR	0x04	/* throttle WLAN power on X29C board */
+#define	BOARD_GPIO_3_WLAN_PWR	0x08	/* throttle WLAN power on X28 board */
+#define	BOARD_GPIO_4_WLAN_PWR	0x10	/* throttle WLAN power on X19 board */
+
+#define GPIO_BTC4W_OUT_4312  0x010  /* bit 4 is BT_IODISABLE */
+#define GPIO_BTC4W_OUT_43224  0x020  /* bit 5 is BT_IODISABLE */
+#define GPIO_BTC4W_OUT_43224_SHARED  0x0e0  /* bit 5 is BT_IODISABLE */
+#define GPIO_BTC4W_OUT_43225  0x0e0  /* bit 5 BT_IODISABLE, bit 6 SW_BT, bit 7 SW_WL */
+#define GPIO_BTC4W_OUT_43421  0x020  /* bit 5 is BT_IODISABLE */
+#define GPIO_BTC4W_OUT_4313  0x060  /* bit 5 SW_BT, bit 6 SW_WL */
+#define GPIO_BTC4W_OUT_4331_SHARED  0x010  /* GPIO 4  */
+
+#define	PCI_CFG_GPIO_SCS	0x10	/* PCI config space bit 4 for 4306c0 slow clock source */
+#define PCI_CFG_GPIO_HWRAD	0x20	/* PCI config space GPIO 13 for hw radio disable */
+#define PCI_CFG_GPIO_XTAL	0x40	/* PCI config space GPIO 14 for Xtal power-up */
+#define PCI_CFG_GPIO_PLL	0x80	/* PCI config space GPIO 15 for PLL power-down */
+
+/* power control defines */
+#define PLL_DELAY		150		/* us pll on delay */
+#define FREF_DELAY		200		/* us fref change delay */
+#define MIN_SLOW_CLK		32		/* us Slow clock period */
+#define	XTAL_ON_DELAY		1000		/* us crystal power-on delay */
+
+
+/* 43341 Boards */
+#define BCM943341WLABGS_SSID	0x062d
+
+/* 43342 Boards */
+#define BCM943342FCAGBI_SSID	0x0641
+
+/* 43602 Boards, unclear yet what boards will be created. */
+#define BCM943602RSVD1_SSID	0x06a5
+#define BCM943602RSVD2_SSID	0x06a6
+#define BCM943602X87            0X0133
+#define BCM943602X238           0X0132
+
+/* # of GPIO pins */
+#define GPIO_NUMPINS		32
+
+/* These values are used by dhd host driver. */
+#define RDL_RAM_BASE_4319 0x60000000
+#define RDL_RAM_BASE_4329 0x60000000
+#define RDL_RAM_SIZE_4319 0x48000
+#define RDL_RAM_SIZE_4329  0x48000
+#define RDL_RAM_SIZE_43236 0x70000
+#define RDL_RAM_BASE_43236 0x60000000
+#define RDL_RAM_SIZE_4328 0x60000
+#define RDL_RAM_BASE_4328 0x80000000
+#define RDL_RAM_SIZE_4322 0x60000
+#define RDL_RAM_BASE_4322 0x60000000
+#define RDL_RAM_SIZE_4360  0xA0000
+#define RDL_RAM_BASE_4360  0x60000000
+#define RDL_RAM_SIZE_43242  0x90000
+#define RDL_RAM_BASE_43242  0x60000000
+#define RDL_RAM_SIZE_43143  0x70000
+#define RDL_RAM_BASE_43143  0x60000000
+#define RDL_RAM_SIZE_4350  0xC0000
+#define RDL_RAM_BASE_4350  0x180800
+
+/* generic defs for nvram "muxenab" bits
+* Note: these differ for 4335a0. refer bcmchipc.h for specific mux options.
+*/
+#define MUXENAB_UART		0x00000001
+#define MUXENAB_GPIO		0x00000002
+#define MUXENAB_ERCX		0x00000004	/* External Radio BT coex */
+#define MUXENAB_JTAG		0x00000008
+#define MUXENAB_HOST_WAKE	0x00000010	/* configure GPIO for SDIO host_wake */
+#define MUXENAB_I2S_EN		0x00000020
+#define MUXENAB_I2S_MASTER	0x00000040
+#define MUXENAB_I2S_FULL	0x00000080
+#define MUXENAB_SFLASH		0x00000100
+#define MUXENAB_RFSWCTRL0	0x00000200
+#define MUXENAB_RFSWCTRL1	0x00000400
+#define MUXENAB_RFSWCTRL2	0x00000800
+#define MUXENAB_SECI		0x00001000
+#define MUXENAB_BT_LEGACY	0x00002000
+#define MUXENAB_HOST_WAKE1	0x00004000	/* configure alternative GPIO for SDIO host_wake */
+
+/* Boot flags */
+#define FLASH_KERNEL_NFLASH	0x00000001
+#define FLASH_BOOT_NFLASH	0x00000002
+
+#endif /* _BCMDEVS_H */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmendian.h b/drivers/net/wireless/bcmdhd/include/bcmendian.h
new file mode 100644
index 0000000..ff527f6
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmendian.h
@@ -0,0 +1,329 @@
+/*
+ * Byte order utilities
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *  $Id: bcmendian.h 402715 2013-05-16 18:50:09Z $
+ *
+ * This file by default provides proper behavior on little-endian architectures.
+ * On big-endian architectures, IL_BIGENDIAN should be defined.
+ */
+
+#ifndef _BCMENDIAN_H_
+#define _BCMENDIAN_H_
+
+#include <typedefs.h>
+
+/* Reverse the bytes in a 16-bit value */
+#define BCMSWAP16(val) \
+	((uint16)((((uint16)(val) & (uint16)0x00ffU) << 8) | \
+		  (((uint16)(val) & (uint16)0xff00U) >> 8)))
+
+/* Reverse the bytes in a 32-bit value */
+#define BCMSWAP32(val) \
+	((uint32)((((uint32)(val) & (uint32)0x000000ffU) << 24) | \
+		  (((uint32)(val) & (uint32)0x0000ff00U) <<  8) | \
+		  (((uint32)(val) & (uint32)0x00ff0000U) >>  8) | \
+		  (((uint32)(val) & (uint32)0xff000000U) >> 24)))
+
+/* Reverse the two 16-bit halves of a 32-bit value */
+#define BCMSWAP32BY16(val) \
+	((uint32)((((uint32)(val) & (uint32)0x0000ffffU) << 16) | \
+		  (((uint32)(val) & (uint32)0xffff0000U) >> 16)))
+
+/* Reverse the bytes in a 64-bit value */
+#define BCMSWAP64(val) \
+	((uint64)((((uint64)(val) & 0x00000000000000ffULL) << 56) | \
+	          (((uint64)(val) & 0x000000000000ff00ULL) << 40) | \
+	          (((uint64)(val) & 0x0000000000ff0000ULL) << 24) | \
+	          (((uint64)(val) & 0x00000000ff000000ULL) <<  8) | \
+	          (((uint64)(val) & 0x000000ff00000000ULL) >>  8) | \
+	          (((uint64)(val) & 0x0000ff0000000000ULL) >> 24) | \
+	          (((uint64)(val) & 0x00ff000000000000ULL) >> 40) | \
+	          (((uint64)(val) & 0xff00000000000000ULL) >> 56)))
+
+/* Reverse the two 32-bit halves of a 64-bit value */
+#define BCMSWAP64BY32(val) \
+	((uint64)((((uint64)(val) & 0x00000000ffffffffULL) << 32) | \
+	          (((uint64)(val) & 0xffffffff00000000ULL) >> 32)))
+
+
+/* Byte swapping macros
+ *    Host <=> Network (Big Endian) for 16- and 32-bit values
+ *    Host <=> Little-Endian for 16- and 32-bit values
+ */
+#ifndef hton16
+#define HTON16(i) BCMSWAP16(i)
+#define	hton16(i) bcmswap16(i)
+#define	HTON32(i) BCMSWAP32(i)
+#define	hton32(i) bcmswap32(i)
+#define	NTOH16(i) BCMSWAP16(i)
+#define	ntoh16(i) bcmswap16(i)
+#define	NTOH32(i) BCMSWAP32(i)
+#define	ntoh32(i) bcmswap32(i)
+#define LTOH16(i) (i)
+#define ltoh16(i) (i)
+#define LTOH32(i) (i)
+#define ltoh32(i) (i)
+#define HTOL16(i) (i)
+#define htol16(i) (i)
+#define HTOL32(i) (i)
+#define htol32(i) (i)
+#define HTOL64(i) (i)
+#define htol64(i) (i)
+#endif /* hton16 */
+
+#define ltoh16_buf(buf, i)
+#define htol16_buf(buf, i)
+
+/* Unaligned loads and stores in host byte order */
+#define load32_ua(a)		ltoh32_ua(a)
+#define store32_ua(a, v)	htol32_ua_store(v, a)
+#define load16_ua(a)		ltoh16_ua(a)
+#define store16_ua(a, v)	htol16_ua_store(v, a)
+
+#define _LTOH16_UA(cp)	((cp)[0] | ((cp)[1] << 8))
+#define _LTOH32_UA(cp)	((cp)[0] | ((cp)[1] << 8) | ((cp)[2] << 16) | ((cp)[3] << 24))
+#define _NTOH16_UA(cp)	(((cp)[0] << 8) | (cp)[1])
+#define _NTOH32_UA(cp)	(((cp)[0] << 24) | ((cp)[1] << 16) | ((cp)[2] << 8) | (cp)[3])
+
+#define ltoh_ua(ptr) \
+	(sizeof(*(ptr)) == sizeof(uint8) ? *(const uint8 *)(ptr) : \
+	 sizeof(*(ptr)) == sizeof(uint16) ? _LTOH16_UA((const uint8 *)(ptr)) : \
+	 sizeof(*(ptr)) == sizeof(uint32) ? _LTOH32_UA((const uint8 *)(ptr)) : \
+	 *(uint8 *)0)
+
+#define ntoh_ua(ptr) \
+	(sizeof(*(ptr)) == sizeof(uint8) ? *(const uint8 *)(ptr) : \
+	 sizeof(*(ptr)) == sizeof(uint16) ? _NTOH16_UA((const uint8 *)(ptr)) : \
+	 sizeof(*(ptr)) == sizeof(uint32) ? _NTOH32_UA((const uint8 *)(ptr)) : \
+	 *(uint8 *)0)
+
+#ifdef __GNUC__
+
+/* GNU macro versions avoid referencing the argument multiple times, while also
+ * avoiding the -fno-inline used in ROM builds.
+ */
+
+#define bcmswap16(val) ({ \
+	uint16 _val = (val); \
+	BCMSWAP16(_val); \
+})
+
+#define bcmswap32(val) ({ \
+	uint32 _val = (val); \
+	BCMSWAP32(_val); \
+})
+
+#define bcmswap64(val) ({ \
+	uint64 _val = (val); \
+	BCMSWAP64(_val); \
+})
+
+#define bcmswap32by16(val) ({ \
+	uint32 _val = (val); \
+	BCMSWAP32BY16(_val); \
+})
+
+#define bcmswap16_buf(buf, len) ({ \
+	uint16 *_buf = (uint16 *)(buf); \
+	uint _wds = (len) / 2; \
+	while (_wds--) { \
+		*_buf = bcmswap16(*_buf); \
+		_buf++; \
+	} \
+})
+
+#define htol16_ua_store(val, bytes) ({ \
+	uint16 _val = (val); \
+	uint8 *_bytes = (uint8 *)(bytes); \
+	_bytes[0] = _val & 0xff; \
+	_bytes[1] = _val >> 8; \
+})
+
+#define htol32_ua_store(val, bytes) ({ \
+	uint32 _val = (val); \
+	uint8 *_bytes = (uint8 *)(bytes); \
+	_bytes[0] = _val & 0xff; \
+	_bytes[1] = (_val >> 8) & 0xff; \
+	_bytes[2] = (_val >> 16) & 0xff; \
+	_bytes[3] = _val >> 24; \
+})
+
+#define hton16_ua_store(val, bytes) ({ \
+	uint16 _val = (val); \
+	uint8 *_bytes = (uint8 *)(bytes); \
+	_bytes[0] = _val >> 8; \
+	_bytes[1] = _val & 0xff; \
+})
+
+#define hton32_ua_store(val, bytes) ({ \
+	uint32 _val = (val); \
+	uint8 *_bytes = (uint8 *)(bytes); \
+	_bytes[0] = _val >> 24; \
+	_bytes[1] = (_val >> 16) & 0xff; \
+	_bytes[2] = (_val >> 8) & 0xff; \
+	_bytes[3] = _val & 0xff; \
+})
+
+#define ltoh16_ua(bytes) ({ \
+	const uint8 *_bytes = (const uint8 *)(bytes); \
+	_LTOH16_UA(_bytes); \
+})
+
+#define ltoh32_ua(bytes) ({ \
+	const uint8 *_bytes = (const uint8 *)(bytes); \
+	_LTOH32_UA(_bytes); \
+})
+
+#define ntoh16_ua(bytes) ({ \
+	const uint8 *_bytes = (const uint8 *)(bytes); \
+	_NTOH16_UA(_bytes); \
+})
+
+#define ntoh32_ua(bytes) ({ \
+	const uint8 *_bytes = (const uint8 *)(bytes); \
+	_NTOH32_UA(_bytes); \
+})
+
+#else /* !__GNUC__ */
+
+/* Inline versions avoid referencing the argument multiple times */
+static INLINE uint16
+bcmswap16(uint16 val)
+{
+	return BCMSWAP16(val);
+}
+
+static INLINE uint32
+bcmswap32(uint32 val)
+{
+	return BCMSWAP32(val);
+}
+
+static INLINE uint64
+bcmswap64(uint64 val)
+{
+	return BCMSWAP64(val);
+}
+
+static INLINE uint32
+bcmswap32by16(uint32 val)
+{
+	return BCMSWAP32BY16(val);
+}
+
+/* Reverse pairs of bytes in a buffer (not for high-performance use) */
+/* buf	- start of buffer of shorts to swap */
+/* len  - byte length of buffer */
+static INLINE void
+bcmswap16_buf(uint16 *buf, uint len)
+{
+	len = len / 2;
+
+	while (len--) {
+		*buf = bcmswap16(*buf);
+		buf++;
+	}
+}
+
+/*
+ * Store 16-bit value to unaligned little-endian byte array.
+ */
+static INLINE void
+htol16_ua_store(uint16 val, uint8 *bytes)
+{
+	bytes[0] = val & 0xff;
+	bytes[1] = val >> 8;
+}
+
+/*
+ * Store 32-bit value to unaligned little-endian byte array.
+ */
+static INLINE void
+htol32_ua_store(uint32 val, uint8 *bytes)
+{
+	bytes[0] = val & 0xff;
+	bytes[1] = (val >> 8) & 0xff;
+	bytes[2] = (val >> 16) & 0xff;
+	bytes[3] = val >> 24;
+}
+
+/*
+ * Store 16-bit value to unaligned network-(big-)endian byte array.
+ */
+static INLINE void
+hton16_ua_store(uint16 val, uint8 *bytes)
+{
+	bytes[0] = val >> 8;
+	bytes[1] = val & 0xff;
+}
+
+/*
+ * Store 32-bit value to unaligned network-(big-)endian byte array.
+ */
+static INLINE void
+hton32_ua_store(uint32 val, uint8 *bytes)
+{
+	bytes[0] = val >> 24;
+	bytes[1] = (val >> 16) & 0xff;
+	bytes[2] = (val >> 8) & 0xff;
+	bytes[3] = val & 0xff;
+}
+
+/*
+ * Load 16-bit value from unaligned little-endian byte array.
+ */
+static INLINE uint16
+ltoh16_ua(const void *bytes)
+{
+	return _LTOH16_UA((const uint8 *)bytes);
+}
+
+/*
+ * Load 32-bit value from unaligned little-endian byte array.
+ */
+static INLINE uint32
+ltoh32_ua(const void *bytes)
+{
+	return _LTOH32_UA((const uint8 *)bytes);
+}
+
+/*
+ * Load 16-bit value from unaligned big-(network-)endian byte array.
+ */
+static INLINE uint16
+ntoh16_ua(const void *bytes)
+{
+	return _NTOH16_UA((const uint8 *)bytes);
+}
+
+/*
+ * Load 32-bit value from unaligned big-(network-)endian byte array.
+ */
+static INLINE uint32
+ntoh32_ua(const void *bytes)
+{
+	return _NTOH32_UA((const uint8 *)bytes);
+}
+
+#endif /* !__GNUC__ */
+#endif /* !_BCMENDIAN_H_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmmsgbuf.h b/drivers/net/wireless/bcmdhd/include/bcmmsgbuf.h
new file mode 100644
index 0000000..e4281b3
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmmsgbuf.h
@@ -0,0 +1,749 @@
+/*
+ * MSGBUF network driver ioctl/indication encoding
+ * Broadcom 802.11abg Networking Device Driver
+ *
+ * Definitions subject to change without notice.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ *
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ *
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmmsgbuf.h 472643 2014-04-24 21:19:22Z $
+ */
+#ifndef _bcmmsgbuf_h_
+#define	_bcmmsgbuf_h_
+#include <proto/ethernet.h>
+#include <wlioctl.h>
+#include <bcmpcie.h>
+
+#define MSGBUF_MAX_MSG_SIZE   ETHER_MAX_LEN
+
+#define D2H_EPOCH_MODULO			253 /* sequence number wrap */
+#define D2H_EPOCH_INIT_VAL			(D2H_EPOCH_MODULO + 1)
+
+#define H2DRING_TXPOST_ITEMSIZE		48
+#define H2DRING_RXPOST_ITEMSIZE		32
+#define H2DRING_CTRL_SUB_ITEMSIZE	40
+#define D2HRING_TXCMPLT_ITEMSIZE	16
+#define D2HRING_RXCMPLT_ITEMSIZE	32
+#define D2HRING_CTRL_CMPLT_ITEMSIZE	24
+
+#define H2DRING_TXPOST_MAX_ITEM			512
+#define H2DRING_RXPOST_MAX_ITEM			256
+#define H2DRING_CTRL_SUB_MAX_ITEM		20
+#define D2HRING_TXCMPLT_MAX_ITEM		1024
+#define D2HRING_RXCMPLT_MAX_ITEM		256
+#define D2HRING_CTRL_CMPLT_MAX_ITEM		20
+enum {
+	DNGL_TO_HOST_MSGBUF,
+	HOST_TO_DNGL_MSGBUF
+};
+
+enum {
+	HOST_TO_DNGL_TXP_DATA,
+	HOST_TO_DNGL_RXP_DATA,
+	HOST_TO_DNGL_CTRL,
+	DNGL_TO_HOST_DATA,
+	DNGL_TO_HOST_CTRL
+};
+
+#define MESSAGE_PAYLOAD(a) (a & MSG_TYPE_INTERNAL_USE_START) ? TRUE : FALSE
+
+#ifdef PCIE_API_REV1
+
+#define BCMMSGBUF_DUMMY_REF(a, b)	do {BCM_REFERENCE((a));BCM_REFERENCE((b));}  while (0)
+
+#define BCMMSGBUF_API_IFIDX(a)		0
+#define BCMMSGBUF_API_SEQNUM(a)		0
+#define BCMMSGBUF_IOCTL_XTID(a)		0
+#define BCMMSGBUF_IOCTL_PKTID(a)	((a)->cmd_id)
+
+#define BCMMSGBUF_SET_API_IFIDX(a, b)	BCMMSGBUF_DUMMY_REF(a, b)
+#define BCMMSGBUF_SET_API_SEQNUM(a, b)	BCMMSGBUF_DUMMY_REF(a, b)
+#define BCMMSGBUF_IOCTL_SET_PKTID(a, b)	(BCMMSGBUF_IOCTL_PKTID(a) = (b))
+#define BCMMSGBUF_IOCTL_SET_XTID(a, b)	BCMMSGBUF_DUMMY_REF(a, b)
+
+#else /* PCIE_API_REV1 */
+
+#define BCMMSGBUF_API_IFIDX(a)		((a)->if_id)
+#define BCMMSGBUF_IOCTL_PKTID(a)	((a)->pkt_id)
+#define BCMMSGBUF_API_SEQNUM(a)		((a)->u.seq.seq_no)
+#define BCMMSGBUF_IOCTL_XTID(a)		((a)->xt_id)
+
+#define BCMMSGBUF_SET_API_IFIDX(a, b)	(BCMMSGBUF_API_IFIDX((a)) = (b))
+#define BCMMSGBUF_SET_API_SEQNUM(a, b)	(BCMMSGBUF_API_SEQNUM((a)) = (b))
+#define BCMMSGBUF_IOCTL_SET_PKTID(a, b)	(BCMMSGBUF_IOCTL_PKTID((a)) = (b))
+#define BCMMSGBUF_IOCTL_SET_XTID(a, b)	(BCMMSGBUF_IOCTL_XTID((a)) = (b))
+
+#endif /* PCIE_API_REV1 */
+
+/* utility data structures */
+union addr64 {
+	struct {
+		uint32 low;
+		uint32 high;
+	};
+	struct {
+		uint32 low_addr;
+		uint32 high_addr;
+	};
+	uint64 u64;
+} DECLSPEC_ALIGN(8);
+
+typedef union addr64 addr64_t;
+
+/* IOCTL req Hdr */
+/* cmn Msg Hdr */
+typedef struct cmn_msg_hdr {
+	/* message type */
+	uint8 msg_type;
+	/* interface index this is valid for */
+	uint8 if_id;
+	/* flags */
+	uint8 flags;
+	/* sequence number */
+	uint8 epoch;
+	/* packet Identifier for the associated host buffer */
+	uint32 request_id;
+} cmn_msg_hdr_t;
+
+/* message type */
+typedef enum bcmpcie_msgtype {
+	MSG_TYPE_GEN_STATUS 		= 0x1,
+	MSG_TYPE_RING_STATUS		= 0x2,
+	MSG_TYPE_FLOW_RING_CREATE	= 0x3,
+	MSG_TYPE_FLOW_RING_CREATE_CMPLT	= 0x4,
+	MSG_TYPE_FLOW_RING_DELETE	= 0x5,
+	MSG_TYPE_FLOW_RING_DELETE_CMPLT	= 0x6,
+	MSG_TYPE_FLOW_RING_FLUSH	= 0x7,
+	MSG_TYPE_FLOW_RING_FLUSH_CMPLT	= 0x8,
+	MSG_TYPE_IOCTLPTR_REQ		= 0x9,
+	MSG_TYPE_IOCTLPTR_REQ_ACK	= 0xA,
+	MSG_TYPE_IOCTLRESP_BUF_POST	= 0xB,
+	MSG_TYPE_IOCTL_CMPLT		= 0xC,
+	MSG_TYPE_EVENT_BUF_POST		= 0xD,
+	MSG_TYPE_WL_EVENT		= 0xE,
+	MSG_TYPE_TX_POST		= 0xF,
+	MSG_TYPE_TX_STATUS		= 0x10,
+	MSG_TYPE_RXBUF_POST		= 0x11,
+	MSG_TYPE_RX_CMPLT		= 0x12,
+	MSG_TYPE_LPBK_DMAXFER 		= 0x13,
+	MSG_TYPE_LPBK_DMAXFER_CMPLT	= 0x14,
+	MSG_TYPE_API_MAX_RSVD		= 0x3F
+} bcmpcie_msg_type_t;
+
+typedef enum bcmpcie_msgtype_int {
+	MSG_TYPE_INTERNAL_USE_START	= 0x40,
+	MSG_TYPE_EVENT_PYLD		= 0x41,
+	MSG_TYPE_IOCT_PYLD		= 0x42,
+	MSG_TYPE_RX_PYLD		= 0x43,
+	MSG_TYPE_HOST_FETCH		= 0x44,
+	MSG_TYPE_LPBK_DMAXFER_PYLD	= 0x45,
+	MSG_TYPE_TXMETADATA_PYLD	= 0x46,
+	MSG_TYPE_HOSTDMA_PTRS		= 0x47
+} bcmpcie_msgtype_int_t;
+
+typedef enum bcmpcie_msgtype_u {
+	MSG_TYPE_TX_BATCH_POST		= 0x80,
+	MSG_TYPE_IOCTL_REQ		= 0x81,
+	MSG_TYPE_HOST_EVNT		= 0x82,
+	MSG_TYPE_LOOPBACK		= 0x83
+} bcmpcie_msgtype_u_t;
+
+
+/* if_id */
+#define BCMPCIE_CMNHDR_IFIDX_PHYINTF_SHFT	5
+#define BCMPCIE_CMNHDR_IFIDX_PHYINTF_MAX	0x7
+#define BCMPCIE_CMNHDR_IFIDX_PHYINTF_MASK	\
+	(BCMPCIE_CMNHDR_IFIDX_PHYINTF_MAX << BCMPCIE_CMNHDR_IFIDX_PHYINTF_SHFT)
+#define BCMPCIE_CMNHDR_IFIDX_VIRTINTF_SHFT	0
+#define BCMPCIE_CMNHDR_IFIDX_VIRTINTF_MAX	0x1F
+#define BCMPCIE_CMNHDR_IFIDX_VIRTINTF_MASK	\
+	(BCMPCIE_CMNHDR_IFIDX_PHYINTF_MAX << BCMPCIE_CMNHDR_IFIDX_PHYINTF_SHFT)
+
+/* flags */
+#define BCMPCIE_CMNHDR_FLAGS_DMA_R_IDX		0x1
+#define BCMPCIE_CMNHDR_FLAGS_DMA_R_IDX_INTR	0x2
+#define BCMPCIE_CMNHDR_FLAGS_PHASE_BIT		0x80
+
+
+/* IOCTL request message */
+typedef struct ioctl_req_msg {
+	/* common message header */
+	cmn_msg_hdr_t 	cmn_hdr;
+
+	/* ioctl command type */
+	uint32		cmd;
+	/* ioctl transaction ID, to pair with a ioctl response */
+	uint16		trans_id;
+	/* input arguments buffer len */
+	uint16		input_buf_len;
+	/* expected output len */
+	uint16		output_buf_len;
+	/* to aling the host address on 8 byte boundary */
+	uint16		rsvd[3];
+	/* always aling on 8 byte boundary */
+	addr64_t	host_input_buf_addr;
+	/* rsvd */
+	uint32		rsvd1[2];
+} ioctl_req_msg_t;
+
+/* buffer post messages for device to use to return IOCTL responses, Events */
+typedef struct ioctl_resp_evt_buf_post_msg {
+	/* common message header */
+	cmn_msg_hdr_t	cmn_hdr;
+	/* length of the host buffer supplied */
+	uint16		host_buf_len;
+	/* to aling the host address on 8 byte boundary */
+	uint16		reserved[3];
+	/* always aling on 8 byte boundary */
+	addr64_t	host_buf_addr;
+	uint32		rsvd[4];
+} ioctl_resp_evt_buf_post_msg_t;
+
+
+typedef struct pcie_dma_xfer_params {
+	/* common message header */
+	cmn_msg_hdr_t	cmn_hdr;
+
+	/* always aling on 8 byte boundary */
+	addr64_t	host_input_buf_addr;
+
+	/* always aling on 8 byte boundary */
+	addr64_t	host_ouput_buf_addr;
+
+	/* length of transfer */
+	uint32		xfer_len;
+	/* delay before doing the src txfer */
+	uint32		srcdelay;
+	/* delay before doing the dest txfer */
+	uint32		destdelay;
+	uint32		rsvd;
+} pcie_dma_xfer_params_t;
+
+/* Complete msgbuf hdr for flow ring update from host to dongle */
+typedef struct tx_flowring_create_request {
+	cmn_msg_hdr_t   msg;
+	uint8	da[ETHER_ADDR_LEN];
+	uint8	sa[ETHER_ADDR_LEN];
+	uint8	tid;
+	uint8 	if_flags;
+	uint16	flow_ring_id;
+	uint8 	tc;
+	uint8	priority;
+	uint16 	int_vector;
+	uint16	max_items;
+	uint16	len_item;
+	addr64_t flow_ring_ptr;
+} tx_flowring_create_request_t;
+
+typedef struct tx_flowring_delete_request {
+	cmn_msg_hdr_t   msg;
+	uint16	flow_ring_id;
+	uint16 	reason;
+	uint32	rsvd[7];
+} tx_flowring_delete_request_t;
+
+typedef struct tx_flowring_flush_request {
+	cmn_msg_hdr_t   msg;
+	uint16	flow_ring_id;
+	uint16 	reason;
+	uint32	rsvd[7];
+} tx_flowring_flush_request_t;
+
+typedef union ctrl_submit_item {
+	ioctl_req_msg_t			ioctl_req;
+	ioctl_resp_evt_buf_post_msg_t	resp_buf_post;
+	pcie_dma_xfer_params_t		dma_xfer;
+	tx_flowring_create_request_t	flow_create;
+	tx_flowring_delete_request_t	flow_delete;
+	tx_flowring_flush_request_t	flow_flush;
+	unsigned char			check[H2DRING_CTRL_SUB_ITEMSIZE];
+} ctrl_submit_item_t;
+
+/* Control Completion messages (20 bytes) */
+typedef struct compl_msg_hdr {
+	/* status for the completion */
+	int16	status;
+	/* submisison flow ring id which generated this status */
+	uint16	flow_ring_id;
+} compl_msg_hdr_t;
+
+/* XOR checksum or a magic number to audit DMA done */
+typedef uint32 dma_done_t;
+
+/* completion header status codes */
+#define	BCMPCIE_SUCCESS			0
+#define BCMPCIE_NOTFOUND		1
+#define BCMPCIE_NOMEM			2
+#define BCMPCIE_BADOPTION		3
+#define BCMPCIE_RING_IN_USE		4
+#define BCMPCIE_RING_ID_INVALID		5
+#define BCMPCIE_PKT_FLUSH		6
+#define BCMPCIE_NO_EVENT_BUF		7
+#define BCMPCIE_NO_RX_BUF		8
+#define BCMPCIE_NO_IOCTLRESP_BUF	9
+#define BCMPCIE_MAX_IOCTLRESP_BUF	10
+#define BCMPCIE_MAX_EVENT_BUF		11
+
+/* IOCTL completion response */
+typedef struct ioctl_compl_resp_msg {
+	/* common message header */
+	cmn_msg_hdr_t		cmn_hdr;
+	/* completeion message header */
+	compl_msg_hdr_t		compl_hdr;
+	/* response buffer len where a host buffer is involved */
+	uint16			resp_len;
+	/* transaction id to pair with a request */
+	uint16			trans_id;
+	/* cmd id */
+	uint32			cmd;
+	/* XOR checksum or a magic number to audit DMA done */
+	dma_done_t		marker;
+} ioctl_comp_resp_msg_t;
+
+/* IOCTL request acknowledgement */
+typedef struct ioctl_req_ack_msg {
+	/* common message header */
+	cmn_msg_hdr_t		cmn_hdr;
+	/* completion message header */
+	compl_msg_hdr_t 	compl_hdr;
+	/* cmd id */
+	uint32			cmd;
+	uint32			rsvd[1];
+	/* XOR checksum or a magic number to audit DMA done */
+	dma_done_t		marker;
+} ioctl_req_ack_msg_t;
+
+/* WL event message: send from device to host */
+typedef struct wlevent_req_msg {
+	/* common message header */
+	cmn_msg_hdr_t		cmn_hdr;
+	/* completeion message header */
+	compl_msg_hdr_t		compl_hdr;
+	/* event data len valid with the event buffer */
+	uint16			event_data_len;
+	/* sequence number */
+	uint16			seqnum;
+	/* rsvd	*/
+	uint32			rsvd;
+	/* XOR checksum or a magic number to audit DMA done */
+	dma_done_t		marker;
+} wlevent_req_msg_t;
+
+/* dma xfer complete message */
+typedef struct pcie_dmaxfer_cmplt {
+	/* common message header */
+	cmn_msg_hdr_t		cmn_hdr;
+	/* completion message header */
+	compl_msg_hdr_t		compl_hdr;
+	uint32			rsvd[2];
+	/* XOR checksum or a magic number to audit DMA done */
+	dma_done_t		marker;
+} pcie_dmaxfer_cmplt_t;
+
+/* general status message */
+typedef struct pcie_gen_status {
+	/* common message header */
+	cmn_msg_hdr_t		cmn_hdr;
+	/* completeion message header */
+	compl_msg_hdr_t		compl_hdr;
+	uint32			rsvd[2];
+	/* XOR checksum or a magic number to audit DMA done */
+	dma_done_t		marker;
+} pcie_gen_status_t;
+
+/* ring status message */
+typedef struct pcie_ring_status {
+	/* common message header */
+	cmn_msg_hdr_t		cmn_hdr;
+	/* completion message header */
+	compl_msg_hdr_t		compl_hdr;
+	/* message which firmware couldn't decode */
+	uint16			write_idx;
+	uint16			rsvd[3];
+	/* XOR checksum or a magic number to audit DMA done */
+	dma_done_t		marker;
+} pcie_ring_status_t;
+
+typedef struct tx_flowring_create_response {
+	cmn_msg_hdr_t		msg;
+	compl_msg_hdr_t 	cmplt;
+	uint32			rsvd[2];
+	/* XOR checksum or a magic number to audit DMA done */
+	dma_done_t		marker;
+} tx_flowring_create_response_t;
+typedef struct tx_flowring_delete_response {
+	cmn_msg_hdr_t		msg;
+	compl_msg_hdr_t 	cmplt;
+	uint32			rsvd[2];
+	/* XOR checksum or a magic number to audit DMA done */
+	dma_done_t		marker;
+} tx_flowring_delete_response_t;
+
+typedef struct tx_flowring_flush_response {
+	cmn_msg_hdr_t		msg;
+	compl_msg_hdr_t 	cmplt;
+	uint32			rsvd[2];
+	/* XOR checksum or a magic number to audit DMA done */
+	dma_done_t		marker;
+} tx_flowring_flush_response_t;
+
+/* Common layout of all d2h control messages */
+typedef struct ctrl_compl_msg {
+	/* common message header */
+	cmn_msg_hdr_t		cmn_hdr;
+	/* completion message header */
+	compl_msg_hdr_t		compl_hdr;
+	uint32			rsvd[2];
+	/* XOR checksum or a magic number to audit DMA done */
+	dma_done_t		marker;
+} ctrl_compl_msg_t;
+
+typedef union ctrl_completion_item {
+	ioctl_comp_resp_msg_t		ioctl_resp;
+	wlevent_req_msg_t		event;
+	ioctl_req_ack_msg_t		ioct_ack;
+	pcie_dmaxfer_cmplt_t		pcie_xfer_cmplt;
+	pcie_gen_status_t		pcie_gen_status;
+	pcie_ring_status_t		pcie_ring_status;
+	tx_flowring_create_response_t	txfl_create_resp;
+	tx_flowring_delete_response_t	txfl_delete_resp;
+	tx_flowring_flush_response_t	txfl_flush_resp;
+	ctrl_compl_msg_t		ctrl_compl;
+	unsigned char		check[D2HRING_CTRL_CMPLT_ITEMSIZE];
+} ctrl_completion_item_t;
+
+/* H2D Rxpost ring work items */
+typedef struct host_rxbuf_post {
+	/* common message header */
+	cmn_msg_hdr_t   cmn_hdr;
+	/* provided meta data buffer len */
+	uint16		metadata_buf_len;
+	/* provided data buffer len to receive data */
+	uint16		data_buf_len;
+	/* alignment to make the host buffers start on 8 byte boundary */
+	uint32		rsvd;
+	/* provided meta data buffer */
+	addr64_t	metadata_buf_addr;
+	/* provided data buffer to receive data */
+	addr64_t	data_buf_addr;
+} host_rxbuf_post_t;
+
+typedef union rxbuf_submit_item {
+	host_rxbuf_post_t	rxpost;
+	unsigned char		check[H2DRING_RXPOST_ITEMSIZE];
+} rxbuf_submit_item_t;
+
+
+/* D2H Rxcompletion ring work items */
+typedef struct host_rxbuf_cmpl {
+	/* common message header */
+	cmn_msg_hdr_t	cmn_hdr;
+	/* completeion message header */
+	compl_msg_hdr_t	compl_hdr;
+	/*  filled up meta data len */
+	uint16		metadata_len;
+	/* filled up buffer len to receive data */
+	uint16		data_len;
+	/* offset in the host rx buffer where the data starts */
+	uint16		data_offset;
+	/* offset in the host rx buffer where the data starts */
+	uint16		flags;
+	/* rx status */
+	uint32		rx_status_0;
+	uint32		rx_status_1;
+	/* XOR checksum or a magic number to audit DMA done */
+	dma_done_t		marker;
+} host_rxbuf_cmpl_t;
+
+typedef union rxbuf_complete_item {
+	host_rxbuf_cmpl_t	rxcmpl;
+	unsigned char		check[D2HRING_RXCMPLT_ITEMSIZE];
+} rxbuf_complete_item_t;
+
+
+typedef struct host_txbuf_post {
+	/* common message header */
+	cmn_msg_hdr_t   cmn_hdr;
+	/* eth header */
+	uint8		txhdr[ETHER_HDR_LEN];
+	/* flags */
+	uint8		flags;
+	/* number of segments */
+	uint8		seg_cnt;
+
+	/* provided meta data buffer for txstatus */
+	addr64_t	metadata_buf_addr;
+	/* provided data buffer to receive data */
+	addr64_t	data_buf_addr;
+	/* provided meta data buffer len */
+	uint16		metadata_buf_len;
+	/* provided data buffer len to receive data */
+	uint16		data_len;
+	uint32		flag2;
+} host_txbuf_post_t;
+
+#define BCMPCIE_PKT_FLAGS_FRAME_802_3	0x01
+#define BCMPCIE_PKT_FLAGS_FRAME_802_11	0x02
+
+#define BCMPCIE_PKT_FLAGS_FRAME_EXEMPT_MASK	0x03	/* Exempt uses 2 bits */
+#define BCMPCIE_PKT_FLAGS_FRAME_EXEMPT_SHIFT	0x02	/* needs to be shifted past other bits */
+
+
+#define BCMPCIE_PKT_FLAGS_PRIO_SHIFT		5
+#define BCMPCIE_PKT_FLAGS_PRIO_MASK		(7 << BCMPCIE_PKT_FLAGS_PRIO_SHIFT)
+
+/* These are added to fix up teh compile issues */
+#define BCMPCIE_TXPOST_FLAGS_FRAME_802_3	BCMPCIE_PKT_FLAGS_FRAME_802_3
+#define BCMPCIE_TXPOST_FLAGS_FRAME_802_11	BCMPCIE_PKT_FLAGS_FRAME_802_11
+#define BCMPCIE_TXPOST_FLAGS_PRIO_SHIFT		BCMPCIE_PKT_FLAGS_PRIO_SHIFT
+#define BCMPCIE_TXPOST_FLAGS_PRIO_MASK		BCMPCIE_PKT_FLAGS_PRIO_MASK
+
+#define BCMPCIE_PKT_FLAGS2_FORCELOWRATE_MASK	0x01
+#define BCMPCIE_PKT_FLAGS2_FORCELOWRATE_SHIFT	0
+
+/* H2D Txpost ring work items */
+typedef union txbuf_submit_item {
+	host_txbuf_post_t	txpost;
+	unsigned char		check[H2DRING_TXPOST_ITEMSIZE];
+} txbuf_submit_item_t;
+
+/* D2H Txcompletion ring work items */
+typedef struct host_txbuf_cmpl {
+	/* common message header */
+	cmn_msg_hdr_t	cmn_hdr;
+	/* completion message header */
+	compl_msg_hdr_t	compl_hdr;
+	union {
+		struct {
+			/* provided meta data len */
+			uint16	metadata_len;
+			/* WLAN side txstatus */
+			uint16	tx_status;
+		};
+		/* XOR checksum or a magic number to audit DMA done */
+		dma_done_t		marker;
+	};
+} host_txbuf_cmpl_t;
+
+typedef union txbuf_complete_item {
+	host_txbuf_cmpl_t	txcmpl;
+	unsigned char		check[D2HRING_TXCMPLT_ITEMSIZE];
+} txbuf_complete_item_t;
+
+#define BCMPCIE_D2H_METADATA_HDRLEN	4
+#define BCMPCIE_D2H_METADATA_MINLEN	(BCMPCIE_D2H_METADATA_HDRLEN + 4)
+
+/* ret buf struct */
+typedef struct ret_buf_ptr {
+	uint32 low_addr;
+	uint32 high_addr;
+} ret_buf_t;
+
+#ifdef PCIE_API_REV1
+/* ioctl specific hdr */
+typedef struct ioctl_hdr {
+	uint16 		cmd;
+	uint16		retbuf_len;
+	uint32		cmd_id;
+} ioctl_hdr_t;
+typedef struct ioctlptr_hdr {
+	uint16 		cmd;
+	uint16		retbuf_len;
+	uint16 		buflen;
+	uint16		rsvd;
+	uint32		cmd_id;
+} ioctlptr_hdr_t;
+#else /* PCIE_API_REV1 */
+typedef struct ioctl_req_hdr {
+	uint32		pkt_id; /* Packet ID */
+	uint32 		cmd; /* IOCTL ID */
+	uint16		retbuf_len;
+	uint16 		buflen;
+	uint16		xt_id; /* transaction ID */
+	uint16		rsvd[1];
+} ioctl_req_hdr_t;
+#endif /* PCIE_API_REV1 */
+
+
+/* Complete msgbuf hdr for ioctl from host to dongle */
+typedef struct ioct_reqst_hdr {
+	cmn_msg_hdr_t msg;
+#ifdef PCIE_API_REV1
+	ioctl_hdr_t ioct_hdr;
+#else
+	ioctl_req_hdr_t ioct_hdr;
+#endif
+	ret_buf_t ret_buf;
+} ioct_reqst_hdr_t;
+typedef struct ioctptr_reqst_hdr {
+	cmn_msg_hdr_t msg;
+#ifdef PCIE_API_REV1
+	ioctlptr_hdr_t ioct_hdr;
+#else
+	ioctl_req_hdr_t ioct_hdr;
+#endif
+	ret_buf_t ret_buf;
+	ret_buf_t ioct_buf;
+} ioctptr_reqst_hdr_t;
+
+/* ioctl response header */
+typedef struct ioct_resp_hdr {
+	cmn_msg_hdr_t   msg;
+#ifdef PCIE_API_REV1
+	uint32	cmd_id;
+#else
+	uint32	pkt_id;
+#endif
+	uint32	status;
+	uint32	ret_len;
+	uint32  inline_data;
+#ifdef PCIE_API_REV1
+#else
+	uint16	xt_id;	/* transaction ID */
+	uint16	rsvd[1];
+#endif
+} ioct_resp_hdr_t;
+
+/* ioct resp header used in dongle */
+/* ret buf hdr will be stripped off inside dongle itself */
+typedef struct msgbuf_ioctl_resp {
+	ioct_resp_hdr_t	ioct_hdr;
+	ret_buf_t	ret_buf;	/* ret buf pointers */
+} msgbuf_ioct_resp_t;
+
+/* WL evet hdr info */
+typedef struct wl_event_hdr {
+	cmn_msg_hdr_t   msg;
+	uint16 event;
+	uint8 flags;
+	uint8 rsvd;
+	uint16 retbuf_len;
+	uint16 rsvd1;
+	uint32 rxbufid;
+} wl_event_hdr_t;
+
+#define TXDESCR_FLOWID_PCIELPBK_1	0xFF
+#define TXDESCR_FLOWID_PCIELPBK_2	0xFE
+
+typedef struct txbatch_lenptr_tup {
+	uint32 pktid;
+	uint16 pktlen;
+	uint16 rsvd;
+	ret_buf_t	ret_buf;	/* ret buf pointers */
+} txbatch_lenptr_tup_t;
+
+typedef struct txbatch_cmn_msghdr {
+	cmn_msg_hdr_t   msg;
+	uint8 priority;
+	uint8 hdrlen;
+	uint8 pktcnt;
+	uint8 flowid;
+	uint8 txhdr[ETHER_HDR_LEN];
+	uint16 rsvd;
+} txbatch_cmn_msghdr_t;
+
+typedef struct txbatch_msghdr {
+	txbatch_cmn_msghdr_t txcmn;
+	txbatch_lenptr_tup_t tx_tup[0]; /* Based on packet count */
+} txbatch_msghdr_t;
+
+/* TX desc posting header */
+typedef struct tx_lenptr_tup {
+	uint16 pktlen;
+	uint16 rsvd;
+	ret_buf_t	ret_buf;	/* ret buf pointers */
+} tx_lenptr_tup_t;
+
+typedef struct txdescr_cmn_msghdr {
+	cmn_msg_hdr_t   msg;
+	uint8 priority;
+	uint8 hdrlen;
+	uint8 descrcnt;
+	uint8 flowid;
+	uint32 pktid;
+} txdescr_cmn_msghdr_t;
+
+typedef struct txdescr_msghdr {
+	txdescr_cmn_msghdr_t txcmn;
+	uint8 txhdr[ETHER_HDR_LEN];
+	uint16 rsvd;
+	tx_lenptr_tup_t tx_tup[0]; /* Based on descriptor count */
+} txdescr_msghdr_t;
+
+/* Tx status header info */
+typedef struct txstatus_hdr {
+	cmn_msg_hdr_t   msg;
+	uint32 pktid;
+} txstatus_hdr_t;
+/* RX bufid-len-ptr tuple */
+typedef struct rx_lenptr_tup {
+	uint32 rxbufid;
+	uint16 len;
+	uint16 rsvd2;
+	ret_buf_t	ret_buf;	/* ret buf pointers */
+} rx_lenptr_tup_t;
+/* Rx descr Post hdr info */
+typedef struct rxdesc_msghdr {
+	cmn_msg_hdr_t   msg;
+	uint16 rsvd0;
+	uint8 rsvd1;
+	uint8 descnt;
+	rx_lenptr_tup_t rx_tup[0];
+} rxdesc_msghdr_t;
+
+/* RX complete tuples */
+typedef struct rxcmplt_tup {
+	uint16 retbuf_len;
+	uint16 data_offset;
+	uint32 rxstatus0;
+	uint32 rxstatus1;
+	uint32 rxbufid;
+} rxcmplt_tup_t;
+/* RX complete messge hdr */
+typedef struct rxcmplt_hdr {
+	cmn_msg_hdr_t   msg;
+	uint16 rsvd0;
+	uint16 rxcmpltcnt;
+	rxcmplt_tup_t rx_tup[0];
+} rxcmplt_hdr_t;
+typedef struct hostevent_hdr {
+	cmn_msg_hdr_t   msg;
+	uint32 evnt_pyld;
+} hostevent_hdr_t;
+
+typedef struct dma_xfer_params {
+	uint32 src_physaddr_hi;
+	uint32 src_physaddr_lo;
+	uint32 dest_physaddr_hi;
+	uint32 dest_physaddr_lo;
+	uint32 len;
+	uint32 srcdelay;
+	uint32 destdelay;
+} dma_xfer_params_t;
+
+enum {
+	HOST_EVENT_CONS_CMD = 1
+};
+
+/* defines for flags */
+#define MSGBUF_IOC_ACTION_MASK 0x1
+
+#endif /* _bcmmsgbuf_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmnvram.h b/drivers/net/wireless/bcmdhd/include/bcmnvram.h
new file mode 100644
index 0000000..d9f2b4a
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmnvram.h
@@ -0,0 +1,272 @@
+/*
+ * NVRAM variable manipulation
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmnvram.h 428512 2013-10-09 02:12:11Z $
+ */
+
+#ifndef _bcmnvram_h_
+#define _bcmnvram_h_
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+#include <typedefs.h>
+#include <bcmdefs.h>
+
+struct nvram_header {
+	uint32 magic;
+	uint32 len;
+	uint32 crc_ver_init;	/* 0:7 crc, 8:15 ver, 16:31 sdram_init */
+	uint32 config_refresh;	/* 0:15 sdram_config, 16:31 sdram_refresh */
+	uint32 config_ncdl;	/* ncdl values for memc */
+};
+
+struct nvram_tuple {
+	char *name;
+	char *value;
+	struct nvram_tuple *next;
+};
+
+/*
+ * Get default value for an NVRAM variable
+ */
+extern char *nvram_default_get(const char *name);
+/*
+ * validate/restore all per-interface related variables
+ */
+extern void nvram_validate_all(char *prefix, bool restore);
+
+/*
+ * restore specific per-interface variable
+ */
+extern void nvram_restore_var(char *prefix, char *name);
+
+/*
+ * Initialize NVRAM access. May be unnecessary or undefined on certain
+ * platforms.
+ */
+extern int nvram_init(void *sih);
+extern int nvram_deinit(void *sih);
+
+
+/*
+ * Append a chunk of nvram variables to the global list
+ */
+extern int nvram_append(void *si, char *vars, uint varsz);
+
+extern void nvram_get_global_vars(char **varlst, uint *varsz);
+
+
+/*
+ * Check for reset button press for restoring factory defaults.
+ */
+extern int nvram_reset(void *sih);
+
+/*
+ * Disable NVRAM access. May be unnecessary or undefined on certain
+ * platforms.
+ */
+extern void nvram_exit(void *sih);
+
+/*
+ * Get the value of an NVRAM variable. The pointer returned may be
+ * invalid after a set.
+ * @param	name	name of variable to get
+ * @return	value of variable or NULL if undefined
+ */
+extern char * nvram_get(const char *name);
+
+/*
+ * Read the reset GPIO value from the nvram and set the GPIO
+ * as input
+ */
+extern int nvram_resetgpio_init(void *sih);
+
+/*
+ * Get the value of an NVRAM variable.
+ * @param	name	name of variable to get
+ * @return	value of variable or NUL if undefined
+ */
+static INLINE char *
+nvram_safe_get(const char *name)
+{
+	char *p = nvram_get(name);
+	return p ? p : "";
+}
+
+/*
+ * Match an NVRAM variable.
+ * @param	name	name of variable to match
+ * @param	match	value to compare against value of variable
+ * @return	TRUE if variable is defined and its value is string equal
+ *		to match or FALSE otherwise
+ */
+static INLINE int
+nvram_match(const char *name, const char *match)
+{
+	const char *value = nvram_get(name);
+	return (value && !strcmp(value, match));
+}
+
+/*
+ * Inversely match an NVRAM variable.
+ * @param	name	name of variable to match
+ * @param	match	value to compare against value of variable
+ * @return	TRUE if variable is defined and its value is not string
+ *		equal to invmatch or FALSE otherwise
+ */
+static INLINE int
+nvram_invmatch(const char *name, const char *invmatch)
+{
+	const char *value = nvram_get(name);
+	return (value && strcmp(value, invmatch));
+}
+
+/*
+ * Set the value of an NVRAM variable. The name and value strings are
+ * copied into private storage. Pointers to previously set values
+ * may become invalid. The new value may be immediately
+ * retrieved but will not be permanently stored until a commit.
+ * @param	name	name of variable to set
+ * @param	value	value of variable
+ * @return	0 on success and errno on failure
+ */
+extern int nvram_set(const char *name, const char *value);
+
+/*
+ * Unset an NVRAM variable. Pointers to previously set values
+ * remain valid until a set.
+ * @param	name	name of variable to unset
+ * @return	0 on success and errno on failure
+ * NOTE: use nvram_commit to commit this change to flash.
+ */
+extern int nvram_unset(const char *name);
+
+/*
+ * Commit NVRAM variables to permanent storage. All pointers to values
+ * may be invalid after a commit.
+ * NVRAM values are undefined after a commit.
+ * @param   nvram_corrupt    true to corrupt nvram, false otherwise.
+ * @return	0 on success and errno on failure
+ */
+extern int nvram_commit_internal(bool nvram_corrupt);
+
+/*
+ * Commit NVRAM variables to permanent storage. All pointers to values
+ * may be invalid after a commit.
+ * NVRAM values are undefined after a commit.
+ * @return	0 on success and errno on failure
+ */
+extern int nvram_commit(void);
+
+/*
+ * Get all NVRAM variables (format name=value\0 ... \0\0).
+ * @param	buf	buffer to store variables
+ * @param	count	size of buffer in bytes
+ * @return	0 on success and errno on failure
+ */
+extern int nvram_getall(char *nvram_buf, int count);
+
+/*
+ * returns the crc value of the nvram
+ * @param	nvh	nvram header pointer
+ */
+uint8 nvram_calc_crc(struct nvram_header * nvh);
+
+extern int nvram_space;
+#endif /* _LANGUAGE_ASSEMBLY */
+
+/* The NVRAM version number stored as an NVRAM variable */
+#define NVRAM_SOFTWARE_VERSION	"1"
+
+#define NVRAM_MAGIC		0x48534C46	/* 'FLSH' */
+#define NVRAM_CLEAR_MAGIC	0x0
+#define NVRAM_INVALID_MAGIC	0xFFFFFFFF
+#define NVRAM_VERSION		1
+#define NVRAM_HEADER_SIZE	20
+/* This definition is for precommit staging, and will be removed */
+#define NVRAM_SPACE		0x8000
+/* For CFE builds this gets passed in thru the makefile */
+#ifndef MAX_NVRAM_SPACE
+#define MAX_NVRAM_SPACE		0x10000
+#endif
+#define DEF_NVRAM_SPACE		0x8000
+#define ROM_ENVRAM_SPACE	0x1000
+#define NVRAM_LZMA_MAGIC	0x4c5a4d41	/* 'LZMA' */
+
+#define NVRAM_MAX_VALUE_LEN 255
+#define NVRAM_MAX_PARAM_LEN 64
+
+#define NVRAM_CRC_START_POSITION	9 /* magic, len, crc8 to be skipped */
+#define NVRAM_CRC_VER_MASK	0xffffff00 /* for crc_ver_init */
+
+/* Offsets to embedded nvram area */
+#define NVRAM_START_COMPRESSED	0x400
+#define NVRAM_START		0x1000
+
+#define BCM_JUMBO_NVRAM_DELIMIT '\n'
+#define BCM_JUMBO_START "Broadcom Jumbo Nvram file"
+
+
+#if (defined(FAILSAFE_UPGRADE) || defined(CONFIG_FAILSAFE_UPGRADE) || \
+	defined(__CONFIG_FAILSAFE_UPGRADE_SUPPORT__))
+#define IMAGE_SIZE "image_size"
+#define BOOTPARTITION "bootpartition"
+#define IMAGE_BOOT BOOTPARTITION
+#define PARTIALBOOTS "partialboots"
+#define MAXPARTIALBOOTS "maxpartialboots"
+#define IMAGE_1ST_FLASH_TRX "flash0.trx"
+#define IMAGE_1ST_FLASH_OS "flash0.os"
+#define IMAGE_2ND_FLASH_TRX "flash0.trx2"
+#define IMAGE_2ND_FLASH_OS "flash0.os2"
+#define IMAGE_FIRST_OFFSET "image_first_offset"
+#define IMAGE_SECOND_OFFSET "image_second_offset"
+#define LINUX_FIRST "linux"
+#define LINUX_SECOND "linux2"
+#endif
+
+#if (defined(DUAL_IMAGE) || defined(CONFIG_DUAL_IMAGE) || \
+	defined(__CONFIG_DUAL_IMAGE_FLASH_SUPPORT__))
+/* Shared by all: CFE, Linux Kernel, and Ap */
+#define IMAGE_BOOT "image_boot"
+#define BOOTPARTITION IMAGE_BOOT
+/* CFE variables */
+#define IMAGE_1ST_FLASH_TRX "flash0.trx"
+#define IMAGE_1ST_FLASH_OS "flash0.os"
+#define IMAGE_2ND_FLASH_TRX "flash0.trx2"
+#define IMAGE_2ND_FLASH_OS "flash0.os2"
+#define IMAGE_SIZE "image_size"
+
+/* CFE and Linux Kernel shared variables */
+#define IMAGE_FIRST_OFFSET "image_first_offset"
+#define IMAGE_SECOND_OFFSET "image_second_offset"
+
+/* Linux application variables */
+#define LINUX_FIRST "linux"
+#define LINUX_SECOND "linux2"
+#define POLICY_TOGGLE "toggle"
+#define LINUX_PART_TO_FLASH "linux_to_flash"
+#define LINUX_FLASH_POLICY "linux_flash_policy"
+
+#endif /* defined(DUAL_IMAGE||CONFIG_DUAL_IMAGE)||__CONFIG_DUAL_IMAGE_FLASH_SUPPORT__ */
+
+#endif /* _bcmnvram_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmpcie.h b/drivers/net/wireless/bcmdhd/include/bcmpcie.h
new file mode 100644
index 0000000..530e235
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmpcie.h
@@ -0,0 +1,215 @@
+/*
+ * Broadcom PCIE
+ * Software-specific definitions shared between device and host side
+ * Explains the shared area between host and dongle
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmpcie.h 472405 2014-04-23 23:46:55Z $
+ */
+
+#ifndef	_bcmpcie_h_
+#define	_bcmpcie_h_
+
+#include <bcmutils.h>
+
+#define ADDR_64(x)			(x.addr)
+#define HIGH_ADDR_32(x)     ((uint32) (((sh_addr_t) x).high_addr))
+#define LOW_ADDR_32(x)      ((uint32) (((sh_addr_t) x).low_addr))
+
+typedef struct {
+	uint32 low_addr;
+	uint32 high_addr;
+} sh_addr_t;
+
+
+
+#ifdef BCMPCIE_SUPPORT_TX_PUSH_RING
+#define BCMPCIE_PUSH_TX_RING	1
+#else
+#define BCMPCIE_PUSH_TX_RING	0
+#endif /* BCMPCIE_SUPPORT_TX_PUSH_RING */
+
+/* May be overridden by 43xxxxx-roml.mk */
+#if !defined(BCMPCIE_MAX_TX_FLOWS)
+#define BCMPCIE_MAX_TX_FLOWS	40
+#endif /* ! BCMPCIE_MAX_TX_FLOWS */
+
+#define PCIE_SHARED_VERSION		0x00005
+#define PCIE_SHARED_VERSION_MASK	0x000FF
+#define PCIE_SHARED_ASSERT_BUILT	0x00100
+#define PCIE_SHARED_ASSERT		0x00200
+#define PCIE_SHARED_TRAP		0x00400
+#define PCIE_SHARED_IN_BRPT		0x00800
+#define PCIE_SHARED_SET_BRPT		0x01000
+#define PCIE_SHARED_PENDING_BRPT	0x02000
+#define PCIE_SHARED_TXPUSH_SPRT		0x04000
+#define PCIE_SHARED_EVT_SEQNUM		0x08000
+#define PCIE_SHARED_DMA_INDEX		0x10000
+
+#define BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT		0
+#define BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT		1
+#define BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE		2
+#define BCMPCIE_D2H_MSGRING_TX_COMPLETE			3
+#define BCMPCIE_D2H_MSGRING_RX_COMPLETE			4
+#define BCMPCIE_COMMON_MSGRING_MAX_ID			4
+
+/* Added only for single tx ring */
+#define BCMPCIE_H2D_TXFLOWRINGID			5
+
+#define BCMPCIE_H2D_COMMON_MSGRINGS			2
+#define BCMPCIE_D2H_COMMON_MSGRINGS			3
+#define BCMPCIE_COMMON_MSGRINGS				5
+
+enum h2dring_idx {
+	BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT_IDX = 0,
+	BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT_IDX =	1,
+	BCMPCIE_H2D_MSGRING_TXFLOW_IDX_START = 2
+};
+
+enum d2hring_idx {
+	BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE_IDX = 0,
+	BCMPCIE_D2H_MSGRING_TX_COMPLETE_IDX = 1,
+	BCMPCIE_D2H_MSGRING_RX_COMPLETE_IDX = 2
+};
+
+typedef struct ring_mem {
+	uint16		idx;
+	uint8		type;
+	uint8		rsvd;
+	uint16		max_item;
+	uint16		len_items;
+	sh_addr_t	base_addr;
+} ring_mem_t;
+
+#define RINGSTATE_INITED	1
+
+typedef struct ring_state {
+	uint8 idx;
+	uint8 state;
+	uint16 r_offset;
+	uint16 w_offset;
+	uint16 e_offset;
+} ring_state_t;
+
+
+
+typedef struct ring_info {
+	/* locations in the TCM where the ringmem is and ringstate are defined */
+	uint32		ringmem_ptr;	/* ring mem location in TCM */
+	uint32		h2d_w_idx_ptr;
+
+	uint32		h2d_r_idx_ptr;
+	uint32		d2h_w_idx_ptr;
+
+	uint32		d2h_r_idx_ptr;
+	/* host locations where the DMA of read/write indices are */
+	sh_addr_t	h2d_w_idx_hostaddr;
+	sh_addr_t	h2d_r_idx_hostaddr;
+	sh_addr_t	d2h_w_idx_hostaddr;
+	sh_addr_t	d2h_r_idx_hostaddr;
+	uint16		max_sub_queues;
+	uint16		rsvd;
+} ring_info_t;
+
+typedef struct {
+	/* shared area version captured at flags 7:0 */
+	uint32	flags;
+
+	uint32  trap_addr;
+	uint32  assert_exp_addr;
+	uint32  assert_file_addr;
+	uint32  assert_line;
+	uint32	console_addr;		/* Address of hnd_cons_t */
+
+	uint32  msgtrace_addr;
+
+	uint32  fwid;
+
+	/* Used for debug/flow control */
+	uint16  total_lfrag_pkt_cnt;
+	uint16  max_host_rxbufs; /* rsvd in spec */
+
+	uint32 dma_rxoffset; /* rsvd in spec */
+
+	/* these will be used for sleep request/ack, d3 req/ack */
+	uint32  h2d_mb_data_ptr;
+	uint32  d2h_mb_data_ptr;
+
+	/* information pertinent to host IPC/msgbuf channels */
+	/* location in the TCM memory which has the ring_info */
+	uint32	rings_info_ptr;
+
+	/* block of host memory for the scratch buffer */
+	uint32		host_dma_scratch_buffer_len;
+	sh_addr_t	host_dma_scratch_buffer;
+
+	/* block of host memory for the dongle to push the status into */
+	uint32		device_rings_stsblk_len;
+	sh_addr_t	device_rings_stsblk;
+#ifdef BCM_BUZZZ
+	uint32	buzzz;	/* BUZZZ state format strings and trace buffer */
+#endif
+} pciedev_shared_t;
+
+
+/* H2D mail box Data */
+#define H2D_HOST_D3_INFORM	0x00000001
+#define H2D_HOST_DS_ACK		0x00000002
+#define H2D_HOST_CONS_INT	0x80000000	/* h2d int for console cmds  */
+
+/* D2H mail box Data */
+#define D2H_DEV_D3_ACK		0x00000001
+#define D2H_DEV_DS_ENTER_REQ	0x00000002
+#define D2H_DEV_DS_EXIT_NOTE	0x00000004
+#define D2H_DEV_FWHALT		0x10000000
+
+
+extern pciedev_shared_t pciedev_shared;
+#define NEXTTXP(i, d)           ((((i)+1) >= (d)) ? 0 : ((i)+1))
+#define NTXPACTIVE(r, w, d)     (((r) <= (w)) ? ((w)-(r)) : ((d)-(r)+(w)))
+#define NTXPAVAIL(r, w, d)      (((d) - NTXPACTIVE((r), (w), (d))) > 1)
+
+/* Function can be used to notify host of FW halt */
+#define READ_AVAIL_SPACE(w, r, d)		\
+			((w >= r) ? (w - r) : (d - r))
+
+#define WRT_PEND(x)	((x)->wr_pending)
+#define DNGL_RING_WPTR(msgbuf)		(*((msgbuf)->tcm_rs_w_ptr))
+#define BCMMSGBUF_RING_SET_W_PTR(msgbuf, a)	(DNGL_RING_WPTR(msgbuf) = (a))
+
+#define DNGL_RING_RPTR(msgbuf)		(*((msgbuf)->tcm_rs_r_ptr))
+#define BCMMSGBUF_RING_SET_R_PTR(msgbuf, a)	(DNGL_RING_RPTR(msgbuf) = (a))
+
+#define  RING_READ_PTR(x)	((x)->ringstate->r_offset)
+#define  RING_WRITE_PTR(x)	((x)->ringstate->w_offset)
+#define  RING_START_PTR(x)	((x)->ringmem->base_addr.low_addr)
+#define  RING_MAX_ITEM(x)	((x)->ringmem->max_item)
+#define  RING_LEN_ITEMS(x)	((x)->ringmem->len_items)
+#define	 HOST_RING_BASE(x)	((x)->ring_base.va)
+#define	 HOST_RING_END(x)	((uint8 *)HOST_RING_BASE((x)) + \
+			 ((RING_MAX_ITEM((x))-1)*RING_LEN_ITEMS((x))))
+
+#define WRITE_SPACE_AVAIL_CONTINUOUS(r, w, d)		((w >= r) ? (d - w) : (r - w))
+#define WRITE_SPACE_AVAIL(r, w, d)	(d - (NTXPACTIVE(r, w, d)) - 1)
+#define CHECK_WRITE_SPACE(r, w, d)	\
+	MIN(WRITE_SPACE_AVAIL(r, w, d), WRITE_SPACE_AVAIL_CONTINUOUS(r, w, d))
+
+#endif	/* _bcmpcie_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmpcispi.h b/drivers/net/wireless/bcmdhd/include/bcmpcispi.h
new file mode 100644
index 0000000..8ecb7c2
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmpcispi.h
@@ -0,0 +1,181 @@
+/*
+ * Broadcom PCI-SPI Host Controller Register Definitions
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmpcispi.h 241182 2011-02-17 21:50:03Z $
+ */
+#ifndef	_BCM_PCI_SPI_H
+#define	_BCM_PCI_SPI_H
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define	_PADLINE(line)	pad ## line
+#define	_XSTR(line)	_PADLINE(line)
+#define	PAD		_XSTR(__LINE__)
+#endif	/* PAD */
+
+
+typedef volatile struct {
+	uint32 spih_ctrl;		/* 0x00 SPI Control Register */
+	uint32 spih_stat;		/* 0x04 SPI Status Register */
+	uint32 spih_data;		/* 0x08 SPI Data Register, 32-bits wide */
+	uint32 spih_ext;		/* 0x0C SPI Extension Register */
+	uint32 PAD[4];			/* 0x10-0x1F PADDING */
+
+	uint32 spih_gpio_ctrl;		/* 0x20 SPI GPIO Control Register */
+	uint32 spih_gpio_data;		/* 0x24 SPI GPIO Data Register */
+	uint32 PAD[6];			/* 0x28-0x3F PADDING */
+
+	uint32 spih_int_edge;		/* 0x40 SPI Interrupt Edge Register (0=Level, 1=Edge) */
+	uint32 spih_int_pol;		/* 0x44 SPI Interrupt Polarity Register (0=Active Low, */
+							/* 1=Active High) */
+	uint32 spih_int_mask;		/* 0x48 SPI Interrupt Mask */
+	uint32 spih_int_status;		/* 0x4C SPI Interrupt Status */
+	uint32 PAD[4];			/* 0x50-0x5F PADDING */
+
+	uint32 spih_hex_disp;		/* 0x60 SPI 4-digit hex display value */
+	uint32 spih_current_ma;		/* 0x64 SPI SD card current consumption in mA */
+	uint32 PAD[1];			/* 0x68 PADDING */
+	uint32 spih_disp_sel;		/* 0x6c SPI 4-digit hex display mode select (1=current) */
+	uint32 PAD[4];			/* 0x70-0x7F PADDING */
+	uint32 PAD[8];			/* 0x80-0x9F PADDING */
+	uint32 PAD[8];			/* 0xA0-0xBF PADDING */
+	uint32 spih_pll_ctrl;	/* 0xC0 PLL Control Register */
+	uint32 spih_pll_status;	/* 0xC4 PLL Status Register */
+	uint32 spih_xtal_freq;	/* 0xC8 External Clock Frequency in units of 10000Hz */
+	uint32 spih_clk_count;	/* 0xCC External Clock Count Register */
+
+} spih_regs_t;
+
+typedef volatile struct {
+	uint32 cfg_space[0x40];		/* 0x000-0x0FF PCI Configuration Space (Read Only) */
+	uint32 P_IMG_CTRL0;		/* 0x100 PCI Image0 Control Register */
+
+	uint32 P_BA0;			/* 0x104 32 R/W PCI Image0 Base Address register */
+	uint32 P_AM0;			/* 0x108 32 R/W PCI Image0 Address Mask register */
+	uint32 P_TA0;			/* 0x10C 32 R/W PCI Image0 Translation Address register */
+	uint32 P_IMG_CTRL1;		/* 0x110 32 R/W PCI Image1 Control register */
+	uint32 P_BA1;			/* 0x114 32 R/W PCI Image1 Base Address register */
+	uint32 P_AM1;			/* 0x118 32 R/W PCI Image1 Address Mask register */
+	uint32 P_TA1;			/* 0x11C 32 R/W PCI Image1 Translation Address register */
+	uint32 P_IMG_CTRL2;		/* 0x120 32 R/W PCI Image2 Control register */
+	uint32 P_BA2;			/* 0x124 32 R/W PCI Image2 Base Address register */
+	uint32 P_AM2;			/* 0x128 32 R/W PCI Image2 Address Mask register */
+	uint32 P_TA2;			/* 0x12C 32 R/W PCI Image2 Translation Address register */
+	uint32 P_IMG_CTRL3;		/* 0x130 32 R/W PCI Image3 Control register */
+	uint32 P_BA3;			/* 0x134 32 R/W PCI Image3 Base Address register */
+	uint32 P_AM3;			/* 0x138 32 R/W PCI Image3 Address Mask register */
+	uint32 P_TA3;			/* 0x13C 32 R/W PCI Image3 Translation Address register */
+	uint32 P_IMG_CTRL4;		/* 0x140 32 R/W PCI Image4 Control register */
+	uint32 P_BA4;			/* 0x144 32 R/W PCI Image4 Base Address register */
+	uint32 P_AM4;			/* 0x148 32 R/W PCI Image4 Address Mask register */
+	uint32 P_TA4;			/* 0x14C 32 R/W PCI Image4 Translation Address register */
+	uint32 P_IMG_CTRL5;		/* 0x150 32 R/W PCI Image5 Control register */
+	uint32 P_BA5;			/* 0x154 32 R/W PCI Image5 Base Address register */
+	uint32 P_AM5;			/* 0x158 32 R/W PCI Image5 Address Mask register */
+	uint32 P_TA5;			/* 0x15C 32 R/W PCI Image5 Translation Address register */
+	uint32 P_ERR_CS;		/* 0x160 32 R/W PCI Error Control and Status register */
+	uint32 P_ERR_ADDR;		/* 0x164 32 R PCI Erroneous Address register */
+	uint32 P_ERR_DATA;		/* 0x168 32 R PCI Erroneous Data register */
+
+	uint32 PAD[5];			/* 0x16C-0x17F PADDING */
+
+	uint32 WB_CONF_SPC_BAR;		/* 0x180 32 R WISHBONE Configuration Space Base Address */
+	uint32 W_IMG_CTRL1;		/* 0x184 32 R/W WISHBONE Image1 Control register */
+	uint32 W_BA1;			/* 0x188 32 R/W WISHBONE Image1 Base Address register */
+	uint32 W_AM1;			/* 0x18C 32 R/W WISHBONE Image1 Address Mask register */
+	uint32 W_TA1;			/* 0x190 32 R/W WISHBONE Image1 Translation Address reg */
+	uint32 W_IMG_CTRL2;		/* 0x194 32 R/W WISHBONE Image2 Control register */
+	uint32 W_BA2;			/* 0x198 32 R/W WISHBONE Image2 Base Address register */
+	uint32 W_AM2;			/* 0x19C 32 R/W WISHBONE Image2 Address Mask register */
+	uint32 W_TA2;			/* 0x1A0 32 R/W WISHBONE Image2 Translation Address reg */
+	uint32 W_IMG_CTRL3;		/* 0x1A4 32 R/W WISHBONE Image3 Control register */
+	uint32 W_BA3;			/* 0x1A8 32 R/W WISHBONE Image3 Base Address register */
+	uint32 W_AM3;			/* 0x1AC 32 R/W WISHBONE Image3 Address Mask register */
+	uint32 W_TA3;			/* 0x1B0 32 R/W WISHBONE Image3 Translation Address reg */
+	uint32 W_IMG_CTRL4;		/* 0x1B4 32 R/W WISHBONE Image4 Control register */
+	uint32 W_BA4;			/* 0x1B8 32 R/W WISHBONE Image4 Base Address register */
+	uint32 W_AM4;			/* 0x1BC 32 R/W WISHBONE Image4 Address Mask register */
+	uint32 W_TA4;			/* 0x1C0 32 R/W WISHBONE Image4 Translation Address reg */
+	uint32 W_IMG_CTRL5;		/* 0x1C4 32 R/W WISHBONE Image5 Control register */
+	uint32 W_BA5;			/* 0x1C8 32 R/W WISHBONE Image5 Base Address register */
+	uint32 W_AM5;			/* 0x1CC 32 R/W WISHBONE Image5 Address Mask register */
+	uint32 W_TA5;			/* 0x1D0 32 R/W WISHBONE Image5 Translation Address reg */
+	uint32 W_ERR_CS;		/* 0x1D4 32 R/W WISHBONE Error Control and Status reg */
+	uint32 W_ERR_ADDR;		/* 0x1D8 32 R WISHBONE Erroneous Address register */
+	uint32 W_ERR_DATA;		/* 0x1DC 32 R WISHBONE Erroneous Data register */
+	uint32 CNF_ADDR;		/* 0x1E0 32 R/W Configuration Cycle register */
+	uint32 CNF_DATA;		/* 0x1E4 32 R/W Configuration Cycle Generation Data reg */
+
+	uint32 INT_ACK;			/* 0x1E8 32 R Interrupt Acknowledge register */
+	uint32 ICR;			/* 0x1EC 32 R/W Interrupt Control register */
+	uint32 ISR;			/* 0x1F0 32 R/W Interrupt Status register */
+} spih_pciregs_t;
+
+/*
+ * PCI Core interrupt enable and status bit definitions.
+ */
+
+/* PCI Core ICR Register bit definitions */
+#define PCI_INT_PROP_EN		(1 << 0)	/* Interrupt Propagation Enable */
+#define PCI_WB_ERR_INT_EN	(1 << 1)	/* Wishbone Error Interrupt Enable */
+#define PCI_PCI_ERR_INT_EN	(1 << 2)	/* PCI Error Interrupt Enable */
+#define PCI_PAR_ERR_INT_EN	(1 << 3)	/* Parity Error Interrupt Enable */
+#define PCI_SYS_ERR_INT_EN	(1 << 4)	/* System Error Interrupt Enable */
+#define PCI_SOFTWARE_RESET	(1U << 31)	/* Software reset of the PCI Core. */
+
+
+/* PCI Core ISR Register bit definitions */
+#define PCI_INT_PROP_ST		(1 << 0)	/* Interrupt Propagation Status */
+#define PCI_WB_ERR_INT_ST	(1 << 1)	/* Wishbone Error Interrupt Status */
+#define PCI_PCI_ERR_INT_ST	(1 << 2)	/* PCI Error Interrupt Status */
+#define PCI_PAR_ERR_INT_ST	(1 << 3)	/* Parity Error Interrupt Status */
+#define PCI_SYS_ERR_INT_ST	(1 << 4)	/* System Error Interrupt Status */
+
+
+/* Registers on the Wishbone bus */
+#define SPIH_CTLR_INTR		(1 << 0)	/* SPI Host Controller Core Interrupt */
+#define SPIH_DEV_INTR		(1 << 1)	/* SPI Device Interrupt */
+#define SPIH_WFIFO_INTR		(1 << 2)	/* SPI Tx FIFO Empty Intr (FPGA Rev >= 8) */
+
+/* GPIO Bit definitions */
+#define SPIH_CS			(1 << 0)	/* SPI Chip Select (active low) */
+#define SPIH_SLOT_POWER		(1 << 1)	/* SD Card Slot Power Enable */
+#define SPIH_CARD_DETECT	(1 << 2)	/* SD Card Detect */
+
+/* SPI Status Register Bit definitions */
+#define SPIH_STATE_MASK		0x30		/* SPI Transfer State Machine state mask */
+#define SPIH_STATE_SHIFT	4		/* SPI Transfer State Machine state shift */
+#define SPIH_WFFULL		(1 << 3)	/* SPI Write FIFO Full */
+#define SPIH_WFEMPTY		(1 << 2)	/* SPI Write FIFO Empty */
+#define SPIH_RFFULL		(1 << 1)	/* SPI Read FIFO Full */
+#define SPIH_RFEMPTY		(1 << 0)	/* SPI Read FIFO Empty */
+
+#define SPIH_EXT_CLK		(1U << 31)	/* Use External Clock as PLL Clock source. */
+
+#define SPIH_PLL_NO_CLK		(1 << 1)	/* Set to 1 if the PLL's input clock is lost. */
+#define SPIH_PLL_LOCKED		(1 << 3)	/* Set to 1 when the PLL is locked. */
+
+/* Spin bit loop bound check */
+#define SPI_SPIN_BOUND		0xf4240		/* 1 million */
+
+#endif /* _BCM_PCI_SPI_H */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmperf.h b/drivers/net/wireless/bcmdhd/include/bcmperf.h
new file mode 100644
index 0000000..acebfa3
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmperf.h
@@ -0,0 +1,36 @@
+/*
+ * Performance counters software interface.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmperf.h 241182 2011-02-17 21:50:03Z $
+ */
+/* essai */
+#ifndef _BCMPERF_H_
+#define _BCMPERF_H_
+/* get cache hits and misses */
+#define BCMPERF_ENABLE_INSTRCOUNT()
+#define BCMPERF_ENABLE_ICACHE_MISS()
+#define BCMPERF_ENABLE_ICACHE_HIT()
+#define	BCMPERF_GETICACHE_MISS(x)	((x) = 0)
+#define	BCMPERF_GETICACHE_HIT(x)	((x) = 0)
+#define	BCMPERF_GETINSTRCOUNT(x)	((x) = 0)
+#endif /* _BCMPERF_H_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdbus.h b/drivers/net/wireless/bcmdhd/include/bcmsdbus.h
new file mode 100644
index 0000000..a0cd8dc
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmsdbus.h
@@ -0,0 +1,143 @@
+/*
+ * Definitions for API from sdio common code (bcmsdh) to individual
+ * host controller drivers.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdbus.h 408158 2013-06-17 22:15:35Z $
+ */
+
+#ifndef	_sdio_api_h_
+#define	_sdio_api_h_
+
+
+#define SDIOH_API_RC_SUCCESS                          (0x00)
+#define SDIOH_API_RC_FAIL	                      (0x01)
+#define SDIOH_API_SUCCESS(status) (status == 0)
+
+#define SDIOH_READ              0	/* Read request */
+#define SDIOH_WRITE             1	/* Write request */
+
+#define SDIOH_DATA_FIX          0	/* Fixed addressing */
+#define SDIOH_DATA_INC          1	/* Incremental addressing */
+
+#define SDIOH_CMD_TYPE_NORMAL   0       /* Normal command */
+#define SDIOH_CMD_TYPE_APPEND   1       /* Append command */
+#define SDIOH_CMD_TYPE_CUTTHRU  2       /* Cut-through command */
+
+#define SDIOH_DATA_PIO          0       /* PIO mode */
+#define SDIOH_DATA_DMA          1       /* DMA mode */
+
+/* Max number of glommed pkts */
+#ifdef CUSTOM_MAX_TXGLOM_SIZE
+#define SDPCM_MAXGLOM_SIZE  CUSTOM_MAX_TXGLOM_SIZE
+#else
+#define SDPCM_MAXGLOM_SIZE	40
+#endif /* CUSTOM_MAX_TXGLOM_SIZE */
+
+#define SDPCM_TXGLOM_CPY 0			/* SDIO 2.0 should use copy mode */
+#define SDPCM_TXGLOM_MDESC	1		/* SDIO 3.0 should use multi-desc mode */
+
+#ifdef CUSTOM_DEF_TXGLOM_SIZE
+#define SDPCM_DEFGLOM_SIZE  CUSTOM_DEF_TXGLOM_SIZE
+#else
+#define SDPCM_DEFGLOM_SIZE SDPCM_MAXGLOM_SIZE
+#endif /* CUSTOM_DEF_TXGLOM_SIZE */
+
+#if SDPCM_DEFGLOM_SIZE > SDPCM_MAXGLOM_SIZE
+#warning "SDPCM_DEFGLOM_SIZE cannot be higher than SDPCM_MAXGLOM_SIZE!!"
+#undef SDPCM_DEFGLOM_SIZE
+#define SDPCM_DEFGLOM_SIZE SDPCM_MAXGLOM_SIZE
+#endif
+
+typedef int SDIOH_API_RC;
+
+/* SDio Host structure */
+typedef struct sdioh_info sdioh_info_t;
+
+/* callback function, taking one arg */
+typedef void (*sdioh_cb_fn_t)(void *);
+
+extern SDIOH_API_RC sdioh_interrupt_register(sdioh_info_t *si, sdioh_cb_fn_t fn, void *argh);
+extern SDIOH_API_RC sdioh_interrupt_deregister(sdioh_info_t *si);
+
+/* query whether SD interrupt is enabled or not */
+extern SDIOH_API_RC sdioh_interrupt_query(sdioh_info_t *si, bool *onoff);
+
+/* enable or disable SD interrupt */
+extern SDIOH_API_RC sdioh_interrupt_set(sdioh_info_t *si, bool enable_disable);
+
+#if defined(DHD_DEBUG)
+extern bool sdioh_interrupt_pending(sdioh_info_t *si);
+#endif
+
+/* read or write one byte using cmd52 */
+extern SDIOH_API_RC sdioh_request_byte(sdioh_info_t *si, uint rw, uint fnc, uint addr, uint8 *byte);
+
+/* read or write 2/4 bytes using cmd53 */
+extern SDIOH_API_RC sdioh_request_word(sdioh_info_t *si, uint cmd_type, uint rw, uint fnc,
+	uint addr, uint32 *word, uint nbyte);
+
+/* read or write any buffer using cmd53 */
+extern SDIOH_API_RC sdioh_request_buffer(sdioh_info_t *si, uint pio_dma, uint fix_inc,
+	uint rw, uint fnc_num, uint32 addr, uint regwidth, uint32 buflen, uint8 *buffer,
+	void *pkt);
+
+/* get cis data */
+extern SDIOH_API_RC sdioh_cis_read(sdioh_info_t *si, uint fuc, uint8 *cis, uint32 length);
+
+extern SDIOH_API_RC sdioh_cfg_read(sdioh_info_t *si, uint fuc, uint32 addr, uint8 *data);
+extern SDIOH_API_RC sdioh_cfg_write(sdioh_info_t *si, uint fuc, uint32 addr, uint8 *data);
+
+/* query number of io functions */
+extern uint sdioh_query_iofnum(sdioh_info_t *si);
+
+/* handle iovars */
+extern int sdioh_iovar_op(sdioh_info_t *si, const char *name,
+                          void *params, int plen, void *arg, int len, bool set);
+
+/* Issue abort to the specified function and clear controller as needed */
+extern int sdioh_abort(sdioh_info_t *si, uint fnc);
+
+/* Start and Stop SDIO without re-enumerating the SD card. */
+extern int sdioh_start(sdioh_info_t *si, int stage);
+extern int sdioh_stop(sdioh_info_t *si);
+
+/* Wait system lock free */
+extern int sdioh_waitlockfree(sdioh_info_t *si);
+
+/* Reset and re-initialize the device */
+extern int sdioh_sdio_reset(sdioh_info_t *si);
+
+
+
+#if defined(BCMSDIOH_STD)
+	#define SDIOH_SLEEP_ENABLED
+#endif
+extern SDIOH_API_RC sdioh_sleep(sdioh_info_t *si, bool enab);
+
+/* GPIO support */
+extern SDIOH_API_RC sdioh_gpio_init(sdioh_info_t *sd);
+extern bool sdioh_gpioin(sdioh_info_t *sd, uint32 gpio);
+extern SDIOH_API_RC sdioh_gpioouten(sdioh_info_t *sd, uint32 gpio);
+extern SDIOH_API_RC sdioh_gpioout(sdioh_info_t *sd, uint32 gpio, bool enab);
+
+#endif /* _sdio_api_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdh.h b/drivers/net/wireless/bcmdhd/include/bcmsdh.h
new file mode 100644
index 0000000..5520aa8
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmsdh.h
@@ -0,0 +1,262 @@
+/*
+ * SDIO host client driver interface of Broadcom HNBU
+ *     export functions to client drivers
+ *     abstract OS and BUS specific details of SDIO
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdh.h 450676 2014-01-22 22:45:13Z $
+ */
+
+/**
+ * @file bcmsdh.h
+ */
+
+#ifndef	_bcmsdh_h_
+#define	_bcmsdh_h_
+
+#define BCMSDH_ERROR_VAL	0x0001 /* Error */
+#define BCMSDH_INFO_VAL		0x0002 /* Info */
+extern const uint bcmsdh_msglevel;
+
+#define BCMSDH_ERROR(x)
+#define BCMSDH_INFO(x)
+
+#if defined(BCMSDIO) && (defined(BCMSDIOH_STD) || defined(BCMSDIOH_BCM) || \
+	defined(BCMSDIOH_SPI))
+#define BCMSDH_ADAPTER
+#endif /* BCMSDIO && (BCMSDIOH_STD || BCMSDIOH_BCM || BCMSDIOH_SPI) */
+
+/* forward declarations */
+typedef struct bcmsdh_info bcmsdh_info_t;
+typedef void (*bcmsdh_cb_fn_t)(void *);
+
+extern bcmsdh_info_t *bcmsdh_attach(osl_t *osh, void *sdioh, ulong *regsva);
+/**
+ * BCMSDH API context
+ */
+struct bcmsdh_info
+{
+	bool	init_success;	/* underlying driver successfully attached */
+	void	*sdioh;		/* handler for sdioh */
+	uint32  vendevid;	/* Target Vendor and Device ID on SD bus */
+	osl_t   *osh;
+	bool	regfail;	/* Save status of last reg_read/reg_write call */
+	uint32	sbwad;		/* Save backplane window address */
+	void	*os_cxt;        /* Pointer to per-OS private data */
+#ifdef DHD_WAKE_STATUS
+	unsigned int	total_wake_count;
+	int	pkt_wake;
+	int	wake_irq;
+#endif
+};
+
+/* Detach - freeup resources allocated in attach */
+extern int bcmsdh_detach(osl_t *osh, void *sdh);
+
+/* Query if SD device interrupts are enabled */
+extern bool bcmsdh_intr_query(void *sdh);
+
+/* Enable/disable SD interrupt */
+extern int bcmsdh_intr_enable(void *sdh);
+extern int bcmsdh_intr_disable(void *sdh);
+
+/* Register/deregister device interrupt handler. */
+extern int bcmsdh_intr_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh);
+extern int bcmsdh_intr_dereg(void *sdh);
+/* Enable/disable SD card interrupt forward */
+extern void bcmsdh_intr_forward(void *sdh, bool pass);
+
+#if defined(DHD_DEBUG)
+/* Query pending interrupt status from the host controller */
+extern bool bcmsdh_intr_pending(void *sdh);
+#endif
+
+#ifdef DHD_WAKE_STATUS
+int bcmsdh_get_total_wake(bcmsdh_info_t *bcmsdh);
+int bcmsdh_set_get_wake(bcmsdh_info_t *bcmsdh, int flag);
+#endif
+
+/* Register a callback to be called if and when bcmsdh detects
+ * device removal. No-op in the case of non-removable/hardwired devices.
+ */
+extern int bcmsdh_devremove_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh);
+
+/* Access SDIO address space (e.g. CCCR) using CMD52 (single-byte interface).
+ *   fn:   function number
+ *   addr: unmodified SDIO-space address
+ *   data: data byte to write
+ *   err:  pointer to error code (or NULL)
+ */
+extern uint8 bcmsdh_cfg_read(void *sdh, uint func, uint32 addr, int *err);
+extern void bcmsdh_cfg_write(void *sdh, uint func, uint32 addr, uint8 data, int *err);
+
+/* Read/Write 4bytes from/to cfg space */
+extern uint32 bcmsdh_cfg_read_word(void *sdh, uint fnc_num, uint32 addr, int *err);
+extern void bcmsdh_cfg_write_word(void *sdh, uint fnc_num, uint32 addr, uint32 data, int *err);
+
+/* Read CIS content for specified function.
+ *   fn:     function whose CIS is being requested (0 is common CIS)
+ *   cis:    pointer to memory location to place results
+ *   length: number of bytes to read
+ * Internally, this routine uses the values from the cis base regs (0x9-0xB)
+ * to form an SDIO-space address to read the data from.
+ */
+extern int bcmsdh_cis_read(void *sdh, uint func, uint8 *cis, uint length);
+
+/* Synchronous access to device (client) core registers via CMD53 to F1.
+ *   addr: backplane address (i.e. >= regsva from attach)
+ *   size: register width in bytes (2 or 4)
+ *   data: data for register write
+ */
+extern uint32 bcmsdh_reg_read(void *sdh, uint32 addr, uint size);
+extern uint32 bcmsdh_reg_write(void *sdh, uint32 addr, uint size, uint32 data);
+
+/* set sb address window */
+extern int bcmsdhsdio_set_sbaddr_window(void *sdh, uint32 address, bool force_set);
+
+/* Indicate if last reg read/write failed */
+extern bool bcmsdh_regfail(void *sdh);
+
+/* Buffer transfer to/from device (client) core via cmd53.
+ *   fn:       function number
+ *   addr:     backplane address (i.e. >= regsva from attach)
+ *   flags:    backplane width, address increment, sync/async
+ *   buf:      pointer to memory data buffer
+ *   nbytes:   number of bytes to transfer to/from buf
+ *   pkt:      pointer to packet associated with buf (if any)
+ *   complete: callback function for command completion (async only)
+ *   handle:   handle for completion callback (first arg in callback)
+ * Returns 0 or error code.
+ * NOTE: Async operation is not currently supported.
+ */
+typedef void (*bcmsdh_cmplt_fn_t)(void *handle, int status, bool sync_waiting);
+extern int bcmsdh_send_buf(void *sdh, uint32 addr, uint fn, uint flags,
+                           uint8 *buf, uint nbytes, void *pkt,
+                           bcmsdh_cmplt_fn_t complete_fn, void *handle);
+extern int bcmsdh_recv_buf(void *sdh, uint32 addr, uint fn, uint flags,
+                           uint8 *buf, uint nbytes, void *pkt,
+                           bcmsdh_cmplt_fn_t complete_fn, void *handle);
+
+extern void bcmsdh_glom_post(void *sdh, uint8 *frame, void *pkt, uint len);
+extern void bcmsdh_glom_clear(void *sdh);
+extern uint bcmsdh_set_mode(void *sdh, uint mode);
+extern bool bcmsdh_glom_enabled(void);
+/* Flags bits */
+#define SDIO_REQ_4BYTE	0x1	/* Four-byte target (backplane) width (vs. two-byte) */
+#define SDIO_REQ_FIXED	0x2	/* Fixed address (FIFO) (vs. incrementing address) */
+#define SDIO_REQ_ASYNC	0x4	/* Async request (vs. sync request) */
+#define SDIO_BYTE_MODE	0x8	/* Byte mode request(non-block mode) */
+
+/* Pending (non-error) return code */
+#define BCME_PENDING	1
+
+/* Read/write to memory block (F1, no FIFO) via CMD53 (sync only).
+ *   rw:       read or write (0/1)
+ *   addr:     direct SDIO address
+ *   buf:      pointer to memory data buffer
+ *   nbytes:   number of bytes to transfer to/from buf
+ * Returns 0 or error code.
+ */
+extern int bcmsdh_rwdata(void *sdh, uint rw, uint32 addr, uint8 *buf, uint nbytes);
+
+/* Issue an abort to the specified function */
+extern int bcmsdh_abort(void *sdh, uint fn);
+
+/* Start SDIO Host Controller communication */
+extern int bcmsdh_start(void *sdh, int stage);
+
+/* Stop SDIO Host Controller communication */
+extern int bcmsdh_stop(void *sdh);
+
+/* Wait system lock free */
+extern int bcmsdh_waitlockfree(void *sdh);
+
+/* Returns the "Device ID" of target device on the SDIO bus. */
+extern int bcmsdh_query_device(void *sdh);
+
+/* Returns the number of IO functions reported by the device */
+extern uint bcmsdh_query_iofnum(void *sdh);
+
+/* Miscellaneous knob tweaker. */
+extern int bcmsdh_iovar_op(void *sdh, const char *name,
+                           void *params, int plen, void *arg, int len, bool set);
+
+/* Reset and reinitialize the device */
+extern int bcmsdh_reset(bcmsdh_info_t *sdh);
+
+/* helper functions */
+
+/* callback functions */
+typedef struct {
+	/* probe the device */
+	void *(*probe)(uint16 vend_id, uint16 dev_id, uint16 bus, uint16 slot,
+	                uint16 func, uint bustype, void * regsva, osl_t * osh,
+	                void * param);
+	/* remove the device */
+	void (*remove)(void *context);
+	/* can we suspend now */
+	int (*suspend)(void *context);
+	/* resume from suspend */
+	int (*resume)(void *context);
+} bcmsdh_driver_t;
+
+/* platform specific/high level functions */
+extern int bcmsdh_register(bcmsdh_driver_t *driver);
+extern void bcmsdh_unregister(void);
+extern bool bcmsdh_chipmatch(uint16 vendor, uint16 device);
+extern void bcmsdh_device_remove(void * sdh);
+
+extern int bcmsdh_reg_sdio_notify(void* semaphore);
+extern void bcmsdh_unreg_sdio_notify(void);
+
+#if defined(OOB_INTR_ONLY)
+extern int bcmsdh_oob_intr_register(bcmsdh_info_t *bcmsdh, bcmsdh_cb_fn_t oob_irq_handler,
+	void* oob_irq_handler_context);
+extern void bcmsdh_oob_intr_unregister(bcmsdh_info_t *sdh);
+extern void bcmsdh_oob_intr_set(bcmsdh_info_t *sdh, bool enable);
+#endif 
+extern void bcmsdh_dev_pm_stay_awake(bcmsdh_info_t *sdh);
+extern void bcmsdh_dev_relax(bcmsdh_info_t *sdh);
+extern bool bcmsdh_dev_pm_enabled(bcmsdh_info_t *sdh);
+
+int bcmsdh_suspend(bcmsdh_info_t *bcmsdh);
+int bcmsdh_resume(bcmsdh_info_t *bcmsdh);
+
+/* Function to pass device-status bits to DHD. */
+extern uint32 bcmsdh_get_dstatus(void *sdh);
+
+/* Function to return current window addr */
+extern uint32 bcmsdh_cur_sbwad(void *sdh);
+
+/* Function to pass chipid and rev to lower layers for controlling pr's */
+extern void bcmsdh_chipinfo(void *sdh, uint32 chip, uint32 chiprev);
+
+
+extern int bcmsdh_sleep(void *sdh, bool enab);
+
+/* GPIO support */
+extern int bcmsdh_gpio_init(void *sd);
+extern bool bcmsdh_gpioin(void *sd, uint32 gpio);
+extern int bcmsdh_gpioouten(void *sd, uint32 gpio);
+extern int bcmsdh_gpioout(void *sd, uint32 gpio, bool enab);
+
+#endif	/* _bcmsdh_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdh_sdmmc.h b/drivers/net/wireless/bcmdhd/include/bcmsdh_sdmmc.h
new file mode 100644
index 0000000..af265df
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmsdh_sdmmc.h
@@ -0,0 +1,117 @@
+/*
+ * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdh_sdmmc.h 408158 2013-06-17 22:15:35Z $
+ */
+
+#ifndef __BCMSDH_SDMMC_H__
+#define __BCMSDH_SDMMC_H__
+
+#define sd_err(x)
+#define sd_trace(x)
+#define sd_info(x)
+#define sd_debug(x)
+#define sd_data(x)
+#define sd_ctrl(x)
+
+
+#define sd_sync_dma(sd, read, nbytes)
+#define sd_init_dma(sd)
+#define sd_ack_intr(sd)
+#define sd_wakeup(sd);
+
+#define sd_log(x)
+
+#define SDIOH_ASSERT(exp) \
+	do { if (!(exp)) \
+		printf("!!!ASSERT fail: file %s lines %d", __FILE__, __LINE__); \
+	} while (0)
+
+#define BLOCK_SIZE_4318 64
+#define BLOCK_SIZE_4328 512
+
+/* internal return code */
+#define SUCCESS	0
+#define ERROR	1
+
+/* private bus modes */
+#define SDIOH_MODE_SD4		2
+#define CLIENT_INTR			0x100	/* Get rid of this! */
+#define SDIOH_SDMMC_MAX_SG_ENTRIES	(SDPCM_MAXGLOM_SIZE+2)
+
+struct sdioh_info {
+	osl_t		*osh;			/* osh handler */
+	void		*bcmsdh;		/* upper layer handle */
+	bool		client_intr_enabled;	/* interrupt connnected flag */
+	bool		intr_handler_valid;	/* client driver interrupt handler valid */
+	sdioh_cb_fn_t	intr_handler;		/* registered interrupt handler */
+	void		*intr_handler_arg;	/* argument to call interrupt handler */
+	uint16		intmask;		/* Current active interrupts */
+
+	int		intrcount;		/* Client interrupts */
+	bool		sd_use_dma;		/* DMA on CMD53 */
+	bool		sd_blockmode;		/* sd_blockmode == FALSE => 64 Byte Cmd 53s. */
+						/*  Must be on for sd_multiblock to be effective */
+	bool		use_client_ints;	/* If this is false, make sure to restore */
+	int		sd_mode;		/* SD1/SD4/SPI */
+	int		client_block_size[SDIOD_MAX_IOFUNCS];		/* Blocksize */
+	uint8		num_funcs;		/* Supported funcs on client */
+	uint32		com_cis_ptr;
+	uint32		func_cis_ptr[SDIOD_MAX_IOFUNCS];
+	bool		use_rxchain;
+	struct scatterlist	sg_list[SDIOH_SDMMC_MAX_SG_ENTRIES];
+	struct sdio_func	fake_func0;
+	struct sdio_func	*func[SDIOD_MAX_IOFUNCS];
+
+};
+
+/************************************************************
+ * Internal interfaces: per-port references into bcmsdh_sdmmc.c
+ */
+
+/* Global message bits */
+extern uint sd_msglevel;
+
+/* OS-independent interrupt handler */
+extern bool check_client_intr(sdioh_info_t *sd);
+
+/* Core interrupt enable/disable of device interrupts */
+extern void sdioh_sdmmc_devintr_on(sdioh_info_t *sd);
+extern void sdioh_sdmmc_devintr_off(sdioh_info_t *sd);
+
+
+/**************************************************************
+ * Internal interfaces: bcmsdh_sdmmc.c references to per-port code
+ */
+
+/* Register mapping routines */
+extern uint32 *sdioh_sdmmc_reg_map(osl_t *osh, int32 addr, int size);
+extern void sdioh_sdmmc_reg_unmap(osl_t *osh, int32 addr, int size);
+
+/* Interrupt (de)registration routines */
+extern int sdioh_sdmmc_register_irq(sdioh_info_t *sd, uint irq);
+extern void sdioh_sdmmc_free_irq(uint irq, sdioh_info_t *sd);
+
+extern sdioh_info_t *sdioh_attach(osl_t *osh, struct sdio_func *func);
+extern SDIOH_API_RC sdioh_detach(osl_t *osh, sdioh_info_t *sd);
+#endif /* __BCMSDH_SDMMC_H__ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdpcm.h b/drivers/net/wireless/bcmdhd/include/bcmsdpcm.h
new file mode 100644
index 0000000..e80cdc2
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmsdpcm.h
@@ -0,0 +1,278 @@
+/*
+ * Broadcom SDIO/PCMCIA
+ * Software-specific definitions shared between device and host side
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdpcm.h 472405 2014-04-23 23:46:55Z $
+ */
+
+#ifndef	_bcmsdpcm_h_
+#define	_bcmsdpcm_h_
+
+/*
+ * Software allocation of To SB Mailbox resources
+ */
+
+/* intstatus bits */
+#define I_SMB_NAK	I_SMB_SW0	/* To SB Mailbox Frame NAK */
+#define I_SMB_INT_ACK	I_SMB_SW1	/* To SB Mailbox Host Interrupt ACK */
+#define I_SMB_USE_OOB	I_SMB_SW2	/* To SB Mailbox Use OOB Wakeup */
+#define I_SMB_DEV_INT	I_SMB_SW3	/* To SB Mailbox Miscellaneous Interrupt */
+
+#define I_TOSBMAIL      (I_SMB_NAK | I_SMB_INT_ACK | I_SMB_USE_OOB | I_SMB_DEV_INT)
+
+/* tosbmailbox bits corresponding to intstatus bits */
+#define SMB_NAK		(1 << 0)	/* To SB Mailbox Frame NAK */
+#define SMB_INT_ACK	(1 << 1)	/* To SB Mailbox Host Interrupt ACK */
+#define SMB_USE_OOB	(1 << 2)	/* To SB Mailbox Use OOB Wakeup */
+#define SMB_DEV_INT	(1 << 3)	/* To SB Mailbox Miscellaneous Interrupt */
+#define SMB_MASK	0x0000000f	/* To SB Mailbox Mask */
+
+/* tosbmailboxdata */
+#define SMB_DATA_VERSION_MASK	0x00ff0000	/* host protocol version (sent with F2 enable) */
+#define SMB_DATA_VERSION_SHIFT	16		/* host protocol version (sent with F2 enable) */
+
+/*
+ * Software allocation of To Host Mailbox resources
+ */
+
+/* intstatus bits */
+#define I_HMB_FC_STATE	I_HMB_SW0	/* To Host Mailbox Flow Control State */
+#define I_HMB_FC_CHANGE	I_HMB_SW1	/* To Host Mailbox Flow Control State Changed */
+#define I_HMB_FRAME_IND	I_HMB_SW2	/* To Host Mailbox Frame Indication */
+#define I_HMB_HOST_INT	I_HMB_SW3	/* To Host Mailbox Miscellaneous Interrupt */
+
+#define I_TOHOSTMAIL    (I_HMB_FC_CHANGE | I_HMB_FRAME_IND | I_HMB_HOST_INT)
+
+/* tohostmailbox bits corresponding to intstatus bits */
+#define HMB_FC_ON	(1 << 0)	/* To Host Mailbox Flow Control State */
+#define HMB_FC_CHANGE	(1 << 1)	/* To Host Mailbox Flow Control State Changed */
+#define HMB_FRAME_IND	(1 << 2)	/* To Host Mailbox Frame Indication */
+#define HMB_HOST_INT	(1 << 3)	/* To Host Mailbox Miscellaneous Interrupt */
+#define HMB_MASK	0x0000000f	/* To Host Mailbox Mask */
+
+/* tohostmailboxdata */
+#define HMB_DATA_NAKHANDLED	0x01	/* we're ready to retransmit NAK'd frame to host */
+#define HMB_DATA_DEVREADY	0x02	/* we're ready to to talk to host after enable */
+#define HMB_DATA_FC		0x04	/* per prio flowcontrol update flag to host */
+#define HMB_DATA_FWREADY	0x08	/* firmware is ready for protocol activity */
+#define HMB_DATA_FWHALT		0x10	/* firmware has halted operation */
+
+#define HMB_DATA_FCDATA_MASK	0xff000000	/* per prio flowcontrol data */
+#define HMB_DATA_FCDATA_SHIFT	24		/* per prio flowcontrol data */
+
+#define HMB_DATA_VERSION_MASK	0x00ff0000	/* device protocol version (with devready) */
+#define HMB_DATA_VERSION_SHIFT	16		/* device protocol version (with devready) */
+
+/*
+ * Software-defined protocol header
+ */
+
+/* Current protocol version */
+#define SDPCM_PROT_VERSION	4
+
+/* SW frame header */
+#define SDPCM_SEQUENCE_MASK		0x000000ff	/* Sequence Number Mask */
+#define SDPCM_PACKET_SEQUENCE(p) (((uint8 *)p)[0] & 0xff) /* p starts w/SW Header */
+
+#define SDPCM_CHANNEL_MASK		0x00000f00	/* Channel Number Mask */
+#define SDPCM_CHANNEL_SHIFT		8		/* Channel Number Shift */
+#define SDPCM_PACKET_CHANNEL(p) (((uint8 *)p)[1] & 0x0f) /* p starts w/SW Header */
+
+#define SDPCM_FLAGS_MASK		0x0000f000	/* Mask of flag bits */
+#define SDPCM_FLAGS_SHIFT		12		/* Flag bits shift */
+#define SDPCM_PACKET_FLAGS(p) ((((uint8 *)p)[1] & 0xf0) >> 4) /* p starts w/SW Header */
+
+/* Next Read Len: lookahead length of next frame, in 16-byte units (rounded up) */
+#define SDPCM_NEXTLEN_MASK		0x00ff0000	/* Next Read Len Mask */
+#define SDPCM_NEXTLEN_SHIFT		16		/* Next Read Len Shift */
+#define SDPCM_NEXTLEN_VALUE(p) ((((uint8 *)p)[2] & 0xff) << 4) /* p starts w/SW Header */
+#define SDPCM_NEXTLEN_OFFSET		2
+
+/* Data Offset from SOF (HW Tag, SW Tag, Pad) */
+#define SDPCM_DOFFSET_OFFSET		3		/* Data Offset */
+#define SDPCM_DOFFSET_VALUE(p) 		(((uint8 *)p)[SDPCM_DOFFSET_OFFSET] & 0xff)
+#define SDPCM_DOFFSET_MASK		0xff000000
+#define SDPCM_DOFFSET_SHIFT		24
+
+#define SDPCM_FCMASK_OFFSET		4		/* Flow control */
+#define SDPCM_FCMASK_VALUE(p)		(((uint8 *)p)[SDPCM_FCMASK_OFFSET ] & 0xff)
+#define SDPCM_WINDOW_OFFSET		5		/* Credit based fc */
+#define SDPCM_WINDOW_VALUE(p)		(((uint8 *)p)[SDPCM_WINDOW_OFFSET] & 0xff)
+#define SDPCM_VERSION_OFFSET		6		/* Version # */
+#define SDPCM_VERSION_VALUE(p)		(((uint8 *)p)[SDPCM_VERSION_OFFSET] & 0xff)
+#define SDPCM_UNUSED_OFFSET		7		/* Spare */
+#define SDPCM_UNUSED_VALUE(p)		(((uint8 *)p)[SDPCM_UNUSED_OFFSET] & 0xff)
+
+#define SDPCM_SWHEADER_LEN	8	/* SW header is 64 bits */
+
+/* logical channel numbers */
+#define SDPCM_CONTROL_CHANNEL	0	/* Control Request/Response Channel Id */
+#define SDPCM_EVENT_CHANNEL	1	/* Asyc Event Indication Channel Id */
+#define SDPCM_DATA_CHANNEL	2	/* Data Xmit/Recv Channel Id */
+#define SDPCM_GLOM_CHANNEL	3	/* For coalesced packets (superframes) */
+#define SDPCM_TEST_CHANNEL	15	/* Reserved for test/debug packets */
+#define SDPCM_MAX_CHANNEL	15
+
+#define SDPCM_SEQUENCE_WRAP	256	/* wrap-around val for eight-bit frame seq number */
+
+#define SDPCM_FLAG_RESVD0	0x01
+#define SDPCM_FLAG_RESVD1	0x02
+#define SDPCM_FLAG_GSPI_TXENAB	0x04
+#define SDPCM_FLAG_GLOMDESC	0x08	/* Superframe descriptor mask */
+
+/* For GLOM_CHANNEL frames, use a flag to indicate descriptor frame */
+#define SDPCM_GLOMDESC_FLAG	(SDPCM_FLAG_GLOMDESC << SDPCM_FLAGS_SHIFT)
+
+#define SDPCM_GLOMDESC(p)	(((uint8 *)p)[1] & 0x80)
+
+/* For TEST_CHANNEL packets, define another 4-byte header */
+#define SDPCM_TEST_HDRLEN		4	/* Generally: Cmd(1), Ext(1), Len(2);
+						 * Semantics of Ext byte depend on command.
+						 * Len is current or requested frame length, not
+						 * including test header; sent little-endian.
+						 */
+#define SDPCM_TEST_PKT_CNT_FLD_LEN	4	/* Packet count filed legth */
+#define SDPCM_TEST_DISCARD		0x01	/* Receiver discards. Ext is a pattern id. */
+#define SDPCM_TEST_ECHOREQ		0x02	/* Echo request. Ext is a pattern id. */
+#define SDPCM_TEST_ECHORSP		0x03	/* Echo response. Ext is a pattern id. */
+#define SDPCM_TEST_BURST		0x04	/* Receiver to send a burst. Ext is a frame count
+						 * (Backward compatabilty) Set frame count in a
+						 * 4 byte filed adjacent to the HDR
+						 */
+#define SDPCM_TEST_SEND			0x05	/* Receiver sets send mode. Ext is boolean on/off
+						 * Set frame count in a 4 byte filed adjacent to
+						 * the HDR
+						 */
+
+/* Handy macro for filling in datagen packets with a pattern */
+#define SDPCM_TEST_FILL(byteno, id)	((uint8)(id + byteno))
+
+/*
+ * Software counters (first part matches hardware counters)
+ */
+
+typedef volatile struct {
+	uint32 cmd52rd;		/* Cmd52RdCount, SDIO: cmd52 reads */
+	uint32 cmd52wr;		/* Cmd52WrCount, SDIO: cmd52 writes */
+	uint32 cmd53rd;		/* Cmd53RdCount, SDIO: cmd53 reads */
+	uint32 cmd53wr;		/* Cmd53WrCount, SDIO: cmd53 writes */
+	uint32 abort;		/* AbortCount, SDIO: aborts */
+	uint32 datacrcerror;	/* DataCrcErrorCount, SDIO: frames w/CRC error */
+	uint32 rdoutofsync;	/* RdOutOfSyncCount, SDIO/PCMCIA: Rd Frm out of sync */
+	uint32 wroutofsync;	/* RdOutOfSyncCount, SDIO/PCMCIA: Wr Frm out of sync */
+	uint32 writebusy;	/* WriteBusyCount, SDIO: device asserted "busy" */
+	uint32 readwait;	/* ReadWaitCount, SDIO: no data ready for a read cmd */
+	uint32 readterm;	/* ReadTermCount, SDIO: read frame termination cmds */
+	uint32 writeterm;	/* WriteTermCount, SDIO: write frames termination cmds */
+	uint32 rxdescuflo;	/* receive descriptor underflows */
+	uint32 rxfifooflo;	/* receive fifo overflows */
+	uint32 txfifouflo;	/* transmit fifo underflows */
+	uint32 runt;		/* runt (too short) frames recv'd from bus */
+	uint32 badlen;		/* frame's rxh len does not match its hw tag len */
+	uint32 badcksum;	/* frame's hw tag chksum doesn't agree with len value */
+	uint32 seqbreak;	/* break in sequence # space from one rx frame to the next */
+	uint32 rxfcrc;		/* frame rx header indicates crc error */
+	uint32 rxfwoos;		/* frame rx header indicates write out of sync */
+	uint32 rxfwft;		/* frame rx header indicates write frame termination */
+	uint32 rxfabort;	/* frame rx header indicates frame aborted */
+	uint32 woosint;		/* write out of sync interrupt */
+	uint32 roosint;		/* read out of sync interrupt */
+	uint32 rftermint;	/* read frame terminate interrupt */
+	uint32 wftermint;	/* write frame terminate interrupt */
+} sdpcmd_cnt_t;
+
+/*
+ * Register Access Macros
+ */
+
+#define SDIODREV_IS(var, val)	((var) == (val))
+#define SDIODREV_GE(var, val)	((var) >= (val))
+#define SDIODREV_GT(var, val)	((var) > (val))
+#define SDIODREV_LT(var, val)	((var) < (val))
+#define SDIODREV_LE(var, val)	((var) <= (val))
+
+#define SDIODDMAREG32(h, dir, chnl) \
+	((dir) == DMA_TX ? \
+	 (void *)(uintptr)&((h)->regs->dma.sdiod32.dma32regs[chnl].xmt) : \
+	 (void *)(uintptr)&((h)->regs->dma.sdiod32.dma32regs[chnl].rcv))
+
+#define SDIODDMAREG64(h, dir, chnl) \
+	((dir) == DMA_TX ? \
+	 (void *)(uintptr)&((h)->regs->dma.sdiod64.dma64regs[chnl].xmt) : \
+	 (void *)(uintptr)&((h)->regs->dma.sdiod64.dma64regs[chnl].rcv))
+
+#define SDIODDMAREG(h, dir, chnl) \
+	(SDIODREV_LT((h)->corerev, 1) ? \
+	 SDIODDMAREG32((h), (dir), (chnl)) : \
+	 SDIODDMAREG64((h), (dir), (chnl)))
+
+#define PCMDDMAREG(h, dir, chnl) \
+	((dir) == DMA_TX ? \
+	 (void *)(uintptr)&((h)->regs->dma.pcm32.dmaregs.xmt) : \
+	 (void *)(uintptr)&((h)->regs->dma.pcm32.dmaregs.rcv))
+
+#define SDPCMDMAREG(h, dir, chnl, coreid) \
+	((coreid) == SDIOD_CORE_ID ? \
+	 SDIODDMAREG(h, dir, chnl) : \
+	 PCMDDMAREG(h, dir, chnl))
+
+#define SDIODFIFOREG(h, corerev) \
+	(SDIODREV_LT((corerev), 1) ? \
+	 ((dma32diag_t *)(uintptr)&((h)->regs->dma.sdiod32.dmafifo)) : \
+	 ((dma32diag_t *)(uintptr)&((h)->regs->dma.sdiod64.dmafifo)))
+
+#define PCMDFIFOREG(h) \
+	((dma32diag_t *)(uintptr)&((h)->regs->dma.pcm32.dmafifo))
+
+#define SDPCMFIFOREG(h, coreid, corerev) \
+	((coreid) == SDIOD_CORE_ID ? \
+	 SDIODFIFOREG(h, corerev) : \
+	 PCMDFIFOREG(h))
+
+/*
+ * Shared structure between dongle and the host.
+ * The structure contains pointers to trap or assert information.
+ */
+#define SDPCM_SHARED_VERSION       0x0001
+#define SDPCM_SHARED_VERSION_MASK  0x00FF
+#define SDPCM_SHARED_ASSERT_BUILT  0x0100
+#define SDPCM_SHARED_ASSERT        0x0200
+#define SDPCM_SHARED_TRAP          0x0400
+#define SDPCM_SHARED_IN_BRPT       0x0800
+#define SDPCM_SHARED_SET_BRPT      0x1000
+#define SDPCM_SHARED_PENDING_BRPT  0x2000
+
+typedef struct {
+	uint32	flags;
+	uint32  trap_addr;
+	uint32  assert_exp_addr;
+	uint32  assert_file_addr;
+	uint32  assert_line;
+	uint32	console_addr;		/* Address of hnd_cons_t */
+	uint32  msgtrace_addr;
+	uint32  fwid;
+} sdpcm_shared_t;
+
+extern sdpcm_shared_t sdpcm_shared;
+
+#endif	/* _bcmsdpcm_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdspi.h b/drivers/net/wireless/bcmdhd/include/bcmsdspi.h
new file mode 100644
index 0000000..b5a0caf
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmsdspi.h
@@ -0,0 +1,135 @@
+/*
+ * SD-SPI Protocol Conversion - BCMSDH->SPI Translation Layer
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdspi.h 294363 2011-11-06 23:02:20Z $
+ */
+#ifndef	_BCM_SD_SPI_H
+#define	_BCM_SD_SPI_H
+
+/* global msglevel for debug messages - bitvals come from sdiovar.h */
+
+#define sd_err(x)
+#define sd_trace(x)
+#define sd_info(x)
+#define sd_debug(x)
+#define sd_data(x)
+#define sd_ctrl(x)
+
+#define sd_log(x)
+
+#define SDIOH_ASSERT(exp) \
+	do { if (!(exp)) \
+		printf("!!!ASSERT fail: file %s lines %d", __FILE__, __LINE__); \
+	} while (0)
+
+#define BLOCK_SIZE_4318 64
+#define BLOCK_SIZE_4328 512
+
+/* internal return code */
+#define SUCCESS	0
+#undef ERROR
+#define ERROR	1
+
+/* private bus modes */
+#define SDIOH_MODE_SPI		0
+
+#define USE_BLOCKMODE		0x2	/* Block mode can be single block or multi */
+#define USE_MULTIBLOCK		0x4
+
+struct sdioh_info {
+	uint cfg_bar;                   	/* pci cfg address for bar */
+	uint32 caps;                    	/* cached value of capabilities reg */
+	uint		bar0;			/* BAR0 for PCI Device */
+	osl_t 		*osh;			/* osh handler */
+	void		*controller;	/* Pointer to SPI Controller's private data struct */
+
+	uint		lockcount; 		/* nest count of sdspi_lock() calls */
+	bool		client_intr_enabled;	/* interrupt connnected flag */
+	bool		intr_handler_valid;	/* client driver interrupt handler valid */
+	sdioh_cb_fn_t	intr_handler;		/* registered interrupt handler */
+	void		*intr_handler_arg;	/* argument to call interrupt handler */
+	bool		initialized;		/* card initialized */
+	uint32		target_dev;		/* Target device ID */
+	uint32		intmask;		/* Current active interrupts */
+	void		*sdos_info;		/* Pointer to per-OS private data */
+
+	uint32		controller_type;	/* Host controller type */
+	uint8		version;		/* Host Controller Spec Compliance Version */
+	uint 		irq;			/* Client irq */
+	uint32 		intrcount;		/* Client interrupts */
+	uint32 		local_intrcount;	/* Controller interrupts */
+	bool 		host_init_done;		/* Controller initted */
+	bool 		card_init_done;		/* Client SDIO interface initted */
+	bool 		polled_mode;		/* polling for command completion */
+
+	bool		sd_use_dma;		/* DMA on CMD53 */
+	bool 		sd_blockmode;		/* sd_blockmode == FALSE => 64 Byte Cmd 53s. */
+						/*  Must be on for sd_multiblock to be effective */
+	bool 		use_client_ints;	/* If this is false, make sure to restore */
+	bool		got_hcint;		/* Host Controller interrupt. */
+						/*  polling hack in wl_linux.c:wl_timer() */
+	int 		adapter_slot;		/* Maybe dealing with multiple slots/controllers */
+	int 		sd_mode;		/* SD1/SD4/SPI */
+	int 		client_block_size[SDIOD_MAX_IOFUNCS];		/* Blocksize */
+	uint32 		data_xfer_count;	/* Current register transfer size */
+	uint32		cmd53_wr_data;		/* Used to pass CMD53 write data */
+	uint32		card_response;		/* Used to pass back response status byte */
+	uint32		card_rsp_data;		/* Used to pass back response data word */
+	uint16 		card_rca;		/* Current Address */
+	uint8 		num_funcs;		/* Supported funcs on client */
+	uint32 		com_cis_ptr;
+	uint32 		func_cis_ptr[SDIOD_MAX_IOFUNCS];
+	void		*dma_buf;
+	ulong		dma_phys;
+	int 		r_cnt;			/* rx count */
+	int 		t_cnt;			/* tx_count */
+};
+
+/************************************************************
+ * Internal interfaces: per-port references into bcmsdspi.c
+ */
+
+/* Global message bits */
+extern uint sd_msglevel;
+
+/**************************************************************
+ * Internal interfaces: bcmsdspi.c references to per-port code
+ */
+
+/* Register mapping routines */
+extern uint32 *spi_reg_map(osl_t *osh, uintptr addr, int size);
+extern void spi_reg_unmap(osl_t *osh, uintptr addr, int size);
+
+/* Interrupt (de)registration routines */
+extern int spi_register_irq(sdioh_info_t *sd, uint irq);
+extern void spi_free_irq(uint irq, sdioh_info_t *sd);
+
+/* OS-specific interrupt wrappers (atomic interrupt enable/disable) */
+extern void spi_lock(sdioh_info_t *sd);
+extern void spi_unlock(sdioh_info_t *sd);
+
+/* Allocate/init/free per-OS private data */
+extern int spi_osinit(sdioh_info_t *sd);
+extern void spi_osfree(sdioh_info_t *sd);
+
+#endif /* _BCM_SD_SPI_H */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdstd.h b/drivers/net/wireless/bcmdhd/include/bcmsdstd.h
new file mode 100644
index 0000000..4607879
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmsdstd.h
@@ -0,0 +1,282 @@
+/*
+ *  'Standard' SDIO HOST CONTROLLER driver
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdstd.h 455390 2014-02-13 22:14:56Z $
+ */
+#ifndef	_BCM_SD_STD_H
+#define	_BCM_SD_STD_H
+
+/* global msglevel for debug messages - bitvals come from sdiovar.h */
+#define sd_err(x)	do { if (sd_msglevel & SDH_ERROR_VAL) printf x; } while (0)
+#define sd_trace(x)
+#define sd_info(x)
+#define sd_debug(x)
+#define sd_data(x)
+#define sd_ctrl(x)
+#define sd_dma(x)
+
+#define sd_sync_dma(sd, read, nbytes)
+#define sd_init_dma(sd)
+#define sd_ack_intr(sd)
+#define sd_wakeup(sd);
+/* Allocate/init/free per-OS private data */
+extern int sdstd_osinit(sdioh_info_t *sd);
+extern void sdstd_osfree(sdioh_info_t *sd);
+
+#define sd_log(x)
+
+#define SDIOH_ASSERT(exp) \
+	do { if (!(exp)) \
+		printf("!!!ASSERT fail: file %s lines %d", __FILE__, __LINE__); \
+	} while (0)
+
+#define BLOCK_SIZE_4318 64
+#define BLOCK_SIZE_4328 512
+
+/* internal return code */
+#define SUCCESS	0
+#define ERROR	1
+
+/* private bus modes */
+#define SDIOH_MODE_SPI		0
+#define SDIOH_MODE_SD1		1
+#define SDIOH_MODE_SD4		2
+
+#define MAX_SLOTS 6 	/* For PCI: Only 6 BAR entries => 6 slots */
+#define SDIOH_REG_WINSZ	0x100 /* Number of registers in Standard Host Controller */
+
+#define SDIOH_TYPE_ARASAN_HDK	1
+#define SDIOH_TYPE_BCM27XX	2
+#define SDIOH_TYPE_TI_PCIXX21	4	/* TI PCIxx21 Standard Host Controller */
+#define SDIOH_TYPE_RICOH_R5C822	5	/* Ricoh Co Ltd R5C822 SD/SDIO/MMC/MS/MSPro Host Adapter */
+#define SDIOH_TYPE_JMICRON	6	/* JMicron Standard SDIO Host Controller */
+
+/* For linux, allow yielding for dongle */
+#define BCMSDYIELD
+
+/* Expected card status value for CMD7 */
+#define SDIOH_CMD7_EXP_STATUS   0x00001E00
+
+#define RETRIES_LARGE 100000
+#define sdstd_os_yield(sd)	do {} while (0)
+#define RETRIES_SMALL 100
+
+
+#define USE_BLOCKMODE		0x2	/* Block mode can be single block or multi */
+#define USE_MULTIBLOCK		0x4
+
+#define USE_FIFO		0x8	/* Fifo vs non-fifo */
+
+#define CLIENT_INTR 		0x100	/* Get rid of this! */
+
+#define HC_INTR_RETUNING	0x1000
+
+
+#ifdef BCMSDIOH_TXGLOM
+/* Total glom pkt can not exceed 64K
+ * need one more slot for glom padding packet
+ */
+#define SDIOH_MAXGLOM_SIZE	(40+1)
+
+typedef struct glom_buf {
+	uint32 count;				/* Total number of pkts queued */
+	void *dma_buf_arr[SDIOH_MAXGLOM_SIZE];	/* Frame address */
+	ulong dma_phys_arr[SDIOH_MAXGLOM_SIZE]; /* DMA_MAPed address of frames */
+	uint16 nbytes[SDIOH_MAXGLOM_SIZE];	/* Size of each frame */
+} glom_buf_t;
+#endif
+
+struct sdioh_info {
+	uint cfg_bar;                   	/* pci cfg address for bar */
+	uint32 caps;                    	/* cached value of capabilities reg */
+	uint32 curr_caps;                    	/* max current capabilities reg */
+
+	osl_t 		*osh;			/* osh handler */
+	volatile char 	*mem_space;		/* pci device memory va */
+	uint		lockcount; 		/* nest count of sdstd_lock() calls */
+	bool		client_intr_enabled;	/* interrupt connnected flag */
+	bool		intr_handler_valid;	/* client driver interrupt handler valid */
+	sdioh_cb_fn_t	intr_handler;		/* registered interrupt handler */
+	void		*intr_handler_arg;	/* argument to call interrupt handler */
+	bool		initialized;		/* card initialized */
+	uint		target_dev;		/* Target device ID */
+	uint16		intmask;		/* Current active interrupts */
+	void		*sdos_info;		/* Pointer to per-OS private data */
+	void		*bcmsdh;		/* handler to upper layer stack (bcmsdh) */
+
+	uint32		controller_type;	/* Host controller type */
+	uint8		version;		/* Host Controller Spec Compliance Version */
+	uint		irq;			/* Client irq */
+	int		intrcount;		/* Client interrupts */
+	int		local_intrcount;	/* Controller interrupts */
+	bool		host_init_done;		/* Controller initted */
+	bool		card_init_done;		/* Client SDIO interface initted */
+	bool		polled_mode;		/* polling for command completion */
+
+	bool		sd_blockmode;		/* sd_blockmode == FALSE => 64 Byte Cmd 53s. */
+						/*  Must be on for sd_multiblock to be effective */
+	bool		use_client_ints;	/* If this is false, make sure to restore */
+						/*  polling hack in wl_linux.c:wl_timer() */
+	int		adapter_slot;		/* Maybe dealing with multiple slots/controllers */
+	int		sd_mode;		/* SD1/SD4/SPI */
+	int		client_block_size[SDIOD_MAX_IOFUNCS];		/* Blocksize */
+	uint32		data_xfer_count;	/* Current transfer */
+	uint16		card_rca;		/* Current Address */
+	int8		sd_dma_mode;		/* DMA Mode (PIO, SDMA, ... ADMA2) on CMD53 */
+	uint8		num_funcs;		/* Supported funcs on client */
+	uint32		com_cis_ptr;
+	uint32		func_cis_ptr[SDIOD_MAX_IOFUNCS];
+	void		*dma_buf;		/* DMA Buffer virtual address */
+	ulong		dma_phys;		/* DMA Buffer physical address */
+	void		*adma2_dscr_buf;	/* ADMA2 Descriptor Buffer virtual address */
+	ulong		adma2_dscr_phys;	/* ADMA2 Descriptor Buffer physical address */
+
+	/* adjustments needed to make the dma align properly */
+	void		*dma_start_buf;
+	ulong		dma_start_phys;
+	uint		alloced_dma_size;
+	void		*adma2_dscr_start_buf;
+	ulong		adma2_dscr_start_phys;
+	uint		alloced_adma2_dscr_size;
+
+	int 		r_cnt;			/* rx count */
+	int 		t_cnt;			/* tx_count */
+	bool		got_hcint;		/* local interrupt flag */
+	uint16		last_intrstatus;	/* to cache intrstatus */
+	int 	host_UHSISupported;		/* whether UHSI is supported for HC. */
+	int 	card_UHSI_voltage_Supported; 	/* whether UHSI is supported for
+						 * Card in terms of Voltage [1.8 or 3.3].
+						 */
+	int	global_UHSI_Supp;	/* type of UHSI support in both host and card.
+					 * HOST_SDR_UNSUPP: capabilities not supported/matched
+					 * HOST_SDR_12_25: SDR12 and SDR25 supported
+					 * HOST_SDR_50_104_DDR: one of SDR50/SDR104 or DDR50 supptd
+					 */
+	volatile int	sd3_dat_state; 		/* data transfer state used for retuning check */
+	volatile int	sd3_tun_state; 		/* tuning state used for retuning check */
+	bool	sd3_tuning_reqd; 	/* tuning requirement parameter */
+	uint32	caps3;			/* cached value of 32 MSbits capabilities reg (SDIO 3.0) */
+#ifdef BCMSDIOH_TXGLOM
+	glom_buf_t glom_info;		/* pkt information used for glomming */
+	uint	txglom_mode;		/* Txglom mode: 0 - copy, 1 - multi-descriptor */
+#endif
+};
+
+#define DMA_MODE_NONE	0
+#define DMA_MODE_SDMA	1
+#define DMA_MODE_ADMA1	2
+#define DMA_MODE_ADMA2	3
+#define DMA_MODE_ADMA2_64 4
+#define DMA_MODE_AUTO	-1
+
+#define USE_DMA(sd)		((bool)((sd->sd_dma_mode > 0) ? TRUE : FALSE))
+
+/* States for Tuning and corr data */
+#define TUNING_IDLE 			0
+#define TUNING_START 			1
+#define TUNING_START_AFTER_DAT 	2
+#define TUNING_ONGOING 			3
+
+#define DATA_TRANSFER_IDLE 		0
+#define DATA_TRANSFER_ONGOING	1
+
+#define CHECK_TUNING_PRE_DATA	1
+#define CHECK_TUNING_POST_DATA	2
+
+
+#ifdef DHD_DEBUG
+#define SD_DHD_DISABLE_PERIODIC_TUNING 0x01
+#define SD_DHD_ENABLE_PERIODIC_TUNING  0x00
+#endif
+
+
+/************************************************************
+ * Internal interfaces: per-port references into bcmsdstd.c
+ */
+
+/* Global message bits */
+extern uint sd_msglevel;
+
+/* OS-independent interrupt handler */
+extern bool check_client_intr(sdioh_info_t *sd);
+
+/* Core interrupt enable/disable of device interrupts */
+extern void sdstd_devintr_on(sdioh_info_t *sd);
+extern void sdstd_devintr_off(sdioh_info_t *sd);
+
+/* Enable/disable interrupts for local controller events */
+extern void sdstd_intrs_on(sdioh_info_t *sd, uint16 norm, uint16 err);
+extern void sdstd_intrs_off(sdioh_info_t *sd, uint16 norm, uint16 err);
+
+/* Wait for specified interrupt and error bits to be set */
+extern void sdstd_spinbits(sdioh_info_t *sd, uint16 norm, uint16 err);
+
+
+/**************************************************************
+ * Internal interfaces: bcmsdstd.c references to per-port code
+ */
+
+/* Register mapping routines */
+extern uint32 *sdstd_reg_map(osl_t *osh, ulong addr, int size);
+extern void sdstd_reg_unmap(osl_t *osh, ulong addr, int size);
+
+/* Interrupt (de)registration routines */
+extern int sdstd_register_irq(sdioh_info_t *sd, uint irq);
+extern void sdstd_free_irq(uint irq, sdioh_info_t *sd);
+
+/* OS-specific interrupt wrappers (atomic interrupt enable/disable) */
+extern void sdstd_lock(sdioh_info_t *sd);
+extern void sdstd_unlock(sdioh_info_t *sd);
+extern void sdstd_waitlockfree(sdioh_info_t *sd);
+
+/* OS-specific wrappers for safe concurrent register access */
+extern void sdstd_os_lock_irqsave(sdioh_info_t *sd, ulong* flags);
+extern void sdstd_os_unlock_irqrestore(sdioh_info_t *sd, ulong* flags);
+
+/* OS-specific wait-for-interrupt-or-status */
+extern int sdstd_waitbits(sdioh_info_t *sd, uint16 norm, uint16 err, bool yield, uint16 *bits);
+
+/* used by bcmsdstd_linux [implemented in sdstd] */
+extern void sdstd_3_enable_retuning_int(sdioh_info_t *sd);
+extern void sdstd_3_disable_retuning_int(sdioh_info_t *sd);
+extern bool sdstd_3_is_retuning_int_set(sdioh_info_t *sd);
+extern void sdstd_3_check_and_do_tuning(sdioh_info_t *sd, int tuning_param);
+extern bool sdstd_3_check_and_set_retuning(sdioh_info_t *sd);
+extern int sdstd_3_get_tune_state(sdioh_info_t *sd);
+extern int sdstd_3_get_data_state(sdioh_info_t *sd);
+extern void sdstd_3_set_tune_state(sdioh_info_t *sd, int state);
+extern void sdstd_3_set_data_state(sdioh_info_t *sd, int state);
+extern uint8 sdstd_3_get_tuning_exp(sdioh_info_t *sd);
+extern uint32 sdstd_3_get_uhsi_clkmode(sdioh_info_t *sd);
+extern int sdstd_3_clk_tuning(sdioh_info_t *sd, uint32 sd3ClkMode);
+
+/* used by sdstd [implemented in bcmsdstd_linux/ndis] */
+extern void sdstd_3_start_tuning(sdioh_info_t *sd);
+extern void sdstd_3_osinit_tuning(sdioh_info_t *sd);
+extern void sdstd_3_osclean_tuning(sdioh_info_t *sd);
+
+extern void sdstd_enable_disable_periodic_timer(sdioh_info_t * sd, uint val);
+
+extern sdioh_info_t *sdioh_attach(osl_t *osh, void *bar0, uint irq);
+extern SDIOH_API_RC sdioh_detach(osl_t *osh, sdioh_info_t *sd);
+#endif /* _BCM_SD_STD_H */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmspi.h b/drivers/net/wireless/bcmdhd/include/bcmspi.h
new file mode 100644
index 0000000..cf814ce
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmspi.h
@@ -0,0 +1,40 @@
+/*
+ * Broadcom SPI Low-Level Hardware Driver API
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmspi.h 241182 2011-02-17 21:50:03Z $
+ */
+#ifndef	_BCM_SPI_H
+#define	_BCM_SPI_H
+
+extern void spi_devintr_off(sdioh_info_t *sd);
+extern void spi_devintr_on(sdioh_info_t *sd);
+extern bool spi_start_clock(sdioh_info_t *sd, uint16 new_sd_divisor);
+extern bool spi_controller_highspeed_mode(sdioh_info_t *sd, bool hsmode);
+extern bool spi_check_client_intr(sdioh_info_t *sd, int *is_dev_intr);
+extern bool spi_hw_attach(sdioh_info_t *sd);
+extern bool spi_hw_detach(sdioh_info_t *sd);
+extern void spi_sendrecv(sdioh_info_t *sd, uint8 *msg_out, uint8 *msg_in, int msglen);
+extern void spi_spinbits(sdioh_info_t *sd);
+extern void spi_waitbits(sdioh_info_t *sd, bool yield);
+
+#endif /* _BCM_SPI_H */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmspibrcm.h b/drivers/net/wireless/bcmdhd/include/bcmspibrcm.h
new file mode 100644
index 0000000..d055ff1
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmspibrcm.h
@@ -0,0 +1,162 @@
+/*
+ * SD-SPI Protocol Conversion - BCMSDH->gSPI Translation Layer
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmspibrcm.h 373331 2012-12-07 04:46:22Z $
+ */
+#ifndef	_BCM_SPI_BRCM_H
+#define	_BCM_SPI_BRCM_H
+
+#ifndef SPI_MAX_IOFUNCS
+/* Maximum number of I/O funcs */
+#define SPI_MAX_IOFUNCS		4
+#endif
+/* global msglevel for debug messages - bitvals come from sdiovar.h */
+
+#if defined(DHD_DEBUG)
+#define sd_err(x)	do { if (sd_msglevel & SDH_ERROR_VAL) printf x; } while (0)
+#define sd_trace(x)	do { if (sd_msglevel & SDH_TRACE_VAL) printf x; } while (0)
+#define sd_info(x)	do { if (sd_msglevel & SDH_INFO_VAL)  printf x; } while (0)
+#define sd_debug(x)	do { if (sd_msglevel & SDH_DEBUG_VAL) printf x; } while (0)
+#define sd_data(x)	do { if (sd_msglevel & SDH_DATA_VAL)  printf x; } while (0)
+#define sd_ctrl(x)	do { if (sd_msglevel & SDH_CTRL_VAL)  printf x; } while (0)
+#else
+#define sd_err(x)
+#define sd_trace(x)
+#define sd_info(x)
+#define sd_debug(x)
+#define sd_data(x)
+#define sd_ctrl(x)
+#endif
+
+#define sd_log(x)
+
+#define SDIOH_ASSERT(exp) \
+	do { if (!(exp)) \
+		printf("!!!ASSERT fail: file %s lines %d", __FILE__, __LINE__); \
+	} while (0)
+
+#define BLOCK_SIZE_F1		64
+#define BLOCK_SIZE_F2 		2048
+#define BLOCK_SIZE_F3 		2048
+
+/* internal return code */
+#define SUCCESS	0
+#undef ERROR
+#define ERROR	1
+#define ERROR_UF	2
+#define ERROR_OF	3
+
+/* private bus modes */
+#define SDIOH_MODE_SPI		0
+
+#define USE_BLOCKMODE		0x2	/* Block mode can be single block or multi */
+#define USE_MULTIBLOCK		0x4
+
+struct sdioh_info {
+	uint		cfg_bar;		/* pci cfg address for bar */
+	uint32		caps;			/* cached value of capabilities reg */
+	void		*bar0;			/* BAR0 for PCI Device */
+	osl_t		*osh;			/* osh handler */
+	void		*controller;	/* Pointer to SPI Controller's private data struct */
+	uint		lockcount;		/* nest count of spi_lock() calls */
+	bool		client_intr_enabled;	/* interrupt connnected flag */
+	bool		intr_handler_valid;	/* client driver interrupt handler valid */
+	sdioh_cb_fn_t	intr_handler;		/* registered interrupt handler */
+	void		*intr_handler_arg;	/* argument to call interrupt handler */
+	bool		initialized;		/* card initialized */
+	uint32		target_dev;		/* Target device ID */
+	uint32		intmask;		/* Current active interrupts */
+	void		*sdos_info;		/* Pointer to per-OS private data */
+	uint32		controller_type;	/* Host controller type */
+	uint8		version;		/* Host Controller Spec Compliance Version */
+	uint		irq;			/* Client irq */
+	uint32		intrcount;		/* Client interrupts */
+	uint32		local_intrcount;	/* Controller interrupts */
+	bool 		host_init_done;		/* Controller initted */
+	bool 		card_init_done;		/* Client SDIO interface initted */
+	bool 		polled_mode;		/* polling for command completion */
+
+	bool		sd_use_dma;		/* DMA on CMD53 */
+	bool 		sd_blockmode;		/* sd_blockmode == FALSE => 64 Byte Cmd 53s. */
+						/*  Must be on for sd_multiblock to be effective */
+	bool 		use_client_ints;	/* If this is false, make sure to restore */
+						/*  polling hack in wl_linux.c:wl_timer() */
+	int 		adapter_slot;		/* Maybe dealing with multiple slots/controllers */
+	int 		sd_mode;		/* SD1/SD4/SPI */
+	int 		client_block_size[SPI_MAX_IOFUNCS];		/* Blocksize */
+	uint32 		data_xfer_count;	/* Current transfer */
+	uint16 		card_rca;		/* Current Address */
+	uint8 		num_funcs;		/* Supported funcs on client */
+	uint32 		card_dstatus;		/* 32bit device status */
+	uint32 		com_cis_ptr;
+	uint32 		func_cis_ptr[SPI_MAX_IOFUNCS];
+	void		*dma_buf;
+	ulong		dma_phys;
+	int 		r_cnt;			/* rx count */
+	int 		t_cnt;			/* tx_count */
+	uint32		wordlen;			/* host processor 16/32bits */
+	uint32		prev_fun;
+	uint32		chip;
+	uint32		chiprev;
+	bool		resp_delay_all;
+	bool		dwordmode;
+	bool		resp_delay_new;
+
+	struct spierrstats_t spierrstats;
+};
+
+/************************************************************
+ * Internal interfaces: per-port references into bcmspibrcm.c
+ */
+
+/* Global message bits */
+extern uint sd_msglevel;
+
+/**************************************************************
+ * Internal interfaces: bcmspibrcm.c references to per-port code
+ */
+
+/* Interrupt (de)registration routines */
+extern int spi_register_irq(sdioh_info_t *sd, uint irq);
+extern void spi_free_irq(uint irq, sdioh_info_t *sd);
+
+/* OS-specific interrupt wrappers (atomic interrupt enable/disable) */
+extern void spi_lock(sdioh_info_t *sd);
+extern void spi_unlock(sdioh_info_t *sd);
+
+/* Allocate/init/free per-OS private data */
+extern int spi_osinit(sdioh_info_t *sd);
+extern void spi_osfree(sdioh_info_t *sd);
+
+#define SPI_RW_FLAG_M			BITFIELD_MASK(1)	/* Bit [31] - R/W Command Bit */
+#define SPI_RW_FLAG_S			31
+#define SPI_ACCESS_M			BITFIELD_MASK(1)	/* Bit [30] - Fixed/Incr Access */
+#define SPI_ACCESS_S			30
+#define SPI_FUNCTION_M			BITFIELD_MASK(2)	/* Bit [29:28] - Function Number */
+#define SPI_FUNCTION_S			28
+#define SPI_REG_ADDR_M			BITFIELD_MASK(17)	/* Bit [27:11] - Address */
+#define SPI_REG_ADDR_S			11
+#define SPI_LEN_M			BITFIELD_MASK(11)	/* Bit [10:0] - Packet length */
+#define SPI_LEN_S			0
+
+#endif /* _BCM_SPI_BRCM_H */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmsrom_fmt.h b/drivers/net/wireless/bcmdhd/include/bcmsrom_fmt.h
new file mode 100644
index 0000000..82eba65
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmsrom_fmt.h
@@ -0,0 +1,633 @@
+/*
+ * SROM format definition.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsrom_fmt.h 473704 2014-04-29 15:49:57Z $
+ */
+
+#ifndef	_bcmsrom_fmt_h_
+#define	_bcmsrom_fmt_h_
+
+#define SROM_MAXREV		11	/* max revisiton supported by driver */
+
+/* Maximum srom: 12 Kilobits == 1536 bytes */
+#define	SROM_MAX		1536
+#define SROM_MAXW		384
+#define VARS_MAX		4096
+
+/* PCI fields */
+#define PCI_F0DEVID		48
+
+
+#define	SROM_WORDS		64
+
+#define SROM3_SWRGN_OFF		28	/* s/w region offset in words */
+
+#define	SROM_SSID		2
+#define	SROM_SVID		3
+
+#define	SROM_WL1LHMAXP		29
+
+#define	SROM_WL1LPAB0		30
+#define	SROM_WL1LPAB1		31
+#define	SROM_WL1LPAB2		32
+
+#define	SROM_WL1HPAB0		33
+#define	SROM_WL1HPAB1		34
+#define	SROM_WL1HPAB2		35
+
+#define	SROM_MACHI_IL0		36
+#define	SROM_MACMID_IL0		37
+#define	SROM_MACLO_IL0		38
+#define	SROM_MACHI_ET0		39
+#define	SROM_MACMID_ET0		40
+#define	SROM_MACLO_ET0		41
+#define	SROM_MACHI_ET1		42
+#define	SROM_MACMID_ET1		43
+#define	SROM_MACLO_ET1		44
+#define	SROM3_MACHI		37
+#define	SROM3_MACMID		38
+#define	SROM3_MACLO		39
+
+#define	SROM_BXARSSI2G		40
+#define	SROM_BXARSSI5G		41
+
+#define	SROM_TRI52G		42
+#define	SROM_TRI5GHL		43
+
+#define	SROM_RXPO52G		45
+
+#define	SROM2_ENETPHY		45
+
+#define	SROM_AABREV		46
+/* Fields in AABREV */
+#define	SROM_BR_MASK		0x00ff
+#define	SROM_CC_MASK		0x0f00
+#define	SROM_CC_SHIFT		8
+#define	SROM_AA0_MASK		0x3000
+#define	SROM_AA0_SHIFT		12
+#define	SROM_AA1_MASK		0xc000
+#define	SROM_AA1_SHIFT		14
+
+#define	SROM_WL0PAB0		47
+#define	SROM_WL0PAB1		48
+#define	SROM_WL0PAB2		49
+
+#define	SROM_LEDBH10		50
+#define	SROM_LEDBH32		51
+
+#define	SROM_WL10MAXP		52
+
+#define	SROM_WL1PAB0		53
+#define	SROM_WL1PAB1		54
+#define	SROM_WL1PAB2		55
+
+#define	SROM_ITT		56
+
+#define	SROM_BFL		57
+#define	SROM_BFL2		28
+#define	SROM3_BFL2		61
+
+#define	SROM_AG10		58
+
+#define	SROM_CCODE		59
+
+#define	SROM_OPO		60
+
+#define	SROM3_LEDDC		62
+
+#define	SROM_CRCREV		63
+
+/* SROM Rev 4: Reallocate the software part of the srom to accomodate
+ * MIMO features. It assumes up to two PCIE functions and 440 bytes
+ * of useable srom i.e. the useable storage in chips with OTP that
+ * implements hardware redundancy.
+ */
+
+#define	SROM4_WORDS		220
+
+#define	SROM4_SIGN		32
+#define	SROM4_SIGNATURE		0x5372
+
+#define	SROM4_BREV		33
+
+#define	SROM4_BFL0		34
+#define	SROM4_BFL1		35
+#define	SROM4_BFL2		36
+#define	SROM4_BFL3		37
+#define	SROM5_BFL0		37
+#define	SROM5_BFL1		38
+#define	SROM5_BFL2		39
+#define	SROM5_BFL3		40
+
+#define	SROM4_MACHI		38
+#define	SROM4_MACMID		39
+#define	SROM4_MACLO		40
+#define	SROM5_MACHI		41
+#define	SROM5_MACMID		42
+#define	SROM5_MACLO		43
+
+#define	SROM4_CCODE		41
+#define	SROM4_REGREV		42
+#define	SROM5_CCODE		34
+#define	SROM5_REGREV		35
+
+#define	SROM4_LEDBH10		43
+#define	SROM4_LEDBH32		44
+#define	SROM5_LEDBH10		59
+#define	SROM5_LEDBH32		60
+
+#define	SROM4_LEDDC		45
+#define	SROM5_LEDDC		45
+
+#define	SROM4_AA		46
+#define	SROM4_AA2G_MASK		0x00ff
+#define	SROM4_AA2G_SHIFT	0
+#define	SROM4_AA5G_MASK		0xff00
+#define	SROM4_AA5G_SHIFT	8
+
+#define	SROM4_AG10		47
+#define	SROM4_AG32		48
+
+#define	SROM4_TXPID2G		49
+#define	SROM4_TXPID5G		51
+#define	SROM4_TXPID5GL		53
+#define	SROM4_TXPID5GH		55
+
+#define SROM4_TXRXC		61
+#define SROM4_TXCHAIN_MASK	0x000f
+#define SROM4_TXCHAIN_SHIFT	0
+#define SROM4_RXCHAIN_MASK	0x00f0
+#define SROM4_RXCHAIN_SHIFT	4
+#define SROM4_SWITCH_MASK	0xff00
+#define SROM4_SWITCH_SHIFT	8
+
+
+/* Per-path fields */
+#define	MAX_PATH_SROM		4
+#define	SROM4_PATH0		64
+#define	SROM4_PATH1		87
+#define	SROM4_PATH2		110
+#define	SROM4_PATH3		133
+
+#define	SROM4_2G_ITT_MAXP	0
+#define	SROM4_2G_PA		1
+#define	SROM4_5G_ITT_MAXP	5
+#define	SROM4_5GLH_MAXP		6
+#define	SROM4_5G_PA		7
+#define	SROM4_5GL_PA		11
+#define	SROM4_5GH_PA		15
+
+/* Fields in the ITT_MAXP and 5GLH_MAXP words */
+#define	B2G_MAXP_MASK		0xff
+#define	B2G_ITT_SHIFT		8
+#define	B5G_MAXP_MASK		0xff
+#define	B5G_ITT_SHIFT		8
+#define	B5GH_MAXP_MASK		0xff
+#define	B5GL_MAXP_SHIFT		8
+
+/* All the miriad power offsets */
+#define	SROM4_2G_CCKPO		156
+#define	SROM4_2G_OFDMPO		157
+#define	SROM4_5G_OFDMPO		159
+#define	SROM4_5GL_OFDMPO	161
+#define	SROM4_5GH_OFDMPO	163
+#define	SROM4_2G_MCSPO		165
+#define	SROM4_5G_MCSPO		173
+#define	SROM4_5GL_MCSPO		181
+#define	SROM4_5GH_MCSPO		189
+#define	SROM4_CDDPO		197
+#define	SROM4_STBCPO		198
+#define	SROM4_BW40PO		199
+#define	SROM4_BWDUPPO		200
+
+#define	SROM4_CRCREV		219
+
+
+/* SROM Rev 8: Make space for a 48word hardware header for PCIe rev >= 6.
+ * This is acombined srom for both MIMO and SISO boards, usable in
+ * the .130 4Kilobit OTP with hardware redundancy.
+ */
+
+#define	SROM8_SIGN		64
+
+#define	SROM8_BREV		65
+
+#define	SROM8_BFL0		66
+#define	SROM8_BFL1		67
+#define	SROM8_BFL2		68
+#define	SROM8_BFL3		69
+
+#define	SROM8_MACHI		70
+#define	SROM8_MACMID		71
+#define	SROM8_MACLO		72
+
+#define	SROM8_CCODE		73
+#define	SROM8_REGREV		74
+
+#define	SROM8_LEDBH10		75
+#define	SROM8_LEDBH32		76
+
+#define	SROM8_LEDDC		77
+
+#define	SROM8_AA		78
+
+#define	SROM8_AG10		79
+#define	SROM8_AG32		80
+
+#define	SROM8_TXRXC		81
+
+#define	SROM8_BXARSSI2G		82
+#define	SROM8_BXARSSI5G		83
+#define	SROM8_TRI52G		84
+#define	SROM8_TRI5GHL		85
+#define	SROM8_RXPO52G		86
+
+#define SROM8_FEM2G		87
+#define SROM8_FEM5G		88
+#define SROM8_FEM_ANTSWLUT_MASK		0xf800
+#define SROM8_FEM_ANTSWLUT_SHIFT	11
+#define SROM8_FEM_TR_ISO_MASK		0x0700
+#define SROM8_FEM_TR_ISO_SHIFT		8
+#define SROM8_FEM_PDET_RANGE_MASK	0x00f8
+#define SROM8_FEM_PDET_RANGE_SHIFT	3
+#define SROM8_FEM_EXTPA_GAIN_MASK	0x0006
+#define SROM8_FEM_EXTPA_GAIN_SHIFT	1
+#define SROM8_FEM_TSSIPOS_MASK		0x0001
+#define SROM8_FEM_TSSIPOS_SHIFT		0
+
+#define SROM8_THERMAL		89
+
+/* Temp sense related entries */
+#define SROM8_MPWR_RAWTS		90
+#define SROM8_TS_SLP_OPT_CORRX	91
+/* FOC: freiquency offset correction, HWIQ: H/W IOCAL enable, IQSWP: IQ CAL swap disable */
+#define SROM8_FOC_HWIQ_IQSWP	92
+
+#define SROM8_EXTLNAGAIN        93
+
+/* Temperature delta for PHY calibration */
+#define SROM8_PHYCAL_TEMPDELTA	94
+
+/* Measured power 1 & 2, 0-13 bits at offset 95, MSB 2 bits are unused for now. */
+#define SROM8_MPWR_1_AND_2	95
+
+
+/* Per-path offsets & fields */
+#define	SROM8_PATH0		96
+#define	SROM8_PATH1		112
+#define	SROM8_PATH2		128
+#define	SROM8_PATH3		144
+
+#define	SROM8_2G_ITT_MAXP	0
+#define	SROM8_2G_PA		1
+#define	SROM8_5G_ITT_MAXP	4
+#define	SROM8_5GLH_MAXP		5
+#define	SROM8_5G_PA		6
+#define	SROM8_5GL_PA		9
+#define	SROM8_5GH_PA		12
+
+/* All the miriad power offsets */
+#define	SROM8_2G_CCKPO		160
+
+#define	SROM8_2G_OFDMPO		161
+#define	SROM8_5G_OFDMPO		163
+#define	SROM8_5GL_OFDMPO	165
+#define	SROM8_5GH_OFDMPO	167
+
+#define	SROM8_2G_MCSPO		169
+#define	SROM8_5G_MCSPO		177
+#define	SROM8_5GL_MCSPO		185
+#define	SROM8_5GH_MCSPO		193
+
+#define	SROM8_CDDPO		201
+#define	SROM8_STBCPO		202
+#define	SROM8_BW40PO		203
+#define	SROM8_BWDUPPO		204
+
+/* SISO PA parameters are in the path0 spaces */
+#define	SROM8_SISO		96
+
+/* Legacy names for SISO PA paramters */
+#define	SROM8_W0_ITTMAXP	(SROM8_SISO + SROM8_2G_ITT_MAXP)
+#define	SROM8_W0_PAB0		(SROM8_SISO + SROM8_2G_PA)
+#define	SROM8_W0_PAB1		(SROM8_SISO + SROM8_2G_PA + 1)
+#define	SROM8_W0_PAB2		(SROM8_SISO + SROM8_2G_PA + 2)
+#define	SROM8_W1_ITTMAXP	(SROM8_SISO + SROM8_5G_ITT_MAXP)
+#define	SROM8_W1_MAXP_LCHC	(SROM8_SISO + SROM8_5GLH_MAXP)
+#define	SROM8_W1_PAB0		(SROM8_SISO + SROM8_5G_PA)
+#define	SROM8_W1_PAB1		(SROM8_SISO + SROM8_5G_PA + 1)
+#define	SROM8_W1_PAB2		(SROM8_SISO + SROM8_5G_PA + 2)
+#define	SROM8_W1_PAB0_LC	(SROM8_SISO + SROM8_5GL_PA)
+#define	SROM8_W1_PAB1_LC	(SROM8_SISO + SROM8_5GL_PA + 1)
+#define	SROM8_W1_PAB2_LC	(SROM8_SISO + SROM8_5GL_PA + 2)
+#define	SROM8_W1_PAB0_HC	(SROM8_SISO + SROM8_5GH_PA)
+#define	SROM8_W1_PAB1_HC	(SROM8_SISO + SROM8_5GH_PA + 1)
+#define	SROM8_W1_PAB2_HC	(SROM8_SISO + SROM8_5GH_PA + 2)
+
+#define	SROM8_CRCREV		219
+
+/* SROM REV 9 */
+#define SROM9_2GPO_CCKBW20	160
+#define SROM9_2GPO_CCKBW20UL	161
+#define SROM9_2GPO_LOFDMBW20	162
+#define SROM9_2GPO_LOFDMBW20UL	164
+
+#define SROM9_5GLPO_LOFDMBW20	166
+#define SROM9_5GLPO_LOFDMBW20UL	168
+#define SROM9_5GMPO_LOFDMBW20	170
+#define SROM9_5GMPO_LOFDMBW20UL	172
+#define SROM9_5GHPO_LOFDMBW20	174
+#define SROM9_5GHPO_LOFDMBW20UL	176
+
+#define SROM9_2GPO_MCSBW20	178
+#define SROM9_2GPO_MCSBW20UL	180
+#define SROM9_2GPO_MCSBW40	182
+
+#define SROM9_5GLPO_MCSBW20	184
+#define SROM9_5GLPO_MCSBW20UL	186
+#define SROM9_5GLPO_MCSBW40	188
+#define SROM9_5GMPO_MCSBW20	190
+#define SROM9_5GMPO_MCSBW20UL	192
+#define SROM9_5GMPO_MCSBW40	194
+#define SROM9_5GHPO_MCSBW20	196
+#define SROM9_5GHPO_MCSBW20UL	198
+#define SROM9_5GHPO_MCSBW40	200
+
+#define SROM9_PO_MCS32		202
+#define SROM9_PO_LOFDM40DUP	203
+#define SROM8_RXGAINERR_2G	205
+#define SROM8_RXGAINERR_5GL	206
+#define SROM8_RXGAINERR_5GM	207
+#define SROM8_RXGAINERR_5GH	208
+#define SROM8_RXGAINERR_5GU	209
+#define SROM8_SUBBAND_PPR	210
+#define SROM8_PCIEINGRESS_WAR	211
+#define SROM9_SAR		212
+
+#define SROM8_NOISELVL_2G	213
+#define SROM8_NOISELVL_5GL	214
+#define SROM8_NOISELVL_5GM	215
+#define SROM8_NOISELVL_5GH	216
+#define SROM8_NOISELVL_5GU	217
+#define SROM8_NOISECALOFFSET	218
+
+#define SROM9_REV_CRC		219
+
+#define SROM10_CCKPWROFFSET	218
+#define SROM10_SIGN		219
+#define SROM10_SWCTRLMAP_2G	220
+#define SROM10_CRCREV		229
+
+#define	SROM10_WORDS		230
+#define	SROM10_SIGNATURE	SROM4_SIGNATURE
+
+
+/* SROM REV 11 */
+#define SROM11_BREV			65
+
+#define SROM11_BFL0			66
+#define SROM11_BFL1			67
+#define SROM11_BFL2			68
+#define SROM11_BFL3			69
+#define SROM11_BFL4			70
+#define SROM11_BFL5			71
+
+#define SROM11_MACHI			72
+#define SROM11_MACMID			73
+#define SROM11_MACLO			74
+
+#define SROM11_CCODE			75
+#define SROM11_REGREV			76
+
+#define SROM11_LEDBH10			77
+#define SROM11_LEDBH32			78
+
+#define SROM11_LEDDC			79
+
+#define SROM11_AA			80
+
+#define SROM11_AGBG10			81
+#define SROM11_AGBG2A0			82
+#define SROM11_AGA21			83
+
+#define SROM11_TXRXC			84
+
+#define SROM11_FEM_CFG1			85
+#define SROM11_FEM_CFG2			86
+
+/* Masks and offsets for FEM_CFG */
+#define SROM11_FEMCTRL_MASK		0xf800
+#define SROM11_FEMCTRL_SHIFT		11
+#define SROM11_PAPDCAP_MASK		0x0400
+#define SROM11_PAPDCAP_SHIFT		10
+#define SROM11_TWORANGETSSI_MASK	0x0200
+#define SROM11_TWORANGETSSI_SHIFT	9
+#define SROM11_PDGAIN_MASK		0x01f0
+#define SROM11_PDGAIN_SHIFT		4
+#define SROM11_EPAGAIN_MASK		0x000e
+#define SROM11_EPAGAIN_SHIFT		1
+#define SROM11_TSSIPOSSLOPE_MASK	0x0001
+#define SROM11_TSSIPOSSLOPE_SHIFT	0
+#define SROM11_GAINCTRLSPH_MASK		0xf800
+#define SROM11_GAINCTRLSPH_SHIFT	11
+
+#define SROM11_THERMAL			87
+#define SROM11_MPWR_RAWTS		88
+#define SROM11_TS_SLP_OPT_CORRX		89
+#define SROM11_XTAL_FREQ		90
+#define SROM11_5GB0_4080_W0_A1          91
+#define SROM11_PHYCAL_TEMPDELTA  	92
+#define SROM11_MPWR_1_AND_2 		93
+#define SROM11_5GB0_4080_W1_A1          94
+#define SROM11_TSSIFLOOR_2G 		95
+#define SROM11_TSSIFLOOR_5GL 		96
+#define SROM11_TSSIFLOOR_5GM 		97
+#define SROM11_TSSIFLOOR_5GH 		98
+#define SROM11_TSSIFLOOR_5GU 		99
+
+/* Masks and offsets for Terrmal parameters */
+#define SROM11_TEMPS_PERIOD_MASK	0xf0
+#define SROM11_TEMPS_PERIOD_SHIFT	4
+#define SROM11_TEMPS_HYSTERESIS_MASK	0x0f
+#define SROM11_TEMPS_HYSTERESIS_SHIFT	0
+#define SROM11_TEMPCORRX_MASK		0xfc
+#define SROM11_TEMPCORRX_SHIFT		2
+#define SROM11_TEMPSENSE_OPTION_MASK	0x3
+#define SROM11_TEMPSENSE_OPTION_SHIFT	0
+
+#define SROM11_PDOFF_2G_40M_A0_MASK     0x000f
+#define SROM11_PDOFF_2G_40M_A0_SHIFT    0
+#define SROM11_PDOFF_2G_40M_A1_MASK     0x00f0
+#define SROM11_PDOFF_2G_40M_A1_SHIFT    4
+#define SROM11_PDOFF_2G_40M_A2_MASK     0x0f00
+#define SROM11_PDOFF_2G_40M_A2_SHIFT    8
+#define SROM11_PDOFF_2G_40M_VALID_MASK  0x8000
+#define SROM11_PDOFF_2G_40M_VALID_SHIFT 15
+
+#define SROM11_PDOFF_2G_40M     	100
+#define SROM11_PDOFF_40M_A0		101
+#define SROM11_PDOFF_40M_A1		102
+#define SROM11_PDOFF_40M_A2		103
+#define SROM11_5GB0_4080_W2_A1          103
+#define SROM11_PDOFF_80M_A0		104
+#define SROM11_PDOFF_80M_A1		105
+#define SROM11_PDOFF_80M_A2		106
+#define SROM11_5GB1_4080_W0_A1          106
+
+#define SROM11_SUBBAND5GVER 		107
+
+/* Per-path fields and offset */
+#define	MAX_PATH_SROM_11		3
+#define SROM11_PATH0			108
+#define SROM11_PATH1			128
+#define SROM11_PATH2			148
+
+#define	SROM11_2G_MAXP			0
+#define SROM11_5GB1_4080_PA             0
+#define	SROM11_2G_PA			1
+#define SROM11_5GB2_4080_PA             2
+#define	SROM11_RXGAINS1			4
+#define	SROM11_RXGAINS			5
+#define SROM11_5GB3_4080_PA             5
+#define	SROM11_5GB1B0_MAXP		6
+#define	SROM11_5GB3B2_MAXP		7
+#define	SROM11_5GB0_PA			8
+#define	SROM11_5GB1_PA			11
+#define	SROM11_5GB2_PA			14
+#define	SROM11_5GB3_PA			17
+
+/* Masks and offsets for rxgains */
+#define SROM11_RXGAINS5GTRELNABYPA_MASK		0x8000
+#define SROM11_RXGAINS5GTRELNABYPA_SHIFT	15
+#define SROM11_RXGAINS5GTRISOA_MASK		0x7800
+#define SROM11_RXGAINS5GTRISOA_SHIFT		11
+#define SROM11_RXGAINS5GELNAGAINA_MASK		0x0700
+#define SROM11_RXGAINS5GELNAGAINA_SHIFT		8
+#define SROM11_RXGAINS2GTRELNABYPA_MASK		0x0080
+#define SROM11_RXGAINS2GTRELNABYPA_SHIFT	7
+#define SROM11_RXGAINS2GTRISOA_MASK		0x0078
+#define SROM11_RXGAINS2GTRISOA_SHIFT		3
+#define SROM11_RXGAINS2GELNAGAINA_MASK		0x0007
+#define SROM11_RXGAINS2GELNAGAINA_SHIFT		0
+#define SROM11_RXGAINS5GHTRELNABYPA_MASK	0x8000
+#define SROM11_RXGAINS5GHTRELNABYPA_SHIFT	15
+#define SROM11_RXGAINS5GHTRISOA_MASK		0x7800
+#define SROM11_RXGAINS5GHTRISOA_SHIFT		11
+#define SROM11_RXGAINS5GHELNAGAINA_MASK		0x0700
+#define SROM11_RXGAINS5GHELNAGAINA_SHIFT	8
+#define SROM11_RXGAINS5GMTRELNABYPA_MASK	0x0080
+#define SROM11_RXGAINS5GMTRELNABYPA_SHIFT	7
+#define SROM11_RXGAINS5GMTRISOA_MASK		0x0078
+#define SROM11_RXGAINS5GMTRISOA_SHIFT		3
+#define SROM11_RXGAINS5GMELNAGAINA_MASK		0x0007
+#define SROM11_RXGAINS5GMELNAGAINA_SHIFT	0
+
+/* Power per rate */
+#define SROM11_CCKBW202GPO		168
+#define SROM11_CCKBW20UL2GPO		169
+#define SROM11_MCSBW202GPO		170
+#define SROM11_MCSBW202GPO_1		171
+#define SROM11_MCSBW402GPO		172
+#define SROM11_MCSBW402GPO_1		173
+#define SROM11_DOT11AGOFDMHRBW202GPO	174
+#define SROM11_OFDMLRBW202GPO		175
+
+#define SROM11_MCSBW205GLPO 		176
+#define SROM11_MCSBW205GLPO_1		177
+#define SROM11_MCSBW405GLPO 		178
+#define SROM11_MCSBW405GLPO_1		179
+#define SROM11_MCSBW805GLPO 		180
+#define SROM11_MCSBW805GLPO_1		181
+#define SROM11_RPCAL_2G			182
+#define SROM11_RPCAL_5GL		183
+#define SROM11_MCSBW205GMPO 		184
+#define SROM11_MCSBW205GMPO_1		185
+#define SROM11_MCSBW405GMPO 		186
+#define SROM11_MCSBW405GMPO_1		187
+#define SROM11_MCSBW805GMPO 		188
+#define SROM11_MCSBW805GMPO_1		189
+#define SROM11_RPCAL_5GM		190
+#define SROM11_RPCAL_5GH		191
+#define SROM11_MCSBW205GHPO 		192
+#define SROM11_MCSBW205GHPO_1		193
+#define SROM11_MCSBW405GHPO 		194
+#define SROM11_MCSBW405GHPO_1		195
+#define SROM11_MCSBW805GHPO 		196
+#define SROM11_MCSBW805GHPO_1		197
+#define SROM11_RPCAL_5GU		198
+#define SROM11_PDOFF_2G_CCK	        199
+#define SROM11_MCSLR5GLPO		200
+#define SROM11_MCSLR5GMPO		201
+#define SROM11_MCSLR5GHPO		202
+
+#define SROM11_SB20IN40HRPO		203
+#define SROM11_SB20IN80AND160HR5GLPO 	204
+#define SROM11_SB40AND80HR5GLPO		205
+#define SROM11_SB20IN80AND160HR5GMPO 	206
+#define SROM11_SB40AND80HR5GMPO		207
+#define SROM11_SB20IN80AND160HR5GHPO 	208
+#define SROM11_SB40AND80HR5GHPO		209
+#define SROM11_SB20IN40LRPO 		210
+#define SROM11_SB20IN80AND160LR5GLPO	211
+#define SROM11_SB40AND80LR5GLPO		212
+#define SROM11_TXIDXCAP2G               212
+#define SROM11_SB20IN80AND160LR5GMPO	213
+#define SROM11_SB40AND80LR5GMPO		214
+#define SROM11_TXIDXCAP5G               214
+#define SROM11_SB20IN80AND160LR5GHPO	215
+#define SROM11_SB40AND80LR5GHPO		216
+
+#define SROM11_DOT11AGDUPHRPO 		217
+#define SROM11_DOT11AGDUPLRPO		218
+
+/* MISC */
+#define SROM11_PCIEINGRESS_WAR		220
+#define SROM11_SAR			221
+
+#define SROM11_NOISELVL_2G		222
+#define SROM11_NOISELVL_5GL 		223
+#define SROM11_NOISELVL_5GM 		224
+#define SROM11_NOISELVL_5GH 		225
+#define SROM11_NOISELVL_5GU 		226
+
+#define SROM11_RXGAINERR_2G		227
+#define SROM11_RXGAINERR_5GL		228
+#define SROM11_RXGAINERR_5GM		229
+#define SROM11_RXGAINERR_5GH		230
+#define SROM11_RXGAINERR_5GU		231
+
+#define SROM11_SIGN 			64
+#define SROM11_CRCREV 			233
+
+#define	SROM11_WORDS			234
+#define	SROM11_SIGNATURE		0x0634
+
+typedef struct {
+	uint8 tssipos;		/* TSSI positive slope, 1: positive, 0: negative */
+	uint8 extpagain;	/* Ext PA gain-type: full-gain: 0, pa-lite: 1, no_pa: 2 */
+	uint8 pdetrange;	/* support 32 combinations of different Pdet dynamic ranges */
+	uint8 triso;		/* TR switch isolation */
+	uint8 antswctrllut;	/* antswctrl lookup table configuration: 32 possible choices */
+} srom_fem_t;
+
+#endif	/* _bcmsrom_fmt_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmsrom_tbl.h b/drivers/net/wireless/bcmdhd/include/bcmsrom_tbl.h
new file mode 100644
index 0000000..6de9d3c
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmsrom_tbl.h
@@ -0,0 +1,1029 @@
+/*
+ * Table that encodes the srom formats for PCI/PCIe NICs.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsrom_tbl.h 471127 2014-04-17 23:24:23Z $
+ */
+
+#ifndef	_bcmsrom_tbl_h_
+#define	_bcmsrom_tbl_h_
+
+#include "sbpcmcia.h"
+#include "wlioctl.h"
+#include <bcmsrom_fmt.h>
+
+typedef struct {
+	const char *name;
+	uint32	revmask;
+	uint32	flags;
+	uint16	off;
+	uint16	mask;
+} sromvar_t;
+
+#define SRFL_MORE	1		/* value continues as described by the next entry */
+#define	SRFL_NOFFS	2		/* value bits can't be all one's */
+#define	SRFL_PRHEX	4		/* value is in hexdecimal format */
+#define	SRFL_PRSIGN	8		/* value is in signed decimal format */
+#define	SRFL_CCODE	0x10		/* value is in country code format */
+#define	SRFL_ETHADDR	0x20		/* value is an Ethernet address */
+#define SRFL_LEDDC	0x40		/* value is an LED duty cycle */
+#define SRFL_NOVAR	0x80		/* do not generate a nvram param, entry is for mfgc */
+#define SRFL_ARRAY	0x100		/* value is in an array. All elements EXCEPT FOR THE LAST
+					 * ONE in the array should have this flag set.
+					 */
+
+
+#define SROM_DEVID_PCIE	48
+
+/* Assumptions:
+ * - Ethernet address spans across 3 consective words
+ *
+ * Table rules:
+ * - Add multiple entries next to each other if a value spans across multiple words
+ *   (even multiple fields in the same word) with each entry except the last having
+ *   it's SRFL_MORE bit set.
+ * - Ethernet address entry does not follow above rule and must not have SRFL_MORE
+ *   bit set. Its SRFL_ETHADDR bit implies it takes multiple words.
+ * - The last entry's name field must be NULL to indicate the end of the table. Other
+ *   entries must have non-NULL name.
+ */
+
+static const sromvar_t pci_sromvars[] = {
+#if defined(CABLECPE)
+	{"devid",	0xffffff00,	SRFL_PRHEX,	PCI_F0DEVID,	0xffff},
+#elif defined(BCMPCIEDEV) && defined(BCMPCIEDEV_ENABLED)
+	{"devid",	0xffffff00,	SRFL_PRHEX, SROM_DEVID_PCIE, 0xffff},
+#else
+	{"devid",	0xffffff00,	SRFL_PRHEX|SRFL_NOVAR,	PCI_F0DEVID,	0xffff},
+#endif 
+	{"boardrev",	0x0000000e,	SRFL_PRHEX,	SROM_AABREV,		SROM_BR_MASK},
+	{"boardrev",	0x000000f0,	SRFL_PRHEX,	SROM4_BREV,		0xffff},
+	{"boardrev",	0xffffff00,	SRFL_PRHEX,	SROM8_BREV,		0xffff},
+	{"boardflags",	0x00000002,	SRFL_PRHEX,	SROM_BFL,		0xffff},
+	{"boardflags",	0x00000004,	SRFL_PRHEX|SRFL_MORE,	SROM_BFL,	0xffff},
+	{"",		0,		0,		SROM_BFL2,		0xffff},
+	{"boardflags",	0x00000008,	SRFL_PRHEX|SRFL_MORE,	SROM_BFL,	0xffff},
+	{"",		0,		0,		SROM3_BFL2,		0xffff},
+	{"boardflags",	0x00000010,	SRFL_PRHEX|SRFL_MORE,	SROM4_BFL0,	0xffff},
+	{"",		0,		0,		SROM4_BFL1,		0xffff},
+	{"boardflags",	0x000000e0,	SRFL_PRHEX|SRFL_MORE,	SROM5_BFL0,	0xffff},
+	{"",		0,		0,		SROM5_BFL1,		0xffff},
+	{"boardflags",	0xffffff00,	SRFL_PRHEX|SRFL_MORE,	SROM8_BFL0,	0xffff},
+	{"",		0,		0,		SROM8_BFL1,		0xffff},
+	{"boardflags2", 0x00000010,	SRFL_PRHEX|SRFL_MORE,	SROM4_BFL2,	0xffff},
+	{"",		0,		0,		SROM4_BFL3,		0xffff},
+	{"boardflags2", 0x000000e0,	SRFL_PRHEX|SRFL_MORE,	SROM5_BFL2,	0xffff},
+	{"",		0,		0,		SROM5_BFL3,		0xffff},
+	{"boardflags2", 0xffffff00,	SRFL_PRHEX|SRFL_MORE,	SROM8_BFL2,	0xffff},
+	{"",		0,		0,		SROM8_BFL3,		0xffff},
+	{"boardtype",	0xfffffffc,	SRFL_PRHEX,	SROM_SSID,		0xffff},
+	{"subvid",	0xfffffffc,	SRFL_PRHEX,	SROM_SVID,		0xffff},
+	{"boardnum",	0x00000006,	0,		SROM_MACLO_IL0,		0xffff},
+	{"boardnum",	0x00000008,	0,		SROM3_MACLO,		0xffff},
+	{"boardnum",	0x00000010,	0,		SROM4_MACLO,		0xffff},
+	{"boardnum",	0x000000e0,	0,		SROM5_MACLO,		0xffff},
+	{"boardnum",	0x00000700,	0,		SROM8_MACLO,		0xffff},
+	{"cc",		0x00000002,	0,		SROM_AABREV,		SROM_CC_MASK},
+	{"regrev",	0x00000008,	0,		SROM_OPO,		0xff00},
+	{"regrev",	0x00000010,	0,		SROM4_REGREV,		0x00ff},
+	{"regrev",	0x000000e0,	0,		SROM5_REGREV,		0x00ff},
+	{"regrev",	0x00000700,	0,		SROM8_REGREV,		0x00ff},
+	{"ledbh0",	0x0000000e,	SRFL_NOFFS,	SROM_LEDBH10,		0x00ff},
+	{"ledbh1",	0x0000000e,	SRFL_NOFFS,	SROM_LEDBH10,		0xff00},
+	{"ledbh2",	0x0000000e,	SRFL_NOFFS,	SROM_LEDBH32,		0x00ff},
+	{"ledbh3",	0x0000000e,	SRFL_NOFFS,	SROM_LEDBH32,		0xff00},
+	{"ledbh0",	0x00000010,	SRFL_NOFFS,	SROM4_LEDBH10,		0x00ff},
+	{"ledbh1",	0x00000010,	SRFL_NOFFS,	SROM4_LEDBH10,		0xff00},
+	{"ledbh2",	0x00000010,	SRFL_NOFFS,	SROM4_LEDBH32,		0x00ff},
+	{"ledbh3",	0x00000010,	SRFL_NOFFS,	SROM4_LEDBH32,		0xff00},
+	{"ledbh0",	0x000000e0,	SRFL_NOFFS,	SROM5_LEDBH10,		0x00ff},
+	{"ledbh1",	0x000000e0,	SRFL_NOFFS,	SROM5_LEDBH10,		0xff00},
+	{"ledbh2",	0x000000e0,	SRFL_NOFFS,	SROM5_LEDBH32,		0x00ff},
+	{"ledbh3",	0x000000e0,	SRFL_NOFFS,	SROM5_LEDBH32,		0xff00},
+	{"ledbh0",	0x00000700,	SRFL_NOFFS,	SROM8_LEDBH10,		0x00ff},
+	{"ledbh1",	0x00000700,	SRFL_NOFFS,	SROM8_LEDBH10,		0xff00},
+	{"ledbh2",	0x00000700,	SRFL_NOFFS,	SROM8_LEDBH32,		0x00ff},
+	{"ledbh3",	0x00000700,	SRFL_NOFFS,	SROM8_LEDBH32,		0xff00},
+	{"pa0b0",	0x0000000e,	SRFL_PRHEX,	SROM_WL0PAB0,		0xffff},
+	{"pa0b1",	0x0000000e,	SRFL_PRHEX,	SROM_WL0PAB1,		0xffff},
+	{"pa0b2",	0x0000000e,	SRFL_PRHEX,	SROM_WL0PAB2,		0xffff},
+	{"pa0itssit",	0x0000000e,	0,		SROM_ITT,		0x00ff},
+	{"pa0maxpwr",	0x0000000e,	0,		SROM_WL10MAXP,		0x00ff},
+	{"pa0b0",	0x00000700,	SRFL_PRHEX,	SROM8_W0_PAB0,		0xffff},
+	{"pa0b1",	0x00000700,	SRFL_PRHEX,	SROM8_W0_PAB1,		0xffff},
+	{"pa0b2",	0x00000700,	SRFL_PRHEX,	SROM8_W0_PAB2,		0xffff},
+	{"pa0itssit",	0x00000700,	0,		SROM8_W0_ITTMAXP,	0xff00},
+	{"pa0maxpwr",	0x00000700,	0,		SROM8_W0_ITTMAXP,	0x00ff},
+	{"opo",		0x0000000c,	0,		SROM_OPO,		0x00ff},
+	{"opo",		0x00000700,	0,		SROM8_2G_OFDMPO,	0x00ff},
+	{"aa2g",	0x0000000e,	0,		SROM_AABREV,		SROM_AA0_MASK},
+	{"aa2g",	0x000000f0,	0,		SROM4_AA,		0x00ff},
+	{"aa2g",	0x00000700,	0,		SROM8_AA,		0x00ff},
+	{"aa5g",	0x0000000e,	0,		SROM_AABREV,		SROM_AA1_MASK},
+	{"aa5g",	0x000000f0,	0,		SROM4_AA,		0xff00},
+	{"aa5g",	0x00000700,	0,		SROM8_AA,		0xff00},
+	{"ag0",		0x0000000e,	0,		SROM_AG10,		0x00ff},
+	{"ag1",		0x0000000e,	0,		SROM_AG10,		0xff00},
+	{"ag0",		0x000000f0,	0,		SROM4_AG10,		0x00ff},
+	{"ag1",		0x000000f0,	0,		SROM4_AG10,		0xff00},
+	{"ag2",		0x000000f0,	0,		SROM4_AG32,		0x00ff},
+	{"ag3",		0x000000f0,	0,		SROM4_AG32,		0xff00},
+	{"ag0",		0x00000700,	0,		SROM8_AG10,		0x00ff},
+	{"ag1",		0x00000700,	0,		SROM8_AG10,		0xff00},
+	{"ag2",		0x00000700,	0,		SROM8_AG32,		0x00ff},
+	{"ag3",		0x00000700,	0,		SROM8_AG32,		0xff00},
+	{"pa1b0",	0x0000000e,	SRFL_PRHEX,	SROM_WL1PAB0,		0xffff},
+	{"pa1b1",	0x0000000e,	SRFL_PRHEX,	SROM_WL1PAB1,		0xffff},
+	{"pa1b2",	0x0000000e,	SRFL_PRHEX,	SROM_WL1PAB2,		0xffff},
+	{"pa1lob0",	0x0000000c,	SRFL_PRHEX,	SROM_WL1LPAB0,		0xffff},
+	{"pa1lob1",	0x0000000c,	SRFL_PRHEX,	SROM_WL1LPAB1,		0xffff},
+	{"pa1lob2",	0x0000000c,	SRFL_PRHEX,	SROM_WL1LPAB2,		0xffff},
+	{"pa1hib0",	0x0000000c,	SRFL_PRHEX,	SROM_WL1HPAB0,		0xffff},
+	{"pa1hib1",	0x0000000c,	SRFL_PRHEX,	SROM_WL1HPAB1,		0xffff},
+	{"pa1hib2",	0x0000000c,	SRFL_PRHEX,	SROM_WL1HPAB2,		0xffff},
+	{"pa1itssit",	0x0000000e,	0,		SROM_ITT,		0xff00},
+	{"pa1maxpwr",	0x0000000e,	0,		SROM_WL10MAXP,		0xff00},
+	{"pa1lomaxpwr",	0x0000000c,	0,		SROM_WL1LHMAXP,		0xff00},
+	{"pa1himaxpwr",	0x0000000c,	0,		SROM_WL1LHMAXP,		0x00ff},
+	{"pa1b0",	0x00000700,	SRFL_PRHEX,	SROM8_W1_PAB0,		0xffff},
+	{"pa1b1",	0x00000700,	SRFL_PRHEX,	SROM8_W1_PAB1,		0xffff},
+	{"pa1b2",	0x00000700,	SRFL_PRHEX,	SROM8_W1_PAB2,		0xffff},
+	{"pa1lob0",	0x00000700,	SRFL_PRHEX,	SROM8_W1_PAB0_LC,	0xffff},
+	{"pa1lob1",	0x00000700,	SRFL_PRHEX,	SROM8_W1_PAB1_LC,	0xffff},
+	{"pa1lob2",	0x00000700,	SRFL_PRHEX,	SROM8_W1_PAB2_LC,	0xffff},
+	{"pa1hib0",	0x00000700,	SRFL_PRHEX,	SROM8_W1_PAB0_HC,	0xffff},
+	{"pa1hib1",	0x00000700,	SRFL_PRHEX,	SROM8_W1_PAB1_HC,	0xffff},
+	{"pa1hib2",	0x00000700,	SRFL_PRHEX,	SROM8_W1_PAB2_HC,	0xffff},
+	{"pa1itssit",	0x00000700,	0,		SROM8_W1_ITTMAXP,	0xff00},
+	{"pa1maxpwr",	0x00000700,	0,		SROM8_W1_ITTMAXP,	0x00ff},
+	{"pa1lomaxpwr",	0x00000700,	0,		SROM8_W1_MAXP_LCHC,	0xff00},
+	{"pa1himaxpwr",	0x00000700,	0,		SROM8_W1_MAXP_LCHC,	0x00ff},
+	{"bxa2g",	0x00000008,	0,		SROM_BXARSSI2G,		0x1800},
+	{"rssisav2g",	0x00000008,	0,		SROM_BXARSSI2G,		0x0700},
+	{"rssismc2g",	0x00000008,	0,		SROM_BXARSSI2G,		0x00f0},
+	{"rssismf2g",	0x00000008,	0,		SROM_BXARSSI2G,		0x000f},
+	{"bxa2g",	0x00000700,	0,		SROM8_BXARSSI2G,	0x1800},
+	{"rssisav2g",	0x00000700,	0,		SROM8_BXARSSI2G,	0x0700},
+	{"rssismc2g",	0x00000700,	0,		SROM8_BXARSSI2G,	0x00f0},
+	{"rssismf2g",	0x00000700,	0,		SROM8_BXARSSI2G,	0x000f},
+	{"bxa5g",	0x00000008,	0,		SROM_BXARSSI5G,		0x1800},
+	{"rssisav5g",	0x00000008,	0,		SROM_BXARSSI5G,		0x0700},
+	{"rssismc5g",	0x00000008,	0,		SROM_BXARSSI5G,		0x00f0},
+	{"rssismf5g",	0x00000008,	0,		SROM_BXARSSI5G,		0x000f},
+	{"bxa5g",	0x00000700,	0,		SROM8_BXARSSI5G,	0x1800},
+	{"rssisav5g",	0x00000700,	0,		SROM8_BXARSSI5G,	0x0700},
+	{"rssismc5g",	0x00000700,	0,		SROM8_BXARSSI5G,	0x00f0},
+	{"rssismf5g",	0x00000700,	0,		SROM8_BXARSSI5G,	0x000f},
+	{"tri2g",	0x00000008,	0,		SROM_TRI52G,		0x00ff},
+	{"tri5g",	0x00000008,	0,		SROM_TRI52G,		0xff00},
+	{"tri5gl",	0x00000008,	0,		SROM_TRI5GHL,		0x00ff},
+	{"tri5gh",	0x00000008,	0,		SROM_TRI5GHL,		0xff00},
+	{"tri2g",	0x00000700,	0,		SROM8_TRI52G,		0x00ff},
+	{"tri5g",	0x00000700,	0,		SROM8_TRI52G,		0xff00},
+	{"tri5gl",	0x00000700,	0,		SROM8_TRI5GHL,		0x00ff},
+	{"tri5gh",	0x00000700,	0,		SROM8_TRI5GHL,		0xff00},
+	{"rxpo2g",	0x00000008,	SRFL_PRSIGN,	SROM_RXPO52G,		0x00ff},
+	{"rxpo5g",	0x00000008,	SRFL_PRSIGN,	SROM_RXPO52G,		0xff00},
+	{"rxpo2g",	0x00000700,	SRFL_PRSIGN,	SROM8_RXPO52G,		0x00ff},
+	{"rxpo5g",	0x00000700,	SRFL_PRSIGN,	SROM8_RXPO52G,		0xff00},
+	{"txchain",	0x000000f0,	SRFL_NOFFS,	SROM4_TXRXC,		SROM4_TXCHAIN_MASK},
+	{"rxchain",	0x000000f0,	SRFL_NOFFS,	SROM4_TXRXC,		SROM4_RXCHAIN_MASK},
+	{"antswitch",	0x000000f0,	SRFL_NOFFS,	SROM4_TXRXC,		SROM4_SWITCH_MASK},
+	{"txchain",	0x00000700,	SRFL_NOFFS,	SROM8_TXRXC,		SROM4_TXCHAIN_MASK},
+	{"rxchain",	0x00000700,	SRFL_NOFFS,	SROM8_TXRXC,		SROM4_RXCHAIN_MASK},
+	{"antswitch",	0x00000700,	SRFL_NOFFS,	SROM8_TXRXC,		SROM4_SWITCH_MASK},
+	{"tssipos2g",	0x00000700,	0,		SROM8_FEM2G,	SROM8_FEM_TSSIPOS_MASK},
+	{"extpagain2g",	0x00000700,	0,		SROM8_FEM2G,	SROM8_FEM_EXTPA_GAIN_MASK},
+	{"pdetrange2g",	0x00000700,	0,		SROM8_FEM2G,	SROM8_FEM_PDET_RANGE_MASK},
+	{"triso2g",	0x00000700,	0,		SROM8_FEM2G,	SROM8_FEM_TR_ISO_MASK},
+	{"antswctl2g",	0x00000700,	0,		SROM8_FEM2G,	SROM8_FEM_ANTSWLUT_MASK},
+	{"tssipos5g",	0x00000700,	0,		SROM8_FEM5G,	SROM8_FEM_TSSIPOS_MASK},
+	{"extpagain5g",	0x00000700,	0,		SROM8_FEM5G,	SROM8_FEM_EXTPA_GAIN_MASK},
+	{"pdetrange5g",	0x00000700,	0,		SROM8_FEM5G,	SROM8_FEM_PDET_RANGE_MASK},
+	{"triso5g",	0x00000700,	0,		SROM8_FEM5G,	SROM8_FEM_TR_ISO_MASK},
+	{"antswctl5g",	0x00000700,	0,		SROM8_FEM5G,	SROM8_FEM_ANTSWLUT_MASK},
+	{"txpid2ga0",	0x000000f0,	0,		SROM4_TXPID2G,		0x00ff},
+	{"txpid2ga1",	0x000000f0,	0,		SROM4_TXPID2G,		0xff00},
+	{"txpid2ga2",	0x000000f0,	0,		SROM4_TXPID2G + 1,	0x00ff},
+	{"txpid2ga3",	0x000000f0,	0,		SROM4_TXPID2G + 1,	0xff00},
+	{"txpid5ga0",	0x000000f0,	0,		SROM4_TXPID5G,		0x00ff},
+	{"txpid5ga1",	0x000000f0,	0,		SROM4_TXPID5G,		0xff00},
+	{"txpid5ga2",	0x000000f0,	0,		SROM4_TXPID5G + 1,	0x00ff},
+	{"txpid5ga3",	0x000000f0,	0,		SROM4_TXPID5G + 1,	0xff00},
+	{"txpid5gla0",	0x000000f0,	0,		SROM4_TXPID5GL,		0x00ff},
+	{"txpid5gla1",	0x000000f0,	0,		SROM4_TXPID5GL,		0xff00},
+	{"txpid5gla2",	0x000000f0,	0,		SROM4_TXPID5GL + 1,	0x00ff},
+	{"txpid5gla3",	0x000000f0,	0,		SROM4_TXPID5GL + 1,	0xff00},
+	{"txpid5gha0",	0x000000f0,	0,		SROM4_TXPID5GH,		0x00ff},
+	{"txpid5gha1",	0x000000f0,	0,		SROM4_TXPID5GH,		0xff00},
+	{"txpid5gha2",	0x000000f0,	0,		SROM4_TXPID5GH + 1,	0x00ff},
+	{"txpid5gha3",	0x000000f0,	0,		SROM4_TXPID5GH + 1,	0xff00},
+
+	{"ccode",	0x0000000f,	SRFL_CCODE,	SROM_CCODE,		0xffff},
+	{"ccode",	0x00000010,	SRFL_CCODE,	SROM4_CCODE,		0xffff},
+	{"ccode",	0x000000e0,	SRFL_CCODE,	SROM5_CCODE,		0xffff},
+	{"ccode",	0x00000700,	SRFL_CCODE,	SROM8_CCODE,		0xffff},
+	{"macaddr",	0x00000700,	SRFL_ETHADDR,	SROM8_MACHI,		0xffff},
+	{"macaddr",	0x000000e0,	SRFL_ETHADDR,	SROM5_MACHI,		0xffff},
+	{"macaddr",	0x00000010,	SRFL_ETHADDR,	SROM4_MACHI,		0xffff},
+	{"macaddr",	0x00000008,	SRFL_ETHADDR,	SROM3_MACHI,		0xffff},
+	{"il0macaddr",	0x00000007,	SRFL_ETHADDR,	SROM_MACHI_IL0,		0xffff},
+	{"et1macaddr",	0x00000007,	SRFL_ETHADDR,	SROM_MACHI_ET1,		0xffff},
+	{"leddc",	0x00000700,	SRFL_NOFFS|SRFL_LEDDC,	SROM8_LEDDC,	0xffff},
+	{"leddc",	0x000000e0,	SRFL_NOFFS|SRFL_LEDDC,	SROM5_LEDDC,	0xffff},
+	{"leddc",	0x00000010,	SRFL_NOFFS|SRFL_LEDDC,	SROM4_LEDDC,	0xffff},
+	{"leddc",	0x00000008,	SRFL_NOFFS|SRFL_LEDDC,	SROM3_LEDDC,	0xffff},
+
+	{"tempthresh",	0x00000700,	0,		SROM8_THERMAL,		0xff00},
+	{"tempoffset",	0x00000700,	0,		SROM8_THERMAL,		0x00ff},
+	{"rawtempsense", 0x00000700,	SRFL_PRHEX,	SROM8_MPWR_RAWTS,	0x01ff},
+	{"measpower",	0x00000700,	SRFL_PRHEX,	SROM8_MPWR_RAWTS,	0xfe00},
+	{"tempsense_slope",	0x00000700,	SRFL_PRHEX, 	SROM8_TS_SLP_OPT_CORRX,	0x00ff},
+	{"tempcorrx",	0x00000700,	SRFL_PRHEX, 	SROM8_TS_SLP_OPT_CORRX,	0xfc00},
+	{"tempsense_option",	0x00000700,	SRFL_PRHEX,	SROM8_TS_SLP_OPT_CORRX,	0x0300},
+	{"freqoffset_corr",	0x00000700,	SRFL_PRHEX,	SROM8_FOC_HWIQ_IQSWP,	0x000f},
+	{"iqcal_swp_dis",	0x00000700,	SRFL_PRHEX,	SROM8_FOC_HWIQ_IQSWP,	0x0010},
+	{"hw_iqcal_en",	0x00000700,	SRFL_PRHEX,	SROM8_FOC_HWIQ_IQSWP,	0x0020},
+	{"elna2g",      0x00000700,     0,              SROM8_EXTLNAGAIN,       0x00ff},
+	{"elna5g",      0x00000700,     0,              SROM8_EXTLNAGAIN,       0xff00},
+	{"phycal_tempdelta",	0x00000700,	0,	SROM8_PHYCAL_TEMPDELTA,	0x00ff},
+	{"temps_period",	0x00000700,	0,	SROM8_PHYCAL_TEMPDELTA,	0x0f00},
+	{"temps_hysteresis",	0x00000700,	0,	SROM8_PHYCAL_TEMPDELTA,	0xf000},
+	{"measpower1", 0x00000700,	SRFL_PRHEX, SROM8_MPWR_1_AND_2, 	0x007f},
+	{"measpower2",	0x00000700, 	SRFL_PRHEX, SROM8_MPWR_1_AND_2, 	0x3f80},
+
+	{"cck2gpo",	0x000000f0,	0,		SROM4_2G_CCKPO,		0xffff},
+	{"cck2gpo",	0x00000100,	0,		SROM8_2G_CCKPO,		0xffff},
+	{"ofdm2gpo",	0x000000f0,	SRFL_MORE,	SROM4_2G_OFDMPO,	0xffff},
+	{"",		0,		0,		SROM4_2G_OFDMPO + 1,	0xffff},
+	{"ofdm5gpo",	0x000000f0,	SRFL_MORE,	SROM4_5G_OFDMPO,	0xffff},
+	{"",		0,		0,		SROM4_5G_OFDMPO + 1,	0xffff},
+	{"ofdm5glpo",	0x000000f0,	SRFL_MORE,	SROM4_5GL_OFDMPO,	0xffff},
+	{"",		0,		0,		SROM4_5GL_OFDMPO + 1,	0xffff},
+	{"ofdm5ghpo",	0x000000f0,	SRFL_MORE,	SROM4_5GH_OFDMPO,	0xffff},
+	{"",		0,		0,		SROM4_5GH_OFDMPO + 1,	0xffff},
+	{"ofdm2gpo",	0x00000100,	SRFL_MORE,	SROM8_2G_OFDMPO,	0xffff},
+	{"",		0,		0,		SROM8_2G_OFDMPO + 1,	0xffff},
+	{"ofdm5gpo",	0x00000100,	SRFL_MORE,	SROM8_5G_OFDMPO,	0xffff},
+	{"",		0,		0,		SROM8_5G_OFDMPO + 1,	0xffff},
+	{"ofdm5glpo",	0x00000100,	SRFL_MORE,	SROM8_5GL_OFDMPO,	0xffff},
+	{"",		0,		0,		SROM8_5GL_OFDMPO + 1,	0xffff},
+	{"ofdm5ghpo",	0x00000100,	SRFL_MORE,	SROM8_5GH_OFDMPO,	0xffff},
+	{"",		0,		0,		SROM8_5GH_OFDMPO + 1,	0xffff},
+	{"mcs2gpo0",	0x000000f0,	0,		SROM4_2G_MCSPO,		0xffff},
+	{"mcs2gpo1",	0x000000f0,	0,		SROM4_2G_MCSPO + 1,	0xffff},
+	{"mcs2gpo2",	0x000000f0,	0,		SROM4_2G_MCSPO + 2,	0xffff},
+	{"mcs2gpo3",	0x000000f0,	0,		SROM4_2G_MCSPO + 3,	0xffff},
+	{"mcs2gpo4",	0x000000f0,	0,		SROM4_2G_MCSPO + 4,	0xffff},
+	{"mcs2gpo5",	0x000000f0,	0,		SROM4_2G_MCSPO + 5,	0xffff},
+	{"mcs2gpo6",	0x000000f0,	0,		SROM4_2G_MCSPO + 6,	0xffff},
+	{"mcs2gpo7",	0x000000f0,	0,		SROM4_2G_MCSPO + 7,	0xffff},
+	{"mcs5gpo0",	0x000000f0,	0,		SROM4_5G_MCSPO,		0xffff},
+	{"mcs5gpo1",	0x000000f0,	0,		SROM4_5G_MCSPO + 1,	0xffff},
+	{"mcs5gpo2",	0x000000f0,	0,		SROM4_5G_MCSPO + 2,	0xffff},
+	{"mcs5gpo3",	0x000000f0,	0,		SROM4_5G_MCSPO + 3,	0xffff},
+	{"mcs5gpo4",	0x000000f0,	0,		SROM4_5G_MCSPO + 4,	0xffff},
+	{"mcs5gpo5",	0x000000f0,	0,		SROM4_5G_MCSPO + 5,	0xffff},
+	{"mcs5gpo6",	0x000000f0,	0,		SROM4_5G_MCSPO + 6,	0xffff},
+	{"mcs5gpo7",	0x000000f0,	0,		SROM4_5G_MCSPO + 7,	0xffff},
+	{"mcs5glpo0",	0x000000f0,	0,		SROM4_5GL_MCSPO,	0xffff},
+	{"mcs5glpo1",	0x000000f0,	0,		SROM4_5GL_MCSPO + 1,	0xffff},
+	{"mcs5glpo2",	0x000000f0,	0,		SROM4_5GL_MCSPO + 2,	0xffff},
+	{"mcs5glpo3",	0x000000f0,	0,		SROM4_5GL_MCSPO + 3,	0xffff},
+	{"mcs5glpo4",	0x000000f0,	0,		SROM4_5GL_MCSPO + 4,	0xffff},
+	{"mcs5glpo5",	0x000000f0,	0,		SROM4_5GL_MCSPO + 5,	0xffff},
+	{"mcs5glpo6",	0x000000f0,	0,		SROM4_5GL_MCSPO + 6,	0xffff},
+	{"mcs5glpo7",	0x000000f0,	0,		SROM4_5GL_MCSPO + 7,	0xffff},
+	{"mcs5ghpo0",	0x000000f0,	0,		SROM4_5GH_MCSPO,	0xffff},
+	{"mcs5ghpo1",	0x000000f0,	0,		SROM4_5GH_MCSPO + 1,	0xffff},
+	{"mcs5ghpo2",	0x000000f0,	0,		SROM4_5GH_MCSPO + 2,	0xffff},
+	{"mcs5ghpo3",	0x000000f0,	0,		SROM4_5GH_MCSPO + 3,	0xffff},
+	{"mcs5ghpo4",	0x000000f0,	0,		SROM4_5GH_MCSPO + 4,	0xffff},
+	{"mcs5ghpo5",	0x000000f0,	0,		SROM4_5GH_MCSPO + 5,	0xffff},
+	{"mcs5ghpo6",	0x000000f0,	0,		SROM4_5GH_MCSPO + 6,	0xffff},
+	{"mcs5ghpo7",	0x000000f0,	0,		SROM4_5GH_MCSPO + 7,	0xffff},
+	{"mcs2gpo0",	0x00000100,	0,		SROM8_2G_MCSPO,		0xffff},
+	{"mcs2gpo1",	0x00000100,	0,		SROM8_2G_MCSPO + 1,	0xffff},
+	{"mcs2gpo2",	0x00000100,	0,		SROM8_2G_MCSPO + 2,	0xffff},
+	{"mcs2gpo3",	0x00000100,	0,		SROM8_2G_MCSPO + 3,	0xffff},
+	{"mcs2gpo4",	0x00000100,	0,		SROM8_2G_MCSPO + 4,	0xffff},
+	{"mcs2gpo5",	0x00000100,	0,		SROM8_2G_MCSPO + 5,	0xffff},
+	{"mcs2gpo6",	0x00000100,	0,		SROM8_2G_MCSPO + 6,	0xffff},
+	{"mcs2gpo7",	0x00000100,	0,		SROM8_2G_MCSPO + 7,	0xffff},
+	{"mcs5gpo0",	0x00000100,	0,		SROM8_5G_MCSPO,		0xffff},
+	{"mcs5gpo1",	0x00000100,	0,		SROM8_5G_MCSPO + 1,	0xffff},
+	{"mcs5gpo2",	0x00000100,	0,		SROM8_5G_MCSPO + 2,	0xffff},
+	{"mcs5gpo3",	0x00000100,	0,		SROM8_5G_MCSPO + 3,	0xffff},
+	{"mcs5gpo4",	0x00000100,	0,		SROM8_5G_MCSPO + 4,	0xffff},
+	{"mcs5gpo5",	0x00000100,	0,		SROM8_5G_MCSPO + 5,	0xffff},
+	{"mcs5gpo6",	0x00000100,	0,		SROM8_5G_MCSPO + 6,	0xffff},
+	{"mcs5gpo7",	0x00000100,	0,		SROM8_5G_MCSPO + 7,	0xffff},
+	{"mcs5glpo0",	0x00000100,	0,		SROM8_5GL_MCSPO,	0xffff},
+	{"mcs5glpo1",	0x00000100,	0,		SROM8_5GL_MCSPO + 1,	0xffff},
+	{"mcs5glpo2",	0x00000100,	0,		SROM8_5GL_MCSPO + 2,	0xffff},
+	{"mcs5glpo3",	0x00000100,	0,		SROM8_5GL_MCSPO + 3,	0xffff},
+	{"mcs5glpo4",	0x00000100,	0,		SROM8_5GL_MCSPO + 4,	0xffff},
+	{"mcs5glpo5",	0x00000100,	0,		SROM8_5GL_MCSPO + 5,	0xffff},
+	{"mcs5glpo6",	0x00000100,	0,		SROM8_5GL_MCSPO + 6,	0xffff},
+	{"mcs5glpo7",	0x00000100,	0,		SROM8_5GL_MCSPO + 7,	0xffff},
+	{"mcs5ghpo0",	0x00000100,	0,		SROM8_5GH_MCSPO,	0xffff},
+	{"mcs5ghpo1",	0x00000100,	0,		SROM8_5GH_MCSPO + 1,	0xffff},
+	{"mcs5ghpo2",	0x00000100,	0,		SROM8_5GH_MCSPO + 2,	0xffff},
+	{"mcs5ghpo3",	0x00000100,	0,		SROM8_5GH_MCSPO + 3,	0xffff},
+	{"mcs5ghpo4",	0x00000100,	0,		SROM8_5GH_MCSPO + 4,	0xffff},
+	{"mcs5ghpo5",	0x00000100,	0,		SROM8_5GH_MCSPO + 5,	0xffff},
+	{"mcs5ghpo6",	0x00000100,	0,		SROM8_5GH_MCSPO + 6,	0xffff},
+	{"mcs5ghpo7",	0x00000100,	0,		SROM8_5GH_MCSPO + 7,	0xffff},
+	{"cddpo",	0x000000f0,	0,		SROM4_CDDPO,		0xffff},
+	{"stbcpo",	0x000000f0,	0,		SROM4_STBCPO,		0xffff},
+	{"bw40po",	0x000000f0,	0,		SROM4_BW40PO,		0xffff},
+	{"bwduppo",	0x000000f0,	0,		SROM4_BWDUPPO,		0xffff},
+	{"cddpo",	0x00000100,	0,		SROM8_CDDPO,		0xffff},
+	{"stbcpo",	0x00000100,	0,		SROM8_STBCPO,		0xffff},
+	{"bw40po",	0x00000100,	0,		SROM8_BW40PO,		0xffff},
+	{"bwduppo",	0x00000100,	0,		SROM8_BWDUPPO,		0xffff},
+
+	/* power per rate from sromrev 9 */
+	{"cckbw202gpo",		0x00000600,	0,	SROM9_2GPO_CCKBW20,		0xffff},
+	{"cckbw20ul2gpo",	0x00000600,	0,	SROM9_2GPO_CCKBW20UL,		0xffff},
+	{"legofdmbw202gpo",	0x00000600,	SRFL_MORE, SROM9_2GPO_LOFDMBW20,	0xffff},
+	{"",			0,		0,	SROM9_2GPO_LOFDMBW20 + 1,	0xffff},
+	{"legofdmbw20ul2gpo",	0x00000600,	SRFL_MORE, SROM9_2GPO_LOFDMBW20UL,	0xffff},
+	{"",			0,		0,	SROM9_2GPO_LOFDMBW20UL + 1,	0xffff},
+	{"legofdmbw205glpo",	0x00000600,	SRFL_MORE, SROM9_5GLPO_LOFDMBW20,	0xffff},
+	{"",			0,		0,	SROM9_5GLPO_LOFDMBW20 + 1,	0xffff},
+	{"legofdmbw20ul5glpo",	0x00000600,	SRFL_MORE, SROM9_5GLPO_LOFDMBW20UL,	0xffff},
+	{"",			0,		0,	SROM9_5GLPO_LOFDMBW20UL + 1,	0xffff},
+	{"legofdmbw205gmpo",	0x00000600,	SRFL_MORE, SROM9_5GMPO_LOFDMBW20,	0xffff},
+	{"",			0,		0,	SROM9_5GMPO_LOFDMBW20 + 1,	0xffff},
+	{"legofdmbw20ul5gmpo",	0x00000600,	SRFL_MORE, SROM9_5GMPO_LOFDMBW20UL,	0xffff},
+	{"",			0,		0,	SROM9_5GMPO_LOFDMBW20UL + 1,	0xffff},
+	{"legofdmbw205ghpo",	0x00000600,	SRFL_MORE, SROM9_5GHPO_LOFDMBW20,	0xffff},
+	{"",			0,		0,	SROM9_5GHPO_LOFDMBW20 + 1,	0xffff},
+	{"legofdmbw20ul5ghpo",	0x00000600,	SRFL_MORE, SROM9_5GHPO_LOFDMBW20UL,	0xffff},
+	{"",			0,		0,	SROM9_5GHPO_LOFDMBW20UL + 1,	0xffff},
+	{"mcsbw202gpo",		0x00000600,	SRFL_MORE, SROM9_2GPO_MCSBW20,		0xffff},
+	{"",			0,		0,	SROM9_2GPO_MCSBW20 + 1,		0xffff},
+	{"mcsbw20ul2gpo",      	0x00000600,	SRFL_MORE, SROM9_2GPO_MCSBW20UL,	0xffff},
+	{"",			0,		0,	SROM9_2GPO_MCSBW20UL + 1,	0xffff},
+	{"mcsbw402gpo",		0x00000600,	SRFL_MORE, SROM9_2GPO_MCSBW40,		0xffff},
+	{"",			0,		0,	SROM9_2GPO_MCSBW40 + 1,		0xffff},
+	{"mcsbw205glpo",	0x00000600,	SRFL_MORE, SROM9_5GLPO_MCSBW20,		0xffff},
+	{"",			0,		0,	SROM9_5GLPO_MCSBW20 + 1,	0xffff},
+	{"mcsbw20ul5glpo",	0x00000600,	SRFL_MORE, SROM9_5GLPO_MCSBW20UL,	0xffff},
+	{"",			0,		0,	SROM9_5GLPO_MCSBW20UL + 1,	0xffff},
+	{"mcsbw405glpo",	0x00000600,	SRFL_MORE, SROM9_5GLPO_MCSBW40,		0xffff},
+	{"",			0,		0,	SROM9_5GLPO_MCSBW40 + 1,	0xffff},
+	{"mcsbw205gmpo",	0x00000600,	SRFL_MORE, SROM9_5GMPO_MCSBW20,		0xffff},
+	{"",			0,		0,	SROM9_5GMPO_MCSBW20 + 1,	0xffff},
+	{"mcsbw20ul5gmpo",	0x00000600,	SRFL_MORE, SROM9_5GMPO_MCSBW20UL,	0xffff},
+	{"",			0,		0,	SROM9_5GMPO_MCSBW20UL + 1,	0xffff},
+	{"mcsbw405gmpo",	0x00000600,	SRFL_MORE, SROM9_5GMPO_MCSBW40,		0xffff},
+	{"",			0,		0,	SROM9_5GMPO_MCSBW40 + 1,	0xffff},
+	{"mcsbw205ghpo",	0x00000600,	SRFL_MORE, SROM9_5GHPO_MCSBW20,		0xffff},
+	{"",			0,		0,	SROM9_5GHPO_MCSBW20 + 1,	0xffff},
+	{"mcsbw20ul5ghpo",	0x00000600,	SRFL_MORE, SROM9_5GHPO_MCSBW20UL,	0xffff},
+	{"",			0,		0,	SROM9_5GHPO_MCSBW20UL + 1,	0xffff},
+	{"mcsbw405ghpo",	0x00000600,	SRFL_MORE, SROM9_5GHPO_MCSBW40,		0xffff},
+	{"",			0,		0,	SROM9_5GHPO_MCSBW40 + 1,	0xffff},
+	{"mcs32po",		0x00000600,	0,	SROM9_PO_MCS32,			0xffff},
+	{"legofdm40duppo",	0x00000600,	0,	SROM9_PO_LOFDM40DUP,	0xffff},
+	{"pcieingress_war",	0x00000700,	0,	SROM8_PCIEINGRESS_WAR,	0xf},
+	{"rxgainerr2ga0",	0x00000700,	0,	SROM8_RXGAINERR_2G,		0x003f},
+	{"rxgainerr2ga1",	0x00000700,	0,	SROM8_RXGAINERR_2G,		0x07c0},
+	{"rxgainerr2ga2",	0x00000700,	0,	SROM8_RXGAINERR_2G,		0xf800},
+	{"rxgainerr5gla0",	0x00000700,	0,	SROM8_RXGAINERR_5GL,	0x003f},
+	{"rxgainerr5gla1",	0x00000700,	0,	SROM8_RXGAINERR_5GL,	0x07c0},
+	{"rxgainerr5gla2",	0x00000700,	0,	SROM8_RXGAINERR_5GL,	0xf800},
+	{"rxgainerr5gma0",	0x00000700,	0,	SROM8_RXGAINERR_5GM,	0x003f},
+	{"rxgainerr5gma1",	0x00000700,	0,	SROM8_RXGAINERR_5GM,	0x07c0},
+	{"rxgainerr5gma2",	0x00000700,	0,	SROM8_RXGAINERR_5GM,	0xf800},
+	{"rxgainerr5gha0",	0x00000700,	0,	SROM8_RXGAINERR_5GH,	0x003f},
+	{"rxgainerr5gha1",	0x00000700,	0,	SROM8_RXGAINERR_5GH,	0x07c0},
+	{"rxgainerr5gha2",	0x00000700,	0,	SROM8_RXGAINERR_5GH,	0xf800},
+	{"rxgainerr5gua0",	0x00000700,	0,	SROM8_RXGAINERR_5GU,	0x003f},
+	{"rxgainerr5gua1",	0x00000700,	0,	SROM8_RXGAINERR_5GU,	0x07c0},
+	{"rxgainerr5gua2",	0x00000700,	0,	SROM8_RXGAINERR_5GU,	0xf800},
+	{"sar2g",       	0x00000600,	0,	SROM9_SAR,          	0x00ff},
+	{"sar5g",           0x00000600,	0,	SROM9_SAR,	            0xff00},
+	{"noiselvl2ga0",	0x00000700,	0,	SROM8_NOISELVL_2G,		0x001f},
+	{"noiselvl2ga1",	0x00000700,	0,	SROM8_NOISELVL_2G,		0x03e0},
+	{"noiselvl2ga2",	0x00000700,	0,	SROM8_NOISELVL_2G,		0x7c00},
+	{"noiselvl5gla0",	0x00000700,	0,	SROM8_NOISELVL_5GL,		0x001f},
+	{"noiselvl5gla1",	0x00000700,	0,	SROM8_NOISELVL_5GL,		0x03e0},
+	{"noiselvl5gla2",	0x00000700,	0,	SROM8_NOISELVL_5GL,		0x7c00},
+	{"noiselvl5gma0",	0x00000700,	0,	SROM8_NOISELVL_5GM,		0x001f},
+	{"noiselvl5gma1",	0x00000700,	0,	SROM8_NOISELVL_5GM,		0x03e0},
+	{"noiselvl5gma2",	0x00000700,	0,	SROM8_NOISELVL_5GM,		0x7c00},
+	{"noiselvl5gha0",	0x00000700,	0,	SROM8_NOISELVL_5GH,		0x001f},
+	{"noiselvl5gha1",	0x00000700,	0,	SROM8_NOISELVL_5GH,		0x03e0},
+	{"noiselvl5gha2",	0x00000700,	0,	SROM8_NOISELVL_5GH,		0x7c00},
+	{"noiselvl5gua0",	0x00000700,	0,	SROM8_NOISELVL_5GU,		0x001f},
+	{"noiselvl5gua1",	0x00000700,	0,	SROM8_NOISELVL_5GU,		0x03e0},
+	{"noiselvl5gua2",	0x00000700,	0,	SROM8_NOISELVL_5GU,		0x7c00},
+	{"noisecaloffset",	0x00000300,	0,	SROM8_NOISECALOFFSET,		0x00ff},
+	{"noisecaloffset5g",	0x00000300,	0,	SROM8_NOISECALOFFSET,		0xff00},
+	{"subband5gver",	0x00000700,	0,	SROM8_SUBBAND_PPR,		0x7},
+
+	{"cckPwrOffset",	0x00000400,	0,	SROM10_CCKPWROFFSET,		0xffff},
+	/* swctrlmap_2g array, note that the last element doesn't have SRFL_ARRAY flag set */
+	{"swctrlmap_2g", 0x00000400, SRFL_MORE|SRFL_PRHEX|SRFL_ARRAY, SROM10_SWCTRLMAP_2G, 0xffff},
+	{"",	0x00000400, SRFL_ARRAY,	SROM10_SWCTRLMAP_2G + 1,			0xffff},
+	{"",	0x00000400, SRFL_MORE|SRFL_PRHEX|SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 2, 	0xffff},
+	{"",	0x00000400, SRFL_ARRAY,	SROM10_SWCTRLMAP_2G + 3,			0xffff},
+	{"",	0x00000400, SRFL_MORE|SRFL_PRHEX|SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 4,	0xffff},
+	{"",	0x00000400, SRFL_ARRAY,	SROM10_SWCTRLMAP_2G + 5,			0xffff},
+	{"",	0x00000400, SRFL_MORE|SRFL_PRHEX|SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 6,	0xffff},
+	{"",	0x00000400, SRFL_ARRAY,	SROM10_SWCTRLMAP_2G + 7,			0xffff},
+	{"",	0x00000400, SRFL_PRHEX,	SROM10_SWCTRLMAP_2G + 8,			0xffff},
+
+	/* sromrev 11 */
+	{"boardflags3",	0xfffff800,	SRFL_PRHEX|SRFL_MORE,	SROM11_BFL4,	0xffff},
+	{"",		0,		0,			SROM11_BFL5,	0xffff},
+	{"boardnum",	0xfffff800,	0,			SROM11_MACLO,	0xffff},
+	{"macaddr",	0xfffff800,	SRFL_ETHADDR,		SROM11_MACHI,	0xffff},
+	{"ccode",	0xfffff800,	SRFL_CCODE,		SROM11_CCODE,	0xffff},
+	{"regrev",	0xfffff800,	0,			SROM11_REGREV,	0x00ff},
+	{"ledbh0",	0xfffff800,	SRFL_NOFFS,		SROM11_LEDBH10,	0x00ff},
+	{"ledbh1",	0xfffff800,	SRFL_NOFFS,		SROM11_LEDBH10,	0xff00},
+	{"ledbh2",	0xfffff800,	SRFL_NOFFS,		SROM11_LEDBH32,	0x00ff},
+	{"ledbh3",	0xfffff800,	SRFL_NOFFS,		SROM11_LEDBH32,	0xff00},
+	{"leddc",	0xfffff800,	SRFL_NOFFS|SRFL_LEDDC,	SROM11_LEDDC,	0xffff},
+	{"aa2g",	0xfffff800,	0,			SROM11_AA,	0x00ff},
+	{"aa5g",	0xfffff800,	0,			SROM11_AA,	0xff00},
+	{"agbg0",	0xfffff800,	0,			SROM11_AGBG10,  0xff00},
+	{"agbg1",	0xfffff800,	0,			SROM11_AGBG10,	0x00ff},
+	{"agbg2",	0xfffff800,	0,			SROM11_AGBG2A0,	0xff00},
+	{"aga0",	0xfffff800,	0,			SROM11_AGBG2A0,	0x00ff},
+	{"aga1",	0xfffff800,	0,			SROM11_AGA21,   0xff00},
+	{"aga2",	0xfffff800,	0,			SROM11_AGA21,	0x00ff},
+	{"txchain",	0xfffff800,	SRFL_NOFFS,	SROM11_TXRXC,	SROM4_TXCHAIN_MASK},
+	{"rxchain",	0xfffff800,	SRFL_NOFFS,	SROM11_TXRXC,	SROM4_RXCHAIN_MASK},
+	{"antswitch",	0xfffff800,	SRFL_NOFFS,	SROM11_TXRXC,	SROM4_SWITCH_MASK},
+
+	{"tssiposslope2g",	0xfffff800,	0,		SROM11_FEM_CFG1, 	0x0001},
+	{"epagain2g",		0xfffff800,	0,		SROM11_FEM_CFG1, 	0x000e},
+	{"pdgain2g",		0xfffff800,	0,		SROM11_FEM_CFG1, 	0x01f0},
+	{"tworangetssi2g",	0xfffff800,	0,		SROM11_FEM_CFG1, 	0x0200},
+	{"papdcap2g",		0xfffff800,	0,		SROM11_FEM_CFG1, 	0x0400},
+	{"femctrl",		0xfffff800,	0,		SROM11_FEM_CFG1, 	0xf800},
+
+	{"tssiposslope5g",	0xfffff800,	0,		SROM11_FEM_CFG2, 	0x0001},
+	{"epagain5g",		0xfffff800,	0,		SROM11_FEM_CFG2, 	0x000e},
+	{"pdgain5g",		0xfffff800,	0,		SROM11_FEM_CFG2, 	0x01f0},
+	{"tworangetssi5g",	0xfffff800,	0,		SROM11_FEM_CFG2, 	0x0200},
+	{"papdcap5g",		0xfffff800,	0,		SROM11_FEM_CFG2, 	0x0400},
+	{"gainctrlsph",		0xfffff800,	0,		SROM11_FEM_CFG2, 	0xf800},
+
+	{"tempthresh",		0xfffff800,	0,		SROM11_THERMAL,		0xff00},
+	{"tempoffset",		0xfffff800,	0,		SROM11_THERMAL,		0x00ff},
+	{"rawtempsense", 	0xfffff800,	SRFL_PRHEX,	SROM11_MPWR_RAWTS,	0x01ff},
+	{"measpower",		0xfffff800,	SRFL_PRHEX,	SROM11_MPWR_RAWTS,	0xfe00},
+	{"tempsense_slope",	0xfffff800,	SRFL_PRHEX, 	SROM11_TS_SLP_OPT_CORRX, 0x00ff},
+	{"tempcorrx",		0xfffff800,	SRFL_PRHEX, 	SROM11_TS_SLP_OPT_CORRX, 0xfc00},
+	{"tempsense_option",	0xfffff800,	SRFL_PRHEX,	SROM11_TS_SLP_OPT_CORRX, 0x0300},
+	{"xtalfreq",		0xfffff800,	0,		SROM11_XTAL_FREQ, 	0xffff},
+	/* Special PA Params for 4350 5G Band, 40/80 MHz BW Ant #1 */
+	{"pa5gbw4080a1", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_4080_W0_A1, 0xffff},
+	{"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_4080_W1_A1,                 0xffff},
+	{"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_4080_W2_A1,                 0xffff},
+	{"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB1_4080_W0_A1,                 0xffff},
+	{"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_4080_PA,     0xffff},
+	{"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_4080_PA + 1, 0xffff},
+	{"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_4080_PA,     0xffff},
+	{"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_4080_PA + 1, 0xffff},
+	{"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_4080_PA + 2, 0xffff},
+	{"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB3_4080_PA,     0xffff},
+	{"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB3_4080_PA + 1, 0xffff},
+	{"", 0xfffff800, SRFL_PRHEX,              SROM11_PATH2 + SROM11_5GB3_4080_PA + 2, 0xffff},
+	{"phycal_tempdelta",	0xfffff800,	0,		SROM11_PHYCAL_TEMPDELTA, 0x00ff},
+	{"temps_period",	0xfffff800,	0,		SROM11_PHYCAL_TEMPDELTA, 0x0f00},
+	{"temps_hysteresis",	0xfffff800,	0,		SROM11_PHYCAL_TEMPDELTA, 0xf000},
+	{"measpower1", 		0xfffff800,	SRFL_PRHEX,	SROM11_MPWR_1_AND_2, 	0x007f},
+	{"measpower2",		0xfffff800, 	SRFL_PRHEX,	SROM11_MPWR_1_AND_2, 	0x3f80},
+	{"tssifloor2g",		0xfffff800,	SRFL_PRHEX,	SROM11_TSSIFLOOR_2G,	0x03ff},
+	{"tssifloor5g",	0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_TSSIFLOOR_5GL,	0x03ff},
+	{"",		0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_TSSIFLOOR_5GM,	0x03ff},
+	{"",		0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_TSSIFLOOR_5GH,	0x03ff},
+	{"",		0xfffff800,	SRFL_PRHEX,		SROM11_TSSIFLOOR_5GU,	0x03ff},
+	{"pdoffset2g40ma0",     0xfffff800, 0,      SROM11_PDOFF_2G_40M,    0x000f},
+	{"pdoffset2g40ma1",     0xfffff800, 0,      SROM11_PDOFF_2G_40M,    0x00f0},
+	{"pdoffset2g40ma2",     0xfffff800, 0,      SROM11_PDOFF_2G_40M,    0x0f00},
+	{"pdoffset2g40mvalid",  0xfffff800, 0,      SROM11_PDOFF_2G_40M,    0x8000},
+	{"pdoffset40ma0",      	0xfffff800,	0,		SROM11_PDOFF_40M_A0,   	0xffff},
+	{"pdoffset40ma1",      	0xfffff800,	0,		SROM11_PDOFF_40M_A1,   	0xffff},
+	{"pdoffset40ma2",      	0xfffff800,	0,		SROM11_PDOFF_40M_A2,   	0xffff},
+	{"pdoffset80ma0",      	0xfffff800,	0,		SROM11_PDOFF_80M_A0,   	0xffff},
+	{"pdoffset80ma1",      	0xfffff800,	0,		SROM11_PDOFF_80M_A1,   	0xffff},
+	{"pdoffset80ma2",      	0xfffff800,	0,		SROM11_PDOFF_80M_A2,   	0xffff},
+
+	{"subband5gver",	0xfffff800, 	SRFL_PRHEX,	SROM11_SUBBAND5GVER, 	0xffff},
+	{"paparambwver",	0xfffff800, 	0,		SROM11_MCSLR5GLPO, 	0xf000},
+	/* Special PA Params for 4350 5G Band, 40/80 MHz BW Ant #0 */
+	{"pa5gbw4080a0", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 +SROM11_5GB0_PA, 0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB0_PA + 1, 0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB0_PA + 2, 0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_PA,     0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_PA + 1, 0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_PA + 2, 0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_PA,     0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_PA + 1, 0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_PA + 2, 0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB3_PA,     0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB3_PA + 1, 0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX,              SROM11_PATH2 + SROM11_5GB3_PA + 2, 0xffff},
+	/* Special PA Params for 4335 5G Band, 40 MHz BW */
+	{"pa5gbw40a0", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB0_PA, 0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB0_PA + 1, 0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB0_PA + 2, 0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB1_PA,     0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB1_PA + 1, 0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB1_PA + 2, 0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB2_PA,     0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB2_PA + 1, 0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB2_PA + 2, 0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB3_PA,     0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB3_PA + 1, 0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX,              SROM11_PATH1 + SROM11_5GB3_PA + 2, 0xffff},
+	/* Special PA Params for 4335 5G Band, 80 MHz BW */
+	{"pa5gbw80a0", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB0_PA, 0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB0_PA + 1, 0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB0_PA + 2, 0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_PA,     0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_PA + 1, 0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_PA + 2, 0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_PA,     0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_PA + 1, 0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_PA + 2, 0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB3_PA,     0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB3_PA + 1, 0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX,              SROM11_PATH2 + SROM11_5GB3_PA + 2, 0xffff},
+	/* Special PA Params for 4335 2G Band, CCK */
+	{"pa2gccka0", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_2G_PA, 0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_2G_PA + 1, 0xffff},
+	{"", 0xfffff800,	SRFL_PRHEX,              SROM11_PATH1 + SROM11_2G_PA + 2, 0xffff},
+
+	/* power per rate */
+	{"cckbw202gpo",		0xfffff800,	0,		SROM11_CCKBW202GPO, 	0xffff},
+	{"cckbw20ul2gpo",	0xfffff800,	0,		SROM11_CCKBW20UL2GPO, 	0xffff},
+	{"mcsbw202gpo",		0xfffff800,	SRFL_MORE,	SROM11_MCSBW202GPO,   	0xffff},
+	{"",            	0xfffff800, 	0,          	SROM11_MCSBW202GPO_1, 	0xffff},
+	{"mcsbw402gpo",		0xfffff800,	SRFL_MORE,	SROM11_MCSBW402GPO,   	0xffff},
+	{"",            	0xfffff800, 	0,   		SROM11_MCSBW402GPO_1, 	0xffff},
+	{"dot11agofdmhrbw202gpo", 0xfffff800, 	0, 	SROM11_DOT11AGOFDMHRBW202GPO, 	0xffff},
+	{"ofdmlrbw202gpo",	0xfffff800, 	0, 		SROM11_OFDMLRBW202GPO,	0xffff},
+	{"mcsbw205glpo",	0xfffff800,	SRFL_MORE,	SROM11_MCSBW205GLPO, 	0xffff},
+	{"",           		0xfffff800, 	0,   		SROM11_MCSBW205GLPO_1, 	0xffff},
+	{"mcsbw405glpo",	0xfffff800,	SRFL_MORE,	SROM11_MCSBW405GLPO, 	0xffff},
+	{"",           		0xfffff800, 	0,     		SROM11_MCSBW405GLPO_1, 	0xffff},
+	{"mcsbw805glpo",	0xfffff800,	SRFL_MORE,	SROM11_MCSBW805GLPO, 	0xffff},
+	{"",           		0xfffff800, 	0,    		SROM11_MCSBW805GLPO_1, 	0xffff},
+	{"mcsbw205gmpo",	0xfffff800,	SRFL_MORE,	SROM11_MCSBW205GMPO, 	0xffff},
+	{"",           		0xfffff800, 	0,     		SROM11_MCSBW205GMPO_1, 	0xffff},
+	{"mcsbw405gmpo",	0xfffff800,	SRFL_MORE,	SROM11_MCSBW405GMPO, 	0xffff},
+	{"",           		0xfffff800, 	0,     		SROM11_MCSBW405GMPO_1, 	0xffff},
+	{"mcsbw805gmpo",	0xfffff800,	SRFL_MORE,	SROM11_MCSBW805GMPO, 	0xffff},
+	{"",           		0xfffff800, 	0,   		SROM11_MCSBW805GMPO_1, 	0xffff},
+	{"mcsbw205ghpo",	0xfffff800,	SRFL_MORE,	SROM11_MCSBW205GHPO, 	0xffff},
+	{"",           		0xfffff800, 	0,  		SROM11_MCSBW205GHPO_1, 	0xffff},
+	{"mcsbw405ghpo",	0xfffff800,	SRFL_MORE,	SROM11_MCSBW405GHPO, 	0xffff},
+	{"",           		0xfffff800, 	0,   		SROM11_MCSBW405GHPO_1, 	0xffff},
+	{"mcsbw805ghpo",	0xfffff800,	SRFL_MORE,	SROM11_MCSBW805GHPO, 	0xffff},
+	{"",           		0xfffff800, 	0,    		SROM11_MCSBW805GHPO_1, 	0xffff},
+	{"mcslr5glpo",		0xfffff800,	0,		SROM11_MCSLR5GLPO, 	0x0fff},
+	{"mcslr5gmpo",		0xfffff800,	0,		SROM11_MCSLR5GMPO, 	0xffff},
+	{"mcslr5ghpo",		0xfffff800,	0,		SROM11_MCSLR5GHPO, 	0xffff},
+	{"sb20in40hrpo", 	0xfffff800,	0,	SROM11_SB20IN40HRPO,		0xffff},
+	{"sb20in80and160hr5glpo", 0xfffff800, 	0, 	SROM11_SB20IN80AND160HR5GLPO, 	0xffff},
+	{"sb40and80hr5glpo",	  0xfffff800, 	0,	SROM11_SB40AND80HR5GLPO,	0xffff},
+	{"sb20in80and160hr5gmpo", 0xfffff800, 	0,	SROM11_SB20IN80AND160HR5GMPO, 	0xffff},
+	{"sb40and80hr5gmpo",	  0xfffff800, 	0,	SROM11_SB40AND80HR5GMPO,	0xffff},
+	{"sb20in80and160hr5ghpo", 0xfffff800, 	0,	SROM11_SB20IN80AND160HR5GHPO, 	0xffff},
+	{"sb40and80hr5ghpo",	  0xfffff800, 	0,	SROM11_SB40AND80HR5GHPO,	0xffff},
+	{"sb20in40lrpo",	  0xfffff800, 	0,	SROM11_SB20IN40LRPO,		0xffff},
+	{"sb20in80and160lr5glpo", 0xfffff800, 	0,	SROM11_SB20IN80AND160LR5GLPO, 	0xffff},
+	{"sb40and80lr5glpo",	  0xfffff800, 	0,	SROM11_SB40AND80LR5GLPO,	0xffff},
+	{"sb20in80and160lr5gmpo", 0xfffff800, 	0,	SROM11_SB20IN80AND160LR5GMPO, 	0xffff},
+	{"sb40and80lr5gmpo",	  0xfffff800, 	0,	SROM11_SB40AND80LR5GMPO,	0xffff},
+	{"sb20in80and160lr5ghpo", 0xfffff800, 	0,	SROM11_SB20IN80AND160LR5GHPO, 	0xffff},
+	{"sb40and80lr5ghpo",	  0xfffff800, 	0,	SROM11_SB40AND80LR5GHPO,	0xffff},
+	{"dot11agduphrpo",	  0xfffff800, 	0,	SROM11_DOT11AGDUPHRPO,		0xffff},
+	{"dot11agduplrpo",	  0xfffff800, 	0,	SROM11_DOT11AGDUPLRPO,		0xffff},
+
+	/* Misc */
+	{"sar2g",       	0xfffff800,	0,	SROM11_SAR,          	0x00ff},
+	{"sar5g",           	0xfffff800,	0,	SROM11_SAR,		0xff00},
+
+	{"noiselvl2ga0",	0xfffff800,	0,		SROM11_NOISELVL_2G,	0x001f},
+	{"noiselvl2ga1",	0xfffff800,	0,		SROM11_NOISELVL_2G,	0x03e0},
+	{"noiselvl2ga2",	0xfffff800,	0,		SROM11_NOISELVL_2G,	0x7c00},
+	{"noiselvl5ga0",	0xfffff800,	SRFL_ARRAY,	SROM11_NOISELVL_5GL,	0x001f},
+	{"",			0xfffff800,	SRFL_ARRAY,	SROM11_NOISELVL_5GM,	0x001f},
+	{"",			0xfffff800,	SRFL_ARRAY,	SROM11_NOISELVL_5GH,	0x001f},
+	{"",			0xfffff800,	0,		SROM11_NOISELVL_5GU,	0x001f},
+	{"noiselvl5ga1",	0xfffff800,	SRFL_ARRAY,	SROM11_NOISELVL_5GL,	0x03e0},
+	{"",			0xfffff800,	SRFL_ARRAY,	SROM11_NOISELVL_5GM,	0x03e0},
+	{"",			0xfffff800,	SRFL_ARRAY,	SROM11_NOISELVL_5GH,	0x03e0},
+	{"",			0xfffff800,	0,		SROM11_NOISELVL_5GU,	0x03e0},
+	{"noiselvl5ga2",	0xfffff800,	SRFL_ARRAY,	SROM11_NOISELVL_5GL,	0x7c00},
+	{"",			0xfffff800,	SRFL_ARRAY,	SROM11_NOISELVL_5GM,	0x7c00},
+	{"",			0xfffff800,	SRFL_ARRAY,	SROM11_NOISELVL_5GH,	0x7c00},
+	{"",			0xfffff800,	0,		SROM11_NOISELVL_5GU,	0x7c00},
+
+	{"rxgainerr2ga0", 	0xfffff800, 	0,    		SROM11_RXGAINERR_2G,    0x003f},
+	{"rxgainerr2ga1", 	0xfffff800, 	0,    		SROM11_RXGAINERR_2G,    0x07c0},
+	{"rxgainerr2ga2", 	0xfffff800, 	0,    		SROM11_RXGAINERR_2G,    0xf800},
+	{"rxgainerr5ga0",      	0xfffff800, 	SRFL_ARRAY,    	SROM11_RXGAINERR_5GL,   0x003f},
+	{"",      		0xfffff800, 	SRFL_ARRAY,    	SROM11_RXGAINERR_5GM,   0x003f},
+	{"",      		0xfffff800, 	SRFL_ARRAY,    	SROM11_RXGAINERR_5GH,   0x003f},
+	{"",      		0xfffff800, 	0,    		SROM11_RXGAINERR_5GU,   0x003f},
+	{"rxgainerr5ga1",      	0xfffff800, 	SRFL_ARRAY,    	SROM11_RXGAINERR_5GL,   0x07c0},
+	{"",      		0xfffff800, 	SRFL_ARRAY,    	SROM11_RXGAINERR_5GM,   0x07c0},
+	{"",      		0xfffff800, 	SRFL_ARRAY,    	SROM11_RXGAINERR_5GH,   0x07c0},
+	{"",      		0xfffff800, 	0,    		SROM11_RXGAINERR_5GU,   0x07c0},
+	{"rxgainerr5ga2",      	0xfffff800, 	SRFL_ARRAY,    	SROM11_RXGAINERR_5GL,   0xf800},
+	{"",      		0xfffff800, 	SRFL_ARRAY,    	SROM11_RXGAINERR_5GM,   0xf800},
+	{"",      		0xfffff800, 	SRFL_ARRAY,    	SROM11_RXGAINERR_5GH,   0xf800},
+	{"",      		0xfffff800, 	0,    		SROM11_RXGAINERR_5GU,   0xf800},
+	{"rpcal2g",      	0xfffff800, 	0,		SROM11_RPCAL_2G,        0xffff},
+	{"rpcal5gb0",      	0xfffff800, 	0,		SROM11_RPCAL_5GL,       0xffff},
+	{"rpcal5gb1",      	0xfffff800, 	0,		SROM11_RPCAL_5GM,       0xffff},
+	{"rpcal5gb2",      	0xfffff800, 	0,		SROM11_RPCAL_5GH,       0xffff},
+	{"rpcal5gb3",      	0xfffff800, 	0,		SROM11_RPCAL_5GU,       0xffff},
+	{"txidxcap2g",      	0xfffff800, 	0,		SROM11_TXIDXCAP2G,      0x0ff0},
+	{"txidxcap5g",      	0xfffff800, 	0,		SROM11_TXIDXCAP5G,      0x0ff0},
+	{"pdoffsetcckma0",      0xfffff800,     0,              SROM11_PDOFF_2G_CCK,    0x000f},
+	{"pdoffsetcckma1",      0xfffff800,     0,              SROM11_PDOFF_2G_CCK,    0x00f0},
+	{"pdoffsetcckma2",      0xfffff800,     0,              SROM11_PDOFF_2G_CCK,    0x0f00},
+	{NULL,		0,		0,		0,			0}
+};
+
+static const sromvar_t perpath_pci_sromvars[] = {
+	{"maxp2ga",	0x000000f0,	0,		SROM4_2G_ITT_MAXP,	0x00ff},
+	{"itt2ga",	0x000000f0,	0,		SROM4_2G_ITT_MAXP,	0xff00},
+	{"itt5ga",	0x000000f0,	0,		SROM4_5G_ITT_MAXP,	0xff00},
+	{"pa2gw0a",	0x000000f0,	SRFL_PRHEX,	SROM4_2G_PA,		0xffff},
+	{"pa2gw1a",	0x000000f0,	SRFL_PRHEX,	SROM4_2G_PA + 1,	0xffff},
+	{"pa2gw2a",	0x000000f0,	SRFL_PRHEX,	SROM4_2G_PA + 2,	0xffff},
+	{"pa2gw3a",	0x000000f0,	SRFL_PRHEX,	SROM4_2G_PA + 3,	0xffff},
+	{"maxp5ga",	0x000000f0,	0,		SROM4_5G_ITT_MAXP,	0x00ff},
+	{"maxp5gha",	0x000000f0,	0,		SROM4_5GLH_MAXP,	0x00ff},
+	{"maxp5gla",	0x000000f0,	0,		SROM4_5GLH_MAXP,	0xff00},
+	{"pa5gw0a",	0x000000f0,	SRFL_PRHEX,	SROM4_5G_PA,		0xffff},
+	{"pa5gw1a",	0x000000f0,	SRFL_PRHEX,	SROM4_5G_PA + 1,	0xffff},
+	{"pa5gw2a",	0x000000f0,	SRFL_PRHEX,	SROM4_5G_PA + 2,	0xffff},
+	{"pa5gw3a",	0x000000f0,	SRFL_PRHEX,	SROM4_5G_PA + 3,	0xffff},
+	{"pa5glw0a",	0x000000f0,	SRFL_PRHEX,	SROM4_5GL_PA,		0xffff},
+	{"pa5glw1a",	0x000000f0,	SRFL_PRHEX,	SROM4_5GL_PA + 1,	0xffff},
+	{"pa5glw2a",	0x000000f0,	SRFL_PRHEX,	SROM4_5GL_PA + 2,	0xffff},
+	{"pa5glw3a",	0x000000f0,	SRFL_PRHEX,	SROM4_5GL_PA + 3,	0xffff},
+	{"pa5ghw0a",	0x000000f0,	SRFL_PRHEX,	SROM4_5GH_PA,		0xffff},
+	{"pa5ghw1a",	0x000000f0,	SRFL_PRHEX,	SROM4_5GH_PA + 1,	0xffff},
+	{"pa5ghw2a",	0x000000f0,	SRFL_PRHEX,	SROM4_5GH_PA + 2,	0xffff},
+	{"pa5ghw3a",	0x000000f0,	SRFL_PRHEX,	SROM4_5GH_PA + 3,	0xffff},
+	{"maxp2ga",	0x00000700,	0,		SROM8_2G_ITT_MAXP,	0x00ff},
+	{"itt2ga",	0x00000700,	0,		SROM8_2G_ITT_MAXP,	0xff00},
+	{"itt5ga",	0x00000700,	0,		SROM8_5G_ITT_MAXP,	0xff00},
+	{"pa2gw0a",	0x00000700,	SRFL_PRHEX,	SROM8_2G_PA,		0xffff},
+	{"pa2gw1a",	0x00000700,	SRFL_PRHEX,	SROM8_2G_PA + 1,	0xffff},
+	{"pa2gw2a",	0x00000700,	SRFL_PRHEX,	SROM8_2G_PA + 2,	0xffff},
+	{"maxp5ga",	0x00000700,	0,		SROM8_5G_ITT_MAXP,	0x00ff},
+	{"maxp5gha",	0x00000700,	0,		SROM8_5GLH_MAXP,	0x00ff},
+	{"maxp5gla",	0x00000700,	0,		SROM8_5GLH_MAXP,	0xff00},
+	{"pa5gw0a",	0x00000700,	SRFL_PRHEX,	SROM8_5G_PA,		0xffff},
+	{"pa5gw1a",	0x00000700,	SRFL_PRHEX,	SROM8_5G_PA + 1,	0xffff},
+	{"pa5gw2a",	0x00000700,	SRFL_PRHEX,	SROM8_5G_PA + 2,	0xffff},
+	{"pa5glw0a",	0x00000700,	SRFL_PRHEX,	SROM8_5GL_PA,		0xffff},
+	{"pa5glw1a",	0x00000700,	SRFL_PRHEX,	SROM8_5GL_PA + 1,	0xffff},
+	{"pa5glw2a",	0x00000700,	SRFL_PRHEX,	SROM8_5GL_PA + 2,	0xffff},
+	{"pa5ghw0a",	0x00000700,	SRFL_PRHEX,	SROM8_5GH_PA,		0xffff},
+	{"pa5ghw1a",	0x00000700,	SRFL_PRHEX,	SROM8_5GH_PA + 1,	0xffff},
+	{"pa5ghw2a",	0x00000700,	SRFL_PRHEX,	SROM8_5GH_PA + 2,	0xffff},
+
+	/* sromrev 11 */
+	{"maxp2ga",	0xfffff800,	0,			 SROM11_2G_MAXP,	0x00ff},
+	{"pa2ga",	0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_2G_PA,		0xffff},
+	{"",		0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_2G_PA + 1,	0xffff},
+	{"",		0xfffff800,	SRFL_PRHEX,		 SROM11_2G_PA + 2,	0xffff},
+	{"rxgains5gmelnagaina",	0xfffff800,	0,		 SROM11_RXGAINS1,	0x0007},
+	{"rxgains5gmtrisoa",	0xfffff800,	0,		 SROM11_RXGAINS1,	0x0078},
+	{"rxgains5gmtrelnabypa", 0xfffff800,	0,		 SROM11_RXGAINS1,	0x0080},
+	{"rxgains5ghelnagaina",	0xfffff800,	0,		 SROM11_RXGAINS1,	0x0700},
+	{"rxgains5ghtrisoa",	0xfffff800,	0,		 SROM11_RXGAINS1,	0x7800},
+	{"rxgains5ghtrelnabypa", 0xfffff800,	0,		 SROM11_RXGAINS1,	0x8000},
+	{"rxgains2gelnagaina",	0xfffff800,	0,		 SROM11_RXGAINS,	0x0007},
+	{"rxgains2gtrisoa",	0xfffff800,	0,		 SROM11_RXGAINS,	0x0078},
+	{"rxgains2gtrelnabypa",	0xfffff800,	0,		 SROM11_RXGAINS,	0x0080},
+	{"rxgains5gelnagaina",	0xfffff800,	0,		 SROM11_RXGAINS,	0x0700},
+	{"rxgains5gtrisoa",	0xfffff800,	0,		 SROM11_RXGAINS,	0x7800},
+	{"rxgains5gtrelnabypa",	0xfffff800,	0,		 SROM11_RXGAINS,	0x8000},
+	{"maxp5ga",	0xfffff800,	SRFL_ARRAY,		 SROM11_5GB1B0_MAXP,	0x00ff},
+	{"",		0xfffff800,	SRFL_ARRAY,		 SROM11_5GB1B0_MAXP,	0xff00},
+	{"",		0xfffff800,	SRFL_ARRAY,		 SROM11_5GB3B2_MAXP,	0x00ff},
+	{"",		0xfffff800,	0,			 SROM11_5GB3B2_MAXP,	0xff00},
+	{"pa5ga",	0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_PA,	0xffff},
+	{"",		0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_PA + 1,	0xffff},
+	{"",		0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_PA + 2,	0xffff},
+	{"",		0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB1_PA,	0xffff},
+	{"",		0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB1_PA + 1,	0xffff},
+	{"",		0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB1_PA + 2,	0xffff},
+	{"",		0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB2_PA,	0xffff},
+	{"",		0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB2_PA + 1,	0xffff},
+	{"",		0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB2_PA + 2,	0xffff},
+	{"",		0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB3_PA,	0xffff},
+	{"",		0xfffff800,	SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB3_PA + 1,	0xffff},
+	{"",		0xfffff800,	SRFL_PRHEX,		 SROM11_5GB3_PA + 2,	0xffff},
+
+	{NULL,		0,		0,		0, 			0}
+};
+
+#if !(defined(PHY_TYPE_HT) && defined(PHY_TYPE_N) && defined(PHY_TYPE_LP))
+#define	PHY_TYPE_HT		7	/* HT-Phy value */
+#define	PHY_TYPE_N		4	/* N-Phy value */
+#define	PHY_TYPE_LP		5	/* LP-Phy value */
+#endif /* !(defined(PHY_TYPE_HT) && defined(PHY_TYPE_N) && defined(PHY_TYPE_LP)) */
+#if !defined(PHY_TYPE_AC)
+#define	PHY_TYPE_AC		11	/* AC-Phy value */
+#endif /* !defined(PHY_TYPE_AC) */
+#if !defined(PHY_TYPE_NULL)
+#define	PHY_TYPE_NULL		0xf	/* Invalid Phy value */
+#endif /* !defined(PHY_TYPE_NULL) */
+
+typedef struct {
+	uint16	phy_type;
+	uint16	bandrange;
+	uint16	chain;
+	const char *vars;
+} pavars_t;
+
+static const pavars_t pavars[] = {
+	/* HTPHY */
+	{PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_2G,  0, "pa2gw0a0 pa2gw1a0 pa2gw2a0"},
+	{PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_2G,  1, "pa2gw0a1 pa2gw1a1 pa2gw2a1"},
+	{PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_2G,  2, "pa2gw0a2 pa2gw1a2 pa2gw2a2"},
+	{PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND0, 0, "pa5glw0a0 pa5glw1a0 pa5glw2a0"},
+	{PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND0, 1, "pa5glw0a1 pa5glw1a1 pa5glw2a1"},
+	{PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND0, 2, "pa5glw0a2 pa5glw1a2 pa5glw2a2"},
+	{PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND1, 0, "pa5gw0a0 pa5gw1a0 pa5gw2a0"},
+	{PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND1, 1, "pa5gw0a1 pa5gw1a1 pa5gw2a1"},
+	{PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND1, 2, "pa5gw0a2 pa5gw1a2 pa5gw2a2"},
+	{PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND2, 0, "pa5ghw0a0 pa5ghw1a0 pa5ghw2a0"},
+	{PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND2, 1, "pa5ghw0a1 pa5ghw1a1 pa5ghw2a1"},
+	{PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND2, 2, "pa5ghw0a2 pa5ghw1a2 pa5ghw2a2"},
+	{PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND3, 0, "pa5gw0a3 pa5gw1a3 pa5gw2a3"},
+	{PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND3, 1,  "pa5glw0a3 pa5glw1a3 pa5glw2a3"},
+	{PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND3, 2, "pa5ghw0a3 pa5ghw1a3 pa5ghw2a3"},
+	/* NPHY */
+	{PHY_TYPE_N, WL_CHAN_FREQ_RANGE_2G,  0, "pa2gw0a0 pa2gw1a0 pa2gw2a0"},
+	{PHY_TYPE_N, WL_CHAN_FREQ_RANGE_2G,  1, "pa2gw0a1 pa2gw1a1 pa2gw2a1"},
+	{PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5G_BAND0, 0, "pa5glw0a0 pa5glw1a0 pa5glw2a0"},
+	{PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5G_BAND0, 1, "pa5glw0a1 pa5glw1a1 pa5glw2a1"},
+	{PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5G_BAND1, 0, "pa5gw0a0 pa5gw1a0 pa5gw2a0"},
+	{PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5G_BAND1, 1, "pa5gw0a1 pa5gw1a1 pa5gw2a1"},
+	{PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5G_BAND2, 0, "pa5ghw0a0 pa5ghw1a0 pa5ghw2a0"},
+	{PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5G_BAND2, 1, "pa5ghw0a1 pa5ghw1a1 pa5ghw2a1"},
+	/* LPPHY */
+	{PHY_TYPE_LP, WL_CHAN_FREQ_RANGE_2G,  0, "pa0b0 pa0b1 pa0b2"},
+	{PHY_TYPE_LP, WL_CHAN_FREQ_RANGE_5GL, 0, "pa1lob0 pa1lob1 pa1lob2"},
+	{PHY_TYPE_LP, WL_CHAN_FREQ_RANGE_5GM, 0, "pa1b0 pa1b1 pa1b2"},
+	{PHY_TYPE_LP, WL_CHAN_FREQ_RANGE_5GH, 0, "pa1hib0 pa1hib1 pa1hib2"},
+	/* ACPHY */
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G,  0, "pa2ga0"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G,  1, "pa2ga1"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G,  2, "pa2ga2"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND,  0, "pa5ga0"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND,  1, "pa5ga1"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND,  2, "pa5ga2"},
+	{PHY_TYPE_NULL, 0, 0, ""}
+};
+
+/* pavars table when paparambwver is 1 */
+static const pavars_t pavars_bwver_1[] = {
+	/* ACPHY */
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G,  0, "pa2ga0"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G,  1, "pa2gccka0"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G,  1, "pa2ga2"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND,  0, "pa5ga0"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND,  1, "pa5gbw40a0"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND,  2, "pa5gbw80a0"},
+	{PHY_TYPE_NULL, 0, 0, ""}
+};
+
+/* pavars table when paparambwver is 2 */
+static const pavars_t pavars_bwver_2[] = {
+	/* ACPHY */
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G,  0, "pa2ga0"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G,  1, "pa2ga1"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND,  0, "pa5ga0"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND,  1, "pa5ga1"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND,  2, "pa5gbw4080a0"},
+	{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND,  3, "pa5gbw4080a1"},
+	{PHY_TYPE_NULL, 0, 0, ""}
+};
+
+typedef struct {
+	uint16	phy_type;
+	uint16	bandrange;
+	const char *vars;
+} povars_t;
+
+static const povars_t povars[] = {
+	/* NPHY */
+	{PHY_TYPE_N, WL_CHAN_FREQ_RANGE_2G,  "mcs2gpo0 mcs2gpo1 mcs2gpo2 mcs2gpo3 "
+	"mcs2gpo4 mcs2gpo5 mcs2gpo6 mcs2gpo7"},
+	{PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5GL, "mcs5glpo0 mcs5glpo1 mcs5glpo2 mcs5glpo3 "
+	"mcs5glpo4 mcs5glpo5 mcs5glpo6 mcs5glpo7"},
+	{PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5GM, "mcs5gpo0 mcs5gpo1 mcs5gpo2 mcs5gpo3 "
+	"mcs5gpo4 mcs5gpo5 mcs5gpo6 mcs5gpo7"},
+	{PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5GH, "mcs5ghpo0 mcs5ghpo1 mcs5ghpo2 mcs5ghpo3 "
+	"mcs5ghpo4 mcs5ghpo5 mcs5ghpo6 mcs5ghpo7"},
+	{PHY_TYPE_NULL, 0, ""}
+};
+
+typedef struct {
+	uint8	tag;		/* Broadcom subtag name */
+	uint32	revmask;	/* Supported cis_sromrev */
+	uint8	len;		/* Length field of the tuple, note that it includes the
+				 * subtag name (1 byte): 1 + tuple content length
+				 */
+	const char *params;
+} cis_tuple_t;
+
+#define OTP_RAW		(0xff - 1)	/* Reserved tuple number for wrvar Raw input */
+#define OTP_VERS_1	(0xff - 2)	/* CISTPL_VERS_1 */
+#define OTP_MANFID	(0xff - 3)	/* CISTPL_MANFID */
+#define OTP_RAW1	(0xff - 4)	/* Like RAW, but comes first */
+
+static const cis_tuple_t cis_hnbuvars[] = {
+	{OTP_RAW1,		0xffffffff, 0, ""},	/* special case */
+	{OTP_VERS_1,	0xffffffff, 0, "smanf sproductname"},	/* special case (non BRCM tuple) */
+	{OTP_MANFID,	0xffffffff, 4, "2manfid 2prodid"},	/* special case (non BRCM tuple) */
+	/* Unified OTP: tupple to embed USB manfid inside SDIO CIS */
+	{HNBU_UMANFID,		0xffffffff, 8, "8usbmanfid"},
+	{HNBU_SROMREV,		0xffffffff, 2, "1sromrev"},
+	/* NOTE: subdevid is also written to boardtype.
+	 *       Need to write HNBU_BOARDTYPE to change it if it is different.
+	 */
+	{HNBU_CHIPID,		0xffffffff, 11, "2vendid 2devid 2chiprev 2subvendid 2subdevid"},
+	{HNBU_BOARDREV,		0xffffffff, 3, "2boardrev"},
+	{HNBU_PAPARMS,		0xffffffff, 10, "2pa0b0 2pa0b1 2pa0b2 1pa0itssit 1pa0maxpwr 1opo"},
+	{HNBU_AA,		0xffffffff, 3, "1aa2g 1aa5g"},
+	{HNBU_AA,		0xffffffff, 3, "1aa0 1aa1"}, /* backward compatibility */
+	{HNBU_AG,		0xffffffff, 5, "1ag0 1ag1 1ag2 1ag3"},
+	{HNBU_BOARDFLAGS,	0xffffffff, 21, "4boardflags 4boardflags2 4boardflags3 "
+	"4boardflags4 4boardflags5 "},
+	{HNBU_LEDS,		0xffffffff, 17, "1ledbh0 1ledbh1 1ledbh2 1ledbh3 1ledbh4 1ledbh5 "
+	"1ledbh6 1ledbh7 1ledbh8 1ledbh9 1ledbh10 1ledbh11 1ledbh12 1ledbh13 1ledbh14 1ledbh15"},
+	{HNBU_CCODE,		0xffffffff, 4, "2ccode 1cctl"},
+	{HNBU_CCKPO,		0xffffffff, 3, "2cckpo"},
+	{HNBU_OFDMPO,		0xffffffff, 5, "4ofdmpo"},
+	{HNBU_PAPARMS5G,	0xffffffff, 23, "2pa1b0 2pa1b1 2pa1b2 2pa1lob0 2pa1lob1 2pa1lob2 "
+	"2pa1hib0 2pa1hib1 2pa1hib2 1pa1itssit "
+	"1pa1maxpwr 1pa1lomaxpwr 1pa1himaxpwr"},
+	{HNBU_RDLID,		0xffffffff, 3, "2rdlid"},
+	{HNBU_RSSISMBXA2G, 0xffffffff, 3, "0rssismf2g 0rssismc2g "
+	"0rssisav2g 0bxa2g"}, /* special case */
+	{HNBU_RSSISMBXA5G, 0xffffffff, 3, "0rssismf5g 0rssismc5g "
+	"0rssisav5g 0bxa5g"}, /* special case */
+	{HNBU_XTALFREQ,		0xffffffff, 5, "4xtalfreq"},
+	{HNBU_TRI2G,		0xffffffff, 2, "1tri2g"},
+	{HNBU_TRI5G,		0xffffffff, 4, "1tri5gl 1tri5g 1tri5gh"},
+	{HNBU_RXPO2G,		0xffffffff, 2, "1rxpo2g"},
+	{HNBU_RXPO5G,		0xffffffff, 2, "1rxpo5g"},
+	{HNBU_BOARDNUM,		0xffffffff, 3, "2boardnum"},
+	{HNBU_MACADDR,		0xffffffff, 7, "6macaddr"},	/* special case */
+	{HNBU_RDLSN,		0xffffffff, 3, "2rdlsn"},
+	{HNBU_BOARDTYPE,	0xffffffff, 3, "2boardtype"},
+	{HNBU_LEDDC,		0xffffffff, 3, "2leddc"},
+	{HNBU_RDLRNDIS,		0xffffffff, 2, "1rdlndis"},
+	{HNBU_CHAINSWITCH,	0xffffffff, 5, "1txchain 1rxchain 2antswitch"},
+	{HNBU_REGREV,		0xffffffff, 2, "1regrev"},
+	{HNBU_FEM,		0x000007fe, 5, "0antswctl2g 0triso2g 0pdetrange2g 0extpagain2g "
+	"0tssipos2g 0antswctl5g 0triso5g 0pdetrange5g 0extpagain5g 0tssipos5g"}, /* special case */
+	{HNBU_PAPARMS_C0,	0x000007fe, 31, "1maxp2ga0 1itt2ga0 2pa2gw0a0 2pa2gw1a0 "
+	"2pa2gw2a0 1maxp5ga0 1itt5ga0 1maxp5gha0 1maxp5gla0 2pa5gw0a0 2pa5gw1a0 2pa5gw2a0 "
+	"2pa5glw0a0 2pa5glw1a0 2pa5glw2a0 2pa5ghw0a0 2pa5ghw1a0 2pa5ghw2a0"},
+	{HNBU_PAPARMS_C1,	0x000007fe, 31, "1maxp2ga1 1itt2ga1 2pa2gw0a1 2pa2gw1a1 "
+	"2pa2gw2a1 1maxp5ga1 1itt5ga1 1maxp5gha1 1maxp5gla1 2pa5gw0a1 2pa5gw1a1 2pa5gw2a1 "
+	"2pa5glw0a1 2pa5glw1a1 2pa5glw2a1 2pa5ghw0a1 2pa5ghw1a1 2pa5ghw2a1"},
+	{HNBU_PO_CCKOFDM,	0xffffffff, 19, "2cck2gpo 4ofdm2gpo 4ofdm5gpo 4ofdm5glpo "
+	"4ofdm5ghpo"},
+	{HNBU_PO_MCS2G,		0xffffffff, 17, "2mcs2gpo0 2mcs2gpo1 2mcs2gpo2 2mcs2gpo3 "
+	"2mcs2gpo4 2mcs2gpo5 2mcs2gpo6 2mcs2gpo7"},
+	{HNBU_PO_MCS5GM,	0xffffffff, 17, "2mcs5gpo0 2mcs5gpo1 2mcs5gpo2 2mcs5gpo3 "
+	"2mcs5gpo4 2mcs5gpo5 2mcs5gpo6 2mcs5gpo7"},
+	{HNBU_PO_MCS5GLH,	0xffffffff, 33, "2mcs5glpo0 2mcs5glpo1 2mcs5glpo2 2mcs5glpo3 "
+	"2mcs5glpo4 2mcs5glpo5 2mcs5glpo6 2mcs5glpo7 "
+	"2mcs5ghpo0 2mcs5ghpo1 2mcs5ghpo2 2mcs5ghpo3 "
+	"2mcs5ghpo4 2mcs5ghpo5 2mcs5ghpo6 2mcs5ghpo7"},
+	{HNBU_CCKFILTTYPE,	0xffffffff, 2, "1cckdigfilttype"},
+	{HNBU_PO_CDD,		0xffffffff, 3, "2cddpo"},
+	{HNBU_PO_STBC,		0xffffffff, 3, "2stbcpo"},
+	{HNBU_PO_40M,		0xffffffff, 3, "2bw40po"},
+	{HNBU_PO_40MDUP,	0xffffffff, 3, "2bwduppo"},
+	{HNBU_RDLRWU,		0xffffffff, 2, "1rdlrwu"},
+	{HNBU_WPS,		0xffffffff, 3, "1wpsgpio 1wpsled"},
+	{HNBU_USBFS,		0xffffffff, 2, "1usbfs"},
+	{HNBU_ELNA2G,           0xffffffff, 2, "1elna2g"},
+	{HNBU_ELNA5G,           0xffffffff, 2, "1elna5g"},
+	{HNBU_CUSTOM1,		0xffffffff, 5, "4customvar1"},
+	{OTP_RAW,		0xffffffff, 0, ""},	/* special case */
+	{HNBU_OFDMPO5G,		0xffffffff, 13, "4ofdm5gpo 4ofdm5glpo 4ofdm5ghpo"},
+	{HNBU_USBEPNUM,		0xffffffff, 3, "2usbepnum"},
+	{HNBU_CCKBW202GPO,	0xffffffff, 7, "2cckbw202gpo 2cckbw20ul2gpo 2cckbw20in802gpo"},
+	{HNBU_LEGOFDMBW202GPO,	0xffffffff, 9, "4legofdmbw202gpo 4legofdmbw20ul2gpo"},
+	{HNBU_LEGOFDMBW205GPO,	0xffffffff, 25, "4legofdmbw205glpo 4legofdmbw20ul5glpo "
+	"4legofdmbw205gmpo 4legofdmbw20ul5gmpo 4legofdmbw205ghpo 4legofdmbw20ul5ghpo"},
+	{HNBU_MCS2GPO,	0xffffffff, 17,	"4mcsbw202gpo 4mcsbw20ul2gpo 4mcsbw402gpo 4mcsbw802gpo"},
+	{HNBU_MCS5GLPO,	0xffffffff, 13,	"4mcsbw205glpo 4mcsbw20ul5glpo 4mcsbw405glpo"},
+	{HNBU_MCS5GMPO,	0xffffffff, 13,	"4mcsbw205gmpo 4mcsbw20ul5gmpo 4mcsbw405gmpo"},
+	{HNBU_MCS5GHPO,	0xffffffff, 13,	"4mcsbw205ghpo 4mcsbw20ul5ghpo 4mcsbw405ghpo"},
+	{HNBU_MCS32PO,	0xffffffff, 3,	"2mcs32po"},
+	{HNBU_LEG40DUPPO,	0xffffffff, 3,	"2legofdm40duppo"},
+	{HNBU_TEMPTHRESH,	0xffffffff, 7,	"1tempthresh 0temps_period 0temps_hysteresis "
+	"1tempoffset 1tempsense_slope 0tempcorrx 0tempsense_option "
+	"1phycal_tempdelta"}, /* special case */
+	{HNBU_MUXENAB,		0xffffffff, 2,	"1muxenab"},
+	{HNBU_FEM_CFG,		0xfffff800, 5,	"0femctrl 0papdcap2g 0tworangetssi2g 0pdgain2g "
+	"0epagain2g 0tssiposslope2g 0gainctrlsph 0papdcap5g 0tworangetssi5g 0pdgain5g 0epagain5g "
+	"0tssiposslope5g"}, /* special case */
+	{HNBU_ACPA_C0,		0xfffff800, 39,	"2subband5gver 2maxp2ga0 2*3pa2ga0 "
+	"1*4maxp5ga0 2*12pa5ga0"},
+	{HNBU_ACPA_C1,		0xfffff800, 37,	"2maxp2ga1 2*3pa2ga1 1*4maxp5ga1 2*12pa5ga1"},
+	{HNBU_ACPA_C2,		0xfffff800, 37,	"2maxp2ga2 2*3pa2ga2 1*4maxp5ga2 2*12pa5ga2"},
+	{HNBU_MEAS_PWR,		0xfffff800, 5,	"1measpower 1measpower1 1measpower2 2rawtempsense"},
+	{HNBU_PDOFF,		0xfffff800, 13,	"2pdoffset40ma0 2pdoffset40ma1 2pdoffset40ma2 "
+	"2pdoffset80ma0 2pdoffset80ma1 2pdoffset80ma2"},
+	{HNBU_ACPPR_2GPO,	0xfffff800, 13,	"2dot11agofdmhrbw202gpo 2ofdmlrbw202gpo "
+	"2sb20in40dot11agofdm2gpo 2sb20in80dot11agofdm2gpo 2sb20in40ofdmlrbw202gpo "
+	"2sb20in80ofdmlrbw202gpo"},
+	{HNBU_ACPPR_5GPO,	0xfffff800, 59,	"4mcsbw805glpo 4mcsbw1605glpo 4mcsbw805gmpo "
+	"4mcsbw1605gmpo 4mcsbw805ghpo 4mcsbw1605ghpo 2mcslr5glpo 2mcslr5gmpo 2mcslr5ghpo "
+	"4mcsbw80p805glpo 4mcsbw80p805gmpo 4mcsbw80p805ghpo 4mcsbw80p805gx1po 2mcslr5gx1po "
+	"2mcslr5g80p80po 4mcsbw805gx1po 4mcsbw1605gx1po"},
+	{HNBU_MCS5Gx1PO,	0xfffff800, 9,	"4mcsbw205gx1po 4mcsbw405gx1po"},
+	{HNBU_ACPPR_SBPO,	0xfffff800, 49,	"2sb20in40hrpo 2sb20in80and160hr5glpo "
+	"2sb40and80hr5glpo 2sb20in80and160hr5gmpo 2sb40and80hr5gmpo 2sb20in80and160hr5ghpo "
+	"2sb40and80hr5ghpo 2sb20in40lrpo 2sb20in80and160lr5glpo 2sb40and80lr5glpo "
+	"2sb20in80and160lr5gmpo 2sb40and80lr5gmpo 2sb20in80and160lr5ghpo 2sb40and80lr5ghpo "
+	"4dot11agduphrpo 4dot11agduplrpo 2sb20in40and80hrpo 2sb20in40and80lrpo "
+	"2sb20in80and160hr5gx1po 2sb20in80and160lr5gx1po 2sb40and80hr5gx1po 2sb40and80lr5gx1po "
+	},
+	{HNBU_ACPPR_SB8080_PO, 0xfffff800, 23, "2sb2040and80in80p80hr5glpo "
+	"2sb2040and80in80p80lr5glpo 2sb2040and80in80p80hr5gmpo "
+	"2sb2040and80in80p80lr5gmpo 2sb2040and80in80p80hr5ghpo 2sb2040and80in80p80lr5ghpo "
+	"2sb2040and80in80p80hr5gx1po 2sb2040and80in80p80lr5gx1po 2sb20in80p80hr5gpo "
+	"2sb20in80p80lr5gpo 2dot11agduppo"},
+	{HNBU_NOISELVL,		0xfffff800, 16, "1noiselvl2ga0 1noiselvl2ga1 1noiselvl2ga2 "
+	"1*4noiselvl5ga0 1*4noiselvl5ga1 1*4noiselvl5ga2"},
+	{HNBU_RXGAIN_ERR,	0xfffff800, 16, "1rxgainerr2ga0 1rxgainerr2ga1 1rxgainerr2ga2 "
+	"1*4rxgainerr5ga0 1*4rxgainerr5ga1 1*4rxgainerr5ga2"},
+	{HNBU_AGBGA,		0xfffff800, 7, "1agbg0 1agbg1 1agbg2 1aga0 1aga1 1aga2"},
+	{HNBU_USBDESC_COMPOSITE, 0xffffffff, 3, "2usbdesc_composite"},
+	{HNBU_UUID, 		0xffffffff, 17,	"16uuid"},
+	{HNBU_WOWLGPIO,		0xffffffff, 2,  "1wowl_gpio"},
+	{HNBU_ACRXGAINS_C0,	0xfffff800, 5, "0rxgains5gtrelnabypa0 0rxgains5gtrisoa0 "
+	"0rxgains5gelnagaina0 0rxgains2gtrelnabypa0 0rxgains2gtrisoa0 0rxgains2gelnagaina0 "
+	"0rxgains5ghtrelnabypa0 0rxgains5ghtrisoa0 0rxgains5ghelnagaina0 0rxgains5gmtrelnabypa0 "
+	"0rxgains5gmtrisoa0 0rxgains5gmelnagaina0"},	/* special case */
+	{HNBU_ACRXGAINS_C1,	0xfffff800, 5, "0rxgains5gtrelnabypa1 0rxgains5gtrisoa1 "
+	"0rxgains5gelnagaina1 0rxgains2gtrelnabypa1 0rxgains2gtrisoa1 0rxgains2gelnagaina1 "
+	"0rxgains5ghtrelnabypa1 0rxgains5ghtrisoa1 0rxgains5ghelnagaina1 0rxgains5gmtrelnabypa1 "
+	"0rxgains5gmtrisoa1 0rxgains5gmelnagaina1"},	/* special case */
+	{HNBU_ACRXGAINS_C2,	0xfffff800, 5, "0rxgains5gtrelnabypa2 0rxgains5gtrisoa2 "
+	"0rxgains5gelnagaina2 0rxgains2gtrelnabypa2 0rxgains2gtrisoa2 0rxgains2gelnagaina2 "
+	"0rxgains5ghtrelnabypa2 0rxgains5ghtrisoa2 0rxgains5ghelnagaina2 0rxgains5gmtrelnabypa2 "
+	"0rxgains5gmtrisoa2 0rxgains5gmelnagaina2"},	/* special case */
+	{HNBU_TXDUTY, 		0xfffff800, 9,	"2tx_duty_cycle_ofdm_40_5g "
+	"2tx_duty_cycle_thresh_40_5g 2tx_duty_cycle_ofdm_80_5g 2tx_duty_cycle_thresh_80_5g"},
+	{HNBU_PDOFF_2G,		0xfffff800, 3,	"0pdoffset2g40ma0 0pdoffset2g40ma1 "
+	"0pdoffset2g40ma2 0pdoffset2g40mvalid"},
+	{HNBU_ACPA_CCK,		0xfffff800, 7,	"2*3pa2gccka0"},
+	{HNBU_ACPA_40,		0xfffff800, 25,	"2*12pa5gbw40a0"},
+	{HNBU_ACPA_80,		0xfffff800, 25,	"2*12pa5gbw80a0"},
+	{HNBU_ACPA_4080,	0xfffff800, 49,	"2*12pa5gbw4080a0 2*12pa5gbw4080a1"},
+	{HNBU_SUBBAND5GVER,	0xfffff800, 3,	"2subband5gver"},
+	{HNBU_PAPARAMBWVER,	0xfffff800, 2,	"1paparambwver"},
+	{0xFF,			0xffffffff, 0, ""}
+};
+
+#endif /* _bcmsrom_tbl_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmutils.h b/drivers/net/wireless/bcmdhd/include/bcmutils.h
new file mode 100644
index 0000000..b74fc9a
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmutils.h
@@ -0,0 +1,1127 @@
+/*
+ * Misc useful os-independent macros and functions.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ *
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ *
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmutils.h 469595 2014-04-10 21:19:06Z $
+ */
+
+#ifndef	_bcmutils_h_
+#define	_bcmutils_h_
+
+#define bcm_strcpy_s(dst, noOfElements, src)            strcpy((dst), (src))
+#define bcm_strncpy_s(dst, noOfElements, src, count)    strncpy((dst), (src), (count))
+#define bcm_strcat_s(dst, noOfElements, src)            strcat((dst), (src))
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef PKTQ_LOG
+#include <wlioctl.h>
+#endif
+
+/* ctype replacement */
+#define _BCM_U	0x01	/* upper */
+#define _BCM_L	0x02	/* lower */
+#define _BCM_D	0x04	/* digit */
+#define _BCM_C	0x08	/* cntrl */
+#define _BCM_P	0x10	/* punct */
+#define _BCM_S	0x20	/* white space (space/lf/tab) */
+#define _BCM_X	0x40	/* hex digit */
+#define _BCM_SP	0x80	/* hard space (0x20) */
+
+extern const unsigned char bcm_ctype[];
+#define bcm_ismask(x)	(bcm_ctype[(int)(unsigned char)(x)])
+
+#define bcm_isalnum(c)	((bcm_ismask(c)&(_BCM_U|_BCM_L|_BCM_D)) != 0)
+#define bcm_isalpha(c)	((bcm_ismask(c)&(_BCM_U|_BCM_L)) != 0)
+#define bcm_iscntrl(c)	((bcm_ismask(c)&(_BCM_C)) != 0)
+#define bcm_isdigit(c)	((bcm_ismask(c)&(_BCM_D)) != 0)
+#define bcm_isgraph(c)	((bcm_ismask(c)&(_BCM_P|_BCM_U|_BCM_L|_BCM_D)) != 0)
+#define bcm_islower(c)	((bcm_ismask(c)&(_BCM_L)) != 0)
+#define bcm_isprint(c)	((bcm_ismask(c)&(_BCM_P|_BCM_U|_BCM_L|_BCM_D|_BCM_SP)) != 0)
+#define bcm_ispunct(c)	((bcm_ismask(c)&(_BCM_P)) != 0)
+#define bcm_isspace(c)	((bcm_ismask(c)&(_BCM_S)) != 0)
+#define bcm_isupper(c)	((bcm_ismask(c)&(_BCM_U)) != 0)
+#define bcm_isxdigit(c)	((bcm_ismask(c)&(_BCM_D|_BCM_X)) != 0)
+#define bcm_tolower(c)	(bcm_isupper((c)) ? ((c) + 'a' - 'A') : (c))
+#define bcm_toupper(c)	(bcm_islower((c)) ? ((c) + 'A' - 'a') : (c))
+
+#define CIRCULAR_ARRAY_FULL(rd_idx, wr_idx, max) ((wr_idx + 1)%max == rd_idx)
+
+/* Buffer structure for collecting string-formatted data
+* using bcm_bprintf() API.
+* Use bcm_binit() to initialize before use
+*/
+
+struct bcmstrbuf {
+	char *buf;	/* pointer to current position in origbuf */
+	unsigned int size;	/* current (residual) size in bytes */
+	char *origbuf;	/* unmodified pointer to orignal buffer */
+	unsigned int origsize;	/* unmodified orignal buffer size in bytes */
+};
+
+/* ** driver-only section ** */
+#ifdef BCMDRIVER
+#include <osl.h>
+#include <hnd_pktq.h>
+#include <hnd_pktpool.h>
+
+#define GPIO_PIN_NOTDEFINED 	0x20	/* Pin not defined */
+
+/*
+ * Spin at most 'us' microseconds while 'exp' is true.
+ * Caller should explicitly test 'exp' when this completes
+ * and take appropriate error action if 'exp' is still true.
+ */
+#ifndef SPINWAIT_POLL_PERIOD
+#define SPINWAIT_POLL_PERIOD	10
+#endif
+
+#define SPINWAIT(exp, us) { \
+	uint countdown = (us) + (SPINWAIT_POLL_PERIOD - 1); \
+	while ((exp) && (countdown >= SPINWAIT_POLL_PERIOD)) { \
+		OSL_DELAY(SPINWAIT_POLL_PERIOD); \
+		countdown -= SPINWAIT_POLL_PERIOD; \
+	} \
+}
+
+/* forward definition of ether_addr structure used by some function prototypes */
+
+struct ether_addr;
+
+extern int ether_isbcast(const void *ea);
+extern int ether_isnulladdr(const void *ea);
+
+#define BCM_MAC_RXCPL_IDX_BITS			12
+#define BCM_MAX_RXCPL_IDX_INVALID		0
+#define BCM_MAC_RXCPL_IFIDX_BITS		3
+#define BCM_MAC_RXCPL_DOT11_BITS		1
+#define BCM_MAX_RXCPL_IFIDX			((1 << BCM_MAC_RXCPL_IFIDX_BITS) - 1)
+#define BCM_MAC_RXCPL_FLAG_BITS			4
+#define BCM_RXCPL_FLAGS_IN_TRANSIT		0x1
+#define BCM_RXCPL_FLAGS_FIRST_IN_FLUSHLIST	0x2
+#define BCM_RXCPL_FLAGS_RXCPLVALID		0x4
+#define BCM_RXCPL_FLAGS_RSVD			0x8
+
+#define BCM_RXCPL_SET_IN_TRANSIT(a)	((a)->rxcpl_id.flags |= BCM_RXCPL_FLAGS_IN_TRANSIT)
+#define BCM_RXCPL_CLR_IN_TRANSIT(a)	((a)->rxcpl_id.flags &= ~BCM_RXCPL_FLAGS_IN_TRANSIT)
+#define BCM_RXCPL_IN_TRANSIT(a)		((a)->rxcpl_id.flags & BCM_RXCPL_FLAGS_IN_TRANSIT)
+
+#define BCM_RXCPL_SET_FRST_IN_FLUSH(a)	((a)->rxcpl_id.flags |= BCM_RXCPL_FLAGS_FIRST_IN_FLUSHLIST)
+#define BCM_RXCPL_CLR_FRST_IN_FLUSH(a)	((a)->rxcpl_id.flags &= ~BCM_RXCPL_FLAGS_FIRST_IN_FLUSHLIST)
+#define BCM_RXCPL_FRST_IN_FLUSH(a)	((a)->rxcpl_id.flags & BCM_RXCPL_FLAGS_FIRST_IN_FLUSHLIST)
+
+#define BCM_RXCPL_SET_VALID_INFO(a)	((a)->rxcpl_id.flags |= BCM_RXCPL_FLAGS_RXCPLVALID)
+#define BCM_RXCPL_CLR_VALID_INFO(a)	((a)->rxcpl_id.flags &= ~BCM_RXCPL_FLAGS_RXCPLVALID)
+#define BCM_RXCPL_VALID_INFO(a) (((a)->rxcpl_id.flags & BCM_RXCPL_FLAGS_RXCPLVALID) ? TRUE : FALSE)
+
+
+struct reorder_rxcpl_id_list {
+	uint16 head;
+	uint16 tail;
+	uint32 cnt;
+};
+
+typedef struct rxcpl_id {
+	uint32		idx : BCM_MAC_RXCPL_IDX_BITS;
+	uint32		next_idx : BCM_MAC_RXCPL_IDX_BITS;
+	uint32		ifidx : BCM_MAC_RXCPL_IFIDX_BITS;
+	uint32		dot11 : BCM_MAC_RXCPL_DOT11_BITS;
+	uint32		flags : BCM_MAC_RXCPL_FLAG_BITS;
+} rxcpl_idx_id_t;
+
+typedef struct rxcpl_data_len {
+	uint32		metadata_len_w : 6;
+	uint32		dataoffset: 10;
+	uint32		datalen : 16;
+} rxcpl_data_len_t;
+
+typedef struct rxcpl_info {
+	rxcpl_idx_id_t		rxcpl_id;
+	uint32		host_pktref;
+	union {
+		rxcpl_data_len_t	rxcpl_len;
+		struct rxcpl_info	*free_next;
+	};
+} rxcpl_info_t;
+
+/* rx completion list */
+typedef struct bcm_rxcplid_list {
+	uint32			max;
+	uint32			avail;
+	rxcpl_info_t		*rxcpl_ptr;
+	rxcpl_info_t		*free_list;
+} bcm_rxcplid_list_t;
+
+extern bool bcm_alloc_rxcplid_list(osl_t *osh, uint32 max);
+extern rxcpl_info_t * bcm_alloc_rxcplinfo(void);
+extern void bcm_free_rxcplinfo(rxcpl_info_t *ptr);
+extern void bcm_chain_rxcplid(uint16 first,  uint16 next);
+extern rxcpl_info_t *bcm_id2rxcplinfo(uint16 id);
+extern uint16 bcm_rxcplinfo2id(rxcpl_info_t *ptr);
+extern rxcpl_info_t *bcm_rxcpllist_end(rxcpl_info_t *ptr, uint32 *count);
+
+/* externs */
+/* packet */
+extern uint pktcopy(osl_t *osh, void *p, uint offset, int len, uchar *buf);
+extern uint pktfrombuf(osl_t *osh, void *p, uint offset, int len, uchar *buf);
+extern uint pkttotlen(osl_t *osh, void *p);
+extern void *pktlast(osl_t *osh, void *p);
+extern uint pktsegcnt(osl_t *osh, void *p);
+extern uint pktsegcnt_war(osl_t *osh, void *p);
+extern uint8 *pktdataoffset(osl_t *osh, void *p,  uint offset);
+extern void *pktoffset(osl_t *osh, void *p,  uint offset);
+
+/* Get priority from a packet and pass it back in scb (or equiv) */
+#define	PKTPRIO_VDSCP	0x100		/* DSCP prio found after VLAN tag */
+#define	PKTPRIO_VLAN	0x200		/* VLAN prio found */
+#define	PKTPRIO_UPD	0x400		/* DSCP used to update VLAN prio */
+#define	PKTPRIO_DSCP	0x800		/* DSCP prio found */
+
+/* DSCP type definitions (RFC4594) */
+/* AF1x: High-Throughput Data (RFC2597) */
+#define DSCP_AF11	0x0A
+#define DSCP_AF12	0x0C
+#define DSCP_AF13	0x0E
+/* AF2x: Low-Latency Data (RFC2597) */
+#define DSCP_AF21	0x12
+#define DSCP_AF22	0x14
+#define DSCP_AF23	0x16
+/* AF3x: Multimedia Streaming (RFC2597) */
+#define DSCP_AF31	0x1A
+#define DSCP_AF32	0x1C
+#define DSCP_AF33	0x1E
+/* EF: Telephony (RFC3246) */
+#define DSCP_EF		0x2E
+
+extern uint pktsetprio(void *pkt, bool update_vtag);
+extern bool pktgetdscp(uint8 *pktdata, uint pktlen, uint8 *dscp);
+
+/* string */
+extern int bcm_atoi(const char *s);
+extern ulong bcm_strtoul(const char *cp, char **endp, uint base);
+extern char *bcmstrstr(const char *haystack, const char *needle);
+extern char *bcmstrnstr(const char *s, uint s_len, const char *substr, uint substr_len);
+extern char *bcmstrcat(char *dest, const char *src);
+extern char *bcmstrncat(char *dest, const char *src, uint size);
+extern ulong wchar2ascii(char *abuf, ushort *wbuf, ushort wbuflen, ulong abuflen);
+char* bcmstrtok(char **string, const char *delimiters, char *tokdelim);
+int bcmstricmp(const char *s1, const char *s2);
+int bcmstrnicmp(const char* s1, const char* s2, int cnt);
+
+
+/* ethernet address */
+extern char *bcm_ether_ntoa(const struct ether_addr *ea, char *buf);
+extern int bcm_ether_atoe(const char *p, struct ether_addr *ea);
+
+/* ip address */
+struct ipv4_addr;
+extern char *bcm_ip_ntoa(struct ipv4_addr *ia, char *buf);
+extern char *bcm_ipv6_ntoa(void *ipv6, char *buf);
+extern int bcm_atoipv4(const char *p, struct ipv4_addr *ip);
+
+/* delay */
+extern void bcm_mdelay(uint ms);
+/* variable access */
+#define NVRAM_RECLAIM_CHECK(name)
+
+extern char *getvar(char *vars, const char *name);
+extern int getintvar(char *vars, const char *name);
+extern int getintvararray(char *vars, const char *name, int index);
+extern int getintvararraysize(char *vars, const char *name);
+extern uint getgpiopin(char *vars, char *pin_name, uint def_pin);
+#define bcm_perf_enable()
+#define bcmstats(fmt)
+#define	bcmlog(fmt, a1, a2)
+#define	bcmdumplog(buf, size)	*buf = '\0'
+#define	bcmdumplogent(buf, idx)	-1
+
+#define TSF_TICKS_PER_MS	1000
+#define TS_ENTER		0xdeadbeef	/* Timestamp profiling enter */
+#define TS_EXIT			0xbeefcafe	/* Timestamp profiling exit */
+
+#define bcmtslog(tstamp, fmt, a1, a2)
+#define bcmprinttslogs()
+#define bcmprinttstamp(us)
+#define bcmdumptslog(buf, size)
+
+extern char *bcm_nvram_vars(uint *length);
+extern int bcm_nvram_cache(void *sih);
+
+/* Support for sharing code across in-driver iovar implementations.
+ * The intent is that a driver use this structure to map iovar names
+ * to its (private) iovar identifiers, and the lookup function to
+ * find the entry.  Macros are provided to map ids and get/set actions
+ * into a single number space for a switch statement.
+ */
+
+/* iovar structure */
+typedef struct bcm_iovar {
+	const char *name;	/* name for lookup and display */
+	uint16 varid;		/* id for switch */
+	uint16 flags;		/* driver-specific flag bits */
+	uint16 type;		/* base type of argument */
+	uint16 minlen;		/* min length for buffer vars */
+} bcm_iovar_t;
+
+/* varid definitions are per-driver, may use these get/set bits */
+
+/* IOVar action bits for id mapping */
+#define IOV_GET 0 /* Get an iovar */
+#define IOV_SET 1 /* Set an iovar */
+
+/* Varid to actionid mapping */
+#define IOV_GVAL(id)		((id) * 2)
+#define IOV_SVAL(id)		((id) * 2 + IOV_SET)
+#define IOV_ISSET(actionid)	((actionid & IOV_SET) == IOV_SET)
+#define IOV_ID(actionid)	(actionid >> 1)
+
+/* flags are per-driver based on driver attributes */
+
+extern const bcm_iovar_t *bcm_iovar_lookup(const bcm_iovar_t *table, const char *name);
+extern int bcm_iovar_lencheck(const bcm_iovar_t *table, void *arg, int len, bool set);
+#if defined(WLTINYDUMP) || defined(WLMSG_INFORM) || defined(WLMSG_ASSOC) || \
+	defined(WLMSG_PRPKT) || defined(WLMSG_WSEC)
+extern int bcm_format_ssid(char* buf, const uchar ssid[], uint ssid_len);
+#endif
+#endif	/* BCMDRIVER */
+
+/* Base type definitions */
+#define IOVT_VOID	0	/* no value (implictly set only) */
+#define IOVT_BOOL	1	/* any value ok (zero/nonzero) */
+#define IOVT_INT8	2	/* integer values are range-checked */
+#define IOVT_UINT8	3	/* unsigned int 8 bits */
+#define IOVT_INT16	4	/* int 16 bits */
+#define IOVT_UINT16	5	/* unsigned int 16 bits */
+#define IOVT_INT32	6	/* int 32 bits */
+#define IOVT_UINT32	7	/* unsigned int 32 bits */
+#define IOVT_BUFFER	8	/* buffer is size-checked as per minlen */
+#define BCM_IOVT_VALID(type) (((unsigned int)(type)) <= IOVT_BUFFER)
+
+/* Initializer for IOV type strings */
+#define BCM_IOV_TYPE_INIT { \
+	"void", \
+	"bool", \
+	"int8", \
+	"uint8", \
+	"int16", \
+	"uint16", \
+	"int32", \
+	"uint32", \
+	"buffer", \
+	"" }
+
+#define BCM_IOVT_IS_INT(type) (\
+	(type == IOVT_BOOL) || \
+	(type == IOVT_INT8) || \
+	(type == IOVT_UINT8) || \
+	(type == IOVT_INT16) || \
+	(type == IOVT_UINT16) || \
+	(type == IOVT_INT32) || \
+	(type == IOVT_UINT32))
+
+/* ** driver/apps-shared section ** */
+
+#define BCME_STRLEN 		64	/* Max string length for BCM errors */
+#define VALID_BCMERROR(e)  ((e <= 0) && (e >= BCME_LAST))
+
+
+/*
+ * error codes could be added but the defined ones shouldn't be changed/deleted
+ * these error codes are exposed to the user code
+ * when ever a new error code is added to this list
+ * please update errorstring table with the related error string and
+ * update osl files with os specific errorcode map
+*/
+
+#define BCME_OK				0	/* Success */
+#define BCME_ERROR			-1	/* Error generic */
+#define BCME_BADARG			-2	/* Bad Argument */
+#define BCME_BADOPTION			-3	/* Bad option */
+#define BCME_NOTUP			-4	/* Not up */
+#define BCME_NOTDOWN			-5	/* Not down */
+#define BCME_NOTAP			-6	/* Not AP */
+#define BCME_NOTSTA			-7	/* Not STA  */
+#define BCME_BADKEYIDX			-8	/* BAD Key Index */
+#define BCME_RADIOOFF 			-9	/* Radio Off */
+#define BCME_NOTBANDLOCKED		-10	/* Not  band locked */
+#define BCME_NOCLK			-11	/* No Clock */
+#define BCME_BADRATESET			-12	/* BAD Rate valueset */
+#define BCME_BADBAND			-13	/* BAD Band */
+#define BCME_BUFTOOSHORT		-14	/* Buffer too short */
+#define BCME_BUFTOOLONG			-15	/* Buffer too long */
+#define BCME_BUSY			-16	/* Busy */
+#define BCME_NOTASSOCIATED		-17	/* Not Associated */
+#define BCME_BADSSIDLEN			-18	/* Bad SSID len */
+#define BCME_OUTOFRANGECHAN		-19	/* Out of Range Channel */
+#define BCME_BADCHAN			-20	/* Bad Channel */
+#define BCME_BADADDR			-21	/* Bad Address */
+#define BCME_NORESOURCE			-22	/* Not Enough Resources */
+#define BCME_UNSUPPORTED		-23	/* Unsupported */
+#define BCME_BADLEN			-24	/* Bad length */
+#define BCME_NOTREADY			-25	/* Not Ready */
+#define BCME_EPERM			-26	/* Not Permitted */
+#define BCME_NOMEM			-27	/* No Memory */
+#define BCME_ASSOCIATED			-28	/* Associated */
+#define BCME_RANGE			-29	/* Not In Range */
+#define BCME_NOTFOUND			-30	/* Not Found */
+#define BCME_WME_NOT_ENABLED		-31	/* WME Not Enabled */
+#define BCME_TSPEC_NOTFOUND		-32	/* TSPEC Not Found */
+#define BCME_ACM_NOTSUPPORTED		-33	/* ACM Not Supported */
+#define BCME_NOT_WME_ASSOCIATION	-34	/* Not WME Association */
+#define BCME_SDIO_ERROR			-35	/* SDIO Bus Error */
+#define BCME_DONGLE_DOWN		-36	/* Dongle Not Accessible */
+#define BCME_VERSION			-37 	/* Incorrect version */
+#define BCME_TXFAIL			-38 	/* TX failure */
+#define BCME_RXFAIL			-39	/* RX failure */
+#define BCME_NODEVICE			-40 	/* Device not present */
+#define BCME_NMODE_DISABLED		-41 	/* NMODE disabled */
+#define BCME_NONRESIDENT		-42 /* access to nonresident overlay */
+#define BCME_SCANREJECT			-43 	/* reject scan request */
+#define BCME_USAGE_ERROR                -44     /* WLCMD usage error */
+#define BCME_IOCTL_ERROR                -45     /* WLCMD ioctl error */
+#define BCME_SERIAL_PORT_ERR            -46     /* RWL serial port error */
+#define BCME_DISABLED			-47     /* Disabled in this build */
+#define BCME_DECERR				-48		/* Decrypt error */
+#define BCME_ENCERR				-49		/* Encrypt error */
+#define BCME_MICERR				-50		/* Integrity/MIC error */
+#define BCME_REPLAY				-51		/* Replay */
+#define BCME_IE_NOTFOUND		-52		/* IE not found */
+#define BCME_LAST			BCME_IE_NOTFOUND
+
+#define BCME_NOTENABLED BCME_DISABLED
+
+/* These are collection of BCME Error strings */
+#define BCMERRSTRINGTABLE {		\
+	"OK",				\
+	"Undefined error",		\
+	"Bad Argument",			\
+	"Bad Option",			\
+	"Not up",			\
+	"Not down",			\
+	"Not AP",			\
+	"Not STA",			\
+	"Bad Key Index",		\
+	"Radio Off",			\
+	"Not band locked",		\
+	"No clock",			\
+	"Bad Rate valueset",		\
+	"Bad Band",			\
+	"Buffer too short",		\
+	"Buffer too long",		\
+	"Busy",				\
+	"Not Associated",		\
+	"Bad SSID len",			\
+	"Out of Range Channel",		\
+	"Bad Channel",			\
+	"Bad Address",			\
+	"Not Enough Resources",		\
+	"Unsupported",			\
+	"Bad length",			\
+	"Not Ready",			\
+	"Not Permitted",		\
+	"No Memory",			\
+	"Associated",			\
+	"Not In Range",			\
+	"Not Found",			\
+	"WME Not Enabled",		\
+	"TSPEC Not Found",		\
+	"ACM Not Supported",		\
+	"Not WME Association",		\
+	"SDIO Bus Error",		\
+	"Dongle Not Accessible",	\
+	"Incorrect version",		\
+	"TX Failure",			\
+	"RX Failure",			\
+	"Device Not Present",		\
+	"NMODE Disabled",		\
+	"Nonresident overlay access", \
+	"Scan Rejected",		\
+	"WLCMD usage error",		\
+	"WLCMD ioctl error",		\
+	"RWL serial port error", 	\
+	"Disabled",			\
+	"Decrypt error", \
+	"Encrypt error", \
+	"MIC error", \
+	"Replay", \
+	"IE not found", \
+}
+
+#ifndef ABS
+#define	ABS(a)			(((a) < 0) ? -(a) : (a))
+#endif /* ABS */
+
+#ifndef MIN
+#define	MIN(a, b)		(((a) < (b)) ? (a) : (b))
+#endif /* MIN */
+
+#ifndef MAX
+#define	MAX(a, b)		(((a) > (b)) ? (a) : (b))
+#endif /* MAX */
+
+/* limit to [min, max] */
+#ifndef LIMIT_TO_RANGE
+#define LIMIT_TO_RANGE(x, min, max) \
+	((x) < (min) ? (min) : ((x) > (max) ? (max) : (x)))
+#endif /* LIMIT_TO_RANGE */
+
+/* limit to  max */
+#ifndef LIMIT_TO_MAX
+#define LIMIT_TO_MAX(x, max) \
+	(((x) > (max) ? (max) : (x)))
+#endif /* LIMIT_TO_MAX */
+
+/* limit to min */
+#ifndef LIMIT_TO_MIN
+#define LIMIT_TO_MIN(x, min) \
+	(((x) < (min) ? (min) : (x)))
+#endif /* LIMIT_TO_MIN */
+
+#define DELTA(curr, prev) ((curr) > (prev) ? ((curr) - (prev)) : \
+	(0xffffffff - (prev) + (curr) + 1))
+#define CEIL(x, y)		(((x) + ((y) - 1)) / (y))
+#define ROUNDUP(x, y)		((((x) + ((y) - 1)) / (y)) * (y))
+#define ROUNDDN(p, align)	((p) & ~((align) - 1))
+#define	ISALIGNED(a, x)		(((uintptr)(a) & ((x) - 1)) == 0)
+#define ALIGN_ADDR(addr, boundary) (void *)(((uintptr)(addr) + (boundary) - 1) \
+	                                         & ~((boundary) - 1))
+#define ALIGN_SIZE(size, boundary) (((size) + (boundary) - 1) \
+	                                         & ~((boundary) - 1))
+#define	ISPOWEROF2(x)		((((x) - 1) & (x)) == 0)
+#define VALID_MASK(mask)	!((mask) & ((mask) + 1))
+
+#ifndef OFFSETOF
+#ifdef __ARMCC_VERSION
+/*
+ * The ARM RVCT compiler complains when using OFFSETOF where a constant
+ * expression is expected, such as an initializer for a static object.
+ * offsetof from the runtime library doesn't have that problem.
+ */
+#include <stddef.h>
+#define	OFFSETOF(type, member)	offsetof(type, member)
+#else
+#  if ((__GNUC__ >= 4) && (__GNUC_MINOR__ >= 8))
+/* GCC 4.8+ complains when using our OFFSETOF macro in array length declarations. */
+#    define	OFFSETOF(type, member)	__builtin_offsetof(type, member)
+#  else
+#    define	OFFSETOF(type, member)	((uint)(uintptr)&((type *)0)->member)
+#  endif /* GCC 4.8 or newer */
+#endif /* __ARMCC_VERSION */
+#endif /* OFFSETOF */
+
+#ifndef ARRAYSIZE
+#define ARRAYSIZE(a)		(sizeof(a) / sizeof(a[0]))
+#endif
+
+#ifndef ARRAYLAST /* returns pointer to last array element */
+#define ARRAYLAST(a)		(&a[ARRAYSIZE(a)-1])
+#endif
+
+/* Reference a function; used to prevent a static function from being optimized out */
+extern void *_bcmutils_dummy_fn;
+#define REFERENCE_FUNCTION(f)	(_bcmutils_dummy_fn = (void *)(f))
+
+/* bit map related macros */
+#ifndef setbit
+#ifndef NBBY		/* the BSD family defines NBBY */
+#define	NBBY	8	/* 8 bits per byte */
+#endif /* #ifndef NBBY */
+#ifdef BCMUTILS_BIT_MACROS_USE_FUNCS
+extern void setbit(void *array, uint bit);
+extern void clrbit(void *array, uint bit);
+extern bool isset(const void *array, uint bit);
+extern bool isclr(const void *array, uint bit);
+#else
+#define	setbit(a, i)	(((uint8 *)a)[(i) / NBBY] |= 1 << ((i) % NBBY))
+#define	clrbit(a, i)	(((uint8 *)a)[(i) / NBBY] &= ~(1 << ((i) % NBBY)))
+#define	isset(a, i)	(((const uint8 *)a)[(i) / NBBY] & (1 << ((i) % NBBY)))
+#define	isclr(a, i)	((((const uint8 *)a)[(i) / NBBY] & (1 << ((i) % NBBY))) == 0)
+#endif
+#endif /* setbit */
+extern void set_bitrange(void *array, uint start, uint end, uint maxbit);
+
+#define	isbitset(a, i)	(((a) & (1 << (i))) != 0)
+
+#define	NBITS(type)	(sizeof(type) * 8)
+#define NBITVAL(nbits)	(1 << (nbits))
+#define MAXBITVAL(nbits)	((1 << (nbits)) - 1)
+#define	NBITMASK(nbits)	MAXBITVAL(nbits)
+#define MAXNBVAL(nbyte)	MAXBITVAL((nbyte) * 8)
+
+extern void bcm_bitprint32(const uint32 u32);
+
+/*
+ * ----------------------------------------------------------------------------
+ * Multiword map of 2bits, nibbles
+ * setbit2 setbit4 (void *ptr, uint32 ix, uint32 val)
+ * getbit2 getbit4 (void *ptr, uint32 ix)
+ * ----------------------------------------------------------------------------
+ */
+
+#define DECLARE_MAP_API(NB, RSH, LSH, OFF, MSK)                     \
+static INLINE void setbit##NB(void *ptr, uint32 ix, uint32 val)     \
+{                                                                   \
+	uint32 *addr = (uint32 *)ptr;                                   \
+	uint32 *a = addr + (ix >> RSH); /* (ix / 2^RSH) */              \
+	uint32 pos = (ix & OFF) << LSH; /* (ix % 2^RSH) * 2^LSH */      \
+	uint32 mask = (MSK << pos);                                     \
+	uint32 tmp = *a & ~mask;                                        \
+	*a = tmp | (val << pos);                                        \
+}                                                                   \
+static INLINE uint32 getbit##NB(void *ptr, uint32 ix)               \
+{                                                                   \
+	uint32 *addr = (uint32 *)ptr;                                   \
+	uint32 *a = addr + (ix >> RSH);                                 \
+	uint32 pos = (ix & OFF) << LSH;                                 \
+	return ((*a >> pos) & MSK);                                     \
+}
+
+DECLARE_MAP_API(2, 4, 1, 15U, 0x0003) /* setbit2() and getbit2() */
+DECLARE_MAP_API(4, 3, 2, 7U, 0x000F) /* setbit4() and getbit4() */
+DECLARE_MAP_API(8, 2, 3, 3U, 0x00FF) /* setbit8() and getbit8() */
+
+/* basic mux operation - can be optimized on several architectures */
+#define MUX(pred, true, false) ((pred) ? (true) : (false))
+
+/* modulo inc/dec - assumes x E [0, bound - 1] */
+#define MODDEC(x, bound) MUX((x) == 0, (bound) - 1, (x) - 1)
+#define MODINC(x, bound) MUX((x) == (bound) - 1, 0, (x) + 1)
+
+/* modulo inc/dec, bound = 2^k */
+#define MODDEC_POW2(x, bound) (((x) - 1) & ((bound) - 1))
+#define MODINC_POW2(x, bound) (((x) + 1) & ((bound) - 1))
+
+/* modulo add/sub - assumes x, y E [0, bound - 1] */
+#define MODADD(x, y, bound) \
+    MUX((x) + (y) >= (bound), (x) + (y) - (bound), (x) + (y))
+#define MODSUB(x, y, bound) \
+    MUX(((int)(x)) - ((int)(y)) < 0, (x) - (y) + (bound), (x) - (y))
+
+/* module add/sub, bound = 2^k */
+#define MODADD_POW2(x, y, bound) (((x) + (y)) & ((bound) - 1))
+#define MODSUB_POW2(x, y, bound) (((x) - (y)) & ((bound) - 1))
+
+/* crc defines */
+#define CRC8_INIT_VALUE  0xff		/* Initial CRC8 checksum value */
+#define CRC8_GOOD_VALUE  0x9f		/* Good final CRC8 checksum value */
+#define CRC16_INIT_VALUE 0xffff		/* Initial CRC16 checksum value */
+#define CRC16_GOOD_VALUE 0xf0b8		/* Good final CRC16 checksum value */
+#define CRC32_INIT_VALUE 0xffffffff	/* Initial CRC32 checksum value */
+#define CRC32_GOOD_VALUE 0xdebb20e3	/* Good final CRC32 checksum value */
+
+/* use for direct output of MAC address in printf etc */
+#define MACF				"%02x:%02x:%02x:%02x:%02x:%02x"
+#define ETHERP_TO_MACF(ea)	((struct ether_addr *) (ea))->octet[0], \
+							((struct ether_addr *) (ea))->octet[1], \
+							((struct ether_addr *) (ea))->octet[2], \
+							((struct ether_addr *) (ea))->octet[3], \
+							((struct ether_addr *) (ea))->octet[4], \
+							((struct ether_addr *) (ea))->octet[5]
+
+#define ETHER_TO_MACF(ea) 	(ea).octet[0], \
+							(ea).octet[1], \
+							(ea).octet[2], \
+							(ea).octet[3], \
+							(ea).octet[4], \
+							(ea).octet[5]
+#if !defined(SIMPLE_MAC_PRINT)
+#define MACDBG "%02x:%02x:%02x:%02x:%02x:%02x"
+#define MAC2STRDBG(ea) (ea)[0], (ea)[1], (ea)[2], (ea)[3], (ea)[4], (ea)[5]
+#else
+#define MACDBG				"%02x:%02x:%02x"
+#define MAC2STRDBG(ea) (ea)[0], (ea)[4], (ea)[5]
+#endif /* SIMPLE_MAC_PRINT */
+
+/* bcm_format_flags() bit description structure */
+typedef struct bcm_bit_desc {
+	uint32	bit;
+	const char* name;
+} bcm_bit_desc_t;
+
+/* bcm_format_field */
+typedef struct bcm_bit_desc_ex {
+	uint32 mask;
+	const bcm_bit_desc_t *bitfield;
+} bcm_bit_desc_ex_t;
+
+/* buffer length for ethernet address from bcm_ether_ntoa() */
+#define ETHER_ADDR_STR_LEN	18	/* 18-bytes of Ethernet address buffer length */
+
+/* crypto utility function */
+/* 128-bit xor: *dst = *src1 xor *src2. dst1, src1 and src2 may have any alignment */
+static INLINE void
+xor_128bit_block(const uint8 *src1, const uint8 *src2, uint8 *dst)
+{
+	if (
+#ifdef __i386__
+	    1 ||
+#endif
+	    (((uintptr)src1 | (uintptr)src2 | (uintptr)dst) & 3) == 0) {
+		/* ARM CM3 rel time: 1229 (727 if alignment check could be omitted) */
+		/* x86 supports unaligned.  This version runs 6x-9x faster on x86. */
+		((uint32 *)dst)[0] = ((const uint32 *)src1)[0] ^ ((const uint32 *)src2)[0];
+		((uint32 *)dst)[1] = ((const uint32 *)src1)[1] ^ ((const uint32 *)src2)[1];
+		((uint32 *)dst)[2] = ((const uint32 *)src1)[2] ^ ((const uint32 *)src2)[2];
+		((uint32 *)dst)[3] = ((const uint32 *)src1)[3] ^ ((const uint32 *)src2)[3];
+	} else {
+		/* ARM CM3 rel time: 4668 (4191 if alignment check could be omitted) */
+		int k;
+		for (k = 0; k < 16; k++)
+			dst[k] = src1[k] ^ src2[k];
+	}
+}
+
+/* externs */
+/* crc */
+extern uint8 hndcrc8(uint8 *p, uint nbytes, uint8 crc);
+extern uint16 hndcrc16(uint8 *p, uint nbytes, uint16 crc);
+extern uint32 hndcrc32(uint8 *p, uint nbytes, uint32 crc);
+
+/* format/print */
+#if defined(DHD_DEBUG) || defined(WLMSG_PRHDRS) || defined(WLMSG_PRPKT) || \
+	defined(WLMSG_ASSOC)
+/* print out the value a field has: fields may have 1-32 bits and may hold any value */
+extern int bcm_format_field(const bcm_bit_desc_ex_t *bd, uint32 field, char* buf, int len);
+/* print out which bits in flags are set */
+extern int bcm_format_flags(const bcm_bit_desc_t *bd, uint32 flags, char* buf, int len);
+#endif
+
+#if defined(DHD_DEBUG) || defined(WLMSG_PRHDRS) || defined(WLMSG_PRPKT) || \
+	defined(WLMSG_ASSOC) || defined(WLMEDIA_PEAKRATE)
+extern int bcm_format_hex(char *str, const void *bytes, int len);
+#endif
+
+extern const char *bcm_crypto_algo_name(uint algo);
+extern char *bcm_chipname(uint chipid, char *buf, uint len);
+extern char *bcm_brev_str(uint32 brev, char *buf);
+extern void printbig(char *buf);
+extern void prhex(const char *msg, uchar *buf, uint len);
+
+/* IE parsing */
+
+/* tag_ID/length/value_buffer tuple */
+typedef struct bcm_tlv {
+	uint8	id;
+	uint8	len;
+	uint8	data[1];
+} bcm_tlv_t;
+
+/* bcm tlv w/ 16 bit id/len */
+typedef struct bcm_xtlv {
+	uint16	id;
+	uint16	len;
+	uint8	data[1];
+} bcm_xtlv_t;
+
+#define BCM_TLV_MAX_DATA_SIZE (255)
+#define BCM_XTLV_MAX_DATA_SIZE (65535)
+#define BCM_TLV_HDR_SIZE (OFFSETOF(bcm_tlv_t, data))
+
+#define BCM_XTLV_HDR_SIZE (OFFSETOF(bcm_xtlv_t, data))
+#define BCM_XTLV_LEN(elt) ltoh16_ua(&(elt->len))
+#define BCM_XTLV_ID(elt) ltoh16_ua(&(elt->id))
+#define BCM_XTLV_SIZE(elt) (BCM_XTLV_HDR_SIZE + BCM_XTLV_LEN(elt))
+
+/* Check that bcm_tlv_t fits into the given buflen */
+#define bcm_valid_tlv(elt, buflen) (\
+	 ((int)(buflen) >= (int)BCM_TLV_HDR_SIZE) && \
+	 ((int)(buflen) >= (int)(BCM_TLV_HDR_SIZE + (elt)->len)))
+
+#define bcm_valid_xtlv(elt, buflen) (\
+	 ((int)(buflen) >= (int)BCM_XTLV_HDR_SIZE) && \
+	 ((int)(buflen) >= (int)BCM_XTLV_SIZE(elt)))
+
+extern bcm_tlv_t *bcm_next_tlv(bcm_tlv_t *elt, int *buflen);
+extern bcm_tlv_t *bcm_parse_tlvs(void *buf, int buflen, uint key);
+extern bcm_tlv_t *bcm_parse_tlvs_min_bodylen(void *buf, int buflen, uint key, int min_bodylen);
+
+extern bcm_tlv_t *bcm_parse_ordered_tlvs(void *buf, int buflen, uint key);
+
+extern bcm_tlv_t *bcm_find_vendor_ie(void *tlvs, int tlvs_len, const char *voui, uint8 *type,
+	int type_len);
+
+extern uint8 *bcm_write_tlv(int type, const void *data, int datalen, uint8 *dst);
+extern uint8 *bcm_write_tlv_safe(int type, const void *data, int datalen, uint8 *dst,
+	int dst_maxlen);
+
+extern uint8 *bcm_copy_tlv(const void *src, uint8 *dst);
+extern uint8 *bcm_copy_tlv_safe(const void *src, uint8 *dst, int dst_maxlen);
+
+/* xtlv */
+extern bcm_xtlv_t *bcm_next_xtlv(bcm_xtlv_t *elt, int *buflen);
+
+/* bcmerror */
+extern const char *bcmerrorstr(int bcmerror);
+
+/* multi-bool data type: set of bools, mbool is true if any is set */
+typedef uint32 mbool;
+#define mboolset(mb, bit)		((mb) |= (bit))		/* set one bool */
+#define mboolclr(mb, bit)		((mb) &= ~(bit))	/* clear one bool */
+#define mboolisset(mb, bit)		(((mb) & (bit)) != 0)	/* TRUE if one bool is set */
+#define	mboolmaskset(mb, mask, val)	((mb) = (((mb) & ~(mask)) | (val)))
+
+/* generic datastruct to help dump routines */
+struct fielddesc {
+	const char *nameandfmt;
+	uint32 	offset;
+	uint32 	len;
+};
+
+extern void bcm_binit(struct bcmstrbuf *b, char *buf, uint size);
+extern void bcm_bprhex(struct bcmstrbuf *b, const char *msg, bool newline, uint8 *buf, int len);
+
+extern void bcm_inc_bytes(uchar *num, int num_bytes, uint8 amount);
+extern int bcm_cmp_bytes(const uchar *arg1, const uchar *arg2, uint8 nbytes);
+extern void bcm_print_bytes(const char *name, const uchar *cdata, int len);
+
+typedef  uint32 (*bcmutl_rdreg_rtn)(void *arg0, uint arg1, uint32 offset);
+extern uint bcmdumpfields(bcmutl_rdreg_rtn func_ptr, void *arg0, uint arg1, struct fielddesc *str,
+                          char *buf, uint32 bufsize);
+extern uint bcm_bitcount(uint8 *bitmap, uint bytelength);
+
+extern int bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...);
+
+/* power conversion */
+extern uint16 bcm_qdbm_to_mw(uint8 qdbm);
+extern uint8 bcm_mw_to_qdbm(uint16 mw);
+extern uint bcm_mkiovar(char *name, char *data, uint datalen, char *buf, uint len);
+
+unsigned int process_nvram_vars(char *varbuf, unsigned int len);
+
+/* calculate a * b + c */
+extern void bcm_uint64_multiple_add(uint32* r_high, uint32* r_low, uint32 a, uint32 b, uint32 c);
+/* calculate a / b */
+extern void bcm_uint64_divide(uint32* r, uint32 a_high, uint32 a_low, uint32 b);
+
+
+/* Public domain bit twiddling hacks/utilities: Sean Eron Anderson */
+
+/* Table driven count set bits. */
+static const uint8 /* Table only for use by bcm_cntsetbits */
+_CSBTBL[256] =
+{
+#	define B2(n)    n,     n + 1,     n + 1,     n + 2
+#	define B4(n) B2(n), B2(n + 1), B2(n + 1), B2(n + 2)
+#	define B6(n) B4(n), B4(n + 1), B4(n + 1), B4(n + 2)
+	B6(0), B6(0 + 1), B6(0 + 1), B6(0 + 2)
+};
+
+static INLINE uint32 /* Uses table _CSBTBL for fast counting of 1's in a u32 */
+bcm_cntsetbits(const uint32 u32)
+{
+	/* function local scope declaration of const _CSBTBL[] */
+	const uint8 * p = (const uint8 *)&u32;
+	return (_CSBTBL[p[0]] + _CSBTBL[p[1]] + _CSBTBL[p[2]] + _CSBTBL[p[3]]);
+}
+
+
+static INLINE int /* C equivalent count of leading 0's in a u32 */
+C_bcm_count_leading_zeros(uint32 u32)
+{
+	int shifts = 0;
+	while (u32) {
+		shifts++; u32 >>= 1;
+	}
+	return (32U - shifts);
+}
+
+#ifdef BCMDRIVER
+/*
+ * Assembly instructions: Count Leading Zeros
+ * "clz"	: MIPS, ARM
+ * "cntlzw"	: PowerPC
+ * "BSF"	: x86
+ * "lzcnt"	: AMD, SPARC
+ */
+
+#if defined(__arm__)
+
+#if defined(__ARM_ARCH_7M__) /* Cortex M3 */
+#define __USE_ASM_CLZ__
+#endif /* __ARM_ARCH_7M__ */
+
+#if defined(__ARM_ARCH_7R__) /* Cortex R4 */
+#define __USE_ASM_CLZ__
+#endif /* __ARM_ARCH_7R__ */
+
+#endif /* __arm__ */
+
+static INLINE int
+bcm_count_leading_zeros(uint32 u32)
+{
+#if defined(__USE_ASM_CLZ__)
+	int zeros;
+	__asm__ volatile("clz    %0, %1 \n" : "=r" (zeros) : "r"  (u32));
+	return zeros;
+#else	/* C equivalent */
+	return C_bcm_count_leading_zeros(u32);
+#endif  /* C equivalent */
+}
+
+/* INTERFACE: Multiword bitmap based small id allocator. */
+struct bcm_mwbmap;	/* forward declaration for use as an opaque mwbmap handle */
+
+#define BCM_MWBMAP_INVALID_HDL	((struct bcm_mwbmap *)NULL)
+#define BCM_MWBMAP_INVALID_IDX	((uint32)(~0U))
+
+/* Incarnate a multiword bitmap based small index allocator */
+extern struct bcm_mwbmap * bcm_mwbmap_init(osl_t * osh, uint32 items_max);
+
+/* Free up the multiword bitmap index allocator */
+extern void bcm_mwbmap_fini(osl_t * osh, struct bcm_mwbmap * mwbmap_hdl);
+
+/* Allocate a unique small index using a multiword bitmap index allocator */
+extern uint32 bcm_mwbmap_alloc(struct bcm_mwbmap * mwbmap_hdl);
+
+/* Force an index at a specified position to be in use */
+extern void bcm_mwbmap_force(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix);
+
+/* Free a previously allocated index back into the multiword bitmap allocator */
+extern void bcm_mwbmap_free(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix);
+
+/* Fetch the toal number of free indices in the multiword bitmap allocator */
+extern uint32 bcm_mwbmap_free_cnt(struct bcm_mwbmap * mwbmap_hdl);
+
+/* Determine whether an index is inuse or free */
+extern bool bcm_mwbmap_isfree(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix);
+
+/* Debug dump a multiword bitmap allocator */
+extern void bcm_mwbmap_show(struct bcm_mwbmap * mwbmap_hdl);
+
+extern void bcm_mwbmap_audit(struct bcm_mwbmap * mwbmap_hdl);
+/* End - Multiword bitmap based small Id allocator. */
+
+
+/* INTERFACE: Simple unique 16bit Id Allocator using a stack implementation. */
+
+#define ID16_INVALID                ((uint16)(~0))
+
+/*
+ * Construct a 16bit id allocator, managing 16bit ids in the range:
+ *    [start_val16 .. start_val16+total_ids)
+ * Note: start_val16 is inclusive.
+ * Returns an opaque handle to the 16bit id allocator.
+ */
+extern void * id16_map_init(osl_t *osh, uint16 total_ids, uint16 start_val16);
+extern void * id16_map_fini(osl_t *osh, void * id16_map_hndl);
+extern void id16_map_clear(void * id16_map_hndl, uint16 total_ids, uint16 start_val16);
+
+/* Allocate a unique 16bit id */
+extern uint16 id16_map_alloc(void * id16_map_hndl);
+
+/* Free a 16bit id value into the id16 allocator */
+extern void id16_map_free(void * id16_map_hndl, uint16 val16);
+
+/* Get the number of failures encountered during id allocation. */
+extern uint32 id16_map_failures(void * id16_map_hndl);
+
+/* Audit the 16bit id allocator state. */
+extern bool id16_map_audit(void * id16_map_hndl);
+/* End - Simple 16bit Id Allocator. */
+
+#endif /* BCMDRIVER */
+
+extern void bcm_uint64_right_shift(uint32* r, uint32 a_high, uint32 a_low, uint32 b);
+
+void bcm_add_64(uint32* r_hi, uint32* r_lo, uint32 offset);
+void bcm_sub_64(uint32* r_hi, uint32* r_lo, uint32 offset);
+
+/* calculate checksum for ip header, tcp / udp header / data */
+uint16 bcm_ip_cksum(uint8 *buf, uint32 len, uint32 sum);
+
+#ifndef _dll_t_
+#define _dll_t_
+/*
+ * -----------------------------------------------------------------------------
+ *                      Double Linked List Macros
+ * -----------------------------------------------------------------------------
+ *
+ * All dll operations must be performed on a pre-initialized node.
+ * Inserting an uninitialized node into a list effectively initialized it.
+ *
+ * When a node is deleted from a list, you may initialize it to avoid corruption
+ * incurred by double deletion. You may skip initialization if the node is
+ * immediately inserted into another list.
+ *
+ * By placing a dll_t element at the start of a struct, you may cast a dll_t *
+ * to the struct or vice versa.
+ *
+ * Example of declaring an initializing someList and inserting nodeA, nodeB
+ *
+ *     typedef struct item {
+ *         dll_t node;
+ *         int someData;
+ *     } Item_t;
+ *     Item_t nodeA, nodeB, nodeC;
+ *     nodeA.someData = 11111, nodeB.someData = 22222, nodeC.someData = 33333;
+ *
+ *     dll_t someList;
+ *     dll_init(&someList);
+ *
+ *     dll_append(&someList, (dll_t *) &nodeA);
+ *     dll_prepend(&someList, &nodeB.node);
+ *     dll_insert((dll_t *)&nodeC, &nodeA.node);
+ *
+ *     dll_delete((dll_t *) &nodeB);
+ *
+ * Example of a for loop to walk someList of node_p
+ *
+ *   extern void mydisplay(Item_t * item_p);
+ *
+ *   dll_t * item_p, * next_p;
+ *   for (item_p = dll_head_p(&someList); ! dll_end(&someList, item_p);
+ *        item_p = next_p)
+ *   {
+ *       next_p = dll_next_p(item_p);
+ *       ... use item_p at will, including removing it from list ...
+ *       mydisplay((PItem_t)item_p);
+ *   }
+ *
+ * -----------------------------------------------------------------------------
+ */
+typedef struct dll {
+	struct dll * next_p;
+	struct dll * prev_p;
+} dll_t;
+
+static INLINE void
+dll_init(dll_t *node_p)
+{
+	node_p->next_p = node_p;
+	node_p->prev_p = node_p;
+}
+/* dll macros returing a pointer to dll_t */
+
+static INLINE dll_t *
+dll_head_p(dll_t *list_p)
+{
+	return list_p->next_p;
+}
+
+
+static INLINE dll_t *
+dll_tail_p(dll_t *list_p)
+{
+	return (list_p)->prev_p;
+}
+
+
+static INLINE dll_t *
+dll_next_p(dll_t *node_p)
+{
+	return (node_p)->next_p;
+}
+
+
+static INLINE dll_t *
+dll_prev_p(dll_t *node_p)
+{
+	return (node_p)->next_p;
+}
+
+
+static INLINE bool
+dll_empty(dll_t *list_p)
+{
+	return ((list_p)->next_p == (list_p));
+}
+
+
+static INLINE bool
+dll_end(dll_t *list_p, dll_t * node_p)
+{
+	return (list_p == node_p);
+}
+
+
+/* inserts the node new_p "after" the node at_p */
+static INLINE void
+dll_insert(dll_t *new_p, dll_t * at_p)
+{
+	new_p->next_p = at_p->next_p;
+	new_p->prev_p = at_p;
+	at_p->next_p = new_p;
+	(new_p->next_p)->prev_p = new_p;
+}
+
+static INLINE void
+dll_append(dll_t *list_p, dll_t *node_p)
+{
+	dll_insert(node_p, dll_tail_p(list_p));
+}
+
+static INLINE void
+dll_prepend(dll_t *list_p, dll_t *node_p)
+{
+	dll_insert(node_p, list_p);
+}
+
+
+/* deletes a node from any list that it "may" be in, if at all. */
+static INLINE void
+dll_delete(dll_t *node_p)
+{
+	node_p->prev_p->next_p = node_p->next_p;
+	node_p->next_p->prev_p = node_p->prev_p;
+}
+#endif  /* ! defined(_dll_t_) */
+
+/* Elements managed in a double linked list */
+
+typedef struct dll_pool {
+	dll_t       free_list;
+	uint16      free_count;
+	uint16      elems_max;
+	uint16      elem_size;
+	dll_t       elements[1];
+} dll_pool_t;
+
+dll_pool_t * dll_pool_init(void * osh, uint16 elems_max, uint16 elem_size);
+void * dll_pool_alloc(dll_pool_t * dll_pool_p);
+void dll_pool_free(dll_pool_t * dll_pool_p, void * elem_p);
+void dll_pool_free_tail(dll_pool_t * dll_pool_p, void * elem_p);
+typedef void (* dll_elem_dump)(void * elem_p);
+void dll_pool_detach(void * osh, dll_pool_t * pool, uint16 elems_max, uint16 elem_size);
+
+#ifdef __cplusplus
+	}
+#endif
+
+/* #define DEBUG_COUNTER */
+#ifdef DEBUG_COUNTER
+#define CNTR_TBL_MAX 10
+typedef struct _counter_tbl_t {
+	char name[16];				/* name of this counter table */
+	uint32 prev_log_print;		/* Internal use. Timestamp of the previous log print */
+	uint log_print_interval;	/* Desired interval to print logs in ms */
+	uint needed_cnt;			/* How many counters need to be used */
+	uint32 cnt[CNTR_TBL_MAX];		/* Counting entries to increase at desired places */
+	bool enabled;				/* Whether to enable printing log */
+} counter_tbl_t;
+
+
+void counter_printlog(counter_tbl_t *ctr_tbl);
+#endif /* DEBUG_COUNTER */
+
+#endif	/* _bcmutils_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/brcm_nl80211.h b/drivers/net/wireless/bcmdhd/include/brcm_nl80211.h
new file mode 100644
index 0000000..95712c9
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/brcm_nl80211.h
@@ -0,0 +1,56 @@
+/*
+ * Definitions for nl80211 testmode access to host driver
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: brcm_nl80211.h 438755 2013-11-22 23:20:40Z $
+ *
+ */
+
+#ifndef _brcm_nl80211_h_
+#define _brcm_nl80211_h_
+
+struct bcm_nlmsg_hdr {
+	uint cmd;	/* common ioctl definition */
+	uint len;	/* attached buffer length */
+	uint offset;	/* user buffer offset */
+	uint set;	/* get or set request optional */
+	uint magic;	/* magic number for verification */
+};
+
+enum bcmnl_attrs {
+	BCM_NLATTR_UNSPEC,
+
+	BCM_NLATTR_LEN,
+	BCM_NLATTR_DATA,
+
+	__BCM_NLATTR_AFTER_LAST,
+	BCM_NLATTR_MAX = __BCM_NLATTR_AFTER_LAST - 1
+};
+
+struct nl_prv_data {
+	int err;			/* return result */
+	void *data;			/* ioctl return buffer pointer */
+	uint len;			/* ioctl return buffer length */
+	struct bcm_nlmsg_hdr *nlioc;	/* bcm_nlmsg_hdr header pointer */
+};
+
+#endif /* _brcm_nl80211_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/dbus.h b/drivers/net/wireless/bcmdhd/include/dbus.h
new file mode 100644
index 0000000..daef6c5
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/dbus.h
@@ -0,0 +1,582 @@
+/*
+ * Dongle BUS interface Abstraction layer
+ *   target serial buses like USB, SDIO, SPI, etc.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dbus.h 423346 2013-09-11 22:38:40Z $
+ */
+
+#ifndef __DBUS_H__
+#define __DBUS_H__
+
+#include "typedefs.h"
+
+#define DBUSTRACE(args)
+#define DBUSERR(args)
+#define DBUSINFO(args)
+#define DBUSDBGLOCK(args)
+
+enum {
+	DBUS_OK = 0,
+	DBUS_ERR = -200,
+	DBUS_ERR_TIMEOUT,
+	DBUS_ERR_DISCONNECT,
+	DBUS_ERR_NODEVICE,
+	DBUS_ERR_UNSUPPORTED,
+	DBUS_ERR_PENDING,
+	DBUS_ERR_NOMEM,
+	DBUS_ERR_TXFAIL,
+	DBUS_ERR_TXTIMEOUT,
+	DBUS_ERR_TXDROP,
+	DBUS_ERR_RXFAIL,
+	DBUS_ERR_RXDROP,
+	DBUS_ERR_TXCTLFAIL,
+	DBUS_ERR_RXCTLFAIL,
+	DBUS_ERR_REG_PARAM,
+	DBUS_STATUS_CANCELLED,
+	DBUS_ERR_NVRAM,
+	DBUS_JUMBO_NOMATCH,
+	DBUS_JUMBO_BAD_FORMAT,
+	DBUS_NVRAM_NONTXT
+};
+
+#define BCM_OTP_SIZE_43236  84	/* number of 16 bit values */
+#define BCM_OTP_SW_RGN_43236	24  /* start offset of SW config region */
+#define BCM_OTP_ADDR_43236 0x18000800 /* address of otp base */
+
+#define ERR_CBMASK_TXFAIL		0x00000001
+#define ERR_CBMASK_RXFAIL		0x00000002
+#define ERR_CBMASK_ALL			0xFFFFFFFF
+
+#define DBUS_CBCTL_WRITE			0
+#define DBUS_CBCTL_READ				1
+#if defined(INTR_EP_ENABLE)
+#define DBUS_CBINTR_POLL			2
+#endif /* defined(INTR_EP_ENABLE) */
+
+#define DBUS_TX_RETRY_LIMIT		3		/* retries for failed txirb */
+#define DBUS_TX_TIMEOUT_INTERVAL	250		/* timeout for txirb complete, in ms */
+
+#define DBUS_BUFFER_SIZE_TX	32000
+#define DBUS_BUFFER_SIZE_RX	24000
+
+#define DBUS_BUFFER_SIZE_TX_NOAGG	2048
+#define DBUS_BUFFER_SIZE_RX_NOAGG	2048
+
+/* DBUS types */
+enum {
+	DBUS_USB,
+	DBUS_SDIO,
+	DBUS_SPI,
+	DBUS_UNKNOWN
+};
+
+enum dbus_state {
+	DBUS_STATE_DL_PENDING,
+	DBUS_STATE_DL_DONE,
+	DBUS_STATE_UP,
+	DBUS_STATE_DOWN,
+	DBUS_STATE_PNP_FWDL,
+	DBUS_STATE_DISCONNECT,
+	DBUS_STATE_SLEEP
+};
+
+enum dbus_pnp_state {
+	DBUS_PNP_DISCONNECT,
+	DBUS_PNP_SLEEP,
+	DBUS_PNP_RESUME
+};
+
+enum dbus_file {
+    DBUS_FIRMWARE,
+    DBUS_NVFILE
+};
+
+typedef enum _DEVICE_SPEED {
+	INVALID_SPEED = -1,
+	LOW_SPEED     =  1,	/* USB 1.1: 1.5 Mbps */
+	FULL_SPEED,     	/* USB 1.1: 12  Mbps */
+	HIGH_SPEED,		/* USB 2.0: 480 Mbps */
+	SUPER_SPEED,		/* USB 3.0: 4.8 Gbps */
+} DEVICE_SPEED;
+
+typedef struct {
+	int bustype;
+	int vid;
+	int pid;
+	int devid;
+	int chiprev; /* chip revsion number */
+	int mtu;
+	int nchan; /* Data Channels */
+	int has_2nd_bulk_in_ep;
+} dbus_attrib_t;
+
+/* FIX: Account for errors related to DBUS;
+ * Let upper layer account for packets/bytes
+ */
+typedef struct {
+	uint32 rx_errors;
+	uint32 tx_errors;
+	uint32 rx_dropped;
+	uint32 tx_dropped;
+} dbus_stats_t;
+
+/*
+ * Configurable BUS parameters
+ */
+enum {
+	DBUS_CONFIG_ID_RXCTL_DEFERRES = 1,
+	DBUS_CONFIG_ID_TXRXQUEUE
+};
+typedef struct {
+	uint32 config_id;
+	union {
+		bool rxctl_deferrespok;
+		struct {
+			int maxrxq;
+			int rxbufsize;
+			int maxtxq;
+			int txbufsize;
+		} txrxqueue;
+	};
+} dbus_config_t;
+
+/*
+ * External Download Info
+ */
+typedef struct dbus_extdl {
+	uint8 *fw;
+	int fwlen;
+	uint8 *vars;
+	int varslen;
+} dbus_extdl_t;
+
+struct dbus_callbacks;
+struct exec_parms;
+
+typedef void *(*probe_cb_t)(void *arg, const char *desc, uint32 bustype, uint32 hdrlen);
+typedef void (*disconnect_cb_t)(void *arg);
+typedef void *(*exec_cb_t)(struct exec_parms *args);
+
+/* Client callbacks registered during dbus_attach() */
+typedef struct dbus_callbacks {
+	void (*send_complete)(void *cbarg, void *info, int status);
+	void (*recv_buf)(void *cbarg, uint8 *buf, int len);
+	void (*recv_pkt)(void *cbarg, void *pkt);
+	void (*txflowcontrol)(void *cbarg, bool onoff);
+	void (*errhandler)(void *cbarg, int err);
+	void (*ctl_complete)(void *cbarg, int type, int status);
+	void (*state_change)(void *cbarg, int state);
+	void *(*pktget)(void *cbarg, uint len, bool send);
+	void (*pktfree)(void *cbarg, void *p, bool send);
+} dbus_callbacks_t;
+
+struct dbus_pub;
+struct bcmstrbuf;
+struct dbus_irb;
+struct dbus_irb_rx;
+struct dbus_irb_tx;
+struct dbus_intf_callbacks;
+
+typedef struct {
+	void* (*attach)(struct dbus_pub *pub, void *cbarg, struct dbus_intf_callbacks *cbs);
+	void (*detach)(struct dbus_pub *pub, void *bus);
+
+	int (*up)(void *bus);
+	int (*down)(void *bus);
+	int (*send_irb)(void *bus, struct dbus_irb_tx *txirb);
+	int (*recv_irb)(void *bus, struct dbus_irb_rx *rxirb);
+	int (*cancel_irb)(void *bus, struct dbus_irb_tx *txirb);
+	int (*send_ctl)(void *bus, uint8 *buf, int len);
+	int (*recv_ctl)(void *bus, uint8 *buf, int len);
+	int (*get_stats)(void *bus, dbus_stats_t *stats);
+	int (*get_attrib)(void *bus, dbus_attrib_t *attrib);
+
+	int (*pnp)(void *bus, int evnt);
+	int (*remove)(void *bus);
+	int (*resume)(void *bus);
+	int (*suspend)(void *bus);
+	int (*stop)(void *bus);
+	int (*reset)(void *bus);
+
+	/* Access to bus buffers directly */
+	void *(*pktget)(void *bus, int len);
+	void (*pktfree)(void *bus, void *pkt);
+
+	int  (*iovar_op)(void *bus, const char *name, void *params, int plen, void *arg, int len,
+		bool set);
+	void (*dump)(void *bus, struct bcmstrbuf *strbuf);
+	int  (*set_config)(void *bus, dbus_config_t *config);
+	int  (*get_config)(void *bus, dbus_config_t *config);
+
+	bool (*device_exists)(void *bus);
+	bool (*dlneeded)(void *bus);
+	int  (*dlstart)(void *bus, uint8 *fw, int len);
+	int  (*dlrun)(void *bus);
+	bool (*recv_needed)(void *bus);
+
+	void *(*exec_rxlock)(void *bus, exec_cb_t func, struct exec_parms *args);
+	void *(*exec_txlock)(void *bus, exec_cb_t func, struct exec_parms *args);
+
+	int (*tx_timer_init)(void *bus);
+	int (*tx_timer_start)(void *bus, uint timeout);
+	int (*tx_timer_stop)(void *bus);
+
+	int (*sched_dpc)(void *bus);
+	int (*lock)(void *bus);
+	int (*unlock)(void *bus);
+	int (*sched_probe_cb)(void *bus);
+
+	int (*shutdown)(void *bus);
+
+	int (*recv_stop)(void *bus);
+	int (*recv_resume)(void *bus);
+
+	int (*recv_irb_from_ep)(void *bus, struct dbus_irb_rx *rxirb, uint ep_idx);
+
+	int (*readreg)(void *bus, uint32 regaddr, int datalen, uint32 *value);
+
+	/* Add from the bottom */
+} dbus_intf_t;
+
+typedef struct dbus_pub {
+	struct osl_info *osh;
+	dbus_stats_t stats;
+	dbus_attrib_t attrib;
+	enum dbus_state busstate;
+	DEVICE_SPEED device_speed;
+	int ntxq, nrxq, rxsize;
+	void *bus;
+	struct shared_info *sh;
+    void *dev_info;
+} dbus_pub_t;
+
+#define BUS_INFO(bus, type) (((type *) bus)->pub->bus)
+
+#define	ALIGNED_LOCAL_VARIABLE(var, align)					\
+	uint8	buffer[SDALIGN+64];						\
+	uint8	*var = (uint8 *)(((uintptr)&buffer[0]) & ~(align-1)) + align;
+
+/*
+ * Public Bus Function Interface
+ */
+
+/*
+ * FIX: Is there better way to pass OS/Host handles to DBUS but still
+ *      maintain common interface for all OS??
+ * Under NDIS, param1 needs to be MiniportHandle
+ *  For NDIS60, param2 is WdfDevice
+ * Under Linux, param1 and param2 are NULL;
+ */
+extern int dbus_register(int vid, int pid, probe_cb_t prcb, disconnect_cb_t discb, void *prarg,
+	void *param1, void *param2);
+extern int dbus_deregister(void);
+
+extern dbus_pub_t *dbus_attach(struct osl_info *osh, int rxsize, int nrxq, int ntxq,
+	void *cbarg, dbus_callbacks_t *cbs, dbus_extdl_t *extdl, struct shared_info *sh);
+extern void dbus_detach(dbus_pub_t *pub);
+
+extern int dbus_up(dbus_pub_t *pub);
+extern int dbus_down(dbus_pub_t *pub);
+extern int dbus_stop(dbus_pub_t *pub);
+extern int dbus_shutdown(dbus_pub_t *pub);
+extern void dbus_flowctrl_rx(dbus_pub_t *pub, bool on);
+
+extern int dbus_send_txdata(dbus_pub_t *dbus, void *pktbuf);
+extern int dbus_send_buf(dbus_pub_t *pub, uint8 *buf, int len, void *info);
+extern int dbus_send_pkt(dbus_pub_t *pub, void *pkt, void *info);
+extern int dbus_send_ctl(dbus_pub_t *pub, uint8 *buf, int len);
+extern int dbus_recv_ctl(dbus_pub_t *pub, uint8 *buf, int len);
+extern int dbus_recv_bulk(dbus_pub_t *pub, uint32 ep_idx);
+extern int dbus_poll_intr(dbus_pub_t *pub);
+extern int dbus_get_stats(dbus_pub_t *pub, dbus_stats_t *stats);
+extern int dbus_get_attrib(dbus_pub_t *pub, dbus_attrib_t *attrib);
+extern int dbus_get_device_speed(dbus_pub_t *pub);
+extern int dbus_set_config(dbus_pub_t *pub, dbus_config_t *config);
+extern int dbus_get_config(dbus_pub_t *pub, dbus_config_t *config);
+extern void * dbus_get_devinfo(dbus_pub_t *pub);
+
+extern void *dbus_pktget(dbus_pub_t *pub, int len);
+extern void dbus_pktfree(dbus_pub_t *pub, void* pkt);
+
+extern int dbus_set_errmask(dbus_pub_t *pub, uint32 mask);
+extern int dbus_pnp_sleep(dbus_pub_t *pub);
+extern int dbus_pnp_resume(dbus_pub_t *pub, int *fw_reload);
+extern int dbus_pnp_disconnect(dbus_pub_t *pub);
+
+extern int dbus_iovar_op(dbus_pub_t *pub, const char *name,
+	void *params, int plen, void *arg, int len, bool set);
+
+extern void *dhd_dbus_txq(const dbus_pub_t *pub);
+extern uint dhd_dbus_hdrlen(const dbus_pub_t *pub);
+
+/*
+ * Private Common Bus Interface
+ */
+
+/* IO Request Block (IRB) */
+typedef struct dbus_irb {
+	struct dbus_irb *next;	/* it's casted from dbus_irb_tx or dbus_irb_rx struct */
+} dbus_irb_t;
+
+typedef struct dbus_irb_rx {
+	struct dbus_irb irb; /* Must be first */
+	uint8 *buf;
+	int buf_len;
+	int actual_len;
+	void *pkt;
+	void *info;
+	void *arg;
+} dbus_irb_rx_t;
+
+typedef struct dbus_irb_tx {
+	struct dbus_irb irb; /* Must be first */
+	uint8 *buf;
+	int len;
+	void *pkt;
+	int retry_count;
+	void *info;
+	void *arg;
+	void *send_buf; /* linear  bufffer for LINUX when aggreagtion is enabled */
+} dbus_irb_tx_t;
+
+/* DBUS interface callbacks are different from user callbacks
+ * so, internally, different info can be passed to upper layer
+ */
+typedef struct dbus_intf_callbacks {
+	void (*send_irb_timeout)(void *cbarg, dbus_irb_tx_t *txirb);
+	void (*send_irb_complete)(void *cbarg, dbus_irb_tx_t *txirb, int status);
+	void (*recv_irb_complete)(void *cbarg, dbus_irb_rx_t *rxirb, int status);
+	void (*errhandler)(void *cbarg, int err);
+	void (*ctl_complete)(void *cbarg, int type, int status);
+	void (*state_change)(void *cbarg, int state);
+	bool (*isr)(void *cbarg, bool *wantdpc);
+	bool (*dpc)(void *cbarg, bool bounded);
+	void (*watchdog)(void *cbarg);
+	void *(*pktget)(void *cbarg, uint len, bool send);
+	void (*pktfree)(void *cbarg, void *p, bool send);
+	struct dbus_irb* (*getirb)(void *cbarg, bool send);
+	void (*rxerr_indicate)(void *cbarg, bool on);
+} dbus_intf_callbacks_t;
+
+/*
+ * Porting: To support new bus, port these functions below
+ */
+
+/*
+ * Bus specific Interface
+ * Implemented by dbus_usb.c/dbus_sdio.c
+ */
+extern int dbus_bus_register(int vid, int pid, probe_cb_t prcb, disconnect_cb_t discb, void *prarg,
+	dbus_intf_t **intf, void *param1, void *param2);
+extern int dbus_bus_deregister(void);
+extern void dbus_bus_fw_get(void *bus, uint8 **fw, int *fwlen, int *decomp);
+
+/*
+ * Bus-specific and OS-specific Interface
+ * Implemented by dbus_usb_[linux/ndis].c/dbus_sdio_[linux/ndis].c
+ */
+extern int dbus_bus_osl_register(int vid, int pid, probe_cb_t prcb, disconnect_cb_t discb,
+	void *prarg, dbus_intf_t **intf, void *param1, void *param2);
+extern int dbus_bus_osl_deregister(void);
+
+/*
+ * Bus-specific, OS-specific, HW-specific Interface
+ * Mainly for SDIO Host HW controller
+ */
+extern int dbus_bus_osl_hw_register(int vid, int pid, probe_cb_t prcb, disconnect_cb_t discb,
+	void *prarg, dbus_intf_t **intf);
+extern int dbus_bus_osl_hw_deregister(void);
+
+extern uint usbdev_bulkin_eps(void);
+#if defined(BCM_REQUEST_FW)
+extern void *dbus_get_fw_nvfile(int devid, uint8 **fw, int *fwlen, int type,
+  uint16 boardtype, uint16 boardrev);
+extern void dbus_release_fw_nvfile(void *firmware);
+#endif  /* #if defined(BCM_REQUEST_FW) */
+
+
+#if defined(EHCI_FASTPATH_TX) || defined(EHCI_FASTPATH_RX)
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+	/* Backward compatibility */
+	typedef unsigned int gfp_t;
+
+	#define dma_pool pci_pool
+	#define dma_pool_create(name, dev, size, align, alloc) \
+		pci_pool_create(name, dev, size, align, alloc, GFP_DMA | GFP_ATOMIC)
+	#define dma_pool_destroy(pool) pci_pool_destroy(pool)
+	#define dma_pool_alloc(pool, flags, handle) pci_pool_alloc(pool, flags, handle)
+	#define dma_pool_free(pool, vaddr, addr) pci_pool_free(pool, vaddr, addr)
+
+	#define dma_map_single(dev, addr, size, dir)	pci_map_single(dev, addr, size, dir)
+	#define dma_unmap_single(dev, hnd, size, dir)	pci_unmap_single(dev, hnd, size, dir)
+	#define DMA_FROM_DEVICE PCI_DMA_FROMDEVICE
+	#define DMA_TO_DEVICE PCI_DMA_TODEVICE
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */
+
+/* Availability of these functions varies (when present, they have two arguments) */
+#ifndef hc32_to_cpu
+	#define hc32_to_cpu(x)	le32_to_cpu(x)
+	#define cpu_to_hc32(x)	cpu_to_le32(x)
+	typedef unsigned int __hc32;
+#else
+	#error Two-argument functions needed
+#endif
+
+/* Private USB opcode base */
+#define EHCI_FASTPATH		0x31
+#define	EHCI_SET_EP_BYPASS	EHCI_FASTPATH
+#define	EHCI_SET_BYPASS_CB	(EHCI_FASTPATH + 1)
+#define	EHCI_SET_BYPASS_DEV	(EHCI_FASTPATH + 2)
+#define	EHCI_DUMP_STATE		(EHCI_FASTPATH + 3)
+#define	EHCI_SET_BYPASS_POOL	(EHCI_FASTPATH + 4)
+#define	EHCI_CLR_EP_BYPASS	(EHCI_FASTPATH + 5)
+
+/*
+ * EHCI QTD structure (hardware and extension)
+ * NOTE that is does not need to (and does not) match its kernel counterpart
+ */
+#define EHCI_QTD_NBUFFERS       5
+#define EHCI_QTD_ALIGN  	32
+#define EHCI_BULK_PACKET_SIZE	512
+#define EHCI_QTD_XACTERR_MAX	32
+
+struct ehci_qtd {
+	/* Hardware map */
+	volatile uint32_t	qtd_next;
+	volatile uint32_t	qtd_altnext;
+	volatile uint32_t	qtd_status;
+#define	EHCI_QTD_GET_BYTES(x)	(((x)>>16) & 0x7fff)
+#define	EHCI_QTD_IOC            0x00008000
+#define	EHCI_QTD_GET_CERR(x)	(((x)>>10) & 0x3)
+#define EHCI_QTD_SET_CERR(x)    ((x) << 10)
+#define	EHCI_QTD_GET_PID(x)	(((x)>>8) & 0x3)
+#define EHCI_QTD_SET_PID(x)     ((x) <<  8)
+#define EHCI_QTD_ACTIVE         0x80
+#define EHCI_QTD_HALTED         0x40
+#define EHCI_QTD_BUFERR         0x20
+#define EHCI_QTD_BABBLE         0x10
+#define EHCI_QTD_XACTERR        0x08
+#define EHCI_QTD_MISSEDMICRO    0x04
+	volatile uint32_t 	qtd_buffer[EHCI_QTD_NBUFFERS];
+	volatile uint32_t 	qtd_buffer_hi[EHCI_QTD_NBUFFERS];
+
+	/* Implementation extension */
+	dma_addr_t		qtd_self;		/* own hardware address */
+	struct ehci_qtd		*obj_next;		/* software link to the next QTD */
+	void			*rpc;			/* pointer to the rpc buffer */
+	size_t			length;			/* length of the data in the buffer */
+	void			*buff;			/* pointer to the reassembly buffer */
+	int			xacterrs;		/* retry counter for qtd xact error */
+} __attribute__ ((aligned(EHCI_QTD_ALIGN)));
+
+#define	EHCI_NULL	__constant_cpu_to_le32(1) /* HW null pointer shall be odd */
+
+#define SHORT_READ_Q(token) (EHCI_QTD_GET_BYTES(token) != 0 && EHCI_QTD_GET_PID(token) == 1)
+
+/* Queue Head */
+/* NOTE This structure is slightly different from the one in the kernel; but needs to stay
+ * compatible
+ */
+struct ehci_qh {
+	/* Hardware map */
+	volatile uint32_t 	qh_link;
+	volatile uint32_t 	qh_endp;
+	volatile uint32_t 	qh_endphub;
+	volatile uint32_t 	qh_curqtd;
+
+	/* QTD overlay */
+	volatile uint32_t	ow_next;
+	volatile uint32_t	ow_altnext;
+	volatile uint32_t	ow_status;
+	volatile uint32_t	ow_buffer [EHCI_QTD_NBUFFERS];
+	volatile uint32_t	ow_buffer_hi [EHCI_QTD_NBUFFERS];
+
+	/* Extension (should match the kernel layout) */
+	dma_addr_t		unused0;
+	void 			*unused1;
+	struct list_head	unused2;
+	struct ehci_qtd		*dummy;
+	struct ehci_qh		*unused3;
+
+	struct ehci_hcd		*unused4;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+	struct kref		unused5;
+	unsigned		unused6;
+
+	uint8_t			unused7;
+
+	/* periodic schedule info */
+	uint8_t			unused8;
+	uint8_t			unused9;
+	uint8_t			unused10;
+	uint16_t		unused11;
+	uint16_t		unused12;
+	uint16_t		unused13;
+	struct usb_device	*unused14;
+#else
+	unsigned		unused5;
+
+	u8			unused6;
+
+	/* periodic schedule info */
+	u8			unused7;
+	u8			unused8;
+	u8			unused9;
+	unsigned short		unused10;
+	unsigned short		unused11;
+#define NO_FRAME ((unsigned short)~0)
+#ifdef EHCI_QUIRK_FIX
+	struct usb_device	*unused12;
+#endif /* EHCI_QUIRK_FIX */
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */
+	struct ehci_qtd		*first_qtd;
+		/* Link to the first QTD; this is an optimized equivalent of the qtd_list field */
+		/* NOTE that ehci_qh in ehci.h shall reserve this word */
+} __attribute__ ((aligned(EHCI_QTD_ALIGN)));
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+/* The corresponding structure in the kernel is used to get the QH */
+struct hcd_dev {	/* usb_device.hcpriv points to this */
+	struct list_head	unused0;
+	struct list_head	unused1;
+
+	/* array of QH pointers */
+	void			*ep[32];
+};
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */
+
+int optimize_qtd_fill_with_rpc(const dbus_pub_t *pub,  int epn, struct ehci_qtd *qtd, void *rpc,
+	int token, int len);
+int optimize_qtd_fill_with_data(const dbus_pub_t *pub, int epn, struct ehci_qtd *qtd, void *data,
+	int token, int len);
+int optimize_submit_async(struct ehci_qtd *qtd, int epn);
+void inline optimize_ehci_qtd_init(struct ehci_qtd *qtd, dma_addr_t dma);
+struct ehci_qtd *optimize_ehci_qtd_alloc(gfp_t flags);
+void optimize_ehci_qtd_free(struct ehci_qtd *qtd);
+void optimize_submit_rx_request(const dbus_pub_t *pub, int epn, struct ehci_qtd *qtd_in, void *buf);
+#endif /* EHCI_FASTPATH_TX || EHCI_FASTPATH_RX */
+
+void  dbus_flowctrl_tx(void *dbi, bool on);
+#endif /* __DBUS_H__ */
diff --git a/drivers/net/wireless/bcmdhd/include/devctrl_if/wlioctl_defs.h b/drivers/net/wireless/bcmdhd/include/devctrl_if/wlioctl_defs.h
new file mode 100644
index 0000000..03bfabb
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/devctrl_if/wlioctl_defs.h
@@ -0,0 +1,2063 @@
+/*
+ * Custom OID/ioctl definitions for
+ * Broadcom 802.11abg Networking Device Driver
+ *
+ * Definitions subject to change without notice.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wlioctl_defs.h 403826 2013-05-22 16:40:55Z $
+ */
+
+
+#ifndef wlioctl_defs_h
+#define wlioctl_defs_h
+
+
+
+
+
+/* All builds use the new 11ac ratespec/chanspec */
+#undef  D11AC_IOTYPES
+#define D11AC_IOTYPES
+
+/* WL_RSPEC defines for rate information */
+#define WL_RSPEC_RATE_MASK      0x000000FF      /* rate or HT MCS value */
+#define WL_RSPEC_VHT_MCS_MASK   0x0000000F      /* VHT MCS value */
+#define WL_RSPEC_VHT_NSS_MASK   0x000000F0      /* VHT Nss value */
+#define WL_RSPEC_VHT_NSS_SHIFT  4               /* VHT Nss value shift */
+#define WL_RSPEC_TXEXP_MASK     0x00000300
+#define WL_RSPEC_TXEXP_SHIFT    8
+#define WL_RSPEC_BW_MASK        0x00070000      /* bandwidth mask */
+#define WL_RSPEC_BW_SHIFT       16              /* bandwidth shift */
+#define WL_RSPEC_STBC           0x00100000      /* STBC encoding, Nsts = 2 x Nss */
+#define WL_RSPEC_TXBF           0x00200000      /* bit indicates TXBF mode */
+#define WL_RSPEC_LDPC           0x00400000      /* bit indicates adv coding in use */
+#define WL_RSPEC_SGI            0x00800000      /* Short GI mode */
+#define WL_RSPEC_ENCODING_MASK  0x03000000      /* Encoding of Rate/MCS field */
+#define WL_RSPEC_OVERRIDE_RATE  0x40000000      /* bit indicate to override mcs only */
+#define WL_RSPEC_OVERRIDE_MODE  0x80000000      /* bit indicates override both rate & mode */
+
+/* WL_RSPEC_ENCODING field defs */
+#define WL_RSPEC_ENCODE_RATE    0x00000000      /* Legacy rate is stored in RSPEC_RATE_MASK */
+#define WL_RSPEC_ENCODE_HT      0x01000000      /* HT MCS is stored in RSPEC_RATE_MASK */
+#define WL_RSPEC_ENCODE_VHT     0x02000000      /* VHT MCS and Nss is stored in RSPEC_RATE_MASK */
+
+/* WL_RSPEC_BW field defs */
+#define WL_RSPEC_BW_UNSPECIFIED 0
+#define WL_RSPEC_BW_20MHZ       0x00010000
+#define WL_RSPEC_BW_40MHZ       0x00020000
+#define WL_RSPEC_BW_80MHZ       0x00030000
+#define WL_RSPEC_BW_160MHZ      0x00040000
+
+/* Legacy defines for the nrate iovar */
+#define OLD_NRATE_MCS_INUSE         0x00000080 /* MSC in use,indicates b0-6 holds an mcs */
+#define OLD_NRATE_RATE_MASK         0x0000007f /* rate/mcs value */
+#define OLD_NRATE_STF_MASK          0x0000ff00 /* stf mode mask: siso, cdd, stbc, sdm */
+#define OLD_NRATE_STF_SHIFT         8          /* stf mode shift */
+#define OLD_NRATE_OVERRIDE          0x80000000 /* bit indicates override both rate & mode */
+#define OLD_NRATE_OVERRIDE_MCS_ONLY 0x40000000 /* bit indicate to override mcs only */
+#define OLD_NRATE_SGI               0x00800000 /* sgi mode */
+#define OLD_NRATE_LDPC_CODING       0x00400000 /* bit indicates adv coding in use */
+
+#define OLD_NRATE_STF_SISO	0		/* stf mode SISO */
+#define OLD_NRATE_STF_CDD	1		/* stf mode CDD */
+#define OLD_NRATE_STF_STBC	2		/* stf mode STBC */
+#define OLD_NRATE_STF_SDM	3		/* stf mode SDM */
+
+#define HIGHEST_SINGLE_STREAM_MCS	7 /* MCS values greater than this enable multiple streams */
+
+/* given a proprietary MCS, get number of spatial streams */
+#define GET_PROPRIETARY_11N_MCS_NSS(mcs) (1 + ((mcs) - 85) / 8)
+
+#define GET_11N_MCS_NSS(mcs) ((mcs) < 32 ? (1 + ((mcs) / 8)) \
+				: ((mcs) == 32 ? 1 : GET_PROPRIETARY_11N_MCS_NSS(mcs)))
+
+#define MAX_CCA_CHANNELS 38	/* Max number of 20 Mhz wide channels */
+#define MAX_CCA_SECS	60	/* CCA keeps this many seconds history */
+
+#define IBSS_MED        15	/* Mediom in-bss congestion percentage */
+#define IBSS_HI         25	/* Hi in-bss congestion percentage */
+#define OBSS_MED        12
+#define OBSS_HI         25
+#define INTERFER_MED    5
+#define INTERFER_HI     10
+
+#define  CCA_FLAG_2G_ONLY		0x01	/* Return a channel from 2.4 Ghz band */
+#define  CCA_FLAG_5G_ONLY		0x02	/* Return a channel from 2.4 Ghz band */
+#define  CCA_FLAG_IGNORE_DURATION	0x04	/* Ignore dwell time for each channel */
+#define  CCA_FLAGS_PREFER_1_6_11	0x10
+#define  CCA_FLAG_IGNORE_INTERFER 	0x20 /* do not exlude channel based on interfer level */
+
+#define CCA_ERRNO_BAND 		1	/* After filtering for band pref, no choices left */
+#define CCA_ERRNO_DURATION	2	/* After filtering for duration, no choices left */
+#define CCA_ERRNO_PREF_CHAN	3	/* After filtering for chan pref, no choices left */
+#define CCA_ERRNO_INTERFER	4	/* After filtering for interference, no choices left */
+#define CCA_ERRNO_TOO_FEW	5	/* Only 1 channel was input */
+
+#define WL_STA_AID(a)		((a) &~ 0xc000)
+
+/* Flags for sta_info_t indicating properties of STA */
+#define WL_STA_BRCM		0x00000001	/* Running a Broadcom driver */
+#define WL_STA_WME		0x00000002	/* WMM association */
+#define WL_STA_NONERP		0x00000004	/* No ERP */
+#define WL_STA_AUTHE		0x00000008	/* Authenticated */
+#define WL_STA_ASSOC		0x00000010	/* Associated */
+#define WL_STA_AUTHO		0x00000020	/* Authorized */
+#define WL_STA_WDS		0x00000040	/* Wireless Distribution System */
+#define WL_STA_WDS_LINKUP	0x00000080	/* WDS traffic/probes flowing properly */
+#define WL_STA_PS		0x00000100	/* STA is in power save mode from AP's viewpoint */
+#define WL_STA_APSD_BE		0x00000200	/* APSD delv/trigger for AC_BE is default enabled */
+#define WL_STA_APSD_BK		0x00000400	/* APSD delv/trigger for AC_BK is default enabled */
+#define WL_STA_APSD_VI		0x00000800	/* APSD delv/trigger for AC_VI is default enabled */
+#define WL_STA_APSD_VO		0x00001000	/* APSD delv/trigger for AC_VO is default enabled */
+#define WL_STA_N_CAP		0x00002000	/* STA 802.11n capable */
+#define WL_STA_SCBSTATS		0x00004000	/* Per STA debug stats */
+#define WL_STA_AMPDU_CAP	0x00008000	/* STA AMPDU capable */
+#define WL_STA_AMSDU_CAP	0x00010000	/* STA AMSDU capable */
+#define WL_STA_MIMO_PS		0x00020000	/* mimo ps mode is enabled */
+#define WL_STA_MIMO_RTS		0x00040000	/* send rts in mimo ps mode */
+#define WL_STA_RIFS_CAP		0x00080000	/* rifs enabled */
+#define WL_STA_VHT_CAP		0x00100000	/* STA VHT(11ac) capable */
+#define WL_STA_WPS		0x00200000	/* WPS state */
+
+#define WL_WDS_LINKUP		WL_STA_WDS_LINKUP	/* deprecated */
+
+/* STA HT cap fields */
+#define WL_STA_CAP_LDPC_CODING		0x0001	/* Support for rx of LDPC coded pkts */
+#define WL_STA_CAP_40MHZ		0x0002  /* FALSE:20Mhz, TRUE:20/40MHZ supported */
+#define WL_STA_CAP_MIMO_PS_MASK		0x000C  /* Mimo PS mask */
+#define WL_STA_CAP_MIMO_PS_SHIFT	0x0002	/* Mimo PS shift */
+#define WL_STA_CAP_MIMO_PS_OFF		0x0003	/* Mimo PS, no restriction */
+#define WL_STA_CAP_MIMO_PS_RTS		0x0001	/* Mimo PS, send RTS/CTS around MIMO frames */
+#define WL_STA_CAP_MIMO_PS_ON		0x0000	/* Mimo PS, MIMO disallowed */
+#define WL_STA_CAP_GF			0x0010	/* Greenfield preamble support */
+#define WL_STA_CAP_SHORT_GI_20		0x0020	/* 20MHZ short guard interval support */
+#define WL_STA_CAP_SHORT_GI_40		0x0040	/* 40Mhz short guard interval support */
+#define WL_STA_CAP_TX_STBC		0x0080	/* Tx STBC support */
+#define WL_STA_CAP_RX_STBC_MASK		0x0300	/* Rx STBC mask */
+#define WL_STA_CAP_RX_STBC_SHIFT	8	/* Rx STBC shift */
+#define WL_STA_CAP_DELAYED_BA		0x0400	/* delayed BA support */
+#define WL_STA_CAP_MAX_AMSDU		0x0800	/* Max AMSDU size in bytes , 0=3839, 1=7935 */
+#define WL_STA_CAP_DSSS_CCK		0x1000	/* DSSS/CCK supported by the BSS */
+#define WL_STA_CAP_PSMP			0x2000	/* Power Save Multi Poll support */
+#define WL_STA_CAP_40MHZ_INTOLERANT	0x4000	/* 40MHz Intolerant */
+#define WL_STA_CAP_LSIG_TXOP		0x8000	/* L-SIG TXOP protection support */
+
+#define WL_STA_CAP_RX_STBC_NO		0x0	/* no rx STBC support */
+#define WL_STA_CAP_RX_STBC_ONE_STREAM	0x1	/* rx STBC support of 1 spatial stream */
+#define WL_STA_CAP_RX_STBC_TWO_STREAM	0x2	/* rx STBC support of 1-2 spatial streams */
+#define WL_STA_CAP_RX_STBC_THREE_STREAM	0x3	/* rx STBC support of 1-3 spatial streams */
+
+/* scb vht flags */
+#define WL_STA_VHT_LDPCCAP	0x0001
+#define WL_STA_SGI80		0x0002
+#define WL_STA_SGI160		0x0004
+#define WL_STA_VHT_TX_STBCCAP	0x0008
+#define WL_STA_VHT_RX_STBCCAP	0x0010
+#define WL_STA_SU_BEAMFORMER	0x0020
+#define WL_STA_SU_BEAMFORMEE	0x0040
+#define WL_STA_MU_BEAMFORMER	0x0080
+#define WL_STA_MU_BEAMFORMEE	0x0100
+#define WL_STA_VHT_TXOP_PS	0x0200
+#define WL_STA_HTC_VHT_CAP	0x0400
+
+/* Values for TX Filter override mode */
+#define WLC_TXFILTER_OVERRIDE_DISABLED  0
+#define WLC_TXFILTER_OVERRIDE_ENABLED   1
+
+#define WL_IOCTL_ACTION_GET				0x0
+#define WL_IOCTL_ACTION_SET				0x1
+#define WL_IOCTL_ACTION_OVL_IDX_MASK	0x1e
+#define WL_IOCTL_ACTION_OVL_RSV			0x20
+#define WL_IOCTL_ACTION_OVL				0x40
+#define WL_IOCTL_ACTION_MASK			0x7e
+#define WL_IOCTL_ACTION_OVL_SHIFT		1
+
+#define WL_BSSTYPE_INFRA 1
+#define WL_BSSTYPE_INDEP 0
+#define WL_BSSTYPE_ANY   2
+
+/* Bitmask for scan_type */
+#define WL_SCANFLAGS_PASSIVE	0x01	/* force passive scan */
+#define WL_SCANFLAGS_RESERVED	0x02	/* Reserved */
+#define WL_SCANFLAGS_PROHIBITED	0x04	/* allow scanning prohibited channels */
+#define WL_SCANFLAGS_OFFCHAN	0x08	/* allow scanning/reporting off-channel APs */
+#define WL_SCANFLAGS_HOTSPOT	0x10	/* automatic ANQP to hotspot APs */
+#define WL_SCANFLAGS_SWTCHAN	0x20	/* Force channel switch for differerent bandwidth */
+
+/* wl_iscan_results status values */
+#define WL_SCAN_RESULTS_SUCCESS	0
+#define WL_SCAN_RESULTS_PARTIAL	1
+#define WL_SCAN_RESULTS_PENDING	2
+#define WL_SCAN_RESULTS_ABORTED	3
+#define WL_SCAN_RESULTS_NO_MEM  4
+
+#define SCANOL_ENABLED			(1 << 0)
+#define SCANOL_BCAST_SSID		(1 << 1)
+#define SCANOL_NOTIFY_BCAST_SSID	(1 << 2)
+#define SCANOL_RESULTS_PER_CYCLE	(1 << 3)
+
+/* scan times in milliseconds */
+#define SCANOL_HOME_TIME		45	/* for home channel processing */
+#define SCANOL_ASSOC_TIME		20	/* dwell on a channel while associated */
+#define SCANOL_UNASSOC_TIME		40	/* dwell on a channel while unassociated */
+#define SCANOL_PASSIVE_TIME		110	/* listen on a channelfor passive scan */
+#define SCANOL_AWAY_LIMIT		100	/* max time to be away from home channel */
+#define SCANOL_IDLE_REST_TIME		40
+#define SCANOL_IDLE_REST_MULTIPLIER	0
+#define SCANOL_ACTIVE_REST_TIME		20
+#define SCANOL_ACTIVE_REST_MULTIPLIER	0
+#define SCANOL_CYCLE_IDLE_REST_TIME	300000	/* Idle Rest Time between Scan Cycle (msec) */
+#define SCANOL_CYCLE_IDLE_REST_MULTIPLIER	0	/* Idle Rest Time Multiplier */
+#define SCANOL_CYCLE_ACTIVE_REST_TIME	200
+#define SCANOL_CYCLE_ACTIVE_REST_MULTIPLIER	0
+#define SCANOL_MAX_REST_TIME		3600000	/* max rest time between scan cycle (msec) */
+#define SCANOL_CYCLE_DEFAULT		0	/* default for Max Scan Cycle, 0 = forever */
+#define SCANOL_CYCLE_MAX		864000	/* Max Scan Cycle */
+						/* 10 sec/scan cycle => 100 days */
+#define SCANOL_NPROBES			2	/* for Active scan; send n probes on each channel */
+#define SCANOL_NPROBES_MAX		5	/* for Active scan; send n probes on each channel */
+#define SCANOL_SCAN_START_DLY		10	/* delay start of offload scan (sec) */
+#define SCANOL_SCAN_START_DLY_MAX	240	/* delay start of offload scan (sec) */
+#define SCANOL_MULTIPLIER_MAX		10	/* Max Multiplier */
+#define SCANOL_UNASSOC_TIME_MAX		100	/* max dwell on a channel while unassociated */
+#define SCANOL_PASSIVE_TIME_MAX		500	/* max listen on a channel for passive scan */
+#define SCANOL_SSID_MAX			16	/* max supported preferred SSID */
+
+/* masks for channel and ssid count */
+#define WL_SCAN_PARAMS_COUNT_MASK 0x0000ffff
+#define WL_SCAN_PARAMS_NSSID_SHIFT 16
+
+#define WL_SCAN_ACTION_START      1
+#define WL_SCAN_ACTION_CONTINUE   2
+#define WL_SCAN_ACTION_ABORT      3
+
+
+#define ANTENNA_NUM_1	1		/* total number of antennas to be used */
+#define ANTENNA_NUM_2	2
+#define ANTENNA_NUM_3	3
+#define ANTENNA_NUM_4	4
+
+#define ANT_SELCFG_AUTO		0x80	/* bit indicates antenna sel AUTO */
+#define ANT_SELCFG_MASK		0x33	/* antenna configuration mask */
+#define ANT_SELCFG_TX_UNICAST	0	/* unicast tx antenna configuration */
+#define ANT_SELCFG_RX_UNICAST	1	/* unicast rx antenna configuration */
+#define ANT_SELCFG_TX_DEF	2	/* default tx antenna configuration */
+#define ANT_SELCFG_RX_DEF	3	/* default rx antenna configuration */
+
+/* interference source detection and identification mode */
+#define ITFR_MODE_DISABLE	0	/* disable feature */
+#define ITFR_MODE_MANUAL_ENABLE	1	/* enable manual detection */
+#define ITFR_MODE_AUTO_ENABLE	2	/* enable auto detection */
+
+/* bit definitions for flags in interference source report */
+#define ITFR_INTERFERENCED	1	/* interference detected */
+#define ITFR_HOME_CHANNEL	2	/* home channel has interference */
+#define ITFR_NOISY_ENVIRONMENT	4	/* noisy environemnt so feature stopped */
+
+#define WL_NUM_RPI_BINS		8
+#define WL_RM_TYPE_BASIC	1
+#define WL_RM_TYPE_CCA		2
+#define WL_RM_TYPE_RPI		3
+#define WL_RM_TYPE_ABORT	-1	/* ABORT any in-progress RM request */
+
+#define WL_RM_FLAG_PARALLEL	(1<<0)
+
+#define WL_RM_FLAG_LATE		(1<<1)
+#define WL_RM_FLAG_INCAPABLE	(1<<2)
+#define WL_RM_FLAG_REFUSED	(1<<3)
+
+/* flags */
+#define WLC_ASSOC_REQ_IS_REASSOC 0x01 /* assoc req was actually a reassoc */
+
+#define WLC_CIS_DEFAULT	0	/* built-in default */
+#define WLC_CIS_SROM	1	/* source is sprom */
+#define WLC_CIS_OTP	2	/* source is otp */
+
+/* PCL - Power Control Loop */
+/* current gain setting is replaced by user input */
+#define WL_ATTEN_APP_INPUT_PCL_OFF	0	/* turn off PCL, apply supplied input */
+#define WL_ATTEN_PCL_ON			1	/* turn on PCL */
+/* current gain setting is maintained */
+#define WL_ATTEN_PCL_OFF		2	/* turn off PCL. */
+
+#define	PLC_CMD_FAILOVER	1
+#define	PLC_CMD_MAC_COST	2
+#define	PLC_CMD_LINK_COST	3
+#define	PLC_CMD_NODE_LIST	4
+
+#define NODE_TYPE_UNKNOWN	0	/* Unknown link */
+#define NODE_TYPE_WIFI_ONLY	1	/* Pure Wireless STA node */
+#define NODE_TYPE_PLC_ONLY	2	/* Pure PLC only node */
+#define NODE_TYPE_WIFI_PLC	3	/* WiFi PLC capable node */
+
+/* defines used by poweridx iovar - it controls power in a-band */
+/* current gain setting is maintained */
+#define WL_PWRIDX_PCL_OFF	-2	/* turn off PCL.  */
+#define WL_PWRIDX_PCL_ON	-1	/* turn on PCL */
+#define WL_PWRIDX_LOWER_LIMIT	-2	/* lower limit */
+#define WL_PWRIDX_UPPER_LIMIT	63	/* upper limit */
+/* value >= 0 causes
+ *	- input to be set to that value
+ *	- PCL to be off
+ */
+
+#define BCM_MAC_STATUS_INDICATION	(0x40010200L)
+
+/* Values for TX Filter override mode */
+#define WLC_TXFILTER_OVERRIDE_DISABLED  0
+#define WLC_TXFILTER_OVERRIDE_ENABLED   1
+
+/* magic pattern used for mismatch driver and wl */
+#define WL_TXFIFO_SZ_MAGIC	0xa5a5
+
+/* check this magic number */
+#define WLC_IOCTL_MAGIC		0x14e46c77
+
+/* bss_info_cap_t flags */
+#define WL_BSS_FLAGS_FROM_BEACON	0x01	/* bss_info derived from beacon */
+#define WL_BSS_FLAGS_FROM_CACHE		0x02	/* bss_info collected from cache */
+#define WL_BSS_FLAGS_RSSI_ONCHANNEL	0x04	/* rssi info received on channel (vs offchannel) */
+#define WL_BSS_FLAGS_HS20		0x08	/* hotspot 2.0 capable */
+#define WL_BSS_FLAGS_RSSI_INVALID	0x10	/* BSS contains invalid RSSI */
+#define WL_BSS_FLAGS_RSSI_INACCURATE	0x20	/* BSS contains inaccurate RSSI */
+#define WL_BSS_FLAGS_SNR_INVALID	0x40	/* BSS contains invalid SNR */
+#define WL_BSS_FLAGS_NF_INVALID		0x80	/* BSS contains invalid noise floor */
+
+/* bssinfo flag for nbss_cap */
+#define VHT_BI_SGI_80MHZ			0x00000100
+#define VHT_BI_80MHZ			    0x00000200
+#define VHT_BI_160MHZ			    0x00000400
+#define VHT_BI_8080MHZ			    0x00000800
+
+/* reference to wl_ioctl_t struct used by usermode driver */
+#define ioctl_subtype	set		/* subtype param */
+#define ioctl_pid	used		/* pid param */
+#define ioctl_status	needed		/* status param */
+
+
+/* Enumerate crypto algorithms */
+#define	CRYPTO_ALGO_OFF			0
+#define	CRYPTO_ALGO_WEP1		1
+#define	CRYPTO_ALGO_TKIP		2
+#define	CRYPTO_ALGO_WEP128		3
+#define CRYPTO_ALGO_AES_CCM		4
+#define CRYPTO_ALGO_AES_OCB_MSDU	5
+#define CRYPTO_ALGO_AES_OCB_MPDU	6
+#if !defined(BCMEXTCCX)
+#define CRYPTO_ALGO_NALG		7
+#else
+#define CRYPTO_ALGO_CKIP		7
+#define CRYPTO_ALGO_CKIP_MMH	8
+#define CRYPTO_ALGO_WEP_MMH		9
+#define CRYPTO_ALGO_NALG		10
+#endif 
+
+#define CRYPTO_ALGO_SMS4		11
+#define CRYPTO_ALGO_PMK			12	/* for 802.1x supp to set PMK before 4-way */
+#define CRYPTO_ALGO_BIP			13  /* 802.11w BIP (aes cmac) */
+
+#define CRYPTO_ALGO_AES_GCM     14  /* 128 bit GCM */
+#define CRYPTO_ALGO_AES_CCM256  15  /* 256 bit CCM */
+#define CRYPTO_ALGO_AES_GCM256  16  /* 256 bit GCM */
+#define CRYPTO_ALGO_BIP_CMAC256 17  /* 256 bit BIP CMAC */
+#define CRYPTO_ALGO_BIP_GMAC    18  /* 128 bit BIP GMAC */
+#define CRYPTO_ALGO_BIP_GMAC256 19  /* 256 bit BIP GMAC */
+
+#define CRYPTO_ALGO_NONE        CRYPTO_ALGO_OFF
+
+#define WSEC_GEN_MIC_ERROR	0x0001
+#define WSEC_GEN_REPLAY		0x0002
+#define WSEC_GEN_ICV_ERROR	0x0004
+#define WSEC_GEN_MFP_ACT_ERROR	0x0008
+#define WSEC_GEN_MFP_DISASSOC_ERROR	0x0010
+#define WSEC_GEN_MFP_DEAUTH_ERROR	0x0020
+
+#define WL_SOFT_KEY	(1 << 0)	/* Indicates this key is using soft encrypt */
+#define WL_PRIMARY_KEY	(1 << 1)	/* Indicates this key is the primary (ie tx) key */
+#if defined(BCMEXTCCX)
+#define WL_CKIP_KP	(1 << 4)	/* CMIC */
+#define WL_CKIP_MMH	(1 << 5)	/* CKIP */
+#else
+#define WL_KF_RES_4	(1 << 4)	/* Reserved for backward compat */
+#define WL_KF_RES_5	(1 << 5)	/* Reserved for backward compat */
+#endif 
+#define WL_IBSS_PEER_GROUP_KEY	(1 << 6)	/* Indicates a group key for a IBSS PEER */
+
+/* wireless security bitvec */
+#define WEP_ENABLED		0x0001
+#define TKIP_ENABLED		0x0002
+#define AES_ENABLED		0x0004
+#define WSEC_SWFLAG		0x0008
+#define SES_OW_ENABLED		0x0040	/* to go into transition mode without setting wep */
+
+/* wsec macros for operating on the above definitions */
+#define WSEC_WEP_ENABLED(wsec)	((wsec) & WEP_ENABLED)
+#define WSEC_TKIP_ENABLED(wsec)	((wsec) & TKIP_ENABLED)
+#define WSEC_AES_ENABLED(wsec)	((wsec) & AES_ENABLED)
+
+#define WSEC_ENABLED(wsec)	((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED))
+#define WSEC_SES_OW_ENABLED(wsec)	((wsec) & SES_OW_ENABLED)
+
+#define MFP_CAPABLE		0x0200
+#define MFP_REQUIRED	0x0400
+#define MFP_SHA256		0x0800 /* a special configuration for STA for WIFI test tool */
+
+/* WPA authentication mode bitvec */
+#define WPA_AUTH_DISABLED	0x0000	/* Legacy (i.e., non-WPA) */
+#define WPA_AUTH_NONE		0x0001	/* none (IBSS) */
+#define WPA_AUTH_UNSPECIFIED	0x0002	/* over 802.1x */
+#define WPA_AUTH_PSK		0x0004	/* Pre-shared key */
+#if defined(BCMEXTCCX)
+#define WPA_AUTH_CCKM		0x0008	/* CCKM */
+#define WPA2_AUTH_CCKM		0x0010	/* CCKM2 */
+#endif	
+/* #define WPA_AUTH_8021X 0x0020 */	/* 802.1x, reserved */
+#define WPA2_AUTH_UNSPECIFIED	0x0040	/* over 802.1x */
+#define WPA2_AUTH_PSK		0x0080	/* Pre-shared key */
+#define BRCM_AUTH_PSK           0x0100  /* BRCM specific PSK */
+#define BRCM_AUTH_DPT		0x0200	/* DPT PSK without group keys */
+#define WPA2_AUTH_MFP           0x1000  /* MFP (11w) in contrast to CCX */
+#define WPA2_AUTH_TPK		0x2000 	/* TDLS Peer Key */
+#define WPA2_AUTH_FT		0x4000 	/* Fast Transition. */
+#define WPA_AUTH_PFN_ANY	0xffffffff	/* for PFN, match only ssid */
+
+/* pmkid */
+#define	MAXPMKID		16
+
+#ifdef SROM12
+#define	WLC_IOCTL_MAXLEN		10000	/* max length ioctl buffer required */
+#else
+#define	WLC_IOCTL_MAXLEN		8192	/* max length ioctl buffer required */
+#endif /* SROM12 */
+
+#define	WLC_IOCTL_SMLEN			256	/* "small" length ioctl buffer required */
+#define WLC_IOCTL_MEDLEN		1536    /* "med" length ioctl buffer required */
+#if defined(LCNCONF) || defined(LCN40CONF)
+#define WLC_SAMPLECOLLECT_MAXLEN	1024	/* Max Sample Collect buffer */
+#else
+#define WLC_SAMPLECOLLECT_MAXLEN	10240	/* Max Sample Collect buffer for two cores */
+#endif
+#define WLC_SAMPLECOLLECT_MAXLEN_LCN40  8192
+
+/* common ioctl definitions */
+#define WLC_GET_MAGIC				0
+#define WLC_GET_VERSION				1
+#define WLC_UP					2
+#define WLC_DOWN				3
+#define WLC_GET_LOOP				4
+#define WLC_SET_LOOP				5
+#define WLC_DUMP				6
+#define WLC_GET_MSGLEVEL			7
+#define WLC_SET_MSGLEVEL			8
+#define WLC_GET_PROMISC				9
+#define WLC_SET_PROMISC				10
+/* #define WLC_OVERLAY_IOCTL			11 */ /* not supported */
+#define WLC_GET_RATE				12
+#define WLC_GET_MAX_RATE			13
+#define WLC_GET_INSTANCE			14
+/* #define WLC_GET_FRAG				15 */ /* no longer supported */
+/* #define WLC_SET_FRAG				16 */ /* no longer supported */
+/* #define WLC_GET_RTS				17 */ /* no longer supported */
+/* #define WLC_SET_RTS				18 */ /* no longer supported */
+#define WLC_GET_INFRA				19
+#define WLC_SET_INFRA				20
+#define WLC_GET_AUTH				21
+#define WLC_SET_AUTH				22
+#define WLC_GET_BSSID				23
+#define WLC_SET_BSSID				24
+#define WLC_GET_SSID				25
+#define WLC_SET_SSID				26
+#define WLC_RESTART				27
+#define WLC_TERMINATED				28
+/* #define WLC_DUMP_SCB				28 */ /* no longer supported */
+#define WLC_GET_CHANNEL				29
+#define WLC_SET_CHANNEL				30
+#define WLC_GET_SRL				31
+#define WLC_SET_SRL				32
+#define WLC_GET_LRL				33
+#define WLC_SET_LRL				34
+#define WLC_GET_PLCPHDR				35
+#define WLC_SET_PLCPHDR				36
+#define WLC_GET_RADIO				37
+#define WLC_SET_RADIO				38
+#define WLC_GET_PHYTYPE				39
+#define WLC_DUMP_RATE				40
+#define WLC_SET_RATE_PARAMS			41
+#define WLC_GET_FIXRATE				42
+#define WLC_SET_FIXRATE				43
+/* #define WLC_GET_WEP				42 */ /* no longer supported */
+/* #define WLC_SET_WEP				43 */ /* no longer supported */
+#define WLC_GET_KEY				44
+#define WLC_SET_KEY				45
+#define WLC_GET_REGULATORY			46
+#define WLC_SET_REGULATORY			47
+#define WLC_GET_PASSIVE_SCAN			48
+#define WLC_SET_PASSIVE_SCAN			49
+#define WLC_SCAN				50
+#define WLC_SCAN_RESULTS			51
+#define WLC_DISASSOC				52
+#define WLC_REASSOC				53
+#define WLC_GET_ROAM_TRIGGER			54
+#define WLC_SET_ROAM_TRIGGER			55
+#define WLC_GET_ROAM_DELTA			56
+#define WLC_SET_ROAM_DELTA			57
+#define WLC_GET_ROAM_SCAN_PERIOD		58
+#define WLC_SET_ROAM_SCAN_PERIOD		59
+#define WLC_EVM					60	/* diag */
+#define WLC_GET_TXANT				61
+#define WLC_SET_TXANT				62
+#define WLC_GET_ANTDIV				63
+#define WLC_SET_ANTDIV				64
+/* #define WLC_GET_TXPWR			65 */ /* no longer supported */
+/* #define WLC_SET_TXPWR			66 */ /* no longer supported */
+#define WLC_GET_CLOSED				67
+#define WLC_SET_CLOSED				68
+#define WLC_GET_MACLIST				69
+#define WLC_SET_MACLIST				70
+#define WLC_GET_RATESET				71
+#define WLC_SET_RATESET				72
+/* #define WLC_GET_LOCALE			73 */ /* no longer supported */
+#define WLC_LONGTRAIN				74
+#define WLC_GET_BCNPRD				75
+#define WLC_SET_BCNPRD				76
+#define WLC_GET_DTIMPRD				77
+#define WLC_SET_DTIMPRD				78
+#define WLC_GET_SROM				79
+#define WLC_SET_SROM				80
+#define WLC_GET_WEP_RESTRICT			81
+#define WLC_SET_WEP_RESTRICT			82
+#define WLC_GET_COUNTRY				83
+#define WLC_SET_COUNTRY				84
+#define WLC_GET_PM				85
+#define WLC_SET_PM				86
+#define WLC_GET_WAKE				87
+#define WLC_SET_WAKE				88
+/* #define WLC_GET_D11CNTS			89 */ /* -> "counters" iovar */
+#define WLC_GET_FORCELINK			90	/* ndis only */
+#define WLC_SET_FORCELINK			91	/* ndis only */
+#define WLC_FREQ_ACCURACY			92	/* diag */
+#define WLC_CARRIER_SUPPRESS			93	/* diag */
+#define WLC_GET_PHYREG				94
+#define WLC_SET_PHYREG				95
+#define WLC_GET_RADIOREG			96
+#define WLC_SET_RADIOREG			97
+#define WLC_GET_REVINFO				98
+#define WLC_GET_UCANTDIV			99
+#define WLC_SET_UCANTDIV			100
+#define WLC_R_REG				101
+#define WLC_W_REG				102
+/* #define WLC_DIAG_LOOPBACK			103	old tray diag */
+/* #define WLC_RESET_D11CNTS			104 */ /* -> "reset_d11cnts" iovar */
+#define WLC_GET_MACMODE				105
+#define WLC_SET_MACMODE				106
+#define WLC_GET_MONITOR				107
+#define WLC_SET_MONITOR				108
+#define WLC_GET_GMODE				109
+#define WLC_SET_GMODE				110
+#define WLC_GET_LEGACY_ERP			111
+#define WLC_SET_LEGACY_ERP			112
+#define WLC_GET_RX_ANT				113
+#define WLC_GET_CURR_RATESET			114	/* current rateset */
+#define WLC_GET_SCANSUPPRESS			115
+#define WLC_SET_SCANSUPPRESS			116
+#define WLC_GET_AP				117
+#define WLC_SET_AP				118
+#define WLC_GET_EAP_RESTRICT			119
+#define WLC_SET_EAP_RESTRICT			120
+#define WLC_SCB_AUTHORIZE			121
+#define WLC_SCB_DEAUTHORIZE			122
+#define WLC_GET_WDSLIST				123
+#define WLC_SET_WDSLIST				124
+#define WLC_GET_ATIM				125
+#define WLC_SET_ATIM				126
+#define WLC_GET_RSSI				127
+#define WLC_GET_PHYANTDIV			128
+#define WLC_SET_PHYANTDIV			129
+#define WLC_AP_RX_ONLY				130
+#define WLC_GET_TX_PATH_PWR			131
+#define WLC_SET_TX_PATH_PWR			132
+#define WLC_GET_WSEC				133
+#define WLC_SET_WSEC				134
+#define WLC_GET_PHY_NOISE			135
+#define WLC_GET_BSS_INFO			136
+#define WLC_GET_PKTCNTS				137
+#define WLC_GET_LAZYWDS				138
+#define WLC_SET_LAZYWDS				139
+#define WLC_GET_BANDLIST			140
+
+#define WLC_GET_BAND				141
+#define WLC_SET_BAND				142
+#define WLC_SCB_DEAUTHENTICATE			143
+#define WLC_GET_SHORTSLOT			144
+#define WLC_GET_SHORTSLOT_OVERRIDE		145
+#define WLC_SET_SHORTSLOT_OVERRIDE		146
+#define WLC_GET_SHORTSLOT_RESTRICT		147
+#define WLC_SET_SHORTSLOT_RESTRICT		148
+#define WLC_GET_GMODE_PROTECTION		149
+#define WLC_GET_GMODE_PROTECTION_OVERRIDE	150
+#define WLC_SET_GMODE_PROTECTION_OVERRIDE	151
+#define WLC_UPGRADE				152
+/* #define WLC_GET_MRATE			153 */ /* no longer supported */
+/* #define WLC_SET_MRATE			154 */ /* no longer supported */
+#define WLC_GET_IGNORE_BCNS			155
+#define WLC_SET_IGNORE_BCNS			156
+#define WLC_GET_SCB_TIMEOUT			157
+#define WLC_SET_SCB_TIMEOUT			158
+#define WLC_GET_ASSOCLIST			159
+#define WLC_GET_CLK				160
+#define WLC_SET_CLK				161
+#define WLC_GET_UP				162
+#define WLC_OUT					163
+#define WLC_GET_WPA_AUTH			164
+#define WLC_SET_WPA_AUTH			165
+#define WLC_GET_UCFLAGS				166
+#define WLC_SET_UCFLAGS				167
+#define WLC_GET_PWRIDX				168
+#define WLC_SET_PWRIDX				169
+#define WLC_GET_TSSI				170
+#define WLC_GET_SUP_RATESET_OVERRIDE		171
+#define WLC_SET_SUP_RATESET_OVERRIDE		172
+/* #define WLC_SET_FAST_TIMER			173 */ /* no longer supported */
+/* #define WLC_GET_FAST_TIMER			174 */ /* no longer supported */
+/* #define WLC_SET_SLOW_TIMER			175 */ /* no longer supported */
+/* #define WLC_GET_SLOW_TIMER			176 */ /* no longer supported */
+/* #define WLC_DUMP_PHYREGS			177 */ /* no longer supported */
+#define WLC_GET_PROTECTION_CONTROL		178
+#define WLC_SET_PROTECTION_CONTROL		179
+#define WLC_GET_PHYLIST				180
+#define WLC_ENCRYPT_STRENGTH			181	/* ndis only */
+#define WLC_DECRYPT_STATUS			182	/* ndis only */
+#define WLC_GET_KEY_SEQ				183
+#define WLC_GET_SCAN_CHANNEL_TIME		184
+#define WLC_SET_SCAN_CHANNEL_TIME		185
+#define WLC_GET_SCAN_UNASSOC_TIME		186
+#define WLC_SET_SCAN_UNASSOC_TIME		187
+#define WLC_GET_SCAN_HOME_TIME			188
+#define WLC_SET_SCAN_HOME_TIME			189
+#define WLC_GET_SCAN_NPROBES			190
+#define WLC_SET_SCAN_NPROBES			191
+#define WLC_GET_PRB_RESP_TIMEOUT		192
+#define WLC_SET_PRB_RESP_TIMEOUT		193
+#define WLC_GET_ATTEN				194
+#define WLC_SET_ATTEN				195
+#define WLC_GET_SHMEM				196	/* diag */
+#define WLC_SET_SHMEM				197	/* diag */
+/* #define WLC_GET_GMODE_PROTECTION_CTS		198 */ /* no longer supported */
+/* #define WLC_SET_GMODE_PROTECTION_CTS		199 */ /* no longer supported */
+#define WLC_SET_WSEC_TEST			200
+#define WLC_SCB_DEAUTHENTICATE_FOR_REASON	201
+#define WLC_TKIP_COUNTERMEASURES		202
+#define WLC_GET_PIOMODE				203
+#define WLC_SET_PIOMODE				204
+#define WLC_SET_ASSOC_PREFER			205
+#define WLC_GET_ASSOC_PREFER			206
+#define WLC_SET_ROAM_PREFER			207
+#define WLC_GET_ROAM_PREFER			208
+#define WLC_SET_LED				209
+#define WLC_GET_LED				210
+#define WLC_GET_INTERFERENCE_MODE		211
+#define WLC_SET_INTERFERENCE_MODE		212
+#define WLC_GET_CHANNEL_QA			213
+#define WLC_START_CHANNEL_QA			214
+#define WLC_GET_CHANNEL_SEL			215
+#define WLC_START_CHANNEL_SEL			216
+#define WLC_GET_VALID_CHANNELS			217
+#define WLC_GET_FAKEFRAG			218
+#define WLC_SET_FAKEFRAG			219
+#define WLC_GET_PWROUT_PERCENTAGE		220
+#define WLC_SET_PWROUT_PERCENTAGE		221
+#define WLC_SET_BAD_FRAME_PREEMPT		222
+#define WLC_GET_BAD_FRAME_PREEMPT		223
+#define WLC_SET_LEAP_LIST			224
+#define WLC_GET_LEAP_LIST			225
+#define WLC_GET_CWMIN				226
+#define WLC_SET_CWMIN				227
+#define WLC_GET_CWMAX				228
+#define WLC_SET_CWMAX				229
+#define WLC_GET_WET				230
+#define WLC_SET_WET				231
+#define WLC_GET_PUB				232
+/* #define WLC_SET_GLACIAL_TIMER		233 */ /* no longer supported */
+/* #define WLC_GET_GLACIAL_TIMER		234 */ /* no longer supported */
+#define WLC_GET_KEY_PRIMARY			235
+#define WLC_SET_KEY_PRIMARY			236
+
+
+/* #define WLC_DUMP_RADIOREGS			237 */ /* no longer supported */
+#define WLC_GET_ACI_ARGS			238
+#define WLC_SET_ACI_ARGS			239
+#define WLC_UNSET_CALLBACK			240
+#define WLC_SET_CALLBACK			241
+#define WLC_GET_RADAR				242
+#define WLC_SET_RADAR				243
+#define WLC_SET_SPECT_MANAGMENT			244
+#define WLC_GET_SPECT_MANAGMENT			245
+#define WLC_WDS_GET_REMOTE_HWADDR		246	/* handled in wl_linux.c/wl_vx.c */
+#define WLC_WDS_GET_WPA_SUP			247
+#define WLC_SET_CS_SCAN_TIMER			248
+#define WLC_GET_CS_SCAN_TIMER			249
+#define WLC_MEASURE_REQUEST			250
+#define WLC_INIT				251
+#define WLC_SEND_QUIET				252
+#define WLC_KEEPALIVE			253
+#define WLC_SEND_PWR_CONSTRAINT			254
+#define WLC_UPGRADE_STATUS			255
+#define WLC_CURRENT_PWR				256
+#define WLC_GET_SCAN_PASSIVE_TIME		257
+#define WLC_SET_SCAN_PASSIVE_TIME		258
+#define WLC_LEGACY_LINK_BEHAVIOR		259
+#define WLC_GET_CHANNELS_IN_COUNTRY		260
+#define WLC_GET_COUNTRY_LIST			261
+#define WLC_GET_VAR				262	/* get value of named variable */
+#define WLC_SET_VAR				263	/* set named variable to value */
+#define WLC_NVRAM_GET				264	/* deprecated */
+#define WLC_NVRAM_SET				265
+#define WLC_NVRAM_DUMP				266
+#define WLC_REBOOT				267
+#define WLC_SET_WSEC_PMK			268
+#define WLC_GET_AUTH_MODE			269
+#define WLC_SET_AUTH_MODE			270
+#define WLC_GET_WAKEENTRY			271
+#define WLC_SET_WAKEENTRY			272
+#define WLC_NDCONFIG_ITEM			273	/* currently handled in wl_oid.c */
+#define WLC_NVOTPW				274
+#define WLC_OTPW				275
+#define WLC_IOV_BLOCK_GET			276
+#define WLC_IOV_MODULES_GET			277
+#define WLC_SOFT_RESET				278
+#define WLC_GET_ALLOW_MODE			279
+#define WLC_SET_ALLOW_MODE			280
+#define WLC_GET_DESIRED_BSSID			281
+#define WLC_SET_DESIRED_BSSID			282
+#define	WLC_DISASSOC_MYAP			283
+#define WLC_GET_NBANDS				284	/* for Dongle EXT_STA support */
+#define WLC_GET_BANDSTATES			285	/* for Dongle EXT_STA support */
+#define WLC_GET_WLC_BSS_INFO			286	/* for Dongle EXT_STA support */
+#define WLC_GET_ASSOC_INFO			287	/* for Dongle EXT_STA support */
+#define WLC_GET_OID_PHY				288	/* for Dongle EXT_STA support */
+#define WLC_SET_OID_PHY				289	/* for Dongle EXT_STA support */
+#define WLC_SET_ASSOC_TIME			290	/* for Dongle EXT_STA support */
+#define WLC_GET_DESIRED_SSID			291	/* for Dongle EXT_STA support */
+#define WLC_GET_CHANSPEC			292	/* for Dongle EXT_STA support */
+#define WLC_GET_ASSOC_STATE			293	/* for Dongle EXT_STA support */
+#define WLC_SET_PHY_STATE			294	/* for Dongle EXT_STA support */
+#define WLC_GET_SCAN_PENDING			295	/* for Dongle EXT_STA support */
+#define WLC_GET_SCANREQ_PENDING			296	/* for Dongle EXT_STA support */
+#define WLC_GET_PREV_ROAM_REASON		297	/* for Dongle EXT_STA support */
+#define WLC_SET_PREV_ROAM_REASON		298	/* for Dongle EXT_STA support */
+#define WLC_GET_BANDSTATES_PI			299	/* for Dongle EXT_STA support */
+#define WLC_GET_PHY_STATE			300	/* for Dongle EXT_STA support */
+#define WLC_GET_BSS_WPA_RSN			301	/* for Dongle EXT_STA support */
+#define WLC_GET_BSS_WPA2_RSN			302	/* for Dongle EXT_STA support */
+#define WLC_GET_BSS_BCN_TS			303	/* for Dongle EXT_STA support */
+#define WLC_GET_INT_DISASSOC			304	/* for Dongle EXT_STA support */
+#define WLC_SET_NUM_PEERS			305     /* for Dongle EXT_STA support */
+#define WLC_GET_NUM_BSS				306	/* for Dongle EXT_STA support */
+#define WLC_PHY_SAMPLE_COLLECT			307	/* phy sample collect mode */
+/* #define WLC_UM_PRIV				308 */	/* Deprecated: usermode driver */
+#define WLC_GET_CMD				309
+/* #define WLC_LAST				310 */	/* Never used - can be reused */
+#define WLC_SET_INTERFERENCE_OVERRIDE_MODE	311	/* set inter mode override */
+#define WLC_GET_INTERFERENCE_OVERRIDE_MODE	312	/* get inter mode override */
+/* #define WLC_GET_WAI_RESTRICT			313 */	/* for WAPI, deprecated use iovar instead */
+/* #define WLC_SET_WAI_RESTRICT			314 */	/* for WAPI, deprecated use iovar instead */
+/* #define WLC_SET_WAI_REKEY			315 */	/* for WAPI, deprecated use iovar instead */
+#define WLC_SET_NAT_CONFIG			316	/* for configuring NAT filter driver */
+#define WLC_GET_NAT_STATE			317
+#define WLC_GET_TXBF_RATESET			318
+#define WLC_SET_TXBF_RATESET			319
+#define WLC_SCAN_CQ				320
+#define WLC_GET_RSSI_QDB			321 /* qdB portion of the RSSI */
+#define WLC_DUMP_RATESET			322
+#define WLC_ECHO				323
+#define WLC_LAST				324
+#ifndef EPICTRL_COOKIE
+#define EPICTRL_COOKIE		0xABADCEDE
+#endif
+
+/* vx wlc ioctl's offset */
+#define CMN_IOCTL_OFF 0x180
+
+/*
+ * custom OID support
+ *
+ * 0xFF - implementation specific OID
+ * 0xE4 - first byte of Broadcom PCI vendor ID
+ * 0x14 - second byte of Broadcom PCI vendor ID
+ * 0xXX - the custom OID number
+ */
+
+/* begin 0x1f values beyond the start of the ET driver range. */
+#define WL_OID_BASE		0xFFE41420
+
+/* NDIS overrides */
+#define OID_WL_GETINSTANCE	(WL_OID_BASE + WLC_GET_INSTANCE)
+#define OID_WL_GET_FORCELINK	(WL_OID_BASE + WLC_GET_FORCELINK)
+#define OID_WL_SET_FORCELINK	(WL_OID_BASE + WLC_SET_FORCELINK)
+#define	OID_WL_ENCRYPT_STRENGTH	(WL_OID_BASE + WLC_ENCRYPT_STRENGTH)
+#define OID_WL_DECRYPT_STATUS	(WL_OID_BASE + WLC_DECRYPT_STATUS)
+#define OID_LEGACY_LINK_BEHAVIOR (WL_OID_BASE + WLC_LEGACY_LINK_BEHAVIOR)
+#define OID_WL_NDCONFIG_ITEM	(WL_OID_BASE + WLC_NDCONFIG_ITEM)
+
+/* EXT_STA Dongle suuport */
+#define OID_STA_CHANSPEC	(WL_OID_BASE + WLC_GET_CHANSPEC)
+#define OID_STA_NBANDS		(WL_OID_BASE + WLC_GET_NBANDS)
+#define OID_STA_GET_PHY		(WL_OID_BASE + WLC_GET_OID_PHY)
+#define OID_STA_SET_PHY		(WL_OID_BASE + WLC_SET_OID_PHY)
+#define OID_STA_ASSOC_TIME	(WL_OID_BASE + WLC_SET_ASSOC_TIME)
+#define OID_STA_DESIRED_SSID	(WL_OID_BASE + WLC_GET_DESIRED_SSID)
+#define OID_STA_SET_PHY_STATE	(WL_OID_BASE + WLC_SET_PHY_STATE)
+#define OID_STA_SCAN_PENDING	(WL_OID_BASE + WLC_GET_SCAN_PENDING)
+#define OID_STA_SCANREQ_PENDING (WL_OID_BASE + WLC_GET_SCANREQ_PENDING)
+#define OID_STA_GET_ROAM_REASON (WL_OID_BASE + WLC_GET_PREV_ROAM_REASON)
+#define OID_STA_SET_ROAM_REASON (WL_OID_BASE + WLC_SET_PREV_ROAM_REASON)
+#define OID_STA_GET_PHY_STATE	(WL_OID_BASE + WLC_GET_PHY_STATE)
+#define OID_STA_INT_DISASSOC	(WL_OID_BASE + WLC_GET_INT_DISASSOC)
+#define OID_STA_SET_NUM_PEERS	(WL_OID_BASE + WLC_SET_NUM_PEERS)
+#define OID_STA_GET_NUM_BSS	(WL_OID_BASE + WLC_GET_NUM_BSS)
+
+/* NAT filter driver support */
+#define OID_NAT_SET_CONFIG	(WL_OID_BASE + WLC_SET_NAT_CONFIG)
+#define OID_NAT_GET_STATE	(WL_OID_BASE + WLC_GET_NAT_STATE)
+
+#define WL_DECRYPT_STATUS_SUCCESS	1
+#define WL_DECRYPT_STATUS_FAILURE	2
+#define WL_DECRYPT_STATUS_UNKNOWN	3
+
+/* allows user-mode app to poll the status of USB image upgrade */
+#define WLC_UPGRADE_SUCCESS			0
+#define WLC_UPGRADE_PENDING			1
+
+/* WLC_GET_AUTH, WLC_SET_AUTH values */
+#define WL_AUTH_OPEN_SYSTEM		0	/* d11 open authentication */
+#define WL_AUTH_SHARED_KEY		1	/* d11 shared authentication */
+#define WL_AUTH_OPEN_SHARED		2	/* try open, then shared if open failed w/rc 13 */
+
+/* a large TX Power as an init value to factor out of MIN() calculations,
+ * keep low enough to fit in an int8, units are .25 dBm
+ */
+#define WLC_TXPWR_MAX		(127)	/* ~32 dBm = 1,500 mW */
+
+/* "diag" iovar argument and error code */
+#define WL_DIAG_INTERRUPT			1	/* d11 loopback interrupt test */
+#define WL_DIAG_LOOPBACK			2	/* d11 loopback data test */
+#define WL_DIAG_MEMORY				3	/* d11 memory test */
+#define WL_DIAG_LED				4	/* LED test */
+#define WL_DIAG_REG				5	/* d11/phy register test */
+#define WL_DIAG_SROM				6	/* srom read/crc test */
+#define WL_DIAG_DMA				7	/* DMA test */
+#define WL_DIAG_LOOPBACK_EXT			8	/* enhenced d11 loopback data test */
+
+#define WL_DIAGERR_SUCCESS			0
+#define WL_DIAGERR_FAIL_TO_RUN			1	/* unable to run requested diag */
+#define WL_DIAGERR_NOT_SUPPORTED		2	/* diag requested is not supported */
+#define WL_DIAGERR_INTERRUPT_FAIL		3	/* loopback interrupt test failed */
+#define WL_DIAGERR_LOOPBACK_FAIL		4	/* loopback data test failed */
+#define WL_DIAGERR_SROM_FAIL			5	/* srom read failed */
+#define WL_DIAGERR_SROM_BADCRC			6	/* srom crc failed */
+#define WL_DIAGERR_REG_FAIL			7	/* d11/phy register test failed */
+#define WL_DIAGERR_MEMORY_FAIL			8	/* d11 memory test failed */
+#define WL_DIAGERR_NOMEM			9	/* diag test failed due to no memory */
+#define WL_DIAGERR_DMA_FAIL			10	/* DMA test failed */
+
+#define WL_DIAGERR_MEMORY_TIMEOUT		11	/* d11 memory test didn't finish in time */
+#define WL_DIAGERR_MEMORY_BADPATTERN		12	/* d11 memory test result in bad pattern */
+
+/* band types */
+#define	WLC_BAND_AUTO		0	/* auto-select */
+#define	WLC_BAND_5G		1	/* 5 Ghz */
+#define	WLC_BAND_2G		2	/* 2.4 Ghz */
+#define	WLC_BAND_ALL		3	/* all bands */
+
+/* band range returned by band_range iovar */
+#define WL_CHAN_FREQ_RANGE_2G      0
+#define WL_CHAN_FREQ_RANGE_5GL     1
+#define WL_CHAN_FREQ_RANGE_5GM     2
+#define WL_CHAN_FREQ_RANGE_5GH     3
+
+#define WL_CHAN_FREQ_RANGE_5GLL_5BAND    4
+#define WL_CHAN_FREQ_RANGE_5GLH_5BAND    5
+#define WL_CHAN_FREQ_RANGE_5GML_5BAND    6
+#define WL_CHAN_FREQ_RANGE_5GMH_5BAND    7
+#define WL_CHAN_FREQ_RANGE_5GH_5BAND     8
+
+#define WL_CHAN_FREQ_RANGE_5G_BAND0     1
+#define WL_CHAN_FREQ_RANGE_5G_BAND1     2
+#define WL_CHAN_FREQ_RANGE_5G_BAND2     3
+#define WL_CHAN_FREQ_RANGE_5G_BAND3     4
+
+#ifdef SROM12
+#define WL_CHAN_FREQ_RANGE_5G_BAND4 5
+#define WL_CHAN_FREQ_RANGE_2G_40 6
+#define WL_CHAN_FREQ_RANGE_5G_BAND0_40 7
+#define WL_CHAN_FREQ_RANGE_5G_BAND1_40 8
+#define WL_CHAN_FREQ_RANGE_5G_BAND2_40 9
+#define WL_CHAN_FREQ_RANGE_5G_BAND3_40 10
+#define WL_CHAN_FREQ_RANGE_5G_BAND4_40 11
+#define WL_CHAN_FREQ_RANGE_5G_BAND0_80 12
+#define WL_CHAN_FREQ_RANGE_5G_BAND1_80 13
+#define WL_CHAN_FREQ_RANGE_5G_BAND2_80 14
+#define WL_CHAN_FREQ_RANGE_5G_BAND3_80 15
+#define WL_CHAN_FREQ_RANGE_5G_BAND4_80 16
+
+#define WL_CHAN_FREQ_RANGE_5G_4BAND	17
+#define WL_CHAN_FREQ_RANGE_5G_5BAND	18
+#define WL_CHAN_FREQ_RANGE_5G_5BAND_40	19
+#define WL_CHAN_FREQ_RANGE_5G_5BAND_80	20
+#else
+#define WL_CHAN_FREQ_RANGE_5G_4BAND	5
+#endif /* SROM12 */
+/* MAC list modes */
+#define WLC_MACMODE_DISABLED	0	/* MAC list disabled */
+#define WLC_MACMODE_DENY	1	/* Deny specified (i.e. allow unspecified) */
+#define WLC_MACMODE_ALLOW	2	/* Allow specified (i.e. deny unspecified) */
+
+/*
+ * 54g modes (basic bits may still be overridden)
+ *
+ * GMODE_LEGACY_B			Rateset: 1b, 2b, 5.5, 11
+ *					Preamble: Long
+ *					Shortslot: Off
+ * GMODE_AUTO				Rateset: 1b, 2b, 5.5b, 11b, 18, 24, 36, 54
+ *					Extended Rateset: 6, 9, 12, 48
+ *					Preamble: Long
+ *					Shortslot: Auto
+ * GMODE_ONLY				Rateset: 1b, 2b, 5.5b, 11b, 18, 24b, 36, 54
+ *					Extended Rateset: 6b, 9, 12b, 48
+ *					Preamble: Short required
+ *					Shortslot: Auto
+ * GMODE_B_DEFERRED			Rateset: 1b, 2b, 5.5b, 11b, 18, 24, 36, 54
+ *					Extended Rateset: 6, 9, 12, 48
+ *					Preamble: Long
+ *					Shortslot: On
+ * GMODE_PERFORMANCE			Rateset: 1b, 2b, 5.5b, 6b, 9, 11b, 12b, 18, 24b, 36, 48, 54
+ *					Preamble: Short required
+ *					Shortslot: On and required
+ * GMODE_LRS				Rateset: 1b, 2b, 5.5b, 11b
+ *					Extended Rateset: 6, 9, 12, 18, 24, 36, 48, 54
+ *					Preamble: Long
+ *					Shortslot: Auto
+ */
+#define GMODE_LEGACY_B		0
+#define GMODE_AUTO		1
+#define GMODE_ONLY		2
+#define GMODE_B_DEFERRED	3
+#define GMODE_PERFORMANCE	4
+#define GMODE_LRS		5
+#define GMODE_MAX		6
+
+/* values for PLCPHdr_override */
+#define WLC_PLCP_AUTO	-1
+#define WLC_PLCP_SHORT	0
+#define WLC_PLCP_LONG	1
+
+/* values for g_protection_override and n_protection_override */
+#define WLC_PROTECTION_AUTO		-1
+#define WLC_PROTECTION_OFF		0
+#define WLC_PROTECTION_ON		1
+#define WLC_PROTECTION_MMHDR_ONLY	2
+#define WLC_PROTECTION_CTS_ONLY		3
+
+/* values for g_protection_control and n_protection_control */
+#define WLC_PROTECTION_CTL_OFF		0
+#define WLC_PROTECTION_CTL_LOCAL	1
+#define WLC_PROTECTION_CTL_OVERLAP	2
+
+/* values for n_protection */
+#define WLC_N_PROTECTION_OFF		0
+#define WLC_N_PROTECTION_OPTIONAL	1
+#define WLC_N_PROTECTION_20IN40		2
+#define WLC_N_PROTECTION_MIXEDMODE	3
+
+/* values for n_preamble_type */
+#define WLC_N_PREAMBLE_MIXEDMODE	0
+#define WLC_N_PREAMBLE_GF		1
+#define WLC_N_PREAMBLE_GF_BRCM          2
+
+/* values for band specific 40MHz capabilities (deprecated) */
+#define WLC_N_BW_20ALL			0
+#define WLC_N_BW_40ALL			1
+#define WLC_N_BW_20IN2G_40IN5G		2
+
+#define WLC_BW_20MHZ_BIT		(1<<0)
+#define WLC_BW_40MHZ_BIT		(1<<1)
+#define WLC_BW_80MHZ_BIT		(1<<2)
+#define WLC_BW_160MHZ_BIT		(1<<3)
+
+/* Bandwidth capabilities */
+#define WLC_BW_CAP_20MHZ		(WLC_BW_20MHZ_BIT)
+#define WLC_BW_CAP_40MHZ		(WLC_BW_40MHZ_BIT|WLC_BW_20MHZ_BIT)
+#define WLC_BW_CAP_80MHZ		(WLC_BW_80MHZ_BIT|WLC_BW_40MHZ_BIT|WLC_BW_20MHZ_BIT)
+#define WLC_BW_CAP_160MHZ		(WLC_BW_160MHZ_BIT|WLC_BW_80MHZ_BIT| \
+	WLC_BW_40MHZ_BIT|WLC_BW_20MHZ_BIT)
+#define WLC_BW_CAP_UNRESTRICTED		0xFF
+
+#define WL_BW_CAP_20MHZ(bw_cap)	(((bw_cap) & WLC_BW_20MHZ_BIT) ? TRUE : FALSE)
+#define WL_BW_CAP_40MHZ(bw_cap)	(((bw_cap) & WLC_BW_40MHZ_BIT) ? TRUE : FALSE)
+#define WL_BW_CAP_80MHZ(bw_cap)	(((bw_cap) & WLC_BW_80MHZ_BIT) ? TRUE : FALSE)
+#define WL_BW_CAP_160MHZ(bw_cap)(((bw_cap) & WLC_BW_160MHZ_BIT) ? TRUE : FALSE)
+
+/* values to force tx/rx chain */
+#define WLC_N_TXRX_CHAIN0		0
+#define WLC_N_TXRX_CHAIN1		1
+
+/* bitflags for SGI support (sgi_rx iovar) */
+#define WLC_N_SGI_20			0x01
+#define WLC_N_SGI_40			0x02
+#define WLC_VHT_SGI_80			0x04
+
+/* when sgi_tx==WLC_SGI_ALL, bypass rate selection, enable sgi for all mcs */
+#define WLC_SGI_ALL				0x02
+
+#define LISTEN_INTERVAL			10
+/* interference mitigation options */
+#define	INTERFERE_OVRRIDE_OFF	-1	/* interference override off */
+#define	INTERFERE_NONE	0	/* off */
+#define	NON_WLAN	1	/* foreign/non 802.11 interference, no auto detect */
+#define	WLAN_MANUAL	2	/* ACI: no auto detection */
+#define	WLAN_AUTO	3	/* ACI: auto detect */
+#define	WLAN_AUTO_W_NOISE	4	/* ACI: auto - detect and non 802.11 interference */
+#define AUTO_ACTIVE	(1 << 7) /* Auto is currently active */
+
+/* interfernece mode bit-masks (ACPHY) */
+#define ACPHY_ACI_GLITCHBASED_DESENSE 1   /* bit 0 */
+#define ACPHY_ACI_HWACI_PKTGAINLMT 2      /* bit 1 */
+#define ACPHY_ACI_W2NB_PKTGAINLMT 4       /* bit 2 */
+#define ACPHY_ACI_PREEMPTION 8            /* bit 3 */
+#define ACPHY_HWACI_MITIGATION 16            /* bit 4 */
+#define ACPHY_ACI_MAX_MODE 31
+
+/* AP environment */
+#define AP_ENV_DETECT_NOT_USED		0 /* We aren't using AP environment detection */
+#define AP_ENV_DENSE			1 /* "Corporate" or other AP dense environment */
+#define AP_ENV_SPARSE			2 /* "Home" or other sparse environment */
+#define AP_ENV_INDETERMINATE		3 /* AP environment hasn't been identified */
+
+#define TRIGGER_NOW				0
+#define TRIGGER_CRS				0x01
+#define TRIGGER_CRSDEASSERT			0x02
+#define TRIGGER_GOODFCS				0x04
+#define TRIGGER_BADFCS				0x08
+#define TRIGGER_BADPLCP				0x10
+#define TRIGGER_CRSGLITCH			0x20
+
+#define	WL_SAMPLEDATA_HEADER_TYPE	1
+#define WL_SAMPLEDATA_HEADER_SIZE	80	/* sample collect header size (bytes) */
+#define	WL_SAMPLEDATA_TYPE		2
+#define	WL_SAMPLEDATA_SEQ		0xff	/* sequence # */
+#define	WL_SAMPLEDATA_MORE_DATA		0x100	/* more data mask */
+
+/* WL_OTA START */
+#define WL_OTA_ARG_PARSE_BLK_SIZE	1200
+#define WL_OTA_TEST_MAX_NUM_RATE	30
+#define WL_OTA_TEST_MAX_NUM_SEQ		100
+
+#define WL_THRESHOLD_LO_BAND	70	/* range from 5250MHz - 5350MHz */
+
+/* radar iovar SET defines */
+#define WL_RADAR_DETECTOR_OFF		0	/* radar detector off */
+#define WL_RADAR_DETECTOR_ON		1	/* radar detector on */
+#define WL_RADAR_SIMULATED		2	/* force radar detector to declare
+						 * detection once
+						 */
+#define WL_RSSI_ANT_VERSION	1	/* current version of wl_rssi_ant_t */
+#define WL_ANT_RX_MAX		2	/* max 2 receive antennas */
+#define WL_ANT_HT_RX_MAX	3	/* max 3 receive antennas/cores */
+#define WL_ANT_IDX_1		0	/* antenna index 1 */
+#define WL_ANT_IDX_2		1	/* antenna index 2 */
+
+#ifndef WL_RSSI_ANT_MAX
+#define WL_RSSI_ANT_MAX		4	/* max possible rx antennas */
+#elif WL_RSSI_ANT_MAX != 4
+#error "WL_RSSI_ANT_MAX does not match"
+#endif
+
+/* dfs_status iovar-related defines */
+
+/* cac - channel availability check,
+ * ism - in-service monitoring
+ * csa - channel switching announcement
+ */
+
+/* cac state values */
+#define WL_DFS_CACSTATE_IDLE		0	/* state for operating in non-radar channel */
+#define	WL_DFS_CACSTATE_PREISM_CAC	1	/* CAC in progress */
+#define WL_DFS_CACSTATE_ISM		2	/* ISM in progress */
+#define WL_DFS_CACSTATE_CSA		3	/* csa */
+#define WL_DFS_CACSTATE_POSTISM_CAC	4	/* ISM CAC */
+#define WL_DFS_CACSTATE_PREISM_OOC	5	/* PREISM OOC */
+#define WL_DFS_CACSTATE_POSTISM_OOC	6	/* POSTISM OOC */
+#define WL_DFS_CACSTATES		7	/* this many states exist */
+
+/* Defines used with channel_bandwidth for curpower */
+#define WL_BW_20MHZ		0
+#define WL_BW_40MHZ		1
+#define WL_BW_80MHZ		2
+#define WL_BW_160MHZ		3
+#define WL_BW_8080MHZ		4
+
+/* tx_power_t.flags bits */
+#define WL_TX_POWER_F_ENABLED	1
+#define WL_TX_POWER_F_HW		2
+#define WL_TX_POWER_F_MIMO		4
+#define WL_TX_POWER_F_SISO		8
+#define WL_TX_POWER_F_HT		0x10
+#define WL_TX_POWER_F_VHT		0x20
+#define WL_TX_POWER_F_OPENLOOP		0x40
+
+/* Message levels */
+#define WL_ERROR_VAL		0x00000001
+#define WL_TRACE_VAL		0x00000002
+#define WL_PRHDRS_VAL		0x00000004
+#define WL_PRPKT_VAL		0x00000008
+#define WL_INFORM_VAL		0x00000010
+#define WL_TMP_VAL		0x00000020
+#define WL_OID_VAL		0x00000040
+#define WL_RATE_VAL		0x00000080
+#define WL_ASSOC_VAL		0x00000100
+#define WL_PRUSR_VAL		0x00000200
+#define WL_PS_VAL		0x00000400
+#define WL_TXPWR_VAL		0x00000800	/* retired in TOT on 6/10/2009 */
+#define WL_MODE_SWITCH_VAL	0x00000800 /* Using retired TXPWR val */
+#define WL_PORT_VAL		0x00001000
+#define WL_DUAL_VAL		0x00002000
+#define WL_WSEC_VAL		0x00004000
+#define WL_WSEC_DUMP_VAL	0x00008000
+#define WL_LOG_VAL		0x00010000
+#define WL_NRSSI_VAL		0x00020000	/* retired in TOT on 6/10/2009 */
+#define WL_LOFT_VAL		0x00040000	/* retired in TOT on 6/10/2009 */
+#define WL_REGULATORY_VAL	0x00080000
+#define WL_TAF_VAL		0x00100000
+#define WL_RADAR_VAL		0x00200000	/* retired in TOT on 6/10/2009 */
+#define WL_MPC_VAL		0x00400000
+#define WL_APSTA_VAL		0x00800000
+#define WL_DFS_VAL		0x01000000
+#define WL_BA_VAL		0x02000000	/* retired in TOT on 6/14/2010 */
+#define WL_ACI_VAL		0x04000000
+#define WL_PRMAC_VAL		0x04000000
+#define WL_MBSS_VAL		0x04000000
+#define WL_CAC_VAL		0x08000000
+#define WL_AMSDU_VAL		0x10000000
+#define WL_AMPDU_VAL		0x20000000
+#define WL_FFPLD_VAL		0x40000000
+
+/* wl_msg_level is full. For new bits take the next one and AND with
+ * wl_msg_level2 in wl_dbg.h
+ */
+#define WL_DPT_VAL		0x00000001
+#define WL_SCAN_VAL		0x00000002
+#define WL_WOWL_VAL		0x00000004
+#define WL_COEX_VAL		0x00000008
+#define WL_RTDC_VAL		0x00000010
+#define WL_PROTO_VAL		0x00000020
+#define WL_BTA_VAL		0x00000040
+#define WL_CHANINT_VAL		0x00000080
+#define WL_WMF_VAL		0x00000100
+#define WL_P2P_VAL		0x00000200
+#define WL_ITFR_VAL		0x00000400
+#define WL_MCHAN_VAL		0x00000800
+#define WL_TDLS_VAL		0x00001000
+#define WL_MCNX_VAL		0x00002000
+#define WL_PROT_VAL		0x00004000
+#define WL_PSTA_VAL		0x00008000
+#define WL_TSO_VAL		0x00010000
+#define WL_TRF_MGMT_VAL		0x00020000
+#define WL_LPC_VAL	        0x00040000
+#define WL_L2FILTER_VAL		0x00080000
+#define WL_TXBF_VAL		0x00100000
+#define WL_P2PO_VAL		0x00200000
+#define WL_TBTT_VAL		0x00400000
+#define WL_MQ_VAL		0x01000000
+
+/* This level is currently used in Phoenix2 only */
+#define WL_SRSCAN_VAL		0x02000000
+
+#define WL_WNM_VAL		0x04000000
+#define WL_PWRSEL_VAL		0x10000000
+#define WL_NET_DETECT_VAL	0x20000000
+#define WL_PCIE_VAL		0x40000000
+
+/* use top-bit for WL_TIME_STAMP_VAL because this is a modifier
+ * rather than a message-type of its own
+ */
+#define WL_TIMESTAMP_VAL        0x80000000
+
+/* max # of leds supported by GPIO (gpio pin# == led index#) */
+#define	WL_LED_NUMGPIO		32	/* gpio 0-31 */
+
+/* led per-pin behaviors */
+#define	WL_LED_OFF		0		/* always off */
+#define	WL_LED_ON		1		/* always on */
+#define	WL_LED_ACTIVITY		2		/* activity */
+#define	WL_LED_RADIO		3		/* radio enabled */
+#define	WL_LED_ARADIO		4		/* 5  Ghz radio enabled */
+#define	WL_LED_BRADIO		5		/* 2.4Ghz radio enabled */
+#define	WL_LED_BGMODE		6		/* on if gmode, off if bmode */
+#define	WL_LED_WI1		7
+#define	WL_LED_WI2		8
+#define	WL_LED_WI3		9
+#define	WL_LED_ASSOC		10		/* associated state indicator */
+#define	WL_LED_INACTIVE		11		/* null behavior (clears default behavior) */
+#define	WL_LED_ASSOCACT		12		/* on when associated; blink fast for activity */
+#define WL_LED_WI4		13
+#define WL_LED_WI5		14
+#define	WL_LED_BLINKSLOW	15		/* blink slow */
+#define	WL_LED_BLINKMED		16		/* blink med */
+#define	WL_LED_BLINKFAST	17		/* blink fast */
+#define	WL_LED_BLINKCUSTOM	18		/* blink custom */
+#define	WL_LED_BLINKPERIODIC	19		/* blink periodic (custom 1000ms / off 400ms) */
+#define WL_LED_ASSOC_WITH_SEC	20		/* when connected with security */
+						/* keep on for 300 sec */
+#define WL_LED_START_OFF	21		/* off upon boot, could be turned on later */
+#define WL_LED_WI6		22
+#define WL_LED_WI7		23
+#define WL_LED_WI8		24
+#define	WL_LED_NUMBEHAVIOR	25
+
+/* led behavior numeric value format */
+#define	WL_LED_BEH_MASK		0x7f		/* behavior mask */
+#define	WL_LED_AL_MASK		0x80		/* activelow (polarity) bit */
+
+/* number of bytes needed to define a proper bit mask for MAC event reporting */
+#define BCMIO_ROUNDUP(x, y)	((((x) + ((y) - 1)) / (y)) * (y))
+#define BCMIO_NBBY		8
+#define WL_EVENTING_MASK_LEN	16
+
+
+/* join preference types */
+#define WL_JOIN_PREF_RSSI	1	/* by RSSI */
+#define WL_JOIN_PREF_WPA	2	/* by akm and ciphers */
+#define WL_JOIN_PREF_BAND	3	/* by 802.11 band */
+#define WL_JOIN_PREF_RSSI_DELTA	4	/* by 802.11 band only if RSSI delta condition matches */
+#define WL_JOIN_PREF_TRANS_PREF	5	/* defined by requesting AP */
+
+/* band preference */
+#define WLJP_BAND_ASSOC_PREF	255	/* use what WLC_SET_ASSOC_PREFER ioctl specifies */
+
+/* any multicast cipher suite */
+#define WL_WPA_ACP_MCS_ANY	"\x00\x00\x00\x00"
+
+/* 802.11h measurement types */
+#define WLC_MEASURE_TPC			1
+#define WLC_MEASURE_CHANNEL_BASIC	2
+#define WLC_MEASURE_CHANNEL_CCA		3
+#define WLC_MEASURE_CHANNEL_RPI		4
+
+/* regulatory enforcement levels */
+#define SPECT_MNGMT_OFF			0		/* both 11h and 11d disabled */
+#define SPECT_MNGMT_LOOSE_11H		1		/* allow non-11h APs in scan lists */
+#define SPECT_MNGMT_STRICT_11H		2		/* prune out non-11h APs from scan list */
+#define SPECT_MNGMT_STRICT_11D		3		/* switch to 802.11D mode */
+/* SPECT_MNGMT_LOOSE_11H_D - same as SPECT_MNGMT_LOOSE with the exception that Country IE
+ * adoption is done regardless of capability spectrum_management
+ */
+#define SPECT_MNGMT_LOOSE_11H_D		4		/* operation defined above */
+
+#define WL_CHAN_VALID_HW	(1 << 0)	/* valid with current HW */
+#define WL_CHAN_VALID_SW	(1 << 1)	/* valid with current country setting */
+#define WL_CHAN_BAND_5G		(1 << 2)	/* 5GHz-band channel */
+#define WL_CHAN_RADAR		(1 << 3)	/* radar sensitive  channel */
+#define WL_CHAN_INACTIVE	(1 << 4)	/* temporarily inactive due to radar */
+#define WL_CHAN_PASSIVE		(1 << 5)	/* channel is in passive mode */
+#define WL_CHAN_RESTRICTED	(1 << 6)	/* restricted use channel */
+
+/* BTC mode used by "btc_mode" iovar */
+#define	WL_BTC_DISABLE		0	/* disable BT coexistence */
+#define WL_BTC_FULLTDM      1	/* full TDM COEX */
+#define WL_BTC_ENABLE       1	/* full TDM COEX to maintain backward compatiblity */
+#define WL_BTC_PREMPT      2    /* full TDM COEX with preemption */
+#define WL_BTC_LITE        3	/* light weight coex for large isolation platform */
+#define WL_BTC_PARALLEL		4   /* BT and WLAN run in parallel with separate antenna  */
+#define WL_BTC_HYBRID		5   /* hybrid coex, only ack is allowed to transmit in BT slot */
+#define WL_BTC_DEFAULT		8	/* set the default mode for the device */
+#define WL_INF_BTC_DISABLE      0
+#define WL_INF_BTC_ENABLE       1
+#define WL_INF_BTC_AUTO         3
+
+/* BTC wire used by "btc_wire" iovar */
+#define	WL_BTC_DEFWIRE		0	/* use default wire setting */
+#define WL_BTC_2WIRE		2	/* use 2-wire BTC */
+#define WL_BTC_3WIRE		3	/* use 3-wire BTC */
+#define WL_BTC_4WIRE		4	/* use 4-wire BTC */
+
+/* BTC flags: BTC configuration that can be set by host */
+#define WL_BTC_FLAG_PREMPT               (1 << 0)
+#define WL_BTC_FLAG_BT_DEF               (1 << 1)
+#define WL_BTC_FLAG_ACTIVE_PROT          (1 << 2)
+#define WL_BTC_FLAG_SIM_RSP              (1 << 3)
+#define WL_BTC_FLAG_PS_PROTECT           (1 << 4)
+#define WL_BTC_FLAG_SIM_TX_LP	         (1 << 5)
+#define WL_BTC_FLAG_ECI                  (1 << 6)
+#define WL_BTC_FLAG_LIGHT                (1 << 7)
+#define WL_BTC_FLAG_PARALLEL             (1 << 8)
+
+/* maximum channels returned by the get valid channels iovar */
+#define WL_NUMCHANNELS		64
+
+/* max number of chanspecs (used by the iovar to calc. buf space) */
+#ifdef WL11AC_80P80
+#define WL_NUMCHANSPECS 206
+#else
+#define WL_NUMCHANSPECS 110
+#endif
+
+
+/* WDS link local endpoint WPA role */
+#define WL_WDS_WPA_ROLE_AUTH	0	/* authenticator */
+#define WL_WDS_WPA_ROLE_SUP	1	/* supplicant */
+#define WL_WDS_WPA_ROLE_AUTO	255	/* auto, based on mac addr value */
+
+/* Base offset values */
+#define WL_PKT_FILTER_BASE_PKT   0
+#define WL_PKT_FILTER_BASE_END   1
+#define WL_PKT_FILTER_BASE_D11_H 2 /* May be removed */
+#define WL_PKT_FILTER_BASE_D11_D 3 /* May be removed */
+#define WL_PKT_FILTER_BASE_ETH_H 4
+#define WL_PKT_FILTER_BASE_ETH_D 5
+#define WL_PKT_FILTER_BASE_ARP_H 6
+#define WL_PKT_FILTER_BASE_ARP_D 7 /* May be removed */
+#define WL_PKT_FILTER_BASE_IP4_H 8
+#define WL_PKT_FILTER_BASE_IP4_D 9
+#define WL_PKT_FILTER_BASE_IP6_H 10
+#define WL_PKT_FILTER_BASE_IP6_D 11
+#define WL_PKT_FILTER_BASE_TCP_H 12
+#define WL_PKT_FILTER_BASE_TCP_D 13 /* May be removed */
+#define WL_PKT_FILTER_BASE_UDP_H 14
+#define WL_PKT_FILTER_BASE_UDP_D 15
+#define WL_PKT_FILTER_BASE_IP6_P 16
+#define WL_PKT_FILTER_BASE_COUNT 17 /* May be removed */
+
+/* String mapping for bases that may be used by applications or debug */
+#define WL_PKT_FILTER_BASE_NAMES \
+	{ "START", WL_PKT_FILTER_BASE_PKT },   \
+	{ "END",   WL_PKT_FILTER_BASE_END },   \
+	{ "ETH_H", WL_PKT_FILTER_BASE_ETH_H }, \
+	{ "ETH_D", WL_PKT_FILTER_BASE_ETH_D }, \
+	{ "D11_H", WL_PKT_FILTER_BASE_D11_H }, \
+	{ "D11_D", WL_PKT_FILTER_BASE_D11_D }, \
+	{ "ARP_H", WL_PKT_FILTER_BASE_ARP_H }, \
+	{ "IP4_H", WL_PKT_FILTER_BASE_IP4_H }, \
+	{ "IP4_D", WL_PKT_FILTER_BASE_IP4_D }, \
+	{ "IP6_H", WL_PKT_FILTER_BASE_IP6_H }, \
+	{ "IP6_D", WL_PKT_FILTER_BASE_IP6_D }, \
+	{ "IP6_P", WL_PKT_FILTER_BASE_IP6_P }, \
+	{ "TCP_H", WL_PKT_FILTER_BASE_TCP_H }, \
+	{ "TCP_D", WL_PKT_FILTER_BASE_TCP_D }, \
+	{ "UDP_H", WL_PKT_FILTER_BASE_UDP_H }, \
+	{ "UDP_D", WL_PKT_FILTER_BASE_UDP_D }
+
+/* Flags for a pattern list element */
+#define WL_PKT_FILTER_MFLAG_NEG 0x0001
+
+/*
+ * Packet engine interface
+ */
+
+#define WL_PKTENG_PER_TX_START			0x01
+#define WL_PKTENG_PER_TX_STOP			0x02
+#define WL_PKTENG_PER_RX_START			0x04
+#define WL_PKTENG_PER_RX_WITH_ACK_START		0x05
+#define WL_PKTENG_PER_TX_WITH_ACK_START		0x06
+#define WL_PKTENG_PER_RX_STOP			0x08
+#define WL_PKTENG_PER_MASK			0xff
+
+#define WL_PKTENG_SYNCHRONOUS			0x100	/* synchronous flag */
+
+#define WL_PKTENG_MAXPKTSZ				16384	/* max pktsz limit for pkteng */
+
+#define NUM_80211b_RATES	4
+#define NUM_80211ag_RATES	8
+#define NUM_80211n_RATES	32
+#define NUM_80211_RATES		(NUM_80211b_RATES+NUM_80211ag_RATES+NUM_80211n_RATES)
+
+/*
+ * WOWL capability/override settings
+ */
+#define WL_WOWL_MAGIC           (1 << 0)    /* Wakeup on Magic packet */
+#define WL_WOWL_NET             (1 << 1)    /* Wakeup on Netpattern */
+#define WL_WOWL_DIS             (1 << 2)    /* Wakeup on loss-of-link due to Disassoc/Deauth */
+#define WL_WOWL_RETR            (1 << 3)    /* Wakeup on retrograde TSF */
+#define WL_WOWL_BCN             (1 << 4)    /* Wakeup on loss of beacon */
+#define WL_WOWL_TST             (1 << 5)    /* Wakeup after test */
+#define WL_WOWL_M1              (1 << 6)    /* Wakeup after PTK refresh */
+#define WL_WOWL_EAPID           (1 << 7)    /* Wakeup after receipt of EAP-Identity Req */
+#define WL_WOWL_PME_GPIO        (1 << 8)    /* Wakeind via PME(0) or GPIO(1) */
+#define WL_WOWL_NEEDTKIP1       (1 << 9)    /* need tkip phase 1 key to be updated by the driver */
+#define WL_WOWL_GTK_FAILURE     (1 << 10)   /* enable wakeup if GTK fails */
+#define WL_WOWL_EXTMAGPAT       (1 << 11)   /* support extended magic packets */
+#define WL_WOWL_ARPOFFLOAD      (1 << 12)   /* support ARP/NS/keepalive offloading */
+#define WL_WOWL_WPA2            (1 << 13)   /* read protocol version for EAPOL frames */
+#define WL_WOWL_KEYROT          (1 << 14)   /* If the bit is set, use key rotaton */
+#define WL_WOWL_BCAST           (1 << 15)   /* If the bit is set, frm received was bcast frame */
+#define WL_WOWL_SCANOL          (1 << 16)   /* If the bit is set, scan offload is enabled */
+#define WL_WOWL_TCPKEEP_TIME    (1 << 17)   /* Wakeup on tcpkeep alive timeout */
+#define WL_WOWL_MDNS_CONFLICT   (1 << 18)   /* Wakeup on mDNS Conflict Resolution */
+#define WL_WOWL_MDNS_SERVICE    (1 << 19)   /* Wakeup on mDNS Service Connect */
+#define WL_WOWL_TCPKEEP_DATA    (1 << 20)   /* tcp keepalive got data */
+#define WL_WOWL_FW_HALT         (1 << 21)   /* Firmware died in wowl mode */
+#define WL_WOWL_ENAB_HWRADIO    (1 << 22)   /* Enable detection of radio button changes */
+#define WL_WOWL_MIC_FAIL        (1 << 23)   /* Offloads detected MIC failure(s) */
+#define WL_WOWL_UNASSOC         (1 << 24)   /* Wakeup in Unassociated state (Net/Magic Pattern) */
+#define WL_WOWL_SECURE          (1 << 25)   /* Wakeup if received matched secured pattern */
+#define WL_WOWL_LINKDOWN        (1 << 31)   /* Link Down indication in WoWL mode */
+
+#define WL_WOWL_TCPKEEP         (1 << 20)   /* temp copy to satisfy automerger */
+#define MAGIC_PKT_MINLEN 102    /* Magic pkt min length is 6 * 0xFF + 16 * ETHER_ADDR_LEN */
+
+#define WOWL_PATTEN_TYPE_ARP	(1 << 0)	/* ARP offload Pattern */
+#define WOWL_PATTEN_TYPE_NA	(1 << 1)	/* NA offload Pattern */
+
+#define MAGIC_PKT_MINLEN	102    /* Magic pkt min length is 6 * 0xFF + 16 * ETHER_ADDR_LEN */
+#define MAGIC_PKT_NUM_MAC_ADDRS	16
+
+
+/* Overlap BSS Scan parameters default, minimum, maximum */
+#define WLC_OBSS_SCAN_PASSIVE_DWELL_DEFAULT		20	/* unit TU */
+#define WLC_OBSS_SCAN_PASSIVE_DWELL_MIN			5	/* unit TU */
+#define WLC_OBSS_SCAN_PASSIVE_DWELL_MAX			1000	/* unit TU */
+#define WLC_OBSS_SCAN_ACTIVE_DWELL_DEFAULT		10	/* unit TU */
+#define WLC_OBSS_SCAN_ACTIVE_DWELL_MIN			10	/* unit TU */
+#define WLC_OBSS_SCAN_ACTIVE_DWELL_MAX			1000	/* unit TU */
+#define WLC_OBSS_SCAN_WIDTHSCAN_INTERVAL_DEFAULT	300	/* unit Sec */
+#define WLC_OBSS_SCAN_WIDTHSCAN_INTERVAL_MIN		10	/* unit Sec */
+#define WLC_OBSS_SCAN_WIDTHSCAN_INTERVAL_MAX		900	/* unit Sec */
+#define WLC_OBSS_SCAN_CHANWIDTH_TRANSITION_DLY_DEFAULT	5
+#define WLC_OBSS_SCAN_CHANWIDTH_TRANSITION_DLY_MIN	5
+#define WLC_OBSS_SCAN_CHANWIDTH_TRANSITION_DLY_MAX	100
+#define WLC_OBSS_SCAN_PASSIVE_TOTAL_PER_CHANNEL_DEFAULT	200	/* unit TU */
+#define WLC_OBSS_SCAN_PASSIVE_TOTAL_PER_CHANNEL_MIN	200	/* unit TU */
+#define WLC_OBSS_SCAN_PASSIVE_TOTAL_PER_CHANNEL_MAX	10000	/* unit TU */
+#define WLC_OBSS_SCAN_ACTIVE_TOTAL_PER_CHANNEL_DEFAULT	20	/* unit TU */
+#define WLC_OBSS_SCAN_ACTIVE_TOTAL_PER_CHANNEL_MIN	20	/* unit TU */
+#define WLC_OBSS_SCAN_ACTIVE_TOTAL_PER_CHANNEL_MAX	10000	/* unit TU */
+#define WLC_OBSS_SCAN_ACTIVITY_THRESHOLD_DEFAULT	25	/* unit percent */
+#define WLC_OBSS_SCAN_ACTIVITY_THRESHOLD_MIN		0	/* unit percent */
+#define WLC_OBSS_SCAN_ACTIVITY_THRESHOLD_MAX		100	/* unit percent */
+
+#define WL_MIN_NUM_OBSS_SCAN_ARG 7	/* minimum number of arguments required for OBSS Scan */
+
+#define WL_COEX_INFO_MASK		0x07
+#define WL_COEX_INFO_REQ		0x01
+#define	WL_COEX_40MHZ_INTOLERANT	0x02
+#define	WL_COEX_WIDTH20			0x04
+
+#define	WLC_RSSI_INVALID	 0	/* invalid RSSI value */
+
+#define MAX_RSSI_LEVELS 8
+
+/* **** EXTLOG **** */
+#define EXTLOG_CUR_VER		0x0100
+
+#define MAX_ARGSTR_LEN		18 /* At least big enough for storing ETHER_ADDR_STR_LEN */
+
+/* log modules (bitmap) */
+#define LOG_MODULE_COMMON	0x0001
+#define LOG_MODULE_ASSOC	0x0002
+#define LOG_MODULE_EVENT	0x0004
+#define LOG_MODULE_MAX		3			/* Update when adding module */
+
+/* log levels */
+#define WL_LOG_LEVEL_DISABLE	0
+#define WL_LOG_LEVEL_ERR	1
+#define WL_LOG_LEVEL_WARN	2
+#define WL_LOG_LEVEL_INFO	3
+#define WL_LOG_LEVEL_MAX	WL_LOG_LEVEL_INFO	/* Update when adding level */
+
+/* flag */
+#define LOG_FLAG_EVENT		1
+
+/* log arg_type */
+#define LOG_ARGTYPE_NULL	0
+#define LOG_ARGTYPE_STR		1	/* %s */
+#define LOG_ARGTYPE_INT		2	/* %d */
+#define LOG_ARGTYPE_INT_STR	3	/* %d...%s */
+#define LOG_ARGTYPE_STR_INT	4	/* %s...%d */
+
+/* 802.11 Mgmt Packet flags */
+#define VNDR_IE_BEACON_FLAG	0x1
+#define VNDR_IE_PRBRSP_FLAG	0x2
+#define VNDR_IE_ASSOCRSP_FLAG	0x4
+#define VNDR_IE_AUTHRSP_FLAG	0x8
+#define VNDR_IE_PRBREQ_FLAG	0x10
+#define VNDR_IE_ASSOCREQ_FLAG	0x20
+#define VNDR_IE_IWAPID_FLAG	0x40 /* vendor IE in IW advertisement protocol ID field */
+#define VNDR_IE_CUSTOM_FLAG	0x100 /* allow custom IE id */
+
+#if defined(WLP2P)
+/* P2P Action Frames flags (spec ordered) */
+#define VNDR_IE_GONREQ_FLAG     0x001000
+#define VNDR_IE_GONRSP_FLAG     0x002000
+#define VNDR_IE_GONCFM_FLAG     0x004000
+#define VNDR_IE_INVREQ_FLAG     0x008000
+#define VNDR_IE_INVRSP_FLAG     0x010000
+#define VNDR_IE_DISREQ_FLAG     0x020000
+#define VNDR_IE_DISRSP_FLAG     0x040000
+#define VNDR_IE_PRDREQ_FLAG     0x080000
+#define VNDR_IE_PRDRSP_FLAG     0x100000
+
+#define VNDR_IE_P2PAF_SHIFT	12
+#endif /* WLP2P */
+
+/* channel interference measurement (chanim) related defines */
+
+/* chanim mode */
+#define CHANIM_DISABLE	0	/* disabled */
+#define CHANIM_DETECT	1	/* detection only */
+#define CHANIM_EXT		2	/* external state machine */
+#define CHANIM_ACT		3	/* full internal state machine, detect + act */
+#define CHANIM_MODE_MAX 4
+
+/* define for apcs reason code */
+#define APCS_INIT		0
+#define APCS_IOCTL		1
+#define APCS_CHANIM		2
+#define APCS_CSTIMER		3
+#define APCS_BTA		4
+#define APCS_TXDLY		5
+#define APCS_NONACSD		6
+#define APCS_DFS_REENTRY	7
+#define APCS_TXFAIL		8
+#define APCS_MAX		9
+
+/* number of ACS record entries */
+#define CHANIM_ACS_RECORD			10
+
+/* CHANIM */
+#define CCASTATS_TXDUR  0
+#define CCASTATS_INBSS  1
+#define CCASTATS_OBSS   2
+#define CCASTATS_NOCTG  3
+#define CCASTATS_NOPKT  4
+#define CCASTATS_DOZE   5
+#define CCASTATS_TXOP	6
+#define CCASTATS_GDTXDUR        7
+#define CCASTATS_BDTXDUR        8
+#define CCASTATS_MAX    9
+
+#define WL_CHANIM_COUNT_ALL	0xff
+#define WL_CHANIM_COUNT_ONE	0x1
+
+/* ap tpc modes */
+#define	AP_TPC_OFF		0
+#define	AP_TPC_BSS_PWR		1	/* BSS power control */
+#define AP_TPC_AP_PWR		2	/* AP power control */
+#define	AP_TPC_AP_BSS_PWR	3	/* Both AP and BSS power control */
+#define AP_TPC_MAX_LINK_MARGIN	127
+
+/* ap tpc modes */
+#define	AP_TPC_OFF		0
+#define	AP_TPC_BSS_PWR		1	/* BSS power control */
+#define AP_TPC_AP_PWR		2	/* AP power control */
+#define	AP_TPC_AP_BSS_PWR	3	/* Both AP and BSS power control */
+#define AP_TPC_MAX_LINK_MARGIN	127
+
+/* state */
+#define WL_P2P_DISC_ST_SCAN	0
+#define WL_P2P_DISC_ST_LISTEN	1
+#define WL_P2P_DISC_ST_SEARCH	2
+
+/* i/f type */
+#define WL_P2P_IF_CLIENT	0
+#define WL_P2P_IF_GO		1
+#define WL_P2P_IF_DYNBCN_GO	2
+#define WL_P2P_IF_DEV		3
+
+/* count */
+#define WL_P2P_SCHED_RSVD	0
+#define WL_P2P_SCHED_REPEAT	255	/* anything > 255 will be treated as 255 */
+
+#define WL_P2P_SCHED_FIXED_LEN		3
+
+/* schedule type */
+#define WL_P2P_SCHED_TYPE_ABS		0	/* Scheduled Absence */
+#define WL_P2P_SCHED_TYPE_REQ_ABS	1	/* Requested Absence */
+
+/* schedule action during absence periods (for WL_P2P_SCHED_ABS type) */
+#define WL_P2P_SCHED_ACTION_NONE	0	/* no action */
+#define WL_P2P_SCHED_ACTION_DOZE	1	/* doze */
+/* schedule option - WL_P2P_SCHED_TYPE_REQ_ABS */
+#define WL_P2P_SCHED_ACTION_GOOFF	2	/* turn off GO beacon/prbrsp functions */
+/* schedule option - WL_P2P_SCHED_TYPE_XXX */
+#define WL_P2P_SCHED_ACTION_RESET	255	/* reset */
+
+/* schedule option - WL_P2P_SCHED_TYPE_ABS */
+#define WL_P2P_SCHED_OPTION_NORMAL	0	/* normal start/interval/duration/count */
+#define WL_P2P_SCHED_OPTION_BCNPCT	1	/* percentage of beacon interval */
+/* schedule option - WL_P2P_SCHED_TYPE_REQ_ABS */
+#define WL_P2P_SCHED_OPTION_TSFOFS	2	/* normal start/internal/duration/count with
+						 * start being an offset of the 'current' TSF
+						 */
+
+/* feature flags */
+#define WL_P2P_FEAT_GO_CSA	(1 << 0)	/* GO moves with the STA using CSA method */
+#define WL_P2P_FEAT_GO_NOLEGACY	(1 << 1)	/* GO does not probe respond to non-p2p probe
+						 * requests
+						 */
+#define WL_P2P_FEAT_RESTRICT_DEV_RESP (1 << 2)	/* Restrict p2p dev interface from responding */
+
+/* n-mode support capability */
+/* 2x2 includes both 1x1 & 2x2 devices
+ * reserved #define 2 for future when we want to separate 1x1 & 2x2 and
+ * control it independently
+ */
+#define WL_11N_2x2			1
+#define WL_11N_3x3			3
+#define WL_11N_4x4			4
+
+/* define 11n feature disable flags */
+#define WLFEATURE_DISABLE_11N		0x00000001
+#define WLFEATURE_DISABLE_11N_STBC_TX	0x00000002
+#define WLFEATURE_DISABLE_11N_STBC_RX	0x00000004
+#define WLFEATURE_DISABLE_11N_SGI_TX	0x00000008
+#define WLFEATURE_DISABLE_11N_SGI_RX	0x00000010
+#define WLFEATURE_DISABLE_11N_AMPDU_TX	0x00000020
+#define WLFEATURE_DISABLE_11N_AMPDU_RX	0x00000040
+#define WLFEATURE_DISABLE_11N_GF	0x00000080
+
+/* Proxy STA modes */
+#define PSTA_MODE_DISABLED		0
+#define PSTA_MODE_PROXY			1
+#define PSTA_MODE_REPEATER		2
+
+/* op code in nat_cfg */
+#define NAT_OP_ENABLE		1	/* enable NAT on given interface */
+#define NAT_OP_DISABLE		2	/* disable NAT on given interface */
+#define NAT_OP_DISABLE_ALL	3	/* disable NAT on all interfaces */
+
+/* NAT state */
+#define NAT_STATE_ENABLED	1	/* NAT is enabled */
+#define NAT_STATE_DISABLED	2	/* NAT is disabled */
+
+#define CHANNEL_5G_LOW_START	36	/* 5G low (36..48) CDD enable/disable bit mask */
+#define CHANNEL_5G_MID_START	52	/* 5G mid (52..64) CDD enable/disable bit mask */
+#define CHANNEL_5G_HIGH_START	100	/* 5G high (100..140) CDD enable/disable bit mask */
+#define CHANNEL_5G_UPPER_START	149	/* 5G upper (149..161) CDD enable/disable bit mask */
+
+/* D0 Coalescing */
+#define IPV4_ARP_FILTER		0x0001
+#define IPV4_NETBT_FILTER	0x0002
+#define IPV4_LLMNR_FILTER	0x0004
+#define IPV4_SSDP_FILTER	0x0008
+#define IPV4_WSD_FILTER		0x0010
+#define IPV6_NETBT_FILTER	0x0200
+#define IPV6_LLMNR_FILTER	0x0400
+#define IPV6_SSDP_FILTER	0x0800
+#define IPV6_WSD_FILTER		0x1000
+
+/* Network Offload Engine */
+#define NWOE_OL_ENABLE		0x00000001
+
+/*
+ * Traffic management structures/defines.
+ */
+
+/* Traffic management bandwidth parameters */
+#define TRF_MGMT_MAX_PRIORITIES                 3
+
+#define TRF_MGMT_FLAG_ADD_DSCP                  0x0001  /* Add DSCP to IP TOS field */
+#define TRF_MGMT_FLAG_DISABLE_SHAPING           0x0002  /* Don't shape traffic */
+#define TRF_MGMT_FLAG_MANAGE_LOCAL_TRAFFIC      0x0008  /* Manage traffic over our local subnet */
+#define TRF_MGMT_FLAG_FILTER_ON_MACADDR         0x0010  /* filter on MAC address */
+#define TRF_MGMT_FLAG_NO_RX                     0x0020  /* do not apply fiters to rx packets */
+
+#define TRF_FILTER_MAC_ADDR              0x0001 /* L2 filter use dst mac address for filtering */
+#define TRF_FILTER_IP_ADDR               0x0002 /* L3 filter use ip ddress for filtering */
+#define TRF_FILTER_L4                    0x0004 /* L4 filter use tcp/udp for filtering */
+#define TRF_FILTER_DWM                   0x0008 /* L3 filter use DSCP for filtering */
+#define TRF_FILTER_FAVORED               0x0010 /* Tag the packet FAVORED */
+
+/* WNM/NPS subfeatures mask */
+#define WL_WNM_BSSTRANS		0x00000001
+#define WL_WNM_PROXYARP		0x00000002
+#define WL_WNM_MAXIDLE		0x00000004
+#define WL_WNM_TIMBC		0x00000008
+#define WL_WNM_TFS		0x00000010
+#define WL_WNM_SLEEP		0x00000020
+#define WL_WNM_DMS		0x00000040
+#define WL_WNM_FMS		0x00000080
+#define WL_WNM_NOTIF		0x00000100
+#define WL_WNM_MAX		0x00000200
+
+#ifndef ETHER_MAX_DATA
+#define ETHER_MAX_DATA	1500
+#endif /* ETHER_MAX_DATA */
+
+/* Different discovery modes for dpt */
+#define	DPT_DISCOVERY_MANUAL	0x01	/* manual discovery mode */
+#define	DPT_DISCOVERY_AUTO	0x02	/* auto discovery mode */
+#define	DPT_DISCOVERY_SCAN	0x04	/* scan-based discovery mode */
+
+/* different path selection values */
+#define DPT_PATHSEL_AUTO	0	/* auto mode for path selection */
+#define DPT_PATHSEL_DIRECT	1	/* always use direct DPT path */
+#define DPT_PATHSEL_APPATH	2	/* always use AP path */
+
+/* different ops for deny list */
+#define DPT_DENY_LIST_ADD	1	/* add to dpt deny list */
+#define DPT_DENY_LIST_REMOVE	2	/* remove from dpt deny list */
+
+/* different ops for manual end point */
+#define DPT_MANUAL_EP_CREATE	1	/* create manual dpt endpoint */
+#define DPT_MANUAL_EP_MODIFY	2	/* modify manual dpt endpoint */
+#define DPT_MANUAL_EP_DELETE	3	/* delete manual dpt endpoint */
+
+/* flags to indicate DPT status */
+#define	DPT_STATUS_ACTIVE	0x01	/* link active (though may be suspended) */
+#define	DPT_STATUS_AES		0x02	/* link secured through AES encryption */
+#define	DPT_STATUS_FAILED	0x04	/* DPT link failed */
+
+#ifdef WLTDLS
+/* different ops for manual end point */
+#define TDLS_MANUAL_EP_CREATE	1	/* create manual dpt endpoint */
+#define TDLS_MANUAL_EP_MODIFY	2	/* modify manual dpt endpoint */
+#define TDLS_MANUAL_EP_DELETE	3	/* delete manual dpt endpoint */
+#define TDLS_MANUAL_EP_PM		4	/*  put dpt endpoint in PM mode */
+#define TDLS_MANUAL_EP_WAKE		5	/* wake up dpt endpoint from PM */
+#define TDLS_MANUAL_EP_DISCOVERY	6	/* discover if endpoint is TDLS capable */
+#define TDLS_MANUAL_EP_CHSW		7	/* channel switch */
+#define TDLS_MANUAL_EP_WFD_TPQ	8	/* WiFi-Display Tunneled Probe reQuest */
+
+/* modes */
+#define TDLS_WFD_IE_TX			0
+#define TDLS_WFD_IE_RX			1
+#define TDLS_WFD_PROBE_IE_TX	2
+#define TDLS_WFD_PROBE_IE_RX	3
+#endif /* WLTDLS */
+
+/* define for flag */
+#define TSPEC_PENDING		0	/* TSPEC pending */
+#define TSPEC_ACCEPTED		1	/* TSPEC accepted */
+#define TSPEC_REJECTED		2	/* TSPEC rejected */
+#define TSPEC_UNKNOWN		3	/* TSPEC unknown */
+#define TSPEC_STATUS_MASK	7	/* TSPEC status mask */
+
+
+/* Software feature flag defines used by wlfeatureflag */
+#ifdef WLAFTERBURNER
+#define WL_SWFL_ABBFL       0x0001 /* Allow Afterburner on systems w/o hardware BFL */
+#define WL_SWFL_ABENCORE    0x0002 /* Allow AB on non-4318E chips */
+#endif /* WLAFTERBURNER */
+#define WL_SWFL_NOHWRADIO	0x0004
+#define WL_SWFL_FLOWCONTROL     0x0008 /* Enable backpressure to OS stack */
+#define WL_SWFL_WLBSSSORT	0x0010 /* Per-port supports sorting of BSS */
+
+#define WL_LIFETIME_MAX 0xFFFF /* Max value in ms */
+
+#define CSA_BROADCAST_ACTION_FRAME	0	/* csa broadcast action frame */
+#define CSA_UNICAST_ACTION_FRAME	  1 /* csa unicast action frame */
+
+/* Roaming trigger definitions for WLC_SET_ROAM_TRIGGER.
+ *
+ * (-100 < value < 0)   value is used directly as a roaming trigger in dBm
+ * (0 <= value) value specifies a logical roaming trigger level from
+ *                      the list below
+ *
+ * WLC_GET_ROAM_TRIGGER always returns roaming trigger value in dBm, never
+ * the logical roam trigger value.
+ */
+#define WLC_ROAM_TRIGGER_DEFAULT	0 /* default roaming trigger */
+#define WLC_ROAM_TRIGGER_BANDWIDTH	1 /* optimize for bandwidth roaming trigger */
+#define WLC_ROAM_TRIGGER_DISTANCE	2 /* optimize for distance roaming trigger */
+#define WLC_ROAM_TRIGGER_AUTO		3 /* auto-detect environment */
+#define WLC_ROAM_TRIGGER_MAX_VALUE	3 /* max. valid value */
+
+#define WLC_ROAM_NEVER_ROAM_TRIGGER	(-100) /* Avoid Roaming by setting a large value */
+
+/* Preferred Network Offload (PNO, formerly PFN) defines */
+#define WPA_AUTH_PFN_ANY	0xffffffff	/* for PFN, match only ssid */
+
+#define SORT_CRITERIA_BIT		0
+#define AUTO_NET_SWITCH_BIT		1
+#define ENABLE_BKGRD_SCAN_BIT		2
+#define IMMEDIATE_SCAN_BIT		3
+#define	AUTO_CONNECT_BIT		4
+#define	ENABLE_BD_SCAN_BIT		5
+#define ENABLE_ADAPTSCAN_BIT		6
+#define IMMEDIATE_EVENT_BIT		8
+#define SUPPRESS_SSID_BIT		9
+#define ENABLE_NET_OFFLOAD_BIT		10
+/* report found/lost events for SSID and BSSID networks seperately */
+#define REPORT_SEPERATELY_BIT		11
+#define BESTN_BSSID_ONLY_BIT		12
+
+#define SORT_CRITERIA_MASK		0x0001
+#define AUTO_NET_SWITCH_MASK		0x0002
+#define ENABLE_BKGRD_SCAN_MASK		0x0004
+#define IMMEDIATE_SCAN_MASK		0x0008
+#define	AUTO_CONNECT_MASK		0x0010
+
+#define ENABLE_BD_SCAN_MASK		0x0020
+#define ENABLE_ADAPTSCAN_MASK		0x00c0
+#define IMMEDIATE_EVENT_MASK		0x0100
+#define SUPPRESS_SSID_MASK		0x0200
+#define ENABLE_NET_OFFLOAD_MASK		0x0400
+/* report found/lost events for SSID and BSSID networks seperately */
+#define REPORT_SEPERATELY_MASK		0x0800
+#define BESTN_BSSID_ONLY_MASK		0x1000
+
+#define PFN_VERSION			2
+#define PFN_SCANRESULT_VERSION		1
+#define MAX_PFN_LIST_COUNT		16
+
+#define PFN_COMPLETE			1
+#define PFN_INCOMPLETE			0
+
+#define DEFAULT_BESTN			2
+#define DEFAULT_MSCAN			0
+#define DEFAULT_REPEAT			10
+#define DEFAULT_EXP				2
+
+#define PFN_PARTIAL_SCAN_BIT		0
+#define PFN_PARTIAL_SCAN_MASK		1
+
+#define WL_PFN_SUPPRESSFOUND_MASK	0x08
+#define WL_PFN_SUPPRESSLOST_MASK	0x10
+#define WL_PFN_RSSI_MASK		0xff00
+#define WL_PFN_RSSI_SHIFT		8
+
+#define WL_PFN_REPORT_ALLNET    0
+#define WL_PFN_REPORT_SSIDNET   1
+#define WL_PFN_REPORT_BSSIDNET  2
+
+#define WL_PFN_CFG_FLAGS_PROHIBITED	0x00000001	/* Accept and use prohibited channels */
+#define WL_PFN_CFG_FLAGS_HISTORY_OFF	0x00000002	/* Scan history suppressed */
+
+#define WL_PFN_HIDDEN_BIT		2
+#define PNO_SCAN_MAX_FW			508*1000	/* max time scan time in msec */
+#define PNO_SCAN_MAX_FW_SEC		PNO_SCAN_MAX_FW/1000 /* max time scan time in SEC */
+#define PNO_SCAN_MIN_FW_SEC		10			/* min time scan time in SEC */
+#define WL_PFN_HIDDEN_MASK		0x4
+
+#ifndef BESTN_MAX
+#define BESTN_MAX			8
+#endif
+
+#ifndef MSCAN_MAX
+#define MSCAN_MAX			32
+#endif
+
+/* TCP Checksum Offload error injection for testing */
+#define TOE_ERRTEST_TX_CSUM	0x00000001
+#define TOE_ERRTEST_RX_CSUM	0x00000002
+#define TOE_ERRTEST_RX_CSUM2	0x00000004
+
+/* ARP Offload feature flags for arp_ol iovar */
+#define ARP_OL_AGENT		0x00000001
+#define ARP_OL_SNOOP		0x00000002
+#define ARP_OL_HOST_AUTO_REPLY	0x00000004
+#define ARP_OL_PEER_AUTO_REPLY	0x00000008
+
+/* ARP Offload error injection */
+#define ARP_ERRTEST_REPLY_PEER	0x1
+#define ARP_ERRTEST_REPLY_HOST	0x2
+
+#define ARP_MULTIHOMING_MAX	8	/* Maximum local host IP addresses */
+#define ND_MULTIHOMING_MAX 10	/* Maximum local host IP addresses */
+#define ND_REQUEST_MAX		5	/* Max set of offload params */
+
+
+/* AOAC wake event flag */
+#define WAKE_EVENT_NLO_DISCOVERY_BIT		1
+#define WAKE_EVENT_AP_ASSOCIATION_LOST_BIT	2
+#define WAKE_EVENT_GTK_HANDSHAKE_ERROR_BIT 4
+#define WAKE_EVENT_4WAY_HANDSHAKE_REQUEST_BIT 8
+
+
+#define MAX_NUM_WOL_PATTERN	22 /* LOGO requirements min 22 */
+
+
+/* Packet filter operation mode */
+/* True: 1; False: 0 */
+#define PKT_FILTER_MODE_FORWARD_ON_MATCH		1
+/* Enable and disable pkt_filter as a whole */
+#define PKT_FILTER_MODE_DISABLE					2
+/* Cache first matched rx pkt(be queried by host later) */
+#define PKT_FILTER_MODE_PKT_CACHE_ON_MATCH		4
+/* If pkt_filter is enabled and no filter is set, don't forward anything */
+#define PKT_FILTER_MODE_PKT_FORWARD_OFF_DEFAULT 8
+
+#ifdef DONGLEOVERLAYS
+#define OVERLAY_IDX_MASK		0x000000ff
+#define OVERLAY_IDX_SHIFT		0
+#define OVERLAY_FLAGS_MASK		0xffffff00
+#define OVERLAY_FLAGS_SHIFT		8
+/* overlay written to device memory immediately after loading the base image */
+#define OVERLAY_FLAG_POSTLOAD	0x100
+/* defer overlay download until the device responds w/WLC_E_OVL_DOWNLOAD event */
+#define OVERLAY_FLAG_DEFER_DL	0x200
+/* overlay downloaded prior to the host going to sleep */
+#define OVERLAY_FLAG_PRESLEEP	0x400
+#define OVERLAY_DOWNLOAD_CHUNKSIZE	1024
+#endif /* DONGLEOVERLAYS */
+
+/* reuse two number in the sc/rc space */
+#define	SMFS_CODE_MALFORMED 0xFFFE
+#define SMFS_CODE_IGNORED	0xFFFD
+
+/* RFAWARE def */
+#define BCM_ACTION_RFAWARE		0x77
+#define BCM_ACTION_RFAWARE_DCS  0x01
+
+/* DCS reason code define */
+#define BCM_DCS_IOVAR		0x1
+#define BCM_DCS_UNKNOWN		0xFF
+
+
+#ifdef PROP_TXSTATUS
+/* Bit definitions for tlv iovar */
+/*
+ * enable RSSI signals:
+ * WLFC_CTL_TYPE_RSSI
+ */
+#define WLFC_FLAGS_RSSI_SIGNALS			0x0001
+
+/* enable (if/mac_open, if/mac_close,, mac_add, mac_del) signals:
+ *
+ * WLFC_CTL_TYPE_MAC_OPEN
+ * WLFC_CTL_TYPE_MAC_CLOSE
+ *
+ * WLFC_CTL_TYPE_INTERFACE_OPEN
+ * WLFC_CTL_TYPE_INTERFACE_CLOSE
+ *
+ * WLFC_CTL_TYPE_MACDESC_ADD
+ * WLFC_CTL_TYPE_MACDESC_DEL
+ *
+ */
+#define WLFC_FLAGS_XONXOFF_SIGNALS		0x0002
+
+/* enable (status, fifo_credit, mac_credit) signals
+ * WLFC_CTL_TYPE_MAC_REQUEST_CREDIT
+ * WLFC_CTL_TYPE_TXSTATUS
+ * WLFC_CTL_TYPE_FIFO_CREDITBACK
+ */
+#define WLFC_FLAGS_CREDIT_STATUS_SIGNALS	0x0004
+
+#define WLFC_FLAGS_HOST_PROPTXSTATUS_ACTIVE	0x0008
+#define WLFC_FLAGS_PSQ_GENERATIONFSM_ENABLE	0x0010
+#define WLFC_FLAGS_PSQ_ZERO_BUFFER_ENABLE	0x0020
+#define WLFC_FLAGS_HOST_RXRERODER_ACTIVE	0x0040
+#define WLFC_FLAGS_PKT_STAMP_SIGNALS		0x0080
+
+#endif /* PROP_TXSTATUS */
+
+#define WL_TIMBC_STATUS_AP_UNKNOWN	255	/* AP status for internal use only */
+
+#define WL_DFRTS_LOGIC_OFF	0	/* Feature is disabled */
+#define WL_DFRTS_LOGIC_OR	1	/* OR all non-zero threshold conditions */
+#define WL_DFRTS_LOGIC_AND	2	/* AND all non-zero threshold conditions */
+
+/* Definitions for Reliable Multicast */
+#define WL_RELMCAST_MAX_CLIENT		32
+#define WL_RELMCAST_FLAG_INBLACKLIST	1
+#define WL_RELMCAST_FLAG_ACTIVEACKER	2
+#define WL_RELMCAST_FLAG_RELMCAST	4
+
+/* structures for proximity detection device role */
+#define WL_PROXD_MODE_DISABLE	0
+#define WL_PROXD_MODE_NEUTRAL	1
+#define WL_PROXD_MODE_INITIATOR	2
+#define WL_PROXD_MODE_TARGET	3
+#define WL_PROXD_RANDOM_WAKEUP	0x8000
+
+
+#ifdef NET_DETECT
+#define NET_DETECT_MAX_WAKE_DATA_SIZE	2048
+#define NET_DETECT_MAX_PROFILES		16
+#define NET_DETECT_MAX_CHANNELS		50
+#endif /* NET_DETECT */
+
+/* Bit masks for radio disabled status - returned by WL_GET_RADIO */
+#define WL_RADIO_SW_DISABLE		(1<<0)
+#define WL_RADIO_HW_DISABLE		(1<<1)
+#define WL_RADIO_MPC_DISABLE		(1<<2)
+#define WL_RADIO_COUNTRY_DISABLE	(1<<3)	/* some countries don't support any channel */
+
+#define	WL_SPURAVOID_OFF	0
+#define	WL_SPURAVOID_ON1	1
+#define	WL_SPURAVOID_ON2	2
+
+
+#define WL_4335_SPURAVOID_ON1	1
+#define WL_4335_SPURAVOID_ON2	2
+#define WL_4335_SPURAVOID_ON3	3
+#define WL_4335_SPURAVOID_ON4	4
+#define WL_4335_SPURAVOID_ON5	5
+#define WL_4335_SPURAVOID_ON6	6
+#define WL_4335_SPURAVOID_ON7	7
+#define WL_4335_SPURAVOID_ON8	8
+#define WL_4335_SPURAVOID_ON9	9
+
+/* Override bit for WLC_SET_TXPWR.  if set, ignore other level limits */
+#define WL_TXPWR_OVERRIDE	(1U<<31)
+#define WL_TXPWR_NEG   (1U<<30)
+
+
+/* phy types (returned by WLC_GET_PHYTPE) */
+#define	WLC_PHY_TYPE_A		0
+#define	WLC_PHY_TYPE_B		1
+#define	WLC_PHY_TYPE_G		2
+#define	WLC_PHY_TYPE_N		4
+#define	WLC_PHY_TYPE_LP		5
+#define	WLC_PHY_TYPE_SSN	6
+#define	WLC_PHY_TYPE_HT		7
+#define	WLC_PHY_TYPE_LCN	8
+#define	WLC_PHY_TYPE_LCN40	10
+#define WLC_PHY_TYPE_AC		11
+#define	WLC_PHY_TYPE_NULL	0xf
+
+/* Values for PM */
+#define PM_OFF	0
+#define PM_MAX	1
+#define PM_FAST 2
+#define PM_FORCE_OFF 3		/* use this bit to force PM off even bt is active */
+
+#define WL_WME_CNT_VERSION	1	/* current version of wl_wme_cnt_t */
+
+/* fbt_cap: FBT assoc / reassoc modes. */
+#define WLC_FBT_CAP_DRV_4WAY_AND_REASSOC  1 /* Driver 4-way handshake & reassoc (WLFBT). */
+
+/* monitor_promisc_level bits */
+#define WL_MONPROMISC_PROMISC 0x0001
+#define WL_MONPROMISC_CTRL 0x0002
+#define WL_MONPROMISC_FCS 0x0004
+
+/* TCP Checksum Offload defines */
+#define TOE_TX_CSUM_OL		0x00000001
+#define TOE_RX_CSUM_OL		0x00000002
+
+/* Wi-Fi Display Services (WFDS) */
+#define WL_P2P_SOCIAL_CHANNELS_MAX  WL_NUMCHANNELS
+#define MAX_WFDS_SEEK_SVC 4	/* Max # of wfds services to seek */
+#define MAX_WFDS_ADVERT_SVC 4	/* Max # of wfds services to advertise */
+#define MAX_WFDS_SVC_NAME_LEN 200	/* maximum service_name length */
+#define MAX_WFDS_ADV_SVC_INFO_LEN 65000	/* maximum adv service_info length */
+#define P2P_WFDS_HASH_LEN 6		/* Length of a WFDS service hash */
+#define MAX_WFDS_SEEK_SVC_INFO_LEN 255	/* maximum seek service_info req length */
+#define MAX_WFDS_SEEK_SVC_NAME_LEN 200	/* maximum service_name length */
+
+/* ap_isolate bitmaps */
+#define AP_ISOLATE_DISABLED		0x0
+#define AP_ISOLATE_SENDUP_ALL		0x01
+#define AP_ISOLATE_SENDUP_MCAST		0x02
+
+#endif /* wlioctl_defs_h */
diff --git a/drivers/net/wireless/bcmdhd/include/dhdioctl.h b/drivers/net/wireless/bcmdhd/include/dhdioctl.h
new file mode 100644
index 0000000..63cddc8
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/dhdioctl.h
@@ -0,0 +1,136 @@
+/*
+ * Definitions for ioctls to access DHD iovars.
+ * Based on wlioctl.h (for Broadcom 802.11abg driver).
+ * (Moves towards generic ioctls for BCM drivers/iovars.)
+ *
+ * Definitions subject to change without notice.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhdioctl.h 438755 2013-11-22 23:20:40Z $
+ */
+
+#ifndef _dhdioctl_h_
+#define	_dhdioctl_h_
+
+#include <typedefs.h>
+
+
+/* require default structure packing */
+#define BWL_DEFAULT_PACKING
+#include <packed_section_start.h>
+
+
+/* Linux network driver ioctl encoding */
+typedef struct dhd_ioctl {
+	uint cmd;	/* common ioctl definition */
+	void *buf;	/* pointer to user buffer */
+	uint len;	/* length of user buffer */
+	bool set;	/* get or set request (optional) */
+	uint used;	/* bytes read or written (optional) */
+	uint needed;	/* bytes needed (optional) */
+	uint driver;	/* to identify target driver */
+} dhd_ioctl_t;
+
+/* Underlying BUS definition */
+enum {
+	BUS_TYPE_USB = 0, /* for USB dongles */
+	BUS_TYPE_SDIO, /* for SDIO dongles */
+	BUS_TYPE_PCIE /* for PCIE dongles */
+};
+
+/* per-driver magic numbers */
+#define DHD_IOCTL_MAGIC		0x00444944
+
+/* bump this number if you change the ioctl interface */
+#define DHD_IOCTL_VERSION	1
+
+#define	DHD_IOCTL_MAXLEN	8192		/* max length ioctl buffer required */
+#define	DHD_IOCTL_SMLEN		256		/* "small" length ioctl buffer required */
+
+/* common ioctl definitions */
+#define DHD_GET_MAGIC				0
+#define DHD_GET_VERSION				1
+#define DHD_GET_VAR				2
+#define DHD_SET_VAR				3
+
+/* message levels */
+#define DHD_ERROR_VAL	0x0001
+#define DHD_TRACE_VAL	0x0002
+#define DHD_INFO_VAL	0x0004
+#define DHD_DATA_VAL	0x0008
+#define DHD_CTL_VAL	0x0010
+#define DHD_TIMER_VAL	0x0020
+#define DHD_HDRS_VAL	0x0040
+#define DHD_BYTES_VAL	0x0080
+#define DHD_INTR_VAL	0x0100
+#define DHD_LOG_VAL	0x0200
+#define DHD_GLOM_VAL	0x0400
+#define DHD_EVENT_VAL	0x0800
+#define DHD_BTA_VAL	0x1000
+#define DHD_ISCAN_VAL	0x2000
+#define DHD_ARPOE_VAL	0x4000
+#define DHD_REORDER_VAL	0x8000
+#define DHD_WL_VAL		0x10000
+#define DHD_NOCHECKDIED_VAL		0x20000 /* UTF WAR */
+#define DHD_WL_VAL2		0x40000
+#define DHD_PNO_VAL		0x80000
+#define DHD_RTT_VAL		0x100000
+
+#ifdef SDTEST
+/* For pktgen iovar */
+typedef struct dhd_pktgen {
+	uint version;		/* To allow structure change tracking */
+	uint freq;		/* Max ticks between tx/rx attempts */
+	uint count;		/* Test packets to send/rcv each attempt */
+	uint print;		/* Print counts every <print> attempts */
+	uint total;		/* Total packets (or bursts) */
+	uint minlen;		/* Minimum length of packets to send */
+	uint maxlen;		/* Maximum length of packets to send */
+	uint numsent;		/* Count of test packets sent */
+	uint numrcvd;		/* Count of test packets received */
+	uint numfail;		/* Count of test send failures */
+	uint mode;		/* Test mode (type of test packets) */
+	uint stop;		/* Stop after this many tx failures */
+} dhd_pktgen_t;
+
+/* Version in case structure changes */
+#define DHD_PKTGEN_VERSION 2
+
+/* Type of test packets to use */
+#define DHD_PKTGEN_ECHO		1 /* Send echo requests */
+#define DHD_PKTGEN_SEND 	2 /* Send discard packets */
+#define DHD_PKTGEN_RXBURST	3 /* Request dongle send N packets */
+#define DHD_PKTGEN_RECV		4 /* Continuous rx from continuous tx dongle */
+#endif /* SDTEST */
+
+/* Enter idle immediately (no timeout) */
+#define DHD_IDLE_IMMEDIATE	(-1)
+
+/* Values for idleclock iovar: other values are the sd_divisor to use when idle */
+#define DHD_IDLE_ACTIVE	0	/* Do not request any SD clock change when idle */
+#define DHD_IDLE_STOP   (-1)	/* Request SD clock be stopped (and use SD1 mode) */
+
+
+/* require default structure packing */
+#include <packed_section_end.h>
+
+#endif /* _dhdioctl_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/epivers.h b/drivers/net/wireless/bcmdhd/include/epivers.h
new file mode 100644
index 0000000..ad50e1a
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/epivers.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: epivers.h.in,v 13.33 2010-09-08 22:08:53 $
+ *
+*/
+
+#ifndef _epivers_h_
+#define _epivers_h_
+
+#define	EPI_MAJOR_VERSION	1
+
+#define	EPI_MINOR_VERSION	201
+
+#define	EPI_RC_NUMBER		2
+
+#define	EPI_INCREMENTAL_NUMBER	0
+
+#define	EPI_BUILD_NUMBER	0
+
+#define	EPI_VERSION		1, 201, 31, 0
+
+#define	EPI_VERSION_NUM		0x01c90200
+
+#define EPI_VERSION_DEV		1.201.31
+
+/* Driver Version String, ASCII, 32 chars max */
+#define	EPI_VERSION_STR		"1.201.31 (r)"
+
+#endif /* _epivers_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/event_log.h b/drivers/net/wireless/bcmdhd/include/event_log.h
new file mode 100644
index 0000000..36089fd
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/event_log.h
@@ -0,0 +1,301 @@
+/*
+ * EVENT_LOG system definitions
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: event_log.h 241182 2011-02-17 21:50:03Z jmb $
+ */
+
+#ifndef _EVENT_LOG_H_
+#define _EVENT_LOG_H_
+
+
+/* Set a maximum number of sets here.  It is not dynamic for
+ *  efficiency of the EVENT_LOG calls.
+ */
+#define NUM_EVENT_LOG_SETS 4
+#define EVENT_LOG_SET_BUS	0
+#define EVENT_LOG_SET_WL	1
+#define EVENT_LOG_SET_PSM	2
+#define EVENT_LOG_SET_DBG	3
+
+/* Define new event log tags here */
+#define EVENT_LOG_TAG_NULL	0	/* Special null tag */
+#define EVENT_LOG_TAG_TS	1	/* Special timestamp tag */
+#define EVENT_LOG_TAG_BUS_OOB	2
+#define EVENT_LOG_TAG_BUS_STATE	3
+#define EVENT_LOG_TAG_BUS_PROTO	4
+#define EVENT_LOG_TAG_BUS_CTL	5
+#define EVENT_LOG_TAG_BUS_EVENT	6
+#define EVENT_LOG_TAG_BUS_PKT	7
+#define EVENT_LOG_TAG_BUS_FRAME	8
+#define EVENT_LOG_TAG_BUS_DESC	9
+#define EVENT_LOG_TAG_BUS_SETUP	10
+#define EVENT_LOG_TAG_BUS_MISC	11
+#define EVENT_LOG_TAG_AWDL_ERR	12
+#define EVENT_LOG_TAG_AWDL_WARN	13
+#define EVENT_LOG_TAG_AWDL_INFO	14
+#define EVENT_LOG_TAG_AWDL_DEBUG	15
+#define EVENT_LOG_TAG_AWDL_TRACE_TIMER	16
+#define EVENT_LOG_TAG_AWDL_TRACE_SYNC	17
+#define EVENT_LOG_TAG_AWDL_TRACE_CHAN	18
+#define EVENT_LOG_TAG_AWDL_TRACE_DP		19
+#define EVENT_LOG_TAG_AWDL_TRACE_MISC	20
+#define EVENT_LOG_TAG_AWDL_TEST		21
+#define EVENT_LOG_TAG_SRSCAN		22
+#define EVENT_LOG_TAG_PWRSTATS_INFO	23
+#define EVENT_LOG_TAG_AWDL_TRACE_CHANSW	24
+#define EVENT_LOG_TAG_AWDL_TRACE_PEER_OPENCLOSE	25
+#define EVENT_LOG_TAG_UCODE_WATCHDOG 26
+#define EVENT_LOG_TAG_UCODE_FIFO 27
+#define EVENT_LOG_TAG_SCAN_TRACE_LOW	28
+#define EVENT_LOG_TAG_SCAN_TRACE_HIGH	29
+#define EVENT_LOG_TAG_SCAN_ERROR	30
+#define EVENT_LOG_TAG_SCAN_WARN	31
+#define EVENT_LOG_TAG_MPF_ERR	32
+#define EVENT_LOG_TAG_MPF_WARN	33
+#define EVENT_LOG_TAG_MPF_INFO	34
+#define EVENT_LOG_TAG_MPF_DEBUG	35
+#define EVENT_LOG_TAG_EVENT_INFO	36
+#define EVENT_LOG_TAG_EVENT_ERR	37
+#define EVENT_LOG_TAG_PWRSTATS_ERROR	38
+#define EVENT_LOG_TAG_EXCESS_PM_ERROR	39
+#define EVENT_LOG_TAG_IOCTL_LOG			40
+#define EVENT_LOG_TAG_PFN_ERR	41
+#define EVENT_LOG_TAG_PFN_WARN	42
+#define EVENT_LOG_TAG_PFN_INFO	43
+#define EVENT_LOG_TAG_PFN_DEBUG	44
+#define EVENT_LOG_TAG_BEACON_LOG	45
+#define EVENT_LOG_TAG_WNM_BSSTRANS_INFO 46
+#define EVENT_LOG_TAG_TRACE_CHANSW 47
+#define EVENT_LOG_TAG_PCI_ERROR	48
+#define EVENT_LOG_TAG_PCI_TRACE	49
+#define EVENT_LOG_TAG_PCI_WARN	50
+#define EVENT_LOG_TAG_PCI_INFO	51
+#define EVENT_LOG_TAG_PCI_DBG	52
+#define EVENT_LOG_TAG_PCI_DATA  53
+#define EVENT_LOG_TAG_PCI_RING	54
+#define EVENT_LOG_TAG_AWDL_TRACE_RANGING	55
+#define EVENT_LOG_TAG_MAX	55      /* Set to the same value of last tag, not last tag + 1 */
+/* Note: New event should be added/reserved in trunk before adding it to branches */
+
+/* Flags for tag control */
+#define EVENT_LOG_TAG_FLAG_NONE		0
+#define EVENT_LOG_TAG_FLAG_LOG		0x80
+#define EVENT_LOG_TAG_FLAG_PRINT	0x40
+#define EVENT_LOG_TAG_FLAG_MASK		0x3f
+
+/* logstrs header */
+#define LOGSTRS_MAGIC   0x4C4F4753
+#define LOGSTRS_VERSION 0x1
+
+
+/* We make sure that the block size will fit in a single packet
+ *  (allowing for a bit of overhead on each packet
+ */
+#define EVENT_LOG_MAX_BLOCK_SIZE 1400
+#define EVENT_LOG_PSM_BLOCK 0x200
+
+/*
+ * There are multiple levels of objects define here:
+ *   event_log_set - a set of buffers
+ *   event log groups - every event log call is part of just one.  All
+ *                      event log calls in a group are handled the
+ *                      same way.  Each event log group is associated
+ *                      with an event log set or is off.
+ */
+
+#ifndef __ASSEMBLER__
+
+/* On the external system where the dumper is we need to make sure
+ * that these types are the same size as they are on the ARM the
+ * produced them
+ */
+#ifdef EVENT_LOG_DUMPER
+#define _EL_BLOCK_PTR uint32
+#define _EL_TYPE_PTR uint32
+#define _EL_SET_PTR uint32
+#define _EL_TOP_PTR uint32
+#else
+#define _EL_BLOCK_PTR struct event_log_block *
+#define _EL_TYPE_PTR uint32 *
+#define _EL_SET_PTR struct event_log_set **
+#define _EL_TOP_PTR struct event_log_top *
+#endif /* EVENT_LOG_DUMPER */
+
+/* Each event log entry has a type.  The type is the LAST word of the
+ * event log.  The printing code walks the event entries in reverse
+ * order to find the first entry.
+ */
+typedef union event_log_hdr {
+	struct {
+		uint8 tag;		/* Event_log entry tag */
+		uint8 count;		/* Count of 4-byte entries */
+		uint16 fmt_num;		/* Format number */
+	};
+	uint32 t;			/* Type cheat */
+} event_log_hdr_t;
+
+/* Event log sets (a logical circurlar buffer) consist of one or more
+ * event_log_blocks.  The blocks themselves form a logical circular
+ * list.  The log entries are placed in each event_log_block until it
+ * is full.  Logging continues with the next event_log_block in the
+ * event_set until the last event_log_block is reached and then
+ * logging starts over with the first event_log_block in the
+ * event_set.
+ */
+typedef struct event_log_block {
+	_EL_BLOCK_PTR next_block;
+	_EL_BLOCK_PTR prev_block;
+	_EL_TYPE_PTR end_ptr;
+
+	/* Start of packet sent for log tracing */
+	uint16 pktlen;			/* Size of rest of block */
+	uint16 count;			/* Logtrace counter */
+	uint32 timestamp;		/* Timestamp at start of use */
+	uint32 event_logs;
+} event_log_block_t;
+
+/* There can be multiple event_sets with each logging a set of
+ * associated events (i.e, "fast" and "slow" events).
+ */
+typedef struct event_log_set {
+	_EL_BLOCK_PTR first_block; 	/* Pointer to first event_log block */
+	_EL_BLOCK_PTR last_block; 	/* Pointer to last event_log block */
+	_EL_BLOCK_PTR logtrace_block;	/* next block traced */
+	_EL_BLOCK_PTR cur_block;   	/* Pointer to current event_log block */
+	_EL_TYPE_PTR cur_ptr;      	/* Current event_log pointer */
+	uint32 blockcount;		/* Number of blocks */
+	uint16 logtrace_count;		/* Last count for logtrace */
+	uint16 blockfill_count;		/* Fill count for logtrace */
+	uint32 timestamp;		/* Last timestamp event */
+	uint32 cyclecount;		/* Cycles at last timestamp event */
+} event_log_set_t;
+
+/* Top data structure for access to everything else */
+typedef struct event_log_top {
+	uint32 magic;
+#define EVENT_LOG_TOP_MAGIC 0x474C8669 /* 'EVLG' */
+	uint32 version;
+#define EVENT_LOG_VERSION 1
+	uint32 num_sets;
+	uint32 logstrs_size;		/* Size of lognums + logstrs area */
+	uint32 timestamp;		/* Last timestamp event */
+	uint32 cyclecount;		/* Cycles at last timestamp event */
+	_EL_SET_PTR sets; 		/* Ptr to array of <num_sets> set ptrs */
+} event_log_top_t;
+
+/* Data structure of Keeping the Header from logstrs.bin */
+typedef struct {
+	uint32 logstrs_size;    /* Size of the file */
+	uint32 rom_lognums_offset; /* Offset to the ROM lognum */
+	uint32 ram_lognums_offset; /* Offset to the RAM lognum */
+	uint32 rom_logstrs_offset; /* Offset to the ROM logstr */
+	uint32 ram_logstrs_offset; /* Offset to the RAM logstr */
+	/* Keep version and magic last since "header" is appended to the end of logstrs file. */
+	uint32 version;            /* Header version */
+	uint32 log_magic;       /* MAGIC number for verification 'LOGS' */
+} logstr_header_t;
+
+#ifndef EVENT_LOG_DUMPER
+
+#ifndef EVENT_LOG_COMPILE
+
+/* Null define if no tracing */
+#define EVENT_LOG(format, ...)
+
+#else  /* EVENT_LOG_COMPILE */
+
+/* The first few are special because they can be done more efficiently
+ * this way and they are the common case.  Once there are too many
+ * parameters the code size starts to be an issue and a loop is better
+ */
+#define _EVENT_LOG0(tag, fmt_num) 			\
+	event_log0(tag, fmt_num)
+#define _EVENT_LOG1(tag, fmt_num, t1) 			\
+	event_log1(tag, fmt_num, t1)
+#define _EVENT_LOG2(tag, fmt_num, t1, t2) 		\
+	event_log2(tag, fmt_num, t1, t2)
+#define _EVENT_LOG3(tag, fmt_num, t1, t2, t3) 		\
+	event_log3(tag, fmt_num, t1, t2, t3)
+#define _EVENT_LOG4(tag, fmt_num, t1, t2, t3, t4) 	\
+	event_log4(tag, fmt_num, t1, t2, t3, t4)
+
+/* The rest call the generic routine that takes a count */
+#define _EVENT_LOG5(tag, fmt_num, ...) event_logn(5, tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG6(tag, fmt_num, ...) event_logn(6, tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG7(tag, fmt_num, ...) event_logn(7, tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG8(tag, fmt_num, ...) event_logn(8, tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG9(tag, fmt_num, ...) event_logn(9, tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOGA(tag, fmt_num, ...) event_logn(10, tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOGB(tag, fmt_num, ...) event_logn(11, tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOGC(tag, fmt_num, ...) event_logn(12, tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOGD(tag, fmt_num, ...) event_logn(13, tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOGE(tag, fmt_num, ...) event_logn(14, tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOGF(tag, fmt_num, ...) event_logn(15, tag, fmt_num, __VA_ARGS__)
+
+/* Hack to make the proper routine call when variadic macros get
+ * passed.  Note the max of 15 arguments.  More than that can't be
+ * handled by the event_log entries anyways so best to catch it at compile
+ * time
+ */
+
+#define _EVENT_LOG_VA_NUM_ARGS(F, _1, _2, _3, _4, _5, _6, _7, _8, _9,	\
+			       _A, _B, _C, _D, _E, _F, N, ...) F ## N
+
+#define _EVENT_LOG(tag, fmt, ...)					\
+	static char logstr[] __attribute__ ((section(".logstrs"))) = fmt; \
+	static uint32 fmtnum __attribute__ ((section(".lognums"))) = (uint32) &logstr; \
+	_EVENT_LOG_VA_NUM_ARGS(_EVENT_LOG, ##__VA_ARGS__,		\
+			       F, E, D, C, B, A, 9, 8,			\
+			       7, 6, 5, 4, 3, 2, 1, 0)			\
+	(tag, (int) &fmtnum , ## __VA_ARGS__);				\
+
+
+#define EVENT_LOG_FAST(tag, fmt, ...)					\
+	if (event_log_tag_sets != NULL) {				\
+		uint8 tag_flag = *(event_log_tag_sets + tag);		\
+		if (tag_flag != 0) {					\
+			_EVENT_LOG(tag, fmt , ## __VA_ARGS__);		\
+		}							\
+	}
+
+#define EVENT_LOG_COMPACT(tag, fmt, ...)				\
+	if (1) {							\
+		_EVENT_LOG(tag, fmt , ## __VA_ARGS__);			\
+	}
+
+#define EVENT_LOG(tag, fmt, ...) EVENT_LOG_COMPACT(tag, fmt , ## __VA_ARGS__)
+
+#define EVENT_LOG_IS_LOG_ON(tag) (*(event_log_tag_sets + (tag)) & EVENT_LOG_TAG_FLAG_LOG)
+
+#define EVENT_DUMP	event_log_buffer
+
+extern uint8 *event_log_tag_sets;
+
+extern int event_log_init(si_t *sih);
+extern int event_log_set_init(si_t *sih, int set_num, int size);
+extern int event_log_set_expand(si_t *sih, int set_num, int size);
+extern int event_log_set_shrink(si_t *sih, int set_num, int size);
+extern int event_log_tag_start(int tag, int set_num, int flags);
+extern int event_log_tag_stop(int tag);
+extern int event_log_get(int set_num, int buflen, void *buf);
+extern uint8 * event_log_next_logtrace(int set_num);
+
+extern void event_log0(int tag, int fmtNum);
+extern void event_log1(int tag, int fmtNum, uint32 t1);
+extern void event_log2(int tag, int fmtNum, uint32 t1, uint32 t2);
+extern void event_log3(int tag, int fmtNum, uint32 t1, uint32 t2, uint32 t3);
+extern void event_log4(int tag, int fmtNum, uint32 t1, uint32 t2, uint32 t3, uint32 t4);
+extern void event_logn(int num_args, int tag, int fmtNum, ...);
+
+extern void event_log_time_sync(void);
+extern void event_log_buffer(int tag, uint8 *buf, int size);
+
+#endif /* EVENT_LOG_DUMPER */
+
+#endif /* EVENT_LOG_COMPILE */
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* _EVENT_LOG_H */
diff --git a/drivers/net/wireless/bcmdhd/include/hnd_armtrap.h b/drivers/net/wireless/bcmdhd/include/hnd_armtrap.h
new file mode 100644
index 0000000..93f353e
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/hnd_armtrap.h
@@ -0,0 +1,88 @@
+/*
+ * HND arm trap handling.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: hnd_armtrap.h 470663 2014-04-16 00:24:43Z $
+ */
+
+#ifndef	_hnd_armtrap_h_
+#define	_hnd_armtrap_h_
+
+
+/* ARM trap handling */
+
+/* Trap types defined by ARM (see arminc.h) */
+
+/* Trap locations in lo memory */
+#define	TRAP_STRIDE	4
+#define FIRST_TRAP	TR_RST
+#define LAST_TRAP	(TR_FIQ * TRAP_STRIDE)
+
+#if defined(__ARM_ARCH_4T__)
+#define	MAX_TRAP_TYPE	(TR_FIQ + 1)
+#elif defined(__ARM_ARCH_7M__)
+#define	MAX_TRAP_TYPE	(TR_ISR + ARMCM3_NUMINTS)
+#endif	/* __ARM_ARCH_7M__ */
+
+/* The trap structure is defined here as offsets for assembly */
+#define	TR_TYPE		0x00
+#define	TR_EPC		0x04
+#define	TR_CPSR		0x08
+#define	TR_SPSR		0x0c
+#define	TR_REGS		0x10
+#define	TR_REG(n)	(TR_REGS + (n) * 4)
+#define	TR_SP		TR_REG(13)
+#define	TR_LR		TR_REG(14)
+#define	TR_PC		TR_REG(15)
+
+#define	TRAP_T_SIZE	80
+
+#ifndef	_LANGUAGE_ASSEMBLY
+
+#include <typedefs.h>
+
+typedef struct _trap_struct {
+	uint32		type;
+	uint32		epc;
+	uint32		cpsr;
+	uint32		spsr;
+	uint32		r0;	/* a1 */
+	uint32		r1;	/* a2 */
+	uint32		r2;	/* a3 */
+	uint32		r3;	/* a4 */
+	uint32		r4;	/* v1 */
+	uint32		r5;	/* v2 */
+	uint32		r6;	/* v3 */
+	uint32		r7;	/* v4 */
+	uint32		r8;	/* v5 */
+	uint32		r9;	/* sb/v6 */
+	uint32		r10;	/* sl/v7 */
+	uint32		r11;	/* fp/v8 */
+	uint32		r12;	/* ip */
+	uint32		r13;	/* sp */
+	uint32		r14;	/* lr */
+	uint32		pc;	/* r15 */
+} trap_t;
+
+#endif	/* !_LANGUAGE_ASSEMBLY */
+
+#endif	/* _hnd_armtrap_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/hnd_cons.h b/drivers/net/wireless/bcmdhd/include/hnd_cons.h
new file mode 100644
index 0000000..0b48ef8
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/hnd_cons.h
@@ -0,0 +1,86 @@
+/*
+ * Console support for RTE - for host use only.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: hnd_cons.h 473343 2014-04-29 01:45:22Z $
+ */
+#ifndef	_hnd_cons_h_
+#define	_hnd_cons_h_
+
+#include <typedefs.h>
+#include <siutils.h>
+
+#if defined(RWL_DONGLE) || defined(UART_REFLECTOR)
+/* For Dongle uart tranport max cmd len is 256 bytes + header length (16 bytes)
+ *  In case of ASD commands we are not sure about how much is the command size
+ *  To be on the safe side, input buf len CBUF_LEN is increased to max (512) bytes.
+ */
+#define RWL_MAX_DATA_LEN 	(512 + 8)	/* allow some extra bytes for '/n' termination */
+#define CBUF_LEN	(RWL_MAX_DATA_LEN + 64)  /* allow 64 bytes for header ("rwl...") */
+#else
+#define CBUF_LEN	(128)
+#endif /* RWL_DONGLE || UART_REFLECTOR */
+
+#define LOG_BUF_LEN	1024
+
+#ifdef BOOTLOADER_CONSOLE_OUTPUT
+#undef RWL_MAX_DATA_LEN
+#undef CBUF_LEN
+#undef LOG_BUF_LEN
+#define RWL_MAX_DATA_LEN (4 * 1024 + 8)
+#define CBUF_LEN	(RWL_MAX_DATA_LEN + 64)
+#define LOG_BUF_LEN (16 * 1024)
+#endif
+
+typedef struct {
+	uint32		buf;		/* Can't be pointer on (64-bit) hosts */
+	uint		buf_size;
+	uint		idx;
+	uint		out_idx;	/* output index */
+} hnd_log_t;
+
+typedef struct {
+	/* Virtual UART
+	 *   When there is no UART (e.g. Quickturn), the host should write a complete
+	 *   input line directly into cbuf and then write the length into vcons_in.
+	 *   This may also be used when there is a real UART (at risk of conflicting with
+	 *   the real UART).  vcons_out is currently unused.
+	 */
+	volatile uint	vcons_in;
+	volatile uint	vcons_out;
+
+	/* Output (logging) buffer
+	 *   Console output is written to a ring buffer log_buf at index log_idx.
+	 *   The host may read the output when it sees log_idx advance.
+	 *   Output will be lost if the output wraps around faster than the host polls.
+	 */
+	hnd_log_t	log;
+
+	/* Console input line buffer
+	 *   Characters are read one at a time into cbuf until <CR> is received, then
+	 *   the buffer is processed as a command line.  Also used for virtual UART.
+	 */
+	uint		cbuf_idx;
+	char		cbuf[CBUF_LEN];
+} hnd_cons_t;
+
+#endif /* _hnd_cons_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/hnd_pktpool.h b/drivers/net/wireless/bcmdhd/include/hnd_pktpool.h
new file mode 100644
index 0000000..4b78a21
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/hnd_pktpool.h
@@ -0,0 +1,204 @@
+/*
+ * HND generic packet pool operation primitives
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: $
+ */
+
+#ifndef _hnd_pktpool_h_
+#define _hnd_pktpool_h_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef BCMPKTPOOL
+#define POOL_ENAB(pool)		((pool) && (pool)->inited)
+#define SHARED_POOL		(pktpool_shared)
+#else /* BCMPKTPOOL */
+#define POOL_ENAB(bus)		0
+#define SHARED_POOL		((struct pktpool *)NULL)
+#endif /* BCMPKTPOOL */
+
+#ifdef BCMFRAGPOOL
+#define SHARED_FRAG_POOL	(pktpool_shared_lfrag)
+#endif
+#define SHARED_RXFRAG_POOL	(pktpool_shared_rxlfrag)
+
+
+#ifndef PKTPOOL_LEN_MAX
+#define PKTPOOL_LEN_MAX		40
+#endif /* PKTPOOL_LEN_MAX */
+#define PKTPOOL_CB_MAX		3
+
+/* forward declaration */
+struct pktpool;
+
+typedef void (*pktpool_cb_t)(struct pktpool *pool, void *arg);
+typedef struct {
+	pktpool_cb_t cb;
+	void *arg;
+} pktpool_cbinfo_t;
+/* call back fn extension to populate host address in pool pkt */
+typedef int (*pktpool_cb_extn_t)(struct pktpool *pool, void *arg1, void* pkt, bool arg2);
+typedef struct {
+	pktpool_cb_extn_t cb;
+	void *arg;
+} pktpool_cbextn_info_t;
+
+
+#ifdef BCMDBG_POOL
+/* pkt pool debug states */
+#define POOL_IDLE	0
+#define POOL_RXFILL	1
+#define POOL_RXDH	2
+#define POOL_RXD11	3
+#define POOL_TXDH	4
+#define POOL_TXD11	5
+#define POOL_AMPDU	6
+#define POOL_TXENQ	7
+
+typedef struct {
+	void *p;
+	uint32 cycles;
+	uint32 dur;
+} pktpool_dbg_t;
+
+typedef struct {
+	uint8 txdh;	/* tx to host */
+	uint8 txd11;	/* tx to d11 */
+	uint8 enq;	/* waiting in q */
+	uint8 rxdh;	/* rx from host */
+	uint8 rxd11;	/* rx from d11 */
+	uint8 rxfill;	/* dma_rxfill */
+	uint8 idle;	/* avail in pool */
+} pktpool_stats_t;
+#endif /* BCMDBG_POOL */
+
+typedef struct pktpool {
+	bool inited;            /* pktpool_init was successful */
+	uint8 type;             /* type of lbuf: basic, frag, etc */
+	uint8 id;               /* pktpool ID:  index in registry */
+	bool istx;              /* direction: transmit or receive data path */
+
+	void * freelist;        /* free list: see PKTNEXTFREE(), PKTSETNEXTFREE() */
+	uint16 avail;           /* number of packets in pool's free list */
+	uint16 len;             /* number of packets managed by pool */
+	uint16 maxlen;          /* maximum size of pool <= PKTPOOL_LEN_MAX */
+	uint16 plen;            /* size of pkt buffer, excluding lbuf|lbuf_frag */
+
+	bool empty;
+	uint8 cbtoggle;
+	uint8 cbcnt;
+	uint8 ecbcnt;
+	bool emptycb_disable;
+	pktpool_cbinfo_t *availcb_excl;
+	pktpool_cbinfo_t cbs[PKTPOOL_CB_MAX];
+	pktpool_cbinfo_t ecbs[PKTPOOL_CB_MAX];
+	pktpool_cbextn_info_t cbext;
+	pktpool_cbextn_info_t rxcplidfn;
+#ifdef BCMDBG_POOL
+	uint8 dbg_cbcnt;
+	pktpool_cbinfo_t dbg_cbs[PKTPOOL_CB_MAX];
+	uint16 dbg_qlen;
+	pktpool_dbg_t dbg_q[PKTPOOL_LEN_MAX + 1];
+#endif
+	pktpool_cbinfo_t dmarxfill;
+} pktpool_t;
+
+extern pktpool_t *pktpool_shared;
+#ifdef BCMFRAGPOOL
+extern pktpool_t *pktpool_shared_lfrag;
+#endif
+extern pktpool_t *pktpool_shared_rxlfrag;
+
+/* Incarnate a pktpool registry. On success returns total_pools. */
+extern int pktpool_attach(osl_t *osh, uint32 total_pools);
+extern int pktpool_dettach(osl_t *osh); /* Relinquish registry */
+
+extern int pktpool_init(osl_t *osh, pktpool_t *pktp, int *pktplen, int plen, bool istx, uint8 type);
+extern int pktpool_deinit(osl_t *osh, pktpool_t *pktp);
+extern int pktpool_fill(osl_t *osh, pktpool_t *pktp, bool minimal);
+extern void* pktpool_get(pktpool_t *pktp);
+extern void pktpool_free(pktpool_t *pktp, void *p);
+extern int pktpool_add(pktpool_t *pktp, void *p);
+extern int pktpool_avail_notify_normal(osl_t *osh, pktpool_t *pktp);
+extern int pktpool_avail_notify_exclusive(osl_t *osh, pktpool_t *pktp, pktpool_cb_t cb);
+extern int pktpool_avail_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg);
+extern int pktpool_empty_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg);
+extern int pktpool_setmaxlen(pktpool_t *pktp, uint16 maxlen);
+extern int pktpool_setmaxlen_strict(osl_t *osh, pktpool_t *pktp, uint16 maxlen);
+extern void pktpool_emptycb_disable(pktpool_t *pktp, bool disable);
+extern bool pktpool_emptycb_disabled(pktpool_t *pktp);
+extern int pktpool_hostaddr_fill_register(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg1);
+extern int pktpool_rxcplid_fill_register(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg);
+extern void pktpool_invoke_dmarxfill(pktpool_t *pktp);
+extern int pkpool_haddr_avail_register_cb(pktpool_t *pktp, pktpool_cb_t cb, void *arg);
+
+#define POOLPTR(pp)         ((pktpool_t *)(pp))
+#define POOLID(pp)          (POOLPTR(pp)->id)
+
+#define POOLSETID(pp, ppid) (POOLPTR(pp)->id = (ppid))
+
+#define pktpool_len(pp)     (POOLPTR(pp)->len)
+#define pktpool_avail(pp)   (POOLPTR(pp)->avail)
+#define pktpool_plen(pp)    (POOLPTR(pp)->plen)
+#define pktpool_maxlen(pp)  (POOLPTR(pp)->maxlen)
+
+
+/*
+ * ----------------------------------------------------------------------------
+ * A pool ID is assigned with a pkt pool during pool initialization. This is
+ * done by maintaining a registry of all initialized pools, and the registry
+ * index at which the pool is registered is used as the pool's unique ID.
+ * ID 0 is reserved and is used to signify an invalid pool ID.
+ * All packets henceforth allocated from a pool will be tagged with the pool's
+ * unique ID. Packets allocated from the heap will use the reserved ID = 0.
+ * Packets with non-zero pool id signify that they were allocated from a pool.
+ * A maximum of 15 pools are supported, allowing a 4bit pool ID to be used
+ * in place of a 32bit pool pointer in each packet.
+ * ----------------------------------------------------------------------------
+ */
+#define PKTPOOL_INVALID_ID          (0)
+#define PKTPOOL_MAXIMUM_ID          (15)
+
+/* Registry of pktpool(s) */
+extern pktpool_t *pktpools_registry[PKTPOOL_MAXIMUM_ID + 1];
+
+/* Pool ID to/from Pool Pointer converters */
+#define PKTPOOL_ID2PTR(id)          (pktpools_registry[id])
+#define PKTPOOL_PTR2ID(pp)          (POOLID(pp))
+
+
+#ifdef BCMDBG_POOL
+extern int pktpool_dbg_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg);
+extern int pktpool_start_trigger(pktpool_t *pktp, void *p);
+extern int pktpool_dbg_dump(pktpool_t *pktp);
+extern int pktpool_dbg_notify(pktpool_t *pktp);
+extern int pktpool_stats_dump(pktpool_t *pktp, pktpool_stats_t *stats);
+#endif /* BCMDBG_POOL */
+
+#ifdef __cplusplus
+	}
+#endif
+
+#endif /* _hnd_pktpool_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/hnd_pktq.h b/drivers/net/wireless/bcmdhd/include/hnd_pktq.h
new file mode 100644
index 0000000..ef3d4c8
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/hnd_pktq.h
@@ -0,0 +1,186 @@
+/*
+ * HND generic pktq operation primitives
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: $
+ */
+
+#ifndef _hnd_pktq_h_
+#define _hnd_pktq_h_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* osl multi-precedence packet queue */
+#define PKTQ_LEN_MAX            0xFFFF  /* Max uint16 65535 packets */
+#ifndef PKTQ_LEN_DEFAULT
+#define PKTQ_LEN_DEFAULT        128	/* Max 128 packets */
+#endif
+#ifndef PKTQ_MAX_PREC
+#define PKTQ_MAX_PREC           16	/* Maximum precedence levels */
+#endif
+
+typedef struct pktq_prec {
+	void *head;     /* first packet to dequeue */
+	void *tail;     /* last packet to dequeue */
+	uint16 len;     /* number of queued packets */
+	uint16 max;     /* maximum number of queued packets */
+} pktq_prec_t;
+
+#ifdef PKTQ_LOG
+typedef struct {
+	uint32 requested;    /* packets requested to be stored */
+	uint32 stored;	     /* packets stored */
+	uint32 saved;	     /* packets saved,
+	                            because a lowest priority queue has given away one packet
+	                      */
+	uint32 selfsaved;    /* packets saved,
+	                            because an older packet from the same queue has been dropped
+	                      */
+	uint32 full_dropped; /* packets dropped,
+	                            because pktq is full with higher precedence packets
+	                      */
+	uint32 dropped;      /* packets dropped because pktq per that precedence is full */
+	uint32 sacrificed;   /* packets dropped,
+	                            in order to save one from a queue of a highest priority
+	                      */
+	uint32 busy;         /* packets droped because of hardware/transmission error */
+	uint32 retry;        /* packets re-sent because they were not received */
+	uint32 ps_retry;     /* packets retried again prior to moving power save mode */
+	uint32 suppress;     /* packets which were suppressed and not transmitted */
+	uint32 retry_drop;   /* packets finally dropped after retry limit */
+	uint32 max_avail;    /* the high-water mark of the queue capacity for packets -
+	                            goes to zero as queue fills
+	                      */
+	uint32 max_used;     /* the high-water mark of the queue utilisation for packets -
+						        increases with use ('inverse' of max_avail)
+				          */
+	uint32 queue_capacity; /* the maximum capacity of the queue */
+	uint32 rtsfail;        /* count of rts attempts that failed to receive cts */
+	uint32 acked;          /* count of packets sent (acked) successfully */
+	uint32 txrate_succ;    /* running total of phy rate of packets sent successfully */
+	uint32 txrate_main;    /* running totoal of primary phy rate of all packets */
+	uint32 throughput;     /* actual data transferred successfully */
+	uint32 airtime;        /* cumulative total medium access delay in useconds */
+	uint32  _logtime;      /* timestamp of last counter clear  */
+} pktq_counters_t;
+
+typedef struct {
+	uint32                  _prec_log;
+	pktq_counters_t*        _prec_cnt[PKTQ_MAX_PREC];     /* Counters per queue  */
+} pktq_log_t;
+#endif /* PKTQ_LOG */
+
+
+#define PKTQ_COMMON	\
+	uint16 num_prec;        /* number of precedences in use */			\
+	uint16 hi_prec;         /* rapid dequeue hint (>= highest non-empty prec) */	\
+	uint16 max;             /* total max packets */					\
+	uint16 len;             /* total number of packets */
+
+/* multi-priority pkt queue */
+struct pktq {
+	PKTQ_COMMON
+	/* q array must be last since # of elements can be either PKTQ_MAX_PREC or 1 */
+	struct pktq_prec q[PKTQ_MAX_PREC];
+#ifdef PKTQ_LOG
+	pktq_log_t*      pktqlog;
+#endif
+};
+
+/* simple, non-priority pkt queue */
+struct spktq {
+	PKTQ_COMMON
+	/* q array must be last since # of elements can be either PKTQ_MAX_PREC or 1 */
+	struct pktq_prec q[1];
+};
+
+#define PKTQ_PREC_ITER(pq, prec)        for (prec = (pq)->num_prec - 1; prec >= 0; prec--)
+
+/* fn(pkt, arg).  return true if pkt belongs to if */
+typedef bool (*ifpkt_cb_t)(void*, int);
+
+/* operations on a specific precedence in packet queue */
+
+#define pktq_psetmax(pq, prec, _max)	((pq)->q[prec].max = (_max))
+#define pktq_pmax(pq, prec)		((pq)->q[prec].max)
+#define pktq_plen(pq, prec)		((pq)->q[prec].len)
+#define pktq_pavail(pq, prec)		((pq)->q[prec].max - (pq)->q[prec].len)
+#define pktq_pfull(pq, prec)		((pq)->q[prec].len >= (pq)->q[prec].max)
+#define pktq_pempty(pq, prec)		((pq)->q[prec].len == 0)
+
+#define pktq_ppeek(pq, prec)		((pq)->q[prec].head)
+#define pktq_ppeek_tail(pq, prec)	((pq)->q[prec].tail)
+
+extern void  pktq_append(struct pktq *pq, int prec, struct spktq *list);
+extern void  pktq_prepend(struct pktq *pq, int prec, struct spktq *list);
+
+extern void *pktq_penq(struct pktq *pq, int prec, void *p);
+extern void *pktq_penq_head(struct pktq *pq, int prec, void *p);
+extern void *pktq_pdeq(struct pktq *pq, int prec);
+extern void *pktq_pdeq_prev(struct pktq *pq, int prec, void *prev_p);
+extern void *pktq_pdeq_with_fn(struct pktq *pq, int prec, ifpkt_cb_t fn, int arg);
+extern void *pktq_pdeq_tail(struct pktq *pq, int prec);
+/* Empty the queue at particular precedence level */
+extern void pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir,
+	ifpkt_cb_t fn, int arg);
+/* Remove a specified packet from its queue */
+extern bool pktq_pdel(struct pktq *pq, void *p, int prec);
+
+/* operations on a set of precedences in packet queue */
+
+extern int pktq_mlen(struct pktq *pq, uint prec_bmp);
+extern void *pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out);
+extern void *pktq_mpeek(struct pktq *pq, uint prec_bmp, int *prec_out);
+
+/* operations on packet queue as a whole */
+
+#define pktq_len(pq)		((int)(pq)->len)
+#define pktq_max(pq)		((int)(pq)->max)
+#define pktq_avail(pq)		((int)((pq)->max - (pq)->len))
+#define pktq_full(pq)		((pq)->len >= (pq)->max)
+#define pktq_empty(pq)		((pq)->len == 0)
+
+/* operations for single precedence queues */
+#define pktenq(pq, p)		pktq_penq(((struct pktq *)(void *)pq), 0, (p))
+#define pktenq_head(pq, p)	pktq_penq_head(((struct pktq *)(void *)pq), 0, (p))
+#define pktdeq(pq)		pktq_pdeq(((struct pktq *)(void *)pq), 0)
+#define pktdeq_tail(pq)		pktq_pdeq_tail(((struct pktq *)(void *)pq), 0)
+#define pktqflush(osh, pq)	pktq_flush(osh, ((struct pktq *)(void *)pq), TRUE, NULL, 0)
+#define pktqinit(pq, len)	pktq_init(((struct pktq *)(void *)pq), 1, len)
+
+extern void pktq_init(struct pktq *pq, int num_prec, int max_len);
+extern void pktq_set_max_plen(struct pktq *pq, int prec, int max_len);
+
+/* prec_out may be NULL if caller is not interested in return value */
+extern void *pktq_deq(struct pktq *pq, int *prec_out);
+extern void *pktq_deq_tail(struct pktq *pq, int *prec_out);
+extern void *pktq_peek(struct pktq *pq, int *prec_out);
+extern void *pktq_peek_tail(struct pktq *pq, int *prec_out);
+extern void pktq_flush(osl_t *osh, struct pktq *pq, bool dir, ifpkt_cb_t fn, int arg);
+
+#ifdef __cplusplus
+	}
+#endif
+
+#endif /* _hnd_pktq_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/hndpmu.h b/drivers/net/wireless/bcmdhd/include/hndpmu.h
new file mode 100644
index 0000000..9a31663
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/hndpmu.h
@@ -0,0 +1,41 @@
+/*
+ * HND SiliconBackplane PMU support.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: hndpmu.h 471127 2014-04-17 23:24:23Z $
+ */
+
+#ifndef _hndpmu_h_
+#define _hndpmu_h_
+
+#include <typedefs.h>
+#include <osl_decl.h>
+#include <siutils.h>
+
+
+extern void si_pmu_otp_power(si_t *sih, osl_t *osh, bool on, uint32* min_res_mask);
+extern void si_sdiod_drive_strength_init(si_t *sih, osl_t *osh, uint32 drivestrength);
+
+extern void si_pmu_minresmask_htavail_set(si_t *sih, osl_t *osh, bool set_clear);
+extern void si_pmu_slow_clk_reinit(si_t *sih, osl_t *osh);
+
+#endif /* _hndpmu_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/hndsoc.h b/drivers/net/wireless/bcmdhd/include/hndsoc.h
new file mode 100644
index 0000000..947db00
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/hndsoc.h
@@ -0,0 +1,286 @@
+/*
+ * Broadcom HND chip & on-chip-interconnect-related definitions.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: hndsoc.h 473238 2014-04-28 19:14:56Z $
+ */
+
+#ifndef	_HNDSOC_H
+#define	_HNDSOC_H
+
+/* Include the soci specific files */
+#include <sbconfig.h>
+#include <aidmp.h>
+
+/*
+ * SOC Interconnect Address Map.
+ * All regions may not exist on all chips.
+ */
+#define SI_SDRAM_BASE		0x00000000	/* Physical SDRAM */
+#define SI_PCI_MEM		0x08000000	/* Host Mode sb2pcitranslation0 (64 MB) */
+#define SI_PCI_MEM_SZ		(64 * 1024 * 1024)
+#define SI_PCI_CFG		0x0c000000	/* Host Mode sb2pcitranslation1 (64 MB) */
+#define	SI_SDRAM_SWAPPED	0x10000000	/* Byteswapped Physical SDRAM */
+#define SI_SDRAM_R2		0x80000000	/* Region 2 for sdram (512 MB) */
+
+#define SI_ENUM_BASE    	0x18000000	/* Enumeration space base */
+
+#define SI_WRAP_BASE    	0x18100000	/* Wrapper space base */
+#define SI_CORE_SIZE    	0x1000		/* each core gets 4Kbytes for registers */
+
+#ifndef SI_MAXCORES
+#define	SI_MAXCORES		32		/* NorthStar has more cores */
+#endif /* SI_MAXCORES */
+
+#define	SI_FASTRAM		0x19000000	/* On-chip RAM on chips that also have DDR */
+#define	SI_FASTRAM_SWAPPED	0x19800000
+
+#define	SI_FLASH2		0x1c000000	/* Flash Region 2 (region 1 shadowed here) */
+#define	SI_FLASH2_SZ		0x02000000	/* Size of Flash Region 2 */
+#define	SI_ARMCM3_ROM		0x1e000000	/* ARM Cortex-M3 ROM */
+#define	SI_FLASH1		0x1fc00000	/* MIPS Flash Region 1 */
+#define	SI_FLASH1_SZ		0x00400000	/* MIPS Size of Flash Region 1 */
+#define	SI_FLASH_WINDOW		0x01000000	/* Flash XIP Window */
+
+#define SI_NS_NANDFLASH		0x1c000000	/* NorthStar NAND flash base */
+#define SI_NS_NORFLASH		0x1e000000	/* NorthStar NOR flash base */
+#define SI_NS_ROM		0xfffd0000	/* NorthStar ROM */
+#define	SI_NS_FLASH_WINDOW	0x02000000	/* Flash XIP Window */
+
+#define	SI_ARM7S_ROM		0x20000000	/* ARM7TDMI-S ROM */
+#define	SI_ARMCR4_ROM		0x000f0000	/* ARM Cortex-R4 ROM */
+#define	SI_ARMCM3_SRAM2		0x60000000	/* ARM Cortex-M3 SRAM Region 2 */
+#define	SI_ARM7S_SRAM2		0x80000000	/* ARM7TDMI-S SRAM Region 2 */
+#define	SI_ARM_FLASH1		0xffff0000	/* ARM Flash Region 1 */
+#define	SI_ARM_FLASH1_SZ	0x00010000	/* ARM Size of Flash Region 1 */
+
+#define SI_SFLASH		0x14000000
+#define SI_PCI_DMA		0x40000000	/* Client Mode sb2pcitranslation2 (1 GB) */
+#define SI_PCI_DMA2		0x80000000	/* Client Mode sb2pcitranslation2 (1 GB) */
+#define SI_PCI_DMA_SZ		0x40000000	/* Client Mode sb2pcitranslation2 size in bytes */
+#define SI_PCIE_DMA_L32		0x00000000	/* PCIE Client Mode sb2pcitranslation2
+						 * (2 ZettaBytes), low 32 bits
+						 */
+#define SI_PCIE_DMA_H32		0x80000000	/* PCIE Client Mode sb2pcitranslation2
+						 * (2 ZettaBytes), high 32 bits
+						 */
+/* core codes */
+#define	NODEV_CORE_ID		0x700		/* Invalid coreid */
+#define	CC_CORE_ID		0x800		/* chipcommon core */
+#define	ILINE20_CORE_ID		0x801		/* iline20 core */
+#define	SRAM_CORE_ID		0x802		/* sram core */
+#define	SDRAM_CORE_ID		0x803		/* sdram core */
+#define	PCI_CORE_ID		0x804		/* pci core */
+#define	MIPS_CORE_ID		0x805		/* mips core */
+#define	ENET_CORE_ID		0x806		/* enet mac core */
+#define	CODEC_CORE_ID		0x807		/* v90 codec core */
+#define	USB_CORE_ID		0x808		/* usb 1.1 host/device core */
+#define	ADSL_CORE_ID		0x809		/* ADSL core */
+#define	ILINE100_CORE_ID	0x80a		/* iline100 core */
+#define	IPSEC_CORE_ID		0x80b		/* ipsec core */
+#define	UTOPIA_CORE_ID		0x80c		/* utopia core */
+#define	PCMCIA_CORE_ID		0x80d		/* pcmcia core */
+#define	SOCRAM_CORE_ID		0x80e		/* internal memory core */
+#define	MEMC_CORE_ID		0x80f		/* memc sdram core */
+#define	OFDM_CORE_ID		0x810		/* OFDM phy core */
+#define	EXTIF_CORE_ID		0x811		/* external interface core */
+#define	D11_CORE_ID		0x812		/* 802.11 MAC core */
+#define	APHY_CORE_ID		0x813		/* 802.11a phy core */
+#define	BPHY_CORE_ID		0x814		/* 802.11b phy core */
+#define	GPHY_CORE_ID		0x815		/* 802.11g phy core */
+#define	MIPS33_CORE_ID		0x816		/* mips3302 core */
+#define	USB11H_CORE_ID		0x817		/* usb 1.1 host core */
+#define	USB11D_CORE_ID		0x818		/* usb 1.1 device core */
+#define	USB20H_CORE_ID		0x819		/* usb 2.0 host core */
+#define	USB20D_CORE_ID		0x81a		/* usb 2.0 device core */
+#define	SDIOH_CORE_ID		0x81b		/* sdio host core */
+#define	ROBO_CORE_ID		0x81c		/* roboswitch core */
+#define	ATA100_CORE_ID		0x81d		/* parallel ATA core */
+#define	SATAXOR_CORE_ID		0x81e		/* serial ATA & XOR DMA core */
+#define	GIGETH_CORE_ID		0x81f		/* gigabit ethernet core */
+#define	PCIE_CORE_ID		0x820		/* pci express core */
+#define	NPHY_CORE_ID		0x821		/* 802.11n 2x2 phy core */
+#define	SRAMC_CORE_ID		0x822		/* SRAM controller core */
+#define	MINIMAC_CORE_ID		0x823		/* MINI MAC/phy core */
+#define	ARM11_CORE_ID		0x824		/* ARM 1176 core */
+#define	ARM7S_CORE_ID		0x825		/* ARM7tdmi-s core */
+#define	LPPHY_CORE_ID		0x826		/* 802.11a/b/g phy core */
+#define	PMU_CORE_ID		0x827		/* PMU core */
+#define	SSNPHY_CORE_ID		0x828		/* 802.11n single-stream phy core */
+#define	SDIOD_CORE_ID		0x829		/* SDIO device core */
+#define	ARMCM3_CORE_ID		0x82a		/* ARM Cortex M3 core */
+#define	HTPHY_CORE_ID		0x82b		/* 802.11n 4x4 phy core */
+#define	MIPS74K_CORE_ID		0x82c		/* mips 74k core */
+#define	GMAC_CORE_ID		0x82d		/* Gigabit MAC core */
+#define	DMEMC_CORE_ID		0x82e		/* DDR1/2 memory controller core */
+#define	PCIERC_CORE_ID		0x82f		/* PCIE Root Complex core */
+#define	OCP_CORE_ID		0x830		/* OCP2OCP bridge core */
+#define	SC_CORE_ID		0x831		/* shared common core */
+#define	AHB_CORE_ID		0x832		/* OCP2AHB bridge core */
+#define	SPIH_CORE_ID		0x833		/* SPI host core */
+#define	I2S_CORE_ID		0x834		/* I2S core */
+#define	DMEMS_CORE_ID		0x835		/* SDR/DDR1 memory controller core */
+#define	DEF_SHIM_COMP		0x837		/* SHIM component in ubus/6362 */
+
+#define ACPHY_CORE_ID		0x83b		/* Dot11 ACPHY */
+#define PCIE2_CORE_ID		0x83c		/* pci express Gen2 core */
+#define USB30D_CORE_ID		0x83d		/* usb 3.0 device core */
+#define ARMCR4_CORE_ID		0x83e		/* ARM CR4 CPU */
+#define GCI_CORE_ID		0x840		/* GCI Core */
+#define M2MDMA_CORE_ID          0x844           /* memory to memory dma */
+#define APB_BRIDGE_CORE_ID	0x135		/* APB bridge core ID */
+#define AXI_CORE_ID		0x301		/* AXI/GPV core ID */
+#define EROM_CORE_ID		0x366		/* EROM core ID */
+#define OOB_ROUTER_CORE_ID	0x367		/* OOB router core ID */
+#define DEF_AI_COMP		0xfff		/* Default component, in ai chips it maps all
+						 * unused address ranges
+						 */
+
+#define CC_4706_CORE_ID		0x500		/* chipcommon core */
+#define NS_PCIEG2_CORE_ID	0x501		/* PCIE Gen 2 core */
+#define NS_DMA_CORE_ID		0x502		/* DMA core */
+#define NS_SDIO3_CORE_ID	0x503		/* SDIO3 core */
+#define NS_USB20_CORE_ID	0x504		/* USB2.0 core */
+#define NS_USB30_CORE_ID	0x505		/* USB3.0 core */
+#define NS_A9JTAG_CORE_ID	0x506		/* ARM Cortex A9 JTAG core */
+#define NS_DDR23_CORE_ID	0x507		/* Denali DDR2/DDR3 memory controller */
+#define NS_ROM_CORE_ID		0x508		/* ROM core */
+#define NS_NAND_CORE_ID		0x509		/* NAND flash controller core */
+#define NS_QSPI_CORE_ID		0x50a		/* SPI flash controller core */
+#define NS_CCB_CORE_ID		0x50b		/* ChipcommonB core */
+#define SOCRAM_4706_CORE_ID	0x50e		/* internal memory core */
+#define NS_SOCRAM_CORE_ID	SOCRAM_4706_CORE_ID
+#define	ARMCA9_CORE_ID		0x510		/* ARM Cortex A9 core (ihost) */
+#define	NS_IHOST_CORE_ID	ARMCA9_CORE_ID	/* ARM Cortex A9 core (ihost) */
+#define GMAC_COMMON_4706_CORE_ID	0x5dc		/* Gigabit MAC core */
+#define GMAC_4706_CORE_ID	0x52d		/* Gigabit MAC core */
+#define AMEMC_CORE_ID		0x52e		/* DDR1/2 memory controller core */
+#define ALTA_CORE_ID		0x534		/* I2S core */
+#define DDR23_PHY_CORE_ID	0x5dd
+
+#define SI_PCI1_MEM     0x40000000  /* Host Mode sb2pcitranslation0 (64 MB) */
+#define SI_PCI1_CFG     0x44000000  /* Host Mode sb2pcitranslation1 (64 MB) */
+#define SI_PCIE1_DMA_H32		0xc0000000	/* PCIE Client Mode sb2pcitranslation2
+						 * (2 ZettaBytes), high 32 bits
+						 */
+#define CC_4706B0_CORE_REV	0x8000001f		/* chipcommon core */
+#define SOCRAM_4706B0_CORE_REV	0x80000005		/* internal memory core */
+#define GMAC_4706B0_CORE_REV	0x80000000		/* Gigabit MAC core */
+#define NS_PCIEG2_CORE_REV_B0	0x7		/* NS-B0 PCIE Gen 2 core rev */
+
+/* There are TWO constants on all HND chips: SI_ENUM_BASE above,
+ * and chipcommon being the first core:
+ */
+#define	SI_CC_IDX		0
+/* SOC Interconnect types (aka chip types) */
+#define	SOCI_SB			0
+#define	SOCI_AI			1
+#define	SOCI_UBUS		2
+#define	SOCI_NAI		3
+
+/* Common core control flags */
+#define	SICF_BIST_EN		0x8000
+#define	SICF_PME_EN		0x4000
+#define	SICF_CORE_BITS		0x3ffc
+#define	SICF_FGC		0x0002
+#define	SICF_CLOCK_EN		0x0001
+
+/* Common core status flags */
+#define	SISF_BIST_DONE		0x8000
+#define	SISF_BIST_ERROR		0x4000
+#define	SISF_GATED_CLK		0x2000
+#define	SISF_DMA64		0x1000
+#define	SISF_CORE_BITS		0x0fff
+
+/* Norstar core status flags */
+#define SISF_NS_BOOTDEV_MASK	0x0003	/* ROM core */
+#define SISF_NS_BOOTDEV_NOR	0x0000	/* ROM core */
+#define SISF_NS_BOOTDEV_NAND	0x0001	/* ROM core */
+#define SISF_NS_BOOTDEV_ROM	0x0002	/* ROM core */
+#define SISF_NS_BOOTDEV_OFFLOAD	0x0003	/* ROM core */
+#define SISF_NS_SKUVEC_MASK	0x000c	/* ROM core */
+
+/* A register that is common to all cores to
+ * communicate w/PMU regarding clock control.
+ */
+#define SI_CLK_CTL_ST		0x1e0		/* clock control and status */
+#define SI_PWR_CTL_ST		0x1e8		/* For memory clock gating */
+
+/* clk_ctl_st register */
+#define	CCS_FORCEALP		0x00000001	/* force ALP request */
+#define	CCS_FORCEHT		0x00000002	/* force HT request */
+#define	CCS_FORCEILP		0x00000004	/* force ILP request */
+#define	CCS_ALPAREQ		0x00000008	/* ALP Avail Request */
+#define	CCS_HTAREQ		0x00000010	/* HT Avail Request */
+#define	CCS_FORCEHWREQOFF	0x00000020	/* Force HW Clock Request Off */
+#define CCS_HQCLKREQ		0x00000040	/* HQ Clock Required */
+#define CCS_USBCLKREQ		0x00000100	/* USB Clock Req */
+#define CCS_SECICLKREQ		0x00000100	/* SECI Clock Req */
+#define CCS_ARMFASTCLOCKREQ	0x00000100	/* ARM CR4 fast clock request */
+#define CCS_AVBCLKREQ		0x00000400	/* AVB Clock enable request */
+#define CCS_ERSRC_REQ_MASK	0x00000700	/* external resource requests */
+#define CCS_ERSRC_REQ_SHIFT	8
+#define	CCS_ALPAVAIL		0x00010000	/* ALP is available */
+#define	CCS_HTAVAIL		0x00020000	/* HT is available */
+#define CCS_BP_ON_APL		0x00040000	/* RO: Backplane is running on ALP clock */
+#define CCS_BP_ON_HT		0x00080000	/* RO: Backplane is running on HT clock */
+#define CCS_ARMFASTCLOCKSTATUS	0x01000000	/* Fast CPU clock is running */
+#define CCS_ERSRC_STS_MASK	0x07000000	/* external resource status */
+#define CCS_ERSRC_STS_SHIFT	24
+
+#define	CCS0_HTAVAIL		0x00010000	/* HT avail in chipc and pcmcia on 4328a0 */
+#define	CCS0_ALPAVAIL		0x00020000	/* ALP avail in chipc and pcmcia on 4328a0 */
+
+/* Not really related to SOC Interconnect, but a couple of software
+ * conventions for the use the flash space:
+ */
+
+/* Minumum amount of flash we support */
+#define FLASH_MIN		0x00020000	/* Minimum flash size */
+
+/* A boot/binary may have an embedded block that describes its size  */
+#define	BISZ_OFFSET		0x3e0		/* At this offset into the binary */
+#define	BISZ_MAGIC		0x4249535a	/* Marked with this value: 'BISZ' */
+#define	BISZ_MAGIC_IDX		0		/* Word 0: magic */
+#define	BISZ_TXTST_IDX		1		/*	1: text start */
+#define	BISZ_TXTEND_IDX		2		/*	2: text end */
+#define	BISZ_DATAST_IDX		3		/*	3: data start */
+#define	BISZ_DATAEND_IDX	4		/*	4: data end */
+#define	BISZ_BSSST_IDX		5		/*	5: bss start */
+#define	BISZ_BSSEND_IDX		6		/*	6: bss end */
+#define BISZ_SIZE		7		/* descriptor size in 32-bit integers */
+
+/* Boot/Kernel related defintion and functions */
+#define	SOC_BOOTDEV_ROM		0x00000001
+#define	SOC_BOOTDEV_PFLASH	0x00000002
+#define	SOC_BOOTDEV_SFLASH	0x00000004
+#define	SOC_BOOTDEV_NANDFLASH	0x00000008
+
+#define	SOC_KNLDEV_NORFLASH	0x00000002
+#define	SOC_KNLDEV_NANDFLASH	0x00000004
+
+#if !defined(_LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLY__)
+int soc_boot_dev(void *sih);
+int soc_knl_dev(void *sih);
+#endif	/* !defined(_LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLY__) */
+
+#endif /* _HNDSOC_H */
diff --git a/drivers/net/wireless/bcmdhd/include/linux_osl.h b/drivers/net/wireless/bcmdhd/include/linux_osl.h
new file mode 100644
index 0000000..a7dca28
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/linux_osl.h
@@ -0,0 +1,964 @@
+/*
+ * Linux OS Independent Layer
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ *
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ *
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: linux_osl.h 474317 2014-04-30 21:49:42Z $
+ */
+
+#ifndef _linux_osl_h_
+#define _linux_osl_h_
+
+#include <typedefs.h>
+#define DECLSPEC_ALIGN(x)	__attribute__ ((aligned(x)))
+
+/* Linux Kernel: File Operations: start */
+extern void * osl_os_open_image(char * filename);
+extern int osl_os_get_image_block(char * buf, int len, void * image);
+extern void osl_os_close_image(void * image);
+extern int osl_os_image_size(void *image);
+/* Linux Kernel: File Operations: end */
+
+#ifdef BCMDRIVER
+
+/* OSL initialization */
+#ifdef SHARED_OSL_CMN
+extern osl_t *osl_attach(void *pdev, uint bustype, bool pkttag, void **osh_cmn);
+#else
+extern osl_t *osl_attach(void *pdev, uint bustype, bool pkttag);
+#endif /* SHARED_OSL_CMN */
+
+extern void osl_detach(osl_t *osh);
+extern int osl_static_mem_init(osl_t *osh, void *adapter);
+extern int osl_static_mem_deinit(osl_t *osh, void *adapter);
+extern void osl_set_bus_handle(osl_t *osh, void *bus_handle);
+extern void* osl_get_bus_handle(osl_t *osh);
+
+/* Global ASSERT type */
+extern uint32 g_assert_type;
+
+/* ASSERT */
+#if defined(BCMASSERT_LOG)
+	#define ASSERT(exp) \
+	  do { if (!(exp)) osl_assert(#exp, __FILE__, __LINE__); } while (0)
+extern void osl_assert(const char *exp, const char *file, int line);
+#else
+	#ifdef __GNUC__
+		#define GCC_VERSION \
+			(__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
+		#if GCC_VERSION > 30100
+			#define ASSERT(exp)	do {} while (0)
+		#else
+			/* ASSERT could cause segmentation fault on GCC3.1, use empty instead */
+			#define ASSERT(exp)
+		#endif /* GCC_VERSION > 30100 */
+	#endif /* __GNUC__ */
+#endif
+
+/* bcm_prefetch_32B */
+static inline void bcm_prefetch_32B(const uint8 *addr, const int cachelines_32B)
+{
+#if defined(BCM47XX_CA9) && (__LINUX_ARM_ARCH__ >= 5)
+	switch (cachelines_32B) {
+		case 4: __asm__ __volatile__("pld\t%a0" :: "p"(addr + 96) : "cc");
+		case 3: __asm__ __volatile__("pld\t%a0" :: "p"(addr + 64) : "cc");
+		case 2: __asm__ __volatile__("pld\t%a0" :: "p"(addr + 32) : "cc");
+		case 1: __asm__ __volatile__("pld\t%a0" :: "p"(addr +  0) : "cc");
+	}
+#endif
+}
+
+/* microsecond delay */
+#define	OSL_DELAY(usec)		osl_delay(usec)
+extern void osl_delay(uint usec);
+
+#define OSL_SLEEP(ms)			osl_sleep(ms)
+extern void osl_sleep(uint ms);
+
+#define	OSL_PCMCIA_READ_ATTR(osh, offset, buf, size) \
+	osl_pcmcia_read_attr((osh), (offset), (buf), (size))
+#define	OSL_PCMCIA_WRITE_ATTR(osh, offset, buf, size) \
+	osl_pcmcia_write_attr((osh), (offset), (buf), (size))
+extern void osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size);
+extern void osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size);
+
+/* PCI configuration space access macros */
+#define	OSL_PCI_READ_CONFIG(osh, offset, size) \
+	osl_pci_read_config((osh), (offset), (size))
+#define	OSL_PCI_WRITE_CONFIG(osh, offset, size, val) \
+	osl_pci_write_config((osh), (offset), (size), (val))
+extern uint32 osl_pci_read_config(osl_t *osh, uint offset, uint size);
+extern void osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val);
+
+/* PCI device bus # and slot # */
+#define OSL_PCI_BUS(osh)	osl_pci_bus(osh)
+#define OSL_PCI_SLOT(osh)	osl_pci_slot(osh)
+#define OSL_PCIE_DOMAIN(osh)	osl_pcie_domain(osh)
+#define OSL_PCIE_BUS(osh)	osl_pcie_bus(osh)
+extern uint osl_pci_bus(osl_t *osh);
+extern uint osl_pci_slot(osl_t *osh);
+extern uint osl_pcie_domain(osl_t *osh);
+extern uint osl_pcie_bus(osl_t *osh);
+extern struct pci_dev *osl_pci_device(osl_t *osh);
+
+
+/* Pkttag flag should be part of public information */
+typedef struct {
+	bool pkttag;
+	bool mmbus;		/* Bus supports memory-mapped register accesses */
+	pktfree_cb_fn_t tx_fn;  /* Callback function for PKTFREE */
+	void *tx_ctx;		/* Context to the callback function */
+	void	*unused[3];
+} osl_pubinfo_t;
+
+extern void osl_flag_set(osl_t *osh, uint32 mask);
+extern bool osl_is_flag_set(osl_t *osh, uint32 mask);
+
+#define PKTFREESETCB(osh, _tx_fn, _tx_ctx)		\
+	do {						\
+	   ((osl_pubinfo_t*)osh)->tx_fn = _tx_fn;	\
+	   ((osl_pubinfo_t*)osh)->tx_ctx = _tx_ctx;	\
+	} while (0)
+
+
+/* host/bus architecture-specific byte swap */
+#define BUS_SWAP32(v)		(v)
+	#define MALLOC(osh, size)	osl_malloc((osh), (size))
+	#define MALLOCZ(osh, size)	osl_mallocz((osh), (size))
+	#define MFREE(osh, addr, size)	osl_mfree((osh), (addr), (size))
+	#define MALLOCED(osh)		osl_malloced((osh))
+	#define MEMORY_LEFTOVER(osh) osl_check_memleak(osh)
+	extern void *osl_malloc(osl_t *osh, uint size);
+	extern void *osl_mallocz(osl_t *osh, uint size);
+	extern void osl_mfree(osl_t *osh, void *addr, uint size);
+	extern uint osl_malloced(osl_t *osh);
+	extern uint osl_check_memleak(osl_t *osh);
+
+
+#define	MALLOC_FAILED(osh)	osl_malloc_failed((osh))
+extern uint osl_malloc_failed(osl_t *osh);
+
+/* allocate/free shared (dma-able) consistent memory */
+#define	DMA_CONSISTENT_ALIGN	osl_dma_consistent_align()
+#define	DMA_ALLOC_CONSISTENT(osh, size, align, tot, pap, dmah) \
+	osl_dma_alloc_consistent((osh), (size), (align), (tot), (pap))
+#define	DMA_FREE_CONSISTENT(osh, va, size, pa, dmah) \
+	osl_dma_free_consistent((osh), (void*)(va), (size), (pa))
+
+#define	DMA_ALLOC_CONSISTENT_FORCE32(osh, size, align, tot, pap, dmah) \
+	osl_dma_alloc_consistent((osh), (size), (align), (tot), (pap))
+#define	DMA_FREE_CONSISTENT_FORCE32(osh, va, size, pa, dmah) \
+	osl_dma_free_consistent((osh), (void*)(va), (size), (pa))
+
+extern uint osl_dma_consistent_align(void);
+extern void *osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align,
+	uint *tot, dmaaddr_t *pap);
+extern void osl_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa);
+
+/* map/unmap direction */
+#define	DMA_TX	1	/* TX direction for DMA */
+#define	DMA_RX	2	/* RX direction for DMA */
+
+/* map/unmap shared (dma-able) memory */
+#define	DMA_UNMAP(osh, pa, size, direction, p, dmah) \
+	osl_dma_unmap((osh), (pa), (size), (direction))
+extern dmaaddr_t osl_dma_map(osl_t *osh, void *va, uint size, int direction, void *p,
+	hnddma_seg_map_t *txp_dmah);
+extern void osl_dma_unmap(osl_t *osh, uint pa, uint size, int direction);
+
+/* API for DMA addressing capability */
+#define OSL_DMADDRWIDTH(osh, addrwidth) ({BCM_REFERENCE(osh); BCM_REFERENCE(addrwidth);})
+
+#if defined(__mips__) || (defined(BCM47XX_CA9) && defined(__ARM_ARCH_7A__))
+	extern void osl_cache_flush(void *va, uint size);
+	extern void osl_cache_inv(void *va, uint size);
+	extern void osl_prefetch(const void *ptr);
+	#define OSL_CACHE_FLUSH(va, len)	osl_cache_flush((void *) va, len)
+	#define OSL_CACHE_INV(va, len)		osl_cache_inv((void *) va, len)
+	#define OSL_PREFETCH(ptr)			osl_prefetch(ptr)
+#ifdef __ARM_ARCH_7A__
+	extern int osl_arch_is_coherent(void);
+	#define OSL_ARCH_IS_COHERENT()		osl_arch_is_coherent()
+#else
+	#define OSL_ARCH_IS_COHERENT()		NULL
+#endif /* __ARM_ARCH_7A__ */
+#else
+	#define OSL_CACHE_FLUSH(va, len)	BCM_REFERENCE(va)
+	#define OSL_CACHE_INV(va, len)		BCM_REFERENCE(va)
+	#define OSL_PREFETCH(ptr)		BCM_REFERENCE(ptr)
+
+	#define OSL_ARCH_IS_COHERENT()		NULL
+#endif
+
+/* register access macros */
+#if defined(BCMSDIO)
+	#include <bcmsdh.h>
+	#define OSL_WRITE_REG(osh, r, v) (bcmsdh_reg_write(osl_get_bus_handle(osh), \
+		(uintptr)(r), sizeof(*(r)), (v)))
+	#define OSL_READ_REG(osh, r) (bcmsdh_reg_read(osl_get_bus_handle(osh), \
+		(uintptr)(r), sizeof(*(r))))
+#elif defined(BCM47XX_ACP_WAR)
+extern void osl_pcie_rreg(osl_t *osh, ulong addr, void *v, uint size);
+
+#define OSL_READ_REG(osh, r) \
+	({\
+		__typeof(*(r)) __osl_v; \
+		osl_pcie_rreg(osh, (uintptr)(r), (void *)&__osl_v, sizeof(*(r))); \
+		__osl_v; \
+	})
+#endif
+
+#if defined(BCM47XX_ACP_WAR)
+	#define SELECT_BUS_WRITE(osh, mmap_op, bus_op) ({BCM_REFERENCE(osh); mmap_op;})
+	#define SELECT_BUS_READ(osh, mmap_op, bus_op) ({BCM_REFERENCE(osh); bus_op;})
+#else
+
+#if defined(BCMSDIO)
+	#define SELECT_BUS_WRITE(osh, mmap_op, bus_op) if (((osl_pubinfo_t*)(osh))->mmbus) \
+		mmap_op else bus_op
+	#define SELECT_BUS_READ(osh, mmap_op, bus_op) (((osl_pubinfo_t*)(osh))->mmbus) ? \
+		mmap_op : bus_op
+#else
+	#define SELECT_BUS_WRITE(osh, mmap_op, bus_op) ({BCM_REFERENCE(osh); mmap_op;})
+	#define SELECT_BUS_READ(osh, mmap_op, bus_op) ({BCM_REFERENCE(osh); mmap_op;})
+#endif
+#endif /* BCM47XX_ACP_WAR */
+
+#define OSL_ERROR(bcmerror)	osl_error(bcmerror)
+extern int osl_error(int bcmerror);
+
+/* the largest reasonable packet buffer driver uses for ethernet MTU in bytes */
+#define	PKTBUFSZ	2048   /* largest reasonable packet buffer, driver uses for ethernet MTU */
+
+#define OSH_NULL   NULL
+
+/*
+ * BINOSL selects the slightly slower function-call-based binary compatible osl.
+ * Macros expand to calls to functions defined in linux_osl.c .
+ */
+#include <linuxver.h>           /* use current 2.4.x calling conventions */
+#include <linux/kernel.h>       /* for vsn/printf's */
+#include <linux/string.h>       /* for mem*, str* */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 29)
+#define OSL_SYSUPTIME()		((uint32)jiffies_to_msecs(jiffies))
+#else
+#define OSL_SYSUPTIME()		((uint32)jiffies * (1000 / HZ))
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 29) */
+#define	printf(fmt, args...)	printk(fmt , ## args)
+#include <linux/kernel.h>	/* for vsn/printf's */
+#include <linux/string.h>	/* for mem*, str* */
+/* bcopy's: Linux kernel doesn't provide these (anymore) */
+#define	bcopy(src, dst, len)	memcpy((dst), (src), (len))
+#define	bcmp(b1, b2, len)	memcmp((b1), (b2), (len))
+#define	bzero(b, len)		memset((b), '\0', (len))
+
+/* register access macros */
+
+#define R_REG(osh, r) (\
+	SELECT_BUS_READ(osh, \
+		({ \
+			__typeof(*(r)) __osl_v; \
+			switch (sizeof(*(r))) { \
+				case sizeof(uint8):	__osl_v = \
+					readb((volatile uint8*)(r)); break; \
+				case sizeof(uint16):	__osl_v = \
+					readw((volatile uint16*)(r)); break; \
+				case sizeof(uint32):	__osl_v = \
+					readl((volatile uint32*)(r)); break; \
+			} \
+			__osl_v; \
+		}), \
+		OSL_READ_REG(osh, r)) \
+)
+
+#define W_REG(osh, r, v) do { \
+	SELECT_BUS_WRITE(osh, \
+		switch (sizeof(*(r))) { \
+			case sizeof(uint8):	writeb((uint8)(v), (volatile uint8*)(r)); break; \
+			case sizeof(uint16):	writew((uint16)(v), (volatile uint16*)(r)); break; \
+			case sizeof(uint32):	writel((uint32)(v), (volatile uint32*)(r)); break; \
+		}, \
+		(OSL_WRITE_REG(osh, r, v))); \
+	} while (0)
+
+#define	AND_REG(osh, r, v)		W_REG(osh, (r), R_REG(osh, r) & (v))
+#define	OR_REG(osh, r, v)		W_REG(osh, (r), R_REG(osh, r) | (v))
+
+/* bcopy, bcmp, and bzero functions */
+#define	bcopy(src, dst, len)	memcpy((dst), (src), (len))
+#define	bcmp(b1, b2, len)	memcmp((b1), (b2), (len))
+#define	bzero(b, len)		memset((b), '\0', (len))
+
+/* uncached/cached virtual address */
+#define OSL_UNCACHED(va)	((void *)va)
+#define OSL_CACHED(va)		((void *)va)
+
+#define OSL_PREF_RANGE_LD(va, sz) BCM_REFERENCE(va)
+#define OSL_PREF_RANGE_ST(va, sz) BCM_REFERENCE(va)
+
+/* get processor cycle count */
+#if defined(__i386__)
+#define	OSL_GETCYCLES(x)	rdtscl((x))
+#else
+#define OSL_GETCYCLES(x)	((x) = 0)
+#endif
+
+/* dereference an address that may cause a bus exception */
+#define	BUSPROBE(val, addr)	({ (val) = R_REG(NULL, (addr)); 0; })
+
+/* map/unmap physical to virtual I/O */
+#if !defined(CONFIG_MMC_MSM7X00A)
+#define	REG_MAP(pa, size)	ioremap_nocache((unsigned long)(pa), (unsigned long)(size))
+#else
+#define REG_MAP(pa, size)       (void *)(0)
+#endif /* !defined(CONFIG_MMC_MSM7X00A */
+#define	REG_UNMAP(va)		iounmap((va))
+
+/* shared (dma-able) memory access macros */
+#define	R_SM(r)			*(r)
+#define	W_SM(r, v)		(*(r) = (v))
+#define	BZERO_SM(r, len)	memset((r), '\0', (len))
+
+/* Because the non BINOSL implemenation of the PKT OSL routines are macros (for
+ * performance reasons),  we need the Linux headers.
+ */
+#include <linuxver.h>		/* use current 2.4.x calling conventions */
+
+/* packet primitives */
+#ifdef BCMDBG_CTRACE
+#define	PKTGET(osh, len, send)		osl_pktget((osh), (len), __LINE__, __FILE__)
+#define	PKTDUP(osh, skb)		osl_pktdup((osh), (skb), __LINE__, __FILE__)
+#else
+#define	PKTGET(osh, len, send)		osl_pktget((osh), (len))
+#define	PKTDUP(osh, skb)		osl_pktdup((osh), (skb))
+#endif /* BCMDBG_CTRACE */
+#define PKTLIST_DUMP(osh, buf)		BCM_REFERENCE(osh)
+#define PKTDBG_TRACE(osh, pkt, bit)	BCM_REFERENCE(osh)
+#define	PKTFREE(osh, skb, send)		osl_pktfree((osh), (skb), (send))
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+#define	PKTGET_STATIC(osh, len, send)		osl_pktget_static((osh), (len))
+#define	PKTFREE_STATIC(osh, skb, send)		osl_pktfree_static((osh), (skb), (send))
+#else
+#define	PKTGET_STATIC	PKTGET
+#define	PKTFREE_STATIC	PKTFREE
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+#define	PKTDATA(osh, skb)		({BCM_REFERENCE(osh); (((struct sk_buff*)(skb))->data);})
+#define	PKTLEN(osh, skb)		({BCM_REFERENCE(osh); (((struct sk_buff*)(skb))->len);})
+#define PKTHEADROOM(osh, skb)		(PKTDATA(osh, skb)-(((struct sk_buff*)(skb))->head))
+#define PKTEXPHEADROOM(osh, skb, b)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 skb_realloc_headroom((struct sk_buff*)(skb), (b)); \
+	 })
+#define PKTTAILROOM(osh, skb)		\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 skb_tailroom((struct sk_buff*)(skb)); \
+	 })
+#define PKTPADTAILROOM(osh, skb, padlen) \
+	({ \
+	 BCM_REFERENCE(osh); \
+	 skb_pad((struct sk_buff*)(skb), (padlen)); \
+	 })
+#define	PKTNEXT(osh, skb)		({BCM_REFERENCE(osh); (((struct sk_buff*)(skb))->next);})
+#define	PKTSETNEXT(osh, skb, x)		\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->next = (struct sk_buff*)(x)); \
+	 })
+#define	PKTSETLEN(osh, skb, len)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 __skb_trim((struct sk_buff*)(skb), (len)); \
+	 })
+#define	PKTPUSH(osh, skb, bytes)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 skb_push((struct sk_buff*)(skb), (bytes)); \
+	 })
+#define	PKTPULL(osh, skb, bytes)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 skb_pull((struct sk_buff*)(skb), (bytes)); \
+	 })
+#define	PKTTAG(skb)			((void*)(((struct sk_buff*)(skb))->cb))
+#define PKTSETPOOL(osh, skb, x, y)	BCM_REFERENCE(osh)
+#define	PKTPOOL(osh, skb)		({BCM_REFERENCE(osh); BCM_REFERENCE(skb); FALSE;})
+#define PKTFREELIST(skb)        PKTLINK(skb)
+#define PKTSETFREELIST(skb, x)  PKTSETLINK((skb), (x))
+#define PKTPTR(skb)             (skb)
+#define PKTID(skb)              ({BCM_REFERENCE(skb); 0;})
+#define PKTSETID(skb, id)       ({BCM_REFERENCE(skb); BCM_REFERENCE(id);})
+#define PKTSHRINK(osh, m)		({BCM_REFERENCE(osh); m;})
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)
+#define PKTORPHAN(skb)          skb_orphan(skb)
+#else
+#define PKTORPHAN(skb)          ({BCM_REFERENCE(skb); 0;})
+#endif /* LINUX VERSION >= 3.6 */
+
+
+#ifdef BCMDBG_CTRACE
+#define	DEL_CTRACE(zosh, zskb) { \
+	unsigned long zflags; \
+	spin_lock_irqsave(&(zosh)->ctrace_lock, zflags); \
+	list_del(&(zskb)->ctrace_list); \
+	(zosh)->ctrace_num--; \
+	(zskb)->ctrace_start = 0; \
+	(zskb)->ctrace_count = 0; \
+	spin_unlock_irqrestore(&(zosh)->ctrace_lock, zflags); \
+}
+
+#define	UPDATE_CTRACE(zskb, zfile, zline) { \
+	struct sk_buff *_zskb = (struct sk_buff *)(zskb); \
+	if (_zskb->ctrace_count < CTRACE_NUM) { \
+		_zskb->func[_zskb->ctrace_count] = zfile; \
+		_zskb->line[_zskb->ctrace_count] = zline; \
+		_zskb->ctrace_count++; \
+	} \
+	else { \
+		_zskb->func[_zskb->ctrace_start] = zfile; \
+		_zskb->line[_zskb->ctrace_start] = zline; \
+		_zskb->ctrace_start++; \
+		if (_zskb->ctrace_start >= CTRACE_NUM) \
+			_zskb->ctrace_start = 0; \
+	} \
+}
+
+#define	ADD_CTRACE(zosh, zskb, zfile, zline) { \
+	unsigned long zflags; \
+	spin_lock_irqsave(&(zosh)->ctrace_lock, zflags); \
+	list_add(&(zskb)->ctrace_list, &(zosh)->ctrace_list); \
+	(zosh)->ctrace_num++; \
+	UPDATE_CTRACE(zskb, zfile, zline); \
+	spin_unlock_irqrestore(&(zosh)->ctrace_lock, zflags); \
+}
+
+#define PKTCALLER(zskb)	UPDATE_CTRACE((struct sk_buff *)zskb, (char *)__FUNCTION__, __LINE__)
+#endif /* BCMDBG_CTRACE */
+
+#ifdef CTFPOOL
+#define	CTFPOOL_REFILL_THRESH	3
+typedef struct ctfpool {
+	void		*head;
+	spinlock_t	lock;
+	uint		max_obj;
+	uint		curr_obj;
+	uint		obj_size;
+	uint		refills;
+	uint		fast_allocs;
+	uint 		fast_frees;
+	uint 		slow_allocs;
+} ctfpool_t;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
+#define	FASTBUF	(1 << 0)
+#define	PKTSETFAST(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 ((((struct sk_buff*)(skb))->pktc_flags) |= FASTBUF); \
+	 })
+#define	PKTCLRFAST(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 ((((struct sk_buff*)(skb))->pktc_flags) &= (~FASTBUF)); \
+	 })
+#define	PKTISFAST(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 ((((struct sk_buff*)(skb))->pktc_flags) & FASTBUF); \
+	 })
+#define	PKTFAST(osh, skb)	(((struct sk_buff*)(skb))->pktc_flags)
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
+#define	FASTBUF	(1 << 16)
+#define	PKTSETFAST(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 ((((struct sk_buff*)(skb))->mac_len) |= FASTBUF); \
+	 })
+#define	PKTCLRFAST(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 ((((struct sk_buff*)(skb))->mac_len) &= (~FASTBUF)); \
+	 })
+#define	PKTISFAST(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 ((((struct sk_buff*)(skb))->mac_len) & FASTBUF); \
+	 })
+#define	PKTFAST(osh, skb)	(((struct sk_buff*)(skb))->mac_len)
+#else
+#define	FASTBUF	(1 << 0)
+#define	PKTSETFAST(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 ((((struct sk_buff*)(skb))->__unused) |= FASTBUF); \
+	 })
+#define	PKTCLRFAST(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 ((((struct sk_buff*)(skb))->__unused) &= (~FASTBUF)); \
+	 })
+#define	PKTISFAST(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 ((((struct sk_buff*)(skb))->__unused) & FASTBUF); \
+	 })
+#define	PKTFAST(osh, skb)	(((struct sk_buff*)(skb))->__unused)
+#endif /* 2.6.22 */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
+#define	CTFPOOLPTR(osh, skb)	(((struct sk_buff*)(skb))->ctfpool)
+#define	CTFPOOLHEAD(osh, skb)	(((ctfpool_t *)((struct sk_buff*)(skb))->ctfpool)->head)
+#else
+#define	CTFPOOLPTR(osh, skb)	(((struct sk_buff*)(skb))->sk)
+#define	CTFPOOLHEAD(osh, skb)	(((ctfpool_t *)((struct sk_buff*)(skb))->sk)->head)
+#endif
+
+extern void *osl_ctfpool_add(osl_t *osh);
+extern void osl_ctfpool_replenish(osl_t *osh, uint thresh);
+extern int32 osl_ctfpool_init(osl_t *osh, uint numobj, uint size);
+extern void osl_ctfpool_cleanup(osl_t *osh);
+extern void osl_ctfpool_stats(osl_t *osh, void *b);
+#else /* CTFPOOL */
+#define	PKTSETFAST(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define	PKTCLRFAST(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define	PKTISFAST(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb); FALSE;})
+#endif /* CTFPOOL */
+
+#define	PKTSETCTF(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define	PKTCLRCTF(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define	PKTISCTF(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb); FALSE;})
+
+#ifdef HNDCTF
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
+#define	SKIPCT	(1 << 2)
+#define	CHAINED	(1 << 3)
+#define	PKTSETSKIPCT(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->pktc_flags |= SKIPCT); \
+	 })
+#define	PKTCLRSKIPCT(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->pktc_flags &= (~SKIPCT)); \
+	 })
+#define	PKTSKIPCT(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->pktc_flags & SKIPCT); \
+	 })
+#define	PKTSETCHAINED(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->pktc_flags |= CHAINED); \
+	 })
+#define	PKTCLRCHAINED(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->pktc_flags &= (~CHAINED)); \
+	 })
+#define	PKTISCHAINED(skb)	(((struct sk_buff*)(skb))->pktc_flags & CHAINED)
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
+#define	SKIPCT	(1 << 18)
+#define	CHAINED	(1 << 19)
+#define	PKTSETSKIPCT(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->mac_len |= SKIPCT); \
+	 })
+#define	PKTCLRSKIPCT(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->mac_len &= (~SKIPCT)); \
+	 })
+#define	PKTSKIPCT(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->mac_len & SKIPCT); \
+	 })
+#define	PKTSETCHAINED(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->mac_len |= CHAINED); \
+	 })
+#define	PKTCLRCHAINED(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->mac_len &= (~CHAINED)); \
+	 })
+#define	PKTISCHAINED(skb)	(((struct sk_buff*)(skb))->mac_len & CHAINED)
+#else /* 2.6.22 */
+#define	SKIPCT	(1 << 2)
+#define	CHAINED	(1 << 3)
+#define	PKTSETSKIPCT(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->__unused |= SKIPCT); \
+	 })
+#define	PKTCLRSKIPCT(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->__unused &= (~SKIPCT)); \
+	 })
+#define	PKTSKIPCT(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->__unused & SKIPCT); \
+	 })
+#define	PKTSETCHAINED(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->__unused |= CHAINED); \
+	 })
+#define	PKTCLRCHAINED(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->__unused &= (~CHAINED)); \
+	 })
+#define	PKTISCHAINED(skb)	(((struct sk_buff*)(skb))->__unused & CHAINED)
+#endif /* 2.6.22 */
+typedef struct ctf_mark {
+	uint32	value;
+}	ctf_mark_t;
+#define CTF_MARK(m)				(m.value)
+#else /* HNDCTF */
+#define	PKTSETSKIPCT(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define	PKTCLRSKIPCT(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define	PKTSKIPCT(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define CTF_MARK(m)		({BCM_REFERENCE(m); 0;})
+#endif /* HNDCTF */
+
+#if defined(BCM_GMAC3)
+
+/** pktalloced accounting in devices using GMAC Bulk Forwarding to DHD */
+
+/* Account for packets delivered to downstream forwarder by GMAC interface. */
+extern void osl_pkt_tofwder(osl_t *osh, void *skbs, int skb_cnt);
+#define PKTTOFWDER(osh, skbs, skb_cnt)  \
+	osl_pkt_tofwder(((osl_t *)osh), (void *)(skbs), (skb_cnt))
+
+/* Account for packets received from downstream forwarder. */
+#if defined(BCMDBG_CTRACE) /* pkt logging */
+extern void osl_pkt_frmfwder(osl_t *osh, void *skbs, int skb_cnt,
+                             int line, char *file);
+#define PKTFRMFWDER(osh, skbs, skb_cnt) \
+	osl_pkt_frmfwder(((osl_t *)osh), (void *)(skbs), (skb_cnt), \
+	                 __LINE__, __FILE__)
+#else  /* ! (BCMDBG_PKT || BCMDBG_CTRACE) */
+extern void osl_pkt_frmfwder(osl_t *osh, void *skbs, int skb_cnt);
+#define PKTFRMFWDER(osh, skbs, skb_cnt) \
+	osl_pkt_frmfwder(((osl_t *)osh), (void *)(skbs), (skb_cnt))
+#endif
+
+
+/** GMAC Forwarded packet tagging for reduced cache flush/invalidate.
+ * In FWDERBUF tagged packet, only FWDER_PKTMAPSZ amount of data would have
+ * been accessed in the GMAC forwarder. This may be used to limit the number of
+ * cachelines that need to be flushed or invalidated.
+ * Packets sent to the DHD from a GMAC forwarder will be tagged w/ FWDERBUF.
+ * DHD may clear the FWDERBUF tag, if more than FWDER_PKTMAPSZ was accessed.
+ * Likewise, a debug print of a packet payload in say the ethernet driver needs
+ * to be accompanied with a clear of the FWDERBUF tag.
+ */
+
+/** Forwarded packets, have a HWRXOFF sized rx header (etc.h) */
+#define FWDER_HWRXOFF       (30)
+
+/** Maximum amount of a pktadat that a downstream forwarder (GMAC) may have
+ * read into the L1 cache (not dirty). This may be used in reduced cache ops.
+ *
+ * Max 56: ET HWRXOFF[30] + BRCMHdr[4] + EtherHdr[14] + VlanHdr[4] + IP[4]
+ */
+#define FWDER_PKTMAPSZ      (FWDER_HWRXOFF + 4 + 14 + 4 + 4)
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
+
+#define FWDERBUF            (1 << 4)
+#define PKTSETFWDERBUF(osh, skb) \
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->pktc_flags |= FWDERBUF); \
+	 })
+#define PKTCLRFWDERBUF(osh, skb) \
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->pktc_flags &= (~FWDERBUF)); \
+	 })
+#define PKTISFWDERBUF(osh, skb) \
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->pktc_flags & FWDERBUF); \
+	 })
+
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
+
+#define FWDERBUF	        (1 << 20)
+#define PKTSETFWDERBUF(osh, skb) \
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->mac_len |= FWDERBUF); \
+	 })
+#define PKTCLRFWDERBUF(osh, skb)  \
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->mac_len &= (~FWDERBUF)); \
+	 })
+#define PKTISFWDERBUF(osh, skb) \
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->mac_len & FWDERBUF); \
+	 })
+
+#else /* 2.6.22 */
+
+#define FWDERBUF            (1 << 4)
+#define PKTSETFWDERBUF(osh, skb)  \
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->__unused |= FWDERBUF); \
+	 })
+#define PKTCLRFWDERBUF(osh, skb)  \
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->__unused &= (~FWDERBUF)); \
+	 })
+#define PKTISFWDERBUF(osh, skb) \
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->__unused & FWDERBUF); \
+	 })
+
+#endif /* 2.6.22 */
+
+#else  /* ! BCM_GMAC3 */
+
+#define PKTSETFWDERBUF(osh, skb)  ({ BCM_REFERENCE(osh); BCM_REFERENCE(skb); })
+#define PKTCLRFWDERBUF(osh, skb)  ({ BCM_REFERENCE(osh); BCM_REFERENCE(skb); })
+#define PKTISFWDERBUF(osh, skb)   ({ BCM_REFERENCE(osh); BCM_REFERENCE(skb); FALSE;})
+
+#endif /* ! BCM_GMAC3 */
+
+
+#ifdef HNDCTF
+/* For broadstream iqos */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
+#define	TOBR		(1 << 5)
+#define	PKTSETTOBR(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->pktc_flags |= TOBR); \
+	 })
+#define	PKTCLRTOBR(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->pktc_flags &= (~TOBR)); \
+	 })
+#define	PKTISTOBR(skb)	(((struct sk_buff*)(skb))->pktc_flags & TOBR)
+#define	PKTSETCTFIPCTXIF(skb, ifp)	(((struct sk_buff*)(skb))->ctf_ipc_txif = ifp)
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
+#define	PKTSETTOBR(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define	PKTCLRTOBR(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define	PKTISTOBR(skb)	({BCM_REFERENCE(skb); FALSE;})
+#define	PKTSETCTFIPCTXIF(skb, ifp)	({BCM_REFERENCE(skb); BCM_REFERENCE(ifp);})
+#else /* 2.6.22 */
+#define	PKTSETTOBR(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define	PKTCLRTOBR(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define	PKTISTOBR(skb)	({BCM_REFERENCE(skb); FALSE;})
+#define	PKTSETCTFIPCTXIF(skb, ifp)	({BCM_REFERENCE(skb); BCM_REFERENCE(ifp);})
+#endif /* 2.6.22 */
+#else /* HNDCTF */
+#define	PKTSETTOBR(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define	PKTCLRTOBR(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define	PKTISTOBR(skb)	({BCM_REFERENCE(skb); FALSE;})
+#endif /* HNDCTF */
+
+
+#ifdef BCMFA
+#ifdef BCMFA_HW_HASH
+#define PKTSETFAHIDX(skb, idx)	(((struct sk_buff*)(skb))->napt_idx = idx)
+#else
+#define PKTSETFAHIDX(skb, idx)	({BCM_REFERENCE(skb); BCM_REFERENCE(idx);})
+#endif /* BCMFA_SW_HASH */
+#define PKTGETFAHIDX(skb)	(((struct sk_buff*)(skb))->napt_idx)
+#define PKTSETFADEV(skb, imp)	(((struct sk_buff*)(skb))->dev = imp)
+#define PKTSETRXDEV(skb)	(((struct sk_buff*)(skb))->rxdev = ((struct sk_buff*)(skb))->dev)
+
+#define	AUX_TCP_FIN_RST	(1 << 0)
+#define	AUX_FREED	(1 << 1)
+#define PKTSETFAAUX(skb)	(((struct sk_buff*)(skb))->napt_flags |= AUX_TCP_FIN_RST)
+#define	PKTCLRFAAUX(skb)	(((struct sk_buff*)(skb))->napt_flags &= (~AUX_TCP_FIN_RST))
+#define	PKTISFAAUX(skb)		(((struct sk_buff*)(skb))->napt_flags & AUX_TCP_FIN_RST)
+#define PKTSETFAFREED(skb)	(((struct sk_buff*)(skb))->napt_flags |= AUX_FREED)
+#define	PKTCLRFAFREED(skb)	(((struct sk_buff*)(skb))->napt_flags &= (~AUX_FREED))
+#define	PKTISFAFREED(skb)	(((struct sk_buff*)(skb))->napt_flags & AUX_FREED)
+#define	PKTISFABRIDGED(skb)	PKTISFAAUX(skb)
+#else
+#define	PKTISFAAUX(skb)		({BCM_REFERENCE(skb); FALSE;})
+#define	PKTISFABRIDGED(skb)	({BCM_REFERENCE(skb); FALSE;})
+#define	PKTISFAFREED(skb)	({BCM_REFERENCE(skb); FALSE;})
+
+#define	PKTCLRFAAUX(skb)	BCM_REFERENCE(skb)
+#define PKTSETFAFREED(skb)	BCM_REFERENCE(skb)
+#define	PKTCLRFAFREED(skb)	BCM_REFERENCE(skb)
+#endif /* BCMFA */
+
+extern void osl_pktfree(osl_t *osh, void *skb, bool send);
+extern void *osl_pktget_static(osl_t *osh, uint len);
+extern void osl_pktfree_static(osl_t *osh, void *skb, bool send);
+extern void osl_pktclone(osl_t *osh, void **pkt);
+
+#ifdef BCMDBG_CTRACE
+#define PKT_CTRACE_DUMP(osh, b)	osl_ctrace_dump((osh), (b))
+extern void *osl_pktget(osl_t *osh, uint len, int line, char *file);
+extern void *osl_pkt_frmnative(osl_t *osh, void *skb, int line, char *file);
+extern int osl_pkt_is_frmnative(osl_t *osh, struct sk_buff *pkt);
+extern void *osl_pktdup(osl_t *osh, void *skb, int line, char *file);
+struct bcmstrbuf;
+extern void osl_ctrace_dump(osl_t *osh, struct bcmstrbuf *b);
+#else
+extern void *osl_pkt_frmnative(osl_t *osh, void *skb);
+extern void *osl_pktget(osl_t *osh, uint len);
+extern void *osl_pktdup(osl_t *osh, void *skb);
+#endif /* BCMDBG_CTRACE */
+extern struct sk_buff *osl_pkt_tonative(osl_t *osh, void *pkt);
+#ifdef BCMDBG_CTRACE
+#define PKTFRMNATIVE(osh, skb)  osl_pkt_frmnative(((osl_t *)osh), \
+				(struct sk_buff*)(skb), __LINE__, __FILE__)
+#define	PKTISFRMNATIVE(osh, skb) osl_pkt_is_frmnative((osl_t *)(osh), (struct sk_buff *)(skb))
+#else
+#define PKTFRMNATIVE(osh, skb)	osl_pkt_frmnative(((osl_t *)osh), (struct sk_buff*)(skb))
+#endif /* BCMDBG_CTRACE */
+#define PKTTONATIVE(osh, pkt)		osl_pkt_tonative((osl_t *)(osh), (pkt))
+
+#define	PKTLINK(skb)			(((struct sk_buff*)(skb))->prev)
+#define	PKTSETLINK(skb, x)		(((struct sk_buff*)(skb))->prev = (struct sk_buff*)(x))
+#define	PKTPRIO(skb)			(((struct sk_buff*)(skb))->priority)
+#define	PKTSETPRIO(skb, x)		(((struct sk_buff*)(skb))->priority = (x))
+#define PKTSUMNEEDED(skb)		(((struct sk_buff*)(skb))->ip_summed == CHECKSUM_HW)
+#define PKTSETSUMGOOD(skb, x)		(((struct sk_buff*)(skb))->ip_summed = \
+						((x) ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE))
+/* PKTSETSUMNEEDED and PKTSUMGOOD are not possible because skb->ip_summed is overloaded */
+#define PKTSHARED(skb)                  (((struct sk_buff*)(skb))->cloned)
+
+#ifdef CONFIG_NF_CONNTRACK_MARK
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#define PKTMARK(p)                     (((struct sk_buff *)(p))->mark)
+#define PKTSETMARK(p, m)               ((struct sk_buff *)(p))->mark = (m)
+#else /* !2.6.0 */
+#define PKTMARK(p)                     (((struct sk_buff *)(p))->nfmark)
+#define PKTSETMARK(p, m)               ((struct sk_buff *)(p))->nfmark = (m)
+#endif /* 2.6.0 */
+#else /* CONFIG_NF_CONNTRACK_MARK */
+#define PKTMARK(p)                     0
+#define PKTSETMARK(p, m)
+#endif /* CONFIG_NF_CONNTRACK_MARK */
+
+#define PKTALLOCED(osh)		osl_pktalloced(osh)
+extern uint osl_pktalloced(osl_t *osh);
+
+#define OSL_RAND()		osl_rand()
+extern uint32 osl_rand(void);
+
+#define	DMA_MAP(osh, va, size, direction, p, dmah) \
+	osl_dma_map((osh), (va), (size), (direction), (p), (dmah))
+
+#ifdef PKTC
+/* Use 8 bytes of skb tstamp field to store below info */
+struct chain_node {
+	struct sk_buff	*link;
+	unsigned int	flags:3, pkts:9, bytes:20;
+};
+
+#define CHAIN_NODE(skb)		((struct chain_node*)(((struct sk_buff*)skb)->pktc_cb))
+
+#define	PKTCSETATTR(s, f, p, b)	({CHAIN_NODE(s)->flags = (f); CHAIN_NODE(s)->pkts = (p); \
+	                         CHAIN_NODE(s)->bytes = (b);})
+#define	PKTCCLRATTR(s)		({CHAIN_NODE(s)->flags = CHAIN_NODE(s)->pkts = \
+	                         CHAIN_NODE(s)->bytes = 0;})
+#define	PKTCGETATTR(s)		(CHAIN_NODE(s)->flags << 29 | CHAIN_NODE(s)->pkts << 20 | \
+	                         CHAIN_NODE(s)->bytes)
+#define	PKTCCNT(skb)		(CHAIN_NODE(skb)->pkts)
+#define	PKTCLEN(skb)		(CHAIN_NODE(skb)->bytes)
+#define	PKTCGETFLAGS(skb)	(CHAIN_NODE(skb)->flags)
+#define	PKTCSETFLAGS(skb, f)	(CHAIN_NODE(skb)->flags = (f))
+#define	PKTCCLRFLAGS(skb)	(CHAIN_NODE(skb)->flags = 0)
+#define	PKTCFLAGS(skb)		(CHAIN_NODE(skb)->flags)
+#define	PKTCSETCNT(skb, c)	(CHAIN_NODE(skb)->pkts = (c))
+#define	PKTCINCRCNT(skb)	(CHAIN_NODE(skb)->pkts++)
+#define	PKTCADDCNT(skb, c)	(CHAIN_NODE(skb)->pkts += (c))
+#define	PKTCSETLEN(skb, l)	(CHAIN_NODE(skb)->bytes = (l))
+#define	PKTCADDLEN(skb, l)	(CHAIN_NODE(skb)->bytes += (l))
+#define	PKTCSETFLAG(skb, fb)	(CHAIN_NODE(skb)->flags |= (fb))
+#define	PKTCCLRFLAG(skb, fb)	(CHAIN_NODE(skb)->flags &= ~(fb))
+#define	PKTCLINK(skb)		(CHAIN_NODE(skb)->link)
+#define	PKTSETCLINK(skb, x)	(CHAIN_NODE(skb)->link = (struct sk_buff*)(x))
+#define FOREACH_CHAINED_PKT(skb, nskb) \
+	for (; (skb) != NULL; (skb) = (nskb)) \
+		if ((nskb) = (PKTISCHAINED(skb) ? PKTCLINK(skb) : NULL), \
+		    PKTSETCLINK((skb), NULL), 1)
+#define	PKTCFREE(osh, skb, send) \
+do { \
+	void *nskb; \
+	ASSERT((skb) != NULL); \
+	FOREACH_CHAINED_PKT((skb), nskb) { \
+		PKTCLRCHAINED((osh), (skb)); \
+		PKTCCLRFLAGS((skb)); \
+		PKTFREE((osh), (skb), (send)); \
+	} \
+} while (0)
+#define PKTCENQTAIL(h, t, p) \
+do { \
+	if ((t) == NULL) { \
+		(h) = (t) = (p); \
+	} else { \
+		PKTSETCLINK((t), (p)); \
+		(t) = (p); \
+	} \
+} while (0)
+#endif /* PKTC */
+
+#else /* ! BCMDRIVER */
+
+
+/* ASSERT */
+	#define ASSERT(exp)	do {} while (0)
+
+/* MALLOC and MFREE */
+#define MALLOC(o, l) malloc(l)
+#define MFREE(o, p, l) free(p)
+#include <stdlib.h>
+
+/* str* and mem* functions */
+#include <string.h>
+
+/* *printf functions */
+#include <stdio.h>
+
+/* bcopy, bcmp, and bzero */
+extern void bcopy(const void *src, void *dst, size_t len);
+extern int bcmp(const void *b1, const void *b2, size_t len);
+extern void bzero(void *b, size_t len);
+#endif /* ! BCMDRIVER */
+
+#endif	/* _linux_osl_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/linuxver.h b/drivers/net/wireless/bcmdhd/include/linuxver.h
new file mode 100644
index 0000000..c0d8017
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/linuxver.h
@@ -0,0 +1,748 @@
+/*
+ * Linux-specific abstractions to gain some independence from linux kernel versions.
+ * Pave over some 2.2 versus 2.4 versus 2.6 kernel differences.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: linuxver.h 431983 2013-10-25 06:53:27Z $
+ */
+
+#ifndef _linuxver_h_
+#define _linuxver_h_
+
+#include <typedefs.h>
+#include <linux/version.h>
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+#include <linux/config.h>
+#else
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 33))
+#include <generated/autoconf.h>
+#else
+#include <linux/autoconf.h>
+#endif
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */
+#include <linux/module.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0))
+#include <linux/kconfig.h>
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0))
+/* __NO_VERSION__ must be defined for all linkables except one in 2.2 */
+#ifdef __UNDEF_NO_VERSION__
+#undef __NO_VERSION__
+#else
+#define __NO_VERSION__
+#endif
+#endif	/* LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0) */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
+#define module_param(_name_, _type_, _perm_)	MODULE_PARM(_name_, "i")
+#define module_param_string(_name_, _string_, _size_, _perm_) \
+		MODULE_PARM(_string_, "c" __MODULE_STRING(_size_))
+#endif
+
+/* linux/malloc.h is deprecated, use linux/slab.h instead. */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 9))
+#include <linux/malloc.h>
+#else
+#include <linux/slab.h>
+#endif
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+#include <linux/semaphore.h>
+#else
+#include <asm/semaphore.h>
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28))
+#undef IP_TOS
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)) */
+#include <asm/io.h>
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41))
+#include <linux/workqueue.h>
+#else
+#include <linux/tqueue.h>
+#ifndef work_struct
+#define work_struct tq_struct
+#endif
+#ifndef INIT_WORK
+#define INIT_WORK(_work, _func, _data) INIT_TQUEUE((_work), (_func), (_data))
+#endif
+#ifndef schedule_work
+#define schedule_work(_work) schedule_task((_work))
+#endif
+#ifndef flush_scheduled_work
+#define flush_scheduled_work() flush_scheduled_tasks()
+#endif
+#endif	/* LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+#define DAEMONIZE(a)	do { \
+		allow_signal(SIGKILL);	\
+		allow_signal(SIGTERM);	\
+	} while (0)
+#elif ((LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) && \
+	(LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)))
+#define DAEMONIZE(a) daemonize(a); \
+	allow_signal(SIGKILL); \
+	allow_signal(SIGTERM);
+#else /* Linux 2.4 (w/o preemption patch) */
+#define RAISE_RX_SOFTIRQ() \
+	cpu_raise_softirq(smp_processor_id(), NET_RX_SOFTIRQ)
+#define DAEMONIZE(a) daemonize(); \
+	do { if (a) \
+		strncpy(current->comm, a, MIN(sizeof(current->comm), (strlen(a)))); \
+	} while (0);
+#endif /* LINUX_VERSION_CODE  */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
+#define	MY_INIT_WORK(_work, _func)	INIT_WORK(_work, _func)
+#else
+#define	MY_INIT_WORK(_work, _func)	INIT_WORK(_work, _func, _work)
+#if !(LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18) && defined(RHEL_MAJOR) && \
+	(RHEL_MAJOR == 5))
+/* Exclude RHEL 5 */
+typedef void (*work_func_t)(void *work);
+#endif
+#endif	/* >= 2.6.20 */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+/* Some distributions have their own 2.6.x compatibility layers */
+#ifndef IRQ_NONE
+typedef void irqreturn_t;
+#define IRQ_NONE
+#define IRQ_HANDLED
+#define IRQ_RETVAL(x)
+#endif
+#else
+typedef irqreturn_t(*FN_ISR) (int irq, void *dev_id, struct pt_regs *ptregs);
+#endif	/* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
+#define IRQF_SHARED	SA_SHIRQ
+#endif /* < 2.6.18 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
+#ifdef	CONFIG_NET_RADIO
+#define	CONFIG_WIRELESS_EXT
+#endif
+#endif	/* < 2.6.17 */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 67)
+#define MOD_INC_USE_COUNT
+#define MOD_DEC_USE_COUNT
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 67) */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
+#include <linux/sched.h>
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0))
+#include <linux/sched/rt.h>
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
+#include <net/lib80211.h>
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
+#include <linux/ieee80211.h>
+#else
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
+#include <net/ieee80211.h>
+#endif
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30) */
+
+
+#ifndef __exit
+#define __exit
+#endif
+#ifndef __devexit
+#define __devexit
+#endif
+#ifndef __devinit
+#  if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+#    define __devinit	__init
+#  else
+/* All devices are hotpluggable since linux 3.8.0 */
+#    define __devinit
+#  endif
+#endif /* !__devinit */
+#ifndef __devinitdata
+#define __devinitdata
+#endif
+#ifndef __devexit_p
+#define __devexit_p(x)	x
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0))
+
+#define pci_get_drvdata(dev)		(dev)->sysdata
+#define pci_set_drvdata(dev, value)	(dev)->sysdata = (value)
+
+/*
+ * New-style (2.4.x) PCI/hot-pluggable PCI/CardBus registration
+ */
+
+struct pci_device_id {
+	unsigned int vendor, device;		/* Vendor and device ID or PCI_ANY_ID */
+	unsigned int subvendor, subdevice;	/* Subsystem ID's or PCI_ANY_ID */
+	unsigned int class, class_mask;		/* (class,subclass,prog-if) triplet */
+	unsigned long driver_data;		/* Data private to the driver */
+};
+
+struct pci_driver {
+	struct list_head node;
+	char *name;
+	const struct pci_device_id *id_table;	/* NULL if wants all devices */
+	int (*probe)(struct pci_dev *dev,
+	             const struct pci_device_id *id); /* New device inserted */
+	void (*remove)(struct pci_dev *dev);	/* Device removed (NULL if not a hot-plug
+						 * capable driver)
+						 */
+	void (*suspend)(struct pci_dev *dev);	/* Device suspended */
+	void (*resume)(struct pci_dev *dev);	/* Device woken up */
+};
+
+#define MODULE_DEVICE_TABLE(type, name)
+#define PCI_ANY_ID (~0)
+
+/* compatpci.c */
+#define pci_module_init pci_register_driver
+extern int pci_register_driver(struct pci_driver *drv);
+extern void pci_unregister_driver(struct pci_driver *drv);
+
+#endif /* PCI registration */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18))
+#define pci_module_init pci_register_driver
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18))
+#ifdef MODULE
+#define module_init(x) int init_module(void) { return x(); }
+#define module_exit(x) void cleanup_module(void) { x(); }
+#else
+#define module_init(x)	__initcall(x);
+#define module_exit(x)	__exitcall(x);
+#endif
+#endif	/* LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18) */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)
+#define WL_USE_NETDEV_OPS
+#else
+#undef WL_USE_NETDEV_OPS
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) && defined(CONFIG_RFKILL)
+#define WL_CONFIG_RFKILL
+#else
+#undef WL_CONFIG_RFKILL
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 48))
+#define list_for_each(pos, head) \
+	for (pos = (head)->next; pos != (head); pos = pos->next)
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 13))
+#define pci_resource_start(dev, bar)	((dev)->base_address[(bar)])
+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 44))
+#define pci_resource_start(dev, bar)	((dev)->resource[(bar)].start)
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 23))
+#define pci_enable_device(dev) do { } while (0)
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 14))
+#define net_device device
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 42))
+
+/*
+ * DMA mapping
+ *
+ * See linux/Documentation/DMA-mapping.txt
+ */
+
+#ifndef PCI_DMA_TODEVICE
+#define	PCI_DMA_TODEVICE	1
+#define	PCI_DMA_FROMDEVICE	2
+#endif
+
+typedef u32 dma_addr_t;
+
+/* Pure 2^n version of get_order */
+static inline int get_order(unsigned long size)
+{
+	int order;
+
+	size = (size-1) >> (PAGE_SHIFT-1);
+	order = -1;
+	do {
+		size >>= 1;
+		order++;
+	} while (size);
+	return order;
+}
+
+static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
+                                         dma_addr_t *dma_handle)
+{
+	void *ret;
+	int gfp = GFP_ATOMIC | GFP_DMA;
+
+	ret = (void *)__get_free_pages(gfp, get_order(size));
+
+	if (ret != NULL) {
+		memset(ret, 0, size);
+		*dma_handle = virt_to_bus(ret);
+	}
+	return ret;
+}
+static inline void pci_free_consistent(struct pci_dev *hwdev, size_t size,
+                                       void *vaddr, dma_addr_t dma_handle)
+{
+	free_pages((unsigned long)vaddr, get_order(size));
+}
+#define pci_map_single(cookie, address, size, dir)	virt_to_bus(address)
+#define pci_unmap_single(cookie, address, size, dir)
+
+#endif /* DMA mapping */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 43))
+
+#define dev_kfree_skb_any(a)		dev_kfree_skb(a)
+#define netif_down(dev)			do { (dev)->start = 0; } while (0)
+
+/* pcmcia-cs provides its own netdevice compatibility layer */
+#ifndef _COMPAT_NETDEVICE_H
+
+/*
+ * SoftNet
+ *
+ * For pre-softnet kernels we need to tell the upper layer not to
+ * re-enter start_xmit() while we are in there. However softnet
+ * guarantees not to enter while we are in there so there is no need
+ * to do the netif_stop_queue() dance unless the transmit queue really
+ * gets stuck. This should also improve performance according to tests
+ * done by Aman Singla.
+ */
+
+#define dev_kfree_skb_irq(a)	dev_kfree_skb(a)
+#define netif_wake_queue(dev) \
+		do { clear_bit(0, &(dev)->tbusy); mark_bh(NET_BH); } while (0)
+#define netif_stop_queue(dev)	set_bit(0, &(dev)->tbusy)
+
+static inline void netif_start_queue(struct net_device *dev)
+{
+	dev->tbusy = 0;
+	dev->interrupt = 0;
+	dev->start = 1;
+}
+
+#define netif_queue_stopped(dev)	(dev)->tbusy
+#define netif_running(dev)		(dev)->start
+
+#endif /* _COMPAT_NETDEVICE_H */
+
+#define netif_device_attach(dev)	netif_start_queue(dev)
+#define netif_device_detach(dev)	netif_stop_queue(dev)
+
+/* 2.4.x renamed bottom halves to tasklets */
+#define tasklet_struct				tq_struct
+static inline void tasklet_schedule(struct tasklet_struct *tasklet)
+{
+	queue_task(tasklet, &tq_immediate);
+	mark_bh(IMMEDIATE_BH);
+}
+
+static inline void tasklet_init(struct tasklet_struct *tasklet,
+                                void (*func)(unsigned long),
+                                unsigned long data)
+{
+	tasklet->next = NULL;
+	tasklet->sync = 0;
+	tasklet->routine = (void (*)(void *))func;
+	tasklet->data = (void *)data;
+}
+#define tasklet_kill(tasklet)	{ do {} while (0); }
+
+/* 2.4.x introduced del_timer_sync() */
+#define del_timer_sync(timer) del_timer(timer)
+
+#else
+
+#define netif_down(dev)
+
+#endif /* SoftNet */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3))
+
+/*
+ * Emit code to initialise a tq_struct's routine and data pointers
+ */
+#define PREPARE_TQUEUE(_tq, _routine, _data)			\
+	do {							\
+		(_tq)->routine = _routine;			\
+		(_tq)->data = _data;				\
+	} while (0)
+
+/*
+ * Emit code to initialise all of a tq_struct
+ */
+#define INIT_TQUEUE(_tq, _routine, _data)			\
+	do {							\
+		INIT_LIST_HEAD(&(_tq)->list);			\
+		(_tq)->sync = 0;				\
+		PREPARE_TQUEUE((_tq), (_routine), (_data));	\
+	} while (0)
+
+#endif	/* LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3) */
+
+/* Power management related macro & routines */
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 9)
+#define	PCI_SAVE_STATE(a, b)	pci_save_state(a)
+#define	PCI_RESTORE_STATE(a, b)	pci_restore_state(a)
+#else
+#define	PCI_SAVE_STATE(a, b)	pci_save_state(a, b)
+#define	PCI_RESTORE_STATE(a, b)	pci_restore_state(a, b)
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 6))
+static inline int
+pci_save_state(struct pci_dev *dev, u32 *buffer)
+{
+	int i;
+	if (buffer) {
+		for (i = 0; i < 16; i++)
+			pci_read_config_dword(dev, i * 4, &buffer[i]);
+	}
+	return 0;
+}
+
+static inline int
+pci_restore_state(struct pci_dev *dev, u32 *buffer)
+{
+	int i;
+
+	if (buffer) {
+		for (i = 0; i < 16; i++)
+			pci_write_config_dword(dev, i * 4, buffer[i]);
+	}
+	/*
+	 * otherwise, write the context information we know from bootup.
+	 * This works around a problem where warm-booting from Windows
+	 * combined with a D3(hot)->D0 transition causes PCI config
+	 * header data to be forgotten.
+	 */
+	else {
+		for (i = 0; i < 6; i ++)
+			pci_write_config_dword(dev,
+			                       PCI_BASE_ADDRESS_0 + (i * 4),
+			                       pci_resource_start(dev, i));
+		pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
+	}
+	return 0;
+}
+#endif /* PCI power management */
+
+/* Old cp0 access macros deprecated in 2.4.19 */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 19))
+#define read_c0_count() read_32bit_cp0_register(CP0_COUNT)
+#endif
+
+/* Module refcount handled internally in 2.6.x */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
+#ifndef SET_MODULE_OWNER
+#define SET_MODULE_OWNER(dev)		do {} while (0)
+#define OLD_MOD_INC_USE_COUNT		MOD_INC_USE_COUNT
+#define OLD_MOD_DEC_USE_COUNT		MOD_DEC_USE_COUNT
+#else
+#define OLD_MOD_INC_USE_COUNT		do {} while (0)
+#define OLD_MOD_DEC_USE_COUNT		do {} while (0)
+#endif
+#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24) */
+#ifndef SET_MODULE_OWNER
+#define SET_MODULE_OWNER(dev)		do {} while (0)
+#endif
+#ifndef MOD_INC_USE_COUNT
+#define MOD_INC_USE_COUNT			do {} while (0)
+#endif
+#ifndef MOD_DEC_USE_COUNT
+#define MOD_DEC_USE_COUNT			do {} while (0)
+#endif
+#define OLD_MOD_INC_USE_COUNT		MOD_INC_USE_COUNT
+#define OLD_MOD_DEC_USE_COUNT		MOD_DEC_USE_COUNT
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24) */
+
+#ifndef SET_NETDEV_DEV
+#define SET_NETDEV_DEV(net, pdev)	do {} while (0)
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0))
+#ifndef HAVE_FREE_NETDEV
+#define free_netdev(dev)		kfree(dev)
+#endif
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0) */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+/* struct packet_type redefined in 2.6.x */
+#define af_packet_priv			data
+#endif
+
+/* suspend args */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
+#define DRV_SUSPEND_STATE_TYPE pm_message_t
+#else
+#define DRV_SUSPEND_STATE_TYPE uint32
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
+#define CHECKSUM_HW	CHECKSUM_PARTIAL
+#endif
+
+typedef struct {
+	void	*parent;  /* some external entity that the thread supposed to work for */
+	char	*proc_name;
+	struct	task_struct *p_task;
+	long	thr_pid;
+	int		prio; /* priority */
+	struct	semaphore sema;
+	int	terminated;
+	struct	completion completed;
+	spinlock_t	spinlock;
+	int		up_cnt;
+} tsk_ctl_t;
+
+
+/* requires  tsk_ctl_t tsk  argument, the caller's priv data is passed in owner ptr */
+/* note this macro assumes there may be only one context waiting on thread's completion */
+#ifdef DHD_DEBUG
+#define DBG_THR(x) printk x
+#else
+#define DBG_THR(x)
+#endif
+
+static inline bool binary_sema_down(tsk_ctl_t *tsk)
+{
+	if (down_interruptible(&tsk->sema) == 0) {
+		unsigned long flags = 0;
+		spin_lock_irqsave(&tsk->spinlock, flags);
+		if (tsk->up_cnt == 1)
+			tsk->up_cnt--;
+		else {
+			DBG_THR(("dhd_dpc_thread: Unexpected up_cnt %d\n", tsk->up_cnt));
+		}
+		spin_unlock_irqrestore(&tsk->spinlock, flags);
+		return false;
+	} else
+		return true;
+}
+
+static inline bool binary_sema_up(tsk_ctl_t *tsk)
+{
+	bool sem_up = false;
+	unsigned long flags = 0;
+
+	spin_lock_irqsave(&tsk->spinlock, flags);
+	if (tsk->up_cnt == 0) {
+		tsk->up_cnt++;
+		sem_up = true;
+	} else if (tsk->up_cnt == 1) {
+		/* dhd_sched_dpc: dpc is alread up! */
+	} else
+		DBG_THR(("dhd_sched_dpc: unexpected up cnt %d!\n", tsk->up_cnt));
+
+	spin_unlock_irqrestore(&tsk->spinlock, flags);
+
+	if (sem_up)
+		up(&tsk->sema);
+
+	return sem_up;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#define SMP_RD_BARRIER_DEPENDS(x) smp_read_barrier_depends(x)
+#else
+#define SMP_RD_BARRIER_DEPENDS(x) smp_rmb(x)
+#endif
+
+#define PROC_START(thread_func, owner, tsk_ctl, flags, name) \
+{ \
+	sema_init(&((tsk_ctl)->sema), 0); \
+	init_completion(&((tsk_ctl)->completed)); \
+	(tsk_ctl)->parent = owner; \
+	(tsk_ctl)->proc_name = name;  \
+	(tsk_ctl)->terminated = FALSE; \
+	(tsk_ctl)->p_task  = kthread_run(thread_func, tsk_ctl, (char*)name); \
+	(tsk_ctl)->thr_pid = (tsk_ctl)->p_task->pid; \
+	spin_lock_init(&((tsk_ctl)->spinlock)); \
+	DBG_THR(("%s(): thread:%s:%lx started\n", __FUNCTION__, \
+		(tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
+}
+
+#define PROC_STOP(tsk_ctl) \
+{ \
+	(tsk_ctl)->terminated = TRUE; \
+	smp_wmb(); \
+	up(&((tsk_ctl)->sema));	\
+	wait_for_completion(&((tsk_ctl)->completed)); \
+	DBG_THR(("%s(): thread:%s:%lx terminated OK\n", __FUNCTION__, \
+			 (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
+	(tsk_ctl)->thr_pid = -1; \
+}
+
+/*  ----------------------- */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
+#define KILL_PROC(nr, sig) \
+{ \
+struct task_struct *tsk; \
+struct pid *pid;    \
+pid = find_get_pid((pid_t)nr);    \
+tsk = pid_task(pid, PIDTYPE_PID);    \
+if (tsk) send_sig(sig, tsk, 1); \
+}
+#else
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
+	KERNEL_VERSION(2, 6, 30))
+#define KILL_PROC(pid, sig) \
+{ \
+	struct task_struct *tsk; \
+	tsk = find_task_by_vpid(pid); \
+	if (tsk) send_sig(sig, tsk, 1); \
+}
+#else
+#define KILL_PROC(pid, sig) \
+{ \
+	kill_proc(pid, sig, 1); \
+}
+#endif
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#include <linux/time.h>
+#include <linux/wait.h>
+#else
+#include <linux/sched.h>
+
+#define __wait_event_interruptible_timeout(wq, condition, ret)		\
+do {									\
+	wait_queue_t __wait;						\
+	init_waitqueue_entry(&__wait, current);				\
+									\
+	add_wait_queue(&wq, &__wait);					\
+	for (;;) {							\
+		set_current_state(TASK_INTERRUPTIBLE);			\
+		if (condition)						\
+			break;						\
+		if (!signal_pending(current)) {				\
+			ret = schedule_timeout(ret);			\
+			if (!ret)					\
+				break;					\
+			continue;					\
+		}							\
+		ret = -ERESTARTSYS;					\
+		break;							\
+	}								\
+	current->state = TASK_RUNNING;					\
+	remove_wait_queue(&wq, &__wait);				\
+} while (0)
+
+#define wait_event_interruptible_timeout(wq, condition, timeout)	\
+({									\
+	long __ret = timeout;						\
+	if (!(condition))						\
+		__wait_event_interruptible_timeout(wq, condition, __ret); \
+	__ret;								\
+})
+
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */
+
+/*
+For < 2.6.24, wl creates its own netdev but doesn't
+align the priv area like the genuine alloc_netdev().
+Since netdev_priv() always gives us the aligned address, it will
+not match our unaligned address for < 2.6.24
+*/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
+#define DEV_PRIV(dev)	(dev->priv)
+#else
+#define DEV_PRIV(dev)	netdev_priv(dev)
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
+#define WL_ISR(i, d, p)         wl_isr((i), (d))
+#else
+#define WL_ISR(i, d, p)         wl_isr((i), (d), (p))
+#endif  /* < 2.6.20 */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+#define netdev_priv(dev) dev->priv
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+#define CAN_SLEEP()	((!in_atomic() && !irqs_disabled()))
+#else
+#define CAN_SLEEP()	(FALSE)
+#endif
+
+#define KMALLOC_FLAG (CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC)
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+#define RANDOM32	prandom_u32
+#else
+#define RANDOM32	random32
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+#define SRANDOM32(entropy)	prandom_seed(entropy)
+#else
+#define SRANDOM32(entropy)	srandom32(entropy)
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */
+
+/*
+ * Overide latest kfifo functions with
+ * older version to work on older kernels
+ */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33))
+#define kfifo_in_spinlocked(a, b, c, d)		kfifo_put(a, (u8 *)b, c)
+#define kfifo_out_spinlocked(a, b, c, d)	kfifo_get(a, (u8 *)b, c)
+#define kfifo_esize(a)				1
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 32)) && \
+	(LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)) &&	!defined(WL_COMPAT_WIRELESS)
+#define kfifo_in_spinlocked(a, b, c, d)		kfifo_in_locked(a, b, c, d)
+#define kfifo_out_spinlocked(a, b, c, d)	kfifo_out_locked(a, b, c, d)
+#define kfifo_esize(a)				1
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) */
+
+#endif /* _linuxver_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/miniopt.h b/drivers/net/wireless/bcmdhd/include/miniopt.h
new file mode 100644
index 0000000..73212a8
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/miniopt.h
@@ -0,0 +1,77 @@
+/*
+ * Command line options parser.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: miniopt.h 241182 2011-02-17 21:50:03Z $
+ */
+
+
+#ifndef MINI_OPT_H
+#define MINI_OPT_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* ---- Include Files ---------------------------------------------------- */
+/* ---- Constants and Types ---------------------------------------------- */
+
+#define MINIOPT_MAXKEY	128	/* Max options */
+typedef struct miniopt {
+
+	/* These are persistent after miniopt_init() */
+	const char* name;		/* name for prompt in error strings */
+	const char* flags;		/* option chars that take no args */
+	bool longflags;		/* long options may be flags */
+	bool opt_end;		/* at end of options (passed a "--") */
+
+	/* These are per-call to miniopt() */
+
+	int consumed;		/* number of argv entries cosumed in
+				 * the most recent call to miniopt()
+				 */
+	bool positional;
+	bool good_int;		/* 'val' member is the result of a sucessful
+				 * strtol conversion of the option value
+				 */
+	char opt;
+	char key[MINIOPT_MAXKEY];
+	char* valstr;		/* positional param, or value for the option,
+				 * or null if the option had
+				 * no accompanying value
+				 */
+	uint uval;		/* strtol translation of valstr */
+	int  val;		/* strtol translation of valstr */
+} miniopt_t;
+
+void miniopt_init(miniopt_t *t, const char* name, const char* flags, bool longflags);
+int miniopt(miniopt_t *t, char **argv);
+
+
+/* ---- Variable Externs ------------------------------------------------- */
+/* ---- Function Prototypes ---------------------------------------------- */
+
+
+#ifdef __cplusplus
+	}
+#endif
+
+#endif  /* MINI_OPT_H  */
diff --git a/drivers/net/wireless/bcmdhd/include/msgtrace.h b/drivers/net/wireless/bcmdhd/include/msgtrace.h
new file mode 100644
index 0000000..228c045
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/msgtrace.h
@@ -0,0 +1,78 @@
+/*
+ * Trace messages sent over HBUS
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: msgtrace.h 439681 2013-11-27 15:39:50Z $
+ */
+
+#ifndef	_MSGTRACE_H
+#define	_MSGTRACE_H
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+/* for osl_t */
+#include <osl_decl.h>
+#define MSGTRACE_VERSION 1
+
+/* Message trace header */
+typedef BWL_PRE_PACKED_STRUCT struct msgtrace_hdr {
+	uint8	version;
+	uint8   trace_type;
+#define MSGTRACE_HDR_TYPE_MSG 0
+#define MSGTRACE_HDR_TYPE_LOG 1
+	uint16	len;	/* Len of the trace */
+	uint32	seqnum;	/* Sequence number of message. Useful if the messsage has been lost
+			 * because of DMA error or a bus reset (ex: SDIO Func2)
+			 */
+	/* Msgtrace type  only */
+	uint32  discarded_bytes;  /* Number of discarded bytes because of trace overflow  */
+	uint32  discarded_printf; /* Number of discarded printf because of trace overflow */
+} BWL_POST_PACKED_STRUCT msgtrace_hdr_t;
+
+#define MSGTRACE_HDRLEN 	sizeof(msgtrace_hdr_t)
+
+/* The hbus driver generates traces when sending a trace message. This causes endless traces.
+ * This flag must be set to TRUE in any hbus traces. The flag is reset in the function msgtrace_put.
+ * This prevents endless traces but generates hasardous lost of traces only in bus device code.
+ * It is recommendat to set this flag in macro SD_TRACE but not in SD_ERROR for avoiding missing
+ * hbus error traces. hbus error trace should not generates endless traces.
+ */
+extern bool msgtrace_hbus_trace;
+
+typedef void (*msgtrace_func_send_t)(void *hdl1, void *hdl2, uint8 *hdr,
+                                     uint16 hdrlen, uint8 *buf, uint16 buflen);
+extern void msgtrace_start(void);
+extern void msgtrace_stop(void);
+extern int msgtrace_sent(void);
+extern void msgtrace_put(char *buf, int count);
+extern void msgtrace_init(void *hdl1, void *hdl2, msgtrace_func_send_t func_send);
+extern bool msgtrace_event_enabled(void);
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif	/* _MSGTRACE_H */
diff --git a/drivers/net/wireless/bcmdhd/include/osl.h b/drivers/net/wireless/bcmdhd/include/osl.h
new file mode 100644
index 0000000..1e0455a
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/osl.h
@@ -0,0 +1,149 @@
+/*
+ * OS Abstraction Layer
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: osl.h 474639 2014-05-01 23:52:31Z $
+ */
+
+#ifndef _osl_h_
+#define _osl_h_
+
+#include <osl_decl.h>
+
+#define OSL_PKTTAG_SZ	32 /* Size of PktTag */
+
+/* Drivers use PKTFREESETCB to register a callback function when a packet is freed by OSL */
+typedef void (*pktfree_cb_fn_t)(void *ctx, void *pkt, unsigned int status);
+
+/* Drivers use REGOPSSET() to register register read/write funcitons */
+typedef unsigned int (*osl_rreg_fn_t)(void *ctx, volatile void *reg, unsigned int size);
+typedef void  (*osl_wreg_fn_t)(void *ctx, volatile void *reg, unsigned int val, unsigned int size);
+
+
+
+#include <linux_osl.h>
+
+#ifndef PKTDBG_TRACE
+#define PKTDBG_TRACE(osh, pkt, bit)	BCM_REFERENCE(osh)
+#endif
+
+#define PKTCTFMAP(osh, p)		BCM_REFERENCE(osh)
+
+/* --------------------------------------------------------------------------
+** Register manipulation macros.
+*/
+
+#define	SET_REG(osh, r, mask, val)	W_REG((osh), (r), ((R_REG((osh), r) & ~(mask)) | (val)))
+
+#ifndef AND_REG
+#define AND_REG(osh, r, v)		W_REG(osh, (r), R_REG(osh, r) & (v))
+#endif   /* !AND_REG */
+
+#ifndef OR_REG
+#define OR_REG(osh, r, v)		W_REG(osh, (r), R_REG(osh, r) | (v))
+#endif   /* !OR_REG */
+
+#if !defined(OSL_SYSUPTIME)
+#define OSL_SYSUPTIME() (0)
+#define OSL_SYSUPTIME_SUPPORT FALSE
+#else
+#define OSL_SYSUPTIME_SUPPORT TRUE
+#endif /* OSL_SYSUPTIME */
+
+#if !defined(PKTC) && !defined(PKTC_DONGLE)
+#define	PKTCGETATTR(skb)	(0)
+#define	PKTCSETATTR(skb, f, p, b) BCM_REFERENCE(skb)
+#define	PKTCCLRATTR(skb)	BCM_REFERENCE(skb)
+#define	PKTCCNT(skb)		(1)
+#define	PKTCLEN(skb)		PKTLEN(NULL, skb)
+#define	PKTCGETFLAGS(skb)	(0)
+#define	PKTCSETFLAGS(skb, f)	BCM_REFERENCE(skb)
+#define	PKTCCLRFLAGS(skb)	BCM_REFERENCE(skb)
+#define	PKTCFLAGS(skb)		(0)
+#define	PKTCSETCNT(skb, c)	BCM_REFERENCE(skb)
+#define	PKTCINCRCNT(skb)	BCM_REFERENCE(skb)
+#define	PKTCADDCNT(skb, c)	BCM_REFERENCE(skb)
+#define	PKTCSETLEN(skb, l)	BCM_REFERENCE(skb)
+#define	PKTCADDLEN(skb, l)	BCM_REFERENCE(skb)
+#define	PKTCSETFLAG(skb, fb)	BCM_REFERENCE(skb)
+#define	PKTCCLRFLAG(skb, fb)	BCM_REFERENCE(skb)
+#define	PKTCLINK(skb)		NULL
+#define	PKTSETCLINK(skb, x)	BCM_REFERENCE(skb)
+#define FOREACH_CHAINED_PKT(skb, nskb) \
+	for ((nskb) = NULL; (skb) != NULL; (skb) = (nskb))
+#define	PKTCFREE		PKTFREE
+#define PKTCENQTAIL(h, t, p) \
+do { \
+	if ((t) == NULL) { \
+		(h) = (t) = (p); \
+	} \
+} while (0)
+#endif /* !linux || !PKTC */
+
+#if !defined(HNDCTF) && !defined(PKTC_TX_DONGLE)
+#define PKTSETCHAINED(osh, skb)		BCM_REFERENCE(osh)
+#define PKTCLRCHAINED(osh, skb)		BCM_REFERENCE(osh)
+#define PKTISCHAINED(skb)		FALSE
+#endif
+
+/* Lbuf with fraglist */
+#define PKTFRAGPKTID(osh, lb)		(0)
+#define PKTSETFRAGPKTID(osh, lb, id)	BCM_REFERENCE(osh)
+#define PKTFRAGTOTNUM(osh, lb)		(0)
+#define PKTSETFRAGTOTNUM(osh, lb, tot)	BCM_REFERENCE(osh)
+#define PKTFRAGTOTLEN(osh, lb)		(0)
+#define PKTSETFRAGTOTLEN(osh, lb, len)	BCM_REFERENCE(osh)
+#define PKTIFINDEX(osh, lb)		(0)
+#define PKTSETIFINDEX(osh, lb, idx)	BCM_REFERENCE(osh)
+#define	PKTGETLF(osh, len, send, lbuf_type)	(0)
+
+/* in rx path, reuse totlen as used len */
+#define PKTFRAGUSEDLEN(osh, lb)			(0)
+#define PKTSETFRAGUSEDLEN(osh, lb, len)		BCM_REFERENCE(osh)
+
+#define PKTFRAGLEN(osh, lb, ix)			(0)
+#define PKTSETFRAGLEN(osh, lb, ix, len)		BCM_REFERENCE(osh)
+#define PKTFRAGDATA_LO(osh, lb, ix)		(0)
+#define PKTSETFRAGDATA_LO(osh, lb, ix, addr)	BCM_REFERENCE(osh)
+#define PKTFRAGDATA_HI(osh, lb, ix)		(0)
+#define PKTSETFRAGDATA_HI(osh, lb, ix, addr)	BCM_REFERENCE(osh)
+
+/* RX FRAG */
+#define PKTISRXFRAG(osh, lb)    	(0)
+#define PKTSETRXFRAG(osh, lb)		BCM_REFERENCE(osh)
+#define PKTRESETRXFRAG(osh, lb)		BCM_REFERENCE(osh)
+
+/* TX FRAG */
+#define PKTISTXFRAG(osh, lb)		(0)
+#define PKTSETTXFRAG(osh, lb)		BCM_REFERENCE(osh)
+
+/* Need Rx completion used for AMPDU reordering */
+#define PKTNEEDRXCPL(osh, lb)           (TRUE)
+#define PKTSETNORXCPL(osh, lb)          BCM_REFERENCE(osh)
+#define PKTRESETNORXCPL(osh, lb)        BCM_REFERENCE(osh)
+
+#define PKTISFRAG(osh, lb)		(0)
+#define PKTFRAGISCHAINED(osh, i)	(0)
+/* TRIM Tail bytes from lfrag */
+#define PKTFRAG_TRIM_TAILBYTES(osh, p, len)	PKTSETLEN(osh, p, PKTLEN(osh, p) - len)
+
+#endif	/* _osl_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/osl_decl.h b/drivers/net/wireless/bcmdhd/include/osl_decl.h
new file mode 100644
index 0000000..aafad10
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/osl_decl.h
@@ -0,0 +1,34 @@
+/*
+ * osl forward declarations
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id$
+ */
+
+#ifndef _osl_decl_h_
+#define _osl_decl_h_
+
+/* osl handle type forward declaration */
+typedef struct osl_info osl_t;
+typedef struct osl_dmainfo osldma_t;
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/packed_section_end.h b/drivers/net/wireless/bcmdhd/include/packed_section_end.h
new file mode 100644
index 0000000..08c2d56
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/packed_section_end.h
@@ -0,0 +1,59 @@
+/*
+ * Declare directives for structure packing. No padding will be provided
+ * between the members of packed structures, and therefore, there is no
+ * guarantee that structure members will be aligned.
+ *
+ * Declaring packed structures is compiler specific. In order to handle all
+ * cases, packed structures should be delared as:
+ *
+ * #include <packed_section_start.h>
+ *
+ * typedef BWL_PRE_PACKED_STRUCT struct foobar_t {
+ *    some_struct_members;
+ * } BWL_POST_PACKED_STRUCT foobar_t;
+ *
+ * #include <packed_section_end.h>
+ *
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: packed_section_end.h 437241 2013-11-18 07:39:24Z $
+ */
+
+
+/* Error check - BWL_PACKED_SECTION is defined in packed_section_start.h
+ * and undefined in packed_section_end.h. If it is NOT defined at this
+ * point, then there is a missing include of packed_section_start.h.
+ */
+#ifdef BWL_PACKED_SECTION
+	#undef BWL_PACKED_SECTION
+#else
+	#error "BWL_PACKED_SECTION is NOT defined!"
+#endif
+
+
+
+
+/* Compiler-specific directives for structure packing are declared in
+ * packed_section_start.h. This marks the end of the structure packing section,
+ * so, undef them here.
+ */
+#undef	BWL_PRE_PACKED_STRUCT
+#undef	BWL_POST_PACKED_STRUCT
diff --git a/drivers/net/wireless/bcmdhd/include/packed_section_start.h b/drivers/net/wireless/bcmdhd/include/packed_section_start.h
new file mode 100644
index 0000000..52dec03
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/packed_section_start.h
@@ -0,0 +1,63 @@
+/*
+ * Declare directives for structure packing. No padding will be provided
+ * between the members of packed structures, and therefore, there is no
+ * guarantee that structure members will be aligned.
+ *
+ * Declaring packed structures is compiler specific. In order to handle all
+ * cases, packed structures should be delared as:
+ *
+ * #include <packed_section_start.h>
+ *
+ * typedef BWL_PRE_PACKED_STRUCT struct foobar_t {
+ *    some_struct_members;
+ * } BWL_POST_PACKED_STRUCT foobar_t;
+ *
+ * #include <packed_section_end.h>
+ *
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: packed_section_start.h 437241 2013-11-18 07:39:24Z $
+ */
+
+
+/* Error check - BWL_PACKED_SECTION is defined in packed_section_start.h
+ * and undefined in packed_section_end.h. If it is already defined at this
+ * point, then there is a missing include of packed_section_end.h.
+ */
+#ifdef BWL_PACKED_SECTION
+	#error "BWL_PACKED_SECTION is already defined!"
+#else
+	#define BWL_PACKED_SECTION
+#endif
+
+
+
+
+/* Declare compiler-specific directives for structure packing. */
+#if defined(__GNUC__) || defined(__lint)
+	#define	BWL_PRE_PACKED_STRUCT
+	#define	BWL_POST_PACKED_STRUCT	__attribute__ ((packed))
+#elif defined(__CC_ARM)
+	#define	BWL_PRE_PACKED_STRUCT	__packed
+	#define	BWL_POST_PACKED_STRUCT
+#else
+	#error "Unknown compiler!"
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/pcicfg.h b/drivers/net/wireless/bcmdhd/include/pcicfg.h
new file mode 100644
index 0000000..3390e77
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/pcicfg.h
@@ -0,0 +1,121 @@
+/*
+ * pcicfg.h: PCI configuration constants and structures.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: pcicfg.h 465082 2014-03-26 17:37:28Z $
+ */
+
+#ifndef	_h_pcicfg_
+#define	_h_pcicfg_
+
+/* A structure for the config registers is nice, but in most
+ * systems the config space is not memory mapped, so we need
+ * field offsetts. :-(
+ */
+#define	PCI_CFG_VID		0
+#define	PCI_CFG_DID		2
+#define	PCI_CFG_CMD		4
+#define	PCI_CFG_STAT		6
+#define	PCI_CFG_REV		8
+#define	PCI_CFG_PROGIF		9
+#define	PCI_CFG_SUBCL		0xa
+#define	PCI_CFG_BASECL		0xb
+#define	PCI_CFG_CLSZ		0xc
+#define	PCI_CFG_LATTIM		0xd
+#define	PCI_CFG_HDR		0xe
+#define	PCI_CFG_BIST		0xf
+#define	PCI_CFG_BAR0		0x10
+#define	PCI_CFG_BAR1		0x14
+#define	PCI_CFG_BAR2		0x18
+#define	PCI_CFG_BAR3		0x1c
+#define	PCI_CFG_BAR4		0x20
+#define	PCI_CFG_BAR5		0x24
+#define	PCI_CFG_CIS		0x28
+#define	PCI_CFG_SVID		0x2c
+#define	PCI_CFG_SSID		0x2e
+#define	PCI_CFG_ROMBAR		0x30
+#define PCI_CFG_CAPPTR		0x34
+#define	PCI_CFG_INT		0x3c
+#define	PCI_CFG_PIN		0x3d
+#define	PCI_CFG_MINGNT		0x3e
+#define	PCI_CFG_MAXLAT		0x3f
+#define	PCI_CFG_DEVCTRL		0xd8
+#define	PCI_BAR0_WIN		0x80	/* backplane addres space accessed by BAR0 */
+#define	PCI_BAR1_WIN		0x84	/* backplane addres space accessed by BAR1 */
+#define	PCI_SPROM_CONTROL	0x88	/* sprom property control */
+#define	PCI_BAR1_CONTROL	0x8c	/* BAR1 region burst control */
+#define	PCI_INT_STATUS		0x90	/* PCI and other cores interrupts */
+#define	PCI_INT_MASK		0x94	/* mask of PCI and other cores interrupts */
+#define PCI_TO_SB_MB		0x98	/* signal backplane interrupts */
+#define PCI_BACKPLANE_ADDR	0xa0	/* address an arbitrary location on the system backplane */
+#define PCI_BACKPLANE_DATA	0xa4	/* data at the location specified by above address */
+#define	PCI_CLK_CTL_ST		0xa8	/* pci config space clock control/status (>=rev14) */
+#define	PCI_BAR0_WIN2		0xac	/* backplane addres space accessed by second 4KB of BAR0 */
+#define	PCI_GPIO_IN		0xb0	/* pci config space gpio input (>=rev3) */
+#define	PCI_GPIO_OUT		0xb4	/* pci config space gpio output (>=rev3) */
+#define	PCI_GPIO_OUTEN		0xb8	/* pci config space gpio output enable (>=rev3) */
+#define	PCI_L1SS_CTRL2		0x24c	/* The L1 PM Substates Control register */
+
+/* Private Registers */
+#define	PCI_STAT_CTRL		0xa80
+#define	PCI_L0_EVENTCNT		0xa84
+#define	PCI_L0_STATETMR		0xa88
+#define	PCI_L1_EVENTCNT		0xa8c
+#define	PCI_L1_STATETMR		0xa90
+#define	PCI_L1_1_EVENTCNT	0xa94
+#define	PCI_L1_1_STATETMR	0xa98
+#define	PCI_L1_2_EVENTCNT	0xa9c
+#define	PCI_L1_2_STATETMR	0xaa0
+#define	PCI_L2_EVENTCNT		0xaa4
+#define	PCI_L2_STATETMR		0xaa8
+
+#define	PCI_PMCR_REFUP		0x1814	/* Trefup time */
+#define	PCI_PMCR_REFUP_EXT	0x1818	/* Trefup extend Max */
+#define PCI_TPOWER_SCALE_MASK 0x3
+#define PCI_TPOWER_SCALE_SHIFT 3 /* 0:1 is scale and 2 is rsvd */
+
+
+#define	PCI_BAR0_SHADOW_OFFSET	(2 * 1024)	/* bar0 + 2K accesses sprom shadow (in pci core) */
+#define	PCI_BAR0_SPROM_OFFSET	(4 * 1024)	/* bar0 + 4K accesses external sprom */
+#define	PCI_BAR0_PCIREGS_OFFSET	(6 * 1024)	/* bar0 + 6K accesses pci core registers */
+#define	PCI_BAR0_PCISBR_OFFSET	(4 * 1024)	/* pci core SB registers are at the end of the
+						 * 8KB window, so their address is the "regular"
+						 * address plus 4K
+						 */
+/*
+ * PCIE GEN2 changed some of the above locations for
+ * Bar0WrapperBase, SecondaryBAR0Window and SecondaryBAR0WrapperBase
+ * BAR0 maps 32K of register space
+*/
+#define PCIE2_BAR0_WIN2		0x70 /* backplane addres space accessed by second 4KB of BAR0 */
+#define PCIE2_BAR0_CORE2_WIN	0x74 /* backplane addres space accessed by second 4KB of BAR0 */
+#define PCIE2_BAR0_CORE2_WIN2	0x78 /* backplane addres space accessed by second 4KB of BAR0 */
+
+#define PCI_BAR0_WINSZ		(16 * 1024)	/* bar0 window size Match with corerev 13 */
+/* On pci corerev >= 13 and all pcie, the bar0 is now 16KB and it maps: */
+#define	PCI_16KB0_PCIREGS_OFFSET (8 * 1024)	/* bar0 + 8K accesses pci/pcie core registers */
+#define	PCI_16KB0_CCREGS_OFFSET	(12 * 1024)	/* bar0 + 12K accesses chipc core registers */
+#define PCI_16KBB0_WINSZ	(16 * 1024)	/* bar0 window size */
+
+
+#define PCI_CONFIG_SPACE_SIZE	256
+#endif	/* _h_pcicfg_ */
diff --git a/drivers/net/wireless/bcmdhd/include/pcie_core.h b/drivers/net/wireless/bcmdhd/include/pcie_core.h
new file mode 100644
index 0000000..242a9a2
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/pcie_core.h
@@ -0,0 +1,636 @@
+/*
+ * BCM43XX PCIE core hardware definitions.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: pcie_core.h 468449 2014-04-07 21:50:10Z $
+ */
+#ifndef	_PCIE_CORE_H
+#define	_PCIE_CORE_H
+
+#include <sbhnddma.h>
+#include <siutils.h>
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define	_PADLINE(line)	pad ## line
+#define	_XSTR(line)	_PADLINE(line)
+#define	PAD		_XSTR(__LINE__)
+#endif
+
+/* PCIE Enumeration space offsets */
+#define  PCIE_CORE_CONFIG_OFFSET	0x0
+#define  PCIE_FUNC0_CONFIG_OFFSET	0x400
+#define  PCIE_FUNC1_CONFIG_OFFSET	0x500
+#define  PCIE_FUNC2_CONFIG_OFFSET	0x600
+#define  PCIE_FUNC3_CONFIG_OFFSET	0x700
+#define  PCIE_SPROM_SHADOW_OFFSET	0x800
+#define  PCIE_SBCONFIG_OFFSET		0xE00
+
+
+#define PCIEDEV_MAX_DMAS			4
+
+/* PCIE Bar0 Address Mapping. Each function maps 16KB config space */
+#define PCIE_DEV_BAR0_SIZE		0x4000
+#define PCIE_BAR0_WINMAPCORE_OFFSET	0x0
+#define PCIE_BAR0_EXTSPROM_OFFSET	0x1000
+#define PCIE_BAR0_PCIECORE_OFFSET	0x2000
+#define PCIE_BAR0_CCCOREREG_OFFSET	0x3000
+
+/* different register spaces to access thr'u pcie indirect access */
+#define PCIE_CONFIGREGS 	1		/* Access to config space */
+#define PCIE_PCIEREGS 		2		/* Access to pcie registers */
+
+/* dma regs to control the flow between host2dev and dev2host  */
+typedef struct pcie_devdmaregs {
+	dma64regs_t	tx;
+	uint32		PAD[2];
+	dma64regs_t	rx;
+	uint32		PAD[2];
+} pcie_devdmaregs_t;
+
+#define PCIE_DB_HOST2DEV_0		0x1
+#define PCIE_DB_HOST2DEV_1		0x2
+#define PCIE_DB_DEV2HOST_0		0x3
+#define PCIE_DB_DEV2HOST_1		0x4
+
+/* door bell register sets */
+typedef struct pcie_doorbell {
+	uint32		host2dev_0;
+	uint32		host2dev_1;
+	uint32		dev2host_0;
+	uint32		dev2host_1;
+} pcie_doorbell_t;
+
+/* SB side: PCIE core and host control registers */
+typedef struct sbpcieregs {
+	uint32 control;		/* host mode only */
+	uint32 iocstatus;	/* PCIE2: iostatus */
+	uint32 PAD[1];
+	uint32 biststatus;	/* bist Status: 0x00C */
+	uint32 gpiosel;		/* PCIE gpio sel: 0x010 */
+	uint32 gpioouten;	/* PCIE gpio outen: 0x14 */
+	uint32 PAD[2];
+	uint32 intstatus;	/* Interrupt status: 0x20 */
+	uint32 intmask;		/* Interrupt mask: 0x24 */
+	uint32 sbtopcimailbox;	/* sb to pcie mailbox: 0x028 */
+	uint32 obffcontrol;	/* PCIE2: 0x2C */
+	uint32 obffintstatus;	/* PCIE2: 0x30 */
+	uint32 obffdatastatus;	/* PCIE2: 0x34 */
+	uint32 PAD[2];
+	uint32 errlog;		/* PCIE2: 0x40 */
+	uint32 errlogaddr;	/* PCIE2: 0x44 */
+	uint32 mailboxint;	/* PCIE2: 0x48 */
+	uint32 mailboxintmsk; /* PCIE2: 0x4c */
+	uint32 ltrspacing;	/* PCIE2: 0x50 */
+	uint32 ltrhysteresiscnt;	/* PCIE2: 0x54 */
+	uint32 PAD[42];
+
+	uint32 sbtopcie0;	/* sb to pcie translation 0: 0x100 */
+	uint32 sbtopcie1;	/* sb to pcie translation 1: 0x104 */
+	uint32 sbtopcie2;	/* sb to pcie translation 2: 0x108 */
+	uint32 PAD[5];
+
+	/* pcie core supports in direct access to config space */
+	uint32 configaddr;	/* pcie config space access: Address field: 0x120 */
+	uint32 configdata;	/* pcie config space access: Data field: 0x124 */
+	union {
+		struct {
+			/* mdio access to serdes */
+			uint32 mdiocontrol;	/* controls the mdio access: 0x128 */
+			uint32 mdiodata;	/* Data to the mdio access: 0x12c */
+			/* pcie protocol phy/dllp/tlp register indirect access mechanism */
+			uint32 pcieindaddr; /* indirect access to the internal register: 0x130 */
+			uint32 pcieinddata;	/* Data to/from the internal regsiter: 0x134 */
+			uint32 clkreqenctrl;	/* >= rev 6, Clkreq rdma control : 0x138 */
+			uint32 PAD[177];
+		} pcie1;
+		struct {
+			/* mdio access to serdes */
+			uint32 mdiocontrol;	/* controls the mdio access: 0x128 */
+			uint32 mdiowrdata;	/* write data to mdio 0x12C */
+			uint32 mdiorddata;	/* read data to mdio 0x130 */
+			uint32	PAD[3]; 	/* 0x134-0x138-0x13c */
+			/* door bell registers available from gen2 rev5 onwards */
+			pcie_doorbell_t	   dbls[PCIEDEV_MAX_DMAS]; /* 0x140 - 0x17F */
+			uint32	dataintf;	/* 0x180 */
+			uint32  PAD[1];		/* 0x184 */
+			uint32	d2h_intrlazy_0; /* 0x188 */
+			uint32	h2d_intrlazy_0; /* 0x18c */
+			uint32  h2d_intstat_0;  /* 0x190 */
+			uint32  h2d_intmask_0;	/* 0x194 */
+			uint32  d2h_intstat_0;  /* 0x198 */
+			uint32  d2h_intmask_0;  /* 0x19c */
+			uint32	ltr_state;	/* 0x1A0 */
+			uint32	pwr_int_status;	/* 0x1A4 */
+			uint32	pwr_int_mask;	/* 0x1A8 */
+			uint32  PAD[21]; 	/* 0x1AC - 0x200 */
+			pcie_devdmaregs_t  h2d0_dmaregs; /* 0x200 - 0x23c */
+			pcie_devdmaregs_t  d2h0_dmaregs; /* 0x240 - 0x27c */
+			pcie_devdmaregs_t  h2d1_dmaregs; /* 0x280 - 0x2bc */
+			pcie_devdmaregs_t  d2h1_dmaregs; /* 0x2c0 - 0x2fc */
+			pcie_devdmaregs_t  h2d2_dmaregs; /* 0x300 - 0x33c */
+			pcie_devdmaregs_t  d2h2_dmaregs; /* 0x340 - 0x37c */
+			pcie_devdmaregs_t  h2d3_dmaregs; /* 0x380 - 0x3bc */
+			pcie_devdmaregs_t  d2h3_dmaregs; /* 0x3c0 - 0x3fc */
+		} pcie2;
+	} u;
+	uint32 pciecfg[4][64];	/* 0x400 - 0x7FF, PCIE Cfg Space */
+	uint16 sprom[64];	/* SPROM shadow Area */
+} sbpcieregs_t;
+
+/* PCI control */
+#define PCIE_RST_OE	0x01	/* When set, drives PCI_RESET out to pin */
+#define PCIE_RST	0x02	/* Value driven out to pin */
+#define PCIE_SPERST	0x04	/* SurvivePeRst */
+#define PCIE_DISABLE_L1CLK_GATING	0x10
+#define PCIE_DLYPERST	0x100	/* Delay PeRst to CoE Core */
+#define PCIE_DISSPROMLD	0x200	/* DisableSpromLoadOnPerst */
+#define PCIE_WakeModeL2	0x1000	/* Wake on L2 */
+
+#define	PCIE_CFGADDR	0x120	/* offsetof(configaddr) */
+#define	PCIE_CFGDATA	0x124	/* offsetof(configdata) */
+
+/* Interrupt status/mask */
+#define PCIE_INTA	0x01	/* PCIE INTA message is received */
+#define PCIE_INTB	0x02	/* PCIE INTB message is received */
+#define PCIE_INTFATAL	0x04	/* PCIE INTFATAL message is received */
+#define PCIE_INTNFATAL	0x08	/* PCIE INTNONFATAL message is received */
+#define PCIE_INTCORR	0x10	/* PCIE INTCORR message is received */
+#define PCIE_INTPME	0x20	/* PCIE INTPME message is received */
+#define PCIE_PERST	0x40	/* PCIE Reset Interrupt */
+
+#define PCIE_INT_MB_FN0_0 0x0100 /* PCIE to SB Mailbox int Fn0.0 is received */
+#define PCIE_INT_MB_FN0_1 0x0200 /* PCIE to SB Mailbox int Fn0.1 is received */
+#define PCIE_INT_MB_FN1_0 0x0400 /* PCIE to SB Mailbox int Fn1.0 is received */
+#define PCIE_INT_MB_FN1_1 0x0800 /* PCIE to SB Mailbox int Fn1.1 is received */
+#define PCIE_INT_MB_FN2_0 0x1000 /* PCIE to SB Mailbox int Fn2.0 is received */
+#define PCIE_INT_MB_FN2_1 0x2000 /* PCIE to SB Mailbox int Fn2.1 is received */
+#define PCIE_INT_MB_FN3_0 0x4000 /* PCIE to SB Mailbox int Fn3.0 is received */
+#define PCIE_INT_MB_FN3_1 0x8000 /* PCIE to SB Mailbox int Fn3.1 is received */
+
+/* PCIE MailboxInt/MailboxIntMask register */
+#define PCIE_MB_TOSB_FN0_0   	0x0001 /* write to assert PCIEtoSB Mailbox interrupt */
+#define PCIE_MB_TOSB_FN0_1   	0x0002
+#define PCIE_MB_TOSB_FN1_0   	0x0004
+#define PCIE_MB_TOSB_FN1_1   	0x0008
+#define PCIE_MB_TOSB_FN2_0   	0x0010
+#define PCIE_MB_TOSB_FN2_1   	0x0020
+#define PCIE_MB_TOSB_FN3_0   	0x0040
+#define PCIE_MB_TOSB_FN3_1   	0x0080
+#define PCIE_MB_TOPCIE_FN0_0 	0x0100 /* int status/mask for SBtoPCIE Mailbox interrupts */
+#define PCIE_MB_TOPCIE_FN0_1 	0x0200
+#define PCIE_MB_TOPCIE_FN1_0 	0x0400
+#define PCIE_MB_TOPCIE_FN1_1 	0x0800
+#define PCIE_MB_TOPCIE_FN2_0 	0x1000
+#define PCIE_MB_TOPCIE_FN2_1 	0x2000
+#define PCIE_MB_TOPCIE_FN3_0 	0x4000
+#define PCIE_MB_TOPCIE_FN3_1 	0x8000
+#define	PCIE_MB_TOPCIE_D2H0_DB0	0x10000
+#define	PCIE_MB_TOPCIE_D2H0_DB1	0x20000
+#define	PCIE_MB_TOPCIE_D2H1_DB0	0x40000
+#define	PCIE_MB_TOPCIE_D2H1_DB1	0x80000
+#define	PCIE_MB_TOPCIE_D2H2_DB0	0x100000
+#define	PCIE_MB_TOPCIE_D2H2_DB1	0x200000
+#define	PCIE_MB_TOPCIE_D2H3_DB0	0x400000
+#define	PCIE_MB_TOPCIE_D2H3_DB1	0x800000
+
+#define PCIE_MB_D2H_MB_MASK		\
+	(PCIE_MB_TOPCIE_D2H0_DB0 | PCIE_MB_TOPCIE_D2H0_DB1 |	\
+	PCIE_MB_TOPCIE_D2H1_DB1  | PCIE_MB_TOPCIE_D2H1_DB1 |	\
+	PCIE_MB_TOPCIE_D2H2_DB1  | PCIE_MB_TOPCIE_D2H2_DB1 |	\
+	PCIE_MB_TOPCIE_D2H3_DB1  | PCIE_MB_TOPCIE_D2H3_DB1)
+
+/* SB to PCIE translation masks */
+#define SBTOPCIE0_MASK	0xfc000000
+#define SBTOPCIE1_MASK	0xfc000000
+#define SBTOPCIE2_MASK	0xc0000000
+
+/* Access type bits (0:1) */
+#define SBTOPCIE_MEM	0
+#define SBTOPCIE_IO	1
+#define SBTOPCIE_CFG0	2
+#define SBTOPCIE_CFG1	3
+
+/* Prefetch enable bit 2 */
+#define SBTOPCIE_PF		4
+
+/* Write Burst enable for memory write bit 3 */
+#define SBTOPCIE_WR_BURST	8
+
+/* config access */
+#define CONFIGADDR_FUNC_MASK	0x7000
+#define CONFIGADDR_FUNC_SHF	12
+#define CONFIGADDR_REG_MASK	0x0FFF
+#define CONFIGADDR_REG_SHF	0
+
+#define PCIE_CONFIG_INDADDR(f, r)	((((f) & CONFIGADDR_FUNC_MASK) << CONFIGADDR_FUNC_SHF) | \
+			                 (((r) & CONFIGADDR_REG_MASK) << CONFIGADDR_REG_SHF))
+
+/* PCIE protocol regs Indirect Address */
+#define PCIEADDR_PROT_MASK	0x300
+#define PCIEADDR_PROT_SHF	8
+#define PCIEADDR_PL_TLP		0
+#define PCIEADDR_PL_DLLP	1
+#define PCIEADDR_PL_PLP		2
+
+/* PCIE protocol PHY diagnostic registers */
+#define	PCIE_PLP_MODEREG		0x200 /* Mode */
+#define	PCIE_PLP_STATUSREG		0x204 /* Status */
+#define PCIE_PLP_LTSSMCTRLREG		0x208 /* LTSSM control */
+#define PCIE_PLP_LTLINKNUMREG		0x20c /* Link Training Link number */
+#define PCIE_PLP_LTLANENUMREG		0x210 /* Link Training Lane number */
+#define PCIE_PLP_LTNFTSREG		0x214 /* Link Training N_FTS */
+#define PCIE_PLP_ATTNREG		0x218 /* Attention */
+#define PCIE_PLP_ATTNMASKREG		0x21C /* Attention Mask */
+#define PCIE_PLP_RXERRCTR		0x220 /* Rx Error */
+#define PCIE_PLP_RXFRMERRCTR		0x224 /* Rx Framing Error */
+#define PCIE_PLP_RXERRTHRESHREG		0x228 /* Rx Error threshold */
+#define PCIE_PLP_TESTCTRLREG		0x22C /* Test Control reg */
+#define PCIE_PLP_SERDESCTRLOVRDREG	0x230 /* SERDES Control Override */
+#define PCIE_PLP_TIMINGOVRDREG		0x234 /* Timing param override */
+#define PCIE_PLP_RXTXSMDIAGREG		0x238 /* RXTX State Machine Diag */
+#define PCIE_PLP_LTSSMDIAGREG		0x23C /* LTSSM State Machine Diag */
+
+/* PCIE protocol DLLP diagnostic registers */
+#define PCIE_DLLP_LCREG			0x100 /* Link Control */
+#define PCIE_DLLP_LSREG			0x104 /* Link Status */
+#define PCIE_DLLP_LAREG			0x108 /* Link Attention */
+#define PCIE_DLLP_LAMASKREG		0x10C /* Link Attention Mask */
+#define PCIE_DLLP_NEXTTXSEQNUMREG	0x110 /* Next Tx Seq Num */
+#define PCIE_DLLP_ACKEDTXSEQNUMREG	0x114 /* Acked Tx Seq Num */
+#define PCIE_DLLP_PURGEDTXSEQNUMREG	0x118 /* Purged Tx Seq Num */
+#define PCIE_DLLP_RXSEQNUMREG		0x11C /* Rx Sequence Number */
+#define PCIE_DLLP_LRREG			0x120 /* Link Replay */
+#define PCIE_DLLP_LACKTOREG		0x124 /* Link Ack Timeout */
+#define PCIE_DLLP_PMTHRESHREG		0x128 /* Power Management Threshold */
+#define PCIE_DLLP_RTRYWPREG		0x12C /* Retry buffer write ptr */
+#define PCIE_DLLP_RTRYRPREG		0x130 /* Retry buffer Read ptr */
+#define PCIE_DLLP_RTRYPPREG		0x134 /* Retry buffer Purged ptr */
+#define PCIE_DLLP_RTRRWREG		0x138 /* Retry buffer Read/Write */
+#define PCIE_DLLP_ECTHRESHREG		0x13C /* Error Count Threshold */
+#define PCIE_DLLP_TLPERRCTRREG		0x140 /* TLP Error Counter */
+#define PCIE_DLLP_ERRCTRREG		0x144 /* Error Counter */
+#define PCIE_DLLP_NAKRXCTRREG		0x148 /* NAK Received Counter */
+#define PCIE_DLLP_TESTREG		0x14C /* Test */
+#define PCIE_DLLP_PKTBIST		0x150 /* Packet BIST */
+#define PCIE_DLLP_PCIE11		0x154 /* DLLP PCIE 1.1 reg */
+
+#define PCIE_DLLP_LSREG_LINKUP		(1 << 16)
+
+/* PCIE protocol TLP diagnostic registers */
+#define PCIE_TLP_CONFIGREG		0x000 /* Configuration */
+#define PCIE_TLP_WORKAROUNDSREG		0x004 /* TLP Workarounds */
+#define PCIE_TLP_WRDMAUPPER		0x010 /* Write DMA Upper Address */
+#define PCIE_TLP_WRDMALOWER		0x014 /* Write DMA Lower Address */
+#define PCIE_TLP_WRDMAREQ_LBEREG	0x018 /* Write DMA Len/ByteEn Req */
+#define PCIE_TLP_RDDMAUPPER		0x01C /* Read DMA Upper Address */
+#define PCIE_TLP_RDDMALOWER		0x020 /* Read DMA Lower Address */
+#define PCIE_TLP_RDDMALENREG		0x024 /* Read DMA Len Req */
+#define PCIE_TLP_MSIDMAUPPER		0x028 /* MSI DMA Upper Address */
+#define PCIE_TLP_MSIDMALOWER		0x02C /* MSI DMA Lower Address */
+#define PCIE_TLP_MSIDMALENREG		0x030 /* MSI DMA Len Req */
+#define PCIE_TLP_SLVREQLENREG		0x034 /* Slave Request Len */
+#define PCIE_TLP_FCINPUTSREQ		0x038 /* Flow Control Inputs */
+#define PCIE_TLP_TXSMGRSREQ		0x03C /* Tx StateMachine and Gated Req */
+#define PCIE_TLP_ADRACKCNTARBLEN	0x040 /* Address Ack XferCnt and ARB Len */
+#define PCIE_TLP_DMACPLHDR0		0x044 /* DMA Completion Hdr 0 */
+#define PCIE_TLP_DMACPLHDR1		0x048 /* DMA Completion Hdr 1 */
+#define PCIE_TLP_DMACPLHDR2		0x04C /* DMA Completion Hdr 2 */
+#define PCIE_TLP_DMACPLMISC0		0x050 /* DMA Completion Misc0 */
+#define PCIE_TLP_DMACPLMISC1		0x054 /* DMA Completion Misc1 */
+#define PCIE_TLP_DMACPLMISC2		0x058 /* DMA Completion Misc2 */
+#define PCIE_TLP_SPTCTRLLEN		0x05C /* Split Controller Req len */
+#define PCIE_TLP_SPTCTRLMSIC0		0x060 /* Split Controller Misc 0 */
+#define PCIE_TLP_SPTCTRLMSIC1		0x064 /* Split Controller Misc 1 */
+#define PCIE_TLP_BUSDEVFUNC		0x068 /* Bus/Device/Func */
+#define PCIE_TLP_RESETCTR		0x06C /* Reset Counter */
+#define PCIE_TLP_RTRYBUF		0x070 /* Retry Buffer value */
+#define PCIE_TLP_TGTDEBUG1		0x074 /* Target Debug Reg1 */
+#define PCIE_TLP_TGTDEBUG2		0x078 /* Target Debug Reg2 */
+#define PCIE_TLP_TGTDEBUG3		0x07C /* Target Debug Reg3 */
+#define PCIE_TLP_TGTDEBUG4		0x080 /* Target Debug Reg4 */
+
+/* MDIO control */
+#define MDIOCTL_DIVISOR_MASK		0x7f	/* clock to be used on MDIO */
+#define MDIOCTL_DIVISOR_VAL		0x2
+#define MDIOCTL_PREAM_EN		0x80	/* Enable preamble sequnce */
+#define MDIOCTL_ACCESS_DONE		0x100   /* Tranaction complete */
+
+/* MDIO Data */
+#define MDIODATA_MASK			0x0000ffff	/* data 2 bytes */
+#define MDIODATA_TA			0x00020000	/* Turnaround */
+#define MDIODATA_REGADDR_SHF_OLD	18		/* Regaddr shift (rev < 10) */
+#define MDIODATA_REGADDR_MASK_OLD	0x003c0000	/* Regaddr Mask (rev < 10) */
+#define MDIODATA_DEVADDR_SHF_OLD	22		/* Physmedia devaddr shift (rev < 10) */
+#define MDIODATA_DEVADDR_MASK_OLD	0x0fc00000	/* Physmedia devaddr Mask (rev < 10) */
+#define MDIODATA_REGADDR_SHF		18		/* Regaddr shift */
+#define MDIODATA_REGADDR_MASK		0x007c0000	/* Regaddr Mask */
+#define MDIODATA_DEVADDR_SHF		23		/* Physmedia devaddr shift */
+#define MDIODATA_DEVADDR_MASK		0x0f800000	/* Physmedia devaddr Mask */
+#define MDIODATA_WRITE			0x10000000	/* write Transaction */
+#define MDIODATA_READ			0x20000000	/* Read Transaction */
+#define MDIODATA_START			0x40000000	/* start of Transaction */
+
+#define MDIODATA_DEV_ADDR		0x0		/* dev address for serdes */
+#define	MDIODATA_BLK_ADDR		0x1F		/* blk address for serdes */
+
+/* MDIO control/wrData/rdData register defines for PCIE Gen 2 */
+#define MDIOCTL2_DIVISOR_MASK		0x7f	/* clock to be used on MDIO */
+#define MDIOCTL2_DIVISOR_VAL		0x2
+#define MDIOCTL2_REGADDR_SHF		8		/* Regaddr shift */
+#define MDIOCTL2_REGADDR_MASK		0x00FFFF00	/* Regaddr Mask */
+#define MDIOCTL2_DEVADDR_SHF		24		/* Physmedia devaddr shift */
+#define MDIOCTL2_DEVADDR_MASK		0x0f000000	/* Physmedia devaddr Mask */
+#define MDIOCTL2_SLAVE_BYPASS		0x10000000	/* IP slave bypass */
+#define MDIOCTL2_READ			0x20000000	/* IP slave bypass */
+
+#define MDIODATA2_DONE			0x80000000	/* rd/wr transaction done */
+#define MDIODATA2_MASK			0x7FFFFFFF	/* rd/wr transaction data */
+#define MDIODATA2_DEVADDR_SHF		4		/* Physmedia devaddr shift */
+
+
+/* MDIO devices (SERDES modules)
+ *  unlike old pcie cores (rev < 10), rev10 pcie serde organizes registers into a few blocks.
+ *  two layers mapping (blockidx, register offset) is required
+ */
+#define MDIO_DEV_IEEE0		0x000
+#define MDIO_DEV_IEEE1		0x001
+#define MDIO_DEV_BLK0		0x800
+#define MDIO_DEV_BLK1		0x801
+#define MDIO_DEV_BLK2		0x802
+#define MDIO_DEV_BLK3		0x803
+#define MDIO_DEV_BLK4		0x804
+#define MDIO_DEV_TXPLL		0x808	/* TXPLL register block idx */
+#define MDIO_DEV_TXCTRL0	0x820
+#define MDIO_DEV_SERDESID	0x831
+#define MDIO_DEV_RXCTRL0	0x840
+
+
+/* XgxsBlk1_A Register Offsets */
+#define BLK1_PWR_MGMT0		0x16
+#define BLK1_PWR_MGMT1		0x17
+#define BLK1_PWR_MGMT2		0x18
+#define BLK1_PWR_MGMT3		0x19
+#define BLK1_PWR_MGMT4		0x1A
+
+/* serdes regs (rev < 10) */
+#define MDIODATA_DEV_PLL       		0x1d	/* SERDES PLL Dev */
+#define MDIODATA_DEV_TX        		0x1e	/* SERDES TX Dev */
+#define MDIODATA_DEV_RX        		0x1f	/* SERDES RX Dev */
+	/* SERDES RX registers */
+#define SERDES_RX_CTRL			1	/* Rx cntrl */
+#define SERDES_RX_TIMER1		2	/* Rx Timer1 */
+#define SERDES_RX_CDR			6	/* CDR */
+#define SERDES_RX_CDRBW			7	/* CDR BW */
+
+	/* SERDES RX control register */
+#define SERDES_RX_CTRL_FORCE		0x80	/* rxpolarity_force */
+#define SERDES_RX_CTRL_POLARITY		0x40	/* rxpolarity_value */
+
+	/* SERDES PLL registers */
+#define SERDES_PLL_CTRL                 1       /* PLL control reg */
+#define PLL_CTRL_FREQDET_EN             0x4000  /* bit 14 is FREQDET on */
+
+/* Power management threshold */
+#define PCIE_L0THRESHOLDTIME_MASK       0xFF00	/* bits 0 - 7 */
+#define PCIE_L1THRESHOLDTIME_MASK       0xFF00	/* bits 8 - 15 */
+#define PCIE_L1THRESHOLDTIME_SHIFT      8	/* PCIE_L1THRESHOLDTIME_SHIFT */
+#define PCIE_L1THRESHOLD_WARVAL         0x72	/* WAR value */
+#define PCIE_ASPMTIMER_EXTEND		0x01000000	/* > rev7: enable extend ASPM timer */
+
+/* SPROM offsets */
+#define SRSH_ASPM_OFFSET		4	/* word 4 */
+#define SRSH_ASPM_ENB			0x18	/* bit 3, 4 */
+#define SRSH_ASPM_L1_ENB		0x10	/* bit 4 */
+#define SRSH_ASPM_L0s_ENB		0x8	/* bit 3 */
+#define SRSH_PCIE_MISC_CONFIG		5	/* word 5 */
+#define SRSH_L23READY_EXIT_NOPERST	0x8000	/* bit 15 */
+#define SRSH_CLKREQ_OFFSET_REV5		20	/* word 20 for srom rev <= 5 */
+#define SRSH_CLKREQ_OFFSET_REV8		52	/* word 52 for srom rev 8 */
+#define SRSH_CLKREQ_ENB			0x0800	/* bit 11 */
+#define SRSH_BD_OFFSET                  6       /* word 6 */
+#define SRSH_AUTOINIT_OFFSET            18      /* auto initialization enable */
+
+/* Linkcontrol reg offset in PCIE Cap */
+#define PCIE_CAP_LINKCTRL_OFFSET	16	/* linkctrl offset in pcie cap */
+#define PCIE_CAP_LCREG_ASPML0s		0x01	/* ASPM L0s in linkctrl */
+#define PCIE_CAP_LCREG_ASPML1		0x02	/* ASPM L1 in linkctrl */
+#define PCIE_CLKREQ_ENAB		0x100	/* CLKREQ Enab in linkctrl */
+#define PCIE_LINKSPEED_MASK       	0xF0000	/* bits 0 - 3 of high word */
+#define PCIE_LINKSPEED_SHIFT      	16	/* PCIE_LINKSPEED_SHIFT */
+
+/* Devcontrol reg offset in PCIE Cap */
+#define PCIE_CAP_DEVCTRL_OFFSET		8	/* devctrl offset in pcie cap */
+#define PCIE_CAP_DEVCTRL_MRRS_MASK	0x7000	/* Max read request size mask */
+#define PCIE_CAP_DEVCTRL_MRRS_SHIFT	12	/* Max read request size shift */
+#define PCIE_CAP_DEVCTRL_MRRS_128B	0	/* 128 Byte */
+#define PCIE_CAP_DEVCTRL_MRRS_256B	1	/* 256 Byte */
+#define PCIE_CAP_DEVCTRL_MRRS_512B	2	/* 512 Byte */
+#define PCIE_CAP_DEVCTRL_MRRS_1024B	3	/* 1024 Byte */
+#define PCIE_CAP_DEVCTRL_MPS_MASK	0x00e0	/* Max payload size mask */
+#define PCIE_CAP_DEVCTRL_MPS_SHIFT	5	/* Max payload size shift */
+#define PCIE_CAP_DEVCTRL_MPS_128B	0	/* 128 Byte */
+#define PCIE_CAP_DEVCTRL_MPS_256B	1	/* 256 Byte */
+#define PCIE_CAP_DEVCTRL_MPS_512B	2	/* 512 Byte */
+#define PCIE_CAP_DEVCTRL_MPS_1024B	3	/* 1024 Byte */
+
+#define PCIE_ASPM_ENAB			3	/* ASPM L0s & L1 in linkctrl */
+#define PCIE_ASPM_L1_ENAB		2	/* ASPM L0s & L1 in linkctrl */
+#define PCIE_ASPM_L0s_ENAB		1	/* ASPM L0s & L1 in linkctrl */
+#define PCIE_ASPM_DISAB			0	/* ASPM L0s & L1 in linkctrl */
+
+#define PCIE_ASPM_L11_ENAB		8	/* ASPM L1.1 in PML1_sub_control2 */
+#define PCIE_ASPM_L12_ENAB		4	/* ASPM L1.2 in PML1_sub_control2 */
+
+/* Devcontrol2 reg offset in PCIE Cap */
+#define PCIE_CAP_DEVCTRL2_OFFSET	0x28	/* devctrl2 offset in pcie cap */
+#define PCIE_CAP_DEVCTRL2_LTR_ENAB_MASK	0x400	/* Latency Tolerance Reporting Enable */
+#define PCIE_CAP_DEVCTRL2_OBFF_ENAB_SHIFT 13	/* Enable OBFF mechanism, select signaling method */
+#define PCIE_CAP_DEVCTRL2_OBFF_ENAB_MASK 0x6000	/* Enable OBFF mechanism, select signaling method */
+
+/* LTR registers in PCIE Cap */
+#define PCIE_LTR0_REG_OFFSET	0x844	/* ltr0_reg offset in pcie cap */
+#define PCIE_LTR1_REG_OFFSET	0x848	/* ltr1_reg offset in pcie cap */
+#define PCIE_LTR2_REG_OFFSET	0x84c	/* ltr2_reg offset in pcie cap */
+#define PCIE_LTR0_REG_DEFAULT_60	0x883c883c	/* active latency default to 60usec */
+#define PCIE_LTR0_REG_DEFAULT_150	0x88968896	/* active latency default to 150usec */
+#define PCIE_LTR1_REG_DEFAULT		0x88648864	/* idle latency default to 100usec */
+#define PCIE_LTR2_REG_DEFAULT		0x90039003	/* sleep latency default to 3msec */
+
+/* Status reg PCIE_PLP_STATUSREG */
+#define PCIE_PLP_POLARITYINV_STAT	0x10
+
+
+/* PCIE BRCM Vendor CAP REVID reg  bits */
+#define BRCMCAP_PCIEREV_CT_MASK			0xF00
+#define BRCMCAP_PCIEREV_CT_SHIFT		8
+#define BRCMCAP_PCIEREV_REVID_MASK		0xFF
+#define BRCMCAP_PCIEREV_REVID_SHIFT		0
+
+#define PCIE_REVREG_CT_PCIE1		0
+#define PCIE_REVREG_CT_PCIE2		1
+
+/* PCIE GEN2 specific defines */
+/* PCIE BRCM Vendor Cap offsets w.r.t to vendor cap ptr */
+#define PCIE2R0_BRCMCAP_REVID_OFFSET		4
+#define PCIE2R0_BRCMCAP_BAR0_WIN0_WRAP_OFFSET	8
+#define PCIE2R0_BRCMCAP_BAR0_WIN2_OFFSET	12
+#define PCIE2R0_BRCMCAP_BAR0_WIN2_WRAP_OFFSET	16
+#define PCIE2R0_BRCMCAP_BAR0_WIN_OFFSET		20
+#define PCIE2R0_BRCMCAP_BAR1_WIN_OFFSET		24
+#define PCIE2R0_BRCMCAP_SPROM_CTRL_OFFSET	28
+#define PCIE2R0_BRCMCAP_BAR2_WIN_OFFSET		32
+#define PCIE2R0_BRCMCAP_INTSTATUS_OFFSET	36
+#define PCIE2R0_BRCMCAP_INTMASK_OFFSET		40
+#define PCIE2R0_BRCMCAP_PCIE2SB_MB_OFFSET	44
+#define PCIE2R0_BRCMCAP_BPADDR_OFFSET		48
+#define PCIE2R0_BRCMCAP_BPDATA_OFFSET		52
+#define PCIE2R0_BRCMCAP_CLKCTLSTS_OFFSET	56
+
+/* definition of configuration space registers of PCIe gen2
+ * http://hwnbu-twiki.sj.broadcom.com/twiki/pub/Mwgroup/CurrentPcieGen2ProgramGuide/pcie_ep.htm
+ */
+#define PCIECFGREG_STATUS_CMD		0x4
+#define PCIECFGREG_PM_CSR		0x4C
+#define PCIECFGREG_MSI_CAP		0x58
+#define PCIECFGREG_MSI_ADDR_L		0x5C
+#define PCIECFGREG_MSI_ADDR_H		0x60
+#define PCIECFGREG_MSI_DATA		0x64
+#define PCIECFGREG_LINK_STATUS_CTRL	0xBC
+#define PCIECFGREG_LINK_STATUS_CTRL2	0xDC
+#define PCIECFGREG_RBAR_CTRL		0x228
+#define PCIECFGREG_PML1_SUB_CTRL1	0x248
+#define PCIECFGREG_REG_BAR2_CONFIG	0x4E0
+#define PCIECFGREG_REG_BAR3_CONFIG	0x4F4
+#define PCIECFGREG_PDL_CTRL1		0x1004
+#define PCIECFGREG_PDL_IDDQ		0x1814
+#define PCIECFGREG_REG_PHY_CTL7		0x181c
+
+/* PCIECFGREG_PML1_SUB_CTRL1 Bit Definition */
+#define PCI_PM_L1_2_ENA_MASK		0x00000001	/* PCI-PM L1.2 Enabled */
+#define PCI_PM_L1_1_ENA_MASK		0x00000002	/* PCI-PM L1.1 Enabled */
+#define ASPM_L1_2_ENA_MASK		0x00000004	/* ASPM L1.2 Enabled */
+#define ASPM_L1_1_ENA_MASK		0x00000008	/* ASPM L1.1 Enabled */
+
+/* PCIe gen2 mailbox interrupt masks */
+#define I_MB    0x3
+#define I_BIT0  0x1
+#define I_BIT1  0x2
+
+/* PCIE gen2 config regs */
+#define PCIIntstatus	0x090
+#define PCIIntmask	0x094
+#define PCISBMbx	0x98
+
+/* enumeration Core regs */
+#define PCIH2D_MailBox  0x140
+#define PCIH2D_DB1 0x144
+#define PCID2H_MailBox  0x148
+#define PCIMailBoxInt	0x48
+#define PCIMailBoxMask	0x4C
+
+#define I_F0_B0         (0x1 << 8) /* Mail box interrupt Function 0 interrupt, bit 0 */
+#define I_F0_B1         (0x1 << 9) /* Mail box interrupt Function 0 interrupt, bit 1 */
+
+#define PCIECFGREG_DEVCONTROL	0xB4
+
+/* SROM hardware region */
+#define SROM_OFFSET_BAR1_CTRL  52
+
+#define BAR1_ENC_SIZE_MASK	0x000e
+#define BAR1_ENC_SIZE_SHIFT	1
+
+#define BAR1_ENC_SIZE_1M	0
+#define BAR1_ENC_SIZE_2M	1
+#define BAR1_ENC_SIZE_4M	2
+
+#define PCIEGEN2_CAP_DEVSTSCTRL2_OFFSET		0xD4
+#define PCIEGEN2_CAP_DEVSTSCTRL2_LTRENAB	0x400
+
+/*
+ * Latency Tolerance Reporting (LTR) states
+ * Active has the least tolerant latency requirement
+ * Sleep is most tolerant
+ */
+#define LTR_ACTIVE				2
+#define LTR_ACTIVE_IDLE				1
+#define LTR_SLEEP				0
+#define LTR_FINAL_MASK				0x300
+#define LTR_FINAL_SHIFT				8
+
+/* pwrinstatus, pwrintmask regs */
+#define PCIEGEN2_PWRINT_D0_STATE_SHIFT		0
+#define PCIEGEN2_PWRINT_D1_STATE_SHIFT		1
+#define PCIEGEN2_PWRINT_D2_STATE_SHIFT		2
+#define PCIEGEN2_PWRINT_D3_STATE_SHIFT		3
+#define PCIEGEN2_PWRINT_L0_LINK_SHIFT		4
+#define PCIEGEN2_PWRINT_L0s_LINK_SHIFT		5
+#define PCIEGEN2_PWRINT_L1_LINK_SHIFT		6
+#define PCIEGEN2_PWRINT_L2_L3_LINK_SHIFT	7
+#define PCIEGEN2_PWRINT_OBFF_CHANGE_SHIFT	8
+
+#define PCIEGEN2_PWRINT_D0_STATE_MASK		(1 << PCIEGEN2_PWRINT_D0_STATE_SHIFT)
+#define PCIEGEN2_PWRINT_D1_STATE_MASK		(1 << PCIEGEN2_PWRINT_D1_STATE_SHIFT)
+#define PCIEGEN2_PWRINT_D2_STATE_MASK		(1 << PCIEGEN2_PWRINT_D2_STATE_SHIFT)
+#define PCIEGEN2_PWRINT_D3_STATE_MASK		(1 << PCIEGEN2_PWRINT_D3_STATE_SHIFT)
+#define PCIEGEN2_PWRINT_L0_LINK_MASK		(1 << PCIEGEN2_PWRINT_L0_LINK_SHIFT)
+#define PCIEGEN2_PWRINT_L0s_LINK_MASK		(1 << PCIEGEN2_PWRINT_L0s_LINK_SHIFT)
+#define PCIEGEN2_PWRINT_L1_LINK_MASK		(1 << PCIEGEN2_PWRINT_L1_LINK_SHIFT)
+#define PCIEGEN2_PWRINT_L2_L3_LINK_MASK		(1 << PCIEGEN2_PWRINT_L2_L3_LINK_SHIFT)
+#define PCIEGEN2_PWRINT_OBFF_CHANGE_MASK	(1 << PCIEGEN2_PWRINT_OBFF_CHANGE_SHIFT)
+
+/* sbtopcie mail box */
+#define SBTOPCIE_MB_FUNC0_SHIFT 8
+#define SBTOPCIE_MB_FUNC1_SHIFT 10
+#define SBTOPCIE_MB_FUNC2_SHIFT 12
+#define SBTOPCIE_MB_FUNC3_SHIFT 14
+
+/* pcieiocstatus */
+#define PCIEGEN2_IOC_D0_STATE_SHIFT		8
+#define PCIEGEN2_IOC_D1_STATE_SHIFT		9
+#define PCIEGEN2_IOC_D2_STATE_SHIFT		10
+#define PCIEGEN2_IOC_D3_STATE_SHIFT		11
+#define PCIEGEN2_IOC_L0_LINK_SHIFT		12
+#define PCIEGEN2_IOC_L1_LINK_SHIFT		13
+#define PCIEGEN2_IOC_L1L2_LINK_SHIFT		14
+#define PCIEGEN2_IOC_L2_L3_LINK_SHIFT		15
+
+#define PCIEGEN2_IOC_D0_STATE_MASK		(1 << PCIEGEN2_IOC_D0_STATE_SHIFT)
+#define PCIEGEN2_IOC_D1_STATE_MASK		(1 << PCIEGEN2_IOC_D1_STATE_SHIF)
+#define PCIEGEN2_IOC_D2_STATE_MASK		(1 << PCIEGEN2_IOC_D2_STATE_SHIF)
+#define PCIEGEN2_IOC_D3_STATE_MASK		(1 << PCIEGEN2_IOC_D3_STATE_SHIF)
+#define PCIEGEN2_IOC_L0_LINK_MASK		(1 << PCIEGEN2_IOC_L0_LINK_SHIF)
+#define PCIEGEN2_IOC_L1_LINK_MASK		(1 << PCIEGEN2_IOC_L1_LINK_SHIF)
+#define PCIEGEN2_IOC_L1L2_LINK_MASK		(1 << PCIEGEN2_IOC_L1L2_LINK_SHIFT)
+#define PCIEGEN2_IOC_L2_L3_LINK_MASK		(1 << PCIEGEN2_IOC_L2_L3_LINK_SHIFT)
+
+/* stat_ctrl */
+#define PCIE_STAT_CTRL_RESET		0x1
+#define PCIE_STAT_CTRL_ENABLE		0x2
+#define PCIE_STAT_CTRL_INTENABLE	0x4
+#define PCIE_STAT_CTRL_INTSTATUS	0x8
+
+#ifdef BCMDRIVER
+void pcie_watchdog_reset(osl_t *osh, si_t *sih, sbpcieregs_t *sbpcieregs);
+#endif /* BCMDRIVER */
+
+#endif	/* _PCIE_CORE_H */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/802.11.h b/drivers/net/wireless/bcmdhd/include/proto/802.11.h
new file mode 100644
index 0000000..7a584f4
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/802.11.h
@@ -0,0 +1,3847 @@
+/*
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * Fundamental types and constants relating to 802.11
+ *
+ * $Id: 802.11.h 469158 2014-04-09 21:31:31Z $
+ */
+
+#ifndef _802_11_H_
+#define _802_11_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+#ifndef _NET_ETHERNET_H_
+#include <proto/ethernet.h>
+#endif
+
+#include <proto/wpa.h>
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+
+#define DOT11_TU_TO_US			1024	/* 802.11 Time Unit is 1024 microseconds */
+
+/* Generic 802.11 frame constants */
+#define DOT11_A3_HDR_LEN		24	/* d11 header length with A3 */
+#define DOT11_A4_HDR_LEN		30	/* d11 header length with A4 */
+#define DOT11_MAC_HDR_LEN		DOT11_A3_HDR_LEN	/* MAC header length */
+#define DOT11_FCS_LEN			4	/* d11 FCS length */
+#define DOT11_ICV_LEN			4	/* d11 ICV length */
+#define DOT11_ICV_AES_LEN		8	/* d11 ICV/AES length */
+#define DOT11_QOS_LEN			2	/* d11 QoS length */
+#define DOT11_HTC_LEN			4	/* d11 HT Control field length */
+
+#define DOT11_KEY_INDEX_SHIFT		6	/* d11 key index shift */
+#define DOT11_IV_LEN			4	/* d11 IV length */
+#define DOT11_IV_TKIP_LEN		8	/* d11 IV TKIP length */
+#define DOT11_IV_AES_OCB_LEN		4	/* d11 IV/AES/OCB length */
+#define DOT11_IV_AES_CCM_LEN		8	/* d11 IV/AES/CCM length */
+#define DOT11_IV_MAX_LEN		8	/* maximum iv len for any encryption */
+
+/* Includes MIC */
+#define DOT11_MAX_MPDU_BODY_LEN		2304	/* max MPDU body length */
+/* A4 header + QoS + CCMP + PDU + ICV + FCS = 2352 */
+#define DOT11_MAX_MPDU_LEN		(DOT11_A4_HDR_LEN + \
+					 DOT11_QOS_LEN + \
+					 DOT11_IV_AES_CCM_LEN + \
+					 DOT11_MAX_MPDU_BODY_LEN + \
+					 DOT11_ICV_LEN + \
+					 DOT11_FCS_LEN)	/* d11 max MPDU length */
+
+#define DOT11_MAX_SSID_LEN		32	/* d11 max ssid length */
+
+/* dot11RTSThreshold */
+#define DOT11_DEFAULT_RTS_LEN		2347	/* d11 default RTS length */
+#define DOT11_MAX_RTS_LEN		2347	/* d11 max RTS length */
+
+/* dot11FragmentationThreshold */
+#define DOT11_MIN_FRAG_LEN		256	/* d11 min fragmentation length */
+#define DOT11_MAX_FRAG_LEN		2346	/* Max frag is also limited by aMPDUMaxLength
+						* of the attached PHY
+						*/
+#define DOT11_DEFAULT_FRAG_LEN		2346	/* d11 default fragmentation length */
+
+/* dot11BeaconPeriod */
+#define DOT11_MIN_BEACON_PERIOD		1	/* d11 min beacon period */
+#define DOT11_MAX_BEACON_PERIOD		0xFFFF	/* d11 max beacon period */
+
+/* dot11DTIMPeriod */
+#define DOT11_MIN_DTIM_PERIOD		1	/* d11 min DTIM period */
+#define DOT11_MAX_DTIM_PERIOD		0xFF	/* d11 max DTIM period */
+
+/** 802.2 LLC/SNAP header used by 802.11 per 802.1H */
+#define DOT11_LLC_SNAP_HDR_LEN		8	/* d11 LLC/SNAP header length */
+#define DOT11_OUI_LEN			3	/* d11 OUI length */
+BWL_PRE_PACKED_STRUCT struct dot11_llc_snap_header {
+	uint8	dsap;				/* always 0xAA */
+	uint8	ssap;				/* always 0xAA */
+	uint8	ctl;				/* always 0x03 */
+	uint8	oui[DOT11_OUI_LEN];		/* RFC1042: 0x00 0x00 0x00
+						 * Bridge-Tunnel: 0x00 0x00 0xF8
+						 */
+	uint16	type;				/* ethertype */
+} BWL_POST_PACKED_STRUCT;
+
+/* RFC1042 header used by 802.11 per 802.1H */
+#define RFC1042_HDR_LEN	(ETHER_HDR_LEN + DOT11_LLC_SNAP_HDR_LEN)	/* RCF1042 header length */
+
+/* Generic 802.11 MAC header */
+/**
+ * N.B.: This struct reflects the full 4 address 802.11 MAC header.
+ *		 The fields are defined such that the shorter 1, 2, and 3
+ *		 address headers just use the first k fields.
+ */
+BWL_PRE_PACKED_STRUCT struct dot11_header {
+	uint16			fc;		/* frame control */
+	uint16			durid;		/* duration/ID */
+	struct ether_addr	a1;		/* address 1 */
+	struct ether_addr	a2;		/* address 2 */
+	struct ether_addr	a3;		/* address 3 */
+	uint16			seq;		/* sequence control */
+	struct ether_addr	a4;		/* address 4 */
+} BWL_POST_PACKED_STRUCT;
+
+/* Control frames */
+
+BWL_PRE_PACKED_STRUCT struct dot11_rts_frame {
+	uint16			fc;		/* frame control */
+	uint16			durid;		/* duration/ID */
+	struct ether_addr	ra;		/* receiver address */
+	struct ether_addr	ta;		/* transmitter address */
+} BWL_POST_PACKED_STRUCT;
+#define	DOT11_RTS_LEN		16		/* d11 RTS frame length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_cts_frame {
+	uint16			fc;		/* frame control */
+	uint16			durid;		/* duration/ID */
+	struct ether_addr	ra;		/* receiver address */
+} BWL_POST_PACKED_STRUCT;
+#define	DOT11_CTS_LEN		10		/* d11 CTS frame length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_ack_frame {
+	uint16			fc;		/* frame control */
+	uint16			durid;		/* duration/ID */
+	struct ether_addr	ra;		/* receiver address */
+} BWL_POST_PACKED_STRUCT;
+#define	DOT11_ACK_LEN		10		/* d11 ACK frame length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_ps_poll_frame {
+	uint16			fc;		/* frame control */
+	uint16			durid;		/* AID */
+	struct ether_addr	bssid;		/* receiver address, STA in AP */
+	struct ether_addr	ta;		/* transmitter address */
+} BWL_POST_PACKED_STRUCT;
+#define	DOT11_PS_POLL_LEN	16		/* d11 PS poll frame length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_cf_end_frame {
+	uint16			fc;		/* frame control */
+	uint16			durid;		/* duration/ID */
+	struct ether_addr	ra;		/* receiver address */
+	struct ether_addr	bssid;		/* transmitter address, STA in AP */
+} BWL_POST_PACKED_STRUCT;
+#define	DOT11_CS_END_LEN	16		/* d11 CF-END frame length */
+
+/**
+ * RWL wifi protocol: The Vendor Specific Action frame is defined for vendor-specific signaling
+ *  category+OUI+vendor specific content ( this can be variable)
+ */
+BWL_PRE_PACKED_STRUCT struct dot11_action_wifi_vendor_specific {
+	uint8	category;
+	uint8	OUI[3];
+	uint8	type;
+	uint8	subtype;
+	uint8	data[1040];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_action_wifi_vendor_specific dot11_action_wifi_vendor_specific_t;
+
+/** generic vendor specific action frame with variable length */
+BWL_PRE_PACKED_STRUCT struct dot11_action_vs_frmhdr {
+	uint8	category;
+	uint8	OUI[3];
+	uint8	type;
+	uint8	subtype;
+	uint8	data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_action_vs_frmhdr dot11_action_vs_frmhdr_t;
+
+#define DOT11_ACTION_VS_HDR_LEN	6
+
+#define BCM_ACTION_OUI_BYTE0	0x00
+#define BCM_ACTION_OUI_BYTE1	0x90
+#define BCM_ACTION_OUI_BYTE2	0x4c
+
+/* BA/BAR Control parameters */
+#define DOT11_BA_CTL_POLICY_NORMAL	0x0000	/* normal ack */
+#define DOT11_BA_CTL_POLICY_NOACK	0x0001	/* no ack */
+#define DOT11_BA_CTL_POLICY_MASK	0x0001	/* ack policy mask */
+
+#define DOT11_BA_CTL_MTID		0x0002	/* multi tid BA */
+#define DOT11_BA_CTL_COMPRESSED		0x0004	/* compressed bitmap */
+
+#define DOT11_BA_CTL_NUMMSDU_MASK	0x0FC0	/* num msdu in bitmap mask */
+#define DOT11_BA_CTL_NUMMSDU_SHIFT	6	/* num msdu in bitmap shift */
+
+#define DOT11_BA_CTL_TID_MASK		0xF000	/* tid mask */
+#define DOT11_BA_CTL_TID_SHIFT		12	/* tid shift */
+
+/** control frame header (BA/BAR) */
+BWL_PRE_PACKED_STRUCT struct dot11_ctl_header {
+	uint16			fc;		/* frame control */
+	uint16			durid;		/* duration/ID */
+	struct ether_addr	ra;		/* receiver address */
+	struct ether_addr	ta;		/* transmitter address */
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_CTL_HDR_LEN	16		/* control frame hdr len */
+
+/** BAR frame payload */
+BWL_PRE_PACKED_STRUCT struct dot11_bar {
+	uint16			bar_control;	/* BAR Control */
+	uint16			seqnum;		/* Starting Sequence control */
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_BAR_LEN		4		/* BAR frame payload length */
+
+#define DOT11_BA_BITMAP_LEN	128		/* bitmap length */
+#define DOT11_BA_CMP_BITMAP_LEN	8		/* compressed bitmap length */
+/** BA frame payload */
+BWL_PRE_PACKED_STRUCT struct dot11_ba {
+	uint16			ba_control;	/* BA Control */
+	uint16			seqnum;		/* Starting Sequence control */
+	uint8			bitmap[DOT11_BA_BITMAP_LEN];	/* Block Ack Bitmap */
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_BA_LEN		4		/* BA frame payload len (wo bitmap) */
+
+/** Management frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_management_header {
+	uint16			fc;		/* frame control */
+	uint16			durid;		/* duration/ID */
+	struct ether_addr	da;		/* receiver address */
+	struct ether_addr	sa;		/* transmitter address */
+	struct ether_addr	bssid;		/* BSS ID */
+	uint16			seq;		/* sequence control */
+} BWL_POST_PACKED_STRUCT;
+#define	DOT11_MGMT_HDR_LEN	24		/* d11 management header length */
+
+/* Management frame payloads */
+
+BWL_PRE_PACKED_STRUCT struct dot11_bcn_prb {
+	uint32			timestamp[2];
+	uint16			beacon_interval;
+	uint16			capability;
+} BWL_POST_PACKED_STRUCT;
+#define	DOT11_BCN_PRB_LEN	12		/* 802.11 beacon/probe frame fixed length */
+#define	DOT11_BCN_PRB_FIXED_LEN	12		/* 802.11 beacon/probe frame fixed length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_auth {
+	uint16			alg;		/* algorithm */
+	uint16			seq;		/* sequence control */
+	uint16			status;		/* status code */
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_AUTH_FIXED_LEN	6		/* length of auth frame without challenge IE */
+
+BWL_PRE_PACKED_STRUCT struct dot11_assoc_req {
+	uint16			capability;	/* capability information */
+	uint16			listen;		/* listen interval */
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_ASSOC_REQ_FIXED_LEN	4	/* length of assoc frame without info elts */
+
+BWL_PRE_PACKED_STRUCT struct dot11_reassoc_req {
+	uint16			capability;	/* capability information */
+	uint16			listen;		/* listen interval */
+	struct ether_addr	ap;		/* Current AP address */
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_REASSOC_REQ_FIXED_LEN	10	/* length of assoc frame without info elts */
+
+BWL_PRE_PACKED_STRUCT struct dot11_assoc_resp {
+	uint16			capability;	/* capability information */
+	uint16			status;		/* status code */
+	uint16			aid;		/* association ID */
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_ASSOC_RESP_FIXED_LEN	6	/* length of assoc resp frame without info elts */
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_measure {
+	uint8	category;
+	uint8	action;
+	uint8	token;
+	uint8	data[1];
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_ACTION_MEASURE_LEN	3	/* d11 action measurement header length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_ht_ch_width {
+	uint8	category;
+	uint8	action;
+	uint8	ch_width;
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_ht_mimops {
+	uint8	category;
+	uint8	action;
+	uint8	control;
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_sa_query {
+	uint8	category;
+	uint8	action;
+	uint16	id;
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_vht_oper_mode {
+	uint8	category;
+	uint8	action;
+	uint8	mode;
+} BWL_POST_PACKED_STRUCT;
+
+#define SM_PWRSAVE_ENABLE	1
+#define SM_PWRSAVE_MODE		2
+
+/* ************* 802.11h related definitions. ************* */
+BWL_PRE_PACKED_STRUCT struct dot11_power_cnst {
+	uint8 id;
+	uint8 len;
+	uint8 power;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_power_cnst dot11_power_cnst_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_power_cap {
+	int8 min;
+	int8 max;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_power_cap dot11_power_cap_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_tpc_rep {
+	uint8 id;
+	uint8 len;
+	uint8 tx_pwr;
+	uint8 margin;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tpc_rep dot11_tpc_rep_t;
+#define DOT11_MNG_IE_TPC_REPORT_LEN	2 	/* length of IE data, not including 2 byte header */
+
+BWL_PRE_PACKED_STRUCT struct dot11_supp_channels {
+	uint8 id;
+	uint8 len;
+	uint8 first_channel;
+	uint8 num_channels;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_supp_channels dot11_supp_channels_t;
+
+/**
+ * Extension Channel Offset IE: 802.11n-D1.0 spec. added sideband
+ * offset for 40MHz operation.  The possible 3 values are:
+ * 1 = above control channel
+ * 3 = below control channel
+ * 0 = no extension channel
+ */
+BWL_PRE_PACKED_STRUCT struct dot11_extch {
+	uint8	id;		/* IE ID, 62, DOT11_MNG_EXT_CHANNEL_OFFSET */
+	uint8	len;		/* IE length */
+	uint8	extch;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_extch dot11_extch_ie_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_brcm_extch {
+	uint8	id;		/* IE ID, 221, DOT11_MNG_PROPR_ID */
+	uint8	len;		/* IE length */
+	uint8	oui[3];
+	uint8	type;           /* type indicates what follows */
+	uint8	extch;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_brcm_extch dot11_brcm_extch_ie_t;
+
+#define BRCM_EXTCH_IE_LEN	5
+#define BRCM_EXTCH_IE_TYPE	53	/* 802.11n ID not yet assigned */
+#define DOT11_EXTCH_IE_LEN	1
+#define DOT11_EXT_CH_MASK	0x03	/* extension channel mask */
+#define DOT11_EXT_CH_UPPER	0x01	/* ext. ch. on upper sb */
+#define DOT11_EXT_CH_LOWER	0x03	/* ext. ch. on lower sb */
+#define DOT11_EXT_CH_NONE	0x00	/* no extension ch.  */
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_frmhdr {
+	uint8	category;
+	uint8	action;
+	uint8	data[1];
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_ACTION_FRMHDR_LEN	2
+
+/** CSA IE data structure */
+BWL_PRE_PACKED_STRUCT struct dot11_channel_switch {
+	uint8 id;	/* id DOT11_MNG_CHANNEL_SWITCH_ID */
+	uint8 len;	/* length of IE */
+	uint8 mode;	/* mode 0 or 1 */
+	uint8 channel;	/* channel switch to */
+	uint8 count;	/* number of beacons before switching */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_channel_switch dot11_chan_switch_ie_t;
+
+#define DOT11_SWITCH_IE_LEN	3	/* length of IE data, not including 2 byte header */
+/* CSA mode - 802.11h-2003 $7.3.2.20 */
+#define DOT11_CSA_MODE_ADVISORY		0	/* no DOT11_CSA_MODE_NO_TX restriction imposed */
+#define DOT11_CSA_MODE_NO_TX		1	/* no transmission upon receiving CSA frame. */
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_switch_channel {
+	uint8	category;
+	uint8	action;
+	dot11_chan_switch_ie_t chan_switch_ie;	/* for switch IE */
+	dot11_brcm_extch_ie_t extch_ie;		/* extension channel offset */
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct dot11_csa_body {
+	uint8 mode;	/* mode 0 or 1 */
+	uint8 reg;	/* regulatory class */
+	uint8 channel;	/* channel switch to */
+	uint8 count;	/* number of beacons before switching */
+} BWL_POST_PACKED_STRUCT;
+
+/** 11n Extended Channel Switch IE data structure */
+BWL_PRE_PACKED_STRUCT struct dot11_ext_csa {
+	uint8 id;	/* id DOT11_MNG_EXT_CHANNEL_SWITCH_ID */
+	uint8 len;	/* length of IE */
+	struct dot11_csa_body b;	/* body of the ie */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ext_csa dot11_ext_csa_ie_t;
+#define DOT11_EXT_CSA_IE_LEN	4	/* length of extended channel switch IE body */
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_ext_csa {
+	uint8	category;
+	uint8	action;
+	dot11_ext_csa_ie_t chan_switch_ie;	/* for switch IE */
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct dot11y_action_ext_csa {
+	uint8	category;
+	uint8	action;
+	struct dot11_csa_body b;	/* body of the ie */
+} BWL_POST_PACKED_STRUCT;
+
+/**  Wide Bandwidth Channel Switch IE data structure */
+BWL_PRE_PACKED_STRUCT struct dot11_wide_bw_channel_switch {
+	uint8 id;				/* id DOT11_MNG_WIDE_BW_CHANNEL_SWITCH_ID */
+	uint8 len;				/* length of IE */
+	uint8 channel_width;			/* new channel width */
+	uint8 center_frequency_segment_0;	/* center frequency segment 0 */
+	uint8 center_frequency_segment_1;	/* center frequency segment 1 */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_wide_bw_channel_switch dot11_wide_bw_chan_switch_ie_t;
+
+#define DOT11_WIDE_BW_SWITCH_IE_LEN     3       /* length of IE data, not including 2 byte header */
+
+/** Channel Switch Wrapper IE data structure */
+BWL_PRE_PACKED_STRUCT struct dot11_channel_switch_wrapper {
+	uint8 id;				/* id DOT11_MNG_WIDE_BW_CHANNEL_SWITCH_ID */
+	uint8 len;				/* length of IE */
+	dot11_wide_bw_chan_switch_ie_t wb_chan_switch_ie;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_channel_switch_wrapper dot11_chan_switch_wrapper_ie_t;
+
+/** VHT Transmit Power Envelope IE data structure */
+BWL_PRE_PACKED_STRUCT struct dot11_vht_transmit_power_envelope {
+	uint8 id;				/* id DOT11_MNG_WIDE_BW_CHANNEL_SWITCH_ID */
+	uint8 len;				/* length of IE */
+	uint8 transmit_power_info;
+	uint8 local_max_transmit_power_20;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_vht_transmit_power_envelope dot11_vht_transmit_power_envelope_ie_t;
+
+/* vht transmit power envelope IE length depends on channel width */
+#define DOT11_VHT_TRANSMIT_PWR_ENVELOPE_IE_LEN_40MHZ	1
+#define DOT11_VHT_TRANSMIT_PWR_ENVELOPE_IE_LEN_80MHZ	2
+#define DOT11_VHT_TRANSMIT_PWR_ENVELOPE_IE_LEN_160MHZ	3
+
+BWL_PRE_PACKED_STRUCT struct dot11_obss_coex {
+	uint8	id;
+	uint8	len;
+	uint8	info;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_obss_coex dot11_obss_coex_t;
+#define DOT11_OBSS_COEXINFO_LEN	1	/* length of OBSS Coexistence INFO IE */
+
+#define	DOT11_OBSS_COEX_INFO_REQ		0x01
+#define	DOT11_OBSS_COEX_40MHZ_INTOLERANT	0x02
+#define	DOT11_OBSS_COEX_20MHZ_WIDTH_REQ	0x04
+
+BWL_PRE_PACKED_STRUCT struct dot11_obss_chanlist {
+	uint8	id;
+	uint8	len;
+	uint8	regclass;
+	uint8	chanlist[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_obss_chanlist dot11_obss_chanlist_t;
+#define DOT11_OBSS_CHANLIST_FIXED_LEN	1	/* fixed length of regclass */
+
+BWL_PRE_PACKED_STRUCT struct dot11_extcap_ie {
+	uint8 id;
+	uint8 len;
+	uint8 cap[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_extcap_ie dot11_extcap_ie_t;
+
+#define DOT11_EXTCAP_LEN_MAX	8
+
+#define DOT11_EXTCAP_LEN_COEX	1
+#define DOT11_EXTCAP_LEN_BT	3
+#define DOT11_EXTCAP_LEN_IW	4
+#define DOT11_EXTCAP_LEN_SI	6
+
+#define DOT11_EXTCAP_LEN_TDLS	5
+#define DOT11_11AC_EXTCAP_LEN_TDLS	8
+
+#define DOT11_EXTCAP_LEN_FMS			2
+#define DOT11_EXTCAP_LEN_PROXY_ARP		2
+#define DOT11_EXTCAP_LEN_TFS			3
+#define DOT11_EXTCAP_LEN_WNM_SLEEP		3
+#define DOT11_EXTCAP_LEN_TIMBC			3
+#define DOT11_EXTCAP_LEN_BSSTRANS		3
+#define DOT11_EXTCAP_LEN_DMS			4
+#define DOT11_EXTCAP_LEN_WNM_NOTIFICATION	6
+#define DOT11_EXTCAP_LEN_TDLS_WBW		8
+#define DOT11_EXTCAP_LEN_OPMODE_NOTIFICATION	8
+
+BWL_PRE_PACKED_STRUCT struct dot11_extcap {
+	uint8 extcap[DOT11_EXTCAP_LEN_MAX];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_extcap dot11_extcap_t;
+
+/* TDLS Capabilities */
+#define DOT11_TDLS_CAP_TDLS			37		/* TDLS support */
+#define DOT11_TDLS_CAP_PU_BUFFER_STA	28		/* TDLS Peer U-APSD buffer STA support */
+#define DOT11_TDLS_CAP_PEER_PSM		20		/* TDLS Peer PSM support */
+#define DOT11_TDLS_CAP_CH_SW			30		/* TDLS Channel switch */
+#define DOT11_TDLS_CAP_PROH			38		/* TDLS prohibited */
+#define DOT11_TDLS_CAP_CH_SW_PROH		39		/* TDLS Channel switch prohibited */
+#define DOT11_TDLS_CAP_TDLS_WIDER_BW	61	/* TDLS Wider Band-Width */
+
+#define TDLS_CAP_MAX_BIT		39		/* TDLS max bit defined in ext cap */
+
+/* 802.11h/802.11k Measurement Request/Report IEs */
+/* Measurement Type field */
+#define DOT11_MEASURE_TYPE_BASIC 	0	/* d11 measurement basic type */
+#define DOT11_MEASURE_TYPE_CCA 		1	/* d11 measurement CCA type */
+#define DOT11_MEASURE_TYPE_RPI		2	/* d11 measurement RPI type */
+#define DOT11_MEASURE_TYPE_CHLOAD		3	/* d11 measurement Channel Load type */
+#define DOT11_MEASURE_TYPE_NOISE		4	/* d11 measurement Noise Histogram type */
+#define DOT11_MEASURE_TYPE_BEACON		5	/* d11 measurement Beacon type */
+#define DOT11_MEASURE_TYPE_FRAME	6	/* d11 measurement Frame type */
+#define DOT11_MEASURE_TYPE_STAT		7	/* d11 measurement STA Statistics type */
+#define DOT11_MEASURE_TYPE_LCI		8	/* d11 measurement LCI type */
+#define DOT11_MEASURE_TYPE_TXSTREAM		9	/* d11 measurement TX Stream type */
+#define DOT11_MEASURE_TYPE_PAUSE		255	/* d11 measurement pause type */
+
+/* Measurement Request Modes */
+#define DOT11_MEASURE_MODE_PARALLEL 	(1<<0)	/* d11 measurement parallel */
+#define DOT11_MEASURE_MODE_ENABLE 	(1<<1)	/* d11 measurement enable */
+#define DOT11_MEASURE_MODE_REQUEST	(1<<2)	/* d11 measurement request */
+#define DOT11_MEASURE_MODE_REPORT 	(1<<3)	/* d11 measurement report */
+#define DOT11_MEASURE_MODE_DUR 	(1<<4)	/* d11 measurement dur mandatory */
+/* Measurement Report Modes */
+#define DOT11_MEASURE_MODE_LATE 	(1<<0)	/* d11 measurement late */
+#define DOT11_MEASURE_MODE_INCAPABLE	(1<<1)	/* d11 measurement incapable */
+#define DOT11_MEASURE_MODE_REFUSED	(1<<2)	/* d11 measurement refuse */
+/* Basic Measurement Map bits */
+#define DOT11_MEASURE_BASIC_MAP_BSS	((uint8)(1<<0))	/* d11 measurement basic map BSS */
+#define DOT11_MEASURE_BASIC_MAP_OFDM	((uint8)(1<<1))	/* d11 measurement map OFDM */
+#define DOT11_MEASURE_BASIC_MAP_UKNOWN	((uint8)(1<<2))	/* d11 measurement map unknown */
+#define DOT11_MEASURE_BASIC_MAP_RADAR	((uint8)(1<<3))	/* d11 measurement map radar */
+#define DOT11_MEASURE_BASIC_MAP_UNMEAS	((uint8)(1<<4))	/* d11 measurement map unmeasuremnt */
+
+BWL_PRE_PACKED_STRUCT struct dot11_meas_req {
+	uint8 id;
+	uint8 len;
+	uint8 token;
+	uint8 mode;
+	uint8 type;
+	uint8 channel;
+	uint8 start_time[8];
+	uint16 duration;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_meas_req dot11_meas_req_t;
+#define DOT11_MNG_IE_MREQ_LEN 14	/* d11 measurement request IE length */
+/* length of Measure Request IE data not including variable len */
+#define DOT11_MNG_IE_MREQ_FIXED_LEN 3	/* d11 measurement request IE fixed length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_meas_rep {
+	uint8 id;
+	uint8 len;
+	uint8 token;
+	uint8 mode;
+	uint8 type;
+	BWL_PRE_PACKED_STRUCT union
+	{
+		BWL_PRE_PACKED_STRUCT struct {
+			uint8 channel;
+			uint8 start_time[8];
+			uint16 duration;
+			uint8 map;
+		} BWL_POST_PACKED_STRUCT basic;
+		uint8 data[1];
+	} BWL_POST_PACKED_STRUCT rep;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_meas_rep dot11_meas_rep_t;
+
+/* length of Measure Report IE data not including variable len */
+#define DOT11_MNG_IE_MREP_FIXED_LEN	3	/* d11 measurement response IE fixed length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_meas_rep_basic {
+	uint8 channel;
+	uint8 start_time[8];
+	uint16 duration;
+	uint8 map;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_meas_rep_basic dot11_meas_rep_basic_t;
+#define DOT11_MEASURE_BASIC_REP_LEN	12	/* d11 measurement basic report length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_quiet {
+	uint8 id;
+	uint8 len;
+	uint8 count;	/* TBTTs until beacon interval in quiet starts */
+	uint8 period;	/* Beacon intervals between periodic quiet periods ? */
+	uint16 duration;	/* Length of quiet period, in TU's */
+	uint16 offset;	/* TU's offset from TBTT in Count field */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_quiet dot11_quiet_t;
+
+BWL_PRE_PACKED_STRUCT struct chan_map_tuple {
+	uint8 channel;
+	uint8 map;
+} BWL_POST_PACKED_STRUCT;
+typedef struct chan_map_tuple chan_map_tuple_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_ibss_dfs {
+	uint8 id;
+	uint8 len;
+	uint8 eaddr[ETHER_ADDR_LEN];
+	uint8 interval;
+	chan_map_tuple_t map[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ibss_dfs dot11_ibss_dfs_t;
+
+/* WME Elements */
+#define WME_OUI			"\x00\x50\xf2"	/* WME OUI */
+#define WME_OUI_LEN		3
+#define WME_OUI_TYPE		2	/* WME type */
+#define WME_TYPE		2	/* WME type, deprecated */
+#define WME_SUBTYPE_IE		0	/* Information Element */
+#define WME_SUBTYPE_PARAM_IE	1	/* Parameter Element */
+#define WME_SUBTYPE_TSPEC	2	/* Traffic Specification */
+#define WME_VER			1	/* WME version */
+
+/* WME Access Category Indices (ACIs) */
+#define AC_BE			0	/* Best Effort */
+#define AC_BK			1	/* Background */
+#define AC_VI			2	/* Video */
+#define AC_VO			3	/* Voice */
+#define AC_COUNT		4	/* number of ACs */
+
+typedef uint8 ac_bitmap_t;	/* AC bitmap of (1 << AC_xx) */
+
+#define AC_BITMAP_NONE		0x0	/* No ACs */
+#define AC_BITMAP_ALL		0xf	/* All ACs */
+#define AC_BITMAP_TST(ab, ac)	(((ab) & (1 << (ac))) != 0)
+#define AC_BITMAP_SET(ab, ac)	(((ab) |= (1 << (ac))))
+#define AC_BITMAP_RESET(ab, ac) (((ab) &= ~(1 << (ac))))
+
+/** WME Information Element (IE) */
+BWL_PRE_PACKED_STRUCT struct wme_ie {
+	uint8 oui[3];
+	uint8 type;
+	uint8 subtype;
+	uint8 version;
+	uint8 qosinfo;
+} BWL_POST_PACKED_STRUCT;
+typedef struct wme_ie wme_ie_t;
+#define WME_IE_LEN 7	/* WME IE length */
+
+BWL_PRE_PACKED_STRUCT struct edcf_acparam {
+	uint8	ACI;
+	uint8	ECW;
+	uint16  TXOP;		/* stored in network order (ls octet first) */
+} BWL_POST_PACKED_STRUCT;
+typedef struct edcf_acparam edcf_acparam_t;
+
+/** WME Parameter Element (PE) */
+BWL_PRE_PACKED_STRUCT struct wme_param_ie {
+	uint8 oui[3];
+	uint8 type;
+	uint8 subtype;
+	uint8 version;
+	uint8 qosinfo;
+	uint8 rsvd;
+	edcf_acparam_t acparam[AC_COUNT];
+} BWL_POST_PACKED_STRUCT;
+typedef struct wme_param_ie wme_param_ie_t;
+#define WME_PARAM_IE_LEN            24          /* WME Parameter IE length */
+
+/* QoS Info field for IE as sent from AP */
+#define WME_QI_AP_APSD_MASK         0x80        /* U-APSD Supported mask */
+#define WME_QI_AP_APSD_SHIFT        7           /* U-APSD Supported shift */
+#define WME_QI_AP_COUNT_MASK        0x0f        /* Parameter set count mask */
+#define WME_QI_AP_COUNT_SHIFT       0           /* Parameter set count shift */
+
+/* QoS Info field for IE as sent from STA */
+#define WME_QI_STA_MAXSPLEN_MASK    0x60        /* Max Service Period Length mask */
+#define WME_QI_STA_MAXSPLEN_SHIFT   5           /* Max Service Period Length shift */
+#define WME_QI_STA_APSD_ALL_MASK    0xf         /* APSD all AC bits mask */
+#define WME_QI_STA_APSD_ALL_SHIFT   0           /* APSD all AC bits shift */
+#define WME_QI_STA_APSD_BE_MASK     0x8         /* APSD AC_BE mask */
+#define WME_QI_STA_APSD_BE_SHIFT    3           /* APSD AC_BE shift */
+#define WME_QI_STA_APSD_BK_MASK     0x4         /* APSD AC_BK mask */
+#define WME_QI_STA_APSD_BK_SHIFT    2           /* APSD AC_BK shift */
+#define WME_QI_STA_APSD_VI_MASK     0x2         /* APSD AC_VI mask */
+#define WME_QI_STA_APSD_VI_SHIFT    1           /* APSD AC_VI shift */
+#define WME_QI_STA_APSD_VO_MASK     0x1         /* APSD AC_VO mask */
+#define WME_QI_STA_APSD_VO_SHIFT    0           /* APSD AC_VO shift */
+
+/* ACI */
+#define EDCF_AIFSN_MIN               1           /* AIFSN minimum value */
+#define EDCF_AIFSN_MAX               15          /* AIFSN maximum value */
+#define EDCF_AIFSN_MASK              0x0f        /* AIFSN mask */
+#define EDCF_ACM_MASK                0x10        /* ACM mask */
+#define EDCF_ACI_MASK                0x60        /* ACI mask */
+#define EDCF_ACI_SHIFT               5           /* ACI shift */
+#define EDCF_AIFSN_SHIFT             12          /* 4 MSB(0xFFF) in ifs_ctl for AC idx */
+
+/* ECW */
+#define EDCF_ECW_MIN                 0           /* cwmin/cwmax exponent minimum value */
+#define EDCF_ECW_MAX                 15          /* cwmin/cwmax exponent maximum value */
+#define EDCF_ECW2CW(exp)             ((1 << (exp)) - 1)
+#define EDCF_ECWMIN_MASK             0x0f        /* cwmin exponent form mask */
+#define EDCF_ECWMAX_MASK             0xf0        /* cwmax exponent form mask */
+#define EDCF_ECWMAX_SHIFT            4           /* cwmax exponent form shift */
+
+/* TXOP */
+#define EDCF_TXOP_MIN                0           /* TXOP minimum value */
+#define EDCF_TXOP_MAX                65535       /* TXOP maximum value */
+#define EDCF_TXOP2USEC(txop)         ((txop) << 5)
+
+/* Default BE ACI value for non-WME connection STA */
+#define NON_EDCF_AC_BE_ACI_STA          0x02
+
+/* Default EDCF parameters that AP advertises for STA to use; WMM draft Table 12 */
+#define EDCF_AC_BE_ACI_STA           0x03	/* STA ACI value for best effort AC */
+#define EDCF_AC_BE_ECW_STA           0xA4	/* STA ECW value for best effort AC */
+#define EDCF_AC_BE_TXOP_STA          0x0000	/* STA TXOP value for best effort AC */
+#define EDCF_AC_BK_ACI_STA           0x27	/* STA ACI value for background AC */
+#define EDCF_AC_BK_ECW_STA           0xA4	/* STA ECW value for background AC */
+#define EDCF_AC_BK_TXOP_STA          0x0000	/* STA TXOP value for background AC */
+#define EDCF_AC_VI_ACI_STA           0x42	/* STA ACI value for video AC */
+#define EDCF_AC_VI_ECW_STA           0x43	/* STA ECW value for video AC */
+#define EDCF_AC_VI_TXOP_STA          0x005e	/* STA TXOP value for video AC */
+#define EDCF_AC_VO_ACI_STA           0x62	/* STA ACI value for audio AC */
+#define EDCF_AC_VO_ECW_STA           0x32	/* STA ECW value for audio AC */
+#define EDCF_AC_VO_TXOP_STA          0x002f	/* STA TXOP value for audio AC */
+
+/* Default EDCF parameters that AP uses; WMM draft Table 14 */
+#define EDCF_AC_BE_ACI_AP            0x03	/* AP ACI value for best effort AC */
+#define EDCF_AC_BE_ECW_AP            0x64	/* AP ECW value for best effort AC */
+#define EDCF_AC_BE_TXOP_AP           0x0000	/* AP TXOP value for best effort AC */
+#define EDCF_AC_BK_ACI_AP            0x27	/* AP ACI value for background AC */
+#define EDCF_AC_BK_ECW_AP            0xA4	/* AP ECW value for background AC */
+#define EDCF_AC_BK_TXOP_AP           0x0000	/* AP TXOP value for background AC */
+#define EDCF_AC_VI_ACI_AP            0x41	/* AP ACI value for video AC */
+#define EDCF_AC_VI_ECW_AP            0x43	/* AP ECW value for video AC */
+#define EDCF_AC_VI_TXOP_AP           0x005e	/* AP TXOP value for video AC */
+#define EDCF_AC_VO_ACI_AP            0x61	/* AP ACI value for audio AC */
+#define EDCF_AC_VO_ECW_AP            0x32	/* AP ECW value for audio AC */
+#define EDCF_AC_VO_TXOP_AP           0x002f	/* AP TXOP value for audio AC */
+
+/** EDCA Parameter IE */
+BWL_PRE_PACKED_STRUCT struct edca_param_ie {
+	uint8 qosinfo;
+	uint8 rsvd;
+	edcf_acparam_t acparam[AC_COUNT];
+} BWL_POST_PACKED_STRUCT;
+typedef struct edca_param_ie edca_param_ie_t;
+#define EDCA_PARAM_IE_LEN            18          /* EDCA Parameter IE length */
+
+/** QoS Capability IE */
+BWL_PRE_PACKED_STRUCT struct qos_cap_ie {
+	uint8 qosinfo;
+} BWL_POST_PACKED_STRUCT;
+typedef struct qos_cap_ie qos_cap_ie_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_qbss_load_ie {
+	uint8 id; 			/* 11, DOT11_MNG_QBSS_LOAD_ID */
+	uint8 length;
+	uint16 station_count; 		/* total number of STAs associated */
+	uint8 channel_utilization;	/* % of time, normalized to 255, QAP sensed medium busy */
+	uint16 aac; 			/* available admission capacity */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_qbss_load_ie dot11_qbss_load_ie_t;
+#define BSS_LOAD_IE_SIZE 	7	/* BSS load IE size */
+
+#define WLC_QBSS_LOAD_CHAN_FREE_MAX	0xff	/* max for channel free score */
+
+/* nom_msdu_size */
+#define FIXED_MSDU_SIZE 0x8000		/* MSDU size is fixed */
+#define MSDU_SIZE_MASK	0x7fff		/* (Nominal or fixed) MSDU size */
+
+/* surplus_bandwidth */
+/* Represented as 3 bits of integer, binary point, 13 bits fraction */
+#define	INTEGER_SHIFT	13	/* integer shift */
+#define FRACTION_MASK	0x1FFF	/* fraction mask */
+
+/** Management Notification Frame */
+BWL_PRE_PACKED_STRUCT struct dot11_management_notification {
+	uint8 category;			/* DOT11_ACTION_NOTIFICATION */
+	uint8 action;
+	uint8 token;
+	uint8 status;
+	uint8 data[1];			/* Elements */
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_MGMT_NOTIFICATION_LEN 4	/* Fixed length */
+
+/** Timeout Interval IE */
+BWL_PRE_PACKED_STRUCT struct ti_ie {
+	uint8 ti_type;
+	uint32 ti_val;
+} BWL_POST_PACKED_STRUCT;
+typedef struct ti_ie ti_ie_t;
+#define TI_TYPE_REASSOC_DEADLINE	1
+#define TI_TYPE_KEY_LIFETIME		2
+
+/* WME Action Codes */
+#define WME_ADDTS_REQUEST	0	/* WME ADDTS request */
+#define WME_ADDTS_RESPONSE	1	/* WME ADDTS response */
+#define WME_DELTS_REQUEST	2	/* WME DELTS request */
+
+/* WME Setup Response Status Codes */
+#define WME_ADMISSION_ACCEPTED		0	/* WME admission accepted */
+#define WME_INVALID_PARAMETERS		1	/* WME invalide parameters */
+#define WME_ADMISSION_REFUSED		3	/* WME admission refused */
+
+/* Macro to take a pointer to a beacon or probe response
+ * body and return the char* pointer to the SSID info element
+ */
+#define BCN_PRB_SSID(body) ((char*)(body) + DOT11_BCN_PRB_LEN)
+
+/* Authentication frame payload constants */
+#define DOT11_OPEN_SYSTEM	0	/* d11 open authentication */
+#define DOT11_SHARED_KEY	1	/* d11 shared authentication */
+#define DOT11_FAST_BSS		2	/* d11 fast bss authentication */
+#define DOT11_CHALLENGE_LEN	128	/* d11 challenge text length */
+
+/* Frame control macros */
+#define FC_PVER_MASK		0x3	/* PVER mask */
+#define FC_PVER_SHIFT		0	/* PVER shift */
+#define FC_TYPE_MASK		0xC	/* type mask */
+#define FC_TYPE_SHIFT		2	/* type shift */
+#define FC_SUBTYPE_MASK		0xF0	/* subtype mask */
+#define FC_SUBTYPE_SHIFT	4	/* subtype shift */
+#define FC_TODS			0x100	/* to DS */
+#define FC_TODS_SHIFT		8	/* to DS shift */
+#define FC_FROMDS		0x200	/* from DS */
+#define FC_FROMDS_SHIFT		9	/* from DS shift */
+#define FC_MOREFRAG		0x400	/* more frag. */
+#define FC_MOREFRAG_SHIFT	10	/* more frag. shift */
+#define FC_RETRY		0x800	/* retry */
+#define FC_RETRY_SHIFT		11	/* retry shift */
+#define FC_PM			0x1000	/* PM */
+#define FC_PM_SHIFT		12	/* PM shift */
+#define FC_MOREDATA		0x2000	/* more data */
+#define FC_MOREDATA_SHIFT	13	/* more data shift */
+#define FC_WEP			0x4000	/* WEP */
+#define FC_WEP_SHIFT		14	/* WEP shift */
+#define FC_ORDER		0x8000	/* order */
+#define FC_ORDER_SHIFT		15	/* order shift */
+
+/* sequence control macros */
+#define SEQNUM_SHIFT		4	/* seq. number shift */
+#define SEQNUM_MAX		0x1000	/* max seqnum + 1 */
+#define FRAGNUM_MASK		0xF	/* frag. number mask */
+
+/* Frame Control type/subtype defs */
+
+/* FC Types */
+#define FC_TYPE_MNG		0	/* management type */
+#define FC_TYPE_CTL		1	/* control type */
+#define FC_TYPE_DATA		2	/* data type */
+
+/* Management Subtypes */
+#define FC_SUBTYPE_ASSOC_REQ		0	/* assoc. request */
+#define FC_SUBTYPE_ASSOC_RESP		1	/* assoc. response */
+#define FC_SUBTYPE_REASSOC_REQ		2	/* reassoc. request */
+#define FC_SUBTYPE_REASSOC_RESP		3	/* reassoc. response */
+#define FC_SUBTYPE_PROBE_REQ		4	/* probe request */
+#define FC_SUBTYPE_PROBE_RESP		5	/* probe response */
+#define FC_SUBTYPE_BEACON		8	/* beacon */
+#define FC_SUBTYPE_ATIM			9	/* ATIM */
+#define FC_SUBTYPE_DISASSOC		10	/* disassoc. */
+#define FC_SUBTYPE_AUTH			11	/* authentication */
+#define FC_SUBTYPE_DEAUTH		12	/* de-authentication */
+#define FC_SUBTYPE_ACTION		13	/* action */
+#define FC_SUBTYPE_ACTION_NOACK		14	/* action no-ack */
+
+/* Control Subtypes */
+#define FC_SUBTYPE_CTL_WRAPPER		7	/* Control Wrapper */
+#define FC_SUBTYPE_BLOCKACK_REQ		8	/* Block Ack Req */
+#define FC_SUBTYPE_BLOCKACK		9	/* Block Ack */
+#define FC_SUBTYPE_PS_POLL		10	/* PS poll */
+#define FC_SUBTYPE_RTS			11	/* RTS */
+#define FC_SUBTYPE_CTS			12	/* CTS */
+#define FC_SUBTYPE_ACK			13	/* ACK */
+#define FC_SUBTYPE_CF_END		14	/* CF-END */
+#define FC_SUBTYPE_CF_END_ACK		15	/* CF-END ACK */
+
+/* Data Subtypes */
+#define FC_SUBTYPE_DATA			0	/* Data */
+#define FC_SUBTYPE_DATA_CF_ACK		1	/* Data + CF-ACK */
+#define FC_SUBTYPE_DATA_CF_POLL		2	/* Data + CF-Poll */
+#define FC_SUBTYPE_DATA_CF_ACK_POLL	3	/* Data + CF-Ack + CF-Poll */
+#define FC_SUBTYPE_NULL			4	/* Null */
+#define FC_SUBTYPE_CF_ACK		5	/* CF-Ack */
+#define FC_SUBTYPE_CF_POLL		6	/* CF-Poll */
+#define FC_SUBTYPE_CF_ACK_POLL		7	/* CF-Ack + CF-Poll */
+#define FC_SUBTYPE_QOS_DATA		8	/* QoS Data */
+#define FC_SUBTYPE_QOS_DATA_CF_ACK	9	/* QoS Data + CF-Ack */
+#define FC_SUBTYPE_QOS_DATA_CF_POLL	10	/* QoS Data + CF-Poll */
+#define FC_SUBTYPE_QOS_DATA_CF_ACK_POLL	11	/* QoS Data + CF-Ack + CF-Poll */
+#define FC_SUBTYPE_QOS_NULL		12	/* QoS Null */
+#define FC_SUBTYPE_QOS_CF_POLL		14	/* QoS CF-Poll */
+#define FC_SUBTYPE_QOS_CF_ACK_POLL	15	/* QoS CF-Ack + CF-Poll */
+
+/* Data Subtype Groups */
+#define FC_SUBTYPE_ANY_QOS(s)		(((s) & 8) != 0)
+#define FC_SUBTYPE_ANY_NULL(s)		(((s) & 4) != 0)
+#define FC_SUBTYPE_ANY_CF_POLL(s)	(((s) & 2) != 0)
+#define FC_SUBTYPE_ANY_CF_ACK(s)	(((s) & 1) != 0)
+#define FC_SUBTYPE_ANY_PSPOLL(s)	(((s) & 10) != 0)
+
+/* Type/Subtype Combos */
+#define FC_KIND_MASK		(FC_TYPE_MASK | FC_SUBTYPE_MASK)	/* FC kind mask */
+
+#define FC_KIND(t, s)	(((t) << FC_TYPE_SHIFT) | ((s) << FC_SUBTYPE_SHIFT))	/* FC kind */
+
+#define FC_SUBTYPE(fc)	(((fc) & FC_SUBTYPE_MASK) >> FC_SUBTYPE_SHIFT)	/* Subtype from FC */
+#define FC_TYPE(fc)	(((fc) & FC_TYPE_MASK) >> FC_TYPE_SHIFT)	/* Type from FC */
+
+#define FC_ASSOC_REQ	FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ASSOC_REQ)	/* assoc. request */
+#define FC_ASSOC_RESP	FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ASSOC_RESP)	/* assoc. response */
+#define FC_REASSOC_REQ	FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_REASSOC_REQ)	/* reassoc. request */
+#define FC_REASSOC_RESP	FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_REASSOC_RESP)	/* reassoc. response */
+#define FC_PROBE_REQ	FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_PROBE_REQ)	/* probe request */
+#define FC_PROBE_RESP	FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_PROBE_RESP)	/* probe response */
+#define FC_BEACON	FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_BEACON)		/* beacon */
+#define FC_ATIM		FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ATIM)		/* ATIM */
+#define FC_DISASSOC	FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_DISASSOC)	/* disassoc */
+#define FC_AUTH		FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_AUTH)		/* authentication */
+#define FC_DEAUTH	FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_DEAUTH)		/* deauthentication */
+#define FC_ACTION	FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ACTION)		/* action */
+#define FC_ACTION_NOACK	FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ACTION_NOACK)	/* action no-ack */
+
+#define FC_CTL_WRAPPER	FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CTL_WRAPPER)	/* Control Wrapper */
+#define FC_BLOCKACK_REQ	FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_BLOCKACK_REQ)	/* Block Ack Req */
+#define FC_BLOCKACK	FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_BLOCKACK)	/* Block Ack */
+#define FC_PS_POLL	FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_PS_POLL)	/* PS poll */
+#define FC_RTS		FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_RTS)		/* RTS */
+#define FC_CTS		FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CTS)		/* CTS */
+#define FC_ACK		FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_ACK)		/* ACK */
+#define FC_CF_END	FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CF_END)		/* CF-END */
+#define FC_CF_END_ACK	FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CF_END_ACK)	/* CF-END ACK */
+
+#define FC_DATA		FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_DATA)		/* data */
+#define FC_NULL_DATA	FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_NULL)		/* null data */
+#define FC_DATA_CF_ACK	FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_DATA_CF_ACK)	/* data CF ACK */
+#define FC_QOS_DATA	FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_QOS_DATA)	/* QoS data */
+#define FC_QOS_NULL	FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_QOS_NULL)	/* QoS null */
+
+/* QoS Control Field */
+
+/* 802.1D Priority */
+#define QOS_PRIO_SHIFT		0	/* QoS priority shift */
+#define QOS_PRIO_MASK		0x0007	/* QoS priority mask */
+#define QOS_PRIO(qos)		(((qos) & QOS_PRIO_MASK) >> QOS_PRIO_SHIFT)	/* QoS priority */
+
+/* Traffic Identifier */
+#define QOS_TID_SHIFT		0	/* QoS TID shift */
+#define QOS_TID_MASK		0x000f	/* QoS TID mask */
+#define QOS_TID(qos)		(((qos) & QOS_TID_MASK) >> QOS_TID_SHIFT)	/* QoS TID */
+
+/* End of Service Period (U-APSD) */
+#define QOS_EOSP_SHIFT		4	/* QoS End of Service Period shift */
+#define QOS_EOSP_MASK		0x0010	/* QoS End of Service Period mask */
+#define QOS_EOSP(qos)		(((qos) & QOS_EOSP_MASK) >> QOS_EOSP_SHIFT)	/* Qos EOSP */
+
+/* Ack Policy */
+#define QOS_ACK_NORMAL_ACK	0	/* Normal Ack */
+#define QOS_ACK_NO_ACK		1	/* No Ack (eg mcast) */
+#define QOS_ACK_NO_EXP_ACK	2	/* No Explicit Ack */
+#define QOS_ACK_BLOCK_ACK	3	/* Block Ack */
+#define QOS_ACK_SHIFT		5	/* QoS ACK shift */
+#define QOS_ACK_MASK		0x0060	/* QoS ACK mask */
+#define QOS_ACK(qos)		(((qos) & QOS_ACK_MASK) >> QOS_ACK_SHIFT)	/* QoS ACK */
+
+/* A-MSDU flag */
+#define QOS_AMSDU_SHIFT		7	/* AMSDU shift */
+#define QOS_AMSDU_MASK		0x0080	/* AMSDU mask */
+
+/* Management Frames */
+
+/* Management Frame Constants */
+
+/* Fixed fields */
+#define DOT11_MNG_AUTH_ALGO_LEN		2	/* d11 management auth. algo. length */
+#define DOT11_MNG_AUTH_SEQ_LEN		2	/* d11 management auth. seq. length */
+#define DOT11_MNG_BEACON_INT_LEN	2	/* d11 management beacon interval length */
+#define DOT11_MNG_CAP_LEN		2	/* d11 management cap. length */
+#define DOT11_MNG_AP_ADDR_LEN		6	/* d11 management AP address length */
+#define DOT11_MNG_LISTEN_INT_LEN	2	/* d11 management listen interval length */
+#define DOT11_MNG_REASON_LEN		2	/* d11 management reason length */
+#define DOT11_MNG_AID_LEN		2	/* d11 management AID length */
+#define DOT11_MNG_STATUS_LEN		2	/* d11 management status length */
+#define DOT11_MNG_TIMESTAMP_LEN		8	/* d11 management timestamp length */
+
+/* DUR/ID field in assoc resp is 0xc000 | AID */
+#define DOT11_AID_MASK			0x3fff	/* d11 AID mask */
+
+/* Reason Codes */
+#define DOT11_RC_RESERVED		0	/* d11 RC reserved */
+#define DOT11_RC_UNSPECIFIED		1	/* Unspecified reason */
+#define DOT11_RC_AUTH_INVAL		2	/* Previous authentication no longer valid */
+#define DOT11_RC_DEAUTH_LEAVING		3	/* Deauthenticated because sending station
+						 * is leaving (or has left) IBSS or ESS
+						 */
+#define DOT11_RC_INACTIVITY		4	/* Disassociated due to inactivity */
+#define DOT11_RC_BUSY			5	/* Disassociated because AP is unable to handle
+						 * all currently associated stations
+						 */
+#define DOT11_RC_INVAL_CLASS_2		6	/* Class 2 frame received from
+						 * nonauthenticated station
+						 */
+#define DOT11_RC_INVAL_CLASS_3		7	/* Class 3 frame received from
+						 *  nonassociated station
+						 */
+#define DOT11_RC_DISASSOC_LEAVING	8	/* Disassociated because sending station is
+						 * leaving (or has left) BSS
+						 */
+#define DOT11_RC_NOT_AUTH		9	/* Station requesting (re)association is not
+						 * authenticated with responding station
+						 */
+#define DOT11_RC_BAD_PC			10	/* Unacceptable power capability element */
+#define DOT11_RC_BAD_CHANNELS		11	/* Unacceptable supported channels element */
+/* 12 is unused */
+
+/* 32-39 are QSTA specific reasons added in 11e */
+#define DOT11_RC_UNSPECIFIED_QOS	32	/* unspecified QoS-related reason */
+#define DOT11_RC_INSUFFCIENT_BW		33	/* QAP lacks sufficient bandwidth */
+#define DOT11_RC_EXCESSIVE_FRAMES	34	/* excessive number of frames need ack */
+#define DOT11_RC_TX_OUTSIDE_TXOP	35	/* transmitting outside the limits of txop */
+#define DOT11_RC_LEAVING_QBSS		36	/* QSTA is leaving the QBSS (or restting) */
+#define DOT11_RC_BAD_MECHANISM		37	/* does not want to use the mechanism */
+#define DOT11_RC_SETUP_NEEDED		38	/* mechanism needs a setup */
+#define DOT11_RC_TIMEOUT		39	/* timeout */
+
+#define DOT11_RC_MAX			23	/* Reason codes > 23 are reserved */
+
+#define DOT11_RC_TDLS_PEER_UNREACH	25
+#define DOT11_RC_TDLS_DOWN_UNSPECIFIED	26
+
+/* Status Codes */
+#define DOT11_SC_SUCCESS		0	/* Successful */
+#define DOT11_SC_FAILURE		1	/* Unspecified failure */
+#define DOT11_SC_TDLS_WAKEUP_SCH_ALT 2	/* TDLS wakeup schedule rejected but alternative  */
+					/* schedule provided */
+#define DOT11_SC_TDLS_WAKEUP_SCH_REJ 3	/* TDLS wakeup schedule rejected */
+#define DOT11_SC_TDLS_SEC_DISABLED	5	/* TDLS Security disabled */
+#define DOT11_SC_LIFETIME_REJ		6	/* Unacceptable lifetime */
+#define DOT11_SC_NOT_SAME_BSS		7	/* Not in same BSS */
+#define DOT11_SC_CAP_MISMATCH		10	/* Cannot support all requested
+						 * capabilities in the Capability
+						 * Information field
+						 */
+#define DOT11_SC_REASSOC_FAIL		11	/* Reassociation denied due to inability
+						 * to confirm that association exists
+						 */
+#define DOT11_SC_ASSOC_FAIL		12	/* Association denied due to reason
+						 * outside the scope of this standard
+						 */
+#define DOT11_SC_AUTH_MISMATCH		13	/* Responding station does not support
+						 * the specified authentication
+						 * algorithm
+						 */
+#define DOT11_SC_AUTH_SEQ		14	/* Received an Authentication frame
+						 * with authentication transaction
+						 * sequence number out of expected
+						 * sequence
+						 */
+#define DOT11_SC_AUTH_CHALLENGE_FAIL	15	/* Authentication rejected because of
+						 * challenge failure
+						 */
+#define DOT11_SC_AUTH_TIMEOUT		16	/* Authentication rejected due to timeout
+						 * waiting for next frame in sequence
+						 */
+#define DOT11_SC_ASSOC_BUSY_FAIL	17	/* Association denied because AP is
+						 * unable to handle additional
+						 * associated stations
+						 */
+#define DOT11_SC_ASSOC_RATE_MISMATCH	18	/* Association denied due to requesting
+						 * station not supporting all of the
+						 * data rates in the BSSBasicRateSet
+						 * parameter
+						 */
+#define DOT11_SC_ASSOC_SHORT_REQUIRED	19	/* Association denied due to requesting
+						 * station not supporting the Short
+						 * Preamble option
+						 */
+#define DOT11_SC_ASSOC_PBCC_REQUIRED	20	/* Association denied due to requesting
+						 * station not supporting the PBCC
+						 * Modulation option
+						 */
+#define DOT11_SC_ASSOC_AGILITY_REQUIRED	21	/* Association denied due to requesting
+						 * station not supporting the Channel
+						 * Agility option
+						 */
+#define DOT11_SC_ASSOC_SPECTRUM_REQUIRED	22	/* Association denied because Spectrum
+							 * Management capability is required.
+							 */
+#define DOT11_SC_ASSOC_BAD_POWER_CAP	23	/* Association denied because the info
+						 * in the Power Cap element is
+						 * unacceptable.
+						 */
+#define DOT11_SC_ASSOC_BAD_SUP_CHANNELS	24	/* Association denied because the info
+						 * in the Supported Channel element is
+						 * unacceptable
+						 */
+#define DOT11_SC_ASSOC_SHORTSLOT_REQUIRED	25	/* Association denied due to requesting
+							 * station not supporting the Short Slot
+							 * Time option
+							 */
+#define DOT11_SC_ASSOC_DSSSOFDM_REQUIRED 26	/* Association denied because requesting station
+						 * does not support the DSSS-OFDM option
+						 */
+#define DOT11_SC_ASSOC_HT_REQUIRED	27	/* Association denied because the requesting
+						 * station does not support HT features
+						 */
+#define DOT11_SC_ASSOC_R0KH_UNREACHABLE	28	/* Association denied due to AP
+						 * being unable to reach the R0 Key Holder
+						 */
+#define DOT11_SC_ASSOC_TRY_LATER	30	/* Association denied temporarily, try again later
+						 */
+#define DOT11_SC_ASSOC_MFP_VIOLATION	31	/* Association denied due to Robust Management
+						 * frame policy violation
+						 */
+
+#define	DOT11_SC_DECLINED		37	/* request declined */
+#define	DOT11_SC_INVALID_PARAMS		38	/* One or more params have invalid values */
+#define DOT11_SC_INVALID_PAIRWISE_CIPHER	42 /* invalid pairwise cipher */
+#define	DOT11_SC_INVALID_AKMP		43	/* Association denied due to invalid AKMP */
+#define DOT11_SC_INVALID_RSNIE_CAP	45	/* invalid RSN IE capabilities */
+#define DOT11_SC_DLS_NOT_ALLOWED	48	/* DLS is not allowed in the BSS by policy */
+#define	DOT11_SC_INVALID_PMKID		53	/* Association denied due to invalid PMKID */
+#define	DOT11_SC_INVALID_MDID		54	/* Association denied due to invalid MDID */
+#define	DOT11_SC_INVALID_FTIE		55	/* Association denied due to invalid FTIE */
+
+#define DOT11_SC_ADV_PROTO_NOT_SUPPORTED	59	/* ad proto not supported */
+#define DOT11_SC_NO_OUTSTAND_REQ			60	/* no outstanding req */
+#define DOT11_SC_RSP_NOT_RX_FROM_SERVER		61	/* no response from server */
+#define DOT11_SC_TIMEOUT					62	/* timeout */
+#define DOT11_SC_QUERY_RSP_TOO_LARGE		63	/* query rsp too large */
+#define DOT11_SC_SERVER_UNREACHABLE			65	/* server unreachable */
+
+#define DOT11_SC_UNEXP_MSG			70	/* Unexpected message */
+#define DOT11_SC_INVALID_SNONCE		71	/* Invalid SNonce */
+#define DOT11_SC_INVALID_RSNIE		72	/* Invalid contents of RSNIE */
+#define DOT11_SC_ASSOC_VHT_REQUIRED	104	/* Association denied because the requesting
+						 * station does not support VHT features.
+						 */
+
+#define DOT11_SC_TRANSMIT_FAILURE	79	/* transmission failure */
+
+/* Info Elts, length of INFORMATION portion of Info Elts */
+#define DOT11_MNG_DS_PARAM_LEN			1	/* d11 management DS parameter length */
+#define DOT11_MNG_IBSS_PARAM_LEN		2	/* d11 management IBSS parameter length */
+
+/* TIM Info element has 3 bytes fixed info in INFORMATION field,
+ * followed by 1 to 251 bytes of Partial Virtual Bitmap
+ */
+#define DOT11_MNG_TIM_FIXED_LEN			3	/* d11 management TIM fixed length */
+#define DOT11_MNG_TIM_DTIM_COUNT		0	/* d11 management DTIM count */
+#define DOT11_MNG_TIM_DTIM_PERIOD		1	/* d11 management DTIM period */
+#define DOT11_MNG_TIM_BITMAP_CTL		2	/* d11 management TIM BITMAP control  */
+#define DOT11_MNG_TIM_PVB			3	/* d11 management TIM PVB */
+
+/* TLV defines */
+#define TLV_TAG_OFF		0	/* tag offset */
+#define TLV_LEN_OFF		1	/* length offset */
+#define TLV_HDR_LEN		2	/* header length */
+#define TLV_BODY_OFF		2	/* body offset */
+#define TLV_BODY_LEN_MAX	255	/* max body length */
+
+/* Management Frame Information Element IDs */
+#define DOT11_MNG_SSID_ID			0	/* d11 management SSID id */
+#define DOT11_MNG_RATES_ID			1	/* d11 management rates id */
+#define DOT11_MNG_FH_PARMS_ID			2	/* d11 management FH parameter id */
+#define DOT11_MNG_DS_PARMS_ID			3	/* d11 management DS parameter id */
+#define DOT11_MNG_CF_PARMS_ID			4	/* d11 management CF parameter id */
+#define DOT11_MNG_TIM_ID			5	/* d11 management TIM id */
+#define DOT11_MNG_IBSS_PARMS_ID			6	/* d11 management IBSS parameter id */
+#define DOT11_MNG_COUNTRY_ID			7	/* d11 management country id */
+#define DOT11_MNG_HOPPING_PARMS_ID		8	/* d11 management hopping parameter id */
+#define DOT11_MNG_HOPPING_TABLE_ID		9	/* d11 management hopping table id */
+#define DOT11_MNG_REQUEST_ID			10	/* d11 management request id */
+#define DOT11_MNG_QBSS_LOAD_ID 			11	/* d11 management QBSS Load id */
+#define DOT11_MNG_EDCA_PARAM_ID			12	/* 11E EDCA Parameter id */
+#define DOT11_MNG_TSPEC_ID			13	/* d11 management TSPEC id */
+#define DOT11_MNG_TCLAS_ID			14	/* d11 management TCLAS id */
+#define DOT11_MNG_CHALLENGE_ID			16	/* d11 management chanllenge id */
+#define DOT11_MNG_PWR_CONSTRAINT_ID		32	/* 11H PowerConstraint */
+#define DOT11_MNG_PWR_CAP_ID			33	/* 11H PowerCapability */
+#define DOT11_MNG_TPC_REQUEST_ID 		34	/* 11H TPC Request */
+#define DOT11_MNG_TPC_REPORT_ID			35	/* 11H TPC Report */
+#define DOT11_MNG_SUPP_CHANNELS_ID		36	/* 11H Supported Channels */
+#define DOT11_MNG_CHANNEL_SWITCH_ID		37	/* 11H ChannelSwitch Announcement */
+#define DOT11_MNG_MEASURE_REQUEST_ID		38	/* 11H MeasurementRequest */
+#define DOT11_MNG_MEASURE_REPORT_ID		39	/* 11H MeasurementReport */
+#define DOT11_MNG_QUIET_ID			40	/* 11H Quiet */
+#define DOT11_MNG_IBSS_DFS_ID			41	/* 11H IBSS_DFS */
+#define DOT11_MNG_ERP_ID			42	/* d11 management ERP id */
+#define DOT11_MNG_TS_DELAY_ID			43	/* d11 management TS Delay id */
+#define DOT11_MNG_TCLAS_PROC_ID			44	/* d11 management TCLAS processing id */
+#define	DOT11_MNG_HT_CAP			45	/* d11 mgmt HT cap id */
+#define DOT11_MNG_QOS_CAP_ID			46	/* 11E QoS Capability id */
+#define DOT11_MNG_NONERP_ID			47	/* d11 management NON-ERP id */
+#define DOT11_MNG_RSN_ID			48	/* d11 management RSN id */
+#define DOT11_MNG_EXT_RATES_ID			50	/* d11 management ext. rates id */
+#define DOT11_MNG_AP_CHREP_ID			51	/* 11k AP Channel report id */
+#define DOT11_MNG_NEIGHBOR_REP_ID		52	/* 11k & 11v Neighbor report id */
+#define DOT11_MNG_RCPI_ID			53	/* 11k RCPI */
+#define DOT11_MNG_MDIE_ID			54	/* 11r Mobility domain id */
+#define DOT11_MNG_FTIE_ID			55	/* 11r Fast Bss Transition id */
+#define DOT11_MNG_FT_TI_ID			56	/* 11r Timeout Interval id */
+#define DOT11_MNG_RDE_ID			57	/* 11r RIC Data Element id */
+#define	DOT11_MNG_REGCLASS_ID			59	/* d11 management regulatory class id */
+#define DOT11_MNG_EXT_CSA_ID			60	/* d11 Extended CSA */
+#define	DOT11_MNG_HT_ADD			61	/* d11 mgmt additional HT info */
+#define	DOT11_MNG_EXT_CHANNEL_OFFSET		62	/* d11 mgmt ext channel offset */
+#define DOT11_MNG_BSS_AVR_ACCESS_DELAY_ID	63	/* 11k bss average access delay */
+#define DOT11_MNG_ANTENNA_ID			64	/* 11k antenna id */
+#define DOT11_MNG_RSNI_ID			65	/* 11k RSNI id */
+#define DOT11_MNG_MEASUREMENT_PILOT_TX_ID	66	/* 11k measurement pilot tx info id */
+#define DOT11_MNG_BSS_AVAL_ADMISSION_CAP_ID	67	/* 11k bss aval admission cap id */
+#define DOT11_MNG_BSS_AC_ACCESS_DELAY_ID	68	/* 11k bss AC access delay id */
+#define DOT11_MNG_WAPI_ID			68	/* d11 management WAPI id */
+#define DOT11_MNG_TIME_ADVERTISE_ID	69	/* 11p time advertisement */
+#define DOT11_MNG_RRM_CAP_ID		70	/* 11k radio measurement capability */
+#define DOT11_MNG_MULTIPLE_BSSID_ID		71	/* 11k multiple BSSID id */
+#define	DOT11_MNG_HT_BSS_COEXINFO_ID		72	/* d11 mgmt OBSS Coexistence INFO */
+#define	DOT11_MNG_HT_BSS_CHANNEL_REPORT_ID	73	/* d11 mgmt OBSS Intolerant Channel list */
+#define	DOT11_MNG_HT_OBSS_ID			74	/* d11 mgmt OBSS HT info */
+#define DOT11_MNG_MMIE_ID			76	/* d11 mgmt MIC IE */
+#define DOT11_MNG_FMS_DESCR_ID			86	/* 11v FMS descriptor */
+#define DOT11_MNG_FMS_REQ_ID			87	/* 11v FMS request id */
+#define DOT11_MNG_FMS_RESP_ID			88	/* 11v FMS response id */
+#define DOT11_MNG_BSS_MAX_IDLE_PERIOD_ID	90	/* 11v bss max idle id */
+#define DOT11_MNG_TFS_REQUEST_ID		91	/* 11v tfs request id */
+#define DOT11_MNG_TFS_RESPONSE_ID		92	/* 11v tfs response id */
+#define DOT11_MNG_WNM_SLEEP_MODE_ID		93	/* 11v wnm-sleep mode id */
+#define DOT11_MNG_TIMBC_REQ_ID			94	/* 11v TIM broadcast request id */
+#define DOT11_MNG_TIMBC_RESP_ID			95	/* 11v TIM broadcast response id */
+#define DOT11_MNG_CHANNEL_USAGE			97	/* 11v channel usage */
+#define DOT11_MNG_TIME_ZONE_ID			98	/* 11v time zone */
+#define DOT11_MNG_DMS_REQUEST_ID		99	/* 11v dms request id */
+#define DOT11_MNG_DMS_RESPONSE_ID		100	/* 11v dms response id */
+#define DOT11_MNG_LINK_IDENTIFIER_ID		101	/* 11z TDLS Link Identifier IE */
+#define DOT11_MNG_WAKEUP_SCHEDULE_ID		102	/* 11z TDLS Wakeup Schedule IE */
+#define DOT11_MNG_CHANNEL_SWITCH_TIMING_ID	104	/* 11z TDLS Channel Switch Timing IE */
+#define DOT11_MNG_PTI_CONTROL_ID		105	/* 11z TDLS PTI Control IE */
+#define DOT11_MNG_PU_BUFFER_STATUS_ID	106	/* 11z TDLS PU Buffer Status IE */
+#define DOT11_MNG_INTERWORKING_ID		107	/* 11u interworking */
+#define DOT11_MNG_ADVERTISEMENT_ID		108	/* 11u advertisement protocol */
+#define DOT11_MNG_EXP_BW_REQ_ID			109	/* 11u expedited bandwith request */
+#define DOT11_MNG_QOS_MAP_ID			110	/* 11u QoS map set */
+#define DOT11_MNG_ROAM_CONSORT_ID		111	/* 11u roaming consortium */
+#define DOT11_MNG_EMERGCY_ALERT_ID		112	/* 11u emergency alert identifier */
+#define	DOT11_MNG_EXT_CAP_ID			127	/* d11 mgmt ext capability */
+#define	DOT11_MNG_VHT_CAP_ID			191	/* d11 mgmt VHT cap id */
+#define	DOT11_MNG_VHT_OPERATION_ID		192	/* d11 mgmt VHT op id */
+#define DOT11_MNG_WIDE_BW_CHANNEL_SWITCH_ID		194	/* Wide BW Channel Switch IE */
+#define DOT11_MNG_VHT_TRANSMIT_POWER_ENVELOPE_ID	195	/* VHT transmit Power Envelope IE */
+#define DOT11_MNG_CHANNEL_SWITCH_WRAPPER_ID		196	/* Channel Switch Wrapper IE */
+#define DOT11_MNG_AID_ID					197	/* Association ID  IE */
+#define	DOT11_MNG_OPER_MODE_NOTIF_ID	199	/* d11 mgmt VHT oper mode notif */
+
+
+#define DOT11_MNG_WPA_ID			221	/* d11 management WPA id */
+#define DOT11_MNG_PROPR_ID			221
+/* should start using this one instead of above two */
+#define DOT11_MNG_VS_ID				221	/* d11 management Vendor Specific IE */
+
+/* Rate Defines */
+
+/* Valid rates for the Supported Rates and Extended Supported Rates IEs.
+ * Encoding is the rate in 500kbps units, rouding up for fractional values.
+ * 802.11-2012, section 6.5.5.2, DATA_RATE parameter enumerates all the values.
+ * The rate values cover DSSS, HR/DSSS, ERP, and OFDM phy rates.
+ * The defines below do not cover the rates specific to 10MHz, {3, 4.5, 27},
+ * and 5MHz, {1.5, 2.25, 3, 4.5, 13.5}, which are not supported by Broadcom devices.
+ */
+
+#define DOT11_RATE_1M   2       /* 1  Mbps in 500kbps units */
+#define DOT11_RATE_2M   4       /* 2  Mbps in 500kbps units */
+#define DOT11_RATE_5M5  11      /* 5.5 Mbps in 500kbps units */
+#define DOT11_RATE_11M  22      /* 11 Mbps in 500kbps units */
+#define DOT11_RATE_6M   12      /* 6  Mbps in 500kbps units */
+#define DOT11_RATE_9M   18      /* 9  Mbps in 500kbps units */
+#define DOT11_RATE_12M  24      /* 12 Mbps in 500kbps units */
+#define DOT11_RATE_18M  36      /* 18 Mbps in 500kbps units */
+#define DOT11_RATE_24M  48      /* 24 Mbps in 500kbps units */
+#define DOT11_RATE_36M  72      /* 36 Mbps in 500kbps units */
+#define DOT11_RATE_48M  96      /* 48 Mbps in 500kbps units */
+#define DOT11_RATE_54M  108     /* 54 Mbps in 500kbps units */
+#define DOT11_RATE_MAX  108     /* highest rate (54 Mbps) in 500kbps units */
+
+/* Supported Rates and Extended Supported Rates IEs
+ * The supported rates octets are defined a the MSB indicatin a Basic Rate
+ * and bits 0-6 as the rate value
+ */
+#define DOT11_RATE_BASIC                0x80 /* flag for a Basic Rate */
+#define DOT11_RATE_MASK                 0x7F /* mask for numeric part of rate */
+
+/* BSS Membership Selector parameters
+ * 802.11-2012 and 802.11ac_D4.0 sec 8.4.2.3
+ * These selector values are advertised in Supported Rates and Extended Supported Rates IEs
+ * in the supported rates list with the Basic rate bit set.
+ * Constants below include the basic bit.
+ */
+#define DOT11_BSS_MEMBERSHIP_HT         0xFF  /* Basic 0x80 + 127, HT Required to join */
+#define DOT11_BSS_MEMBERSHIP_VHT        0xFE  /* Basic 0x80 + 126, VHT Required to join */
+
+/* ERP info element bit values */
+#define DOT11_MNG_ERP_LEN			1	/* ERP is currently 1 byte long */
+#define DOT11_MNG_NONERP_PRESENT		0x01	/* NonERP (802.11b) STAs are present
+							 *in the BSS
+							 */
+#define DOT11_MNG_USE_PROTECTION		0x02	/* Use protection mechanisms for
+							 *ERP-OFDM frames
+							 */
+#define DOT11_MNG_BARKER_PREAMBLE		0x04	/* Short Preambles: 0 == allowed,
+							 * 1 == not allowed
+							 */
+/* TS Delay element offset & size */
+#define DOT11_MGN_TS_DELAY_LEN		4	/* length of TS DELAY IE */
+#define TS_DELAY_FIELD_SIZE			4	/* TS DELAY field size */
+
+/* Capability Information Field */
+#define DOT11_CAP_ESS				0x0001	/* d11 cap. ESS */
+#define DOT11_CAP_IBSS				0x0002	/* d11 cap. IBSS */
+#define DOT11_CAP_POLLABLE			0x0004	/* d11 cap. pollable */
+#define DOT11_CAP_POLL_RQ			0x0008	/* d11 cap. poll request */
+#define DOT11_CAP_PRIVACY			0x0010	/* d11 cap. privacy */
+#define DOT11_CAP_SHORT				0x0020	/* d11 cap. short */
+#define DOT11_CAP_PBCC				0x0040	/* d11 cap. PBCC */
+#define DOT11_CAP_AGILITY			0x0080	/* d11 cap. agility */
+#define DOT11_CAP_SPECTRUM			0x0100	/* d11 cap. spectrum */
+#define DOT11_CAP_QOS				0x0200	/* d11 cap. qos */
+#define DOT11_CAP_SHORTSLOT			0x0400	/* d11 cap. shortslot */
+#define DOT11_CAP_APSD				0x0800	/* d11 cap. apsd */
+#define DOT11_CAP_RRM				0x1000	/* d11 cap. 11k radio measurement */
+#define DOT11_CAP_CCK_OFDM			0x2000	/* d11 cap. CCK/OFDM */
+#define DOT11_CAP_DELAY_BA			0x4000	/* d11 cap. delayed block ack */
+#define DOT11_CAP_IMMEDIATE_BA			0x8000	/* d11 cap. immediate block ack */
+
+/* Extended capabilities IE bitfields */
+/* 20/40 BSS Coexistence Management support bit position */
+#define DOT11_EXT_CAP_OBSS_COEX_MGMT		0
+/* Extended Channel Switching support bit position */
+#define DOT11_EXT_CAP_EXT_CHAN_SWITCHING	2
+/* scheduled PSMP support bit position */
+#define DOT11_EXT_CAP_SPSMP			6
+/*  Flexible Multicast Service */
+#define DOT11_EXT_CAP_FMS			11
+/* proxy ARP service support bit position */
+#define DOT11_EXT_CAP_PROXY_ARP			12
+/* Traffic Filter Service */
+#define DOT11_EXT_CAP_TFS			16
+/* WNM-Sleep Mode */
+#define DOT11_EXT_CAP_WNM_SLEEP			17
+/* TIM Broadcast service */
+#define DOT11_EXT_CAP_TIMBC			18
+/* BSS Transition Management support bit position */
+#define DOT11_EXT_CAP_BSSTRANS_MGMT		19
+/* Direct Multicast Service */
+#define DOT11_EXT_CAP_DMS			26
+/* Interworking support bit position */
+#define DOT11_EXT_CAP_IW			31
+/* QoS map support bit position */
+#define DOT11_EXT_CAP_QOS_MAP		32
+/* service Interval granularity bit position and mask */
+#define DOT11_EXT_CAP_SI			41
+#define DOT11_EXT_CAP_SI_MASK			0x0E
+/* WNM notification */
+#define DOT11_EXT_CAP_WNM_NOTIF			46
+/* Operating mode notification - VHT (11ac D3.0 - 8.4.2.29) */
+#define DOT11_EXT_CAP_OPER_MODE_NOTIF		62
+
+/* VHT Operating mode bit fields -  (11ac D3.0 - 8.4.1.50) */
+#define DOT11_OPER_MODE_CHANNEL_WIDTH_SHIFT 0
+#define DOT11_OPER_MODE_CHANNEL_WIDTH_MASK 0x3
+#define DOT11_OPER_MODE_RXNSS_SHIFT 4
+#define DOT11_OPER_MODE_RXNSS_MASK 0x70
+#define DOT11_OPER_MODE_RXNSS_TYPE_SHIFT 7
+#define DOT11_OPER_MODE_RXNSS_TYPE_MASK 0x80
+
+#define DOT11_OPER_MODE(type, nss, chanw) (\
+	((type) << DOT11_OPER_MODE_RXNSS_TYPE_SHIFT &\
+		 DOT11_OPER_MODE_RXNSS_TYPE_MASK) |\
+	(((nss) - 1) << DOT11_OPER_MODE_RXNSS_SHIFT & DOT11_OPER_MODE_RXNSS_MASK) |\
+	((chanw) << DOT11_OPER_MODE_CHANNEL_WIDTH_SHIFT &\
+		 DOT11_OPER_MODE_CHANNEL_WIDTH_MASK))
+
+#define DOT11_OPER_MODE_CHANNEL_WIDTH(mode) \
+	(((mode) & DOT11_OPER_MODE_CHANNEL_WIDTH_MASK)\
+		>> DOT11_OPER_MODE_CHANNEL_WIDTH_SHIFT)
+#define DOT11_OPER_MODE_RXNSS(mode) \
+	((((mode) & DOT11_OPER_MODE_RXNSS_MASK)		\
+		>> DOT11_OPER_MODE_RXNSS_SHIFT) + 1)
+#define DOT11_OPER_MODE_RXNSS_TYPE(mode) \
+	(((mode) & DOT11_OPER_MODE_RXNSS_TYPE_MASK)\
+		>> DOT11_OPER_MODE_RXNSS_TYPE_SHIFT)
+
+#define DOT11_OPER_MODE_20MHZ 0
+#define DOT11_OPER_MODE_40MHZ 1
+#define DOT11_OPER_MODE_80MHZ 2
+#define DOT11_OPER_MODE_160MHZ 3
+#define DOT11_OPER_MODE_8080MHZ 3
+
+#define DOT11_OPER_MODE_CHANNEL_WIDTH_20MHZ(mode) (\
+	((mode) & DOT11_OPER_MODE_CHANNEL_WIDTH_MASK) == DOT11_OPER_MODE_20MHZ)
+#define DOT11_OPER_MODE_CHANNEL_WIDTH_40MHZ(mode) (\
+	((mode) & DOT11_OPER_MODE_CHANNEL_WIDTH_MASK) == DOT11_OPER_MODE_40MHZ)
+#define DOT11_OPER_MODE_CHANNEL_WIDTH_80MHZ(mode) (\
+	((mode) & DOT11_OPER_MODE_CHANNEL_WIDTH_MASK) == DOT11_OPER_MODE_80MHZ)
+#define DOT11_OPER_MODE_CHANNEL_WIDTH_160MHZ(mode) (\
+	((mode) & DOT11_OPER_MODE_CHANNEL_WIDTH_MASK) == DOT11_OPER_MODE_160MHZ)
+#define DOT11_OPER_MODE_CHANNEL_WIDTH_8080MHZ(mode) (\
+	((mode) & DOT11_OPER_MODE_CHANNEL_WIDTH_MASK) == DOT11_OPER_MODE_8080MHZ)
+
+/* Operating mode information element 802.11ac D3.0 - 8.4.2.168 */
+BWL_PRE_PACKED_STRUCT struct dot11_oper_mode_notif_ie {
+	uint8 mode;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_oper_mode_notif_ie dot11_oper_mode_notif_ie_t;
+
+#define DOT11_OPER_MODE_NOTIF_IE_LEN 1
+
+/* Extended Capability Information Field */
+#define DOT11_OBSS_COEX_MNG_SUPPORT	0x01	/* 20/40 BSS Coexistence Management support */
+
+/*
+ * Action Frame Constants
+ */
+#define DOT11_ACTION_HDR_LEN		2	/* action frame category + action field */
+#define DOT11_ACTION_CAT_OFF		0	/* category offset */
+#define DOT11_ACTION_ACT_OFF		1	/* action offset */
+
+/* Action Category field (sec 8.4.1.11) */
+#define DOT11_ACTION_CAT_ERR_MASK	0x80	/* category error mask */
+#define DOT11_ACTION_CAT_MASK		0x7F	/* category mask */
+#define DOT11_ACTION_CAT_SPECT_MNG	0	/* category spectrum management */
+#define DOT11_ACTION_CAT_QOS		1	/* category QoS */
+#define DOT11_ACTION_CAT_DLS		2	/* category DLS */
+#define DOT11_ACTION_CAT_BLOCKACK	3	/* category block ack */
+#define DOT11_ACTION_CAT_PUBLIC		4	/* category public */
+#define DOT11_ACTION_CAT_RRM		5	/* category radio measurements */
+#define DOT11_ACTION_CAT_FBT	6	/* category fast bss transition */
+#define DOT11_ACTION_CAT_HT		7	/* category for HT */
+#define	DOT11_ACTION_CAT_SA_QUERY	8	/* security association query */
+#define	DOT11_ACTION_CAT_PDPA		9	/* protected dual of public action */
+#define DOT11_ACTION_CAT_WNM		10	/* category for WNM */
+#define DOT11_ACTION_CAT_UWNM		11	/* category for Unprotected WNM */
+#define DOT11_ACTION_NOTIFICATION	17
+#define DOT11_ACTION_CAT_VHT		21	/* VHT action */
+#define DOT11_ACTION_CAT_VSP		126	/* protected vendor specific */
+#define DOT11_ACTION_CAT_VS		127	/* category Vendor Specific */
+
+/* Spectrum Management Action IDs (sec 7.4.1) */
+#define DOT11_SM_ACTION_M_REQ		0	/* d11 action measurement request */
+#define DOT11_SM_ACTION_M_REP		1	/* d11 action measurement response */
+#define DOT11_SM_ACTION_TPC_REQ		2	/* d11 action TPC request */
+#define DOT11_SM_ACTION_TPC_REP		3	/* d11 action TPC response */
+#define DOT11_SM_ACTION_CHANNEL_SWITCH	4	/* d11 action channel switch */
+#define DOT11_SM_ACTION_EXT_CSA		5	/* d11 extened CSA for 11n */
+
+/* QoS action ids */
+#define DOT11_QOS_ACTION_ADDTS_REQ	0	/* d11 action ADDTS request */
+#define DOT11_QOS_ACTION_ADDTS_RESP	1	/* d11 action ADDTS response */
+#define DOT11_QOS_ACTION_DELTS		2	/* d11 action DELTS */
+#define DOT11_QOS_ACTION_SCHEDULE	3	/* d11 action schedule */
+#define DOT11_QOS_ACTION_QOS_MAP	4	/* d11 action QOS map */
+
+/* HT action ids */
+#define DOT11_ACTION_ID_HT_CH_WIDTH	0	/* notify channel width action id */
+#define DOT11_ACTION_ID_HT_MIMO_PS	1	/* mimo ps action id */
+
+/* Public action ids */
+#define DOT11_PUB_ACTION_BSS_COEX_MNG	0	/* 20/40 Coexistence Management action id */
+#define DOT11_PUB_ACTION_CHANNEL_SWITCH	4	/* d11 action channel switch */
+#define DOT11_PUB_ACTION_GAS_CB_REQ	12	/* GAS Comeback Request */
+
+/* Block Ack action types */
+#define DOT11_BA_ACTION_ADDBA_REQ	0	/* ADDBA Req action frame type */
+#define DOT11_BA_ACTION_ADDBA_RESP	1	/* ADDBA Resp action frame type */
+#define DOT11_BA_ACTION_DELBA		2	/* DELBA action frame type */
+
+/* ADDBA action parameters */
+#define DOT11_ADDBA_PARAM_AMSDU_SUP	0x0001	/* AMSDU supported under BA */
+#define DOT11_ADDBA_PARAM_POLICY_MASK	0x0002	/* policy mask(ack vs delayed) */
+#define DOT11_ADDBA_PARAM_POLICY_SHIFT	1	/* policy shift */
+#define DOT11_ADDBA_PARAM_TID_MASK	0x003c	/* tid mask */
+#define DOT11_ADDBA_PARAM_TID_SHIFT	2	/* tid shift */
+#define DOT11_ADDBA_PARAM_BSIZE_MASK	0xffc0	/* buffer size mask */
+#define DOT11_ADDBA_PARAM_BSIZE_SHIFT	6	/* buffer size shift */
+
+#define DOT11_ADDBA_POLICY_DELAYED	0	/* delayed BA policy */
+#define DOT11_ADDBA_POLICY_IMMEDIATE	1	/* immediate BA policy */
+
+/* Fast Transition action types */
+#define DOT11_FT_ACTION_FT_RESERVED		0
+#define DOT11_FT_ACTION_FT_REQ			1	/* FBT request - for over-the-DS FBT */
+#define DOT11_FT_ACTION_FT_RES			2	/* FBT response - for over-the-DS FBT */
+#define DOT11_FT_ACTION_FT_CON			3	/* FBT confirm - for OTDS with RRP */
+#define DOT11_FT_ACTION_FT_ACK			4	/* FBT ack */
+
+/* DLS action types */
+#define DOT11_DLS_ACTION_REQ			0	/* DLS Request */
+#define DOT11_DLS_ACTION_RESP			1	/* DLS Response */
+#define DOT11_DLS_ACTION_TD			2	/* DLS Teardown */
+
+/* Wireless Network Management (WNM) action types */
+#define DOT11_WNM_ACTION_EVENT_REQ		0
+#define DOT11_WNM_ACTION_EVENT_REP		1
+#define DOT11_WNM_ACTION_DIAG_REQ		2
+#define DOT11_WNM_ACTION_DIAG_REP		3
+#define DOT11_WNM_ACTION_LOC_CFG_REQ		4
+#define DOT11_WNM_ACTION_LOC_RFG_RESP		5
+#define DOT11_WNM_ACTION_BSSTRANS_QUERY		6
+#define DOT11_WNM_ACTION_BSSTRANS_REQ		7
+#define DOT11_WNM_ACTION_BSSTRANS_RESP		8
+#define DOT11_WNM_ACTION_FMS_REQ		9
+#define DOT11_WNM_ACTION_FMS_RESP		10
+#define DOT11_WNM_ACTION_COL_INTRFRNCE_REQ	11
+#define DOT11_WNM_ACTION_COL_INTRFRNCE_REP	12
+#define DOT11_WNM_ACTION_TFS_REQ		13
+#define DOT11_WNM_ACTION_TFS_RESP		14
+#define DOT11_WNM_ACTION_TFS_NOTIFY_REQ		15
+#define DOT11_WNM_ACTION_WNM_SLEEP_REQ		16
+#define DOT11_WNM_ACTION_WNM_SLEEP_RESP		17
+#define DOT11_WNM_ACTION_TIMBC_REQ		18
+#define DOT11_WNM_ACTION_TIMBC_RESP		19
+#define DOT11_WNM_ACTION_QOS_TRFC_CAP_UPD	20
+#define DOT11_WNM_ACTION_CHAN_USAGE_REQ		21
+#define DOT11_WNM_ACTION_CHAN_USAGE_RESP	22
+#define DOT11_WNM_ACTION_DMS_REQ		23
+#define DOT11_WNM_ACTION_DMS_RESP		24
+#define DOT11_WNM_ACTION_TMNG_MEASUR_REQ	25
+#define DOT11_WNM_ACTION_NOTFCTN_REQ		26
+#define DOT11_WNM_ACTION_NOTFCTN_RESP		27
+#define DOT11_WNM_ACTION_TFS_NOTIFY_RESP	28
+
+/* Unprotected Wireless Network Management (WNM) action types */
+#define DOT11_UWNM_ACTION_TIM			0
+#define DOT11_UWNM_ACTION_TIMING_MEASUREMENT	1
+
+#define DOT11_MNG_COUNTRY_ID_LEN 3
+
+/* VHT category action types - 802.11ac D3.0 - 8.5.23.1 */
+#define DOT11_VHT_ACTION_CBF				0	/* Compressed Beamforming */
+#define DOT11_VHT_ACTION_GID_MGMT			1	/* Group ID Management */
+#define DOT11_VHT_ACTION_OPER_MODE_NOTIF	2	/* Operating mode notif'n */
+
+/** DLS Request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_dls_req {
+	uint8 category;			/* category of action frame (2) */
+	uint8 action;				/* DLS action: req (0) */
+	struct ether_addr	da;		/* destination address */
+	struct ether_addr	sa;		/* source address */
+	uint16 cap;				/* capability */
+	uint16 timeout;			/* timeout value */
+	uint8 data[1];				/* IE:support rate, extend support rate, HT cap */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dls_req dot11_dls_req_t;
+#define DOT11_DLS_REQ_LEN 18	/* Fixed length */
+
+/** DLS response frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_dls_resp {
+	uint8 category;			/* category of action frame (2) */
+	uint8 action;				/* DLS action: req (0) */
+	uint16 status;				/* status code field */
+	struct ether_addr	da;		/* destination address */
+	struct ether_addr	sa;		/* source address */
+	uint8 data[1];				/* optional: capability, rate ... */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dls_resp dot11_dls_resp_t;
+#define DOT11_DLS_RESP_LEN 16	/* Fixed length */
+
+
+/* ************* 802.11v related definitions. ************* */
+
+/** BSS Management Transition Query frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_bsstrans_query {
+	uint8 category;			/* category of action frame (10) */
+	uint8 action;			/* WNM action: trans_query (6) */
+	uint8 token;			/* dialog token */
+	uint8 reason;			/* transition query reason */
+	uint8 data[1];			/* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_bsstrans_query dot11_bsstrans_query_t;
+#define DOT11_BSSTRANS_QUERY_LEN 4	/* Fixed length */
+
+/** BSS Management Transition Request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_bsstrans_req {
+	uint8 category;			/* category of action frame (10) */
+	uint8 action;			/* WNM action: trans_req (7) */
+	uint8 token;			/* dialog token */
+	uint8 reqmode;			/* transition request mode */
+	uint16 disassoc_tmr;		/* disassociation timer */
+	uint8 validity_intrvl;		/* validity interval */
+	uint8 data[1];			/* optional: BSS term duration, ... */
+						/* ...session info URL, candidate list */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_bsstrans_req dot11_bsstrans_req_t;
+#define DOT11_BSSTRANS_REQ_LEN 7	/* Fixed length */
+
+/* BSS Mgmt Transition Request Mode Field - 802.11v */
+#define DOT11_BSSTRANS_REQMODE_PREF_LIST_INCL		0x01
+#define DOT11_BSSTRANS_REQMODE_ABRIDGED			0x02
+#define DOT11_BSSTRANS_REQMODE_DISASSOC_IMMINENT	0x04
+#define DOT11_BSSTRANS_REQMODE_BSS_TERM_INCL		0x08
+#define DOT11_BSSTRANS_REQMODE_ESS_DISASSOC_IMNT	0x10
+
+/** BSS Management transition response frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_bsstrans_resp {
+	uint8 category;			/* category of action frame (10) */
+	uint8 action;			/* WNM action: trans_resp (8) */
+	uint8 token;			/* dialog token */
+	uint8 status;			/* transition status */
+	uint8 term_delay;		/* validity interval */
+	uint8 data[1];			/* optional: BSSID target, candidate list */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_bsstrans_resp dot11_bsstrans_resp_t;
+#define DOT11_BSSTRANS_RESP_LEN 5	/* Fixed length */
+
+/* BSS Mgmt Transition Response Status Field */
+#define DOT11_BSSTRANS_RESP_STATUS_ACCEPT			0
+#define DOT11_BSSTRANS_RESP_STATUS_REJECT			1
+#define DOT11_BSSTRANS_RESP_STATUS_REJ_INSUFF_BCN		2
+#define DOT11_BSSTRANS_RESP_STATUS_REJ_INSUFF_CAP		3
+#define DOT11_BSSTRANS_RESP_STATUS_REJ_TERM_UNDESIRED		4
+#define DOT11_BSSTRANS_RESP_STATUS_REJ_TERM_DELAY_REQ		5
+#define DOT11_BSSTRANS_RESP_STATUS_REJ_BSS_LIST_PROVIDED	6
+#define DOT11_BSSTRANS_RESP_STATUS_REJ_NO_SUITABLE_BSS		7
+#define DOT11_BSSTRANS_RESP_STATUS_REJ_LEAVING_ESS		8
+
+
+/** BSS Max Idle Period element */
+BWL_PRE_PACKED_STRUCT struct dot11_bss_max_idle_period_ie {
+	uint8 id;				/* 90, DOT11_MNG_BSS_MAX_IDLE_PERIOD_ID */
+	uint8 len;
+	uint16 max_idle_period;			/* in unit of 1000 TUs */
+	uint8 idle_opt;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_bss_max_idle_period_ie dot11_bss_max_idle_period_ie_t;
+#define DOT11_BSS_MAX_IDLE_PERIOD_IE_LEN	3	/* bss max idle period IE size */
+#define DOT11_BSS_MAX_IDLE_PERIOD_OPT_PROTECTED	1	/* BSS max idle option */
+
+/** TIM Broadcast request element */
+BWL_PRE_PACKED_STRUCT struct dot11_timbc_req_ie {
+	uint8 id;				/* 94, DOT11_MNG_TIMBC_REQ_ID */
+	uint8 len;
+	uint8 interval;				/* in unit of beacon interval */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_timbc_req_ie dot11_timbc_req_ie_t;
+#define DOT11_TIMBC_REQ_IE_LEN		1	/* Fixed length */
+
+/** TIM Broadcast request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_timbc_req {
+	uint8 category;				/* category of action frame (10) */
+	uint8 action;				/* WNM action: DOT11_WNM_ACTION_TIMBC_REQ(18) */
+	uint8 token;				/* dialog token */
+	uint8 data[1];				/* TIM broadcast request element */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_timbc_req dot11_timbc_req_t;
+#define DOT11_TIMBC_REQ_LEN		3	/* Fixed length */
+
+/** TIM Broadcast response element */
+BWL_PRE_PACKED_STRUCT struct dot11_timbc_resp_ie {
+	uint8 id;				/* 95, DOT11_MNG_TIM_BROADCAST_RESP_ID */
+	uint8 len;
+	uint8 status;				/* status of add request */
+	uint8 interval;				/* in unit of beacon interval */
+	int32 offset;				/* in unit of ms */
+	uint16 high_rate;			/* in unit of 0.5 Mb/s */
+	uint16 low_rate;			/* in unit of 0.5 Mb/s */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_timbc_resp_ie dot11_timbc_resp_ie_t;
+#define DOT11_TIMBC_DENY_RESP_IE_LEN	1	/* Deny. Fixed length */
+#define DOT11_TIMBC_ACCEPT_RESP_IE_LEN	10	/* Accept. Fixed length */
+
+#define DOT11_TIMBC_STATUS_ACCEPT		0
+#define DOT11_TIMBC_STATUS_ACCEPT_TSTAMP	1
+#define DOT11_TIMBC_STATUS_DENY			2
+#define DOT11_TIMBC_STATUS_OVERRIDDEN		3
+#define DOT11_TIMBC_STATUS_RESERVED		4
+
+/** TIM Broadcast request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_timbc_resp {
+	uint8 category;			/* category of action frame (10) */
+	uint8 action;			/* action: DOT11_WNM_ACTION_TIMBC_RESP(19) */
+	uint8 token;			/* dialog token */
+	uint8 data[1];			/* TIM broadcast response element */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_timbc_resp dot11_timbc_resp_t;
+#define DOT11_TIMBC_RESP_LEN	3	/* Fixed length */
+
+/** TIM element */
+BWL_PRE_PACKED_STRUCT struct dot11_tim_ie {
+	uint8 id;			/* 5, DOT11_MNG_TIM_ID	 */
+	uint8 len;			/* 4 - 255 */
+	uint8 dtim_count;		/* DTIM decrementing counter */
+	uint8 dtim_period;		/* DTIM period */
+	uint8 bitmap_control;		/* AID 0 + bitmap offset */
+	uint8 pvb[1];			/* Partial Virtual Bitmap, variable length */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tim_ie dot11_tim_ie_t;
+#define DOT11_TIM_IE_FIXED_LEN	3	/* Fixed length, without id and len */
+#define DOT11_TIM_IE_FIXED_TOTAL_LEN	5	/* Fixed length, with id and len */
+
+/** TIM Broadcast frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_timbc {
+	uint8 category;			/* category of action frame (11) */
+	uint8 action;			/* action: TIM (0) */
+	uint8 check_beacon;		/* need to check-beacon */
+	uint8 tsf[8];			/* Time Synchronization Function */
+	dot11_tim_ie_t tim_ie;		/* TIM element */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_timbc dot11_timbc_t;
+#define DOT11_TIMBC_HDR_LEN	(sizeof(dot11_timbc_t) - sizeof(dot11_tim_ie_t))
+#define DOT11_TIMBC_FIXED_LEN	(sizeof(dot11_timbc_t) - 1)	/* Fixed length */
+#define DOT11_TIMBC_LEN			11	/* Fixed length */
+
+/** TCLAS frame classifier type */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_hdr {
+	uint8 type;
+	uint8 mask;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_fc_hdr dot11_tclas_fc_hdr_t;
+#define DOT11_TCLAS_FC_HDR_LEN		2	/* Fixed length */
+
+#define DOT11_TCLAS_MASK_0		0x1
+#define DOT11_TCLAS_MASK_1		0x2
+#define DOT11_TCLAS_MASK_2		0x4
+#define DOT11_TCLAS_MASK_3		0x8
+#define DOT11_TCLAS_MASK_4		0x10
+#define DOT11_TCLAS_MASK_5		0x20
+#define DOT11_TCLAS_MASK_6		0x40
+#define DOT11_TCLAS_MASK_7		0x80
+
+#define DOT11_TCLAS_FC_0_ETH		0
+#define DOT11_TCLAS_FC_1_IP		1
+#define DOT11_TCLAS_FC_2_8021Q		2
+#define DOT11_TCLAS_FC_3_OFFSET		3
+#define DOT11_TCLAS_FC_4_IP_HIGHER	4
+#define DOT11_TCLAS_FC_5_8021D		5
+
+/** TCLAS frame classifier type 0 parameters for Ethernet */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_0_eth {
+	uint8 type;
+	uint8 mask;
+	uint8 sa[ETHER_ADDR_LEN];
+	uint8 da[ETHER_ADDR_LEN];
+	uint16 eth_type;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_fc_0_eth dot11_tclas_fc_0_eth_t;
+#define DOT11_TCLAS_FC_0_ETH_LEN	16
+
+/** TCLAS frame classifier type 1 parameters for IPV4 */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_1_ipv4 {
+	uint8 type;
+	uint8 mask;
+	uint8 version;
+	uint32 src_ip;
+	uint32 dst_ip;
+	uint16 src_port;
+	uint16 dst_port;
+	uint8 dscp;
+	uint8 protocol;
+	uint8 reserved;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_fc_1_ipv4 dot11_tclas_fc_1_ipv4_t;
+#define DOT11_TCLAS_FC_1_IPV4_LEN	18
+
+/** TCLAS frame classifier type 2 parameters for 802.1Q */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_2_8021q {
+	uint8 type;
+	uint8 mask;
+	uint16 tci;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_fc_2_8021q dot11_tclas_fc_2_8021q_t;
+#define DOT11_TCLAS_FC_2_8021Q_LEN	4
+
+/** TCLAS frame classifier type 3 parameters for filter offset */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_3_filter {
+	uint8 type;
+	uint8 mask;
+	uint16 offset;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_fc_3_filter dot11_tclas_fc_3_filter_t;
+#define DOT11_TCLAS_FC_3_FILTER_LEN	4
+
+/** TCLAS frame classifier type 4 parameters for IPV4 is the same as TCLAS type 1 */
+typedef struct dot11_tclas_fc_1_ipv4 dot11_tclas_fc_4_ipv4_t;
+#define DOT11_TCLAS_FC_4_IPV4_LEN	DOT11_TCLAS_FC_1_IPV4_LEN
+
+/** TCLAS frame classifier type 4 parameters for IPV6 */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_4_ipv6 {
+	uint8 type;
+	uint8 mask;
+	uint8 version;
+	uint8 saddr[16];
+	uint8 daddr[16];
+	uint16 src_port;
+	uint16 dst_port;
+	uint8 dscp;
+	uint8 nexthdr;
+	uint8 flow_lbl[3];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_fc_4_ipv6 dot11_tclas_fc_4_ipv6_t;
+#define DOT11_TCLAS_FC_4_IPV6_LEN	44
+
+/** TCLAS frame classifier type 5 parameters for 802.1D */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_5_8021d {
+	uint8 type;
+	uint8 mask;
+	uint8 pcp;
+	uint8 cfi;
+	uint16 vid;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_fc_5_8021d dot11_tclas_fc_5_8021d_t;
+#define DOT11_TCLAS_FC_5_8021D_LEN	6
+
+/** TCLAS frame classifier type parameters */
+BWL_PRE_PACKED_STRUCT union dot11_tclas_fc {
+	uint8 data[1];
+	dot11_tclas_fc_hdr_t hdr;
+	dot11_tclas_fc_0_eth_t t0_eth;
+	dot11_tclas_fc_1_ipv4_t	t1_ipv4;
+	dot11_tclas_fc_2_8021q_t t2_8021q;
+	dot11_tclas_fc_3_filter_t t3_filter;
+	dot11_tclas_fc_4_ipv4_t	t4_ipv4;
+	dot11_tclas_fc_4_ipv6_t	t4_ipv6;
+	dot11_tclas_fc_5_8021d_t t5_8021d;
+} BWL_POST_PACKED_STRUCT;
+typedef union dot11_tclas_fc dot11_tclas_fc_t;
+
+#define DOT11_TCLAS_FC_MIN_LEN		4	/* Classifier Type 2 has the min size */
+#define DOT11_TCLAS_FC_MAX_LEN		254
+
+/** TCLAS element */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_ie {
+	uint8 id;				/* 14, DOT11_MNG_TCLAS_ID */
+	uint8 len;
+	uint8 user_priority;
+	dot11_tclas_fc_t fc;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_ie dot11_tclas_ie_t;
+#define DOT11_TCLAS_IE_LEN		3	/* Fixed length, include id and len */
+
+/** TCLAS processing element */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_proc_ie {
+	uint8 id;				/* 44, DOT11_MNG_TCLAS_PROC_ID */
+	uint8 len;
+	uint8 process;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_proc_ie dot11_tclas_proc_ie_t;
+#define DOT11_TCLAS_PROC_IE_LEN		3	/* Fixed length, include id and len */
+
+#define DOT11_TCLAS_PROC_MATCHALL	0	/* All high level element need to match */
+#define DOT11_TCLAS_PROC_MATCHONE	1	/* One high level element need to match */
+#define DOT11_TCLAS_PROC_NONMATCH	2	/* Non match to any high level element */
+
+
+/* TSPEC element defined in 802.11 std section 8.4.2.32 - Not supported */
+#define DOT11_TSPEC_IE_LEN		57	/* Fixed length */
+
+/** TFS request element */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_req_ie {
+	uint8 id;				/* 91, DOT11_MNG_TFS_REQUEST_ID */
+	uint8 len;
+	uint8 tfs_id;
+	uint8 actcode;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tfs_req_ie dot11_tfs_req_ie_t;
+#define DOT11_TFS_REQ_IE_LEN		2	/* Fixed length, without id and len */
+
+/** TFS request action codes (bitfield) */
+#define DOT11_TFS_ACTCODE_DELETE	1
+#define DOT11_TFS_ACTCODE_NOTIFY	2
+
+/** TFS request subelement IDs */
+#define DOT11_TFS_REQ_TFS_SE_ID		1
+#define DOT11_TFS_REQ_VENDOR_SE_ID	221
+
+/** TFS subelement */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_se {
+	uint8 sub_id;
+	uint8 len;
+	uint8 data[1];				/* TCLAS element(s) + optional TCLAS proc */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tfs_se dot11_tfs_se_t;
+
+
+/** TFS response element */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_resp_ie {
+	uint8 id;				/* 92, DOT11_MNG_TFS_RESPONSE_ID */
+	uint8 len;
+	uint8 tfs_id;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tfs_resp_ie dot11_tfs_resp_ie_t;
+#define DOT11_TFS_RESP_IE_LEN		1	/* Fixed length, without id and len */
+
+/** TFS response subelement IDs (same subelments, but different IDs than in TFS request */
+#define DOT11_TFS_RESP_TFS_STATUS_SE_ID		1
+#define DOT11_TFS_RESP_TFS_SE_ID		2
+#define DOT11_TFS_RESP_VENDOR_SE_ID		221
+
+/** TFS status subelement */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_status_se {
+	uint8 sub_id;				/* 92, DOT11_MNG_TFS_RESPONSE_ID */
+	uint8 len;
+	uint8 resp_st;
+	uint8 data[1];				/* Potential dot11_tfs_se_t included */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tfs_status_se dot11_tfs_status_se_t;
+#define DOT11_TFS_STATUS_SE_LEN			1	/* Fixed length, without id and len */
+
+/* Following Definition should be merged to FMS_TFS macro below */
+/* TFS Response status code. Identical to FMS Element status, without N/A  */
+#define DOT11_TFS_STATUS_ACCEPT			0
+#define DOT11_TFS_STATUS_DENY_FORMAT		1
+#define DOT11_TFS_STATUS_DENY_RESOURCE		2
+#define DOT11_TFS_STATUS_DENY_POLICY		4
+#define DOT11_TFS_STATUS_DENY_UNSPECIFIED	5
+#define DOT11_TFS_STATUS_ALTPREF_POLICY		7
+#define DOT11_TFS_STATUS_ALTPREF_TCLAS_UNSUPP	14
+
+/* FMS Element Status and TFS Response Status Definition */
+#define DOT11_FMS_TFS_STATUS_ACCEPT		0
+#define DOT11_FMS_TFS_STATUS_DENY_FORMAT	1
+#define DOT11_FMS_TFS_STATUS_DENY_RESOURCE	2
+#define DOT11_FMS_TFS_STATUS_DENY_MULTIPLE_DI	3
+#define DOT11_FMS_TFS_STATUS_DENY_POLICY	4
+#define DOT11_FMS_TFS_STATUS_DENY_UNSPECIFIED	5
+#define DOT11_FMS_TFS_STATUS_ALT_DIFF_DI	6
+#define DOT11_FMS_TFS_STATUS_ALT_POLICY		7
+#define DOT11_FMS_TFS_STATUS_ALT_CHANGE_DI	8
+#define DOT11_FMS_TFS_STATUS_ALT_MCRATE		9
+#define DOT11_FMS_TFS_STATUS_TERM_POLICY	10
+#define DOT11_FMS_TFS_STATUS_TERM_RESOURCE	11
+#define DOT11_FMS_TFS_STATUS_TERM_HIGHER_PRIO	12
+#define DOT11_FMS_TFS_STATUS_ALT_CHANGE_MDI	13
+#define DOT11_FMS_TFS_STATUS_ALT_TCLAS_UNSUPP	14
+
+/** TFS Management Request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_req {
+	uint8 category;				/* category of action frame (10) */
+	uint8 action;				/* WNM action: TFS request (13) */
+	uint8 token;				/* dialog token */
+	uint8 data[1];				/* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tfs_req dot11_tfs_req_t;
+#define DOT11_TFS_REQ_LEN		3	/* Fixed length */
+
+/** TFS Management Response frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_resp {
+	uint8 category;				/* category of action frame (10) */
+	uint8 action;				/* WNM action: TFS request (14) */
+	uint8 token;				/* dialog token */
+	uint8 data[1];				/* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tfs_resp dot11_tfs_resp_t;
+#define DOT11_TFS_RESP_LEN		3	/* Fixed length */
+
+/** TFS Management Notify frame request header */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_notify_req {
+	uint8 category;				/* category of action frame (10) */
+	uint8 action;				/* WNM action: TFS notify request (15) */
+	uint8 tfs_id_cnt;			/* TFS IDs count */
+	uint8 tfs_id[1];			/* Array of TFS IDs */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tfs_notify_req dot11_tfs_notify_req_t;
+#define DOT11_TFS_NOTIFY_REQ_LEN	3	/* Fixed length */
+
+/** TFS Management Notify frame response header */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_notify_resp {
+	uint8 category;				/* category of action frame (10) */
+	uint8 action;				/* WNM action: TFS notify response (28) */
+	uint8 tfs_id_cnt;			/* TFS IDs count */
+	uint8 tfs_id[1];			/* Array of TFS IDs */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tfs_notify_resp dot11_tfs_notify_resp_t;
+#define DOT11_TFS_NOTIFY_RESP_LEN	3	/* Fixed length */
+
+
+/** WNM-Sleep Management Request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_wnm_sleep_req {
+	uint8 category;				/* category of action frame (10) */
+	uint8 action;				/* WNM action: wnm-sleep request (16) */
+	uint8 token;				/* dialog token */
+	uint8 data[1];				/* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_wnm_sleep_req dot11_wnm_sleep_req_t;
+#define DOT11_WNM_SLEEP_REQ_LEN		3	/* Fixed length */
+
+/** WNM-Sleep Management Response frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_wnm_sleep_resp {
+	uint8 category;				/* category of action frame (10) */
+	uint8 action;				/* WNM action: wnm-sleep request (17) */
+	uint8 token;				/* dialog token */
+	uint16 key_len;				/* key data length */
+	uint8 data[1];				/* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_wnm_sleep_resp dot11_wnm_sleep_resp_t;
+#define DOT11_WNM_SLEEP_RESP_LEN	5	/* Fixed length */
+
+#define DOT11_WNM_SLEEP_SUBELEM_ID_GTK	0
+#define DOT11_WNM_SLEEP_SUBELEM_ID_IGTK	1
+
+BWL_PRE_PACKED_STRUCT struct dot11_wnm_sleep_subelem_gtk {
+	uint8 sub_id;
+	uint8 len;
+	uint16 key_info;
+	uint8 key_length;
+	uint8 rsc[8];
+	uint8 key[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_wnm_sleep_subelem_gtk dot11_wnm_sleep_subelem_gtk_t;
+#define DOT11_WNM_SLEEP_SUBELEM_GTK_FIXED_LEN	11	/* without sub_id, len, and key */
+#define DOT11_WNM_SLEEP_SUBELEM_GTK_MAX_LEN	43	/* without sub_id and len */
+
+BWL_PRE_PACKED_STRUCT struct dot11_wnm_sleep_subelem_igtk {
+	uint8 sub_id;
+	uint8 len;
+	uint16 key_id;
+	uint8 pn[6];
+	uint8 key[16];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_wnm_sleep_subelem_igtk dot11_wnm_sleep_subelem_igtk_t;
+#define DOT11_WNM_SLEEP_SUBELEM_IGTK_LEN 24	/* Fixed length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_wnm_sleep_ie {
+	uint8 id;				/* 93, DOT11_MNG_WNM_SLEEP_MODE_ID */
+	uint8 len;
+	uint8 act_type;
+	uint8 resp_status;
+	uint16 interval;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_wnm_sleep_ie dot11_wnm_sleep_ie_t;
+#define DOT11_WNM_SLEEP_IE_LEN		4	/* Fixed length */
+
+#define DOT11_WNM_SLEEP_ACT_TYPE_ENTER	0
+#define DOT11_WNM_SLEEP_ACT_TYPE_EXIT	1
+
+#define DOT11_WNM_SLEEP_RESP_ACCEPT	0
+#define DOT11_WNM_SLEEP_RESP_UPDATE	1
+#define DOT11_WNM_SLEEP_RESP_DENY	2
+#define DOT11_WNM_SLEEP_RESP_DENY_TEMP	3
+#define DOT11_WNM_SLEEP_RESP_DENY_KEY	4
+#define DOT11_WNM_SLEEP_RESP_DENY_INUSE	5
+#define DOT11_WNM_SLEEP_RESP_LAST	6
+
+/** DMS Management Request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_dms_req {
+	uint8 category;				/* category of action frame (10) */
+	uint8 action;				/* WNM action: dms request (23) */
+	uint8 token;				/* dialog token */
+	uint8 data[1];				/* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dms_req dot11_dms_req_t;
+#define DOT11_DMS_REQ_LEN		3	/* Fixed length */
+
+/** DMS Management Response frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_dms_resp {
+	uint8 category;				/* category of action frame (10) */
+	uint8 action;				/* WNM action: dms request (24) */
+	uint8 token;				/* dialog token */
+	uint8 data[1];				/* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dms_resp dot11_dms_resp_t;
+#define DOT11_DMS_RESP_LEN		3	/* Fixed length */
+
+/** DMS request element */
+BWL_PRE_PACKED_STRUCT struct dot11_dms_req_ie {
+	uint8 id;				/* 99, DOT11_MNG_DMS_REQUEST_ID */
+	uint8 len;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dms_req_ie dot11_dms_req_ie_t;
+#define DOT11_DMS_REQ_IE_LEN		2	/* Fixed length */
+
+/** DMS response element */
+BWL_PRE_PACKED_STRUCT struct dot11_dms_resp_ie {
+	uint8 id;				/* 100, DOT11_MNG_DMS_RESPONSE_ID */
+	uint8 len;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dms_resp_ie dot11_dms_resp_ie_t;
+#define DOT11_DMS_RESP_IE_LEN		2	/* Fixed length */
+
+/** DMS request descriptor */
+BWL_PRE_PACKED_STRUCT struct dot11_dms_req_desc {
+	uint8 dms_id;
+	uint8 len;
+	uint8 type;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dms_req_desc dot11_dms_req_desc_t;
+#define DOT11_DMS_REQ_DESC_LEN		3	/* Fixed length */
+
+#define DOT11_DMS_REQ_TYPE_ADD		0
+#define DOT11_DMS_REQ_TYPE_REMOVE	1
+#define DOT11_DMS_REQ_TYPE_CHANGE	2
+
+/** DMS response status */
+BWL_PRE_PACKED_STRUCT struct dot11_dms_resp_st {
+	uint8 dms_id;
+	uint8 len;
+	uint8 type;
+	uint16 lsc;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dms_resp_st dot11_dms_resp_st_t;
+#define DOT11_DMS_RESP_STATUS_LEN	5	/* Fixed length */
+
+#define DOT11_DMS_RESP_TYPE_ACCEPT	0
+#define DOT11_DMS_RESP_TYPE_DENY	1
+#define DOT11_DMS_RESP_TYPE_TERM	2
+
+#define DOT11_DMS_RESP_LSC_UNSUPPORTED	0xFFFF
+
+/** FMS Management Request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_req {
+	uint8 category;				/* category of action frame (10) */
+	uint8 action;				/* WNM action: fms request (9) */
+	uint8 token;				/* dialog token */
+	uint8 data[1];				/* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_req dot11_fms_req_t;
+#define DOT11_FMS_REQ_LEN		3	/* Fixed length */
+
+/** FMS Management Response frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_resp {
+	uint8 category;				/* category of action frame (10) */
+	uint8 action;				/* WNM action: fms request (10) */
+	uint8 token;				/* dialog token */
+	uint8 data[1];				/* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_resp dot11_fms_resp_t;
+#define DOT11_FMS_RESP_LEN		3	/* Fixed length */
+
+/** FMS Descriptor element */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_desc {
+	uint8 id;
+	uint8 len;
+	uint8 num_fms_cnt;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_desc dot11_fms_desc_t;
+#define DOT11_FMS_DESC_LEN		1	/* Fixed length */
+
+#define DOT11_FMS_CNTR_MAX		0x8
+#define DOT11_FMS_CNTR_ID_MASK		0x7
+#define DOT11_FMS_CNTR_ID_SHIFT		0x0
+#define DOT11_FMS_CNTR_COUNT_MASK	0xf1
+#define DOT11_FMS_CNTR_SHIFT		0x3
+
+/** FMS request element */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_req_ie {
+	uint8 id;
+	uint8 len;
+	uint8 fms_token;			/* token used to identify fms stream set */
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_req_ie dot11_fms_req_ie_t;
+#define DOT11_FMS_REQ_IE_FIX_LEN		1	/* Fixed length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_rate_id_field {
+	uint8 mask;
+	uint8 mcs_idx;
+	uint16 rate;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rate_id_field dot11_rate_id_field_t;
+#define DOT11_RATE_ID_FIELD_MCS_SEL_MASK	0x7
+#define DOT11_RATE_ID_FIELD_MCS_SEL_OFFSET	0
+#define DOT11_RATE_ID_FIELD_RATETYPE_MASK	0x18
+#define DOT11_RATE_ID_FIELD_RATETYPE_OFFSET	3
+#define DOT11_RATE_ID_FIELD_LEN		sizeof(dot11_rate_id_field_t)
+
+/** FMS request subelements */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_se {
+	uint8 sub_id;
+	uint8 len;
+	uint8 interval;
+	uint8 max_interval;
+	dot11_rate_id_field_t rate;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_se dot11_fms_se_t;
+#define DOT11_FMS_REQ_SE_LEN		6	/* Fixed length */
+
+#define DOT11_FMS_REQ_SE_ID_FMS		1	/* FMS subelement */
+#define DOT11_FMS_REQ_SE_ID_VS		221	/* Vendor Specific subelement */
+
+/** FMS response element */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_resp_ie {
+	uint8 id;
+	uint8 len;
+	uint8 fms_token;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_resp_ie dot11_fms_resp_ie_t;
+#define DOT11_FMS_RESP_IE_FIX_LEN		1	/* Fixed length */
+
+/* FMS status subelements */
+#define DOT11_FMS_STATUS_SE_ID_FMS	1	/* FMS Status */
+#define DOT11_FMS_STATUS_SE_ID_TCLAS	2	/* TCLAS Status */
+#define DOT11_FMS_STATUS_SE_ID_VS	221	/* Vendor Specific subelement */
+
+/** FMS status subelement */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_status_se {
+	uint8 sub_id;
+	uint8 len;
+	uint8 status;
+	uint8 interval;
+	uint8 max_interval;
+	uint8 fmsid;
+	uint8 counter;
+	dot11_rate_id_field_t rate;
+	uint8 mcast_addr[ETHER_ADDR_LEN];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_status_se dot11_fms_status_se_t;
+#define DOT11_FMS_STATUS_SE_LEN		15	/* Fixed length */
+
+/** TCLAS status subelement */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_status_se {
+	uint8 sub_id;
+	uint8 len;
+	uint8 fmsid;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_status_se dot11_tclas_status_se_t;
+#define DOT11_TCLAS_STATUS_SE_LEN		1	/* Fixed length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_addba_req {
+	uint8 category;				/* category of action frame (3) */
+	uint8 action;				/* action: addba req */
+	uint8 token;				/* identifier */
+	uint16 addba_param_set;		/* parameter set */
+	uint16 timeout;				/* timeout in seconds */
+	uint16 start_seqnum;		/* starting sequence number */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_addba_req dot11_addba_req_t;
+#define DOT11_ADDBA_REQ_LEN		9	/* length of addba req frame */
+
+BWL_PRE_PACKED_STRUCT struct dot11_addba_resp {
+	uint8 category;				/* category of action frame (3) */
+	uint8 action;				/* action: addba resp */
+	uint8 token;				/* identifier */
+	uint16 status;				/* status of add request */
+	uint16 addba_param_set;			/* negotiated parameter set */
+	uint16 timeout;				/* negotiated timeout in seconds */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_addba_resp dot11_addba_resp_t;
+#define DOT11_ADDBA_RESP_LEN		9	/* length of addba resp frame */
+
+/* DELBA action parameters */
+#define DOT11_DELBA_PARAM_INIT_MASK	0x0800	/* initiator mask */
+#define DOT11_DELBA_PARAM_INIT_SHIFT	11	/* initiator shift */
+#define DOT11_DELBA_PARAM_TID_MASK	0xf000	/* tid mask */
+#define DOT11_DELBA_PARAM_TID_SHIFT	12	/* tid shift */
+
+BWL_PRE_PACKED_STRUCT struct dot11_delba {
+	uint8 category;				/* category of action frame (3) */
+	uint8 action;				/* action: addba req */
+	uint16 delba_param_set;			/* paarmeter set */
+	uint16 reason;				/* reason for dellba */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_delba dot11_delba_t;
+#define DOT11_DELBA_LEN			6	/* length of delba frame */
+
+/* SA Query action field value */
+#define SA_QUERY_REQUEST		0
+#define SA_QUERY_RESPONSE		1
+
+/* ************* 802.11r related definitions. ************* */
+
+/** Over-the-DS Fast Transition Request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_ft_req {
+	uint8 category;			/* category of action frame (6) */
+	uint8 action;			/* action: ft req */
+	uint8 sta_addr[ETHER_ADDR_LEN];
+	uint8 tgt_ap_addr[ETHER_ADDR_LEN];
+	uint8 data[1];			/* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ft_req dot11_ft_req_t;
+#define DOT11_FT_REQ_FIXED_LEN 14
+
+/** Over-the-DS Fast Transition Response frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_ft_res {
+	uint8 category;			/* category of action frame (6) */
+	uint8 action;			/* action: ft resp */
+	uint8 sta_addr[ETHER_ADDR_LEN];
+	uint8 tgt_ap_addr[ETHER_ADDR_LEN];
+	uint16 status;			/* status code */
+	uint8 data[1];			/* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ft_res dot11_ft_res_t;
+#define DOT11_FT_RES_FIXED_LEN 16
+
+/** RDE RIC Data Element. */
+BWL_PRE_PACKED_STRUCT struct dot11_rde_ie {
+	uint8 id;			/* 11r, DOT11_MNG_RDE_ID */
+	uint8 length;
+	uint8 rde_id;			/* RDE identifier. */
+	uint8 rd_count;			/* Resource Descriptor Count. */
+	uint16 status;			/* Status Code. */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rde_ie dot11_rde_ie_t;
+
+/* 11r - Size of the RDE (RIC Data Element) IE, including TLV header. */
+#define DOT11_MNG_RDE_IE_LEN sizeof(dot11_rde_ie_t)
+
+
+/* ************* 802.11k related definitions. ************* */
+
+/* Radio measurements enabled capability ie */
+#define DOT11_RRM_CAP_LEN		5	/* length of rrm cap bitmap */
+#define RCPI_IE_LEN 1
+#define RSNI_IE_LEN 1
+BWL_PRE_PACKED_STRUCT struct dot11_rrm_cap_ie {
+	uint8 cap[DOT11_RRM_CAP_LEN];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rrm_cap_ie dot11_rrm_cap_ie_t;
+
+/* Bitmap definitions for cap ie */
+#define DOT11_RRM_CAP_LINK		0
+#define DOT11_RRM_CAP_NEIGHBOR_REPORT	1
+#define DOT11_RRM_CAP_PARALLEL		2
+#define DOT11_RRM_CAP_REPEATED		3
+#define DOT11_RRM_CAP_BCN_PASSIVE	4
+#define DOT11_RRM_CAP_BCN_ACTIVE	5
+#define DOT11_RRM_CAP_BCN_TABLE		6
+#define DOT11_RRM_CAP_BCN_REP_COND	7
+#define DOT11_RRM_CAP_FM		8
+#define DOT11_RRM_CAP_CLM		9
+#define DOT11_RRM_CAP_NHM		10
+#define DOT11_RRM_CAP_SM		11
+#define DOT11_RRM_CAP_LCIM		12
+#define DOT11_RRM_CAP_LCIA		13
+#define DOT11_RRM_CAP_TSCM		14
+#define DOT11_RRM_CAP_TTSCM		15
+#define DOT11_RRM_CAP_AP_CHANREP	16
+#define DOT11_RRM_CAP_RMMIB		17
+/* bit18-bit26, not used for RRM_IOVAR */
+#define DOT11_RRM_CAP_MPTI		27
+#define DOT11_RRM_CAP_NBRTSFO		28
+#define DOT11_RRM_CAP_RCPI		29
+#define DOT11_RRM_CAP_RSNI		30
+#define DOT11_RRM_CAP_BSSAAD		31
+#define DOT11_RRM_CAP_BSSAAC		32
+#define DOT11_RRM_CAP_AI		33
+
+/* Operating Class (formerly "Regulatory Class") definitions */
+#define DOT11_OP_CLASS_NONE			255
+
+BWL_PRE_PACKED_STRUCT struct do11_ap_chrep {
+	uint8 id;
+	uint8 len;
+	uint8 reg;
+	uint8 chanlist[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct do11_ap_chrep dot11_ap_chrep_t;
+
+/* Radio Measurements action ids */
+#define DOT11_RM_ACTION_RM_REQ		0	/* Radio measurement request */
+#define DOT11_RM_ACTION_RM_REP		1	/* Radio measurement report */
+#define DOT11_RM_ACTION_LM_REQ		2	/* Link measurement request */
+#define DOT11_RM_ACTION_LM_REP		3	/* Link measurement report */
+#define DOT11_RM_ACTION_NR_REQ		4	/* Neighbor report request */
+#define DOT11_RM_ACTION_NR_REP		5	/* Neighbor report response */
+
+/** Generic radio measurement action frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_rm_action {
+	uint8 category;				/* category of action frame (5) */
+	uint8 action;				/* radio measurement action */
+	uint8 token;				/* dialog token */
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rm_action dot11_rm_action_t;
+#define DOT11_RM_ACTION_LEN 3
+
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq {
+	uint8 category;				/* category of action frame (5) */
+	uint8 action;				/* radio measurement action */
+	uint8 token;				/* dialog token */
+	uint16 reps;				/* no. of repetitions */
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq dot11_rmreq_t;
+#define DOT11_RMREQ_LEN	5
+
+BWL_PRE_PACKED_STRUCT struct dot11_rm_ie {
+	uint8 id;
+	uint8 len;
+	uint8 token;
+	uint8 mode;
+	uint8 type;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rm_ie dot11_rm_ie_t;
+#define DOT11_RM_IE_LEN	5
+
+/* Definitions for "mode" bits in rm req */
+#define DOT11_RMREQ_MODE_PARALLEL	1
+#define DOT11_RMREQ_MODE_ENABLE		2
+#define DOT11_RMREQ_MODE_REQUEST	4
+#define DOT11_RMREQ_MODE_REPORT		8
+#define DOT11_RMREQ_MODE_DURMAND	0x10	/* Duration Mandatory */
+
+/* Definitions for "mode" bits in rm rep */
+#define DOT11_RMREP_MODE_LATE		1
+#define DOT11_RMREP_MODE_INCAPABLE	2
+#define DOT11_RMREP_MODE_REFUSED	4
+
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq_bcn {
+	uint8 id;
+	uint8 len;
+	uint8 token;
+	uint8 mode;
+	uint8 type;
+	uint8 reg;
+	uint8 channel;
+	uint16 interval;
+	uint16 duration;
+	uint8 bcn_mode;
+	struct ether_addr	bssid;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq_bcn dot11_rmreq_bcn_t;
+#define DOT11_RMREQ_BCN_LEN	18
+
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_bcn {
+	uint8 reg;
+	uint8 channel;
+	uint32 starttime[2];
+	uint16 duration;
+	uint8 frame_info;
+	uint8 rcpi;
+	uint8 rsni;
+	struct ether_addr	bssid;
+	uint8 antenna_id;
+	uint32 parent_tsf;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmrep_bcn dot11_rmrep_bcn_t;
+#define DOT11_RMREP_BCN_LEN	26
+
+/* Beacon request measurement mode */
+#define DOT11_RMREQ_BCN_PASSIVE	0
+#define DOT11_RMREQ_BCN_ACTIVE	1
+#define DOT11_RMREQ_BCN_TABLE	2
+
+/* Sub-element IDs for Beacon Request */
+#define DOT11_RMREQ_BCN_SSID_ID 0
+#define DOT11_RMREQ_BCN_REPINFO_ID  1
+#define DOT11_RMREQ_BCN_REPDET_ID   2
+#define DOT11_RMREQ_BCN_REQUEST_ID  10
+#define DOT11_RMREQ_BCN_APCHREP_ID  DOT11_MNG_AP_CHREP_ID
+
+/* Reporting Detail element definition */
+#define DOT11_RMREQ_BCN_REPDET_FIXED	0	/* Fixed length fields only */
+#define DOT11_RMREQ_BCN_REPDET_REQUEST	1	/* + requested information elems */
+#define DOT11_RMREQ_BCN_REPDET_ALL	2	/* All fields */
+
+/* Sub-element IDs for Beacon Report */
+#define DOT11_RMREP_BCN_FRM_BODY	1
+
+/* Sub-element IDs for Frame Report */
+#define DOT11_RMREP_FRAME_COUNT_REPORT 1
+
+/** Channel load request */
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq_chanload {
+	uint8 id;
+	uint8 len;
+	uint8 token;
+	uint8 mode;
+	uint8 type;
+	uint8 reg;
+	uint8 channel;
+	uint16 interval;
+	uint16 duration;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq_chanload dot11_rmreq_chanload_t;
+#define DOT11_RMREQ_CHANLOAD_LEN	11
+
+/** Channel load report */
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_chanload {
+	uint8 reg;
+	uint8 channel;
+	uint32 starttime[2];
+	uint16 duration;
+	uint8 channel_load;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmrep_chanload dot11_rmrep_chanload_t;
+#define DOT11_RMREP_CHANLOAD_LEN	13
+
+/** Noise histogram request */
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq_noise {
+	uint8 id;
+	uint8 len;
+	uint8 token;
+	uint8 mode;
+	uint8 type;
+	uint8 reg;
+	uint8 channel;
+	uint16 interval;
+	uint16 duration;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq_noise dot11_rmreq_noise_t;
+#define DOT11_RMREQ_NOISE_LEN 11
+
+/** Noise histogram report */
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_noise {
+	uint8 reg;
+	uint8 channel;
+	uint32 starttime[2];
+	uint16 duration;
+	uint8 antid;
+	uint8 anpi;
+	uint8 ipi0_dens;
+	uint8 ipi1_dens;
+	uint8 ipi2_dens;
+	uint8 ipi3_dens;
+	uint8 ipi4_dens;
+	uint8 ipi5_dens;
+	uint8 ipi6_dens;
+	uint8 ipi7_dens;
+	uint8 ipi8_dens;
+	uint8 ipi9_dens;
+	uint8 ipi10_dens;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmrep_noise dot11_rmrep_noise_t;
+#define DOT11_RMREP_NOISE_LEN 25
+
+/** Frame request */
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq_frame {
+	uint8 id;
+	uint8 len;
+	uint8 token;
+	uint8 mode;
+	uint8 type;
+	uint8 reg;
+	uint8 channel;
+	uint16 interval;
+	uint16 duration;
+	uint8 req_type;
+	struct ether_addr	ta;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq_frame dot11_rmreq_frame_t;
+#define DOT11_RMREQ_FRAME_LEN 18
+
+/** Frame report */
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_frame {
+	uint8 reg;
+	uint8 channel;
+	uint32 starttime[2];
+	uint16 duration;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmrep_frame dot11_rmrep_frame_t;
+#define DOT11_RMREP_FRAME_LEN 12
+
+/** Frame report entry */
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_frmentry {
+	struct ether_addr	ta;
+	struct ether_addr	bssid;
+	uint8 phy_type;
+	uint8 avg_rcpi;
+	uint8 last_rsni;
+	uint8 last_rcpi;
+	uint8 ant_id;
+	uint16 frame_cnt;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmrep_frmentry dot11_rmrep_frmentry_t;
+#define DOT11_RMREP_FRMENTRY_LEN 19
+
+/** STA statistics request */
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq_stat {
+	uint8 id;
+	uint8 len;
+	uint8 token;
+	uint8 mode;
+	uint8 type;
+	struct ether_addr	peer;
+	uint16 interval;
+	uint16 duration;
+	uint8 group_id;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq_stat dot11_rmreq_stat_t;
+#define DOT11_RMREQ_STAT_LEN 16
+
+/** STA statistics report */
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_stat {
+	uint16 duration;
+	uint8 group_id;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmrep_stat dot11_rmrep_stat_t;
+
+/** Transmit stream/category measurement request */
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq_tx_stream {
+	uint8 id;
+	uint8 len;
+	uint8 token;
+	uint8 mode;
+	uint8 type;
+	uint16 interval;
+	uint16 duration;
+	struct ether_addr	peer;
+	uint8 traffic_id;
+	uint8 bin0_range;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq_tx_stream dot11_rmreq_tx_stream_t;
+
+/** Transmit stream/category measurement report */
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_tx_stream {
+	uint32 starttime[2];
+	uint16 duration;
+	struct ether_addr	peer;
+	uint8 traffic_id;
+	uint8 reason;
+	uint32 txmsdu_cnt;
+	uint32 msdu_discarded_cnt;
+	uint32 msdufailed_cnt;
+	uint32 msduretry_cnt;
+	uint32 cfpolls_lost_cnt;
+	uint32 avrqueue_delay;
+	uint32 avrtx_delay;
+	uint8 bin0_range;
+	uint32 bin0;
+	uint32 bin1;
+	uint32 bin2;
+	uint32 bin3;
+	uint32 bin4;
+	uint32 bin5;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmrep_tx_stream dot11_rmrep_tx_stream_t;
+
+/** Measurement pause request */
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq_pause_time {
+	uint8 id;
+	uint8 len;
+	uint8 token;
+	uint8 mode;
+	uint8 type;
+	uint16 pause_time;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq_pause_time dot11_rmreq_pause_time_t;
+
+
+/* Neighbor Report subelements ID (11k & 11v) */
+#define DOT11_NGBR_TSF_INFO_SE_ID	1
+#define DOT11_NGBR_CCS_SE_ID		2
+#define DOT11_NGBR_BSSTRANS_PREF_SE_ID	3
+#define DOT11_NGBR_BSS_TERM_DUR_SE_ID	4
+#define DOT11_NGBR_BEARING_SE_ID	5
+
+/** Neighbor Report, BSS Transition Candidate Preference subelement */
+BWL_PRE_PACKED_STRUCT struct dot11_ngbr_bsstrans_pref_se {
+	uint8 sub_id;
+	uint8 len;
+	uint8 preference;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ngbr_bsstrans_pref_se dot11_ngbr_bsstrans_pref_se_t;
+#define DOT11_NGBR_BSSTRANS_PREF_SE_LEN	1
+
+/** Neighbor Report, BSS Termination Duration subelement */
+BWL_PRE_PACKED_STRUCT struct dot11_ngbr_bss_term_dur_se {
+	uint8 sub_id;
+	uint8 len;
+	uint8 tsf[8];
+	uint16 duration;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ngbr_bss_term_dur_se dot11_ngbr_bss_term_dur_se_t;
+#define DOT11_NGBR_BSS_TERM_DUR_SE_LEN	10
+
+/* Neighbor Report BSSID Information Field */
+#define DOT11_NGBR_BI_REACHABILTY_UNKN	0x0002
+#define DOT11_NGBR_BI_REACHABILTY	0x0003
+#define DOT11_NGBR_BI_SEC		0x0004
+#define DOT11_NGBR_BI_KEY_SCOPE		0x0008
+#define DOT11_NGBR_BI_CAP		0x03f0
+#define DOT11_NGBR_BI_CAP_SPEC_MGMT	0x0010
+#define DOT11_NGBR_BI_CAP_QOS		0x0020
+#define DOT11_NGBR_BI_CAP_APSD		0x0040
+#define DOT11_NGBR_BI_CAP_RDIO_MSMT	0x0080
+#define DOT11_NGBR_BI_CAP_DEL_BA	0x0100
+#define DOT11_NGBR_BI_CAP_IMM_BA	0x0200
+#define DOT11_NGBR_BI_MOBILITY		0x0400
+#define DOT11_NGBR_BI_HT		0x0800
+
+/** Neighbor Report element (11k & 11v) */
+BWL_PRE_PACKED_STRUCT struct dot11_neighbor_rep_ie {
+	uint8 id;
+	uint8 len;
+	struct ether_addr bssid;
+	uint32 bssid_info;
+	uint8 reg;		/* Operating class */
+	uint8 channel;
+	uint8 phytype;
+	uint8 data[1]; 		/* Variable size subelements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_neighbor_rep_ie dot11_neighbor_rep_ie_t;
+#define DOT11_NEIGHBOR_REP_IE_FIXED_LEN	13
+
+
+/* MLME Enumerations */
+#define DOT11_BSSTYPE_INFRASTRUCTURE		0	/* d11 infrastructure */
+#define DOT11_BSSTYPE_INDEPENDENT		1	/* d11 independent */
+#define DOT11_BSSTYPE_ANY			2	/* d11 any BSS type */
+#define DOT11_SCANTYPE_ACTIVE			0	/* d11 scan active */
+#define DOT11_SCANTYPE_PASSIVE			1	/* d11 scan passive */
+
+/** Link Measurement */
+BWL_PRE_PACKED_STRUCT struct dot11_lmreq {
+	uint8 category;				/* category of action frame (5) */
+	uint8 action;				/* radio measurement action */
+	uint8 token;				/* dialog token */
+	uint8 txpwr;				/* Transmit Power Used */
+	uint8 maxtxpwr;				/* Max Transmit Power */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_lmreq dot11_lmreq_t;
+#define DOT11_LMREQ_LEN	5
+
+BWL_PRE_PACKED_STRUCT struct dot11_lmrep {
+	uint8 category;				/* category of action frame (5) */
+	uint8 action;				/* radio measurement action */
+	uint8 token;				/* dialog token */
+	dot11_tpc_rep_t tpc;			/* TPC element */
+	uint8 rxant;				/* Receive Antenna ID */
+	uint8 txant;				/* Transmit Antenna ID */
+	uint8 rcpi;				/* RCPI */
+	uint8 rsni;				/* RSNI */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_lmrep dot11_lmrep_t;
+#define DOT11_LMREP_LEN	11
+
+/* 802.11 BRCM "Compromise" Pre N constants */
+#define PREN_PREAMBLE		24	/* green field preamble time */
+#define PREN_MM_EXT		12	/* extra mixed mode preamble time */
+#define PREN_PREAMBLE_EXT	4	/* extra preamble (multiply by unique_streams-1) */
+
+/* 802.11N PHY constants */
+#define RIFS_11N_TIME		2	/* NPHY RIFS time */
+
+/* 802.11 HT PLCP format 802.11n-2009, sec 20.3.9.4.3
+ * HT-SIG is composed of two 24 bit parts, HT-SIG1 and HT-SIG2
+ */
+/* HT-SIG1 */
+#define HT_SIG1_MCS_MASK        0x00007F
+#define HT_SIG1_CBW             0x000080
+#define HT_SIG1_HT_LENGTH       0xFFFF00
+
+/* HT-SIG2 */
+#define HT_SIG2_SMOOTHING       0x000001
+#define HT_SIG2_NOT_SOUNDING    0x000002
+#define HT_SIG2_RESERVED        0x000004
+#define HT_SIG2_AGGREGATION     0x000008
+#define HT_SIG2_STBC_MASK       0x000030
+#define HT_SIG2_STBC_SHIFT      4
+#define HT_SIG2_FEC_CODING      0x000040
+#define HT_SIG2_SHORT_GI        0x000080
+#define HT_SIG2_ESS_MASK        0x000300
+#define HT_SIG2_ESS_SHIFT       8
+#define HT_SIG2_CRC             0x03FC00
+#define HT_SIG2_TAIL            0x1C0000
+
+/* HT Timing-related parameters (802.11-2012, sec 20.3.6) */
+#define HT_T_LEG_PREAMBLE      16
+#define HT_T_L_SIG              4
+#define HT_T_SIG                8
+#define HT_T_LTF1               4
+#define HT_T_GF_LTF1            8
+#define HT_T_LTFs               4
+#define HT_T_STF                4
+#define HT_T_GF_STF             8
+#define HT_T_SYML               4
+
+#define HT_N_SERVICE           16       /* bits in SERVICE field */
+#define HT_N_TAIL               6       /* tail bits per BCC encoder */
+
+/* 802.11 A PHY constants */
+#define APHY_SLOT_TIME          9       /* APHY slot time */
+#define APHY_SIFS_TIME          16      /* APHY SIFS time */
+#define APHY_DIFS_TIME          (APHY_SIFS_TIME + (2 * APHY_SLOT_TIME))  /* APHY DIFS time */
+#define APHY_PREAMBLE_TIME      16      /* APHY preamble time */
+#define APHY_SIGNAL_TIME        4       /* APHY signal time */
+#define APHY_SYMBOL_TIME        4       /* APHY symbol time */
+#define APHY_SERVICE_NBITS      16      /* APHY service nbits */
+#define APHY_TAIL_NBITS         6       /* APHY tail nbits */
+#define APHY_CWMIN              15      /* APHY cwmin */
+
+/* 802.11 B PHY constants */
+#define BPHY_SLOT_TIME          20      /* BPHY slot time */
+#define BPHY_SIFS_TIME          10      /* BPHY SIFS time */
+#define BPHY_DIFS_TIME          50      /* BPHY DIFS time */
+#define BPHY_PLCP_TIME          192     /* BPHY PLCP time */
+#define BPHY_PLCP_SHORT_TIME    96      /* BPHY PLCP short time */
+#define BPHY_CWMIN              31      /* BPHY cwmin */
+
+/* 802.11 G constants */
+#define DOT11_OFDM_SIGNAL_EXTENSION	6	/* d11 OFDM signal extension */
+
+#define PHY_CWMAX		1023	/* PHY cwmax */
+
+#define	DOT11_MAXNUMFRAGS	16	/* max # fragments per MSDU */
+
+/* 802.11 VHT constants */
+
+typedef int vht_group_id_t;
+
+/* for VHT-A1 */
+/* SIG-A1 reserved bits */
+#define VHT_SIGA1_CONST_MASK            0x800004
+
+#define VHT_SIGA1_BW_MASK               0x000003
+#define VHT_SIGA1_20MHZ_VAL             0x000000
+#define VHT_SIGA1_40MHZ_VAL             0x000001
+#define VHT_SIGA1_80MHZ_VAL             0x000002
+#define VHT_SIGA1_160MHZ_VAL            0x000003
+
+#define VHT_SIGA1_STBC                  0x000008
+
+#define VHT_SIGA1_GID_MASK              0x0003f0
+#define VHT_SIGA1_GID_SHIFT             4
+#define VHT_SIGA1_GID_TO_AP             0x00
+#define VHT_SIGA1_GID_NOT_TO_AP         0x3f
+#define VHT_SIGA1_GID_MAX_GID           0x3f
+
+#define VHT_SIGA1_NSTS_SHIFT_MASK_USER0 0x001C00
+#define VHT_SIGA1_NSTS_SHIFT            10
+
+#define VHT_SIGA1_PARTIAL_AID_MASK      0x3fe000
+#define VHT_SIGA1_PARTIAL_AID_SHIFT     13
+
+#define VHT_SIGA1_TXOP_PS_NOT_ALLOWED   0x400000
+
+/* for VHT-A2 */
+#define VHT_SIGA2_GI_NONE               0x000000
+#define VHT_SIGA2_GI_SHORT              0x000001
+#define VHT_SIGA2_GI_W_MOD10            0x000002
+#define VHT_SIGA2_CODING_LDPC           0x000004
+#define VHT_SIGA2_LDPC_EXTRA_OFDM_SYM   0x000008
+#define VHT_SIGA2_BEAMFORM_ENABLE       0x000100
+#define VHT_SIGA2_MCS_SHIFT             4
+
+#define VHT_SIGA2_B9_RESERVED           0x000200
+#define VHT_SIGA2_TAIL_MASK             0xfc0000
+#define VHT_SIGA2_TAIL_VALUE            0x000000
+
+/* VHT Timing-related parameters (802.11ac D4.0, sec 22.3.6) */
+#define VHT_T_LEG_PREAMBLE      16
+#define VHT_T_L_SIG              4
+#define VHT_T_SIG_A              8
+#define VHT_T_LTF                4
+#define VHT_T_STF                4
+#define VHT_T_SIG_B              4
+#define VHT_T_SYML               4
+
+#define VHT_N_SERVICE           16	/* bits in SERVICE field */
+#define VHT_N_TAIL               6	/* tail bits per BCC encoder */
+
+
+/** dot11Counters Table - 802.11 spec., Annex D */
+typedef struct d11cnt {
+	uint32		txfrag;		/* dot11TransmittedFragmentCount */
+	uint32		txmulti;	/* dot11MulticastTransmittedFrameCount */
+	uint32		txfail;		/* dot11FailedCount */
+	uint32		txretry;	/* dot11RetryCount */
+	uint32		txretrie;	/* dot11MultipleRetryCount */
+	uint32		rxdup;		/* dot11FrameduplicateCount */
+	uint32		txrts;		/* dot11RTSSuccessCount */
+	uint32		txnocts;	/* dot11RTSFailureCount */
+	uint32		txnoack;	/* dot11ACKFailureCount */
+	uint32		rxfrag;		/* dot11ReceivedFragmentCount */
+	uint32		rxmulti;	/* dot11MulticastReceivedFrameCount */
+	uint32		rxcrc;		/* dot11FCSErrorCount */
+	uint32		txfrmsnt;	/* dot11TransmittedFrameCount */
+	uint32		rxundec;	/* dot11WEPUndecryptableCount */
+} d11cnt_t;
+
+#define BRCM_PROP_OUI		"\x00\x90\x4C"
+
+
+/* Action frame type for RWL */
+#define RWL_WIFI_DEFAULT		0
+#define RWL_WIFI_FIND_MY_PEER		9 /* Used while finding server */
+#define RWL_WIFI_FOUND_PEER		10 /* Server response to the client  */
+#define RWL_ACTION_WIFI_FRAG_TYPE	85 /* Fragment indicator for receiver */
+
+#define PROXD_AF_TYPE			11 /* Wifi proximity action frame type */
+#define BRCM_RELMACST_AF_TYPE	        12 /* RMC action frame type */
+
+
+/* brcm syscap_ie cap */
+#define BRCM_SYSCAP_WET_TUNNEL	0x0100	/* Device with WET_TUNNEL support */
+
+#define BRCM_OUI		"\x00\x10\x18"	/* Broadcom OUI */
+
+/** BRCM info element */
+BWL_PRE_PACKED_STRUCT struct brcm_ie {
+	uint8	id;		/* IE ID, 221, DOT11_MNG_PROPR_ID */
+	uint8	len;		/* IE length */
+	uint8	oui[3];
+	uint8	ver;		/* type/ver of this IE */
+	uint8	assoc;		/* # of assoc STAs */
+	uint8	flags;		/* misc flags */
+	uint8	flags1;		/* misc flags */
+	uint16	amsdu_mtu_pref;	/* preferred A-MSDU MTU */
+} BWL_POST_PACKED_STRUCT;
+typedef	struct brcm_ie brcm_ie_t;
+#define BRCM_IE_LEN		11	/* BRCM IE length */
+#define BRCM_IE_VER		2	/* BRCM IE version */
+#define BRCM_IE_LEGACY_AES_VER	1	/* BRCM IE legacy AES version */
+
+/* brcm_ie flags */
+#define	BRF_ABCAP		0x1	/* afterburner is obsolete,  defined for backward compat */
+#define	BRF_ABRQRD		0x2	/* afterburner is obsolete,  defined for backward compat */
+#define	BRF_LZWDS		0x4	/* lazy wds enabled */
+#define	BRF_BLOCKACK		0x8	/* BlockACK capable */
+#define BRF_ABCOUNTER_MASK	0xf0	/* afterburner is obsolete,  defined for backward compat */
+#define BRF_PROP_11N_MCS	0x10	/* re-use afterburner bit */
+
+/**
+ * Support for Broadcom proprietary HT MCS rates. Re-uses afterburner bits since afterburner is not
+ * used anymore. Checks for BRF_ABCAP to stay compliant with 'old' images in the field.
+ */
+#define GET_BRF_PROP_11N_MCS(brcm_ie) \
+	(!((brcm_ie)->flags & BRF_ABCAP) && ((brcm_ie)->flags & BRF_PROP_11N_MCS))
+
+/* brcm_ie flags1 */
+#define	BRF1_AMSDU		0x1	/* A-MSDU capable */
+#define BRF1_WMEPS		0x4	/* AP is capable of handling WME + PS w/o APSD */
+#define BRF1_PSOFIX		0x8	/* AP has fixed PS mode out-of-order packets */
+#define	BRF1_RX_LARGE_AGG	0x10	/* device can rx large aggregates */
+#define BRF1_RFAWARE_DCS	0x20    /* RFAWARE dynamic channel selection (DCS) */
+#define BRF1_SOFTAP		0x40    /* Configure as Broadcom SOFTAP */
+#define BRF1_DWDS		0x80    /* DWDS capable */
+
+/** Vendor IE structure */
+BWL_PRE_PACKED_STRUCT struct vndr_ie {
+	uchar id;
+	uchar len;
+	uchar oui [3];
+	uchar data [1]; 	/* Variable size data */
+} BWL_POST_PACKED_STRUCT;
+typedef struct vndr_ie vndr_ie_t;
+
+#define VNDR_IE_HDR_LEN		2	/* id + len field */
+#define VNDR_IE_MIN_LEN		3	/* size of the oui field */
+#define VNDR_IE_FIXED_LEN	(VNDR_IE_HDR_LEN + VNDR_IE_MIN_LEN)
+
+#define VNDR_IE_MAX_LEN		255	/* vendor IE max length, without ID and len */
+
+/** BRCM PROP DEVICE PRIMARY MAC ADDRESS IE */
+BWL_PRE_PACKED_STRUCT struct member_of_brcm_prop_ie {
+	uchar id;
+	uchar len;
+	uchar oui[3];
+	uint8	type;           /* type indicates what follows */
+	struct ether_addr ea;   /* Device Primary MAC Adrress */
+} BWL_POST_PACKED_STRUCT;
+typedef struct member_of_brcm_prop_ie member_of_brcm_prop_ie_t;
+
+#define MEMBER_OF_BRCM_PROP_IE_LEN		10	/* IE max length */
+#define MEMBER_OF_BRCM_PROP_IE_HDRLEN	        (sizeof(member_of_brcm_prop_ie_t))
+#define MEMBER_OF_BRCM_PROP_IE_TYPE		54
+
+/** BRCM Reliable Multicast IE */
+BWL_PRE_PACKED_STRUCT struct relmcast_brcm_prop_ie {
+	uint8 id;
+	uint8 len;
+	uint8 oui[3];
+	uint8 type;           /* type indicates what follows */
+	struct ether_addr ea;   /* The ack sender's MAC Adrress */
+	struct ether_addr mcast_ea;  /* The multicast MAC address */
+	uint8 updtmo; /* time interval(second) for client to send null packet to report its rssi */
+} BWL_POST_PACKED_STRUCT;
+typedef struct relmcast_brcm_prop_ie relmcast_brcm_prop_ie_t;
+
+/* IE length */
+/* BRCM_PROP_IE_LEN = sizeof(relmcast_brcm_prop_ie_t)-((sizeof (id) + sizeof (len)))? */
+#define RELMCAST_BRCM_PROP_IE_LEN	(sizeof(relmcast_brcm_prop_ie_t)-(2*sizeof(uint8)))
+
+#define RELMCAST_BRCM_PROP_IE_TYPE	55
+
+/* ************* HT definitions. ************* */
+#define MCSSET_LEN	16	/* 16-bits per 8-bit set to give 128-bits bitmap of MCS Index */
+#define MAX_MCS_NUM	(128)	/* max mcs number = 128 */
+
+BWL_PRE_PACKED_STRUCT struct ht_cap_ie {
+	uint16	cap;
+	uint8	params;
+	uint8	supp_mcs[MCSSET_LEN];
+	uint16	ext_htcap;
+	uint32	txbf_cap;
+	uint8	as_cap;
+} BWL_POST_PACKED_STRUCT;
+typedef struct ht_cap_ie ht_cap_ie_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_ht_cap_ie {
+	uint8	id;
+	uint8	len;
+	ht_cap_ie_t ht_cap;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ht_cap_ie dot11_ht_cap_ie_t;
+
+/* CAP IE: HT 1.0 spec. simply stole a 802.11 IE, we use our prop. IE until this is resolved */
+/* the capability IE is primarily used to convey this nodes abilities */
+BWL_PRE_PACKED_STRUCT struct ht_prop_cap_ie {
+	uint8	id;		/* IE ID, 221, DOT11_MNG_PROPR_ID */
+	uint8	len;		/* IE length */
+	uint8	oui[3];
+	uint8	type;           /* type indicates what follows */
+	ht_cap_ie_t cap_ie;
+} BWL_POST_PACKED_STRUCT;
+typedef struct ht_prop_cap_ie ht_prop_cap_ie_t;
+
+#define HT_PROP_IE_OVERHEAD	4	/* overhead bytes for prop oui ie */
+#define HT_CAP_IE_LEN		26	/* HT capability len (based on .11n d2.0) */
+#define HT_CAP_IE_TYPE		51
+
+#define HT_CAP_LDPC_CODING	0x0001	/* Support for rx of LDPC coded pkts */
+#define HT_CAP_40MHZ		0x0002  /* FALSE:20Mhz, TRUE:20/40MHZ supported */
+#define HT_CAP_MIMO_PS_MASK	0x000C  /* Mimo PS mask */
+#define HT_CAP_MIMO_PS_SHIFT	0x0002	/* Mimo PS shift */
+#define HT_CAP_MIMO_PS_OFF	0x0003	/* Mimo PS, no restriction */
+#define HT_CAP_MIMO_PS_RTS	0x0001	/* Mimo PS, send RTS/CTS around MIMO frames */
+#define HT_CAP_MIMO_PS_ON	0x0000	/* Mimo PS, MIMO disallowed */
+#define HT_CAP_GF		0x0010	/* Greenfield preamble support */
+#define HT_CAP_SHORT_GI_20	0x0020	/* 20MHZ short guard interval support */
+#define HT_CAP_SHORT_GI_40	0x0040	/* 40Mhz short guard interval support */
+#define HT_CAP_TX_STBC		0x0080	/* Tx STBC support */
+#define HT_CAP_RX_STBC_MASK	0x0300	/* Rx STBC mask */
+#define HT_CAP_RX_STBC_SHIFT	8	/* Rx STBC shift */
+#define HT_CAP_DELAYED_BA	0x0400	/* delayed BA support */
+#define HT_CAP_MAX_AMSDU	0x0800	/* Max AMSDU size in bytes , 0=3839, 1=7935 */
+
+#define HT_CAP_DSSS_CCK	0x1000	/* DSSS/CCK supported by the BSS */
+#define HT_CAP_PSMP		0x2000	/* Power Save Multi Poll support */
+#define HT_CAP_40MHZ_INTOLERANT 0x4000	/* 40MHz Intolerant */
+#define HT_CAP_LSIG_TXOP	0x8000	/* L-SIG TXOP protection support */
+
+#define HT_CAP_RX_STBC_NO		0x0	/* no rx STBC support */
+#define HT_CAP_RX_STBC_ONE_STREAM	0x1	/* rx STBC support of 1 spatial stream */
+#define HT_CAP_RX_STBC_TWO_STREAM	0x2	/* rx STBC support of 1-2 spatial streams */
+#define HT_CAP_RX_STBC_THREE_STREAM	0x3	/* rx STBC support of 1-3 spatial streams */
+
+
+#define HT_CAP_TXBF_CAP_IMPLICIT_TXBF_RX	0x1
+#define HT_CAP_TXBF_CAP_NDP_RX			0x8
+#define HT_CAP_TXBF_CAP_NDP_TX			0x10
+#define HT_CAP_TXBF_CAP_EXPLICIT_CSI		0x100
+#define HT_CAP_TXBF_CAP_EXPLICIT_NC_STEERING	0x200
+#define HT_CAP_TXBF_CAP_EXPLICIT_C_STEERING	0x400
+#define HT_CAP_TXBF_CAP_EXPLICIT_CSI_FB_MASK	0x1800
+#define HT_CAP_TXBF_CAP_EXPLICIT_CSI_FB_SHIFT	11
+#define HT_CAP_TXBF_CAP_EXPLICIT_NC_FB_MASK	0x6000
+#define HT_CAP_TXBF_CAP_EXPLICIT_NC_FB_SHIFT	13
+#define HT_CAP_TXBF_CAP_EXPLICIT_C_FB_MASK	0x18000
+#define HT_CAP_TXBF_CAP_EXPLICIT_C_FB_SHIFT	15
+#define HT_CAP_TXBF_CAP_CSI_BFR_ANT_SHIFT	19
+#define HT_CAP_TXBF_CAP_NC_BFR_ANT_SHIFT	21
+#define HT_CAP_TXBF_CAP_C_BFR_ANT_SHIFT		23
+#define HT_CAP_TXBF_CAP_C_BFR_ANT_MASK		0x1800000
+
+#define HT_CAP_TXBF_CAP_CHAN_ESTIM_SHIFT	27
+#define HT_CAP_TXBF_CAP_CHAN_ESTIM_MASK		0x18000000
+
+#define HT_CAP_TXBF_FB_TYPE_NONE 	0
+#define HT_CAP_TXBF_FB_TYPE_DELAYED 	1
+#define HT_CAP_TXBF_FB_TYPE_IMMEDIATE 	2
+#define HT_CAP_TXBF_FB_TYPE_BOTH 	3
+
+#define HT_CAP_TX_BF_CAP_EXPLICIT_CSI_FB_MASK	0x400
+#define HT_CAP_TX_BF_CAP_EXPLICIT_CSI_FB_SHIFT	10
+#define HT_CAP_TX_BF_CAP_EXPLICIT_COMPRESSED_FB_MASK 0x18000
+#define HT_CAP_TX_BF_CAP_EXPLICIT_COMPRESSED_FB_SHIFT 15
+
+#define VHT_MAX_MPDU		11454	/* max mpdu size for now (bytes) */
+#define VHT_MPDU_MSDU_DELTA	56		/* Difference in spec - vht mpdu, amsdu len */
+/* Max AMSDU len - per spec */
+#define VHT_MAX_AMSDU		(VHT_MAX_MPDU - VHT_MPDU_MSDU_DELTA)
+
+#define HT_MAX_AMSDU		7935	/* max amsdu size (bytes) per the HT spec */
+#define HT_MIN_AMSDU		3835	/* min amsdu size (bytes) per the HT spec */
+
+#define HT_PARAMS_RX_FACTOR_MASK	0x03	/* ampdu rcv factor mask */
+#define HT_PARAMS_DENSITY_MASK		0x1C	/* ampdu density mask */
+#define HT_PARAMS_DENSITY_SHIFT	2	/* ampdu density shift */
+
+/* HT/AMPDU specific define */
+#define AMPDU_MAX_MPDU_DENSITY  7       /* max mpdu density; in 1/4 usec units */
+#define AMPDU_DENSITY_NONE      0       /* No density requirement */
+#define AMPDU_DENSITY_1over4_US 1       /* 1/4 us density */
+#define AMPDU_DENSITY_1over2_US 2       /* 1/2 us density */
+#define AMPDU_DENSITY_1_US      3       /*   1 us density */
+#define AMPDU_DENSITY_2_US      4       /*   2 us density */
+#define AMPDU_DENSITY_4_US      5       /*   4 us density */
+#define AMPDU_DENSITY_8_US      6       /*   8 us density */
+#define AMPDU_DENSITY_16_US     7       /*  16 us density */
+#define AMPDU_RX_FACTOR_8K      0       /* max rcv ampdu len (8kb) */
+#define AMPDU_RX_FACTOR_16K     1       /* max rcv ampdu len (16kb) */
+#define AMPDU_RX_FACTOR_32K     2       /* max rcv ampdu len (32kb) */
+#define AMPDU_RX_FACTOR_64K     3       /* max rcv ampdu len (64kb) */
+
+/* AMPDU RX factors for VHT rates */
+#define AMPDU_RX_FACTOR_128K    4       /* max rcv ampdu len (128kb) */
+#define AMPDU_RX_FACTOR_256K    5       /* max rcv ampdu len (256kb) */
+#define AMPDU_RX_FACTOR_512K    6       /* max rcv ampdu len (512kb) */
+#define AMPDU_RX_FACTOR_1024K   7       /* max rcv ampdu len (1024kb) */
+
+#define AMPDU_RX_FACTOR_BASE    8*1024  /* ampdu factor base for rx len */
+#define AMPDU_RX_FACTOR_BASE_PWR	13	/* ampdu factor base for rx len in power of 2 */
+
+#define AMPDU_DELIMITER_LEN	4	/* length of ampdu delimiter */
+#define AMPDU_DELIMITER_LEN_MAX	63	/* max length of ampdu delimiter(enforced in HW) */
+
+#define HT_CAP_EXT_PCO			0x0001
+#define HT_CAP_EXT_PCO_TTIME_MASK	0x0006
+#define HT_CAP_EXT_PCO_TTIME_SHIFT	1
+#define HT_CAP_EXT_MCS_FEEDBACK_MASK	0x0300
+#define HT_CAP_EXT_MCS_FEEDBACK_SHIFT	8
+#define HT_CAP_EXT_HTC			0x0400
+#define HT_CAP_EXT_RD_RESP		0x0800
+
+/** 'ht_add' is called 'HT Operation' information element in the 802.11 standard */
+BWL_PRE_PACKED_STRUCT struct ht_add_ie {
+	uint8	ctl_ch;			/* control channel number */
+	uint8	byte1;			/* ext ch,rec. ch. width, RIFS support */
+	uint16	opmode;			/* operation mode */
+	uint16	misc_bits;		/* misc bits */
+	uint8	basic_mcs[MCSSET_LEN];  /* required MCS set */
+} BWL_POST_PACKED_STRUCT;
+typedef struct ht_add_ie ht_add_ie_t;
+
+/* ADD IE: HT 1.0 spec. simply stole a 802.11 IE, we use our prop. IE until this is resolved */
+/* the additional IE is primarily used to convey the current BSS configuration */
+BWL_PRE_PACKED_STRUCT struct ht_prop_add_ie {
+	uint8	id;		/* IE ID, 221, DOT11_MNG_PROPR_ID */
+	uint8	len;		/* IE length */
+	uint8	oui[3];
+	uint8	type;		/* indicates what follows */
+	ht_add_ie_t add_ie;
+} BWL_POST_PACKED_STRUCT;
+typedef struct ht_prop_add_ie ht_prop_add_ie_t;
+
+#define HT_ADD_IE_LEN	22
+#define HT_ADD_IE_TYPE	52
+
+/* byte1 defn's */
+#define HT_BW_ANY		0x04	/* set, STA can use 20 or 40MHz */
+#define HT_RIFS_PERMITTED     	0x08	/* RIFS allowed */
+
+/* opmode defn's */
+#define HT_OPMODE_MASK	        0x0003	/* protection mode mask */
+#define HT_OPMODE_SHIFT		0	/* protection mode shift */
+#define HT_OPMODE_PURE		0x0000	/* protection mode PURE */
+#define HT_OPMODE_OPTIONAL	0x0001	/* protection mode optional */
+#define HT_OPMODE_HT20IN40	0x0002	/* protection mode 20MHz HT in 40MHz BSS */
+#define HT_OPMODE_MIXED	0x0003	/* protection mode Mixed Mode */
+#define HT_OPMODE_NONGF	0x0004	/* protection mode non-GF */
+#define DOT11N_TXBURST		0x0008	/* Tx burst limit */
+#define DOT11N_OBSS_NONHT	0x0010	/* OBSS Non-HT STA present */
+
+/* misc_bites defn's */
+#define HT_BASIC_STBC_MCS	0x007f	/* basic STBC MCS */
+#define HT_DUAL_STBC_PROT	0x0080	/* Dual STBC Protection */
+#define HT_SECOND_BCN		0x0100	/* Secondary beacon support */
+#define HT_LSIG_TXOP		0x0200	/* L-SIG TXOP Protection full support */
+#define HT_PCO_ACTIVE		0x0400	/* PCO active */
+#define HT_PCO_PHASE		0x0800	/* PCO phase */
+#define HT_DUALCTS_PROTECTION	0x0080	/* DUAL CTS protection needed */
+
+/* Tx Burst Limits */
+#define DOT11N_2G_TXBURST_LIMIT	6160	/* 2G band Tx burst limit per 802.11n Draft 1.10 (usec) */
+#define DOT11N_5G_TXBURST_LIMIT	3080	/* 5G band Tx burst limit per 802.11n Draft 1.10 (usec) */
+
+/* Macros for opmode */
+#define GET_HT_OPMODE(add_ie)		((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \
+					>> HT_OPMODE_SHIFT)
+#define HT_MIXEDMODE_PRESENT(add_ie)	((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \
+					== HT_OPMODE_MIXED)	/* mixed mode present */
+#define HT_HT20_PRESENT(add_ie)	((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \
+					== HT_OPMODE_HT20IN40)	/* 20MHz HT present */
+#define HT_OPTIONAL_PRESENT(add_ie)	((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \
+					== HT_OPMODE_OPTIONAL)	/* Optional protection present */
+#define HT_USE_PROTECTION(add_ie)	(HT_HT20_PRESENT((add_ie)) || \
+					HT_MIXEDMODE_PRESENT((add_ie))) /* use protection */
+#define HT_NONGF_PRESENT(add_ie)	((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_NONGF) \
+					== HT_OPMODE_NONGF)	/* non-GF present */
+#define DOT11N_TXBURST_PRESENT(add_ie)	((ltoh16_ua(&add_ie->opmode) & DOT11N_TXBURST) \
+					== DOT11N_TXBURST)	/* Tx Burst present */
+#define DOT11N_OBSS_NONHT_PRESENT(add_ie)	((ltoh16_ua(&add_ie->opmode) & DOT11N_OBSS_NONHT) \
+					== DOT11N_OBSS_NONHT)	/* OBSS Non-HT present */
+
+BWL_PRE_PACKED_STRUCT struct obss_params {
+	uint16	passive_dwell;
+	uint16	active_dwell;
+	uint16	bss_widthscan_interval;
+	uint16	passive_total;
+	uint16	active_total;
+	uint16	chanwidth_transition_dly;
+	uint16	activity_threshold;
+} BWL_POST_PACKED_STRUCT;
+typedef struct obss_params obss_params_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_obss_ie {
+	uint8	id;
+	uint8	len;
+	obss_params_t obss_params;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_obss_ie dot11_obss_ie_t;
+#define DOT11_OBSS_SCAN_IE_LEN	sizeof(obss_params_t)	/* HT OBSS len (based on 802.11n d3.0) */
+
+/* HT control field */
+#define HT_CTRL_LA_TRQ		0x00000002	/* sounding request */
+#define HT_CTRL_LA_MAI		0x0000003C	/* MCS request or antenna selection indication */
+#define HT_CTRL_LA_MAI_SHIFT	2
+#define HT_CTRL_LA_MAI_MRQ	0x00000004	/* MCS request */
+#define HT_CTRL_LA_MAI_MSI	0x00000038	/* MCS request sequence identifier */
+#define HT_CTRL_LA_MFSI		0x000001C0	/* MFB sequence identifier */
+#define HT_CTRL_LA_MFSI_SHIFT	6
+#define HT_CTRL_LA_MFB_ASELC	0x0000FE00	/* MCS feedback, antenna selection command/data */
+#define HT_CTRL_LA_MFB_ASELC_SH	9
+#define HT_CTRL_LA_ASELC_CMD	0x00000C00	/* ASEL command */
+#define HT_CTRL_LA_ASELC_DATA	0x0000F000	/* ASEL data */
+#define HT_CTRL_CAL_POS		0x00030000	/* Calibration position */
+#define HT_CTRL_CAL_SEQ		0x000C0000	/* Calibration sequence */
+#define HT_CTRL_CSI_STEERING	0x00C00000	/* CSI/Steering */
+#define HT_CTRL_CSI_STEER_SHIFT	22
+#define HT_CTRL_CSI_STEER_NFB	0		/* no fedback required */
+#define HT_CTRL_CSI_STEER_CSI	1		/* CSI, H matrix */
+#define HT_CTRL_CSI_STEER_NCOM	2		/* non-compressed beamforming */
+#define HT_CTRL_CSI_STEER_COM	3		/* compressed beamforming */
+#define HT_CTRL_NDP_ANNOUNCE	0x01000000	/* NDP announcement */
+#define HT_CTRL_AC_CONSTRAINT	0x40000000	/* AC Constraint */
+#define HT_CTRL_RDG_MOREPPDU	0x80000000	/* RDG/More PPDU */
+
+/* ************* VHT definitions. ************* */
+
+/**
+ * VHT Capabilites IE (sec 8.4.2.160)
+ */
+
+BWL_PRE_PACKED_STRUCT struct vht_cap_ie {
+	uint32  vht_cap_info;
+	/* supported MCS set - 64 bit field */
+	uint16	rx_mcs_map;
+	uint16  rx_max_rate;
+	uint16  tx_mcs_map;
+	uint16	tx_max_rate;
+} BWL_POST_PACKED_STRUCT;
+typedef struct vht_cap_ie vht_cap_ie_t;
+
+/* 4B cap_info + 8B supp_mcs */
+#define VHT_CAP_IE_LEN 12
+
+/* VHT Capabilities Info field - 32bit - in VHT Cap IE */
+#define VHT_CAP_INFO_MAX_MPDU_LEN_MASK          0x00000003
+#define VHT_CAP_INFO_SUPP_CHAN_WIDTH_MASK       0x0000000c
+#define VHT_CAP_INFO_LDPC                       0x00000010
+#define VHT_CAP_INFO_SGI_80MHZ                  0x00000020
+#define VHT_CAP_INFO_SGI_160MHZ                 0x00000040
+#define VHT_CAP_INFO_TX_STBC                    0x00000080
+#define VHT_CAP_INFO_RX_STBC_MASK               0x00000700
+#define VHT_CAP_INFO_RX_STBC_SHIFT              8
+#define VHT_CAP_INFO_SU_BEAMFMR                 0x00000800
+#define VHT_CAP_INFO_SU_BEAMFMEE                0x00001000
+#define VHT_CAP_INFO_NUM_BMFMR_ANT_MASK         0x0000e000
+#define VHT_CAP_INFO_NUM_BMFMR_ANT_SHIFT        13
+#define VHT_CAP_INFO_NUM_SOUNDING_DIM_MASK      0x00070000
+#define VHT_CAP_INFO_NUM_SOUNDING_DIM_SHIFT     16
+#define VHT_CAP_INFO_MU_BEAMFMR                 0x00080000
+#define VHT_CAP_INFO_MU_BEAMFMEE                0x00100000
+#define VHT_CAP_INFO_TXOPPS                     0x00200000
+#define VHT_CAP_INFO_HTCVHT                     0x00400000
+#define VHT_CAP_INFO_AMPDU_MAXLEN_EXP_MASK      0x03800000
+#define VHT_CAP_INFO_AMPDU_MAXLEN_EXP_SHIFT     23
+#define VHT_CAP_INFO_LINK_ADAPT_CAP_MASK        0x0c000000
+#define VHT_CAP_INFO_LINK_ADAPT_CAP_SHIFT       26
+
+/* VHT Supported MCS Set - 64-bit - in VHT Cap IE */
+#define VHT_CAP_SUPP_MCS_RX_HIGHEST_RATE_MASK   0x1fff
+#define VHT_CAP_SUPP_MCS_RX_HIGHEST_RATE_SHIFT  0
+
+#define VHT_CAP_SUPP_MCS_TX_HIGHEST_RATE_MASK   0x1fff
+#define VHT_CAP_SUPP_MCS_TX_HIGHEST_RATE_SHIFT  0
+
+#define VHT_CAP_MCS_MAP_0_7                     0
+#define VHT_CAP_MCS_MAP_0_8                     1
+#define VHT_CAP_MCS_MAP_0_9                     2
+#define VHT_CAP_MCS_MAP_NONE                    3
+#define VHT_CAP_MCS_MAP_S                       2 /* num bits for 1-stream */
+#define VHT_CAP_MCS_MAP_M                       0x3 /* mask for 1-stream */
+/* assumes VHT_CAP_MCS_MAP_NONE is 3 and 2 bits are used for encoding */
+#define VHT_CAP_MCS_MAP_NONE_ALL                0xffff
+/* mcsmap with MCS0-9 for Nss = 3 */
+#define VHT_CAP_MCS_MAP_0_9_NSS3 \
+	        ((VHT_CAP_MCS_MAP_0_9 << VHT_MCS_MAP_GET_SS_IDX(1)) | \
+	         (VHT_CAP_MCS_MAP_0_9 << VHT_MCS_MAP_GET_SS_IDX(2)) | \
+	         (VHT_CAP_MCS_MAP_0_9 << VHT_MCS_MAP_GET_SS_IDX(3)))
+
+#define VHT_CAP_MCS_MAP_NSS_MAX                 8
+
+/* get mcsmap with given mcs for given nss streams */
+#define VHT_CAP_MCS_MAP_CREATE(mcsmap, nss, mcs) \
+	do { \
+		int i; \
+		for (i = 1; i <= nss; i++) { \
+			VHT_MCS_MAP_SET_MCS_PER_SS(i, mcs, mcsmap); \
+		} \
+	} while (0)
+
+/* Map the mcs code to mcs bit map */
+#define VHT_MCS_CODE_TO_MCS_MAP(mcs_code) \
+	((mcs_code == VHT_CAP_MCS_MAP_0_7) ? 0xff : \
+	 (mcs_code == VHT_CAP_MCS_MAP_0_8) ? 0x1ff : \
+	 (mcs_code == VHT_CAP_MCS_MAP_0_9) ? 0x3ff : 0)
+
+/* Map the mcs bit map to mcs code */
+#define VHT_MCS_MAP_TO_MCS_CODE(mcs_map) \
+	((mcs_map == 0xff)  ? VHT_CAP_MCS_MAP_0_7 : \
+	 (mcs_map == 0x1ff) ? VHT_CAP_MCS_MAP_0_8 : \
+	 (mcs_map == 0x3ff) ? VHT_CAP_MCS_MAP_0_9 : VHT_CAP_MCS_MAP_NONE)
+
+/** VHT Capabilities Supported Channel Width */
+typedef enum vht_cap_chan_width {
+	VHT_CAP_CHAN_WIDTH_SUPPORT_MANDATORY = 0x00,
+	VHT_CAP_CHAN_WIDTH_SUPPORT_160       = 0x04,
+	VHT_CAP_CHAN_WIDTH_SUPPORT_160_8080  = 0x08
+} vht_cap_chan_width_t;
+
+/** VHT Capabilities Supported max MPDU LEN (sec 8.4.2.160.2) */
+typedef enum vht_cap_max_mpdu_len {
+	VHT_CAP_MPDU_MAX_4K     = 0x00,
+	VHT_CAP_MPDU_MAX_8K     = 0x01,
+	VHT_CAP_MPDU_MAX_11K    = 0x02
+} vht_cap_max_mpdu_len_t;
+
+/* Maximum MPDU Length byte counts for the VHT Capabilities advertised limits */
+#define VHT_MPDU_LIMIT_4K        3895
+#define VHT_MPDU_LIMIT_8K        7991
+#define VHT_MPDU_LIMIT_11K      11454
+
+
+/**
+ * VHT Operation IE (sec 8.4.2.161)
+ */
+
+BWL_PRE_PACKED_STRUCT struct vht_op_ie {
+	uint8	chan_width;
+	uint8	chan1;
+	uint8	chan2;
+	uint16	supp_mcs;  /*  same def as above in vht cap */
+} BWL_POST_PACKED_STRUCT;
+typedef struct vht_op_ie vht_op_ie_t;
+
+/* 3B VHT Op info + 2B Basic MCS */
+#define VHT_OP_IE_LEN 5
+
+typedef enum vht_op_chan_width {
+	VHT_OP_CHAN_WIDTH_20_40	= 0,
+	VHT_OP_CHAN_WIDTH_80	= 1,
+	VHT_OP_CHAN_WIDTH_160	= 2,
+	VHT_OP_CHAN_WIDTH_80_80	= 3
+} vht_op_chan_width_t;
+
+/* AID length */
+#define AID_IE_LEN		2
+/**
+ * BRCM vht features IE header
+ * The header if the fixed part of the IE
+ * On the 5GHz band this is the entire IE,
+ * on 2.4GHz the VHT IEs as defined in the 802.11ac
+ * specification follows
+ *
+ *
+ * VHT features rates  bitmap.
+ * Bit0:		5G MCS 0-9 BW 160MHz
+ * Bit1:		5G MCS 0-9 support BW 80MHz
+ * Bit2:		5G MCS 0-9 support BW 20MHz
+ * Bit3:		2.4G MCS 0-9 support BW 20MHz
+ * Bits:4-7	Reserved for future use
+ *
+ */
+#define VHT_FEATURES_IE_TYPE	0x4
+BWL_PRE_PACKED_STRUCT struct vht_features_ie_hdr {
+	uint8 oui[3];
+	uint8 type;		/* type of this IE = 4 */
+	uint8 rate_mask;	/* VHT rate mask */
+} BWL_POST_PACKED_STRUCT;
+typedef struct vht_features_ie_hdr vht_features_ie_hdr_t;
+
+/* Def for rx & tx basic mcs maps - ea ss num has 2 bits of info */
+#define VHT_MCS_MAP_GET_SS_IDX(nss) (((nss)-1) * VHT_CAP_MCS_MAP_S)
+#define VHT_MCS_MAP_GET_MCS_PER_SS(nss, mcsMap) \
+	(((mcsMap) >> VHT_MCS_MAP_GET_SS_IDX(nss)) & VHT_CAP_MCS_MAP_M)
+#define VHT_MCS_MAP_SET_MCS_PER_SS(nss, numMcs, mcsMap) \
+	do { \
+	 (mcsMap) &= (~(VHT_CAP_MCS_MAP_M << VHT_MCS_MAP_GET_SS_IDX(nss))); \
+	 (mcsMap) |= (((numMcs) & VHT_CAP_MCS_MAP_M) << VHT_MCS_MAP_GET_SS_IDX(nss)); \
+	} while (0)
+#define VHT_MCS_SS_SUPPORTED(nss, mcsMap) \
+		 (VHT_MCS_MAP_GET_MCS_PER_SS((nss), (mcsMap)) != VHT_CAP_MCS_MAP_NONE)
+
+
+/* ************* WPA definitions. ************* */
+#define WPA_OUI			"\x00\x50\xF2"	/* WPA OUI */
+#define WPA_OUI_LEN		3		/* WPA OUI length */
+#define WPA_OUI_TYPE		1
+#define WPA_VERSION		1		/* WPA version */
+#define WPA2_OUI		"\x00\x0F\xAC"	/* WPA2 OUI */
+#define WPA2_OUI_LEN		3		/* WPA2 OUI length */
+#define WPA2_VERSION		1		/* WPA2 version */
+#define WPA2_VERSION_LEN	2		/* WAP2 version length */
+
+/* ************* WPS definitions. ************* */
+#define WPS_OUI			"\x00\x50\xF2"	/* WPS OUI */
+#define WPS_OUI_LEN		3		/* WPS OUI length */
+#define WPS_OUI_TYPE		4
+
+/* ************* WFA definitions. ************* */
+
+#ifdef P2P_IE_OVRD
+#define WFA_OUI			MAC_OUI
+#else
+#define WFA_OUI			"\x50\x6F\x9A"	/* WFA OUI */
+#endif /* P2P_IE_OVRD */
+#define WFA_OUI_LEN		3		/* WFA OUI length */
+#ifdef P2P_IE_OVRD
+#define WFA_OUI_TYPE_P2P	MAC_OUI_TYPE_P2P
+#else
+#define WFA_OUI_TYPE_TPC	8
+#define WFA_OUI_TYPE_P2P	9
+#endif
+
+#define WFA_OUI_TYPE_TPC	8
+#ifdef WLTDLS
+#define WFA_OUI_TYPE_TPQ	4	/* WFD Tunneled Probe ReQuest */
+#define WFA_OUI_TYPE_TPS	5	/* WFD Tunneled Probe ReSponse */
+#define WFA_OUI_TYPE_WFD	10
+#endif /* WTDLS */
+#define WFA_OUI_TYPE_HS20	0x10
+#define WFA_OUI_TYPE_OSEN	0x12
+#define WFA_OUI_TYPE_NAN	0x13
+
+/* RSN authenticated key managment suite */
+#define RSN_AKM_NONE		0	/* None (IBSS) */
+#define RSN_AKM_UNSPECIFIED	1	/* Over 802.1x */
+#define RSN_AKM_PSK		2	/* Pre-shared Key */
+#define RSN_AKM_FBT_1X		3	/* Fast Bss transition using 802.1X */
+#define RSN_AKM_FBT_PSK		4	/* Fast Bss transition using Pre-shared Key */
+#define RSN_AKM_MFP_1X		5	/* SHA256 key derivation, using 802.1X */
+#define RSN_AKM_MFP_PSK		6	/* SHA256 key derivation, using Pre-shared Key */
+#define RSN_AKM_TPK			7	/* TPK(TDLS Peer Key) handshake */
+
+/* OSEN authenticated key managment suite */
+#define OSEN_AKM_UNSPECIFIED	RSN_AKM_UNSPECIFIED	/* Over 802.1x */
+
+/* Key related defines */
+#define DOT11_MAX_DEFAULT_KEYS	4	/* number of default keys */
+#define DOT11_MAX_IGTK_KEYS		2
+#define DOT11_MAX_KEY_SIZE	32	/* max size of any key */
+#define DOT11_MAX_IV_SIZE	16	/* max size of any IV */
+#define DOT11_EXT_IV_FLAG	(1<<5)	/* flag to indicate IV is > 4 bytes */
+#define DOT11_WPA_KEY_RSC_LEN   8       /* WPA RSC key len */
+
+#define WEP1_KEY_SIZE		5	/* max size of any WEP key */
+#define WEP1_KEY_HEX_SIZE	10	/* size of WEP key in hex. */
+#define WEP128_KEY_SIZE		13	/* max size of any WEP key */
+#define WEP128_KEY_HEX_SIZE	26	/* size of WEP key in hex. */
+#define TKIP_MIC_SIZE		8	/* size of TKIP MIC */
+#define TKIP_EOM_SIZE		7	/* max size of TKIP EOM */
+#define TKIP_EOM_FLAG		0x5a	/* TKIP EOM flag byte */
+#define TKIP_KEY_SIZE		32	/* size of any TKIP key, includs MIC keys */
+#define TKIP_TK_SIZE		16
+#define TKIP_MIC_KEY_SIZE	8
+#define TKIP_MIC_AUTH_TX	16	/* offset to Authenticator MIC TX key */
+#define TKIP_MIC_AUTH_RX	24	/* offset to Authenticator MIC RX key */
+#define TKIP_MIC_SUP_RX		TKIP_MIC_AUTH_TX	/* offset to Supplicant MIC RX key */
+#define TKIP_MIC_SUP_TX		TKIP_MIC_AUTH_RX	/* offset to Supplicant MIC TX key */
+#define AES_KEY_SIZE		16	/* size of AES key */
+#define AES_MIC_SIZE		8	/* size of AES MIC */
+#define BIP_KEY_SIZE		16	/* size of BIP key */
+#define BIP_MIC_SIZE		8   /* sizeof BIP MIC */
+
+#define AES_GCM_MIC_SIZE	16	/* size of MIC for 128-bit GCM - .11adD9 */
+
+#define AES256_KEY_SIZE		32	/* size of AES 256 key - .11acD5 */
+#define AES256_MIC_SIZE		16	/* size of MIC for 256 bit keys, incl BIP */
+
+/* WCN */
+#define WCN_OUI			"\x00\x50\xf2"	/* WCN OUI */
+#define WCN_TYPE		4	/* WCN type */
+
+
+/* 802.11r protocol definitions */
+
+/** Mobility Domain IE */
+BWL_PRE_PACKED_STRUCT struct dot11_mdid_ie {
+	uint8 id;
+	uint8 len;
+	uint16 mdid;		/* Mobility Domain Id */
+	uint8 cap;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_mdid_ie dot11_mdid_ie_t;
+
+#define FBT_MDID_CAP_OVERDS	0x01	/* Fast Bss transition over the DS support */
+#define FBT_MDID_CAP_RRP	0x02	/* Resource request protocol support */
+
+/** Fast Bss Transition IE */
+BWL_PRE_PACKED_STRUCT struct dot11_ft_ie {
+	uint8 id;
+	uint8 len;
+	uint16 mic_control;		/* Mic Control */
+	uint8 mic[16];
+	uint8 anonce[32];
+	uint8 snonce[32];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ft_ie dot11_ft_ie_t;
+
+#define TIE_TYPE_RESERVED		0
+#define TIE_TYPE_REASSOC_DEADLINE	1
+#define TIE_TYPE_KEY_LIEFTIME		2
+#define TIE_TYPE_ASSOC_COMEBACK		3
+BWL_PRE_PACKED_STRUCT struct dot11_timeout_ie {
+	uint8 id;
+	uint8 len;
+	uint8 type;		/* timeout interval type */
+	uint32 value;		/* timeout interval value */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_timeout_ie dot11_timeout_ie_t;
+
+/** GTK ie */
+BWL_PRE_PACKED_STRUCT struct dot11_gtk_ie {
+	uint8 id;
+	uint8 len;
+	uint16 key_info;
+	uint8 key_len;
+	uint8 rsc[8];
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_gtk_ie dot11_gtk_ie_t;
+
+/** Management MIC ie */
+BWL_PRE_PACKED_STRUCT struct mmic_ie {
+	uint8   id;					/* IE ID: DOT11_MNG_MMIE_ID */
+	uint8   len;				/* IE length */
+	uint16  key_id;				/* key id */
+	uint8   ipn[6];				/* ipn */
+	uint8   mic[16];			/* mic */
+} BWL_POST_PACKED_STRUCT;
+typedef struct mmic_ie mmic_ie_t;
+
+#define BSSID_INVALID           "\x00\x00\x00\x00\x00\x00"
+#define BSSID_BROADCAST         "\xFF\xFF\xFF\xFF\xFF\xFF"
+
+
+/* ************* WMM Parameter definitions. ************* */
+#define WMM_OUI			"\x00\x50\xF2"	/* WNN OUI */
+#define WMM_OUI_LEN		3		/* WMM OUI length */
+#define WMM_OUI_TYPE	2		/* WMM OUT type */
+#define WMM_VERSION		1
+#define WMM_VERSION_LEN	1
+
+/* WMM OUI subtype */
+#define WMM_OUI_SUBTYPE_PARAMETER	1
+#define WMM_PARAMETER_IE_LEN		24
+
+/** Link Identifier Element */
+BWL_PRE_PACKED_STRUCT struct link_id_ie {
+	uint8 id;
+	uint8 len;
+	struct ether_addr	bssid;
+	struct ether_addr	tdls_init_mac;
+	struct ether_addr	tdls_resp_mac;
+} BWL_POST_PACKED_STRUCT;
+typedef struct link_id_ie link_id_ie_t;
+#define TDLS_LINK_ID_IE_LEN		18
+
+/** Link Wakeup Schedule Element */
+BWL_PRE_PACKED_STRUCT struct wakeup_sch_ie {
+	uint8 id;
+	uint8 len;
+	uint32 offset;			/* in ms between TSF0 and start of 1st Awake Window */
+	uint32 interval;		/* in ms bwtween the start of 2 Awake Windows */
+	uint32 awake_win_slots;	/* in backof slots, duration of Awake Window */
+	uint32 max_wake_win;	/* in ms, max duration of Awake Window */
+	uint16 idle_cnt;		/* number of consecutive Awake Windows */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wakeup_sch_ie wakeup_sch_ie_t;
+#define TDLS_WAKEUP_SCH_IE_LEN		18
+
+/** Channel Switch Timing Element */
+BWL_PRE_PACKED_STRUCT struct channel_switch_timing_ie {
+	uint8 id;
+	uint8 len;
+	uint16 switch_time;		/* in ms, time to switch channels */
+	uint16 switch_timeout;	/* in ms */
+} BWL_POST_PACKED_STRUCT;
+typedef struct channel_switch_timing_ie channel_switch_timing_ie_t;
+#define TDLS_CHANNEL_SWITCH_TIMING_IE_LEN		4
+
+/** PTI Control Element */
+BWL_PRE_PACKED_STRUCT struct pti_control_ie {
+	uint8 id;
+	uint8 len;
+	uint8 tid;
+	uint16 seq_control;
+} BWL_POST_PACKED_STRUCT;
+typedef struct pti_control_ie pti_control_ie_t;
+#define TDLS_PTI_CONTROL_IE_LEN		3
+
+/** PU Buffer Status Element */
+BWL_PRE_PACKED_STRUCT struct pu_buffer_status_ie {
+	uint8 id;
+	uint8 len;
+	uint8 status;
+} BWL_POST_PACKED_STRUCT;
+typedef struct pu_buffer_status_ie pu_buffer_status_ie_t;
+#define TDLS_PU_BUFFER_STATUS_IE_LEN	1
+#define TDLS_PU_BUFFER_STATUS_AC_BK		1
+#define TDLS_PU_BUFFER_STATUS_AC_BE		2
+#define TDLS_PU_BUFFER_STATUS_AC_VI		4
+#define TDLS_PU_BUFFER_STATUS_AC_VO		8
+
+/* TDLS Action Field Values */
+#define TDLS_SETUP_REQ				0
+#define TDLS_SETUP_RESP				1
+#define TDLS_SETUP_CONFIRM			2
+#define TDLS_TEARDOWN				3
+#define TDLS_PEER_TRAFFIC_IND			4
+#define TDLS_CHANNEL_SWITCH_REQ			5
+#define TDLS_CHANNEL_SWITCH_RESP		6
+#define TDLS_PEER_PSM_REQ			7
+#define TDLS_PEER_PSM_RESP			8
+#define TDLS_PEER_TRAFFIC_RESP			9
+#define TDLS_DISCOVERY_REQ			10
+
+/* 802.11z TDLS Public Action Frame action field */
+#define TDLS_DISCOVERY_RESP			14
+
+/* 802.11u GAS action frames */
+#define GAS_REQUEST_ACTION_FRAME				10
+#define GAS_RESPONSE_ACTION_FRAME				11
+#define GAS_COMEBACK_REQUEST_ACTION_FRAME		12
+#define GAS_COMEBACK_RESPONSE_ACTION_FRAME		13
+
+/* 802.11u interworking access network options */
+#define IW_ANT_MASK				0x0f
+#define IW_INTERNET_MASK		0x10
+#define IW_ASRA_MASK			0x20
+#define IW_ESR_MASK				0x40
+#define IW_UESA_MASK			0x80
+
+/* 802.11u interworking access network type */
+#define IW_ANT_PRIVATE_NETWORK					0
+#define IW_ANT_PRIVATE_NETWORK_WITH_GUEST		1
+#define IW_ANT_CHARGEABLE_PUBLIC_NETWORK		2
+#define IW_ANT_FREE_PUBLIC_NETWORK				3
+#define IW_ANT_PERSONAL_DEVICE_NETWORK			4
+#define IW_ANT_EMERGENCY_SERVICES_NETWORK		5
+#define IW_ANT_TEST_NETWORK						14
+#define IW_ANT_WILDCARD_NETWORK					15
+
+/* 802.11u advertisement protocol */
+#define ADVP_ANQP_PROTOCOL_ID	0
+
+/* 802.11u advertisement protocol masks */
+#define ADVP_QRL_MASK					0x7f
+#define ADVP_PAME_BI_MASK				0x80
+
+/* 802.11u advertisement protocol values */
+#define ADVP_QRL_REQUEST				0x00
+#define ADVP_QRL_RESPONSE				0x7f
+#define ADVP_PAME_BI_DEPENDENT			0x00
+#define ADVP_PAME_BI_INDEPENDENT		ADVP_PAME_BI_MASK
+
+/* 802.11u ANQP information ID */
+#define ANQP_ID_QUERY_LIST							256
+#define ANQP_ID_CAPABILITY_LIST						257
+#define ANQP_ID_VENUE_NAME_INFO						258
+#define ANQP_ID_EMERGENCY_CALL_NUMBER_INFO			259
+#define ANQP_ID_NETWORK_AUTHENTICATION_TYPE_INFO	260
+#define ANQP_ID_ROAMING_CONSORTIUM_LIST				261
+#define ANQP_ID_IP_ADDRESS_TYPE_AVAILABILITY_INFO	262
+#define ANQP_ID_NAI_REALM_LIST						263
+#define ANQP_ID_G3PP_CELLULAR_NETWORK_INFO			264
+#define ANQP_ID_AP_GEOSPATIAL_LOCATION				265
+#define ANQP_ID_AP_CIVIC_LOCATION					266
+#define ANQP_ID_AP_LOCATION_PUBLIC_ID_URI			267
+#define ANQP_ID_DOMAIN_NAME_LIST					268
+#define ANQP_ID_EMERGENCY_ALERT_ID_URI				269
+#define ANQP_ID_EMERGENCY_NAI						271
+#define ANQP_ID_VENDOR_SPECIFIC_LIST				56797
+
+/* 802.11u ANQP OUI */
+#define ANQP_OUI_SUBTYPE	9
+
+/* 802.11u venue name */
+#define VENUE_LANGUAGE_CODE_SIZE		3
+#define VENUE_NAME_SIZE					255
+
+/* 802.11u venue groups */
+#define VENUE_UNSPECIFIED				0
+#define VENUE_ASSEMBLY					1
+#define VENUE_BUSINESS					2
+#define VENUE_EDUCATIONAL				3
+#define VENUE_FACTORY					4
+#define VENUE_INSTITUTIONAL				5
+#define VENUE_MERCANTILE				6
+#define VENUE_RESIDENTIAL				7
+#define VENUE_STORAGE					8
+#define VENUE_UTILITY					9
+#define VENUE_VEHICULAR					10
+#define VENUE_OUTDOOR					11
+
+/* 802.11u network authentication type indicator */
+#define NATI_UNSPECIFIED							-1
+#define NATI_ACCEPTANCE_OF_TERMS_CONDITIONS			0
+#define NATI_ONLINE_ENROLLMENT_SUPPORTED			1
+#define NATI_HTTP_HTTPS_REDIRECTION					2
+#define NATI_DNS_REDIRECTION						3
+
+/* 802.11u IP address type availability - IPv6 */
+#define IPA_IPV6_SHIFT						0
+#define IPA_IPV6_MASK						(0x03 << IPA_IPV6_SHIFT)
+#define	IPA_IPV6_NOT_AVAILABLE				0x00
+#define IPA_IPV6_AVAILABLE					0x01
+#define IPA_IPV6_UNKNOWN_AVAILABILITY		0x02
+
+/* 802.11u IP address type availability - IPv4 */
+#define IPA_IPV4_SHIFT						2
+#define IPA_IPV4_MASK						(0x3f << IPA_IPV4_SHIFT)
+#define	IPA_IPV4_NOT_AVAILABLE				0x00
+#define IPA_IPV4_PUBLIC						0x01
+#define IPA_IPV4_PORT_RESTRICT				0x02
+#define IPA_IPV4_SINGLE_NAT					0x03
+#define IPA_IPV4_DOUBLE_NAT					0x04
+#define IPA_IPV4_PORT_RESTRICT_SINGLE_NAT	0x05
+#define IPA_IPV4_PORT_RESTRICT_DOUBLE_NAT	0x06
+#define IPA_IPV4_UNKNOWN_AVAILABILITY		0x07
+
+/* 802.11u NAI realm encoding */
+#define REALM_ENCODING_RFC4282	0
+#define REALM_ENCODING_UTF8		1
+
+/* 802.11u IANA EAP method type numbers */
+#define REALM_EAP_TLS					13
+#define REALM_EAP_LEAP					17
+#define REALM_EAP_SIM					18
+#define REALM_EAP_TTLS					21
+#define REALM_EAP_AKA					23
+#define REALM_EAP_PEAP					25
+#define REALM_EAP_FAST					43
+#define REALM_EAP_PSK					47
+#define REALM_EAP_AKAP					50
+#define REALM_EAP_EXPANDED				254
+
+/* 802.11u authentication ID */
+#define REALM_EXPANDED_EAP						1
+#define REALM_NON_EAP_INNER_AUTHENTICATION		2
+#define REALM_INNER_AUTHENTICATION_EAP			3
+#define REALM_EXPANDED_INNER_EAP				4
+#define REALM_CREDENTIAL						5
+#define REALM_TUNNELED_EAP_CREDENTIAL			6
+#define REALM_VENDOR_SPECIFIC_EAP				221
+
+/* 802.11u non-EAP inner authentication type */
+#define REALM_RESERVED_AUTH			0
+#define REALM_PAP					1
+#define REALM_CHAP					2
+#define REALM_MSCHAP				3
+#define REALM_MSCHAPV2				4
+
+/* 802.11u credential type */
+#define REALM_SIM					1
+#define REALM_USIM					2
+#define REALM_NFC					3
+#define REALM_HARDWARE_TOKEN		4
+#define REALM_SOFTOKEN				5
+#define REALM_CERTIFICATE			6
+#define REALM_USERNAME_PASSWORD		7
+#define REALM_SERVER_SIDE			8
+#define REALM_RESERVED_CRED			9
+#define REALM_VENDOR_SPECIFIC_CRED	10
+
+/* 802.11u 3GPP PLMN */
+#define G3PP_GUD_VERSION		0
+#define G3PP_PLMN_LIST_IE		0
+
+/** hotspot2.0 indication element (vendor specific) */
+BWL_PRE_PACKED_STRUCT struct hs20_ie {
+	uint8 oui[3];
+	uint8 type;
+	uint8 config;
+} BWL_POST_PACKED_STRUCT;
+typedef struct hs20_ie hs20_ie_t;
+#define HS20_IE_LEN 5	/* HS20 IE length */
+
+/** IEEE 802.11 Annex E */
+typedef enum {
+	DOT11_2GHZ_20MHZ_CLASS_12		= 81,	/* Ch 1-11			 */
+	DOT11_5GHZ_20MHZ_CLASS_1		= 115,	/* Ch 36-48			 */
+	DOT11_5GHZ_20MHZ_CLASS_2_DFS	= 118,	/* Ch 52-64			 */
+	DOT11_5GHZ_20MHZ_CLASS_3		= 124,	/* Ch 149-161		 */
+	DOT11_5GHZ_20MHZ_CLASS_4_DFS	= 121,	/* Ch 100-140		 */
+	DOT11_5GHZ_20MHZ_CLASS_5		= 125,	/* Ch 149-165		 */
+	DOT11_5GHZ_40MHZ_CLASS_22		= 116,	/* Ch 36-44,   lower */
+	DOT11_5GHZ_40MHZ_CLASS_23_DFS 	= 119,	/* Ch 52-60,   lower */
+	DOT11_5GHZ_40MHZ_CLASS_24_DFS	= 122,	/* Ch 100-132, lower */
+	DOT11_5GHZ_40MHZ_CLASS_25		= 126,	/* Ch 149-157, lower */
+	DOT11_5GHZ_40MHZ_CLASS_27		= 117,	/* Ch 40-48,   upper */
+	DOT11_5GHZ_40MHZ_CLASS_28_DFS	= 120,	/* Ch 56-64,   upper */
+	DOT11_5GHZ_40MHZ_CLASS_29_DFS	= 123,	/* Ch 104-136, upper */
+	DOT11_5GHZ_40MHZ_CLASS_30		= 127,	/* Ch 153-161, upper */
+	DOT11_2GHZ_40MHZ_CLASS_32		= 83,	/* Ch 1-7,     lower */
+	DOT11_2GHZ_40MHZ_CLASS_33		= 84,	/* Ch 5-11,    upper */
+} dot11_op_class_t;
+
+/* QoS map */
+#define QOS_MAP_FIXED_LENGTH	(8 * 2)	/* DSCP ranges fixed with 8 entries */
+
+/* BCM proprietary IE type for AIBSS */
+#define BCM_AIBSS_IE_TYPE 56
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _802_11_H_ */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/802.11_bta.h b/drivers/net/wireless/bcmdhd/include/proto/802.11_bta.h
new file mode 100644
index 0000000..19d898b
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/802.11_bta.h
@@ -0,0 +1,45 @@
+/*
+ * BT-AMP (BlueTooth Alternate Mac and Phy) 802.11 PAL (Protocol Adaptation Layer)
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: 802.11_bta.h 382882 2013-02-04 23:24:31Z $
+*/
+
+#ifndef _802_11_BTA_H_
+#define _802_11_BTA_H_
+
+#define BT_SIG_SNAP_MPROT		"\xAA\xAA\x03\x00\x19\x58"
+
+/* BT-AMP 802.11 PAL Protocols */
+#define BTA_PROT_L2CAP				1
+#define	BTA_PROT_ACTIVITY_REPORT		2
+#define BTA_PROT_SECURITY			3
+#define BTA_PROT_LINK_SUPERVISION_REQUEST	4
+#define BTA_PROT_LINK_SUPERVISION_REPLY		5
+
+/* BT-AMP 802.11 PAL AMP_ASSOC Type IDs */
+#define BTA_TYPE_ID_MAC_ADDRESS			1
+#define BTA_TYPE_ID_PREFERRED_CHANNELS		2
+#define BTA_TYPE_ID_CONNECTED_CHANNELS		3
+#define BTA_TYPE_ID_CAPABILITIES		4
+#define BTA_TYPE_ID_VERSION			5
+#endif /* _802_11_bta_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/802.11e.h b/drivers/net/wireless/bcmdhd/include/proto/802.11e.h
new file mode 100644
index 0000000..f990f21
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/802.11e.h
@@ -0,0 +1,132 @@
+/*
+ * 802.11e protocol header file
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: 802.11e.h 382883 2013-02-04 23:26:09Z $
+ */
+
+#ifndef _802_11e_H_
+#define _802_11e_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+
+/* WME Traffic Specification (TSPEC) element */
+#define WME_TSPEC_HDR_LEN           2           /* WME TSPEC header length */
+#define WME_TSPEC_BODY_OFF          2           /* WME TSPEC body offset */
+
+#define WME_CATEGORY_CODE_OFFSET	0		/* WME Category code offset */
+#define WME_ACTION_CODE_OFFSET		1		/* WME Action code offset */
+#define WME_TOKEN_CODE_OFFSET		2		/* WME Token code offset */
+#define WME_STATUS_CODE_OFFSET		3		/* WME Status code offset */
+
+BWL_PRE_PACKED_STRUCT struct tsinfo {
+	uint8 octets[3];
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct tsinfo tsinfo_t;
+
+/* 802.11e TSPEC IE */
+typedef BWL_PRE_PACKED_STRUCT struct tspec {
+	uint8 oui[DOT11_OUI_LEN];	/* WME_OUI */
+	uint8 type;					/* WME_TYPE */
+	uint8 subtype;				/* WME_SUBTYPE_TSPEC */
+	uint8 version;				/* WME_VERSION */
+	tsinfo_t tsinfo;			/* TS Info bit field */
+	uint16 nom_msdu_size;		/* (Nominal or fixed) MSDU Size (bytes) */
+	uint16 max_msdu_size;		/* Maximum MSDU Size (bytes) */
+	uint32 min_srv_interval;	/* Minimum Service Interval (us) */
+	uint32 max_srv_interval;	/* Maximum Service Interval (us) */
+	uint32 inactivity_interval;	/* Inactivity Interval (us) */
+	uint32 suspension_interval; /* Suspension Interval (us) */
+	uint32 srv_start_time;		/* Service Start Time (us) */
+	uint32 min_data_rate;		/* Minimum Data Rate (bps) */
+	uint32 mean_data_rate;		/* Mean Data Rate (bps) */
+	uint32 peak_data_rate;		/* Peak Data Rate (bps) */
+	uint32 max_burst_size;		/* Maximum Burst Size (bytes) */
+	uint32 delay_bound;			/* Delay Bound (us) */
+	uint32 min_phy_rate;		/* Minimum PHY Rate (bps) */
+	uint16 surplus_bw;			/* Surplus Bandwidth Allowance (range 1.0-8.0) */
+	uint16 medium_time;			/* Medium Time (32 us/s periods) */
+} BWL_POST_PACKED_STRUCT tspec_t;
+
+#define WME_TSPEC_LEN	(sizeof(tspec_t))		/* not including 2-bytes of header */
+
+/* ts_info */
+/* 802.1D priority is duplicated - bits 13-11 AND bits 3-1 */
+#define TS_INFO_TID_SHIFT		1	/* TS info. TID shift */
+#define TS_INFO_TID_MASK		(0xf << TS_INFO_TID_SHIFT)	/* TS info. TID mask */
+#define TS_INFO_CONTENTION_SHIFT	7	/* TS info. contention shift */
+#define TS_INFO_CONTENTION_MASK	(0x1 << TS_INFO_CONTENTION_SHIFT) /* TS info. contention mask */
+#define TS_INFO_DIRECTION_SHIFT	5	/* TS info. direction shift */
+#define TS_INFO_DIRECTION_MASK	(0x3 << TS_INFO_DIRECTION_SHIFT) /* TS info. direction mask */
+#define TS_INFO_PSB_SHIFT		2		/* TS info. PSB bit Shift */
+#define TS_INFO_PSB_MASK		(1 << TS_INFO_PSB_SHIFT)	/* TS info. PSB mask */
+#define TS_INFO_UPLINK			(0 << TS_INFO_DIRECTION_SHIFT)	/* TS info. uplink */
+#define TS_INFO_DOWNLINK		(1 << TS_INFO_DIRECTION_SHIFT)	/* TS info. downlink */
+#define TS_INFO_BIDIRECTIONAL	(3 << TS_INFO_DIRECTION_SHIFT)	/* TS info. bidirectional */
+#define TS_INFO_USER_PRIO_SHIFT	3	/* TS info. user priority shift */
+/* TS info. user priority mask */
+#define TS_INFO_USER_PRIO_MASK	(0x7 << TS_INFO_USER_PRIO_SHIFT)
+
+/* Macro to get/set bit(s) field in TSINFO */
+#define WLC_CAC_GET_TID(pt)	((((pt).octets[0]) & TS_INFO_TID_MASK) >> TS_INFO_TID_SHIFT)
+#define WLC_CAC_GET_DIR(pt)	((((pt).octets[0]) & \
+	TS_INFO_DIRECTION_MASK) >> TS_INFO_DIRECTION_SHIFT)
+#define WLC_CAC_GET_PSB(pt)	((((pt).octets[1]) & TS_INFO_PSB_MASK) >> TS_INFO_PSB_SHIFT)
+#define WLC_CAC_GET_USER_PRIO(pt)	((((pt).octets[1]) & \
+	TS_INFO_USER_PRIO_MASK) >> TS_INFO_USER_PRIO_SHIFT)
+
+#define WLC_CAC_SET_TID(pt, id)	((((pt).octets[0]) & (~TS_INFO_TID_MASK)) | \
+	((id) << TS_INFO_TID_SHIFT))
+#define WLC_CAC_SET_USER_PRIO(pt, prio)	((((pt).octets[0]) & (~TS_INFO_USER_PRIO_MASK)) | \
+	((prio) << TS_INFO_USER_PRIO_SHIFT))
+
+/* 802.11e QBSS Load IE */
+#define QBSS_LOAD_IE_LEN		5	/* QBSS Load IE length */
+#define QBSS_LOAD_AAC_OFF		3	/* AAC offset in IE */
+
+#define CAC_ADDTS_RESP_TIMEOUT		1000	/* default ADDTS response timeout in ms */
+						/* DEFVAL dot11ADDTSResponseTimeout = 1s */
+
+/* 802.11e ADDTS status code */
+#define DOT11E_STATUS_ADMISSION_ACCEPTED	0	/* TSPEC Admission accepted status */
+#define DOT11E_STATUS_ADDTS_INVALID_PARAM	1	/* TSPEC invalid parameter status */
+#define DOT11E_STATUS_ADDTS_REFUSED_NSBW	3	/* ADDTS refused (non-sufficient BW) */
+#define DOT11E_STATUS_ADDTS_REFUSED_AWHILE	47	/* ADDTS refused but could retry later */
+
+/* 802.11e DELTS status code */
+#define DOT11E_STATUS_QSTA_LEAVE_QBSS		36	/* STA leave QBSS */
+#define DOT11E_STATUS_END_TS				37	/* END TS */
+#define DOT11E_STATUS_UNKNOWN_TS			38	/* UNKNOWN TS */
+#define DOT11E_STATUS_QSTA_REQ_TIMEOUT		39	/* STA ADDTS request timeout */
+
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _802_11e_CAC_H_ */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/802.1d.h b/drivers/net/wireless/bcmdhd/include/proto/802.1d.h
new file mode 100644
index 0000000..17a5430
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/802.1d.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * Fundamental types and constants relating to 802.1D
+ *
+ * $Id: 802.1d.h 382882 2013-02-04 23:24:31Z $
+ */
+
+#ifndef _802_1_D_
+#define _802_1_D_
+
+/* 802.1D priority defines */
+#define	PRIO_8021D_NONE		2	/* None = - */
+#define	PRIO_8021D_BK		1	/* BK - Background */
+#define	PRIO_8021D_BE		0	/* BE - Best-effort */
+#define	PRIO_8021D_EE		3	/* EE - Excellent-effort */
+#define	PRIO_8021D_CL		4	/* CL - Controlled Load */
+#define	PRIO_8021D_VI		5	/* Vi - Video */
+#define	PRIO_8021D_VO		6	/* Vo - Voice */
+#define	PRIO_8021D_NC		7	/* NC - Network Control */
+#define	MAXPRIO			7	/* 0-7 */
+#define NUMPRIO			(MAXPRIO + 1)
+
+#define ALLPRIO		-1	/* All prioirty */
+
+/* Converts prio to precedence since the numerical value of
+ * PRIO_8021D_BE and PRIO_8021D_NONE are swapped.
+ */
+#define PRIO2PREC(prio) \
+	(((prio) == PRIO_8021D_NONE || (prio) == PRIO_8021D_BE) ? ((prio^2)) : (prio))
+
+#endif /* _802_1_D__ */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/802.3.h b/drivers/net/wireless/bcmdhd/include/proto/802.3.h
new file mode 100644
index 0000000..841e6da
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/802.3.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * Fundamental constants relating to 802.3
+ *
+ * $Id: 802.3.h 417943 2013-08-13 07:54:04Z $
+ */
+
+#ifndef _802_3_h_
+#define _802_3_h_
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+#define SNAP_HDR_LEN	6	/* 802.3 SNAP header length */
+#define DOT3_OUI_LEN	3	/* 802.3 oui length */
+
+BWL_PRE_PACKED_STRUCT struct dot3_mac_llc_snap_header {
+	uint8	ether_dhost[ETHER_ADDR_LEN];	/* dest mac */
+	uint8	ether_shost[ETHER_ADDR_LEN];	/* src mac */
+	uint16	length;				/* frame length incl header */
+	uint8	dsap;				/* always 0xAA */
+	uint8	ssap;				/* always 0xAA */
+	uint8	ctl;				/* always 0x03 */
+	uint8	oui[DOT3_OUI_LEN];		/* RFC1042: 0x00 0x00 0x00
+						 * Bridge-Tunnel: 0x00 0x00 0xF8
+						 */
+	uint16	type;				/* ethertype */
+} BWL_POST_PACKED_STRUCT;
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif	/* #ifndef _802_3_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/bcmdhcp.h b/drivers/net/wireless/bcmdhd/include/proto/bcmdhcp.h
new file mode 100644
index 0000000..5a7695e
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/bcmdhcp.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2014, Broadcom Corporation
+ * All Rights Reserved.
+ * 
+ * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom Corporation;
+ * the contents of this file may not be disclosed to third parties, copied
+ * or duplicated in any form, in whole or in part, without the prior
+ * written permission of Broadcom Corporation.
+ *
+ * Fundamental constants relating to DHCP Protocol
+ *
+ * $Id: bcmdhcp.h 382883 2013-02-04 23:26:09Z $
+ */
+
+#ifndef _bcmdhcp_h_
+#define _bcmdhcp_h_
+
+/* DHCP params */
+#define DHCP_TYPE_OFFSET	0	/* DHCP type (request|reply) offset */
+#define DHCP_TID_OFFSET		4	/* DHCP transition id offset */
+#define DHCP_FLAGS_OFFSET	10	/* DHCP flags offset */
+#define DHCP_CIADDR_OFFSET	12	/* DHCP client IP address offset */
+#define DHCP_YIADDR_OFFSET	16	/* DHCP your IP address offset */
+#define DHCP_GIADDR_OFFSET	24	/* DHCP relay agent IP address offset */
+#define DHCP_CHADDR_OFFSET	28	/* DHCP client h/w address offset */
+#define DHCP_OPT_OFFSET		236	/* DHCP options offset */
+
+#define DHCP_OPT_MSGTYPE	53	/* DHCP message type */
+#define DHCP_OPT_MSGTYPE_REQ	3
+#define DHCP_OPT_MSGTYPE_ACK	5	/* DHCP message type - ACK */
+
+#define DHCP_OPT_CODE_OFFSET	0	/* Option identifier */
+#define DHCP_OPT_LEN_OFFSET	1	/* Option data length */
+#define DHCP_OPT_DATA_OFFSET	2	/* Option data */
+
+#define DHCP_OPT_CODE_CLIENTID	61	/* Option identifier */
+
+#define DHCP_TYPE_REQUEST	1	/* DHCP request (discover|request) */
+#define DHCP_TYPE_REPLY		2	/* DHCP reply (offset|ack) */
+
+#define DHCP_PORT_SERVER	67	/* DHCP server UDP port */
+#define DHCP_PORT_CLIENT	68	/* DHCP client UDP port */
+
+#define DHCP_FLAG_BCAST	0x8000	/* DHCP broadcast flag */
+
+#define DHCP_FLAGS_LEN	2	/* DHCP flags field length */
+
+#define DHCP6_TYPE_SOLICIT	1	/* DHCP6 solicit */
+#define DHCP6_TYPE_ADVERTISE	2	/* DHCP6 advertise */
+#define DHCP6_TYPE_REQUEST	3	/* DHCP6 request */
+#define DHCP6_TYPE_CONFIRM	4	/* DHCP6 confirm */
+#define DHCP6_TYPE_RENEW	5	/* DHCP6 renew */
+#define DHCP6_TYPE_REBIND	6	/* DHCP6 rebind */
+#define DHCP6_TYPE_REPLY	7	/* DHCP6 reply */
+#define DHCP6_TYPE_RELEASE	8	/* DHCP6 release */
+#define DHCP6_TYPE_DECLINE	9	/* DHCP6 decline */
+#define DHCP6_TYPE_RECONFIGURE	10	/* DHCP6 reconfigure */
+#define DHCP6_TYPE_INFOREQ	11	/* DHCP6 information request */
+#define DHCP6_TYPE_RELAYFWD	12	/* DHCP6 relay forward */
+#define DHCP6_TYPE_RELAYREPLY	13	/* DHCP6 relay reply */
+
+#define DHCP6_TYPE_OFFSET	0	/* DHCP6 type offset */
+
+#define	DHCP6_MSG_OPT_OFFSET	4	/* Offset of options in client server messages */
+#define	DHCP6_RELAY_OPT_OFFSET	34	/* Offset of options in relay messages */
+
+#define	DHCP6_OPT_CODE_OFFSET	0	/* Option identifier */
+#define	DHCP6_OPT_LEN_OFFSET	2	/* Option data length */
+#define	DHCP6_OPT_DATA_OFFSET	4	/* Option data */
+
+#define	DHCP6_OPT_CODE_CLIENTID	1	/* DHCP6 CLIENTID option */
+#define	DHCP6_OPT_CODE_SERVERID	2	/* DHCP6 SERVERID option */
+
+#define DHCP6_PORT_SERVER	547	/* DHCP6 server UDP port */
+#define DHCP6_PORT_CLIENT	546	/* DHCP6 client UDP port */
+
+#endif	/* #ifndef _bcmdhcp_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/bcmeth.h b/drivers/net/wireless/bcmdhd/include/proto/bcmeth.h
new file mode 100644
index 0000000..ac33978
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/bcmeth.h
@@ -0,0 +1,112 @@
+/*
+ * Broadcom Ethernettype  protocol definitions
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmeth.h 445746 2013-12-30 12:57:26Z $
+ */
+
+/*
+ * Broadcom Ethernet protocol defines
+ */
+
+#ifndef _BCMETH_H_
+#define _BCMETH_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+/* ETHER_TYPE_BRCM is defined in ethernet.h */
+
+/*
+ * Following the 2byte BRCM ether_type is a 16bit BRCM subtype field
+ * in one of two formats: (only subtypes 32768-65535 are in use now)
+ *
+ * subtypes 0-32767:
+ *     8 bit subtype (0-127)
+ *     8 bit length in bytes (0-255)
+ *
+ * subtypes 32768-65535:
+ *     16 bit big-endian subtype
+ *     16 bit big-endian length in bytes (0-65535)
+ *
+ * length is the number of additional bytes beyond the 4 or 6 byte header
+ *
+ * Reserved values:
+ * 0 reserved
+ * 5-15 reserved for iLine protocol assignments
+ * 17-126 reserved, assignable
+ * 127 reserved
+ * 32768 reserved
+ * 32769-65534 reserved, assignable
+ * 65535 reserved
+ */
+
+/*
+ * While adding the subtypes and their specific processing code make sure
+ * bcmeth_bcm_hdr_t is the first data structure in the user specific data structure definition
+ */
+
+#define	BCMILCP_SUBTYPE_RATE		1
+#define	BCMILCP_SUBTYPE_LINK		2
+#define	BCMILCP_SUBTYPE_CSA		3
+#define	BCMILCP_SUBTYPE_LARQ		4
+#define BCMILCP_SUBTYPE_VENDOR		5
+#define	BCMILCP_SUBTYPE_FLH		17
+
+#define BCMILCP_SUBTYPE_VENDOR_LONG	32769
+#define BCMILCP_SUBTYPE_CERT		32770
+#define BCMILCP_SUBTYPE_SES		32771
+
+
+#define BCMILCP_BCM_SUBTYPE_RESERVED		0
+#define BCMILCP_BCM_SUBTYPE_EVENT		1
+#define BCMILCP_BCM_SUBTYPE_SES			2
+/*
+ * The EAPOL type is not used anymore. Instead EAPOL messages are now embedded
+ * within BCMILCP_BCM_SUBTYPE_EVENT type messages
+ */
+/* #define BCMILCP_BCM_SUBTYPE_EAPOL		3 */
+#define BCMILCP_BCM_SUBTYPE_DPT                 4
+
+#define BCMILCP_BCM_SUBTYPEHDR_MINLENGTH	8
+#define BCMILCP_BCM_SUBTYPEHDR_VERSION		0
+
+/* These fields are stored in network order */
+typedef BWL_PRE_PACKED_STRUCT struct bcmeth_hdr
+{
+	uint16	subtype;	/* Vendor specific..32769 */
+	uint16	length;
+	uint8	version;	/* Version is 0 */
+	uint8	oui[3];		/* Broadcom OUI */
+	/* user specific Data */
+	uint16	usr_subtype;
+} BWL_POST_PACKED_STRUCT bcmeth_hdr_t;
+
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif	/*  _BCMETH_H_ */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/bcmevent.h b/drivers/net/wireless/bcmdhd/include/proto/bcmevent.h
new file mode 100644
index 0000000..51006b5
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/bcmevent.h
@@ -0,0 +1,492 @@
+/*
+ * Broadcom Event  protocol definitions
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ *
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ *
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * Dependencies: proto/bcmeth.h
+ *
+ * $Id: bcmevent.h 474305 2014-04-30 20:54:29Z $
+ *
+ */
+
+/*
+ * Broadcom Ethernet Events protocol defines
+ *
+ */
+
+#ifndef _BCMEVENT_H_
+#define _BCMEVENT_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+/* #include <ethernet.h> -- TODO: req., excluded to overwhelming coupling (break up ethernet.h) */
+#include <proto/bcmeth.h>
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+#define BCM_EVENT_MSG_VERSION		2	/* wl_event_msg_t struct version */
+#define BCM_MSG_IFNAME_MAX		16	/* max length of interface name */
+
+/* flags */
+#define WLC_EVENT_MSG_LINK		0x01	/* link is up */
+#define WLC_EVENT_MSG_FLUSHTXQ		0x02	/* flush tx queue on MIC error */
+#define WLC_EVENT_MSG_GROUP		0x04	/* group MIC error */
+#define WLC_EVENT_MSG_UNKBSS		0x08	/* unknown source bsscfg */
+#define WLC_EVENT_MSG_UNKIF		0x10	/* unknown source OS i/f */
+
+/* these fields are stored in network order */
+
+/* version 1 */
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+	uint16	version;
+	uint16	flags;			/* see flags below */
+	uint32	event_type;		/* Message (see below) */
+	uint32	status;			/* Status code (see below) */
+	uint32	reason;			/* Reason code (if applicable) */
+	uint32	auth_type;		/* WLC_E_AUTH */
+	uint32	datalen;		/* data buf */
+	struct ether_addr	addr;	/* Station address (if applicable) */
+	char	ifname[BCM_MSG_IFNAME_MAX]; /* name of the packet incoming interface */
+} BWL_POST_PACKED_STRUCT wl_event_msg_v1_t;
+
+/* the current version */
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+	uint16	version;
+	uint16	flags;			/* see flags below */
+	uint32	event_type;		/* Message (see below) */
+	uint32	status;			/* Status code (see below) */
+	uint32	reason;			/* Reason code (if applicable) */
+	uint32	auth_type;		/* WLC_E_AUTH */
+	uint32	datalen;		/* data buf */
+	struct ether_addr	addr;	/* Station address (if applicable) */
+	char	ifname[BCM_MSG_IFNAME_MAX]; /* name of the packet incoming interface */
+	uint8	ifidx;			/* destination OS i/f index */
+	uint8	bsscfgidx;		/* source bsscfg index */
+} BWL_POST_PACKED_STRUCT wl_event_msg_t;
+
+/* used by driver msgs */
+typedef BWL_PRE_PACKED_STRUCT struct bcm_event {
+	struct ether_header eth;
+	bcmeth_hdr_t		bcm_hdr;
+	wl_event_msg_t		event;
+	/* data portion follows */
+} BWL_POST_PACKED_STRUCT bcm_event_t;
+
+#define BCM_MSG_LEN	(sizeof(bcm_event_t) - sizeof(bcmeth_hdr_t) - sizeof(struct ether_header))
+
+/* Event messages */
+#define WLC_E_SET_SSID		0	/* indicates status of set SSID */
+#define WLC_E_JOIN		1	/* differentiates join IBSS from found (WLC_E_START) IBSS */
+#define WLC_E_START		2	/* STA founded an IBSS or AP started a BSS */
+#define WLC_E_AUTH		3	/* 802.11 AUTH request */
+#define WLC_E_AUTH_IND		4	/* 802.11 AUTH indication */
+#define WLC_E_DEAUTH		5	/* 802.11 DEAUTH request */
+#define WLC_E_DEAUTH_IND	6	/* 802.11 DEAUTH indication */
+#define WLC_E_ASSOC		7	/* 802.11 ASSOC request */
+#define WLC_E_ASSOC_IND		8	/* 802.11 ASSOC indication */
+#define WLC_E_REASSOC		9	/* 802.11 REASSOC request */
+#define WLC_E_REASSOC_IND	10	/* 802.11 REASSOC indication */
+#define WLC_E_DISASSOC		11	/* 802.11 DISASSOC request */
+#define WLC_E_DISASSOC_IND	12	/* 802.11 DISASSOC indication */
+#define WLC_E_QUIET_START	13	/* 802.11h Quiet period started */
+#define WLC_E_QUIET_END		14	/* 802.11h Quiet period ended */
+#define WLC_E_BEACON_RX		15	/* BEACONS received/lost indication */
+#define WLC_E_LINK		16	/* generic link indication */
+#define WLC_E_MIC_ERROR		17	/* TKIP MIC error occurred */
+#define WLC_E_NDIS_LINK		18	/* NDIS style link indication */
+#define WLC_E_ROAM		19	/* roam attempt occurred: indicate status & reason */
+#define WLC_E_TXFAIL		20	/* change in dot11FailedCount (txfail) */
+#define WLC_E_PMKID_CACHE	21	/* WPA2 pmkid cache indication */
+#define WLC_E_RETROGRADE_TSF	22	/* current AP's TSF value went backward */
+#define WLC_E_PRUNE		23	/* AP was pruned from join list for reason */
+#define WLC_E_AUTOAUTH		24	/* report AutoAuth table entry match for join attempt */
+#define WLC_E_EAPOL_MSG		25	/* Event encapsulating an EAPOL message */
+#define WLC_E_SCAN_COMPLETE	26	/* Scan results are ready or scan was aborted */
+#define WLC_E_ADDTS_IND		27	/* indicate to host addts fail/success */
+#define WLC_E_DELTS_IND		28	/* indicate to host delts fail/success */
+#define WLC_E_BCNSENT_IND	29	/* indicate to host of beacon transmit */
+#define WLC_E_BCNRX_MSG		30	/* Send the received beacon up to the host */
+#define WLC_E_BCNLOST_MSG	31	/* indicate to host loss of beacon */
+#define WLC_E_ROAM_PREP		32	/* before attempting to roam */
+#define WLC_E_PFN_NET_FOUND	33	/* PFN network found event */
+#define WLC_E_PFN_NET_LOST	34	/* PFN network lost event */
+#define WLC_E_RESET_COMPLETE	35
+#define WLC_E_JOIN_START	36
+#define WLC_E_ROAM_START	37
+#define WLC_E_ASSOC_START	38
+#define WLC_E_IBSS_ASSOC	39
+#define WLC_E_RADIO		40
+#define WLC_E_PSM_WATCHDOG	41	/* PSM microcode watchdog fired */
+#define WLC_E_PROBREQ_MSG       44      /* probe request received */
+#define WLC_E_SCAN_CONFIRM_IND  45
+#define WLC_E_PSK_SUP		46	/* WPA Handshake fail */
+#define WLC_E_COUNTRY_CODE_CHANGED	47
+#define	WLC_E_EXCEEDED_MEDIUM_TIME	48	/* WMMAC excedded medium time */
+#define WLC_E_ICV_ERROR		49	/* WEP ICV error occurred */
+#define WLC_E_UNICAST_DECODE_ERROR	50	/* Unsupported unicast encrypted frame */
+#define WLC_E_MULTICAST_DECODE_ERROR	51	/* Unsupported multicast encrypted frame */
+#define WLC_E_TRACE		52
+#define WLC_E_IF		54	/* I/F change (for dongle host notification) */
+#define WLC_E_P2P_DISC_LISTEN_COMPLETE	55	/* listen state expires */
+#define WLC_E_RSSI		56	/* indicate RSSI change based on configured levels */
+#define WLC_E_PFN_BEST_BATCHING	57 /* PFN best network batching event */
+#define WLC_E_EXTLOG_MSG	58
+#define WLC_E_ACTION_FRAME      59	/* Action frame Rx */
+#define WLC_E_ACTION_FRAME_COMPLETE	60	/* Action frame Tx complete */
+#define WLC_E_PRE_ASSOC_IND	61	/* assoc request received */
+#define WLC_E_PRE_REASSOC_IND	62	/* re-assoc request received */
+#define WLC_E_CHANNEL_ADOPTED	63
+#define WLC_E_AP_STARTED	64	/* AP started */
+#define WLC_E_DFS_AP_STOP	65	/* AP stopped due to DFS */
+#define WLC_E_DFS_AP_RESUME	66	/* AP resumed due to DFS */
+#define WLC_E_WAI_STA_EVENT	67	/* WAI stations event */
+#define WLC_E_WAI_MSG 		68	/* event encapsulating an WAI message */
+#define WLC_E_ESCAN_RESULT 	69	/* escan result event */
+#define WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE 	70	/* action frame off channel complete */
+#define WLC_E_PROBRESP_MSG	71	/* probe response received */
+#define WLC_E_P2P_PROBREQ_MSG	72	/* P2P Probe request received */
+#define WLC_E_DCS_REQUEST	73
+#define WLC_E_FIFO_CREDIT_MAP	74	/* credits for D11 FIFOs. [AC0,AC1,AC2,AC3,BC_MC,ATIM] */
+#define WLC_E_ACTION_FRAME_RX	75	/* Received action frame event WITH
+					 * wl_event_rx_frame_data_t header
+					 */
+#define WLC_E_WAKE_EVENT	76	/* Wake Event timer fired, used for wake WLAN test mode */
+#define WLC_E_RM_COMPLETE	77	/* Radio measurement complete */
+#define WLC_E_HTSFSYNC		78	/* Synchronize TSF with the host */
+#define WLC_E_OVERLAY_REQ	79	/* request an overlay IOCTL/iovar from the host */
+#define WLC_E_CSA_COMPLETE_IND		80	/* 802.11 CHANNEL SWITCH ACTION completed */
+#define WLC_E_EXCESS_PM_WAKE_EVENT	81	/* excess PM Wake Event to inform host  */
+#define WLC_E_PFN_SCAN_NONE		82	/* no PFN networks around */
+/* PFN BSSID network found event, conflict/share with  WLC_E_PFN_SCAN_NONE */
+#define WLC_E_PFN_BSSID_NET_FOUND	82
+#define WLC_E_PFN_SCAN_ALLGONE		83	/* last found PFN network gets lost */
+/* PFN BSSID network lost event, conflict/share with WLC_E_PFN_SCAN_ALLGONE */
+#define WLC_E_PFN_BSSID_NET_LOST	83
+#define WLC_E_GTK_PLUMBED		84
+#define WLC_E_ASSOC_IND_NDIS		85	/* 802.11 ASSOC indication for NDIS only */
+#define WLC_E_REASSOC_IND_NDIS		86	/* 802.11 REASSOC indication for NDIS only */
+#define WLC_E_ASSOC_REQ_IE		87
+#define WLC_E_ASSOC_RESP_IE		88
+#define WLC_E_ASSOC_RECREATED		89	/* association recreated on resume */
+#define WLC_E_ACTION_FRAME_RX_NDIS	90	/* rx action frame event for NDIS only */
+#define WLC_E_AUTH_REQ			91	/* authentication request received */
+#define WLC_E_TDLS_PEER_EVENT		92	/* discovered peer, connected/disconnected peer */
+#define WLC_E_SPEEDY_RECREATE_FAIL	93	/* fast assoc recreation failed */
+#define WLC_E_NATIVE			94	/* port-specific event and payload (e.g. NDIS) */
+#define WLC_E_PKTDELAY_IND		95	/* event for tx pkt delay suddently jump */
+#define WLC_E_PSTA_PRIMARY_INTF_IND	99	/* psta primary interface indication */
+#define WLC_E_NAN			100     /* NAN event */
+#define WLC_E_BEACON_FRAME_RX		101
+#define WLC_E_SERVICE_FOUND		102	/* desired service found */
+#define WLC_E_GAS_FRAGMENT_RX		103	/* GAS fragment received */
+#define WLC_E_GAS_COMPLETE		104	/* GAS sessions all complete */
+#define WLC_E_P2PO_ADD_DEVICE		105	/* New device found by p2p offload */
+#define WLC_E_P2PO_DEL_DEVICE		106	/* device has been removed by p2p offload */
+#define WLC_E_WNM_STA_SLEEP		107	/* WNM event to notify STA enter sleep mode */
+#define WLC_E_TXFAIL_THRESH		108	/* Indication of MAC tx failures (exhaustion of
+						 * 802.11 retries) exceeding threshold(s)
+						 */
+#define WLC_E_PROXD			109	/* Proximity Detection event */
+#define WLC_E_IBSS_COALESCE		110	/* IBSS Coalescing */
+#define WLC_E_AIBSS_TXFAIL		110	/* TXFAIL event for AIBSS, re using event 110 */
+#define WLC_E_BSS_LOAD			114	/* Inform host of beacon bss load */
+#define WLC_E_CSA_START_IND		121
+#define WLC_E_CSA_DONE_IND		122
+#define WLC_E_CSA_FAILURE_IND		123
+#define WLC_E_CCA_CHAN_QUAL		124	/* CCA based channel quality report */
+#define WLC_E_BSSID		125	/* to report change in BSSID while roaming */
+#define WLC_E_TX_STAT_ERROR		126	/* tx error indication */
+#define WLC_E_BCMC_CREDIT_SUPPORT	127	/* credit check for BCMC supported */
+#define WLC_E_BT_WIFI_HANDOVER_REQ	130	/* Handover Request Initiated */
+#define WLC_E_SPW_TXINHIBIT		131     /* Southpaw TxInhibit notification */
+#define WLC_E_FBT_AUTH_REQ_IND		132	/* FBT Authentication Request Indication */
+#define WLC_E_RSSI_LQM			133	/* Enhancement addition for WLC_E_RSSI */
+#define WLC_E_PFN_GSCAN_FULL_RESULT		134 /* Full probe/beacon (IEs etc) results */
+#define WLC_E_PFN_SWC		135 /* Significant change in rssi of bssids being tracked */
+#define WLC_E_PFN_SCAN_COMPLETE		138	/* PFN completed scan of network list */
+#define WLC_E_RMC_EVENT			139	/* RMC event */
+#define WLC_E_LAST			140	/* highest val + 1 for range checking */
+
+/* define an API for getting the string name of an event */
+extern const char *bcmevent_get_name(uint event_type);
+
+
+
+/* Event status codes */
+#define WLC_E_STATUS_SUCCESS		0	/* operation was successful */
+#define WLC_E_STATUS_FAIL		1	/* operation failed */
+#define WLC_E_STATUS_TIMEOUT		2	/* operation timed out */
+#define WLC_E_STATUS_NO_NETWORKS	3	/* failed due to no matching network found */
+#define WLC_E_STATUS_ABORT		4	/* operation was aborted */
+#define WLC_E_STATUS_NO_ACK		5	/* protocol failure: packet not ack'd */
+#define WLC_E_STATUS_UNSOLICITED	6	/* AUTH or ASSOC packet was unsolicited */
+#define WLC_E_STATUS_ATTEMPT		7	/* attempt to assoc to an auto auth configuration */
+#define WLC_E_STATUS_PARTIAL		8	/* scan results are incomplete */
+#define WLC_E_STATUS_NEWSCAN		9	/* scan aborted by another scan */
+#define WLC_E_STATUS_NEWASSOC		10	/* scan aborted due to assoc in progress */
+#define WLC_E_STATUS_11HQUIET		11	/* 802.11h quiet period started */
+#define WLC_E_STATUS_SUPPRESS		12	/* user disabled scanning (WLC_SET_SCANSUPPRESS) */
+#define WLC_E_STATUS_NOCHANS		13	/* no allowable channels to scan */
+#define WLC_E_STATUS_CS_ABORT		15	/* abort channel select */
+#define WLC_E_STATUS_ERROR		16	/* request failed due to error */
+#define WLC_E_STATUS_INVALID 0xff  /* Invalid status code to init variables. */
+
+
+/* roam reason codes */
+#define WLC_E_REASON_INITIAL_ASSOC	0	/* initial assoc */
+#define WLC_E_REASON_LOW_RSSI		1	/* roamed due to low RSSI */
+#define WLC_E_REASON_DEAUTH		2	/* roamed due to DEAUTH indication */
+#define WLC_E_REASON_DISASSOC		3	/* roamed due to DISASSOC indication */
+#define WLC_E_REASON_BCNS_LOST		4	/* roamed due to lost beacons */
+
+/* Roam codes used primarily by CCX */
+#define WLC_E_REASON_FAST_ROAM_FAILED	5	/* roamed due to fast roam failure */
+#define WLC_E_REASON_DIRECTED_ROAM	6	/* roamed due to request by AP */
+#define WLC_E_REASON_TSPEC_REJECTED	7	/* roamed due to TSPEC rejection */
+#define WLC_E_REASON_BETTER_AP		8	/* roamed due to finding better AP */
+#define WLC_E_REASON_MINTXRATE		9	/* roamed because at mintxrate for too long */
+#define WLC_E_REASON_TXFAIL		10	/* We can hear AP, but AP can't hear us */
+/* retained for precommit auto-merging errors; remove once all branches are synced */
+#define WLC_E_REASON_REQUESTED_ROAM	11
+#define WLC_E_REASON_BSSTRANS_REQ	11	/* roamed due to BSS Transition request by AP */
+
+/* prune reason codes */
+#define WLC_E_PRUNE_ENCR_MISMATCH	1	/* encryption mismatch */
+#define WLC_E_PRUNE_BCAST_BSSID		2	/* AP uses a broadcast BSSID */
+#define WLC_E_PRUNE_MAC_DENY		3	/* STA's MAC addr is in AP's MAC deny list */
+#define WLC_E_PRUNE_MAC_NA		4	/* STA's MAC addr is not in AP's MAC allow list */
+#define WLC_E_PRUNE_REG_PASSV		5	/* AP not allowed due to regulatory restriction */
+#define WLC_E_PRUNE_SPCT_MGMT		6	/* AP does not support STA locale spectrum mgmt */
+#define WLC_E_PRUNE_RADAR		7	/* AP is on a radar channel of STA locale */
+#define WLC_E_RSN_MISMATCH		8	/* STA does not support AP's RSN */
+#define WLC_E_PRUNE_NO_COMMON_RATES	9	/* No rates in common with AP */
+#define WLC_E_PRUNE_BASIC_RATES		10	/* STA does not support all basic rates of BSS */
+#define WLC_E_PRUNE_CIPHER_NA		12	/* BSS's cipher not supported */
+#define WLC_E_PRUNE_KNOWN_STA		13	/* AP is already known to us as a STA */
+#define WLC_E_PRUNE_WDS_PEER		15	/* AP is already known to us as a WDS peer */
+#define WLC_E_PRUNE_QBSS_LOAD		16	/* QBSS LOAD - AAC is too low */
+#define WLC_E_PRUNE_HOME_AP		17	/* prune home AP */
+
+/* WPA failure reason codes carried in the WLC_E_PSK_SUP event */
+#define WLC_E_SUP_OTHER			0	/* Other reason */
+#define WLC_E_SUP_DECRYPT_KEY_DATA	1	/* Decryption of key data failed */
+#define WLC_E_SUP_BAD_UCAST_WEP128	2	/* Illegal use of ucast WEP128 */
+#define WLC_E_SUP_BAD_UCAST_WEP40	3	/* Illegal use of ucast WEP40 */
+#define WLC_E_SUP_UNSUP_KEY_LEN		4	/* Unsupported key length */
+#define WLC_E_SUP_PW_KEY_CIPHER		5	/* Unicast cipher mismatch in pairwise key */
+#define WLC_E_SUP_MSG3_TOO_MANY_IE	6	/* WPA IE contains > 1 RSN IE in key msg 3 */
+#define WLC_E_SUP_MSG3_IE_MISMATCH	7	/* WPA IE mismatch in key message 3 */
+#define WLC_E_SUP_NO_INSTALL_FLAG	8	/* INSTALL flag unset in 4-way msg */
+#define WLC_E_SUP_MSG3_NO_GTK		9	/* encapsulated GTK missing from msg 3 */
+#define WLC_E_SUP_GRP_KEY_CIPHER	10	/* Multicast cipher mismatch in group key */
+#define WLC_E_SUP_GRP_MSG1_NO_GTK	11	/* encapsulated GTK missing from group msg 1 */
+#define WLC_E_SUP_GTK_DECRYPT_FAIL	12	/* GTK decrypt failure */
+#define WLC_E_SUP_SEND_FAIL		13	/* message send failure */
+#define WLC_E_SUP_DEAUTH		14	/* received FC_DEAUTH */
+#define WLC_E_SUP_WPA_PSK_TMO		15	/* WPA PSK 4-way handshake timeout */
+
+/* Event data for events that include frames received over the air */
+/* WLC_E_PROBRESP_MSG
+ * WLC_E_P2P_PROBREQ_MSG
+ * WLC_E_ACTION_FRAME_RX
+ */
+typedef BWL_PRE_PACKED_STRUCT struct wl_event_rx_frame_data {
+	uint16	version;
+	uint16	channel;	/* Matches chanspec_t format from bcmwifi_channels.h */
+	int32	rssi;
+	uint32	mactime;
+	uint32	rate;
+} BWL_POST_PACKED_STRUCT wl_event_rx_frame_data_t;
+
+#define BCM_RX_FRAME_DATA_VERSION 1
+
+/* WLC_E_IF event data */
+typedef struct wl_event_data_if {
+	uint8 ifidx;		/* RTE virtual device index (for dongle) */
+	uint8 opcode;		/* see I/F opcode */
+	uint8 reserved;		/* bit mask (WLC_E_IF_FLAGS_XXX ) */
+	uint8 bssidx;		/* bsscfg index */
+	uint8 role;		/* see I/F role */
+} wl_event_data_if_t;
+
+/* opcode in WLC_E_IF event */
+#define WLC_E_IF_ADD		1	/* bsscfg add */
+#define WLC_E_IF_DEL		2	/* bsscfg delete */
+#define WLC_E_IF_CHANGE		3	/* bsscfg role change */
+
+/* I/F role code in WLC_E_IF event */
+#define WLC_E_IF_ROLE_STA		0	/* Infra STA */
+#define WLC_E_IF_ROLE_AP		1	/* Access Point */
+#define WLC_E_IF_ROLE_WDS		2	/* WDS link */
+#define WLC_E_IF_ROLE_P2P_GO		3	/* P2P Group Owner */
+#define WLC_E_IF_ROLE_P2P_CLIENT	4	/* P2P Client */
+
+/* WLC_E_RSSI event data */
+typedef struct wl_event_data_rssi {
+	int32 rssi;
+	int32 snr;
+	int32 noise;
+} wl_event_data_rssi_t;
+
+/* WLC_E_IF flag */
+#define WLC_E_IF_FLAGS_BSSCFG_NOIF	0x1	/* no host I/F creation needed */
+
+/* Reason codes for LINK */
+#define WLC_E_LINK_BCN_LOSS	1	/* Link down because of beacon loss */
+#define WLC_E_LINK_DISASSOC	2	/* Link down because of disassoc */
+#define WLC_E_LINK_ASSOC_REC	3	/* Link down because assoc recreate failed */
+#define WLC_E_LINK_BSSCFG_DIS	4	/* Link down due to bsscfg down */
+
+/* reason codes for WLC_E_OVERLAY_REQ event */
+#define WLC_E_OVL_DOWNLOAD		0	/* overlay download request */
+#define WLC_E_OVL_UPDATE_IND	1	/* device indication of host overlay update */
+
+/* reason codes for WLC_E_TDLS_PEER_EVENT event */
+#define WLC_E_TDLS_PEER_DISCOVERED		0	/* peer is ready to establish TDLS */
+#define WLC_E_TDLS_PEER_CONNECTED		1
+#define WLC_E_TDLS_PEER_DISCONNECTED	2
+
+/* reason codes for WLC_E_RMC_EVENT event */
+#define WLC_E_REASON_RMC_NONE		0
+#define WLC_E_REASON_RMC_AR_LOST		1
+#define WLC_E_REASON_RMC_AR_NO_ACK		2
+
+
+/* GAS event data */
+typedef BWL_PRE_PACKED_STRUCT struct wl_event_gas {
+	uint16	channel;		/* channel of GAS protocol */
+	uint8	dialog_token;	/* GAS dialog token */
+	uint8	fragment_id;	/* fragment id */
+	uint16	status_code;	/* status code on GAS completion */
+	uint16 	data_len;		/* length of data to follow */
+	uint8	data[1];		/* variable length specified by data_len */
+} BWL_POST_PACKED_STRUCT wl_event_gas_t;
+
+/* service discovery TLV */
+typedef BWL_PRE_PACKED_STRUCT struct wl_sd_tlv {
+	uint16	length;			/* length of response_data */
+	uint8	protocol;		/* service protocol type */
+	uint8	transaction_id;		/* service transaction id */
+	uint8	status_code;		/* status code */
+	uint8	data[1];		/* response data */
+} BWL_POST_PACKED_STRUCT wl_sd_tlv_t;
+
+/* service discovery event data */
+typedef BWL_PRE_PACKED_STRUCT struct wl_event_sd {
+	uint16	channel;		/* channel */
+	uint8	count;			/* number of tlvs */
+	wl_sd_tlv_t	tlv[1];		/* service discovery TLV */
+} BWL_POST_PACKED_STRUCT wl_event_sd_t;
+
+/* Reason codes for WLC_E_PROXD */
+#define WLC_E_PROXD_FOUND		1	/* Found a proximity device */
+#define WLC_E_PROXD_GONE		2	/* Lost a proximity device */
+#define WLC_E_PROXD_START		3	/* used by: target  */
+#define WLC_E_PROXD_STOP		4	/* used by: target   */
+#define WLC_E_PROXD_COMPLETED		5	/* used by: initiator completed */
+#define WLC_E_PROXD_ERROR		6	/* used by both initiator and target */
+#define WLC_E_PROXD_COLLECT_START	7	/* used by: target & initiator */
+#define WLC_E_PROXD_COLLECT_STOP	8	/* used by: target */
+#define WLC_E_PROXD_COLLECT_COMPLETED	9	/* used by: initiator completed */
+#define WLC_E_PROXD_COLLECT_ERROR	10	/* used by both initiator and target */
+#define WLC_E_PROXD_NAN_EVENT		11	/* used by both initiator and target */
+
+/*  proxd_event data */
+typedef struct ftm_sample {
+	uint32 value;	/* RTT in ns */
+	int8 rssi;	/* RSSI */
+} ftm_sample_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct proxd_event_data {
+	uint16 ver;			/* version */
+	uint16 mode;			/* mode: target/initiator */
+	uint16 method;			/* method: rssi/TOF/AOA */
+	uint8  err_code;		/* error classification */
+	uint8  TOF_type;		/* one way or two way TOF */
+	uint8  OFDM_frame_type;		/* legacy or VHT */
+	uint8  bandwidth;		/* Bandwidth is 20, 40,80, MHZ */
+	struct ether_addr peer_mac;	/* (e.g for tgt:initiator's */
+	uint32 distance;		/* dst to tgt, units meter */
+	uint32 meanrtt;			/* mean delta */
+	uint32 modertt;			/* Mode delta */
+	uint32 medianrtt;		/* median RTT */
+	uint32 sdrtt;			/* Standard deviation of RTT */
+	int    gdcalcresult;		/* Software or Hardware Kind of redundant, but if */
+					/* frame type is VHT, then we should do it by hardware */
+	int16  avg_rssi;		/* avg rssi accroos the ftm frames */
+	int16  validfrmcnt;		/* Firmware's valid frame counts */
+	char  *peer_router_info;	/* Peer router information if available in TLV, */
+					/* We will add this field later  */
+	int32 var1;			/* average of group delay */
+	int32 var2;			/* average of threshold crossing */
+	int32 var3;			/* difference between group delay and threshold crossing */
+					/* raw Fine Time Measurements (ftm) data */
+	uint16 ftm_unit;		/* ftm cnt resolution in picoseconds , 6250ps - default */
+	uint16 ftm_cnt;			/*  num of rtd measurments/length in the ftm buffer  */
+	ftm_sample_t ftm_buff[1];	/* 1 ... ftm_cnt  */
+} BWL_POST_PACKED_STRUCT wl_proxd_event_data_t;
+
+
+/* Video Traffic Interference Monitor Event */
+#define INTFER_EVENT_VERSION		1
+#define INTFER_STREAM_TYPE_NONTCP	1
+#define INTFER_STREAM_TYPE_TCP		2
+#define WLINTFER_STATS_NSMPLS		4
+typedef struct wl_intfer_event {
+	uint16 version;			/* version */
+	uint16 status;			/* status */
+	uint8 txfail_histo[WLINTFER_STATS_NSMPLS]; /* txfail histo */
+} wl_intfer_event_t;
+
+/* WLC_E_PSTA_PRIMARY_INTF_IND event data */
+typedef struct wl_psta_primary_intf_event {
+	struct ether_addr prim_ea;	/* primary intf ether addr */
+} wl_psta_primary_intf_event_t;
+
+
+/*  **********  NAN protocol events/subevents  ********** */
+#define NAN_EVENT_BUFFER_SIZE 512 /* max size */
+/* nan application events to the host driver */
+enum nan_app_events {
+	WL_NAN_EVENT_START = 1,     /* NAN cluster started */
+	WL_NAN_EVENT_JOIN = 2,      /* Joined to a NAN cluster */
+	WL_NAN_EVENT_ROLE = 3,      /* Role or State changed */
+	WL_NAN_EVENT_SCAN_COMPLETE = 4,
+	WL_NAN_EVENT_DISCOVERY_RESULT = 5,
+	WL_NAN_EVENT_REPLIED = 6,
+	WL_NAN_EVENT_TERMINATED = 7,	/* the instance ID will be present in the ev data */
+	WL_NAN_EVENT_RECEIVE = 8,
+	WL_NAN_EVENT_STATUS_CHG = 9,  /* generated on any change in nan_mac status */
+	WL_NAN_EVENT_MERGE = 10,      /* Merged to a NAN cluster */
+	WL_NAN_EVENT_STOP = 11,       /* NAN stopped */
+	WL_NAN_EVENT_INVALID = 12,	/* delimiter for max value */
+};
+#define IS_NAN_EVT_ON(var, evt) ((var & (1 << (evt-1))) != 0)
+/*  ******************* end of NAN section *************** */
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _BCMEVENT_H_ */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/bcmip.h b/drivers/net/wireless/bcmdhd/include/proto/bcmip.h
new file mode 100644
index 0000000..05813e0
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/bcmip.h
@@ -0,0 +1,245 @@
+/*
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * Fundamental constants relating to IP Protocol
+ *
+ * $Id: bcmip.h 458522 2014-02-27 02:26:15Z $
+ */
+
+#ifndef _bcmip_h_
+#define _bcmip_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+
+/* IPV4 and IPV6 common */
+#define IP_VER_OFFSET		0x0	/* offset to version field */
+#define IP_VER_MASK		0xf0	/* version mask */
+#define IP_VER_SHIFT		4	/* version shift */
+#define IP_VER_4		4	/* version number for IPV4 */
+#define IP_VER_6		6	/* version number for IPV6 */
+
+#define IP_VER(ip_body) \
+	((((uint8 *)(ip_body))[IP_VER_OFFSET] & IP_VER_MASK) >> IP_VER_SHIFT)
+
+#define IP_PROT_ICMP		0x1	/* ICMP protocol */
+#define IP_PROT_IGMP		0x2	/* IGMP protocol */
+#define IP_PROT_TCP		0x6	/* TCP protocol */
+#define IP_PROT_UDP		0x11	/* UDP protocol type */
+#define IP_PROT_ICMP6		0x3a	/* ICMPv6 protocol type */
+
+/* IPV4 field offsets */
+#define IPV4_VER_HL_OFFSET      0       /* version and ihl byte offset */
+#define IPV4_TOS_OFFSET         1       /* type of service offset */
+#define IPV4_PKTLEN_OFFSET      2       /* packet length offset */
+#define IPV4_PKTFLAG_OFFSET     6       /* more-frag,dont-frag flag offset */
+#define IPV4_PROT_OFFSET        9       /* protocol type offset */
+#define IPV4_CHKSUM_OFFSET      10      /* IP header checksum offset */
+#define IPV4_SRC_IP_OFFSET      12      /* src IP addr offset */
+#define IPV4_DEST_IP_OFFSET     16      /* dest IP addr offset */
+#define IPV4_OPTIONS_OFFSET     20      /* IP options offset */
+#define IPV4_MIN_HEADER_LEN     20      /* Minimum size for an IP header (no options) */
+
+/* IPV4 field decodes */
+#define IPV4_VER_MASK		0xf0	/* IPV4 version mask */
+#define IPV4_VER_SHIFT		4	/* IPV4 version shift */
+
+#define IPV4_HLEN_MASK		0x0f	/* IPV4 header length mask */
+#define IPV4_HLEN(ipv4_body)	(4 * (((uint8 *)(ipv4_body))[IPV4_VER_HL_OFFSET] & IPV4_HLEN_MASK))
+
+#define IPV4_ADDR_LEN		4	/* IPV4 address length */
+
+#define IPV4_ADDR_NULL(a)	((((uint8 *)(a))[0] | ((uint8 *)(a))[1] | \
+				  ((uint8 *)(a))[2] | ((uint8 *)(a))[3]) == 0)
+
+#define IPV4_ADDR_BCAST(a)	((((uint8 *)(a))[0] & ((uint8 *)(a))[1] & \
+				  ((uint8 *)(a))[2] & ((uint8 *)(a))[3]) == 0xff)
+
+#define	IPV4_TOS_DSCP_MASK	0xfc	/* DiffServ codepoint mask */
+#define	IPV4_TOS_DSCP_SHIFT	2	/* DiffServ codepoint shift */
+
+#define	IPV4_TOS(ipv4_body)	(((uint8 *)(ipv4_body))[IPV4_TOS_OFFSET])
+
+#define	IPV4_TOS_PREC_MASK	0xe0	/* Historical precedence mask */
+#define	IPV4_TOS_PREC_SHIFT	5	/* Historical precedence shift */
+
+#define IPV4_TOS_LOWDELAY	0x10	/* Lowest delay requested */
+#define IPV4_TOS_THROUGHPUT	0x8	/* Best throughput requested */
+#define IPV4_TOS_RELIABILITY	0x4	/* Most reliable delivery requested */
+
+#define IPV4_TOS_ROUTINE        0
+#define IPV4_TOS_PRIORITY       1
+#define IPV4_TOS_IMMEDIATE      2
+#define IPV4_TOS_FLASH          3
+#define IPV4_TOS_FLASHOVERRIDE  4
+#define IPV4_TOS_CRITICAL       5
+#define IPV4_TOS_INETWORK_CTRL  6
+#define IPV4_TOS_NETWORK_CTRL   7
+
+#define IPV4_PROT(ipv4_body)	(((uint8 *)(ipv4_body))[IPV4_PROT_OFFSET])
+
+#define IPV4_FRAG_RESV		0x8000	/* Reserved */
+#define IPV4_FRAG_DONT		0x4000	/* Don't fragment */
+#define IPV4_FRAG_MORE		0x2000	/* More fragments */
+#define IPV4_FRAG_OFFSET_MASK	0x1fff	/* Fragment offset */
+
+#define IPV4_ADDR_STR_LEN	16	/* Max IP address length in string format */
+
+/* IPV4 packet formats */
+BWL_PRE_PACKED_STRUCT struct ipv4_addr {
+	uint8	addr[IPV4_ADDR_LEN];
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct ipv4_hdr {
+	uint8	version_ihl;		/* Version and Internet Header Length */
+	uint8	tos;			/* Type Of Service */
+	uint16	tot_len;		/* Number of bytes in packet (max 65535) */
+	uint16	id;
+	uint16	frag;			/* 3 flag bits and fragment offset */
+	uint8	ttl;			/* Time To Live */
+	uint8	prot;			/* Protocol */
+	uint16	hdr_chksum;		/* IP header checksum */
+	uint8	src_ip[IPV4_ADDR_LEN];	/* Source IP Address */
+	uint8	dst_ip[IPV4_ADDR_LEN];	/* Destination IP Address */
+} BWL_POST_PACKED_STRUCT;
+
+/* IPV6 field offsets */
+#define IPV6_PAYLOAD_LEN_OFFSET	4	/* payload length offset */
+#define IPV6_NEXT_HDR_OFFSET	6	/* next header/protocol offset */
+#define IPV6_HOP_LIMIT_OFFSET	7	/* hop limit offset */
+#define IPV6_SRC_IP_OFFSET	8	/* src IP addr offset */
+#define IPV6_DEST_IP_OFFSET	24	/* dst IP addr offset */
+
+/* IPV6 field decodes */
+#define IPV6_TRAFFIC_CLASS(ipv6_body) \
+	(((((uint8 *)(ipv6_body))[0] & 0x0f) << 4) | \
+	 ((((uint8 *)(ipv6_body))[1] & 0xf0) >> 4))
+
+#define IPV6_FLOW_LABEL(ipv6_body) \
+	(((((uint8 *)(ipv6_body))[1] & 0x0f) << 16) | \
+	 (((uint8 *)(ipv6_body))[2] << 8) | \
+	 (((uint8 *)(ipv6_body))[3]))
+
+#define IPV6_PAYLOAD_LEN(ipv6_body) \
+	((((uint8 *)(ipv6_body))[IPV6_PAYLOAD_LEN_OFFSET + 0] << 8) | \
+	 ((uint8 *)(ipv6_body))[IPV6_PAYLOAD_LEN_OFFSET + 1])
+
+#define IPV6_NEXT_HDR(ipv6_body) \
+	(((uint8 *)(ipv6_body))[IPV6_NEXT_HDR_OFFSET])
+
+#define IPV6_PROT(ipv6_body)	IPV6_NEXT_HDR(ipv6_body)
+
+#define IPV6_ADDR_LEN		16	/* IPV6 address length */
+
+/* IPV4 TOS or IPV6 Traffic Classifier or 0 */
+#define IP_TOS46(ip_body) \
+	(IP_VER(ip_body) == IP_VER_4 ? IPV4_TOS(ip_body) : \
+	 IP_VER(ip_body) == IP_VER_6 ? IPV6_TRAFFIC_CLASS(ip_body) : 0)
+
+#define IP_DSCP46(ip_body) (IP_TOS46(ip_body) >> IPV4_TOS_DSCP_SHIFT);
+
+/* IPV4 or IPV6 Protocol Classifier or 0 */
+#define IP_PROT46(ip_body) \
+	(IP_VER(ip_body) == IP_VER_4 ? IPV4_PROT(ip_body) : \
+	 IP_VER(ip_body) == IP_VER_6 ? IPV6_PROT(ip_body) : 0)
+
+/* IPV6 extension headers (options) */
+#define IPV6_EXTHDR_HOP		0
+#define IPV6_EXTHDR_ROUTING	43
+#define IPV6_EXTHDR_FRAGMENT	44
+#define IPV6_EXTHDR_AUTH	51
+#define IPV6_EXTHDR_NONE	59
+#define IPV6_EXTHDR_DEST	60
+
+#define IPV6_EXTHDR(prot)	(((prot) == IPV6_EXTHDR_HOP) || \
+	                         ((prot) == IPV6_EXTHDR_ROUTING) || \
+	                         ((prot) == IPV6_EXTHDR_FRAGMENT) || \
+	                         ((prot) == IPV6_EXTHDR_AUTH) || \
+	                         ((prot) == IPV6_EXTHDR_NONE) || \
+	                         ((prot) == IPV6_EXTHDR_DEST))
+
+#define IPV6_MIN_HLEN 		40
+
+#define IPV6_EXTHDR_LEN(eh)	((((struct ipv6_exthdr *)(eh))->hdrlen + 1) << 3)
+
+BWL_PRE_PACKED_STRUCT struct ipv6_exthdr {
+	uint8	nexthdr;
+	uint8	hdrlen;
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct ipv6_exthdr_frag {
+	uint8	nexthdr;
+	uint8	rsvd;
+	uint16	frag_off;
+	uint32	ident;
+} BWL_POST_PACKED_STRUCT;
+
+static INLINE int32
+ipv6_exthdr_len(uint8 *h, uint8 *proto)
+{
+	uint16 len = 0, hlen;
+	struct ipv6_exthdr *eh = (struct ipv6_exthdr *)h;
+
+	while (IPV6_EXTHDR(eh->nexthdr)) {
+		if (eh->nexthdr == IPV6_EXTHDR_NONE)
+			return -1;
+		else if (eh->nexthdr == IPV6_EXTHDR_FRAGMENT)
+			hlen = 8;
+		else if (eh->nexthdr == IPV6_EXTHDR_AUTH)
+			hlen = (eh->hdrlen + 2) << 2;
+		else
+			hlen = IPV6_EXTHDR_LEN(eh);
+
+		len += hlen;
+		eh = (struct ipv6_exthdr *)(h + len);
+	}
+
+	*proto = eh->nexthdr;
+	return len;
+}
+
+#define IPV4_ISMULTI(a) (((a) & 0xf0000000) == 0xe0000000)
+
+#define IPV4_MCAST_TO_ETHER_MCAST(ipv4, ether) \
+{ \
+	ether[0] = 0x01; \
+	ether[1] = 0x00; \
+	ether[2] = 0x5E; \
+	ether[3] = (ipv4 & 0x7f0000) >> 16; \
+	ether[4] = (ipv4 & 0xff00) >> 8; \
+	ether[5] = (ipv4 & 0xff); \
+}
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#define IPV4_ADDR_STR "%d.%d.%d.%d"
+#define IPV4_ADDR_TO_STR(addr)	((uint32)addr & 0xff000000) >> 24, \
+								((uint32)addr & 0x00ff0000) >> 16, \
+								((uint32)addr & 0x0000ff00) >> 8, \
+								((uint32)addr & 0x000000ff)
+
+#endif	/* _bcmip_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/bcmipv6.h b/drivers/net/wireless/bcmdhd/include/proto/bcmipv6.h
new file mode 100644
index 0000000..e3351da
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/bcmipv6.h
@@ -0,0 +1,160 @@
+/*
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * Fundamental constants relating to Neighbor Discovery Protocol
+ *
+ * $Id: bcmipv6.h 439574 2013-11-27 06:37:37Z $
+ */
+
+#ifndef _bcmipv6_h_
+#define _bcmipv6_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+/* Extension headers */
+#define IPV6_EXT_HOP	0
+#define IPV6_EXT_ROUTE	43
+#define IPV6_EXT_FRAG	44
+#define IPV6_EXT_DEST	60
+#define IPV6_EXT_ESEC	50
+#define IPV6_EXT_AUTH	51
+
+/* Minimum size (extension header "word" length) */
+#define IPV6_EXT_WORD	8
+
+/* Offsets for most extension headers */
+#define IPV6_EXT_NEXTHDR	0
+#define IPV6_EXT_HDRLEN		1
+
+/* Constants specific to fragmentation header */
+#define IPV6_FRAG_MORE_MASK	0x0001
+#define IPV6_FRAG_MORE_SHIFT	0
+#define IPV6_FRAG_OFFS_MASK	0xfff8
+#define IPV6_FRAG_OFFS_SHIFT	3
+
+/* For icmpv6 */
+#define ICMPV6_HEADER_TYPE	0x3A
+#define ICMPV6_PKT_TYPE_RA	134
+#define ICMPV6_PKT_TYPE_NS	135
+#define ICMPV6_PKT_TYPE_NA	136
+
+#define ICMPV6_ND_OPT_TYPE_TARGET_MAC	2
+#define ICMPV6_ND_OPT_TYPE_SRC_MAC		1
+
+#define ICMPV6_ND_OPT_LEN_LINKADDR		1
+
+#define ICMPV6_ND_OPT_LEN_LINKADDR		1
+
+#define IPV6_VERSION 	6
+#define IPV6_HOP_LIMIT 	255
+
+#define IPV6_ADDR_NULL(a)	((a[0] | a[1] | a[2] | a[3] | a[4] | \
+							 a[5] | a[6] | a[7] | a[8] | a[9] | \
+							 a[10] | a[11] | a[12] | a[13] | \
+							 a[14] | a[15]) == 0)
+
+#define IPV6_ADDR_LOCAL(a)	(((a[0] == 0xfe) && (a[1] & 0x80))? TRUE: FALSE)
+
+/* IPV6 address */
+BWL_PRE_PACKED_STRUCT struct ipv6_addr {
+		uint8		addr[16];
+} BWL_POST_PACKED_STRUCT;
+
+
+/* ICMPV6 Header */
+BWL_PRE_PACKED_STRUCT struct icmp6_hdr {
+	uint8	icmp6_type;
+	uint8	icmp6_code;
+	uint16	icmp6_cksum;
+	BWL_PRE_PACKED_STRUCT union {
+		uint32 reserved;
+		BWL_PRE_PACKED_STRUCT struct nd_advt {
+			uint32	reserved1:5,
+				override:1,
+				solicited:1,
+				router:1,
+				reserved2:24;
+		} BWL_POST_PACKED_STRUCT nd_advt;
+	} BWL_POST_PACKED_STRUCT opt;
+} BWL_POST_PACKED_STRUCT;
+
+/* Ipv6 Header Format */
+BWL_PRE_PACKED_STRUCT struct ipv6_hdr {
+	uint8	priority:4,
+		version:4;
+	uint8	flow_lbl[3];
+	uint16	payload_len;
+	uint8	nexthdr;
+	uint8 	hop_limit;
+	struct	ipv6_addr	saddr;
+	struct	ipv6_addr	daddr;
+} BWL_POST_PACKED_STRUCT;
+
+/* Neighbor Advertisement/Solicitation Packet Structure */
+BWL_PRE_PACKED_STRUCT struct nd_msg {
+	struct icmp6_hdr	icmph;
+	struct ipv6_addr target;
+} BWL_POST_PACKED_STRUCT;
+
+
+/* Neighibor Solicitation/Advertisement Optional Structure */
+BWL_PRE_PACKED_STRUCT struct nd_msg_opt {
+	uint8 type;
+	uint8 len;
+	uint8 mac_addr[ETHER_ADDR_LEN];
+} BWL_POST_PACKED_STRUCT;
+
+/* Ipv6 Fragmentation Header */
+BWL_PRE_PACKED_STRUCT struct ipv6_frag {
+	uint8	nexthdr;
+	uint8	reserved;
+	uint16	frag_offset;
+	uint32	ident;
+} BWL_POST_PACKED_STRUCT;
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+static const struct ipv6_addr all_node_ipv6_maddr = {
+									{ 0xff, 0x2, 0, 0,
+									0, 0, 0, 0,
+									0, 0, 0, 0,
+									0, 0, 0, 1
+									}};
+
+#define IPV6_ISMULTI(a) (a[0] == 0xff)
+
+#define IPV6_MCAST_TO_ETHER_MCAST(ipv6, ether) \
+{ \
+	ether[0] = 0x33; \
+	ether[1] = 0x33; \
+	ether[2] = ipv6[12]; \
+	ether[3] = ipv6[13]; \
+	ether[4] = ipv6[14]; \
+	ether[5] = ipv6[15]; \
+}
+
+#endif	/* !defined(_bcmipv6_h_) */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/bcmtcp.h b/drivers/net/wireless/bcmdhd/include/proto/bcmtcp.h
new file mode 100644
index 0000000..84ab805
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/bcmtcp.h
@@ -0,0 +1,90 @@
+/*
+ * Fundamental constants relating to TCP Protocol
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmtcp.h 458522 2014-02-27 02:26:15Z $
+ */
+
+#ifndef _bcmtcp_h_
+#define _bcmtcp_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+
+#define TCP_SRC_PORT_OFFSET	0	/* TCP source port offset */
+#define TCP_DEST_PORT_OFFSET	2	/* TCP dest port offset */
+#define TCP_SEQ_NUM_OFFSET	4	/* TCP sequence number offset */
+#define TCP_ACK_NUM_OFFSET	8	/* TCP acknowledgement number offset */
+#define TCP_HLEN_OFFSET		12	/* HLEN and reserved bits offset */
+#define TCP_FLAGS_OFFSET	13	/* FLAGS and reserved bits offset */
+#define TCP_CHKSUM_OFFSET	16	/* TCP body checksum offset */
+
+#define TCP_PORT_LEN		2	/* TCP port field length */
+
+/* 8bit TCP flag field */
+#define TCP_FLAG_URG            0x20
+#define TCP_FLAG_ACK            0x10
+#define TCP_FLAG_PSH            0x08
+#define TCP_FLAG_RST            0x04
+#define TCP_FLAG_SYN            0x02
+#define TCP_FLAG_FIN            0x01
+
+#define TCP_HLEN_MASK           0xf000
+#define TCP_HLEN_SHIFT          12
+
+/* These fields are stored in network order */
+BWL_PRE_PACKED_STRUCT struct bcmtcp_hdr
+{
+	uint16	src_port;	/* Source Port Address */
+	uint16	dst_port;	/* Destination Port Address */
+	uint32	seq_num;	/* TCP Sequence Number */
+	uint32	ack_num;	/* TCP Sequence Number */
+	uint16	hdrlen_rsvd_flags;	/* Header length, reserved bits and flags */
+	uint16	tcpwin;		/* TCP window */
+	uint16	chksum;		/* Segment checksum with pseudoheader */
+	uint16	urg_ptr;	/* Points to seq-num of byte following urg data */
+} BWL_POST_PACKED_STRUCT;
+
+#define TCP_MIN_HEADER_LEN 20
+
+#define TCP_HDRLEN_MASK 0xf0
+#define TCP_HDRLEN_SHIFT 4
+#define TCP_HDRLEN(hdrlen) (((hdrlen) & TCP_HDRLEN_MASK) >> TCP_HDRLEN_SHIFT)
+
+#define TCP_FLAGS_MASK  0x1f
+#define TCP_FLAGS(hdrlen) ((hdrlen) & TCP_FLAGS_MASK)
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+/* To address round up by 32bit. */
+#define IS_TCPSEQ_GE(a, b) ((a - b) < NBITVAL(31))		/* a >= b */
+#define IS_TCPSEQ_LE(a, b) ((b - a) < NBITVAL(31))		/* a =< b */
+#define IS_TCPSEQ_GT(a, b) !IS_TCPSEQ_LE(a, b)		/* a > b */
+#define IS_TCPSEQ_LT(a, b) !IS_TCPSEQ_GE(a, b)		/* a < b */
+
+#endif	/* #ifndef _bcmtcp_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/bcmudp.h b/drivers/net/wireless/bcmdhd/include/proto/bcmudp.h
new file mode 100644
index 0000000..32407f3
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/bcmudp.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2014, Broadcom Corporation
+ * All Rights Reserved.
+ * 
+ * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom Corporation;
+ * the contents of this file may not be disclosed to third parties, copied
+ * or duplicated in any form, in whole or in part, without the prior
+ * written permission of Broadcom Corporation.
+ *
+ * Fundamental constants relating to UDP Protocol
+ *
+ * $Id: bcmudp.h 382882 2013-02-04 23:24:31Z $
+ */
+
+#ifndef _bcmudp_h_
+#define _bcmudp_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+
+/* UDP header */
+#define UDP_DEST_PORT_OFFSET	2	/* UDP dest port offset */
+#define UDP_LEN_OFFSET		4	/* UDP length offset */
+#define UDP_CHKSUM_OFFSET	6	/* UDP body checksum offset */
+
+#define UDP_HDR_LEN	8	/* UDP header length */
+#define UDP_PORT_LEN	2	/* UDP port length */
+
+/* These fields are stored in network order */
+BWL_PRE_PACKED_STRUCT struct bcmudp_hdr
+{
+	uint16	src_port;	/* Source Port Address */
+	uint16	dst_port;	/* Destination Port Address */
+	uint16	len;		/* Number of bytes in datagram including header */
+	uint16	chksum;		/* entire datagram checksum with pseudoheader */
+} BWL_POST_PACKED_STRUCT;
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif	/* #ifndef _bcmudp_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/bt_amp_hci.h b/drivers/net/wireless/bcmdhd/include/proto/bt_amp_hci.h
new file mode 100644
index 0000000..fc05ab1
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/bt_amp_hci.h
@@ -0,0 +1,441 @@
+/*
+ * BT-AMP (BlueTooth Alternate Mac and Phy) HCI (Host/Controller Interface)
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bt_amp_hci.h 382882 2013-02-04 23:24:31Z $
+*/
+
+#ifndef _bt_amp_hci_h
+#define _bt_amp_hci_h
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+
+/* AMP HCI CMD packet format */
+typedef BWL_PRE_PACKED_STRUCT struct amp_hci_cmd {
+	uint16 opcode;
+	uint8 plen;
+	uint8 parms[1];
+} BWL_POST_PACKED_STRUCT amp_hci_cmd_t;
+
+#define HCI_CMD_PREAMBLE_SIZE		OFFSETOF(amp_hci_cmd_t, parms)
+#define HCI_CMD_DATA_SIZE		255
+
+/* AMP HCI CMD opcode layout */
+#define HCI_CMD_OPCODE(ogf, ocf)	((((ogf) & 0x3F) << 10) | ((ocf) & 0x03FF))
+#define HCI_CMD_OGF(opcode)		((uint8)(((opcode) >> 10) & 0x3F))
+#define HCI_CMD_OCF(opcode)		((opcode) & 0x03FF)
+
+/* AMP HCI command opcodes */
+#define HCI_Read_Failed_Contact_Counter		HCI_CMD_OPCODE(0x05, 0x0001)
+#define HCI_Reset_Failed_Contact_Counter	HCI_CMD_OPCODE(0x05, 0x0002)
+#define HCI_Read_Link_Quality			HCI_CMD_OPCODE(0x05, 0x0003)
+#define HCI_Read_Local_AMP_Info			HCI_CMD_OPCODE(0x05, 0x0009)
+#define HCI_Read_Local_AMP_ASSOC		HCI_CMD_OPCODE(0x05, 0x000A)
+#define HCI_Write_Remote_AMP_ASSOC		HCI_CMD_OPCODE(0x05, 0x000B)
+#define HCI_Create_Physical_Link		HCI_CMD_OPCODE(0x01, 0x0035)
+#define HCI_Accept_Physical_Link_Request	HCI_CMD_OPCODE(0x01, 0x0036)
+#define HCI_Disconnect_Physical_Link		HCI_CMD_OPCODE(0x01, 0x0037)
+#define HCI_Create_Logical_Link			HCI_CMD_OPCODE(0x01, 0x0038)
+#define HCI_Accept_Logical_Link			HCI_CMD_OPCODE(0x01, 0x0039)
+#define HCI_Disconnect_Logical_Link		HCI_CMD_OPCODE(0x01, 0x003A)
+#define HCI_Logical_Link_Cancel			HCI_CMD_OPCODE(0x01, 0x003B)
+#define HCI_Flow_Spec_Modify			HCI_CMD_OPCODE(0x01, 0x003C)
+#define HCI_Write_Flow_Control_Mode		HCI_CMD_OPCODE(0x01, 0x0067)
+#define HCI_Read_Best_Effort_Flush_Timeout	HCI_CMD_OPCODE(0x01, 0x0069)
+#define HCI_Write_Best_Effort_Flush_Timeout	HCI_CMD_OPCODE(0x01, 0x006A)
+#define HCI_Short_Range_Mode			HCI_CMD_OPCODE(0x01, 0x006B)
+#define HCI_Reset				HCI_CMD_OPCODE(0x03, 0x0003)
+#define HCI_Read_Connection_Accept_Timeout	HCI_CMD_OPCODE(0x03, 0x0015)
+#define HCI_Write_Connection_Accept_Timeout	HCI_CMD_OPCODE(0x03, 0x0016)
+#define HCI_Read_Link_Supervision_Timeout	HCI_CMD_OPCODE(0x03, 0x0036)
+#define HCI_Write_Link_Supervision_Timeout	HCI_CMD_OPCODE(0x03, 0x0037)
+#define HCI_Enhanced_Flush			HCI_CMD_OPCODE(0x03, 0x005F)
+#define HCI_Read_Logical_Link_Accept_Timeout	HCI_CMD_OPCODE(0x03, 0x0061)
+#define HCI_Write_Logical_Link_Accept_Timeout	HCI_CMD_OPCODE(0x03, 0x0062)
+#define HCI_Set_Event_Mask_Page_2		HCI_CMD_OPCODE(0x03, 0x0063)
+#define HCI_Read_Location_Data_Command		HCI_CMD_OPCODE(0x03, 0x0064)
+#define HCI_Write_Location_Data_Command		HCI_CMD_OPCODE(0x03, 0x0065)
+#define HCI_Read_Local_Version_Info		HCI_CMD_OPCODE(0x04, 0x0001)
+#define HCI_Read_Local_Supported_Commands	HCI_CMD_OPCODE(0x04, 0x0002)
+#define HCI_Read_Buffer_Size			HCI_CMD_OPCODE(0x04, 0x0005)
+#define HCI_Read_Data_Block_Size		HCI_CMD_OPCODE(0x04, 0x000A)
+
+/* AMP HCI command parameters */
+typedef BWL_PRE_PACKED_STRUCT struct read_local_cmd_parms {
+	uint8 plh;
+	uint8 offset[2];			/* length so far */
+	uint8 max_remote[2];
+} BWL_POST_PACKED_STRUCT read_local_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct write_remote_cmd_parms {
+	uint8 plh;
+	uint8 offset[2];
+	uint8 len[2];
+	uint8 frag[1];
+} BWL_POST_PACKED_STRUCT write_remote_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct phy_link_cmd_parms {
+	uint8 plh;
+	uint8 key_length;
+	uint8 key_type;
+	uint8 key[1];
+} BWL_POST_PACKED_STRUCT phy_link_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct dis_phy_link_cmd_parms {
+	uint8 plh;
+	uint8 reason;
+} BWL_POST_PACKED_STRUCT dis_phy_link_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct log_link_cmd_parms {
+	uint8 plh;
+	uint8 txflow[16];
+	uint8 rxflow[16];
+} BWL_POST_PACKED_STRUCT log_link_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct ext_flow_spec {
+	uint8 id;
+	uint8 service_type;
+	uint8 max_sdu[2];
+	uint8 sdu_ia_time[4];
+	uint8 access_latency[4];
+	uint8 flush_timeout[4];
+} BWL_POST_PACKED_STRUCT ext_flow_spec_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct log_link_cancel_cmd_parms {
+	uint8 plh;
+	uint8 tx_fs_ID;
+} BWL_POST_PACKED_STRUCT log_link_cancel_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct flow_spec_mod_cmd_parms {
+	uint8 llh[2];
+	uint8 txflow[16];
+	uint8 rxflow[16];
+} BWL_POST_PACKED_STRUCT flow_spec_mod_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct plh_pad {
+	uint8 plh;
+	uint8 pad;
+} BWL_POST_PACKED_STRUCT plh_pad_t;
+
+typedef BWL_PRE_PACKED_STRUCT union hci_handle {
+	uint16 bredr;
+	plh_pad_t amp;
+} BWL_POST_PACKED_STRUCT hci_handle_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct ls_to_cmd_parms {
+	hci_handle_t handle;
+	uint8 timeout[2];
+} BWL_POST_PACKED_STRUCT ls_to_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct befto_cmd_parms {
+	uint8 llh[2];
+	uint8 befto[4];
+} BWL_POST_PACKED_STRUCT befto_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct srm_cmd_parms {
+	uint8 plh;
+	uint8 srm;
+} BWL_POST_PACKED_STRUCT srm_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct ld_cmd_parms {
+	uint8 ld_aware;
+	uint8 ld[2];
+	uint8 ld_opts;
+	uint8 l_opts;
+} BWL_POST_PACKED_STRUCT ld_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct eflush_cmd_parms {
+	uint8 llh[2];
+	uint8 packet_type;
+} BWL_POST_PACKED_STRUCT eflush_cmd_parms_t;
+
+/* Generic AMP extended flow spec service types */
+#define EFS_SVCTYPE_NO_TRAFFIC		0
+#define EFS_SVCTYPE_BEST_EFFORT		1
+#define EFS_SVCTYPE_GUARANTEED		2
+
+/* AMP HCI event packet format */
+typedef BWL_PRE_PACKED_STRUCT struct amp_hci_event {
+	uint8 ecode;
+	uint8 plen;
+	uint8 parms[1];
+} BWL_POST_PACKED_STRUCT amp_hci_event_t;
+
+#define HCI_EVT_PREAMBLE_SIZE			OFFSETOF(amp_hci_event_t, parms)
+
+/* AMP HCI event codes */
+#define HCI_Command_Complete			0x0E
+#define HCI_Command_Status			0x0F
+#define HCI_Flush_Occurred			0x11
+#define HCI_Enhanced_Flush_Complete		0x39
+#define HCI_Physical_Link_Complete		0x40
+#define HCI_Channel_Select			0x41
+#define HCI_Disconnect_Physical_Link_Complete	0x42
+#define HCI_Logical_Link_Complete		0x45
+#define HCI_Disconnect_Logical_Link_Complete	0x46
+#define HCI_Flow_Spec_Modify_Complete		0x47
+#define HCI_Number_of_Completed_Data_Blocks	0x48
+#define HCI_Short_Range_Mode_Change_Complete	0x4C
+#define HCI_Status_Change_Event			0x4D
+#define HCI_Vendor_Specific			0xFF
+
+/* AMP HCI event mask bit positions */
+#define HCI_Physical_Link_Complete_Event_Mask			0x0001
+#define HCI_Channel_Select_Event_Mask				0x0002
+#define HCI_Disconnect_Physical_Link_Complete_Event_Mask	0x0004
+#define HCI_Logical_Link_Complete_Event_Mask			0x0020
+#define HCI_Disconnect_Logical_Link_Complete_Event_Mask		0x0040
+#define HCI_Flow_Spec_Modify_Complete_Event_Mask		0x0080
+#define HCI_Number_of_Completed_Data_Blocks_Event_Mask		0x0100
+#define HCI_Short_Range_Mode_Change_Complete_Event_Mask		0x1000
+#define HCI_Status_Change_Event_Mask				0x2000
+#define HCI_All_Event_Mask					0x31e7
+/* AMP HCI event parameters */
+typedef BWL_PRE_PACKED_STRUCT struct cmd_status_parms {
+	uint8 status;
+	uint8 cmdpkts;
+	uint16 opcode;
+} BWL_POST_PACKED_STRUCT cmd_status_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct cmd_complete_parms {
+	uint8 cmdpkts;
+	uint16 opcode;
+	uint8 parms[1];
+} BWL_POST_PACKED_STRUCT cmd_complete_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct flush_occurred_evt_parms {
+	uint16 handle;
+} BWL_POST_PACKED_STRUCT flush_occurred_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct write_remote_evt_parms {
+	uint8 status;
+	uint8 plh;
+} BWL_POST_PACKED_STRUCT write_remote_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct read_local_evt_parms {
+	uint8 status;
+	uint8 plh;
+	uint16 len;
+	uint8 frag[1];
+} BWL_POST_PACKED_STRUCT read_local_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct read_local_info_evt_parms {
+	uint8 status;
+	uint8 AMP_status;
+	uint32 bandwidth;
+	uint32 gbandwidth;
+	uint32 latency;
+	uint32 PDU_size;
+	uint8 ctrl_type;
+	uint16 PAL_cap;
+	uint16 AMP_ASSOC_len;
+	uint32 max_flush_timeout;
+	uint32 be_flush_timeout;
+} BWL_POST_PACKED_STRUCT read_local_info_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct log_link_evt_parms {
+	uint8 status;
+	uint16 llh;
+	uint8 plh;
+	uint8 tx_fs_ID;
+} BWL_POST_PACKED_STRUCT log_link_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct disc_log_link_evt_parms {
+	uint8 status;
+	uint16 llh;
+	uint8 reason;
+} BWL_POST_PACKED_STRUCT disc_log_link_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct log_link_cancel_evt_parms {
+	uint8 status;
+	uint8 plh;
+	uint8 tx_fs_ID;
+} BWL_POST_PACKED_STRUCT log_link_cancel_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct flow_spec_mod_evt_parms {
+	uint8 status;
+	uint16 llh;
+} BWL_POST_PACKED_STRUCT flow_spec_mod_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct phy_link_evt_parms {
+	uint8 status;
+	uint8 plh;
+} BWL_POST_PACKED_STRUCT phy_link_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct dis_phy_link_evt_parms {
+	uint8 status;
+	uint8 plh;
+	uint8 reason;
+} BWL_POST_PACKED_STRUCT dis_phy_link_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct read_ls_to_evt_parms {
+	uint8 status;
+	hci_handle_t handle;
+	uint16 timeout;
+} BWL_POST_PACKED_STRUCT read_ls_to_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct read_lla_ca_to_evt_parms {
+	uint8 status;
+	uint16 timeout;
+} BWL_POST_PACKED_STRUCT read_lla_ca_to_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct read_data_block_size_evt_parms {
+	uint8 status;
+	uint16 ACL_pkt_len;
+	uint16 data_block_len;
+	uint16 data_block_num;
+} BWL_POST_PACKED_STRUCT read_data_block_size_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct data_blocks {
+	uint16 handle;
+	uint16 pkts;
+	uint16 blocks;
+} BWL_POST_PACKED_STRUCT data_blocks_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct num_completed_data_blocks_evt_parms {
+	uint16 num_blocks;
+	uint8 num_handles;
+	data_blocks_t completed[1];
+} BWL_POST_PACKED_STRUCT num_completed_data_blocks_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct befto_evt_parms {
+	uint8 status;
+	uint32 befto;
+} BWL_POST_PACKED_STRUCT befto_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct srm_evt_parms {
+	uint8 status;
+	uint8 plh;
+	uint8 srm;
+} BWL_POST_PACKED_STRUCT srm_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct contact_counter_evt_parms {
+	uint8 status;
+	uint8 llh[2];
+	uint16 counter;
+} BWL_POST_PACKED_STRUCT contact_counter_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct contact_counter_reset_evt_parms {
+	uint8 status;
+	uint8 llh[2];
+} BWL_POST_PACKED_STRUCT contact_counter_reset_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct read_linkq_evt_parms {
+	uint8 status;
+	hci_handle_t handle;
+	uint8 link_quality;
+} BWL_POST_PACKED_STRUCT read_linkq_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct ld_evt_parms {
+	uint8 status;
+	uint8 ld_aware;
+	uint8 ld[2];
+	uint8 ld_opts;
+	uint8 l_opts;
+} BWL_POST_PACKED_STRUCT ld_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct eflush_complete_evt_parms {
+	uint16 handle;
+} BWL_POST_PACKED_STRUCT eflush_complete_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct vendor_specific_evt_parms {
+	uint8 len;
+	uint8 parms[1];
+} BWL_POST_PACKED_STRUCT vendor_specific_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct local_version_info_evt_parms {
+	uint8 status;
+	uint8 hci_version;
+	uint16 hci_revision;
+	uint8 pal_version;
+	uint16 mfg_name;
+	uint16 pal_subversion;
+} BWL_POST_PACKED_STRUCT local_version_info_evt_parms_t;
+
+#define MAX_SUPPORTED_CMD_BYTE	64
+typedef BWL_PRE_PACKED_STRUCT struct local_supported_cmd_evt_parms {
+	uint8 status;
+	uint8 cmd[MAX_SUPPORTED_CMD_BYTE];
+} BWL_POST_PACKED_STRUCT local_supported_cmd_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct status_change_evt_parms {
+	uint8 status;
+	uint8 amp_status;
+} BWL_POST_PACKED_STRUCT status_change_evt_parms_t;
+
+/* AMP HCI error codes */
+#define HCI_SUCCESS				0x00
+#define HCI_ERR_ILLEGAL_COMMAND			0x01
+#define HCI_ERR_NO_CONNECTION			0x02
+#define HCI_ERR_MEMORY_FULL			0x07
+#define HCI_ERR_CONNECTION_TIMEOUT		0x08
+#define HCI_ERR_MAX_NUM_OF_CONNECTIONS		0x09
+#define HCI_ERR_CONNECTION_EXISTS		0x0B
+#define HCI_ERR_CONNECTION_DISALLOWED		0x0C
+#define HCI_ERR_CONNECTION_ACCEPT_TIMEOUT	0x10
+#define HCI_ERR_UNSUPPORTED_VALUE		0x11
+#define HCI_ERR_ILLEGAL_PARAMETER_FMT		0x12
+#define HCI_ERR_CONN_TERM_BY_LOCAL_HOST		0x16
+#define HCI_ERR_UNSPECIFIED			0x1F
+#define HCI_ERR_UNIT_KEY_USED			0x26
+#define HCI_ERR_QOS_REJECTED			0x2D
+#define HCI_ERR_PARAM_OUT_OF_RANGE		0x30
+#define HCI_ERR_NO_SUITABLE_CHANNEL		0x39
+#define HCI_ERR_CHANNEL_MOVE			0xFF
+
+/* AMP HCI ACL Data packet format */
+typedef BWL_PRE_PACKED_STRUCT struct amp_hci_ACL_data {
+	uint16	handle;			/* 12-bit connection handle + 2-bit PB and 2-bit BC flags */
+	uint16	dlen;			/* data total length */
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT amp_hci_ACL_data_t;
+
+#define HCI_ACL_DATA_PREAMBLE_SIZE	OFFSETOF(amp_hci_ACL_data_t, data)
+
+#define HCI_ACL_DATA_BC_FLAGS		(0x0 << 14)
+#define HCI_ACL_DATA_PB_FLAGS		(0x3 << 12)
+
+#define HCI_ACL_DATA_HANDLE(handle)	((handle) & 0x0fff)
+#define HCI_ACL_DATA_FLAGS(handle)	((handle) >> 12)
+
+/* AMP Activity Report packet formats */
+typedef BWL_PRE_PACKED_STRUCT struct amp_hci_activity_report {
+	uint8	ScheduleKnown;
+	uint8	NumReports;
+	uint8	data[1];
+} BWL_POST_PACKED_STRUCT amp_hci_activity_report_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct amp_hci_activity_report_triple {
+	uint32	StartTime;
+	uint32	Duration;
+	uint32	Periodicity;
+} BWL_POST_PACKED_STRUCT amp_hci_activity_report_triple_t;
+
+#define HCI_AR_SCHEDULE_KNOWN		0x01
+
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _bt_amp_hci_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/eapol.h b/drivers/net/wireless/bcmdhd/include/proto/eapol.h
new file mode 100644
index 0000000..d3bff33
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/eapol.h
@@ -0,0 +1,194 @@
+/*
+ * 802.1x EAPOL definitions
+ *
+ * See
+ * IEEE Std 802.1X-2001
+ * IEEE 802.1X RADIUS Usage Guidelines
+ *
+ * Copyright Open Broadcom Corporation
+ *
+ * $Id: eapol.h 452703 2014-01-31 20:33:06Z $
+ */
+
+#ifndef _eapol_h_
+#define _eapol_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+#include <bcmcrypto/aeskeywrap.h>
+
+/* EAPOL for 802.3/Ethernet */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	struct ether_header eth;	/* 802.3/Ethernet header */
+	unsigned char version;		/* EAPOL protocol version */
+	unsigned char type;		/* EAPOL type */
+	unsigned short length;		/* Length of body */
+	unsigned char body[1];		/* Body (optional) */
+} BWL_POST_PACKED_STRUCT eapol_header_t;
+
+#define EAPOL_HEADER_LEN 18
+
+typedef struct {
+	unsigned char version;		/* EAPOL protocol version */
+	unsigned char type;		/* EAPOL type */
+	unsigned short length;		/* Length of body */
+} eapol_hdr_t;
+
+#define EAPOL_HDR_LEN 4
+
+/* EAPOL version */
+#define WPA2_EAPOL_VERSION	2
+#define WPA_EAPOL_VERSION	1
+#define LEAP_EAPOL_VERSION	1
+#define SES_EAPOL_VERSION	1
+
+/* EAPOL types */
+#define EAP_PACKET		0
+#define EAPOL_START		1
+#define EAPOL_LOGOFF		2
+#define EAPOL_KEY		3
+#define EAPOL_ASF		4
+
+/* EAPOL-Key types */
+#define EAPOL_RC4_KEY		1
+#define EAPOL_WPA2_KEY		2	/* 802.11i/WPA2 */
+#define EAPOL_WPA_KEY		254	/* WPA */
+
+/* RC4 EAPOL-Key header field sizes */
+#define EAPOL_KEY_REPLAY_LEN	8
+#define EAPOL_KEY_IV_LEN	16
+#define EAPOL_KEY_SIG_LEN	16
+
+/* RC4 EAPOL-Key */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	unsigned char type;			/* Key Descriptor Type */
+	unsigned short length;			/* Key Length (unaligned) */
+	unsigned char replay[EAPOL_KEY_REPLAY_LEN];	/* Replay Counter */
+	unsigned char iv[EAPOL_KEY_IV_LEN];		/* Key IV */
+	unsigned char index;				/* Key Flags & Index */
+	unsigned char signature[EAPOL_KEY_SIG_LEN];	/* Key Signature */
+	unsigned char key[1];				/* Key (optional) */
+} BWL_POST_PACKED_STRUCT eapol_key_header_t;
+
+#define EAPOL_KEY_HEADER_LEN 	44
+
+/* RC4 EAPOL-Key flags */
+#define EAPOL_KEY_FLAGS_MASK	0x80
+#define EAPOL_KEY_BROADCAST	0
+#define EAPOL_KEY_UNICAST	0x80
+
+/* RC4 EAPOL-Key index */
+#define EAPOL_KEY_INDEX_MASK	0x7f
+
+/* WPA/802.11i/WPA2 EAPOL-Key header field sizes */
+#define EAPOL_WPA_KEY_REPLAY_LEN	8
+#define EAPOL_WPA_KEY_NONCE_LEN		32
+#define EAPOL_WPA_KEY_IV_LEN		16
+#define EAPOL_WPA_KEY_RSC_LEN		8
+#define EAPOL_WPA_KEY_ID_LEN		8
+#define EAPOL_WPA_KEY_MIC_LEN		16
+#define EAPOL_WPA_KEY_DATA_LEN		(EAPOL_WPA_MAX_KEY_SIZE + AKW_BLOCK_LEN)
+#define EAPOL_WPA_MAX_KEY_SIZE		32
+
+/* WPA EAPOL-Key */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	unsigned char type;		/* Key Descriptor Type */
+	unsigned short key_info;	/* Key Information (unaligned) */
+	unsigned short key_len;		/* Key Length (unaligned) */
+	unsigned char replay[EAPOL_WPA_KEY_REPLAY_LEN];	/* Replay Counter */
+	unsigned char nonce[EAPOL_WPA_KEY_NONCE_LEN];	/* Nonce */
+	unsigned char iv[EAPOL_WPA_KEY_IV_LEN];		/* Key IV */
+	unsigned char rsc[EAPOL_WPA_KEY_RSC_LEN];	/* Key RSC */
+	unsigned char id[EAPOL_WPA_KEY_ID_LEN];		/* WPA:Key ID, 802.11i/WPA2: Reserved */
+	unsigned char mic[EAPOL_WPA_KEY_MIC_LEN];	/* Key MIC */
+	unsigned short data_len;			/* Key Data Length */
+	unsigned char data[EAPOL_WPA_KEY_DATA_LEN];	/* Key data */
+} BWL_POST_PACKED_STRUCT eapol_wpa_key_header_t;
+
+#define EAPOL_WPA_KEY_LEN 		95
+
+/* WPA/802.11i/WPA2 KEY KEY_INFO bits */
+#define WPA_KEY_DESC_OSEN	0x0
+#define WPA_KEY_DESC_V1		0x01
+#define WPA_KEY_DESC_V2		0x02
+#define WPA_KEY_DESC_V3		0x03
+#define WPA_KEY_PAIRWISE	0x08
+#define WPA_KEY_INSTALL		0x40
+#define WPA_KEY_ACK		0x80
+#define WPA_KEY_MIC		0x100
+#define WPA_KEY_SECURE		0x200
+#define WPA_KEY_ERROR		0x400
+#define WPA_KEY_REQ		0x800
+
+#define WPA_KEY_DESC_V2_OR_V3 WPA_KEY_DESC_V2
+
+/* WPA-only KEY KEY_INFO bits */
+#define WPA_KEY_INDEX_0		0x00
+#define WPA_KEY_INDEX_1		0x10
+#define WPA_KEY_INDEX_2		0x20
+#define WPA_KEY_INDEX_3		0x30
+#define WPA_KEY_INDEX_MASK	0x30
+#define WPA_KEY_INDEX_SHIFT	0x04
+
+/* 802.11i/WPA2-only KEY KEY_INFO bits */
+#define WPA_KEY_ENCRYPTED_DATA	0x1000
+
+/* Key Data encapsulation */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint8 type;
+	uint8 length;
+	uint8 oui[3];
+	uint8 subtype;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT eapol_wpa2_encap_data_t;
+
+#define EAPOL_WPA2_ENCAP_DATA_HDR_LEN 	6
+
+#define WPA2_KEY_DATA_SUBTYPE_GTK	1
+#define WPA2_KEY_DATA_SUBTYPE_STAKEY	2
+#define WPA2_KEY_DATA_SUBTYPE_MAC	3
+#define WPA2_KEY_DATA_SUBTYPE_PMKID	4
+#define WPA2_KEY_DATA_SUBTYPE_IGTK	9
+
+/* GTK encapsulation */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint8	flags;
+	uint8	reserved;
+	uint8	gtk[EAPOL_WPA_MAX_KEY_SIZE];
+} BWL_POST_PACKED_STRUCT eapol_wpa2_key_gtk_encap_t;
+
+#define EAPOL_WPA2_KEY_GTK_ENCAP_HDR_LEN 	2
+
+#define WPA2_GTK_INDEX_MASK	0x03
+#define WPA2_GTK_INDEX_SHIFT	0x00
+
+#define WPA2_GTK_TRANSMIT	0x04
+
+/* IGTK encapsulation */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint16	key_id;
+	uint8	ipn[6];
+	uint8	key[EAPOL_WPA_MAX_KEY_SIZE];
+} BWL_POST_PACKED_STRUCT eapol_wpa2_key_igtk_encap_t;
+
+#define EAPOL_WPA2_KEY_IGTK_ENCAP_HDR_LEN 	8
+
+/* STAKey encapsulation */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint8	reserved[2];
+	uint8	mac[ETHER_ADDR_LEN];
+	uint8	stakey[EAPOL_WPA_MAX_KEY_SIZE];
+} BWL_POST_PACKED_STRUCT eapol_wpa2_key_stakey_encap_t;
+
+#define WPA2_KEY_DATA_PAD	0xdd
+
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _eapol_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/ethernet.h b/drivers/net/wireless/bcmdhd/include/proto/ethernet.h
new file mode 100644
index 0000000..d3ef8c5
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/ethernet.h
@@ -0,0 +1,228 @@
+/*
+ * From FreeBSD 2.2.7: Fundamental constants relating to ethernet.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: ethernet.h 473238 2014-04-28 19:14:56Z $
+ */
+
+#ifndef _NET_ETHERNET_H_	/* use native BSD ethernet.h when available */
+#define _NET_ETHERNET_H_
+
+#ifndef _TYPEDEFS_H_
+#include "typedefs.h"
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+
+/*
+ * The number of bytes in an ethernet (MAC) address.
+ */
+#define	ETHER_ADDR_LEN		6
+
+/*
+ * The number of bytes in the type field.
+ */
+#define	ETHER_TYPE_LEN		2
+
+/*
+ * The number of bytes in the trailing CRC field.
+ */
+#define	ETHER_CRC_LEN		4
+
+/*
+ * The length of the combined header.
+ */
+#define	ETHER_HDR_LEN		(ETHER_ADDR_LEN * 2 + ETHER_TYPE_LEN)
+
+/*
+ * The minimum packet length.
+ */
+#define	ETHER_MIN_LEN		64
+
+/*
+ * The minimum packet user data length.
+ */
+#define	ETHER_MIN_DATA		46
+
+/*
+ * The maximum packet length.
+ */
+#define	ETHER_MAX_LEN		1518
+
+/*
+ * The maximum packet user data length.
+ */
+#define	ETHER_MAX_DATA		1500
+
+/* ether types */
+#define ETHER_TYPE_MIN		0x0600		/* Anything less than MIN is a length */
+#define	ETHER_TYPE_IP		0x0800		/* IP */
+#define ETHER_TYPE_ARP		0x0806		/* ARP */
+#define ETHER_TYPE_8021Q	0x8100		/* 802.1Q */
+#define	ETHER_TYPE_IPV6		0x86dd		/* IPv6 */
+#define	ETHER_TYPE_BRCM		0x886c		/* Broadcom Corp. */
+#define	ETHER_TYPE_802_1X	0x888e		/* 802.1x */
+#ifdef PLC
+#define	ETHER_TYPE_88E1		0x88e1		/* GIGLE */
+#define	ETHER_TYPE_8912		0x8912		/* GIGLE */
+#define ETHER_TYPE_GIGLED	0xffff		/* GIGLE */
+#endif /* PLC */
+#define	ETHER_TYPE_802_1X_PREAUTH 0x88c7	/* 802.1x preauthentication */
+#define ETHER_TYPE_WAI		0x88b4		/* WAI */
+#define ETHER_TYPE_89_0D	0x890d		/* 89-0d frame for TDLS */
+
+#define ETHER_TYPE_PPP_SES	0x8864		/* PPPoE Session */
+
+#define ETHER_TYPE_IAPP_L2_UPDATE	0x6	/* IAPP L2 update frame */
+
+/* Broadcom subtype follows ethertype;  First 2 bytes are reserved; Next 2 are subtype; */
+#define	ETHER_BRCM_SUBTYPE_LEN	4	/* Broadcom 4 byte subtype */
+
+/* ether header */
+#define ETHER_DEST_OFFSET	(0 * ETHER_ADDR_LEN)	/* dest address offset */
+#define ETHER_SRC_OFFSET	(1 * ETHER_ADDR_LEN)	/* src address offset */
+#define ETHER_TYPE_OFFSET	(2 * ETHER_ADDR_LEN)	/* ether type offset */
+
+/*
+ * A macro to validate a length with
+ */
+#define	ETHER_IS_VALID_LEN(foo)	\
+	((foo) >= ETHER_MIN_LEN && (foo) <= ETHER_MAX_LEN)
+
+#define ETHER_FILL_MCAST_ADDR_FROM_IP(ea, mgrp_ip) {		\
+		((uint8 *)ea)[0] = 0x01;			\
+		((uint8 *)ea)[1] = 0x00;			\
+		((uint8 *)ea)[2] = 0x5e;			\
+		((uint8 *)ea)[3] = ((mgrp_ip) >> 16) & 0x7f;	\
+		((uint8 *)ea)[4] = ((mgrp_ip) >>  8) & 0xff;	\
+		((uint8 *)ea)[5] = ((mgrp_ip) >>  0) & 0xff;	\
+}
+
+#ifndef __INCif_etherh /* Quick and ugly hack for VxWorks */
+/*
+ * Structure of a 10Mb/s Ethernet header.
+ */
+BWL_PRE_PACKED_STRUCT struct ether_header {
+	uint8	ether_dhost[ETHER_ADDR_LEN];
+	uint8	ether_shost[ETHER_ADDR_LEN];
+	uint16	ether_type;
+} BWL_POST_PACKED_STRUCT;
+
+/*
+ * Structure of a 48-bit Ethernet address.
+ */
+BWL_PRE_PACKED_STRUCT struct	ether_addr {
+	uint8 octet[ETHER_ADDR_LEN];
+} BWL_POST_PACKED_STRUCT;
+#endif	/* !__INCif_etherh Quick and ugly hack for VxWorks */
+
+/*
+ * Takes a pointer, set, test, clear, toggle locally admininistered
+ * address bit in the 48-bit Ethernet address.
+ */
+#define ETHER_SET_LOCALADDR(ea)	(((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] | 2))
+#define ETHER_IS_LOCALADDR(ea) 	(((uint8 *)(ea))[0] & 2)
+#define ETHER_CLR_LOCALADDR(ea)	(((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] & 0xfd))
+#define ETHER_TOGGLE_LOCALADDR(ea)	(((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] ^ 2))
+
+/* Takes a pointer, marks unicast address bit in the MAC address */
+#define ETHER_SET_UNICAST(ea)	(((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] & ~1))
+
+/*
+ * Takes a pointer, returns true if a 48-bit multicast address
+ * (including broadcast, since it is all ones)
+ */
+#define ETHER_ISMULTI(ea) (((const uint8 *)(ea))[0] & 1)
+
+
+/* compare two ethernet addresses - assumes the pointers can be referenced as shorts */
+#define eacmp(a, b)	((((const uint16 *)(a))[0] ^ ((const uint16 *)(b))[0]) | \
+	                 (((const uint16 *)(a))[1] ^ ((const uint16 *)(b))[1]) | \
+	                 (((const uint16 *)(a))[2] ^ ((const uint16 *)(b))[2]))
+
+#define	ether_cmp(a, b)	eacmp(a, b)
+
+/* copy an ethernet address - assumes the pointers can be referenced as shorts */
+#define eacopy(s, d) \
+do { \
+	((uint16 *)(d))[0] = ((const uint16 *)(s))[0]; \
+	((uint16 *)(d))[1] = ((const uint16 *)(s))[1]; \
+	((uint16 *)(d))[2] = ((const uint16 *)(s))[2]; \
+} while (0)
+
+#define	ether_copy(s, d) eacopy(s, d)
+
+/* Copy an ethernet address in reverse order */
+#define	ether_rcopy(s, d) \
+do { \
+	((uint16 *)(d))[2] = ((uint16 *)(s))[2]; \
+	((uint16 *)(d))[1] = ((uint16 *)(s))[1]; \
+	((uint16 *)(d))[0] = ((uint16 *)(s))[0]; \
+} while (0)
+
+/* Copy 14B ethernet header: 32bit aligned source and destination. */
+#define ehcopy32(s, d) \
+do { \
+	((uint32 *)(d))[0] = ((const uint32 *)(s))[0]; \
+	((uint32 *)(d))[1] = ((const uint32 *)(s))[1]; \
+	((uint32 *)(d))[2] = ((const uint32 *)(s))[2]; \
+	((uint16 *)(d))[6] = ((const uint16 *)(s))[6]; \
+} while (0)
+
+
+static const struct ether_addr ether_bcast = {{255, 255, 255, 255, 255, 255}};
+static const struct ether_addr ether_null = {{0, 0, 0, 0, 0, 0}};
+static const struct ether_addr ether_ipv6_mcast = {{0x33, 0x33, 0x00, 0x00, 0x00, 0x01}};
+
+#define ETHER_ISBCAST(ea)	((((const uint8 *)(ea))[0] &		\
+	                          ((const uint8 *)(ea))[1] &		\
+				  ((const uint8 *)(ea))[2] &		\
+				  ((const uint8 *)(ea))[3] &		\
+				  ((const uint8 *)(ea))[4] &		\
+				  ((const uint8 *)(ea))[5]) == 0xff)
+#define ETHER_ISNULLADDR(ea)	((((const uint8 *)(ea))[0] |		\
+				  ((const uint8 *)(ea))[1] |		\
+				  ((const uint8 *)(ea))[2] |		\
+				  ((const uint8 *)(ea))[3] |		\
+				  ((const uint8 *)(ea))[4] |		\
+				  ((const uint8 *)(ea))[5]) == 0)
+
+#define ETHER_ISNULLDEST(da)	((((const uint16 *)(da))[0] |           \
+				  ((const uint16 *)(da))[1] |           \
+				  ((const uint16 *)(da))[2]) == 0)
+#define ETHER_ISNULLSRC(sa)	ETHER_ISNULLDEST(sa)
+
+#define ETHER_MOVE_HDR(d, s) \
+do { \
+	struct ether_header t; \
+	t = *(struct ether_header *)(s); \
+	*(struct ether_header *)(d) = t; \
+} while (0)
+
+#define  ETHER_ISUCAST(ea) ((((uint8 *)(ea))[0] & 0x01) == 0)
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _NET_ETHERNET_H_ */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/p2p.h b/drivers/net/wireless/bcmdhd/include/proto/p2p.h
new file mode 100644
index 0000000..be73c8b
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/p2p.h
@@ -0,0 +1,710 @@
+/*
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * Fundamental types and constants relating to WFA P2P (aka WiFi Direct)
+ *
+ * $Id: p2p.h 457033 2014-02-20 19:39:45Z $
+ */
+
+#ifndef _P2P_H_
+#define _P2P_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+#include <wlioctl.h>
+#include <proto/802.11.h>
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+
+/* WiFi P2P OUI values */
+#define P2P_OUI			WFA_OUI			/* WiFi P2P OUI */
+#define P2P_VER			WFA_OUI_TYPE_P2P	/* P2P version: 9=WiFi P2P v1.0 */
+
+#define P2P_IE_ID		0xdd			/* P2P IE element ID */
+
+/* WiFi P2P IE */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_ie {
+	uint8	id;		/* IE ID: 0xDD */
+	uint8	len;		/* IE length */
+	uint8	OUI[3];		/* WiFi P2P specific OUI: P2P_OUI */
+	uint8	oui_type;	/* Identifies P2P version: P2P_VER */
+	uint8	subelts[1];	/* variable length subelements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_ie wifi_p2p_ie_t;
+
+#define P2P_IE_FIXED_LEN	6
+
+#define P2P_ATTR_ID_OFF		0
+#define P2P_ATTR_LEN_OFF	1
+#define P2P_ATTR_DATA_OFF	3
+
+#define P2P_ATTR_ID_LEN		1	/* ID filed length */
+#define P2P_ATTR_LEN_LEN	2	/* length field length */
+#define P2P_ATTR_HDR_LEN	3 /* ID + 2-byte length field spec 1.02 */
+
+#define P2P_WFDS_HASH_LEN		6
+#define P2P_WFDS_MAX_SVC_NAME_LEN	32
+
+/* P2P IE Subelement IDs from WiFi P2P Technical Spec 1.00 */
+#define P2P_SEID_STATUS			0	/* Status */
+#define P2P_SEID_MINOR_RC		1	/* Minor Reason Code */
+#define P2P_SEID_P2P_INFO		2	/* P2P Capability (capabilities info) */
+#define P2P_SEID_DEV_ID			3	/* P2P Device ID */
+#define P2P_SEID_INTENT			4	/* Group Owner Intent */
+#define P2P_SEID_CFG_TIMEOUT		5	/* Configuration Timeout */
+#define P2P_SEID_CHANNEL		6	/* Listen channel */
+#define P2P_SEID_GRP_BSSID		7	/* P2P Group BSSID */
+#define P2P_SEID_XT_TIMING		8	/* Extended Listen Timing */
+#define P2P_SEID_INTINTADDR		9	/* Intended P2P Interface Address */
+#define P2P_SEID_P2P_MGBTY		10	/* P2P Manageability */
+#define P2P_SEID_CHAN_LIST		11	/* Channel List */
+#define P2P_SEID_ABSENCE		12	/* Notice of Absence */
+#define P2P_SEID_DEV_INFO		13	/* Device Info */
+#define P2P_SEID_GROUP_INFO		14	/* Group Info */
+#define P2P_SEID_GROUP_ID		15	/* Group ID */
+#define P2P_SEID_P2P_IF			16	/* P2P Interface */
+#define P2P_SEID_OP_CHANNEL		17	/* Operating Channel */
+#define P2P_SEID_INVITE_FLAGS		18	/* Invitation Flags */
+#define P2P_SEID_SERVICE_HASH		21	/* Service hash */
+#define P2P_SEID_SESSION		22	/* Session information */
+#define P2P_SEID_CONNECT_CAP		23	/* Connection capability */
+#define P2P_SEID_ADVERTISE_ID		24	/* Advertisement ID */
+#define P2P_SEID_ADVERTISE_SERVICE	25	/* Advertised service */
+#define P2P_SEID_SESSION_ID		26	/* Session ID */
+#define P2P_SEID_FEATURE_CAP		27	/* Feature capability */
+#define	P2P_SEID_PERSISTENT_GROUP	28	/* Persistent group */
+#define P2P_SEID_SESSION_INFO_RESP	29	/* Session Information Response */
+#define P2P_SEID_VNDR			221	/* Vendor-specific subelement */
+
+#define P2P_SE_VS_ID_SERVICES	0x1b
+
+
+/* WiFi P2P IE subelement: P2P Capability (capabilities info) */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_info_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_P2P_INFO */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	dev;		/* Device Capability Bitmap */
+	uint8	group;		/* Group Capability Bitmap */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_info_se_s wifi_p2p_info_se_t;
+
+/* P2P Capability subelement's Device Capability Bitmap bit values */
+#define P2P_CAPSE_DEV_SERVICE_DIS	0x1 /* Service Discovery */
+#define P2P_CAPSE_DEV_CLIENT_DIS	0x2 /* Client Discoverability */
+#define P2P_CAPSE_DEV_CONCURRENT	0x4 /* Concurrent Operation */
+#define P2P_CAPSE_DEV_INFRA_MAN		0x8 /* P2P Infrastructure Managed */
+#define P2P_CAPSE_DEV_LIMIT			0x10 /* P2P Device Limit */
+#define P2P_CAPSE_INVITE_PROC		0x20 /* P2P Invitation Procedure */
+
+/* P2P Capability subelement's Group Capability Bitmap bit values */
+#define P2P_CAPSE_GRP_OWNER			0x1 /* P2P Group Owner */
+#define P2P_CAPSE_PERSIST_GRP		0x2 /* Persistent P2P Group */
+#define P2P_CAPSE_GRP_LIMIT			0x4 /* P2P Group Limit */
+#define P2P_CAPSE_GRP_INTRA_BSS		0x8 /* Intra-BSS Distribution */
+#define P2P_CAPSE_GRP_X_CONNECT		0x10 /* Cross Connection */
+#define P2P_CAPSE_GRP_PERSISTENT	0x20 /* Persistent Reconnect */
+#define P2P_CAPSE_GRP_FORMATION		0x40 /* Group Formation */
+
+
+/* WiFi P2P IE subelement: Group Owner Intent */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_intent_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_INTENT */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	intent;		/* Intent Value 0...15 (0=legacy 15=master only) */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_intent_se_s wifi_p2p_intent_se_t;
+
+/* WiFi P2P IE subelement: Configuration Timeout */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_cfg_tmo_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_CFG_TIMEOUT */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	go_tmo;		/* GO config timeout in units of 10 ms */
+	uint8	client_tmo;	/* Client config timeout in units of 10 ms */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_cfg_tmo_se_s wifi_p2p_cfg_tmo_se_t;
+
+/* WiFi P2P IE subelement: Listen Channel */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_listen_channel_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_CHANNEL */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	country[3];	/* Country String */
+	uint8	op_class;	/* Operating Class */
+	uint8	channel;	/* Channel */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_listen_channel_se_s wifi_p2p_listen_channel_se_t;
+
+/* WiFi P2P IE subelement: P2P Group BSSID */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_grp_bssid_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_GRP_BSSID */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	mac[6];		/* P2P group bssid */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_grp_bssid_se_s wifi_p2p_grp_bssid_se_t;
+
+/* WiFi P2P IE subelement: P2P Group ID */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_grp_id_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_GROUP_ID */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	mac[6];		/* P2P device address */
+	uint8	ssid[1];	/* ssid. device id. variable length */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_grp_id_se_s wifi_p2p_grp_id_se_t;
+
+/* WiFi P2P IE subelement: P2P Interface */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_intf_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_P2P_IF */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	mac[6];		/* P2P device address */
+	uint8	ifaddrs;	/* P2P Interface Address count */
+	uint8	ifaddr[1][6];	/* P2P Interface Address list */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_intf_se_s wifi_p2p_intf_se_t;
+
+/* WiFi P2P IE subelement: Status */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_status_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_STATUS */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	status;		/* Status Code: P2P_STATSE_* */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_status_se_s wifi_p2p_status_se_t;
+
+/* Status subelement Status Code definitions */
+#define P2P_STATSE_SUCCESS			0
+				/* Success */
+#define P2P_STATSE_FAIL_INFO_CURR_UNAVAIL	1
+				/* Failed, information currently unavailable */
+#define P2P_STATSE_PASSED_UP			P2P_STATSE_FAIL_INFO_CURR_UNAVAIL
+				/* Old name for above in P2P spec 1.08 and older */
+#define P2P_STATSE_FAIL_INCOMPAT_PARAMS		2
+				/* Failed, incompatible parameters */
+#define P2P_STATSE_FAIL_LIMIT_REACHED		3
+				/* Failed, limit reached */
+#define P2P_STATSE_FAIL_INVALID_PARAMS		4
+				/* Failed, invalid parameters */
+#define P2P_STATSE_FAIL_UNABLE_TO_ACCOM		5
+				/* Failed, unable to accomodate request */
+#define P2P_STATSE_FAIL_PROTO_ERROR		6
+				/* Failed, previous protocol error or disruptive behaviour */
+#define P2P_STATSE_FAIL_NO_COMMON_CHAN		7
+				/* Failed, no common channels */
+#define P2P_STATSE_FAIL_UNKNOWN_GROUP		8
+				/* Failed, unknown P2P Group */
+#define P2P_STATSE_FAIL_INTENT			9
+				/* Failed, both peers indicated Intent 15 in GO Negotiation */
+#define P2P_STATSE_FAIL_INCOMPAT_PROVIS		10
+				/* Failed, incompatible provisioning method */
+#define P2P_STATSE_FAIL_USER_REJECT		11
+				/* Failed, rejected by user */
+#define P2P_STATSE_SUCCESS_USER_ACCEPT		12
+				/* Success, accepted by user */
+
+/* WiFi P2P IE attribute: Extended Listen Timing */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_ext_se_s {
+	uint8	eltId;		/* ID: P2P_SEID_EXT_TIMING */
+	uint8	len[2];		/* length not including eltId, len fields */
+	uint8	avail[2];	/* availibility period */
+	uint8	interval[2];	/* availibility interval */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_ext_se_s wifi_p2p_ext_se_t;
+
+#define P2P_EXT_MIN	10	/* minimum 10ms */
+
+/* WiFi P2P IE subelement: Intended P2P Interface Address */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_intintad_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_INTINTADDR */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	mac[6];		/* intended P2P interface MAC address */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_intintad_se_s wifi_p2p_intintad_se_t;
+
+/* WiFi P2P IE subelement: Channel */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_channel_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_STATUS */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	band;		/* Regulatory Class (band) */
+	uint8	channel;	/* Channel */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_channel_se_s wifi_p2p_channel_se_t;
+
+
+/* Channel Entry structure within the Channel List SE */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_chanlist_entry_s {
+	uint8	band;						/* Regulatory Class (band) */
+	uint8	num_channels;				/* # of channels in the channel list */
+	uint8	channels[WL_NUMCHANNELS];	/* Channel List */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_chanlist_entry_s wifi_p2p_chanlist_entry_t;
+#define WIFI_P2P_CHANLIST_SE_MAX_ENTRIES 2
+
+/* WiFi P2P IE subelement: Channel List */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_chanlist_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_CHAN_LIST */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	country[3];	/* Country String */
+	uint8	num_entries;	/* # of channel entries */
+	wifi_p2p_chanlist_entry_t	entries[WIFI_P2P_CHANLIST_SE_MAX_ENTRIES];
+						/* Channel Entry List */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_chanlist_se_s wifi_p2p_chanlist_se_t;
+
+/* WiFi Primary Device Type structure */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_pri_devtype_s {
+	uint16	cat_id;		/* Category ID */
+	uint8	OUI[3];		/* WFA OUI: 0x0050F2 */
+	uint8	oui_type;	/* WPS_OUI_TYPE */
+	uint16	sub_cat_id;	/* Sub Category ID */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_pri_devtype_s wifi_p2p_pri_devtype_t;
+
+/* WiFi P2P Device Info Sub Element Primary Device Type Sub Category
+ * maximum values for each category
+ */
+#define P2P_DISE_SUBCATEGORY_MINVAL		1
+#define P2P_DISE_CATEGORY_COMPUTER		1
+#define P2P_DISE_SUBCATEGORY_COMPUTER_MAXVAL		8
+#define P2P_DISE_CATEGORY_INPUT_DEVICE		2
+#define P2P_DISE_SUBCATEGORY_INPUT_DEVICE_MAXVAL	9
+#define P2P_DISE_CATEGORY_PRINTER		3
+#define P2P_DISE_SUBCATEGORY_PRINTER_MAXVAL		5
+#define P2P_DISE_CATEGORY_CAMERA		4
+#define P2P_DISE_SUBCATEGORY_CAMERA_MAXVAL		4
+#define P2P_DISE_CATEGORY_STORAGE		5
+#define P2P_DISE_SUBCATEGORY_STORAGE_MAXVAL		1
+#define P2P_DISE_CATEGORY_NETWORK_INFRA		6
+#define P2P_DISE_SUBCATEGORY_NETWORK_INFRA_MAXVAL	4
+#define P2P_DISE_CATEGORY_DISPLAY		7
+#define P2P_DISE_SUBCATEGORY_DISPLAY_MAXVAL		4
+#define P2P_DISE_CATEGORY_MULTIMEDIA		8
+#define P2P_DISE_SUBCATEGORY_MULTIMEDIA_MAXVAL		6
+#define P2P_DISE_CATEGORY_GAMING		9
+#define P2P_DISE_SUBCATEGORY_GAMING_MAXVAL		5
+#define P2P_DISE_CATEGORY_TELEPHONE		10
+#define P2P_DISE_SUBCATEGORY_TELEPHONE_MAXVAL		5
+#define P2P_DISE_CATEGORY_AUDIO			11
+#define P2P_DISE_SUBCATEGORY_AUDIO_MAXVAL		6
+
+/* WiFi P2P IE's Device Info subelement */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_devinfo_se_s {
+	uint8	eltId;			/* SE ID: P2P_SEID_DEVINFO */
+	uint8	len[2];			/* SE length not including eltId, len fields */
+	uint8	mac[6];			/* P2P Device MAC address */
+	uint16	wps_cfg_meths;		/* Config Methods: reg_prototlv.h WPS_CONFMET_* */
+	uint8	pri_devtype[8];		/* Primary Device Type */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_devinfo_se_s wifi_p2p_devinfo_se_t;
+
+#define P2P_DEV_TYPE_LEN	8
+
+/* WiFi P2P IE's Group Info subelement Client Info Descriptor */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_cid_fixed_s {
+	uint8	len;
+	uint8	devaddr[ETHER_ADDR_LEN];	/* P2P Device Address */
+	uint8	ifaddr[ETHER_ADDR_LEN];		/* P2P Interface Address */
+	uint8	devcap;				/* Device Capability */
+	uint8	cfg_meths[2];			/* Config Methods: reg_prototlv.h WPS_CONFMET_* */
+	uint8	pridt[P2P_DEV_TYPE_LEN];	/* Primary Device Type */
+	uint8	secdts;				/* Number of Secondary Device Types */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_cid_fixed_s wifi_p2p_cid_fixed_t;
+
+/* WiFi P2P IE's Device ID subelement */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_devid_se_s {
+	uint8	eltId;
+	uint8	len[2];
+	struct ether_addr	addr;			/* P2P Device MAC address */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_devid_se_s wifi_p2p_devid_se_t;
+
+/* WiFi P2P IE subelement: P2P Manageability */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_mgbt_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_P2P_MGBTY */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	mg_bitmap;	/* manageability bitmap */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_mgbt_se_s wifi_p2p_mgbt_se_t;
+/* mg_bitmap field bit values */
+#define P2P_MGBTSE_P2PDEVMGMT_FLAG   0x1 /* AP supports Managed P2P Device */
+
+/* WiFi P2P IE subelement: Group Info */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_grpinfo_se_s {
+	uint8	eltId;			/* SE ID: P2P_SEID_GROUP_INFO */
+	uint8	len[2];			/* SE length not including eltId, len fields */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_grpinfo_se_s wifi_p2p_grpinfo_se_t;
+
+/* WiFi IE subelement: Operating Channel */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_op_channel_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_OP_CHANNEL */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	country[3];	/* Country String */
+	uint8	op_class;	/* Operating Class */
+	uint8	channel;	/* Channel */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_op_channel_se_s wifi_p2p_op_channel_se_t;
+
+/* WiFi IE subelement: INVITATION FLAGS */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_invite_flags_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_INVITE_FLAGS */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	flags;		/* Flags */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_invite_flags_se_s wifi_p2p_invite_flags_se_t;
+
+/* WiFi P2P IE subelement: Service Hash */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_serv_hash_se_s {
+	uint8	eltId;			/* SE ID: P2P_SEID_SERVICE_HASH */
+	uint8	len[2];			/* SE length not including eltId, len fields
+					 * in multiple of 6 Bytes
+					*/
+	uint8	hash[1];		/* Variable length - SHA256 hash of
+					 * service names (can be more than one hashes)
+					*/
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_serv_hash_se_s wifi_p2p_serv_hash_se_t;
+
+/* WiFi P2P IE subelement: Service Instance Data */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_serv_inst_data_se_s {
+	uint8	eltId;			/* SE ID: P2P_SEID_SESSION */
+	uint8	len[2];			/* SE length not including eltId, len */
+	uint8	ssn_info[1];		/* Variable length - Session information as specified by
+					 * the service layer, type matches serv. name
+					*/
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_serv_inst_data_se_s wifi_p2p_serv_inst_data_se_t;
+
+
+/* WiFi P2P IE subelement: Connection capability */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_conn_cap_data_se_s {
+	uint8	eltId;			/* SE ID: P2P_SEID_CONNECT_CAP */
+	uint8	len[2];			/* SE length not including eltId, len */
+	uint8	conn_cap;		/* 1byte capability as specified by the
+					 * service layer, valid bitmask/values
+					*/
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_conn_cap_data_se_s wifi_p2p_conn_cap_data_se_t;
+
+
+/* WiFi P2P IE subelement: Advertisement ID */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_advt_id_se_s {
+	uint8	eltId;			/* SE ID: P2P_SEID_ADVERTISE_ID */
+	uint8	len[2];			/* SE length not including eltId, len fixed 4 Bytes */
+	uint8	advt_id[4];		/* 4byte Advertisement ID of the peer device sent in
+					 * PROV Disc in Network byte order
+					*/
+	uint8	advt_mac[6];			/* P2P device address of the service advertiser */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_advt_id_se_s wifi_p2p_advt_id_se_t;
+
+
+/* WiFi P2P IE subelement: Advertise Service Hash */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_adv_serv_info_s {
+	uint8	advt_id[4];		/* SE Advertise ID for the service */
+	uint16	nw_cfg_method;	/* SE Network Config method for the service */
+	uint8	serv_name_len;	/* SE length of the service name */
+	uint8	serv_name[1];	/* Variable length service name field */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_adv_serv_info_s wifi_p2p_adv_serv_info_t;
+
+
+/* WiFi P2P IE subelement: Advertise Service Hash */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_advt_serv_se_s {
+	uint8	eltId;			/* SE ID: P2P_SEID_ADVERTISE_SERVICE */
+	uint8	len[2];			/* SE length not including eltId, len fields mutiple len of
+					 * wifi_p2p_adv_serv_info_t entries
+					*/
+	wifi_p2p_adv_serv_info_t	p_advt_serv_info[1]; /* Variable length
+								of multiple instances
+								of the advertise service info
+								*/
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_advt_serv_se_s wifi_p2p_advt_serv_se_t;
+
+
+/* WiFi P2P IE subelement: Session ID */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_ssn_id_se_s {
+	uint8	eltId;			/* SE ID: P2P_SEID_SESSION_ID */
+	uint8	len[2];			/* SE length not including eltId, len fixed 4 Bytes */
+	uint8	ssn_id[4];		/* 4byte Session ID of the peer device sent in
+							 * PROV Disc in Network byte order
+							 */
+	uint8	ssn_mac[6];		/* P2P device address of the seeker - session mac */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_ssn_id_se_s wifi_p2p_ssn_id_se_t;
+
+
+#define P2P_ADVT_SERV_SE_FIXED_LEN	3	/* Includes only the element ID and len */
+#define P2P_ADVT_SERV_INFO_FIXED_LEN	7	/* Per ADV Service Instance advt_id +
+						 * nw_config_method + serv_name_len
+						 */
+
+/* WiFi P2P Action Frame */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_action_frame {
+	uint8	category;	/* P2P_AF_CATEGORY */
+	uint8	OUI[3];		/* OUI - P2P_OUI */
+	uint8	type;		/* OUI Type - P2P_VER */
+	uint8	subtype;	/* OUI Subtype - P2P_AF_* */
+	uint8	dialog_token;	/* nonzero, identifies req/resp tranaction */
+	uint8	elts[1];	/* Variable length information elements.  Max size =
+				 * ACTION_FRAME_SIZE - sizeof(this structure) - 1
+				 */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_action_frame wifi_p2p_action_frame_t;
+#define P2P_AF_CATEGORY		0x7f
+
+#define P2P_AF_FIXED_LEN	7
+
+/* WiFi P2P Action Frame OUI Subtypes */
+#define P2P_AF_NOTICE_OF_ABSENCE	0	/* Notice of Absence */
+#define P2P_AF_PRESENCE_REQ		1	/* P2P Presence Request */
+#define P2P_AF_PRESENCE_RSP		2	/* P2P Presence Response */
+#define P2P_AF_GO_DISC_REQ		3	/* GO Discoverability Request */
+
+
+/* WiFi P2P Public Action Frame */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_pub_act_frame {
+	uint8	category;	/* P2P_PUB_AF_CATEGORY */
+	uint8	action;		/* P2P_PUB_AF_ACTION */
+	uint8	oui[3];		/* P2P_OUI */
+	uint8	oui_type;	/* OUI type - P2P_VER */
+	uint8	subtype;	/* OUI subtype - P2P_TYPE_* */
+	uint8	dialog_token;	/* nonzero, identifies req/rsp transaction */
+	uint8	elts[1];	/* Variable length information elements.  Max size =
+				 * ACTION_FRAME_SIZE - sizeof(this structure) - 1
+				 */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_pub_act_frame wifi_p2p_pub_act_frame_t;
+#define P2P_PUB_AF_FIXED_LEN	8
+#define P2P_PUB_AF_CATEGORY	0x04
+#define P2P_PUB_AF_ACTION	0x09
+
+/* WiFi P2P Public Action Frame OUI Subtypes */
+#define P2P_PAF_GON_REQ		0	/* Group Owner Negotiation Req */
+#define P2P_PAF_GON_RSP		1	/* Group Owner Negotiation Rsp */
+#define P2P_PAF_GON_CONF	2	/* Group Owner Negotiation Confirm */
+#define P2P_PAF_INVITE_REQ	3	/* P2P Invitation Request */
+#define P2P_PAF_INVITE_RSP	4	/* P2P Invitation Response */
+#define P2P_PAF_DEVDIS_REQ	5	/* Device Discoverability Request */
+#define P2P_PAF_DEVDIS_RSP	6	/* Device Discoverability Response */
+#define P2P_PAF_PROVDIS_REQ	7	/* Provision Discovery Request */
+#define P2P_PAF_PROVDIS_RSP	8	/* Provision Discovery Response */
+#define P2P_PAF_SUBTYPE_INVALID	255	/* Invalid Subtype */
+
+/* TODO: Stop using these obsolete aliases for P2P_PAF_GON_* */
+#define P2P_TYPE_MNREQ		P2P_PAF_GON_REQ
+#define P2P_TYPE_MNRSP		P2P_PAF_GON_RSP
+#define P2P_TYPE_MNCONF		P2P_PAF_GON_CONF
+
+/* WiFi P2P IE subelement: Notice of Absence */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_noa_desc {
+	uint8	cnt_type;	/* Count/Type */
+	uint32	duration;	/* Duration */
+	uint32	interval;	/* Interval */
+	uint32	start;		/* Start Time */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_noa_desc wifi_p2p_noa_desc_t;
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_noa_se {
+	uint8	eltId;		/* Subelement ID */
+	uint8	len[2];		/* Length */
+	uint8	index;		/* Index */
+	uint8	ops_ctw_parms;	/* CTWindow and OppPS Parameters */
+	wifi_p2p_noa_desc_t	desc[1];	/* Notice of Absence Descriptor(s) */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_noa_se wifi_p2p_noa_se_t;
+
+#define P2P_NOA_SE_FIXED_LEN	5
+
+#define P2P_NOA_SE_MAX_DESC	2	/* max NoA descriptors in presence request */
+
+/* cnt_type field values */
+#define P2P_NOA_DESC_CNT_RESERVED	0	/* reserved and should not be used */
+#define P2P_NOA_DESC_CNT_REPEAT		255	/* continuous schedule */
+#define P2P_NOA_DESC_TYPE_PREFERRED	1	/* preferred values */
+#define P2P_NOA_DESC_TYPE_ACCEPTABLE	2	/* acceptable limits */
+
+/* ctw_ops_parms field values */
+#define P2P_NOA_CTW_MASK	0x7f
+#define P2P_NOA_OPS_MASK	0x80
+#define P2P_NOA_OPS_SHIFT	7
+
+#define P2P_CTW_MIN	10	/* minimum 10TU */
+
+/*
+ * P2P Service Discovery related
+ */
+#define	P2PSD_ACTION_CATEGORY		0x04
+				/* Public action frame */
+#define	P2PSD_ACTION_ID_GAS_IREQ	0x0a
+				/* Action value for GAS Initial Request AF */
+#define	P2PSD_ACTION_ID_GAS_IRESP	0x0b
+				/* Action value for GAS Initial Response AF */
+#define	P2PSD_ACTION_ID_GAS_CREQ	0x0c
+				/* Action value for GAS Comback Request AF */
+#define	P2PSD_ACTION_ID_GAS_CRESP	0x0d
+				/* Action value for GAS Comback Response AF */
+#define P2PSD_AD_EID				0x6c
+				/* Advertisement Protocol IE ID */
+#define P2PSD_ADP_TUPLE_QLMT_PAMEBI	0x00
+				/* Query Response Length Limit 7 bits plus PAME-BI 1 bit */
+#define P2PSD_ADP_PROTO_ID			0x00
+				/* Advertisement Protocol ID. Always 0 for P2P SD */
+#define P2PSD_GAS_OUI				P2P_OUI
+				/* WFA OUI */
+#define P2PSD_GAS_OUI_SUBTYPE		P2P_VER
+				/* OUI Subtype for GAS IE */
+#define P2PSD_GAS_NQP_INFOID		0xDDDD
+				/* NQP Query Info ID: 56797 */
+#define P2PSD_GAS_COMEBACKDEALY		0x00
+				/* Not used in the Native GAS protocol */
+
+/* Service Protocol Type */
+typedef enum p2psd_svc_protype {
+	SVC_RPOTYPE_ALL = 0,
+	SVC_RPOTYPE_BONJOUR = 1,
+	SVC_RPOTYPE_UPNP = 2,
+	SVC_RPOTYPE_WSD = 3,
+	SVC_RPOTYPE_WFDS = 11,
+	SVC_RPOTYPE_VENDOR = 255
+} p2psd_svc_protype_t;
+
+/* Service Discovery response status code */
+typedef enum {
+	P2PSD_RESP_STATUS_SUCCESS = 0,
+	P2PSD_RESP_STATUS_PROTYPE_NA = 1,
+	P2PSD_RESP_STATUS_DATA_NA = 2,
+	P2PSD_RESP_STATUS_BAD_REQUEST = 3
+} p2psd_resp_status_t;
+
+/* Advertisement Protocol IE tuple field */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_adp_tpl {
+	uint8	llm_pamebi;	/* Query Response Length Limit bit 0-6, set to 0 plus
+				* Pre-Associated Message Exchange BSSID Independent bit 7, set to 0
+				*/
+	uint8	adp_id;		/* Advertisement Protocol ID: 0 for NQP Native Query Protocol */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_adp_tpl wifi_p2psd_adp_tpl_t;
+
+/* Advertisement Protocol IE */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_adp_ie {
+	uint8	id;		/* IE ID: 0x6c - 108 */
+	uint8	len;	/* IE length */
+	wifi_p2psd_adp_tpl_t adp_tpl;  /* Advertisement Protocol Tuple field. Only one
+				* tuple is defined for P2P Service Discovery
+				*/
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_adp_ie wifi_p2psd_adp_ie_t;
+
+/* NQP Vendor-specific Content */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_nqp_query_vsc {
+	uint8	oui_subtype;	/* OUI Subtype: 0x09 */
+	uint16	svc_updi;		/* Service Update Indicator */
+	uint8	svc_tlvs[1];	/* wifi_p2psd_qreq_tlv_t type for service request,
+				* wifi_p2psd_qresp_tlv_t type for service response
+				*/
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_nqp_query_vsc wifi_p2psd_nqp_query_vsc_t;
+
+/* Service Request TLV */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qreq_tlv {
+	uint16	len;			/* Length: 5 plus size of Query Data */
+	uint8	svc_prot;		/* Service Protocol Type */
+	uint8	svc_tscid;		/* Service Transaction ID */
+	uint8	query_data[1];	/* Query Data, passed in from above Layer 2 */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_qreq_tlv wifi_p2psd_qreq_tlv_t;
+
+/* Query Request Frame, defined in generic format, instead of NQP specific */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qreq_frame {
+	uint16	info_id;	/* Info ID: 0xDDDD */
+	uint16	len;		/* Length of service request TLV, 5 plus the size of request data */
+	uint8	oui[3];		/* WFA OUI: 0x0050F2 */
+	uint8	qreq_vsc[1]; /* Vendor-specific Content: wifi_p2psd_nqp_query_vsc_t type for NQP */
+
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_qreq_frame wifi_p2psd_qreq_frame_t;
+
+/* GAS Initial Request AF body, "elts" in wifi_p2p_pub_act_frame */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_ireq_frame {
+	wifi_p2psd_adp_ie_t		adp_ie;		/* Advertisement Protocol IE */
+	uint16					qreq_len;	/* Query Request Length */
+	uint8	qreq_frm[1];	/* Query Request Frame wifi_p2psd_qreq_frame_t */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_gas_ireq_frame wifi_p2psd_gas_ireq_frame_t;
+
+/* Service Response TLV */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qresp_tlv {
+	uint16	len;				/* Length: 5 plus size of Query Data */
+	uint8	svc_prot;			/* Service Protocol Type */
+	uint8	svc_tscid;			/* Service Transaction ID */
+	uint8	status;				/* Value defined in Table 57 of P2P spec. */
+	uint8	query_data[1];		/* Response Data, passed in from above Layer 2 */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_qresp_tlv wifi_p2psd_qresp_tlv_t;
+
+/* Query Response Frame, defined in generic format, instead of NQP specific */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qresp_frame {
+	uint16	info_id;	/* Info ID: 0xDDDD */
+	uint16	len;		/* Lenth of service response TLV, 6 plus the size of resp data */
+	uint8	oui[3];		/* WFA OUI: 0x0050F2 */
+	uint8	qresp_vsc[1]; /* Vendor-specific Content: wifi_p2psd_qresp_tlv_t type for NQP */
+
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_qresp_frame wifi_p2psd_qresp_frame_t;
+
+/* GAS Initial Response AF body, "elts" in wifi_p2p_pub_act_frame */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_iresp_frame {
+	uint16	status;			/* Value defined in Table 7-23 of IEEE P802.11u */
+	uint16	cb_delay;		/* GAS Comeback Delay */
+	wifi_p2psd_adp_ie_t	adp_ie;		/* Advertisement Protocol IE */
+	uint16		qresp_len;	/* Query Response Length */
+	uint8	qresp_frm[1];	/* Query Response Frame wifi_p2psd_qresp_frame_t */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_gas_iresp_frame wifi_p2psd_gas_iresp_frame_t;
+
+/* GAS Comeback Response AF body, "elts" in wifi_p2p_pub_act_frame */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_cresp_frame {
+	uint16	status;			/* Value defined in Table 7-23 of IEEE P802.11u */
+	uint8	fragment_id;	/* Fragmentation ID */
+	uint16	cb_delay;		/* GAS Comeback Delay */
+	wifi_p2psd_adp_ie_t	adp_ie;		/* Advertisement Protocol IE */
+	uint16	qresp_len;		/* Query Response Length */
+	uint8	qresp_frm[1];	/* Query Response Frame wifi_p2psd_qresp_frame_t */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_gas_cresp_frame wifi_p2psd_gas_cresp_frame_t;
+
+/* Wi-Fi GAS Public Action Frame */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_pub_act_frame {
+	uint8	category;		/* 0x04 Public Action Frame */
+	uint8	action;			/* 0x6c Advertisement Protocol */
+	uint8	dialog_token;	/* nonzero, identifies req/rsp transaction */
+	uint8	query_data[1];	/* Query Data. wifi_p2psd_gas_ireq_frame_t
+					 * or wifi_p2psd_gas_iresp_frame_t format
+					 */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_gas_pub_act_frame wifi_p2psd_gas_pub_act_frame_t;
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _P2P_H_ */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/sdspi.h b/drivers/net/wireless/bcmdhd/include/proto/sdspi.h
new file mode 100644
index 0000000..f84d0b8
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/sdspi.h
@@ -0,0 +1,75 @@
+/*
+ * SD-SPI Protocol Standard
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sdspi.h 382882 2013-02-04 23:24:31Z $
+ */
+#ifndef	_SD_SPI_H
+#define	_SD_SPI_H
+
+#define SPI_START_M		BITFIELD_MASK(1)	/* Bit [31] 	- Start Bit */
+#define SPI_START_S		31
+#define SPI_DIR_M		BITFIELD_MASK(1)	/* Bit [30] 	- Direction */
+#define SPI_DIR_S		30
+#define SPI_CMD_INDEX_M		BITFIELD_MASK(6)	/* Bits [29:24] - Command number */
+#define SPI_CMD_INDEX_S		24
+#define SPI_RW_M		BITFIELD_MASK(1)	/* Bit [23] 	- Read=0, Write=1 */
+#define SPI_RW_S		23
+#define SPI_FUNC_M		BITFIELD_MASK(3)	/* Bits [22:20]	- Function Number */
+#define SPI_FUNC_S		20
+#define SPI_RAW_M		BITFIELD_MASK(1)	/* Bit [19] 	- Read After Wr */
+#define SPI_RAW_S		19
+#define SPI_STUFF_M		BITFIELD_MASK(1)	/* Bit [18] 	- Stuff bit */
+#define SPI_STUFF_S		18
+#define SPI_BLKMODE_M		BITFIELD_MASK(1)	/* Bit [19] 	- Blockmode 1=blk */
+#define SPI_BLKMODE_S		19
+#define SPI_OPCODE_M		BITFIELD_MASK(1)	/* Bit [18] 	- OP Code */
+#define SPI_OPCODE_S		18
+#define SPI_ADDR_M		BITFIELD_MASK(17)	/* Bits [17:1] 	- Address */
+#define SPI_ADDR_S		1
+#define SPI_STUFF0_M		BITFIELD_MASK(1)	/* Bit [0] 	- Stuff bit */
+#define SPI_STUFF0_S		0
+
+#define SPI_RSP_START_M		BITFIELD_MASK(1)	/* Bit [7] 	- Start Bit (always 0) */
+#define SPI_RSP_START_S		7
+#define SPI_RSP_PARAM_ERR_M	BITFIELD_MASK(1)	/* Bit [6] 	- Parameter Error */
+#define SPI_RSP_PARAM_ERR_S	6
+#define SPI_RSP_RFU5_M		BITFIELD_MASK(1)	/* Bit [5] 	- RFU (Always 0) */
+#define SPI_RSP_RFU5_S		5
+#define SPI_RSP_FUNC_ERR_M	BITFIELD_MASK(1)	/* Bit [4] 	- Function number error */
+#define SPI_RSP_FUNC_ERR_S	4
+#define SPI_RSP_CRC_ERR_M	BITFIELD_MASK(1)	/* Bit [3] 	- COM CRC Error */
+#define SPI_RSP_CRC_ERR_S	3
+#define SPI_RSP_ILL_CMD_M	BITFIELD_MASK(1)	/* Bit [2] 	- Illegal Command error */
+#define SPI_RSP_ILL_CMD_S	2
+#define SPI_RSP_RFU1_M		BITFIELD_MASK(1)	/* Bit [1] 	- RFU (Always 0) */
+#define SPI_RSP_RFU1_S		1
+#define SPI_RSP_IDLE_M		BITFIELD_MASK(1)	/* Bit [0] 	- In idle state */
+#define SPI_RSP_IDLE_S		0
+
+/* SD-SPI Protocol Definitions */
+#define SDSPI_COMMAND_LEN	6	/* Number of bytes in an SD command */
+#define SDSPI_START_BLOCK	0xFE	/* SD Start Block Token */
+#define SDSPI_IDLE_PAD		0xFF	/* SD-SPI idle value for MOSI */
+#define SDSPI_START_BIT_MASK	0x80
+
+#endif /* _SD_SPI_H */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/vlan.h b/drivers/net/wireless/bcmdhd/include/proto/vlan.h
new file mode 100644
index 0000000..a474cfd
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/vlan.h
@@ -0,0 +1,95 @@
+/*
+ * 802.1Q VLAN protocol definitions
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: vlan.h 382883 2013-02-04 23:26:09Z $
+ */
+
+#ifndef _vlan_h_
+#define _vlan_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+#ifndef	 VLAN_VID_MASK
+#define VLAN_VID_MASK		0xfff	/* low 12 bits are vlan id */
+#endif
+
+#define	VLAN_CFI_SHIFT		12	/* canonical format indicator bit */
+#define VLAN_PRI_SHIFT		13	/* user priority */
+
+#define VLAN_PRI_MASK		7	/* 3 bits of priority */
+
+#define	VLAN_TPID_OFFSET	12	/* offset of tag protocol id field */
+#define	VLAN_TCI_OFFSET		14	/* offset of tag ctrl info field */
+
+#define	VLAN_TAG_LEN		4
+#define	VLAN_TAG_OFFSET		(2 * ETHER_ADDR_LEN)	/* offset in Ethernet II packet only */
+
+#define VLAN_TPID		0x8100	/* VLAN ethertype/Tag Protocol ID */
+
+struct vlan_header {
+	uint16	vlan_type;		/* 0x8100 */
+	uint16	vlan_tag;		/* priority, cfi and vid */
+};
+
+struct ethervlan_header {
+	uint8	ether_dhost[ETHER_ADDR_LEN];
+	uint8	ether_shost[ETHER_ADDR_LEN];
+	uint16	vlan_type;		/* 0x8100 */
+	uint16	vlan_tag;		/* priority, cfi and vid */
+	uint16	ether_type;
+};
+
+struct dot3_mac_llc_snapvlan_header {
+	uint8	ether_dhost[ETHER_ADDR_LEN];	/* dest mac */
+	uint8	ether_shost[ETHER_ADDR_LEN];	/* src mac */
+	uint16	length;				/* frame length incl header */
+	uint8	dsap;				/* always 0xAA */
+	uint8	ssap;				/* always 0xAA */
+	uint8	ctl;				/* always 0x03 */
+	uint8	oui[3];				/* RFC1042: 0x00 0x00 0x00
+						 * Bridge-Tunnel: 0x00 0x00 0xF8
+						 */
+	uint16	vlan_type;			/* 0x8100 */
+	uint16	vlan_tag;			/* priority, cfi and vid */
+	uint16	ether_type;			/* ethertype */
+};
+
+#define	ETHERVLAN_HDR_LEN	(ETHER_HDR_LEN + VLAN_TAG_LEN)
+
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#define ETHERVLAN_MOVE_HDR(d, s) \
+do { \
+	struct ethervlan_header t; \
+	t = *(struct ethervlan_header *)(s); \
+	*(struct ethervlan_header *)(d) = t; \
+} while (0)
+
+#endif /* _vlan_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/wpa.h b/drivers/net/wireless/bcmdhd/include/proto/wpa.h
new file mode 100644
index 0000000..26fdb26
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/wpa.h
@@ -0,0 +1,177 @@
+/*
+ * Fundamental types and constants relating to WPA
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wpa.h 450928 2014-01-23 14:13:38Z $
+ */
+
+#ifndef _proto_wpa_h_
+#define _proto_wpa_h_
+
+#include <typedefs.h>
+#include <proto/ethernet.h>
+
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+/* Reason Codes */
+
+/* 13 through 23 taken from IEEE Std 802.11i-2004 */
+#define DOT11_RC_INVALID_WPA_IE		13	/* Invalid info. element */
+#define DOT11_RC_MIC_FAILURE		14	/* Michael failure */
+#define DOT11_RC_4WH_TIMEOUT		15	/* 4-way handshake timeout */
+#define DOT11_RC_GTK_UPDATE_TIMEOUT	16	/* Group key update timeout */
+#define DOT11_RC_WPA_IE_MISMATCH	17	/* WPA IE in 4-way handshake differs from
+						 * (re-)assoc. request/probe response
+						 */
+#define DOT11_RC_INVALID_MC_CIPHER	18	/* Invalid multicast cipher */
+#define DOT11_RC_INVALID_UC_CIPHER	19	/* Invalid unicast cipher */
+#define DOT11_RC_INVALID_AKMP		20	/* Invalid authenticated key management protocol */
+#define DOT11_RC_BAD_WPA_VERSION	21	/* Unsupported WPA version */
+#define DOT11_RC_INVALID_WPA_CAP	22	/* Invalid WPA IE capabilities */
+#define DOT11_RC_8021X_AUTH_FAIL	23	/* 802.1X authentication failure */
+
+#define WPA2_PMKID_LEN	16
+
+/* WPA IE fixed portion */
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+	uint8 tag;	/* TAG */
+	uint8 length;	/* TAG length */
+	uint8 oui[3];	/* IE OUI */
+	uint8 oui_type;	/* OUI type */
+	BWL_PRE_PACKED_STRUCT struct {
+		uint8 low;
+		uint8 high;
+	} BWL_POST_PACKED_STRUCT version;	/* IE version */
+} BWL_POST_PACKED_STRUCT wpa_ie_fixed_t;
+#define WPA_IE_OUITYPE_LEN	4
+#define WPA_IE_FIXED_LEN	8
+#define WPA_IE_TAG_FIXED_LEN	6
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint8 tag;	/* TAG */
+	uint8 length;	/* TAG length */
+	BWL_PRE_PACKED_STRUCT struct {
+		uint8 low;
+		uint8 high;
+	} BWL_POST_PACKED_STRUCT version;	/* IE version */
+} BWL_POST_PACKED_STRUCT wpa_rsn_ie_fixed_t;
+#define WPA_RSN_IE_FIXED_LEN	4
+#define WPA_RSN_IE_TAG_FIXED_LEN	2
+typedef uint8 wpa_pmkid_t[WPA2_PMKID_LEN];
+
+#define WFA_OSEN_IE_FIXED_LEN	6
+
+/* WPA suite/multicast suite */
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+	uint8 oui[3];
+	uint8 type;
+} BWL_POST_PACKED_STRUCT wpa_suite_t, wpa_suite_mcast_t;
+#define WPA_SUITE_LEN	4
+
+/* WPA unicast suite list/key management suite list */
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+	BWL_PRE_PACKED_STRUCT struct {
+		uint8 low;
+		uint8 high;
+	} BWL_POST_PACKED_STRUCT count;
+	wpa_suite_t list[1];
+} BWL_POST_PACKED_STRUCT wpa_suite_ucast_t, wpa_suite_auth_key_mgmt_t;
+#define WPA_IE_SUITE_COUNT_LEN	2
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+	BWL_PRE_PACKED_STRUCT struct {
+		uint8 low;
+		uint8 high;
+	} BWL_POST_PACKED_STRUCT count;
+	wpa_pmkid_t list[1];
+} BWL_POST_PACKED_STRUCT wpa_pmkid_list_t;
+
+/* WPA cipher suites */
+#define WPA_CIPHER_NONE		0	/* None */
+#define WPA_CIPHER_WEP_40	1	/* WEP (40-bit) */
+#define WPA_CIPHER_TKIP		2	/* TKIP: default for WPA */
+#define WPA_CIPHER_AES_OCB	3	/* AES (OCB) */
+#define WPA_CIPHER_AES_CCM	4	/* AES (CCM) */
+#define WPA_CIPHER_WEP_104	5	/* WEP (104-bit) */
+#define WPA_CIPHER_BIP		6	/* WEP (104-bit) */
+#define WPA_CIPHER_TPK		7	/* Group addressed traffic not allowed */
+
+
+#define IS_WPA_CIPHER(cipher)	((cipher) == WPA_CIPHER_NONE || \
+				 (cipher) == WPA_CIPHER_WEP_40 || \
+				 (cipher) == WPA_CIPHER_WEP_104 || \
+				 (cipher) == WPA_CIPHER_TKIP || \
+				 (cipher) == WPA_CIPHER_AES_OCB || \
+				 (cipher) == WPA_CIPHER_AES_CCM || \
+				 (cipher) == WPA_CIPHER_TPK)
+
+
+/* WPA TKIP countermeasures parameters */
+#define WPA_TKIP_CM_DETECT	60	/* multiple MIC failure window (seconds) */
+#define WPA_TKIP_CM_BLOCK	60	/* countermeasures active window (seconds) */
+
+/* RSN IE defines */
+#define RSN_CAP_LEN		2	/* Length of RSN capabilities field (2 octets) */
+
+/* RSN Capabilities defined in 802.11i */
+#define RSN_CAP_PREAUTH			0x0001
+#define RSN_CAP_NOPAIRWISE		0x0002
+#define RSN_CAP_PTK_REPLAY_CNTR_MASK	0x000C
+#define RSN_CAP_PTK_REPLAY_CNTR_SHIFT	2
+#define RSN_CAP_GTK_REPLAY_CNTR_MASK	0x0030
+#define RSN_CAP_GTK_REPLAY_CNTR_SHIFT	4
+#define RSN_CAP_1_REPLAY_CNTR		0
+#define RSN_CAP_2_REPLAY_CNTRS		1
+#define RSN_CAP_4_REPLAY_CNTRS		2
+#define RSN_CAP_16_REPLAY_CNTRS		3
+#define RSN_CAP_MFPR			0x0040
+#define RSN_CAP_MFPC			0x0080
+#define RSN_CAP_SPPC			0x0400
+#define RSN_CAP_SPPR			0x0800
+
+/* WPA capabilities defined in 802.11i */
+#define WPA_CAP_4_REPLAY_CNTRS		RSN_CAP_4_REPLAY_CNTRS
+#define WPA_CAP_16_REPLAY_CNTRS		RSN_CAP_16_REPLAY_CNTRS
+#define WPA_CAP_REPLAY_CNTR_SHIFT	RSN_CAP_PTK_REPLAY_CNTR_SHIFT
+#define WPA_CAP_REPLAY_CNTR_MASK	RSN_CAP_PTK_REPLAY_CNTR_MASK
+
+/* WPA capabilities defined in 802.11zD9.0 */
+#define WPA_CAP_PEER_KEY_ENABLE		(0x1 << 1)	/* bit 9 */
+
+/* WPA Specific defines */
+#define WPA_CAP_LEN	RSN_CAP_LEN	/* Length of RSN capabilities in RSN IE (2 octets) */
+#define WPA_PMKID_CNT_LEN	2 	/* Length of RSN PMKID count (2 octests) */
+
+#define	WPA_CAP_WPA2_PREAUTH		RSN_CAP_PREAUTH
+
+#define WPA2_PMKID_COUNT_LEN	2
+
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _proto_wpa_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/wps.h b/drivers/net/wireless/bcmdhd/include/proto/wps.h
new file mode 100644
index 0000000..41424fa
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/wps.h
@@ -0,0 +1,386 @@
+/*
+ * WPS IE definitions
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id$
+ */
+
+#ifndef _WPS_
+#define _WPS_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Data Element Definitions */
+#define WPS_ID_AP_CHANNEL         0x1001
+#define WPS_ID_ASSOC_STATE        0x1002
+#define WPS_ID_AUTH_TYPE          0x1003
+#define WPS_ID_AUTH_TYPE_FLAGS    0x1004
+#define WPS_ID_AUTHENTICATOR      0x1005
+#define WPS_ID_CONFIG_METHODS     0x1008
+#define WPS_ID_CONFIG_ERROR       0x1009
+#define WPS_ID_CONF_URL4          0x100A
+#define WPS_ID_CONF_URL6          0x100B
+#define WPS_ID_CONN_TYPE          0x100C
+#define WPS_ID_CONN_TYPE_FLAGS    0x100D
+#define WPS_ID_CREDENTIAL         0x100E
+#define WPS_ID_DEVICE_NAME        0x1011
+#define WPS_ID_DEVICE_PWD_ID      0x1012
+#define WPS_ID_E_HASH1            0x1014
+#define WPS_ID_E_HASH2            0x1015
+#define WPS_ID_E_SNONCE1          0x1016
+#define WPS_ID_E_SNONCE2          0x1017
+#define WPS_ID_ENCR_SETTINGS      0x1018
+#define WPS_ID_ENCR_TYPE          0x100F
+#define WPS_ID_ENCR_TYPE_FLAGS    0x1010
+#define WPS_ID_ENROLLEE_NONCE     0x101A
+#define WPS_ID_FEATURE_ID         0x101B
+#define WPS_ID_IDENTITY           0x101C
+#define WPS_ID_IDENTITY_PROOF     0x101D
+#define WPS_ID_KEY_WRAP_AUTH      0x101E
+#define WPS_ID_KEY_IDENTIFIER     0x101F
+#define WPS_ID_MAC_ADDR           0x1020
+#define WPS_ID_MANUFACTURER       0x1021
+#define WPS_ID_MSG_TYPE           0x1022
+#define WPS_ID_MODEL_NAME         0x1023
+#define WPS_ID_MODEL_NUMBER       0x1024
+#define WPS_ID_NW_INDEX           0x1026
+#define WPS_ID_NW_KEY             0x1027
+#define WPS_ID_NW_KEY_INDEX       0x1028
+#define WPS_ID_NEW_DEVICE_NAME    0x1029
+#define WPS_ID_NEW_PWD            0x102A
+#define WPS_ID_OOB_DEV_PWD        0x102C
+#define WPS_ID_OS_VERSION         0x102D
+#define WPS_ID_POWER_LEVEL        0x102F
+#define WPS_ID_PSK_CURRENT        0x1030
+#define WPS_ID_PSK_MAX            0x1031
+#define WPS_ID_PUBLIC_KEY         0x1032
+#define WPS_ID_RADIO_ENABLED      0x1033
+#define WPS_ID_REBOOT             0x1034
+#define WPS_ID_REGISTRAR_CURRENT  0x1035
+#define WPS_ID_REGISTRAR_ESTBLSHD 0x1036
+#define WPS_ID_REGISTRAR_LIST     0x1037
+#define WPS_ID_REGISTRAR_MAX      0x1038
+#define WPS_ID_REGISTRAR_NONCE    0x1039
+#define WPS_ID_REQ_TYPE           0x103A
+#define WPS_ID_RESP_TYPE          0x103B
+#define WPS_ID_RF_BAND            0x103C
+#define WPS_ID_R_HASH1            0x103D
+#define WPS_ID_R_HASH2            0x103E
+#define WPS_ID_R_SNONCE1          0x103F
+#define WPS_ID_R_SNONCE2          0x1040
+#define WPS_ID_SEL_REGISTRAR      0x1041
+#define WPS_ID_SERIAL_NUM         0x1042
+#define WPS_ID_SC_STATE           0x1044
+#define WPS_ID_SSID               0x1045
+#define WPS_ID_TOT_NETWORKS       0x1046
+#define WPS_ID_UUID_E             0x1047
+#define WPS_ID_UUID_R             0x1048
+#define WPS_ID_VENDOR_EXT         0x1049
+#define WPS_ID_VERSION            0x104A
+#define WPS_ID_X509_CERT_REQ      0x104B
+#define WPS_ID_X509_CERT          0x104C
+#define WPS_ID_EAP_IDENTITY       0x104D
+#define WPS_ID_MSG_COUNTER        0x104E
+#define WPS_ID_PUBKEY_HASH        0x104F
+#define WPS_ID_REKEY_KEY          0x1050
+#define WPS_ID_KEY_LIFETIME       0x1051
+#define WPS_ID_PERM_CFG_METHODS   0x1052
+#define WPS_ID_SEL_REG_CFG_METHODS 0x1053
+#define WPS_ID_PRIM_DEV_TYPE      0x1054
+#define WPS_ID_SEC_DEV_TYPE_LIST  0x1055
+#define WPS_ID_PORTABLE_DEVICE    0x1056
+#define WPS_ID_AP_SETUP_LOCKED    0x1057
+#define WPS_ID_APP_LIST           0x1058
+#define WPS_ID_EAP_TYPE           0x1059
+#define WPS_ID_INIT_VECTOR        0x1060
+#define WPS_ID_KEY_PROVIDED_AUTO  0x1061
+#define WPS_ID_8021X_ENABLED      0x1062
+#define WPS_ID_WEP_TRANSMIT_KEY   0x1064
+#define WPS_ID_REQ_DEV_TYPE       0x106A
+
+/* WSC 2.0, WFA Vendor Extension Subelements */
+#define WFA_VENDOR_EXT_ID                 "\x00\x37\x2A"
+#define WPS_WFA_SUBID_VERSION2            0x00
+#define WPS_WFA_SUBID_AUTHORIZED_MACS     0x01
+#define WPS_WFA_SUBID_NW_KEY_SHAREABLE    0x02
+#define WPS_WFA_SUBID_REQ_TO_ENROLL       0x03
+#define WPS_WFA_SUBID_SETTINGS_DELAY_TIME 0x04
+#define WPS_WFA_SUBID_REG_CFG_METHODS     0x05
+
+
+/* WCN-NET Windows Rally Vertical Pairing Vendor Extensions */
+#define MS_VENDOR_EXT_ID           "\x00\x01\x37"
+#define WPS_MS_ID_VPI               0x1001	/* Vertical Pairing Identifier TLV */
+#define WPS_MS_ID_TRANSPORT_UUID    0x1002      /* Transport UUID TLV */
+
+/* Vertical Pairing Identifier TLV Definitions */
+#define WPS_MS_VPI_TRANSPORT_NONE   0x00        /* None */
+#define WPS_MS_VPI_TRANSPORT_DPWS   0x01        /* Devices Profile for Web Services */
+#define WPS_MS_VPI_TRANSPORT_UPNP   0x02        /* uPnP */
+#define WPS_MS_VPI_TRANSPORT_SDNWS  0x03        /* Secure Devices Profile for Web Services */
+#define WPS_MS_VPI_NO_PROFILE_REQ   0x00        /* Wi-Fi profile not requested.
+						 * Not supported in Windows 7
+						 */
+#define WPS_MS_VPI_PROFILE_REQ      0x01        /* Wi-Fi profile requested.  */
+
+/* sizes of the fixed size elements */
+#define WPS_ID_AP_CHANNEL_S       2
+#define WPS_ID_ASSOC_STATE_S      2
+#define WPS_ID_AUTH_TYPE_S        2
+#define WPS_ID_AUTH_TYPE_FLAGS_S  2
+#define WPS_ID_AUTHENTICATOR_S    8
+#define WPS_ID_CONFIG_METHODS_S   2
+#define WPS_ID_CONFIG_ERROR_S     2
+#define WPS_ID_CONN_TYPE_S          1
+#define WPS_ID_CONN_TYPE_FLAGS_S    1
+#define WPS_ID_DEVICE_PWD_ID_S      2
+#define WPS_ID_ENCR_TYPE_S          2
+#define WPS_ID_ENCR_TYPE_FLAGS_S    2
+#define WPS_ID_FEATURE_ID_S         4
+#define WPS_ID_MAC_ADDR_S           6
+#define WPS_ID_MSG_TYPE_S           1
+#define WPS_ID_SC_STATE_S           1
+#define WPS_ID_RF_BAND_S            1
+#define WPS_ID_OS_VERSION_S         4
+#define WPS_ID_VERSION_S            1
+#define WPS_ID_SEL_REGISTRAR_S      1
+#define WPS_ID_SEL_REG_CFG_METHODS_S 2
+#define WPS_ID_REQ_TYPE_S           1
+#define WPS_ID_RESP_TYPE_S          1
+#define WPS_ID_AP_SETUP_LOCKED_S    1
+
+/* WSC 2.0, WFA Vendor Extension Subelements */
+#define WPS_WFA_SUBID_VERSION2_S            1
+#define WPS_WFA_SUBID_NW_KEY_SHAREABLE_S    1
+#define WPS_WFA_SUBID_REQ_TO_ENROLL_S       1
+#define WPS_WFA_SUBID_SETTINGS_DELAY_TIME_S 1
+#define WPS_WFA_SUBID_REG_CFG_METHODS_S     2
+
+/* Association states */
+#define WPS_ASSOC_NOT_ASSOCIATED  0
+#define WPS_ASSOC_CONN_SUCCESS    1
+#define WPS_ASSOC_CONFIG_FAIL     2
+#define WPS_ASSOC_ASSOC_FAIL      3
+#define WPS_ASSOC_IP_FAIL         4
+
+/* Authentication types */
+#define WPS_AUTHTYPE_OPEN        0x0001
+#define WPS_AUTHTYPE_WPAPSK      0x0002	/* Deprecated in WSC 2.0 */
+#define WPS_AUTHTYPE_SHARED      0x0004	/* Deprecated in WSC 2.0 */
+#define WPS_AUTHTYPE_WPA         0x0008	/* Deprecated in WSC 2.0 */
+#define WPS_AUTHTYPE_WPA2        0x0010
+#define WPS_AUTHTYPE_WPA2PSK     0x0020
+
+/* Config methods */
+#define WPS_CONFMET_USBA            0x0001	/* Deprecated in WSC 2.0 */
+#define WPS_CONFMET_ETHERNET        0x0002	/* Deprecated in WSC 2.0 */
+#define WPS_CONFMET_LABEL           0x0004
+#define WPS_CONFMET_DISPLAY         0x0008
+#define WPS_CONFMET_EXT_NFC_TOK     0x0010
+#define WPS_CONFMET_INT_NFC_TOK     0x0020
+#define WPS_CONFMET_NFC_INTF        0x0040
+#define WPS_CONFMET_PBC             0x0080
+#define WPS_CONFMET_KEYPAD          0x0100
+/* WSC 2.0 */
+#define WPS_CONFMET_VIRT_PBC        0x0280
+#define WPS_CONFMET_PHY_PBC         0x0480
+#define WPS_CONFMET_VIRT_DISPLAY    0x2008
+#define WPS_CONFMET_PHY_DISPLAY     0x4008
+
+/* WPS error messages */
+#define WPS_ERROR_NO_ERROR                0
+#define WPS_ERROR_OOB_INT_READ_ERR        1
+#define WPS_ERROR_DECRYPT_CRC_FAIL        2
+#define WPS_ERROR_CHAN24_NOT_SUPP         3
+#define WPS_ERROR_CHAN50_NOT_SUPP         4
+#define WPS_ERROR_SIGNAL_WEAK             5	/* Deprecated in WSC 2.0 */
+#define WPS_ERROR_NW_AUTH_FAIL            6	/* Deprecated in WSC 2.0 */
+#define WPS_ERROR_NW_ASSOC_FAIL           7	/* Deprecated in WSC 2.0 */
+#define WPS_ERROR_NO_DHCP_RESP            8	/* Deprecated in WSC 2.0 */
+#define WPS_ERROR_FAILED_DHCP_CONF        9	/* Deprecated in WSC 2.0 */
+#define WPS_ERROR_IP_ADDR_CONFLICT        10	/* Deprecated in WSC 2.0 */
+#define WPS_ERROR_FAIL_CONN_REGISTRAR     11
+#define WPS_ERROR_MULTI_PBC_DETECTED      12
+#define WPS_ERROR_ROGUE_SUSPECTED         13
+#define WPS_ERROR_DEVICE_BUSY             14
+#define WPS_ERROR_SETUP_LOCKED            15
+#define WPS_ERROR_MSG_TIMEOUT             16	/* Deprecated in WSC 2.0 */
+#define WPS_ERROR_REG_SESSION_TIMEOUT     17	/* Deprecated in WSC 2.0 */
+#define WPS_ERROR_DEV_PWD_AUTH_FAIL       18
+#define WPS_ERROR_60GHZ_NOT_SUPPORT       19
+#define WPS_ERROR_PKH_MISMATCH            20	/* Public Key Hash Mismatch */
+
+/* Connection types */
+#define WPS_CONNTYPE_ESS    0x01
+#define WPS_CONNTYPE_IBSS   0x02
+
+/* Device password ID */
+#define WPS_DEVICEPWDID_DEFAULT          0x0000
+#define WPS_DEVICEPWDID_USER_SPEC        0x0001
+#define WPS_DEVICEPWDID_MACHINE_SPEC     0x0002
+#define WPS_DEVICEPWDID_REKEY            0x0003
+#define WPS_DEVICEPWDID_PUSH_BTN         0x0004
+#define WPS_DEVICEPWDID_REG_SPEC         0x0005
+#define WPS_DEVICEPWDID_IBSS             0x0006
+#define WPS_DEVICEPWDID_NFC_CHO          0x0007	/* NFC-Connection-Handover */
+#define WPS_DEVICEPWDID_WFDS             0x0008	/* Wi-Fi Direct Services Specification */
+
+/* Encryption type */
+#define WPS_ENCRTYPE_NONE    0x0001
+#define WPS_ENCRTYPE_WEP     0x0002	/* Deprecated in WSC 2.0 */
+#define WPS_ENCRTYPE_TKIP    0x0004	/* Deprecated in version 2.0. TKIP can only
+					  * be advertised on the AP when Mixed Mode
+					  * is enabled (Encryption Type is 0x000c).
+					  */
+#define WPS_ENCRTYPE_AES     0x0008
+
+
+/* WPS Message Types */
+#define WPS_ID_BEACON            0x01
+#define WPS_ID_PROBE_REQ         0x02
+#define WPS_ID_PROBE_RESP        0x03
+#define WPS_ID_MESSAGE_M1        0x04
+#define WPS_ID_MESSAGE_M2        0x05
+#define WPS_ID_MESSAGE_M2D       0x06
+#define WPS_ID_MESSAGE_M3        0x07
+#define WPS_ID_MESSAGE_M4        0x08
+#define WPS_ID_MESSAGE_M5        0x09
+#define WPS_ID_MESSAGE_M6        0x0A
+#define WPS_ID_MESSAGE_M7        0x0B
+#define WPS_ID_MESSAGE_M8        0x0C
+#define WPS_ID_MESSAGE_ACK       0x0D
+#define WPS_ID_MESSAGE_NACK      0x0E
+#define WPS_ID_MESSAGE_DONE      0x0F
+
+/* WSP private ID for local use */
+#define WPS_PRIVATE_ID_IDENTITY		(WPS_ID_MESSAGE_DONE + 1)
+#define WPS_PRIVATE_ID_WPS_START	(WPS_ID_MESSAGE_DONE + 2)
+#define WPS_PRIVATE_ID_FAILURE		(WPS_ID_MESSAGE_DONE + 3)
+#define WPS_PRIVATE_ID_FRAG		(WPS_ID_MESSAGE_DONE + 4)
+#define WPS_PRIVATE_ID_FRAG_ACK		(WPS_ID_MESSAGE_DONE + 5)
+#define WPS_PRIVATE_ID_EAPOL_START	(WPS_ID_MESSAGE_DONE + 6)
+
+
+/* Device Type categories for primary and secondary device types */
+#define WPS_DEVICE_TYPE_CAT_COMPUTER        1
+#define WPS_DEVICE_TYPE_CAT_INPUT_DEVICE    2
+#define WPS_DEVICE_TYPE_CAT_PRINTER         3
+#define WPS_DEVICE_TYPE_CAT_CAMERA          4
+#define WPS_DEVICE_TYPE_CAT_STORAGE         5
+#define WPS_DEVICE_TYPE_CAT_NW_INFRA        6
+#define WPS_DEVICE_TYPE_CAT_DISPLAYS        7
+#define WPS_DEVICE_TYPE_CAT_MM_DEVICES      8
+#define WPS_DEVICE_TYPE_CAT_GAME_DEVICES    9
+#define WPS_DEVICE_TYPE_CAT_TELEPHONE       10
+#define WPS_DEVICE_TYPE_CAT_AUDIO_DEVICES   11	/* WSC 2.0 */
+
+/* Device Type sub categories for primary and secondary device types */
+#define WPS_DEVICE_TYPE_SUB_CAT_COMP_PC         1
+#define WPS_DEVICE_TYPE_SUB_CAT_COMP_SERVER     2
+#define WPS_DEVICE_TYPE_SUB_CAT_COMP_MEDIA_CTR  3
+#define WPS_DEVICE_TYPE_SUB_CAT_COMP_UM_PC      4	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_COMP_NOTEBOOK   5	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_COMP_DESKTOP    6	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_COMP_MID        7	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_COMP_NETBOOK    8	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_INP_Keyboard    1	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_INP_MOUSE       2	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_INP_JOYSTICK    3	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_INP_TRACKBALL   4	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_INP_GAM_CTRL    5	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_INP_REMOTE      6	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_INP_TOUCHSCREEN 7	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_INP_BIO_READER  8	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_INP_BAR_READER  9	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_PRTR_PRINTER    1
+#define WPS_DEVICE_TYPE_SUB_CAT_PRTR_SCANNER    2
+#define WPS_DEVICE_TYPE_SUB_CAT_PRTR_FAX        3	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_PRTR_COPIER     4	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_PRTR_ALLINONE   5	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_CAM_DGTL_STILL  1
+#define WPS_DEVICE_TYPE_SUB_CAT_CAM_VIDEO_CAM   2	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_CAM_WEB_CAM     3	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_CAM_SECU_CAM    4	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_STOR_NAS        1
+#define WPS_DEVICE_TYPE_SUB_CAT_NW_AP           1
+#define WPS_DEVICE_TYPE_SUB_CAT_NW_ROUTER       2
+#define WPS_DEVICE_TYPE_SUB_CAT_NW_SWITCH       3
+#define WPS_DEVICE_TYPE_SUB_CAT_NW_GATEWAY      4	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_NW_BRIDGE       5	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_DISP_TV         1
+#define WPS_DEVICE_TYPE_SUB_CAT_DISP_PIC_FRAME  2
+#define WPS_DEVICE_TYPE_SUB_CAT_DISP_PROJECTOR  3
+#define WPS_DEVICE_TYPE_SUB_CAT_DISP_MONITOR    4	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_MM_DAR          1
+#define WPS_DEVICE_TYPE_SUB_CAT_MM_PVR          2
+#define WPS_DEVICE_TYPE_SUB_CAT_MM_MCX          3
+#define WPS_DEVICE_TYPE_SUB_CAT_MM_STB          4	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_MM_MS_ME        5	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_MM_PVP          6	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_GAM_XBOX        1
+#define WPS_DEVICE_TYPE_SUB_CAT_GAM_XBOX_360    2
+#define WPS_DEVICE_TYPE_SUB_CAT_GAM_PS          3
+#define WPS_DEVICE_TYPE_SUB_CAT_GAM_GC          4	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_GAM_PGD         5	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_PHONE_WM        1
+#define WPS_DEVICE_TYPE_SUB_CAT_PHONE_PSM       2	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_PHONE_PDM       3	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_PHONE_SSM       4	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_PHONE_SDM       5	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_TUNER     1	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_SPEAKERS  2	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_PMP       3	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_HEADSET   4	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_HPHONE    5	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_MPHONE    6	/* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_HTS       7	/* WSC 2.0 */
+
+
+/* Device request/response type */
+#define WPS_MSGTYPE_ENROLLEE_INFO_ONLY    0x00
+#define WPS_MSGTYPE_ENROLLEE_OPEN_8021X   0x01
+#define WPS_MSGTYPE_REGISTRAR             0x02
+#define WPS_MSGTYPE_AP_WLAN_MGR           0x03
+
+/* RF Band */
+#define WPS_RFBAND_24GHZ    0x01
+#define WPS_RFBAND_50GHZ    0x02
+
+/* Simple Config state */
+#define WPS_SCSTATE_UNCONFIGURED    0x01
+#define WPS_SCSTATE_CONFIGURED      0x02
+#define WPS_SCSTATE_OFF 11
+
+/* WPS Vendor extension key */
+#define WPS_OUI_HEADER_LEN 2
+#define WPS_OUI_HEADER_SIZE 4
+#define WPS_OUI_FIXED_HEADER_OFF 16
+#define WPS_WFA_SUBID_V2_OFF 3
+#define WPS_WFA_V2_OFF 5
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _WPS_ */
diff --git a/drivers/net/wireless/bcmdhd/include/sbchipc.h b/drivers/net/wireless/bcmdhd/include/sbchipc.h
new file mode 100644
index 0000000..1fbeced
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sbchipc.h
@@ -0,0 +1,3646 @@
+/*
+ * SiliconBackplane Chipcommon core hardware definitions.
+ *
+ * The chipcommon core provides chip identification, SB control,
+ * JTAG, 0/1/2 UARTs, clock frequency control, a watchdog interrupt timer,
+ * GPIO interface, extbus, and support for serial and parallel flashes.
+ *
+ * $Id: sbchipc.h 474281 2014-04-30 18:24:55Z $
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ */
+
+#ifndef	_SBCHIPC_H
+#define	_SBCHIPC_H
+
+#if !defined(_LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLY__)
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define	_PADLINE(line)	pad ## line
+#define	_XSTR(line)	_PADLINE(line)
+#define	PAD		_XSTR(__LINE__)
+#endif	/* PAD */
+
+/**
+ * In chipcommon rev 49 the pmu registers have been moved from chipc to the pmu core if the
+ * 'AOBPresent' bit of 'CoreCapabilitiesExt' is set. If this field is set, the traditional chipc to
+ * [pmu|gci|sreng] register interface is deprecated and removed. These register blocks would instead
+ * be assigned their respective chipc-specific address space and connected to the Always On
+ * Backplane via the APB interface.
+ */
+typedef volatile struct {
+	uint32  PAD[384];
+	uint32	pmucontrol;		/* 0x600 */
+	uint32	pmucapabilities;
+	uint32	pmustatus;
+	uint32	res_state;
+	uint32	res_pending;
+	uint32	pmutimer;
+	uint32	min_res_mask;
+	uint32	max_res_mask;
+	uint32	res_table_sel;
+	uint32	res_dep_mask;
+	uint32	res_updn_timer;
+	uint32	res_timer;
+	uint32	clkstretch;
+	uint32	pmuwatchdog;
+	uint32	gpiosel;		/* 0x638, rev >= 1 */
+	uint32	gpioenable;		/* 0x63c, rev >= 1 */
+	uint32	res_req_timer_sel;
+	uint32	res_req_timer;
+	uint32	res_req_mask;
+	uint32	PAD;
+	uint32	chipcontrol_addr;	/* 0x650 */
+	uint32	chipcontrol_data;	/* 0x654 */
+	uint32	regcontrol_addr;
+	uint32	regcontrol_data;
+	uint32	pllcontrol_addr;
+	uint32	pllcontrol_data;
+	uint32	pmustrapopt;		/* 0x668, corerev >= 28 */
+	uint32	pmu_xtalfreq;		/* 0x66C, pmurev >= 10 */
+	uint32  retention_ctl;		/* 0x670 */
+	uint32  PAD[3];
+	uint32  retention_grpidx;	/* 0x680 */
+	uint32  retention_grpctl;	/* 0x684 */
+	uint32  PAD[20];
+	uint32	pmucontrol_ext;		/* 0x6d8 */
+	uint32	slowclkperiod;		/* 0x6dc */
+	uint32	PAD[8];
+	uint32	pmuintmask0;		/* 0x700 */
+	uint32	pmuintmask1;		/* 0x704 */
+	uint32  PAD[14];
+	uint32  pmuintstatus;		/* 0x740 */
+} pmuregs_t;
+
+typedef struct eci_prerev35 {
+	uint32	eci_output;
+	uint32	eci_control;
+	uint32	eci_inputlo;
+	uint32	eci_inputmi;
+	uint32	eci_inputhi;
+	uint32	eci_inputintpolaritylo;
+	uint32	eci_inputintpolaritymi;
+	uint32	eci_inputintpolarityhi;
+	uint32	eci_intmasklo;
+	uint32	eci_intmaskmi;
+	uint32	eci_intmaskhi;
+	uint32	eci_eventlo;
+	uint32	eci_eventmi;
+	uint32	eci_eventhi;
+	uint32	eci_eventmasklo;
+	uint32	eci_eventmaskmi;
+	uint32	eci_eventmaskhi;
+	uint32	PAD[3];
+} eci_prerev35_t;
+
+typedef struct eci_rev35 {
+	uint32	eci_outputlo;
+	uint32	eci_outputhi;
+	uint32	eci_controllo;
+	uint32	eci_controlhi;
+	uint32	eci_inputlo;
+	uint32	eci_inputhi;
+	uint32	eci_inputintpolaritylo;
+	uint32	eci_inputintpolarityhi;
+	uint32	eci_intmasklo;
+	uint32	eci_intmaskhi;
+	uint32	eci_eventlo;
+	uint32	eci_eventhi;
+	uint32	eci_eventmasklo;
+	uint32	eci_eventmaskhi;
+	uint32	eci_auxtx;
+	uint32	eci_auxrx;
+	uint32	eci_datatag;
+	uint32	eci_uartescvalue;
+	uint32	eci_autobaudctr;
+	uint32	eci_uartfifolevel;
+} eci_rev35_t;
+
+typedef struct flash_config {
+	uint32	PAD[19];
+	/* Flash struct configuration registers (0x18c) for BCM4706 (corerev = 31) */
+	uint32 flashstrconfig;
+} flash_config_t;
+
+typedef volatile struct {
+	uint32	chipid;			/* 0x0 */
+	uint32	capabilities;
+	uint32	corecontrol;		/* corerev >= 1 */
+	uint32	bist;
+
+	/* OTP */
+	uint32	otpstatus;		/* 0x10, corerev >= 10 */
+	uint32	otpcontrol;
+	uint32	otpprog;
+	uint32	otplayout;		/* corerev >= 23 */
+
+	/* Interrupt control */
+	uint32	intstatus;		/* 0x20 */
+	uint32	intmask;
+
+	/* Chip specific regs */
+	uint32	chipcontrol;		/* 0x28, rev >= 11 */
+	uint32	chipstatus;		/* 0x2c, rev >= 11 */
+
+	/* Jtag Master */
+	uint32	jtagcmd;		/* 0x30, rev >= 10 */
+	uint32	jtagir;
+	uint32	jtagdr;
+	uint32	jtagctrl;
+
+	/* serial flash interface registers */
+	uint32	flashcontrol;		/* 0x40 */
+	uint32	flashaddress;
+	uint32	flashdata;
+	uint32	otplayoutextension;	/* rev >= 35 */
+
+	/* Silicon backplane configuration broadcast control */
+	uint32	broadcastaddress;	/* 0x50 */
+	uint32	broadcastdata;
+
+	/* gpio - cleared only by power-on-reset */
+	uint32	gpiopullup;		/* 0x58, corerev >= 20 */
+	uint32	gpiopulldown;		/* 0x5c, corerev >= 20 */
+	uint32	gpioin;			/* 0x60 */
+	uint32	gpioout;		/* 0x64 */
+	uint32	gpioouten;		/* 0x68 */
+	uint32	gpiocontrol;		/* 0x6C */
+	uint32	gpiointpolarity;	/* 0x70 */
+	uint32	gpiointmask;		/* 0x74 */
+
+	/* GPIO events corerev >= 11 */
+	uint32	gpioevent;
+	uint32	gpioeventintmask;
+
+	/* Watchdog timer */
+	uint32	watchdog;		/* 0x80 */
+
+	/* GPIO events corerev >= 11 */
+	uint32	gpioeventintpolarity;
+
+	/* GPIO based LED powersave registers corerev >= 16 */
+	uint32  gpiotimerval;		/* 0x88 */
+	uint32  gpiotimeroutmask;
+
+	/* clock control */
+	uint32	clockcontrol_n;		/* 0x90 */
+	uint32	clockcontrol_sb;	/* aka m0 */
+	uint32	clockcontrol_pci;	/* aka m1 */
+	uint32	clockcontrol_m2;	/* mii/uart/mipsref */
+	uint32	clockcontrol_m3;	/* cpu */
+	uint32	clkdiv;			/* corerev >= 3 */
+	uint32	gpiodebugsel;		/* corerev >= 28 */
+	uint32	capabilities_ext;               	/* 0xac  */
+
+	/* pll delay registers (corerev >= 4) */
+	uint32	pll_on_delay;		/* 0xb0 */
+	uint32	fref_sel_delay;
+	uint32	slow_clk_ctl;		/* 5 < corerev < 10 */
+	uint32	PAD;
+
+	/* Instaclock registers (corerev >= 10) */
+	uint32	system_clk_ctl;		/* 0xc0 */
+	uint32	clkstatestretch;
+	uint32	PAD[2];
+
+	/* Indirect backplane access (corerev >= 22) */
+	uint32	bp_addrlow;		/* 0xd0 */
+	uint32	bp_addrhigh;
+	uint32	bp_data;
+	uint32	PAD;
+	uint32	bp_indaccess;
+	/* SPI registers, corerev >= 37 */
+	uint32	gsioctrl;
+	uint32	gsioaddress;
+	uint32	gsiodata;
+
+	/* More clock dividers (corerev >= 32) */
+	uint32	clkdiv2;
+	/* FAB ID (corerev >= 40) */
+	uint32	otpcontrol1;
+	uint32	fabid;			/* 0xf8 */
+
+	/* In AI chips, pointer to erom */
+	uint32	eromptr;		/* 0xfc */
+
+	/* ExtBus control registers (corerev >= 3) */
+	uint32	pcmcia_config;		/* 0x100 */
+	uint32	pcmcia_memwait;
+	uint32	pcmcia_attrwait;
+	uint32	pcmcia_iowait;
+	uint32	ide_config;
+	uint32	ide_memwait;
+	uint32	ide_attrwait;
+	uint32	ide_iowait;
+	uint32	prog_config;
+	uint32	prog_waitcount;
+	uint32	flash_config;
+	uint32	flash_waitcount;
+	uint32  SECI_config;		/* 0x130 SECI configuration */
+	uint32	SECI_status;
+	uint32	SECI_statusmask;
+	uint32	SECI_rxnibchanged;
+
+	uint32	PAD[20];
+
+	/* SROM interface (corerev >= 32) */
+	uint32	sromcontrol;		/* 0x190 */
+	uint32	sromaddress;
+	uint32	sromdata;
+	uint32	PAD[1];				/* 0x19C */
+	/* NAND flash registers for BCM4706 (corerev = 31) */
+	uint32  nflashctrl;         /* 0x1a0 */
+	uint32  nflashconf;
+	uint32  nflashcoladdr;
+	uint32  nflashrowaddr;
+	uint32  nflashdata;
+	uint32  nflashwaitcnt0;		/* 0x1b4 */
+	uint32  PAD[2];
+
+	uint32  seci_uart_data;		/* 0x1C0 */
+	uint32  seci_uart_bauddiv;
+	uint32  seci_uart_fcr;
+	uint32  seci_uart_lcr;
+	uint32  seci_uart_mcr;
+	uint32  seci_uart_lsr;
+	uint32  seci_uart_msr;
+	uint32  seci_uart_baudadj;
+	/* Clock control and hardware workarounds (corerev >= 20) */
+	uint32	clk_ctl_st;		/* 0x1e0 */
+	uint32	hw_war;
+	uint32	PAD[70];
+
+	/* UARTs */
+	uint8	uart0data;		/* 0x300 */
+	uint8	uart0imr;
+	uint8	uart0fcr;
+	uint8	uart0lcr;
+	uint8	uart0mcr;
+	uint8	uart0lsr;
+	uint8	uart0msr;
+	uint8	uart0scratch;
+	uint8	PAD[248];		/* corerev >= 1 */
+
+	uint8	uart1data;		/* 0x400 */
+	uint8	uart1imr;
+	uint8	uart1fcr;
+	uint8	uart1lcr;
+	uint8	uart1mcr;
+	uint8	uart1lsr;
+	uint8	uart1msr;
+	uint8	uart1scratch;		/* 0x407 */
+	uint32	PAD[62];
+
+	/* save/restore, corerev >= 48 */
+	uint32	sr_capability;		/* 0x500 */
+	uint32	sr_control0;		/* 0x504 */
+	uint32	sr_control1;		/* 0x508 */
+	uint32  gpio_control;		/* 0x50C */
+	uint32	PAD[60];
+
+	/* PMU registers (corerev >= 20) */
+	/* Note: all timers driven by ILP clock are updated asynchronously to HT/ALP.
+	 * The CPU must read them twice, compare, and retry if different.
+	 */
+	uint32	pmucontrol;		/* 0x600 */
+	uint32	pmucapabilities;
+	uint32	pmustatus;
+	uint32	res_state;
+	uint32	res_pending;
+	uint32	pmutimer;
+	uint32	min_res_mask;
+	uint32	max_res_mask;
+	uint32	res_table_sel;
+	uint32	res_dep_mask;
+	uint32	res_updn_timer;
+	uint32	res_timer;
+	uint32	clkstretch;
+	uint32	pmuwatchdog;
+	uint32	gpiosel;		/* 0x638, rev >= 1 */
+	uint32	gpioenable;		/* 0x63c, rev >= 1 */
+	uint32	res_req_timer_sel;
+	uint32	res_req_timer;
+	uint32	res_req_mask;
+	uint32	PAD;
+	uint32	chipcontrol_addr;	/* 0x650 */
+	uint32	chipcontrol_data;	/* 0x654 */
+	uint32	regcontrol_addr;
+	uint32	regcontrol_data;
+	uint32	pllcontrol_addr;
+	uint32	pllcontrol_data;
+	uint32	pmustrapopt;		/* 0x668, corerev >= 28 */
+	uint32	pmu_xtalfreq;		/* 0x66C, pmurev >= 10 */
+	uint32  retention_ctl;		/* 0x670 */
+	uint32  PAD[3];
+	uint32  retention_grpidx;	/* 0x680 */
+	uint32  retention_grpctl;	/* 0x684 */
+	uint32  PAD[20];
+	uint32	pmucontrol_ext;		/* 0x6d8 */
+	uint32	slowclkperiod;		/* 0x6dc */
+	uint32	PAD[8];
+	uint32	pmuintmask0;		/* 0x700 */
+	uint32	pmuintmask1;		/* 0x704 */
+	uint32  PAD[14];
+	uint32  pmuintstatus;		/* 0x740 */
+	uint32	PAD[47];
+	uint16	sromotp[512];		/* 0x800 */
+#ifdef NFLASH_SUPPORT
+	/* Nand flash MLC controller registers (corerev >= 38) */
+	uint32	nand_revision;		/* 0xC00 */
+	uint32	nand_cmd_start;
+	uint32	nand_cmd_addr_x;
+	uint32	nand_cmd_addr;
+	uint32	nand_cmd_end_addr;
+	uint32	nand_cs_nand_select;
+	uint32	nand_cs_nand_xor;
+	uint32	PAD;
+	uint32	nand_spare_rd0;
+	uint32	nand_spare_rd4;
+	uint32	nand_spare_rd8;
+	uint32	nand_spare_rd12;
+	uint32	nand_spare_wr0;
+	uint32	nand_spare_wr4;
+	uint32	nand_spare_wr8;
+	uint32	nand_spare_wr12;
+	uint32	nand_acc_control;
+	uint32	PAD;
+	uint32	nand_config;
+	uint32	PAD;
+	uint32	nand_timing_1;
+	uint32	nand_timing_2;
+	uint32	nand_semaphore;
+	uint32	PAD;
+	uint32	nand_devid;
+	uint32	nand_devid_x;
+	uint32	nand_block_lock_status;
+	uint32	nand_intfc_status;
+	uint32	nand_ecc_corr_addr_x;
+	uint32	nand_ecc_corr_addr;
+	uint32	nand_ecc_unc_addr_x;
+	uint32	nand_ecc_unc_addr;
+	uint32	nand_read_error_count;
+	uint32	nand_corr_stat_threshold;
+	uint32	PAD[2];
+	uint32	nand_read_addr_x;
+	uint32	nand_read_addr;
+	uint32	nand_page_program_addr_x;
+	uint32	nand_page_program_addr;
+	uint32	nand_copy_back_addr_x;
+	uint32	nand_copy_back_addr;
+	uint32	nand_block_erase_addr_x;
+	uint32	nand_block_erase_addr;
+	uint32	nand_inv_read_addr_x;
+	uint32	nand_inv_read_addr;
+	uint32	PAD[2];
+	uint32	nand_blk_wr_protect;
+	uint32	PAD[3];
+	uint32	nand_acc_control_cs1;
+	uint32	nand_config_cs1;
+	uint32	nand_timing_1_cs1;
+	uint32	nand_timing_2_cs1;
+	uint32	PAD[20];
+	uint32	nand_spare_rd16;
+	uint32	nand_spare_rd20;
+	uint32	nand_spare_rd24;
+	uint32	nand_spare_rd28;
+	uint32	nand_cache_addr;
+	uint32	nand_cache_data;
+	uint32	nand_ctrl_config;
+	uint32	nand_ctrl_status;
+#endif /* NFLASH_SUPPORT */
+	uint32  gci_corecaps0; /* GCI starting at 0xC00 */
+	uint32  gci_corecaps1;
+	uint32  gci_corecaps2;
+	uint32  gci_corectrl;
+	uint32  gci_corestat; /* 0xC10 */
+	uint32  gci_intstat; /* 0xC14 */
+	uint32  gci_intmask; /* 0xC18 */
+	uint32  gci_wakemask; /* 0xC1C */
+	uint32  gci_levelintstat; /* 0xC20 */
+	uint32  gci_eventintstat; /* 0xC24 */
+	uint32  PAD[6];
+	uint32  gci_indirect_addr; /* 0xC40 */
+	uint32  gci_gpioctl; /* 0xC44 */
+	uint32	gci_gpiostatus;
+	uint32  gci_gpiomask; /* 0xC4C */
+	uint32  PAD;
+	uint32  gci_miscctl; /* 0xC54 */
+	uint32	gci_gpiointmask;
+	uint32	gci_gpiowakemask;
+	uint32  gci_input[32]; /* C60 */
+	uint32  gci_event[32]; /* CE0 */
+	uint32  gci_output[4]; /* D60 */
+	uint32  gci_control_0; /* 0xD70 */
+	uint32  gci_control_1; /* 0xD74 */
+	uint32  gci_intpolreg; /* 0xD78 */
+	uint32  gci_levelintmask; /* 0xD7C */
+	uint32  gci_eventintmask; /* 0xD80 */
+	uint32  PAD[3];
+	uint32  gci_inbandlevelintmask; /* 0xD90 */
+	uint32  gci_inbandeventintmask; /* 0xD94 */
+	uint32  PAD[2];
+	uint32  gci_seciauxtx; /* 0xDA0 */
+	uint32  gci_seciauxrx; /* 0xDA4 */
+	uint32  gci_secitx_datatag; /* 0xDA8 */
+	uint32  gci_secirx_datatag; /* 0xDAC */
+	uint32  gci_secitx_datamask; /* 0xDB0 */
+	uint32  gci_seciusef0tx_reg; /* 0xDB4 */
+	uint32  gci_secif0tx_offset; /* 0xDB8 */
+	uint32  gci_secif0rx_offset; /* 0xDBC */
+	uint32  gci_secif1tx_offset; /* 0xDC0 */
+	uint32	gci_rxfifo_common_ctrl; /* 0xDC4 */
+	uint32	gci_rxfifoctrl; /* 0xDC8 */
+	uint32	gci_uartreadid; /* DCC */
+	uint32  gci_uartescval; /* DD0 */
+	uint32	PAD;
+	uint32	gci_secififolevel; /* DD8 */
+	uint32	gci_seciuartdata; /* DDC */
+	uint32  gci_secibauddiv; /* DE0 */
+	uint32  gci_secifcr; /* DE4 */
+	uint32  gci_secilcr; /* DE8 */
+	uint32  gci_secimcr; /* DEC */
+	uint32	gci_secilsr; /* DF0 */
+	uint32	gci_secimsr; /* DF4 */
+	uint32  gci_baudadj; /* DF8 */
+	uint32  PAD;
+	uint32  gci_chipctrl; /* 0xE00 */
+	uint32  gci_chipsts; /* 0xE04 */
+	uint32	gci_gpioout; /* 0xE08 */
+	uint32	gci_gpioout_read; /* 0xE0C */
+	uint32	gci_mpwaketx; /* 0xE10 */
+	uint32	gci_mpwakedetect; /* 0xE14 */
+	uint32	gci_seciin_ctrl; /* 0xE18 */
+	uint32	gci_seciout_ctrl; /* 0xE1C */
+	uint32	gci_seciin_auxfifo_en; /* 0xE20 */
+	uint32	gci_seciout_txen_txbr; /* 0xE24 */
+	uint32	gci_seciin_rxbrstatus; /* 0xE28 */
+	uint32	gci_seciin_rxerrstatus; /* 0xE2C */
+	uint32	gci_seciin_fcstatus; /* 0xE30 */
+	uint32	gci_seciout_txstatus; /* 0xE34 */
+	uint32	gci_seciout_txbrstatus; /* 0xE38 */
+} chipcregs_t;
+
+#endif /* !_LANGUAGE_ASSEMBLY && !__ASSEMBLY__ */
+
+
+#define	CC_CHIPID		0
+#define	CC_CAPABILITIES		4
+#define	CC_CHIPST		0x2c
+#define	CC_EROMPTR		0xfc
+
+#define	CC_OTPST		0x10
+#define	CC_INTSTATUS		0x20
+#define	CC_INTMASK		0x24
+#define	CC_JTAGCMD		0x30
+#define	CC_JTAGIR		0x34
+#define	CC_JTAGDR		0x38
+#define	CC_JTAGCTRL		0x3c
+#define	CC_GPIOPU		0x58
+#define	CC_GPIOPD		0x5c
+#define	CC_GPIOIN		0x60
+#define	CC_GPIOOUT		0x64
+#define	CC_GPIOOUTEN		0x68
+#define	CC_GPIOCTRL		0x6c
+#define	CC_GPIOPOL		0x70
+#define	CC_GPIOINTM		0x74
+#define	CC_GPIOEVENT		0x78
+#define	CC_GPIOEVENTMASK	0x7c
+#define	CC_WATCHDOG		0x80
+#define	CC_GPIOEVENTPOL		0x84
+#define	CC_CLKC_N		0x90
+#define	CC_CLKC_M0		0x94
+#define	CC_CLKC_M1		0x98
+#define	CC_CLKC_M2		0x9c
+#define	CC_CLKC_M3		0xa0
+#define	CC_CLKDIV		0xa4
+#define	CC_SYS_CLK_CTL		0xc0
+#define	CC_CLK_CTL_ST		SI_CLK_CTL_ST
+#define	PMU_CTL			0x600
+#define	PMU_CAP			0x604
+#define	PMU_ST			0x608
+#define PMU_RES_STATE		0x60c
+#define PMU_RES_PENDING		0x610
+#define PMU_TIMER		0x614
+#define	PMU_MIN_RES_MASK	0x618
+#define	PMU_MAX_RES_MASK	0x61c
+#define CC_CHIPCTL_ADDR         0x650
+#define CC_CHIPCTL_DATA         0x654
+#define PMU_REG_CONTROL_ADDR	0x658
+#define PMU_REG_CONTROL_DATA	0x65C
+#define PMU_PLL_CONTROL_ADDR 	0x660
+#define PMU_PLL_CONTROL_DATA 	0x664
+#define CC_SROM_CTRL		0x190
+#define	CC_SROM_OTP		0x800		/* SROM/OTP address space */
+#define CC_GCI_INDIRECT_ADDR_REG	0xC40
+#define CC_GCI_CHIP_CTRL_REG	0xE00
+#define CC_GCI_CC_OFFSET_2	2
+#define CC_GCI_CC_OFFSET_5	5
+#define CC_SWD_CTRL		0x380
+#define CC_SWD_REQACK		0x384
+#define CC_SWD_DATA		0x388
+
+
+#define CHIPCTRLREG0 0x0
+#define CHIPCTRLREG1 0x1
+#define CHIPCTRLREG2 0x2
+#define CHIPCTRLREG3 0x3
+#define CHIPCTRLREG4 0x4
+#define CHIPCTRLREG5 0x5
+#define CHIPCTRLREG6 0x6
+#define REGCTRLREG4 0x4
+#define REGCTRLREG5 0x5
+#define REGCTRLREG6 0x6
+#define MINRESMASKREG 0x618
+#define MAXRESMASKREG 0x61c
+#define CHIPCTRLADDR 0x650
+#define CHIPCTRLDATA 0x654
+#define RSRCTABLEADDR 0x620
+#define PMU_RES_DEP_MASK 0x624
+#define RSRCUPDWNTIME 0x628
+#define PMUREG_RESREQ_MASK 0x68c
+#define EXT_LPO_AVAIL 0x100
+#define LPO_SEL					(1 << 0)
+#define CC_EXT_LPO_PU 0x200000
+#define GC_EXT_LPO_PU 0x2
+#define CC_INT_LPO_PU 0x100000
+#define GC_INT_LPO_PU 0x1
+#define EXT_LPO_SEL 0x8
+#define INT_LPO_SEL 0x4
+#define ENABLE_FINE_CBUCK_CTRL 			(1 << 30)
+#define REGCTRL5_PWM_AUTO_CTRL_MASK 		0x007e0000
+#define REGCTRL5_PWM_AUTO_CTRL_SHIFT		17
+#define REGCTRL6_PWM_AUTO_CTRL_MASK 		0x3fff0000
+#define REGCTRL6_PWM_AUTO_CTRL_SHIFT		16
+
+#ifdef SR_DEBUG
+#define SUBCORE_POWER_ON 0x0001
+#define PHY_POWER_ON 0x0010
+#define VDDM_POWER_ON 0x0100
+#define MEMLPLDO_POWER_ON 0x1000
+#define SUBCORE_POWER_ON_CHK 0x00040000
+#define PHY_POWER_ON_CHK 0x00080000
+#define VDDM_POWER_ON_CHK 0x00100000
+#define MEMLPLDO_POWER_ON_CHK 0x00200000
+#endif /* SR_DEBUG */
+
+#ifdef NFLASH_SUPPORT
+/* NAND flash support */
+#define CC_NAND_REVISION	0xC00
+#define CC_NAND_CMD_START	0xC04
+#define CC_NAND_CMD_ADDR	0xC0C
+#define CC_NAND_SPARE_RD_0	0xC20
+#define CC_NAND_SPARE_RD_4	0xC24
+#define CC_NAND_SPARE_RD_8	0xC28
+#define CC_NAND_SPARE_RD_C	0xC2C
+#define CC_NAND_CONFIG		0xC48
+#define CC_NAND_DEVID		0xC60
+#define CC_NAND_DEVID_EXT	0xC64
+#define CC_NAND_INTFC_STATUS	0xC6C
+#endif /* NFLASH_SUPPORT */
+
+/* chipid */
+#define	CID_ID_MASK		0x0000ffff	/* Chip Id mask */
+#define	CID_REV_MASK		0x000f0000	/* Chip Revision mask */
+#define	CID_REV_SHIFT		16		/* Chip Revision shift */
+#define	CID_PKG_MASK		0x00f00000	/* Package Option mask */
+#define	CID_PKG_SHIFT		20		/* Package Option shift */
+#define	CID_CC_MASK		0x0f000000	/* CoreCount (corerev >= 4) */
+#define CID_CC_SHIFT		24
+#define	CID_TYPE_MASK		0xf0000000	/* Chip Type */
+#define CID_TYPE_SHIFT		28
+
+/* capabilities */
+#define	CC_CAP_UARTS_MASK	0x00000003	/* Number of UARTs */
+#define CC_CAP_MIPSEB		0x00000004	/* MIPS is in big-endian mode */
+#define CC_CAP_UCLKSEL		0x00000018	/* UARTs clock select */
+#define CC_CAP_UINTCLK		0x00000008	/* UARTs are driven by internal divided clock */
+#define CC_CAP_UARTGPIO		0x00000020	/* UARTs own GPIOs 15:12 */
+#define CC_CAP_EXTBUS_MASK	0x000000c0	/* External bus mask */
+#define CC_CAP_EXTBUS_NONE	0x00000000	/* No ExtBus present */
+#define CC_CAP_EXTBUS_FULL	0x00000040	/* ExtBus: PCMCIA, IDE & Prog */
+#define CC_CAP_EXTBUS_PROG	0x00000080	/* ExtBus: ProgIf only */
+#define	CC_CAP_FLASH_MASK	0x00000700	/* Type of flash */
+#define	CC_CAP_PLL_MASK		0x00038000	/* Type of PLL */
+#define CC_CAP_PWR_CTL		0x00040000	/* Power control */
+#define CC_CAP_OTPSIZE		0x00380000	/* OTP Size (0 = none) */
+#define CC_CAP_OTPSIZE_SHIFT	19		/* OTP Size shift */
+#define CC_CAP_OTPSIZE_BASE	5		/* OTP Size base */
+#define CC_CAP_JTAGP		0x00400000	/* JTAG Master Present */
+#define CC_CAP_ROM		0x00800000	/* Internal boot rom active */
+#define CC_CAP_BKPLN64		0x08000000	/* 64-bit backplane */
+#define	CC_CAP_PMU		0x10000000	/* PMU Present, rev >= 20 */
+#define	CC_CAP_ECI		0x20000000	/* ECI Present, rev >= 21 */
+#define	CC_CAP_SROM		0x40000000	/* Srom Present, rev >= 32 */
+#define	CC_CAP_NFLASH		0x80000000	/* Nand flash present, rev >= 35 */
+
+#define	CC_CAP2_SECI		0x00000001	/* SECI Present, rev >= 36 */
+#define	CC_CAP2_GSIO		0x00000002	/* GSIO (spi/i2c) present, rev >= 37 */
+
+/* capabilities extension */
+#define CC_CAP_EXT_SECI_PRESENT	0x00000001    /* SECI present */
+#define CC_CAP_EXT_GSIO_PRESENT	0x00000002    /* GSIO present */
+#define CC_CAP_EXT_GCI_PRESENT  0x00000004    /* GCI present */
+#define CC_CAP_EXT_AOB_PRESENT  0x00000040    /* AOB present */
+
+/* WL Channel Info to BT via GCI - bits 40 - 47 */
+#define GCI_WL_CHN_INFO_MASK 	(0xFF00)
+/* PLL type */
+#define PLL_NONE		0x00000000
+#define PLL_TYPE1		0x00010000	/* 48MHz base, 3 dividers */
+#define PLL_TYPE2		0x00020000	/* 48MHz, 4 dividers */
+#define PLL_TYPE3		0x00030000	/* 25MHz, 2 dividers */
+#define PLL_TYPE4		0x00008000	/* 48MHz, 4 dividers */
+#define PLL_TYPE5		0x00018000	/* 25MHz, 4 dividers */
+#define PLL_TYPE6		0x00028000	/* 100/200 or 120/240 only */
+#define PLL_TYPE7		0x00038000	/* 25MHz, 4 dividers */
+
+/* ILP clock */
+#define	ILP_CLOCK		32000
+
+/* ALP clock on pre-PMU chips */
+#define	ALP_CLOCK		20000000
+
+#ifdef CFG_SIM
+#define NS_ALP_CLOCK		84922
+#define NS_SLOW_ALP_CLOCK	84922
+#define NS_CPU_CLOCK		534500
+#define NS_SLOW_CPU_CLOCK	534500
+#define NS_SI_CLOCK		271750
+#define NS_SLOW_SI_CLOCK	271750
+#define NS_FAST_MEM_CLOCK	271750
+#define NS_MEM_CLOCK		271750
+#define NS_SLOW_MEM_CLOCK	271750
+#else
+#define NS_ALP_CLOCK		125000000
+#define NS_SLOW_ALP_CLOCK	100000000
+#define NS_CPU_CLOCK		1000000000
+#define NS_SLOW_CPU_CLOCK	800000000
+#define NS_SI_CLOCK		250000000
+#define NS_SLOW_SI_CLOCK	200000000
+#define NS_FAST_MEM_CLOCK	800000000
+#define NS_MEM_CLOCK		533000000
+#define NS_SLOW_MEM_CLOCK	400000000
+#endif /* CFG_SIM */
+
+/* HT clock */
+#define	HT_CLOCK		80000000
+
+/* corecontrol */
+#define CC_UARTCLKO		0x00000001	/* Drive UART with internal clock */
+#define	CC_SE			0x00000002	/* sync clk out enable (corerev >= 3) */
+#define CC_ASYNCGPIO	0x00000004	/* 1=generate GPIO interrupt without backplane clock */
+#define CC_UARTCLKEN		0x00000008	/* enable UART Clock (corerev > = 21 */
+
+/* 4321 chipcontrol */
+#define CHIPCTRL_4321A0_DEFAULT	0x3a4
+#define CHIPCTRL_4321A1_DEFAULT	0x0a4
+#define CHIPCTRL_4321_PLL_DOWN	0x800000	/* serdes PLL down override */
+
+/* Fields in the otpstatus register in rev >= 21 */
+#define OTPS_OL_MASK		0x000000ff
+#define OTPS_OL_MFG		0x00000001	/* manuf row is locked */
+#define OTPS_OL_OR1		0x00000002	/* otp redundancy row 1 is locked */
+#define OTPS_OL_OR2		0x00000004	/* otp redundancy row 2 is locked */
+#define OTPS_OL_GU		0x00000008	/* general use region is locked */
+#define OTPS_GUP_MASK		0x00000f00
+#define OTPS_GUP_SHIFT		8
+#define OTPS_GUP_HW		0x00000100	/* h/w subregion is programmed */
+#define OTPS_GUP_SW		0x00000200	/* s/w subregion is programmed */
+#define OTPS_GUP_CI		0x00000400	/* chipid/pkgopt subregion is programmed */
+#define OTPS_GUP_FUSE		0x00000800	/* fuse subregion is programmed */
+#define OTPS_READY		0x00001000
+#define OTPS_RV(x)		(1 << (16 + (x)))	/* redundancy entry valid */
+#define OTPS_RV_MASK		0x0fff0000
+#define OTPS_PROGOK     0x40000000
+
+/* Fields in the otpcontrol register in rev >= 21 */
+#define OTPC_PROGSEL		0x00000001
+#define OTPC_PCOUNT_MASK	0x0000000e
+#define OTPC_PCOUNT_SHIFT	1
+#define OTPC_VSEL_MASK		0x000000f0
+#define OTPC_VSEL_SHIFT		4
+#define OTPC_TMM_MASK		0x00000700
+#define OTPC_TMM_SHIFT		8
+#define OTPC_ODM		0x00000800
+#define OTPC_PROGEN		0x80000000
+
+/* Fields in the 40nm otpcontrol register in rev >= 40 */
+#define OTPC_40NM_PROGSEL_SHIFT	0
+#define OTPC_40NM_PCOUNT_SHIFT	1
+#define OTPC_40NM_PCOUNT_WR	0xA
+#define OTPC_40NM_PCOUNT_V1X	0xB
+#define OTPC_40NM_REGCSEL_SHIFT	5
+#define OTPC_40NM_REGCSEL_DEF	0x4
+#define OTPC_40NM_PROGIN_SHIFT	8
+#define OTPC_40NM_R2X_SHIFT	10
+#define OTPC_40NM_ODM_SHIFT	11
+#define OTPC_40NM_DF_SHIFT	15
+#define OTPC_40NM_VSEL_SHIFT	16
+#define OTPC_40NM_VSEL_WR	0xA
+#define OTPC_40NM_VSEL_V1X	0xA
+#define OTPC_40NM_VSEL_R1X	0x5
+#define OTPC_40NM_COFAIL_SHIFT	30
+
+#define OTPC1_CPCSEL_SHIFT	0
+#define OTPC1_CPCSEL_DEF	6
+#define OTPC1_TM_SHIFT		8
+#define OTPC1_TM_WR		0x84
+#define OTPC1_TM_V1X		0x84
+#define OTPC1_TM_R1X		0x4
+#define OTPC1_CLK_EN_MASK	0x00020000
+#define OTPC1_CLK_DIV_MASK	0x00FC0000
+
+/* Fields in otpprog in rev >= 21 and HND OTP */
+#define OTPP_COL_MASK		0x000000ff
+#define OTPP_COL_SHIFT		0
+#define OTPP_ROW_MASK		0x0000ff00
+#define OTPP_ROW_MASK9		0x0001ff00		/* for ccrev >= 49 */
+#define OTPP_ROW_SHIFT		8
+#define OTPP_OC_MASK		0x0f000000
+#define OTPP_OC_SHIFT		24
+#define OTPP_READERR		0x10000000
+#define OTPP_VALUE_MASK		0x20000000
+#define OTPP_VALUE_SHIFT	29
+#define OTPP_START_BUSY		0x80000000
+#define	OTPP_READ		0x40000000	/* HND OTP */
+
+/* Fields in otplayout register */
+#define OTPL_HWRGN_OFF_MASK	0x00000FFF
+#define OTPL_HWRGN_OFF_SHIFT	0
+#define OTPL_WRAP_REVID_MASK	0x00F80000
+#define OTPL_WRAP_REVID_SHIFT	19
+#define OTPL_WRAP_TYPE_MASK	0x00070000
+#define OTPL_WRAP_TYPE_SHIFT	16
+#define OTPL_WRAP_TYPE_65NM	0
+#define OTPL_WRAP_TYPE_40NM	1
+#define OTPL_ROW_SIZE_MASK	0x0000F000
+#define OTPL_ROW_SIZE_SHIFT	12
+
+/* otplayout reg corerev >= 36 */
+#define OTP_CISFORMAT_NEW	0x80000000
+
+/* Opcodes for OTPP_OC field */
+#define OTPPOC_READ		0
+#define OTPPOC_BIT_PROG		1
+#define OTPPOC_VERIFY		3
+#define OTPPOC_INIT		4
+#define OTPPOC_SET		5
+#define OTPPOC_RESET		6
+#define OTPPOC_OCST		7
+#define OTPPOC_ROW_LOCK		8
+#define OTPPOC_PRESCN_TEST	9
+
+/* Opcodes for OTPP_OC field (40NM) */
+#define OTPPOC_READ_40NM	0
+#define OTPPOC_PROG_ENABLE_40NM 1
+#define OTPPOC_PROG_DISABLE_40NM	2
+#define OTPPOC_VERIFY_40NM	3
+#define OTPPOC_WORD_VERIFY_1_40NM	4
+#define OTPPOC_ROW_LOCK_40NM	5
+#define OTPPOC_STBY_40NM	6
+#define OTPPOC_WAKEUP_40NM	7
+#define OTPPOC_WORD_VERIFY_0_40NM	8
+#define OTPPOC_PRESCN_TEST_40NM 9
+#define OTPPOC_BIT_PROG_40NM	10
+#define OTPPOC_WORDPROG_40NM	11
+#define OTPPOC_BURNIN_40NM	12
+#define OTPPOC_AUTORELOAD_40NM	13
+#define OTPPOC_OVST_READ_40NM	14
+#define OTPPOC_OVST_PROG_40NM	15
+
+/* Fields in otplayoutextension */
+#define OTPLAYOUTEXT_FUSE_MASK	0x3FF
+
+
+/* Jtagm characteristics that appeared at a given corerev */
+#define	JTAGM_CREV_OLD		10	/* Old command set, 16bit max IR */
+#define	JTAGM_CREV_IRP		22	/* Able to do pause-ir */
+#define	JTAGM_CREV_RTI		28	/* Able to do return-to-idle */
+
+/* jtagcmd */
+#define JCMD_START		0x80000000
+#define JCMD_BUSY		0x80000000
+#define JCMD_STATE_MASK		0x60000000
+#define JCMD_STATE_TLR		0x00000000	/* Test-logic-reset */
+#define JCMD_STATE_PIR		0x20000000	/* Pause IR */
+#define JCMD_STATE_PDR		0x40000000	/* Pause DR */
+#define JCMD_STATE_RTI		0x60000000	/* Run-test-idle */
+#define JCMD0_ACC_MASK		0x0000f000
+#define JCMD0_ACC_IRDR		0x00000000
+#define JCMD0_ACC_DR		0x00001000
+#define JCMD0_ACC_IR		0x00002000
+#define JCMD0_ACC_RESET		0x00003000
+#define JCMD0_ACC_IRPDR		0x00004000
+#define JCMD0_ACC_PDR		0x00005000
+#define JCMD0_IRW_MASK		0x00000f00
+#define JCMD_ACC_MASK		0x000f0000	/* Changes for corerev 11 */
+#define JCMD_ACC_IRDR		0x00000000
+#define JCMD_ACC_DR		0x00010000
+#define JCMD_ACC_IR		0x00020000
+#define JCMD_ACC_RESET		0x00030000
+#define JCMD_ACC_IRPDR		0x00040000
+#define JCMD_ACC_PDR		0x00050000
+#define JCMD_ACC_PIR		0x00060000
+#define JCMD_ACC_IRDR_I		0x00070000	/* rev 28: return to run-test-idle */
+#define JCMD_ACC_DR_I		0x00080000	/* rev 28: return to run-test-idle */
+#define JCMD_IRW_MASK		0x00001f00
+#define JCMD_IRW_SHIFT		8
+#define JCMD_DRW_MASK		0x0000003f
+
+/* jtagctrl */
+#define JCTRL_FORCE_CLK		4		/* Force clock */
+#define JCTRL_EXT_EN		2		/* Enable external targets */
+#define JCTRL_EN		1		/* Enable Jtag master */
+
+#define JCTRL_TAPSEL_BIT	0x00000008	/* JtagMasterCtrl tap_sel bit */
+
+/* Fields in clkdiv */
+#define	CLKD_SFLASH		0x0f000000
+#define	CLKD_SFLASH_SHIFT	24
+#define	CLKD_OTP		0x000f0000
+#define	CLKD_OTP_SHIFT		16
+#define	CLKD_JTAG		0x00000f00
+#define	CLKD_JTAG_SHIFT		8
+#define	CLKD_UART		0x000000ff
+
+#define	CLKD2_SROM		0x00000003
+
+/* intstatus/intmask */
+#define	CI_GPIO			0x00000001	/* gpio intr */
+#define	CI_EI			0x00000002	/* extif intr (corerev >= 3) */
+#define	CI_TEMP			0x00000004	/* temp. ctrl intr (corerev >= 15) */
+#define	CI_SIRQ			0x00000008	/* serial IRQ intr (corerev >= 15) */
+#define	CI_ECI			0x00000010	/* eci intr (corerev >= 21) */
+#define	CI_PMU			0x00000020	/* pmu intr (corerev >= 21) */
+#define	CI_UART			0x00000040	/* uart intr (corerev >= 21) */
+#define	CI_WDRESET		0x80000000	/* watchdog reset occurred */
+
+/* slow_clk_ctl */
+#define SCC_SS_MASK		0x00000007	/* slow clock source mask */
+#define	SCC_SS_LPO		0x00000000	/* source of slow clock is LPO */
+#define	SCC_SS_XTAL		0x00000001	/* source of slow clock is crystal */
+#define	SCC_SS_PCI		0x00000002	/* source of slow clock is PCI */
+#define SCC_LF			0x00000200	/* LPOFreqSel, 1: 160Khz, 0: 32KHz */
+#define SCC_LP			0x00000400	/* LPOPowerDown, 1: LPO is disabled,
+						 * 0: LPO is enabled
+						 */
+#define SCC_FS			0x00000800	/* ForceSlowClk, 1: sb/cores running on slow clock,
+						 * 0: power logic control
+						 */
+#define SCC_IP			0x00001000	/* IgnorePllOffReq, 1/0: power logic ignores/honors
+						 * PLL clock disable requests from core
+						 */
+#define SCC_XC			0x00002000	/* XtalControlEn, 1/0: power logic does/doesn't
+						 * disable crystal when appropriate
+						 */
+#define SCC_XP			0x00004000	/* XtalPU (RO), 1/0: crystal running/disabled */
+#define SCC_CD_MASK		0xffff0000	/* ClockDivider (SlowClk = 1/(4+divisor)) */
+#define SCC_CD_SHIFT		16
+
+/* system_clk_ctl */
+#define	SYCC_IE			0x00000001	/* ILPen: Enable Idle Low Power */
+#define	SYCC_AE			0x00000002	/* ALPen: Enable Active Low Power */
+#define	SYCC_FP			0x00000004	/* ForcePLLOn */
+#define	SYCC_AR			0x00000008	/* Force ALP (or HT if ALPen is not set */
+#define	SYCC_HR			0x00000010	/* Force HT */
+#define SYCC_CD_MASK		0xffff0000	/* ClkDiv  (ILP = 1/(4 * (divisor + 1)) */
+#define SYCC_CD_SHIFT		16
+
+/* Indirect backplane access */
+#define	BPIA_BYTEEN		0x0000000f
+#define	BPIA_SZ1		0x00000001
+#define	BPIA_SZ2		0x00000003
+#define	BPIA_SZ4		0x00000007
+#define	BPIA_SZ8		0x0000000f
+#define	BPIA_WRITE		0x00000100
+#define	BPIA_START		0x00000200
+#define	BPIA_BUSY		0x00000200
+#define	BPIA_ERROR		0x00000400
+
+/* pcmcia/prog/flash_config */
+#define	CF_EN			0x00000001	/* enable */
+#define	CF_EM_MASK		0x0000000e	/* mode */
+#define	CF_EM_SHIFT		1
+#define	CF_EM_FLASH		0		/* flash/asynchronous mode */
+#define	CF_EM_SYNC		2		/* synchronous mode */
+#define	CF_EM_PCMCIA		4		/* pcmcia mode */
+#define	CF_DS			0x00000010	/* destsize:  0=8bit, 1=16bit */
+#define	CF_BS			0x00000020	/* byteswap */
+#define	CF_CD_MASK		0x000000c0	/* clock divider */
+#define	CF_CD_SHIFT		6
+#define	CF_CD_DIV2		0x00000000	/* backplane/2 */
+#define	CF_CD_DIV3		0x00000040	/* backplane/3 */
+#define	CF_CD_DIV4		0x00000080	/* backplane/4 */
+#define	CF_CE			0x00000100	/* clock enable */
+#define	CF_SB			0x00000200	/* size/bytestrobe (synch only) */
+
+/* pcmcia_memwait */
+#define	PM_W0_MASK		0x0000003f	/* waitcount0 */
+#define	PM_W1_MASK		0x00001f00	/* waitcount1 */
+#define	PM_W1_SHIFT		8
+#define	PM_W2_MASK		0x001f0000	/* waitcount2 */
+#define	PM_W2_SHIFT		16
+#define	PM_W3_MASK		0x1f000000	/* waitcount3 */
+#define	PM_W3_SHIFT		24
+
+/* pcmcia_attrwait */
+#define	PA_W0_MASK		0x0000003f	/* waitcount0 */
+#define	PA_W1_MASK		0x00001f00	/* waitcount1 */
+#define	PA_W1_SHIFT		8
+#define	PA_W2_MASK		0x001f0000	/* waitcount2 */
+#define	PA_W2_SHIFT		16
+#define	PA_W3_MASK		0x1f000000	/* waitcount3 */
+#define	PA_W3_SHIFT		24
+
+/* pcmcia_iowait */
+#define	PI_W0_MASK		0x0000003f	/* waitcount0 */
+#define	PI_W1_MASK		0x00001f00	/* waitcount1 */
+#define	PI_W1_SHIFT		8
+#define	PI_W2_MASK		0x001f0000	/* waitcount2 */
+#define	PI_W2_SHIFT		16
+#define	PI_W3_MASK		0x1f000000	/* waitcount3 */
+#define	PI_W3_SHIFT		24
+
+/* prog_waitcount */
+#define	PW_W0_MASK		0x0000001f	/* waitcount0 */
+#define	PW_W1_MASK		0x00001f00	/* waitcount1 */
+#define	PW_W1_SHIFT		8
+#define	PW_W2_MASK		0x001f0000	/* waitcount2 */
+#define	PW_W2_SHIFT		16
+#define	PW_W3_MASK		0x1f000000	/* waitcount3 */
+#define	PW_W3_SHIFT		24
+
+#define PW_W0       		0x0000000c
+#define PW_W1       		0x00000a00
+#define PW_W2       		0x00020000
+#define PW_W3       		0x01000000
+
+/* flash_waitcount */
+#define	FW_W0_MASK		0x0000003f	/* waitcount0 */
+#define	FW_W1_MASK		0x00001f00	/* waitcount1 */
+#define	FW_W1_SHIFT		8
+#define	FW_W2_MASK		0x001f0000	/* waitcount2 */
+#define	FW_W2_SHIFT		16
+#define	FW_W3_MASK		0x1f000000	/* waitcount3 */
+#define	FW_W3_SHIFT		24
+
+/* When Srom support present, fields in sromcontrol */
+#define	SRC_START		0x80000000
+#define	SRC_BUSY		0x80000000
+#define	SRC_OPCODE		0x60000000
+#define	SRC_OP_READ		0x00000000
+#define	SRC_OP_WRITE		0x20000000
+#define	SRC_OP_WRDIS		0x40000000
+#define	SRC_OP_WREN		0x60000000
+#define	SRC_OTPSEL		0x00000010
+#define SRC_OTPPRESENT		0x00000020
+#define	SRC_LOCK		0x00000008
+#define	SRC_SIZE_MASK		0x00000006
+#define	SRC_SIZE_1K		0x00000000
+#define	SRC_SIZE_4K		0x00000002
+#define	SRC_SIZE_16K		0x00000004
+#define	SRC_SIZE_SHIFT		1
+#define	SRC_PRESENT		0x00000001
+
+/* Fields in pmucontrol */
+#define	PCTL_ILP_DIV_MASK	0xffff0000
+#define	PCTL_ILP_DIV_SHIFT	16
+#define PCTL_LQ_REQ_EN		0x00008000
+#define PCTL_PLL_PLLCTL_UPD	0x00000400	/* rev 2 */
+#define PCTL_NOILP_ON_WAIT	0x00000200	/* rev 1 */
+#define	PCTL_HT_REQ_EN		0x00000100
+#define	PCTL_ALP_REQ_EN		0x00000080
+#define	PCTL_XTALFREQ_MASK	0x0000007c
+#define	PCTL_XTALFREQ_SHIFT	2
+#define	PCTL_ILP_DIV_EN		0x00000002
+#define	PCTL_LPO_SEL		0x00000001
+
+/*  Retention Control */
+#define PMU_RCTL_CLK_DIV_SHIFT		0
+#define PMU_RCTL_CHAIN_LEN_SHIFT	12
+#define PMU_RCTL_MACPHY_DISABLE_SHIFT	26
+#define PMU_RCTL_MACPHY_DISABLE_MASK	(1 << 26)
+#define PMU_RCTL_LOGIC_DISABLE_SHIFT	27
+#define PMU_RCTL_LOGIC_DISABLE_MASK	(1 << 27)
+#define PMU_RCTL_MEMSLP_LOG_SHIFT	28
+#define PMU_RCTL_MEMSLP_LOG_MASK	(1 << 28)
+#define PMU_RCTL_MEMRETSLP_LOG_SHIFT	29
+#define PMU_RCTL_MEMRETSLP_LOG_MASK	(1 << 29)
+
+/*  Retention Group Control */
+#define PMU_RCTLGRP_CHAIN_LEN_SHIFT	0
+#define PMU_RCTLGRP_RMODE_ENABLE_SHIFT	14
+#define PMU_RCTLGRP_RMODE_ENABLE_MASK	(1 << 14)
+#define PMU_RCTLGRP_DFT_ENABLE_SHIFT	15
+#define PMU_RCTLGRP_DFT_ENABLE_MASK	(1 << 15)
+#define PMU_RCTLGRP_NSRST_DISABLE_SHIFT	16
+#define PMU_RCTLGRP_NSRST_DISABLE_MASK	(1 << 16)
+/*  Retention Group Control special for 4334 */
+#define PMU4334_RCTLGRP_CHAIN_LEN_GRP0	338
+#define PMU4334_RCTLGRP_CHAIN_LEN_GRP1	315
+/*  Retention Group Control special for 43341 */
+#define PMU43341_RCTLGRP_CHAIN_LEN_GRP0	366
+#define PMU43341_RCTLGRP_CHAIN_LEN_GRP1	330
+
+/* Fields in clkstretch */
+#define CSTRETCH_HT		0xffff0000
+#define CSTRETCH_ALP		0x0000ffff
+
+/* gpiotimerval */
+#define GPIO_ONTIME_SHIFT	16
+
+/* clockcontrol_n */
+#define	CN_N1_MASK		0x3f		/* n1 control */
+#define	CN_N2_MASK		0x3f00		/* n2 control */
+#define	CN_N2_SHIFT		8
+#define	CN_PLLC_MASK		0xf0000		/* pll control */
+#define	CN_PLLC_SHIFT		16
+
+/* clockcontrol_sb/pci/uart */
+#define	CC_M1_MASK		0x3f		/* m1 control */
+#define	CC_M2_MASK		0x3f00		/* m2 control */
+#define	CC_M2_SHIFT		8
+#define	CC_M3_MASK		0x3f0000	/* m3 control */
+#define	CC_M3_SHIFT		16
+#define	CC_MC_MASK		0x1f000000	/* mux control */
+#define	CC_MC_SHIFT		24
+
+/* N3M Clock control magic field values */
+#define	CC_F6_2			0x02		/* A factor of 2 in */
+#define	CC_F6_3			0x03		/* 6-bit fields like */
+#define	CC_F6_4			0x05		/* N1, M1 or M3 */
+#define	CC_F6_5			0x09
+#define	CC_F6_6			0x11
+#define	CC_F6_7			0x21
+
+#define	CC_F5_BIAS		5		/* 5-bit fields get this added */
+
+#define	CC_MC_BYPASS		0x08
+#define	CC_MC_M1		0x04
+#define	CC_MC_M1M2		0x02
+#define	CC_MC_M1M2M3		0x01
+#define	CC_MC_M1M3		0x11
+
+/* Type 2 Clock control magic field values */
+#define	CC_T2_BIAS		2		/* n1, n2, m1 & m3 bias */
+#define	CC_T2M2_BIAS		3		/* m2 bias */
+
+#define	CC_T2MC_M1BYP		1
+#define	CC_T2MC_M2BYP		2
+#define	CC_T2MC_M3BYP		4
+
+/* Type 6 Clock control magic field values */
+#define	CC_T6_MMASK		1		/* bits of interest in m */
+#define	CC_T6_M0		120000000	/* sb clock for m = 0 */
+#define	CC_T6_M1		100000000	/* sb clock for m = 1 */
+#define	SB2MIPS_T6(sb)		(2 * (sb))
+
+/* Common clock base */
+#define	CC_CLOCK_BASE1		24000000	/* Half the clock freq */
+#define CC_CLOCK_BASE2		12500000	/* Alternate crystal on some PLLs */
+
+/* Clock control values for 200MHz in 5350 */
+#define	CLKC_5350_N		0x0311
+#define	CLKC_5350_M		0x04020009
+
+/* Flash types in the chipcommon capabilities register */
+#define FLASH_NONE		0x000		/* No flash */
+#define SFLASH_ST		0x100		/* ST serial flash */
+#define SFLASH_AT		0x200		/* Atmel serial flash */
+#define NFLASH			0x300
+#define	PFLASH			0x700		/* Parallel flash */
+#define QSPIFLASH_ST		0x800
+#define QSPIFLASH_AT		0x900
+
+/* Bits in the ExtBus config registers */
+#define	CC_CFG_EN		0x0001		/* Enable */
+#define	CC_CFG_EM_MASK		0x000e		/* Extif Mode */
+#define	CC_CFG_EM_ASYNC		0x0000		/*   Async/Parallel flash */
+#define	CC_CFG_EM_SYNC		0x0002		/*   Synchronous */
+#define	CC_CFG_EM_PCMCIA	0x0004		/*   PCMCIA */
+#define	CC_CFG_EM_IDE		0x0006		/*   IDE */
+#define	CC_CFG_DS		0x0010		/* Data size, 0=8bit, 1=16bit */
+#define	CC_CFG_CD_MASK		0x00e0		/* Sync: Clock divisor, rev >= 20 */
+#define	CC_CFG_CE		0x0100		/* Sync: Clock enable, rev >= 20 */
+#define	CC_CFG_SB		0x0200		/* Sync: Size/Bytestrobe, rev >= 20 */
+#define	CC_CFG_IS		0x0400		/* Extif Sync Clk Select, rev >= 20 */
+
+/* ExtBus address space */
+#define	CC_EB_BASE		0x1a000000	/* Chipc ExtBus base address */
+#define	CC_EB_PCMCIA_MEM	0x1a000000	/* PCMCIA 0 memory base address */
+#define	CC_EB_PCMCIA_IO		0x1a200000	/* PCMCIA 0 I/O base address */
+#define	CC_EB_PCMCIA_CFG	0x1a400000	/* PCMCIA 0 config base address */
+#define	CC_EB_IDE		0x1a800000	/* IDE memory base */
+#define	CC_EB_PCMCIA1_MEM	0x1a800000	/* PCMCIA 1 memory base address */
+#define	CC_EB_PCMCIA1_IO	0x1aa00000	/* PCMCIA 1 I/O base address */
+#define	CC_EB_PCMCIA1_CFG	0x1ac00000	/* PCMCIA 1 config base address */
+#define	CC_EB_PROGIF		0x1b000000	/* ProgIF Async/Sync base address */
+
+
+/* Start/busy bit in flashcontrol */
+#define SFLASH_OPCODE		0x000000ff
+#define SFLASH_ACTION		0x00000700
+#define	SFLASH_CS_ACTIVE	0x00001000	/* Chip Select Active, rev >= 20 */
+#define SFLASH_START		0x80000000
+#define SFLASH_BUSY		SFLASH_START
+
+/* flashcontrol action codes */
+#define	SFLASH_ACT_OPONLY	0x0000		/* Issue opcode only */
+#define	SFLASH_ACT_OP1D		0x0100		/* opcode + 1 data byte */
+#define	SFLASH_ACT_OP3A		0x0200		/* opcode + 3 addr bytes */
+#define	SFLASH_ACT_OP3A1D	0x0300		/* opcode + 3 addr & 1 data bytes */
+#define	SFLASH_ACT_OP3A4D	0x0400		/* opcode + 3 addr & 4 data bytes */
+#define	SFLASH_ACT_OP3A4X4D	0x0500		/* opcode + 3 addr, 4 don't care & 4 data bytes */
+#define	SFLASH_ACT_OP3A1X4D	0x0700		/* opcode + 3 addr, 1 don't care & 4 data bytes */
+
+/* flashcontrol action+opcodes for ST flashes */
+#define SFLASH_ST_WREN		0x0006		/* Write Enable */
+#define SFLASH_ST_WRDIS		0x0004		/* Write Disable */
+#define SFLASH_ST_RDSR		0x0105		/* Read Status Register */
+#define SFLASH_ST_WRSR		0x0101		/* Write Status Register */
+#define SFLASH_ST_READ		0x0303		/* Read Data Bytes */
+#define SFLASH_ST_PP		0x0302		/* Page Program */
+#define SFLASH_ST_SE		0x02d8		/* Sector Erase */
+#define SFLASH_ST_BE		0x00c7		/* Bulk Erase */
+#define SFLASH_ST_DP		0x00b9		/* Deep Power-down */
+#define SFLASH_ST_RES		0x03ab		/* Read Electronic Signature */
+#define SFLASH_ST_CSA		0x1000		/* Keep chip select asserted */
+#define SFLASH_ST_SSE		0x0220		/* Sub-sector Erase */
+
+#define SFLASH_MXIC_RDID	0x0390		/* Read Manufacture ID */
+#define SFLASH_MXIC_MFID	0xc2		/* MXIC Manufacture ID */
+
+/* Status register bits for ST flashes */
+#define SFLASH_ST_WIP		0x01		/* Write In Progress */
+#define SFLASH_ST_WEL		0x02		/* Write Enable Latch */
+#define SFLASH_ST_BP_MASK	0x1c		/* Block Protect */
+#define SFLASH_ST_BP_SHIFT	2
+#define SFLASH_ST_SRWD		0x80		/* Status Register Write Disable */
+
+/* flashcontrol action+opcodes for Atmel flashes */
+#define SFLASH_AT_READ				0x07e8
+#define SFLASH_AT_PAGE_READ			0x07d2
+#define SFLASH_AT_BUF1_READ
+#define SFLASH_AT_BUF2_READ
+#define SFLASH_AT_STATUS			0x01d7
+#define SFLASH_AT_BUF1_WRITE			0x0384
+#define SFLASH_AT_BUF2_WRITE			0x0387
+#define SFLASH_AT_BUF1_ERASE_PROGRAM		0x0283
+#define SFLASH_AT_BUF2_ERASE_PROGRAM		0x0286
+#define SFLASH_AT_BUF1_PROGRAM			0x0288
+#define SFLASH_AT_BUF2_PROGRAM			0x0289
+#define SFLASH_AT_PAGE_ERASE			0x0281
+#define SFLASH_AT_BLOCK_ERASE			0x0250
+#define SFLASH_AT_BUF1_WRITE_ERASE_PROGRAM	0x0382
+#define SFLASH_AT_BUF2_WRITE_ERASE_PROGRAM	0x0385
+#define SFLASH_AT_BUF1_LOAD			0x0253
+#define SFLASH_AT_BUF2_LOAD			0x0255
+#define SFLASH_AT_BUF1_COMPARE			0x0260
+#define SFLASH_AT_BUF2_COMPARE			0x0261
+#define SFLASH_AT_BUF1_REPROGRAM		0x0258
+#define SFLASH_AT_BUF2_REPROGRAM		0x0259
+
+/* Status register bits for Atmel flashes */
+#define SFLASH_AT_READY				0x80
+#define SFLASH_AT_MISMATCH			0x40
+#define SFLASH_AT_ID_MASK			0x38
+#define SFLASH_AT_ID_SHIFT			3
+
+/* SPI register bits, corerev >= 37 */
+#define GSIO_START			0x80000000
+#define GSIO_BUSY			GSIO_START
+
+/*
+ * These are the UART port assignments, expressed as offsets from the base
+ * register.  These assignments should hold for any serial port based on
+ * a 8250, 16450, or 16550(A).
+ */
+
+#define UART_RX		0	/* In:  Receive buffer (DLAB=0) */
+#define UART_TX		0	/* Out: Transmit buffer (DLAB=0) */
+#define UART_DLL	0	/* Out: Divisor Latch Low (DLAB=1) */
+#define UART_IER	1	/* In/Out: Interrupt Enable Register (DLAB=0) */
+#define UART_DLM	1	/* Out: Divisor Latch High (DLAB=1) */
+#define UART_IIR	2	/* In: Interrupt Identity Register  */
+#define UART_FCR	2	/* Out: FIFO Control Register */
+#define UART_LCR	3	/* Out: Line Control Register */
+#define UART_MCR	4	/* Out: Modem Control Register */
+#define UART_LSR	5	/* In:  Line Status Register */
+#define UART_MSR	6	/* In:  Modem Status Register */
+#define UART_SCR	7	/* I/O: Scratch Register */
+#define UART_LCR_DLAB	0x80	/* Divisor latch access bit */
+#define UART_LCR_WLEN8	0x03	/* Word length: 8 bits */
+#define UART_MCR_OUT2	0x08	/* MCR GPIO out 2 */
+#define UART_MCR_LOOP	0x10	/* Enable loopback test mode */
+#define UART_LSR_RX_FIFO 	0x80	/* Receive FIFO error */
+#define UART_LSR_TDHR		0x40	/* Data-hold-register empty */
+#define UART_LSR_THRE		0x20	/* Transmit-hold-register empty */
+#define UART_LSR_BREAK		0x10	/* Break interrupt */
+#define UART_LSR_FRAMING	0x08	/* Framing error */
+#define UART_LSR_PARITY		0x04	/* Parity error */
+#define UART_LSR_OVERRUN	0x02	/* Overrun error */
+#define UART_LSR_RXRDY		0x01	/* Receiver ready */
+#define UART_FCR_FIFO_ENABLE 1	/* FIFO control register bit controlling FIFO enable/disable */
+
+/* Interrupt Identity Register (IIR) bits */
+#define UART_IIR_FIFO_MASK	0xc0	/* IIR FIFO disable/enabled mask */
+#define UART_IIR_INT_MASK	0xf	/* IIR interrupt ID source */
+#define UART_IIR_MDM_CHG	0x0	/* Modem status changed */
+#define UART_IIR_NOINT		0x1	/* No interrupt pending */
+#define UART_IIR_THRE		0x2	/* THR empty */
+#define UART_IIR_RCVD_DATA	0x4	/* Received data available */
+#define UART_IIR_RCVR_STATUS 	0x6	/* Receiver status */
+#define UART_IIR_CHAR_TIME 	0xc	/* Character time */
+
+/* Interrupt Enable Register (IER) bits */
+#define UART_IER_PTIME	128	/* Programmable THRE Interrupt Mode Enable */
+#define UART_IER_EDSSI	8	/* enable modem status interrupt */
+#define UART_IER_ELSI	4	/* enable receiver line status interrupt */
+#define UART_IER_ETBEI  2	/* enable transmitter holding register empty interrupt */
+#define UART_IER_ERBFI	1	/* enable data available interrupt */
+
+/* pmustatus */
+#define PST_SLOW_WR_PENDING 0x0400
+#define PST_EXTLPOAVAIL	0x0100
+#define PST_WDRESET	0x0080
+#define	PST_INTPEND	0x0040
+#define	PST_SBCLKST	0x0030
+#define	PST_SBCLKST_ILP	0x0010
+#define	PST_SBCLKST_ALP	0x0020
+#define	PST_SBCLKST_HT	0x0030
+#define	PST_ALPAVAIL	0x0008
+#define	PST_HTAVAIL	0x0004
+#define	PST_RESINIT	0x0003
+
+/* pmucapabilities */
+#define PCAP_REV_MASK	0x000000ff
+#define PCAP_RC_MASK	0x00001f00
+#define PCAP_RC_SHIFT	8
+#define PCAP_TC_MASK	0x0001e000
+#define PCAP_TC_SHIFT	13
+#define PCAP_PC_MASK	0x001e0000
+#define PCAP_PC_SHIFT	17
+#define PCAP_VC_MASK	0x01e00000
+#define PCAP_VC_SHIFT	21
+#define PCAP_CC_MASK	0x1e000000
+#define PCAP_CC_SHIFT	25
+#define PCAP5_PC_MASK	0x003e0000	/* PMU corerev >= 5 */
+#define PCAP5_PC_SHIFT	17
+#define PCAP5_VC_MASK	0x07c00000
+#define PCAP5_VC_SHIFT	22
+#define PCAP5_CC_MASK	0xf8000000
+#define PCAP5_CC_SHIFT	27
+
+/* PMU Resource Request Timer registers */
+/* This is based on PmuRev0 */
+#define	PRRT_TIME_MASK	0x03ff
+#define	PRRT_INTEN	0x0400
+#define	PRRT_REQ_ACTIVE	0x0800
+#define	PRRT_ALP_REQ	0x1000
+#define	PRRT_HT_REQ	0x2000
+#define PRRT_HQ_REQ 0x4000
+
+/* bit 0 of the PMU interrupt vector is asserted if this mask is enabled */
+#define RSRC_INTR_MASK_TIMER_INT_0 1
+
+/* PMU resource bit position */
+#define PMURES_BIT(bit)	(1 << (bit))
+
+/* PMU resource number limit */
+#define PMURES_MAX_RESNUM	30
+
+/* PMU chip control0 register */
+#define	PMU_CHIPCTL0		0
+#define PMU43143_CC0_SDIO_DRSTR_OVR	(1 << 31) /* sdio drive strength override enable */
+
+/* clock req types */
+#define PMU_CC1_CLKREQ_TYPE_SHIFT	19
+#define PMU_CC1_CLKREQ_TYPE_MASK	(1 << PMU_CC1_CLKREQ_TYPE_SHIFT)
+
+#define CLKREQ_TYPE_CONFIG_OPENDRAIN		0
+#define CLKREQ_TYPE_CONFIG_PUSHPULL		1
+
+/* PMU chip control1 register */
+#define	PMU_CHIPCTL1			1
+#define	PMU_CC1_RXC_DLL_BYPASS		0x00010000
+#define PMU_CC1_ENABLE_BBPLL_PWR_DOWN	0x00000010
+
+#define PMU_CC1_IF_TYPE_MASK   		0x00000030
+#define PMU_CC1_IF_TYPE_RMII    	0x00000000
+#define PMU_CC1_IF_TYPE_MII     	0x00000010
+#define PMU_CC1_IF_TYPE_RGMII   	0x00000020
+
+#define PMU_CC1_SW_TYPE_MASK    	0x000000c0
+#define PMU_CC1_SW_TYPE_EPHY    	0x00000000
+#define PMU_CC1_SW_TYPE_EPHYMII 	0x00000040
+#define PMU_CC1_SW_TYPE_EPHYRMII	0x00000080
+#define PMU_CC1_SW_TYPE_RGMII   	0x000000c0
+
+/* PMU chip control2 register */
+#define	PMU_CHIPCTL2		2
+#define PMU_CC2_FORCE_SUBCORE_PWR_SWITCH_ON   	(1 << 18)
+#define PMU_CC2_FORCE_PHY_PWR_SWITCH_ON   	(1 << 19)
+#define PMU_CC2_FORCE_VDDM_PWR_SWITCH_ON   	(1 << 20)
+#define PMU_CC2_FORCE_MEMLPLDO_PWR_SWITCH_ON   	(1 << 21)
+
+/* PMU chip control3 register */
+#define	PMU_CHIPCTL3		3
+#define PMU_CC3_ENABLE_SDIO_WAKEUP_SHIFT  19
+#define PMU_CC3_ENABLE_RF_SHIFT           22
+#define PMU_CC3_RF_DISABLE_IVALUE_SHIFT   23
+
+/* PMU chip control5 register */
+#define PMU_CHIPCTL5                    5
+
+/* PMU chip control6 register */
+#define PMU_CHIPCTL6                    6
+#define PMU_CC6_ENABLE_CLKREQ_WAKEUP    (1 << 4)
+#define PMU_CC6_ENABLE_PMU_WAKEUP_ALP   (1 << 6)
+
+/* PMU chip control7 register */
+#define PMU_CHIPCTL7				7
+#define PMU_CC7_ENABLE_L2REFCLKPAD_PWRDWN	(1 << 25)
+#define PMU_CC7_ENABLE_MDIO_RESET_WAR		(1 << 27)
+
+
+/* PMU corerev and chip specific PLL controls.
+ * PMU<rev>_PLL<num>_XX where <rev> is PMU corerev and <num> is an arbitrary number
+ * to differentiate different PLLs controlled by the same PMU rev.
+ */
+/* pllcontrol registers */
+/* PDIV, div_phy, div_arm, div_adc, dith_sel, ioff, kpd_scale, lsb_sel, mash_sel, lf_c & lf_r */
+#define	PMU0_PLL0_PLLCTL0		0
+#define	PMU0_PLL0_PC0_PDIV_MASK		1
+#define	PMU0_PLL0_PC0_PDIV_FREQ		25000
+#define PMU0_PLL0_PC0_DIV_ARM_MASK	0x00000038
+#define PMU0_PLL0_PC0_DIV_ARM_SHIFT	3
+#define PMU0_PLL0_PC0_DIV_ARM_BASE	8
+
+/* PC0_DIV_ARM for PLLOUT_ARM */
+#define PMU0_PLL0_PC0_DIV_ARM_110MHZ	0
+#define PMU0_PLL0_PC0_DIV_ARM_97_7MHZ	1
+#define PMU0_PLL0_PC0_DIV_ARM_88MHZ	2
+#define PMU0_PLL0_PC0_DIV_ARM_80MHZ	3 /* Default */
+#define PMU0_PLL0_PC0_DIV_ARM_73_3MHZ	4
+#define PMU0_PLL0_PC0_DIV_ARM_67_7MHZ	5
+#define PMU0_PLL0_PC0_DIV_ARM_62_9MHZ	6
+#define PMU0_PLL0_PC0_DIV_ARM_58_6MHZ	7
+
+/* Wildcard base, stop_mod, en_lf_tp, en_cal & lf_r2 */
+#define	PMU0_PLL0_PLLCTL1		1
+#define	PMU0_PLL0_PC1_WILD_INT_MASK	0xf0000000
+#define	PMU0_PLL0_PC1_WILD_INT_SHIFT	28
+#define	PMU0_PLL0_PC1_WILD_FRAC_MASK	0x0fffff00
+#define	PMU0_PLL0_PC1_WILD_FRAC_SHIFT	8
+#define	PMU0_PLL0_PC1_STOP_MOD		0x00000040
+
+/* Wildcard base, vco_calvar, vco_swc, vco_var_selref, vso_ical & vco_sel_avdd */
+#define	PMU0_PLL0_PLLCTL2		2
+#define	PMU0_PLL0_PC2_WILD_INT_MASK	0xf
+#define	PMU0_PLL0_PC2_WILD_INT_SHIFT	4
+
+/* pllcontrol registers */
+/* ndiv_pwrdn, pwrdn_ch<x>, refcomp_pwrdn, dly_ch<x>, p1div, p2div, _bypass_sdmod */
+#define PMU1_PLL0_PLLCTL0		0
+#define PMU1_PLL0_PC0_P1DIV_MASK	0x00f00000
+#define PMU1_PLL0_PC0_P1DIV_SHIFT	20
+#define PMU1_PLL0_PC0_P2DIV_MASK	0x0f000000
+#define PMU1_PLL0_PC0_P2DIV_SHIFT	24
+
+/* m<x>div */
+#define PMU1_PLL0_PLLCTL1		1
+#define PMU1_PLL0_PC1_M1DIV_MASK	0x000000ff
+#define PMU1_PLL0_PC1_M1DIV_SHIFT	0
+#define PMU1_PLL0_PC1_M2DIV_MASK	0x0000ff00
+#define PMU1_PLL0_PC1_M2DIV_SHIFT	8
+#define PMU1_PLL0_PC1_M3DIV_MASK	0x00ff0000
+#define PMU1_PLL0_PC1_M3DIV_SHIFT	16
+#define PMU1_PLL0_PC1_M4DIV_MASK	0xff000000
+#define PMU1_PLL0_PC1_M4DIV_SHIFT	24
+#define PMU1_PLL0_PC1_M4DIV_BY_9	9
+#define PMU1_PLL0_PC1_M4DIV_BY_18	0x12
+#define PMU1_PLL0_PC1_M4DIV_BY_36	0x24
+#define PMU1_PLL0_PC1_M4DIV_BY_60	0x3C
+
+#define DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT 8
+#define DOT11MAC_880MHZ_CLK_DIVISOR_MASK (0xFF << DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT)
+#define DOT11MAC_880MHZ_CLK_DIVISOR_VAL  (0xE << DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT)
+
+/* m<x>div, ndiv_dither_mfb, ndiv_mode, ndiv_int */
+#define PMU1_PLL0_PLLCTL2		2
+#define PMU1_PLL0_PC2_M5DIV_MASK	0x000000ff
+#define PMU1_PLL0_PC2_M5DIV_SHIFT	0
+#define PMU1_PLL0_PC2_M5DIV_BY_12	0xc
+#define PMU1_PLL0_PC2_M5DIV_BY_18	0x12
+#define PMU1_PLL0_PC2_M5DIV_BY_36	0x24
+#define PMU1_PLL0_PC2_M6DIV_MASK	0x0000ff00
+#define PMU1_PLL0_PC2_M6DIV_SHIFT	8
+#define PMU1_PLL0_PC2_M6DIV_BY_18	0x12
+#define PMU1_PLL0_PC2_M6DIV_BY_36	0x24
+#define PMU1_PLL0_PC2_NDIV_MODE_MASK	0x000e0000
+#define PMU1_PLL0_PC2_NDIV_MODE_SHIFT	17
+#define PMU1_PLL0_PC2_NDIV_MODE_MASH	1
+#define PMU1_PLL0_PC2_NDIV_MODE_MFB	2	/* recommended for 4319 */
+#define PMU1_PLL0_PC2_NDIV_INT_MASK	0x1ff00000
+#define PMU1_PLL0_PC2_NDIV_INT_SHIFT	20
+
+/* ndiv_frac */
+#define PMU1_PLL0_PLLCTL3		3
+#define PMU1_PLL0_PC3_NDIV_FRAC_MASK	0x00ffffff
+#define PMU1_PLL0_PC3_NDIV_FRAC_SHIFT	0
+
+/* pll_ctrl */
+#define PMU1_PLL0_PLLCTL4		4
+
+/* pll_ctrl, vco_rng, clkdrive_ch<x> */
+#define PMU1_PLL0_PLLCTL5		5
+#define PMU1_PLL0_PC5_CLK_DRV_MASK 0xffffff00
+#define PMU1_PLL0_PC5_CLK_DRV_SHIFT 8
+
+#define PMU1_PLL0_PLLCTL6		6
+#define PMU1_PLL0_PLLCTL7		7
+
+#define PMU1_PLL0_PLLCTL8		8
+#define PMU1_PLLCTL8_OPENLOOP_MASK	0x2
+
+/* PMU rev 2 control words */
+#define PMU2_PHY_PLL_PLLCTL		4
+#define PMU2_SI_PLL_PLLCTL		10
+
+/* PMU rev 2 */
+/* pllcontrol registers */
+/* ndiv_pwrdn, pwrdn_ch<x>, refcomp_pwrdn, dly_ch<x>, p1div, p2div, _bypass_sdmod */
+#define PMU2_PLL_PLLCTL0		0
+#define PMU2_PLL_PC0_P1DIV_MASK 	0x00f00000
+#define PMU2_PLL_PC0_P1DIV_SHIFT	20
+#define PMU2_PLL_PC0_P2DIV_MASK 	0x0f000000
+#define PMU2_PLL_PC0_P2DIV_SHIFT	24
+
+/* m<x>div */
+#define PMU2_PLL_PLLCTL1		1
+#define PMU2_PLL_PC1_M1DIV_MASK 	0x000000ff
+#define PMU2_PLL_PC1_M1DIV_SHIFT	0
+#define PMU2_PLL_PC1_M2DIV_MASK 	0x0000ff00
+#define PMU2_PLL_PC1_M2DIV_SHIFT	8
+#define PMU2_PLL_PC1_M3DIV_MASK 	0x00ff0000
+#define PMU2_PLL_PC1_M3DIV_SHIFT	16
+#define PMU2_PLL_PC1_M4DIV_MASK 	0xff000000
+#define PMU2_PLL_PC1_M4DIV_SHIFT	24
+
+/* m<x>div, ndiv_dither_mfb, ndiv_mode, ndiv_int */
+#define PMU2_PLL_PLLCTL2		2
+#define PMU2_PLL_PC2_M5DIV_MASK 	0x000000ff
+#define PMU2_PLL_PC2_M5DIV_SHIFT	0
+#define PMU2_PLL_PC2_M6DIV_MASK 	0x0000ff00
+#define PMU2_PLL_PC2_M6DIV_SHIFT	8
+#define PMU2_PLL_PC2_NDIV_MODE_MASK	0x000e0000
+#define PMU2_PLL_PC2_NDIV_MODE_SHIFT	17
+#define PMU2_PLL_PC2_NDIV_INT_MASK	0x1ff00000
+#define PMU2_PLL_PC2_NDIV_INT_SHIFT	20
+
+/* ndiv_frac */
+#define PMU2_PLL_PLLCTL3		3
+#define PMU2_PLL_PC3_NDIV_FRAC_MASK	0x00ffffff
+#define PMU2_PLL_PC3_NDIV_FRAC_SHIFT	0
+
+/* pll_ctrl */
+#define PMU2_PLL_PLLCTL4		4
+
+/* pll_ctrl, vco_rng, clkdrive_ch<x> */
+#define PMU2_PLL_PLLCTL5		5
+#define PMU2_PLL_PC5_CLKDRIVE_CH1_MASK	0x00000f00
+#define PMU2_PLL_PC5_CLKDRIVE_CH1_SHIFT	8
+#define PMU2_PLL_PC5_CLKDRIVE_CH2_MASK	0x0000f000
+#define PMU2_PLL_PC5_CLKDRIVE_CH2_SHIFT	12
+#define PMU2_PLL_PC5_CLKDRIVE_CH3_MASK	0x000f0000
+#define PMU2_PLL_PC5_CLKDRIVE_CH3_SHIFT	16
+#define PMU2_PLL_PC5_CLKDRIVE_CH4_MASK	0x00f00000
+#define PMU2_PLL_PC5_CLKDRIVE_CH4_SHIFT	20
+#define PMU2_PLL_PC5_CLKDRIVE_CH5_MASK	0x0f000000
+#define PMU2_PLL_PC5_CLKDRIVE_CH5_SHIFT	24
+#define PMU2_PLL_PC5_CLKDRIVE_CH6_MASK	0xf0000000
+#define PMU2_PLL_PC5_CLKDRIVE_CH6_SHIFT	28
+
+/* PMU rev 5 (& 6) */
+#define	PMU5_PLL_P1P2_OFF		0
+#define	PMU5_PLL_P1_MASK		0x0f000000
+#define	PMU5_PLL_P1_SHIFT		24
+#define	PMU5_PLL_P2_MASK		0x00f00000
+#define	PMU5_PLL_P2_SHIFT		20
+#define	PMU5_PLL_M14_OFF		1
+#define	PMU5_PLL_MDIV_MASK		0x000000ff
+#define	PMU5_PLL_MDIV_WIDTH		8
+#define	PMU5_PLL_NM5_OFF		2
+#define	PMU5_PLL_NDIV_MASK		0xfff00000
+#define	PMU5_PLL_NDIV_SHIFT		20
+#define	PMU5_PLL_NDIV_MODE_MASK		0x000e0000
+#define	PMU5_PLL_NDIV_MODE_SHIFT	17
+#define	PMU5_PLL_FMAB_OFF		3
+#define	PMU5_PLL_MRAT_MASK		0xf0000000
+#define	PMU5_PLL_MRAT_SHIFT		28
+#define	PMU5_PLL_ABRAT_MASK		0x08000000
+#define	PMU5_PLL_ABRAT_SHIFT		27
+#define	PMU5_PLL_FDIV_MASK		0x07ffffff
+#define	PMU5_PLL_PLLCTL_OFF		4
+#define	PMU5_PLL_PCHI_OFF		5
+#define	PMU5_PLL_PCHI_MASK		0x0000003f
+
+/* pmu XtalFreqRatio */
+#define	PMU_XTALFREQ_REG_ILPCTR_MASK	0x00001FFF
+#define	PMU_XTALFREQ_REG_MEASURE_MASK	0x80000000
+#define	PMU_XTALFREQ_REG_MEASURE_SHIFT	31
+
+/* Divider allocation in 4716/47162/5356/5357 */
+#define	PMU5_MAINPLL_CPU		1
+#define	PMU5_MAINPLL_MEM		2
+#define	PMU5_MAINPLL_SI			3
+
+/* 4706 PMU */
+#define PMU4706_MAINPLL_PLL0	0
+#define PMU6_4706_PROCPLL_OFF	4	/* The CPU PLL */
+#define PMU6_4706_PROC_P2DIV_MASK		0x000f0000
+#define PMU6_4706_PROC_P2DIV_SHIFT	16
+#define PMU6_4706_PROC_P1DIV_MASK		0x0000f000
+#define PMU6_4706_PROC_P1DIV_SHIFT	12
+#define PMU6_4706_PROC_NDIV_INT_MASK	0x00000ff8
+#define PMU6_4706_PROC_NDIV_INT_SHIFT	3
+#define PMU6_4706_PROC_NDIV_MODE_MASK		0x00000007
+#define PMU6_4706_PROC_NDIV_MODE_SHIFT	0
+
+#define PMU7_PLL_PLLCTL7                7
+#define PMU7_PLL_CTL7_M4DIV_MASK	0xff000000
+#define PMU7_PLL_CTL7_M4DIV_SHIFT 	24
+#define PMU7_PLL_CTL7_M4DIV_BY_6	6
+#define PMU7_PLL_CTL7_M4DIV_BY_12	0xc
+#define PMU7_PLL_CTL7_M4DIV_BY_24	0x18
+#define PMU7_PLL_PLLCTL8                8
+#define PMU7_PLL_CTL8_M5DIV_MASK	0x000000ff
+#define PMU7_PLL_CTL8_M5DIV_SHIFT	0
+#define PMU7_PLL_CTL8_M5DIV_BY_8	8
+#define PMU7_PLL_CTL8_M5DIV_BY_12	0xc
+#define PMU7_PLL_CTL8_M5DIV_BY_24	0x18
+#define PMU7_PLL_CTL8_M6DIV_MASK	0x0000ff00
+#define PMU7_PLL_CTL8_M6DIV_SHIFT	8
+#define PMU7_PLL_CTL8_M6DIV_BY_12	0xc
+#define PMU7_PLL_CTL8_M6DIV_BY_24	0x18
+#define PMU7_PLL_PLLCTL11		11
+#define PMU7_PLL_PLLCTL11_MASK		0xffffff00
+#define PMU7_PLL_PLLCTL11_VAL		0x22222200
+
+/* PMU rev 15 */
+#define PMU15_PLL_PLLCTL0		0
+#define PMU15_PLL_PC0_CLKSEL_MASK	0x00000003
+#define PMU15_PLL_PC0_CLKSEL_SHIFT	0
+#define PMU15_PLL_PC0_FREQTGT_MASK	0x003FFFFC
+#define PMU15_PLL_PC0_FREQTGT_SHIFT	2
+#define PMU15_PLL_PC0_PRESCALE_MASK	0x00C00000
+#define PMU15_PLL_PC0_PRESCALE_SHIFT	22
+#define PMU15_PLL_PC0_KPCTRL_MASK	0x07000000
+#define PMU15_PLL_PC0_KPCTRL_SHIFT	24
+#define PMU15_PLL_PC0_FCNTCTRL_MASK	0x38000000
+#define PMU15_PLL_PC0_FCNTCTRL_SHIFT	27
+#define PMU15_PLL_PC0_FDCMODE_MASK	0x40000000
+#define PMU15_PLL_PC0_FDCMODE_SHIFT	30
+#define PMU15_PLL_PC0_CTRLBIAS_MASK	0x80000000
+#define PMU15_PLL_PC0_CTRLBIAS_SHIFT	31
+
+#define PMU15_PLL_PLLCTL1			1
+#define PMU15_PLL_PC1_BIAS_CTLM_MASK		0x00000060
+#define PMU15_PLL_PC1_BIAS_CTLM_SHIFT		5
+#define PMU15_PLL_PC1_BIAS_CTLM_RST_MASK	0x00000040
+#define PMU15_PLL_PC1_BIAS_CTLM_RST_SHIFT	6
+#define PMU15_PLL_PC1_BIAS_SS_DIVR_MASK		0x0001FF80
+#define PMU15_PLL_PC1_BIAS_SS_DIVR_SHIFT	7
+#define PMU15_PLL_PC1_BIAS_SS_RSTVAL_MASK	0x03FE0000
+#define PMU15_PLL_PC1_BIAS_SS_RSTVAL_SHIFT	17
+#define PMU15_PLL_PC1_BIAS_INTG_BW_MASK		0x0C000000
+#define PMU15_PLL_PC1_BIAS_INTG_BW_SHIFT	26
+#define PMU15_PLL_PC1_BIAS_INTG_BYP_MASK	0x10000000
+#define PMU15_PLL_PC1_BIAS_INTG_BYP_SHIFT	28
+#define PMU15_PLL_PC1_OPENLP_EN_MASK		0x40000000
+#define PMU15_PLL_PC1_OPENLP_EN_SHIFT		30
+
+#define PMU15_PLL_PLLCTL2			2
+#define PMU15_PLL_PC2_CTEN_MASK			0x00000001
+#define PMU15_PLL_PC2_CTEN_SHIFT		0
+
+#define PMU15_PLL_PLLCTL3			3
+#define PMU15_PLL_PC3_DITHER_EN_MASK		0x00000001
+#define PMU15_PLL_PC3_DITHER_EN_SHIFT		0
+#define PMU15_PLL_PC3_DCOCTLSP_MASK		0xFE000000
+#define PMU15_PLL_PC3_DCOCTLSP_SHIFT		25
+#define PMU15_PLL_PC3_DCOCTLSP_DIV2EN_MASK	0x01
+#define PMU15_PLL_PC3_DCOCTLSP_DIV2EN_SHIFT	0
+#define PMU15_PLL_PC3_DCOCTLSP_CH0EN_MASK	0x02
+#define PMU15_PLL_PC3_DCOCTLSP_CH0EN_SHIFT	1
+#define PMU15_PLL_PC3_DCOCTLSP_CH1EN_MASK	0x04
+#define PMU15_PLL_PC3_DCOCTLSP_CH1EN_SHIFT	2
+#define PMU15_PLL_PC3_DCOCTLSP_CH0SEL_MASK	0x18
+#define PMU15_PLL_PC3_DCOCTLSP_CH0SEL_SHIFT	3
+#define PMU15_PLL_PC3_DCOCTLSP_CH1SEL_MASK	0x60
+#define PMU15_PLL_PC3_DCOCTLSP_CH1SEL_SHIFT	5
+#define PMU15_PLL_PC3_DCOCTLSP_CHSEL_OUTP_DIV1	0
+#define PMU15_PLL_PC3_DCOCTLSP_CHSEL_OUTP_DIV2	1
+#define PMU15_PLL_PC3_DCOCTLSP_CHSEL_OUTP_DIV3	2
+#define PMU15_PLL_PC3_DCOCTLSP_CHSEL_OUTP_DIV5	3
+
+#define PMU15_PLL_PLLCTL4			4
+#define PMU15_PLL_PC4_FLLCLK1_DIV_MASK		0x00000007
+#define PMU15_PLL_PC4_FLLCLK1_DIV_SHIFT		0
+#define PMU15_PLL_PC4_FLLCLK2_DIV_MASK		0x00000038
+#define PMU15_PLL_PC4_FLLCLK2_DIV_SHIFT		3
+#define PMU15_PLL_PC4_FLLCLK3_DIV_MASK		0x000001C0
+#define PMU15_PLL_PC4_FLLCLK3_DIV_SHIFT		6
+#define PMU15_PLL_PC4_DBGMODE_MASK		0x00000E00
+#define PMU15_PLL_PC4_DBGMODE_SHIFT		9
+#define PMU15_PLL_PC4_FLL480_CTLSP_LK_MASK	0x00001000
+#define PMU15_PLL_PC4_FLL480_CTLSP_LK_SHIFT	12
+#define PMU15_PLL_PC4_FLL480_CTLSP_MASK		0x000FE000
+#define PMU15_PLL_PC4_FLL480_CTLSP_SHIFT	13
+#define PMU15_PLL_PC4_DINPOL_MASK		0x00100000
+#define PMU15_PLL_PC4_DINPOL_SHIFT		20
+#define PMU15_PLL_PC4_CLKOUT_PD_MASK		0x00200000
+#define PMU15_PLL_PC4_CLKOUT_PD_SHIFT		21
+#define PMU15_PLL_PC4_CLKDIV2_PD_MASK		0x00400000
+#define PMU15_PLL_PC4_CLKDIV2_PD_SHIFT		22
+#define PMU15_PLL_PC4_CLKDIV4_PD_MASK		0x00800000
+#define PMU15_PLL_PC4_CLKDIV4_PD_SHIFT		23
+#define PMU15_PLL_PC4_CLKDIV8_PD_MASK		0x01000000
+#define PMU15_PLL_PC4_CLKDIV8_PD_SHIFT		24
+#define PMU15_PLL_PC4_CLKDIV16_PD_MASK		0x02000000
+#define PMU15_PLL_PC4_CLKDIV16_PD_SHIFT		25
+#define PMU15_PLL_PC4_TEST_EN_MASK		0x04000000
+#define PMU15_PLL_PC4_TEST_EN_SHIFT		26
+
+#define PMU15_PLL_PLLCTL5			5
+#define PMU15_PLL_PC5_FREQTGT_MASK		0x000FFFFF
+#define PMU15_PLL_PC5_FREQTGT_SHIFT		0
+#define PMU15_PLL_PC5_DCOCTLSP_MASK		0x07F00000
+#define PMU15_PLL_PC5_DCOCTLSP_SHIFT		20
+#define PMU15_PLL_PC5_PRESCALE_MASK		0x18000000
+#define PMU15_PLL_PC5_PRESCALE_SHIFT		27
+
+#define PMU15_PLL_PLLCTL6		6
+#define PMU15_PLL_PC6_FREQTGT_MASK	0x000FFFFF
+#define PMU15_PLL_PC6_FREQTGT_SHIFT	0
+#define PMU15_PLL_PC6_DCOCTLSP_MASK	0x07F00000
+#define PMU15_PLL_PC6_DCOCTLSP_SHIFT	20
+#define PMU15_PLL_PC6_PRESCALE_MASK	0x18000000
+#define PMU15_PLL_PC6_PRESCALE_SHIFT	27
+
+#define PMU15_FREQTGT_480_DEFAULT	0x19AB1
+#define PMU15_FREQTGT_492_DEFAULT	0x1A4F5
+#define PMU15_ARM_96MHZ			96000000	/* 96 Mhz */
+#define PMU15_ARM_98MHZ			98400000	/* 98.4 Mhz */
+#define PMU15_ARM_97MHZ			97000000	/* 97 Mhz */
+
+
+#define PMU17_PLLCTL2_NDIVTYPE_MASK		0x00000070
+#define PMU17_PLLCTL2_NDIVTYPE_SHIFT		4
+
+#define PMU17_PLLCTL2_NDIV_MODE_INT		0
+#define PMU17_PLLCTL2_NDIV_MODE_INT1B8		1
+#define PMU17_PLLCTL2_NDIV_MODE_MASH111		2
+#define PMU17_PLLCTL2_NDIV_MODE_MASH111B8	3
+
+#define PMU17_PLLCTL0_BBPLL_PWRDWN		0
+#define PMU17_PLLCTL0_BBPLL_DRST		3
+#define PMU17_PLLCTL0_BBPLL_DISBL_CLK		8
+
+/* PLL usage in 4716/47162 */
+#define	PMU4716_MAINPLL_PLL0		12
+
+/* PLL usage in 4335 */
+#define PMU4335_PLL0_PC2_P1DIV_MASK			0x000f0000
+#define PMU4335_PLL0_PC2_P1DIV_SHIFT		16
+#define PMU4335_PLL0_PC2_NDIV_INT_MASK		0xff800000
+#define PMU4335_PLL0_PC2_NDIV_INT_SHIFT		23
+#define PMU4335_PLL0_PC1_MDIV2_MASK			0x0000ff00
+#define PMU4335_PLL0_PC1_MDIV2_SHIFT		8
+
+
+/* PLL usage in 5356/5357 */
+#define	PMU5356_MAINPLL_PLL0		0
+#define	PMU5357_MAINPLL_PLL0		0
+
+/* 4716/47162 resources */
+#define RES4716_PROC_PLL_ON		0x00000040
+#define RES4716_PROC_HT_AVAIL		0x00000080
+
+/* 4716/4717/4718 Chip specific ChipControl register bits */
+#define CCTRL_471X_I2S_PINS_ENABLE	0x0080 /* I2S pins off by default, shared w/ pflash */
+
+/* 5357 Chip specific ChipControl register bits */
+/* 2nd - 32-bit reg */
+#define CCTRL_5357_I2S_PINS_ENABLE	0x00040000 /* I2S pins enable */
+#define CCTRL_5357_I2CSPI_PINS_ENABLE	0x00080000 /* I2C/SPI pins enable */
+
+/* 5354 resources */
+#define RES5354_EXT_SWITCHER_PWM	0	/* 0x00001 */
+#define RES5354_BB_SWITCHER_PWM		1	/* 0x00002 */
+#define RES5354_BB_SWITCHER_BURST	2	/* 0x00004 */
+#define RES5354_BB_EXT_SWITCHER_BURST	3	/* 0x00008 */
+#define RES5354_ILP_REQUEST		4	/* 0x00010 */
+#define RES5354_RADIO_SWITCHER_PWM	5	/* 0x00020 */
+#define RES5354_RADIO_SWITCHER_BURST	6	/* 0x00040 */
+#define RES5354_ROM_SWITCH		7	/* 0x00080 */
+#define RES5354_PA_REF_LDO		8	/* 0x00100 */
+#define RES5354_RADIO_LDO		9	/* 0x00200 */
+#define RES5354_AFE_LDO			10	/* 0x00400 */
+#define RES5354_PLL_LDO			11	/* 0x00800 */
+#define RES5354_BG_FILTBYP		12	/* 0x01000 */
+#define RES5354_TX_FILTBYP		13	/* 0x02000 */
+#define RES5354_RX_FILTBYP		14	/* 0x04000 */
+#define RES5354_XTAL_PU			15	/* 0x08000 */
+#define RES5354_XTAL_EN			16	/* 0x10000 */
+#define RES5354_BB_PLL_FILTBYP		17	/* 0x20000 */
+#define RES5354_RF_PLL_FILTBYP		18	/* 0x40000 */
+#define RES5354_BB_PLL_PU		19	/* 0x80000 */
+
+/* 5357 Chip specific ChipControl register bits */
+#define CCTRL5357_EXTPA                 (1<<14) /* extPA in ChipControl 1, bit 14 */
+#define CCTRL5357_ANT_MUX_2o3		(1<<15) /* 2o3 in ChipControl 1, bit 15 */
+#define CCTRL5357_NFLASH		(1<<16) /* Nandflash in ChipControl 1, bit 16 */
+
+/* 43217 Chip specific ChipControl register bits */
+#define CCTRL43217_EXTPA_C0             (1<<13) /* core0 extPA in ChipControl 1, bit 13 */
+#define CCTRL43217_EXTPA_C1             (1<<8)  /* core1 extPA in ChipControl 1, bit 8 */
+
+/* 43228 Chip specific ChipControl register bits */
+#define CCTRL43228_EXTPA_C0             (1<<14) /* core1 extPA in ChipControl 1, bit 14 */
+#define CCTRL43228_EXTPA_C1             (1<<9)  /* core0 extPA in ChipControl 1, bit 1 */
+
+/* 4328 resources */
+#define RES4328_EXT_SWITCHER_PWM	0	/* 0x00001 */
+#define RES4328_BB_SWITCHER_PWM		1	/* 0x00002 */
+#define RES4328_BB_SWITCHER_BURST	2	/* 0x00004 */
+#define RES4328_BB_EXT_SWITCHER_BURST	3	/* 0x00008 */
+#define RES4328_ILP_REQUEST		4	/* 0x00010 */
+#define RES4328_RADIO_SWITCHER_PWM	5	/* 0x00020 */
+#define RES4328_RADIO_SWITCHER_BURST	6	/* 0x00040 */
+#define RES4328_ROM_SWITCH		7	/* 0x00080 */
+#define RES4328_PA_REF_LDO		8	/* 0x00100 */
+#define RES4328_RADIO_LDO		9	/* 0x00200 */
+#define RES4328_AFE_LDO			10	/* 0x00400 */
+#define RES4328_PLL_LDO			11	/* 0x00800 */
+#define RES4328_BG_FILTBYP		12	/* 0x01000 */
+#define RES4328_TX_FILTBYP		13	/* 0x02000 */
+#define RES4328_RX_FILTBYP		14	/* 0x04000 */
+#define RES4328_XTAL_PU			15	/* 0x08000 */
+#define RES4328_XTAL_EN			16	/* 0x10000 */
+#define RES4328_BB_PLL_FILTBYP		17	/* 0x20000 */
+#define RES4328_RF_PLL_FILTBYP		18	/* 0x40000 */
+#define RES4328_BB_PLL_PU		19	/* 0x80000 */
+
+/* 4325 A0/A1 resources */
+#define RES4325_BUCK_BOOST_BURST	0	/* 0x00000001 */
+#define RES4325_CBUCK_BURST		1	/* 0x00000002 */
+#define RES4325_CBUCK_PWM		2	/* 0x00000004 */
+#define RES4325_CLDO_CBUCK_BURST	3	/* 0x00000008 */
+#define RES4325_CLDO_CBUCK_PWM		4	/* 0x00000010 */
+#define RES4325_BUCK_BOOST_PWM		5	/* 0x00000020 */
+#define RES4325_ILP_REQUEST		6	/* 0x00000040 */
+#define RES4325_ABUCK_BURST		7	/* 0x00000080 */
+#define RES4325_ABUCK_PWM		8	/* 0x00000100 */
+#define RES4325_LNLDO1_PU		9	/* 0x00000200 */
+#define RES4325_OTP_PU			10	/* 0x00000400 */
+#define RES4325_LNLDO3_PU		11	/* 0x00000800 */
+#define RES4325_LNLDO4_PU		12	/* 0x00001000 */
+#define RES4325_XTAL_PU			13	/* 0x00002000 */
+#define RES4325_ALP_AVAIL		14	/* 0x00004000 */
+#define RES4325_RX_PWRSW_PU		15	/* 0x00008000 */
+#define RES4325_TX_PWRSW_PU		16	/* 0x00010000 */
+#define RES4325_RFPLL_PWRSW_PU		17	/* 0x00020000 */
+#define RES4325_LOGEN_PWRSW_PU		18	/* 0x00040000 */
+#define RES4325_AFE_PWRSW_PU		19	/* 0x00080000 */
+#define RES4325_BBPLL_PWRSW_PU		20	/* 0x00100000 */
+#define RES4325_HT_AVAIL		21	/* 0x00200000 */
+
+/* 4325 B0/C0 resources */
+#define RES4325B0_CBUCK_LPOM		1	/* 0x00000002 */
+#define RES4325B0_CBUCK_BURST		2	/* 0x00000004 */
+#define RES4325B0_CBUCK_PWM		3	/* 0x00000008 */
+#define RES4325B0_CLDO_PU		4	/* 0x00000010 */
+
+/* 4325 C1 resources */
+#define RES4325C1_LNLDO2_PU		12	/* 0x00001000 */
+
+/* 4325 chip-specific ChipStatus register bits */
+#define CST4325_SPROM_OTP_SEL_MASK	0x00000003
+#define CST4325_DEFCIS_SEL		0	/* OTP is powered up, use def. CIS, no SPROM */
+#define CST4325_SPROM_SEL		1	/* OTP is powered up, SPROM is present */
+#define CST4325_OTP_SEL			2	/* OTP is powered up, no SPROM */
+#define CST4325_OTP_PWRDN		3	/* OTP is powered down, SPROM is present */
+#define CST4325_SDIO_USB_MODE_MASK	0x00000004
+#define CST4325_SDIO_USB_MODE_SHIFT	2
+#define CST4325_RCAL_VALID_MASK		0x00000008
+#define CST4325_RCAL_VALID_SHIFT	3
+#define CST4325_RCAL_VALUE_MASK		0x000001f0
+#define CST4325_RCAL_VALUE_SHIFT	4
+#define CST4325_PMUTOP_2B_MASK 		0x00000200	/* 1 for 2b, 0 for to 2a */
+#define CST4325_PMUTOP_2B_SHIFT   	9
+
+#define RES4329_RESERVED0		0	/* 0x00000001 */
+#define RES4329_CBUCK_LPOM		1	/* 0x00000002 */
+#define RES4329_CBUCK_BURST		2	/* 0x00000004 */
+#define RES4329_CBUCK_PWM		3	/* 0x00000008 */
+#define RES4329_CLDO_PU			4	/* 0x00000010 */
+#define RES4329_PALDO_PU		5	/* 0x00000020 */
+#define RES4329_ILP_REQUEST		6	/* 0x00000040 */
+#define RES4329_RESERVED7		7	/* 0x00000080 */
+#define RES4329_RESERVED8		8	/* 0x00000100 */
+#define RES4329_LNLDO1_PU		9	/* 0x00000200 */
+#define RES4329_OTP_PU			10	/* 0x00000400 */
+#define RES4329_RESERVED11		11	/* 0x00000800 */
+#define RES4329_LNLDO2_PU		12	/* 0x00001000 */
+#define RES4329_XTAL_PU			13	/* 0x00002000 */
+#define RES4329_ALP_AVAIL		14	/* 0x00004000 */
+#define RES4329_RX_PWRSW_PU		15	/* 0x00008000 */
+#define RES4329_TX_PWRSW_PU		16	/* 0x00010000 */
+#define RES4329_RFPLL_PWRSW_PU		17	/* 0x00020000 */
+#define RES4329_LOGEN_PWRSW_PU		18	/* 0x00040000 */
+#define RES4329_AFE_PWRSW_PU		19	/* 0x00080000 */
+#define RES4329_BBPLL_PWRSW_PU		20	/* 0x00100000 */
+#define RES4329_HT_AVAIL		21	/* 0x00200000 */
+
+#define CST4329_SPROM_OTP_SEL_MASK	0x00000003
+#define CST4329_DEFCIS_SEL		0	/* OTP is powered up, use def. CIS, no SPROM */
+#define CST4329_SPROM_SEL		1	/* OTP is powered up, SPROM is present */
+#define CST4329_OTP_SEL			2	/* OTP is powered up, no SPROM */
+#define CST4329_OTP_PWRDN		3	/* OTP is powered down, SPROM is present */
+#define CST4329_SPI_SDIO_MODE_MASK	0x00000004
+#define CST4329_SPI_SDIO_MODE_SHIFT	2
+
+/* 4312 chip-specific ChipStatus register bits */
+#define CST4312_SPROM_OTP_SEL_MASK	0x00000003
+#define CST4312_DEFCIS_SEL		0	/* OTP is powered up, use def. CIS, no SPROM */
+#define CST4312_SPROM_SEL		1	/* OTP is powered up, SPROM is present */
+#define CST4312_OTP_SEL			2	/* OTP is powered up, no SPROM */
+#define CST4312_OTP_BAD			3	/* OTP is broken, SPROM is present */
+
+/* 4312 resources (all PMU chips with little memory constraint) */
+#define RES4312_SWITCHER_BURST		0	/* 0x00000001 */
+#define RES4312_SWITCHER_PWM    	1	/* 0x00000002 */
+#define RES4312_PA_REF_LDO		2	/* 0x00000004 */
+#define RES4312_CORE_LDO_BURST		3	/* 0x00000008 */
+#define RES4312_CORE_LDO_PWM		4	/* 0x00000010 */
+#define RES4312_RADIO_LDO		5	/* 0x00000020 */
+#define RES4312_ILP_REQUEST		6	/* 0x00000040 */
+#define RES4312_BG_FILTBYP		7	/* 0x00000080 */
+#define RES4312_TX_FILTBYP		8	/* 0x00000100 */
+#define RES4312_RX_FILTBYP		9	/* 0x00000200 */
+#define RES4312_XTAL_PU			10	/* 0x00000400 */
+#define RES4312_ALP_AVAIL		11	/* 0x00000800 */
+#define RES4312_BB_PLL_FILTBYP		12	/* 0x00001000 */
+#define RES4312_RF_PLL_FILTBYP		13	/* 0x00002000 */
+#define RES4312_HT_AVAIL		14	/* 0x00004000 */
+
+/* 4322 resources */
+#define RES4322_RF_LDO			0
+#define RES4322_ILP_REQUEST		1
+#define RES4322_XTAL_PU			2
+#define RES4322_ALP_AVAIL		3
+#define RES4322_SI_PLL_ON		4
+#define RES4322_HT_SI_AVAIL		5
+#define RES4322_PHY_PLL_ON		6
+#define RES4322_HT_PHY_AVAIL		7
+#define RES4322_OTP_PU			8
+
+/* 4322 chip-specific ChipStatus register bits */
+#define CST4322_XTAL_FREQ_20_40MHZ	0x00000020
+#define CST4322_SPROM_OTP_SEL_MASK	0x000000c0
+#define CST4322_SPROM_OTP_SEL_SHIFT	6
+#define CST4322_NO_SPROM_OTP		0	/* no OTP, no SPROM */
+#define CST4322_SPROM_PRESENT		1	/* SPROM is present */
+#define CST4322_OTP_PRESENT		2	/* OTP is present */
+#define CST4322_PCI_OR_USB		0x00000100
+#define CST4322_BOOT_MASK		0x00000600
+#define CST4322_BOOT_SHIFT		9
+#define CST4322_BOOT_FROM_SRAM		0	/* boot from SRAM, ARM in reset */
+#define CST4322_BOOT_FROM_ROM		1	/* boot from ROM */
+#define CST4322_BOOT_FROM_FLASH		2	/* boot from FLASH */
+#define CST4322_BOOT_FROM_INVALID	3
+#define CST4322_ILP_DIV_EN		0x00000800
+#define CST4322_FLASH_TYPE_MASK		0x00001000
+#define CST4322_FLASH_TYPE_SHIFT	12
+#define CST4322_FLASH_TYPE_SHIFT_ST	0	/* ST serial FLASH */
+#define CST4322_FLASH_TYPE_SHIFT_ATMEL	1	/* ATMEL flash */
+#define CST4322_ARM_TAP_SEL		0x00002000
+#define CST4322_RES_INIT_MODE_MASK	0x0000c000
+#define CST4322_RES_INIT_MODE_SHIFT	14
+#define CST4322_RES_INIT_MODE_ILPAVAIL	0	/* resinitmode: ILP available */
+#define CST4322_RES_INIT_MODE_ILPREQ	1	/* resinitmode: ILP request */
+#define CST4322_RES_INIT_MODE_ALPAVAIL	2	/* resinitmode: ALP available */
+#define CST4322_RES_INIT_MODE_HTAVAIL	3	/* resinitmode: HT available */
+#define CST4322_PCIPLLCLK_GATING	0x00010000
+#define CST4322_CLK_SWITCH_PCI_TO_ALP	0x00020000
+#define CST4322_PCI_CARDBUS_MODE	0x00040000
+
+/* 43224 chip-specific ChipControl register bits */
+#define CCTRL43224_GPIO_TOGGLE          0x8000 /* gpio[3:0] pins as btcoex or s/w gpio */
+#define CCTRL_43224A0_12MA_LED_DRIVE    0x00F000F0 /* 12 mA drive strength */
+#define CCTRL_43224B0_12MA_LED_DRIVE    0xF0    /* 12 mA drive strength for later 43224s */
+
+/* 43236 resources */
+#define RES43236_REGULATOR		0
+#define RES43236_ILP_REQUEST		1
+#define RES43236_XTAL_PU		2
+#define RES43236_ALP_AVAIL		3
+#define RES43236_SI_PLL_ON		4
+#define RES43236_HT_SI_AVAIL		5
+
+/* 43236 chip-specific ChipControl register bits */
+#define CCTRL43236_BT_COEXIST		(1<<0)	/* 0 disable */
+#define CCTRL43236_SECI			(1<<1)	/* 0 SECI is disabled (JATG functional) */
+#define CCTRL43236_EXT_LNA		(1<<2)	/* 0 disable */
+#define CCTRL43236_ANT_MUX_2o3          (1<<3)	/* 2o3 mux, chipcontrol bit 3 */
+#define CCTRL43236_GSIO			(1<<4)	/* 0 disable */
+
+/* 43236 Chip specific ChipStatus register bits */
+#define CST43236_SFLASH_MASK		0x00000040
+#define CST43236_OTP_SEL_MASK		0x00000080
+#define CST43236_OTP_SEL_SHIFT		7
+#define CST43236_HSIC_MASK		0x00000100	/* USB/HSIC */
+#define CST43236_BP_CLK			0x00000200	/* 120/96Mbps */
+#define CST43236_BOOT_MASK		0x00001800
+#define CST43236_BOOT_SHIFT		11
+#define CST43236_BOOT_FROM_SRAM		0	/* boot from SRAM, ARM in reset */
+#define CST43236_BOOT_FROM_ROM		1	/* boot from ROM */
+#define CST43236_BOOT_FROM_FLASH	2	/* boot from FLASH */
+#define CST43236_BOOT_FROM_INVALID	3
+
+/* 43237 resources */
+#define RES43237_REGULATOR		0
+#define RES43237_ILP_REQUEST		1
+#define RES43237_XTAL_PU		2
+#define RES43237_ALP_AVAIL		3
+#define RES43237_SI_PLL_ON		4
+#define RES43237_HT_SI_AVAIL		5
+
+/* 43237 chip-specific ChipControl register bits */
+#define CCTRL43237_BT_COEXIST		(1<<0)	/* 0 disable */
+#define CCTRL43237_SECI			(1<<1)	/* 0 SECI is disabled (JATG functional) */
+#define CCTRL43237_EXT_LNA		(1<<2)	/* 0 disable */
+#define CCTRL43237_ANT_MUX_2o3          (1<<3)	/* 2o3 mux, chipcontrol bit 3 */
+#define CCTRL43237_GSIO			(1<<4)	/* 0 disable */
+
+/* 43237 Chip specific ChipStatus register bits */
+#define CST43237_SFLASH_MASK		0x00000040
+#define CST43237_OTP_SEL_MASK		0x00000080
+#define CST43237_OTP_SEL_SHIFT		7
+#define CST43237_HSIC_MASK		0x00000100	/* USB/HSIC */
+#define CST43237_BP_CLK			0x00000200	/* 120/96Mbps */
+#define CST43237_BOOT_MASK		0x00001800
+#define CST43237_BOOT_SHIFT		11
+#define CST43237_BOOT_FROM_SRAM		0	/* boot from SRAM, ARM in reset */
+#define CST43237_BOOT_FROM_ROM		1	/* boot from ROM */
+#define CST43237_BOOT_FROM_FLASH	2	/* boot from FLASH */
+#define CST43237_BOOT_FROM_INVALID	3
+
+/* 43239 resources */
+#define RES43239_OTP_PU			9
+#define RES43239_MACPHY_CLKAVAIL	23
+#define RES43239_HT_AVAIL		24
+
+/* 43239 Chip specific ChipStatus register bits */
+#define CST43239_SPROM_MASK			0x00000002
+#define CST43239_SFLASH_MASK		0x00000004
+#define	CST43239_RES_INIT_MODE_SHIFT	7
+#define	CST43239_RES_INIT_MODE_MASK		0x000001f0
+#define CST43239_CHIPMODE_SDIOD(cs)	((cs) & (1 << 15))	/* SDIO || gSPI */
+#define CST43239_CHIPMODE_USB20D(cs)	(~(cs) & (1 << 15))	/* USB || USBDA */
+#define CST43239_CHIPMODE_SDIO(cs)	(((cs) & (1 << 0)) == 0)	/* SDIO */
+#define CST43239_CHIPMODE_GSPI(cs)	(((cs) & (1 << 0)) == (1 << 0))	/* gSPI */
+
+/* 4324 resources */
+/* 43242 use same PMU as 4324 */
+#define RES4324_LPLDO_PU			0
+#define RES4324_RESET_PULLDN_DIS		1
+#define RES4324_PMU_BG_PU			2
+#define RES4324_HSIC_LDO_PU			3
+#define RES4324_CBUCK_LPOM_PU			4
+#define RES4324_CBUCK_PFM_PU			5
+#define RES4324_CLDO_PU				6
+#define RES4324_LPLDO2_LVM			7
+#define RES4324_LNLDO1_PU			8
+#define RES4324_LNLDO2_PU			9
+#define RES4324_LDO3P3_PU			10
+#define RES4324_OTP_PU				11
+#define RES4324_XTAL_PU				12
+#define RES4324_BBPLL_PU			13
+#define RES4324_LQ_AVAIL			14
+#define RES4324_WL_CORE_READY			17
+#define RES4324_ILP_REQ				18
+#define RES4324_ALP_AVAIL			19
+#define RES4324_PALDO_PU			20
+#define RES4324_RADIO_PU			21
+#define RES4324_SR_CLK_STABLE			22
+#define RES4324_SR_SAVE_RESTORE			23
+#define RES4324_SR_PHY_PWRSW			24
+#define RES4324_SR_PHY_PIC			25
+#define RES4324_SR_SUBCORE_PWRSW		26
+#define RES4324_SR_SUBCORE_PIC			27
+#define RES4324_SR_MEM_PM0			28
+#define RES4324_HT_AVAIL			29
+#define RES4324_MACPHY_CLKAVAIL			30
+
+/* 4324 Chip specific ChipStatus register bits */
+#define CST4324_SPROM_MASK			0x00000080
+#define CST4324_SFLASH_MASK			0x00400000
+#define	CST4324_RES_INIT_MODE_SHIFT	10
+#define	CST4324_RES_INIT_MODE_MASK	0x00000c00
+#define CST4324_CHIPMODE_MASK		0x7
+#define CST4324_CHIPMODE_SDIOD(cs)	((~(cs)) & (1 << 2))	/* SDIO || gSPI */
+#define CST4324_CHIPMODE_USB20D(cs)	(((cs) & CST4324_CHIPMODE_MASK) == 0x6)	/* USB || USBDA */
+
+/* 43242 Chip specific ChipStatus register bits */
+#define CST43242_SFLASH_MASK                    0x00000008
+#define CST43242_SR_HALT			(1<<25)
+#define CST43242_SR_CHIP_STATUS_2		27 /* bit 27 */
+
+/* 4331 resources */
+#define RES4331_REGULATOR		0
+#define RES4331_ILP_REQUEST		1
+#define RES4331_XTAL_PU			2
+#define RES4331_ALP_AVAIL		3
+#define RES4331_SI_PLL_ON		4
+#define RES4331_HT_SI_AVAIL		5
+
+/* 4331 chip-specific ChipControl register bits */
+#define CCTRL4331_BT_COEXIST		(1<<0)	/* 0 disable */
+#define CCTRL4331_SECI			(1<<1)	/* 0 SECI is disabled (JATG functional) */
+#define CCTRL4331_EXT_LNA_G		(1<<2)	/* 0 disable */
+#define CCTRL4331_SPROM_GPIO13_15       (1<<3)  /* sprom/gpio13-15 mux */
+#define CCTRL4331_EXTPA_EN		(1<<4)	/* 0 ext pa disable, 1 ext pa enabled */
+#define CCTRL4331_GPIOCLK_ON_SPROMCS	(1<<5)	/* set drive out GPIO_CLK on sprom_cs pin */
+#define CCTRL4331_PCIE_MDIO_ON_SPROMCS	(1<<6)	/* use sprom_cs pin as PCIE mdio interface */
+#define CCTRL4331_EXTPA_ON_GPIO2_5	(1<<7)	/* aband extpa will be at gpio2/5 and sprom_dout */
+#define CCTRL4331_OVR_PIPEAUXCLKEN	(1<<8)	/* override core control on pipe_AuxClkEnable */
+#define CCTRL4331_OVR_PIPEAUXPWRDOWN	(1<<9)	/* override core control on pipe_AuxPowerDown */
+#define CCTRL4331_PCIE_AUXCLKEN		(1<<10)	/* pcie_auxclkenable */
+#define CCTRL4331_PCIE_PIPE_PLLDOWN	(1<<11)	/* pcie_pipe_pllpowerdown */
+#define CCTRL4331_EXTPA_EN2		(1<<12)	/* 0 ext pa disable, 1 ext pa enabled */
+#define CCTRL4331_EXT_LNA_A		(1<<13)	/* 0 disable */
+#define CCTRL4331_BT_SHD0_ON_GPIO4	(1<<16)	/* enable bt_shd0 at gpio4 */
+#define CCTRL4331_BT_SHD1_ON_GPIO5	(1<<17)	/* enable bt_shd1 at gpio5 */
+#define CCTRL4331_EXTPA_ANA_EN		(1<<24)	/* 0 ext pa disable, 1 ext pa enabled */
+
+/* 4331 Chip specific ChipStatus register bits */
+#define	CST4331_XTAL_FREQ		0x00000001	/* crystal frequency 20/40Mhz */
+#define	CST4331_SPROM_OTP_SEL_MASK	0x00000006
+#define	CST4331_SPROM_OTP_SEL_SHIFT	1
+#define	CST4331_SPROM_PRESENT		0x00000002
+#define	CST4331_OTP_PRESENT		0x00000004
+#define	CST4331_LDO_RF			0x00000008
+#define	CST4331_LDO_PAR			0x00000010
+
+/* 4315 resource */
+#define RES4315_CBUCK_LPOM		1	/* 0x00000002 */
+#define RES4315_CBUCK_BURST		2	/* 0x00000004 */
+#define RES4315_CBUCK_PWM		3	/* 0x00000008 */
+#define RES4315_CLDO_PU			4	/* 0x00000010 */
+#define RES4315_PALDO_PU		5	/* 0x00000020 */
+#define RES4315_ILP_REQUEST		6	/* 0x00000040 */
+#define RES4315_LNLDO1_PU		9	/* 0x00000200 */
+#define RES4315_OTP_PU			10	/* 0x00000400 */
+#define RES4315_LNLDO2_PU		12	/* 0x00001000 */
+#define RES4315_XTAL_PU			13	/* 0x00002000 */
+#define RES4315_ALP_AVAIL		14	/* 0x00004000 */
+#define RES4315_RX_PWRSW_PU		15	/* 0x00008000 */
+#define RES4315_TX_PWRSW_PU		16	/* 0x00010000 */
+#define RES4315_RFPLL_PWRSW_PU		17	/* 0x00020000 */
+#define RES4315_LOGEN_PWRSW_PU		18	/* 0x00040000 */
+#define RES4315_AFE_PWRSW_PU		19	/* 0x00080000 */
+#define RES4315_BBPLL_PWRSW_PU		20	/* 0x00100000 */
+#define RES4315_HT_AVAIL		21	/* 0x00200000 */
+
+/* 4315 chip-specific ChipStatus register bits */
+#define CST4315_SPROM_OTP_SEL_MASK	0x00000003	/* gpio [7:6], SDIO CIS selection */
+#define CST4315_DEFCIS_SEL		0x00000000	/* use default CIS, OTP is powered up */
+#define CST4315_SPROM_SEL		0x00000001	/* use SPROM, OTP is powered up */
+#define CST4315_OTP_SEL			0x00000002	/* use OTP, OTP is powered up */
+#define CST4315_OTP_PWRDN		0x00000003	/* use SPROM, OTP is powered down */
+#define CST4315_SDIO_MODE		0x00000004	/* gpio [8], sdio/usb mode */
+#define CST4315_RCAL_VALID		0x00000008
+#define CST4315_RCAL_VALUE_MASK		0x000001f0
+#define CST4315_RCAL_VALUE_SHIFT	4
+#define CST4315_PALDO_EXTPNP		0x00000200	/* PALDO is configured with external PNP */
+#define CST4315_CBUCK_MODE_MASK		0x00000c00
+#define CST4315_CBUCK_MODE_BURST	0x00000400
+#define CST4315_CBUCK_MODE_LPBURST	0x00000c00
+
+/* 4319 resources */
+#define RES4319_CBUCK_LPOM		1	/* 0x00000002 */
+#define RES4319_CBUCK_BURST		2	/* 0x00000004 */
+#define RES4319_CBUCK_PWM		3	/* 0x00000008 */
+#define RES4319_CLDO_PU			4	/* 0x00000010 */
+#define RES4319_PALDO_PU		5	/* 0x00000020 */
+#define RES4319_ILP_REQUEST		6	/* 0x00000040 */
+#define RES4319_LNLDO1_PU		9	/* 0x00000200 */
+#define RES4319_OTP_PU			10	/* 0x00000400 */
+#define RES4319_LNLDO2_PU		12	/* 0x00001000 */
+#define RES4319_XTAL_PU			13	/* 0x00002000 */
+#define RES4319_ALP_AVAIL		14	/* 0x00004000 */
+#define RES4319_RX_PWRSW_PU		15	/* 0x00008000 */
+#define RES4319_TX_PWRSW_PU		16	/* 0x00010000 */
+#define RES4319_RFPLL_PWRSW_PU		17	/* 0x00020000 */
+#define RES4319_LOGEN_PWRSW_PU		18	/* 0x00040000 */
+#define RES4319_AFE_PWRSW_PU		19	/* 0x00080000 */
+#define RES4319_BBPLL_PWRSW_PU		20	/* 0x00100000 */
+#define RES4319_HT_AVAIL		21	/* 0x00200000 */
+
+/* 4319 chip-specific ChipStatus register bits */
+#define	CST4319_SPI_CPULESSUSB		0x00000001
+#define	CST4319_SPI_CLK_POL		0x00000002
+#define	CST4319_SPI_CLK_PH		0x00000008
+#define	CST4319_SPROM_OTP_SEL_MASK	0x000000c0	/* gpio [7:6], SDIO CIS selection */
+#define	CST4319_SPROM_OTP_SEL_SHIFT	6
+#define	CST4319_DEFCIS_SEL		0x00000000	/* use default CIS, OTP is powered up */
+#define	CST4319_SPROM_SEL		0x00000040	/* use SPROM, OTP is powered up */
+#define	CST4319_OTP_SEL			0x00000080      /* use OTP, OTP is powered up */
+#define	CST4319_OTP_PWRDN		0x000000c0      /* use SPROM, OTP is powered down */
+#define	CST4319_SDIO_USB_MODE		0x00000100	/* gpio [8], sdio/usb mode */
+#define	CST4319_REMAP_SEL_MASK		0x00000600
+#define	CST4319_ILPDIV_EN		0x00000800
+#define	CST4319_XTAL_PD_POL		0x00001000
+#define	CST4319_LPO_SEL			0x00002000
+#define	CST4319_RES_INIT_MODE		0x0000c000
+#define	CST4319_PALDO_EXTPNP		0x00010000	/* PALDO is configured with external PNP */
+#define	CST4319_CBUCK_MODE_MASK		0x00060000
+#define CST4319_CBUCK_MODE_BURST	0x00020000
+#define CST4319_CBUCK_MODE_LPBURST	0x00060000
+#define	CST4319_RCAL_VALID		0x01000000
+#define	CST4319_RCAL_VALUE_MASK		0x3e000000
+#define	CST4319_RCAL_VALUE_SHIFT	25
+
+#define PMU1_PLL0_CHIPCTL0		0
+#define PMU1_PLL0_CHIPCTL1		1
+#define PMU1_PLL0_CHIPCTL2		2
+#define CCTL_4319USB_XTAL_SEL_MASK	0x00180000
+#define CCTL_4319USB_XTAL_SEL_SHIFT	19
+#define CCTL_4319USB_48MHZ_PLL_SEL	1
+#define CCTL_4319USB_24MHZ_PLL_SEL	2
+
+/* PMU resources for 4336 */
+#define	RES4336_CBUCK_LPOM		0
+#define	RES4336_CBUCK_BURST		1
+#define	RES4336_CBUCK_LP_PWM		2
+#define	RES4336_CBUCK_PWM		3
+#define	RES4336_CLDO_PU			4
+#define	RES4336_DIS_INT_RESET_PD	5
+#define	RES4336_ILP_REQUEST		6
+#define	RES4336_LNLDO_PU		7
+#define	RES4336_LDO3P3_PU		8
+#define	RES4336_OTP_PU			9
+#define	RES4336_XTAL_PU			10
+#define	RES4336_ALP_AVAIL		11
+#define	RES4336_RADIO_PU		12
+#define	RES4336_BG_PU			13
+#define	RES4336_VREG1p4_PU_PU		14
+#define	RES4336_AFE_PWRSW_PU		15
+#define	RES4336_RX_PWRSW_PU		16
+#define	RES4336_TX_PWRSW_PU		17
+#define	RES4336_BB_PWRSW_PU		18
+#define	RES4336_SYNTH_PWRSW_PU		19
+#define	RES4336_MISC_PWRSW_PU		20
+#define	RES4336_LOGEN_PWRSW_PU		21
+#define	RES4336_BBPLL_PWRSW_PU		22
+#define	RES4336_MACPHY_CLKAVAIL		23
+#define	RES4336_HT_AVAIL		24
+#define	RES4336_RSVD			25
+
+/* 4336 chip-specific ChipStatus register bits */
+#define	CST4336_SPI_MODE_MASK		0x00000001
+#define	CST4336_SPROM_PRESENT		0x00000002
+#define	CST4336_OTP_PRESENT		0x00000004
+#define	CST4336_ARMREMAP_0		0x00000008
+#define	CST4336_ILPDIV_EN_MASK		0x00000010
+#define	CST4336_ILPDIV_EN_SHIFT		4
+#define	CST4336_XTAL_PD_POL_MASK	0x00000020
+#define	CST4336_XTAL_PD_POL_SHIFT	5
+#define	CST4336_LPO_SEL_MASK		0x00000040
+#define	CST4336_LPO_SEL_SHIFT		6
+#define	CST4336_RES_INIT_MODE_MASK	0x00000180
+#define	CST4336_RES_INIT_MODE_SHIFT	7
+#define	CST4336_CBUCK_MODE_MASK		0x00000600
+#define	CST4336_CBUCK_MODE_SHIFT	9
+
+/* 4336 Chip specific PMU ChipControl register bits */
+#define PCTL_4336_SERIAL_ENAB	(1  << 24)
+
+/* 4330 resources */
+#define	RES4330_CBUCK_LPOM		0
+#define	RES4330_CBUCK_BURST		1
+#define	RES4330_CBUCK_LP_PWM		2
+#define	RES4330_CBUCK_PWM		3
+#define	RES4330_CLDO_PU			4
+#define	RES4330_DIS_INT_RESET_PD	5
+#define	RES4330_ILP_REQUEST		6
+#define	RES4330_LNLDO_PU		7
+#define	RES4330_LDO3P3_PU		8
+#define	RES4330_OTP_PU			9
+#define	RES4330_XTAL_PU			10
+#define	RES4330_ALP_AVAIL		11
+#define	RES4330_RADIO_PU		12
+#define	RES4330_BG_PU			13
+#define	RES4330_VREG1p4_PU_PU		14
+#define	RES4330_AFE_PWRSW_PU		15
+#define	RES4330_RX_PWRSW_PU		16
+#define	RES4330_TX_PWRSW_PU		17
+#define	RES4330_BB_PWRSW_PU		18
+#define	RES4330_SYNTH_PWRSW_PU		19
+#define	RES4330_MISC_PWRSW_PU		20
+#define	RES4330_LOGEN_PWRSW_PU		21
+#define	RES4330_BBPLL_PWRSW_PU		22
+#define	RES4330_MACPHY_CLKAVAIL		23
+#define	RES4330_HT_AVAIL		24
+#define	RES4330_5gRX_PWRSW_PU		25
+#define	RES4330_5gTX_PWRSW_PU		26
+#define	RES4330_5g_LOGEN_PWRSW_PU	27
+
+/* 4330 chip-specific ChipStatus register bits */
+#define CST4330_CHIPMODE_SDIOD(cs)	(((cs) & 0x7) < 6)	/* SDIO || gSPI */
+#define CST4330_CHIPMODE_USB20D(cs)	(((cs) & 0x7) >= 6)	/* USB || USBDA */
+#define CST4330_CHIPMODE_SDIO(cs)	(((cs) & 0x4) == 0)	/* SDIO */
+#define CST4330_CHIPMODE_GSPI(cs)	(((cs) & 0x6) == 4)	/* gSPI */
+#define CST4330_CHIPMODE_USB(cs)	(((cs) & 0x7) == 6)	/* USB packet-oriented */
+#define CST4330_CHIPMODE_USBDA(cs)	(((cs) & 0x7) == 7)	/* USB Direct Access */
+#define	CST4330_OTP_PRESENT		0x00000010
+#define	CST4330_LPO_AUTODET_EN		0x00000020
+#define	CST4330_ARMREMAP_0		0x00000040
+#define	CST4330_SPROM_PRESENT		0x00000080	/* takes priority over OTP if both set */
+#define	CST4330_ILPDIV_EN		0x00000100
+#define	CST4330_LPO_SEL			0x00000200
+#define	CST4330_RES_INIT_MODE_SHIFT	10
+#define	CST4330_RES_INIT_MODE_MASK	0x00000c00
+#define CST4330_CBUCK_MODE_SHIFT	12
+#define CST4330_CBUCK_MODE_MASK		0x00003000
+#define	CST4330_CBUCK_POWER_OK		0x00004000
+#define	CST4330_BB_PLL_LOCKED		0x00008000
+#define SOCDEVRAM_BP_ADDR		0x1E000000
+#define SOCDEVRAM_ARM_ADDR		0x00800000
+
+/* 4330 Chip specific PMU ChipControl register bits */
+#define PCTL_4330_SERIAL_ENAB	(1  << 24)
+
+/* 4330 Chip specific ChipControl register bits */
+#define CCTRL_4330_GPIO_SEL		0x00000001    /* 1=select GPIOs to be muxed out */
+#define CCTRL_4330_ERCX_SEL		0x00000002    /* 1=select ERCX BT coex to be muxed out */
+#define CCTRL_4330_SDIO_HOST_WAKE	0x00000004    /* SDIO: 1=configure GPIO0 for host wake */
+#define CCTRL_4330_JTAG_DISABLE	0x00000008    /* 1=disable JTAG interface on mux'd pins */
+
+#define PMU_VREG0_ADDR				0
+#define PMU_VREG0_DISABLE_PULLD_BT_SHIFT	2
+#define PMU_VREG0_DISABLE_PULLD_WL_SHIFT	3
+
+#define PMU_VREG4_ADDR			4
+
+#define PMU_VREG4_CLDO_PWM_SHIFT	4
+#define PMU_VREG4_CLDO_PWM_MASK		0x7
+
+#define PMU_VREG4_LPLDO1_SHIFT		15
+#define PMU_VREG4_LPLDO1_MASK		0x7
+#define PMU_VREG4_LPLDO1_1p20V		0
+#define PMU_VREG4_LPLDO1_1p15V		1
+#define PMU_VREG4_LPLDO1_1p10V		2
+#define PMU_VREG4_LPLDO1_1p25V		3
+#define PMU_VREG4_LPLDO1_1p05V		4
+#define PMU_VREG4_LPLDO1_1p00V		5
+#define PMU_VREG4_LPLDO1_0p95V		6
+#define PMU_VREG4_LPLDO1_0p90V		7
+
+/* 4350/4345 VREG4 settings */
+#define PMU4350_VREG4_LPLDO1_1p10V	0
+#define PMU4350_VREG4_LPLDO1_1p15V	1
+#define PMU4350_VREG4_LPLDO1_1p21V	2
+#define PMU4350_VREG4_LPLDO1_1p24V	3
+#define PMU4350_VREG4_LPLDO1_0p90V	4
+#define PMU4350_VREG4_LPLDO1_0p96V	5
+#define PMU4350_VREG4_LPLDO1_1p01V	6
+#define PMU4350_VREG4_LPLDO1_1p04V	7
+
+#define PMU_VREG4_LPLDO2_LVM_SHIFT	18
+#define PMU_VREG4_LPLDO2_LVM_MASK	0x7
+#define PMU_VREG4_LPLDO2_HVM_SHIFT	21
+#define PMU_VREG4_LPLDO2_HVM_MASK	0x7
+#define PMU_VREG4_LPLDO2_LVM_HVM_MASK	0x3f
+#define PMU_VREG4_LPLDO2_1p00V		0
+#define PMU_VREG4_LPLDO2_1p15V		1
+#define PMU_VREG4_LPLDO2_1p20V		2
+#define PMU_VREG4_LPLDO2_1p10V		3
+#define PMU_VREG4_LPLDO2_0p90V		4	/* 4 - 7 is 0.90V */
+
+#define PMU_VREG4_HSICLDO_BYPASS_SHIFT	27
+#define PMU_VREG4_HSICLDO_BYPASS_MASK	0x1
+
+#define PMU_VREG5_ADDR			5
+#define PMU_VREG5_HSICAVDD_PD_SHIFT	6
+#define PMU_VREG5_HSICAVDD_PD_MASK	0x1
+#define PMU_VREG5_HSICDVDD_PD_SHIFT	11
+#define PMU_VREG5_HSICDVDD_PD_MASK	0x1
+
+/* 4334 resources */
+#define RES4334_LPLDO_PU		0
+#define RES4334_RESET_PULLDN_DIS	1
+#define RES4334_PMU_BG_PU		2
+#define RES4334_HSIC_LDO_PU		3
+#define RES4334_CBUCK_LPOM_PU		4
+#define RES4334_CBUCK_PFM_PU		5
+#define RES4334_CLDO_PU			6
+#define RES4334_LPLDO2_LVM		7
+#define RES4334_LNLDO_PU		8
+#define RES4334_LDO3P3_PU		9
+#define RES4334_OTP_PU			10
+#define RES4334_XTAL_PU			11
+#define RES4334_WL_PWRSW_PU		12
+#define RES4334_LQ_AVAIL		13
+#define RES4334_LOGIC_RET		14
+#define RES4334_MEM_SLEEP		15
+#define RES4334_MACPHY_RET		16
+#define RES4334_WL_CORE_READY		17
+#define RES4334_ILP_REQ			18
+#define RES4334_ALP_AVAIL		19
+#define RES4334_MISC_PWRSW_PU		20
+#define RES4334_SYNTH_PWRSW_PU		21
+#define RES4334_RX_PWRSW_PU		22
+#define RES4334_RADIO_PU		23
+#define RES4334_WL_PMU_PU		24
+#define RES4334_VCO_LDO_PU		25
+#define RES4334_AFE_LDO_PU		26
+#define RES4334_RX_LDO_PU		27
+#define RES4334_TX_LDO_PU		28
+#define RES4334_HT_AVAIL		29
+#define RES4334_MACPHY_CLK_AVAIL	30
+
+/* 4334 chip-specific ChipStatus register bits */
+#define CST4334_CHIPMODE_MASK		7
+#define CST4334_SDIO_MODE		0x00000000
+#define CST4334_SPI_MODE		0x00000004
+#define CST4334_HSIC_MODE		0x00000006
+#define CST4334_BLUSB_MODE		0x00000007
+#define CST4334_CHIPMODE_HSIC(cs)	(((cs) & CST4334_CHIPMODE_MASK) == CST4334_HSIC_MODE)
+#define CST4334_OTP_PRESENT		0x00000010
+#define CST4334_LPO_AUTODET_EN		0x00000020
+#define CST4334_ARMREMAP_0		0x00000040
+#define CST4334_SPROM_PRESENT		0x00000080
+#define CST4334_ILPDIV_EN_MASK		0x00000100
+#define CST4334_ILPDIV_EN_SHIFT		8
+#define CST4334_LPO_SEL_MASK		0x00000200
+#define CST4334_LPO_SEL_SHIFT		9
+#define CST4334_RES_INIT_MODE_MASK	0x00000C00
+#define CST4334_RES_INIT_MODE_SHIFT	10
+
+/* 4334 Chip specific PMU ChipControl register bits */
+#define PCTL_4334_GPIO3_ENAB    (1  << 3)
+
+/* 4334 Chip control */
+#define CCTRL4334_PMU_WAKEUP_GPIO1	(1  << 0)
+#define CCTRL4334_PMU_WAKEUP_HSIC	(1  << 1)
+#define CCTRL4334_PMU_WAKEUP_AOS	(1  << 2)
+#define CCTRL4334_HSIC_WAKE_MODE	(1  << 3)
+#define CCTRL4334_HSIC_INBAND_GPIO1	(1  << 4)
+#define CCTRL4334_HSIC_LDO_PU		(1  << 23)
+
+/* 4334 Chip control 3 */
+#define CCTRL4334_BLOCK_EXTRNL_WAKE		(1  << 4)
+#define CCTRL4334_SAVERESTORE_FIX		(1  << 5)
+
+/* 43341 Chip control 3 */
+#define CCTRL43341_BLOCK_EXTRNL_WAKE		(1  << 13)
+#define CCTRL43341_SAVERESTORE_FIX		(1  << 14)
+#define CCTRL43341_BT_ISO_SEL			(1  << 16)
+
+/* 4334 Chip specific ChipControl1 register bits */
+#define CCTRL1_4334_GPIO_SEL		(1 << 0)    /* 1=select GPIOs to be muxed out */
+#define CCTRL1_4334_ERCX_SEL		(1 << 1)    /* 1=select ERCX BT coex to be muxed out */
+#define CCTRL1_4334_SDIO_HOST_WAKE (1 << 2)  /* SDIO: 1=configure GPIO0 for host wake */
+#define CCTRL1_4334_JTAG_DISABLE	(1 << 3)    /* 1=disable JTAG interface on mux'd pins */
+#define CCTRL1_4334_UART_ON_4_5	(1 << 28)  	/* 1=UART_TX/UART_RX muxed on GPIO_4/5 (4334B0/1) */
+
+/* 4324 Chip specific ChipControl1 register bits */
+#define CCTRL1_4324_GPIO_SEL            (1 << 0)    /* 1=select GPIOs to be muxed out */
+#define CCTRL1_4324_SDIO_HOST_WAKE (1 << 2)  /* SDIO: 1=configure GPIO0 for host wake */
+
+/* 43143 chip-specific ChipStatus register bits based on Confluence documentation */
+/* register contains strap values sampled during POR */
+#define CST43143_REMAP_TO_ROM	 (3 << 0)    /* 00=Boot SRAM, 01=Boot ROM, 10=Boot SFLASH */
+#define CST43143_SDIO_EN	 (1 << 2)    /* 0 = USB Enab, SDIO pins are GPIO or I2S */
+#define CST43143_SDIO_ISO	 (1 << 3)    /* 1 = SDIO isolated */
+#define CST43143_USB_CPU_LESS	 (1 << 4)   /* 1 = CPULess mode Enabled */
+#define CST43143_CBUCK_MODE	 (3 << 6)   /* Indicates what controller mode CBUCK is in */
+#define CST43143_POK_CBUCK	 (1 << 8)   /* 1 = 1.2V CBUCK voltage ready */
+#define CST43143_PMU_OVRSPIKE	 (1 << 9)
+#define CST43143_PMU_OVRTEMP	 (0xF << 10)
+#define CST43143_SR_FLL_CAL_DONE (1 << 14)
+#define CST43143_USB_PLL_LOCKDET (1 << 15)
+#define CST43143_PMU_PLL_LOCKDET (1 << 16)
+#define CST43143_CHIPMODE_SDIOD(cs)	(((cs) & CST43143_SDIO_EN) != 0) /* SDIO */
+
+/* 43143 Chip specific ChipControl register bits */
+/* 00: SECI is disabled (JATG functional), 01: 2 wire, 10: 4 wire  */
+#define CCTRL_43143_SECI		(1<<0)
+#define CCTRL_43143_BT_LEGACY		(1<<1)
+#define CCTRL_43143_I2S_MODE		(1<<2)	/* 0: SDIO enabled */
+#define CCTRL_43143_I2S_MASTER		(1<<3)	/* 0: I2S MCLK input disabled */
+#define CCTRL_43143_I2S_FULL		(1<<4)	/* 0: I2S SDIN and SPDIF_TX inputs disabled */
+#define CCTRL_43143_GSIO		(1<<5)	/* 0: sFlash enabled */
+#define CCTRL_43143_RF_SWCTRL_MASK	(7<<6)	/* 0: disabled */
+#define CCTRL_43143_RF_SWCTRL_0		(1<<6)
+#define CCTRL_43143_RF_SWCTRL_1		(2<<6)
+#define CCTRL_43143_RF_SWCTRL_2		(4<<6)
+#define CCTRL_43143_RF_XSWCTRL		(1<<9)	/* 0: UART enabled */
+#define CCTRL_43143_HOST_WAKE0		(1<<11)	/* 1: SDIO separate interrupt output from GPIO4 */
+#define CCTRL_43143_HOST_WAKE1		(1<<12)	/* 1: SDIO separate interrupt output from GPIO16 */
+
+/* 43143 resources, based on pmu_params.xls V1.19 */
+#define RES43143_EXT_SWITCHER_PWM	0	/* 0x00001 */
+#define RES43143_XTAL_PU		1	/* 0x00002 */
+#define RES43143_ILP_REQUEST		2	/* 0x00004 */
+#define RES43143_ALP_AVAIL		3	/* 0x00008 */
+#define RES43143_WL_CORE_READY		4	/* 0x00010 */
+#define RES43143_BBPLL_PWRSW_PU		5	/* 0x00020 */
+#define RES43143_HT_AVAIL		6	/* 0x00040 */
+#define RES43143_RADIO_PU		7	/* 0x00080 */
+#define RES43143_MACPHY_CLK_AVAIL	8	/* 0x00100 */
+#define RES43143_OTP_PU			9	/* 0x00200 */
+#define RES43143_LQ_AVAIL		10	/* 0x00400 */
+
+#define PMU43143_XTAL_CORE_SIZE_MASK	0x3F
+
+/* 4313 resources */
+#define	RES4313_BB_PU_RSRC		0
+#define	RES4313_ILP_REQ_RSRC		1
+#define	RES4313_XTAL_PU_RSRC		2
+#define	RES4313_ALP_AVAIL_RSRC		3
+#define	RES4313_RADIO_PU_RSRC		4
+#define	RES4313_BG_PU_RSRC		5
+#define	RES4313_VREG1P4_PU_RSRC		6
+#define	RES4313_AFE_PWRSW_RSRC		7
+#define	RES4313_RX_PWRSW_RSRC		8
+#define	RES4313_TX_PWRSW_RSRC		9
+#define	RES4313_BB_PWRSW_RSRC		10
+#define	RES4313_SYNTH_PWRSW_RSRC	11
+#define	RES4313_MISC_PWRSW_RSRC		12
+#define	RES4313_BB_PLL_PWRSW_RSRC	13
+#define	RES4313_HT_AVAIL_RSRC		14
+#define	RES4313_MACPHY_CLK_AVAIL_RSRC	15
+
+/* 4313 chip-specific ChipStatus register bits */
+#define	CST4313_SPROM_PRESENT			1
+#define	CST4313_OTP_PRESENT			2
+#define	CST4313_SPROM_OTP_SEL_MASK		0x00000002
+#define	CST4313_SPROM_OTP_SEL_SHIFT		0
+
+/* 4313 Chip specific ChipControl register bits */
+#define CCTRL_4313_12MA_LED_DRIVE    0x00000007    /* 12 mA drive strengh for later 4313 */
+
+/* PMU respources for 4314 */
+#define RES4314_LPLDO_PU		0
+#define RES4314_PMU_SLEEP_DIS		1
+#define RES4314_PMU_BG_PU		2
+#define RES4314_CBUCK_LPOM_PU		3
+#define RES4314_CBUCK_PFM_PU		4
+#define RES4314_CLDO_PU			5
+#define RES4314_LPLDO2_LVM		6
+#define RES4314_WL_PMU_PU		7
+#define RES4314_LNLDO_PU		8
+#define RES4314_LDO3P3_PU		9
+#define RES4314_OTP_PU			10
+#define RES4314_XTAL_PU			11
+#define RES4314_WL_PWRSW_PU		12
+#define RES4314_LQ_AVAIL		13
+#define RES4314_LOGIC_RET		14
+#define RES4314_MEM_SLEEP		15
+#define RES4314_MACPHY_RET		16
+#define RES4314_WL_CORE_READY		17
+#define RES4314_ILP_REQ			18
+#define RES4314_ALP_AVAIL		19
+#define RES4314_MISC_PWRSW_PU		20
+#define RES4314_SYNTH_PWRSW_PU		21
+#define RES4314_RX_PWRSW_PU		22
+#define RES4314_RADIO_PU		23
+#define RES4314_VCO_LDO_PU		24
+#define RES4314_AFE_LDO_PU		25
+#define RES4314_RX_LDO_PU		26
+#define RES4314_TX_LDO_PU		27
+#define RES4314_HT_AVAIL		28
+#define RES4314_MACPHY_CLK_AVAIL	29
+
+/* 4314 chip-specific ChipStatus register bits */
+#define CST4314_OTP_ENABLED		0x00200000
+
+/* 43228 resources */
+#define RES43228_NOT_USED		0
+#define RES43228_ILP_REQUEST		1
+#define RES43228_XTAL_PU		2
+#define RES43228_ALP_AVAIL		3
+#define RES43228_PLL_EN			4
+#define RES43228_HT_PHY_AVAIL		5
+
+/* 43228 chipstatus  reg bits */
+#define CST43228_ILP_DIV_EN		0x1
+#define	CST43228_OTP_PRESENT		0x2
+#define	CST43228_SERDES_REFCLK_PADSEL	0x4
+#define	CST43228_SDIO_MODE		0x8
+#define	CST43228_SDIO_OTP_PRESENT	0x10
+#define	CST43228_SDIO_RESET		0x20
+
+/* 4706 chipstatus reg bits */
+#define	CST4706_PKG_OPTION		(1<<0) /* 0: full-featured package 1: low-cost package */
+#define	CST4706_SFLASH_PRESENT	(1<<1) /* 0: parallel, 1: serial flash is present */
+#define	CST4706_SFLASH_TYPE		(1<<2) /* 0: 8b-p/ST-s flash, 1: 16b-p/Atmal-s flash */
+#define	CST4706_MIPS_BENDIAN	(1<<3) /* 0: little,  1: big endian */
+#define	CST4706_PCIE1_DISABLE	(1<<5) /* PCIE1 enable strap pin */
+
+/* 4706 flashstrconfig reg bits */
+#define FLSTRCF4706_MASK		0x000000ff
+#define FLSTRCF4706_SF1			0x00000001	/* 2nd serial flash present */
+#define FLSTRCF4706_PF1			0x00000002	/* 2nd parallel flash present */
+#define FLSTRCF4706_SF1_TYPE	0x00000004	/* 2nd serial flash type : 0 : ST, 1 : Atmel */
+#define FLSTRCF4706_NF1			0x00000008	/* 2nd NAND flash present */
+#define FLSTRCF4706_1ST_MADDR_SEG_MASK		0x000000f0	/* Valid value mask */
+#define FLSTRCF4706_1ST_MADDR_SEG_4MB		0x00000010	/* 4MB */
+#define FLSTRCF4706_1ST_MADDR_SEG_8MB		0x00000020	/* 8MB */
+#define FLSTRCF4706_1ST_MADDR_SEG_16MB		0x00000030	/* 16MB */
+#define FLSTRCF4706_1ST_MADDR_SEG_32MB		0x00000040	/* 32MB */
+#define FLSTRCF4706_1ST_MADDR_SEG_64MB		0x00000050	/* 64MB */
+#define FLSTRCF4706_1ST_MADDR_SEG_128MB		0x00000060	/* 128MB */
+#define FLSTRCF4706_1ST_MADDR_SEG_256MB		0x00000070	/* 256MB */
+
+/* 4360 Chip specific ChipControl register bits */
+#define CCTRL4360_I2C_MODE			(1 << 0)
+#define CCTRL4360_UART_MODE			(1 << 1)
+#define CCTRL4360_SECI_MODE			(1 << 2)
+#define CCTRL4360_BTSWCTRL_MODE			(1 << 3)
+#define CCTRL4360_DISCRETE_FEMCTRL_MODE		(1 << 4)
+#define CCTRL4360_DIGITAL_PACTRL_MODE		(1 << 5)
+#define CCTRL4360_BTSWCTRL_AND_DIGPA_PRESENT	(1 << 6)
+#define CCTRL4360_EXTRA_GPIO_MODE		(1 << 7)
+#define CCTRL4360_EXTRA_FEMCTRL_MODE		(1 << 8)
+#define CCTRL4360_BT_LGCY_MODE			(1 << 9)
+#define CCTRL4360_CORE2FEMCTRL4_ON		(1 << 21)
+#define CCTRL4360_SECI_ON_GPIO01		(1 << 24)
+
+/* 4360 Chip specific Regulator Control register bits */
+#define RCTRL4360_RFLDO_PWR_DOWN		(1 << 1)
+
+/* 4360 PMU resources and chip status bits */
+#define RES4360_REGULATOR          0
+#define RES4360_ILP_AVAIL          1
+#define RES4360_ILP_REQ            2
+#define RES4360_XTAL_LDO_PU        3
+#define RES4360_XTAL_PU            4
+#define RES4360_ALP_AVAIL          5
+#define RES4360_BBPLLPWRSW_PU      6
+#define RES4360_HT_AVAIL           7
+#define RES4360_OTP_PU             8
+
+#define CST4360_XTAL_40MZ                  0x00000001
+#define CST4360_SFLASH                     0x00000002
+#define CST4360_SPROM_PRESENT              0x00000004
+#define CST4360_SFLASH_TYPE                0x00000004
+#define CST4360_OTP_ENABLED                0x00000008
+#define CST4360_REMAP_ROM                  0x00000010
+#define CST4360_RSRC_INIT_MODE_MASK        0x00000060
+#define CST4360_RSRC_INIT_MODE_SHIFT       5
+#define CST4360_ILP_DIVEN                  0x00000080
+#define CST4360_MODE_USB                   0x00000100
+#define CST4360_SPROM_SIZE_MASK            0x00000600
+#define CST4360_SPROM_SIZE_SHIFT           9
+#define CST4360_BBPLL_LOCK                 0x00000800
+#define CST4360_AVBBPLL_LOCK               0x00001000
+#define CST4360_USBBBPLL_LOCK              0x00002000
+#define CST4360_RSRC_INIT_MODE(cs)	((cs & CST4360_RSRC_INIT_MODE_MASK) >> \
+					CST4360_RSRC_INIT_MODE_SHIFT)
+
+#define CCTRL_4360_UART_SEL	0x2
+#define CST4360_RSRC_INIT_MODE(cs)	((cs & CST4360_RSRC_INIT_MODE_MASK) >> \
+					CST4360_RSRC_INIT_MODE_SHIFT)
+
+
+/* 43602 PMU resources based on pmu_params.xls version v0.95 */
+#define RES43602_LPLDO_PU		0
+#define RES43602_REGULATOR		1
+#define RES43602_PMU_SLEEP		2
+#define RES43602_RSVD_3			3
+#define RES43602_XTALLDO_PU		4
+#define RES43602_SERDES_PU		5
+#define RES43602_BBPLL_PWRSW_PU		6
+#define RES43602_SR_CLK_START		7
+#define RES43602_SR_PHY_PWRSW		8
+#define RES43602_SR_SUBCORE_PWRSW	9
+#define RES43602_XTAL_PU		10
+#define	RES43602_PERST_OVR		11
+#define RES43602_SR_CLK_STABLE		12
+#define RES43602_SR_SAVE_RESTORE	13
+#define RES43602_SR_SLEEP		14
+#define RES43602_LQ_START		15
+#define RES43602_LQ_AVAIL		16
+#define RES43602_WL_CORE_RDY		17
+#define RES43602_ILP_REQ		18
+#define RES43602_ALP_AVAIL		19
+#define RES43602_RADIO_PU		20
+#define RES43602_RFLDO_PU		21
+#define RES43602_HT_START		22
+#define RES43602_HT_AVAIL		23
+#define RES43602_MACPHY_CLKAVAIL	24
+#define RES43602_PARLDO_PU		25
+#define RES43602_RSVD_26		26
+
+/* 43602 chip status bits */
+#define CST43602_SPROM_PRESENT             (1<<1)
+#define CST43602_SPROM_SIZE                (1<<10) /* 0 = 16K, 1 = 4K */
+#define CST43602_BBPLL_LOCK                (1<<11)
+#define CST43602_RF_LDO_OUT_OK             (1<<15) /* RF LDO output OK */
+
+#define PMU43602_CC1_GPIO12_OVRD           (1<<28) /* GPIO12 override */
+
+#define PMU43602_CC2_PCIE_CLKREQ_L_WAKE_EN (1<<1)  /* creates gated_pcie_wake, pmu_wakeup logic */
+#define PMU43602_CC2_PCIE_PERST_L_WAKE_EN  (1<<2)  /* creates gated_pcie_wake, pmu_wakeup logic */
+#define PMU43602_CC2_ENABLE_L2REFCLKPAD_PWRDWN (1<<3)
+#define PMU43602_CC2_PMU_WAKE_ALP_AVAIL_EN (1<<5)  /* enable pmu_wakeup to request for ALP_AVAIL */
+#define PMU43602_CC2_PERST_L_EXTEND_EN     (1<<9)  /* extend perst_l until rsc PERST_OVR comes up */
+#define PMU43602_CC2_FORCE_EXT_LPO         (1<<19) /* 1=ext LPO clock is the final LPO clock */
+#define PMU43602_CC2_XTAL32_SEL            (1<<30) /* 0=ext_clock, 1=xtal */
+
+#define CC_SR1_43602_SR_ASM_ADDR	(0x0)
+
+/* PLL CTL register values for open loop, used during S/R operation */
+#define PMU43602_PLL_CTL6_VAL		0x68000528
+#define PMU43602_PLL_CTL7_VAL		0x6
+
+#define PMU43602_CC3_ARMCR4_DBG_CLK	(1 << 29)
+
+/* 4349 related */
+#define RES4349_LPLDO_PU			0
+#define RES4349_BG_PU				1
+#define RES4349_PMU_SLEEP			2
+#define RES4349_PALDO3P3_PU			3
+#define RES4349_CBUCK_LPOM_PU		4
+#define RES4349_CBUCK_PFM_PU		5
+#define RES4349_COLD_START_WAIT		6
+#define RES4349_RSVD_7				7
+#define RES4349_LNLDO_PU			8
+#define RES4349_XTALLDO_PU			9
+#define RES4349_LDO3P3_PU			10
+#define RES4349_OTP_PU				11
+#define RES4349_XTAL_PU				12
+#define RES4349_SR_CLK_START		13
+#define RES4349_LQ_AVAIL			14
+#define RES4349_LQ_START			15
+#define RES4349_PERST_OVR			16
+#define RES4349_WL_CORE_RDY			17
+#define RES4349_ILP_REQ				18
+#define RES4349_ALP_AVAIL			19
+#define RES4349_MINI_PMU			20
+#define RES4349_RADIO_PU			21
+#define RES4349_SR_CLK_STABLE		22
+#define RES4349_SR_SAVE_RESTORE		23
+#define RES4349_SR_PHY_PWRSW		24
+#define RES4349_SR_VDDM_PWRSW		25
+#define RES4349_SR_SUBCORE_PWRSW	26
+#define RES4349_SR_SLEEP			27
+#define RES4349_HT_START			28
+#define RES4349_HT_AVAIL			29
+#define RES4349_MACPHY_CLKAVAIL		30
+
+#define CR4_4349_RAM_BASE			(0x180000)
+#define CC4_4349_SR_ASM_ADDR		(0x48)
+
+#define CST4349_CHIPMODE_SDIOD(cs)	(((cs) & (1 << 6)) != 0)	/* SDIO */
+#define CST4349_CHIPMODE_PCIE(cs)	(((cs) & (1 << 7)) != 0)	/* PCIE */
+
+#define CST4349_SPROM_PRESENT		0x00000010
+
+
+/* 43430 PMU resources based on pmu_params.xls */
+#define RES43430_LPLDO_PU				0
+#define RES43430_BG_PU					1
+#define RES43430_PMU_SLEEP				2
+#define RES43430_RSVD_3					3
+#define RES43430_CBUCK_LPOM_PU			4
+#define RES43430_CBUCK_PFM_PU			5
+#define RES43430_COLD_START_WAIT		6
+#define RES43430_RSVD_7					7
+#define RES43430_LNLDO_PU				8
+#define RES43430_RSVD_9					9
+#define RES43430_LDO3P3_PU				10
+#define RES43430_OTP_PU					11
+#define RES43430_XTAL_PU				12
+#define RES43430_SR_CLK_START			13
+#define RES43430_LQ_AVAIL				14
+#define RES43430_LQ_START				15
+#define RES43430_RSVD_16				16
+#define RES43430_WL_CORE_RDY			17
+#define RES43430_ILP_REQ				18
+#define RES43430_ALP_AVAIL				19
+#define RES43430_MINI_PMU				20
+#define RES43430_RADIO_PU				21
+#define RES43430_SR_CLK_STABLE			22
+#define RES43430_SR_SAVE_RESTORE		23
+#define RES43430_SR_PHY_PWRSW			24
+#define RES43430_SR_VDDM_PWRSW			25
+#define RES43430_SR_SUBCORE_PWRSW		26
+#define RES43430_SR_SLEEP				27
+#define RES43430_HT_START				28
+#define RES43430_HT_AVAIL				29
+#define RES43430_MACPHY_CLK_AVAIL		30
+
+/* 43430 chip status bits */
+#define CST43430_SDIO_MODE				0x00000001
+#define CST43430_GSPI_MODE				0x00000002
+#define CST43430_RSRC_INIT_MODE_0		0x00000080
+#define CST43430_RSRC_INIT_MODE_1		0x00000100
+#define CST43430_SEL0_SDIO				0x00000200
+#define CST43430_SEL1_SDIO				0x00000400
+#define CST43430_SEL2_SDIO				0x00000800
+#define CST43430_BBPLL_LOCKED			0x00001000
+#define CST43430_DBG_INST_DETECT		0x00004000
+#define CST43430_CLB2WL_BT_READY		0x00020000
+#define CST43430_JTAG_MODE				0x00100000
+#define CST43430_HOST_IFACE				0x00400000
+#define CST43430_TRIM_EN				0x00800000
+#define CST43430_DIN_PACKAGE_OPTION		0x10000000
+
+/* defines to detect active host interface in use */
+#define CHIP_HOSTIF_PCIEMODE	0x1
+#define CHIP_HOSTIF_USBMODE	0x2
+#define CHIP_HOSTIF_SDIOMODE	0x4
+#define CHIP_HOSTIF_PCIE(sih)	(si_chip_hostif(sih) == CHIP_HOSTIF_PCIEMODE)
+#define CHIP_HOSTIF_USB(sih)	(si_chip_hostif(sih) == CHIP_HOSTIF_USBMODE)
+#define CHIP_HOSTIF_SDIO(sih)	(si_chip_hostif(sih) == CHIP_HOSTIF_SDIOMODE)
+
+/* 4335 resources */
+#define RES4335_LPLDO_PO           0
+#define RES4335_PMU_BG_PU          1
+#define RES4335_PMU_SLEEP          2
+#define RES4335_RSVD_3             3
+#define RES4335_CBUCK_LPOM_PU		4
+#define RES4335_CBUCK_PFM_PU		5
+#define RES4335_RSVD_6             6
+#define RES4335_RSVD_7             7
+#define RES4335_LNLDO_PU           8
+#define RES4335_XTALLDO_PU         9
+#define RES4335_LDO3P3_PU			10
+#define RES4335_OTP_PU				11
+#define RES4335_XTAL_PU				12
+#define RES4335_SR_CLK_START       13
+#define RES4335_LQ_AVAIL			14
+#define RES4335_LQ_START           15
+#define RES4335_RSVD_16            16
+#define RES4335_WL_CORE_RDY        17
+#define RES4335_ILP_REQ				18
+#define RES4335_ALP_AVAIL			19
+#define RES4335_MINI_PMU           20
+#define RES4335_RADIO_PU			21
+#define RES4335_SR_CLK_STABLE		22
+#define RES4335_SR_SAVE_RESTORE		23
+#define RES4335_SR_PHY_PWRSW		24
+#define RES4335_SR_VDDM_PWRSW      25
+#define RES4335_SR_SUBCORE_PWRSW	26
+#define RES4335_SR_SLEEP           27
+#define RES4335_HT_START           28
+#define RES4335_HT_AVAIL			29
+#define RES4335_MACPHY_CLKAVAIL		30
+
+/* 4335 Chip specific ChipStatus register bits */
+#define CST4335_SPROM_MASK			0x00000020
+#define CST4335_SFLASH_MASK			0x00000040
+#define	CST4335_RES_INIT_MODE_SHIFT	7
+#define	CST4335_RES_INIT_MODE_MASK	0x00000180
+#define CST4335_CHIPMODE_MASK		0xF
+#define CST4335_CHIPMODE_SDIOD(cs)	(((cs) & (1 << 0)) != 0)	/* SDIO */
+#define CST4335_CHIPMODE_GSPI(cs)	(((cs) & (1 << 1)) != 0)	/* gSPI */
+#define CST4335_CHIPMODE_USB20D(cs)	(((cs) & (1 << 2)) != 0)	/* HSIC || USBDA */
+#define CST4335_CHIPMODE_PCIE(cs)	(((cs) & (1 << 3)) != 0)	/* PCIE */
+
+/* 4335 Chip specific ChipControl1 register bits */
+#define CCTRL1_4335_GPIO_SEL		(1 << 0)    /* 1=select GPIOs to be muxed out */
+#define CCTRL1_4335_SDIO_HOST_WAKE (1 << 2)  /* SDIO: 1=configure GPIO0 for host wake */
+
+/* 4335 Chip specific ChipControl2 register bits */
+#define CCTRL2_4335_AOSBLOCK		(1 << 30)
+#define CCTRL2_4335_PMUWAKE		(1 << 31)
+#define PATCHTBL_SIZE			(0x800)
+#define CR4_4335_RAM_BASE                    (0x180000)
+#define CR4_4345_RAM_BASE                    (0x1b0000)
+#define CR4_4349_RAM_BASE                    (0x180000)
+#define CR4_4350_RAM_BASE                    (0x180000)
+#define CR4_4360_RAM_BASE                    (0x0)
+#define CR4_43602_RAM_BASE                   (0x180000)
+
+/* 4335 chip OTP present & OTP select bits. */
+#define SPROM4335_OTP_SELECT	0x00000010
+#define SPROM4335_OTP_PRESENT	0x00000020
+
+/* 4335 GCI specific bits. */
+#define CC4335_GCI_STRAP_OVERRIDE_SFLASH_PRESENT	(1 << 24)
+#define CC4335_GCI_STRAP_OVERRIDE_SFLASH_TYPE	25
+#define CC4335_GCI_FUNC_SEL_PAD_SDIO	0x00707770
+
+/* SFLASH clkdev specific bits. */
+#define CC4335_SFLASH_CLKDIV_MASK	0x1F000000
+#define CC4335_SFLASH_CLKDIV_SHIFT	25
+
+/* 4335 OTP bits for SFLASH. */
+#define CC4335_SROM_OTP_SFLASH	40
+#define CC4335_SROM_OTP_SFLASH_PRESENT	0x1
+#define CC4335_SROM_OTP_SFLASH_TYPE	0x2
+#define CC4335_SROM_OTP_SFLASH_CLKDIV_MASK	0x003C
+#define CC4335_SROM_OTP_SFLASH_CLKDIV_SHIFT	2
+
+
+/* 4335 chip OTP present & OTP select bits. */
+#define SPROM4335_OTP_SELECT	0x00000010
+#define SPROM4335_OTP_PRESENT	0x00000020
+
+/* 4335 GCI specific bits. */
+#define CC4335_GCI_STRAP_OVERRIDE_SFLASH_PRESENT	(1 << 24)
+#define CC4335_GCI_STRAP_OVERRIDE_SFLASH_TYPE	25
+#define CC4335_GCI_FUNC_SEL_PAD_SDIO	0x00707770
+
+/* SFLASH clkdev specific bits. */
+#define CC4335_SFLASH_CLKDIV_MASK	0x1F000000
+#define CC4335_SFLASH_CLKDIV_SHIFT	25
+
+/* 4335 OTP bits for SFLASH. */
+#define CC4335_SROM_OTP_SFLASH	40
+#define CC4335_SROM_OTP_SFLASH_PRESENT	0x1
+#define CC4335_SROM_OTP_SFLASH_TYPE	0x2
+#define CC4335_SROM_OTP_SFLASH_CLKDIV_MASK	0x003C
+#define CC4335_SROM_OTP_SFLASH_CLKDIV_SHIFT	2
+
+/* 4335 resources--END */
+
+/* 4345 Chip specific ChipStatus register bits */
+#define CST4345_SPROM_MASK		0x00000020
+#define CST4345_SFLASH_MASK		0x00000040
+#define CST4345_RES_INIT_MODE_SHIFT	7
+#define CST4345_RES_INIT_MODE_MASK	0x00000180
+#define CST4345_CHIPMODE_MASK		0x4000F
+#define CST4345_CHIPMODE_SDIOD(cs)	(((cs) & (1 << 0)) != 0)	/* SDIO */
+#define CST4345_CHIPMODE_GSPI(cs)	(((cs) & (1 << 1)) != 0)	/* gSPI */
+#define CST4345_CHIPMODE_HSIC(cs)	(((cs) & (1 << 2)) != 0)	/* HSIC */
+#define CST4345_CHIPMODE_PCIE(cs)	(((cs) & (1 << 3)) != 0)	/* PCIE */
+#define CST4345_CHIPMODE_USB20D(cs)	(((cs) & (1 << 18)) != 0)	/* USBDA */
+
+/* 4350 Chipcommon ChipStatus bits */
+#define CST4350_SDIO_MODE		0x00000001
+#define CST4350_HSIC20D_MODE		0x00000002
+#define CST4350_BP_ON_HSIC_CLK		0x00000004
+#define CST4350_PCIE_MODE		0x00000008
+#define CST4350_USB20D_MODE		0x00000010
+#define CST4350_USB30D_MODE		0x00000020
+#define CST4350_SPROM_PRESENT		0x00000040
+#define CST4350_RSRC_INIT_MODE_0	0x00000080
+#define CST4350_RSRC_INIT_MODE_1	0x00000100
+#define CST4350_SEL0_SDIO		0x00000200
+#define CST4350_SEL1_SDIO		0x00000400
+#define CST4350_SDIO_PAD_MODE		0x00000800
+#define CST4350_BBPLL_LOCKED		0x00001000
+#define CST4350_USBPLL_LOCKED		0x00002000
+#define CST4350_LINE_STATE		0x0000C000
+#define CST4350_SERDES_PIPE_PLLLOCK	0x00010000
+#define CST4350_BT_READY		0x00020000
+#define CST4350_SFLASH_PRESENT		0x00040000
+#define CST4350_CPULESS_ENABLE		0x00080000
+#define CST4350_STRAP_HOST_IFC_1	0x00100000
+#define CST4350_STRAP_HOST_IFC_2	0x00200000
+#define CST4350_STRAP_HOST_IFC_3	0x00400000
+#define CST4350_RAW_SPROM_PRESENT	0x00800000
+#define CST4350_APP_CLK_SWITCH_SEL_RDBACK	0x01000000
+#define CST4350_RAW_RSRC_INIT_MODE_0	0x02000000
+#define CST4350_SDIO_PAD_VDDIO		0x04000000
+#define CST4350_GSPI_MODE		0x08000000
+#define CST4350_PACKAGE_OPTION		0xF0000000
+#define CST4350_PACKAGE_SHIFT		28
+
+/* package option for 4350 */
+#define CST4350_PACKAGE_WLCSP		0x0
+#define CST4350_PACKAGE_PCIE		0x1
+#define CST4350_PACKAGE_WLBGA		0x2
+#define CST4350_PACKAGE_DBG		0x3
+#define CST4350_PACKAGE_USB		0x4
+#define CST4350_PACKAGE_USB_HSIC	0x4
+
+#define CST4350_PKG_MODE(cs)	((cs & CST4350_PACKAGE_OPTION) >> CST4350_PACKAGE_SHIFT)
+
+#define CST4350_PKG_WLCSP(cs)		(CST4350_PKG_MODE(cs) == (CST4350_PACKAGE_WLCSP))
+#define CST4350_PKG_PCIE(cs)		(CST4350_PKG_MODE(cs) == (CST4350_PACKAGE_PCIE))
+#define CST4350_PKG_WLBGA(cs)		(CST4350_PKG_MODE(cs) == (CST4350_PACKAGE_WLBGA))
+#define CST4350_PKG_USB(cs)		(CST4350_PKG_MODE(cs) == (CST4350_PACKAGE_USB))
+#define CST4350_PKG_USB_HSIC(cs)	(CST4350_PKG_MODE(cs) == (CST4350_PACKAGE_USB_HSIC))
+
+/* 4350C0 USB PACKAGE using raw_sprom_present to indicate 40mHz xtal */
+#define CST4350_PKG_USB_40M(cs)		(cs & CST4350_RAW_SPROM_PRESENT)
+
+#define CST4350_CHIPMODE_SDIOD(cs)	(CST4350_IFC_MODE(cs) == (CST4350_IFC_MODE_SDIOD))
+#define CST4350_CHIPMODE_USB20D(cs)	((CST4350_IFC_MODE(cs)) == (CST4350_IFC_MODE_USB20D))
+#define CST4350_CHIPMODE_HSIC20D(cs)	(CST4350_IFC_MODE(cs) == (CST4350_IFC_MODE_HSIC20D))
+#define CST4350_CHIPMODE_HSIC30D(cs)	(CST4350_IFC_MODE(cs) == (CST4350_IFC_MODE_HSIC30D))
+#define CST4350_CHIPMODE_USB30D(cs)	(CST4350_IFC_MODE(cs) == (CST4350_IFC_MODE_USB30D))
+#define CST4350_CHIPMODE_USB30D_WL(cs)	(CST4350_IFC_MODE(cs) == (CST4350_IFC_MODE_USB30D_WL))
+#define CST4350_CHIPMODE_PCIE(cs)	(CST4350_IFC_MODE(cs) == (CST4350_IFC_MODE_PCIE))
+
+/* strap_host_ifc strap value */
+#define CST4350_HOST_IFC_MASK		0x00700000
+#define CST4350_HOST_IFC_SHIFT		20
+
+/* host_ifc raw mode */
+#define CST4350_IFC_MODE_SDIOD			0x0
+#define CST4350_IFC_MODE_HSIC20D		0x1
+#define CST4350_IFC_MODE_HSIC30D		0x2
+#define CST4350_IFC_MODE_PCIE			0x3
+#define CST4350_IFC_MODE_USB20D			0x4
+#define CST4350_IFC_MODE_USB30D			0x5
+#define CST4350_IFC_MODE_USB30D_WL		0x6
+#define CST4350_IFC_MODE_USB30D_BT		0x7
+
+#define CST4350_IFC_MODE(cs)	((cs & CST4350_HOST_IFC_MASK) >> CST4350_HOST_IFC_SHIFT)
+
+/* 4350 PMU resources */
+#define RES4350_LPLDO_PU	0
+#define RES4350_PMU_BG_PU	1
+#define RES4350_PMU_SLEEP	2
+#define RES4350_RSVD_3		3
+#define RES4350_CBUCK_LPOM_PU	4
+#define RES4350_CBUCK_PFM_PU	5
+#define RES4350_COLD_START_WAIT	6
+#define RES4350_RSVD_7		7
+#define RES4350_LNLDO_PU	8
+#define RES4350_XTALLDO_PU	9
+#define RES4350_LDO3P3_PU	10
+#define RES4350_OTP_PU		11
+#define RES4350_XTAL_PU		12
+#define RES4350_SR_CLK_START	13
+#define RES4350_LQ_AVAIL	14
+#define RES4350_LQ_START	15
+#define RES4350_PERST_OVR	16
+#define RES4350_WL_CORE_RDY	17
+#define RES4350_ILP_REQ		18
+#define RES4350_ALP_AVAIL	19
+#define RES4350_MINI_PMU	20
+#define RES4350_RADIO_PU	21
+#define RES4350_SR_CLK_STABLE	22
+#define RES4350_SR_SAVE_RESTORE	23
+#define RES4350_SR_PHY_PWRSW	24
+#define RES4350_SR_VDDM_PWRSW	25
+#define RES4350_SR_SUBCORE_PWRSW	26
+#define RES4350_SR_SLEEP	27
+#define RES4350_HT_START	28
+#define RES4350_HT_AVAIL	29
+#define RES4350_MACPHY_CLKAVAIL	30
+
+#define MUXENAB4350_UART_MASK		(0x0000000f)
+#define MUXENAB4350_UART_SHIFT		0
+#define MUXENAB4350_HOSTWAKE_MASK	(0x000000f0)	/* configure GPIO for SDIO host_wake */
+#define MUXENAB4350_HOSTWAKE_SHIFT	4
+
+
+/* 4350 GCI function sel values */
+#define CC4350_FNSEL_HWDEF		(0)
+#define CC4350_FNSEL_SAMEASPIN		(1)
+#define CC4350_FNSEL_UART		(2)
+#define CC4350_FNSEL_SFLASH		(3)
+#define CC4350_FNSEL_SPROM		(4)
+#define CC4350_FNSEL_I2C		(5)
+#define CC4350_FNSEL_MISC0		(6)
+#define CC4350_FNSEL_GCI		(7)
+#define CC4350_FNSEL_MISC1		(8)
+#define CC4350_FNSEL_MISC2		(9)
+#define CC4350_FNSEL_PWDOG 		(10)
+#define CC4350_FNSEL_IND		(12)
+#define CC4350_FNSEL_PDN		(13)
+#define CC4350_FNSEL_PUP		(14)
+#define CC4350_FNSEL_TRISTATE		(15)
+#define CC4350C_FNSEL_UART		(3)
+
+
+/* 4350 GPIO */
+#define CC4350_PIN_GPIO_00		(0)
+#define CC4350_PIN_GPIO_01		(1)
+#define CC4350_PIN_GPIO_02		(2)
+#define CC4350_PIN_GPIO_03		(3)
+#define CC4350_PIN_GPIO_04		(4)
+#define CC4350_PIN_GPIO_05		(5)
+#define CC4350_PIN_GPIO_06		(6)
+#define CC4350_PIN_GPIO_07		(7)
+#define CC4350_PIN_GPIO_08		(8)
+#define CC4350_PIN_GPIO_09		(9)
+#define CC4350_PIN_GPIO_10		(10)
+#define CC4350_PIN_GPIO_11		(11)
+#define CC4350_PIN_GPIO_12		(12)
+#define CC4350_PIN_GPIO_13		(13)
+#define CC4350_PIN_GPIO_14		(14)
+#define CC4350_PIN_GPIO_15		(15)
+
+#define CC4350_RSVD_16_SHIFT		16
+
+#define CC2_4350_PHY_PWRSW_UPTIME_MASK		(0xf << 0)
+#define CC2_4350_PHY_PWRSW_UPTIME_SHIFT		(0)
+#define CC2_4350_VDDM_PWRSW_UPDELAY_MASK	(0xf << 4)
+#define CC2_4350_VDDM_PWRSW_UPDELAY_SHIFT	(4)
+#define CC2_4350_VDDM_PWRSW_UPTIME_MASK		(0xf << 8)
+#define CC2_4350_VDDM_PWRSW_UPTIME_SHIFT	(8)
+#define CC2_4350_SBC_PWRSW_DNDELAY_MASK		(0x3 << 12)
+#define CC2_4350_SBC_PWRSW_DNDELAY_SHIFT	(12)
+#define CC2_4350_PHY_PWRSW_DNDELAY_MASK		(0x3 << 14)
+#define CC2_4350_PHY_PWRSW_DNDELAY_SHIFT	(14)
+#define CC2_4350_VDDM_PWRSW_DNDELAY_MASK	(0x3 << 16)
+#define CC2_4350_VDDM_PWRSW_DNDELAY_SHIFT	(16)
+#define CC2_4350_VDDM_PWRSW_EN_MASK		(1 << 20)
+#define CC2_4350_VDDM_PWRSW_EN_SHIFT		(20)
+#define CC2_4350_MEMLPLDO_PWRSW_EN_MASK		(1 << 21)
+#define CC2_4350_MEMLPLDO_PWRSW_EN_SHIFT	(21)
+#define CC2_4350_SDIO_AOS_WAKEUP_MASK		(1 << 24)
+#define CC2_4350_SDIO_AOS_WAKEUP_SHIFT		(24)
+
+/* Applies to 4335/4350/4345 */
+#define CC3_SR_CLK_SR_MEM_MASK			(1 << 0)
+#define CC3_SR_CLK_SR_MEM_SHIFT			(0)
+#define CC3_SR_BIT1_TBD_MASK			(1 << 1)
+#define CC3_SR_BIT1_TBD_SHIFT			(1)
+#define CC3_SR_ENGINE_ENABLE_MASK		(1 << 2)
+#define CC3_SR_ENGINE_ENABLE_SHIFT		(2)
+#define CC3_SR_BIT3_TBD_MASK			(1 << 3)
+#define CC3_SR_BIT3_TBD_SHIFT			(3)
+#define CC3_SR_MINDIV_FAST_CLK_MASK		(0xF << 4)
+#define CC3_SR_MINDIV_FAST_CLK_SHIFT		(4)
+#define CC3_SR_R23_SR2_RISE_EDGE_TRIG_MASK	(1 << 8)
+#define CC3_SR_R23_SR2_RISE_EDGE_TRIG_SHIFT	(8)
+#define CC3_SR_R23_SR2_FALL_EDGE_TRIG_MASK	(1 << 9)
+#define CC3_SR_R23_SR2_FALL_EDGE_TRIG_SHIFT	(9)
+#define CC3_SR_R23_SR_RISE_EDGE_TRIG_MASK	(1 << 10)
+#define CC3_SR_R23_SR_RISE_EDGE_TRIG_SHIFT	(10)
+#define CC3_SR_R23_SR_FALL_EDGE_TRIG_MASK	(1 << 11)
+#define CC3_SR_R23_SR_FALL_EDGE_TRIG_SHIFT	(11)
+#define CC3_SR_NUM_CLK_HIGH_MASK		(0x7 << 12)
+#define CC3_SR_NUM_CLK_HIGH_SHIFT		(12)
+#define CC3_SR_BIT15_TBD_MASK			(1 << 15)
+#define CC3_SR_BIT15_TBD_SHIFT			(15)
+#define CC3_SR_PHY_FUNC_PIC_MASK		(1 << 16)
+#define CC3_SR_PHY_FUNC_PIC_SHIFT		(16)
+#define CC3_SR_BIT17_19_TBD_MASK		(0x7 << 17)
+#define CC3_SR_BIT17_19_TBD_SHIFT		(17)
+#define CC3_SR_CHIP_TRIGGER_1_MASK		(1 << 20)
+#define CC3_SR_CHIP_TRIGGER_1_SHIFT		(20)
+#define CC3_SR_CHIP_TRIGGER_2_MASK		(1 << 21)
+#define CC3_SR_CHIP_TRIGGER_2_SHIFT		(21)
+#define CC3_SR_CHIP_TRIGGER_3_MASK		(1 << 22)
+#define CC3_SR_CHIP_TRIGGER_3_SHIFT		(22)
+#define CC3_SR_CHIP_TRIGGER_4_MASK		(1 << 23)
+#define CC3_SR_CHIP_TRIGGER_4_SHIFT		(23)
+#define CC3_SR_ALLOW_SBC_FUNC_PIC_MASK		(1 << 24)
+#define CC3_SR_ALLOW_SBC_FUNC_PIC_SHIFT		(24)
+#define CC3_SR_BIT25_26_TBD_MASK		(0x3 << 25)
+#define CC3_SR_BIT25_26_TBD_SHIFT		(25)
+#define CC3_SR_ALLOW_SBC_STBY_MASK		(1 << 27)
+#define CC3_SR_ALLOW_SBC_STBY_SHIFT		(27)
+#define CC3_SR_GPIO_MUX_MASK			(0xF << 28)
+#define CC3_SR_GPIO_MUX_SHIFT			(28)
+
+/* Applies to 4335/4350/4345 */
+#define CC4_SR_INIT_ADDR_MASK		(0x3FF0000)
+#define 	CC4_4350_SR_ASM_ADDR	(0x30)
+#define CC4_4350_C0_SR_ASM_ADDR		(0x0)
+#define 	CC4_4335_SR_ASM_ADDR	(0x48)
+#define 	CC4_4345_SR_ASM_ADDR	(0x48)
+#define CC4_SR_INIT_ADDR_SHIFT		(16)
+
+#define CC4_4350_EN_SR_CLK_ALP_MASK	(1 << 30)
+#define CC4_4350_EN_SR_CLK_ALP_SHIFT	(30)
+#define CC4_4350_EN_SR_CLK_HT_MASK	(1 << 31)
+#define CC4_4350_EN_SR_CLK_HT_SHIFT	(31)
+
+#define VREG4_4350_MEMLPDO_PU_MASK	(1 << 31)
+#define VREG4_4350_MEMLPDO_PU_SHIFT	31
+
+#define VREG6_4350_SR_EXT_CLKDIR_MASK	(1 << 20)
+#define VREG6_4350_SR_EXT_CLKDIR_SHIFT	20
+#define VREG6_4350_SR_EXT_CLKDIV_MASK	(0x3 << 21)
+#define VREG6_4350_SR_EXT_CLKDIV_SHIFT	21
+#define VREG6_4350_SR_EXT_CLKEN_MASK	(1 << 23)
+#define VREG6_4350_SR_EXT_CLKEN_SHIFT	23
+
+#define CC5_4350_PMU_EN_ASSERT_MASK	(1 << 13)
+#define CC5_4350_PMU_EN_ASSERT_SHIFT	(13)
+
+#define CC6_4350_PCIE_CLKREQ_WAKEUP_MASK	(1 << 4)
+#define CC6_4350_PCIE_CLKREQ_WAKEUP_SHIFT	(4)
+#define CC6_4350_PMU_WAKEUP_ALPAVAIL_MASK	(1 << 6)
+#define CC6_4350_PMU_WAKEUP_ALPAVAIL_SHIFT	(6)
+#define CC6_4350_PMU_EN_EXT_PERST_MASK		(1 << 17)
+#define CC6_4350_PMU_EN_EXT_PERST_SHIFT		(17)
+#define CC6_4350_PMU_EN_WAKEUP_MASK		(1 << 18)
+#define CC6_4350_PMU_EN_WAKEUP_SHIFT		(18)
+
+#define CC7_4350_PMU_EN_ASSERT_L2_MASK	(1 << 26)
+#define CC7_4350_PMU_EN_ASSERT_L2_SHIFT	(26)
+#define CC7_4350_PMU_EN_MDIO_MASK	(1 << 27)
+#define CC7_4350_PMU_EN_MDIO_SHIFT	(27)
+
+#define CC6_4345_PMU_EN_PERST_DEASSERT_MASK		(1 << 13)
+#define CC6_4345_PMU_EN_PERST_DEASSERT_SHIF		(13)
+#define CC6_4345_PMU_EN_L2_DEASSERT_MASK		(1 << 14)
+#define CC6_4345_PMU_EN_L2_DEASSERT_SHIF		(14)
+#define CC6_4345_PMU_EN_ASSERT_L2_MASK		(1 << 15)
+#define CC6_4345_PMU_EN_ASSERT_L2_SHIFT		(15)
+#define CC6_4345_PMU_EN_MDIO_MASK		(1 << 24)
+#define CC6_4345_PMU_EN_MDIO_SHIFT		(24)
+
+/* GCI chipcontrol register indices */
+#define CC_GCI_CHIPCTRL_00	(0)
+#define CC_GCI_CHIPCTRL_01	(1)
+#define CC_GCI_CHIPCTRL_02	(2)
+#define CC_GCI_CHIPCTRL_03	(3)
+#define CC_GCI_CHIPCTRL_04	(4)
+#define CC_GCI_CHIPCTRL_05	(5)
+#define CC_GCI_CHIPCTRL_06	(6)
+#define CC_GCI_CHIPCTRL_07	(7)
+#define CC_GCI_CHIPCTRL_08	(8)
+#define CC_GCI_XTAL_BUFSTRG_NFC (0xff << 12)
+
+#define CC_GCI_06_JTAG_SEL_SHIFT	4
+#define CC_GCI_06_JTAG_SEL_MASK		(1 << 4)
+
+#define CC_GCI_NUMCHIPCTRLREGS(cap1)	((cap1 & 0xF00) >> 8)
+
+/* 4345 PMU resources */
+#define RES4345_LPLDO_PU		0
+#define RES4345_PMU_BG_PU		1
+#define RES4345_PMU_SLEEP 		2
+#define RES4345_HSICLDO_PU		3
+#define RES4345_CBUCK_LPOM_PU		4
+#define RES4345_CBUCK_PFM_PU		5
+#define RES4345_COLD_START_WAIT		6
+#define RES4345_RSVD_7			7
+#define RES4345_LNLDO_PU		8
+#define RES4345_XTALLDO_PU		9
+#define RES4345_LDO3P3_PU		10
+#define RES4345_OTP_PU			11
+#define RES4345_XTAL_PU			12
+#define RES4345_SR_CLK_START		13
+#define RES4345_LQ_AVAIL		14
+#define RES4345_LQ_START		15
+#define RES4345_PERST_OVR		16
+#define RES4345_WL_CORE_RDY		17
+#define RES4345_ILP_REQ			18
+#define RES4345_ALP_AVAIL		19
+#define RES4345_MINI_PMU		20
+#define RES4345_RADIO_PU		21
+#define RES4345_SR_CLK_STABLE		22
+#define RES4345_SR_SAVE_RESTORE		23
+#define RES4345_SR_PHY_PWRSW		24
+#define RES4345_SR_VDDM_PWRSW		25
+#define RES4345_SR_SUBCORE_PWRSW	26
+#define RES4345_SR_SLEEP		27
+#define RES4345_HT_START		28
+#define RES4345_HT_AVAIL		29
+#define RES4345_MACPHY_CLK_AVAIL	30
+
+/* 4335 pins
+* note: only the values set as default/used are added here.
+*/
+#define CC4335_PIN_GPIO_00		(0)
+#define CC4335_PIN_GPIO_01		(1)
+#define CC4335_PIN_GPIO_02		(2)
+#define CC4335_PIN_GPIO_03		(3)
+#define CC4335_PIN_GPIO_04		(4)
+#define CC4335_PIN_GPIO_05		(5)
+#define CC4335_PIN_GPIO_06		(6)
+#define CC4335_PIN_GPIO_07		(7)
+#define CC4335_PIN_GPIO_08		(8)
+#define CC4335_PIN_GPIO_09		(9)
+#define CC4335_PIN_GPIO_10		(10)
+#define CC4335_PIN_GPIO_11		(11)
+#define CC4335_PIN_GPIO_12		(12)
+#define CC4335_PIN_GPIO_13		(13)
+#define CC4335_PIN_GPIO_14		(14)
+#define CC4335_PIN_GPIO_15		(15)
+#define CC4335_PIN_SDIO_CLK		(16)
+#define CC4335_PIN_SDIO_CMD		(17)
+#define CC4335_PIN_SDIO_DATA0	(18)
+#define CC4335_PIN_SDIO_DATA1	(19)
+#define CC4335_PIN_SDIO_DATA2	(20)
+#define CC4335_PIN_SDIO_DATA3	(21)
+#define CC4335_PIN_RF_SW_CTRL_6	(22)
+#define CC4335_PIN_RF_SW_CTRL_7	(23)
+#define CC4335_PIN_RF_SW_CTRL_8	(24)
+#define CC4335_PIN_RF_SW_CTRL_9	(25)
+/* Last GPIO Pad */
+#define CC4335_PIN_GPIO_LAST	(31)
+
+/* 4335 GCI function sel values
+*/
+#define CC4335_FNSEL_HWDEF		(0)
+#define CC4335_FNSEL_SAMEASPIN	(1)
+#define CC4335_FNSEL_GPIO0		(2)
+#define CC4335_FNSEL_GPIO1		(3)
+#define CC4335_FNSEL_GCI0		(4)
+#define CC4335_FNSEL_GCI1		(5)
+#define CC4335_FNSEL_UART		(6)
+#define CC4335_FNSEL_SFLASH		(7)
+#define CC4335_FNSEL_SPROM		(8)
+#define CC4335_FNSEL_MISC0		(9)
+#define CC4335_FNSEL_MISC1		(10)
+#define CC4335_FNSEL_MISC2		(11)
+#define CC4335_FNSEL_IND		(12)
+#define CC4335_FNSEL_PDN		(13)
+#define CC4335_FNSEL_PUP		(14)
+#define CC4335_FNSEL_TRI		(15)
+
+/* GCI Core Control Reg */
+#define	GCI_CORECTRL_SR_MASK	(1 << 0)	/* SECI block Reset */
+#define	GCI_CORECTRL_RSL_MASK	(1 << 1)	/* ResetSECILogic */
+#define	GCI_CORECTRL_ES_MASK	(1 << 2)	/* EnableSECI */
+#define	GCI_CORECTRL_FSL_MASK	(1 << 3)	/* Force SECI Out Low */
+#define	GCI_CORECTRL_SOM_MASK	(7 << 4)	/* SECI Op Mode */
+#define	GCI_CORECTRL_US_MASK	(1 << 7)	/* Update SECI */
+#define	GCI_CORECTRL_BOS_MASK	(1 << 8)	/* Break On Sleep */
+
+/* 4345 pins
+* note: only the values set as default/used are added here.
+*/
+#define CC4345_PIN_GPIO_00		(0)
+#define CC4345_PIN_GPIO_01		(1)
+#define CC4345_PIN_GPIO_02		(2)
+#define CC4345_PIN_GPIO_03		(3)
+#define CC4345_PIN_GPIO_04		(4)
+#define CC4345_PIN_GPIO_05		(5)
+#define CC4345_PIN_GPIO_06		(6)
+#define CC4345_PIN_GPIO_07		(7)
+#define CC4345_PIN_GPIO_08		(8)
+#define CC4345_PIN_GPIO_09		(9)
+#define CC4345_PIN_GPIO_10		(10)
+#define CC4345_PIN_GPIO_11		(11)
+#define CC4345_PIN_GPIO_12		(12)
+#define CC4345_PIN_GPIO_13		(13)
+#define CC4345_PIN_GPIO_14		(14)
+#define CC4345_PIN_GPIO_15		(15)
+#define CC4345_PIN_GPIO_16		(16)
+#define CC4345_PIN_SDIO_CLK		(17)
+#define CC4345_PIN_SDIO_CMD		(18)
+#define CC4345_PIN_SDIO_DATA0	(19)
+#define CC4345_PIN_SDIO_DATA1	(20)
+#define CC4345_PIN_SDIO_DATA2	(21)
+#define CC4345_PIN_SDIO_DATA3	(22)
+#define CC4345_PIN_RF_SW_CTRL_0	(23)
+#define CC4345_PIN_RF_SW_CTRL_1	(24)
+#define CC4345_PIN_RF_SW_CTRL_2	(25)
+#define CC4345_PIN_RF_SW_CTRL_3	(26)
+#define CC4345_PIN_RF_SW_CTRL_4	(27)
+#define CC4345_PIN_RF_SW_CTRL_5	(28)
+#define CC4345_PIN_RF_SW_CTRL_6	(29)
+#define CC4345_PIN_RF_SW_CTRL_7	(30)
+#define CC4345_PIN_RF_SW_CTRL_8	(31)
+#define CC4345_PIN_RF_SW_CTRL_9	(32)
+
+/* 4345 GCI function sel values
+*/
+#define CC4345_FNSEL_HWDEF		(0)
+#define CC4345_FNSEL_SAMEASPIN		(1)
+#define CC4345_FNSEL_GPIO0		(2)
+#define CC4345_FNSEL_GPIO1		(3)
+#define CC4345_FNSEL_GCI0		(4)
+#define CC4345_FNSEL_GCI1		(5)
+#define CC4345_FNSEL_UART		(6)
+#define CC4345_FNSEL_SFLASH		(7)
+#define CC4345_FNSEL_SPROM		(8)
+#define CC4345_FNSEL_MISC0		(9)
+#define CC4345_FNSEL_MISC1		(10)
+#define CC4345_FNSEL_MISC2		(11)
+#define CC4345_FNSEL_IND		(12)
+#define CC4345_FNSEL_PDN		(13)
+#define CC4345_FNSEL_PUP		(14)
+#define CC4345_FNSEL_TRI		(15)
+
+#define MUXENAB4345_UART_MASK		(0x0000000f)
+#define MUXENAB4345_UART_SHIFT		0
+#define MUXENAB4345_HOSTWAKE_MASK	(0x000000f0)
+#define MUXENAB4345_HOSTWAKE_SHIFT	4
+
+/* 4349 Group (4349, 4355, 4359) GCI AVS function sel values */
+#define CC4349_GRP_GCI_AVS_CTRL_MASK   (0xffe00000)
+#define CC4349_GRP_GCI_AVS_CTRL_SHIFT  (21)
+#define CC4349_GRP_GCI_AVS_CTRL_ENAB   (1 << 5)
+
+/* 4345 GCI AVS function sel values */
+#define CC4345_GCI_AVS_CTRL_MASK   (0xfc)
+#define CC4345_GCI_AVS_CTRL_SHIFT  (2)
+#define CC4345_GCI_AVS_CTRL_ENAB   (1 << 5)
+
+/* GCI GPIO for function sel GCI-0/GCI-1 */
+#define CC_GCI_GPIO_0			(0)
+#define CC_GCI_GPIO_1			(1)
+#define CC_GCI_GPIO_2			(2)
+#define CC_GCI_GPIO_3			(3)
+#define CC_GCI_GPIO_4			(4)
+#define CC_GCI_GPIO_5			(5)
+#define CC_GCI_GPIO_6			(6)
+#define CC_GCI_GPIO_7			(7)
+#define CC_GCI_GPIO_8			(8)
+#define CC_GCI_GPIO_9			(9)
+#define CC_GCI_GPIO_10			(10)
+#define CC_GCI_GPIO_11			(11)
+#define CC_GCI_GPIO_12			(12)
+#define CC_GCI_GPIO_13			(13)
+#define CC_GCI_GPIO_14			(14)
+#define CC_GCI_GPIO_15			(15)
+
+
+/* indicates Invalid GPIO, e.g. when PAD GPIO doesn't map to GCI GPIO */
+#define CC_GCI_GPIO_INVALID		0xFF
+
+/* find the 4 bit mask given the bit position */
+#define GCIMASK(pos)  (((uint32)0xF) << pos)
+/* get the value which can be used to directly OR with chipcontrol reg */
+#define GCIPOSVAL(val, pos)  ((((uint32)val) << pos) & GCIMASK(pos))
+/* Extract nibble from a given position */
+#define GCIGETNBL(val, pos)	((val >> pos) & 0xF)
+
+
+/* find the 8 bit mask given the bit position */
+#define GCIMASK_8B(pos)  (((uint32)0xFF) << pos)
+/* get the value which can be used to directly OR with chipcontrol reg */
+#define GCIPOSVAL_8B(val, pos)  ((((uint32)val) << pos) & GCIMASK_8B(pos))
+/* Extract nibble from a given position */
+#define GCIGETNBL_8B(val, pos)	((val >> pos) & 0xFF)
+
+/* find the 4 bit mask given the bit position */
+#define GCIMASK_4B(pos)  (((uint32)0xF) << pos)
+/* get the value which can be used to directly OR with chipcontrol reg */
+#define GCIPOSVAL_4B(val, pos)  ((((uint32)val) << pos) & GCIMASK_4B(pos))
+/* Extract nibble from a given position */
+#define GCIGETNBL_4B(val, pos)	((val >> pos) & 0xF)
+
+
+/* 4335 GCI Intstatus(Mask)/WakeMask Register bits. */
+#define GCI_INTSTATUS_RBI	(1 << 0)	/* Rx Break Interrupt */
+#define GCI_INTSTATUS_UB	(1 << 1)	/* UART Break Interrupt */
+#define GCI_INTSTATUS_SPE	(1 << 2)	/* SECI Parity Error Interrupt */
+#define GCI_INTSTATUS_SFE	(1 << 3)	/* SECI Framing Error Interrupt */
+#define GCI_INTSTATUS_SRITI	(1 << 9)	/* SECI Rx Idle Timer Interrupt */
+#define GCI_INTSTATUS_STFF	(1 << 10)	/* SECI Tx FIFO Full Interrupt */
+#define GCI_INTSTATUS_STFAE	(1 << 11)	/* SECI Tx FIFO Almost Empty Intr */
+#define GCI_INTSTATUS_SRFAF	(1 << 12)	/* SECI Rx FIFO Almost Full */
+#define GCI_INTSTATUS_SRFNE	(1 << 14)	/* SECI Rx FIFO Not Empty */
+#define GCI_INTSTATUS_SRFOF	(1 << 15)	/* SECI Rx FIFO Not Empty Timeout */
+#define GCI_INTSTATUS_GPIOINT	(1 << 25)	/* GCIGpioInt */
+#define GCI_INTSTATUS_GPIOWAKE	(1 << 26)	/* GCIGpioWake */
+
+/* 4335 GCI IntMask Register bits. */
+#define GCI_INTMASK_RBI		(1 << 0)	/* Rx Break Interrupt */
+#define GCI_INTMASK_UB		(1 << 1)	/* UART Break Interrupt */
+#define GCI_INTMASK_SPE		(1 << 2)	/* SECI Parity Error Interrupt */
+#define GCI_INTMASK_SFE		(1 << 3)	/* SECI Framing Error Interrupt */
+#define GCI_INTMASK_SRITI	(1 << 9)	/* SECI Rx Idle Timer Interrupt */
+#define GCI_INTMASK_STFF	(1 << 10)	/* SECI Tx FIFO Full Interrupt */
+#define GCI_INTMASK_STFAE	(1 << 11)	/* SECI Tx FIFO Almost Empty Intr */
+#define GCI_INTMASK_SRFAF	(1 << 12)	/* SECI Rx FIFO Almost Full */
+#define GCI_INTMASK_SRFNE	(1 << 14)	/* SECI Rx FIFO Not Empty */
+#define GCI_INTMASK_SRFOF	(1 << 15)	/* SECI Rx FIFO Not Empty Timeout */
+#define GCI_INTMASK_GPIOINT	(1 << 25)	/* GCIGpioInt */
+#define GCI_INTMASK_GPIOWAKE	(1 << 26)	/* GCIGpioWake */
+
+/* 4335 GCI WakeMask Register bits. */
+#define GCI_WAKEMASK_RBI	(1 << 0)	/* Rx Break Interrupt */
+#define GCI_WAKEMASK_UB		(1 << 1)	/* UART Break Interrupt */
+#define GCI_WAKEMASK_SPE	(1 << 2)	/* SECI Parity Error Interrupt */
+#define GCI_WAKEMASK_SFE	(1 << 3)	/* SECI Framing Error Interrupt */
+#define GCI_WAKE_SRITI		(1 << 9)	/* SECI Rx Idle Timer Interrupt */
+#define GCI_WAKEMASK_STFF	(1 << 10)	/* SECI Tx FIFO Full Interrupt */
+#define GCI_WAKEMASK_STFAE	(1 << 11)	/* SECI Tx FIFO Almost Empty Intr */
+#define GCI_WAKEMASK_SRFAF	(1 << 12)	/* SECI Rx FIFO Almost Full */
+#define GCI_WAKEMASK_SRFNE	(1 << 14)	/* SECI Rx FIFO Not Empty */
+#define GCI_WAKEMASK_SRFOF	(1 << 15)	/* SECI Rx FIFO Not Empty Timeout */
+#define GCI_WAKEMASK_GPIOINT	(1 << 25)	/* GCIGpioInt */
+#define GCI_WAKEMASK_GPIOWAKE	(1 << 26)	/* GCIGpioWake */
+
+#define	GCI_WAKE_ON_GCI_GPIO1	1
+#define	GCI_WAKE_ON_GCI_GPIO2	2
+#define	GCI_WAKE_ON_GCI_GPIO3	3
+#define	GCI_WAKE_ON_GCI_GPIO4	4
+#define	GCI_WAKE_ON_GCI_GPIO5	5
+#define	GCI_WAKE_ON_GCI_GPIO6	6
+#define	GCI_WAKE_ON_GCI_GPIO7	7
+#define	GCI_WAKE_ON_GCI_GPIO8	8
+#define	GCI_WAKE_ON_GCI_SECI_IN	9
+
+/* 4335 MUX options. each nibble belongs to a setting. Non-zero value specifies a logic
+* for now only UART for bootloader.
+*/
+#define MUXENAB4335_UART_MASK		(0x0000000f)
+
+#define MUXENAB4335_UART_SHIFT		0
+#define MUXENAB4335_HOSTWAKE_MASK	(0x000000f0)	/* configure GPIO for SDIO host_wake */
+#define MUXENAB4335_HOSTWAKE_SHIFT	4
+#define MUXENAB4335_GETIX(val, name) \
+	((((val) & MUXENAB4335_ ## name ## _MASK) >> MUXENAB4335_ ## name ## _SHIFT) - 1)
+
+/*
+* Maximum delay for the PMU state transition in us.
+* This is an upper bound intended for spinwaits etc.
+*/
+#define PMU_MAX_TRANSITION_DLY	15000
+
+/* PMU resource up transition time in ILP cycles */
+#define PMURES_UP_TRANSITION	2
+
+
+/* SECI configuration */
+#define SECI_MODE_UART			0x0
+#define SECI_MODE_SECI			0x1
+#define SECI_MODE_LEGACY_3WIRE_BT	0x2
+#define SECI_MODE_LEGACY_3WIRE_WLAN	0x3
+#define SECI_MODE_HALF_SECI		0x4
+
+#define SECI_RESET		(1 << 0)
+#define SECI_RESET_BAR_UART	(1 << 1)
+#define SECI_ENAB_SECI_ECI	(1 << 2)
+#define SECI_ENAB_SECIOUT_DIS	(1 << 3)
+#define SECI_MODE_MASK		0x7
+#define SECI_MODE_SHIFT		4 /* (bits 5, 6, 7) */
+#define SECI_UPD_SECI		(1 << 7)
+
+#define SECI_SLIP_ESC_CHAR	0xDB
+#define SECI_SIGNOFF_0		SECI_SLIP_ESC_CHAR
+#define SECI_SIGNOFF_1     0
+#define SECI_REFRESH_REQ	0xDA
+
+/* seci clk_ctl_st bits */
+#define CLKCTL_STS_SECI_CLK_REQ		(1 << 8)
+#define CLKCTL_STS_SECI_CLK_AVAIL	(1 << 24)
+
+#define SECI_UART_MSR_CTS_STATE		(1 << 0)
+#define SECI_UART_MSR_RTS_STATE		(1 << 1)
+#define SECI_UART_SECI_IN_STATE		(1 << 2)
+#define SECI_UART_SECI_IN2_STATE	(1 << 3)
+
+/* GCI RX FIFO Control Register */
+#define	GCI_RXF_LVL_MASK	(0xFF << 0)
+#define	GCI_RXF_TIMEOUT_MASK	(0xFF << 8)
+
+/* GCI UART Registers' Bit definitions */
+/* Seci Fifo Level Register */
+#define	SECI_TXF_LVL_MASK	(0x3F << 8)
+#define	TXF_AE_LVL_DEFAULT	0x4
+#define	SECI_RXF_LVL_FC_MASK	(0x3F << 16)
+
+/* SeciUARTFCR Bit definitions */
+#define	SECI_UART_FCR_RFR		(1 << 0)
+#define	SECI_UART_FCR_TFR		(1 << 1)
+#define	SECI_UART_FCR_SR		(1 << 2)
+#define	SECI_UART_FCR_THP		(1 << 3)
+#define	SECI_UART_FCR_AB		(1 << 4)
+#define	SECI_UART_FCR_ATOE		(1 << 5)
+#define	SECI_UART_FCR_ARTSOE		(1 << 6)
+#define	SECI_UART_FCR_ABV		(1 << 7)
+#define	SECI_UART_FCR_ALM		(1 << 8)
+
+/* SECI UART LCR register bits */
+#define SECI_UART_LCR_STOP_BITS		(1 << 0) /* 0 - 1bit, 1 - 2bits */
+#define SECI_UART_LCR_PARITY_EN		(1 << 1)
+#define SECI_UART_LCR_PARITY		(1 << 2) /* 0 - odd, 1 - even */
+#define SECI_UART_LCR_RX_EN		(1 << 3)
+#define SECI_UART_LCR_LBRK_CTRL		(1 << 4) /* 1 => SECI_OUT held low */
+#define SECI_UART_LCR_TXO_EN		(1 << 5)
+#define SECI_UART_LCR_RTSO_EN		(1 << 6)
+#define SECI_UART_LCR_SLIPMODE_EN	(1 << 7)
+#define SECI_UART_LCR_RXCRC_CHK		(1 << 8)
+#define SECI_UART_LCR_TXCRC_INV		(1 << 9)
+#define SECI_UART_LCR_TXCRC_LSBF	(1 << 10)
+#define SECI_UART_LCR_TXCRC_EN		(1 << 11)
+#define	SECI_UART_LCR_RXSYNC_EN		(1 << 12)
+
+#define SECI_UART_MCR_TX_EN		(1 << 0)
+#define SECI_UART_MCR_PRTS		(1 << 1)
+#define SECI_UART_MCR_SWFLCTRL_EN	(1 << 2)
+#define SECI_UART_MCR_HIGHRATE_EN	(1 << 3)
+#define SECI_UART_MCR_LOOPBK_EN		(1 << 4)
+#define SECI_UART_MCR_AUTO_RTS		(1 << 5)
+#define SECI_UART_MCR_AUTO_TX_DIS	(1 << 6)
+#define SECI_UART_MCR_BAUD_ADJ_EN	(1 << 7)
+#define SECI_UART_MCR_XONOFF_RPT	(1 << 9)
+
+/* SeciUARTLSR Bit Mask */
+#define	SECI_UART_LSR_RXOVR_MASK	(1 << 0)
+#define	SECI_UART_LSR_RFF_MASK		(1 << 1)
+#define	SECI_UART_LSR_TFNE_MASK		(1 << 2)
+#define	SECI_UART_LSR_TI_MASK		(1 << 3)
+#define	SECI_UART_LSR_TPR_MASK		(1 << 4)
+#define	SECI_UART_LSR_TXHALT_MASK	(1 << 5)
+
+/* SeciUARTMSR Bit Mask */
+#define	SECI_UART_MSR_CTSS_MASK		(1 << 0)
+#define	SECI_UART_MSR_RTSS_MASK		(1 << 1)
+#define	SECI_UART_MSR_SIS_MASK		(1 << 2)
+#define	SECI_UART_MSR_SIS2_MASK		(1 << 3)
+
+/* SeciUARTData Bits */
+#define SECI_UART_DATA_RF_NOT_EMPTY_BIT	(1 << 12)
+#define SECI_UART_DATA_RF_FULL_BIT	(1 << 13)
+#define SECI_UART_DATA_RF_OVRFLOW_BIT	(1 << 14)
+#define	SECI_UART_DATA_FIFO_PTR_MASK	0xFF
+#define	SECI_UART_DATA_RF_RD_PTR_SHIFT	16
+#define	SECI_UART_DATA_RF_WR_PTR_SHIFT	24
+
+/* LTECX: ltecxmux */
+#define LTECX_EXTRACT_MUX(val, idx)	(getbit4(&(val), (idx)))
+
+/* LTECX: ltecxmux MODE */
+#define LTECX_MUX_MODE_IDX		0
+#define LTECX_MUX_MODE_WCI2		0x0
+#define LTECX_MUX_MODE_GPIO		0x1
+
+
+/* LTECX GPIO Information Index */
+#define LTECX_NVRAM_FSYNC_IDX	0
+#define LTECX_NVRAM_LTERX_IDX	1
+#define LTECX_NVRAM_LTETX_IDX	2
+#define LTECX_NVRAM_WLPRIO_IDX	3
+
+/* LTECX WCI2 Information Index */
+#define LTECX_NVRAM_WCI2IN_IDX	0
+#define LTECX_NVRAM_WCI2OUT_IDX	1
+
+/* LTECX: Macros to get GPIO/FNSEL/GCIGPIO */
+#define LTECX_EXTRACT_PADNUM(val, idx)	(getbit8(&(val), (idx)))
+#define LTECX_EXTRACT_FNSEL(val, idx)	(getbit4(&(val), (idx)))
+#define LTECX_EXTRACT_GCIGPIO(val, idx)	(getbit4(&(val), (idx)))
+
+/* WLAN channel numbers - used from wifi.h */
+
+/* WLAN BW */
+#define ECI_BW_20   0x0
+#define ECI_BW_25   0x1
+#define ECI_BW_30   0x2
+#define ECI_BW_35   0x3
+#define ECI_BW_40   0x4
+#define ECI_BW_45   0x5
+#define ECI_BW_50   0x6
+#define ECI_BW_ALL  0x7
+
+/* WLAN - number of antenna */
+#define WLAN_NUM_ANT1 TXANT_0
+#define WLAN_NUM_ANT2 TXANT_1
+
+/* otpctrl1 0xF4 */
+#define OTPC_FORCE_PWR_OFF	0x02000000
+/* chipcommon s/r registers introduced with cc rev >= 48 */
+#define CC_SR_CTL0_ENABLE_MASK             0x1
+#define CC_SR_CTL0_ENABLE_SHIFT              0
+#define CC_SR_CTL0_EN_SR_ENG_CLK_SHIFT       1 /* sr_clk to sr_memory enable */
+#define CC_SR_CTL0_RSRC_TRIGGER_SHIFT        2 /* Rising edge resource trigger 0 to sr_engine  */
+#define CC_SR_CTL0_MIN_DIV_SHIFT             6 /* Min division value for fast clk in sr_engine */
+#define CC_SR_CTL0_EN_SBC_STBY_SHIFT        16 /* Allow Subcore mem StandBy? */
+#define CC_SR_CTL0_EN_SR_ALP_CLK_MASK_SHIFT 18
+#define CC_SR_CTL0_EN_SR_HT_CLK_SHIFT       19
+#define CC_SR_CTL0_ALLOW_PIC_SHIFT          20 /* Allow pic to separate power domains */
+#define CC_SR_CTL0_MAX_SR_LQ_CLK_CNT_SHIFT  25
+#define CC_SR_CTL0_EN_MEM_DISABLE_FOR_SLEEP 30
+
+#define CC_SR_CTL1_SR_INIT_MASK             0x3FF
+#define CC_SR_CTL1_SR_INIT_SHIFT            0
+
+#define	ECI_INLO_PKTDUR_MASK	0x000000f0 /* [7:4] - 4 bits */
+#define ECI_INLO_PKTDUR_SHIFT	4
+
+/* gci chip control bits */
+#define GCI_GPIO_CHIPCTRL_ENAB_IN_BIT		0
+#define GCI_GPIO_CHIPCTRL_ENAB_OP_BIT		1
+#define GCI_GPIO_CHIPCTRL_INVERT_BIT		2
+#define GCI_GPIO_CHIPCTRL_PULLUP_BIT		3
+#define GCI_GPIO_CHIPCTRL_PULLDN_BIT		4
+#define GCI_GPIO_CHIPCTRL_ENAB_BTSIG_BIT	5
+#define GCI_GPIO_CHIPCTRL_ENAB_OD_OP_BIT	6
+#define GCI_GPIO_CHIPCTRL_ENAB_EXT_GPIO_BIT	7
+
+/* gci GPIO input status bits */
+#define GCI_GPIO_STS_VALUE_BIT			0
+#define GCI_GPIO_STS_POS_EDGE_BIT		1
+#define GCI_GPIO_STS_NEG_EDGE_BIT		2
+#define GCI_GPIO_STS_FAST_EDGE_BIT		3
+#define GCI_GPIO_STS_CLEAR			0xF
+
+#define GCI_GPIO_STS_VALUE	(1 << GCI_GPIO_STS_VALUE_BIT)
+
+#endif	/* _SBCHIPC_H */
diff --git a/drivers/net/wireless/bcmdhd/include/sbconfig.h b/drivers/net/wireless/bcmdhd/include/sbconfig.h
new file mode 100644
index 0000000..812e325
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sbconfig.h
@@ -0,0 +1,282 @@
+/*
+ * Broadcom SiliconBackplane hardware register definitions.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sbconfig.h 456346 2014-02-18 16:48:52Z $
+ */
+
+#ifndef	_SBCONFIG_H
+#define	_SBCONFIG_H
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define	_PADLINE(line)	pad ## line
+#define	_XSTR(line)	_PADLINE(line)
+#define	PAD		_XSTR(__LINE__)
+#endif
+
+/* enumeration in SB is based on the premise that cores are contiguos in the
+ * enumeration space.
+ */
+#define SB_BUS_SIZE		0x10000		/* Each bus gets 64Kbytes for cores */
+#define SB_BUS_BASE(b)		(SI_ENUM_BASE + (b) * SB_BUS_SIZE)
+#define	SB_BUS_MAXCORES		(SB_BUS_SIZE / SI_CORE_SIZE)	/* Max cores per bus */
+
+/*
+ * Sonics Configuration Space Registers.
+ */
+#define	SBCONFIGOFF		0xf00		/* core sbconfig regs are top 256bytes of regs */
+#define	SBCONFIGSIZE		256		/* sizeof (sbconfig_t) */
+
+#define SBIPSFLAG		0x08
+#define SBTPSFLAG		0x18
+#define	SBTMERRLOGA		0x48		/* sonics >= 2.3 */
+#define	SBTMERRLOG		0x50		/* sonics >= 2.3 */
+#define SBADMATCH3		0x60
+#define SBADMATCH2		0x68
+#define SBADMATCH1		0x70
+#define SBIMSTATE		0x90
+#define SBINTVEC		0x94
+#define SBTMSTATELOW		0x98
+#define SBTMSTATEHIGH		0x9c
+#define SBBWA0			0xa0
+#define SBIMCONFIGLOW		0xa8
+#define SBIMCONFIGHIGH		0xac
+#define SBADMATCH0		0xb0
+#define SBTMCONFIGLOW		0xb8
+#define SBTMCONFIGHIGH		0xbc
+#define SBBCONFIG		0xc0
+#define SBBSTATE		0xc8
+#define SBACTCNFG		0xd8
+#define	SBFLAGST		0xe8
+#define SBIDLOW			0xf8
+#define SBIDHIGH		0xfc
+
+/* All the previous registers are above SBCONFIGOFF, but with Sonics 2.3, we have
+ * a few registers *below* that line. I think it would be very confusing to try
+ * and change the value of SBCONFIGOFF, so I'm definig them as absolute offsets here,
+ */
+
+#define SBIMERRLOGA		0xea8
+#define SBIMERRLOG		0xeb0
+#define SBTMPORTCONNID0		0xed8
+#define SBTMPORTLOCK0		0xef8
+
+#if !defined(_LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLY__)
+
+typedef volatile struct _sbconfig {
+	uint32	PAD[2];
+	uint32	sbipsflag;		/* initiator port ocp slave flag */
+	uint32	PAD[3];
+	uint32	sbtpsflag;		/* target port ocp slave flag */
+	uint32	PAD[11];
+	uint32	sbtmerrloga;		/* (sonics >= 2.3) */
+	uint32	PAD;
+	uint32	sbtmerrlog;		/* (sonics >= 2.3) */
+	uint32	PAD[3];
+	uint32	sbadmatch3;		/* address match3 */
+	uint32	PAD;
+	uint32	sbadmatch2;		/* address match2 */
+	uint32	PAD;
+	uint32	sbadmatch1;		/* address match1 */
+	uint32	PAD[7];
+	uint32	sbimstate;		/* initiator agent state */
+	uint32	sbintvec;		/* interrupt mask */
+	uint32	sbtmstatelow;		/* target state */
+	uint32	sbtmstatehigh;		/* target state */
+	uint32	sbbwa0;			/* bandwidth allocation table0 */
+	uint32	PAD;
+	uint32	sbimconfiglow;		/* initiator configuration */
+	uint32	sbimconfighigh;		/* initiator configuration */
+	uint32	sbadmatch0;		/* address match0 */
+	uint32	PAD;
+	uint32	sbtmconfiglow;		/* target configuration */
+	uint32	sbtmconfighigh;		/* target configuration */
+	uint32	sbbconfig;		/* broadcast configuration */
+	uint32	PAD;
+	uint32	sbbstate;		/* broadcast state */
+	uint32	PAD[3];
+	uint32	sbactcnfg;		/* activate configuration */
+	uint32	PAD[3];
+	uint32	sbflagst;		/* current sbflags */
+	uint32	PAD[3];
+	uint32	sbidlow;		/* identification */
+	uint32	sbidhigh;		/* identification */
+} sbconfig_t;
+
+#endif /* !_LANGUAGE_ASSEMBLY && !__ASSEMBLY__ */
+
+/* sbipsflag */
+#define	SBIPS_INT1_MASK		0x3f		/* which sbflags get routed to mips interrupt 1 */
+#define	SBIPS_INT1_SHIFT	0
+#define	SBIPS_INT2_MASK		0x3f00		/* which sbflags get routed to mips interrupt 2 */
+#define	SBIPS_INT2_SHIFT	8
+#define	SBIPS_INT3_MASK		0x3f0000	/* which sbflags get routed to mips interrupt 3 */
+#define	SBIPS_INT3_SHIFT	16
+#define	SBIPS_INT4_MASK		0x3f000000	/* which sbflags get routed to mips interrupt 4 */
+#define	SBIPS_INT4_SHIFT	24
+
+/* sbtpsflag */
+#define	SBTPS_NUM0_MASK		0x3f		/* interrupt sbFlag # generated by this core */
+#define	SBTPS_F0EN0		0x40		/* interrupt is always sent on the backplane */
+
+/* sbtmerrlog */
+#define	SBTMEL_CM		0x00000007	/* command */
+#define	SBTMEL_CI		0x0000ff00	/* connection id */
+#define	SBTMEL_EC		0x0f000000	/* error code */
+#define	SBTMEL_ME		0x80000000	/* multiple error */
+
+/* sbimstate */
+#define	SBIM_PC			0xf		/* pipecount */
+#define	SBIM_AP_MASK		0x30		/* arbitration policy */
+#define	SBIM_AP_BOTH		0x00		/* use both timeslaces and token */
+#define	SBIM_AP_TS		0x10		/* use timesliaces only */
+#define	SBIM_AP_TK		0x20		/* use token only */
+#define	SBIM_AP_RSV		0x30		/* reserved */
+#define	SBIM_IBE		0x20000		/* inbanderror */
+#define	SBIM_TO			0x40000		/* timeout */
+#define	SBIM_BY			0x01800000	/* busy (sonics >= 2.3) */
+#define	SBIM_RJ			0x02000000	/* reject (sonics >= 2.3) */
+
+/* sbtmstatelow */
+#define	SBTML_RESET		0x0001		/* reset */
+#define	SBTML_REJ_MASK		0x0006		/* reject field */
+#define	SBTML_REJ		0x0002		/* reject */
+#define	SBTML_TMPREJ		0x0004		/* temporary reject, for error recovery */
+
+#define	SBTML_SICF_SHIFT	16		/* Shift to locate the SI control flags in sbtml */
+
+/* sbtmstatehigh */
+#define	SBTMH_SERR		0x0001		/* serror */
+#define	SBTMH_INT		0x0002		/* interrupt */
+#define	SBTMH_BUSY		0x0004		/* busy */
+#define	SBTMH_TO		0x0020		/* timeout (sonics >= 2.3) */
+
+#define	SBTMH_SISF_SHIFT	16		/* Shift to locate the SI status flags in sbtmh */
+
+/* sbbwa0 */
+#define	SBBWA_TAB0_MASK		0xffff		/* lookup table 0 */
+#define	SBBWA_TAB1_MASK		0xffff		/* lookup table 1 */
+#define	SBBWA_TAB1_SHIFT	16
+
+/* sbimconfiglow */
+#define	SBIMCL_STO_MASK		0x7		/* service timeout */
+#define	SBIMCL_RTO_MASK		0x70		/* request timeout */
+#define	SBIMCL_RTO_SHIFT	4
+#define	SBIMCL_CID_MASK		0xff0000	/* connection id */
+#define	SBIMCL_CID_SHIFT	16
+
+/* sbimconfighigh */
+#define	SBIMCH_IEM_MASK		0xc		/* inband error mode */
+#define	SBIMCH_TEM_MASK		0x30		/* timeout error mode */
+#define	SBIMCH_TEM_SHIFT	4
+#define	SBIMCH_BEM_MASK		0xc0		/* bus error mode */
+#define	SBIMCH_BEM_SHIFT	6
+
+/* sbadmatch0 */
+#define	SBAM_TYPE_MASK		0x3		/* address type */
+#define	SBAM_AD64		0x4		/* reserved */
+#define	SBAM_ADINT0_MASK	0xf8		/* type0 size */
+#define	SBAM_ADINT0_SHIFT	3
+#define	SBAM_ADINT1_MASK	0x1f8		/* type1 size */
+#define	SBAM_ADINT1_SHIFT	3
+#define	SBAM_ADINT2_MASK	0x1f8		/* type2 size */
+#define	SBAM_ADINT2_SHIFT	3
+#define	SBAM_ADEN		0x400		/* enable */
+#define	SBAM_ADNEG		0x800		/* negative decode */
+#define	SBAM_BASE0_MASK		0xffffff00	/* type0 base address */
+#define	SBAM_BASE0_SHIFT	8
+#define	SBAM_BASE1_MASK		0xfffff000	/* type1 base address for the core */
+#define	SBAM_BASE1_SHIFT	12
+#define	SBAM_BASE2_MASK		0xffff0000	/* type2 base address for the core */
+#define	SBAM_BASE2_SHIFT	16
+
+/* sbtmconfiglow */
+#define	SBTMCL_CD_MASK		0xff		/* clock divide */
+#define	SBTMCL_CO_MASK		0xf800		/* clock offset */
+#define	SBTMCL_CO_SHIFT		11
+#define	SBTMCL_IF_MASK		0xfc0000	/* interrupt flags */
+#define	SBTMCL_IF_SHIFT		18
+#define	SBTMCL_IM_MASK		0x3000000	/* interrupt mode */
+#define	SBTMCL_IM_SHIFT		24
+
+/* sbtmconfighigh */
+#define	SBTMCH_BM_MASK		0x3		/* busy mode */
+#define	SBTMCH_RM_MASK		0x3		/* retry mode */
+#define	SBTMCH_RM_SHIFT		2
+#define	SBTMCH_SM_MASK		0x30		/* stop mode */
+#define	SBTMCH_SM_SHIFT		4
+#define	SBTMCH_EM_MASK		0x300		/* sb error mode */
+#define	SBTMCH_EM_SHIFT		8
+#define	SBTMCH_IM_MASK		0xc00		/* int mode */
+#define	SBTMCH_IM_SHIFT		10
+
+/* sbbconfig */
+#define	SBBC_LAT_MASK		0x3		/* sb latency */
+#define	SBBC_MAX0_MASK		0xf0000		/* maxccntr0 */
+#define	SBBC_MAX0_SHIFT		16
+#define	SBBC_MAX1_MASK		0xf00000	/* maxccntr1 */
+#define	SBBC_MAX1_SHIFT		20
+
+/* sbbstate */
+#define	SBBS_SRD		0x1		/* st reg disable */
+#define	SBBS_HRD		0x2		/* hold reg disable */
+
+/* sbidlow */
+#define	SBIDL_CS_MASK		0x3		/* config space */
+#define	SBIDL_AR_MASK		0x38		/* # address ranges supported */
+#define	SBIDL_AR_SHIFT		3
+#define	SBIDL_SYNCH		0x40		/* sync */
+#define	SBIDL_INIT		0x80		/* initiator */
+#define	SBIDL_MINLAT_MASK	0xf00		/* minimum backplane latency */
+#define	SBIDL_MINLAT_SHIFT	8
+#define	SBIDL_MAXLAT		0xf000		/* maximum backplane latency */
+#define	SBIDL_MAXLAT_SHIFT	12
+#define	SBIDL_FIRST		0x10000		/* this initiator is first */
+#define	SBIDL_CW_MASK		0xc0000		/* cycle counter width */
+#define	SBIDL_CW_SHIFT		18
+#define	SBIDL_TP_MASK		0xf00000	/* target ports */
+#define	SBIDL_TP_SHIFT		20
+#define	SBIDL_IP_MASK		0xf000000	/* initiator ports */
+#define	SBIDL_IP_SHIFT		24
+#define	SBIDL_RV_MASK		0xf0000000	/* sonics backplane revision code */
+#define	SBIDL_RV_SHIFT		28
+#define	SBIDL_RV_2_2		0x00000000	/* version 2.2 or earlier */
+#define	SBIDL_RV_2_3		0x10000000	/* version 2.3 */
+
+/* sbidhigh */
+#define	SBIDH_RC_MASK		0x000f		/* revision code */
+#define	SBIDH_RCE_MASK		0x7000		/* revision code extension field */
+#define	SBIDH_RCE_SHIFT		8
+#define	SBCOREREV(sbidh) \
+	((((sbidh) & SBIDH_RCE_MASK) >> SBIDH_RCE_SHIFT) | ((sbidh) & SBIDH_RC_MASK))
+#define	SBIDH_CC_MASK		0x8ff0		/* core code */
+#define	SBIDH_CC_SHIFT		4
+#define	SBIDH_VC_MASK		0xffff0000	/* vendor code */
+#define	SBIDH_VC_SHIFT		16
+
+#define	SB_COMMIT		0xfd8		/* update buffered registers value */
+
+/* vendor codes */
+#define	SB_VEND_BCM		0x4243		/* Broadcom's SB vendor code */
+
+#endif	/* _SBCONFIG_H */
diff --git a/drivers/net/wireless/bcmdhd/include/sbhnddma.h b/drivers/net/wireless/bcmdhd/include/sbhnddma.h
new file mode 100644
index 0000000..cbd9f0a
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sbhnddma.h
@@ -0,0 +1,417 @@
+/*
+ * Generic Broadcom Home Networking Division (HND) DMA engine HW interface
+ * This supports the following chips: BCM42xx, 44xx, 47xx .
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sbhnddma.h 452424 2014-01-30 09:43:39Z $
+ */
+
+#ifndef	_sbhnddma_h_
+#define	_sbhnddma_h_
+
+/* DMA structure:
+ *  support two DMA engines: 32 bits address or 64 bit addressing
+ *  basic DMA register set is per channel(transmit or receive)
+ *  a pair of channels is defined for convenience
+ */
+
+
+/* 32 bits addressing */
+
+/* dma registers per channel(xmt or rcv) */
+typedef volatile struct {
+	uint32	control;		/* enable, et al */
+	uint32	addr;			/* descriptor ring base address (4K aligned) */
+	uint32	ptr;			/* last descriptor posted to chip */
+	uint32	status;			/* current active descriptor, et al */
+} dma32regs_t;
+
+typedef volatile struct {
+	dma32regs_t	xmt;		/* dma tx channel */
+	dma32regs_t	rcv;		/* dma rx channel */
+} dma32regp_t;
+
+typedef volatile struct {	/* diag access */
+	uint32	fifoaddr;		/* diag address */
+	uint32	fifodatalow;		/* low 32bits of data */
+	uint32	fifodatahigh;		/* high 32bits of data */
+	uint32	pad;			/* reserved */
+} dma32diag_t;
+
+/*
+ * DMA Descriptor
+ * Descriptors are only read by the hardware, never written back.
+ */
+typedef volatile struct {
+	uint32	ctrl;		/* misc control bits & bufcount */
+	uint32	addr;		/* data buffer address */
+} dma32dd_t;
+
+/*
+ * Each descriptor ring must be 4096byte aligned, and fit within a single 4096byte page.
+ */
+#define	D32RINGALIGN_BITS	12
+#define	D32MAXRINGSZ		(1 << D32RINGALIGN_BITS)
+#define	D32RINGALIGN		(1 << D32RINGALIGN_BITS)
+
+#define	D32MAXDD	(D32MAXRINGSZ / sizeof (dma32dd_t))
+
+/* transmit channel control */
+#define	XC_XE		((uint32)1 << 0)	/* transmit enable */
+#define	XC_SE		((uint32)1 << 1)	/* transmit suspend request */
+#define	XC_LE		((uint32)1 << 2)	/* loopback enable */
+#define	XC_FL		((uint32)1 << 4)	/* flush request */
+#define XC_MR_MASK	0x000001C0		/* Multiple outstanding reads */
+#define XC_MR_SHIFT	6
+#define	XC_PD		((uint32)1 << 11)	/* parity check disable */
+#define	XC_AE		((uint32)3 << 16)	/* address extension bits */
+#define	XC_AE_SHIFT	16
+#define XC_BL_MASK	0x001C0000		/* BurstLen bits */
+#define XC_BL_SHIFT	18
+#define XC_PC_MASK	0x00E00000		/* Prefetch control */
+#define XC_PC_SHIFT	21
+#define XC_PT_MASK	0x03000000		/* Prefetch threshold */
+#define XC_PT_SHIFT	24
+
+/* Multiple outstanding reads */
+#define DMA_MR_1	0
+#define DMA_MR_2	1
+#define DMA_MR_4	2
+#define DMA_MR_8	3
+#define DMA_MR_12	4
+#define DMA_MR_16	5
+#define DMA_MR_20	6
+#define DMA_MR_32	7
+
+/* DMA Burst Length in bytes */
+#define DMA_BL_16	0
+#define DMA_BL_32	1
+#define DMA_BL_64	2
+#define DMA_BL_128	3
+#define DMA_BL_256	4
+#define DMA_BL_512	5
+#define DMA_BL_1024	6
+
+/* Prefetch control */
+#define DMA_PC_0	0
+#define DMA_PC_4	1
+#define DMA_PC_8	2
+#define DMA_PC_16	3
+/* others: reserved */
+
+/* Prefetch threshold */
+#define DMA_PT_1	0
+#define DMA_PT_2	1
+#define DMA_PT_4	2
+#define DMA_PT_8	3
+
+/* transmit descriptor table pointer */
+#define	XP_LD_MASK	0xfff			/* last valid descriptor */
+
+/* transmit channel status */
+#define	XS_CD_MASK	0x0fff			/* current descriptor pointer */
+#define	XS_XS_MASK	0xf000			/* transmit state */
+#define	XS_XS_SHIFT	12
+#define	XS_XS_DISABLED	0x0000			/* disabled */
+#define	XS_XS_ACTIVE	0x1000			/* active */
+#define	XS_XS_IDLE	0x2000			/* idle wait */
+#define	XS_XS_STOPPED	0x3000			/* stopped */
+#define	XS_XS_SUSP	0x4000			/* suspend pending */
+#define	XS_XE_MASK	0xf0000			/* transmit errors */
+#define	XS_XE_SHIFT	16
+#define	XS_XE_NOERR	0x00000			/* no error */
+#define	XS_XE_DPE	0x10000			/* descriptor protocol error */
+#define	XS_XE_DFU	0x20000			/* data fifo underrun */
+#define	XS_XE_BEBR	0x30000			/* bus error on buffer read */
+#define	XS_XE_BEDA	0x40000			/* bus error on descriptor access */
+#define	XS_AD_MASK	0xfff00000		/* active descriptor */
+#define	XS_AD_SHIFT	20
+
+/* receive channel control */
+#define	RC_RE		((uint32)1 << 0)	/* receive enable */
+#define	RC_RO_MASK	0xfe			/* receive frame offset */
+#define	RC_RO_SHIFT	1
+#define	RC_FM		((uint32)1 << 8)	/* direct fifo receive (pio) mode */
+#define	RC_SH		((uint32)1 << 9)	/* separate rx header descriptor enable */
+#define	RC_OC		((uint32)1 << 10)	/* overflow continue */
+#define	RC_PD		((uint32)1 << 11)	/* parity check disable */
+#define	RC_AE		((uint32)3 << 16)	/* address extension bits */
+#define	RC_AE_SHIFT	16
+#define RC_BL_MASK	0x001C0000		/* BurstLen bits */
+#define RC_BL_SHIFT	18
+#define RC_PC_MASK	0x00E00000		/* Prefetch control */
+#define RC_PC_SHIFT	21
+#define RC_PT_MASK	0x03000000		/* Prefetch threshold */
+#define RC_PT_SHIFT	24
+
+/* receive descriptor table pointer */
+#define	RP_LD_MASK	0xfff			/* last valid descriptor */
+
+/* receive channel status */
+#define	RS_CD_MASK	0x0fff			/* current descriptor pointer */
+#define	RS_RS_MASK	0xf000			/* receive state */
+#define	RS_RS_SHIFT	12
+#define	RS_RS_DISABLED	0x0000			/* disabled */
+#define	RS_RS_ACTIVE	0x1000			/* active */
+#define	RS_RS_IDLE	0x2000			/* idle wait */
+#define	RS_RS_STOPPED	0x3000			/* reserved */
+#define	RS_RE_MASK	0xf0000			/* receive errors */
+#define	RS_RE_SHIFT	16
+#define	RS_RE_NOERR	0x00000			/* no error */
+#define	RS_RE_DPE	0x10000			/* descriptor protocol error */
+#define	RS_RE_DFO	0x20000			/* data fifo overflow */
+#define	RS_RE_BEBW	0x30000			/* bus error on buffer write */
+#define	RS_RE_BEDA	0x40000			/* bus error on descriptor access */
+#define	RS_AD_MASK	0xfff00000		/* active descriptor */
+#define	RS_AD_SHIFT	20
+
+/* fifoaddr */
+#define	FA_OFF_MASK	0xffff			/* offset */
+#define	FA_SEL_MASK	0xf0000			/* select */
+#define	FA_SEL_SHIFT	16
+#define	FA_SEL_XDD	0x00000			/* transmit dma data */
+#define	FA_SEL_XDP	0x10000			/* transmit dma pointers */
+#define	FA_SEL_RDD	0x40000			/* receive dma data */
+#define	FA_SEL_RDP	0x50000			/* receive dma pointers */
+#define	FA_SEL_XFD	0x80000			/* transmit fifo data */
+#define	FA_SEL_XFP	0x90000			/* transmit fifo pointers */
+#define	FA_SEL_RFD	0xc0000			/* receive fifo data */
+#define	FA_SEL_RFP	0xd0000			/* receive fifo pointers */
+#define	FA_SEL_RSD	0xe0000			/* receive frame status data */
+#define	FA_SEL_RSP	0xf0000			/* receive frame status pointers */
+
+/* descriptor control flags */
+#define	CTRL_BC_MASK	0x00001fff		/* buffer byte count, real data len must <= 4KB */
+#define	CTRL_AE		((uint32)3 << 16)	/* address extension bits */
+#define	CTRL_AE_SHIFT	16
+#define	CTRL_PARITY	((uint32)3 << 18)	/* parity bit */
+#define	CTRL_EOT	((uint32)1 << 28)	/* end of descriptor table */
+#define	CTRL_IOC	((uint32)1 << 29)	/* interrupt on completion */
+#define	CTRL_EOF	((uint32)1 << 30)	/* end of frame */
+#define	CTRL_SOF	((uint32)1 << 31)	/* start of frame */
+
+/* control flags in the range [27:20] are core-specific and not defined here */
+#define	CTRL_CORE_MASK	0x0ff00000
+
+/* 64 bits addressing */
+
+/* dma registers per channel(xmt or rcv) */
+typedef volatile struct {
+	uint32	control;		/* enable, et al */
+	uint32	ptr;			/* last descriptor posted to chip */
+	uint32	addrlow;		/* descriptor ring base address low 32-bits (8K aligned) */
+	uint32	addrhigh;		/* descriptor ring base address bits 63:32 (8K aligned) */
+	uint32	status0;		/* current descriptor, xmt state */
+	uint32	status1;		/* active descriptor, xmt error */
+} dma64regs_t;
+
+typedef volatile struct {
+	dma64regs_t	tx;		/* dma64 tx channel */
+	dma64regs_t	rx;		/* dma64 rx channel */
+} dma64regp_t;
+
+typedef volatile struct {		/* diag access */
+	uint32	fifoaddr;		/* diag address */
+	uint32	fifodatalow;		/* low 32bits of data */
+	uint32	fifodatahigh;		/* high 32bits of data */
+	uint32	pad;			/* reserved */
+} dma64diag_t;
+
+/*
+ * DMA Descriptor
+ * Descriptors are only read by the hardware, never written back.
+ */
+typedef volatile struct {
+	uint32	ctrl1;		/* misc control bits */
+	uint32	ctrl2;		/* buffer count and address extension */
+	uint32	addrlow;	/* memory address of the date buffer, bits 31:0 */
+	uint32	addrhigh;	/* memory address of the date buffer, bits 63:32 */
+} dma64dd_t;
+
+/*
+ * Each descriptor ring must be 8kB aligned, and fit within a contiguous 8kB physical addresss.
+ */
+#define D64RINGALIGN_BITS	13
+#define	D64MAXRINGSZ		(1 << D64RINGALIGN_BITS)
+#define	D64RINGBOUNDARY		(1 << D64RINGALIGN_BITS)
+
+#define	D64MAXDD	(D64MAXRINGSZ / sizeof (dma64dd_t))
+
+/* for cores with large descriptor ring support, descriptor ring size can be up to 4096 */
+#define	D64MAXDD_LARGE		((1 << 16) / sizeof (dma64dd_t))
+
+/* for cores with large descriptor ring support (4k descriptors), descriptor ring cannot cross
+ * 64K boundary
+ */
+#define	D64RINGBOUNDARY_LARGE	(1 << 16)
+
+/*
+ * Default DMA Burstlen values for USBRev >= 12 and SDIORev >= 11.
+ * When this field contains the value N, the burst length is 2**(N + 4) bytes.
+ */
+#define D64_DEF_USBBURSTLEN     2
+#define D64_DEF_SDIOBURSTLEN    1
+
+
+#ifndef D64_USBBURSTLEN
+#define D64_USBBURSTLEN	DMA_BL_64
+#endif
+#ifndef D64_SDIOBURSTLEN
+#define D64_SDIOBURSTLEN	DMA_BL_32
+#endif
+
+/* transmit channel control */
+#define	D64_XC_XE		0x00000001	/* transmit enable */
+#define	D64_XC_SE		0x00000002	/* transmit suspend request */
+#define	D64_XC_LE		0x00000004	/* loopback enable */
+#define	D64_XC_FL		0x00000010	/* flush request */
+#define D64_XC_MR_MASK		0x000001C0	/* Multiple outstanding reads */
+#define D64_XC_MR_SHIFT		6
+#define	D64_XC_PD		0x00000800	/* parity check disable */
+#define	D64_XC_AE		0x00030000	/* address extension bits */
+#define	D64_XC_AE_SHIFT		16
+#define D64_XC_BL_MASK		0x001C0000	/* BurstLen bits */
+#define D64_XC_BL_SHIFT		18
+#define D64_XC_PC_MASK		0x00E00000		/* Prefetch control */
+#define D64_XC_PC_SHIFT		21
+#define D64_XC_PT_MASK		0x03000000		/* Prefetch threshold */
+#define D64_XC_PT_SHIFT		24
+
+/* transmit descriptor table pointer */
+#define	D64_XP_LD_MASK		0x00001fff	/* last valid descriptor */
+
+/* transmit channel status */
+#define	D64_XS0_CD_MASK		(di->d64_xs0_cd_mask)	/* current descriptor pointer */
+#define	D64_XS0_XS_MASK		0xf0000000     	/* transmit state */
+#define	D64_XS0_XS_SHIFT		28
+#define	D64_XS0_XS_DISABLED	0x00000000	/* disabled */
+#define	D64_XS0_XS_ACTIVE	0x10000000	/* active */
+#define	D64_XS0_XS_IDLE		0x20000000	/* idle wait */
+#define	D64_XS0_XS_STOPPED	0x30000000	/* stopped */
+#define	D64_XS0_XS_SUSP		0x40000000	/* suspend pending */
+
+#define	D64_XS1_AD_MASK		(di->d64_xs1_ad_mask)	/* active descriptor */
+#define	D64_XS1_XE_MASK		0xf0000000     	/* transmit errors */
+#define	D64_XS1_XE_SHIFT		28
+#define	D64_XS1_XE_NOERR	0x00000000	/* no error */
+#define	D64_XS1_XE_DPE		0x10000000	/* descriptor protocol error */
+#define	D64_XS1_XE_DFU		0x20000000	/* data fifo underrun */
+#define	D64_XS1_XE_DTE		0x30000000	/* data transfer error */
+#define	D64_XS1_XE_DESRE	0x40000000	/* descriptor read error */
+#define	D64_XS1_XE_COREE	0x50000000	/* core error */
+
+/* receive channel control */
+#define	D64_RC_RE		0x00000001	/* receive enable */
+#define	D64_RC_RO_MASK		0x000000fe	/* receive frame offset */
+#define	D64_RC_RO_SHIFT		1
+#define	D64_RC_FM		0x00000100	/* direct fifo receive (pio) mode */
+#define	D64_RC_SH		0x00000200	/* separate rx header descriptor enable */
+#define	D64_RC_SHIFT		9	/* separate rx header descriptor enable */
+#define	D64_RC_OC		0x00000400	/* overflow continue */
+#define	D64_RC_PD		0x00000800	/* parity check disable */
+#define D64_RC_GE		0x00004000	/* Glom enable */
+#define	D64_RC_AE		0x00030000	/* address extension bits */
+#define	D64_RC_AE_SHIFT		16
+#define D64_RC_BL_MASK		0x001C0000	/* BurstLen bits */
+#define D64_RC_BL_SHIFT		18
+#define D64_RC_PC_MASK		0x00E00000	/* Prefetch control */
+#define D64_RC_PC_SHIFT		21
+#define D64_RC_PT_MASK		0x03000000	/* Prefetch threshold */
+#define D64_RC_PT_SHIFT		24
+
+/* flags for dma controller */
+#define DMA_CTRL_PEN		(1 << 0)	/* partity enable */
+#define DMA_CTRL_ROC		(1 << 1)	/* rx overflow continue */
+#define DMA_CTRL_RXMULTI	(1 << 2)	/* allow rx scatter to multiple descriptors */
+#define DMA_CTRL_UNFRAMED	(1 << 3)	/* Unframed Rx/Tx data */
+#define DMA_CTRL_USB_BOUNDRY4KB_WAR (1 << 4)
+#define DMA_CTRL_DMA_AVOIDANCE_WAR (1 << 5)	/* DMA avoidance WAR for 4331 */
+#define DMA_CTRL_RXSINGLE	(1 << 6)	/* always single buffer */
+#define DMA_CTRL_SDIO_RXGLOM	(1 << 7)	/* DMA Rx glome is enabled */
+
+/* receive descriptor table pointer */
+#define	D64_RP_LD_MASK		0x00001fff	/* last valid descriptor */
+
+/* receive channel status */
+#define	D64_RS0_CD_MASK		(di->d64_rs0_cd_mask)	/* current descriptor pointer */
+#define	D64_RS0_RS_MASK		0xf0000000     	/* receive state */
+#define	D64_RS0_RS_SHIFT		28
+#define	D64_RS0_RS_DISABLED	0x00000000	/* disabled */
+#define	D64_RS0_RS_ACTIVE	0x10000000	/* active */
+#define	D64_RS0_RS_IDLE		0x20000000	/* idle wait */
+#define	D64_RS0_RS_STOPPED	0x30000000	/* stopped */
+#define	D64_RS0_RS_SUSP		0x40000000	/* suspend pending */
+
+#define	D64_RS1_AD_MASK		0x0001ffff	/* active descriptor */
+#define	D64_RS1_RE_MASK		0xf0000000     	/* receive errors */
+#define	D64_RS1_RE_SHIFT		28
+#define	D64_RS1_RE_NOERR	0x00000000	/* no error */
+#define	D64_RS1_RE_DPO		0x10000000	/* descriptor protocol error */
+#define	D64_RS1_RE_DFU		0x20000000	/* data fifo overflow */
+#define	D64_RS1_RE_DTE		0x30000000	/* data transfer error */
+#define	D64_RS1_RE_DESRE	0x40000000	/* descriptor read error */
+#define	D64_RS1_RE_COREE	0x50000000	/* core error */
+
+/* fifoaddr */
+#define	D64_FA_OFF_MASK		0xffff		/* offset */
+#define	D64_FA_SEL_MASK		0xf0000		/* select */
+#define	D64_FA_SEL_SHIFT	16
+#define	D64_FA_SEL_XDD		0x00000		/* transmit dma data */
+#define	D64_FA_SEL_XDP		0x10000		/* transmit dma pointers */
+#define	D64_FA_SEL_RDD		0x40000		/* receive dma data */
+#define	D64_FA_SEL_RDP		0x50000		/* receive dma pointers */
+#define	D64_FA_SEL_XFD		0x80000		/* transmit fifo data */
+#define	D64_FA_SEL_XFP		0x90000		/* transmit fifo pointers */
+#define	D64_FA_SEL_RFD		0xc0000		/* receive fifo data */
+#define	D64_FA_SEL_RFP		0xd0000		/* receive fifo pointers */
+#define	D64_FA_SEL_RSD		0xe0000		/* receive frame status data */
+#define	D64_FA_SEL_RSP		0xf0000		/* receive frame status pointers */
+
+/* descriptor control flags 1 */
+#define D64_CTRL_COREFLAGS	0x0ff00000	/* core specific flags */
+#define	D64_CTRL1_NOTPCIE	((uint32)1 << 18)	/* buirst size control */
+#define	D64_CTRL1_EOT		((uint32)1 << 28)	/* end of descriptor table */
+#define	D64_CTRL1_IOC		((uint32)1 << 29)	/* interrupt on completion */
+#define	D64_CTRL1_EOF		((uint32)1 << 30)	/* end of frame */
+#define	D64_CTRL1_SOF		((uint32)1 << 31)	/* start of frame */
+
+/* descriptor control flags 2 */
+#define	D64_CTRL2_BC_MASK	0x00007fff	/* buffer byte count. real data len must <= 16KB */
+#define	D64_CTRL2_AE		0x00030000	/* address extension bits */
+#define	D64_CTRL2_AE_SHIFT	16
+#define D64_CTRL2_PARITY	0x00040000      /* parity bit */
+
+/* control flags in the range [27:20] are core-specific and not defined here */
+#define	D64_CTRL_CORE_MASK	0x0ff00000
+
+#define D64_RX_FRM_STS_LEN	0x0000ffff	/* frame length mask */
+#define D64_RX_FRM_STS_OVFL	0x00800000	/* RxOverFlow */
+#define D64_RX_FRM_STS_DSCRCNT	0x0f000000	/* no. of descriptors used - 1, d11corerev >= 22 */
+#define D64_RX_FRM_STS_DATATYPE	0xf0000000	/* core-dependent data type */
+
+/* receive frame status */
+typedef volatile struct {
+	uint16 len;
+	uint16 flags;
+} dma_rxh_t;
+
+#endif	/* _sbhnddma_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/sbpcmcia.h b/drivers/net/wireless/bcmdhd/include/sbpcmcia.h
new file mode 100644
index 0000000..f34fc18
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sbpcmcia.h
@@ -0,0 +1,113 @@
+/*
+ * BCM43XX Sonics SiliconBackplane PCMCIA core hardware definitions.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sbpcmcia.h 446298 2014-01-03 11:30:17Z $
+ */
+
+#ifndef	_SBPCMCIA_H
+#define	_SBPCMCIA_H
+
+/* All the addresses that are offsets in attribute space are divided
+ * by two to account for the fact that odd bytes are invalid in
+ * attribute space and our read/write routines make the space appear
+ * as if they didn't exist. Still we want to show the original numbers
+ * as documented in the hnd_pcmcia core manual.
+ */
+
+/* PCMCIA Function Configuration Registers */
+#define	PCMCIA_FCR		(0x700 / 2)
+
+#define	FCR0_OFF		0
+#define	FCR1_OFF		(0x40 / 2)
+#define	FCR2_OFF		(0x80 / 2)
+#define	FCR3_OFF		(0xc0 / 2)
+
+#define	PCMCIA_FCR0		(0x700 / 2)
+#define	PCMCIA_FCR1		(0x740 / 2)
+#define	PCMCIA_FCR2		(0x780 / 2)
+#define	PCMCIA_FCR3		(0x7c0 / 2)
+
+/* Standard PCMCIA FCR registers */
+
+#define	PCMCIA_COR		0
+
+#define	COR_RST			0x80
+#define	COR_LEV			0x40
+#define	COR_IRQEN		0x04
+#define	COR_BLREN		0x01
+#define	COR_FUNEN		0x01
+
+
+#define	PCICIA_FCSR		(2 / 2)
+#define	PCICIA_PRR		(4 / 2)
+#define	PCICIA_SCR		(6 / 2)
+#define	PCICIA_ESR		(8 / 2)
+
+
+#define PCM_MEMOFF		0x0000
+#define F0_MEMOFF		0x1000
+#define F1_MEMOFF		0x2000
+#define F2_MEMOFF		0x3000
+#define F3_MEMOFF		0x4000
+
+/* Memory base in the function fcr's */
+#define MEM_ADDR0		(0x728 / 2)
+#define MEM_ADDR1		(0x72a / 2)
+#define MEM_ADDR2		(0x72c / 2)
+
+/* PCMCIA base plus Srom access in fcr0: */
+#define PCMCIA_ADDR0		(0x072e / 2)
+#define PCMCIA_ADDR1		(0x0730 / 2)
+#define PCMCIA_ADDR2		(0x0732 / 2)
+
+#define MEM_SEG			(0x0734 / 2)
+#define SROM_CS			(0x0736 / 2)
+#define SROM_DATAL		(0x0738 / 2)
+#define SROM_DATAH		(0x073a / 2)
+#define SROM_ADDRL		(0x073c / 2)
+#define SROM_ADDRH		(0x073e / 2)
+#define	SROM_INFO2		(0x0772 / 2)	/* Corerev >= 2 && <= 5 */
+#define	SROM_INFO		(0x07be / 2)	/* Corerev >= 6 */
+
+/*  Values for srom_cs: */
+#define SROM_IDLE		0
+#define SROM_WRITE		1
+#define SROM_READ		2
+#define SROM_WEN		4
+#define SROM_WDS		7
+#define SROM_DONE		8
+
+/* Fields in srom_info: */
+#define	SRI_SZ_MASK		0x03
+#define	SRI_BLANK		0x04
+#define	SRI_OTP			0x80
+
+
+/* sbtmstatelow */
+#define SBTML_INT_ACK		0x40000		/* ack the sb interrupt */
+#define SBTML_INT_EN		0x20000		/* enable sb interrupt */
+
+/* sbtmstatehigh */
+#define SBTMH_INT_STATUS	0x40000		/* sb interrupt status */
+
+#endif	/* _SBPCMCIA_H */
diff --git a/drivers/net/wireless/bcmdhd/include/sbsdio.h b/drivers/net/wireless/bcmdhd/include/sbsdio.h
new file mode 100644
index 0000000..01962576
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sbsdio.h
@@ -0,0 +1,186 @@
+/*
+ * SDIO device core hardware definitions.
+ * sdio is a portion of the pcmcia core in core rev 3 - rev 8
+ *
+ * SDIO core support 1bit, 4 bit SDIO mode as well as SPI mode.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sbsdio.h 383835 2013-02-07 23:32:39Z $
+ */
+
+#ifndef	_SBSDIO_H
+#define	_SBSDIO_H
+
+#define SBSDIO_NUM_FUNCTION		3	/* as of sdiod rev 0, supports 3 functions */
+
+/* function 1 miscellaneous registers */
+#define SBSDIO_SPROM_CS			0x10000		/* sprom command and status */
+#define SBSDIO_SPROM_INFO		0x10001		/* sprom info register */
+#define SBSDIO_SPROM_DATA_LOW		0x10002		/* sprom indirect access data byte 0 */
+#define SBSDIO_SPROM_DATA_HIGH		0x10003 	/* sprom indirect access data byte 1 */
+#define SBSDIO_SPROM_ADDR_LOW		0x10004		/* sprom indirect access addr byte 0 */
+#define SBSDIO_SPROM_ADDR_HIGH		0x10005		/* sprom indirect access addr byte 0 */
+#define SBSDIO_CHIP_CTRL_DATA		0x10006		/* xtal_pu (gpio) output */
+#define SBSDIO_CHIP_CTRL_EN		0x10007		/* xtal_pu (gpio) enable */
+#define SBSDIO_WATERMARK		0x10008		/* rev < 7, watermark for sdio device */
+#define SBSDIO_DEVICE_CTL		0x10009		/* control busy signal generation */
+
+/* registers introduced in rev 8, some content (mask/bits) defs in sbsdpcmdev.h */
+#define SBSDIO_FUNC1_SBADDRLOW		0x1000A		/* SB Address Window Low (b15) */
+#define SBSDIO_FUNC1_SBADDRMID		0x1000B		/* SB Address Window Mid (b23:b16) */
+#define SBSDIO_FUNC1_SBADDRHIGH		0x1000C		/* SB Address Window High (b31:b24)    */
+#define SBSDIO_FUNC1_FRAMECTRL		0x1000D		/* Frame Control (frame term/abort) */
+#define SBSDIO_FUNC1_CHIPCLKCSR		0x1000E		/* ChipClockCSR (ALP/HT ctl/status) */
+#define SBSDIO_FUNC1_SDIOPULLUP 	0x1000F		/* SdioPullUp (on cmd, d0-d2) */
+#define SBSDIO_FUNC1_WFRAMEBCLO		0x10019		/* Write Frame Byte Count Low */
+#define SBSDIO_FUNC1_WFRAMEBCHI		0x1001A		/* Write Frame Byte Count High */
+#define SBSDIO_FUNC1_RFRAMEBCLO		0x1001B		/* Read Frame Byte Count Low */
+#define SBSDIO_FUNC1_RFRAMEBCHI		0x1001C		/* Read Frame Byte Count High */
+#define SBSDIO_FUNC1_MESBUSYCTRL	0x1001D		/* MesBusyCtl at 0x1001D (rev 11) */
+
+#define SBSDIO_FUNC1_MISC_REG_START	0x10000 	/* f1 misc register start */
+#define SBSDIO_FUNC1_MISC_REG_LIMIT	0x1001C 	/* f1 misc register end */
+
+/* Sdio Core Rev 12 */
+#define SBSDIO_FUNC1_WAKEUPCTRL			0x1001E
+#define SBSDIO_FUNC1_WCTRL_ALPWAIT_MASK		0x1
+#define SBSDIO_FUNC1_WCTRL_ALPWAIT_SHIFT	0
+#define SBSDIO_FUNC1_WCTRL_HTWAIT_MASK		0x2
+#define SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT		1
+#define SBSDIO_FUNC1_SLEEPCSR			0x1001F
+#define SBSDIO_FUNC1_SLEEPCSR_KSO_MASK		0x1
+#define SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT		0
+#define SBSDIO_FUNC1_SLEEPCSR_KSO_EN		1
+#define SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK	0x2
+#define SBSDIO_FUNC1_SLEEPCSR_DEVON_SHIFT	1
+
+/* SBSDIO_SPROM_CS */
+#define SBSDIO_SPROM_IDLE		0
+#define SBSDIO_SPROM_WRITE		1
+#define SBSDIO_SPROM_READ		2
+#define SBSDIO_SPROM_WEN		4
+#define SBSDIO_SPROM_WDS		7
+#define SBSDIO_SPROM_DONE		8
+
+/* SBSDIO_SPROM_INFO */
+#define SROM_SZ_MASK			0x03		/* SROM size, 1: 4k, 2: 16k */
+#define SROM_BLANK			0x04		/* depreciated in corerev 6 */
+#define	SROM_OTP			0x80		/* OTP present */
+
+/* SBSDIO_CHIP_CTRL */
+#define SBSDIO_CHIP_CTRL_XTAL		0x01		/* or'd with onchip xtal_pu,
+							 * 1: power on oscillator
+							 * (for 4318 only)
+							 */
+/* SBSDIO_WATERMARK */
+#define SBSDIO_WATERMARK_MASK		0x7f		/* number of words - 1 for sd device
+							 * to wait before sending data to host
+							 */
+
+/* SBSDIO_MESBUSYCTRL */
+/* When RX FIFO has less entries than this & MBE is set
+ * => busy signal is asserted between data blocks.
+*/
+#define SBSDIO_MESBUSYCTRL_MASK		0x7f
+#define SBSDIO_MESBUSYCTRL_ENAB		0x80		/* Enable busy capability for MES access */
+
+/* SBSDIO_DEVICE_CTL */
+#define SBSDIO_DEVCTL_SETBUSY		0x01		/* 1: device will assert busy signal when
+							 * receiving CMD53
+							 */
+#define SBSDIO_DEVCTL_SPI_INTR_SYNC	0x02		/* 1: assertion of sdio interrupt is
+							 * synchronous to the sdio clock
+							 */
+#define SBSDIO_DEVCTL_CA_INT_ONLY	0x04		/* 1: mask all interrupts to host
+							 * except the chipActive (rev 8)
+							 */
+#define SBSDIO_DEVCTL_PADS_ISO		0x08		/* 1: isolate internal sdio signals, put
+							 * external pads in tri-state; requires
+							 * sdio bus power cycle to clear (rev 9)
+							 */
+#define SBSDIO_DEVCTL_EN_F2_BLK_WATERMARK 0x10  /* Enable function 2 tx for each block */
+#define SBSDIO_DEVCTL_F2WM_ENAB		0x10		/* Enable F2 Watermark */
+#define SBSDIO_DEVCTL_NONDAT_PADS_ISO 	0x20		/* Isolate sdio clk and cmd (non-data) */
+
+/* SBSDIO_FUNC1_CHIPCLKCSR */
+#define SBSDIO_FORCE_ALP		0x01		/* Force ALP request to backplane */
+#define SBSDIO_FORCE_HT			0x02		/* Force HT request to backplane */
+#define SBSDIO_FORCE_ILP		0x04		/* Force ILP request to backplane */
+#define SBSDIO_ALP_AVAIL_REQ		0x08		/* Make ALP ready (power up xtal) */
+#define SBSDIO_HT_AVAIL_REQ		0x10		/* Make HT ready (power up PLL) */
+#define SBSDIO_FORCE_HW_CLKREQ_OFF	0x20		/* Squelch clock requests from HW */
+#define SBSDIO_ALP_AVAIL		0x40		/* Status: ALP is ready */
+#define SBSDIO_HT_AVAIL			0x80		/* Status: HT is ready */
+/* In rev8, actual avail bits followed original docs */
+#define SBSDIO_Rev8_HT_AVAIL		0x40
+#define SBSDIO_Rev8_ALP_AVAIL		0x80
+#define SBSDIO_CSR_MASK			0x1F
+
+#define SBSDIO_AVBITS			(SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL)
+#define SBSDIO_ALPAV(regval)		((regval) & SBSDIO_AVBITS)
+#define SBSDIO_HTAV(regval)		(((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS)
+#define SBSDIO_ALPONLY(regval)		(SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval))
+#define SBSDIO_CLKAV(regval, alponly)	(SBSDIO_ALPAV(regval) && \
+					(alponly ? 1 : SBSDIO_HTAV(regval)))
+
+/* SBSDIO_FUNC1_SDIOPULLUP */
+#define SBSDIO_PULLUP_D0		0x01		/* Enable D0/MISO pullup */
+#define SBSDIO_PULLUP_D1		0x02		/* Enable D1/INT# pullup */
+#define SBSDIO_PULLUP_D2		0x04		/* Enable D2 pullup */
+#define SBSDIO_PULLUP_CMD		0x08		/* Enable CMD/MOSI pullup */
+#define SBSDIO_PULLUP_ALL		0x0f		/* All valid bits */
+
+/* function 1 OCP space */
+#define SBSDIO_SB_OFT_ADDR_MASK		0x07FFF		/* sb offset addr is <= 15 bits, 32k */
+#define SBSDIO_SB_OFT_ADDR_LIMIT	0x08000
+#define SBSDIO_SB_ACCESS_2_4B_FLAG	0x08000		/* with b15, maps to 32-bit SB access */
+
+/* some duplication with sbsdpcmdev.h here */
+/* valid bits in SBSDIO_FUNC1_SBADDRxxx regs */
+#define SBSDIO_SBADDRLOW_MASK		0x80		/* Valid bits in SBADDRLOW */
+#define SBSDIO_SBADDRMID_MASK		0xff		/* Valid bits in SBADDRMID */
+#define SBSDIO_SBADDRHIGH_MASK		0xffU		/* Valid bits in SBADDRHIGH */
+#define SBSDIO_SBWINDOW_MASK		0xffff8000	/* Address bits from SBADDR regs */
+
+/* direct(mapped) cis space */
+#define SBSDIO_CIS_BASE_COMMON		0x1000		/* MAPPED common CIS address */
+#define SBSDIO_CIS_SIZE_LIMIT		0x200		/* maximum bytes in one CIS */
+#define SBSDIO_OTP_CIS_SIZE_LIMIT       0x078           /* maximum bytes OTP CIS */
+
+#define SBSDIO_CIS_OFT_ADDR_MASK	0x1FFFF		/* cis offset addr is < 17 bits */
+
+#define SBSDIO_CIS_MANFID_TUPLE_LEN	6		/* manfid tuple length, include tuple,
+							 * link bytes
+							 */
+
+/* indirect cis access (in sprom) */
+#define SBSDIO_SPROM_CIS_OFFSET		0x8		/* 8 control bytes first, CIS starts from
+							 * 8th byte
+							 */
+
+#define SBSDIO_BYTEMODE_DATALEN_MAX	64		/* sdio byte mode: maximum length of one
+							 * data comamnd
+							 */
+
+#define SBSDIO_CORE_ADDR_MASK		0x1FFFF		/* sdio core function one address mask */
+
+#endif	/* _SBSDIO_H */
diff --git a/drivers/net/wireless/bcmdhd/include/sbsdpcmdev.h b/drivers/net/wireless/bcmdhd/include/sbsdpcmdev.h
new file mode 100644
index 0000000..97051ce
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sbsdpcmdev.h
@@ -0,0 +1,295 @@
+/*
+ * Broadcom SiliconBackplane SDIO/PCMCIA hardware-specific
+ * device core support
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sbsdpcmdev.h 416730 2013-08-06 09:33:19Z $
+ */
+
+#ifndef	_sbsdpcmdev_h_
+#define	_sbsdpcmdev_h_
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define	_PADLINE(line)	pad ## line
+#define	_XSTR(line)	_PADLINE(line)
+#define	PAD		_XSTR(__LINE__)
+#endif	/* PAD */
+
+
+typedef volatile struct {
+	dma64regs_t	xmt;		/* dma tx */
+	uint32 PAD[2];
+	dma64regs_t	rcv;		/* dma rx */
+	uint32 PAD[2];
+} dma64p_t;
+
+/* dma64 sdiod corerev >= 1 */
+typedef volatile struct {
+	dma64p_t dma64regs[2];
+	dma64diag_t dmafifo;		/* DMA Diagnostic Regs, 0x280-0x28c */
+	uint32 PAD[92];
+} sdiodma64_t;
+
+/* dma32 sdiod corerev == 0 */
+typedef volatile struct {
+	dma32regp_t dma32regs[2];	/* dma tx & rx, 0x200-0x23c */
+	dma32diag_t dmafifo;		/* DMA Diagnostic Regs, 0x240-0x24c */
+	uint32 PAD[108];
+} sdiodma32_t;
+
+/* dma32 regs for pcmcia core */
+typedef volatile struct {
+	dma32regp_t dmaregs;		/* DMA Regs, 0x200-0x21c, rev8 */
+	dma32diag_t dmafifo;		/* DMA Diagnostic Regs, 0x220-0x22c */
+	uint32 PAD[116];
+} pcmdma32_t;
+
+/* core registers */
+typedef volatile struct {
+	uint32 corecontrol;		/* CoreControl, 0x000, rev8 */
+	uint32 corestatus;		/* CoreStatus, 0x004, rev8  */
+	uint32 PAD[1];
+	uint32 biststatus;		/* BistStatus, 0x00c, rev8  */
+
+	/* PCMCIA access */
+	uint16 pcmciamesportaladdr;	/* PcmciaMesPortalAddr, 0x010, rev8   */
+	uint16 PAD[1];
+	uint16 pcmciamesportalmask;	/* PcmciaMesPortalMask, 0x014, rev8   */
+	uint16 PAD[1];
+	uint16 pcmciawrframebc;		/* PcmciaWrFrameBC, 0x018, rev8   */
+	uint16 PAD[1];
+	uint16 pcmciaunderflowtimer;	/* PcmciaUnderflowTimer, 0x01c, rev8   */
+	uint16 PAD[1];
+
+	/* interrupt */
+	uint32 intstatus;		/* IntStatus, 0x020, rev8   */
+	uint32 hostintmask;		/* IntHostMask, 0x024, rev8   */
+	uint32 intmask;			/* IntSbMask, 0x028, rev8   */
+	uint32 sbintstatus;		/* SBIntStatus, 0x02c, rev8   */
+	uint32 sbintmask;		/* SBIntMask, 0x030, rev8   */
+	uint32 funcintmask;		/* SDIO Function Interrupt Mask, SDIO rev4 */
+	uint32 PAD[2];
+	uint32 tosbmailbox;		/* ToSBMailbox, 0x040, rev8   */
+	uint32 tohostmailbox;		/* ToHostMailbox, 0x044, rev8   */
+	uint32 tosbmailboxdata;		/* ToSbMailboxData, 0x048, rev8   */
+	uint32 tohostmailboxdata;	/* ToHostMailboxData, 0x04c, rev8   */
+
+	/* synchronized access to registers in SDIO clock domain */
+	uint32 sdioaccess;		/* SdioAccess, 0x050, rev8   */
+	uint32 PAD[3];
+
+	/* PCMCIA frame control */
+	uint8 pcmciaframectrl;		/* pcmciaFrameCtrl, 0x060, rev8   */
+	uint8 PAD[3];
+	uint8 pcmciawatermark;		/* pcmciaWaterMark, 0x064, rev8   */
+	uint8 PAD[155];
+
+	/* interrupt batching control */
+	uint32 intrcvlazy;		/* IntRcvLazy, 0x100, rev8 */
+	uint32 PAD[3];
+
+	/* counters */
+	uint32 cmd52rd;			/* Cmd52RdCount, 0x110, rev8, SDIO: cmd52 reads */
+	uint32 cmd52wr;			/* Cmd52WrCount, 0x114, rev8, SDIO: cmd52 writes */
+	uint32 cmd53rd;			/* Cmd53RdCount, 0x118, rev8, SDIO: cmd53 reads */
+	uint32 cmd53wr;			/* Cmd53WrCount, 0x11c, rev8, SDIO: cmd53 writes */
+	uint32 abort;			/* AbortCount, 0x120, rev8, SDIO: aborts */
+	uint32 datacrcerror;		/* DataCrcErrorCount, 0x124, rev8, SDIO: frames w/bad CRC */
+	uint32 rdoutofsync;		/* RdOutOfSyncCount, 0x128, rev8, SDIO/PCMCIA: Rd Frm OOS */
+	uint32 wroutofsync;		/* RdOutOfSyncCount, 0x12c, rev8, SDIO/PCMCIA: Wr Frm OOS */
+	uint32 writebusy;		/* WriteBusyCount, 0x130, rev8, SDIO: dev asserted "busy" */
+	uint32 readwait;		/* ReadWaitCount, 0x134, rev8, SDIO: read: no data avail */
+	uint32 readterm;		/* ReadTermCount, 0x138, rev8, SDIO: rd frm terminates */
+	uint32 writeterm;		/* WriteTermCount, 0x13c, rev8, SDIO: wr frm terminates */
+	uint32 PAD[40];
+	uint32 clockctlstatus;		/* ClockCtlStatus, 0x1e0, rev8 */
+	uint32 PAD[7];
+
+	/* DMA engines */
+	volatile union {
+		pcmdma32_t pcm32;
+		sdiodma32_t sdiod32;
+		sdiodma64_t sdiod64;
+	} dma;
+
+	/* SDIO/PCMCIA CIS region */
+	char cis[512];			/* 512 byte CIS, 0x400-0x5ff, rev6 */
+
+	/* PCMCIA function control registers */
+	char pcmciafcr[256];		/* PCMCIA FCR, 0x600-6ff, rev6 */
+	uint16 PAD[55];
+
+	/* PCMCIA backplane access */
+	uint16 backplanecsr;		/* BackplaneCSR, 0x76E, rev6 */
+	uint16 backplaneaddr0;		/* BackplaneAddr0, 0x770, rev6 */
+	uint16 backplaneaddr1;		/* BackplaneAddr1, 0x772, rev6 */
+	uint16 backplaneaddr2;		/* BackplaneAddr2, 0x774, rev6 */
+	uint16 backplaneaddr3;		/* BackplaneAddr3, 0x776, rev6 */
+	uint16 backplanedata0;		/* BackplaneData0, 0x778, rev6 */
+	uint16 backplanedata1;		/* BackplaneData1, 0x77a, rev6 */
+	uint16 backplanedata2;		/* BackplaneData2, 0x77c, rev6 */
+	uint16 backplanedata3;		/* BackplaneData3, 0x77e, rev6 */
+	uint16 PAD[31];
+
+	/* sprom "size" & "blank" info */
+	uint16 spromstatus;		/* SPROMStatus, 0x7BE, rev2 */
+	uint32 PAD[464];
+
+	/* Sonics SiliconBackplane registers */
+	sbconfig_t sbconfig;		/* SbConfig Regs, 0xf00-0xfff, rev8 */
+} sdpcmd_regs_t;
+
+/* corecontrol */
+#define CC_CISRDY		(1 << 0)	/* CIS Ready */
+#define CC_BPRESEN		(1 << 1)	/* CCCR RES signal causes backplane reset */
+#define CC_F2RDY		(1 << 2)	/* set CCCR IOR2 bit */
+#define CC_CLRPADSISO		(1 << 3)	/* clear SDIO pads isolation bit (rev 11) */
+#define CC_XMTDATAAVAIL_MODE	(1 << 4)	/* data avail generates an interrupt */
+#define CC_XMTDATAAVAIL_CTRL	(1 << 5)	/* data avail interrupt ctrl */
+
+/* corestatus */
+#define CS_PCMCIAMODE	(1 << 0)	/* Device Mode; 0=SDIO, 1=PCMCIA */
+#define CS_SMARTDEV	(1 << 1)	/* 1=smartDev enabled */
+#define CS_F2ENABLED	(1 << 2)	/* 1=host has enabled the device */
+
+#define PCMCIA_MES_PA_MASK	0x7fff	/* PCMCIA Message Portal Address Mask */
+#define PCMCIA_MES_PM_MASK	0x7fff	/* PCMCIA Message Portal Mask Mask */
+#define PCMCIA_WFBC_MASK	0xffff	/* PCMCIA Write Frame Byte Count Mask */
+#define PCMCIA_UT_MASK		0x07ff	/* PCMCIA Underflow Timer Mask */
+
+/* intstatus */
+#define I_SMB_SW0	(1 << 0)	/* To SB Mail S/W interrupt 0 */
+#define I_SMB_SW1	(1 << 1)	/* To SB Mail S/W interrupt 1 */
+#define I_SMB_SW2	(1 << 2)	/* To SB Mail S/W interrupt 2 */
+#define I_SMB_SW3	(1 << 3)	/* To SB Mail S/W interrupt 3 */
+#define I_SMB_SW_MASK	0x0000000f	/* To SB Mail S/W interrupts mask */
+#define I_SMB_SW_SHIFT	0		/* To SB Mail S/W interrupts shift */
+#define I_HMB_SW0	(1 << 4)	/* To Host Mail S/W interrupt 0 */
+#define I_HMB_SW1	(1 << 5)	/* To Host Mail S/W interrupt 1 */
+#define I_HMB_SW2	(1 << 6)	/* To Host Mail S/W interrupt 2 */
+#define I_HMB_SW3	(1 << 7)	/* To Host Mail S/W interrupt 3 */
+#define I_HMB_SW_MASK	0x000000f0	/* To Host Mail S/W interrupts mask */
+#define I_HMB_SW_SHIFT	4		/* To Host Mail S/W interrupts shift */
+#define I_WR_OOSYNC	(1 << 8)	/* Write Frame Out Of Sync */
+#define I_RD_OOSYNC	(1 << 9)	/* Read Frame Out Of Sync */
+#define	I_PC		(1 << 10)	/* descriptor error */
+#define	I_PD		(1 << 11)	/* data error */
+#define	I_DE		(1 << 12)	/* Descriptor protocol Error */
+#define	I_RU		(1 << 13)	/* Receive descriptor Underflow */
+#define	I_RO		(1 << 14)	/* Receive fifo Overflow */
+#define	I_XU		(1 << 15)	/* Transmit fifo Underflow */
+#define	I_RI		(1 << 16)	/* Receive Interrupt */
+#define I_BUSPWR	(1 << 17)	/* SDIO Bus Power Change (rev 9) */
+#define I_XMTDATA_AVAIL (1 << 23)	/* bits in fifo */
+#define	I_XI		(1 << 24)	/* Transmit Interrupt */
+#define I_RF_TERM	(1 << 25)	/* Read Frame Terminate */
+#define I_WF_TERM	(1 << 26)	/* Write Frame Terminate */
+#define I_PCMCIA_XU	(1 << 27)	/* PCMCIA Transmit FIFO Underflow */
+#define I_SBINT		(1 << 28)	/* sbintstatus Interrupt */
+#define I_CHIPACTIVE	(1 << 29)	/* chip transitioned from doze to active state */
+#define I_SRESET	(1 << 30)	/* CCCR RES interrupt */
+#define I_IOE2		(1U << 31)	/* CCCR IOE2 Bit Changed */
+#define	I_ERRORS	(I_PC | I_PD | I_DE | I_RU | I_RO | I_XU)	/* DMA Errors */
+#define I_DMA		(I_RI | I_XI | I_ERRORS)
+
+/* sbintstatus */
+#define I_SB_SERR	(1 << 8)	/* Backplane SError (write) */
+#define I_SB_RESPERR	(1 << 9)	/* Backplane Response Error (read) */
+#define I_SB_SPROMERR	(1 << 10)	/* Error accessing the sprom */
+
+/* sdioaccess */
+#define SDA_DATA_MASK	0x000000ff	/* Read/Write Data Mask */
+#define SDA_ADDR_MASK	0x000fff00	/* Read/Write Address Mask */
+#define SDA_ADDR_SHIFT	8		/* Read/Write Address Shift */
+#define SDA_WRITE	0x01000000	/* Write bit  */
+#define SDA_READ	0x00000000	/* Write bit cleared for Read */
+#define SDA_BUSY	0x80000000	/* Busy bit */
+
+/* sdioaccess-accessible register address spaces */
+#define SDA_CCCR_SPACE		0x000	/* sdioAccess CCCR register space */
+#define SDA_F1_FBR_SPACE	0x100	/* sdioAccess F1 FBR register space */
+#define SDA_F2_FBR_SPACE	0x200	/* sdioAccess F2 FBR register space */
+#define SDA_F1_REG_SPACE	0x300	/* sdioAccess F1 core-specific register space */
+
+/* SDA_F1_REG_SPACE sdioaccess-accessible F1 reg space register offsets */
+#define SDA_CHIPCONTROLDATA	0x006	/* ChipControlData */
+#define SDA_CHIPCONTROLENAB	0x007	/* ChipControlEnable */
+#define SDA_F2WATERMARK		0x008	/* Function 2 Watermark */
+#define SDA_DEVICECONTROL	0x009	/* DeviceControl */
+#define SDA_SBADDRLOW		0x00a	/* SbAddrLow */
+#define SDA_SBADDRMID		0x00b	/* SbAddrMid */
+#define SDA_SBADDRHIGH		0x00c	/* SbAddrHigh */
+#define SDA_FRAMECTRL		0x00d	/* FrameCtrl */
+#define SDA_CHIPCLOCKCSR	0x00e	/* ChipClockCSR */
+#define SDA_SDIOPULLUP		0x00f	/* SdioPullUp */
+#define SDA_SDIOWRFRAMEBCLOW	0x019	/* SdioWrFrameBCLow */
+#define SDA_SDIOWRFRAMEBCHIGH	0x01a	/* SdioWrFrameBCHigh */
+#define SDA_SDIORDFRAMEBCLOW	0x01b	/* SdioRdFrameBCLow */
+#define SDA_SDIORDFRAMEBCHIGH	0x01c	/* SdioRdFrameBCHigh */
+
+/* SDA_F2WATERMARK */
+#define SDA_F2WATERMARK_MASK	0x7f	/* F2Watermark Mask */
+
+/* SDA_SBADDRLOW */
+#define SDA_SBADDRLOW_MASK	0x80	/* SbAddrLow Mask */
+
+/* SDA_SBADDRMID */
+#define SDA_SBADDRMID_MASK	0xff	/* SbAddrMid Mask */
+
+/* SDA_SBADDRHIGH */
+#define SDA_SBADDRHIGH_MASK	0xff	/* SbAddrHigh Mask */
+
+/* SDA_FRAMECTRL */
+#define SFC_RF_TERM	(1 << 0)	/* Read Frame Terminate */
+#define SFC_WF_TERM	(1 << 1)	/* Write Frame Terminate */
+#define SFC_CRC4WOOS	(1 << 2)	/* HW reports CRC error for write out of sync */
+#define SFC_ABORTALL	(1 << 3)	/* Abort cancels all in-progress frames */
+
+/* pcmciaframectrl */
+#define PFC_RF_TERM	(1 << 0)	/* Read Frame Terminate */
+#define PFC_WF_TERM	(1 << 1)	/* Write Frame Terminate */
+
+/* intrcvlazy */
+#define	IRL_TO_MASK	0x00ffffff	/* timeout */
+#define	IRL_FC_MASK	0xff000000	/* frame count */
+#define	IRL_FC_SHIFT	24		/* frame count */
+
+/* rx header */
+typedef volatile struct {
+	uint16 len;
+	uint16 flags;
+} sdpcmd_rxh_t;
+
+/* rx header flags */
+#define RXF_CRC		0x0001		/* CRC error detected */
+#define RXF_WOOS	0x0002		/* write frame out of sync */
+#define RXF_WF_TERM	0x0004		/* write frame terminated */
+#define RXF_ABORT	0x0008		/* write frame aborted */
+#define RXF_DISCARD	(RXF_CRC | RXF_WOOS | RXF_WF_TERM | RXF_ABORT)	/* bad frame */
+
+/* HW frame tag */
+#define SDPCM_FRAMETAG_LEN	4	/* HW frametag: 2 bytes len, 2 bytes check val */
+
+#define SDPCM_HWEXT_LEN	8
+
+#endif	/* _sbsdpcmdev_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/sbsocram.h b/drivers/net/wireless/bcmdhd/include/sbsocram.h
new file mode 100644
index 0000000..33442f8
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sbsocram.h
@@ -0,0 +1,200 @@
+/*
+ * BCM47XX Sonics SiliconBackplane embedded ram core
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sbsocram.h 271781 2011-07-13 20:00:06Z $
+ */
+
+#ifndef	_SBSOCRAM_H
+#define	_SBSOCRAM_H
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define	_PADLINE(line)	pad ## line
+#define	_XSTR(line)	_PADLINE(line)
+#define	PAD		_XSTR(__LINE__)
+#endif	/* PAD */
+
+/* Memcsocram core registers */
+typedef volatile struct sbsocramregs {
+	uint32	coreinfo;
+	uint32	bwalloc;
+	uint32	extracoreinfo;
+	uint32	biststat;
+	uint32	bankidx;
+	uint32	standbyctrl;
+
+	uint32	errlogstatus;	/* rev 6 */
+	uint32	errlogaddr;	/* rev 6 */
+	/* used for patching rev 3 & 5 */
+	uint32	cambankidx;
+	uint32	cambankstandbyctrl;
+	uint32	cambankpatchctrl;
+	uint32	cambankpatchtblbaseaddr;
+	uint32	cambankcmdreg;
+	uint32	cambankdatareg;
+	uint32	cambankmaskreg;
+	uint32	PAD[1];
+	uint32	bankinfo;	/* corev 8 */
+	uint32	bankpda;
+	uint32	PAD[14];
+	uint32	extmemconfig;
+	uint32	extmemparitycsr;
+	uint32	extmemparityerrdata;
+	uint32	extmemparityerrcnt;
+	uint32	extmemwrctrlandsize;
+	uint32	PAD[84];
+	uint32	workaround;
+	uint32	pwrctl;		/* corerev >= 2 */
+	uint32	PAD[133];
+	uint32  sr_control;     /* corerev >= 15 */
+	uint32  sr_status;      /* corerev >= 15 */
+	uint32  sr_address;     /* corerev >= 15 */
+	uint32  sr_data;        /* corerev >= 15 */
+} sbsocramregs_t;
+
+#endif	/* _LANGUAGE_ASSEMBLY */
+
+/* Register offsets */
+#define	SR_COREINFO		0x00
+#define	SR_BWALLOC		0x04
+#define	SR_BISTSTAT		0x0c
+#define	SR_BANKINDEX		0x10
+#define	SR_BANKSTBYCTL		0x14
+#define SR_PWRCTL		0x1e8
+
+/* Coreinfo register */
+#define	SRCI_PT_MASK		0x00070000	/* corerev >= 6; port type[18:16] */
+#define	SRCI_PT_SHIFT		16
+/* port types : SRCI_PT_<processorPT>_<backplanePT> */
+#define SRCI_PT_OCP_OCP		0
+#define SRCI_PT_AXI_OCP		1
+#define SRCI_PT_ARM7AHB_OCP	2
+#define SRCI_PT_CM3AHB_OCP	3
+#define SRCI_PT_AXI_AXI		4
+#define SRCI_PT_AHB_AXI		5
+/* corerev >= 3 */
+#define SRCI_LSS_MASK		0x00f00000
+#define SRCI_LSS_SHIFT		20
+#define SRCI_LRS_MASK		0x0f000000
+#define SRCI_LRS_SHIFT		24
+
+/* In corerev 0, the memory size is 2 to the power of the
+ * base plus 16 plus to the contents of the memsize field plus 1.
+ */
+#define	SRCI_MS0_MASK		0xf
+#define SR_MS0_BASE		16
+
+/*
+ * In corerev 1 the bank size is 2 ^ the bank size field plus 14,
+ * the memory size is number of banks times bank size.
+ * The same applies to rom size.
+ */
+#define	SRCI_ROMNB_MASK		0xf000
+#define	SRCI_ROMNB_SHIFT	12
+#define	SRCI_ROMBSZ_MASK	0xf00
+#define	SRCI_ROMBSZ_SHIFT	8
+#define	SRCI_SRNB_MASK		0xf0
+#define	SRCI_SRNB_SHIFT		4
+#define	SRCI_SRBSZ_MASK		0xf
+#define	SRCI_SRBSZ_SHIFT	0
+
+#define SR_BSZ_BASE		14
+
+/* Standby control register */
+#define	SRSC_SBYOVR_MASK	0x80000000
+#define	SRSC_SBYOVR_SHIFT	31
+#define	SRSC_SBYOVRVAL_MASK	0x60000000
+#define	SRSC_SBYOVRVAL_SHIFT	29
+#define	SRSC_SBYEN_MASK		0x01000000	/* rev >= 3 */
+#define	SRSC_SBYEN_SHIFT	24
+
+/* Power control register */
+#define SRPC_PMU_STBYDIS_MASK	0x00000010	/* rev >= 3 */
+#define SRPC_PMU_STBYDIS_SHIFT	4
+#define SRPC_STBYOVRVAL_MASK	0x00000008
+#define SRPC_STBYOVRVAL_SHIFT	3
+#define SRPC_STBYOVR_MASK	0x00000007
+#define SRPC_STBYOVR_SHIFT	0
+
+/* Extra core capability register */
+#define SRECC_NUM_BANKS_MASK   0x000000F0
+#define SRECC_NUM_BANKS_SHIFT  4
+#define SRECC_BANKSIZE_MASK    0x0000000F
+#define SRECC_BANKSIZE_SHIFT   0
+
+#define SRECC_BANKSIZE(value)	 (1 << (value))
+
+/* CAM bank patch control */
+#define SRCBPC_PATCHENABLE 0x80000000
+
+#define SRP_ADDRESS   0x0001FFFC
+#define SRP_VALID     0x8000
+
+/* CAM bank command reg */
+#define SRCMD_WRITE  0x00020000
+#define SRCMD_READ   0x00010000
+#define SRCMD_DONE   0x80000000
+
+#define SRCMD_DONE_DLY	1000
+
+/* bankidx and bankinfo reg defines corerev >= 8 */
+#define SOCRAM_BANKINFO_SZMASK		0x7f
+#define SOCRAM_BANKIDX_ROM_MASK		0x100
+
+#define SOCRAM_BANKIDX_MEMTYPE_SHIFT	8
+/* socram bankinfo memtype */
+#define SOCRAM_MEMTYPE_RAM		0
+#define SOCRAM_MEMTYPE_R0M		1
+#define SOCRAM_MEMTYPE_DEVRAM		2
+
+#define	SOCRAM_BANKINFO_REG		0x40
+#define	SOCRAM_BANKIDX_REG		0x10
+#define	SOCRAM_BANKINFO_STDBY_MASK	0x400
+#define	SOCRAM_BANKINFO_STDBY_TIMER	0x800
+
+/* bankinfo rev >= 10 */
+#define SOCRAM_BANKINFO_DEVRAMSEL_SHIFT		13
+#define SOCRAM_BANKINFO_DEVRAMSEL_MASK		0x2000
+#define SOCRAM_BANKINFO_DEVRAMPRO_SHIFT		14
+#define SOCRAM_BANKINFO_DEVRAMPRO_MASK		0x4000
+#define SOCRAM_BANKINFO_SLPSUPP_SHIFT		15
+#define SOCRAM_BANKINFO_SLPSUPP_MASK		0x8000
+#define SOCRAM_BANKINFO_RETNTRAM_SHIFT		16
+#define SOCRAM_BANKINFO_RETNTRAM_MASK		0x00010000
+#define SOCRAM_BANKINFO_PDASZ_SHIFT		17
+#define SOCRAM_BANKINFO_PDASZ_MASK		0x003E0000
+#define SOCRAM_BANKINFO_DEVRAMREMAP_SHIFT	24
+#define SOCRAM_BANKINFO_DEVRAMREMAP_MASK	0x01000000
+
+/* extracoreinfo register */
+#define SOCRAM_DEVRAMBANK_MASK		0xF000
+#define SOCRAM_DEVRAMBANK_SHIFT		12
+
+/* bank info to calculate bank size */
+#define   SOCRAM_BANKINFO_SZBASE          8192
+#define SOCRAM_BANKSIZE_SHIFT         13      /* SOCRAM_BANKINFO_SZBASE */
+
+
+#endif	/* _SBSOCRAM_H */
diff --git a/drivers/net/wireless/bcmdhd/include/sdio.h b/drivers/net/wireless/bcmdhd/include/sdio.h
new file mode 100644
index 0000000..6b8d437
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sdio.h
@@ -0,0 +1,622 @@
+/*
+ * SDIO spec header file
+ * Protocol and standard (common) device definitions
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sdio.h 416730 2013-08-06 09:33:19Z $
+ */
+
+#ifndef	_SDIO_H
+#define	_SDIO_H
+
+#ifdef BCMSDIO
+
+/* CCCR structure for function 0 */
+typedef volatile struct {
+	uint8	cccr_sdio_rev;		/* RO, cccr and sdio revision */
+	uint8	sd_rev;			/* RO, sd spec revision */
+	uint8	io_en;			/* I/O enable */
+	uint8	io_rdy;			/* I/O ready reg */
+	uint8	intr_ctl;		/* Master and per function interrupt enable control */
+	uint8	intr_status;		/* RO, interrupt pending status */
+	uint8	io_abort;		/* read/write abort or reset all functions */
+	uint8	bus_inter;		/* bus interface control */
+	uint8	capability;		/* RO, card capability */
+
+	uint8	cis_base_low;		/* 0x9 RO, common CIS base address, LSB */
+	uint8	cis_base_mid;
+	uint8	cis_base_high;		/* 0xB RO, common CIS base address, MSB */
+
+	/* suspend/resume registers */
+	uint8	bus_suspend;		/* 0xC */
+	uint8	func_select;		/* 0xD */
+	uint8	exec_flag;		/* 0xE */
+	uint8	ready_flag;		/* 0xF */
+
+	uint8	fn0_blk_size[2];	/* 0x10(LSB), 0x11(MSB) */
+
+	uint8	power_control;		/* 0x12 (SDIO version 1.10) */
+
+	uint8	speed_control;		/* 0x13 */
+} sdio_regs_t;
+
+/* SDIO Device CCCR offsets */
+#define SDIOD_CCCR_REV			0x00
+#define SDIOD_CCCR_SDREV		0x01
+#define SDIOD_CCCR_IOEN			0x02
+#define SDIOD_CCCR_IORDY		0x03
+#define SDIOD_CCCR_INTEN		0x04
+#define SDIOD_CCCR_INTPEND		0x05
+#define SDIOD_CCCR_IOABORT		0x06
+#define SDIOD_CCCR_BICTRL		0x07
+#define SDIOD_CCCR_CAPABLITIES		0x08
+#define SDIOD_CCCR_CISPTR_0		0x09
+#define SDIOD_CCCR_CISPTR_1		0x0A
+#define SDIOD_CCCR_CISPTR_2		0x0B
+#define SDIOD_CCCR_BUSSUSP		0x0C
+#define SDIOD_CCCR_FUNCSEL		0x0D
+#define SDIOD_CCCR_EXECFLAGS		0x0E
+#define SDIOD_CCCR_RDYFLAGS		0x0F
+#define SDIOD_CCCR_BLKSIZE_0		0x10
+#define SDIOD_CCCR_BLKSIZE_1		0x11
+#define SDIOD_CCCR_POWER_CONTROL	0x12
+#define SDIOD_CCCR_SPEED_CONTROL	0x13
+#define SDIOD_CCCR_UHSI_SUPPORT		0x14
+#define SDIOD_CCCR_DRIVER_STRENGTH	0x15
+#define SDIOD_CCCR_INTR_EXTN		0x16
+
+/* Broadcom extensions (corerev >= 1) */
+#define SDIOD_CCCR_BRCM_CARDCAP		0xf0
+#define SDIOD_CCCR_BRCM_CARDCAP_CMD14_SUPPORT	0x02
+#define SDIOD_CCCR_BRCM_CARDCAP_CMD14_EXT	0x04
+#define SDIOD_CCCR_BRCM_CARDCAP_CMD_NODEC	0x08
+#define SDIOD_CCCR_BRCM_CARDCTL			0xf1
+#define SDIOD_CCCR_BRCM_SEPINT			0xf2
+
+/* cccr_sdio_rev */
+#define SDIO_REV_SDIOID_MASK	0xf0	/* SDIO spec revision number */
+#define SDIO_REV_CCCRID_MASK	0x0f	/* CCCR format version number */
+#define SDIO_SPEC_VERSION_3_0	0x40	/* SDIO spec version 3.0 */
+
+/* sd_rev */
+#define SD_REV_PHY_MASK		0x0f	/* SD format version number */
+
+/* io_en */
+#define SDIO_FUNC_ENABLE_1	0x02	/* function 1 I/O enable */
+#define SDIO_FUNC_ENABLE_2	0x04	/* function 2 I/O enable */
+
+/* io_rdys */
+#define SDIO_FUNC_READY_1	0x02	/* function 1 I/O ready */
+#define SDIO_FUNC_READY_2	0x04	/* function 2 I/O ready */
+
+/* intr_ctl */
+#define INTR_CTL_MASTER_EN	0x1	/* interrupt enable master */
+#define INTR_CTL_FUNC1_EN	0x2	/* interrupt enable for function 1 */
+#define INTR_CTL_FUNC2_EN	0x4	/* interrupt enable for function 2 */
+
+/* intr_status */
+#define INTR_STATUS_FUNC1	0x2	/* interrupt pending for function 1 */
+#define INTR_STATUS_FUNC2	0x4	/* interrupt pending for function 2 */
+
+/* io_abort */
+#define IO_ABORT_RESET_ALL	0x08	/* I/O card reset */
+#define IO_ABORT_FUNC_MASK	0x07	/* abort selction: function x */
+
+/* bus_inter */
+#define BUS_CARD_DETECT_DIS	0x80	/* Card Detect disable */
+#define BUS_SPI_CONT_INTR_CAP	0x40	/* support continuous SPI interrupt */
+#define BUS_SPI_CONT_INTR_EN	0x20	/* continuous SPI interrupt enable */
+#define BUS_SD_DATA_WIDTH_MASK	0x03	/* bus width mask */
+#define BUS_SD_DATA_WIDTH_4BIT	0x02	/* bus width 4-bit mode */
+#define BUS_SD_DATA_WIDTH_1BIT	0x00	/* bus width 1-bit mode */
+
+/* capability */
+#define SDIO_CAP_4BLS		0x80	/* 4-bit support for low speed card */
+#define SDIO_CAP_LSC		0x40	/* low speed card */
+#define SDIO_CAP_E4MI		0x20	/* enable interrupt between block of data in 4-bit mode */
+#define SDIO_CAP_S4MI		0x10	/* support interrupt between block of data in 4-bit mode */
+#define SDIO_CAP_SBS		0x08	/* support suspend/resume */
+#define SDIO_CAP_SRW		0x04	/* support read wait */
+#define SDIO_CAP_SMB		0x02	/* support multi-block transfer */
+#define SDIO_CAP_SDC		0x01	/* Support Direct commands during multi-byte transfer */
+
+/* power_control */
+#define SDIO_POWER_SMPC		0x01	/* supports master power control (RO) */
+#define SDIO_POWER_EMPC		0x02	/* enable master power control (allow > 200mA) (RW) */
+
+/* speed_control (control device entry into high-speed clocking mode) */
+#define SDIO_SPEED_SHS		0x01	/* supports high-speed [clocking] mode (RO) */
+#define SDIO_SPEED_EHS		0x02	/* enable high-speed [clocking] mode (RW) */
+#define SDIO_SPEED_UHSI_DDR50	   0x08
+
+/* for setting bus speed in card: 0x13h */
+#define SDIO_BUS_SPEED_UHSISEL_M	BITFIELD_MASK(3)
+#define SDIO_BUS_SPEED_UHSISEL_S	1
+
+/* for getting bus speed cap in card: 0x14h */
+#define SDIO_BUS_SPEED_UHSICAP_M	BITFIELD_MASK(3)
+#define SDIO_BUS_SPEED_UHSICAP_S	0
+
+/* for getting driver type CAP in card: 0x15h */
+#define SDIO_BUS_DRVR_TYPE_CAP_M	BITFIELD_MASK(3)
+#define SDIO_BUS_DRVR_TYPE_CAP_S	0
+
+/* for setting driver type selection in card: 0x15h */
+#define SDIO_BUS_DRVR_TYPE_SEL_M	BITFIELD_MASK(2)
+#define SDIO_BUS_DRVR_TYPE_SEL_S	4
+
+/* for getting async int support in card: 0x16h */
+#define SDIO_BUS_ASYNCINT_CAP_M	BITFIELD_MASK(1)
+#define SDIO_BUS_ASYNCINT_CAP_S	0
+
+/* for setting async int selection in card: 0x16h */
+#define SDIO_BUS_ASYNCINT_SEL_M	BITFIELD_MASK(1)
+#define SDIO_BUS_ASYNCINT_SEL_S	1
+
+/* brcm sepint */
+#define SDIO_SEPINT_MASK	0x01	/* route sdpcmdev intr onto separate pad (chip-specific) */
+#define SDIO_SEPINT_OE		0x02	/* 1 asserts output enable for above pad */
+#define SDIO_SEPINT_ACT_HI	0x04	/* use active high interrupt level instead of active low */
+
+/* FBR structure for function 1-7, FBR addresses and register offsets */
+typedef volatile struct {
+	uint8	devctr;			/* device interface, CSA control */
+	uint8	ext_dev;		/* extended standard I/O device type code */
+	uint8	pwr_sel;		/* power selection support */
+	uint8	PAD[6];			/* reserved */
+
+	uint8	cis_low;		/* CIS LSB */
+	uint8	cis_mid;
+	uint8	cis_high;		/* CIS MSB */
+	uint8	csa_low;		/* code storage area, LSB */
+	uint8	csa_mid;
+	uint8	csa_high;		/* code storage area, MSB */
+	uint8	csa_dat_win;		/* data access window to function */
+
+	uint8	fnx_blk_size[2];	/* block size, little endian */
+} sdio_fbr_t;
+
+/* Maximum number of I/O funcs */
+#define SDIOD_MAX_FUNCS			8
+#define SDIOD_MAX_IOFUNCS		7
+
+/* SDIO Device FBR Start Address  */
+#define SDIOD_FBR_STARTADDR		0x100
+
+/* SDIO Device FBR Size */
+#define SDIOD_FBR_SIZE			0x100
+
+/* Macro to calculate FBR register base */
+#define SDIOD_FBR_BASE(n)		((n) * 0x100)
+
+/* Function register offsets */
+#define SDIOD_FBR_DEVCTR		0x00	/* basic info for function */
+#define SDIOD_FBR_EXT_DEV		0x01	/* extended I/O device code */
+#define SDIOD_FBR_PWR_SEL		0x02	/* power selection bits */
+
+/* SDIO Function CIS ptr offset */
+#define SDIOD_FBR_CISPTR_0		0x09
+#define SDIOD_FBR_CISPTR_1		0x0A
+#define SDIOD_FBR_CISPTR_2		0x0B
+
+/* Code Storage Area pointer */
+#define SDIOD_FBR_CSA_ADDR_0		0x0C
+#define SDIOD_FBR_CSA_ADDR_1		0x0D
+#define SDIOD_FBR_CSA_ADDR_2		0x0E
+#define SDIOD_FBR_CSA_DATA		0x0F
+
+/* SDIO Function I/O Block Size */
+#define SDIOD_FBR_BLKSIZE_0		0x10
+#define SDIOD_FBR_BLKSIZE_1		0x11
+
+/* devctr */
+#define SDIOD_FBR_DEVCTR_DIC	0x0f	/* device interface code */
+#define SDIOD_FBR_DECVTR_CSA	0x40	/* CSA support flag */
+#define SDIOD_FBR_DEVCTR_CSA_EN	0x80	/* CSA enabled */
+/* interface codes */
+#define SDIOD_DIC_NONE		0	/* SDIO standard interface is not supported */
+#define SDIOD_DIC_UART		1
+#define SDIOD_DIC_BLUETOOTH_A	2
+#define SDIOD_DIC_BLUETOOTH_B	3
+#define SDIOD_DIC_GPS		4
+#define SDIOD_DIC_CAMERA	5
+#define SDIOD_DIC_PHS		6
+#define SDIOD_DIC_WLAN		7
+#define SDIOD_DIC_EXT		0xf	/* extended device interface, read ext_dev register */
+
+/* pwr_sel */
+#define SDIOD_PWR_SEL_SPS	0x01	/* supports power selection */
+#define SDIOD_PWR_SEL_EPS	0x02	/* enable power selection (low-current mode) */
+
+/* misc defines */
+#define SDIO_FUNC_0		0
+#define SDIO_FUNC_1		1
+#define SDIO_FUNC_2		2
+#define SDIO_FUNC_3		3
+#define SDIO_FUNC_4		4
+#define SDIO_FUNC_5		5
+#define SDIO_FUNC_6		6
+#define SDIO_FUNC_7		7
+
+#define SD_CARD_TYPE_UNKNOWN	0	/* bad type or unrecognized */
+#define SD_CARD_TYPE_IO		1	/* IO only card */
+#define SD_CARD_TYPE_MEMORY	2	/* memory only card */
+#define SD_CARD_TYPE_COMBO	3	/* IO and memory combo card */
+
+#define SDIO_MAX_BLOCK_SIZE	2048	/* maximum block size for block mode operation */
+#define SDIO_MIN_BLOCK_SIZE	1	/* minimum block size for block mode operation */
+
+/* Card registers: status bit position */
+#define CARDREG_STATUS_BIT_OUTOFRANGE		31
+#define CARDREG_STATUS_BIT_COMCRCERROR		23
+#define CARDREG_STATUS_BIT_ILLEGALCOMMAND	22
+#define CARDREG_STATUS_BIT_ERROR		19
+#define CARDREG_STATUS_BIT_IOCURRENTSTATE3	12
+#define CARDREG_STATUS_BIT_IOCURRENTSTATE2	11
+#define CARDREG_STATUS_BIT_IOCURRENTSTATE1	10
+#define CARDREG_STATUS_BIT_IOCURRENTSTATE0	9
+#define CARDREG_STATUS_BIT_FUN_NUM_ERROR	4
+
+
+
+#define SD_CMD_GO_IDLE_STATE		0	/* mandatory for SDIO */
+#define SD_CMD_SEND_OPCOND		1
+#define SD_CMD_MMC_SET_RCA		3
+#define SD_CMD_IO_SEND_OP_COND		5	/* mandatory for SDIO */
+#define SD_CMD_SELECT_DESELECT_CARD	7
+#define SD_CMD_SEND_CSD			9
+#define SD_CMD_SEND_CID			10
+#define SD_CMD_STOP_TRANSMISSION	12
+#define SD_CMD_SEND_STATUS		13
+#define SD_CMD_GO_INACTIVE_STATE	15
+#define SD_CMD_SET_BLOCKLEN		16
+#define SD_CMD_READ_SINGLE_BLOCK	17
+#define SD_CMD_READ_MULTIPLE_BLOCK	18
+#define SD_CMD_WRITE_BLOCK		24
+#define SD_CMD_WRITE_MULTIPLE_BLOCK	25
+#define SD_CMD_PROGRAM_CSD		27
+#define SD_CMD_SET_WRITE_PROT		28
+#define SD_CMD_CLR_WRITE_PROT		29
+#define SD_CMD_SEND_WRITE_PROT		30
+#define SD_CMD_ERASE_WR_BLK_START	32
+#define SD_CMD_ERASE_WR_BLK_END		33
+#define SD_CMD_ERASE			38
+#define SD_CMD_LOCK_UNLOCK		42
+#define SD_CMD_IO_RW_DIRECT		52	/* mandatory for SDIO */
+#define SD_CMD_IO_RW_EXTENDED		53	/* mandatory for SDIO */
+#define SD_CMD_APP_CMD			55
+#define SD_CMD_GEN_CMD			56
+#define SD_CMD_READ_OCR			58
+#define SD_CMD_CRC_ON_OFF		59	/* mandatory for SDIO */
+#define SD_ACMD_SD_STATUS		13
+#define SD_ACMD_SEND_NUM_WR_BLOCKS	22
+#define SD_ACMD_SET_WR_BLOCK_ERASE_CNT	23
+#define SD_ACMD_SD_SEND_OP_COND		41
+#define SD_ACMD_SET_CLR_CARD_DETECT	42
+#define SD_ACMD_SEND_SCR		51
+
+/* argument for SD_CMD_IO_RW_DIRECT and SD_CMD_IO_RW_EXTENDED */
+#define SD_IO_OP_READ		0   /* Read_Write: Read */
+#define SD_IO_OP_WRITE		1   /* Read_Write: Write */
+#define SD_IO_RW_NORMAL		0   /* no RAW */
+#define SD_IO_RW_RAW		1   /* RAW */
+#define SD_IO_BYTE_MODE		0   /* Byte Mode */
+#define SD_IO_BLOCK_MODE	1   /* BlockMode */
+#define SD_IO_FIXED_ADDRESS	0   /* fix Address */
+#define SD_IO_INCREMENT_ADDRESS	1   /* IncrementAddress */
+
+/* build SD_CMD_IO_RW_DIRECT Argument */
+#define SDIO_IO_RW_DIRECT_ARG(rw, raw, func, addr, data) \
+	((((rw) & 1) << 31) | (((func) & 0x7) << 28) | (((raw) & 1) << 27) | \
+	 (((addr) & 0x1FFFF) << 9) | ((data) & 0xFF))
+
+/* build SD_CMD_IO_RW_EXTENDED Argument */
+#define SDIO_IO_RW_EXTENDED_ARG(rw, blk, func, addr, inc_addr, count) \
+	((((rw) & 1) << 31) | (((func) & 0x7) << 28) | (((blk) & 1) << 27) | \
+	 (((inc_addr) & 1) << 26) | (((addr) & 0x1FFFF) << 9) | ((count) & 0x1FF))
+
+/* SDIO response parameters */
+#define SD_RSP_NO_NONE			0
+#define SD_RSP_NO_1			1
+#define SD_RSP_NO_2			2
+#define SD_RSP_NO_3			3
+#define SD_RSP_NO_4			4
+#define SD_RSP_NO_5			5
+#define SD_RSP_NO_6			6
+
+	/* Modified R6 response (to CMD3) */
+#define SD_RSP_MR6_COM_CRC_ERROR	0x8000
+#define SD_RSP_MR6_ILLEGAL_COMMAND	0x4000
+#define SD_RSP_MR6_ERROR		0x2000
+
+	/* Modified R1 in R4 Response (to CMD5) */
+#define SD_RSP_MR1_SBIT			0x80
+#define SD_RSP_MR1_PARAMETER_ERROR	0x40
+#define SD_RSP_MR1_RFU5			0x20
+#define SD_RSP_MR1_FUNC_NUM_ERROR	0x10
+#define SD_RSP_MR1_COM_CRC_ERROR	0x08
+#define SD_RSP_MR1_ILLEGAL_COMMAND	0x04
+#define SD_RSP_MR1_RFU1			0x02
+#define SD_RSP_MR1_IDLE_STATE		0x01
+
+	/* R5 response (to CMD52 and CMD53) */
+#define SD_RSP_R5_COM_CRC_ERROR		0x80
+#define SD_RSP_R5_ILLEGAL_COMMAND	0x40
+#define SD_RSP_R5_IO_CURRENTSTATE1	0x20
+#define SD_RSP_R5_IO_CURRENTSTATE0	0x10
+#define SD_RSP_R5_ERROR			0x08
+#define SD_RSP_R5_RFU			0x04
+#define SD_RSP_R5_FUNC_NUM_ERROR	0x02
+#define SD_RSP_R5_OUT_OF_RANGE		0x01
+
+#define SD_RSP_R5_ERRBITS		0xCB
+
+
+/* ------------------------------------------------
+ *  SDIO Commands and responses
+ *
+ *  I/O only commands are:
+ *      CMD0, CMD3, CMD5, CMD7, CMD14, CMD15, CMD52, CMD53
+ * ------------------------------------------------
+ */
+
+/* SDIO Commands */
+#define SDIOH_CMD_0		0
+#define SDIOH_CMD_3		3
+#define SDIOH_CMD_5		5
+#define SDIOH_CMD_7		7
+#define SDIOH_CMD_11		11
+#define SDIOH_CMD_14		14
+#define SDIOH_CMD_15		15
+#define SDIOH_CMD_19		19
+#define SDIOH_CMD_52		52
+#define SDIOH_CMD_53		53
+#define SDIOH_CMD_59		59
+
+/* SDIO Command Responses */
+#define SDIOH_RSP_NONE		0
+#define SDIOH_RSP_R1		1
+#define SDIOH_RSP_R2		2
+#define SDIOH_RSP_R3		3
+#define SDIOH_RSP_R4		4
+#define SDIOH_RSP_R5		5
+#define SDIOH_RSP_R6		6
+
+/*
+ *  SDIO Response Error flags
+ */
+#define SDIOH_RSP5_ERROR_FLAGS	0xCB
+
+/* ------------------------------------------------
+ * SDIO Command structures. I/O only commands are:
+ *
+ * 	CMD0, CMD3, CMD5, CMD7, CMD15, CMD52, CMD53
+ * ------------------------------------------------
+ */
+
+#define CMD5_OCR_M		BITFIELD_MASK(24)
+#define CMD5_OCR_S		0
+
+#define CMD5_S18R_M		BITFIELD_MASK(1)
+#define CMD5_S18R_S		24
+
+#define CMD7_RCA_M		BITFIELD_MASK(16)
+#define CMD7_RCA_S		16
+
+#define CMD14_RCA_M		BITFIELD_MASK(16)
+#define CMD14_RCA_S		16
+#define CMD14_SLEEP_M		BITFIELD_MASK(1)
+#define CMD14_SLEEP_S		15
+
+#define CMD_15_RCA_M		BITFIELD_MASK(16)
+#define CMD_15_RCA_S		16
+
+#define CMD52_DATA_M		BITFIELD_MASK(8)  /* Bits [7:0]    - Write Data/Stuff bits of CMD52
+						   */
+#define CMD52_DATA_S		0
+#define CMD52_REG_ADDR_M	BITFIELD_MASK(17) /* Bits [25:9]   - register address */
+#define CMD52_REG_ADDR_S	9
+#define CMD52_RAW_M		BITFIELD_MASK(1)  /* Bit  27       - Read after Write flag */
+#define CMD52_RAW_S		27
+#define CMD52_FUNCTION_M	BITFIELD_MASK(3)  /* Bits [30:28]  - Function number */
+#define CMD52_FUNCTION_S	28
+#define CMD52_RW_FLAG_M		BITFIELD_MASK(1)  /* Bit  31       - R/W flag */
+#define CMD52_RW_FLAG_S		31
+
+
+#define CMD53_BYTE_BLK_CNT_M	BITFIELD_MASK(9) /* Bits [8:0]     - Byte/Block Count of CMD53 */
+#define CMD53_BYTE_BLK_CNT_S	0
+#define CMD53_REG_ADDR_M	BITFIELD_MASK(17) /* Bits [25:9]   - register address */
+#define CMD53_REG_ADDR_S	9
+#define CMD53_OP_CODE_M		BITFIELD_MASK(1)  /* Bit  26       - R/W Operation Code */
+#define CMD53_OP_CODE_S		26
+#define CMD53_BLK_MODE_M	BITFIELD_MASK(1)  /* Bit  27       - Block Mode */
+#define CMD53_BLK_MODE_S	27
+#define CMD53_FUNCTION_M	BITFIELD_MASK(3)  /* Bits [30:28]  - Function number */
+#define CMD53_FUNCTION_S	28
+#define CMD53_RW_FLAG_M		BITFIELD_MASK(1)  /* Bit  31       - R/W flag */
+#define CMD53_RW_FLAG_S		31
+
+/* ------------------------------------------------------
+ * SDIO Command Response structures for SD1 and SD4 modes
+ *  -----------------------------------------------------
+ */
+#define RSP4_IO_OCR_M		BITFIELD_MASK(24) /* Bits [23:0]  - Card's OCR Bits [23:0] */
+#define RSP4_IO_OCR_S		0
+
+#define RSP4_S18A_M			BITFIELD_MASK(1) /* Bits [23:0]  - Card's OCR Bits [23:0] */
+#define RSP4_S18A_S			24
+
+#define RSP4_STUFF_M		BITFIELD_MASK(3)  /* Bits [26:24] - Stuff bits */
+#define RSP4_STUFF_S		24
+#define RSP4_MEM_PRESENT_M	BITFIELD_MASK(1)  /* Bit  27      - Memory present */
+#define RSP4_MEM_PRESENT_S	27
+#define RSP4_NUM_FUNCS_M	BITFIELD_MASK(3)  /* Bits [30:28] - Number of I/O funcs */
+#define RSP4_NUM_FUNCS_S	28
+#define RSP4_CARD_READY_M	BITFIELD_MASK(1)  /* Bit  31      - SDIO card ready */
+#define RSP4_CARD_READY_S	31
+
+#define RSP6_STATUS_M		BITFIELD_MASK(16) /* Bits [15:0]  - Card status bits [19,22,23,12:0]
+						   */
+#define RSP6_STATUS_S		0
+#define RSP6_IO_RCA_M		BITFIELD_MASK(16) /* Bits [31:16] - RCA bits[31-16] */
+#define RSP6_IO_RCA_S		16
+
+#define RSP1_AKE_SEQ_ERROR_M	BITFIELD_MASK(1)  /* Bit 3       - Authentication seq error */
+#define RSP1_AKE_SEQ_ERROR_S	3
+#define RSP1_APP_CMD_M		BITFIELD_MASK(1)  /* Bit 5       - Card expects ACMD */
+#define RSP1_APP_CMD_S		5
+#define RSP1_READY_FOR_DATA_M	BITFIELD_MASK(1)  /* Bit 8       - Ready for data (buff empty) */
+#define RSP1_READY_FOR_DATA_S	8
+#define RSP1_CURR_STATE_M	BITFIELD_MASK(4)  /* Bits [12:9] - State of card
+						   * when Cmd was received
+						   */
+#define RSP1_CURR_STATE_S	9
+#define RSP1_EARSE_RESET_M	BITFIELD_MASK(1)  /* Bit 13   - Erase seq cleared */
+#define RSP1_EARSE_RESET_S	13
+#define RSP1_CARD_ECC_DISABLE_M	BITFIELD_MASK(1)  /* Bit 14   - Card ECC disabled */
+#define RSP1_CARD_ECC_DISABLE_S	14
+#define RSP1_WP_ERASE_SKIP_M	BITFIELD_MASK(1)  /* Bit 15   - Partial blocks erased due to W/P */
+#define RSP1_WP_ERASE_SKIP_S	15
+#define RSP1_CID_CSD_OVERW_M	BITFIELD_MASK(1)  /* Bit 16   - Illegal write to CID or R/O bits
+						   * of CSD
+						   */
+#define RSP1_CID_CSD_OVERW_S	16
+#define RSP1_ERROR_M		BITFIELD_MASK(1)  /* Bit 19   - General/Unknown error */
+#define RSP1_ERROR_S		19
+#define RSP1_CC_ERROR_M		BITFIELD_MASK(1)  /* Bit 20   - Internal Card Control error */
+#define RSP1_CC_ERROR_S		20
+#define RSP1_CARD_ECC_FAILED_M	BITFIELD_MASK(1)  /* Bit 21   - Card internal ECC failed
+						   * to correct data
+						   */
+#define RSP1_CARD_ECC_FAILED_S	21
+#define RSP1_ILLEGAL_CMD_M	BITFIELD_MASK(1)  /* Bit 22   - Cmd not legal for the card state */
+#define RSP1_ILLEGAL_CMD_S	22
+#define RSP1_COM_CRC_ERROR_M	BITFIELD_MASK(1)  /* Bit 23   - CRC check of previous command failed
+						   */
+#define RSP1_COM_CRC_ERROR_S	23
+#define RSP1_LOCK_UNLOCK_FAIL_M	BITFIELD_MASK(1)  /* Bit 24   - Card lock-unlock Cmd Seq error */
+#define RSP1_LOCK_UNLOCK_FAIL_S	24
+#define RSP1_CARD_LOCKED_M	BITFIELD_MASK(1)  /* Bit 25   - Card locked by the host */
+#define RSP1_CARD_LOCKED_S	25
+#define RSP1_WP_VIOLATION_M	BITFIELD_MASK(1)  /* Bit 26   - Attempt to program
+						   * write-protected blocks
+						   */
+#define RSP1_WP_VIOLATION_S	26
+#define RSP1_ERASE_PARAM_M	BITFIELD_MASK(1)  /* Bit 27   - Invalid erase blocks */
+#define RSP1_ERASE_PARAM_S	27
+#define RSP1_ERASE_SEQ_ERR_M	BITFIELD_MASK(1)  /* Bit 28   - Erase Cmd seq error */
+#define RSP1_ERASE_SEQ_ERR_S	28
+#define RSP1_BLK_LEN_ERR_M	BITFIELD_MASK(1)  /* Bit 29   - Block length error */
+#define RSP1_BLK_LEN_ERR_S	29
+#define RSP1_ADDR_ERR_M		BITFIELD_MASK(1)  /* Bit 30   - Misaligned address */
+#define RSP1_ADDR_ERR_S		30
+#define RSP1_OUT_OF_RANGE_M	BITFIELD_MASK(1)  /* Bit 31   - Cmd arg was out of range */
+#define RSP1_OUT_OF_RANGE_S	31
+
+
+#define RSP5_DATA_M		BITFIELD_MASK(8)  /* Bits [0:7]   - data */
+#define RSP5_DATA_S		0
+#define RSP5_FLAGS_M		BITFIELD_MASK(8)  /* Bit  [15:8]  - Rsp flags */
+#define RSP5_FLAGS_S		8
+#define RSP5_STUFF_M		BITFIELD_MASK(16) /* Bits [31:16] - Stuff bits */
+#define RSP5_STUFF_S		16
+
+/* ----------------------------------------------
+ * SDIO Command Response structures for SPI mode
+ * ----------------------------------------------
+ */
+#define SPIRSP4_IO_OCR_M	BITFIELD_MASK(16) /* Bits [15:0]    - Card's OCR Bits [23:8] */
+#define SPIRSP4_IO_OCR_S	0
+#define SPIRSP4_STUFF_M		BITFIELD_MASK(3)  /* Bits [18:16]   - Stuff bits */
+#define SPIRSP4_STUFF_S		16
+#define SPIRSP4_MEM_PRESENT_M	BITFIELD_MASK(1)  /* Bit  19        - Memory present */
+#define SPIRSP4_MEM_PRESENT_S	19
+#define SPIRSP4_NUM_FUNCS_M	BITFIELD_MASK(3)  /* Bits [22:20]   - Number of I/O funcs */
+#define SPIRSP4_NUM_FUNCS_S	20
+#define SPIRSP4_CARD_READY_M	BITFIELD_MASK(1)  /* Bit  23        - SDIO card ready */
+#define SPIRSP4_CARD_READY_S	23
+#define SPIRSP4_IDLE_STATE_M	BITFIELD_MASK(1)  /* Bit  24        - idle state */
+#define SPIRSP4_IDLE_STATE_S	24
+#define SPIRSP4_ILLEGAL_CMD_M	BITFIELD_MASK(1)  /* Bit  26        - Illegal Cmd error */
+#define SPIRSP4_ILLEGAL_CMD_S	26
+#define SPIRSP4_COM_CRC_ERROR_M	BITFIELD_MASK(1)  /* Bit  27        - COM CRC error */
+#define SPIRSP4_COM_CRC_ERROR_S	27
+#define SPIRSP4_FUNC_NUM_ERROR_M	BITFIELD_MASK(1)  /* Bit  28        - Function number error
+							   */
+#define SPIRSP4_FUNC_NUM_ERROR_S	28
+#define SPIRSP4_PARAM_ERROR_M	BITFIELD_MASK(1)  /* Bit  30        - Parameter Error Bit */
+#define SPIRSP4_PARAM_ERROR_S	30
+#define SPIRSP4_START_BIT_M	BITFIELD_MASK(1)  /* Bit  31        - Start Bit */
+#define SPIRSP4_START_BIT_S	31
+
+#define SPIRSP5_DATA_M			BITFIELD_MASK(8)  /* Bits [23:16]   - R/W Data */
+#define SPIRSP5_DATA_S			16
+#define SPIRSP5_IDLE_STATE_M		BITFIELD_MASK(1)  /* Bit  24        - Idle state */
+#define SPIRSP5_IDLE_STATE_S		24
+#define SPIRSP5_ILLEGAL_CMD_M		BITFIELD_MASK(1)  /* Bit  26        - Illegal Cmd error */
+#define SPIRSP5_ILLEGAL_CMD_S		26
+#define SPIRSP5_COM_CRC_ERROR_M		BITFIELD_MASK(1)  /* Bit  27        - COM CRC error */
+#define SPIRSP5_COM_CRC_ERROR_S		27
+#define SPIRSP5_FUNC_NUM_ERROR_M	BITFIELD_MASK(1)  /* Bit  28        - Function number error
+							   */
+#define SPIRSP5_FUNC_NUM_ERROR_S	28
+#define SPIRSP5_PARAM_ERROR_M		BITFIELD_MASK(1)  /* Bit  30        - Parameter Error Bit */
+#define SPIRSP5_PARAM_ERROR_S		30
+#define SPIRSP5_START_BIT_M		BITFIELD_MASK(1)  /* Bit  31        - Start Bit */
+#define SPIRSP5_START_BIT_S		31
+
+/* RSP6 card status format; Pg 68 Physical Layer spec v 1.10 */
+#define RSP6STAT_AKE_SEQ_ERROR_M	BITFIELD_MASK(1)  /* Bit 3	- Authentication seq error
+							   */
+#define RSP6STAT_AKE_SEQ_ERROR_S	3
+#define RSP6STAT_APP_CMD_M		BITFIELD_MASK(1)  /* Bit 5	- Card expects ACMD */
+#define RSP6STAT_APP_CMD_S		5
+#define RSP6STAT_READY_FOR_DATA_M	BITFIELD_MASK(1)  /* Bit 8	- Ready for data
+							   * (buff empty)
+							   */
+#define RSP6STAT_READY_FOR_DATA_S	8
+#define RSP6STAT_CURR_STATE_M		BITFIELD_MASK(4)  /* Bits [12:9] - Card state at
+							   * Cmd reception
+							   */
+#define RSP6STAT_CURR_STATE_S		9
+#define RSP6STAT_ERROR_M		BITFIELD_MASK(1)  /* Bit 13  - General/Unknown error Bit 19
+							   */
+#define RSP6STAT_ERROR_S		13
+#define RSP6STAT_ILLEGAL_CMD_M		BITFIELD_MASK(1)  /* Bit 14  - Illegal cmd for
+							   * card state Bit 22
+							   */
+#define RSP6STAT_ILLEGAL_CMD_S		14
+#define RSP6STAT_COM_CRC_ERROR_M	BITFIELD_MASK(1)  /* Bit 15  - CRC previous command
+							   * failed Bit 23
+							   */
+#define RSP6STAT_COM_CRC_ERROR_S	15
+
+#define SDIOH_XFER_TYPE_READ    SD_IO_OP_READ
+#define SDIOH_XFER_TYPE_WRITE   SD_IO_OP_WRITE
+
+/* command issue options */
+#define CMD_OPTION_DEFAULT	0
+#define CMD_OPTION_TUNING	1
+
+#endif /* def BCMSDIO */
+#endif /* _SDIO_H */
diff --git a/drivers/net/wireless/bcmdhd/include/sdioh.h b/drivers/net/wireless/bcmdhd/include/sdioh.h
new file mode 100644
index 0000000..f2bd9ae
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sdioh.h
@@ -0,0 +1,445 @@
+/*
+ * SDIO Host Controller Spec header file
+ * Register map and definitions for the Standard Host Controller
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sdioh.h 345499 2012-07-18 06:59:05Z $
+ */
+
+#ifndef	_SDIOH_H
+#define	_SDIOH_H
+
+#define SD_SysAddr			0x000
+#define SD_BlockSize			0x004
+#define SD_BlockCount 			0x006
+#define SD_Arg0				0x008
+#define SD_Arg1 			0x00A
+#define SD_TransferMode			0x00C
+#define SD_Command 			0x00E
+#define SD_Response0			0x010
+#define SD_Response1 			0x012
+#define SD_Response2			0x014
+#define SD_Response3 			0x016
+#define SD_Response4			0x018
+#define SD_Response5 			0x01A
+#define SD_Response6			0x01C
+#define SD_Response7 			0x01E
+#define SD_BufferDataPort0		0x020
+#define SD_BufferDataPort1 		0x022
+#define SD_PresentState			0x024
+#define SD_HostCntrl			0x028
+#define SD_PwrCntrl			0x029
+#define SD_BlockGapCntrl 		0x02A
+#define SD_WakeupCntrl 			0x02B
+#define SD_ClockCntrl			0x02C
+#define SD_TimeoutCntrl 		0x02E
+#define SD_SoftwareReset		0x02F
+#define SD_IntrStatus			0x030
+#define SD_ErrorIntrStatus 		0x032
+#define SD_IntrStatusEnable		0x034
+#define SD_ErrorIntrStatusEnable 	0x036
+#define SD_IntrSignalEnable		0x038
+#define SD_ErrorIntrSignalEnable 	0x03A
+#define SD_CMD12ErrorStatus		0x03C
+#define SD_Capabilities			0x040
+#define SD_Capabilities3		0x044
+#define SD_MaxCurCap			0x048
+#define SD_MaxCurCap_Reserved		0x04C
+#define SD_ADMA_ErrStatus		0x054
+#define SD_ADMA_SysAddr			0x58
+#define SD_SlotInterruptStatus		0x0FC
+#define SD_HostControllerVersion 	0x0FE
+#define	SD_GPIO_Reg			0x100
+#define	SD_GPIO_OE			0x104
+#define	SD_GPIO_Enable			0x108
+
+/* SD specific registers in PCI config space */
+#define SD_SlotInfo	0x40
+
+/* HC 3.0 specific registers and offsets */
+#define SD3_HostCntrl2			0x03E
+/* preset regsstart and count */
+#define SD3_PresetValStart		0x060
+#define SD3_PresetValCount		8
+/* preset-indiv regs */
+#define SD3_PresetVal_init		0x060
+#define SD3_PresetVal_default	0x062
+#define SD3_PresetVal_HS		0x064
+#define SD3_PresetVal_SDR12		0x066
+#define SD3_PresetVal_SDR25		0x068
+#define SD3_PresetVal_SDR50		0x06a
+#define SD3_PresetVal_SDR104	0x06c
+#define SD3_PresetVal_DDR50		0x06e
+/* SDIO3.0 Revx specific Registers */
+#define SD3_Tuning_Info_Register 0x0EC
+#define SD3_WL_BT_reset_register 0x0F0
+
+
+/* preset value indices */
+#define SD3_PRESETVAL_INITIAL_IX	0
+#define SD3_PRESETVAL_DESPEED_IX	1
+#define SD3_PRESETVAL_HISPEED_IX	2
+#define SD3_PRESETVAL_SDR12_IX		3
+#define SD3_PRESETVAL_SDR25_IX		4
+#define SD3_PRESETVAL_SDR50_IX		5
+#define SD3_PRESETVAL_SDR104_IX		6
+#define SD3_PRESETVAL_DDR50_IX		7
+
+/* SD_Capabilities reg (0x040) */
+#define CAP_TO_CLKFREQ_M 	BITFIELD_MASK(6)
+#define CAP_TO_CLKFREQ_S 	0
+#define CAP_TO_CLKUNIT_M  	BITFIELD_MASK(1)
+#define CAP_TO_CLKUNIT_S 	7
+/* Note: for sdio-2.0 case, this mask has to be 6 bits, but msb 2
+	bits are reserved. going ahead with 8 bits, as it is req for 3.0
+*/
+#define CAP_BASECLK_M 		BITFIELD_MASK(8)
+#define CAP_BASECLK_S 		8
+#define CAP_MAXBLOCK_M 		BITFIELD_MASK(2)
+#define CAP_MAXBLOCK_S		16
+#define CAP_ADMA2_M		BITFIELD_MASK(1)
+#define CAP_ADMA2_S		19
+#define CAP_ADMA1_M		BITFIELD_MASK(1)
+#define CAP_ADMA1_S		20
+#define CAP_HIGHSPEED_M		BITFIELD_MASK(1)
+#define CAP_HIGHSPEED_S		21
+#define CAP_DMA_M		BITFIELD_MASK(1)
+#define CAP_DMA_S		22
+#define CAP_SUSPEND_M		BITFIELD_MASK(1)
+#define CAP_SUSPEND_S		23
+#define CAP_VOLT_3_3_M		BITFIELD_MASK(1)
+#define CAP_VOLT_3_3_S		24
+#define CAP_VOLT_3_0_M		BITFIELD_MASK(1)
+#define CAP_VOLT_3_0_S		25
+#define CAP_VOLT_1_8_M		BITFIELD_MASK(1)
+#define CAP_VOLT_1_8_S		26
+#define CAP_64BIT_HOST_M	BITFIELD_MASK(1)
+#define CAP_64BIT_HOST_S	28
+
+#define SDIO_OCR_READ_FAIL	(2)
+
+
+#define CAP_ASYNCINT_SUP_M	BITFIELD_MASK(1)
+#define CAP_ASYNCINT_SUP_S	29
+
+#define CAP_SLOTTYPE_M		BITFIELD_MASK(2)
+#define CAP_SLOTTYPE_S		30
+
+#define CAP3_MSBits_OFFSET	(32)
+/* note: following are caps MSB32 bits.
+	So the bits start from 0, instead of 32. that is why
+	CAP3_MSBits_OFFSET is subtracted.
+*/
+#define CAP3_SDR50_SUP_M		BITFIELD_MASK(1)
+#define CAP3_SDR50_SUP_S		(32 - CAP3_MSBits_OFFSET)
+
+#define CAP3_SDR104_SUP_M	BITFIELD_MASK(1)
+#define CAP3_SDR104_SUP_S	(33 - CAP3_MSBits_OFFSET)
+
+#define CAP3_DDR50_SUP_M	BITFIELD_MASK(1)
+#define CAP3_DDR50_SUP_S	(34 - CAP3_MSBits_OFFSET)
+
+/* for knowing the clk caps in a single read */
+#define CAP3_30CLKCAP_M		BITFIELD_MASK(3)
+#define CAP3_30CLKCAP_S		(32 - CAP3_MSBits_OFFSET)
+
+#define CAP3_DRIVTYPE_A_M	BITFIELD_MASK(1)
+#define CAP3_DRIVTYPE_A_S	(36 - CAP3_MSBits_OFFSET)
+
+#define CAP3_DRIVTYPE_C_M	BITFIELD_MASK(1)
+#define CAP3_DRIVTYPE_C_S	(37 - CAP3_MSBits_OFFSET)
+
+#define CAP3_DRIVTYPE_D_M	BITFIELD_MASK(1)
+#define CAP3_DRIVTYPE_D_S	(38 - CAP3_MSBits_OFFSET)
+
+#define CAP3_RETUNING_TC_M	BITFIELD_MASK(4)
+#define CAP3_RETUNING_TC_S	(40 - CAP3_MSBits_OFFSET)
+
+#define CAP3_TUNING_SDR50_M	BITFIELD_MASK(1)
+#define CAP3_TUNING_SDR50_S	(45 - CAP3_MSBits_OFFSET)
+
+#define CAP3_RETUNING_MODES_M	BITFIELD_MASK(2)
+#define CAP3_RETUNING_MODES_S	(46 - CAP3_MSBits_OFFSET)
+
+#define CAP3_CLK_MULT_M		BITFIELD_MASK(8)
+#define CAP3_CLK_MULT_S		(48 - CAP3_MSBits_OFFSET)
+
+#define PRESET_DRIVR_SELECT_M	BITFIELD_MASK(2)
+#define PRESET_DRIVR_SELECT_S	14
+
+#define PRESET_CLK_DIV_M	BITFIELD_MASK(10)
+#define PRESET_CLK_DIV_S	0
+
+/* SD_MaxCurCap reg (0x048) */
+#define CAP_CURR_3_3_M		BITFIELD_MASK(8)
+#define CAP_CURR_3_3_S		0
+#define CAP_CURR_3_0_M		BITFIELD_MASK(8)
+#define CAP_CURR_3_0_S		8
+#define CAP_CURR_1_8_M		BITFIELD_MASK(8)
+#define CAP_CURR_1_8_S		16
+
+/* SD_SysAddr: Offset 0x0000, Size 4 bytes */
+
+/* SD_BlockSize: Offset 0x004, Size 2 bytes */
+#define BLKSZ_BLKSZ_M		BITFIELD_MASK(12)
+#define BLKSZ_BLKSZ_S		0
+#define BLKSZ_BNDRY_M		BITFIELD_MASK(3)
+#define BLKSZ_BNDRY_S		12
+
+/* SD_BlockCount: Offset 0x006, size 2 bytes */
+
+/* SD_Arg0: Offset 0x008, size = 4 bytes  */
+/* SD_TransferMode Offset 0x00C, size = 2 bytes */
+#define XFER_DMA_ENABLE_M   	BITFIELD_MASK(1)
+#define XFER_DMA_ENABLE_S	0
+#define XFER_BLK_COUNT_EN_M 	BITFIELD_MASK(1)
+#define XFER_BLK_COUNT_EN_S	1
+#define XFER_CMD_12_EN_M    	BITFIELD_MASK(1)
+#define XFER_CMD_12_EN_S 	2
+#define XFER_DATA_DIRECTION_M	BITFIELD_MASK(1)
+#define XFER_DATA_DIRECTION_S	4
+#define XFER_MULTI_BLOCK_M	BITFIELD_MASK(1)
+#define XFER_MULTI_BLOCK_S	5
+
+/* SD_Command: Offset 0x00E, size = 2 bytes */
+/* resp_type field */
+#define RESP_TYPE_NONE 		0
+#define RESP_TYPE_136  		1
+#define RESP_TYPE_48   		2
+#define RESP_TYPE_48_BUSY	3
+/* type field */
+#define CMD_TYPE_NORMAL		0
+#define CMD_TYPE_SUSPEND	1
+#define CMD_TYPE_RESUME		2
+#define CMD_TYPE_ABORT		3
+
+#define CMD_RESP_TYPE_M		BITFIELD_MASK(2)	/* Bits [0-1] 	- Response type */
+#define CMD_RESP_TYPE_S		0
+#define CMD_CRC_EN_M		BITFIELD_MASK(1)	/* Bit 3 	- CRC enable */
+#define CMD_CRC_EN_S		3
+#define CMD_INDEX_EN_M		BITFIELD_MASK(1)	/* Bit 4 	- Enable index checking */
+#define CMD_INDEX_EN_S		4
+#define CMD_DATA_EN_M		BITFIELD_MASK(1)	/* Bit 5 	- Using DAT line */
+#define CMD_DATA_EN_S		5
+#define CMD_TYPE_M		BITFIELD_MASK(2)	/* Bit [6-7] 	- Normal, abort, resume, etc
+							 */
+#define CMD_TYPE_S		6
+#define CMD_INDEX_M		BITFIELD_MASK(6)	/* Bits [8-13] 	- Command number */
+#define CMD_INDEX_S		8
+
+/* SD_BufferDataPort0	: Offset 0x020, size = 2 or 4 bytes */
+/* SD_BufferDataPort1 	: Offset 0x022, size = 2 bytes */
+/* SD_PresentState	: Offset 0x024, size = 4 bytes */
+#define PRES_CMD_INHIBIT_M	BITFIELD_MASK(1)	/* Bit 0	May use CMD */
+#define PRES_CMD_INHIBIT_S	0
+#define PRES_DAT_INHIBIT_M	BITFIELD_MASK(1)	/* Bit 1	May use DAT */
+#define PRES_DAT_INHIBIT_S	1
+#define PRES_DAT_BUSY_M		BITFIELD_MASK(1)	/* Bit 2	DAT is busy */
+#define PRES_DAT_BUSY_S		2
+#define PRES_PRESENT_RSVD_M	BITFIELD_MASK(5)	/* Bit [3-7]	rsvd */
+#define PRES_PRESENT_RSVD_S	3
+#define PRES_WRITE_ACTIVE_M	BITFIELD_MASK(1)	/* Bit 8	Write is active */
+#define PRES_WRITE_ACTIVE_S	8
+#define PRES_READ_ACTIVE_M	BITFIELD_MASK(1)	/* Bit 9	Read is active */
+#define PRES_READ_ACTIVE_S	9
+#define PRES_WRITE_DATA_RDY_M	BITFIELD_MASK(1)	/* Bit 10	Write buf is avail */
+#define PRES_WRITE_DATA_RDY_S	10
+#define PRES_READ_DATA_RDY_M	BITFIELD_MASK(1)	/* Bit 11	Read buf data avail */
+#define PRES_READ_DATA_RDY_S	11
+#define PRES_CARD_PRESENT_M	BITFIELD_MASK(1)	/* Bit 16	Card present - debounced */
+#define PRES_CARD_PRESENT_S	16
+#define PRES_CARD_STABLE_M	BITFIELD_MASK(1)	/* Bit 17	Debugging */
+#define PRES_CARD_STABLE_S	17
+#define PRES_CARD_PRESENT_RAW_M	BITFIELD_MASK(1)	/* Bit 18	Not debounced */
+#define PRES_CARD_PRESENT_RAW_S	18
+#define PRES_WRITE_ENABLED_M	BITFIELD_MASK(1)	/* Bit 19	Write protected? */
+#define PRES_WRITE_ENABLED_S	19
+#define PRES_DAT_SIGNAL_M	BITFIELD_MASK(4)	/* Bit [20-23]	Debugging */
+#define PRES_DAT_SIGNAL_S	20
+#define PRES_CMD_SIGNAL_M	BITFIELD_MASK(1)	/* Bit 24	Debugging */
+#define PRES_CMD_SIGNAL_S	24
+
+/* SD_HostCntrl: Offset 0x028, size = 1 bytes */
+#define HOST_LED_M		BITFIELD_MASK(1)	/* Bit 0	LED On/Off */
+#define HOST_LED_S		0
+#define HOST_DATA_WIDTH_M	BITFIELD_MASK(1)	/* Bit 1	4 bit enable */
+#define HOST_DATA_WIDTH_S	1
+#define HOST_HI_SPEED_EN_M	BITFIELD_MASK(1)	/* Bit 2	High speed vs low speed */
+#define HOST_DMA_SEL_S		3
+#define HOST_DMA_SEL_M		BITFIELD_MASK(2)	/* Bit 4:3	DMA Select */
+#define HOST_HI_SPEED_EN_S	2
+
+/* Host Control2: */
+#define HOSTCtrl2_PRESVAL_EN_M	BITFIELD_MASK(1)	/* 1 bit */
+#define HOSTCtrl2_PRESVAL_EN_S	15					/* bit# */
+
+#define HOSTCtrl2_ASYINT_EN_M	BITFIELD_MASK(1)	/* 1 bit */
+#define HOSTCtrl2_ASYINT_EN_S	14					/* bit# */
+
+#define HOSTCtrl2_SAMPCLK_SEL_M	BITFIELD_MASK(1)	/* 1 bit */
+#define HOSTCtrl2_SAMPCLK_SEL_S	7					/* bit# */
+
+#define HOSTCtrl2_EXEC_TUNING_M	BITFIELD_MASK(1)	/* 1 bit */
+#define HOSTCtrl2_EXEC_TUNING_S	6					/* bit# */
+
+#define HOSTCtrl2_DRIVSTRENGTH_SEL_M	BITFIELD_MASK(2)	/* 2 bit */
+#define HOSTCtrl2_DRIVSTRENGTH_SEL_S	4					/* bit# */
+
+#define HOSTCtrl2_1_8SIG_EN_M	BITFIELD_MASK(1)	/* 1 bit */
+#define HOSTCtrl2_1_8SIG_EN_S	3					/* bit# */
+
+#define HOSTCtrl2_UHSMODE_SEL_M	BITFIELD_MASK(3)	/* 3 bit */
+#define HOSTCtrl2_UHSMODE_SEL_S	0					/* bit# */
+
+#define HOST_CONTR_VER_2		(1)
+#define HOST_CONTR_VER_3		(2)
+
+/* misc defines */
+#define SD1_MODE 		0x1	/* SD Host Cntrlr Spec */
+#define SD4_MODE 		0x2	/* SD Host Cntrlr Spec */
+
+/* SD_PwrCntrl: Offset 0x029, size = 1 bytes */
+#define PWR_BUS_EN_M		BITFIELD_MASK(1)	/* Bit 0	Power the bus */
+#define PWR_BUS_EN_S		0
+#define PWR_VOLTS_M		BITFIELD_MASK(3)	/* Bit [1-3]	Voltage Select */
+#define PWR_VOLTS_S		1
+
+/* SD_SoftwareReset: Offset 0x02F, size = 1 byte */
+#define SW_RESET_ALL_M		BITFIELD_MASK(1)	/* Bit 0	Reset All */
+#define SW_RESET_ALL_S		0
+#define SW_RESET_CMD_M		BITFIELD_MASK(1)	/* Bit 1	CMD Line Reset */
+#define SW_RESET_CMD_S		1
+#define SW_RESET_DAT_M		BITFIELD_MASK(1)	/* Bit 2	DAT Line Reset */
+#define SW_RESET_DAT_S		2
+
+/* SD_IntrStatus: Offset 0x030, size = 2 bytes */
+/* Defs also serve SD_IntrStatusEnable and SD_IntrSignalEnable */
+#define INTSTAT_CMD_COMPLETE_M		BITFIELD_MASK(1)	/* Bit 0 */
+#define INTSTAT_CMD_COMPLETE_S		0
+#define INTSTAT_XFER_COMPLETE_M		BITFIELD_MASK(1)
+#define INTSTAT_XFER_COMPLETE_S		1
+#define INTSTAT_BLOCK_GAP_EVENT_M	BITFIELD_MASK(1)
+#define INTSTAT_BLOCK_GAP_EVENT_S	2
+#define INTSTAT_DMA_INT_M		BITFIELD_MASK(1)
+#define INTSTAT_DMA_INT_S		3
+#define INTSTAT_BUF_WRITE_READY_M	BITFIELD_MASK(1)
+#define INTSTAT_BUF_WRITE_READY_S	4
+#define INTSTAT_BUF_READ_READY_M	BITFIELD_MASK(1)
+#define INTSTAT_BUF_READ_READY_S	5
+#define INTSTAT_CARD_INSERTION_M	BITFIELD_MASK(1)
+#define INTSTAT_CARD_INSERTION_S	6
+#define INTSTAT_CARD_REMOVAL_M		BITFIELD_MASK(1)
+#define INTSTAT_CARD_REMOVAL_S		7
+#define INTSTAT_CARD_INT_M		BITFIELD_MASK(1)
+#define INTSTAT_CARD_INT_S		8
+#define INTSTAT_RETUNING_INT_M		BITFIELD_MASK(1)	/* Bit 12 */
+#define INTSTAT_RETUNING_INT_S		12
+#define INTSTAT_ERROR_INT_M		BITFIELD_MASK(1)	/* Bit 15 */
+#define INTSTAT_ERROR_INT_S		15
+
+/* SD_ErrorIntrStatus: Offset 0x032, size = 2 bytes */
+/* Defs also serve SD_ErrorIntrStatusEnable and SD_ErrorIntrSignalEnable */
+#define ERRINT_CMD_TIMEOUT_M		BITFIELD_MASK(1)
+#define ERRINT_CMD_TIMEOUT_S		0
+#define ERRINT_CMD_CRC_M		BITFIELD_MASK(1)
+#define ERRINT_CMD_CRC_S		1
+#define ERRINT_CMD_ENDBIT_M		BITFIELD_MASK(1)
+#define ERRINT_CMD_ENDBIT_S		2
+#define ERRINT_CMD_INDEX_M		BITFIELD_MASK(1)
+#define ERRINT_CMD_INDEX_S		3
+#define ERRINT_DATA_TIMEOUT_M		BITFIELD_MASK(1)
+#define ERRINT_DATA_TIMEOUT_S		4
+#define ERRINT_DATA_CRC_M		BITFIELD_MASK(1)
+#define ERRINT_DATA_CRC_S		5
+#define ERRINT_DATA_ENDBIT_M		BITFIELD_MASK(1)
+#define ERRINT_DATA_ENDBIT_S		6
+#define ERRINT_CURRENT_LIMIT_M		BITFIELD_MASK(1)
+#define ERRINT_CURRENT_LIMIT_S		7
+#define ERRINT_AUTO_CMD12_M		BITFIELD_MASK(1)
+#define ERRINT_AUTO_CMD12_S		8
+#define ERRINT_VENDOR_M			BITFIELD_MASK(4)
+#define ERRINT_VENDOR_S			12
+#define ERRINT_ADMA_M			BITFIELD_MASK(1)
+#define ERRINT_ADMA_S			9
+
+/* Also provide definitions in "normal" form to allow combined masks */
+#define ERRINT_CMD_TIMEOUT_BIT		0x0001
+#define ERRINT_CMD_CRC_BIT		0x0002
+#define ERRINT_CMD_ENDBIT_BIT		0x0004
+#define ERRINT_CMD_INDEX_BIT		0x0008
+#define ERRINT_DATA_TIMEOUT_BIT		0x0010
+#define ERRINT_DATA_CRC_BIT		0x0020
+#define ERRINT_DATA_ENDBIT_BIT		0x0040
+#define ERRINT_CURRENT_LIMIT_BIT	0x0080
+#define ERRINT_AUTO_CMD12_BIT		0x0100
+#define ERRINT_ADMA_BIT		0x0200
+
+/* Masks to select CMD vs. DATA errors */
+#define ERRINT_CMD_ERRS		(ERRINT_CMD_TIMEOUT_BIT | ERRINT_CMD_CRC_BIT |\
+				 ERRINT_CMD_ENDBIT_BIT | ERRINT_CMD_INDEX_BIT)
+#define ERRINT_DATA_ERRS	(ERRINT_DATA_TIMEOUT_BIT | ERRINT_DATA_CRC_BIT |\
+				 ERRINT_DATA_ENDBIT_BIT | ERRINT_ADMA_BIT)
+#define ERRINT_TRANSFER_ERRS	(ERRINT_CMD_ERRS | ERRINT_DATA_ERRS)
+
+/* SD_WakeupCntr_BlockGapCntrl : Offset 0x02A , size = bytes */
+/* SD_ClockCntrl	: Offset 0x02C , size = bytes */
+/* SD_SoftwareReset_TimeoutCntrl 	: Offset 0x02E , size = bytes */
+/* SD_IntrStatus	: Offset 0x030 , size = bytes */
+/* SD_ErrorIntrStatus 	: Offset 0x032 , size = bytes */
+/* SD_IntrStatusEnable	: Offset 0x034 , size = bytes */
+/* SD_ErrorIntrStatusEnable : Offset 0x036 , size = bytes */
+/* SD_IntrSignalEnable	: Offset 0x038 , size = bytes */
+/* SD_ErrorIntrSignalEnable : Offset 0x03A , size = bytes */
+/* SD_CMD12ErrorStatus	: Offset 0x03C , size = bytes */
+/* SD_Capabilities	: Offset 0x040 , size = bytes */
+/* SD_MaxCurCap		: Offset 0x048 , size = bytes */
+/* SD_MaxCurCap_Reserved: Offset 0x04C , size = bytes */
+/* SD_SlotInterruptStatus: Offset 0x0FC , size = bytes */
+/* SD_HostControllerVersion : Offset 0x0FE , size = bytes */
+
+/* SDIO Host Control Register DMA Mode Definitions */
+#define SDIOH_SDMA_MODE			0
+#define SDIOH_ADMA1_MODE		1
+#define SDIOH_ADMA2_MODE		2
+#define SDIOH_ADMA2_64_MODE		3
+
+#define ADMA2_ATTRIBUTE_VALID		(1 << 0)	/* ADMA Descriptor line valid */
+#define ADMA2_ATTRIBUTE_END			(1 << 1)	/* End of Descriptor */
+#define ADMA2_ATTRIBUTE_INT			(1 << 2)	/* Interrupt when line is done */
+#define ADMA2_ATTRIBUTE_ACT_NOP		(0 << 4)	/* Skip current line, go to next. */
+#define ADMA2_ATTRIBUTE_ACT_RSV		(1 << 4)	/* Same as NOP */
+#define ADMA1_ATTRIBUTE_ACT_SET		(1 << 4)	/* ADMA1 Only - set transfer length */
+#define ADMA2_ATTRIBUTE_ACT_TRAN	(2 << 4)	/* Transfer Data of one descriptor line. */
+#define ADMA2_ATTRIBUTE_ACT_LINK	(3 << 4)	/* Link Descriptor */
+
+/* ADMA2 Descriptor Table Entry for 32-bit Address */
+typedef struct adma2_dscr_32b {
+	uint32 len_attr;
+	uint32 phys_addr;
+} adma2_dscr_32b_t;
+
+/* ADMA1 Descriptor Table Entry */
+typedef struct adma1_dscr {
+	uint32 phys_addr_attr;
+} adma1_dscr_t;
+
+#endif /* _SDIOH_H */
diff --git a/drivers/net/wireless/bcmdhd/include/sdiovar.h b/drivers/net/wireless/bcmdhd/include/sdiovar.h
new file mode 100644
index 0000000..5335ea1
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sdiovar.h
@@ -0,0 +1,58 @@
+/*
+ * Structure used by apps whose drivers access SDIO drivers.
+ * Pulled out separately so dhdu and wlu can both use it.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sdiovar.h 241182 2011-02-17 21:50:03Z $
+ */
+
+#ifndef _sdiovar_h_
+#define _sdiovar_h_
+
+#include <typedefs.h>
+
+/* require default structure packing */
+#define BWL_DEFAULT_PACKING
+#include <packed_section_start.h>
+
+typedef struct sdreg {
+	int func;
+	int offset;
+	int value;
+} sdreg_t;
+
+/* Common msglevel constants */
+#define SDH_ERROR_VAL		0x0001	/* Error */
+#define SDH_TRACE_VAL		0x0002	/* Trace */
+#define SDH_INFO_VAL		0x0004	/* Info */
+#define SDH_DEBUG_VAL		0x0008	/* Debug */
+#define SDH_DATA_VAL		0x0010	/* Data */
+#define SDH_CTRL_VAL		0x0020	/* Control Regs */
+#define SDH_LOG_VAL		0x0040	/* Enable bcmlog */
+#define SDH_DMA_VAL		0x0080	/* DMA */
+
+#define NUM_PREV_TRANSACTIONS	16
+
+
+#include <packed_section_end.h>
+
+#endif /* _sdiovar_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/siutils.h b/drivers/net/wireless/bcmdhd/include/siutils.h
new file mode 100644
index 0000000..bf51f8f
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/siutils.h
@@ -0,0 +1,588 @@
+/*
+ * Misc utility routines for accessing the SOC Interconnects
+ * of Broadcom HNBU chips.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: siutils.h 474902 2014-05-02 18:31:33Z $
+ */
+
+#ifndef	_siutils_h_
+#define	_siutils_h_
+
+#ifdef SR_DEBUG
+#include "wlioctl.h"
+#endif /* SR_DEBUG */
+
+
+/*
+ * Data structure to export all chip specific common variables
+ *   public (read-only) portion of siutils handle returned by si_attach()/si_kattach()
+ */
+struct si_pub {
+	uint	socitype;		/* SOCI_SB, SOCI_AI */
+
+	uint	bustype;		/* SI_BUS, PCI_BUS */
+	uint	buscoretype;		/* PCI_CORE_ID, PCIE_CORE_ID, PCMCIA_CORE_ID */
+	uint	buscorerev;		/* buscore rev */
+	uint	buscoreidx;		/* buscore index */
+	int	ccrev;			/* chip common core rev */
+	uint32	cccaps;			/* chip common capabilities */
+	uint32  cccaps_ext;			/* chip common capabilities extension */
+	int	pmurev;			/* pmu core rev */
+	uint32	pmucaps;		/* pmu capabilities */
+	uint	boardtype;		/* board type */
+	uint    boardrev;               /* board rev */
+	uint	boardvendor;		/* board vendor */
+	uint	boardflags;		/* board flags */
+	uint	boardflags2;		/* board flags2 */
+	uint	chip;			/* chip number */
+	uint	chiprev;		/* chip revision */
+	uint	chippkg;		/* chip package option */
+	uint32	chipst;			/* chip status */
+	bool	issim;			/* chip is in simulation or emulation */
+	uint    socirev;		/* SOC interconnect rev */
+	bool	pci_pr32414;
+
+};
+
+/* for HIGH_ONLY driver, the si_t must be writable to allow states sync from BMAC to HIGH driver
+ * for monolithic driver, it is readonly to prevent accident change
+ */
+typedef const struct si_pub si_t;
+
+/*
+ * Many of the routines below take an 'sih' handle as their first arg.
+ * Allocate this by calling si_attach().  Free it by calling si_detach().
+ * At any one time, the sih is logically focused on one particular si core
+ * (the "current core").
+ * Use si_setcore() or si_setcoreidx() to change the association to another core.
+ */
+#define	SI_OSH		NULL	/* Use for si_kattach when no osh is available */
+
+#define	BADIDX		(SI_MAXCORES + 1)
+
+/* clkctl xtal what flags */
+#define	XTAL			0x1	/* primary crystal oscillator (2050) */
+#define	PLL			0x2	/* main chip pll */
+
+/* clkctl clk mode */
+#define	CLK_FAST		0	/* force fast (pll) clock */
+#define	CLK_DYNAMIC		2	/* enable dynamic clock control */
+
+/* GPIO usage priorities */
+#define GPIO_DRV_PRIORITY	0	/* Driver */
+#define GPIO_APP_PRIORITY	1	/* Application */
+#define GPIO_HI_PRIORITY	2	/* Highest priority. Ignore GPIO reservation */
+
+/* GPIO pull up/down */
+#define GPIO_PULLUP		0
+#define GPIO_PULLDN		1
+
+/* GPIO event regtype */
+#define GPIO_REGEVT		0	/* GPIO register event */
+#define GPIO_REGEVT_INTMSK	1	/* GPIO register event int mask */
+#define GPIO_REGEVT_INTPOL	2	/* GPIO register event int polarity */
+
+/* device path */
+#define SI_DEVPATH_BUFSZ	16	/* min buffer size in bytes */
+
+/* SI routine enumeration: to be used by update function with multiple hooks */
+#define	SI_DOATTACH	1
+#define SI_PCIDOWN	2	/* wireless interface is down */
+#define SI_PCIUP	3	/* wireless interface is up */
+
+#ifdef SR_DEBUG
+#define PMU_RES		31
+#endif /* SR_DEBUG */
+
+#define	ISSIM_ENAB(sih)	FALSE
+
+/* PMU clock/power control */
+#if defined(BCMPMUCTL)
+#define PMUCTL_ENAB(sih)	(BCMPMUCTL)
+#else
+#define PMUCTL_ENAB(sih)	((sih)->cccaps & CC_CAP_PMU)
+#endif
+
+#define AOB_ENAB(sih)	((sih)->ccrev >= 35 ? \
+			((sih)->cccaps_ext & CC_CAP_EXT_AOB_PRESENT) : 0)
+
+/* chipcommon clock/power control (exclusive with PMU's) */
+#if defined(BCMPMUCTL) && BCMPMUCTL
+#define CCCTL_ENAB(sih)		(0)
+#define CCPLL_ENAB(sih)		(0)
+#else
+#define CCCTL_ENAB(sih)		((sih)->cccaps & CC_CAP_PWR_CTL)
+#define CCPLL_ENAB(sih)		((sih)->cccaps & CC_CAP_PLL_MASK)
+#endif
+
+typedef void (*gpio_handler_t)(uint32 stat, void *arg);
+typedef void (*gci_gpio_handler_t)(uint32 stat, void *arg);
+/* External BT Coex enable mask */
+#define CC_BTCOEX_EN_MASK  0x01
+/* External PA enable mask */
+#define GPIO_CTRL_EPA_EN_MASK 0x40
+/* WL/BT control enable mask */
+#define GPIO_CTRL_5_6_EN_MASK 0x60
+#define GPIO_CTRL_7_6_EN_MASK 0xC0
+#define GPIO_OUT_7_EN_MASK 0x80
+
+
+/* CR4 specific defines used by the host driver */
+#define SI_CR4_CAP			(0x04)
+#define SI_CR4_BANKIDX		(0x40)
+#define SI_CR4_BANKINFO		(0x44)
+#define SI_CR4_BANKPDA		(0x4C)
+
+#define	ARMCR4_TCBBNB_MASK	0xf0
+#define	ARMCR4_TCBBNB_SHIFT	4
+#define	ARMCR4_TCBANB_MASK	0xf
+#define	ARMCR4_TCBANB_SHIFT	0
+
+#define	SICF_CPUHALT		(0x0020)
+#define	ARMCR4_BSZ_MASK		0x3f
+#define	ARMCR4_BSZ_MULT		8192
+
+#include <osl_decl.h>
+/* === exported functions === */
+extern si_t *si_attach(uint pcidev, osl_t *osh, void *regs, uint bustype,
+                       void *sdh, char **vars, uint *varsz);
+extern si_t *si_kattach(osl_t *osh);
+extern void si_detach(si_t *sih);
+extern bool si_pci_war16165(si_t *sih);
+extern void *
+si_d11_switch_addrbase(si_t *sih, uint coreunit);
+extern uint si_corelist(si_t *sih, uint coreid[]);
+extern uint si_coreid(si_t *sih);
+extern uint si_flag(si_t *sih);
+extern uint si_flag_alt(si_t *sih);
+extern uint si_intflag(si_t *sih);
+extern uint si_coreidx(si_t *sih);
+extern uint si_coreunit(si_t *sih);
+extern uint si_corevendor(si_t *sih);
+extern uint si_corerev(si_t *sih);
+extern void *si_osh(si_t *sih);
+extern void si_setosh(si_t *sih, osl_t *osh);
+extern uint si_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val);
+extern uint si_pmu_corereg(si_t *sih, uint32 idx, uint regoff, uint mask, uint val);
+extern uint32 *si_corereg_addr(si_t *sih, uint coreidx, uint regoff);
+extern void *si_coreregs(si_t *sih);
+extern uint si_wrapperreg(si_t *sih, uint32 offset, uint32 mask, uint32 val);
+extern uint si_core_wrapperreg(si_t *sih, uint32 coreidx, uint32 offset, uint32 mask, uint32 val);
+extern void *si_wrapperregs(si_t *sih);
+extern uint32 si_core_cflags(si_t *sih, uint32 mask, uint32 val);
+extern void si_core_cflags_wo(si_t *sih, uint32 mask, uint32 val);
+extern uint32 si_core_sflags(si_t *sih, uint32 mask, uint32 val);
+extern bool si_iscoreup(si_t *sih);
+extern uint si_numcoreunits(si_t *sih, uint coreid);
+extern uint si_numd11coreunits(si_t *sih);
+extern uint si_findcoreidx(si_t *sih, uint coreid, uint coreunit);
+extern void *si_setcoreidx(si_t *sih, uint coreidx);
+extern void *si_setcore(si_t *sih, uint coreid, uint coreunit);
+extern void *si_switch_core(si_t *sih, uint coreid, uint *origidx, uint *intr_val);
+extern void si_restore_core(si_t *sih, uint coreid, uint intr_val);
+extern int si_numaddrspaces(si_t *sih);
+extern uint32 si_addrspace(si_t *sih, uint asidx);
+extern uint32 si_addrspacesize(si_t *sih, uint asidx);
+extern void si_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size);
+extern int si_corebist(si_t *sih);
+extern void si_core_reset(si_t *sih, uint32 bits, uint32 resetbits);
+extern void si_core_disable(si_t *sih, uint32 bits);
+extern uint32 si_clock_rate(uint32 pll_type, uint32 n, uint32 m);
+extern uint si_chip_hostif(si_t *sih);
+extern bool si_read_pmu_autopll(si_t *sih);
+extern uint32 si_clock(si_t *sih);
+extern uint32 si_alp_clock(si_t *sih); /* returns [Hz] units */
+extern uint32 si_ilp_clock(si_t *sih); /* returns [Hz] units */
+extern void si_pci_setup(si_t *sih, uint coremask);
+extern void si_pcmcia_init(si_t *sih);
+extern void si_setint(si_t *sih, int siflag);
+extern bool si_backplane64(si_t *sih);
+extern void si_register_intr_callback(si_t *sih, void *intrsoff_fn, void *intrsrestore_fn,
+	void *intrsenabled_fn, void *intr_arg);
+extern void si_deregister_intr_callback(si_t *sih);
+extern void si_clkctl_init(si_t *sih);
+extern uint16 si_clkctl_fast_pwrup_delay(si_t *sih);
+extern bool si_clkctl_cc(si_t *sih, uint mode);
+extern int si_clkctl_xtal(si_t *sih, uint what, bool on);
+extern uint32 si_gpiotimerval(si_t *sih, uint32 mask, uint32 val);
+extern void si_btcgpiowar(si_t *sih);
+extern bool si_deviceremoved(si_t *sih);
+extern uint32 si_socram_size(si_t *sih);
+extern uint32 si_socdevram_size(si_t *sih);
+extern uint32 si_socram_srmem_size(si_t *sih);
+extern void si_socdevram(si_t *sih, bool set, uint8 *ennable, uint8 *protect, uint8 *remap);
+extern bool si_socdevram_pkg(si_t *sih);
+extern bool si_socdevram_remap_isenb(si_t *sih);
+extern uint32 si_socdevram_remap_size(si_t *sih);
+
+extern void si_watchdog(si_t *sih, uint ticks);
+extern void si_watchdog_ms(si_t *sih, uint32 ms);
+extern uint32 si_watchdog_msticks(void);
+extern void *si_gpiosetcore(si_t *sih);
+extern uint32 si_gpiocontrol(si_t *sih, uint32 mask, uint32 val, uint8 priority);
+extern uint32 si_gpioouten(si_t *sih, uint32 mask, uint32 val, uint8 priority);
+extern uint32 si_gpioout(si_t *sih, uint32 mask, uint32 val, uint8 priority);
+extern uint32 si_gpioin(si_t *sih);
+extern uint32 si_gpiointpolarity(si_t *sih, uint32 mask, uint32 val, uint8 priority);
+extern uint32 si_gpiointmask(si_t *sih, uint32 mask, uint32 val, uint8 priority);
+extern uint32 si_gpioled(si_t *sih, uint32 mask, uint32 val);
+extern uint32 si_gpioreserve(si_t *sih, uint32 gpio_num, uint8 priority);
+extern uint32 si_gpiorelease(si_t *sih, uint32 gpio_num, uint8 priority);
+extern uint32 si_gpiopull(si_t *sih, bool updown, uint32 mask, uint32 val);
+extern uint32 si_gpioevent(si_t *sih, uint regtype, uint32 mask, uint32 val);
+extern uint32 si_gpio_int_enable(si_t *sih, bool enable);
+extern void si_gci_uart_init(si_t *sih, osl_t *osh, uint8 seci_mode);
+extern void si_gci_enable_gpio(si_t *sih, uint8 gpio, uint32 mask, uint32 value);
+extern uint8 si_gci_host_wake_gpio_init(si_t *sih);
+extern void si_gci_host_wake_gpio_enable(si_t *sih, uint8 gpio, bool state);
+
+/* GPIO event handlers */
+extern void *si_gpio_handler_register(si_t *sih, uint32 e, bool lev, gpio_handler_t cb, void *arg);
+extern void si_gpio_handler_unregister(si_t *sih, void* gpioh);
+extern void si_gpio_handler_process(si_t *sih);
+
+/* GCI interrupt handlers */
+extern void si_gci_handler_process(si_t *sih);
+
+/* GCI GPIO event handlers */
+extern void *si_gci_gpioint_handler_register(si_t *sih, uint8 gpio, uint8 sts,
+	gci_gpio_handler_t cb, void *arg);
+extern void si_gci_gpioint_handler_unregister(si_t *sih, void* gci_i);
+extern uint8 si_gci_gpio_status(si_t *sih, uint8 gci_gpio, uint8 mask, uint8 value);
+
+/* Wake-on-wireless-LAN (WOWL) */
+extern bool si_pci_pmecap(si_t *sih);
+extern bool si_pci_fastpmecap(struct osl_info *osh);
+extern bool si_pci_pmestat(si_t *sih);
+extern void si_pci_pmeclr(si_t *sih);
+extern void si_pci_pmeen(si_t *sih);
+extern void si_pci_pmestatclr(si_t *sih);
+extern uint si_pcie_readreg(void *sih, uint addrtype, uint offset);
+extern uint si_pcie_writereg(void *sih, uint addrtype, uint offset, uint val);
+
+
+#ifdef BCMSDIO
+extern void si_sdio_init(si_t *sih);
+#endif
+
+extern uint16 si_d11_devid(si_t *sih);
+extern int si_corepciid(si_t *sih, uint func, uint16 *pcivendor, uint16 *pcidevice,
+	uint8 *pciclass, uint8 *pcisubclass, uint8 *pciprogif, uint8 *pciheader);
+
+#define si_eci(sih) 0
+static INLINE void * si_eci_init(si_t *sih) {return NULL;}
+#define si_eci_notify_bt(sih, type, val)  (0)
+#define si_seci(sih) 0
+#define si_seci_upd(sih, a)	do {} while (0)
+static INLINE void * si_seci_init(si_t *sih, uint8 use_seci) {return NULL;}
+static INLINE void * si_gci_init(si_t *sih) {return NULL;}
+#define si_seci_down(sih) do {} while (0)
+#define si_gci(sih) 0
+
+/* OTP status */
+extern bool si_is_otp_disabled(si_t *sih);
+extern bool si_is_otp_powered(si_t *sih);
+extern void si_otp_power(si_t *sih, bool on, uint32* min_res_mask);
+
+/* SPROM availability */
+extern bool si_is_sprom_available(si_t *sih);
+extern bool si_is_sprom_enabled(si_t *sih);
+extern void si_sprom_enable(si_t *sih, bool enable);
+
+/* OTP/SROM CIS stuff */
+extern int si_cis_source(si_t *sih);
+#define CIS_DEFAULT	0
+#define CIS_SROM	1
+#define CIS_OTP		2
+
+/* Fab-id information */
+#define	DEFAULT_FAB	0x0	/* Original/first fab used for this chip */
+#define	CSM_FAB7	0x1	/* CSM Fab7 chip */
+#define	TSMC_FAB12	0x2	/* TSMC Fab12/Fab14 chip */
+#define	SMIC_FAB4	0x3	/* SMIC Fab4 chip */
+
+extern int si_otp_fabid(si_t *sih, uint16 *fabid, bool rw);
+extern uint16 si_fabid(si_t *sih);
+extern uint16 si_chipid(si_t *sih);
+
+/*
+ * Build device path. Path size must be >= SI_DEVPATH_BUFSZ.
+ * The returned path is NULL terminated and has trailing '/'.
+ * Return 0 on success, nonzero otherwise.
+ */
+extern int si_devpath(si_t *sih, char *path, int size);
+extern int si_devpath_pcie(si_t *sih, char *path, int size);
+/* Read variable with prepending the devpath to the name */
+extern char *si_getdevpathvar(si_t *sih, const char *name);
+extern int si_getdevpathintvar(si_t *sih, const char *name);
+extern char *si_coded_devpathvar(si_t *sih, char *varname, int var_len, const char *name);
+
+
+extern uint8 si_pcieclkreq(si_t *sih, uint32 mask, uint32 val);
+extern uint32 si_pcielcreg(si_t *sih, uint32 mask, uint32 val);
+extern uint8 si_pcieltrenable(si_t *sih, uint32 mask, uint32 val);
+extern uint8 si_pcieobffenable(si_t *sih, uint32 mask, uint32 val);
+extern uint32 si_pcieltr_reg(si_t *sih, uint32 reg, uint32 mask, uint32 val);
+extern uint32 si_pcieltrspacing_reg(si_t *sih, uint32 mask, uint32 val);
+extern uint32 si_pcieltrhysteresiscnt_reg(si_t *sih, uint32 mask, uint32 val);
+extern void si_pcie_set_error_injection(si_t *sih, uint32 mode);
+extern void si_pcie_set_L1substate(si_t *sih, uint32 substate);
+extern uint32 si_pcie_get_L1substate(si_t *sih);
+extern void si_war42780_clkreq(si_t *sih, bool clkreq);
+extern void si_pci_down(si_t *sih);
+extern void si_pci_up(si_t *sih);
+extern void si_pci_sleep(si_t *sih);
+extern void si_pcie_war_ovr_update(si_t *sih, uint8 aspm);
+extern void si_pcie_power_save_enable(si_t *sih, bool enable);
+extern void si_pcie_extendL1timer(si_t *sih, bool extend);
+extern int si_pci_fixcfg(si_t *sih);
+extern void si_chippkg_set(si_t *sih, uint);
+
+extern void si_chipcontrl_btshd0_4331(si_t *sih, bool on);
+extern void si_chipcontrl_restore(si_t *sih, uint32 val);
+extern uint32 si_chipcontrl_read(si_t *sih);
+extern void si_chipcontrl_epa4331(si_t *sih, bool on);
+extern void si_chipcontrl_epa4331_wowl(si_t *sih, bool enter_wowl);
+extern void si_chipcontrl_srom4360(si_t *sih, bool on);
+/* Enable BT-COEX & Ex-PA for 4313 */
+extern void si_epa_4313war(si_t *sih);
+extern void si_btc_enable_chipcontrol(si_t *sih);
+/* BT/WL selection for 4313 bt combo >= P250 boards */
+extern void si_btcombo_p250_4313_war(si_t *sih);
+extern void si_btcombo_43228_war(si_t *sih);
+extern void si_clk_pmu_htavail_set(si_t *sih, bool set_clear);
+extern void si_pmu_synth_pwrsw_4313_war(si_t *sih);
+extern uint si_pll_reset(si_t *sih);
+/* === debug routines === */
+
+extern bool si_taclear(si_t *sih, bool details);
+
+
+#if defined(BCMDBG_PHYDUMP)
+extern void si_dumpregs(si_t *sih, struct bcmstrbuf *b);
+#endif 
+
+extern uint32 si_ccreg(si_t *sih, uint32 offset, uint32 mask, uint32 val);
+extern uint32 si_pciereg(si_t *sih, uint32 offset, uint32 mask, uint32 val, uint type);
+#ifdef SR_DEBUG
+extern void si_dump_pmu(si_t *sih, void *pmu_var);
+extern void si_pmu_keep_on(si_t *sih, int32 int_val);
+extern uint32 si_pmu_keep_on_get(si_t *sih);
+extern uint32 si_power_island_set(si_t *sih, uint32 int_val);
+extern uint32 si_power_island_get(si_t *sih);
+#endif /* SR_DEBUG */
+extern uint32 si_pcieserdesreg(si_t *sih, uint32 mdioslave, uint32 offset, uint32 mask, uint32 val);
+extern void si_pcie_set_request_size(si_t *sih, uint16 size);
+extern uint16 si_pcie_get_request_size(si_t *sih);
+extern void si_pcie_set_maxpayload_size(si_t *sih, uint16 size);
+extern uint16 si_pcie_get_maxpayload_size(si_t *sih);
+extern uint16 si_pcie_get_ssid(si_t *sih);
+extern uint32 si_pcie_get_bar0(si_t *sih);
+extern int si_pcie_configspace_cache(si_t *sih);
+extern int si_pcie_configspace_restore(si_t *sih);
+extern int si_pcie_configspace_get(si_t *sih, uint8 *buf, uint size);
+
+char *si_getnvramflvar(si_t *sih, const char *name);
+
+
+extern uint32 si_tcm_size(si_t *sih);
+extern bool si_has_flops(si_t *sih);
+
+extern int si_set_sromctl(si_t *sih, uint32 value);
+extern uint32 si_get_sromctl(si_t *sih);
+
+extern uint32 si_gci_direct(si_t *sih, uint offset, uint32 mask, uint32 val);
+extern uint32 si_gci_indirect(si_t *sih, uint regidx, uint offset, uint32 mask, uint32 val);
+extern uint32 si_gci_output(si_t *sih, uint reg, uint32 mask, uint32 val);
+extern uint32 si_gci_input(si_t *sih, uint reg);
+extern uint32 si_gci_int_enable(si_t *sih, bool enable);
+extern void si_gci_reset(si_t *sih);
+#ifdef BCMLTECOEX
+extern void si_gci_seci_init(si_t *sih);
+extern void si_ercx_init(si_t *sih, uint32 ltecx_mux, uint32 ltecx_padnum,
+	uint32 ltecx_fnsel, uint32 ltecx_gcigpio);
+extern void si_wci2_init(si_t *sih, uint8 baudrate, uint32 ltecx_mux, uint32 ltecx_padnum,
+	uint32 ltecx_fnsel, uint32 ltecx_gcigpio);
+#endif /* BCMLTECOEX */
+extern void si_gci_set_functionsel(si_t *sih, uint32 pin, uint8 fnsel);
+extern uint32 si_gci_get_functionsel(si_t *sih, uint32 pin);
+extern void si_gci_clear_functionsel(si_t *sih, uint8 fnsel);
+extern uint8 si_gci_get_chipctrlreg_idx(uint32 pin, uint32 *regidx, uint32 *pos);
+extern uint32 si_gci_chipcontrol(si_t *sih, uint reg, uint32 mask, uint32 val);
+extern uint32 si_gci_chipstatus(si_t *sih, uint reg);
+extern uint16 si_cc_get_reg16(uint32 reg_offs);
+extern uint32 si_cc_get_reg32(uint32 reg_offs);
+extern uint32 si_cc_set_reg32(uint32 reg_offs, uint32 val);
+extern uint32 si_gci_preinit_upd_indirect(uint32 regidx, uint32 setval, uint32 mask);
+extern uint8 si_enable_device_wake(si_t *sih, uint8 *wake_status, uint8 *cur_status);
+extern void si_swdenable(si_t *sih, uint32 swdflag);
+
+#define CHIPCTRLREG1 0x1
+#define CHIPCTRLREG2 0x2
+#define CHIPCTRLREG3 0x3
+#define CHIPCTRLREG4 0x4
+#define CHIPCTRLREG5 0x5
+#define MINRESMASKREG 0x618
+#define MAXRESMASKREG 0x61c
+#define CHIPCTRLADDR 0x650
+#define CHIPCTRLDATA 0x654
+#define RSRCTABLEADDR 0x620
+#define RSRCUPDWNTIME 0x628
+#define PMUREG_RESREQ_MASK 0x68c
+
+void si_update_masks(si_t *sih);
+void si_force_islanding(si_t *sih, bool enable);
+extern uint32 si_pmu_res_req_timer_clr(si_t *sih);
+extern void si_pmu_rfldo(si_t *sih, bool on);
+extern void si_survive_perst_war(si_t *sih, bool reset, uint32 sperst_mask, uint32 spert_val);
+extern uint32 si_pcie_set_ctrlreg(si_t *sih, uint32 sperst_mask, uint32 spert_val);
+extern void si_pcie_ltr_war(si_t *sih);
+extern void si_pcie_hw_LTR_war(si_t *sih);
+extern void si_pcie_hw_L1SS_war(si_t *sih);
+extern void si_pciedev_crwlpciegen2(si_t *sih);
+extern void si_pcie_prep_D3(si_t *sih, bool enter_D3);
+extern void si_pciedev_reg_pm_clk_period(si_t *sih);
+
+#ifdef WLRSDB
+extern void si_d11rsdb_core_disable(si_t *sih, uint32 bits);
+extern void si_d11rsdb_core_reset(si_t *sih, uint32 bits, uint32 resetbits);
+#endif
+
+
+/* Macro to enable clock gating changes in different cores */
+#define MEM_CLK_GATE_BIT 	5
+#define GCI_CLK_GATE_BIT 	18
+
+#define USBAPP_CLK_BIT		0
+#define PCIE_CLK_BIT		3
+#define ARMCR4_DBG_CLK_BIT	4
+#define SAMPLE_SYNC_CLK_BIT 	17
+#define PCIE_TL_CLK_BIT		18
+#define HQ_REQ_BIT		24
+#define PLL_DIV2_BIT_START	9
+#define PLL_DIV2_MASK		(0x37 << PLL_DIV2_BIT_START)
+#define PLL_DIV2_DIS_OP		(0x37 << PLL_DIV2_BIT_START)
+
+#define PMUREG(si, member) \
+	(AOB_ENAB(si) ? \
+		si_corereg_addr(si, si_findcoreidx(si, PMU_CORE_ID, 0), \
+			OFFSETOF(pmuregs_t, member)): \
+		si_corereg_addr(si, SI_CC_IDX, OFFSETOF(chipcregs_t, member)))
+
+#define pmu_corereg(si, cc_idx, member, mask, val) \
+	(AOB_ENAB(si) ? \
+		si_pmu_corereg(si, si_findcoreidx(sih, PMU_CORE_ID, 0), \
+			       OFFSETOF(pmuregs_t, member), mask, val): \
+		si_pmu_corereg(si, cc_idx, OFFSETOF(chipcregs_t, member), mask, val))
+
+/* GCI Macros */
+#define ALLONES_32				0xFFFFFFFF
+#define GCI_CCTL_SECIRST_OFFSET			0 /* SeciReset */
+#define GCI_CCTL_RSTSL_OFFSET			1 /* ResetSeciLogic */
+#define GCI_CCTL_SECIEN_OFFSET			2 /* EnableSeci  */
+#define GCI_CCTL_FSL_OFFSET			3 /* ForceSeciOutLow */
+#define GCI_CCTL_SMODE_OFFSET			4 /* SeciOpMode, 6:4 */
+#define GCI_CCTL_US_OFFSET			7 /* UpdateSeci */
+#define GCI_CCTL_BRKONSLP_OFFSET		8 /* BreakOnSleep */
+#define GCI_CCTL_SILOWTOUT_OFFSET		9 /* SeciInLowTimeout, 10:9 */
+#define GCI_CCTL_RSTOCC_OFFSET			11 /* ResetOffChipCoex */
+#define GCI_CCTL_ARESEND_OFFSET			12 /* AutoBTSigResend */
+#define GCI_CCTL_FGCR_OFFSET			16 /* ForceGciClkReq */
+#define GCI_CCTL_FHCRO_OFFSET			17 /* ForceHWClockReqOff */
+#define GCI_CCTL_FREGCLK_OFFSET			18 /* ForceRegClk */
+#define GCI_CCTL_FSECICLK_OFFSET		19 /* ForceSeciClk */
+#define GCI_CCTL_FGCA_OFFSET			20 /* ForceGciClkAvail */
+#define GCI_CCTL_FGCAV_OFFSET			21 /* ForceGciClkAvailValue */
+#define GCI_CCTL_SCS_OFFSET			24 /* SeciClkStretch, 31:24 */
+
+#define GCI_MODE_UART				0x0
+#define GCI_MODE_SECI				0x1
+#define GCI_MODE_BTSIG				0x2
+#define GCI_MODE_GPIO				0x3
+#define GCI_MODE_MASK				0x7
+
+#define GCI_CCTL_LOWTOUT_DIS			0x0
+#define GCI_CCTL_LOWTOUT_10BIT			0x1
+#define GCI_CCTL_LOWTOUT_20BIT			0x2
+#define GCI_CCTL_LOWTOUT_30BIT			0x3
+#define GCI_CCTL_LOWTOUT_MASK			0x3
+
+#define GCI_CCTL_SCS_DEF			0x19
+#define GCI_CCTL_SCS_MASK			0xFF
+
+#define GCI_SECIIN_MODE_OFFSET			0
+#define GCI_SECIIN_GCIGPIO_OFFSET		4
+#define GCI_SECIIN_RXID2IP_OFFSET		8
+
+#define GCI_SECIOUT_MODE_OFFSET			0
+#define GCI_SECIOUT_GCIGPIO_OFFSET		4
+#define GCI_SECIOUT_SECIINRELATED_OFFSET	16
+
+#define GCI_SECIAUX_RXENABLE_OFFSET		0
+#define GCI_SECIFIFO_RXENABLE_OFFSET		16
+
+#define GCI_SECITX_ENABLE_OFFSET		0
+
+#define GCI_GPIOCTL_INEN_OFFSET			0
+#define GCI_GPIOCTL_OUTEN_OFFSET		1
+#define GCI_GPIOCTL_PDN_OFFSET			4
+
+#define GCI_GPIOIDX_OFFSET			16
+
+#define GCI_LTECX_SECI_ID			0 /* SECI port for LTECX */
+
+/* To access per GCI bit registers */
+#define GCI_REG_WIDTH				32
+
+/* GCI bit positions */
+/* GCI [127:000] = WLAN [127:0] */
+#define GCI_WLAN_IP_ID				0
+#define GCI_WLAN_BEGIN				0
+#define GCI_WLAN_PRIO_POS			(GCI_WLAN_BEGIN + 4)
+
+/* GCI [639:512] = LTE [127:0] */
+#define GCI_LTE_IP_ID				4
+#define GCI_LTE_BEGIN				512
+#define GCI_LTE_FRAMESYNC_POS			(GCI_LTE_BEGIN + 0)
+#define GCI_LTE_RX_POS				(GCI_LTE_BEGIN + 1)
+#define GCI_LTE_TX_POS				(GCI_LTE_BEGIN + 2)
+#define GCI_LTE_AUXRXDVALID_POS			(GCI_LTE_BEGIN + 56)
+
+/* Reg Index corresponding to ECI bit no x of ECI space */
+#define GCI_REGIDX(x)				((x)/GCI_REG_WIDTH)
+/* Bit offset of ECI bit no x in 32-bit words */
+#define GCI_BITOFFSET(x)			((x)%GCI_REG_WIDTH)
+
+/* End - GCI Macros */
+
+#ifdef REROUTE_OOBINT
+#define CC_OOB          0x0
+#define M2MDMA_OOB      0x1
+#define PMU_OOB         0x2
+#define D11_OOB         0x3
+#define SDIOD_OOB       0x4
+#define PMU_OOB_BIT     (0x10 | PMU_OOB)
+#endif /* REROUTE_OOBINT */
+
+
+#endif	/* _siutils_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/spid.h b/drivers/net/wireless/bcmdhd/include/spid.h
new file mode 100644
index 0000000..4dc5191
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/spid.h
@@ -0,0 +1,165 @@
+/*
+ * SPI device spec header file
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: spid.h 358377 2012-09-23 11:30:22Z $
+ */
+
+#ifndef	_SPI_H
+#define	_SPI_H
+
+/*
+ * Brcm SPI Device Register Map.
+ *
+ */
+
+typedef volatile struct {
+	uint8	config;			/* 0x00, len, endian, clock, speed, polarity, wakeup */
+	uint8	response_delay;		/* 0x01, read response delay in bytes (corerev < 3) */
+	uint8	status_enable;		/* 0x02, status-enable, intr with status, response_delay
+					 * function selection, command/data error check
+					 */
+	uint8	reset_bp;		/* 0x03, reset on wlan/bt backplane reset (corerev >= 1) */
+	uint16	intr_reg;		/* 0x04, Intr status register */
+	uint16	intr_en_reg;		/* 0x06, Intr mask register */
+	uint32	status_reg;		/* 0x08, RO, Status bits of last spi transfer */
+	uint16	f1_info_reg;		/* 0x0c, RO, enabled, ready for data transfer, blocksize */
+	uint16	f2_info_reg;		/* 0x0e, RO, enabled, ready for data transfer, blocksize */
+	uint16	f3_info_reg;		/* 0x10, RO, enabled, ready for data transfer, blocksize */
+	uint32	test_read;		/* 0x14, RO 0xfeedbead signature */
+	uint32	test_rw;		/* 0x18, RW */
+	uint8	resp_delay_f0;		/* 0x1c, read resp delay bytes for F0 (corerev >= 3) */
+	uint8	resp_delay_f1;		/* 0x1d, read resp delay bytes for F1 (corerev >= 3) */
+	uint8	resp_delay_f2;		/* 0x1e, read resp delay bytes for F2 (corerev >= 3) */
+	uint8	resp_delay_f3;		/* 0x1f, read resp delay bytes for F3 (corerev >= 3) */
+} spi_regs_t;
+
+/* SPI device register offsets */
+#define SPID_CONFIG			0x00
+#define SPID_RESPONSE_DELAY		0x01
+#define SPID_STATUS_ENABLE		0x02
+#define SPID_RESET_BP			0x03	/* (corerev >= 1) */
+#define SPID_INTR_REG			0x04	/* 16 bits - Interrupt status */
+#define SPID_INTR_EN_REG		0x06	/* 16 bits - Interrupt mask */
+#define SPID_STATUS_REG			0x08	/* 32 bits */
+#define SPID_F1_INFO_REG		0x0C	/* 16 bits */
+#define SPID_F2_INFO_REG		0x0E	/* 16 bits */
+#define SPID_F3_INFO_REG		0x10	/* 16 bits */
+#define SPID_TEST_READ			0x14	/* 32 bits */
+#define SPID_TEST_RW			0x18	/* 32 bits */
+#define SPID_RESP_DELAY_F0		0x1c	/* 8 bits (corerev >= 3) */
+#define SPID_RESP_DELAY_F1		0x1d	/* 8 bits (corerev >= 3) */
+#define SPID_RESP_DELAY_F2		0x1e	/* 8 bits (corerev >= 3) */
+#define SPID_RESP_DELAY_F3		0x1f	/* 8 bits (corerev >= 3) */
+
+/* Bit masks for SPID_CONFIG device register */
+#define WORD_LENGTH_32	0x1	/* 0/1 16/32 bit word length */
+#define ENDIAN_BIG	0x2	/* 0/1 Little/Big Endian */
+#define CLOCK_PHASE	0x4	/* 0/1 clock phase delay */
+#define CLOCK_POLARITY	0x8	/* 0/1 Idle state clock polarity is low/high */
+#define HIGH_SPEED_MODE	0x10	/* 1/0 High Speed mode / Normal mode */
+#define INTR_POLARITY	0x20	/* 1/0 Interrupt active polarity is high/low */
+#define WAKE_UP		0x80	/* 0/1 Wake-up command from Host to WLAN */
+
+/* Bit mask for SPID_RESPONSE_DELAY device register */
+#define RESPONSE_DELAY_MASK	0xFF	/* Configurable rd response delay in multiples of 8 bits */
+
+/* Bit mask for SPID_STATUS_ENABLE device register */
+#define STATUS_ENABLE		0x1	/* 1/0 Status sent/not sent to host after read/write */
+#define INTR_WITH_STATUS	0x2	/* 0/1 Do-not / do-interrupt if status is sent */
+#define RESP_DELAY_ALL		0x4	/* Applicability of resp delay to F1 or all func's read */
+#define DWORD_PKT_LEN_EN	0x8	/* Packet len denoted in dwords instead of bytes */
+#define CMD_ERR_CHK_EN		0x20	/* Command error check enable */
+#define DATA_ERR_CHK_EN		0x40	/* Data error check enable */
+
+/* Bit mask for SPID_RESET_BP device register */
+#define RESET_ON_WLAN_BP_RESET	0x4	/* enable reset for WLAN backplane */
+#define RESET_ON_BT_BP_RESET	0x8	/* enable reset for BT backplane */
+#define RESET_SPI		0x80	/* reset the above enabled logic */
+
+/* Bit mask for SPID_INTR_REG device register */
+#define DATA_UNAVAILABLE	0x0001	/* Requested data not available; Clear by writing a "1" */
+#define F2_F3_FIFO_RD_UNDERFLOW	0x0002
+#define F2_F3_FIFO_WR_OVERFLOW	0x0004
+#define COMMAND_ERROR		0x0008	/* Cleared by writing 1 */
+#define DATA_ERROR		0x0010	/* Cleared by writing 1 */
+#define F2_PACKET_AVAILABLE	0x0020
+#define F3_PACKET_AVAILABLE	0x0040
+#define F1_OVERFLOW		0x0080	/* Due to last write. Bkplane has pending write requests */
+#define MISC_INTR0		0x0100
+#define MISC_INTR1		0x0200
+#define MISC_INTR2		0x0400
+#define MISC_INTR3		0x0800
+#define MISC_INTR4		0x1000
+#define F1_INTR			0x2000
+#define F2_INTR			0x4000
+#define F3_INTR			0x8000
+
+/* Bit mask for 32bit SPID_STATUS_REG device register */
+#define STATUS_DATA_NOT_AVAILABLE	0x00000001
+#define STATUS_UNDERFLOW		0x00000002
+#define STATUS_OVERFLOW			0x00000004
+#define STATUS_F2_INTR			0x00000008
+#define STATUS_F3_INTR			0x00000010
+#define STATUS_F2_RX_READY		0x00000020
+#define STATUS_F3_RX_READY		0x00000040
+#define STATUS_HOST_CMD_DATA_ERR	0x00000080
+#define STATUS_F2_PKT_AVAILABLE		0x00000100
+#define STATUS_F2_PKT_LEN_MASK		0x000FFE00
+#define STATUS_F2_PKT_LEN_SHIFT		9
+#define STATUS_F3_PKT_AVAILABLE		0x00100000
+#define STATUS_F3_PKT_LEN_MASK		0xFFE00000
+#define STATUS_F3_PKT_LEN_SHIFT		21
+
+/* Bit mask for 16 bits SPID_F1_INFO_REG device register */
+#define F1_ENABLED 			0x0001
+#define F1_RDY_FOR_DATA_TRANSFER	0x0002
+#define F1_MAX_PKT_SIZE			0x01FC
+
+/* Bit mask for 16 bits SPID_F2_INFO_REG device register */
+#define F2_ENABLED 			0x0001
+#define F2_RDY_FOR_DATA_TRANSFER	0x0002
+#define F2_MAX_PKT_SIZE			0x3FFC
+
+/* Bit mask for 16 bits SPID_F3_INFO_REG device register */
+#define F3_ENABLED 			0x0001
+#define F3_RDY_FOR_DATA_TRANSFER	0x0002
+#define F3_MAX_PKT_SIZE			0x3FFC
+
+/* Bit mask for 32 bits SPID_TEST_READ device register read in 16bit LE mode */
+#define TEST_RO_DATA_32BIT_LE		0xFEEDBEAD
+
+/* Maximum number of I/O funcs */
+#define SPI_MAX_IOFUNCS		4
+
+#define SPI_MAX_PKT_LEN		(2048*4)
+
+/* Misc defines */
+#define SPI_FUNC_0		0
+#define SPI_FUNC_1		1
+#define SPI_FUNC_2		2
+#define SPI_FUNC_3		3
+
+#define WAIT_F2RXFIFORDY	100
+#define WAIT_F2RXFIFORDY_DELAY	20
+
+#endif /* _SPI_H */
diff --git a/drivers/net/wireless/bcmdhd/include/trxhdr.h b/drivers/net/wireless/bcmdhd/include/trxhdr.h
new file mode 100644
index 0000000..6e55b15
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/trxhdr.h
@@ -0,0 +1,92 @@
+/*
+ * TRX image file header format.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: trxhdr.h 349211 2012-08-07 09:45:24Z $
+ */
+
+#ifndef _TRX_HDR_H
+#define _TRX_HDR_H
+
+#include <typedefs.h>
+
+#define TRX_MAGIC	0x30524448	/* "HDR0" */
+#define TRX_MAX_LEN	0x3B0000	/* Max length */
+#define TRX_NO_HEADER	1		/* Do not write TRX header */
+#define TRX_GZ_FILES	0x2     /* Contains up to TRX_MAX_OFFSET individual gzip files */
+#define TRX_EMBED_UCODE	0x8	/* Trx contains embedded ucode image */
+#define TRX_ROMSIM_IMAGE	0x10	/* Trx contains ROM simulation image */
+#define TRX_UNCOMP_IMAGE	0x20	/* Trx contains uncompressed rtecdc.bin image */
+#define TRX_BOOTLOADER		0x40	/* the image is a bootloader */
+
+#define TRX_V1		1
+#define TRX_V1_MAX_OFFSETS	3		/* V1: Max number of individual files */
+
+#ifndef BCMTRXV2
+#define TRX_VERSION	TRX_V1		/* Version 1 */
+#define TRX_MAX_OFFSET TRX_V1_MAX_OFFSETS
+#endif
+
+/* BMAC Host driver/application like bcmdl need to support both Ver 1 as well as
+ * Ver 2 of trx header. To make it generic, trx_header is structure is modified
+ * as below where size of "offsets" field will vary as per the TRX version.
+ * Currently, BMAC host driver and bcmdl are modified to support TRXV2 as well.
+ * To make sure, other applications like "dhdl" which are yet to be enhanced to support
+ * TRXV2 are not broken, new macro and structure defintion take effect only when BCMTRXV2
+ * is defined.
+ */
+struct trx_header {
+	uint32 magic;		/* "HDR0" */
+	uint32 len;		/* Length of file including header */
+	uint32 crc32;		/* 32-bit CRC from flag_version to end of file */
+	uint32 flag_version;	/* 0:15 flags, 16:31 version */
+#ifndef BCMTRXV2
+	uint32 offsets[TRX_MAX_OFFSET];	/* Offsets of partitions from start of header */
+#else
+	uint32 offsets[1];	/* Offsets of partitions from start of header */
+#endif
+};
+
+#ifdef BCMTRXV2
+#define TRX_VERSION		TRX_V2		/* Version 2 */
+#define TRX_MAX_OFFSET  TRX_V2_MAX_OFFSETS
+
+#define TRX_V2		2
+/* V2: Max number of individual files
+ * To support SDR signature + Config data region
+ */
+#define TRX_V2_MAX_OFFSETS	5
+#define SIZEOF_TRXHDR_V1	(sizeof(struct trx_header)+(TRX_V1_MAX_OFFSETS-1)*sizeof(uint32))
+#define SIZEOF_TRXHDR_V2	(sizeof(struct trx_header)+(TRX_V2_MAX_OFFSETS-1)*sizeof(uint32))
+#define TRX_VER(trx)		(trx->flag_version>>16)
+#define ISTRX_V1(trx)		(TRX_VER(trx) == TRX_V1)
+#define ISTRX_V2(trx)		(TRX_VER(trx) == TRX_V2)
+/* For V2, return size of V2 size: others, return V1 size */
+#define SIZEOF_TRX(trx)	    (ISTRX_V2(trx) ? SIZEOF_TRXHDR_V2: SIZEOF_TRXHDR_V1)
+#else
+#define SIZEOF_TRX(trx)	    (sizeof(struct trx_header))
+#endif /* BCMTRXV2 */
+
+/* Compatibility */
+typedef struct trx_header TRXHDR, *PTRXHDR;
+
+#endif /* _TRX_HDR_H */
diff --git a/drivers/net/wireless/bcmdhd/include/typedefs.h b/drivers/net/wireless/bcmdhd/include/typedefs.h
new file mode 100644
index 0000000..ce593f3
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/typedefs.h
@@ -0,0 +1,339 @@
+/*
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: typedefs.h 473326 2014-04-29 00:37:35Z $
+ */
+
+#ifndef _TYPEDEFS_H_
+#define _TYPEDEFS_H_
+
+#ifdef SITE_TYPEDEFS
+
+/*
+ * Define SITE_TYPEDEFS in the compile to include a site-specific
+ * typedef file "site_typedefs.h".
+ *
+ * If SITE_TYPEDEFS is not defined, then the code section below makes
+ * inferences about the compile environment based on defined symbols and
+ * possibly compiler pragmas.
+ *
+ * Following these two sections is the Default Typedefs section.
+ * This section is only processed if USE_TYPEDEF_DEFAULTS is
+ * defined. This section has a default set of typedefs and a few
+ * preprocessor symbols (TRUE, FALSE, NULL, ...).
+ */
+
+#include "site_typedefs.h"
+
+#else
+
+/*
+ * Infer the compile environment based on preprocessor symbols and pragmas.
+ * Override type definitions as needed, and include configuration-dependent
+ * header files to define types.
+ */
+
+#ifdef __cplusplus
+
+#define TYPEDEF_BOOL
+#ifndef FALSE
+#define FALSE	false
+#endif
+#ifndef TRUE
+#define TRUE	true
+#endif
+
+#else	/* ! __cplusplus */
+
+
+#endif	/* ! __cplusplus */
+
+#if defined(__LP64__)
+#define TYPEDEF_UINTPTR
+typedef unsigned long long int uintptr;
+#endif
+
+
+
+
+
+#if defined(_NEED_SIZE_T_)
+typedef long unsigned int size_t;
+#endif
+
+
+
+
+
+#if defined(__sparc__)
+#define TYPEDEF_ULONG
+#endif
+
+/*
+ * If this is either a Linux hybrid build or the per-port code of a hybrid build
+ * then use the Linux header files to get some of the typedefs.  Otherwise, define
+ * them entirely in this file.  We can't always define the types because we get
+ * a duplicate typedef error; there is no way to "undefine" a typedef.
+ * We know when it's per-port code because each file defines LINUX_PORT at the top.
+ */
+#if !defined(LINUX_HYBRID) || defined(LINUX_PORT)
+#define TYPEDEF_UINT
+#ifndef TARGETENV_android
+#define TYPEDEF_USHORT
+#define TYPEDEF_ULONG
+#endif /* TARGETENV_android */
+#ifdef __KERNEL__
+#include <linux/version.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19))
+#define TYPEDEF_BOOL
+#endif	/* >= 2.6.19 */
+/* special detection for 2.6.18-128.7.1.0.1.el5 */
+#if (LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18))
+#include <linux/compiler.h>
+#ifdef noinline_for_stack
+#define TYPEDEF_BOOL
+#endif
+#endif	/* == 2.6.18 */
+#endif	/* __KERNEL__ */
+#endif  /* !defined(LINUX_HYBRID) || defined(LINUX_PORT) */
+
+
+/* Do not support the (u)int64 types with strict ansi for GNU C */
+#if defined(__GNUC__) && defined(__STRICT_ANSI__)
+#define TYPEDEF_INT64
+#define TYPEDEF_UINT64
+#endif /* defined(__GNUC__) && defined(__STRICT_ANSI__) */
+
+/* ICL accepts unsigned 64 bit type only, and complains in ANSI mode
+ * for signed or unsigned
+ */
+#if defined(__ICL)
+
+#define TYPEDEF_INT64
+
+#if defined(__STDC__)
+#define TYPEDEF_UINT64
+#endif
+
+#endif /* __ICL */
+
+#if !defined(__DJGPP__)
+
+/* pick up ushort & uint from standard types.h */
+#if defined(__KERNEL__)
+
+/* See note above */
+#if !defined(LINUX_HYBRID) || defined(LINUX_PORT)
+#include <linux/types.h>	/* sys/types.h and linux/types.h are oil and water */
+#endif /* !defined(LINUX_HYBRID) || defined(LINUX_PORT) */
+
+#else
+
+#include <sys/types.h>
+
+#endif /* linux && __KERNEL__ */
+
+#endif 
+
+
+/* use the default typedefs in the next section of this file */
+#define USE_TYPEDEF_DEFAULTS
+
+#endif /* SITE_TYPEDEFS */
+
+
+/*
+ * Default Typedefs
+ */
+
+#ifdef USE_TYPEDEF_DEFAULTS
+#undef USE_TYPEDEF_DEFAULTS
+
+#ifndef TYPEDEF_BOOL
+typedef	/* @abstract@ */ unsigned char	bool;
+#endif
+
+/* define uchar, ushort, uint, ulong */
+
+#ifndef TYPEDEF_UCHAR
+typedef unsigned char	uchar;
+#endif
+
+#ifndef TYPEDEF_USHORT
+typedef unsigned short	ushort;
+#endif
+
+#ifndef TYPEDEF_UINT
+typedef unsigned int	uint;
+#endif
+
+#ifndef TYPEDEF_ULONG
+typedef unsigned long	ulong;
+#endif
+
+/* define [u]int8/16/32/64, uintptr */
+
+#ifndef TYPEDEF_UINT8
+typedef unsigned char	uint8;
+#endif
+
+#ifndef TYPEDEF_UINT16
+typedef unsigned short	uint16;
+#endif
+
+#ifndef TYPEDEF_UINT32
+typedef unsigned int	uint32;
+#endif
+
+#ifndef TYPEDEF_UINT64
+typedef unsigned long long uint64;
+#endif
+
+#ifndef TYPEDEF_UINTPTR
+typedef unsigned int	uintptr;
+#endif
+
+#ifndef TYPEDEF_INT8
+typedef signed char	int8;
+#endif
+
+#ifndef TYPEDEF_INT16
+typedef signed short	int16;
+#endif
+
+#ifndef TYPEDEF_INT32
+typedef signed int	int32;
+#endif
+
+#ifndef TYPEDEF_INT64
+typedef signed long long int64;
+#endif
+
+/* define float32/64, float_t */
+
+#ifndef TYPEDEF_FLOAT32
+typedef float		float32;
+#endif
+
+#ifndef TYPEDEF_FLOAT64
+typedef double		float64;
+#endif
+
+/*
+ * abstracted floating point type allows for compile time selection of
+ * single or double precision arithmetic.  Compiling with -DFLOAT32
+ * selects single precision; the default is double precision.
+ */
+
+#ifndef TYPEDEF_FLOAT_T
+
+#if defined(FLOAT32)
+typedef float32 float_t;
+#else /* default to double precision floating point */
+typedef float64 float_t;
+#endif
+
+#endif /* TYPEDEF_FLOAT_T */
+
+/* define macro values */
+
+#ifndef FALSE
+#define FALSE	0
+#endif
+
+#ifndef TRUE
+#define TRUE	1  /* TRUE */
+#endif
+
+#ifndef NULL
+#define	NULL	0
+#endif
+
+#ifndef OFF
+#define	OFF	0
+#endif
+
+#ifndef ON
+#define	ON	1  /* ON = 1 */
+#endif
+
+#define	AUTO	(-1) /* Auto = -1 */
+
+/* define PTRSZ, INLINE */
+
+#ifndef PTRSZ
+#define	PTRSZ	sizeof(char*)
+#endif
+
+
+/* Detect compiler type. */
+#if defined(__GNUC__) || defined(__lint)
+	#define BWL_COMPILER_GNU
+#elif defined(__CC_ARM) && __CC_ARM
+	#define BWL_COMPILER_ARMCC
+#else
+	#error "Unknown compiler!"
+#endif 
+
+
+#ifndef INLINE
+	#if defined(BWL_COMPILER_MICROSOFT)
+		#define INLINE __inline
+	#elif defined(BWL_COMPILER_GNU)
+		#define INLINE __inline__
+	#elif defined(BWL_COMPILER_ARMCC)
+		#define INLINE	__inline
+	#else
+		#define INLINE
+	#endif 
+#endif /* INLINE */
+
+#undef TYPEDEF_BOOL
+#undef TYPEDEF_UCHAR
+#undef TYPEDEF_USHORT
+#undef TYPEDEF_UINT
+#undef TYPEDEF_ULONG
+#undef TYPEDEF_UINT8
+#undef TYPEDEF_UINT16
+#undef TYPEDEF_UINT32
+#undef TYPEDEF_UINT64
+#undef TYPEDEF_UINTPTR
+#undef TYPEDEF_INT8
+#undef TYPEDEF_INT16
+#undef TYPEDEF_INT32
+#undef TYPEDEF_INT64
+#undef TYPEDEF_FLOAT32
+#undef TYPEDEF_FLOAT64
+#undef TYPEDEF_FLOAT_T
+
+#endif /* USE_TYPEDEF_DEFAULTS */
+
+/* Suppress unused parameter warning */
+#define UNUSED_PARAMETER(x) (void)(x)
+
+/* Avoid warning for discarded const or volatile qualifier in special cases (-Wcast-qual) */
+#define DISCARD_QUAL(ptr, type) ((type *)(uintptr)(ptr))
+
+/*
+ * Including the bcmdefs.h here, to make sure everyone including typedefs.h
+ * gets this automatically
+*/
+#include <bcmdefs.h>
+#endif /* _TYPEDEFS_H_ */
diff --git a/drivers/net/wireless/bcmdhd/include/wlfc_proto.h b/drivers/net/wireless/bcmdhd/include/wlfc_proto.h
new file mode 100644
index 0000000..937b86d
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/wlfc_proto.h
@@ -0,0 +1,302 @@
+/*
+* Copyright (C) 1999-2014, Broadcom Corporation
+*
+*      Unless you and Broadcom execute a separate written software license
+* agreement governing use of this software, this software is licensed to you
+* under the terms of the GNU General Public License version 2 (the "GPL"),
+* available at http://www.broadcom.com/licenses/GPLv2.php, with the
+* following added to such license:
+*
+*      As a special exception, the copyright holders of this software give you
+* permission to link this software with independent modules, and to copy and
+* distribute the resulting executable under terms of your choice, provided that
+* you also meet, for each linked independent module, the terms and conditions of
+* the license of that module.  An independent module is a module which is not
+* derived from this software.  The special exception does not apply to any
+* modifications of the software.
+*
+*      Notwithstanding the above, under no circumstances may you combine this
+* software in any way with any other Broadcom software provided under a license
+* other than the GPL, without Broadcom's express prior written consent.
+* $Id: wlfc_proto.h 455301 2014-02-13 12:42:13Z $
+*
+*/
+#ifndef __wlfc_proto_definitions_h__
+#define __wlfc_proto_definitions_h__
+
+	/* Use TLV to convey WLFC information.
+	 ---------------------------------------------------------------------------
+	| Type |  Len | value                    | Description
+	 ---------------------------------------------------------------------------
+	|  1   |   1  | (handle)                 | MAC OPEN
+	 ---------------------------------------------------------------------------
+	|  2   |   1  | (handle)                 | MAC CLOSE
+	 ---------------------------------------------------------------------------
+	|  3   |   2  | (count, handle, prec_bmp)| Set the credit depth for a MAC dstn
+	 ---------------------------------------------------------------------------
+	|  4   |   4+ | see pkttag comments      | TXSTATUS
+	|      |      | TX status & timestamps   | Present only when pkt timestamp is enabled
+	 ---------------------------------------------------------------------------
+	|  5   |   4  | see pkttag comments      | PKKTTAG [host->firmware]
+	 ---------------------------------------------------------------------------
+	|  6   |   8  | (handle, ifid, MAC)      | MAC ADD
+	 ---------------------------------------------------------------------------
+	|  7   |   8  | (handle, ifid, MAC)      | MAC DEL
+	 ---------------------------------------------------------------------------
+	|  8   |   1  | (rssi)                   | RSSI - RSSI value for the packet.
+	 ---------------------------------------------------------------------------
+	|  9   |   1  | (interface ID)           | Interface OPEN
+	 ---------------------------------------------------------------------------
+	|  10  |   1  | (interface ID)           | Interface CLOSE
+	 ---------------------------------------------------------------------------
+	|  11  |   8  | fifo credit returns map  | FIFO credits back to the host
+	|      |      |                          |
+	|      |      |                          | --------------------------------------
+	|      |      |                          | | ac0 | ac1 | ac2 | ac3 | bcmc | atim |
+	|      |      |                          | --------------------------------------
+	|      |      |                          |
+	 ---------------------------------------------------------------------------
+	|  12  |   2  | MAC handle,              | Host provides a bitmap of pending
+	|      |      | AC[0-3] traffic bitmap   | unicast traffic for MAC-handle dstn.
+	|      |      |                          | [host->firmware]
+	 ---------------------------------------------------------------------------
+	|  13  |   3  | (count, handle, prec_bmp)| One time request for packet to a specific
+	|      |      |                          | MAC destination.
+	 ---------------------------------------------------------------------------
+	|  15  |  12  | (pkttag, timestamps)     | Send TX timestamp at reception from host
+	 ---------------------------------------------------------------------------
+	|  16  |  12  | (pkttag, timestamps)     | Send WLAN RX timestamp along with RX frame
+	 ---------------------------------------------------------------------------
+	| 255  |  N/A |  N/A                     | FILLER - This is a special type
+	|      |      |                          | that has no length or value.
+	|      |      |                          | Typically used for padding.
+	 ---------------------------------------------------------------------------
+	*/
+
+#define WLFC_CTL_TYPE_MAC_OPEN			1
+#define WLFC_CTL_TYPE_MAC_CLOSE			2
+#define WLFC_CTL_TYPE_MAC_REQUEST_CREDIT	3
+#define WLFC_CTL_TYPE_TXSTATUS			4
+#define WLFC_CTL_TYPE_PKTTAG			5
+
+#define WLFC_CTL_TYPE_MACDESC_ADD		6
+#define WLFC_CTL_TYPE_MACDESC_DEL		7
+#define WLFC_CTL_TYPE_RSSI			8
+
+#define WLFC_CTL_TYPE_INTERFACE_OPEN		9
+#define WLFC_CTL_TYPE_INTERFACE_CLOSE		10
+
+#define WLFC_CTL_TYPE_FIFO_CREDITBACK		11
+
+#define WLFC_CTL_TYPE_PENDING_TRAFFIC_BMP	12
+#define WLFC_CTL_TYPE_MAC_REQUEST_PACKET	13
+#define WLFC_CTL_TYPE_HOST_REORDER_RXPKTS	14
+
+
+#define WLFC_CTL_TYPE_TX_ENTRY_STAMP		15
+#define WLFC_CTL_TYPE_RX_STAMP			16
+
+#define WLFC_CTL_TYPE_TRANS_ID			18
+#define WLFC_CTL_TYPE_COMP_TXSTATUS		19
+
+#define WLFC_CTL_TYPE_TID_OPEN			20
+#define WLFC_CTL_TYPE_TID_CLOSE			21
+
+
+#define WLFC_CTL_TYPE_FILLER			255
+
+#define WLFC_CTL_VALUE_LEN_MACDESC		8	/* handle, interface, MAC */
+
+#define WLFC_CTL_VALUE_LEN_MAC			1	/* MAC-handle */
+#define WLFC_CTL_VALUE_LEN_RSSI			1
+
+#define WLFC_CTL_VALUE_LEN_INTERFACE		1
+#define WLFC_CTL_VALUE_LEN_PENDING_TRAFFIC_BMP	2
+
+#define WLFC_CTL_VALUE_LEN_TXSTATUS		4
+#define WLFC_CTL_VALUE_LEN_PKTTAG		4
+
+#define WLFC_CTL_VALUE_LEN_SEQ			2
+
+/* enough space to host all 4 ACs, bc/mc and atim fifo credit */
+#define WLFC_CTL_VALUE_LEN_FIFO_CREDITBACK	6
+
+#define WLFC_CTL_VALUE_LEN_REQUEST_CREDIT	3	/* credit, MAC-handle, prec_bitmap */
+#define WLFC_CTL_VALUE_LEN_REQUEST_PACKET	3	/* credit, MAC-handle, prec_bitmap */
+
+
+#define WLFC_PKTFLAG_PKTFROMHOST	0x01
+#define WLFC_PKTFLAG_PKT_REQUESTED	0x02
+#define WLFC_PKTFLAG_PKT_FORCELOWRATE	0x04 /* force low rate for this packet */
+
+#define WL_TXSTATUS_STATUS_MASK			0xff /* allow 8 bits */
+#define WL_TXSTATUS_STATUS_SHIFT		24
+
+#define WL_TXSTATUS_SET_STATUS(x, status)	((x)  = \
+	((x) & ~(WL_TXSTATUS_STATUS_MASK << WL_TXSTATUS_STATUS_SHIFT)) | \
+	(((status) & WL_TXSTATUS_STATUS_MASK) << WL_TXSTATUS_STATUS_SHIFT))
+#define WL_TXSTATUS_GET_STATUS(x)	(((x) >> WL_TXSTATUS_STATUS_SHIFT) & \
+	WL_TXSTATUS_STATUS_MASK)
+
+#define WL_TXSTATUS_GENERATION_MASK		1 /* allow 1 bit */
+#define WL_TXSTATUS_GENERATION_SHIFT		31
+
+#define WL_TXSTATUS_SET_GENERATION(x, gen)	((x) = \
+	((x) & ~(WL_TXSTATUS_GENERATION_MASK << WL_TXSTATUS_GENERATION_SHIFT)) | \
+	(((gen) & WL_TXSTATUS_GENERATION_MASK) << WL_TXSTATUS_GENERATION_SHIFT))
+
+#define WL_TXSTATUS_GET_GENERATION(x)	(((x) >> WL_TXSTATUS_GENERATION_SHIFT) & \
+	WL_TXSTATUS_GENERATION_MASK)
+
+#define WL_TXSTATUS_FLAGS_MASK			0xf /* allow 4 bits only */
+#define WL_TXSTATUS_FLAGS_SHIFT			27
+
+#define WL_TXSTATUS_SET_FLAGS(x, flags)	((x)  = \
+	((x) & ~(WL_TXSTATUS_FLAGS_MASK << WL_TXSTATUS_FLAGS_SHIFT)) | \
+	(((flags) & WL_TXSTATUS_FLAGS_MASK) << WL_TXSTATUS_FLAGS_SHIFT))
+#define WL_TXSTATUS_GET_FLAGS(x)		(((x) >> WL_TXSTATUS_FLAGS_SHIFT) & \
+	WL_TXSTATUS_FLAGS_MASK)
+
+#define WL_TXSTATUS_FIFO_MASK			0x7 /* allow 3 bits for FIFO ID */
+#define WL_TXSTATUS_FIFO_SHIFT			24
+
+#define WL_TXSTATUS_SET_FIFO(x, flags)	((x)  = \
+	((x) & ~(WL_TXSTATUS_FIFO_MASK << WL_TXSTATUS_FIFO_SHIFT)) | \
+	(((flags) & WL_TXSTATUS_FIFO_MASK) << WL_TXSTATUS_FIFO_SHIFT))
+#define WL_TXSTATUS_GET_FIFO(x)		(((x) >> WL_TXSTATUS_FIFO_SHIFT) & WL_TXSTATUS_FIFO_MASK)
+
+#define WL_TXSTATUS_PKTID_MASK			0xffffff /* allow 24 bits */
+#define WL_TXSTATUS_SET_PKTID(x, num)	((x) = \
+	((x) & ~WL_TXSTATUS_PKTID_MASK) | (num))
+#define WL_TXSTATUS_GET_PKTID(x)		((x) & WL_TXSTATUS_PKTID_MASK)
+
+#define WL_TXSTATUS_HSLOT_MASK			0xffff /* allow 16 bits */
+#define WL_TXSTATUS_HSLOT_SHIFT			8
+
+#define WL_TXSTATUS_SET_HSLOT(x, hslot)	((x)  = \
+	((x) & ~(WL_TXSTATUS_HSLOT_MASK << WL_TXSTATUS_HSLOT_SHIFT)) | \
+	(((hslot) & WL_TXSTATUS_HSLOT_MASK) << WL_TXSTATUS_HSLOT_SHIFT))
+#define WL_TXSTATUS_GET_HSLOT(x)	(((x) >> WL_TXSTATUS_HSLOT_SHIFT)& \
+	WL_TXSTATUS_HSLOT_MASK)
+
+#define WL_TXSTATUS_FREERUNCTR_MASK		0xff /* allow 8 bits */
+
+#define WL_TXSTATUS_SET_FREERUNCTR(x, ctr)	((x)  = \
+	((x) & ~(WL_TXSTATUS_FREERUNCTR_MASK)) | \
+	((ctr) & WL_TXSTATUS_FREERUNCTR_MASK))
+#define WL_TXSTATUS_GET_FREERUNCTR(x)		((x)& WL_TXSTATUS_FREERUNCTR_MASK)
+
+#define WL_SEQ_FROMFW_MASK		0x1 /* allow 1 bit */
+#define WL_SEQ_FROMFW_SHIFT		13
+#define WL_SEQ_SET_FROMFW(x, val)	((x) = \
+	((x) & ~(WL_SEQ_FROMFW_MASK << WL_SEQ_FROMFW_SHIFT)) | \
+	(((val) & WL_SEQ_FROMFW_MASK) << WL_SEQ_FROMFW_SHIFT))
+#define WL_SEQ_GET_FROMFW(x)	(((x) >> WL_SEQ_FROMFW_SHIFT) & \
+	WL_SEQ_FROMFW_MASK)
+
+#define WL_SEQ_FROMDRV_MASK		0x1 /* allow 1 bit */
+#define WL_SEQ_FROMDRV_SHIFT		12
+#define WL_SEQ_SET_FROMDRV(x, val)	((x) = \
+	((x) & ~(WL_SEQ_FROMDRV_MASK << WL_SEQ_FROMDRV_SHIFT)) | \
+	(((val) & WL_SEQ_FROMDRV_MASK) << WL_SEQ_FROMDRV_SHIFT))
+#define WL_SEQ_GET_FROMDRV(x)	(((x) >> WL_SEQ_FROMDRV_SHIFT) & \
+	WL_SEQ_FROMDRV_MASK)
+
+#define WL_SEQ_NUM_MASK			0xfff /* allow 12 bit */
+#define WL_SEQ_NUM_SHIFT		0
+#define WL_SEQ_SET_NUM(x, val)	((x) = \
+	((x) & ~(WL_SEQ_NUM_MASK << WL_SEQ_NUM_SHIFT)) | \
+	(((val) & WL_SEQ_NUM_MASK) << WL_SEQ_NUM_SHIFT))
+#define WL_SEQ_GET_NUM(x)	(((x) >> WL_SEQ_NUM_SHIFT) & \
+	WL_SEQ_NUM_MASK)
+
+/* 32 STA should be enough??, 6 bits; Must be power of 2 */
+#define WLFC_MAC_DESC_TABLE_SIZE	32
+#define WLFC_MAX_IFNUM				16
+#define WLFC_MAC_DESC_ID_INVALID	0xff
+
+/* b[7:5] -reuse guard, b[4:0] -value */
+#define WLFC_MAC_DESC_GET_LOOKUP_INDEX(x) ((x) & 0x1f)
+
+
+#define WLFC_MAX_PENDING_DATALEN	120
+
+/* host is free to discard the packet */
+#define WLFC_CTL_PKTFLAG_DISCARD	0
+/* D11 suppressed a packet */
+#define WLFC_CTL_PKTFLAG_D11SUPPRESS	1
+/* WL firmware suppressed a packet because MAC is
+	already in PSMode (short time window)
+*/
+#define WLFC_CTL_PKTFLAG_WLSUPPRESS	2
+/* Firmware tossed this packet */
+#define WLFC_CTL_PKTFLAG_TOSSED_BYWLC	3
+/* Firmware tossed after retries */
+#define WLFC_CTL_PKTFLAG_DISCARD_NOACK	4
+
+#define WLFC_D11_STATUS_INTERPRET(txs)	\
+	(((txs)->status.suppr_ind !=  TX_STATUS_SUPR_NONE) ? \
+	WLFC_CTL_PKTFLAG_D11SUPPRESS : \
+	((txs)->status.was_acked ? \
+		WLFC_CTL_PKTFLAG_DISCARD : WLFC_CTL_PKTFLAG_DISCARD_NOACK))
+
+#ifdef PROP_TXSTATUS_DEBUG
+#define WLFC_DBGMESG(x) printf x
+/* wlfc-breadcrumb */
+#define WLFC_BREADCRUMB(x) do {if ((x) == NULL) \
+	{printf("WLFC: %s():%d:caller:%p\n", \
+	__FUNCTION__, __LINE__, __builtin_return_address(0));}} while (0)
+#define WLFC_PRINTMAC(banner, ea) do {printf("%s MAC: [%02x:%02x:%02x:%02x:%02x:%02x]\n", \
+	banner, ea[0], 	ea[1], 	ea[2], 	ea[3], 	ea[4], 	ea[5]); } while (0)
+#define WLFC_WHEREIS(s) printf("WLFC: at %s():%d, %s\n", __FUNCTION__, __LINE__, (s))
+#else
+#define WLFC_DBGMESG(x)
+#define WLFC_BREADCRUMB(x)
+#define WLFC_PRINTMAC(banner, ea)
+#define WLFC_WHEREIS(s)
+#endif
+
+/* AMPDU host reorder packet flags */
+#define WLHOST_REORDERDATA_MAXFLOWS		256
+#define WLHOST_REORDERDATA_LEN		 10
+#define WLHOST_REORDERDATA_TOTLEN	(WLHOST_REORDERDATA_LEN + 1 + 1) /* +tag +len */
+
+#define WLHOST_REORDERDATA_FLOWID_OFFSET		0
+#define WLHOST_REORDERDATA_MAXIDX_OFFSET		2
+#define WLHOST_REORDERDATA_FLAGS_OFFSET			4
+#define WLHOST_REORDERDATA_CURIDX_OFFSET		6
+#define WLHOST_REORDERDATA_EXPIDX_OFFSET		8
+
+#define WLHOST_REORDERDATA_DEL_FLOW		0x01
+#define WLHOST_REORDERDATA_FLUSH_ALL		0x02
+#define WLHOST_REORDERDATA_CURIDX_VALID		0x04
+#define WLHOST_REORDERDATA_EXPIDX_VALID		0x08
+#define WLHOST_REORDERDATA_NEW_HOLE		0x10
+
+/* transaction id data len byte 0: rsvd, byte 1: seqnumber, byte 2-5 will be used for timestampe */
+#define WLFC_CTL_TRANS_ID_LEN			6
+#define WLFC_TYPE_TRANS_ID_LEN			6
+
+#define WLFC_MODE_HANGER	1 /* use hanger */
+#define WLFC_MODE_AFQ		2 /* use afq */
+#define WLFC_IS_OLD_DEF(x) ((x & 1) || (x & 2))
+
+#define WLFC_MODE_AFQ_SHIFT		2	/* afq bit */
+#define WLFC_SET_AFQ(x, val)	((x) = \
+	((x) & ~(1 << WLFC_MODE_AFQ_SHIFT)) | \
+	(((val) & 1) << WLFC_MODE_AFQ_SHIFT))
+#define WLFC_GET_AFQ(x)	(((x) >> WLFC_MODE_AFQ_SHIFT) & 1)
+
+#define WLFC_MODE_REUSESEQ_SHIFT	3	/* seq reuse bit */
+#define WLFC_SET_REUSESEQ(x, val)	((x) = \
+	((x) & ~(1 << WLFC_MODE_REUSESEQ_SHIFT)) | \
+	(((val) & 1) << WLFC_MODE_REUSESEQ_SHIFT))
+#define WLFC_GET_REUSESEQ(x)	(((x) >> WLFC_MODE_REUSESEQ_SHIFT) & 1)
+
+#define WLFC_MODE_REORDERSUPP_SHIFT	4	/* host reorder suppress pkt bit */
+#define WLFC_SET_REORDERSUPP(x, val)	((x) = \
+	((x) & ~(1 << WLFC_MODE_REORDERSUPP_SHIFT)) | \
+	(((val) & 1) << WLFC_MODE_REORDERSUPP_SHIFT))
+#define WLFC_GET_REORDERSUPP(x)	(((x) >> WLFC_MODE_REORDERSUPP_SHIFT) & 1)
+
+#endif /* __wlfc_proto_definitions_h__ */
diff --git a/drivers/net/wireless/bcmdhd/include/wlioctl.h b/drivers/net/wireless/bcmdhd/include/wlioctl.h
new file mode 100644
index 0000000..890abfe
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/wlioctl.h
@@ -0,0 +1,5876 @@
+/*
+ * Custom OID/ioctl definitions for
+ * Broadcom 802.11abg Networking Device Driver
+ *
+ * Definitions subject to change without notice.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ *
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ *
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wlioctl.h 490639 2014-07-11 08:31:53Z $
+ */
+
+#ifndef _wlioctl_h_
+#define	_wlioctl_h_
+
+#include <typedefs.h>
+#include <proto/ethernet.h>
+#include <proto/bcmip.h>
+#include <proto/bcmeth.h>
+#include <proto/bcmip.h>
+#include <proto/bcmevent.h>
+#include <proto/802.11.h>
+#include <proto/802.1d.h>
+#include <bcmwifi_channels.h>
+#include <bcmwifi_rates.h>
+#include <devctrl_if/wlioctl_defs.h>
+
+
+#include <bcm_mpool_pub.h>
+#include <bcmcdc.h>
+
+
+
+
+
+
+#ifndef INTF_NAME_SIZ
+#define INTF_NAME_SIZ	16
+#endif
+
+/* Used to send ioctls over the transport pipe */
+typedef struct remote_ioctl {
+	cdc_ioctl_t	msg;
+	uint32		data_len;
+	char           intf_name[INTF_NAME_SIZ];
+} rem_ioctl_t;
+#define REMOTE_SIZE	sizeof(rem_ioctl_t)
+
+typedef struct {
+	uint32 num;
+	chanspec_t list[1];
+} chanspec_list_t;
+
+/* DFS Forced param */
+typedef struct wl_dfs_forced_params {
+	chanspec_t chspec;
+	uint16 version;
+	chanspec_list_t chspec_list;
+} wl_dfs_forced_t;
+
+#define DFS_PREFCHANLIST_VER 0x01
+#define WL_CHSPEC_LIST_FIXED_SIZE	OFFSETOF(chanspec_list_t, list)
+#define WL_DFS_FORCED_PARAMS_FIXED_SIZE \
+	(WL_CHSPEC_LIST_FIXED_SIZE + OFFSETOF(wl_dfs_forced_t, chspec_list))
+#define WL_DFS_FORCED_PARAMS_MAX_SIZE \
+	WL_DFS_FORCED_PARAMS_FIXED_SIZE + (WL_NUMCHANNELS * sizeof(chanspec_t))
+
+/* association decision information */
+typedef struct {
+	bool		assoc_approved;		/* (re)association approved */
+	uint16		reject_reason;		/* reason code for rejecting association */
+	struct		ether_addr   da;
+	int64		sys_time;		/* current system time */
+} assoc_decision_t;
+
+#define ACTION_FRAME_SIZE 1800
+
+typedef struct wl_action_frame {
+	struct ether_addr 	da;
+	uint16 			len;
+	uint32 			packetId;
+	uint8			data[ACTION_FRAME_SIZE];
+} wl_action_frame_t;
+
+#define WL_WIFI_ACTION_FRAME_SIZE sizeof(struct wl_action_frame)
+
+typedef struct ssid_info
+{
+	uint8		ssid_len;	/* the length of SSID */
+	uint8		ssid[32];	/* SSID string */
+} ssid_info_t;
+
+typedef struct wl_af_params {
+	uint32 			channel;
+	int32 			dwell_time;
+	struct ether_addr 	BSSID;
+	wl_action_frame_t	action_frame;
+} wl_af_params_t;
+
+#define WL_WIFI_AF_PARAMS_SIZE sizeof(struct wl_af_params)
+
+#define MFP_TEST_FLAG_NORMAL	0
+#define MFP_TEST_FLAG_ANY_KEY	1
+typedef struct wl_sa_query {
+	uint32			flag;
+	uint8 			action;
+	uint16 			id;
+	struct ether_addr 	da;
+} wl_sa_query_t;
+
+/* require default structure packing */
+#define BWL_DEFAULT_PACKING
+#include <packed_section_start.h>
+
+
+/* Flags for OBSS IOVAR Parameters */
+#define WL_OBSS_DYN_BWSW_FLAG_ACTIVITY_PERIOD        (0x01)
+#define WL_OBSS_DYN_BWSW_FLAG_NOACTIVITY_PERIOD      (0x02)
+#define WL_OBSS_DYN_BWSW_FLAG_NOACTIVITY_INCR_PERIOD (0x04)
+#define WL_OBSS_DYN_BWSW_FLAG_PSEUDO_SENSE_PERIOD    (0x08)
+#define WL_OBSS_DYN_BWSW_FLAG_RX_CRS_PERIOD          (0x10)
+#define WL_OBSS_DYN_BWSW_FLAG_DUR_THRESHOLD          (0x20)
+#define WL_OBSS_DYN_BWSW_FLAG_TXOP_PERIOD            (0x40)
+
+/* OBSS IOVAR Version information */
+#define WL_PROT_OBSS_CONFIG_PARAMS_VERSION 1
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint8 obss_bwsw_activity_cfm_count_cfg; /* configurable count in
+		* seconds before we confirm that OBSS is present and
+		* dynamically activate dynamic bwswitch.
+		*/
+	uint8 obss_bwsw_no_activity_cfm_count_cfg; /* configurable count in
+		* seconds before we confirm that OBSS is GONE and
+		* dynamically start pseudo upgrade. If in pseudo sense time, we
+		* will see OBSS, [means that, we false detected that OBSS-is-gone
+		* in watchdog] this count will be incremented in steps of
+		* obss_bwsw_no_activity_cfm_count_incr_cfg for confirming OBSS
+		* detection again. Note that, at present, max 30seconds is
+		* allowed like this. [OBSS_BWSW_NO_ACTIVITY_MAX_INCR_DEFAULT]
+		*/
+	uint8 obss_bwsw_no_activity_cfm_count_incr_cfg; /* see above
+		*/
+	uint16 obss_bwsw_pseudo_sense_count_cfg; /* number of msecs/cnt to be in
+		* pseudo state. This is used to sense/measure the stats from lq.
+		*/
+	uint8 obss_bwsw_rx_crs_threshold_cfg; /* RX CRS default threshold */
+	uint8 obss_bwsw_dur_thres; /* OBSS dyn bwsw trigger/RX CRS Sec */
+	uint8 obss_bwsw_txop_threshold_cfg; /* TXOP default threshold */
+} BWL_POST_PACKED_STRUCT wlc_prot_dynbwsw_config_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint32 version;	/* version field */
+	uint32 config_mask;
+	uint32 reset_mask;
+	wlc_prot_dynbwsw_config_t config_params;
+} BWL_POST_PACKED_STRUCT obss_config_params_t;
+
+
+
+/* Legacy structure to help keep backward compatible wl tool and tray app */
+
+#define	LEGACY_WL_BSS_INFO_VERSION	107	/* older version of wl_bss_info struct */
+
+typedef struct wl_bss_info_107 {
+	uint32		version;		/* version field */
+	uint32		length;			/* byte length of data in this record,
+						 * starting at version and including IEs
+						 */
+	struct ether_addr BSSID;
+	uint16		beacon_period;		/* units are Kusec */
+	uint16		capability;		/* Capability information */
+	uint8		SSID_len;
+	uint8		SSID[32];
+	struct {
+		uint	count;			/* # rates in this set */
+		uint8	rates[16];		/* rates in 500kbps units w/hi bit set if basic */
+	} rateset;				/* supported rates */
+	uint8		channel;		/* Channel no. */
+	uint16		atim_window;		/* units are Kusec */
+	uint8		dtim_period;		/* DTIM period */
+	int16		RSSI;			/* receive signal strength (in dBm) */
+	int8		phy_noise;		/* noise (in dBm) */
+	uint32		ie_length;		/* byte length of Information Elements */
+	/* variable length Information Elements */
+} wl_bss_info_107_t;
+
+/*
+ * Per-BSS information structure.
+ */
+
+#define	LEGACY2_WL_BSS_INFO_VERSION	108		/* old version of wl_bss_info struct */
+
+/* BSS info structure
+ * Applications MUST CHECK ie_offset field and length field to access IEs and
+ * next bss_info structure in a vector (in wl_scan_results_t)
+ */
+typedef struct wl_bss_info_108 {
+	uint32		version;		/* version field */
+	uint32		length;			/* byte length of data in this record,
+						 * starting at version and including IEs
+						 */
+	struct ether_addr BSSID;
+	uint16		beacon_period;		/* units are Kusec */
+	uint16		capability;		/* Capability information */
+	uint8		SSID_len;
+	uint8		SSID[32];
+	struct {
+		uint	count;			/* # rates in this set */
+		uint8	rates[16];		/* rates in 500kbps units w/hi bit set if basic */
+	} rateset;				/* supported rates */
+	chanspec_t	chanspec;		/* chanspec for bss */
+	uint16		atim_window;		/* units are Kusec */
+	uint8		dtim_period;		/* DTIM period */
+	int16		RSSI;			/* receive signal strength (in dBm) */
+	int8		phy_noise;		/* noise (in dBm) */
+
+	uint8		n_cap;			/* BSS is 802.11N Capable */
+	uint32		nbss_cap;		/* 802.11N BSS Capabilities (based on HT_CAP_*) */
+	uint8		ctl_ch;			/* 802.11N BSS control channel number */
+	uint32		reserved32[1];		/* Reserved for expansion of BSS properties */
+	uint8		flags;			/* flags */
+	uint8		reserved[3];		/* Reserved for expansion of BSS properties */
+	uint8		basic_mcs[MCSSET_LEN];	/* 802.11N BSS required MCS set */
+
+	uint16		ie_offset;		/* offset at which IEs start, from beginning */
+	uint32		ie_length;		/* byte length of Information Elements */
+	/* Add new fields here */
+	/* variable length Information Elements */
+} wl_bss_info_108_t;
+
+#define	WL_BSS_INFO_VERSION	109		/* current version of wl_bss_info struct */
+
+/* BSS info structure
+ * Applications MUST CHECK ie_offset field and length field to access IEs and
+ * next bss_info structure in a vector (in wl_scan_results_t)
+ */
+typedef struct wl_bss_info {
+	uint32		version;		/* version field */
+	uint32		length;			/* byte length of data in this record,
+						 * starting at version and including IEs
+						 */
+	struct ether_addr BSSID;
+	uint16		beacon_period;		/* units are Kusec */
+	uint16		capability;		/* Capability information */
+	uint8		SSID_len;
+	uint8		SSID[32];
+	struct {
+		uint	count;			/* # rates in this set */
+		uint8	rates[16];		/* rates in 500kbps units w/hi bit set if basic */
+	} rateset;				/* supported rates */
+	chanspec_t	chanspec;		/* chanspec for bss */
+	uint16		atim_window;		/* units are Kusec */
+	uint8		dtim_period;		/* DTIM period */
+	int16		RSSI;			/* receive signal strength (in dBm) */
+	int8		phy_noise;		/* noise (in dBm) */
+
+	uint8		n_cap;			/* BSS is 802.11N Capable */
+	uint32		nbss_cap;		/* 802.11N+AC BSS Capabilities */
+	uint8		ctl_ch;			/* 802.11N BSS control channel number */
+	uint8		padding1[3];		/* explicit struct alignment padding */
+	uint16		vht_rxmcsmap;	/* VHT rx mcs map (802.11ac IE, VHT_CAP_MCS_MAP_*) */
+	uint16		vht_txmcsmap;	/* VHT tx mcs map (802.11ac IE, VHT_CAP_MCS_MAP_*) */
+	uint8		flags;			/* flags */
+	uint8		vht_cap;		/* BSS is vht capable */
+	uint8		reserved[2];		/* Reserved for expansion of BSS properties */
+	uint8		basic_mcs[MCSSET_LEN];	/* 802.11N BSS required MCS set */
+
+	uint16		ie_offset;		/* offset at which IEs start, from beginning */
+	uint32		ie_length;		/* byte length of Information Elements */
+	int16		SNR;			/* average SNR of during frame reception */
+	/* Add new fields here */
+	/* variable length Information Elements */
+} wl_bss_info_t;
+
+#define	WL_GSCAN_BSS_INFO_VERSION	1	/* current version of wl_gscan_bss_info struct */
+#define WL_GSCAN_INFO_FIXED_FIELD_SIZE   (sizeof(wl_gscan_bss_info_t) - sizeof(wl_bss_info_t))
+
+typedef struct wl_gscan_bss_info {
+	uint32      timestamp[2];
+	wl_bss_info_t info;
+	/* variable length Information Elements */
+} wl_gscan_bss_info_t;
+
+
+typedef struct wl_bsscfg {
+	uint32  bsscfg_idx;
+	uint32  wsec;
+	uint32  WPA_auth;
+	uint32  wsec_index;
+	uint32  associated;
+	uint32  BSS;
+	uint32  phytest_on;
+	struct ether_addr   prev_BSSID;
+	struct ether_addr   BSSID;
+	uint32  targetbss_wpa2_flags;
+	uint32 assoc_type;
+	uint32 assoc_state;
+} wl_bsscfg_t;
+
+typedef struct wl_if_add {
+	uint32  bsscfg_flags;
+	uint32  if_flags;
+	uint32  ap;
+	struct ether_addr   mac_addr;
+} wl_if_add_t;
+
+typedef struct wl_bss_config {
+	uint32	atim_window;
+	uint32	beacon_period;
+	uint32	chanspec;
+} wl_bss_config_t;
+
+#define WL_BSS_USER_RADAR_CHAN_SELECT	0x1	/* User application will randomly select
+						 * radar channel.
+						 */
+
+#define DLOAD_HANDLER_VER			1	/* Downloader version */
+#define DLOAD_FLAG_VER_MASK		0xf000	/* Downloader version mask */
+#define DLOAD_FLAG_VER_SHIFT	12	/* Downloader version shift */
+
+#define DL_CRC_NOT_INUSE 			0x0001
+
+/* generic download types & flags */
+enum {
+	DL_TYPE_UCODE = 1,
+	DL_TYPE_CLM = 2
+};
+
+/* ucode type values */
+enum {
+	UCODE_FW,
+	INIT_VALS,
+	BS_INIT_VALS
+};
+
+struct wl_dload_data {
+	uint16 flag;
+	uint16 dload_type;
+	uint32 len;
+	uint32 crc;
+	uint8  data[1];
+};
+typedef struct wl_dload_data wl_dload_data_t;
+
+struct wl_ucode_info {
+	uint32 ucode_type;
+	uint32 num_chunks;
+	uint32 chunk_len;
+	uint32 chunk_num;
+	uint8  data_chunk[1];
+};
+typedef struct wl_ucode_info wl_ucode_info_t;
+
+struct wl_clm_dload_info {
+	uint32 ds_id;
+	uint32 clm_total_len;
+	uint32 num_chunks;
+	uint32 chunk_len;
+	uint32 chunk_offset;
+	uint8  data_chunk[1];
+};
+typedef struct wl_clm_dload_info wl_clm_dload_info_t;
+
+typedef struct wlc_ssid {
+	uint32		SSID_len;
+	uchar		SSID[DOT11_MAX_SSID_LEN];
+} wlc_ssid_t;
+
+typedef struct wlc_ssid_ext {
+	bool       hidden;
+	uint32		SSID_len;
+	uchar		SSID[DOT11_MAX_SSID_LEN];
+} wlc_ssid_ext_t;
+
+
+#define MAX_PREFERRED_AP_NUM     5
+typedef struct wlc_fastssidinfo {
+	uint32				SSID_channel[MAX_PREFERRED_AP_NUM];
+	wlc_ssid_t		SSID_info[MAX_PREFERRED_AP_NUM];
+} wlc_fastssidinfo_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wnm_url {
+	uint8   len;
+	uint8   data[1];
+} BWL_POST_PACKED_STRUCT wnm_url_t;
+
+typedef struct chan_scandata {
+	uint8		txpower;
+	uint8		pad;
+	chanspec_t	channel;	/* Channel num, bw, ctrl_sb and band */
+	uint32		channel_mintime;
+	uint32		channel_maxtime;
+} chan_scandata_t;
+
+typedef enum wl_scan_type {
+	EXTDSCAN_FOREGROUND_SCAN,
+	EXTDSCAN_BACKGROUND_SCAN,
+	EXTDSCAN_FORCEDBACKGROUND_SCAN
+} wl_scan_type_t;
+
+#define WLC_EXTDSCAN_MAX_SSID		5
+
+typedef struct wl_extdscan_params {
+	int8 		nprobes;		/* 0, passive, otherwise active */
+	int8    	split_scan;		/* split scan */
+	int8		band;			/* band */
+	int8		pad;
+	wlc_ssid_t 	ssid[WLC_EXTDSCAN_MAX_SSID]; /* ssid list */
+	uint32		tx_rate;		/* in 500ksec units */
+	wl_scan_type_t	scan_type;		/* enum */
+	int32 		channel_num;
+	chan_scandata_t channel_list[1];	/* list of chandata structs */
+} wl_extdscan_params_t;
+
+#define WL_EXTDSCAN_PARAMS_FIXED_SIZE 	(sizeof(wl_extdscan_params_t) - sizeof(chan_scandata_t))
+
+#define WL_SCAN_PARAMS_SSID_MAX 	10
+
+typedef struct wl_scan_params {
+	wlc_ssid_t ssid;		/* default: {0, ""} */
+	struct ether_addr bssid;	/* default: bcast */
+	int8 bss_type;			/* default: any,
+					 * DOT11_BSSTYPE_ANY/INFRASTRUCTURE/INDEPENDENT
+					 */
+	uint8 scan_type;		/* flags, 0 use default */
+	int32 nprobes;			/* -1 use default, number of probes per channel */
+	int32 active_time;		/* -1 use default, dwell time per channel for
+					 * active scanning
+					 */
+	int32 passive_time;		/* -1 use default, dwell time per channel
+					 * for passive scanning
+					 */
+	int32 home_time;		/* -1 use default, dwell time for the home channel
+					 * between channel scans
+					 */
+	int32 channel_num;		/* count of channels and ssids that follow
+					 *
+					 * low half is count of channels in channel_list, 0
+					 * means default (use all available channels)
+					 *
+					 * high half is entries in wlc_ssid_t array that
+					 * follows channel_list, aligned for int32 (4 bytes)
+					 * meaning an odd channel count implies a 2-byte pad
+					 * between end of channel_list and first ssid
+					 *
+					 * if ssid count is zero, single ssid in the fixed
+					 * parameter portion is assumed, otherwise ssid in
+					 * the fixed portion is ignored
+					 */
+	uint16 channel_list[1];		/* list of chanspecs */
+} wl_scan_params_t;
+
+/* size of wl_scan_params not including variable length array */
+#define WL_SCAN_PARAMS_FIXED_SIZE 64
+#define WL_MAX_ROAMSCAN_DATSZ	(WL_SCAN_PARAMS_FIXED_SIZE + (WL_NUMCHANNELS * sizeof(uint16)))
+
+#define ISCAN_REQ_VERSION 1
+
+/* incremental scan struct */
+typedef struct wl_iscan_params {
+	uint32 version;
+	uint16 action;
+	uint16 scan_duration;
+	wl_scan_params_t params;
+} wl_iscan_params_t;
+
+/* 3 fields + size of wl_scan_params, not including variable length array */
+#define WL_ISCAN_PARAMS_FIXED_SIZE (OFFSETOF(wl_iscan_params_t, params) + sizeof(wlc_ssid_t))
+
+typedef struct wl_scan_results {
+	uint32 buflen;
+	uint32 version;
+	uint32 count;
+	wl_bss_info_t bss_info[1];
+} wl_scan_results_t;
+
+/* size of wl_scan_results not including variable length array */
+#define WL_SCAN_RESULTS_FIXED_SIZE (sizeof(wl_scan_results_t) - sizeof(wl_bss_info_t))
+
+
+#define ESCAN_REQ_VERSION 1
+
+typedef struct wl_escan_params {
+	uint32 version;
+	uint16 action;
+	uint16 sync_id;
+	wl_scan_params_t params;
+} wl_escan_params_t;
+
+#define WL_ESCAN_PARAMS_FIXED_SIZE (OFFSETOF(wl_escan_params_t, params) + sizeof(wlc_ssid_t))
+
+typedef struct wl_escan_result {
+	uint32 buflen;
+	uint32 version;
+	uint16 sync_id;
+	uint16 bss_count;
+	wl_bss_info_t bss_info[1];
+} wl_escan_result_t;
+
+#define WL_ESCAN_RESULTS_FIXED_SIZE (sizeof(wl_escan_result_t) - sizeof(wl_bss_info_t))
+
+typedef struct wl_gscan_result {
+	uint32 buflen;
+	uint32 version;
+	wl_gscan_bss_info_t bss_info[1];
+} wl_gscan_result_t;
+
+#define WL_GSCAN_RESULTS_FIXED_SIZE (sizeof(wl_gscan_result_t) - sizeof(wl_gscan_bss_info_t))
+
+/* incremental scan results struct */
+typedef struct wl_iscan_results {
+	uint32 status;
+	wl_scan_results_t results;
+} wl_iscan_results_t;
+
+/* size of wl_iscan_results not including variable length array */
+#define WL_ISCAN_RESULTS_FIXED_SIZE \
+	(WL_SCAN_RESULTS_FIXED_SIZE + OFFSETOF(wl_iscan_results_t, results))
+
+#define SCANOL_PARAMS_VERSION	1
+
+typedef struct scanol_params {
+	uint32 version;
+	uint32 flags;	/* offload scanning flags */
+	int32 active_time;	/* -1 use default, dwell time per channel for active scanning */
+	int32 passive_time;	/* -1 use default, dwell time per channel for passive scanning */
+	int32 idle_rest_time;	/* -1 use default, time idle between scan cycle */
+	int32 idle_rest_time_multiplier;
+	int32 active_rest_time;
+	int32 active_rest_time_multiplier;
+	int32 scan_cycle_idle_rest_time;
+	int32 scan_cycle_idle_rest_multiplier;
+	int32 scan_cycle_active_rest_time;
+	int32 scan_cycle_active_rest_multiplier;
+	int32 max_rest_time;
+	int32 max_scan_cycles;
+	int32 nprobes;		/* -1 use default, number of probes per channel */
+	int32 scan_start_delay;
+	uint32 nchannels;
+	uint32 ssid_count;
+	wlc_ssid_t ssidlist[1];
+} scanol_params_t;
+
+typedef struct wl_probe_params {
+	wlc_ssid_t ssid;
+	struct ether_addr bssid;
+	struct ether_addr mac;
+} wl_probe_params_t;
+
+#define WL_MAXRATES_IN_SET		16	/* max # of rates in a rateset */
+typedef struct wl_rateset {
+	uint32	count;			/* # rates in this set */
+	uint8	rates[WL_MAXRATES_IN_SET];	/* rates in 500kbps units w/hi bit set if basic */
+} wl_rateset_t;
+
+typedef struct wl_rateset_args {
+	uint32	count;			/* # rates in this set */
+	uint8	rates[WL_MAXRATES_IN_SET];	/* rates in 500kbps units w/hi bit set if basic */
+	uint8   mcs[MCSSET_LEN];        /* supported mcs index bit map */
+	uint16 vht_mcs[VHT_CAP_MCS_MAP_NSS_MAX]; /* supported mcs index bit map per nss */
+} wl_rateset_args_t;
+
+#define TXBF_RATE_MCS_ALL		4
+#define TXBF_RATE_VHT_ALL		4
+#define TXBF_RATE_OFDM_ALL		8
+
+typedef struct wl_txbf_rateset {
+	uint8	txbf_rate_mcs[TXBF_RATE_MCS_ALL];	/* one for each stream */
+	uint8	txbf_rate_mcs_bcm[TXBF_RATE_MCS_ALL];	/* one for each stream */
+	uint16	txbf_rate_vht[TXBF_RATE_VHT_ALL];	/* one for each stream */
+	uint16	txbf_rate_vht_bcm[TXBF_RATE_VHT_ALL];	/* one for each stream */
+	uint8	txbf_rate_ofdm[TXBF_RATE_OFDM_ALL];	/* bitmap of ofdm rates that enables txbf */
+	uint8	txbf_rate_ofdm_bcm[TXBF_RATE_OFDM_ALL]; /* bitmap of ofdm rates that enables txbf */
+	uint8	txbf_rate_ofdm_cnt;
+	uint8	txbf_rate_ofdm_cnt_bcm;
+} wl_txbf_rateset_t;
+
+#define OFDM_RATE_MASK			0x0000007f
+typedef uint8 ofdm_rates_t;
+
+typedef struct wl_rates_info {
+	wl_rateset_t rs_tgt;
+	uint32 phy_type;
+	int32 bandtype;
+	uint8 cck_only;
+	uint8 rate_mask;
+	uint8 mcsallow;
+	uint8 bw;
+	uint8 txstreams;
+} wl_rates_info_t;
+
+/* uint32 list */
+typedef struct wl_uint32_list {
+	/* in - # of elements, out - # of entries */
+	uint32 count;
+	/* variable length uint32 list */
+	uint32 element[1];
+} wl_uint32_list_t;
+
+/* used for association with a specific BSSID and chanspec list */
+typedef struct wl_assoc_params {
+	struct ether_addr bssid;	/* 00:00:00:00:00:00: broadcast scan */
+	uint16 bssid_cnt;		/* 0: use chanspec_num, and the single bssid,
+					* otherwise count of chanspecs in chanspec_list
+					* AND paired bssids following chanspec_list
+					* also, chanspec_num has to be set to zero
+					* for bssid list to be used
+					*/
+	int32 chanspec_num;		/* 0: all available channels,
+					* otherwise count of chanspecs in chanspec_list
+					*/
+	chanspec_t chanspec_list[1];	/* list of chanspecs */
+} wl_assoc_params_t;
+
+#define WL_ASSOC_PARAMS_FIXED_SIZE 	OFFSETOF(wl_assoc_params_t, chanspec_list)
+
+/* used for reassociation/roam to a specific BSSID and channel */
+typedef wl_assoc_params_t wl_reassoc_params_t;
+#define WL_REASSOC_PARAMS_FIXED_SIZE	WL_ASSOC_PARAMS_FIXED_SIZE
+
+/* used for association to a specific BSSID and channel */
+typedef wl_assoc_params_t wl_join_assoc_params_t;
+#define WL_JOIN_ASSOC_PARAMS_FIXED_SIZE	WL_ASSOC_PARAMS_FIXED_SIZE
+
+/* used for join with or without a specific bssid and channel list */
+typedef struct wl_join_params {
+	wlc_ssid_t ssid;
+	wl_assoc_params_t params;	/* optional field, but it must include the fixed portion
+					 * of the wl_assoc_params_t struct when it does present.
+					 */
+} wl_join_params_t;
+
+#define WL_JOIN_PARAMS_FIXED_SIZE 	(OFFSETOF(wl_join_params_t, params) + \
+					 WL_ASSOC_PARAMS_FIXED_SIZE)
+/* scan params for extended join */
+typedef struct wl_join_scan_params {
+	uint8 scan_type;		/* 0 use default, active or passive scan */
+	int32 nprobes;			/* -1 use default, number of probes per channel */
+	int32 active_time;		/* -1 use default, dwell time per channel for
+					 * active scanning
+					 */
+	int32 passive_time;		/* -1 use default, dwell time per channel
+					 * for passive scanning
+					 */
+	int32 home_time;		/* -1 use default, dwell time for the home channel
+					 * between channel scans
+					 */
+} wl_join_scan_params_t;
+
+/* extended join params */
+typedef struct wl_extjoin_params {
+	wlc_ssid_t ssid;		/* {0, ""}: wildcard scan */
+	wl_join_scan_params_t scan;
+	wl_join_assoc_params_t assoc;	/* optional field, but it must include the fixed portion
+					 * of the wl_join_assoc_params_t struct when it does
+					 * present.
+					 */
+} wl_extjoin_params_t;
+#define WL_EXTJOIN_PARAMS_FIXED_SIZE 	(OFFSETOF(wl_extjoin_params_t, assoc) + \
+					 WL_JOIN_ASSOC_PARAMS_FIXED_SIZE)
+
+#define ANT_SELCFG_MAX		4	/* max number of antenna configurations */
+#define MAX_STREAMS_SUPPORTED	4	/* max number of streams supported */
+typedef struct {
+	uint8 ant_config[ANT_SELCFG_MAX];	/* antenna configuration */
+	uint8 num_antcfg;	/* number of available antenna configurations */
+} wlc_antselcfg_t;
+
+typedef struct {
+	uint32 duration;	/* millisecs spent sampling this channel */
+	uint32 congest_ibss;	/* millisecs in our bss (presumably this traffic will */
+				/*  move if cur bss moves channels) */
+	uint32 congest_obss;	/* traffic not in our bss */
+	uint32 interference;	/* millisecs detecting a non 802.11 interferer. */
+	uint32 timestamp;	/* second timestamp */
+} cca_congest_t;
+
+typedef struct {
+	chanspec_t chanspec;	/* Which channel? */
+	uint8 num_secs;		/* How many secs worth of data */
+	cca_congest_t  secs[1];	/* Data */
+} cca_congest_channel_req_t;
+
+
+/* interference sources */
+enum interference_source {
+	ITFR_NONE = 0,		/* interference */
+	ITFR_PHONE,		/* wireless phone */
+	ITFR_VIDEO_CAMERA,	/* wireless video camera */
+	ITFR_MICROWAVE_OVEN,	/* microwave oven */
+	ITFR_BABY_MONITOR,	/* wireless baby monitor */
+	ITFR_BLUETOOTH,		/* bluetooth */
+	ITFR_VIDEO_CAMERA_OR_BABY_MONITOR,	/* wireless camera or baby monitor */
+	ITFR_BLUETOOTH_OR_BABY_MONITOR,	/* bluetooth or baby monitor */
+	ITFR_VIDEO_CAMERA_OR_PHONE,	/* video camera or phone */
+	ITFR_UNIDENTIFIED	/* interference from unidentified source */
+};
+
+/* structure for interference source report */
+typedef struct {
+	uint32 flags;	/* flags.  bit definitions below */
+	uint32 source;	/* last detected interference source */
+	uint32 timestamp;	/* second timestamp on interferenced flag change */
+} interference_source_rep_t;
+
+#define WLC_CNTRY_BUF_SZ	4		/* Country string is 3 bytes + NUL */
+
+
+typedef struct wl_country {
+	char country_abbrev[WLC_CNTRY_BUF_SZ];	/* nul-terminated country code used in
+						 * the Country IE
+						 */
+	int32 rev;				/* revision specifier for ccode
+						 * on set, -1 indicates unspecified.
+						 * on get, rev >= 0
+						 */
+	char ccode[WLC_CNTRY_BUF_SZ];		/* nul-terminated built-in country code.
+						 * variable length, but fixed size in
+						 * struct allows simple allocation for
+						 * expected country strings <= 3 chars.
+						 */
+} wl_country_t;
+
+typedef struct wl_channels_in_country {
+	uint32 buflen;
+	uint32 band;
+	char country_abbrev[WLC_CNTRY_BUF_SZ];
+	uint32 count;
+	uint32 channel[1];
+} wl_channels_in_country_t;
+
+typedef struct wl_country_list {
+	uint32 buflen;
+	uint32 band_set;
+	uint32 band;
+	uint32 count;
+	char country_abbrev[1];
+} wl_country_list_t;
+
+typedef struct wl_rm_req_elt {
+	int8	type;
+	int8	flags;
+	chanspec_t	chanspec;
+	uint32	token;		/* token for this measurement */
+	uint32	tsf_h;		/* TSF high 32-bits of Measurement start time */
+	uint32	tsf_l;		/* TSF low 32-bits */
+	uint32	dur;		/* TUs */
+} wl_rm_req_elt_t;
+
+typedef struct wl_rm_req {
+	uint32	token;		/* overall measurement set token */
+	uint32	count;		/* number of measurement requests */
+	void	*cb;		/* completion callback function: may be NULL */
+	void	*cb_arg;	/* arg to completion callback function */
+	wl_rm_req_elt_t	req[1];	/* variable length block of requests */
+} wl_rm_req_t;
+#define WL_RM_REQ_FIXED_LEN	OFFSETOF(wl_rm_req_t, req)
+
+typedef struct wl_rm_rep_elt {
+	int8	type;
+	int8	flags;
+	chanspec_t	chanspec;
+	uint32	token;		/* token for this measurement */
+	uint32	tsf_h;		/* TSF high 32-bits of Measurement start time */
+	uint32	tsf_l;		/* TSF low 32-bits */
+	uint32	dur;		/* TUs */
+	uint32	len;		/* byte length of data block */
+	uint8	data[1];	/* variable length data block */
+} wl_rm_rep_elt_t;
+#define WL_RM_REP_ELT_FIXED_LEN	24	/* length excluding data block */
+
+#define WL_RPI_REP_BIN_NUM 8
+typedef struct wl_rm_rpi_rep {
+	uint8	rpi[WL_RPI_REP_BIN_NUM];
+	int8	rpi_max[WL_RPI_REP_BIN_NUM];
+} wl_rm_rpi_rep_t;
+
+typedef struct wl_rm_rep {
+	uint32	token;		/* overall measurement set token */
+	uint32	len;		/* length of measurement report block */
+	wl_rm_rep_elt_t	rep[1];	/* variable length block of reports */
+} wl_rm_rep_t;
+#define WL_RM_REP_FIXED_LEN	8
+
+
+typedef enum sup_auth_status {
+	/* Basic supplicant authentication states */
+	WLC_SUP_DISCONNECTED = 0,
+	WLC_SUP_CONNECTING,
+	WLC_SUP_IDREQUIRED,
+	WLC_SUP_AUTHENTICATING,
+	WLC_SUP_AUTHENTICATED,
+	WLC_SUP_KEYXCHANGE,
+	WLC_SUP_KEYED,
+	WLC_SUP_TIMEOUT,
+	WLC_SUP_LAST_BASIC_STATE,
+
+	/* Extended supplicant authentication states */
+	/* Waiting to receive handshake msg M1 */
+	WLC_SUP_KEYXCHANGE_WAIT_M1 = WLC_SUP_AUTHENTICATED,
+	/* Preparing to send handshake msg M2 */
+	WLC_SUP_KEYXCHANGE_PREP_M2 = WLC_SUP_KEYXCHANGE,
+	/* Waiting to receive handshake msg M3 */
+	WLC_SUP_KEYXCHANGE_WAIT_M3 = WLC_SUP_LAST_BASIC_STATE,
+	WLC_SUP_KEYXCHANGE_PREP_M4,	/* Preparing to send handshake msg M4 */
+	WLC_SUP_KEYXCHANGE_WAIT_G1,	/* Waiting to receive handshake msg G1 */
+	WLC_SUP_KEYXCHANGE_PREP_G2	/* Preparing to send handshake msg G2 */
+} sup_auth_status_t;
+
+typedef struct wl_wsec_key {
+	uint32		index;		/* key index */
+	uint32		len;		/* key length */
+	uint8		data[DOT11_MAX_KEY_SIZE];	/* key data */
+	uint32		pad_1[18];
+	uint32		algo;		/* CRYPTO_ALGO_AES_CCM, CRYPTO_ALGO_WEP128, etc */
+	uint32		flags;		/* misc flags */
+	uint32		pad_2[2];
+	int		pad_3;
+	int		iv_initialized;	/* has IV been initialized already? */
+	int		pad_4;
+	/* Rx IV */
+	struct {
+		uint32	hi;		/* upper 32 bits of IV */
+		uint16	lo;		/* lower 16 bits of IV */
+	} rxiv;
+	uint32		pad_5[2];
+	struct ether_addr ea;		/* per station */
+} wl_wsec_key_t;
+
+#define WSEC_MIN_PSK_LEN	8
+#define WSEC_MAX_PSK_LEN	64
+
+/* Flag for key material needing passhash'ing */
+#define WSEC_PASSPHRASE		(1<<0)
+
+/* receptacle for WLC_SET_WSEC_PMK parameter */
+typedef struct {
+	ushort	key_len;		/* octets in key material */
+	ushort	flags;			/* key handling qualification */
+	uint8	key[WSEC_MAX_PSK_LEN];	/* PMK material */
+} wsec_pmk_t;
+
+typedef struct _pmkid {
+	struct ether_addr	BSSID;
+	uint8			PMKID[WPA2_PMKID_LEN];
+} pmkid_t;
+
+typedef struct _pmkid_list {
+	uint32	npmkid;
+	pmkid_t	pmkid[1];
+} pmkid_list_t;
+
+typedef struct _pmkid_cand {
+	struct ether_addr	BSSID;
+	uint8			preauth;
+} pmkid_cand_t;
+
+typedef struct _pmkid_cand_list {
+	uint32	npmkid_cand;
+	pmkid_cand_t	pmkid_cand[1];
+} pmkid_cand_list_t;
+
+#define WL_STA_ANT_MAX		4	/* max possible rx antennas */
+
+typedef struct wl_assoc_info {
+	uint32		req_len;
+	uint32		resp_len;
+	uint32		flags;
+	struct dot11_assoc_req req;
+	struct ether_addr reassoc_bssid; /* used in reassoc's */
+	struct dot11_assoc_resp resp;
+} wl_assoc_info_t;
+
+typedef struct wl_led_info {
+	uint32      index;      /* led index */
+	uint32      behavior;
+	uint8       activehi;
+} wl_led_info_t;
+
+
+/* srom read/write struct passed through ioctl */
+typedef struct {
+	uint	byteoff;	/* byte offset */
+	uint	nbytes;		/* number of bytes */
+	uint16	buf[1];
+} srom_rw_t;
+
+#define CISH_FLAG_PCIECIS	(1 << 15)	/* write CIS format bit for PCIe CIS */
+/* similar cis (srom or otp) struct [iovar: may not be aligned] */
+typedef struct {
+	uint16	source;		/* cis source */
+	uint16	flags;		/* flags */
+	uint32	byteoff;	/* byte offset */
+	uint32	nbytes;		/* number of bytes */
+	/* data follows here */
+} cis_rw_t;
+
+/* R_REG and W_REG struct passed through ioctl */
+typedef struct {
+	uint32	byteoff;	/* byte offset of the field in d11regs_t */
+	uint32	val;		/* read/write value of the field */
+	uint32	size;		/* sizeof the field */
+	uint	band;		/* band (optional) */
+} rw_reg_t;
+
+/* Structure used by GET/SET_ATTEN ioctls - it controls power in b/g-band */
+/* PCL - Power Control Loop */
+typedef struct {
+	uint16	auto_ctrl;	/* WL_ATTEN_XX */
+	uint16	bb;		/* Baseband attenuation */
+	uint16	radio;		/* Radio attenuation */
+	uint16	txctl1;		/* Radio TX_CTL1 value */
+} atten_t;
+
+/* Per-AC retry parameters */
+struct wme_tx_params_s {
+	uint8  short_retry;
+	uint8  short_fallback;
+	uint8  long_retry;
+	uint8  long_fallback;
+	uint16 max_rate;  /* In units of 512 Kbps */
+};
+
+typedef struct wme_tx_params_s wme_tx_params_t;
+
+#define WL_WME_TX_PARAMS_IO_BYTES (sizeof(wme_tx_params_t) * AC_COUNT)
+
+typedef struct wl_plc_nodelist {
+	uint32 count;			/* Number of nodes */
+	struct _node {
+		struct ether_addr ea;	/* Node ether address */
+		uint32 node_type;	/* Node type */
+		uint32 cost;		/* PLC affinity */
+	} node[1];
+} wl_plc_nodelist_t;
+
+typedef struct wl_plc_params {
+	uint32	cmd;			/* Command */
+	uint8	plc_failover;		/* PLC failover control/status */
+	struct	ether_addr node_ea;	/* Node ether address */
+	uint32	cost;			/* Link cost or mac cost */
+} wl_plc_params_t;
+
+/* Used to get specific link/ac parameters */
+typedef struct {
+	int32 ac;
+	uint8 val;
+	struct ether_addr ea;
+} link_val_t;
+
+
+#define WL_PM_MUTE_TX_VER 1
+
+typedef struct wl_pm_mute_tx {
+	uint16 version;		/* version */
+	uint16 len;		/* length */
+	uint16 deadline;	/* deadline timer (in milliseconds) */
+	uint8  enable;		/* set to 1 to enable mode; set to 0 to disable it */
+} wl_pm_mute_tx_t;
+
+
+typedef struct {
+	uint16			ver;		/* version of this struct */
+	uint16			len;		/* length in bytes of this structure */
+	uint16			cap;		/* sta's advertised capabilities */
+	uint32			flags;		/* flags defined below */
+	uint32			idle;		/* time since data pkt rx'd from sta */
+	struct ether_addr	ea;		/* Station address */
+	wl_rateset_t		rateset;	/* rateset in use */
+	uint32			in;		/* seconds elapsed since associated */
+	uint32			listen_interval_inms; /* Min Listen interval in ms for this STA */
+	uint32			tx_pkts;	/* # of user packets transmitted (unicast) */
+	uint32			tx_failures;	/* # of user packets failed */
+	uint32			rx_ucast_pkts;	/* # of unicast packets received */
+	uint32			rx_mcast_pkts;	/* # of multicast packets received */
+	uint32			tx_rate;	/* Rate used by last tx frame */
+	uint32			rx_rate;	/* Rate of last successful rx frame */
+	uint32			rx_decrypt_succeeds;	/* # of packet decrypted successfully */
+	uint32			rx_decrypt_failures;	/* # of packet decrypted unsuccessfully */
+	uint32			tx_tot_pkts;	/* # of user tx pkts (ucast + mcast) */
+	uint32			rx_tot_pkts;	/* # of data packets recvd (uni + mcast) */
+	uint32			tx_mcast_pkts;	/* # of mcast pkts txed */
+	uint64			tx_tot_bytes;	/* data bytes txed (ucast + mcast) */
+	uint64			rx_tot_bytes;	/* data bytes recvd (ucast + mcast) */
+	uint64			tx_ucast_bytes;	/* data bytes txed (ucast) */
+	uint64			tx_mcast_bytes;	/* # data bytes txed (mcast) */
+	uint64			rx_ucast_bytes;	/* data bytes recvd (ucast) */
+	uint64			rx_mcast_bytes;	/* data bytes recvd (mcast) */
+	int8			rssi[WL_STA_ANT_MAX]; /* average rssi per antenna
+										   * of data frames
+										   */
+	int8			nf[WL_STA_ANT_MAX];	/* per antenna noise floor */
+	uint16			aid;		/* association ID */
+	uint16			ht_capabilities;	/* advertised ht caps */
+	uint16			vht_flags;		/* converted vht flags */
+	uint32			tx_pkts_retried;	/* # of frames where a retry was
+							 * necessary
+							 */
+	uint32			tx_pkts_retry_exhausted; /* # of user frames where a retry
+							  * was exhausted
+							  */
+	int8			rx_lastpkt_rssi[WL_STA_ANT_MAX]; /* Per antenna RSSI of last
+								  * received data frame.
+								  */
+	/* TX WLAN retry/failure statistics:
+	 * Separated for host requested frames and WLAN locally generated frames.
+	 * Include unicast frame only where the retries/failures can be counted.
+	 */
+	uint32			tx_pkts_total;		/* # user frames sent successfully */
+	uint32			tx_pkts_retries;	/* # user frames retries */
+	uint32			tx_pkts_fw_total;	/* # FW generated sent successfully */
+	uint32			tx_pkts_fw_retries;	/* # retries for FW generated frames */
+	uint32			tx_pkts_fw_retry_exhausted;	/* # FW generated where a retry
+								 * was exhausted
+								 */
+	uint32			rx_pkts_retried;	/* # rx with retry bit set */
+	uint32			tx_rate_fallback;	/* lowest fallback TX rate */
+} sta_info_t;
+
+#define WL_OLD_STAINFO_SIZE	OFFSETOF(sta_info_t, tx_tot_pkts)
+
+#define WL_STA_VER		4
+
+#define	WLC_NUMRATES	16	/* max # of rates in a rateset */
+
+typedef struct wlc_rateset {
+	uint32	count;			/* number of rates in rates[] */
+	uint8	rates[WLC_NUMRATES];	/* rates in 500kbps units w/hi bit set if basic */
+	uint8	htphy_membership;	/* HT PHY Membership */
+	uint8	mcs[MCSSET_LEN];	/* supported mcs index bit map */
+	uint16  vht_mcsmap;		/* supported vht mcs nss bit map */
+} wlc_rateset_t;
+
+/* Used to get specific STA parameters */
+typedef struct {
+	uint32	val;
+	struct ether_addr ea;
+} scb_val_t;
+
+/* Used by iovar versions of some ioctls, i.e. WLC_SCB_AUTHORIZE et al */
+typedef struct {
+	uint32 code;
+	scb_val_t ioctl_args;
+} authops_t;
+
+/* channel encoding */
+typedef struct channel_info {
+	int hw_channel;
+	int target_channel;
+	int scan_channel;
+} channel_info_t;
+
+/* For ioctls that take a list of MAC addresses */
+typedef struct maclist {
+	uint count;			/* number of MAC addresses */
+	struct ether_addr ea[1];	/* variable length array of MAC addresses */
+} maclist_t;
+
+/* get pkt count struct passed through ioctl */
+typedef struct get_pktcnt {
+	uint rx_good_pkt;
+	uint rx_bad_pkt;
+	uint tx_good_pkt;
+	uint tx_bad_pkt;
+	uint rx_ocast_good_pkt; /* unicast packets destined for others */
+} get_pktcnt_t;
+
+/* NINTENDO2 */
+#define LQ_IDX_MIN              0
+#define LQ_IDX_MAX              1
+#define LQ_IDX_AVG              2
+#define LQ_IDX_SUM              2
+#define LQ_IDX_LAST             3
+#define LQ_STOP_MONITOR         0
+#define LQ_START_MONITOR        1
+
+/* Get averages RSSI, Rx PHY rate and SNR values */
+typedef struct {
+	int rssi[LQ_IDX_LAST];  /* Array to keep min, max, avg rssi */
+	int snr[LQ_IDX_LAST];   /* Array to keep min, max, avg snr */
+	int isvalid;            /* Flag indicating whether above data is valid */
+} wl_lq_t; /* Link Quality */
+
+typedef enum wl_wakeup_reason_type {
+	LCD_ON = 1,
+	LCD_OFF,
+	DRC1_WAKE,
+	DRC2_WAKE,
+	REASON_LAST
+} wl_wr_type_t;
+
+typedef struct {
+/* Unique filter id */
+	uint32	id;
+
+/* stores the reason for the last wake up */
+	uint8	reason;
+} wl_wr_t;
+
+/* Get MAC specific rate histogram command */
+typedef struct {
+	struct	ether_addr ea;	/* MAC Address */
+	uint8	ac_cat;	/* Access Category */
+	uint8	num_pkts;	/* Number of packet entries to be averaged */
+} wl_mac_ratehisto_cmd_t;	/* MAC Specific Rate Histogram command */
+
+/* Get MAC rate histogram response */
+typedef struct {
+	uint32	rate[DOT11_RATE_MAX + 1];	/* Rates */
+	uint32	mcs[WL_RATESET_SZ_HT_MCS * WL_TX_CHAINS_MAX];	/* MCS counts */
+	uint32	vht[WL_RATESET_SZ_VHT_MCS][WL_TX_CHAINS_MAX];	/* VHT counts */
+	uint32	tsf_timer[2][2];	/* Start and End time for 8bytes value */
+} wl_mac_ratehisto_res_t;	/* MAC Specific Rate Histogram Response */
+
+/* Linux network driver ioctl encoding */
+typedef struct wl_ioctl {
+	uint cmd;	/* common ioctl definition */
+	void *buf;	/* pointer to user buffer */
+	uint len;	/* length of user buffer */
+	uint8 set;		/* 1=set IOCTL; 0=query IOCTL */
+	uint used;	/* bytes read or written (optional) */
+	uint needed;	/* bytes needed (optional) */
+} wl_ioctl_t;
+
+#ifdef CONFIG_COMPAT
+typedef struct compat_wl_ioctl {
+	uint cmd;	/* common ioctl definition */
+	uint32 buf;	/* pointer to user buffer */
+	uint len;	/* length of user buffer */
+	uint8 set;		/* 1=set IOCTL; 0=query IOCTL */
+	uint used;	/* bytes read or written (optional) */
+	uint needed;	/* bytes needed (optional) */
+} compat_wl_ioctl_t;
+#endif /* CONFIG_COMPAT */
+
+#define WL_NUM_RATES_CCK			4 /* 1, 2, 5.5, 11 Mbps */
+#define WL_NUM_RATES_OFDM			8 /* 6, 9, 12, 18, 24, 36, 48, 54 Mbps SISO/CDD */
+#define WL_NUM_RATES_MCS_1STREAM	8 /* MCS 0-7 1-stream rates - SISO/CDD/STBC/MCS */
+#define WL_NUM_RATES_EXTRA_VHT		2 /* Additional VHT 11AC rates */
+#define WL_NUM_RATES_VHT			10
+#define WL_NUM_RATES_MCS32			1
+
+
+/*
+ * Structure for passing hardware and software
+ * revision info up from the driver.
+ */
+typedef struct wlc_rev_info {
+	uint		vendorid;	/* PCI vendor id */
+	uint		deviceid;	/* device id of chip */
+	uint		radiorev;	/* radio revision */
+	uint		chiprev;	/* chip revision */
+	uint		corerev;	/* core revision */
+	uint		boardid;	/* board identifier (usu. PCI sub-device id) */
+	uint		boardvendor;	/* board vendor (usu. PCI sub-vendor id) */
+	uint		boardrev;	/* board revision */
+	uint		driverrev;	/* driver version */
+	uint		ucoderev;	/* microcode version */
+	uint		bus;		/* bus type */
+	uint		chipnum;	/* chip number */
+	uint		phytype;	/* phy type */
+	uint		phyrev;		/* phy revision */
+	uint		anarev;		/* anacore rev */
+	uint		chippkg;	/* chip package info */
+	uint		nvramrev;	/* nvram revision number */
+} wlc_rev_info_t;
+
+#define WL_REV_INFO_LEGACY_LENGTH	48
+
+#define WL_BRAND_MAX 10
+typedef struct wl_instance_info {
+	uint instance;
+	char brand[WL_BRAND_MAX];
+} wl_instance_info_t;
+
+/* structure to change size of tx fifo */
+typedef struct wl_txfifo_sz {
+	uint16	magic;
+	uint16	fifo;
+	uint16	size;
+} wl_txfifo_sz_t;
+
+/* Transfer info about an IOVar from the driver */
+/* Max supported IOV name size in bytes, + 1 for nul termination */
+#define WLC_IOV_NAME_LEN 30
+typedef struct wlc_iov_trx_s {
+	uint8 module;
+	uint8 type;
+	char name[WLC_IOV_NAME_LEN];
+} wlc_iov_trx_t;
+
+/* bump this number if you change the ioctl interface */
+#define WLC_IOCTL_VERSION	2
+#define WLC_IOCTL_VERSION_LEGACY_IOTYPES	1
+
+#ifdef CONFIG_USBRNDIS_RETAIL
+/* struct passed in for WLC_NDCONFIG_ITEM */
+typedef struct {
+	char *name;
+	void *param;
+} ndconfig_item_t;
+#endif
+
+
+#define WL_PHY_PAVARS_LEN	32	/* Phy type, Band range, chain, a1[0], b0[0], b1[0] ... */
+
+#define WL_PHY_PAVAR_VER	1	/* pavars version */
+#define WL_PHY_PAVARS2_NUM	3	/* a1, b0, b1 */
+typedef struct wl_pavars2 {
+	uint16 ver;		/* version of this struct */
+	uint16 len;		/* len of this structure */
+	uint16 inuse;		/* driver return 1 for a1,b0,b1 in current band range */
+	uint16 phy_type;	/* phy type */
+	uint16 bandrange;
+	uint16 chain;
+	uint16 inpa[WL_PHY_PAVARS2_NUM];	/* phy pavars for one band range */
+} wl_pavars2_t;
+
+typedef struct wl_po {
+	uint16	phy_type;	/* Phy type */
+	uint16	band;
+	uint16	cckpo;
+	uint32	ofdmpo;
+	uint16	mcspo[8];
+} wl_po_t;
+
+#define WL_NUM_RPCALVARS 5	/* number of rpcal vars */
+
+typedef struct wl_rpcal {
+	uint16 value;
+	uint16 update;
+} wl_rpcal_t;
+
+typedef struct wl_aci_args {
+	int enter_aci_thresh; /* Trigger level to start detecting ACI */
+	int exit_aci_thresh; /* Trigger level to exit ACI mode */
+	int usec_spin; /* microsecs to delay between rssi samples */
+	int glitch_delay; /* interval between ACI scans when glitch count is consistently high */
+	uint16 nphy_adcpwr_enter_thresh;	/* ADC power to enter ACI mitigation mode */
+	uint16 nphy_adcpwr_exit_thresh;	/* ADC power to exit ACI mitigation mode */
+	uint16 nphy_repeat_ctr;		/* Number of tries per channel to compute power */
+	uint16 nphy_num_samples;	/* Number of samples to compute power on one channel */
+	uint16 nphy_undetect_window_sz;	/* num of undetects to exit ACI Mitigation mode */
+	uint16 nphy_b_energy_lo_aci;	/* low ACI power energy threshold for bphy */
+	uint16 nphy_b_energy_md_aci;	/* mid ACI power energy threshold for bphy */
+	uint16 nphy_b_energy_hi_aci;	/* high ACI power energy threshold for bphy */
+	uint16 nphy_noise_noassoc_glitch_th_up; /* wl interference 4 */
+	uint16 nphy_noise_noassoc_glitch_th_dn;
+	uint16 nphy_noise_assoc_glitch_th_up;
+	uint16 nphy_noise_assoc_glitch_th_dn;
+	uint16 nphy_noise_assoc_aci_glitch_th_up;
+	uint16 nphy_noise_assoc_aci_glitch_th_dn;
+	uint16 nphy_noise_assoc_enter_th;
+	uint16 nphy_noise_noassoc_enter_th;
+	uint16 nphy_noise_assoc_rx_glitch_badplcp_enter_th;
+	uint16 nphy_noise_noassoc_crsidx_incr;
+	uint16 nphy_noise_assoc_crsidx_incr;
+	uint16 nphy_noise_crsidx_decr;
+} wl_aci_args_t;
+
+#define WL_ACI_ARGS_LEGACY_LENGTH	16	/* bytes of pre NPHY aci args */
+#define	WL_SAMPLECOLLECT_T_VERSION	2	/* version of wl_samplecollect_args_t struct */
+typedef struct wl_samplecollect_args {
+	/* version 0 fields */
+	uint8 coll_us;
+	int cores;
+	/* add'l version 1 fields */
+	uint16 version;     /* see definition of WL_SAMPLECOLLECT_T_VERSION */
+	uint16 length;      /* length of entire structure */
+	int8 trigger;
+	uint16 timeout;
+	uint16 mode;
+	uint32 pre_dur;
+	uint32 post_dur;
+	uint8 gpio_sel;
+	uint8 downsamp;
+	uint8 be_deaf;
+	uint8 agc;		/* loop from init gain and going down */
+	uint8 filter;		/* override high pass corners to lowest */
+	/* add'l version 2 fields */
+	uint8 trigger_state;
+	uint8 module_sel1;
+	uint8 module_sel2;
+	uint16 nsamps;
+	int bitStart;
+	uint32 gpioCapMask;
+} wl_samplecollect_args_t;
+
+#define	WL_SAMPLEDATA_T_VERSION		1	/* version of wl_samplecollect_args_t struct */
+/* version for unpacked sample data, int16 {(I,Q),Core(0..N)} */
+#define	WL_SAMPLEDATA_T_VERSION_SPEC_AN 2
+
+typedef struct wl_sampledata {
+	uint16 version;	/* structure version */
+	uint16 size;	/* size of structure */
+	uint16 tag;	/* Header/Data */
+	uint16 length;	/* data length */
+	uint32 flag;	/* bit def */
+} wl_sampledata_t;
+
+
+/* WL_OTA START */
+/* OTA Test Status */
+enum {
+	WL_OTA_TEST_IDLE = 0,	/* Default Idle state */
+	WL_OTA_TEST_ACTIVE = 1,	/* Test Running */
+	WL_OTA_TEST_SUCCESS = 2,	/* Successfully Finished Test */
+	WL_OTA_TEST_FAIL = 3	/* Test Failed in the Middle */
+};
+/* OTA SYNC Status */
+enum {
+	WL_OTA_SYNC_IDLE = 0,	/* Idle state */
+	WL_OTA_SYNC_ACTIVE = 1,	/* Waiting for Sync */
+	WL_OTA_SYNC_FAIL = 2	/* Sync pkt not recieved */
+};
+
+/* Various error states dut can get stuck during test */
+enum {
+	WL_OTA_SKIP_TEST_CAL_FAIL = 1,		/* Phy calibration failed */
+	WL_OTA_SKIP_TEST_SYNCH_FAIL = 2,		/* Sync Packet not recieved */
+	WL_OTA_SKIP_TEST_FILE_DWNLD_FAIL = 3,	/* Cmd flow file download failed */
+	WL_OTA_SKIP_TEST_NO_TEST_FOUND = 4,	/* No test found in Flow file */
+	WL_OTA_SKIP_TEST_WL_NOT_UP = 5,		/* WL UP failed */
+	WL_OTA_SKIP_TEST_UNKNOWN_CALL		/* Unintentional scheduling on ota test */
+};
+
+/* Differentiator for ota_tx and ota_rx */
+enum {
+	WL_OTA_TEST_TX = 0,		/* ota_tx */
+	WL_OTA_TEST_RX = 1,		/* ota_rx */
+};
+
+/* Catch 3 modes of operation: 20Mhz, 40Mhz, 20 in 40 Mhz */
+enum {
+	WL_OTA_TEST_BW_20_IN_40MHZ = 0,	/* 20 in 40 operation */
+	WL_OTA_TEST_BW_20MHZ = 1,		/* 20 Mhz operation */
+	WL_OTA_TEST_BW_40MHZ = 2		/* full 40Mhz operation */
+};
+typedef struct ota_rate_info {
+	uint8 rate_cnt;					/* Total number of rates */
+	uint8 rate_val_mbps[WL_OTA_TEST_MAX_NUM_RATE];	/* array of rates from 1mbps to 130mbps */
+							/* for legacy rates : ratein mbps * 2 */
+							/* for HT rates : mcs index */
+} ota_rate_info_t;
+
+typedef struct ota_power_info {
+	int8 pwr_ctrl_on;	/* power control on/off */
+	int8 start_pwr;		/* starting power/index */
+	int8 delta_pwr;		/* delta power/index */
+	int8 end_pwr;		/* end power/index */
+} ota_power_info_t;
+
+typedef struct ota_packetengine {
+	uint16 delay;           /* Inter-packet delay */
+				/* for ota_tx, delay is tx ifs in micro seconds */
+				/* for ota_rx, delay is wait time in milliseconds */
+	uint16 nframes;         /* Number of frames */
+	uint16 length;          /* Packet length */
+} ota_packetengine_t;
+
+/* Test info vector */
+typedef struct wl_ota_test_args {
+	uint8 cur_test;			/* test phase */
+	uint8 chan;			/* channel */
+	uint8 bw;			/* bandwidth */
+	uint8 control_band;		/* control band */
+	uint8 stf_mode;			/* stf mode */
+	ota_rate_info_t rt_info;	/* Rate info */
+	ota_packetengine_t pkteng;	/* packeteng info */
+	uint8 txant;			/* tx antenna */
+	uint8 rxant;			/* rx antenna */
+	ota_power_info_t pwr_info;	/* power sweep info */
+	uint8 wait_for_sync;		/* wait for sync or not */
+} wl_ota_test_args_t;
+
+typedef struct wl_ota_test_vector {
+	wl_ota_test_args_t test_arg[WL_OTA_TEST_MAX_NUM_SEQ];	/* Test argument struct */
+	uint16 test_cnt;					/* Total no of test */
+	uint8 file_dwnld_valid;					/* File successfully downloaded */
+	uint8 sync_timeout;					/* sync packet timeout */
+	int8 sync_fail_action;					/* sync fail action */
+	struct ether_addr sync_mac;				/* macaddress for sync pkt */
+	struct ether_addr tx_mac;				/* macaddress for tx */
+	struct ether_addr rx_mac;				/* macaddress for rx */
+	int8 loop_test;					/* dbg feature to loop the test */
+} wl_ota_test_vector_t;
+
+
+/* struct copied back form dongle to host to query the status */
+typedef struct wl_ota_test_status {
+	int16 cur_test_cnt;		/* test phase */
+	int8 skip_test_reason;		/* skip test reasoin */
+	wl_ota_test_args_t test_arg;	/* cur test arg details */
+	uint16 test_cnt;		/* total no of test downloaded */
+	uint8 file_dwnld_valid;		/* file successfully downloaded ? */
+	uint8 sync_timeout;		/* sync timeout */
+	int8 sync_fail_action;		/* sync fail action */
+	struct ether_addr sync_mac;	/* macaddress for sync pkt */
+	struct ether_addr tx_mac;	/* tx mac address */
+	struct ether_addr rx_mac;	/* rx mac address */
+	uint8  test_stage;		/* check the test status */
+	int8 loop_test;		/* Debug feature to puts test enfine in a loop */
+	uint8 sync_status;		/* sync status */
+} wl_ota_test_status_t;
+
+/* WL_OTA END */
+
+/* wl_radar_args_t */
+typedef struct {
+	int npulses;	/* required number of pulses at n * t_int */
+	int ncontig;	/* required number of pulses at t_int */
+	int min_pw;	/* minimum pulse width (20 MHz clocks) */
+	int max_pw;	/* maximum pulse width (20 MHz clocks) */
+	uint16 thresh0;	/* Radar detection, thresh 0 */
+	uint16 thresh1;	/* Radar detection, thresh 1 */
+	uint16 blank;	/* Radar detection, blank control */
+	uint16 fmdemodcfg;	/* Radar detection, fmdemod config */
+	int npulses_lp;  /* Radar detection, minimum long pulses */
+	int min_pw_lp; /* Minimum pulsewidth for long pulses */
+	int max_pw_lp; /* Maximum pulsewidth for long pulses */
+	int min_fm_lp; /* Minimum fm for long pulses */
+	int max_span_lp;  /* Maximum deltat for long pulses */
+	int min_deltat; /* Minimum spacing between pulses */
+	int max_deltat; /* Maximum spacing between pulses */
+	uint16 autocorr;	/* Radar detection, autocorr on or off */
+	uint16 st_level_time;	/* Radar detection, start_timing level */
+	uint16 t2_min; /* minimum clocks needed to remain in state 2 */
+	uint32 version; /* version */
+	uint32 fra_pulse_err;	/* sample error margin for detecting French radar pulsed */
+	int npulses_fra;  /* Radar detection, minimum French pulses set */
+	int npulses_stg2;  /* Radar detection, minimum staggered-2 pulses set */
+	int npulses_stg3;  /* Radar detection, minimum staggered-3 pulses set */
+	uint16 percal_mask;	/* defines which period cal is masked from radar detection */
+	int quant;	/* quantization resolution to pulse positions */
+	uint32 min_burst_intv_lp;	/* minimum burst to burst interval for bin3 radar */
+	uint32 max_burst_intv_lp;	/* maximum burst to burst interval for bin3 radar */
+	int nskip_rst_lp;	/* number of skipped pulses before resetting lp buffer */
+	int max_pw_tol;	/* maximum tollerance allowed in detected pulse width for radar detection */
+	uint16 feature_mask; /* 16-bit mask to specify enabled features */
+} wl_radar_args_t;
+
+#define WL_RADAR_ARGS_VERSION 2
+
+typedef struct {
+	uint32 version; /* version */
+	uint16 thresh0_20_lo;	/* Radar detection, thresh 0 (range 5250-5350MHz) for BW 20MHz */
+	uint16 thresh1_20_lo;	/* Radar detection, thresh 1 (range 5250-5350MHz) for BW 20MHz */
+	uint16 thresh0_40_lo;	/* Radar detection, thresh 0 (range 5250-5350MHz) for BW 40MHz */
+	uint16 thresh1_40_lo;	/* Radar detection, thresh 1 (range 5250-5350MHz) for BW 40MHz */
+	uint16 thresh0_80_lo;	/* Radar detection, thresh 0 (range 5250-5350MHz) for BW 80MHz */
+	uint16 thresh1_80_lo;	/* Radar detection, thresh 1 (range 5250-5350MHz) for BW 80MHz */
+	uint16 thresh0_20_hi;	/* Radar detection, thresh 0 (range 5470-5725MHz) for BW 20MHz */
+	uint16 thresh1_20_hi;	/* Radar detection, thresh 1 (range 5470-5725MHz) for BW 20MHz */
+	uint16 thresh0_40_hi;	/* Radar detection, thresh 0 (range 5470-5725MHz) for BW 40MHz */
+	uint16 thresh1_40_hi;	/* Radar detection, thresh 1 (range 5470-5725MHz) for BW 40MHz */
+	uint16 thresh0_80_hi;	/* Radar detection, thresh 0 (range 5470-5725MHz) for BW 80MHz */
+	uint16 thresh1_80_hi;	/* Radar detection, thresh 1 (range 5470-5725MHz) for BW 80MHz */
+#ifdef WL11AC160
+	uint16 thresh0_160_lo;	/* Radar detection, thresh 0 (range 5250-5350MHz) for BW 160MHz */
+	uint16 thresh1_160_lo;	/* Radar detection, thresh 1 (range 5250-5350MHz) for BW 160MHz */
+	uint16 thresh0_160_hi;	/* Radar detection, thresh 0 (range 5470-5725MHz) for BW 160MHz */
+	uint16 thresh1_160_hi;	/* Radar detection, thresh 1 (range 5470-5725MHz) for BW 160MHz */
+#endif /* WL11AC160 */
+} wl_radar_thr_t;
+
+#define WL_RADAR_THR_VERSION	2
+
+/* RSSI per antenna */
+typedef struct {
+	uint32	version;		/* version field */
+	uint32	count;			/* number of valid antenna rssi */
+	int8 rssi_ant[WL_RSSI_ANT_MAX];	/* rssi per antenna */
+} wl_rssi_ant_t;
+
+/* data structure used in 'dfs_status' wl interface, which is used to query dfs status */
+typedef struct {
+	uint state;		/* noted by WL_DFS_CACSTATE_XX. */
+	uint duration;		/* time spent in ms in state. */
+	/* as dfs enters ISM state, it removes the operational channel from quiet channel
+	 * list and notes the channel in channel_cleared. set to 0 if no channel is cleared
+	 */
+	chanspec_t chanspec_cleared;
+	/* chanspec cleared used to be a uint, add another to uint16 to maintain size */
+	uint16 pad;
+} wl_dfs_status_t;
+
+/* data structure used in 'radar_status' wl interface, which is use to query radar det status */
+typedef struct {
+	bool detected;
+	int count;
+	bool pretended;
+	uint32 radartype;
+	uint32 timenow;
+	uint32 timefromL;
+	int lp_csect_single;
+	int detected_pulse_index;
+	int nconsecq_pulses;
+	chanspec_t ch;
+	int pw[10];
+	int intv[10];
+	int fm[10];
+} wl_radar_status_t;
+
+#define NUM_PWRCTRL_RATES 12
+
+typedef struct {
+	uint8 txpwr_band_max[NUM_PWRCTRL_RATES];	/* User set target */
+	uint8 txpwr_limit[NUM_PWRCTRL_RATES];		/* reg and local power limit */
+	uint8 txpwr_local_max;				/* local max according to the AP */
+	uint8 txpwr_local_constraint;			/* local constraint according to the AP */
+	uint8 txpwr_chan_reg_max;			/* Regulatory max for this channel */
+	uint8 txpwr_target[2][NUM_PWRCTRL_RATES];	/* Latest target for 2.4 and 5 Ghz */
+	uint8 txpwr_est_Pout[2];			/* Latest estimate for 2.4 and 5 Ghz */
+	uint8 txpwr_opo[NUM_PWRCTRL_RATES];		/* On G phy, OFDM power offset */
+	uint8 txpwr_bphy_cck_max[NUM_PWRCTRL_RATES];	/* Max CCK power for this band (SROM) */
+	uint8 txpwr_bphy_ofdm_max;			/* Max OFDM power for this band (SROM) */
+	uint8 txpwr_aphy_max[NUM_PWRCTRL_RATES];	/* Max power for A band (SROM) */
+	int8  txpwr_antgain[2];				/* Ant gain for each band - from SROM */
+	uint8 txpwr_est_Pout_gofdm;			/* Pwr estimate for 2.4 OFDM */
+} tx_power_legacy_t;
+
+#define WL_TX_POWER_RATES_LEGACY    45
+#define WL_TX_POWER_MCS20_FIRST         12
+#define WL_TX_POWER_MCS20_NUM           16
+#define WL_TX_POWER_MCS40_FIRST         28
+#define WL_TX_POWER_MCS40_NUM           17
+
+typedef struct {
+	uint32 flags;
+	chanspec_t chanspec;                 /* txpwr report for this channel */
+	chanspec_t local_chanspec;           /* channel on which we are associated */
+	uint8 local_max;                 /* local max according to the AP */
+	uint8 local_constraint;              /* local constraint according to the AP */
+	int8  antgain[2];                /* Ant gain for each band - from SROM */
+	uint8 rf_cores;                  /* count of RF Cores being reported */
+	uint8 est_Pout[4];                           /* Latest tx power out estimate per RF
+							  * chain without adjustment
+							  */
+	uint8 est_Pout_cck;                          /* Latest CCK tx power out estimate */
+	uint8 user_limit[WL_TX_POWER_RATES_LEGACY];  /* User limit */
+	uint8 reg_limit[WL_TX_POWER_RATES_LEGACY];   /* Regulatory power limit */
+	uint8 board_limit[WL_TX_POWER_RATES_LEGACY]; /* Max power board can support (SROM) */
+	uint8 target[WL_TX_POWER_RATES_LEGACY];      /* Latest target power */
+} tx_power_legacy2_t;
+
+/* TX Power index defines */
+#define WLC_NUM_RATES_CCK       WL_NUM_RATES_CCK
+#define WLC_NUM_RATES_OFDM      WL_NUM_RATES_OFDM
+#define WLC_NUM_RATES_MCS_1_STREAM  WL_NUM_RATES_MCS_1STREAM
+#define WLC_NUM_RATES_MCS_2_STREAM  WL_NUM_RATES_MCS_1STREAM
+#define WLC_NUM_RATES_MCS32     WL_NUM_RATES_MCS32
+#define WL_TX_POWER_CCK_NUM     WL_NUM_RATES_CCK
+#define WL_TX_POWER_OFDM_NUM        WL_NUM_RATES_OFDM
+#define WL_TX_POWER_MCS_1_STREAM_NUM    WL_NUM_RATES_MCS_1STREAM
+#define WL_TX_POWER_MCS_2_STREAM_NUM    WL_NUM_RATES_MCS_1STREAM
+#define WL_TX_POWER_MCS_32_NUM      WL_NUM_RATES_MCS32
+
+#define WL_NUM_2x2_ELEMENTS		4
+#define WL_NUM_3x3_ELEMENTS		6
+
+typedef struct {
+	uint16 ver;				/* version of this struct */
+	uint16 len;				/* length in bytes of this structure */
+	uint32 flags;
+	chanspec_t chanspec;			/* txpwr report for this channel */
+	chanspec_t local_chanspec;		/* channel on which we are associated */
+	uint32     buflen;			/* ppr buffer length */
+	uint8      pprbuf[1];			/* Latest target power buffer */
+} wl_txppr_t;
+
+#define WL_TXPPR_VERSION	1
+#define WL_TXPPR_LENGTH	(sizeof(wl_txppr_t))
+#define TX_POWER_T_VERSION	45
+/* number of ppr serialization buffers, it should be reg, board and target */
+#define WL_TXPPR_SER_BUF_NUM	(3)
+
+typedef struct chanspec_txpwr_max {
+	chanspec_t chanspec;   /* chanspec */
+	uint8 txpwr_max;       /* max txpwr in all the rates */
+	uint8 padding;
+} chanspec_txpwr_max_t;
+
+typedef struct  wl_chanspec_txpwr_max {
+	uint16 ver;			/* version of this struct */
+	uint16 len;			/* length in bytes of this structure */
+	uint32 count;		/* number of elements of (chanspec, txpwr_max) pair */
+	chanspec_txpwr_max_t txpwr[1];	/* array of (chanspec, max_txpwr) pair */
+} wl_chanspec_txpwr_max_t;
+
+#define WL_CHANSPEC_TXPWR_MAX_VER	1
+#define WL_CHANSPEC_TXPWR_MAX_LEN	(sizeof(wl_chanspec_txpwr_max_t))
+
+typedef struct tx_inst_power {
+	uint8 txpwr_est_Pout[2];			/* Latest estimate for 2.4 and 5 Ghz */
+	uint8 txpwr_est_Pout_gofdm;			/* Pwr estimate for 2.4 OFDM */
+} tx_inst_power_t;
+
+#define WL_NUM_TXCHAIN_MAX	4
+typedef struct wl_txchain_pwr_offsets {
+	int8 offset[WL_NUM_TXCHAIN_MAX];	/* quarter dBm signed offset for each chain */
+} wl_txchain_pwr_offsets_t;
+/* maximum channels returned by the get valid channels iovar */
+#define WL_NUMCHANNELS		64
+
+/*
+ * Join preference iovar value is an array of tuples. Each tuple has a one-byte type,
+ * a one-byte length, and a variable length value.  RSSI type tuple must be present
+ * in the array.
+ *
+ * Types are defined in "join preference types" section.
+ *
+ * Length is the value size in octets. It is reserved for WL_JOIN_PREF_WPA type tuple
+ * and must be set to zero.
+ *
+ * Values are defined below.
+ *
+ * 1. RSSI - 2 octets
+ * offset 0: reserved
+ * offset 1: reserved
+ *
+ * 2. WPA - 2 + 12 * n octets (n is # tuples defined below)
+ * offset 0: reserved
+ * offset 1: # of tuples
+ * offset 2: tuple 1
+ * offset 14: tuple 2
+ * ...
+ * offset 2 + 12 * (n - 1) octets: tuple n
+ *
+ * struct wpa_cfg_tuple {
+ *   uint8 akm[DOT11_OUI_LEN+1];     akm suite
+ *   uint8 ucipher[DOT11_OUI_LEN+1]; unicast cipher suite
+ *   uint8 mcipher[DOT11_OUI_LEN+1]; multicast cipher suite
+ * };
+ *
+ * multicast cipher suite can be specified as a specific cipher suite or WL_WPA_ACP_MCS_ANY.
+ *
+ * 3. BAND - 2 octets
+ * offset 0: reserved
+ * offset 1: see "band preference" and "band types"
+ *
+ * 4. BAND RSSI - 2 octets
+ * offset 0: band types
+ * offset 1: +ve RSSI boost value in dB
+ */
+
+struct tsinfo_arg {
+	uint8 octets[3];
+};
+
+#define RATE_CCK_1MBPS 0
+#define RATE_CCK_2MBPS 1
+#define RATE_CCK_5_5MBPS 2
+#define RATE_CCK_11MBPS 3
+
+#define RATE_LEGACY_OFDM_6MBPS 0
+#define RATE_LEGACY_OFDM_9MBPS 1
+#define RATE_LEGACY_OFDM_12MBPS 2
+#define RATE_LEGACY_OFDM_18MBPS 3
+#define RATE_LEGACY_OFDM_24MBPS 4
+#define RATE_LEGACY_OFDM_36MBPS 5
+#define RATE_LEGACY_OFDM_48MBPS 6
+#define RATE_LEGACY_OFDM_54MBPS 7
+
+#define WL_BSSTRANS_RSSI_RATE_MAP_VERSION 1
+
+typedef struct wl_bsstrans_rssi {
+	int8 rssi_2g;	/* RSSI in dbm for 2.4 G */
+	int8 rssi_5g;	/* RSSI in dbm for 5G, unused for cck */
+} wl_bsstrans_rssi_t;
+
+#define RSSI_RATE_MAP_MAX_STREAMS 4	/* max streams supported */
+
+/* RSSI to rate mapping, all 20Mhz, no SGI */
+typedef struct wl_bsstrans_rssi_rate_map {
+	uint16 ver;
+	uint16 len; /* length of entire structure */
+	wl_bsstrans_rssi_t cck[WL_NUM_RATES_CCK]; /* 2.4G only */
+	wl_bsstrans_rssi_t ofdm[WL_NUM_RATES_OFDM]; /* 6 to 54mbps */
+	wl_bsstrans_rssi_t phy_n[RSSI_RATE_MAP_MAX_STREAMS][WL_NUM_RATES_MCS_1STREAM]; /* MCS0-7 */
+	wl_bsstrans_rssi_t phy_ac[RSSI_RATE_MAP_MAX_STREAMS][WL_NUM_RATES_VHT]; /* MCS0-9 */
+} wl_bsstrans_rssi_rate_map_t;
+
+#define WL_BSSTRANS_ROAMTHROTTLE_VERSION 1
+
+/* Configure number of scans allowed per throttle period */
+typedef struct wl_bsstrans_roamthrottle {
+	uint16 ver;
+	uint16 period;
+	uint16 scans_allowed;
+} wl_bsstrans_roamthrottle_t;
+
+#define	NFIFO			6	/* # tx/rx fifopairs */
+#define NREINITREASONCOUNT	8
+#define REINITREASONIDX(_x)	(((_x) < NREINITREASONCOUNT) ? (_x) : 0)
+
+#define	WL_CNT_T_VERSION	10	/* current version of wl_cnt_t struct */
+
+typedef struct {
+	uint16	version;	/* see definition of WL_CNT_T_VERSION */
+	uint16	length;		/* length of entire structure */
+
+	/* transmit stat counters */
+	uint32	txframe;	/* tx data frames */
+	uint32	txbyte;		/* tx data bytes */
+	uint32	txretrans;	/* tx mac retransmits */
+	uint32	txerror;	/* tx data errors (derived: sum of others) */
+	uint32	txctl;		/* tx management frames */
+	uint32	txprshort;	/* tx short preamble frames */
+	uint32	txserr;		/* tx status errors */
+	uint32	txnobuf;	/* tx out of buffers errors */
+	uint32	txnoassoc;	/* tx discard because we're not associated */
+	uint32	txrunt;		/* tx runt frames */
+	uint32	txchit;		/* tx header cache hit (fastpath) */
+	uint32	txcmiss;	/* tx header cache miss (slowpath) */
+
+	/* transmit chip error counters */
+	uint32	txuflo;		/* tx fifo underflows */
+	uint32	txphyerr;	/* tx phy errors (indicated in tx status) */
+	uint32	txphycrs;
+
+	/* receive stat counters */
+	uint32	rxframe;	/* rx data frames */
+	uint32	rxbyte;		/* rx data bytes */
+	uint32	rxerror;	/* rx data errors (derived: sum of others) */
+	uint32	rxctl;		/* rx management frames */
+	uint32	rxnobuf;	/* rx out of buffers errors */
+	uint32	rxnondata;	/* rx non data frames in the data channel errors */
+	uint32	rxbadds;	/* rx bad DS errors */
+	uint32	rxbadcm;	/* rx bad control or management frames */
+	uint32	rxfragerr;	/* rx fragmentation errors */
+	uint32	rxrunt;		/* rx runt frames */
+	uint32	rxgiant;	/* rx giant frames */
+	uint32	rxnoscb;	/* rx no scb error */
+	uint32	rxbadproto;	/* rx invalid frames */
+	uint32	rxbadsrcmac;	/* rx frames with Invalid Src Mac */
+	uint32	rxbadda;	/* rx frames tossed for invalid da */
+	uint32	rxfilter;	/* rx frames filtered out */
+
+	/* receive chip error counters */
+	uint32	rxoflo;		/* rx fifo overflow errors */
+	uint32	rxuflo[NFIFO];	/* rx dma descriptor underflow errors */
+
+	uint32	d11cnt_txrts_off;	/* d11cnt txrts value when reset d11cnt */
+	uint32	d11cnt_rxcrc_off;	/* d11cnt rxcrc value when reset d11cnt */
+	uint32	d11cnt_txnocts_off;	/* d11cnt txnocts value when reset d11cnt */
+
+	/* misc counters */
+	uint32	dmade;		/* tx/rx dma descriptor errors */
+	uint32	dmada;		/* tx/rx dma data errors */
+	uint32	dmape;		/* tx/rx dma descriptor protocol errors */
+	uint32	reset;		/* reset count */
+	uint32	tbtt;		/* cnts the TBTT int's */
+	uint32	txdmawar;
+	uint32	pkt_callback_reg_fail;	/* callbacks register failure */
+
+	/* MAC counters: 32-bit version of d11.h's macstat_t */
+	uint32	txallfrm;	/* total number of frames sent, incl. Data, ACK, RTS, CTS,
+				 * Control Management (includes retransmissions)
+				 */
+	uint32	txrtsfrm;	/* number of RTS sent out by the MAC */
+	uint32	txctsfrm;	/* number of CTS sent out by the MAC */
+	uint32	txackfrm;	/* number of ACK frames sent out */
+	uint32	txdnlfrm;	/* Not used */
+	uint32	txbcnfrm;	/* beacons transmitted */
+	uint32	txfunfl[6];	/* per-fifo tx underflows */
+	uint32	rxtoolate;	/* receive too late */
+	uint32  txfbw;		/* transmit at fallback bw (dynamic bw) */
+	uint32	txtplunfl;	/* Template underflows (mac was too slow to transmit ACK/CTS
+				 * or BCN)
+				 */
+	uint32	txphyerror;	/* Transmit phy error, type of error is reported in tx-status for
+				 * driver enqueued frames
+				 */
+	uint32	rxfrmtoolong;	/* Received frame longer than legal limit (2346 bytes) */
+	uint32	rxfrmtooshrt;	/* Received frame did not contain enough bytes for its frame type */
+	uint32	rxinvmachdr;	/* Either the protocol version != 0 or frame type not
+				 * data/control/management
+				 */
+	uint32	rxbadfcs;	/* number of frames for which the CRC check failed in the MAC */
+	uint32	rxbadplcp;	/* parity check of the PLCP header failed */
+	uint32	rxcrsglitch;	/* PHY was able to correlate the preamble but not the header */
+	uint32	rxstrt;		/* Number of received frames with a good PLCP
+				 * (i.e. passing parity check)
+				 */
+	uint32	rxdfrmucastmbss; /* Number of received DATA frames with good FCS and matching RA */
+	uint32	rxmfrmucastmbss; /* number of received mgmt frames with good FCS and matching RA */
+	uint32	rxcfrmucast;	/* number of received CNTRL frames with good FCS and matching RA */
+	uint32	rxrtsucast;	/* number of unicast RTS addressed to the MAC (good FCS) */
+	uint32	rxctsucast;	/* number of unicast CTS addressed to the MAC (good FCS) */
+	uint32	rxackucast;	/* number of ucast ACKS received (good FCS) */
+	uint32	rxdfrmocast;	/* number of received DATA frames (good FCS and not matching RA) */
+	uint32	rxmfrmocast;	/* number of received MGMT frames (good FCS and not matching RA) */
+	uint32	rxcfrmocast;	/* number of received CNTRL frame (good FCS and not matching RA) */
+	uint32	rxrtsocast;	/* number of received RTS not addressed to the MAC */
+	uint32	rxctsocast;	/* number of received CTS not addressed to the MAC */
+	uint32	rxdfrmmcast;	/* number of RX Data multicast frames received by the MAC */
+	uint32	rxmfrmmcast;	/* number of RX Management multicast frames received by the MAC */
+	uint32	rxcfrmmcast;	/* number of RX Control multicast frames received by the MAC
+				 * (unlikely to see these)
+				 */
+	uint32	rxbeaconmbss;	/* beacons received from member of BSS */
+	uint32	rxdfrmucastobss; /* number of unicast frames addressed to the MAC from
+				  * other BSS (WDS FRAME)
+				  */
+	uint32	rxbeaconobss;	/* beacons received from other BSS */
+	uint32	rxrsptmout;	/* Number of response timeouts for transmitted frames
+				 * expecting a response
+				 */
+	uint32	bcntxcancl;	/* transmit beacons canceled due to receipt of beacon (IBSS) */
+	uint32	rxf0ovfl;	/* Number of receive fifo 0 overflows */
+	uint32	rxf1ovfl;	/* Number of receive fifo 1 overflows (obsolete) */
+	uint32	rxf2ovfl;	/* Number of receive fifo 2 overflows (obsolete) */
+	uint32	txsfovfl;	/* Number of transmit status fifo overflows (obsolete) */
+	uint32	pmqovfl;	/* Number of PMQ overflows */
+	uint32	rxcgprqfrm;	/* Number of received Probe requests that made it into
+				 * the PRQ fifo
+				 */
+	uint32	rxcgprsqovfl;	/* Rx Probe Request Que overflow in the AP */
+	uint32	txcgprsfail;	/* Tx Probe Response Fail. AP sent probe response but did
+				 * not get ACK
+				 */
+	uint32	txcgprssuc;	/* Tx Probe Response Success (ACK was received) */
+	uint32	prs_timeout;	/* Number of probe requests that were dropped from the PRQ
+				 * fifo because a probe response could not be sent out within
+				 * the time limit defined in M_PRS_MAXTIME
+				 */
+	uint32	rxnack;		/* obsolete */
+	uint32	frmscons;	/* obsolete */
+	uint32  txnack;		/* obsolete */
+	uint32	rxback;		/* blockack rxcnt */
+	uint32	txback;		/* blockack txcnt */
+
+	/* 802.11 MIB counters, pp. 614 of 802.11 reaff doc. */
+	uint32	txfrag;		/* dot11TransmittedFragmentCount */
+	uint32	txmulti;	/* dot11MulticastTransmittedFrameCount */
+	uint32	txfail;		/* dot11FailedCount */
+	uint32	txretry;	/* dot11RetryCount */
+	uint32	txretrie;	/* dot11MultipleRetryCount */
+	uint32	rxdup;		/* dot11FrameduplicateCount */
+	uint32	txrts;		/* dot11RTSSuccessCount */
+	uint32	txnocts;	/* dot11RTSFailureCount */
+	uint32	txnoack;	/* dot11ACKFailureCount */
+	uint32	rxfrag;		/* dot11ReceivedFragmentCount */
+	uint32	rxmulti;	/* dot11MulticastReceivedFrameCount */
+	uint32	rxcrc;		/* dot11FCSErrorCount */
+	uint32	txfrmsnt;	/* dot11TransmittedFrameCount (bogus MIB?) */
+	uint32	rxundec;	/* dot11WEPUndecryptableCount */
+
+	/* WPA2 counters (see rxundec for DecryptFailureCount) */
+	uint32	tkipmicfaill;	/* TKIPLocalMICFailures */
+	uint32	tkipcntrmsr;	/* TKIPCounterMeasuresInvoked */
+	uint32	tkipreplay;	/* TKIPReplays */
+	uint32	ccmpfmterr;	/* CCMPFormatErrors */
+	uint32	ccmpreplay;	/* CCMPReplays */
+	uint32	ccmpundec;	/* CCMPDecryptErrors */
+	uint32	fourwayfail;	/* FourWayHandshakeFailures */
+	uint32	wepundec;	/* dot11WEPUndecryptableCount */
+	uint32	wepicverr;	/* dot11WEPICVErrorCount */
+	uint32	decsuccess;	/* DecryptSuccessCount */
+	uint32	tkipicverr;	/* TKIPICVErrorCount */
+	uint32	wepexcluded;	/* dot11WEPExcludedCount */
+
+	uint32	txchanrej;	/* Tx frames suppressed due to channel rejection */
+	uint32	psmwds;		/* Count PSM watchdogs */
+	uint32	phywatchdog;	/* Count Phy watchdogs (triggered by ucode) */
+
+	/* MBSS counters, AP only */
+	uint32	prq_entries_handled;	/* PRQ entries read in */
+	uint32	prq_undirected_entries;	/*    which were bcast bss & ssid */
+	uint32	prq_bad_entries;	/*    which could not be translated to info */
+	uint32	atim_suppress_count;	/* TX suppressions on ATIM fifo */
+	uint32	bcn_template_not_ready;	/* Template marked in use on send bcn ... */
+	uint32	bcn_template_not_ready_done; /* ...but "DMA done" interrupt rcvd */
+	uint32	late_tbtt_dpc;	/* TBTT DPC did not happen in time */
+
+	/* per-rate receive stat counters */
+	uint32  rx1mbps;	/* packets rx at 1Mbps */
+	uint32  rx2mbps;	/* packets rx at 2Mbps */
+	uint32  rx5mbps5;	/* packets rx at 5.5Mbps */
+	uint32  rx6mbps;	/* packets rx at 6Mbps */
+	uint32  rx9mbps;	/* packets rx at 9Mbps */
+	uint32  rx11mbps;	/* packets rx at 11Mbps */
+	uint32  rx12mbps;	/* packets rx at 12Mbps */
+	uint32  rx18mbps;	/* packets rx at 18Mbps */
+	uint32  rx24mbps;	/* packets rx at 24Mbps */
+	uint32  rx36mbps;	/* packets rx at 36Mbps */
+	uint32  rx48mbps;	/* packets rx at 48Mbps */
+	uint32  rx54mbps;	/* packets rx at 54Mbps */
+	uint32  rx108mbps;	/* packets rx at 108mbps */
+	uint32  rx162mbps;	/* packets rx at 162mbps */
+	uint32  rx216mbps;	/* packets rx at 216 mbps */
+	uint32  rx270mbps;	/* packets rx at 270 mbps */
+	uint32  rx324mbps;	/* packets rx at 324 mbps */
+	uint32  rx378mbps;	/* packets rx at 378 mbps */
+	uint32  rx432mbps;	/* packets rx at 432 mbps */
+	uint32  rx486mbps;	/* packets rx at 486 mbps */
+	uint32  rx540mbps;	/* packets rx at 540 mbps */
+
+	/* pkteng rx frame stats */
+	uint32	pktengrxducast; /* unicast frames rxed by the pkteng code */
+	uint32	pktengrxdmcast; /* multicast frames rxed by the pkteng code */
+
+	uint32	rfdisable;	/* count of radio disables */
+	uint32	bphy_rxcrsglitch;	/* PHY count of bphy glitches */
+	uint32  bphy_badplcp;
+
+	uint32	txexptime;	/* Tx frames suppressed due to timer expiration */
+
+	uint32	txmpdu_sgi;	/* count for sgi transmit */
+	uint32	rxmpdu_sgi;	/* count for sgi received */
+	uint32	txmpdu_stbc;	/* count for stbc transmit */
+	uint32	rxmpdu_stbc;	/* count for stbc received */
+
+	uint32	rxundec_mcst;	/* dot11WEPUndecryptableCount */
+
+	/* WPA2 counters (see rxundec for DecryptFailureCount) */
+	uint32	tkipmicfaill_mcst;	/* TKIPLocalMICFailures */
+	uint32	tkipcntrmsr_mcst;	/* TKIPCounterMeasuresInvoked */
+	uint32	tkipreplay_mcst;	/* TKIPReplays */
+	uint32	ccmpfmterr_mcst;	/* CCMPFormatErrors */
+	uint32	ccmpreplay_mcst;	/* CCMPReplays */
+	uint32	ccmpundec_mcst;	/* CCMPDecryptErrors */
+	uint32	fourwayfail_mcst;	/* FourWayHandshakeFailures */
+	uint32	wepundec_mcst;	/* dot11WEPUndecryptableCount */
+	uint32	wepicverr_mcst;	/* dot11WEPICVErrorCount */
+	uint32	decsuccess_mcst;	/* DecryptSuccessCount */
+	uint32	tkipicverr_mcst;	/* TKIPICVErrorCount */
+	uint32	wepexcluded_mcst;	/* dot11WEPExcludedCount */
+
+	uint32	dma_hang;	/* count for dma hang */
+	uint32	reinit;		/* count for reinit */
+
+	uint32  pstatxucast;	/* count of ucast frames xmitted on all psta assoc */
+	uint32  pstatxnoassoc;	/* count of txnoassoc frames xmitted on all psta assoc */
+	uint32  pstarxucast;	/* count of ucast frames received on all psta assoc */
+	uint32  pstarxbcmc;	/* count of bcmc frames received on all psta */
+	uint32  pstatxbcmc;	/* count of bcmc frames transmitted on all psta */
+
+	uint32  cso_passthrough; /* hw cso required but passthrough */
+	uint32	cso_normal;	/* hw cso hdr for normal process */
+	uint32	chained;	/* number of frames chained */
+	uint32	chainedsz1;	/* number of chain size 1 frames */
+	uint32	unchained;	/* number of frames not chained */
+	uint32	maxchainsz;	/* max chain size so far */
+	uint32	currchainsz;	/* current chain size */
+	uint32	rxdrop20s;	/* drop secondary cnt */
+	uint32	pciereset;	/* Secondary Bus Reset issued by driver */
+	uint32	cfgrestore;	/* configspace restore by driver */
+	uint32	reinitreason[NREINITREASONCOUNT]; /* reinitreason counters; 0: Unknown reason */
+} wl_cnt_t;
+
+typedef struct {
+	uint16  version;    /* see definition of WL_CNT_T_VERSION */
+	uint16  length;     /* length of entire structure */
+
+	/* transmit stat counters */
+	uint32  txframe;    /* tx data frames */
+	uint32  txbyte;     /* tx data bytes */
+	uint32  txretrans;  /* tx mac retransmits */
+	uint32  txerror;    /* tx data errors (derived: sum of others) */
+	uint32  txctl;      /* tx management frames */
+	uint32  txprshort;  /* tx short preamble frames */
+	uint32  txserr;     /* tx status errors */
+	uint32  txnobuf;    /* tx out of buffers errors */
+	uint32  txnoassoc;  /* tx discard because we're not associated */
+	uint32  txrunt;     /* tx runt frames */
+	uint32  txchit;     /* tx header cache hit (fastpath) */
+	uint32  txcmiss;    /* tx header cache miss (slowpath) */
+
+	/* transmit chip error counters */
+	uint32  txuflo;     /* tx fifo underflows */
+	uint32  txphyerr;   /* tx phy errors (indicated in tx status) */
+	uint32  txphycrs;
+
+	/* receive stat counters */
+	uint32  rxframe;    /* rx data frames */
+	uint32  rxbyte;     /* rx data bytes */
+	uint32  rxerror;    /* rx data errors (derived: sum of others) */
+	uint32  rxctl;      /* rx management frames */
+	uint32  rxnobuf;    /* rx out of buffers errors */
+	uint32  rxnondata;  /* rx non data frames in the data channel errors */
+	uint32  rxbadds;    /* rx bad DS errors */
+	uint32  rxbadcm;    /* rx bad control or management frames */
+	uint32  rxfragerr;  /* rx fragmentation errors */
+	uint32  rxrunt;     /* rx runt frames */
+	uint32  rxgiant;    /* rx giant frames */
+	uint32  rxnoscb;    /* rx no scb error */
+	uint32  rxbadproto; /* rx invalid frames */
+	uint32  rxbadsrcmac;    /* rx frames with Invalid Src Mac */
+	uint32  rxbadda;    /* rx frames tossed for invalid da */
+	uint32  rxfilter;   /* rx frames filtered out */
+
+	/* receive chip error counters */
+	uint32  rxoflo;     /* rx fifo overflow errors */
+	uint32  rxuflo[NFIFO];  /* rx dma descriptor underflow errors */
+
+	uint32  d11cnt_txrts_off;   /* d11cnt txrts value when reset d11cnt */
+	uint32  d11cnt_rxcrc_off;   /* d11cnt rxcrc value when reset d11cnt */
+	uint32  d11cnt_txnocts_off; /* d11cnt txnocts value when reset d11cnt */
+
+	/* misc counters */
+	uint32  dmade;      /* tx/rx dma descriptor errors */
+	uint32  dmada;      /* tx/rx dma data errors */
+	uint32  dmape;      /* tx/rx dma descriptor protocol errors */
+	uint32  reset;      /* reset count */
+	uint32  tbtt;       /* cnts the TBTT int's */
+	uint32  txdmawar;
+	uint32  pkt_callback_reg_fail;  /* callbacks register failure */
+
+	/* MAC counters: 32-bit version of d11.h's macstat_t */
+	uint32  txallfrm;   /* total number of frames sent, incl. Data, ACK, RTS, CTS,
+			     * Control Management (includes retransmissions)
+			     */
+	uint32  txrtsfrm;   /* number of RTS sent out by the MAC */
+	uint32  txctsfrm;   /* number of CTS sent out by the MAC */
+	uint32  txackfrm;   /* number of ACK frames sent out */
+	uint32  txdnlfrm;   /* Not used */
+	uint32  txbcnfrm;   /* beacons transmitted */
+	uint32  txfunfl[6]; /* per-fifo tx underflows */
+	uint32	rxtoolate;	/* receive too late */
+	uint32  txfbw;	    /* transmit at fallback bw (dynamic bw) */
+	uint32  txtplunfl;  /* Template underflows (mac was too slow to transmit ACK/CTS
+			     * or BCN)
+			     */
+	uint32  txphyerror; /* Transmit phy error, type of error is reported in tx-status for
+			     * driver enqueued frames
+			     */
+	uint32  rxfrmtoolong;   /* Received frame longer than legal limit (2346 bytes) */
+	uint32  rxfrmtooshrt;   /* Received frame did not contain enough bytes for its frame type */
+	uint32  rxinvmachdr;    /* Either the protocol version != 0 or frame type not
+				 * data/control/management
+			   */
+	uint32  rxbadfcs;   /* number of frames for which the CRC check failed in the MAC */
+	uint32  rxbadplcp;  /* parity check of the PLCP header failed */
+	uint32  rxcrsglitch;    /* PHY was able to correlate the preamble but not the header */
+	uint32  rxstrt;     /* Number of received frames with a good PLCP
+			     * (i.e. passing parity check)
+			     */
+	uint32  rxdfrmucastmbss; /* Number of received DATA frames with good FCS and matching RA */
+	uint32  rxmfrmucastmbss; /* number of received mgmt frames with good FCS and matching RA */
+	uint32  rxcfrmucast;    /* number of received CNTRL frames with good FCS and matching RA */
+	uint32  rxrtsucast; /* number of unicast RTS addressed to the MAC (good FCS) */
+	uint32  rxctsucast; /* number of unicast CTS addressed to the MAC (good FCS) */
+	uint32  rxackucast; /* number of ucast ACKS received (good FCS) */
+	uint32  rxdfrmocast;    /* number of received DATA frames (good FCS and not matching RA) */
+	uint32  rxmfrmocast;    /* number of received MGMT frames (good FCS and not matching RA) */
+	uint32  rxcfrmocast;    /* number of received CNTRL frame (good FCS and not matching RA) */
+	uint32  rxrtsocast; /* number of received RTS not addressed to the MAC */
+	uint32  rxctsocast; /* number of received CTS not addressed to the MAC */
+	uint32  rxdfrmmcast;    /* number of RX Data multicast frames received by the MAC */
+	uint32  rxmfrmmcast;    /* number of RX Management multicast frames received by the MAC */
+	uint32  rxcfrmmcast;    /* number of RX Control multicast frames received by the MAC
+				 * (unlikely to see these)
+				 */
+	uint32  rxbeaconmbss;   /* beacons received from member of BSS */
+	uint32  rxdfrmucastobss; /* number of unicast frames addressed to the MAC from
+				  * other BSS (WDS FRAME)
+				  */
+	uint32  rxbeaconobss;   /* beacons received from other BSS */
+	uint32  rxrsptmout; /* Number of response timeouts for transmitted frames
+			     * expecting a response
+			     */
+	uint32  bcntxcancl; /* transmit beacons canceled due to receipt of beacon (IBSS) */
+	uint32  rxf0ovfl;   /* Number of receive fifo 0 overflows */
+	uint32  rxf1ovfl;   /* Number of receive fifo 1 overflows (obsolete) */
+	uint32  rxf2ovfl;   /* Number of receive fifo 2 overflows (obsolete) */
+	uint32  txsfovfl;   /* Number of transmit status fifo overflows (obsolete) */
+	uint32  pmqovfl;    /* Number of PMQ overflows */
+	uint32  rxcgprqfrm; /* Number of received Probe requests that made it into
+			     * the PRQ fifo
+			     */
+	uint32  rxcgprsqovfl;   /* Rx Probe Request Que overflow in the AP */
+	uint32  txcgprsfail;    /* Tx Probe Response Fail. AP sent probe response but did
+				 * not get ACK
+				 */
+	uint32  txcgprssuc; /* Tx Probe Response Success (ACK was received) */
+	uint32  prs_timeout;    /* Number of probe requests that were dropped from the PRQ
+				 * fifo because a probe response could not be sent out within
+				 * the time limit defined in M_PRS_MAXTIME
+				 */
+	uint32  rxnack;
+	uint32  frmscons;
+	uint32  txnack;		/* obsolete */
+	uint32	rxback;		/* blockack rxcnt */
+	uint32	txback;		/* blockack txcnt */
+
+	/* 802.11 MIB counters, pp. 614 of 802.11 reaff doc. */
+	uint32  txfrag;     /* dot11TransmittedFragmentCount */
+	uint32  txmulti;    /* dot11MulticastTransmittedFrameCount */
+	uint32  txfail;     /* dot11FailedCount */
+	uint32  txretry;    /* dot11RetryCount */
+	uint32  txretrie;   /* dot11MultipleRetryCount */
+	uint32  rxdup;      /* dot11FrameduplicateCount */
+	uint32  txrts;      /* dot11RTSSuccessCount */
+	uint32  txnocts;    /* dot11RTSFailureCount */
+	uint32  txnoack;    /* dot11ACKFailureCount */
+	uint32  rxfrag;     /* dot11ReceivedFragmentCount */
+	uint32  rxmulti;    /* dot11MulticastReceivedFrameCount */
+	uint32  rxcrc;      /* dot11FCSErrorCount */
+	uint32  txfrmsnt;   /* dot11TransmittedFrameCount (bogus MIB?) */
+	uint32  rxundec;    /* dot11WEPUndecryptableCount */
+
+	/* WPA2 counters (see rxundec for DecryptFailureCount) */
+	uint32  tkipmicfaill;   /* TKIPLocalMICFailures */
+	uint32  tkipcntrmsr;    /* TKIPCounterMeasuresInvoked */
+	uint32  tkipreplay; /* TKIPReplays */
+	uint32  ccmpfmterr; /* CCMPFormatErrors */
+	uint32  ccmpreplay; /* CCMPReplays */
+	uint32  ccmpundec;  /* CCMPDecryptErrors */
+	uint32  fourwayfail;    /* FourWayHandshakeFailures */
+	uint32  wepundec;   /* dot11WEPUndecryptableCount */
+	uint32  wepicverr;  /* dot11WEPICVErrorCount */
+	uint32  decsuccess; /* DecryptSuccessCount */
+	uint32  tkipicverr; /* TKIPICVErrorCount */
+	uint32  wepexcluded;    /* dot11WEPExcludedCount */
+
+	uint32  rxundec_mcst;   /* dot11WEPUndecryptableCount */
+
+	/* WPA2 counters (see rxundec for DecryptFailureCount) */
+	uint32  tkipmicfaill_mcst;  /* TKIPLocalMICFailures */
+	uint32  tkipcntrmsr_mcst;   /* TKIPCounterMeasuresInvoked */
+	uint32  tkipreplay_mcst;    /* TKIPReplays */
+	uint32  ccmpfmterr_mcst;    /* CCMPFormatErrors */
+	uint32  ccmpreplay_mcst;    /* CCMPReplays */
+	uint32  ccmpundec_mcst; /* CCMPDecryptErrors */
+	uint32  fourwayfail_mcst;   /* FourWayHandshakeFailures */
+	uint32  wepundec_mcst;  /* dot11WEPUndecryptableCount */
+	uint32  wepicverr_mcst; /* dot11WEPICVErrorCount */
+	uint32  decsuccess_mcst;    /* DecryptSuccessCount */
+	uint32  tkipicverr_mcst;    /* TKIPICVErrorCount */
+	uint32  wepexcluded_mcst;   /* dot11WEPExcludedCount */
+
+	uint32  txchanrej;  /* Tx frames suppressed due to channel rejection */
+	uint32  txexptime;  /* Tx frames suppressed due to timer expiration */
+	uint32  psmwds;     /* Count PSM watchdogs */
+	uint32  phywatchdog;    /* Count Phy watchdogs (triggered by ucode) */
+
+	/* MBSS counters, AP only */
+	uint32  prq_entries_handled;    /* PRQ entries read in */
+	uint32  prq_undirected_entries; /*    which were bcast bss & ssid */
+	uint32  prq_bad_entries;    /*    which could not be translated to info */
+	uint32  atim_suppress_count;    /* TX suppressions on ATIM fifo */
+	uint32  bcn_template_not_ready; /* Template marked in use on send bcn ... */
+	uint32  bcn_template_not_ready_done; /* ...but "DMA done" interrupt rcvd */
+	uint32  late_tbtt_dpc;  /* TBTT DPC did not happen in time */
+
+	/* per-rate receive stat counters */
+	uint32  rx1mbps;    /* packets rx at 1Mbps */
+	uint32  rx2mbps;    /* packets rx at 2Mbps */
+	uint32  rx5mbps5;   /* packets rx at 5.5Mbps */
+	uint32  rx6mbps;    /* packets rx at 6Mbps */
+	uint32  rx9mbps;    /* packets rx at 9Mbps */
+	uint32  rx11mbps;   /* packets rx at 11Mbps */
+	uint32  rx12mbps;   /* packets rx at 12Mbps */
+	uint32  rx18mbps;   /* packets rx at 18Mbps */
+	uint32  rx24mbps;   /* packets rx at 24Mbps */
+	uint32  rx36mbps;   /* packets rx at 36Mbps */
+	uint32  rx48mbps;   /* packets rx at 48Mbps */
+	uint32  rx54mbps;   /* packets rx at 54Mbps */
+	uint32  rx108mbps;  /* packets rx at 108mbps */
+	uint32  rx162mbps;  /* packets rx at 162mbps */
+	uint32  rx216mbps;  /* packets rx at 216 mbps */
+	uint32  rx270mbps;  /* packets rx at 270 mbps */
+	uint32  rx324mbps;  /* packets rx at 324 mbps */
+	uint32  rx378mbps;  /* packets rx at 378 mbps */
+	uint32  rx432mbps;  /* packets rx at 432 mbps */
+	uint32  rx486mbps;  /* packets rx at 486 mbps */
+	uint32  rx540mbps;  /* packets rx at 540 mbps */
+
+	/* pkteng rx frame stats */
+	uint32  pktengrxducast; /* unicast frames rxed by the pkteng code */
+	uint32  pktengrxdmcast; /* multicast frames rxed by the pkteng code */
+
+	uint32  rfdisable;  /* count of radio disables */
+	uint32  bphy_rxcrsglitch;   /* PHY count of bphy glitches */
+	uint32  bphy_badplcp;
+
+	uint32  txmpdu_sgi; /* count for sgi transmit */
+	uint32  rxmpdu_sgi; /* count for sgi received */
+	uint32  txmpdu_stbc;    /* count for stbc transmit */
+	uint32  rxmpdu_stbc;    /* count for stbc received */
+
+	uint32	rxdrop20s;	/* drop secondary cnt */
+
+} wl_cnt_ver_six_t;
+
+#define	WL_DELTA_STATS_T_VERSION	2	/* current version of wl_delta_stats_t struct */
+
+typedef struct {
+	uint16 version;     /* see definition of WL_DELTA_STATS_T_VERSION */
+	uint16 length;      /* length of entire structure */
+
+	/* transmit stat counters */
+	uint32 txframe;     /* tx data frames */
+	uint32 txbyte;      /* tx data bytes */
+	uint32 txretrans;   /* tx mac retransmits */
+	uint32 txfail;      /* tx failures */
+
+	/* receive stat counters */
+	uint32 rxframe;     /* rx data frames */
+	uint32 rxbyte;      /* rx data bytes */
+
+	/* per-rate receive stat counters */
+	uint32  rx1mbps;	/* packets rx at 1Mbps */
+	uint32  rx2mbps;	/* packets rx at 2Mbps */
+	uint32  rx5mbps5;	/* packets rx at 5.5Mbps */
+	uint32  rx6mbps;	/* packets rx at 6Mbps */
+	uint32  rx9mbps;	/* packets rx at 9Mbps */
+	uint32  rx11mbps;	/* packets rx at 11Mbps */
+	uint32  rx12mbps;	/* packets rx at 12Mbps */
+	uint32  rx18mbps;	/* packets rx at 18Mbps */
+	uint32  rx24mbps;	/* packets rx at 24Mbps */
+	uint32  rx36mbps;	/* packets rx at 36Mbps */
+	uint32  rx48mbps;	/* packets rx at 48Mbps */
+	uint32  rx54mbps;	/* packets rx at 54Mbps */
+	uint32  rx108mbps;	/* packets rx at 108mbps */
+	uint32  rx162mbps;	/* packets rx at 162mbps */
+	uint32  rx216mbps;	/* packets rx at 216 mbps */
+	uint32  rx270mbps;	/* packets rx at 270 mbps */
+	uint32  rx324mbps;	/* packets rx at 324 mbps */
+	uint32  rx378mbps;	/* packets rx at 378 mbps */
+	uint32  rx432mbps;	/* packets rx at 432 mbps */
+	uint32  rx486mbps;	/* packets rx at 486 mbps */
+	uint32  rx540mbps;	/* packets rx at 540 mbps */
+
+	/* phy stats */
+	uint32 rxbadplcp;
+	uint32 rxcrsglitch;
+	uint32 bphy_rxcrsglitch;
+	uint32 bphy_badplcp;
+
+} wl_delta_stats_t;
+
+typedef struct {
+	uint32 packets;
+	uint32 bytes;
+} wl_traffic_stats_t;
+
+typedef struct {
+	uint16	version;	/* see definition of WL_WME_CNT_VERSION */
+	uint16	length;		/* length of entire structure */
+
+	wl_traffic_stats_t tx[AC_COUNT];	/* Packets transmitted */
+	wl_traffic_stats_t tx_failed[AC_COUNT];	/* Packets dropped or failed to transmit */
+	wl_traffic_stats_t rx[AC_COUNT];	/* Packets received */
+	wl_traffic_stats_t rx_failed[AC_COUNT];	/* Packets failed to receive */
+
+	wl_traffic_stats_t forward[AC_COUNT];	/* Packets forwarded by AP */
+
+	wl_traffic_stats_t tx_expired[AC_COUNT];	/* packets dropped due to lifetime expiry */
+
+} wl_wme_cnt_t;
+
+struct wl_msglevel2 {
+	uint32 low;
+	uint32 high;
+};
+
+typedef struct wl_mkeep_alive_pkt {
+	uint16	version; /* Version for mkeep_alive */
+	uint16	length; /* length of fixed parameters in the structure */
+	uint32	period_msec;
+	uint16	len_bytes;
+	uint8	keep_alive_id; /* 0 - 3 for N = 4 */
+	uint8	data[1];
+} wl_mkeep_alive_pkt_t;
+
+#define WL_MKEEP_ALIVE_VERSION		1
+#define WL_MKEEP_ALIVE_FIXED_LEN	OFFSETOF(wl_mkeep_alive_pkt_t, data)
+#define WL_MKEEP_ALIVE_PRECISION	500
+
+/* TCP Keep-Alive conn struct */
+typedef struct wl_mtcpkeep_alive_conn_pkt {
+	struct ether_addr saddr;		/* src mac address */
+	struct ether_addr daddr;		/* dst mac address */
+	struct ipv4_addr sipaddr;		/* source IP addr */
+	struct ipv4_addr dipaddr;		/* dest IP addr */
+	uint16 sport;				/* src port */
+	uint16 dport;				/* dest port */
+	uint32 seq;				/* seq number */
+	uint32 ack;				/* ACK number */
+	uint16 tcpwin;				/* TCP window */
+} wl_mtcpkeep_alive_conn_pkt_t;
+
+/* TCP Keep-Alive interval struct */
+typedef struct wl_mtcpkeep_alive_timers_pkt {
+	uint16 interval;		/* interval timer */
+	uint16 retry_interval;		/* retry_interval timer */
+	uint16 retry_count;		/* retry_count */
+} wl_mtcpkeep_alive_timers_pkt_t;
+
+typedef struct wake_info {
+	uint32 wake_reason;
+	uint32 wake_info_len;		/* size of packet */
+	uchar  packet[1];
+} wake_info_t;
+
+typedef struct wake_pkt {
+	uint32 wake_pkt_len;		/* size of packet */
+	uchar  packet[1];
+} wake_pkt_t;
+
+
+#define WL_MTCPKEEP_ALIVE_VERSION		1
+
+#ifdef WLBA
+
+#define WLC_BA_CNT_VERSION  1   /* current version of wlc_ba_cnt_t */
+
+/* block ack related stats */
+typedef struct wlc_ba_cnt {
+	uint16  version;    /* WLC_BA_CNT_VERSION */
+	uint16  length;     /* length of entire structure */
+
+	/* transmit stat counters */
+	uint32 txpdu;       /* pdus sent */
+	uint32 txsdu;       /* sdus sent */
+	uint32 txfc;        /* tx side flow controlled packets */
+	uint32 txfci;       /* tx side flow control initiated */
+	uint32 txretrans;   /* retransmitted pdus */
+	uint32 txbatimer;   /* ba resend due to timer */
+	uint32 txdrop;      /* dropped packets */
+	uint32 txaddbareq;  /* addba req sent */
+	uint32 txaddbaresp; /* addba resp sent */
+	uint32 txdelba;     /* delba sent */
+	uint32 txba;        /* ba sent */
+	uint32 txbar;       /* bar sent */
+	uint32 txpad[4];    /* future */
+
+	/* receive side counters */
+	uint32 rxpdu;       /* pdus recd */
+	uint32 rxqed;       /* pdus buffered before sending up */
+	uint32 rxdup;       /* duplicate pdus */
+	uint32 rxnobuf;     /* pdus discarded due to no buf */
+	uint32 rxaddbareq;  /* addba req recd */
+	uint32 rxaddbaresp; /* addba resp recd */
+	uint32 rxdelba;     /* delba recd */
+	uint32 rxba;        /* ba recd */
+	uint32 rxbar;       /* bar recd */
+	uint32 rxinvba;     /* invalid ba recd */
+	uint32 rxbaholes;   /* ba recd with holes */
+	uint32 rxunexp;     /* unexpected packets */
+	uint32 rxpad[4];    /* future */
+} wlc_ba_cnt_t;
+#endif /* WLBA */
+
+/* structure for per-tid ampdu control */
+struct ampdu_tid_control {
+	uint8 tid;			/* tid */
+	uint8 enable;			/* enable/disable */
+};
+
+/* struct for ampdu tx/rx aggregation control */
+struct ampdu_aggr {
+	int8 aggr_override;	/* aggr overrided by dongle. Not to be set by host. */
+	uint16 conf_TID_bmap;	/* bitmap of TIDs to configure */
+	uint16 enab_TID_bmap;	/* enable/disable per TID */
+};
+
+/* structure for identifying ea/tid for sending addba/delba */
+struct ampdu_ea_tid {
+	struct ether_addr ea;		/* Station address */
+	uint8 tid;			/* tid */
+	uint8 initiator;	/* 0 is recipient, 1 is originator */
+};
+/* structure for identifying retry/tid for retry_limit_tid/rr_retry_limit_tid */
+struct ampdu_retry_tid {
+	uint8 tid;	/* tid */
+	uint8 retry;	/* retry value */
+};
+
+#define BDD_FNAME_LEN       32  /* Max length of friendly name */
+typedef struct bdd_fname {
+	uint8 len;          /* length of friendly name */
+	uchar name[BDD_FNAME_LEN];  /* friendly name */
+} bdd_fname_t;
+
+/* structure for addts arguments */
+/* For ioctls that take a list of TSPEC */
+struct tslist {
+	int count;			/* number of tspecs */
+	struct tsinfo_arg tsinfo[1];	/* variable length array of tsinfo */
+};
+
+#ifdef WLTDLS
+/* structure for tdls iovars */
+typedef struct tdls_iovar {
+	struct ether_addr ea;		/* Station address */
+	uint8 mode;			/* mode: depends on iovar */
+	chanspec_t chanspec;
+	uint32 pad;			/* future */
+} tdls_iovar_t;
+
+#define TDLS_WFD_IE_SIZE		512
+/* structure for tdls wfd ie */
+typedef struct tdls_wfd_ie_iovar {
+	struct ether_addr ea;		/* Station address */
+	uint8 mode;
+	uint16 length;
+	uint8 data[TDLS_WFD_IE_SIZE];
+} tdls_wfd_ie_iovar_t;
+#endif /* WLTDLS */
+
+/* structure for addts/delts arguments */
+typedef struct tspec_arg {
+	uint16 version;			/* see definition of TSPEC_ARG_VERSION */
+	uint16 length;			/* length of entire structure */
+	uint flag;			/* bit field */
+	/* TSPEC Arguments */
+	struct tsinfo_arg tsinfo;	/* TS Info bit field */
+	uint16 nom_msdu_size;		/* (Nominal or fixed) MSDU Size (bytes) */
+	uint16 max_msdu_size;		/* Maximum MSDU Size (bytes) */
+	uint min_srv_interval;		/* Minimum Service Interval (us) */
+	uint max_srv_interval;		/* Maximum Service Interval (us) */
+	uint inactivity_interval;	/* Inactivity Interval (us) */
+	uint suspension_interval;	/* Suspension Interval (us) */
+	uint srv_start_time;		/* Service Start Time (us) */
+	uint min_data_rate;		/* Minimum Data Rate (bps) */
+	uint mean_data_rate;		/* Mean Data Rate (bps) */
+	uint peak_data_rate;		/* Peak Data Rate (bps) */
+	uint max_burst_size;		/* Maximum Burst Size (bytes) */
+	uint delay_bound;		/* Delay Bound (us) */
+	uint min_phy_rate;		/* Minimum PHY Rate (bps) */
+	uint16 surplus_bw;		/* Surplus Bandwidth Allowance (range 1.0 to 8.0) */
+	uint16 medium_time;		/* Medium Time (32 us/s periods) */
+	uint8 dialog_token;		/* dialog token */
+} tspec_arg_t;
+
+/* tspec arg for desired station */
+typedef	struct tspec_per_sta_arg {
+	struct ether_addr ea;
+	struct tspec_arg ts;
+} tspec_per_sta_arg_t;
+
+/* structure for max bandwidth for each access category */
+typedef	struct wme_max_bandwidth {
+	uint32	ac[AC_COUNT];	/* max bandwidth for each access category */
+} wme_max_bandwidth_t;
+
+#define WL_WME_MBW_PARAMS_IO_BYTES (sizeof(wme_max_bandwidth_t))
+
+/* current version of wl_tspec_arg_t struct */
+#define	TSPEC_ARG_VERSION		2	/* current version of wl_tspec_arg_t struct */
+#define TSPEC_ARG_LENGTH		55	/* argument length from tsinfo to medium_time */
+#define TSPEC_DEFAULT_DIALOG_TOKEN	42	/* default dialog token */
+#define TSPEC_DEFAULT_SBW_FACTOR	0x3000	/* default surplus bw */
+
+
+#define WL_WOWL_KEEPALIVE_MAX_PACKET_SIZE  80
+#define WLC_WOWL_MAX_KEEPALIVE	2
+
+/* Packet lifetime configuration per ac */
+typedef struct wl_lifetime {
+	uint32 ac;	        /* access class */
+	uint32 lifetime;    /* Packet lifetime value in ms */
+} wl_lifetime_t;
+
+/* Channel Switch Announcement param */
+typedef struct wl_chan_switch {
+	uint8 mode;		/* value 0 or 1 */
+	uint8 count;		/* count # of beacons before switching */
+	chanspec_t chspec;	/* chanspec */
+	uint8 reg;		/* regulatory class */
+	uint8 frame_type;		/* csa frame type, unicast or broadcast */
+} wl_chan_switch_t;
+
+enum {
+	PFN_LIST_ORDER,
+	PFN_RSSI
+};
+
+enum {
+	DISABLE,
+	ENABLE
+};
+
+enum {
+	OFF_ADAPT,
+	SMART_ADAPT,
+	STRICT_ADAPT,
+	SLOW_ADAPT
+};
+
+#define SORT_CRITERIA_BIT		0
+#define AUTO_NET_SWITCH_BIT		1
+#define ENABLE_BKGRD_SCAN_BIT		2
+#define IMMEDIATE_SCAN_BIT		3
+#define	AUTO_CONNECT_BIT		4
+#define	ENABLE_BD_SCAN_BIT		5
+#define ENABLE_ADAPTSCAN_BIT		6
+#define IMMEDIATE_EVENT_BIT		8
+#define SUPPRESS_SSID_BIT		9
+#define ENABLE_NET_OFFLOAD_BIT		10
+/* report found/lost events for SSID and BSSID networks seperately */
+#define REPORT_SEPERATELY_BIT		11
+
+#define SORT_CRITERIA_MASK	0x0001
+#define AUTO_NET_SWITCH_MASK	0x0002
+#define ENABLE_BKGRD_SCAN_MASK	0x0004
+#define IMMEDIATE_SCAN_MASK	0x0008
+#define AUTO_CONNECT_MASK	0x0010
+
+#define ENABLE_BD_SCAN_MASK	0x0020
+#define ENABLE_ADAPTSCAN_MASK	0x00c0
+#define IMMEDIATE_EVENT_MASK	0x0100
+#define SUPPRESS_SSID_MASK	0x0200
+#define ENABLE_NET_OFFLOAD_MASK	0x0400
+/* report found/lost events for SSID and BSSID networks seperately */
+#define REPORT_SEPERATELY_MASK	0x0800
+
+#define PFN_VERSION			2
+#define PFN_SCANRESULT_VERSION		1
+#define MAX_PFN_LIST_COUNT		16
+
+#define PFN_COMPLETE			1
+#define PFN_INCOMPLETE			0
+
+#define DEFAULT_BESTN			2
+#define DEFAULT_MSCAN			0
+#define DEFAULT_REPEAT			10
+#define DEFAULT_EXP			2
+
+#define PFN_PARTIAL_SCAN_BIT		0
+#define PFN_PARTIAL_SCAN_MASK		1
+
+#define PFN_SWC_RSSI_WINDOW_MAX   8
+#define PFN_SWC_MAX_NUM_APS       16
+#define PFN_HOTLIST_MAX_NUM_APS   64
+
+/* PFN network info structure */
+typedef struct wl_pfn_subnet_info {
+	struct ether_addr BSSID;
+	uint8	channel; /* channel number only */
+	uint8	SSID_len;
+	uint8	SSID[32];
+} wl_pfn_subnet_info_t;
+
+typedef struct wl_pfn_net_info {
+	wl_pfn_subnet_info_t pfnsubnet;
+	int16	RSSI; /* receive signal strength (in dBm) */
+	uint16	timestamp; /* age in seconds */
+} wl_pfn_net_info_t;
+
+typedef struct wl_pfn_lnet_info {
+	wl_pfn_subnet_info_t pfnsubnet; /* BSSID + channel + SSID len + SSID */
+	uint16	flags; /* partial scan, etc */
+	int16	RSSI; /* receive signal strength (in dBm) */
+	uint32	timestamp; /* age in miliseconds */
+	uint16	rtt0; /* estimated distance to this AP in centimeters */
+	uint16	rtt1; /* standard deviation of the distance to this AP in centimeters */
+} wl_pfn_lnet_info_t;
+
+typedef struct wl_pfn_lscanresults {
+	uint32 version;
+	uint32 status;
+	uint32 count;
+	wl_pfn_lnet_info_t netinfo[1];
+} wl_pfn_lscanresults_t;
+
+/* this is used to report on 1-* pfn scan results */
+typedef struct wl_pfn_scanresults {
+	uint32 version;
+	uint32 status;
+	uint32 count;
+	wl_pfn_net_info_t netinfo[1];
+} wl_pfn_scanresults_t;
+
+typedef struct wl_pfn_significant_net {
+	uint16 flags;
+	uint16 channel;
+	struct ether_addr BSSID;
+	int8 rssi[PFN_SWC_RSSI_WINDOW_MAX];
+} wl_pfn_significant_net_t;
+
+typedef struct wl_pfn_swc_results {
+	uint32 version;
+	uint32 pkt_count;
+	uint32 total_count;
+	wl_pfn_significant_net_t list[1];
+} wl_pfn_swc_results_t;
+
+/* used to report exactly one scan result */
+/* plus reports detailed scan info in bss_info */
+typedef struct wl_pfn_scanresult {
+	uint32 version;
+	uint32 status;
+	uint32 count;
+	wl_pfn_net_info_t netinfo;
+	wl_bss_info_t bss_info;
+} wl_pfn_scanresult_t;
+
+/* PFN data structure */
+typedef struct wl_pfn_param {
+	int32 version;			/* PNO parameters version */
+	int32 scan_freq;		/* Scan frequency */
+	int32 lost_network_timeout;	/* Timeout in sec. to declare
+								* discovered network as lost
+								*/
+	int16 flags;			/* Bit field to control features
+							* of PFN such as sort criteria auto
+							* enable switch and background scan
+							*/
+	int16 rssi_margin;		/* Margin to avoid jitter for choosing a
+							* PFN based on RSSI sort criteria
+							*/
+	uint8 bestn; /* number of best networks in each scan */
+	uint8 mscan; /* number of scans recorded */
+	uint8 repeat; /* Minimum number of scan intervals
+				     *before scan frequency changes in adaptive scan
+				     */
+	uint8 exp; /* Exponent of 2 for maximum scan interval */
+	int32 slow_freq; /* slow scan period */
+} wl_pfn_param_t;
+
+typedef struct wl_pfn_bssid {
+	struct ether_addr  macaddr;
+	/* Bit4: suppress_lost, Bit3: suppress_found */
+	uint16             flags;
+} wl_pfn_bssid_t;
+
+typedef struct wl_pfn_significant_bssid {
+	struct ether_addr	macaddr;
+	int8    rssi_low_threshold;
+	int8    rssi_high_threshold;
+} wl_pfn_significant_bssid_t;
+
+#define WL_PFN_SUPPRESSFOUND_MASK	0x08
+#define WL_PFN_SUPPRESSLOST_MASK	0x10
+#define WL_PFN_RSSI_MASK		0xff00
+#define WL_PFN_RSSI_SHIFT		8
+
+typedef struct wl_pfn_cfg {
+	uint32	reporttype;
+	int32	channel_num;
+	uint16	channel_list[WL_NUMCHANNELS];
+	uint32	flags;
+} wl_pfn_cfg_t;
+
+#define CH_BUCKET_REPORT_REGULAR            0
+#define CH_BUCKET_REPORT_FULL_RESULT        2
+#define CH_BUCKET_GSCAN                     4
+
+typedef struct wl_pfn_gscan_channel_bucket {
+	uint16 bucket_end_index;
+	uint8 bucket_freq_multiple;
+	uint8 flag;
+} wl_pfn_gscan_channel_bucket_t;
+
+#define GSCAN_SEND_ALL_RESULTS_MASK    (1 << 0)
+#define GSCAN_CFG_FLAGS_ONLY_MASK      (1 << 7)
+#define WL_GSCAN_CFG_VERSION            1
+typedef struct wl_pfn_gscan_cfg {
+	uint16 version;
+	/* BIT0 1 = send probes/beacons to HOST
+	 * BIT1 Reserved
+	 * BIT2 Reserved
+	 * Add any future flags here
+	 * BIT7 1 = no other useful cfg sent
+	 */
+	uint8 flags;
+	/* Buffer filled threshold in % to generate an event */
+	uint8   buffer_threshold;
+	/* No. of BSSIDs with "change" to generate an evt
+	 * change - crosses rssi threshold/lost
+	 */
+	uint8   swc_nbssid_threshold;
+	/* Max=8 (for now) Size of rssi cache buffer */
+	uint8  swc_rssi_window_size;
+	uint8  count_of_channel_buckets;
+	uint8  retry_threshold;
+	uint16  lost_ap_window;
+	wl_pfn_gscan_channel_bucket_t channel_bucket[1];
+} wl_pfn_gscan_cfg_t;
+
+#define WL_PFN_REPORT_ALLNET    0
+#define WL_PFN_REPORT_SSIDNET   1
+#define WL_PFN_REPORT_BSSIDNET  2
+
+#define WL_PFN_CFG_FLAGS_PROHIBITED	0x00000001	/* Accept and use prohibited channels */
+#define WL_PFN_CFG_FLAGS_RESERVED	0xfffffffe	/* Remaining reserved for future use */
+
+typedef struct wl_pfn {
+	wlc_ssid_t		ssid;			/* ssid name and its length */
+	int32			flags;			/* bit2: hidden */
+	int32			infra;			/* BSS Vs IBSS */
+	int32			auth;			/* Open Vs Closed */
+	int32			wpa_auth;		/* WPA type */
+	int32			wsec;			/* wsec value */
+} wl_pfn_t;
+
+typedef struct wl_pfn_list {
+	uint32		version;
+	uint32		enabled;
+	uint32		count;
+	wl_pfn_t	pfn[1];
+} wl_pfn_list_t;
+
+#define WL_PFN_MAC_OUI_ONLY_MASK      1
+#define WL_PFN_SET_MAC_UNASSOC_MASK   2
+/* To configure pfn_macaddr */
+typedef struct wl_pfn_macaddr_cfg {
+	uint8 version;
+	uint8 flags;
+	struct ether_addr macaddr;
+} wl_pfn_macaddr_cfg_t;
+#define WL_PFN_MACADDR_CFG_VER 1
+
+typedef BWL_PRE_PACKED_STRUCT struct pfn_olmsg_params_t {
+	wlc_ssid_t ssid;
+	uint32	cipher_type;
+	uint32	auth_type;
+	uint8	channels[4];
+} BWL_POST_PACKED_STRUCT pfn_olmsg_params;
+
+#define WL_PFN_HIDDEN_BIT		2
+#define WL_PFN_HIDDEN_MASK		0x4
+
+#ifndef BESTN_MAX
+#define BESTN_MAX			3
+#endif
+
+#ifndef MSCAN_MAX
+#define MSCAN_MAX			90
+#endif
+
+/* Service discovery */
+typedef struct {
+	uint8	transaction_id;	/* Transaction id */
+	uint8	protocol;	/* Service protocol type */
+	uint16	query_len;	/* Length of query */
+	uint16	response_len;	/* Length of response */
+	uint8	qrbuf[1];
+} wl_p2po_qr_t;
+
+typedef struct {
+	uint16			period;			/* extended listen period */
+	uint16			interval;		/* extended listen interval */
+} wl_p2po_listen_t;
+
+/* GAS state machine tunable parameters.  Structure field values of 0 means use the default. */
+typedef struct wl_gas_config {
+	uint16 max_retransmit;		/* Max # of firmware/driver retransmits on no Ack
+					 * from peer (on top of the ucode retries).
+					 */
+	uint16 response_timeout;	/* Max time to wait for a GAS-level response
+					 * after sending a packet.
+					 */
+	uint16 max_comeback_delay;	/* Max GAS response comeback delay.
+					 * Exceeding this fails the GAS exchange.
+					 */
+	uint16 max_retries;		/* Max # of GAS state machine retries on failure
+					 * of a GAS frame exchange.
+					 */
+} wl_gas_config_t;
+
+/* P2P Find Offload parameters */
+typedef BWL_PRE_PACKED_STRUCT struct wl_p2po_find_config {
+	uint16 version;			/* Version of this struct */
+	uint16 length;			/* sizeof(wl_p2po_find_config_t) */
+	int32 search_home_time;		/* P2P search state home time when concurrent
+					 * connection exists.  -1 for default.
+					 */
+	uint8 num_social_channels;
+			/* Number of social channels up to WL_P2P_SOCIAL_CHANNELS_MAX.
+			 * 0 means use default social channels.
+			 */
+	uint8 flags;
+	uint16 social_channels[1];	/* Variable length array of social channels */
+} BWL_POST_PACKED_STRUCT wl_p2po_find_config_t;
+#define WL_P2PO_FIND_CONFIG_VERSION 2	/* value for version field */
+
+/* wl_p2po_find_config_t flags */
+#define P2PO_FIND_FLAG_SCAN_ALL_APS 0x01	/* Whether to scan for all APs in the p2po_find
+						 * periodic scans of all channels.
+						 * 0 means scan for only P2P devices.
+						 * 1 means scan for P2P devices plus non-P2P APs.
+						 */
+
+
+/* For adding a WFDS service to seek */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint32 seek_hdl;		/* unique id chosen by host */
+	uint8 addr[6];			/* Seek service from a specific device with this
+					 * MAC address, all 1's for any device.
+					 */
+	uint8 service_hash[P2P_WFDS_HASH_LEN];
+	uint8 service_name_len;
+	uint8 service_name[MAX_WFDS_SEEK_SVC_NAME_LEN];
+					/* Service name to seek, not null terminated */
+	uint8 service_info_req_len;
+	uint8 service_info_req[1];	/* Service info request, not null terminated.
+					 * Variable length specified by service_info_req_len.
+					 * Maximum length is MAX_WFDS_SEEK_SVC_INFO_LEN.
+					 */
+} BWL_POST_PACKED_STRUCT wl_p2po_wfds_seek_add_t;
+
+/* For deleting a WFDS service to seek */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint32 seek_hdl;		/* delete service specified by id */
+} BWL_POST_PACKED_STRUCT wl_p2po_wfds_seek_del_t;
+
+
+/* For adding a WFDS service to advertise */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint32 advertise_hdl;		/* unique id chosen by host */
+	uint8 service_hash[P2P_WFDS_HASH_LEN];
+	uint32 advertisement_id;
+	uint16 service_config_method;
+	uint8 service_name_len;
+	uint8 service_name[MAX_WFDS_SVC_NAME_LEN];
+					/* Service name , not null terminated */
+	uint8 service_status;
+	uint16 service_info_len;
+	uint8 service_info[1];		/* Service info, not null terminated.
+					 * Variable length specified by service_info_len.
+					 * Maximum length is MAX_WFDS_ADV_SVC_INFO_LEN.
+					 */
+} BWL_POST_PACKED_STRUCT wl_p2po_wfds_advertise_add_t;
+
+/* For deleting a WFDS service to advertise */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint32 advertise_hdl;	/* delete service specified by hdl */
+} BWL_POST_PACKED_STRUCT wl_p2po_wfds_advertise_del_t;
+
+/* P2P Offload discovery mode for the p2po_state iovar */
+typedef enum {
+	WL_P2PO_DISC_STOP,
+	WL_P2PO_DISC_LISTEN,
+	WL_P2PO_DISC_DISCOVERY
+} disc_mode_t;
+
+/* ANQP offload */
+
+#define ANQPO_MAX_QUERY_SIZE		256
+typedef struct {
+	uint16 max_retransmit;		/* ~0 use default, max retransmit on no ACK from peer */
+	uint16 response_timeout;	/* ~0 use default, msec to wait for resp after tx packet */
+	uint16 max_comeback_delay;	/* ~0 use default, max comeback delay in resp else fail */
+	uint16 max_retries;			/* ~0 use default, max retries on failure */
+	uint16 query_len;			/* length of ANQP query */
+	uint8 query_data[1];		/* ANQP encoded query (max ANQPO_MAX_QUERY_SIZE) */
+} wl_anqpo_set_t;
+
+typedef struct {
+	uint16 channel;				/* channel of the peer */
+	struct ether_addr addr;		/* addr of the peer */
+} wl_anqpo_peer_t;
+
+#define ANQPO_MAX_PEER_LIST			64
+typedef struct {
+	uint16 count;				/* number of peers in list */
+	wl_anqpo_peer_t peer[1];	/* max ANQPO_MAX_PEER_LIST */
+} wl_anqpo_peer_list_t;
+
+#define ANQPO_MAX_IGNORE_SSID		64
+typedef struct {
+	bool is_clear;				/* set to clear list (not used on GET) */
+	uint16 count;				/* number of SSID in list */
+	wlc_ssid_t ssid[1];			/* max ANQPO_MAX_IGNORE_SSID */
+} wl_anqpo_ignore_ssid_list_t;
+
+#define ANQPO_MAX_IGNORE_BSSID		64
+typedef struct {
+	bool is_clear;				/* set to clear list (not used on GET) */
+	uint16 count;				/* number of addr in list */
+	struct ether_addr bssid[1];	/* max ANQPO_MAX_IGNORE_BSSID */
+} wl_anqpo_ignore_bssid_list_t;
+
+
+struct toe_ol_stats_t {
+	/* Num of tx packets that don't need to be checksummed */
+	uint32 tx_summed;
+
+	/* Num of tx packets where checksum is filled by offload engine */
+	uint32 tx_iph_fill;
+	uint32 tx_tcp_fill;
+	uint32 tx_udp_fill;
+	uint32 tx_icmp_fill;
+
+	/*  Num of rx packets where toe finds out if checksum is good or bad */
+	uint32 rx_iph_good;
+	uint32 rx_iph_bad;
+	uint32 rx_tcp_good;
+	uint32 rx_tcp_bad;
+	uint32 rx_udp_good;
+	uint32 rx_udp_bad;
+	uint32 rx_icmp_good;
+	uint32 rx_icmp_bad;
+
+	/* Num of tx packets in which csum error is injected */
+	uint32 tx_tcp_errinj;
+	uint32 tx_udp_errinj;
+	uint32 tx_icmp_errinj;
+
+	/* Num of rx packets in which csum error is injected */
+	uint32 rx_tcp_errinj;
+	uint32 rx_udp_errinj;
+	uint32 rx_icmp_errinj;
+};
+
+/* Arp offload statistic counts */
+struct arp_ol_stats_t {
+	uint32  host_ip_entries;	/* Host IP table addresses (more than one if multihomed) */
+	uint32  host_ip_overflow;	/* Host IP table additions skipped due to overflow */
+
+	uint32  arp_table_entries;	/* ARP table entries */
+	uint32  arp_table_overflow;	/* ARP table additions skipped due to overflow */
+
+	uint32  host_request;		/* ARP requests from host */
+	uint32  host_reply;		/* ARP replies from host */
+	uint32  host_service;		/* ARP requests from host serviced by ARP Agent */
+
+	uint32  peer_request;		/* ARP requests received from network */
+	uint32  peer_request_drop;	/* ARP requests from network that were dropped */
+	uint32  peer_reply;		/* ARP replies received from network */
+	uint32  peer_reply_drop;	/* ARP replies from network that were dropped */
+	uint32  peer_service;		/* ARP request from host serviced by ARP Agent */
+};
+
+/* NS offload statistic counts */
+struct nd_ol_stats_t {
+	uint32  host_ip_entries;    /* Host IP table addresses (more than one if multihomed) */
+	uint32  host_ip_overflow;   /* Host IP table additions skipped due to overflow */
+	uint32  peer_request;       /* NS requests received from network */
+	uint32  peer_request_drop;  /* NS requests from network that were dropped */
+	uint32  peer_reply_drop;    /* NA replies from network that were dropped */
+	uint32  peer_service;       /* NS request from host serviced by firmware */
+};
+
+/*
+ * Keep-alive packet offloading.
+ */
+
+/* NAT keep-alive packets format: specifies the re-transmission period, the packet
+ * length, and packet contents.
+ */
+typedef struct wl_keep_alive_pkt {
+	uint32	period_msec;	/* Retransmission period (0 to disable packet re-transmits) */
+	uint16	len_bytes;	/* Size of packet to transmit (0 to disable packet re-transmits) */
+	uint8	data[1];	/* Variable length packet to transmit.  Contents should include
+				 * entire ethernet packet (enet header, IP header, UDP header,
+				 * and UDP payload) in network byte order.
+				 */
+} wl_keep_alive_pkt_t;
+
+#define WL_KEEP_ALIVE_FIXED_LEN		OFFSETOF(wl_keep_alive_pkt_t, data)
+
+
+/*
+ * Dongle pattern matching filter.
+ */
+
+#define MAX_WAKE_PACKET_CACHE_BYTES 128 /* Maximum cached wake packet */
+
+#define MAX_WAKE_PACKET_BYTES	    (DOT11_A3_HDR_LEN +			    \
+				     DOT11_QOS_LEN +			    \
+				     sizeof(struct dot11_llc_snap_header) + \
+				     ETHER_MAX_DATA)
+
+typedef struct pm_wake_packet {
+	uint32	status;		/* Is the wake reason a packet (if all the other field's valid) */
+	uint32	pattern_id;	/* Pattern ID that matched */
+	uint32	original_packet_size;
+	uint32	saved_packet_size;
+	uchar	packet[MAX_WAKE_PACKET_CACHE_BYTES];
+} pm_wake_packet_t;
+
+/* Packet filter types. Currently, only pattern matching is supported. */
+typedef enum wl_pkt_filter_type {
+	WL_PKT_FILTER_TYPE_PATTERN_MATCH=0,	/* Pattern matching filter */
+	WL_PKT_FILTER_TYPE_MAGIC_PATTERN_MATCH=1, /* Magic packet match */
+	WL_PKT_FILTER_TYPE_PATTERN_LIST_MATCH=2, /* A pattern list (match all to match filter) */
+	WL_PKT_FILTER_TYPE_ENCRYPTED_PATTERN_MATCH=3, /* SECURE WOWL magic / net pattern match */
+} wl_pkt_filter_type_t;
+
+#define WL_PKT_FILTER_TYPE wl_pkt_filter_type_t
+
+/* String mapping for types that may be used by applications or debug */
+#define WL_PKT_FILTER_TYPE_NAMES \
+	{ "PATTERN", WL_PKT_FILTER_TYPE_PATTERN_MATCH },       \
+	{ "MAGIC",   WL_PKT_FILTER_TYPE_MAGIC_PATTERN_MATCH }, \
+	{ "PATLIST", WL_PKT_FILTER_TYPE_PATTERN_LIST_MATCH }
+
+/* Secured WOWL packet was encrypted, need decrypted before check filter match */
+typedef struct wl_pkt_decrypter {
+		uint8* (*dec_cb)(void* dec_ctx, const void *sdu, int sending);
+		void*  dec_ctx;
+} wl_pkt_decrypter_t;
+
+/* Pattern matching filter. Specifies an offset within received packets to
+ * start matching, the pattern to match, the size of the pattern, and a bitmask
+ * that indicates which bits within the pattern should be matched.
+ */
+typedef struct wl_pkt_filter_pattern {
+	union {
+		uint32	offset;		/* Offset within received packet to start pattern matching.
+				 * Offset '0' is the first byte of the ethernet header.
+				 */
+	};
+	uint32	size_bytes;	/* Size of the pattern.  Bitmask must be the same size. */
+	uint8   mask_and_pattern[1]; /* Variable length mask and pattern data.  mask starts
+				      * at offset 0.  Pattern immediately follows mask.
+				      */
+} wl_pkt_filter_pattern_t;
+
+/* A pattern list is a numerically specified list of modified pattern structures. */
+typedef struct wl_pkt_filter_pattern_listel {
+	uint16 rel_offs;	/* Offset to begin match (relative to 'base' below) */
+	uint16 base_offs;	/* Base for offset (defined below) */
+	uint16 size_bytes;	/* Size of mask/pattern */
+	uint16 match_flags;	/* Addition flags controlling the match */
+	uint8  mask_and_data[1]; /* Variable length mask followed by data, each size_bytes */
+} wl_pkt_filter_pattern_listel_t;
+
+typedef struct wl_pkt_filter_pattern_list {
+	uint8 list_cnt;		/* Number of elements in the list */
+	uint8 PAD1[1];		/* Reserved (possible version: reserved) */
+	uint16 totsize;		/* Total size of this pattern list (includes this struct) */
+	wl_pkt_filter_pattern_listel_t patterns[1]; /* Variable number of list elements */
+} wl_pkt_filter_pattern_list_t;
+
+/* IOVAR "pkt_filter_add" parameter. Used to install packet filters. */
+typedef struct wl_pkt_filter {
+	uint32	id;		/* Unique filter id, specified by app. */
+	uint32	type;		/* Filter type (WL_PKT_FILTER_TYPE_xxx). */
+	uint32	negate_match;	/* Negate the result of filter matches */
+	union {			/* Filter definitions */
+		wl_pkt_filter_pattern_t pattern;	/* Pattern matching filter */
+		wl_pkt_filter_pattern_list_t patlist; /* List of patterns to match */
+	} u;
+} wl_pkt_filter_t;
+
+/* IOVAR "tcp_keep_set" parameter. Used to install tcp keep_alive stuff. */
+typedef struct wl_tcp_keep_set {
+	uint32	val1;
+	uint32	val2;
+} wl_tcp_keep_set_t;
+
+#define WL_PKT_FILTER_FIXED_LEN		  OFFSETOF(wl_pkt_filter_t, u)
+#define WL_PKT_FILTER_PATTERN_FIXED_LEN	  OFFSETOF(wl_pkt_filter_pattern_t, mask_and_pattern)
+#define WL_PKT_FILTER_PATTERN_LIST_FIXED_LEN OFFSETOF(wl_pkt_filter_pattern_list_t, patterns)
+#define WL_PKT_FILTER_PATTERN_LISTEL_FIXED_LEN	\
+			OFFSETOF(wl_pkt_filter_pattern_listel_t, mask_and_data)
+
+/* IOVAR "pkt_filter_enable" parameter. */
+typedef struct wl_pkt_filter_enable {
+	uint32	id;		/* Unique filter id */
+	uint32	enable;		/* Enable/disable bool */
+} wl_pkt_filter_enable_t;
+
+/* IOVAR "pkt_filter_list" parameter. Used to retrieve a list of installed filters. */
+typedef struct wl_pkt_filter_list {
+	uint32	num;		/* Number of installed packet filters */
+	wl_pkt_filter_t	filter[1];	/* Variable array of packet filters. */
+} wl_pkt_filter_list_t;
+
+#define WL_PKT_FILTER_LIST_FIXED_LEN	  OFFSETOF(wl_pkt_filter_list_t, filter)
+
+/* IOVAR "pkt_filter_stats" parameter. Used to retrieve debug statistics. */
+typedef struct wl_pkt_filter_stats {
+	uint32	num_pkts_matched;	/* # filter matches for specified filter id */
+	uint32	num_pkts_forwarded;	/* # packets fwded from dongle to host for all filters */
+	uint32	num_pkts_discarded;	/* # packets discarded by dongle for all filters */
+} wl_pkt_filter_stats_t;
+
+/* IOVAR "pkt_filter_ports" parameter.  Configure TCP/UDP port filters. */
+typedef struct wl_pkt_filter_ports {
+	uint8 version;		/* Be proper */
+	uint8 reserved;		/* Be really proper */
+	uint16 count;		/* Number of ports following */
+	/* End of fixed data */
+	uint16 ports[1];	/* Placeholder for ports[<count>] */
+} wl_pkt_filter_ports_t;
+
+#define WL_PKT_FILTER_PORTS_FIXED_LEN	OFFSETOF(wl_pkt_filter_ports_t, ports)
+
+#define WL_PKT_FILTER_PORTS_VERSION	0
+#define WL_PKT_FILTER_PORTS_MAX		128
+
+#define RSN_KCK_LENGTH 16
+#define RSN_KEK_LENGTH 16
+#define RSN_REPLAY_LEN 8
+typedef struct _gtkrefresh {
+	uchar	KCK[RSN_KCK_LENGTH];
+	uchar	KEK[RSN_KEK_LENGTH];
+	uchar	ReplayCounter[RSN_REPLAY_LEN];
+} gtk_keyinfo_t, *pgtk_keyinfo_t;
+
+/* Sequential Commands ioctl */
+typedef struct wl_seq_cmd_ioctl {
+	uint32 cmd;		/* common ioctl definition */
+	uint32 len;		/* length of user buffer */
+} wl_seq_cmd_ioctl_t;
+
+#define WL_SEQ_CMD_ALIGN_BYTES	4
+
+/* These are the set of get IOCTLs that should be allowed when using
+ * IOCTL sequence commands. These are issued implicitly by wl.exe each time
+ * it is invoked. We never want to buffer these, or else wl.exe will stop working.
+ */
+#define WL_SEQ_CMDS_GET_IOCTL_FILTER(cmd) \
+	(((cmd) == WLC_GET_MAGIC)		|| \
+	 ((cmd) == WLC_GET_VERSION)		|| \
+	 ((cmd) == WLC_GET_AP)			|| \
+	 ((cmd) == WLC_GET_INSTANCE))
+
+typedef struct wl_pkteng {
+	uint32 flags;
+	uint32 delay;			/* Inter-packet delay */
+	uint32 nframes;			/* Number of frames */
+	uint32 length;			/* Packet length */
+	uint8  seqno;			/* Enable/disable sequence no. */
+	struct ether_addr dest;		/* Destination address */
+	struct ether_addr src;		/* Source address */
+} wl_pkteng_t;
+
+typedef struct wl_pkteng_stats {
+	uint32 lostfrmcnt;		/* RX PER test: no of frames lost (skip seqno) */
+	int32 rssi;			/* RSSI */
+	int32 snr;			/* signal to noise ratio */
+	uint16 rxpktcnt[NUM_80211_RATES+1];
+	uint8 rssi_qdb;			/* qdB portion of the computed rssi */
+} wl_pkteng_stats_t;
+
+typedef struct wl_txcal_params {
+	wl_pkteng_t pkteng;
+	uint8 gidx_start;
+	int8 gidx_step;
+	uint8 gidx_stop;
+} wl_txcal_params_t;
+
+
+typedef enum {
+	wowl_pattern_type_bitmap = 0,
+	wowl_pattern_type_arp,
+	wowl_pattern_type_na
+} wowl_pattern_type_t;
+
+typedef struct wl_wowl_pattern {
+	uint32		    masksize;		/* Size of the mask in #of bytes */
+	uint32		    offset;		/* Pattern byte offset in packet */
+	uint32		    patternoffset;	/* Offset of start of pattern in the structure */
+	uint32		    patternsize;	/* Size of the pattern itself in #of bytes */
+	uint32		    id;			/* id */
+	uint32		    reasonsize;		/* Size of the wakeup reason code */
+	wowl_pattern_type_t type;		/* Type of pattern */
+	/* Mask follows the structure above */
+	/* Pattern follows the mask is at 'patternoffset' from the start */
+} wl_wowl_pattern_t;
+
+typedef struct wl_wowl_pattern_list {
+	uint			count;
+	wl_wowl_pattern_t	pattern[1];
+} wl_wowl_pattern_list_t;
+
+typedef struct wl_wowl_wakeind {
+	uint8	pci_wakeind;	/* Whether PCI PMECSR PMEStatus bit was set */
+	uint32	ucode_wakeind;	/* What wakeup-event indication was set by ucode */
+} wl_wowl_wakeind_t;
+
+typedef struct {
+	uint32		pktlen;		    /* size of packet */
+	void		*sdu;
+} tcp_keepalive_wake_pkt_infop_t;
+
+/* per AC rate control related data structure */
+typedef struct wl_txrate_class {
+	uint8		init_rate;
+	uint8		min_rate;
+	uint8		max_rate;
+} wl_txrate_class_t;
+
+/* structure for Overlap BSS scan arguments */
+typedef struct wl_obss_scan_arg {
+	int16	passive_dwell;
+	int16	active_dwell;
+	int16	bss_widthscan_interval;
+	int16	passive_total;
+	int16	active_total;
+	int16	chanwidth_transition_delay;
+	int16	activity_threshold;
+} wl_obss_scan_arg_t;
+
+#define WL_OBSS_SCAN_PARAM_LEN	sizeof(wl_obss_scan_arg_t)
+
+/* RSSI event notification configuration. */
+typedef struct wl_rssi_event {
+	uint32 rate_limit_msec;		/* # of events posted to application will be limited to
+					 * one per specified period (0 to disable rate limit).
+					 */
+	uint8 num_rssi_levels;		/* Number of entries in rssi_levels[] below */
+	int8 rssi_levels[MAX_RSSI_LEVELS];	/* Variable number of RSSI levels. An event
+						 * will be posted each time the RSSI of received
+						 * beacons/packets crosses a level.
+						 */
+} wl_rssi_event_t;
+
+typedef struct wl_action_obss_coex_req {
+	uint8 info;
+	uint8 num;
+	uint8 ch_list[1];
+} wl_action_obss_coex_req_t;
+
+
+/* IOVar parameter block for small MAC address array with type indicator */
+#define WL_IOV_MAC_PARAM_LEN  4
+
+#define WL_IOV_PKTQ_LOG_PRECS 16
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint32 num_addrs;
+	char   addr_type[WL_IOV_MAC_PARAM_LEN];
+	struct ether_addr ea[WL_IOV_MAC_PARAM_LEN];
+} BWL_POST_PACKED_STRUCT wl_iov_mac_params_t;
+
+/* This is extra info that follows wl_iov_mac_params_t */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint32 addr_info[WL_IOV_MAC_PARAM_LEN];
+} BWL_POST_PACKED_STRUCT wl_iov_mac_extra_params_t;
+
+/* Combined structure */
+typedef struct {
+	wl_iov_mac_params_t params;
+	wl_iov_mac_extra_params_t extra_params;
+} wl_iov_mac_full_params_t;
+
+/* Parameter block for PKTQ_LOG statistics */
+#define PKTQ_LOG_COUNTERS_V4 \
+	/* packets requested to be stored */ \
+	uint32 requested; \
+	/* packets stored */ \
+	uint32 stored; \
+	/* packets saved, because a lowest priority queue has given away one packet */ \
+	uint32 saved; \
+	/* packets saved, because an older packet from the same queue has been dropped */ \
+	uint32 selfsaved; \
+	/* packets dropped, because pktq is full with higher precedence packets */ \
+	uint32 full_dropped; \
+	 /* packets dropped because pktq per that precedence is full */ \
+	uint32 dropped; \
+	/* packets dropped, in order to save one from a queue of a highest priority */ \
+	uint32 sacrificed; \
+	/* packets droped because of hardware/transmission error */ \
+	uint32 busy; \
+	/* packets re-sent because they were not received */ \
+	uint32 retry; \
+	/* packets retried again (ps pretend) prior to moving power save mode */ \
+	uint32 ps_retry; \
+	 /* suppressed packet count */ \
+	uint32 suppress; \
+	/* packets finally dropped after retry limit */ \
+	uint32 retry_drop; \
+	/* the high-water mark of the queue capacity for packets - goes to zero as queue fills */ \
+	uint32 max_avail; \
+	/* the high-water mark of the queue utilisation for packets - ('inverse' of max_avail) */ \
+	uint32 max_used; \
+	 /* the maximum capacity of the queue */ \
+	uint32 queue_capacity; \
+	/* count of rts attempts that failed to receive cts */ \
+	uint32 rtsfail; \
+	/* count of packets sent (acked) successfully */ \
+	uint32 acked; \
+	/* running total of phy rate of packets sent successfully */ \
+	uint32 txrate_succ; \
+	/* running total of phy 'main' rate */ \
+	uint32 txrate_main; \
+	/* actual data transferred successfully */ \
+	uint32 throughput; \
+	/* time difference since last pktq_stats */ \
+	uint32 time_delta;
+
+typedef struct {
+	PKTQ_LOG_COUNTERS_V4
+} pktq_log_counters_v04_t;
+
+/* v5 is the same as V4 with extra parameter */
+typedef struct {
+	PKTQ_LOG_COUNTERS_V4
+	/* cumulative time to transmit */
+	uint32 airtime;
+} pktq_log_counters_v05_t;
+
+typedef struct {
+	uint8                num_prec[WL_IOV_MAC_PARAM_LEN];
+	pktq_log_counters_v04_t  counters[WL_IOV_MAC_PARAM_LEN][WL_IOV_PKTQ_LOG_PRECS];
+	uint32               counter_info[WL_IOV_MAC_PARAM_LEN];
+	uint32               pspretend_time_delta[WL_IOV_MAC_PARAM_LEN];
+	char                 headings[1];
+} pktq_log_format_v04_t;
+
+typedef struct {
+	uint8                num_prec[WL_IOV_MAC_PARAM_LEN];
+	pktq_log_counters_v05_t  counters[WL_IOV_MAC_PARAM_LEN][WL_IOV_PKTQ_LOG_PRECS];
+	uint32               counter_info[WL_IOV_MAC_PARAM_LEN];
+	uint32               pspretend_time_delta[WL_IOV_MAC_PARAM_LEN];
+	char                 headings[1];
+} pktq_log_format_v05_t;
+
+
+typedef struct {
+	uint32               version;
+	wl_iov_mac_params_t  params;
+	union {
+		pktq_log_format_v04_t v04;
+		pktq_log_format_v05_t v05;
+	} pktq_log;
+} wl_iov_pktq_log_t;
+
+/* PKTQ_LOG_AUTO, PKTQ_LOG_DEF_PREC flags introduced in v05, they are ignored by v04 */
+#define PKTQ_LOG_AUTO     (1 << 31)
+#define PKTQ_LOG_DEF_PREC (1 << 30)
+
+/*
+ * SCB_BS_DATA iovar definitions start.
+ */
+#define SCB_BS_DATA_STRUCT_VERSION	1
+
+/* The actual counters maintained for each station */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	/* The following counters are a subset of what pktq_stats provides per precedence. */
+	uint32 retry;          /* packets re-sent because they were not received */
+	uint32 retry_drop;     /* packets finally dropped after retry limit */
+	uint32 rtsfail;        /* count of rts attempts that failed to receive cts */
+	uint32 acked;          /* count of packets sent (acked) successfully */
+	uint32 txrate_succ;    /* running total of phy rate of packets sent successfully */
+	uint32 txrate_main;    /* running total of phy 'main' rate */
+	uint32 throughput;     /* actual data transferred successfully */
+	uint32 time_delta;     /* time difference since last pktq_stats */
+	uint32 airtime;        /* cumulative total medium access delay in useconds */
+} BWL_POST_PACKED_STRUCT iov_bs_data_counters_t;
+
+/* The structure for individual station information. */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	struct ether_addr	station_address;	/* The station MAC address */
+	uint16			station_flags;		/* Bit mask of flags, for future use. */
+	iov_bs_data_counters_t	station_counters;	/* The actual counter values */
+} BWL_POST_PACKED_STRUCT iov_bs_data_record_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint16	structure_version;	/* Structure version number (for wl/wlu matching) */
+	uint16	structure_count;	/* Number of iov_bs_data_record_t records following */
+	iov_bs_data_record_t	structure_record[1];	/* 0 - structure_count records */
+} BWL_POST_PACKED_STRUCT iov_bs_data_struct_t;
+
+/* Bitmask of options that can be passed in to the iovar. */
+enum {
+	SCB_BS_DATA_FLAG_NO_RESET = (1<<0)	/* Do not clear the counters after reading */
+};
+/*
+ * SCB_BS_DATA iovar definitions end.
+ */
+
+typedef struct wlc_extlog_cfg {
+	int max_number;
+	uint16 module;	/* bitmap */
+	uint8 level;
+	uint8 flag;
+	uint16 version;
+} wlc_extlog_cfg_t;
+
+typedef struct log_record {
+	uint32 time;
+	uint16 module;
+	uint16 id;
+	uint8 level;
+	uint8 sub_unit;
+	uint8 seq_num;
+	int32 arg;
+	char str[MAX_ARGSTR_LEN];
+} log_record_t;
+
+typedef struct wlc_extlog_req {
+	uint32 from_last;
+	uint32 num;
+} wlc_extlog_req_t;
+
+typedef struct wlc_extlog_results {
+	uint16 version;
+	uint16 record_len;
+	uint32 num;
+	log_record_t logs[1];
+} wlc_extlog_results_t;
+
+typedef struct log_idstr {
+	uint16	id;
+	uint16	flag;
+	uint8	arg_type;
+	const char	*fmt_str;
+} log_idstr_t;
+
+#define FMTSTRF_USER		1
+
+/* flat ID definitions
+ * New definitions HAVE TO BE ADDED at the end of the table. Otherwise, it will
+ * affect backward compatibility with pre-existing apps
+ */
+typedef enum {
+	FMTSTR_DRIVER_UP_ID = 0,
+	FMTSTR_DRIVER_DOWN_ID = 1,
+	FMTSTR_SUSPEND_MAC_FAIL_ID = 2,
+	FMTSTR_NO_PROGRESS_ID = 3,
+	FMTSTR_RFDISABLE_ID = 4,
+	FMTSTR_REG_PRINT_ID = 5,
+	FMTSTR_EXPTIME_ID = 6,
+	FMTSTR_JOIN_START_ID = 7,
+	FMTSTR_JOIN_COMPLETE_ID = 8,
+	FMTSTR_NO_NETWORKS_ID = 9,
+	FMTSTR_SECURITY_MISMATCH_ID = 10,
+	FMTSTR_RATE_MISMATCH_ID = 11,
+	FMTSTR_AP_PRUNED_ID = 12,
+	FMTSTR_KEY_INSERTED_ID = 13,
+	FMTSTR_DEAUTH_ID = 14,
+	FMTSTR_DISASSOC_ID = 15,
+	FMTSTR_LINK_UP_ID = 16,
+	FMTSTR_LINK_DOWN_ID = 17,
+	FMTSTR_RADIO_HW_OFF_ID = 18,
+	FMTSTR_RADIO_HW_ON_ID = 19,
+	FMTSTR_EVENT_DESC_ID = 20,
+	FMTSTR_PNP_SET_POWER_ID = 21,
+	FMTSTR_RADIO_SW_OFF_ID = 22,
+	FMTSTR_RADIO_SW_ON_ID = 23,
+	FMTSTR_PWD_MISMATCH_ID = 24,
+	FMTSTR_FATAL_ERROR_ID = 25,
+	FMTSTR_AUTH_FAIL_ID = 26,
+	FMTSTR_ASSOC_FAIL_ID = 27,
+	FMTSTR_IBSS_FAIL_ID = 28,
+	FMTSTR_EXTAP_FAIL_ID = 29,
+	FMTSTR_MAX_ID
+} log_fmtstr_id_t;
+
+#ifdef DONGLEOVERLAYS
+typedef struct {
+	uint32 flags_idx;	/* lower 8 bits: overlay index; upper 24 bits: flags */
+	uint32 offset;		/* offset into overlay region to write code */
+	uint32 len;			/* overlay code len */
+	/* overlay code follows this struct */
+} wl_ioctl_overlay_t;
+#endif /* DONGLEOVERLAYS */
+
+/* 11k Neighbor Report element */
+typedef struct nbr_element {
+	uint8 id;
+	uint8 len;
+	struct ether_addr bssid;
+	uint32 bssid_info;
+	uint8 reg;
+	uint8 channel;
+	uint8 phytype;
+	uint8 pad;
+} nbr_element_t;
+
+
+typedef enum event_msgs_ext_command {
+	EVENTMSGS_NONE		=	0,
+	EVENTMSGS_SET_BIT	=	1,
+	EVENTMSGS_RESET_BIT	=	2,
+	EVENTMSGS_SET_MASK	=	3
+} event_msgs_ext_command_t;
+
+#define EVENTMSGS_VER 1
+#define EVENTMSGS_EXT_STRUCT_SIZE	OFFSETOF(eventmsgs_ext_t, mask[0])
+
+/* len-	for SET it would be mask size from the application to the firmware */
+/*		for GET it would be actual firmware mask size */
+/* maxgetsize -	is only used for GET. indicate max mask size that the */
+/*				application can read from the firmware */
+typedef struct eventmsgs_ext
+{
+	uint8	ver;
+	uint8	command;
+	uint8	len;
+	uint8	maxgetsize;
+	uint8	mask[1];
+} eventmsgs_ext_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct pcie_bus_tput_params {
+	/* no of host dma descriptors programmed by the firmware before a commit */
+	uint16		max_dma_descriptors;
+
+	uint16		host_buf_len; /* length of host buffer */
+	dmaaddr_t	host_buf_addr; /* physical address for bus_throughput_buf */
+} BWL_POST_PACKED_STRUCT pcie_bus_tput_params_t;
+typedef BWL_PRE_PACKED_STRUCT struct pcie_bus_tput_stats {
+	uint16		time_taken; /* no of secs the test is run */
+	uint16		nbytes_per_descriptor; /* no of bytes of data dma ed per descriptor */
+
+	/* no of desciptors fo which dma is sucessfully completed within the test time */
+	uint32		count;
+} BWL_POST_PACKED_STRUCT pcie_bus_tput_stats_t;
+
+/* no default structure packing */
+#include <packed_section_end.h>
+
+typedef struct keepalives_max_idle {
+	uint16  keepalive_count;        /* nmbr of keepalives per bss_max_idle period */
+	uint8   mkeepalive_index;       /* mkeepalive_index for keepalive frame to be used */
+	uint8   PAD;			/* to align next field */
+	uint16  max_interval;           /* seconds */
+} keepalives_max_idle_t;
+
+#define PM_IGNORE_BCMC_PROXY_ARP (1 << 0)
+#define PM_IGNORE_BCMC_ALL_DMS_ACCEPTED (1 << 1)
+
+/* require strict packing */
+#include <packed_section_start.h>
+
+/* ##### Power Stats section ##### */
+
+#define WL_PWRSTATS_VERSION	2
+
+/* Input structure for pwrstats IOVAR */
+typedef BWL_PRE_PACKED_STRUCT struct wl_pwrstats_query {
+	uint16 length;		/* Number of entries in type array. */
+	uint16 type[1];		/* Types (tags) to retrieve.
+				 * Length 0 (no types) means get all.
+				 */
+} BWL_POST_PACKED_STRUCT wl_pwrstats_query_t;
+
+/* This structure is for version 2; version 1 will be deprecated in by FW */
+typedef BWL_PRE_PACKED_STRUCT struct wl_pwrstats {
+	uint16 version;		      /* Version = 2 is TLV format */
+	uint16 length;		      /* Length of entire structure */
+	uint8 data[1];		      /* TLV data, a series of structures,
+				       * each starting with type and length.
+				       *
+				       * Padded as necessary so each section
+				       * starts on a 4-byte boundary.
+				       *
+				       * Both type and len are uint16, but the
+				       * upper nibble of length is reserved so
+				       * valid len values are 0-4095.
+				       */
+} BWL_POST_PACKED_STRUCT wl_pwrstats_t;
+#define WL_PWR_STATS_HDRLEN	OFFSETOF(wl_pwrstats_t, data)
+
+/* Type values for the data section */
+#define WL_PWRSTATS_TYPE_PHY		0 /* struct wl_pwr_phy_stats */
+#define WL_PWRSTATS_TYPE_SCAN		1 /* struct wl_pwr_scan_stats */
+#define WL_PWRSTATS_TYPE_USB_HSIC	2 /* struct wl_pwr_usb_hsic_stats */
+#define WL_PWRSTATS_TYPE_PM_AWAKE	3 /* struct wl_pwr_pm_awake_stats */
+#define WL_PWRSTATS_TYPE_CONNECTION	4 /* struct wl_pwr_connect_stats; assoc and key-exch time */
+#define WL_PWRSTATS_TYPE_PCIE		6 /* struct wl_pwr_pcie_stats */
+
+/* Bits for wake reasons */
+#define WLC_PMD_WAKE_SET		0x1
+#define WLC_PMD_PM_AWAKE_BCN		0x2
+#define WLC_PMD_BTA_ACTIVE		0x4
+#define WLC_PMD_SCAN_IN_PROGRESS	0x8
+#define WLC_PMD_RM_IN_PROGRESS		0x10
+#define WLC_PMD_AS_IN_PROGRESS		0x20
+#define WLC_PMD_PM_PEND			0x40
+#define WLC_PMD_PS_POLL			0x80
+#define WLC_PMD_CHK_UNALIGN_TBTT	0x100
+#define WLC_PMD_APSD_STA_UP		0x200
+#define WLC_PMD_TX_PEND_WAR		0x400
+#define WLC_PMD_GPTIMER_STAY_AWAKE	0x800
+#define WLC_PMD_PM2_RADIO_SOFF_PEND	0x2000
+#define WLC_PMD_NON_PRIM_STA_UP		0x4000
+#define WLC_PMD_AP_UP			0x8000
+
+typedef BWL_PRE_PACKED_STRUCT struct wlc_pm_debug {
+	uint32 timestamp;	     /* timestamp in millisecond */
+	uint32 reason;		     /* reason(s) for staying awake */
+} BWL_POST_PACKED_STRUCT wlc_pm_debug_t;
+
+/* Data sent as part of pwrstats IOVAR */
+typedef BWL_PRE_PACKED_STRUCT struct pm_awake_data {
+	uint32 curr_time;	/* ms */
+	uint32 hw_macc;		/* HW maccontrol */
+	uint32 sw_macc;		/* SW maccontrol */
+	uint32 pm_dur;		/* Total sleep time in PM, usecs */
+	uint32 mpc_dur;		/* Total sleep time in MPC, usecs */
+
+	/* int32 drifts = remote - local; +ve drift => local-clk slow */
+	int32 last_drift;	/* Most recent TSF drift from beacon */
+	int32 min_drift;	/* Min TSF drift from beacon in magnitude */
+	int32 max_drift;	/* Max TSF drift from beacon in magnitude */
+
+	uint32 avg_drift;	/* Avg TSF drift from beacon */
+
+	/* Wake history tracking */
+
+	/* pmstate array (type wlc_pm_debug_t) start offset */
+	uint16 pm_state_offset;
+	/* pmstate number of array entries */
+	uint16 pm_state_len;
+
+	/* array (type uint32) start offset */
+	uint16 pmd_event_wake_dur_offset;
+	/* pmd_event_wake_dur number of array entries */
+	uint16 pmd_event_wake_dur_len;
+
+	uint32 drift_cnt;	/* Count of drift readings over which avg_drift was computed */
+	uint8  pmwake_idx;	/* for stepping through pm_state */
+	uint8  pad[3];
+	uint32 frts_time;	/* Cumulative ms spent in frts since driver load */
+	uint32 frts_end_cnt;	/* No of times frts ended since driver load */
+} BWL_POST_PACKED_STRUCT pm_awake_data_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_pwr_pm_awake_stats {
+	uint16 type;	     /* WL_PWRSTATS_TYPE_PM_AWAKE */
+	uint16 len;	     /* Up to 4K-1, top 4 bits are reserved */
+
+	pm_awake_data_t awake_data;
+} BWL_POST_PACKED_STRUCT wl_pwr_pm_awake_stats_t;
+
+/* Original bus structure is for HSIC */
+typedef BWL_PRE_PACKED_STRUCT struct bus_metrics {
+	uint32 suspend_ct;	/* suspend count */
+	uint32 resume_ct;	/* resume count */
+	uint32 disconnect_ct;	/* disconnect count */
+	uint32 reconnect_ct;	/* reconnect count */
+	uint32 active_dur;	/* msecs in bus, usecs for user */
+	uint32 suspend_dur;	/* msecs in bus, usecs for user */
+	uint32 disconnect_dur;	/* msecs in bus, usecs for user */
+} BWL_POST_PACKED_STRUCT bus_metrics_t;
+
+/* Bus interface info for USB/HSIC */
+typedef BWL_PRE_PACKED_STRUCT struct wl_pwr_usb_hsic_stats {
+	uint16 type;	     /* WL_PWRSTATS_TYPE_USB_HSIC */
+	uint16 len;	     /* Up to 4K-1, top 4 bits are reserved */
+
+	bus_metrics_t hsic;	/* stats from hsic bus driver */
+} BWL_POST_PACKED_STRUCT wl_pwr_usb_hsic_stats_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct pcie_bus_metrics {
+	uint32 d3_suspend_ct;	/* suspend count */
+	uint32 d0_resume_ct;	/* resume count */
+	uint32 perst_assrt_ct;	/* PERST# assert count */
+	uint32 perst_deassrt_ct;	/* PERST# de-assert count */
+	uint32 active_dur;	/* msecs */
+	uint32 d3_suspend_dur;	/* msecs */
+	uint32 perst_dur;	/* msecs */
+	uint32 l0_cnt;		/* L0 entry count */
+	uint32 l0_usecs;	/* L0 duration in usecs */
+	uint32 l1_cnt;		/* L1 entry count */
+	uint32 l1_usecs;	/* L1 duration in usecs */
+	uint32 l1_1_cnt;	/* L1_1ss entry count */
+	uint32 l1_1_usecs;	/* L1_1ss duration in usecs */
+	uint32 l1_2_cnt;	/* L1_2ss entry count */
+	uint32 l1_2_usecs;	/* L1_2ss duration in usecs */
+	uint32 l2_cnt;		/* L2 entry count */
+	uint32 l2_usecs;	/* L2 duration in usecs */
+} BWL_POST_PACKED_STRUCT pcie_bus_metrics_t;
+
+/* Bus interface info for PCIE */
+typedef BWL_PRE_PACKED_STRUCT struct wl_pwr_pcie_stats {
+	uint16 type;	     /* WL_PWRSTATS_TYPE_PCIE */
+	uint16 len;	     /* Up to 4K-1, top 4 bits are reserved */
+	pcie_bus_metrics_t pcie;	/* stats from pcie bus driver */
+} BWL_POST_PACKED_STRUCT wl_pwr_pcie_stats_t;
+
+/* Scan information history per category */
+typedef BWL_PRE_PACKED_STRUCT struct scan_data {
+	uint32 count;		/* Number of scans performed */
+	uint32 dur;		/* Total time (in us) used */
+} BWL_POST_PACKED_STRUCT scan_data_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_pwr_scan_stats {
+	uint16 type;	     /* WL_PWRSTATS_TYPE_SCAN */
+	uint16 len;	     /* Up to 4K-1, top 4 bits are reserved */
+
+	/* Scan history */
+	scan_data_t user_scans;	  /* User-requested scans: (i/e/p)scan */
+	scan_data_t assoc_scans;  /* Scans initiated by association requests */
+	scan_data_t roam_scans;	  /* Scans initiated by the roam engine */
+	scan_data_t pno_scans[8]; /* For future PNO bucketing (BSSID, SSID, etc) */
+	scan_data_t other_scans;  /* Scan engine usage not assigned to the above */
+} BWL_POST_PACKED_STRUCT wl_pwr_scan_stats_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_pwr_connect_stats {
+	uint16 type;	     /* WL_PWRSTATS_TYPE_SCAN */
+	uint16 len;	     /* Up to 4K-1, top 4 bits are reserved */
+
+	/* Connection (Association + Key exchange) data */
+	uint32 count;	/* Number of connections performed */
+	uint32 dur;		/* Total time (in ms) used */
+} BWL_POST_PACKED_STRUCT wl_pwr_connect_stats_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_pwr_phy_stats {
+	uint16 type;	    /* WL_PWRSTATS_TYPE_PHY */
+	uint16 len;	    /* Up to 4K-1, top 4 bits are reserved */
+	uint32 tx_dur;	    /* TX Active duration in us */
+	uint32 rx_dur;	    /* RX Active duration in us */
+} BWL_POST_PACKED_STRUCT wl_pwr_phy_stats_t;
+
+
+/* ##### End of Power Stats section ##### */
+
+/* IPV4 Arp offloads for ndis context */
+BWL_PRE_PACKED_STRUCT struct hostip_id {
+	struct ipv4_addr ipa;
+	uint8 id;
+} BWL_POST_PACKED_STRUCT;
+
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_pfn_roam_thresh {
+	uint32 pfn_alert_thresh; /* time in ms */
+	uint32 roam_alert_thresh; /* time in ms */
+} BWL_POST_PACKED_STRUCT wl_pfn_roam_thresh_t;
+
+
+/* Reasons for wl_pmalert_t */
+#define PM_DUR_EXCEEDED			(1<<0)
+#define MPC_DUR_EXCEEDED		(1<<1)
+#define ROAM_ALERT_THRESH_EXCEEDED	(1<<2)
+#define PFN_ALERT_THRESH_EXCEEDED	(1<<3)
+#define CONST_AWAKE_DUR_ALERT		(1<<4)
+#define CONST_AWAKE_DUR_RECOVERY	(1<<5)
+
+#define MIN_PM_ALERT_LEN 9
+
+/* Data sent in EXCESS_PM_WAKE event */
+#define WL_PM_ALERT_VERSION 3
+
+#define MAX_P2P_BSS_DTIM_PRD 4
+
+/* This structure is for version 3; version 2 will be deprecated in by FW */
+typedef BWL_PRE_PACKED_STRUCT struct wl_pmalert {
+	uint16 version;		/* Version = 3 is TLV format */
+	uint16 length;		/* Length of entire structure */
+	uint32 reasons;		/* reason(s) for pm_alert */
+	uint8 data[1];		/* TLV data, a series of structures,
+				 * each starting with type and length.
+				 *
+				 * Padded as necessary so each section
+				 * starts on a 4-byte boundary.
+				 *
+				 * Both type and len are uint16, but the
+				 * upper nibble of length is reserved so
+				 * valid len values are 0-4095.
+				*/
+} BWL_POST_PACKED_STRUCT wl_pmalert_t;
+
+/* Type values for the data section */
+#define WL_PMALERT_FIXED	0 /* struct wl_pmalert_fixed_t, fixed fields */
+#define WL_PMALERT_PMSTATE	1 /* struct wl_pmalert_pmstate_t, variable */
+#define WL_PMALERT_EVENT_DUR	2 /* struct wl_pmalert_event_dur_t, variable */
+#define WL_PMALERT_UCODE_DBG	3 /* struct wl_pmalert_ucode_dbg_t, variable */
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_pmalert_fixed {
+	uint16 type;	     /* WL_PMALERT_FIXED */
+	uint16 len;	     /* Up to 4K-1, top 4 bits are reserved */
+	uint32 prev_stats_time;	/* msecs */
+	uint32 curr_time;	/* ms */
+	uint32 prev_pm_dur;	/* usecs */
+	uint32 pm_dur;		/* Total sleep time in PM, usecs */
+	uint32 prev_mpc_dur;	/* usecs */
+	uint32 mpc_dur;		/* Total sleep time in MPC, usecs */
+	uint32 hw_macc;		/* HW maccontrol */
+	uint32 sw_macc;		/* SW maccontrol */
+
+	/* int32 drifts = remote - local; +ve drift -> local-clk slow */
+	int32 last_drift;	/* Most recent TSF drift from beacon */
+	int32 min_drift;	/* Min TSF drift from beacon in magnitude */
+	int32 max_drift;	/* Max TSF drift from beacon in magnitude */
+
+	uint32 avg_drift;	/* Avg TSF drift from beacon */
+	uint32 drift_cnt;	/* Count of drift readings over which avg_drift was computed */
+	uint32 frts_time;	/* Cumulative ms spent in frts since driver load */
+	uint32 frts_end_cnt;	/* No of times frts ended since driver load */
+} BWL_POST_PACKED_STRUCT wl_pmalert_fixed_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_pmalert_pmstate {
+	uint16 type;	     /* WL_PMALERT_PMSTATE */
+	uint16 len;	     /* Up to 4K-1, top 4 bits are reserved */
+
+	uint8 pmwake_idx;   /* for stepping through pm_state */
+	uint8 pad[3];
+	/* Array of pmstate; len of array is based on tlv len */
+	wlc_pm_debug_t pmstate[1];
+} BWL_POST_PACKED_STRUCT wl_pmalert_pmstate_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_pmalert_event_dur {
+	uint16 type;	     /* WL_PMALERT_EVENT_DUR */
+	uint16 len;	     /* Up to 4K-1, top 4 bits are reserved */
+
+	/* Array of event_dur, len of array is based on tlv len */
+	uint32 event_dur[1];
+} BWL_POST_PACKED_STRUCT wl_pmalert_event_dur_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_pmalert_ucode_dbg {
+	uint16 type;	     /* WL_PMALERT_UCODE_DBG */
+	uint16 len;	     /* Up to 4K-1, top 4 bits are reserved */
+	uint32 macctrl;
+	uint16 m_p2p_hps;
+	uint32 psm_brc;
+	uint32 ifsstat;
+	uint16 m_p2p_bss_dtim_prd[MAX_P2P_BSS_DTIM_PRD];
+	uint32 psmdebug[20];
+	uint32 phydebug[20];
+} BWL_POST_PACKED_STRUCT wl_pmalert_ucode_dbg_t;
+
+
+/* Structures and constants used for "vndr_ie" IOVar interface */
+#define VNDR_IE_CMD_LEN		4	/* length of the set command string:
+					 * "add", "del" (+ NUL)
+					 */
+
+#define VNDR_IE_INFO_HDR_LEN	(sizeof(uint32))
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint32 pktflag;			/* bitmask indicating which packet(s) contain this IE */
+	vndr_ie_t vndr_ie_data;		/* vendor IE data */
+} BWL_POST_PACKED_STRUCT vndr_ie_info_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+	int iecount;			/* number of entries in the vndr_ie_list[] array */
+	vndr_ie_info_t vndr_ie_list[1];	/* variable size list of vndr_ie_info_t structs */
+} BWL_POST_PACKED_STRUCT vndr_ie_buf_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+	char cmd[VNDR_IE_CMD_LEN];	/* vndr_ie IOVar set command : "add", "del" + NUL */
+	vndr_ie_buf_t vndr_ie_buffer;	/* buffer containing Vendor IE list information */
+} BWL_POST_PACKED_STRUCT vndr_ie_setbuf_t;
+
+/* tag_ID/length/value_buffer tuple */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint8	id;
+	uint8	len;
+	uint8	data[1];
+} BWL_POST_PACKED_STRUCT tlv_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint32 pktflag;			/* bitmask indicating which packet(s) contain this IE */
+	tlv_t ie_data;		/* IE data */
+} BWL_POST_PACKED_STRUCT ie_info_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+	int iecount;			/* number of entries in the ie_list[] array */
+	ie_info_t ie_list[1];	/* variable size list of ie_info_t structs */
+} BWL_POST_PACKED_STRUCT ie_buf_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+	char cmd[VNDR_IE_CMD_LEN];	/* ie IOVar set command : "add" + NUL */
+	ie_buf_t ie_buffer;	/* buffer containing IE list information */
+} BWL_POST_PACKED_STRUCT ie_setbuf_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint32 pktflag;		/* bitmask indicating which packet(s) contain this IE */
+	uint8 id;		/* IE type */
+} BWL_POST_PACKED_STRUCT ie_getbuf_t;
+
+/* structures used to define format of wps ie data from probe requests */
+/* passed up to applications via iovar "prbreq_wpsie" */
+typedef BWL_PRE_PACKED_STRUCT struct sta_prbreq_wps_ie_hdr {
+	struct ether_addr staAddr;
+	uint16 ieLen;
+} BWL_POST_PACKED_STRUCT sta_prbreq_wps_ie_hdr_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct sta_prbreq_wps_ie_data {
+	sta_prbreq_wps_ie_hdr_t hdr;
+	uint8 ieData[1];
+} BWL_POST_PACKED_STRUCT sta_prbreq_wps_ie_data_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct sta_prbreq_wps_ie_list {
+	uint32 totLen;
+	uint8 ieDataList[1];
+} BWL_POST_PACKED_STRUCT sta_prbreq_wps_ie_list_t;
+
+
+#ifdef WLMEDIA_TXFAILEVENT
+typedef BWL_PRE_PACKED_STRUCT struct {
+	char   dest[ETHER_ADDR_LEN]; /* destination MAC */
+	uint8  prio;            /* Packet Priority */
+	uint8  flags;           /* Flags           */
+	uint32 tsf_l;           /* TSF timer low   */
+	uint32 tsf_h;           /* TSF timer high  */
+	uint16 rates;           /* Main Rates      */
+	uint16 txstatus;        /* TX Status       */
+} BWL_POST_PACKED_STRUCT txfailinfo_t;
+#endif /* WLMEDIA_TXFAILEVENT */
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint32 flags;
+	chanspec_t chanspec;			/* txpwr report for this channel */
+	chanspec_t local_chanspec;		/* channel on which we are associated */
+	uint8 local_max;			/* local max according to the AP */
+	uint8 local_constraint;			/* local constraint according to the AP */
+	int8  antgain[2];			/* Ant gain for each band - from SROM */
+	uint8 rf_cores;				/* count of RF Cores being reported */
+	uint8 est_Pout[4];			/* Latest tx power out estimate per RF chain */
+	uint8 est_Pout_act[4]; /* Latest tx power out estimate per RF chain w/o adjustment */
+	uint8 est_Pout_cck;			/* Latest CCK tx power out estimate */
+	uint8 tx_power_max[4];		/* Maximum target power among all rates */
+	uint tx_power_max_rate_ind[4];		/* Index of the rate with the max target power */
+	int8 sar;					/* SAR limit for display by wl executable */
+	int8 channel_bandwidth;		/* 20, 40 or 80 MHz bandwidth? */
+	uint8 version;				/* Version of the data format wlu <--> driver */
+	uint8 display_core;			/* Displayed curpower core */
+	int8 target_offsets[4];		/* Target power offsets for current rate per core */
+	uint32 last_tx_ratespec;	/* Ratespec for last transmition */
+	uint   user_target;		/* user limit */
+	uint32 ppr_len;		/* length of each ppr serialization buffer */
+	int8 SARLIMIT[MAX_STREAMS_SUPPORTED];
+	uint8  pprdata[1];		/* ppr serialization buffer */
+} BWL_POST_PACKED_STRUCT tx_pwr_rpt_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+	struct ipv4_addr	ipv4_addr;
+	struct ether_addr nexthop;
+} BWL_POST_PACKED_STRUCT ibss_route_entry_t;
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint32 num_entry;
+	ibss_route_entry_t route_entry[1];
+} BWL_POST_PACKED_STRUCT ibss_route_tbl_t;
+
+#define MAX_IBSS_ROUTE_TBL_ENTRY	64
+
+#define TXPWR_TARGET_VERSION  0
+typedef BWL_PRE_PACKED_STRUCT struct {
+	int32 version;		/* version number */
+	chanspec_t chanspec;	/* txpwr report for this channel */
+	int8 txpwr[WL_STA_ANT_MAX]; /* Max tx target power, in qdb */
+	uint8 rf_cores;		/* count of RF Cores being reported */
+} BWL_POST_PACKED_STRUCT txpwr_target_max_t;
+
+#define BSS_PEER_INFO_PARAM_CUR_VER	0
+/* Input structure for IOV_BSS_PEER_INFO */
+typedef BWL_PRE_PACKED_STRUCT	struct {
+	uint16			version;
+	struct	ether_addr ea;	/* peer MAC address */
+} BWL_POST_PACKED_STRUCT bss_peer_info_param_t;
+
+#define BSS_PEER_INFO_CUR_VER		0
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint16			version;
+	struct ether_addr	ea;
+	int32			rssi;
+	uint32			tx_rate;	/* current tx rate */
+	uint32			rx_rate;	/* current rx rate */
+	wl_rateset_t		rateset;	/* rateset in use */
+	uint32			age;		/* age in seconds */
+} BWL_POST_PACKED_STRUCT bss_peer_info_t;
+
+#define BSS_PEER_LIST_INFO_CUR_VER	0
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint16			version;
+	uint16			bss_peer_info_len;	/* length of bss_peer_info_t */
+	uint32			count;			/* number of peer info */
+	bss_peer_info_t		peer_info[1];		/* peer info */
+} BWL_POST_PACKED_STRUCT bss_peer_list_info_t;
+
+#define BSS_PEER_LIST_INFO_FIXED_LEN OFFSETOF(bss_peer_list_info_t, peer_info)
+
+#define AIBSS_BCN_FORCE_CONFIG_VER_0	0
+
+/* structure used to configure AIBSS beacon force xmit */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint16  version;
+	uint16	len;
+	uint32 initial_min_bcn_dur;	/* dur in ms to check a bcn in bcn_flood period */
+	uint32 min_bcn_dur;	/* dur in ms to check a bcn after bcn_flood period */
+	uint32 bcn_flood_dur; /* Initial bcn xmit period in ms */
+} BWL_POST_PACKED_STRUCT aibss_bcn_force_config_t;
+
+#define AIBSS_TXFAIL_CONFIG_VER_0    0
+
+/* structure used to configure aibss tx fail event */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint16  version;
+	uint16  len;
+	uint32 bcn_timeout;     /* dur in seconds to receive 1 bcn */
+	uint32 max_tx_retry;     /* no of consecutive no acks to send txfail event */
+} BWL_POST_PACKED_STRUCT aibss_txfail_config_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_aibss_if {
+	uint16 version;
+	uint16 len;
+	uint32 flags;
+	struct ether_addr addr;
+	chanspec_t chspec;
+} BWL_POST_PACKED_STRUCT wl_aibss_if_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wlc_ipfo_route_entry {
+	struct ipv4_addr ip_addr;
+	struct ether_addr nexthop;
+} BWL_POST_PACKED_STRUCT wlc_ipfo_route_entry_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wlc_ipfo_route_tbl {
+	uint32 num_entry;
+	wlc_ipfo_route_entry_t route_entry[1];
+} BWL_POST_PACKED_STRUCT wlc_ipfo_route_tbl_t;
+
+#define WL_IPFO_ROUTE_TBL_FIXED_LEN 4
+#define WL_MAX_IPFO_ROUTE_TBL_ENTRY	64
+
+/* no strict structure packing */
+#include <packed_section_end.h>
+
+	/* Global ASSERT Logging */
+#define ASSERTLOG_CUR_VER	0x0100
+#define MAX_ASSRTSTR_LEN	64
+
+	typedef struct assert_record {
+		uint32 time;
+		uint8 seq_num;
+		char str[MAX_ASSRTSTR_LEN];
+	} assert_record_t;
+
+	typedef struct assertlog_results {
+		uint16 version;
+		uint16 record_len;
+		uint32 num;
+		assert_record_t logs[1];
+	} assertlog_results_t;
+
+#define LOGRRC_FIX_LEN	8
+#define IOBUF_ALLOWED_NUM_OF_LOGREC(type, len) ((len - LOGRRC_FIX_LEN)/sizeof(type))
+
+
+	/* chanim acs record */
+	typedef struct {
+		bool valid;
+		uint8 trigger;
+		chanspec_t selected_chspc;
+		int8 bgnoise;
+		uint32 glitch_cnt;
+		uint8 ccastats;
+		uint timestamp;
+	} chanim_acs_record_t;
+
+	typedef struct {
+		chanim_acs_record_t acs_record[CHANIM_ACS_RECORD];
+		uint8 count;
+		uint timestamp;
+	} wl_acs_record_t;
+
+	typedef struct chanim_stats {
+		uint32 glitchcnt;               /* normalized as per second count */
+		uint32 badplcp;                 /* normalized as per second count */
+		uint8 ccastats[CCASTATS_MAX];   /* normalized as 0-255 */
+		int8 bgnoise;			/* background noise level (in dBm) */
+		chanspec_t chanspec;
+		uint32 timestamp;
+		uint32 bphy_glitchcnt;          /* normalized as per second count */
+		uint32 bphy_badplcp;            /* normalized as per second count */
+		uint8 chan_idle;                /* normalized as 0~255 */
+	} chanim_stats_t;
+
+#define WL_CHANIM_STATS_VERSION 2
+
+typedef struct {
+	uint32 buflen;
+	uint32 version;
+	uint32 count;
+	chanim_stats_t stats[1];
+} wl_chanim_stats_t;
+
+#define WL_CHANIM_STATS_FIXED_LEN OFFSETOF(wl_chanim_stats_t, stats)
+
+/* Noise measurement metrics. */
+#define NOISE_MEASURE_KNOISE	0x1
+
+/* scb probe parameter */
+typedef struct {
+	uint32 scb_timeout;
+	uint32 scb_activity_time;
+	uint32 scb_max_probe;
+} wl_scb_probe_t;
+
+/* structure/defines for selective mgmt frame (smf) stats support */
+
+#define SMFS_VERSION 1
+/* selected mgmt frame (smf) stats element */
+typedef struct wl_smfs_elem {
+	uint32 count;
+	uint16 code;  /* SC or RC code */
+} wl_smfs_elem_t;
+
+typedef struct wl_smf_stats {
+	uint32 version;
+	uint16 length;	/* reserved for future usage */
+	uint8 type;
+	uint8 codetype;
+	uint32 ignored_cnt;
+	uint32 malformed_cnt;
+	uint32 count_total; /* count included the interested group */
+	wl_smfs_elem_t elem[1];
+} wl_smf_stats_t;
+
+#define WL_SMFSTATS_FIXED_LEN OFFSETOF(wl_smf_stats_t, elem);
+
+enum {
+	SMFS_CODETYPE_SC,
+	SMFS_CODETYPE_RC
+};
+
+typedef enum smfs_type {
+	SMFS_TYPE_AUTH,
+	SMFS_TYPE_ASSOC,
+	SMFS_TYPE_REASSOC,
+	SMFS_TYPE_DISASSOC_TX,
+	SMFS_TYPE_DISASSOC_RX,
+	SMFS_TYPE_DEAUTH_TX,
+	SMFS_TYPE_DEAUTH_RX,
+	SMFS_TYPE_MAX
+} smfs_type_t;
+
+#ifdef PHYMON
+
+#define PHYMON_VERSION 1
+
+typedef struct wl_phycal_core_state {
+	/* Tx IQ/LO calibration coeffs */
+	int16 tx_iqlocal_a;
+	int16 tx_iqlocal_b;
+	int8 tx_iqlocal_ci;
+	int8 tx_iqlocal_cq;
+	int8 tx_iqlocal_di;
+	int8 tx_iqlocal_dq;
+	int8 tx_iqlocal_ei;
+	int8 tx_iqlocal_eq;
+	int8 tx_iqlocal_fi;
+	int8 tx_iqlocal_fq;
+
+	/* Rx IQ calibration coeffs */
+	int16 rx_iqcal_a;
+	int16 rx_iqcal_b;
+
+	uint8 tx_iqlocal_pwridx; /* Tx Power Index for Tx IQ/LO calibration */
+	uint32 papd_epsilon_table[64]; /* PAPD epsilon table */
+	int16 papd_epsilon_offset; /* PAPD epsilon offset */
+	uint8 curr_tx_pwrindex; /* Tx power index */
+	int8 idle_tssi; /* Idle TSSI */
+	int8 est_tx_pwr; /* Estimated Tx Power (dB) */
+	int8 est_rx_pwr; /* Estimated Rx Power (dB) from RSSI */
+	uint16 rx_gaininfo; /* Rx gain applied on last Rx pkt */
+	uint16 init_gaincode; /* initgain required for ACI */
+	int8 estirr_tx;
+	int8 estirr_rx;
+
+} wl_phycal_core_state_t;
+
+typedef struct wl_phycal_state {
+	int version;
+	int8 num_phy_cores; /* number of cores */
+	int8 curr_temperature; /* on-chip temperature sensor reading */
+	chanspec_t chspec; /* channspec for this state */
+	bool aci_state; /* ACI state: ON/OFF */
+	uint16 crsminpower; /* crsminpower required for ACI */
+	uint16 crsminpowerl; /* crsminpowerl required for ACI */
+	uint16 crsminpoweru; /* crsminpoweru required for ACI */
+	wl_phycal_core_state_t phycal_core[1];
+} wl_phycal_state_t;
+
+#define WL_PHYCAL_STAT_FIXED_LEN OFFSETOF(wl_phycal_state_t, phycal_core)
+#endif /* PHYMON */
+
+/* discovery state */
+typedef struct wl_p2p_disc_st {
+	uint8 state;	/* see state */
+	chanspec_t chspec;	/* valid in listen state */
+	uint16 dwell;	/* valid in listen state, in ms */
+} wl_p2p_disc_st_t;
+
+/* scan request */
+typedef struct wl_p2p_scan {
+	uint8 type;		/* 'S' for WLC_SCAN, 'E' for "escan" */
+	uint8 reserved[3];
+	/* scan or escan parms... */
+} wl_p2p_scan_t;
+
+/* i/f request */
+typedef struct wl_p2p_if {
+	struct ether_addr addr;
+	uint8 type;	/* see i/f type */
+	chanspec_t chspec;	/* for p2p_ifadd GO */
+} wl_p2p_if_t;
+
+/* i/f query */
+typedef struct wl_p2p_ifq {
+	uint bsscfgidx;
+	char ifname[BCM_MSG_IFNAME_MAX];
+} wl_p2p_ifq_t;
+
+/* OppPS & CTWindow */
+typedef struct wl_p2p_ops {
+	uint8 ops;	/* 0: disable 1: enable */
+	uint8 ctw;	/* >= 10 */
+} wl_p2p_ops_t;
+
+/* absence and presence request */
+typedef struct wl_p2p_sched_desc {
+	uint32 start;
+	uint32 interval;
+	uint32 duration;
+	uint32 count;	/* see count */
+} wl_p2p_sched_desc_t;
+
+typedef struct wl_p2p_sched {
+	uint8 type;	/* see schedule type */
+	uint8 action;	/* see schedule action */
+	uint8 option;	/* see schedule option */
+	wl_p2p_sched_desc_t desc[1];
+} wl_p2p_sched_t;
+
+typedef struct wl_p2p_wfds_hash {
+	uint32	advt_id;
+	uint16	nw_cfg_method;
+	uint8	wfds_hash[6];
+	uint8	name_len;
+	uint8	service_name[MAX_WFDS_SVC_NAME_LEN];
+} wl_p2p_wfds_hash_t;
+
+typedef struct wl_bcmdcs_data {
+	uint reason;
+	chanspec_t chspec;
+} wl_bcmdcs_data_t;
+
+
+/* NAT configuration */
+typedef struct {
+	uint32 ipaddr;		/* interface ip address */
+	uint32 ipaddr_mask;	/* interface ip address mask */
+	uint32 ipaddr_gateway;	/* gateway ip address */
+	uint8 mac_gateway[6];	/* gateway mac address */
+	uint32 ipaddr_dns;	/* DNS server ip address, valid only for public if */
+	uint8 mac_dns[6];	/* DNS server mac address,  valid only for public if */
+	uint8 GUID[38];		/* interface GUID */
+} nat_if_info_t;
+
+typedef struct {
+	uint op;		/* operation code */
+	bool pub_if;		/* set for public if, clear for private if */
+	nat_if_info_t if_info;	/* interface info */
+} nat_cfg_t;
+
+typedef struct {
+	int state;	/* NAT state returned */
+} nat_state_t;
+
+
+#define BTA_STATE_LOG_SZ	64
+
+/* BTAMP Statemachine states */
+enum {
+	HCIReset = 1,
+	HCIReadLocalAMPInfo,
+	HCIReadLocalAMPASSOC,
+	HCIWriteRemoteAMPASSOC,
+	HCICreatePhysicalLink,
+	HCIAcceptPhysicalLinkRequest,
+	HCIDisconnectPhysicalLink,
+	HCICreateLogicalLink,
+	HCIAcceptLogicalLink,
+	HCIDisconnectLogicalLink,
+	HCILogicalLinkCancel,
+	HCIAmpStateChange,
+	HCIWriteLogicalLinkAcceptTimeout
+};
+
+typedef struct flush_txfifo {
+	uint32 txfifobmp;
+	uint32 hwtxfifoflush;
+	struct ether_addr ea;
+} flush_txfifo_t;
+
+enum {
+	SPATIAL_MODE_2G_IDX = 0,
+	SPATIAL_MODE_5G_LOW_IDX,
+	SPATIAL_MODE_5G_MID_IDX,
+	SPATIAL_MODE_5G_HIGH_IDX,
+	SPATIAL_MODE_5G_UPPER_IDX,
+	SPATIAL_MODE_MAX_IDX
+};
+
+#define WLC_TXCORE_MAX	4	/* max number of txcore supports */
+#define WLC_SUBBAND_MAX	4	/* max number of sub-band supports */
+typedef struct {
+	uint8	band2g[WLC_TXCORE_MAX];
+	uint8	band5g[WLC_SUBBAND_MAX][WLC_TXCORE_MAX];
+} sar_limit_t;
+
+#define WLC_TXCAL_CORE_MAX 2	/* max number of txcore supports for txcal */
+#define MAX_NUM_TXCAL_MEAS 128
+
+typedef struct wl_txcal_meas {
+	uint8 tssi[WLC_TXCAL_CORE_MAX][MAX_NUM_TXCAL_MEAS];
+	int16 pwr[WLC_TXCAL_CORE_MAX][MAX_NUM_TXCAL_MEAS];
+	uint8 valid_cnt;
+} wl_txcal_meas_t;
+
+typedef struct wl_txcal_power_tssi {
+	uint8 set_core;
+	uint8 channel;
+	int16 pwr_start[WLC_TXCAL_CORE_MAX];
+	uint8 num_entries[WLC_TXCAL_CORE_MAX];
+	uint8 tssi[WLC_TXCAL_CORE_MAX][MAX_NUM_TXCAL_MEAS];
+	bool gen_tbl;
+} wl_txcal_power_tssi_t;
+
+/* IOVAR "mempool" parameter. Used to retrieve a list of memory pool statistics. */
+typedef struct wl_mempool_stats {
+	int	num;		/* Number of memory pools */
+	bcm_mp_stats_t s[1];	/* Variable array of memory pool stats. */
+} wl_mempool_stats_t;
+
+typedef struct {
+	uint32 ipaddr;
+	uint32 ipaddr_netmask;
+	uint32 ipaddr_gateway;
+} nwoe_ifconfig_t;
+
+/* Traffic management priority classes */
+typedef enum trf_mgmt_priority_class {
+	trf_mgmt_priority_low           = 0,        /* Maps to 802.1p BK */
+	trf_mgmt_priority_medium        = 1,        /* Maps to 802.1p BE */
+	trf_mgmt_priority_high          = 2,        /* Maps to 802.1p VI */
+	trf_mgmt_priority_nochange	= 3,	    /* do not update the priority */
+	trf_mgmt_priority_invalid       = (trf_mgmt_priority_nochange + 1)
+} trf_mgmt_priority_class_t;
+
+/* Traffic management configuration parameters */
+typedef struct trf_mgmt_config {
+	uint32  trf_mgmt_enabled;                           /* 0 - disabled, 1 - enabled */
+	uint32  flags;                                      /* See TRF_MGMT_FLAG_xxx defines */
+	uint32  host_ip_addr;                               /* My IP address to determine subnet */
+	uint32  host_subnet_mask;                           /* My subnet mask */
+	uint32  downlink_bandwidth;                         /* In units of kbps */
+	uint32  uplink_bandwidth;                           /* In units of kbps */
+	uint32  min_tx_bandwidth[TRF_MGMT_MAX_PRIORITIES];  /* Minimum guaranteed tx bandwidth */
+	uint32  min_rx_bandwidth[TRF_MGMT_MAX_PRIORITIES];  /* Minimum guaranteed rx bandwidth */
+} trf_mgmt_config_t;
+
+/* Traffic management filter */
+typedef struct trf_mgmt_filter {
+	struct ether_addr           dst_ether_addr;         /* His L2 address */
+	uint32                      dst_ip_addr;            /* His IP address */
+	uint16                      dst_port;               /* His L4 port */
+	uint16                      src_port;               /* My L4 port */
+	uint16                      prot;                   /* L4 protocol (only TCP or UDP) */
+	uint16                      flags;                  /* TBD. For now, this must be zero. */
+	trf_mgmt_priority_class_t   priority;               /* Priority for filtered packets */
+	uint32                      dscp;                   /* DSCP */
+} trf_mgmt_filter_t;
+
+/* Traffic management filter list (variable length) */
+typedef struct trf_mgmt_filter_list     {
+	uint32              num_filters;
+	trf_mgmt_filter_t   filter[1];
+} trf_mgmt_filter_list_t;
+
+/* Traffic management global info used for all queues */
+typedef struct trf_mgmt_global_info {
+	uint32  maximum_bytes_per_second;
+	uint32  maximum_bytes_per_sampling_period;
+	uint32  total_bytes_consumed_per_second;
+	uint32  total_bytes_consumed_per_sampling_period;
+	uint32  total_unused_bytes_per_sampling_period;
+} trf_mgmt_global_info_t;
+
+/* Traffic management shaping info per priority queue */
+typedef struct trf_mgmt_shaping_info {
+	uint32  gauranteed_bandwidth_percentage;
+	uint32  guaranteed_bytes_per_second;
+	uint32  guaranteed_bytes_per_sampling_period;
+	uint32  num_bytes_produced_per_second;
+	uint32  num_bytes_consumed_per_second;
+	uint32  num_queued_packets;                         /* Number of packets in queue */
+	uint32  num_queued_bytes;                           /* Number of bytes in queue */
+} trf_mgmt_shaping_info_t;
+
+/* Traffic management shaping info array */
+typedef struct trf_mgmt_shaping_info_array {
+	trf_mgmt_global_info_t   tx_global_shaping_info;
+	trf_mgmt_shaping_info_t  tx_queue_shaping_info[TRF_MGMT_MAX_PRIORITIES];
+	trf_mgmt_global_info_t   rx_global_shaping_info;
+	trf_mgmt_shaping_info_t  rx_queue_shaping_info[TRF_MGMT_MAX_PRIORITIES];
+} trf_mgmt_shaping_info_array_t;
+
+
+/* Traffic management statistical counters */
+typedef struct trf_mgmt_stats {
+	uint32  num_processed_packets;      /* Number of packets processed */
+	uint32  num_processed_bytes;        /* Number of bytes processed */
+	uint32  num_discarded_packets;      /* Number of packets discarded from queue */
+} trf_mgmt_stats_t;
+
+/* Traffic management statisics array */
+typedef struct trf_mgmt_stats_array {
+	trf_mgmt_stats_t  tx_queue_stats[TRF_MGMT_MAX_PRIORITIES];
+	trf_mgmt_stats_t  rx_queue_stats[TRF_MGMT_MAX_PRIORITIES];
+} trf_mgmt_stats_array_t;
+
+typedef struct powersel_params {
+	/* LPC Params exposed via IOVAR */
+	int32		tp_ratio_thresh;  /* Throughput ratio threshold */
+	uint8		rate_stab_thresh; /* Thresh for rate stability based on nupd */
+	uint8		pwr_stab_thresh; /* Number of successes before power step down */
+	uint8		pwr_sel_exp_time; /* Time lapse for expiry of database */
+} powersel_params_t;
+
+typedef struct lpc_params {
+	/* LPC Params exposed via IOVAR */
+	uint8		rate_stab_thresh; /* Thresh for rate stability based on nupd */
+	uint8		pwr_stab_thresh; /* Number of successes before power step down */
+	uint8		lpc_exp_time; /* Time lapse for expiry of database */
+	uint8		pwrup_slow_step; /* Step size for slow step up */
+	uint8		pwrup_fast_step; /* Step size for fast step up */
+	uint8		pwrdn_slow_step; /* Step size for slow step down */
+} lpc_params_t;
+
+/* tx pkt delay statistics */
+#define	SCB_RETRY_SHORT_DEF	7	/* Default Short retry Limit */
+#define WLPKTDLY_HIST_NBINS	16	/* number of bins used in the Delay histogram */
+
+/* structure to store per-AC delay statistics */
+typedef struct scb_delay_stats {
+	uint32 txmpdu_lost;	/* number of MPDUs lost */
+	uint32 txmpdu_cnt[SCB_RETRY_SHORT_DEF]; /* retry times histogram */
+	uint32 delay_sum[SCB_RETRY_SHORT_DEF]; /* cumulative packet latency */
+	uint32 delay_min;	/* minimum packet latency observed */
+	uint32 delay_max;	/* maximum packet latency observed */
+	uint32 delay_avg;	/* packet latency average */
+	uint32 delay_hist[WLPKTDLY_HIST_NBINS];	/* delay histogram */
+} scb_delay_stats_t;
+
+/* structure for txdelay event */
+typedef struct txdelay_event {
+	uint8	status;
+	int		rssi;
+	chanim_stats_t		chanim_stats;
+	scb_delay_stats_t	delay_stats[AC_COUNT];
+} txdelay_event_t;
+
+/* structure for txdelay parameters */
+typedef struct txdelay_params {
+	uint16	ratio;	/* Avg Txdelay Delta */
+	uint8	cnt;	/* Sample cnt */
+	uint8	period;	/* Sample period */
+	uint8	tune;	/* Debug */
+} txdelay_params_t;
+
+enum {
+	WNM_SERVICE_DMS = 1,
+	WNM_SERVICE_FMS = 2,
+	WNM_SERVICE_TFS = 3
+};
+
+/* Definitions for WNM/NPS TCLAS */
+typedef struct wl_tclas {
+	uint8 user_priority;
+	uint8 fc_len;
+	dot11_tclas_fc_t fc;
+} wl_tclas_t;
+
+#define WL_TCLAS_FIXED_SIZE	OFFSETOF(wl_tclas_t, fc)
+
+typedef struct wl_tclas_list {
+	uint32 num;
+	wl_tclas_t tclas[1];
+} wl_tclas_list_t;
+
+/* Definitions for WNM/NPS Traffic Filter Service */
+typedef struct wl_tfs_req {
+	uint8 tfs_id;
+	uint8 tfs_actcode;
+	uint8 tfs_subelem_id;
+	uint8 send;
+} wl_tfs_req_t;
+
+typedef struct wl_tfs_filter {
+	uint8 status;			/* Status returned by the AP */
+	uint8 tclas_proc;		/* TCLAS processing value (0:and, 1:or)  */
+	uint8 tclas_cnt;		/* count of all wl_tclas_t in tclas array */
+	uint8 tclas[1];			/* VLA of wl_tclas_t */
+} wl_tfs_filter_t;
+#define WL_TFS_FILTER_FIXED_SIZE	OFFSETOF(wl_tfs_filter_t, tclas)
+
+typedef struct wl_tfs_fset {
+	struct ether_addr ea;		/* Address of AP/STA involved with this filter set */
+	uint8 tfs_id;			/* TFS ID field chosen by STA host */
+	uint8 status;			/* Internal status TFS_STATUS_xxx */
+	uint8 actcode;			/* Action code DOT11_TFS_ACTCODE_xxx */
+	uint8 token;			/* Token used in last request frame */
+	uint8 notify;			/* Notify frame sent/received because of this set */
+	uint8 filter_cnt;		/* count of all wl_tfs_filter_t in filter array */
+	uint8 filter[1];		/* VLA of wl_tfs_filter_t */
+} wl_tfs_fset_t;
+#define WL_TFS_FSET_FIXED_SIZE		OFFSETOF(wl_tfs_fset_t, filter)
+
+enum {
+	TFS_STATUS_DISABLED = 0,	/* TFS filter set disabled by user */
+	TFS_STATUS_DISABLING = 1,	/* Empty request just sent to AP */
+	TFS_STATUS_VALIDATED = 2,	/* Filter set validated by AP (but maybe not enabled!) */
+	TFS_STATUS_VALIDATING = 3,	/* Filter set just sent to AP */
+	TFS_STATUS_NOT_ASSOC = 4,	/* STA not associated */
+	TFS_STATUS_NOT_SUPPORT = 5,	/* TFS not supported by AP */
+	TFS_STATUS_DENIED = 6,		/* Filter set refused by AP (=> all sets are disabled!) */
+};
+
+typedef struct wl_tfs_status {
+	uint8 fset_cnt;			/* count of all wl_tfs_fset_t in fset array */
+	wl_tfs_fset_t fset[1];		/* VLA of wl_tfs_fset_t */
+} wl_tfs_status_t;
+
+typedef struct wl_tfs_set {
+	uint8 send;			/* Immediatly register registered sets on AP side */
+	uint8 tfs_id;			/* ID of a specific set (existing or new), or nul for all */
+	uint8 actcode;			/* Action code for this filter set */
+	uint8 tclas_proc;		/* TCLAS processing operator for this filter set */
+} wl_tfs_set_t;
+
+typedef struct wl_tfs_term {
+	uint8 del;			/* Delete internal set once confirmation received */
+	uint8 tfs_id;			/* ID of a specific set (existing), or nul for all */
+} wl_tfs_term_t;
+
+
+#define DMS_DEP_PROXY_ARP (1 << 0)
+
+/* Definitions for WNM/NPS Directed Multicast Service */
+enum {
+	DMS_STATUS_DISABLED = 0,	/* DMS desc disabled by user */
+	DMS_STATUS_ACCEPTED = 1,	/* Request accepted by AP */
+	DMS_STATUS_NOT_ASSOC = 2,	/* STA not associated */
+	DMS_STATUS_NOT_SUPPORT = 3,	/* DMS not supported by AP */
+	DMS_STATUS_DENIED = 4,		/* Request denied by AP */
+	DMS_STATUS_TERM = 5,		/* Request terminated by AP */
+	DMS_STATUS_REMOVING = 6,	/* Remove request just sent */
+	DMS_STATUS_ADDING = 7,		/* Add request just sent */
+	DMS_STATUS_ERROR = 8,		/* Non compliant AP behvior */
+	DMS_STATUS_IN_PROGRESS = 9, /* Request just sent */
+	DMS_STATUS_REQ_MISMATCH = 10 /* Conditions for sending DMS req not met */
+};
+
+typedef struct wl_dms_desc {
+	uint8 user_id;
+	uint8 status;
+	uint8 token;
+	uint8 dms_id;
+	uint8 tclas_proc;
+	uint8 mac_len;		/* length of all ether_addr in data array, 0 if STA */
+	uint8 tclas_len;	/* length of all wl_tclas_t in data array */
+	uint8 data[1];		/* VLA of 'ether_addr' and 'wl_tclas_t' (in this order ) */
+} wl_dms_desc_t;
+
+#define WL_DMS_DESC_FIXED_SIZE	OFFSETOF(wl_dms_desc_t, data)
+
+typedef struct wl_dms_status {
+	uint32 cnt;
+	wl_dms_desc_t desc[1];
+} wl_dms_status_t;
+
+typedef struct wl_dms_set {
+	uint8 send;
+	uint8 user_id;
+	uint8 tclas_proc;
+} wl_dms_set_t;
+
+typedef struct wl_dms_term {
+	uint8 del;
+	uint8 user_id;
+} wl_dms_term_t;
+
+typedef struct wl_service_term {
+	uint8 service;
+	union {
+		wl_dms_term_t dms;
+	} u;
+} wl_service_term_t;
+
+/* Definitions for WNM/NPS BSS Transistion */
+typedef struct wl_bsstrans_req {
+	uint16 tbtt;			/* time of BSS to end of life, in unit of TBTT */
+	uint16 dur;			/* time of BSS to keep off, in unit of minute */
+	uint8 reqmode;			/* request mode of BSS transition request */
+	uint8 unicast;			/* request by unicast or by broadcast */
+} wl_bsstrans_req_t;
+
+enum {
+	BSSTRANS_RESP_AUTO = 0,		/* Currently equivalent to ENABLE */
+	BSSTRANS_RESP_DISABLE = 1,	/* Never answer BSS Trans Req frames */
+	BSSTRANS_RESP_ENABLE = 2,	/* Always answer Req frames with preset data */
+	BSSTRANS_RESP_WAIT = 3,		/* Send ind, wait and/or send preset data (NOT IMPL) */
+	BSSTRANS_RESP_IMMEDIATE = 4	/* After an ind, set data and send resp (NOT IMPL) */
+};
+
+typedef struct wl_bsstrans_resp {
+	uint8 policy;
+	uint8 status;
+	uint8 delay;
+	struct ether_addr target;
+} wl_bsstrans_resp_t;
+
+/* "wnm_bsstrans_policy" argument programs behavior after BSSTRANS Req reception.
+ * BSS-Transition feature is used by multiple programs such as NPS-PF, VE-PF,
+ * Band-steering, Hotspot 2.0 and customer requirements. Each PF and its test plan
+ * mandates different behavior on receiving BSS-transition request. To accomodate
+ * such divergent behaviors these policies have been created.
+ */
+enum {
+	WL_BSSTRANS_POLICY_ROAM_ALWAYS = 0,	/* Roam (or disassociate) in all cases */
+	WL_BSSTRANS_POLICY_ROAM_IF_MODE = 1,	/* Roam only if requested by Request Mode field */
+	WL_BSSTRANS_POLICY_ROAM_IF_PREF = 2,	/* Roam only if Preferred BSS provided */
+	WL_BSSTRANS_POLICY_WAIT = 3,		/* Wait for deauth and send Accepted status */
+	WL_BSSTRANS_POLICY_PRODUCT = 4,		/* Policy for real product use cases (non-pf) */
+};
+
+/* Definitions for WNM/NPS TIM Broadcast */
+typedef struct wl_timbc_offset {
+	int16 offset;		/* offset in us */
+	uint16 fix_intv;	/* override interval sent from STA */
+	uint16 rate_override;	/* use rate override to send high rate TIM broadcast frame */
+	uint8 tsf_present;	/* show timestamp in TIM broadcast frame */
+} wl_timbc_offset_t;
+
+typedef struct wl_timbc_set {
+	uint8 interval;		/* Interval in DTIM wished or required. */
+	uint8 flags;		/* Bitfield described below */
+	uint16 rate_min;	/* Minimum rate required for High/Low TIM frames. Optionnal */
+	uint16 rate_max;	/* Maximum rate required for High/Low TIM frames. Optionnal */
+} wl_timbc_set_t;
+
+enum {
+	WL_TIMBC_SET_TSF_REQUIRED = 1,	/* Enable TIMBC only if TSF in TIM frames */
+	WL_TIMBC_SET_NO_OVERRIDE = 2,	/* ... if AP does not override interval */
+	WL_TIMBC_SET_PROXY_ARP = 4,	/* ... if AP support Proxy ARP */
+	WL_TIMBC_SET_DMS_ACCEPTED = 8	/* ... if all DMS desc have been accepted */
+};
+
+typedef struct wl_timbc_status {
+	uint8 status_sta;		/* Status from internal state machine (check below) */
+	uint8 status_ap;		/* From AP response frame (check 8.4.2.86 from 802.11) */
+	uint8 interval;
+	uint8 pad;
+	int32 offset;
+	uint16 rate_high;
+	uint16 rate_low;
+} wl_timbc_status_t;
+
+enum {
+	WL_TIMBC_STATUS_DISABLE = 0,		/* TIMBC disabled by user */
+	WL_TIMBC_STATUS_REQ_MISMATCH = 1,	/* AP settings do no match user requirements */
+	WL_TIMBC_STATUS_NOT_ASSOC = 2,		/* STA not associated */
+	WL_TIMBC_STATUS_NOT_SUPPORT = 3,	/* TIMBC not supported by AP */
+	WL_TIMBC_STATUS_DENIED = 4,		/* Req to disable TIMBC sent to AP */
+	WL_TIMBC_STATUS_ENABLE = 5		/* TIMBC enabled */
+};
+
+/* Definitions for PM2 Dynamic Fast Return To Sleep */
+typedef struct wl_pm2_sleep_ret_ext {
+	uint8 logic;			/* DFRTS logic: see WL_DFRTS_LOGIC_* below */
+	uint16 low_ms;			/* Low FRTS timeout */
+	uint16 high_ms;			/* High FRTS timeout */
+	uint16 rx_pkts_threshold;	/* switching threshold: # rx pkts */
+	uint16 tx_pkts_threshold;	/* switching threshold: # tx pkts */
+	uint16 txrx_pkts_threshold;	/* switching threshold: # (tx+rx) pkts */
+	uint32 rx_bytes_threshold;	/* switching threshold: # rx bytes */
+	uint32 tx_bytes_threshold;	/* switching threshold: # tx bytes */
+	uint32 txrx_bytes_threshold;	/* switching threshold: # (tx+rx) bytes */
+} wl_pm2_sleep_ret_ext_t;
+
+#define WL_DFRTS_LOGIC_OFF	0	/* Feature is disabled */
+#define WL_DFRTS_LOGIC_OR	1	/* OR all non-zero threshold conditions */
+#define WL_DFRTS_LOGIC_AND	2	/* AND all non-zero threshold conditions */
+
+/* Values for the passive_on_restricted_mode iovar.  When set to non-zero, this iovar
+ * disables automatic conversions of a channel from passively scanned to
+ * actively scanned.  These values only have an effect for country codes such
+ * as XZ where some 5 GHz channels are defined to be passively scanned.
+ */
+#define WL_PASSACTCONV_DISABLE_NONE	0	/* Enable permanent and temporary conversions */
+#define WL_PASSACTCONV_DISABLE_ALL	1	/* Disable permanent and temporary conversions */
+#define WL_PASSACTCONV_DISABLE_PERM	2	/* Disable only permanent conversions */
+
+/* Definitions for Reliable Multicast */
+#define WL_RMC_CNT_VERSION	   1
+#define WL_RMC_TR_VERSION	   1
+#define WL_RMC_MAX_CLIENT	   32
+#define WL_RMC_FLAG_INBLACKLIST	   1
+#define WL_RMC_FLAG_ACTIVEACKER	   2
+#define WL_RMC_FLAG_RELMCAST	   4
+#define WL_RMC_MAX_TABLE_ENTRY     4
+
+#define WL_RMC_VER		   1
+#define WL_RMC_INDEX_ACK_ALL       255
+#define WL_RMC_NUM_OF_MC_STREAMS   4
+#define WL_RMC_MAX_TRS_PER_GROUP   1
+#define WL_RMC_MAX_TRS_IN_ACKALL   1
+#define WL_RMC_ACK_MCAST0          0x02
+#define WL_RMC_ACK_MCAST_ALL       0x01
+#define WL_RMC_ACTF_TIME_MIN       300	 /* time in ms */
+#define WL_RMC_ACTF_TIME_MAX       20000 /* time in ms */
+#define WL_RMC_MAX_NUM_TRS	   32	 /* maximun transmitters allowed */
+#define WL_RMC_ARTMO_MIN           350	 /* time in ms */
+#define WL_RMC_ARTMO_MAX           40000	 /* time in ms */
+
+/* RMC events in action frames */
+enum rmc_opcodes {
+	RELMCAST_ENTRY_OP_DISABLE = 0,   /* Disable multi-cast group */
+	RELMCAST_ENTRY_OP_DELETE  = 1,   /* Delete multi-cast group */
+	RELMCAST_ENTRY_OP_ENABLE  = 2,   /* Enable multi-cast group */
+	RELMCAST_ENTRY_OP_ACK_ALL = 3    /* Enable ACK ALL bit in AMT */
+};
+
+/* RMC operational modes */
+enum rmc_modes {
+	WL_RMC_MODE_RECEIVER    = 0,	 /* Receiver mode by default */
+	WL_RMC_MODE_TRANSMITTER = 1,	 /* Transmitter mode using wl ackreq */
+	WL_RMC_MODE_INITIATOR   = 2	 /* Initiator mode using wl ackreq */
+};
+
+/* Each RMC mcast client info */
+typedef struct wl_relmcast_client {
+	uint8 flag;			/* status of client such as AR, R, or blacklisted */
+	int16 rssi;			/* rssi value of RMC client */
+	struct ether_addr addr;		/* mac address of RMC client */
+} wl_relmcast_client_t;
+
+/* RMC Counters */
+typedef struct wl_rmc_cnts {
+	uint16  version;		/* see definition of WL_CNT_T_VERSION */
+	uint16  length;			/* length of entire structure */
+	uint16	dupcnt;			/* counter for duplicate rmc MPDU */
+	uint16	ackreq_err;		/* counter for wl ackreq error    */
+	uint16	af_tx_err;		/* error count for action frame transmit   */
+	uint16	null_tx_err;		/* error count for rmc null frame transmit */
+	uint16	af_unicast_tx_err;	/* error count for rmc unicast frame transmit */
+	uint16	mc_no_amt_slot;		/* No mcast AMT entry available */
+	/* Unused. Keep for rom compatibility */
+	uint16	mc_no_glb_slot;		/* No mcast entry available in global table */
+	uint16	mc_not_mirrored;	/* mcast group is not mirrored */
+	uint16	mc_existing_tr;		/* mcast group is already taken by transmitter */
+	uint16	mc_exist_in_amt;	/* mcast group is already programmed in amt */
+	/* Unused. Keep for rom compatibility */
+	uint16	mc_not_exist_in_gbl;	/* mcast group is not in global table */
+	uint16	mc_not_exist_in_amt;	/* mcast group is not in AMT table */
+	uint16	mc_utilized;		/* mcast addressed is already taken */
+	uint16	mc_taken_other_tr;	/* multi-cast addressed is already taken */
+	uint32	rmc_rx_frames_mac;      /* no of mc frames received from mac */
+	uint32	rmc_tx_frames_mac;      /* no of mc frames transmitted to mac */
+	uint32	mc_null_ar_cnt;         /* no. of times NULL AR is received */
+	uint32	mc_ar_role_selected;	/* no. of times took AR role */
+	uint32	mc_ar_role_deleted;	/* no. of times AR role cancelled */
+	uint32	mc_noacktimer_expired;  /* no. of times noack timer expired */
+	uint16  mc_no_wl_clk;           /* no wl clk detected when trying to access amt */
+	uint16  mc_tr_cnt_exceeded;     /* No of transmitters in the network exceeded */
+} wl_rmc_cnts_t;
+
+/* RMC Status */
+typedef struct wl_relmcast_st {
+	uint8         ver;		/* version of RMC */
+	uint8         num;		/* number of clients detected by transmitter */
+	wl_relmcast_client_t clients[WL_RMC_MAX_CLIENT];
+	uint16        err;		/* error status (used in infra) */
+	uint16        actf_time;	/* action frame time period */
+} wl_relmcast_status_t;
+
+/* Entry for each STA/node */
+typedef struct wl_rmc_entry {
+	/* operation on multi-cast entry such add,
+	 * delete, ack-all
+	 */
+	int8    flag;
+	struct ether_addr addr;		/* multi-cast group mac address */
+} wl_rmc_entry_t;
+
+/* RMC table */
+typedef struct wl_rmc_entry_table {
+	uint8   index;			/* index to a particular mac entry in table */
+	uint8   opcode;			/* opcodes or operation on entry */
+	wl_rmc_entry_t entry[WL_RMC_MAX_TABLE_ENTRY];
+} wl_rmc_entry_table_t;
+
+typedef struct wl_rmc_trans_elem {
+	struct ether_addr tr_mac;	/* transmitter mac */
+	struct ether_addr ar_mac;	/* ar mac */
+	uint16 artmo;			/* AR timeout */
+	uint8 amt_idx;			/* amt table entry */
+	uint16 flag;			/* entry will be acked, not acked, programmed, full etc */
+} wl_rmc_trans_elem_t;
+
+/* RMC transmitters */
+typedef struct wl_rmc_trans_in_network {
+	uint8         ver;		/* version of RMC */
+	uint8         num_tr;		/* number of transmitters in the network */
+	wl_rmc_trans_elem_t trs[WL_RMC_MAX_NUM_TRS];
+} wl_rmc_trans_in_network_t;
+
+/* To update vendor specific ie for RMC */
+typedef struct wl_rmc_vsie {
+	uint8	oui[DOT11_OUI_LEN];
+	uint16	payload;	/* IE Data Payload */
+} wl_rmc_vsie_t;
+
+
+/* structures  & defines for proximity detection  */
+enum proxd_method {
+	PROXD_UNDEFINED_METHOD = 0,
+	PROXD_RSSI_METHOD = 1,
+	PROXD_TOF_METHOD = 2
+};
+
+/* structures for proximity detection device role */
+#define WL_PROXD_MODE_DISABLE	0
+#define WL_PROXD_MODE_NEUTRAL	1
+#define WL_PROXD_MODE_INITIATOR	2
+#define WL_PROXD_MODE_TARGET	3
+
+#define WL_PROXD_ACTION_STOP		0
+#define WL_PROXD_ACTION_START		1
+
+#define WL_PROXD_FLAG_TARGET_REPORT	0x1
+#define WL_PROXD_FLAG_REPORT_FAILURE	0x2
+#define WL_PROXD_FLAG_INITIATOR_REPORT	0x4
+#define WL_PROXD_FLAG_NOCHANSWT		0x8
+#define WL_PROXD_FLAG_NETRUAL		0x10
+#define WL_PROXD_FLAG_INITIATOR_RPTRTT	0x20
+#define WL_PROXD_FLAG_ONEWAY		0x40
+#define WL_PROXD_FLAG_SEQ_EN		0x80
+
+#define WL_PROXD_RANDOM_WAKEUP	0x8000
+
+typedef struct wl_proxd_iovar {
+	uint16	method;		/* Proxmity Detection method */
+	uint16	mode;		/* Mode (neutral, initiator, target) */
+} wl_proxd_iovar_t;
+
+/*
+ * structures for proximity detection parameters
+ * consists of two parts, common and method specific params
+ * common params should be placed at the beginning
+ */
+
+/* require strict packing */
+#include <packed_section_start.h>
+
+typedef	BWL_PRE_PACKED_STRUCT struct	wl_proxd_params_common	{
+	chanspec_t	chanspec;	/* channel spec */
+	int16		tx_power;	/* tx power of Proximity Detection(PD) frames (in dBm) */
+	uint16		tx_rate;	/* tx rate of PD rames  (in 500kbps units) */
+	uint16		timeout;	/* timeout value */
+	uint16		interval;	/* interval between neighbor finding attempts (in TU) */
+	uint16		duration;	/* duration of neighbor finding attempts (in ms) */
+} BWL_POST_PACKED_STRUCT wl_proxd_params_common_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_params_rssi_method {
+	chanspec_t	chanspec;	/* chanspec for home channel */
+	int16		tx_power;	/* tx power of Proximity Detection frames (in dBm) */
+	uint16		tx_rate;	/* tx rate of PD frames, 500kbps units */
+	uint16		timeout;	/* state machine wait timeout of the frames (in ms) */
+	uint16		interval;	/* interval between neighbor finding attempts (in TU) */
+	uint16		duration;	/* duration of neighbor finding attempts (in ms) */
+					/* method specific ones go after this line */
+	int16		rssi_thresh;	/* RSSI threshold (in dBm) */
+	uint16		maxconvergtmo;	/* max wait converge timeout (in ms) */
+} wl_proxd_params_rssi_method_t;
+
+#define Q1_NS			25	/* Q1 time units */
+
+#define TOF_BW_NUM		3	/* number of bandwidth that the TOF can support */
+#define TOF_BW_SEQ_NUM		(TOF_BW_NUM+2)	/* number of total index */
+enum tof_bw_index {
+	TOF_BW_20MHZ_INDEX = 0,
+	TOF_BW_40MHZ_INDEX = 1,
+	TOF_BW_80MHZ_INDEX = 2,
+	TOF_BW_SEQTX_INDEX = 3,
+	TOF_BW_SEQRX_INDEX = 4
+};
+
+#define BANDWIDTH_BASE	20	/* base value of bandwidth */
+#define TOF_BW_20MHZ    (BANDWIDTH_BASE << TOF_BW_20MHZ_INDEX)
+#define TOF_BW_40MHZ    (BANDWIDTH_BASE << TOF_BW_40MHZ_INDEX)
+#define TOF_BW_80MHZ    (BANDWIDTH_BASE << TOF_BW_80MHZ_INDEX)
+#define TOF_BW_10MHZ    10
+
+#define NFFT_BASE		64	/* base size of fft */
+#define TOF_NFFT_20MHZ  (NFFT_BASE << TOF_BW_20MHZ_INDEX)
+#define TOF_NFFT_40MHZ  (NFFT_BASE << TOF_BW_40MHZ_INDEX)
+#define TOF_NFFT_80MHZ  (NFFT_BASE << TOF_BW_80MHZ_INDEX)
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_params_tof_method {
+	chanspec_t	chanspec;	/* chanspec for home channel */
+	int16		tx_power;	/* tx power of Proximity Detection(PD) frames (in dBm) */
+	uint16		tx_rate;	/* tx rate of PD rames  (in 500kbps units) */
+	uint16		timeout;	/* state machine wait timeout of the frames (in ms) */
+	uint16		interval;	/* interval between neighbor finding attempts (in TU) */
+	uint16		duration;	/* duration of neighbor finding attempts (in ms) */
+	/* specific for the method go after this line */
+	struct ether_addr tgt_mac;	/* target mac addr for TOF method */
+	uint16		ftm_cnt;	/* number of the frames txed by initiator */
+	uint16		retry_cnt;	/* number of retransmit attampts for ftm frames */
+	int16		vht_rate;	/* ht or vht rate */
+	/* add more params required for other methods can be added here  */
+} BWL_POST_PACKED_STRUCT wl_proxd_params_tof_method_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_params_tof_tune {
+	uint32		Ki;			/* h/w delay K factor for initiator */
+	uint32		Kt;			/* h/w delay K factor for target */
+	int16		vhtack;			/* enable/disable VHT ACK */
+	int16		N_log2[TOF_BW_SEQ_NUM]; /* simple threshold crossing */
+	int16		w_offset[TOF_BW_NUM];	/* offset of threshold crossing window(per BW) */
+	int16		w_len[TOF_BW_NUM];	/* length of threshold crossing window(per BW) */
+	int32		maxDT;			/* max time difference of T4/T1 or T3/T2 */
+	int32		minDT;			/* min time difference of T4/T1 or T3/T2 */
+	uint8		totalfrmcnt;	/* total count of transfered measurement frames */
+	uint16		rsv_media;		/* reserve media value for TOF */
+	uint32		flags;			/* flags */
+	uint8		core;			/* core to use for tx */
+	uint8		force_K;		/* set to force value of K  */
+	int16		N_scale[TOF_BW_SEQ_NUM]; /* simple threshold crossing */
+	uint8		sw_adj;			/* enable sw assisted timestamp adjustment */
+	uint8		hw_adj;			/* enable hw assisted timestamp adjustment */
+	uint8		seq_en;			/* enable ranging sequence */
+	uint8		ftm_cnt[TOF_BW_SEQ_NUM]; /* number of ftm frames based on bandwidth */
+} BWL_POST_PACKED_STRUCT wl_proxd_params_tof_tune_t;
+
+typedef struct wl_proxd_params_iovar {
+	uint16	method;			/* Proxmity Detection method */
+	union {
+		/* common params for pdsvc */
+		wl_proxd_params_common_t	cmn_params;	/* common parameters */
+		/*  method specific */
+		wl_proxd_params_rssi_method_t	rssi_params;	/* RSSI method parameters */
+		wl_proxd_params_tof_method_t	tof_params;	/* TOF meothod parameters */
+		/* tune parameters */
+		wl_proxd_params_tof_tune_t	tof_tune;	/* TOF tune parameters */
+	} u;				/* Method specific optional parameters */
+} wl_proxd_params_iovar_t;
+
+#define PROXD_COLLECT_GET_STATUS	0
+#define PROXD_COLLECT_SET_STATUS	1
+#define PROXD_COLLECT_QUERY_HEADER	2
+#define PROXD_COLLECT_QUERY_DATA	3
+#define PROXD_COLLECT_QUERY_DEBUG	4
+#define PROXD_COLLECT_REMOTE_REQUEST	5
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_collect_query {
+	uint32		method;		/* method */
+	uint8		request;	/* Query request. */
+	uint8		status;		/* 0 -- disable, 1 -- enable collection, */
+					/* 2 -- enable collection & debug */
+	uint16		index;		/* The current frame index [0 to total_frames - 1]. */
+	uint16		mode;		/* Initiator or Target */
+	bool		busy;		/* tof sm is busy */
+	bool		remote;		/* Remote collect data */
+} BWL_POST_PACKED_STRUCT wl_proxd_collect_query_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_collect_header {
+	uint16		total_frames;			/* The totral frames for this collect. */
+	uint16		nfft;				/* nfft value */
+	uint16		bandwidth;			/* bandwidth */
+	uint16		channel;			/* channel number */
+	uint32		chanspec;			/* channel spec */
+	uint32		fpfactor;			/* avb timer value factor */
+	uint16		fpfactor_shift;			/* avb timer value shift bits */
+	int32		distance;			/* distance calculated by fw */
+	uint32		meanrtt;			/* mean of RTTs */
+	uint32		modertt;			/* mode of RTTs */
+	uint32		medianrtt;			/* median of RTTs */
+	uint32		sdrtt;				/* standard deviation of RTTs */
+	uint32		clkdivisor;			/* clock divisor */
+	uint16		chipnum;			/* chip type */
+	uint8		chiprev;			/* chip revision */
+	uint8		phyver;				/* phy version */
+	struct ether_addr	loaclMacAddr;		/* local mac address */
+	struct ether_addr	remoteMacAddr;		/* remote mac address */
+	wl_proxd_params_tof_tune_t params;
+} BWL_POST_PACKED_STRUCT wl_proxd_collect_header_t;
+
+
+/*  ********************** NAN wl interface struct types and defs ******************** */
+
+#define WL_NAN_IOCTL_VERSION	0x1
+
+/*   wl_nan_sub_cmd may also be used in dhd  */
+typedef struct wl_nan_sub_cmd wl_nan_sub_cmd_t;
+typedef int (cmd_handler_t)(void *wl, const wl_nan_sub_cmd_t *cmd, char **argv);
+/* nan cmd list entry  */
+struct wl_nan_sub_cmd {
+	char *name;
+	uint8  version;		/* cmd  version */
+	uint16 id;			/* id for the dongle f/w switch/case  */
+	uint16 type;		/* base type of argument */
+	cmd_handler_t *handler; /* cmd handler  */
+};
+
+/* container for nan iovtls & events */
+typedef BWL_PRE_PACKED_STRUCT struct wl_nan_ioc {
+	uint16	version;	/* interface command or event version */
+	uint16	id;			/* nan ioctl cmd  ID  */
+	uint16	len;		/* total length of all tlv records in data[]  */
+	uint8	data [1];	/* var len payload of bcm_xtlv_t type */
+} BWL_POST_PACKED_STRUCT wl_nan_ioc_t;
+
+typedef struct wl_nan_status {
+	uint8 inited;
+	uint8 joined;
+	uint8 role;
+	uint8 hop_count;
+	uint32 chspec;
+	uint8 amr[8];			/* Anchor Master Rank */
+	uint32 cnt_pend_txfrm;		/* pending TX frames */
+	uint32 cnt_bcn_tx;		/* TX disc/sync beacon count */
+	uint32 cnt_bcn_rx;		/* RX disc/sync beacon count */
+	uint32 cnt_svc_disc_tx;		/* TX svc disc frame count */
+	uint32 cnt_svc_disc_rx;		/* RX svc disc frame count */
+	struct ether_addr cid;
+} wl_nan_status_t;
+
+/* various params and ctl swithce for nan_debug instance  */
+typedef struct nan_debug_params {
+	uint8	enabled; /* runtime debuging enabled */
+	uint8	collect; /* enables debug svc sdf monitor mode  */
+	uint16	cmd;	/* debug cmd to perform a debug action */
+	uint32	msglevel; /* msg level if enabled */
+	uint16	status;
+} nan_debug_params_t;
+
+
+/* nan passive scan params */
+#define NAN_SCAN_MAX_CHCNT 8
+typedef BWL_PRE_PACKED_STRUCT struct nan_scan_params {
+	uint16 scan_time;
+	uint16 home_time;
+	uint16 chspec_num;
+	chanspec_t chspec_list[NAN_SCAN_MAX_CHCNT]; /* act. used 3, 5 rfu */
+} BWL_POST_PACKED_STRUCT nan_scan_params_t;
+
+enum wl_nan_role {
+	WL_NAN_ROLE_AUTO = 0,
+	WL_NAN_ROLE_NON_MASTER_NON_SYNC = 1,
+	WL_NAN_ROLE_NON_MASTER_SYNC = 2,
+	WL_NAN_ROLE_MASTER = 3,
+	WL_NAN_ROLE_ANCHOR_MASTER = 4
+};
+#define NAN_MASTER_RANK_LEN 8
+/* nan cmd IDs */
+enum wl_nan_cmds {
+	 /* nan cfg /disc & dbg ioctls */
+	WL_NAN_CMD_ENABLE = 1,
+	WL_NAN_CMD_ATTR = 2,
+	WL_NAN_CMD_NAN_JOIN = 3,
+	WL_NAN_CMD_LEAVE = 4,
+	WL_NAN_CMD_MERGE = 5,
+	WL_NAN_CMD_STATUS = 6,
+	/*  discovery engine commands */
+	WL_NAN_CMD_PUBLISH = 20,
+	WL_NAN_CMD_SUBSCRIBE = 21,
+	WL_NAN_CMD_CANCEL_PUBLISH = 22,
+	WL_NAN_CMD_CANCEL_SUBSCRIBE = 23,
+	WL_NAN_CMD_TRANSMIT = 24,
+	WL_NAN_CMD_CONNECTION = 25,
+	WL_NAN_CMD_SHOW = 26,
+	WL_NAN_CMD_STOP = 27,	/* stop nan for a given cluster ID  */
+	/*  nan debug iovars & cmds  */
+	WL_NAN_CMD_SCAN_PARAMS = 46,
+	WL_NAN_CMD_SCAN = 47,
+	WL_NAN_CMD_SCAN_RESULTS = 48,
+	WL_NAN_CMD_EVENT_MASK = 49,
+	WL_NAN_CMD_EVENT_CHECK = 50,
+
+	WL_NAN_CMD_DEBUG = 60,
+	WL_NAN_CMD_TEST1 = 61,
+	WL_NAN_CMD_TEST2 = 62,
+	WL_NAN_CMD_TEST3 = 63
+};
+
+/*
+ * tlv IDs uniquely identifies  cmd parameters
+ * packed into wl_nan_ioc_t container
+ */
+enum wl_nan_cmd_xtlv_id {
+	/* 0x00 ~ 0xFF: standard TLV ID whose data format is the same as NAN attribute TLV */
+	WL_NAN_XTLV_ZERO = 0,		/* used as tlv buf end marker */
+#ifdef NAN_STD_TLV 				/* rfu, don't use yet */
+	WL_NAN_XTLV_MASTER_IND = 1, /* == NAN_ATTR_MASTER_IND, */
+	WL_NAN_XTLV_CLUSTER = 2,	/* == NAN_ATTR_CLUSTER, */
+	WL_NAN_XTLV_VENDOR = 221,	/* == NAN_ATTR_VENDOR, */
+#endif
+	/* 0x02 ~ 0xFF: reserved. In case to use with the same data format as NAN attribute TLV */
+	/* 0x100 ~ : private TLV ID defined just for NAN command */
+	/* common types */
+	WL_NAN_XTLV_BUFFER = 0x101, /* generic type, function depends on cmd context */
+	WL_NAN_XTLV_MAC_ADDR = 0x102,	/* used in various cmds */
+	WL_NAN_XTLV_REASON = 0x103,
+	WL_NAN_XTLV_ENABLE = 0x104,
+	/* explicit types, primarily for discovery engine iovars  */
+	WL_NAN_XTLV_SVC_PARAMS = 0x120,     /* Contains required params: wl_nan_disc_params_t */
+	WL_NAN_XTLV_MATCH_RX = 0x121,       /* Matching filter to evaluate on receive */
+	WL_NAN_XTLV_MATCH_TX = 0x122,       /* Matching filter to send */
+	WL_NAN_XTLV_SVC_INFO = 0x123,       /* Service specific info */
+	WL_NAN_XTLV_SVC_NAME = 0x124,       /* Optional UTF-8 service name, for debugging. */
+	WL_NAN_XTLV_INSTANCE_ID = 0x125,    /* Identifies unique publish or subscribe instance */
+	WL_NAN_XTLV_PRIORITY = 0x126,       /* used in transmit cmd context */
+	WL_NAN_XTLV_REQUESTOR_ID = 0x127,	/* Requestor instance ID */
+	WL_NAN_XTLV_VNDR = 0x128,		/* Vendor specific attribute */
+	/* explicit types, primarily for NAN MAC iovars   */
+	WL_NAN_XTLV_DW_LEN = 0x140,            /* discovery win length */
+	WL_NAN_XTLV_BCN_INTERVAL = 0x141,      /* beacon interval, both sync and descovery bcns?  */
+	WL_NAN_XTLV_CLUSTER_ID = 0x142,
+	WL_NAN_XTLV_IF_ADDR = 0x143,
+	WL_NAN_XTLV_MC_ADDR = 0x144,
+	WL_NAN_XTLV_ROLE = 0x145,
+	WL_NAN_XTLV_START = 0x146,
+
+	WL_NAN_XTLV_MASTER_PREF = 0x147,
+	WL_NAN_XTLV_DW_INTERVAL = 0x148,
+	WL_NAN_XTLV_PTBTT_OVERRIDE = 0x149,
+	/*  nan status command xtlvs  */
+	WL_NAN_XTLV_MAC_INITED = 0x14a,
+	WL_NAN_XTLV_MAC_ENABLED = 0x14b,
+	WL_NAN_XTLV_MAC_CHANSPEC = 0x14c,
+	WL_NAN_XTLV_MAC_AMR = 0x14d,	/* anchormaster rank u8 amr[8] */
+	WL_NAN_XTLV_MAC_HOPCNT = 0x14e,
+	WL_NAN_XTLV_MAC_AMBTT = 0x14f,
+	WL_NAN_XTLV_MAC_TXRATE = 0x150,
+	WL_NAN_XTLV_MAC_STATUS = 0x151,  /* xtlv payload is nan_status_t */
+	WL_NAN_XTLV_NAN_SCANPARAMS = 0x152,  /* payload is nan_scan_params_t */
+	WL_NAN_XTLV_DEBUGPARAMS = 0x153,  /* payload is nan_scan_params_t */
+	WL_NAN_XTLV_SUBSCR_ID = 0x154,   /* subscriber id  */
+	WL_NAN_XTLV_PUBLR_ID = 0x155,	/* publisher id */
+	WL_NAN_XTLV_EVENT_MASK = 0x156,
+	WL_NAN_XTLV_MERGE = 0x157
+};
+
+/* Flag bits for Publish and Subscribe (wl_nan_disc_params_t flags) */
+#define WL_NAN_RANGE_LIMITED           0x0040
+/* Bits specific to Publish */
+/* Unsolicited transmissions */
+#define WL_NAN_PUB_UNSOLICIT           0x1000
+/* Solicited transmissions */
+#define WL_NAN_PUB_SOLICIT             0x2000
+#define WL_NAN_PUB_BOTH                0x3000
+/* Set for broadcast solicited transmission
+ * Do not set for unicast solicited transmission
+ */
+#define WL_NAN_PUB_BCAST               0x4000
+/* Generate event on each solicited transmission */
+#define WL_NAN_PUB_EVENT               0x8000
+/* Used for one-time solicited Publish functions to indicate transmision occurred */
+#define WL_NAN_PUB_SOLICIT_PENDING	0x10000
+/* Follow-up frames */
+#define WL_NAN_FOLLOWUP			0x20000
+/* Bits specific to Subscribe */
+/* Active subscribe mode (Leave unset for passive) */
+#define WL_NAN_SUB_ACTIVE              0x1000
+
+/* Special values for time to live (ttl) parameter */
+#define WL_NAN_TTL_UNTIL_CANCEL	0xFFFFFFFF
+/* Publish -  runs until first transmission
+ * Subscribe - runs until first  DiscoveryResult event
+ */
+#define WL_NAN_TTL_FIRST	0
+
+/* The service hash (service id) is exactly this many bytes. */
+#define WL_NAN_SVC_HASH_LEN	6
+
+/* Instance ID type (unique identifier) */
+typedef uint8 wl_nan_instance_id_t;
+
+/* Mandatory parameters for publish/subscribe iovars - NAN_TLV_SVC_PARAMS */
+typedef struct wl_nan_disc_params_s {
+	/* Periodicity of unsolicited/query transmissions, in DWs */
+	uint32 period;
+	/* Time to live in DWs */
+	uint32 ttl;
+	/* Flag bits */
+	uint32 flags;
+	/* Publish or subscribe service id, i.e. hash of the service name */
+	uint8 svc_hash[WL_NAN_SVC_HASH_LEN];
+	/* Publish or subscribe id */
+	wl_nan_instance_id_t instance_id;
+} wl_nan_disc_params_t;
+
+/*
+* desovery interface event structures *
+*/
+
+/* NAN Ranging */
+
+/* Bit defines for global flags */
+#define WL_NAN_RANGING_ENABLE		1 /* enable RTT */
+#define WL_NAN_RANGING_RANGED		2 /* Report to host if ranged as target */
+typedef struct nan_ranging_config {
+	uint32 chanspec;		/* Ranging chanspec */
+	uint16 timeslot;		/* NAN RTT start time slot  1-511 */
+	uint16 duration;		/* NAN RTT duration in ms */
+	struct ether_addr allow_mac;	/* peer initiated ranging: the allowed peer mac
+					 * address, a unicast (for one peer) or
+					 * a broadcast for all. Setting it to all zeros
+					 * means responding to none,same as not setting
+					 * the flag bit NAN_RANGING_RESPOND
+					 */
+	uint16 flags;
+} wl_nan_ranging_config_t;
+
+/* list of peers for self initiated ranging */
+/* Bit defines for per peer flags */
+#define WL_NAN_RANGING_REPORT (1<<0)	/* Enable reporting range to target */
+typedef struct nan_ranging_peer {
+	uint32 chanspec;		/* desired chanspec for this peer */
+	uint32 abitmap;			/* available bitmap */
+	struct ether_addr ea;		/* peer MAC address */
+	uint8 frmcnt;			/* frame count */
+	uint8 retrycnt;			/* retry count */
+	uint16 flags;			/* per peer flags, report or not */
+} wl_nan_ranging_peer_t;
+typedef struct nan_ranging_list {
+	uint8 count;			/* number of MAC addresses */
+	uint8 num_peers_done;		/* host set to 0, when read, shows number of peers
+					 * completed, success or fail
+					 */
+	uint8 num_dws;			/* time period to do the ranging, specified in dws */
+	uint8 reserve;			/* reserved field */
+	wl_nan_ranging_peer_t rp[1];	/* variable length array of peers */
+} wl_nan_ranging_list_t;
+
+/* ranging results, a list for self initiated ranging and one for peer initiated ranging */
+/* There will be one structure for each peer */
+#define WL_NAN_RANGING_STATUS_SUCCESS		1
+#define WL_NAN_RANGING_STATUS_FAIL			2
+#define WL_NAN_RANGING_STATUS_TIMEOUT		3
+#define WL_NAN_RANGING_STATUS_ABORT		4 /* with partial results if sounding count > 0 */
+typedef struct nan_ranging_result {
+	uint8 status;			/* 1: Success, 2: Fail 3: Timeout 4: Aborted */
+	uint8 sounding_count;		/* number of measurements completed (0 = failure) */
+	struct ether_addr ea;		/* initiator MAC address */
+	uint32 chanspec;		/* Chanspec where the ranging was done */
+	uint32 timestamp;		/* 32bits of the TSF timestamp ranging was completed at */
+	uint32 distance;		/* mean distance in meters expressed as Q4 number.
+					 * Only valid when sounding_count > 0. Examples:
+					 * 0x08 = 0.5m
+					 * 0x10 = 1m
+					 * 0x18 = 1.5m
+					 * set to 0xffffffff to indicate invalid number
+					 */
+	int32 rtt_var;			/* standard deviation in 10th of ns of RTTs measured.
+					 * Only valid when sounding_count > 0
+					 */
+	struct ether_addr tgtea;	/* target MAC address */
+} wl_nan_ranging_result_t;
+typedef struct nan_ranging_event_data {
+	uint8 mode;			/* 1: Result of host initiated ranging */
+					/* 2: Result of peer initiated ranging */
+	uint8 reserved;
+	uint8 success_count;		/* number of peers completed successfully */
+	uint8 count;			/* number of peers in the list */
+	wl_nan_ranging_result_t rr[1];	/* variable array of ranging peers */
+} wl_nan_ranging_event_data_t;
+
+/* ********************* end of NAN section ******************************** */
+
+
+#define RSSI_THRESHOLD_SIZE 16
+#define MAX_IMP_RESP_SIZE 256
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_rssi_bias {
+	int32		version;			/* version */
+	int32		threshold[RSSI_THRESHOLD_SIZE];	/* threshold */
+	int32		peak_offset;		/* peak offset */
+	int32		bias;				/* rssi bias */
+	int32		gd_delta;			/* GD - GD_ADJ */
+	int32		imp_resp[MAX_IMP_RESP_SIZE];	/* (Hi*Hi)+(Hr*Hr) */
+} BWL_POST_PACKED_STRUCT wl_proxd_rssi_bias_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_rssi_bias_avg {
+	int32		avg_threshold[RSSI_THRESHOLD_SIZE];	/* avg threshold */
+	int32		avg_peak_offset;			/* avg peak offset */
+	int32		avg_rssi;				/* avg rssi */
+	int32		avg_bias;				/* avg bias */
+} BWL_POST_PACKED_STRUCT wl_proxd_rssi_bias_avg_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_collect_info {
+	uint16		type;	 /* type: 0 channel table, 1 channel smoothing table, 2 and 3 seq */
+	uint16		index;		/* The current frame index, from 1 to total_frames. */
+	uint16		tof_cmd;	/* M_TOF_CMD      */
+	uint16		tof_rsp;	/* M_TOF_RSP      */
+	uint16		tof_avb_rxl;	/* M_TOF_AVB_RX_L */
+	uint16		tof_avb_rxh;	/* M_TOF_AVB_RX_H */
+	uint16		tof_avb_txl;	/* M_TOF_AVB_TX_L */
+	uint16		tof_avb_txh;	/* M_TOF_AVB_TX_H */
+	uint16		tof_id;		/* M_TOF_ID */
+	uint8		tof_frame_type;
+	uint8		tof_frame_bw;
+	int8		tof_rssi;
+	int32		tof_cfo;
+	int32		gd_adj_ns;	/* gound delay */
+	int32		gd_h_adj_ns;	/* group delay + threshold crossing */
+#ifdef RSSI_REFINE
+	wl_proxd_rssi_bias_t rssi_bias; /* RSSI refinement info */
+#endif
+	int16		nfft;		/* number of samples stored in H */
+
+} BWL_POST_PACKED_STRUCT wl_proxd_collect_info_t;
+
+#define k_tof_collect_H_pad  1
+#define k_tof_collect_H_size (256+16+k_tof_collect_H_pad)
+#define k_tof_collect_Hraw_size (2*k_tof_collect_H_size)
+typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_collect_data {
+	wl_proxd_collect_info_t  info;
+	uint32	H[k_tof_collect_H_size]; /* raw data read from phy used to adjust timestamps */
+
+} BWL_POST_PACKED_STRUCT wl_proxd_collect_data_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_debug_data {
+	uint8		count;		/* number of packets */
+	uint8		stage;		/* state machone stage */
+	uint8		received;	/* received or txed */
+	uint8		paket_type;	/* packet type */
+	uint8		category;	/* category field */
+	uint8		action;		/* action field */
+	uint8		token;		/* token number */
+	uint8		follow_token;	/* following token number */
+	uint16		index;		/* index of the packet */
+	uint16		tof_cmd;	/* M_TOF_CMD */
+	uint16		tof_rsp;	/* M_TOF_RSP */
+	uint16		tof_avb_rxl;	/* M_TOF_AVB_RX_L */
+	uint16		tof_avb_rxh;	/* M_TOF_AVB_RX_H */
+	uint16		tof_avb_txl;	/* M_TOF_AVB_TX_L */
+	uint16		tof_avb_txh;	/* M_TOF_AVB_TX_H */
+	uint16		tof_id;		/* M_TOF_ID */
+	uint16		tof_status0;	/* M_TOF_STATUS_0 */
+	uint16		tof_status2;	/* M_TOF_STATUS_2 */
+	uint16		tof_chsm0;	/* M_TOF_CHNSM_0 */
+	uint16		tof_phyctl0;	/* M_TOF_PHYCTL0 */
+	uint16		tof_phyctl1;	/* M_TOF_PHYCTL1 */
+	uint16		tof_phyctl2;	/* M_TOF_PHYCTL2 */
+	uint16		tof_lsig;	/* M_TOF_LSIG */
+	uint16		tof_vhta0;	/* M_TOF_VHTA0 */
+	uint16		tof_vhta1;	/* M_TOF_VHTA1 */
+	uint16		tof_vhta2;	/* M_TOF_VHTA2 */
+	uint16		tof_vhtb0;	/* M_TOF_VHTB0 */
+	uint16		tof_vhtb1;	/* M_TOF_VHTB1 */
+	uint16		tof_apmductl;	/* M_TOF_AMPDU_CTL */
+	uint16		tof_apmdudlim;	/* M_TOF_AMPDU_DLIM */
+	uint16		tof_apmdulen;	/* M_TOF_AMPDU_LEN */
+} BWL_POST_PACKED_STRUCT wl_proxd_debug_data_t;
+
+/* version of the wl_wsec_info structure */
+#define WL_WSEC_INFO_VERSION 0x01
+
+/* start enum value for BSS properties */
+#define WL_WSEC_INFO_BSS_BASE 0x0100
+
+/* size of len and type fields of wl_wsec_info_tlv_t struct */
+#define WL_WSEC_INFO_TLV_HDR_LEN OFFSETOF(wl_wsec_info_tlv_t, data)
+
+/* Allowed wl_wsec_info properties; not all of them may be supported. */
+typedef enum {
+	WL_WSEC_INFO_NONE = 0,
+	WL_WSEC_INFO_MAX_KEYS = 1,
+	WL_WSEC_INFO_NUM_KEYS = 2,
+	WL_WSEC_INFO_NUM_HW_KEYS = 3,
+	WL_WSEC_INFO_MAX_KEY_IDX = 4,
+	WL_WSEC_INFO_NUM_REPLAY_CNTRS = 5,
+	WL_WSEC_INFO_SUPPORTED_ALGOS = 6,
+	WL_WSEC_INFO_MAX_KEY_LEN = 7,
+	WL_WSEC_INFO_FLAGS = 8,
+	/* add global/per-wlc properties above */
+	WL_WSEC_INFO_BSS_FLAGS = (WL_WSEC_INFO_BSS_BASE + 1),
+	WL_WSEC_INFO_BSS_WSEC = (WL_WSEC_INFO_BSS_BASE + 2),
+	WL_WSEC_INFO_BSS_TX_KEY_ID = (WL_WSEC_INFO_BSS_BASE + 3),
+	WL_WSEC_INFO_BSS_ALGO = (WL_WSEC_INFO_BSS_BASE + 4),
+	WL_WSEC_INFO_BSS_KEY_LEN = (WL_WSEC_INFO_BSS_BASE + 5),
+	/* add per-BSS properties above */
+	WL_WSEC_INFO_MAX = 0xffff
+} wl_wsec_info_type_t;
+
+/* tlv used to return wl_wsec_info properties */
+typedef struct {
+	uint16 type;
+	uint16 len;		/* data length */
+	uint8 data[1];	/* data follows */
+} wl_wsec_info_tlv_t;
+
+/* input/output data type for wsec_info iovar */
+typedef struct wl_wsec_info {
+	uint8 version; /* structure version */
+	uint8 pad[2];
+	uint8 num_tlvs;
+	wl_wsec_info_tlv_t tlvs[1]; /* tlv data follows */
+} wl_wsec_info_t;
+
+/* no default structure packing */
+#include <packed_section_end.h>
+
+enum rssi_reason {
+	RSSI_REASON_UNKNOW = 0,
+	RSSI_REASON_LOWRSSI = 1,
+	RSSI_REASON_NSYC = 2,
+	RSSI_REASON_TIMEOUT = 3
+};
+
+enum tof_reason {
+	TOF_REASON_OK = 0,
+	TOF_REASON_REQEND = 1,
+	TOF_REASON_TIMEOUT = 2,
+	TOF_REASON_NOACK = 3,
+	TOF_REASON_INVALIDAVB = 4,
+	TOF_REASON_INITIAL = 5,
+	TOF_REASON_ABORT = 6
+};
+
+enum rssi_state {
+	RSSI_STATE_POLL = 0,
+	RSSI_STATE_TPAIRING = 1,
+	RSSI_STATE_IPAIRING = 2,
+	RSSI_STATE_THANDSHAKE = 3,
+	RSSI_STATE_IHANDSHAKE = 4,
+	RSSI_STATE_CONFIRMED = 5,
+	RSSI_STATE_PIPELINE = 6,
+	RSSI_STATE_NEGMODE = 7,
+	RSSI_STATE_MONITOR = 8,
+	RSSI_STATE_LAST = 9
+};
+
+enum tof_state {
+	TOF_STATE_IDLE	 = 0,
+	TOF_STATE_IWAITM = 1,
+	TOF_STATE_TWAITM = 2,
+	TOF_STATE_ILEGACY = 3,
+	TOF_STATE_IWAITCL = 4,
+	TOF_STATE_TWAITCL = 5,
+	TOF_STATE_ICONFIRM = 6,
+	TOF_STATE_IREPORT = 7
+};
+
+enum tof_mode_type {
+	TOF_LEGACY_UNKNOWN	= 0,
+	TOF_LEGACY_AP		= 1,
+	TOF_NONLEGACY_AP	= 2
+};
+
+enum tof_way_type {
+	TOF_TYPE_ONE_WAY = 0,
+	TOF_TYPE_TWO_WAY = 1,
+	TOF_TYPE_REPORT = 2
+};
+
+enum tof_rate_type {
+	TOF_FRAME_RATE_VHT = 0,
+	TOF_FRAME_RATE_LEGACY = 1
+};
+
+#define TOF_ADJ_TYPE_NUM	4	/* number of assisted timestamp adjustment */
+enum tof_adj_mode {
+	TOF_ADJ_SOFTWARE = 0,
+	TOF_ADJ_HARDWARE = 1,
+	TOF_ADJ_SEQ = 2,
+	TOF_ADJ_NONE = 3
+};
+
+#define FRAME_TYPE_NUM		4	/* number of frame type */
+enum frame_type {
+	FRAME_TYPE_CCK	= 0,
+	FRAME_TYPE_OFDM	= 1,
+	FRAME_TYPE_11N	= 2,
+	FRAME_TYPE_11AC	= 3
+};
+
+typedef struct wl_proxd_status_iovar {
+	uint16			method;				/* method */
+	uint8			mode;				/* mode */
+	uint8			peermode;			/* peer mode */
+	uint8			state;				/* state */
+	uint8			reason;				/* reason code */
+	uint32			distance;			/* distance */
+	uint32			txcnt;				/* tx pkt counter */
+	uint32			rxcnt;				/* rx pkt counter */
+	struct ether_addr	peer;				/* peer mac address */
+	int8			avg_rssi;			/* average rssi */
+	int8			hi_rssi;			/* highest rssi */
+	int8			low_rssi;			/* lowest rssi */
+	uint32			dbgstatus;			/* debug status */
+	uint16			frame_type_cnt[FRAME_TYPE_NUM];	/* frame types */
+	uint8			adj_type_cnt[TOF_ADJ_TYPE_NUM];	/* adj types HW/SW */
+} wl_proxd_status_iovar_t;
+
+#ifdef NET_DETECT
+typedef struct net_detect_adapter_features {
+	bool	wowl_enabled;
+	bool	net_detect_enabled;
+	bool	nlo_enabled;
+} net_detect_adapter_features_t;
+
+typedef enum net_detect_bss_type {
+	nd_bss_any = 0,
+	nd_ibss,
+	nd_ess
+} net_detect_bss_type_t;
+
+typedef struct net_detect_profile {
+	wlc_ssid_t		ssid;
+	net_detect_bss_type_t   bss_type;	/* Ignore for now since Phase 1 is only for ESS */
+	uint32			cipher_type;	/* DOT11_CIPHER_ALGORITHM enumeration values */
+	uint32			auth_type;	/* DOT11_AUTH_ALGORITHM enumeration values */
+} net_detect_profile_t;
+
+typedef struct net_detect_profile_list {
+	uint32			num_nd_profiles;
+	net_detect_profile_t	nd_profile[0];
+} net_detect_profile_list_t;
+
+typedef struct net_detect_config {
+	bool			    nd_enabled;
+	uint32			    scan_interval;
+	uint32			    wait_period;
+	bool			    wake_if_connected;
+	bool			    wake_if_disconnected;
+	net_detect_profile_list_t   nd_profile_list;
+} net_detect_config_t;
+
+typedef enum net_detect_wake_reason {
+	nd_reason_unknown,
+	nd_net_detected,
+	nd_wowl_event,
+	nd_ucode_error
+} net_detect_wake_reason_t;
+
+typedef struct net_detect_wake_data {
+	net_detect_wake_reason_t    nd_wake_reason;
+	uint32			    nd_wake_date_length;
+	uint8			    nd_wake_data[0];	    /* Wake data (currently unused) */
+} net_detect_wake_data_t;
+
+#endif /* NET_DETECT */
+
+typedef struct bcnreq {
+	uint8 bcn_mode;
+	int dur;
+	int channel;
+	struct ether_addr da;
+	uint16 random_int;
+	wlc_ssid_t ssid;
+	uint16 reps;
+} bcnreq_t;
+
+typedef struct rrmreq {
+	struct ether_addr da;
+	uint8 reg;
+	uint8 chan;
+	uint16 random_int;
+	uint16 dur;
+	uint16 reps;
+} rrmreq_t;
+
+typedef struct framereq {
+	struct ether_addr da;
+	uint8 reg;
+	uint8 chan;
+	uint16 random_int;
+	uint16 dur;
+	struct ether_addr ta;
+	uint16 reps;
+} framereq_t;
+
+typedef struct statreq {
+	struct ether_addr da;
+	struct ether_addr peer;
+	uint16 random_int;
+	uint16 dur;
+	uint8 group_id;
+	uint16 reps;
+} statreq_t;
+
+#define WL_RRM_RPT_VER		0
+#define WL_RRM_RPT_MAX_PAYLOAD	64
+#define WL_RRM_RPT_MIN_PAYLOAD	7
+#define WL_RRM_RPT_FALG_ERR	0
+#define WL_RRM_RPT_FALG_OK	1
+typedef struct {
+	uint16 ver;		/* version */
+	struct ether_addr addr;	/* STA MAC addr */
+	uint32 timestamp;	/* timestamp of the report */
+	uint16 flag;		/* flag */
+	uint16 len;		/* length of payload data */
+	unsigned char data[WL_RRM_RPT_MAX_PAYLOAD];
+} statrpt_t;
+
+typedef struct wlc_l2keepalive_ol_params {
+	uint8	flags;
+	uint8	prio;
+	uint16	period_ms;
+} wlc_l2keepalive_ol_params_t;
+
+typedef struct wlc_dwds_config {
+	uint32		enable;
+	uint32		mode; /* STA/AP interface */
+	struct ether_addr ea;
+} wlc_dwds_config_t;
+
+typedef struct wl_el_set_params_s {
+	uint8 set;	/* Set number */
+	uint32 size;	/* Size to make/expand */
+} wl_el_set_params_t;
+
+typedef struct wl_el_tag_params_s {
+	uint16 tag;
+	uint8 set;
+	uint8 flags;
+} wl_el_tag_params_t;
+
+/* Video Traffic Interference Monitor config */
+#define INTFER_VERSION		1
+typedef struct wl_intfer_params {
+	uint16 version;			/* version */
+	uint8 period;			/* sample period */
+	uint8 cnt;			/* sample cnt */
+	uint8 txfail_thresh;	/* non-TCP txfail threshold */
+	uint8 tcptxfail_thresh;	/* tcptxfail threshold */
+} wl_intfer_params_t;
+
+typedef struct wl_staprio_cfg {
+	struct ether_addr ea;	/* mac addr */
+	uint8 prio;		/* scb priority */
+} wl_staprio_cfg_t;
+
+typedef enum wl_stamon_cfg_cmd_type {
+	STAMON_CFG_CMD_DEL = 0,
+	STAMON_CFG_CMD_ADD = 1
+} wl_stamon_cfg_cmd_type_t;
+
+typedef struct wlc_stamon_sta_config {
+	wl_stamon_cfg_cmd_type_t cmd; /* 0 - delete, 1 - add */
+	struct ether_addr ea;
+} wlc_stamon_sta_config_t;
+
+#ifdef SR_DEBUG
+typedef struct /* pmu_reg */{
+	uint32  pmu_control;
+	uint32  pmu_capabilities;
+	uint32  pmu_status;
+	uint32  res_state;
+	uint32  res_pending;
+	uint32  pmu_timer1;
+	uint32  min_res_mask;
+	uint32  max_res_mask;
+	uint32  pmu_chipcontrol1[4];
+	uint32  pmu_regcontrol[5];
+	uint32  pmu_pllcontrol[5];
+	uint32  pmu_rsrc_up_down_timer[31];
+	uint32  rsrc_dep_mask[31];
+} pmu_reg_t;
+#endif /* pmu_reg */
+
+typedef struct wl_taf_define {
+	struct ether_addr ea;	/* STA MAC or 0xFF... */
+	uint16 version;         /* version */
+	uint32 sch;             /* method index */
+	uint32 prio;            /* priority */
+	uint32 misc;            /* used for return value */
+	char   text[1];         /* used to pass and return ascii text */
+} wl_taf_define_t;
+
+/* Received Beacons lengths information */
+#define WL_LAST_BCNS_INFO_FIXED_LEN		OFFSETOF(wlc_bcn_len_hist_t, bcnlen_ring)
+typedef struct wlc_bcn_len_hist {
+	uint16	ver;				/* version field */
+	uint16	cur_index;			/* current pointed index in ring buffer */
+	uint32	max_bcnlen;		/* Max beacon length received */
+	uint32	min_bcnlen;		/* Min beacon length received */
+	uint32	ringbuff_len;		/* Length of the ring buffer 'bcnlen_ring' */
+	uint32	bcnlen_ring[1];	/* ring buffer storing received beacon lengths */
+} wlc_bcn_len_hist_t;
+
+/* WDS net interface types */
+#define WL_WDSIFTYPE_NONE  0x0 /* The interface type is neither WDS nor DWDS. */
+#define WL_WDSIFTYPE_WDS   0x1 /* The interface is WDS type. */
+#define WL_WDSIFTYPE_DWDS  0x2 /* The interface is DWDS type. */
+
+typedef struct wl_bssload_static {
+	bool is_static;
+	uint16 sta_count;
+	uint8 chan_util;
+	uint16 aac;
+} wl_bssload_static_t;
+
+
+/* LTE coex info */
+/* Analogue of HCI Set MWS Signaling cmd */
+typedef struct {
+	uint16	mws_rx_assert_offset;
+	uint16	mws_rx_assert_jitter;
+	uint16	mws_rx_deassert_offset;
+	uint16	mws_rx_deassert_jitter;
+	uint16	mws_tx_assert_offset;
+	uint16	mws_tx_assert_jitter;
+	uint16	mws_tx_deassert_offset;
+	uint16	mws_tx_deassert_jitter;
+	uint16	mws_pattern_assert_offset;
+	uint16	mws_pattern_assert_jitter;
+	uint16	mws_inact_dur_assert_offset;
+	uint16	mws_inact_dur_assert_jitter;
+	uint16	mws_scan_freq_assert_offset;
+	uint16	mws_scan_freq_assert_jitter;
+	uint16	mws_prio_assert_offset_req;
+} wci2_config_t;
+
+/* Analogue of HCI MWS Channel Params */
+typedef struct {
+	uint16	mws_rx_center_freq; /* MHz */
+	uint16	mws_tx_center_freq;
+	uint16	mws_rx_channel_bw;  /* KHz */
+	uint16	mws_tx_channel_bw;
+	uint8	mws_channel_en;
+	uint8	mws_channel_type;   /* Don't care for WLAN? */
+} mws_params_t;
+
+/* MWS wci2 message */
+typedef struct {
+	uint8	mws_wci2_data; /* BT-SIG msg */
+	uint16	mws_wci2_interval; /* Interval in us */
+	uint16	mws_wci2_repeat; /* No of msgs to send */
+} mws_wci2_msg_t;
+
+typedef struct {
+	uint32 config;	/* MODE: AUTO (-1), Disable (0), Enable (1) */
+	uint32 status;	/* Current state: Disabled (0), Enabled (1) */
+} wl_config_t;
+
+#define WLC_RSDB_MODE_AUTO_MASK 0x80
+#define WLC_RSDB_EXTRACT_MODE(val) ((int8)((val) & (~(WLC_RSDB_MODE_AUTO_MASK))))
+
+#define	WL_IF_STATS_T_VERSION 1	/* current version of wl_if_stats structure */
+
+/* per interface counters */
+typedef struct wl_if_stats {
+	uint16	version;		/* version of the structure */
+	uint16	length;			/* length of the entire structure */
+	uint32	PAD;			/* padding */
+
+	/* transmit stat counters */
+	uint64	txframe;		/* tx data frames */
+	uint64	txbyte;			/* tx data bytes */
+	uint64	txerror;		/* tx data errors (derived: sum of others) */
+	uint64  txnobuf;		/* tx out of buffer errors */
+	uint64  txrunt;			/* tx runt frames */
+	uint64  txfail;			/* tx failed frames */
+	uint64	txretry;		/* tx retry frames */
+	uint64	txretrie;		/* tx multiple retry frames */
+	uint64	txfrmsnt;		/* tx sent frames */
+	uint64	txmulti;		/* tx mulitcast sent frames */
+	uint64	txfrag;			/* tx fragments sent */
+
+	/* receive stat counters */
+	uint64	rxframe;		/* rx data frames */
+	uint64	rxbyte;			/* rx data bytes */
+	uint64	rxerror;		/* rx data errors (derived: sum of others) */
+	uint64	rxnobuf;		/* rx out of buffer errors */
+	uint64  rxrunt;			/* rx runt frames */
+	uint64  rxfragerr;		/* rx fragment errors */
+	uint64	rxmulti;		/* rx multicast frames */
+}
+wl_if_stats_t;
+
+typedef struct wl_band {
+	uint16		bandtype;		/* WL_BAND_2G, WL_BAND_5G */
+	uint16		bandunit;		/* bandstate[] index */
+	uint16		phytype;		/* phytype */
+	uint16		phyrev;
+}
+wl_band_t;
+
+#define	WL_WLC_VERSION_T_VERSION 1 /* current version of wlc_version structure */
+
+/* wlc interface version */
+typedef struct wl_wlc_version {
+	uint16	version;		/* version of the structure */
+	uint16	length;			/* length of the entire structure */
+
+	/* epi version numbers */
+	uint16	epi_ver_major;		/* epi major version number */
+	uint16	epi_ver_minor;		/* epi minor version number */
+	uint16	epi_rc_num;		/* epi RC number */
+	uint16	epi_incr_num;		/* epi increment number */
+
+	/* wlc interface version numbers */
+	uint16	wlc_ver_major;		/* wlc interface major version number */
+	uint16	wlc_ver_minor;		/* wlc interface minor version number */
+}
+wl_wlc_version_t;
+
+/* Version of WLC interface to be returned as a part of wl_wlc_version structure.
+ * For the discussion related to versions update policy refer to
+ * http://hwnbu-twiki.broadcom.com/bin/view/Mwgroup/WlShimAbstractionLayer
+ * For now the policy is to increment WLC_VERSION_MAJOR each time
+ * there is a change that involves both WLC layer and per-port layer.
+ * WLC_VERSION_MINOR is currently not in use.
+ */
+#define WLC_VERSION_MAJOR	3
+#define WLC_VERSION_MINOR	0
+
+
+/* require strict packing */
+#include <packed_section_start.h>
+/* Data returned by the bssload_report iovar.
+ * This is also the WLC_E_BSS_LOAD event data.
+ */
+typedef BWL_PRE_PACKED_STRUCT struct wl_bssload {
+	uint16 sta_count;		/* station count */
+	uint16 aac;			/* available admission capacity */
+	uint8 chan_util;		/* channel utilization */
+} BWL_POST_PACKED_STRUCT wl_bssload_t;
+
+/* Maximum number of configurable BSS Load levels.  The number of BSS Load
+ * ranges is always 1 more than the number of configured levels.  eg. if
+ * 3 levels of 10, 20, 30 are configured then this defines 4 load ranges:
+ * 0-10, 11-20, 21-30, 31-255.  A WLC_E_BSS_LOAD event is generated each time
+ * the utilization level crosses into another range, subject to the rate limit.
+ */
+#define MAX_BSSLOAD_LEVELS 8
+#define MAX_BSSLOAD_RANGES (MAX_BSSLOAD_LEVELS + 1)
+
+/* BSS Load event notification configuration. */
+typedef struct wl_bssload_cfg {
+	uint32 rate_limit_msec;	/* # of events posted to application will be limited to
+				 * one per specified period (0 to disable rate limit).
+				 */
+	uint8 num_util_levels;	/* Number of entries in util_levels[] below */
+	uint8 util_levels[MAX_BSSLOAD_LEVELS];
+				/* Variable number of BSS Load utilization levels in
+				 * low to high order.  An event will be posted each time
+				 * a received beacon's BSS Load IE channel utilization
+				 * value crosses a level.
+				 */
+} wl_bssload_cfg_t;
+
+/* Multiple roaming profile suport */
+#define WL_MAX_ROAM_PROF_BRACKETS	4
+
+#define WL_MAX_ROAM_PROF_VER	0
+
+#define WL_ROAM_PROF_NONE	(0 << 0)
+#define WL_ROAM_PROF_LAZY	(1 << 0)
+#define WL_ROAM_PROF_NO_CI	(1 << 1)
+#define WL_ROAM_PROF_SUSPEND	(1 << 2)
+#define WL_ROAM_PROF_SYNC_DTIM	(1 << 6)
+#define WL_ROAM_PROF_DEFAULT	(1 << 7)	/* backward compatible single default profile */
+
+typedef struct wl_roam_prof {
+	int8	roam_flags;		/* bit flags */
+	int8	roam_trigger;		/* RSSI trigger level per profile/RSSI bracket */
+	int8	rssi_lower;
+	int8	roam_delta;
+	int8	rssi_boost_thresh;	/* Min RSSI to qualify for RSSI boost */
+	int8	rssi_boost_delta;	/* RSSI boost for AP in the other band */
+	uint16	nfscan;			/* nuber of full scan to start with */
+	uint16	fullscan_period;
+	uint16	init_scan_period;
+	uint16	backoff_multiplier;
+	uint16	max_scan_period;
+} wl_roam_prof_t;
+
+typedef struct wl_roam_prof_band {
+	uint32	band;			/* Must be just one band */
+	uint16	ver;			/* version of this struct */
+	uint16	len;			/* length in bytes of this structure */
+	wl_roam_prof_t roam_prof[WL_MAX_ROAM_PROF_BRACKETS];
+} wl_roam_prof_band_t;
+
+/* no default structure packing */
+#include <packed_section_end.h>
+
+#endif /* _wlioctl_h_ */
diff --git a/drivers/net/wireless/bcmdhd/linux_osl.c b/drivers/net/wireless/bcmdhd/linux_osl.c
new file mode 100644
index 0000000..58ad452
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/linux_osl.c
@@ -0,0 +1,1596 @@
+/*
+ * Linux OS Independent Layer
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ *
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ *
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: linux_osl.c 474402 2014-05-01 03:50:41Z $
+ */
+
+#define LINUX_PORT
+
+#include <typedefs.h>
+#include <bcmendian.h>
+#include <linuxver.h>
+#include <bcmdefs.h>
+
+#if defined(USE_KMALLOC_FOR_FLOW_RING) && defined(__ARM_ARCH_7A__)
+#include <asm/cacheflush.h>
+#endif
+
+#include <linux/random.h>
+
+#include <osl.h>
+#include <bcmutils.h>
+#include <linux/delay.h>
+#include <pcicfg.h>
+
+
+
+#include <linux/fs.h>
+
+
+#ifdef BCMPCIE
+#include <bcmpcie.h>
+#endif /* BCMPCIE */
+
+#define PCI_CFG_RETRY		10
+
+#define OS_HANDLE_MAGIC		0x1234abcd	/* Magic # to recognize osh */
+#define BCM_MEM_FILENAME_LEN	24		/* Mem. filename length */
+#define DUMPBUFSZ 1024
+
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+#define DHD_SKB_1PAGE_BUFSIZE	(PAGE_SIZE*1)
+#define DHD_SKB_2PAGE_BUFSIZE	(PAGE_SIZE*2)
+#define DHD_SKB_4PAGE_BUFSIZE	(PAGE_SIZE*4)
+
+#define STATIC_BUF_MAX_NUM	16
+#define STATIC_BUF_SIZE	(PAGE_SIZE*2)
+#define STATIC_BUF_TOTAL_LEN	(STATIC_BUF_MAX_NUM * STATIC_BUF_SIZE)
+
+typedef struct bcm_static_buf {
+	struct semaphore static_sem;
+	unsigned char *buf_ptr;
+	unsigned char buf_use[STATIC_BUF_MAX_NUM];
+} bcm_static_buf_t;
+
+static bcm_static_buf_t *bcm_static_buf = 0;
+
+#if defined(BCMPCIE)
+#define STATIC_PKT_4PAGE_NUM	0
+#define DHD_SKB_MAX_BUFSIZE	DHD_SKB_2PAGE_BUFSIZE
+#elif defined(ENHANCED_STATIC_BUF)
+#define STATIC_PKT_4PAGE_NUM	1
+#define DHD_SKB_MAX_BUFSIZE	DHD_SKB_4PAGE_BUFSIZE
+#else
+#define STATIC_PKT_4PAGE_NUM	0
+#define DHD_SKB_MAX_BUFSIZE DHD_SKB_2PAGE_BUFSIZE
+#endif /* BCMPCIE */
+
+#ifdef BCMPCIE
+#define STATIC_PKT_1PAGE_NUM	0
+#define STATIC_PKT_2PAGE_NUM	16
+#else
+#define STATIC_PKT_1PAGE_NUM	8
+#define STATIC_PKT_2PAGE_NUM	8
+#endif /* BCMPCIE */
+
+#define STATIC_PKT_1_2PAGE_NUM	\
+	((STATIC_PKT_1PAGE_NUM) + (STATIC_PKT_2PAGE_NUM))
+#define STATIC_PKT_MAX_NUM	\
+	((STATIC_PKT_1_2PAGE_NUM) + (STATIC_PKT_4PAGE_NUM))
+
+typedef struct bcm_static_pkt {
+	struct sk_buff *skb_4k[STATIC_PKT_1PAGE_NUM+1];
+	struct sk_buff *skb_8k[STATIC_PKT_2PAGE_NUM];
+#if !defined(BCMPCIE)
+#ifdef ENHANCED_STATIC_BUF
+	struct sk_buff *skb_16k;
+#endif /* ENHANCED_STATIC_BUF */
+	struct semaphore osl_pkt_sem;
+#else
+	spinlock_t osl_pkt_lock;
+#endif /* !BCMPCIE */
+	unsigned char pkt_use[STATIC_PKT_MAX_NUM];
+} bcm_static_pkt_t;
+
+static bcm_static_pkt_t *bcm_static_skb = 0;
+
+
+
+void* wifi_platform_prealloc(void *adapter, int section, unsigned long size);
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+
+typedef struct bcm_mem_link {
+	struct bcm_mem_link *prev;
+	struct bcm_mem_link *next;
+	uint	size;
+	int	line;
+	void 	*osh;
+	char	file[BCM_MEM_FILENAME_LEN];
+} bcm_mem_link_t;
+
+struct osl_cmn_info {
+	atomic_t malloced;
+	atomic_t pktalloced;    /* Number of allocated packet buffers */
+	spinlock_t dbgmem_lock;
+	bcm_mem_link_t *dbgmem_list;
+	spinlock_t pktalloc_lock;
+	atomic_t refcount; /* Number of references to this shared structure. */
+};
+typedef struct osl_cmn_info osl_cmn_t;
+
+struct osl_info {
+	osl_pubinfo_t pub;
+#ifdef CTFPOOL
+	ctfpool_t *ctfpool;
+#endif /* CTFPOOL */
+	uint magic;
+	void *pdev;
+	uint failed;
+	uint bustype;
+	osl_cmn_t *cmn; /* Common OSL related data shred between two OSH's */
+
+	void *bus_handle;
+	uint32  flags;		/* If specific cases to be handled in the OSL */
+};
+
+#define OSL_PKTTAG_CLEAR(p) \
+do { \
+	struct sk_buff *s = (struct sk_buff *)(p); \
+	ASSERT(OSL_PKTTAG_SZ == 32); \
+	*(uint32 *)(&s->cb[0]) = 0; *(uint32 *)(&s->cb[4]) = 0; \
+	*(uint32 *)(&s->cb[8]) = 0; *(uint32 *)(&s->cb[12]) = 0; \
+	*(uint32 *)(&s->cb[16]) = 0; *(uint32 *)(&s->cb[20]) = 0; \
+	*(uint32 *)(&s->cb[24]) = 0; *(uint32 *)(&s->cb[28]) = 0; \
+} while (0)
+
+/* PCMCIA attribute space access macros */
+
+/* Global ASSERT type flag */
+uint32 g_assert_type = 0;
+module_param(g_assert_type, int, 0);
+
+static int16 linuxbcmerrormap[] =
+{	0, 			/* 0 */
+	-EINVAL,		/* BCME_ERROR */
+	-EINVAL,		/* BCME_BADARG */
+	-EINVAL,		/* BCME_BADOPTION */
+	-EINVAL,		/* BCME_NOTUP */
+	-EINVAL,		/* BCME_NOTDOWN */
+	-EINVAL,		/* BCME_NOTAP */
+	-EINVAL,		/* BCME_NOTSTA */
+	-EINVAL,		/* BCME_BADKEYIDX */
+	-EINVAL,		/* BCME_RADIOOFF */
+	-EINVAL,		/* BCME_NOTBANDLOCKED */
+	-EINVAL, 		/* BCME_NOCLK */
+	-EINVAL, 		/* BCME_BADRATESET */
+	-EINVAL, 		/* BCME_BADBAND */
+	-E2BIG,			/* BCME_BUFTOOSHORT */
+	-E2BIG,			/* BCME_BUFTOOLONG */
+	-EBUSY, 		/* BCME_BUSY */
+	-EINVAL, 		/* BCME_NOTASSOCIATED */
+	-EINVAL, 		/* BCME_BADSSIDLEN */
+	-EINVAL, 		/* BCME_OUTOFRANGECHAN */
+	-EINVAL, 		/* BCME_BADCHAN */
+	-EFAULT, 		/* BCME_BADADDR */
+	-ENOMEM, 		/* BCME_NORESOURCE */
+	-EOPNOTSUPP,		/* BCME_UNSUPPORTED */
+	-EMSGSIZE,		/* BCME_BADLENGTH */
+	-EINVAL,		/* BCME_NOTREADY */
+	-EPERM,			/* BCME_EPERM */
+	-ENOMEM, 		/* BCME_NOMEM */
+	-EINVAL, 		/* BCME_ASSOCIATED */
+	-ERANGE, 		/* BCME_RANGE */
+	-EINVAL, 		/* BCME_NOTFOUND */
+	-EINVAL, 		/* BCME_WME_NOT_ENABLED */
+	-EINVAL, 		/* BCME_TSPEC_NOTFOUND */
+	-EINVAL, 		/* BCME_ACM_NOTSUPPORTED */
+	-EINVAL,		/* BCME_NOT_WME_ASSOCIATION */
+	-EIO,			/* BCME_SDIO_ERROR */
+	-ENODEV,		/* BCME_DONGLE_DOWN */
+	-EINVAL,		/* BCME_VERSION */
+	-EIO,			/* BCME_TXFAIL */
+	-EIO,			/* BCME_RXFAIL */
+	-ENODEV,		/* BCME_NODEVICE */
+	-EINVAL,		/* BCME_NMODE_DISABLED */
+	-ENODATA,		/* BCME_NONRESIDENT */
+	-EINVAL,		/* BCME_SCANREJECT */
+	-EINVAL,		/* BCME_USAGE_ERROR */
+	-EIO,     		/* BCME_IOCTL_ERROR */
+	-EIO,			/* BCME_SERIAL_PORT_ERR */
+	-EOPNOTSUPP,	/* BCME_DISABLED, BCME_NOTENABLED */
+	-EIO,			/* BCME_DECERR */
+	-EIO,			/* BCME_ENCERR */
+	-EIO,			/* BCME_MICERR */
+	-ERANGE,		/* BCME_REPLAY */
+	-EINVAL,		/* BCME_IE_NOTFOUND */
+
+/* When an new error code is added to bcmutils.h, add os
+ * specific error translation here as well
+ */
+/* check if BCME_LAST changed since the last time this function was updated */
+#if BCME_LAST != -52
+#error "You need to add a OS error translation in the linuxbcmerrormap \
+	for new error code defined in bcmutils.h"
+#endif
+};
+
+/* translate bcmerrors into linux errors */
+int
+osl_error(int bcmerror)
+{
+	if (bcmerror > 0)
+		bcmerror = 0;
+	else if (bcmerror < BCME_LAST)
+		bcmerror = BCME_ERROR;
+
+	/* Array bounds covered by ASSERT in osl_attach */
+	return linuxbcmerrormap[-bcmerror];
+}
+#ifdef SHARED_OSL_CMN
+osl_t *
+osl_attach(void *pdev, uint bustype, bool pkttag, void **osl_cmn)
+{
+#else
+osl_t *
+osl_attach(void *pdev, uint bustype, bool pkttag)
+{
+	void **osl_cmn = NULL;
+#endif /* SHARED_OSL_CMN */
+	osl_t *osh;
+	gfp_t flags;
+
+	flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
+	if (!(osh = kmalloc(sizeof(osl_t), flags)))
+		return osh;
+
+	ASSERT(osh);
+
+	bzero(osh, sizeof(osl_t));
+
+	if (osl_cmn == NULL || *osl_cmn == NULL) {
+		if (!(osh->cmn = kmalloc(sizeof(osl_cmn_t), flags))) {
+			kfree(osh);
+			return NULL;
+		}
+		bzero(osh->cmn, sizeof(osl_cmn_t));
+		if (osl_cmn)
+			*osl_cmn = osh->cmn;
+		atomic_set(&osh->cmn->malloced, 0);
+		osh->cmn->dbgmem_list = NULL;
+		spin_lock_init(&(osh->cmn->dbgmem_lock));
+
+		spin_lock_init(&(osh->cmn->pktalloc_lock));
+
+	} else {
+		osh->cmn = *osl_cmn;
+	}
+	atomic_add(1, &osh->cmn->refcount);
+
+	/* Check that error map has the right number of entries in it */
+	ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(linuxbcmerrormap) - 1));
+
+	osh->failed = 0;
+	osh->pdev = pdev;
+	osh->pub.pkttag = pkttag;
+	osh->bustype = bustype;
+	osh->magic = OS_HANDLE_MAGIC;
+
+	switch (bustype) {
+		case PCI_BUS:
+		case SI_BUS:
+		case PCMCIA_BUS:
+			osh->pub.mmbus = TRUE;
+			break;
+		case JTAG_BUS:
+		case SDIO_BUS:
+		case USB_BUS:
+		case SPI_BUS:
+		case RPC_BUS:
+			osh->pub.mmbus = FALSE;
+			break;
+		default:
+			ASSERT(FALSE);
+			break;
+	}
+
+
+
+	return osh;
+}
+
+int osl_static_mem_init(osl_t *osh, void *adapter)
+{
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+	if (!bcm_static_buf && adapter) {
+		if (!(bcm_static_buf = (bcm_static_buf_t *)wifi_platform_prealloc(adapter,
+			3, STATIC_BUF_SIZE + STATIC_BUF_TOTAL_LEN))) {
+			printk("can not alloc static buf!\n");
+			bcm_static_skb = NULL;
+			ASSERT(osh->magic == OS_HANDLE_MAGIC);
+			return -ENOMEM;
+		}
+		else
+			printk("alloc static buf at %x!\n", (unsigned int)bcm_static_buf);
+
+
+		sema_init(&bcm_static_buf->static_sem, 1);
+
+		bcm_static_buf->buf_ptr = (unsigned char *)bcm_static_buf + STATIC_BUF_SIZE;
+	}
+
+	if (!bcm_static_skb && adapter) {
+		int i;
+		void *skb_buff_ptr = 0;
+		bcm_static_skb = (bcm_static_pkt_t *)((char *)bcm_static_buf + 2048);
+		skb_buff_ptr = wifi_platform_prealloc(adapter, 4, 0);
+		if (!skb_buff_ptr) {
+			printk("cannot alloc static buf!\n");
+			bcm_static_buf = NULL;
+			bcm_static_skb = NULL;
+			ASSERT(osh->magic == OS_HANDLE_MAGIC);
+			return -ENOMEM;
+		}
+
+		bcopy(skb_buff_ptr, bcm_static_skb, sizeof(struct sk_buff *) *
+			(STATIC_PKT_MAX_NUM));
+		for (i = 0; i < STATIC_PKT_MAX_NUM; i++)
+			bcm_static_skb->pkt_use[i] = 0;
+
+#if defined(BCMPCIE)
+		spin_lock_init(&bcm_static_skb->osl_pkt_lock);
+#else
+		sema_init(&bcm_static_skb->osl_pkt_sem, 1);
+#endif /* BCMPCIE */
+	}
+
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+
+	return 0;
+}
+
+void osl_set_bus_handle(osl_t *osh, void *bus_handle)
+{
+	osh->bus_handle = bus_handle;
+}
+
+void* osl_get_bus_handle(osl_t *osh)
+{
+	return osh->bus_handle;
+}
+
+void
+osl_detach(osl_t *osh)
+{
+	if (osh == NULL)
+		return;
+
+	ASSERT(osh->magic == OS_HANDLE_MAGIC);
+	atomic_sub(1, &osh->cmn->refcount);
+	if (atomic_read(&osh->cmn->refcount) == 0) {
+			kfree(osh->cmn);
+	}
+	kfree(osh);
+}
+
+int osl_static_mem_deinit(osl_t *osh, void *adapter)
+{
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+	if (bcm_static_buf) {
+		bcm_static_buf = 0;
+	}
+	if (bcm_static_skb) {
+		bcm_static_skb = 0;
+	}
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+	return 0;
+}
+
+
+static struct sk_buff *osl_alloc_skb(osl_t *osh, unsigned int len)
+{
+	struct sk_buff *skb;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
+	gfp_t flags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
+#if defined(CONFIG_SPARSEMEM) && defined(CONFIG_ZONE_DMA)
+	flags |= GFP_ATOMIC;
+#endif
+
+	skb = __dev_alloc_skb(len, flags);
+#else
+	skb = dev_alloc_skb(len);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) */
+	return skb;
+}
+
+#ifdef CTFPOOL
+
+#ifdef CTFPOOL_SPINLOCK
+#define CTFPOOL_LOCK(ctfpool, flags)	spin_lock_irqsave(&(ctfpool)->lock, flags)
+#define CTFPOOL_UNLOCK(ctfpool, flags)	spin_unlock_irqrestore(&(ctfpool)->lock, flags)
+#else
+#define CTFPOOL_LOCK(ctfpool, flags)	spin_lock_bh(&(ctfpool)->lock)
+#define CTFPOOL_UNLOCK(ctfpool, flags)	spin_unlock_bh(&(ctfpool)->lock)
+#endif /* CTFPOOL_SPINLOCK */
+/*
+ * Allocate and add an object to packet pool.
+ */
+void *
+osl_ctfpool_add(osl_t *osh)
+{
+	struct sk_buff *skb;
+#ifdef CTFPOOL_SPINLOCK
+	unsigned long flags;
+#endif /* CTFPOOL_SPINLOCK */
+
+	if ((osh == NULL) || (osh->ctfpool == NULL))
+		return NULL;
+
+	CTFPOOL_LOCK(osh->ctfpool, flags);
+	ASSERT(osh->ctfpool->curr_obj <= osh->ctfpool->max_obj);
+
+	/* No need to allocate more objects */
+	if (osh->ctfpool->curr_obj == osh->ctfpool->max_obj) {
+		CTFPOOL_UNLOCK(osh->ctfpool, flags);
+		return NULL;
+	}
+
+	/* Allocate a new skb and add it to the ctfpool */
+	skb = osl_alloc_skb(osh, osh->ctfpool->obj_size);
+	if (skb == NULL) {
+		printf("%s: skb alloc of len %d failed\n", __FUNCTION__,
+		       osh->ctfpool->obj_size);
+		CTFPOOL_UNLOCK(osh->ctfpool, flags);
+		return NULL;
+	}
+
+	/* Add to ctfpool */
+	skb->next = (struct sk_buff *)osh->ctfpool->head;
+	osh->ctfpool->head = skb;
+	osh->ctfpool->fast_frees++;
+	osh->ctfpool->curr_obj++;
+
+	/* Hijack a skb member to store ptr to ctfpool */
+	CTFPOOLPTR(osh, skb) = (void *)osh->ctfpool;
+
+	/* Use bit flag to indicate skb from fast ctfpool */
+	PKTFAST(osh, skb) = FASTBUF;
+
+	CTFPOOL_UNLOCK(osh->ctfpool, flags);
+
+	return skb;
+}
+
+/*
+ * Add new objects to the pool.
+ */
+void
+osl_ctfpool_replenish(osl_t *osh, uint thresh)
+{
+	if ((osh == NULL) || (osh->ctfpool == NULL))
+		return;
+
+	/* Do nothing if no refills are required */
+	while ((osh->ctfpool->refills > 0) && (thresh--)) {
+		osl_ctfpool_add(osh);
+		osh->ctfpool->refills--;
+	}
+}
+
+/*
+ * Initialize the packet pool with specified number of objects.
+ */
+int32
+osl_ctfpool_init(osl_t *osh, uint numobj, uint size)
+{
+	gfp_t flags;
+
+	flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
+	osh->ctfpool = kzalloc(sizeof(ctfpool_t), flags);
+	ASSERT(osh->ctfpool);
+
+	osh->ctfpool->max_obj = numobj;
+	osh->ctfpool->obj_size = size;
+
+	spin_lock_init(&osh->ctfpool->lock);
+
+	while (numobj--) {
+		if (!osl_ctfpool_add(osh))
+			return -1;
+		osh->ctfpool->fast_frees--;
+	}
+
+	return 0;
+}
+
+/*
+ * Cleanup the packet pool objects.
+ */
+void
+osl_ctfpool_cleanup(osl_t *osh)
+{
+	struct sk_buff *skb, *nskb;
+#ifdef CTFPOOL_SPINLOCK
+	unsigned long flags;
+#endif /* CTFPOOL_SPINLOCK */
+
+	if ((osh == NULL) || (osh->ctfpool == NULL))
+		return;
+
+	CTFPOOL_LOCK(osh->ctfpool, flags);
+
+	skb = osh->ctfpool->head;
+
+	while (skb != NULL) {
+		nskb = skb->next;
+		dev_kfree_skb(skb);
+		skb = nskb;
+		osh->ctfpool->curr_obj--;
+	}
+
+	ASSERT(osh->ctfpool->curr_obj == 0);
+	osh->ctfpool->head = NULL;
+	CTFPOOL_UNLOCK(osh->ctfpool, flags);
+
+	kfree(osh->ctfpool);
+	osh->ctfpool = NULL;
+}
+
+void
+osl_ctfpool_stats(osl_t *osh, void *b)
+{
+	struct bcmstrbuf *bb;
+
+	if ((osh == NULL) || (osh->ctfpool == NULL))
+		return;
+
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+	if (bcm_static_buf) {
+		bcm_static_buf = 0;
+	}
+	if (bcm_static_skb) {
+		bcm_static_skb = 0;
+	}
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+
+	bb = b;
+
+	ASSERT((osh != NULL) && (bb != NULL));
+
+	bcm_bprintf(bb, "max_obj %d obj_size %d curr_obj %d refills %d\n",
+	            osh->ctfpool->max_obj, osh->ctfpool->obj_size,
+	            osh->ctfpool->curr_obj, osh->ctfpool->refills);
+	bcm_bprintf(bb, "fast_allocs %d fast_frees %d slow_allocs %d\n",
+	            osh->ctfpool->fast_allocs, osh->ctfpool->fast_frees,
+	            osh->ctfpool->slow_allocs);
+}
+
+static inline struct sk_buff *
+osl_pktfastget(osl_t *osh, uint len)
+{
+	struct sk_buff *skb;
+#ifdef CTFPOOL_SPINLOCK
+	unsigned long flags;
+#endif /* CTFPOOL_SPINLOCK */
+
+	/* Try to do fast allocate. Return null if ctfpool is not in use
+	 * or if there are no items in the ctfpool.
+	 */
+	if (osh->ctfpool == NULL)
+		return NULL;
+
+	CTFPOOL_LOCK(osh->ctfpool, flags);
+	if (osh->ctfpool->head == NULL) {
+		ASSERT(osh->ctfpool->curr_obj == 0);
+		osh->ctfpool->slow_allocs++;
+		CTFPOOL_UNLOCK(osh->ctfpool, flags);
+		return NULL;
+	}
+
+	if (len > osh->ctfpool->obj_size) {
+		CTFPOOL_UNLOCK(osh->ctfpool, flags);
+		return NULL;
+	}
+
+	ASSERT(len <= osh->ctfpool->obj_size);
+
+	/* Get an object from ctfpool */
+	skb = (struct sk_buff *)osh->ctfpool->head;
+	osh->ctfpool->head = (void *)skb->next;
+
+	osh->ctfpool->fast_allocs++;
+	osh->ctfpool->curr_obj--;
+	ASSERT(CTFPOOLHEAD(osh, skb) == (struct sock *)osh->ctfpool->head);
+	CTFPOOL_UNLOCK(osh->ctfpool, flags);
+
+	/* Init skb struct */
+	skb->next = skb->prev = NULL;
+#if defined(__ARM_ARCH_7A__)
+	skb->data = skb->head + NET_SKB_PAD;
+	skb->tail = skb->head + NET_SKB_PAD;
+#else
+	skb->data = skb->head + 16;
+	skb->tail = skb->head + 16;
+#endif /* __ARM_ARCH_7A__ */
+	skb->len = 0;
+	skb->cloned = 0;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14)
+	skb->list = NULL;
+#endif
+	atomic_set(&skb->users, 1);
+
+	PKTSETCLINK(skb, NULL);
+	PKTCCLRATTR(skb);
+	PKTFAST(osh, skb) &= ~(CTFBUF | SKIPCT | CHAINED);
+
+	return skb;
+}
+#endif /* CTFPOOL */
+
+#if defined(BCM_GMAC3)
+/* Account for a packet delivered to downstream forwarder.
+ * Decrement a GMAC forwarder interface's pktalloced count.
+ */
+void BCMFASTPATH
+osl_pkt_tofwder(osl_t *osh, void *skbs, int skb_cnt)
+{
+
+	atomic_sub(skb_cnt, &osh->cmn->pktalloced);
+}
+
+/* Account for a downstream forwarder delivered packet to a WL/DHD driver.
+ * Increment a GMAC forwarder interface's pktalloced count.
+ */
+void BCMFASTPATH
+osl_pkt_frmfwder(osl_t *osh, void *skbs, int skb_cnt)
+{
+
+
+	atomic_add(skb_cnt, &osh->cmn->pktalloced);
+}
+
+#endif /* BCM_GMAC3 */
+
+/* Convert a driver packet to native(OS) packet
+ * In the process, packettag is zeroed out before sending up
+ * IP code depends on skb->cb to be setup correctly with various options
+ * In our case, that means it should be 0
+ */
+struct sk_buff * BCMFASTPATH
+osl_pkt_tonative(osl_t *osh, void *pkt)
+{
+	struct sk_buff *nskb;
+
+	if (osh->pub.pkttag)
+		OSL_PKTTAG_CLEAR(pkt);
+
+	/* Decrement the packet counter */
+	for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) {
+		atomic_sub(PKTISCHAINED(nskb) ? PKTCCNT(nskb) : 1, &osh->cmn->pktalloced);
+
+	}
+	return (struct sk_buff *)pkt;
+}
+
+/* Convert a native(OS) packet to driver packet.
+ * In the process, native packet is destroyed, there is no copying
+ * Also, a packettag is zeroed out
+ */
+void * BCMFASTPATH
+osl_pkt_frmnative(osl_t *osh, void *pkt)
+{
+	struct sk_buff *nskb;
+
+	if (osh->pub.pkttag)
+		OSL_PKTTAG_CLEAR(pkt);
+
+	/* Increment the packet counter */
+	for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) {
+		atomic_add(PKTISCHAINED(nskb) ? PKTCCNT(nskb) : 1, &osh->cmn->pktalloced);
+
+	}
+	return (void *)pkt;
+}
+
+/* Return a new packet. zero out pkttag */
+void * BCMFASTPATH
+osl_pktget(osl_t *osh, uint len)
+{
+	struct sk_buff *skb;
+
+#ifdef CTFPOOL
+	/* Allocate from local pool */
+	skb = osl_pktfastget(osh, len);
+	if ((skb != NULL) || ((skb = osl_alloc_skb(osh, len)) != NULL)) {
+#else /* CTFPOOL */
+	if ((skb = osl_alloc_skb(osh, len))) {
+#endif /* CTFPOOL */
+		skb->tail += len;
+		skb->len  += len;
+		skb->priority = 0;
+
+		atomic_inc(&osh->cmn->pktalloced);
+	}
+	return ((void*) skb);
+}
+
+#ifdef CTFPOOL
+static inline void
+osl_pktfastfree(osl_t *osh, struct sk_buff *skb)
+{
+	ctfpool_t *ctfpool;
+#ifdef CTFPOOL_SPINLOCK
+	unsigned long flags;
+#endif /* CTFPOOL_SPINLOCK */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
+	skb->tstamp.tv.sec = 0;
+#else
+	skb->stamp.tv_sec = 0;
+#endif
+
+	/* We only need to init the fields that we change */
+	skb->dev = NULL;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
+	skb->dst = NULL;
+#endif
+	OSL_PKTTAG_CLEAR(skb);
+	skb->ip_summed = 0;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
+	skb_orphan(skb);
+#else
+	skb->destructor = NULL;
+#endif
+
+	ctfpool = (ctfpool_t *)CTFPOOLPTR(osh, skb);
+	ASSERT(ctfpool != NULL);
+
+	/* Add object to the ctfpool */
+	CTFPOOL_LOCK(ctfpool, flags);
+	skb->next = (struct sk_buff *)ctfpool->head;
+	ctfpool->head = (void *)skb;
+
+	ctfpool->fast_frees++;
+	ctfpool->curr_obj++;
+
+	ASSERT(ctfpool->curr_obj <= ctfpool->max_obj);
+	CTFPOOL_UNLOCK(ctfpool, flags);
+}
+#endif /* CTFPOOL */
+
+/* Free the driver packet. Free the tag if present */
+void BCMFASTPATH
+osl_pktfree(osl_t *osh, void *p, bool send)
+{
+	struct sk_buff *skb, *nskb;
+	if (osh == NULL)
+		return;
+
+	skb = (struct sk_buff*) p;
+
+	if (send && osh->pub.tx_fn)
+		osh->pub.tx_fn(osh->pub.tx_ctx, p, 0);
+
+	PKTDBG_TRACE(osh, (void *) skb, PKTLIST_PKTFREE);
+
+	/* perversion: we use skb->next to chain multi-skb packets */
+	while (skb) {
+		nskb = skb->next;
+		skb->next = NULL;
+
+
+
+#ifdef CTFPOOL
+		if (PKTISFAST(osh, skb)) {
+			if (atomic_read(&skb->users) == 1)
+				smp_rmb();
+			else if (!atomic_dec_and_test(&skb->users))
+				goto next_skb;
+			osl_pktfastfree(osh, skb);
+		} else
+#endif
+		{
+			dev_kfree_skb_any(skb);
+		}
+#ifdef CTFPOOL
+next_skb:
+#endif
+		atomic_dec(&osh->cmn->pktalloced);
+		skb = nskb;
+	}
+}
+
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+void*
+osl_pktget_static(osl_t *osh, uint len)
+{
+	int i = 0;
+	struct sk_buff *skb;
+#if defined(BCMPCIE)
+	unsigned long flags;
+#endif /* BCMPCIE */
+
+	if (!bcm_static_skb)
+		return osl_pktget(osh, len);
+
+	if (len > DHD_SKB_MAX_BUFSIZE) {
+		printk("%s: attempt to allocate huge packet (0x%x)\n", __FUNCTION__, len);
+		return osl_pktget(osh, len);
+	}
+
+#if defined(BCMPCIE)
+	spin_lock_irqsave(&bcm_static_skb->osl_pkt_lock, flags);
+#else
+	down(&bcm_static_skb->osl_pkt_sem);
+#endif /* BCMPCIE */
+
+	if (len <= DHD_SKB_1PAGE_BUFSIZE) {
+		for (i = 0; i < STATIC_PKT_1PAGE_NUM; i++)
+		{
+			if (bcm_static_skb->pkt_use[i] == 0) {
+				break;
+			}
+		}
+
+		if (i != STATIC_PKT_1PAGE_NUM)
+		{
+			bcm_static_skb->pkt_use[i] = 1;
+
+			skb = bcm_static_skb->skb_4k[i];
+			skb->len = len;
+
+#if defined(BCMPCIE)
+#if defined(__ARM_ARCH_7A__)
+			skb->data = skb->head + NET_SKB_PAD;
+			skb->tail = skb->head + NET_SKB_PAD;
+#else
+			skb->data = skb->head + 16;
+			skb->tail = skb->head + 16;
+#endif /* __ARM_ARCH_7A__ */
+			skb->cloned = 0;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14)
+			skb->list = NULL;
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14) */
+			spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
+#else
+			skb->tail = skb->data + len;
+			up(&bcm_static_skb->osl_pkt_sem);
+#endif /* BCMPCIE */
+			return skb;
+		}
+	}
+
+	if (len <= DHD_SKB_2PAGE_BUFSIZE) {
+		for (i = STATIC_PKT_1PAGE_NUM; i < STATIC_PKT_1_2PAGE_NUM; i++) {
+			if (bcm_static_skb->pkt_use[i] == 0)
+				break;
+		}
+
+		if ((i >= STATIC_PKT_1PAGE_NUM) && (i < STATIC_PKT_1_2PAGE_NUM)) {
+			bcm_static_skb->pkt_use[i] = 1;
+			skb = bcm_static_skb->skb_8k[i - STATIC_PKT_1PAGE_NUM];
+			skb->tail = skb->data + len;
+			skb->len = len;
+#if defined(BCMPCIE)
+			spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
+#else
+			up(&bcm_static_skb->osl_pkt_sem);
+#endif /* BCMPCIE */
+			return skb;
+		}
+	}
+
+#if !defined(BCMPCIE)
+#if defined(ENHANCED_STATIC_BUF)
+	if (bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM - 1] == 0) {
+		bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM - 1] = 1;
+
+		skb = bcm_static_skb->skb_16k;
+		skb->tail = skb->data + len;
+		skb->len = len;
+
+		up(&bcm_static_skb->osl_pkt_sem);
+		return skb;
+	}
+#endif /* ENHANCED_STATIC_BUF */
+#endif /* !BCMPCIE */
+
+#if defined(BCMPCIE)
+	spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
+#else
+	up(&bcm_static_skb->osl_pkt_sem);
+#endif /* BCMPCIE */
+	printk("%s: all static pkt in use!\n", __FUNCTION__);
+	return osl_pktget(osh, len);
+}
+
+void
+osl_pktfree_static(osl_t *osh, void *p, bool send)
+{
+	int i;
+#if defined(BCMPCIE)
+	unsigned long flags;
+#endif /* BCMPCIE */
+
+	if (!bcm_static_skb) {
+		osl_pktfree(osh, p, send);
+		return;
+	}
+
+#if defined(BCMPCIE)
+	spin_lock_irqsave(&bcm_static_skb->osl_pkt_lock, flags);
+#else
+	down(&bcm_static_skb->osl_pkt_sem);
+#endif /* BCMPCIE */
+
+	for (i = 0; i < STATIC_PKT_1PAGE_NUM; i++) {
+		if (p == bcm_static_skb->skb_4k[i]) {
+			bcm_static_skb->pkt_use[i] = 0;
+#if defined(BCMPCIE)
+			spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
+#else
+			up(&bcm_static_skb->osl_pkt_sem);
+#endif /* BCMPCIE */
+			return;
+		}
+	}
+
+	for (i = STATIC_PKT_1PAGE_NUM; i < STATIC_PKT_1_2PAGE_NUM; i++) {
+		if (p == bcm_static_skb->skb_8k[i - STATIC_PKT_1PAGE_NUM]) {
+			bcm_static_skb->pkt_use[i] = 0;
+#if defined(BCMPCIE)
+			spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
+#else
+			up(&bcm_static_skb->osl_pkt_sem);
+#endif /* BCMPCIE */
+			return;
+		}
+	}
+#if !defined(BCMPCIE)
+#ifdef ENHANCED_STATIC_BUF
+	if (p == bcm_static_skb->skb_16k) {
+		bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM - 1] = 0;
+		up(&bcm_static_skb->osl_pkt_sem);
+		return;
+	}
+#endif /* ENHANCED_STATIC_BUF */
+#endif /* !BCMPCIE */
+
+#if defined(BCMPCIE)
+	spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
+#else
+	up(&bcm_static_skb->osl_pkt_sem);
+#endif /* BCMPCIE */
+	osl_pktfree(osh, p, send);
+}
+
+void
+osl_pktclear_static(osl_t *osh)
+{
+	int i;
+#if defined(BCMPCIE)
+	unsigned long flags;
+#endif /* BCMPCIE */
+
+	if (!bcm_static_skb) {
+		printk("%s: bcm_static_skb is NULL\n", __FUNCTION__);
+		return;
+	}
+
+#if defined(BCMPCIE)
+	spin_lock_irqsave(&bcm_static_skb->osl_pkt_lock, flags);
+#else
+	down(&bcm_static_skb->osl_pkt_sem);
+#endif /* BCMPCIE */
+	for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
+		if (bcm_static_skb->pkt_use[i]) {
+			bcm_static_skb->pkt_use[i] = 0;
+		}
+	}
+
+#if defined(BCMPCIE)
+	spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
+#else
+	up(&bcm_static_skb->osl_pkt_sem);
+#endif /* BCMPCIE */
+}
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+
+uint32
+osl_pci_read_config(osl_t *osh, uint offset, uint size)
+{
+	uint val = 0;
+	uint retry = PCI_CFG_RETRY;
+
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+
+	/* only 4byte access supported */
+	ASSERT(size == 4);
+
+	do {
+		pci_read_config_dword(osh->pdev, offset, &val);
+		if (val != 0xffffffff)
+			break;
+	} while (retry--);
+
+
+	return (val);
+}
+
+void
+osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val)
+{
+	uint retry = PCI_CFG_RETRY;
+
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+
+	/* only 4byte access supported */
+	ASSERT(size == 4);
+
+	do {
+		pci_write_config_dword(osh->pdev, offset, val);
+		if (offset != PCI_BAR0_WIN)
+			break;
+		if (osl_pci_read_config(osh, offset, size) == val)
+			break;
+	} while (retry--);
+
+}
+
+/* return bus # for the pci device pointed by osh->pdev */
+uint
+osl_pci_bus(osl_t *osh)
+{
+	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
+
+#if defined(__ARM_ARCH_7A__) && LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35)
+	return pci_domain_nr(((struct pci_dev *)osh->pdev)->bus);
+#else
+	return ((struct pci_dev *)osh->pdev)->bus->number;
+#endif
+}
+
+/* return slot # for the pci device pointed by osh->pdev */
+uint
+osl_pci_slot(osl_t *osh)
+{
+	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
+
+#if defined(__ARM_ARCH_7A__) && LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35)
+	return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn) + 1;
+#else
+	return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn);
+#endif
+}
+
+/* return domain # for the pci device pointed by osh->pdev */
+uint
+osl_pcie_domain(osl_t *osh)
+{
+	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
+
+	return pci_domain_nr(((struct pci_dev *)osh->pdev)->bus);
+}
+
+/* return bus # for the pci device pointed by osh->pdev */
+uint
+osl_pcie_bus(osl_t *osh)
+{
+	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
+
+	return ((struct pci_dev *)osh->pdev)->bus->number;
+}
+
+/* return the pci device pointed by osh->pdev */
+struct pci_dev *
+osl_pci_device(osl_t *osh)
+{
+	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
+
+	return osh->pdev;
+}
+
+static void
+osl_pcmcia_attr(osl_t *osh, uint offset, char *buf, int size, bool write)
+{
+}
+
+void
+osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size)
+{
+	osl_pcmcia_attr(osh, offset, (char *) buf, size, FALSE);
+}
+
+void
+osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size)
+{
+	osl_pcmcia_attr(osh, offset, (char *) buf, size, TRUE);
+}
+
+void *
+osl_malloc(osl_t *osh, uint size)
+{
+	void *addr;
+	gfp_t flags;
+
+	/* only ASSERT if osh is defined */
+	if (osh)
+		ASSERT(osh->magic == OS_HANDLE_MAGIC);
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+	if (bcm_static_buf)
+	{
+		int i = 0;
+		if ((size >= PAGE_SIZE)&&(size <= STATIC_BUF_SIZE))
+		{
+			down(&bcm_static_buf->static_sem);
+
+			for (i = 0; i < STATIC_BUF_MAX_NUM; i++)
+			{
+				if (bcm_static_buf->buf_use[i] == 0)
+					break;
+			}
+
+			if (i == STATIC_BUF_MAX_NUM)
+			{
+				up(&bcm_static_buf->static_sem);
+				printk("all static buff in use!\n");
+				goto original;
+			}
+
+			bcm_static_buf->buf_use[i] = 1;
+			up(&bcm_static_buf->static_sem);
+
+			bzero(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i, size);
+			if (osh)
+				atomic_add(size, &osh->cmn->malloced);
+
+			return ((void *)(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i));
+		}
+	}
+original:
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+
+	flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
+	if ((addr = kmalloc(size, flags)) == NULL) {
+		if (osh)
+			osh->failed++;
+		return (NULL);
+	}
+	if (osh && osh->cmn)
+		atomic_add(size, &osh->cmn->malloced);
+
+	return (addr);
+}
+
+void *
+osl_mallocz(osl_t *osh, uint size)
+{
+	void *ptr;
+
+	ptr = osl_malloc(osh, size);
+
+	if (ptr != NULL) {
+		bzero(ptr, size);
+	}
+
+	return ptr;
+}
+
+void
+osl_mfree(osl_t *osh, void *addr, uint size)
+{
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+	if (bcm_static_buf)
+	{
+		if ((addr > (void *)bcm_static_buf) && ((unsigned char *)addr
+			<= ((unsigned char *)bcm_static_buf + STATIC_BUF_TOTAL_LEN)))
+		{
+			int buf_idx = 0;
+
+			buf_idx = ((unsigned char *)addr - bcm_static_buf->buf_ptr)/STATIC_BUF_SIZE;
+
+			down(&bcm_static_buf->static_sem);
+			bcm_static_buf->buf_use[buf_idx] = 0;
+			up(&bcm_static_buf->static_sem);
+
+			if (osh && osh->cmn) {
+				ASSERT(osh->magic == OS_HANDLE_MAGIC);
+				atomic_sub(size, &osh->cmn->malloced);
+			}
+			return;
+		}
+	}
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+	if (osh && osh->cmn) {
+		ASSERT(osh->magic == OS_HANDLE_MAGIC);
+
+		ASSERT(size <= osl_malloced(osh));
+
+		atomic_sub(size, &osh->cmn->malloced);
+	}
+	kfree(addr);
+}
+
+uint
+osl_check_memleak(osl_t *osh)
+{
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+	if (atomic_read(&osh->cmn->refcount) == 1)
+		return (atomic_read(&osh->cmn->malloced));
+	else
+		return 0;
+}
+
+uint
+osl_malloced(osl_t *osh)
+{
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+	return (atomic_read(&osh->cmn->malloced));
+}
+
+uint
+osl_malloc_failed(osl_t *osh)
+{
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+	return (osh->failed);
+}
+
+
+uint
+osl_dma_consistent_align(void)
+{
+	return (PAGE_SIZE);
+}
+
+void*
+osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, uint *alloced, dmaaddr_t *pap)
+{
+	void *va;
+	uint16 align = (1 << align_bits);
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+
+	if (!ISALIGNED(DMA_CONSISTENT_ALIGN, align))
+		size += align;
+	*alloced = size;
+
+#if defined(USE_KMALLOC_FOR_FLOW_RING) && defined(__ARM_ARCH_7A__)
+	va = kmalloc(size, GFP_ATOMIC | __GFP_ZERO);
+	if (va)
+		*pap = (ulong)__virt_to_phys((ulong)va);
+#else
+	{
+		dma_addr_t pap_lin;
+		va = pci_alloc_consistent(osh->pdev, size, &pap_lin);
+		*pap = (dmaaddr_t)pap_lin;
+	}
+#endif
+	return va;
+}
+
+void
+osl_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa)
+{
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+
+#if defined(USE_KMALLOC_FOR_FLOW_RING) && defined(__ARM_ARCH_7A__)
+	kfree(va);
+#else
+	pci_free_consistent(osh->pdev, size, va, (dma_addr_t)pa);
+#endif
+}
+
+dmaaddr_t BCMFASTPATH
+osl_dma_map(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *dmah)
+{
+	int dir;
+
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+	dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
+
+#if defined(__ARM_ARCH_7A__) && defined(BCMDMASGLISTOSL)
+	if (dmah != NULL) {
+		int32 nsegs, i, totsegs = 0, totlen = 0;
+		struct scatterlist *sg, _sg[MAX_DMA_SEGS * 2];
+		struct sk_buff *skb;
+		for (skb = (struct sk_buff *)p; skb != NULL; skb = PKTNEXT(osh, skb)) {
+			sg = &_sg[totsegs];
+			if (skb_is_nonlinear(skb)) {
+				nsegs = skb_to_sgvec(skb, sg, 0, PKTLEN(osh, skb));
+				ASSERT((nsegs > 0) && (totsegs + nsegs <= MAX_DMA_SEGS));
+				pci_map_sg(osh->pdev, sg, nsegs, dir);
+			} else {
+				nsegs = 1;
+				ASSERT(totsegs + nsegs <= MAX_DMA_SEGS);
+				sg->page_link = 0;
+				sg_set_buf(sg, PKTDATA(osh, skb), PKTLEN(osh, skb));
+				pci_map_single(osh->pdev, PKTDATA(osh, skb), PKTLEN(osh, skb), dir);
+			}
+			totsegs += nsegs;
+			totlen += PKTLEN(osh, skb);
+		}
+		dmah->nsegs = totsegs;
+		dmah->origsize = totlen;
+		for (i = 0, sg = _sg; i < totsegs; i++, sg++) {
+			dmah->segs[i].addr = sg_phys(sg);
+			dmah->segs[i].length = sg->length;
+		}
+		return dmah->segs[0].addr;
+	}
+#endif /* __ARM_ARCH_7A__ && BCMDMASGLISTOSL */
+
+	return (pci_map_single(osh->pdev, va, size, dir));
+}
+
+void BCMFASTPATH
+osl_dma_unmap(osl_t *osh, uint pa, uint size, int direction)
+{
+	int dir;
+
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+	dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
+	pci_unmap_single(osh->pdev, (uint32)pa, size, dir);
+}
+
+
+#if defined(USE_KMALLOC_FOR_FLOW_RING) && defined(__ARM_ARCH_7A__)
+
+inline void BCMFASTPATH
+osl_cache_flush(void *va, uint size)
+{
+	if (size > 0)
+		dma_sync_single_for_device(OSH_NULL, virt_to_dma(OSH_NULL, va), size, DMA_TX);
+}
+
+inline void BCMFASTPATH
+osl_cache_inv(void *va, uint size)
+{
+	dma_sync_single_for_cpu(OSH_NULL, virt_to_dma(OSH_NULL, va), size, DMA_RX);
+}
+
+inline void osl_prefetch(const void *ptr)
+{
+	/* Borrowed from linux/linux-2.6/include/asm-arm/processor.h */
+	__asm__ __volatile__(
+		"pld\t%0"
+		:
+		: "o" (*(char *)ptr)
+		: "cc");
+}
+
+int osl_arch_is_coherent(void)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)
+	return 0;
+#else
+	return arch_is_coherent();
+#endif
+}
+#endif
+
+#if defined(BCMASSERT_LOG)
+void
+osl_assert(const char *exp, const char *file, int line)
+{
+	char tempbuf[256];
+	const char *basename;
+
+	basename = strrchr(file, '/');
+	/* skip the '/' */
+	if (basename)
+		basename++;
+
+	if (!basename)
+		basename = file;
+
+#ifdef BCMASSERT_LOG
+	snprintf(tempbuf, 64, "\"%s\": file \"%s\", line %d\n",
+		exp, basename, line);
+	printk("%s", tempbuf);
+#endif /* BCMASSERT_LOG */
+
+
+}
+#endif
+
+void
+osl_delay(uint usec)
+{
+	uint d;
+
+	while (usec > 0) {
+		d = MIN(usec, 1000);
+		udelay(d);
+		usec -= d;
+	}
+}
+
+void
+osl_sleep(uint ms)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
+	if (ms < 20)
+		usleep_range(ms*1000, ms*1000 + 1000);
+	else
+#endif
+	msleep(ms);
+}
+
+
+
+/* Clone a packet.
+ * The pkttag contents are NOT cloned.
+ */
+void *
+osl_pktdup(osl_t *osh, void *skb)
+{
+	void * p;
+
+	ASSERT(!PKTISCHAINED(skb));
+
+	/* clear the CTFBUF flag if set and map the rest of the buffer
+	 * before cloning.
+	 */
+	PKTCTFMAP(osh, skb);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
+	if ((p = pskb_copy((struct sk_buff *)skb, GFP_ATOMIC)) == NULL)
+#else
+	if ((p = skb_clone((struct sk_buff *)skb, GFP_ATOMIC)) == NULL)
+#endif
+		return NULL;
+
+#ifdef CTFPOOL
+	if (PKTISFAST(osh, skb)) {
+		ctfpool_t *ctfpool;
+
+		/* if the buffer allocated from ctfpool is cloned then
+		 * we can't be sure when it will be freed. since there
+		 * is a chance that we will be losing a buffer
+		 * from our pool, we increment the refill count for the
+		 * object to be alloced later.
+		 */
+		ctfpool = (ctfpool_t *)CTFPOOLPTR(osh, skb);
+		ASSERT(ctfpool != NULL);
+		PKTCLRFAST(osh, p);
+		PKTCLRFAST(osh, skb);
+		ctfpool->refills++;
+	}
+#endif /* CTFPOOL */
+
+	/* Clear PKTC  context */
+	PKTSETCLINK(p, NULL);
+	PKTCCLRFLAGS(p);
+	PKTCSETCNT(p, 1);
+	PKTCSETLEN(p, PKTLEN(osh, skb));
+
+	/* skb_clone copies skb->cb.. we don't want that */
+	if (osh->pub.pkttag)
+		OSL_PKTTAG_CLEAR(p);
+
+	/* Increment the packet counter */
+	atomic_inc(&osh->cmn->pktalloced);
+	return (p);
+}
+
+
+
+/*
+ * OSLREGOPS specifies the use of osl_XXX routines to be used for register access
+ */
+
+/*
+ * BINOSL selects the slightly slower function-call-based binary compatible osl.
+ */
+
+uint
+osl_pktalloced(osl_t *osh)
+{
+	if (atomic_read(&osh->cmn->refcount) == 1)
+		return (atomic_read(&osh->cmn->pktalloced));
+	else
+		return 0;
+}
+
+uint32
+osl_rand(void)
+{
+	uint32 rand;
+
+	get_random_bytes(&rand, sizeof(rand));
+
+	return rand;
+}
+
+/* Linux Kernel: File Operations: start */
+void *
+osl_os_open_image(char *filename)
+{
+	struct file *fp;
+
+	fp = filp_open(filename, O_RDONLY, 0);
+	/*
+	 * 2.6.11 (FC4) supports filp_open() but later revs don't?
+	 * Alternative:
+	 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
+	 * ???
+	 */
+	 if (IS_ERR(fp))
+		 fp = NULL;
+
+	 return fp;
+}
+
+int
+osl_os_get_image_block(char *buf, int len, void *image)
+{
+	struct file *fp = (struct file *)image;
+	int rdlen;
+
+	if (!image)
+		return 0;
+
+	rdlen = kernel_read(fp, fp->f_pos, buf, len);
+	if (rdlen > 0)
+		fp->f_pos += rdlen;
+
+	return rdlen;
+}
+
+void
+osl_os_close_image(void *image)
+{
+	if (image)
+		filp_close((struct file *)image, NULL);
+}
+
+int
+osl_os_image_size(void *image)
+{
+	int len = 0, curroffset;
+
+	if (image) {
+		/* store the current offset */
+		curroffset = generic_file_llseek(image, 0, 1);
+		/* goto end of file to get length */
+		len = generic_file_llseek(image, 0, 2);
+		/* restore back the offset */
+		generic_file_llseek(image, curroffset, 0);
+	}
+	return len;
+}
+
+/* Linux Kernel: File Operations: end */
+
+
+/* APIs to set/get specific quirks in OSL layer */
+void
+osl_flag_set(osl_t *osh, uint32 mask)
+{
+	osh->flags |= mask;
+}
+
+bool
+osl_is_flag_set(osl_t *osh, uint32 mask)
+{
+	return (osh->flags & mask);
+}
diff --git a/drivers/net/wireless/bcmdhd/pcie_core.c b/drivers/net/wireless/bcmdhd/pcie_core.c
new file mode 100644
index 0000000..1eaedf5
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/pcie_core.c
@@ -0,0 +1,83 @@
+/** @file pcie_core.c
+ *
+ * Contains PCIe related functions that are shared between different driver models (e.g. firmware
+ * builds, DHD builds, BMAC builds), in order to avoid code duplication.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: pcie_core.c 444841 2013-12-21 04:32:29Z $
+ */
+
+#include <bcm_cfg.h>
+#include <typedefs.h>
+#include <bcmutils.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <siutils.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+
+#include "pcie_core.h"
+
+/* local prototypes */
+
+/* local variables */
+
+/* function definitions */
+
+#ifdef BCMDRIVER
+
+void pcie_watchdog_reset(osl_t *osh, si_t *sih, sbpcieregs_t *sbpcieregs)
+{
+	uint32 val, i, lsc;
+	uint16 cfg_offset[] = {PCIECFGREG_STATUS_CMD, PCIECFGREG_PM_CSR,
+		PCIECFGREG_MSI_CAP, PCIECFGREG_MSI_ADDR_L,
+		PCIECFGREG_MSI_ADDR_H, PCIECFGREG_MSI_DATA,
+		PCIECFGREG_LINK_STATUS_CTRL2, PCIECFGREG_RBAR_CTRL,
+		PCIECFGREG_PML1_SUB_CTRL1, PCIECFGREG_REG_BAR2_CONFIG,
+		PCIECFGREG_REG_BAR3_CONFIG};
+	uint32 origidx = si_coreidx(sih);
+
+	/* Disable/restore ASPM Control to protect the watchdog reset */
+	W_REG(osh, &sbpcieregs->configaddr, PCIECFGREG_LINK_STATUS_CTRL);
+	lsc = R_REG(osh, &sbpcieregs->configdata);
+	val = lsc & (~PCIE_ASPM_ENAB);
+	W_REG(osh, &sbpcieregs->configdata, val);
+
+	si_setcore(sih, PCIE2_CORE_ID, 0);
+	si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, watchdog), ~0, 4);
+	OSL_DELAY(100000);
+
+	W_REG(osh, &sbpcieregs->configaddr, PCIECFGREG_LINK_STATUS_CTRL);
+	W_REG(osh, &sbpcieregs->configdata, lsc);
+
+	/* Write configuration registers back to the shadow registers
+	 * cause shadow registers are cleared out after watchdog reset.
+	 */
+	for (i = 0; i < ARRAYSIZE(cfg_offset); i++) {
+		W_REG(osh, &sbpcieregs->configaddr, cfg_offset[i]);
+		val = R_REG(osh, &sbpcieregs->configdata);
+		W_REG(osh, &sbpcieregs->configdata, val);
+	}
+	si_setcoreidx(sih, origidx);
+}
+
+#endif /* BCMDRIVER */
diff --git a/drivers/net/wireless/bcmdhd/sbutils.c b/drivers/net/wireless/bcmdhd/sbutils.c
new file mode 100644
index 0000000..12c4559
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/sbutils.c
@@ -0,0 +1,1105 @@
+/*
+ * Misc utility routines for accessing chip-specific features
+ * of the SiliconBackplane-based Broadcom chips.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sbutils.c 467150 2014-04-02 17:30:43Z $
+ */
+
+#include <bcm_cfg.h>
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <siutils.h>
+#include <bcmdevs.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+#include <pcicfg.h>
+#include <sbpcmcia.h>
+
+#include "siutils_priv.h"
+
+
+/* local prototypes */
+static uint _sb_coreidx(si_info_t *sii, uint32 sba);
+static uint _sb_scan(si_info_t *sii, uint32 sba, void *regs, uint bus, uint32 sbba,
+                     uint ncores);
+static uint32 _sb_coresba(si_info_t *sii);
+static void *_sb_setcoreidx(si_info_t *sii, uint coreidx);
+#define	SET_SBREG(sii, r, mask, val)	\
+		W_SBREG((sii), (r), ((R_SBREG((sii), (r)) & ~(mask)) | (val)))
+#define	REGS2SB(va)	(sbconfig_t*) ((int8*)(va) + SBCONFIGOFF)
+
+/* sonicsrev */
+#define	SONICS_2_2	(SBIDL_RV_2_2 >> SBIDL_RV_SHIFT)
+#define	SONICS_2_3	(SBIDL_RV_2_3 >> SBIDL_RV_SHIFT)
+
+#define	R_SBREG(sii, sbr)	sb_read_sbreg((sii), (sbr))
+#define	W_SBREG(sii, sbr, v)	sb_write_sbreg((sii), (sbr), (v))
+#define	AND_SBREG(sii, sbr, v)	W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) & (v)))
+#define	OR_SBREG(sii, sbr, v)	W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) | (v)))
+
+static uint32
+sb_read_sbreg(si_info_t *sii, volatile uint32 *sbr)
+{
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint8 tmp;
+	uint32 val, intr_val = 0;
+
+
+	/*
+	 * compact flash only has 11 bits address, while we needs 12 bits address.
+	 * MEM_SEG will be OR'd with other 11 bits address in hardware,
+	 * so we program MEM_SEG with 12th bit when necessary(access sb regsiters).
+	 * For normal PCMCIA bus(CFTable_regwinsz > 2k), do nothing special
+	 */
+	if (PCMCIA(sii)) {
+		INTR_OFF(sii, intr_val);
+		tmp = 1;
+		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
+		sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11)); /* mask out bit 11 */
+	}
+
+	val = R_REG(sii->osh, sbr);
+
+	if (PCMCIA(sii)) {
+		tmp = 0;
+		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
+		INTR_RESTORE(sii, intr_val);
+	}
+
+	return (val);
+}
+
+static void
+sb_write_sbreg(si_info_t *sii, volatile uint32 *sbr, uint32 v)
+{
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint8 tmp;
+	volatile uint32 dummy;
+	uint32 intr_val = 0;
+
+
+	/*
+	 * compact flash only has 11 bits address, while we needs 12 bits address.
+	 * MEM_SEG will be OR'd with other 11 bits address in hardware,
+	 * so we program MEM_SEG with 12th bit when necessary(access sb regsiters).
+	 * For normal PCMCIA bus(CFTable_regwinsz > 2k), do nothing special
+	 */
+	if (PCMCIA(sii)) {
+		INTR_OFF(sii, intr_val);
+		tmp = 1;
+		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
+		sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11)); /* mask out bit 11 */
+	}
+
+	if (BUSTYPE(sii->pub.bustype) == PCMCIA_BUS) {
+		dummy = R_REG(sii->osh, sbr);
+		BCM_REFERENCE(dummy);
+		W_REG(sii->osh, (volatile uint16 *)sbr, (uint16)(v & 0xffff));
+		dummy = R_REG(sii->osh, sbr);
+		BCM_REFERENCE(dummy);
+		W_REG(sii->osh, ((volatile uint16 *)sbr + 1), (uint16)((v >> 16) & 0xffff));
+	} else
+		W_REG(sii->osh, sbr, v);
+
+	if (PCMCIA(sii)) {
+		tmp = 0;
+		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
+		INTR_RESTORE(sii, intr_val);
+	}
+}
+
+uint
+sb_coreid(si_t *sih)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_CC_MASK) >> SBIDH_CC_SHIFT);
+}
+
+uint
+sb_intflag(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	void *corereg;
+	sbconfig_t *sb;
+	uint origidx, intflag, intr_val = 0;
+
+	INTR_OFF(sii, intr_val);
+	origidx = si_coreidx(sih);
+	corereg = si_setcore(sih, CC_CORE_ID, 0);
+	ASSERT(corereg != NULL);
+	sb = REGS2SB(corereg);
+	intflag = R_SBREG(sii, &sb->sbflagst);
+	sb_setcoreidx(sih, origidx);
+	INTR_RESTORE(sii, intr_val);
+
+	return intflag;
+}
+
+uint
+sb_flag(si_t *sih)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	return R_SBREG(sii, &sb->sbtpsflag) & SBTPS_NUM0_MASK;
+}
+
+void
+sb_setint(si_t *sih, int siflag)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+	uint32 vec;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	if (siflag == -1)
+		vec = 0;
+	else
+		vec = 1 << siflag;
+	W_SBREG(sii, &sb->sbintvec, vec);
+}
+
+/* return core index of the core with address 'sba' */
+static uint
+_sb_coreidx(si_info_t *sii, uint32 sba)
+{
+	uint i;
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+	for (i = 0; i < sii->numcores; i ++)
+		if (sba == cores_info->coresba[i])
+			return i;
+	return BADIDX;
+}
+
+/* return core address of the current core */
+static uint32
+_sb_coresba(si_info_t *sii)
+{
+	uint32 sbaddr;
+
+
+	switch (BUSTYPE(sii->pub.bustype)) {
+	case SI_BUS: {
+		sbconfig_t *sb = REGS2SB(sii->curmap);
+		sbaddr = sb_base(R_SBREG(sii, &sb->sbadmatch0));
+		break;
+	}
+
+	case PCI_BUS:
+		sbaddr = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32));
+		break;
+
+	case PCMCIA_BUS: {
+		uint8 tmp = 0;
+		OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR0, &tmp, 1);
+		sbaddr  = (uint32)tmp << 12;
+		OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR1, &tmp, 1);
+		sbaddr |= (uint32)tmp << 16;
+		OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR2, &tmp, 1);
+		sbaddr |= (uint32)tmp << 24;
+		break;
+	}
+
+#ifdef BCMSDIO
+	case SPI_BUS:
+	case SDIO_BUS:
+		sbaddr = (uint32)(uintptr)sii->curmap;
+		break;
+#endif
+
+
+	default:
+		sbaddr = BADCOREADDR;
+		break;
+	}
+
+	return sbaddr;
+}
+
+uint
+sb_corevendor(si_t *sih)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_VC_MASK) >> SBIDH_VC_SHIFT);
+}
+
+uint
+sb_corerev(si_t *sih)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+	uint sbidh;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+	sbidh = R_SBREG(sii, &sb->sbidhigh);
+
+	return (SBCOREREV(sbidh));
+}
+
+/* set core-specific control flags */
+void
+sb_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+	uint32 w;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	ASSERT((val & ~mask) == 0);
+
+	/* mask and set */
+	w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) |
+	        (val << SBTML_SICF_SHIFT);
+	W_SBREG(sii, &sb->sbtmstatelow, w);
+}
+
+/* set/clear core-specific control flags */
+uint32
+sb_core_cflags(si_t *sih, uint32 mask, uint32 val)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+	uint32 w;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	ASSERT((val & ~mask) == 0);
+
+	/* mask and set */
+	if (mask || val) {
+		w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) |
+		        (val << SBTML_SICF_SHIFT);
+		W_SBREG(sii, &sb->sbtmstatelow, w);
+	}
+
+	/* return the new value
+	 * for write operation, the following readback ensures the completion of write opration.
+	 */
+	return (R_SBREG(sii, &sb->sbtmstatelow) >> SBTML_SICF_SHIFT);
+}
+
+/* set/clear core-specific status flags */
+uint32
+sb_core_sflags(si_t *sih, uint32 mask, uint32 val)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+	uint32 w;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	ASSERT((val & ~mask) == 0);
+	ASSERT((mask & ~SISF_CORE_BITS) == 0);
+
+	/* mask and set */
+	if (mask || val) {
+		w = (R_SBREG(sii, &sb->sbtmstatehigh) & ~(mask << SBTMH_SISF_SHIFT)) |
+		        (val << SBTMH_SISF_SHIFT);
+		W_SBREG(sii, &sb->sbtmstatehigh, w);
+	}
+
+	/* return the new value */
+	return (R_SBREG(sii, &sb->sbtmstatehigh) >> SBTMH_SISF_SHIFT);
+}
+
+bool
+sb_iscoreup(si_t *sih)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	return ((R_SBREG(sii, &sb->sbtmstatelow) &
+	         (SBTML_RESET | SBTML_REJ_MASK | (SICF_CLOCK_EN << SBTML_SICF_SHIFT))) ==
+	        (SICF_CLOCK_EN << SBTML_SICF_SHIFT));
+}
+
+/*
+ * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
+ * switch back to the original core, and return the new value.
+ *
+ * When using the silicon backplane, no fidleing with interrupts or core switches are needed.
+ *
+ * Also, when using pci/pcie, we can optimize away the core switching for pci registers
+ * and (on newer pci cores) chipcommon registers.
+ */
+uint
+sb_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
+{
+	uint origidx = 0;
+	uint32 *r = NULL;
+	uint w;
+	uint intr_val = 0;
+	bool fast = FALSE;
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+	ASSERT(GOODIDX(coreidx));
+	ASSERT(regoff < SI_CORE_SIZE);
+	ASSERT((val & ~mask) == 0);
+
+	if (coreidx >= SI_MAXCORES)
+		return 0;
+
+	if (BUSTYPE(sii->pub.bustype) == SI_BUS) {
+		/* If internal bus, we can always get at everything */
+		fast = TRUE;
+		/* map if does not exist */
+		if (!cores_info->regs[coreidx]) {
+			cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
+			                            SI_CORE_SIZE);
+			ASSERT(GOODREGS(cores_info->regs[coreidx]));
+		}
+		r = (uint32 *)((uchar *)cores_info->regs[coreidx] + regoff);
+	} else if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
+		/* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
+
+		if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
+			/* Chipc registers are mapped at 12KB */
+
+			fast = TRUE;
+			r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff);
+		} else if (sii->pub.buscoreidx == coreidx) {
+			/* pci registers are at either in the last 2KB of an 8KB window
+			 * or, in pcie and pci rev 13 at 8KB
+			 */
+			fast = TRUE;
+			if (SI_FAST(sii))
+				r = (uint32 *)((char *)sii->curmap +
+				               PCI_16KB0_PCIREGS_OFFSET + regoff);
+			else
+				r = (uint32 *)((char *)sii->curmap +
+				               ((regoff >= SBCONFIGOFF) ?
+				                PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
+				               regoff);
+		}
+	}
+
+	if (!fast) {
+		INTR_OFF(sii, intr_val);
+
+		/* save current core index */
+		origidx = si_coreidx(&sii->pub);
+
+		/* switch core */
+		r = (uint32*) ((uchar*)sb_setcoreidx(&sii->pub, coreidx) + regoff);
+	}
+	ASSERT(r != NULL);
+
+	/* mask and set */
+	if (mask || val) {
+		if (regoff >= SBCONFIGOFF) {
+			w = (R_SBREG(sii, r) & ~mask) | val;
+			W_SBREG(sii, r, w);
+		} else {
+			w = (R_REG(sii->osh, r) & ~mask) | val;
+			W_REG(sii->osh, r, w);
+		}
+	}
+
+	/* readback */
+	if (regoff >= SBCONFIGOFF)
+		w = R_SBREG(sii, r);
+	else {
+		if ((CHIPID(sii->pub.chip) == BCM5354_CHIP_ID) &&
+		    (coreidx == SI_CC_IDX) &&
+		    (regoff == OFFSETOF(chipcregs_t, watchdog))) {
+			w = val;
+		} else
+			w = R_REG(sii->osh, r);
+	}
+
+	if (!fast) {
+		/* restore core index */
+		if (origidx != coreidx)
+			sb_setcoreidx(&sii->pub, origidx);
+
+		INTR_RESTORE(sii, intr_val);
+	}
+
+	return (w);
+}
+
+/*
+ * If there is no need for fiddling with interrupts or core switches (typically silicon
+ * back plane registers, pci registers and chipcommon registers), this function
+ * returns the register offset on this core to a mapped address. This address can
+ * be used for W_REG/R_REG directly.
+ *
+ * For accessing registers that would need a core switch, this function will return
+ * NULL.
+ */
+uint32 *
+sb_corereg_addr(si_t *sih, uint coreidx, uint regoff)
+{
+	uint32 *r = NULL;
+	bool fast = FALSE;
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+	ASSERT(GOODIDX(coreidx));
+	ASSERT(regoff < SI_CORE_SIZE);
+
+	if (coreidx >= SI_MAXCORES)
+		return 0;
+
+	if (BUSTYPE(sii->pub.bustype) == SI_BUS) {
+		/* If internal bus, we can always get at everything */
+		fast = TRUE;
+		/* map if does not exist */
+		if (!cores_info->regs[coreidx]) {
+			cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
+			                            SI_CORE_SIZE);
+			ASSERT(GOODREGS(cores_info->regs[coreidx]));
+		}
+		r = (uint32 *)((uchar *)cores_info->regs[coreidx] + regoff);
+	} else if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
+		/* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
+
+		if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
+			/* Chipc registers are mapped at 12KB */
+
+			fast = TRUE;
+			r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff);
+		} else if (sii->pub.buscoreidx == coreidx) {
+			/* pci registers are at either in the last 2KB of an 8KB window
+			 * or, in pcie and pci rev 13 at 8KB
+			 */
+			fast = TRUE;
+			if (SI_FAST(sii))
+				r = (uint32 *)((char *)sii->curmap +
+				               PCI_16KB0_PCIREGS_OFFSET + regoff);
+			else
+				r = (uint32 *)((char *)sii->curmap +
+				               ((regoff >= SBCONFIGOFF) ?
+				                PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
+				               regoff);
+		}
+	}
+
+	if (!fast)
+		return 0;
+
+	return (r);
+}
+
+/* Scan the enumeration space to find all cores starting from the given
+ * bus 'sbba'. Append coreid and other info to the lists in 'si'. 'sba'
+ * is the default core address at chip POR time and 'regs' is the virtual
+ * address that the default core is mapped at. 'ncores' is the number of
+ * cores expected on bus 'sbba'. It returns the total number of cores
+ * starting from bus 'sbba', inclusive.
+ */
+#define SB_MAXBUSES	2
+static uint
+_sb_scan(si_info_t *sii, uint32 sba, void *regs, uint bus, uint32 sbba, uint numcores)
+{
+	uint next;
+	uint ncc = 0;
+	uint i;
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+	if (bus >= SB_MAXBUSES) {
+		SI_ERROR(("_sb_scan: bus 0x%08x at level %d is too deep to scan\n", sbba, bus));
+		return 0;
+	}
+	SI_MSG(("_sb_scan: scan bus 0x%08x assume %u cores\n", sbba, numcores));
+
+	/* Scan all cores on the bus starting from core 0.
+	 * Core addresses must be contiguous on each bus.
+	 */
+	for (i = 0, next = sii->numcores; i < numcores && next < SB_BUS_MAXCORES; i++, next++) {
+		cores_info->coresba[next] = sbba + (i * SI_CORE_SIZE);
+
+		/* keep and reuse the initial register mapping */
+		if ((BUSTYPE(sii->pub.bustype) == SI_BUS) && (cores_info->coresba[next] == sba)) {
+			SI_VMSG(("_sb_scan: reuse mapped regs %p for core %u\n", regs, next));
+			cores_info->regs[next] = regs;
+		}
+
+		/* change core to 'next' and read its coreid */
+		sii->curmap = _sb_setcoreidx(sii, next);
+		sii->curidx = next;
+
+		cores_info->coreid[next] = sb_coreid(&sii->pub);
+
+		/* core specific processing... */
+		/* chipc provides # cores */
+		if (cores_info->coreid[next] == CC_CORE_ID) {
+			chipcregs_t *cc = (chipcregs_t *)sii->curmap;
+			uint32 ccrev = sb_corerev(&sii->pub);
+
+			/* determine numcores - this is the total # cores in the chip */
+			if (((ccrev == 4) || (ccrev >= 6))) {
+				ASSERT(cc);
+				numcores = (R_REG(sii->osh, &cc->chipid) & CID_CC_MASK) >>
+				        CID_CC_SHIFT;
+			} else {
+				/* Older chips */
+				uint chip = CHIPID(sii->pub.chip);
+
+				if (chip == BCM4306_CHIP_ID)	/* < 4306c0 */
+					numcores = 6;
+				else if (chip == BCM4704_CHIP_ID)
+					numcores = 9;
+				else if (chip == BCM5365_CHIP_ID)
+					numcores = 7;
+				else {
+					SI_ERROR(("sb_chip2numcores: unsupported chip 0x%x\n",
+					          chip));
+					ASSERT(0);
+					numcores = 1;
+				}
+			}
+			SI_VMSG(("_sb_scan: there are %u cores in the chip %s\n", numcores,
+				sii->pub.issim ? "QT" : ""));
+		}
+		/* scan bridged SB(s) and add results to the end of the list */
+		else if (cores_info->coreid[next] == OCP_CORE_ID) {
+			sbconfig_t *sb = REGS2SB(sii->curmap);
+			uint32 nsbba = R_SBREG(sii, &sb->sbadmatch1);
+			uint nsbcc;
+
+			sii->numcores = next + 1;
+
+			if ((nsbba & 0xfff00000) != SI_ENUM_BASE)
+				continue;
+			nsbba &= 0xfffff000;
+			if (_sb_coreidx(sii, nsbba) != BADIDX)
+				continue;
+
+			nsbcc = (R_SBREG(sii, &sb->sbtmstatehigh) & 0x000f0000) >> 16;
+			nsbcc = _sb_scan(sii, sba, regs, bus + 1, nsbba, nsbcc);
+			if (sbba == SI_ENUM_BASE)
+				numcores -= nsbcc;
+			ncc += nsbcc;
+		}
+	}
+
+	SI_MSG(("_sb_scan: found %u cores on bus 0x%08x\n", i, sbba));
+
+	sii->numcores = i + ncc;
+	return sii->numcores;
+}
+
+/* scan the sb enumerated space to identify all cores */
+void
+sb_scan(si_t *sih, void *regs, uint devid)
+{
+	uint32 origsba;
+	sbconfig_t *sb;
+	si_info_t *sii = SI_INFO(sih);
+
+	sb = REGS2SB(sii->curmap);
+
+	sii->pub.socirev = (R_SBREG(sii, &sb->sbidlow) & SBIDL_RV_MASK) >> SBIDL_RV_SHIFT;
+
+	/* Save the current core info and validate it later till we know
+	 * for sure what is good and what is bad.
+	 */
+	origsba = _sb_coresba(sii);
+
+	/* scan all SB(s) starting from SI_ENUM_BASE */
+	sii->numcores = _sb_scan(sii, origsba, regs, 0, SI_ENUM_BASE, 1);
+}
+
+/*
+ * This function changes logical "focus" to the indicated core;
+ * must be called with interrupts off.
+ * Moreover, callers should keep interrupts off during switching out of and back to d11 core
+ */
+void *
+sb_setcoreidx(si_t *sih, uint coreidx)
+{
+	si_info_t *sii = SI_INFO(sih);
+
+	if (coreidx >= sii->numcores)
+		return (NULL);
+
+	/*
+	 * If the user has provided an interrupt mask enabled function,
+	 * then assert interrupts are disabled before switching the core.
+	 */
+	ASSERT((sii->intrsenabled_fn == NULL) || !(*(sii)->intrsenabled_fn)((sii)->intr_arg));
+
+	sii->curmap = _sb_setcoreidx(sii, coreidx);
+	sii->curidx = coreidx;
+
+	return (sii->curmap);
+}
+
+/* This function changes the logical "focus" to the indicated core.
+ * Return the current core's virtual address.
+ */
+static void *
+_sb_setcoreidx(si_info_t *sii, uint coreidx)
+{
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint32 sbaddr = cores_info->coresba[coreidx];
+	void *regs;
+
+	switch (BUSTYPE(sii->pub.bustype)) {
+	case SI_BUS:
+		/* map new one */
+		if (!cores_info->regs[coreidx]) {
+			cores_info->regs[coreidx] = REG_MAP(sbaddr, SI_CORE_SIZE);
+			ASSERT(GOODREGS(cores_info->regs[coreidx]));
+		}
+		regs = cores_info->regs[coreidx];
+		break;
+
+	case PCI_BUS:
+		/* point bar0 window */
+		OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, sbaddr);
+		regs = sii->curmap;
+		break;
+
+	case PCMCIA_BUS: {
+		uint8 tmp = (sbaddr >> 12) & 0x0f;
+		OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR0, &tmp, 1);
+		tmp = (sbaddr >> 16) & 0xff;
+		OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR1, &tmp, 1);
+		tmp = (sbaddr >> 24) & 0xff;
+		OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR2, &tmp, 1);
+		regs = sii->curmap;
+		break;
+	}
+#ifdef BCMSDIO
+	case SPI_BUS:
+	case SDIO_BUS:
+		/* map new one */
+		if (!cores_info->regs[coreidx]) {
+			cores_info->regs[coreidx] = (void *)(uintptr)sbaddr;
+			ASSERT(GOODREGS(cores_info->regs[coreidx]));
+		}
+		regs = cores_info->regs[coreidx];
+		break;
+#endif	/* BCMSDIO */
+
+
+	default:
+		ASSERT(0);
+		regs = NULL;
+		break;
+	}
+
+	return regs;
+}
+
+/* Return the address of sbadmatch0/1/2/3 register */
+static volatile uint32 *
+sb_admatch(si_info_t *sii, uint asidx)
+{
+	sbconfig_t *sb;
+	volatile uint32 *addrm;
+
+	sb = REGS2SB(sii->curmap);
+
+	switch (asidx) {
+	case 0:
+		addrm =  &sb->sbadmatch0;
+		break;
+
+	case 1:
+		addrm =  &sb->sbadmatch1;
+		break;
+
+	case 2:
+		addrm =  &sb->sbadmatch2;
+		break;
+
+	case 3:
+		addrm =  &sb->sbadmatch3;
+		break;
+
+	default:
+		SI_ERROR(("%s: Address space index (%d) out of range\n", __FUNCTION__, asidx));
+		return 0;
+	}
+
+	return (addrm);
+}
+
+/* Return the number of address spaces in current core */
+int
+sb_numaddrspaces(si_t *sih)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	/* + 1 because of enumeration space */
+	return ((R_SBREG(sii, &sb->sbidlow) & SBIDL_AR_MASK) >> SBIDL_AR_SHIFT) + 1;
+}
+
+/* Return the address of the nth address space in the current core */
+uint32
+sb_addrspace(si_t *sih, uint asidx)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+
+	return (sb_base(R_SBREG(sii, sb_admatch(sii, asidx))));
+}
+
+/* Return the size of the nth address space in the current core */
+uint32
+sb_addrspacesize(si_t *sih, uint asidx)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+
+	return (sb_size(R_SBREG(sii, sb_admatch(sii, asidx))));
+}
+
+
+/* do buffered registers update */
+void
+sb_commit(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint origidx;
+	uint intr_val = 0;
+
+	origidx = sii->curidx;
+	ASSERT(GOODIDX(origidx));
+
+	INTR_OFF(sii, intr_val);
+
+	/* switch over to chipcommon core if there is one, else use pci */
+	if (sii->pub.ccrev != NOREV) {
+		chipcregs_t *ccregs = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+		ASSERT(ccregs != NULL);
+
+		/* do the buffer registers update */
+		W_REG(sii->osh, &ccregs->broadcastaddress, SB_COMMIT);
+		W_REG(sii->osh, &ccregs->broadcastdata, 0x0);
+	} else
+		ASSERT(0);
+
+	/* restore core index */
+	sb_setcoreidx(sih, origidx);
+	INTR_RESTORE(sii, intr_val);
+}
+
+void
+sb_core_disable(si_t *sih, uint32 bits)
+{
+	si_info_t *sii;
+	volatile uint32 dummy;
+	sbconfig_t *sb;
+
+	sii = SI_INFO(sih);
+
+	ASSERT(GOODREGS(sii->curmap));
+	sb = REGS2SB(sii->curmap);
+
+	/* if core is already in reset, just return */
+	if (R_SBREG(sii, &sb->sbtmstatelow) & SBTML_RESET)
+		return;
+
+	/* if clocks are not enabled, put into reset and return */
+	if ((R_SBREG(sii, &sb->sbtmstatelow) & (SICF_CLOCK_EN << SBTML_SICF_SHIFT)) == 0)
+		goto disable;
+
+	/* set target reject and spin until busy is clear (preserve core-specific bits) */
+	OR_SBREG(sii, &sb->sbtmstatelow, SBTML_REJ);
+	dummy = R_SBREG(sii, &sb->sbtmstatelow);
+	BCM_REFERENCE(dummy);
+	OSL_DELAY(1);
+	SPINWAIT((R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY), 100000);
+	if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY)
+		SI_ERROR(("%s: target state still busy\n", __FUNCTION__));
+
+	if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT) {
+		OR_SBREG(sii, &sb->sbimstate, SBIM_RJ);
+		dummy = R_SBREG(sii, &sb->sbimstate);
+		BCM_REFERENCE(dummy);
+		OSL_DELAY(1);
+		SPINWAIT((R_SBREG(sii, &sb->sbimstate) & SBIM_BY), 100000);
+	}
+
+	/* set reset and reject while enabling the clocks */
+	W_SBREG(sii, &sb->sbtmstatelow,
+	        (((bits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
+	         SBTML_REJ | SBTML_RESET));
+	dummy = R_SBREG(sii, &sb->sbtmstatelow);
+	BCM_REFERENCE(dummy);
+	OSL_DELAY(10);
+
+	/* don't forget to clear the initiator reject bit */
+	if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT)
+		AND_SBREG(sii, &sb->sbimstate, ~SBIM_RJ);
+
+disable:
+	/* leave reset and reject asserted */
+	W_SBREG(sii, &sb->sbtmstatelow, ((bits << SBTML_SICF_SHIFT) | SBTML_REJ | SBTML_RESET));
+	OSL_DELAY(1);
+}
+
+/* reset and re-enable a core
+ * inputs:
+ * bits - core specific bits that are set during and after reset sequence
+ * resetbits - core specific bits that are set only during reset sequence
+ */
+void
+sb_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+	volatile uint32 dummy;
+
+	sii = SI_INFO(sih);
+	ASSERT(GOODREGS(sii->curmap));
+	sb = REGS2SB(sii->curmap);
+
+	/*
+	 * Must do the disable sequence first to work for arbitrary current core state.
+	 */
+	sb_core_disable(sih, (bits | resetbits));
+
+	/*
+	 * Now do the initialization sequence.
+	 */
+
+	/* set reset while enabling the clock and forcing them on throughout the core */
+	W_SBREG(sii, &sb->sbtmstatelow,
+	        (((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
+	         SBTML_RESET));
+	dummy = R_SBREG(sii, &sb->sbtmstatelow);
+	BCM_REFERENCE(dummy);
+	OSL_DELAY(1);
+
+	if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_SERR) {
+		W_SBREG(sii, &sb->sbtmstatehigh, 0);
+	}
+	if ((dummy = R_SBREG(sii, &sb->sbimstate)) & (SBIM_IBE | SBIM_TO)) {
+		AND_SBREG(sii, &sb->sbimstate, ~(SBIM_IBE | SBIM_TO));
+	}
+
+	/* clear reset and allow it to propagate throughout the core */
+	W_SBREG(sii, &sb->sbtmstatelow,
+	        ((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT));
+	dummy = R_SBREG(sii, &sb->sbtmstatelow);
+	BCM_REFERENCE(dummy);
+	OSL_DELAY(1);
+
+	/* leave clock enabled */
+	W_SBREG(sii, &sb->sbtmstatelow, ((bits | SICF_CLOCK_EN) << SBTML_SICF_SHIFT));
+	dummy = R_SBREG(sii, &sb->sbtmstatelow);
+	BCM_REFERENCE(dummy);
+	OSL_DELAY(1);
+}
+
+/*
+ * Set the initiator timeout for the "master core".
+ * The master core is defined to be the core in control
+ * of the chip and so it issues accesses to non-memory
+ * locations (Because of dma *any* core can access memeory).
+ *
+ * The routine uses the bus to decide who is the master:
+ *	SI_BUS => mips
+ *	JTAG_BUS => chipc
+ *	PCI_BUS => pci or pcie
+ *	PCMCIA_BUS => pcmcia
+ *	SDIO_BUS => pcmcia
+ *
+ * This routine exists so callers can disable initiator
+ * timeouts so accesses to very slow devices like otp
+ * won't cause an abort. The routine allows arbitrary
+ * settings of the service and request timeouts, though.
+ *
+ * Returns the timeout state before changing it or -1
+ * on error.
+ */
+
+#define	TO_MASK	(SBIMCL_RTO_MASK | SBIMCL_STO_MASK)
+
+uint32
+sb_set_initiator_to(si_t *sih, uint32 to, uint idx)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint origidx;
+	uint intr_val = 0;
+	uint32 tmp, ret = 0xffffffff;
+	sbconfig_t *sb;
+
+
+	if ((to & ~TO_MASK) != 0)
+		return ret;
+
+	/* Figure out the master core */
+	if (idx == BADIDX) {
+		switch (BUSTYPE(sii->pub.bustype)) {
+		case PCI_BUS:
+			idx = sii->pub.buscoreidx;
+			break;
+		case JTAG_BUS:
+			idx = SI_CC_IDX;
+			break;
+		case PCMCIA_BUS:
+#ifdef BCMSDIO
+		case SDIO_BUS:
+#endif
+			idx = si_findcoreidx(sih, PCMCIA_CORE_ID, 0);
+			break;
+		case SI_BUS:
+			idx = si_findcoreidx(sih, MIPS33_CORE_ID, 0);
+			break;
+		default:
+			ASSERT(0);
+		}
+		if (idx == BADIDX)
+			return ret;
+	}
+
+	INTR_OFF(sii, intr_val);
+	origidx = si_coreidx(sih);
+
+	sb = REGS2SB(sb_setcoreidx(sih, idx));
+
+	tmp = R_SBREG(sii, &sb->sbimconfiglow);
+	ret = tmp & TO_MASK;
+	W_SBREG(sii, &sb->sbimconfiglow, (tmp & ~TO_MASK) | to);
+
+	sb_commit(sih);
+	sb_setcoreidx(sih, origidx);
+	INTR_RESTORE(sii, intr_val);
+	return ret;
+}
+
+uint32
+sb_base(uint32 admatch)
+{
+	uint32 base;
+	uint type;
+
+	type = admatch & SBAM_TYPE_MASK;
+	ASSERT(type < 3);
+
+	base = 0;
+
+	if (type == 0) {
+		base = admatch & SBAM_BASE0_MASK;
+	} else if (type == 1) {
+		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
+		base = admatch & SBAM_BASE1_MASK;
+	} else if (type == 2) {
+		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
+		base = admatch & SBAM_BASE2_MASK;
+	}
+
+	return (base);
+}
+
+uint32
+sb_size(uint32 admatch)
+{
+	uint32 size;
+	uint type;
+
+	type = admatch & SBAM_TYPE_MASK;
+	ASSERT(type < 3);
+
+	size = 0;
+
+	if (type == 0) {
+		size = 1 << (((admatch & SBAM_ADINT0_MASK) >> SBAM_ADINT0_SHIFT) + 1);
+	} else if (type == 1) {
+		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
+		size = 1 << (((admatch & SBAM_ADINT1_MASK) >> SBAM_ADINT1_SHIFT) + 1);
+	} else if (type == 2) {
+		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
+		size = 1 << (((admatch & SBAM_ADINT2_MASK) >> SBAM_ADINT2_SHIFT) + 1);
+	}
+
+	return (size);
+}
+
+#if defined(BCMDBG_PHYDUMP)
+/* print interesting sbconfig registers */
+void
+sb_dumpregs(si_t *sih, struct bcmstrbuf *b)
+{
+	sbconfig_t *sb;
+	uint origidx, i, intr_val = 0;
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+	origidx = sii->curidx;
+
+	INTR_OFF(sii, intr_val);
+
+	for (i = 0; i < sii->numcores; i++) {
+		sb = REGS2SB(sb_setcoreidx(sih, i));
+
+		bcm_bprintf(b, "core 0x%x: \n", cores_info->coreid[i]);
+
+		if (sii->pub.socirev > SONICS_2_2)
+			bcm_bprintf(b, "sbimerrlog 0x%x sbimerrloga 0x%x\n",
+			          sb_corereg(sih, si_coreidx(&sii->pub), SBIMERRLOG, 0, 0),
+			          sb_corereg(sih, si_coreidx(&sii->pub), SBIMERRLOGA, 0, 0));
+
+		bcm_bprintf(b, "sbtmstatelow 0x%x sbtmstatehigh 0x%x sbidhigh 0x%x "
+		            "sbimstate 0x%x\n sbimconfiglow 0x%x sbimconfighigh 0x%x\n",
+		            R_SBREG(sii, &sb->sbtmstatelow), R_SBREG(sii, &sb->sbtmstatehigh),
+		            R_SBREG(sii, &sb->sbidhigh), R_SBREG(sii, &sb->sbimstate),
+		            R_SBREG(sii, &sb->sbimconfiglow), R_SBREG(sii, &sb->sbimconfighigh));
+	}
+
+	sb_setcoreidx(sih, origidx);
+	INTR_RESTORE(sii, intr_val);
+}
+#endif	
diff --git a/drivers/net/wireless/bcmdhd/siutils.c b/drivers/net/wireless/bcmdhd/siutils.c
new file mode 100644
index 0000000..0121025
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/siutils.c
@@ -0,0 +1,2982 @@
+/*
+ * Misc utility routines for accessing chip-specific features
+ * of the SiliconBackplane-based Broadcom chips.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: siutils.c 474902 2014-05-02 18:31:33Z $
+ */
+
+#include <bcm_cfg.h>
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <siutils.h>
+#include <bcmdevs.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+#include <pcicfg.h>
+#include <sbpcmcia.h>
+#include <sbsocram.h>
+#ifdef BCMSDIO
+#include <bcmsdh.h>
+#include <sdio.h>
+#include <sbsdio.h>
+#include <sbhnddma.h>
+#include <sbsdpcmdev.h>
+#include <bcmsdpcm.h>
+#endif /* BCMSDIO */
+#include <hndpmu.h>
+
+#ifdef BCM_SDRBL
+#include <hndcpu.h>
+#endif /* BCM_SDRBL */
+#ifdef HNDGCI
+#include <hndgci.h>
+#endif /* HNDGCI */
+
+#include "siutils_priv.h"
+
+/**
+ * A set of PMU registers is clocked in the ILP domain, which has an implication on register write
+ * behavior: if such a register is written, it takes multiple ILP clocks for the PMU block to absorb
+ * the write. During that time the 'SlowWritePending' bit in the PMUStatus register is set.
+ */
+#define PMUREGS_ILP_SENSITIVE(regoff) \
+	((regoff) == OFFSETOF(pmuregs_t, pmutimer) || \
+	 (regoff) == OFFSETOF(pmuregs_t, pmuwatchdog) || \
+	 (regoff) == OFFSETOF(pmuregs_t, res_req_timer))
+
+#define CHIPCREGS_ILP_SENSITIVE(regoff) \
+	((regoff) == OFFSETOF(chipcregs_t, pmutimer) || \
+	 (regoff) == OFFSETOF(chipcregs_t, pmuwatchdog) || \
+	 (regoff) == OFFSETOF(chipcregs_t, res_req_timer))
+
+/* local prototypes */
+static si_info_t *si_doattach(si_info_t *sii, uint devid, osl_t *osh, void *regs,
+                              uint bustype, void *sdh, char **vars, uint *varsz);
+static bool si_buscore_prep(si_info_t *sii, uint bustype, uint devid, void *sdh);
+static bool si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin,
+	uint *origidx, void *regs);
+
+
+static bool si_pmu_is_ilp_sensitive(uint32 idx, uint regoff);
+
+#ifdef BCMLTECOEX
+static void si_config_gcigpio(si_t *sih, uint32 gci_pos, uint8 gcigpio,
+	uint8 gpioctl_mask, uint8 gpioctl_val);
+#endif /* BCMLTECOEX */
+
+
+/* global variable to indicate reservation/release of gpio's */
+static uint32 si_gpioreservation = 0;
+
+/* global flag to prevent shared resources from being initialized multiple times in si_attach() */
+#ifdef SR_DEBUG
+static const uint32 si_power_island_test_array[] = {
+	0x0000, 0x0001, 0x0010, 0x0011,
+	0x0100, 0x0101, 0x0110, 0x0111,
+	0x1000, 0x1001, 0x1010, 0x1011,
+	0x1100, 0x1101, 0x1110, 0x1111
+};
+#endif /* SR_DEBUG */
+
+int do_4360_pcie2_war = 0;
+
+/* global kernel resource */
+static si_info_t ksii;
+static si_cores_info_t ksii_cores_info;
+
+/**
+ * Allocate an si handle. This function may be called multiple times.
+ *
+ * devid - pci device id (used to determine chip#)
+ * osh - opaque OS handle
+ * regs - virtual address of initial core registers
+ * bustype - pci/pcmcia/sb/sdio/etc
+ * vars - pointer to a to-be created pointer area for "environment" variables. Some callers of this
+ *        function set 'vars' to NULL, making dereferencing of this parameter undesired.
+ * varsz - pointer to int to return the size of the vars
+ */
+si_t *
+si_attach(uint devid, osl_t *osh, void *regs,
+                       uint bustype, void *sdh, char **vars, uint *varsz)
+{
+	si_info_t *sii;
+	si_cores_info_t *cores_info;
+	/* alloc si_info_t */
+	if ((sii = MALLOCZ(osh, sizeof (si_info_t))) == NULL) {
+		SI_ERROR(("si_attach: malloc failed! malloced %d bytes\n", MALLOCED(osh)));
+		return (NULL);
+	}
+
+	/* alloc si_cores_info_t */
+	if ((cores_info = (si_cores_info_t *)MALLOCZ(osh, sizeof (si_cores_info_t))) == NULL) {
+		SI_ERROR(("si_attach: malloc failed! malloced %d bytes\n", MALLOCED(osh)));
+		MFREE(osh, sii, sizeof(si_info_t));
+		return (NULL);
+	}
+	sii->cores_info = cores_info;
+
+	if (si_doattach(sii, devid, osh, regs, bustype, sdh, vars, varsz) == NULL) {
+		MFREE(osh, sii, sizeof(si_info_t));
+		MFREE(osh, cores_info, sizeof(si_cores_info_t));
+		return (NULL);
+	}
+	sii->vars = vars ? *vars : NULL;
+	sii->varsz = varsz ? *varsz : 0;
+
+	return (si_t *)sii;
+}
+
+
+static uint32	wd_msticks;		/* watchdog timer ticks normalized to ms */
+
+/** generic kernel variant of si_attach() */
+si_t *
+si_kattach(osl_t *osh)
+{
+	static bool ksii_attached = FALSE;
+	si_cores_info_t *cores_info;
+
+	if (!ksii_attached) {
+		void *regs = NULL;
+		regs = REG_MAP(SI_ENUM_BASE, SI_CORE_SIZE);
+		cores_info = (si_cores_info_t *)&ksii_cores_info;
+		ksii.cores_info = cores_info;
+
+		ASSERT(osh);
+		if (si_doattach(&ksii, BCM4710_DEVICE_ID, osh, regs,
+		                SI_BUS, NULL,
+		                osh != SI_OSH ? &(ksii.vars) : NULL,
+		                osh != SI_OSH ? &(ksii.varsz) : NULL) == NULL) {
+			SI_ERROR(("si_kattach: si_doattach failed\n"));
+			REG_UNMAP(regs);
+			return NULL;
+		}
+		REG_UNMAP(regs);
+
+		/* save ticks normalized to ms for si_watchdog_ms() */
+		if (PMUCTL_ENAB(&ksii.pub)) {
+				/* based on 32KHz ILP clock */
+				wd_msticks = 32;
+		} else {
+			wd_msticks = ALP_CLOCK / 1000;
+		}
+
+		ksii_attached = TRUE;
+		SI_MSG(("si_kattach done. ccrev = %d, wd_msticks = %d\n",
+		        ksii.pub.ccrev, wd_msticks));
+	}
+
+	return &ksii.pub;
+}
+
+
+static bool
+si_buscore_prep(si_info_t *sii, uint bustype, uint devid, void *sdh)
+{
+	/* need to set memseg flag for CF card first before any sb registers access */
+	if (BUSTYPE(bustype) == PCMCIA_BUS)
+		sii->memseg = TRUE;
+
+
+#if defined(BCMSDIO)
+	if (BUSTYPE(bustype) == SDIO_BUS) {
+		int err;
+		uint8 clkset;
+
+		/* Try forcing SDIO core to do ALPAvail request only */
+		clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
+		bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+		if (!err) {
+			uint8 clkval;
+
+			/* If register supported, wait for ALPAvail and then force ALP */
+			clkval = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, NULL);
+			if ((clkval & ~SBSDIO_AVBITS) == clkset) {
+				SPINWAIT(((clkval = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+					SBSDIO_FUNC1_CHIPCLKCSR, NULL)), !SBSDIO_ALPAV(clkval)),
+					PMU_MAX_TRANSITION_DLY);
+				if (!SBSDIO_ALPAV(clkval)) {
+					SI_ERROR(("timeout on ALPAV wait, clkval 0x%02x\n",
+						clkval));
+					return FALSE;
+				}
+				clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP;
+				bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+					clkset, &err);
+				OSL_DELAY(65);
+			}
+		}
+
+		/* Also, disable the extra SDIO pull-ups */
+		bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
+	}
+
+#endif /* BCMSDIO && BCMDONGLEHOST */
+
+	return TRUE;
+}
+
+static bool
+si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin,
+	uint *origidx, void *regs)
+{
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	bool pci, pcie, pcie_gen2 = FALSE;
+	uint i;
+	uint pciidx, pcieidx, pcirev, pcierev;
+
+	cc = si_setcoreidx(&sii->pub, SI_CC_IDX);
+	ASSERT((uintptr)cc);
+
+	/* get chipcommon rev */
+	sii->pub.ccrev = (int)si_corerev(&sii->pub);
+
+	/* get chipcommon chipstatus */
+	if (sii->pub.ccrev >= 11)
+		sii->pub.chipst = R_REG(sii->osh, &cc->chipstatus);
+
+	/* get chipcommon capabilites */
+	sii->pub.cccaps = R_REG(sii->osh, &cc->capabilities);
+	/* get chipcommon extended capabilities */
+
+	if (sii->pub.ccrev >= 35)
+		sii->pub.cccaps_ext = R_REG(sii->osh, &cc->capabilities_ext);
+
+	/* get pmu rev and caps */
+	if (sii->pub.cccaps & CC_CAP_PMU) {
+		if (AOB_ENAB(&sii->pub)) {
+			uint pmucoreidx;
+			pmuregs_t *pmu;
+			pmucoreidx = si_findcoreidx(&sii->pub, PMU_CORE_ID, 0);
+			pmu = si_setcoreidx(&sii->pub, pmucoreidx);
+			sii->pub.pmucaps = R_REG(sii->osh, &pmu->pmucapabilities);
+			si_setcoreidx(&sii->pub, SI_CC_IDX);
+		} else
+			sii->pub.pmucaps = R_REG(sii->osh, &cc->pmucapabilities);
+
+		sii->pub.pmurev = sii->pub.pmucaps & PCAP_REV_MASK;
+	}
+
+	SI_MSG(("Chipc: rev %d, caps 0x%x, chipst 0x%x pmurev %d, pmucaps 0x%x\n",
+		sii->pub.ccrev, sii->pub.cccaps, sii->pub.chipst, sii->pub.pmurev,
+		sii->pub.pmucaps));
+
+	/* figure out bus/orignal core idx */
+	sii->pub.buscoretype = NODEV_CORE_ID;
+	sii->pub.buscorerev = (uint)NOREV;
+	sii->pub.buscoreidx = BADIDX;
+
+	pci = pcie = FALSE;
+	pcirev = pcierev = (uint)NOREV;
+	pciidx = pcieidx = BADIDX;
+
+	for (i = 0; i < sii->numcores; i++) {
+		uint cid, crev;
+
+		si_setcoreidx(&sii->pub, i);
+		cid = si_coreid(&sii->pub);
+		crev = si_corerev(&sii->pub);
+
+		/* Display cores found */
+		SI_VMSG(("CORE[%d]: id 0x%x rev %d base 0x%x regs 0x%p\n",
+		        i, cid, crev, cores_info->coresba[i], cores_info->regs[i]));
+
+		if (BUSTYPE(bustype) == SI_BUS) {
+			/* now look at the chipstatus register to figure the pacakge */
+			/* for SDIO but downloaded on PCIE dev */
+			if (cid == PCIE2_CORE_ID) {
+				if ((CHIPID(sii->pub.chip) == BCM43602_CHIP_ID) ||
+					((CHIPID(sii->pub.chip) == BCM4345_CHIP_ID) &&
+					CST4345_CHIPMODE_PCIE(sii->pub.chipst))) {
+					pcieidx = i;
+					pcierev = crev;
+					pcie = TRUE;
+					pcie_gen2 = TRUE;
+				}
+			}
+
+		}
+		else if (BUSTYPE(bustype) == PCI_BUS) {
+			if (cid == PCI_CORE_ID) {
+				pciidx = i;
+				pcirev = crev;
+				pci = TRUE;
+			} else if ((cid == PCIE_CORE_ID) || (cid == PCIE2_CORE_ID)) {
+				pcieidx = i;
+				pcierev = crev;
+				pcie = TRUE;
+				if (cid == PCIE2_CORE_ID)
+					pcie_gen2 = TRUE;
+			}
+		} else if ((BUSTYPE(bustype) == PCMCIA_BUS) &&
+		           (cid == PCMCIA_CORE_ID)) {
+			sii->pub.buscorerev = crev;
+			sii->pub.buscoretype = cid;
+			sii->pub.buscoreidx = i;
+		}
+#ifdef BCMSDIO
+		else if (((BUSTYPE(bustype) == SDIO_BUS) ||
+		          (BUSTYPE(bustype) == SPI_BUS)) &&
+		         ((cid == PCMCIA_CORE_ID) ||
+		          (cid == SDIOD_CORE_ID))) {
+			sii->pub.buscorerev = crev;
+			sii->pub.buscoretype = cid;
+			sii->pub.buscoreidx = i;
+		}
+#endif /* BCMSDIO */
+
+		/* find the core idx before entering this func. */
+		if ((savewin && (savewin == cores_info->coresba[i])) ||
+		    (regs == cores_info->regs[i]))
+			*origidx = i;
+	}
+
+#if defined(PCIE_FULL_DONGLE)
+	pci = FALSE;
+#endif
+	if (pci) {
+		sii->pub.buscoretype = PCI_CORE_ID;
+		sii->pub.buscorerev = pcirev;
+		sii->pub.buscoreidx = pciidx;
+	} else if (pcie) {
+		if (pcie_gen2)
+			sii->pub.buscoretype = PCIE2_CORE_ID;
+		else
+			sii->pub.buscoretype = PCIE_CORE_ID;
+		sii->pub.buscorerev = pcierev;
+		sii->pub.buscoreidx = pcieidx;
+	}
+
+	SI_VMSG(("Buscore id/type/rev %d/0x%x/%d\n", sii->pub.buscoreidx, sii->pub.buscoretype,
+	         sii->pub.buscorerev));
+
+	if (BUSTYPE(sii->pub.bustype) == SI_BUS && (CHIPID(sii->pub.chip) == BCM4712_CHIP_ID) &&
+	    (sii->pub.chippkg != BCM4712LARGE_PKG_ID) && (CHIPREV(sii->pub.chiprev) <= 3))
+		OR_REG(sii->osh, &cc->slow_clk_ctl, SCC_SS_XTAL);
+
+
+#if defined(BCMSDIO)
+	/* Make sure any on-chip ARM is off (in case strapping is wrong), or downloaded code was
+	 * already running.
+	 */
+	if ((BUSTYPE(bustype) == SDIO_BUS) || (BUSTYPE(bustype) == SPI_BUS)) {
+		if (si_setcore(&sii->pub, ARM7S_CORE_ID, 0) ||
+		    si_setcore(&sii->pub, ARMCM3_CORE_ID, 0))
+			si_core_disable(&sii->pub, 0);
+	}
+#endif /* BCMSDIO && BCMDONGLEHOST */
+
+	/* return to the original core */
+	si_setcoreidx(&sii->pub, *origidx);
+
+	return TRUE;
+}
+
+
+
+
+uint16
+si_chipid(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+
+	return (sii->chipnew) ? sii->chipnew : sih->chip;
+}
+
+static void
+si_chipid_fixup(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+
+	ASSERT(sii->chipnew == 0);
+	switch (sih->chip) {
+		case BCM43570_CHIP_ID:
+		case BCM4358_CHIP_ID:
+			sii->chipnew = sih->chip; /* save it */
+			sii->pub.chip = BCM43569_CHIP_ID; /* chip class */
+		break;
+		case BCM4356_CHIP_ID:
+			sii->chipnew = sih->chip; /* save it */
+			sii->pub.chip = BCM4354_CHIP_ID; /* chip class */
+		break;
+		default:
+		ASSERT(0);
+		break;
+	}
+}
+
+/**
+ * Allocate an si handle. This function may be called multiple times.
+ *
+ * vars - pointer to a to-be created pointer area for "environment" variables. Some callers of this
+ *        function set 'vars' to NULL.
+ */
+static si_info_t *
+si_doattach(si_info_t *sii, uint devid, osl_t *osh, void *regs,
+                       uint bustype, void *sdh, char **vars, uint *varsz)
+{
+	struct si_pub *sih = &sii->pub;
+	uint32 w, savewin;
+	chipcregs_t *cc;
+	char *pvars = NULL;
+	uint origidx;
+#if !defined(_CFEZ_) || defined(CFG_WL)
+#endif 
+
+	ASSERT(GOODREGS(regs));
+
+	savewin = 0;
+
+	sih->buscoreidx = BADIDX;
+
+	sii->curmap = regs;
+	sii->sdh = sdh;
+	sii->osh = osh;
+
+
+	/* check to see if we are a si core mimic'ing a pci core */
+	if ((bustype == PCI_BUS) &&
+	    (OSL_PCI_READ_CONFIG(sii->osh, PCI_SPROM_CONTROL, sizeof(uint32)) == 0xffffffff)) {
+		SI_ERROR(("%s: incoming bus is PCI but it's a lie, switching to SI "
+		          "devid:0x%x\n", __FUNCTION__, devid));
+		bustype = SI_BUS;
+	}
+
+	/* find Chipcommon address */
+	if (bustype == PCI_BUS) {
+		savewin = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32));
+		if (!GOODCOREADDR(savewin, SI_ENUM_BASE))
+			savewin = SI_ENUM_BASE;
+		OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, SI_ENUM_BASE);
+		if (!regs)
+			return NULL;
+		cc = (chipcregs_t *)regs;
+#ifdef BCMSDIO
+	} else if ((bustype == SDIO_BUS) || (bustype == SPI_BUS)) {
+		cc = (chipcregs_t *)sii->curmap;
+#endif
+	} else {
+		cc = (chipcregs_t *)REG_MAP(SI_ENUM_BASE, SI_CORE_SIZE);
+	}
+
+	sih->bustype = bustype;
+	if (bustype != BUSTYPE(bustype)) {
+		SI_ERROR(("si_doattach: bus type %d does not match configured bus type %d\n",
+			bustype, BUSTYPE(bustype)));
+		return NULL;
+	}
+
+	/* bus/core/clk setup for register access */
+	if (!si_buscore_prep(sii, bustype, devid, sdh)) {
+		SI_ERROR(("si_doattach: si_core_clk_prep failed %d\n", bustype));
+		return NULL;
+	}
+
+	/* ChipID recognition.
+	*   We assume we can read chipid at offset 0 from the regs arg.
+	*   If we add other chiptypes (or if we need to support old sdio hosts w/o chipcommon),
+	*   some way of recognizing them needs to be added here.
+	*/
+	if (!cc) {
+		SI_ERROR(("%s: chipcommon register space is null \n", __FUNCTION__));
+		return NULL;
+	}
+	w = R_REG(osh, &cc->chipid);
+	if ((w & 0xfffff) == 148277) w -= 65532;
+	sih->socitype = (w & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
+	/* Might as wll fill in chip id rev & pkg */
+	sih->chip = w & CID_ID_MASK;
+	sih->chiprev = (w & CID_REV_MASK) >> CID_REV_SHIFT;
+	sih->chippkg = (w & CID_PKG_MASK) >> CID_PKG_SHIFT;
+
+	if ((sih->chip == BCM4358_CHIP_ID) ||
+		(sih->chip == BCM43570_CHIP_ID) ||
+		(sih->chip == BCM4358_CHIP_ID)) {
+		si_chipid_fixup(sih);
+	}
+
+	if ((CHIPID(sih->chip) == BCM4329_CHIP_ID) && (sih->chiprev == 0) &&
+		(sih->chippkg != BCM4329_289PIN_PKG_ID)) {
+		sih->chippkg = BCM4329_182PIN_PKG_ID;
+	}
+	sih->issim = IS_SIM(sih->chippkg);
+
+	/* scan for cores */
+	if (CHIPTYPE(sii->pub.socitype) == SOCI_SB) {
+		SI_MSG(("Found chip type SB (0x%08x)\n", w));
+		sb_scan(&sii->pub, regs, devid);
+	} else if ((CHIPTYPE(sii->pub.socitype) == SOCI_AI) ||
+		(CHIPTYPE(sii->pub.socitype) == SOCI_NAI)) {
+		if (CHIPTYPE(sii->pub.socitype) == SOCI_AI)
+			SI_MSG(("Found chip type AI (0x%08x)\n", w));
+		else
+			SI_MSG(("Found chip type NAI (0x%08x)\n", w));
+		/* pass chipc address instead of original core base */
+		ai_scan(&sii->pub, (void *)(uintptr)cc, devid);
+	} else if (CHIPTYPE(sii->pub.socitype) == SOCI_UBUS) {
+		SI_MSG(("Found chip type UBUS (0x%08x), chip id = 0x%4x\n", w, sih->chip));
+		/* pass chipc address instead of original core base */
+		ub_scan(&sii->pub, (void *)(uintptr)cc, devid);
+	} else {
+		SI_ERROR(("Found chip of unknown type (0x%08x)\n", w));
+		return NULL;
+	}
+	/* no cores found, bail out */
+	if (sii->numcores == 0) {
+		SI_ERROR(("si_doattach: could not find any cores\n"));
+		return NULL;
+	}
+	/* bus/core/clk setup */
+	origidx = SI_CC_IDX;
+	if (!si_buscore_setup(sii, cc, bustype, savewin, &origidx, regs)) {
+		SI_ERROR(("si_doattach: si_buscore_setup failed\n"));
+		goto exit;
+	}
+
+#if !defined(_CFEZ_) || defined(CFG_WL)
+	if (CHIPID(sih->chip) == BCM4322_CHIP_ID && (((sih->chipst & CST4322_SPROM_OTP_SEL_MASK)
+		>> CST4322_SPROM_OTP_SEL_SHIFT) == (CST4322_OTP_PRESENT |
+		CST4322_SPROM_PRESENT))) {
+		SI_ERROR(("%s: Invalid setting: both SPROM and OTP strapped.\n", __FUNCTION__));
+		return NULL;
+	}
+
+	/* assume current core is CC */
+	if ((sii->pub.ccrev == 0x25) && ((CHIPID(sih->chip) == BCM43236_CHIP_ID ||
+	                                  CHIPID(sih->chip) == BCM43235_CHIP_ID ||
+	                                  CHIPID(sih->chip) == BCM43234_CHIP_ID ||
+	                                  CHIPID(sih->chip) == BCM43238_CHIP_ID) &&
+	                                 (CHIPREV(sii->pub.chiprev) <= 2))) {
+
+		if ((cc->chipstatus & CST43236_BP_CLK) != 0) {
+			uint clkdiv;
+			clkdiv = R_REG(osh, &cc->clkdiv);
+			/* otp_clk_div is even number, 120/14 < 9mhz */
+			clkdiv = (clkdiv & ~CLKD_OTP) | (14 << CLKD_OTP_SHIFT);
+			W_REG(osh, &cc->clkdiv, clkdiv);
+			SI_ERROR(("%s: set clkdiv to %x\n", __FUNCTION__, clkdiv));
+		}
+		OSL_DELAY(10);
+	}
+
+	if (bustype == PCI_BUS) {
+
+	}
+#endif 
+#ifdef BCM_SDRBL
+	/* 4360 rom bootloader in PCIE case, if the SDR is enabled, But preotection is
+	 * not turned on, then we want to hold arm in reset.
+	 * Bottomline: In sdrenable case, we allow arm to boot only when protection is
+	 * turned on.
+	 */
+	if (CHIP_HOSTIF_PCIE(&(sii->pub))) {
+		uint32 sflags = si_arm_sflags(&(sii->pub));
+
+		/* If SDR is enabled but protection is not turned on
+		* then we want to force arm to WFI.
+		*/
+		if ((sflags & (SISF_SDRENABLE | SISF_TCMPROT)) == SISF_SDRENABLE) {
+			disable_arm_irq();
+			while (1) {
+				hnd_cpu_wait(sih);
+			}
+		}
+	}
+#endif /* BCM_SDRBL */
+
+	pvars = NULL;
+	BCM_REFERENCE(pvars);
+
+
+
+		if (sii->pub.ccrev >= 20) {
+			uint32 gpiopullup = 0, gpiopulldown = 0;
+			cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+			ASSERT(cc != NULL);
+
+			/* 4314/43142 has pin muxing, don't clear gpio bits */
+			if ((CHIPID(sih->chip) == BCM4314_CHIP_ID) ||
+				(CHIPID(sih->chip) == BCM43142_CHIP_ID)) {
+				gpiopullup |= 0x402e0;
+				gpiopulldown |= 0x20500;
+			}
+
+			W_REG(osh, &cc->gpiopullup, gpiopullup);
+			W_REG(osh, &cc->gpiopulldown, gpiopulldown);
+			si_setcoreidx(sih, origidx);
+		}
+
+
+	/* clear any previous epidiag-induced target abort */
+	ASSERT(!si_taclear(sih, FALSE));
+
+
+#ifdef BOOTLOADER_CONSOLE_OUTPUT
+	/* Enable console prints */
+	si_muxenab(sii, 3);
+#endif
+
+	return (sii);
+
+exit:
+
+	return NULL;
+}
+
+/** may be called with core in reset */
+void
+si_detach(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint idx;
+
+
+	if (BUSTYPE(sih->bustype) == SI_BUS)
+		for (idx = 0; idx < SI_MAXCORES; idx++)
+			if (cores_info->regs[idx]) {
+				REG_UNMAP(cores_info->regs[idx]);
+				cores_info->regs[idx] = NULL;
+			}
+
+
+#if !defined(BCMBUSTYPE) || (BCMBUSTYPE == SI_BUS)
+	if (cores_info != &ksii_cores_info)
+#endif	/* !BCMBUSTYPE || (BCMBUSTYPE == SI_BUS) */
+		MFREE(sii->osh, cores_info, sizeof(si_cores_info_t));
+
+#if !defined(BCMBUSTYPE) || (BCMBUSTYPE == SI_BUS)
+	if (sii != &ksii)
+#endif	/* !BCMBUSTYPE || (BCMBUSTYPE == SI_BUS) */
+		MFREE(sii->osh, sii, sizeof(si_info_t));
+}
+
+void *
+si_osh(si_t *sih)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+	return sii->osh;
+}
+
+void
+si_setosh(si_t *sih, osl_t *osh)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+	if (sii->osh != NULL) {
+		SI_ERROR(("osh is already set....\n"));
+		ASSERT(!sii->osh);
+	}
+	sii->osh = osh;
+}
+
+/** register driver interrupt disabling and restoring callback functions */
+void
+si_register_intr_callback(si_t *sih, void *intrsoff_fn, void *intrsrestore_fn,
+                          void *intrsenabled_fn, void *intr_arg)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	sii->intr_arg = intr_arg;
+	sii->intrsoff_fn = (si_intrsoff_t)intrsoff_fn;
+	sii->intrsrestore_fn = (si_intrsrestore_t)intrsrestore_fn;
+	sii->intrsenabled_fn = (si_intrsenabled_t)intrsenabled_fn;
+	/* save current core id.  when this function called, the current core
+	 * must be the core which provides driver functions(il, et, wl, etc.)
+	 */
+	sii->dev_coreid = cores_info->coreid[sii->curidx];
+}
+
+void
+si_deregister_intr_callback(si_t *sih)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+	sii->intrsoff_fn = NULL;
+	sii->intrsrestore_fn = NULL;
+	sii->intrsenabled_fn = NULL;
+}
+
+uint
+si_intflag(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_intflag(sih);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return R_REG(sii->osh, ((uint32 *)(uintptr)
+			    (sii->oob_router + OOB_STATUSA)));
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+uint
+si_flag(si_t *sih)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_flag(sih);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return ai_flag(sih);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_flag(sih);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+uint
+si_flag_alt(si_t *sih)
+{
+	if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return ai_flag_alt(sih);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+void
+si_setint(si_t *sih, int siflag)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		sb_setint(sih, siflag);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		ai_setint(sih, siflag);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		ub_setint(sih, siflag);
+	else
+		ASSERT(0);
+}
+
+uint
+si_coreid(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+	return cores_info->coreid[sii->curidx];
+}
+
+uint
+si_coreidx(si_t *sih)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+	return sii->curidx;
+}
+
+void *
+si_d11_switch_addrbase(si_t *sih, uint coreunit)
+{
+	return si_setcore(sih,  D11_CORE_ID, coreunit);
+}
+
+/** return the core-type instantiation # of the current core */
+uint
+si_coreunit(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint idx;
+	uint coreid;
+	uint coreunit;
+	uint i;
+
+	coreunit = 0;
+
+	idx = sii->curidx;
+
+	ASSERT(GOODREGS(sii->curmap));
+	coreid = si_coreid(sih);
+
+	/* count the cores of our type */
+	for (i = 0; i < idx; i++)
+		if (cores_info->coreid[i] == coreid)
+			coreunit++;
+
+	return (coreunit);
+}
+
+uint
+si_corevendor(si_t *sih)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_corevendor(sih);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return ai_corevendor(sih);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_corevendor(sih);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+bool
+si_backplane64(si_t *sih)
+{
+	return ((sih->cccaps & CC_CAP_BKPLN64) != 0);
+}
+
+uint
+si_corerev(si_t *sih)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_corerev(sih);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return ai_corerev(sih);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_corerev(sih);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+
+/* return index of coreid or BADIDX if not found */
+uint
+si_findcoreidx(si_t *sih, uint coreid, uint coreunit)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint found;
+	uint i;
+
+
+	found = 0;
+
+	for (i = 0; i < sii->numcores; i++)
+		if (cores_info->coreid[i] == coreid) {
+			if (found == coreunit)
+				return (i);
+			found++;
+		}
+
+	return (BADIDX);
+}
+
+/** return total coreunit of coreid or zero if not found */
+uint
+si_numcoreunits(si_t *sih, uint coreid)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint found = 0;
+	uint i;
+
+	for (i = 0; i < sii->numcores; i++) {
+		if (cores_info->coreid[i] == coreid) {
+			found++;
+		}
+	}
+
+	return found;
+}
+
+/** return total D11 coreunits */
+uint
+BCMRAMFN(si_numd11coreunits)(si_t *sih)
+{
+	uint found = 0;
+
+	found = si_numcoreunits(sih, D11_CORE_ID);
+
+#if defined(WLRSDB) && defined(WLRSDB_DISABLED)
+	/* If RSDB functionality is compiled out,
+	 * then ignore any D11 cores beyond the first
+	 * Used in norsdb dongle build variants for rsdb chip.
+	 */
+	found = 1;
+#endif /* defined(WLRSDB) && !defined(WLRSDB_DISABLED) */
+
+	return found;
+}
+
+/** return list of found cores */
+uint
+si_corelist(si_t *sih, uint coreid[])
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+	bcopy((uchar*)cores_info->coreid, (uchar*)coreid, (sii->numcores * sizeof(uint)));
+	return (sii->numcores);
+}
+
+/** return current wrapper mapping */
+void *
+si_wrapperregs(si_t *sih)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+	ASSERT(GOODREGS(sii->curwrap));
+
+	return (sii->curwrap);
+}
+
+/** return current register mapping */
+void *
+si_coreregs(si_t *sih)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+	ASSERT(GOODREGS(sii->curmap));
+
+	return (sii->curmap);
+}
+
+/**
+ * This function changes logical "focus" to the indicated core;
+ * must be called with interrupts off.
+ * Moreover, callers should keep interrupts off during switching out of and back to d11 core
+ */
+void *
+si_setcore(si_t *sih, uint coreid, uint coreunit)
+{
+	uint idx;
+
+	idx = si_findcoreidx(sih, coreid, coreunit);
+	if (!GOODIDX(idx))
+		return (NULL);
+
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_setcoreidx(sih, idx);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return ai_setcoreidx(sih, idx);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_setcoreidx(sih, idx);
+	else {
+		ASSERT(0);
+		return NULL;
+	}
+}
+
+void *
+si_setcoreidx(si_t *sih, uint coreidx)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_setcoreidx(sih, coreidx);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return ai_setcoreidx(sih, coreidx);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_setcoreidx(sih, coreidx);
+	else {
+		ASSERT(0);
+		return NULL;
+	}
+}
+
+/** Turn off interrupt as required by sb_setcore, before switch core */
+void *
+si_switch_core(si_t *sih, uint coreid, uint *origidx, uint *intr_val)
+{
+	void *cc;
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+	if (SI_FAST(sii)) {
+		/* Overloading the origidx variable to remember the coreid,
+		 * this works because the core ids cannot be confused with
+		 * core indices.
+		 */
+		*origidx = coreid;
+		if (coreid == CC_CORE_ID)
+			return (void *)CCREGS_FAST(sii);
+		else if (coreid == sih->buscoretype)
+			return (void *)PCIEREGS(sii);
+	}
+	INTR_OFF(sii, *intr_val);
+	*origidx = sii->curidx;
+	cc = si_setcore(sih, coreid, 0);
+	ASSERT(cc != NULL);
+
+	return cc;
+}
+
+/* restore coreidx and restore interrupt */
+void
+si_restore_core(si_t *sih, uint coreid, uint intr_val)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+	if (SI_FAST(sii) && ((coreid == CC_CORE_ID) || (coreid == sih->buscoretype)))
+		return;
+
+	si_setcoreidx(sih, coreid);
+	INTR_RESTORE(sii, intr_val);
+}
+
+int
+si_numaddrspaces(si_t *sih)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_numaddrspaces(sih);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return ai_numaddrspaces(sih);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_numaddrspaces(sih);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+uint32
+si_addrspace(si_t *sih, uint asidx)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_addrspace(sih, asidx);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return ai_addrspace(sih, asidx);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_addrspace(sih, asidx);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+uint32
+si_addrspacesize(si_t *sih, uint asidx)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_addrspacesize(sih, asidx);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return ai_addrspacesize(sih, asidx);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_addrspacesize(sih, asidx);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+void
+si_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size)
+{
+	/* Only supported for SOCI_AI */
+	if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		ai_coreaddrspaceX(sih, asidx, addr, size);
+	else
+		*size = 0;
+}
+
+uint32
+si_core_cflags(si_t *sih, uint32 mask, uint32 val)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_core_cflags(sih, mask, val);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return ai_core_cflags(sih, mask, val);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_core_cflags(sih, mask, val);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+void
+si_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		sb_core_cflags_wo(sih, mask, val);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		ai_core_cflags_wo(sih, mask, val);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		ub_core_cflags_wo(sih, mask, val);
+	else
+		ASSERT(0);
+}
+
+uint32
+si_core_sflags(si_t *sih, uint32 mask, uint32 val)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_core_sflags(sih, mask, val);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return ai_core_sflags(sih, mask, val);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_core_sflags(sih, mask, val);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+bool
+si_iscoreup(si_t *sih)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_iscoreup(sih);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return ai_iscoreup(sih);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_iscoreup(sih);
+	else {
+		ASSERT(0);
+		return FALSE;
+	}
+}
+
+uint
+si_wrapperreg(si_t *sih, uint32 offset, uint32 mask, uint32 val)
+{
+	/* only for AI back plane chips */
+	if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return (ai_wrap_reg(sih, offset, mask, val));
+	return 0;
+}
+
+uint
+si_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_corereg(sih, coreidx, regoff, mask, val);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return ai_corereg(sih, coreidx, regoff, mask, val);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_corereg(sih, coreidx, regoff, mask, val);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+/** ILP sensitive register access needs special treatment to avoid backplane stalls */
+bool si_pmu_is_ilp_sensitive(uint32 idx, uint regoff)
+{
+	if (idx == SI_CC_IDX) {
+		if (CHIPCREGS_ILP_SENSITIVE(regoff))
+			return TRUE;
+	} else if (PMUREGS_ILP_SENSITIVE(regoff)) {
+		return TRUE;
+	}
+
+	return FALSE;
+}
+
+/** 'idx' should refer either to the chipcommon core or the PMU core */
+uint
+si_pmu_corereg(si_t *sih, uint32 idx, uint regoff, uint mask, uint val)
+{
+	int pmustatus_offset;
+
+	/* prevent backplane stall on double write to 'ILP domain' registers in the PMU */
+	if (mask != 0 && sih->pmurev >= 22 &&
+	    si_pmu_is_ilp_sensitive(idx, regoff)) {
+		pmustatus_offset = AOB_ENAB(sih) ? OFFSETOF(pmuregs_t, pmustatus) :
+			OFFSETOF(chipcregs_t, pmustatus);
+
+		while (si_corereg(sih, idx, pmustatus_offset, 0, 0) & PST_SLOW_WR_PENDING)
+			{};
+	}
+
+	return si_corereg(sih, idx, regoff, mask, val);
+}
+
+/*
+ * If there is no need for fiddling with interrupts or core switches (typically silicon
+ * back plane registers, pci registers and chipcommon registers), this function
+ * returns the register offset on this core to a mapped address. This address can
+ * be used for W_REG/R_REG directly.
+ *
+ * For accessing registers that would need a core switch, this function will return
+ * NULL.
+ */
+uint32 *
+si_corereg_addr(si_t *sih, uint coreidx, uint regoff)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_corereg_addr(sih, coreidx, regoff);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return ai_corereg_addr(sih, coreidx, regoff);
+	else {
+		return 0;
+	}
+}
+
+void
+si_core_disable(si_t *sih, uint32 bits)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		sb_core_disable(sih, bits);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		ai_core_disable(sih, bits);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		ub_core_disable(sih, bits);
+}
+
+void
+si_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		sb_core_reset(sih, bits, resetbits);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		ai_core_reset(sih, bits, resetbits);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		ub_core_reset(sih, bits, resetbits);
+}
+
+/** Run bist on current core. Caller needs to take care of core-specific bist hazards */
+int
+si_corebist(si_t *sih)
+{
+	uint32 cflags;
+	int result = 0;
+
+	/* Read core control flags */
+	cflags = si_core_cflags(sih, 0, 0);
+
+	/* Set bist & fgc */
+	si_core_cflags(sih, ~0, (SICF_BIST_EN | SICF_FGC));
+
+	/* Wait for bist done */
+	SPINWAIT(((si_core_sflags(sih, 0, 0) & SISF_BIST_DONE) == 0), 100000);
+
+	if (si_core_sflags(sih, 0, 0) & SISF_BIST_ERROR)
+		result = BCME_ERROR;
+
+	/* Reset core control flags */
+	si_core_cflags(sih, 0xffff, cflags);
+
+	return result;
+}
+
+static uint32
+factor6(uint32 x)
+{
+	switch (x) {
+	case CC_F6_2:	return 2;
+	case CC_F6_3:	return 3;
+	case CC_F6_4:	return 4;
+	case CC_F6_5:	return 5;
+	case CC_F6_6:	return 6;
+	case CC_F6_7:	return 7;
+	default:	return 0;
+	}
+}
+
+/** calculate the speed the SI would run at given a set of clockcontrol values */
+uint32
+si_clock_rate(uint32 pll_type, uint32 n, uint32 m)
+{
+	uint32 n1, n2, clock, m1, m2, m3, mc;
+
+	n1 = n & CN_N1_MASK;
+	n2 = (n & CN_N2_MASK) >> CN_N2_SHIFT;
+
+	if (pll_type == PLL_TYPE6) {
+		if (m & CC_T6_MMASK)
+			return CC_T6_M1;
+		else
+			return CC_T6_M0;
+	} else if ((pll_type == PLL_TYPE1) ||
+	           (pll_type == PLL_TYPE3) ||
+	           (pll_type == PLL_TYPE4) ||
+	           (pll_type == PLL_TYPE7)) {
+		n1 = factor6(n1);
+		n2 += CC_F5_BIAS;
+	} else if (pll_type == PLL_TYPE2) {
+		n1 += CC_T2_BIAS;
+		n2 += CC_T2_BIAS;
+		ASSERT((n1 >= 2) && (n1 <= 7));
+		ASSERT((n2 >= 5) && (n2 <= 23));
+	} else if (pll_type == PLL_TYPE5) {
+		return (100000000);
+	} else
+		ASSERT(0);
+	/* PLL types 3 and 7 use BASE2 (25Mhz) */
+	if ((pll_type == PLL_TYPE3) ||
+	    (pll_type == PLL_TYPE7)) {
+		clock = CC_CLOCK_BASE2 * n1 * n2;
+	} else
+		clock = CC_CLOCK_BASE1 * n1 * n2;
+
+	if (clock == 0)
+		return 0;
+
+	m1 = m & CC_M1_MASK;
+	m2 = (m & CC_M2_MASK) >> CC_M2_SHIFT;
+	m3 = (m & CC_M3_MASK) >> CC_M3_SHIFT;
+	mc = (m & CC_MC_MASK) >> CC_MC_SHIFT;
+
+	if ((pll_type == PLL_TYPE1) ||
+	    (pll_type == PLL_TYPE3) ||
+	    (pll_type == PLL_TYPE4) ||
+	    (pll_type == PLL_TYPE7)) {
+		m1 = factor6(m1);
+		if ((pll_type == PLL_TYPE1) || (pll_type == PLL_TYPE3))
+			m2 += CC_F5_BIAS;
+		else
+			m2 = factor6(m2);
+		m3 = factor6(m3);
+
+		switch (mc) {
+		case CC_MC_BYPASS:	return (clock);
+		case CC_MC_M1:		return (clock / m1);
+		case CC_MC_M1M2:	return (clock / (m1 * m2));
+		case CC_MC_M1M2M3:	return (clock / (m1 * m2 * m3));
+		case CC_MC_M1M3:	return (clock / (m1 * m3));
+		default:		return (0);
+		}
+	} else {
+		ASSERT(pll_type == PLL_TYPE2);
+
+		m1 += CC_T2_BIAS;
+		m2 += CC_T2M2_BIAS;
+		m3 += CC_T2_BIAS;
+		ASSERT((m1 >= 2) && (m1 <= 7));
+		ASSERT((m2 >= 3) && (m2 <= 10));
+		ASSERT((m3 >= 2) && (m3 <= 7));
+
+		if ((mc & CC_T2MC_M1BYP) == 0)
+			clock /= m1;
+		if ((mc & CC_T2MC_M2BYP) == 0)
+			clock /= m2;
+		if ((mc & CC_T2MC_M3BYP) == 0)
+			clock /= m3;
+
+		return (clock);
+	}
+}
+
+/**
+ * Some chips could have multiple host interfaces, however only one will be active.
+ * For a given chip. Depending pkgopt and cc_chipst return the active host interface.
+ */
+uint
+si_chip_hostif(si_t *sih)
+{
+	uint hosti = 0;
+
+	switch (CHIPID(sih->chip)) {
+
+	case BCM43602_CHIP_ID:
+		hosti = CHIP_HOSTIF_PCIEMODE;
+		break;
+
+	case BCM4360_CHIP_ID:
+		/* chippkg bit-0 == 0 is PCIE only pkgs
+		 * chippkg bit-0 == 1 has both PCIE and USB cores enabled
+		 */
+		if ((sih->chippkg & 0x1) && (sih->chipst & CST4360_MODE_USB))
+			hosti = CHIP_HOSTIF_USBMODE;
+		else
+			hosti = CHIP_HOSTIF_PCIEMODE;
+
+		break;
+
+	case BCM4335_CHIP_ID:
+		/* TBD: like in 4360, do we need to check pkg? */
+		if (CST4335_CHIPMODE_USB20D(sih->chipst))
+			hosti = CHIP_HOSTIF_USBMODE;
+		else if (CST4335_CHIPMODE_SDIOD(sih->chipst))
+			hosti = CHIP_HOSTIF_SDIOMODE;
+		else
+			hosti = CHIP_HOSTIF_PCIEMODE;
+		break;
+
+	case BCM4345_CHIP_ID:
+		if (CST4345_CHIPMODE_USB20D(sih->chipst) || CST4345_CHIPMODE_HSIC(sih->chipst))
+			hosti = CHIP_HOSTIF_USBMODE;
+		else if (CST4345_CHIPMODE_SDIOD(sih->chipst))
+			hosti = CHIP_HOSTIF_SDIOMODE;
+		else if (CST4345_CHIPMODE_PCIE(sih->chipst))
+			hosti = CHIP_HOSTIF_PCIEMODE;
+		break;
+
+	case BCM4349_CHIP_GRPID:
+		if (CST4349_CHIPMODE_SDIOD(sih->chipst))
+			hosti = CHIP_HOSTIF_SDIOMODE;
+		else if (CST4349_CHIPMODE_PCIE(sih->chipst))
+			hosti = CHIP_HOSTIF_PCIEMODE;
+		break;
+
+	case BCM4350_CHIP_ID:
+	case BCM4354_CHIP_ID:
+	case BCM4356_CHIP_ID:
+	case BCM43556_CHIP_ID:
+	case BCM43558_CHIP_ID:
+	case BCM43566_CHIP_ID:
+	case BCM43568_CHIP_ID:
+	case BCM43569_CHIP_ID:
+	case BCM43570_CHIP_ID:
+	case BCM4358_CHIP_ID:
+		if (CST4350_CHIPMODE_USB20D(sih->chipst) ||
+		    CST4350_CHIPMODE_HSIC20D(sih->chipst) ||
+		    CST4350_CHIPMODE_USB30D(sih->chipst) ||
+		    CST4350_CHIPMODE_USB30D_WL(sih->chipst) ||
+		    CST4350_CHIPMODE_HSIC30D(sih->chipst))
+			hosti = CHIP_HOSTIF_USBMODE;
+		else if (CST4350_CHIPMODE_SDIOD(sih->chipst))
+			hosti = CHIP_HOSTIF_SDIOMODE;
+		else if (CST4350_CHIPMODE_PCIE(sih->chipst))
+			hosti = CHIP_HOSTIF_PCIEMODE;
+		break;
+
+	default:
+		break;
+	}
+
+	return hosti;
+}
+
+
+/** set chip watchdog reset timer to fire in 'ticks' */
+void
+si_watchdog(si_t *sih, uint ticks)
+{
+	uint nb, maxt;
+
+	if (PMUCTL_ENAB(sih)) {
+
+#if !defined(_CFEZ_) || defined(CFG_WL)
+		if ((CHIPID(sih->chip) == BCM4319_CHIP_ID) &&
+		    (CHIPREV(sih->chiprev) == 0) && (ticks != 0)) {
+			si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, clk_ctl_st), ~0, 0x2);
+			si_setcore(sih, USB20D_CORE_ID, 0);
+			si_core_disable(sih, 1);
+			si_setcore(sih, CC_CORE_ID, 0);
+		}
+#endif 
+
+			nb = (sih->ccrev < 26) ? 16 : ((sih->ccrev >= 37) ? 32 : 24);
+		/* The mips compiler uses the sllv instruction,
+		 * so we specially handle the 32-bit case.
+		 */
+		if (nb == 32)
+			maxt = 0xffffffff;
+		else
+			maxt = ((1 << nb) - 1);
+
+		if (ticks == 1)
+			ticks = 2;
+		else if (ticks > maxt)
+			ticks = maxt;
+
+		pmu_corereg(sih, SI_CC_IDX, pmuwatchdog, ~0, ticks);
+	} else {
+		maxt = (1 << 28) - 1;
+		if (ticks > maxt)
+			ticks = maxt;
+
+		si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, watchdog), ~0, ticks);
+	}
+}
+
+/** trigger watchdog reset after ms milliseconds */
+void
+si_watchdog_ms(si_t *sih, uint32 ms)
+{
+	si_watchdog(sih, wd_msticks * ms);
+}
+
+uint32 si_watchdog_msticks(void)
+{
+	return wd_msticks;
+}
+
+bool
+si_taclear(si_t *sih, bool details)
+{
+	return FALSE;
+}
+
+
+
+/** return the slow clock source - LPO, XTAL, or PCI */
+static uint
+si_slowclk_src(si_info_t *sii)
+{
+	chipcregs_t *cc;
+
+	ASSERT(SI_FAST(sii) || si_coreid(&sii->pub) == CC_CORE_ID);
+
+	if (sii->pub.ccrev < 6) {
+		if ((BUSTYPE(sii->pub.bustype) == PCI_BUS) &&
+		    (OSL_PCI_READ_CONFIG(sii->osh, PCI_GPIO_OUT, sizeof(uint32)) &
+		     PCI_CFG_GPIO_SCS))
+			return (SCC_SS_PCI);
+		else
+			return (SCC_SS_XTAL);
+	} else if (sii->pub.ccrev < 10) {
+		cc = (chipcregs_t *)si_setcoreidx(&sii->pub, sii->curidx);
+		ASSERT(cc);
+		return (R_REG(sii->osh, &cc->slow_clk_ctl) & SCC_SS_MASK);
+	} else	/* Insta-clock */
+		return (SCC_SS_XTAL);
+}
+
+/** return the ILP (slowclock) min or max frequency */
+static uint
+si_slowclk_freq(si_info_t *sii, bool max_freq, chipcregs_t *cc)
+{
+	uint32 slowclk;
+	uint div;
+
+	ASSERT(SI_FAST(sii) || si_coreid(&sii->pub) == CC_CORE_ID);
+
+	/* shouldn't be here unless we've established the chip has dynamic clk control */
+	ASSERT(R_REG(sii->osh, &cc->capabilities) & CC_CAP_PWR_CTL);
+
+	slowclk = si_slowclk_src(sii);
+	if (sii->pub.ccrev < 6) {
+		if (slowclk == SCC_SS_PCI)
+			return (max_freq ? (PCIMAXFREQ / 64) : (PCIMINFREQ / 64));
+		else
+			return (max_freq ? (XTALMAXFREQ / 32) : (XTALMINFREQ / 32));
+	} else if (sii->pub.ccrev < 10) {
+		div = 4 *
+		        (((R_REG(sii->osh, &cc->slow_clk_ctl) & SCC_CD_MASK) >> SCC_CD_SHIFT) + 1);
+		if (slowclk == SCC_SS_LPO)
+			return (max_freq ? LPOMAXFREQ : LPOMINFREQ);
+		else if (slowclk == SCC_SS_XTAL)
+			return (max_freq ? (XTALMAXFREQ / div) : (XTALMINFREQ / div));
+		else if (slowclk == SCC_SS_PCI)
+			return (max_freq ? (PCIMAXFREQ / div) : (PCIMINFREQ / div));
+		else
+			ASSERT(0);
+	} else {
+		/* Chipc rev 10 is InstaClock */
+		div = R_REG(sii->osh, &cc->system_clk_ctl) >> SYCC_CD_SHIFT;
+		div = 4 * (div + 1);
+		return (max_freq ? XTALMAXFREQ : (XTALMINFREQ / div));
+	}
+	return (0);
+}
+
+static void
+si_clkctl_setdelay(si_info_t *sii, void *chipcregs)
+{
+	chipcregs_t *cc = (chipcregs_t *)chipcregs;
+	uint slowmaxfreq, pll_delay, slowclk;
+	uint pll_on_delay, fref_sel_delay;
+
+	pll_delay = PLL_DELAY;
+
+	/* If the slow clock is not sourced by the xtal then add the xtal_on_delay
+	 * since the xtal will also be powered down by dynamic clk control logic.
+	 */
+
+	slowclk = si_slowclk_src(sii);
+	if (slowclk != SCC_SS_XTAL)
+		pll_delay += XTAL_ON_DELAY;
+
+	/* Starting with 4318 it is ILP that is used for the delays */
+	slowmaxfreq = si_slowclk_freq(sii, (sii->pub.ccrev >= 10) ? FALSE : TRUE, cc);
+
+	pll_on_delay = ((slowmaxfreq * pll_delay) + 999999) / 1000000;
+	fref_sel_delay = ((slowmaxfreq * FREF_DELAY) + 999999) / 1000000;
+
+	W_REG(sii->osh, &cc->pll_on_delay, pll_on_delay);
+	W_REG(sii->osh, &cc->fref_sel_delay, fref_sel_delay);
+}
+
+/** initialize power control delay registers */
+void
+si_clkctl_init(si_t *sih)
+{
+	si_info_t *sii;
+	uint origidx = 0;
+	chipcregs_t *cc;
+	bool fast;
+
+	if (!CCCTL_ENAB(sih))
+		return;
+
+	sii = SI_INFO(sih);
+	fast = SI_FAST(sii);
+	if (!fast) {
+		origidx = sii->curidx;
+		if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL)
+			return;
+	} else if ((cc = (chipcregs_t *)CCREGS_FAST(sii)) == NULL)
+		return;
+	ASSERT(cc != NULL);
+
+	/* set all Instaclk chip ILP to 1 MHz */
+	if (sih->ccrev >= 10)
+		SET_REG(sii->osh, &cc->system_clk_ctl, SYCC_CD_MASK,
+		        (ILP_DIV_1MHZ << SYCC_CD_SHIFT));
+
+	si_clkctl_setdelay(sii, (void *)(uintptr)cc);
+
+	OSL_DELAY(20000);
+
+	if (!fast)
+		si_setcoreidx(sih, origidx);
+}
+
+
+/** change logical "focus" to the gpio core for optimized access */
+void *
+si_gpiosetcore(si_t *sih)
+{
+	return (si_setcoreidx(sih, SI_CC_IDX));
+}
+
+/**
+ * mask & set gpiocontrol bits.
+ * If a gpiocontrol bit is set to 0, chipcommon controls the corresponding GPIO pin.
+ * If a gpiocontrol bit is set to 1, the GPIO pin is no longer a GPIO and becomes dedicated
+ *   to some chip-specific purpose.
+ */
+uint32
+si_gpiocontrol(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+	uint regoff;
+
+	regoff = 0;
+
+	/* gpios could be shared on router platforms
+	 * ignore reservation if it's high priority (e.g., test apps)
+	 */
+	if ((priority != GPIO_HI_PRIORITY) &&
+	    (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+		mask = priority ? (si_gpioreservation & mask) :
+			((si_gpioreservation | mask) & ~(si_gpioreservation));
+		val &= mask;
+	}
+
+	regoff = OFFSETOF(chipcregs_t, gpiocontrol);
+	return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/** mask&set gpio output enable bits */
+uint32
+si_gpioouten(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+	uint regoff;
+
+	regoff = 0;
+
+	/* gpios could be shared on router platforms
+	 * ignore reservation if it's high priority (e.g., test apps)
+	 */
+	if ((priority != GPIO_HI_PRIORITY) &&
+	    (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+		mask = priority ? (si_gpioreservation & mask) :
+			((si_gpioreservation | mask) & ~(si_gpioreservation));
+		val &= mask;
+	}
+
+	regoff = OFFSETOF(chipcregs_t, gpioouten);
+	return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/** mask&set gpio output bits */
+uint32
+si_gpioout(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+	uint regoff;
+
+	regoff = 0;
+
+	/* gpios could be shared on router platforms
+	 * ignore reservation if it's high priority (e.g., test apps)
+	 */
+	if ((priority != GPIO_HI_PRIORITY) &&
+	    (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+		mask = priority ? (si_gpioreservation & mask) :
+			((si_gpioreservation | mask) & ~(si_gpioreservation));
+		val &= mask;
+	}
+
+	regoff = OFFSETOF(chipcregs_t, gpioout);
+	return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/** reserve one gpio */
+uint32
+si_gpioreserve(si_t *sih, uint32 gpio_bitmask, uint8 priority)
+{
+	/* only cores on SI_BUS share GPIO's and only applcation users need to
+	 * reserve/release GPIO
+	 */
+	if ((BUSTYPE(sih->bustype) != SI_BUS) || (!priority)) {
+		ASSERT((BUSTYPE(sih->bustype) == SI_BUS) && (priority));
+		return 0xffffffff;
+	}
+	/* make sure only one bit is set */
+	if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) {
+		ASSERT((gpio_bitmask) && !((gpio_bitmask) & (gpio_bitmask - 1)));
+		return 0xffffffff;
+	}
+
+	/* already reserved */
+	if (si_gpioreservation & gpio_bitmask)
+		return 0xffffffff;
+	/* set reservation */
+	si_gpioreservation |= gpio_bitmask;
+
+	return si_gpioreservation;
+}
+
+/**
+ * release one gpio.
+ *
+ * releasing the gpio doesn't change the current value on the GPIO last write value
+ * persists till someone overwrites it.
+ */
+uint32
+si_gpiorelease(si_t *sih, uint32 gpio_bitmask, uint8 priority)
+{
+	/* only cores on SI_BUS share GPIO's and only applcation users need to
+	 * reserve/release GPIO
+	 */
+	if ((BUSTYPE(sih->bustype) != SI_BUS) || (!priority)) {
+		ASSERT((BUSTYPE(sih->bustype) == SI_BUS) && (priority));
+		return 0xffffffff;
+	}
+	/* make sure only one bit is set */
+	if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) {
+		ASSERT((gpio_bitmask) && !((gpio_bitmask) & (gpio_bitmask - 1)));
+		return 0xffffffff;
+	}
+
+	/* already released */
+	if (!(si_gpioreservation & gpio_bitmask))
+		return 0xffffffff;
+
+	/* clear reservation */
+	si_gpioreservation &= ~gpio_bitmask;
+
+	return si_gpioreservation;
+}
+
+/* return the current gpioin register value */
+uint32
+si_gpioin(si_t *sih)
+{
+	uint regoff;
+
+	regoff = OFFSETOF(chipcregs_t, gpioin);
+	return (si_corereg(sih, SI_CC_IDX, regoff, 0, 0));
+}
+
+/* mask&set gpio interrupt polarity bits */
+uint32
+si_gpiointpolarity(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+	uint regoff;
+
+	/* gpios could be shared on router platforms */
+	if ((BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+		mask = priority ? (si_gpioreservation & mask) :
+			((si_gpioreservation | mask) & ~(si_gpioreservation));
+		val &= mask;
+	}
+
+	regoff = OFFSETOF(chipcregs_t, gpiointpolarity);
+	return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/* mask&set gpio interrupt mask bits */
+uint32
+si_gpiointmask(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+	uint regoff;
+
+	/* gpios could be shared on router platforms */
+	if ((BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+		mask = priority ? (si_gpioreservation & mask) :
+			((si_gpioreservation | mask) & ~(si_gpioreservation));
+		val &= mask;
+	}
+
+	regoff = OFFSETOF(chipcregs_t, gpiointmask);
+	return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/* assign the gpio to an led */
+uint32
+si_gpioled(si_t *sih, uint32 mask, uint32 val)
+{
+	if (sih->ccrev < 16)
+		return 0xffffffff;
+
+	/* gpio led powersave reg */
+	return (si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, gpiotimeroutmask), mask, val));
+}
+
+/* mask&set gpio timer val */
+uint32
+si_gpiotimerval(si_t *sih, uint32 mask, uint32 gpiotimerval)
+{
+	if (sih->ccrev < 16)
+		return 0xffffffff;
+
+	return (si_corereg(sih, SI_CC_IDX,
+		OFFSETOF(chipcregs_t, gpiotimerval), mask, gpiotimerval));
+}
+
+uint32
+si_gpiopull(si_t *sih, bool updown, uint32 mask, uint32 val)
+{
+	uint offs;
+
+	if (sih->ccrev < 20)
+		return 0xffffffff;
+
+	offs = (updown ? OFFSETOF(chipcregs_t, gpiopulldown) : OFFSETOF(chipcregs_t, gpiopullup));
+	return (si_corereg(sih, SI_CC_IDX, offs, mask, val));
+}
+
+uint32
+si_gpioevent(si_t *sih, uint regtype, uint32 mask, uint32 val)
+{
+	uint offs;
+
+	if (sih->ccrev < 11)
+		return 0xffffffff;
+
+	if (regtype == GPIO_REGEVT)
+		offs = OFFSETOF(chipcregs_t, gpioevent);
+	else if (regtype == GPIO_REGEVT_INTMSK)
+		offs = OFFSETOF(chipcregs_t, gpioeventintmask);
+	else if (regtype == GPIO_REGEVT_INTPOL)
+		offs = OFFSETOF(chipcregs_t, gpioeventintpolarity);
+	else
+		return 0xffffffff;
+
+	return (si_corereg(sih, SI_CC_IDX, offs, mask, val));
+}
+
+void *
+si_gpio_handler_register(si_t *sih, uint32 event,
+	bool level, gpio_handler_t cb, void *arg)
+{
+	si_info_t *sii = SI_INFO(sih);
+	gpioh_item_t *gi;
+
+	ASSERT(event);
+	ASSERT(cb != NULL);
+
+	if (sih->ccrev < 11)
+		return NULL;
+
+	if ((gi = MALLOC(sii->osh, sizeof(gpioh_item_t))) == NULL)
+		return NULL;
+
+	bzero(gi, sizeof(gpioh_item_t));
+	gi->event = event;
+	gi->handler = cb;
+	gi->arg = arg;
+	gi->level = level;
+
+	gi->next = sii->gpioh_head;
+	sii->gpioh_head = gi;
+
+	return (void *)(gi);
+}
+
+void
+si_gpio_handler_unregister(si_t *sih, void *gpioh)
+{
+	si_info_t *sii = SI_INFO(sih);
+	gpioh_item_t *p, *n;
+
+	if (sih->ccrev < 11)
+		return;
+
+	ASSERT(sii->gpioh_head != NULL);
+	if ((void*)sii->gpioh_head == gpioh) {
+		sii->gpioh_head = sii->gpioh_head->next;
+		MFREE(sii->osh, gpioh, sizeof(gpioh_item_t));
+		return;
+	} else {
+		p = sii->gpioh_head;
+		n = p->next;
+		while (n) {
+			if ((void*)n == gpioh) {
+				p->next = n->next;
+				MFREE(sii->osh, gpioh, sizeof(gpioh_item_t));
+				return;
+			}
+			p = n;
+			n = n->next;
+		}
+	}
+
+	ASSERT(0); /* Not found in list */
+}
+
+void
+si_gpio_handler_process(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	gpioh_item_t *h;
+	uint32 level = si_gpioin(sih);
+	uint32 levelp = si_gpiointpolarity(sih, 0, 0, 0);
+	uint32 edge = si_gpioevent(sih, GPIO_REGEVT, 0, 0);
+	uint32 edgep = si_gpioevent(sih, GPIO_REGEVT_INTPOL, 0, 0);
+
+	for (h = sii->gpioh_head; h != NULL; h = h->next) {
+		if (h->handler) {
+			uint32 status = (h->level ? level : edge) & h->event;
+			uint32 polarity = (h->level ? levelp : edgep) & h->event;
+
+			/* polarity bitval is opposite of status bitval */
+			if ((h->level && (status ^ polarity)) || (!h->level && status))
+				h->handler(status, h->arg);
+		}
+	}
+
+	si_gpioevent(sih, GPIO_REGEVT, edge, edge); /* clear edge-trigger status */
+}
+
+uint32
+si_gpio_int_enable(si_t *sih, bool enable)
+{
+	uint offs;
+
+	if (sih->ccrev < 11)
+		return 0xffffffff;
+
+	offs = OFFSETOF(chipcregs_t, intmask);
+	return (si_corereg(sih, SI_CC_IDX, offs, CI_GPIO, (enable ? CI_GPIO : 0)));
+}
+
+
+/** Return the size of the specified SOCRAM bank */
+static uint
+socram_banksize(si_info_t *sii, sbsocramregs_t *regs, uint8 idx, uint8 mem_type)
+{
+	uint banksize, bankinfo;
+	uint bankidx = idx | (mem_type << SOCRAM_BANKIDX_MEMTYPE_SHIFT);
+
+	ASSERT(mem_type <= SOCRAM_MEMTYPE_DEVRAM);
+
+	W_REG(sii->osh, &regs->bankidx, bankidx);
+	bankinfo = R_REG(sii->osh, &regs->bankinfo);
+	banksize = SOCRAM_BANKINFO_SZBASE * ((bankinfo & SOCRAM_BANKINFO_SZMASK) + 1);
+	return banksize;
+}
+
+void
+si_socdevram(si_t *sih, bool set, uint8 *enable, uint8 *protect, uint8 *remap)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint origidx;
+	uint intr_val = 0;
+	sbsocramregs_t *regs;
+	bool wasup;
+	uint corerev;
+
+	/* Block ints and save current core */
+	INTR_OFF(sii, intr_val);
+	origidx = si_coreidx(sih);
+
+	if (!set)
+		*enable = *protect = *remap = 0;
+
+	/* Switch to SOCRAM core */
+	if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+		goto done;
+
+	/* Get info for determining size */
+	if (!(wasup = si_iscoreup(sih)))
+		si_core_reset(sih, 0, 0);
+
+	corerev = si_corerev(sih);
+	if (corerev >= 10) {
+		uint32 extcinfo;
+		uint8 nb;
+		uint8 i;
+		uint32 bankidx, bankinfo;
+
+		extcinfo = R_REG(sii->osh, &regs->extracoreinfo);
+		nb = ((extcinfo & SOCRAM_DEVRAMBANK_MASK) >> SOCRAM_DEVRAMBANK_SHIFT);
+		for (i = 0; i < nb; i++) {
+			bankidx = i | (SOCRAM_MEMTYPE_DEVRAM << SOCRAM_BANKIDX_MEMTYPE_SHIFT);
+			W_REG(sii->osh, &regs->bankidx, bankidx);
+			bankinfo = R_REG(sii->osh, &regs->bankinfo);
+			if (set) {
+				bankinfo &= ~SOCRAM_BANKINFO_DEVRAMSEL_MASK;
+				bankinfo &= ~SOCRAM_BANKINFO_DEVRAMPRO_MASK;
+				bankinfo &= ~SOCRAM_BANKINFO_DEVRAMREMAP_MASK;
+				if (*enable) {
+					bankinfo |= (1 << SOCRAM_BANKINFO_DEVRAMSEL_SHIFT);
+					if (*protect)
+						bankinfo |= (1 << SOCRAM_BANKINFO_DEVRAMPRO_SHIFT);
+					if ((corerev >= 16) && *remap)
+						bankinfo |=
+							(1 << SOCRAM_BANKINFO_DEVRAMREMAP_SHIFT);
+				}
+				W_REG(sii->osh, &regs->bankinfo, bankinfo);
+			}
+			else if (i == 0) {
+				if (bankinfo & SOCRAM_BANKINFO_DEVRAMSEL_MASK) {
+					*enable = 1;
+					if (bankinfo & SOCRAM_BANKINFO_DEVRAMPRO_MASK)
+						*protect = 1;
+					if (bankinfo & SOCRAM_BANKINFO_DEVRAMREMAP_MASK)
+						*remap = 1;
+				}
+			}
+		}
+	}
+
+	/* Return to previous state and core */
+	if (!wasup)
+		si_core_disable(sih, 0);
+	si_setcoreidx(sih, origidx);
+
+done:
+	INTR_RESTORE(sii, intr_val);
+}
+
+bool
+si_socdevram_remap_isenb(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint origidx;
+	uint intr_val = 0;
+	sbsocramregs_t *regs;
+	bool wasup, remap = FALSE;
+	uint corerev;
+	uint32 extcinfo;
+	uint8 nb;
+	uint8 i;
+	uint32 bankidx, bankinfo;
+
+	/* Block ints and save current core */
+	INTR_OFF(sii, intr_val);
+	origidx = si_coreidx(sih);
+
+	/* Switch to SOCRAM core */
+	if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+		goto done;
+
+	/* Get info for determining size */
+	if (!(wasup = si_iscoreup(sih)))
+		si_core_reset(sih, 0, 0);
+
+	corerev = si_corerev(sih);
+	if (corerev >= 16) {
+		extcinfo = R_REG(sii->osh, &regs->extracoreinfo);
+		nb = ((extcinfo & SOCRAM_DEVRAMBANK_MASK) >> SOCRAM_DEVRAMBANK_SHIFT);
+		for (i = 0; i < nb; i++) {
+			bankidx = i | (SOCRAM_MEMTYPE_DEVRAM << SOCRAM_BANKIDX_MEMTYPE_SHIFT);
+			W_REG(sii->osh, &regs->bankidx, bankidx);
+			bankinfo = R_REG(sii->osh, &regs->bankinfo);
+			if (bankinfo & SOCRAM_BANKINFO_DEVRAMREMAP_MASK) {
+				remap = TRUE;
+				break;
+			}
+		}
+	}
+
+	/* Return to previous state and core */
+	if (!wasup)
+		si_core_disable(sih, 0);
+	si_setcoreidx(sih, origidx);
+
+done:
+	INTR_RESTORE(sii, intr_val);
+	return remap;
+}
+
+bool
+si_socdevram_pkg(si_t *sih)
+{
+	if (si_socdevram_size(sih) > 0)
+		return TRUE;
+	else
+		return FALSE;
+}
+
+uint32
+si_socdevram_size(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint origidx;
+	uint intr_val = 0;
+	uint32 memsize = 0;
+	sbsocramregs_t *regs;
+	bool wasup;
+	uint corerev;
+
+	/* Block ints and save current core */
+	INTR_OFF(sii, intr_val);
+	origidx = si_coreidx(sih);
+
+	/* Switch to SOCRAM core */
+	if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+		goto done;
+
+	/* Get info for determining size */
+	if (!(wasup = si_iscoreup(sih)))
+		si_core_reset(sih, 0, 0);
+
+	corerev = si_corerev(sih);
+	if (corerev >= 10) {
+		uint32 extcinfo;
+		uint8 nb;
+		uint8 i;
+
+		extcinfo = R_REG(sii->osh, &regs->extracoreinfo);
+		nb = (((extcinfo & SOCRAM_DEVRAMBANK_MASK) >> SOCRAM_DEVRAMBANK_SHIFT));
+		for (i = 0; i < nb; i++)
+			memsize += socram_banksize(sii, regs, i, SOCRAM_MEMTYPE_DEVRAM);
+	}
+
+	/* Return to previous state and core */
+	if (!wasup)
+		si_core_disable(sih, 0);
+	si_setcoreidx(sih, origidx);
+
+done:
+	INTR_RESTORE(sii, intr_val);
+
+	return memsize;
+}
+
+uint32
+si_socdevram_remap_size(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint origidx;
+	uint intr_val = 0;
+	uint32 memsize = 0, banksz;
+	sbsocramregs_t *regs;
+	bool wasup;
+	uint corerev;
+	uint32 extcinfo;
+	uint8 nb;
+	uint8 i;
+	uint32 bankidx, bankinfo;
+
+	/* Block ints and save current core */
+	INTR_OFF(sii, intr_val);
+	origidx = si_coreidx(sih);
+
+	/* Switch to SOCRAM core */
+	if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+		goto done;
+
+	/* Get info for determining size */
+	if (!(wasup = si_iscoreup(sih)))
+		si_core_reset(sih, 0, 0);
+
+	corerev = si_corerev(sih);
+	if (corerev >= 16) {
+		extcinfo = R_REG(sii->osh, &regs->extracoreinfo);
+		nb = (((extcinfo & SOCRAM_DEVRAMBANK_MASK) >> SOCRAM_DEVRAMBANK_SHIFT));
+
+		/*
+		 * FIX: A0 Issue: Max addressable is 512KB, instead 640KB
+		 * Only four banks are accessible to ARM
+		 */
+		if ((corerev == 16) && (nb == 5))
+			nb = 4;
+
+		for (i = 0; i < nb; i++) {
+			bankidx = i | (SOCRAM_MEMTYPE_DEVRAM << SOCRAM_BANKIDX_MEMTYPE_SHIFT);
+			W_REG(sii->osh, &regs->bankidx, bankidx);
+			bankinfo = R_REG(sii->osh, &regs->bankinfo);
+			if (bankinfo & SOCRAM_BANKINFO_DEVRAMREMAP_MASK) {
+				banksz = socram_banksize(sii, regs, i, SOCRAM_MEMTYPE_DEVRAM);
+				memsize += banksz;
+			} else {
+				/* Account only consecutive banks for now */
+				break;
+			}
+		}
+	}
+
+	/* Return to previous state and core */
+	if (!wasup)
+		si_core_disable(sih, 0);
+	si_setcoreidx(sih, origidx);
+
+done:
+	INTR_RESTORE(sii, intr_val);
+
+	return memsize;
+}
+
+/** Return the RAM size of the SOCRAM core */
+uint32
+si_socram_size(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint origidx;
+	uint intr_val = 0;
+
+	sbsocramregs_t *regs;
+	bool wasup;
+	uint corerev;
+	uint32 coreinfo;
+	uint memsize = 0;
+
+	/* Block ints and save current core */
+	INTR_OFF(sii, intr_val);
+	origidx = si_coreidx(sih);
+
+	/* Switch to SOCRAM core */
+	if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+		goto done;
+
+	/* Get info for determining size */
+	if (!(wasup = si_iscoreup(sih)))
+		si_core_reset(sih, 0, 0);
+	corerev = si_corerev(sih);
+	coreinfo = R_REG(sii->osh, &regs->coreinfo);
+
+	/* Calculate size from coreinfo based on rev */
+	if (corerev == 0)
+		memsize = 1 << (16 + (coreinfo & SRCI_MS0_MASK));
+	else if (corerev < 3) {
+		memsize = 1 << (SR_BSZ_BASE + (coreinfo & SRCI_SRBSZ_MASK));
+		memsize *= (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+	} else if ((corerev <= 7) || (corerev == 12)) {
+		uint nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+		uint bsz = (coreinfo & SRCI_SRBSZ_MASK);
+		uint lss = (coreinfo & SRCI_LSS_MASK) >> SRCI_LSS_SHIFT;
+		if (lss != 0)
+			nb --;
+		memsize = nb * (1 << (bsz + SR_BSZ_BASE));
+		if (lss != 0)
+			memsize += (1 << ((lss - 1) + SR_BSZ_BASE));
+	} else {
+		uint8 i;
+		uint nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+		for (i = 0; i < nb; i++)
+			memsize += socram_banksize(sii, regs, i, SOCRAM_MEMTYPE_RAM);
+	}
+
+	/* Return to previous state and core */
+	if (!wasup)
+		si_core_disable(sih, 0);
+	si_setcoreidx(sih, origidx);
+
+done:
+	INTR_RESTORE(sii, intr_val);
+
+	return memsize;
+}
+
+
+/** Return the TCM-RAM size of the ARMCR4 core. */
+uint32
+si_tcm_size(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint origidx;
+	uint intr_val = 0;
+	uint8 *regs;
+	bool wasup;
+	uint32 corecap;
+	uint memsize = 0;
+	uint32 nab = 0;
+	uint32 nbb = 0;
+	uint32 totb = 0;
+	uint32 bxinfo = 0;
+	uint32 idx = 0;
+	uint32 *arm_cap_reg;
+	uint32 *arm_bidx;
+	uint32 *arm_binfo;
+
+	/* Block ints and save current core */
+	INTR_OFF(sii, intr_val);
+	origidx = si_coreidx(sih);
+
+	/* Switch to CR4 core */
+	if (!(regs = si_setcore(sih, ARMCR4_CORE_ID, 0)))
+		goto done;
+
+	/* Get info for determining size. If in reset, come out of reset,
+	 * but remain in halt
+	 */
+	if (!(wasup = si_iscoreup(sih)))
+		si_core_reset(sih, SICF_CPUHALT, SICF_CPUHALT);
+
+	arm_cap_reg = (uint32 *)(regs + SI_CR4_CAP);
+	corecap = R_REG(sii->osh, arm_cap_reg);
+
+	nab = (corecap & ARMCR4_TCBANB_MASK) >> ARMCR4_TCBANB_SHIFT;
+	nbb = (corecap & ARMCR4_TCBBNB_MASK) >> ARMCR4_TCBBNB_SHIFT;
+	totb = nab + nbb;
+
+	arm_bidx = (uint32 *)(regs + SI_CR4_BANKIDX);
+	arm_binfo = (uint32 *)(regs + SI_CR4_BANKINFO);
+	for (idx = 0; idx < totb; idx++) {
+		W_REG(sii->osh, arm_bidx, idx);
+
+		bxinfo = R_REG(sii->osh, arm_binfo);
+		memsize += ((bxinfo & ARMCR4_BSZ_MASK) + 1) * ARMCR4_BSZ_MULT;
+	}
+
+	/* Return to previous state and core */
+	if (!wasup)
+		si_core_disable(sih, 0);
+	si_setcoreidx(sih, origidx);
+
+done:
+	INTR_RESTORE(sii, intr_val);
+
+	return memsize;
+}
+
+bool
+si_has_flops(si_t *sih)
+{
+	uint origidx, cr4_rev;
+
+	/* Find out CR4 core revision */
+	origidx = si_coreidx(sih);
+	if (si_setcore(sih, ARMCR4_CORE_ID, 0)) {
+		cr4_rev = si_corerev(sih);
+		si_setcoreidx(sih, origidx);
+
+		if (cr4_rev == 1 || cr4_rev >= 3)
+			return TRUE;
+	}
+	return FALSE;
+}
+
+uint32
+si_socram_srmem_size(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint origidx;
+	uint intr_val = 0;
+
+	sbsocramregs_t *regs;
+	bool wasup;
+	uint corerev;
+	uint32 coreinfo;
+	uint memsize = 0;
+
+	if ((CHIPID(sih->chip) == BCM4334_CHIP_ID) && (CHIPREV(sih->chiprev) < 2)) {
+		return (32 * 1024);
+	}
+
+	/* Block ints and save current core */
+	INTR_OFF(sii, intr_val);
+	origidx = si_coreidx(sih);
+
+	/* Switch to SOCRAM core */
+	if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+		goto done;
+
+	/* Get info for determining size */
+	if (!(wasup = si_iscoreup(sih)))
+		si_core_reset(sih, 0, 0);
+	corerev = si_corerev(sih);
+	coreinfo = R_REG(sii->osh, &regs->coreinfo);
+
+	/* Calculate size from coreinfo based on rev */
+	if (corerev >= 16) {
+		uint8 i;
+		uint nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+		for (i = 0; i < nb; i++) {
+			W_REG(sii->osh, &regs->bankidx, i);
+			if (R_REG(sii->osh, &regs->bankinfo) & SOCRAM_BANKINFO_RETNTRAM_MASK)
+				memsize += socram_banksize(sii, regs, i, SOCRAM_MEMTYPE_RAM);
+		}
+	}
+
+	/* Return to previous state and core */
+	if (!wasup)
+		si_core_disable(sih, 0);
+	si_setcoreidx(sih, origidx);
+
+done:
+	INTR_RESTORE(sii, intr_val);
+
+	return memsize;
+}
+
+
+#if !defined(_CFEZ_) || defined(CFG_WL)
+void
+si_btcgpiowar(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint origidx;
+	uint intr_val = 0;
+	chipcregs_t *cc;
+
+	/* Make sure that there is ChipCommon core present &&
+	 * UART_TX is strapped to 1
+	 */
+	if (!(sih->cccaps & CC_CAP_UARTGPIO))
+		return;
+
+	/* si_corereg cannot be used as we have to guarantee 8-bit read/writes */
+	INTR_OFF(sii, intr_val);
+
+	origidx = si_coreidx(sih);
+
+	cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+	ASSERT(cc != NULL);
+
+	W_REG(sii->osh, &cc->uart0mcr, R_REG(sii->osh, &cc->uart0mcr) | 0x04);
+
+	/* restore the original index */
+	si_setcoreidx(sih, origidx);
+
+	INTR_RESTORE(sii, intr_val);
+}
+
+void
+si_chipcontrl_btshd0_4331(si_t *sih, bool on)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	chipcregs_t *cc;
+	uint origidx;
+	uint32 val;
+	uint intr_val = 0;
+
+	INTR_OFF(sii, intr_val);
+
+	origidx = si_coreidx(sih);
+
+	cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+
+	val = R_REG(sii->osh, &cc->chipcontrol);
+
+	/* bt_shd0 controls are same for 4331 chiprevs 0 and 1, packages 12x9 and 12x12 */
+	if (on) {
+		/* Enable bt_shd0 on gpio4: */
+		val |= (CCTRL4331_BT_SHD0_ON_GPIO4);
+		W_REG(sii->osh, &cc->chipcontrol, val);
+	} else {
+		val &= ~(CCTRL4331_BT_SHD0_ON_GPIO4);
+		W_REG(sii->osh, &cc->chipcontrol, val);
+	}
+
+	/* restore the original index */
+	si_setcoreidx(sih, origidx);
+
+	INTR_RESTORE(sii, intr_val);
+}
+
+void
+si_chipcontrl_restore(si_t *sih, uint32 val)
+{
+	si_info_t *sii = SI_INFO(sih);
+	chipcregs_t *cc;
+	uint origidx = si_coreidx(sih);
+
+	cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+	W_REG(sii->osh, &cc->chipcontrol, val);
+	si_setcoreidx(sih, origidx);
+}
+
+uint32
+si_chipcontrl_read(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	chipcregs_t *cc;
+	uint origidx = si_coreidx(sih);
+	uint32 val;
+
+	cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+	val = R_REG(sii->osh, &cc->chipcontrol);
+	si_setcoreidx(sih, origidx);
+	return val;
+}
+
+void
+si_chipcontrl_epa4331(si_t *sih, bool on)
+{
+	si_info_t *sii = SI_INFO(sih);
+	chipcregs_t *cc;
+	uint origidx = si_coreidx(sih);
+	uint32 val;
+
+	cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+	val = R_REG(sii->osh, &cc->chipcontrol);
+
+	if (on) {
+		if (sih->chippkg == 9 || sih->chippkg == 0xb) {
+			val |= (CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_ON_GPIO2_5);
+			/* Ext PA Controls for 4331 12x9 Package */
+			W_REG(sii->osh, &cc->chipcontrol, val);
+		} else {
+			/* Ext PA Controls for 4331 12x12 Package */
+			if (sih->chiprev > 0) {
+				W_REG(sii->osh, &cc->chipcontrol, val |
+				      (CCTRL4331_EXTPA_EN) | (CCTRL4331_EXTPA_EN2));
+			} else {
+				W_REG(sii->osh, &cc->chipcontrol, val | (CCTRL4331_EXTPA_EN));
+			}
+		}
+	} else {
+		val &= ~(CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_EN2 | CCTRL4331_EXTPA_ON_GPIO2_5);
+		W_REG(sii->osh, &cc->chipcontrol, val);
+	}
+
+	si_setcoreidx(sih, origidx);
+}
+
+/** switch muxed pins, on: SROM, off: FEMCTRL. Called for a family of ac chips, not just 4360. */
+void
+si_chipcontrl_srom4360(si_t *sih, bool on)
+{
+	si_info_t *sii = SI_INFO(sih);
+	chipcregs_t *cc;
+	uint origidx = si_coreidx(sih);
+	uint32 val;
+
+	cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+	val = R_REG(sii->osh, &cc->chipcontrol);
+
+	if (on) {
+		val &= ~(CCTRL4360_SECI_MODE |
+			CCTRL4360_BTSWCTRL_MODE |
+			CCTRL4360_EXTRA_FEMCTRL_MODE |
+			CCTRL4360_BT_LGCY_MODE |
+			CCTRL4360_CORE2FEMCTRL4_ON);
+
+		W_REG(sii->osh, &cc->chipcontrol, val);
+	} else {
+	}
+
+	si_setcoreidx(sih, origidx);
+}
+
+void
+si_chipcontrl_epa4331_wowl(si_t *sih, bool enter_wowl)
+{
+	si_info_t *sii;
+	chipcregs_t *cc;
+	uint origidx;
+	uint32 val;
+	bool sel_chip;
+
+	sel_chip = (CHIPID(sih->chip) == BCM4331_CHIP_ID) ||
+		(CHIPID(sih->chip) == BCM43431_CHIP_ID);
+	sel_chip &= ((sih->chippkg == 9 || sih->chippkg == 0xb));
+
+	if (!sel_chip)
+		return;
+
+	sii = SI_INFO(sih);
+	origidx = si_coreidx(sih);
+
+	cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+
+	val = R_REG(sii->osh, &cc->chipcontrol);
+
+	if (enter_wowl) {
+		val |= CCTRL4331_EXTPA_EN;
+		W_REG(sii->osh, &cc->chipcontrol, val);
+	} else {
+		val |= (CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_ON_GPIO2_5);
+		W_REG(sii->osh, &cc->chipcontrol, val);
+	}
+	si_setcoreidx(sih, origidx);
+}
+#endif 
+
+uint
+si_pll_reset(si_t *sih)
+{
+	uint err = 0;
+
+	return (err);
+}
+
+/** Enable BT-COEX & Ex-PA for 4313 */
+void
+si_epa_4313war(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	chipcregs_t *cc;
+	uint origidx = si_coreidx(sih);
+
+	cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+
+	/* EPA Fix */
+	W_REG(sii->osh, &cc->gpiocontrol,
+	R_REG(sii->osh, &cc->gpiocontrol) | GPIO_CTRL_EPA_EN_MASK);
+
+	si_setcoreidx(sih, origidx);
+}
+
+void
+si_clk_pmu_htavail_set(si_t *sih, bool set_clear)
+{
+}
+
+/** Re-enable synth_pwrsw resource in min_res_mask for 4313 */
+void
+si_pmu_synth_pwrsw_4313_war(si_t *sih)
+{
+}
+
+/** WL/BT control for 4313 btcombo boards >= P250 */
+void
+si_btcombo_p250_4313_war(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	chipcregs_t *cc;
+	uint origidx = si_coreidx(sih);
+
+	cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+	W_REG(sii->osh, &cc->gpiocontrol,
+		R_REG(sii->osh, &cc->gpiocontrol) | GPIO_CTRL_5_6_EN_MASK);
+
+	W_REG(sii->osh, &cc->gpioouten,
+		R_REG(sii->osh, &cc->gpioouten) | GPIO_CTRL_5_6_EN_MASK);
+
+	si_setcoreidx(sih, origidx);
+}
+void
+si_btc_enable_chipcontrol(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	chipcregs_t *cc;
+	uint origidx = si_coreidx(sih);
+
+	cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+
+	/* BT fix */
+	W_REG(sii->osh, &cc->chipcontrol,
+		R_REG(sii->osh, &cc->chipcontrol) | CC_BTCOEX_EN_MASK);
+
+	si_setcoreidx(sih, origidx);
+}
+void
+si_btcombo_43228_war(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	chipcregs_t *cc;
+	uint origidx = si_coreidx(sih);
+
+	cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+
+	W_REG(sii->osh, &cc->gpioouten, GPIO_CTRL_7_6_EN_MASK);
+	W_REG(sii->osh, &cc->gpioout, GPIO_OUT_7_EN_MASK);
+
+	si_setcoreidx(sih, origidx);
+}
+
+/** check if the device is removed */
+bool
+si_deviceremoved(si_t *sih)
+{
+	uint32 w;
+
+	switch (BUSTYPE(sih->bustype)) {
+	case PCI_BUS:
+		ASSERT(SI_INFO(sih)->osh != NULL);
+		w = OSL_PCI_READ_CONFIG(SI_INFO(sih)->osh, PCI_CFG_VID, sizeof(uint32));
+		if ((w & 0xFFFF) != VENDOR_BROADCOM)
+			return TRUE;
+		break;
+	}
+	return FALSE;
+}
+
+bool
+si_is_sprom_available(si_t *sih)
+{
+	if (sih->ccrev >= 31) {
+		si_info_t *sii;
+		uint origidx;
+		chipcregs_t *cc;
+		uint32 sromctrl;
+
+		if ((sih->cccaps & CC_CAP_SROM) == 0)
+			return FALSE;
+
+		sii = SI_INFO(sih);
+		origidx = sii->curidx;
+		cc = si_setcoreidx(sih, SI_CC_IDX);
+		ASSERT(cc);
+		sromctrl = R_REG(sii->osh, &cc->sromcontrol);
+		si_setcoreidx(sih, origidx);
+		return (sromctrl & SRC_PRESENT);
+	}
+
+	switch (CHIPID(sih->chip)) {
+	case BCM4312_CHIP_ID:
+		return ((sih->chipst & CST4312_SPROM_OTP_SEL_MASK) != CST4312_OTP_SEL);
+	case BCM4325_CHIP_ID:
+		return (sih->chipst & CST4325_SPROM_SEL) != 0;
+	case BCM4322_CHIP_ID:	case BCM43221_CHIP_ID:	case BCM43231_CHIP_ID:
+	case BCM43222_CHIP_ID:	case BCM43111_CHIP_ID:	case BCM43112_CHIP_ID:
+	case BCM4342_CHIP_ID: {
+		uint32 spromotp;
+		spromotp = (sih->chipst & CST4322_SPROM_OTP_SEL_MASK) >>
+		        CST4322_SPROM_OTP_SEL_SHIFT;
+		return (spromotp & CST4322_SPROM_PRESENT) != 0;
+	}
+	case BCM4329_CHIP_ID:
+		return (sih->chipst & CST4329_SPROM_SEL) != 0;
+	case BCM4315_CHIP_ID:
+		return (sih->chipst & CST4315_SPROM_SEL) != 0;
+	case BCM4319_CHIP_ID:
+		return (sih->chipst & CST4319_SPROM_SEL) != 0;
+	case BCM4336_CHIP_ID:
+	case BCM43362_CHIP_ID:
+		return (sih->chipst & CST4336_SPROM_PRESENT) != 0;
+	case BCM4330_CHIP_ID:
+		return (sih->chipst & CST4330_SPROM_PRESENT) != 0;
+	case BCM4313_CHIP_ID:
+		return (sih->chipst & CST4313_SPROM_PRESENT) != 0;
+	case BCM4331_CHIP_ID:
+	case BCM43431_CHIP_ID:
+		return (sih->chipst & CST4331_SPROM_PRESENT) != 0;
+	case BCM43239_CHIP_ID:
+		return ((sih->chipst & CST43239_SPROM_MASK) &&
+			!(sih->chipst & CST43239_SFLASH_MASK));
+	case BCM4324_CHIP_ID:
+	case BCM43242_CHIP_ID:
+		return ((sih->chipst & CST4324_SPROM_MASK) &&
+			!(sih->chipst & CST4324_SFLASH_MASK));
+	case BCM4335_CHIP_ID:
+	case BCM4345_CHIP_ID:
+		return ((sih->chipst & CST4335_SPROM_MASK) &&
+			!(sih->chipst & CST4335_SFLASH_MASK));
+	case BCM4349_CHIP_GRPID:
+		return (sih->chipst & CST4349_SPROM_PRESENT) != 0;
+		break;
+	case BCM4350_CHIP_ID:
+	case BCM4354_CHIP_ID:
+	case BCM4356_CHIP_ID:
+	case BCM43556_CHIP_ID:
+	case BCM43558_CHIP_ID:
+	case BCM43566_CHIP_ID:
+	case BCM43568_CHIP_ID:
+	case BCM43569_CHIP_ID:
+	case BCM43570_CHIP_ID:
+	case BCM4358_CHIP_ID:
+		return (sih->chipst & CST4350_SPROM_PRESENT) != 0;
+	case BCM43602_CHIP_ID:
+		return (sih->chipst & CST43602_SPROM_PRESENT) != 0;
+	case BCM43131_CHIP_ID:
+	case BCM43217_CHIP_ID:
+	case BCM43227_CHIP_ID:
+	case BCM43228_CHIP_ID:
+	case BCM43428_CHIP_ID:
+		return (sih->chipst & CST43228_OTP_PRESENT) != CST43228_OTP_PRESENT;
+	default:
+		return TRUE;
+	}
+}
+
+
+uint32 si_get_sromctl(si_t *sih)
+{
+	chipcregs_t *cc;
+	uint origidx = si_coreidx(sih);
+	uint32 sromctl;
+	osl_t *osh = si_osh(sih);
+
+	cc = si_setcoreidx(sih, SI_CC_IDX);
+	ASSERT((uintptr)cc);
+
+	sromctl = R_REG(osh, &cc->sromcontrol);
+
+	/* return to the original core */
+	si_setcoreidx(sih, origidx);
+	return sromctl;
+}
+
+int si_set_sromctl(si_t *sih, uint32 value)
+{
+	chipcregs_t *cc;
+	uint origidx = si_coreidx(sih);
+	osl_t *osh = si_osh(sih);
+
+	cc = si_setcoreidx(sih, SI_CC_IDX);
+	ASSERT((uintptr)cc);
+
+	/* get chipcommon rev */
+	if (si_corerev(sih) < 32)
+		return BCME_UNSUPPORTED;
+
+	W_REG(osh, &cc->sromcontrol, value);
+
+	/* return to the original core */
+	si_setcoreidx(sih, origidx);
+	return BCME_OK;
+
+}
+
+uint
+si_core_wrapperreg(si_t *sih, uint32 coreidx, uint32 offset, uint32 mask, uint32 val)
+{
+	uint origidx, intr_val = 0;
+	uint ret_val;
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+	origidx = si_coreidx(sih);
+
+	INTR_OFF(sii, intr_val);
+	si_setcoreidx(sih, coreidx);
+
+	ret_val = si_wrapperreg(sih, offset, mask, val);
+
+	/* return to the original core */
+	si_setcoreidx(sih, origidx);
+	INTR_RESTORE(sii, intr_val);
+	return ret_val;
+}
+
+
+/* cleanup the timer from the host when ARM is been halted
+ * without a chance for ARM cleanup its resources
+ * If left not cleanup, Intr from a software timer can still
+ * request HT clk when ARM is halted.
+ */
+uint32
+si_pmu_res_req_timer_clr(si_t *sih)
+{
+	uint32 mask;
+
+	mask = PRRT_REQ_ACTIVE | PRRT_INTEN | PRRT_HT_REQ;
+	if (CHIPID(sih->chip) != BCM4328_CHIP_ID)
+		mask <<= 14;
+	/* clear mask bits */
+	pmu_corereg(sih, SI_CC_IDX, res_req_timer, mask, 0);
+	/* readback to ensure write completes */
+	return pmu_corereg(sih, SI_CC_IDX, res_req_timer, 0, 0);
+}
+
+/** turn on/off rfldo */
+void
+si_pmu_rfldo(si_t *sih, bool on)
+{
+}
+
+
+#ifdef SURVIVE_PERST_ENAB
+static uint32
+si_pcie_survive_perst(si_t *sih, uint32 mask, uint32 val)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+
+	if (!PCIE(sii))
+		return (0);
+
+	return pcie_survive_perst(sii->pch, mask, val);
+}
+
+static void
+si_watchdog_reset(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	uint32 i;
+
+	/* issue a watchdog reset */
+	pmu_corereg(sih, SI_CC_IDX, pmuwatchdog, 2, 2);
+	/* do busy wait for 20ms */
+	for (i = 0; i < 2000; i++) {
+		OSL_DELAY(10);
+	}
+}
+#endif /* SURVIVE_PERST_ENAB */
+
+void
+si_survive_perst_war(si_t *sih, bool reset, uint32 sperst_mask, uint32 sperst_val)
+{
+#ifdef SURVIVE_PERST_ENAB
+	if (BUSTYPE(sih->bustype) != PCI_BUS)
+		  return;
+
+	if ((CHIPID(sih->chip) != BCM4360_CHIP_ID && CHIPID(sih->chip) != BCM4352_CHIP_ID) ||
+	    (CHIPREV(sih->chiprev) >= 4))
+		return;
+
+	if (reset) {
+		si_info_t *sii = SI_INFO(sih);
+		uint32 bar0win, bar0win_after;
+
+		/* save the bar0win */
+		bar0win = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32));
+
+		si_watchdog_reset(sih);
+
+		bar0win_after = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32));
+		if (bar0win_after != bar0win) {
+			SI_ERROR(("%s: bar0win before %08x, bar0win after %08x\n",
+				__FUNCTION__, bar0win, bar0win_after));
+			OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32), bar0win);
+		}
+	}
+	if (sperst_mask) {
+		/* enable survive perst */
+		si_pcie_survive_perst(sih, sperst_mask, sperst_val);
+	}
+#endif /* SURVIVE_PERST_ENAB */
+}
+
+void
+si_pcie_ltr_war(si_t *sih)
+{
+}
+
+void
+si_pcie_hw_LTR_war(si_t *sih)
+{
+}
+
+void
+si_pciedev_reg_pm_clk_period(si_t *sih)
+{
+}
+
+void
+si_pciedev_crwlpciegen2(si_t *sih)
+{
+}
+
+void
+si_pcie_prep_D3(si_t *sih, bool enter_D3)
+{
+}
diff --git a/drivers/net/wireless/bcmdhd/siutils_priv.h b/drivers/net/wireless/bcmdhd/siutils_priv.h
new file mode 100644
index 0000000..38c60a8
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/siutils_priv.h
@@ -0,0 +1,283 @@
+/*
+ * Include file private to the SOC Interconnect support files.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: siutils_priv.h 474902 2014-05-02 18:31:33Z $
+ */
+
+#ifndef	_siutils_priv_h_
+#define	_siutils_priv_h_
+
+#define	SI_ERROR(args)
+
+#define	SI_MSG(args)
+
+#ifdef BCMDBG_SI
+#define	SI_VMSG(args)	printf args
+#else
+#define	SI_VMSG(args)
+#endif
+
+#define	IS_SIM(chippkg)	((chippkg == HDLSIM_PKG_ID) || (chippkg == HWSIM_PKG_ID))
+
+typedef uint32 (*si_intrsoff_t)(void *intr_arg);
+typedef void (*si_intrsrestore_t)(void *intr_arg, uint32 arg);
+typedef bool (*si_intrsenabled_t)(void *intr_arg);
+
+typedef struct gpioh_item {
+	void			*arg;
+	bool			level;
+	gpio_handler_t		handler;
+	uint32			event;
+	struct gpioh_item	*next;
+} gpioh_item_t;
+
+
+#define SI_GPIO_MAX		16
+
+typedef struct gci_gpio_item {
+	void			*arg;
+	uint8			gci_gpio;
+	uint8			status;
+	gci_gpio_handler_t	handler;
+	struct gci_gpio_item	*next;
+} gci_gpio_item_t;
+
+
+typedef struct si_cores_info {
+	void	*regs[SI_MAXCORES];	/* other regs va */
+
+	uint	coreid[SI_MAXCORES];	/* id of each core */
+	uint32	coresba[SI_MAXCORES];	/* backplane address of each core */
+	void	*regs2[SI_MAXCORES];	/* va of each core second register set (usbh20) */
+	uint32	coresba2[SI_MAXCORES];	/* address of each core second register set (usbh20) */
+	uint32	coresba_size[SI_MAXCORES]; /* backplane address space size */
+	uint32	coresba2_size[SI_MAXCORES]; /* second address space size */
+
+	void	*wrappers[SI_MAXCORES];	/* other cores wrapper va */
+	uint32	wrapba[SI_MAXCORES];	/* address of controlling wrapper */
+
+	uint32	cia[SI_MAXCORES];	/* erom cia entry for each core */
+	uint32	cib[SI_MAXCORES];	/* erom cia entry for each core */
+} si_cores_info_t;
+
+/* misc si info needed by some of the routines */
+typedef struct si_info {
+	struct si_pub pub;		/* back plane public state (must be first field) */
+
+	void	*osh;			/* osl os handle */
+	void	*sdh;			/* bcmsdh handle */
+
+	uint	dev_coreid;		/* the core provides driver functions */
+	void	*intr_arg;		/* interrupt callback function arg */
+	si_intrsoff_t intrsoff_fn;	/* turns chip interrupts off */
+	si_intrsrestore_t intrsrestore_fn; /* restore chip interrupts */
+	si_intrsenabled_t intrsenabled_fn; /* check if interrupts are enabled */
+
+	void *pch;			/* PCI/E core handle */
+
+	gpioh_item_t *gpioh_head; 	/* GPIO event handlers list */
+
+	bool	memseg;			/* flag to toggle MEM_SEG register */
+
+	char *vars;
+	uint varsz;
+
+	void	*curmap;		/* current regs va */
+
+	uint	curidx;			/* current core index */
+	uint	numcores;		/* # discovered cores */
+
+	void	*curwrap;		/* current wrapper va */
+
+	uint32	oob_router;		/* oob router registers for axi */
+
+	void *cores_info;
+	gci_gpio_item_t	*gci_gpio_head;	/* gci gpio interrupts head */
+	uint	chipnew;		/* new chip number */
+} si_info_t;
+
+
+#define	SI_INFO(sih)	((si_info_t *)(uintptr)sih)
+
+#define	GOODCOREADDR(x, b) (((x) >= (b)) && ((x) < ((b) + SI_MAXCORES * SI_CORE_SIZE)) && \
+		ISALIGNED((x), SI_CORE_SIZE))
+#define	GOODREGS(regs)	((regs) != NULL && ISALIGNED((uintptr)(regs), SI_CORE_SIZE))
+#define BADCOREADDR	0
+#define	GOODIDX(idx)	(((uint)idx) < SI_MAXCORES)
+#define	NOREV		-1		/* Invalid rev */
+
+#define PCI(si)		((BUSTYPE((si)->pub.bustype) == PCI_BUS) &&	\
+			 ((si)->pub.buscoretype == PCI_CORE_ID))
+
+#define PCIE_GEN1(si)	((BUSTYPE((si)->pub.bustype) == PCI_BUS) &&	\
+			 ((si)->pub.buscoretype == PCIE_CORE_ID))
+
+#define PCIE_GEN2(si)	((BUSTYPE((si)->pub.bustype) == PCI_BUS) &&	\
+			 ((si)->pub.buscoretype == PCIE2_CORE_ID))
+
+#define PCIE(si)	(PCIE_GEN1(si) || PCIE_GEN2(si))
+
+#define PCMCIA(si)	((BUSTYPE((si)->pub.bustype) == PCMCIA_BUS) && ((si)->memseg == TRUE))
+
+/* Newer chips can access PCI/PCIE and CC core without requiring to change
+ * PCI BAR0 WIN
+ */
+#define SI_FAST(si) (PCIE(si) || (PCI(si) && ((si)->pub.buscorerev >= 13)))
+
+#define PCIEREGS(si) (((char *)((si)->curmap) + PCI_16KB0_PCIREGS_OFFSET))
+#define CCREGS_FAST(si) (((char *)((si)->curmap) + PCI_16KB0_CCREGS_OFFSET))
+
+/*
+ * Macros to disable/restore function core(D11, ENET, ILINE20, etc) interrupts before/
+ * after core switching to avoid invalid register accesss inside ISR.
+ */
+#define INTR_OFF(si, intr_val) \
+	if ((si)->intrsoff_fn && (cores_info)->coreid[(si)->curidx] == (si)->dev_coreid) {	\
+		intr_val = (*(si)->intrsoff_fn)((si)->intr_arg); }
+#define INTR_RESTORE(si, intr_val) \
+	if ((si)->intrsrestore_fn && (cores_info)->coreid[(si)->curidx] == (si)->dev_coreid) {	\
+		(*(si)->intrsrestore_fn)((si)->intr_arg, intr_val); }
+
+/* dynamic clock control defines */
+#define	LPOMINFREQ		25000		/* low power oscillator min */
+#define	LPOMAXFREQ		43000		/* low power oscillator max */
+#define	XTALMINFREQ		19800000	/* 20 MHz - 1% */
+#define	XTALMAXFREQ		20200000	/* 20 MHz + 1% */
+#define	PCIMINFREQ		25000000	/* 25 MHz */
+#define	PCIMAXFREQ		34000000	/* 33 MHz + fudge */
+
+#define	ILP_DIV_5MHZ		0		/* ILP = 5 MHz */
+#define	ILP_DIV_1MHZ		4		/* ILP = 1 MHz */
+
+/* Force fast clock for 4360b0 */
+#define PCI_FORCEHT(si)	\
+	(((PCIE_GEN1(si)) && (si->pub.chip == BCM4311_CHIP_ID) && ((si->pub.chiprev <= 1))) || \
+	((PCI(si) || PCIE_GEN1(si)) && (si->pub.chip == BCM4321_CHIP_ID)) || \
+	(PCIE_GEN1(si) && (si->pub.chip == BCM4716_CHIP_ID)) || \
+	(PCIE_GEN1(si) && (si->pub.chip == BCM4748_CHIP_ID)))
+
+/* GPIO Based LED powersave defines */
+#define DEFAULT_GPIO_ONTIME	10		/* Default: 10% on */
+#define DEFAULT_GPIO_OFFTIME	90		/* Default: 10% on */
+
+#ifndef DEFAULT_GPIOTIMERVAL
+#define DEFAULT_GPIOTIMERVAL  ((DEFAULT_GPIO_ONTIME << GPIO_ONTIME_SHIFT) | DEFAULT_GPIO_OFFTIME)
+#endif
+
+/* Silicon Backplane externs */
+extern void sb_scan(si_t *sih, void *regs, uint devid);
+extern uint sb_coreid(si_t *sih);
+extern uint sb_intflag(si_t *sih);
+extern uint sb_flag(si_t *sih);
+extern void sb_setint(si_t *sih, int siflag);
+extern uint sb_corevendor(si_t *sih);
+extern uint sb_corerev(si_t *sih);
+extern uint sb_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val);
+extern uint32 *sb_corereg_addr(si_t *sih, uint coreidx, uint regoff);
+extern bool sb_iscoreup(si_t *sih);
+extern void *sb_setcoreidx(si_t *sih, uint coreidx);
+extern uint32 sb_core_cflags(si_t *sih, uint32 mask, uint32 val);
+extern void sb_core_cflags_wo(si_t *sih, uint32 mask, uint32 val);
+extern uint32 sb_core_sflags(si_t *sih, uint32 mask, uint32 val);
+extern void sb_commit(si_t *sih);
+extern uint32 sb_base(uint32 admatch);
+extern uint32 sb_size(uint32 admatch);
+extern void sb_core_reset(si_t *sih, uint32 bits, uint32 resetbits);
+extern void sb_core_disable(si_t *sih, uint32 bits);
+extern uint32 sb_addrspace(si_t *sih, uint asidx);
+extern uint32 sb_addrspacesize(si_t *sih, uint asidx);
+extern int sb_numaddrspaces(si_t *sih);
+
+extern uint32 sb_set_initiator_to(si_t *sih, uint32 to, uint idx);
+
+extern bool sb_taclear(si_t *sih, bool details);
+
+#if defined(BCMDBG_PHYDUMP)
+extern void sb_dumpregs(si_t *sih, struct bcmstrbuf *b);
+#endif 
+
+/* Wake-on-wireless-LAN (WOWL) */
+extern bool sb_pci_pmecap(si_t *sih);
+struct osl_info;
+extern bool sb_pci_fastpmecap(struct osl_info *osh);
+extern bool sb_pci_pmeclr(si_t *sih);
+extern void sb_pci_pmeen(si_t *sih);
+extern uint sb_pcie_readreg(void *sih, uint addrtype, uint offset);
+
+/* AMBA Interconnect exported externs */
+extern si_t *ai_attach(uint pcidev, osl_t *osh, void *regs, uint bustype,
+                       void *sdh, char **vars, uint *varsz);
+extern si_t *ai_kattach(osl_t *osh);
+extern void ai_scan(si_t *sih, void *regs, uint devid);
+
+extern uint ai_flag(si_t *sih);
+extern uint ai_flag_alt(si_t *sih);
+extern void ai_setint(si_t *sih, int siflag);
+extern uint ai_coreidx(si_t *sih);
+extern uint ai_corevendor(si_t *sih);
+extern uint ai_corerev(si_t *sih);
+extern uint32 *ai_corereg_addr(si_t *sih, uint coreidx, uint regoff);
+extern bool ai_iscoreup(si_t *sih);
+extern void *ai_setcoreidx(si_t *sih, uint coreidx);
+extern uint32 ai_core_cflags(si_t *sih, uint32 mask, uint32 val);
+extern void ai_core_cflags_wo(si_t *sih, uint32 mask, uint32 val);
+extern uint32 ai_core_sflags(si_t *sih, uint32 mask, uint32 val);
+extern uint ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val);
+extern void ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits);
+extern void ai_d11rsdb_core_reset(si_t *sih, uint32 bits,
+	uint32 resetbits, void *p, void *s);
+extern void ai_core_disable(si_t *sih, uint32 bits);
+extern void ai_d11rsdb_core_disable(const si_info_t *sii, uint32 bits,
+	aidmp_t *pmacai, aidmp_t *smacai);
+extern int ai_numaddrspaces(si_t *sih);
+extern uint32 ai_addrspace(si_t *sih, uint asidx);
+extern uint32 ai_addrspacesize(si_t *sih, uint asidx);
+extern void ai_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size);
+extern uint ai_wrap_reg(si_t *sih, uint32 offset, uint32 mask, uint32 val);
+
+#if defined(BCMDBG_PHYDUMP)
+extern void ai_dumpregs(si_t *sih, struct bcmstrbuf *b);
+#endif 
+
+
+#define ub_scan(a, b, c) do {} while (0)
+#define ub_flag(a) (0)
+#define ub_setint(a, b) do {} while (0)
+#define ub_coreidx(a) (0)
+#define ub_corevendor(a) (0)
+#define ub_corerev(a) (0)
+#define ub_iscoreup(a) (0)
+#define ub_setcoreidx(a, b) (0)
+#define ub_core_cflags(a, b, c) (0)
+#define ub_core_cflags_wo(a, b, c) do {} while (0)
+#define ub_core_sflags(a, b, c) (0)
+#define ub_corereg(a, b, c, d, e) (0)
+#define ub_core_reset(a, b, c) do {} while (0)
+#define ub_core_disable(a, b) do {} while (0)
+#define ub_numaddrspaces(a) (0)
+#define ub_addrspace(a, b)  (0)
+#define ub_addrspacesize(a, b) (0)
+#define ub_view(a, b) do {} while (0)
+#define ub_dumpregs(a, b) do {} while (0)
+
+#endif	/* _siutils_priv_h_ */
diff --git a/drivers/net/wireless/bcmdhd/uamp_api.h b/drivers/net/wireless/bcmdhd/uamp_api.h
new file mode 100644
index 0000000..2bd0629
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/uamp_api.h
@@ -0,0 +1,178 @@
+/*
+ *  Name:       uamp_api.h
+ *
+ *  Description: Universal AMP API
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: uamp_api.h 467328 2014-04-03 01:23:40Z $
+ *
+ */
+
+
+#ifndef UAMP_API_H
+#define UAMP_API_H
+
+
+#include "typedefs.h"
+
+
+/*****************************************************************************
+**  Constant and Type Definitions
+******************************************************************************
+*/
+
+#define BT_API
+
+/* Types. */
+typedef bool	BOOLEAN;
+typedef uint8	UINT8;
+typedef uint16	UINT16;
+
+
+/* UAMP identifiers */
+#define UAMP_ID_1   1
+#define UAMP_ID_2   2
+typedef UINT8 tUAMP_ID;
+
+/* UAMP event ids (used by UAMP_CBACK) */
+#define UAMP_EVT_RX_READY           0   /* Data from AMP controller is ready to be read */
+#define UAMP_EVT_CTLR_REMOVED       1   /* Controller removed */
+#define UAMP_EVT_CTLR_READY         2   /* Controller added/ready */
+typedef UINT8 tUAMP_EVT;
+
+
+/* UAMP Channels */
+#define UAMP_CH_HCI_CMD            0   /* HCI Command channel */
+#define UAMP_CH_HCI_EVT            1   /* HCI Event channel */
+#define UAMP_CH_HCI_DATA           2   /* HCI ACL Data channel */
+typedef UINT8 tUAMP_CH;
+
+/* tUAMP_EVT_DATA: union for event-specific data, used by UAMP_CBACK */
+typedef union {
+    tUAMP_CH channel;       /* UAMP_EVT_RX_READY: channel for which rx occured */
+} tUAMP_EVT_DATA;
+
+
+/*****************************************************************************
+**
+** Function:    UAMP_CBACK
+**
+** Description: Callback for events. Register callback using UAMP_Init.
+**
+** Parameters   amp_id:         AMP device identifier that generated the event
+**              amp_evt:        event id
+**              p_amp_evt_data: pointer to event-specific data
+**
+******************************************************************************
+*/
+typedef void (*tUAMP_CBACK)(tUAMP_ID amp_id, tUAMP_EVT amp_evt, tUAMP_EVT_DATA *p_amp_evt_data);
+
+/*****************************************************************************
+**  external function declarations
+******************************************************************************
+*/
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/*****************************************************************************
+**
+** Function:    UAMP_Init
+**
+** Description: Initialize UAMP driver
+**
+** Parameters   p_cback:    Callback function for UAMP event notification
+**
+******************************************************************************
+*/
+BT_API BOOLEAN UAMP_Init(tUAMP_CBACK p_cback);
+
+
+/*****************************************************************************
+**
+** Function:    UAMP_Open
+**
+** Description: Open connection to local AMP device.
+**
+** Parameters   app_id: Application specific AMP identifer. This value
+**                      will be included in AMP messages sent to the
+**                      BTU task, to identify source of the message
+**
+******************************************************************************
+*/
+BT_API BOOLEAN UAMP_Open(tUAMP_ID amp_id);
+
+/*****************************************************************************
+**
+** Function:    UAMP_Close
+**
+** Description: Close connection to local AMP device.
+**
+** Parameters   app_id: Application specific AMP identifer.
+**
+******************************************************************************
+*/
+BT_API void UAMP_Close(tUAMP_ID amp_id);
+
+
+/*****************************************************************************
+**
+** Function:    UAMP_Write
+**
+** Description: Send buffer to AMP device. Frees GKI buffer when done.
+**
+**
+** Parameters:  app_id:     AMP identifer.
+**              p_buf:      pointer to buffer to write
+**              num_bytes:  number of bytes to write
+**              channel:    UAMP_CH_HCI_ACL, or UAMP_CH_HCI_CMD
+**
+** Returns:     number of bytes written
+**
+******************************************************************************
+*/
+BT_API UINT16 UAMP_Write(tUAMP_ID amp_id, UINT8 *p_buf, UINT16 num_bytes, tUAMP_CH channel);
+
+/*****************************************************************************
+**
+** Function:    UAMP_Read
+**
+** Description: Read incoming data from AMP. Call after receiving a
+**              UAMP_EVT_RX_READY callback event.
+**
+** Parameters:  app_id:     AMP identifer.
+**              p_buf:      pointer to buffer for holding incoming AMP data
+**              buf_size:   size of p_buf
+**              channel:    UAMP_CH_HCI_ACL, or UAMP_CH_HCI_EVT
+**
+** Returns:     number of bytes read
+**
+******************************************************************************
+*/
+BT_API UINT16 UAMP_Read(tUAMP_ID amp_id, UINT8 *p_buf, UINT16 buf_size, tUAMP_CH channel);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* UAMP_API_H */
diff --git a/drivers/net/wireless/bcmdhd/wl_android.c b/drivers/net/wireless/bcmdhd/wl_android.c
new file mode 100644
index 0000000..f2bed33
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_android.c
@@ -0,0 +1,1573 @@
+/*
+ * Linux cfg80211 driver - Android related functions
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ *
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ *
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_android.c 470703 2014-04-16 02:25:28Z $
+ */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <net/netlink.h>
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+#endif
+
+#include <wl_android.h>
+#include <wldev_common.h>
+#include <wlioctl.h>
+#include <bcmutils.h>
+#include <linux_osl.h>
+#include <dhd_dbg.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <proto/bcmip.h>
+#ifdef PNO_SUPPORT
+#include <dhd_pno.h>
+#endif
+#ifdef BCMSDIO
+#include <bcmsdbus.h>
+#endif
+#ifdef WL_CFG80211
+#include <wl_cfg80211.h>
+#endif
+
+/*
+ * Android private command strings, PLEASE define new private commands here
+ * so they can be updated easily in the future (if needed)
+ */
+
+#define CMD_START		"START"
+#define CMD_STOP		"STOP"
+#define	CMD_SCAN_ACTIVE		"SCAN-ACTIVE"
+#define	CMD_SCAN_PASSIVE	"SCAN-PASSIVE"
+#define CMD_RSSI		"RSSI"
+#define CMD_LINKSPEED		"LINKSPEED"
+#define CMD_RXFILTER_START	"RXFILTER-START"
+#define CMD_RXFILTER_STOP	"RXFILTER-STOP"
+#define CMD_RXFILTER_ADD	"RXFILTER-ADD"
+#define CMD_RXFILTER_REMOVE	"RXFILTER-REMOVE"
+#define CMD_BTCOEXSCAN_START	"BTCOEXSCAN-START"
+#define CMD_BTCOEXSCAN_STOP	"BTCOEXSCAN-STOP"
+#define CMD_BTCOEXMODE		"BTCOEXMODE"
+#define CMD_SETSUSPENDOPT	"SETSUSPENDOPT"
+#define CMD_SETSUSPENDMODE      "SETSUSPENDMODE"
+#define CMD_P2P_DEV_ADDR	"P2P_DEV_ADDR"
+#define CMD_SETFWPATH		"SETFWPATH"
+#define CMD_SETBAND		"SETBAND"
+#define CMD_GETBAND		"GETBAND"
+#define CMD_COUNTRY		"COUNTRY"
+#define CMD_P2P_SET_NOA		"P2P_SET_NOA"
+#if !defined WL_ENABLE_P2P_IF
+#define CMD_P2P_GET_NOA			"P2P_GET_NOA"
+#endif /* WL_ENABLE_P2P_IF */
+#define CMD_P2P_SD_OFFLOAD		"P2P_SD_"
+#define CMD_P2P_SET_PS		"P2P_SET_PS"
+#define CMD_SET_AP_WPS_P2P_IE 		"SET_AP_WPS_P2P_IE"
+#define CMD_SETROAMMODE 	"SETROAMMODE"
+#define CMD_SETIBSSBEACONOUIDATA	"SETIBSSBEACONOUIDATA"
+#define CMD_MIRACAST		"MIRACAST"
+
+#if defined(WL_SUPPORT_AUTO_CHANNEL)
+#define CMD_GET_BEST_CHANNELS	"GET_BEST_CHANNELS"
+#endif /* WL_SUPPORT_AUTO_CHANNEL */
+
+#define CMD_KEEP_ALIVE		"KEEPALIVE"
+
+/* CCX Private Commands */
+
+#ifdef PNO_SUPPORT
+#define CMD_PNOSSIDCLR_SET	"PNOSSIDCLR"
+#define CMD_PNOSETUP_SET	"PNOSETUP "
+#define CMD_PNOENABLE_SET	"PNOFORCE"
+#define CMD_PNODEBUG_SET	"PNODEBUG"
+#define CMD_WLS_BATCHING	"WLS_BATCHING"
+#endif /* PNO_SUPPORT */
+
+#define CMD_OKC_SET_PMK		"SET_PMK"
+#define CMD_OKC_ENABLE		"OKC_ENABLE"
+
+#define	CMD_HAPD_MAC_FILTER	"HAPD_MAC_FILTER"
+
+
+
+#define CMD_ROAM_OFFLOAD			"SETROAMOFFLOAD"
+
+/* miracast related definition */
+#define MIRACAST_MODE_OFF	0
+#define MIRACAST_MODE_SOURCE	1
+#define MIRACAST_MODE_SINK	2
+
+#ifndef MIRACAST_AMPDU_SIZE
+#define MIRACAST_AMPDU_SIZE	8
+#endif
+
+#ifndef MIRACAST_MCHAN_ALGO
+#define MIRACAST_MCHAN_ALGO     1
+#endif
+
+#ifndef MIRACAST_MCHAN_BW
+#define MIRACAST_MCHAN_BW       25
+#endif
+
+static LIST_HEAD(miracast_resume_list);
+static u8 miracast_cur_mode;
+
+struct io_cfg {
+	s8 *iovar;
+	s32 param;
+	u32 ioctl;
+	void *arg;
+	u32 len;
+	struct list_head list;
+};
+
+typedef struct _android_wifi_priv_cmd {
+	char *buf;
+	int used_len;
+	int total_len;
+} android_wifi_priv_cmd;
+
+#ifdef CONFIG_COMPAT
+typedef struct _compat_android_wifi_priv_cmd {
+	compat_caddr_t buf;
+	int used_len;
+	int total_len;
+} compat_android_wifi_priv_cmd;
+#endif /* CONFIG_COMPAT */
+
+#if defined(BCMFW_ROAM_ENABLE)
+#define CMD_SET_ROAMPREF	"SET_ROAMPREF"
+
+#define MAX_NUM_SUITES		10
+#define WIDTH_AKM_SUITE		8
+#define JOIN_PREF_RSSI_LEN		0x02
+#define JOIN_PREF_RSSI_SIZE		4	/* RSSI pref header size in bytes */
+#define JOIN_PREF_WPA_HDR_SIZE		4 /* WPA pref header size in bytes */
+#define JOIN_PREF_WPA_TUPLE_SIZE	12	/* Tuple size in bytes */
+#define JOIN_PREF_MAX_WPA_TUPLES	16
+#define MAX_BUF_SIZE		(JOIN_PREF_RSSI_SIZE + JOIN_PREF_WPA_HDR_SIZE +	\
+				           (JOIN_PREF_WPA_TUPLE_SIZE * JOIN_PREF_MAX_WPA_TUPLES))
+#endif /* BCMFW_ROAM_ENABLE */
+
+
+/**
+ * Extern function declarations (TODO: move them to dhd_linux.h)
+ */
+int dhd_net_bus_devreset(struct net_device *dev, uint8 flag);
+int dhd_dev_init_ioctl(struct net_device *dev);
+#ifdef WL_CFG80211
+int wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr);
+int wl_cfg80211_set_btcoex_dhcp(struct net_device *dev, dhd_pub_t *dhd, char *command);
+#else
+int wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr)
+{ return 0; }
+int wl_cfg80211_set_p2p_noa(struct net_device *net, char* buf, int len)
+{ return 0; }
+int wl_cfg80211_get_p2p_noa(struct net_device *net, char* buf, int len)
+{ return 0; }
+int wl_cfg80211_set_p2p_ps(struct net_device *net, char* buf, int len)
+{ return 0; }
+#endif /* WK_CFG80211 */
+
+
+#ifdef ENABLE_4335BT_WAR
+extern int bcm_bt_lock(int cookie);
+extern void bcm_bt_unlock(int cookie);
+static int lock_cookie_wifi = 'W' | 'i'<<8 | 'F'<<16 | 'i'<<24;	/* cookie is "WiFi" */
+#endif /* ENABLE_4335BT_WAR */
+
+extern bool ap_fw_loaded;
+#if defined(CUSTOMER_HW2)
+extern char iface_name[IFNAMSIZ];
+#endif
+
+/**
+ * Local (static) functions and variables
+ */
+
+/* Initialize g_wifi_on to 1 so dhd_bus_start will be called for the first
+ * time (only) in dhd_open, subsequential wifi on will be handled by
+ * wl_android_wifi_on
+ */
+static int g_wifi_on = TRUE;
+
+/**
+ * Local (static) function definitions
+ */
+static int wl_android_get_link_speed(struct net_device *net, char *command, int total_len)
+{
+	int link_speed;
+	int bytes_written;
+	int error;
+
+	error = wldev_get_link_speed(net, &link_speed);
+	if (error)
+		return -1;
+
+	/* Convert Kbps to Android Mbps */
+	link_speed = link_speed / 1000;
+	bytes_written = snprintf(command, total_len, "LinkSpeed %d", link_speed);
+	DHD_INFO(("%s: command result is %s\n", __FUNCTION__, command));
+	return bytes_written;
+}
+
+static int wl_android_get_rssi(struct net_device *net, char *command, int total_len)
+{
+	wlc_ssid_t ssid = {0};
+	int rssi;
+	int bytes_written = 0;
+	int error;
+
+	error = wldev_get_rssi(net, &rssi);
+	if (error)
+		return -1;
+
+	error = wldev_get_ssid(net, &ssid);
+	if (error)
+		return -1;
+	if ((ssid.SSID_len == 0) || (ssid.SSID_len > DOT11_MAX_SSID_LEN)) {
+		DHD_ERROR(("%s: wldev_get_ssid failed\n", __FUNCTION__));
+	} else {
+		memcpy(command, ssid.SSID, ssid.SSID_len);
+		bytes_written = ssid.SSID_len;
+	}
+	bytes_written += snprintf(&command[bytes_written], total_len, " rssi %d", rssi);
+	DHD_INFO(("%s: command result is %s (%d)\n", __FUNCTION__, command, bytes_written));
+	return bytes_written;
+}
+
+static int wl_android_set_suspendopt(struct net_device *dev, char *command, int total_len)
+{
+	int suspend_flag;
+	int ret_now;
+	int ret = 0;
+
+		suspend_flag = *(command + strlen(CMD_SETSUSPENDOPT) + 1) - '0';
+
+		if (suspend_flag != 0)
+			suspend_flag = 1;
+		ret_now = net_os_set_suspend_disable(dev, suspend_flag);
+
+		if (ret_now != suspend_flag) {
+			if (!(ret = net_os_set_suspend(dev, ret_now, 1)))
+				DHD_INFO(("%s: Suspend Flag %d -> %d\n",
+					__FUNCTION__, ret_now, suspend_flag));
+			else
+				DHD_ERROR(("%s: failed %d\n", __FUNCTION__, ret));
+		}
+	return ret;
+}
+
+static int wl_android_set_suspendmode(struct net_device *dev, char *command, int total_len)
+{
+	int ret = 0;
+
+#if !defined(CONFIG_HAS_EARLYSUSPEND) || !defined(DHD_USE_EARLYSUSPEND)
+	int suspend_flag;
+
+	suspend_flag = *(command + strlen(CMD_SETSUSPENDMODE) + 1) - '0';
+	if (suspend_flag != 0)
+		suspend_flag = 1;
+
+	if (!(ret = net_os_set_suspend(dev, suspend_flag, 0)))
+		DHD_INFO(("%s: Suspend Mode %d\n", __FUNCTION__, suspend_flag));
+	else
+		DHD_ERROR(("%s: failed %d\n", __FUNCTION__, ret));
+#endif
+
+	return ret;
+}
+
+static int wl_android_get_band(struct net_device *dev, char *command, int total_len)
+{
+	uint band;
+	int bytes_written;
+	int error;
+
+	error = wldev_get_band(dev, &band);
+	if (error)
+		return -1;
+	bytes_written = snprintf(command, total_len, "Band %d", band);
+	return bytes_written;
+}
+
+
+#ifdef PNO_SUPPORT
+#define PNO_PARAM_SIZE 50
+#define VALUE_SIZE 50
+#define LIMIT_STR_FMT  ("%50s %50s")
+static int
+wls_parse_batching_cmd(struct net_device *dev, char *command, int total_len)
+{
+	int err = BCME_OK;
+	uint i, tokens;
+	char *pos, *pos2, *token, *token2, *delim;
+	char param[PNO_PARAM_SIZE], value[VALUE_SIZE];
+	struct dhd_pno_batch_params batch_params;
+	DHD_PNO(("%s: command=%s, len=%d\n", __FUNCTION__, command, total_len));
+	if (total_len < strlen(CMD_WLS_BATCHING)) {
+		DHD_ERROR(("%s argument=%d less min size\n", __FUNCTION__, total_len));
+		err = BCME_ERROR;
+		goto exit;
+	}
+	pos = command + strlen(CMD_WLS_BATCHING) + 1;
+	memset(&batch_params, 0, sizeof(struct dhd_pno_batch_params));
+
+	if (!strncmp(pos, PNO_BATCHING_SET, strlen(PNO_BATCHING_SET))) {
+		pos += strlen(PNO_BATCHING_SET) + 1;
+		while ((token = strsep(&pos, PNO_PARAMS_DELIMETER)) != NULL) {
+			memset(param, 0, sizeof(param));
+			memset(value, 0, sizeof(value));
+			if (token == NULL || !*token)
+				break;
+			if (*token == '\0')
+				continue;
+			delim = strchr(token, PNO_PARAM_VALUE_DELLIMETER);
+			if (delim != NULL)
+				*delim = ' ';
+
+			tokens = sscanf(token, LIMIT_STR_FMT, param, value);
+			if (!strncmp(param, PNO_PARAM_SCANFREQ, strlen(PNO_PARAM_SCANFREQ))) {
+				batch_params.scan_fr = simple_strtol(value, NULL, 0);
+				DHD_PNO(("scan_freq : %d\n", batch_params.scan_fr));
+			} else if (!strncmp(param, PNO_PARAM_BESTN, strlen(PNO_PARAM_BESTN))) {
+				batch_params.bestn = simple_strtol(value, NULL, 0);
+				DHD_PNO(("bestn : %d\n", batch_params.bestn));
+			} else if (!strncmp(param, PNO_PARAM_MSCAN, strlen(PNO_PARAM_MSCAN))) {
+				batch_params.mscan = simple_strtol(value, NULL, 0);
+				DHD_PNO(("mscan : %d\n", batch_params.mscan));
+			} else if (!strncmp(param, PNO_PARAM_CHANNEL, strlen(PNO_PARAM_CHANNEL))) {
+				i = 0;
+				pos2 = value;
+				tokens = sscanf(value, "<%s>", value);
+				if (tokens != 1) {
+					err = BCME_ERROR;
+					DHD_ERROR(("%s : invalid format for channel"
+					" <> params\n", __FUNCTION__));
+					goto exit;
+				}
+					while ((token2 = strsep(&pos2,
+					PNO_PARAM_CHANNEL_DELIMETER)) != NULL) {
+					if (token2 == NULL || !*token2)
+						break;
+					if (*token2 == '\0')
+						continue;
+					if (*token2 == 'A' || *token2 == 'B') {
+						batch_params.band = (*token2 == 'A')?
+							WLC_BAND_5G : WLC_BAND_2G;
+						DHD_PNO(("band : %s\n",
+							(*token2 == 'A')? "A" : "B"));
+					} else {
+						batch_params.chan_list[i++] =
+						simple_strtol(token2, NULL, 0);
+						batch_params.nchan++;
+						DHD_PNO(("channel :%d\n",
+						batch_params.chan_list[i-1]));
+					}
+				 }
+			} else if (!strncmp(param, PNO_PARAM_RTT, strlen(PNO_PARAM_RTT))) {
+				batch_params.rtt = simple_strtol(value, NULL, 0);
+				DHD_PNO(("rtt : %d\n", batch_params.rtt));
+			} else {
+				DHD_ERROR(("%s : unknown param: %s\n", __FUNCTION__, param));
+				err = BCME_ERROR;
+				goto exit;
+			}
+		}
+		err = dhd_dev_pno_set_for_batch(dev, &batch_params);
+		if (err < 0) {
+			DHD_ERROR(("failed to configure batch scan\n"));
+		} else {
+			memset(command, 0, total_len);
+			err = sprintf(command, "%d", err);
+		}
+	} else if (!strncmp(pos, PNO_BATCHING_GET, strlen(PNO_BATCHING_GET))) {
+		err = dhd_dev_pno_get_for_batch(dev, command, total_len);
+		if (err < 0) {
+			DHD_ERROR(("failed to getting batching results\n"));
+		} else {
+			err = strlen(command);
+		}
+	} else if (!strncmp(pos, PNO_BATCHING_STOP, strlen(PNO_BATCHING_STOP))) {
+		err = dhd_dev_pno_stop_for_batch(dev);
+		if (err < 0) {
+			DHD_ERROR(("failed to stop batching scan\n"));
+		} else {
+			memset(command, 0, total_len);
+			err = sprintf(command, "OK");
+		}
+	} else {
+		DHD_ERROR(("%s : unknown command\n", __FUNCTION__));
+		err = BCME_ERROR;
+		goto exit;
+	}
+exit:
+	return err;
+}
+#ifndef WL_SCHED_SCAN
+static int wl_android_set_pno_setup(struct net_device *dev, char *command, int total_len)
+{
+	wlc_ssid_ext_t ssids_local[MAX_PFN_LIST_COUNT];
+	int res = -1;
+	int nssid = 0;
+	cmd_tlv_t *cmd_tlv_temp;
+	char *str_ptr;
+	int tlv_size_left;
+	int pno_time = 0;
+	int pno_repeat = 0;
+	int pno_freq_expo_max = 0;
+
+#ifdef PNO_SET_DEBUG
+	int i;
+	char pno_in_example[] = {
+		'P', 'N', 'O', 'S', 'E', 'T', 'U', 'P', ' ',
+		'S', '1', '2', '0',
+		'S',
+		0x05,
+		'd', 'l', 'i', 'n', 'k',
+		'S',
+		0x04,
+		'G', 'O', 'O', 'G',
+		'T',
+		'0', 'B',
+		'R',
+		'2',
+		'M',
+		'2',
+		0x00
+		};
+#endif /* PNO_SET_DEBUG */
+	DHD_PNO(("%s: command=%s, len=%d\n", __FUNCTION__, command, total_len));
+
+	if (total_len < (strlen(CMD_PNOSETUP_SET) + sizeof(cmd_tlv_t))) {
+		DHD_ERROR(("%s argument=%d less min size\n", __FUNCTION__, total_len));
+		goto exit_proc;
+	}
+#ifdef PNO_SET_DEBUG
+	memcpy(command, pno_in_example, sizeof(pno_in_example));
+	total_len = sizeof(pno_in_example);
+#endif
+	str_ptr = command + strlen(CMD_PNOSETUP_SET);
+	tlv_size_left = total_len - strlen(CMD_PNOSETUP_SET);
+
+	cmd_tlv_temp = (cmd_tlv_t *)str_ptr;
+	memset(ssids_local, 0, sizeof(ssids_local));
+
+	if ((cmd_tlv_temp->prefix == PNO_TLV_PREFIX) &&
+		(cmd_tlv_temp->version == PNO_TLV_VERSION) &&
+		(cmd_tlv_temp->subtype == PNO_TLV_SUBTYPE_LEGACY_PNO)) {
+
+		str_ptr += sizeof(cmd_tlv_t);
+		tlv_size_left -= sizeof(cmd_tlv_t);
+
+		if ((nssid = wl_iw_parse_ssid_list_tlv(&str_ptr, ssids_local,
+			MAX_PFN_LIST_COUNT, &tlv_size_left)) <= 0) {
+			DHD_ERROR(("SSID is not presented or corrupted ret=%d\n", nssid));
+			goto exit_proc;
+		} else {
+			if ((str_ptr[0] != PNO_TLV_TYPE_TIME) || (tlv_size_left <= 1)) {
+				DHD_ERROR(("%s scan duration corrupted field size %d\n",
+					__FUNCTION__, tlv_size_left));
+				goto exit_proc;
+			}
+			str_ptr++;
+			pno_time = simple_strtoul(str_ptr, &str_ptr, 16);
+			DHD_PNO(("%s: pno_time=%d\n", __FUNCTION__, pno_time));
+
+			if (str_ptr[0] != 0) {
+				if ((str_ptr[0] != PNO_TLV_FREQ_REPEAT)) {
+					DHD_ERROR(("%s pno repeat : corrupted field\n",
+						__FUNCTION__));
+					goto exit_proc;
+				}
+				str_ptr++;
+				pno_repeat = simple_strtoul(str_ptr, &str_ptr, 16);
+				DHD_PNO(("%s :got pno_repeat=%d\n", __FUNCTION__, pno_repeat));
+				if (str_ptr[0] != PNO_TLV_FREQ_EXPO_MAX) {
+					DHD_ERROR(("%s FREQ_EXPO_MAX corrupted field size\n",
+						__FUNCTION__));
+					goto exit_proc;
+				}
+				str_ptr++;
+				pno_freq_expo_max = simple_strtoul(str_ptr, &str_ptr, 16);
+				DHD_PNO(("%s: pno_freq_expo_max=%d\n",
+					__FUNCTION__, pno_freq_expo_max));
+			}
+		}
+	} else {
+		DHD_ERROR(("%s get wrong TLV command\n", __FUNCTION__));
+		goto exit_proc;
+	}
+
+	res = dhd_dev_pno_set_for_ssid(dev, ssids_local, nssid, pno_time, pno_repeat,
+		pno_freq_expo_max, NULL, 0);
+exit_proc:
+	return res;
+}
+#endif /* !WL_SCHED_SCAN */
+#endif /* PNO_SUPPORT  */
+
+static int wl_android_get_p2p_dev_addr(struct net_device *ndev, char *command, int total_len)
+{
+	int ret;
+	int bytes_written = 0;
+
+	ret = wl_cfg80211_get_p2p_dev_addr(ndev, (struct ether_addr*)command);
+	if (ret)
+		return 0;
+	bytes_written = sizeof(struct ether_addr);
+	return bytes_written;
+}
+
+
+int
+wl_android_set_ap_mac_list(struct net_device *dev, int macmode, struct maclist *maclist)
+{
+	int i, j, match;
+	int ret	= 0;
+	char mac_buf[MAX_NUM_OF_ASSOCLIST *
+		sizeof(struct ether_addr) + sizeof(uint)] = {0};
+	struct maclist *assoc_maclist = (struct maclist *)mac_buf;
+
+	/* set filtering mode */
+	if ((ret = wldev_ioctl(dev, WLC_SET_MACMODE, &macmode, sizeof(macmode), true)) != 0) {
+		DHD_ERROR(("%s : WLC_SET_MACMODE error=%d\n", __FUNCTION__, ret));
+		return ret;
+	}
+	if (macmode != MACLIST_MODE_DISABLED) {
+		/* set the MAC filter list */
+		if ((ret = wldev_ioctl(dev, WLC_SET_MACLIST, maclist,
+			sizeof(int) + sizeof(struct ether_addr) * maclist->count, true)) != 0) {
+			DHD_ERROR(("%s : WLC_SET_MACLIST error=%d\n", __FUNCTION__, ret));
+			return ret;
+		}
+		/* get the current list of associated STAs */
+		assoc_maclist->count = MAX_NUM_OF_ASSOCLIST;
+		if ((ret = wldev_ioctl(dev, WLC_GET_ASSOCLIST, assoc_maclist,
+			sizeof(mac_buf), false)) != 0) {
+			DHD_ERROR(("%s : WLC_GET_ASSOCLIST error=%d\n", __FUNCTION__, ret));
+			return ret;
+		}
+		/* do we have any STA associated?  */
+		if (assoc_maclist->count) {
+			/* iterate each associated STA */
+			for (i = 0; i < assoc_maclist->count; i++) {
+				match = 0;
+				/* compare with each entry */
+				for (j = 0; j < maclist->count; j++) {
+					DHD_INFO(("%s : associated="MACDBG " list="MACDBG "\n",
+					__FUNCTION__, MAC2STRDBG(assoc_maclist->ea[i].octet),
+					MAC2STRDBG(maclist->ea[j].octet)));
+					if (memcmp(assoc_maclist->ea[i].octet,
+						maclist->ea[j].octet, ETHER_ADDR_LEN) == 0) {
+						match = 1;
+						break;
+					}
+				}
+				/* do conditional deauth */
+				/*   "if not in the allow list" or "if in the deny list" */
+				if ((macmode == MACLIST_MODE_ALLOW && !match) ||
+					(macmode == MACLIST_MODE_DENY && match)) {
+					scb_val_t scbval;
+
+					scbval.val = htod32(1);
+					memcpy(&scbval.ea, &assoc_maclist->ea[i],
+						ETHER_ADDR_LEN);
+					if ((ret = wldev_ioctl(dev,
+						WLC_SCB_DEAUTHENTICATE_FOR_REASON,
+						&scbval, sizeof(scb_val_t), true)) != 0)
+						DHD_ERROR(("%s WLC_SCB_DEAUTHENTICATE error=%d\n",
+							__FUNCTION__, ret));
+				}
+			}
+		}
+	}
+	return ret;
+}
+
+/*
+ * HAPD_MAC_FILTER mac_mode mac_cnt mac_addr1 mac_addr2
+ *
+ */
+static int
+wl_android_set_mac_address_filter(struct net_device *dev, const char* str)
+{
+	int i;
+	int ret = 0;
+	int macnum = 0;
+	int macmode = MACLIST_MODE_DISABLED;
+	struct maclist *list;
+	char eabuf[ETHER_ADDR_STR_LEN];
+
+	/* string should look like below (macmode/macnum/maclist) */
+	/*   1 2 00:11:22:33:44:55 00:11:22:33:44:ff  */
+
+	/* get the MAC filter mode */
+	macmode = bcm_atoi(strsep((char**)&str, " "));
+
+	if (macmode < MACLIST_MODE_DISABLED || macmode > MACLIST_MODE_ALLOW) {
+		DHD_ERROR(("%s : invalid macmode %d\n", __FUNCTION__, macmode));
+		return -1;
+	}
+
+	macnum = bcm_atoi(strsep((char**)&str, " "));
+	if (macnum < 0 || macnum > MAX_NUM_MAC_FILT) {
+		DHD_ERROR(("%s : invalid number of MAC address entries %d\n",
+			__FUNCTION__, macnum));
+		return -1;
+	}
+	/* allocate memory for the MAC list */
+	list = (struct maclist*)kmalloc(sizeof(int) +
+		sizeof(struct ether_addr) * macnum, GFP_KERNEL);
+	if (!list) {
+		DHD_ERROR(("%s : failed to allocate memory\n", __FUNCTION__));
+		return -1;
+	}
+	/* prepare the MAC list */
+	list->count = htod32(macnum);
+	bzero((char *)eabuf, ETHER_ADDR_STR_LEN);
+	for (i = 0; i < list->count; i++) {
+		strncpy(eabuf, strsep((char**)&str, " "), ETHER_ADDR_STR_LEN - 1);
+		if (!(ret = bcm_ether_atoe(eabuf, &list->ea[i]))) {
+			DHD_ERROR(("%s : mac parsing err index=%d, addr=%s\n",
+				__FUNCTION__, i, eabuf));
+			list->count--;
+			break;
+		}
+		DHD_INFO(("%s : %d/%d MACADDR=%s", __FUNCTION__, i, list->count, eabuf));
+	}
+	/* set the list */
+	if ((ret = wl_android_set_ap_mac_list(dev, macmode, list)) != 0)
+		DHD_ERROR(("%s : Setting MAC list failed error=%d\n", __FUNCTION__, ret));
+
+	kfree(list);
+
+	return 0;
+}
+
+/**
+ * Global function definitions (declared in wl_android.h)
+ */
+
+int wl_android_wifi_on(struct net_device *dev)
+{
+	int ret = 0;
+	int retry = POWERUP_MAX_RETRY;
+
+	DHD_ERROR(("%s in\n", __FUNCTION__));
+	if (!dev) {
+		DHD_ERROR(("%s: dev is null\n", __FUNCTION__));
+		return -EINVAL;
+	}
+
+	dhd_net_if_lock(dev);
+	if (!g_wifi_on) {
+		do {
+			dhd_net_wifi_platform_set_power(dev, TRUE, WIFI_TURNON_DELAY);
+#ifdef BCMSDIO
+			ret = dhd_net_bus_resume(dev, 0);
+#endif
+			if (ret == 0)
+				break;
+			DHD_ERROR(("\nfailed to power up wifi chip, retry again (%d left) **\n\n",
+				retry+1));
+			dhd_net_wifi_platform_set_power(dev, FALSE, WIFI_TURNOFF_DELAY);
+		} while (retry-- >= 0);
+		if (ret != 0) {
+			DHD_ERROR(("\nfailed to power up wifi chip, max retry reached **\n\n"));
+			goto exit;
+		}
+#if defined(BCMSDIO) || defined(BCMPCIE)
+		ret = dhd_net_bus_devreset(dev, FALSE);
+#ifdef BCMSDIO
+		dhd_net_bus_resume(dev, 1);
+#endif
+#endif /* BCMSDIO || BCMPCIE */
+#ifndef BCMPCIE
+		if (!ret) {
+			if (dhd_dev_init_ioctl(dev) < 0)
+				ret = -EFAULT;
+		}
+#endif
+	if (!ret)
+		g_wifi_on = TRUE;
+	}
+
+exit:
+	dhd_net_if_unlock(dev);
+
+	return ret;
+}
+
+int wl_android_wifi_off(struct net_device *dev, bool on_failure)
+{
+	int ret = 0;
+
+	DHD_ERROR(("%s in\n", __FUNCTION__));
+	if (!dev) {
+		DHD_TRACE(("%s: dev is null\n", __FUNCTION__));
+		return -EINVAL;
+	}
+
+	dhd_net_if_lock(dev);
+	if (g_wifi_on || on_failure) {
+#if defined(BCMSDIO) || defined(BCMPCIE)
+		ret = dhd_net_bus_devreset(dev, TRUE);
+#ifdef BCMSDIO
+		dhd_net_bus_suspend(dev);
+#endif
+#endif /* BCMSDIO || BCMPCIE */
+		dhd_net_wifi_platform_set_power(dev, FALSE, WIFI_TURNOFF_DELAY);
+		g_wifi_on = FALSE;
+	}
+	dhd_net_if_unlock(dev);
+
+	return ret;
+}
+
+static int wl_android_set_fwpath(struct net_device *net, char *command, int total_len)
+{
+	if ((strlen(command) - strlen(CMD_SETFWPATH)) > MOD_PARAM_PATHLEN)
+		return -1;
+	return dhd_net_set_fw_path(net, command + strlen(CMD_SETFWPATH) + 1);
+}
+
+
+static int
+wl_android_set_pmk(struct net_device *dev, char *command, int total_len)
+{
+	uchar pmk[33];
+	int error = 0;
+	char smbuf[WLC_IOCTL_SMLEN];
+#ifdef OKC_DEBUG
+	int i = 0;
+#endif
+
+	bzero(pmk, sizeof(pmk));
+	memcpy((char *)pmk, command + strlen("SET_PMK "), 32);
+	error = wldev_iovar_setbuf(dev, "okc_info_pmk", pmk, 32, smbuf, sizeof(smbuf), NULL);
+	if (error) {
+		DHD_ERROR(("Failed to set PMK for OKC, error = %d\n", error));
+	}
+#ifdef OKC_DEBUG
+	DHD_ERROR(("PMK is "));
+	for (i = 0; i < 32; i++)
+		DHD_ERROR(("%02X ", pmk[i]));
+
+	DHD_ERROR(("\n"));
+#endif
+	return error;
+}
+
+static int
+wl_android_okc_enable(struct net_device *dev, char *command, int total_len)
+{
+	int error = 0;
+	char okc_enable = 0;
+
+	okc_enable = command[strlen(CMD_OKC_ENABLE) + 1] - '0';
+	error = wldev_iovar_setint(dev, "okc_enable", okc_enable);
+	if (error) {
+		DHD_ERROR(("Failed to %s OKC, error = %d\n",
+			okc_enable ? "enable" : "disable", error));
+	}
+
+	wldev_iovar_setint(dev, "ccx_enable", 0);
+
+	return error;
+}
+
+
+
+int wl_android_set_roam_mode(struct net_device *dev, char *command, int total_len)
+{
+	int error = 0;
+	int mode = 0;
+
+	if (sscanf(command, "%*s %d", &mode) != 1) {
+		DHD_ERROR(("%s: Failed to get Parameter\n", __FUNCTION__));
+		return -1;
+	}
+
+	error = wldev_iovar_setint(dev, "roam_off", mode);
+	if (error) {
+		DHD_ERROR(("%s: Failed to set roaming Mode %d, error = %d\n",
+		__FUNCTION__, mode, error));
+		return -1;
+	}
+	else
+		DHD_ERROR(("%s: succeeded to set roaming Mode %d, error = %d\n",
+		__FUNCTION__, mode, error));
+	return 0;
+}
+
+int wl_android_set_ibss_beacon_ouidata(struct net_device *dev, char *command, int total_len)
+{
+	char ie_buf[VNDR_IE_MAX_LEN];
+	char *ioctl_buf = NULL;
+	char hex[] = "XX";
+	char *pcmd = NULL;
+	int ielen = 0, datalen = 0, idx = 0, tot_len = 0;
+	vndr_ie_setbuf_t *vndr_ie = NULL;
+	s32 iecount;
+	uint32 pktflag;
+	u16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+	s32 err = BCME_OK;
+
+	/* Check the VSIE (Vendor Specific IE) which was added.
+	 *  If exist then send IOVAR to delete it
+	 */
+	if (wl_cfg80211_ibss_vsie_delete(dev) != BCME_OK) {
+		return -EINVAL;
+	}
+
+	pcmd = command + strlen(CMD_SETIBSSBEACONOUIDATA) + 1;
+	for (idx = 0; idx < DOT11_OUI_LEN; idx++) {
+		hex[0] = *pcmd++;
+		hex[1] = *pcmd++;
+		ie_buf[idx] =  (uint8)simple_strtoul(hex, NULL, 16);
+	}
+	pcmd++;
+	while ((*pcmd != '\0') && (idx < VNDR_IE_MAX_LEN)) {
+		hex[0] = *pcmd++;
+		hex[1] = *pcmd++;
+		ie_buf[idx++] =  (uint8)simple_strtoul(hex, NULL, 16);
+		datalen++;
+	}
+	tot_len = sizeof(vndr_ie_setbuf_t) + (datalen - 1);
+	vndr_ie = (vndr_ie_setbuf_t *) kzalloc(tot_len, kflags);
+	if (!vndr_ie) {
+		WL_ERR(("IE memory alloc failed\n"));
+		return -ENOMEM;
+	}
+	/* Copy the vndr_ie SET command ("add"/"del") to the buffer */
+	strncpy(vndr_ie->cmd, "add", VNDR_IE_CMD_LEN - 1);
+	vndr_ie->cmd[VNDR_IE_CMD_LEN - 1] = '\0';
+
+	/* Set the IE count - the buffer contains only 1 IE */
+	iecount = htod32(1);
+	memcpy((void *)&vndr_ie->vndr_ie_buffer.iecount, &iecount, sizeof(s32));
+
+	/* Set packet flag to indicate that BEACON's will contain this IE */
+	pktflag = htod32(VNDR_IE_BEACON_FLAG | VNDR_IE_PRBRSP_FLAG);
+	memcpy((void *)&vndr_ie->vndr_ie_buffer.vndr_ie_list[0].pktflag, &pktflag,
+		sizeof(u32));
+	/* Set the IE ID */
+	vndr_ie->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.id = (uchar) DOT11_MNG_PROPR_ID;
+
+	memcpy(&vndr_ie->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.oui, &ie_buf,
+		DOT11_OUI_LEN);
+	memcpy(&vndr_ie->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.data,
+		&ie_buf[DOT11_OUI_LEN], datalen);
+
+	ielen = DOT11_OUI_LEN + datalen;
+	vndr_ie->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.len = (uchar) ielen;
+
+	ioctl_buf = kmalloc(WLC_IOCTL_MEDLEN, GFP_KERNEL);
+	if (!ioctl_buf) {
+		WL_ERR(("ioctl memory alloc failed\n"));
+		if (vndr_ie) {
+			kfree(vndr_ie);
+		}
+		return -ENOMEM;
+	}
+	memset(ioctl_buf, 0, WLC_IOCTL_MEDLEN);	/* init the buffer */
+	err = wldev_iovar_setbuf(dev, "ie", vndr_ie, tot_len, ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
+
+
+	if (err != BCME_OK) {
+		err = -EINVAL;
+		if (vndr_ie) {
+			kfree(vndr_ie);
+		}
+	}
+	else {
+		/* do NOT free 'vndr_ie' for the next process */
+		wl_cfg80211_ibss_vsie_set_buffer(vndr_ie, tot_len);
+	}
+
+	if (ioctl_buf) {
+		kfree(ioctl_buf);
+	}
+
+	return err;
+}
+
+#if defined(BCMFW_ROAM_ENABLE)
+static int
+wl_android_set_roampref(struct net_device *dev, char *command, int total_len)
+{
+	int error = 0;
+	char smbuf[WLC_IOCTL_SMLEN];
+	uint8 buf[MAX_BUF_SIZE];
+	uint8 *pref = buf;
+	char *pcmd;
+	int num_ucipher_suites = 0;
+	int num_akm_suites = 0;
+	wpa_suite_t ucipher_suites[MAX_NUM_SUITES];
+	wpa_suite_t akm_suites[MAX_NUM_SUITES];
+	int num_tuples = 0;
+	int total_bytes = 0;
+	int total_len_left;
+	int i, j;
+	char hex[] = "XX";
+
+	pcmd = command + strlen(CMD_SET_ROAMPREF) + 1;
+	total_len_left = total_len - strlen(CMD_SET_ROAMPREF) + 1;
+
+	num_akm_suites = simple_strtoul(pcmd, NULL, 16);
+	/* Increment for number of AKM suites field + space */
+	pcmd += 3;
+	total_len_left -= 3;
+
+	/* check to make sure pcmd does not overrun */
+	if (total_len_left < (num_akm_suites * WIDTH_AKM_SUITE))
+		return -1;
+
+	memset(buf, 0, sizeof(buf));
+	memset(akm_suites, 0, sizeof(akm_suites));
+	memset(ucipher_suites, 0, sizeof(ucipher_suites));
+
+	/* Save the AKM suites passed in the command */
+	for (i = 0; i < num_akm_suites; i++) {
+		/* Store the MSB first, as required by join_pref */
+		for (j = 0; j < 4; j++) {
+			hex[0] = *pcmd++;
+			hex[1] = *pcmd++;
+			buf[j] = (uint8)simple_strtoul(hex, NULL, 16);
+		}
+		memcpy((uint8 *)&akm_suites[i], buf, sizeof(uint32));
+	}
+
+	total_len_left -= (num_akm_suites * WIDTH_AKM_SUITE);
+	num_ucipher_suites = simple_strtoul(pcmd, NULL, 16);
+	/* Increment for number of cipher suites field + space */
+	pcmd += 3;
+	total_len_left -= 3;
+
+	if (total_len_left < (num_ucipher_suites * WIDTH_AKM_SUITE))
+		return -1;
+
+	/* Save the cipher suites passed in the command */
+	for (i = 0; i < num_ucipher_suites; i++) {
+		/* Store the MSB first, as required by join_pref */
+		for (j = 0; j < 4; j++) {
+			hex[0] = *pcmd++;
+			hex[1] = *pcmd++;
+			buf[j] = (uint8)simple_strtoul(hex, NULL, 16);
+		}
+		memcpy((uint8 *)&ucipher_suites[i], buf, sizeof(uint32));
+	}
+
+	/* Join preference for RSSI
+	 * Type	  : 1 byte (0x01)
+	 * Length : 1 byte (0x02)
+	 * Value  : 2 bytes	(reserved)
+	 */
+	*pref++ = WL_JOIN_PREF_RSSI;
+	*pref++ = JOIN_PREF_RSSI_LEN;
+	*pref++ = 0;
+	*pref++ = 0;
+
+	/* Join preference for WPA
+	 * Type	  : 1 byte (0x02)
+	 * Length : 1 byte (not used)
+	 * Value  : (variable length)
+	 *		reserved: 1 byte
+	 *      count	: 1 byte (no of tuples)
+	 *		Tuple1	: 12 bytes
+	 *			akm[4]
+	 *			ucipher[4]
+	 *			mcipher[4]
+	 *		Tuple2	: 12 bytes
+	 *		Tuplen	: 12 bytes
+	 */
+	num_tuples = num_akm_suites * num_ucipher_suites;
+	if (num_tuples != 0) {
+		if (num_tuples <= JOIN_PREF_MAX_WPA_TUPLES) {
+			*pref++ = WL_JOIN_PREF_WPA;
+			*pref++ = 0;
+			*pref++ = 0;
+			*pref++ = (uint8)num_tuples;
+			total_bytes = JOIN_PREF_RSSI_SIZE + JOIN_PREF_WPA_HDR_SIZE +
+				(JOIN_PREF_WPA_TUPLE_SIZE * num_tuples);
+		} else {
+			DHD_ERROR(("%s: Too many wpa configs for join_pref \n", __FUNCTION__));
+			return -1;
+		}
+	} else {
+		/* No WPA config, configure only RSSI preference */
+		total_bytes = JOIN_PREF_RSSI_SIZE;
+	}
+
+	/* akm-ucipher-mcipher tuples in the format required for join_pref */
+	for (i = 0; i < num_ucipher_suites; i++) {
+		for (j = 0; j < num_akm_suites; j++) {
+			memcpy(pref, (uint8 *)&akm_suites[j], WPA_SUITE_LEN);
+			pref += WPA_SUITE_LEN;
+			memcpy(pref, (uint8 *)&ucipher_suites[i], WPA_SUITE_LEN);
+			pref += WPA_SUITE_LEN;
+			/* Set to 0 to match any available multicast cipher */
+			memset(pref, 0, WPA_SUITE_LEN);
+			pref += WPA_SUITE_LEN;
+		}
+	}
+
+	prhex("join pref", (uint8 *)buf, total_bytes);
+	error = wldev_iovar_setbuf(dev, "join_pref", buf, total_bytes, smbuf, sizeof(smbuf), NULL);
+	if (error) {
+		DHD_ERROR(("Failed to set join_pref, error = %d\n", error));
+	}
+	return error;
+}
+#endif /* defined(BCMFW_ROAM_ENABLE */
+
+static int
+wl_android_iolist_add(struct net_device *dev, struct list_head *head, struct io_cfg *config)
+{
+	struct io_cfg *resume_cfg;
+	s32 ret;
+
+	resume_cfg = kzalloc(sizeof(struct io_cfg), GFP_KERNEL);
+	if (!resume_cfg)
+		return -ENOMEM;
+
+	if (config->iovar) {
+		ret = wldev_iovar_getint(dev, config->iovar, &resume_cfg->param);
+		if (ret) {
+			DHD_ERROR(("%s: Failed to get current %s value\n",
+				__FUNCTION__, config->iovar));
+			goto error;
+		}
+
+		ret = wldev_iovar_setint(dev, config->iovar, config->param);
+		if (ret) {
+			DHD_ERROR(("%s: Failed to set %s to %d\n", __FUNCTION__,
+				config->iovar, config->param));
+			goto error;
+		}
+
+		resume_cfg->iovar = config->iovar;
+	} else {
+		resume_cfg->arg = kzalloc(config->len, GFP_KERNEL);
+		if (!resume_cfg->arg) {
+			ret = -ENOMEM;
+			goto error;
+		}
+		ret = wldev_ioctl(dev, config->ioctl, resume_cfg->arg, config->len, false);
+		if (ret) {
+			DHD_ERROR(("%s: Failed to get ioctl %d\n", __FUNCTION__,
+				config->ioctl));
+			goto error;
+		}
+		ret = wldev_ioctl(dev, config->ioctl + 1, config->arg, config->len, true);
+		if (ret) {
+			DHD_ERROR(("%s: Failed to set %s to %d\n", __FUNCTION__,
+				config->iovar, config->param));
+			goto error;
+		}
+		if (config->ioctl + 1 == WLC_SET_PM)
+			wl_cfg80211_update_power_mode(dev);
+		resume_cfg->ioctl = config->ioctl;
+		resume_cfg->len = config->len;
+	}
+
+	list_add(&resume_cfg->list, head);
+
+	return 0;
+error:
+	kfree(resume_cfg->arg);
+	kfree(resume_cfg);
+	return ret;
+}
+
+static void
+wl_android_iolist_resume(struct net_device *dev, struct list_head *head)
+{
+	struct io_cfg *config;
+	struct list_head *cur, *q;
+	s32 ret = 0;
+
+	list_for_each_safe(cur, q, head) {
+		config = list_entry(cur, struct io_cfg, list);
+		if (config->iovar) {
+			if (!ret)
+				ret = wldev_iovar_setint(dev, config->iovar,
+					config->param);
+		} else {
+			if (!ret)
+				ret = wldev_ioctl(dev, config->ioctl + 1,
+					config->arg, config->len, true);
+			if (config->ioctl + 1 == WLC_SET_PM)
+				wl_cfg80211_update_power_mode(dev);
+			kfree(config->arg);
+		}
+		list_del(cur);
+		kfree(config);
+	}
+}
+
+static int
+wl_android_set_miracast(struct net_device *dev, char *command, int total_len)
+{
+	int mode, val;
+	int ret = 0;
+	struct io_cfg config;
+
+	if (sscanf(command, "%*s %d", &mode) != 1) {
+		DHD_ERROR(("%s: Failed to get Parameter\n", __FUNCTION__));
+		return -1;
+	}
+
+	DHD_INFO(("%s: enter miracast mode %d\n", __FUNCTION__, mode));
+
+	if (miracast_cur_mode == mode)
+		return 0;
+
+	wl_android_iolist_resume(dev, &miracast_resume_list);
+	miracast_cur_mode = MIRACAST_MODE_OFF;
+
+	switch (mode) {
+	case MIRACAST_MODE_SOURCE:
+		/* setting mchan_algo to platform specific value */
+		config.iovar = "mchan_algo";
+
+		ret = wldev_ioctl(dev, WLC_GET_BCNPRD, &val, sizeof(int), false);
+		if (!ret && val > 100) {
+			config.param = 0;
+			DHD_ERROR(("%s: Connected station's beacon interval: "
+				"%d and set mchan_algo to %d \n",
+				__FUNCTION__, val, config.param));
+		}
+		else {
+			config.param = MIRACAST_MCHAN_ALGO;
+		}
+		ret = wl_android_iolist_add(dev, &miracast_resume_list, &config);
+		if (ret)
+			goto resume;
+
+		/* setting mchan_bw to platform specific value */
+		config.iovar = "mchan_bw";
+		config.param = MIRACAST_MCHAN_BW;
+		ret = wl_android_iolist_add(dev, &miracast_resume_list, &config);
+		if (ret)
+			goto resume;
+
+		/* setting apmdu to platform specific value */
+		config.iovar = "ampdu_mpdu";
+		config.param = MIRACAST_AMPDU_SIZE;
+		ret = wl_android_iolist_add(dev, &miracast_resume_list, &config);
+		if (ret)
+			goto resume;
+		/* FALLTROUGH */
+		/* Source mode shares most configurations with sink mode.
+		 * Fall through here to avoid code duplication
+		 */
+	case MIRACAST_MODE_SINK:
+		/* disable internal roaming */
+		config.iovar = "roam_off";
+		config.param = 1;
+		ret = wl_android_iolist_add(dev, &miracast_resume_list, &config);
+		if (ret)
+			goto resume;
+		/* tunr off pm */
+		val = 0;
+		config.iovar = NULL;
+		config.ioctl = WLC_GET_PM;
+		config.arg = &val;
+		config.len = sizeof(int);
+		ret = wl_android_iolist_add(dev, &miracast_resume_list, &config);
+		if (ret)
+			goto resume;
+
+		break;
+	case MIRACAST_MODE_OFF:
+	default:
+		break;
+	}
+	miracast_cur_mode = mode;
+
+	return 0;
+
+resume:
+	DHD_ERROR(("%s: turnoff miracast mode because of err%d\n", __FUNCTION__, ret));
+	wl_android_iolist_resume(dev, &miracast_resume_list);
+	return ret;
+}
+
+
+int wl_keep_alive_set(struct net_device *dev, char* extra, int total_len)
+{
+	char 				buf[256];
+	const char 			*str;
+	wl_mkeep_alive_pkt_t	mkeep_alive_pkt;
+	wl_mkeep_alive_pkt_t	*mkeep_alive_pktp;
+	int					buf_len;
+	int					str_len;
+	int res 				= -1;
+	uint period_msec = 0;
+
+	if (extra == NULL)
+	{
+		 DHD_ERROR(("%s: extra is NULL\n", __FUNCTION__));
+		 return -1;
+	}
+	if (sscanf(extra, "%d", &period_msec) != 1)
+	{
+		 DHD_ERROR(("%s: sscanf error. check period_msec value\n", __FUNCTION__));
+		 return -EINVAL;
+	}
+	DHD_ERROR(("%s: period_msec is %d\n", __FUNCTION__, period_msec));
+
+	memset(&mkeep_alive_pkt, 0, sizeof(wl_mkeep_alive_pkt_t));
+
+	str = "mkeep_alive";
+	str_len = strlen(str);
+	strncpy(buf, str, str_len);
+	buf[ str_len ] = '\0';
+	mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) (buf + str_len + 1);
+	mkeep_alive_pkt.period_msec = period_msec;
+	buf_len = str_len + 1;
+	mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION);
+	mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN);
+
+	/* Setup keep alive zero for null packet generation */
+	mkeep_alive_pkt.keep_alive_id = 0;
+	mkeep_alive_pkt.len_bytes = 0;
+	buf_len += WL_MKEEP_ALIVE_FIXED_LEN;
+	/* Keep-alive attributes are set in local	variable (mkeep_alive_pkt), and
+	 * then memcpy'ed into buffer (mkeep_alive_pktp) since there is no
+	 * guarantee that the buffer is properly aligned.
+	 */
+	memcpy((char *)mkeep_alive_pktp, &mkeep_alive_pkt, WL_MKEEP_ALIVE_FIXED_LEN);
+
+	if ((res = wldev_ioctl(dev, WLC_SET_VAR, buf, buf_len, TRUE)) < 0)
+	{
+		DHD_ERROR(("%s:keep_alive set failed. res[%d]\n", __FUNCTION__, res));
+	}
+	else
+	{
+		DHD_ERROR(("%s:keep_alive set ok. res[%d]\n", __FUNCTION__, res));
+	}
+
+	return res;
+}
+
+int wl_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd)
+{
+#define PRIVATE_COMMAND_MAX_LEN	8192
+	int ret = 0;
+	char *command = NULL;
+	int bytes_written = 0;
+	android_wifi_priv_cmd priv_cmd;
+
+	net_os_wake_lock(net);
+
+	if (!ifr->ifr_data) {
+		ret = -EINVAL;
+		goto exit;
+	}
+
+#ifdef CONFIG_COMPAT
+	if (is_compat_task()) {
+		compat_android_wifi_priv_cmd compat_priv_cmd;
+		if (copy_from_user(&compat_priv_cmd, ifr->ifr_data,
+			sizeof(compat_android_wifi_priv_cmd))) {
+			ret = -EFAULT;
+			goto exit;
+
+		}
+		priv_cmd.buf = compat_ptr(compat_priv_cmd.buf);
+		priv_cmd.used_len = compat_priv_cmd.used_len;
+		priv_cmd.total_len = compat_priv_cmd.total_len;
+	} else
+#endif /* CONFIG_COMPAT */
+	{
+		if (copy_from_user(&priv_cmd, ifr->ifr_data, sizeof(android_wifi_priv_cmd))) {
+			ret = -EFAULT;
+			goto exit;
+		}
+	}
+	if ((priv_cmd.total_len > PRIVATE_COMMAND_MAX_LEN) || (priv_cmd.total_len < 0)) {
+		DHD_ERROR(("%s: too long priavte command\n", __FUNCTION__));
+		ret = -EINVAL;
+		goto exit;
+	}
+	command = kmalloc((priv_cmd.total_len + 1), GFP_KERNEL);
+	if (!command)
+	{
+		DHD_ERROR(("%s: failed to allocate memory\n", __FUNCTION__));
+		ret = -ENOMEM;
+		goto exit;
+	}
+	if (copy_from_user(command, priv_cmd.buf, priv_cmd.total_len)) {
+		ret = -EFAULT;
+		goto exit;
+	}
+	command[priv_cmd.total_len] = '\0';
+
+	DHD_INFO(("%s: Android private cmd \"%s\" on %s\n", __FUNCTION__, command, ifr->ifr_name));
+
+	if (strnicmp(command, CMD_START, strlen(CMD_START)) == 0) {
+		DHD_INFO(("%s, Received regular START command\n", __FUNCTION__));
+		bytes_written = wl_android_wifi_on(net);
+	}
+	else if (strnicmp(command, CMD_SETFWPATH, strlen(CMD_SETFWPATH)) == 0) {
+		bytes_written = wl_android_set_fwpath(net, command, priv_cmd.total_len);
+	}
+
+	if (!g_wifi_on) {
+		DHD_ERROR(("%s: Ignore private cmd \"%s\" - iface %s is down\n",
+			__FUNCTION__, command, ifr->ifr_name));
+		ret = 0;
+		goto exit;
+	}
+
+	if (strnicmp(command, CMD_STOP, strlen(CMD_STOP)) == 0) {
+		bytes_written = wl_android_wifi_off(net, FALSE);
+	}
+	else if (strnicmp(command, CMD_SCAN_ACTIVE, strlen(CMD_SCAN_ACTIVE)) == 0) {
+		/* TBD: SCAN-ACTIVE */
+	}
+	else if (strnicmp(command, CMD_SCAN_PASSIVE, strlen(CMD_SCAN_PASSIVE)) == 0) {
+		/* TBD: SCAN-PASSIVE */
+	}
+	else if (strnicmp(command, CMD_RSSI, strlen(CMD_RSSI)) == 0) {
+		bytes_written = wl_android_get_rssi(net, command, priv_cmd.total_len);
+	}
+	else if (strnicmp(command, CMD_LINKSPEED, strlen(CMD_LINKSPEED)) == 0) {
+		bytes_written = wl_android_get_link_speed(net, command, priv_cmd.total_len);
+	}
+#ifdef PKT_FILTER_SUPPORT
+	else if (strnicmp(command, CMD_RXFILTER_START, strlen(CMD_RXFILTER_START)) == 0) {
+		bytes_written = net_os_enable_packet_filter(net, 1);
+	}
+	else if (strnicmp(command, CMD_RXFILTER_STOP, strlen(CMD_RXFILTER_STOP)) == 0) {
+		bytes_written = net_os_enable_packet_filter(net, 0);
+	}
+	else if (strnicmp(command, CMD_RXFILTER_ADD, strlen(CMD_RXFILTER_ADD)) == 0) {
+		int filter_num = *(command + strlen(CMD_RXFILTER_ADD) + 1) - '0';
+		bytes_written = net_os_rxfilter_add_remove(net, TRUE, filter_num);
+	}
+	else if (strnicmp(command, CMD_RXFILTER_REMOVE, strlen(CMD_RXFILTER_REMOVE)) == 0) {
+		int filter_num = *(command + strlen(CMD_RXFILTER_REMOVE) + 1) - '0';
+		bytes_written = net_os_rxfilter_add_remove(net, FALSE, filter_num);
+	}
+#endif /* PKT_FILTER_SUPPORT */
+	else if (strnicmp(command, CMD_BTCOEXSCAN_START, strlen(CMD_BTCOEXSCAN_START)) == 0) {
+		/* TBD: BTCOEXSCAN-START */
+	}
+	else if (strnicmp(command, CMD_BTCOEXSCAN_STOP, strlen(CMD_BTCOEXSCAN_STOP)) == 0) {
+		/* TBD: BTCOEXSCAN-STOP */
+	}
+	else if (strnicmp(command, CMD_BTCOEXMODE, strlen(CMD_BTCOEXMODE)) == 0) {
+#ifdef WL_CFG80211
+		void *dhdp = wl_cfg80211_get_dhdp();
+		bytes_written = wl_cfg80211_set_btcoex_dhcp(net, dhdp, command);
+#else
+#ifdef PKT_FILTER_SUPPORT
+		uint mode = *(command + strlen(CMD_BTCOEXMODE) + 1) - '0';
+
+		if (mode == 1)
+			net_os_enable_packet_filter(net, 0); /* DHCP starts */
+		else
+			net_os_enable_packet_filter(net, 1); /* DHCP ends */
+#endif /* PKT_FILTER_SUPPORT */
+#endif /* WL_CFG80211 */
+	}
+	else if (strnicmp(command, CMD_SETSUSPENDOPT, strlen(CMD_SETSUSPENDOPT)) == 0) {
+		bytes_written = wl_android_set_suspendopt(net, command, priv_cmd.total_len);
+	}
+	else if (strnicmp(command, CMD_SETSUSPENDMODE, strlen(CMD_SETSUSPENDMODE)) == 0) {
+		bytes_written = wl_android_set_suspendmode(net, command, priv_cmd.total_len);
+	}
+	else if (strnicmp(command, CMD_SETBAND, strlen(CMD_SETBAND)) == 0) {
+		uint band = *(command + strlen(CMD_SETBAND) + 1) - '0';
+		bytes_written = wldev_set_band(net, band);
+	}
+	else if (strnicmp(command, CMD_GETBAND, strlen(CMD_GETBAND)) == 0) {
+		bytes_written = wl_android_get_band(net, command, priv_cmd.total_len);
+	}
+#ifdef WL_CFG80211
+	/* CUSTOMER_SET_COUNTRY feature is define for only GGSM model */
+	else if (strnicmp(command, CMD_COUNTRY, strlen(CMD_COUNTRY)) == 0) {
+		char *country_code = command + strlen(CMD_COUNTRY) + 1;
+		bytes_written = wldev_set_country(net, country_code, true, true);
+	}
+#endif /* WL_CFG80211 */
+
+
+#ifdef PNO_SUPPORT
+	else if (strnicmp(command, CMD_PNOSSIDCLR_SET, strlen(CMD_PNOSSIDCLR_SET)) == 0) {
+		bytes_written = dhd_dev_pno_stop_for_ssid(net);
+	}
+#ifndef WL_SCHED_SCAN
+	else if (strnicmp(command, CMD_PNOSETUP_SET, strlen(CMD_PNOSETUP_SET)) == 0) {
+		bytes_written = wl_android_set_pno_setup(net, command, priv_cmd.total_len);
+	}
+#endif /* !WL_SCHED_SCAN */
+	else if (strnicmp(command, CMD_PNOENABLE_SET, strlen(CMD_PNOENABLE_SET)) == 0) {
+		int enable = *(command + strlen(CMD_PNOENABLE_SET) + 1) - '0';
+		bytes_written = (enable)? 0 : dhd_dev_pno_stop_for_ssid(net);
+	}
+	else if (strnicmp(command, CMD_WLS_BATCHING, strlen(CMD_WLS_BATCHING)) == 0) {
+		bytes_written = wls_parse_batching_cmd(net, command, priv_cmd.total_len);
+	}
+#endif /* PNO_SUPPORT */
+	else if (strnicmp(command, CMD_P2P_DEV_ADDR, strlen(CMD_P2P_DEV_ADDR)) == 0) {
+		bytes_written = wl_android_get_p2p_dev_addr(net, command, priv_cmd.total_len);
+	}
+	else if (strnicmp(command, CMD_P2P_SET_NOA, strlen(CMD_P2P_SET_NOA)) == 0) {
+		int skip = strlen(CMD_P2P_SET_NOA) + 1;
+		bytes_written = wl_cfg80211_set_p2p_noa(net, command + skip,
+			priv_cmd.total_len - skip);
+	}
+#if !defined WL_ENABLE_P2P_IF
+	else if (strnicmp(command, CMD_P2P_GET_NOA, strlen(CMD_P2P_GET_NOA)) == 0) {
+		bytes_written = wl_cfg80211_get_p2p_noa(net, command, priv_cmd.total_len);
+	}
+#endif /* WL_ENABLE_P2P_IF */
+	else if (strnicmp(command, CMD_P2P_SET_PS, strlen(CMD_P2P_SET_PS)) == 0) {
+		int skip = strlen(CMD_P2P_SET_PS) + 1;
+		bytes_written = wl_cfg80211_set_p2p_ps(net, command + skip,
+			priv_cmd.total_len - skip);
+	}
+#ifdef WL_CFG80211
+	else if (strnicmp(command, CMD_SET_AP_WPS_P2P_IE,
+		strlen(CMD_SET_AP_WPS_P2P_IE)) == 0) {
+		int skip = strlen(CMD_SET_AP_WPS_P2P_IE) + 3;
+		bytes_written = wl_cfg80211_set_wps_p2p_ie(net, command + skip,
+			priv_cmd.total_len - skip, *(command + skip - 2) - '0');
+	}
+#endif /* WL_CFG80211 */
+	else if (strnicmp(command, CMD_OKC_SET_PMK, strlen(CMD_OKC_SET_PMK)) == 0)
+		bytes_written = wl_android_set_pmk(net, command, priv_cmd.total_len);
+	else if (strnicmp(command, CMD_OKC_ENABLE, strlen(CMD_OKC_ENABLE)) == 0)
+		bytes_written = wl_android_okc_enable(net, command, priv_cmd.total_len);
+#if defined(WL_SUPPORT_AUTO_CHANNEL)
+	else if (strnicmp(command, CMD_GET_BEST_CHANNELS,
+		strlen(CMD_GET_BEST_CHANNELS)) == 0) {
+		bytes_written = wl_cfg80211_get_best_channels(net, command,
+			priv_cmd.total_len);
+	}
+#endif /* WL_SUPPORT_AUTO_CHANNEL */
+	else if (strnicmp(command, CMD_HAPD_MAC_FILTER, strlen(CMD_HAPD_MAC_FILTER)) == 0) {
+		int skip = strlen(CMD_HAPD_MAC_FILTER) + 1;
+		wl_android_set_mac_address_filter(net, (const char*)command+skip);
+	}
+	else if (strnicmp(command, CMD_SETROAMMODE, strlen(CMD_SETROAMMODE)) == 0)
+		bytes_written = wl_android_set_roam_mode(net, command, priv_cmd.total_len);
+#if defined(BCMFW_ROAM_ENABLE)
+	else if (strnicmp(command, CMD_SET_ROAMPREF, strlen(CMD_SET_ROAMPREF)) == 0) {
+		bytes_written = wl_android_set_roampref(net, command, priv_cmd.total_len);
+	}
+#endif /* BCMFW_ROAM_ENABLE */
+	else if (strnicmp(command, CMD_MIRACAST, strlen(CMD_MIRACAST)) == 0)
+		bytes_written = wl_android_set_miracast(net, command, priv_cmd.total_len);
+	else if (strnicmp(command, CMD_SETIBSSBEACONOUIDATA, strlen(CMD_SETIBSSBEACONOUIDATA)) == 0)
+		bytes_written = wl_android_set_ibss_beacon_ouidata(net,
+		command, priv_cmd.total_len);
+	else if (strnicmp(command, CMD_KEEP_ALIVE, strlen(CMD_KEEP_ALIVE)) == 0) {
+		int skip = strlen(CMD_KEEP_ALIVE) + 1;
+		bytes_written = wl_keep_alive_set(net, command + skip, priv_cmd.total_len - skip);
+	}
+	else if (strnicmp(command, CMD_ROAM_OFFLOAD, strlen(CMD_ROAM_OFFLOAD)) == 0) {
+		int enable = *(command + strlen(CMD_ROAM_OFFLOAD) + 1) - '0';
+		bytes_written = wl_cfg80211_enable_roam_offload(net, enable);
+	}
+	else {
+		DHD_ERROR(("Unknown PRIVATE command %s - ignored\n", command));
+		snprintf(command, 3, "OK");
+		bytes_written = strlen("OK");
+	}
+
+	if (bytes_written >= 0) {
+		if ((bytes_written == 0) && (priv_cmd.total_len > 0))
+			command[0] = '\0';
+		if (bytes_written >= priv_cmd.total_len) {
+			DHD_ERROR(("%s: bytes_written = %d\n", __FUNCTION__, bytes_written));
+			bytes_written = priv_cmd.total_len;
+		} else {
+			bytes_written++;
+		}
+		priv_cmd.used_len = bytes_written;
+		if (copy_to_user(priv_cmd.buf, command, bytes_written)) {
+			DHD_ERROR(("%s: failed to copy data to user buffer\n", __FUNCTION__));
+			ret = -EFAULT;
+		}
+	}
+	else {
+		ret = bytes_written;
+	}
+
+exit:
+	net_os_wake_unlock(net);
+	if (command) {
+		kfree(command);
+	}
+
+	return ret;
+}
+
+int wl_android_init(void)
+{
+	int ret = 0;
+
+#ifdef ENABLE_INSMOD_NO_FW_LOAD
+	dhd_download_fw_on_driverload = FALSE;
+#endif /* ENABLE_INSMOD_NO_FW_LOAD */
+#if defined(CUSTOMER_HW2)
+	if (!iface_name[0]) {
+		memset(iface_name, 0, IFNAMSIZ);
+		bcm_strncpy_s(iface_name, IFNAMSIZ, "wlan", IFNAMSIZ);
+	}
+#endif
+
+
+	return ret;
+}
+
+int wl_android_exit(void)
+{
+	int ret = 0;
+	struct io_cfg *cur, *q;
+
+
+	list_for_each_entry_safe(cur, q, &miracast_resume_list, list) {
+		list_del(&cur->list);
+		kfree(cur);
+	}
+
+	return ret;
+}
+
+void wl_android_post_init(void)
+{
+
+#ifdef ENABLE_4335BT_WAR
+	bcm_bt_unlock(lock_cookie_wifi);
+	printk("%s: btlock released\n", __FUNCTION__);
+#endif /* ENABLE_4335BT_WAR */
+
+	if (!dhd_download_fw_on_driverload)
+		g_wifi_on = FALSE;
+}
diff --git a/drivers/net/wireless/bcmdhd/wl_android.h b/drivers/net/wireless/bcmdhd/wl_android.h
new file mode 100644
index 0000000..2827132
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_android.h
@@ -0,0 +1,71 @@
+/*
+ * Linux cfg80211 driver - Android related functions
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ *
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ *
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_android.h 467328 2014-04-03 01:23:40Z $
+ */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <wldev_common.h>
+
+/* If any feature uses the Generic Netlink Interface, put it here to enable WL_GENL
+ * automatically
+ */
+#if defined(BT_WIFI_HANDOVER)
+#define WL_GENL
+#endif
+
+
+
+/**
+ * Android platform dependent functions, feel free to add Android specific functions here
+ * (save the macros in dhd). Please do NOT declare functions that are NOT exposed to dhd
+ * or cfg, define them as static in wl_android.c
+ */
+
+/**
+ * wl_android_init will be called from module init function (dhd_module_init now), similarly
+ * wl_android_exit will be called from module exit function (dhd_module_cleanup now)
+ */
+int wl_android_init(void);
+int wl_android_exit(void);
+void wl_android_post_init(void);
+int wl_android_wifi_on(struct net_device *dev);
+int wl_android_wifi_off(struct net_device *dev, bool on_failure);
+int wl_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd);
+
+
+/* hostap mac mode */
+#define MACLIST_MODE_DISABLED   0
+#define MACLIST_MODE_DENY       1
+#define MACLIST_MODE_ALLOW      2
+
+/* max number of assoc list */
+#define MAX_NUM_OF_ASSOCLIST    64
+
+/* max number of mac filter list
+ * restrict max number to 10 as maximum cmd string size is 255
+ */
+#define MAX_NUM_MAC_FILT        10
+
+int wl_android_set_ap_mac_list(struct net_device *dev, int macmode, struct maclist *maclist);
diff --git a/drivers/net/wireless/bcmdhd/wl_cfg80211.c b/drivers/net/wireless/bcmdhd/wl_cfg80211.c
new file mode 100644
index 0000000..079794e
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_cfg80211.c
@@ -0,0 +1,12892 @@
+/*
+ * Linux cfg80211 driver
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ *
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ *
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_cfg80211.c 477711 2014-05-14 08:45:17Z $
+ */
+/* */
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+#include <linux/kernel.h>
+
+#include <bcmutils.h>
+#include <bcmwifi_channels.h>
+#include <bcmendian.h>
+#include <proto/ethernet.h>
+#include <proto/802.11.h>
+#include <linux/if_arp.h>
+#include <asm/uaccess.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_linux.h>
+#include <dhdioctl.h>
+#include <wlioctl.h>
+#include <dhd_cfg80211.h>
+#ifdef PNO_SUPPORT
+#include <dhd_pno.h>
+#endif /* PNO_SUPPORT */
+
+#include <proto/ethernet.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include <linux/sched.h>
+#include <linux/etherdevice.h>
+#include <linux/wireless.h>
+#include <linux/ieee80211.h>
+#include <linux/wait.h>
+#include <net/cfg80211.h>
+#include <net/rtnetlink.h>
+
+#include <wlioctl.h>
+#include <wldev_common.h>
+#include <wl_cfg80211.h>
+#include <wl_cfgp2p.h>
+#include <wl_android.h>
+
+#ifdef PROP_TXSTATUS
+#include <dhd_wlfc.h>
+#endif
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT)
+#include <wl_cfgvendor.h>
+#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT) */
+#ifdef WL11U
+#if !defined(WL_ENABLE_P2P_IF) && !defined(WL_CFG80211_P2P_DEV_IF)
+#error You should enable 'WL_ENABLE_P2P_IF' or 'WL_CFG80211_P2P_DEV_IF' \
+	according to Kernel version and is supported only in Android-JB
+#endif /* !WL_ENABLE_P2P_IF && !WL_CFG80211_P2P_DEV_IF */
+#endif /* WL11U */
+
+
+#define IW_WSEC_ENABLED(wsec)   ((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED))
+
+static struct device *cfg80211_parent_dev = NULL;
+/* g_bcm_cfg should be static. Do not change */
+static struct bcm_cfg80211 *g_bcm_cfg = NULL;
+u32 wl_dbg_level = WL_DBG_ERR;
+
+#define MAX_WAIT_TIME 1500
+
+#define CHAN_INFO_LEN 128
+#define IBSS_IF_NAME "ibss%d"
+
+#ifdef VSDB
+/* sleep time to keep STA's connecting or connection for continuous af tx or finding a peer */
+#define DEFAULT_SLEEP_TIME_VSDB		120
+#define OFF_CHAN_TIME_THRESHOLD_MS	200
+#define AF_RETRY_DELAY_TIME			40
+
+/* if sta is connected or connecting, sleep for a while before retry af tx or finding a peer */
+#define WL_AF_TX_KEEP_PRI_CONNECTION_VSDB(cfg)	\
+	do {	\
+		if (wl_get_drv_status(cfg, CONNECTED, bcmcfg_to_prmry_ndev(cfg)) ||	\
+			wl_get_drv_status(cfg, CONNECTING, bcmcfg_to_prmry_ndev(cfg))) {	\
+			OSL_SLEEP(DEFAULT_SLEEP_TIME_VSDB);			\
+		}	\
+	} while (0)
+#else /* VSDB */
+/* if not VSDB, do nothing */
+#define WL_AF_TX_KEEP_PRI_CONNECTION_VSDB(cfg)
+#endif /* VSDB */
+
+#ifdef WL_CFG80211_SYNC_GON
+#define WL_DRV_STATUS_SENDING_AF_FRM_EXT(cfg) \
+	(wl_get_drv_status_all(cfg, SENDING_ACT_FRM) || \
+		wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM_LISTEN))
+#else
+#define WL_DRV_STATUS_SENDING_AF_FRM_EXT(cfg) wl_get_drv_status_all(cfg, SENDING_ACT_FRM)
+#endif /* WL_CFG80211_SYNC_GON */
+#define WL_IS_P2P_DEV_EVENT(e) ((e->emsg.ifidx == 0) && \
+		(e->emsg.bsscfgidx == P2PAPI_BSSCFG_DEVICE))
+
+#define DNGL_FUNC(func, parameters) func parameters
+#define COEX_DHCP
+
+#define WLAN_EID_SSID	0
+#define CH_MIN_5G_CHANNEL 34
+#define CH_MIN_2G_CHANNEL 1
+
+/* This is to override regulatory domains defined in cfg80211 module (reg.c)
+ * By default world regulatory domain defined in reg.c puts the flags NL80211_RRF_PASSIVE_SCAN
+ * and NL80211_RRF_NO_IBSS for 5GHz channels (for 36..48 and 149..165).
+ * With respect to these flags, wpa_supplicant doesn't start p2p operations on 5GHz channels.
+ * All the chnages in world regulatory domain are to be done here.
+ */
+static const struct ieee80211_regdomain brcm_regdom = {
+	.n_reg_rules = 4,
+	.alpha2 =  "99",
+	.reg_rules = {
+		/* IEEE 802.11b/g, channels 1..11 */
+		REG_RULE(2412-10, 2472+10, 40, 6, 20, 0),
+		/* If any */
+		/* IEEE 802.11 channel 14 - Only JP enables
+		 * this and for 802.11b only
+		 */
+		REG_RULE(2484-10, 2484+10, 20, 6, 20, 0),
+		/* IEEE 802.11a, channel 36..64 */
+		REG_RULE(5150-10, 5350+10, 40, 6, 20, 0),
+		/* IEEE 802.11a, channel 100..165 */
+		REG_RULE(5470-10, 5850+10, 40, 6, 20, 0), }
+};
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) && \
+	(defined(WL_IFACE_COMB_NUM_CHANNELS) || defined(WL_CFG80211_P2P_DEV_IF))
+/*
+ * Possible interface combinations supported by driver
+ *
+ * ADHOC Mode     - #ADHOC <= 1 on channels = 1
+ * SoftAP Mode    - #AP <= 1 on channels = 1
+ * STA + P2P Mode - #STA <= 2, #{P2P-GO, P2P-client} <= 1, #P2P-device <= 1
+ *                  on channels = 2
+ */
+static const struct ieee80211_iface_limit common_if_limits[] = {
+	{
+	.max = 1,
+	.types = BIT(NL80211_IFTYPE_AP),
+	},
+	{
+	/*
+	 * During P2P-GO removal, P2P-GO is first changed to STA and later only
+	 * removed. So setting maximum possible number of STA interfaces according
+	 * to kernel version.
+	 *
+	 * less than linux-3.8 - max:3 (wlan0 + p2p0 + group removal of p2p-p2p0-x)
+	 * linux-3.8 and above - max:2 (wlan0 + group removal of p2p-wlan0-x)
+	 */
+#ifdef WL_ENABLE_P2P_IF
+	.max = 3,
+#else
+	.max = 2,
+#endif /* WL_ENABLE_P2P_IF */
+	.types = BIT(NL80211_IFTYPE_STATION),
+	},
+	{
+	.max = 2,
+	.types = BIT(NL80211_IFTYPE_P2P_GO) | BIT(NL80211_IFTYPE_P2P_CLIENT),
+	},
+#if defined(WL_CFG80211_P2P_DEV_IF)
+	{
+	.max = 1,
+	.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
+	},
+#endif /* WL_CFG80211_P2P_DEV_IF */
+	{
+	.max = 1,
+	.types = BIT(NL80211_IFTYPE_ADHOC),
+	},
+};
+#ifdef BCM4330_CHIP
+#define NUM_DIFF_CHANNELS 1
+#else
+#define NUM_DIFF_CHANNELS 2
+#endif
+static const struct ieee80211_iface_combination
+common_iface_combinations[] = {
+	{
+	.num_different_channels = NUM_DIFF_CHANNELS,
+	.max_interfaces = 4,
+	.limits = common_if_limits,
+	.n_limits = ARRAY_SIZE(common_if_limits),
+	},
+};
+#endif /* LINUX_VER >= 3.0 && (WL_IFACE_COMB_NUM_CHANNELS || WL_CFG80211_P2P_DEV_IF) */
+
+/* Data Element Definitions */
+#define WPS_ID_CONFIG_METHODS     0x1008
+#define WPS_ID_REQ_TYPE           0x103A
+#define WPS_ID_DEVICE_NAME        0x1011
+#define WPS_ID_VERSION            0x104A
+#define WPS_ID_DEVICE_PWD_ID      0x1012
+#define WPS_ID_REQ_DEV_TYPE       0x106A
+#define WPS_ID_SELECTED_REGISTRAR_CONFIG_METHODS 0x1053
+#define WPS_ID_PRIM_DEV_TYPE      0x1054
+
+/* Device Password ID */
+#define DEV_PW_DEFAULT 0x0000
+#define DEV_PW_USER_SPECIFIED 0x0001,
+#define DEV_PW_MACHINE_SPECIFIED 0x0002
+#define DEV_PW_REKEY 0x0003
+#define DEV_PW_PUSHBUTTON 0x0004
+#define DEV_PW_REGISTRAR_SPECIFIED 0x0005
+
+/* Config Methods */
+#define WPS_CONFIG_USBA 0x0001
+#define WPS_CONFIG_ETHERNET 0x0002
+#define WPS_CONFIG_LABEL 0x0004
+#define WPS_CONFIG_DISPLAY 0x0008
+#define WPS_CONFIG_EXT_NFC_TOKEN 0x0010
+#define WPS_CONFIG_INT_NFC_TOKEN 0x0020
+#define WPS_CONFIG_NFC_INTERFACE 0x0040
+#define WPS_CONFIG_PUSHBUTTON 0x0080
+#define WPS_CONFIG_KEYPAD 0x0100
+#define WPS_CONFIG_VIRT_PUSHBUTTON 0x0280
+#define WPS_CONFIG_PHY_PUSHBUTTON 0x0480
+#define WPS_CONFIG_VIRT_DISPLAY 0x2008
+#define WPS_CONFIG_PHY_DISPLAY 0x4008
+
+#define PM_BLOCK 1
+#define PM_ENABLE 0
+
+#ifdef MFP
+#define WL_AKM_SUITE_MFP_1X  0x000FAC05
+#define WL_AKM_SUITE_MFP_PSK 0x000FAC06
+#endif /* MFP */
+
+
+#ifndef IBSS_COALESCE_ALLOWED
+#define IBSS_COALESCE_ALLOWED 0
+#endif
+
+#ifndef IBSS_INITIAL_SCAN_ALLOWED
+#define IBSS_INITIAL_SCAN_ALLOWED 0
+#endif
+
+#define CUSTOM_RETRY_MASK 0xff000000 /* Mask for retry counter of custom dwell time */
+/*
+ * cfg80211_ops api/callback list
+ */
+static s32 wl_frame_get_mgmt(u16 fc, const struct ether_addr *da,
+	const struct ether_addr *sa, const struct ether_addr *bssid,
+	u8 **pheader, u32 *body_len, u8 *pbody);
+static s32 __wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+	struct cfg80211_scan_request *request,
+	struct cfg80211_ssid *this_ssid);
+#if defined(WL_CFG80211_P2P_DEV_IF)
+static s32
+wl_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request);
+#else
+static s32
+wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+	struct cfg80211_scan_request *request);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+static s32 wl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed);
+static bcm_struct_cfgdev* bcm_cfg80211_add_ibss_if(struct wiphy *wiphy, char *name);
+static s32 bcm_cfg80211_del_ibss_if(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev);
+static s32 wl_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
+	struct cfg80211_ibss_params *params);
+static s32 wl_cfg80211_leave_ibss(struct wiphy *wiphy,
+	struct net_device *dev);
+static s32 wl_cfg80211_get_station(struct wiphy *wiphy,
+	struct net_device *dev, u8 *mac,
+	struct station_info *sinfo);
+static s32 wl_cfg80211_set_power_mgmt(struct wiphy *wiphy,
+	struct net_device *dev, bool enabled,
+	s32 timeout);
+static int wl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
+	struct cfg80211_connect_params *sme);
+static s32 wl_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
+	u16 reason_code);
+#if defined(WL_CFG80211_P2P_DEV_IF)
+static s32
+wl_cfg80211_set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
+	enum nl80211_tx_power_setting type, s32 mbm);
+#else
+static s32
+wl_cfg80211_set_tx_power(struct wiphy *wiphy,
+	enum nl80211_tx_power_setting type, s32 dbm);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+#if defined(WL_CFG80211_P2P_DEV_IF)
+static s32 wl_cfg80211_get_tx_power(struct wiphy *wiphy,
+	struct wireless_dev *wdev, s32 *dbm);
+#else
+static s32 wl_cfg80211_get_tx_power(struct wiphy *wiphy, s32 *dbm);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+static s32 wl_cfg80211_config_default_key(struct wiphy *wiphy,
+	struct net_device *dev,
+	u8 key_idx, bool unicast, bool multicast);
+static s32 wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev,
+	u8 key_idx, bool pairwise, const u8 *mac_addr,
+	struct key_params *params);
+static s32 wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev,
+	u8 key_idx, bool pairwise, const u8 *mac_addr);
+static s32 wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev,
+	u8 key_idx, bool pairwise, const u8 *mac_addr,
+	void *cookie, void (*callback) (void *cookie,
+	struct key_params *params));
+static s32 wl_cfg80211_config_default_mgmt_key(struct wiphy *wiphy,
+	struct net_device *dev,	u8 key_idx);
+static s32 wl_cfg80211_resume(struct wiphy *wiphy);
+#if defined(WL_SUPPORT_BACKPORTED_KPATCHES) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, \
+	2, 0))
+static s32 wl_cfg80211_mgmt_tx_cancel_wait(struct wiphy *wiphy,
+	bcm_struct_cfgdev *cfgdev, u64 cookie);
+static s32 wl_cfg80211_del_station(struct wiphy *wiphy,
+	struct net_device *ndev, u8* mac_addr);
+static s32 wl_cfg80211_change_station(struct wiphy *wiphy,
+	struct net_device *dev, u8 *mac, struct station_parameters *params);
+#endif /* WL_SUPPORT_BACKPORTED_KPATCHES || KERNEL_VER >= KERNEL_VERSION(3, 2, 0)) */
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39))
+static s32 wl_cfg80211_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
+#else
+static s32 wl_cfg80211_suspend(struct wiphy *wiphy);
+#endif
+static s32 wl_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *dev,
+	struct cfg80211_pmksa *pmksa);
+static s32 wl_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *dev,
+	struct cfg80211_pmksa *pmksa);
+static s32 wl_cfg80211_flush_pmksa(struct wiphy *wiphy,
+	struct net_device *dev);
+static void wl_cfg80211_scan_abort(struct bcm_cfg80211 *cfg);
+static s32 wl_notify_escan_complete(struct bcm_cfg80211 *cfg,
+	struct net_device *ndev, bool aborted, bool fw_abort);
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0))
+static s32 wl_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
+	u8 *peer, enum nl80211_tdls_operation oper);
+#endif
+#ifdef WL_SCHED_SCAN
+static int wl_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev);
+#endif
+#if defined(DUAL_STA) || defined(DUAL_STA_STATIC_IF)
+bcm_struct_cfgdev*
+wl_cfg80211_create_iface(struct wiphy *wiphy, enum nl80211_iftype
+		 iface_type, u8 *mac_addr, const char *name);
+s32
+wl_cfg80211_del_iface(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev);
+#endif /* defined(DUAL_STA) || defined(DUAL_STA_STATIC_IF) */
+
+/*
+ * event & event Q handlers for cfg80211 interfaces
+ */
+static s32 wl_create_event_handler(struct bcm_cfg80211 *cfg);
+static void wl_destroy_event_handler(struct bcm_cfg80211 *cfg);
+static s32 wl_event_handler(void *data);
+static void wl_init_eq(struct bcm_cfg80211 *cfg);
+static void wl_flush_eq(struct bcm_cfg80211 *cfg);
+static unsigned long wl_lock_eq(struct bcm_cfg80211 *cfg);
+static void wl_unlock_eq(struct bcm_cfg80211 *cfg, unsigned long flags);
+static void wl_init_eq_lock(struct bcm_cfg80211 *cfg);
+static void wl_init_event_handler(struct bcm_cfg80211 *cfg);
+static struct wl_event_q *wl_deq_event(struct bcm_cfg80211 *cfg);
+static s32 wl_enq_event(struct bcm_cfg80211 *cfg, struct net_device *ndev, u32 type,
+	const wl_event_msg_t *msg, void *data);
+static void wl_put_event(struct wl_event_q *e);
+static void wl_wakeup_event(struct bcm_cfg80211 *cfg);
+static s32 wl_notify_connect_status_ap(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data);
+static s32 wl_notify_connect_status(struct bcm_cfg80211 *cfg,
+	bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data);
+static s32 wl_notify_roaming_status(struct bcm_cfg80211 *cfg,
+	bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data);
+static s32 wl_notify_scan_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data);
+static s32 wl_bss_connect_done(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data, bool completed);
+static s32 wl_bss_roaming_done(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data);
+static s32 wl_notify_mic_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data);
+#ifdef BT_WIFI_HANDOVER
+static s32 wl_notify_bt_wifi_handover_req(struct bcm_cfg80211 *cfg,
+	bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data);
+#endif /* BT_WIFI_HANDOVER */
+#ifdef WL_SCHED_SCAN
+static s32
+wl_notify_sched_scan_results(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data);
+#endif /* WL_SCHED_SCAN */
+#ifdef PNO_SUPPORT
+static s32 wl_notify_pfn_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data);
+#endif /* PNO_SUPPORT */
+#ifdef GSCAN_SUPPORT
+static s32 wl_notify_gscan_event(struct bcm_cfg80211 *wl, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data);
+#endif /* GSCAN_SUPPORT */
+static s32 wl_notifier_change_state(struct bcm_cfg80211 *cfg, struct net_info *_net_info,
+	enum wl_status state, bool set);
+
+#ifdef WLTDLS
+static s32 wl_tdls_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data);
+#endif /* WLTDLS */
+/*
+ * register/deregister parent device
+ */
+static void wl_cfg80211_clear_parent_dev(void);
+
+/*
+ * ioctl utilites
+ */
+
+/*
+ * cfg80211 set_wiphy_params utilities
+ */
+static s32 wl_set_frag(struct net_device *dev, u32 frag_threshold);
+static s32 wl_set_rts(struct net_device *dev, u32 frag_threshold);
+static s32 wl_set_retry(struct net_device *dev, u32 retry, bool l);
+
+/*
+ * cfg profile utilities
+ */
+static s32 wl_update_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data, s32 item);
+static void *wl_read_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 item);
+static void wl_init_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev);
+
+/*
+ * cfg80211 connect utilites
+ */
+static s32 wl_set_wpa_version(struct net_device *dev,
+	struct cfg80211_connect_params *sme);
+static s32 wl_set_auth_type(struct net_device *dev,
+	struct cfg80211_connect_params *sme);
+static s32 wl_set_set_cipher(struct net_device *dev,
+	struct cfg80211_connect_params *sme);
+static s32 wl_set_key_mgmt(struct net_device *dev,
+	struct cfg80211_connect_params *sme);
+static s32 wl_set_set_sharedkey(struct net_device *dev,
+	struct cfg80211_connect_params *sme);
+static s32 wl_get_assoc_ies(struct bcm_cfg80211 *cfg, struct net_device *ndev);
+static void wl_ch_to_chanspec(int ch,
+	struct wl_join_params *join_params, size_t *join_params_size);
+
+/*
+ * information element utilities
+ */
+static void wl_rst_ie(struct bcm_cfg80211 *cfg);
+static __used s32 wl_add_ie(struct bcm_cfg80211 *cfg, u8 t, u8 l, u8 *v);
+static void wl_update_hidden_ap_ie(struct wl_bss_info *bi, u8 *ie_stream, u32 *ie_size, bool roam);
+static s32 wl_mrg_ie(struct bcm_cfg80211 *cfg, u8 *ie_stream, u16 ie_size);
+static s32 wl_cp_ie(struct bcm_cfg80211 *cfg, u8 *dst, u16 dst_size);
+static u32 wl_get_ielen(struct bcm_cfg80211 *cfg);
+#ifdef MFP
+static int wl_cfg80211_get_rsn_capa(bcm_tlv_t *wpa2ie, u8* capa);
+#endif
+
+#ifdef WL11U
+bcm_tlv_t *
+wl_cfg80211_find_interworking_ie(u8 *parse, u32 len);
+static s32
+wl_cfg80211_add_iw_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx, s32 pktflag,
+            uint8 ie_id, uint8 *data, uint8 data_len);
+#endif /* WL11U */
+
+static s32 wl_setup_wiphy(struct wireless_dev *wdev, struct device *dev, void *data);
+static void wl_free_wdev(struct bcm_cfg80211 *cfg);
+#ifdef CONFIG_CFG80211_INTERNAL_REGDB
+static int
+wl_cfg80211_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request);
+#endif /* CONFIG_CFG80211_INTERNAL_REGDB */
+
+static s32 wl_inform_bss(struct bcm_cfg80211 *cfg);
+static s32 wl_inform_single_bss(struct bcm_cfg80211 *cfg, struct wl_bss_info *bi, bool roam);
+static s32 wl_update_bss_info(struct bcm_cfg80211 *cfg, struct net_device *ndev, bool roam);
+static chanspec_t wl_cfg80211_get_shared_freq(struct wiphy *wiphy);
+s32 wl_cfg80211_channel_to_freq(u32 channel);
+
+
+static void wl_cfg80211_work_handler(struct work_struct *work);
+static s32 wl_add_keyext(struct wiphy *wiphy, struct net_device *dev,
+	u8 key_idx, const u8 *mac_addr,
+	struct key_params *params);
+/*
+ * key indianess swap utilities
+ */
+static void swap_key_from_BE(struct wl_wsec_key *key);
+static void swap_key_to_BE(struct wl_wsec_key *key);
+
+/*
+ * bcm_cfg80211 memory init/deinit utilities
+ */
+static s32 wl_init_priv_mem(struct bcm_cfg80211 *cfg);
+static void wl_deinit_priv_mem(struct bcm_cfg80211 *cfg);
+
+static void wl_delay(u32 ms);
+
+/*
+ * ibss mode utilities
+ */
+static bool wl_is_ibssmode(struct bcm_cfg80211 *cfg, struct net_device *ndev);
+static __used bool wl_is_ibssstarter(struct bcm_cfg80211 *cfg);
+
+/*
+ * link up/down , default configuration utilities
+ */
+static s32 __wl_cfg80211_up(struct bcm_cfg80211 *cfg);
+static s32 __wl_cfg80211_down(struct bcm_cfg80211 *cfg);
+static bool wl_is_linkdown(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e);
+static bool wl_is_linkup(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e,
+	struct net_device *ndev);
+static bool wl_is_nonetwork(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e);
+static void wl_link_up(struct bcm_cfg80211 *cfg);
+static void wl_link_down(struct bcm_cfg80211 *cfg);
+static s32 wl_config_ifmode(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 iftype);
+static void wl_init_conf(struct wl_conf *conf);
+static s32 wl_cfg80211_handle_ifdel(struct bcm_cfg80211 *cfg, wl_if_event_info *if_event_info,
+	struct net_device* ndev);
+
+int wl_cfg80211_get_ioctl_version(void);
+
+/*
+ * find most significant bit set
+ */
+static __used u32 wl_find_msb(u16 bit16);
+
+/*
+ * rfkill support
+ */
+static int wl_setup_rfkill(struct bcm_cfg80211 *cfg, bool setup);
+static int wl_rfkill_set(void *data, bool blocked);
+#ifdef DEBUGFS_CFG80211
+static s32 wl_setup_debugfs(struct bcm_cfg80211 *cfg);
+static s32 wl_free_debugfs(struct bcm_cfg80211 *cfg);
+#endif
+
+static wl_scan_params_t *wl_cfg80211_scan_alloc_params(int channel,
+	int nprobes, int *out_params_size);
+static bool check_dev_role_integrity(struct bcm_cfg80211 *cfg, u32 dev_role);
+
+#ifdef WL_CFG80211_ACL
+/* ACL */
+static int wl_cfg80211_set_mac_acl(struct wiphy *wiphy, struct net_device *cfgdev,
+	const struct cfg80211_acl_data *acl);
+#endif /* WL_CFG80211_ACL */
+
+static void wl_send_event(struct net_device *dev, uint32 event_type, uint32 status, uint32 reason);
+
+/*
+ * Some external functions, TODO: move them to dhd_linux.h
+ */
+int dhd_add_monitor(char *name, struct net_device **new_ndev);
+int dhd_del_monitor(struct net_device *ndev);
+int dhd_monitor_init(void *dhd_pub);
+int dhd_monitor_uninit(void);
+int dhd_start_xmit(struct sk_buff *skb, struct net_device *net);
+
+#ifdef ROAM_CHANNEL_CACHE
+void init_roam(int ioctl_ver);
+void reset_roam_cache(void);
+void add_roam_cache(wl_bss_info_t *bi);
+int  get_roam_channel_list(int target_chan,
+	chanspec_t *channels, const wlc_ssid_t *ssid, int ioctl_ver);
+void print_roam_cache(void);
+void set_roam_band(int band);
+void update_roam_cache(struct bcm_cfg80211 *cfg, int ioctl_ver);
+#define MAX_ROAM_CACHE_NUM 100
+#endif /* ROAM_CHANNEL_CACHE */
+
+static int wl_cfg80211_delayed_roam(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const struct ether_addr *bssid);
+
+static int bw2cap[] = { 0, 0, WLC_BW_CAP_20MHZ, WLC_BW_CAP_40MHZ, WLC_BW_CAP_80MHZ,
+	WLC_BW_CAP_160MHZ, WLC_BW_CAP_160MHZ };
+
+#define RETURN_EIO_IF_NOT_UP(wlpriv)						\
+do {									\
+	struct net_device *checkSysUpNDev = bcmcfg_to_prmry_ndev(wlpriv);       	\
+	if (unlikely(!wl_get_drv_status(wlpriv, READY, checkSysUpNDev))) {	\
+		WL_INFORM(("device is not ready\n"));			\
+		return -EIO;						\
+	}								\
+} while (0)
+
+#ifdef RSSI_OFFSET
+static s32 wl_rssi_offset(s32 rssi)
+{
+	rssi += RSSI_OFFSET;
+	if (rssi > 0)
+		rssi = 0;
+	return rssi;
+}
+#else
+#define wl_rssi_offset(x)	x
+#endif
+
+#define IS_WPA_AKM(akm) ((akm) == RSN_AKM_NONE || 			\
+				 (akm) == RSN_AKM_UNSPECIFIED || 	\
+				 (akm) == RSN_AKM_PSK)
+
+
+extern int dhd_wait_pend8021x(struct net_device *dev);
+#ifdef PROP_TXSTATUS_VSDB
+extern int disable_proptx;
+#endif /* PROP_TXSTATUS_VSDB */
+
+#if (WL_DBG_LEVEL > 0)
+#define WL_DBG_ESTR_MAX	50
+static s8 wl_dbg_estr[][WL_DBG_ESTR_MAX] = {
+	"SET_SSID", "JOIN", "START", "AUTH", "AUTH_IND",
+	"DEAUTH", "DEAUTH_IND", "ASSOC", "ASSOC_IND", "REASSOC",
+	"REASSOC_IND", "DISASSOC", "DISASSOC_IND", "QUIET_START", "QUIET_END",
+	"BEACON_RX", "LINK", "MIC_ERROR", "NDIS_LINK", "ROAM",
+	"TXFAIL", "PMKID_CACHE", "RETROGRADE_TSF", "PRUNE", "AUTOAUTH",
+	"EAPOL_MSG", "SCAN_COMPLETE", "ADDTS_IND", "DELTS_IND", "BCNSENT_IND",
+	"BCNRX_MSG", "BCNLOST_MSG", "ROAM_PREP", "PFN_NET_FOUND",
+	"PFN_NET_LOST",
+	"RESET_COMPLETE", "JOIN_START", "ROAM_START", "ASSOC_START",
+	"IBSS_ASSOC",
+	"RADIO", "PSM_WATCHDOG", "WLC_E_CCX_ASSOC_START", "WLC_E_CCX_ASSOC_ABORT",
+	"PROBREQ_MSG",
+	"SCAN_CONFIRM_IND", "PSK_SUP", "COUNTRY_CODE_CHANGED",
+	"EXCEEDED_MEDIUM_TIME", "ICV_ERROR",
+	"UNICAST_DECODE_ERROR", "MULTICAST_DECODE_ERROR", "TRACE",
+	"WLC_E_BTA_HCI_EVENT", "IF", "WLC_E_P2P_DISC_LISTEN_COMPLETE",
+	"RSSI", "PFN_SCAN_COMPLETE", "WLC_E_EXTLOG_MSG",
+	"ACTION_FRAME", "ACTION_FRAME_COMPLETE", "WLC_E_PRE_ASSOC_IND",
+	"WLC_E_PRE_REASSOC_IND", "WLC_E_CHANNEL_ADOPTED", "WLC_E_AP_STARTED",
+	"WLC_E_DFS_AP_STOP", "WLC_E_DFS_AP_RESUME", "WLC_E_WAI_STA_EVENT",
+	"WLC_E_WAI_MSG", "WLC_E_ESCAN_RESULT", "WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE",
+	"WLC_E_PROBRESP_MSG", "WLC_E_P2P_PROBREQ_MSG", "WLC_E_DCS_REQUEST", "WLC_E_FIFO_CREDIT_MAP",
+	"WLC_E_ACTION_FRAME_RX", "WLC_E_WAKE_EVENT", "WLC_E_RM_COMPLETE"
+};
+#endif				/* WL_DBG_LEVEL */
+
+#define CHAN2G(_channel, _freq, _flags) {			\
+	.band			= IEEE80211_BAND_2GHZ,		\
+	.center_freq		= (_freq),			\
+	.hw_value		= (_channel),			\
+	.flags			= (_flags),			\
+	.max_antenna_gain	= 0,				\
+	.max_power		= 30,				\
+}
+
+#define CHAN5G(_channel, _flags) {				\
+	.band			= IEEE80211_BAND_5GHZ,		\
+	.center_freq		= 5000 + (5 * (_channel)),	\
+	.hw_value		= (_channel),			\
+	.flags			= (_flags),			\
+	.max_antenna_gain	= 0,				\
+	.max_power		= 30,				\
+}
+
+#define RATE_TO_BASE100KBPS(rate)   (((rate) * 10) / 2)
+#define RATETAB_ENT(_rateid, _flags) \
+	{								\
+		.bitrate	= RATE_TO_BASE100KBPS(_rateid),     \
+		.hw_value	= (_rateid),			    \
+		.flags	  = (_flags),			     \
+	}
+
+static struct ieee80211_rate __wl_rates[] = {
+	RATETAB_ENT(DOT11_RATE_1M, 0),
+	RATETAB_ENT(DOT11_RATE_2M, IEEE80211_RATE_SHORT_PREAMBLE),
+	RATETAB_ENT(DOT11_RATE_5M5, IEEE80211_RATE_SHORT_PREAMBLE),
+	RATETAB_ENT(DOT11_RATE_11M, IEEE80211_RATE_SHORT_PREAMBLE),
+	RATETAB_ENT(DOT11_RATE_6M, 0),
+	RATETAB_ENT(DOT11_RATE_9M, 0),
+	RATETAB_ENT(DOT11_RATE_12M, 0),
+	RATETAB_ENT(DOT11_RATE_18M, 0),
+	RATETAB_ENT(DOT11_RATE_24M, 0),
+	RATETAB_ENT(DOT11_RATE_36M, 0),
+	RATETAB_ENT(DOT11_RATE_48M, 0),
+	RATETAB_ENT(DOT11_RATE_54M, 0)
+};
+
+#define wl_a_rates		(__wl_rates + 4)
+#define wl_a_rates_size	8
+#define wl_g_rates		(__wl_rates + 0)
+#define wl_g_rates_size	12
+
+static struct ieee80211_channel __wl_2ghz_channels[] = {
+	CHAN2G(1, 2412, 0),
+	CHAN2G(2, 2417, 0),
+	CHAN2G(3, 2422, 0),
+	CHAN2G(4, 2427, 0),
+	CHAN2G(5, 2432, 0),
+	CHAN2G(6, 2437, 0),
+	CHAN2G(7, 2442, 0),
+	CHAN2G(8, 2447, 0),
+	CHAN2G(9, 2452, 0),
+	CHAN2G(10, 2457, 0),
+	CHAN2G(11, 2462, 0),
+	CHAN2G(12, 2467, 0),
+	CHAN2G(13, 2472, 0),
+	CHAN2G(14, 2484, 0)
+};
+
+static struct ieee80211_channel __wl_5ghz_a_channels[] = {
+	CHAN5G(34, 0), CHAN5G(36, 0),
+	CHAN5G(38, 0), CHAN5G(40, 0),
+	CHAN5G(42, 0), CHAN5G(44, 0),
+	CHAN5G(46, 0), CHAN5G(48, 0),
+	CHAN5G(52, 0), CHAN5G(56, 0),
+	CHAN5G(60, 0), CHAN5G(64, 0),
+	CHAN5G(100, 0), CHAN5G(104, 0),
+	CHAN5G(108, 0), CHAN5G(112, 0),
+	CHAN5G(116, 0), CHAN5G(120, 0),
+	CHAN5G(124, 0), CHAN5G(128, 0),
+	CHAN5G(132, 0), CHAN5G(136, 0),
+	CHAN5G(140, 0), CHAN5G(144, 0),
+	CHAN5G(149, 0),	CHAN5G(153, 0),
+	CHAN5G(157, 0),	CHAN5G(161, 0),
+	CHAN5G(165, 0)
+};
+
+static struct ieee80211_supported_band __wl_band_2ghz = {
+	.band = IEEE80211_BAND_2GHZ,
+	.channels = __wl_2ghz_channels,
+	.n_channels = ARRAY_SIZE(__wl_2ghz_channels),
+	.bitrates = wl_g_rates,
+	.n_bitrates = wl_g_rates_size
+};
+
+static struct ieee80211_supported_band __wl_band_5ghz_a = {
+	.band = IEEE80211_BAND_5GHZ,
+	.channels = __wl_5ghz_a_channels,
+	.n_channels = ARRAY_SIZE(__wl_5ghz_a_channels),
+	.bitrates = wl_a_rates,
+	.n_bitrates = wl_a_rates_size
+};
+
+static const u32 __wl_cipher_suites[] = {
+	WLAN_CIPHER_SUITE_WEP40,
+	WLAN_CIPHER_SUITE_WEP104,
+	WLAN_CIPHER_SUITE_TKIP,
+	WLAN_CIPHER_SUITE_CCMP,
+	WLAN_CIPHER_SUITE_AES_CMAC,
+};
+
+#ifdef WL_SUPPORT_ACS
+/*
+ * The firmware code required for this feature to work is currently under
+ * BCMINTERNAL flag. In future if this is to enabled we need to bring the
+ * required firmware code out of the BCMINTERNAL flag.
+ */
+struct wl_dump_survey {
+	u32 obss;
+	u32 ibss;
+	u32 no_ctg;
+	u32 no_pckt;
+	u32 tx;
+	u32 idle;
+};
+#endif /* WL_SUPPORT_ACS */
+
+
+#if defined(USE_DYNAMIC_MAXPKT_RXGLOM)
+static int maxrxpktglom = 0;
+#endif
+
+/* IOCtl version read from targeted driver */
+static int ioctl_version;
+#ifdef DEBUGFS_CFG80211
+#define S_SUBLOGLEVEL 20
+static const struct {
+	u32 log_level;
+	char *sublogname;
+} sublogname_map[] = {
+	{WL_DBG_ERR, "ERR"},
+	{WL_DBG_INFO, "INFO"},
+	{WL_DBG_DBG, "DBG"},
+	{WL_DBG_SCAN, "SCAN"},
+	{WL_DBG_TRACE, "TRACE"},
+	{WL_DBG_P2P_ACTION, "P2PACTION"}
+};
+#endif
+
+
+static void wl_add_remove_pm_enable_work(struct bcm_cfg80211 *cfg, bool add_remove,
+	enum wl_handler_del_type type)
+{
+	if (cfg == NULL)
+		return;
+
+	if (cfg->pm_enable_work_on) {
+		if (add_remove) {
+			schedule_delayed_work(&cfg->pm_enable_work,
+				msecs_to_jiffies(WL_PM_ENABLE_TIMEOUT));
+		} else {
+			cancel_delayed_work_sync(&cfg->pm_enable_work);
+			switch (type) {
+				case WL_HANDLER_MAINTAIN:
+					schedule_delayed_work(&cfg->pm_enable_work,
+						msecs_to_jiffies(WL_PM_ENABLE_TIMEOUT));
+					break;
+				case WL_HANDLER_PEND:
+					schedule_delayed_work(&cfg->pm_enable_work,
+						msecs_to_jiffies(WL_PM_ENABLE_TIMEOUT*2));
+					break;
+				case WL_HANDLER_DEL:
+				default:
+					cfg->pm_enable_work_on = false;
+					break;
+			}
+		}
+	}
+}
+
+/* Return a new chanspec given a legacy chanspec
+ * Returns INVCHANSPEC on error
+ */
+static chanspec_t
+wl_chspec_from_legacy(chanspec_t legacy_chspec)
+{
+	chanspec_t chspec;
+
+	/* get the channel number */
+	chspec = LCHSPEC_CHANNEL(legacy_chspec);
+
+	/* convert the band */
+	if (LCHSPEC_IS2G(legacy_chspec)) {
+		chspec |= WL_CHANSPEC_BAND_2G;
+	} else {
+		chspec |= WL_CHANSPEC_BAND_5G;
+	}
+
+	/* convert the bw and sideband */
+	if (LCHSPEC_IS20(legacy_chspec)) {
+		chspec |= WL_CHANSPEC_BW_20;
+	} else {
+		chspec |= WL_CHANSPEC_BW_40;
+		if (LCHSPEC_CTL_SB(legacy_chspec) == WL_LCHANSPEC_CTL_SB_LOWER) {
+			chspec |= WL_CHANSPEC_CTL_SB_L;
+		} else {
+			chspec |= WL_CHANSPEC_CTL_SB_U;
+		}
+	}
+
+	if (wf_chspec_malformed(chspec)) {
+		WL_ERR(("wl_chspec_from_legacy: output chanspec (0x%04X) malformed\n",
+		        chspec));
+		return INVCHANSPEC;
+	}
+
+	return chspec;
+}
+
+/* Return a legacy chanspec given a new chanspec
+ * Returns INVCHANSPEC on error
+ */
+static chanspec_t
+wl_chspec_to_legacy(chanspec_t chspec)
+{
+	chanspec_t lchspec;
+
+	if (wf_chspec_malformed(chspec)) {
+		WL_ERR(("wl_chspec_to_legacy: input chanspec (0x%04X) malformed\n",
+		        chspec));
+		return INVCHANSPEC;
+	}
+
+	/* get the channel number */
+	lchspec = CHSPEC_CHANNEL(chspec);
+
+	/* convert the band */
+	if (CHSPEC_IS2G(chspec)) {
+		lchspec |= WL_LCHANSPEC_BAND_2G;
+	} else {
+		lchspec |= WL_LCHANSPEC_BAND_5G;
+	}
+
+	/* convert the bw and sideband */
+	if (CHSPEC_IS20(chspec)) {
+		lchspec |= WL_LCHANSPEC_BW_20;
+		lchspec |= WL_LCHANSPEC_CTL_SB_NONE;
+	} else if (CHSPEC_IS40(chspec)) {
+		lchspec |= WL_LCHANSPEC_BW_40;
+		if (CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_L) {
+			lchspec |= WL_LCHANSPEC_CTL_SB_LOWER;
+		} else {
+			lchspec |= WL_LCHANSPEC_CTL_SB_UPPER;
+		}
+	} else {
+		/* cannot express the bandwidth */
+		char chanbuf[CHANSPEC_STR_LEN];
+		WL_ERR((
+		        "wl_chspec_to_legacy: unable to convert chanspec %s (0x%04X) "
+		        "to pre-11ac format\n",
+		        wf_chspec_ntoa(chspec, chanbuf), chspec));
+		return INVCHANSPEC;
+	}
+
+	return lchspec;
+}
+
+/* given a chanspec value, do the endian and chanspec version conversion to
+ * a chanspec_t value
+ * Returns INVCHANSPEC on error
+ */
+static chanspec_t
+wl_chspec_host_to_driver(chanspec_t chanspec)
+{
+	if (ioctl_version == 1) {
+		chanspec = wl_chspec_to_legacy(chanspec);
+		if (chanspec == INVCHANSPEC) {
+			return chanspec;
+		}
+	}
+	chanspec = htodchanspec(chanspec);
+
+	return chanspec;
+}
+
+/* given a channel value, do the endian and chanspec version conversion to
+ * a chanspec_t value
+ * Returns INVCHANSPEC on error
+ */
+chanspec_t
+wl_ch_host_to_driver(u16 channel)
+{
+
+	chanspec_t chanspec;
+
+	chanspec = channel & WL_CHANSPEC_CHAN_MASK;
+
+	if (channel <= CH_MAX_2G_CHANNEL)
+		chanspec |= WL_CHANSPEC_BAND_2G;
+	else
+		chanspec |= WL_CHANSPEC_BAND_5G;
+
+	chanspec |= WL_CHANSPEC_BW_20;
+	chanspec |= WL_CHANSPEC_CTL_SB_NONE;
+
+	return wl_chspec_host_to_driver(chanspec);
+}
+
+/* given a chanspec value from the driver, do the endian and chanspec version conversion to
+ * a chanspec_t value
+ * Returns INVCHANSPEC on error
+ */
+static chanspec_t
+wl_chspec_driver_to_host(chanspec_t chanspec)
+{
+	chanspec = dtohchanspec(chanspec);
+	if (ioctl_version == 1) {
+		chanspec = wl_chspec_from_legacy(chanspec);
+	}
+
+	return chanspec;
+}
+
+/*
+ * convert ASCII string to MAC address (colon-delimited format)
+ * eg: 00:11:22:33:44:55
+ */
+int
+wl_cfg80211_ether_atoe(const char *a, struct ether_addr *n)
+{
+	char *c = NULL;
+	int count = 0;
+
+	memset(n, 0, ETHER_ADDR_LEN);
+	for (;;) {
+		n->octet[count++] = (uint8)simple_strtoul(a, &c, 16);
+		if (!*c++ || count == ETHER_ADDR_LEN)
+			break;
+		a = c;
+	}
+	return (count == ETHER_ADDR_LEN);
+}
+
+/* convert hex string buffer to binary */
+int
+wl_cfg80211_hex_str_to_bin(unsigned char *data, int dlen, char *str)
+{
+	int count, slen;
+	int hvalue;
+	char tmp[3] = {0};
+	char *ptr = str, *endp = NULL;
+
+	if (!data || !str || !dlen) {
+		WL_DBG((" passed buffer is empty \n"));
+		return 0;
+	}
+
+	slen = strlen(str);
+	if (dlen * 2 < slen) {
+		WL_DBG((" destination buffer too short \n"));
+		return 0;
+	}
+
+	if (slen % 2) {
+		WL_DBG((" source buffer is of odd length \n"));
+		return 0;
+	}
+
+	for (count = 0; count < slen; count += 2) {
+		memcpy(tmp, ptr, 2);
+		hvalue = simple_strtol(tmp, &endp, 16);
+		if (*endp != '\0') {
+			WL_DBG((" non hexadecimal character encountered \n"));
+			return 0;
+		}
+		*data++ = (unsigned char)hvalue;
+		ptr += 2;
+	}
+
+	return (slen / 2);
+}
+
+/* There isn't a lot of sense in it, but you can transmit anything you like */
+static const struct ieee80211_txrx_stypes
+wl_cfg80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = {
+	[NL80211_IFTYPE_ADHOC] = {
+		.tx = 0xffff,
+		.rx = BIT(IEEE80211_STYPE_ACTION >> 4)
+	},
+	[NL80211_IFTYPE_STATION] = {
+		.tx = 0xffff,
+		.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+		BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+	},
+	[NL80211_IFTYPE_AP] = {
+		.tx = 0xffff,
+		.rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
+		BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
+		BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
+		BIT(IEEE80211_STYPE_DISASSOC >> 4) |
+		BIT(IEEE80211_STYPE_AUTH >> 4) |
+		BIT(IEEE80211_STYPE_DEAUTH >> 4) |
+		BIT(IEEE80211_STYPE_ACTION >> 4)
+	},
+	[NL80211_IFTYPE_AP_VLAN] = {
+		/* copy AP */
+		.tx = 0xffff,
+		.rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
+		BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
+		BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
+		BIT(IEEE80211_STYPE_DISASSOC >> 4) |
+		BIT(IEEE80211_STYPE_AUTH >> 4) |
+		BIT(IEEE80211_STYPE_DEAUTH >> 4) |
+		BIT(IEEE80211_STYPE_ACTION >> 4)
+	},
+	[NL80211_IFTYPE_P2P_CLIENT] = {
+		.tx = 0xffff,
+		.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+		BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+	},
+	[NL80211_IFTYPE_P2P_GO] = {
+		.tx = 0xffff,
+		.rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
+		BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
+		BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
+		BIT(IEEE80211_STYPE_DISASSOC >> 4) |
+		BIT(IEEE80211_STYPE_AUTH >> 4) |
+		BIT(IEEE80211_STYPE_DEAUTH >> 4) |
+		BIT(IEEE80211_STYPE_ACTION >> 4)
+	},
+#if defined(WL_CFG80211_P2P_DEV_IF)
+	[NL80211_IFTYPE_P2P_DEVICE] = {
+		.tx = 0xffff,
+		.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+		BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+	},
+#endif /* WL_CFG80211_P2P_DEV_IF */
+};
+
+static void swap_key_from_BE(struct wl_wsec_key *key)
+{
+	key->index = htod32(key->index);
+	key->len = htod32(key->len);
+	key->algo = htod32(key->algo);
+	key->flags = htod32(key->flags);
+	key->rxiv.hi = htod32(key->rxiv.hi);
+	key->rxiv.lo = htod16(key->rxiv.lo);
+	key->iv_initialized = htod32(key->iv_initialized);
+}
+
+static void swap_key_to_BE(struct wl_wsec_key *key)
+{
+	key->index = dtoh32(key->index);
+	key->len = dtoh32(key->len);
+	key->algo = dtoh32(key->algo);
+	key->flags = dtoh32(key->flags);
+	key->rxiv.hi = dtoh32(key->rxiv.hi);
+	key->rxiv.lo = dtoh16(key->rxiv.lo);
+	key->iv_initialized = dtoh32(key->iv_initialized);
+}
+
+/* Dump the contents of the encoded wps ie buffer and get pbc value */
+static void
+wl_validate_wps_ie(char *wps_ie, s32 wps_ie_len, bool *pbc)
+{
+	#define WPS_IE_FIXED_LEN 6
+	u16 len;
+	u8 *subel = NULL;
+	u16 subelt_id;
+	u16 subelt_len;
+	u16 val;
+	u8 *valptr = (uint8*) &val;
+	if (wps_ie == NULL || wps_ie_len < WPS_IE_FIXED_LEN) {
+		WL_ERR(("invalid argument : NULL\n"));
+		return;
+	}
+	len = (u16)wps_ie[TLV_LEN_OFF];
+
+	if (len > wps_ie_len) {
+		WL_ERR(("invalid length len %d, wps ie len %d\n", len, wps_ie_len));
+		return;
+	}
+	WL_DBG(("wps_ie len=%d\n", len));
+	len -= 4;	/* for the WPS IE's OUI, oui_type fields */
+	subel = wps_ie + WPS_IE_FIXED_LEN;
+	while (len >= 4) {		/* must have attr id, attr len fields */
+		valptr[0] = *subel++;
+		valptr[1] = *subel++;
+		subelt_id = HTON16(val);
+
+		valptr[0] = *subel++;
+		valptr[1] = *subel++;
+		subelt_len = HTON16(val);
+
+		len -= 4;			/* for the attr id, attr len fields */
+
+		if (len < subelt_len) {
+			WL_ERR(("not enough data, len %d, subelt_len %d\n", len,
+				subelt_len));
+			break;
+		}
+		len -= subelt_len;	/* for the remaining fields in this attribute */
+
+		WL_DBG((" subel=%p, subelt_id=0x%x subelt_len=%u\n",
+			subel, subelt_id, subelt_len));
+
+		if (subelt_id == WPS_ID_VERSION) {
+			WL_DBG(("  attr WPS_ID_VERSION: %u\n", *subel));
+		} else if (subelt_id == WPS_ID_REQ_TYPE) {
+			WL_DBG(("  attr WPS_ID_REQ_TYPE: %u\n", *subel));
+		} else if (subelt_id == WPS_ID_CONFIG_METHODS) {
+			valptr[0] = *subel;
+			valptr[1] = *(subel + 1);
+			WL_DBG(("  attr WPS_ID_CONFIG_METHODS: %x\n", HTON16(val)));
+		} else if (subelt_id == WPS_ID_DEVICE_NAME) {
+			char devname[100];
+			size_t namelen = MIN(subelt_len, sizeof(devname));
+			if (namelen) {
+				memcpy(devname, subel, namelen);
+				devname[namelen - 1] = '\0';
+				WL_DBG(("  attr WPS_ID_DEVICE_NAME: %s (len %u)\n",
+					devname, subelt_len));
+			}
+		} else if (subelt_id == WPS_ID_DEVICE_PWD_ID) {
+			valptr[0] = *subel;
+			valptr[1] = *(subel + 1);
+			WL_DBG(("  attr WPS_ID_DEVICE_PWD_ID: %u\n", HTON16(val)));
+			*pbc = (HTON16(val) == DEV_PW_PUSHBUTTON) ? true : false;
+		} else if (subelt_id == WPS_ID_PRIM_DEV_TYPE) {
+			valptr[0] = *subel;
+			valptr[1] = *(subel + 1);
+			WL_DBG(("  attr WPS_ID_PRIM_DEV_TYPE: cat=%u \n", HTON16(val)));
+			valptr[0] = *(subel + 6);
+			valptr[1] = *(subel + 7);
+			WL_DBG(("  attr WPS_ID_PRIM_DEV_TYPE: subcat=%u\n", HTON16(val)));
+		} else if (subelt_id == WPS_ID_REQ_DEV_TYPE) {
+			valptr[0] = *subel;
+			valptr[1] = *(subel + 1);
+			WL_DBG(("  attr WPS_ID_REQ_DEV_TYPE: cat=%u\n", HTON16(val)));
+			valptr[0] = *(subel + 6);
+			valptr[1] = *(subel + 7);
+			WL_DBG(("  attr WPS_ID_REQ_DEV_TYPE: subcat=%u\n", HTON16(val)));
+		} else if (subelt_id == WPS_ID_SELECTED_REGISTRAR_CONFIG_METHODS) {
+			valptr[0] = *subel;
+			valptr[1] = *(subel + 1);
+			WL_DBG(("  attr WPS_ID_SELECTED_REGISTRAR_CONFIG_METHODS"
+				": cat=%u\n", HTON16(val)));
+		} else {
+			WL_DBG(("  unknown attr 0x%x\n", subelt_id));
+		}
+
+		subel += subelt_len;
+	}
+}
+
+s32 wl_set_tx_power(struct net_device *dev,
+	enum nl80211_tx_power_setting type, s32 dbm)
+{
+	s32 err = 0;
+	s32 disable = 0;
+	s32 txpwrqdbm;
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+
+	/* Make sure radio is off or on as far as software is concerned */
+	disable = WL_RADIO_SW_DISABLE << 16;
+	disable = htod32(disable);
+	err = wldev_ioctl(dev, WLC_SET_RADIO, &disable, sizeof(disable), true);
+	if (unlikely(err)) {
+		WL_ERR(("WLC_SET_RADIO error (%d)\n", err));
+		return err;
+	}
+
+	if (dbm > 0xffff)
+		dbm = 0xffff;
+	txpwrqdbm = dbm * 4;
+	err = wldev_iovar_setbuf_bsscfg(dev, "qtxpower", (void *)&txpwrqdbm,
+		sizeof(txpwrqdbm), cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0,
+		&cfg->ioctl_buf_sync);
+	if (unlikely(err))
+		WL_ERR(("qtxpower error (%d)\n", err));
+	else
+		WL_ERR(("dBm=%d, txpwrqdbm=0x%x\n", dbm, txpwrqdbm));
+
+	return err;
+}
+
+s32 wl_get_tx_power(struct net_device *dev, s32 *dbm)
+{
+	s32 err = 0;
+	s32 txpwrdbm;
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+
+	err = wldev_iovar_getbuf_bsscfg(dev, "qtxpower",
+		NULL, 0, cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync);
+	if (unlikely(err)) {
+		WL_ERR(("error (%d)\n", err));
+		return err;
+	}
+
+	memcpy(&txpwrdbm, cfg->ioctl_buf, sizeof(txpwrdbm));
+	txpwrdbm = dtoh32(txpwrdbm);
+	*dbm = (txpwrdbm & ~WL_TXPWR_OVERRIDE) / 4;
+
+	WL_INFORM(("dBm=%d, txpwrdbm=0x%x\n", *dbm, txpwrdbm));
+
+	return err;
+}
+
+static chanspec_t wl_cfg80211_get_shared_freq(struct wiphy *wiphy)
+{
+	chanspec_t chspec;
+	int err = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct net_device *dev = bcmcfg_to_prmry_ndev(cfg);
+	struct ether_addr bssid;
+	struct wl_bss_info *bss = NULL;
+
+	if ((err = wldev_ioctl(dev, WLC_GET_BSSID, &bssid, sizeof(bssid), false))) {
+		/* STA interface is not associated. So start the new interface on a temp
+		 * channel . Later proper channel will be applied by the above framework
+		 * via set_channel (cfg80211 API).
+		 */
+		WL_DBG(("Not associated. Return a temp channel. \n"));
+		return wl_ch_host_to_driver(WL_P2P_TEMP_CHAN);
+	}
+
+
+	*(u32 *) cfg->extra_buf = htod32(WL_EXTRA_BUF_MAX);
+	if ((err = wldev_ioctl(dev, WLC_GET_BSS_INFO, cfg->extra_buf,
+		WL_EXTRA_BUF_MAX, false))) {
+			WL_ERR(("Failed to get associated bss info, use temp channel \n"));
+			chspec = wl_ch_host_to_driver(WL_P2P_TEMP_CHAN);
+	}
+	else {
+			bss = (struct wl_bss_info *) (cfg->extra_buf + 4);
+			chspec =  bss->chanspec;
+
+			WL_DBG(("Valid BSS Found. chanspec:%d \n", chspec));
+	}
+	return chspec;
+}
+
+static bcm_struct_cfgdev *
+wl_cfg80211_add_monitor_if(char *name)
+{
+#if defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF)
+	WL_INFORM(("wl_cfg80211_add_monitor_if: No more support monitor interface\n"));
+	return ERR_PTR(-EOPNOTSUPP);
+#else
+	struct net_device* ndev = NULL;
+
+	dhd_add_monitor(name, &ndev);
+	WL_INFORM(("wl_cfg80211_add_monitor_if net device returned: 0x%p\n", ndev));
+	return ndev_to_cfgdev(ndev);
+#endif /* WL_ENABLE_P2P_IF || WL_CFG80211_P2P_DEV_IF */
+}
+
+static bcm_struct_cfgdev *
+wl_cfg80211_add_virtual_iface(struct wiphy *wiphy,
+#if defined(WL_CFG80211_P2P_DEV_IF)
+	const char *name,
+#else
+	char *name,
+#endif /* WL_CFG80211_P2P_DEV_IF */
+	enum nl80211_iftype type, u32 *flags,
+	struct vif_params *params)
+{
+	s32 err;
+	s32 timeout = -1;
+	s32 wlif_type = -1;
+	s32 mode = 0;
+	s32 val = 0;
+	s32 dhd_mode = 0;
+	chanspec_t chspec;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct net_device *primary_ndev;
+	struct net_device *new_ndev;
+	struct ether_addr primary_mac;
+#ifdef PROP_TXSTATUS_VSDB
+#if defined(BCMSDIO)
+	s32 up = 1;
+	dhd_pub_t *dhd;
+	bool enabled;
+#endif
+#endif /* PROP_TXSTATUS_VSDB */
+
+	if (!cfg)
+		return ERR_PTR(-EINVAL);
+
+#ifdef PROP_TXSTATUS_VSDB
+#if defined(BCMSDIO)
+	dhd = (dhd_pub_t *)(cfg->pub);
+#endif
+#endif /* PROP_TXSTATUS_VSDB */
+
+
+	/* Use primary I/F for sending cmds down to firmware */
+	primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+
+	if (unlikely(!wl_get_drv_status(cfg, READY, primary_ndev))) {
+		WL_ERR(("device is not ready\n"));
+		return ERR_PTR(-ENODEV);
+	}
+
+	WL_DBG(("if name: %s, type: %d\n", name, type));
+	switch (type) {
+	case NL80211_IFTYPE_ADHOC:
+		return bcm_cfg80211_add_ibss_if(wiphy, (char *)name);
+	case NL80211_IFTYPE_AP_VLAN:
+	case NL80211_IFTYPE_WDS:
+	case NL80211_IFTYPE_MESH_POINT:
+		WL_ERR(("Unsupported interface type\n"));
+		mode = WL_MODE_IBSS;
+		return NULL;
+	case NL80211_IFTYPE_MONITOR:
+		return wl_cfg80211_add_monitor_if((char *)name);
+#if defined(WL_CFG80211_P2P_DEV_IF)
+	case NL80211_IFTYPE_P2P_DEVICE:
+		return wl_cfgp2p_add_p2p_disc_if(cfg);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+	case NL80211_IFTYPE_STATION:
+#ifdef DUAL_STA
+		if (cfg->ibss_cfgdev) {
+			WL_ERR(("AIBSS is already operational. "
+					" AIBSS & DUALSTA can't be used together \n"));
+			return NULL;
+		}
+		if (!name) {
+			WL_ERR(("Interface name not provided \n"));
+			return NULL;
+		}
+		return wl_cfg80211_create_iface(cfg->wdev->wiphy,
+			NL80211_IFTYPE_STATION, NULL, name);
+#endif /* DUAL_STA */
+	case NL80211_IFTYPE_P2P_CLIENT:
+		wlif_type = WL_P2P_IF_CLIENT;
+		mode = WL_MODE_BSS;
+		break;
+	case NL80211_IFTYPE_P2P_GO:
+	case NL80211_IFTYPE_AP:
+		wlif_type = WL_P2P_IF_GO;
+		mode = WL_MODE_AP;
+		break;
+	default:
+		WL_ERR(("Unsupported interface type\n"));
+		return NULL;
+		break;
+	}
+
+	if (!name) {
+		WL_ERR(("name is NULL\n"));
+		return NULL;
+	}
+	if (cfg->p2p_supported && (wlif_type != -1)) {
+		ASSERT(cfg->p2p); /* ensure expectation of p2p initialization */
+
+#ifdef PROP_TXSTATUS_VSDB
+#if defined(BCMSDIO)
+		if (!dhd)
+			return ERR_PTR(-ENODEV);
+#endif
+#endif /* PROP_TXSTATUS_VSDB */
+		if (!cfg->p2p)
+			return ERR_PTR(-ENODEV);
+
+		if (cfg->p2p && !cfg->p2p->on && strstr(name, WL_P2P_INTERFACE_PREFIX)) {
+			p2p_on(cfg) = true;
+			wl_cfgp2p_set_firm_p2p(cfg);
+			wl_cfgp2p_init_discovery(cfg);
+			get_primary_mac(cfg, &primary_mac);
+			wl_cfgp2p_generate_bss_mac(&primary_mac,
+				&cfg->p2p->dev_addr, &cfg->p2p->int_addr);
+		}
+
+		memset(cfg->p2p->vir_ifname, 0, IFNAMSIZ);
+		strncpy(cfg->p2p->vir_ifname, name, IFNAMSIZ - 1);
+
+		wl_cfg80211_scan_abort(cfg);
+#ifdef PROP_TXSTATUS_VSDB
+#if defined(BCMSDIO)
+		if (!cfg->wlfc_on && !disable_proptx) {
+			dhd_wlfc_get_enable(dhd, &enabled);
+			if (!enabled && dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
+				dhd->op_mode != DHD_FLAG_IBSS_MODE) {
+				dhd_wlfc_init(dhd);
+				err = wldev_ioctl(primary_ndev, WLC_UP, &up, sizeof(s32), true);
+				if (err < 0)
+					WL_ERR(("WLC_UP return err:%d\n", err));
+			}
+			cfg->wlfc_on = true;
+		}
+#endif
+#endif /* PROP_TXSTATUS_VSDB */
+
+		/* In concurrency case, STA may be already associated in a particular channel.
+		 * so retrieve the current channel of primary interface and then start the virtual
+		 * interface on that.
+		 */
+		 chspec = wl_cfg80211_get_shared_freq(wiphy);
+
+		/* For P2P mode, use P2P-specific driver features to create the
+		 * bss: "cfg p2p_ifadd"
+		 */
+		wl_set_p2p_status(cfg, IF_ADDING);
+		memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info));
+		if (wlif_type == WL_P2P_IF_GO)
+			wldev_iovar_setint(primary_ndev, "mpc", 0);
+		err = wl_cfgp2p_ifadd(cfg, &cfg->p2p->int_addr, htod32(wlif_type), chspec);
+		if (unlikely(err)) {
+			wl_clr_p2p_status(cfg, IF_ADDING);
+			WL_ERR((" virtual iface add failed (%d) \n", err));
+			return ERR_PTR(-ENOMEM);
+		}
+
+		timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
+			(wl_get_p2p_status(cfg, IF_ADDING) == false),
+			msecs_to_jiffies(MAX_WAIT_TIME));
+
+		if (timeout > 0 && !wl_get_p2p_status(cfg, IF_ADDING) && cfg->if_event_info.valid) {
+			struct wireless_dev *vwdev;
+			int pm_mode = PM_ENABLE;
+			wl_if_event_info *event = &cfg->if_event_info;
+
+			/* IF_ADD event has come back, we can proceed to to register
+			 * the new interface now, use the interface name provided by caller (thus
+			 * ignore the one from wlc)
+			 */
+			strncpy(cfg->if_event_info.name, name, IFNAMSIZ - 1);
+			new_ndev = wl_cfg80211_allocate_if(cfg, event->ifidx, cfg->p2p->vir_ifname,
+				event->mac, event->bssidx);
+			if (new_ndev == NULL)
+				goto fail;
+
+			wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION) = new_ndev;
+			wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION) = event->bssidx;
+			vwdev = kzalloc(sizeof(*vwdev), GFP_KERNEL);
+			if (unlikely(!vwdev)) {
+				WL_ERR(("Could not allocate wireless device\n"));
+				goto fail;
+			}
+			vwdev->wiphy = cfg->wdev->wiphy;
+			WL_INFORM(("virtual interface(%s) is created\n", cfg->p2p->vir_ifname));
+			vwdev->iftype = type;
+			vwdev->netdev = new_ndev;
+			new_ndev->ieee80211_ptr = vwdev;
+			SET_NETDEV_DEV(new_ndev, wiphy_dev(vwdev->wiphy));
+			wl_set_drv_status(cfg, READY, new_ndev);
+			cfg->p2p->vif_created = true;
+			wl_set_mode_by_netdev(cfg, new_ndev, mode);
+
+			if (wl_cfg80211_register_if(cfg, event->ifidx, new_ndev) != BCME_OK) {
+				wl_cfg80211_remove_if(cfg, event->ifidx, new_ndev);
+				goto fail;
+			}
+			wl_alloc_netinfo(cfg, new_ndev, vwdev, mode, pm_mode);
+			val = 1;
+			/* Disable firmware roaming for P2P interface  */
+			wldev_iovar_setint(new_ndev, "roam_off", val);
+
+			if (mode != WL_MODE_AP)
+				wldev_iovar_setint(new_ndev, "buf_key_b4_m4", 1);
+
+			WL_ERR((" virtual interface(%s) is "
+				"created net attach done\n", cfg->p2p->vir_ifname));
+			if (mode == WL_MODE_AP)
+				wl_set_drv_status(cfg, CONNECTED, new_ndev);
+			if (type == NL80211_IFTYPE_P2P_CLIENT)
+				dhd_mode = DHD_FLAG_P2P_GC_MODE;
+			else if (type == NL80211_IFTYPE_P2P_GO)
+				dhd_mode = DHD_FLAG_P2P_GO_MODE;
+			DNGL_FUNC(dhd_cfg80211_set_p2p_info, (cfg, dhd_mode));
+			/* reinitialize completion to clear previous count */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0))
+			INIT_COMPLETION(cfg->iface_disable);
+#else
+			init_completion(&cfg->iface_disable);
+#endif
+			return ndev_to_cfgdev(new_ndev);
+		} else {
+			wl_clr_p2p_status(cfg, IF_ADDING);
+			WL_ERR((" virtual interface(%s) is not created \n", cfg->p2p->vir_ifname));
+			memset(cfg->p2p->vir_ifname, '\0', IFNAMSIZ);
+			cfg->p2p->vif_created = false;
+#ifdef PROP_TXSTATUS_VSDB
+#if defined(BCMSDIO)
+			dhd_wlfc_get_enable(dhd, &enabled);
+		if (enabled && cfg->wlfc_on && dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
+			dhd->op_mode != DHD_FLAG_IBSS_MODE) {
+			dhd_wlfc_deinit(dhd);
+			cfg->wlfc_on = false;
+		}
+#endif
+#endif /* PROP_TXSTATUS_VSDB */
+		}
+	}
+
+fail:
+	if (wlif_type == WL_P2P_IF_GO)
+		wldev_iovar_setint(primary_ndev, "mpc", 1);
+	return ERR_PTR(-ENODEV);
+}
+
+static s32
+wl_cfg80211_del_virtual_iface(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev)
+{
+	struct net_device *dev = NULL;
+	struct ether_addr p2p_mac;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	s32 timeout = -1;
+	s32 ret = 0;
+	s32 index = -1;
+#ifdef CUSTOM_SET_CPUCORE
+	dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+#endif /* CUSTOM_SET_CPUCORE */
+	WL_DBG(("Enter\n"));
+
+#ifdef CUSTOM_SET_CPUCORE
+	dhd->chan_isvht80 &= ~DHD_FLAG_P2P_MODE;
+	if (!(dhd->chan_isvht80))
+		dhd_set_cpucore(dhd, FALSE);
+#endif /* CUSTOM_SET_CPUCORE */
+#if defined(WL_CFG80211_P2P_DEV_IF)
+	if (cfgdev->iftype == NL80211_IFTYPE_P2P_DEVICE) {
+		return wl_cfgp2p_del_p2p_disc_if(cfgdev, cfg);
+	}
+#endif /* WL_CFG80211_P2P_DEV_IF */
+	dev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+	if (cfgdev == cfg->ibss_cfgdev)
+		return bcm_cfg80211_del_ibss_if(wiphy, cfgdev);
+
+#ifdef DUAL_STA
+	if (cfgdev == cfg->bss_cfgdev)
+		return wl_cfg80211_del_iface(wiphy, cfgdev);
+#endif /* DUAL_STA */
+
+	if (wl_cfgp2p_find_idx(cfg, dev, &index) != BCME_OK) {
+		WL_ERR(("Find p2p index from ndev(%p) failed\n", dev));
+		return BCME_ERROR;
+	}
+	if (cfg->p2p_supported) {
+		memcpy(p2p_mac.octet, cfg->p2p->int_addr.octet, ETHER_ADDR_LEN);
+
+		/* Clear GO_NEG_PHASE bit to take care of GO-NEG-FAIL cases
+		 */
+		WL_DBG(("P2P: GO_NEG_PHASE status cleared "));
+		wl_clr_p2p_status(cfg, GO_NEG_PHASE);
+		if (cfg->p2p->vif_created) {
+			if (wl_get_drv_status(cfg, SCANNING, dev)) {
+				wl_notify_escan_complete(cfg, dev, true, true);
+			}
+			wldev_iovar_setint(dev, "mpc", 1);
+			/* Delete pm_enable_work */
+			wl_add_remove_pm_enable_work(cfg, FALSE, WL_HANDLER_DEL);
+
+			/* for GC */
+			if (wl_get_drv_status(cfg, DISCONNECTING, dev) &&
+				(wl_get_mode_by_netdev(cfg, dev) != WL_MODE_AP)) {
+				WL_ERR(("Wait for Link Down event for GC !\n"));
+				wait_for_completion_timeout
+					(&cfg->iface_disable, msecs_to_jiffies(500));
+			}
+
+			memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info));
+			wl_set_p2p_status(cfg, IF_DELETING);
+			DNGL_FUNC(dhd_cfg80211_clean_p2p_info, (cfg));
+
+			/* for GO */
+			if (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP) {
+				wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, false);
+				/* disable interface before bsscfg free */
+				ret = wl_cfgp2p_ifdisable(cfg, &p2p_mac);
+				/* if fw doesn't support "ifdis",
+				   do not wait for link down of ap mode
+				 */
+				if (ret == 0) {
+					WL_ERR(("Wait for Link Down event for GO !!!\n"));
+					wait_for_completion_timeout(&cfg->iface_disable,
+						msecs_to_jiffies(500));
+				} else if (ret != BCME_UNSUPPORTED) {
+					msleep(300);
+				}
+			}
+			wl_cfgp2p_clear_management_ie(cfg, index);
+
+			if (wl_get_mode_by_netdev(cfg, dev) != WL_MODE_AP)
+				wldev_iovar_setint(dev, "buf_key_b4_m4", 0);
+
+			/* delete interface after link down */
+			ret = wl_cfgp2p_ifdel(cfg, &p2p_mac);
+
+			if (ret != BCME_OK) {
+				struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+
+				WL_ERR(("p2p_ifdel failed, error %d, sent HANG event to %s\n",
+					ret, ndev->name));
+				#if defined(BCMDONGLEHOST) && defined(OEM_ANDROID)
+				net_os_send_hang_message(ndev);
+				#endif
+			} else {
+				/* Wait for IF_DEL operation to be finished */
+				timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
+					(wl_get_p2p_status(cfg, IF_DELETING) == false),
+					msecs_to_jiffies(MAX_WAIT_TIME));
+				if (timeout > 0 && !wl_get_p2p_status(cfg, IF_DELETING) &&
+					cfg->if_event_info.valid) {
+
+					WL_DBG(("IFDEL operation done\n"));
+					wl_cfg80211_handle_ifdel(cfg, &cfg->if_event_info, dev);
+				} else {
+					WL_ERR(("IFDEL didn't complete properly\n"));
+				}
+			}
+
+			ret = dhd_del_monitor(dev);
+			if (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP) {
+				DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_CANCEL((dhd_pub_t *)(cfg->pub));
+			}
+		}
+	}
+	return ret;
+}
+
+static s32
+wl_cfg80211_change_virtual_iface(struct wiphy *wiphy, struct net_device *ndev,
+	enum nl80211_iftype type, u32 *flags,
+	struct vif_params *params)
+{
+	s32 ap = 0;
+	s32 infra = 0;
+	s32 ibss = 0;
+	s32 wlif_type;
+	s32 mode = 0;
+	s32 err = BCME_OK;
+	chanspec_t chspec;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+
+	WL_DBG(("Enter type %d\n", type));
+	switch (type) {
+	case NL80211_IFTYPE_MONITOR:
+	case NL80211_IFTYPE_WDS:
+	case NL80211_IFTYPE_MESH_POINT:
+		ap = 1;
+		WL_ERR(("type (%d) : currently we do not support this type\n",
+			type));
+		break;
+	case NL80211_IFTYPE_ADHOC:
+		mode = WL_MODE_IBSS;
+		ibss = 1;
+		break;
+	case NL80211_IFTYPE_STATION:
+	case NL80211_IFTYPE_P2P_CLIENT:
+		mode = WL_MODE_BSS;
+		infra = 1;
+		break;
+	case NL80211_IFTYPE_AP:
+	case NL80211_IFTYPE_AP_VLAN:
+	case NL80211_IFTYPE_P2P_GO:
+		mode = WL_MODE_AP;
+		ap = 1;
+		break;
+	default:
+		return -EINVAL;
+	}
+	if (!dhd)
+		return -EINVAL;
+	if (ap) {
+		wl_set_mode_by_netdev(cfg, ndev, mode);
+		if (cfg->p2p_supported && cfg->p2p->vif_created) {
+			WL_DBG(("p2p_vif_created (%d) p2p_on (%d)\n", cfg->p2p->vif_created,
+			p2p_on(cfg)));
+			wldev_iovar_setint(ndev, "mpc", 0);
+			wl_notify_escan_complete(cfg, ndev, true, true);
+
+			/* In concurrency case, STA may be already associated in a particular
+			 * channel. so retrieve the current channel of primary interface and
+			 * then start the virtual interface on that.
+			 */
+			chspec = wl_cfg80211_get_shared_freq(wiphy);
+
+			wlif_type = WL_P2P_IF_GO;
+			WL_ERR(("%s : ap (%d), infra (%d), iftype: (%d)\n",
+				ndev->name, ap, infra, type));
+			wl_set_p2p_status(cfg, IF_CHANGING);
+			wl_clr_p2p_status(cfg, IF_CHANGED);
+			wl_cfgp2p_ifchange(cfg, &cfg->p2p->int_addr, htod32(wlif_type), chspec);
+			wait_event_interruptible_timeout(cfg->netif_change_event,
+				(wl_get_p2p_status(cfg, IF_CHANGED) == true),
+				msecs_to_jiffies(MAX_WAIT_TIME));
+			wl_set_mode_by_netdev(cfg, ndev, mode);
+			dhd->op_mode &= ~DHD_FLAG_P2P_GC_MODE;
+			dhd->op_mode |= DHD_FLAG_P2P_GO_MODE;
+			wl_clr_p2p_status(cfg, IF_CHANGING);
+			wl_clr_p2p_status(cfg, IF_CHANGED);
+			if (mode == WL_MODE_AP)
+				wl_set_drv_status(cfg, CONNECTED, ndev);
+		} else if (ndev == bcmcfg_to_prmry_ndev(cfg) &&
+			!wl_get_drv_status(cfg, AP_CREATED, ndev)) {
+			wl_set_drv_status(cfg, AP_CREATING, ndev);
+			if (!cfg->ap_info &&
+				!(cfg->ap_info = kzalloc(sizeof(struct ap_info), GFP_KERNEL))) {
+				WL_ERR(("struct ap_saved_ie allocation failed\n"));
+				return -ENOMEM;
+			}
+		} else {
+			WL_ERR(("Cannot change the interface for GO or SOFTAP\n"));
+			return -EINVAL;
+		}
+	} else {
+		WL_DBG(("Change_virtual_iface for transition from GO/AP to client/STA"));
+	}
+
+	if (ibss) {
+		infra = 0;
+		wl_set_mode_by_netdev(cfg, ndev, mode);
+		err = wldev_ioctl(ndev, WLC_SET_INFRA, &infra, sizeof(s32), true);
+		if (err < 0) {
+			WL_ERR(("SET Adhoc error %d\n", err));
+			return -EINVAL;
+		}
+	}
+
+	ndev->ieee80211_ptr->iftype = type;
+	return 0;
+}
+
+s32
+wl_cfg80211_notify_ifadd(int ifidx, char *name, uint8 *mac, uint8 bssidx)
+{
+	bool ifadd_expected = FALSE;
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+
+	/* P2P may send WLC_E_IF_ADD and/or WLC_E_IF_CHANGE during IF updating ("p2p_ifupd")
+	 * redirect the IF_ADD event to ifchange as it is not a real "new" interface
+	 */
+	if (wl_get_p2p_status(cfg, IF_CHANGING))
+		return wl_cfg80211_notify_ifchange(ifidx, name, mac, bssidx);
+
+	/* Okay, we are expecting IF_ADD (as IF_ADDING is true) */
+	if (wl_get_p2p_status(cfg, IF_ADDING)) {
+		ifadd_expected = TRUE;
+		wl_clr_p2p_status(cfg, IF_ADDING);
+	} else if (cfg->bss_pending_op) {
+		ifadd_expected = TRUE;
+		cfg->bss_pending_op = FALSE;
+	}
+
+	if (ifadd_expected) {
+		wl_if_event_info *if_event_info = &cfg->if_event_info;
+
+		if_event_info->valid = TRUE;
+		if_event_info->ifidx = ifidx;
+		if_event_info->bssidx = bssidx;
+		strncpy(if_event_info->name, name, IFNAMSIZ);
+		if_event_info->name[IFNAMSIZ] = '\0';
+		if (mac)
+			memcpy(if_event_info->mac, mac, ETHER_ADDR_LEN);
+		wake_up_interruptible(&cfg->netif_change_event);
+		return BCME_OK;
+	}
+
+	return BCME_ERROR;
+}
+
+s32
+wl_cfg80211_notify_ifdel(int ifidx, char *name, uint8 *mac, uint8 bssidx)
+{
+	bool ifdel_expected = FALSE;
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	wl_if_event_info *if_event_info = &cfg->if_event_info;
+
+	if (wl_get_p2p_status(cfg, IF_DELETING)) {
+		ifdel_expected = TRUE;
+		wl_clr_p2p_status(cfg, IF_DELETING);
+	} else if (cfg->bss_pending_op) {
+		ifdel_expected = TRUE;
+		cfg->bss_pending_op = FALSE;
+	}
+
+	if (ifdel_expected) {
+		if_event_info->valid = TRUE;
+		if_event_info->ifidx = ifidx;
+		if_event_info->bssidx = bssidx;
+		wake_up_interruptible(&cfg->netif_change_event);
+		return BCME_OK;
+	}
+
+	return BCME_ERROR;
+}
+
+s32
+wl_cfg80211_notify_ifchange(int ifidx, char *name, uint8 *mac, uint8 bssidx)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+
+	if (wl_get_p2p_status(cfg, IF_CHANGING)) {
+		wl_set_p2p_status(cfg, IF_CHANGED);
+		wake_up_interruptible(&cfg->netif_change_event);
+		return BCME_OK;
+	}
+
+	return BCME_ERROR;
+}
+
+static s32 wl_cfg80211_handle_ifdel(struct bcm_cfg80211 *cfg, wl_if_event_info *if_event_info,
+	struct net_device* ndev)
+{
+	s32 type = -1;
+	s32 bssidx = -1;
+#ifdef PROP_TXSTATUS_VSDB
+#if defined(BCMSDIO)
+	dhd_pub_t *dhd =  (dhd_pub_t *)(cfg->pub);
+	bool enabled;
+#endif
+#endif /* PROP_TXSTATUS_VSDB */
+
+	bssidx = if_event_info->bssidx;
+	if (bssidx != wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION)) {
+		WL_ERR(("got IF_DEL for if %d, not owned by cfg driver\n", bssidx));
+		return BCME_ERROR;
+	}
+
+	if (p2p_is_on(cfg) && cfg->p2p->vif_created) {
+
+		if (cfg->scan_request && (cfg->escan_info.ndev == ndev)) {
+			/* Abort any pending scan requests */
+			cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+			WL_DBG(("ESCAN COMPLETED\n"));
+			wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, false);
+		}
+
+		memset(cfg->p2p->vir_ifname, '\0', IFNAMSIZ);
+		if (wl_cfgp2p_find_type(cfg, bssidx, &type) != BCME_OK) {
+			WL_ERR(("Find p2p type from bssidx(%d) failed\n", bssidx));
+			return BCME_ERROR;
+		}
+		wl_clr_drv_status(cfg, CONNECTED, wl_to_p2p_bss_ndev(cfg, type));
+		wl_to_p2p_bss_ndev(cfg, type) = NULL;
+		wl_to_p2p_bss_bssidx(cfg, type) = WL_INVALID;
+		cfg->p2p->vif_created = false;
+
+#ifdef PROP_TXSTATUS_VSDB
+#if defined(BCMSDIO)
+		dhd_wlfc_get_enable(dhd, &enabled);
+		if (enabled && cfg->wlfc_on && dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
+			dhd->op_mode != DHD_FLAG_IBSS_MODE) {
+			dhd_wlfc_deinit(dhd);
+			cfg->wlfc_on = false;
+		}
+#endif
+#endif /* PROP_TXSTATUS_VSDB */
+	}
+
+	wl_cfg80211_remove_if(cfg, if_event_info->ifidx, ndev);
+	return BCME_OK;
+}
+
+/* Find listen channel */
+static s32 wl_find_listen_channel(struct bcm_cfg80211 *cfg,
+	const u8 *ie, u32 ie_len)
+{
+	wifi_p2p_ie_t *p2p_ie;
+	u8 *end, *pos;
+	s32 listen_channel;
+
+	pos = (u8 *)ie;
+	p2p_ie = wl_cfgp2p_find_p2pie(pos, ie_len);
+
+	if (p2p_ie == NULL)
+		return 0;
+
+	pos = p2p_ie->subelts;
+	end = p2p_ie->subelts + (p2p_ie->len - 4);
+
+	CFGP2P_DBG((" found p2p ie ! lenth %d \n",
+		p2p_ie->len));
+
+	while (pos < end) {
+		uint16 attr_len;
+		if (pos + 2 >= end) {
+			CFGP2P_DBG((" -- Invalid P2P attribute"));
+			return 0;
+		}
+		attr_len = ((uint16) (((pos + 1)[1] << 8) | (pos + 1)[0]));
+
+		if (pos + 3 + attr_len > end) {
+			CFGP2P_DBG(("P2P: Attribute underflow "
+				   "(len=%u left=%d)",
+				   attr_len, (int) (end - pos - 3)));
+			return 0;
+		}
+
+		/* if Listen Channel att id is 6 and the vailue is valid,
+		 * return the listen channel
+		 */
+		if (pos[0] == 6) {
+			/* listen channel subel length format
+			 * 1(id) + 2(len) + 3(country) + 1(op. class) + 1(chan num)
+			 */
+			listen_channel = pos[1 + 2 + 3 + 1];
+
+			if (listen_channel == SOCIAL_CHAN_1 ||
+				listen_channel == SOCIAL_CHAN_2 ||
+				listen_channel == SOCIAL_CHAN_3) {
+				CFGP2P_DBG((" Found my Listen Channel %d \n", listen_channel));
+				return listen_channel;
+			}
+		}
+		pos += 3 + attr_len;
+	}
+	return 0;
+}
+
+static void wl_scan_prep(struct wl_scan_params *params, struct cfg80211_scan_request *request)
+{
+	u32 n_ssids;
+	u32 n_channels;
+	u16 channel;
+	chanspec_t chanspec;
+	s32 i = 0, j = 0, offset;
+	char *ptr;
+	wlc_ssid_t ssid;
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+
+	memcpy(&params->bssid, &ether_bcast, ETHER_ADDR_LEN);
+	params->bss_type = DOT11_BSSTYPE_ANY;
+	params->scan_type = 0;
+	params->nprobes = -1;
+	params->active_time = -1;
+	params->passive_time = -1;
+	params->home_time = -1;
+	params->channel_num = 0;
+	memset(&params->ssid, 0, sizeof(wlc_ssid_t));
+
+	WL_SCAN(("Preparing Scan request\n"));
+	WL_SCAN(("nprobes=%d\n", params->nprobes));
+	WL_SCAN(("active_time=%d\n", params->active_time));
+	WL_SCAN(("passive_time=%d\n", params->passive_time));
+	WL_SCAN(("home_time=%d\n", params->home_time));
+	WL_SCAN(("scan_type=%d\n", params->scan_type));
+
+	params->nprobes = htod32(params->nprobes);
+	params->active_time = htod32(params->active_time);
+	params->passive_time = htod32(params->passive_time);
+	params->home_time = htod32(params->home_time);
+
+	/* if request is null just exit so it will be all channel broadcast scan */
+	if (!request)
+		return;
+
+	n_ssids = request->n_ssids;
+	n_channels = request->n_channels;
+
+	/* Copy channel array if applicable */
+	WL_SCAN(("### List of channelspecs to scan ###\n"));
+	if (n_channels > 0) {
+		for (i = 0; i < n_channels; i++) {
+			chanspec = 0;
+			channel = ieee80211_frequency_to_channel(request->channels[i]->center_freq);
+			/* SKIP DFS channels for Secondary interface */
+			if ((cfg->escan_info.ndev != bcmcfg_to_prmry_ndev(cfg)) &&
+				(request->channels[i]->flags &
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
+				(IEEE80211_CHAN_RADAR | IEEE80211_CHAN_PASSIVE_SCAN)))
+#else
+				(IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)))
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) */
+				continue;
+
+			if (request->channels[i]->band == IEEE80211_BAND_2GHZ) {
+				chanspec |= WL_CHANSPEC_BAND_2G;
+			} else {
+				chanspec |= WL_CHANSPEC_BAND_5G;
+			}
+
+			chanspec |= WL_CHANSPEC_BW_20;
+			chanspec |= WL_CHANSPEC_CTL_SB_NONE;
+
+			params->channel_list[j] = channel;
+			params->channel_list[j] &= WL_CHANSPEC_CHAN_MASK;
+			params->channel_list[j] |= chanspec;
+			WL_SCAN(("Chan : %d, Channel spec: %x \n",
+				channel, params->channel_list[j]));
+			params->channel_list[j] = wl_chspec_host_to_driver(params->channel_list[j]);
+			j++;
+		}
+	} else {
+		WL_SCAN(("Scanning all channels\n"));
+	}
+	n_channels = j;
+	/* Copy ssid array if applicable */
+	WL_SCAN(("### List of SSIDs to scan ###\n"));
+	if (n_ssids > 0) {
+		offset = offsetof(wl_scan_params_t, channel_list) + n_channels * sizeof(u16);
+		offset = roundup(offset, sizeof(u32));
+		ptr = (char*)params + offset;
+		for (i = 0; i < n_ssids; i++) {
+			memset(&ssid, 0, sizeof(wlc_ssid_t));
+			ssid.SSID_len = request->ssids[i].ssid_len;
+			memcpy(ssid.SSID, request->ssids[i].ssid, ssid.SSID_len);
+			if (!ssid.SSID_len)
+				WL_SCAN(("%d: Broadcast scan\n", i));
+			else
+				WL_SCAN(("%d: scan  for  %s size =%d\n", i,
+				ssid.SSID, ssid.SSID_len));
+			memcpy(ptr, &ssid, sizeof(wlc_ssid_t));
+			ptr += sizeof(wlc_ssid_t);
+		}
+	} else {
+		WL_SCAN(("Broadcast scan\n"));
+	}
+	/* Adding mask to channel numbers */
+	params->channel_num =
+	        htod32((n_ssids << WL_SCAN_PARAMS_NSSID_SHIFT) |
+	               (n_channels & WL_SCAN_PARAMS_COUNT_MASK));
+
+	if (n_channels == 1) {
+		params->active_time = htod32(WL_SCAN_CONNECT_DWELL_TIME_MS);
+		params->nprobes = htod32(params->active_time / WL_SCAN_JOIN_PROBE_INTERVAL_MS);
+	}
+}
+
+static s32
+wl_get_valid_channels(struct net_device *ndev, u8 *valid_chan_list, s32 size)
+{
+	wl_uint32_list_t *list;
+	s32 err = BCME_OK;
+	if (valid_chan_list == NULL || size <= 0)
+		return -ENOMEM;
+
+	memset(valid_chan_list, 0, size);
+	list = (wl_uint32_list_t *)(void *) valid_chan_list;
+	list->count = htod32(WL_NUMCHANNELS);
+	err = wldev_ioctl(ndev, WLC_GET_VALID_CHANNELS, valid_chan_list, size, false);
+	if (err != 0) {
+		WL_ERR(("get channels failed with %d\n", err));
+	}
+
+	return err;
+}
+
+#if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
+#define FIRST_SCAN_ACTIVE_DWELL_TIME_MS 40
+bool g_first_broadcast_scan = TRUE;
+#endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
+
+static s32
+wl_run_escan(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	struct cfg80211_scan_request *request, uint16 action)
+{
+	s32 err = BCME_OK;
+	u32 n_channels;
+	u32 n_ssids;
+	s32 params_size = (WL_SCAN_PARAMS_FIXED_SIZE + OFFSETOF(wl_escan_params_t, params));
+	wl_escan_params_t *params = NULL;
+	u8 chan_buf[sizeof(u32)*(WL_NUMCHANNELS + 1)];
+	u32 num_chans = 0;
+	s32 channel;
+	s32 n_valid_chan;
+	s32 search_state = WL_P2P_DISC_ST_SCAN;
+	u32 i, j, n_nodfs = 0;
+	u16 *default_chan_list = NULL;
+	wl_uint32_list_t *list;
+	struct net_device *dev = NULL;
+#if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
+	bool is_first_init_2g_scan = false;
+#endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
+	p2p_scan_purpose_t	p2p_scan_purpose = P2P_SCAN_PURPOSE_MIN;
+
+	WL_DBG(("Enter \n"));
+
+	/* scan request can come with empty request : perform all default scan */
+	if (!cfg) {
+		err = -EINVAL;
+		goto exit;
+	}
+	if (!cfg->p2p_supported || !p2p_scan(cfg)) {
+		/* LEGACY SCAN TRIGGER */
+		WL_SCAN((" LEGACY E-SCAN START\n"));
+
+#if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
+		if (!request) {
+			err = -EINVAL;
+			goto exit;
+		}
+		if (ndev == bcmcfg_to_prmry_ndev(cfg) && g_first_broadcast_scan == true) {
+#ifdef USE_INITIAL_2G_SCAN
+			struct ieee80211_channel tmp_channel_list[CH_MAX_2G_CHANNEL];
+			/* allow one 5G channel to add previous connected channel in 5G */
+			bool allow_one_5g_channel = TRUE;
+			j = 0;
+			for (i = 0; i < request->n_channels; i++) {
+				int tmp_chan = ieee80211_frequency_to_channel
+					(request->channels[i]->center_freq);
+				if (tmp_chan > CH_MAX_2G_CHANNEL) {
+					if (allow_one_5g_channel)
+						allow_one_5g_channel = FALSE;
+					else
+						continue;
+				}
+				if (j > CH_MAX_2G_CHANNEL) {
+					WL_ERR(("Index %d exceeds max 2.4GHz channels %d"
+						" and previous 5G connected channel\n",
+						j, CH_MAX_2G_CHANNEL));
+					break;
+				}
+				bcopy(request->channels[i], &tmp_channel_list[j],
+					sizeof(struct ieee80211_channel));
+				WL_SCAN(("channel of request->channels[%d]=%d\n", i, tmp_chan));
+				j++;
+			}
+			if ((j > 0) && (j <= CH_MAX_2G_CHANNEL)) {
+				for (i = 0; i < j; i++)
+					bcopy(&tmp_channel_list[i], request->channels[i],
+						sizeof(struct ieee80211_channel));
+
+				request->n_channels = j;
+				is_first_init_2g_scan = true;
+			}
+			else
+				WL_ERR(("Invalid number of 2.4GHz channels %d\n", j));
+
+			WL_SCAN(("request->n_channels=%d\n", request->n_channels));
+#else /* USE_INITIAL_SHORT_DWELL_TIME */
+			is_first_init_2g_scan = true;
+#endif /* USE_INITIAL_2G_SCAN */
+			g_first_broadcast_scan = false;
+		}
+#endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
+
+		/* if scan request is not empty parse scan request paramters */
+		if (request != NULL) {
+			n_channels = request->n_channels;
+			n_ssids = request->n_ssids;
+			if (n_channels % 2)
+				/* If n_channels is odd, add a padd of u16 */
+				params_size += sizeof(u16) * (n_channels + 1);
+			else
+				params_size += sizeof(u16) * n_channels;
+
+			/* Allocate space for populating ssids in wl_escan_params_t struct */
+			params_size += sizeof(struct wlc_ssid) * n_ssids;
+		}
+		params = (wl_escan_params_t *) kzalloc(params_size, GFP_KERNEL);
+		if (params == NULL) {
+			err = -ENOMEM;
+			goto exit;
+		}
+		wl_scan_prep(&params->params, request);
+
+#if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
+		/* Override active_time to reduce scan time if it's first bradcast scan. */
+		if (is_first_init_2g_scan)
+			params->params.active_time = FIRST_SCAN_ACTIVE_DWELL_TIME_MS;
+#endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
+
+		params->version = htod32(ESCAN_REQ_VERSION);
+		params->action =  htod16(action);
+		wl_escan_set_sync_id(params->sync_id, cfg);
+		wl_escan_set_type(cfg, WL_SCANTYPE_LEGACY);
+		if (params_size + sizeof("escan") >= WLC_IOCTL_MEDLEN) {
+			WL_ERR(("ioctl buffer length not sufficient\n"));
+			kfree(params);
+			err = -ENOMEM;
+			goto exit;
+		}
+		err = wldev_iovar_setbuf(ndev, "escan", params, params_size,
+			cfg->escan_ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
+		if (unlikely(err)) {
+			if (err == BCME_EPERM)
+				/* Scan Not permitted at this point of time */
+				WL_DBG((" Escan not permitted at this time (%d)\n", err));
+			else
+				WL_ERR((" Escan set error (%d)\n", err));
+		}
+		kfree(params);
+	}
+	else if (p2p_is_on(cfg) && p2p_scan(cfg)) {
+		/* P2P SCAN TRIGGER */
+		s32 _freq = 0;
+		n_nodfs = 0;
+		if (request && request->n_channels) {
+			num_chans = request->n_channels;
+			WL_SCAN((" chann number : %d\n", num_chans));
+			default_chan_list = kzalloc(num_chans * sizeof(*default_chan_list),
+				GFP_KERNEL);
+			if (default_chan_list == NULL) {
+				WL_ERR(("channel list allocation failed \n"));
+				err = -ENOMEM;
+				goto exit;
+			}
+			if (!wl_get_valid_channels(ndev, chan_buf, sizeof(chan_buf))) {
+				list = (wl_uint32_list_t *) chan_buf;
+				n_valid_chan = dtoh32(list->count);
+				for (i = 0; i < num_chans; i++)
+				{
+					_freq = request->channels[i]->center_freq;
+					channel = ieee80211_frequency_to_channel(_freq);
+
+					/* ignore DFS channels */
+					if (request->channels[i]->flags &
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+						(IEEE80211_CHAN_NO_IR
+						| IEEE80211_CHAN_RADAR))
+#else
+						(IEEE80211_CHAN_RADAR
+						| IEEE80211_CHAN_PASSIVE_SCAN))
+#endif
+						continue;
+
+					for (j = 0; j < n_valid_chan; j++) {
+						/* allows only supported channel on
+						*  current reguatory
+						*/
+						if (channel == (dtoh32(list->element[j])))
+							default_chan_list[n_nodfs++] =
+								channel;
+					}
+
+				}
+			}
+			if (num_chans == SOCIAL_CHAN_CNT && (
+						(default_chan_list[0] == SOCIAL_CHAN_1) &&
+						(default_chan_list[1] == SOCIAL_CHAN_2) &&
+						(default_chan_list[2] == SOCIAL_CHAN_3))) {
+				/* SOCIAL CHANNELS 1, 6, 11 */
+				search_state = WL_P2P_DISC_ST_SEARCH;
+				p2p_scan_purpose = P2P_SCAN_SOCIAL_CHANNEL;
+				WL_INFORM(("P2P SEARCH PHASE START \n"));
+			} else if ((dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION)) &&
+				(wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP)) {
+				/* If you are already a GO, then do SEARCH only */
+				WL_INFORM(("Already a GO. Do SEARCH Only"));
+				search_state = WL_P2P_DISC_ST_SEARCH;
+				num_chans = n_nodfs;
+				p2p_scan_purpose = P2P_SCAN_NORMAL;
+
+			} else if (num_chans == 1) {
+				p2p_scan_purpose = P2P_SCAN_CONNECT_TRY;
+			} else if (num_chans == SOCIAL_CHAN_CNT + 1) {
+			/* SOCIAL_CHAN_CNT + 1 takes care of the Progressive scan supported by
+			 * the supplicant
+			 */
+				p2p_scan_purpose = P2P_SCAN_SOCIAL_CHANNEL;
+			} else {
+				WL_INFORM(("P2P SCAN STATE START \n"));
+				num_chans = n_nodfs;
+				p2p_scan_purpose = P2P_SCAN_NORMAL;
+			}
+		} else {
+			err = -EINVAL;
+			goto exit;
+		}
+		err = wl_cfgp2p_escan(cfg, ndev, cfg->active_scan, num_chans, default_chan_list,
+			search_state, action,
+			wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE), NULL,
+			p2p_scan_purpose);
+
+		if (!err)
+			cfg->p2p->search_state = search_state;
+
+		kfree(default_chan_list);
+	}
+exit:
+	if (unlikely(err)) {
+		/* Don't print Error incase of Scan suppress */
+		if ((err == BCME_EPERM) && cfg->scan_suppressed)
+			WL_DBG(("Escan failed: Scan Suppressed \n"));
+		else
+			WL_ERR(("error (%d)\n", err));
+	}
+	return err;
+}
+
+
+static s32
+wl_do_escan(struct bcm_cfg80211 *cfg, struct wiphy *wiphy, struct net_device *ndev,
+	struct cfg80211_scan_request *request)
+{
+	s32 err = BCME_OK;
+	s32 passive_scan;
+	wl_scan_results_t *results;
+	WL_SCAN(("Enter \n"));
+	mutex_lock(&cfg->usr_sync);
+
+	results = wl_escan_get_buf(cfg, FALSE);
+	results->version = 0;
+	results->count = 0;
+	results->buflen = WL_SCAN_RESULTS_FIXED_SIZE;
+
+	cfg->escan_info.ndev = ndev;
+	cfg->escan_info.wiphy = wiphy;
+	cfg->escan_info.escan_state = WL_ESCAN_STATE_SCANING;
+	passive_scan = cfg->active_scan ? 0 : 1;
+	err = wldev_ioctl(ndev, WLC_SET_PASSIVE_SCAN,
+		&passive_scan, sizeof(passive_scan), true);
+	if (unlikely(err)) {
+		WL_ERR(("error (%d)\n", err));
+		goto exit;
+	}
+
+	err = wl_run_escan(cfg, ndev, request, WL_SCAN_ACTION_START);
+exit:
+	mutex_unlock(&cfg->usr_sync);
+	return err;
+}
+
+static s32
+__wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+	struct cfg80211_scan_request *request,
+	struct cfg80211_ssid *this_ssid)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct cfg80211_ssid *ssids;
+	struct ether_addr primary_mac;
+	bool p2p_ssid;
+#ifdef WL11U
+	bcm_tlv_t *interworking_ie;
+#endif
+	s32 err = 0;
+	s32 bssidx = -1;
+	s32 i;
+
+	unsigned long flags;
+	static s32 busy_count = 0;
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+	struct net_device *remain_on_channel_ndev = NULL;
+#endif
+
+	dhd_pub_t *dhd;
+
+	dhd = (dhd_pub_t *)(cfg->pub);
+	/*
+	 * Hostapd triggers scan before starting automatic channel selection
+	 * also Dump stats IOVAR scans each channel hence returning from here.
+	 */
+	if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
+#ifdef WL_SUPPORT_ACS
+		WL_INFORM(("Scan Command at SoftAP mode\n"));
+		return 0;
+#else
+		WL_ERR(("Invalid Scan Command at SoftAP mode\n"));
+		return -EINVAL;
+#endif /* WL_SUPPORT_ACS */
+	}
+
+	ndev = ndev_to_wlc_ndev(ndev, cfg);
+
+	if (WL_DRV_STATUS_SENDING_AF_FRM_EXT(cfg)) {
+		WL_ERR(("Sending Action Frames. Try it again.\n"));
+		return -EAGAIN;
+	}
+
+	WL_DBG(("Enter wiphy (%p)\n", wiphy));
+	if (wl_get_drv_status_all(cfg, SCANNING)) {
+		if (cfg->scan_request == NULL) {
+			wl_clr_drv_status_all(cfg, SCANNING);
+			WL_DBG(("<<<<<<<<<<<Force Clear Scanning Status>>>>>>>>>>>\n"));
+		} else {
+			WL_ERR(("Scanning already\n"));
+			return -EAGAIN;
+		}
+	}
+	if (wl_get_drv_status(cfg, SCAN_ABORTING, ndev)) {
+		WL_ERR(("Scanning being aborted\n"));
+		return -EAGAIN;
+	}
+	if (request && request->n_ssids > WL_SCAN_PARAMS_SSID_MAX) {
+		WL_ERR(("request null or n_ssids > WL_SCAN_PARAMS_SSID_MAX\n"));
+		return -EOPNOTSUPP;
+	}
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+	remain_on_channel_ndev = wl_cfg80211_get_remain_on_channel_ndev(cfg);
+	if (remain_on_channel_ndev) {
+		WL_DBG(("Remain_on_channel bit is set, somehow it didn't get cleared\n"));
+		wl_notify_escan_complete(cfg, remain_on_channel_ndev, true, true);
+	}
+#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+
+
+	/* Arm scan timeout timer */
+	mod_timer(&cfg->scan_timeout, jiffies + msecs_to_jiffies(WL_SCAN_TIMER_INTERVAL_MS));
+	if (request) {		/* scan bss */
+		ssids = request->ssids;
+		p2p_ssid = false;
+		for (i = 0; i < request->n_ssids; i++) {
+			if (ssids[i].ssid_len &&
+				IS_P2P_SSID(ssids[i].ssid, ssids[i].ssid_len)) {
+				p2p_ssid = true;
+				break;
+			}
+		}
+		if (p2p_ssid) {
+			if (cfg->p2p_supported) {
+				/* p2p scan trigger */
+				if (p2p_on(cfg) == false) {
+					/* p2p on at the first time */
+					p2p_on(cfg) = true;
+					wl_cfgp2p_set_firm_p2p(cfg);
+					get_primary_mac(cfg, &primary_mac);
+					wl_cfgp2p_generate_bss_mac(&primary_mac,
+						&cfg->p2p->dev_addr, &cfg->p2p->int_addr);
+				}
+				wl_clr_p2p_status(cfg, GO_NEG_PHASE);
+				WL_DBG(("P2P: GO_NEG_PHASE status cleared \n"));
+				p2p_scan(cfg) = true;
+			}
+		} else {
+			/* legacy scan trigger
+			 * So, we have to disable p2p discovery if p2p discovery is on
+			 */
+			if (cfg->p2p_supported) {
+				p2p_scan(cfg) = false;
+				/* If Netdevice is not equals to primary and p2p is on
+				*  , we will do p2p scan using P2PAPI_BSSCFG_DEVICE.
+				*/
+
+				if (p2p_scan(cfg) == false) {
+					if (wl_get_p2p_status(cfg, DISCOVERY_ON)) {
+						err = wl_cfgp2p_discover_enable_search(cfg,
+						false);
+						if (unlikely(err)) {
+							goto scan_out;
+						}
+
+					}
+				}
+			}
+			if (!cfg->p2p_supported || !p2p_scan(cfg)) {
+
+				if (wl_cfgp2p_find_idx(cfg, ndev, &bssidx) != BCME_OK) {
+					WL_ERR(("Find p2p index from ndev(%p) failed\n",
+						ndev));
+					err = BCME_ERROR;
+					goto scan_out;
+				}
+#ifdef WL11U
+				if ((interworking_ie = wl_cfg80211_find_interworking_ie(
+					(u8 *)request->ie, request->ie_len)) != NULL) {
+					err = wl_cfg80211_add_iw_ie(cfg, ndev, bssidx,
+					       VNDR_IE_CUSTOM_FLAG, interworking_ie->id,
+					       interworking_ie->data, interworking_ie->len);
+
+					if (unlikely(err)) {
+						goto scan_out;
+					}
+				} else if (cfg->iw_ie_len != 0) {
+				/* we have to clear IW IE and disable gratuitous APR */
+					wl_cfg80211_add_iw_ie(cfg, ndev, bssidx,
+						VNDR_IE_CUSTOM_FLAG,
+						DOT11_MNG_INTERWORKING_ID,
+						0, 0);
+
+					wldev_iovar_setint_bsscfg(ndev, "grat_arp", 0,
+						bssidx);
+					cfg->wl11u = FALSE;
+					/* we don't care about error */
+				}
+#endif /* WL11U */
+				err = wl_cfgp2p_set_management_ie(cfg, ndev, bssidx,
+					VNDR_IE_PRBREQ_FLAG, (u8 *)request->ie,
+					request->ie_len);
+
+				if (unlikely(err)) {
+					goto scan_out;
+				}
+
+			}
+		}
+	} else {		/* scan in ibss */
+		ssids = this_ssid;
+	}
+
+	cfg->scan_request = request;
+	wl_set_drv_status(cfg, SCANNING, ndev);
+
+	if (cfg->p2p_supported) {
+		if (p2p_on(cfg) && p2p_scan(cfg)) {
+
+			/* find my listen channel */
+			cfg->afx_hdl->my_listen_chan =
+				wl_find_listen_channel(cfg, request->ie,
+				request->ie_len);
+			err = wl_cfgp2p_enable_discovery(cfg, ndev,
+			request->ie, request->ie_len);
+
+			if (unlikely(err)) {
+				goto scan_out;
+			}
+		}
+	}
+	err = wl_do_escan(cfg, wiphy, ndev, request);
+	if (likely(!err))
+		goto scan_success;
+	else
+		goto scan_out;
+
+scan_success:
+	busy_count = 0;
+
+	return 0;
+
+scan_out:
+	if (err == BCME_BUSY || err == BCME_NOTREADY) {
+		WL_ERR(("Scan err = (%d), busy?%d", err, -EBUSY));
+		err = -EBUSY;
+	}
+
+#define SCAN_EBUSY_RETRY_LIMIT 10
+	if (err == -EBUSY) {
+		if (busy_count++ > SCAN_EBUSY_RETRY_LIMIT) {
+			struct ether_addr bssid;
+			s32 ret = 0;
+			busy_count = 0;
+			WL_ERR(("Unusual continuous EBUSY error, %d %d %d %d %d %d %d %d %d\n",
+				wl_get_drv_status(cfg, SCANNING, ndev),
+				wl_get_drv_status(cfg, SCAN_ABORTING, ndev),
+				wl_get_drv_status(cfg, CONNECTING, ndev),
+				wl_get_drv_status(cfg, CONNECTED, ndev),
+				wl_get_drv_status(cfg, DISCONNECTING, ndev),
+				wl_get_drv_status(cfg, AP_CREATING, ndev),
+				wl_get_drv_status(cfg, AP_CREATED, ndev),
+				wl_get_drv_status(cfg, SENDING_ACT_FRM, ndev),
+				wl_get_drv_status(cfg, SENDING_ACT_FRM, ndev)));
+
+			bzero(&bssid, sizeof(bssid));
+			if ((ret = wldev_ioctl(ndev, WLC_GET_BSSID,
+				&bssid, ETHER_ADDR_LEN, false)) == 0)
+				WL_ERR(("FW is connected with " MACDBG "/n",
+					MAC2STRDBG(bssid.octet)));
+			else
+				WL_ERR(("GET BSSID failed with %d\n", ret));
+
+			wl_cfg80211_scan_abort(cfg);
+
+		}
+	} else {
+		busy_count = 0;
+	}
+
+	wl_clr_drv_status(cfg, SCANNING, ndev);
+	if (timer_pending(&cfg->scan_timeout))
+		del_timer_sync(&cfg->scan_timeout);
+	spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+	cfg->scan_request = NULL;
+	spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+
+	return err;
+}
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+static s32
+wl_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
+#else
+static s32
+wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+	struct cfg80211_scan_request *request)
+#endif /* WL_CFG80211_P2P_DEV_IF */
+{
+	s32 err = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+#if defined(WL_CFG80211_P2P_DEV_IF)
+	struct net_device *ndev = wdev_to_wlc_ndev(request->wdev, cfg);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+	WL_DBG(("Enter \n"));
+	RETURN_EIO_IF_NOT_UP(cfg);
+
+	err = __wl_cfg80211_scan(wiphy, ndev, request, NULL);
+	if (unlikely(err)) {
+		if ((err == BCME_EPERM) && cfg->scan_suppressed)
+			WL_DBG(("scan not permitted at this time (%d)\n", err));
+		else
+			WL_ERR(("scan error (%d)\n", err));
+		return err;
+	}
+
+	return err;
+}
+
+static s32 wl_set_rts(struct net_device *dev, u32 rts_threshold)
+{
+	s32 err = 0;
+
+	err = wldev_iovar_setint(dev, "rtsthresh", rts_threshold);
+	if (unlikely(err)) {
+		WL_ERR(("Error (%d)\n", err));
+		return err;
+	}
+	return err;
+}
+
+static s32 wl_set_frag(struct net_device *dev, u32 frag_threshold)
+{
+	s32 err = 0;
+
+	err = wldev_iovar_setint_bsscfg(dev, "fragthresh", frag_threshold, 0);
+	if (unlikely(err)) {
+		WL_ERR(("Error (%d)\n", err));
+		return err;
+	}
+	return err;
+}
+
+static s32 wl_set_retry(struct net_device *dev, u32 retry, bool l)
+{
+	s32 err = 0;
+	u32 cmd = (l ? WLC_SET_LRL : WLC_SET_SRL);
+
+	retry = htod32(retry);
+	err = wldev_ioctl(dev, cmd, &retry, sizeof(retry), true);
+	if (unlikely(err)) {
+		WL_ERR(("cmd (%d) , error (%d)\n", cmd, err));
+		return err;
+	}
+	return err;
+}
+
+static s32 wl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
+{
+	struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)wiphy_priv(wiphy);
+	struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+	s32 err = 0;
+
+	RETURN_EIO_IF_NOT_UP(cfg);
+	WL_DBG(("Enter\n"));
+	if (changed & WIPHY_PARAM_RTS_THRESHOLD &&
+		(cfg->conf->rts_threshold != wiphy->rts_threshold)) {
+		cfg->conf->rts_threshold = wiphy->rts_threshold;
+		err = wl_set_rts(ndev, cfg->conf->rts_threshold);
+		if (!err)
+			return err;
+	}
+	if (changed & WIPHY_PARAM_FRAG_THRESHOLD &&
+		(cfg->conf->frag_threshold != wiphy->frag_threshold)) {
+		cfg->conf->frag_threshold = wiphy->frag_threshold;
+		err = wl_set_frag(ndev, cfg->conf->frag_threshold);
+		if (!err)
+			return err;
+	}
+	if (changed & WIPHY_PARAM_RETRY_LONG &&
+		(cfg->conf->retry_long != wiphy->retry_long)) {
+		cfg->conf->retry_long = wiphy->retry_long;
+		err = wl_set_retry(ndev, cfg->conf->retry_long, true);
+		if (!err)
+			return err;
+	}
+	if (changed & WIPHY_PARAM_RETRY_SHORT &&
+		(cfg->conf->retry_short != wiphy->retry_short)) {
+		cfg->conf->retry_short = wiphy->retry_short;
+		err = wl_set_retry(ndev, cfg->conf->retry_short, false);
+		if (!err) {
+			return err;
+		}
+	}
+
+	return err;
+}
+static chanspec_t
+channel_to_chanspec(struct wiphy *wiphy, struct net_device *dev, u32 channel, u32 bw_cap)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	u8 *buf = NULL;
+	wl_uint32_list_t *list;
+	int err = BCME_OK;
+	chanspec_t c = 0, ret_c = 0;
+	int bw = 0, tmp_bw = 0;
+	int i;
+	u32 tmp_c;
+	u16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+#define LOCAL_BUF_SIZE	1024
+	buf = (u8 *) kzalloc(LOCAL_BUF_SIZE, kflags);
+	if (!buf) {
+		WL_ERR(("buf memory alloc failed\n"));
+		goto exit;
+	}
+	list = (wl_uint32_list_t *)(void *)buf;
+	list->count = htod32(WL_NUMCHANSPECS);
+	err = wldev_iovar_getbuf_bsscfg(dev, "chanspecs", NULL,
+		0, buf, LOCAL_BUF_SIZE, 0, &cfg->ioctl_buf_sync);
+	if (err != BCME_OK) {
+		WL_ERR(("get chanspecs failed with %d\n", err));
+		goto exit;
+	}
+	for (i = 0; i < dtoh32(list->count); i++) {
+		c = dtoh32(list->element[i]);
+		if (channel <= CH_MAX_2G_CHANNEL) {
+			if (!CHSPEC_IS20(c))
+				continue;
+			if (channel == CHSPEC_CHANNEL(c)) {
+				ret_c = c;
+				bw = 20;
+				goto exit;
+			}
+		}
+		tmp_c = wf_chspec_ctlchan(c);
+		tmp_bw = bw2cap[CHSPEC_BW(c) >> WL_CHANSPEC_BW_SHIFT];
+		if (tmp_c != channel)
+			continue;
+
+		if ((tmp_bw > bw) && (tmp_bw <= bw_cap)) {
+			bw = tmp_bw;
+			ret_c = c;
+			if (bw == bw_cap)
+				goto exit;
+		}
+	}
+exit:
+	if (buf)
+		kfree(buf);
+#undef LOCAL_BUF_SIZE
+	WL_INFORM(("return chanspec %x %d\n", ret_c, bw));
+	return ret_c;
+}
+
+void
+wl_cfg80211_ibss_vsie_set_buffer(vndr_ie_setbuf_t *ibss_vsie, int ibss_vsie_len)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+
+	if (cfg != NULL && ibss_vsie != NULL) {
+		if (cfg->ibss_vsie != NULL) {
+			kfree(cfg->ibss_vsie);
+		}
+		cfg->ibss_vsie = ibss_vsie;
+		cfg->ibss_vsie_len = ibss_vsie_len;
+	}
+}
+
+static void
+wl_cfg80211_ibss_vsie_free(struct bcm_cfg80211 *cfg)
+{
+	/* free & initiralize VSIE (Vendor Specific IE) */
+	if (cfg->ibss_vsie != NULL) {
+		kfree(cfg->ibss_vsie);
+		cfg->ibss_vsie = NULL;
+		cfg->ibss_vsie_len = 0;
+	}
+}
+
+s32
+wl_cfg80211_ibss_vsie_delete(struct net_device *dev)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	char *ioctl_buf = NULL;
+	s32 ret = BCME_OK;
+
+	if (cfg != NULL && cfg->ibss_vsie != NULL) {
+		ioctl_buf = kmalloc(WLC_IOCTL_MEDLEN, GFP_KERNEL);
+		if (!ioctl_buf) {
+			WL_ERR(("ioctl memory alloc failed\n"));
+			return -ENOMEM;
+		}
+
+		/* change the command from "add" to "del" */
+		strncpy(cfg->ibss_vsie->cmd, "del", VNDR_IE_CMD_LEN - 1);
+		cfg->ibss_vsie->cmd[VNDR_IE_CMD_LEN - 1] = '\0';
+
+		ret = wldev_iovar_setbuf(dev, "ie",
+			cfg->ibss_vsie, cfg->ibss_vsie_len,
+			ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
+		WL_ERR(("ret=%d\n", ret));
+
+		if (ret == BCME_OK) {
+			/* free & initiralize VSIE */
+			kfree(cfg->ibss_vsie);
+			cfg->ibss_vsie = NULL;
+			cfg->ibss_vsie_len = 0;
+		}
+
+		if (ioctl_buf) {
+			kfree(ioctl_buf);
+		}
+	}
+
+	return ret;
+}
+
+static bcm_struct_cfgdev*
+bcm_cfg80211_add_ibss_if(struct wiphy *wiphy, char *name)
+{
+	int err = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct wireless_dev* wdev = NULL;
+	struct net_device *new_ndev = NULL;
+	struct net_device *primary_ndev = NULL;
+	s32 timeout;
+	wl_aibss_if_t aibss_if;
+	wl_if_event_info *event = NULL;
+
+	if (cfg->ibss_cfgdev != NULL) {
+		WL_ERR(("IBSS interface %s already exists\n", name));
+		return NULL;
+	}
+
+	WL_ERR(("Try to create IBSS interface %s\n", name));
+	primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+	/* generate a new MAC address for the IBSS interface */
+	get_primary_mac(cfg, &cfg->ibss_if_addr);
+	cfg->ibss_if_addr.octet[4] ^= 0x40;
+	memset(&aibss_if, sizeof(aibss_if), 0);
+	memcpy(&aibss_if.addr, &cfg->ibss_if_addr, sizeof(aibss_if.addr));
+	aibss_if.chspec = 0;
+	aibss_if.len = sizeof(aibss_if);
+
+	cfg->bss_pending_op = TRUE;
+	memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info));
+	err = wldev_iovar_setbuf(primary_ndev, "aibss_ifadd", &aibss_if,
+		sizeof(aibss_if), cfg->ioctl_buf, WLC_IOCTL_MAXLEN, NULL);
+	if (err) {
+		WL_ERR(("IOVAR aibss_ifadd failed with error %d\n", err));
+		goto fail;
+	}
+	timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
+		!cfg->bss_pending_op, msecs_to_jiffies(MAX_WAIT_TIME));
+	if (timeout <= 0 || cfg->bss_pending_op)
+		goto fail;
+
+	event = &cfg->if_event_info;
+	strncpy(event->name, name, IFNAMSIZ - 1);
+	/* By calling wl_cfg80211_allocate_if (dhd_allocate_if eventually) we give the control
+	 * over this net_device interface to dhd_linux, hence the interface is managed by dhd_liux
+	 * and will be freed by dhd_detach unless it gets unregistered before that. The
+	 * wireless_dev instance new_ndev->ieee80211_ptr associated with this net_device will
+	 * be freed by wl_dealloc_netinfo
+	 */
+	new_ndev = wl_cfg80211_allocate_if(cfg, event->ifidx, event->name,
+		event->mac, event->bssidx);
+	if (new_ndev == NULL)
+		goto fail;
+	wdev = kzalloc(sizeof(*wdev), GFP_KERNEL);
+	if (wdev == NULL)
+		goto fail;
+	wdev->wiphy = wiphy;
+	wdev->iftype = NL80211_IFTYPE_ADHOC;
+	wdev->netdev = new_ndev;
+	new_ndev->ieee80211_ptr = wdev;
+	SET_NETDEV_DEV(new_ndev, wiphy_dev(wdev->wiphy));
+
+	/* rtnl lock must have been acquired, if this is not the case, wl_cfg80211_register_if
+	* needs to be modified to take one parameter (bool need_rtnl_lock)
+	 */
+	ASSERT_RTNL();
+	if (wl_cfg80211_register_if(cfg, event->ifidx, new_ndev) != BCME_OK)
+		goto fail;
+
+	wl_alloc_netinfo(cfg, new_ndev, wdev, WL_MODE_IBSS, PM_ENABLE);
+	cfg->ibss_cfgdev = ndev_to_cfgdev(new_ndev);
+	WL_ERR(("IBSS interface %s created\n", new_ndev->name));
+	return cfg->ibss_cfgdev;
+
+fail:
+	WL_ERR(("failed to create IBSS interface %s \n", name));
+	cfg->bss_pending_op = FALSE;
+	if (new_ndev)
+		wl_cfg80211_remove_if(cfg, event->ifidx, new_ndev);
+	if (wdev)
+		kfree(wdev);
+	return NULL;
+}
+
+#if defined(DUAL_STA) || defined(DUAL_STA_STATIC_IF)
+s32
+wl_cfg80211_add_del_bss(struct bcm_cfg80211 *cfg,
+	struct net_device *ndev, s32 bsscfg_idx,
+	enum nl80211_iftype iface_type, s32 del, u8 *addr)
+{
+	s32 ret = BCME_OK;
+	s32 val = 0;
+
+	struct {
+		s32 cfg;
+		s32 val;
+		struct ether_addr ea;
+	} bss_setbuf;
+
+	WL_INFORM(("iface_type:%d del:%d \n", iface_type, del));
+
+	bzero(&bss_setbuf, sizeof(bss_setbuf));
+
+	/* AP=3, STA=2, up=1, down=0, val=-1 */
+	if (del) {
+		val = -1;
+	} else if (iface_type == NL80211_IFTYPE_AP) {
+		/* AP Interface */
+		WL_DBG(("Adding AP Interface \n"));
+		val = 3;
+	} else if (iface_type == NL80211_IFTYPE_STATION) {
+		WL_DBG(("Adding STA Interface \n"));
+		val = 2;
+	} else {
+		WL_ERR((" add_del_bss NOT supported for IFACE type:0x%x", iface_type));
+		return -EINVAL;
+	}
+
+	bss_setbuf.cfg = htod32(bsscfg_idx);
+	bss_setbuf.val = htod32(val);
+
+	if (addr) {
+		memcpy(&bss_setbuf.ea.octet, addr, ETH_ALEN);
+	}
+
+	ret = wldev_iovar_setbuf(ndev, "bss", &bss_setbuf, sizeof(bss_setbuf),
+		cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+	if (ret != 0)
+		WL_ERR(("'bss %d' failed with %d\n", val, ret));
+
+	return ret;
+}
+
+/* Create a Generic Network Interface and initialize it depending up on
+ * the interface type
+ */
+bcm_struct_cfgdev*
+wl_cfg80211_create_iface(struct wiphy *wiphy,
+	enum nl80211_iftype iface_type,
+	u8 *mac_addr, const char *name)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct net_device *new_ndev = NULL;
+	struct net_device *primary_ndev = NULL;
+	s32 ret = BCME_OK;
+	s32 bsscfg_idx = 1;
+	u32 timeout;
+	wl_if_event_info *event = NULL;
+	struct wireless_dev *wdev = NULL;
+	u8 addr[ETH_ALEN];
+
+	WL_DBG(("Enter\n"));
+
+	if (!name) {
+		WL_ERR(("Interface name not provided\n"));
+		return NULL;
+	}
+
+	primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+
+	if (likely(!mac_addr)) {
+		/* Use primary MAC with the locally administered bit for the Secondary STA I/F */
+		memcpy(addr, primary_ndev->dev_addr, ETH_ALEN);
+		addr[0] |= 0x02;
+	} else {
+		/* Use the application provided mac address (if any) */
+		memcpy(addr, mac_addr, ETH_ALEN);
+	}
+
+	if ((iface_type != NL80211_IFTYPE_STATION) && (iface_type != NL80211_IFTYPE_AP)) {
+		WL_ERR(("IFACE type:%d not supported. STA "
+					"or AP IFACE is only supported\n", iface_type));
+		return NULL;
+	}
+
+	cfg->bss_pending_op = TRUE;
+	memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info));
+
+	/* De-initialize the p2p discovery interface, if operational */
+	if (p2p_is_on(cfg)) {
+		WL_DBG(("Disabling P2P Discovery Interface \n"));
+#ifdef WL_CFG80211_P2P_DEV_IF
+		ret = wl_cfg80211_scan_stop(bcmcfg_to_p2p_wdev(cfg));
+#else
+		ret = wl_cfg80211_scan_stop(cfg->p2p_net);
+#endif
+		if (unlikely(ret < 0)) {
+			CFGP2P_ERR(("P2P scan stop failed, ret=%d\n", ret));
+		}
+
+		wl_cfgp2p_disable_discovery(cfg);
+		wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) = 0;
+		p2p_on(cfg) = false;
+	}
+
+	/*
+	 * Intialize the firmware I/F.
+	 */
+	if ((ret = wl_cfg80211_add_del_bss(cfg, primary_ndev,
+		bsscfg_idx, iface_type, 0, addr)) < 0) {
+		return NULL;
+	}
+
+	/*
+	 * Wait till the firmware send a confirmation event back.
+	 */
+	WL_DBG(("Wait for the FW I/F Event\n"));
+	timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
+		!cfg->bss_pending_op, msecs_to_jiffies(MAX_WAIT_TIME));
+	if (timeout <= 0 || cfg->bss_pending_op) {
+		WL_ERR(("ADD_IF event, didn't come. Return \n"));
+		goto fail;
+	}
+
+	/*
+	 * Since FW operation is successful,we can go ahead with the
+	 * the host interface creation.
+	 */
+	event = &cfg->if_event_info;
+	strncpy(event->name, name, IFNAMSIZ - 1);
+	new_ndev = wl_cfg80211_allocate_if(cfg, event->ifidx,
+		event->name, addr, event->bssidx);
+	if (!new_ndev) {
+		WL_ERR(("I/F allocation failed! \n"));
+		goto fail;
+	} else
+		WL_DBG(("I/F allocation succeeded! ifidx:0x%x bssidx:0x%x \n",
+		 event->ifidx, event->bssidx));
+
+	wdev = kzalloc(sizeof(*wdev), GFP_KERNEL);
+	if (!wdev) {
+		WL_ERR(("wireless_dev alloc failed! \n"));
+		goto fail;
+	}
+
+	wdev->wiphy = wiphy;
+	wdev->iftype = iface_type;
+	new_ndev->ieee80211_ptr = wdev;
+	SET_NETDEV_DEV(new_ndev, wiphy_dev(wdev->wiphy));
+
+	/* RTNL lock must have been acquired. */
+	ASSERT_RTNL();
+
+	/* Set the locally administed mac addr, if not applied already */
+	if (memcmp(addr, event->mac, ETH_ALEN) != 0) {
+		ret = wldev_iovar_setbuf_bsscfg(primary_ndev, "cur_etheraddr", addr, ETH_ALEN,
+			cfg->ioctl_buf, WLC_IOCTL_MAXLEN, event->bssidx, &cfg->ioctl_buf_sync);
+		if (unlikely(ret)) {
+				WL_ERR(("set cur_etheraddr Error (%d)\n", ret));
+				goto fail;
+		}
+		memcpy(new_ndev->dev_addr, addr, ETH_ALEN);
+	}
+
+	if (wl_cfg80211_register_if(cfg, event->ifidx, new_ndev) != BCME_OK) {
+		WL_ERR(("IFACE register failed \n"));
+		goto fail;
+	}
+
+	/* Initialize with the station mode params */
+	wl_alloc_netinfo(cfg, new_ndev, wdev,
+		(iface_type == NL80211_IFTYPE_STATION) ?
+		WL_MODE_BSS : WL_MODE_AP, PM_ENABLE);
+	cfg->bss_cfgdev = ndev_to_cfgdev(new_ndev);
+	cfg->cfgdev_bssidx = event->bssidx;
+
+	WL_DBG(("Host Network Interface for Secondary I/F created"));
+
+	return cfg->bss_cfgdev;
+
+fail:
+	cfg->bss_pending_op = FALSE;
+	if (new_ndev)
+		wl_cfg80211_remove_if(cfg, event->ifidx, new_ndev);
+	if (wdev)
+		kfree(wdev);
+
+	return NULL;
+}
+
+s32
+wl_cfg80211_del_iface(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct net_device *ndev = NULL;
+	struct net_device *primary_ndev = NULL;
+	s32 ret = BCME_OK;
+	s32 bsscfg_idx = 1;
+	u32 timeout;
+	enum nl80211_iftype iface_type = NL80211_IFTYPE_STATION;
+
+	WL_DBG(("Enter\n"));
+
+	if (!cfg->bss_cfgdev)
+		return 0;
+
+	/* If any scan is going on, abort it */
+	if (wl_get_drv_status_all(cfg, SCANNING)) {
+		WL_DBG(("Scan in progress. Aborting the scan!\n"));
+		wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true);
+	}
+
+	ndev = cfgdev_to_ndev(cfg->bss_cfgdev);
+	primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+
+	cfg->bss_pending_op = TRUE;
+	memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info));
+
+	/* Delete the firmware interface */
+	if ((ret = wl_cfg80211_add_del_bss(cfg, ndev,
+		bsscfg_idx, iface_type, true, NULL)) < 0) {
+		WL_ERR(("DEL bss failed ret:%d \n", ret));
+		return ret;
+	}
+
+	timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
+		!cfg->bss_pending_op, msecs_to_jiffies(MAX_WAIT_TIME));
+	if (timeout <= 0 || cfg->bss_pending_op) {
+		WL_ERR(("timeout in waiting IF_DEL event\n"));
+	}
+
+	wl_cfg80211_remove_if(cfg, cfg->if_event_info.ifidx, ndev);
+	cfg->bss_cfgdev = NULL;
+	cfg->cfgdev_bssidx = -1;
+	cfg->bss_pending_op = FALSE;
+
+	WL_DBG(("IF_DEL Done.\n"));
+
+	return ret;
+}
+#endif /* defined(DUAL_STA) || defined(DUAL_STA_STATIC_IF) */
+
+static s32
+bcm_cfg80211_del_ibss_if(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev)
+{
+	int err = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct net_device *ndev = NULL;
+	struct net_device *primary_ndev = NULL;
+	s32 timeout;
+
+	if (!cfgdev || cfg->ibss_cfgdev != cfgdev || ETHER_ISNULLADDR(&cfg->ibss_if_addr.octet))
+		return -EINVAL;
+	ndev = cfgdev_to_ndev(cfg->ibss_cfgdev);
+	primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+
+	cfg->bss_pending_op = TRUE;
+	memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info));
+	err = wldev_iovar_setbuf(primary_ndev, "aibss_ifdel", &cfg->ibss_if_addr,
+		sizeof(cfg->ibss_if_addr), cfg->ioctl_buf, WLC_IOCTL_MAXLEN, NULL);
+	if (err) {
+		WL_ERR(("IOVAR aibss_ifdel failed with error %d\n", err));
+		goto fail;
+	}
+	timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
+		!cfg->bss_pending_op, msecs_to_jiffies(MAX_WAIT_TIME));
+	if (timeout <= 0 || cfg->bss_pending_op) {
+		WL_ERR(("timeout in waiting IF_DEL event\n"));
+		goto fail;
+	}
+
+	wl_cfg80211_remove_if(cfg, cfg->if_event_info.ifidx, ndev);
+	cfg->ibss_cfgdev = NULL;
+	return 0;
+
+fail:
+	cfg->bss_pending_op = FALSE;
+	return -1;
+}
+
+static s32
+wl_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
+	struct cfg80211_ibss_params *params)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct cfg80211_bss *bss;
+	struct ieee80211_channel *chan;
+	struct wl_join_params join_params;
+	int scan_suppress;
+	struct cfg80211_ssid ssid;
+	s32 scan_retry = 0;
+	s32 err = 0;
+	size_t join_params_size;
+	chanspec_t chanspec = 0;
+	u32 param[2] = {0, 0};
+	u32 bw_cap = 0;
+
+	WL_TRACE(("In\n"));
+	RETURN_EIO_IF_NOT_UP(cfg);
+	WL_INFORM(("JOIN BSSID:" MACDBG "\n", MAC2STRDBG(params->bssid)));
+	if (!params->ssid || params->ssid_len <= 0) {
+		WL_ERR(("Invalid parameter\n"));
+		return -EINVAL;
+	}
+#if defined(WL_CFG80211_P2P_DEV_IF)
+	chan = params->chandef.chan;
+#else
+	chan = params->channel;
+#endif /* WL_CFG80211_P2P_DEV_IF */
+	if (chan)
+		cfg->channel = ieee80211_frequency_to_channel(chan->center_freq);
+	if (wl_get_drv_status(cfg, CONNECTED, dev)) {
+		struct wlc_ssid *ssid = (struct wlc_ssid *)wl_read_prof(cfg, dev, WL_PROF_SSID);
+		u8 *bssid = (u8 *)wl_read_prof(cfg, dev, WL_PROF_BSSID);
+		u32 *channel = (u32 *)wl_read_prof(cfg, dev, WL_PROF_CHAN);
+		if (!params->bssid || ((memcmp(params->bssid, bssid, ETHER_ADDR_LEN) == 0) &&
+			(memcmp(params->ssid, ssid->SSID, ssid->SSID_len) == 0) &&
+			(*channel == cfg->channel))) {
+			WL_ERR(("Connection already existed to " MACDBG "\n",
+				MAC2STRDBG((u8 *)wl_read_prof(cfg, dev, WL_PROF_BSSID))));
+			return -EISCONN;
+		}
+		WL_ERR(("Ignore Previous connecton to %s (" MACDBG ")\n",
+			ssid->SSID, MAC2STRDBG(bssid)));
+	}
+
+	/* remove the VSIE */
+	wl_cfg80211_ibss_vsie_delete(dev);
+
+	bss = cfg80211_get_ibss(wiphy, NULL, params->ssid, params->ssid_len);
+	if (!bss) {
+		if (IBSS_INITIAL_SCAN_ALLOWED == TRUE) {
+			memcpy(ssid.ssid, params->ssid, params->ssid_len);
+			ssid.ssid_len = params->ssid_len;
+			do {
+				if (unlikely
+					(__wl_cfg80211_scan(wiphy, dev, NULL, &ssid) ==
+					 -EBUSY)) {
+					wl_delay(150);
+				} else {
+					break;
+				}
+			} while (++scan_retry < WL_SCAN_RETRY_MAX);
+
+			/* rtnl lock code is removed here. don't see why rtnl lock
+			 * needs to be released.
+			 */
+
+			/* wait 4 secons till scan done.... */
+			schedule_timeout_interruptible(msecs_to_jiffies(4000));
+
+			bss = cfg80211_get_ibss(wiphy, NULL,
+				params->ssid, params->ssid_len);
+		}
+	}
+	if (bss && ((IBSS_COALESCE_ALLOWED == TRUE) ||
+		((IBSS_COALESCE_ALLOWED == FALSE) && params->bssid &&
+		!memcmp(bss->bssid, params->bssid, ETHER_ADDR_LEN)))) {
+		cfg->ibss_starter = false;
+		WL_DBG(("Found IBSS\n"));
+	} else {
+		cfg->ibss_starter = true;
+	}
+	if (chan) {
+		if (chan->band == IEEE80211_BAND_5GHZ)
+			param[0] = WLC_BAND_5G;
+		else if (chan->band == IEEE80211_BAND_2GHZ)
+			param[0] = WLC_BAND_2G;
+		err = wldev_iovar_getint(dev, "bw_cap", param);
+		if (unlikely(err)) {
+			WL_ERR(("Get bw_cap Failed (%d)\n", err));
+			return err;
+		}
+		bw_cap = param[0];
+		chanspec = channel_to_chanspec(wiphy, dev, cfg->channel, bw_cap);
+	}
+	/*
+	 * Join with specific BSSID and cached SSID
+	 * If SSID is zero join based on BSSID only
+	 */
+	memset(&join_params, 0, sizeof(join_params));
+	memcpy((void *)join_params.ssid.SSID, (void *)params->ssid,
+		params->ssid_len);
+	join_params.ssid.SSID_len = htod32(params->ssid_len);
+	if (params->bssid) {
+		memcpy(&join_params.params.bssid, params->bssid, ETHER_ADDR_LEN);
+		err = wldev_ioctl(dev, WLC_SET_DESIRED_BSSID, &join_params.params.bssid,
+			ETHER_ADDR_LEN, true);
+		if (unlikely(err)) {
+			WL_ERR(("Error (%d)\n", err));
+			return err;
+		}
+	} else
+		memset(&join_params.params.bssid, 0, ETHER_ADDR_LEN);
+	wldev_iovar_setint(dev, "ibss_coalesce_allowed", IBSS_COALESCE_ALLOWED);
+
+	if (IBSS_INITIAL_SCAN_ALLOWED == FALSE) {
+		scan_suppress = TRUE;
+		/* Set the SCAN SUPPRESS Flag in the firmware to skip join scan */
+		err = wldev_ioctl(dev, WLC_SET_SCANSUPPRESS,
+			&scan_suppress, sizeof(int), true);
+		if (unlikely(err)) {
+			WL_ERR(("Scan Suppress Setting Failed (%d)\n", err));
+			return err;
+		}
+	}
+
+	join_params.params.chanspec_list[0] = chanspec;
+	join_params.params.chanspec_num = 1;
+	wldev_iovar_setint(dev, "chanspec", chanspec);
+	join_params_size = sizeof(join_params);
+
+	/* Disable Authentication, IBSS will add key if it required */
+	wldev_iovar_setint(dev, "wpa_auth", WPA_AUTH_DISABLED);
+	wldev_iovar_setint(dev, "wsec", 0);
+
+
+	err = wldev_ioctl(dev, WLC_SET_SSID, &join_params,
+		join_params_size, true);
+	if (unlikely(err)) {
+		WL_ERR(("Error (%d)\n", err));
+		return err;
+	}
+
+	if (IBSS_INITIAL_SCAN_ALLOWED == FALSE) {
+		scan_suppress = FALSE;
+		/* Reset the SCAN SUPPRESS Flag */
+		err = wldev_ioctl(dev, WLC_SET_SCANSUPPRESS,
+			&scan_suppress, sizeof(int), true);
+		if (unlikely(err)) {
+			WL_ERR(("Reset Scan Suppress Flag Failed (%d)\n", err));
+			return err;
+		}
+	}
+	wl_update_prof(cfg, dev, NULL, &join_params.ssid, WL_PROF_SSID);
+	wl_update_prof(cfg, dev, NULL, &cfg->channel, WL_PROF_CHAN);
+	return err;
+}
+
+static s32 wl_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	s32 err = 0;
+	scb_val_t scbval;
+	u8 *curbssid;
+
+	RETURN_EIO_IF_NOT_UP(cfg);
+	wl_link_down(cfg);
+
+	WL_ERR(("Leave IBSS\n"));
+	curbssid = wl_read_prof(cfg, dev, WL_PROF_BSSID);
+	wl_set_drv_status(cfg, DISCONNECTING, dev);
+	scbval.val = 0;
+	memcpy(&scbval.ea, curbssid, ETHER_ADDR_LEN);
+	err = wldev_ioctl(dev, WLC_DISASSOC, &scbval,
+		sizeof(scb_val_t), true);
+	if (unlikely(err)) {
+		wl_clr_drv_status(cfg, DISCONNECTING, dev);
+		WL_ERR(("error(%d)\n", err));
+		return err;
+	}
+
+	/* remove the VSIE */
+	wl_cfg80211_ibss_vsie_delete(dev);
+
+	return err;
+}
+#ifdef MFP
+static int wl_cfg80211_get_rsn_capa(bcm_tlv_t *wpa2ie, u8* capa)
+{
+	u16 suite_count;
+	wpa_suite_mcast_t *mcast;
+	wpa_suite_ucast_t *ucast;
+	u16 len;
+	wpa_suite_auth_key_mgmt_t *mgmt;
+
+	if (!wpa2ie)
+		return -1;
+
+	len = wpa2ie->len;
+	mcast = (wpa_suite_mcast_t *)&wpa2ie->data[WPA2_VERSION_LEN];
+	if ((len -= WPA_SUITE_LEN) <= 0)
+		return BCME_BADLEN;
+	ucast = (wpa_suite_ucast_t *)&mcast[1];
+	suite_count = ltoh16_ua(&ucast->count);
+	if ((suite_count > NL80211_MAX_NR_CIPHER_SUITES) ||
+		(len -= (WPA_IE_SUITE_COUNT_LEN +
+		(WPA_SUITE_LEN * suite_count))) <= 0)
+		return BCME_BADLEN;
+
+	mgmt = (wpa_suite_auth_key_mgmt_t *)&ucast->list[suite_count];
+	suite_count = ltoh16_ua(&mgmt->count);
+
+	if ((suite_count > NL80211_MAX_NR_CIPHER_SUITES) ||
+		(len -= (WPA_IE_SUITE_COUNT_LEN +
+		(WPA_SUITE_LEN * suite_count))) >= RSN_CAP_LEN) {
+		capa[0] = *(u8 *)&mgmt->list[suite_count];
+		capa[1] = *((u8 *)&mgmt->list[suite_count] + 1);
+	} else
+		return BCME_BADLEN;
+
+	return 0;
+}
+#endif /* MFP */
+
+
+static s32
+wl_set_wpa_version(struct net_device *dev, struct cfg80211_connect_params *sme)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	struct wl_security *sec;
+	s32 val = 0;
+	s32 err = 0;
+	s32 bssidx;
+	if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+		WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+		return BCME_ERROR;
+	}
+
+	if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_1)
+		val = WPA_AUTH_PSK |
+			WPA_AUTH_UNSPECIFIED;
+	else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2)
+		val = WPA2_AUTH_PSK|
+			WPA2_AUTH_UNSPECIFIED;
+	else
+		val = WPA_AUTH_DISABLED;
+
+	if (is_wps_conn(sme))
+		val = WPA_AUTH_DISABLED;
+
+	WL_DBG(("setting wpa_auth to 0x%0x\n", val));
+	err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", val, bssidx);
+	if (unlikely(err)) {
+		WL_ERR(("set wpa_auth failed (%d)\n", err));
+		return err;
+	}
+	sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
+	sec->wpa_versions = sme->crypto.wpa_versions;
+	return err;
+}
+
+
+static s32
+wl_set_auth_type(struct net_device *dev, struct cfg80211_connect_params *sme)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	struct wl_security *sec;
+	s32 val = 0;
+	s32 err = 0;
+	s32 bssidx;
+	if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+		WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+		return BCME_ERROR;
+	}
+
+	switch (sme->auth_type) {
+	case NL80211_AUTHTYPE_OPEN_SYSTEM:
+		val = WL_AUTH_OPEN_SYSTEM;
+		WL_DBG(("open system\n"));
+		break;
+	case NL80211_AUTHTYPE_SHARED_KEY:
+		val = WL_AUTH_SHARED_KEY;
+		WL_DBG(("shared key\n"));
+		break;
+	case NL80211_AUTHTYPE_AUTOMATIC:
+		val = WL_AUTH_OPEN_SHARED;
+		WL_DBG(("automatic\n"));
+		break;
+	default:
+		val = 2;
+		WL_ERR(("invalid auth type (%d)\n", sme->auth_type));
+		break;
+	}
+
+	err = wldev_iovar_setint_bsscfg(dev, "auth", val, bssidx);
+	if (unlikely(err)) {
+		WL_ERR(("set auth failed (%d)\n", err));
+		return err;
+	}
+	sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
+	sec->auth_type = sme->auth_type;
+	return err;
+}
+
+static s32
+wl_set_set_cipher(struct net_device *dev, struct cfg80211_connect_params *sme)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	struct wl_security *sec;
+	s32 pval = 0;
+	s32 gval = 0;
+	s32 err = 0;
+	s32 wsec_val = 0;
+#ifdef MFP
+	s32 mfp = 0;
+	bcm_tlv_t *wpa2_ie;
+	u8 rsn_cap[2];
+#endif /* MFP */
+
+	s32 bssidx;
+	if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+		WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+		return BCME_ERROR;
+	}
+
+	if (sme->crypto.n_ciphers_pairwise) {
+		switch (sme->crypto.ciphers_pairwise[0]) {
+		case WLAN_CIPHER_SUITE_WEP40:
+		case WLAN_CIPHER_SUITE_WEP104:
+			pval = WEP_ENABLED;
+			break;
+		case WLAN_CIPHER_SUITE_TKIP:
+			pval = TKIP_ENABLED;
+			break;
+		case WLAN_CIPHER_SUITE_CCMP:
+		case WLAN_CIPHER_SUITE_AES_CMAC:
+			pval = AES_ENABLED;
+			break;
+		default:
+			WL_ERR(("invalid cipher pairwise (%d)\n",
+				sme->crypto.ciphers_pairwise[0]));
+			return -EINVAL;
+		}
+	}
+	if (sme->crypto.cipher_group) {
+		switch (sme->crypto.cipher_group) {
+		case WLAN_CIPHER_SUITE_WEP40:
+		case WLAN_CIPHER_SUITE_WEP104:
+			gval = WEP_ENABLED;
+			break;
+		case WLAN_CIPHER_SUITE_TKIP:
+			gval = TKIP_ENABLED;
+			break;
+		case WLAN_CIPHER_SUITE_CCMP:
+			gval = AES_ENABLED;
+			break;
+		case WLAN_CIPHER_SUITE_AES_CMAC:
+			gval = AES_ENABLED;
+			break;
+		default:
+			WL_ERR(("invalid cipher group (%d)\n",
+				sme->crypto.cipher_group));
+			return -EINVAL;
+		}
+	}
+
+	WL_DBG(("pval (%d) gval (%d)\n", pval, gval));
+
+	if (is_wps_conn(sme)) {
+		if (sme->privacy)
+			err = wldev_iovar_setint_bsscfg(dev, "wsec", 4, bssidx);
+		else
+			/* WPS-2.0 allows no security */
+			err = wldev_iovar_setint_bsscfg(dev, "wsec", 0, bssidx);
+	} else {
+			WL_DBG((" NO, is_wps_conn, Set pval | gval to WSEC"));
+			wsec_val = pval | gval;
+#ifdef MFP
+			if (pval == AES_ENABLED) {
+				if (((wpa2_ie = bcm_parse_tlvs((u8 *)sme->ie, sme->ie_len,
+					DOT11_MNG_RSN_ID)) != NULL) &&
+					(wl_cfg80211_get_rsn_capa(wpa2_ie, rsn_cap) == 0)) {
+
+					if (rsn_cap[0] & RSN_CAP_MFPC) {
+						/* MFP Capability advertised by supplicant. Check
+						 * whether MFP is supported in the firmware
+						 */
+						if ((err = wldev_iovar_getint_bsscfg(dev,
+								"mfp", &mfp, bssidx)) < 0) {
+							WL_ERR(("Get MFP failed! "
+								"Check MFP support in FW \n"));
+							return -1;
+						}
+
+						if ((sme->crypto.n_akm_suites == 1) &&
+							((sme->crypto.akm_suites[0] ==
+							WL_AKM_SUITE_MFP_PSK) ||
+							(sme->crypto.akm_suites[0] ==
+							WL_AKM_SUITE_MFP_1X))) {
+							wsec_val |= MFP_SHA256;
+						} else if (sme->crypto.n_akm_suites > 1) {
+							WL_ERR(("Multiple AKM Specified \n"));
+							return -EINVAL;
+						}
+
+						wsec_val |= MFP_CAPABLE;
+						if (rsn_cap[0] & RSN_CAP_MFPR)
+							wsec_val |= MFP_REQUIRED;
+					}
+				}
+			}
+#endif /* MFP */
+
+			WL_DBG((" Set WSEC to fW 0x%x \n", wsec_val));
+			err = wldev_iovar_setint_bsscfg(dev, "wsec",
+				wsec_val, bssidx);
+	}
+	if (unlikely(err)) {
+		WL_ERR(("error (%d)\n", err));
+		return err;
+	}
+
+	sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
+	sec->cipher_pairwise = sme->crypto.ciphers_pairwise[0];
+	sec->cipher_group = sme->crypto.cipher_group;
+
+	return err;
+}
+
+static s32
+wl_set_key_mgmt(struct net_device *dev, struct cfg80211_connect_params *sme)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	struct wl_security *sec;
+	s32 val = 0;
+	s32 err = 0;
+	s32 bssidx;
+	if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+		WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+		return BCME_ERROR;
+	}
+
+	if (sme->crypto.n_akm_suites) {
+		err = wldev_iovar_getint(dev, "wpa_auth", &val);
+		if (unlikely(err)) {
+			WL_ERR(("could not get wpa_auth (%d)\n", err));
+			return err;
+		}
+		if (val & (WPA_AUTH_PSK |
+			WPA_AUTH_UNSPECIFIED)) {
+			switch (sme->crypto.akm_suites[0]) {
+			case WLAN_AKM_SUITE_8021X:
+				val = WPA_AUTH_UNSPECIFIED;
+				break;
+			case WLAN_AKM_SUITE_PSK:
+				val = WPA_AUTH_PSK;
+				break;
+			default:
+				WL_ERR(("invalid cipher group (%d)\n",
+					sme->crypto.cipher_group));
+				return -EINVAL;
+			}
+		} else if (val & (WPA2_AUTH_PSK |
+			WPA2_AUTH_UNSPECIFIED)) {
+			switch (sme->crypto.akm_suites[0]) {
+			case WLAN_AKM_SUITE_8021X:
+				val = WPA2_AUTH_UNSPECIFIED;
+				break;
+#ifdef MFP
+			case WL_AKM_SUITE_MFP_1X:
+				val = WPA2_AUTH_UNSPECIFIED;
+				break;
+			case WL_AKM_SUITE_MFP_PSK:
+				val = WPA2_AUTH_PSK;
+				break;
+#endif
+			case WLAN_AKM_SUITE_PSK:
+				val = WPA2_AUTH_PSK;
+				break;
+			default:
+				WL_ERR(("invalid cipher group (%d)\n",
+					sme->crypto.cipher_group));
+				return -EINVAL;
+			}
+		}
+		WL_DBG(("setting wpa_auth to %d\n", val));
+
+		err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", val, bssidx);
+		if (unlikely(err)) {
+			WL_ERR(("could not set wpa_auth (%d)\n", err));
+			return err;
+		}
+	}
+	sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
+	sec->wpa_auth = sme->crypto.akm_suites[0];
+
+	return err;
+}
+
+static s32
+wl_set_set_sharedkey(struct net_device *dev,
+	struct cfg80211_connect_params *sme)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	struct wl_security *sec;
+	struct wl_wsec_key key;
+	s32 val;
+	s32 err = 0;
+	s32 bssidx;
+	if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+		WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+		return BCME_ERROR;
+	}
+
+	WL_DBG(("key len (%d)\n", sme->key_len));
+	if (sme->key_len) {
+		sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
+		WL_DBG(("wpa_versions 0x%x cipher_pairwise 0x%x\n",
+			sec->wpa_versions, sec->cipher_pairwise));
+		if (!(sec->wpa_versions & (NL80211_WPA_VERSION_1 |
+			NL80211_WPA_VERSION_2)) &&
+			(sec->cipher_pairwise & (WLAN_CIPHER_SUITE_WEP40 |
+		WLAN_CIPHER_SUITE_WEP104)))
+		{
+			memset(&key, 0, sizeof(key));
+			key.len = (u32) sme->key_len;
+			key.index = (u32) sme->key_idx;
+			if (unlikely(key.len > sizeof(key.data))) {
+				WL_ERR(("Too long key length (%u)\n", key.len));
+				return -EINVAL;
+			}
+			memcpy(key.data, sme->key, key.len);
+			key.flags = WL_PRIMARY_KEY;
+			switch (sec->cipher_pairwise) {
+			case WLAN_CIPHER_SUITE_WEP40:
+				key.algo = CRYPTO_ALGO_WEP1;
+				break;
+			case WLAN_CIPHER_SUITE_WEP104:
+				key.algo = CRYPTO_ALGO_WEP128;
+				break;
+			default:
+				WL_ERR(("Invalid algorithm (%d)\n",
+					sme->crypto.ciphers_pairwise[0]));
+				return -EINVAL;
+			}
+			/* Set the new key/index */
+			WL_DBG(("key length (%d) key index (%d) algo (%d)\n",
+				key.len, key.index, key.algo));
+			WL_DBG(("key \"%s\"\n", key.data));
+			swap_key_from_BE(&key);
+			err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key),
+				cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+			if (unlikely(err)) {
+				WL_ERR(("WLC_SET_KEY error (%d)\n", err));
+				return err;
+			}
+			if (sec->auth_type == NL80211_AUTHTYPE_SHARED_KEY) {
+				WL_DBG(("set auth_type to shared key\n"));
+				val = WL_AUTH_SHARED_KEY;	/* shared key */
+				err = wldev_iovar_setint_bsscfg(dev, "auth", val, bssidx);
+				if (unlikely(err)) {
+					WL_ERR(("set auth failed (%d)\n", err));
+					return err;
+				}
+			}
+		}
+	}
+	return err;
+}
+
+#if defined(ESCAN_RESULT_PATCH)
+static u8 connect_req_bssid[6];
+static u8 broad_bssid[6];
+#endif /* ESCAN_RESULT_PATCH */
+
+
+
+#if defined(CUSTOM_SET_CPUCORE) || defined(CONFIG_TCPACK_FASTTX)
+static bool wl_get_chan_isvht80(struct net_device *net, dhd_pub_t *dhd)
+{
+	u32 chanspec = 0;
+	bool isvht80 = 0;
+
+	if (wldev_iovar_getint(net, "chanspec", (s32 *)&chanspec) == BCME_OK)
+		chanspec = wl_chspec_driver_to_host(chanspec);
+
+	isvht80 = chanspec & WL_CHANSPEC_BW_80;
+	WL_INFO(("%s: chanspec(%x:%d)\n", __FUNCTION__, chanspec, isvht80));
+
+	return isvht80;
+}
+#endif /* CUSTOM_SET_CPUCORE || CONFIG_TCPACK_FASTTX */
+
+static s32
+wl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
+	struct cfg80211_connect_params *sme)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct ieee80211_channel *chan = sme->channel;
+	struct wl_join_params join_params;
+	struct ether_addr bssid;
+	wl_extjoin_params_t *ext_join_params;
+	size_t join_params_size;
+#if defined(ROAM_ENABLE) && defined(ROAM_AP_ENV_DETECTION)
+	dhd_pub_t *dhd =  (dhd_pub_t *)(cfg->pub);
+	s32 roam_trigger[2] = {0, 0};
+#endif /* ROAM_AP_ENV_DETECTION */
+	u8* wpaie  = 0;
+	u8 chan_info[CHAN_INFO_LEN] = {0}, *chan_ptr;
+	u32 wpaie_len = 0;
+	u32 timeout;
+	u32 chan_cnt = 0, i, w_count = 0;
+	s32 wait_cnt;
+	s32 bssidx;
+	s32 err = 0;
+#ifdef ROAM_CHANNEL_CACHE
+	chanspec_t chanspec_list[MAX_ROAM_CACHE_NUM];
+#endif /* ROAM_CHANNEL_CACHE */
+	wpa_ie_fixed_t *wpa_ie;
+	bcm_tlv_t *wpa2_ie;
+	bool use_chan_cache = FALSE;
+	WL_DBG(("In\n"));
+
+	if (unlikely(!sme->ssid)) {
+		WL_ERR(("Invalid ssid\n"));
+		return -EOPNOTSUPP;
+	}
+
+	if (unlikely(sme->ssid_len > DOT11_MAX_SSID_LEN)) {
+		WL_ERR(("Invalid SSID info: SSID=%s, length=%zd\n",
+			sme->ssid, sme->ssid_len));
+		return -EINVAL;
+	}
+
+	RETURN_EIO_IF_NOT_UP(cfg);
+
+	chan_ptr = chan_info;
+	/*
+	 * Cancel ongoing scan to sync up with sme state machine of cfg80211.
+	 */
+	if (cfg->scan_request) {
+		wl_notify_escan_complete(cfg, dev, true, true);
+	}
+#ifdef WL_SCHED_SCAN
+	if (cfg->sched_scan_req) {
+		wl_cfg80211_sched_scan_stop(wiphy, bcmcfg_to_prmry_ndev(cfg));
+	}
+#endif
+#if defined(ESCAN_RESULT_PATCH)
+	if (sme->bssid)
+		memcpy(connect_req_bssid, sme->bssid, ETHER_ADDR_LEN);
+	else
+		bzero(connect_req_bssid, ETHER_ADDR_LEN);
+	bzero(broad_bssid, ETHER_ADDR_LEN);
+#endif
+#if defined(USE_DYNAMIC_MAXPKT_RXGLOM)
+	maxrxpktglom = 0;
+#endif
+	bzero(&bssid, sizeof(bssid));
+	if (!wl_get_drv_status(cfg, CONNECTED, dev)&&
+		(err = wldev_ioctl(dev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN, false)) == 0) {
+		if (!ETHER_ISNULLADDR(&bssid)) {
+			scb_val_t scbval;
+			wl_set_drv_status(cfg, DISCONNECTING, dev);
+			scbval.val = DOT11_RC_DISASSOC_LEAVING;
+			memcpy(&scbval.ea, &bssid, ETHER_ADDR_LEN);
+			scbval.val = htod32(scbval.val);
+
+			WL_DBG(("drv status CONNECTED is not set, but connected in FW!" MACDBG "/n",
+				MAC2STRDBG(bssid.octet)));
+			err = wldev_ioctl(dev, WLC_DISASSOC, &scbval,
+				sizeof(scb_val_t), true);
+			if (unlikely(err)) {
+				wl_clr_drv_status(cfg, DISCONNECTING, dev);
+				WL_ERR(("error (%d)\n", err));
+				return err;
+			}
+			wait_cnt = 500/10;
+			while (wl_get_drv_status(cfg, DISCONNECTING, dev) && wait_cnt) {
+				WL_DBG(("Waiting for disconnection terminated, wait_cnt: %d\n",
+					wait_cnt));
+				wait_cnt--;
+				OSL_SLEEP(10);
+			}
+		} else
+			WL_DBG(("Currently not associated!\n"));
+	} else if (wl_get_drv_status(cfg, DISCONNECTING, dev)) {
+		timeout = wait_event_interruptible_timeout(cfg->event_sync_wq,
+					!wl_get_drv_status(cfg, DISCONNECTING, dev),
+					msecs_to_jiffies(MAX_WAIT_TIME/3));
+		if 	(timeout <= 0 || wl_get_drv_status(cfg, DISCONNECTING, dev)) {
+			WL_ERR(("timeout in waiting disconnect event\n"));
+		}
+		wl_clr_drv_status(cfg, DISCONNECTING, dev);
+	}
+
+	/* Clean BSSID */
+	bzero(&bssid, sizeof(bssid));
+	if (!wl_get_drv_status(cfg, DISCONNECTING, dev))
+		wl_update_prof(cfg, dev, NULL, (void *)&bssid, WL_PROF_BSSID);
+
+	if (p2p_is_on(cfg) && (dev != bcmcfg_to_prmry_ndev(cfg))) {
+		/* we only allow to connect using virtual interface in case of P2P */
+			if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+				WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+				return BCME_ERROR;
+			}
+			wl_cfgp2p_set_management_ie(cfg, dev, bssidx,
+				VNDR_IE_ASSOCREQ_FLAG, sme->ie, sme->ie_len);
+	} else if (dev == bcmcfg_to_prmry_ndev(cfg)) {
+		/* find the RSN_IE */
+		if ((wpa2_ie = bcm_parse_tlvs((u8 *)sme->ie, sme->ie_len,
+			DOT11_MNG_RSN_ID)) != NULL) {
+			WL_DBG((" WPA2 IE is found\n"));
+		}
+		/* find the WPA_IE */
+		if ((wpa_ie = wl_cfgp2p_find_wpaie((u8 *)sme->ie,
+			sme->ie_len)) != NULL) {
+			WL_DBG((" WPA IE is found\n"));
+		}
+		if (wpa_ie != NULL || wpa2_ie != NULL) {
+			wpaie = (wpa_ie != NULL) ? (u8 *)wpa_ie : (u8 *)wpa2_ie;
+			wpaie_len = (wpa_ie != NULL) ? wpa_ie->length : wpa2_ie->len;
+			wpaie_len += WPA_RSN_IE_TAG_FIXED_LEN;
+			wldev_iovar_setbuf(dev, "wpaie", wpaie, wpaie_len,
+				cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+		} else {
+			wldev_iovar_setbuf(dev, "wpaie", NULL, 0,
+				cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+		}
+
+		if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+			WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+			return BCME_ERROR;
+		}
+		err = wl_cfgp2p_set_management_ie(cfg, dev, bssidx,
+			VNDR_IE_ASSOCREQ_FLAG, (u8 *)sme->ie, sme->ie_len);
+		if (unlikely(err)) {
+			return err;
+		}
+	}
+#if defined(ROAM_ENABLE) && defined(ROAM_AP_ENV_DETECTION)
+	if (dhd->roam_env_detection) {
+		bool is_roamtrig_reset = TRUE;
+		bool is_roam_env_ok = (wldev_iovar_setint(dev, "roam_env_detection",
+			AP_ENV_DETECT_NOT_USED) == BCME_OK);
+		if (is_roamtrig_reset && is_roam_env_ok) {
+			roam_trigger[0] = WL_AUTO_ROAM_TRIGGER;
+			roam_trigger[1] = WLC_BAND_ALL;
+		err = wldev_ioctl(dev, WLC_SET_ROAM_TRIGGER, roam_trigger,
+			sizeof(roam_trigger), true);
+		if (unlikely(err)) {
+				WL_ERR((" failed to restore roam_trigger for auto env"
+					" detection\n"));
+			}
+		}
+	}
+#endif /* ROAM_ENABLE && ROAM_AP_ENV_DETECTION */
+	if (chan) {
+		cfg->channel = ieee80211_frequency_to_channel(chan->center_freq);
+		chan_cnt = 1;
+		WL_DBG(("channel (%d), center_req (%d), %d channels\n", cfg->channel,
+			chan->center_freq, chan_cnt));
+	} else {
+#ifdef ROAM_CHANNEL_CACHE
+		wlc_ssid_t ssid;
+		int band;
+		use_chan_cache = TRUE;
+		err = wldev_get_band(dev, &band);
+		if (!err) {
+			set_roam_band(band);
+		}
+
+		cfg->channel = 0;
+		memcpy(ssid.SSID, sme->ssid, sme->ssid_len);
+		ssid.SSID_len = sme->ssid_len;
+		chan_cnt = get_roam_channel_list(cfg->channel, chanspec_list, &ssid, ioctl_version);
+#else
+		cfg->channel = 0;
+#endif /* ROAM_CHANNEL_CACHE */
+
+	}
+	WL_DBG(("ie (%p), ie_len (%zd)\n", sme->ie, sme->ie_len));
+	WL_DBG(("3. set wapi version \n"));
+	err = wl_set_wpa_version(dev, sme);
+	if (unlikely(err)) {
+		WL_ERR(("Invalid wpa_version\n"));
+		return err;
+	}
+		err = wl_set_auth_type(dev, sme);
+		if (unlikely(err)) {
+			WL_ERR(("Invalid auth type\n"));
+			return err;
+		}
+
+	err = wl_set_set_cipher(dev, sme);
+	if (unlikely(err)) {
+		WL_ERR(("Invalid ciper\n"));
+		return err;
+	}
+
+	err = wl_set_key_mgmt(dev, sme);
+	if (unlikely(err)) {
+		WL_ERR(("Invalid key mgmt\n"));
+		return err;
+	}
+
+	err = wl_set_set_sharedkey(dev, sme);
+	if (unlikely(err)) {
+		WL_ERR(("Invalid shared key\n"));
+		return err;
+	}
+
+	/*
+	 *  Join with specific BSSID and cached SSID
+	 *  If SSID is zero join based on BSSID only
+	 */
+	join_params_size = WL_EXTJOIN_PARAMS_FIXED_SIZE +
+		chan_cnt * sizeof(chanspec_t);
+	ext_join_params =  (wl_extjoin_params_t*)kzalloc(join_params_size, GFP_KERNEL);
+	if (ext_join_params == NULL) {
+		err = -ENOMEM;
+		wl_clr_drv_status(cfg, CONNECTING, dev);
+		goto exit;
+	}
+	ext_join_params->ssid.SSID_len = min(sizeof(ext_join_params->ssid.SSID), sme->ssid_len);
+	memcpy(&ext_join_params->ssid.SSID, sme->ssid, ext_join_params->ssid.SSID_len);
+	wl_update_prof(cfg, dev, NULL, &ext_join_params->ssid, WL_PROF_SSID);
+	ext_join_params->ssid.SSID_len = htod32(ext_join_params->ssid.SSID_len);
+	/* increate dwell time to receive probe response or detect Beacon
+	* from target AP at a noisy air only during connect command
+	*/
+	ext_join_params->scan.active_time = chan_cnt ? WL_SCAN_JOIN_ACTIVE_DWELL_TIME_MS : -1;
+	ext_join_params->scan.passive_time = chan_cnt ? WL_SCAN_JOIN_PASSIVE_DWELL_TIME_MS : -1;
+	/* Set up join scan parameters */
+	ext_join_params->scan.scan_type = -1;
+	ext_join_params->scan.nprobes = chan_cnt ?
+		(ext_join_params->scan.active_time/WL_SCAN_JOIN_PROBE_INTERVAL_MS) : -1;
+	ext_join_params->scan.home_time = -1;
+
+	if (sme->bssid)
+		memcpy(&ext_join_params->assoc.bssid, sme->bssid, ETH_ALEN);
+	else
+		memcpy(&ext_join_params->assoc.bssid, &ether_bcast, ETH_ALEN);
+	ext_join_params->assoc.chanspec_num = chan_cnt;
+	if (chan_cnt) {
+		if (use_chan_cache) {
+			memcpy(ext_join_params->assoc.chanspec_list, chanspec_list,
+				sizeof(chanspec_t) * chan_cnt);
+			for (i = 0; i < chan_cnt; i++) {
+				w_count += snprintf(chan_ptr + w_count, sizeof(chan_info) - w_count, "%d",
+					wf_chspec_ctlchan(chanspec_list[i]));
+				if (i != chan_cnt - 1) {
+					w_count += snprintf(chan_ptr + w_count, sizeof(chan_info) - w_count, ", ");
+				}
+			}
+		} else {
+			u16 channel, band, bw, ctl_sb;
+			chanspec_t chspec;
+			channel = cfg->channel;
+			band = (channel <= CH_MAX_2G_CHANNEL) ? WL_CHANSPEC_BAND_2G
+				: WL_CHANSPEC_BAND_5G;
+			bw = WL_CHANSPEC_BW_20;
+			ctl_sb = WL_CHANSPEC_CTL_SB_NONE;
+			chspec = (channel | band | bw | ctl_sb);
+			ext_join_params->assoc.chanspec_list[0]  &= WL_CHANSPEC_CHAN_MASK;
+			ext_join_params->assoc.chanspec_list[0] |= chspec;
+			ext_join_params->assoc.chanspec_list[0] =
+				wl_chspec_host_to_driver(ext_join_params->assoc.chanspec_list[0]);
+			snprintf(chan_ptr, sizeof(chan_info), "%d", channel);
+		}
+	} else {
+		snprintf(chan_ptr, sizeof(chan_info), "0");
+	}
+	ext_join_params->assoc.chanspec_num = htod32(ext_join_params->assoc.chanspec_num);
+	if (ext_join_params->ssid.SSID_len < IEEE80211_MAX_SSID_LEN) {
+		WL_INFORM(("ssid \"%s\", len (%d)\n", ext_join_params->ssid.SSID,
+			ext_join_params->ssid.SSID_len));
+	}
+	wl_set_drv_status(cfg, CONNECTING, dev);
+
+	if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+		WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+		return BCME_ERROR;
+	}
+	err = wldev_iovar_setbuf_bsscfg(dev, "join", ext_join_params, join_params_size,
+		cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+
+	WL_ERR(("Connecting to " MACDBG " with channel (%s) ssid %s\n",
+		MAC2STRDBG((u8*)(&ext_join_params->assoc.bssid)),
+		chan_info, ext_join_params->ssid.SSID));
+
+	kfree(ext_join_params);
+	if (err) {
+		wl_clr_drv_status(cfg, CONNECTING, dev);
+		if (err == BCME_UNSUPPORTED) {
+			WL_DBG(("join iovar is not supported\n"));
+			goto set_ssid;
+		} else {
+			WL_ERR(("error (%d)\n", err));
+			goto exit;
+		}
+	} else
+		goto exit;
+
+set_ssid:
+	memset(&join_params, 0, sizeof(join_params));
+	join_params_size = sizeof(join_params.ssid);
+
+	join_params.ssid.SSID_len = min(sizeof(join_params.ssid.SSID), sme->ssid_len);
+	memcpy(&join_params.ssid.SSID, sme->ssid, join_params.ssid.SSID_len);
+	join_params.ssid.SSID_len = htod32(join_params.ssid.SSID_len);
+	wl_update_prof(cfg, dev, NULL, &join_params.ssid, WL_PROF_SSID);
+	if (sme->bssid)
+		memcpy(&join_params.params.bssid, sme->bssid, ETH_ALEN);
+	else
+		memcpy(&join_params.params.bssid, &ether_bcast, ETH_ALEN);
+
+	wl_ch_to_chanspec(cfg->channel, &join_params, &join_params_size);
+	WL_DBG(("join_param_size %zu\n", join_params_size));
+
+	if (join_params.ssid.SSID_len < IEEE80211_MAX_SSID_LEN) {
+		WL_INFORM(("ssid \"%s\", len (%d)\n", join_params.ssid.SSID,
+			join_params.ssid.SSID_len));
+	}
+	wl_set_drv_status(cfg, CONNECTING, dev);
+	err = wldev_ioctl(dev, WLC_SET_SSID, &join_params, join_params_size, true);
+	if (err) {
+		WL_ERR(("error (%d)\n", err));
+		wl_clr_drv_status(cfg, CONNECTING, dev);
+	}
+exit:
+	return err;
+}
+
+static s32
+wl_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
+	u16 reason_code)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	scb_val_t scbval;
+	bool act = false;
+	s32 err = 0;
+	u8 *curbssid;
+#ifdef CUSTOM_SET_CPUCORE
+	dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+#endif /* CUSTOM_SET_CPUCORE */
+	WL_ERR(("Reason %d\n", reason_code));
+	RETURN_EIO_IF_NOT_UP(cfg);
+	act = *(bool *) wl_read_prof(cfg, dev, WL_PROF_ACT);
+	curbssid = wl_read_prof(cfg, dev, WL_PROF_BSSID);
+
+	if (act || wl_get_drv_status(cfg, CONNECTING, dev)) {
+		/*
+		* Cancel ongoing scan to sync up with sme state machine of cfg80211.
+		*/
+		/* Let scan aborted by F/W */
+		if (cfg->scan_request) {
+			wl_notify_escan_complete(cfg, dev, true, true);
+		}
+		wl_set_drv_status(cfg, DISCONNECTING, dev);
+		if (wl_get_drv_status(cfg, CONNECTING, dev)) {
+			/* in case of associating status, this will abort assoc procedure */
+			wl_notify_escan_complete(cfg, dev, false, true);
+			/* send pseudo connection failure event */
+			wl_send_event(dev, WLC_E_SET_SSID, WLC_E_STATUS_ABORT, 0);
+		} else {
+			scbval.val = reason_code;
+			memcpy(&scbval.ea, curbssid, ETHER_ADDR_LEN);
+			scbval.val = htod32(scbval.val);
+			err = wldev_ioctl(dev, WLC_DISASSOC, &scbval,
+				sizeof(scb_val_t), true);
+			if (unlikely(err)) {
+				wl_clr_drv_status(cfg, DISCONNECTING, dev);
+				WL_ERR(("error (%d)\n", err));
+				return err;
+			}
+		}
+	}
+#ifdef CUSTOM_SET_CPUCORE
+	/* set default cpucore */
+	if (dev == bcmcfg_to_prmry_ndev(cfg)) {
+		dhd->chan_isvht80 &= ~DHD_FLAG_STA_MODE;
+		if (!(dhd->chan_isvht80))
+			dhd_set_cpucore(dhd, FALSE);
+	}
+#endif /* CUSTOM_SET_CPUCORE */
+
+	return err;
+}
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+static s32
+wl_cfg80211_set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
+	enum nl80211_tx_power_setting type, s32 mbm)
+#else
+static s32
+wl_cfg80211_set_tx_power(struct wiphy *wiphy,
+	enum nl80211_tx_power_setting type, s32 dbm)
+#endif /* WL_CFG80211_P2P_DEV_IF */
+{
+
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+	s32 err = 0;
+#if defined(WL_CFG80211_P2P_DEV_IF)
+	s32 dbm = MBM_TO_DBM(mbm);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)) || \
+	defined(WL_COMPAT_WIRELESS) || defined(WL_SUPPORT_BACKPORTED_KPATCHES)
+	dbm = MBM_TO_DBM(dbm);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+	RETURN_EIO_IF_NOT_UP(cfg);
+	switch (type) {
+	case NL80211_TX_POWER_AUTOMATIC:
+		break;
+	case NL80211_TX_POWER_LIMITED:
+		if (dbm < 0) {
+			WL_ERR(("TX_POWER_LIMITTED - dbm is negative\n"));
+			return -EINVAL;
+		}
+		break;
+	case NL80211_TX_POWER_FIXED:
+		if (dbm < 0) {
+			WL_ERR(("TX_POWER_FIXED - dbm is negative..\n"));
+			return -EINVAL;
+		}
+		break;
+	}
+
+	err = wl_set_tx_power(ndev, type, dbm);
+	if (unlikely(err)) {
+		WL_ERR(("error (%d)\n", err));
+		return err;
+	}
+
+	cfg->conf->tx_power = dbm;
+
+	return err;
+}
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+static s32 wl_cfg80211_get_tx_power(struct wiphy *wiphy,
+	struct wireless_dev *wdev, s32 *dbm)
+#else
+static s32 wl_cfg80211_get_tx_power(struct wiphy *wiphy, s32 *dbm)
+#endif /* WL_CFG80211_P2P_DEV_IF */
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+	s32 err = 0;
+
+	RETURN_EIO_IF_NOT_UP(cfg);
+	err = wl_get_tx_power(ndev, dbm);
+	if (unlikely(err))
+		WL_ERR(("error (%d)\n", err));
+
+	return err;
+}
+
+static s32
+wl_cfg80211_config_default_key(struct wiphy *wiphy, struct net_device *dev,
+	u8 key_idx, bool unicast, bool multicast)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	u32 index;
+	s32 wsec;
+	s32 err = 0;
+	s32 bssidx;
+	if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+		WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+		return BCME_ERROR;
+	}
+
+	WL_DBG(("key index (%d)\n", key_idx));
+	RETURN_EIO_IF_NOT_UP(cfg);
+	err = wldev_iovar_getint_bsscfg(dev, "wsec", &wsec, bssidx);
+	if (unlikely(err)) {
+		WL_ERR(("WLC_GET_WSEC error (%d)\n", err));
+		return err;
+	}
+	if (wsec == WEP_ENABLED) {
+		/* Just select a new current key */
+		index = (u32) key_idx;
+		index = htod32(index);
+		err = wldev_ioctl(dev, WLC_SET_KEY_PRIMARY, &index,
+			sizeof(index), true);
+		if (unlikely(err)) {
+			WL_ERR(("error (%d)\n", err));
+		}
+	}
+	return err;
+}
+
+static s32
+wl_add_keyext(struct wiphy *wiphy, struct net_device *dev,
+	u8 key_idx, const u8 *mac_addr, struct key_params *params)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct wl_wsec_key key;
+	s32 err = 0;
+	s32 bssidx;
+	s32 mode = wl_get_mode_by_netdev(cfg, dev);
+	if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+		WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+		return BCME_ERROR;
+	}
+	memset(&key, 0, sizeof(key));
+	key.index = (u32) key_idx;
+
+	if (!ETHER_ISMULTI(mac_addr))
+		memcpy((char *)&key.ea, (void *)mac_addr, ETHER_ADDR_LEN);
+	key.len = (u32) params->key_len;
+
+	/* check for key index change */
+	if (key.len == 0) {
+		/* key delete */
+		swap_key_from_BE(&key);
+		err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key),
+			cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+		if (unlikely(err)) {
+			WL_ERR(("key delete error (%d)\n", err));
+			return err;
+		}
+	} else {
+		if (key.len > sizeof(key.data)) {
+			WL_ERR(("Invalid key length (%d)\n", key.len));
+			return -EINVAL;
+		}
+		WL_DBG(("Setting the key index %d\n", key.index));
+		memcpy(key.data, params->key, key.len);
+
+		if ((mode == WL_MODE_BSS) &&
+			(params->cipher == WLAN_CIPHER_SUITE_TKIP)) {
+			u8 keybuf[8];
+			memcpy(keybuf, &key.data[24], sizeof(keybuf));
+			memcpy(&key.data[24], &key.data[16], sizeof(keybuf));
+			memcpy(&key.data[16], keybuf, sizeof(keybuf));
+		}
+
+		/* if IW_ENCODE_EXT_RX_SEQ_VALID set */
+		if (params->seq && params->seq_len == 6) {
+			/* rx iv */
+			u8 *ivptr;
+			ivptr = (u8 *) params->seq;
+			key.rxiv.hi = (ivptr[5] << 24) | (ivptr[4] << 16) |
+				(ivptr[3] << 8) | ivptr[2];
+			key.rxiv.lo = (ivptr[1] << 8) | ivptr[0];
+			key.iv_initialized = true;
+		}
+
+		switch (params->cipher) {
+		case WLAN_CIPHER_SUITE_WEP40:
+			key.algo = CRYPTO_ALGO_WEP1;
+			WL_DBG(("WLAN_CIPHER_SUITE_WEP40\n"));
+			break;
+		case WLAN_CIPHER_SUITE_WEP104:
+			key.algo = CRYPTO_ALGO_WEP128;
+			WL_DBG(("WLAN_CIPHER_SUITE_WEP104\n"));
+			break;
+		case WLAN_CIPHER_SUITE_TKIP:
+			key.algo = CRYPTO_ALGO_TKIP;
+			WL_DBG(("WLAN_CIPHER_SUITE_TKIP\n"));
+			break;
+		case WLAN_CIPHER_SUITE_AES_CMAC:
+			key.algo = CRYPTO_ALGO_AES_CCM;
+			WL_DBG(("WLAN_CIPHER_SUITE_AES_CMAC\n"));
+			break;
+		case WLAN_CIPHER_SUITE_CCMP:
+			key.algo = CRYPTO_ALGO_AES_CCM;
+			WL_DBG(("WLAN_CIPHER_SUITE_CCMP\n"));
+			break;
+		default:
+			WL_ERR(("Invalid cipher (0x%x)\n", params->cipher));
+			return -EINVAL;
+		}
+		swap_key_from_BE(&key);
+		/* need to guarantee EAPOL 4/4 send out before set key */
+		dhd_wait_pend8021x(dev);
+		err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key),
+			cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+		if (unlikely(err)) {
+			WL_ERR(("WLC_SET_KEY error (%d)\n", err));
+			return err;
+		}
+	}
+	return err;
+}
+
+int
+wl_cfg80211_enable_roam_offload(struct net_device *dev, bool enable)
+{
+	int err;
+	wl_eventmsg_buf_t ev_buf;
+
+	if (dev != bcmcfg_to_prmry_ndev(g_bcm_cfg)) {
+		/* roam offload is only for the primary device */
+		return -1;
+	}
+	err = wldev_iovar_setint(dev, "roam_offload", (int)enable);
+	if (err)
+		return err;
+
+	bzero(&ev_buf, sizeof(wl_eventmsg_buf_t));
+	wl_cfg80211_add_to_eventbuffer(&ev_buf, WLC_E_PSK_SUP, !enable);
+	wl_cfg80211_add_to_eventbuffer(&ev_buf, WLC_E_ASSOC_REQ_IE, !enable);
+	wl_cfg80211_add_to_eventbuffer(&ev_buf, WLC_E_ASSOC_RESP_IE, !enable);
+	wl_cfg80211_add_to_eventbuffer(&ev_buf, WLC_E_REASSOC, !enable);
+	wl_cfg80211_add_to_eventbuffer(&ev_buf, WLC_E_JOIN, !enable);
+	wl_cfg80211_add_to_eventbuffer(&ev_buf, WLC_E_ROAM, !enable);
+	err = wl_cfg80211_apply_eventbuffer(dev, g_bcm_cfg, &ev_buf);
+	if (!err) {
+		g_bcm_cfg->roam_offload = enable;
+	}
+	return err;
+}
+
+static s32
+wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev,
+	u8 key_idx, bool pairwise, const u8 *mac_addr,
+	struct key_params *params)
+{
+	struct wl_wsec_key key;
+	s32 val = 0;
+	s32 wsec = 0;
+	s32 err = 0;
+	u8 keybuf[8];
+	s32 bssidx = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	s32 mode = wl_get_mode_by_netdev(cfg, dev);
+	WL_DBG(("key index (%d)\n", key_idx));
+	RETURN_EIO_IF_NOT_UP(cfg);
+
+	if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+		WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+		return BCME_ERROR;
+	}
+
+	if (mac_addr &&
+		((params->cipher != WLAN_CIPHER_SUITE_WEP40) &&
+		(params->cipher != WLAN_CIPHER_SUITE_WEP104))) {
+			wl_add_keyext(wiphy, dev, key_idx, mac_addr, params);
+			goto exit;
+	}
+	memset(&key, 0, sizeof(key));
+
+	key.len = (u32) params->key_len;
+	key.index = (u32) key_idx;
+
+	if (unlikely(key.len > sizeof(key.data))) {
+		WL_ERR(("Too long key length (%u)\n", key.len));
+		return -EINVAL;
+	}
+	memcpy(key.data, params->key, key.len);
+
+	key.flags = WL_PRIMARY_KEY;
+	switch (params->cipher) {
+	case WLAN_CIPHER_SUITE_WEP40:
+		key.algo = CRYPTO_ALGO_WEP1;
+		val = WEP_ENABLED;
+		WL_DBG(("WLAN_CIPHER_SUITE_WEP40\n"));
+		break;
+	case WLAN_CIPHER_SUITE_WEP104:
+		key.algo = CRYPTO_ALGO_WEP128;
+		val = WEP_ENABLED;
+		WL_DBG(("WLAN_CIPHER_SUITE_WEP104\n"));
+		break;
+	case WLAN_CIPHER_SUITE_TKIP:
+		key.algo = CRYPTO_ALGO_TKIP;
+		val = TKIP_ENABLED;
+		/* wpa_supplicant switches the third and fourth quarters of the TKIP key */
+		if (mode == WL_MODE_BSS) {
+			bcopy(&key.data[24], keybuf, sizeof(keybuf));
+			bcopy(&key.data[16], &key.data[24], sizeof(keybuf));
+			bcopy(keybuf, &key.data[16], sizeof(keybuf));
+		}
+		WL_DBG(("WLAN_CIPHER_SUITE_TKIP\n"));
+		break;
+	case WLAN_CIPHER_SUITE_AES_CMAC:
+		key.algo = CRYPTO_ALGO_AES_CCM;
+		val = AES_ENABLED;
+		WL_DBG(("WLAN_CIPHER_SUITE_AES_CMAC\n"));
+		break;
+	case WLAN_CIPHER_SUITE_CCMP:
+		key.algo = CRYPTO_ALGO_AES_CCM;
+		val = AES_ENABLED;
+		WL_DBG(("WLAN_CIPHER_SUITE_CCMP\n"));
+		break;
+	default:
+		WL_ERR(("Invalid cipher (0x%x)\n", params->cipher));
+		return -EINVAL;
+	}
+
+	/* Set the new key/index */
+	if ((mode == WL_MODE_IBSS) && (val & (TKIP_ENABLED | AES_ENABLED))) {
+		WL_ERR(("IBSS KEY setted\n"));
+		wldev_iovar_setint(dev, "wpa_auth", WPA_AUTH_NONE);
+	}
+	swap_key_from_BE(&key);
+	err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key), cfg->ioctl_buf,
+		WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+	if (unlikely(err)) {
+		WL_ERR(("WLC_SET_KEY error (%d)\n", err));
+		return err;
+	}
+
+exit:
+	err = wldev_iovar_getint_bsscfg(dev, "wsec", &wsec, bssidx);
+	if (unlikely(err)) {
+		WL_ERR(("get wsec error (%d)\n", err));
+		return err;
+	}
+
+	wsec |= val;
+	err = wldev_iovar_setint_bsscfg(dev, "wsec", wsec, bssidx);
+	if (unlikely(err)) {
+		WL_ERR(("set wsec error (%d)\n", err));
+		return err;
+	}
+
+	return err;
+}
+
+static s32
+wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev,
+	u8 key_idx, bool pairwise, const u8 *mac_addr)
+{
+	struct wl_wsec_key key;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	s32 err = 0;
+	s32 bssidx;
+	if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+		WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+		return BCME_ERROR;
+	}
+	WL_DBG(("Enter\n"));
+
+#ifndef IEEE80211W
+	if ((key_idx >= DOT11_MAX_DEFAULT_KEYS) && (key_idx < DOT11_MAX_DEFAULT_KEYS+2))
+		return -EINVAL;
+#endif
+
+	RETURN_EIO_IF_NOT_UP(cfg);
+	memset(&key, 0, sizeof(key));
+
+	key.flags = WL_PRIMARY_KEY;
+	key.algo = CRYPTO_ALGO_OFF;
+	key.index = (u32) key_idx;
+
+	WL_DBG(("key index (%d)\n", key_idx));
+	/* Set the new key/index */
+	swap_key_from_BE(&key);
+	err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key), cfg->ioctl_buf,
+		WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+	if (unlikely(err)) {
+		if (err == -EINVAL) {
+			if (key.index >= DOT11_MAX_DEFAULT_KEYS) {
+				/* we ignore this key index in this case */
+				WL_DBG(("invalid key index (%d)\n", key_idx));
+			}
+		} else {
+			WL_ERR(("WLC_SET_KEY error (%d)\n", err));
+		}
+		return err;
+	}
+	return err;
+}
+
+static s32
+wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev,
+	u8 key_idx, bool pairwise, const u8 *mac_addr, void *cookie,
+	void (*callback) (void *cookie, struct key_params * params))
+{
+	struct key_params params;
+	struct wl_wsec_key key;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct wl_security *sec;
+	s32 wsec;
+	s32 err = 0;
+	s32 bssidx;
+	if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+		WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+		return BCME_ERROR;
+	}
+	WL_DBG(("key index (%d)\n", key_idx));
+	RETURN_EIO_IF_NOT_UP(cfg);
+	memset(&key, 0, sizeof(key));
+	key.index = key_idx;
+	swap_key_to_BE(&key);
+	memset(&params, 0, sizeof(params));
+	params.key_len = (u8) min_t(u8, DOT11_MAX_KEY_SIZE, key.len);
+	memcpy(params.key, key.data, params.key_len);
+
+	err = wldev_iovar_getint_bsscfg(dev, "wsec", &wsec, bssidx);
+	if (unlikely(err)) {
+		WL_ERR(("WLC_GET_WSEC error (%d)\n", err));
+		return err;
+	}
+	switch (wsec & ~SES_OW_ENABLED) {
+		case WEP_ENABLED:
+			sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
+			if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP40) {
+				params.cipher = WLAN_CIPHER_SUITE_WEP40;
+				WL_DBG(("WLAN_CIPHER_SUITE_WEP40\n"));
+			} else if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP104) {
+				params.cipher = WLAN_CIPHER_SUITE_WEP104;
+				WL_DBG(("WLAN_CIPHER_SUITE_WEP104\n"));
+			}
+			break;
+		case TKIP_ENABLED:
+			params.cipher = WLAN_CIPHER_SUITE_TKIP;
+			WL_DBG(("WLAN_CIPHER_SUITE_TKIP\n"));
+			break;
+		case AES_ENABLED:
+			params.cipher = WLAN_CIPHER_SUITE_AES_CMAC;
+			WL_DBG(("WLAN_CIPHER_SUITE_AES_CMAC\n"));
+			break;
+		default:
+			WL_ERR(("Invalid algo (0x%x)\n", wsec));
+			return -EINVAL;
+	}
+
+	callback(cookie, &params);
+	return err;
+}
+
+static s32
+wl_cfg80211_config_default_mgmt_key(struct wiphy *wiphy,
+	struct net_device *dev, u8 key_idx)
+{
+	WL_INFORM(("Not supported\n"));
+	return -EOPNOTSUPP;
+}
+
+static s32
+wl_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev,
+	u8 *mac, struct station_info *sinfo)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	scb_val_t scb_val;
+	s32 rssi;
+	s32 rate;
+	s32 err = 0;
+	sta_info_t *sta;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+	s8 eabuf[ETHER_ADDR_STR_LEN];
+#endif
+	dhd_pub_t *dhd =  (dhd_pub_t *)(cfg->pub);
+	RETURN_EIO_IF_NOT_UP(cfg);
+	if (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP) {
+		err = wldev_iovar_getbuf(dev, "sta_info", (struct ether_addr *)mac,
+			ETHER_ADDR_LEN, cfg->ioctl_buf, WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
+		if (err < 0) {
+			WL_ERR(("GET STA INFO failed, %d\n", err));
+			return err;
+		}
+		sinfo->filled = STATION_INFO_INACTIVE_TIME;
+		sta = (sta_info_t *)cfg->ioctl_buf;
+		sta->len = dtoh16(sta->len);
+		sta->cap = dtoh16(sta->cap);
+		sta->flags = dtoh32(sta->flags);
+		sta->idle = dtoh32(sta->idle);
+		sta->in = dtoh32(sta->in);
+		sinfo->inactive_time = sta->idle * 1000;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+		if (sta->flags & WL_STA_ASSOC) {
+			sinfo->filled |= STATION_INFO_CONNECTED_TIME;
+			sinfo->connected_time = sta->in;
+		}
+		WL_INFORM(("STA %s : idle time : %d sec, connected time :%d ms\n",
+			bcm_ether_ntoa((const struct ether_addr *)mac, eabuf), sinfo->inactive_time,
+			sta->idle * 1000));
+#endif
+	} else if (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_BSS ||
+		wl_get_mode_by_netdev(cfg, dev) == WL_MODE_IBSS) {
+		get_pktcnt_t pktcnt;
+		u8 *curmacp;
+
+		if (cfg->roam_offload) {
+			struct ether_addr bssid;
+			err = wldev_ioctl(dev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN, false);
+			if (err) {
+				WL_ERR(("Failed to get current BSSID\n"));
+			} else {
+				if (memcmp(mac, &bssid.octet, ETHER_ADDR_LEN) != 0) {
+					/* roaming is detected */
+					err = wl_cfg80211_delayed_roam(cfg, dev, &bssid);
+					if (err)
+						WL_ERR(("Failed to handle the delayed roam, "
+							"err=%d", err));
+					mac = (u8 *)bssid.octet;
+				}
+			}
+		}
+		if (!wl_get_drv_status(cfg, CONNECTED, dev) ||
+			(dhd_is_associated(dhd, NULL, &err) == FALSE)) {
+			WL_ERR(("NOT assoc\n"));
+			if (err == -ERESTARTSYS)
+				return err;
+			err = -ENODEV;
+			return err;
+		}
+		curmacp = wl_read_prof(cfg, dev, WL_PROF_BSSID);
+		if (memcmp(mac, curmacp, ETHER_ADDR_LEN)) {
+			WL_ERR(("Wrong Mac address: "MACDBG" != "MACDBG"\n",
+				MAC2STRDBG(mac), MAC2STRDBG(curmacp)));
+		}
+
+		/* Report the current tx rate */
+		err = wldev_ioctl(dev, WLC_GET_RATE, &rate, sizeof(rate), false);
+		if (err) {
+			WL_ERR(("Could not get rate (%d)\n", err));
+		} else {
+#if defined(USE_DYNAMIC_MAXPKT_RXGLOM)
+			int rxpktglom;
+#endif
+			rate = dtoh32(rate);
+			sinfo->filled |= STATION_INFO_TX_BITRATE;
+			sinfo->txrate.legacy = rate * 5;
+			WL_DBG(("Rate %d Mbps\n", (rate / 2)));
+#if defined(USE_DYNAMIC_MAXPKT_RXGLOM)
+			rxpktglom = ((rate/2) > 150) ? 20 : 10;
+
+			if (maxrxpktglom != rxpktglom) {
+				maxrxpktglom = rxpktglom;
+				WL_DBG(("Rate %d Mbps, update bus:maxtxpktglom=%d\n", (rate/2),
+					maxrxpktglom));
+				err = wldev_iovar_setbuf(dev, "bus:maxtxpktglom",
+					(char*)&maxrxpktglom, 4, cfg->ioctl_buf,
+					WLC_IOCTL_MAXLEN, NULL);
+				if (err < 0) {
+					WL_ERR(("set bus:maxtxpktglom failed, %d\n", err));
+				}
+			}
+#endif
+		}
+
+		memset(&scb_val, 0, sizeof(scb_val));
+		scb_val.val = 0;
+		err = wldev_ioctl(dev, WLC_GET_RSSI, &scb_val,
+			sizeof(scb_val_t), false);
+		if (err) {
+			WL_ERR(("Could not get rssi (%d)\n", err));
+			goto get_station_err;
+		}
+		rssi = wl_rssi_offset(dtoh32(scb_val.val));
+		sinfo->filled |= STATION_INFO_SIGNAL;
+		sinfo->signal = rssi;
+		WL_DBG(("RSSI %d dBm\n", rssi));
+		err = wldev_ioctl(dev, WLC_GET_PKTCNTS, &pktcnt,
+			sizeof(pktcnt), false);
+		if (!err) {
+			sinfo->filled |= (STATION_INFO_RX_PACKETS |
+				STATION_INFO_RX_DROP_MISC |
+				STATION_INFO_TX_PACKETS |
+				STATION_INFO_TX_FAILED);
+			sinfo->rx_packets = pktcnt.rx_good_pkt;
+			sinfo->rx_dropped_misc = pktcnt.rx_bad_pkt;
+			sinfo->tx_packets = pktcnt.tx_good_pkt;
+			sinfo->tx_failed  = pktcnt.tx_bad_pkt;
+		}
+get_station_err:
+		if (err && (err != -ERESTARTSYS)) {
+			/* Disconnect due to zero BSSID or error to get RSSI */
+			WL_ERR(("force cfg80211_disconnected: %d\n", err));
+			wl_clr_drv_status(cfg, CONNECTED, dev);
+			cfg80211_disconnected(dev, 0, NULL, 0, GFP_KERNEL);
+			wl_link_down(cfg);
+		}
+	}
+	else {
+		WL_ERR(("Invalid device mode %d\n", wl_get_mode_by_netdev(cfg, dev)));
+	}
+
+	return err;
+}
+
+static s32
+wl_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
+	bool enabled, s32 timeout)
+{
+	s32 pm;
+	s32 err = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct net_info *_net_info = wl_get_netinfo_by_netdev(cfg, dev);
+
+	RETURN_EIO_IF_NOT_UP(cfg);
+	WL_DBG(("Enter\n"));
+	if (cfg->p2p_net == dev || _net_info == NULL || cfg->vsdb_mode ||
+		!wl_get_drv_status(cfg, CONNECTED, dev)) {
+		return err;
+	}
+
+	/* Delete pm_enable_work */
+	wl_add_remove_pm_enable_work(cfg, FALSE, WL_HANDLER_PEND);
+
+	pm = enabled ? PM_FAST : PM_OFF;
+	if (_net_info->pm_block) {
+		WL_ERR(("%s:Do not enable the power save for pm_block %d\n",
+			dev->name, _net_info->pm_block));
+		pm = PM_OFF;
+	}
+	pm = htod32(pm);
+	WL_DBG(("%s:power save %s\n", dev->name, (pm ? "enabled" : "disabled")));
+	err = wldev_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm), true);
+	if (unlikely(err)) {
+		if (err == -ENODEV)
+			WL_DBG(("net_device is not ready yet\n"));
+		else
+			WL_ERR(("error (%d)\n", err));
+		return err;
+	}
+	wl_cfg80211_update_power_mode(dev);
+	return err;
+}
+
+void wl_cfg80211_update_power_mode(struct net_device *dev)
+{
+	int err, pm = -1;
+
+	err = wldev_ioctl(dev, WLC_GET_PM, &pm, sizeof(pm), true);
+	if (err)
+		WL_ERR(("%s:error (%d)\n", __FUNCTION__, err));
+	else if (pm != -1 && dev->ieee80211_ptr)
+		dev->ieee80211_ptr->ps = (pm == PM_OFF) ? false : true;
+}
+
+static __used u32 wl_find_msb(u16 bit16)
+{
+	u32 ret = 0;
+
+	if (bit16 & 0xff00) {
+		ret += 8;
+		bit16 >>= 8;
+	}
+
+	if (bit16 & 0xf0) {
+		ret += 4;
+		bit16 >>= 4;
+	}
+
+	if (bit16 & 0xc) {
+		ret += 2;
+		bit16 >>= 2;
+	}
+
+	if (bit16 & 2)
+		ret += bit16 & 2;
+	else if (bit16)
+		ret += bit16;
+
+	return ret;
+}
+
+static s32 wl_cfg80211_resume(struct wiphy *wiphy)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+	s32 err = 0;
+
+	if (unlikely(!wl_get_drv_status(cfg, READY, ndev))) {
+		WL_INFORM(("device is not ready\n"));
+		return 0;
+	}
+
+	return err;
+}
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39))
+static s32 wl_cfg80211_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wow)
+#else
+static s32 wl_cfg80211_suspend(struct wiphy *wiphy)
+#endif
+{
+#ifdef DHD_CLEAR_ON_SUSPEND
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct net_info *iter, *next;
+	struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+	unsigned long flags;
+	if (unlikely(!wl_get_drv_status(cfg, READY, ndev))) {
+		WL_INFORM(("device is not ready : status (%d)\n",
+			(int)cfg->status));
+		return 0;
+	}
+	for_each_ndev(cfg, iter, next)
+		wl_set_drv_status(cfg, SCAN_ABORTING, iter->ndev);
+	spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+	if (cfg->scan_request) {
+		cfg80211_scan_done(cfg->scan_request, true);
+		cfg->scan_request = NULL;
+	}
+	for_each_ndev(cfg, iter, next) {
+		wl_clr_drv_status(cfg, SCANNING, iter->ndev);
+		wl_clr_drv_status(cfg, SCAN_ABORTING, iter->ndev);
+	}
+	spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+	for_each_ndev(cfg, iter, next) {
+		if (wl_get_drv_status(cfg, CONNECTING, iter->ndev)) {
+			wl_bss_connect_done(cfg, iter->ndev, NULL, NULL, false);
+		}
+	}
+#endif /* DHD_CLEAR_ON_SUSPEND */
+	return 0;
+}
+
+static s32
+wl_update_pmklist(struct net_device *dev, struct wl_pmk_list *pmk_list,
+	s32 err)
+{
+	int i, j;
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	struct net_device *primary_dev = bcmcfg_to_prmry_ndev(cfg);
+
+	if (!pmk_list) {
+		printk("pmk_list is NULL\n");
+		return -EINVAL;
+	}
+	/* pmk list is supported only for STA interface i.e. primary interface
+	 * Refer code wlc_bsscfg.c->wlc_bsscfg_sta_init
+	 */
+	if (primary_dev != dev) {
+		WL_INFORM(("Not supporting Flushing pmklist on virtual"
+			" interfaces than primary interface\n"));
+		return err;
+	}
+
+	WL_DBG(("No of elements %d\n", pmk_list->pmkids.npmkid));
+	for (i = 0; i < pmk_list->pmkids.npmkid; i++) {
+		WL_DBG(("PMKID[%d]: %pM =\n", i,
+			&pmk_list->pmkids.pmkid[i].BSSID));
+		for (j = 0; j < WPA2_PMKID_LEN; j++) {
+			WL_DBG(("%02x\n", pmk_list->pmkids.pmkid[i].PMKID[j]));
+		}
+	}
+	if (likely(!err)) {
+		err = wldev_iovar_setbuf(dev, "pmkid_info", (char *)pmk_list,
+			sizeof(*pmk_list), cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+	}
+
+	return err;
+}
+
+static s32
+wl_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *dev,
+	struct cfg80211_pmksa *pmksa)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	s32 err = 0;
+	int i;
+
+	RETURN_EIO_IF_NOT_UP(cfg);
+	for (i = 0; i < cfg->pmk_list->pmkids.npmkid; i++)
+		if (!memcmp(pmksa->bssid, &cfg->pmk_list->pmkids.pmkid[i].BSSID,
+			ETHER_ADDR_LEN))
+			break;
+	if (i < WL_NUM_PMKIDS_MAX) {
+		memcpy(&cfg->pmk_list->pmkids.pmkid[i].BSSID, pmksa->bssid,
+			ETHER_ADDR_LEN);
+		memcpy(&cfg->pmk_list->pmkids.pmkid[i].PMKID, pmksa->pmkid,
+			WPA2_PMKID_LEN);
+		if (i == cfg->pmk_list->pmkids.npmkid)
+			cfg->pmk_list->pmkids.npmkid++;
+	} else {
+		err = -EINVAL;
+	}
+	WL_DBG(("set_pmksa,IW_PMKSA_ADD - PMKID: %pM =\n",
+		&cfg->pmk_list->pmkids.pmkid[cfg->pmk_list->pmkids.npmkid - 1].BSSID));
+	for (i = 0; i < WPA2_PMKID_LEN; i++) {
+		WL_DBG(("%02x\n",
+			cfg->pmk_list->pmkids.pmkid[cfg->pmk_list->pmkids.npmkid - 1].
+			PMKID[i]));
+	}
+
+	err = wl_update_pmklist(dev, cfg->pmk_list, err);
+
+	return err;
+}
+
+static s32
+wl_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *dev,
+	struct cfg80211_pmksa *pmksa)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct _pmkid_list pmkid = {0};
+	s32 err = 0;
+	int i;
+
+	RETURN_EIO_IF_NOT_UP(cfg);
+	memcpy(&pmkid.pmkid[0].BSSID, pmksa->bssid, ETHER_ADDR_LEN);
+	memcpy(pmkid.pmkid[0].PMKID, pmksa->pmkid, WPA2_PMKID_LEN);
+
+	WL_DBG(("del_pmksa,IW_PMKSA_REMOVE - PMKID: %pM =\n",
+		&pmkid.pmkid[0].BSSID));
+	for (i = 0; i < WPA2_PMKID_LEN; i++) {
+		WL_DBG(("%02x\n", pmkid.pmkid[0].PMKID[i]));
+	}
+
+	for (i = 0; i < cfg->pmk_list->pmkids.npmkid; i++)
+		if (!memcmp
+		    (pmksa->bssid, &cfg->pmk_list->pmkids.pmkid[i].BSSID,
+		     ETHER_ADDR_LEN))
+			break;
+
+	if ((cfg->pmk_list->pmkids.npmkid > 0) &&
+		(i < cfg->pmk_list->pmkids.npmkid)) {
+		memset(&cfg->pmk_list->pmkids.pmkid[i], 0, sizeof(pmkid_t));
+		for (; i < (cfg->pmk_list->pmkids.npmkid - 1); i++) {
+			memcpy(&cfg->pmk_list->pmkids.pmkid[i].BSSID,
+				&cfg->pmk_list->pmkids.pmkid[i + 1].BSSID,
+				ETHER_ADDR_LEN);
+			memcpy(&cfg->pmk_list->pmkids.pmkid[i].PMKID,
+				&cfg->pmk_list->pmkids.pmkid[i + 1].PMKID,
+				WPA2_PMKID_LEN);
+		}
+		cfg->pmk_list->pmkids.npmkid--;
+	} else {
+		err = -EINVAL;
+	}
+
+	err = wl_update_pmklist(dev, cfg->pmk_list, err);
+
+	return err;
+
+}
+
+static s32
+wl_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *dev)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	s32 err = 0;
+	RETURN_EIO_IF_NOT_UP(cfg);
+	memset(cfg->pmk_list, 0, sizeof(*cfg->pmk_list));
+	err = wl_update_pmklist(dev, cfg->pmk_list, err);
+	return err;
+
+}
+
+static wl_scan_params_t *
+wl_cfg80211_scan_alloc_params(int channel, int nprobes, int *out_params_size)
+{
+	wl_scan_params_t *params;
+	int params_size;
+	int num_chans;
+
+	*out_params_size = 0;
+
+	/* Our scan params only need space for 1 channel and 0 ssids */
+	params_size = WL_SCAN_PARAMS_FIXED_SIZE + 1 * sizeof(uint16);
+	params = (wl_scan_params_t*) kzalloc(params_size, GFP_KERNEL);
+	if (params == NULL) {
+		WL_ERR(("mem alloc failed (%d bytes)\n", params_size));
+		return params;
+	}
+	memset(params, 0, params_size);
+	params->nprobes = nprobes;
+
+	num_chans = (channel == 0) ? 0 : 1;
+
+	memcpy(&params->bssid, &ether_bcast, ETHER_ADDR_LEN);
+	params->bss_type = DOT11_BSSTYPE_ANY;
+	params->scan_type = DOT11_SCANTYPE_ACTIVE;
+	params->nprobes = htod32(1);
+	params->active_time = htod32(-1);
+	params->passive_time = htod32(-1);
+	params->home_time = htod32(10);
+	if (channel == -1)
+		params->channel_list[0] = htodchanspec(channel);
+	else
+		params->channel_list[0] = wl_ch_host_to_driver(channel);
+
+	/* Our scan params have 1 channel and 0 ssids */
+	params->channel_num = htod32((0 << WL_SCAN_PARAMS_NSSID_SHIFT) |
+		(num_chans & WL_SCAN_PARAMS_COUNT_MASK));
+
+	*out_params_size = params_size;	/* rtn size to the caller */
+	return params;
+}
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+static s32
+wl_cfg80211_remain_on_channel(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev,
+	struct ieee80211_channel *channel, unsigned int duration, u64 *cookie)
+#else
+static s32
+wl_cfg80211_remain_on_channel(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev,
+	struct ieee80211_channel * channel,
+	enum nl80211_channel_type channel_type,
+	unsigned int duration, u64 *cookie)
+#endif /* WL_CFG80211_P2P_DEV_IF */
+{
+	s32 target_channel;
+	u32 id;
+	s32 err = BCME_OK;
+	struct ether_addr primary_mac;
+	struct net_device *ndev = NULL;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+	WL_DBG(("Enter, channel: %d, duration ms (%d) SCANNING ?? %s \n",
+		ieee80211_frequency_to_channel(channel->center_freq),
+		duration, (wl_get_drv_status(cfg, SCANNING, ndev)) ? "YES":"NO"));
+
+	if (!cfg->p2p) {
+		WL_ERR(("cfg->p2p is not initialized\n"));
+		err = BCME_ERROR;
+		goto exit;
+	}
+
+#ifndef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+	if (wl_get_drv_status_all(cfg, SCANNING)) {
+		wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true);
+	}
+#endif /* not WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+
+	target_channel = ieee80211_frequency_to_channel(channel->center_freq);
+	memcpy(&cfg->remain_on_chan, channel, sizeof(struct ieee80211_channel));
+#if defined(WL_ENABLE_P2P_IF)
+	cfg->remain_on_chan_type = channel_type;
+#endif /* WL_ENABLE_P2P_IF */
+	id = ++cfg->last_roc_id;
+	if (id == 0)
+		id = ++cfg->last_roc_id;
+	*cookie = id;
+
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+	if (wl_get_drv_status(cfg, SCANNING, ndev)) {
+		struct timer_list *_timer;
+		WL_DBG(("scan is running. go to fake listen state\n"));
+
+		wl_set_drv_status(cfg, FAKE_REMAINING_ON_CHANNEL, ndev);
+
+		if (timer_pending(&cfg->p2p->listen_timer)) {
+			WL_DBG(("cancel current listen timer \n"));
+			del_timer_sync(&cfg->p2p->listen_timer);
+		}
+
+		_timer = &cfg->p2p->listen_timer;
+		wl_clr_p2p_status(cfg, LISTEN_EXPIRED);
+
+		INIT_TIMER(_timer, wl_cfgp2p_listen_expired, duration, 0);
+
+		err = BCME_OK;
+		goto exit;
+	}
+#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+
+#ifdef WL_CFG80211_SYNC_GON
+	if (wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM_LISTEN)) {
+		/* do not enter listen mode again if we are in listen mode already for next af.
+		 * remain on channel completion will be returned by waiting next af completion.
+		 */
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+		wl_set_drv_status(cfg, FAKE_REMAINING_ON_CHANNEL, ndev);
+#else
+		wl_set_drv_status(cfg, REMAINING_ON_CHANNEL, ndev);
+#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+		goto exit;
+	}
+#endif /* WL_CFG80211_SYNC_GON */
+	if (cfg->p2p && !cfg->p2p->on) {
+		/* In case of p2p_listen command, supplicant send remain_on_channel
+		 * without turning on P2P
+		 */
+		get_primary_mac(cfg, &primary_mac);
+		wl_cfgp2p_generate_bss_mac(&primary_mac, &cfg->p2p->dev_addr, &cfg->p2p->int_addr);
+		p2p_on(cfg) = true;
+	}
+
+	if (p2p_is_on(cfg)) {
+		err = wl_cfgp2p_enable_discovery(cfg, ndev, NULL, 0);
+		if (unlikely(err)) {
+			goto exit;
+		}
+#ifndef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+		wl_set_drv_status(cfg, REMAINING_ON_CHANNEL, ndev);
+#endif /* not WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+		err = wl_cfgp2p_discover_listen(cfg, target_channel, duration);
+
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+		if (err == BCME_OK) {
+			wl_set_drv_status(cfg, REMAINING_ON_CHANNEL, ndev);
+		} else {
+			/* if failed, firmware may be internal scanning state.
+			 * so other scan request shall not abort it
+			 */
+			wl_set_drv_status(cfg, FAKE_REMAINING_ON_CHANNEL, ndev);
+		}
+#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+		/* WAR: set err = ok to prevent cookie mismatch in wpa_supplicant
+		 * and expire timer will send a completion to the upper layer
+		 */
+		err = BCME_OK;
+	}
+
+exit:
+	if (err == BCME_OK) {
+		WL_INFORM(("Success\n"));
+#if defined(WL_CFG80211_P2P_DEV_IF)
+		cfg80211_ready_on_channel(cfgdev, *cookie, channel,
+			duration, GFP_KERNEL);
+#else
+		cfg80211_ready_on_channel(cfgdev, *cookie, channel,
+			channel_type, duration, GFP_KERNEL);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+	} else {
+		WL_ERR(("Fail to Set (err=%d cookie:%llu)\n", err, *cookie));
+	}
+	return err;
+}
+
+static s32
+wl_cfg80211_cancel_remain_on_channel(struct wiphy *wiphy,
+	bcm_struct_cfgdev *cfgdev, u64 cookie)
+{
+	s32 err = 0;
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+	if (cfgdev->iftype == NL80211_IFTYPE_P2P_DEVICE) {
+		WL_DBG((" enter ) on P2P dedicated discover interface\n"));
+	}
+#else
+	WL_DBG((" enter ) netdev_ifidx: %d \n", cfgdev->ifindex));
+#endif /* WL_CFG80211_P2P_DEV_IF */
+	return err;
+}
+
+static void
+wl_cfg80211_afx_handler(struct work_struct *work)
+{
+	struct afx_hdl *afx_instance;
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	s32 ret = BCME_OK;
+
+	afx_instance = container_of(work, struct afx_hdl, work);
+	if (afx_instance != NULL && cfg->afx_hdl->is_active) {
+		if (cfg->afx_hdl->is_listen && cfg->afx_hdl->my_listen_chan) {
+			ret = wl_cfgp2p_discover_listen(cfg, cfg->afx_hdl->my_listen_chan,
+				(100 * (1 + (RANDOM32() % 3)))); /* 100ms ~ 300ms */
+		} else {
+			ret = wl_cfgp2p_act_frm_search(cfg, cfg->afx_hdl->dev,
+				cfg->afx_hdl->bssidx, cfg->afx_hdl->peer_listen_chan,
+				NULL);
+		}
+		if (unlikely(ret != BCME_OK)) {
+			WL_ERR(("ERROR occurred! returned value is (%d)\n", ret));
+			if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL))
+				complete(&cfg->act_frm_scan);
+		}
+	}
+}
+
+static s32
+wl_cfg80211_af_searching_channel(struct bcm_cfg80211 *cfg, struct net_device *dev)
+{
+	u32 max_retry = WL_CHANNEL_SYNC_RETRY;
+
+	if (dev == NULL)
+		return -1;
+
+	WL_DBG((" enter ) \n"));
+
+	wl_set_drv_status(cfg, FINDING_COMMON_CHANNEL, dev);
+	cfg->afx_hdl->is_active = TRUE;
+
+	/* Loop to wait until we find a peer's channel or the
+	 * pending action frame tx is cancelled.
+	 */
+	while ((cfg->afx_hdl->retry < max_retry) &&
+		(cfg->afx_hdl->peer_chan == WL_INVALID)) {
+		cfg->afx_hdl->is_listen = FALSE;
+		wl_set_drv_status(cfg, SCANNING, dev);
+		WL_DBG(("Scheduling the action frame for sending.. retry %d\n",
+			cfg->afx_hdl->retry));
+		/* search peer on peer's listen channel */
+		schedule_work(&cfg->afx_hdl->work);
+		wait_for_completion_timeout(&cfg->act_frm_scan,
+			msecs_to_jiffies(WL_AF_SEARCH_TIME_MAX));
+
+		if ((cfg->afx_hdl->peer_chan != WL_INVALID) ||
+			!(wl_get_drv_status(cfg, FINDING_COMMON_CHANNEL, dev)))
+			break;
+
+		if (cfg->afx_hdl->my_listen_chan) {
+			WL_DBG(("Scheduling Listen peer in my listen channel = %d\n",
+				cfg->afx_hdl->my_listen_chan));
+			/* listen on my listen channel */
+			cfg->afx_hdl->is_listen = TRUE;
+			schedule_work(&cfg->afx_hdl->work);
+			wait_for_completion_timeout(&cfg->act_frm_scan,
+				msecs_to_jiffies(WL_AF_SEARCH_TIME_MAX));
+		}
+		if ((cfg->afx_hdl->peer_chan != WL_INVALID) ||
+			!(wl_get_drv_status(cfg, FINDING_COMMON_CHANNEL, dev)))
+			break;
+
+		cfg->afx_hdl->retry++;
+
+		WL_AF_TX_KEEP_PRI_CONNECTION_VSDB(cfg);
+	}
+
+	cfg->afx_hdl->is_active = FALSE;
+
+	wl_clr_drv_status(cfg, SCANNING, dev);
+	wl_clr_drv_status(cfg, FINDING_COMMON_CHANNEL, dev);
+
+	return (cfg->afx_hdl->peer_chan);
+}
+
+struct p2p_config_af_params {
+	s32 max_tx_retry;	/* max tx retry count if tx no ack */
+	/* To make sure to send successfully action frame, we have to turn off mpc
+	 * 0: off, 1: on,  (-1): do nothing
+	 */
+	s32 mpc_onoff;
+#ifdef WL_CFG80211_SYNC_GON
+	bool extra_listen;
+#endif
+	bool search_channel;	/* 1: search peer's channel to send af */
+};
+
+static s32
+wl_cfg80211_config_p2p_pub_af_tx(struct wiphy *wiphy,
+	wl_action_frame_t *action_frame, wl_af_params_t *af_params,
+	struct p2p_config_af_params *config_af_params)
+{
+	s32 err = BCME_OK;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	wifi_p2p_pub_act_frame_t *act_frm =
+		(wifi_p2p_pub_act_frame_t *) (action_frame->data);
+
+	/* initialize default value */
+#ifdef WL_CFG80211_SYNC_GON
+	config_af_params->extra_listen = true;
+#endif
+	config_af_params->search_channel = false;
+	config_af_params->max_tx_retry = WL_AF_TX_MAX_RETRY;
+	config_af_params->mpc_onoff = -1;
+	cfg->next_af_subtype = P2P_PAF_SUBTYPE_INVALID;
+
+	switch (act_frm->subtype) {
+	case P2P_PAF_GON_REQ: {
+		WL_DBG(("P2P: GO_NEG_PHASE status set \n"));
+		wl_set_p2p_status(cfg, GO_NEG_PHASE);
+
+		config_af_params->mpc_onoff = 0;
+		config_af_params->search_channel = true;
+		cfg->next_af_subtype = act_frm->subtype + 1;
+
+		/* increase dwell time to wait for RESP frame */
+		af_params->dwell_time = WL_MED_DWELL_TIME;
+
+		break;
+	}
+	case P2P_PAF_GON_RSP: {
+		cfg->next_af_subtype = act_frm->subtype + 1;
+		/* increase dwell time to wait for CONF frame */
+		af_params->dwell_time = WL_MED_DWELL_TIME + 100;
+		break;
+	}
+	case P2P_PAF_GON_CONF: {
+		/* If we reached till GO Neg confirmation reset the filter */
+		WL_DBG(("P2P: GO_NEG_PHASE status cleared \n"));
+		wl_clr_p2p_status(cfg, GO_NEG_PHASE);
+
+		/* turn on mpc again if go nego is done */
+		config_af_params->mpc_onoff = 1;
+
+		/* minimize dwell time */
+		af_params->dwell_time = WL_MIN_DWELL_TIME;
+
+#ifdef WL_CFG80211_SYNC_GON
+		config_af_params->extra_listen = false;
+#endif /* WL_CFG80211_SYNC_GON */
+		break;
+	}
+	case P2P_PAF_INVITE_REQ: {
+		config_af_params->search_channel = true;
+		cfg->next_af_subtype = act_frm->subtype + 1;
+
+		/* increase dwell time */
+		af_params->dwell_time = WL_MED_DWELL_TIME;
+		break;
+	}
+	case P2P_PAF_INVITE_RSP:
+		/* minimize dwell time */
+		af_params->dwell_time = WL_MIN_DWELL_TIME;
+#ifdef WL_CFG80211_SYNC_GON
+		config_af_params->extra_listen = false;
+#endif /* WL_CFG80211_SYNC_GON */
+		break;
+	case P2P_PAF_DEVDIS_REQ: {
+		if (IS_ACTPUB_WITHOUT_GROUP_ID(&act_frm->elts[0],
+			action_frame->len)) {
+			config_af_params->search_channel = true;
+		}
+
+		cfg->next_af_subtype = act_frm->subtype + 1;
+		/* maximize dwell time to wait for RESP frame */
+		af_params->dwell_time = WL_LONG_DWELL_TIME;
+		break;
+	}
+	case P2P_PAF_DEVDIS_RSP:
+		/* minimize dwell time */
+		af_params->dwell_time = WL_MIN_DWELL_TIME;
+#ifdef WL_CFG80211_SYNC_GON
+		config_af_params->extra_listen = false;
+#endif /* WL_CFG80211_SYNC_GON */
+		break;
+	case P2P_PAF_PROVDIS_REQ: {
+		if (IS_ACTPUB_WITHOUT_GROUP_ID(&act_frm->elts[0],
+			action_frame->len)) {
+			config_af_params->search_channel = true;
+		}
+
+		config_af_params->mpc_onoff = 0;
+		cfg->next_af_subtype = act_frm->subtype + 1;
+		/* increase dwell time to wait for RESP frame */
+		af_params->dwell_time = WL_MED_DWELL_TIME;
+		break;
+	}
+	case P2P_PAF_PROVDIS_RSP: {
+		cfg->next_af_subtype = P2P_PAF_GON_REQ;
+		af_params->dwell_time = WL_MIN_DWELL_TIME;
+#ifdef WL_CFG80211_SYNC_GON
+		config_af_params->extra_listen = false;
+#endif /* WL_CFG80211_SYNC_GON */
+		break;
+	}
+	default:
+		WL_DBG(("Unknown p2p pub act frame subtype: %d\n",
+			act_frm->subtype));
+		err = BCME_BADARG;
+	}
+	return err;
+}
+
+#ifdef WL11U
+static bool
+wl_cfg80211_check_DFS_channel(struct bcm_cfg80211 *cfg, wl_af_params_t *af_params,
+	void *frame, u16 frame_len)
+{
+	struct wl_scan_results *bss_list;
+	struct wl_bss_info *bi = NULL;
+	bool result = false;
+	s32 i;
+	chanspec_t chanspec;
+
+	/* If DFS channel is 52~148, check to block it or not */
+	if (af_params &&
+		(af_params->channel >= 52 && af_params->channel <= 148)) {
+		if (!wl_cfgp2p_is_p2p_action(frame, frame_len)) {
+			bss_list = cfg->bss_list;
+			bi = next_bss(bss_list, bi);
+			for_each_bss(bss_list, bi, i) {
+				chanspec = wl_chspec_driver_to_host(bi->chanspec);
+				if (CHSPEC_IS5G(chanspec) &&
+					((bi->ctl_ch ? bi->ctl_ch : CHSPEC_CHANNEL(chanspec))
+					== af_params->channel)) {
+					result = true;	/* do not block the action frame */
+					break;
+				}
+			}
+		}
+	}
+	else {
+		result = true;
+	}
+
+	WL_DBG(("result=%s", result?"true":"false"));
+	return result;
+}
+#endif /* WL11U */
+
+
+static bool
+wl_cfg80211_send_action_frame(struct wiphy *wiphy, struct net_device *dev,
+	bcm_struct_cfgdev *cfgdev, wl_af_params_t *af_params,
+	wl_action_frame_t *action_frame, u16 action_frame_len, s32 bssidx)
+{
+#ifdef WL11U
+	struct net_device *ndev = NULL;
+#endif /* WL11U */
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	bool ack = false;
+	u8 category, action;
+	s32 tx_retry;
+	struct p2p_config_af_params config_af_params;
+#ifdef VSDB
+	ulong off_chan_started_jiffies = 0;
+#endif
+	dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+
+
+	/* Add the default dwell time
+	 * Dwell time to stay off-channel to wait for a response action frame
+	 * after transmitting an GO Negotiation action frame
+	 */
+	af_params->dwell_time = WL_DWELL_TIME;
+
+#ifdef WL11U
+#if defined(WL_CFG80211_P2P_DEV_IF)
+	ndev = dev;
+#else
+	ndev = ndev_to_cfgdev(cfgdev);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+#endif /* WL11U */
+
+	category = action_frame->data[DOT11_ACTION_CAT_OFF];
+	action = action_frame->data[DOT11_ACTION_ACT_OFF];
+
+	/* initialize variables */
+	tx_retry = 0;
+	cfg->next_af_subtype = P2P_PAF_SUBTYPE_INVALID;
+	config_af_params.max_tx_retry = WL_AF_TX_MAX_RETRY;
+	config_af_params.mpc_onoff = -1;
+	config_af_params.search_channel = false;
+#ifdef WL_CFG80211_SYNC_GON
+	config_af_params.extra_listen = false;
+#endif
+
+	/* config parameters */
+	/* Public Action Frame Process - DOT11_ACTION_CAT_PUBLIC */
+	if (category == DOT11_ACTION_CAT_PUBLIC) {
+		if ((action == P2P_PUB_AF_ACTION) &&
+			(action_frame_len >= sizeof(wifi_p2p_pub_act_frame_t))) {
+			/* p2p public action frame process */
+			if (BCME_OK != wl_cfg80211_config_p2p_pub_af_tx(wiphy,
+				action_frame, af_params, &config_af_params)) {
+				WL_DBG(("Unknown subtype.\n"));
+			}
+
+		} else if (action_frame_len >= sizeof(wifi_p2psd_gas_pub_act_frame_t)) {
+			/* service discovery process */
+			if (action == P2PSD_ACTION_ID_GAS_IREQ ||
+				action == P2PSD_ACTION_ID_GAS_CREQ) {
+				/* configure service discovery query frame */
+
+				config_af_params.search_channel = true;
+
+				/* save next af suptype to cancel remained dwell time */
+				cfg->next_af_subtype = action + 1;
+
+				af_params->dwell_time = WL_MED_DWELL_TIME;
+			} else if (action == P2PSD_ACTION_ID_GAS_IRESP ||
+				action == P2PSD_ACTION_ID_GAS_CRESP) {
+				/* configure service discovery response frame */
+				af_params->dwell_time = WL_MIN_DWELL_TIME;
+			} else {
+				WL_DBG(("Unknown action type: %d\n", action));
+			}
+		} else {
+			WL_DBG(("Unknown Frame: category 0x%x, action 0x%x, length %d\n",
+				category, action, action_frame_len));
+	}
+	} else if (category == P2P_AF_CATEGORY) {
+		/* do not configure anything. it will be sent with a default configuration */
+	} else {
+		WL_DBG(("Unknown Frame: category 0x%x, action 0x%x\n",
+			category, action));
+		if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
+			wl_clr_drv_status(cfg, SENDING_ACT_FRM, dev);
+			return false;
+		}
+	}
+
+	/* To make sure to send successfully action frame, we have to turn off mpc */
+	if (config_af_params.mpc_onoff == 0) {
+		wldev_iovar_setint(dev, "mpc", 0);
+	}
+
+	/* validate channel and p2p ies */
+	if (config_af_params.search_channel && IS_P2P_SOCIAL(af_params->channel) &&
+		wl_to_p2p_bss_saved_ie(cfg, P2PAPI_BSSCFG_DEVICE).p2p_probe_req_ie_len) {
+		config_af_params.search_channel = true;
+	} else {
+		config_af_params.search_channel = false;
+	}
+#ifdef WL11U
+	if (ndev == bcmcfg_to_prmry_ndev(cfg))
+		config_af_params.search_channel = false;
+#endif /* WL11U */
+
+#ifdef VSDB
+	/* if connecting on primary iface, sleep for a while before sending af tx for VSDB */
+	if (wl_get_drv_status(cfg, CONNECTING, bcmcfg_to_prmry_ndev(cfg))) {
+		OSL_SLEEP(50);
+	}
+#endif
+
+	/* if scan is ongoing, abort current scan. */
+	if (wl_get_drv_status_all(cfg, SCANNING)) {
+		wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true);
+	}
+
+#ifdef WL11U
+	/* handling DFS channel exceptions */
+	if (!wl_cfg80211_check_DFS_channel(cfg, af_params, action_frame->data, action_frame->len)) {
+		return false;	/* the action frame was blocked */
+	}
+#endif /* WL11U */
+
+	/* set status and destination address before sending af */
+	if (cfg->next_af_subtype != P2P_PAF_SUBTYPE_INVALID) {
+		/* set this status to cancel the remained dwell time in rx process */
+		wl_set_drv_status(cfg, WAITING_NEXT_ACT_FRM, dev);
+	}
+	wl_set_drv_status(cfg, SENDING_ACT_FRM, dev);
+	memcpy(cfg->afx_hdl->tx_dst_addr.octet,
+		af_params->action_frame.da.octet,
+		sizeof(cfg->afx_hdl->tx_dst_addr.octet));
+
+	/* save af_params for rx process */
+	cfg->afx_hdl->pending_tx_act_frm = af_params;
+
+	/* search peer's channel */
+	if (config_af_params.search_channel) {
+		/* initialize afx_hdl */
+		if (wl_cfgp2p_find_idx(cfg, dev, &cfg->afx_hdl->bssidx) != BCME_OK) {
+			WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+			goto exit;
+		}
+		cfg->afx_hdl->dev = dev;
+		cfg->afx_hdl->retry = 0;
+		cfg->afx_hdl->peer_chan = WL_INVALID;
+
+		if (wl_cfg80211_af_searching_channel(cfg, dev) == WL_INVALID) {
+			WL_ERR(("couldn't find peer's channel.\n"));
+			wl_cfgp2p_print_actframe(true, action_frame->data, action_frame->len,
+				af_params->channel);
+			goto exit;
+		}
+
+		wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
+		/*
+		 * Abort scan even for VSDB scenarios. Scan gets aborted in firmware
+		 * but after the check of piggyback algorithm.
+		 * To take care of current piggback algo, lets abort the scan here itself.
+		 */
+		wl_notify_escan_complete(cfg, dev, true, true);
+		/* Suspend P2P discovery's search-listen to prevent it from
+		 * starting a scan or changing the channel.
+		 */
+		wl_cfgp2p_discover_enable_search(cfg, false);
+
+		/* update channel */
+		af_params->channel = cfg->afx_hdl->peer_chan;
+	}
+
+#ifdef VSDB
+	off_chan_started_jiffies = jiffies;
+#endif /* VSDB */
+
+	wl_cfgp2p_print_actframe(true, action_frame->data, action_frame->len, af_params->channel);
+
+	/* Now send a tx action frame */
+	ack = wl_cfgp2p_tx_action_frame(cfg, dev, af_params, bssidx) ? false : true;
+
+	/* if failed, retry it. tx_retry_max value is configure by .... */
+	while ((ack == false) && (tx_retry++ < config_af_params.max_tx_retry)) {
+#ifdef VSDB
+		if (af_params->channel) {
+			if (jiffies_to_msecs(jiffies - off_chan_started_jiffies) >
+				OFF_CHAN_TIME_THRESHOLD_MS) {
+				WL_AF_TX_KEEP_PRI_CONNECTION_VSDB(cfg);
+				off_chan_started_jiffies = jiffies;
+			} else
+				OSL_SLEEP(AF_RETRY_DELAY_TIME);
+		}
+#endif /* VSDB */
+		ack = wl_cfgp2p_tx_action_frame(cfg, dev, af_params, bssidx) ?
+			false : true;
+	}
+
+	if (ack == false) {
+		WL_ERR(("Failed to send Action Frame(retry %d)\n", tx_retry));
+	}
+	WL_DBG(("Complete to send action frame\n"));
+exit:
+	/* Clear SENDING_ACT_FRM after all sending af is done */
+	wl_clr_drv_status(cfg, SENDING_ACT_FRM, dev);
+
+#ifdef WL_CFG80211_SYNC_GON
+	/* WAR: sometimes dongle does not keep the dwell time of 'actframe'.
+	 * if we coundn't get the next action response frame and dongle does not keep
+	 * the dwell time, go to listen state again to get next action response frame.
+	 */
+	if (ack && config_af_params.extra_listen &&
+		wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM) &&
+		cfg->af_sent_channel == cfg->afx_hdl->my_listen_chan) {
+		s32 extar_listen_time;
+
+		extar_listen_time = af_params->dwell_time -
+			jiffies_to_msecs(jiffies - cfg->af_tx_sent_jiffies);
+
+		if (extar_listen_time > 50) {
+			wl_set_drv_status(cfg, WAITING_NEXT_ACT_FRM_LISTEN, dev);
+			WL_DBG(("Wait more time! actual af time:%d,"
+				"calculated extar listen:%d\n",
+				af_params->dwell_time, extar_listen_time));
+			if (wl_cfgp2p_discover_listen(cfg, cfg->af_sent_channel,
+				extar_listen_time + 100) == BCME_OK) {
+				wait_for_completion_timeout(&cfg->wait_next_af,
+					msecs_to_jiffies(extar_listen_time + 100 + 300));
+			}
+			wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM_LISTEN, dev);
+		}
+	}
+#endif /* WL_CFG80211_SYNC_GON */
+	wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM, dev);
+
+	if (cfg->afx_hdl->pending_tx_act_frm)
+		cfg->afx_hdl->pending_tx_act_frm = NULL;
+
+	WL_INFORM(("-- sending Action Frame is %s, listen chan: %d\n",
+		(ack) ? "Succeeded!!":"Failed!!", cfg->afx_hdl->my_listen_chan));
+
+
+	/* if all done, turn mpc on again */
+	if (config_af_params.mpc_onoff == 1) {
+		wldev_iovar_setint(dev, "mpc", 1);
+	}
+
+	return ack;
+}
+
+#define MAX_NUM_OF_ASSOCIATED_DEV       64
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+static s32
+wl_cfg80211_mgmt_tx(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev,
+	struct cfg80211_mgmt_tx_params *params, u64 *cookie)
+#else
+static s32
+wl_cfg80211_mgmt_tx(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev,
+	struct ieee80211_channel *channel, bool offchan,
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 7, 0))
+	enum nl80211_channel_type channel_type,
+	bool channel_type_valid,
+#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(3, 7, 0) */
+	unsigned int wait, const u8* buf, size_t len,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
+	bool no_cck,
+#endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0))
+	bool dont_wait_for_ack,
+#endif
+	u64 *cookie)
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) */
+{
+	wl_action_frame_t *action_frame;
+	wl_af_params_t *af_params;
+	scb_val_t scb_val;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+	struct ieee80211_channel *channel = params->chan;
+	const u8 *buf = params->buf;
+	size_t len = params->len;
+#endif
+	const struct ieee80211_mgmt *mgmt;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct net_device *dev = NULL;
+	s32 err = BCME_OK;
+	s32 bssidx = 0;
+	u32 id;
+	bool ack = false;
+	s8 eabuf[ETHER_ADDR_STR_LEN];
+
+	WL_DBG(("Enter \n"));
+
+	dev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+	/* set bsscfg idx for iovar (wlan0: P2PAPI_BSSCFG_PRIMARY, p2p: P2PAPI_BSSCFG_DEVICE)	*/
+	if (discover_cfgdev(cfgdev, cfg)) {
+		bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+	}
+	else {
+		if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+			WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+			return BCME_ERROR;
+		}
+	}
+
+	WL_DBG(("TX target bssidx=%d\n", bssidx));
+
+	if (p2p_is_on(cfg)) {
+		/* Suspend P2P discovery search-listen to prevent it from changing the
+		 * channel.
+		 */
+		if ((err = wl_cfgp2p_discover_enable_search(cfg, false)) < 0) {
+			WL_ERR(("Can not disable discovery mode\n"));
+			return -EFAULT;
+		}
+	}
+	*cookie = 0;
+	id = cfg->send_action_id++;
+	if (id == 0)
+		id = cfg->send_action_id++;
+	*cookie = id;
+	mgmt = (const struct ieee80211_mgmt *)buf;
+	if (ieee80211_is_mgmt(mgmt->frame_control)) {
+		if (ieee80211_is_probe_resp(mgmt->frame_control)) {
+			s32 ie_offset =  DOT11_MGMT_HDR_LEN + DOT11_BCN_PRB_FIXED_LEN;
+			s32 ie_len = len - ie_offset;
+			if ((dev == bcmcfg_to_prmry_ndev(cfg)) && cfg->p2p)
+				bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+				wl_cfgp2p_set_management_ie(cfg, dev, bssidx,
+				VNDR_IE_PRBRSP_FLAG, (u8 *)(buf + ie_offset), ie_len);
+			cfg80211_mgmt_tx_status(cfgdev, *cookie, buf, len, true, GFP_KERNEL);
+			goto exit;
+		} else if (ieee80211_is_disassoc(mgmt->frame_control) ||
+			ieee80211_is_deauth(mgmt->frame_control)) {
+			char mac_buf[MAX_NUM_OF_ASSOCIATED_DEV *
+				sizeof(struct ether_addr) + sizeof(uint)] = {0};
+			int num_associated = 0;
+			struct maclist *assoc_maclist = (struct maclist *)mac_buf;
+			if (!bcmp((const uint8 *)BSSID_BROADCAST,
+				(const struct ether_addr *)mgmt->da, ETHER_ADDR_LEN)) {
+				assoc_maclist->count = MAX_NUM_OF_ASSOCIATED_DEV;
+				err = wldev_ioctl(dev, WLC_GET_ASSOCLIST,
+					assoc_maclist, sizeof(mac_buf), false);
+				if (err < 0)
+					WL_ERR(("WLC_GET_ASSOCLIST error %d\n", err));
+				else
+					num_associated = assoc_maclist->count;
+			}
+			memcpy(scb_val.ea.octet, mgmt->da, ETH_ALEN);
+			scb_val.val = mgmt->u.disassoc.reason_code;
+			err = wldev_ioctl(dev, WLC_SCB_DEAUTHENTICATE_FOR_REASON, &scb_val,
+				sizeof(scb_val_t), true);
+			if (err < 0)
+				WL_ERR(("WLC_SCB_DEAUTHENTICATE_FOR_REASON error %d\n", err));
+			WL_ERR(("Disconnect STA : %s scb_val.val %d\n",
+				bcm_ether_ntoa((const struct ether_addr *)mgmt->da, eabuf),
+				scb_val.val));
+
+			if (num_associated > 0 && ETHER_ISBCAST(mgmt->da))
+				wl_delay(400);
+
+			cfg80211_mgmt_tx_status(cfgdev, *cookie, buf, len, true, GFP_KERNEL);
+			goto exit;
+
+		} else if (ieee80211_is_action(mgmt->frame_control)) {
+			/* Abort the dwell time of any previous off-channel
+			* action frame that may be still in effect.  Sending
+			* off-channel action frames relies on the driver's
+			* scan engine.  If a previous off-channel action frame
+			* tx is still in progress (including the dwell time),
+			* then this new action frame will not be sent out.
+			*/
+/* Do not abort scan for VSDB. Scan will be aborted in firmware if necessary.
+ * And previous off-channel action frame must be ended before new af tx.
+ */
+#ifndef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+			wl_notify_escan_complete(cfg, dev, true, true);
+#endif /* not WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+		}
+
+	} else {
+		WL_ERR(("Driver only allows MGMT packet type\n"));
+		goto exit;
+	}
+
+	af_params = (wl_af_params_t *) kzalloc(WL_WIFI_AF_PARAMS_SIZE, GFP_KERNEL);
+
+	if (af_params == NULL)
+	{
+		WL_ERR(("unable to allocate frame\n"));
+		return -ENOMEM;
+	}
+
+	action_frame = &af_params->action_frame;
+
+	/* Add the packet Id */
+	action_frame->packetId = *cookie;
+	WL_DBG(("action frame %d\n", action_frame->packetId));
+	/* Add BSSID */
+	memcpy(&action_frame->da, &mgmt->da[0], ETHER_ADDR_LEN);
+	memcpy(&af_params->BSSID, &mgmt->bssid[0], ETHER_ADDR_LEN);
+
+	/* Add the length exepted for 802.11 header  */
+	action_frame->len = len - DOT11_MGMT_HDR_LEN;
+	WL_DBG(("action_frame->len: %d\n", action_frame->len));
+
+	/* Add the channel */
+	af_params->channel =
+		ieee80211_frequency_to_channel(channel->center_freq);
+	/* Save listen_chan for searching common channel */
+	cfg->afx_hdl->peer_listen_chan = af_params->channel;
+	WL_DBG(("channel from upper layer %d\n", cfg->afx_hdl->peer_listen_chan));
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+	af_params->dwell_time = params->wait;
+#else
+	af_params->dwell_time = wait;
+#endif
+
+	memcpy(action_frame->data, &buf[DOT11_MGMT_HDR_LEN], action_frame->len);
+
+	ack = wl_cfg80211_send_action_frame(wiphy, dev, cfgdev, af_params,
+		action_frame, action_frame->len, bssidx);
+	cfg80211_mgmt_tx_status(cfgdev, *cookie, buf, len, ack, GFP_KERNEL);
+
+	kfree(af_params);
+exit:
+	return err;
+}
+
+
+static void
+wl_cfg80211_mgmt_frame_register(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev,
+	u16 frame_type, bool reg)
+{
+
+	WL_DBG(("frame_type: %x, reg: %d\n", frame_type, reg));
+
+	if (frame_type != (IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_REQ))
+		return;
+
+	return;
+}
+
+
+static s32
+wl_cfg80211_change_bss(struct wiphy *wiphy,
+	struct net_device *dev,
+	struct bss_parameters *params)
+{
+	s32 err = 0;
+	s32 ap_isolate = 0;
+
+	if (params->use_cts_prot >= 0) {
+	}
+
+	if (params->use_short_preamble >= 0) {
+	}
+
+	if (params->use_short_slot_time >= 0) {
+	}
+
+	if (params->basic_rates) {
+	}
+
+	if (params->ap_isolate >= 0) {
+		ap_isolate = params->ap_isolate;
+		err = wldev_iovar_setint(dev, "ap_isolate", ap_isolate);
+		if (unlikely(err))
+		{
+			WL_ERR(("set ap_isolate Error (%d)\n", err));
+		}
+	}
+
+	if (params->ht_opmode >= 0) {
+	}
+
+
+	return 0;
+}
+
+static s32
+wl_cfg80211_set_channel(struct wiphy *wiphy, struct net_device *dev,
+	struct ieee80211_channel *chan,
+	enum nl80211_channel_type channel_type)
+{
+	s32 _chan;
+	chanspec_t chspec = 0;
+	chanspec_t fw_chspec = 0;
+	u32 bw = WL_CHANSPEC_BW_20;
+
+	s32 err = BCME_OK;
+	s32 bw_cap = 0;
+	struct {
+		u32 band;
+		u32 bw_cap;
+	} param = {0, 0};
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+#ifdef CUSTOM_SET_CPUCORE
+	dhd_pub_t *dhd =  (dhd_pub_t *)(cfg->pub);
+#endif /* CUSTOM_SET_CPUCORE */
+
+	dev = ndev_to_wlc_ndev(dev, cfg);
+	_chan = ieee80211_frequency_to_channel(chan->center_freq);
+	WL_ERR(("netdev_ifidx(%d), chan_type(%d) target channel(%d) \n",
+		dev->ifindex, channel_type, _chan));
+
+
+	if (chan->band == IEEE80211_BAND_5GHZ) {
+		param.band = WLC_BAND_5G;
+		err = wldev_iovar_getbuf(dev, "bw_cap", &param, sizeof(param),
+			cfg->ioctl_buf, WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
+		if (err) {
+			if (err != BCME_UNSUPPORTED) {
+				WL_ERR(("bw_cap failed, %d\n", err));
+				return err;
+			} else {
+				err = wldev_iovar_getint(dev, "mimo_bw_cap", &bw_cap);
+				if (err) {
+					WL_ERR(("error get mimo_bw_cap (%d)\n", err));
+				}
+				if (bw_cap != WLC_N_BW_20ALL)
+					bw = WL_CHANSPEC_BW_40;
+			}
+		} else {
+			if (WL_BW_CAP_80MHZ(cfg->ioctl_buf[0]))
+				bw = WL_CHANSPEC_BW_80;
+			else if (WL_BW_CAP_40MHZ(cfg->ioctl_buf[0]))
+				bw = WL_CHANSPEC_BW_40;
+			else
+				bw = WL_CHANSPEC_BW_20;
+
+		}
+
+	} else if (chan->band == IEEE80211_BAND_2GHZ)
+		bw = WL_CHANSPEC_BW_20;
+set_channel:
+	chspec = wf_channel2chspec(_chan, bw);
+	if (wf_chspec_valid(chspec)) {
+		fw_chspec = wl_chspec_host_to_driver(chspec);
+		if (fw_chspec != INVCHANSPEC) {
+			if ((err = wldev_iovar_setint(dev, "chanspec",
+				fw_chspec)) == BCME_BADCHAN) {
+				if (bw == WL_CHANSPEC_BW_80)
+					goto change_bw;
+				err = wldev_ioctl(dev, WLC_SET_CHANNEL,
+					&_chan, sizeof(_chan), true);
+				if (err < 0) {
+					WL_ERR(("WLC_SET_CHANNEL error %d"
+					"chip may not be supporting this channel\n", err));
+				}
+			} else if (err) {
+				WL_ERR(("failed to set chanspec error %d\n", err));
+			}
+		} else {
+			WL_ERR(("failed to convert host chanspec to fw chanspec\n"));
+			err = BCME_ERROR;
+		}
+	} else {
+change_bw:
+		if (bw == WL_CHANSPEC_BW_80)
+			bw = WL_CHANSPEC_BW_40;
+		else if (bw == WL_CHANSPEC_BW_40)
+			bw = WL_CHANSPEC_BW_20;
+		else
+			bw = 0;
+		if (bw)
+			goto set_channel;
+		WL_ERR(("Invalid chanspec 0x%x\n", chspec));
+		err = BCME_ERROR;
+	}
+#ifdef CUSTOM_SET_CPUCORE
+	if (dhd->op_mode == DHD_FLAG_HOSTAP_MODE) {
+		WL_DBG(("SoftAP mode do not need to set cpucore\n"));
+	} else if ((dev == wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION)) &&
+		(chspec & WL_CHANSPEC_BW_80)) {
+		/* If GO is vht80 */
+		dhd->chan_isvht80 |= DHD_FLAG_P2P_MODE;
+		dhd_set_cpucore(dhd, TRUE);
+	}
+#endif /* CUSTOM_SET_CPUCORE */
+	return err;
+}
+
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+struct net_device *
+wl_cfg80211_get_remain_on_channel_ndev(struct bcm_cfg80211 *cfg)
+{
+	struct net_info *_net_info, *next;
+	list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
+		if (_net_info->ndev &&
+			test_bit(WL_STATUS_REMAINING_ON_CHANNEL, &_net_info->sme_state))
+			return _net_info->ndev;
+	}
+	return NULL;
+}
+#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+
+static s32
+wl_validate_opensecurity(struct net_device *dev, s32 bssidx)
+{
+	s32 err = BCME_OK;
+
+	/* set auth */
+	err = wldev_iovar_setint_bsscfg(dev, "auth", 0, bssidx);
+	if (err < 0) {
+		WL_ERR(("auth error %d\n", err));
+		return BCME_ERROR;
+	}
+	/* set wsec */
+	err = wldev_iovar_setint_bsscfg(dev, "wsec", 0, bssidx);
+	if (err < 0) {
+		WL_ERR(("wsec error %d\n", err));
+		return BCME_ERROR;
+	}
+
+	/* set upper-layer auth */
+	err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", WPA_AUTH_NONE, bssidx);
+	if (err < 0) {
+		WL_ERR(("wpa_auth error %d\n", err));
+		return BCME_ERROR;
+	}
+
+	return 0;
+}
+
+static s32
+wl_validate_wpa2ie(struct net_device *dev, bcm_tlv_t *wpa2ie, s32 bssidx)
+{
+	s32 len = 0;
+	s32 err = BCME_OK;
+	u16 auth = 0; /* d11 open authentication */
+	u32 wsec;
+	u32 pval = 0;
+	u32 gval = 0;
+	u32 wpa_auth = 0;
+	wpa_suite_mcast_t *mcast;
+	wpa_suite_ucast_t *ucast;
+	wpa_suite_auth_key_mgmt_t *mgmt;
+
+	u16 suite_count;
+	u8 rsn_cap[2];
+	u32 wme_bss_disable;
+
+	if (wpa2ie == NULL)
+		goto exit;
+
+	WL_DBG(("Enter \n"));
+	len =  wpa2ie->len;
+	/* check the mcast cipher */
+	mcast = (wpa_suite_mcast_t *)&wpa2ie->data[WPA2_VERSION_LEN];
+	switch (mcast->type) {
+		case WPA_CIPHER_NONE:
+			gval = 0;
+			break;
+		case WPA_CIPHER_WEP_40:
+		case WPA_CIPHER_WEP_104:
+			gval = WEP_ENABLED;
+			break;
+		case WPA_CIPHER_TKIP:
+			gval = TKIP_ENABLED;
+			break;
+		case WPA_CIPHER_AES_CCM:
+			gval = AES_ENABLED;
+			break;
+		default:
+			WL_ERR(("No Security Info\n"));
+			break;
+	}
+	if ((len -= WPA_SUITE_LEN) <= 0)
+		return BCME_BADLEN;
+
+	/* check the unicast cipher */
+	ucast = (wpa_suite_ucast_t *)&mcast[1];
+	suite_count = ltoh16_ua(&ucast->count);
+	switch (ucast->list[0].type) {
+		case WPA_CIPHER_NONE:
+			pval = 0;
+			break;
+		case WPA_CIPHER_WEP_40:
+		case WPA_CIPHER_WEP_104:
+			pval = WEP_ENABLED;
+			break;
+		case WPA_CIPHER_TKIP:
+			pval = TKIP_ENABLED;
+			break;
+		case WPA_CIPHER_AES_CCM:
+			pval = AES_ENABLED;
+			break;
+		default:
+			WL_ERR(("No Security Info\n"));
+	}
+	if ((len -= (WPA_IE_SUITE_COUNT_LEN + (WPA_SUITE_LEN * suite_count))) <= 0)
+		return BCME_BADLEN;
+
+	/* FOR WPS , set SEC_OW_ENABLED */
+	wsec = (pval | gval | SES_OW_ENABLED);
+	/* check the AKM */
+	mgmt = (wpa_suite_auth_key_mgmt_t *)&ucast->list[suite_count];
+	suite_count = ltoh16_ua(&mgmt->count);
+	switch (mgmt->list[0].type) {
+		case RSN_AKM_NONE:
+			wpa_auth = WPA_AUTH_NONE;
+			break;
+		case RSN_AKM_UNSPECIFIED:
+			wpa_auth = WPA2_AUTH_UNSPECIFIED;
+			break;
+		case RSN_AKM_PSK:
+			wpa_auth = WPA2_AUTH_PSK;
+			break;
+		default:
+			WL_ERR(("No Key Mgmt Info\n"));
+	}
+
+	if ((len -= (WPA_IE_SUITE_COUNT_LEN + (WPA_SUITE_LEN * suite_count))) >= RSN_CAP_LEN) {
+		rsn_cap[0] = *(u8 *)&mgmt->list[suite_count];
+		rsn_cap[1] = *((u8 *)&mgmt->list[suite_count] + 1);
+
+		if (rsn_cap[0] & (RSN_CAP_16_REPLAY_CNTRS << RSN_CAP_PTK_REPLAY_CNTR_SHIFT)) {
+			wme_bss_disable = 0;
+		} else {
+			wme_bss_disable = 1;
+		}
+
+		/* set wme_bss_disable to sync RSN Capabilities */
+		err = wldev_iovar_setint_bsscfg(dev, "wme_bss_disable", wme_bss_disable, bssidx);
+		if (err < 0) {
+			WL_ERR(("wme_bss_disable error %d\n", err));
+			return BCME_ERROR;
+		}
+	} else {
+		WL_DBG(("There is no RSN Capabilities. remained len %d\n", len));
+	}
+
+	/* set auth */
+	err = wldev_iovar_setint_bsscfg(dev, "auth", auth, bssidx);
+	if (err < 0) {
+		WL_ERR(("auth error %d\n", err));
+		return BCME_ERROR;
+	}
+	/* set wsec */
+	err = wldev_iovar_setint_bsscfg(dev, "wsec", wsec, bssidx);
+	if (err < 0) {
+		WL_ERR(("wsec error %d\n", err));
+		return BCME_ERROR;
+	}
+	/* set upper-layer auth */
+	err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", wpa_auth, bssidx);
+	if (err < 0) {
+		WL_ERR(("wpa_auth error %d\n", err));
+		return BCME_ERROR;
+	}
+exit:
+	return 0;
+}
+
+static s32
+wl_validate_wpaie(struct net_device *dev, wpa_ie_fixed_t *wpaie, s32 bssidx)
+{
+	wpa_suite_mcast_t *mcast;
+	wpa_suite_ucast_t *ucast;
+	wpa_suite_auth_key_mgmt_t *mgmt;
+	u16 auth = 0; /* d11 open authentication */
+	u16 count;
+	s32 err = BCME_OK;
+	s32 len = 0;
+	u32 i;
+	u32 wsec;
+	u32 pval = 0;
+	u32 gval = 0;
+	u32 wpa_auth = 0;
+	u32 tmp = 0;
+
+	if (wpaie == NULL)
+		goto exit;
+	WL_DBG(("Enter \n"));
+	len = wpaie->length;    /* value length */
+	len -= WPA_IE_TAG_FIXED_LEN;
+	/* check for multicast cipher suite */
+	if (len < WPA_SUITE_LEN) {
+		WL_INFORM(("no multicast cipher suite\n"));
+		goto exit;
+	}
+
+	/* pick up multicast cipher */
+	mcast = (wpa_suite_mcast_t *)&wpaie[1];
+	len -= WPA_SUITE_LEN;
+	if (!bcmp(mcast->oui, WPA_OUI, WPA_OUI_LEN)) {
+		if (IS_WPA_CIPHER(mcast->type)) {
+			tmp = 0;
+			switch (mcast->type) {
+				case WPA_CIPHER_NONE:
+					tmp = 0;
+					break;
+				case WPA_CIPHER_WEP_40:
+				case WPA_CIPHER_WEP_104:
+					tmp = WEP_ENABLED;
+					break;
+				case WPA_CIPHER_TKIP:
+					tmp = TKIP_ENABLED;
+					break;
+				case WPA_CIPHER_AES_CCM:
+					tmp = AES_ENABLED;
+					break;
+				default:
+					WL_ERR(("No Security Info\n"));
+			}
+			gval |= tmp;
+		}
+	}
+	/* Check for unicast suite(s) */
+	if (len < WPA_IE_SUITE_COUNT_LEN) {
+		WL_INFORM(("no unicast suite\n"));
+		goto exit;
+	}
+	/* walk thru unicast cipher list and pick up what we recognize */
+	ucast = (wpa_suite_ucast_t *)&mcast[1];
+	count = ltoh16_ua(&ucast->count);
+	len -= WPA_IE_SUITE_COUNT_LEN;
+	for (i = 0; i < count && len >= WPA_SUITE_LEN;
+		i++, len -= WPA_SUITE_LEN) {
+		if (!bcmp(ucast->list[i].oui, WPA_OUI, WPA_OUI_LEN)) {
+			if (IS_WPA_CIPHER(ucast->list[i].type)) {
+				tmp = 0;
+				switch (ucast->list[i].type) {
+					case WPA_CIPHER_NONE:
+						tmp = 0;
+						break;
+					case WPA_CIPHER_WEP_40:
+					case WPA_CIPHER_WEP_104:
+						tmp = WEP_ENABLED;
+						break;
+					case WPA_CIPHER_TKIP:
+						tmp = TKIP_ENABLED;
+						break;
+					case WPA_CIPHER_AES_CCM:
+						tmp = AES_ENABLED;
+						break;
+					default:
+						WL_ERR(("No Security Info\n"));
+				}
+				pval |= tmp;
+			}
+		}
+	}
+	len -= (count - i) * WPA_SUITE_LEN;
+	/* Check for auth key management suite(s) */
+	if (len < WPA_IE_SUITE_COUNT_LEN) {
+		WL_INFORM((" no auth key mgmt suite\n"));
+		goto exit;
+	}
+	/* walk thru auth management suite list and pick up what we recognize */
+	mgmt = (wpa_suite_auth_key_mgmt_t *)&ucast->list[count];
+	count = ltoh16_ua(&mgmt->count);
+	len -= WPA_IE_SUITE_COUNT_LEN;
+	for (i = 0; i < count && len >= WPA_SUITE_LEN;
+		i++, len -= WPA_SUITE_LEN) {
+		if (!bcmp(mgmt->list[i].oui, WPA_OUI, WPA_OUI_LEN)) {
+			if (IS_WPA_AKM(mgmt->list[i].type)) {
+				tmp = 0;
+				switch (mgmt->list[i].type) {
+					case RSN_AKM_NONE:
+						tmp = WPA_AUTH_NONE;
+						break;
+					case RSN_AKM_UNSPECIFIED:
+						tmp = WPA_AUTH_UNSPECIFIED;
+						break;
+					case RSN_AKM_PSK:
+						tmp = WPA_AUTH_PSK;
+						break;
+					default:
+						WL_ERR(("No Key Mgmt Info\n"));
+				}
+				wpa_auth |= tmp;
+			}
+		}
+
+	}
+	/* FOR WPS , set SEC_OW_ENABLED */
+	wsec = (pval | gval | SES_OW_ENABLED);
+	/* set auth */
+	err = wldev_iovar_setint_bsscfg(dev, "auth", auth, bssidx);
+	if (err < 0) {
+		WL_ERR(("auth error %d\n", err));
+		return BCME_ERROR;
+	}
+	/* set wsec */
+	err = wldev_iovar_setint_bsscfg(dev, "wsec", wsec, bssidx);
+	if (err < 0) {
+		WL_ERR(("wsec error %d\n", err));
+		return BCME_ERROR;
+	}
+	/* set upper-layer auth */
+	err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", wpa_auth, bssidx);
+	if (err < 0) {
+		WL_ERR(("wpa_auth error %d\n", err));
+		return BCME_ERROR;
+	}
+exit:
+	return 0;
+}
+
+
+static s32
+wl_cfg80211_bcn_validate_sec(
+	struct net_device *dev,
+	struct parsed_ies *ies,
+	u32 dev_role,
+	s32 bssidx)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+
+	if (dev_role == NL80211_IFTYPE_P2P_GO && (ies->wpa2_ie)) {
+		/* For P2P GO, the sec type is WPA2-PSK */
+		WL_DBG(("P2P GO: validating wpa2_ie"));
+		if (wl_validate_wpa2ie(dev, ies->wpa2_ie, bssidx)  < 0)
+			return BCME_ERROR;
+
+	} else if (dev_role == NL80211_IFTYPE_AP) {
+
+		WL_DBG(("SoftAP: validating security"));
+		/* If wpa2_ie or wpa_ie is present validate it */
+
+		if ((ies->wpa2_ie || ies->wpa_ie) &&
+			((wl_validate_wpa2ie(dev, ies->wpa2_ie, bssidx)  < 0 ||
+			wl_validate_wpaie(dev, ies->wpa_ie, bssidx) < 0))) {
+			cfg->ap_info->security_mode = false;
+			return BCME_ERROR;
+		}
+
+		cfg->ap_info->security_mode = true;
+		if (cfg->ap_info->rsn_ie) {
+			kfree(cfg->ap_info->rsn_ie);
+			cfg->ap_info->rsn_ie = NULL;
+		}
+		if (cfg->ap_info->wpa_ie) {
+			kfree(cfg->ap_info->wpa_ie);
+			cfg->ap_info->wpa_ie = NULL;
+		}
+		if (cfg->ap_info->wps_ie) {
+			kfree(cfg->ap_info->wps_ie);
+			cfg->ap_info->wps_ie = NULL;
+		}
+		if (ies->wpa_ie != NULL) {
+			/* WPAIE */
+			cfg->ap_info->rsn_ie = NULL;
+			cfg->ap_info->wpa_ie = kmemdup(ies->wpa_ie,
+				ies->wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN,
+				GFP_KERNEL);
+		} else if (ies->wpa2_ie != NULL) {
+			/* RSNIE */
+			cfg->ap_info->wpa_ie = NULL;
+			cfg->ap_info->rsn_ie = kmemdup(ies->wpa2_ie,
+				ies->wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN,
+				GFP_KERNEL);
+		}
+		if (!ies->wpa2_ie && !ies->wpa_ie) {
+			wl_validate_opensecurity(dev, bssidx);
+			cfg->ap_info->security_mode = false;
+		}
+
+		if (ies->wps_ie) {
+			cfg->ap_info->wps_ie = kmemdup(ies->wps_ie, ies->wps_ie_len, GFP_KERNEL);
+		}
+	}
+
+	return 0;
+
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
+static s32 wl_cfg80211_bcn_set_params(
+	struct cfg80211_ap_settings *info,
+	struct net_device *dev,
+	u32 dev_role, s32 bssidx)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	s32 err = BCME_OK;
+
+	WL_DBG(("interval (%d) \ndtim_period (%d) \n",
+		info->beacon_interval, info->dtim_period));
+
+	if (info->beacon_interval) {
+		if ((err = wldev_ioctl(dev, WLC_SET_BCNPRD,
+			&info->beacon_interval, sizeof(s32), true)) < 0) {
+			WL_ERR(("Beacon Interval Set Error, %d\n", err));
+			return err;
+		}
+	}
+
+	if (info->dtim_period) {
+		if ((err = wldev_ioctl(dev, WLC_SET_DTIMPRD,
+			&info->dtim_period, sizeof(s32), true)) < 0) {
+			WL_ERR(("DTIM Interval Set Error, %d\n", err));
+			return err;
+		}
+	}
+
+	if ((info->ssid) && (info->ssid_len > 0) &&
+		(info->ssid_len <= 32)) {
+		WL_DBG(("SSID (%s) len:%zd \n", info->ssid, info->ssid_len));
+		if (dev_role == NL80211_IFTYPE_AP) {
+			/* Store the hostapd SSID */
+			memset(cfg->hostapd_ssid.SSID, 0x00, 32);
+			memcpy(cfg->hostapd_ssid.SSID, info->ssid, info->ssid_len);
+			cfg->hostapd_ssid.SSID_len = info->ssid_len;
+		} else {
+				/* P2P GO */
+			memset(cfg->p2p->ssid.SSID, 0x00, 32);
+			memcpy(cfg->p2p->ssid.SSID, info->ssid, info->ssid_len);
+			cfg->p2p->ssid.SSID_len = info->ssid_len;
+		}
+	}
+
+	if (info->hidden_ssid) {
+		if ((err = wldev_iovar_setint(dev, "closednet", 1)) < 0)
+			WL_ERR(("failed to set hidden : %d\n", err));
+		WL_DBG(("hidden_ssid_enum_val: %d \n", info->hidden_ssid));
+	}
+
+	return err;
+}
+#endif
+
+static s32
+wl_cfg80211_parse_ies(u8 *ptr, u32 len, struct parsed_ies *ies)
+{
+	s32 err = BCME_OK;
+
+	memset(ies, 0, sizeof(struct parsed_ies));
+
+	/* find the WPSIE */
+	if ((ies->wps_ie = wl_cfgp2p_find_wpsie(ptr, len)) != NULL) {
+		WL_DBG(("WPSIE in beacon \n"));
+		ies->wps_ie_len = ies->wps_ie->length + WPA_RSN_IE_TAG_FIXED_LEN;
+	} else {
+		WL_ERR(("No WPSIE in beacon \n"));
+	}
+
+	/* find the RSN_IE */
+	if ((ies->wpa2_ie = bcm_parse_tlvs(ptr, len,
+		DOT11_MNG_RSN_ID)) != NULL) {
+		WL_DBG((" WPA2 IE found\n"));
+		ies->wpa2_ie_len = ies->wpa2_ie->len;
+	}
+
+	/* find the WPA_IE */
+	if ((ies->wpa_ie = wl_cfgp2p_find_wpaie(ptr, len)) != NULL) {
+		WL_DBG((" WPA found\n"));
+		ies->wpa_ie_len = ies->wpa_ie->length;
+	}
+
+	return err;
+
+}
+
+static s32
+wl_cfg80211_bcn_bringup_ap(
+	struct net_device *dev,
+	struct parsed_ies *ies,
+	u32 dev_role, s32 bssidx)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	struct wl_join_params join_params;
+	bool is_bssup = false;
+	s32 infra = 1;
+	s32 join_params_size = 0;
+	s32 ap = 1;
+#ifdef DISABLE_11H_SOFTAP
+	s32 spect = 0;
+#endif /* DISABLE_11H_SOFTAP */
+#ifdef MAX_GO_CLIENT_CNT
+	s32 bss_maxassoc = MAX_GO_CLIENT_CNT;
+#endif
+	s32 err = BCME_OK;
+
+	WL_DBG(("Enter dev_role: %d\n", dev_role));
+
+	/* Common code for SoftAP and P2P GO */
+	wldev_iovar_setint(dev, "mpc", 0);
+
+	if (dev_role == NL80211_IFTYPE_P2P_GO) {
+		is_bssup = wl_cfgp2p_bss_isup(dev, bssidx);
+		if (!is_bssup && (ies->wpa2_ie != NULL)) {
+
+			err = wldev_ioctl(dev, WLC_SET_INFRA, &infra, sizeof(s32), true);
+			if (err < 0) {
+				WL_ERR(("SET INFRA error %d\n", err));
+				goto exit;
+			}
+
+			err = wldev_iovar_setbuf_bsscfg(dev, "ssid", &cfg->p2p->ssid,
+				sizeof(cfg->p2p->ssid), cfg->ioctl_buf, WLC_IOCTL_MAXLEN,
+				bssidx, &cfg->ioctl_buf_sync);
+			if (err < 0) {
+				WL_ERR(("GO SSID setting error %d\n", err));
+				goto exit;
+			}
+
+			/* Do abort scan before creating GO */
+			wl_cfg80211_scan_abort(cfg);
+
+			if ((err = wl_cfgp2p_bss(cfg, dev, bssidx, 1)) < 0) {
+				WL_ERR(("GO Bring up error %d\n", err));
+				goto exit;
+			}
+#ifdef MAX_GO_CLIENT_CNT
+			err = wldev_iovar_setint_bsscfg(dev, "bss_maxassoc", bss_maxassoc, bssidx);
+			if (unlikely(err)) {
+				WL_ERR(("bss_maxassoc error (%d)\n", err));
+				goto exit;
+			}
+#endif
+		} else
+			WL_DBG(("Bss is already up\n"));
+	} else if ((dev_role == NL80211_IFTYPE_AP) &&
+		(wl_get_drv_status(cfg, AP_CREATING, dev))) {
+		/* Device role SoftAP */
+		err = wldev_ioctl(dev, WLC_DOWN, &ap, sizeof(s32), true);
+		if (err < 0) {
+			WL_ERR(("WLC_DOWN error %d\n", err));
+			goto exit;
+		}
+		err = wldev_ioctl(dev, WLC_SET_INFRA, &infra, sizeof(s32), true);
+		if (err < 0) {
+			WL_ERR(("SET INFRA error %d\n", err));
+			goto exit;
+		}
+		if ((err = wldev_ioctl(dev, WLC_SET_AP, &ap, sizeof(s32), true)) < 0) {
+			WL_ERR(("setting AP mode failed %d \n", err));
+			goto exit;
+		}
+#ifdef DISABLE_11H_SOFTAP
+		err = wldev_ioctl(dev, WLC_SET_SPECT_MANAGMENT, &spect, sizeof(s32), true);
+		if (err < 0) {
+			WL_ERR(("SET SPECT_MANAGMENT error %d\n", err));
+			goto exit;
+		}
+#endif /* DISABLE_11H_SOFTAP */
+
+		err = wldev_ioctl(dev, WLC_UP, &ap, sizeof(s32), true);
+		if (unlikely(err)) {
+			WL_ERR(("WLC_UP error (%d)\n", err));
+			goto exit;
+		}
+
+		memset(&join_params, 0, sizeof(join_params));
+		/* join parameters starts with ssid */
+		join_params_size = sizeof(join_params.ssid);
+		memcpy(join_params.ssid.SSID, cfg->hostapd_ssid.SSID,
+			cfg->hostapd_ssid.SSID_len);
+		join_params.ssid.SSID_len = htod32(cfg->hostapd_ssid.SSID_len);
+
+		/* create softap */
+		if ((err = wldev_ioctl(dev, WLC_SET_SSID, &join_params,
+			join_params_size, true)) == 0) {
+			WL_DBG(("SoftAP set SSID (%s) success\n", join_params.ssid.SSID));
+			wl_clr_drv_status(cfg, AP_CREATING, dev);
+			wl_set_drv_status(cfg, AP_CREATED, dev);
+		}
+	}
+
+
+exit:
+	return err;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
+s32
+wl_cfg80211_parse_ap_ies(
+	struct net_device *dev,
+	struct cfg80211_beacon_data *info,
+	struct parsed_ies *ies)
+{
+	struct parsed_ies prb_ies;
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+	u8 *vndr = NULL;
+	u32 vndr_ie_len = 0;
+	s32 err = BCME_OK;
+
+	/* Parse Beacon IEs */
+	if (wl_cfg80211_parse_ies((u8 *)info->tail,
+		info->tail_len, ies) < 0) {
+		WL_ERR(("Beacon get IEs failed \n"));
+		err = -EINVAL;
+		goto fail;
+	}
+
+	vndr = (u8 *)info->proberesp_ies;
+	vndr_ie_len = info->proberesp_ies_len;
+
+	if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
+		/* SoftAP mode */
+		struct ieee80211_mgmt *mgmt;
+		mgmt = (struct ieee80211_mgmt *)info->probe_resp;
+		if (mgmt != NULL) {
+			vndr = (u8 *)&mgmt->u.probe_resp.variable;
+			vndr_ie_len = info->probe_resp_len -
+				offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
+		}
+	}
+
+	/* Parse Probe Response IEs */
+	if (wl_cfg80211_parse_ies(vndr, vndr_ie_len, &prb_ies) < 0) {
+		WL_ERR(("PROBE RESP get IEs failed \n"));
+		err = -EINVAL;
+	}
+
+fail:
+
+	return err;
+}
+
+s32
+wl_cfg80211_set_ies(
+	struct net_device *dev,
+	struct cfg80211_beacon_data *info,
+	s32 bssidx)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+	u8 *vndr = NULL;
+	u32 vndr_ie_len = 0;
+	s32 err = BCME_OK;
+
+	/* Set Beacon IEs to FW */
+	if ((err = wl_cfgp2p_set_management_ie(cfg, dev, bssidx,
+		VNDR_IE_BEACON_FLAG, (u8 *)info->tail,
+		info->tail_len)) < 0) {
+		WL_ERR(("Set Beacon IE Failed \n"));
+	} else {
+		WL_DBG(("Applied Vndr IEs for Beacon \n"));
+	}
+
+	vndr = (u8 *)info->proberesp_ies;
+	vndr_ie_len = info->proberesp_ies_len;
+
+	if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
+		/* SoftAP mode */
+		struct ieee80211_mgmt *mgmt;
+		mgmt = (struct ieee80211_mgmt *)info->probe_resp;
+		if (mgmt != NULL) {
+			vndr = (u8 *)&mgmt->u.probe_resp.variable;
+			vndr_ie_len = info->probe_resp_len -
+				offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
+		}
+	}
+
+	/* Set Probe Response IEs to FW */
+	if ((err = wl_cfgp2p_set_management_ie(cfg, dev, bssidx,
+		VNDR_IE_PRBRSP_FLAG, vndr, vndr_ie_len)) < 0) {
+		WL_ERR(("Set Probe Resp IE Failed \n"));
+	} else {
+		WL_DBG(("Applied Vndr IEs for Probe Resp \n"));
+	}
+
+	return err;
+}
+#endif
+
+static s32 wl_cfg80211_hostapd_sec(
+	struct net_device *dev,
+	struct parsed_ies *ies,
+	s32 bssidx)
+{
+	bool update_bss = 0;
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+
+
+	if (ies->wps_ie) {
+		if (cfg->ap_info->wps_ie &&
+			memcmp(cfg->ap_info->wps_ie, ies->wps_ie, ies->wps_ie_len)) {
+			WL_DBG((" WPS IE is changed\n"));
+			kfree(cfg->ap_info->wps_ie);
+			cfg->ap_info->wps_ie = kmemdup(ies->wps_ie, ies->wps_ie_len, GFP_KERNEL);
+		} else if (cfg->ap_info->wps_ie == NULL) {
+			WL_DBG((" WPS IE is added\n"));
+			cfg->ap_info->wps_ie = kmemdup(ies->wps_ie, ies->wps_ie_len, GFP_KERNEL);
+		}
+
+		if ((ies->wpa_ie != NULL || ies->wpa2_ie != NULL)) {
+			if (!cfg->ap_info->security_mode) {
+				/* change from open mode to security mode */
+				update_bss = true;
+				if (ies->wpa_ie != NULL) {
+					cfg->ap_info->wpa_ie = kmemdup(ies->wpa_ie,
+					ies->wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN,
+					GFP_KERNEL);
+				} else {
+					cfg->ap_info->rsn_ie = kmemdup(ies->wpa2_ie,
+					ies->wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN,
+					GFP_KERNEL);
+				}
+			} else if (cfg->ap_info->wpa_ie) {
+				/* change from WPA2 mode to WPA mode */
+				if (ies->wpa_ie != NULL) {
+					update_bss = true;
+					kfree(cfg->ap_info->rsn_ie);
+					cfg->ap_info->rsn_ie = NULL;
+					cfg->ap_info->wpa_ie = kmemdup(ies->wpa_ie,
+					ies->wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN,
+					GFP_KERNEL);
+				} else if (memcmp(cfg->ap_info->rsn_ie,
+					ies->wpa2_ie, ies->wpa2_ie->len
+					+ WPA_RSN_IE_TAG_FIXED_LEN)) {
+					update_bss = true;
+					kfree(cfg->ap_info->rsn_ie);
+					cfg->ap_info->rsn_ie = kmemdup(ies->wpa2_ie,
+					ies->wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN,
+					GFP_KERNEL);
+					cfg->ap_info->wpa_ie = NULL;
+				}
+			}
+			if (update_bss) {
+				cfg->ap_info->security_mode = true;
+				wl_cfgp2p_bss(cfg, dev, bssidx, 0);
+				if (wl_validate_wpa2ie(dev, ies->wpa2_ie, bssidx)  < 0 ||
+					wl_validate_wpaie(dev, ies->wpa_ie, bssidx) < 0) {
+					return BCME_ERROR;
+				}
+				wl_cfgp2p_bss(cfg, dev, bssidx, 1);
+			}
+		}
+	} else {
+		WL_ERR(("No WPSIE in beacon \n"));
+	}
+	return 0;
+}
+
+#if defined(WL_SUPPORT_BACKPORTED_KPATCHES) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, \
+	2, 0))
+static s32
+wl_cfg80211_del_station(
+	struct wiphy *wiphy,
+	struct net_device *ndev,
+	u8* mac_addr)
+{
+	struct net_device *dev;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	scb_val_t scb_val;
+	s8 eabuf[ETHER_ADDR_STR_LEN];
+	int err;
+	char mac_buf[MAX_NUM_OF_ASSOCIATED_DEV *
+		sizeof(struct ether_addr) + sizeof(uint)] = {0};
+	struct maclist *assoc_maclist = (struct maclist *)mac_buf;
+	int num_associated = 0;
+
+	WL_DBG(("Entry\n"));
+	if (mac_addr == NULL) {
+		WL_DBG(("mac_addr is NULL ignore it\n"));
+		return 0;
+	}
+
+	dev = ndev_to_wlc_ndev(ndev, cfg);
+
+	if (p2p_is_on(cfg)) {
+		/* Suspend P2P discovery search-listen to prevent it from changing the
+		 * channel.
+		 */
+		if ((wl_cfgp2p_discover_enable_search(cfg, false)) < 0) {
+			WL_ERR(("Can not disable discovery mode\n"));
+			return -EFAULT;
+		}
+	}
+
+	assoc_maclist->count = MAX_NUM_OF_ASSOCIATED_DEV;
+	err = wldev_ioctl(ndev, WLC_GET_ASSOCLIST,
+		assoc_maclist, sizeof(mac_buf), false);
+	if (err < 0)
+		WL_ERR(("WLC_GET_ASSOCLIST error %d\n", err));
+	else
+		num_associated = assoc_maclist->count;
+
+	memcpy(scb_val.ea.octet, mac_addr, ETHER_ADDR_LEN);
+	scb_val.val = DOT11_RC_DEAUTH_LEAVING;
+	err = wldev_ioctl(dev, WLC_SCB_DEAUTHENTICATE_FOR_REASON, &scb_val,
+		sizeof(scb_val_t), true);
+	if (err < 0)
+		WL_ERR(("WLC_SCB_DEAUTHENTICATE_FOR_REASON err %d\n", err));
+	WL_ERR(("Disconnect STA : %s scb_val.val %d\n",
+		bcm_ether_ntoa((const struct ether_addr *)mac_addr, eabuf),
+		scb_val.val));
+
+	if (num_associated > 0 && ETHER_ISBCAST(mac_addr))
+		wl_delay(400);
+
+	return 0;
+}
+
+static s32
+wl_cfg80211_change_station(
+	struct wiphy *wiphy,
+	struct net_device *dev,
+	u8 *mac,
+	struct station_parameters *params)
+{
+	int err;
+
+	WL_DBG(("SCB_AUTHORIZE mac_addr:"MACDBG" sta_flags_mask:0x%x "
+				"sta_flags_set:0x%x iface:%s \n", MAC2STRDBG(mac),
+				params->sta_flags_mask, params->sta_flags_set, dev->name));
+
+	/* Processing only authorize/de-authorize flag for now */
+	if (!(params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED))) {
+		WL_ERR(("WLC_SCB_AUTHORIZE sta_flags_mask not set \n"));
+		return -ENOTSUPP;
+	}
+
+	if (!(params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED))) {
+		err = wldev_ioctl(dev, WLC_SCB_DEAUTHORIZE, mac, ETH_ALEN, true);
+		if (err)
+			WL_ERR(("WLC_SCB_DEAUTHORIZE error (%d)\n", err));
+		return err;
+	}
+
+	err = wldev_ioctl(dev, WLC_SCB_AUTHORIZE, mac, ETH_ALEN, true);
+	if (err)
+		WL_ERR(("WLC_SCB_AUTHORIZE error (%d)\n", err));
+	return err;
+}
+#endif /* WL_SUPPORT_BACKPORTED_KPATCHES || KERNEL_VER >= KERNEL_VERSION(3, 2, 0)) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
+static s32
+wl_cfg80211_start_ap(
+	struct wiphy *wiphy,
+	struct net_device *dev,
+	struct cfg80211_ap_settings *info)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	s32 err = BCME_OK;
+	struct parsed_ies ies;
+	s32 bssidx = 0;
+	u32 dev_role = 0;
+
+	WL_DBG(("Enter \n"));
+	if (dev == bcmcfg_to_prmry_ndev(cfg)) {
+		WL_DBG(("Start AP req on primary iface: Softap\n"));
+		dev_role = NL80211_IFTYPE_AP;
+		if (!cfg->ap_info) {
+			if ((cfg->ap_info = kzalloc(sizeof(struct ap_info), GFP_KERNEL))) {
+				WL_ERR(("%s: struct ap_info re-allocated\n", __FUNCTION__));
+			} else {
+				WL_ERR(("%s: struct ap_info re-allocation failed\n", __FUNCTION__));
+				err = -ENOMEM;
+				goto fail;
+			}
+		}
+	}
+#if defined(WL_ENABLE_P2P_IF)
+	else if (dev == cfg->p2p_net) {
+		/* Group Add request on p2p0 */
+		WL_DBG(("Start AP req on P2P iface: GO\n"));
+		dev = bcmcfg_to_prmry_ndev(cfg);
+		dev_role = NL80211_IFTYPE_P2P_GO;
+	}
+#endif /* WL_ENABLE_P2P_IF */
+	if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+		WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+		return BCME_ERROR;
+	}
+	if (p2p_is_on(cfg) &&
+		(bssidx == wl_to_p2p_bss_bssidx(cfg,
+		P2PAPI_BSSCFG_CONNECTION))) {
+		dev_role = NL80211_IFTYPE_P2P_GO;
+		WL_DBG(("Start AP req on P2P connection iface\n"));
+	}
+
+	if (!check_dev_role_integrity(cfg, dev_role))
+		goto fail;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
+	if ((err = wl_cfg80211_set_channel(wiphy, dev,
+		dev->ieee80211_ptr->preset_chandef.chan,
+		NL80211_CHAN_HT20) < 0)) {
+		WL_ERR(("Set channel failed \n"));
+		goto fail;
+	}
+#endif
+
+	if ((err = wl_cfg80211_bcn_set_params(info, dev,
+		dev_role, bssidx)) < 0) {
+		WL_ERR(("Beacon params set failed \n"));
+		goto fail;
+	}
+
+	/* Parse IEs */
+	if ((err = wl_cfg80211_parse_ap_ies(dev, &info->beacon, &ies)) < 0) {
+		WL_ERR(("Set IEs failed \n"));
+		goto fail;
+	}
+
+	if ((wl_cfg80211_bcn_validate_sec(dev, &ies,
+		dev_role, bssidx)) < 0)
+	{
+		WL_ERR(("Beacon set security failed \n"));
+		goto fail;
+	}
+
+	if ((err = wl_cfg80211_bcn_bringup_ap(dev, &ies,
+		dev_role, bssidx)) < 0) {
+		WL_ERR(("Beacon bring up AP/GO failed \n"));
+		goto fail;
+	}
+
+	WL_DBG(("** AP/GO Created **\n"));
+
+#ifdef WL_CFG80211_ACL
+	/* Enfoce Admission Control. */
+	if ((err = wl_cfg80211_set_mac_acl(wiphy, dev, info->acl)) < 0) {
+		WL_ERR(("Set ACL failed\n"));
+	}
+#endif /* WL_CFG80211_ACL */
+
+	/* Set IEs to FW */
+	if ((err = wl_cfg80211_set_ies(dev, &info->beacon, bssidx)) < 0)
+		WL_ERR(("Set IEs failed \n"));
+
+	/* Enable Probe Req filter, WPS-AP certification 4.2.13 */
+	if ((dev_role == NL80211_IFTYPE_AP) && (ies.wps_ie != NULL)) {
+		bool pbc = 0;
+		wl_validate_wps_ie((char *) ies.wps_ie, ies.wps_ie_len, &pbc);
+		if (pbc) {
+			WL_DBG(("set WLC_E_PROBREQ_MSG\n"));
+			wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, true);
+		}
+	}
+
+fail:
+	if (err) {
+		WL_ERR(("ADD/SET beacon failed\n"));
+		wldev_iovar_setint(dev, "mpc", 1);
+	}
+
+	return err;
+}
+
+static s32
+wl_cfg80211_stop_ap(
+	struct wiphy *wiphy,
+	struct net_device *dev)
+{
+	int err = 0;
+	u32 dev_role = 0;
+	int infra = 0;
+	int ap = 0;
+	s32 bssidx = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+
+	WL_DBG(("Enter \n"));
+	if (dev == bcmcfg_to_prmry_ndev(cfg)) {
+		dev_role = NL80211_IFTYPE_AP;
+	}
+#if defined(WL_ENABLE_P2P_IF)
+	else if (dev == cfg->p2p_net) {
+		/* Group Add request on p2p0 */
+		dev = bcmcfg_to_prmry_ndev(cfg);
+		dev_role = NL80211_IFTYPE_P2P_GO;
+	}
+#endif /* WL_ENABLE_P2P_IF */
+	if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+		WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+		return BCME_ERROR;
+	}
+	if (p2p_is_on(cfg) &&
+		(bssidx == wl_to_p2p_bss_bssidx(cfg,
+		P2PAPI_BSSCFG_CONNECTION))) {
+		dev_role = NL80211_IFTYPE_P2P_GO;
+	}
+
+	if (!check_dev_role_integrity(cfg, dev_role))
+		goto exit;
+
+	if (dev_role == NL80211_IFTYPE_AP) {
+		/* SoftAp on primary Interface.
+		 * Shut down AP and turn on MPC
+		 */
+		if ((err = wldev_ioctl(dev, WLC_SET_AP, &ap, sizeof(s32), true)) < 0) {
+			WL_ERR(("setting AP mode failed %d \n", err));
+			err = -ENOTSUPP;
+			goto exit;
+		}
+		err = wldev_ioctl(dev, WLC_SET_INFRA, &infra, sizeof(s32), true);
+		if (err < 0) {
+			WL_ERR(("SET INFRA error %d\n", err));
+			err = -ENOTSUPP;
+			goto exit;
+		}
+
+		err = wldev_ioctl(dev, WLC_UP, &ap, sizeof(s32), true);
+		if (unlikely(err)) {
+			WL_ERR(("WLC_UP error (%d)\n", err));
+			err = -EINVAL;
+			goto exit;
+		}
+
+		wl_clr_drv_status(cfg, AP_CREATED, dev);
+		/* Turn on the MPC */
+		wldev_iovar_setint(dev, "mpc", 1);
+		if (cfg->ap_info) {
+			kfree(cfg->ap_info->wpa_ie);
+			kfree(cfg->ap_info->rsn_ie);
+			kfree(cfg->ap_info->wps_ie);
+			kfree(cfg->ap_info);
+			cfg->ap_info = NULL;
+		}
+	} else {
+		WL_DBG(("Stopping P2P GO \n"));
+		DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE((dhd_pub_t *)(cfg->pub),
+			DHD_EVENT_TIMEOUT_MS*3);
+		DHD_OS_WAKE_LOCK_TIMEOUT((dhd_pub_t *)(cfg->pub));
+	}
+
+exit:
+	return err;
+}
+
+static s32
+wl_cfg80211_change_beacon(
+	struct wiphy *wiphy,
+	struct net_device *dev,
+	struct cfg80211_beacon_data *info)
+{
+	s32 err = BCME_OK;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct parsed_ies ies;
+	u32 dev_role = 0;
+	s32 bssidx = 0;
+	bool pbc = 0;
+
+	WL_DBG(("Enter \n"));
+
+	if (dev == bcmcfg_to_prmry_ndev(cfg)) {
+		dev_role = NL80211_IFTYPE_AP;
+	}
+#if defined(WL_ENABLE_P2P_IF)
+	else if (dev == cfg->p2p_net) {
+		/* Group Add request on p2p0 */
+		dev = bcmcfg_to_prmry_ndev(cfg);
+		dev_role = NL80211_IFTYPE_P2P_GO;
+	}
+#endif /* WL_ENABLE_P2P_IF */
+	if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+		WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+		return BCME_ERROR;
+	}
+	if (p2p_is_on(cfg) &&
+		(bssidx == wl_to_p2p_bss_bssidx(cfg,
+		P2PAPI_BSSCFG_CONNECTION))) {
+		dev_role = NL80211_IFTYPE_P2P_GO;
+	}
+
+	if (!check_dev_role_integrity(cfg, dev_role))
+		goto fail;
+
+	if ((dev_role == NL80211_IFTYPE_P2P_GO) && (cfg->p2p_wdev == NULL)) {
+		WL_ERR(("P2P already down status!\n"));
+		err = BCME_ERROR;
+		goto fail;
+	}
+
+	/* Parse IEs */
+	if ((err = wl_cfg80211_parse_ap_ies(dev, info, &ies)) < 0) {
+		WL_ERR(("Parse IEs failed \n"));
+		goto fail;
+	}
+
+	/* Set IEs to FW */
+	if ((err = wl_cfg80211_set_ies(dev, info, bssidx)) < 0) {
+		WL_ERR(("Set IEs failed \n"));
+		goto fail;
+	}
+
+	if (dev_role == NL80211_IFTYPE_AP) {
+		if (wl_cfg80211_hostapd_sec(dev, &ies, bssidx) < 0) {
+			WL_ERR(("Hostapd update sec failed \n"));
+			err = -EINVAL;
+			goto fail;
+		}
+		/* Enable Probe Req filter, WPS-AP certification 4.2.13 */
+		if ((dev_role == NL80211_IFTYPE_AP) && (ies.wps_ie != NULL)) {
+			wl_validate_wps_ie((char *) ies.wps_ie, ies.wps_ie_len, &pbc);
+			WL_DBG((" WPS AP, wps_ie is exists pbc=%d\n", pbc));
+			if (pbc)
+				wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, true);
+			else
+				wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, false);
+		}
+	}
+
+fail:
+	return err;
+}
+#else
+static s32
+wl_cfg80211_add_set_beacon(struct wiphy *wiphy, struct net_device *dev,
+	struct beacon_parameters *info)
+{
+	s32 err = BCME_OK;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	s32 ie_offset = 0;
+	s32 bssidx = 0;
+	u32 dev_role = NL80211_IFTYPE_AP;
+	struct parsed_ies ies;
+	bcm_tlv_t *ssid_ie;
+	bool pbc = 0;
+	WL_DBG(("interval (%d) dtim_period (%d) head_len (%d) tail_len (%d)\n",
+		info->interval, info->dtim_period, info->head_len, info->tail_len));
+
+	if (dev == bcmcfg_to_prmry_ndev(cfg)) {
+		dev_role = NL80211_IFTYPE_AP;
+	}
+#if defined(WL_ENABLE_P2P_IF)
+	else if (dev == cfg->p2p_net) {
+		/* Group Add request on p2p0 */
+		dev = bcmcfg_to_prmry_ndev(cfg);
+		dev_role = NL80211_IFTYPE_P2P_GO;
+	}
+#endif /* WL_ENABLE_P2P_IF */
+	if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+		WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+		return BCME_ERROR;
+	}
+	if (p2p_is_on(cfg) &&
+		(bssidx == wl_to_p2p_bss_bssidx(cfg,
+		P2PAPI_BSSCFG_CONNECTION))) {
+		dev_role = NL80211_IFTYPE_P2P_GO;
+	}
+
+	if (!check_dev_role_integrity(cfg, dev_role))
+		goto fail;
+
+	if ((dev_role == NL80211_IFTYPE_P2P_GO) && (cfg->p2p_wdev == NULL)) {
+		WL_ERR(("P2P already down status!\n"));
+		err = BCME_ERROR;
+		goto fail;
+	}
+
+	ie_offset = DOT11_MGMT_HDR_LEN + DOT11_BCN_PRB_FIXED_LEN;
+	/* find the SSID */
+	if ((ssid_ie = bcm_parse_tlvs((u8 *)&info->head[ie_offset],
+		info->head_len - ie_offset,
+		DOT11_MNG_SSID_ID)) != NULL) {
+		if (dev_role == NL80211_IFTYPE_AP) {
+			/* Store the hostapd SSID */
+			memset(&cfg->hostapd_ssid.SSID[0], 0x00, 32);
+			memcpy(&cfg->hostapd_ssid.SSID[0], ssid_ie->data, ssid_ie->len);
+			cfg->hostapd_ssid.SSID_len = ssid_ie->len;
+		} else {
+				/* P2P GO */
+			memset(&cfg->p2p->ssid.SSID[0], 0x00, 32);
+			memcpy(cfg->p2p->ssid.SSID, ssid_ie->data, ssid_ie->len);
+			cfg->p2p->ssid.SSID_len = ssid_ie->len;
+		}
+	}
+
+	if (wl_cfg80211_parse_ies((u8 *)info->tail,
+		info->tail_len, &ies) < 0) {
+		WL_ERR(("Beacon get IEs failed \n"));
+		err = -EINVAL;
+		goto fail;
+	}
+
+	if (wl_cfgp2p_set_management_ie(cfg, dev, bssidx,
+		VNDR_IE_BEACON_FLAG, (u8 *)info->tail,
+		info->tail_len) < 0) {
+		WL_ERR(("Beacon set IEs failed \n"));
+		goto fail;
+	} else {
+		WL_DBG(("Applied Vndr IEs for Beacon \n"));
+	}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
+	if (wl_cfgp2p_set_management_ie(cfg, dev, bssidx,
+		VNDR_IE_PRBRSP_FLAG, (u8 *)info->proberesp_ies,
+		info->proberesp_ies_len) < 0) {
+		WL_ERR(("ProbeRsp set IEs failed \n"));
+		goto fail;
+	} else {
+		WL_DBG(("Applied Vndr IEs for ProbeRsp \n"));
+	}
+#endif
+
+	if (!wl_cfgp2p_bss_isup(dev, bssidx) &&
+		(wl_cfg80211_bcn_validate_sec(dev, &ies, dev_role, bssidx) < 0))
+	{
+		WL_ERR(("Beacon set security failed \n"));
+		goto fail;
+	}
+
+	/* Set BI and DTIM period */
+	if (info->interval) {
+		if ((err = wldev_ioctl(dev, WLC_SET_BCNPRD,
+			&info->interval, sizeof(s32), true)) < 0) {
+			WL_ERR(("Beacon Interval Set Error, %d\n", err));
+			return err;
+		}
+	}
+	if (info->dtim_period) {
+		if ((err = wldev_ioctl(dev, WLC_SET_DTIMPRD,
+			&info->dtim_period, sizeof(s32), true)) < 0) {
+			WL_ERR(("DTIM Interval Set Error, %d\n", err));
+			return err;
+		}
+	}
+
+	if (wl_cfg80211_bcn_bringup_ap(dev, &ies, dev_role, bssidx) < 0) {
+		WL_ERR(("Beacon bring up AP/GO failed \n"));
+		goto fail;
+	}
+
+	if (wl_get_drv_status(cfg, AP_CREATED, dev)) {
+		/* Soft AP already running. Update changed params */
+		if (wl_cfg80211_hostapd_sec(dev, &ies, bssidx) < 0) {
+			WL_ERR(("Hostapd update sec failed \n"));
+			err = -EINVAL;
+			goto fail;
+		}
+	}
+
+	/* Enable Probe Req filter */
+	if (((dev_role == NL80211_IFTYPE_P2P_GO) ||
+		(dev_role == NL80211_IFTYPE_AP)) && (ies.wps_ie != NULL)) {
+		wl_validate_wps_ie((char *) ies.wps_ie, ies.wps_ie_len, &pbc);
+		if (pbc)
+			wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, true);
+	}
+
+	WL_DBG(("** ADD/SET beacon done **\n"));
+
+fail:
+	if (err) {
+		WL_ERR(("ADD/SET beacon failed\n"));
+		wldev_iovar_setint(dev, "mpc", 1);
+	}
+	return err;
+
+}
+#endif
+
+#ifdef WL_SCHED_SCAN
+#define PNO_TIME		30
+#define PNO_REPEAT		4
+#define PNO_FREQ_EXPO_MAX	2
+static bool
+is_ssid_in_list(struct cfg80211_ssid *ssid, struct cfg80211_ssid *ssid_list, int count)
+{
+	int i;
+
+	if (!ssid || !ssid_list)
+		return FALSE;
+
+	for (i = 0; i < count; i++) {
+		if (ssid->ssid_len == ssid_list[i].ssid_len) {
+			if (strncmp(ssid->ssid, ssid_list[i].ssid, ssid->ssid_len) == 0)
+				return TRUE;
+		}
+	}
+	return FALSE;
+}
+
+static int
+wl_cfg80211_sched_scan_start(struct wiphy *wiphy,
+                             struct net_device *dev,
+                             struct cfg80211_sched_scan_request *request)
+{
+	ushort pno_time = PNO_TIME;
+	int pno_repeat = PNO_REPEAT;
+	int pno_freq_expo_max = PNO_FREQ_EXPO_MAX;
+	wlc_ssid_ext_t ssids_local[MAX_PFN_LIST_COUNT];
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct cfg80211_ssid *ssid = NULL;
+	struct cfg80211_ssid *hidden_ssid_list = NULL;
+	int ssid_cnt = 0;
+	int i;
+	int ret = 0;
+
+	WL_DBG(("Enter \n"));
+	WL_ERR((">>> SCHED SCAN START\n"));
+	WL_PNO(("Enter n_match_sets:%d   n_ssids:%d \n",
+		request->n_match_sets, request->n_ssids));
+	WL_PNO(("ssids:%d pno_time:%d pno_repeat:%d pno_freq:%d \n",
+		request->n_ssids, pno_time, pno_repeat, pno_freq_expo_max));
+
+
+	if (!request || !request->n_ssids || !request->n_match_sets) {
+		WL_ERR(("Invalid sched scan req!! n_ssids:%d \n", request->n_ssids));
+		return -EINVAL;
+	}
+
+	memset(&ssids_local, 0, sizeof(ssids_local));
+
+	if (request->n_ssids > 0)
+		hidden_ssid_list = request->ssids;
+
+	for (i = 0; i < request->n_match_sets && ssid_cnt < MAX_PFN_LIST_COUNT; i++) {
+		ssid = &request->match_sets[i].ssid;
+		/* No need to include null ssid */
+		if (ssid->ssid_len) {
+			memcpy(ssids_local[ssid_cnt].SSID, ssid->ssid, ssid->ssid_len);
+			ssids_local[ssid_cnt].SSID_len = ssid->ssid_len;
+			if (is_ssid_in_list(ssid, hidden_ssid_list, request->n_ssids)) {
+				ssids_local[ssid_cnt].hidden = TRUE;
+				WL_PNO((">>> PNO hidden SSID (%s) \n", ssid->ssid));
+			} else {
+				ssids_local[ssid_cnt].hidden = FALSE;
+				WL_PNO((">>> PNO non-hidden SSID (%s) \n", ssid->ssid));
+			}
+			ssid_cnt++;
+		}
+	}
+
+	if (ssid_cnt) {
+		if ((ret = dhd_dev_pno_set_for_ssid(dev, ssids_local, ssid_cnt, pno_time,
+		        pno_repeat, pno_freq_expo_max, NULL, 0)) < 0) {
+			WL_ERR(("PNO setup failed!! ret=%d \n", ret));
+			return -EINVAL;
+		}
+		cfg->sched_scan_req = request;
+	} else {
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+wl_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+
+	WL_DBG(("Enter \n"));
+	WL_ERR((">>> SCHED SCAN STOP\n"));
+
+	if (dhd_dev_pno_stop_for_ssid(dev) < 0)
+		WL_ERR(("PNO Stop for SSID failed"));
+
+	if (cfg->scan_request && cfg->sched_scan_running) {
+		WL_PNO((">>> Sched scan running. Aborting it..\n"));
+		wl_notify_escan_complete(cfg, dev, true, true);
+	}
+
+	 cfg->sched_scan_req = NULL;
+	 cfg->sched_scan_running = FALSE;
+
+	return 0;
+}
+#endif /* WL_SCHED_SCAN */
+
+#ifdef WL_SUPPORT_ACS
+/*
+ * Currently the dump_obss IOVAR is returning string as output so we need to
+ * parse the output buffer in an unoptimized way. Going forward if we get the
+ * IOVAR output in binary format this method can be optimized
+ */
+static int wl_parse_dump_obss(char *buf, struct wl_dump_survey *survey)
+{
+	int i;
+	char *token;
+	char delim[] = " \n";
+
+	token = strsep(&buf, delim);
+	while (token != NULL) {
+		if (!strcmp(token, "OBSS")) {
+			for (i = 0; i < OBSS_TOKEN_IDX; i++)
+				token = strsep(&buf, delim);
+			survey->obss = simple_strtoul(token, NULL, 10);
+		}
+
+		if (!strcmp(token, "IBSS")) {
+			for (i = 0; i < IBSS_TOKEN_IDX; i++)
+				token = strsep(&buf, delim);
+			survey->ibss = simple_strtoul(token, NULL, 10);
+		}
+
+		if (!strcmp(token, "TXDur")) {
+			for (i = 0; i < TX_TOKEN_IDX; i++)
+				token = strsep(&buf, delim);
+			survey->tx = simple_strtoul(token, NULL, 10);
+		}
+
+		if (!strcmp(token, "Category")) {
+			for (i = 0; i < CTG_TOKEN_IDX; i++)
+				token = strsep(&buf, delim);
+			survey->no_ctg = simple_strtoul(token, NULL, 10);
+		}
+
+		if (!strcmp(token, "Packet")) {
+			for (i = 0; i < PKT_TOKEN_IDX; i++)
+				token = strsep(&buf, delim);
+			survey->no_pckt = simple_strtoul(token, NULL, 10);
+		}
+
+		if (!strcmp(token, "Opp(time):")) {
+			for (i = 0; i < IDLE_TOKEN_IDX; i++)
+				token = strsep(&buf, delim);
+			survey->idle = simple_strtoul(token, NULL, 10);
+		}
+
+		token = strsep(&buf, delim);
+	}
+
+	return 0;
+}
+
+static int wl_dump_obss(struct net_device *ndev, cca_msrmnt_query req,
+	struct wl_dump_survey *survey)
+{
+	cca_stats_n_flags *results;
+	char *buf;
+	int retry, err;
+
+	buf = kzalloc(sizeof(char) * WLC_IOCTL_MAXLEN, GFP_KERNEL);
+	if (unlikely(!buf)) {
+		WL_ERR(("%s: buf alloc failed\n", __func__));
+		return -ENOMEM;
+	}
+
+	retry = IOCTL_RETRY_COUNT;
+	while (retry--) {
+		err = wldev_iovar_getbuf(ndev, "dump_obss", &req, sizeof(req),
+			buf, WLC_IOCTL_MAXLEN, NULL);
+		if (err >=  0) {
+			break;
+		}
+		WL_DBG(("attempt = %d, err = %d, \n",
+			(IOCTL_RETRY_COUNT - retry), err));
+	}
+
+	if (retry <= 0)	{
+		WL_ERR(("failure, dump_obss IOVAR failed\n"));
+		err = -BCME_ERROR;
+		goto exit;
+	}
+
+	results = (cca_stats_n_flags *)(buf);
+	wl_parse_dump_obss(results->buf, survey);
+	kfree(buf);
+
+	return 0;
+exit:
+	kfree(buf);
+	return err;
+}
+
+static int wl_cfg80211_dump_survey(struct wiphy *wiphy, struct net_device *ndev,
+	int idx, struct survey_info *info)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct wl_dump_survey *survey;
+	struct ieee80211_supported_band *band;
+	struct ieee80211_channel*chan;
+	cca_msrmnt_query req;
+	int val, err, noise, retry;
+
+	dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+	if (!(dhd->op_mode & DHD_FLAG_HOSTAP_MODE)) {
+		return -ENOENT;
+	}
+	band = wiphy->bands[IEEE80211_BAND_2GHZ];
+	if (band && idx >= band->n_channels) {
+		idx -= band->n_channels;
+		band = NULL;
+	}
+
+	if (!band || idx >= band->n_channels) {
+		/* Move to 5G band */
+		band = wiphy->bands[IEEE80211_BAND_5GHZ];
+		if (idx >= band->n_channels) {
+			return -ENOENT;
+		}
+	}
+
+	chan = &band->channels[idx];
+	/* Setting current channel to the requested channel */
+	if ((err = wl_cfg80211_set_channel(wiphy, ndev, chan,
+		NL80211_CHAN_HT20) < 0)) {
+		WL_ERR(("Set channel failed \n"));
+	}
+
+	if (!idx) {
+		/* Disable mpc */
+		val = 0;
+		err = wldev_iovar_setbuf_bsscfg(ndev, "mpc", (void *)&val,
+			sizeof(val), cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0,
+			&cfg->ioctl_buf_sync);
+		if (err < 0) {
+			WL_ERR(("set 'mpc' failed, error = %d\n", err));
+		}
+
+		/* Set interface up, explicitly. */
+		val = 1;
+		err = wldev_ioctl(ndev, WLC_UP, (void *)&val, sizeof(val), true);
+		if (err < 0) {
+			WL_ERR(("set interface up failed, error = %d\n", err));
+		}
+	}
+
+	/* Get noise value */
+	retry = IOCTL_RETRY_COUNT;
+	while (retry--) {
+		err = wldev_ioctl(ndev, WLC_GET_PHY_NOISE, &noise,
+			sizeof(noise), false);
+		if (err >=  0) {
+			break;
+		}
+		WL_DBG(("attempt = %d, err = %d, \n",
+			(IOCTL_RETRY_COUNT - retry), err));
+	}
+
+	if (retry <= 0)	{
+		WL_ERR(("Get Phy Noise failed, error = %d\n", err));
+		noise = CHAN_NOISE_DUMMY;
+	}
+
+	survey = (struct wl_dump_survey *) kzalloc(sizeof(struct wl_dump_survey),
+		GFP_KERNEL);
+	if (unlikely(!survey)) {
+		WL_ERR(("%s: alloc failed\n", __func__));
+		return -ENOMEM;
+	}
+
+	/* Start Measurement for obss stats on current channel */
+	req.msrmnt_query = 0;
+	req.time_req = ACS_MSRMNT_DELAY;
+	if ((err = wl_dump_obss(ndev, req, survey)) < 0) {
+		goto exit;
+	}
+
+	/*
+	 * Wait for the meaurement to complete, adding a buffer value of 10 to take
+	 * into consideration any delay in IOVAR completion
+	 */
+	msleep(ACS_MSRMNT_DELAY + 10);
+
+	/* Issue IOVAR to collect measurement results */
+	req.msrmnt_query = 1;
+	if ((err = wl_dump_obss(ndev, req, survey)) < 0) {
+		goto exit;
+	}
+
+	info->channel = chan;
+	info->noise = noise;
+	info->channel_time = ACS_MSRMNT_DELAY;
+	info->channel_time_busy = ACS_MSRMNT_DELAY - survey->idle;
+	info->channel_time_rx = survey->obss + survey->ibss + survey->no_ctg +
+		survey->no_pckt;
+	info->channel_time_tx = survey->tx;
+	info->filled = SURVEY_INFO_NOISE_DBM |SURVEY_INFO_CHANNEL_TIME |
+		SURVEY_INFO_CHANNEL_TIME_BUSY |	SURVEY_INFO_CHANNEL_TIME_RX |
+		SURVEY_INFO_CHANNEL_TIME_TX;
+	kfree(survey);
+
+	return 0;
+exit:
+	kfree(survey);
+	return err;
+}
+#endif /* WL_SUPPORT_ACS */
+
+static struct cfg80211_ops wl_cfg80211_ops = {
+	.add_virtual_intf = wl_cfg80211_add_virtual_iface,
+	.del_virtual_intf = wl_cfg80211_del_virtual_iface,
+	.change_virtual_intf = wl_cfg80211_change_virtual_iface,
+#if defined(WL_CFG80211_P2P_DEV_IF)
+	.start_p2p_device = wl_cfgp2p_start_p2p_device,
+	.stop_p2p_device = wl_cfgp2p_stop_p2p_device,
+#endif /* WL_CFG80211_P2P_DEV_IF */
+	.scan = wl_cfg80211_scan,
+	.set_wiphy_params = wl_cfg80211_set_wiphy_params,
+	.join_ibss = wl_cfg80211_join_ibss,
+	.leave_ibss = wl_cfg80211_leave_ibss,
+	.get_station = wl_cfg80211_get_station,
+	.set_tx_power = wl_cfg80211_set_tx_power,
+	.get_tx_power = wl_cfg80211_get_tx_power,
+	.add_key = wl_cfg80211_add_key,
+	.del_key = wl_cfg80211_del_key,
+	.get_key = wl_cfg80211_get_key,
+	.set_default_key = wl_cfg80211_config_default_key,
+	.set_default_mgmt_key = wl_cfg80211_config_default_mgmt_key,
+	.set_power_mgmt = wl_cfg80211_set_power_mgmt,
+	.connect = wl_cfg80211_connect,
+	.disconnect = wl_cfg80211_disconnect,
+	.suspend = wl_cfg80211_suspend,
+	.resume = wl_cfg80211_resume,
+	.set_pmksa = wl_cfg80211_set_pmksa,
+	.del_pmksa = wl_cfg80211_del_pmksa,
+	.flush_pmksa = wl_cfg80211_flush_pmksa,
+	.remain_on_channel = wl_cfg80211_remain_on_channel,
+	.cancel_remain_on_channel = wl_cfg80211_cancel_remain_on_channel,
+	.mgmt_tx = wl_cfg80211_mgmt_tx,
+	.mgmt_frame_register = wl_cfg80211_mgmt_frame_register,
+	.change_bss = wl_cfg80211_change_bss,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
+	.set_channel = wl_cfg80211_set_channel,
+#endif
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0))
+	.set_beacon = wl_cfg80211_add_set_beacon,
+	.add_beacon = wl_cfg80211_add_set_beacon,
+#else
+	.change_beacon = wl_cfg80211_change_beacon,
+	.start_ap = wl_cfg80211_start_ap,
+	.stop_ap = wl_cfg80211_stop_ap,
+#endif
+#ifdef WL_SCHED_SCAN
+	.sched_scan_start = wl_cfg80211_sched_scan_start,
+	.sched_scan_stop = wl_cfg80211_sched_scan_stop,
+#endif /* WL_SCHED_SCAN */
+#if defined(WL_SUPPORT_BACKPORTED_KPATCHES) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, \
+	2, 0))
+	.del_station = wl_cfg80211_del_station,
+	.change_station = wl_cfg80211_change_station,
+	.mgmt_tx_cancel_wait = wl_cfg80211_mgmt_tx_cancel_wait,
+#endif /* WL_SUPPORT_BACKPORTED_KPATCHES || KERNEL_VERSION >= (3,2,0) */
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0))
+	.tdls_oper = wl_cfg80211_tdls_oper,
+#endif
+#ifdef WL_SUPPORT_ACS
+	.dump_survey = wl_cfg80211_dump_survey,
+#endif /* WL_SUPPORT_ACS */
+#ifdef WL_CFG80211_ACL
+	.set_mac_acl = wl_cfg80211_set_mac_acl,
+#endif /* WL_CFG80211_ACL */
+};
+
+s32 wl_mode_to_nl80211_iftype(s32 mode)
+{
+	s32 err = 0;
+
+	switch (mode) {
+	case WL_MODE_BSS:
+		return NL80211_IFTYPE_STATION;
+	case WL_MODE_IBSS:
+		return NL80211_IFTYPE_ADHOC;
+	case WL_MODE_AP:
+		return NL80211_IFTYPE_AP;
+	default:
+		return NL80211_IFTYPE_UNSPECIFIED;
+	}
+
+	return err;
+}
+
+#ifdef CONFIG_CFG80211_INTERNAL_REGDB
+static int
+wl_cfg80211_reg_notifier(
+	struct wiphy *wiphy,
+	struct regulatory_request *request)
+{
+	struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)wiphy_priv(wiphy);
+	int ret = 0;
+
+	if (!request || !cfg) {
+		WL_ERR(("Invalid arg\n"));
+		return -EINVAL;
+	}
+
+	WL_DBG(("ccode: %c%c Initiator: %d\n",
+		request->alpha2[0], request->alpha2[1], request->initiator));
+
+	/* We support only REGDOM_SET_BY_USER as of now */
+	if ((request->initiator != NL80211_REGDOM_SET_BY_USER) &&
+		(request->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE)) {
+		WL_ERR(("reg_notifier for intiator:%d not supported : set default\n",
+			request->initiator));
+		/* in case of no supported country by regdb
+		     lets driver setup platform default Locale
+		*/
+	}
+
+	WL_ERR(("Set country code %c%c from %s\n",
+		request->alpha2[0], request->alpha2[1],
+		((request->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) ? " 11d AP" : "User")));
+
+	if ((ret = wldev_set_country(bcmcfg_to_prmry_ndev(cfg), request->alpha2,
+		false, (request->initiator == NL80211_REGDOM_SET_BY_USER ? true : false))) < 0) {
+		WL_ERR(("set country Failed :%d\n", ret));
+	}
+
+	return ret;
+}
+#endif /* CONFIG_CFG80211_INTERNAL_REGDB */
+
+#ifdef CONFIG_PM
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
+static const struct wiphy_wowlan_support brcm_wowlan_support = {
+	.flags = WIPHY_WOWLAN_ANY,
+};
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) */
+#endif /* CONFIG_PM */
+
+static s32 wl_setup_wiphy(struct wireless_dev *wdev, struct device *sdiofunc_dev, void *context)
+{
+	s32 err = 0;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
+	dhd_pub_t *dhd = (dhd_pub_t *)context;
+	BCM_REFERENCE(dhd);
+
+	if (!dhd) {
+		WL_ERR(("DHD is NULL!!"));
+		err = -ENODEV;
+		return err;
+	}
+#endif
+
+	wdev->wiphy =
+	    wiphy_new(&wl_cfg80211_ops, sizeof(struct bcm_cfg80211));
+	if (unlikely(!wdev->wiphy)) {
+		WL_ERR(("Couldn not allocate wiphy device\n"));
+		err = -ENOMEM;
+		return err;
+	}
+	set_wiphy_dev(wdev->wiphy, sdiofunc_dev);
+	wdev->wiphy->max_scan_ie_len = WL_SCAN_IE_LEN_MAX;
+	/* Report  how many SSIDs Driver can support per Scan request */
+	wdev->wiphy->max_scan_ssids = WL_SCAN_PARAMS_SSID_MAX;
+	wdev->wiphy->max_num_pmkids = WL_NUM_PMKIDS_MAX;
+#ifdef WL_SCHED_SCAN
+	wdev->wiphy->max_sched_scan_ssids = MAX_PFN_LIST_COUNT;
+	wdev->wiphy->max_match_sets = MAX_PFN_LIST_COUNT;
+	wdev->wiphy->max_sched_scan_ie_len = WL_SCAN_IE_LEN_MAX;
+	wdev->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
+#endif /* WL_SCHED_SCAN */
+	wdev->wiphy->interface_modes =
+		BIT(NL80211_IFTYPE_STATION)
+		| BIT(NL80211_IFTYPE_ADHOC)
+#if !defined(WL_ENABLE_P2P_IF) && !defined(WL_CFG80211_P2P_DEV_IF)
+		| BIT(NL80211_IFTYPE_MONITOR)
+#endif /* !WL_ENABLE_P2P_IF && !WL_CFG80211_P2P_DEV_IF */
+#if defined(WL_IFACE_COMB_NUM_CHANNELS) || defined(WL_CFG80211_P2P_DEV_IF)
+		| BIT(NL80211_IFTYPE_P2P_CLIENT)
+		| BIT(NL80211_IFTYPE_P2P_GO)
+#endif /* WL_IFACE_COMB_NUM_CHANNELS || WL_CFG80211_P2P_DEV_IF */
+#if defined(WL_CFG80211_P2P_DEV_IF)
+		| BIT(NL80211_IFTYPE_P2P_DEVICE)
+#endif /* WL_CFG80211_P2P_DEV_IF */
+		| BIT(NL80211_IFTYPE_AP);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) && \
+	(defined(WL_IFACE_COMB_NUM_CHANNELS) || defined(WL_CFG80211_P2P_DEV_IF))
+	WL_DBG(("Setting interface combinations for common mode\n"));
+	wdev->wiphy->iface_combinations = common_iface_combinations;
+	wdev->wiphy->n_iface_combinations =
+		ARRAY_SIZE(common_iface_combinations);
+#endif /* LINUX_VER >= 3.0 && (WL_IFACE_COMB_NUM_CHANNELS || WL_CFG80211_P2P_DEV_IF) */
+
+	wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz;
+
+	wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+	wdev->wiphy->cipher_suites = __wl_cipher_suites;
+	wdev->wiphy->n_cipher_suites = ARRAY_SIZE(__wl_cipher_suites);
+	wdev->wiphy->max_remain_on_channel_duration = 5000;
+	wdev->wiphy->mgmt_stypes = wl_cfg80211_default_mgmt_stypes;
+#ifndef WL_POWERSAVE_DISABLED
+	wdev->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
+#else
+	wdev->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+#endif				/* !WL_POWERSAVE_DISABLED */
+	wdev->wiphy->flags |= WIPHY_FLAG_NETNS_OK |
+		WIPHY_FLAG_4ADDR_AP |
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 39))
+		WIPHY_FLAG_SUPPORTS_SEPARATE_DEFAULT_KEYS |
+#endif
+		WIPHY_FLAG_4ADDR_STATION;
+#if (defined(ROAM_ENABLE) || defined(BCMFW_ROAM_ENABLE)) && (LINUX_VERSION_CODE >= \
+	KERNEL_VERSION(3, 2, 0))
+	/* Please use supplicant ver >= 76 if FW_ROAM is enabled
+	 * If driver advertises FW_ROAM, older supplicant wouldn't
+	 * send the BSSID & Freq in the connect req command. This
+	 * will delay the ASSOC as the FW need to do a full scan
+	 * before attempting to connect. Supplicant >=76 has patch
+	 * to allow bssid & freq to be sent down to driver even if
+	 * FW ROAM is advertised.
+	 */
+	wdev->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM;
+#endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0))
+	wdev->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
+		WIPHY_FLAG_OFFCHAN_TX;
+#endif
+#if defined(WL_SUPPORT_BACKPORTED_KPATCHES) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, \
+	4, 0))
+	/* From 3.4 kernel ownards AP_SME flag can be advertised
+	 * to remove the patch from supplicant
+	 */
+	wdev->wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME;
+
+#ifdef WL_CFG80211_ACL
+	/* Configure ACL capabilities. */
+	wdev->wiphy->max_acl_mac_addrs = MAX_NUM_MAC_FILT;
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
+	/* Supplicant distinguish between the SoftAP mode and other
+	 * modes (e.g. P2P, WPS, HS2.0) when it builds the probe
+	 * response frame from Supplicant MR1 and Kernel 3.4.0 or
+	 * later version. To add Vendor specific IE into the
+	 * probe response frame in case of SoftAP mode,
+	 * AP_PROBE_RESP_OFFLOAD flag is set to wiphy->flags variable.
+	 */
+	if (dhd_get_fw_mode(dhd->info) == DHD_FLAG_HOSTAP_MODE) {
+		wdev->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
+		wdev->wiphy->probe_resp_offload = 0;
+	}
+#endif
+#endif /* WL_SUPPORT_BACKPORTED_KPATCHES) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) */
+
+#ifdef CONFIG_CFG80211_INTERNAL_REGDB
+	wdev->wiphy->reg_notifier = wl_cfg80211_reg_notifier;
+#endif /* CONFIG_CFG80211_INTERNAL_REGDB */
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0))
+	wdev->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
+#endif
+
+#if defined(CONFIG_PM) && defined(WL_CFG80211_P2P_DEV_IF)
+	/*
+	 * From linux-3.10 kernel, wowlan packet filter is mandated to avoid the
+	 * disconnection of connected network before suspend. So a dummy wowlan
+	 * filter is configured for kernels linux-3.8 and above.
+	 */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
+	wdev->wiphy->wowlan = &brcm_wowlan_support;
+#else
+	wdev->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 10) */
+#endif /* CONFIG_PM && WL_CFG80211_P2P_DEV_IF */
+
+	WL_DBG(("Registering custom regulatory)\n"));
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+	wdev->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG;
+#else
+	wdev->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
+#endif
+	wiphy_apply_custom_regulatory(wdev->wiphy, &brcm_regdom);
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT)
+	WL_ERR(("Registering Vendor80211\n"));
+	err = wl_cfgvendor_attach(wdev->wiphy);
+	if (unlikely(err < 0)) {
+		WL_ERR(("Couldn not attach vendor commands (%d)\n", err));
+	}
+#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT) */
+
+
+	/* Now we can register wiphy with cfg80211 module */
+	err = wiphy_register(wdev->wiphy);
+	if (unlikely(err < 0)) {
+		WL_ERR(("Couldn not register wiphy device (%d)\n", err));
+		wiphy_free(wdev->wiphy);
+	}
+
+#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) && (LINUX_VERSION_CODE <= \
+	KERNEL_VERSION(3, 3, 0))) && defined(WL_IFACE_COMB_NUM_CHANNELS)
+	wdev->wiphy->flags &= ~WIPHY_FLAG_ENFORCE_COMBINATIONS;
+#endif
+
+	return err;
+}
+
+static void wl_free_wdev(struct bcm_cfg80211 *cfg)
+{
+	struct wireless_dev *wdev = cfg->wdev;
+	struct wiphy *wiphy;
+	if (!wdev) {
+		WL_ERR(("wdev is invalid\n"));
+		return;
+	}
+	wiphy = wdev->wiphy;
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT)
+	wl_cfgvendor_detach(wdev->wiphy);
+#endif /* if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT) */
+
+	wiphy_unregister(wdev->wiphy);
+	wdev->wiphy->dev.parent = NULL;
+
+	wl_delete_all_netinfo(cfg);
+	wiphy_free(wiphy);
+	/* PLEASE do NOT call any function after wiphy_free, the driver's private structure "cfg",
+	 * which is the private part of wiphy, has been freed in wiphy_free !!!!!!!!!!!
+	 */
+}
+
+static s32 wl_inform_bss(struct bcm_cfg80211 *cfg)
+{
+	struct wl_scan_results *bss_list;
+	struct wl_bss_info *bi = NULL;	/* must be initialized */
+	s32 err = 0;
+	s32 i;
+
+	bss_list = cfg->bss_list;
+	WL_DBG(("scanned AP count (%d)\n", bss_list->count));
+#ifdef ROAM_CHANNEL_CACHE
+	reset_roam_cache();
+#endif /* ROAM_CHANNEL_CACHE */
+	bi = next_bss(bss_list, bi);
+	for_each_bss(bss_list, bi, i) {
+#ifdef ROAM_CHANNEL_CACHE
+		add_roam_cache(bi);
+#endif /* ROAM_CHANNEL_CACHE */
+		err = wl_inform_single_bss(cfg, bi, false);
+		if (unlikely(err))
+			break;
+	}
+#ifdef ROAM_CHANNEL_CACHE
+	/* print_roam_cache(); */
+	update_roam_cache(cfg, ioctl_version);
+#endif /* ROAM_CHANNEL_CACHE */
+	return err;
+}
+
+static s32 wl_inform_single_bss(struct bcm_cfg80211 *cfg, struct wl_bss_info *bi, bool roam)
+{
+	struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+	struct ieee80211_mgmt *mgmt;
+	struct ieee80211_channel *channel;
+	struct ieee80211_supported_band *band;
+	struct wl_cfg80211_bss_info *notif_bss_info;
+	struct wl_scan_req *sr = wl_to_sr(cfg);
+	struct beacon_proberesp *beacon_proberesp;
+	struct cfg80211_bss *cbss = NULL;
+	s32 mgmt_type;
+	s32 signal;
+	u32 freq;
+	s32 err = 0;
+	gfp_t aflags;
+
+	if (unlikely(dtoh32(bi->length) > WL_BSS_INFO_MAX)) {
+		WL_DBG(("Beacon is larger than buffer. Discarding\n"));
+		return err;
+	}
+	aflags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL;
+	notif_bss_info = kzalloc(sizeof(*notif_bss_info) + sizeof(*mgmt)
+		- sizeof(u8) + WL_BSS_INFO_MAX, aflags);
+	if (unlikely(!notif_bss_info)) {
+		WL_ERR(("notif_bss_info alloc failed\n"));
+		return -ENOMEM;
+	}
+	mgmt = (struct ieee80211_mgmt *)notif_bss_info->frame_buf;
+	notif_bss_info->channel =
+		wf_chspec_ctlchan(wl_chspec_driver_to_host(bi->chanspec));
+
+	if (notif_bss_info->channel <= CH_MAX_2G_CHANNEL)
+		band = wiphy->bands[IEEE80211_BAND_2GHZ];
+	else
+		band = wiphy->bands[IEEE80211_BAND_5GHZ];
+	if (!band) {
+		WL_ERR(("No valid band"));
+		kfree(notif_bss_info);
+		return -EINVAL;
+	}
+	notif_bss_info->rssi = wl_rssi_offset(dtoh16(bi->RSSI));
+	memcpy(mgmt->bssid, &bi->BSSID, ETHER_ADDR_LEN);
+	mgmt_type = cfg->active_scan ?
+		IEEE80211_STYPE_PROBE_RESP : IEEE80211_STYPE_BEACON;
+	if (!memcmp(bi->SSID, sr->ssid.SSID, bi->SSID_len)) {
+	    mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | mgmt_type);
+	}
+	beacon_proberesp = cfg->active_scan ?
+		(struct beacon_proberesp *)&mgmt->u.probe_resp :
+		(struct beacon_proberesp *)&mgmt->u.beacon;
+	beacon_proberesp->timestamp = 0;
+	beacon_proberesp->beacon_int = cpu_to_le16(bi->beacon_period);
+	beacon_proberesp->capab_info = cpu_to_le16(bi->capability);
+	wl_rst_ie(cfg);
+	wl_update_hidden_ap_ie(bi, ((u8 *) bi) + bi->ie_offset, &bi->ie_length, roam);
+	wl_mrg_ie(cfg, ((u8 *) bi) + bi->ie_offset, bi->ie_length);
+	wl_cp_ie(cfg, beacon_proberesp->variable, WL_BSS_INFO_MAX -
+		offsetof(struct wl_cfg80211_bss_info, frame_buf));
+	notif_bss_info->frame_len = offsetof(struct ieee80211_mgmt,
+		u.beacon.variable) + wl_get_ielen(cfg);
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38)
+	freq = ieee80211_channel_to_frequency(notif_bss_info->channel);
+	(void)band->band;
+#else
+	freq = ieee80211_channel_to_frequency(notif_bss_info->channel, band->band);
+#endif
+	if (freq == 0) {
+		WL_ERR(("Invalid channel, fail to chcnage channel to freq\n"));
+		kfree(notif_bss_info);
+		return -EINVAL;
+	}
+	channel = ieee80211_get_channel(wiphy, freq);
+	if (unlikely(!channel)) {
+		WL_ERR(("ieee80211_get_channel error\n"));
+		kfree(notif_bss_info);
+		return -EINVAL;
+	}
+	WL_DBG(("SSID : \"%s\", rssi %d, channel %d, capability : 0x04%x, bssid %pM"
+			"mgmt_type %d frame_len %d\n", bi->SSID,
+			notif_bss_info->rssi, notif_bss_info->channel,
+			mgmt->u.beacon.capab_info, &bi->BSSID, mgmt_type,
+			notif_bss_info->frame_len));
+
+	signal = notif_bss_info->rssi * 100;
+	if (!mgmt->u.probe_resp.timestamp) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+		struct timespec ts;
+		get_monotonic_boottime(&ts);
+		mgmt->u.probe_resp.timestamp = ((u64)ts.tv_sec*1000000)
+				+ ts.tv_nsec / 1000;
+#else
+		struct timeval tv;
+		do_gettimeofday(&tv);
+		mgmt->u.probe_resp.timestamp = ((u64)tv.tv_sec*1000000)
+				+ tv.tv_usec;
+#endif
+	}
+
+
+	cbss = cfg80211_inform_bss_frame(wiphy, channel, mgmt,
+		le16_to_cpu(notif_bss_info->frame_len), signal, aflags);
+	if (unlikely(!cbss)) {
+		WL_ERR(("cfg80211_inform_bss_frame error\n"));
+		kfree(notif_bss_info);
+		return -EINVAL;
+	}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0))
+	cfg80211_put_bss(wiphy, cbss);
+#else
+	cfg80211_put_bss(cbss);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */
+	kfree(notif_bss_info);
+	return err;
+}
+
+static bool wl_is_linkup(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e, struct net_device *ndev)
+{
+	u32 event = ntoh32(e->event_type);
+	u32 status =  ntoh32(e->status);
+	u16 flags = ntoh16(e->flags);
+
+	WL_DBG(("event %d, status %d flags %x\n", event, status, flags));
+	if (event == WLC_E_SET_SSID) {
+		if (status == WLC_E_STATUS_SUCCESS) {
+			if (!wl_is_ibssmode(cfg, ndev))
+				return true;
+		}
+	} else if (event == WLC_E_LINK) {
+		if (flags & WLC_EVENT_MSG_LINK)
+			return true;
+	}
+
+	WL_DBG(("wl_is_linkup false\n"));
+	return false;
+}
+
+static bool wl_is_linkdown(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e)
+{
+	u32 event = ntoh32(e->event_type);
+	u16 flags = ntoh16(e->flags);
+
+	if (event == WLC_E_DEAUTH_IND ||
+	event == WLC_E_DISASSOC_IND ||
+	event == WLC_E_DISASSOC ||
+	event == WLC_E_DEAUTH) {
+#if (WL_DBG_LEVEL > 0)
+	WL_ERR(("Link down Reason : WLC_E_%s\n", wl_dbg_estr[event]));
+#endif /* (WL_DBG_LEVEL > 0) */
+		return true;
+	} else if (event == WLC_E_LINK) {
+		if (!(flags & WLC_EVENT_MSG_LINK)) {
+#if (WL_DBG_LEVEL > 0)
+	WL_ERR(("Link down Reason : WLC_E_%s\n", wl_dbg_estr[event]));
+#endif /* (WL_DBG_LEVEL > 0) */
+			return true;
+		}
+	}
+
+	return false;
+}
+
+static bool wl_is_nonetwork(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e)
+{
+	u32 event = ntoh32(e->event_type);
+	u32 status = ntoh32(e->status);
+
+	if (event == WLC_E_LINK && status == WLC_E_STATUS_NO_NETWORKS)
+		return true;
+	if (event == WLC_E_SET_SSID && status != WLC_E_STATUS_SUCCESS)
+		return true;
+
+	return false;
+}
+
+/* The mainline kernel >= 3.2.0 has support for indicating new/del station
+ * to AP/P2P GO via events. If this change is backported to kernel for which
+ * this driver is being built, then define WL_CFG80211_STA_EVENT. You
+ * should use this new/del sta event mechanism for BRCM supplicant >= 22.
+ */
+static s32
+wl_notify_connect_status_ap(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data)
+{
+	s32 err = 0;
+	u32 event = ntoh32(e->event_type);
+	u32 reason = ntoh32(e->reason);
+	u32 len = ntoh32(e->datalen);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0)) && !defined(WL_CFG80211_STA_EVENT)
+	bool isfree = false;
+	u8 *mgmt_frame;
+	u8 bsscfgidx = e->bsscfgidx;
+	s32 freq;
+	s32 channel;
+	u8 *body = NULL;
+	u16 fc = 0;
+
+	struct ieee80211_supported_band *band;
+	struct ether_addr da;
+	struct ether_addr bssid;
+	struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+	channel_info_t ci;
+#else
+	struct station_info sinfo;
+#endif
+
+	WL_DBG(("event %d status %d reason %d\n", event, ntoh32(e->status), reason));
+	/* if link down, bsscfg is disabled. */
+	if (event == WLC_E_LINK && reason == WLC_E_LINK_BSSCFG_DIS &&
+		wl_get_p2p_status(cfg, IF_DELETING) && (ndev != bcmcfg_to_prmry_ndev(cfg))) {
+		wl_add_remove_eventmsg(ndev, WLC_E_PROBREQ_MSG, false);
+		WL_INFORM(("AP mode link down !! \n"));
+		complete(&cfg->iface_disable);
+		return 0;
+	}
+
+	if (event == WLC_E_DISASSOC_IND || event == WLC_E_DEAUTH_IND || event == WLC_E_DEAUTH) {
+		WL_ERR(("event %s(%d) status %d reason %d\n",
+		bcmevent_get_name(event), event, ntoh32(e->status), reason));
+	}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0)) && !defined(WL_CFG80211_STA_EVENT)
+	WL_DBG(("Enter \n"));
+	if (!len && (event == WLC_E_DEAUTH)) {
+		len = 2; /* reason code field */
+		data = &reason;
+	}
+	if (len) {
+		body = kzalloc(len, GFP_KERNEL);
+
+		if (body == NULL) {
+			WL_ERR(("wl_notify_connect_status: Failed to allocate body\n"));
+			return WL_INVALID;
+		}
+	}
+	memset(&bssid, 0, ETHER_ADDR_LEN);
+	WL_DBG(("Enter event %d ndev %p\n", event, ndev));
+	if (wl_get_mode_by_netdev(cfg, ndev) == WL_INVALID) {
+		kfree(body);
+		return WL_INVALID;
+	}
+	if (len)
+		memcpy(body, data, len);
+
+	wldev_iovar_getbuf_bsscfg(ndev, "cur_etheraddr",
+		NULL, 0, cfg->ioctl_buf, WLC_IOCTL_SMLEN, bsscfgidx, &cfg->ioctl_buf_sync);
+	memcpy(da.octet, cfg->ioctl_buf, ETHER_ADDR_LEN);
+	err = wldev_ioctl(ndev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN, false);
+	switch (event) {
+		case WLC_E_ASSOC_IND:
+			fc = FC_ASSOC_REQ;
+			break;
+		case WLC_E_REASSOC_IND:
+			fc = FC_REASSOC_REQ;
+			break;
+		case WLC_E_DISASSOC_IND:
+			fc = FC_DISASSOC;
+			break;
+		case WLC_E_DEAUTH_IND:
+			fc = FC_DISASSOC;
+			break;
+		case WLC_E_DEAUTH:
+			fc = FC_DISASSOC;
+			break;
+		default:
+			fc = 0;
+			goto exit;
+	}
+	if ((err = wldev_ioctl(ndev, WLC_GET_CHANNEL, &ci, sizeof(ci), false))) {
+		kfree(body);
+		return err;
+	}
+
+	channel = dtoh32(ci.hw_channel);
+	if (channel <= CH_MAX_2G_CHANNEL)
+		band = wiphy->bands[IEEE80211_BAND_2GHZ];
+	else
+		band = wiphy->bands[IEEE80211_BAND_5GHZ];
+	if (!band) {
+		WL_ERR(("No valid band"));
+		if (body)
+			kfree(body);
+		return -EINVAL;
+	}
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38)
+	freq = ieee80211_channel_to_frequency(channel);
+	(void)band->band;
+#else
+	freq = ieee80211_channel_to_frequency(channel, band->band);
+#endif
+
+	err = wl_frame_get_mgmt(fc, &da, &e->addr, &bssid,
+		&mgmt_frame, &len, body);
+	if (err < 0)
+		goto exit;
+	isfree = true;
+
+	if (event == WLC_E_ASSOC_IND && reason == DOT11_SC_SUCCESS) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
+		cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, GFP_ATOMIC);
+#else
+		cfg80211_rx_mgmt(ndev, freq, mgmt_frame, len, GFP_ATOMIC);
+#endif
+	} else if (event == WLC_E_DISASSOC_IND) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
+		cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, GFP_ATOMIC);
+#else
+		cfg80211_rx_mgmt(ndev, freq, mgmt_frame, len, GFP_ATOMIC);
+#endif
+	} else if ((event == WLC_E_DEAUTH_IND) || (event == WLC_E_DEAUTH)) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
+		cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, GFP_ATOMIC);
+#else
+		cfg80211_rx_mgmt(ndev, freq, mgmt_frame, len, GFP_ATOMIC);
+#endif
+	}
+
+exit:
+	if (isfree)
+		kfree(mgmt_frame);
+	if (body)
+		kfree(body);
+#else /* LINUX_VERSION < VERSION(3,2,0) && !WL_CFG80211_STA_EVENT && !WL_COMPAT_WIRELESS */
+	sinfo.filled = 0;
+	if (((event == WLC_E_ASSOC_IND) || (event == WLC_E_REASSOC_IND)) &&
+		reason == DOT11_SC_SUCCESS) {
+		sinfo.filled = STATION_INFO_ASSOC_REQ_IES;
+		if (!data) {
+			WL_ERR(("No IEs present in ASSOC/REASSOC_IND"));
+			return -EINVAL;
+		}
+		sinfo.assoc_req_ies = data;
+		sinfo.assoc_req_ies_len = len;
+		cfg80211_new_sta(ndev, e->addr.octet, &sinfo, GFP_ATOMIC);
+	} else if (event == WLC_E_DISASSOC_IND) {
+		cfg80211_del_sta(ndev, e->addr.octet, GFP_ATOMIC);
+	} else if ((event == WLC_E_DEAUTH_IND) || (event == WLC_E_DEAUTH)) {
+		cfg80211_del_sta(ndev, e->addr.octet, GFP_ATOMIC);
+	}
+#endif
+	return err;
+}
+
+static s32
+wl_get_auth_assoc_status(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e)
+{
+	u32 reason = ntoh32(e->reason);
+	u32 event = ntoh32(e->event_type);
+	struct wl_security *sec = wl_read_prof(cfg, ndev, WL_PROF_SEC);
+	WL_DBG(("event type : %d, reason : %d\n", event, reason));
+	if (sec) {
+		switch (event) {
+		case WLC_E_ASSOC:
+		case WLC_E_AUTH:
+				sec->auth_assoc_res_status = reason;
+		default:
+			break;
+		}
+	} else
+		WL_ERR(("sec is NULL\n"));
+	return 0;
+}
+
+static s32
+wl_notify_connect_status_ibss(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data)
+{
+	s32 err = 0;
+	u32 event = ntoh32(e->event_type);
+	u16 flags = ntoh16(e->flags);
+	u32 status =  ntoh32(e->status);
+	bool active;
+
+	if (event == WLC_E_JOIN) {
+		WL_DBG(("joined in IBSS network\n"));
+	}
+	if (event == WLC_E_START) {
+		WL_DBG(("started IBSS network\n"));
+	}
+	if (event == WLC_E_JOIN || event == WLC_E_START ||
+		(event == WLC_E_LINK && (flags == WLC_EVENT_MSG_LINK))) {
+		if (wl_get_drv_status(cfg, CONNECTED, ndev)) {
+			/* ROAM or Redundant */
+			u8 *cur_bssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
+			if (memcmp(cur_bssid, &e->addr, ETHER_ADDR_LEN) == 0) {
+				WL_DBG(("IBSS connected event from same BSSID("
+					MACDBG "), ignore it\n", MAC2STRDBG(cur_bssid)));
+				return err;
+			}
+			WL_INFORM(("IBSS BSSID is changed from " MACDBG " to " MACDBG "\n",
+				MAC2STRDBG(cur_bssid), MAC2STRDBG((u8 *)&e->addr)));
+			wl_get_assoc_ies(cfg, ndev);
+			wl_update_prof(cfg, ndev, NULL, (void *)&e->addr, WL_PROF_BSSID);
+			wl_update_bss_info(cfg, ndev, false);
+			cfg80211_ibss_joined(ndev, (s8 *)&e->addr, GFP_KERNEL);
+		}
+		else {
+			/* New connection */
+			WL_INFORM(("IBSS connected to " MACDBG "\n", MAC2STRDBG((u8 *)&e->addr)));
+			wl_link_up(cfg);
+			wl_get_assoc_ies(cfg, ndev);
+			wl_update_prof(cfg, ndev, NULL, (void *)&e->addr, WL_PROF_BSSID);
+			wl_update_bss_info(cfg, ndev, false);
+			cfg80211_ibss_joined(ndev, (s8 *)&e->addr, GFP_KERNEL);
+			wl_set_drv_status(cfg, CONNECTED, ndev);
+			active = true;
+			wl_update_prof(cfg, ndev, NULL, (void *)&active, WL_PROF_ACT);
+		}
+	} else if ((event == WLC_E_LINK && !(flags & WLC_EVENT_MSG_LINK)) ||
+		event == WLC_E_DEAUTH_IND || event == WLC_E_DISASSOC_IND) {
+		wl_clr_drv_status(cfg, CONNECTED, ndev);
+		wl_link_down(cfg);
+		wl_init_prof(cfg, ndev);
+	}
+	else if (event == WLC_E_SET_SSID && status == WLC_E_STATUS_NO_NETWORKS) {
+		WL_DBG(("no action - join fail (IBSS mode)\n"));
+	}
+	else {
+		WL_DBG(("no action (IBSS mode)\n"));
+}
+	return err;
+}
+
+static s32
+wl_notify_connect_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data)
+{
+	bool act;
+	struct net_device *ndev = NULL;
+	s32 err = 0;
+	u32 event = ntoh32(e->event_type);
+
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+	if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_AP) {
+		err = wl_notify_connect_status_ap(cfg, ndev, e, data);
+	} else if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_IBSS) {
+		err = wl_notify_connect_status_ibss(cfg, ndev, e, data);
+	} else if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_BSS) {
+		WL_DBG(("wl_notify_connect_status : event %d status : %d ndev %p\n",
+			ntoh32(e->event_type), ntoh32(e->status), ndev));
+		if (event == WLC_E_ASSOC || event == WLC_E_AUTH) {
+			wl_get_auth_assoc_status(cfg, ndev, e);
+			return 0;
+		}
+		if (wl_is_linkup(cfg, e, ndev)) {
+			wl_link_up(cfg);
+			act = true;
+			if (!wl_get_drv_status(cfg, DISCONNECTING, ndev)) {
+				printk("wl_bss_connect_done succeeded with " MACDBG "\n",
+					MAC2STRDBG((u8*)(&e->addr)));
+				wl_bss_connect_done(cfg, ndev, e, data, true);
+				WL_DBG(("joined in BSS network \"%s\"\n",
+				((struct wlc_ssid *)
+				 wl_read_prof(cfg, ndev, WL_PROF_SSID))->SSID));
+			}
+			wl_update_prof(cfg, ndev, e, &act, WL_PROF_ACT);
+			wl_update_prof(cfg, ndev, NULL, (void *)&e->addr, WL_PROF_BSSID);
+		} else if (wl_is_linkdown(cfg, e)) {
+			if (cfg->scan_request)
+				wl_notify_escan_complete(cfg, ndev, true, true);
+			if (wl_get_drv_status(cfg, CONNECTED, ndev)) {
+				scb_val_t scbval;
+				u8 *curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
+				s32 reason = 0;
+				if (event == WLC_E_DEAUTH_IND || event == WLC_E_DISASSOC_IND)
+					reason = ntoh32(e->reason);
+				/* WLAN_REASON_UNSPECIFIED is used for hang up event in Android */
+				reason = (reason == WLAN_REASON_UNSPECIFIED)? 0 : reason;
+
+				WL_ERR(("link down if %s may call cfg80211_disconnected. "
+					"event : %d, reason=%d from " MACDBG "\n",
+					ndev->name, event, ntoh32(e->reason),
+					MAC2STRDBG((u8*)(&e->addr))));
+				if (!cfg->roam_offload &&
+					memcmp(curbssid, &e->addr, ETHER_ADDR_LEN) != 0) {
+					WL_ERR(("BSSID of event is not the connected BSSID"
+						"(ignore it) cur: " MACDBG " event: " MACDBG"\n",
+						MAC2STRDBG(curbssid), MAC2STRDBG((u8*)(&e->addr))));
+					return 0;
+				}
+				wl_clr_drv_status(cfg, CONNECTED, ndev);
+				if (!wl_get_drv_status(cfg, DISCONNECTING, ndev)) {
+					/* To make sure disconnect, explictly send dissassoc
+					*  for BSSID 00:00:00:00:00:00 issue
+					*/
+					scbval.val = WLAN_REASON_DEAUTH_LEAVING;
+
+					memcpy(&scbval.ea, curbssid, ETHER_ADDR_LEN);
+					scbval.val = htod32(scbval.val);
+					err = wldev_ioctl(ndev, WLC_DISASSOC, &scbval,
+						sizeof(scb_val_t), true);
+					if (err < 0) {
+						WL_ERR(("WLC_DISASSOC error %d\n", err));
+						err = 0;
+					}
+					cfg80211_disconnected(ndev, reason, NULL, 0, GFP_KERNEL);
+					wl_link_down(cfg);
+					wl_init_prof(cfg, ndev);
+				} else {
+					wl_clr_drv_status(cfg, DISCONNECTING, ndev);
+				}
+			} else if (wl_get_drv_status(cfg, CONNECTING, ndev)) {
+				WL_ERR(("link down, during connecting\n"));
+#ifdef ESCAN_RESULT_PATCH
+				if ((memcmp(connect_req_bssid, broad_bssid, ETHER_ADDR_LEN) == 0) ||
+					(memcmp(&e->addr, broad_bssid, ETHER_ADDR_LEN) == 0) ||
+					(memcmp(&e->addr, connect_req_bssid, ETHER_ADDR_LEN) == 0))
+					/* In case this event comes while associating another AP */
+#endif /* ESCAN_RESULT_PATCH */
+					if (!wl_get_drv_status(cfg, DISCONNECTING, ndev))
+						wl_bss_connect_done(cfg, ndev, e, data, false);
+			}
+			wl_clr_drv_status(cfg, DISCONNECTING, ndev);
+
+			/* if link down, bsscfg is diabled */
+			if (ndev != bcmcfg_to_prmry_ndev(cfg))
+				complete(&cfg->iface_disable);
+
+		} else if (wl_is_nonetwork(cfg, e)) {
+			WL_ERR(("connect failed event=%d e->status %d e->reason %d\n",
+				event, (int)ntoh32(e->status), (int)ntoh32(e->reason)));
+			/* Clean up any pending scan request */
+			if (cfg->scan_request)
+				wl_notify_escan_complete(cfg, ndev, true, true);
+			if (wl_get_drv_status(cfg, CONNECTING, ndev) &&
+				!wl_get_drv_status(cfg, DISCONNECTING, ndev))
+				wl_bss_connect_done(cfg, ndev, e, data, false);
+			wl_clr_drv_status(cfg, DISCONNECTING, ndev);
+			wl_clr_drv_status(cfg, CONNECTING, ndev);
+		} else {
+			WL_DBG(("%s nothing\n", __FUNCTION__));
+		}
+	}
+		else {
+		WL_ERR(("Invalid ndev status %d\n", wl_get_mode_by_netdev(cfg, ndev)));
+	}
+	return err;
+}
+
+
+static s32
+wl_notify_roaming_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data)
+{
+	bool act;
+	struct net_device *ndev = NULL;
+	s32 err = 0;
+	u32 event = be32_to_cpu(e->event_type);
+	u32 status = be32_to_cpu(e->status);
+	WL_DBG(("Enter \n"));
+
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+	if ((!cfg->disable_roam_event) && (event == WLC_E_BSSID)) {
+		wl_add_remove_eventmsg(ndev, WLC_E_ROAM, false);
+		cfg->disable_roam_event = TRUE;
+	}
+
+	if ((cfg->disable_roam_event) && (event == WLC_E_ROAM))
+		return err;
+
+	if ((event == WLC_E_ROAM || event == WLC_E_BSSID) && status == WLC_E_STATUS_SUCCESS) {
+		if (wl_get_drv_status(cfg, CONNECTED, ndev))
+			wl_bss_roaming_done(cfg, ndev, e, data);
+		else
+			wl_bss_connect_done(cfg, ndev, e, data, true);
+		act = true;
+		wl_update_prof(cfg, ndev, e, &act, WL_PROF_ACT);
+		wl_update_prof(cfg, ndev, NULL, (void *)&e->addr, WL_PROF_BSSID);
+	}
+	return err;
+}
+
+static s32 wl_get_assoc_ies(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+{
+	wl_assoc_info_t assoc_info;
+	struct wl_connect_info *conn_info = wl_to_conn(cfg);
+	s32 err = 0;
+
+	WL_DBG(("Enter \n"));
+	err = wldev_iovar_getbuf(ndev, "assoc_info", NULL, 0, cfg->extra_buf,
+		WL_ASSOC_INFO_MAX, NULL);
+	if (unlikely(err)) {
+		WL_ERR(("could not get assoc info (%d)\n", err));
+		return err;
+	}
+	memcpy(&assoc_info, cfg->extra_buf, sizeof(wl_assoc_info_t));
+	assoc_info.req_len = htod32(assoc_info.req_len);
+	assoc_info.resp_len = htod32(assoc_info.resp_len);
+	assoc_info.flags = htod32(assoc_info.flags);
+	if (conn_info->req_ie_len) {
+		conn_info->req_ie_len = 0;
+		bzero(conn_info->req_ie, sizeof(conn_info->req_ie));
+	}
+	if (conn_info->resp_ie_len) {
+		conn_info->resp_ie_len = 0;
+		bzero(conn_info->resp_ie, sizeof(conn_info->resp_ie));
+	}
+	if (assoc_info.req_len) {
+		err = wldev_iovar_getbuf(ndev, "assoc_req_ies", NULL, 0, cfg->extra_buf,
+			WL_ASSOC_INFO_MAX, NULL);
+		if (unlikely(err)) {
+			WL_ERR(("could not get assoc req (%d)\n", err));
+			return err;
+		}
+		conn_info->req_ie_len = assoc_info.req_len - sizeof(struct dot11_assoc_req);
+		if (assoc_info.flags & WLC_ASSOC_REQ_IS_REASSOC) {
+			conn_info->req_ie_len -= ETHER_ADDR_LEN;
+		}
+		if (conn_info->req_ie_len <= MAX_REQ_LINE)
+			memcpy(conn_info->req_ie, cfg->extra_buf, conn_info->req_ie_len);
+		else {
+			WL_ERR(("IE size %d above max %d size \n",
+				conn_info->req_ie_len, MAX_REQ_LINE));
+			return err;
+		}
+	} else {
+		conn_info->req_ie_len = 0;
+	}
+	if (assoc_info.resp_len) {
+		err = wldev_iovar_getbuf(ndev, "assoc_resp_ies", NULL, 0, cfg->extra_buf,
+			WL_ASSOC_INFO_MAX, NULL);
+		if (unlikely(err)) {
+			WL_ERR(("could not get assoc resp (%d)\n", err));
+			return err;
+		}
+		conn_info->resp_ie_len = assoc_info.resp_len -sizeof(struct dot11_assoc_resp);
+		if (conn_info->resp_ie_len <= MAX_REQ_LINE)
+			memcpy(conn_info->resp_ie, cfg->extra_buf, conn_info->resp_ie_len);
+		else {
+			WL_ERR(("IE size %d above max %d size \n",
+				conn_info->resp_ie_len, MAX_REQ_LINE));
+			return err;
+		}
+	} else {
+		conn_info->resp_ie_len = 0;
+	}
+	WL_DBG(("req len (%d) resp len (%d)\n", conn_info->req_ie_len,
+		conn_info->resp_ie_len));
+
+	return err;
+}
+
+static void wl_ch_to_chanspec(int ch, struct wl_join_params *join_params,
+        size_t *join_params_size)
+{
+#ifndef ROAM_CHANNEL_CACHE
+	chanspec_t chanspec = 0;
+#endif
+
+	if (ch != 0) {
+#ifdef ROAM_CHANNEL_CACHE
+		int n_channels;
+
+		n_channels = get_roam_channel_list(ch, join_params->params.chanspec_list,
+			&join_params->ssid, ioctl_version);
+		join_params->params.chanspec_num = htod32(n_channels);
+		*join_params_size += WL_ASSOC_PARAMS_FIXED_SIZE +
+			join_params->params.chanspec_num * sizeof(chanspec_t);
+#else
+		join_params->params.chanspec_num = 1;
+		join_params->params.chanspec_list[0] = ch;
+
+		if (join_params->params.chanspec_list[0] <= CH_MAX_2G_CHANNEL)
+			chanspec |= WL_CHANSPEC_BAND_2G;
+		else
+			chanspec |= WL_CHANSPEC_BAND_5G;
+
+		chanspec |= WL_CHANSPEC_BW_20;
+		chanspec |= WL_CHANSPEC_CTL_SB_NONE;
+
+		*join_params_size += WL_ASSOC_PARAMS_FIXED_SIZE +
+			join_params->params.chanspec_num * sizeof(chanspec_t);
+
+		join_params->params.chanspec_list[0]  &= WL_CHANSPEC_CHAN_MASK;
+		join_params->params.chanspec_list[0] |= chanspec;
+		join_params->params.chanspec_list[0] =
+			wl_chspec_host_to_driver(join_params->params.chanspec_list[0]);
+
+		join_params->params.chanspec_num =
+			htod32(join_params->params.chanspec_num);
+#endif /* ROAM_CHANNEL_CACHE */
+		WL_DBG(("join_params->params.chanspec_list[0]= %X, %d channels\n",
+			join_params->params.chanspec_list[0],
+			join_params->params.chanspec_num));
+	}
+}
+
+static s32 wl_update_bss_info(struct bcm_cfg80211 *cfg, struct net_device *ndev, bool roam)
+{
+	struct cfg80211_bss *bss;
+	struct wl_bss_info *bi;
+	struct wlc_ssid *ssid;
+	struct bcm_tlv *tim;
+	s32 beacon_interval;
+	s32 dtim_period;
+	size_t ie_len;
+	u8 *ie;
+	u8 *curbssid;
+	s32 err = 0;
+	struct wiphy *wiphy;
+	u32 channel;
+#ifdef  ROAM_CHANNEL_CACHE
+	struct ieee80211_channel *cur_channel;
+	u32 freq, band;
+#endif /* ROAM_CHANNEL_CACHE */
+
+	wiphy = bcmcfg_to_wiphy(cfg);
+
+	ssid = (struct wlc_ssid *)wl_read_prof(cfg, ndev, WL_PROF_SSID);
+	curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
+	bss = cfg80211_get_bss(wiphy, NULL, curbssid,
+		ssid->SSID, ssid->SSID_len, WLAN_CAPABILITY_ESS,
+		WLAN_CAPABILITY_ESS);
+
+	mutex_lock(&cfg->usr_sync);
+
+	*(u32 *) cfg->extra_buf = htod32(WL_EXTRA_BUF_MAX);
+	err = wldev_ioctl(ndev, WLC_GET_BSS_INFO,
+		cfg->extra_buf, WL_EXTRA_BUF_MAX, false);
+	if (unlikely(err)) {
+		WL_ERR(("Could not get bss info %d\n", err));
+		goto update_bss_info_out;
+	}
+	bi = (struct wl_bss_info *)(cfg->extra_buf + 4);
+	channel = wf_chspec_ctlchan(wl_chspec_driver_to_host(bi->chanspec));
+	wl_update_prof(cfg, ndev, NULL, &channel, WL_PROF_CHAN);
+
+	if (!bss) {
+		WL_DBG(("Could not find the AP\n"));
+		if (memcmp(bi->BSSID.octet, curbssid, ETHER_ADDR_LEN)) {
+			WL_ERR(("Bssid doesn't match\n"));
+			err = -EIO;
+			goto update_bss_info_out;
+		}
+		err = wl_inform_single_bss(cfg, bi, roam);
+		if (unlikely(err))
+			goto update_bss_info_out;
+
+		ie = ((u8 *)bi) + bi->ie_offset;
+		ie_len = bi->ie_length;
+		beacon_interval = cpu_to_le16(bi->beacon_period);
+	} else {
+		WL_DBG(("Found the AP in the list - BSSID %pM\n", bss->bssid));
+#ifdef  ROAM_CHANNEL_CACHE
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38) && !defined(WL_COMPAT_WIRELESS)
+		freq = ieee80211_channel_to_frequency(channel);
+#else
+		band = (channel <= CH_MAX_2G_CHANNEL) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+		freq = ieee80211_channel_to_frequency(channel, band);
+#endif
+		cur_channel = ieee80211_get_channel(wiphy, freq);
+		bss->channel = cur_channel;
+#endif /* ROAM_CHANNEL_CACHE */
+#if defined(WL_CFG80211_P2P_DEV_IF)
+		ie = (u8 *)bss->ies->data;
+		ie_len = bss->ies->len;
+#else
+		ie = bss->information_elements;
+		ie_len = bss->len_information_elements;
+#endif /* WL_CFG80211_P2P_DEV_IF */
+		beacon_interval = bss->beacon_interval;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0))
+		cfg80211_put_bss(wiphy, bss);
+#else
+		cfg80211_put_bss(bss);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */
+	}
+
+	tim = bcm_parse_tlvs(ie, ie_len, WLAN_EID_TIM);
+	if (tim) {
+		dtim_period = tim->data[1];
+	} else {
+		/*
+		* active scan was done so we could not get dtim
+		* information out of probe response.
+		* so we speficially query dtim information.
+		*/
+		err = wldev_ioctl(ndev, WLC_GET_DTIMPRD,
+			&dtim_period, sizeof(dtim_period), false);
+		if (unlikely(err)) {
+			WL_ERR(("WLC_GET_DTIMPRD error (%d)\n", err));
+			goto update_bss_info_out;
+		}
+	}
+
+	wl_update_prof(cfg, ndev, NULL, &beacon_interval, WL_PROF_BEACONINT);
+	wl_update_prof(cfg, ndev, NULL, &dtim_period, WL_PROF_DTIMPERIOD);
+
+update_bss_info_out:
+	if (unlikely(err)) {
+		WL_ERR(("Failed with error %d\n", err));
+	}
+	mutex_unlock(&cfg->usr_sync);
+	return err;
+}
+
+static s32
+wl_bss_roaming_done(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data)
+{
+	struct wl_connect_info *conn_info = wl_to_conn(cfg);
+	s32 err = 0;
+	u8 *curbssid;
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39))
+	struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+	struct ieee80211_supported_band *band;
+	struct ieee80211_channel *notify_channel = NULL;
+	u32 *channel;
+	u32 freq;
+#endif
+
+
+	wl_get_assoc_ies(cfg, ndev);
+	wl_update_prof(cfg, ndev, NULL, (void *)(e->addr.octet), WL_PROF_BSSID);
+	curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
+	wl_update_bss_info(cfg, ndev, true);
+	wl_update_pmklist(ndev, cfg->pmk_list, err);
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39))
+	/* channel info for cfg80211_roamed introduced in 2.6.39-rc1 */
+	channel = (u32 *)wl_read_prof(cfg, ndev, WL_PROF_CHAN);
+	if (*channel <= CH_MAX_2G_CHANNEL)
+		band = wiphy->bands[IEEE80211_BAND_2GHZ];
+	else
+		band = wiphy->bands[IEEE80211_BAND_5GHZ];
+	freq = ieee80211_channel_to_frequency(*channel, band->band);
+	notify_channel = ieee80211_get_channel(wiphy, freq);
+#endif
+	printk("wl_bss_roaming_done succeeded to " MACDBG "\n",
+		MAC2STRDBG((u8*)(&e->addr)));
+#ifdef PCIE_FULL_DONGLE
+	wl_roam_flowring_cleanup(cfg);
+#endif /* PCIE_FULL_DONGLE */
+
+	cfg80211_roamed(ndev,
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39))
+		notify_channel,
+#endif
+		curbssid,
+		conn_info->req_ie, conn_info->req_ie_len,
+		conn_info->resp_ie, conn_info->resp_ie_len, GFP_KERNEL);
+	WL_DBG(("Report roaming result\n"));
+
+	wl_set_drv_status(cfg, CONNECTED, ndev);
+
+	return err;
+}
+
+static s32
+wl_bss_connect_done(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data, bool completed)
+{
+	struct wl_connect_info *conn_info = wl_to_conn(cfg);
+	struct wl_security *sec = wl_read_prof(cfg, ndev, WL_PROF_SEC);
+#if (defined(ROAM_ENABLE) && defined(ROAM_AP_ENV_DETECTION)) || \
+	defined(CUSTOM_SET_CPUCORE)
+	dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+#endif /* (ROAM_ENABLE && ROAM_AP_ENV_DETECTION) || CUSTOM_SET_CPUCORE */
+	s32 err = 0;
+	u8 *curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
+	if (!sec) {
+		WL_ERR(("sec is NULL\n"));
+		return -ENODEV;
+	}
+	WL_DBG((" enter\n"));
+#ifdef ESCAN_RESULT_PATCH
+	if (wl_get_drv_status(cfg, CONNECTED, ndev)) {
+		if (memcmp(curbssid, connect_req_bssid, ETHER_ADDR_LEN) == 0) {
+			WL_DBG((" Connected event of connected device e=%d s=%d, ignore it\n",
+				ntoh32(e->event_type), ntoh32(e->status)));
+			return err;
+		}
+	}
+	if (memcmp(curbssid, broad_bssid, ETHER_ADDR_LEN) == 0 &&
+		memcmp(broad_bssid, connect_req_bssid, ETHER_ADDR_LEN) != 0) {
+		WL_DBG(("copy bssid\n"));
+		memcpy(curbssid, connect_req_bssid, ETHER_ADDR_LEN);
+	}
+
+#else
+	if (cfg->scan_request) {
+		wl_notify_escan_complete(cfg, ndev, true, true);
+	}
+#endif /* ESCAN_RESULT_PATCH */
+	if (wl_get_drv_status(cfg, CONNECTING, ndev)) {
+		wl_cfg80211_scan_abort(cfg);
+		wl_clr_drv_status(cfg, CONNECTING, ndev);
+		if (completed) {
+			wl_get_assoc_ies(cfg, ndev);
+			wl_update_prof(cfg, ndev, NULL, (void *)(e->addr.octet), WL_PROF_BSSID);
+			curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
+			wl_update_bss_info(cfg, ndev, false);
+			wl_update_pmklist(ndev, cfg->pmk_list, err);
+			wl_set_drv_status(cfg, CONNECTED, ndev);
+#if defined(ROAM_ENABLE) && defined(ROAM_AP_ENV_DETECTION)
+			if (dhd->roam_env_detection)
+				wldev_iovar_setint(ndev, "roam_env_detection",
+					AP_ENV_INDETERMINATE);
+#endif /* ROAM_AP_ENV_DETECTION */
+			if (ndev != bcmcfg_to_prmry_ndev(cfg)) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
+				init_completion(&cfg->iface_disable);
+#else
+				/* reinitialize completion to clear previous count */
+				INIT_COMPLETION(cfg->iface_disable);
+#endif
+			}
+#ifdef CUSTOM_SET_CPUCORE
+			if (wl_get_chan_isvht80(ndev, dhd)) {
+				if (ndev == bcmcfg_to_prmry_ndev(cfg))
+					dhd->chan_isvht80 |= DHD_FLAG_STA_MODE; /* STA mode */
+				else if (ndev == wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION))
+					dhd->chan_isvht80 |= DHD_FLAG_P2P_MODE; /* p2p mode */
+				dhd_set_cpucore(dhd, TRUE);
+			}
+#endif /* CUSTOM_SET_CPUCORE */
+
+		}
+		cfg80211_connect_result(ndev,
+			curbssid,
+			conn_info->req_ie,
+			conn_info->req_ie_len,
+			conn_info->resp_ie,
+			conn_info->resp_ie_len,
+			completed ? WLAN_STATUS_SUCCESS :
+			(sec->auth_assoc_res_status) ?
+			sec->auth_assoc_res_status :
+			WLAN_STATUS_UNSPECIFIED_FAILURE,
+			GFP_KERNEL);
+		if (completed)
+			WL_INFORM(("Report connect result - connection succeeded\n"));
+		else
+			WL_ERR(("Report connect result - connection failed\n"));
+	}
+#ifdef CONFIG_TCPACK_FASTTX
+	if (wl_get_chan_isvht80(ndev, dhd))
+		wldev_iovar_setint(ndev, "tcpack_fast_tx", 0);
+	else
+		wldev_iovar_setint(ndev, "tcpack_fast_tx", 1);
+#endif /* CONFIG_TCPACK_FASTTX */
+
+	return err;
+}
+
+static s32
+wl_notify_mic_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data)
+{
+	struct net_device *ndev = NULL;
+	u16 flags = ntoh16(e->flags);
+	enum nl80211_key_type key_type;
+
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+	mutex_lock(&cfg->usr_sync);
+	if (flags & WLC_EVENT_MSG_GROUP)
+		key_type = NL80211_KEYTYPE_GROUP;
+	else
+		key_type = NL80211_KEYTYPE_PAIRWISE;
+
+	cfg80211_michael_mic_failure(ndev, (u8 *)&e->addr, key_type, -1,
+		NULL, GFP_KERNEL);
+	mutex_unlock(&cfg->usr_sync);
+
+	return 0;
+}
+
+#ifdef BT_WIFI_HANDOVER
+static s32
+wl_notify_bt_wifi_handover_req(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data)
+{
+	struct net_device *ndev = NULL;
+	u32 event = ntoh32(e->event_type);
+	u32 datalen = ntoh32(e->datalen);
+	s32 err;
+
+	WL_ERR(("wl_notify_bt_wifi_handover_req: event_type : %d, datalen : %d\n", event, datalen));
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+	err = wl_genl_send_msg(ndev, event, data, (u16)datalen, 0, 0);
+
+	return err;
+}
+#endif /* BT_WIFI_HANDOVER */
+
+#ifdef PNO_SUPPORT
+static s32
+wl_notify_pfn_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data)
+{
+	struct net_device *ndev = NULL;
+
+	WL_ERR((">>> PNO Event\n"));
+
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+#ifndef WL_SCHED_SCAN
+	mutex_lock(&cfg->usr_sync);
+	/* TODO: Use cfg80211_sched_scan_results(wiphy); */
+	cfg80211_disconnected(ndev, 0, NULL, 0, GFP_KERNEL);
+	mutex_unlock(&cfg->usr_sync);
+#else
+	/* If cfg80211 scheduled scan is supported, report the pno results via sched
+	 * scan results
+	 */
+	wl_notify_sched_scan_results(cfg, ndev, e, data);
+#endif /* WL_SCHED_SCAN */
+	return 0;
+}
+#endif /* PNO_SUPPORT */
+
+#ifdef GSCAN_SUPPORT
+static s32
+wl_notify_gscan_event(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data)
+{
+	s32 err = 0;
+	u32 event = be32_to_cpu(e->event_type);
+	void *ptr;
+	int send_evt_bytes = 0;
+	int batch_event_result_dummy = 0;
+	struct net_device *ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+	struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+	u32 len = ntoh32(e->datalen);
+
+	switch (event) {
+		case WLC_E_PFN_SWC:
+			ptr = dhd_dev_swc_scan_event(ndev, data, &send_evt_bytes);
+			if (send_evt_bytes) {
+				wl_cfgvendor_send_async_event(wiphy, ndev,
+				    GOOGLE_GSCAN_SIGNIFICANT_EVENT, ptr, send_evt_bytes);
+				kfree(ptr);
+			}
+			break;
+		case WLC_E_PFN_BEST_BATCHING:
+			err = dhd_dev_retrieve_batch_scan(ndev);
+			if (err < 0) {
+				WL_ERR(("Batch retrieval already in progress %d\n", err));
+			} else {
+				wl_cfgvendor_send_async_event(wiphy, ndev,
+				    GOOGLE_GSCAN_BATCH_SCAN_EVENT,
+				     &batch_event_result_dummy, sizeof(int));
+			}
+			break;
+		case WLC_E_PFN_SCAN_COMPLETE:
+			batch_event_result_dummy = WIFI_SCAN_COMPLETE;
+			wl_cfgvendor_send_async_event(wiphy, ndev,
+				GOOGLE_SCAN_COMPLETE_EVENT,
+				&batch_event_result_dummy, sizeof(int));
+			break;
+		case WLC_E_PFN_BSSID_NET_FOUND:
+			ptr = dhd_dev_hotlist_scan_event(ndev, data, &send_evt_bytes,
+			      HOTLIST_FOUND);
+			if (ptr) {
+				wl_cfgvendor_send_hotlist_event(wiphy, ndev,
+				 ptr, send_evt_bytes, GOOGLE_GSCAN_GEOFENCE_FOUND_EVENT);
+				dhd_dev_gscan_hotlist_cache_cleanup(ndev, HOTLIST_FOUND);
+			}
+			break;
+		case WLC_E_PFN_BSSID_NET_LOST:
+			/* WLC_E_PFN_BSSID_NET_LOST is conflict shared with WLC_E_PFN_SCAN_ALLGONE
+			 * We currently do not use WLC_E_PFN_SCAN_ALLGONE, so if we get it, ignore
+			 */
+			if (len) {
+				ptr = dhd_dev_hotlist_scan_event(ndev, data, &send_evt_bytes,
+				            HOTLIST_LOST);
+				if (ptr) {
+					wl_cfgvendor_send_hotlist_event(wiphy, ndev,
+					 ptr, send_evt_bytes, GOOGLE_GSCAN_GEOFENCE_LOST_EVENT);
+					dhd_dev_gscan_hotlist_cache_cleanup(ndev, HOTLIST_LOST);
+				}
+			}
+			break;
+		case WLC_E_PFN_GSCAN_FULL_RESULT:
+			ptr = dhd_dev_process_full_gscan_result(ndev, data, &send_evt_bytes);
+			if (ptr) {
+				wl_cfgvendor_send_async_event(wiphy, ndev,
+				    GOOGLE_SCAN_FULL_RESULTS_EVENT, ptr, send_evt_bytes);
+				kfree(ptr);
+			}
+			break;
+
+	}
+	return err;
+}
+#endif /* GSCAN_SUPPORT */
+
+static s32
+wl_notify_scan_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data)
+{
+	struct channel_info channel_inform;
+	struct wl_scan_results *bss_list;
+	struct net_device *ndev = NULL;
+	u32 len = WL_SCAN_BUF_MAX;
+	s32 err = 0;
+	unsigned long flags;
+
+	WL_DBG(("Enter \n"));
+	if (!wl_get_drv_status(cfg, SCANNING, ndev)) {
+		WL_ERR(("scan is not ready \n"));
+		return err;
+	}
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+	mutex_lock(&cfg->usr_sync);
+	wl_clr_drv_status(cfg, SCANNING, ndev);
+	err = wldev_ioctl(ndev, WLC_GET_CHANNEL, &channel_inform,
+		sizeof(channel_inform), false);
+	if (unlikely(err)) {
+		WL_ERR(("scan busy (%d)\n", err));
+		goto scan_done_out;
+	}
+	channel_inform.scan_channel = dtoh32(channel_inform.scan_channel);
+	if (unlikely(channel_inform.scan_channel)) {
+
+		WL_DBG(("channel_inform.scan_channel (%d)\n",
+			channel_inform.scan_channel));
+	}
+	cfg->bss_list = cfg->scan_results;
+	bss_list = cfg->bss_list;
+	memset(bss_list, 0, len);
+	bss_list->buflen = htod32(len);
+	err = wldev_ioctl(ndev, WLC_SCAN_RESULTS, bss_list, len, false);
+	if (unlikely(err) && unlikely(!cfg->scan_suppressed)) {
+		WL_ERR(("%s Scan_results error (%d)\n", ndev->name, err));
+		err = -EINVAL;
+		goto scan_done_out;
+	}
+	bss_list->buflen = dtoh32(bss_list->buflen);
+	bss_list->version = dtoh32(bss_list->version);
+	bss_list->count = dtoh32(bss_list->count);
+
+	err = wl_inform_bss(cfg);
+
+scan_done_out:
+	del_timer_sync(&cfg->scan_timeout);
+	spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+	if (cfg->scan_request) {
+		cfg80211_scan_done(cfg->scan_request, false);
+		cfg->scan_request = NULL;
+	}
+	spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+	WL_DBG(("cfg80211_scan_done\n"));
+	mutex_unlock(&cfg->usr_sync);
+	return err;
+}
+
+static s32
+wl_frame_get_mgmt(u16 fc, const struct ether_addr *da,
+	const struct ether_addr *sa, const struct ether_addr *bssid,
+	u8 **pheader, u32 *body_len, u8 *pbody)
+{
+	struct dot11_management_header *hdr;
+	u32 totlen = 0;
+	s32 err = 0;
+	u8 *offset;
+	u32 prebody_len = *body_len;
+	switch (fc) {
+		case FC_ASSOC_REQ:
+			/* capability , listen interval */
+			totlen = DOT11_ASSOC_REQ_FIXED_LEN;
+			*body_len += DOT11_ASSOC_REQ_FIXED_LEN;
+			break;
+
+		case FC_REASSOC_REQ:
+			/* capability, listen inteval, ap address */
+			totlen = DOT11_REASSOC_REQ_FIXED_LEN;
+			*body_len += DOT11_REASSOC_REQ_FIXED_LEN;
+			break;
+	}
+	totlen += DOT11_MGMT_HDR_LEN + prebody_len;
+	*pheader = kzalloc(totlen, GFP_KERNEL);
+	if (*pheader == NULL) {
+		WL_ERR(("memory alloc failed \n"));
+		return -ENOMEM;
+	}
+	hdr = (struct dot11_management_header *) (*pheader);
+	hdr->fc = htol16(fc);
+	hdr->durid = 0;
+	hdr->seq = 0;
+	offset = (u8*)(hdr + 1) + (totlen - DOT11_MGMT_HDR_LEN - prebody_len);
+	bcopy((const char*)da, (u8*)&hdr->da, ETHER_ADDR_LEN);
+	bcopy((const char*)sa, (u8*)&hdr->sa, ETHER_ADDR_LEN);
+	bcopy((const char*)bssid, (u8*)&hdr->bssid, ETHER_ADDR_LEN);
+	if ((pbody != NULL) && prebody_len)
+		bcopy((const char*)pbody, offset, prebody_len);
+	*body_len = totlen;
+	return err;
+}
+
+
+void
+wl_stop_wait_next_action_frame(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+{
+	if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
+		if (timer_pending(&cfg->p2p->listen_timer)) {
+			del_timer_sync(&cfg->p2p->listen_timer);
+		}
+		if (cfg->afx_hdl != NULL) {
+			if (cfg->afx_hdl->dev != NULL) {
+				wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
+				wl_clr_drv_status(cfg, FINDING_COMMON_CHANNEL, cfg->afx_hdl->dev);
+			}
+			cfg->afx_hdl->peer_chan = WL_INVALID;
+		}
+		complete(&cfg->act_frm_scan);
+		WL_DBG(("*** Wake UP ** Working afx searching is cleared\n"));
+	} else if (wl_get_drv_status_all(cfg, SENDING_ACT_FRM)) {
+		if (!(wl_get_p2p_status(cfg, ACTION_TX_COMPLETED) ||
+			wl_get_p2p_status(cfg, ACTION_TX_NOACK)))
+			wl_set_p2p_status(cfg, ACTION_TX_COMPLETED);
+
+		WL_DBG(("*** Wake UP ** abort actframe iovar\n"));
+		/* if channel is not zero, "actfame" uses off channel scan.
+		 * So abort scan for off channel completion.
+		 */
+		if (cfg->af_sent_channel)
+			wl_cfg80211_scan_abort(cfg);
+	}
+#ifdef WL_CFG80211_SYNC_GON
+	else if (wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM_LISTEN)) {
+		WL_DBG(("*** Wake UP ** abort listen for next af frame\n"));
+		/* So abort scan to cancel listen */
+		wl_cfg80211_scan_abort(cfg);
+	}
+#endif /* WL_CFG80211_SYNC_GON */
+}
+
+
+int wl_cfg80211_get_ioctl_version(void)
+{
+	return ioctl_version;
+}
+
+static s32
+wl_notify_rx_mgmt_frame(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data)
+{
+	struct ieee80211_supported_band *band;
+	struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+	struct ether_addr da;
+	struct ether_addr bssid;
+	bool isfree = false;
+	s32 err = 0;
+	s32 freq;
+	struct net_device *ndev = NULL;
+	wifi_p2p_pub_act_frame_t *act_frm = NULL;
+	wifi_p2p_action_frame_t *p2p_act_frm = NULL;
+	wifi_p2psd_gas_pub_act_frame_t *sd_act_frm = NULL;
+	wl_event_rx_frame_data_t *rxframe =
+		(wl_event_rx_frame_data_t*)data;
+	u32 event = ntoh32(e->event_type);
+	u8 *mgmt_frame;
+	u8 bsscfgidx = e->bsscfgidx;
+	u32 mgmt_frame_len = ntoh32(e->datalen) - sizeof(wl_event_rx_frame_data_t);
+	u16 channel = ((ntoh16(rxframe->channel) & WL_CHANSPEC_CHAN_MASK));
+
+	memset(&bssid, 0, ETHER_ADDR_LEN);
+
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+	if (channel <= CH_MAX_2G_CHANNEL)
+		band = wiphy->bands[IEEE80211_BAND_2GHZ];
+	else
+		band = wiphy->bands[IEEE80211_BAND_5GHZ];
+	if (!band) {
+		WL_ERR(("No valid band"));
+		return -EINVAL;
+	}
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38)
+	freq = ieee80211_channel_to_frequency(channel);
+	(void)band->band;
+#else
+	freq = ieee80211_channel_to_frequency(channel, band->band);
+#endif
+	if (event == WLC_E_ACTION_FRAME_RX) {
+		wldev_iovar_getbuf_bsscfg(ndev, "cur_etheraddr",
+			NULL, 0, cfg->ioctl_buf, WLC_IOCTL_SMLEN, bsscfgidx, &cfg->ioctl_buf_sync);
+
+		err = wldev_ioctl(ndev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN, false);
+		if (err < 0)
+			 WL_ERR(("WLC_GET_BSSID error %d\n", err));
+		memcpy(da.octet, cfg->ioctl_buf, ETHER_ADDR_LEN);
+		err = wl_frame_get_mgmt(FC_ACTION, &da, &e->addr, &bssid,
+			&mgmt_frame, &mgmt_frame_len,
+			(u8 *)((wl_event_rx_frame_data_t *)rxframe + 1));
+		if (err < 0) {
+			WL_ERR(("Error in receiving action frame len %d channel %d freq %d\n",
+				mgmt_frame_len, channel, freq));
+			goto exit;
+		}
+		isfree = true;
+		if (wl_cfgp2p_is_pub_action(&mgmt_frame[DOT11_MGMT_HDR_LEN],
+			mgmt_frame_len - DOT11_MGMT_HDR_LEN)) {
+			act_frm = (wifi_p2p_pub_act_frame_t *)
+					(&mgmt_frame[DOT11_MGMT_HDR_LEN]);
+		} else if (wl_cfgp2p_is_p2p_action(&mgmt_frame[DOT11_MGMT_HDR_LEN],
+			mgmt_frame_len - DOT11_MGMT_HDR_LEN)) {
+			p2p_act_frm = (wifi_p2p_action_frame_t *)
+					(&mgmt_frame[DOT11_MGMT_HDR_LEN]);
+			(void) p2p_act_frm;
+		} else if (wl_cfgp2p_is_gas_action(&mgmt_frame[DOT11_MGMT_HDR_LEN],
+			mgmt_frame_len - DOT11_MGMT_HDR_LEN)) {
+
+			sd_act_frm = (wifi_p2psd_gas_pub_act_frame_t *)
+					(&mgmt_frame[DOT11_MGMT_HDR_LEN]);
+			if (sd_act_frm && wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM)) {
+				if (cfg->next_af_subtype == sd_act_frm->action) {
+					WL_DBG(("We got a right next frame of SD!(%d)\n",
+						sd_act_frm->action));
+					wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM, ndev);
+
+					/* Stop waiting for next AF. */
+					wl_stop_wait_next_action_frame(cfg, ndev);
+				}
+			}
+			(void) sd_act_frm;
+		} else {
+
+			if (cfg->next_af_subtype != P2P_PAF_SUBTYPE_INVALID) {
+				u8 action = 0;
+				if (wl_get_public_action(&mgmt_frame[DOT11_MGMT_HDR_LEN],
+					mgmt_frame_len - DOT11_MGMT_HDR_LEN, &action) != BCME_OK) {
+					WL_DBG(("Recived action is not public action frame\n"));
+				} else if (cfg->next_af_subtype == action) {
+					WL_DBG(("Recived action is the waiting action(%d)\n",
+						action));
+					wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM, ndev);
+
+					/* Stop waiting for next AF. */
+					wl_stop_wait_next_action_frame(cfg, ndev);
+				}
+			}
+		}
+
+		if (act_frm) {
+
+			if (wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM)) {
+				if (cfg->next_af_subtype == act_frm->subtype) {
+					WL_DBG(("We got a right next frame!(%d)\n",
+						act_frm->subtype));
+					wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM, ndev);
+
+					if (cfg->next_af_subtype == P2P_PAF_GON_CONF) {
+						OSL_SLEEP(20);
+					}
+
+					/* Stop waiting for next AF. */
+					wl_stop_wait_next_action_frame(cfg, ndev);
+				}
+			}
+		}
+
+		wl_cfgp2p_print_actframe(false, &mgmt_frame[DOT11_MGMT_HDR_LEN],
+			mgmt_frame_len - DOT11_MGMT_HDR_LEN, channel);
+		/*
+		 * After complete GO Negotiation, roll back to mpc mode
+		 */
+		if (act_frm && ((act_frm->subtype == P2P_PAF_GON_CONF) ||
+			(act_frm->subtype == P2P_PAF_PROVDIS_RSP))) {
+			wldev_iovar_setint(ndev, "mpc", 1);
+		}
+		if (act_frm && (act_frm->subtype == P2P_PAF_GON_CONF)) {
+			WL_DBG(("P2P: GO_NEG_PHASE status cleared \n"));
+			wl_clr_p2p_status(cfg, GO_NEG_PHASE);
+		}
+	} else if (event == WLC_E_PROBREQ_MSG) {
+
+		/* Handle probe reqs frame
+		 * WPS-AP certification 4.2.13
+		 */
+		struct parsed_ies prbreq_ies;
+		u32 prbreq_ie_len = 0;
+		bool pbc = 0;
+
+		WL_DBG((" Event WLC_E_PROBREQ_MSG received\n"));
+		mgmt_frame = (u8 *)(data);
+		mgmt_frame_len = ntoh32(e->datalen);
+
+		prbreq_ie_len = mgmt_frame_len - DOT11_MGMT_HDR_LEN;
+
+		/* Parse prob_req IEs */
+		if (wl_cfg80211_parse_ies(&mgmt_frame[DOT11_MGMT_HDR_LEN],
+			prbreq_ie_len, &prbreq_ies) < 0) {
+			WL_ERR(("Prob req get IEs failed\n"));
+			return 0;
+		}
+		if (prbreq_ies.wps_ie != NULL) {
+			wl_validate_wps_ie((char *)prbreq_ies.wps_ie, prbreq_ies.wps_ie_len, &pbc);
+			WL_DBG((" wps_ie exist pbc = %d\n", pbc));
+			/* if pbc method, send prob_req mgmt frame to upper layer */
+			if (!pbc)
+				return 0;
+		} else
+			return 0;
+	} else {
+		mgmt_frame = (u8 *)((wl_event_rx_frame_data_t *)rxframe + 1);
+
+		/* wpa supplicant use probe request event for restarting another GON Req.
+		 * but it makes GON Req repetition.
+		 * so if src addr of prb req is same as my target device,
+		 * do not send probe request event during sending action frame.
+		 */
+		if (event == WLC_E_P2P_PROBREQ_MSG) {
+			WL_DBG((" Event %s\n", (event == WLC_E_P2P_PROBREQ_MSG) ?
+				"WLC_E_P2P_PROBREQ_MSG":"WLC_E_PROBREQ_MSG"));
+
+
+			/* Filter any P2P probe reqs arriving during the
+			 * GO-NEG Phase
+			 */
+			if (cfg->p2p &&
+				wl_get_p2p_status(cfg, GO_NEG_PHASE)) {
+				WL_DBG(("Filtering P2P probe_req while "
+					"being in GO-Neg state\n"));
+				return 0;
+			}
+		}
+	}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+	cfg80211_rx_mgmt(cfgdev, freq, 0,  mgmt_frame, mgmt_frame_len, 0, GFP_ATOMIC);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || \
+	defined(WL_COMPAT_WIRELESS)
+	cfg80211_rx_mgmt(cfgdev, freq, 0, mgmt_frame, mgmt_frame_len, GFP_ATOMIC);
+#else
+	cfg80211_rx_mgmt(cfgdev, freq, mgmt_frame, mgmt_frame_len, GFP_ATOMIC);
+#endif /* LINUX_VERSION >= VERSION(3, 14, 0) */
+
+	WL_DBG(("mgmt_frame_len (%d) , e->datalen (%d), channel (%d), freq (%d)\n",
+		mgmt_frame_len, ntoh32(e->datalen), channel, freq));
+exit:
+	if (isfree)
+		kfree(mgmt_frame);
+	return 0;
+}
+
+#ifdef WL_SCHED_SCAN
+/* If target scan is not reliable, set the below define to "1" to do a
+ * full escan
+ */
+#define FULL_ESCAN_ON_PFN_NET_FOUND		0
+static s32
+wl_notify_sched_scan_results(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data)
+{
+	wl_pfn_net_info_t *netinfo, *pnetinfo;
+	struct wiphy *wiphy	= bcmcfg_to_wiphy(cfg);
+	int err = 0;
+	struct cfg80211_scan_request *request = NULL;
+	struct cfg80211_ssid ssid[MAX_PFN_LIST_COUNT];
+	struct ieee80211_channel *channel = NULL;
+	int channel_req = 0;
+	int band = 0;
+	struct wl_pfn_scanresults *pfn_result = (struct wl_pfn_scanresults *)data;
+	int n_pfn_results = pfn_result->count;
+
+	WL_DBG(("Enter\n"));
+
+	if (e->event_type == WLC_E_PFN_NET_LOST) {
+		WL_PNO(("PFN NET LOST event. Do Nothing \n"));
+		return 0;
+	}
+	WL_PNO((">>> PFN NET FOUND event. count:%d \n", n_pfn_results));
+	if (n_pfn_results > 0) {
+		int i;
+
+		if (n_pfn_results > MAX_PFN_LIST_COUNT)
+			n_pfn_results = MAX_PFN_LIST_COUNT;
+		pnetinfo = (wl_pfn_net_info_t *)(data + sizeof(wl_pfn_scanresults_t)
+				- sizeof(wl_pfn_net_info_t));
+
+		memset(&ssid, 0x00, sizeof(ssid));
+
+		request = kzalloc(sizeof(*request)
+			+ sizeof(*request->channels) * n_pfn_results,
+			GFP_KERNEL);
+		channel = (struct ieee80211_channel *)kzalloc(
+			(sizeof(struct ieee80211_channel) * n_pfn_results),
+			GFP_KERNEL);
+		if (!request || !channel) {
+			WL_ERR(("No memory"));
+			err = -ENOMEM;
+			goto out_err;
+		}
+
+		request->wiphy = wiphy;
+
+		for (i = 0; i < n_pfn_results; i++) {
+			netinfo = &pnetinfo[i];
+			if (!netinfo) {
+				WL_ERR(("Invalid netinfo ptr. index:%d", i));
+				err = -EINVAL;
+				goto out_err;
+			}
+			WL_PNO((">>> SSID:%s Channel:%d \n",
+				netinfo->pfnsubnet.SSID, netinfo->pfnsubnet.channel));
+			/* PFN result doesn't have all the info which are required by the supplicant
+			 * (For e.g IEs) Do a target Escan so that sched scan results are reported
+			 * via wl_inform_single_bss in the required format. Escan does require the
+			 * scan request in the form of cfg80211_scan_request. For timebeing, create
+			 * cfg80211_scan_request one out of the received PNO event.
+			 */
+			ssid[i].ssid_len = MIN(DOT11_MAX_SSID_LEN, netinfo->pfnsubnet.SSID_len);
+ 			memcpy(ssid[i].ssid, netinfo->pfnsubnet.SSID, ssid[i].ssid_len);
+ 			request->n_ssids++;
+
+			channel_req = netinfo->pfnsubnet.channel;
+			band = (channel_req <= CH_MAX_2G_CHANNEL) ? NL80211_BAND_2GHZ
+				: NL80211_BAND_5GHZ;
+			channel[i].center_freq = ieee80211_channel_to_frequency(channel_req, band);
+			channel[i].band = band;
+			channel[i].flags |= IEEE80211_CHAN_NO_HT40;
+			request->channels[i] = &channel[i];
+			request->n_channels++;
+		}
+
+		/* assign parsed ssid array */
+		if (request->n_ssids)
+			request->ssids = &ssid[0];
+
+		if (wl_get_drv_status_all(cfg, SCANNING)) {
+			/* Abort any on-going scan */
+			wl_notify_escan_complete(cfg, ndev, true, true);
+		}
+
+		if (wl_get_p2p_status(cfg, DISCOVERY_ON)) {
+			WL_PNO((">>> P2P discovery was ON. Disabling it\n"));
+			err = wl_cfgp2p_discover_enable_search(cfg, false);
+			if (unlikely(err)) {
+				wl_clr_drv_status(cfg, SCANNING, ndev);
+				goto out_err;
+			}
+			p2p_scan(cfg) = false;
+		}
+
+		wl_set_drv_status(cfg, SCANNING, ndev);
+#ifdef CUSTOM_SET_SHORT_DWELL_TIME
+		net_set_short_dwell_time(ndev, FALSE);
+#endif
+#if FULL_ESCAN_ON_PFN_NET_FOUND
+		WL_PNO((">>> Doing Full ESCAN on PNO event\n"));
+		err = wl_do_escan(cfg, wiphy, ndev, NULL);
+#else
+		WL_PNO((">>> Doing targeted ESCAN on PNO event\n"));
+		err = wl_do_escan(cfg, wiphy, ndev, request);
+#endif
+		if (err) {
+			wl_clr_drv_status(cfg, SCANNING, ndev);
+			goto out_err;
+		}
+		cfg->sched_scan_running = TRUE;
+	}
+	else {
+		WL_ERR(("FALSE PNO Event. (pfn_count == 0) \n"));
+	}
+out_err:
+	if (request)
+		kfree(request);
+	if (channel)
+		kfree(channel);
+	return err;
+}
+#endif /* WL_SCHED_SCAN */
+
+static void wl_init_conf(struct wl_conf *conf)
+{
+	WL_DBG(("Enter \n"));
+	conf->frag_threshold = (u32)-1;
+	conf->rts_threshold = (u32)-1;
+	conf->retry_short = (u32)-1;
+	conf->retry_long = (u32)-1;
+	conf->tx_power = -1;
+}
+
+static void wl_init_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+{
+	unsigned long flags;
+	struct wl_profile *profile = wl_get_profile_by_netdev(cfg, ndev);
+
+	spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+	memset(profile, 0, sizeof(struct wl_profile));
+	spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+}
+
+static void wl_init_event_handler(struct bcm_cfg80211 *cfg)
+{
+	memset(cfg->evt_handler, 0, sizeof(cfg->evt_handler));
+
+	cfg->evt_handler[WLC_E_SCAN_COMPLETE] = wl_notify_scan_status;
+	cfg->evt_handler[WLC_E_AUTH] = wl_notify_connect_status;
+	cfg->evt_handler[WLC_E_ASSOC] = wl_notify_connect_status;
+	cfg->evt_handler[WLC_E_LINK] = wl_notify_connect_status;
+	cfg->evt_handler[WLC_E_DEAUTH_IND] = wl_notify_connect_status;
+	cfg->evt_handler[WLC_E_DEAUTH] = wl_notify_connect_status;
+	cfg->evt_handler[WLC_E_DISASSOC_IND] = wl_notify_connect_status;
+	cfg->evt_handler[WLC_E_ASSOC_IND] = wl_notify_connect_status;
+	cfg->evt_handler[WLC_E_REASSOC_IND] = wl_notify_connect_status;
+	cfg->evt_handler[WLC_E_ROAM] = wl_notify_roaming_status;
+	cfg->evt_handler[WLC_E_MIC_ERROR] = wl_notify_mic_status;
+	cfg->evt_handler[WLC_E_SET_SSID] = wl_notify_connect_status;
+	cfg->evt_handler[WLC_E_ACTION_FRAME_RX] = wl_notify_rx_mgmt_frame;
+	cfg->evt_handler[WLC_E_PROBREQ_MSG] = wl_notify_rx_mgmt_frame;
+	cfg->evt_handler[WLC_E_P2P_PROBREQ_MSG] = wl_notify_rx_mgmt_frame;
+	cfg->evt_handler[WLC_E_P2P_DISC_LISTEN_COMPLETE] = wl_cfgp2p_listen_complete;
+	cfg->evt_handler[WLC_E_ACTION_FRAME_COMPLETE] = wl_cfgp2p_action_tx_complete;
+	cfg->evt_handler[WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE] = wl_cfgp2p_action_tx_complete;
+	cfg->evt_handler[WLC_E_JOIN] = wl_notify_connect_status;
+	cfg->evt_handler[WLC_E_START] = wl_notify_connect_status;
+#ifdef PNO_SUPPORT
+	cfg->evt_handler[WLC_E_PFN_NET_FOUND] = wl_notify_pfn_status;
+#endif /* PNO_SUPPORT */
+#ifdef GSCAN_SUPPORT
+	cfg->evt_handler[WLC_E_PFN_BEST_BATCHING] = wl_notify_gscan_event;
+	cfg->evt_handler[WLC_E_PFN_SCAN_COMPLETE] = wl_notify_gscan_event;
+	cfg->evt_handler[WLC_E_PFN_GSCAN_FULL_RESULT] = wl_notify_gscan_event;
+	cfg->evt_handler[WLC_E_PFN_SWC] = wl_notify_gscan_event;
+	cfg->evt_handler[WLC_E_PFN_BSSID_NET_FOUND] = wl_notify_gscan_event;
+	cfg->evt_handler[WLC_E_PFN_BSSID_NET_LOST] = wl_notify_gscan_event;
+#endif /* GSCAN_SUPPORT */
+#ifdef WLTDLS
+	cfg->evt_handler[WLC_E_TDLS_PEER_EVENT] = wl_tdls_event_handler;
+#endif /* WLTDLS */
+	cfg->evt_handler[WLC_E_BSSID] = wl_notify_roaming_status;
+#ifdef BT_WIFI_HANDOVER
+	cfg->evt_handler[WLC_E_BT_WIFI_HANDOVER_REQ] = wl_notify_bt_wifi_handover_req;
+#endif
+}
+
+#if defined(STATIC_WL_PRIV_STRUCT)
+static void
+wl_init_escan_result_buf(struct bcm_cfg80211 *cfg)
+{
+	cfg->escan_info.escan_buf = DHD_OS_PREALLOC(cfg->pub,
+		DHD_PREALLOC_WIPHY_ESCAN0, ESCAN_BUF_SIZE);
+	bzero(cfg->escan_info.escan_buf, ESCAN_BUF_SIZE);
+}
+
+static void
+wl_deinit_escan_result_buf(struct bcm_cfg80211 *cfg)
+{
+	cfg->escan_info.escan_buf = NULL;
+
+}
+#endif /* STATIC_WL_PRIV_STRUCT */
+
+static s32 wl_init_priv_mem(struct bcm_cfg80211 *cfg)
+{
+	WL_DBG(("Enter \n"));
+	cfg->scan_results = (void *)kzalloc(WL_SCAN_BUF_MAX, GFP_KERNEL);
+	if (unlikely(!cfg->scan_results)) {
+		WL_ERR(("Scan results alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+	cfg->conf = (void *)kzalloc(sizeof(*cfg->conf), GFP_KERNEL);
+	if (unlikely(!cfg->conf)) {
+		WL_ERR(("wl_conf alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+	cfg->scan_req_int =
+	    (void *)kzalloc(sizeof(*cfg->scan_req_int), GFP_KERNEL);
+	if (unlikely(!cfg->scan_req_int)) {
+		WL_ERR(("Scan req alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+	cfg->ioctl_buf = (void *)kzalloc(WLC_IOCTL_MAXLEN, GFP_KERNEL);
+	if (unlikely(!cfg->ioctl_buf)) {
+		WL_ERR(("Ioctl buf alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+	cfg->escan_ioctl_buf = (void *)kzalloc(WLC_IOCTL_MAXLEN, GFP_KERNEL);
+	if (unlikely(!cfg->escan_ioctl_buf)) {
+		WL_ERR(("Ioctl buf alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+	cfg->extra_buf = (void *)kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL);
+	if (unlikely(!cfg->extra_buf)) {
+		WL_ERR(("Extra buf alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+	cfg->pmk_list = (void *)kzalloc(sizeof(*cfg->pmk_list), GFP_KERNEL);
+	if (unlikely(!cfg->pmk_list)) {
+		WL_ERR(("pmk list alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+	cfg->sta_info = (void *)kzalloc(sizeof(*cfg->sta_info), GFP_KERNEL);
+	if (unlikely(!cfg->sta_info)) {
+		WL_ERR(("sta info  alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+
+#if defined(STATIC_WL_PRIV_STRUCT)
+	cfg->conn_info = (void *)kzalloc(sizeof(*cfg->conn_info), GFP_KERNEL);
+	if (unlikely(!cfg->conn_info)) {
+		WL_ERR(("cfg->conn_info  alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+	cfg->ie = (void *)kzalloc(sizeof(*cfg->ie), GFP_KERNEL);
+	if (unlikely(!cfg->ie)) {
+		WL_ERR(("cfg->ie  alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+	wl_init_escan_result_buf(cfg);
+#endif /* STATIC_WL_PRIV_STRUCT */
+	cfg->afx_hdl = (void *)kzalloc(sizeof(*cfg->afx_hdl), GFP_KERNEL);
+	if (unlikely(!cfg->afx_hdl)) {
+		WL_ERR(("afx hdl  alloc failed\n"));
+		goto init_priv_mem_out;
+	} else {
+		init_completion(&cfg->act_frm_scan);
+		init_completion(&cfg->wait_next_af);
+
+		INIT_WORK(&cfg->afx_hdl->work, wl_cfg80211_afx_handler);
+	}
+	return 0;
+
+init_priv_mem_out:
+	wl_deinit_priv_mem(cfg);
+
+	return -ENOMEM;
+}
+
+static void wl_deinit_priv_mem(struct bcm_cfg80211 *cfg)
+{
+	kfree(cfg->scan_results);
+	cfg->scan_results = NULL;
+	kfree(cfg->conf);
+	cfg->conf = NULL;
+	kfree(cfg->scan_req_int);
+	cfg->scan_req_int = NULL;
+	kfree(cfg->ioctl_buf);
+	cfg->ioctl_buf = NULL;
+	kfree(cfg->escan_ioctl_buf);
+	cfg->escan_ioctl_buf = NULL;
+	kfree(cfg->extra_buf);
+	cfg->extra_buf = NULL;
+	kfree(cfg->pmk_list);
+	cfg->pmk_list = NULL;
+	kfree(cfg->sta_info);
+	cfg->sta_info = NULL;
+#if defined(STATIC_WL_PRIV_STRUCT)
+	kfree(cfg->conn_info);
+	cfg->conn_info = NULL;
+	kfree(cfg->ie);
+	cfg->ie = NULL;
+	wl_deinit_escan_result_buf(cfg);
+#endif /* STATIC_WL_PRIV_STRUCT */
+	if (cfg->afx_hdl) {
+		cancel_work_sync(&cfg->afx_hdl->work);
+		kfree(cfg->afx_hdl);
+		cfg->afx_hdl = NULL;
+	}
+
+	if (cfg->ap_info) {
+		kfree(cfg->ap_info->wpa_ie);
+		kfree(cfg->ap_info->rsn_ie);
+		kfree(cfg->ap_info->wps_ie);
+		kfree(cfg->ap_info);
+		cfg->ap_info = NULL;
+	}
+}
+
+static s32 wl_create_event_handler(struct bcm_cfg80211 *cfg)
+{
+	int ret = 0;
+	WL_DBG(("Enter \n"));
+
+	/* Do not use DHD in cfg driver */
+	cfg->event_tsk.thr_pid = -1;
+
+	PROC_START(wl_event_handler, cfg, &cfg->event_tsk, 0, "wl_event_handler");
+	if (cfg->event_tsk.thr_pid < 0)
+		ret = -ENOMEM;
+	return ret;
+}
+
+static void wl_destroy_event_handler(struct bcm_cfg80211 *cfg)
+{
+	if (cfg->event_tsk.thr_pid >= 0)
+		PROC_STOP(&cfg->event_tsk);
+}
+
+static void wl_scan_timeout(unsigned long data)
+{
+	wl_event_msg_t msg;
+	struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)data;
+
+	if (!(cfg->scan_request)) {
+		WL_ERR(("timer expired but no scan request\n"));
+		return;
+	}
+	bzero(&msg, sizeof(wl_event_msg_t));
+	WL_ERR(("timer expired\n"));
+	msg.event_type = hton32(WLC_E_ESCAN_RESULT);
+	msg.status = hton32(WLC_E_STATUS_TIMEOUT);
+	msg.reason = 0xFFFFFFFF;
+	wl_cfg80211_event(bcmcfg_to_prmry_ndev(cfg), &msg, NULL);
+}
+
+static void wl_send_event(struct net_device *dev, uint32 event_type,
+				uint32 status, uint32 reason)
+{
+	wl_event_msg_t msg;
+	bzero(&msg, sizeof(wl_event_msg_t));
+	msg.event_type = hton32(event_type);
+	msg.status = hton32(status);
+	msg.reason = hton32(reason);
+	wl_cfg80211_event(dev, &msg, NULL);
+}
+static s32
+wl_cfg80211_netdev_notifier_call(struct notifier_block * nb,
+	unsigned long state,
+	void *ndev)
+{
+	struct net_device *dev = ndev;
+	struct wireless_dev *wdev = dev->ieee80211_ptr;
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+
+	WL_DBG(("Enter \n"));
+
+	if (!wdev || !cfg || dev == bcmcfg_to_prmry_ndev(cfg))
+		return NOTIFY_DONE;
+
+	switch (state) {
+		case NETDEV_DOWN:
+		{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0))
+			int max_wait_timeout = 2;
+			int max_wait_count = 100;
+			int refcnt = 0;
+			unsigned long limit = jiffies + max_wait_timeout * HZ;
+			while (work_pending(&wdev->cleanup_work)) {
+				if (refcnt%5 == 0) {
+					WL_ERR(("[NETDEV_DOWN] wait for "
+						"complete of cleanup_work"
+						" (%d th)\n", refcnt));
+				}
+				if (!time_before(jiffies, limit)) {
+					WL_ERR(("[NETDEV_DOWN] cleanup_work"
+						" of CFG80211 is not"
+						" completed in %d sec\n",
+						max_wait_timeout));
+					break;
+				}
+				if (refcnt >= max_wait_count) {
+					WL_ERR(("[NETDEV_DOWN] cleanup_work"
+						" of CFG80211 is not"
+						" completed in %d loop\n",
+						max_wait_count));
+					break;
+				}
+				set_current_state(TASK_INTERRUPTIBLE);
+				(void)schedule_timeout(100);
+				set_current_state(TASK_RUNNING);
+				refcnt++;
+			}
+#endif /* LINUX_VERSION <  VERSION(3, 14, 0) */
+			break;
+		}
+
+		case NETDEV_UNREGISTER:
+			/* after calling list_del_rcu(&wdev->list) */
+			wl_dealloc_netinfo(cfg, ndev);
+			break;
+		case NETDEV_GOING_DOWN:
+			/* At NETDEV_DOWN state, wdev_cleanup_work work will be called.
+			*  In front of door, the function checks
+			*  whether current scan is working or not.
+			*  If the scanning is still working, wdev_cleanup_work call WARN_ON and
+			*  make the scan done forcibly.
+			*/
+			if (wl_get_drv_status(cfg, SCANNING, dev))
+				wl_notify_escan_complete(cfg, dev, true, true);
+			break;
+	}
+	return NOTIFY_DONE;
+}
+static struct notifier_block wl_cfg80211_netdev_notifier = {
+	.notifier_call = wl_cfg80211_netdev_notifier_call,
+};
+/* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
+ * created in kernel notifier link list (with 'next' pointing to itself)
+ */
+static bool wl_cfg80211_netdev_notifier_registered = FALSE;
+
+static void wl_cfg80211_scan_abort(struct bcm_cfg80211 *cfg)
+{
+	wl_scan_params_t *params = NULL;
+	s32 params_size = 0;
+	s32 err = BCME_OK;
+	struct net_device *dev = bcmcfg_to_prmry_ndev(cfg);
+	if (!in_atomic()) {
+		/* Our scan params only need space for 1 channel and 0 ssids */
+		params = wl_cfg80211_scan_alloc_params(-1, 0, &params_size);
+		if (params == NULL) {
+			WL_ERR(("scan params allocation failed \n"));
+			err = -ENOMEM;
+		} else {
+			/* Do a scan abort to stop the driver's scan engine */
+			err = wldev_ioctl(dev, WLC_SCAN, params, params_size, true);
+			if (err < 0) {
+				WL_ERR(("scan abort  failed \n"));
+			}
+			kfree(params);
+		}
+	}
+}
+
+static s32 wl_notify_escan_complete(struct bcm_cfg80211 *cfg,
+	struct net_device *ndev,
+	bool aborted, bool fw_abort)
+{
+	s32 err = BCME_OK;
+	unsigned long flags;
+	struct net_device *dev;
+
+	WL_DBG(("Enter \n"));
+	if (!ndev) {
+		WL_ERR(("ndev is null\n"));
+		err = BCME_ERROR;
+		return err;
+	}
+
+	if (cfg->escan_info.ndev != ndev) {
+		WL_ERR(("ndev is different %p %p\n", cfg->escan_info.ndev, ndev));
+		err = BCME_ERROR;
+		return err;
+	}
+
+	if (cfg->scan_request) {
+		dev = bcmcfg_to_prmry_ndev(cfg);
+#if defined(WL_ENABLE_P2P_IF)
+		if (cfg->scan_request->dev != cfg->p2p_net)
+			dev = cfg->scan_request->dev;
+#endif /* WL_ENABLE_P2P_IF */
+	}
+	else {
+		WL_DBG(("cfg->scan_request is NULL may be internal scan."
+			"doing scan_abort for ndev %p primary %p",
+				ndev, bcmcfg_to_prmry_ndev(cfg)));
+		dev = ndev;
+	}
+	if (fw_abort && !in_atomic())
+		wl_cfg80211_scan_abort(cfg);
+	if (timer_pending(&cfg->scan_timeout))
+		del_timer_sync(&cfg->scan_timeout);
+#if defined(ESCAN_RESULT_PATCH)
+	if (likely(cfg->scan_request)) {
+		cfg->bss_list = wl_escan_get_buf(cfg, aborted);
+		wl_inform_bss(cfg);
+	}
+#endif /* ESCAN_RESULT_PATCH */
+	spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+#ifdef WL_SCHED_SCAN
+	if (cfg->sched_scan_req && !cfg->scan_request) {
+		WL_PNO((">>> REPORTING SCHED SCAN RESULTS \n"));
+		if (!aborted)
+			cfg80211_sched_scan_results(cfg->sched_scan_req->wiphy);
+		cfg->sched_scan_running = FALSE;
+		cfg->sched_scan_req = NULL;
+	}
+#endif /* WL_SCHED_SCAN */
+	if (likely(cfg->scan_request)) {
+		cfg80211_scan_done(cfg->scan_request, aborted);
+		cfg->scan_request = NULL;
+	}
+	if (p2p_is_on(cfg))
+		wl_clr_p2p_status(cfg, SCANNING);
+	wl_clr_drv_status(cfg, SCANNING, dev);
+	spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+
+	return err;
+}
+
+static s32 wl_escan_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data)
+{
+	s32 err = BCME_OK;
+	s32 status = ntoh32(e->status);
+	wl_bss_info_t *bi;
+	wl_escan_result_t *escan_result;
+	wl_bss_info_t *bss = NULL;
+	wl_scan_results_t *list;
+	wifi_p2p_ie_t * p2p_ie;
+	struct net_device *ndev = NULL;
+	u32 bi_length;
+	u32 i;
+	u8 *p2p_dev_addr = NULL;
+
+	WL_DBG((" enter event type : %d, status : %d \n",
+		ntoh32(e->event_type), ntoh32(e->status)));
+
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+	mutex_lock(&cfg->usr_sync);
+	/* P2P SCAN is coming from primary interface */
+	if (wl_get_p2p_status(cfg, SCANNING)) {
+		if (wl_get_drv_status_all(cfg, SENDING_ACT_FRM))
+			ndev = cfg->afx_hdl->dev;
+		else
+			ndev = cfg->escan_info.ndev;
+
+	}
+	if (!ndev || (!wl_get_drv_status(cfg, SCANNING, ndev) && !cfg->sched_scan_running)) {
+		WL_ERR(("escan is not ready ndev %p drv_status 0x%x e_type %d e_states %d\n",
+			ndev, wl_get_drv_status(cfg, SCANNING, ndev),
+			ntoh32(e->event_type), ntoh32(e->status)));
+		goto exit;
+	}
+	escan_result = (wl_escan_result_t *)data;
+
+	if (status == WLC_E_STATUS_PARTIAL) {
+		WL_INFORM(("WLC_E_STATUS_PARTIAL \n"));
+		if (!escan_result) {
+			WL_ERR(("Invalid escan result (NULL pointer)\n"));
+			goto exit;
+		}
+		if (dtoh16(escan_result->bss_count) != 1) {
+			WL_ERR(("Invalid bss_count %d: ignoring\n", escan_result->bss_count));
+			goto exit;
+		}
+		bi = escan_result->bss_info;
+		if (!bi) {
+			WL_ERR(("Invalid escan bss info (NULL pointer)\n"));
+			goto exit;
+		}
+		bi_length = dtoh32(bi->length);
+		if (bi_length != (dtoh32(escan_result->buflen) - WL_ESCAN_RESULTS_FIXED_SIZE)) {
+			WL_ERR(("Invalid bss_info length %d: ignoring\n", bi_length));
+			goto exit;
+		}
+		if (wl_escan_check_sync_id(status, escan_result->sync_id,
+			cfg->escan_info.cur_sync_id) < 0)
+			goto exit;
+
+		if (!(bcmcfg_to_wiphy(cfg)->interface_modes & BIT(NL80211_IFTYPE_ADHOC))) {
+			if (dtoh16(bi->capability) & DOT11_CAP_IBSS) {
+				WL_DBG(("Ignoring IBSS result\n"));
+				goto exit;
+			}
+		}
+
+		if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
+			p2p_dev_addr = wl_cfgp2p_retreive_p2p_dev_addr(bi, bi_length);
+			if (p2p_dev_addr && !memcmp(p2p_dev_addr,
+				cfg->afx_hdl->tx_dst_addr.octet, ETHER_ADDR_LEN)) {
+				s32 channel = wf_chspec_ctlchan(
+					wl_chspec_driver_to_host(bi->chanspec));
+
+				if ((channel > MAXCHANNEL) || (channel <= 0))
+					channel = WL_INVALID;
+				else
+					WL_ERR(("ACTION FRAME SCAN : Peer " MACDBG " found,"
+						" channel : %d\n",
+						MAC2STRDBG(cfg->afx_hdl->tx_dst_addr.octet),
+						channel));
+
+				wl_clr_p2p_status(cfg, SCANNING);
+				cfg->afx_hdl->peer_chan = channel;
+				complete(&cfg->act_frm_scan);
+				goto exit;
+			}
+
+		} else {
+			int cur_len = WL_SCAN_RESULTS_FIXED_SIZE;
+			list = wl_escan_get_buf(cfg, FALSE);
+			if (scan_req_match(cfg)) {
+				/* p2p scan && allow only probe response */
+				if ((cfg->p2p->search_state != WL_P2P_DISC_ST_SCAN) &&
+					(bi->flags & WL_BSS_FLAGS_FROM_BEACON))
+					goto exit;
+				if ((p2p_ie = wl_cfgp2p_find_p2pie(((u8 *) bi) + bi->ie_offset,
+					bi->ie_length)) == NULL) {
+						WL_ERR(("Couldn't find P2PIE in probe"
+							" response/beacon\n"));
+						goto exit;
+				}
+			}
+			for (i = 0; i < list->count; i++) {
+				bss = bss ? (wl_bss_info_t *)((uintptr)bss + dtoh32(bss->length))
+					: list->bss_info;
+
+				if (!bcmp(&bi->BSSID, &bss->BSSID, ETHER_ADDR_LEN) &&
+					(CHSPEC_BAND(wl_chspec_driver_to_host(bi->chanspec))
+					== CHSPEC_BAND(wl_chspec_driver_to_host(bss->chanspec))) &&
+					bi->SSID_len == bss->SSID_len &&
+					!bcmp(bi->SSID, bss->SSID, bi->SSID_len)) {
+
+					/* do not allow beacon data to update
+					*the data recd from a probe response
+					*/
+					if (!(bss->flags & WL_BSS_FLAGS_FROM_BEACON) &&
+						(bi->flags & WL_BSS_FLAGS_FROM_BEACON))
+						goto exit;
+
+					WL_DBG(("%s("MACDBG"), i=%d prev: RSSI %d"
+						" flags 0x%x, new: RSSI %d flags 0x%x\n",
+						bss->SSID, MAC2STRDBG(bi->BSSID.octet), i,
+						bss->RSSI, bss->flags, bi->RSSI, bi->flags));
+
+					if ((bss->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL) ==
+						(bi->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL)) {
+						/* preserve max RSSI if the measurements are
+						* both on-channel or both off-channel
+						*/
+						WL_SCAN(("%s("MACDBG"), same onchan"
+						", RSSI: prev %d new %d\n",
+						bss->SSID, MAC2STRDBG(bi->BSSID.octet),
+						bss->RSSI, bi->RSSI));
+						bi->RSSI = MAX(bss->RSSI, bi->RSSI);
+					} else if ((bss->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL) &&
+						(bi->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL) == 0) {
+						/* preserve the on-channel rssi measurement
+						* if the new measurement is off channel
+						*/
+						WL_SCAN(("%s("MACDBG"), prev onchan"
+						", RSSI: prev %d new %d\n",
+						bss->SSID, MAC2STRDBG(bi->BSSID.octet),
+						bss->RSSI, bi->RSSI));
+						bi->RSSI = bss->RSSI;
+						bi->flags |= WL_BSS_FLAGS_RSSI_ONCHANNEL;
+					}
+					if (dtoh32(bss->length) != bi_length) {
+						u32 prev_len = dtoh32(bss->length);
+
+						WL_SCAN(("bss info replacement"
+							" is occured(bcast:%d->probresp%d)\n",
+							bss->ie_length, bi->ie_length));
+						WL_DBG(("%s("MACDBG"), replacement!(%d -> %d)\n",
+						bss->SSID, MAC2STRDBG(bi->BSSID.octet),
+						prev_len, bi_length));
+
+						if (list->buflen - prev_len + bi_length
+							> ESCAN_BUF_SIZE) {
+							WL_ERR(("Buffer is too small: keep the"
+								" previous result of this AP\n"));
+							/* Only update RSSI */
+							bss->RSSI = bi->RSSI;
+							bss->flags |= (bi->flags
+								& WL_BSS_FLAGS_RSSI_ONCHANNEL);
+							goto exit;
+						}
+
+						if (i < list->count - 1) {
+							/* memory copy required by this case only */
+							memmove((u8 *)bss + bi_length,
+								(u8 *)bss + prev_len,
+								list->buflen - cur_len - prev_len);
+						}
+						list->buflen -= prev_len;
+						list->buflen += bi_length;
+					}
+					list->version = dtoh32(bi->version);
+					memcpy((u8 *)bss, (u8 *)bi, bi_length);
+					goto exit;
+				}
+				cur_len += dtoh32(bss->length);
+			}
+			if (bi_length > ESCAN_BUF_SIZE - list->buflen) {
+				WL_ERR(("Buffer is too small: ignoring\n"));
+				goto exit;
+			}
+
+			memcpy(&(((char *)list)[list->buflen]), bi, bi_length);
+			list->version = dtoh32(bi->version);
+			list->buflen += bi_length;
+			list->count++;
+
+		}
+
+	}
+	else if (status == WLC_E_STATUS_SUCCESS) {
+		cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+		wl_escan_print_sync_id(status, cfg->escan_info.cur_sync_id,
+			escan_result->sync_id);
+
+		if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
+			WL_INFORM(("ACTION FRAME SCAN DONE\n"));
+			wl_clr_p2p_status(cfg, SCANNING);
+			wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
+			if (cfg->afx_hdl->peer_chan == WL_INVALID)
+				complete(&cfg->act_frm_scan);
+		} else if ((likely(cfg->scan_request)) || (cfg->sched_scan_running)) {
+			WL_INFORM(("ESCAN COMPLETED\n"));
+			cfg->bss_list = wl_escan_get_buf(cfg, FALSE);
+			if (!scan_req_match(cfg)) {
+				WL_TRACE_HW4(("SCAN COMPLETED: scanned AP count=%d\n",
+					cfg->bss_list->count));
+			}
+			wl_inform_bss(cfg);
+			wl_notify_escan_complete(cfg, ndev, false, false);
+		}
+		wl_escan_increment_sync_id(cfg, SCAN_BUF_NEXT);
+	}
+#ifdef GSCAN_SUPPORT
+	else if ((status == WLC_E_STATUS_ABORT) || (status == WLC_E_STATUS_NEWSCAN)) {
+		if (status == WLC_E_STATUS_NEWSCAN) {
+			WL_ERR(("WLC_E_STATUS_NEWSCAN : scan_request[%p]\n", cfg->scan_request));
+			WL_ERR(("sync_id[%d], bss_count[%d]\n", escan_result->sync_id,
+				escan_result->bss_count));
+		}
+#else
+	else if (status == WLC_E_STATUS_ABORT) {
+#endif /* GSCAN_SUPPORT */
+		cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+		wl_escan_print_sync_id(status, escan_result->sync_id,
+			cfg->escan_info.cur_sync_id);
+		if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
+			WL_INFORM(("ACTION FRAME SCAN DONE\n"));
+			wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
+			wl_clr_p2p_status(cfg, SCANNING);
+			if (cfg->afx_hdl->peer_chan == WL_INVALID)
+				complete(&cfg->act_frm_scan);
+		} else if ((likely(cfg->scan_request)) || (cfg->sched_scan_running)) {
+			WL_INFORM(("ESCAN ABORTED\n"));
+			cfg->bss_list = wl_escan_get_buf(cfg, TRUE);
+			if (!scan_req_match(cfg)) {
+				WL_TRACE_HW4(("SCAN ABORTED: scanned AP count=%d\n",
+					cfg->bss_list->count));
+			}
+			wl_inform_bss(cfg);
+			wl_notify_escan_complete(cfg, ndev, true, false);
+		}
+		wl_escan_increment_sync_id(cfg, SCAN_BUF_CNT);
+	} else if (status == WLC_E_STATUS_NEWSCAN) {
+		WL_ERR(("WLC_E_STATUS_NEWSCAN : scan_request[%p]\n", cfg->scan_request));
+		WL_ERR(("sync_id[%d], bss_count[%d]\n", escan_result->sync_id,
+			escan_result->bss_count));
+	} else if (status == WLC_E_STATUS_TIMEOUT) {
+		WL_ERR(("WLC_E_STATUS_TIMEOUT : scan_request[%p]\n", cfg->scan_request));
+		WL_ERR(("reason[0x%x]\n", e->reason));
+		if (e->reason == 0xFFFFFFFF) {
+			wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true);
+		}
+	} else {
+		WL_ERR(("unexpected Escan Event %d : abort\n", status));
+		cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+		wl_escan_print_sync_id(status, escan_result->sync_id,
+			cfg->escan_info.cur_sync_id);
+		if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
+			WL_INFORM(("ACTION FRAME SCAN DONE\n"));
+			wl_clr_p2p_status(cfg, SCANNING);
+			wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
+			if (cfg->afx_hdl->peer_chan == WL_INVALID)
+				complete(&cfg->act_frm_scan);
+		} else if ((likely(cfg->scan_request)) || (cfg->sched_scan_running)) {
+			cfg->bss_list = wl_escan_get_buf(cfg, TRUE);
+			if (!scan_req_match(cfg)) {
+				WL_TRACE_HW4(("SCAN ABORTED(UNEXPECTED): "
+					"scanned AP count=%d\n",
+					cfg->bss_list->count));
+			}
+			wl_inform_bss(cfg);
+			wl_notify_escan_complete(cfg, ndev, true, false);
+		}
+		wl_escan_increment_sync_id(cfg, 2);
+	}
+exit:
+	mutex_unlock(&cfg->usr_sync);
+	return err;
+}
+
+static void wl_cfg80211_concurrent_roam(struct bcm_cfg80211 *cfg, int enable)
+{
+	u32 connected_cnt  = wl_get_drv_status_all(cfg, CONNECTED);
+	struct net_info *iter, *next;
+	int err;
+
+	if (!cfg->roamoff_on_concurrent)
+		return;
+	if (enable && connected_cnt > 1) {
+		for_each_ndev(cfg, iter, next) {
+			/* Save the current roam setting */
+			if ((err = wldev_iovar_getint(iter->ndev, "roam_off",
+				(s32 *)&iter->roam_off)) != BCME_OK) {
+				WL_ERR(("%s:Failed to get current roam setting err %d\n",
+					iter->ndev->name, err));
+				continue;
+			}
+			if ((err = wldev_iovar_setint(iter->ndev, "roam_off", 1)) != BCME_OK) {
+				WL_ERR((" %s:failed to set roam_off : %d\n",
+					iter->ndev->name, err));
+			}
+		}
+	}
+	else if (!enable) {
+		for_each_ndev(cfg, iter, next) {
+			if (iter->roam_off != WL_INVALID) {
+				if ((err = wldev_iovar_setint(iter->ndev, "roam_off",
+					iter->roam_off)) == BCME_OK)
+					iter->roam_off = WL_INVALID;
+				else {
+					WL_ERR((" %s:failed to set roam_off : %d\n",
+						iter->ndev->name, err));
+				}
+			}
+		}
+	}
+	return;
+}
+
+static void wl_cfg80211_determine_vsdb_mode(struct bcm_cfg80211 *cfg)
+{
+	struct net_info *iter, *next;
+	u32 ctl_chan = 0;
+	u32 chanspec = 0;
+	u32 pre_ctl_chan = 0;
+	u32 connected_cnt  = wl_get_drv_status_all(cfg, CONNECTED);
+	cfg->vsdb_mode = false;
+
+	if (connected_cnt <= 1)  {
+		return;
+	}
+	for_each_ndev(cfg, iter, next) {
+		chanspec = 0;
+		ctl_chan = 0;
+		if (wl_get_drv_status(cfg, CONNECTED, iter->ndev)) {
+			if (wldev_iovar_getint(iter->ndev, "chanspec",
+				(s32 *)&chanspec) == BCME_OK) {
+				chanspec = wl_chspec_driver_to_host(chanspec);
+				ctl_chan = wf_chspec_ctlchan(chanspec);
+				wl_update_prof(cfg, iter->ndev, NULL,
+					&ctl_chan, WL_PROF_CHAN);
+			}
+			if (!cfg->vsdb_mode) {
+				if (!pre_ctl_chan && ctl_chan)
+					pre_ctl_chan = ctl_chan;
+				else if (pre_ctl_chan && (pre_ctl_chan != ctl_chan)) {
+					cfg->vsdb_mode = true;
+				}
+			}
+		}
+	}
+	WL_ERR(("%s concurrency is enabled\n", cfg->vsdb_mode ? "Multi Channel" : "Same Channel"));
+	return;
+}
+
+static s32 wl_notifier_change_state(struct bcm_cfg80211 *cfg, struct net_info *_net_info,
+	enum wl_status state, bool set)
+{
+	s32 pm = PM_FAST;
+	s32 err = BCME_OK;
+	u32 mode;
+	u32 chan = 0;
+	u32 frameburst;
+	struct net_info *iter, *next;
+	struct net_device *primary_dev = bcmcfg_to_prmry_ndev(cfg);
+	WL_DBG(("Enter state %d set %d _net_info->pm_restore %d iface %s\n",
+		state, set, _net_info->pm_restore, _net_info->ndev->name));
+
+	mode = wl_get_mode_by_netdev(cfg, _net_info->ndev);
+	if (set) {
+		if (state == WL_STATUS_CONNECTED) {
+			wl_cfg80211_concurrent_roam(cfg, 1);
+
+			if (mode == WL_MODE_AP) {
+				if (wl_add_remove_eventmsg(primary_dev, WLC_E_P2P_PROBREQ_MSG, false))
+					WL_ERR((" failed to unset WLC_E_P2P_PROPREQ_MSG\n"));
+			}
+			wl_cfg80211_determine_vsdb_mode(cfg);
+			if (cfg->vsdb_mode || _net_info->pm_block) {
+				/* Delete pm_enable_work */
+				wl_add_remove_pm_enable_work(cfg, FALSE, WL_HANDLER_MAINTAIN);
+				/* save PM_FAST in _net_info to restore this
+				 * if _net_info->pm_block is false
+				 */
+				if (!_net_info->pm_block && (mode == WL_MODE_BSS)) {
+					_net_info->pm = PM_FAST;
+					_net_info->pm_restore = true;
+				}
+				pm = PM_OFF;
+				for_each_ndev(cfg, iter, next) {
+					if (iter->pm_restore)
+						continue;
+					/* Save the current power mode */
+					err = wldev_ioctl(iter->ndev, WLC_GET_PM, &iter->pm,
+							sizeof(iter->pm), false);
+					WL_DBG(("%s:power save %s\n", iter->ndev->name,
+								iter->pm ? "enabled" : "disabled"));
+					if (!err && iter->pm) {
+						iter->pm_restore = true;
+					}
+				}
+				for_each_ndev(cfg, iter, next) {
+					if ((err = wldev_ioctl(iter->ndev, WLC_SET_PM, &pm,
+									sizeof(pm), true)) != 0) {
+						if (err == -ENODEV)
+							WL_DBG(("%s:netdev not ready\n", iter->ndev->name));
+						else
+							WL_ERR(("%s:error (%d)\n", iter->ndev->name, err));
+					} else {
+						wl_cfg80211_update_power_mode(iter->ndev);
+					}
+				}
+			} else {
+				/*
+				 * Re-enable PM2 mode for static IP and roaming event
+				 */
+				pm = PM_FAST;
+
+				for_each_ndev(cfg, iter, next) {
+					if ((err = wldev_ioctl(iter->ndev, WLC_SET_PM, &pm,
+									sizeof(pm), true)) != 0) {
+						if (err == -ENODEV)
+							WL_DBG(("%s:netdev not ready\n", iter->ndev->name));
+						else
+							WL_ERR(("%s:error (%d)\n", iter->ndev->name, err));
+					}
+				}
+
+				if (cfg->pm_enable_work_on) {
+					wl_add_remove_pm_enable_work(cfg, FALSE, WL_HANDLER_DEL);
+				}
+			}
+#if defined(WLTDLS)
+#if defined(DISABLE_TDLS_IN_P2P)
+			if (cfg->vsdb_mode || p2p_is_on(cfg))
+#else
+			if (cfg->vsdb_mode)
+#endif /* defined(DISABLE_TDLS_IN_P2P) */
+			{
+
+				err = wldev_iovar_setint(primary_dev, "tdls_enable", 0);
+			}
+#endif /* defined(WLTDLS) */
+			if (cfg->vsdb_mode) {
+				/* disable frameburst on multichannel */
+				frameburst = 0;
+				if (wldev_ioctl(primary_dev, WLC_SET_FAKEFRAG, &frameburst,
+					sizeof(frameburst), true) != 0) {
+					WL_DBG(("frameburst set 0 error\n"));
+				} else {
+					WL_DBG(("Frameburst Disabled\n"));
+				}
+			}
+		}
+	} else { /* clear */
+		if (state == WL_STATUS_CONNECTED) {
+			chan = 0;
+			/* clear chan information when the net device is disconnected */
+			wl_update_prof(cfg, _net_info->ndev, NULL, &chan, WL_PROF_CHAN);
+			wl_cfg80211_determine_vsdb_mode(cfg);
+			for_each_ndev(cfg, iter, next) {
+				if (iter->pm_restore && iter->pm) {
+					WL_DBG(("%s:restoring power save %s\n",
+							iter->ndev->name, (iter->pm ? "enabled" : "disabled")));
+					err = wldev_ioctl(iter->ndev, WLC_SET_PM, &iter->pm,
+								sizeof(iter->pm), true);
+					if (unlikely(err)) {
+						if (err == -ENODEV)
+							WL_DBG(("%s:netdev not ready\n", iter->ndev->name));
+						else
+							WL_ERR(("%s:error(%d)\n", iter->ndev->name, err));
+						break;
+					}
+					iter->pm_restore = 0;
+					wl_cfg80211_update_power_mode(iter->ndev);
+				}
+			}
+			wl_cfg80211_concurrent_roam(cfg, 0);
+
+			if (!cfg->vsdb_mode) {
+#if defined(WLTDLS)
+				err = wldev_iovar_setint(primary_dev, "tdls_enable", 1);
+#endif /* defined(WLTDLS) */
+				/* enable frameburst on single channel */
+				frameburst = 1;
+				if (wldev_ioctl(primary_dev, WLC_SET_FAKEFRAG, &frameburst,
+					sizeof(frameburst), true) != 0) {
+					WL_DBG(("frameburst set 1 error\n"));
+				} else {
+					WL_DBG(("Frameburst Enabled\n"));
+				}
+			}
+		} else if (state == WL_STATUS_DISCONNECTING) {
+			wake_up_interruptible(&cfg->event_sync_wq);
+		}
+	}
+	return err;
+}
+static s32 wl_init_scan(struct bcm_cfg80211 *cfg)
+{
+	int err = 0;
+
+	cfg->evt_handler[WLC_E_ESCAN_RESULT] = wl_escan_handler;
+	cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+	wl_escan_init_sync_id(cfg);
+
+	/* Init scan_timeout timer */
+	init_timer(&cfg->scan_timeout);
+	cfg->scan_timeout.data = (unsigned long) cfg;
+	cfg->scan_timeout.function = wl_scan_timeout;
+
+	return err;
+}
+
+static s32 wl_init_priv(struct bcm_cfg80211 *cfg)
+{
+	struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+	struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+	s32 err = 0;
+
+	cfg->scan_request = NULL;
+	cfg->pwr_save = !!(wiphy->flags & WIPHY_FLAG_PS_ON_BY_DEFAULT);
+	cfg->roam_on = false;
+	cfg->active_scan = true;
+	cfg->rf_blocked = false;
+	cfg->vsdb_mode = false;
+#if defined(BCMSDIO)
+	cfg->wlfc_on = false;
+#endif
+	cfg->roamoff_on_concurrent = true;
+	cfg->disable_roam_event = false;
+	/* register interested state */
+	set_bit(WL_STATUS_CONNECTED, &cfg->interrested_state);
+	set_bit(WL_STATUS_DISCONNECTING, &cfg->interrested_state);
+	spin_lock_init(&cfg->cfgdrv_lock);
+	mutex_init(&cfg->ioctl_buf_sync);
+	init_waitqueue_head(&cfg->netif_change_event);
+	init_waitqueue_head(&cfg->event_sync_wq);
+	init_completion(&cfg->send_af_done);
+	init_completion(&cfg->iface_disable);
+	wl_init_eq(cfg);
+	err = wl_init_priv_mem(cfg);
+	if (err)
+		return err;
+	if (wl_create_event_handler(cfg))
+		return -ENOMEM;
+	wl_init_event_handler(cfg);
+	mutex_init(&cfg->usr_sync);
+	mutex_init(&cfg->event_sync);
+	err = wl_init_scan(cfg);
+	if (err)
+		return err;
+	wl_init_conf(cfg->conf);
+	wl_init_prof(cfg, ndev);
+	wl_link_down(cfg);
+	DNGL_FUNC(dhd_cfg80211_init, (cfg));
+
+	return err;
+}
+
+static void wl_deinit_priv(struct bcm_cfg80211 *cfg)
+{
+	DNGL_FUNC(dhd_cfg80211_deinit, (cfg));
+	wl_destroy_event_handler(cfg);
+	wl_flush_eq(cfg);
+	wl_link_down(cfg);
+	del_timer_sync(&cfg->scan_timeout);
+	wl_deinit_priv_mem(cfg);
+	if (wl_cfg80211_netdev_notifier_registered) {
+		wl_cfg80211_netdev_notifier_registered = FALSE;
+		unregister_netdevice_notifier(&wl_cfg80211_netdev_notifier);
+	}
+}
+
+#if defined(WL_ENABLE_P2P_IF)
+static s32 wl_cfg80211_attach_p2p(void)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+
+	WL_TRACE(("Enter \n"));
+
+	if (wl_cfgp2p_register_ndev(cfg) < 0) {
+		WL_ERR(("P2P attach failed. \n"));
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static s32  wl_cfg80211_detach_p2p(void)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	struct wireless_dev *wdev;
+
+	WL_DBG(("Enter \n"));
+	if (!cfg) {
+		WL_ERR(("Invalid Ptr\n"));
+		return -EINVAL;
+	} else
+		wdev = cfg->p2p_wdev;
+
+	if (!wdev) {
+		WL_ERR(("Invalid Ptr\n"));
+		return -EINVAL;
+	}
+
+	wl_cfgp2p_unregister_ndev(cfg);
+
+	cfg->p2p_wdev = NULL;
+	cfg->p2p_net = NULL;
+	WL_DBG(("Freeing 0x%08x \n", (unsigned int)wdev));
+	kfree(wdev);
+
+	return 0;
+}
+#endif
+
+s32 wl_cfg80211_attach_post(struct net_device *ndev)
+{
+	struct bcm_cfg80211 * cfg = NULL;
+	s32 err = 0;
+	s32 ret = 0;
+	WL_TRACE(("In\n"));
+	if (unlikely(!ndev)) {
+		WL_ERR(("ndev is invaild\n"));
+		return -ENODEV;
+	}
+	cfg = g_bcm_cfg;
+	if (unlikely(!cfg)) {
+		WL_ERR(("cfg is invaild\n"));
+		return -EINVAL;
+	}
+	if (!wl_get_drv_status(cfg, READY, ndev)) {
+		if (cfg->wdev) {
+			ret = wl_cfgp2p_supported(cfg, ndev);
+			if (ret > 0) {
+#if !defined(WL_ENABLE_P2P_IF)
+				cfg->wdev->wiphy->interface_modes |=
+					(BIT(NL80211_IFTYPE_P2P_CLIENT)|
+					BIT(NL80211_IFTYPE_P2P_GO));
+#endif /* !WL_ENABLE_P2P_IF */
+				if ((err = wl_cfgp2p_init_priv(cfg)) != 0)
+					goto fail;
+
+#if defined(WL_ENABLE_P2P_IF)
+				if (cfg->p2p_net) {
+					/* Update MAC addr for p2p0 interface here. */
+					memcpy(cfg->p2p_net->dev_addr, ndev->dev_addr, ETH_ALEN);
+					cfg->p2p_net->dev_addr[0] |= 0x02;
+					WL_ERR(("%s: p2p_dev_addr="MACDBG "\n",
+						cfg->p2p_net->name,
+						MAC2STRDBG(cfg->p2p_net->dev_addr)));
+				} else {
+					WL_ERR(("p2p_net not yet populated."
+					" Couldn't update the MAC Address for p2p0 \n"));
+					return -ENODEV;
+				}
+#endif /* WL_ENABLE_P2P_IF */
+				cfg->p2p_supported = true;
+			} else if (ret == 0) {
+				if ((err = wl_cfgp2p_init_priv(cfg)) != 0)
+					goto fail;
+			} else {
+				/* SDIO bus timeout */
+				err = -ENODEV;
+				goto fail;
+			}
+		}
+	}
+	wl_set_drv_status(cfg, READY, ndev);
+fail:
+	return err;
+}
+
+s32 wl_cfg80211_attach(struct net_device *ndev, void *context)
+{
+	struct wireless_dev *wdev;
+	struct bcm_cfg80211 *cfg;
+	s32 err = 0;
+	struct device *dev;
+
+	WL_TRACE(("In\n"));
+	if (!ndev) {
+		WL_ERR(("ndev is invaild\n"));
+		return -ENODEV;
+	}
+	WL_DBG(("func %p\n", wl_cfg80211_get_parent_dev()));
+	dev = wl_cfg80211_get_parent_dev();
+
+	wdev = kzalloc(sizeof(*wdev), GFP_KERNEL);
+	if (unlikely(!wdev)) {
+		WL_ERR(("Could not allocate wireless device\n"));
+		return -ENOMEM;
+	}
+	err = wl_setup_wiphy(wdev, dev, context);
+	if (unlikely(err)) {
+		kfree(wdev);
+		return -ENOMEM;
+	}
+	wdev->iftype = wl_mode_to_nl80211_iftype(WL_MODE_BSS);
+	cfg = (struct bcm_cfg80211 *)wiphy_priv(wdev->wiphy);
+	cfg->wdev = wdev;
+	cfg->pub = context;
+	INIT_LIST_HEAD(&cfg->net_list);
+	ndev->ieee80211_ptr = wdev;
+	SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
+	wdev->netdev = ndev;
+	cfg->state_notifier = wl_notifier_change_state;
+	err = wl_alloc_netinfo(cfg, ndev, wdev, WL_MODE_BSS, PM_ENABLE);
+	if (err) {
+		WL_ERR(("Failed to alloc net_info (%d)\n", err));
+		goto cfg80211_attach_out;
+	}
+	err = wl_init_priv(cfg);
+	if (err) {
+		WL_ERR(("Failed to init iwm_priv (%d)\n", err));
+		goto cfg80211_attach_out;
+	}
+
+	err = wl_setup_rfkill(cfg, TRUE);
+	if (err) {
+		WL_ERR(("Failed to setup rfkill %d\n", err));
+		goto cfg80211_attach_out;
+	}
+#ifdef DEBUGFS_CFG80211
+	err = wl_setup_debugfs(cfg);
+	if (err) {
+		WL_ERR(("Failed to setup debugfs %d\n", err));
+		goto cfg80211_attach_out;
+	}
+#endif
+	if (!wl_cfg80211_netdev_notifier_registered) {
+		wl_cfg80211_netdev_notifier_registered = TRUE;
+		err = register_netdevice_notifier(&wl_cfg80211_netdev_notifier);
+		if (err) {
+			wl_cfg80211_netdev_notifier_registered = FALSE;
+			WL_ERR(("Failed to register notifierl %d\n", err));
+			goto cfg80211_attach_out;
+		}
+	}
+#if defined(COEX_DHCP)
+	cfg->btcoex_info = wl_cfg80211_btcoex_init(cfg->wdev->netdev);
+	if (!cfg->btcoex_info)
+		goto cfg80211_attach_out;
+#endif
+
+	g_bcm_cfg = cfg;
+
+#if defined(WL_ENABLE_P2P_IF)
+	err = wl_cfg80211_attach_p2p();
+	if (err)
+		goto cfg80211_attach_out;
+#endif
+
+	return err;
+
+cfg80211_attach_out:
+	wl_setup_rfkill(cfg, FALSE);
+	wl_free_wdev(cfg);
+	return err;
+}
+
+void wl_cfg80211_detach(void *para)
+{
+	struct bcm_cfg80211 *cfg;
+
+	(void)para;
+	cfg = g_bcm_cfg;
+
+	WL_TRACE(("In\n"));
+
+	wl_add_remove_pm_enable_work(cfg, FALSE, WL_HANDLER_DEL);
+
+#if defined(COEX_DHCP)
+	wl_cfg80211_btcoex_deinit();
+	cfg->btcoex_info = NULL;
+#endif
+
+	wl_setup_rfkill(cfg, FALSE);
+#ifdef DEBUGFS_CFG80211
+	wl_free_debugfs(cfg);
+#endif
+	if (cfg->p2p_supported) {
+		if (timer_pending(&cfg->p2p->listen_timer))
+			del_timer_sync(&cfg->p2p->listen_timer);
+		wl_cfgp2p_deinit_priv(cfg);
+	}
+
+	if (timer_pending(&cfg->scan_timeout))
+		del_timer_sync(&cfg->scan_timeout);
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+	wl_cfgp2p_del_p2p_disc_if(cfg->p2p_wdev, cfg);
+#endif /* WL_CFG80211_P2P_DEV_IF  */
+#if defined(WL_ENABLE_P2P_IF)
+	wl_cfg80211_detach_p2p();
+#endif
+
+	wl_cfg80211_ibss_vsie_free(cfg);
+	wl_deinit_priv(cfg);
+	g_bcm_cfg = NULL;
+	wl_cfg80211_clear_parent_dev();
+	wl_free_wdev(cfg);
+	/* PLEASE do NOT call any function after wl_free_wdev, the driver's private
+	 * structure "cfg", which is the private part of wiphy, has been freed in
+	 * wl_free_wdev !!!!!!!!!!!
+	 */
+}
+
+static void wl_wakeup_event(struct bcm_cfg80211 *cfg)
+{
+	if (cfg->event_tsk.thr_pid >= 0) {
+		DHD_OS_WAKE_LOCK(cfg->pub);
+		up(&cfg->event_tsk.sema);
+	}
+}
+
+static s32 wl_event_handler(void *data)
+{
+	struct bcm_cfg80211 *cfg = NULL;
+	struct wl_event_q *e;
+	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+	bcm_struct_cfgdev *cfgdev = NULL;
+
+	cfg = (struct bcm_cfg80211 *)tsk->parent;
+
+	WL_ERR(("tsk Enter, tsk = 0x%p\n", tsk));
+
+	while (down_interruptible (&tsk->sema) == 0) {
+		SMP_RD_BARRIER_DEPENDS();
+		if (tsk->terminated)
+			break;
+		while ((e = wl_deq_event(cfg))) {
+			WL_DBG(("event type (%d), if idx: %d\n", e->etype, e->emsg.ifidx));
+			/* All P2P device address related events comes on primary interface since
+			 * there is no corresponding bsscfg for P2P interface. Map it to p2p0
+			 * interface.
+			 */
+#if defined(WL_CFG80211_P2P_DEV_IF)
+			if (WL_IS_P2P_DEV_EVENT(e) && (cfg->p2p_wdev)) {
+				cfgdev = bcmcfg_to_p2p_wdev(cfg);
+			} else {
+				struct net_device *ndev = NULL;
+
+				ndev = dhd_idx2net((struct dhd_pub *)(cfg->pub), e->emsg.ifidx);
+				if (ndev)
+					cfgdev = ndev_to_wdev(ndev);
+			}
+#elif defined(WL_ENABLE_P2P_IF)
+			if (WL_IS_P2P_DEV_EVENT(e) && (cfg->p2p_net)) {
+				cfgdev = cfg->p2p_net;
+			} else {
+				cfgdev = dhd_idx2net((struct dhd_pub *)(cfg->pub),
+					e->emsg.ifidx);
+			}
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+			if (!cfgdev) {
+#if defined(WL_CFG80211_P2P_DEV_IF)
+				cfgdev = bcmcfg_to_prmry_wdev(cfg);
+#elif defined(WL_ENABLE_P2P_IF)
+				cfgdev = bcmcfg_to_prmry_ndev(cfg);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+			}
+			if (e->etype < WLC_E_LAST && cfg->evt_handler[e->etype]) {
+				cfg->evt_handler[e->etype] (cfg, cfgdev, &e->emsg, e->edata);
+			} else {
+				WL_DBG(("Unknown Event (%d): ignoring\n", e->etype));
+			}
+			wl_put_event(e);
+		}
+		DHD_OS_WAKE_UNLOCK(cfg->pub);
+	}
+	WL_ERR(("was terminated\n"));
+	complete_and_exit(&tsk->completed, 0);
+	return 0;
+}
+
+void
+wl_cfg80211_event(struct net_device *ndev, const wl_event_msg_t * e, void *data)
+{
+	u32 event_type = ntoh32(e->event_type);
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+
+#if (WL_DBG_LEVEL > 0)
+	s8 *estr = (event_type <= sizeof(wl_dbg_estr) / WL_DBG_ESTR_MAX - 1) ?
+	    wl_dbg_estr[event_type] : (s8 *) "Unknown";
+	WL_DBG(("event_type (%d):" "WLC_E_" "%s\n", event_type, estr));
+#endif /* (WL_DBG_LEVEL > 0) */
+
+	if (wl_get_p2p_status(cfg, IF_CHANGING) || wl_get_p2p_status(cfg, IF_ADDING)) {
+		WL_ERR(("during IF change, ignore event %d\n", event_type));
+		return;
+	}
+
+	if (ndev != bcmcfg_to_prmry_ndev(cfg) && cfg->p2p_supported) {
+		if (ndev != wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION) &&
+#if defined(WL_ENABLE_P2P_IF)
+			(ndev != (cfg->p2p_net ? cfg->p2p_net :
+			wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE))) &&
+#else
+			(ndev != wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE)) &&
+#endif /* WL_ENABLE_P2P_IF */
+			TRUE) {
+			WL_ERR(("ignore event %d, not interested\n", event_type));
+			return;
+		}
+	}
+
+	if (event_type == WLC_E_PFN_NET_FOUND) {
+		WL_DBG((" PNOEVENT: PNO_NET_FOUND\n"));
+	}
+	else if (event_type == WLC_E_PFN_NET_LOST) {
+		WL_DBG((" PNOEVENT: PNO_NET_LOST\n"));
+	}
+
+	if (likely(!wl_enq_event(cfg, ndev, event_type, e, data)))
+		wl_wakeup_event(cfg);
+}
+
+static void wl_init_eq(struct bcm_cfg80211 *cfg)
+{
+	wl_init_eq_lock(cfg);
+	INIT_LIST_HEAD(&cfg->eq_list);
+}
+
+static void wl_flush_eq(struct bcm_cfg80211 *cfg)
+{
+	struct wl_event_q *e;
+	unsigned long flags;
+
+	flags = wl_lock_eq(cfg);
+	while (!list_empty(&cfg->eq_list)) {
+		e = list_first_entry(&cfg->eq_list, struct wl_event_q, eq_list);
+		list_del(&e->eq_list);
+		kfree(e);
+	}
+	wl_unlock_eq(cfg, flags);
+}
+
+/*
+* retrieve first queued event from head
+*/
+
+static struct wl_event_q *wl_deq_event(struct bcm_cfg80211 *cfg)
+{
+	struct wl_event_q *e = NULL;
+	unsigned long flags;
+
+	flags = wl_lock_eq(cfg);
+	if (likely(!list_empty(&cfg->eq_list))) {
+		e = list_first_entry(&cfg->eq_list, struct wl_event_q, eq_list);
+		list_del(&e->eq_list);
+	}
+	wl_unlock_eq(cfg, flags);
+
+	return e;
+}
+
+/*
+ * push event to tail of the queue
+ */
+
+static s32
+wl_enq_event(struct bcm_cfg80211 *cfg, struct net_device *ndev, u32 event,
+	const wl_event_msg_t *msg, void *data)
+{
+	struct wl_event_q *e;
+	s32 err = 0;
+	uint32 evtq_size;
+	uint32 data_len;
+	unsigned long flags;
+	gfp_t aflags;
+
+	data_len = 0;
+	if (data)
+		data_len = ntoh32(msg->datalen);
+	evtq_size = sizeof(struct wl_event_q) + data_len;
+	aflags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL;
+	e = kzalloc(evtq_size, aflags);
+	if (unlikely(!e)) {
+		WL_ERR(("event alloc failed\n"));
+		return -ENOMEM;
+	}
+	e->etype = event;
+	memcpy(&e->emsg, msg, sizeof(wl_event_msg_t));
+	if (data)
+		memcpy(e->edata, data, data_len);
+	flags = wl_lock_eq(cfg);
+	list_add_tail(&e->eq_list, &cfg->eq_list);
+	wl_unlock_eq(cfg, flags);
+
+	return err;
+}
+
+static void wl_put_event(struct wl_event_q *e)
+{
+	kfree(e);
+}
+
+static s32 wl_config_ifmode(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 iftype)
+{
+	s32 infra = 0;
+	s32 err = 0;
+	s32 mode = 0;
+	switch (iftype) {
+	case NL80211_IFTYPE_MONITOR:
+	case NL80211_IFTYPE_WDS:
+		WL_ERR(("type (%d) : currently we do not support this mode\n",
+			iftype));
+		err = -EINVAL;
+		return err;
+	case NL80211_IFTYPE_ADHOC:
+		mode = WL_MODE_IBSS;
+		break;
+	case NL80211_IFTYPE_STATION:
+	case NL80211_IFTYPE_P2P_CLIENT:
+		mode = WL_MODE_BSS;
+		infra = 1;
+		break;
+	case NL80211_IFTYPE_AP:
+	case NL80211_IFTYPE_P2P_GO:
+		mode = WL_MODE_AP;
+		infra = 1;
+		break;
+	default:
+		err = -EINVAL;
+		WL_ERR(("invalid type (%d)\n", iftype));
+		return err;
+	}
+	infra = htod32(infra);
+	err = wldev_ioctl(ndev, WLC_SET_INFRA, &infra, sizeof(infra), true);
+	if (unlikely(err)) {
+		WL_ERR(("WLC_SET_INFRA error (%d)\n", err));
+		return err;
+	}
+
+	wl_set_mode_by_netdev(cfg, ndev, mode);
+
+	return 0;
+}
+
+void wl_cfg80211_add_to_eventbuffer(struct wl_eventmsg_buf *ev, u16 event, bool set)
+{
+	if (!ev || (event > WLC_E_LAST))
+		return;
+
+	if (ev->num < MAX_EVENT_BUF_NUM) {
+		ev->event[ev->num].type = event;
+		ev->event[ev->num].set = set;
+		ev->num++;
+	} else {
+		WL_ERR(("evenbuffer doesn't support > %u events. Update"
+			" the define MAX_EVENT_BUF_NUM \n", MAX_EVENT_BUF_NUM));
+		ASSERT(0);
+	}
+}
+
+s32 wl_cfg80211_apply_eventbuffer(
+	struct net_device *ndev,
+	struct bcm_cfg80211 *cfg,
+	wl_eventmsg_buf_t *ev)
+{
+	char eventmask[WL_EVENTING_MASK_LEN];
+	int i, ret = 0;
+	s8 iovbuf[WL_EVENTING_MASK_LEN + 12];
+
+	if (!ev || (!ev->num))
+		return -EINVAL;
+
+	mutex_lock(&cfg->event_sync);
+
+	/* Read event_msgs mask */
+	bcm_mkiovar("event_msgs", NULL, 0, iovbuf,
+		sizeof(iovbuf));
+	ret = wldev_ioctl(ndev, WLC_GET_VAR, iovbuf, sizeof(iovbuf), false);
+	if (unlikely(ret)) {
+		WL_ERR(("Get event_msgs error (%d)\n", ret));
+		goto exit;
+	}
+	memcpy(eventmask, iovbuf, WL_EVENTING_MASK_LEN);
+
+	/* apply the set bits */
+	for (i = 0; i < ev->num; i++) {
+		if (ev->event[i].set)
+			setbit(eventmask, ev->event[i].type);
+		else
+			clrbit(eventmask, ev->event[i].type);
+	}
+
+	/* Write updated Event mask */
+	bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf,
+		sizeof(iovbuf));
+	ret = wldev_ioctl(ndev, WLC_SET_VAR, iovbuf, sizeof(iovbuf), true);
+	if (unlikely(ret)) {
+		WL_ERR(("Set event_msgs error (%d)\n", ret));
+	}
+
+exit:
+	mutex_unlock(&cfg->event_sync);
+	return ret;
+}
+
+s32 wl_add_remove_eventmsg(struct net_device *ndev, u16 event, bool add)
+{
+	s8 iovbuf[WL_EVENTING_MASK_LEN + 12];
+	s8 eventmask[WL_EVENTING_MASK_LEN];
+	s32 err = 0;
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+
+	if (!ndev || !cfg)
+		return -ENODEV;
+
+	mutex_lock(&cfg->event_sync);
+
+	/* Setup event_msgs */
+	bcm_mkiovar("event_msgs", NULL, 0, iovbuf,
+		sizeof(iovbuf));
+	err = wldev_ioctl(ndev, WLC_GET_VAR, iovbuf, sizeof(iovbuf), false);
+	if (unlikely(err)) {
+		WL_ERR(("Get event_msgs error (%d)\n", err));
+		goto eventmsg_out;
+	}
+	memcpy(eventmask, iovbuf, WL_EVENTING_MASK_LEN);
+	if (add) {
+		setbit(eventmask, event);
+	} else {
+		clrbit(eventmask, event);
+	}
+	bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf,
+		sizeof(iovbuf));
+	err = wldev_ioctl(ndev, WLC_SET_VAR, iovbuf, sizeof(iovbuf), true);
+	if (unlikely(err)) {
+		WL_ERR(("Set event_msgs error (%d)\n", err));
+		goto eventmsg_out;
+	}
+
+eventmsg_out:
+	mutex_unlock(&cfg->event_sync);
+	return err;
+}
+
+static int wl_construct_reginfo(struct bcm_cfg80211 *cfg, s32 bw_cap)
+{
+	struct net_device *dev = bcmcfg_to_prmry_ndev(cfg);
+	struct ieee80211_channel *band_chan_arr = NULL;
+	wl_uint32_list_t *list;
+	u32 i, j, index, n_2g, n_5g, band, channel, array_size;
+	u32 *n_cnt = NULL;
+	chanspec_t c = 0;
+	s32 err = BCME_OK;
+	bool update;
+	bool ht40_allowed;
+	u8 *pbuf = NULL;
+	bool dfs_radar_disabled = FALSE;
+
+#define LOCAL_BUF_LEN 1024
+	pbuf = kzalloc(LOCAL_BUF_LEN, GFP_KERNEL);
+
+	if (pbuf == NULL) {
+		WL_ERR(("failed to allocate local buf\n"));
+		return -ENOMEM;
+	}
+	list = (wl_uint32_list_t *)(void *)pbuf;
+	list->count = htod32(WL_NUMCHANSPECS);
+
+
+	err = wldev_iovar_getbuf_bsscfg(dev, "chanspecs", NULL,
+		0, pbuf, LOCAL_BUF_LEN, 0, &cfg->ioctl_buf_sync);
+	if (err != 0) {
+		WL_ERR(("get chanspecs failed with %d\n", err));
+		kfree(pbuf);
+		return err;
+	}
+#undef LOCAL_BUF_LEN
+
+	list = (wl_uint32_list_t *)(void *)pbuf;
+	band = array_size = n_2g = n_5g = 0;
+	for (i = 0; i < dtoh32(list->count); i++) {
+		index = 0;
+		update = false;
+		ht40_allowed = false;
+		c = (chanspec_t)dtoh32(list->element[i]);
+		c = wl_chspec_driver_to_host(c);
+		channel = wf_chspec_ctlchan(c);
+
+		if (!CHSPEC_IS40(c) && ! CHSPEC_IS20(c)) {
+			WL_DBG(("HT80/160/80p80 center channel : %d\n", channel));
+			continue;
+		}
+		if (CHSPEC_IS2G(c) && (channel >= CH_MIN_2G_CHANNEL) &&
+			(channel <= CH_MAX_2G_CHANNEL)) {
+			band_chan_arr = __wl_2ghz_channels;
+			array_size = ARRAYSIZE(__wl_2ghz_channels);
+			n_cnt = &n_2g;
+			band = IEEE80211_BAND_2GHZ;
+			ht40_allowed = (bw_cap  == WLC_N_BW_40ALL)? true : false;
+		} else if (CHSPEC_IS5G(c) && channel >= CH_MIN_5G_CHANNEL) {
+			band_chan_arr = __wl_5ghz_a_channels;
+			array_size = ARRAYSIZE(__wl_5ghz_a_channels);
+			n_cnt = &n_5g;
+			band = IEEE80211_BAND_5GHZ;
+			ht40_allowed = (bw_cap  == WLC_N_BW_20ALL)? false : true;
+		} else {
+			WL_ERR(("Invalid channel Sepc. 0x%x.\n", c));
+			continue;
+		}
+		if (!ht40_allowed && CHSPEC_IS40(c))
+			continue;
+		for (j = 0; (j < *n_cnt && (*n_cnt < array_size)); j++) {
+			if (band_chan_arr[j].hw_value == channel) {
+				update = true;
+				break;
+			}
+		}
+		if (update)
+			index = j;
+		else
+			index = *n_cnt;
+		if (index <  array_size) {
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38)
+			band_chan_arr[index].center_freq =
+				ieee80211_channel_to_frequency(channel);
+#else
+			band_chan_arr[index].center_freq =
+				ieee80211_channel_to_frequency(channel, band);
+#endif
+			band_chan_arr[index].hw_value = channel;
+
+			if (CHSPEC_IS40(c) && ht40_allowed) {
+				/* assuming the order is HT20, HT40 Upper,
+				 *  HT40 lower from chanspecs
+				 */
+				u32 ht40_flag = band_chan_arr[index].flags & IEEE80211_CHAN_NO_HT40;
+				if (CHSPEC_SB_UPPER(c)) {
+					if (ht40_flag == IEEE80211_CHAN_NO_HT40)
+						band_chan_arr[index].flags &=
+							~IEEE80211_CHAN_NO_HT40;
+					band_chan_arr[index].flags |= IEEE80211_CHAN_NO_HT40PLUS;
+				} else {
+					/* It should be one of
+					 * IEEE80211_CHAN_NO_HT40 or IEEE80211_CHAN_NO_HT40PLUS
+					 */
+					band_chan_arr[index].flags &= ~IEEE80211_CHAN_NO_HT40;
+					if (ht40_flag == IEEE80211_CHAN_NO_HT40)
+						band_chan_arr[index].flags |=
+							IEEE80211_CHAN_NO_HT40MINUS;
+				}
+			} else {
+				band_chan_arr[index].flags = IEEE80211_CHAN_NO_HT40;
+				if (!dfs_radar_disabled) {
+					if (band == IEEE80211_BAND_2GHZ)
+						channel |= WL_CHANSPEC_BAND_2G;
+					else
+						channel |= WL_CHANSPEC_BAND_5G;
+					channel |= WL_CHANSPEC_BW_20;
+					channel = wl_chspec_host_to_driver(channel);
+					err = wldev_iovar_getint(dev, "per_chan_info", &channel);
+					if (!err) {
+						if (channel & WL_CHAN_RADAR) {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
+							band_chan_arr[index].flags |=
+								(IEEE80211_CHAN_RADAR
+								| IEEE80211_CHAN_NO_IBSS);
+#else
+							band_chan_arr[index].flags |=
+								IEEE80211_CHAN_RADAR;
+#endif
+						}
+
+						if (channel & WL_CHAN_PASSIVE)
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
+							band_chan_arr[index].flags |=
+								IEEE80211_CHAN_PASSIVE_SCAN;
+#else
+							band_chan_arr[index].flags |=
+								IEEE80211_CHAN_NO_IR;
+#endif
+					} else if (err == BCME_UNSUPPORTED) {
+						dfs_radar_disabled = TRUE;
+						WL_ERR(("does not support per_chan_info\n"));
+					}
+				}
+			}
+			if (!update)
+				(*n_cnt)++;
+		}
+
+	}
+	__wl_band_2ghz.n_channels = n_2g;
+	__wl_band_5ghz_a.n_channels = n_5g;
+	kfree(pbuf);
+	return err;
+}
+
+s32 wl_update_wiphybands(struct bcm_cfg80211 *cfg, bool notify)
+{
+	struct wiphy *wiphy;
+	struct net_device *dev;
+	u32 bandlist[3];
+	u32 nband = 0;
+	u32 i = 0;
+	s32 err = 0;
+	s32 index = 0;
+	s32 nmode = 0;
+	bool rollback_lock = false;
+	s32 bw_cap = 0;
+	s32 cur_band = -1;
+	struct ieee80211_supported_band *bands[IEEE80211_NUM_BANDS] = {NULL, };
+
+	if (cfg == NULL) {
+		cfg = g_bcm_cfg;
+		mutex_lock(&cfg->usr_sync);
+		rollback_lock = true;
+	}
+	dev = bcmcfg_to_prmry_ndev(cfg);
+
+	memset(bandlist, 0, sizeof(bandlist));
+	err = wldev_ioctl(dev, WLC_GET_BANDLIST, bandlist,
+		sizeof(bandlist), false);
+	if (unlikely(err)) {
+		WL_ERR(("error read bandlist (%d)\n", err));
+		goto end_bands;
+	}
+	err = wldev_ioctl(dev, WLC_GET_BAND, &cur_band,
+		sizeof(s32), false);
+	if (unlikely(err)) {
+		WL_ERR(("error (%d)\n", err));
+		goto end_bands;
+	}
+
+	err = wldev_iovar_getint(dev, "nmode", &nmode);
+	if (unlikely(err)) {
+		WL_ERR(("error reading nmode (%d)\n", err));
+	} else {
+		/* For nmodeonly  check bw cap */
+		err = wldev_iovar_getint(dev, "mimo_bw_cap", &bw_cap);
+		if (unlikely(err)) {
+			WL_ERR(("error get mimo_bw_cap (%d)\n", err));
+		}
+	}
+
+	err = wl_construct_reginfo(cfg, bw_cap);
+	if (err) {
+		WL_ERR(("wl_construct_reginfo() fails err=%d\n", err));
+		if (err != BCME_UNSUPPORTED)
+			goto end_bands;
+		err = 0;
+	}
+	wiphy = bcmcfg_to_wiphy(cfg);
+	nband = bandlist[0];
+
+	for (i = 1; i <= nband && i < ARRAYSIZE(bandlist); i++) {
+		index = -1;
+		if (bandlist[i] == WLC_BAND_5G && __wl_band_5ghz_a.n_channels > 0) {
+			bands[IEEE80211_BAND_5GHZ] =
+				&__wl_band_5ghz_a;
+			index = IEEE80211_BAND_5GHZ;
+			if (bw_cap == WLC_N_BW_40ALL || bw_cap == WLC_N_BW_20IN2G_40IN5G)
+				bands[index]->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
+		}
+		else if (bandlist[i] == WLC_BAND_2G && __wl_band_2ghz.n_channels > 0) {
+			bands[IEEE80211_BAND_2GHZ] =
+				&__wl_band_2ghz;
+			index = IEEE80211_BAND_2GHZ;
+			if (bw_cap == WLC_N_BW_40ALL)
+				bands[index]->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
+		}
+
+		if ((index >= 0) && nmode) {
+			bands[index]->ht_cap.cap |=
+				(IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_DSSSCCK40);
+			bands[index]->ht_cap.ht_supported = TRUE;
+			bands[index]->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+			bands[index]->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16;
+			/* An HT shall support all EQM rates for one spatial stream */
+			bands[index]->ht_cap.mcs.rx_mask[0] = 0xff;
+		}
+
+	}
+
+	wiphy->bands[IEEE80211_BAND_2GHZ] = bands[IEEE80211_BAND_2GHZ];
+	wiphy->bands[IEEE80211_BAND_5GHZ] = bands[IEEE80211_BAND_5GHZ];
+
+	/* check if any bands populated otherwise makes 2Ghz as default */
+	if (wiphy->bands[IEEE80211_BAND_2GHZ] == NULL &&
+		wiphy->bands[IEEE80211_BAND_5GHZ] == NULL) {
+		/* Setup 2Ghz band as default */
+		wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz;
+	}
+
+	if (notify)
+		wiphy_apply_custom_regulatory(wiphy, &brcm_regdom);
+
+	end_bands:
+		if (rollback_lock)
+			mutex_unlock(&cfg->usr_sync);
+	return err;
+}
+
+static s32 __wl_cfg80211_up(struct bcm_cfg80211 *cfg)
+{
+	s32 err = 0;
+	struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+	struct wireless_dev *wdev = ndev->ieee80211_ptr;
+
+	WL_DBG(("In\n"));
+
+	err = dhd_config_dongle(cfg);
+	if (unlikely(err))
+		return err;
+
+	err = wl_config_ifmode(cfg, ndev, wdev->iftype);
+	if (unlikely(err && err != -EINPROGRESS)) {
+		WL_ERR(("wl_config_ifmode failed\n"));
+		if (err == -1) {
+			WL_ERR(("return error %d\n", err));
+			return err;
+		}
+	}
+	err = wl_update_wiphybands(cfg, true);
+	if (unlikely(err)) {
+		WL_ERR(("wl_update_wiphybands failed\n"));
+		if (err == -1) {
+			WL_ERR(("return error %d\n", err));
+			return err;
+		}
+	}
+
+	err = dhd_monitor_init(cfg->pub);
+
+	INIT_DELAYED_WORK(&cfg->pm_enable_work, wl_cfg80211_work_handler);
+	wl_set_drv_status(cfg, READY, ndev);
+	return err;
+}
+
+static s32 __wl_cfg80211_down(struct bcm_cfg80211 *cfg)
+{
+	s32 err = 0;
+	unsigned long flags;
+	struct net_info *iter, *next;
+	struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+#if defined(WL_CFG80211) && defined(WL_ENABLE_P2P_IF)
+	struct net_device *p2p_net = cfg->p2p_net;
+#endif
+	u32 bssidx = 0;
+#ifdef PROP_TXSTATUS_VSDB
+#if defined(BCMSDIO)
+	dhd_pub_t *dhd =  (dhd_pub_t *)(cfg->pub);
+#endif
+#endif /* PROP_TXSTATUS_VSDB */
+	WL_DBG(("In\n"));
+	/* Delete pm_enable_work */
+	wl_add_remove_pm_enable_work(cfg, FALSE, WL_HANDLER_DEL);
+
+	if (cfg->p2p_supported) {
+		wl_clr_p2p_status(cfg, GO_NEG_PHASE);
+#ifdef PROP_TXSTATUS_VSDB
+#if defined(BCMSDIO)
+		if (cfg->p2p->vif_created) {
+			bool enabled = false;
+			dhd_wlfc_get_enable(dhd, &enabled);
+			if (enabled && cfg->wlfc_on && dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
+				dhd->op_mode != DHD_FLAG_IBSS_MODE) {
+				dhd_wlfc_deinit(dhd);
+				cfg->wlfc_on = false;
+			}
+		}
+#endif
+#endif /* PROP_TXSTATUS_VSDB */
+	}
+
+
+	/* If primary BSS is operational (for e.g SoftAP), bring it down */
+	if (!(wl_cfgp2p_find_idx(cfg, ndev, &bssidx)) &&
+		wl_cfgp2p_bss_isup(ndev, bssidx)) {
+		if (wl_cfgp2p_bss(cfg, ndev, bssidx, 0) < 0)
+			WL_ERR(("BSS down failed \n"));
+	}
+
+	/* Check if cfg80211 interface is already down */
+	if (!wl_get_drv_status(cfg, READY, ndev))
+		return err;	/* it is even not ready */
+	for_each_ndev(cfg, iter, next)
+		wl_set_drv_status(cfg, SCAN_ABORTING, iter->ndev);
+
+
+	spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+	if (cfg->scan_request) {
+		cfg80211_scan_done(cfg->scan_request, true);
+		cfg->scan_request = NULL;
+	}
+	spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+
+	for_each_ndev(cfg, iter, next) {
+		wl_clr_drv_status(cfg, READY, iter->ndev);
+		wl_clr_drv_status(cfg, SCANNING, iter->ndev);
+		wl_clr_drv_status(cfg, SCAN_ABORTING, iter->ndev);
+		wl_clr_drv_status(cfg, CONNECTING, iter->ndev);
+		wl_clr_drv_status(cfg, CONNECTED, iter->ndev);
+		wl_clr_drv_status(cfg, DISCONNECTING, iter->ndev);
+		wl_clr_drv_status(cfg, AP_CREATED, iter->ndev);
+		wl_clr_drv_status(cfg, AP_CREATING, iter->ndev);
+	}
+	bcmcfg_to_prmry_ndev(cfg)->ieee80211_ptr->iftype =
+		NL80211_IFTYPE_STATION;
+#if defined(WL_CFG80211) && defined(WL_ENABLE_P2P_IF)
+		if (p2p_net)
+			dev_close(p2p_net);
+#endif
+	wl_flush_eq(cfg);
+	wl_link_down(cfg);
+	if (cfg->p2p_supported)
+		wl_cfgp2p_down(cfg);
+	if (cfg->ap_info) {
+		kfree(cfg->ap_info->wpa_ie);
+		kfree(cfg->ap_info->rsn_ie);
+		kfree(cfg->ap_info->wps_ie);
+		kfree(cfg->ap_info);
+		cfg->ap_info = NULL;
+	}
+	dhd_monitor_uninit();
+
+#if defined(DUAL_STA) || defined(DUAL_STA_STATIC_IF)
+	/* Clean up if not removed already */
+	if (cfg->bss_cfgdev)
+		wl_cfg80211_del_iface(cfg->wdev->wiphy, cfg->bss_cfgdev);
+#endif /* defined (DUAL_STA) || defined (DUAL_STA_STATIC_IF) */
+
+	DNGL_FUNC(dhd_cfg80211_down, (cfg));
+
+	return err;
+}
+
+s32 wl_cfg80211_up(void *para)
+{
+	struct bcm_cfg80211 *cfg;
+	s32 err = 0;
+	int val = 1;
+	dhd_pub_t *dhd;
+
+	(void)para;
+	WL_DBG(("In\n"));
+	cfg = g_bcm_cfg;
+
+	if ((err = wldev_ioctl(bcmcfg_to_prmry_ndev(cfg), WLC_GET_VERSION, &val,
+		sizeof(int), false) < 0)) {
+		WL_ERR(("WLC_GET_VERSION failed, err=%d\n", err));
+		return err;
+	}
+	val = dtoh32(val);
+	if (val != WLC_IOCTL_VERSION && val != 1) {
+		WL_ERR(("Version mismatch, please upgrade. Got %d, expected %d or 1\n",
+			val, WLC_IOCTL_VERSION));
+		return BCME_VERSION;
+	}
+	ioctl_version = val;
+	WL_TRACE(("WLC_GET_VERSION=%d\n", ioctl_version));
+
+	mutex_lock(&cfg->usr_sync);
+	dhd = (dhd_pub_t *)(cfg->pub);
+	if (!(dhd->op_mode & DHD_FLAG_HOSTAP_MODE)) {
+		err = wl_cfg80211_attach_post(bcmcfg_to_prmry_ndev(cfg));
+		if (unlikely(err))
+			return err;
+	}
+	err = __wl_cfg80211_up(cfg);
+	if (unlikely(err))
+		WL_ERR(("__wl_cfg80211_up failed\n"));
+#ifdef ROAM_CHANNEL_CACHE
+	init_roam(ioctl_version);
+#endif
+	mutex_unlock(&cfg->usr_sync);
+
+
+#ifdef DUAL_STA_STATIC_IF
+#ifdef DUAL_STA
+#error "Both DUAL_STA and DUAL_STA_STATIC_IF can't be enabled together"
+#endif
+	/* Static Interface support is currently supported only for STA only builds (without P2P) */
+	wl_cfg80211_create_iface(cfg->wdev->wiphy, NL80211_IFTYPE_STATION, NULL, "wlan%d");
+#endif /* DUAL_STA_STATIC_IF */
+
+	return err;
+}
+
+/* Private Event to Supplicant with indication that chip hangs */
+int wl_cfg80211_hang(struct net_device *dev, u16 reason)
+{
+	struct bcm_cfg80211 *cfg;
+	cfg = g_bcm_cfg;
+
+	WL_ERR(("In : chip crash eventing\n"));
+	wl_add_remove_pm_enable_work(cfg, FALSE, WL_HANDLER_DEL);
+	cfg80211_disconnected(dev, reason, NULL, 0, GFP_KERNEL);
+	if (cfg != NULL) {
+		wl_link_down(cfg);
+	}
+	return 0;
+}
+
+s32 wl_cfg80211_down(void *para)
+{
+	struct bcm_cfg80211 *cfg;
+	s32 err = 0;
+
+	(void)para;
+	WL_DBG(("In\n"));
+	cfg = g_bcm_cfg;
+	mutex_lock(&cfg->usr_sync);
+	err = __wl_cfg80211_down(cfg);
+	mutex_unlock(&cfg->usr_sync);
+
+	return err;
+}
+
+static void *wl_read_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 item)
+{
+	unsigned long flags;
+	void *rptr = NULL;
+	struct wl_profile *profile = wl_get_profile_by_netdev(cfg, ndev);
+
+	if (!profile)
+		return NULL;
+	spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+	switch (item) {
+	case WL_PROF_SEC:
+		rptr = &profile->sec;
+		break;
+	case WL_PROF_ACT:
+		rptr = &profile->active;
+		break;
+	case WL_PROF_BSSID:
+		rptr = profile->bssid;
+		break;
+	case WL_PROF_SSID:
+		rptr = &profile->ssid;
+		break;
+	case WL_PROF_CHAN:
+		rptr = &profile->channel;
+		break;
+	}
+	spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+	if (!rptr)
+		WL_ERR(("invalid item (%d)\n", item));
+	return rptr;
+}
+
+static s32
+wl_update_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data, s32 item)
+{
+	s32 err = 0;
+	struct wlc_ssid *ssid;
+	unsigned long flags;
+	struct wl_profile *profile = wl_get_profile_by_netdev(cfg, ndev);
+
+	if (!profile)
+		return WL_INVALID;
+	spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+	switch (item) {
+	case WL_PROF_SSID:
+		ssid = (wlc_ssid_t *) data;
+		memset(profile->ssid.SSID, 0,
+			sizeof(profile->ssid.SSID));
+		memcpy(profile->ssid.SSID, ssid->SSID, ssid->SSID_len);
+		profile->ssid.SSID_len = ssid->SSID_len;
+		break;
+	case WL_PROF_BSSID:
+		if (data)
+			memcpy(profile->bssid, data, ETHER_ADDR_LEN);
+		else
+			memset(profile->bssid, 0, ETHER_ADDR_LEN);
+		break;
+	case WL_PROF_SEC:
+		memcpy(&profile->sec, data, sizeof(profile->sec));
+		break;
+	case WL_PROF_ACT:
+		profile->active = *(bool *)data;
+		break;
+	case WL_PROF_BEACONINT:
+		profile->beacon_interval = *(u16 *)data;
+		break;
+	case WL_PROF_DTIMPERIOD:
+		profile->dtim_period = *(u8 *)data;
+		break;
+	case WL_PROF_CHAN:
+		profile->channel = *(u32*)data;
+		break;
+	default:
+		err = -EOPNOTSUPP;
+		break;
+	}
+	spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+
+	if (err == -EOPNOTSUPP)
+		WL_ERR(("unsupported item (%d)\n", item));
+
+	return err;
+}
+
+void wl_cfg80211_dbg_level(u32 level)
+{
+	/*
+	* prohibit to change debug level
+	* by insmod parameter.
+	* eventually debug level will be configured
+	* in compile time by using CONFIG_XXX
+	*/
+	/* wl_dbg_level = level; */
+}
+
+static bool wl_is_ibssmode(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+{
+	return wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_IBSS;
+}
+
+static __used bool wl_is_ibssstarter(struct bcm_cfg80211 *cfg)
+{
+	return cfg->ibss_starter;
+}
+
+static void wl_rst_ie(struct bcm_cfg80211 *cfg)
+{
+	struct wl_ie *ie = wl_to_ie(cfg);
+
+	ie->offset = 0;
+}
+
+static __used s32 wl_add_ie(struct bcm_cfg80211 *cfg, u8 t, u8 l, u8 *v)
+{
+	struct wl_ie *ie = wl_to_ie(cfg);
+	s32 err = 0;
+
+	if (unlikely(ie->offset + l + 2 > WL_TLV_INFO_MAX)) {
+		WL_ERR(("ei crosses buffer boundary\n"));
+		return -ENOSPC;
+	}
+	ie->buf[ie->offset] = t;
+	ie->buf[ie->offset + 1] = l;
+	memcpy(&ie->buf[ie->offset + 2], v, l);
+	ie->offset += l + 2;
+
+	return err;
+}
+
+static void wl_update_hidden_ap_ie(struct wl_bss_info *bi, u8 *ie_stream, u32 *ie_size, bool roam)
+{
+	u8 *ssidie;
+	ssidie = (u8 *)cfg80211_find_ie(WLAN_EID_SSID, ie_stream, *ie_size);
+	if (!ssidie)
+		return;
+	if (ssidie[1] != bi->SSID_len) {
+		if (ssidie[1]) {
+			WL_ERR(("%s: Wrong SSID len: %d != %d\n",
+				__FUNCTION__, ssidie[1], bi->SSID_len));
+		}
+		if (roam) {
+			WL_ERR(("Changing the SSID Info.\n"));
+			memmove(ssidie + bi->SSID_len + 2,
+				(ssidie + 2) + ssidie[1],
+				*ie_size - (ssidie + 2 + ssidie[1] - ie_stream));
+			memcpy(ssidie + 2, bi->SSID, bi->SSID_len);
+			*ie_size = *ie_size + bi->SSID_len - ssidie[1];
+			ssidie[1] = bi->SSID_len;
+		}
+		return;
+	}
+	if (*(ssidie + 2) == '\0')
+		 memcpy(ssidie + 2, bi->SSID, bi->SSID_len);
+	return;
+}
+
+static s32 wl_mrg_ie(struct bcm_cfg80211 *cfg, u8 *ie_stream, u16 ie_size)
+{
+	struct wl_ie *ie = wl_to_ie(cfg);
+	s32 err = 0;
+
+	if (unlikely(ie->offset + ie_size > WL_TLV_INFO_MAX)) {
+		WL_ERR(("ei_stream crosses buffer boundary\n"));
+		return -ENOSPC;
+	}
+	memcpy(&ie->buf[ie->offset], ie_stream, ie_size);
+	ie->offset += ie_size;
+
+	return err;
+}
+
+static s32 wl_cp_ie(struct bcm_cfg80211 *cfg, u8 *dst, u16 dst_size)
+{
+	struct wl_ie *ie = wl_to_ie(cfg);
+	s32 err = 0;
+
+	if (unlikely(ie->offset > dst_size)) {
+		WL_ERR(("dst_size is not enough\n"));
+		return -ENOSPC;
+	}
+	memcpy(dst, &ie->buf[0], ie->offset);
+
+	return err;
+}
+
+static u32 wl_get_ielen(struct bcm_cfg80211 *cfg)
+{
+	struct wl_ie *ie = wl_to_ie(cfg);
+
+	return ie->offset;
+}
+
+static void wl_link_up(struct bcm_cfg80211 *cfg)
+{
+	cfg->link_up = true;
+}
+
+static void wl_link_down(struct bcm_cfg80211 *cfg)
+{
+	struct wl_connect_info *conn_info = wl_to_conn(cfg);
+
+	WL_DBG(("In\n"));
+	cfg->link_up = false;
+	conn_info->req_ie_len = 0;
+	conn_info->resp_ie_len = 0;
+}
+
+static unsigned long wl_lock_eq(struct bcm_cfg80211 *cfg)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&cfg->eq_lock, flags);
+	return flags;
+}
+
+static void wl_unlock_eq(struct bcm_cfg80211 *cfg, unsigned long flags)
+{
+	spin_unlock_irqrestore(&cfg->eq_lock, flags);
+}
+
+static void wl_init_eq_lock(struct bcm_cfg80211 *cfg)
+{
+	spin_lock_init(&cfg->eq_lock);
+}
+
+static void wl_delay(u32 ms)
+{
+	if (in_atomic() || (ms < jiffies_to_msecs(1))) {
+		OSL_DELAY(ms*1000);
+	} else {
+		OSL_SLEEP(ms);
+	}
+}
+
+s32 wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	struct ether_addr p2pif_addr;
+	struct ether_addr primary_mac;
+	if (!cfg->p2p)
+		return -1;
+	if (!p2p_is_on(cfg)) {
+		get_primary_mac(cfg, &primary_mac);
+		wl_cfgp2p_generate_bss_mac(&primary_mac, p2pdev_addr, &p2pif_addr);
+	} else {
+		memcpy(p2pdev_addr->octet,
+			cfg->p2p->dev_addr.octet, ETHER_ADDR_LEN);
+	}
+
+
+	return 0;
+}
+s32 wl_cfg80211_set_p2p_noa(struct net_device *net, char* buf, int len)
+{
+	struct bcm_cfg80211 *cfg;
+
+	cfg = g_bcm_cfg;
+
+	return wl_cfgp2p_set_p2p_noa(cfg, net, buf, len);
+}
+
+s32 wl_cfg80211_get_p2p_noa(struct net_device *net, char* buf, int len)
+{
+	struct bcm_cfg80211 *cfg;
+	cfg = g_bcm_cfg;
+
+	return wl_cfgp2p_get_p2p_noa(cfg, net, buf, len);
+}
+
+s32 wl_cfg80211_set_p2p_ps(struct net_device *net, char* buf, int len)
+{
+	struct bcm_cfg80211 *cfg;
+	cfg = g_bcm_cfg;
+
+	return wl_cfgp2p_set_p2p_ps(cfg, net, buf, len);
+}
+
+s32 wl_cfg80211_channel_to_freq(u32 channel)
+{
+	int freq = 0;
+
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38)
+	freq = ieee80211_channel_to_frequency(channel);
+#else
+	{
+		u16 band = 0;
+		if (channel <= CH_MAX_2G_CHANNEL)
+			band = IEEE80211_BAND_2GHZ;
+		else
+			band = IEEE80211_BAND_5GHZ;
+		freq = ieee80211_channel_to_frequency(channel, band);
+	}
+#endif
+	return freq;
+}
+
+
+#ifdef WLTDLS
+static s32
+wl_tdls_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data) {
+
+	struct net_device *ndev = NULL;
+	u32 reason = ntoh32(e->reason);
+	s8 *msg = NULL;
+
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+	switch (reason) {
+	case WLC_E_TDLS_PEER_DISCOVERED :
+		msg = " TDLS PEER DISCOVERD ";
+		break;
+	case WLC_E_TDLS_PEER_CONNECTED :
+#ifdef PCIE_FULL_DONGLE
+		dhd_tdls_update_peer_info(ndev, TRUE, (uint8 *)&e->addr.octet[0]);
+#endif /* PCIE_FULL_DONGLE */
+		msg = " TDLS PEER CONNECTED ";
+		break;
+	case WLC_E_TDLS_PEER_DISCONNECTED :
+#ifdef PCIE_FULL_DONGLE
+		dhd_tdls_update_peer_info(ndev, FALSE, (uint8 *)&e->addr.octet[0]);
+#endif /* PCIE_FULL_DONGLE */
+		msg = "TDLS PEER DISCONNECTED ";
+		break;
+	}
+	if (msg) {
+		WL_ERR(("%s: " MACDBG " on %s ndev\n", msg, MAC2STRDBG((u8*)(&e->addr)),
+			(bcmcfg_to_prmry_ndev(cfg) == ndev) ? "primary" : "secondary"));
+	}
+	return 0;
+
+}
+#endif  /* WLTDLS */
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0))
+static s32
+wl_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
+	u8 *peer, enum nl80211_tdls_operation oper)
+{
+	s32 ret = 0;
+#ifdef WLTDLS
+	struct bcm_cfg80211 *cfg;
+	tdls_iovar_t info;
+	cfg = g_bcm_cfg;
+	memset(&info, 0, sizeof(tdls_iovar_t));
+	if (peer)
+		memcpy(&info.ea, peer, ETHER_ADDR_LEN);
+	switch (oper) {
+	case NL80211_TDLS_DISCOVERY_REQ:
+		/* turn on TDLS */
+		ret = dhd_tdls_enable(dev, true, false, NULL);
+		if (ret < 0)
+			return ret;
+		info.mode = TDLS_MANUAL_EP_DISCOVERY;
+		break;
+	case NL80211_TDLS_SETUP:
+		/* auto mode on */
+		ret = dhd_tdls_enable(dev, true, true, (struct ether_addr *)peer);
+		if (ret < 0)
+			return ret;
+		break;
+	case NL80211_TDLS_TEARDOWN:
+		info.mode = TDLS_MANUAL_EP_DELETE;
+		/* auto mode off */
+		ret = dhd_tdls_enable(dev, true, false, (struct ether_addr *)peer);
+		if (ret < 0)
+			return ret;
+		break;
+	default:
+		WL_ERR(("Unsupported operation : %d\n", oper));
+		goto out;
+	}
+	if (info.mode) {
+		ret = wldev_iovar_setbuf(dev, "tdls_endpoint", &info, sizeof(info),
+			cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+		if (ret) {
+			WL_ERR(("tdls_endpoint error %d\n", ret));
+		}
+	}
+out:
+#endif /* WLTDLS */
+	return ret;
+}
+#endif
+
+s32 wl_cfg80211_set_wps_p2p_ie(struct net_device *net, char *buf, int len,
+	enum wl_management_type type)
+{
+	struct bcm_cfg80211 *cfg;
+	struct net_device *ndev = NULL;
+	struct ether_addr primary_mac;
+	s32 ret = 0;
+	s32 bssidx = 0;
+	s32 pktflag = 0;
+	cfg = g_bcm_cfg;
+
+	if (wl_get_drv_status(cfg, AP_CREATING, net)) {
+		/* Vendor IEs should be set to FW
+		 * after SoftAP interface is brought up
+		 */
+		goto exit;
+	} else if (wl_get_drv_status(cfg, AP_CREATED, net)) {
+		ndev = net;
+		bssidx = 0;
+	} else if (cfg->p2p) {
+		net = ndev_to_wlc_ndev(net, cfg);
+		if (!cfg->p2p->on) {
+			get_primary_mac(cfg, &primary_mac);
+			wl_cfgp2p_generate_bss_mac(&primary_mac, &cfg->p2p->dev_addr,
+				&cfg->p2p->int_addr);
+			/* In case of p2p_listen command, supplicant send remain_on_channel
+			* without turning on P2P
+			*/
+
+			p2p_on(cfg) = true;
+			ret = wl_cfgp2p_enable_discovery(cfg, net, NULL, 0);
+
+			if (unlikely(ret)) {
+				goto exit;
+			}
+		}
+		if (net  != bcmcfg_to_prmry_ndev(cfg)) {
+			if (wl_get_mode_by_netdev(cfg, net) == WL_MODE_AP) {
+				ndev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION);
+				bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION);
+			}
+		} else {
+				ndev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY);
+				bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+		}
+	}
+	if (ndev != NULL) {
+		switch (type) {
+			case WL_BEACON:
+				pktflag = VNDR_IE_BEACON_FLAG;
+				break;
+			case WL_PROBE_RESP:
+				pktflag = VNDR_IE_PRBRSP_FLAG;
+				break;
+			case WL_ASSOC_RESP:
+				pktflag = VNDR_IE_ASSOCRSP_FLAG;
+				break;
+		}
+		if (pktflag)
+			ret = wl_cfgp2p_set_management_ie(cfg, ndev, bssidx, pktflag, buf, len);
+	}
+exit:
+	return ret;
+}
+
+#ifdef WL_SUPPORT_AUTO_CHANNEL
+static s32
+wl_cfg80211_set_auto_channel_scan_state(struct net_device *ndev)
+{
+	u32 val = 0;
+	s32 ret = BCME_ERROR;
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+
+	/* Disable mpc, to avoid automatic interface down. */
+	val = 0;
+
+	ret = wldev_iovar_setbuf_bsscfg(ndev, "mpc", (void *)&val,
+		sizeof(val), cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0,
+		&cfg->ioctl_buf_sync);
+	if (ret < 0) {
+		WL_ERR(("set 'mpc' failed, error = %d\n", ret));
+		goto done;
+	}
+
+	/* Set interface up, explicitly. */
+	val = 1;
+
+	ret = wldev_ioctl(ndev, WLC_UP, (void *)&val, sizeof(val), true);
+	if (ret < 0) {
+		WL_ERR(("set interface up failed, error = %d\n", ret));
+		goto done;
+	}
+
+	/* Stop all scan explicitly, till auto channel selection complete. */
+	wl_set_drv_status(cfg, SCANNING, ndev);
+	if (cfg->escan_info.ndev == NULL) {
+		ret = BCME_OK;
+		goto done;
+	}
+	ret = wl_notify_escan_complete(cfg, ndev, true, true);
+	if (ret < 0) {
+		WL_ERR(("set scan abort failed, error = %d\n", ret));
+		goto done;
+	}
+
+done:
+	return ret;
+}
+
+static bool
+wl_cfg80211_valid_chanspec_p2p(chanspec_t chanspec)
+{
+	bool valid = false;
+	char chanbuf[CHANSPEC_STR_LEN];
+
+	/* channel 1 to 14 */
+	if ((chanspec >= 0x2b01) && (chanspec <= 0x2b0e)) {
+		valid = true;
+	}
+	/* channel 36 to 48 */
+	else if ((chanspec >= 0x1b24) && (chanspec <= 0x1b30)) {
+		valid = true;
+	}
+	/* channel 149 to 161 */
+	else if ((chanspec >= 0x1b95) && (chanspec <= 0x1ba1)) {
+		valid = true;
+	}
+	else {
+		valid = false;
+		WL_INFORM(("invalid P2P chanspec, chanspec = %s\n",
+			wf_chspec_ntoa_ex(chanspec, chanbuf)));
+	}
+
+	return valid;
+}
+
+static s32
+wl_cfg80211_get_chanspecs_2g(struct net_device *ndev, void *buf, s32 buflen)
+{
+	s32 ret = BCME_ERROR;
+	struct bcm_cfg80211 *cfg = NULL;
+	wl_uint32_list_t *list = NULL;
+	chanspec_t chanspec = 0;
+
+	memset(buf, 0, buflen);
+
+	cfg = g_bcm_cfg;
+	list = (wl_uint32_list_t *)buf;
+	list->count = htod32(WL_NUMCHANSPECS);
+
+	/* Restrict channels to 2.4GHz, 20MHz BW, no SB. */
+	chanspec |= (WL_CHANSPEC_BAND_2G | WL_CHANSPEC_BW_20 |
+		WL_CHANSPEC_CTL_SB_NONE);
+	chanspec = wl_chspec_host_to_driver(chanspec);
+
+	ret = wldev_iovar_getbuf_bsscfg(ndev, "chanspecs", (void *)&chanspec,
+		sizeof(chanspec), buf, buflen, 0, &cfg->ioctl_buf_sync);
+	if (ret < 0) {
+		WL_ERR(("get 'chanspecs' failed, error = %d\n", ret));
+	}
+
+	return ret;
+}
+
+static s32
+wl_cfg80211_get_chanspecs_5g(struct net_device *ndev, void *buf, s32 buflen)
+{
+	u32 channel = 0;
+	s32 ret = BCME_ERROR;
+	s32 i = 0;
+	s32 j = 0;
+	struct bcm_cfg80211 *cfg = NULL;
+	wl_uint32_list_t *list = NULL;
+	chanspec_t chanspec = 0;
+
+	memset(buf, 0, buflen);
+
+	cfg = g_bcm_cfg;
+	list = (wl_uint32_list_t *)buf;
+	list->count = htod32(WL_NUMCHANSPECS);
+
+	/* Restrict channels to 5GHz, 20MHz BW, no SB. */
+	chanspec |= (WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_20 |
+		WL_CHANSPEC_CTL_SB_NONE);
+	chanspec = wl_chspec_host_to_driver(chanspec);
+
+	ret = wldev_iovar_getbuf_bsscfg(ndev, "chanspecs", (void *)&chanspec,
+		sizeof(chanspec), buf, buflen, 0, &cfg->ioctl_buf_sync);
+	if (ret < 0) {
+		WL_ERR(("get 'chanspecs' failed, error = %d\n", ret));
+		goto done;
+	}
+
+	/* Skip DFS and inavlid P2P channel. */
+	for (i = 0, j = 0; i < dtoh32(list->count); i++) {
+		chanspec = (chanspec_t) dtoh32(list->element[i]);
+		channel = CHSPEC_CHANNEL(chanspec);
+
+		ret = wldev_iovar_getint(ndev, "per_chan_info", &channel);
+		if (ret < 0) {
+			WL_ERR(("get 'per_chan_info' failed, error = %d\n", ret));
+			goto done;
+		}
+
+		if (CHANNEL_IS_RADAR(channel) ||
+			!(wl_cfg80211_valid_chanspec_p2p(chanspec))) {
+			continue;
+		} else {
+			list->element[j] = list->element[i];
+		}
+
+		j++;
+	}
+
+	list->count = j;
+
+done:
+	return ret;
+}
+
+static s32
+wl_cfg80211_get_best_channel(struct net_device *ndev, void *buf, int buflen,
+	int *channel)
+{
+	s32 ret = BCME_ERROR;
+	int chosen = 0;
+	int retry = 0;
+
+	/* Start auto channel selection scan. */
+	ret = wldev_ioctl(ndev, WLC_START_CHANNEL_SEL, buf, buflen, true);
+	if (ret < 0) {
+		WL_ERR(("can't start auto channel scan, error = %d\n", ret));
+		*channel = 0;
+		goto done;
+	}
+
+	/* Wait for auto channel selection, worst case possible delay is 5250ms. */
+	retry = CHAN_SEL_RETRY_COUNT;
+
+	while (retry--) {
+		OSL_SLEEP(CHAN_SEL_IOCTL_DELAY);
+
+		ret = wldev_ioctl(ndev, WLC_GET_CHANNEL_SEL, &chosen, sizeof(chosen),
+			false);
+		if ((ret == 0) && (dtoh32(chosen) != 0)) {
+			*channel = (u16)(chosen & 0x00FF);
+			WL_INFORM(("selected channel = %d\n", *channel));
+			break;
+		}
+		WL_INFORM(("attempt = %d, ret = %d, chosen = %d\n",
+			(CHAN_SEL_RETRY_COUNT - retry), ret, dtoh32(chosen)));
+	}
+
+	if (retry <= 0)	{
+		WL_ERR(("failure, auto channel selection timed out\n"));
+		*channel = 0;
+		ret = BCME_ERROR;
+	}
+
+done:
+	return ret;
+}
+
+static s32
+wl_cfg80211_restore_auto_channel_scan_state(struct net_device *ndev)
+{
+	u32 val = 0;
+	s32 ret = BCME_ERROR;
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+
+	/* Clear scan stop driver status. */
+	wl_clr_drv_status(cfg, SCANNING, ndev);
+
+	/* Enable mpc back to 1, irrespective of initial state. */
+	val = 1;
+
+	ret = wldev_iovar_setbuf_bsscfg(ndev, "mpc", (void *)&val,
+		sizeof(val), cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0,
+		&cfg->ioctl_buf_sync);
+	if (ret < 0) {
+		WL_ERR(("set 'mpc' failed, error = %d\n", ret));
+	}
+
+	return ret;
+}
+
+s32
+wl_cfg80211_get_best_channels(struct net_device *dev, char* cmd, int total_len)
+{
+	int channel = 0;
+	s32 ret = BCME_ERROR;
+	u8 *buf = NULL;
+	char *pos = cmd;
+	struct bcm_cfg80211 *cfg = NULL;
+	struct net_device *ndev = NULL;
+
+	memset(cmd, 0, total_len);
+
+	buf = kmalloc(CHANSPEC_BUF_SIZE, GFP_KERNEL);
+	if (buf == NULL) {
+		WL_ERR(("failed to allocate chanspec buffer\n"));
+		return -ENOMEM;
+	}
+
+	/*
+	 * Always use primary interface, irrespective of interface on which
+	 * command came.
+	 */
+	cfg = g_bcm_cfg;
+	ndev = bcmcfg_to_prmry_ndev(cfg);
+
+	/*
+	 * Make sure that FW and driver are in right state to do auto channel
+	 * selection scan.
+	 */
+	ret = wl_cfg80211_set_auto_channel_scan_state(ndev);
+	if (ret < 0) {
+		WL_ERR(("can't set auto channel scan state, error = %d\n", ret));
+		goto done;
+	}
+
+	/* Best channel selection in 2.4GHz band. */
+	ret = wl_cfg80211_get_chanspecs_2g(ndev, (void *)buf, CHANSPEC_BUF_SIZE);
+	if (ret < 0) {
+		WL_ERR(("can't get chanspecs in 2.4GHz, error = %d\n", ret));
+		goto done;
+	}
+
+	ret = wl_cfg80211_get_best_channel(ndev, (void *)buf, CHANSPEC_BUF_SIZE,
+		&channel);
+	if (ret < 0) {
+		WL_ERR(("can't select best channel scan in 2.4GHz, error = %d\n", ret));
+		goto done;
+	}
+
+	if (CHANNEL_IS_2G(channel)) {
+		channel = ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ);
+	} else {
+		WL_ERR(("invalid 2.4GHz channel, channel = %d\n", channel));
+		channel = 0;
+	}
+
+	sprintf(pos, "%04d ", channel);
+	pos += 5;
+
+	/* Best channel selection in 5GHz band. */
+	ret = wl_cfg80211_get_chanspecs_5g(ndev, (void *)buf, CHANSPEC_BUF_SIZE);
+	if (ret < 0) {
+		WL_ERR(("can't get chanspecs in 5GHz, error = %d\n", ret));
+		goto done;
+	}
+
+	ret = wl_cfg80211_get_best_channel(ndev, (void *)buf, CHANSPEC_BUF_SIZE,
+		&channel);
+	if (ret < 0) {
+		WL_ERR(("can't select best channel scan in 5GHz, error = %d\n", ret));
+		goto done;
+	}
+
+	if (CHANNEL_IS_5G(channel)) {
+		channel = ieee80211_channel_to_frequency(channel, IEEE80211_BAND_5GHZ);
+	} else {
+		WL_ERR(("invalid 5GHz channel, channel = %d\n", channel));
+		channel = 0;
+	}
+
+	sprintf(pos, "%04d ", channel);
+	pos += 5;
+
+	/* Set overall best channel same as 5GHz best channel. */
+	sprintf(pos, "%04d ", channel);
+	pos += 5;
+
+done:
+	if (NULL != buf) {
+		kfree(buf);
+	}
+
+	/* Restore FW and driver back to normal state. */
+	ret = wl_cfg80211_restore_auto_channel_scan_state(ndev);
+	if (ret < 0) {
+		WL_ERR(("can't restore auto channel scan state, error = %d\n", ret));
+	}
+
+	return (pos - cmd);
+}
+#endif /* WL_SUPPORT_AUTO_CHANNEL */
+
+static const struct rfkill_ops wl_rfkill_ops = {
+	.set_block = wl_rfkill_set
+};
+
+static int wl_rfkill_set(void *data, bool blocked)
+{
+	struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)data;
+
+	WL_DBG(("Enter \n"));
+	WL_DBG(("RF %s\n", blocked ? "blocked" : "unblocked"));
+
+	if (!cfg)
+		return -EINVAL;
+
+	cfg->rf_blocked = blocked;
+
+	return 0;
+}
+
+static int wl_setup_rfkill(struct bcm_cfg80211 *cfg, bool setup)
+{
+	s32 err = 0;
+
+	WL_DBG(("Enter \n"));
+	if (!cfg)
+		return -EINVAL;
+	if (setup) {
+		cfg->rfkill = rfkill_alloc("brcmfmac-wifi",
+			wl_cfg80211_get_parent_dev(),
+			RFKILL_TYPE_WLAN, &wl_rfkill_ops, (void *)cfg);
+
+		if (!cfg->rfkill) {
+			err = -ENOMEM;
+			goto err_out;
+		}
+
+		err = rfkill_register(cfg->rfkill);
+
+		if (err)
+			rfkill_destroy(cfg->rfkill);
+	} else {
+		if (!cfg->rfkill) {
+			err = -ENOMEM;
+			goto err_out;
+		}
+
+		rfkill_unregister(cfg->rfkill);
+		rfkill_destroy(cfg->rfkill);
+	}
+
+err_out:
+	return err;
+}
+
+#ifdef DEBUGFS_CFG80211
+/**
+* Format : echo "SCAN:1 DBG:1" > /sys/kernel/debug/dhd/debug_level
+* to turn on SCAN and DBG log.
+* To turn off SCAN partially, echo "SCAN:0" > /sys/kernel/debug/dhd/debug_level
+* To see current setting of debug level,
+* cat /sys/kernel/debug/dhd/debug_level
+*/
+static ssize_t
+wl_debuglevel_write(struct file *file, const char __user *userbuf,
+	size_t count, loff_t *ppos)
+{
+	char tbuf[S_SUBLOGLEVEL * ARRAYSIZE(sublogname_map)], sublog[S_SUBLOGLEVEL];
+	char *params, *token, *colon;
+	uint i, tokens, log_on = 0;
+	memset(tbuf, 0, sizeof(tbuf));
+	memset(sublog, 0, sizeof(sublog));
+	if (copy_from_user(&tbuf, userbuf, min_t(size_t, sizeof(tbuf), count)))
+		return -EFAULT;
+
+	params = &tbuf[0];
+	colon = strchr(params, '\n');
+	if (colon != NULL)
+		*colon = '\0';
+	while ((token = strsep(&params, " ")) != NULL) {
+		memset(sublog, 0, sizeof(sublog));
+		if (token == NULL || !*token)
+			break;
+		if (*token == '\0')
+			continue;
+		colon = strchr(token, ':');
+		if (colon != NULL) {
+			*colon = ' ';
+		}
+		tokens = sscanf(token, "%s %u", sublog, &log_on);
+		if (colon != NULL)
+			*colon = ':';
+
+		if (tokens == 2) {
+				for (i = 0; i < ARRAYSIZE(sublogname_map); i++) {
+					if (!strncmp(sublog, sublogname_map[i].sublogname,
+						strlen(sublogname_map[i].sublogname))) {
+						if (log_on)
+							wl_dbg_level |=
+							(sublogname_map[i].log_level);
+						else
+							wl_dbg_level &=
+							~(sublogname_map[i].log_level);
+					}
+				}
+		} else
+			WL_ERR(("%s: can't parse '%s' as a "
+			       "SUBMODULE:LEVEL (%d tokens)\n",
+			       tbuf, token, tokens));
+
+
+	}
+	return count;
+}
+
+static ssize_t
+wl_debuglevel_read(struct file *file, char __user *user_buf,
+	size_t count, loff_t *ppos)
+{
+	char *param;
+	char tbuf[S_SUBLOGLEVEL * ARRAYSIZE(sublogname_map)];
+	uint i;
+	memset(tbuf, 0, sizeof(tbuf));
+	param = &tbuf[0];
+	for (i = 0; i < ARRAYSIZE(sublogname_map); i++) {
+		param += snprintf(param, sizeof(tbuf) - 1, "%s:%d ",
+			sublogname_map[i].sublogname,
+			(wl_dbg_level & sublogname_map[i].log_level) ? 1 : 0);
+	}
+	*param = '\n';
+	return simple_read_from_buffer(user_buf, count, ppos, tbuf, strlen(&tbuf[0]));
+
+}
+static const struct file_operations fops_debuglevel = {
+	.open = NULL,
+	.write = wl_debuglevel_write,
+	.read = wl_debuglevel_read,
+	.owner = THIS_MODULE,
+	.llseek = NULL,
+};
+
+static s32 wl_setup_debugfs(struct bcm_cfg80211 *cfg)
+{
+	s32 err = 0;
+	struct dentry *_dentry;
+	if (!cfg)
+		return -EINVAL;
+	cfg->debugfs = debugfs_create_dir(KBUILD_MODNAME, NULL);
+	if (!cfg->debugfs || IS_ERR(cfg->debugfs)) {
+		if (cfg->debugfs == ERR_PTR(-ENODEV))
+			WL_ERR(("Debugfs is not enabled on this kernel\n"));
+		else
+			WL_ERR(("Can not create debugfs directory\n"));
+		cfg->debugfs = NULL;
+		goto exit;
+
+	}
+	_dentry = debugfs_create_file("debug_level", S_IRUSR | S_IWUSR,
+		cfg->debugfs, cfg, &fops_debuglevel);
+	if (!_dentry || IS_ERR(_dentry)) {
+		WL_ERR(("failed to create debug_level debug file\n"));
+		wl_free_debugfs(cfg);
+	}
+exit:
+	return err;
+}
+static s32 wl_free_debugfs(struct bcm_cfg80211 *cfg)
+{
+	if (!cfg)
+		return -EINVAL;
+	if (cfg->debugfs)
+		debugfs_remove_recursive(cfg->debugfs);
+	cfg->debugfs = NULL;
+	return 0;
+}
+#endif /* DEBUGFS_CFG80211 */
+
+struct device *wl_cfg80211_get_parent_dev(void)
+{
+	return cfg80211_parent_dev;
+}
+
+void wl_cfg80211_set_parent_dev(void *dev)
+{
+	cfg80211_parent_dev = dev;
+}
+
+static void wl_cfg80211_clear_parent_dev(void)
+{
+	cfg80211_parent_dev = NULL;
+}
+
+void get_primary_mac(struct bcm_cfg80211 *cfg, struct ether_addr *mac)
+{
+	wldev_iovar_getbuf_bsscfg(bcmcfg_to_prmry_ndev(cfg), "cur_etheraddr", NULL,
+		0, cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync);
+	memcpy(mac->octet, cfg->ioctl_buf, ETHER_ADDR_LEN);
+}
+static bool check_dev_role_integrity(struct bcm_cfg80211 *cfg, u32 dev_role)
+{
+	dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+	if (((dev_role == NL80211_IFTYPE_AP) &&
+		!(dhd->op_mode & DHD_FLAG_HOSTAP_MODE)) ||
+		((dev_role == NL80211_IFTYPE_P2P_GO) &&
+		!(dhd->op_mode & DHD_FLAG_P2P_GO_MODE)))
+	{
+		WL_ERR(("device role select failed\n"));
+		return false;
+	}
+	return true;
+}
+
+int wl_cfg80211_do_driver_init(struct net_device *net)
+{
+	struct bcm_cfg80211 *cfg = *(struct bcm_cfg80211 **)netdev_priv(net);
+
+	if (!cfg || !cfg->wdev)
+		return -EINVAL;
+
+	if (dhd_do_driver_init(cfg->wdev->netdev) < 0)
+		return -1;
+
+	return 0;
+}
+
+void wl_cfg80211_enable_trace(bool set, u32 level)
+{
+	if (set)
+		wl_dbg_level = level & WL_DBG_LEVEL;
+	else
+		wl_dbg_level |= (WL_DBG_LEVEL & level);
+}
+#if defined(WL_SUPPORT_BACKPORTED_KPATCHES) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, \
+	2, 0))
+static s32
+wl_cfg80211_mgmt_tx_cancel_wait(struct wiphy *wiphy,
+	bcm_struct_cfgdev *cfgdev, u64 cookie)
+{
+	/* CFG80211 checks for tx_cancel_wait callback when ATTR_DURATION
+	 * is passed with CMD_FRAME. This callback is supposed to cancel
+	 * the OFFCHANNEL Wait. Since we are already taking care of that
+	 *  with the tx_mgmt logic, do nothing here.
+	 */
+
+	return 0;
+}
+#endif /* WL_SUPPORT_BACKPORTED_PATCHES || KERNEL >= 3.2.0 */
+
+#ifdef WL11U
+bcm_tlv_t *
+wl_cfg80211_find_interworking_ie(u8 *parse, u32 len)
+{
+	bcm_tlv_t *ie;
+
+	while ((ie = bcm_parse_tlvs(parse, (u32)len, DOT11_MNG_INTERWORKING_ID))) {
+			return (bcm_tlv_t *)ie;
+	}
+	return NULL;
+}
+
+
+static s32
+wl_cfg80211_add_iw_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx, s32 pktflag,
+            uint8 ie_id, uint8 *data, uint8 data_len)
+{
+	s32 err = BCME_OK;
+	s32 buf_len;
+	s32 iecount;
+	ie_setbuf_t *ie_setbuf;
+
+	if (ie_id != DOT11_MNG_INTERWORKING_ID)
+		return BCME_UNSUPPORTED;
+
+	/* Validate the pktflag parameter */
+	if ((pktflag & ~(VNDR_IE_BEACON_FLAG | VNDR_IE_PRBRSP_FLAG |
+	            VNDR_IE_ASSOCRSP_FLAG | VNDR_IE_AUTHRSP_FLAG |
+	            VNDR_IE_PRBREQ_FLAG | VNDR_IE_ASSOCREQ_FLAG|
+	            VNDR_IE_CUSTOM_FLAG))) {
+		WL_ERR(("cfg80211 Add IE: Invalid packet flag 0x%x\n", pktflag));
+		return -1;
+	}
+
+	/* use VNDR_IE_CUSTOM_FLAG flags for none vendor IE . currently fixed value */
+	pktflag = htod32(pktflag);
+
+	buf_len = sizeof(ie_setbuf_t) + data_len - 1;
+	ie_setbuf = (ie_setbuf_t *) kzalloc(buf_len, GFP_KERNEL);
+
+	if (!ie_setbuf) {
+		WL_ERR(("Error allocating buffer for IE\n"));
+		return -ENOMEM;
+	}
+
+	if (cfg->iw_ie_len == data_len && !memcmp(cfg->iw_ie, data, data_len)) {
+		WL_ERR(("Previous IW IE is equals to current IE\n"));
+		err = BCME_OK;
+		goto exit;
+	}
+
+	strncpy(ie_setbuf->cmd, "add", VNDR_IE_CMD_LEN - 1);
+	ie_setbuf->cmd[VNDR_IE_CMD_LEN - 1] = '\0';
+
+	/* Buffer contains only 1 IE */
+	iecount = htod32(1);
+	memcpy((void *)&ie_setbuf->ie_buffer.iecount, &iecount, sizeof(int));
+	memcpy((void *)&ie_setbuf->ie_buffer.ie_list[0].pktflag, &pktflag, sizeof(uint32));
+
+	/* Now, add the IE to the buffer */
+	ie_setbuf->ie_buffer.ie_list[0].ie_data.id = ie_id;
+
+	/* if already set with previous values, delete it first */
+	if (cfg->iw_ie_len != 0) {
+		WL_DBG(("Different IW_IE was already set. clear first\n"));
+
+		ie_setbuf->ie_buffer.ie_list[0].ie_data.len = 0;
+
+		err = wldev_iovar_setbuf_bsscfg(ndev, "ie", ie_setbuf, buf_len,
+			cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+
+		if (err != BCME_OK)
+			goto exit;
+	}
+
+	ie_setbuf->ie_buffer.ie_list[0].ie_data.len = data_len;
+	memcpy((uchar *)&ie_setbuf->ie_buffer.ie_list[0].ie_data.data[0], data, data_len);
+
+	err = wldev_iovar_setbuf_bsscfg(ndev, "ie", ie_setbuf, buf_len,
+		cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+
+	if (err == BCME_OK) {
+		memcpy(cfg->iw_ie, data, data_len);
+		cfg->iw_ie_len = data_len;
+		cfg->wl11u = TRUE;
+
+		err = wldev_iovar_setint_bsscfg(ndev, "grat_arp", 1, bssidx);
+	}
+
+exit:
+	if (ie_setbuf)
+		kfree(ie_setbuf);
+	return err;
+}
+#endif /* WL11U */
+
+
+
+int wl_cfg80211_scan_stop(bcm_struct_cfgdev *cfgdev)
+{
+	struct bcm_cfg80211 *cfg = NULL;
+	struct net_device *ndev = NULL;
+	unsigned long flags;
+	int clear_flag = 0;
+	int ret = 0;
+
+	WL_TRACE(("Enter\n"));
+
+	cfg = g_bcm_cfg;
+	if (!cfg)
+		return -EINVAL;
+
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+	spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+#ifdef WL_CFG80211_P2P_DEV_IF
+	if (cfg->scan_request && cfg->scan_request->wdev == cfgdev) {
+#else
+	if (cfg->scan_request && cfg->scan_request->dev == cfgdev) {
+#endif
+		cfg80211_scan_done(cfg->scan_request, true);
+		cfg->scan_request = NULL;
+		clear_flag = 1;
+	}
+	spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+
+	if (clear_flag)
+		wl_clr_drv_status(cfg, SCANNING, ndev);
+
+	return ret;
+}
+
+bool wl_cfg80211_is_vsdb_mode(void)
+{
+	return (g_bcm_cfg && g_bcm_cfg->vsdb_mode);
+}
+
+void* wl_cfg80211_get_dhdp()
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+
+	return cfg->pub;
+}
+
+bool wl_cfg80211_is_p2p_active(void)
+{
+	return (g_bcm_cfg && g_bcm_cfg->p2p);
+}
+
+static void wl_cfg80211_work_handler(struct work_struct * work)
+{
+	struct bcm_cfg80211 *cfg = NULL;
+	struct net_info *iter, *next;
+	s32 err = BCME_OK;
+	s32 pm = PM_FAST;
+
+	cfg = container_of(work, struct bcm_cfg80211, pm_enable_work.work);
+	WL_DBG(("Enter \n"));
+	if (cfg->pm_enable_work_on) {
+		cfg->pm_enable_work_on = false;
+		for_each_ndev(cfg, iter, next) {
+			if (!wl_get_drv_status(cfg, CONNECTED, iter->ndev) ||
+				(wl_get_mode_by_netdev(cfg, iter->ndev) != WL_MODE_BSS))
+				continue;
+			if (iter->ndev) {
+				if ((err = wldev_ioctl(iter->ndev, WLC_SET_PM,
+					&pm, sizeof(pm), true)) != 0) {
+					if (err == -ENODEV)
+						WL_DBG(("%s:netdev not ready\n", iter->ndev->name));
+					else
+						WL_ERR(("%s:error (%d)\n", iter->ndev->name, err));
+				} else
+					wl_cfg80211_update_power_mode(iter->ndev);
+			}
+		}
+	}
+}
+
+u8
+wl_get_action_category(void *frame, u32 frame_len)
+{
+	u8 category;
+	u8 *ptr = (u8 *)frame;
+	if (frame == NULL)
+		return DOT11_ACTION_CAT_ERR_MASK;
+	if (frame_len < DOT11_ACTION_HDR_LEN)
+		return DOT11_ACTION_CAT_ERR_MASK;
+	category = ptr[DOT11_ACTION_CAT_OFF];
+	WL_INFORM(("Action Category: %d\n", category));
+	return category;
+}
+
+int
+wl_get_public_action(void *frame, u32 frame_len, u8 *ret_action)
+{
+	u8 *ptr = (u8 *)frame;
+	if (frame == NULL || ret_action == NULL)
+		return BCME_ERROR;
+	if (frame_len < DOT11_ACTION_HDR_LEN)
+		return BCME_ERROR;
+	if (DOT11_ACTION_CAT_PUBLIC != wl_get_action_category(frame, frame_len))
+		return BCME_ERROR;
+	*ret_action = ptr[DOT11_ACTION_ACT_OFF];
+	WL_INFORM(("Public Action : %d\n", *ret_action));
+	return BCME_OK;
+}
+
+
+static int
+wl_cfg80211_delayed_roam(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const struct ether_addr *bssid)
+{
+	s32 err;
+	wl_event_msg_t e;
+
+	bzero(&e, sizeof(e));
+	e.event_type = cpu_to_be32(WLC_E_ROAM);
+	memcpy(&e.addr, bssid, ETHER_ADDR_LEN);
+	/* trigger the roam event handler */
+	err = wl_notify_roaming_status(cfg, ndev_to_cfgdev(ndev), &e, NULL);
+
+	return err;
+}
+
+#ifdef WL_CFG80211_ACL
+static int
+wl_cfg80211_set_mac_acl(struct wiphy *wiphy, struct net_device *cfgdev,
+	const struct cfg80211_acl_data *acl)
+{
+	int i;
+	int ret = 0;
+	int macnum = 0;
+	int macmode = MACLIST_MODE_DISABLED;
+	struct maclist *list;
+
+	/* get the MAC filter mode */
+	if (acl && acl->acl_policy == NL80211_ACL_POLICY_DENY_UNLESS_LISTED) {
+		macmode = MACLIST_MODE_ALLOW;
+	} else if (acl && acl->acl_policy == NL80211_ACL_POLICY_ACCEPT_UNLESS_LISTED &&
+	acl->n_acl_entries) {
+		macmode = MACLIST_MODE_DENY;
+	}
+
+	/* if acl == NULL, macmode is still disabled.. */
+	if (macmode == MACLIST_MODE_DISABLED) {
+		if ((ret = wl_android_set_ap_mac_list(cfgdev, macmode, NULL)) != 0)
+			WL_ERR(("%s : Setting MAC list failed error=%d\n", __FUNCTION__, ret));
+
+		return ret;
+	}
+
+	macnum = acl->n_acl_entries;
+	if (macnum < 0 || macnum > MAX_NUM_MAC_FILT) {
+		WL_ERR(("%s : invalid number of MAC address entries %d\n",
+			__FUNCTION__, macnum));
+		return -1;
+	}
+
+	/* allocate memory for the MAC list */
+	list = (struct maclist*)kmalloc(sizeof(int) +
+		sizeof(struct ether_addr) * macnum, GFP_KERNEL);
+	if (!list) {
+		WL_ERR(("%s : failed to allocate memory\n", __FUNCTION__));
+		return -1;
+	}
+
+	/* prepare the MAC list */
+	list->count = htod32(macnum);
+	for (i = 0; i < macnum; i++) {
+		memcpy(&list->ea[i], &acl->mac_addrs[i], ETHER_ADDR_LEN);
+	}
+	/* set the list */
+	if ((ret = wl_android_set_ap_mac_list(cfgdev, macmode, list)) != 0)
+		WL_ERR(("%s : Setting MAC list failed error=%d\n", __FUNCTION__, ret));
+
+	kfree(list);
+
+	return ret;
+}
+#endif /* WL_CFG80211_ACL */
diff --git a/drivers/net/wireless/bcmdhd/wl_cfg80211.h b/drivers/net/wireless/bcmdhd/wl_cfg80211.h
new file mode 100644
index 0000000..1d58c82
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_cfg80211.h
@@ -0,0 +1,961 @@
+/*
+ * Linux cfg80211 driver
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ *
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ *
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_cfg80211.h 472818 2014-04-25 08:07:56Z $
+ */
+
+/**
+ * Older Linux versions support the 'iw' interface, more recent ones the 'cfg80211' interface.
+ */
+
+#ifndef _wl_cfg80211_h_
+#define _wl_cfg80211_h_
+
+#include <linux/wireless.h>
+#include <typedefs.h>
+#include <proto/ethernet.h>
+#include <wlioctl.h>
+#include <linux/wireless.h>
+#include <net/cfg80211.h>
+#include <linux/rfkill.h>
+
+#include <wl_cfgp2p.h>
+
+struct wl_conf;
+struct wl_iface;
+struct bcm_cfg80211;
+struct wl_security;
+struct wl_ibss;
+
+
+#define htod32(i) (i)
+#define htod16(i) (i)
+#define dtoh32(i) (i)
+#define dtoh16(i) (i)
+#define htodchanspec(i) (i)
+#define dtohchanspec(i) (i)
+
+#define WL_DBG_NONE	0
+#define WL_DBG_P2P_ACTION (1 << 5)
+#define WL_DBG_TRACE	(1 << 4)
+#define WL_DBG_SCAN 	(1 << 3)
+#define WL_DBG_DBG 	(1 << 2)
+#define WL_DBG_INFO	(1 << 1)
+#define WL_DBG_ERR	(1 << 0)
+
+/* 0 invalidates all debug messages.  default is 1 */
+#define WL_DBG_LEVEL 0xFF
+
+#define CFG80211_ERROR_TEXT		"CFG80211-ERROR) "
+
+#if defined(DHD_DEBUG)
+#define	WL_ERR(args)									\
+do {										\
+	if (wl_dbg_level & WL_DBG_ERR) {				\
+			printk(KERN_INFO CFG80211_ERROR_TEXT "%s : ", __func__);	\
+			printk args;						\
+		}								\
+} while (0)
+#else /* defined(DHD_DEBUG) */
+#define	WL_ERR(args)									\
+do {										\
+	if ((wl_dbg_level & WL_DBG_ERR) && net_ratelimit()) {				\
+			printk(KERN_INFO CFG80211_ERROR_TEXT "%s : ", __func__);	\
+			printk args;						\
+		}								\
+} while (0)
+#endif /* defined(DHD_DEBUG) */
+
+#ifdef WL_INFORM
+#undef WL_INFORM
+#endif
+
+#define	WL_INFORM(args)									\
+do {										\
+	if (wl_dbg_level & WL_DBG_INFO) {				\
+			printk(KERN_INFO "CFG80211-INFO) %s : ", __func__);	\
+			printk args;						\
+		}								\
+} while (0)
+
+
+#ifdef WL_SCAN
+#undef WL_SCAN
+#endif
+#define	WL_SCAN(args)								\
+do {									\
+	if (wl_dbg_level & WL_DBG_SCAN) {			\
+		printk(KERN_INFO "CFG80211-SCAN) %s :", __func__);	\
+		printk args;							\
+	}									\
+} while (0)
+#ifdef WL_TRACE
+#undef WL_TRACE
+#endif
+#define	WL_TRACE(args)								\
+do {									\
+	if (wl_dbg_level & WL_DBG_TRACE) {			\
+		printk(KERN_INFO "CFG80211-TRACE) %s :", __func__);	\
+		printk args;							\
+	}									\
+} while (0)
+#ifdef WL_TRACE_HW4
+#undef WL_TRACE_HW4
+#endif
+#define	WL_TRACE_HW4			WL_TRACE
+#if (WL_DBG_LEVEL > 0)
+#define	WL_DBG(args)								\
+do {									\
+	if (wl_dbg_level & WL_DBG_DBG) {			\
+		printk(KERN_DEBUG "CFG80211-DEBUG) %s :", __func__);	\
+		printk args;							\
+	}									\
+} while (0)
+#else				/* !(WL_DBG_LEVEL > 0) */
+#define	WL_DBG(args)
+#endif				/* (WL_DBG_LEVEL > 0) */
+#define WL_PNO(x)
+#define WL_SD(x)
+
+
+#define WL_SCAN_RETRY_MAX	3
+#define WL_NUM_PMKIDS_MAX	MAXPMKID
+#define WL_SCAN_BUF_MAX 	(1024 * 8)
+#define WL_TLV_INFO_MAX 	1500
+#define WL_SCAN_IE_LEN_MAX      2048
+#define WL_BSS_INFO_MAX		2048
+#define WL_ASSOC_INFO_MAX	512
+#define WL_IOCTL_LEN_MAX	2048
+#define WL_EXTRA_BUF_MAX	2048
+#define WL_SCAN_ERSULTS_LAST 	(WL_SCAN_RESULTS_NO_MEM+1)
+#define WL_AP_MAX		256
+#define WL_FILE_NAME_MAX	256
+#define WL_DWELL_TIME 		200
+#define WL_MED_DWELL_TIME       400
+#define WL_MIN_DWELL_TIME	100
+#define WL_LONG_DWELL_TIME 	1000
+#define IFACE_MAX_CNT 		2
+#define WL_SCAN_CONNECT_DWELL_TIME_MS 		200
+#define WL_SCAN_JOIN_PROBE_INTERVAL_MS 		20
+#define WL_SCAN_JOIN_ACTIVE_DWELL_TIME_MS 	320
+#define WL_SCAN_JOIN_PASSIVE_DWELL_TIME_MS 	400
+#define WL_AF_TX_MAX_RETRY 	5
+
+#define WL_AF_SEARCH_TIME_MAX           450
+#define WL_AF_TX_EXTRA_TIME_MAX         200
+
+#define WL_SCAN_TIMER_INTERVAL_MS	10000 /* Scan timeout */
+#define WL_CHANNEL_SYNC_RETRY 	5
+#define WL_INVALID 		-1
+
+/* Bring down SCB Timeout to 20secs from 60secs default */
+#ifndef WL_SCB_TIMEOUT
+#define WL_SCB_TIMEOUT 20
+#endif
+
+/* SCAN_SUPPRESS timer values in ms */
+#define WL_SCAN_SUPPRESS_TIMEOUT 31000 /* default Framwork DHCP timeout is 30 sec */
+#define WL_SCAN_SUPPRESS_RETRY 3000
+
+#define WL_PM_ENABLE_TIMEOUT 10000
+
+
+/* driver status */
+enum wl_status {
+	WL_STATUS_READY = 0,
+	WL_STATUS_SCANNING,
+	WL_STATUS_SCAN_ABORTING,
+	WL_STATUS_CONNECTING,
+	WL_STATUS_CONNECTED,
+	WL_STATUS_DISCONNECTING,
+	WL_STATUS_AP_CREATING,
+	WL_STATUS_AP_CREATED,
+	/* whole sending action frame procedure:
+	 * includes a) 'finding common channel' for public action request frame
+	 * and b) 'sending af via 'actframe' iovar'
+	 */
+	WL_STATUS_SENDING_ACT_FRM,
+	/* find a peer to go to a common channel before sending public action req frame */
+	WL_STATUS_FINDING_COMMON_CHANNEL,
+	/* waiting for next af to sync time of supplicant.
+	 * it includes SENDING_ACT_FRM and WAITING_NEXT_ACT_FRM_LISTEN
+	 */
+	WL_STATUS_WAITING_NEXT_ACT_FRM,
+#ifdef WL_CFG80211_SYNC_GON
+	/* go to listen state to wait for next af after SENDING_ACT_FRM */
+	WL_STATUS_WAITING_NEXT_ACT_FRM_LISTEN,
+#endif /* WL_CFG80211_SYNC_GON */
+	/* it will be set when upper layer requests listen and succeed in setting listen mode.
+	 * if set, other scan request can abort current listen state
+	 */
+	WL_STATUS_REMAINING_ON_CHANNEL,
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+	/* it's fake listen state to keep current scan state.
+	 * it will be set when upper layer requests listen but scan is running. then just run
+	 * a expire timer without actual listen state.
+	 * if set, other scan request does not need to abort scan.
+	 */
+	WL_STATUS_FAKE_REMAINING_ON_CHANNEL
+#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+};
+
+/* wi-fi mode */
+enum wl_mode {
+	WL_MODE_BSS,
+	WL_MODE_IBSS,
+	WL_MODE_AP
+};
+
+/* driver profile list */
+enum wl_prof_list {
+	WL_PROF_MODE,
+	WL_PROF_SSID,
+	WL_PROF_SEC,
+	WL_PROF_IBSS,
+	WL_PROF_BAND,
+	WL_PROF_CHAN,
+	WL_PROF_BSSID,
+	WL_PROF_ACT,
+	WL_PROF_BEACONINT,
+	WL_PROF_DTIMPERIOD
+};
+
+/* donlge escan state */
+enum wl_escan_state {
+    WL_ESCAN_STATE_IDLE,
+    WL_ESCAN_STATE_SCANING
+};
+/* fw downloading status */
+enum wl_fw_status {
+	WL_FW_LOADING_DONE,
+	WL_NVRAM_LOADING_DONE
+};
+
+enum wl_management_type {
+	WL_BEACON = 0x1,
+	WL_PROBE_RESP = 0x2,
+	WL_ASSOC_RESP = 0x4
+};
+
+enum wl_handler_del_type {
+	WL_HANDLER_NOTUSE,
+	WL_HANDLER_DEL,
+	WL_HANDLER_MAINTAIN,
+	WL_HANDLER_PEND
+};
+
+/* beacon / probe_response */
+struct beacon_proberesp {
+	__le64 timestamp;
+	__le16 beacon_int;
+	__le16 capab_info;
+	u8 variable[0];
+} __attribute__ ((packed));
+
+/* driver configuration */
+struct wl_conf {
+	u32 frag_threshold;
+	u32 rts_threshold;
+	u32 retry_short;
+	u32 retry_long;
+	s32 tx_power;
+	struct ieee80211_channel channel;
+};
+
+typedef s32(*EVENT_HANDLER) (struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+                            const wl_event_msg_t *e, void *data);
+
+/* bss inform structure for cfg80211 interface */
+struct wl_cfg80211_bss_info {
+	u16 band;
+	u16 channel;
+	s16 rssi;
+	u16 frame_len;
+	u8 frame_buf[1];
+};
+
+/* basic structure of scan request */
+struct wl_scan_req {
+	struct wlc_ssid ssid;
+};
+
+/* basic structure of information element */
+struct wl_ie {
+	u16 offset;
+	u8 buf[WL_TLV_INFO_MAX];
+};
+
+/* event queue for cfg80211 main event */
+struct wl_event_q {
+	struct list_head eq_list;
+	u32 etype;
+	wl_event_msg_t emsg;
+	s8 edata[1];
+};
+
+/* security information with currently associated ap */
+struct wl_security {
+	u32 wpa_versions;
+	u32 auth_type;
+	u32 cipher_pairwise;
+	u32 cipher_group;
+	u32 wpa_auth;
+	u32 auth_assoc_res_status;
+};
+
+/* ibss information for currently joined ibss network */
+struct wl_ibss {
+	u8 beacon_interval;	/* in millisecond */
+	u8 atim;		/* in millisecond */
+	s8 join_only;
+	u8 band;
+	u8 channel;
+};
+
+/* cfg driver profile */
+struct wl_profile {
+	u32 mode;
+	s32 band;
+	u32 channel;
+	struct wlc_ssid ssid;
+	struct wl_security sec;
+	struct wl_ibss ibss;
+	u8 bssid[ETHER_ADDR_LEN];
+	u16 beacon_interval;
+	u8 dtim_period;
+	bool active;
+};
+
+struct net_info {
+	struct net_device *ndev;
+	struct wireless_dev *wdev;
+	struct wl_profile profile;
+	s32 mode;
+	s32 roam_off;
+	unsigned long sme_state;
+	bool pm_restore;
+	bool pm_block;
+	s32 pm;
+	struct list_head list; /* list of all net_info structure */
+};
+
+/* association inform */
+#define MAX_REQ_LINE 1024
+struct wl_connect_info {
+	u8 req_ie[MAX_REQ_LINE];
+	s32 req_ie_len;
+	u8 resp_ie[MAX_REQ_LINE];
+	s32 resp_ie_len;
+};
+
+/* firmware /nvram downloading controller */
+struct wl_fw_ctrl {
+	const struct firmware *fw_entry;
+	unsigned long status;
+	u32 ptr;
+	s8 fw_name[WL_FILE_NAME_MAX];
+	s8 nvram_name[WL_FILE_NAME_MAX];
+};
+
+/* assoc ie length */
+struct wl_assoc_ielen {
+	u32 req_len;
+	u32 resp_len;
+};
+
+/* wpa2 pmk list */
+struct wl_pmk_list {
+	pmkid_list_t pmkids;
+	pmkid_t foo[MAXPMKID - 1];
+};
+
+
+#define ESCAN_BUF_SIZE (64 * 1024)
+
+struct escan_info {
+	u32 escan_state;
+#if defined(STATIC_WL_PRIV_STRUCT)
+#ifndef CONFIG_DHD_USE_STATIC_BUF
+#error STATIC_WL_PRIV_STRUCT should be used with CONFIG_DHD_USE_STATIC_BUF
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+	u8 *escan_buf;
+#else
+	u8 escan_buf[ESCAN_BUF_SIZE];
+#endif /* STATIC_WL_PRIV_STRUCT */
+	struct wiphy *wiphy;
+	struct net_device *ndev;
+};
+
+struct ap_info {
+/* Structure to hold WPS, WPA IEs for a AP */
+	u8   probe_res_ie[VNDR_IES_MAX_BUF_LEN];
+	u8   beacon_ie[VNDR_IES_MAX_BUF_LEN];
+	u8   assoc_res_ie[VNDR_IES_MAX_BUF_LEN];
+	u32 probe_res_ie_len;
+	u32 beacon_ie_len;
+	u32 assoc_res_ie_len;
+	u8 *wpa_ie;
+	u8 *rsn_ie;
+	u8 *wps_ie;
+	bool security_mode;
+};
+
+struct sta_info {
+	/* Structure to hold WPS IE for a STA */
+	u8  probe_req_ie[VNDR_IES_BUF_LEN];
+	u8  assoc_req_ie[VNDR_IES_BUF_LEN];
+	u32 probe_req_ie_len;
+	u32 assoc_req_ie_len;
+};
+
+struct afx_hdl {
+	wl_af_params_t *pending_tx_act_frm;
+	struct ether_addr	tx_dst_addr;
+	struct net_device *dev;
+	struct work_struct work;
+	u32 bssidx;
+	u32 retry;
+	s32 peer_chan;
+	s32 peer_listen_chan; /* search channel: configured by upper layer */
+	s32 my_listen_chan;	/* listen chanel: extract it from prb req or gon req */
+	bool is_listen;
+	bool ack_recv;
+	bool is_active;
+};
+
+struct parsed_ies {
+	wpa_ie_fixed_t *wps_ie;
+	u32 wps_ie_len;
+	wpa_ie_fixed_t *wpa_ie;
+	u32 wpa_ie_len;
+	bcm_tlv_t *wpa2_ie;
+	u32 wpa2_ie_len;
+};
+
+
+#ifdef WL11U
+/* Max length of Interworking element */
+#define IW_IES_MAX_BUF_LEN 		9
+#endif
+#define MAX_EVENT_BUF_NUM 16
+typedef struct wl_eventmsg_buf {
+    u16 num;
+    struct {
+		u16 type;
+		bool set;
+	} event [MAX_EVENT_BUF_NUM];
+} wl_eventmsg_buf_t;
+
+typedef struct wl_if_event_info {
+	bool valid;
+	int ifidx;
+	int bssidx;
+	uint8 mac[ETHER_ADDR_LEN];
+	char name[IFNAMSIZ+1];
+} wl_if_event_info;
+
+/* private data of cfg80211 interface */
+struct bcm_cfg80211 {
+	struct wireless_dev *wdev;	/* representing cfg cfg80211 device */
+
+	struct wireless_dev *p2p_wdev;	/* representing cfg cfg80211 device for P2P */
+	struct net_device *p2p_net;    /* reference to p2p0 interface */
+
+	struct wl_conf *conf;
+	struct cfg80211_scan_request *scan_request;	/* scan request object */
+	EVENT_HANDLER evt_handler[WLC_E_LAST];
+	struct list_head eq_list;	/* used for event queue */
+	struct list_head net_list;     /* used for struct net_info */
+	spinlock_t eq_lock;	/* for event queue synchronization */
+	spinlock_t cfgdrv_lock;	/* to protect scan status (and others if needed) */
+	struct completion act_frm_scan;
+	struct completion iface_disable;
+	struct completion wait_next_af;
+	struct mutex usr_sync;	/* maily for up/down synchronization */
+	struct wl_scan_results *bss_list;
+	struct wl_scan_results *scan_results;
+
+	/* scan request object for internal purpose */
+	struct wl_scan_req *scan_req_int;
+	/* information element object for internal purpose */
+#if defined(STATIC_WL_PRIV_STRUCT)
+	struct wl_ie *ie;
+#else
+	struct wl_ie ie;
+#endif
+
+	/* association information container */
+#if defined(STATIC_WL_PRIV_STRUCT)
+	struct wl_connect_info *conn_info;
+#else
+	struct wl_connect_info conn_info;
+#endif
+#ifdef DEBUGFS_CFG80211
+	struct dentry		*debugfs;
+#endif /* DEBUGFS_CFG80211 */
+	struct wl_pmk_list *pmk_list;	/* wpa2 pmk list */
+	tsk_ctl_t event_tsk;  		/* task of main event handler thread */
+	void *pub;
+	u32 iface_cnt;
+	u32 channel;		/* current channel */
+	u32 af_sent_channel;	/* channel action frame is sent */
+	/* next af subtype to cancel the remained dwell time in rx process */
+	u8 next_af_subtype;
+#ifdef WL_CFG80211_SYNC_GON
+	ulong af_tx_sent_jiffies;
+#endif /* WL_CFG80211_SYNC_GON */
+	struct escan_info escan_info;   /* escan information */
+	bool active_scan;	/* current scan mode */
+	bool ibss_starter;	/* indicates this sta is ibss starter */
+	bool link_up;		/* link/connection up flag */
+
+	/* indicate whether chip to support power save mode */
+	bool pwr_save;
+	bool roam_on;		/* on/off switch for self-roaming */
+	bool scan_tried;	/* indicates if first scan attempted */
+#if defined(BCMSDIO) || defined(BCMPCIE)
+	bool wlfc_on;
+#endif
+	bool vsdb_mode;
+	bool roamoff_on_concurrent;
+	u8 *ioctl_buf;		/* ioctl buffer */
+	struct mutex ioctl_buf_sync;
+	u8 *escan_ioctl_buf;
+	u8 *extra_buf;	/* maily to grab assoc information */
+	struct dentry *debugfsdir;
+	struct rfkill *rfkill;
+	bool rf_blocked;
+	struct ieee80211_channel remain_on_chan;
+	enum nl80211_channel_type remain_on_chan_type;
+	u64 send_action_id;
+	u64 last_roc_id;
+	wait_queue_head_t netif_change_event;
+	wait_queue_head_t event_sync_wq;
+	wl_if_event_info if_event_info;
+	struct completion send_af_done;
+	struct afx_hdl *afx_hdl;
+	struct ap_info *ap_info;
+	struct sta_info *sta_info;
+	struct p2p_info *p2p;
+	bool p2p_supported;
+	void *btcoex_info;
+	struct timer_list scan_timeout;   /* Timer for catch scan event timeout */
+	s32(*state_notifier) (struct bcm_cfg80211 *cfg,
+		struct net_info *_net_info, enum wl_status state, bool set);
+	unsigned long interrested_state;
+	wlc_ssid_t hostapd_ssid;
+#ifdef WL11U
+	bool wl11u;
+	u8 iw_ie[IW_IES_MAX_BUF_LEN];
+	u32 iw_ie_len;
+#endif /* WL11U */
+	bool sched_scan_running;	/* scheduled scan req status */
+#ifdef WL_SCHED_SCAN
+	struct cfg80211_sched_scan_request *sched_scan_req;	/* scheduled scan req */
+#endif /* WL_SCHED_SCAN */
+	bool scan_suppressed;
+	struct timer_list scan_supp_timer;
+	struct work_struct wlan_work;
+	struct mutex event_sync;	/* maily for up/down synchronization */
+	bool disable_roam_event;
+	bool pm_enable_work_on;
+	struct delayed_work pm_enable_work;
+	vndr_ie_setbuf_t *ibss_vsie;	/* keep the VSIE for IBSS */
+	int ibss_vsie_len;
+	struct ether_addr ibss_if_addr;
+	bcm_struct_cfgdev *ibss_cfgdev; /* For AIBSS */
+	bcm_struct_cfgdev *bss_cfgdev;  /* For DUAL STA/STA+AP */
+	s32 cfgdev_bssidx;
+	bool bss_pending_op;		/* indicate where there is a pending IF operation */
+	bool roam_offload;
+};
+
+
+static inline struct wl_bss_info *next_bss(struct wl_scan_results *list, struct wl_bss_info *bss)
+{
+	return bss = bss ?
+		(struct wl_bss_info *)((uintptr) bss + dtoh32(bss->length)) : list->bss_info;
+}
+static inline s32
+wl_alloc_netinfo(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	struct wireless_dev * wdev, s32 mode, bool pm_block)
+{
+	struct net_info *_net_info;
+	s32 err = 0;
+	if (cfg->iface_cnt == IFACE_MAX_CNT)
+		return -ENOMEM;
+	_net_info = kzalloc(sizeof(struct net_info), GFP_KERNEL);
+	if (!_net_info)
+		err = -ENOMEM;
+	else {
+		_net_info->mode = mode;
+		_net_info->ndev = ndev;
+		_net_info->wdev = wdev;
+		_net_info->pm_restore = 0;
+		_net_info->pm = 0;
+		_net_info->pm_block = pm_block;
+		_net_info->roam_off = WL_INVALID;
+		cfg->iface_cnt++;
+		list_add(&_net_info->list, &cfg->net_list);
+	}
+	return err;
+}
+static inline void
+wl_dealloc_netinfo(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+{
+	struct net_info *_net_info, *next;
+
+	list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
+		if (ndev && (_net_info->ndev == ndev)) {
+			list_del(&_net_info->list);
+			cfg->iface_cnt--;
+			kfree(_net_info);
+		}
+	}
+
+}
+static inline void
+wl_delete_all_netinfo(struct bcm_cfg80211 *cfg)
+{
+	struct net_info *_net_info, *next;
+
+	list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
+		list_del(&_net_info->list);
+			if (_net_info->wdev)
+				kfree(_net_info->wdev);
+			kfree(_net_info);
+	}
+	cfg->iface_cnt = 0;
+}
+static inline u32
+wl_get_status_all(struct bcm_cfg80211 *cfg, s32 status)
+
+{
+	struct net_info *_net_info, *next;
+	u32 cnt = 0;
+	list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
+		if (_net_info->ndev &&
+			test_bit(status, &_net_info->sme_state))
+			cnt++;
+	}
+	return cnt;
+}
+static inline void
+wl_set_status_all(struct bcm_cfg80211 *cfg, s32 status, u32 op)
+{
+	struct net_info *_net_info, *next;
+	list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
+		switch (op) {
+			case 1:
+				return; /* set all status is not allowed */
+			case 2:
+				clear_bit(status, &_net_info->sme_state);
+				if (cfg->state_notifier &&
+					test_bit(status, &(cfg->interrested_state)))
+					cfg->state_notifier(cfg, _net_info, status, false);
+				break;
+			case 4:
+				return; /* change all status is not allowed */
+			default:
+				return; /* unknown operation */
+		}
+	}
+}
+static inline void
+wl_set_status_by_netdev(struct bcm_cfg80211 *cfg, s32 status,
+	struct net_device *ndev, u32 op)
+{
+
+	struct net_info *_net_info, *next;
+
+	list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
+		if (ndev && (_net_info->ndev == ndev)) {
+			switch (op) {
+				case 1:
+					set_bit(status, &_net_info->sme_state);
+					if (cfg->state_notifier &&
+						test_bit(status, &(cfg->interrested_state)))
+						cfg->state_notifier(cfg, _net_info, status, true);
+					break;
+				case 2:
+					clear_bit(status, &_net_info->sme_state);
+					if (cfg->state_notifier &&
+						test_bit(status, &(cfg->interrested_state)))
+						cfg->state_notifier(cfg, _net_info, status, false);
+					break;
+				case 4:
+					change_bit(status, &_net_info->sme_state);
+					break;
+			}
+		}
+
+	}
+
+}
+
+static inline u32
+wl_get_status_by_netdev(struct bcm_cfg80211 *cfg, s32 status,
+	struct net_device *ndev)
+{
+	struct net_info *_net_info, *next;
+
+	list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
+				if (ndev && (_net_info->ndev == ndev))
+					return test_bit(status, &_net_info->sme_state);
+	}
+	return 0;
+}
+
+static inline s32
+wl_get_mode_by_netdev(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+{
+	struct net_info *_net_info, *next;
+
+	list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
+				if (ndev && (_net_info->ndev == ndev))
+					return _net_info->mode;
+	}
+	return -1;
+}
+
+
+static inline void
+wl_set_mode_by_netdev(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	s32 mode)
+{
+	struct net_info *_net_info, *next;
+
+	list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
+				if (ndev && (_net_info->ndev == ndev))
+					_net_info->mode = mode;
+	}
+}
+static inline struct wl_profile *
+wl_get_profile_by_netdev(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+{
+	struct net_info *_net_info, *next;
+
+	list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
+				if (ndev && (_net_info->ndev == ndev))
+					return &_net_info->profile;
+	}
+	return NULL;
+}
+static inline struct net_info *
+wl_get_netinfo_by_netdev(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+{
+	struct net_info *_net_info, *next;
+
+	list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
+				if (ndev && (_net_info->ndev == ndev))
+					return _net_info;
+	}
+	return NULL;
+}
+#define bcmcfg_to_wiphy(cfg) (cfg->wdev->wiphy)
+#define bcmcfg_to_prmry_ndev(cfg) (cfg->wdev->netdev)
+#define bcmcfg_to_prmry_wdev(cfg) (cfg->wdev)
+#define bcmcfg_to_p2p_wdev(cfg) (cfg->p2p_wdev)
+#define ndev_to_wl(n) (wdev_to_wl(n->ieee80211_ptr))
+#define ndev_to_wdev(ndev) (ndev->ieee80211_ptr)
+#define wdev_to_ndev(wdev) (wdev->netdev)
+
+#if defined(WL_ENABLE_P2P_IF)
+#define ndev_to_wlc_ndev(ndev, cfg)	((ndev == cfg->p2p_net) ? \
+	bcmcfg_to_prmry_ndev(cfg) : ndev)
+#else
+#define ndev_to_wlc_ndev(ndev, cfg)	(ndev)
+#endif /* WL_ENABLE_P2P_IF */
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+#define wdev_to_wlc_ndev(wdev, cfg)	\
+	((wdev->iftype == NL80211_IFTYPE_P2P_DEVICE) ? \
+	bcmcfg_to_prmry_ndev(cfg) : wdev_to_ndev(wdev))
+#define cfgdev_to_wlc_ndev(cfgdev, cfg)	wdev_to_wlc_ndev(cfgdev, cfg)
+#define bcmcfg_to_prmry_cfgdev(cfgdev, cfg) bcmcfg_to_prmry_wdev(cfg)
+#elif defined(WL_ENABLE_P2P_IF)
+#define cfgdev_to_wlc_ndev(cfgdev, cfg)	ndev_to_wlc_ndev(cfgdev, cfg)
+#define bcmcfg_to_prmry_cfgdev(cfgdev, cfg) bcmcfg_to_prmry_ndev(cfg)
+#else
+#define cfgdev_to_wlc_ndev(cfgdev, cfg)	(cfgdev)
+#define bcmcfg_to_prmry_cfgdev(cfgdev, cfg) (cfgdev)
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+#define ndev_to_cfgdev(ndev)	ndev_to_wdev(ndev)
+#define cfgdev_to_ndev(cfgdev)	(cfgdev->netdev)
+#define discover_cfgdev(cfgdev, cfg) (cfgdev->iftype == NL80211_IFTYPE_P2P_DEVICE)
+#else
+#define ndev_to_cfgdev(ndev)	(ndev)
+#define cfgdev_to_ndev(cfgdev)	(cfgdev)
+#define discover_cfgdev(cfgdev, cfg) (cfgdev == cfg->p2p_net)
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+#define scan_req_match(cfg)	(((cfg) && (cfg->scan_request) && \
+	(cfg->scan_request->wdev == cfg->p2p_wdev)) ? true : false)
+#elif defined(WL_ENABLE_P2P_IF)
+#define scan_req_match(cfg)	(((cfg) && (cfg->scan_request) && \
+	(cfg->scan_request->dev == cfg->p2p_net)) ? true : false)
+#else
+#define scan_req_match(cfg)	(((cfg) && p2p_is_on(cfg) && p2p_scan(cfg)) ? \
+	true : false)
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+#define wl_to_sr(w) (w->scan_req_int)
+#if defined(STATIC_WL_PRIV_STRUCT)
+#define wl_to_ie(w) (w->ie)
+#define wl_to_conn(w) (w->conn_info)
+#else
+#define wl_to_ie(w) (&w->ie)
+#define wl_to_conn(w) (&w->conn_info)
+#endif
+#define wiphy_from_scan(w) (w->escan_info.wiphy)
+#define wl_get_drv_status_all(cfg, stat) \
+	(wl_get_status_all(cfg, WL_STATUS_ ## stat))
+#define wl_get_drv_status(cfg, stat, ndev)  \
+	(wl_get_status_by_netdev(cfg, WL_STATUS_ ## stat, ndev))
+#define wl_set_drv_status(cfg, stat, ndev)  \
+	(wl_set_status_by_netdev(cfg, WL_STATUS_ ## stat, ndev, 1))
+#define wl_clr_drv_status(cfg, stat, ndev)  \
+	(wl_set_status_by_netdev(cfg, WL_STATUS_ ## stat, ndev, 2))
+#define wl_clr_drv_status_all(cfg, stat)  \
+	(wl_set_status_all(cfg, WL_STATUS_ ## stat, 2))
+#define wl_chg_drv_status(cfg, stat, ndev)  \
+	(wl_set_status_by_netdev(cfg, WL_STATUS_ ## stat, ndev, 4))
+
+#define for_each_bss(list, bss, __i)	\
+	for (__i = 0; __i < list->count && __i < WL_AP_MAX; __i++, bss = next_bss(list, bss))
+
+#define for_each_ndev(cfg, iter, next) \
+	list_for_each_entry_safe(iter, next, &cfg->net_list, list)
+
+
+/* In case of WPS from wpa_supplicant, pairwise siute and group suite is 0.
+ * In addtion to that, wpa_version is WPA_VERSION_1
+ */
+#define is_wps_conn(_sme) \
+	((wl_cfgp2p_find_wpsie((u8 *)_sme->ie, _sme->ie_len) != NULL) && \
+	 (!_sme->crypto.n_ciphers_pairwise) && \
+	 (!_sme->crypto.cipher_group))
+extern s32 wl_cfg80211_attach(struct net_device *ndev, void *context);
+extern s32 wl_cfg80211_attach_post(struct net_device *ndev);
+extern void wl_cfg80211_detach(void *para);
+
+extern void wl_cfg80211_event(struct net_device *ndev, const wl_event_msg_t *e,
+            void *data);
+void wl_cfg80211_set_parent_dev(void *dev);
+struct device *wl_cfg80211_get_parent_dev(void);
+
+extern s32 wl_cfg80211_up(void *para);
+extern s32 wl_cfg80211_down(void *para);
+extern s32 wl_cfg80211_notify_ifadd(int ifidx, char *name, uint8 *mac, uint8 bssidx);
+extern s32 wl_cfg80211_notify_ifdel(int ifidx, char *name, uint8 *mac, uint8 bssidx);
+extern s32 wl_cfg80211_notify_ifchange(int ifidx, char *name, uint8 *mac, uint8 bssidx);
+extern struct net_device* wl_cfg80211_allocate_if(struct bcm_cfg80211 *cfg, int ifidx, char *name,
+	uint8 *mac, uint8 bssidx);
+extern int wl_cfg80211_register_if(struct bcm_cfg80211 *cfg, int ifidx, struct net_device* ndev);
+extern int wl_cfg80211_remove_if(struct bcm_cfg80211 *cfg, int ifidx, struct net_device* ndev);
+extern int wl_cfg80211_scan_stop(bcm_struct_cfgdev *cfgdev);
+extern bool wl_cfg80211_is_vsdb_mode(void);
+extern void* wl_cfg80211_get_dhdp(void);
+extern bool wl_cfg80211_is_p2p_active(void);
+extern void wl_cfg80211_dbg_level(u32 level);
+extern s32 wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr);
+extern s32 wl_cfg80211_set_p2p_noa(struct net_device *net, char* buf, int len);
+extern s32 wl_cfg80211_get_p2p_noa(struct net_device *net, char* buf, int len);
+extern s32 wl_cfg80211_set_wps_p2p_ie(struct net_device *net, char *buf, int len,
+	enum wl_management_type type);
+extern s32 wl_cfg80211_set_p2p_ps(struct net_device *net, char* buf, int len);
+
+/* btcoex functions */
+void* wl_cfg80211_btcoex_init(struct net_device *ndev);
+void wl_cfg80211_btcoex_deinit(void);
+
+#ifdef WL_SUPPORT_AUTO_CHANNEL
+#define CHANSPEC_BUF_SIZE	1024
+#define CHAN_SEL_IOCTL_DELAY	300
+#define CHAN_SEL_RETRY_COUNT	15
+#define CHANNEL_IS_RADAR(channel)	(((channel & WL_CHAN_RADAR) || \
+	(channel & WL_CHAN_PASSIVE)) ? true : false)
+#define CHANNEL_IS_2G(channel)	(((channel >= 1) && (channel <= 14)) ? \
+	true : false)
+#define CHANNEL_IS_5G(channel)	(((channel >= 36) && (channel <= 165)) ? \
+	true : false)
+extern s32 wl_cfg80211_get_best_channels(struct net_device *dev, char* command,
+	int total_len);
+#endif /* WL_SUPPORT_AUTO_CHANNEL */
+extern int wl_cfg80211_hang(struct net_device *dev, u16 reason);
+extern s32 wl_mode_to_nl80211_iftype(s32 mode);
+int wl_cfg80211_do_driver_init(struct net_device *net);
+void wl_cfg80211_enable_trace(bool set, u32 level);
+extern s32 wl_update_wiphybands(struct bcm_cfg80211 *cfg, bool notify);
+extern s32 wl_cfg80211_if_is_group_owner(void);
+extern chanspec_t wl_ch_host_to_driver(u16 channel);
+extern s32 wl_set_tx_power(struct net_device *dev,
+	enum nl80211_tx_power_setting type, s32 dbm);
+extern s32 wl_get_tx_power(struct net_device *dev, s32 *dbm);
+extern s32 wl_add_remove_eventmsg(struct net_device *ndev, u16 event, bool add);
+extern void wl_stop_wait_next_action_frame(struct bcm_cfg80211 *cfg, struct net_device *ndev);
+extern void wl_cfg80211_add_to_eventbuffer(wl_eventmsg_buf_t *ev, u16 event, bool set);
+extern s32 wl_cfg80211_apply_eventbuffer(struct net_device *ndev,
+	struct bcm_cfg80211 *cfg, wl_eventmsg_buf_t *ev);
+extern void get_primary_mac(struct bcm_cfg80211 *cfg, struct ether_addr *mac);
+extern void wl_cfg80211_update_power_mode(struct net_device *dev);
+#define SCAN_BUF_CNT	2
+#define SCAN_BUF_NEXT	1
+#define WL_SCANTYPE_LEGACY	0x1
+#define WL_SCANTYPE_P2P		0x2
+#define wl_escan_set_sync_id(a, b) ((a) = htod16(0x1234))
+#define wl_escan_set_type(a, b)
+#define wl_escan_get_buf(a, b) ((wl_scan_results_t *) (a)->escan_info.escan_buf)
+#define wl_escan_check_sync_id(a, b, c) 0
+#define wl_escan_print_sync_id(a, b, c)
+#define wl_escan_increment_sync_id(a, b)
+#define wl_escan_init_sync_id(a)
+extern void wl_cfg80211_ibss_vsie_set_buffer(vndr_ie_setbuf_t *ibss_vsie, int ibss_vsie_len);
+extern s32 wl_cfg80211_ibss_vsie_delete(struct net_device *dev);
+
+/* Action frame specific functions */
+extern u8 wl_get_action_category(void *frame, u32 frame_len);
+extern int wl_get_public_action(void *frame, u32 frame_len, u8 *ret_action);
+
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+struct net_device *wl_cfg80211_get_remain_on_channel_ndev(struct bcm_cfg80211 *cfg);
+#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+
+#ifdef WL_SUPPORT_ACS
+#define ACS_MSRMNT_DELAY 1000 /* dump_obss delay in ms */
+#define IOCTL_RETRY_COUNT 5
+#define CHAN_NOISE_DUMMY -80
+#define OBSS_TOKEN_IDX 15
+#define IBSS_TOKEN_IDX 15
+#define TX_TOKEN_IDX 14
+#define CTG_TOKEN_IDX 13
+#define PKT_TOKEN_IDX 15
+#define IDLE_TOKEN_IDX 12
+#endif /* WL_SUPPORT_ACS */
+
+extern int wl_cfg80211_get_ioctl_version(void);
+extern int wl_cfg80211_enable_roam_offload(struct net_device *dev, bool enable);
+#endif				/* _wl_cfg80211_h_ */
diff --git a/drivers/net/wireless/bcmdhd/wl_cfg_btcoex.c b/drivers/net/wireless/bcmdhd/wl_cfg_btcoex.c
new file mode 100644
index 0000000..0220da1
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_cfg_btcoex.c
@@ -0,0 +1,549 @@
+/*
+ * Linux cfg80211 driver - Dongle Host Driver (DHD) related
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_cfg_btcoex.c 467328 2014-04-03 01:23:40Z $
+ */
+
+#include <net/rtnetlink.h>
+
+#include <bcmutils.h>
+#include <wldev_common.h>
+#include <wl_cfg80211.h>
+#include <dhd_cfg80211.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhdioctl.h>
+#include <wlioctl.h>
+
+#ifdef PKT_FILTER_SUPPORT
+extern uint dhd_pkt_filter_enable;
+extern uint dhd_master_mode;
+extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
+#endif
+
+struct btcoex_info {
+	struct timer_list timer;
+	u32 timer_ms;
+	u32 timer_on;
+	u32 ts_dhcp_start;	/* ms ts ecord time stats */
+	u32 ts_dhcp_ok;		/* ms ts ecord time stats */
+	bool dhcp_done;	/* flag, indicates that host done with
+					 * dhcp before t1/t2 expiration
+					 */
+	s32 bt_state;
+	struct work_struct work;
+	struct net_device *dev;
+};
+
+static struct btcoex_info *btcoex_info_loc = NULL;
+
+/* TODO: clean up the BT-Coex code, it still have some legacy ioctl/iovar functions */
+
+/* use New SCO/eSCO smart YG suppression */
+#define BT_DHCP_eSCO_FIX
+/* this flag boost wifi pkt priority to max, caution: -not fair to sco */
+#define BT_DHCP_USE_FLAGS
+/* T1 start SCO/ESCo priority suppression */
+#define BT_DHCP_OPPR_WIN_TIME	2500
+/* T2 turn off SCO/SCO supperesion is (timeout) */
+#define BT_DHCP_FLAG_FORCE_TIME 5500
+
+enum wl_cfg80211_btcoex_status {
+	BT_DHCP_IDLE,
+	BT_DHCP_START,
+	BT_DHCP_OPPR_WIN,
+	BT_DHCP_FLAG_FORCE_TIMEOUT
+};
+
+/*
+ * get named driver variable to uint register value and return error indication
+ * calling example: dev_wlc_intvar_get_reg(dev, "btc_params",66, &reg_value)
+ */
+static int
+dev_wlc_intvar_get_reg(struct net_device *dev, char *name,
+	uint reg, int *retval)
+{
+	union {
+		char buf[WLC_IOCTL_SMLEN];
+		int val;
+	} var;
+	int error;
+
+	bcm_mkiovar(name, (char *)(&reg), sizeof(reg),
+		(char *)(&var), sizeof(var.buf));
+	error = wldev_ioctl(dev, WLC_GET_VAR, (char *)(&var), sizeof(var.buf), false);
+
+	*retval = dtoh32(var.val);
+	return (error);
+}
+
+static int
+dev_wlc_bufvar_set(struct net_device *dev, char *name, char *buf, int len)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
+	char ioctlbuf_local[1024];
+#else
+	static char ioctlbuf_local[1024];
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
+
+	bcm_mkiovar(name, buf, len, ioctlbuf_local, sizeof(ioctlbuf_local));
+
+	return (wldev_ioctl(dev, WLC_SET_VAR, ioctlbuf_local, sizeof(ioctlbuf_local), true));
+}
+/*
+get named driver variable to uint register value and return error indication
+calling example: dev_wlc_intvar_set_reg(dev, "btc_params",66, value)
+*/
+static int
+dev_wlc_intvar_set_reg(struct net_device *dev, char *name, char *addr, char * val)
+{
+	char reg_addr[8];
+
+	memset(reg_addr, 0, sizeof(reg_addr));
+	memcpy((char *)&reg_addr[0], (char *)addr, 4);
+	memcpy((char *)&reg_addr[4], (char *)val, 4);
+
+	return (dev_wlc_bufvar_set(dev, name, (char *)&reg_addr[0], sizeof(reg_addr)));
+}
+
+static bool btcoex_is_sco_active(struct net_device *dev)
+{
+	int ioc_res = 0;
+	bool res = FALSE;
+	int sco_id_cnt = 0;
+	int param27;
+	int i;
+
+	for (i = 0; i < 12; i++) {
+
+		ioc_res = dev_wlc_intvar_get_reg(dev, "btc_params", 27, &param27);
+
+		WL_TRACE(("sample[%d], btc params: 27:%x\n", i, param27));
+
+		if (ioc_res < 0) {
+			WL_ERR(("ioc read btc params error\n"));
+			break;
+		}
+
+		if ((param27 & 0x6) == 2) { /* count both sco & esco  */
+			sco_id_cnt++;
+		}
+
+		if (sco_id_cnt > 2) {
+			WL_TRACE(("sco/esco detected, pkt id_cnt:%d  samples:%d\n",
+				sco_id_cnt, i));
+			res = TRUE;
+			break;
+		}
+
+		OSL_SLEEP(5);
+	}
+
+	return res;
+}
+
+#if defined(BT_DHCP_eSCO_FIX)
+/* Enhanced BT COEX settings for eSCO compatibility during DHCP window */
+static int set_btc_esco_params(struct net_device *dev, bool trump_sco)
+{
+	static bool saved_status = FALSE;
+
+	char buf_reg50va_dhcp_on[8] =
+		{ 50, 00, 00, 00, 0x22, 0x80, 0x00, 0x00 };
+	char buf_reg51va_dhcp_on[8] =
+		{ 51, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
+	char buf_reg64va_dhcp_on[8] =
+		{ 64, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
+	char buf_reg65va_dhcp_on[8] =
+		{ 65, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
+	char buf_reg71va_dhcp_on[8] =
+		{ 71, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
+	uint32 regaddr;
+	static uint32 saved_reg50;
+	static uint32 saved_reg51;
+	static uint32 saved_reg64;
+	static uint32 saved_reg65;
+	static uint32 saved_reg71;
+
+	if (trump_sco) {
+		/* this should reduce eSCO agressive retransmit
+		 * w/o breaking it
+		 */
+
+		/* 1st save current */
+		WL_TRACE(("Do new SCO/eSCO coex algo {save &"
+			  "override}\n"));
+		if ((!dev_wlc_intvar_get_reg(dev, "btc_params", 50, &saved_reg50)) &&
+			(!dev_wlc_intvar_get_reg(dev, "btc_params", 51, &saved_reg51)) &&
+			(!dev_wlc_intvar_get_reg(dev, "btc_params", 64, &saved_reg64)) &&
+			(!dev_wlc_intvar_get_reg(dev, "btc_params", 65, &saved_reg65)) &&
+			(!dev_wlc_intvar_get_reg(dev, "btc_params", 71, &saved_reg71))) {
+			saved_status = TRUE;
+			WL_TRACE(("saved bt_params[50,51,64,65,71]:"
+				  "0x%x 0x%x 0x%x 0x%x 0x%x\n",
+				  saved_reg50, saved_reg51,
+				  saved_reg64, saved_reg65, saved_reg71));
+		} else {
+			WL_ERR((":%s: save btc_params failed\n",
+				__FUNCTION__));
+			saved_status = FALSE;
+			return -1;
+		}
+
+		WL_TRACE(("override with [50,51,64,65,71]:"
+			  "0x%x 0x%x 0x%x 0x%x 0x%x\n",
+			  *(u32 *)(buf_reg50va_dhcp_on+4),
+			  *(u32 *)(buf_reg51va_dhcp_on+4),
+			  *(u32 *)(buf_reg64va_dhcp_on+4),
+			  *(u32 *)(buf_reg65va_dhcp_on+4),
+			  *(u32 *)(buf_reg71va_dhcp_on+4)));
+
+		dev_wlc_bufvar_set(dev, "btc_params",
+			(char *)&buf_reg50va_dhcp_on[0], 8);
+		dev_wlc_bufvar_set(dev, "btc_params",
+			(char *)&buf_reg51va_dhcp_on[0], 8);
+		dev_wlc_bufvar_set(dev, "btc_params",
+			(char *)&buf_reg64va_dhcp_on[0], 8);
+		dev_wlc_bufvar_set(dev, "btc_params",
+			(char *)&buf_reg65va_dhcp_on[0], 8);
+		dev_wlc_bufvar_set(dev, "btc_params",
+			(char *)&buf_reg71va_dhcp_on[0], 8);
+
+		saved_status = TRUE;
+	} else if (saved_status) {
+		/* restore previously saved bt params */
+		WL_TRACE(("Do new SCO/eSCO coex algo {save &"
+			  "override}\n"));
+
+		regaddr = 50;
+		dev_wlc_intvar_set_reg(dev, "btc_params",
+			(char *)&regaddr, (char *)&saved_reg50);
+		regaddr = 51;
+		dev_wlc_intvar_set_reg(dev, "btc_params",
+			(char *)&regaddr, (char *)&saved_reg51);
+		regaddr = 64;
+		dev_wlc_intvar_set_reg(dev, "btc_params",
+			(char *)&regaddr, (char *)&saved_reg64);
+		regaddr = 65;
+		dev_wlc_intvar_set_reg(dev, "btc_params",
+			(char *)&regaddr, (char *)&saved_reg65);
+		regaddr = 71;
+		dev_wlc_intvar_set_reg(dev, "btc_params",
+			(char *)&regaddr, (char *)&saved_reg71);
+
+		WL_TRACE(("restore bt_params[50,51,64,65,71]:"
+			"0x%x 0x%x 0x%x 0x%x 0x%x\n",
+			saved_reg50, saved_reg51, saved_reg64,
+			saved_reg65, saved_reg71));
+
+		saved_status = FALSE;
+	} else {
+		WL_ERR((":%s att to restore not saved BTCOEX params\n",
+			__FUNCTION__));
+		return -1;
+	}
+	return 0;
+}
+#endif /* BT_DHCP_eSCO_FIX */
+
+static void
+wl_cfg80211_bt_setflag(struct net_device *dev, bool set)
+{
+#if defined(BT_DHCP_USE_FLAGS)
+	char buf_flag7_dhcp_on[8] = { 7, 00, 00, 00, 0x1, 0x0, 0x00, 0x00 };
+	char buf_flag7_default[8]   = { 7, 00, 00, 00, 0x0, 0x00, 0x00, 0x00};
+#endif
+
+
+#if defined(BT_DHCP_eSCO_FIX)
+	/* set = 1, save & turn on  0 - off & restore prev settings */
+	set_btc_esco_params(dev, set);
+#endif
+
+#if defined(BT_DHCP_USE_FLAGS)
+	WL_TRACE(("WI-FI priority boost via bt flags, set:%d\n", set));
+	if (set == TRUE)
+		/* Forcing bt_flag7  */
+		dev_wlc_bufvar_set(dev, "btc_flags",
+			(char *)&buf_flag7_dhcp_on[0],
+			sizeof(buf_flag7_dhcp_on));
+	else
+		/* Restoring default bt flag7 */
+		dev_wlc_bufvar_set(dev, "btc_flags",
+			(char *)&buf_flag7_default[0],
+			sizeof(buf_flag7_default));
+#endif
+}
+
+static void wl_cfg80211_bt_timerfunc(ulong data)
+{
+	struct btcoex_info *bt_local = (struct btcoex_info *)data;
+	WL_TRACE(("Enter\n"));
+	bt_local->timer_on = 0;
+	schedule_work(&bt_local->work);
+}
+
+static void wl_cfg80211_bt_handler(struct work_struct *work)
+{
+	struct btcoex_info *btcx_inf;
+
+	btcx_inf = container_of(work, struct btcoex_info, work);
+
+	if (btcx_inf->timer_on) {
+		btcx_inf->timer_on = 0;
+		del_timer_sync(&btcx_inf->timer);
+	}
+
+	switch (btcx_inf->bt_state) {
+		case BT_DHCP_START:
+			/* DHCP started
+			 * provide OPPORTUNITY window to get DHCP address
+			 */
+			WL_TRACE(("bt_dhcp stm: started \n"));
+
+			btcx_inf->bt_state = BT_DHCP_OPPR_WIN;
+			mod_timer(&btcx_inf->timer,
+				jiffies + msecs_to_jiffies(BT_DHCP_OPPR_WIN_TIME));
+			btcx_inf->timer_on = 1;
+			break;
+
+		case BT_DHCP_OPPR_WIN:
+			if (btcx_inf->dhcp_done) {
+				WL_TRACE(("DHCP Done before T1 expiration\n"));
+				goto btc_coex_idle;
+			}
+
+			/* DHCP is not over yet, start lowering BT priority
+			 * enforce btc_params + flags if necessary
+			 */
+			WL_TRACE(("DHCP T1:%d expired\n", BT_DHCP_OPPR_WIN_TIME));
+			if (btcx_inf->dev)
+				wl_cfg80211_bt_setflag(btcx_inf->dev, TRUE);
+			btcx_inf->bt_state = BT_DHCP_FLAG_FORCE_TIMEOUT;
+			mod_timer(&btcx_inf->timer,
+				jiffies + msecs_to_jiffies(BT_DHCP_FLAG_FORCE_TIME));
+			btcx_inf->timer_on = 1;
+			break;
+
+		case BT_DHCP_FLAG_FORCE_TIMEOUT:
+			if (btcx_inf->dhcp_done) {
+				WL_TRACE(("DHCP Done before T2 expiration\n"));
+			} else {
+				/* Noo dhcp during T1+T2, restore BT priority */
+				WL_TRACE(("DHCP wait interval T2:%d msec expired\n",
+					BT_DHCP_FLAG_FORCE_TIME));
+			}
+
+			/* Restoring default bt priority */
+			if (btcx_inf->dev)
+				wl_cfg80211_bt_setflag(btcx_inf->dev, FALSE);
+btc_coex_idle:
+			btcx_inf->bt_state = BT_DHCP_IDLE;
+			btcx_inf->timer_on = 0;
+			break;
+
+		default:
+			WL_ERR(("error g_status=%d !!!\n",	btcx_inf->bt_state));
+			if (btcx_inf->dev)
+				wl_cfg80211_bt_setflag(btcx_inf->dev, FALSE);
+			btcx_inf->bt_state = BT_DHCP_IDLE;
+			btcx_inf->timer_on = 0;
+			break;
+	}
+
+	net_os_wake_unlock(btcx_inf->dev);
+}
+
+void* wl_cfg80211_btcoex_init(struct net_device *ndev)
+{
+	struct btcoex_info *btco_inf = NULL;
+
+	btco_inf = kmalloc(sizeof(struct btcoex_info), GFP_KERNEL);
+	if (!btco_inf)
+		return NULL;
+
+	btco_inf->bt_state = BT_DHCP_IDLE;
+	btco_inf->ts_dhcp_start = 0;
+	btco_inf->ts_dhcp_ok = 0;
+	/* Set up timer for BT  */
+	btco_inf->timer_ms = 10;
+	init_timer(&btco_inf->timer);
+	btco_inf->timer.data = (ulong)btco_inf;
+	btco_inf->timer.function = wl_cfg80211_bt_timerfunc;
+
+	btco_inf->dev = ndev;
+
+	INIT_WORK(&btco_inf->work, wl_cfg80211_bt_handler);
+
+	btcoex_info_loc = btco_inf;
+	return btco_inf;
+}
+
+void wl_cfg80211_btcoex_deinit()
+{
+	if (!btcoex_info_loc)
+		return;
+
+	if (btcoex_info_loc->timer_on) {
+		btcoex_info_loc->timer_on = 0;
+		del_timer_sync(&btcoex_info_loc->timer);
+	}
+
+	cancel_work_sync(&btcoex_info_loc->work);
+
+	kfree(btcoex_info_loc);
+}
+
+int wl_cfg80211_set_btcoex_dhcp(struct net_device *dev, dhd_pub_t *dhd, char *command)
+{
+
+	struct btcoex_info *btco_inf = btcoex_info_loc;
+	char powermode_val = 0;
+	char buf_reg66va_dhcp_on[8] = { 66, 00, 00, 00, 0x10, 0x27, 0x00, 0x00 };
+	char buf_reg41va_dhcp_on[8] = { 41, 00, 00, 00, 0x33, 0x00, 0x00, 0x00 };
+	char buf_reg68va_dhcp_on[8] = { 68, 00, 00, 00, 0x90, 0x01, 0x00, 0x00 };
+
+	uint32 regaddr;
+	static uint32 saved_reg66;
+	static uint32 saved_reg41;
+	static uint32 saved_reg68;
+	static bool saved_status = FALSE;
+
+	char buf_flag7_default[8] =   { 7, 00, 00, 00, 0x0, 0x00, 0x00, 0x00};
+
+	/* Figure out powermode 1 or o command */
+	strncpy((char *)&powermode_val, command + strlen("BTCOEXMODE") +1, 1);
+
+	if (strnicmp((char *)&powermode_val, "1", strlen("1")) == 0) {
+		WL_TRACE_HW4(("DHCP session starts\n"));
+
+
+#ifdef PKT_FILTER_SUPPORT
+		dhd->dhcp_in_progress = 1;
+
+		if (dhd->early_suspended) {
+			WL_TRACE_HW4(("DHCP in progressing , disable packet filter!!!\n"));
+			dhd_enable_packet_filter(0, dhd);
+		}
+#endif
+
+		/* Retrieve and saved orig regs value */
+		if ((saved_status == FALSE) &&
+			(!dev_wlc_intvar_get_reg(dev, "btc_params", 66,  &saved_reg66)) &&
+			(!dev_wlc_intvar_get_reg(dev, "btc_params", 41,  &saved_reg41)) &&
+			(!dev_wlc_intvar_get_reg(dev, "btc_params", 68,  &saved_reg68)))   {
+				saved_status = TRUE;
+				WL_TRACE(("Saved 0x%x 0x%x 0x%x\n",
+					saved_reg66, saved_reg41, saved_reg68));
+
+				/* Disable PM mode during dhpc session */
+
+				/* Disable PM mode during dhpc session */
+				/* Start  BT timer only for SCO connection */
+				if (btcoex_is_sco_active(dev)) {
+					/* btc_params 66 */
+					dev_wlc_bufvar_set(dev, "btc_params",
+						(char *)&buf_reg66va_dhcp_on[0],
+						sizeof(buf_reg66va_dhcp_on));
+					/* btc_params 41 0x33 */
+					dev_wlc_bufvar_set(dev, "btc_params",
+						(char *)&buf_reg41va_dhcp_on[0],
+						sizeof(buf_reg41va_dhcp_on));
+					/* btc_params 68 0x190 */
+					dev_wlc_bufvar_set(dev, "btc_params",
+						(char *)&buf_reg68va_dhcp_on[0],
+						sizeof(buf_reg68va_dhcp_on));
+					saved_status = TRUE;
+
+					btco_inf->bt_state = BT_DHCP_START;
+					btco_inf->timer_on = 1;
+					mod_timer(&btco_inf->timer, btco_inf->timer.expires);
+					WL_TRACE(("enable BT DHCP Timer\n"));
+				}
+		}
+		else if (saved_status == TRUE) {
+			WL_ERR(("was called w/o DHCP OFF. Continue\n"));
+		}
+	}
+	else if (strnicmp((char *)&powermode_val, "2", strlen("2")) == 0) {
+
+
+
+#ifdef PKT_FILTER_SUPPORT
+		dhd->dhcp_in_progress = 0;
+		WL_TRACE_HW4(("DHCP is complete \n"));
+
+		/* Enable packet filtering */
+		if (dhd->early_suspended) {
+			WL_TRACE_HW4(("DHCP is complete , enable packet filter!!!\n"));
+			dhd_enable_packet_filter(1, dhd);
+		}
+#endif /* PKT_FILTER_SUPPORT */
+
+		/* Restoring PM mode */
+
+		/* Stop any bt timer because DHCP session is done */
+		WL_TRACE(("disable BT DHCP Timer\n"));
+		if (btco_inf->timer_on) {
+			btco_inf->timer_on = 0;
+			del_timer_sync(&btco_inf->timer);
+
+			if (btco_inf->bt_state != BT_DHCP_IDLE) {
+			/* need to restore original btc flags & extra btc params */
+				WL_TRACE(("bt->bt_state:%d\n", btco_inf->bt_state));
+				/* wake up btcoex thread to restore btlags+params  */
+				schedule_work(&btco_inf->work);
+			}
+		}
+
+		/* Restoring btc_flag paramter anyway */
+		if (saved_status == TRUE)
+			dev_wlc_bufvar_set(dev, "btc_flags",
+				(char *)&buf_flag7_default[0], sizeof(buf_flag7_default));
+
+		/* Restore original values */
+		if (saved_status == TRUE) {
+			regaddr = 66;
+			dev_wlc_intvar_set_reg(dev, "btc_params",
+				(char *)&regaddr, (char *)&saved_reg66);
+			regaddr = 41;
+			dev_wlc_intvar_set_reg(dev, "btc_params",
+				(char *)&regaddr, (char *)&saved_reg41);
+			regaddr = 68;
+			dev_wlc_intvar_set_reg(dev, "btc_params",
+				(char *)&regaddr, (char *)&saved_reg68);
+
+			WL_TRACE(("restore regs {66,41,68} <- 0x%x 0x%x 0x%x\n",
+				saved_reg66, saved_reg41, saved_reg68));
+		}
+		saved_status = FALSE;
+
+	}
+	else {
+		WL_ERR(("Unkwown yet power setting, ignored\n"));
+	}
+
+	snprintf(command, 3, "OK");
+
+	return (strlen("OK"));
+}
diff --git a/drivers/net/wireless/bcmdhd/wl_cfgp2p.c b/drivers/net/wireless/bcmdhd/wl_cfgp2p.c
new file mode 100644
index 0000000..70904f1
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_cfgp2p.c
@@ -0,0 +1,2658 @@
+/*
+ * Linux cfgp2p driver
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ *
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ *
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_cfgp2p.c 472818 2014-04-25 08:07:56Z $
+ *
+ */
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/if_arp.h>
+#include <asm/uaccess.h>
+
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <proto/ethernet.h>
+#include <proto/802.11.h>
+#include <net/rtnetlink.h>
+
+#include <wl_cfg80211.h>
+#include <wl_cfgp2p.h>
+#include <wldev_common.h>
+#include <wl_android.h>
+
+static s8 scanparambuf[WLC_IOCTL_SMLEN];
+static s8 g_mgmt_ie_buf[2048];
+static bool
+wl_cfgp2p_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len, const u8 *oui, u32 oui_len, u8 type);
+
+static u32
+wl_cfgp2p_vndr_ie(struct bcm_cfg80211 *cfg, u8 *iebuf, s32 pktflag,
+            s8 *oui, s32 ie_id, s8 *data, s32 datalen, const s8* add_del_cmd);
+static s32 wl_cfgp2p_cancel_listen(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	struct wireless_dev *wdev, bool notify);
+
+#if defined(WL_ENABLE_P2P_IF)
+static int wl_cfgp2p_start_xmit(struct sk_buff *skb, struct net_device *ndev);
+static int wl_cfgp2p_do_ioctl(struct net_device *net, struct ifreq *ifr, int cmd);
+static int wl_cfgp2p_if_open(struct net_device *net);
+static int wl_cfgp2p_if_stop(struct net_device *net);
+
+static const struct net_device_ops wl_cfgp2p_if_ops = {
+	.ndo_open       = wl_cfgp2p_if_open,
+	.ndo_stop       = wl_cfgp2p_if_stop,
+	.ndo_do_ioctl   = wl_cfgp2p_do_ioctl,
+	.ndo_start_xmit = wl_cfgp2p_start_xmit,
+};
+#endif /* WL_ENABLE_P2P_IF */
+
+bool wl_cfgp2p_is_pub_action(void *frame, u32 frame_len)
+{
+	wifi_p2p_pub_act_frame_t *pact_frm;
+
+	if (frame == NULL)
+		return false;
+	pact_frm = (wifi_p2p_pub_act_frame_t *)frame;
+	if (frame_len < sizeof(wifi_p2p_pub_act_frame_t) -1)
+		return false;
+
+	if (pact_frm->category == P2P_PUB_AF_CATEGORY &&
+		pact_frm->action == P2P_PUB_AF_ACTION &&
+		pact_frm->oui_type == P2P_VER &&
+		memcmp(pact_frm->oui, P2P_OUI, sizeof(pact_frm->oui)) == 0) {
+		return true;
+	}
+
+	return false;
+}
+
+bool wl_cfgp2p_is_p2p_action(void *frame, u32 frame_len)
+{
+	wifi_p2p_action_frame_t *act_frm;
+
+	if (frame == NULL)
+		return false;
+	act_frm = (wifi_p2p_action_frame_t *)frame;
+	if (frame_len < sizeof(wifi_p2p_action_frame_t) -1)
+		return false;
+
+	if (act_frm->category == P2P_AF_CATEGORY &&
+		act_frm->type  == P2P_VER &&
+		memcmp(act_frm->OUI, P2P_OUI, DOT11_OUI_LEN) == 0) {
+		return true;
+	}
+
+	return false;
+}
+
+#define GAS_RESP_LEN		2
+#define DOUBLE_TLV_BODY_OFF	4
+#define GAS_RESP_OFFSET		4
+#define GAS_CRESP_OFFSET	5
+
+bool wl_cfgp2p_find_gas_subtype(u8 subtype, u8* data, u32 len)
+{
+	bcm_tlv_t *ie = (bcm_tlv_t *)data;
+	u8 *frame = NULL;
+	u16 id, flen;
+
+	/* Skipped first ANQP Element, if frame has anqp elemnt */
+	ie = bcm_parse_tlvs(ie, (int)len, DOT11_MNG_ADVERTISEMENT_ID);
+
+	if (ie == NULL)
+		return false;
+
+	frame = (uint8 *)ie + ie->len + TLV_HDR_LEN + GAS_RESP_LEN;
+	id = ((u16) (((frame)[1] << 8) | (frame)[0]));
+	flen = ((u16) (((frame)[3] << 8) | (frame)[2]));
+
+	/* If the contents match the OUI and the type */
+	if (flen >= WFA_OUI_LEN + 1 &&
+		id ==  P2PSD_GAS_NQP_INFOID &&
+		!bcmp(&frame[DOUBLE_TLV_BODY_OFF], (const uint8*)WFA_OUI, WFA_OUI_LEN) &&
+		subtype == frame[DOUBLE_TLV_BODY_OFF+WFA_OUI_LEN]) {
+		return true;
+	}
+
+	return false;
+}
+
+bool wl_cfgp2p_is_gas_action(void *frame, u32 frame_len)
+{
+
+	wifi_p2psd_gas_pub_act_frame_t *sd_act_frm;
+
+	if (frame == NULL)
+		return false;
+
+	sd_act_frm = (wifi_p2psd_gas_pub_act_frame_t *)frame;
+	if (frame_len < (sizeof(wifi_p2psd_gas_pub_act_frame_t) - 1))
+		return false;
+	if (sd_act_frm->category != P2PSD_ACTION_CATEGORY)
+		return false;
+
+	if (sd_act_frm->action == P2PSD_ACTION_ID_GAS_IREQ ||
+		sd_act_frm->action == P2PSD_ACTION_ID_GAS_IRESP ||
+		sd_act_frm->action == P2PSD_ACTION_ID_GAS_CREQ ||
+		sd_act_frm->action == P2PSD_ACTION_ID_GAS_CRESP)
+		return true;
+	else
+		return false;
+}
+void wl_cfgp2p_print_actframe(bool tx, void *frame, u32 frame_len, u32 channel)
+{
+	wifi_p2p_pub_act_frame_t *pact_frm;
+	wifi_p2p_action_frame_t *act_frm;
+	wifi_p2psd_gas_pub_act_frame_t *sd_act_frm;
+	if (!frame || frame_len <= 2)
+		return;
+
+	if (wl_cfgp2p_is_pub_action(frame, frame_len)) {
+		pact_frm = (wifi_p2p_pub_act_frame_t *)frame;
+		switch (pact_frm->subtype) {
+			case P2P_PAF_GON_REQ:
+				CFGP2P_ACTION(("%s P2P Group Owner Negotiation Req Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+				break;
+			case P2P_PAF_GON_RSP:
+				CFGP2P_ACTION(("%s P2P Group Owner Negotiation Rsp Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+				break;
+			case P2P_PAF_GON_CONF:
+				CFGP2P_ACTION(("%s P2P Group Owner Negotiation Confirm Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+				break;
+			case P2P_PAF_INVITE_REQ:
+				CFGP2P_ACTION(("%s P2P Invitation Request  Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+				break;
+			case P2P_PAF_INVITE_RSP:
+				CFGP2P_ACTION(("%s P2P Invitation Response Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+				break;
+			case P2P_PAF_DEVDIS_REQ:
+				CFGP2P_ACTION(("%s P2P Device Discoverability Request Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+				break;
+			case P2P_PAF_DEVDIS_RSP:
+				CFGP2P_ACTION(("%s P2P Device Discoverability Response Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+				break;
+			case P2P_PAF_PROVDIS_REQ:
+				CFGP2P_ACTION(("%s P2P Provision Discovery Request Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+				break;
+			case P2P_PAF_PROVDIS_RSP:
+				CFGP2P_ACTION(("%s P2P Provision Discovery Response Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+				break;
+			default:
+				CFGP2P_ACTION(("%s Unknown P2P Public Action Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+
+		}
+
+	} else if (wl_cfgp2p_is_p2p_action(frame, frame_len)) {
+		act_frm = (wifi_p2p_action_frame_t *)frame;
+		switch (act_frm->subtype) {
+			case P2P_AF_NOTICE_OF_ABSENCE:
+				CFGP2P_ACTION(("%s P2P Notice of Absence Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+				break;
+			case P2P_AF_PRESENCE_REQ:
+				CFGP2P_ACTION(("%s P2P Presence Request Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+				break;
+			case P2P_AF_PRESENCE_RSP:
+				CFGP2P_ACTION(("%s P2P Presence Response Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+				break;
+			case P2P_AF_GO_DISC_REQ:
+				CFGP2P_ACTION(("%s P2P Discoverability Request Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+				break;
+			default:
+				CFGP2P_ACTION(("%s Unknown P2P Action Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+		}
+
+	} else if (wl_cfgp2p_is_gas_action(frame, frame_len)) {
+		sd_act_frm = (wifi_p2psd_gas_pub_act_frame_t *)frame;
+		switch (sd_act_frm->action) {
+			case P2PSD_ACTION_ID_GAS_IREQ:
+				CFGP2P_ACTION(("%s P2P GAS Initial Request,"
+					" channel=%d\n", (tx)? "TX" : "RX", channel));
+				break;
+			case P2PSD_ACTION_ID_GAS_IRESP:
+				CFGP2P_ACTION(("%s P2P GAS Initial Response,"
+					" channel=%d\n", (tx)? "TX" : "RX", channel));
+				break;
+			case P2PSD_ACTION_ID_GAS_CREQ:
+				CFGP2P_ACTION(("%s P2P GAS Comback Request,"
+					" channel=%d\n", (tx)? "TX" : "RX", channel));
+				break;
+			case P2PSD_ACTION_ID_GAS_CRESP:
+				CFGP2P_ACTION(("%s P2P GAS Comback Response,"
+					" channel=%d\n", (tx)? "TX" : "RX", channel));
+				break;
+			default:
+				CFGP2P_ACTION(("%s Unknown P2P GAS Frame,"
+					" channel=%d\n", (tx)? "TX" : "RX", channel));
+		}
+
+
+	}
+}
+
+/*
+ *  Initialize variables related to P2P
+ *
+ */
+s32
+wl_cfgp2p_init_priv(struct bcm_cfg80211 *cfg)
+{
+	if (!(cfg->p2p = kzalloc(sizeof(struct p2p_info), GFP_KERNEL))) {
+		CFGP2P_ERR(("struct p2p_info allocation failed\n"));
+		return -ENOMEM;
+	}
+#define INIT_IE(IE_TYPE, BSS_TYPE)		\
+	do {							\
+		memset(wl_to_p2p_bss_saved_ie(cfg, BSS_TYPE).p2p_ ## IE_TYPE ## _ie, 0, \
+		   sizeof(wl_to_p2p_bss_saved_ie(cfg, BSS_TYPE).p2p_ ## IE_TYPE ## _ie)); \
+		wl_to_p2p_bss_saved_ie(cfg, BSS_TYPE).p2p_ ## IE_TYPE ## _ie_len = 0; \
+	} while (0);
+
+	INIT_IE(probe_req, P2PAPI_BSSCFG_PRIMARY);
+	INIT_IE(probe_res, P2PAPI_BSSCFG_PRIMARY);
+	INIT_IE(assoc_req, P2PAPI_BSSCFG_PRIMARY);
+	INIT_IE(assoc_res, P2PAPI_BSSCFG_PRIMARY);
+	INIT_IE(beacon,    P2PAPI_BSSCFG_PRIMARY);
+	INIT_IE(probe_req, P2PAPI_BSSCFG_DEVICE);
+	INIT_IE(probe_res, P2PAPI_BSSCFG_DEVICE);
+	INIT_IE(assoc_req, P2PAPI_BSSCFG_DEVICE);
+	INIT_IE(assoc_res, P2PAPI_BSSCFG_DEVICE);
+	INIT_IE(beacon,    P2PAPI_BSSCFG_DEVICE);
+	INIT_IE(probe_req, P2PAPI_BSSCFG_CONNECTION);
+	INIT_IE(probe_res, P2PAPI_BSSCFG_CONNECTION);
+	INIT_IE(assoc_req, P2PAPI_BSSCFG_CONNECTION);
+	INIT_IE(assoc_res, P2PAPI_BSSCFG_CONNECTION);
+	INIT_IE(beacon,    P2PAPI_BSSCFG_CONNECTION);
+#undef INIT_IE
+	wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY) = bcmcfg_to_prmry_ndev(cfg);
+	wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_PRIMARY) = 0;
+	wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE) = NULL;
+	wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) = 0;
+	wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION) = NULL;
+	wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION) = 0;
+	return BCME_OK;
+
+}
+/*
+ *  Deinitialize variables related to P2P
+ *
+ */
+void
+wl_cfgp2p_deinit_priv(struct bcm_cfg80211 *cfg)
+{
+	CFGP2P_DBG(("In\n"));
+	if (cfg->p2p) {
+		kfree(cfg->p2p);
+		cfg->p2p = NULL;
+	}
+	cfg->p2p_supported = 0;
+}
+/*
+ * Set P2P functions into firmware
+ */
+s32
+wl_cfgp2p_set_firm_p2p(struct bcm_cfg80211 *cfg)
+{
+	struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+	struct ether_addr null_eth_addr = { { 0, 0, 0, 0, 0, 0 } };
+	s32 ret = BCME_OK;
+	s32 val = 0;
+	/* Do we have to check whether APSTA is enabled or not ? */
+	ret = wldev_iovar_getint(ndev, "apsta", &val);
+	if (ret < 0) {
+		CFGP2P_ERR(("get apsta error %d\n", ret));
+		return ret;
+	}
+	if (val == 0) {
+		val = 1;
+		ret = wldev_ioctl(ndev, WLC_DOWN, &val, sizeof(s32), true);
+		if (ret < 0) {
+			CFGP2P_ERR(("WLC_DOWN error %d\n", ret));
+			return ret;
+		}
+		wldev_iovar_setint(ndev, "apsta", val);
+		ret = wldev_ioctl(ndev, WLC_UP, &val, sizeof(s32), true);
+		if (ret < 0) {
+			CFGP2P_ERR(("WLC_UP error %d\n", ret));
+			return ret;
+		}
+	}
+
+	/* In case of COB type, firmware has default mac address
+	 * After Initializing firmware, we have to set current mac address to
+	 * firmware for P2P device address
+	 */
+	ret = wldev_iovar_setbuf_bsscfg(ndev, "p2p_da_override", &null_eth_addr,
+		sizeof(null_eth_addr), cfg->ioctl_buf, WLC_IOCTL_MAXLEN, 0, &cfg->ioctl_buf_sync);
+	if (ret && ret != BCME_UNSUPPORTED) {
+		CFGP2P_ERR(("failed to update device address ret %d\n", ret));
+	}
+	return ret;
+}
+
+/* Create a new P2P BSS.
+ * Parameters:
+ * @mac      : MAC address of the BSS to create
+ * @if_type  : interface type: WL_P2P_IF_GO or WL_P2P_IF_CLIENT
+ * @chspec   : chspec to use if creating a GO BSS.
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_ifadd(struct bcm_cfg80211 *cfg, struct ether_addr *mac, u8 if_type,
+            chanspec_t chspec)
+{
+	wl_p2p_if_t ifreq;
+	s32 err;
+	u32 scb_timeout = WL_SCB_TIMEOUT;
+	struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+
+	ifreq.type = if_type;
+	ifreq.chspec = chspec;
+	memcpy(ifreq.addr.octet, mac->octet, sizeof(ifreq.addr.octet));
+
+	CFGP2P_DBG(("---cfg p2p_ifadd "MACDBG" %s %u\n",
+		MAC2STRDBG(ifreq.addr.octet),
+		(if_type == WL_P2P_IF_GO) ? "go" : "client",
+	        (chspec & WL_CHANSPEC_CHAN_MASK) >> WL_CHANSPEC_CHAN_SHIFT));
+
+	err = wldev_iovar_setbuf(ndev, "p2p_ifadd", &ifreq, sizeof(ifreq),
+		cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+
+	if (unlikely(err < 0))
+		printk("'cfg p2p_ifadd' error %d\n", err);
+	else if (if_type == WL_P2P_IF_GO) {
+		err = wldev_ioctl(ndev, WLC_SET_SCB_TIMEOUT, &scb_timeout, sizeof(u32), true);
+		if (unlikely(err < 0))
+			printk("'cfg scb_timeout' error %d\n", err);
+	}
+	return err;
+}
+
+/* Disable a P2P BSS.
+ * Parameters:
+ * @mac      : MAC address of the BSS to disable
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_ifdisable(struct bcm_cfg80211 *cfg, struct ether_addr *mac)
+{
+	s32 ret;
+	struct net_device *netdev = bcmcfg_to_prmry_ndev(cfg);
+
+	CFGP2P_INFO(("------primary idx %d : cfg p2p_ifdis "MACDBG"\n",
+		netdev->ifindex, MAC2STRDBG(mac->octet)));
+	ret = wldev_iovar_setbuf(netdev, "p2p_ifdis", mac, sizeof(*mac),
+		cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+	if (unlikely(ret < 0)) {
+		printk("'cfg p2p_ifdis' error %d\n", ret);
+	}
+	return ret;
+}
+
+/* Delete a P2P BSS.
+ * Parameters:
+ * @mac      : MAC address of the BSS to delete
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_ifdel(struct bcm_cfg80211 *cfg, struct ether_addr *mac)
+{
+	s32 ret;
+	struct net_device *netdev = bcmcfg_to_prmry_ndev(cfg);
+
+	CFGP2P_INFO(("------primary idx %d : cfg p2p_ifdel "MACDBG"\n",
+	    netdev->ifindex, MAC2STRDBG(mac->octet)));
+	ret = wldev_iovar_setbuf(netdev, "p2p_ifdel", mac, sizeof(*mac),
+		cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+	if (unlikely(ret < 0)) {
+		printk("'cfg p2p_ifdel' error %d\n", ret);
+	}
+	return ret;
+}
+
+/* Change a P2P Role.
+ * Parameters:
+ * @mac      : MAC address of the BSS to change a role
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_ifchange(struct bcm_cfg80211 *cfg, struct ether_addr *mac, u8 if_type,
+            chanspec_t chspec)
+{
+	wl_p2p_if_t ifreq;
+	s32 err;
+	u32 scb_timeout = WL_SCB_TIMEOUT;
+
+	struct net_device *netdev =  wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION);
+
+	ifreq.type = if_type;
+	ifreq.chspec = chspec;
+	memcpy(ifreq.addr.octet, mac->octet, sizeof(ifreq.addr.octet));
+
+	CFGP2P_INFO(("---cfg p2p_ifchange "MACDBG" %s %u"
+		" chanspec 0x%04x\n", MAC2STRDBG(ifreq.addr.octet),
+		(if_type == WL_P2P_IF_GO) ? "go" : "client",
+		(chspec & WL_CHANSPEC_CHAN_MASK) >> WL_CHANSPEC_CHAN_SHIFT,
+		ifreq.chspec));
+
+	err = wldev_iovar_setbuf(netdev, "p2p_ifupd", &ifreq, sizeof(ifreq),
+		cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+
+	if (unlikely(err < 0)) {
+		printk("'cfg p2p_ifupd' error %d\n", err);
+	} else if (if_type == WL_P2P_IF_GO) {
+		err = wldev_ioctl(netdev, WLC_SET_SCB_TIMEOUT, &scb_timeout, sizeof(u32), true);
+		if (unlikely(err < 0))
+			printk("'cfg scb_timeout' error %d\n", err);
+	}
+	return err;
+}
+
+
+/* Get the index of a created P2P BSS.
+ * Parameters:
+ * @mac      : MAC address of the created BSS
+ * @index    : output: index of created BSS
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_ifidx(struct bcm_cfg80211 *cfg, struct ether_addr *mac, s32 *index)
+{
+	s32 ret;
+	u8 getbuf[64];
+	struct net_device *dev = bcmcfg_to_prmry_ndev(cfg);
+
+	CFGP2P_INFO(("---cfg p2p_if "MACDBG"\n", MAC2STRDBG(mac->octet)));
+
+	ret = wldev_iovar_getbuf_bsscfg(dev, "p2p_if", mac, sizeof(*mac), getbuf,
+		sizeof(getbuf), wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_PRIMARY), NULL);
+
+	if (ret == 0) {
+		memcpy(index, getbuf, sizeof(s32));
+		CFGP2P_INFO(("---cfg p2p_if   ==> %d\n", *index));
+	}
+
+	return ret;
+}
+
+static s32
+wl_cfgp2p_set_discovery(struct bcm_cfg80211 *cfg, s32 on)
+{
+	s32 ret = BCME_OK;
+	struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+	CFGP2P_DBG(("enter\n"));
+
+	ret = wldev_iovar_setint(ndev, "p2p_disc", on);
+
+	if (unlikely(ret < 0)) {
+		CFGP2P_ERR(("p2p_disc %d error %d\n", on, ret));
+	}
+
+	return ret;
+}
+
+/* Set the WL driver's P2P mode.
+ * Parameters :
+ * @mode      : is one of WL_P2P_DISC_ST_{SCAN,LISTEN,SEARCH}.
+ * @channel   : the channel to listen
+ * @listen_ms : the time (milli seconds) to wait
+ * @bssidx    : bss index for BSSCFG
+ * Returns 0 if success
+ */
+
+s32
+wl_cfgp2p_set_p2p_mode(struct bcm_cfg80211 *cfg, u8 mode, u32 channel, u16 listen_ms, int bssidx)
+{
+	wl_p2p_disc_st_t discovery_mode;
+	s32 ret;
+	struct net_device *dev;
+	CFGP2P_DBG(("enter\n"));
+
+	if (unlikely(bssidx == WL_INVALID)) {
+		CFGP2P_ERR((" %d index out of range\n", bssidx));
+		return -1;
+	}
+
+	dev = wl_cfgp2p_find_ndev(cfg, bssidx);
+	if (unlikely(dev == NULL)) {
+		CFGP2P_ERR(("bssidx %d is not assigned\n", bssidx));
+		return BCME_NOTFOUND;
+	}
+
+	/* Put the WL driver into P2P Listen Mode to respond to P2P probe reqs */
+	discovery_mode.state = mode;
+	discovery_mode.chspec = wl_ch_host_to_driver(channel);
+	discovery_mode.dwell = listen_ms;
+	ret = wldev_iovar_setbuf_bsscfg(dev, "p2p_state", &discovery_mode,
+		sizeof(discovery_mode), cfg->ioctl_buf, WLC_IOCTL_MAXLEN,
+		bssidx, &cfg->ioctl_buf_sync);
+
+	return ret;
+}
+
+/* Get the index of the P2P Discovery BSS */
+static s32
+wl_cfgp2p_get_disc_idx(struct bcm_cfg80211 *cfg, s32 *index)
+{
+	s32 ret;
+	struct net_device *dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY);
+
+	ret = wldev_iovar_getint(dev, "p2p_dev", index);
+	CFGP2P_INFO(("p2p_dev bsscfg_idx=%d ret=%d\n", *index, ret));
+
+	if (unlikely(ret <  0)) {
+	    CFGP2P_ERR(("'p2p_dev' error %d\n", ret));
+		return ret;
+	}
+	return ret;
+}
+
+s32
+wl_cfgp2p_init_discovery(struct bcm_cfg80211 *cfg)
+{
+
+	s32 index = 0;
+	s32 ret = BCME_OK;
+
+	CFGP2P_DBG(("enter\n"));
+
+	if (wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) > 0) {
+		CFGP2P_ERR(("do nothing, already initialized\n"));
+		return ret;
+	}
+
+	ret = wl_cfgp2p_set_discovery(cfg, 1);
+	if (ret < 0) {
+		CFGP2P_ERR(("set discover error\n"));
+		return ret;
+	}
+	/* Enable P2P Discovery in the WL Driver */
+	ret = wl_cfgp2p_get_disc_idx(cfg, &index);
+
+	if (ret < 0) {
+		return ret;
+	}
+	wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE) =
+	    wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY);
+	wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) = index;
+
+	/* Set the initial discovery state to SCAN */
+	ret = wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0,
+		wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE));
+
+	if (unlikely(ret != 0)) {
+		CFGP2P_ERR(("unable to set WL_P2P_DISC_ST_SCAN\n"));
+		wl_cfgp2p_set_discovery(cfg, 0);
+		wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) = 0;
+		wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE) = NULL;
+		return 0;
+	}
+	return ret;
+}
+
+/* Deinitialize P2P Discovery
+ * Parameters :
+ * @cfg        : wl_private data
+ * Returns 0 if succes
+ */
+static s32
+wl_cfgp2p_deinit_discovery(struct bcm_cfg80211 *cfg)
+{
+	s32 ret = BCME_OK;
+	CFGP2P_DBG(("enter\n"));
+
+	if (wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) == 0) {
+		CFGP2P_ERR(("do nothing, not initialized\n"));
+		return -1;
+	}
+	/* Set the discovery state to SCAN */
+	ret = wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0,
+	            wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE));
+	/* Disable P2P discovery in the WL driver (deletes the discovery BSSCFG) */
+	ret = wl_cfgp2p_set_discovery(cfg, 0);
+
+	/* Clear our saved WPS and P2P IEs for the discovery BSS.  The driver
+	 * deleted these IEs when wl_cfgp2p_set_discovery() deleted the discovery
+	 * BSS.
+	 */
+
+	/* Clear the saved bsscfg index of the discovery BSSCFG to indicate we
+	 * have no discovery BSS.
+	 */
+	wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) = WL_INVALID;
+	wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE) = NULL;
+
+	return ret;
+
+}
+/* Enable P2P Discovery
+ * Parameters:
+ * @cfg	: wl_private data
+ * @ie  : probe request ie (WPS IE + P2P IE)
+ * @ie_len   : probe request ie length
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_enable_discovery(struct bcm_cfg80211 *cfg, struct net_device *dev,
+	const u8 *ie, u32 ie_len)
+{
+	s32 ret = BCME_OK;
+	s32 bssidx;
+
+	if (wl_get_p2p_status(cfg, DISCOVERY_ON)) {
+		CFGP2P_INFO((" DISCOVERY is already initialized, we have nothing to do\n"));
+		goto set_ie;
+	}
+
+	wl_set_p2p_status(cfg, DISCOVERY_ON);
+
+	CFGP2P_DBG(("enter\n"));
+
+	ret = wl_cfgp2p_init_discovery(cfg);
+	if (unlikely(ret < 0)) {
+		CFGP2P_ERR((" init discovery error %d\n", ret));
+		goto exit;
+	}
+	/* Set wsec to any non-zero value in the discovery bsscfg to ensure our
+	 * P2P probe responses have the privacy bit set in the 802.11 WPA IE.
+	 * Some peer devices may not initiate WPS with us if this bit is not set.
+	 */
+	ret = wldev_iovar_setint_bsscfg(wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE),
+			"wsec", AES_ENABLED, wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE));
+	if (unlikely(ret < 0)) {
+		CFGP2P_ERR((" wsec error %d\n", ret));
+	}
+set_ie:
+	if (ie_len) {
+		if (bcmcfg_to_prmry_ndev(cfg) == dev) {
+			bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+		} else if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+			WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+			return BCME_ERROR;
+		}
+
+		ret = wl_cfgp2p_set_management_ie(cfg, dev,
+			bssidx,
+			VNDR_IE_PRBREQ_FLAG, ie, ie_len);
+
+		if (unlikely(ret < 0)) {
+			CFGP2P_ERR(("set probreq ie occurs error %d\n", ret));
+			goto exit;
+		}
+	}
+exit:
+	return ret;
+}
+
+/* Disable P2P Discovery
+ * Parameters:
+ * @cfg       : wl_private_data
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_disable_discovery(struct bcm_cfg80211 *cfg)
+{
+	s32 ret = BCME_OK;
+	CFGP2P_DBG((" enter\n"));
+	wl_clr_p2p_status(cfg, DISCOVERY_ON);
+
+	if (wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) == 0) {
+		CFGP2P_ERR((" do nothing, not initialized\n"));
+		goto exit;
+	}
+
+	ret = wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0,
+	            wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE));
+
+	if (unlikely(ret < 0)) {
+
+		CFGP2P_ERR(("unable to set WL_P2P_DISC_ST_SCAN\n"));
+	}
+	/* Do a scan abort to stop the driver's scan engine in case it is still
+	 * waiting out an action frame tx dwell time.
+	 */
+	wl_clr_p2p_status(cfg, DISCOVERY_ON);
+	ret = wl_cfgp2p_deinit_discovery(cfg);
+
+exit:
+	return ret;
+}
+
+s32
+wl_cfgp2p_escan(struct bcm_cfg80211 *cfg, struct net_device *dev, u16 active,
+	u32 num_chans, u16 *channels,
+	s32 search_state, u16 action, u32 bssidx, struct ether_addr *tx_dst_addr,
+	p2p_scan_purpose_t p2p_scan_purpose)
+{
+	s32 ret = BCME_OK;
+	s32 memsize;
+	s32 eparams_size;
+	u32 i;
+	s8 *memblk;
+	wl_p2p_scan_t *p2p_params;
+	wl_escan_params_t *eparams;
+	wlc_ssid_t ssid;
+	/* Scan parameters */
+#define P2PAPI_SCAN_NPROBES 1
+#define P2PAPI_SCAN_DWELL_TIME_MS 80
+#define P2PAPI_SCAN_SOCIAL_DWELL_TIME_MS 40
+#define P2PAPI_SCAN_HOME_TIME_MS 60
+#define P2PAPI_SCAN_NPROBS_TIME_MS 30
+#define P2PAPI_SCAN_AF_SEARCH_DWELL_TIME_MS 100
+
+	struct net_device *pri_dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY);
+	/* Allocate scan params which need space for 3 channels and 0 ssids */
+	eparams_size = (WL_SCAN_PARAMS_FIXED_SIZE +
+	    OFFSETOF(wl_escan_params_t, params)) +
+		num_chans * sizeof(eparams->params.channel_list[0]);
+
+	memsize = sizeof(wl_p2p_scan_t) + eparams_size;
+	memblk = scanparambuf;
+	if (memsize > sizeof(scanparambuf)) {
+		CFGP2P_ERR((" scanpar buf too small (%u > %zu)\n",
+		    memsize, sizeof(scanparambuf)));
+		return -1;
+	}
+	memset(memblk, 0, memsize);
+	memset(cfg->ioctl_buf, 0, WLC_IOCTL_MAXLEN);
+	if (search_state == WL_P2P_DISC_ST_SEARCH) {
+		/*
+		 * If we in SEARCH STATE, we don't need to set SSID explictly
+		 * because dongle use P2P WILDCARD internally by default
+		 */
+		wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SEARCH, 0, 0, bssidx);
+		/* use null ssid */
+		ssid.SSID_len = 0;
+		memset(&ssid.SSID, 0, sizeof(ssid.SSID));
+	} else if (search_state == WL_P2P_DISC_ST_SCAN) {
+		/* SCAN STATE 802.11 SCAN
+		 * WFD Supplicant has p2p_find command with (type=progressive, type= full)
+		 * So if P2P_find command with type=progressive,
+		 * we have to set ssid to P2P WILDCARD because
+		 * we just do broadcast scan unless setting SSID
+		 */
+		wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0, bssidx);
+		/* use wild card ssid */
+		ssid.SSID_len = WL_P2P_WILDCARD_SSID_LEN;
+		memset(&ssid.SSID, 0, sizeof(ssid.SSID));
+		memcpy(&ssid.SSID, WL_P2P_WILDCARD_SSID, WL_P2P_WILDCARD_SSID_LEN);
+	} else {
+		CFGP2P_ERR((" invalid search state %d\n", search_state));
+		return -1;
+	}
+
+
+	/* Fill in the P2P scan structure at the start of the iovar param block */
+	p2p_params = (wl_p2p_scan_t*) memblk;
+	p2p_params->type = 'E';
+	/* Fill in the Scan structure that follows the P2P scan structure */
+	eparams = (wl_escan_params_t*) (p2p_params + 1);
+	eparams->params.bss_type = DOT11_BSSTYPE_ANY;
+	if (active)
+		eparams->params.scan_type = DOT11_SCANTYPE_ACTIVE;
+	else
+		eparams->params.scan_type = DOT11_SCANTYPE_PASSIVE;
+
+	if (tx_dst_addr == NULL)
+		memcpy(&eparams->params.bssid, &ether_bcast, ETHER_ADDR_LEN);
+	else
+		memcpy(&eparams->params.bssid, tx_dst_addr, ETHER_ADDR_LEN);
+
+	if (ssid.SSID_len)
+		memcpy(&eparams->params.ssid, &ssid, sizeof(wlc_ssid_t));
+
+	eparams->params.home_time = htod32(P2PAPI_SCAN_HOME_TIME_MS);
+
+	switch (p2p_scan_purpose) {
+		case P2P_SCAN_SOCIAL_CHANNEL:
+		eparams->params.active_time = htod32(P2PAPI_SCAN_SOCIAL_DWELL_TIME_MS);
+			break;
+		case P2P_SCAN_AFX_PEER_NORMAL:
+		case P2P_SCAN_AFX_PEER_REDUCED:
+		eparams->params.active_time = htod32(P2PAPI_SCAN_AF_SEARCH_DWELL_TIME_MS);
+			break;
+		case P2P_SCAN_CONNECT_TRY:
+			eparams->params.active_time = htod32(WL_SCAN_CONNECT_DWELL_TIME_MS);
+			break;
+		default :
+			if (wl_get_drv_status_all(cfg, CONNECTED))
+		eparams->params.active_time = -1;
+	else
+		eparams->params.active_time = htod32(P2PAPI_SCAN_DWELL_TIME_MS);
+			break;
+	}
+
+	if (p2p_scan_purpose == P2P_SCAN_CONNECT_TRY)
+		eparams->params.nprobes = htod32(eparams->params.active_time /
+			WL_SCAN_JOIN_PROBE_INTERVAL_MS);
+	else
+	eparams->params.nprobes = htod32((eparams->params.active_time /
+		P2PAPI_SCAN_NPROBS_TIME_MS));
+
+
+	if (eparams->params.nprobes <= 0)
+		eparams->params.nprobes = 1;
+	CFGP2P_DBG(("nprobes # %d, active_time %d\n",
+		eparams->params.nprobes, eparams->params.active_time));
+	eparams->params.passive_time = htod32(-1);
+	eparams->params.channel_num = htod32((0 << WL_SCAN_PARAMS_NSSID_SHIFT) |
+	    (num_chans & WL_SCAN_PARAMS_COUNT_MASK));
+
+	for (i = 0; i < num_chans; i++) {
+		eparams->params.channel_list[i] = wl_ch_host_to_driver(channels[i]);
+	}
+	eparams->version = htod32(ESCAN_REQ_VERSION);
+	eparams->action =  htod16(action);
+	wl_escan_set_sync_id(eparams->sync_id, cfg);
+	wl_escan_set_type(cfg, WL_SCANTYPE_P2P);
+	CFGP2P_INFO(("SCAN CHANNELS : "));
+
+	for (i = 0; i < num_chans; i++) {
+		if (i == 0) CFGP2P_INFO(("%d", channels[i]));
+		else CFGP2P_INFO((",%d", channels[i]));
+	}
+
+	CFGP2P_INFO(("\n"));
+
+	ret = wldev_iovar_setbuf_bsscfg(pri_dev, "p2p_scan",
+		memblk, memsize, cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+	if (ret == BCME_OK)
+		wl_set_p2p_status(cfg, SCANNING);
+	return ret;
+}
+
+/* search function to reach at common channel to send action frame
+ * Parameters:
+ * @cfg       : wl_private data
+ * @ndev     : net device for bssidx
+ * @bssidx   : bssidx for BSS
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_act_frm_search(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	s32 bssidx, s32 channel, struct ether_addr *tx_dst_addr)
+{
+	s32 ret = 0;
+	u32 chan_cnt = 0;
+	u16 *default_chan_list = NULL;
+	p2p_scan_purpose_t p2p_scan_purpose = P2P_SCAN_AFX_PEER_NORMAL;
+	if (!p2p_is_on(cfg) || ndev == NULL || bssidx == WL_INVALID)
+		return -BCME_ERROR;
+	WL_TRACE_HW4((" Enter\n"));
+	if (bssidx == wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_PRIMARY))
+		bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+	if (channel)
+		chan_cnt = AF_PEER_SEARCH_CNT;
+	else
+		chan_cnt = SOCIAL_CHAN_CNT;
+	default_chan_list = kzalloc(chan_cnt * sizeof(*default_chan_list), GFP_KERNEL);
+	if (default_chan_list == NULL) {
+		CFGP2P_ERR(("channel list allocation failed \n"));
+		ret = -ENOMEM;
+		goto exit;
+	}
+	if (channel) {
+		u32 i;
+		/* insert same channel to the chan_list */
+		for (i = 0; i < chan_cnt; i++) {
+			default_chan_list[i] = channel;
+		}
+	} else {
+		default_chan_list[0] = SOCIAL_CHAN_1;
+		default_chan_list[1] = SOCIAL_CHAN_2;
+		default_chan_list[2] = SOCIAL_CHAN_3;
+	}
+	ret = wl_cfgp2p_escan(cfg, ndev, true, chan_cnt,
+		default_chan_list, WL_P2P_DISC_ST_SEARCH,
+		WL_SCAN_ACTION_START, bssidx, NULL, p2p_scan_purpose);
+	kfree(default_chan_list);
+exit:
+	return ret;
+}
+
+/* Check whether pointed-to IE looks like WPA. */
+#define wl_cfgp2p_is_wpa_ie(ie, tlvs, len)	wl_cfgp2p_has_ie(ie, tlvs, len, \
+		(const uint8 *)WPS_OUI, WPS_OUI_LEN, WPA_OUI_TYPE)
+/* Check whether pointed-to IE looks like WPS. */
+#define wl_cfgp2p_is_wps_ie(ie, tlvs, len)	wl_cfgp2p_has_ie(ie, tlvs, len, \
+		(const uint8 *)WPS_OUI, WPS_OUI_LEN, WPS_OUI_TYPE)
+/* Check whether the given IE looks like WFA P2P IE. */
+#define wl_cfgp2p_is_p2p_ie(ie, tlvs, len)	wl_cfgp2p_has_ie(ie, tlvs, len, \
+		(const uint8 *)WFA_OUI, WFA_OUI_LEN, WFA_OUI_TYPE_P2P)
+/* Check whether the given IE looks like WFA WFDisplay IE. */
+#ifndef WFA_OUI_TYPE_WFD
+#define WFA_OUI_TYPE_WFD	0x0a			/* WiFi Display OUI TYPE */
+#endif
+#define wl_cfgp2p_is_wfd_ie(ie, tlvs, len)	wl_cfgp2p_has_ie(ie, tlvs, len, \
+		(const uint8 *)WFA_OUI, WFA_OUI_LEN, WFA_OUI_TYPE_WFD)
+
+static s32
+wl_cfgp2p_parse_vndr_ies(u8 *parse, u32 len,
+	struct parsed_vndr_ies *vndr_ies)
+{
+	s32 err = BCME_OK;
+	vndr_ie_t *vndrie;
+	bcm_tlv_t *ie;
+	struct parsed_vndr_ie_info *parsed_info;
+	u32	count = 0;
+	s32 remained_len;
+
+	remained_len = (s32)len;
+	memset(vndr_ies, 0, sizeof(*vndr_ies));
+
+	WL_INFORM(("---> len %d\n", len));
+	ie = (bcm_tlv_t *) parse;
+	if (!bcm_valid_tlv(ie, remained_len))
+		ie = NULL;
+	while (ie) {
+		if (count >= MAX_VNDR_IE_NUMBER)
+			break;
+		if (ie->id == DOT11_MNG_VS_ID) {
+			vndrie = (vndr_ie_t *) ie;
+			/* len should be bigger than OUI length + one data length at least */
+			if (vndrie->len < (VNDR_IE_MIN_LEN + 1)) {
+				CFGP2P_ERR(("%s: invalid vndr ie. length is too small %d\n",
+					__FUNCTION__, vndrie->len));
+				goto end;
+			}
+			/* if wpa or wme ie, do not add ie */
+			if (!bcmp(vndrie->oui, (u8*)WPA_OUI, WPA_OUI_LEN) &&
+				((vndrie->data[0] == WPA_OUI_TYPE) ||
+				(vndrie->data[0] == WME_OUI_TYPE))) {
+				CFGP2P_DBG(("Found WPA/WME oui. Do not add it\n"));
+				goto end;
+			}
+
+			parsed_info = &vndr_ies->ie_info[count++];
+
+			/* save vndr ie information */
+			parsed_info->ie_ptr = (char *)vndrie;
+			parsed_info->ie_len = (vndrie->len + TLV_HDR_LEN);
+			memcpy(&parsed_info->vndrie, vndrie, sizeof(vndr_ie_t));
+
+			vndr_ies->count = count;
+
+			CFGP2P_DBG(("\t ** OUI %02x %02x %02x, type 0x%02x \n",
+				parsed_info->vndrie.oui[0], parsed_info->vndrie.oui[1],
+				parsed_info->vndrie.oui[2], parsed_info->vndrie.data[0]));
+		}
+end:
+		ie = bcm_next_tlv(ie, &remained_len);
+	}
+	return err;
+}
+
+
+/* Delete and Set a management vndr ie to firmware
+ * Parameters:
+ * @cfg       : wl_private data
+ * @ndev     : net device for bssidx
+ * @bssidx   : bssidx for BSS
+ * @pktflag  : packet flag for IE (VNDR_IE_PRBREQ_FLAG,VNDR_IE_PRBRSP_FLAG, VNDR_IE_ASSOCRSP_FLAG,
+ *                                 VNDR_IE_ASSOCREQ_FLAG)
+ * @ie       :  VNDR IE (such as P2P IE , WPS IE)
+ * @ie_len   : VNDR IE Length
+ * Returns 0 if success.
+ */
+
+s32
+wl_cfgp2p_set_management_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx,
+    s32 pktflag, const u8 *vndr_ie, u32 vndr_ie_len)
+{
+	s32 ret = BCME_OK;
+	u8  *curr_ie_buf = NULL;
+	u8  *mgmt_ie_buf = NULL;
+	u32 mgmt_ie_buf_len = 0;
+	u32 *mgmt_ie_len = 0;
+	u32 del_add_ie_buf_len = 0;
+	u32 total_ie_buf_len = 0;
+	u32 parsed_ie_buf_len = 0;
+	struct parsed_vndr_ies old_vndr_ies;
+	struct parsed_vndr_ies new_vndr_ies;
+	s32 i;
+	u8 *ptr;
+	s32 type = -1;
+	s32 remained_buf_len;
+#define IE_TYPE(type, bsstype) (wl_to_p2p_bss_saved_ie(cfg, bsstype).p2p_ ## type ## _ie)
+#define IE_TYPE_LEN(type, bsstype) (wl_to_p2p_bss_saved_ie(cfg, bsstype).p2p_ ## type ## _ie_len)
+	memset(g_mgmt_ie_buf, 0, sizeof(g_mgmt_ie_buf));
+	curr_ie_buf = g_mgmt_ie_buf;
+	CFGP2P_DBG((" bssidx %d, pktflag : 0x%02X\n", bssidx, pktflag));
+
+#ifdef DUAL_STA
+	if ((cfg->p2p != NULL) && (bssidx != cfg->cfgdev_bssidx)) {
+#else
+	if (cfg->p2p != NULL) {
+#endif
+		if (wl_cfgp2p_find_type(cfg, bssidx, &type)) {
+			CFGP2P_ERR(("cannot find type from bssidx : %d\n", bssidx));
+			return BCME_ERROR;
+		}
+
+		switch (pktflag) {
+			case VNDR_IE_PRBREQ_FLAG :
+				mgmt_ie_buf = IE_TYPE(probe_req, type);
+				mgmt_ie_len = &IE_TYPE_LEN(probe_req, type);
+				mgmt_ie_buf_len = sizeof(IE_TYPE(probe_req, type));
+				break;
+			case VNDR_IE_PRBRSP_FLAG :
+				mgmt_ie_buf = IE_TYPE(probe_res, type);
+				mgmt_ie_len = &IE_TYPE_LEN(probe_res, type);
+				mgmt_ie_buf_len = sizeof(IE_TYPE(probe_res, type));
+				break;
+			case VNDR_IE_ASSOCREQ_FLAG :
+				mgmt_ie_buf = IE_TYPE(assoc_req, type);
+				mgmt_ie_len = &IE_TYPE_LEN(assoc_req, type);
+				mgmt_ie_buf_len = sizeof(IE_TYPE(assoc_req, type));
+				break;
+			case VNDR_IE_ASSOCRSP_FLAG :
+				mgmt_ie_buf = IE_TYPE(assoc_res, type);
+				mgmt_ie_len = &IE_TYPE_LEN(assoc_res, type);
+				mgmt_ie_buf_len = sizeof(IE_TYPE(assoc_res, type));
+				break;
+			case VNDR_IE_BEACON_FLAG :
+				mgmt_ie_buf = IE_TYPE(beacon, type);
+				mgmt_ie_len = &IE_TYPE_LEN(beacon, type);
+				mgmt_ie_buf_len = sizeof(IE_TYPE(beacon, type));
+				break;
+			default:
+				mgmt_ie_buf = NULL;
+				mgmt_ie_len = NULL;
+				CFGP2P_ERR(("not suitable type\n"));
+				return BCME_ERROR;
+		}
+	} else if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_AP) {
+		switch (pktflag) {
+			case VNDR_IE_PRBRSP_FLAG :
+				mgmt_ie_buf = cfg->ap_info->probe_res_ie;
+				mgmt_ie_len = &cfg->ap_info->probe_res_ie_len;
+				mgmt_ie_buf_len = sizeof(cfg->ap_info->probe_res_ie);
+				break;
+			case VNDR_IE_BEACON_FLAG :
+				mgmt_ie_buf = cfg->ap_info->beacon_ie;
+				mgmt_ie_len = &cfg->ap_info->beacon_ie_len;
+				mgmt_ie_buf_len = sizeof(cfg->ap_info->beacon_ie);
+				break;
+			case VNDR_IE_ASSOCRSP_FLAG :
+				/* WPS-AP WSC2.0 assoc res includes wps_ie */
+				mgmt_ie_buf = cfg->ap_info->assoc_res_ie;
+				mgmt_ie_len = &cfg->ap_info->assoc_res_ie_len;
+				mgmt_ie_buf_len = sizeof(cfg->ap_info->assoc_res_ie);
+				break;
+			default:
+				mgmt_ie_buf = NULL;
+				mgmt_ie_len = NULL;
+				CFGP2P_ERR(("not suitable type\n"));
+				return BCME_ERROR;
+		}
+		bssidx = 0;
+	} else if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_BSS) {
+		switch (pktflag) {
+			case VNDR_IE_PRBREQ_FLAG :
+				mgmt_ie_buf = cfg->sta_info->probe_req_ie;
+				mgmt_ie_len = &cfg->sta_info->probe_req_ie_len;
+				mgmt_ie_buf_len = sizeof(cfg->sta_info->probe_req_ie);
+				break;
+			case VNDR_IE_ASSOCREQ_FLAG :
+				mgmt_ie_buf = cfg->sta_info->assoc_req_ie;
+				mgmt_ie_len = &cfg->sta_info->assoc_req_ie_len;
+				mgmt_ie_buf_len = sizeof(cfg->sta_info->assoc_req_ie);
+				break;
+			default:
+				mgmt_ie_buf = NULL;
+				mgmt_ie_len = NULL;
+				CFGP2P_ERR(("not suitable type\n"));
+				return BCME_ERROR;
+		}
+		bssidx = 0;
+	} else {
+		CFGP2P_ERR(("not suitable type\n"));
+		return BCME_ERROR;
+	}
+
+	if (vndr_ie_len > mgmt_ie_buf_len) {
+		CFGP2P_ERR(("extra IE size too big\n"));
+		ret = -ENOMEM;
+	} else {
+		/* parse and save new vndr_ie in curr_ie_buff before comparing it */
+		if (vndr_ie && vndr_ie_len && curr_ie_buf) {
+			ptr = curr_ie_buf;
+
+			wl_cfgp2p_parse_vndr_ies((u8*)vndr_ie,
+				vndr_ie_len, &new_vndr_ies);
+
+			for (i = 0; i < new_vndr_ies.count; i++) {
+				struct parsed_vndr_ie_info *vndrie_info =
+					&new_vndr_ies.ie_info[i];
+
+				memcpy(ptr + parsed_ie_buf_len, vndrie_info->ie_ptr,
+					vndrie_info->ie_len);
+				parsed_ie_buf_len += vndrie_info->ie_len;
+			}
+		}
+
+		if (mgmt_ie_buf != NULL) {
+			if (parsed_ie_buf_len && (parsed_ie_buf_len == *mgmt_ie_len) &&
+			     (memcmp(mgmt_ie_buf, curr_ie_buf, parsed_ie_buf_len) == 0)) {
+				CFGP2P_INFO(("Previous mgmt IE is equals to current IE"));
+				goto exit;
+			}
+
+			/* parse old vndr_ie */
+			wl_cfgp2p_parse_vndr_ies(mgmt_ie_buf, *mgmt_ie_len,
+				&old_vndr_ies);
+
+			/* make a command to delete old ie */
+			for (i = 0; i < old_vndr_ies.count; i++) {
+				struct parsed_vndr_ie_info *vndrie_info =
+					&old_vndr_ies.ie_info[i];
+
+				CFGP2P_INFO(("DELETED ID : %d, Len: %d , OUI:%02x:%02x:%02x\n",
+					vndrie_info->vndrie.id, vndrie_info->vndrie.len,
+					vndrie_info->vndrie.oui[0], vndrie_info->vndrie.oui[1],
+					vndrie_info->vndrie.oui[2]));
+
+				del_add_ie_buf_len = wl_cfgp2p_vndr_ie(cfg, curr_ie_buf,
+					pktflag, vndrie_info->vndrie.oui,
+					vndrie_info->vndrie.id,
+					vndrie_info->ie_ptr + VNDR_IE_FIXED_LEN,
+					vndrie_info->ie_len - VNDR_IE_FIXED_LEN,
+					"del");
+
+				curr_ie_buf += del_add_ie_buf_len;
+				total_ie_buf_len += del_add_ie_buf_len;
+			}
+		}
+
+		*mgmt_ie_len = 0;
+		/* Add if there is any extra IE */
+		if (mgmt_ie_buf && parsed_ie_buf_len) {
+			ptr = mgmt_ie_buf;
+
+			remained_buf_len = mgmt_ie_buf_len;
+
+			/* make a command to add new ie */
+			for (i = 0; i < new_vndr_ies.count; i++) {
+				struct parsed_vndr_ie_info *vndrie_info =
+					&new_vndr_ies.ie_info[i];
+
+				CFGP2P_INFO(("ADDED ID : %d, Len: %d(%d), OUI:%02x:%02x:%02x\n",
+					vndrie_info->vndrie.id, vndrie_info->vndrie.len,
+					vndrie_info->ie_len - 2,
+					vndrie_info->vndrie.oui[0], vndrie_info->vndrie.oui[1],
+					vndrie_info->vndrie.oui[2]));
+
+				del_add_ie_buf_len = wl_cfgp2p_vndr_ie(cfg, curr_ie_buf,
+					pktflag, vndrie_info->vndrie.oui,
+					vndrie_info->vndrie.id,
+					vndrie_info->ie_ptr + VNDR_IE_FIXED_LEN,
+					vndrie_info->ie_len - VNDR_IE_FIXED_LEN,
+					"add");
+
+				/* verify remained buf size before copy data */
+				if (remained_buf_len >= vndrie_info->ie_len) {
+					remained_buf_len -= vndrie_info->ie_len;
+				} else {
+					CFGP2P_ERR(("no space in mgmt_ie_buf: pktflag = %d, "
+						"found vndr ies # = %d(cur %d), remained len %d, "
+						"cur mgmt_ie_len %d, new ie len = %d\n",
+						pktflag, new_vndr_ies.count, i, remained_buf_len,
+						*mgmt_ie_len, vndrie_info->ie_len));
+					break;
+				}
+
+				/* save the parsed IE in cfg struct */
+				memcpy(ptr + (*mgmt_ie_len), vndrie_info->ie_ptr,
+					vndrie_info->ie_len);
+				*mgmt_ie_len += vndrie_info->ie_len;
+
+				curr_ie_buf += del_add_ie_buf_len;
+				total_ie_buf_len += del_add_ie_buf_len;
+			}
+		}
+		if (total_ie_buf_len) {
+			ret  = wldev_iovar_setbuf_bsscfg(ndev, "vndr_ie", g_mgmt_ie_buf,
+				total_ie_buf_len, cfg->ioctl_buf, WLC_IOCTL_MAXLEN,
+				bssidx, &cfg->ioctl_buf_sync);
+			if (ret)
+				CFGP2P_ERR(("vndr ie set error : %d\n", ret));
+		}
+	}
+#undef IE_TYPE
+#undef IE_TYPE_LEN
+exit:
+	return ret;
+}
+
+/* Clear the manament IE buffer of BSSCFG
+ * Parameters:
+ * @cfg       : wl_private data
+ * @bssidx   : bssidx for BSS
+ *
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_clear_management_ie(struct bcm_cfg80211 *cfg, s32 bssidx)
+{
+
+	s32 vndrie_flag[] = {VNDR_IE_BEACON_FLAG, VNDR_IE_PRBRSP_FLAG, VNDR_IE_ASSOCRSP_FLAG,
+		VNDR_IE_PRBREQ_FLAG, VNDR_IE_ASSOCREQ_FLAG};
+	s32 index = -1;
+	s32 type = -1;
+	struct net_device *ndev = wl_cfgp2p_find_ndev(cfg, bssidx);
+#define INIT_IE(IE_TYPE, BSS_TYPE)		\
+	do {							\
+		memset(wl_to_p2p_bss_saved_ie(cfg, BSS_TYPE).p2p_ ## IE_TYPE ## _ie, 0, \
+		   sizeof(wl_to_p2p_bss_saved_ie(cfg, BSS_TYPE).p2p_ ## IE_TYPE ## _ie)); \
+		wl_to_p2p_bss_saved_ie(cfg, BSS_TYPE).p2p_ ## IE_TYPE ## _ie_len = 0; \
+	} while (0);
+
+	if (bssidx < 0 || ndev == NULL) {
+		CFGP2P_ERR(("invalid %s\n", (bssidx < 0) ? "bssidx" : "ndev"));
+		return BCME_BADARG;
+	}
+
+	if (wl_cfgp2p_find_type(cfg, bssidx, &type)) {
+		CFGP2P_ERR(("invalid argument\n"));
+		return BCME_BADARG;
+	}
+	for (index = 0; index < ARRAYSIZE(vndrie_flag); index++) {
+		/* clean up vndr ies in dongle */
+		wl_cfgp2p_set_management_ie(cfg, ndev, bssidx, vndrie_flag[index], NULL, 0);
+	}
+	INIT_IE(probe_req, type);
+	INIT_IE(probe_res, type);
+	INIT_IE(assoc_req, type);
+	INIT_IE(assoc_res, type);
+	INIT_IE(beacon, type);
+	return BCME_OK;
+}
+
+
+/* Is any of the tlvs the expected entry? If
+ * not update the tlvs buffer pointer/length.
+ */
+static bool
+wl_cfgp2p_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len, const u8 *oui, u32 oui_len, u8 type)
+{
+	/* If the contents match the OUI and the type */
+	if (ie[TLV_LEN_OFF] >= oui_len + 1 &&
+		!bcmp(&ie[TLV_BODY_OFF], oui, oui_len) &&
+		type == ie[TLV_BODY_OFF + oui_len]) {
+		return TRUE;
+	}
+
+	if (tlvs == NULL)
+		return FALSE;
+	/* point to the next ie */
+	ie += ie[TLV_LEN_OFF] + TLV_HDR_LEN;
+	/* calculate the length of the rest of the buffer */
+	*tlvs_len -= (int)(ie - *tlvs);
+	/* update the pointer to the start of the buffer */
+	*tlvs = ie;
+
+	return FALSE;
+}
+
+wpa_ie_fixed_t *
+wl_cfgp2p_find_wpaie(u8 *parse, u32 len)
+{
+	bcm_tlv_t *ie;
+
+	while ((ie = bcm_parse_tlvs(parse, (u32)len, DOT11_MNG_VS_ID))) {
+		if (wl_cfgp2p_is_wpa_ie((u8*)ie, &parse, &len)) {
+			return (wpa_ie_fixed_t *)ie;
+		}
+	}
+	return NULL;
+}
+
+wpa_ie_fixed_t *
+wl_cfgp2p_find_wpsie(u8 *parse, u32 len)
+{
+	bcm_tlv_t *ie;
+
+	while ((ie = bcm_parse_tlvs(parse, (u32)len, DOT11_MNG_VS_ID))) {
+		if (wl_cfgp2p_is_wps_ie((u8*)ie, &parse, &len)) {
+			return (wpa_ie_fixed_t *)ie;
+		}
+	}
+	return NULL;
+}
+
+wifi_p2p_ie_t *
+wl_cfgp2p_find_p2pie(u8 *parse, u32 len)
+{
+	bcm_tlv_t *ie;
+
+	while ((ie = bcm_parse_tlvs(parse, (int)len, DOT11_MNG_VS_ID))) {
+		if (wl_cfgp2p_is_p2p_ie((uint8*)ie, &parse, &len)) {
+			return (wifi_p2p_ie_t *)ie;
+		}
+	}
+	return NULL;
+}
+
+wifi_wfd_ie_t *
+wl_cfgp2p_find_wfdie(u8 *parse, u32 len)
+{
+	bcm_tlv_t *ie;
+
+	while ((ie = bcm_parse_tlvs(parse, (int)len, DOT11_MNG_VS_ID))) {
+		if (wl_cfgp2p_is_wfd_ie((uint8*)ie, &parse, &len)) {
+			return (wifi_wfd_ie_t *)ie;
+		}
+	}
+	return NULL;
+}
+static u32
+wl_cfgp2p_vndr_ie(struct bcm_cfg80211 *cfg, u8 *iebuf, s32 pktflag,
+            s8 *oui, s32 ie_id, s8 *data, s32 datalen, const s8* add_del_cmd)
+{
+	vndr_ie_setbuf_t hdr;	/* aligned temporary vndr_ie buffer header */
+	s32 iecount;
+	u32 data_offset;
+
+	/* Validate the pktflag parameter */
+	if ((pktflag & ~(VNDR_IE_BEACON_FLAG | VNDR_IE_PRBRSP_FLAG |
+	            VNDR_IE_ASSOCRSP_FLAG | VNDR_IE_AUTHRSP_FLAG |
+	            VNDR_IE_PRBREQ_FLAG | VNDR_IE_ASSOCREQ_FLAG))) {
+		CFGP2P_ERR(("p2pwl_vndr_ie: Invalid packet flag 0x%x\n", pktflag));
+		return -1;
+	}
+
+	/* Copy the vndr_ie SET command ("add"/"del") to the buffer */
+	strncpy(hdr.cmd, add_del_cmd, VNDR_IE_CMD_LEN - 1);
+	hdr.cmd[VNDR_IE_CMD_LEN - 1] = '\0';
+
+	/* Set the IE count - the buffer contains only 1 IE */
+	iecount = htod32(1);
+	memcpy((void *)&hdr.vndr_ie_buffer.iecount, &iecount, sizeof(s32));
+
+	/* Copy packet flags that indicate which packets will contain this IE */
+	pktflag = htod32(pktflag);
+	memcpy((void *)&hdr.vndr_ie_buffer.vndr_ie_list[0].pktflag, &pktflag,
+		sizeof(u32));
+
+	/* Add the IE ID to the buffer */
+	hdr.vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.id = ie_id;
+
+	/* Add the IE length to the buffer */
+	hdr.vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.len =
+		(uint8) VNDR_IE_MIN_LEN + datalen;
+
+	/* Add the IE OUI to the buffer */
+	hdr.vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.oui[0] = oui[0];
+	hdr.vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.oui[1] = oui[1];
+	hdr.vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.oui[2] = oui[2];
+
+	/* Copy the aligned temporary vndr_ie buffer header to the IE buffer */
+	memcpy(iebuf, &hdr, sizeof(hdr) - 1);
+
+	/* Copy the IE data to the IE buffer */
+	data_offset =
+		(u8*)&hdr.vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.data[0] -
+		(u8*)&hdr;
+	memcpy(iebuf + data_offset, data, datalen);
+	return data_offset + datalen;
+
+}
+
+/*
+ * Search the bssidx based on dev argument
+ * Parameters:
+ * @cfg       : wl_private data
+ * @ndev     : net device to search bssidx
+ * @bssidx  : output arg to store bssidx of the bsscfg of firmware.
+ * Returns error
+ */
+s32
+wl_cfgp2p_find_idx(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 *bssidx)
+{
+	u32 i;
+	if (ndev == NULL || bssidx == NULL) {
+		CFGP2P_ERR((" argument is invalid\n"));
+		return BCME_BADARG;
+	}
+	if (!cfg->p2p_supported) {
+		*bssidx = P2PAPI_BSSCFG_PRIMARY;
+		return BCME_OK;
+	}
+	/* we cannot find the bssidx of DISCOVERY BSS
+	 *  because the ndev is same with ndev of PRIMARY BSS.
+	 */
+	for (i = 0; i < P2PAPI_BSSCFG_MAX; i++) {
+		if (ndev == wl_to_p2p_bss_ndev(cfg, i)) {
+			*bssidx = wl_to_p2p_bss_bssidx(cfg, i);
+			return BCME_OK;
+		}
+	}
+
+#ifdef DUAL_STA
+	if (cfg->bss_cfgdev && (cfg->bss_cfgdev == ndev_to_cfgdev(ndev))) {
+		CFGP2P_INFO(("cfgdev is present, return the bssidx"));
+		*bssidx = cfg->cfgdev_bssidx;
+		return BCME_OK;
+	}
+#endif
+
+	return BCME_BADARG;
+
+}
+struct net_device *
+wl_cfgp2p_find_ndev(struct bcm_cfg80211 *cfg, s32 bssidx)
+{
+	u32 i;
+	struct net_device *ndev = NULL;
+	if (bssidx < 0) {
+		CFGP2P_ERR((" bsscfg idx is invalid\n"));
+		goto exit;
+	}
+
+	for (i = 0; i < P2PAPI_BSSCFG_MAX; i++) {
+		if (bssidx == wl_to_p2p_bss_bssidx(cfg, i)) {
+			ndev = wl_to_p2p_bss_ndev(cfg, i);
+			break;
+		}
+	}
+
+exit:
+	return ndev;
+}
+/*
+ * Search the driver array idx based on bssidx argument
+ * Parameters:
+ * @cfg     : wl_private data
+ * @bssidx : bssidx which indicate bsscfg->idx of firmware.
+ * @type   : output arg to store array idx of p2p->bss.
+ * Returns error
+ */
+
+s32
+wl_cfgp2p_find_type(struct bcm_cfg80211 *cfg, s32 bssidx, s32 *type)
+{
+	u32 i;
+	if (bssidx < 0 || type == NULL) {
+		CFGP2P_ERR((" argument is invalid\n"));
+		goto exit;
+	}
+
+	for (i = 0; i < P2PAPI_BSSCFG_MAX; i++) {
+		if (bssidx == wl_to_p2p_bss_bssidx(cfg, i)) {
+			*type = i;
+			return BCME_OK;
+		}
+	}
+
+#ifdef DUAL_STA
+	if (bssidx == cfg->cfgdev_bssidx) {
+		CFGP2P_DBG(("bssidx matching with the virtual I/F \n"));
+		*type = 1;
+		return BCME_OK;
+	}
+#endif
+
+exit:
+	return BCME_BADARG;
+}
+
+/*
+ * Callback function for WLC_E_P2P_DISC_LISTEN_COMPLETE
+ */
+s32
+wl_cfgp2p_listen_complete(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data)
+{
+	s32 ret = BCME_OK;
+	struct net_device *ndev = NULL;
+
+	if (!cfg || !cfg->p2p)
+		return BCME_ERROR;
+
+	CFGP2P_DBG((" Enter\n"));
+
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+	if (wl_get_p2p_status(cfg, LISTEN_EXPIRED) == 0) {
+		wl_set_p2p_status(cfg, LISTEN_EXPIRED);
+		if (timer_pending(&cfg->p2p->listen_timer)) {
+			del_timer_sync(&cfg->p2p->listen_timer);
+		}
+
+		if (cfg->afx_hdl->is_listen == TRUE &&
+			wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
+			WL_DBG(("Listen DONE for action frame\n"));
+			complete(&cfg->act_frm_scan);
+		}
+#ifdef WL_CFG80211_SYNC_GON
+		else if (wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM_LISTEN)) {
+			wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM_LISTEN, ndev);
+			WL_DBG(("Listen DONE and wake up wait_next_af !!(%d)\n",
+				jiffies_to_msecs(jiffies - cfg->af_tx_sent_jiffies)));
+
+			if (wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM))
+				wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM, ndev);
+
+			complete(&cfg->wait_next_af);
+		}
+#endif /* WL_CFG80211_SYNC_GON */
+
+#ifndef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+		if (wl_get_drv_status_all(cfg, REMAINING_ON_CHANNEL)) {
+#else
+		if (wl_get_drv_status_all(cfg, REMAINING_ON_CHANNEL) ||
+			wl_get_drv_status_all(cfg, FAKE_REMAINING_ON_CHANNEL)) {
+#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+			WL_DBG(("Listen DONE for ramain on channel expired\n"));
+			wl_clr_drv_status(cfg, REMAINING_ON_CHANNEL, ndev);
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+			wl_clr_drv_status(cfg, FAKE_REMAINING_ON_CHANNEL, ndev);
+#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+			if (ndev && (ndev->ieee80211_ptr != NULL)) {
+#if defined(WL_CFG80211_P2P_DEV_IF)
+				cfg80211_remain_on_channel_expired(bcmcfg_to_p2p_wdev(cfg), cfg->last_roc_id,
+					&cfg->remain_on_chan, GFP_KERNEL);
+#else
+				cfg80211_remain_on_channel_expired(cfgdev, cfg->last_roc_id,
+					&cfg->remain_on_chan, cfg->remain_on_chan_type, GFP_KERNEL);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+			}
+		}
+		if (wl_add_remove_eventmsg(bcmcfg_to_prmry_ndev(cfg),
+			WLC_E_P2P_PROBREQ_MSG, false) != BCME_OK) {
+			CFGP2P_ERR((" failed to unset WLC_E_P2P_PROPREQ_MSG\n"));
+		}
+	} else
+		wl_clr_p2p_status(cfg, LISTEN_EXPIRED);
+
+	return ret;
+
+}
+
+/*
+ *  Timer expire callback function for LISTEN
+ *  We can't report cfg80211_remain_on_channel_expired from Timer ISR context,
+ *  so lets do it from thread context.
+ */
+void
+wl_cfgp2p_listen_expired(unsigned long data)
+{
+	wl_event_msg_t msg;
+	struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *) data;
+	CFGP2P_DBG((" Enter\n"));
+	bzero(&msg, sizeof(wl_event_msg_t));
+	msg.event_type =  hton32(WLC_E_P2P_DISC_LISTEN_COMPLETE);
+#if defined(WL_ENABLE_P2P_IF)
+	wl_cfg80211_event(cfg->p2p_net ? cfg->p2p_net :
+		wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE), &msg, NULL);
+#else
+	wl_cfg80211_event(wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE), &msg,
+		NULL);
+#endif /* WL_ENABLE_P2P_IF */
+}
+/*
+ *  Routine for cancelling the P2P LISTEN
+ */
+static s32
+wl_cfgp2p_cancel_listen(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+                         struct wireless_dev *wdev, bool notify)
+{
+	WL_DBG(("Enter \n"));
+	/* Irrespective of whether timer is running or not, reset
+	 * the LISTEN state.
+	 */
+	if (timer_pending(&cfg->p2p->listen_timer)) {
+		del_timer_sync(&cfg->p2p->listen_timer);
+		if (notify) {
+#if defined(WL_CFG80211_P2P_DEV_IF)
+			if (wdev)
+				cfg80211_remain_on_channel_expired(bcmcfg_to_p2p_wdev(cfg), cfg->last_roc_id,
+					&cfg->remain_on_chan, GFP_KERNEL);
+#else
+			if (ndev && ndev->ieee80211_ptr)
+				cfg80211_remain_on_channel_expired(ndev, cfg->last_roc_id,
+					&cfg->remain_on_chan, cfg->remain_on_chan_type, GFP_KERNEL);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+		}
+	}
+	return 0;
+}
+/*
+ * Do a P2P Listen on the given channel for the given duration.
+ * A listen consists of sitting idle and responding to P2P probe requests
+ * with a P2P probe response.
+ *
+ * This fn assumes dongle p2p device discovery is already enabled.
+ * Parameters   :
+ * @cfg          : wl_private data
+ * @channel     : channel to listen
+ * @duration_ms : the time (milli seconds) to wait
+ */
+s32
+wl_cfgp2p_discover_listen(struct bcm_cfg80211 *cfg, s32 channel, u32 duration_ms)
+{
+#define EXTRA_DELAY_TIME	100
+	s32 ret = BCME_OK;
+	struct timer_list *_timer;
+	s32 extra_delay;
+	struct net_device *netdev = bcmcfg_to_prmry_ndev(cfg);
+
+	CFGP2P_DBG((" Enter Listen Channel : %d, Duration : %d\n", channel, duration_ms));
+	if (unlikely(wl_get_p2p_status(cfg, DISCOVERY_ON) == 0)) {
+
+		CFGP2P_ERR((" Discovery is not set, so we have noting to do\n"));
+
+		ret = BCME_NOTREADY;
+		goto exit;
+	}
+	if (timer_pending(&cfg->p2p->listen_timer)) {
+		CFGP2P_DBG(("previous LISTEN is not completed yet\n"));
+		goto exit;
+
+	}
+#ifndef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+	else
+		wl_clr_p2p_status(cfg, LISTEN_EXPIRED);
+#endif /* not WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+	if (wl_add_remove_eventmsg(netdev, WLC_E_P2P_PROBREQ_MSG, true) != BCME_OK) {
+			CFGP2P_ERR((" failed to set WLC_E_P2P_PROPREQ_MSG\n"));
+	}
+
+	ret = wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_LISTEN, channel, (u16) duration_ms,
+	            wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE));
+	_timer = &cfg->p2p->listen_timer;
+
+	/*  We will wait to receive WLC_E_P2P_DISC_LISTEN_COMPLETE from dongle ,
+	 *  otherwise we will wait up to duration_ms + 100ms + duration / 10
+	 */
+	if (ret == BCME_OK) {
+		extra_delay = EXTRA_DELAY_TIME + (duration_ms / 10);
+	} else {
+		/* if failed to set listen, it doesn't need to wait whole duration. */
+		duration_ms = 100 + duration_ms / 20;
+		extra_delay = 0;
+	}
+
+	INIT_TIMER(_timer, wl_cfgp2p_listen_expired, duration_ms, extra_delay);
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+	wl_clr_p2p_status(cfg, LISTEN_EXPIRED);
+#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+
+#undef EXTRA_DELAY_TIME
+exit:
+	return ret;
+}
+
+
+s32
+wl_cfgp2p_discover_enable_search(struct bcm_cfg80211 *cfg, u8 enable)
+{
+	s32 ret = BCME_OK;
+	CFGP2P_DBG((" Enter\n"));
+	if (!wl_get_p2p_status(cfg, DISCOVERY_ON)) {
+
+		CFGP2P_DBG((" do nothing, discovery is off\n"));
+		return ret;
+	}
+	if (wl_get_p2p_status(cfg, SEARCH_ENABLED) == enable) {
+		CFGP2P_DBG(("already : %d\n", enable));
+		return ret;
+	}
+
+	wl_chg_p2p_status(cfg, SEARCH_ENABLED);
+	/* When disabling Search, reset the WL driver's p2p discovery state to
+	 * WL_P2P_DISC_ST_SCAN.
+	 */
+	if (!enable) {
+		wl_clr_p2p_status(cfg, SCANNING);
+		ret = wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0,
+		            wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE));
+	}
+
+	return ret;
+}
+
+/*
+ * Callback function for WLC_E_ACTION_FRAME_COMPLETE, WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE
+ */
+s32
+wl_cfgp2p_action_tx_complete(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+            const wl_event_msg_t *e, void *data)
+{
+	s32 ret = BCME_OK;
+	u32 event_type = ntoh32(e->event_type);
+	u32 status = ntoh32(e->status);
+	struct net_device *ndev = NULL;
+	CFGP2P_DBG((" Enter\n"));
+
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+	if (wl_get_drv_status_all(cfg, SENDING_ACT_FRM)) {
+		if (event_type == WLC_E_ACTION_FRAME_COMPLETE) {
+
+			CFGP2P_INFO((" WLC_E_ACTION_FRAME_COMPLETE is received : %d\n", status));
+			if (status == WLC_E_STATUS_SUCCESS) {
+				wl_set_p2p_status(cfg, ACTION_TX_COMPLETED);
+				CFGP2P_DBG(("WLC_E_ACTION_FRAME_COMPLETE : ACK\n"));
+			}
+			else if (!wl_get_p2p_status(cfg, ACTION_TX_COMPLETED)) {
+				wl_set_p2p_status(cfg, ACTION_TX_NOACK);
+				CFGP2P_INFO(("WLC_E_ACTION_FRAME_COMPLETE : NO ACK\n"));
+				wl_stop_wait_next_action_frame(cfg, ndev);
+			}
+		} else {
+			CFGP2P_INFO((" WLC_E_ACTION_FRAME_OFFCHAN_COMPLETE is received,"
+						"status : %d\n", status));
+
+			if (wl_get_drv_status_all(cfg, SENDING_ACT_FRM))
+				complete(&cfg->send_af_done);
+		}
+	}
+	return ret;
+}
+/* Send an action frame immediately without doing channel synchronization.
+ *
+ * This function does not wait for a completion event before returning.
+ * The WLC_E_ACTION_FRAME_COMPLETE event will be received when the action
+ * frame is transmitted.
+ * The WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE event will be received when an
+ * 802.11 ack has been received for the sent action frame.
+ */
+s32
+wl_cfgp2p_tx_action_frame(struct bcm_cfg80211 *cfg, struct net_device *dev,
+	wl_af_params_t *af_params, s32 bssidx)
+{
+	s32 ret = BCME_OK;
+	s32 evt_ret = BCME_OK;
+	s32 timeout = 0;
+	wl_eventmsg_buf_t buf;
+
+
+	CFGP2P_INFO(("\n"));
+	CFGP2P_INFO(("channel : %u , dwell time : %u\n",
+	    af_params->channel, af_params->dwell_time));
+
+	wl_clr_p2p_status(cfg, ACTION_TX_COMPLETED);
+	wl_clr_p2p_status(cfg, ACTION_TX_NOACK);
+
+	bzero(&buf, sizeof(wl_eventmsg_buf_t));
+	wl_cfg80211_add_to_eventbuffer(&buf, WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE, true);
+	wl_cfg80211_add_to_eventbuffer(&buf, WLC_E_ACTION_FRAME_COMPLETE, true);
+	if ((evt_ret = wl_cfg80211_apply_eventbuffer(bcmcfg_to_prmry_ndev(cfg), cfg, &buf)) < 0)
+		return evt_ret;
+
+	cfg->af_sent_channel  = af_params->channel;
+#ifdef WL_CFG80211_SYNC_GON
+	cfg->af_tx_sent_jiffies = jiffies;
+#endif /* WL_CFG80211_SYNC_GON */
+
+	ret = wldev_iovar_setbuf_bsscfg(dev, "actframe", af_params, sizeof(*af_params),
+		cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+
+	if (ret < 0) {
+		CFGP2P_ERR((" sending action frame is failed\n"));
+		goto exit;
+	}
+
+	timeout = wait_for_completion_timeout(&cfg->send_af_done,
+		msecs_to_jiffies(af_params->dwell_time + WL_AF_TX_EXTRA_TIME_MAX));
+
+	if (timeout >= 0 && wl_get_p2p_status(cfg, ACTION_TX_COMPLETED)) {
+		CFGP2P_INFO(("tx action frame operation is completed\n"));
+		ret = BCME_OK;
+	} else if (ETHER_ISBCAST(&cfg->afx_hdl->tx_dst_addr)) {
+		CFGP2P_INFO(("bcast tx action frame operation is completed\n"));
+		ret = BCME_OK;
+	} else {
+		ret = BCME_ERROR;
+		CFGP2P_INFO(("tx action frame operation is failed\n"));
+	}
+	/* clear status bit for action tx */
+	wl_clr_p2p_status(cfg, ACTION_TX_COMPLETED);
+	wl_clr_p2p_status(cfg, ACTION_TX_NOACK);
+
+exit:
+	CFGP2P_INFO((" via act frame iovar : status = %d\n", ret));
+
+	bzero(&buf, sizeof(wl_eventmsg_buf_t));
+	wl_cfg80211_add_to_eventbuffer(&buf, WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE, false);
+	wl_cfg80211_add_to_eventbuffer(&buf, WLC_E_ACTION_FRAME_COMPLETE, false);
+	if ((evt_ret = wl_cfg80211_apply_eventbuffer(bcmcfg_to_prmry_ndev(cfg), cfg, &buf)) < 0) {
+		WL_ERR(("TX frame events revert back failed \n"));
+		return evt_ret;
+	}
+
+	return ret;
+}
+
+/* Generate our P2P Device Address and P2P Interface Address from our primary
+ * MAC address.
+ */
+void
+wl_cfgp2p_generate_bss_mac(struct ether_addr *primary_addr,
+            struct ether_addr *out_dev_addr, struct ether_addr *out_int_addr)
+{
+	memset(out_dev_addr, 0, sizeof(*out_dev_addr));
+	memset(out_int_addr, 0, sizeof(*out_int_addr));
+
+	/* Generate the P2P Device Address.  This consists of the device's
+	 * primary MAC address with the locally administered bit set.
+	 */
+	memcpy(out_dev_addr, primary_addr, sizeof(*out_dev_addr));
+	out_dev_addr->octet[0] |= 0x02;
+
+	/* Generate the P2P Interface Address.  If the discovery and connection
+	 * BSSCFGs need to simultaneously co-exist, then this address must be
+	 * different from the P2P Device Address.
+	 */
+	memcpy(out_int_addr, out_dev_addr, sizeof(*out_int_addr));
+	out_int_addr->octet[4] ^= 0x80;
+
+}
+
+/* P2P IF Address change to Virtual Interface MAC Address */
+void
+wl_cfg80211_change_ifaddr(u8* buf, struct ether_addr *p2p_int_addr, u8 element_id)
+{
+	wifi_p2p_ie_t *ie = (wifi_p2p_ie_t*) buf;
+	u16 len = ie->len;
+	u8 *subel;
+	u8 subelt_id;
+	u16 subelt_len;
+	CFGP2P_DBG((" Enter\n"));
+
+	/* Point subel to the P2P IE's subelt field.
+	 * Subtract the preceding fields (id, len, OUI, oui_type) from the length.
+	 */
+	subel = ie->subelts;
+	len -= 4;	/* exclude OUI + OUI_TYPE */
+
+	while (len >= 3) {
+	/* attribute id */
+		subelt_id = *subel;
+		subel += 1;
+		len -= 1;
+
+		/* 2-byte little endian */
+		subelt_len = *subel++;
+		subelt_len |= *subel++ << 8;
+
+		len -= 2;
+		len -= subelt_len;	/* for the remaining subelt fields */
+
+		if (subelt_id == element_id) {
+			if (subelt_id == P2P_SEID_INTINTADDR) {
+				memcpy(subel, p2p_int_addr->octet, ETHER_ADDR_LEN);
+				CFGP2P_INFO(("Intended P2P Interface Address ATTR FOUND\n"));
+			} else if (subelt_id == P2P_SEID_DEV_ID) {
+				memcpy(subel, p2p_int_addr->octet, ETHER_ADDR_LEN);
+				CFGP2P_INFO(("Device ID ATTR FOUND\n"));
+			} else if (subelt_id == P2P_SEID_DEV_INFO) {
+				memcpy(subel, p2p_int_addr->octet, ETHER_ADDR_LEN);
+				CFGP2P_INFO(("Device INFO ATTR FOUND\n"));
+			} else if (subelt_id == P2P_SEID_GROUP_ID) {
+				memcpy(subel, p2p_int_addr->octet, ETHER_ADDR_LEN);
+				CFGP2P_INFO(("GROUP ID ATTR FOUND\n"));
+			}			return;
+		} else {
+			CFGP2P_DBG(("OTHER id : %d\n", subelt_id));
+		}
+		subel += subelt_len;
+	}
+}
+/*
+ * Check if a BSS is up.
+ * This is a common implementation called by most OSL implementations of
+ * p2posl_bss_isup().  DO NOT call this function directly from the
+ * common code -- call p2posl_bss_isup() instead to allow the OSL to
+ * override the common implementation if necessary.
+ */
+bool
+wl_cfgp2p_bss_isup(struct net_device *ndev, int bsscfg_idx)
+{
+	s32 result, val;
+	bool isup = false;
+	s8 getbuf[64];
+
+	/* Check if the BSS is up */
+	*(int*)getbuf = -1;
+	result = wldev_iovar_getbuf_bsscfg(ndev, "bss", &bsscfg_idx,
+		sizeof(bsscfg_idx), getbuf, sizeof(getbuf), 0, NULL);
+	if (result != 0) {
+		CFGP2P_ERR(("'cfg bss -C %d' failed: %d\n", bsscfg_idx, result));
+		CFGP2P_ERR(("NOTE: this ioctl error is normal "
+					"when the BSS has not been created yet.\n"));
+	} else {
+		val = *(int*)getbuf;
+		val = dtoh32(val);
+		CFGP2P_INFO(("---cfg bss -C %d   ==> %d\n", bsscfg_idx, val));
+		isup = (val ? TRUE : FALSE);
+	}
+	return isup;
+}
+
+
+/* Bring up or down a BSS */
+s32
+wl_cfgp2p_bss(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bsscfg_idx, s32 up)
+{
+	s32 ret = BCME_OK;
+	s32 val = up ? 1 : 0;
+
+	struct {
+		s32 cfg;
+		s32 val;
+	} bss_setbuf;
+
+	bss_setbuf.cfg = htod32(bsscfg_idx);
+	bss_setbuf.val = htod32(val);
+	CFGP2P_INFO(("---cfg bss -C %d %s\n", bsscfg_idx, up ? "up" : "down"));
+	ret = wldev_iovar_setbuf(ndev, "bss", &bss_setbuf, sizeof(bss_setbuf),
+		cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+
+	if (ret != 0) {
+		CFGP2P_ERR(("'bss %d' failed with %d\n", up, ret));
+	}
+
+	return ret;
+}
+
+/* Check if 'p2p' is supported in the driver */
+s32
+wl_cfgp2p_supported(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+{
+	s32 ret = BCME_OK;
+	s32 p2p_supported = 0;
+	ret = wldev_iovar_getint(ndev, "p2p",
+	               &p2p_supported);
+	if (ret < 0) {
+		if (ret == BCME_UNSUPPORTED) {
+			CFGP2P_INFO(("p2p is unsupported\n"));
+			return 0;
+		} else {
+			CFGP2P_ERR(("cfg p2p error %d\n", ret));
+			return ret;
+		}
+	}
+	if (p2p_supported == 1) {
+		CFGP2P_INFO(("p2p is supported\n"));
+	} else {
+		CFGP2P_INFO(("p2p is unsupported\n"));
+		p2p_supported = 0;
+	}
+	return p2p_supported;
+}
+/* Cleanup P2P resources */
+s32
+wl_cfgp2p_down(struct bcm_cfg80211 *cfg)
+{
+	struct net_device *ndev = NULL;
+	struct wireless_dev *wdev = NULL;
+	s32 i = 0, index = -1;
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+	ndev = bcmcfg_to_prmry_ndev(cfg);
+	wdev = bcmcfg_to_p2p_wdev(cfg);
+#elif defined(WL_ENABLE_P2P_IF)
+	ndev = cfg->p2p_net ? cfg->p2p_net : bcmcfg_to_prmry_ndev(cfg);
+	wdev = ndev_to_wdev(ndev);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+	wl_cfgp2p_cancel_listen(cfg, ndev, wdev, TRUE);
+	for (i = 0; i < P2PAPI_BSSCFG_MAX; i++) {
+			index = wl_to_p2p_bss_bssidx(cfg, i);
+			if (index != WL_INVALID)
+				wl_cfgp2p_clear_management_ie(cfg, index);
+	}
+	wl_cfgp2p_deinit_priv(cfg);
+	return 0;
+}
+s32
+wl_cfgp2p_set_p2p_noa(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len)
+{
+	s32 ret = -1;
+	int count, start, duration;
+	wl_p2p_sched_t dongle_noa;
+
+	CFGP2P_DBG((" Enter\n"));
+
+	memset(&dongle_noa, 0, sizeof(dongle_noa));
+
+	if (cfg->p2p && cfg->p2p->vif_created) {
+
+		cfg->p2p->noa.desc[0].start = 0;
+
+		sscanf(buf, "%10d %10d %10d", &count, &start, &duration);
+		CFGP2P_DBG(("set_p2p_noa count %d start %d duration %d\n",
+			count, start, duration));
+		if (count != -1)
+			cfg->p2p->noa.desc[0].count = count;
+
+		/* supplicant gives interval as start */
+		if (start != -1)
+			cfg->p2p->noa.desc[0].interval = start;
+
+		if (duration != -1)
+			cfg->p2p->noa.desc[0].duration = duration;
+
+		if (cfg->p2p->noa.desc[0].count != 255 && cfg->p2p->noa.desc[0].count != 0) {
+			cfg->p2p->noa.desc[0].start = 200;
+			dongle_noa.type = WL_P2P_SCHED_TYPE_REQ_ABS;
+			dongle_noa.action = WL_P2P_SCHED_ACTION_GOOFF;
+			dongle_noa.option = WL_P2P_SCHED_OPTION_TSFOFS;
+		}
+		else if (cfg->p2p->noa.desc[0].count == 0) {
+			cfg->p2p->noa.desc[0].start = 0;
+			dongle_noa.type = WL_P2P_SCHED_TYPE_ABS;
+			dongle_noa.option = WL_P2P_SCHED_OPTION_NORMAL;
+			dongle_noa.action = WL_P2P_SCHED_ACTION_RESET;
+		}
+		else {
+			/* Continuous NoA interval. */
+			dongle_noa.action = WL_P2P_SCHED_ACTION_NONE;
+			dongle_noa.type = WL_P2P_SCHED_TYPE_ABS;
+			if ((cfg->p2p->noa.desc[0].interval == 102) ||
+				(cfg->p2p->noa.desc[0].interval == 100)) {
+				cfg->p2p->noa.desc[0].start = 100 -
+					cfg->p2p->noa.desc[0].duration;
+				dongle_noa.option = WL_P2P_SCHED_OPTION_BCNPCT;
+			}
+			else {
+				dongle_noa.option = WL_P2P_SCHED_OPTION_NORMAL;
+			}
+		}
+		/* Put the noa descriptor in dongle format for dongle */
+		dongle_noa.desc[0].count = htod32(cfg->p2p->noa.desc[0].count);
+		if (dongle_noa.option == WL_P2P_SCHED_OPTION_BCNPCT) {
+			dongle_noa.desc[0].start = htod32(cfg->p2p->noa.desc[0].start);
+			dongle_noa.desc[0].duration = htod32(cfg->p2p->noa.desc[0].duration);
+		}
+		else {
+			dongle_noa.desc[0].start = htod32(cfg->p2p->noa.desc[0].start*1000);
+			dongle_noa.desc[0].duration = htod32(cfg->p2p->noa.desc[0].duration*1000);
+		}
+		dongle_noa.desc[0].interval = htod32(cfg->p2p->noa.desc[0].interval*1000);
+
+		ret = wldev_iovar_setbuf(wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION),
+			"p2p_noa", &dongle_noa, sizeof(dongle_noa), cfg->ioctl_buf,
+			WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+
+		if (ret < 0) {
+			CFGP2P_ERR(("fw set p2p_noa failed %d\n", ret));
+		}
+	}
+	else {
+		CFGP2P_ERR(("ERROR: set_noa in non-p2p mode\n"));
+	}
+	return ret;
+}
+s32
+wl_cfgp2p_get_p2p_noa(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int buf_len)
+{
+
+	wifi_p2p_noa_desc_t *noa_desc;
+	int len = 0, i;
+	char _buf[200];
+
+	CFGP2P_DBG((" Enter\n"));
+	buf[0] = '\0';
+	if (cfg->p2p && cfg->p2p->vif_created) {
+		if (cfg->p2p->noa.desc[0].count || cfg->p2p->ops.ops) {
+			_buf[0] = 1; /* noa index */
+			_buf[1] = (cfg->p2p->ops.ops ? 0x80: 0) |
+				(cfg->p2p->ops.ctw & 0x7f); /* ops + ctw */
+			len += 2;
+			if (cfg->p2p->noa.desc[0].count) {
+				noa_desc = (wifi_p2p_noa_desc_t*)&_buf[len];
+				noa_desc->cnt_type = cfg->p2p->noa.desc[0].count;
+				noa_desc->duration = cfg->p2p->noa.desc[0].duration;
+				noa_desc->interval = cfg->p2p->noa.desc[0].interval;
+				noa_desc->start = cfg->p2p->noa.desc[0].start;
+				len += sizeof(wifi_p2p_noa_desc_t);
+			}
+			if (buf_len <= len * 2) {
+				CFGP2P_ERR(("ERROR: buf_len %d in not enough for"
+					"returning noa in string format\n", buf_len));
+				return -1;
+			}
+			/* We have to convert the buffer data into ASCII strings */
+			for (i = 0; i < len; i++) {
+				snprintf(buf, 3, "%02x", _buf[i]);
+				buf += 2;
+			}
+			buf[i*2] = '\0';
+		}
+	}
+	else {
+		CFGP2P_ERR(("ERROR: get_noa in non-p2p mode\n"));
+		return -1;
+	}
+	return len * 2;
+}
+s32
+wl_cfgp2p_set_p2p_ps(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len)
+{
+	int ps, ctw;
+	int ret = -1;
+	s32 legacy_ps;
+	struct net_device *dev;
+
+	CFGP2P_DBG((" Enter\n"));
+	if (cfg->p2p && cfg->p2p->vif_created) {
+		sscanf(buf, "%10d %10d %10d", &legacy_ps, &ps, &ctw);
+		CFGP2P_DBG((" Enter legacy_ps %d ps %d ctw %d\n", legacy_ps, ps, ctw));
+		dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION);
+		if (ctw != -1) {
+			cfg->p2p->ops.ctw = ctw;
+			ret = 0;
+		}
+		if (ps != -1) {
+			cfg->p2p->ops.ops = ps;
+			ret = wldev_iovar_setbuf(dev,
+				"p2p_ops", &cfg->p2p->ops, sizeof(cfg->p2p->ops),
+				cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+			if (ret < 0) {
+				CFGP2P_ERR(("fw set p2p_ops failed %d\n", ret));
+			}
+		}
+
+		if ((legacy_ps != -1) && ((legacy_ps == PM_MAX) || (legacy_ps == PM_OFF))) {
+			ret = wldev_ioctl(dev,
+				WLC_SET_PM, &legacy_ps, sizeof(legacy_ps), true);
+			if (unlikely(ret))
+				CFGP2P_ERR(("error (%d)\n", ret));
+			wl_cfg80211_update_power_mode(dev);
+		}
+		else
+			CFGP2P_ERR(("ilegal setting\n"));
+	}
+	else {
+		CFGP2P_ERR(("ERROR: set_p2p_ps in non-p2p mode\n"));
+		ret = -1;
+	}
+	return ret;
+}
+
+u8 *
+wl_cfgp2p_retreive_p2pattrib(void *buf, u8 element_id)
+{
+	wifi_p2p_ie_t *ie = NULL;
+	u16 len = 0;
+	u8 *subel;
+	u8 subelt_id;
+	u16 subelt_len;
+
+	if (!buf) {
+		WL_ERR(("P2P IE not present"));
+		return 0;
+	}
+
+	ie = (wifi_p2p_ie_t*) buf;
+	len = ie->len;
+
+	/* Point subel to the P2P IE's subelt field.
+	 * Subtract the preceding fields (id, len, OUI, oui_type) from the length.
+	 */
+	subel = ie->subelts;
+	len -= 4;	/* exclude OUI + OUI_TYPE */
+
+	while (len >= 3) {
+		/* attribute id */
+		subelt_id = *subel;
+		subel += 1;
+		len -= 1;
+
+		/* 2-byte little endian */
+		subelt_len = *subel++;
+		subelt_len |= *subel++ << 8;
+
+		len -= 2;
+		len -= subelt_len;	/* for the remaining subelt fields */
+
+		if (subelt_id == element_id) {
+			/* This will point to start of subelement attrib after
+			 * attribute id & len
+			 */
+			return subel;
+		}
+
+		/* Go to next subelement */
+		subel += subelt_len;
+	}
+
+	/* Not Found */
+	return NULL;
+}
+
+#define P2P_GROUP_CAPAB_GO_BIT	0x01
+
+u8*
+wl_cfgp2p_find_attrib_in_all_p2p_Ies(u8 *parse, u32 len, u32 attrib)
+{
+	bcm_tlv_t *ie;
+	u8* pAttrib;
+
+	CFGP2P_INFO(("Starting parsing parse %p attrib %d remaining len %d ", parse, attrib, len));
+	while ((ie = bcm_parse_tlvs(parse, (int)len, DOT11_MNG_VS_ID))) {
+		if (wl_cfgp2p_is_p2p_ie((uint8*)ie, &parse, &len) == TRUE) {
+			/* Have the P2p ie. Now check for attribute */
+			if ((pAttrib = wl_cfgp2p_retreive_p2pattrib(parse, attrib)) != NULL) {
+				CFGP2P_INFO(("P2P attribute %d was found at parse %p",
+					attrib, parse));
+				return pAttrib;
+			}
+			else {
+				parse += (ie->len + TLV_HDR_LEN);
+				len -= (ie->len + TLV_HDR_LEN);
+				CFGP2P_INFO(("P2P Attribute %d not found Moving parse"
+					" to %p len to %d", attrib, parse, len));
+			}
+		}
+		else {
+			/* It was not p2p IE. parse will get updated automatically to next TLV */
+			CFGP2P_INFO(("IT was NOT P2P IE parse %p len %d", parse, len));
+		}
+	}
+	CFGP2P_ERR(("P2P attribute %d was NOT found", attrib));
+	return NULL;
+}
+
+u8 *
+wl_cfgp2p_retreive_p2p_dev_addr(wl_bss_info_t *bi, u32 bi_length)
+{
+	u8 *capability = NULL;
+	bool p2p_go	= 0;
+	u8 *ptr = NULL;
+
+	if ((capability = wl_cfgp2p_find_attrib_in_all_p2p_Ies(((u8 *) bi) + bi->ie_offset,
+	bi->ie_length, P2P_SEID_P2P_INFO)) == NULL) {
+		WL_ERR(("P2P Capability attribute not found"));
+		return NULL;
+	}
+
+	/* Check Group capability for Group Owner bit */
+	p2p_go = capability[1] & P2P_GROUP_CAPAB_GO_BIT;
+	if (!p2p_go) {
+		return bi->BSSID.octet;
+	}
+
+	/* In probe responses, DEVICE INFO attribute will be present */
+	if (!(ptr = wl_cfgp2p_find_attrib_in_all_p2p_Ies(((u8 *) bi) + bi->ie_offset,
+	bi->ie_length,  P2P_SEID_DEV_INFO))) {
+		/* If DEVICE_INFO is not found, this might be a beacon frame.
+		 * check for DEVICE_ID in the beacon frame.
+		 */
+		ptr = wl_cfgp2p_find_attrib_in_all_p2p_Ies(((u8 *) bi) + bi->ie_offset,
+		bi->ie_length,  P2P_SEID_DEV_ID);
+	}
+
+	if (!ptr)
+		WL_ERR((" Both DEVICE_ID & DEVICE_INFO attribute not present in P2P IE "));
+
+	return ptr;
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
+static void
+wl_cfgp2p_ethtool_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
+{
+	snprintf(info->driver, sizeof(info->driver), "p2p");
+	snprintf(info->version, sizeof(info->version), "%lu", (unsigned long)(0));
+}
+
+struct ethtool_ops cfgp2p_ethtool_ops = {
+	.get_drvinfo = wl_cfgp2p_ethtool_get_drvinfo
+};
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
+
+#if defined(WL_ENABLE_P2P_IF)
+s32
+wl_cfgp2p_register_ndev(struct bcm_cfg80211 *cfg)
+{
+	int ret = 0;
+	struct net_device* net = NULL;
+	struct wireless_dev *wdev = NULL;
+	uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x33, 0x22, 0x11 };
+
+	if (cfg->p2p_net) {
+		CFGP2P_ERR(("p2p_net defined already.\n"));
+		return -EINVAL;
+	}
+
+	/* Allocate etherdev, including space for private structure */
+	if (!(net = alloc_etherdev(sizeof(struct bcm_cfg80211 *)))) {
+		CFGP2P_ERR(("%s: OOM - alloc_etherdev\n", __FUNCTION__));
+		return -ENODEV;
+	}
+
+	wdev = kzalloc(sizeof(*wdev), GFP_KERNEL);
+	if (unlikely(!wdev)) {
+		WL_ERR(("Could not allocate wireless device\n"));
+		free_netdev(net);
+		return -ENOMEM;
+	}
+
+	strncpy(net->name, "p2p%d", sizeof(net->name) - 1);
+	net->name[IFNAMSIZ - 1] = '\0';
+
+	/* Copy the reference to bcm_cfg80211 */
+	memcpy((void *)netdev_priv(net), &cfg, sizeof(struct bcm_cfg80211 *));
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
+	ASSERT(!net->open);
+	net->do_ioctl = wl_cfgp2p_do_ioctl;
+	net->hard_start_xmit = wl_cfgp2p_start_xmit;
+	net->open = wl_cfgp2p_if_open;
+	net->stop = wl_cfgp2p_if_stop;
+#else
+	ASSERT(!net->netdev_ops);
+	net->netdev_ops = &wl_cfgp2p_if_ops;
+#endif
+
+	/* Register with a dummy MAC addr */
+	memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN);
+
+	wdev->wiphy = cfg->wdev->wiphy;
+
+	wdev->iftype = wl_mode_to_nl80211_iftype(WL_MODE_BSS);
+
+	net->ieee80211_ptr = wdev;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
+	net->ethtool_ops = &cfgp2p_ethtool_ops;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
+
+	SET_NETDEV_DEV(net, wiphy_dev(wdev->wiphy));
+
+	/* Associate p2p0 network interface with new wdev */
+	wdev->netdev = net;
+
+	ret = register_netdev(net);
+	if (ret) {
+		CFGP2P_ERR((" register_netdevice failed (%d)\n", ret));
+		free_netdev(net);
+		kfree(wdev);
+		return -ENODEV;
+	}
+
+	/* store p2p net ptr for further reference. Note that iflist won't have this
+	 * entry as there corresponding firmware interface is a "Hidden" interface.
+	 */
+	cfg->p2p_wdev = wdev;
+	cfg->p2p_net = net;
+
+	printk("%s: P2P Interface Registered\n", net->name);
+
+	return ret;
+}
+
+s32
+wl_cfgp2p_unregister_ndev(struct bcm_cfg80211 *cfg)
+{
+
+	if (!cfg || !cfg->p2p_net) {
+		CFGP2P_ERR(("Invalid Ptr\n"));
+		return -EINVAL;
+	}
+
+	unregister_netdev(cfg->p2p_net);
+	free_netdev(cfg->p2p_net);
+
+	return 0;
+}
+static int wl_cfgp2p_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+
+	if (skb)
+	{
+		CFGP2P_DBG(("(%s) is not used for data operations.Droping the packet.\n",
+			ndev->name));
+		dev_kfree_skb_any(skb);
+	}
+
+	return 0;
+}
+
+static int wl_cfgp2p_do_ioctl(struct net_device *net, struct ifreq *ifr, int cmd)
+{
+	int ret = 0;
+	struct bcm_cfg80211 *cfg = *(struct bcm_cfg80211 **)netdev_priv(net);
+	struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+
+	/* There is no ifidx corresponding to p2p0 in our firmware. So we should
+	 * not Handle any IOCTL cmds on p2p0 other than ANDROID PRIVATE CMDs.
+	 * For Android PRIV CMD handling map it to primary I/F
+	 */
+	if (cmd == SIOCDEVPRIVATE+1) {
+		ret = wl_android_priv_cmd(ndev, ifr, cmd);
+
+	} else {
+		CFGP2P_ERR(("%s: IOCTL req 0x%x on p2p0 I/F. Ignoring. \n",
+		__FUNCTION__, cmd));
+		return -1;
+	}
+
+	return ret;
+}
+#endif
+
+#if defined(WL_ENABLE_P2P_IF)
+static int wl_cfgp2p_if_open(struct net_device *net)
+{
+	struct wireless_dev *wdev = net->ieee80211_ptr;
+
+	if (!wdev || !wl_cfg80211_is_p2p_active())
+		return -EINVAL;
+	WL_TRACE(("Enter\n"));
+#if !defined(WL_IFACE_COMB_NUM_CHANNELS)
+	/* If suppose F/W download (ifconfig wlan0 up) hasn't been done by now,
+	 * do it here. This will make sure that in concurrent mode, supplicant
+	 * is not dependent on a particular order of interface initialization.
+	 * i.e you may give wpa_supp -iwlan0 -N -ip2p0 or wpa_supp -ip2p0 -N
+	 * -iwlan0.
+	 */
+	wdev->wiphy->interface_modes |= (BIT(NL80211_IFTYPE_P2P_CLIENT)
+		| BIT(NL80211_IFTYPE_P2P_GO));
+#endif /* !WL_IFACE_COMB_NUM_CHANNELS */
+	wl_cfg80211_do_driver_init(net);
+
+	return 0;
+}
+
+static int wl_cfgp2p_if_stop(struct net_device *net)
+{
+	struct wireless_dev *wdev = net->ieee80211_ptr;
+
+	if (!wdev)
+		return -EINVAL;
+
+	wl_cfg80211_scan_stop(net);
+
+#if !defined(WL_IFACE_COMB_NUM_CHANNELS)
+	wdev->wiphy->interface_modes = (wdev->wiphy->interface_modes)
+					& (~(BIT(NL80211_IFTYPE_P2P_CLIENT)|
+					BIT(NL80211_IFTYPE_P2P_GO)));
+#endif /* !WL_IFACE_COMB_NUM_CHANNELS */
+	return 0;
+}
+
+bool wl_cfgp2p_is_ifops(const struct net_device_ops *if_ops)
+{
+	return (if_ops == &wl_cfgp2p_if_ops);
+}
+#endif /* WL_ENABLE_P2P_IF */
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+struct wireless_dev *
+wl_cfgp2p_add_p2p_disc_if(struct bcm_cfg80211 *cfg)
+{
+	struct wireless_dev *wdev = NULL;
+	struct ether_addr primary_mac;
+
+	if (!cfg)
+		return ERR_PTR(-EINVAL);
+
+	WL_TRACE(("Enter\n"));
+
+	if (cfg->p2p_wdev) {
+		wl_cfgp2p_del_p2p_disc_if(cfg->p2p_wdev, cfg);
+		CFGP2P_ERR(("p2p_wdev deleted.\n"));
+	}
+
+	wdev = kzalloc(sizeof(*wdev), GFP_KERNEL);
+	if (unlikely(!wdev)) {
+		WL_ERR(("Could not allocate wireless device\n"));
+		return ERR_PTR(-ENOMEM);
+	}
+
+	memset(&primary_mac, 0, sizeof(primary_mac));
+	get_primary_mac(cfg, &primary_mac);
+	wl_cfgp2p_generate_bss_mac(&primary_mac,
+			&cfg->p2p->dev_addr, &cfg->p2p->int_addr);
+
+	wdev->wiphy = cfg->wdev->wiphy;
+	wdev->iftype = NL80211_IFTYPE_P2P_DEVICE;
+	memcpy(wdev->address, &cfg->p2p->dev_addr, ETHER_ADDR_LEN);
+
+
+	/* store p2p wdev ptr for further reference. */
+	cfg->p2p_wdev = wdev;
+
+	CFGP2P_ERR(("P2P interface registered\n"));
+
+	return wdev;
+}
+
+int
+wl_cfgp2p_start_p2p_device(struct wiphy *wiphy, struct wireless_dev *wdev)
+{
+	int ret = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+
+	if (!cfg)
+		return -EINVAL;
+
+	WL_TRACE(("Enter\n"));
+
+	ret = wl_cfgp2p_set_firm_p2p(cfg);
+	if (unlikely(ret < 0)) {
+		CFGP2P_ERR(("Set P2P in firmware failed, ret=%d\n", ret));
+		goto exit;
+	}
+
+	ret = wl_cfgp2p_enable_discovery(cfg, bcmcfg_to_prmry_ndev(cfg), NULL, 0);
+	if (unlikely(ret < 0)) {
+		CFGP2P_ERR(("P2P enable discovery failed, ret=%d\n", ret));
+		goto exit;
+	}
+
+	p2p_on(cfg) = true;
+
+	CFGP2P_DBG(("P2P interface started\n"));
+
+exit:
+	return ret;
+}
+
+void
+wl_cfgp2p_stop_p2p_device(struct wiphy *wiphy, struct wireless_dev *wdev)
+{
+	int ret = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+
+	if (!cfg)
+		return;
+
+	WL_TRACE(("Enter\n"));
+
+	ret = wl_cfg80211_scan_stop(wdev);
+	if (unlikely(ret < 0)) {
+		CFGP2P_ERR(("P2P scan stop failed, ret=%d\n", ret));
+	}
+
+	if (!cfg->p2p)
+		return;
+
+	ret = wl_cfgp2p_disable_discovery(cfg);
+	if (unlikely(ret < 0)) {
+		CFGP2P_ERR(("P2P disable discovery failed, ret=%d\n", ret));
+	}
+
+	p2p_on(cfg) = false;
+
+	CFGP2P_DBG(("P2P interface stopped\n"));
+
+	return;
+}
+
+int
+wl_cfgp2p_del_p2p_disc_if(struct wireless_dev *wdev, struct bcm_cfg80211 *cfg)
+{
+	bool rollback_lock = false;
+
+	if (!wdev)
+		return -EINVAL;
+
+	WL_TRACE(("Enter\n"));
+
+	if (!rtnl_is_locked()) {
+		rtnl_lock();
+		rollback_lock = true;
+	}
+
+	cfg80211_unregister_wdev(wdev);
+
+	if (rollback_lock)
+		rtnl_unlock();
+
+	kfree(wdev);
+
+	if (cfg)
+		cfg->p2p_wdev = NULL;
+
+	CFGP2P_ERR(("P2P interface unregistered\n"));
+
+	return 0;
+}
+#endif /* WL_CFG80211_P2P_DEV_IF */
diff --git a/drivers/net/wireless/bcmdhd/wl_cfgp2p.h b/drivers/net/wireless/bcmdhd/wl_cfgp2p.h
new file mode 100644
index 0000000..f4c7c4f
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_cfgp2p.h
@@ -0,0 +1,413 @@
+/*
+ * Linux cfgp2p driver
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_cfgp2p.h 472818 2014-04-25 08:07:56Z $
+ */
+#ifndef _wl_cfgp2p_h_
+#define _wl_cfgp2p_h_
+#include <proto/802.11.h>
+#include <proto/p2p.h>
+
+struct bcm_cfg80211;
+extern u32 wl_dbg_level;
+
+typedef struct wifi_p2p_ie wifi_wfd_ie_t;
+/* Enumeration of the usages of the BSSCFGs used by the P2P Library.  Do not
+ * confuse this with a bsscfg index.  This value is an index into the
+ * saved_ie[] array of structures which in turn contains a bsscfg index field.
+ */
+typedef enum {
+	P2PAPI_BSSCFG_PRIMARY, /* maps to driver's primary bsscfg */
+	P2PAPI_BSSCFG_DEVICE, /* maps to driver's P2P device discovery bsscfg */
+	P2PAPI_BSSCFG_CONNECTION, /* maps to driver's P2P connection bsscfg */
+	P2PAPI_BSSCFG_MAX
+} p2p_bsscfg_type_t;
+
+typedef enum {
+	P2P_SCAN_PURPOSE_MIN,
+	P2P_SCAN_SOCIAL_CHANNEL, /* scan for social channel */
+	P2P_SCAN_AFX_PEER_NORMAL, /* scan for action frame search */
+	P2P_SCAN_AFX_PEER_REDUCED, /* scan for action frame search with short time */
+	P2P_SCAN_DURING_CONNECTED, /* scan during connected status */
+	P2P_SCAN_CONNECT_TRY, /* scan for connecting */
+	P2P_SCAN_NORMAL, /* scan during not-connected status */
+	P2P_SCAN_PURPOSE_MAX
+} p2p_scan_purpose_t;
+
+/* vendor ies max buffer length for probe response or beacon */
+#define VNDR_IES_MAX_BUF_LEN	1400
+/* normal vendor ies buffer length */
+#define VNDR_IES_BUF_LEN 		512
+
+/* Structure to hold all saved P2P and WPS IEs for a BSSCFG */
+struct p2p_saved_ie {
+	u8  p2p_probe_req_ie[VNDR_IES_BUF_LEN];
+	u8  p2p_probe_res_ie[VNDR_IES_MAX_BUF_LEN];
+	u8  p2p_assoc_req_ie[VNDR_IES_BUF_LEN];
+	u8  p2p_assoc_res_ie[VNDR_IES_BUF_LEN];
+	u8  p2p_beacon_ie[VNDR_IES_MAX_BUF_LEN];
+	u32 p2p_probe_req_ie_len;
+	u32 p2p_probe_res_ie_len;
+	u32 p2p_assoc_req_ie_len;
+	u32 p2p_assoc_res_ie_len;
+	u32 p2p_beacon_ie_len;
+};
+
+struct p2p_bss {
+	s32 bssidx;
+	struct net_device *dev;
+	struct p2p_saved_ie saved_ie;
+	void *private_data;
+};
+
+struct p2p_info {
+	bool on;    /* p2p on/off switch */
+	bool scan;
+	int16 search_state;
+	bool vif_created;
+	s8 vir_ifname[IFNAMSIZ];
+	unsigned long status;
+	struct ether_addr dev_addr;
+	struct ether_addr int_addr;
+	struct p2p_bss bss[P2PAPI_BSSCFG_MAX];
+	struct timer_list listen_timer;
+	wl_p2p_sched_t noa;
+	wl_p2p_ops_t ops;
+	wlc_ssid_t ssid;
+};
+
+#define MAX_VNDR_IE_NUMBER	5
+
+struct parsed_vndr_ie_info {
+	char *ie_ptr;
+	u32 ie_len;	/* total length including id & length field */
+	vndr_ie_t vndrie;
+};
+
+struct parsed_vndr_ies {
+	u32 count;
+	struct parsed_vndr_ie_info ie_info[MAX_VNDR_IE_NUMBER];
+};
+
+/* dongle status */
+enum wl_cfgp2p_status {
+	WLP2P_STATUS_DISCOVERY_ON = 0,
+	WLP2P_STATUS_SEARCH_ENABLED,
+	WLP2P_STATUS_IF_ADDING,
+	WLP2P_STATUS_IF_DELETING,
+	WLP2P_STATUS_IF_CHANGING,
+	WLP2P_STATUS_IF_CHANGED,
+	WLP2P_STATUS_LISTEN_EXPIRED,
+	WLP2P_STATUS_ACTION_TX_COMPLETED,
+	WLP2P_STATUS_ACTION_TX_NOACK,
+	WLP2P_STATUS_SCANNING,
+	WLP2P_STATUS_GO_NEG_PHASE,
+	WLP2P_STATUS_DISC_IN_PROGRESS
+};
+
+
+#define wl_to_p2p_bss_ndev(cfg, type)		((cfg)->p2p->bss[type].dev)
+#define wl_to_p2p_bss_bssidx(cfg, type)		((cfg)->p2p->bss[type].bssidx)
+#define wl_to_p2p_bss_saved_ie(cfg, type)	((cfg)->p2p->bss[type].saved_ie)
+#define wl_to_p2p_bss_private(cfg, type)		((cfg)->p2p->bss[type].private_data)
+#define wl_to_p2p_bss(cfg, type)			((cfg)->p2p->bss[type])
+#define wl_get_p2p_status(cfg, stat) ((!(cfg)->p2p_supported) ? 0 : \
+		test_bit(WLP2P_STATUS_ ## stat, &(cfg)->p2p->status))
+#define wl_set_p2p_status(cfg, stat) ((!(cfg)->p2p_supported) ? 0 : \
+		set_bit(WLP2P_STATUS_ ## stat, &(cfg)->p2p->status))
+#define wl_clr_p2p_status(cfg, stat) ((!(cfg)->p2p_supported) ? 0 : \
+		clear_bit(WLP2P_STATUS_ ## stat, &(cfg)->p2p->status))
+#define wl_chg_p2p_status(cfg, stat) ((!(cfg)->p2p_supported) ? 0 : \
+	change_bit(WLP2P_STATUS_ ## stat, &(cfg)->p2p->status))
+#define p2p_on(cfg) ((cfg)->p2p->on)
+#define p2p_scan(cfg) ((cfg)->p2p->scan)
+#define p2p_is_on(cfg) ((cfg)->p2p && (cfg)->p2p->on)
+
+/* dword align allocation */
+#define WLC_IOCTL_MAXLEN 8192
+
+#define CFGP2P_ERROR_TEXT		"CFGP2P-ERROR) "
+
+
+#define CFGP2P_ERR(args)									\
+	do {										\
+		if (wl_dbg_level & WL_DBG_ERR) {				\
+			printk(KERN_INFO CFGP2P_ERROR_TEXT "%s : ", __func__);	\
+			printk args;						\
+		}									\
+	} while (0)
+#define	CFGP2P_INFO(args)									\
+	do {										\
+		if (wl_dbg_level & WL_DBG_INFO) {				\
+			printk(KERN_INFO "CFGP2P-INFO) %s : ", __func__);	\
+			printk args;						\
+		}									\
+	} while (0)
+#define	CFGP2P_DBG(args)								\
+	do {									\
+		if (wl_dbg_level & WL_DBG_DBG) {			\
+			printk(KERN_DEBUG "CFGP2P-DEBUG) %s :", __func__);	\
+			printk args;							\
+		}									\
+	} while (0)
+
+#define	CFGP2P_ACTION(args)								\
+	do {									\
+		if (wl_dbg_level & WL_DBG_P2P_ACTION) {			\
+			printk(KERN_DEBUG "CFGP2P-ACTION) %s :", __func__);	\
+			printk args;							\
+		}									\
+	} while (0)
+#define INIT_TIMER(timer, func, duration, extra_delay)	\
+	do {				   \
+		init_timer(timer); \
+		timer->function = func; \
+		timer->expires = jiffies + msecs_to_jiffies(duration + extra_delay); \
+		timer->data = (unsigned long) cfg; \
+		add_timer(timer); \
+	} while (0);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) && !defined(WL_CFG80211_P2P_DEV_IF)
+#define WL_CFG80211_P2P_DEV_IF
+
+#ifdef WL_ENABLE_P2P_IF
+#undef WL_ENABLE_P2P_IF
+#endif
+
+#ifdef WL_SUPPORT_BACKPORTED_KPATCHES
+#undef WL_SUPPORT_BACKPORTED_KPATCHES
+#endif
+#else
+#ifdef WLP2P
+#ifndef WL_ENABLE_P2P_IF
+/* Enable P2P network Interface if P2P support is enabled */
+#define WL_ENABLE_P2P_IF
+#endif /* WL_ENABLE_P2P_IF */
+#endif /* WLP2P */
+#endif /* (LINUX_VERSION >= VERSION(3, 8, 0)) */
+
+#ifndef WL_CFG80211_P2P_DEV_IF
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+#if defined(WL_ENABLE_P2P_IF) && (defined(WL_CFG80211_P2P_DEV_IF) || \
+	(LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)))
+#error Disable 'WL_ENABLE_P2P_IF', if 'WL_CFG80211_P2P_DEV_IF' is enabled \
+	or kernel version is 3.8.0 or above
+#endif /* WL_ENABLE_P2P_IF && (WL_CFG80211_P2P_DEV_IF || (LINUX_VERSION >= VERSION(3, 8, 0))) */
+
+#if !defined(WLP2P) && (defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF))
+#error WLP2P not defined
+#endif /* !WLP2P && (WL_ENABLE_P2P_IF || WL_CFG80211_P2P_DEV_IF) */
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+#define bcm_struct_cfgdev	struct wireless_dev
+#else
+#define bcm_struct_cfgdev	struct net_device
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+extern void
+wl_cfgp2p_listen_expired(unsigned long data);
+extern bool
+wl_cfgp2p_is_pub_action(void *frame, u32 frame_len);
+extern bool
+wl_cfgp2p_is_p2p_action(void *frame, u32 frame_len);
+extern bool
+wl_cfgp2p_is_gas_action(void *frame, u32 frame_len);
+extern bool
+wl_cfgp2p_find_gas_subtype(u8 subtype, u8* data, u32 len);
+extern void
+wl_cfgp2p_print_actframe(bool tx, void *frame, u32 frame_len, u32 channel);
+extern s32
+wl_cfgp2p_init_priv(struct bcm_cfg80211 *cfg);
+extern void
+wl_cfgp2p_deinit_priv(struct bcm_cfg80211 *cfg);
+extern s32
+wl_cfgp2p_set_firm_p2p(struct bcm_cfg80211 *cfg);
+extern s32
+wl_cfgp2p_set_p2p_mode(struct bcm_cfg80211 *cfg, u8 mode,
+            u32 channel, u16 listen_ms, int bssidx);
+extern s32
+wl_cfgp2p_ifadd(struct bcm_cfg80211 *cfg, struct ether_addr *mac, u8 if_type,
+            chanspec_t chspec);
+extern s32
+wl_cfgp2p_ifdisable(struct bcm_cfg80211 *cfg, struct ether_addr *mac);
+extern s32
+wl_cfgp2p_ifdel(struct bcm_cfg80211 *cfg, struct ether_addr *mac);
+extern s32
+wl_cfgp2p_ifchange(struct bcm_cfg80211 *cfg, struct ether_addr *mac, u8 if_type, chanspec_t chspec);
+
+extern s32
+wl_cfgp2p_ifidx(struct bcm_cfg80211 *cfg, struct ether_addr *mac, s32 *index);
+
+extern s32
+wl_cfgp2p_init_discovery(struct bcm_cfg80211 *cfg);
+extern s32
+wl_cfgp2p_enable_discovery(struct bcm_cfg80211 *cfg, struct net_device *dev, const u8 *ie,
+	u32 ie_len);
+extern s32
+wl_cfgp2p_disable_discovery(struct bcm_cfg80211 *cfg);
+extern s32
+wl_cfgp2p_escan(struct bcm_cfg80211 *cfg, struct net_device *dev, u16 active, u32 num_chans,
+	u16 *channels,
+	s32 search_state, u16 action, u32 bssidx, struct ether_addr *tx_dst_addr,
+	p2p_scan_purpose_t p2p_scan_purpose);
+
+extern s32
+wl_cfgp2p_act_frm_search(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	s32 bssidx, s32 channel, struct ether_addr *tx_dst_addr);
+
+extern wpa_ie_fixed_t *
+wl_cfgp2p_find_wpaie(u8 *parse, u32 len);
+
+extern wpa_ie_fixed_t *
+wl_cfgp2p_find_wpsie(u8 *parse, u32 len);
+
+extern wifi_p2p_ie_t *
+wl_cfgp2p_find_p2pie(u8 *parse, u32 len);
+
+extern wifi_wfd_ie_t *
+wl_cfgp2p_find_wfdie(u8 *parse, u32 len);
+extern s32
+wl_cfgp2p_set_management_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx,
+            s32 pktflag, const u8 *vndr_ie, u32 vndr_ie_len);
+extern s32
+wl_cfgp2p_clear_management_ie(struct bcm_cfg80211 *cfg, s32 bssidx);
+
+extern s32
+wl_cfgp2p_find_idx(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 *index);
+extern struct net_device *
+wl_cfgp2p_find_ndev(struct bcm_cfg80211 *cfg, s32 bssidx);
+extern s32
+wl_cfgp2p_find_type(struct bcm_cfg80211 *cfg, s32 bssidx, s32 *type);
+
+
+extern s32
+wl_cfgp2p_listen_complete(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data);
+extern s32
+wl_cfgp2p_discover_listen(struct bcm_cfg80211 *cfg, s32 channel, u32 duration_ms);
+
+extern s32
+wl_cfgp2p_discover_enable_search(struct bcm_cfg80211 *cfg, u8 enable);
+
+extern s32
+wl_cfgp2p_action_tx_complete(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data);
+
+extern s32
+wl_cfgp2p_tx_action_frame(struct bcm_cfg80211 *cfg, struct net_device *dev,
+	wl_af_params_t *af_params, s32 bssidx);
+
+extern void
+wl_cfgp2p_generate_bss_mac(struct ether_addr *primary_addr, struct ether_addr *out_dev_addr,
+            struct ether_addr *out_int_addr);
+
+extern void
+wl_cfg80211_change_ifaddr(u8* buf, struct ether_addr *p2p_int_addr, u8 element_id);
+extern bool
+wl_cfgp2p_bss_isup(struct net_device *ndev, int bsscfg_idx);
+
+extern s32
+wl_cfgp2p_bss(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bsscfg_idx, s32 up);
+
+
+extern s32
+wl_cfgp2p_supported(struct bcm_cfg80211 *cfg, struct net_device *ndev);
+
+extern s32
+wl_cfgp2p_down(struct bcm_cfg80211 *cfg);
+
+extern s32
+wl_cfgp2p_set_p2p_noa(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len);
+
+extern s32
+wl_cfgp2p_get_p2p_noa(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len);
+
+extern s32
+wl_cfgp2p_set_p2p_ps(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len);
+
+extern u8 *
+wl_cfgp2p_retreive_p2pattrib(void *buf, u8 element_id);
+
+extern u8*
+wl_cfgp2p_find_attrib_in_all_p2p_Ies(u8 *parse, u32 len, u32 attrib);
+
+extern u8 *
+wl_cfgp2p_retreive_p2p_dev_addr(wl_bss_info_t *bi, u32 bi_length);
+
+extern s32
+wl_cfgp2p_register_ndev(struct bcm_cfg80211 *cfg);
+
+extern s32
+wl_cfgp2p_unregister_ndev(struct bcm_cfg80211 *cfg);
+
+extern bool
+wl_cfgp2p_is_ifops(const struct net_device_ops *if_ops);
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+extern struct wireless_dev *
+wl_cfgp2p_add_p2p_disc_if(struct bcm_cfg80211 *cfg);
+
+extern int
+wl_cfgp2p_start_p2p_device(struct wiphy *wiphy, struct wireless_dev *wdev);
+
+extern void
+wl_cfgp2p_stop_p2p_device(struct wiphy *wiphy, struct wireless_dev *wdev);
+
+extern int
+wl_cfgp2p_del_p2p_disc_if(struct wireless_dev *wdev, struct bcm_cfg80211 *cfg);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+/* WiFi Direct */
+#define SOCIAL_CHAN_1 1
+#define SOCIAL_CHAN_2 6
+#define SOCIAL_CHAN_3 11
+#define IS_P2P_SOCIAL_CHANNEL(channel) ((channel == SOCIAL_CHAN_1) || \
+					(channel == SOCIAL_CHAN_2) || \
+					(channel == SOCIAL_CHAN_3))
+#define SOCIAL_CHAN_CNT 3
+#define AF_PEER_SEARCH_CNT 2
+#define WL_P2P_WILDCARD_SSID "DIRECT-"
+#define WL_P2P_WILDCARD_SSID_LEN 7
+#define WL_P2P_INTERFACE_PREFIX "p2p"
+#define WL_P2P_TEMP_CHAN 11
+
+/* If the provision discovery is for JOIN operations,
+ * or the device discoverablity frame is destined to GO
+ * then we need not do an internal scan to find GO.
+ */
+#define IS_ACTPUB_WITHOUT_GROUP_ID(p2p_ie, len) \
+	(wl_cfgp2p_retreive_p2pattrib(p2p_ie, P2P_SEID_GROUP_ID) == NULL)
+
+#define IS_GAS_REQ(frame, len) (wl_cfgp2p_is_gas_action(frame, len) && \
+					((frame->action == P2PSD_ACTION_ID_GAS_IREQ) || \
+					(frame->action == P2PSD_ACTION_ID_GAS_CREQ)))
+
+#define IS_P2P_PUB_ACT_RSP_SUBTYPE(subtype) ((subtype == P2P_PAF_GON_RSP) || \
+							((subtype == P2P_PAF_GON_CONF) || \
+							(subtype == P2P_PAF_INVITE_RSP) || \
+							(subtype == P2P_PAF_PROVDIS_RSP)))
+#define IS_P2P_SOCIAL(ch) ((ch == SOCIAL_CHAN_1) || (ch == SOCIAL_CHAN_2) || (ch == SOCIAL_CHAN_3))
+#define IS_P2P_SSID(ssid, len) (!memcmp(ssid, WL_P2P_WILDCARD_SSID, WL_P2P_WILDCARD_SSID_LEN) && \
+					(len == WL_P2P_WILDCARD_SSID_LEN))
+#endif				/* _wl_cfgp2p_h_ */
diff --git a/drivers/net/wireless/bcmdhd/wl_cfgvendor.c b/drivers/net/wireless/bcmdhd/wl_cfgvendor.c
new file mode 100644
index 0000000..babadce
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_cfgvendor.c
@@ -0,0 +1,1342 @@
+/*
+ * Linux cfg80211 Vendor Extension Code
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ *
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ *
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_cfgvendor.c 473890 2014-04-30 01:55:06Z $
+*/
+
+/*
+ * New vendor interface additon to nl80211/cfg80211 to allow vendors
+ * to implement proprietary features over the cfg80211 stack.
+*/
+
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+#include <linux/kernel.h>
+
+#include <bcmutils.h>
+#include <bcmwifi_channels.h>
+#include <bcmendian.h>
+#include <proto/ethernet.h>
+#include <proto/802.11.h>
+#include <linux/if_arp.h>
+#include <asm/uaccess.h>
+
+
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhdioctl.h>
+#include <wlioctl.h>
+#include <dhd_cfg80211.h>
+#ifdef PNO_SUPPORT
+#include <dhd_pno.h>
+#endif /* PNO_SUPPORT */
+#ifdef RTT_SUPPORT
+#include <dhd_rtt.h>
+#endif /* RTT_SUPPORT */
+#include <proto/ethernet.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include <linux/sched.h>
+#include <linux/etherdevice.h>
+#include <linux/wireless.h>
+#include <linux/ieee80211.h>
+#include <linux/wait.h>
+#include <net/cfg80211.h>
+#include <net/rtnetlink.h>
+
+#include <wlioctl.h>
+#include <wldev_common.h>
+#include <wl_cfg80211.h>
+#include <wl_cfgp2p.h>
+#include <wl_android.h>
+#include <wl_cfgvendor.h>
+#ifdef PROP_TXSTATUS
+#include <dhd_wlfc.h>
+#endif
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT)
+
+/*
+ * This API is to be used for asynchronous vendor events. This
+ * shouldn't be used in response to a vendor command from its
+ * do_it handler context (instead wl_cfgvendor_send_cmd_reply should
+ * be used).
+ */
+int wl_cfgvendor_send_async_event(struct wiphy *wiphy,
+	struct net_device *dev, int event_id, const void  *data, int len)
+{
+	u16 kflags;
+	struct sk_buff *skb;
+
+	kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+
+	/* Alloc the SKB for vendor_event */
+	skb = cfg80211_vendor_event_alloc(wiphy, len, event_id, kflags);
+	if (!skb) {
+		WL_ERR(("skb alloc failed"));
+		return -ENOMEM;
+	}
+
+	/* Push the data to the skb */
+	nla_put_nohdr(skb, len, data);
+
+	cfg80211_vendor_event(skb, kflags);
+
+	return 0;
+}
+
+static int wl_cfgvendor_send_cmd_reply(struct wiphy *wiphy,
+	struct net_device *dev, const void  *data, int len)
+{
+	struct sk_buff *skb;
+
+	/* Alloc the SKB for vendor_event */
+	skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, len);
+	if (unlikely(!skb)) {
+		WL_ERR(("skb alloc failed"));
+		return -ENOMEM;
+	}
+
+	/* Push the data to the skb */
+	nla_put_nohdr(skb, len, data);
+
+	return cfg80211_vendor_cmd_reply(skb);
+}
+
+static int wl_cfgvendor_get_feature_set(struct wiphy *wiphy,
+	struct wireless_dev *wdev, const void  *data, int len)
+{
+	int err = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	int reply;
+
+	reply = dhd_dev_get_feature_set(bcmcfg_to_prmry_ndev(cfg));
+
+	err =  wl_cfgvendor_send_cmd_reply(wiphy, bcmcfg_to_prmry_ndev(cfg),
+	        &reply, sizeof(int));
+
+	if (unlikely(err))
+		WL_ERR(("Vendor Command reply failed ret:%d \n", err));
+
+	return err;
+}
+
+static int wl_cfgvendor_get_feature_set_matrix(struct wiphy *wiphy,
+	struct wireless_dev *wdev, const void  *data, int len)
+{
+	int err = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct sk_buff *skb;
+	int *reply;
+	int num, mem_needed, i;
+
+	reply = dhd_dev_get_feature_set_matrix(bcmcfg_to_prmry_ndev(cfg), &num);
+
+	if (!reply) {
+		WL_ERR(("Could not get feature list matrix\n"));
+		err = -EINVAL;
+		return err;
+	}
+
+	mem_needed = VENDOR_REPLY_OVERHEAD + (ATTRIBUTE_U32_LEN * num) +
+	             ATTRIBUTE_U32_LEN;
+
+	/* Alloc the SKB for vendor_event */
+	skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, mem_needed);
+	if (unlikely(!skb)) {
+		WL_ERR(("skb alloc failed"));
+		err = -ENOMEM;
+		goto exit;
+	}
+
+	nla_put_u32(skb, ANDR_WIFI_ATTRIBUTE_NUM_FEATURE_SET, num);
+	for (i = 0; i < num; i++) {
+		nla_put_u32(skb, ANDR_WIFI_ATTRIBUTE_FEATURE_SET, reply[i]);
+	}
+
+	err =  cfg80211_vendor_cmd_reply(skb);
+
+	if (unlikely(err))
+		WL_ERR(("Vendor Command reply failed ret:%d \n", err));
+exit:
+	kfree(reply);
+	return err;
+}
+
+static int wl_cfgvendor_set_pno_mac_oui(struct wiphy *wiphy,
+	struct wireless_dev *wdev, const void  *data, int len)
+{
+	int err = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	int type;
+	uint8 pno_random_mac_oui[DOT11_OUI_LEN];
+
+	type = nla_type(data);
+
+	if (type == ANDR_WIFI_ATTRIBUTE_PNO_RANDOM_MAC_OUI) {
+		memcpy(pno_random_mac_oui, nla_data(data), DOT11_OUI_LEN);
+
+		err = dhd_dev_pno_set_mac_oui(bcmcfg_to_prmry_ndev(cfg), pno_random_mac_oui);
+
+		if (unlikely(err))
+			WL_ERR(("Bad OUI, could not set:%d \n", err));
+
+	} else {
+		err = -1;
+	}
+
+	return err;
+}
+
+static int wl_cfgvendor_set_nodfs_flag(struct wiphy *wiphy,
+	struct wireless_dev *wdev, const void *data, int len)
+{
+	int err = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	int type;
+	u32 nodfs;
+
+	type = nla_type(data);
+	if (type == ANDR_WIFI_ATTRIBUTE_NODFS_SET) {
+		nodfs = nla_get_u32(data);
+		err = dhd_dev_set_nodfs(bcmcfg_to_prmry_ndev(cfg), nodfs);
+	} else {
+		err = -1;
+	}
+	return err;
+}
+
+#ifdef GSCAN_SUPPORT
+int wl_cfgvendor_send_hotlist_event(struct wiphy *wiphy,
+	struct net_device *dev, void  *data, int len, wl_vendor_event_t event)
+{
+	u16 kflags;
+	const void *ptr;
+	struct sk_buff *skb;
+	int malloc_len, total, iter_cnt_to_send, cnt;
+	gscan_results_cache_t *cache = (gscan_results_cache_t *)data;
+
+	total = len/sizeof(wifi_gscan_result_t);
+	while (total > 0) {
+		malloc_len = (total * sizeof(wifi_gscan_result_t)) + VENDOR_DATA_OVERHEAD;
+		if (malloc_len > NLMSG_DEFAULT_SIZE) {
+			malloc_len = NLMSG_DEFAULT_SIZE;
+		}
+		iter_cnt_to_send =
+		   (malloc_len - VENDOR_DATA_OVERHEAD)/sizeof(wifi_gscan_result_t);
+		total = total - iter_cnt_to_send;
+
+		kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+
+		/* Alloc the SKB for vendor_event */
+		skb = cfg80211_vendor_event_alloc(wiphy, malloc_len, event, kflags);
+		if (!skb) {
+			WL_ERR(("skb alloc failed"));
+			return -ENOMEM;
+		}
+
+		while (cache && iter_cnt_to_send) {
+			ptr = (const void *) &cache->results[cache->tot_consumed];
+
+			if (iter_cnt_to_send < (cache->tot_count - cache->tot_consumed))
+				cnt = iter_cnt_to_send;
+			else
+				cnt = (cache->tot_count - cache->tot_consumed);
+
+			iter_cnt_to_send -= cnt;
+			cache->tot_consumed += cnt;
+			/* Push the data to the skb */
+			nla_append(skb, cnt * sizeof(wifi_gscan_result_t), ptr);
+			if (cache->tot_consumed == cache->tot_count)
+				cache = cache->next;
+
+		}
+
+		cfg80211_vendor_event(skb, kflags);
+	}
+
+	return 0;
+}
+
+
+static int wl_cfgvendor_gscan_get_capabilities(struct wiphy *wiphy,
+	struct wireless_dev *wdev, const void  *data, int len)
+{
+	int err = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	dhd_pno_gscan_capabilities_t *reply = NULL;
+	uint32 reply_len = 0;
+
+
+	reply = dhd_dev_pno_get_gscan(bcmcfg_to_prmry_ndev(cfg),
+	   DHD_PNO_GET_CAPABILITIES, NULL, &reply_len);
+	if (!reply) {
+		WL_ERR(("Could not get capabilities\n"));
+		err = -EINVAL;
+		return err;
+	}
+
+	err =  wl_cfgvendor_send_cmd_reply(wiphy, bcmcfg_to_prmry_ndev(cfg),
+	        reply, reply_len);
+
+	if (unlikely(err))
+		WL_ERR(("Vendor Command reply failed ret:%d \n", err));
+
+	kfree(reply);
+	return err;
+}
+
+static int wl_cfgvendor_gscan_get_channel_list(struct wiphy *wiphy,
+	struct wireless_dev *wdev, const void  *data, int len)
+{
+	int err = 0, type, band;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	uint16 *reply = NULL;
+	uint32 reply_len = 0, num_channels, mem_needed;
+	struct sk_buff *skb;
+
+	type = nla_type(data);
+
+	if (type == GSCAN_ATTRIBUTE_BAND) {
+		band = nla_get_u32(data);
+	} else {
+		return -1;
+	}
+
+	reply = dhd_dev_pno_get_gscan(bcmcfg_to_prmry_ndev(cfg),
+	   DHD_PNO_GET_CHANNEL_LIST, &band, &reply_len);
+
+	if (!reply) {
+		WL_ERR(("Could not get channel list\n"));
+		err = -EINVAL;
+		return err;
+	}
+	num_channels =  reply_len/ sizeof(uint32);
+	mem_needed = reply_len + VENDOR_REPLY_OVERHEAD + (ATTRIBUTE_U32_LEN * 2);
+
+	/* Alloc the SKB for vendor_event */
+	skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, mem_needed);
+	if (unlikely(!skb)) {
+		WL_ERR(("skb alloc failed"));
+		err = -ENOMEM;
+		goto exit;
+	}
+
+	nla_put_u32(skb, GSCAN_ATTRIBUTE_NUM_CHANNELS, num_channels);
+	nla_put(skb, GSCAN_ATTRIBUTE_CHANNEL_LIST, reply_len, reply);
+
+	err =  cfg80211_vendor_cmd_reply(skb);
+
+	if (unlikely(err))
+		WL_ERR(("Vendor Command reply failed ret:%d \n", err));
+exit:
+	kfree(reply);
+	return err;
+}
+
+static int wl_cfgvendor_gscan_get_batch_results(struct wiphy *wiphy,
+	struct wireless_dev *wdev, const void  *data, int len)
+{
+	int err = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	gscan_results_cache_t *results, *iter;
+	uint32 reply_len, complete = 0;
+	int32 mem_needed, num_results_iter;
+	wifi_gscan_result_t *ptr;
+	uint16 num_scan_ids, num_results;
+	struct sk_buff *skb;
+	struct nlattr *scan_hdr;
+
+	dhd_dev_wait_batch_results_complete(bcmcfg_to_prmry_ndev(cfg));
+	dhd_dev_pno_lock_access_batch_results(bcmcfg_to_prmry_ndev(cfg));
+	results = dhd_dev_pno_get_gscan(bcmcfg_to_prmry_ndev(cfg),
+	             DHD_PNO_GET_BATCH_RESULTS, NULL, &reply_len);
+
+	if (!results) {
+		WL_ERR(("No results to send %d\n", err));
+		err =  wl_cfgvendor_send_cmd_reply(wiphy, bcmcfg_to_prmry_ndev(cfg),
+		        results, 0);
+
+		if (unlikely(err))
+			WL_ERR(("Vendor Command reply failed ret:%d \n", err));
+		dhd_dev_pno_unlock_access_batch_results(bcmcfg_to_prmry_ndev(cfg));
+		return err;
+	}
+	num_scan_ids = reply_len & 0xFFFF;
+	num_results = (reply_len & 0xFFFF0000) >> 16;
+	mem_needed = (num_results * sizeof(wifi_gscan_result_t)) +
+	             (num_scan_ids * GSCAN_BATCH_RESULT_HDR_LEN) +
+	             VENDOR_REPLY_OVERHEAD + SCAN_RESULTS_COMPLETE_FLAG_LEN;
+
+	if (mem_needed > (int32)NLMSG_DEFAULT_SIZE) {
+		mem_needed = (int32)NLMSG_DEFAULT_SIZE;
+		complete = 0;
+	} else {
+		complete = 1;
+	}
+
+	WL_TRACE(("complete %d mem_needed %d max_mem %d\n", complete, mem_needed,
+		(int)NLMSG_DEFAULT_SIZE));
+	/* Alloc the SKB for vendor_event */
+	skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, mem_needed);
+	if (unlikely(!skb)) {
+		WL_ERR(("skb alloc failed"));
+		dhd_dev_pno_unlock_access_batch_results(bcmcfg_to_prmry_ndev(cfg));
+		return -ENOMEM;
+	}
+	iter = results;
+
+	nla_put_u32(skb, GSCAN_ATTRIBUTE_SCAN_RESULTS_COMPLETE, complete);
+
+	mem_needed = mem_needed - (SCAN_RESULTS_COMPLETE_FLAG_LEN + VENDOR_REPLY_OVERHEAD);
+
+	while (iter) {
+		num_results_iter =
+		    (mem_needed - GSCAN_BATCH_RESULT_HDR_LEN)/sizeof(wifi_gscan_result_t);
+		if (num_results_iter <= 0 ||
+		    ((iter->tot_count - iter->tot_consumed) > num_results_iter))
+			break;
+		scan_hdr = nla_nest_start(skb, GSCAN_ATTRIBUTE_SCAN_RESULTS);
+		nla_put_u32(skb, GSCAN_ATTRIBUTE_SCAN_ID, iter->scan_id);
+		nla_put_u8(skb, GSCAN_ATTRIBUTE_SCAN_FLAGS, iter->flag);
+		num_results_iter = iter->tot_count - iter->tot_consumed;
+
+		nla_put_u32(skb, GSCAN_ATTRIBUTE_NUM_OF_RESULTS, num_results_iter);
+		if (num_results_iter) {
+			ptr = &iter->results[iter->tot_consumed];
+			iter->tot_consumed += num_results_iter;
+			nla_put(skb, GSCAN_ATTRIBUTE_SCAN_RESULTS,
+			 num_results_iter * sizeof(wifi_gscan_result_t), ptr);
+		}
+		nla_nest_end(skb, scan_hdr);
+		mem_needed -= GSCAN_BATCH_RESULT_HDR_LEN +
+		    (num_results_iter * sizeof(wifi_gscan_result_t));
+		iter = iter->next;
+	}
+
+	dhd_dev_gscan_batch_cache_cleanup(bcmcfg_to_prmry_ndev(cfg));
+	dhd_dev_pno_unlock_access_batch_results(bcmcfg_to_prmry_ndev(cfg));
+
+	return cfg80211_vendor_cmd_reply(skb);
+}
+
+static int wl_cfgvendor_initiate_gscan(struct wiphy *wiphy,
+	struct wireless_dev *wdev, const void  *data, int len)
+{
+	int err = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	int type, tmp = len;
+	int run = 0xFF;
+	int flush = 0;
+	const struct nlattr *iter;
+
+	nla_for_each_attr(iter, data, len, tmp) {
+		type = nla_type(iter);
+		if (type == GSCAN_ATTRIBUTE_ENABLE_FEATURE)
+			run = nla_get_u32(iter);
+		else if (type == GSCAN_ATTRIBUTE_FLUSH_FEATURE)
+			flush = nla_get_u32(iter);
+	}
+
+	if (run != 0xFF) {
+		err = dhd_dev_pno_run_gscan(bcmcfg_to_prmry_ndev(cfg), run, flush);
+
+		if (unlikely(err))
+			WL_ERR(("Could not run gscan:%d \n", err));
+		return err;
+	} else {
+		return -1;
+	}
+
+
+}
+
+static int wl_cfgvendor_enable_full_scan_result(struct wiphy *wiphy,
+	struct wireless_dev *wdev, const void  *data, int len)
+{
+	int err = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	int type;
+	bool real_time = FALSE;
+
+	type = nla_type(data);
+
+	if (type == GSCAN_ATTRIBUTE_ENABLE_FULL_SCAN_RESULTS) {
+		real_time = nla_get_u32(data);
+
+		err = dhd_dev_pno_enable_full_scan_result(bcmcfg_to_prmry_ndev(cfg), real_time);
+
+		if (unlikely(err))
+			WL_ERR(("Could not run gscan:%d \n", err));
+
+	} else {
+		err = -1;
+	}
+
+	return err;
+}
+
+static int wl_cfgvendor_set_scan_cfg(struct wiphy *wiphy,
+	struct wireless_dev *wdev, const void  *data, int len)
+{
+	int err = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	gscan_scan_params_t *scan_param;
+	int j = 0;
+	int type, tmp, tmp1, tmp2, k = 0;
+	const struct nlattr *iter, *iter1, *iter2;
+	struct dhd_pno_gscan_channel_bucket  *ch_bucket;
+
+	scan_param = kzalloc(sizeof(gscan_scan_params_t), GFP_KERNEL);
+	if (!scan_param) {
+		WL_ERR(("Could not set GSCAN scan cfg, mem alloc failure\n"));
+		err = -EINVAL;
+		return err;
+
+	}
+
+	scan_param->scan_fr = PNO_SCAN_MIN_FW_SEC;
+	nla_for_each_attr(iter, data, len, tmp) {
+		type = nla_type(iter);
+
+		if (j >= GSCAN_MAX_CH_BUCKETS)
+			break;
+
+		switch (type) {
+			case GSCAN_ATTRIBUTE_BASE_PERIOD:
+				scan_param->scan_fr = nla_get_u32(iter)/1000;
+				break;
+			case GSCAN_ATTRIBUTE_NUM_BUCKETS:
+				scan_param->nchannel_buckets = nla_get_u32(iter);
+				break;
+			case GSCAN_ATTRIBUTE_CH_BUCKET_1:
+			case GSCAN_ATTRIBUTE_CH_BUCKET_2:
+			case GSCAN_ATTRIBUTE_CH_BUCKET_3:
+			case GSCAN_ATTRIBUTE_CH_BUCKET_4:
+			case GSCAN_ATTRIBUTE_CH_BUCKET_5:
+			case GSCAN_ATTRIBUTE_CH_BUCKET_6:
+			case GSCAN_ATTRIBUTE_CH_BUCKET_7:
+				nla_for_each_nested(iter1, iter, tmp1) {
+					type = nla_type(iter1);
+					ch_bucket =
+					scan_param->channel_bucket;
+
+					switch (type) {
+						case GSCAN_ATTRIBUTE_BUCKET_ID:
+						break;
+						case GSCAN_ATTRIBUTE_BUCKET_PERIOD:
+							ch_bucket[j].bucket_freq_multiple =
+							    nla_get_u32(iter1)/1000;
+							break;
+						case GSCAN_ATTRIBUTE_BUCKET_NUM_CHANNELS:
+							ch_bucket[j].num_channels =
+							     nla_get_u32(iter1);
+							break;
+						case GSCAN_ATTRIBUTE_BUCKET_CHANNELS:
+							nla_for_each_nested(iter2, iter1, tmp2) {
+								if (k >= PFN_SWC_RSSI_WINDOW_MAX)
+									break;
+								ch_bucket[j].chan_list[k] =
+								     nla_get_u32(iter2);
+								k++;
+							}
+							k = 0;
+							break;
+						case GSCAN_ATTRIBUTE_BUCKETS_BAND:
+							ch_bucket[j].band = (uint16)
+							     nla_get_u32(iter1);
+							break;
+						case GSCAN_ATTRIBUTE_REPORT_EVENTS:
+							ch_bucket[j].report_flag = (uint8)
+							     nla_get_u32(iter1);
+							break;
+					}
+				}
+				j++;
+				break;
+		}
+	}
+
+	if (dhd_dev_pno_set_cfg_gscan(bcmcfg_to_prmry_ndev(cfg),
+	     DHD_PNO_SCAN_CFG_ID, scan_param, 0) < 0) {
+		WL_ERR(("Could not set GSCAN scan cfg\n"));
+		err = -EINVAL;
+	}
+
+	kfree(scan_param);
+	return err;
+
+}
+
+static int wl_cfgvendor_hotlist_cfg(struct wiphy *wiphy,
+	struct wireless_dev *wdev, const void  *data, int len)
+{
+	int err = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	gscan_hotlist_scan_params_t *hotlist_params;
+	int tmp, tmp1, tmp2, type, j = 0, dummy;
+	const struct nlattr *outer, *inner, *iter;
+	uint8 flush = 0;
+	struct bssid_t *pbssid;
+
+	hotlist_params = (gscan_hotlist_scan_params_t *)kzalloc(len, GFP_KERNEL);
+	if (!hotlist_params) {
+		WL_ERR(("Cannot Malloc mem to parse config commands size - %d bytes \n", len));
+		return -1;
+	}
+
+	hotlist_params->lost_ap_window = GSCAN_LOST_AP_WINDOW_DEFAULT;
+
+	nla_for_each_attr(iter, data, len, tmp2) {
+		type = nla_type(iter);
+		switch (type) {
+			case GSCAN_ATTRIBUTE_HOTLIST_BSSIDS:
+				pbssid = hotlist_params->bssid;
+				nla_for_each_nested(outer, iter, tmp) {
+					nla_for_each_nested(inner, outer, tmp1) {
+						type = nla_type(inner);
+
+						switch (type) {
+							case GSCAN_ATTRIBUTE_BSSID:
+								memcpy(&(pbssid[j].macaddr),
+								  nla_data(inner), ETHER_ADDR_LEN);
+								break;
+							case GSCAN_ATTRIBUTE_RSSI_LOW:
+								pbssid[j].rssi_reporting_threshold =
+								         (int8) nla_get_u8(inner);
+								break;
+							case GSCAN_ATTRIBUTE_RSSI_HIGH:
+								dummy = (int8) nla_get_u8(inner);
+								break;
+						}
+					}
+					j++;
+				}
+				hotlist_params->nbssid = j;
+				break;
+			case GSCAN_ATTRIBUTE_HOTLIST_FLUSH:
+				flush = nla_get_u8(iter);
+				break;
+			case GSCAN_ATTRIBUTE_LOST_AP_SAMPLE_SIZE:
+				hotlist_params->lost_ap_window = nla_get_u32(iter);
+				break;
+			}
+
+	}
+
+	if (dhd_dev_pno_set_cfg_gscan(bcmcfg_to_prmry_ndev(cfg),
+	      DHD_PNO_GEOFENCE_SCAN_CFG_ID, hotlist_params, flush) < 0) {
+		WL_ERR(("Could not set GSCAN HOTLIST cfg\n"));
+		err = -EINVAL;
+		goto exit;
+	}
+exit:
+	kfree(hotlist_params);
+	return err;
+}
+static int wl_cfgvendor_set_batch_scan_cfg(struct wiphy *wiphy,
+	struct wireless_dev *wdev, const void  *data, int len)
+{
+	int err = 0, tmp, type;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	gscan_batch_params_t batch_param;
+	const struct nlattr *iter;
+
+	batch_param.mscan = batch_param.bestn = 0;
+	batch_param.buffer_threshold = GSCAN_BATCH_NO_THR_SET;
+
+	nla_for_each_attr(iter, data, len, tmp) {
+		type = nla_type(iter);
+
+		switch (type) {
+			case GSCAN_ATTRIBUTE_NUM_AP_PER_SCAN:
+				batch_param.bestn = nla_get_u32(iter);
+				break;
+			case GSCAN_ATTRIBUTE_NUM_SCANS_TO_CACHE:
+				batch_param.mscan = nla_get_u32(iter);
+				break;
+			case GSCAN_ATTRIBUTE_REPORT_THRESHOLD:
+				batch_param.buffer_threshold = nla_get_u32(iter);
+				break;
+		}
+	}
+
+	if (dhd_dev_pno_set_cfg_gscan(bcmcfg_to_prmry_ndev(cfg),
+	       DHD_PNO_BATCH_SCAN_CFG_ID, &batch_param, 0) < 0) {
+		WL_ERR(("Could not set batch cfg\n"));
+		err = -EINVAL;
+		return err;
+	}
+
+	return err;
+}
+
+static int wl_cfgvendor_significant_change_cfg(struct wiphy *wiphy,
+	struct wireless_dev *wdev, const void  *data, int len)
+{
+	int err = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	gscan_swc_params_t *significant_params;
+	int tmp, tmp1, tmp2, type, j = 0;
+	const struct nlattr *outer, *inner, *iter;
+	uint8 flush = 0;
+	wl_pfn_significant_bssid_t *pbssid;
+
+	significant_params = (gscan_swc_params_t *) kzalloc(len, GFP_KERNEL);
+	if (!significant_params) {
+		WL_ERR(("Cannot Malloc mem to parse config commands size - %d bytes \n", len));
+		return -1;
+	}
+
+
+	nla_for_each_attr(iter, data, len, tmp2) {
+		type = nla_type(iter);
+
+		switch (type) {
+			case GSCAN_ATTRIBUTE_SIGNIFICANT_CHANGE_FLUSH:
+			flush = nla_get_u8(iter);
+			break;
+			case GSCAN_ATTRIBUTE_RSSI_SAMPLE_SIZE:
+				significant_params->rssi_window = nla_get_u16(iter);
+				break;
+			case GSCAN_ATTRIBUTE_LOST_AP_SAMPLE_SIZE:
+				significant_params->lost_ap_window = nla_get_u16(iter);
+				break;
+			case GSCAN_ATTRIBUTE_MIN_BREACHING:
+				significant_params->swc_threshold = nla_get_u16(iter);
+				break;
+			case GSCAN_ATTRIBUTE_SIGNIFICANT_CHANGE_BSSIDS:
+				pbssid = significant_params->bssid_elem_list;
+				nla_for_each_nested(outer, iter, tmp) {
+					nla_for_each_nested(inner, outer, tmp1) {
+							switch (nla_type(inner)) {
+								case GSCAN_ATTRIBUTE_BSSID:
+								memcpy(&(pbssid[j].macaddr),
+								     nla_data(inner),
+								     ETHER_ADDR_LEN);
+								break;
+								case GSCAN_ATTRIBUTE_RSSI_HIGH:
+								pbssid[j].rssi_high_threshold =
+								       (int8) nla_get_u8(inner);
+								break;
+								case GSCAN_ATTRIBUTE_RSSI_LOW:
+								pbssid[j].rssi_low_threshold =
+								      (int8) nla_get_u8(inner);
+								break;
+							}
+						}
+					j++;
+				}
+				break;
+		}
+	}
+	significant_params->nbssid = j;
+
+	if (dhd_dev_pno_set_cfg_gscan(bcmcfg_to_prmry_ndev(cfg),
+	    DHD_PNO_SIGNIFICANT_SCAN_CFG_ID, significant_params, flush) < 0) {
+		WL_ERR(("Could not set GSCAN significant cfg\n"));
+		err = -EINVAL;
+		goto exit;
+	}
+exit:
+	kfree(significant_params);
+	return err;
+}
+#endif /* GSCAN_SUPPORT */
+
+#ifdef RTT_SUPPORT
+void wl_cfgvendor_rtt_evt(void *ctx, void *rtt_data)
+{
+	struct wireless_dev *wdev = (struct wireless_dev *)ctx;
+	struct wiphy *wiphy;
+	struct sk_buff *skb;
+	uint32 tot_len = NLMSG_DEFAULT_SIZE, entry_len = 0;
+	gfp_t kflags;
+	rtt_report_t *rtt_report = NULL;
+	rtt_result_t *rtt_result = NULL;
+	struct list_head *rtt_list;
+	wiphy = wdev->wiphy;
+
+	WL_DBG(("In\n"));
+	/* Push the data to the skb */
+	if (!rtt_data) {
+		WL_ERR(("rtt_data is NULL\n"));
+		goto exit;
+	}
+	rtt_list = (struct list_head *)rtt_data;
+	kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+	/* Alloc the SKB for vendor_event */
+	skb = cfg80211_vendor_event_alloc(wiphy, tot_len, GOOGLE_RTT_COMPLETE_EVENT, kflags);
+	if (!skb) {
+		WL_ERR(("skb alloc failed"));
+		goto exit;
+	}
+	/* fill in the rtt results on each entry */
+	list_for_each_entry(rtt_result, rtt_list, list) {
+		entry_len = 0;
+		entry_len = sizeof(rtt_report_t);
+		rtt_report = kzalloc(entry_len, kflags);
+		if (!rtt_report) {
+			WL_ERR(("rtt_report alloc failed"));
+			goto exit;
+		}
+		rtt_report->addr = rtt_result->peer_mac;
+		rtt_report->num_measurement = 1; /* ONE SHOT */
+		rtt_report->status = rtt_result->err_code;
+		rtt_report->type = (rtt_result->TOF_type == TOF_TYPE_ONE_WAY) ? RTT_ONE_WAY: RTT_TWO_WAY;
+		rtt_report->peer = rtt_result->target_info->peer;
+		rtt_report->channel = rtt_result->target_info->channel;
+		rtt_report->rssi = rtt_result->avg_rssi;
+		/* tx_rate */
+		rtt_report->tx_rate = rtt_result->tx_rate;
+		/* RTT */
+		rtt_report->rtt = rtt_result->meanrtt;
+		rtt_report->rtt_sd = rtt_result->sdrtt/10;
+		/* convert to centi meter */
+		if (rtt_result->distance != 0xffffffff)
+			rtt_report->distance = (rtt_result->distance >> 2) * 25;
+		else /* invalid distance */
+			rtt_report->distance = -1;
+		rtt_report->ts = rtt_result->ts;
+		nla_append(skb, entry_len, rtt_report);
+		kfree(rtt_report);
+	}
+	cfg80211_vendor_event(skb, kflags);
+exit:
+	return;
+}
+
+static int wl_cfgvendor_rtt_set_config(struct wiphy *wiphy, struct wireless_dev *wdev,
+					const void *data, int len) {
+	int err = 0, rem, rem1, rem2, type;
+	rtt_config_params_t rtt_param;
+	rtt_target_info_t* rtt_target = NULL;
+	const struct nlattr *iter, *iter1, *iter2;
+	int8 eabuf[ETHER_ADDR_STR_LEN];
+	int8 chanbuf[CHANSPEC_STR_LEN];
+	int32 feature_set = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	feature_set = dhd_dev_get_feature_set(bcmcfg_to_prmry_ndev(cfg));
+
+	WL_DBG(("In\n"));
+	err = dhd_dev_rtt_register_noti_callback(wdev->netdev, wdev, wl_cfgvendor_rtt_evt);
+	if (err < 0) {
+		WL_ERR(("failed to register rtt_noti_callback\n"));
+		goto exit;
+	}
+	memset(&rtt_param, 0, sizeof(rtt_param));
+	nla_for_each_attr(iter, data, len, rem) {
+		type = nla_type(iter);
+		switch (type) {
+		case RTT_ATTRIBUTE_TARGET_CNT:
+			rtt_param.rtt_target_cnt = nla_get_u8(iter);
+			if (rtt_param.rtt_target_cnt > RTT_MAX_TARGET_CNT) {
+				WL_ERR(("exceed max target count : %d\n",
+					rtt_param.rtt_target_cnt));
+				err = BCME_RANGE;
+				goto exit;
+			}
+			break;
+		case RTT_ATTRIBUTE_TARGET_INFO:
+			rtt_target = rtt_param.target_info;
+			nla_for_each_nested(iter1, iter, rem1) {
+				nla_for_each_nested(iter2, iter1, rem2) {
+					type = nla_type(iter2);
+					switch (type) {
+					case RTT_ATTRIBUTE_TARGET_MAC:
+						memcpy(&rtt_target->addr, nla_data(iter2),
+							ETHER_ADDR_LEN);
+						break;
+					case RTT_ATTRIBUTE_TARGET_TYPE:
+						rtt_target->type = nla_get_u8(iter2);
+						if (!(feature_set & WIFI_FEATURE_D2D_RTT)) {
+							if (rtt_target->type == RTT_TWO_WAY ||
+								rtt_target->type == RTT_INVALID) {
+								WL_ERR(("doesn't support RTT type : %d\n",
+									rtt_target->type));
+								err = -EINVAL;
+								goto exit;
+							} else if (rtt_target->type == RTT_AUTO) {
+								rtt_target->type = RTT_ONE_WAY;
+							}
+						} else {
+							if (rtt_target->type == RTT_INVALID) {
+								WL_ERR(("doesn't support RTT type : %d\n",
+									rtt_target->type));
+								err = -EINVAL;
+								goto exit;
+							}
+						}
+						break;
+					case RTT_ATTRIBUTE_TARGET_PEER:
+						rtt_target->peer= nla_get_u8(iter2);
+						if (rtt_target->peer != RTT_PEER_AP) {
+							WL_ERR(("doesn't support peer type : %d\n",
+								rtt_target->peer));
+							err = -EINVAL;
+							goto exit;
+						}
+						break;
+					case RTT_ATTRIBUTE_TARGET_CHAN:
+						memcpy(&rtt_target->channel, nla_data(iter2),
+							sizeof(rtt_target->channel));
+						break;
+					case RTT_ATTRIBUTE_TARGET_MODE:
+						rtt_target->continuous = nla_get_u8(iter2);
+						break;
+					case RTT_ATTRIBUTE_TARGET_INTERVAL:
+						rtt_target->interval = nla_get_u32(iter2);
+						break;
+					case RTT_ATTRIBUTE_TARGET_NUM_MEASUREMENT:
+						rtt_target->measure_cnt = nla_get_u32(iter2);
+						break;
+					case RTT_ATTRIBUTE_TARGET_NUM_PKT:
+						rtt_target->ftm_cnt = nla_get_u32(iter2);
+						break;
+					case RTT_ATTRIBUTE_TARGET_NUM_RETRY:
+						rtt_target->retry_cnt = nla_get_u32(iter2);
+					}
+				}
+				/* convert to chanspec value */
+				rtt_target->chanspec = dhd_rtt_convert_to_chspec(rtt_target->channel);
+				if (rtt_target->chanspec == 0) {
+					WL_ERR(("Channel is not valid \n"));
+					goto exit;
+				}
+				WL_INFORM(("Target addr %s, Channel : %s for RTT \n",
+					bcm_ether_ntoa((const struct ether_addr *)&rtt_target->addr, eabuf),
+					wf_chspec_ntoa(rtt_target->chanspec, chanbuf)));
+				rtt_target++;
+			}
+			break;
+		}
+	}
+	WL_DBG(("leave :target_cnt : %d\n", rtt_param.rtt_target_cnt));
+	if (dhd_dev_rtt_set_cfg(bcmcfg_to_prmry_ndev(cfg), &rtt_param) < 0) {
+		WL_ERR(("Could not set RTT configuration\n"));
+		err = -EINVAL;
+	}
+exit:
+	return err;
+}
+
+static int wl_cfgvendor_rtt_cancel_config(struct wiphy *wiphy, struct wireless_dev *wdev,
+					const void *data, int len)
+{
+	int err = 0, rem, type, target_cnt = 0;
+	const struct nlattr *iter;
+	struct ether_addr *mac_list = NULL, *mac_addr = NULL;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+
+	nla_for_each_attr(iter, data, len, rem) {
+		type = nla_type(iter);
+		switch (type) {
+		case RTT_ATTRIBUTE_TARGET_CNT:
+			target_cnt = nla_get_u8(iter);
+			mac_list = (struct ether_addr *)kzalloc(target_cnt * ETHER_ADDR_LEN , GFP_KERNEL);
+			if (mac_list == NULL) {
+				WL_ERR(("failed to allocate mem for mac list\n"));
+				goto exit;
+			}
+			mac_addr = &mac_list[0];
+			break;
+		case RTT_ATTRIBUTE_TARGET_MAC:
+			if (mac_addr)
+				memcpy(mac_addr++, nla_data(iter), ETHER_ADDR_LEN);
+			else {
+				WL_ERR(("mac_list is NULL\n"));
+				goto exit;
+			}
+			break;
+		}
+		if (dhd_dev_rtt_cancel_cfg(bcmcfg_to_prmry_ndev(cfg), mac_list, target_cnt) < 0) {
+			WL_ERR(("Could not cancel RTT configuration\n"));
+			err = -EINVAL;
+			goto exit;
+		}
+	}
+exit:
+	if (mac_list)
+		kfree(mac_list);
+	return err;
+}
+static int wl_cfgvendor_rtt_get_capability(struct wiphy *wiphy, struct wireless_dev *wdev,
+					const void *data, int len)
+{
+	int err = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	rtt_capabilities_t capability;
+
+	err = dhd_dev_rtt_capability(bcmcfg_to_prmry_ndev(cfg), &capability);
+	if (unlikely(err)) {
+		WL_ERR(("Vendor Command reply failed ret:%d \n", err));
+		goto exit;
+	}
+	err =  wl_cfgvendor_send_cmd_reply(wiphy, bcmcfg_to_prmry_ndev(cfg),
+	        &capability, sizeof(capability));
+
+	if (unlikely(err)) {
+		WL_ERR(("Vendor Command reply failed ret:%d \n", err));
+	}
+exit:
+	return err;
+}
+
+#endif /* RTT_SUPPORT */
+static int wl_cfgvendor_priv_string_handler(struct wiphy *wiphy,
+	struct wireless_dev *wdev, const void  *data, int len)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	int err = 0;
+	int data_len = 0;
+
+	bzero(cfg->ioctl_buf, WLC_IOCTL_MAXLEN);
+
+	if (strncmp((char *)data, BRCM_VENDOR_SCMD_CAPA, strlen(BRCM_VENDOR_SCMD_CAPA)) == 0) {
+		err = wldev_iovar_getbuf(bcmcfg_to_prmry_ndev(cfg), "cap", NULL, 0,
+			cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+		if (unlikely(err)) {
+			WL_ERR(("error (%d)\n", err));
+			return err;
+		}
+		data_len = strlen(cfg->ioctl_buf);
+		cfg->ioctl_buf[data_len] = '\0';
+	}
+
+	err =  wl_cfgvendor_send_cmd_reply(wiphy, bcmcfg_to_prmry_ndev(cfg),
+		cfg->ioctl_buf, data_len+1);
+	if (unlikely(err))
+		WL_ERR(("Vendor Command reply failed ret:%d \n", err));
+	else
+		WL_INFORM(("Vendor Command reply sent successfully!\n"));
+
+	return err;
+}
+
+#ifdef LINKSTAT_SUPPORT
+#define NUM_RATE 32
+#define NUM_PEER 1
+#define NUM_CHAN 11
+static int wl_cfgvendor_lstats_get_info(struct wiphy *wiphy,
+	struct wireless_dev *wdev, const void  *data, int len)
+{
+	static char iovar_buf[WLC_IOCTL_MAXLEN];
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	int err = 0;
+	wifi_iface_stat *iface;
+	wifi_radio_stat *radio;
+	wl_wme_cnt_t *wl_wme_cnt;
+	wl_cnt_t *wl_cnt;
+	char *output;
+
+	WL_INFORM(("%s: Enter \n", __func__));
+
+	bzero(cfg->ioctl_buf, WLC_IOCTL_MAXLEN);
+	bzero(iovar_buf, WLC_IOCTL_MAXLEN);
+
+	output = cfg->ioctl_buf;
+	radio = (wifi_radio_stat *)output;
+
+	err = wldev_iovar_getbuf(bcmcfg_to_prmry_ndev(cfg), "radiostat", NULL, 0,
+		iovar_buf, WLC_IOCTL_MAXLEN, NULL);
+	if (unlikely(err)) {
+		WL_ERR(("error (%d) - size = %zu\n", err, sizeof(wifi_radio_stat)));
+		return err;
+	}
+	memcpy(output, iovar_buf, sizeof(wifi_radio_stat));
+
+	radio->num_channels = NUM_CHAN;
+	output += sizeof(wifi_radio_stat);
+	output += (NUM_CHAN*sizeof(wifi_channel_stat));
+
+	err = wldev_iovar_getbuf(bcmcfg_to_prmry_ndev(cfg), "wme_counters", NULL, 0,
+		iovar_buf, WLC_IOCTL_MAXLEN, NULL);
+	if (unlikely(err)) {
+		WL_ERR(("error (%d)\n", err));
+		return err;
+	}
+	wl_wme_cnt = (wl_wme_cnt_t *)iovar_buf;
+	iface = (wifi_iface_stat *)output;
+
+	iface->ac[WIFI_AC_VO].ac = WIFI_AC_VO;
+	iface->ac[WIFI_AC_VO].tx_mpdu = wl_wme_cnt->tx[AC_VO].packets;
+	iface->ac[WIFI_AC_VO].rx_mpdu = wl_wme_cnt->rx[AC_VO].packets;
+	iface->ac[WIFI_AC_VO].mpdu_lost = wl_wme_cnt->tx_failed[WIFI_AC_VO].packets;
+
+	iface->ac[WIFI_AC_VI].ac = WIFI_AC_VI;
+	iface->ac[WIFI_AC_VI].tx_mpdu = wl_wme_cnt->tx[AC_VI].packets;
+	iface->ac[WIFI_AC_VI].rx_mpdu = wl_wme_cnt->rx[AC_VI].packets;
+	iface->ac[WIFI_AC_VI].mpdu_lost = wl_wme_cnt->tx_failed[WIFI_AC_VI].packets;
+
+	iface->ac[WIFI_AC_BE].ac = WIFI_AC_BE;
+	iface->ac[WIFI_AC_BE].tx_mpdu = wl_wme_cnt->tx[AC_BE].packets;
+	iface->ac[WIFI_AC_BE].rx_mpdu = wl_wme_cnt->rx[AC_BE].packets;
+	iface->ac[WIFI_AC_BE].mpdu_lost = wl_wme_cnt->tx_failed[WIFI_AC_BE].packets;
+
+	iface->ac[WIFI_AC_BK].ac = WIFI_AC_BK;
+	iface->ac[WIFI_AC_BK].tx_mpdu = wl_wme_cnt->tx[AC_BK].packets;
+	iface->ac[WIFI_AC_BK].rx_mpdu = wl_wme_cnt->rx[AC_BK].packets;
+	iface->ac[WIFI_AC_BK].mpdu_lost = wl_wme_cnt->tx_failed[WIFI_AC_BK].packets;
+	bzero(iovar_buf, WLC_IOCTL_MAXLEN);
+
+	err = wldev_iovar_getbuf(bcmcfg_to_prmry_ndev(cfg), "counters", NULL, 0,
+		iovar_buf, WLC_IOCTL_MAXLEN, NULL);
+	if (unlikely(err)) {
+		WL_ERR(("error (%d) - size = %zu\n", err, sizeof(wl_cnt_t)));
+		return err;
+	}
+	wl_cnt = (wl_cnt_t *)iovar_buf;
+	iface->ac[WIFI_AC_BE].retries = wl_cnt->txretry;
+	iface->beacon_rx = wl_cnt->rxbeaconmbss;
+
+	err = wldev_get_rssi(bcmcfg_to_prmry_ndev(cfg), &iface->rssi_mgmt);
+	if (unlikely(err)) {
+		WL_ERR(("get_rssi error (%d)\n", err));
+		return err;
+	}
+
+	iface->num_peers = NUM_PEER;
+	iface->peer_info->num_rate = NUM_RATE;
+
+	bzero(iovar_buf, WLC_IOCTL_MAXLEN);
+	output = (char *)iface + sizeof(wifi_iface_stat) + NUM_PEER*sizeof(wifi_peer_info);
+
+	err = wldev_iovar_getbuf(bcmcfg_to_prmry_ndev(cfg), "ratestat", NULL, 0,
+		iovar_buf, WLC_IOCTL_MAXLEN, NULL);
+	if (unlikely(err)) {
+		WL_ERR(("error (%d) - size = %zu\n", err, NUM_RATE*sizeof(wifi_rate_stat)));
+		return err;
+	}
+	memcpy(output, iovar_buf, NUM_RATE*sizeof(wifi_rate_stat));
+
+	err =  wl_cfgvendor_send_cmd_reply(wiphy, bcmcfg_to_prmry_ndev(cfg),
+		cfg->ioctl_buf, sizeof(wifi_radio_stat)+NUM_CHAN*sizeof(wifi_channel_stat)+
+		sizeof(wifi_iface_stat)+NUM_PEER*sizeof(wifi_peer_info)+
+		NUM_RATE*sizeof(wifi_rate_stat));
+	if (unlikely(err))
+		WL_ERR(("Vendor Command reply failed ret:%d \n", err));
+
+	return err;
+}
+#endif /* LINKSTAT_SUPPORT */
+
+static const struct wiphy_vendor_command wl_vendor_cmds [] = {
+	{
+		{
+			.vendor_id = OUI_BRCM,
+			.subcmd = BRCM_VENDOR_SCMD_PRIV_STR
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.doit = wl_cfgvendor_priv_string_handler
+	},
+#ifdef GSCAN_SUPPORT
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = GSCAN_SUBCMD_GET_CAPABILITIES
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.doit = wl_cfgvendor_gscan_get_capabilities
+	},
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = GSCAN_SUBCMD_SET_CONFIG
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.doit = wl_cfgvendor_set_scan_cfg
+	},
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = GSCAN_SUBCMD_SET_SCAN_CONFIG
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.doit = wl_cfgvendor_set_batch_scan_cfg
+	},
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = GSCAN_SUBCMD_ENABLE_GSCAN
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.doit = wl_cfgvendor_initiate_gscan
+	},
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = GSCAN_SUBCMD_ENABLE_FULL_SCAN_RESULTS
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.doit = wl_cfgvendor_enable_full_scan_result
+	},
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = GSCAN_SUBCMD_SET_HOTLIST
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.doit = wl_cfgvendor_hotlist_cfg
+	},
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = GSCAN_SUBCMD_SET_SIGNIFICANT_CHANGE_CONFIG
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.doit = wl_cfgvendor_significant_change_cfg
+	},
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = GSCAN_SUBCMD_GET_SCAN_RESULTS
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.doit = wl_cfgvendor_gscan_get_batch_results
+	},
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = GSCAN_SUBCMD_GET_CHANNEL_LIST
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.doit = wl_cfgvendor_gscan_get_channel_list
+	},
+#endif /* GSCAN_SUPPORT */
+#ifdef RTT_SUPPORT
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = RTT_SUBCMD_SET_CONFIG
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.doit = wl_cfgvendor_rtt_set_config
+	},
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = RTT_SUBCMD_CANCEL_CONFIG
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.doit = wl_cfgvendor_rtt_cancel_config
+	},
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = RTT_SUBCMD_GETCAPABILITY
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.doit = wl_cfgvendor_rtt_get_capability
+	},
+#endif /* RTT_SUPPORT */
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = ANDR_WIFI_SUBCMD_GET_FEATURE_SET
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.doit = wl_cfgvendor_get_feature_set
+	},
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = ANDR_WIFI_SUBCMD_GET_FEATURE_SET_MATRIX
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.doit = wl_cfgvendor_get_feature_set_matrix
+	},
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = ANDR_WIFI_PNO_RANDOM_MAC_OUI
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.doit = wl_cfgvendor_set_pno_mac_oui
+	},
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = ANDR_WIFI_NODFS_CHANNELS
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.doit = wl_cfgvendor_set_nodfs_flag
+
+	},
+#ifdef LINKSTAT_SUPPORT
+	{
+		{
+			.vendor_id = OUI_GOOGLE,
+			.subcmd = LSTATS_SUBCMD_GET_INFO
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.doit = wl_cfgvendor_lstats_get_info
+	},
+#endif /* LINKSTAT_SUPPORT */
+};
+
+static const struct  nl80211_vendor_cmd_info wl_vendor_events [] = {
+		{ OUI_BRCM, BRCM_VENDOR_EVENT_UNSPEC },
+		{ OUI_BRCM, BRCM_VENDOR_EVENT_PRIV_STR },
+#ifdef GSCAN_SUPPORT
+		{ OUI_GOOGLE, GOOGLE_GSCAN_SIGNIFICANT_EVENT },
+		{ OUI_GOOGLE, GOOGLE_GSCAN_GEOFENCE_FOUND_EVENT },
+		{ OUI_GOOGLE, GOOGLE_GSCAN_BATCH_SCAN_EVENT },
+		{ OUI_GOOGLE, GOOGLE_SCAN_FULL_RESULTS_EVENT },
+#endif /* GSCAN_SUPPORT */
+#ifdef RTT_SUPPORT
+		{ OUI_GOOGLE, GOOGLE_RTT_COMPLETE_EVENT },
+#endif /* RTT_SUPPORT */
+#ifdef GSCAN_SUPPORT
+		{ OUI_GOOGLE, GOOGLE_SCAN_COMPLETE_EVENT },
+		{ OUI_GOOGLE, GOOGLE_GSCAN_GEOFENCE_LOST_EVENT }
+#endif /* GSCAN_SUPPORT */
+};
+
+int wl_cfgvendor_attach(struct wiphy *wiphy)
+{
+
+	WL_INFORM(("Vendor: Register BRCM cfg80211 vendor cmd(0x%x) interface \n",
+		NL80211_CMD_VENDOR));
+
+	wiphy->vendor_commands	= wl_vendor_cmds;
+	wiphy->n_vendor_commands = ARRAY_SIZE(wl_vendor_cmds);
+	wiphy->vendor_events	= wl_vendor_events;
+	wiphy->n_vendor_events	= ARRAY_SIZE(wl_vendor_events);
+
+	return 0;
+}
+
+int wl_cfgvendor_detach(struct wiphy *wiphy)
+{
+	WL_INFORM(("Vendor: Unregister BRCM cfg80211 vendor interface \n"));
+
+	wiphy->vendor_commands  = NULL;
+	wiphy->vendor_events    = NULL;
+	wiphy->n_vendor_commands = 0;
+	wiphy->n_vendor_events  = 0;
+
+	return 0;
+}
+#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT) */
diff --git a/drivers/net/wireless/bcmdhd/wl_cfgvendor.h b/drivers/net/wireless/bcmdhd/wl_cfgvendor.h
new file mode 100644
index 0000000..5bc840f
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_cfgvendor.h
@@ -0,0 +1,260 @@
+/*
+ * Linux cfg80211 Vendor Extension Code
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ *
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ *
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_cfgvendor.h 473890 2014-04-30 01:55:06Z $
+ */
+
+/*
+ * New vendor interface additon to nl80211/cfg80211 to allow vendors
+ * to implement proprietary features over the cfg80211 stack.
+ */
+
+#ifndef _wl_cfgvendor_h_
+#define _wl_cfgvendor_h_
+
+#define OUI_BRCM    0x001018
+#define OUI_GOOGLE  0x001A11
+#define BRCM_VENDOR_SUBCMD_PRIV_STR	1
+#define ATTRIBUTE_U32_LEN                  (NLA_HDRLEN  + 4)
+#define VENDOR_ID_OVERHEAD                 ATTRIBUTE_U32_LEN
+#define VENDOR_SUBCMD_OVERHEAD             ATTRIBUTE_U32_LEN
+#define VENDOR_DATA_OVERHEAD               (NLA_HDRLEN)
+
+#define SCAN_RESULTS_COMPLETE_FLAG_LEN       ATTRIBUTE_U32_LEN
+#define SCAN_INDEX_HDR_LEN                   (NLA_HDRLEN)
+#define SCAN_ID_HDR_LEN                      ATTRIBUTE_U32_LEN
+#define SCAN_FLAGS_HDR_LEN                   ATTRIBUTE_U32_LEN
+#define GSCAN_NUM_RESULTS_HDR_LEN            ATTRIBUTE_U32_LEN
+#define GSCAN_RESULTS_HDR_LEN                (NLA_HDRLEN)
+#define GSCAN_BATCH_RESULT_HDR_LEN  (SCAN_INDEX_HDR_LEN + SCAN_ID_HDR_LEN + \
+									SCAN_FLAGS_HDR_LEN + \
+							        GSCAN_NUM_RESULTS_HDR_LEN + \
+									GSCAN_RESULTS_HDR_LEN)
+
+#define VENDOR_REPLY_OVERHEAD       (VENDOR_ID_OVERHEAD + \
+									VENDOR_SUBCMD_OVERHEAD + \
+									VENDOR_DATA_OVERHEAD)
+typedef enum {
+	/* don't use 0 as a valid subcommand */
+	VENDOR_NL80211_SUBCMD_UNSPECIFIED,
+
+	/* define all vendor startup commands between 0x0 and 0x0FFF */
+	VENDOR_NL80211_SUBCMD_RANGE_START = 0x0001,
+	VENDOR_NL80211_SUBCMD_RANGE_END   = 0x0FFF,
+
+	/* define all GScan related commands between 0x1000 and 0x10FF */
+	ANDROID_NL80211_SUBCMD_GSCAN_RANGE_START = 0x1000,
+	ANDROID_NL80211_SUBCMD_GSCAN_RANGE_END   = 0x10FF,
+
+	/* define all NearbyDiscovery related commands between 0x1100 and 0x11FF */
+	ANDROID_NL80211_SUBCMD_NBD_RANGE_START = 0x1100,
+	ANDROID_NL80211_SUBCMD_NBD_RANGE_END   = 0x11FF,
+
+	/* define all RTT related commands between 0x1100 and 0x11FF */
+	ANDROID_NL80211_SUBCMD_RTT_RANGE_START = 0x1100,
+	ANDROID_NL80211_SUBCMD_RTT_RANGE_END   = 0x11FF,
+
+	ANDROID_NL80211_SUBCMD_LSTATS_RANGE_START = 0x1200,
+	ANDROID_NL80211_SUBCMD_LSTATS_RANGE_END   = 0x12FF,
+
+	ANDROID_NL80211_SUBCMD_TDLS_RANGE_START = 0x1300,
+	ANDROID_NL80211_SUBCMD_TDLS_RANGE_END	= 0x13FF,
+	/* This is reserved for future usage */
+
+} ANDROID_VENDOR_SUB_COMMAND;
+
+enum wl_vendor_subcmd {
+	BRCM_VENDOR_SCMD_UNSPEC,
+	BRCM_VENDOR_SCMD_PRIV_STR,
+	GSCAN_SUBCMD_GET_CAPABILITIES = ANDROID_NL80211_SUBCMD_GSCAN_RANGE_START,
+	GSCAN_SUBCMD_SET_CONFIG,
+	GSCAN_SUBCMD_SET_SCAN_CONFIG,
+	GSCAN_SUBCMD_ENABLE_GSCAN,
+	GSCAN_SUBCMD_GET_SCAN_RESULTS,
+	GSCAN_SUBCMD_SCAN_RESULTS,
+	GSCAN_SUBCMD_SET_HOTLIST,
+	GSCAN_SUBCMD_SET_SIGNIFICANT_CHANGE_CONFIG,
+	GSCAN_SUBCMD_ENABLE_FULL_SCAN_RESULTS,
+	GSCAN_SUBCMD_GET_CHANNEL_LIST,
+	ANDR_WIFI_SUBCMD_GET_FEATURE_SET,
+	ANDR_WIFI_SUBCMD_GET_FEATURE_SET_MATRIX,
+	ANDR_WIFI_PNO_RANDOM_MAC_OUI,
+	ANDR_WIFI_NODFS_CHANNELS,
+	RTT_SUBCMD_SET_CONFIG = ANDROID_NL80211_SUBCMD_RTT_RANGE_START,
+	RTT_SUBCMD_CANCEL_CONFIG,
+	RTT_SUBCMD_GETCAPABILITY,
+
+	LSTATS_SUBCMD_GET_INFO = ANDROID_NL80211_SUBCMD_LSTATS_RANGE_START,
+    /* Add more sub commands here */
+    VENDOR_SUBCMD_MAX
+};
+
+enum gscan_attributes {
+    GSCAN_ATTRIBUTE_NUM_BUCKETS = 10,
+    GSCAN_ATTRIBUTE_BASE_PERIOD,
+    GSCAN_ATTRIBUTE_BUCKETS_BAND,
+    GSCAN_ATTRIBUTE_BUCKET_ID,
+    GSCAN_ATTRIBUTE_BUCKET_PERIOD,
+    GSCAN_ATTRIBUTE_BUCKET_NUM_CHANNELS,
+    GSCAN_ATTRIBUTE_BUCKET_CHANNELS,
+    GSCAN_ATTRIBUTE_NUM_AP_PER_SCAN,
+    GSCAN_ATTRIBUTE_REPORT_THRESHOLD,
+    GSCAN_ATTRIBUTE_NUM_SCANS_TO_CACHE,
+    GSCAN_ATTRIBUTE_BAND = GSCAN_ATTRIBUTE_BUCKETS_BAND,
+
+    GSCAN_ATTRIBUTE_ENABLE_FEATURE = 20,
+    GSCAN_ATTRIBUTE_SCAN_RESULTS_COMPLETE,
+    GSCAN_ATTRIBUTE_FLUSH_FEATURE,
+    GSCAN_ATTRIBUTE_ENABLE_FULL_SCAN_RESULTS,
+    GSCAN_ATTRIBUTE_REPORT_EVENTS,
+    /* remaining reserved for additional attributes */
+    GSCAN_ATTRIBUTE_NUM_OF_RESULTS = 30,
+    GSCAN_ATTRIBUTE_FLUSH_RESULTS,
+    GSCAN_ATTRIBUTE_SCAN_RESULTS,                       /* flat array of wifi_scan_result */
+    GSCAN_ATTRIBUTE_SCAN_ID,                            /* indicates scan number */
+    GSCAN_ATTRIBUTE_SCAN_FLAGS,                         /* indicates if scan was aborted */
+    GSCAN_ATTRIBUTE_AP_FLAGS,                           /* flags on significant change event */
+    GSCAN_ATTRIBUTE_NUM_CHANNELS,
+    GSCAN_ATTRIBUTE_CHANNEL_LIST,
+
+	/* remaining reserved for additional attributes */
+
+    GSCAN_ATTRIBUTE_SSID = 40,
+    GSCAN_ATTRIBUTE_BSSID,
+    GSCAN_ATTRIBUTE_CHANNEL,
+    GSCAN_ATTRIBUTE_RSSI,
+    GSCAN_ATTRIBUTE_TIMESTAMP,
+    GSCAN_ATTRIBUTE_RTT,
+    GSCAN_ATTRIBUTE_RTTSD,
+
+    /* remaining reserved for additional attributes */
+
+    GSCAN_ATTRIBUTE_HOTLIST_BSSIDS = 50,
+    GSCAN_ATTRIBUTE_RSSI_LOW,
+    GSCAN_ATTRIBUTE_RSSI_HIGH,
+    GSCAN_ATTRIBUTE_HOSTLIST_BSSID_ELEM,
+    GSCAN_ATTRIBUTE_HOTLIST_FLUSH,
+
+    /* remaining reserved for additional attributes */
+    GSCAN_ATTRIBUTE_RSSI_SAMPLE_SIZE = 60,
+    GSCAN_ATTRIBUTE_LOST_AP_SAMPLE_SIZE,
+    GSCAN_ATTRIBUTE_MIN_BREACHING,
+    GSCAN_ATTRIBUTE_SIGNIFICANT_CHANGE_BSSIDS,
+    GSCAN_ATTRIBUTE_SIGNIFICANT_CHANGE_FLUSH,
+    GSCAN_ATTRIBUTE_MAX
+};
+
+enum gscan_bucket_attributes {
+	GSCAN_ATTRIBUTE_CH_BUCKET_1,
+	GSCAN_ATTRIBUTE_CH_BUCKET_2,
+	GSCAN_ATTRIBUTE_CH_BUCKET_3,
+	GSCAN_ATTRIBUTE_CH_BUCKET_4,
+	GSCAN_ATTRIBUTE_CH_BUCKET_5,
+	GSCAN_ATTRIBUTE_CH_BUCKET_6,
+	GSCAN_ATTRIBUTE_CH_BUCKET_7
+};
+
+enum gscan_ch_attributes {
+	GSCAN_ATTRIBUTE_CH_ID_1,
+	GSCAN_ATTRIBUTE_CH_ID_2,
+	GSCAN_ATTRIBUTE_CH_ID_3,
+	GSCAN_ATTRIBUTE_CH_ID_4,
+	GSCAN_ATTRIBUTE_CH_ID_5,
+	GSCAN_ATTRIBUTE_CH_ID_6,
+	GSCAN_ATTRIBUTE_CH_ID_7
+};
+
+enum rtt_attributes {
+	RTT_ATTRIBUTE_TARGET_CNT,
+	RTT_ATTRIBUTE_TARGET_INFO,
+	RTT_ATTRIBUTE_TARGET_MAC,
+	RTT_ATTRIBUTE_TARGET_TYPE,
+	RTT_ATTRIBUTE_TARGET_PEER,
+	RTT_ATTRIBUTE_TARGET_CHAN,
+	RTT_ATTRIBUTE_TARGET_MODE,
+	RTT_ATTRIBUTE_TARGET_INTERVAL,
+	RTT_ATTRIBUTE_TARGET_NUM_MEASUREMENT,
+	RTT_ATTRIBUTE_TARGET_NUM_PKT,
+	RTT_ATTRIBUTE_TARGET_NUM_RETRY
+};
+
+typedef enum wl_vendor_event {
+	BRCM_VENDOR_EVENT_UNSPEC,
+	BRCM_VENDOR_EVENT_PRIV_STR,
+	GOOGLE_GSCAN_SIGNIFICANT_EVENT,
+	GOOGLE_GSCAN_GEOFENCE_FOUND_EVENT,
+	GOOGLE_GSCAN_BATCH_SCAN_EVENT,
+	GOOGLE_SCAN_FULL_RESULTS_EVENT,
+	GOOGLE_RTT_COMPLETE_EVENT,
+	GOOGLE_SCAN_COMPLETE_EVENT,
+	GOOGLE_GSCAN_GEOFENCE_LOST_EVENT
+} wl_vendor_event_t;
+
+enum andr_wifi_attr {
+    ANDR_WIFI_ATTRIBUTE_NUM_FEATURE_SET,
+    ANDR_WIFI_ATTRIBUTE_FEATURE_SET,
+    ANDR_WIFI_ATTRIBUTE_PNO_RANDOM_MAC_OUI,
+    ANDR_WIFI_ATTRIBUTE_NODFS_SET,
+};
+
+typedef enum wl_vendor_gscan_attribute {
+	ATTR_START_GSCAN,
+	ATTR_STOP_GSCAN,
+	ATTR_SET_SCAN_BATCH_CFG_ID, /* set batch scan params */
+	ATTR_SET_SCAN_GEOFENCE_CFG_ID, /* set list of bssids to track */
+	ATTR_SET_SCAN_SIGNIFICANT_CFG_ID, /* set list of bssids, rssi threshold etc.. */
+	ATTR_SET_SCAN_CFG_ID, /* set common scan config params here */
+	ATTR_GET_GSCAN_CAPABILITIES_ID,
+    /* Add more sub commands here */
+    ATTR_GSCAN_MAX
+} wl_vendor_gscan_attribute_t;
+
+typedef enum gscan_batch_attribute {
+	ATTR_GSCAN_BATCH_BESTN,
+	ATTR_GSCAN_BATCH_MSCAN,
+	ATTR_GSCAN_BATCH_BUFFER_THRESHOLD
+} gscan_batch_attribute_t;
+
+typedef enum gscan_geofence_attribute {
+	ATTR_GSCAN_NUM_HOTLIST_BSSID,
+	ATTR_GSCAN_HOTLIST_BSSID
+} gscan_geofence_attribute_t;
+
+typedef enum gscan_complete_event {
+	WIFI_SCAN_BUFFER_FULL,
+	WIFI_SCAN_COMPLETE
+} gscan_complete_event_t;
+
+/* Capture the BRCM_VENDOR_SUBCMD_PRIV_STRINGS* here */
+#define BRCM_VENDOR_SCMD_CAPA	"cap"
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT)
+extern int wl_cfgvendor_attach(struct wiphy *wiphy);
+extern int wl_cfgvendor_detach(struct wiphy *wiphy);
+extern int wl_cfgvendor_send_async_event(struct wiphy *wiphy,
+                  struct net_device *dev, int event_id, const void  *data, int len);
+extern int wl_cfgvendor_send_hotlist_event(struct wiphy *wiphy,
+                struct net_device *dev, void  *data, int len, wl_vendor_event_t event);
+#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT) */
+
+#endif /* _wl_cfgvendor_h_ */
diff --git a/drivers/net/wireless/bcmdhd/wl_dbg.h b/drivers/net/wireless/bcmdhd/wl_dbg.h
new file mode 100644
index 0000000..083d0c3
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_dbg.h
@@ -0,0 +1,206 @@
+/*
+ * Minimal debug/trace/assert driver definitions for
+ * Broadcom 802.11 Networking Adapter.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_dbg.h 472390 2014-04-23 23:32:01Z $
+ */
+
+
+#ifndef _wl_dbg_h_
+#define _wl_dbg_h_
+
+/* wl_msg_level is a bit vector with defs in wlioctl.h */
+extern uint32 wl_msg_level;
+extern uint32 wl_msg_level2;
+
+#define WL_TIMESTAMP()
+
+#define WL_PRINT(args)		do { WL_TIMESTAMP(); printf args; } while (0)
+
+#if defined(EVENT_LOG_COMPILE) && defined(WLMSG_SRSCAN)
+#define _WL_SRSCAN(fmt, ...)	EVENT_LOG(EVENT_LOG_TAG_SRSCAN, fmt, ##__VA_ARGS__)
+#define WL_SRSCAN(args)		_WL_SRSCAN args
+#else
+#define WL_SRSCAN(args)
+#endif
+
+#if defined(BCMCONDITIONAL_LOGGING)
+
+/* Ideally this should be some include file that vendors can include to conditionalize logging */
+
+/* DBGONLY() macro to reduce ifdefs in code for statements that are only needed when
+ * BCMDBG is defined.
+ */
+#define DBGONLY(x)
+
+/* To disable a message completely ... until you need it again */
+#define WL_NONE(args)
+#define WL_ERROR(args)		do {if (wl_msg_level & WL_ERROR_VAL) WL_PRINT(args);} while (0)
+#define WL_TRACE(args)
+#define WL_PRHDRS_MSG(args)
+#define WL_PRHDRS(i, p, f, t, r, l)
+#define WL_PRPKT(m, b, n)
+#define WL_INFORM(args)
+#define WL_TMP(args)
+#define WL_OID(args)
+#define WL_RATE(args)		do {if (wl_msg_level & WL_RATE_VAL) WL_PRINT(args);} while (0)
+#define WL_ASSOC(args)		do {if (wl_msg_level & WL_ASSOC_VAL) WL_PRINT(args);} while (0)
+#define WL_PRUSR(m, b, n)
+#define WL_PS(args)		do {if (wl_msg_level & WL_PS_VAL) WL_PRINT(args);} while (0)
+
+#define WL_PORT(args)
+#define WL_DUAL(args)
+#define WL_REGULATORY(args)	do {if (wl_msg_level & WL_REGULATORY_VAL) WL_PRINT(args);} while (0)
+
+#define WL_MPC(args)
+#define WL_APSTA(args)
+#define WL_APSTA_BCN(args)
+#define WL_APSTA_TX(args)
+#define WL_APSTA_TSF(args)
+#define WL_APSTA_BSSID(args)
+#define WL_BA(args)
+#define WL_MBSS(args)
+#define WL_PROTO(args)
+
+#define	WL_CAC(args)		do {if (wl_msg_level & WL_CAC_VAL) WL_PRINT(args);} while (0)
+#define WL_AMSDU(args)
+#define WL_AMPDU(args)
+#define WL_FFPLD(args)
+#define WL_MCHAN(args)
+
+#define WL_DFS(args)
+#define WL_WOWL(args)
+#define WL_DPT(args)
+#define WL_ASSOC_OR_DPT(args)
+#define WL_SCAN(args)		do {if (wl_msg_level2 & WL_SCAN_VAL) WL_PRINT(args);} while (0)
+#define WL_COEX(args)
+#define WL_RTDC(w, s, i, j)
+#define WL_RTDC2(w, s, i, j)
+#define WL_CHANINT(args)
+#define WL_BTA(args)
+#define WL_P2P(args)
+#define WL_ITFR(args)
+#define WL_TDLS(args)
+#define WL_MCNX(args)
+#define WL_PROT(args)
+#define WL_PSTA(args)
+#define WL_TRF_MGMT(args)
+#define WL_L2FILTER(args)
+#define WL_MQ(args)
+#define WL_TXBF(args)
+#define WL_P2PO(args)
+#define WL_NET_DETECT(args)
+#define WL_ROAM(args)
+#define WL_WNM(args)
+
+
+#define WL_AMPDU_UPDN(args)
+#define WL_AMPDU_RX(args)
+#define WL_AMPDU_ERR(args)
+#define WL_AMPDU_TX(args)
+#define WL_AMPDU_CTL(args)
+#define WL_AMPDU_HW(args)
+#define WL_AMPDU_HWTXS(args)
+#define WL_AMPDU_HWDBG(args)
+#define WL_AMPDU_STAT(args)
+#define WL_AMPDU_ERR_ON()       0
+#define WL_AMPDU_HW_ON()        0
+#define WL_AMPDU_HWTXS_ON()     0
+
+#define WL_APSTA_UPDN(args)
+#define WL_APSTA_RX(args)
+#define WL_WSEC(args)
+#define WL_WSEC_DUMP(args)
+#define WL_PCIE(args)
+#define WL_CHANLOG(w, s, i, j)
+
+#define WL_ERROR_ON()		(wl_msg_level & WL_ERROR_VAL)
+#define WL_TRACE_ON()		0
+#define WL_PRHDRS_ON()		0
+#define WL_PRPKT_ON()		0
+#define WL_INFORM_ON()		0
+#define WL_TMP_ON()		0
+#define WL_OID_ON()		0
+#define WL_RATE_ON()		(wl_msg_level & WL_RATE_VAL)
+#define WL_ASSOC_ON()		(wl_msg_level & WL_ASSOC_VAL)
+#define WL_PRUSR_ON()		0
+#define WL_PS_ON()		(wl_msg_level & WL_PS_VAL)
+#define WL_PORT_ON()		0
+#define WL_WSEC_ON()		0
+#define WL_WSEC_DUMP_ON()	0
+#define WL_MPC_ON()		0
+#define WL_REGULATORY_ON()	(wl_msg_level & WL_REGULATORY_VAL)
+#define WL_APSTA_ON()		0
+#define WL_DFS_ON()		0
+#define WL_MBSS_ON()		0
+#define WL_CAC_ON()		(wl_msg_level & WL_CAC_VAL)
+#define WL_AMPDU_ON()		0
+#define WL_DPT_ON()		0
+#define WL_WOWL_ON()		0
+#define WL_SCAN_ON()		(wl_msg_level2 & WL_SCAN_VAL)
+#define WL_BTA_ON()		0
+#define WL_P2P_ON()		0
+#define WL_ITFR_ON()		0
+#define WL_MCHAN_ON()		0
+#define WL_TDLS_ON()		0
+#define WL_MCNX_ON()		0
+#define WL_PROT_ON()		0
+#define WL_PSTA_ON()		0
+#define WL_TRF_MGMT_ON()	0
+#define WL_LPC_ON()		0
+#define WL_L2FILTER_ON()	0
+#define WL_TXBF_ON()		0
+#define WL_P2PO_ON()		0
+#define WL_CHANLOG_ON()		0
+#define WL_NET_DETECT_ON()	0
+#define WL_WNM_ON()		0
+#define WL_PCIE_ON()		0
+
+#else /* !BCMDBG */
+
+/* DBGONLY() macro to reduce ifdefs in code for statements that are only needed when
+ * BCMDBG is defined.
+ */
+#define DBGONLY(x)
+
+/* To disable a message completely ... until you need it again */
+#define WL_NONE(args)
+
+#define	WL_ERROR(args)
+#define	WL_TRACE(args)
+#define WL_APSTA_UPDN(args)
+#define WL_APSTA_RX(args)
+#ifdef WLMSG_WSEC
+#define WL_WSEC(args)		WL_PRINT(args)
+#define WL_WSEC_DUMP(args)	WL_PRINT(args)
+#else
+#define WL_WSEC(args)
+#define WL_WSEC_DUMP(args)
+#endif
+#define WL_PCIE(args)		do {if (wl_msg_level2 & WL_PCIE_VAL) WL_PRINT(args);} while (0)
+#define WL_PCIE_ON()		(wl_msg_level2 & WL_PCIE_VAL)
+#endif 
+
+extern uint32 wl_msg_level;
+extern uint32 wl_msg_level2;
+#endif /* _wl_dbg_h_ */
diff --git a/drivers/net/wireless/bcmdhd/wl_iw.c b/drivers/net/wireless/bcmdhd/wl_iw.c
new file mode 100644
index 0000000..6a0b676
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_iw.c
@@ -0,0 +1,3702 @@
+/*
+ * Linux Wireless Extensions support
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_iw.c 467328 2014-04-03 01:23:40Z $
+ */
+
+#if defined(USE_IW)
+#define LINUX_PORT
+
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <proto/ethernet.h>
+
+#include <linux/if_arp.h>
+#include <asm/uaccess.h>
+
+typedef const struct si_pub	si_t;
+#include <wlioctl.h>
+
+
+#include <wl_dbg.h>
+#include <wl_iw.h>
+
+
+/* Broadcom extensions to WEXT, linux upstream has obsoleted WEXT */
+#ifndef IW_AUTH_KEY_MGMT_FT_802_1X
+#define IW_AUTH_KEY_MGMT_FT_802_1X 0x04
+#endif
+
+#ifndef IW_AUTH_KEY_MGMT_FT_PSK
+#define IW_AUTH_KEY_MGMT_FT_PSK 0x08
+#endif
+
+#ifndef IW_ENC_CAPA_FW_ROAM_ENABLE
+#define IW_ENC_CAPA_FW_ROAM_ENABLE	0x00000020
+#endif
+
+
+/* FC9: wireless.h 2.6.25-14.fc9.i686 is missing these, even though WIRELESS_EXT is set to latest
+ * version 22.
+ */
+#ifndef IW_ENCODE_ALG_PMK
+#define IW_ENCODE_ALG_PMK 4
+#endif
+#ifndef IW_ENC_CAPA_4WAY_HANDSHAKE
+#define IW_ENC_CAPA_4WAY_HANDSHAKE 0x00000010
+#endif
+/* End FC9. */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+#include <linux/rtnetlink.h>
+#endif
+#if defined(SOFTAP)
+struct net_device *ap_net_dev = NULL;
+tsk_ctl_t ap_eth_ctl;  /* apsta AP netdev waiter thread */
+#endif /* SOFTAP */
+
+extern bool wl_iw_conn_status_str(uint32 event_type, uint32 status,
+	uint32 reason, char* stringBuf, uint buflen);
+
+uint wl_msg_level = WL_ERROR_VAL;
+
+#define MAX_WLIW_IOCTL_LEN 1024
+
+/* IOCTL swapping mode for Big Endian host with Little Endian dongle.  Default to off */
+#define htod32(i) (i)
+#define htod16(i) (i)
+#define dtoh32(i) (i)
+#define dtoh16(i) (i)
+#define htodchanspec(i) (i)
+#define dtohchanspec(i) (i)
+
+extern struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
+extern int dhd_wait_pend8021x(struct net_device *dev);
+
+#if WIRELESS_EXT < 19
+#define IW_IOCTL_IDX(cmd)	((cmd) - SIOCIWFIRST)
+#define IW_EVENT_IDX(cmd)	((cmd) - IWEVFIRST)
+#endif /* WIRELESS_EXT < 19 */
+
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+#define DAEMONIZE(a)	do { \
+		allow_signal(SIGKILL);	\
+		allow_signal(SIGTERM);	\
+	} while (0)
+#elif ((LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) && \
+	(LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)))
+#define DAEMONIZE(a) daemonize(a); \
+	allow_signal(SIGKILL); \
+	allow_signal(SIGTERM);
+#else /* Linux 2.4 (w/o preemption patch) */
+#define RAISE_RX_SOFTIRQ() \
+	cpu_raise_softirq(smp_processor_id(), NET_RX_SOFTIRQ)
+#define DAEMONIZE(a) daemonize(); \
+	do { if (a) \
+		strncpy(current->comm, a, MIN(sizeof(current->comm), (strlen(a) + 1))); \
+	} while (0);
+#endif /* LINUX_VERSION_CODE  */
+
+#define ISCAN_STATE_IDLE   0
+#define ISCAN_STATE_SCANING 1
+
+/* the buf lengh can be WLC_IOCTL_MAXLEN (8K) to reduce iteration */
+#define WLC_IW_ISCAN_MAXLEN   2048
+typedef struct iscan_buf {
+	struct iscan_buf * next;
+	char   iscan_buf[WLC_IW_ISCAN_MAXLEN];
+} iscan_buf_t;
+
+typedef struct iscan_info {
+	struct net_device *dev;
+	struct timer_list timer;
+	uint32 timer_ms;
+	uint32 timer_on;
+	int    iscan_state;
+	iscan_buf_t * list_hdr;
+	iscan_buf_t * list_cur;
+
+	/* Thread to work on iscan */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+	struct task_struct *kthread;
+#endif
+	long sysioc_pid;
+	struct semaphore sysioc_sem;
+	struct completion sysioc_exited;
+
+
+	char ioctlbuf[WLC_IOCTL_SMLEN];
+} iscan_info_t;
+iscan_info_t *g_iscan = NULL;
+static void wl_iw_timerfunc(ulong data);
+static void wl_iw_set_event_mask(struct net_device *dev);
+static int wl_iw_iscan(iscan_info_t *iscan, wlc_ssid_t *ssid, uint16 action);
+
+/* priv_link becomes netdev->priv and is the link between netdev and wlif struct */
+typedef struct priv_link {
+	wl_iw_t *wliw;
+} priv_link_t;
+
+/* dev to priv_link */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
+#define WL_DEV_LINK(dev)       (priv_link_t*)(dev->priv)
+#else
+#define WL_DEV_LINK(dev)       (priv_link_t*)netdev_priv(dev)
+#endif
+
+/* dev to wl_iw_t */
+#define IW_DEV_IF(dev)          ((wl_iw_t*)(WL_DEV_LINK(dev))->wliw)
+
+static void swap_key_from_BE(
+	        wl_wsec_key_t *key
+)
+{
+	key->index = htod32(key->index);
+	key->len = htod32(key->len);
+	key->algo = htod32(key->algo);
+	key->flags = htod32(key->flags);
+	key->rxiv.hi = htod32(key->rxiv.hi);
+	key->rxiv.lo = htod16(key->rxiv.lo);
+	key->iv_initialized = htod32(key->iv_initialized);
+}
+
+static void swap_key_to_BE(
+	        wl_wsec_key_t *key
+)
+{
+	key->index = dtoh32(key->index);
+	key->len = dtoh32(key->len);
+	key->algo = dtoh32(key->algo);
+	key->flags = dtoh32(key->flags);
+	key->rxiv.hi = dtoh32(key->rxiv.hi);
+	key->rxiv.lo = dtoh16(key->rxiv.lo);
+	key->iv_initialized = dtoh32(key->iv_initialized);
+}
+
+static int
+dev_wlc_ioctl(
+	struct net_device *dev,
+	int cmd,
+	void *arg,
+	int len
+)
+{
+	struct ifreq ifr;
+	wl_ioctl_t ioc;
+	mm_segment_t fs;
+	int ret;
+
+	memset(&ioc, 0, sizeof(ioc));
+	ioc.cmd = cmd;
+	ioc.buf = arg;
+	ioc.len = len;
+
+	strcpy(ifr.ifr_name, dev->name);
+	ifr.ifr_data = (caddr_t) &ioc;
+
+	fs = get_fs();
+	set_fs(get_ds());
+#if defined(WL_USE_NETDEV_OPS)
+	ret = dev->netdev_ops->ndo_do_ioctl(dev, &ifr, SIOCDEVPRIVATE);
+#else
+	ret = dev->do_ioctl(dev, &ifr, SIOCDEVPRIVATE);
+#endif
+	set_fs(fs);
+
+	return ret;
+}
+
+/*
+set named driver variable to int value and return error indication
+calling example: dev_wlc_intvar_set(dev, "arate", rate)
+*/
+
+static int
+dev_wlc_intvar_set(
+	struct net_device *dev,
+	char *name,
+	int val)
+{
+	char buf[WLC_IOCTL_SMLEN];
+	uint len;
+
+	val = htod32(val);
+	len = bcm_mkiovar(name, (char *)(&val), sizeof(val), buf, sizeof(buf));
+	ASSERT(len);
+
+	return (dev_wlc_ioctl(dev, WLC_SET_VAR, buf, len));
+}
+
+static int
+dev_iw_iovar_setbuf(
+	struct net_device *dev,
+	char *iovar,
+	void *param,
+	int paramlen,
+	void *bufptr,
+	int buflen)
+{
+	int iolen;
+
+	iolen = bcm_mkiovar(iovar, param, paramlen, bufptr, buflen);
+	ASSERT(iolen);
+	BCM_REFERENCE(iolen);
+
+	return (dev_wlc_ioctl(dev, WLC_SET_VAR, bufptr, iolen));
+}
+
+static int
+dev_iw_iovar_getbuf(
+	struct net_device *dev,
+	char *iovar,
+	void *param,
+	int paramlen,
+	void *bufptr,
+	int buflen)
+{
+	int iolen;
+
+	iolen = bcm_mkiovar(iovar, param, paramlen, bufptr, buflen);
+	ASSERT(iolen);
+	BCM_REFERENCE(iolen);
+
+	return (dev_wlc_ioctl(dev, WLC_GET_VAR, bufptr, buflen));
+}
+
+#if WIRELESS_EXT > 17
+static int
+dev_wlc_bufvar_set(
+	struct net_device *dev,
+	char *name,
+	char *buf, int len)
+{
+	char *ioctlbuf;
+	uint buflen;
+	int error;
+
+	ioctlbuf = kmalloc(MAX_WLIW_IOCTL_LEN, GFP_KERNEL);
+	if (!ioctlbuf)
+		return -ENOMEM;
+
+	buflen = bcm_mkiovar(name, buf, len, ioctlbuf, MAX_WLIW_IOCTL_LEN);
+	ASSERT(buflen);
+	error = dev_wlc_ioctl(dev, WLC_SET_VAR, ioctlbuf, buflen);
+
+	kfree(ioctlbuf);
+	return error;
+}
+#endif /* WIRELESS_EXT > 17 */
+
+/*
+get named driver variable to int value and return error indication
+calling example: dev_wlc_bufvar_get(dev, "arate", &rate)
+*/
+
+static int
+dev_wlc_bufvar_get(
+	struct net_device *dev,
+	char *name,
+	char *buf, int buflen)
+{
+	char *ioctlbuf;
+	int error;
+
+	uint len;
+
+	ioctlbuf = kmalloc(MAX_WLIW_IOCTL_LEN, GFP_KERNEL);
+	if (!ioctlbuf)
+		return -ENOMEM;
+	len = bcm_mkiovar(name, NULL, 0, ioctlbuf, MAX_WLIW_IOCTL_LEN);
+	ASSERT(len);
+	BCM_REFERENCE(len);
+	error = dev_wlc_ioctl(dev, WLC_GET_VAR, (void *)ioctlbuf, MAX_WLIW_IOCTL_LEN);
+	if (!error)
+		bcopy(ioctlbuf, buf, buflen);
+
+	kfree(ioctlbuf);
+	return (error);
+}
+
+/*
+get named driver variable to int value and return error indication
+calling example: dev_wlc_intvar_get(dev, "arate", &rate)
+*/
+
+static int
+dev_wlc_intvar_get(
+	struct net_device *dev,
+	char *name,
+	int *retval)
+{
+	union {
+		char buf[WLC_IOCTL_SMLEN];
+		int val;
+	} var;
+	int error;
+
+	uint len;
+	uint data_null;
+
+	len = bcm_mkiovar(name, (char *)(&data_null), 0, (char *)(&var), sizeof(var.buf));
+	ASSERT(len);
+	error = dev_wlc_ioctl(dev, WLC_GET_VAR, (void *)&var, len);
+
+	*retval = dtoh32(var.val);
+
+	return (error);
+}
+
+/* Maintain backward compatibility */
+#if WIRELESS_EXT < 13
+struct iw_request_info
+{
+	__u16		cmd;		/* Wireless Extension command */
+	__u16		flags;		/* More to come ;-) */
+};
+
+typedef int (*iw_handler)(struct net_device *dev, struct iw_request_info *info,
+	void *wrqu, char *extra);
+#endif /* WIRELESS_EXT < 13 */
+
+#if WIRELESS_EXT > 12
+static int
+wl_iw_set_leddc(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	int dc = *(int *)extra;
+	int error;
+
+	error = dev_wlc_intvar_set(dev, "leddc", dc);
+	return error;
+}
+
+static int
+wl_iw_set_vlanmode(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	int mode = *(int *)extra;
+	int error;
+
+	mode = htod32(mode);
+	error = dev_wlc_intvar_set(dev, "vlan_mode", mode);
+	return error;
+}
+
+static int
+wl_iw_set_pm(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	int pm = *(int *)extra;
+	int error;
+
+	pm = htod32(pm);
+	error = dev_wlc_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm));
+	return error;
+}
+
+#if WIRELESS_EXT > 17
+#endif /* WIRELESS_EXT > 17 */
+#endif /* WIRELESS_EXT > 12 */
+
+int
+wl_iw_send_priv_event(
+	struct net_device *dev,
+	char *flag
+)
+{
+	union iwreq_data wrqu;
+	char extra[IW_CUSTOM_MAX + 1];
+	int cmd;
+
+	cmd = IWEVCUSTOM;
+	memset(&wrqu, 0, sizeof(wrqu));
+	if (strlen(flag) > sizeof(extra))
+		return -1;
+
+	strcpy(extra, flag);
+	wrqu.data.length = strlen(extra);
+	wireless_send_event(dev, cmd, &wrqu, extra);
+	WL_TRACE(("Send IWEVCUSTOM Event as %s\n", extra));
+
+	return 0;
+}
+
+static int
+wl_iw_config_commit(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	void *zwrq,
+	char *extra
+)
+{
+	wlc_ssid_t ssid;
+	int error;
+	struct sockaddr bssid;
+
+	WL_TRACE(("%s: SIOCSIWCOMMIT\n", dev->name));
+
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_SSID, &ssid, sizeof(ssid))))
+		return error;
+
+	ssid.SSID_len = dtoh32(ssid.SSID_len);
+
+	if (!ssid.SSID_len)
+		return 0;
+
+	bzero(&bssid, sizeof(struct sockaddr));
+	if ((error = dev_wlc_ioctl(dev, WLC_REASSOC, &bssid, ETHER_ADDR_LEN))) {
+		WL_ERROR(("%s: WLC_REASSOC failed (%d)\n", __FUNCTION__, error));
+		return error;
+	}
+
+	return 0;
+}
+
+static int
+wl_iw_get_name(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *cwrq,
+	char *extra
+)
+{
+	int phytype, err;
+	uint band[3];
+	char cap[5];
+
+	WL_TRACE(("%s: SIOCGIWNAME\n", dev->name));
+
+	cap[0] = 0;
+	if ((err = dev_wlc_ioctl(dev, WLC_GET_PHYTYPE, &phytype, sizeof(phytype))) < 0)
+		goto done;
+	if ((err = dev_wlc_ioctl(dev, WLC_GET_BANDLIST, band, sizeof(band))) < 0)
+		goto done;
+
+	band[0] = dtoh32(band[0]);
+	switch (phytype) {
+		case WLC_PHY_TYPE_A:
+			strcpy(cap, "a");
+			break;
+		case WLC_PHY_TYPE_B:
+			strcpy(cap, "b");
+			break;
+		case WLC_PHY_TYPE_LP:
+		case WLC_PHY_TYPE_G:
+			if (band[0] >= 2)
+				strcpy(cap, "abg");
+			else
+				strcpy(cap, "bg");
+			break;
+		case WLC_PHY_TYPE_N:
+			if (band[0] >= 2)
+				strcpy(cap, "abgn");
+			else
+				strcpy(cap, "bgn");
+			break;
+	}
+done:
+	snprintf(cwrq->name, IFNAMSIZ, "IEEE 802.11%s", cap);
+	return 0;
+}
+
+static int
+wl_iw_set_freq(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_freq *fwrq,
+	char *extra
+)
+{
+	int error, chan;
+	uint sf = 0;
+
+	WL_TRACE(("%s: SIOCSIWFREQ\n", dev->name));
+
+	/* Setting by channel number */
+	if (fwrq->e == 0 && fwrq->m < MAXCHANNEL) {
+		chan = fwrq->m;
+	}
+
+	/* Setting by frequency */
+	else {
+		/* Convert to MHz as best we can */
+		if (fwrq->e >= 6) {
+			fwrq->e -= 6;
+			while (fwrq->e--)
+				fwrq->m *= 10;
+		} else if (fwrq->e < 6) {
+			while (fwrq->e++ < 6)
+				fwrq->m /= 10;
+		}
+	/* handle 4.9GHz frequencies as Japan 4 GHz based channelization */
+	if (fwrq->m > 4000 && fwrq->m < 5000)
+		sf = WF_CHAN_FACTOR_4_G; /* start factor for 4 GHz */
+
+		chan = wf_mhz2channel(fwrq->m, sf);
+	}
+	chan = htod32(chan);
+	if ((error = dev_wlc_ioctl(dev, WLC_SET_CHANNEL, &chan, sizeof(chan))))
+		return error;
+
+	/* -EINPROGRESS: Call commit handler */
+	return -EINPROGRESS;
+}
+
+static int
+wl_iw_get_freq(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_freq *fwrq,
+	char *extra
+)
+{
+	channel_info_t ci;
+	int error;
+
+	WL_TRACE(("%s: SIOCGIWFREQ\n", dev->name));
+
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_CHANNEL, &ci, sizeof(ci))))
+		return error;
+
+	/* Return radio channel in channel form */
+	fwrq->m = dtoh32(ci.hw_channel);
+	fwrq->e = dtoh32(0);
+	return 0;
+}
+
+static int
+wl_iw_set_mode(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	__u32 *uwrq,
+	char *extra
+)
+{
+	int infra = 0, ap = 0, error = 0;
+
+	WL_TRACE(("%s: SIOCSIWMODE\n", dev->name));
+
+	switch (*uwrq) {
+	case IW_MODE_MASTER:
+		infra = ap = 1;
+		break;
+	case IW_MODE_ADHOC:
+	case IW_MODE_AUTO:
+		break;
+	case IW_MODE_INFRA:
+		infra = 1;
+		break;
+	default:
+		return -EINVAL;
+	}
+	infra = htod32(infra);
+	ap = htod32(ap);
+
+	if ((error = dev_wlc_ioctl(dev, WLC_SET_INFRA, &infra, sizeof(infra))) ||
+	    (error = dev_wlc_ioctl(dev, WLC_SET_AP, &ap, sizeof(ap))))
+		return error;
+
+	/* -EINPROGRESS: Call commit handler */
+	return -EINPROGRESS;
+}
+
+static int
+wl_iw_get_mode(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	__u32 *uwrq,
+	char *extra
+)
+{
+	int error, infra = 0, ap = 0;
+
+	WL_TRACE(("%s: SIOCGIWMODE\n", dev->name));
+
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_INFRA, &infra, sizeof(infra))) ||
+	    (error = dev_wlc_ioctl(dev, WLC_GET_AP, &ap, sizeof(ap))))
+		return error;
+
+	infra = dtoh32(infra);
+	ap = dtoh32(ap);
+	*uwrq = infra ? ap ? IW_MODE_MASTER : IW_MODE_INFRA : IW_MODE_ADHOC;
+
+	return 0;
+}
+
+static int
+wl_iw_get_range(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	struct iw_range *range = (struct iw_range *) extra;
+	static int channels[MAXCHANNEL+1];
+	wl_uint32_list_t *list = (wl_uint32_list_t *) channels;
+	wl_rateset_t rateset;
+	int error, i, k;
+	uint sf, ch;
+
+	int phytype;
+	int bw_cap = 0, sgi_tx = 0, nmode = 0;
+	channel_info_t ci;
+	uint8 nrate_list2copy = 0;
+	uint16 nrate_list[4][8] = { {13, 26, 39, 52, 78, 104, 117, 130},
+		{14, 29, 43, 58, 87, 116, 130, 144},
+		{27, 54, 81, 108, 162, 216, 243, 270},
+		{30, 60, 90, 120, 180, 240, 270, 300}};
+	int fbt_cap = 0;
+
+	WL_TRACE(("%s: SIOCGIWRANGE\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	dwrq->length = sizeof(struct iw_range);
+	memset(range, 0, sizeof(*range));
+
+	/* We don't use nwids */
+	range->min_nwid = range->max_nwid = 0;
+
+	/* Set available channels/frequencies */
+	list->count = htod32(MAXCHANNEL);
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_VALID_CHANNELS, channels, sizeof(channels))))
+		return error;
+	for (i = 0; i < dtoh32(list->count) && i < IW_MAX_FREQUENCIES; i++) {
+		range->freq[i].i = dtoh32(list->element[i]);
+
+		ch = dtoh32(list->element[i]);
+		if (ch <= CH_MAX_2G_CHANNEL)
+			sf = WF_CHAN_FACTOR_2_4_G;
+		else
+			sf = WF_CHAN_FACTOR_5_G;
+
+		range->freq[i].m = wf_channel2mhz(ch, sf);
+		range->freq[i].e = 6;
+	}
+	range->num_frequency = range->num_channels = i;
+
+	/* Link quality (use NDIS cutoffs) */
+	range->max_qual.qual = 5;
+	/* Signal level (use RSSI) */
+	range->max_qual.level = 0x100 - 200;	/* -200 dBm */
+	/* Noise level (use noise) */
+	range->max_qual.noise = 0x100 - 200;	/* -200 dBm */
+	/* Signal level threshold range (?) */
+	range->sensitivity = 65535;
+
+#if WIRELESS_EXT > 11
+	/* Link quality (use NDIS cutoffs) */
+	range->avg_qual.qual = 3;
+	/* Signal level (use RSSI) */
+	range->avg_qual.level = 0x100 + WL_IW_RSSI_GOOD;
+	/* Noise level (use noise) */
+	range->avg_qual.noise = 0x100 - 75;	/* -75 dBm */
+#endif /* WIRELESS_EXT > 11 */
+
+	/* Set available bitrates */
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_CURR_RATESET, &rateset, sizeof(rateset))))
+		return error;
+	rateset.count = dtoh32(rateset.count);
+	range->num_bitrates = rateset.count;
+	for (i = 0; i < rateset.count && i < IW_MAX_BITRATES; i++)
+		range->bitrate[i] = (rateset.rates[i] & 0x7f) * 500000; /* convert to bps */
+	if ((error = dev_wlc_intvar_get(dev, "nmode", &nmode)))
+		return error;
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_PHYTYPE, &phytype, sizeof(phytype))))
+		return error;
+	if (nmode == 1 && ((phytype == WLC_PHY_TYPE_SSN) || (phytype == WLC_PHY_TYPE_LCN) ||
+		(phytype == WLC_PHY_TYPE_LCN40))) {
+		if ((error = dev_wlc_intvar_get(dev, "mimo_bw_cap", &bw_cap)))
+			return error;
+		if ((error = dev_wlc_intvar_get(dev, "sgi_tx", &sgi_tx)))
+			return error;
+		if ((error = dev_wlc_ioctl(dev, WLC_GET_CHANNEL, &ci, sizeof(channel_info_t))))
+			return error;
+		ci.hw_channel = dtoh32(ci.hw_channel);
+
+		if (bw_cap == 0 ||
+			(bw_cap == 2 && ci.hw_channel <= 14)) {
+			if (sgi_tx == 0)
+				nrate_list2copy = 0;
+			else
+				nrate_list2copy = 1;
+		}
+		if (bw_cap == 1 ||
+			(bw_cap == 2 && ci.hw_channel >= 36)) {
+			if (sgi_tx == 0)
+				nrate_list2copy = 2;
+			else
+				nrate_list2copy = 3;
+		}
+		range->num_bitrates += 8;
+		ASSERT(range->num_bitrates < IW_MAX_BITRATES);
+		for (k = 0; i < range->num_bitrates; k++, i++) {
+			/* convert to bps */
+			range->bitrate[i] = (nrate_list[nrate_list2copy][k]) * 500000;
+		}
+	}
+
+	/* Set an indication of the max TCP throughput
+	 * in bit/s that we can expect using this interface.
+	 * May be use for QoS stuff... Jean II
+	 */
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_PHYTYPE, &i, sizeof(i))))
+		return error;
+	i = dtoh32(i);
+	if (i == WLC_PHY_TYPE_A)
+		range->throughput = 24000000;	/* 24 Mbits/s */
+	else
+		range->throughput = 1500000;	/* 1.5 Mbits/s */
+
+	/* RTS and fragmentation thresholds */
+	range->min_rts = 0;
+	range->max_rts = 2347;
+	range->min_frag = 256;
+	range->max_frag = 2346;
+
+	range->max_encoding_tokens = DOT11_MAX_DEFAULT_KEYS;
+	range->num_encoding_sizes = 4;
+	range->encoding_size[0] = WEP1_KEY_SIZE;
+	range->encoding_size[1] = WEP128_KEY_SIZE;
+#if WIRELESS_EXT > 17
+	range->encoding_size[2] = TKIP_KEY_SIZE;
+#else
+	range->encoding_size[2] = 0;
+#endif
+	range->encoding_size[3] = AES_KEY_SIZE;
+
+	/* Do not support power micro-management */
+	range->min_pmp = 0;
+	range->max_pmp = 0;
+	range->min_pmt = 0;
+	range->max_pmt = 0;
+	range->pmp_flags = 0;
+	range->pm_capa = 0;
+
+	/* Transmit Power - values are in mW */
+	range->num_txpower = 2;
+	range->txpower[0] = 1;
+	range->txpower[1] = 255;
+	range->txpower_capa = IW_TXPOW_MWATT;
+
+#if WIRELESS_EXT > 10
+	range->we_version_compiled = WIRELESS_EXT;
+	range->we_version_source = 19;
+
+	/* Only support retry limits */
+	range->retry_capa = IW_RETRY_LIMIT;
+	range->retry_flags = IW_RETRY_LIMIT;
+	range->r_time_flags = 0;
+	/* SRL and LRL limits */
+	range->min_retry = 1;
+	range->max_retry = 255;
+	/* Retry lifetime limits unsupported */
+	range->min_r_time = 0;
+	range->max_r_time = 0;
+#endif /* WIRELESS_EXT > 10 */
+
+#if WIRELESS_EXT > 17
+	range->enc_capa = IW_ENC_CAPA_WPA;
+	range->enc_capa |= IW_ENC_CAPA_CIPHER_TKIP;
+	range->enc_capa |= IW_ENC_CAPA_CIPHER_CCMP;
+	range->enc_capa |= IW_ENC_CAPA_WPA2;
+
+	/* Determine driver FBT capability. */
+	if (dev_wlc_intvar_get(dev, "fbt_cap", &fbt_cap) == 0) {
+		if (fbt_cap == WLC_FBT_CAP_DRV_4WAY_AND_REASSOC) {
+			/* Tell the host (e.g. wpa_supplicant) to let driver do the handshake */
+			range->enc_capa |= IW_ENC_CAPA_4WAY_HANDSHAKE;
+		}
+	}
+
+#ifdef BCMFW_ROAM_ENABLE_WEXT
+	/* Advertise firmware roam capability to the external supplicant */
+	range->enc_capa |= IW_ENC_CAPA_FW_ROAM_ENABLE;
+#endif /* BCMFW_ROAM_ENABLE_WEXT */
+
+	/* Event capability (kernel) */
+	IW_EVENT_CAPA_SET_KERNEL(range->event_capa);
+	/* Event capability (driver) */
+	IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWAP);
+	IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWSCAN);
+	IW_EVENT_CAPA_SET(range->event_capa, IWEVTXDROP);
+	IW_EVENT_CAPA_SET(range->event_capa, IWEVMICHAELMICFAILURE);
+	IW_EVENT_CAPA_SET(range->event_capa, IWEVASSOCREQIE);
+	IW_EVENT_CAPA_SET(range->event_capa, IWEVASSOCRESPIE);
+	IW_EVENT_CAPA_SET(range->event_capa, IWEVPMKIDCAND);
+
+#if WIRELESS_EXT >= 22 && defined(IW_SCAN_CAPA_ESSID)
+	/* FC7 wireless.h defines EXT 22 but doesn't define scan_capa bits */
+	range->scan_capa = IW_SCAN_CAPA_ESSID;
+#endif
+#endif /* WIRELESS_EXT > 17 */
+
+	return 0;
+}
+
+static int
+rssi_to_qual(int rssi)
+{
+	if (rssi <= WL_IW_RSSI_NO_SIGNAL)
+		return 0;
+	else if (rssi <= WL_IW_RSSI_VERY_LOW)
+		return 1;
+	else if (rssi <= WL_IW_RSSI_LOW)
+		return 2;
+	else if (rssi <= WL_IW_RSSI_GOOD)
+		return 3;
+	else if (rssi <= WL_IW_RSSI_VERY_GOOD)
+		return 4;
+	else
+		return 5;
+}
+
+static int
+wl_iw_set_spy(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_iw_t *iw = IW_DEV_IF(dev);
+	struct sockaddr *addr = (struct sockaddr *) extra;
+	int i;
+
+	WL_TRACE(("%s: SIOCSIWSPY\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	iw->spy_num = MIN(ARRAYSIZE(iw->spy_addr), dwrq->length);
+	for (i = 0; i < iw->spy_num; i++)
+		memcpy(&iw->spy_addr[i], addr[i].sa_data, ETHER_ADDR_LEN);
+	memset(iw->spy_qual, 0, sizeof(iw->spy_qual));
+
+	return 0;
+}
+
+static int
+wl_iw_get_spy(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_iw_t *iw = IW_DEV_IF(dev);
+	struct sockaddr *addr = (struct sockaddr *) extra;
+	struct iw_quality *qual = (struct iw_quality *) &addr[iw->spy_num];
+	int i;
+
+	WL_TRACE(("%s: SIOCGIWSPY\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	dwrq->length = iw->spy_num;
+	for (i = 0; i < iw->spy_num; i++) {
+		memcpy(addr[i].sa_data, &iw->spy_addr[i], ETHER_ADDR_LEN);
+		addr[i].sa_family = AF_UNIX;
+		memcpy(&qual[i], &iw->spy_qual[i], sizeof(struct iw_quality));
+		iw->spy_qual[i].updated = 0;
+	}
+
+	return 0;
+}
+
+static int
+wl_iw_set_wap(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct sockaddr *awrq,
+	char *extra
+)
+{
+	int error = -EINVAL;
+
+	WL_TRACE(("%s: SIOCSIWAP\n", dev->name));
+
+	if (awrq->sa_family != ARPHRD_ETHER) {
+		WL_ERROR(("%s: Invalid Header...sa_family\n", __FUNCTION__));
+		return -EINVAL;
+	}
+
+	/* Ignore "auto" or "off" */
+	if (ETHER_ISBCAST(awrq->sa_data) || ETHER_ISNULLADDR(awrq->sa_data)) {
+		scb_val_t scbval;
+		bzero(&scbval, sizeof(scb_val_t));
+		if ((error = dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t)))) {
+			WL_ERROR(("%s: WLC_DISASSOC failed (%d).\n", __FUNCTION__, error));
+		}
+		return 0;
+	}
+	/* WL_ASSOC(("Assoc to %s\n", bcm_ether_ntoa((struct ether_addr *)&(awrq->sa_data),
+	 * eabuf)));
+	 */
+	/* Reassociate to the specified AP */
+	if ((error = dev_wlc_ioctl(dev, WLC_REASSOC, awrq->sa_data, ETHER_ADDR_LEN))) {
+		WL_ERROR(("%s: WLC_REASSOC failed (%d).\n", __FUNCTION__, error));
+		return error;
+	}
+
+	return 0;
+}
+
+static int
+wl_iw_get_wap(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct sockaddr *awrq,
+	char *extra
+)
+{
+	WL_TRACE(("%s: SIOCGIWAP\n", dev->name));
+
+	awrq->sa_family = ARPHRD_ETHER;
+	memset(awrq->sa_data, 0, ETHER_ADDR_LEN);
+
+	/* Ignore error (may be down or disassociated) */
+	(void) dev_wlc_ioctl(dev, WLC_GET_BSSID, awrq->sa_data, ETHER_ADDR_LEN);
+
+	return 0;
+}
+
+#if WIRELESS_EXT > 17
+static int
+wl_iw_mlme(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct sockaddr *awrq,
+	char *extra
+)
+{
+	struct iw_mlme *mlme;
+	scb_val_t scbval;
+	int error  = -EINVAL;
+
+	WL_TRACE(("%s: SIOCSIWMLME\n", dev->name));
+
+	mlme = (struct iw_mlme *)extra;
+	if (mlme == NULL) {
+		WL_ERROR(("Invalid ioctl data.\n"));
+		return error;
+	}
+
+	scbval.val = mlme->reason_code;
+	bcopy(&mlme->addr.sa_data, &scbval.ea, ETHER_ADDR_LEN);
+
+	if (mlme->cmd == IW_MLME_DISASSOC) {
+		scbval.val = htod32(scbval.val);
+		error = dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t));
+	}
+	else if (mlme->cmd == IW_MLME_DEAUTH) {
+		scbval.val = htod32(scbval.val);
+		error = dev_wlc_ioctl(dev, WLC_SCB_DEAUTHENTICATE_FOR_REASON, &scbval,
+			sizeof(scb_val_t));
+	}
+	else {
+		WL_ERROR(("%s: Invalid ioctl data.\n", __FUNCTION__));
+		return error;
+	}
+
+	return error;
+}
+#endif /* WIRELESS_EXT > 17 */
+
+static int
+wl_iw_get_aplist(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_scan_results_t *list;
+	struct sockaddr *addr = (struct sockaddr *) extra;
+	struct iw_quality qual[IW_MAX_AP];
+	wl_bss_info_t *bi = NULL;
+	int error, i;
+	uint buflen = dwrq->length;
+
+	WL_TRACE(("%s: SIOCGIWAPLIST\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	/* Get scan results (too large to put on the stack) */
+	list = kmalloc(buflen, GFP_KERNEL);
+	if (!list)
+		return -ENOMEM;
+	memset(list, 0, buflen);
+	list->buflen = htod32(buflen);
+	if ((error = dev_wlc_ioctl(dev, WLC_SCAN_RESULTS, list, buflen))) {
+		WL_ERROR(("%d: Scan results error %d\n", __LINE__, error));
+		kfree(list);
+		return error;
+	}
+	list->buflen = dtoh32(list->buflen);
+	list->version = dtoh32(list->version);
+	list->count = dtoh32(list->count);
+	ASSERT(list->version == WL_BSS_INFO_VERSION);
+
+	for (i = 0, dwrq->length = 0; i < list->count && dwrq->length < IW_MAX_AP; i++) {
+		bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info;
+		ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list +
+			buflen));
+
+		/* Infrastructure only */
+		if (!(dtoh16(bi->capability) & DOT11_CAP_ESS))
+			continue;
+
+		/* BSSID */
+		memcpy(addr[dwrq->length].sa_data, &bi->BSSID, ETHER_ADDR_LEN);
+		addr[dwrq->length].sa_family = ARPHRD_ETHER;
+		qual[dwrq->length].qual = rssi_to_qual(dtoh16(bi->RSSI));
+		qual[dwrq->length].level = 0x100 + dtoh16(bi->RSSI);
+		qual[dwrq->length].noise = 0x100 + bi->phy_noise;
+
+		/* Updated qual, level, and noise */
+#if WIRELESS_EXT > 18
+		qual[dwrq->length].updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
+#else
+		qual[dwrq->length].updated = 7;
+#endif /* WIRELESS_EXT > 18 */
+
+		dwrq->length++;
+	}
+
+	kfree(list);
+
+	if (dwrq->length) {
+		memcpy(&addr[dwrq->length], qual, sizeof(struct iw_quality) * dwrq->length);
+		/* Provided qual */
+		dwrq->flags = 1;
+	}
+
+	return 0;
+}
+
+static int
+wl_iw_iscan_get_aplist(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_scan_results_t *list;
+	iscan_buf_t * buf;
+	iscan_info_t *iscan = g_iscan;
+
+	struct sockaddr *addr = (struct sockaddr *) extra;
+	struct iw_quality qual[IW_MAX_AP];
+	wl_bss_info_t *bi = NULL;
+	int i;
+
+	WL_TRACE(("%s: SIOCGIWAPLIST\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	if ((!iscan) || (iscan->sysioc_pid < 0)) {
+		return wl_iw_get_aplist(dev, info, dwrq, extra);
+	}
+
+	buf = iscan->list_hdr;
+	/* Get scan results (too large to put on the stack) */
+	while (buf) {
+	    list = &((wl_iscan_results_t*)buf->iscan_buf)->results;
+	    ASSERT(list->version == WL_BSS_INFO_VERSION);
+
+	    bi = NULL;
+	for (i = 0, dwrq->length = 0; i < list->count && dwrq->length < IW_MAX_AP; i++) {
+		bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info;
+		ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list +
+			WLC_IW_ISCAN_MAXLEN));
+
+		/* Infrastructure only */
+		if (!(dtoh16(bi->capability) & DOT11_CAP_ESS))
+			continue;
+
+		/* BSSID */
+		memcpy(addr[dwrq->length].sa_data, &bi->BSSID, ETHER_ADDR_LEN);
+		addr[dwrq->length].sa_family = ARPHRD_ETHER;
+		qual[dwrq->length].qual = rssi_to_qual(dtoh16(bi->RSSI));
+		qual[dwrq->length].level = 0x100 + dtoh16(bi->RSSI);
+		qual[dwrq->length].noise = 0x100 + bi->phy_noise;
+
+		/* Updated qual, level, and noise */
+#if WIRELESS_EXT > 18
+		qual[dwrq->length].updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
+#else
+		qual[dwrq->length].updated = 7;
+#endif /* WIRELESS_EXT > 18 */
+
+		dwrq->length++;
+	    }
+	    buf = buf->next;
+	}
+	if (dwrq->length) {
+		memcpy(&addr[dwrq->length], qual, sizeof(struct iw_quality) * dwrq->length);
+		/* Provided qual */
+		dwrq->flags = 1;
+	}
+
+	return 0;
+}
+
+#if WIRELESS_EXT > 13
+static int
+wl_iw_set_scan(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	wlc_ssid_t ssid;
+
+	WL_TRACE(("%s: SIOCSIWSCAN\n", dev->name));
+
+	/* default Broadcast scan */
+	memset(&ssid, 0, sizeof(ssid));
+
+#if WIRELESS_EXT > 17
+	/* check for given essid */
+	if (wrqu->data.length == sizeof(struct iw_scan_req)) {
+		if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
+			struct iw_scan_req *req = (struct iw_scan_req *)extra;
+			ssid.SSID_len = MIN(sizeof(ssid.SSID), req->essid_len);
+			memcpy(ssid.SSID, req->essid, ssid.SSID_len);
+			ssid.SSID_len = htod32(ssid.SSID_len);
+		}
+	}
+#endif
+	/* Ignore error (most likely scan in progress) */
+	(void) dev_wlc_ioctl(dev, WLC_SCAN, &ssid, sizeof(ssid));
+
+	return 0;
+}
+
+static int
+wl_iw_iscan_set_scan(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	wlc_ssid_t ssid;
+	iscan_info_t *iscan = g_iscan;
+
+	WL_TRACE(("%s: SIOCSIWSCAN\n", dev->name));
+
+	/* use backup if our thread is not successful */
+	if ((!iscan) || (iscan->sysioc_pid < 0)) {
+		return wl_iw_set_scan(dev, info, wrqu, extra);
+	}
+	if (iscan->iscan_state == ISCAN_STATE_SCANING) {
+		return 0;
+	}
+
+	/* default Broadcast scan */
+	memset(&ssid, 0, sizeof(ssid));
+
+#if WIRELESS_EXT > 17
+	/* check for given essid */
+	if (wrqu->data.length == sizeof(struct iw_scan_req)) {
+		if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
+			struct iw_scan_req *req = (struct iw_scan_req *)extra;
+			ssid.SSID_len = MIN(sizeof(ssid.SSID), req->essid_len);
+			memcpy(ssid.SSID, req->essid, ssid.SSID_len);
+			ssid.SSID_len = htod32(ssid.SSID_len);
+		}
+	}
+#endif
+
+	iscan->list_cur = iscan->list_hdr;
+	iscan->iscan_state = ISCAN_STATE_SCANING;
+
+
+	wl_iw_set_event_mask(dev);
+	wl_iw_iscan(iscan, &ssid, WL_SCAN_ACTION_START);
+
+	iscan->timer.expires = jiffies + msecs_to_jiffies(iscan->timer_ms);
+	add_timer(&iscan->timer);
+	iscan->timer_on = 1;
+
+	return 0;
+}
+
+#if WIRELESS_EXT > 17
+static bool
+ie_is_wpa_ie(uint8 **wpaie, uint8 **tlvs, int *tlvs_len)
+{
+/* Is this body of this tlvs entry a WPA entry? If */
+/* not update the tlvs buffer pointer/length */
+	uint8 *ie = *wpaie;
+
+	/* If the contents match the WPA_OUI and type=1 */
+	if ((ie[1] >= 6) &&
+		!bcmp((const void *)&ie[2], (const void *)(WPA_OUI "\x01"), 4)) {
+		return TRUE;
+	}
+
+	/* point to the next ie */
+	ie += ie[1] + 2;
+	/* calculate the length of the rest of the buffer */
+	*tlvs_len -= (int)(ie - *tlvs);
+	/* update the pointer to the start of the buffer */
+	*tlvs = ie;
+	return FALSE;
+}
+
+static bool
+ie_is_wps_ie(uint8 **wpsie, uint8 **tlvs, int *tlvs_len)
+{
+/* Is this body of this tlvs entry a WPS entry? If */
+/* not update the tlvs buffer pointer/length */
+	uint8 *ie = *wpsie;
+
+	/* If the contents match the WPA_OUI and type=4 */
+	if ((ie[1] >= 4) &&
+		!bcmp((const void *)&ie[2], (const void *)(WPA_OUI "\x04"), 4)) {
+		return TRUE;
+	}
+
+	/* point to the next ie */
+	ie += ie[1] + 2;
+	/* calculate the length of the rest of the buffer */
+	*tlvs_len -= (int)(ie - *tlvs);
+	/* update the pointer to the start of the buffer */
+	*tlvs = ie;
+	return FALSE;
+}
+#endif /* WIRELESS_EXT > 17 */
+
+
+static int
+wl_iw_handle_scanresults_ies(char **event_p, char *end,
+	struct iw_request_info *info, wl_bss_info_t *bi)
+{
+#if WIRELESS_EXT > 17
+	struct iw_event	iwe;
+	char *event;
+
+	event = *event_p;
+	if (bi->ie_length) {
+		/* look for wpa/rsn ies in the ie list... */
+		bcm_tlv_t *ie;
+		uint8 *ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t);
+		int ptr_len = bi->ie_length;
+
+		/* OSEN IE */
+		if ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_VS_ID)) &&
+			ie->len > WFA_OUI_LEN + 1 &&
+			!bcmp((const void *)&ie->data[0], (const void *)WFA_OUI, WFA_OUI_LEN) &&
+			ie->data[WFA_OUI_LEN] == WFA_OUI_TYPE_OSEN) {
+			iwe.cmd = IWEVGENIE;
+			iwe.u.data.length = ie->len + 2;
+			event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
+		}
+		ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t);
+
+		if ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_RSN_ID))) {
+			iwe.cmd = IWEVGENIE;
+			iwe.u.data.length = ie->len + 2;
+			event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
+		}
+		ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t);
+
+		if ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_MDIE_ID))) {
+			iwe.cmd = IWEVGENIE;
+			iwe.u.data.length = ie->len + 2;
+			event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
+		}
+		ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t);
+
+		while ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_WPA_ID))) {
+			/* look for WPS IE */
+			if (ie_is_wps_ie(((uint8 **)&ie), &ptr, &ptr_len)) {
+				iwe.cmd = IWEVGENIE;
+				iwe.u.data.length = ie->len + 2;
+				event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
+				break;
+			}
+		}
+
+		ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t);
+		ptr_len = bi->ie_length;
+		while ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_WPA_ID))) {
+			if (ie_is_wpa_ie(((uint8 **)&ie), &ptr, &ptr_len)) {
+				iwe.cmd = IWEVGENIE;
+				iwe.u.data.length = ie->len + 2;
+				event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
+				break;
+			}
+		}
+
+	*event_p = event;
+	}
+
+#endif /* WIRELESS_EXT > 17 */
+	return 0;
+}
+static int
+wl_iw_get_scan(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	channel_info_t ci;
+	wl_scan_results_t *list;
+	struct iw_event	iwe;
+	wl_bss_info_t *bi = NULL;
+	int error, i, j;
+	char *event = extra, *end = extra + dwrq->length, *value;
+	uint buflen = dwrq->length;
+
+	WL_TRACE(("%s: SIOCGIWSCAN\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	/* Check for scan in progress */
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_CHANNEL, &ci, sizeof(ci))))
+		return error;
+	ci.scan_channel = dtoh32(ci.scan_channel);
+	if (ci.scan_channel)
+		return -EAGAIN;
+
+	/* Get scan results (too large to put on the stack) */
+	list = kmalloc(buflen, GFP_KERNEL);
+	if (!list)
+		return -ENOMEM;
+	memset(list, 0, buflen);
+	list->buflen = htod32(buflen);
+	if ((error = dev_wlc_ioctl(dev, WLC_SCAN_RESULTS, list, buflen))) {
+		kfree(list);
+		return error;
+	}
+	list->buflen = dtoh32(list->buflen);
+	list->version = dtoh32(list->version);
+	list->count = dtoh32(list->count);
+
+	ASSERT(list->version == WL_BSS_INFO_VERSION);
+
+	for (i = 0; i < list->count && i < IW_MAX_AP; i++) {
+		bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info;
+		ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list +
+			buflen));
+
+		/* First entry must be the BSSID */
+		iwe.cmd = SIOCGIWAP;
+		iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
+		memcpy(iwe.u.ap_addr.sa_data, &bi->BSSID, ETHER_ADDR_LEN);
+		event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_ADDR_LEN);
+
+		/* SSID */
+		iwe.u.data.length = dtoh32(bi->SSID_len);
+		iwe.cmd = SIOCGIWESSID;
+		iwe.u.data.flags = 1;
+		event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, bi->SSID);
+
+		/* Mode */
+		if (dtoh16(bi->capability) & (DOT11_CAP_ESS | DOT11_CAP_IBSS)) {
+			iwe.cmd = SIOCGIWMODE;
+			if (dtoh16(bi->capability) & DOT11_CAP_ESS)
+				iwe.u.mode = IW_MODE_INFRA;
+			else
+				iwe.u.mode = IW_MODE_ADHOC;
+			event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_UINT_LEN);
+		}
+
+		/* Channel */
+		iwe.cmd = SIOCGIWFREQ;
+
+		iwe.u.freq.m = wf_channel2mhz(CHSPEC_CHANNEL(bi->chanspec),
+			(CHSPEC_IS2G(bi->chanspec)) ?
+			WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G);
+		iwe.u.freq.e = 6;
+		event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_FREQ_LEN);
+
+		/* Channel quality */
+		iwe.cmd = IWEVQUAL;
+		iwe.u.qual.qual = rssi_to_qual(dtoh16(bi->RSSI));
+		iwe.u.qual.level = 0x100 + dtoh16(bi->RSSI);
+		iwe.u.qual.noise = 0x100 + bi->phy_noise;
+		event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_QUAL_LEN);
+
+		/* WPA, WPA2, WPS, WAPI IEs */
+		 wl_iw_handle_scanresults_ies(&event, end, info, bi);
+
+		/* Encryption */
+		iwe.cmd = SIOCGIWENCODE;
+		if (dtoh16(bi->capability) & DOT11_CAP_PRIVACY)
+			iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
+		else
+			iwe.u.data.flags = IW_ENCODE_DISABLED;
+		iwe.u.data.length = 0;
+		event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)event);
+
+		/* Rates */
+		if (bi->rateset.count) {
+			value = event + IW_EV_LCP_LEN;
+			iwe.cmd = SIOCGIWRATE;
+			/* Those two flags are ignored... */
+			iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
+			for (j = 0; j < bi->rateset.count && j < IW_MAX_BITRATES; j++) {
+				iwe.u.bitrate.value = (bi->rateset.rates[j] & 0x7f) * 500000;
+				value = IWE_STREAM_ADD_VALUE(info, event, value, end, &iwe,
+					IW_EV_PARAM_LEN);
+			}
+			event = value;
+		}
+	}
+
+	kfree(list);
+
+	dwrq->length = event - extra;
+	dwrq->flags = 0;	/* todo */
+
+	return 0;
+}
+
+static int
+wl_iw_iscan_get_scan(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_scan_results_t *list;
+	struct iw_event	iwe;
+	wl_bss_info_t *bi = NULL;
+	int ii, j;
+	int apcnt;
+	char *event = extra, *end = extra + dwrq->length, *value;
+	iscan_info_t *iscan = g_iscan;
+	iscan_buf_t * p_buf;
+
+	WL_TRACE(("%s: SIOCGIWSCAN\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	/* use backup if our thread is not successful */
+	if ((!iscan) || (iscan->sysioc_pid < 0)) {
+		return wl_iw_get_scan(dev, info, dwrq, extra);
+	}
+
+	/* Check for scan in progress */
+	if (iscan->iscan_state == ISCAN_STATE_SCANING)
+		return -EAGAIN;
+
+	apcnt = 0;
+	p_buf = iscan->list_hdr;
+	/* Get scan results */
+	while (p_buf != iscan->list_cur) {
+	    list = &((wl_iscan_results_t*)p_buf->iscan_buf)->results;
+
+	    if (list->version != WL_BSS_INFO_VERSION) {
+		WL_ERROR(("list->version %d != WL_BSS_INFO_VERSION\n", list->version));
+	    }
+
+	    bi = NULL;
+	    for (ii = 0; ii < list->count && apcnt < IW_MAX_AP; apcnt++, ii++) {
+		bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info;
+		ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list +
+			WLC_IW_ISCAN_MAXLEN));
+
+		/* overflow check cover fields before wpa IEs */
+		if (event + ETHER_ADDR_LEN + bi->SSID_len + IW_EV_UINT_LEN + IW_EV_FREQ_LEN +
+			IW_EV_QUAL_LEN >= end)
+			return -E2BIG;
+		/* First entry must be the BSSID */
+		iwe.cmd = SIOCGIWAP;
+		iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
+		memcpy(iwe.u.ap_addr.sa_data, &bi->BSSID, ETHER_ADDR_LEN);
+		event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_ADDR_LEN);
+
+		/* SSID */
+		iwe.u.data.length = dtoh32(bi->SSID_len);
+		iwe.cmd = SIOCGIWESSID;
+		iwe.u.data.flags = 1;
+		event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, bi->SSID);
+
+		/* Mode */
+		if (dtoh16(bi->capability) & (DOT11_CAP_ESS | DOT11_CAP_IBSS)) {
+			iwe.cmd = SIOCGIWMODE;
+			if (dtoh16(bi->capability) & DOT11_CAP_ESS)
+				iwe.u.mode = IW_MODE_INFRA;
+			else
+				iwe.u.mode = IW_MODE_ADHOC;
+			event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_UINT_LEN);
+		}
+
+		/* Channel */
+		iwe.cmd = SIOCGIWFREQ;
+
+		iwe.u.freq.m = wf_channel2mhz(CHSPEC_CHANNEL(bi->chanspec),
+			(CHSPEC_IS2G(bi->chanspec)) ?
+			WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G);
+		iwe.u.freq.e = 6;
+		event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_FREQ_LEN);
+
+		/* Channel quality */
+		iwe.cmd = IWEVQUAL;
+		iwe.u.qual.qual = rssi_to_qual(dtoh16(bi->RSSI));
+		iwe.u.qual.level = 0x100 + dtoh16(bi->RSSI);
+		iwe.u.qual.noise = 0x100 + bi->phy_noise;
+		event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_QUAL_LEN);
+
+		/* WPA, WPA2, WPS, WAPI IEs */
+		wl_iw_handle_scanresults_ies(&event, end, info, bi);
+
+		/* Encryption */
+		iwe.cmd = SIOCGIWENCODE;
+		if (dtoh16(bi->capability) & DOT11_CAP_PRIVACY)
+			iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
+		else
+			iwe.u.data.flags = IW_ENCODE_DISABLED;
+		iwe.u.data.length = 0;
+		event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)event);
+
+		/* Rates */
+		if (bi->rateset.count <= sizeof(bi->rateset.rates)) {
+			if (event + IW_MAX_BITRATES*IW_EV_PARAM_LEN >= end)
+				return -E2BIG;
+
+			value = event + IW_EV_LCP_LEN;
+			iwe.cmd = SIOCGIWRATE;
+			/* Those two flags are ignored... */
+			iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
+			for (j = 0; j < bi->rateset.count && j < IW_MAX_BITRATES; j++) {
+				iwe.u.bitrate.value = (bi->rateset.rates[j] & 0x7f) * 500000;
+				value = IWE_STREAM_ADD_VALUE(info, event, value, end, &iwe,
+					IW_EV_PARAM_LEN);
+			}
+			event = value;
+		}
+	    }
+	    p_buf = p_buf->next;
+	} /* while (p_buf) */
+
+	dwrq->length = event - extra;
+	dwrq->flags = 0;	/* todo */
+
+	return 0;
+}
+
+#endif /* WIRELESS_EXT > 13 */
+
+
+static int
+wl_iw_set_essid(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wlc_ssid_t ssid;
+	int error;
+
+	WL_TRACE(("%s: SIOCSIWESSID\n", dev->name));
+
+	/* default Broadcast SSID */
+	memset(&ssid, 0, sizeof(ssid));
+	if (dwrq->length && extra) {
+#if WIRELESS_EXT > 20
+		ssid.SSID_len = MIN(sizeof(ssid.SSID), dwrq->length);
+#else
+		ssid.SSID_len = MIN(sizeof(ssid.SSID), dwrq->length-1);
+#endif
+		memcpy(ssid.SSID, extra, ssid.SSID_len);
+		ssid.SSID_len = htod32(ssid.SSID_len);
+
+		if ((error = dev_wlc_ioctl(dev, WLC_SET_SSID, &ssid, sizeof(ssid))))
+			return error;
+	}
+	/* If essid null then it is "iwconfig <interface> essid off" command */
+	else {
+		scb_val_t scbval;
+		bzero(&scbval, sizeof(scb_val_t));
+		if ((error = dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t))))
+			return error;
+	}
+	return 0;
+}
+
+static int
+wl_iw_get_essid(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wlc_ssid_t ssid;
+	int error;
+
+	WL_TRACE(("%s: SIOCGIWESSID\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_SSID, &ssid, sizeof(ssid)))) {
+		WL_ERROR(("Error getting the SSID\n"));
+		return error;
+	}
+
+	ssid.SSID_len = dtoh32(ssid.SSID_len);
+
+	/* Get the current SSID */
+	memcpy(extra, ssid.SSID, ssid.SSID_len);
+
+	dwrq->length = ssid.SSID_len;
+
+	dwrq->flags = 1; /* active */
+
+	return 0;
+}
+
+static int
+wl_iw_set_nick(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_iw_t *iw = IW_DEV_IF(dev);
+	WL_TRACE(("%s: SIOCSIWNICKN\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	/* Check the size of the string */
+	if (dwrq->length > sizeof(iw->nickname))
+		return -E2BIG;
+
+	memcpy(iw->nickname, extra, dwrq->length);
+	iw->nickname[dwrq->length - 1] = '\0';
+
+	return 0;
+}
+
+static int
+wl_iw_get_nick(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_iw_t *iw = IW_DEV_IF(dev);
+	WL_TRACE(("%s: SIOCGIWNICKN\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	strcpy(extra, iw->nickname);
+	dwrq->length = strlen(extra) + 1;
+
+	return 0;
+}
+
+static int wl_iw_set_rate(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	wl_rateset_t rateset;
+	int error, rate, i, error_bg, error_a;
+
+	WL_TRACE(("%s: SIOCSIWRATE\n", dev->name));
+
+	/* Get current rateset */
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_CURR_RATESET, &rateset, sizeof(rateset))))
+		return error;
+
+	rateset.count = dtoh32(rateset.count);
+
+	if (vwrq->value < 0) {
+		/* Select maximum rate */
+		rate = rateset.rates[rateset.count - 1] & 0x7f;
+	} else if (vwrq->value < rateset.count) {
+		/* Select rate by rateset index */
+		rate = rateset.rates[vwrq->value] & 0x7f;
+	} else {
+		/* Specified rate in bps */
+		rate = vwrq->value / 500000;
+	}
+
+	if (vwrq->fixed) {
+		/*
+			Set rate override,
+			Since the is a/b/g-blind, both a/bg_rate are enforced.
+		*/
+		error_bg = dev_wlc_intvar_set(dev, "bg_rate", rate);
+		error_a = dev_wlc_intvar_set(dev, "a_rate", rate);
+
+		if (error_bg && error_a)
+			return (error_bg | error_a);
+	} else {
+		/*
+			clear rate override
+			Since the is a/b/g-blind, both a/bg_rate are enforced.
+		*/
+		/* 0 is for clearing rate override */
+		error_bg = dev_wlc_intvar_set(dev, "bg_rate", 0);
+		/* 0 is for clearing rate override */
+		error_a = dev_wlc_intvar_set(dev, "a_rate", 0);
+
+		if (error_bg && error_a)
+			return (error_bg | error_a);
+
+		/* Remove rates above selected rate */
+		for (i = 0; i < rateset.count; i++)
+			if ((rateset.rates[i] & 0x7f) > rate)
+				break;
+		rateset.count = htod32(i);
+
+		/* Set current rateset */
+		if ((error = dev_wlc_ioctl(dev, WLC_SET_RATESET, &rateset, sizeof(rateset))))
+			return error;
+	}
+
+	return 0;
+}
+
+static int wl_iw_get_rate(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, rate;
+
+	WL_TRACE(("%s: SIOCGIWRATE\n", dev->name));
+
+	/* Report the current tx rate */
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_RATE, &rate, sizeof(rate))))
+		return error;
+	rate = dtoh32(rate);
+	vwrq->value = rate * 500000;
+
+	return 0;
+}
+
+static int
+wl_iw_set_rts(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, rts;
+
+	WL_TRACE(("%s: SIOCSIWRTS\n", dev->name));
+
+	if (vwrq->disabled)
+		rts = DOT11_DEFAULT_RTS_LEN;
+	else if (vwrq->value < 0 || vwrq->value > DOT11_DEFAULT_RTS_LEN)
+		return -EINVAL;
+	else
+		rts = vwrq->value;
+
+	if ((error = dev_wlc_intvar_set(dev, "rtsthresh", rts)))
+		return error;
+
+	return 0;
+}
+
+static int
+wl_iw_get_rts(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, rts;
+
+	WL_TRACE(("%s: SIOCGIWRTS\n", dev->name));
+
+	if ((error = dev_wlc_intvar_get(dev, "rtsthresh", &rts)))
+		return error;
+
+	vwrq->value = rts;
+	vwrq->disabled = (rts >= DOT11_DEFAULT_RTS_LEN);
+	vwrq->fixed = 1;
+
+	return 0;
+}
+
+static int
+wl_iw_set_frag(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, frag;
+
+	WL_TRACE(("%s: SIOCSIWFRAG\n", dev->name));
+
+	if (vwrq->disabled)
+		frag = DOT11_DEFAULT_FRAG_LEN;
+	else if (vwrq->value < 0 || vwrq->value > DOT11_DEFAULT_FRAG_LEN)
+		return -EINVAL;
+	else
+		frag = vwrq->value;
+
+	if ((error = dev_wlc_intvar_set(dev, "fragthresh", frag)))
+		return error;
+
+	return 0;
+}
+
+static int
+wl_iw_get_frag(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, fragthreshold;
+
+	WL_TRACE(("%s: SIOCGIWFRAG\n", dev->name));
+
+	if ((error = dev_wlc_intvar_get(dev, "fragthresh", &fragthreshold)))
+		return error;
+
+	vwrq->value = fragthreshold;
+	vwrq->disabled = (fragthreshold >= DOT11_DEFAULT_FRAG_LEN);
+	vwrq->fixed = 1;
+
+	return 0;
+}
+
+static int
+wl_iw_set_txpow(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, disable;
+	uint16 txpwrmw;
+	WL_TRACE(("%s: SIOCSIWTXPOW\n", dev->name));
+
+	/* Make sure radio is off or on as far as software is concerned */
+	disable = vwrq->disabled ? WL_RADIO_SW_DISABLE : 0;
+	disable += WL_RADIO_SW_DISABLE << 16;
+
+	disable = htod32(disable);
+	if ((error = dev_wlc_ioctl(dev, WLC_SET_RADIO, &disable, sizeof(disable))))
+		return error;
+
+	/* If Radio is off, nothing more to do */
+	if (disable & WL_RADIO_SW_DISABLE)
+		return 0;
+
+	/* Only handle mW */
+	if (!(vwrq->flags & IW_TXPOW_MWATT))
+		return -EINVAL;
+
+	/* Value < 0 means just "on" or "off" */
+	if (vwrq->value < 0)
+		return 0;
+
+	if (vwrq->value > 0xffff) txpwrmw = 0xffff;
+	else txpwrmw = (uint16)vwrq->value;
+
+
+	error = dev_wlc_intvar_set(dev, "qtxpower", (int)(bcm_mw_to_qdbm(txpwrmw)));
+	return error;
+}
+
+static int
+wl_iw_get_txpow(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, disable, txpwrdbm;
+	uint8 result;
+
+	WL_TRACE(("%s: SIOCGIWTXPOW\n", dev->name));
+
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_RADIO, &disable, sizeof(disable))) ||
+	    (error = dev_wlc_intvar_get(dev, "qtxpower", &txpwrdbm)))
+		return error;
+
+	disable = dtoh32(disable);
+	result = (uint8)(txpwrdbm & ~WL_TXPWR_OVERRIDE);
+	vwrq->value = (int32)bcm_qdbm_to_mw(result);
+	vwrq->fixed = 0;
+	vwrq->disabled = (disable & (WL_RADIO_SW_DISABLE | WL_RADIO_HW_DISABLE)) ? 1 : 0;
+	vwrq->flags = IW_TXPOW_MWATT;
+
+	return 0;
+}
+
+#if WIRELESS_EXT > 10
+static int
+wl_iw_set_retry(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, lrl, srl;
+
+	WL_TRACE(("%s: SIOCSIWRETRY\n", dev->name));
+
+	/* Do not handle "off" or "lifetime" */
+	if (vwrq->disabled || (vwrq->flags & IW_RETRY_LIFETIME))
+		return -EINVAL;
+
+	/* Handle "[min|max] limit" */
+	if (vwrq->flags & IW_RETRY_LIMIT) {
+		/* "max limit" or just "limit" */
+#if WIRELESS_EXT > 20
+		if ((vwrq->flags & IW_RETRY_LONG) ||(vwrq->flags & IW_RETRY_MAX) ||
+			!((vwrq->flags & IW_RETRY_SHORT) || (vwrq->flags & IW_RETRY_MIN))) {
+#else
+		if ((vwrq->flags & IW_RETRY_MAX) || !(vwrq->flags & IW_RETRY_MIN)) {
+#endif /* WIRELESS_EXT > 20 */
+
+			lrl = htod32(vwrq->value);
+			if ((error = dev_wlc_ioctl(dev, WLC_SET_LRL, &lrl, sizeof(lrl))))
+				return error;
+		}
+		/* "min limit" or just "limit" */
+#if WIRELESS_EXT > 20
+		if ((vwrq->flags & IW_RETRY_SHORT) ||(vwrq->flags & IW_RETRY_MIN) ||
+			!((vwrq->flags & IW_RETRY_LONG) || (vwrq->flags & IW_RETRY_MAX))) {
+#else
+		if ((vwrq->flags & IW_RETRY_MIN) || !(vwrq->flags & IW_RETRY_MAX)) {
+#endif /* WIRELESS_EXT > 20 */
+
+			srl = htod32(vwrq->value);
+			if ((error = dev_wlc_ioctl(dev, WLC_SET_SRL, &srl, sizeof(srl))))
+				return error;
+		}
+	}
+
+	return 0;
+}
+
+static int
+wl_iw_get_retry(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, lrl, srl;
+
+	WL_TRACE(("%s: SIOCGIWRETRY\n", dev->name));
+
+	vwrq->disabled = 0;      /* Can't be disabled */
+
+	/* Do not handle lifetime queries */
+	if ((vwrq->flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME)
+		return -EINVAL;
+
+	/* Get retry limits */
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_LRL, &lrl, sizeof(lrl))) ||
+	    (error = dev_wlc_ioctl(dev, WLC_GET_SRL, &srl, sizeof(srl))))
+		return error;
+
+	lrl = dtoh32(lrl);
+	srl = dtoh32(srl);
+
+	/* Note : by default, display the min retry number */
+	if (vwrq->flags & IW_RETRY_MAX) {
+		vwrq->flags = IW_RETRY_LIMIT | IW_RETRY_MAX;
+		vwrq->value = lrl;
+	} else {
+		vwrq->flags = IW_RETRY_LIMIT;
+		vwrq->value = srl;
+		if (srl != lrl)
+			vwrq->flags |= IW_RETRY_MIN;
+	}
+
+	return 0;
+}
+#endif /* WIRELESS_EXT > 10 */
+
+static int
+wl_iw_set_encode(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_wsec_key_t key;
+	int error, val, wsec;
+
+	WL_TRACE(("%s: SIOCSIWENCODE\n", dev->name));
+
+	memset(&key, 0, sizeof(key));
+
+	if ((dwrq->flags & IW_ENCODE_INDEX) == 0) {
+		/* Find the current key */
+		for (key.index = 0; key.index < DOT11_MAX_DEFAULT_KEYS; key.index++) {
+			val = htod32(key.index);
+			if ((error = dev_wlc_ioctl(dev, WLC_GET_KEY_PRIMARY, &val, sizeof(val))))
+				return error;
+			val = dtoh32(val);
+			if (val)
+				break;
+		}
+		/* Default to 0 */
+		if (key.index == DOT11_MAX_DEFAULT_KEYS)
+			key.index = 0;
+	} else {
+		key.index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
+		if (key.index >= DOT11_MAX_DEFAULT_KEYS)
+			return -EINVAL;
+	}
+
+	/* Interpret "off" to mean no encryption */
+	wsec = (dwrq->flags & IW_ENCODE_DISABLED) ? 0 : WEP_ENABLED;
+
+	if ((error = dev_wlc_intvar_set(dev, "wsec", wsec)))
+		return error;
+
+	/* Old API used to pass a NULL pointer instead of IW_ENCODE_NOKEY */
+	if (!extra || !dwrq->length || (dwrq->flags & IW_ENCODE_NOKEY)) {
+		/* Just select a new current key */
+		val = htod32(key.index);
+		if ((error = dev_wlc_ioctl(dev, WLC_SET_KEY_PRIMARY, &val, sizeof(val))))
+			return error;
+	} else {
+		key.len = dwrq->length;
+
+		if (dwrq->length > sizeof(key.data))
+			return -EINVAL;
+
+		memcpy(key.data, extra, dwrq->length);
+
+		key.flags = WL_PRIMARY_KEY;
+		switch (key.len) {
+		case WEP1_KEY_SIZE:
+			key.algo = CRYPTO_ALGO_WEP1;
+			break;
+		case WEP128_KEY_SIZE:
+			key.algo = CRYPTO_ALGO_WEP128;
+			break;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
+		case TKIP_KEY_SIZE:
+			key.algo = CRYPTO_ALGO_TKIP;
+			break;
+#endif
+		case AES_KEY_SIZE:
+			key.algo = CRYPTO_ALGO_AES_CCM;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		/* Set the new key/index */
+		swap_key_from_BE(&key);
+		if ((error = dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key))))
+			return error;
+	}
+
+	/* Interpret "restricted" to mean shared key authentication */
+	val = (dwrq->flags & IW_ENCODE_RESTRICTED) ? 1 : 0;
+	val = htod32(val);
+	if ((error = dev_wlc_ioctl(dev, WLC_SET_AUTH, &val, sizeof(val))))
+		return error;
+
+	return 0;
+}
+
+static int
+wl_iw_get_encode(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_wsec_key_t key;
+	int error, val, wsec, auth;
+
+	WL_TRACE(("%s: SIOCGIWENCODE\n", dev->name));
+
+	/* assure default values of zero for things we don't touch */
+	bzero(&key, sizeof(wl_wsec_key_t));
+
+	if ((dwrq->flags & IW_ENCODE_INDEX) == 0) {
+		/* Find the current key */
+		for (key.index = 0; key.index < DOT11_MAX_DEFAULT_KEYS; key.index++) {
+			val = key.index;
+			if ((error = dev_wlc_ioctl(dev, WLC_GET_KEY_PRIMARY, &val, sizeof(val))))
+				return error;
+			val = dtoh32(val);
+			if (val)
+				break;
+		}
+	} else
+		key.index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
+
+	if (key.index >= DOT11_MAX_DEFAULT_KEYS)
+		key.index = 0;
+
+	/* Get info */
+
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_WSEC, &wsec, sizeof(wsec))) ||
+	    (error = dev_wlc_ioctl(dev, WLC_GET_AUTH, &auth, sizeof(auth))))
+		return error;
+
+	swap_key_to_BE(&key);
+
+	wsec = dtoh32(wsec);
+	auth = dtoh32(auth);
+	/* Get key length */
+	dwrq->length = MIN(IW_ENCODING_TOKEN_MAX, key.len);
+
+	/* Get flags */
+	dwrq->flags = key.index + 1;
+	if (!(wsec & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED))) {
+		/* Interpret "off" to mean no encryption */
+		dwrq->flags |= IW_ENCODE_DISABLED;
+	}
+	if (auth) {
+		/* Interpret "restricted" to mean shared key authentication */
+		dwrq->flags |= IW_ENCODE_RESTRICTED;
+	}
+
+	/* Get key */
+	if (dwrq->length && extra)
+		memcpy(extra, key.data, dwrq->length);
+
+	return 0;
+}
+
+static int
+wl_iw_set_power(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, pm;
+
+	WL_TRACE(("%s: SIOCSIWPOWER\n", dev->name));
+
+	pm = vwrq->disabled ? PM_OFF : PM_MAX;
+
+	pm = htod32(pm);
+	if ((error = dev_wlc_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm))))
+		return error;
+
+	return 0;
+}
+
+static int
+wl_iw_get_power(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, pm;
+
+	WL_TRACE(("%s: SIOCGIWPOWER\n", dev->name));
+
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_PM, &pm, sizeof(pm))))
+		return error;
+
+	pm = dtoh32(pm);
+	vwrq->disabled = pm ? 0 : 1;
+	vwrq->flags = IW_POWER_ALL_R;
+
+	return 0;
+}
+
+#if WIRELESS_EXT > 17
+static int
+wl_iw_set_wpaie(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *iwp,
+	char *extra
+)
+{
+		dev_wlc_bufvar_set(dev, "wpaie", extra, iwp->length);
+
+	return 0;
+}
+
+static int
+wl_iw_get_wpaie(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *iwp,
+	char *extra
+)
+{
+	WL_TRACE(("%s: SIOCGIWGENIE\n", dev->name));
+	iwp->length = 64;
+	dev_wlc_bufvar_get(dev, "wpaie", extra, iwp->length);
+	return 0;
+}
+
+static int
+wl_iw_set_encodeext(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_wsec_key_t key;
+	int error;
+	struct iw_encode_ext *iwe;
+
+	WL_TRACE(("%s: SIOCSIWENCODEEXT\n", dev->name));
+
+	memset(&key, 0, sizeof(key));
+	iwe = (struct iw_encode_ext *)extra;
+
+	/* disable encryption completely  */
+	if (dwrq->flags & IW_ENCODE_DISABLED) {
+
+	}
+
+	/* get the key index */
+	key.index = 0;
+	if (dwrq->flags & IW_ENCODE_INDEX)
+		key.index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
+
+	key.len = iwe->key_len;
+
+	/* Instead of bcast for ea address for default wep keys, driver needs it to be Null */
+	if (!ETHER_ISMULTI(iwe->addr.sa_data))
+		bcopy((void *)&iwe->addr.sa_data, (char *)&key.ea, ETHER_ADDR_LEN);
+
+	/* check for key index change */
+	if (key.len == 0) {
+		if (iwe->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
+			WL_WSEC(("Changing the the primary Key to %d\n", key.index));
+			/* change the key index .... */
+			key.index = htod32(key.index);
+			error = dev_wlc_ioctl(dev, WLC_SET_KEY_PRIMARY,
+				&key.index, sizeof(key.index));
+			if (error)
+				return error;
+		}
+		/* key delete */
+		else {
+			swap_key_from_BE(&key);
+			error = dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key));
+			if (error)
+				return error;
+		}
+	}
+	/* This case is used to allow an external 802.1x supplicant
+	 * to pass the PMK to the in-driver supplicant for use in
+	 * the 4-way handshake.
+	 */
+	else if (iwe->alg == IW_ENCODE_ALG_PMK) {
+		int j;
+		wsec_pmk_t pmk;
+		char keystring[WSEC_MAX_PSK_LEN + 1];
+		char* charptr = keystring;
+		uint len;
+
+		/* copy the raw hex key to the appropriate format */
+		for (j = 0; j < (WSEC_MAX_PSK_LEN / 2); j++) {
+			sprintf(charptr, "%02x", iwe->key[j]);
+			charptr += 2;
+		}
+		len = strlen(keystring);
+		pmk.key_len = htod16(len);
+		bcopy(keystring, pmk.key, len);
+		pmk.flags = htod16(WSEC_PASSPHRASE);
+
+		error = dev_wlc_ioctl(dev, WLC_SET_WSEC_PMK, &pmk, sizeof(pmk));
+		if (error)
+			return error;
+	}
+
+	else {
+		if (iwe->key_len > sizeof(key.data))
+			return -EINVAL;
+
+		WL_WSEC(("Setting the key index %d\n", key.index));
+		if (iwe->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
+			WL_WSEC(("key is a Primary Key\n"));
+			key.flags = WL_PRIMARY_KEY;
+		}
+
+		bcopy((void *)iwe->key, key.data, iwe->key_len);
+
+		if (iwe->alg == IW_ENCODE_ALG_TKIP) {
+			uint8 keybuf[8];
+			bcopy(&key.data[24], keybuf, sizeof(keybuf));
+			bcopy(&key.data[16], &key.data[24], sizeof(keybuf));
+			bcopy(keybuf, &key.data[16], sizeof(keybuf));
+		}
+
+		/* rx iv */
+		if (iwe->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) {
+			uchar *ivptr;
+			ivptr = (uchar *)iwe->rx_seq;
+			key.rxiv.hi = (ivptr[5] << 24) | (ivptr[4] << 16) |
+				(ivptr[3] << 8) | ivptr[2];
+			key.rxiv.lo = (ivptr[1] << 8) | ivptr[0];
+			key.iv_initialized = TRUE;
+		}
+
+		switch (iwe->alg) {
+			case IW_ENCODE_ALG_NONE:
+				key.algo = CRYPTO_ALGO_OFF;
+				break;
+			case IW_ENCODE_ALG_WEP:
+				if (iwe->key_len == WEP1_KEY_SIZE)
+					key.algo = CRYPTO_ALGO_WEP1;
+				else
+					key.algo = CRYPTO_ALGO_WEP128;
+				break;
+			case IW_ENCODE_ALG_TKIP:
+				key.algo = CRYPTO_ALGO_TKIP;
+				break;
+			case IW_ENCODE_ALG_CCMP:
+				key.algo = CRYPTO_ALGO_AES_CCM;
+				break;
+			default:
+				break;
+		}
+		swap_key_from_BE(&key);
+
+		dhd_wait_pend8021x(dev);
+
+		error = dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key));
+		if (error)
+			return error;
+	}
+	return 0;
+}
+
+
+#if WIRELESS_EXT > 17
+struct {
+	pmkid_list_t pmkids;
+	pmkid_t foo[MAXPMKID-1];
+} pmkid_list;
+static int
+wl_iw_set_pmksa(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	struct iw_pmksa *iwpmksa;
+	uint i;
+	char eabuf[ETHER_ADDR_STR_LEN];
+	pmkid_t * pmkid_array = pmkid_list.pmkids.pmkid;
+
+	WL_TRACE(("%s: SIOCSIWPMKSA\n", dev->name));
+	iwpmksa = (struct iw_pmksa *)extra;
+	bzero((char *)eabuf, ETHER_ADDR_STR_LEN);
+	if (iwpmksa->cmd == IW_PMKSA_FLUSH) {
+		WL_TRACE(("wl_iw_set_pmksa - IW_PMKSA_FLUSH\n"));
+		bzero((char *)&pmkid_list, sizeof(pmkid_list));
+	}
+	if (iwpmksa->cmd == IW_PMKSA_REMOVE) {
+		pmkid_list_t pmkid, *pmkidptr;
+		pmkidptr = &pmkid;
+		bcopy(&iwpmksa->bssid.sa_data[0], &pmkidptr->pmkid[0].BSSID, ETHER_ADDR_LEN);
+		bcopy(&iwpmksa->pmkid[0], &pmkidptr->pmkid[0].PMKID, WPA2_PMKID_LEN);
+		{
+			uint j;
+			WL_TRACE(("wl_iw_set_pmksa,IW_PMKSA_REMOVE - PMKID: %s = ",
+				bcm_ether_ntoa(&pmkidptr->pmkid[0].BSSID,
+				eabuf)));
+			for (j = 0; j < WPA2_PMKID_LEN; j++)
+				WL_TRACE(("%02x ", pmkidptr->pmkid[0].PMKID[j]));
+			WL_TRACE(("\n"));
+		}
+		for (i = 0; i < pmkid_list.pmkids.npmkid; i++)
+			if (!bcmp(&iwpmksa->bssid.sa_data[0], &pmkid_array[i].BSSID,
+				ETHER_ADDR_LEN))
+				break;
+		for (; i < pmkid_list.pmkids.npmkid; i++) {
+			bcopy(&pmkid_array[i+1].BSSID,
+				&pmkid_array[i].BSSID,
+				ETHER_ADDR_LEN);
+			bcopy(&pmkid_array[i+1].PMKID,
+				&pmkid_array[i].PMKID,
+				WPA2_PMKID_LEN);
+		}
+		pmkid_list.pmkids.npmkid--;
+	}
+	if (iwpmksa->cmd == IW_PMKSA_ADD) {
+		bcopy(&iwpmksa->bssid.sa_data[0],
+			&pmkid_array[pmkid_list.pmkids.npmkid].BSSID,
+			ETHER_ADDR_LEN);
+		bcopy(&iwpmksa->pmkid[0], &pmkid_array[pmkid_list.pmkids.npmkid].PMKID,
+			WPA2_PMKID_LEN);
+		{
+			uint j;
+			uint k;
+			k = pmkid_list.pmkids.npmkid;
+			BCM_REFERENCE(k);
+			WL_TRACE(("wl_iw_set_pmksa,IW_PMKSA_ADD - PMKID: %s = ",
+				bcm_ether_ntoa(&pmkid_array[k].BSSID,
+				eabuf)));
+			for (j = 0; j < WPA2_PMKID_LEN; j++)
+				WL_TRACE(("%02x ", pmkid_array[k].PMKID[j]));
+			WL_TRACE(("\n"));
+		}
+		pmkid_list.pmkids.npmkid++;
+	}
+	WL_TRACE(("PRINTING pmkid LIST - No of elements %d\n", pmkid_list.pmkids.npmkid));
+	for (i = 0; i < pmkid_list.pmkids.npmkid; i++) {
+		uint j;
+		WL_TRACE(("PMKID[%d]: %s = ", i,
+			bcm_ether_ntoa(&pmkid_array[i].BSSID,
+			eabuf)));
+		for (j = 0; j < WPA2_PMKID_LEN; j++)
+			WL_TRACE(("%02x ", pmkid_array[i].PMKID[j]));
+		printf("\n");
+	}
+	WL_TRACE(("\n"));
+	dev_wlc_bufvar_set(dev, "pmkid_info", (char *)&pmkid_list, sizeof(pmkid_list));
+	return 0;
+}
+#endif /* WIRELESS_EXT > 17 */
+
+static int
+wl_iw_get_encodeext(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	WL_TRACE(("%s: SIOCGIWENCODEEXT\n", dev->name));
+	return 0;
+}
+
+static int
+wl_iw_set_wpaauth(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error = 0;
+	int paramid;
+	int paramval;
+	uint32 cipher_combined;
+	int val = 0;
+	wl_iw_t *iw = IW_DEV_IF(dev);
+
+	WL_TRACE(("%s: SIOCSIWAUTH\n", dev->name));
+
+	paramid = vwrq->flags & IW_AUTH_INDEX;
+	paramval = vwrq->value;
+
+	WL_TRACE(("%s: SIOCSIWAUTH, paramid = 0x%0x, paramval = 0x%0x\n",
+		dev->name, paramid, paramval));
+
+	switch (paramid) {
+
+	case IW_AUTH_WPA_VERSION:
+		/* supported wpa version disabled or wpa or wpa2 */
+		if (paramval & IW_AUTH_WPA_VERSION_DISABLED)
+			val = WPA_AUTH_DISABLED;
+		else if (paramval & (IW_AUTH_WPA_VERSION_WPA))
+			val = WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED;
+		else if (paramval & IW_AUTH_WPA_VERSION_WPA2)
+			val = WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED;
+		WL_TRACE(("%s: %d: setting wpa_auth to 0x%0x\n", __FUNCTION__, __LINE__, val));
+		if ((error = dev_wlc_intvar_set(dev, "wpa_auth", val)))
+			return error;
+		break;
+
+	case IW_AUTH_CIPHER_PAIRWISE:
+	case IW_AUTH_CIPHER_GROUP: {
+		int fbt_cap = 0;
+
+		if (paramid == IW_AUTH_CIPHER_PAIRWISE) {
+			iw->pwsec = paramval;
+		}
+		else {
+			iw->gwsec = paramval;
+		}
+
+		if ((error = dev_wlc_intvar_get(dev, "wsec", &val)))
+			return error;
+
+		cipher_combined = iw->gwsec | iw->pwsec;
+		val &= ~(WEP_ENABLED | TKIP_ENABLED | AES_ENABLED);
+		if (cipher_combined & (IW_AUTH_CIPHER_WEP40 | IW_AUTH_CIPHER_WEP104))
+			val |= WEP_ENABLED;
+		if (cipher_combined & IW_AUTH_CIPHER_TKIP)
+			val |= TKIP_ENABLED;
+		if (cipher_combined & IW_AUTH_CIPHER_CCMP)
+			val |= AES_ENABLED;
+
+		if (iw->privacy_invoked && !val) {
+			WL_WSEC(("%s: %s: 'Privacy invoked' TRUE but clearing wsec, assuming "
+			         "we're a WPS enrollee\n", dev->name, __FUNCTION__));
+			if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", TRUE))) {
+				WL_WSEC(("Failed to set iovar is_WPS_enrollee\n"));
+				return error;
+			}
+		} else if (val) {
+			if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", FALSE))) {
+				WL_WSEC(("Failed to clear iovar is_WPS_enrollee\n"));
+				return error;
+			}
+		}
+
+		if ((error = dev_wlc_intvar_set(dev, "wsec", val)))
+			return error;
+
+		/* Ensure in-dongle supplicant is turned on when FBT wants to do the 4-way
+		 * handshake.
+		 */
+		if (dev_wlc_intvar_get(dev, "fbt_cap", &fbt_cap) == 0) {
+			if (fbt_cap == WLC_FBT_CAP_DRV_4WAY_AND_REASSOC) {
+				if ((paramid == IW_AUTH_CIPHER_PAIRWISE) && (val & AES_ENABLED)) {
+					if ((error = dev_wlc_intvar_set(dev, "sup_wpa", 1)))
+						return error;
+				}
+				else if (val == 0) {
+					if ((error = dev_wlc_intvar_set(dev, "sup_wpa", 0)))
+						return error;
+				}
+			}
+		}
+		break;
+	}
+
+	case IW_AUTH_KEY_MGMT:
+		if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val)))
+			return error;
+
+		if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED)) {
+			if (paramval & (IW_AUTH_KEY_MGMT_FT_PSK | IW_AUTH_KEY_MGMT_PSK))
+				val = WPA_AUTH_PSK;
+			else
+				val = WPA_AUTH_UNSPECIFIED;
+			if (paramval & (IW_AUTH_KEY_MGMT_FT_802_1X | IW_AUTH_KEY_MGMT_FT_PSK))
+				val |= WPA2_AUTH_FT;
+		}
+		else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED)) {
+			if (paramval & (IW_AUTH_KEY_MGMT_FT_PSK | IW_AUTH_KEY_MGMT_PSK))
+				val = WPA2_AUTH_PSK;
+			else
+				val = WPA2_AUTH_UNSPECIFIED;
+			if (paramval & (IW_AUTH_KEY_MGMT_FT_802_1X | IW_AUTH_KEY_MGMT_FT_PSK))
+				val |= WPA2_AUTH_FT;
+		}
+		WL_TRACE(("%s: %d: setting wpa_auth to %d\n", __FUNCTION__, __LINE__, val));
+		if ((error = dev_wlc_intvar_set(dev, "wpa_auth", val)))
+			return error;
+		break;
+
+	case IW_AUTH_TKIP_COUNTERMEASURES:
+		dev_wlc_bufvar_set(dev, "tkip_countermeasures", (char *)&paramval, 1);
+		break;
+
+	case IW_AUTH_80211_AUTH_ALG:
+		/* open shared */
+		WL_ERROR(("Setting the D11auth %d\n", paramval));
+		if (paramval & IW_AUTH_ALG_OPEN_SYSTEM)
+			val = 0;
+		else if (paramval & IW_AUTH_ALG_SHARED_KEY)
+			val = 1;
+		else
+			error = 1;
+		if (!error && (error = dev_wlc_intvar_set(dev, "auth", val)))
+			return error;
+		break;
+
+	case IW_AUTH_WPA_ENABLED:
+		if (paramval == 0) {
+			val = 0;
+			WL_TRACE(("%s: %d: setting wpa_auth to %d\n", __FUNCTION__, __LINE__, val));
+			error = dev_wlc_intvar_set(dev, "wpa_auth", val);
+			return error;
+		}
+		else {
+			/* If WPA is enabled, wpa_auth is set elsewhere */
+		}
+		break;
+
+	case IW_AUTH_DROP_UNENCRYPTED:
+		dev_wlc_bufvar_set(dev, "wsec_restrict", (char *)&paramval, 1);
+		break;
+
+	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
+		dev_wlc_bufvar_set(dev, "rx_unencrypted_eapol", (char *)&paramval, 1);
+		break;
+
+#if WIRELESS_EXT > 17
+
+	case IW_AUTH_ROAMING_CONTROL:
+		WL_TRACE(("%s: IW_AUTH_ROAMING_CONTROL\n", __FUNCTION__));
+		/* driver control or user space app control */
+		break;
+
+	case IW_AUTH_PRIVACY_INVOKED: {
+		int wsec;
+
+		if (paramval == 0) {
+			iw->privacy_invoked = FALSE;
+			if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", FALSE))) {
+				WL_WSEC(("Failed to clear iovar is_WPS_enrollee\n"));
+				return error;
+			}
+		} else {
+			iw->privacy_invoked = TRUE;
+			if ((error = dev_wlc_intvar_get(dev, "wsec", &wsec)))
+				return error;
+
+			if (!WSEC_ENABLED(wsec)) {
+				/* if privacy is true, but wsec is false, we are a WPS enrollee */
+				if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", TRUE))) {
+					WL_WSEC(("Failed to set iovar is_WPS_enrollee\n"));
+					return error;
+				}
+			} else {
+				if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", FALSE))) {
+					WL_WSEC(("Failed to clear iovar is_WPS_enrollee\n"));
+					return error;
+				}
+			}
+		}
+		break;
+	}
+
+
+#endif /* WIRELESS_EXT > 17 */
+
+
+	default:
+		break;
+	}
+	return 0;
+}
+#define VAL_PSK(_val) (((_val) & WPA_AUTH_PSK) || ((_val) & WPA2_AUTH_PSK))
+
+static int
+wl_iw_get_wpaauth(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error;
+	int paramid;
+	int paramval = 0;
+	int val;
+	wl_iw_t *iw = IW_DEV_IF(dev);
+
+	WL_TRACE(("%s: SIOCGIWAUTH\n", dev->name));
+
+	paramid = vwrq->flags & IW_AUTH_INDEX;
+
+	switch (paramid) {
+	case IW_AUTH_WPA_VERSION:
+		/* supported wpa version disabled or wpa or wpa2 */
+		if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val)))
+			return error;
+		if (val & (WPA_AUTH_NONE | WPA_AUTH_DISABLED))
+			paramval = IW_AUTH_WPA_VERSION_DISABLED;
+		else if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED))
+			paramval = IW_AUTH_WPA_VERSION_WPA;
+		else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED))
+			paramval = IW_AUTH_WPA_VERSION_WPA2;
+		break;
+
+	case IW_AUTH_CIPHER_PAIRWISE:
+		paramval = iw->pwsec;
+		break;
+
+	case IW_AUTH_CIPHER_GROUP:
+		paramval = iw->gwsec;
+		break;
+
+	case IW_AUTH_KEY_MGMT:
+		/* psk, 1x */
+		if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val)))
+			return error;
+		if (VAL_PSK(val))
+			paramval = IW_AUTH_KEY_MGMT_PSK;
+		else
+			paramval = IW_AUTH_KEY_MGMT_802_1X;
+
+		break;
+	case IW_AUTH_TKIP_COUNTERMEASURES:
+		dev_wlc_bufvar_get(dev, "tkip_countermeasures", (char *)&paramval, 1);
+		break;
+
+	case IW_AUTH_DROP_UNENCRYPTED:
+		dev_wlc_bufvar_get(dev, "wsec_restrict", (char *)&paramval, 1);
+		break;
+
+	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
+		dev_wlc_bufvar_get(dev, "rx_unencrypted_eapol", (char *)&paramval, 1);
+		break;
+
+	case IW_AUTH_80211_AUTH_ALG:
+		/* open, shared, leap */
+		if ((error = dev_wlc_intvar_get(dev, "auth", &val)))
+			return error;
+		if (!val)
+			paramval = IW_AUTH_ALG_OPEN_SYSTEM;
+		else
+			paramval = IW_AUTH_ALG_SHARED_KEY;
+		break;
+	case IW_AUTH_WPA_ENABLED:
+		if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val)))
+			return error;
+		if (val)
+			paramval = TRUE;
+		else
+			paramval = FALSE;
+		break;
+
+#if WIRELESS_EXT > 17
+
+	case IW_AUTH_ROAMING_CONTROL:
+		WL_ERROR(("%s: IW_AUTH_ROAMING_CONTROL\n", __FUNCTION__));
+		/* driver control or user space app control */
+		break;
+
+	case IW_AUTH_PRIVACY_INVOKED:
+		paramval = iw->privacy_invoked;
+		break;
+
+#endif /* WIRELESS_EXT > 17 */
+	}
+	vwrq->value = paramval;
+	return 0;
+}
+#endif /* WIRELESS_EXT > 17 */
+
+static const iw_handler wl_iw_handler[] =
+{
+	(iw_handler) wl_iw_config_commit,	/* SIOCSIWCOMMIT */
+	(iw_handler) wl_iw_get_name,		/* SIOCGIWNAME */
+	(iw_handler) NULL,			/* SIOCSIWNWID */
+	(iw_handler) NULL,			/* SIOCGIWNWID */
+	(iw_handler) wl_iw_set_freq,		/* SIOCSIWFREQ */
+	(iw_handler) wl_iw_get_freq,		/* SIOCGIWFREQ */
+	(iw_handler) wl_iw_set_mode,		/* SIOCSIWMODE */
+	(iw_handler) wl_iw_get_mode,		/* SIOCGIWMODE */
+	(iw_handler) NULL,			/* SIOCSIWSENS */
+	(iw_handler) NULL,			/* SIOCGIWSENS */
+	(iw_handler) NULL,			/* SIOCSIWRANGE */
+	(iw_handler) wl_iw_get_range,		/* SIOCGIWRANGE */
+	(iw_handler) NULL,			/* SIOCSIWPRIV */
+	(iw_handler) NULL,			/* SIOCGIWPRIV */
+	(iw_handler) NULL,			/* SIOCSIWSTATS */
+	(iw_handler) NULL,			/* SIOCGIWSTATS */
+	(iw_handler) wl_iw_set_spy,		/* SIOCSIWSPY */
+	(iw_handler) wl_iw_get_spy,		/* SIOCGIWSPY */
+	(iw_handler) NULL,			/* -- hole -- */
+	(iw_handler) NULL,			/* -- hole -- */
+	(iw_handler) wl_iw_set_wap,		/* SIOCSIWAP */
+	(iw_handler) wl_iw_get_wap,		/* SIOCGIWAP */
+#if WIRELESS_EXT > 17
+	(iw_handler) wl_iw_mlme,		/* SIOCSIWMLME */
+#else
+	(iw_handler) NULL,			/* -- hole -- */
+#endif
+	(iw_handler) wl_iw_iscan_get_aplist,	/* SIOCGIWAPLIST */
+#if WIRELESS_EXT > 13
+	(iw_handler) wl_iw_iscan_set_scan,	/* SIOCSIWSCAN */
+	(iw_handler) wl_iw_iscan_get_scan,	/* SIOCGIWSCAN */
+#else	/* WIRELESS_EXT > 13 */
+	(iw_handler) NULL,			/* SIOCSIWSCAN */
+	(iw_handler) NULL,			/* SIOCGIWSCAN */
+#endif	/* WIRELESS_EXT > 13 */
+	(iw_handler) wl_iw_set_essid,		/* SIOCSIWESSID */
+	(iw_handler) wl_iw_get_essid,		/* SIOCGIWESSID */
+	(iw_handler) wl_iw_set_nick,		/* SIOCSIWNICKN */
+	(iw_handler) wl_iw_get_nick,		/* SIOCGIWNICKN */
+	(iw_handler) NULL,			/* -- hole -- */
+	(iw_handler) NULL,			/* -- hole -- */
+	(iw_handler) wl_iw_set_rate,		/* SIOCSIWRATE */
+	(iw_handler) wl_iw_get_rate,		/* SIOCGIWRATE */
+	(iw_handler) wl_iw_set_rts,		/* SIOCSIWRTS */
+	(iw_handler) wl_iw_get_rts,		/* SIOCGIWRTS */
+	(iw_handler) wl_iw_set_frag,		/* SIOCSIWFRAG */
+	(iw_handler) wl_iw_get_frag,		/* SIOCGIWFRAG */
+	(iw_handler) wl_iw_set_txpow,		/* SIOCSIWTXPOW */
+	(iw_handler) wl_iw_get_txpow,		/* SIOCGIWTXPOW */
+#if WIRELESS_EXT > 10
+	(iw_handler) wl_iw_set_retry,		/* SIOCSIWRETRY */
+	(iw_handler) wl_iw_get_retry,		/* SIOCGIWRETRY */
+#endif /* WIRELESS_EXT > 10 */
+	(iw_handler) wl_iw_set_encode,		/* SIOCSIWENCODE */
+	(iw_handler) wl_iw_get_encode,		/* SIOCGIWENCODE */
+	(iw_handler) wl_iw_set_power,		/* SIOCSIWPOWER */
+	(iw_handler) wl_iw_get_power,		/* SIOCGIWPOWER */
+#if WIRELESS_EXT > 17
+	(iw_handler) NULL,			/* -- hole -- */
+	(iw_handler) NULL,			/* -- hole -- */
+	(iw_handler) wl_iw_set_wpaie,		/* SIOCSIWGENIE */
+	(iw_handler) wl_iw_get_wpaie,		/* SIOCGIWGENIE */
+	(iw_handler) wl_iw_set_wpaauth,		/* SIOCSIWAUTH */
+	(iw_handler) wl_iw_get_wpaauth,		/* SIOCGIWAUTH */
+	(iw_handler) wl_iw_set_encodeext,	/* SIOCSIWENCODEEXT */
+	(iw_handler) wl_iw_get_encodeext,	/* SIOCGIWENCODEEXT */
+	(iw_handler) wl_iw_set_pmksa,		/* SIOCSIWPMKSA */
+#endif /* WIRELESS_EXT > 17 */
+};
+
+#if WIRELESS_EXT > 12
+enum {
+	WL_IW_SET_LEDDC = SIOCIWFIRSTPRIV,
+	WL_IW_SET_VLANMODE,
+	WL_IW_SET_PM,
+#if WIRELESS_EXT > 17
+#endif /* WIRELESS_EXT > 17 */
+	WL_IW_SET_LAST
+};
+
+static iw_handler wl_iw_priv_handler[] = {
+	wl_iw_set_leddc,
+	wl_iw_set_vlanmode,
+	wl_iw_set_pm,
+#if WIRELESS_EXT > 17
+#endif /* WIRELESS_EXT > 17 */
+	NULL
+};
+
+static struct iw_priv_args wl_iw_priv_args[] = {
+	{
+		WL_IW_SET_LEDDC,
+		IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+		0,
+		"set_leddc"
+	},
+	{
+		WL_IW_SET_VLANMODE,
+		IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+		0,
+		"set_vlanmode"
+	},
+	{
+		WL_IW_SET_PM,
+		IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+		0,
+		"set_pm"
+	},
+#if WIRELESS_EXT > 17
+#endif /* WIRELESS_EXT > 17 */
+	{ 0, 0, 0, { 0 } }
+};
+
+const struct iw_handler_def wl_iw_handler_def =
+{
+	.num_standard = ARRAYSIZE(wl_iw_handler),
+	.num_private = ARRAY_SIZE(wl_iw_priv_handler),
+	.num_private_args = ARRAY_SIZE(wl_iw_priv_args),
+	.standard = (iw_handler *) wl_iw_handler,
+	.private = wl_iw_priv_handler,
+	.private_args = wl_iw_priv_args,
+#if WIRELESS_EXT >= 19
+	get_wireless_stats: dhd_get_wireless_stats,
+#endif /* WIRELESS_EXT >= 19 */
+	};
+#endif /* WIRELESS_EXT > 12 */
+
+int
+wl_iw_ioctl(
+	struct net_device *dev,
+	struct ifreq *rq,
+	int cmd
+)
+{
+	struct iwreq *wrq = (struct iwreq *) rq;
+	struct iw_request_info info;
+	iw_handler handler;
+	char *extra = NULL;
+	size_t token_size = 1;
+	int max_tokens = 0, ret = 0;
+
+	if (cmd < SIOCIWFIRST ||
+		IW_IOCTL_IDX(cmd) >= ARRAYSIZE(wl_iw_handler) ||
+		!(handler = wl_iw_handler[IW_IOCTL_IDX(cmd)]))
+		return -EOPNOTSUPP;
+
+	switch (cmd) {
+
+	case SIOCSIWESSID:
+	case SIOCGIWESSID:
+	case SIOCSIWNICKN:
+	case SIOCGIWNICKN:
+		max_tokens = IW_ESSID_MAX_SIZE + 1;
+		break;
+
+	case SIOCSIWENCODE:
+	case SIOCGIWENCODE:
+#if WIRELESS_EXT > 17
+	case SIOCSIWENCODEEXT:
+	case SIOCGIWENCODEEXT:
+#endif
+		max_tokens = IW_ENCODING_TOKEN_MAX;
+		break;
+
+	case SIOCGIWRANGE:
+		max_tokens = sizeof(struct iw_range);
+		break;
+
+	case SIOCGIWAPLIST:
+		token_size = sizeof(struct sockaddr) + sizeof(struct iw_quality);
+		max_tokens = IW_MAX_AP;
+		break;
+
+#if WIRELESS_EXT > 13
+	case SIOCGIWSCAN:
+	if (g_iscan)
+		max_tokens = wrq->u.data.length;
+	else
+		max_tokens = IW_SCAN_MAX_DATA;
+		break;
+#endif /* WIRELESS_EXT > 13 */
+
+	case SIOCSIWSPY:
+		token_size = sizeof(struct sockaddr);
+		max_tokens = IW_MAX_SPY;
+		break;
+
+	case SIOCGIWSPY:
+		token_size = sizeof(struct sockaddr) + sizeof(struct iw_quality);
+		max_tokens = IW_MAX_SPY;
+		break;
+	default:
+		break;
+	}
+
+	if (max_tokens && wrq->u.data.pointer) {
+		if (wrq->u.data.length > max_tokens)
+			return -E2BIG;
+
+		if (!(extra = kmalloc(max_tokens * token_size, GFP_KERNEL)))
+			return -ENOMEM;
+
+		if (copy_from_user(extra, wrq->u.data.pointer, wrq->u.data.length * token_size)) {
+			kfree(extra);
+			return -EFAULT;
+		}
+	}
+
+	info.cmd = cmd;
+	info.flags = 0;
+
+	ret = handler(dev, &info, &wrq->u, extra);
+
+	if (extra) {
+		if (copy_to_user(wrq->u.data.pointer, extra, wrq->u.data.length * token_size)) {
+			kfree(extra);
+			return -EFAULT;
+		}
+
+		kfree(extra);
+	}
+
+	return ret;
+}
+
+/* Convert a connection status event into a connection status string.
+ * Returns TRUE if a matching connection status string was found.
+ */
+bool
+wl_iw_conn_status_str(uint32 event_type, uint32 status, uint32 reason,
+	char* stringBuf, uint buflen)
+{
+	typedef struct conn_fail_event_map_t {
+		uint32 inEvent;			/* input: event type to match */
+		uint32 inStatus;		/* input: event status code to match */
+		uint32 inReason;		/* input: event reason code to match */
+		const char* outName;	/* output: failure type */
+		const char* outCause;	/* output: failure cause */
+	} conn_fail_event_map_t;
+
+	/* Map of WLC_E events to connection failure strings */
+#	define WL_IW_DONT_CARE	9999
+	const conn_fail_event_map_t event_map [] = {
+		/* inEvent           inStatus                inReason         */
+		/* outName outCause                                           */
+		{WLC_E_SET_SSID,     WLC_E_STATUS_SUCCESS,   WL_IW_DONT_CARE,
+		"Conn", "Success"},
+		{WLC_E_SET_SSID,     WLC_E_STATUS_NO_NETWORKS, WL_IW_DONT_CARE,
+		"Conn", "NoNetworks"},
+		{WLC_E_SET_SSID,     WLC_E_STATUS_FAIL,      WL_IW_DONT_CARE,
+		"Conn", "ConfigMismatch"},
+		{WLC_E_PRUNE,        WL_IW_DONT_CARE,        WLC_E_PRUNE_ENCR_MISMATCH,
+		"Conn", "EncrypMismatch"},
+		{WLC_E_PRUNE,        WL_IW_DONT_CARE,        WLC_E_RSN_MISMATCH,
+		"Conn", "RsnMismatch"},
+		{WLC_E_AUTH,         WLC_E_STATUS_TIMEOUT,   WL_IW_DONT_CARE,
+		"Conn", "AuthTimeout"},
+		{WLC_E_AUTH,         WLC_E_STATUS_FAIL,      WL_IW_DONT_CARE,
+		"Conn", "AuthFail"},
+		{WLC_E_AUTH,         WLC_E_STATUS_NO_ACK,    WL_IW_DONT_CARE,
+		"Conn", "AuthNoAck"},
+		{WLC_E_REASSOC,      WLC_E_STATUS_FAIL,      WL_IW_DONT_CARE,
+		"Conn", "ReassocFail"},
+		{WLC_E_REASSOC,      WLC_E_STATUS_TIMEOUT,   WL_IW_DONT_CARE,
+		"Conn", "ReassocTimeout"},
+		{WLC_E_REASSOC,      WLC_E_STATUS_ABORT,     WL_IW_DONT_CARE,
+		"Conn", "ReassocAbort"},
+		{WLC_E_PSK_SUP,      WLC_SUP_KEYED,          WL_IW_DONT_CARE,
+		"Sup", "ConnSuccess"},
+		{WLC_E_PSK_SUP,      WL_IW_DONT_CARE,        WL_IW_DONT_CARE,
+		"Sup", "WpaHandshakeFail"},
+		{WLC_E_DEAUTH_IND,   WL_IW_DONT_CARE,        WL_IW_DONT_CARE,
+		"Conn", "Deauth"},
+		{WLC_E_DISASSOC_IND, WL_IW_DONT_CARE,        WL_IW_DONT_CARE,
+		"Conn", "DisassocInd"},
+		{WLC_E_DISASSOC,     WL_IW_DONT_CARE,        WL_IW_DONT_CARE,
+		"Conn", "Disassoc"}
+	};
+
+	const char* name = "";
+	const char* cause = NULL;
+	int i;
+
+	/* Search the event map table for a matching event */
+	for (i = 0;  i < sizeof(event_map)/sizeof(event_map[0]);  i++) {
+		const conn_fail_event_map_t* row = &event_map[i];
+		if (row->inEvent == event_type &&
+		    (row->inStatus == status || row->inStatus == WL_IW_DONT_CARE) &&
+		    (row->inReason == reason || row->inReason == WL_IW_DONT_CARE)) {
+			name = row->outName;
+			cause = row->outCause;
+			break;
+		}
+	}
+
+	/* If found, generate a connection failure string and return TRUE */
+	if (cause) {
+		memset(stringBuf, 0, buflen);
+		snprintf(stringBuf, buflen, "%s %s %02d %02d",
+			name, cause, status, reason);
+		WL_TRACE(("Connection status: %s\n", stringBuf));
+		return TRUE;
+	} else {
+		return FALSE;
+	}
+}
+
+#if (WIRELESS_EXT > 14)
+/* Check if we have received an event that indicates connection failure
+ * If so, generate a connection failure report string.
+ * The caller supplies a buffer to hold the generated string.
+ */
+static bool
+wl_iw_check_conn_fail(wl_event_msg_t *e, char* stringBuf, uint buflen)
+{
+	uint32 event = ntoh32(e->event_type);
+	uint32 status =  ntoh32(e->status);
+	uint32 reason =  ntoh32(e->reason);
+
+	if (wl_iw_conn_status_str(event, status, reason, stringBuf, buflen)) {
+		return TRUE;
+	} else
+	{
+		return FALSE;
+	}
+}
+#endif /* WIRELESS_EXT > 14 */
+
+#ifndef IW_CUSTOM_MAX
+#define IW_CUSTOM_MAX 256 /* size of extra buffer used for translation of events */
+#endif /* IW_CUSTOM_MAX */
+
+void
+wl_iw_event(struct net_device *dev, wl_event_msg_t *e, void* data)
+{
+#if WIRELESS_EXT > 13
+	union iwreq_data wrqu;
+	char extra[IW_CUSTOM_MAX + 1];
+	int cmd = 0;
+	uint32 event_type = ntoh32(e->event_type);
+	uint16 flags =  ntoh16(e->flags);
+	uint32 datalen = ntoh32(e->datalen);
+	uint32 status =  ntoh32(e->status);
+
+	memset(&wrqu, 0, sizeof(wrqu));
+	memset(extra, 0, sizeof(extra));
+
+	memcpy(wrqu.addr.sa_data, &e->addr, ETHER_ADDR_LEN);
+	wrqu.addr.sa_family = ARPHRD_ETHER;
+
+	switch (event_type) {
+	case WLC_E_TXFAIL:
+		cmd = IWEVTXDROP;
+		break;
+#if WIRELESS_EXT > 14
+	case WLC_E_JOIN:
+	case WLC_E_ASSOC_IND:
+	case WLC_E_REASSOC_IND:
+		cmd = IWEVREGISTERED;
+		break;
+	case WLC_E_DEAUTH_IND:
+	case WLC_E_DISASSOC_IND:
+		cmd = SIOCGIWAP;
+		wrqu.data.length = strlen(extra);
+		bzero(wrqu.addr.sa_data, ETHER_ADDR_LEN);
+		bzero(&extra, ETHER_ADDR_LEN);
+		break;
+
+	case WLC_E_LINK:
+	case WLC_E_NDIS_LINK:
+		cmd = SIOCGIWAP;
+		wrqu.data.length = strlen(extra);
+		if (!(flags & WLC_EVENT_MSG_LINK)) {
+			bzero(wrqu.addr.sa_data, ETHER_ADDR_LEN);
+			bzero(&extra, ETHER_ADDR_LEN);
+		}
+		break;
+	case WLC_E_ACTION_FRAME:
+		cmd = IWEVCUSTOM;
+		if (datalen + 1 <= sizeof(extra)) {
+			wrqu.data.length = datalen + 1;
+			extra[0] = WLC_E_ACTION_FRAME;
+			memcpy(&extra[1], data, datalen);
+			WL_TRACE(("WLC_E_ACTION_FRAME len %d \n", wrqu.data.length));
+		}
+		break;
+
+	case WLC_E_ACTION_FRAME_COMPLETE:
+		cmd = IWEVCUSTOM;
+		if (sizeof(status) + 1 <= sizeof(extra)) {
+			wrqu.data.length = sizeof(status) + 1;
+			extra[0] = WLC_E_ACTION_FRAME_COMPLETE;
+			memcpy(&extra[1], &status, sizeof(status));
+			WL_TRACE(("wl_iw_event status %d  \n", status));
+		}
+		break;
+#endif /* WIRELESS_EXT > 14 */
+#if WIRELESS_EXT > 17
+	case WLC_E_MIC_ERROR: {
+		struct	iw_michaelmicfailure  *micerrevt = (struct  iw_michaelmicfailure  *)&extra;
+		cmd = IWEVMICHAELMICFAILURE;
+		wrqu.data.length = sizeof(struct iw_michaelmicfailure);
+		if (flags & WLC_EVENT_MSG_GROUP)
+			micerrevt->flags |= IW_MICFAILURE_GROUP;
+		else
+			micerrevt->flags |= IW_MICFAILURE_PAIRWISE;
+		memcpy(micerrevt->src_addr.sa_data, &e->addr, ETHER_ADDR_LEN);
+		micerrevt->src_addr.sa_family = ARPHRD_ETHER;
+
+		break;
+	}
+
+	case WLC_E_ASSOC_REQ_IE:
+		cmd = IWEVASSOCREQIE;
+		wrqu.data.length = datalen;
+		if (datalen < sizeof(extra))
+			memcpy(extra, data, datalen);
+		break;
+
+	case WLC_E_ASSOC_RESP_IE:
+		cmd = IWEVASSOCRESPIE;
+		wrqu.data.length = datalen;
+		if (datalen < sizeof(extra))
+			memcpy(extra, data, datalen);
+		break;
+
+	case WLC_E_PMKID_CACHE: {
+		struct iw_pmkid_cand *iwpmkidcand = (struct iw_pmkid_cand *)&extra;
+		pmkid_cand_list_t *pmkcandlist;
+		pmkid_cand_t	*pmkidcand;
+		int count;
+
+		if (data == NULL)
+			break;
+
+		cmd = IWEVPMKIDCAND;
+		pmkcandlist = data;
+		count = ntoh32_ua((uint8 *)&pmkcandlist->npmkid_cand);
+		wrqu.data.length = sizeof(struct iw_pmkid_cand);
+		pmkidcand = pmkcandlist->pmkid_cand;
+		while (count) {
+			bzero(iwpmkidcand, sizeof(struct iw_pmkid_cand));
+			if (pmkidcand->preauth)
+				iwpmkidcand->flags |= IW_PMKID_CAND_PREAUTH;
+			bcopy(&pmkidcand->BSSID, &iwpmkidcand->bssid.sa_data,
+			      ETHER_ADDR_LEN);
+			wireless_send_event(dev, cmd, &wrqu, extra);
+			pmkidcand++;
+			count--;
+		}
+		break;
+	}
+#endif /* WIRELESS_EXT > 17 */
+
+	case WLC_E_SCAN_COMPLETE:
+#if WIRELESS_EXT > 14
+		cmd = SIOCGIWSCAN;
+#endif
+		WL_TRACE(("event WLC_E_SCAN_COMPLETE\n"));
+		if ((g_iscan) && (g_iscan->sysioc_pid >= 0) &&
+			(g_iscan->iscan_state != ISCAN_STATE_IDLE))
+			up(&g_iscan->sysioc_sem);
+		break;
+
+	default:
+		/* Cannot translate event */
+		break;
+	}
+
+	if (cmd) {
+		if (cmd == SIOCGIWSCAN)
+			wireless_send_event(dev, cmd, &wrqu, NULL);
+		else
+			wireless_send_event(dev, cmd, &wrqu, extra);
+	}
+
+#if WIRELESS_EXT > 14
+	/* Look for WLC events that indicate a connection failure.
+	 * If found, generate an IWEVCUSTOM event.
+	 */
+	memset(extra, 0, sizeof(extra));
+	if (wl_iw_check_conn_fail(e, extra, sizeof(extra))) {
+		cmd = IWEVCUSTOM;
+		wrqu.data.length = strlen(extra);
+		wireless_send_event(dev, cmd, &wrqu, extra);
+	}
+#endif /* WIRELESS_EXT > 14 */
+
+#endif /* WIRELESS_EXT > 13 */
+}
+
+int wl_iw_get_wireless_stats(struct net_device *dev, struct iw_statistics *wstats)
+{
+	int res = 0;
+	wl_cnt_t cnt;
+	int phy_noise;
+	int rssi;
+	scb_val_t scb_val;
+
+	phy_noise = 0;
+	if ((res = dev_wlc_ioctl(dev, WLC_GET_PHY_NOISE, &phy_noise, sizeof(phy_noise))))
+		goto done;
+
+	phy_noise = dtoh32(phy_noise);
+	WL_TRACE(("wl_iw_get_wireless_stats phy noise=%d\n *****", phy_noise));
+
+	scb_val.val = 0;
+	if ((res = dev_wlc_ioctl(dev, WLC_GET_RSSI, &scb_val, sizeof(scb_val_t))))
+		goto done;
+
+	rssi = dtoh32(scb_val.val);
+	WL_TRACE(("wl_iw_get_wireless_stats rssi=%d ****** \n", rssi));
+	if (rssi <= WL_IW_RSSI_NO_SIGNAL)
+		wstats->qual.qual = 0;
+	else if (rssi <= WL_IW_RSSI_VERY_LOW)
+		wstats->qual.qual = 1;
+	else if (rssi <= WL_IW_RSSI_LOW)
+		wstats->qual.qual = 2;
+	else if (rssi <= WL_IW_RSSI_GOOD)
+		wstats->qual.qual = 3;
+	else if (rssi <= WL_IW_RSSI_VERY_GOOD)
+		wstats->qual.qual = 4;
+	else
+		wstats->qual.qual = 5;
+
+	/* Wraps to 0 if RSSI is 0 */
+	wstats->qual.level = 0x100 + rssi;
+	wstats->qual.noise = 0x100 + phy_noise;
+#if WIRELESS_EXT > 18
+	wstats->qual.updated |= (IW_QUAL_ALL_UPDATED | IW_QUAL_DBM);
+#else
+	wstats->qual.updated |= 7;
+#endif /* WIRELESS_EXT > 18 */
+
+#if WIRELESS_EXT > 11
+	WL_TRACE(("wl_iw_get_wireless_stats counters=%d\n *****", (int)sizeof(wl_cnt_t)));
+
+	memset(&cnt, 0, sizeof(wl_cnt_t));
+	res = dev_wlc_bufvar_get(dev, "counters", (char *)&cnt, sizeof(wl_cnt_t));
+	if (res)
+	{
+		WL_ERROR(("wl_iw_get_wireless_stats counters failed error=%d ****** \n", res));
+		goto done;
+	}
+
+	cnt.version = dtoh16(cnt.version);
+	if (cnt.version != WL_CNT_T_VERSION) {
+		WL_TRACE(("\tIncorrect version of counters struct: expected %d; got %d\n",
+			WL_CNT_T_VERSION, cnt.version));
+		goto done;
+	}
+
+	wstats->discard.nwid = 0;
+	wstats->discard.code = dtoh32(cnt.rxundec);
+	wstats->discard.fragment = dtoh32(cnt.rxfragerr);
+	wstats->discard.retries = dtoh32(cnt.txfail);
+	wstats->discard.misc = dtoh32(cnt.rxrunt) + dtoh32(cnt.rxgiant);
+	wstats->miss.beacon = 0;
+
+	WL_TRACE(("wl_iw_get_wireless_stats counters txframe=%d txbyte=%d\n",
+		dtoh32(cnt.txframe), dtoh32(cnt.txbyte)));
+	WL_TRACE(("wl_iw_get_wireless_stats counters rxfrmtoolong=%d\n", dtoh32(cnt.rxfrmtoolong)));
+	WL_TRACE(("wl_iw_get_wireless_stats counters rxbadplcp=%d\n", dtoh32(cnt.rxbadplcp)));
+	WL_TRACE(("wl_iw_get_wireless_stats counters rxundec=%d\n", dtoh32(cnt.rxundec)));
+	WL_TRACE(("wl_iw_get_wireless_stats counters rxfragerr=%d\n", dtoh32(cnt.rxfragerr)));
+	WL_TRACE(("wl_iw_get_wireless_stats counters txfail=%d\n", dtoh32(cnt.txfail)));
+	WL_TRACE(("wl_iw_get_wireless_stats counters rxrunt=%d\n", dtoh32(cnt.rxrunt)));
+	WL_TRACE(("wl_iw_get_wireless_stats counters rxgiant=%d\n", dtoh32(cnt.rxgiant)));
+
+#endif /* WIRELESS_EXT > 11 */
+
+done:
+	return res;
+}
+
+static void
+wl_iw_timerfunc(ulong data)
+{
+	iscan_info_t *iscan = (iscan_info_t *)data;
+	iscan->timer_on = 0;
+	if (iscan->iscan_state != ISCAN_STATE_IDLE) {
+		WL_TRACE(("timer trigger\n"));
+		up(&iscan->sysioc_sem);
+	}
+}
+
+static void
+wl_iw_set_event_mask(struct net_device *dev)
+{
+	char eventmask[WL_EVENTING_MASK_LEN];
+	char iovbuf[WL_EVENTING_MASK_LEN + 12];	/* Room for "event_msgs" + '\0' + bitvec */
+
+	dev_iw_iovar_getbuf(dev, "event_msgs", "", 0, iovbuf, sizeof(iovbuf));
+	bcopy(iovbuf, eventmask, WL_EVENTING_MASK_LEN);
+	setbit(eventmask, WLC_E_SCAN_COMPLETE);
+	dev_iw_iovar_setbuf(dev, "event_msgs", eventmask, WL_EVENTING_MASK_LEN,
+		iovbuf, sizeof(iovbuf));
+
+}
+
+static int
+wl_iw_iscan_prep(wl_scan_params_t *params, wlc_ssid_t *ssid)
+{
+	int err = 0;
+
+	memcpy(&params->bssid, &ether_bcast, ETHER_ADDR_LEN);
+	params->bss_type = DOT11_BSSTYPE_ANY;
+	params->scan_type = 0;
+	params->nprobes = -1;
+	params->active_time = -1;
+	params->passive_time = -1;
+	params->home_time = -1;
+	params->channel_num = 0;
+
+	params->nprobes = htod32(params->nprobes);
+	params->active_time = htod32(params->active_time);
+	params->passive_time = htod32(params->passive_time);
+	params->home_time = htod32(params->home_time);
+	if (ssid && ssid->SSID_len)
+		memcpy(&params->ssid, ssid, sizeof(wlc_ssid_t));
+
+	return err;
+}
+
+static int
+wl_iw_iscan(iscan_info_t *iscan, wlc_ssid_t *ssid, uint16 action)
+{
+	int params_size = (WL_SCAN_PARAMS_FIXED_SIZE + OFFSETOF(wl_iscan_params_t, params));
+	wl_iscan_params_t *params;
+	int err = 0;
+
+	if (ssid && ssid->SSID_len) {
+		params_size += sizeof(wlc_ssid_t);
+	}
+	params = (wl_iscan_params_t*)kmalloc(params_size, GFP_KERNEL);
+	if (params == NULL) {
+		return -ENOMEM;
+	}
+	memset(params, 0, params_size);
+	ASSERT(params_size < WLC_IOCTL_SMLEN);
+
+	err = wl_iw_iscan_prep(&params->params, ssid);
+
+	if (!err) {
+		params->version = htod32(ISCAN_REQ_VERSION);
+		params->action = htod16(action);
+		params->scan_duration = htod16(0);
+
+		/* params_size += OFFSETOF(wl_iscan_params_t, params); */
+		(void) dev_iw_iovar_setbuf(iscan->dev, "iscan", params, params_size,
+			iscan->ioctlbuf, WLC_IOCTL_SMLEN);
+	}
+
+	kfree(params);
+	return err;
+}
+
+static uint32
+wl_iw_iscan_get(iscan_info_t *iscan)
+{
+	iscan_buf_t * buf;
+	iscan_buf_t * ptr;
+	wl_iscan_results_t * list_buf;
+	wl_iscan_results_t list;
+	wl_scan_results_t *results;
+	uint32 status;
+
+	/* buffers are allocated on demand */
+	if (iscan->list_cur) {
+		buf = iscan->list_cur;
+		iscan->list_cur = buf->next;
+	}
+	else {
+		buf = kmalloc(sizeof(iscan_buf_t), GFP_KERNEL);
+		if (!buf)
+			return WL_SCAN_RESULTS_ABORTED;
+		buf->next = NULL;
+		if (!iscan->list_hdr)
+			iscan->list_hdr = buf;
+		else {
+			ptr = iscan->list_hdr;
+			while (ptr->next) {
+				ptr = ptr->next;
+			}
+			ptr->next = buf;
+		}
+	}
+	memset(buf->iscan_buf, 0, WLC_IW_ISCAN_MAXLEN);
+	list_buf = (wl_iscan_results_t*)buf->iscan_buf;
+	results = &list_buf->results;
+	results->buflen = WL_ISCAN_RESULTS_FIXED_SIZE;
+	results->version = 0;
+	results->count = 0;
+
+	memset(&list, 0, sizeof(list));
+	list.results.buflen = htod32(WLC_IW_ISCAN_MAXLEN);
+	(void) dev_iw_iovar_getbuf(
+		iscan->dev,
+		"iscanresults",
+		&list,
+		WL_ISCAN_RESULTS_FIXED_SIZE,
+		buf->iscan_buf,
+		WLC_IW_ISCAN_MAXLEN);
+	results->buflen = dtoh32(results->buflen);
+	results->version = dtoh32(results->version);
+	results->count = dtoh32(results->count);
+	WL_TRACE(("results->count = %d\n", results->count));
+
+	WL_TRACE(("results->buflen = %d\n", results->buflen));
+	status = dtoh32(list_buf->status);
+	return status;
+}
+
+static void wl_iw_send_scan_complete(iscan_info_t *iscan)
+{
+	union iwreq_data wrqu;
+
+	memset(&wrqu, 0, sizeof(wrqu));
+
+	/* wext expects to get no data for SIOCGIWSCAN Event  */
+	wireless_send_event(iscan->dev, SIOCGIWSCAN, &wrqu, NULL);
+}
+
+static int
+_iscan_sysioc_thread(void *data)
+{
+	uint32 status;
+	iscan_info_t *iscan = (iscan_info_t *)data;
+
+	DAEMONIZE("iscan_sysioc");
+
+	status = WL_SCAN_RESULTS_PARTIAL;
+	while (down_interruptible(&iscan->sysioc_sem) == 0) {
+		if (iscan->timer_on) {
+			del_timer(&iscan->timer);
+			iscan->timer_on = 0;
+		}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+		rtnl_lock();
+#endif
+		status = wl_iw_iscan_get(iscan);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+		rtnl_unlock();
+#endif
+
+		switch (status) {
+			case WL_SCAN_RESULTS_PARTIAL:
+				WL_TRACE(("iscanresults incomplete\n"));
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+				rtnl_lock();
+#endif
+				/* make sure our buffer size is enough before going next round */
+				wl_iw_iscan(iscan, NULL, WL_SCAN_ACTION_CONTINUE);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+				rtnl_unlock();
+#endif
+				/* Reschedule the timer */
+				iscan->timer.expires = jiffies + msecs_to_jiffies(iscan->timer_ms);
+				add_timer(&iscan->timer);
+				iscan->timer_on = 1;
+				break;
+			case WL_SCAN_RESULTS_SUCCESS:
+				WL_TRACE(("iscanresults complete\n"));
+				iscan->iscan_state = ISCAN_STATE_IDLE;
+				wl_iw_send_scan_complete(iscan);
+				break;
+			case WL_SCAN_RESULTS_PENDING:
+				WL_TRACE(("iscanresults pending\n"));
+				/* Reschedule the timer */
+				iscan->timer.expires = jiffies + msecs_to_jiffies(iscan->timer_ms);
+				add_timer(&iscan->timer);
+				iscan->timer_on = 1;
+				break;
+			case WL_SCAN_RESULTS_ABORTED:
+				WL_TRACE(("iscanresults aborted\n"));
+				iscan->iscan_state = ISCAN_STATE_IDLE;
+				wl_iw_send_scan_complete(iscan);
+				break;
+			default:
+				WL_TRACE(("iscanresults returned unknown status %d\n", status));
+				break;
+		 }
+	}
+	complete_and_exit(&iscan->sysioc_exited, 0);
+}
+
+int
+wl_iw_attach(struct net_device *dev, void * dhdp)
+{
+	iscan_info_t *iscan = NULL;
+
+	if (!dev)
+		return 0;
+
+	iscan = kmalloc(sizeof(iscan_info_t), GFP_KERNEL);
+	if (!iscan)
+		return -ENOMEM;
+	memset(iscan, 0, sizeof(iscan_info_t));
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+	iscan->kthread = NULL;
+#endif
+	iscan->sysioc_pid = -1;
+	/* we only care about main interface so save a global here */
+	g_iscan = iscan;
+	iscan->dev = dev;
+	iscan->iscan_state = ISCAN_STATE_IDLE;
+
+
+	/* Set up the timer */
+	iscan->timer_ms    = 2000;
+	init_timer(&iscan->timer);
+	iscan->timer.data = (ulong)iscan;
+	iscan->timer.function = wl_iw_timerfunc;
+
+	sema_init(&iscan->sysioc_sem, 0);
+	init_completion(&iscan->sysioc_exited);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+	iscan->kthread = kthread_run(_iscan_sysioc_thread, iscan, "iscan_sysioc");
+	iscan->sysioc_pid = iscan->kthread->pid;
+#else
+	iscan->sysioc_pid = kernel_thread(_iscan_sysioc_thread, iscan, 0);
+#endif
+	if (iscan->sysioc_pid < 0)
+		return -ENOMEM;
+	return 0;
+}
+
+void wl_iw_detach(void)
+{
+	iscan_buf_t  *buf;
+	iscan_info_t *iscan = g_iscan;
+	if (!iscan)
+		return;
+	if (iscan->sysioc_pid >= 0) {
+		KILL_PROC(iscan->sysioc_pid, SIGTERM);
+		wait_for_completion(&iscan->sysioc_exited);
+	}
+
+	while (iscan->list_hdr) {
+		buf = iscan->list_hdr->next;
+		kfree(iscan->list_hdr);
+		iscan->list_hdr = buf;
+	}
+	kfree(iscan);
+	g_iscan = NULL;
+}
+
+#endif /* USE_IW */
diff --git a/drivers/net/wireless/bcmdhd/wl_iw.h b/drivers/net/wireless/bcmdhd/wl_iw.h
new file mode 100644
index 0000000..95b2abd
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_iw.h
@@ -0,0 +1,161 @@
+/*
+ * Linux Wireless Extensions support
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_iw.h 467328 2014-04-03 01:23:40Z $
+ */
+
+#ifndef _wl_iw_h_
+#define _wl_iw_h_
+
+#include <linux/wireless.h>
+
+#include <typedefs.h>
+#include <proto/ethernet.h>
+#include <wlioctl.h>
+
+#define WL_SCAN_PARAMS_SSID_MAX 	10
+#define GET_SSID			"SSID="
+#define GET_CHANNEL			"CH="
+#define GET_NPROBE 			"NPROBE="
+#define GET_ACTIVE_ASSOC_DWELL  	"ACTIVE="
+#define GET_PASSIVE_ASSOC_DWELL  	"PASSIVE="
+#define GET_HOME_DWELL  		"HOME="
+#define GET_SCAN_TYPE			"TYPE="
+
+#define BAND_GET_CMD				"GETBAND"
+#define BAND_SET_CMD				"SETBAND"
+#define DTIM_SKIP_GET_CMD			"DTIMSKIPGET"
+#define DTIM_SKIP_SET_CMD			"DTIMSKIPSET"
+#define SETSUSPEND_CMD				"SETSUSPENDOPT"
+#define PNOSSIDCLR_SET_CMD			"PNOSSIDCLR"
+/* Lin - Is the extra space needed? */
+#define PNOSETUP_SET_CMD			"PNOSETUP " /* TLV command has extra end space */
+#define PNOENABLE_SET_CMD			"PNOFORCE"
+#define PNODEBUG_SET_CMD			"PNODEBUG"
+#define TXPOWER_SET_CMD			"TXPOWER"
+
+#define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5]
+#define MACSTR "%02x:%02x:%02x:%02x:%02x:%02x"
+
+/* Structure to keep global parameters */
+typedef struct wl_iw_extra_params {
+	int 	target_channel; /* target channel */
+} wl_iw_extra_params_t;
+
+struct cntry_locales_custom {
+	char iso_abbrev[WLC_CNTRY_BUF_SZ];	/* ISO 3166-1 country abbreviation */
+	char custom_locale[WLC_CNTRY_BUF_SZ];	/* Custom firmware locale */
+	int32 custom_locale_rev;		/* Custom local revisin default -1 */
+};
+/* ============================================== */
+/* Defines from wlc_pub.h */
+#define	WL_IW_RSSI_MINVAL		-200	/* Low value, e.g. for forcing roam */
+#define	WL_IW_RSSI_NO_SIGNAL	-91	/* NDIS RSSI link quality cutoffs */
+#define	WL_IW_RSSI_VERY_LOW	-80	/* Very low quality cutoffs */
+#define	WL_IW_RSSI_LOW		-70	/* Low quality cutoffs */
+#define	WL_IW_RSSI_GOOD		-68	/* Good quality cutoffs */
+#define	WL_IW_RSSI_VERY_GOOD	-58	/* Very good quality cutoffs */
+#define	WL_IW_RSSI_EXCELLENT	-57	/* Excellent quality cutoffs */
+#define	WL_IW_RSSI_INVALID	 0	/* invalid RSSI value */
+#define MAX_WX_STRING 80
+#define SSID_FMT_BUF_LEN	((4 * 32) + 1)
+#define isprint(c) bcm_isprint(c)
+#define WL_IW_SET_ACTIVE_SCAN	(SIOCIWFIRSTPRIV+1)
+#define WL_IW_GET_RSSI			(SIOCIWFIRSTPRIV+3)
+#define WL_IW_SET_PASSIVE_SCAN	(SIOCIWFIRSTPRIV+5)
+#define WL_IW_GET_LINK_SPEED	(SIOCIWFIRSTPRIV+7)
+#define WL_IW_GET_CURR_MACADDR	(SIOCIWFIRSTPRIV+9)
+#define WL_IW_SET_STOP				(SIOCIWFIRSTPRIV+11)
+#define WL_IW_SET_START			(SIOCIWFIRSTPRIV+13)
+
+#define 		G_SCAN_RESULTS 8*1024
+#define 		WE_ADD_EVENT_FIX	0x80
+#define          G_WLAN_SET_ON	0
+#define          G_WLAN_SET_OFF	1
+
+
+typedef struct wl_iw {
+	char nickname[IW_ESSID_MAX_SIZE];
+
+	struct iw_statistics wstats;
+
+	int spy_num;
+	uint32 pwsec;			/* pairwise wsec setting */
+	uint32 gwsec;			/* group wsec setting  */
+	bool privacy_invoked; 		/* IW_AUTH_PRIVACY_INVOKED setting */
+	struct ether_addr spy_addr[IW_MAX_SPY];
+	struct iw_quality spy_qual[IW_MAX_SPY];
+	void  *wlinfo;
+} wl_iw_t;
+
+struct wl_ctrl {
+	struct timer_list *timer;
+	struct net_device *dev;
+	long sysioc_pid;
+	struct semaphore sysioc_sem;
+	struct completion sysioc_exited;
+};
+
+
+#if WIRELESS_EXT > 12
+#include <net/iw_handler.h>
+extern const struct iw_handler_def wl_iw_handler_def;
+#endif /* WIRELESS_EXT > 12 */
+
+extern int wl_iw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+extern void wl_iw_event(struct net_device *dev, wl_event_msg_t *e, void* data);
+extern int wl_iw_get_wireless_stats(struct net_device *dev, struct iw_statistics *wstats);
+int wl_iw_attach(struct net_device *dev, void * dhdp);
+int wl_iw_send_priv_event(struct net_device *dev, char *flag);
+
+void wl_iw_detach(void);
+
+#define CSCAN_COMMAND				"CSCAN "
+#define CSCAN_TLV_PREFIX 			'S'
+#define CSCAN_TLV_VERSION			1
+#define CSCAN_TLV_SUBVERSION			0
+#define CSCAN_TLV_TYPE_SSID_IE          'S'
+#define CSCAN_TLV_TYPE_CHANNEL_IE   'C'
+#define CSCAN_TLV_TYPE_NPROBE_IE     'N'
+#define CSCAN_TLV_TYPE_ACTIVE_IE      'A'
+#define CSCAN_TLV_TYPE_PASSIVE_IE    'P'
+#define CSCAN_TLV_TYPE_HOME_IE         'H'
+#define CSCAN_TLV_TYPE_STYPE_IE        'T'
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+#define IWE_STREAM_ADD_EVENT(info, stream, ends, iwe, extra) \
+	iwe_stream_add_event(info, stream, ends, iwe, extra)
+#define IWE_STREAM_ADD_VALUE(info, event, value, ends, iwe, event_len) \
+	iwe_stream_add_value(info, event, value, ends, iwe, event_len)
+#define IWE_STREAM_ADD_POINT(info, stream, ends, iwe, extra) \
+	iwe_stream_add_point(info, stream, ends, iwe, extra)
+#else
+#define IWE_STREAM_ADD_EVENT(info, stream, ends, iwe, extra) \
+	iwe_stream_add_event(stream, ends, iwe, extra)
+#define IWE_STREAM_ADD_VALUE(info, event, value, ends, iwe, event_len) \
+	iwe_stream_add_value(event, value, ends, iwe, event_len)
+#define IWE_STREAM_ADD_POINT(info, stream, ends, iwe, extra) \
+	iwe_stream_add_point(stream, ends, iwe, extra)
+#endif
+
+#endif /* _wl_iw_h_ */
diff --git a/drivers/net/wireless/bcmdhd/wl_linux_mon.c b/drivers/net/wireless/bcmdhd/wl_linux_mon.c
new file mode 100644
index 0000000..2dc6aeb
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_linux_mon.c
@@ -0,0 +1,403 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), Linux monitor network interface
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_linux_mon.c 467328 2014-04-03 01:23:40Z $
+ */
+
+#include <osl.h>
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+#include <linux/ieee80211.h>
+#include <linux/rtnetlink.h>
+#include <net/ieee80211_radiotap.h>
+
+#include <wlioctl.h>
+#include <bcmutils.h>
+#include <dhd_dbg.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+
+typedef enum monitor_states
+{
+	MONITOR_STATE_DEINIT = 0x0,
+	MONITOR_STATE_INIT = 0x1,
+	MONITOR_STATE_INTERFACE_ADDED = 0x2,
+	MONITOR_STATE_INTERFACE_DELETED = 0x4
+} monitor_states_t;
+int dhd_add_monitor(char *name, struct net_device **new_ndev);
+extern int dhd_start_xmit(struct sk_buff *skb, struct net_device *net);
+int dhd_del_monitor(struct net_device *ndev);
+int dhd_monitor_init(void *dhd_pub);
+int dhd_monitor_uninit(void);
+
+/**
+ * Local declarations and defintions (not exposed)
+ */
+#ifndef DHD_MAX_IFS
+#define DHD_MAX_IFS 16
+#endif
+#define MON_PRINT(format, ...) printk("DHD-MON: %s " format, __func__, ##__VA_ARGS__)
+#define MON_TRACE MON_PRINT
+
+typedef struct monitor_interface {
+	int radiotap_enabled;
+	struct net_device* real_ndev;	/* The real interface that the monitor is on */
+	struct net_device* mon_ndev;
+} monitor_interface;
+
+typedef struct dhd_linux_monitor {
+	void *dhd_pub;
+	monitor_states_t monitor_state;
+	monitor_interface mon_if[DHD_MAX_IFS];
+	struct mutex lock;		/* lock to protect mon_if */
+} dhd_linux_monitor_t;
+
+static dhd_linux_monitor_t g_monitor;
+
+static struct net_device* lookup_real_netdev(char *name);
+static monitor_interface* ndev_to_monif(struct net_device *ndev);
+static int dhd_mon_if_open(struct net_device *ndev);
+static int dhd_mon_if_stop(struct net_device *ndev);
+static int dhd_mon_if_subif_start_xmit(struct sk_buff *skb, struct net_device *ndev);
+static void dhd_mon_if_set_multicast_list(struct net_device *ndev);
+static int dhd_mon_if_change_mac(struct net_device *ndev, void *addr);
+
+static const struct net_device_ops dhd_mon_if_ops = {
+	.ndo_open		= dhd_mon_if_open,
+	.ndo_stop		= dhd_mon_if_stop,
+	.ndo_start_xmit		= dhd_mon_if_subif_start_xmit,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
+	.ndo_set_rx_mode = dhd_mon_if_set_multicast_list,
+#else
+	.ndo_set_multicast_list = dhd_mon_if_set_multicast_list,
+#endif
+	.ndo_set_mac_address 	= dhd_mon_if_change_mac,
+};
+
+/**
+ * Local static function defintions
+ */
+
+/* Look up dhd's net device table to find a match (e.g. interface "eth0" is a match for "mon.eth0"
+ * "p2p-eth0-0" is a match for "mon.p2p-eth0-0")
+ */
+static struct net_device* lookup_real_netdev(char *name)
+{
+	struct net_device *ndev_found = NULL;
+
+	int i;
+	int len = 0;
+	int last_name_len = 0;
+	struct net_device *ndev;
+
+	/* We need to find interface "p2p-p2p-0" corresponding to monitor interface "mon-p2p-0",
+	 * Once mon iface name reaches IFNAMSIZ, it is reset to p2p0-0 and corresponding mon
+	 * iface would be mon-p2p0-0.
+	 */
+	for (i = 0; i < DHD_MAX_IFS; i++) {
+		ndev = dhd_idx2net(g_monitor.dhd_pub, i);
+
+		/* Skip "p2p" and look for "-p2p0-x" in monitor interface name. If it
+		 * it matches, then this netdev is the corresponding real_netdev.
+		 */
+		if (ndev && strstr(ndev->name, "p2p-p2p0")) {
+			len = strlen("p2p");
+		} else {
+		/* if p2p- is not present, then the IFNAMSIZ have reached and name
+		 * would have got reset. In this casse,look for p2p0-x in mon-p2p0-x
+		 */
+			len = 0;
+		}
+		if (ndev && strstr(name, (ndev->name + len))) {
+			if (strlen(ndev->name) > last_name_len) {
+				ndev_found = ndev;
+				last_name_len = strlen(ndev->name);
+			}
+		}
+	}
+
+	return ndev_found;
+}
+
+static monitor_interface* ndev_to_monif(struct net_device *ndev)
+{
+	int i;
+
+	for (i = 0; i < DHD_MAX_IFS; i++) {
+		if (g_monitor.mon_if[i].mon_ndev == ndev)
+			return &g_monitor.mon_if[i];
+	}
+
+	return NULL;
+}
+
+static int dhd_mon_if_open(struct net_device *ndev)
+{
+	int ret = 0;
+
+	MON_PRINT("enter\n");
+	return ret;
+}
+
+static int dhd_mon_if_stop(struct net_device *ndev)
+{
+	int ret = 0;
+
+	MON_PRINT("enter\n");
+	return ret;
+}
+
+static int dhd_mon_if_subif_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+	int ret = 0;
+	int rtap_len;
+	int qos_len = 0;
+	int dot11_hdr_len = 24;
+	int snap_len = 6;
+	unsigned char *pdata;
+	unsigned short frame_ctl;
+	unsigned char src_mac_addr[6];
+	unsigned char dst_mac_addr[6];
+	struct ieee80211_hdr *dot11_hdr;
+	struct ieee80211_radiotap_header *rtap_hdr;
+	monitor_interface* mon_if;
+
+	MON_PRINT("enter\n");
+
+	mon_if = ndev_to_monif(ndev);
+	if (mon_if == NULL || mon_if->real_ndev == NULL) {
+		MON_PRINT(" cannot find matched net dev, skip the packet\n");
+		goto fail;
+	}
+
+	if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header)))
+		goto fail;
+
+	rtap_hdr = (struct ieee80211_radiotap_header *)skb->data;
+	if (unlikely(rtap_hdr->it_version))
+		goto fail;
+
+	rtap_len = ieee80211_get_radiotap_len(skb->data);
+	if (unlikely(skb->len < rtap_len))
+		goto fail;
+
+	MON_PRINT("radiotap len (should be 14): %d\n", rtap_len);
+
+	/* Skip the ratio tap header */
+	skb_pull(skb, rtap_len);
+
+	dot11_hdr = (struct ieee80211_hdr *)skb->data;
+	frame_ctl = le16_to_cpu(dot11_hdr->frame_control);
+	/* Check if the QoS bit is set */
+	if ((frame_ctl & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) {
+		/* Check if this ia a Wireless Distribution System (WDS) frame
+		 * which has 4 MAC addresses
+		 */
+		if (dot11_hdr->frame_control & 0x0080)
+			qos_len = 2;
+		if ((dot11_hdr->frame_control & 0x0300) == 0x0300)
+			dot11_hdr_len += 6;
+
+		memcpy(dst_mac_addr, dot11_hdr->addr1, sizeof(dst_mac_addr));
+		memcpy(src_mac_addr, dot11_hdr->addr2, sizeof(src_mac_addr));
+
+		/* Skip the 802.11 header, QoS (if any) and SNAP, but leave spaces for
+		 * for two MAC addresses
+		 */
+		skb_pull(skb, dot11_hdr_len + qos_len + snap_len - sizeof(src_mac_addr) * 2);
+		pdata = (unsigned char*)skb->data;
+		memcpy(pdata, dst_mac_addr, sizeof(dst_mac_addr));
+		memcpy(pdata + sizeof(dst_mac_addr), src_mac_addr, sizeof(src_mac_addr));
+		PKTSETPRIO(skb, 0);
+
+		MON_PRINT("if name: %s, matched if name %s\n", ndev->name, mon_if->real_ndev->name);
+
+		/* Use the real net device to transmit the packet */
+		ret = dhd_start_xmit(skb, mon_if->real_ndev);
+
+		return ret;
+	}
+fail:
+	dev_kfree_skb(skb);
+	return 0;
+}
+
+static void dhd_mon_if_set_multicast_list(struct net_device *ndev)
+{
+	monitor_interface* mon_if;
+
+	mon_if = ndev_to_monif(ndev);
+	if (mon_if == NULL || mon_if->real_ndev == NULL) {
+		MON_PRINT(" cannot find matched net dev, skip the packet\n");
+	} else {
+		MON_PRINT("enter, if name: %s, matched if name %s\n",
+		ndev->name, mon_if->real_ndev->name);
+	}
+}
+
+static int dhd_mon_if_change_mac(struct net_device *ndev, void *addr)
+{
+	int ret = 0;
+	monitor_interface* mon_if;
+
+	mon_if = ndev_to_monif(ndev);
+	if (mon_if == NULL || mon_if->real_ndev == NULL) {
+		MON_PRINT(" cannot find matched net dev, skip the packet\n");
+	} else {
+		MON_PRINT("enter, if name: %s, matched if name %s\n",
+		ndev->name, mon_if->real_ndev->name);
+	}
+	return ret;
+}
+
+/**
+ * Global function definitions (declared in dhd_linux_mon.h)
+ */
+
+int dhd_add_monitor(char *name, struct net_device **new_ndev)
+{
+	int i;
+	int idx = -1;
+	int ret = 0;
+	struct net_device* ndev = NULL;
+	dhd_linux_monitor_t **dhd_mon;
+
+	mutex_lock(&g_monitor.lock);
+
+	MON_TRACE("enter, if name: %s\n", name);
+	if (!name || !new_ndev) {
+		MON_PRINT("invalid parameters\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * Find a vacancy
+	 */
+	for (i = 0; i < DHD_MAX_IFS; i++)
+		if (g_monitor.mon_if[i].mon_ndev == NULL) {
+			idx = i;
+			break;
+		}
+	if (idx == -1) {
+		MON_PRINT("exceeds maximum interfaces\n");
+		ret = -EFAULT;
+		goto out;
+	}
+
+	ndev = alloc_etherdev(sizeof(dhd_linux_monitor_t*));
+	if (!ndev) {
+		MON_PRINT("failed to allocate memory\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	ndev->type = ARPHRD_IEEE80211_RADIOTAP;
+	strncpy(ndev->name, name, IFNAMSIZ);
+	ndev->name[IFNAMSIZ - 1] = 0;
+	ndev->netdev_ops = &dhd_mon_if_ops;
+
+	ret = register_netdevice(ndev);
+	if (ret) {
+		MON_PRINT(" register_netdevice failed (%d)\n", ret);
+		goto out;
+	}
+
+	*new_ndev = ndev;
+	g_monitor.mon_if[idx].radiotap_enabled = TRUE;
+	g_monitor.mon_if[idx].mon_ndev = ndev;
+	g_monitor.mon_if[idx].real_ndev = lookup_real_netdev(name);
+	dhd_mon = (dhd_linux_monitor_t **)netdev_priv(ndev);
+	*dhd_mon = &g_monitor;
+	g_monitor.monitor_state = MONITOR_STATE_INTERFACE_ADDED;
+	MON_PRINT("net device returned: 0x%p\n", ndev);
+	MON_PRINT("found a matched net device, name %s\n", g_monitor.mon_if[idx].real_ndev->name);
+
+out:
+	if (ret && ndev)
+		free_netdev(ndev);
+
+	mutex_unlock(&g_monitor.lock);
+	return ret;
+
+}
+
+int dhd_del_monitor(struct net_device *ndev)
+{
+	int i;
+	if (!ndev)
+		return -EINVAL;
+	mutex_lock(&g_monitor.lock);
+	for (i = 0; i < DHD_MAX_IFS; i++) {
+		if (g_monitor.mon_if[i].mon_ndev == ndev ||
+			g_monitor.mon_if[i].real_ndev == ndev) {
+
+			g_monitor.mon_if[i].real_ndev = NULL;
+			unregister_netdevice(g_monitor.mon_if[i].mon_ndev);
+			free_netdev(g_monitor.mon_if[i].mon_ndev);
+			g_monitor.mon_if[i].mon_ndev = NULL;
+			g_monitor.monitor_state = MONITOR_STATE_INTERFACE_DELETED;
+			break;
+		}
+	}
+
+	if (g_monitor.monitor_state != MONITOR_STATE_INTERFACE_DELETED)
+		MON_PRINT("IF not found in monitor array, is this a monitor IF? 0x%p\n", ndev);
+	mutex_unlock(&g_monitor.lock);
+
+	return 0;
+}
+
+int dhd_monitor_init(void *dhd_pub)
+{
+	if (g_monitor.monitor_state == MONITOR_STATE_DEINIT) {
+		g_monitor.dhd_pub = dhd_pub;
+		mutex_init(&g_monitor.lock);
+		g_monitor.monitor_state = MONITOR_STATE_INIT;
+	}
+	return 0;
+}
+
+int dhd_monitor_uninit(void)
+{
+	int i;
+	struct net_device *ndev;
+	mutex_lock(&g_monitor.lock);
+	if (g_monitor.monitor_state != MONITOR_STATE_DEINIT) {
+		for (i = 0; i < DHD_MAX_IFS; i++) {
+			ndev = g_monitor.mon_if[i].mon_ndev;
+			if (ndev) {
+				unregister_netdevice(ndev);
+				free_netdev(ndev);
+				g_monitor.mon_if[i].real_ndev = NULL;
+				g_monitor.mon_if[i].mon_ndev = NULL;
+			}
+		}
+		g_monitor.monitor_state = MONITOR_STATE_DEINIT;
+	}
+	mutex_unlock(&g_monitor.lock);
+	return 0;
+}
diff --git a/drivers/net/wireless/bcmdhd/wl_roam.c b/drivers/net/wireless/bcmdhd/wl_roam.c
new file mode 100644
index 0000000..3fc9e76
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_roam.c
@@ -0,0 +1,308 @@
+/*
+ * Linux cfg80211 driver
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ *
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ *
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_roam.c 477711 2014-05-14 08:45:17Z $
+ */
+
+
+#include <typedefs.h>
+#include <osl.h>
+#include <bcmwifi_channels.h>
+#include <wlioctl.h>
+#include <bcmutils.h>
+#include <wl_cfg80211.h>
+#include <wldev_common.h>
+
+#define MAX_ROAM_CACHE		100
+#define MAX_CHANNEL_LIST	20
+#define MAX_SSID_BUFSIZE	36
+
+#define ROAMSCAN_MODE_NORMAL	0
+#define ROAMSCAN_MODE_WES		1
+
+typedef struct {
+	chanspec_t chanspec;
+	int ssid_len;
+	char ssid[DOT11_MAX_SSID_LEN];
+} roam_channel_cache;
+
+typedef struct {
+	int n;
+	chanspec_t channels[MAX_CHANNEL_LIST];
+} channel_list_t;
+
+static int n_roam_cache = 0;
+static int roam_band = WLC_BAND_AUTO;
+static roam_channel_cache roam_cache[MAX_ROAM_CACHE];
+static uint band2G, band5G, band_bw;
+
+void init_roam(int ioctl_ver)
+{
+#ifdef D11AC_IOTYPES
+	if (ioctl_ver == 1) {
+		/* legacy chanspec */
+		band2G = WL_LCHANSPEC_BAND_2G;
+		band5G = WL_LCHANSPEC_BAND_5G;
+		band_bw = WL_LCHANSPEC_BW_20 | WL_LCHANSPEC_CTL_SB_NONE;
+	} else {
+		band2G = WL_CHANSPEC_BAND_2G;
+		band5G = WL_CHANSPEC_BAND_5G;
+		band_bw = WL_CHANSPEC_BW_20;
+	}
+#else
+	band2G = WL_CHANSPEC_BAND_2G;
+	band5G = WL_CHANSPEC_BAND_5G;
+	band_bw = WL_CHANSPEC_BW_20 | WL_CHANSPEC_CTL_SB_NONE;
+#endif /* D11AC_IOTYPES */
+
+	n_roam_cache = 0;
+	roam_band = WLC_BAND_AUTO;
+
+}
+
+
+void set_roam_band(int band)
+{
+	roam_band = band;
+}
+
+void reset_roam_cache(void)
+{
+	n_roam_cache = 0;
+}
+
+void add_roam_cache(wl_bss_info_t *bi)
+{
+	int i;
+	uint8 channel;
+	char chanbuf[CHANSPEC_STR_LEN];
+
+
+	if (n_roam_cache >= MAX_ROAM_CACHE)
+		return;
+
+	if (bi->SSID_len > DOT11_MAX_SSID_LEN)
+		return;
+
+	for (i = 0; i < n_roam_cache; i++) {
+		if ((roam_cache[i].ssid_len == bi->SSID_len) &&
+			(roam_cache[i].chanspec == bi->chanspec) &&
+			(memcmp(roam_cache[i].ssid, bi->SSID, bi->SSID_len) == 0)) {
+			/* identical one found, just return */
+			return;
+		}
+	}
+
+	roam_cache[n_roam_cache].ssid_len = bi->SSID_len;
+	channel = wf_chspec_ctlchan(bi->chanspec);
+	WL_DBG(("CHSPEC  = %s, CTL %d\n", wf_chspec_ntoa_ex(bi->chanspec, chanbuf), channel));
+	roam_cache[n_roam_cache].chanspec =
+		(channel <= CH_MAX_2G_CHANNEL ? band2G : band5G) | band_bw | channel;
+	memcpy(roam_cache[n_roam_cache].ssid, bi->SSID, bi->SSID_len);
+
+	n_roam_cache++;
+}
+
+static bool is_duplicated_channel(const chanspec_t *channels,
+							int n_channels, chanspec_t new)
+{
+	int i;
+
+	for (i = 0; i < n_channels; i++) {
+		if (channels[i] == new)
+			return TRUE;
+	}
+
+	return FALSE;
+}
+
+int get_roam_channel_list(int target_chan, chanspec_t *channels,
+						const wlc_ssid_t *ssid, int ioctl_ver)
+{
+	int i, n = 0;
+	char chanbuf[CHANSPEC_STR_LEN];
+	if (target_chan) {
+		/* first index is filled with the given target channel */
+		channels[n++] = (target_chan & WL_CHANSPEC_CHAN_MASK) |
+			(target_chan <= CH_MAX_2G_CHANNEL ? band2G : band5G) | band_bw;
+		WL_DBG((" %s: %03d 0x%04X\n", __FUNCTION__, target_chan, channels[0]));
+	}
+
+	for (i = 0; i < n_roam_cache; i++) {
+		chanspec_t ch = roam_cache[i].chanspec;
+		bool is_2G = ioctl_ver == 1 ? LCHSPEC_IS2G(ch) : CHSPEC_IS2G(ch);
+		bool is_5G = ioctl_ver == 1 ? LCHSPEC_IS5G(ch) : CHSPEC_IS5G(ch);
+		bool band_match = ((roam_band == WLC_BAND_AUTO) ||
+			((roam_band == WLC_BAND_2G) && is_2G) ||
+			((roam_band == WLC_BAND_5G) && is_5G));
+
+		/* XXX: JIRA:SW4349-173 : 80p80 Support Required */
+		ch = CHSPEC_CHANNEL(ch) | (is_2G ? band2G : band5G) | band_bw;
+		if ((roam_cache[i].ssid_len == ssid->SSID_len) &&
+			band_match && !is_duplicated_channel(channels, n, ch) &&
+			(memcmp(roam_cache[i].ssid, ssid->SSID, ssid->SSID_len) == 0)) {
+			/* match found, add it */
+			WL_DBG(("%s: channel = %s\n", __FUNCTION__,
+				wf_chspec_ntoa_ex(ch, chanbuf)));
+			channels[n++] = ch;
+		}
+	}
+
+	return n;
+}
+
+
+void print_roam_cache(void)
+{
+	int i;
+
+	WL_DBG((" %d cache\n", n_roam_cache));
+
+	for (i = 0; i < n_roam_cache; i++) {
+		roam_cache[i].ssid[roam_cache[i].ssid_len] = 0;
+		WL_DBG(("0x%02X %02d %s\n", roam_cache[i].chanspec,
+			roam_cache[i].ssid_len, roam_cache[i].ssid));
+	}
+}
+
+static void add_roamcache_channel(channel_list_t *channels, chanspec_t ch)
+{
+	int i;
+
+	if (channels->n >= MAX_CHANNEL_LIST) /* buffer full */
+		return;
+
+	for (i = 0; i < channels->n; i++) {
+		if (channels->channels[i] == ch) /* already in the list */
+			return;
+	}
+
+	channels->channels[i] = ch;
+	channels->n++;
+
+	WL_DBG((" RCC: %02d 0x%04X\n",
+		ch & WL_CHANSPEC_CHAN_MASK, ch));
+}
+
+void update_roam_cache(struct bcm_cfg80211 *cfg, int ioctl_ver)
+{
+	int error, i, prev_channels;
+	channel_list_t channel_list;
+	char iobuf[WLC_IOCTL_SMLEN];
+	struct net_device *dev = bcmcfg_to_prmry_ndev(cfg);
+	wlc_ssid_t ssid;
+
+	if (!wl_get_drv_status(cfg, CONNECTED, dev)) {
+		WL_DBG(("Not associated\n"));
+		return;
+	}
+
+	/* need to read out the current cache list
+	   as the firmware may change dynamically
+	*/
+	error = wldev_iovar_getbuf(dev, "roamscan_channels", 0, 0,
+		(void *)&channel_list, sizeof(channel_list), NULL);
+
+	WL_DBG(("%d AP, %d cache item(s), err=%d\n", n_roam_cache, channel_list.n, error));
+
+	error = wldev_get_ssid(dev, &ssid);
+	if (error) {
+		WL_ERR(("Failed to get SSID, err=%d\n", error));
+		return;
+	}
+
+	prev_channels = channel_list.n;
+	for (i = 0; i < n_roam_cache; i++) {
+		chanspec_t ch = roam_cache[i].chanspec;
+		bool is_2G = ioctl_ver == 1 ? LCHSPEC_IS2G(ch) : CHSPEC_IS2G(ch);
+		bool is_5G = ioctl_ver == 1 ? LCHSPEC_IS5G(ch) : CHSPEC_IS5G(ch);
+		bool band_match = ((roam_band == WLC_BAND_AUTO) ||
+			((roam_band == WLC_BAND_2G) && is_2G) ||
+			((roam_band == WLC_BAND_5G) && is_5G));
+
+		if ((roam_cache[i].ssid_len == ssid.SSID_len) &&
+			band_match && (memcmp(roam_cache[i].ssid, ssid.SSID, ssid.SSID_len) == 0)) {
+			/* match found, add it */
+			/* XXX: JIRA:SW4349-173 : 80p80 Support Required */
+			ch = CHSPEC_CHANNEL(ch) | (is_2G ? band2G : band5G) | band_bw;
+			add_roamcache_channel(&channel_list, ch);
+		}
+	}
+	if (prev_channels != channel_list.n) {
+		/* channel list updated */
+		error = wldev_iovar_setbuf(dev, "roamscan_channels", &channel_list,
+			sizeof(channel_list), iobuf, sizeof(iobuf), NULL);
+		if (error) {
+			WL_ERR(("Failed to update roamscan channels, error = %d\n", error));
+		}
+	}
+}
+
+void wl_update_roamscan_cache_by_band(struct net_device *dev, int band)
+{
+	int i, error, ioctl_ver, wes_mode;
+	channel_list_t chanlist_before, chanlist_after;
+	char iobuf[WLC_IOCTL_SMLEN];
+
+	roam_band = band;
+	if (band == WLC_BAND_AUTO)
+		return;
+
+	error = wldev_iovar_getint(dev, "roamscan_mode", &wes_mode);
+	if (error) {
+		WL_ERR(("Failed to get roamscan mode, error = %d\n", error));
+		return;
+	}
+	/* in case of WES mode, then skip the update */
+	if (wes_mode)
+		return;
+
+	error = wldev_iovar_getbuf(dev, "roamscan_channels", 0, 0,
+		(void *)&chanlist_before, sizeof(channel_list_t), NULL);
+	if (error) {
+		WL_ERR(("Failed to get roamscan channels, error = %d\n", error));
+		return;
+	}
+	ioctl_ver = wl_cfg80211_get_ioctl_version();
+	chanlist_after.n = 0;
+	/* filtering by the given band */
+	for (i = 0; i < chanlist_before.n; i++) {
+		chanspec_t chspec = chanlist_before.channels[i];
+		bool is_2G = ioctl_ver == 1 ? LCHSPEC_IS2G(chspec) : CHSPEC_IS2G(chspec);
+		bool is_5G = ioctl_ver == 1 ? LCHSPEC_IS5G(chspec) : CHSPEC_IS5G(chspec);
+		bool band_match = ((band == WLC_BAND_2G) && is_2G) ||
+			((band == WLC_BAND_5G) && is_5G);
+		if (band_match) {
+			chanlist_after.channels[chanlist_after.n++] = chspec;
+		}
+	}
+
+	if (chanlist_before.n == chanlist_after.n)
+		return;
+
+	error = wldev_iovar_setbuf(dev, "roamscan_channels", &chanlist_after,
+		sizeof(channel_list_t), iobuf, sizeof(iobuf), NULL);
+	if (error) {
+		WL_ERR(("Failed to update roamscan channels, error = %d\n", error));
+	}
+}
diff --git a/drivers/net/wireless/bcmdhd/wldev_common.c b/drivers/net/wireless/bcmdhd/wldev_common.c
new file mode 100644
index 0000000..11ffa5c
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wldev_common.c
@@ -0,0 +1,385 @@
+/*
+ * Common function shared by Linux WEXT, cfg80211 and p2p drivers
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wldev_common.c 467328 2014-04-03 01:23:40Z $
+ */
+
+#include <osl.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+
+#include <wldev_common.h>
+#include <bcmutils.h>
+
+#define htod32(i) (i)
+#define htod16(i) (i)
+#define dtoh32(i) (i)
+#define dtoh16(i) (i)
+#define htodchanspec(i) (i)
+#define dtohchanspec(i) (i)
+
+#define	WLDEV_ERROR(args)						\
+	do {										\
+		printk(KERN_ERR "WLDEV-ERROR) %s : ", __func__);	\
+		printk args;							\
+	} while (0)
+
+extern int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd);
+
+s32 wldev_ioctl(
+	struct net_device *dev, u32 cmd, void *arg, u32 len, u32 set)
+{
+	s32 ret = 0;
+	struct wl_ioctl ioc;
+
+
+	memset(&ioc, 0, sizeof(ioc));
+	ioc.cmd = cmd;
+	ioc.buf = arg;
+	ioc.len = len;
+	ioc.set = set;
+
+	ret = dhd_ioctl_entry_local(dev, &ioc, cmd);
+
+	return ret;
+}
+
+/* Format a iovar buffer, not bsscfg indexed. The bsscfg index will be
+ * taken care of in dhd_ioctl_entry. Internal use only, not exposed to
+ * wl_iw, wl_cfg80211 and wl_cfgp2p
+ */
+static s32 wldev_mkiovar(
+	s8 *iovar_name, s8 *param, s32 paramlen,
+	s8 *iovar_buf, u32 buflen)
+{
+	s32 iolen = 0;
+
+	iolen = bcm_mkiovar(iovar_name, param, paramlen, iovar_buf, buflen);
+	return iolen;
+}
+
+s32 wldev_iovar_getbuf(
+	struct net_device *dev, s8 *iovar_name,
+	void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync)
+{
+	s32 ret = 0;
+	if (buf_sync) {
+		mutex_lock(buf_sync);
+	}
+	wldev_mkiovar(iovar_name, param, paramlen, buf, buflen);
+	ret = wldev_ioctl(dev, WLC_GET_VAR, buf, buflen, FALSE);
+	if (buf_sync)
+		mutex_unlock(buf_sync);
+	return ret;
+}
+
+
+s32 wldev_iovar_setbuf(
+	struct net_device *dev, s8 *iovar_name,
+	void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync)
+{
+	s32 ret = 0;
+	s32 iovar_len;
+	if (buf_sync) {
+		mutex_lock(buf_sync);
+	}
+	iovar_len = wldev_mkiovar(iovar_name, param, paramlen, buf, buflen);
+	if (iovar_len > 0)
+		ret = wldev_ioctl(dev, WLC_SET_VAR, buf, iovar_len, TRUE);
+	else
+		ret = BCME_BUFTOOSHORT;
+
+	if (buf_sync)
+		mutex_unlock(buf_sync);
+	return ret;
+}
+
+s32 wldev_iovar_setint(
+	struct net_device *dev, s8 *iovar, s32 val)
+{
+	s8 iovar_buf[WLC_IOCTL_SMLEN];
+
+	val = htod32(val);
+	memset(iovar_buf, 0, sizeof(iovar_buf));
+	return wldev_iovar_setbuf(dev, iovar, &val, sizeof(val), iovar_buf,
+		sizeof(iovar_buf), NULL);
+}
+
+
+s32 wldev_iovar_getint(
+	struct net_device *dev, s8 *iovar, s32 *pval)
+{
+	s8 iovar_buf[WLC_IOCTL_SMLEN];
+	s32 err;
+
+	memset(iovar_buf, 0, sizeof(iovar_buf));
+	err = wldev_iovar_getbuf(dev, iovar, pval, sizeof(*pval), iovar_buf,
+		sizeof(iovar_buf), NULL);
+	if (err == 0)
+	{
+		memcpy(pval, iovar_buf, sizeof(*pval));
+		*pval = dtoh32(*pval);
+	}
+	return err;
+}
+
+/** Format a bsscfg indexed iovar buffer. The bsscfg index will be
+ *  taken care of in dhd_ioctl_entry. Internal use only, not exposed to
+ *  wl_iw, wl_cfg80211 and wl_cfgp2p
+ */
+s32 wldev_mkiovar_bsscfg(
+	const s8 *iovar_name, s8 *param, s32 paramlen,
+	s8 *iovar_buf, s32 buflen, s32 bssidx)
+{
+	const s8 *prefix = "bsscfg:";
+	s8 *p;
+	u32 prefixlen;
+	u32 namelen;
+	u32 iolen;
+
+	if (bssidx == 0) {
+		return wldev_mkiovar((s8*)iovar_name, (s8 *)param, paramlen,
+			(s8 *) iovar_buf, buflen);
+	}
+
+	prefixlen = (u32) strlen(prefix); /* lengh of bsscfg prefix */
+	namelen = (u32) strlen(iovar_name) + 1; /* lengh of iovar  name + null */
+	iolen = prefixlen + namelen + sizeof(u32) + paramlen;
+
+	if (buflen < 0 || iolen > (u32)buflen)
+	{
+		WLDEV_ERROR(("%s: buffer is too short\n", __FUNCTION__));
+		return BCME_BUFTOOSHORT;
+	}
+
+	p = (s8 *)iovar_buf;
+
+	/* copy prefix, no null */
+	memcpy(p, prefix, prefixlen);
+	p += prefixlen;
+
+	/* copy iovar name including null */
+	memcpy(p, iovar_name, namelen);
+	p += namelen;
+
+	/* bss config index as first param */
+	bssidx = htod32(bssidx);
+	memcpy(p, &bssidx, sizeof(u32));
+	p += sizeof(u32);
+
+	/* parameter buffer follows */
+	if (paramlen)
+		memcpy(p, param, paramlen);
+
+	return iolen;
+
+}
+
+s32 wldev_iovar_getbuf_bsscfg(
+	struct net_device *dev, s8 *iovar_name,
+	void *param, s32 paramlen, void *buf, s32 buflen, s32 bsscfg_idx, struct mutex* buf_sync)
+{
+	s32 ret = 0;
+	if (buf_sync) {
+		mutex_lock(buf_sync);
+	}
+
+	wldev_mkiovar_bsscfg(iovar_name, param, paramlen, buf, buflen, bsscfg_idx);
+	ret = wldev_ioctl(dev, WLC_GET_VAR, buf, buflen, FALSE);
+	if (buf_sync) {
+		mutex_unlock(buf_sync);
+	}
+	return ret;
+
+}
+
+s32 wldev_iovar_setbuf_bsscfg(
+	struct net_device *dev, s8 *iovar_name,
+	void *param, s32 paramlen, void *buf, s32 buflen, s32 bsscfg_idx, struct mutex* buf_sync)
+{
+	s32 ret = 0;
+	s32 iovar_len;
+	if (buf_sync) {
+		mutex_lock(buf_sync);
+	}
+	iovar_len = wldev_mkiovar_bsscfg(iovar_name, param, paramlen, buf, buflen, bsscfg_idx);
+	if (iovar_len > 0)
+		ret = wldev_ioctl(dev, WLC_SET_VAR, buf, iovar_len, TRUE);
+	else {
+		ret = BCME_BUFTOOSHORT;
+	}
+
+	if (buf_sync) {
+		mutex_unlock(buf_sync);
+	}
+	return ret;
+}
+
+s32 wldev_iovar_setint_bsscfg(
+	struct net_device *dev, s8 *iovar, s32 val, s32 bssidx)
+{
+	s8 iovar_buf[WLC_IOCTL_SMLEN];
+
+	val = htod32(val);
+	memset(iovar_buf, 0, sizeof(iovar_buf));
+	return wldev_iovar_setbuf_bsscfg(dev, iovar, &val, sizeof(val), iovar_buf,
+		sizeof(iovar_buf), bssidx, NULL);
+}
+
+
+s32 wldev_iovar_getint_bsscfg(
+	struct net_device *dev, s8 *iovar, s32 *pval, s32 bssidx)
+{
+	s8 iovar_buf[WLC_IOCTL_SMLEN];
+	s32 err;
+
+	memset(iovar_buf, 0, sizeof(iovar_buf));
+	err = wldev_iovar_getbuf_bsscfg(dev, iovar, pval, sizeof(*pval), iovar_buf,
+		sizeof(iovar_buf), bssidx, NULL);
+	if (err == 0)
+	{
+		memcpy(pval, iovar_buf, sizeof(*pval));
+		*pval = dtoh32(*pval);
+	}
+	return err;
+}
+
+int wldev_get_link_speed(
+	struct net_device *dev, int *plink_speed)
+{
+	int error;
+
+	if (!plink_speed)
+		return -ENOMEM;
+	error = wldev_ioctl(dev, WLC_GET_RATE, plink_speed, sizeof(int), 0);
+	if (unlikely(error))
+		return error;
+
+	/* Convert internal 500Kbps to Kbps */
+	*plink_speed *= 500;
+	return error;
+}
+
+int wldev_get_rssi(
+	struct net_device *dev, int *prssi)
+{
+	scb_val_t scb_val;
+	int error;
+
+	if (!prssi)
+		return -ENOMEM;
+	bzero(&scb_val, sizeof(scb_val_t));
+
+	error = wldev_ioctl(dev, WLC_GET_RSSI, &scb_val, sizeof(scb_val_t), 0);
+	if (unlikely(error))
+		return error;
+
+	*prssi = dtoh32(scb_val.val);
+	return error;
+}
+
+int wldev_get_ssid(
+	struct net_device *dev, wlc_ssid_t *pssid)
+{
+	int error;
+
+	if (!pssid)
+		return -ENOMEM;
+	error = wldev_ioctl(dev, WLC_GET_SSID, pssid, sizeof(wlc_ssid_t), 0);
+	if (unlikely(error))
+		return error;
+	pssid->SSID_len = dtoh32(pssid->SSID_len);
+	return error;
+}
+
+int wldev_get_band(
+	struct net_device *dev, uint *pband)
+{
+	int error;
+
+	error = wldev_ioctl(dev, WLC_GET_BAND, pband, sizeof(uint), 0);
+	return error;
+}
+
+int wldev_set_band(
+	struct net_device *dev, uint band)
+{
+	int error = -1;
+
+	if ((band == WLC_BAND_AUTO) || (band == WLC_BAND_5G) || (band == WLC_BAND_2G)) {
+		error = wldev_ioctl(dev, WLC_SET_BAND, &band, sizeof(band), true);
+		if (!error)
+			dhd_bus_band_set(dev, band);
+	}
+	return error;
+}
+
+int wldev_set_country(
+	struct net_device *dev, char *country_code, bool notify, bool user_enforced)
+{
+	int error = -1;
+	wl_country_t cspec = {{0}, 0, {0}};
+	scb_val_t scbval;
+	char smbuf[WLC_IOCTL_SMLEN];
+
+	if (!country_code)
+		return error;
+
+	bzero(&scbval, sizeof(scb_val_t));
+	error = wldev_iovar_getbuf(dev, "country", NULL, 0, &cspec, sizeof(cspec), NULL);
+	if (error < 0) {
+		WLDEV_ERROR(("%s: get country failed = %d\n", __FUNCTION__, error));
+		return error;
+	}
+
+	if ((error < 0) || dhd_force_country_change(dev) ||
+	    (strncmp(country_code, cspec.country_abbrev, WLC_CNTRY_BUF_SZ) != 0)) {
+
+		if (user_enforced) {
+			bzero(&scbval, sizeof(scb_val_t));
+			error = wldev_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t), true);
+			if (error < 0) {
+				WLDEV_ERROR(("%s: set country failed due to Disassoc error %d\n",
+					__FUNCTION__, error));
+				return error;
+			}
+		}
+
+		cspec.rev = -1;
+		memcpy(cspec.country_abbrev, country_code, WLC_CNTRY_BUF_SZ);
+		memcpy(cspec.ccode, country_code, WLC_CNTRY_BUF_SZ);
+		dhd_get_customized_country_code(dev, (char *)&cspec.country_abbrev, &cspec);
+		error = wldev_iovar_setbuf(dev, "country", &cspec, sizeof(cspec),
+			smbuf, sizeof(smbuf), NULL);
+		if (error < 0) {
+			WLDEV_ERROR(("%s: set country for %s as %s rev %d failed\n",
+				__FUNCTION__, country_code, cspec.ccode, cspec.rev));
+			return error;
+		}
+		dhd_bus_country_set(dev, &cspec, notify);
+		WLDEV_ERROR(("%s: set country for %s as %s rev %d\n",
+			__FUNCTION__, country_code, cspec.ccode, cspec.rev));
+	}
+	return 0;
+}
diff --git a/drivers/net/wireless/bcmdhd/wldev_common.h b/drivers/net/wireless/bcmdhd/wldev_common.h
new file mode 100644
index 0000000..7944ef6
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wldev_common.h
@@ -0,0 +1,119 @@
+/*
+ * Common function shared by Linux WEXT, cfg80211 and p2p drivers
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wldev_common.h 467328 2014-04-03 01:23:40Z $
+ */
+#ifndef __WLDEV_COMMON_H__
+#define __WLDEV_COMMON_H__
+
+#include <wlioctl.h>
+
+/* wl_dev_ioctl - get/set IOCTLs, will call net_device's do_ioctl (or
+ *  netdev_ops->ndo_do_ioctl in new kernels)
+ *  @dev: the net_device handle
+ */
+s32 wldev_ioctl(
+	struct net_device *dev, u32 cmd, void *arg, u32 len, u32 set);
+
+/** Retrieve named IOVARs, this function calls wl_dev_ioctl with
+ *  WLC_GET_VAR IOCTL code
+ */
+s32 wldev_iovar_getbuf(
+	struct net_device *dev, s8 *iovar_name,
+	void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync);
+
+/** Set named IOVARs, this function calls wl_dev_ioctl with
+ *  WLC_SET_VAR IOCTL code
+ */
+s32 wldev_iovar_setbuf(
+	struct net_device *dev, s8 *iovar_name,
+	void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync);
+
+s32 wldev_iovar_setint(
+	struct net_device *dev, s8 *iovar, s32 val);
+
+s32 wldev_iovar_getint(
+	struct net_device *dev, s8 *iovar, s32 *pval);
+
+/** The following function can be implemented if there is a need for bsscfg
+ *  indexed IOVARs
+ */
+
+s32 wldev_mkiovar_bsscfg(
+	const s8 *iovar_name, s8 *param, s32 paramlen,
+	s8 *iovar_buf, s32 buflen, s32 bssidx);
+
+/** Retrieve named and bsscfg indexed IOVARs, this function calls wl_dev_ioctl with
+ *  WLC_GET_VAR IOCTL code
+ */
+s32 wldev_iovar_getbuf_bsscfg(
+	struct net_device *dev, s8 *iovar_name, void *param, s32 paramlen,
+	void *buf, s32 buflen, s32 bsscfg_idx, struct mutex* buf_sync);
+
+/** Set named and bsscfg indexed IOVARs, this function calls wl_dev_ioctl with
+ *  WLC_SET_VAR IOCTL code
+ */
+s32 wldev_iovar_setbuf_bsscfg(
+	struct net_device *dev, s8 *iovar_name, void *param, s32 paramlen,
+	void *buf, s32 buflen, s32 bsscfg_idx, struct mutex* buf_sync);
+
+s32 wldev_iovar_getint_bsscfg(
+	struct net_device *dev, s8 *iovar, s32 *pval, s32 bssidx);
+
+s32 wldev_iovar_setint_bsscfg(
+	struct net_device *dev, s8 *iovar, s32 val, s32 bssidx);
+
+extern int dhd_net_set_fw_path(struct net_device *dev, char *fw);
+extern int dhd_net_bus_suspend(struct net_device *dev);
+extern int dhd_net_bus_resume(struct net_device *dev, uint8 stage);
+extern int dhd_net_wifi_platform_set_power(struct net_device *dev, bool on,
+	unsigned long delay_msec);
+extern void dhd_get_customized_country_code(struct net_device *dev, char *country_iso_code,
+	wl_country_t *cspec);
+extern void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec, bool notify);
+extern bool dhd_force_country_change(struct net_device *dev);
+extern void dhd_bus_band_set(struct net_device *dev, uint band);
+extern int wldev_set_country(struct net_device *dev, char *country_code, bool notify,
+	bool user_enforced);
+extern int net_os_wake_lock(struct net_device *dev);
+extern int net_os_wake_unlock(struct net_device *dev);
+extern int net_os_wake_lock_timeout(struct net_device *dev);
+extern int net_os_wake_lock_timeout_enable(struct net_device *dev, int val);
+extern int net_os_set_dtim_skip(struct net_device *dev, int val);
+extern int net_os_set_suspend_disable(struct net_device *dev, int val);
+extern int net_os_set_suspend(struct net_device *dev, int val, int force);
+extern int wl_iw_parse_ssid_list_tlv(char** list_str, wlc_ssid_ext_t* ssid,
+	int max, int *bytes_left);
+
+/* Get the link speed from dongle, speed is in kpbs */
+int wldev_get_link_speed(struct net_device *dev, int *plink_speed);
+
+int wldev_get_rssi(struct net_device *dev, int *prssi);
+
+int wldev_get_ssid(struct net_device *dev, wlc_ssid_t *pssid);
+
+int wldev_get_band(struct net_device *dev, uint *pband);
+
+int wldev_set_band(struct net_device *dev, uint band);
+
+#endif /* __WLDEV_COMMON_H__ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
index 44fa0cd..0f6eb2b 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
@@ -575,8 +575,6 @@
 
 static int brcmf_sdio_pd_probe(struct platform_device *pdev)
 {
-	int ret;
-
 	brcmf_dbg(SDIO, "Enter\n");
 
 	brcmfmac_sdio_pdata = pdev->dev.platform_data;
@@ -584,11 +582,7 @@
 	if (brcmfmac_sdio_pdata->power_on)
 		brcmfmac_sdio_pdata->power_on();
 
-	ret = sdio_register_driver(&brcmf_sdmmc_driver);
-	if (ret)
-		brcmf_err("sdio_register_driver failed: %d\n", ret);
-
-	return ret;
+	return 0;
 }
 
 static int brcmf_sdio_pd_remove(struct platform_device *pdev)
@@ -610,6 +604,15 @@
 	}
 };
 
+void brcmf_sdio_register(void)
+{
+	int ret;
+
+	ret = sdio_register_driver(&brcmf_sdmmc_driver);
+	if (ret)
+		brcmf_err("sdio_register_driver failed: %d\n", ret);
+}
+
 void brcmf_sdio_exit(void)
 {
 	brcmf_dbg(SDIO, "Enter\n");
@@ -620,18 +623,13 @@
 		sdio_unregister_driver(&brcmf_sdmmc_driver);
 }
 
-void brcmf_sdio_init(void)
+void __init brcmf_sdio_init(void)
 {
 	int ret;
 
 	brcmf_dbg(SDIO, "Enter\n");
 
 	ret = platform_driver_probe(&brcmf_sdio_pd, brcmf_sdio_pd_probe);
-	if (ret == -ENODEV) {
-		brcmf_dbg(SDIO, "No platform data available, registering without.\n");
-		ret = sdio_register_driver(&brcmf_sdmmc_driver);
-	}
-
-	if (ret)
-		brcmf_err("driver registration failed: %d\n", ret);
+	if (ret == -ENODEV)
+		brcmf_dbg(SDIO, "No platform data available.\n");
 }
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
index 080395f..e715d33 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
@@ -154,10 +154,11 @@
 #ifdef CONFIG_BRCMFMAC_SDIO
 extern void brcmf_sdio_exit(void);
 extern void brcmf_sdio_init(void);
+extern void brcmf_sdio_register(void);
 #endif
 #ifdef CONFIG_BRCMFMAC_USB
 extern void brcmf_usb_exit(void);
-extern void brcmf_usb_init(void);
+extern void brcmf_usb_register(void);
 #endif
 
 #endif				/* _BRCMF_BUS_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index 2c59357..95d7099 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -1034,21 +1034,23 @@
 	return bus->chip << 4 | bus->chiprev;
 }
 
-static void brcmf_driver_init(struct work_struct *work)
+static void brcmf_driver_register(struct work_struct *work)
 {
-	brcmf_debugfs_init();
-
 #ifdef CONFIG_BRCMFMAC_SDIO
-	brcmf_sdio_init();
+	brcmf_sdio_register();
 #endif
 #ifdef CONFIG_BRCMFMAC_USB
-	brcmf_usb_init();
+	brcmf_usb_register();
 #endif
 }
-static DECLARE_WORK(brcmf_driver_work, brcmf_driver_init);
+static DECLARE_WORK(brcmf_driver_work, brcmf_driver_register);
 
 static int __init brcmfmac_module_init(void)
 {
+	brcmf_debugfs_init();
+#ifdef CONFIG_BRCMFMAC_SDIO
+	brcmf_sdio_init();
+#endif
 	if (!schedule_work(&brcmf_driver_work))
 		return -EBUSY;
 
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index 01aed7a..b0ab98b 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -1532,7 +1532,7 @@
 	brcmf_release_fw(&fw_image_list);
 }
 
-void brcmf_usb_init(void)
+void brcmf_usb_register(void)
 {
 	brcmf_dbg(USB, "Enter\n");
 	INIT_LIST_HEAD(&fw_image_list);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/d11.h b/drivers/net/wireless/brcm80211/brcmsmac/d11.h
index 9035cc4..4d5a33d 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/d11.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/d11.h
@@ -1879,9 +1879,6 @@
 #define	TST_TXTEST_RATE_11MBPS	3
 #define	TST_TXTEST_RATE_SHIFT	3
 
-#define SHM_BYT_CNT	0x2	/* IHR location */
-#define MAX_BYT_CNT	0x600	/* Maximum frame len */
-
 struct d11cnt {
 	u32 txfrag;
 	u32 txmulti;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/dma.c b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
index 1860c57..4fb9635 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/dma.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
@@ -1015,9 +1015,10 @@
 
 /*
  * post receive buffers
- *  return false is refill failed completely and ring is empty this will stall
- *  the rx dma and user might want to call rxfill again asap. This unlikely
- *  happens on memory-rich NIC, but often on memory-constrained dongle
+ *  Return false if refill failed completely or dma mapping failed. The ring
+ *  is empty, which will stall the rx dma and user might want to call rxfill
+ *  again asap. This is unlikely to happen on a memory-rich NIC, but often on
+ *  memory-constrained dongle.
  */
 bool dma_rxfill(struct dma_pub *pub)
 {
@@ -1078,6 +1079,8 @@
 
 		pa = dma_map_single(di->dmadev, p->data, di->rxbufsize,
 				    DMA_FROM_DEVICE);
+		if (dma_mapping_error(di->dmadev, pa))
+			return false;
 
 		/* save the free packet pointer */
 		di->rxp[rxout] = p;
@@ -1284,7 +1287,11 @@
 
 	/* get physical address of buffer start */
 	pa = dma_map_single(di->dmadev, data, len, DMA_TO_DEVICE);
-
+	/* if mapping failed, free skb */
+	if (dma_mapping_error(di->dmadev, pa)) {
+		brcmu_pkt_buf_free_skb(p);
+		return;
+	}
 	/* With a DMA segment list, Descriptor table is filled
 	 * using the segment list instead of looping over
 	 * buffers in multi-chain DMA. Therefore, EOF for SGLIST
diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
index ac07473..e509030 100644
--- a/drivers/net/wireless/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/hostap/hostap_ioctl.c
@@ -523,9 +523,9 @@
 
 	data->length = prism2_ap_get_sta_qual(local, addr, qual, IW_MAX_AP, 1);
 
-	memcpy(extra, &addr, sizeof(struct sockaddr) * data->length);
+	memcpy(extra, addr, sizeof(struct sockaddr) * data->length);
 	data->flags = 1; /* has quality information */
-	memcpy(extra + sizeof(struct sockaddr) * data->length, &qual,
+	memcpy(extra + sizeof(struct sockaddr) * data->length, qual,
 	       sizeof(struct iw_quality) * data->length);
 
 	kfree(addr);
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index 9a95045..a8eff95 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -4442,13 +4442,13 @@
 		 * is killed. Hence update the killswitch state here. The
 		 * rfkill handler will care about restarting if needed.
 		 */
-		if (!test_bit(S_ALIVE, &il->status)) {
-			if (hw_rf_kill)
-				set_bit(S_RFKILL, &il->status);
-			else
-				clear_bit(S_RFKILL, &il->status);
-			wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill);
+		if (hw_rf_kill) {
+			set_bit(S_RFKILL, &il->status);
+		} else {
+			clear_bit(S_RFKILL, &il->status);
+			il_force_reset(il, true);
 		}
+		wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill);
 
 		handled |= CSR_INT_BIT_RF_KILL;
 	}
@@ -5316,6 +5316,9 @@
 
 	il->active_rate = RATES_MASK;
 
+	il_power_update_mode(il, true);
+	D_INFO("Updated power mode\n");
+
 	if (il_is_associated(il)) {
 		struct il_rxon_cmd *active_rxon =
 		    (struct il_rxon_cmd *)&il->active;
@@ -5346,9 +5349,6 @@
 	D_INFO("ALIVE processing complete.\n");
 	wake_up(&il->wait_command_queue);
 
-	il_power_update_mode(il, true);
-	D_INFO("Updated power mode\n");
-
 	return;
 
 restart:
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
index e9a3cbc..9c9ebad 100644
--- a/drivers/net/wireless/iwlegacy/common.c
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -4660,6 +4660,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL(il_force_reset);
 
 int
 il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index cab23af..e04f3da 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -1059,7 +1059,10 @@
 	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
 		return;
 
-	if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
+	if (!test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
+		return;
+
+	if (ctx->vif)
 		ieee80211_chswitch_done(ctx->vif, is_success);
 }
 
diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
index 74d7572..a8afc7b 100644
--- a/drivers/net/wireless/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/iwlwifi/dvm/main.c
@@ -758,7 +758,7 @@
 					 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
 		if (ret)
 			return ret;
-	} else {
+	} else if (priv->cfg->bt_params) {
 		/*
 		 * default is 2-wire BT coexexistence support
 		 */
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index b5ab8d1..5282088 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -268,6 +268,12 @@
 	.ht_params = &iwl6000_ht_params,
 };
 
+const struct iwl_cfg iwl6035_2agn_sff_cfg = {
+	.name = "Intel(R) Centrino(R) Ultimate-N 6235 AGN",
+	IWL_DEVICE_6035,
+	.ht_params = &iwl6000_ht_params,
+};
+
 const struct iwl_cfg iwl1030_bgn_cfg = {
 	.name = "Intel(R) Centrino(R) Wireless-N 1030 BGN",
 	IWL_DEVICE_6030,
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index 50263e8..822443c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -67,16 +67,16 @@
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL7260_UCODE_API_MAX	6
-#define IWL3160_UCODE_API_MAX	6
+#define IWL7260_UCODE_API_MAX	7
+#define IWL3160_UCODE_API_MAX	7
 
 /* Oldest version we won't warn about */
-#define IWL7260_UCODE_API_OK	6
-#define IWL3160_UCODE_API_OK	6
+#define IWL7260_UCODE_API_OK	7
+#define IWL3160_UCODE_API_OK	7
 
 /* Lowest firmware API version supported */
-#define IWL7260_UCODE_API_MIN	6
-#define IWL3160_UCODE_API_MIN	6
+#define IWL7260_UCODE_API_MIN	7
+#define IWL3160_UCODE_API_MIN	7
 
 /* NVM versions */
 #define IWL7260_NVM_VERSION		0x0a1d
@@ -125,7 +125,7 @@
 
 
 const struct iwl_cfg iwl7260_2ac_cfg = {
-	.name = "Intel(R) Dual Band Wireless AC7260",
+	.name = "Intel(R) Dual Band Wireless AC 7260",
 	.fw_name_pre = IWL7260_FW_PRE,
 	IWL_DEVICE_7000,
 	.ht_params = &iwl7000_ht_params,
@@ -133,8 +133,44 @@
 	.nvm_calib_ver = IWL7260_TX_POWER_VERSION,
 };
 
-const struct iwl_cfg iwl3160_ac_cfg = {
-	.name = "Intel(R) Dual Band Wireless AC3160",
+const struct iwl_cfg iwl7260_2n_cfg = {
+	.name = "Intel(R) Dual Band Wireless N 7260",
+	.fw_name_pre = IWL7260_FW_PRE,
+	IWL_DEVICE_7000,
+	.ht_params = &iwl7000_ht_params,
+	.nvm_ver = IWL7260_NVM_VERSION,
+	.nvm_calib_ver = IWL7260_TX_POWER_VERSION,
+};
+
+const struct iwl_cfg iwl7260_n_cfg = {
+	.name = "Intel(R) Wireless N 7260",
+	.fw_name_pre = IWL7260_FW_PRE,
+	IWL_DEVICE_7000,
+	.ht_params = &iwl7000_ht_params,
+	.nvm_ver = IWL7260_NVM_VERSION,
+	.nvm_calib_ver = IWL7260_TX_POWER_VERSION,
+};
+
+const struct iwl_cfg iwl3160_2ac_cfg = {
+	.name = "Intel(R) Dual Band Wireless AC 3160",
+	.fw_name_pre = IWL3160_FW_PRE,
+	IWL_DEVICE_7000,
+	.ht_params = &iwl7000_ht_params,
+	.nvm_ver = IWL3160_NVM_VERSION,
+	.nvm_calib_ver = IWL3160_TX_POWER_VERSION,
+};
+
+const struct iwl_cfg iwl3160_2n_cfg = {
+	.name = "Intel(R) Dual Band Wireless N 3160",
+	.fw_name_pre = IWL3160_FW_PRE,
+	IWL_DEVICE_7000,
+	.ht_params = &iwl7000_ht_params,
+	.nvm_ver = IWL3160_NVM_VERSION,
+	.nvm_calib_ver = IWL3160_TX_POWER_VERSION,
+};
+
+const struct iwl_cfg iwl3160_n_cfg = {
+	.name = "Intel(R) Wireless N 3160",
 	.fw_name_pre = IWL3160_FW_PRE,
 	IWL_DEVICE_7000,
 	.ht_params = &iwl7000_ht_params,
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index c38aa8f..44e3370 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -316,10 +316,15 @@
 extern const struct iwl_cfg iwl2000_2bgn_d_cfg;
 extern const struct iwl_cfg iwl2030_2bgn_cfg;
 extern const struct iwl_cfg iwl6035_2agn_cfg;
+extern const struct iwl_cfg iwl6035_2agn_sff_cfg;
 extern const struct iwl_cfg iwl105_bgn_cfg;
 extern const struct iwl_cfg iwl105_bgn_d_cfg;
 extern const struct iwl_cfg iwl135_bgn_cfg;
 extern const struct iwl_cfg iwl7260_2ac_cfg;
-extern const struct iwl_cfg iwl3160_ac_cfg;
+extern const struct iwl_cfg iwl7260_2n_cfg;
+extern const struct iwl_cfg iwl7260_n_cfg;
+extern const struct iwl_cfg iwl3160_2ac_cfg;
+extern const struct iwl_cfg iwl3160_2n_cfg;
+extern const struct iwl_cfg iwl3160_n_cfg;
 
 #endif /* __IWL_CONFIG_H__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
index 51e015d..6f8b2c1 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
@@ -75,13 +75,15 @@
  * struct iwl_d3_manager_config - D3 manager configuration command
  * @min_sleep_time: minimum sleep time (in usec)
  * @wakeup_flags: wakeup flags, see &enum iwl_d3_wakeup_flags
+ * @wakeup_host_timer: force wakeup after this many seconds
  *
  * The structure is used for the D3_CONFIG_CMD command.
  */
 struct iwl_d3_manager_config {
 	__le32 min_sleep_time;
 	__le32 wakeup_flags;
-} __packed; /* D3_MANAGER_CONFIG_CMD_S_VER_3 */
+	__le32 wakeup_host_timer;
+} __packed; /* D3_MANAGER_CONFIG_CMD_S_VER_4 */
 
 
 /* TODO: OFFLOADS_QUERY_API_S_VER_1 */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
index d68640e..98b1feb 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
@@ -71,7 +71,13 @@
 #define MAC_INDEX_MIN_DRIVER	0
 #define NUM_MAC_INDEX_DRIVER	MAC_INDEX_AUX
 
-#define AC_NUM	4 /* Number of access categories */
+enum iwl_ac {
+	AC_BK,
+	AC_BE,
+	AC_VI,
+	AC_VO,
+	AC_NUM,
+};
 
 /**
  * enum iwl_mac_protection_flags - MAC context flags
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index b60d141..365095a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -69,7 +69,6 @@
 /* Scan Commands, Responses, Notifications */
 
 /* Masks for iwl_scan_channel.type flags */
-#define SCAN_CHANNEL_TYPE_PASSIVE	0
 #define SCAN_CHANNEL_TYPE_ACTIVE	BIT(0)
 #define SCAN_CHANNEL_NARROW_BAND	BIT(22)
 
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
index b2cc3d9..d8e858c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
@@ -193,14 +193,11 @@
 u32 iwl_mvm_mac_get_queues_mask(struct iwl_mvm *mvm,
 				struct ieee80211_vif *vif)
 {
-	u32 qmask, ac;
+	u32 qmask = 0, ac;
 
 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
 		return BIT(IWL_MVM_OFFCHANNEL_QUEUE);
 
-	qmask = (vif->cab_queue != IEEE80211_INVAL_HW_QUEUE) ?
-		BIT(vif->cab_queue) : 0;
-
 	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
 		if (vif->hw_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
 			qmask |= BIT(vif->hw_queue[ac]);
@@ -362,7 +359,7 @@
 		break;
 	case NL80211_IFTYPE_AP:
 		iwl_trans_ac_txq_enable(mvm->trans, vif->cab_queue,
-					IWL_MVM_TX_FIFO_VO);
+					IWL_MVM_TX_FIFO_MCAST);
 		/* fall through */
 	default:
 		for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
@@ -550,6 +547,10 @@
 		cmd->ac[i].fifos_mask = BIT(iwl_mvm_ac_to_tx_fifo[i]);
 	}
 
+	/* in AP mode, the MCAST FIFO takes the EDCA params from VO */
+	if (vif->type == NL80211_IFTYPE_AP)
+		cmd->ac[AC_VO].fifos_mask |= BIT(IWL_MVM_TX_FIFO_MCAST);
+
 	if (vif->bss_conf.qos)
 		cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);
 
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index a5eb8c8..f7545e0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -243,7 +243,11 @@
 	if (ret)
 		return ret;
 
-	return ieee80211_register_hw(mvm->hw);
+	ret = ieee80211_register_hw(mvm->hw);
+	if (ret)
+		iwl_mvm_leds_exit(mvm);
+
+	return ret;
 }
 
 static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
@@ -987,6 +991,21 @@
 	mutex_lock(&mvm->mutex);
 	if (old_state == IEEE80211_STA_NOTEXIST &&
 	    new_state == IEEE80211_STA_NONE) {
+		/*
+		 * Firmware bug - it'll crash if the beacon interval is less
+		 * than 16. We can't avoid connecting at all, so refuse the
+		 * station state change, this will cause mac80211 to abandon
+		 * attempts to connect to this AP, and eventually wpa_s will
+		 * blacklist the AP...
+		 */
+		if (vif->type == NL80211_IFTYPE_STATION &&
+		    vif->bss_conf.beacon_int < 16) {
+			IWL_ERR(mvm,
+				"AP %pM beacon interval is %d, refusing due to firmware bug!\n",
+				sta->addr, vif->bss_conf.beacon_int);
+			ret = -EINVAL;
+			goto out_unlock;
+		}
 		ret = iwl_mvm_add_sta(mvm, vif, sta);
 	} else if (old_state == IEEE80211_STA_NONE &&
 		   new_state == IEEE80211_STA_AUTH) {
@@ -1015,6 +1034,7 @@
 	} else {
 		ret = -EIO;
 	}
+ out_unlock:
 	mutex_unlock(&mvm->mutex);
 
 	return ret;
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index 9f46b23..8086231 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -88,6 +88,7 @@
 	IWL_MVM_TX_FIFO_BE,
 	IWL_MVM_TX_FIFO_VI,
 	IWL_MVM_TX_FIFO_VO,
+	IWL_MVM_TX_FIFO_MCAST = 5,
 };
 
 extern struct ieee80211_ops iwl_mvm_hw_ops;
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 2476e43..8e1f6c0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -137,8 +137,8 @@
 {
 	int fw_idx, req_idx;
 
-	fw_idx = 0;
-	for (req_idx = req->n_ssids - 1; req_idx > 0; req_idx--) {
+	for (req_idx = req->n_ssids - 1, fw_idx = 0; req_idx > 0;
+	     req_idx--, fw_idx++) {
 		cmd->direct_scan[fw_idx].id = WLAN_EID_SSID;
 		cmd->direct_scan[fw_idx].len = req->ssids[req_idx].ssid_len;
 		memcpy(cmd->direct_scan[fw_idx].ssid,
@@ -176,19 +176,12 @@
 	struct iwl_scan_channel *chan = (struct iwl_scan_channel *)
 		(cmd->data + le16_to_cpu(cmd->tx_cmd.len));
 	int i;
-	__le32 chan_type_value;
-
-	if (req->n_ssids > 0)
-		chan_type_value = cpu_to_le32(BIT(req->n_ssids + 1) - 1);
-	else
-		chan_type_value = SCAN_CHANNEL_TYPE_PASSIVE;
 
 	for (i = 0; i < cmd->channel_count; i++) {
 		chan->channel = cpu_to_le16(req->channels[i]->hw_value);
+		chan->type = cpu_to_le32(BIT(req->n_ssids) - 1);
 		if (req->channels[i]->flags & IEEE80211_CHAN_PASSIVE_SCAN)
-			chan->type = SCAN_CHANNEL_TYPE_PASSIVE;
-		else
-			chan->type = chan_type_value;
+			chan->type &= cpu_to_le32(~SCAN_CHANNEL_TYPE_ACTIVE);
 		chan->active_dwell = cpu_to_le16(active_dwell);
 		chan->passive_dwell = cpu_to_le16(passive_dwell);
 		chan->iteration_count = cpu_to_le16(1);
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index 5c664ed..68f0bbe 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -226,9 +226,6 @@
 		if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
 			mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
 
-	if (vif->cab_queue != IEEE80211_INVAL_HW_QUEUE)
-		mvm_sta->tfd_queue_msk |= BIT(vif->cab_queue);
-
 	/* for HW restart - need to reset the seq_number etc... */
 	memset(mvm_sta->tid_data, 0, sizeof(mvm_sta->tid_data));
 
@@ -621,8 +618,12 @@
 	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
 	cmd.sta_id = mvm_sta->sta_id;
 	cmd.add_modify = STA_MODE_MODIFY;
-	cmd.add_immediate_ba_tid = (u8) tid;
-	cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
+	if (start) {
+		cmd.add_immediate_ba_tid = (u8) tid;
+		cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
+	} else {
+		cmd.remove_immediate_ba_tid = (u8) tid;
+	}
 	cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
 				  STA_MODIFY_REMOVE_BA_TID;
 
@@ -894,6 +895,7 @@
 	struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
 	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
 	u16 txq_id;
+	enum iwl_mvm_agg_state old_state;
 
 	/*
 	 * First set the agg state to OFF to avoid calling
@@ -903,13 +905,17 @@
 	txq_id = tid_data->txq_id;
 	IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
 			    mvmsta->sta_id, tid, txq_id, tid_data->state);
+	old_state = tid_data->state;
 	tid_data->state = IWL_AGG_OFF;
 	spin_unlock_bh(&mvmsta->lock);
 
-	if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), true))
-		IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
+	if (old_state >= IWL_AGG_ON) {
+		if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), true))
+			IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
 
-	iwl_trans_txq_disable(mvm->trans, tid_data->txq_id);
+		iwl_trans_txq_disable(mvm->trans, tid_data->txq_id);
+	}
+
 	mvm->queue_to_mac80211[tid_data->txq_id] =
 				IWL_INVALID_MAC80211_QUEUE;
 
@@ -1287,17 +1293,11 @@
 	struct iwl_mvm_add_sta_cmd cmd = {
 		.add_modify = STA_MODE_MODIFY,
 		.sta_id = mvmsta->sta_id,
-		.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
-		.sleep_state_flags = cpu_to_le16(STA_SLEEP_STATE_AWAKE),
+		.station_flags_msk = cpu_to_le32(STA_FLG_PS),
 		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
 	};
 	int ret;
 
-	/*
-	 * Same modify mask for sleep_tx_count and sleep_state_flags but this
-	 * should be fine since if we set the STA as "awake", then
-	 * sleep_tx_count is not relevant.
-	 */
 	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
 	if (ret)
 		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 48c1891..a2e6112 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -175,7 +175,7 @@
 	 * table is controlled by LINK_QUALITY commands
 	 */
 
-	if (ieee80211_is_data(fc)) {
+	if (ieee80211_is_data(fc) && sta) {
 		tx_cmd->initial_rate_index = 0;
 		tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
 		return;
@@ -610,8 +610,8 @@
 		    !(info->flags & IEEE80211_TX_STAT_ACK))
 			info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
 
-		/* W/A FW bug: seq_ctl is wrong when the queue is flushed */
-		if (status == TX_STATUS_FAIL_FIFO_FLUSHED) {
+		/* W/A FW bug: seq_ctl is wrong when the status isn't success */
+		if (status != TX_STATUS_SUCCESS) {
 			struct ieee80211_hdr *hdr = (void *)skb->data;
 			seq_ctl = le16_to_cpu(hdr->seq_ctrl);
 		}
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 8cb53ec..b53e5c3 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -129,6 +129,7 @@
 	{IWL_PCI_DEVICE(0x423C, 0x1306, iwl5150_abg_cfg)}, /* Half Mini Card */
 	{IWL_PCI_DEVICE(0x423C, 0x1221, iwl5150_agn_cfg)}, /* Mini Card */
 	{IWL_PCI_DEVICE(0x423C, 0x1321, iwl5150_agn_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x423C, 0x1326, iwl5150_abg_cfg)}, /* Half Mini Card */
 
 	{IWL_PCI_DEVICE(0x423D, 0x1211, iwl5150_agn_cfg)}, /* Mini Card */
 	{IWL_PCI_DEVICE(0x423D, 0x1311, iwl5150_agn_cfg)}, /* Half Mini Card */
@@ -137,13 +138,16 @@
 
 /* 6x00 Series */
 	{IWL_PCI_DEVICE(0x422B, 0x1101, iwl6000_3agn_cfg)},
+	{IWL_PCI_DEVICE(0x422B, 0x1108, iwl6000_3agn_cfg)},
 	{IWL_PCI_DEVICE(0x422B, 0x1121, iwl6000_3agn_cfg)},
+	{IWL_PCI_DEVICE(0x422B, 0x1128, iwl6000_3agn_cfg)},
 	{IWL_PCI_DEVICE(0x422C, 0x1301, iwl6000i_2agn_cfg)},
 	{IWL_PCI_DEVICE(0x422C, 0x1306, iwl6000i_2abg_cfg)},
 	{IWL_PCI_DEVICE(0x422C, 0x1307, iwl6000i_2bg_cfg)},
 	{IWL_PCI_DEVICE(0x422C, 0x1321, iwl6000i_2agn_cfg)},
 	{IWL_PCI_DEVICE(0x422C, 0x1326, iwl6000i_2abg_cfg)},
 	{IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_3agn_cfg)},
+	{IWL_PCI_DEVICE(0x4238, 0x1118, iwl6000_3agn_cfg)},
 	{IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)},
 	{IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)},
 
@@ -151,12 +155,16 @@
 	{IWL_PCI_DEVICE(0x0082, 0x1301, iwl6005_2agn_cfg)},
 	{IWL_PCI_DEVICE(0x0082, 0x1306, iwl6005_2abg_cfg)},
 	{IWL_PCI_DEVICE(0x0082, 0x1307, iwl6005_2bg_cfg)},
+	{IWL_PCI_DEVICE(0x0082, 0x1308, iwl6005_2agn_cfg)},
 	{IWL_PCI_DEVICE(0x0082, 0x1321, iwl6005_2agn_cfg)},
 	{IWL_PCI_DEVICE(0x0082, 0x1326, iwl6005_2abg_cfg)},
+	{IWL_PCI_DEVICE(0x0082, 0x1328, iwl6005_2agn_cfg)},
 	{IWL_PCI_DEVICE(0x0085, 0x1311, iwl6005_2agn_cfg)},
+	{IWL_PCI_DEVICE(0x0085, 0x1318, iwl6005_2agn_cfg)},
 	{IWL_PCI_DEVICE(0x0085, 0x1316, iwl6005_2abg_cfg)},
 	{IWL_PCI_DEVICE(0x0082, 0xC020, iwl6005_2agn_sff_cfg)},
 	{IWL_PCI_DEVICE(0x0085, 0xC220, iwl6005_2agn_sff_cfg)},
+	{IWL_PCI_DEVICE(0x0085, 0xC228, iwl6005_2agn_sff_cfg)},
 	{IWL_PCI_DEVICE(0x0082, 0x4820, iwl6005_2agn_d_cfg)},
 	{IWL_PCI_DEVICE(0x0082, 0x1304, iwl6005_2agn_mow1_cfg)},/* low 5GHz active */
 	{IWL_PCI_DEVICE(0x0082, 0x1305, iwl6005_2agn_mow2_cfg)},/* high 5GHz active */
@@ -238,8 +246,11 @@
 
 /* 6x35 Series */
 	{IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)},
+	{IWL_PCI_DEVICE(0x088E, 0x406A, iwl6035_2agn_sff_cfg)},
 	{IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)},
+	{IWL_PCI_DEVICE(0x088F, 0x426A, iwl6035_2agn_sff_cfg)},
 	{IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)},
+	{IWL_PCI_DEVICE(0x088E, 0x446A, iwl6035_2agn_sff_cfg)},
 	{IWL_PCI_DEVICE(0x088E, 0x4860, iwl6035_2agn_cfg)},
 	{IWL_PCI_DEVICE(0x088F, 0x5260, iwl6035_2agn_cfg)},
 
@@ -256,10 +267,83 @@
 
 /* 7000 Series */
 	{IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x08B1, 0x4062, iwl7260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0x4072, iwl7260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0x4170, iwl7260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0x4060, iwl7260_2n_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0x406A, iwl7260_2n_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0x4160, iwl7260_2n_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0x4062, iwl7260_n_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0x4162, iwl7260_n_cfg)},
+	{IWL_PCI_DEVICE(0x08B2, 0x4270, iwl7260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B2, 0x4272, iwl7260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B2, 0x4260, iwl7260_2n_cfg)},
+	{IWL_PCI_DEVICE(0x08B2, 0x426A, iwl7260_2n_cfg)},
+	{IWL_PCI_DEVICE(0x08B2, 0x4262, iwl7260_n_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0x4470, iwl7260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0x4472, iwl7260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0x4460, iwl7260_2n_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0x446A, iwl7260_2n_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0x4462, iwl7260_n_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0x4870, iwl7260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0x486E, iwl7260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0x4570, iwl7260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0x4560, iwl7260_2n_cfg)},
+	{IWL_PCI_DEVICE(0x08B2, 0x4370, iwl7260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B2, 0x4360, iwl7260_2n_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0x5070, iwl7260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0x4020, iwl7260_2n_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0x402A, iwl7260_2n_cfg)},
+	{IWL_PCI_DEVICE(0x08B2, 0x4220, iwl7260_2n_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0x4420, iwl7260_2n_cfg)},
 	{IWL_PCI_DEVICE(0x08B1, 0xC070, iwl7260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x08B3, 0x0070, iwl3160_ac_cfg)},
-	{IWL_PCI_DEVICE(0x08B3, 0x8070, iwl3160_ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0xC072, iwl7260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0xC170, iwl7260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0xC060, iwl7260_2n_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0xC06A, iwl7260_2n_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0xC160, iwl7260_2n_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0xC062, iwl7260_n_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0xC162, iwl7260_n_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0xC770, iwl7260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0xC760, iwl7260_2n_cfg)},
+	{IWL_PCI_DEVICE(0x08B2, 0xC270, iwl7260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B2, 0xC272, iwl7260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B2, 0xC260, iwl7260_2n_cfg)},
+	{IWL_PCI_DEVICE(0x08B2, 0xC26A, iwl7260_n_cfg)},
+	{IWL_PCI_DEVICE(0x08B2, 0xC262, iwl7260_n_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0xC470, iwl7260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0xC472, iwl7260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0xC460, iwl7260_2n_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0xC462, iwl7260_n_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0xC570, iwl7260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0xC560, iwl7260_2n_cfg)},
+	{IWL_PCI_DEVICE(0x08B2, 0xC370, iwl7260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0xC360, iwl7260_2n_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0xC020, iwl7260_2n_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0xC02A, iwl7260_2n_cfg)},
+	{IWL_PCI_DEVICE(0x08B2, 0xC220, iwl7260_2n_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0xC420, iwl7260_2n_cfg)},
+
+/* 3160 Series */
+	{IWL_PCI_DEVICE(0x08B3, 0x0070, iwl3160_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B3, 0x0072, iwl3160_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B3, 0x0170, iwl3160_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B3, 0x0172, iwl3160_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B3, 0x0060, iwl3160_2n_cfg)},
+	{IWL_PCI_DEVICE(0x08B3, 0x0062, iwl3160_n_cfg)},
+	{IWL_PCI_DEVICE(0x08B4, 0x0270, iwl3160_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B4, 0x0272, iwl3160_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B3, 0x0470, iwl3160_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B3, 0x0472, iwl3160_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B4, 0x0370, iwl3160_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B3, 0x8070, iwl3160_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B3, 0x8072, iwl3160_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B3, 0x8170, iwl3160_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B3, 0x8172, iwl3160_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B3, 0x8060, iwl3160_2n_cfg)},
+	{IWL_PCI_DEVICE(0x08B3, 0x8062, iwl3160_n_cfg)},
+	{IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B3, 0x8570, iwl3160_2ac_cfg)},
 
 	{0}
 };
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 50ba0a4..aeb70e1 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -1481,16 +1481,16 @@
 	spin_lock_init(&trans_pcie->reg_lock);
 	init_waitqueue_head(&trans_pcie->ucode_write_waitq);
 
-	/* W/A - seems to solve weird behavior. We need to remove this if we
-	 * don't want to stay in L1 all the time. This wastes a lot of power */
-	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
-			       PCIE_LINK_STATE_CLKPM);
-
 	if (pci_enable_device(pdev)) {
 		err = -ENODEV;
 		goto out_no_pci;
 	}
 
+	/* W/A - seems to solve weird behavior. We need to remove this if we
+	 * don't want to stay in L1 all the time. This wastes a lot of power */
+	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
+			       PCIE_LINK_STATE_CLKPM);
+
 	pci_set_master(pdev);
 
 	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index c5e3029..48acfc6 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -576,10 +576,16 @@
 
 	spin_lock_bh(&txq->lock);
 	while (q->write_ptr != q->read_ptr) {
+		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
+				   txq_id, q->read_ptr);
 		iwl_pcie_txq_free_tfd(trans, txq);
 		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
 	}
+	txq->active = false;
 	spin_unlock_bh(&txq->lock);
+
+	/* just in case - this queue may have been stopped */
+	iwl_wake_queue(trans, txq);
 }
 
 /*
@@ -927,6 +933,12 @@
 
 	spin_lock_bh(&txq->lock);
 
+	if (!txq->active) {
+		IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
+				    txq_id, ssn);
+		goto out;
+	}
+
 	if (txq->q.read_ptr == tfd_num)
 		goto out;
 
@@ -1103,6 +1115,7 @@
 		       (fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
 		       (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
 		       SCD_QUEUE_STTS_REG_MSK);
+	trans_pcie->txq[txq_id].active = true;
 	IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d WrPtr: %d\n",
 			    txq_id, fifo, ssn & 0xff);
 }
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c
index a78e065..d69d024 100644
--- a/drivers/net/wireless/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/mwifiex/11n_aggr.c
@@ -149,7 +149,7 @@
  */
 int
 mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
-			  struct mwifiex_ra_list_tbl *pra_list, int headroom,
+			  struct mwifiex_ra_list_tbl *pra_list,
 			  int ptrindex, unsigned long ra_list_flags)
 			  __releases(&priv->wmm.ra_list_spinlock)
 {
@@ -159,6 +159,7 @@
 	int pad = 0, ret;
 	struct mwifiex_tx_param tx_param;
 	struct txpd *ptx_pd = NULL;
+	int headroom = adapter->iface_type == MWIFIEX_USB ? 0 : INTF_HEADER_LEN;
 
 	skb_src = skb_peek(&pra_list->skb_head);
 	if (!skb_src) {
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.h b/drivers/net/wireless/mwifiex/11n_aggr.h
index 900e1c6..892098d 100644
--- a/drivers/net/wireless/mwifiex/11n_aggr.h
+++ b/drivers/net/wireless/mwifiex/11n_aggr.h
@@ -26,7 +26,7 @@
 int mwifiex_11n_deaggregate_pkt(struct mwifiex_private *priv,
 				struct sk_buff *skb);
 int mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
-			      struct mwifiex_ra_list_tbl *ptr, int headroom,
+			      struct mwifiex_ra_list_tbl *ptr,
 			      int ptr_index, unsigned long flags)
 			      __releases(&priv->wmm.ra_list_spinlock);
 
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index e42b266..e7f7cdf 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -1668,9 +1668,9 @@
 	struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
 	int ret;
 
-	if (priv->bss_mode != NL80211_IFTYPE_STATION) {
+	if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_STA) {
 		wiphy_err(wiphy,
-			  "%s: reject infra assoc request in non-STA mode\n",
+			  "%s: reject infra assoc request in non-STA role\n",
 			  dev->name);
 		return -EINVAL;
 	}
diff --git a/drivers/net/wireless/mwifiex/cfp.c b/drivers/net/wireless/mwifiex/cfp.c
index 988552d..5178c46 100644
--- a/drivers/net/wireless/mwifiex/cfp.c
+++ b/drivers/net/wireless/mwifiex/cfp.c
@@ -415,7 +415,8 @@
 	u32 k = 0;
 	struct mwifiex_adapter *adapter = priv->adapter;
 
-	if (priv->bss_mode == NL80211_IFTYPE_STATION) {
+	if (priv->bss_mode == NL80211_IFTYPE_STATION ||
+	    priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT) {
 		switch (adapter->config_bands) {
 		case BAND_B:
 			dev_dbg(adapter->dev, "info: infra band=%d "
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index 26755d9..7a97f6c 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -1154,7 +1154,7 @@
 	uint32_t conditions = le32_to_cpu(phs_cfg->params.hs_config.conditions);
 
 	if (phs_cfg->action == cpu_to_le16(HS_ACTIVATE) &&
-	    adapter->iface_type == MWIFIEX_SDIO) {
+	    adapter->iface_type != MWIFIEX_USB) {
 		mwifiex_hs_activated_event(priv, true);
 		return 0;
 	} else {
@@ -1166,8 +1166,7 @@
 	}
 	if (conditions != HS_CFG_CANCEL) {
 		adapter->is_hs_configured = true;
-		if (adapter->iface_type == MWIFIEX_USB ||
-		    adapter->iface_type == MWIFIEX_PCIE)
+		if (adapter->iface_type == MWIFIEX_USB)
 			mwifiex_hs_activated_event(priv, true);
 	} else {
 		adapter->is_hs_configured = false;
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index 6bcb66e..96bda6c 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -1290,8 +1290,10 @@
 {
 	u8 current_bssid[ETH_ALEN];
 
-	/* Return error if the adapter or table entry is not marked as infra */
-	if ((priv->bss_mode != NL80211_IFTYPE_STATION) ||
+	/* Return error if the adapter is not STA role or table entry
+	 * is not marked as infra.
+	 */
+	if ((GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_STA) ||
 	    (bss_desc->bss_mode != NL80211_IFTYPE_STATION))
 		return -1;
 
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 2eb88ea..fc3fe8d 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -270,10 +270,12 @@
 		}
 	} while (true);
 
-	if ((adapter->int_status) || IS_CARD_RX_RCVD(adapter))
-		goto process_start;
-
 	spin_lock_irqsave(&adapter->main_proc_lock, flags);
+	if ((adapter->int_status) || IS_CARD_RX_RCVD(adapter)) {
+		spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
+		goto process_start;
+	}
+
 	adapter->mwifiex_processing = false;
 	spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
 
@@ -363,20 +365,6 @@
 		dev_err(adapter->dev, "cannot create default STA interface\n");
 		goto err_add_intf;
 	}
-
-	/* Create AP interface by default */
-	if (!mwifiex_add_virtual_intf(adapter->wiphy, "uap%d",
-				      NL80211_IFTYPE_AP, NULL, NULL)) {
-		dev_err(adapter->dev, "cannot create default AP interface\n");
-		goto err_add_intf;
-	}
-
-	/* Create P2P interface by default */
-	if (!mwifiex_add_virtual_intf(adapter->wiphy, "p2p%d",
-				      NL80211_IFTYPE_P2P_CLIENT, NULL, NULL)) {
-		dev_err(adapter->dev, "cannot create default P2P interface\n");
-		goto err_add_intf;
-	}
 	rtnl_unlock();
 
 	mwifiex_drv_get_driver_version(adapter, fmt, sizeof(fmt) - 1);
@@ -573,9 +561,8 @@
 		mcast_list.mode = MWIFIEX_ALL_MULTI_MODE;
 	} else {
 		mcast_list.mode = MWIFIEX_MULTICAST_MODE;
-		if (netdev_mc_count(dev))
-			mcast_list.num_multicast_addr =
-				mwifiex_copy_mcast_addr(&mcast_list, dev);
+		mcast_list.num_multicast_addr =
+			mwifiex_copy_mcast_addr(&mcast_list, dev);
 	}
 	mwifiex_request_set_multicast_list(priv, &mcast_list);
 }
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index 363ba31..139c958 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -1441,8 +1441,8 @@
 	/* Allocate buffer and copy payload */
 	blk_size = MWIFIEX_SDIO_BLOCK_SIZE;
 	buf_block_len = (pkt_len + blk_size - 1) / blk_size;
-	*(u16 *) &payload[0] = (u16) pkt_len;
-	*(u16 *) &payload[2] = type;
+	*(__le16 *)&payload[0] = cpu_to_le16((u16)pkt_len);
+	*(__le16 *)&payload[2] = cpu_to_le16(type);
 
 	/*
 	 * This is SDIO specific header
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index 1a8a19d..23aa910 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -104,16 +104,14 @@
 		} else {
 			priv->curr_pkt_filter &=
 				~HostCmd_ACT_MAC_ALL_MULTICAST_ENABLE;
-			if (mcast_list->num_multicast_addr) {
-				dev_dbg(priv->adapter->dev,
-					"info: Set multicast list=%d\n",
-				       mcast_list->num_multicast_addr);
-				/* Send multicast addresses to firmware */
-				ret = mwifiex_send_cmd_async(priv,
-					HostCmd_CMD_MAC_MULTICAST_ADR,
-					HostCmd_ACT_GEN_SET, 0,
-					mcast_list);
-			}
+			dev_dbg(priv->adapter->dev,
+				"info: Set multicast list=%d\n",
+				mcast_list->num_multicast_addr);
+			/* Send multicast addresses to firmware */
+			ret = mwifiex_send_cmd_async(priv,
+				HostCmd_CMD_MAC_MULTICAST_ADR,
+				HostCmd_ACT_GEN_SET, 0,
+				mcast_list);
 		}
 	}
 	dev_dbg(priv->adapter->dev,
diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
index f90fe21..b7adf3d 100644
--- a/drivers/net/wireless/mwifiex/usb.c
+++ b/drivers/net/wireless/mwifiex/usb.c
@@ -446,9 +446,6 @@
 	 */
 	adapter->is_suspended = true;
 
-	for (i = 0; i < adapter->priv_num; i++)
-		netif_carrier_off(adapter->priv[i]->netdev);
-
 	if (atomic_read(&card->rx_cmd_urb_pending) && card->rx_cmd.urb)
 		usb_kill_urb(card->rx_cmd.urb);
 
@@ -508,10 +505,6 @@
 						  MWIFIEX_RX_CMD_BUF_SIZE);
 	}
 
-	for (i = 0; i < adapter->priv_num; i++)
-		if (adapter->priv[i]->media_connected)
-			netif_carrier_on(adapter->priv[i]->netdev);
-
 	/* Disable Host Sleep */
 	if (adapter->hs_activated)
 		mwifiex_cancel_hs(mwifiex_get_priv(adapter,
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index 4be3d33..ae31e8d 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -1236,8 +1236,7 @@
 		if (mwifiex_is_amsdu_allowed(priv, tid) &&
 		    mwifiex_is_11n_aggragation_possible(priv, ptr,
 							adapter->tx_buf_size))
-			mwifiex_11n_aggregate_pkt(priv, ptr, INTF_HEADER_LEN,
-						  ptr_index, flags);
+			mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index, flags);
 			/* ra_list_spinlock has been freed in
 			   mwifiex_11n_aggregate_pkt() */
 		else
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index b9deef6..f42dc3c 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -83,6 +83,7 @@
 	{USB_DEVICE(0x06a9, 0x000e)},	/* Westell 802.11g USB (A90-211WG-01) */
 	{USB_DEVICE(0x06b9, 0x0121)},	/* Thomson SpeedTouch 121g */
 	{USB_DEVICE(0x0707, 0xee13)},   /* SMC 2862W-G version 2 */
+	{USB_DEVICE(0x07aa, 0x0020)},	/* Corega WLUSB2GTST USB */
 	{USB_DEVICE(0x0803, 0x4310)},	/* Zoom 4410a */
 	{USB_DEVICE(0x083a, 0x4521)},   /* Siemens Gigaset USB Adapter 54 version 2 */
 	{USB_DEVICE(0x083a, 0x4531)},	/* T-Com Sinus 154 data II */
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 72f32e5..f281971 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -2392,7 +2392,7 @@
 	rt2800_rfcsr_write(rt2x00dev, 49, rfcsr);
 
 	rt2800_rfcsr_read(rt2x00dev, 50, &rfcsr);
-	if (info->default_power1 > power_bound)
+	if (info->default_power2 > power_bound)
 		rt2x00_set_field8(&rfcsr, RFCSR50_TX, power_bound);
 	else
 		rt2x00_set_field8(&rfcsr, RFCSR50_TX, info->default_power2);
@@ -2767,6 +2767,13 @@
 	int i;
 
 	/*
+	 * First check if temperature compensation is supported.
+	 */
+	rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
+	if (!rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_EXTERNAL_TX_ALC))
+		return 0;
+
+	/*
 	 * Read TSSI boundaries for temperature compensation from
 	 * the EEPROM.
 	 *
@@ -4041,10 +4048,6 @@
 	u8 reg_id;
 	u8 value;
 
-	if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev) ||
-		     rt2800_wait_bbp_ready(rt2x00dev)))
-		return -EACCES;
-
 	if (rt2x00_rt(rt2x00dev, RT5592)) {
 		rt2800_init_bbp_5592(rt2x00dev);
 		return 0;
@@ -5185,20 +5188,23 @@
 		     rt2800_init_registers(rt2x00dev)))
 		return -EIO;
 
+	if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev)))
+		return -EIO;
+
 	/*
 	 * Send signal to firmware during boot time.
 	 */
 	rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0);
 	rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
-	if (rt2x00_is_usb(rt2x00dev)) {
+	if (rt2x00_is_usb(rt2x00dev))
 		rt2800_register_write(rt2x00dev, H2M_INT_SRC, 0);
-		rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0, 0, 0);
-	}
+	rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0, 0, 0);
 	msleep(1);
 
-	if (unlikely(rt2800_init_bbp(rt2x00dev)))
+	if (unlikely(rt2800_wait_bbp_ready(rt2x00dev)))
 		return -EIO;
 
+	rt2800_init_bbp(rt2x00dev);
 	rt2800_init_rfcsr(rt2x00dev);
 
 	if (rt2x00_is_usb(rt2x00dev) &&
@@ -5912,7 +5918,8 @@
 	    IEEE80211_HW_SUPPORTS_PS |
 	    IEEE80211_HW_PS_NULLFUNC_STACK |
 	    IEEE80211_HW_AMPDU_AGGREGATION |
-	    IEEE80211_HW_REPORTS_TX_ACK_STATUS;
+	    IEEE80211_HW_REPORTS_TX_ACK_STATUS |
+	    IEEE80211_HW_SUPPORTS_HT_CCK_RATES;
 
 	/*
 	 * Don't set IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING for USB devices
@@ -6056,8 +6063,8 @@
 		default_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A2);
 
 		for (i = 14; i < spec->num_channels; i++) {
-			info[i].default_power1 = default_power1[i];
-			info[i].default_power2 = default_power2[i];
+			info[i].default_power1 = default_power1[i - 14];
+			info[i].default_power2 = default_power2[i - 14];
 		}
 	}
 
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 2c12311..d955741 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -936,13 +936,8 @@
 	spin_unlock_irqrestore(&queue->index_lock, irqflags);
 }
 
-void rt2x00queue_pause_queue(struct data_queue *queue)
+void rt2x00queue_pause_queue_nocheck(struct data_queue *queue)
 {
-	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
-	    !test_bit(QUEUE_STARTED, &queue->flags) ||
-	    test_and_set_bit(QUEUE_PAUSED, &queue->flags))
-		return;
-
 	switch (queue->qid) {
 	case QID_AC_VO:
 	case QID_AC_VI:
@@ -958,6 +953,15 @@
 		break;
 	}
 }
+void rt2x00queue_pause_queue(struct data_queue *queue)
+{
+	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
+	    !test_bit(QUEUE_STARTED, &queue->flags) ||
+	    test_and_set_bit(QUEUE_PAUSED, &queue->flags))
+		return;
+
+	rt2x00queue_pause_queue_nocheck(queue);
+}
 EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue);
 
 void rt2x00queue_unpause_queue(struct data_queue *queue)
@@ -1019,7 +1023,7 @@
 		return;
 	}
 
-	rt2x00queue_pause_queue(queue);
+	rt2x00queue_pause_queue_nocheck(queue);
 
 	queue->rt2x00dev->ops->lib->stop_queue(queue);
 
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 0dc8180..883a54c 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -2825,7 +2825,8 @@
 		tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START);
 		for (i = 14; i < spec->num_channels; i++) {
 			info[i].max_power = MAX_TXPOWER;
-			info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]);
+			info[i].default_power1 =
+					TXPOWER_FROM_DEV(tx_power[i - 14]);
 		}
 	}
 
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 377e09b..2bbca18 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -2167,7 +2167,8 @@
 		tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START);
 		for (i = 14; i < spec->num_channels; i++) {
 			info[i].max_power = MAX_TXPOWER;
-			info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]);
+			info[i].default_power1 =
+					TXPOWER_FROM_DEV(tx_power[i - 14]);
 		}
 	}
 
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index c97e9d3..e70b4ff 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -1008,19 +1008,6 @@
 	return;
 }
 
-static void rtl_lps_change_work_callback(struct work_struct *work)
-{
-	struct rtl_works *rtlworks =
-	    container_of(work, struct rtl_works, lps_change_work);
-	struct ieee80211_hw *hw = rtlworks->hw;
-	struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-	if (rtlpriv->enter_ps)
-		rtl_lps_enter(hw);
-	else
-		rtl_lps_leave(hw);
-}
-
 static void _rtl_pci_init_trx_var(struct ieee80211_hw *hw)
 {
 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c
index 884bcea..71e917d 100644
--- a/drivers/net/wireless/rtlwifi/ps.c
+++ b/drivers/net/wireless/rtlwifi/ps.c
@@ -611,6 +611,18 @@
 			MSECS(sleep_intv * mac->vif->bss_conf.beacon_int - 40));
 }
 
+void rtl_lps_change_work_callback(struct work_struct *work)
+{
+	struct rtl_works *rtlworks =
+	    container_of(work, struct rtl_works, lps_change_work);
+	struct ieee80211_hw *hw = rtlworks->hw;
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	if (rtlpriv->enter_ps)
+		rtl_lps_enter(hw);
+	else
+		rtl_lps_leave(hw);
+}
 
 void rtl_swlps_wq_callback(void *data)
 {
diff --git a/drivers/net/wireless/rtlwifi/ps.h b/drivers/net/wireless/rtlwifi/ps.h
index 4d682b7..88bd76e 100644
--- a/drivers/net/wireless/rtlwifi/ps.h
+++ b/drivers/net/wireless/rtlwifi/ps.h
@@ -49,5 +49,6 @@
 void rtl_swlps_rf_sleep(struct ieee80211_hw *hw);
 void rtl_p2p_ps_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state);
 void rtl_p2p_info(struct ieee80211_hw *hw, void *data, unsigned int len);
+void rtl_lps_change_work_callback(struct work_struct *work);
 
 #endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
index 953f1a0..2119313 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
@@ -104,7 +104,7 @@
 			tx_agc[RF90_PATH_A] = 0x10101010;
 			tx_agc[RF90_PATH_B] = 0x10101010;
 		} else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
-			   TXHIGHPWRLEVEL_LEVEL1) {
+			   TXHIGHPWRLEVEL_LEVEL2) {
 			tx_agc[RF90_PATH_A] = 0x00000000;
 			tx_agc[RF90_PATH_B] = 0x00000000;
 		} else{
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index 826f085..2bd5985 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -359,6 +359,7 @@
 	{RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
 	{RTL_USB_DEVICE(0x2019, 0xab2b, rtl92cu_hal_cfg)}, /*Planex -Abocom*/
 	{RTL_USB_DEVICE(0x20f4, 0x624d, rtl92cu_hal_cfg)}, /*TRENDNet*/
+	{RTL_USB_DEVICE(0x2357, 0x0100, rtl92cu_hal_cfg)}, /*TP-Link WN8200ND*/
 	{RTL_USB_DEVICE(0x7392, 0x7822, rtl92cu_hal_cfg)}, /*Edimax -Edimax*/
 	{}
 };
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
index 763cf1d..5a060e5 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
@@ -343,7 +343,8 @@
 					(bool)GET_RX_DESC_PAGGR(pdesc));
 	rx_status->mactime = GET_RX_DESC_TSFL(pdesc);
 	if (phystatus) {
-		p_drvinfo = (struct rx_fwinfo_92c *)(pdesc + RTL_RX_DESC_SIZE);
+		p_drvinfo = (struct rx_fwinfo_92c *)(skb->data +
+						     stats->rx_bufshift);
 		rtl92c_translate_rx_signal_stuff(hw, skb, stats, pdesc,
 						 p_drvinfo);
 	}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
index e4c4cdc..d9ee2ef 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
@@ -251,7 +251,7 @@
 	.bar_id = 2,
 	.write_readback = true,
 	.name = "rtl8723ae_pci",
-	.fw_name = "rtlwifi/rtl8723aefw.bin",
+	.fw_name = "rtlwifi/rtl8723fw.bin",
 	.ops = &rtl8723ae_hal_ops,
 	.mod_params = &rtl8723ae_mod_params,
 	.maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL,
@@ -353,8 +353,8 @@
 MODULE_AUTHOR("Larry Finger	<Larry.Finger@lwfinger.net>");
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Realtek 8723E 802.11n PCI wireless");
-MODULE_FIRMWARE("rtlwifi/rtl8723aefw.bin");
-MODULE_FIRMWARE("rtlwifi/rtl8723aefw_B.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8723fw.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8723fw_B.bin");
 
 module_param_named(swenc, rtl8723ae_mod_params.sw_crypto, bool, 0444);
 module_param_named(debug, rtl8723ae_mod_params.debug, int, 0444);
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index a3532e0..1feebdc 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -1070,6 +1070,8 @@
 	spin_lock_init(&rtlpriv->locks.usb_lock);
 	INIT_WORK(&rtlpriv->works.fill_h2c_cmd,
 		  rtl_fill_h2c_cmd_work_callback);
+	INIT_WORK(&rtlpriv->works.lps_change_work,
+		  rtl_lps_change_work_callback);
 
 	rtlpriv->usb_data_index = 0;
 	init_completion(&rtlpriv->firmware_loading_complete);
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index cc03e7c..7032587 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -2057,7 +2057,7 @@
 	   that it points to the data allocated
 	   beyond  this structure like:
 	   rtl_pci_priv or rtl_usb_priv */
-	u8 priv[0];
+	u8 priv[0] __aligned(sizeof(void *));
 };
 
 #define rtl_priv(hw)		(((struct rtl_priv *)(hw)->priv))
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
index 4941f20..b8ba1f9 100644
--- a/drivers/net/wireless/zd1201.c
+++ b/drivers/net/wireless/zd1201.c
@@ -98,10 +98,12 @@
 		goto exit;
 
 	err = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 0x4,
-	    USB_DIR_IN | 0x40, 0,0, &ret, sizeof(ret), ZD1201_FW_TIMEOUT);
+	    USB_DIR_IN | 0x40, 0, 0, buf, sizeof(ret), ZD1201_FW_TIMEOUT);
 	if (err < 0)
 		goto exit;
 
+	memcpy(&ret, buf, sizeof(ret));
+
 	if (ret & 0x80) {
 		err = -EIO;
 		goto exit;
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 9d7f172..f2faa77 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -88,6 +88,7 @@
 	unsigned long   credit_usec;
 	unsigned long   remaining_credit;
 	struct timer_list credit_timeout;
+	u64 credit_window_start;
 
 	/* Statistics */
 	unsigned long rx_gso_checksum_fixup;
@@ -115,6 +116,7 @@
 int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
 		   unsigned long rx_ring_ref, unsigned int evtchn);
 void xenvif_disconnect(struct xenvif *vif);
+void xenvif_free(struct xenvif *vif);
 
 void xenvif_get(struct xenvif *vif);
 void xenvif_put(struct xenvif *vif);
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index d984141..c4a2eb2 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -275,8 +275,7 @@
 	vif->credit_bytes = vif->remaining_credit = ~0UL;
 	vif->credit_usec  = 0UL;
 	init_timer(&vif->credit_timeout);
-	/* Initialize 'expires' now: it's used to track the credit window. */
-	vif->credit_timeout.expires = jiffies;
+	vif->credit_window_start = get_jiffies_64();
 
 	dev->netdev_ops	= &xenvif_netdev_ops;
 	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
@@ -304,6 +303,9 @@
 	}
 
 	netdev_dbg(dev, "Successfully created xenvif\n");
+
+	__module_get(THIS_MODULE);
+
 	return vif;
 }
 
@@ -369,9 +371,14 @@
 	if (vif->irq)
 		unbind_from_irqhandler(vif->irq, vif);
 
+	xen_netbk_unmap_frontend_rings(vif);
+}
+
+void xenvif_free(struct xenvif *vif)
+{
 	unregister_netdev(vif->dev);
 
-	xen_netbk_unmap_frontend_rings(vif);
-
 	free_netdev(vif->dev);
+
+	module_put(THIS_MODULE);
 }
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 8c20935..36efb41 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -354,6 +354,49 @@
 	return false;
 }
 
+struct xenvif_count_slot_state {
+	unsigned long copy_off;
+	bool head;
+};
+
+unsigned int xenvif_count_frag_slots(struct xenvif *vif,
+				     unsigned long offset, unsigned long size,
+				     struct xenvif_count_slot_state *state)
+{
+	unsigned count = 0;
+
+	offset &= ~PAGE_MASK;
+
+	while (size > 0) {
+		unsigned long bytes;
+
+		bytes = PAGE_SIZE - offset;
+
+		if (bytes > size)
+			bytes = size;
+
+		if (start_new_rx_buffer(state->copy_off, bytes, state->head)) {
+			count++;
+			state->copy_off = 0;
+		}
+
+		if (state->copy_off + bytes > MAX_BUFFER_OFFSET)
+			bytes = MAX_BUFFER_OFFSET - state->copy_off;
+
+		state->copy_off += bytes;
+
+		offset += bytes;
+		size -= bytes;
+
+		if (offset == PAGE_SIZE)
+			offset = 0;
+
+		state->head = false;
+	}
+
+	return count;
+}
+
 /*
  * Figure out how many ring slots we're going to need to send @skb to
  * the guest. This function is essentially a dry run of
@@ -361,48 +404,39 @@
  */
 unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
 {
+	struct xenvif_count_slot_state state;
 	unsigned int count;
-	int i, copy_off;
+	unsigned char *data;
+	unsigned i;
 
-	count = DIV_ROUND_UP(skb_headlen(skb), PAGE_SIZE);
+	state.head = true;
+	state.copy_off = 0;
 
-	copy_off = skb_headlen(skb) % PAGE_SIZE;
+	/* Slot for the first (partial) page of data. */
+	count = 1;
 
+	/* Need a slot for the GSO prefix for GSO extra data? */
 	if (skb_shinfo(skb)->gso_size)
 		count++;
 
+	data = skb->data;
+	while (data < skb_tail_pointer(skb)) {
+		unsigned long offset = offset_in_page(data);
+		unsigned long size = PAGE_SIZE - offset;
+
+		if (data + size > skb_tail_pointer(skb))
+			size = skb_tail_pointer(skb) - data;
+
+		count += xenvif_count_frag_slots(vif, offset, size, &state);
+
+		data += size;
+	}
+
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
 		unsigned long offset = skb_shinfo(skb)->frags[i].page_offset;
-		unsigned long bytes;
 
-		offset &= ~PAGE_MASK;
-
-		while (size > 0) {
-			BUG_ON(offset >= PAGE_SIZE);
-			BUG_ON(copy_off > MAX_BUFFER_OFFSET);
-
-			bytes = PAGE_SIZE - offset;
-
-			if (bytes > size)
-				bytes = size;
-
-			if (start_new_rx_buffer(copy_off, bytes, 0)) {
-				count++;
-				copy_off = 0;
-			}
-
-			if (copy_off + bytes > MAX_BUFFER_OFFSET)
-				bytes = MAX_BUFFER_OFFSET - copy_off;
-
-			copy_off += bytes;
-
-			offset += bytes;
-			size -= bytes;
-
-			if (offset == PAGE_SIZE)
-				offset = 0;
-		}
+		count += xenvif_count_frag_slots(vif, offset, size, &state);
 	}
 	return count;
 }
@@ -1389,9 +1423,8 @@
 
 static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
 {
-	unsigned long now = jiffies;
-	unsigned long next_credit =
-		vif->credit_timeout.expires +
+	u64 now = get_jiffies_64();
+	u64 next_credit = vif->credit_window_start +
 		msecs_to_jiffies(vif->credit_usec / 1000);
 
 	/* Timer could already be pending in rare cases. */
@@ -1399,8 +1432,8 @@
 		return true;
 
 	/* Passed the point where we can replenish credit? */
-	if (time_after_eq(now, next_credit)) {
-		vif->credit_timeout.expires = now;
+	if (time_after_eq64(now, next_credit)) {
+		vif->credit_window_start = now;
 		tx_add_credit(vif);
 	}
 
@@ -1412,6 +1445,7 @@
 			tx_credit_callback;
 		mod_timer(&vif->credit_timeout,
 			  next_credit);
+		vif->credit_window_start = next_credit;
 
 		return true;
 	}
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 410018c..8a9e875 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -24,6 +24,12 @@
 struct backend_info {
 	struct xenbus_device *dev;
 	struct xenvif *vif;
+
+	/* This is the state that will be reflected in xenstore when any
+	 * active hotplug script completes.
+	 */
+	enum xenbus_state state;
+
 	enum xenbus_state frontend_state;
 	struct xenbus_watch hotplug_status_watch;
 	u8 have_hotplug_status_watch:1;
@@ -33,16 +39,20 @@
 static void connect(struct backend_info *);
 static void backend_create_xenvif(struct backend_info *be);
 static void unregister_hotplug_status_watch(struct backend_info *be);
+static void set_backend_state(struct backend_info *be,
+			      enum xenbus_state state);
 
 static int netback_remove(struct xenbus_device *dev)
 {
 	struct backend_info *be = dev_get_drvdata(&dev->dev);
 
+	set_backend_state(be, XenbusStateClosed);
+
 	unregister_hotplug_status_watch(be);
 	if (be->vif) {
 		kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
 		xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status");
-		xenvif_disconnect(be->vif);
+		xenvif_free(be->vif);
 		be->vif = NULL;
 	}
 	kfree(be);
@@ -126,6 +136,8 @@
 	if (err)
 		goto fail;
 
+	be->state = XenbusStateInitWait;
+
 	/* This kicks hotplug scripts, so do it immediately. */
 	backend_create_xenvif(be);
 
@@ -198,15 +210,113 @@
 	kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE);
 }
 
-
-static void disconnect_backend(struct xenbus_device *dev)
+static void backend_disconnect(struct backend_info *be)
 {
-	struct backend_info *be = dev_get_drvdata(&dev->dev);
-
-	if (be->vif) {
-		xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status");
+	if (be->vif)
 		xenvif_disconnect(be->vif);
-		be->vif = NULL;
+}
+
+static void backend_connect(struct backend_info *be)
+{
+	if (be->vif)
+		connect(be);
+}
+
+static inline void backend_switch_state(struct backend_info *be,
+					enum xenbus_state state)
+{
+	struct xenbus_device *dev = be->dev;
+
+	pr_debug("%s -> %s\n", dev->nodename, xenbus_strstate(state));
+	be->state = state;
+
+	/* If we are waiting for a hotplug script then defer the
+	 * actual xenbus state change.
+	 */
+	if (!be->have_hotplug_status_watch)
+		xenbus_switch_state(dev, state);
+}
+
+/* Handle backend state transitions:
+ *
+ * The backend state starts in InitWait and the following transitions are
+ * allowed.
+ *
+ * InitWait -> Connected
+ *
+ *    ^    \         |
+ *    |     \        |
+ *    |      \       |
+ *    |       \      |
+ *    |        \     |
+ *    |         \    |
+ *    |          V   V
+ *
+ *  Closed  <-> Closing
+ *
+ * The state argument specifies the eventual state of the backend and the
+ * function transitions to that state via the shortest path.
+ */
+static void set_backend_state(struct backend_info *be,
+			      enum xenbus_state state)
+{
+	while (be->state != state) {
+		switch (be->state) {
+		case XenbusStateClosed:
+			switch (state) {
+			case XenbusStateInitWait:
+			case XenbusStateConnected:
+				pr_info("%s: prepare for reconnect\n",
+					be->dev->nodename);
+				backend_switch_state(be, XenbusStateInitWait);
+				break;
+			case XenbusStateClosing:
+				backend_switch_state(be, XenbusStateClosing);
+				break;
+			default:
+				BUG();
+			}
+			break;
+		case XenbusStateInitWait:
+			switch (state) {
+			case XenbusStateConnected:
+				backend_connect(be);
+				backend_switch_state(be, XenbusStateConnected);
+				break;
+			case XenbusStateClosing:
+			case XenbusStateClosed:
+				backend_switch_state(be, XenbusStateClosing);
+				break;
+			default:
+				BUG();
+			}
+			break;
+		case XenbusStateConnected:
+			switch (state) {
+			case XenbusStateInitWait:
+			case XenbusStateClosing:
+			case XenbusStateClosed:
+				backend_disconnect(be);
+				backend_switch_state(be, XenbusStateClosing);
+				break;
+			default:
+				BUG();
+			}
+			break;
+		case XenbusStateClosing:
+			switch (state) {
+			case XenbusStateInitWait:
+			case XenbusStateConnected:
+			case XenbusStateClosed:
+				backend_switch_state(be, XenbusStateClosed);
+				break;
+			default:
+				BUG();
+			}
+			break;
+		default:
+			BUG();
+		}
 	}
 }
 
@@ -218,43 +328,33 @@
 {
 	struct backend_info *be = dev_get_drvdata(&dev->dev);
 
-	pr_debug("frontend state %s", xenbus_strstate(frontend_state));
+	pr_debug("%s -> %s\n", dev->otherend, xenbus_strstate(frontend_state));
 
 	be->frontend_state = frontend_state;
 
 	switch (frontend_state) {
 	case XenbusStateInitialising:
-		if (dev->state == XenbusStateClosed) {
-			printk(KERN_INFO "%s: %s: prepare for reconnect\n",
-			       __func__, dev->nodename);
-			xenbus_switch_state(dev, XenbusStateInitWait);
-		}
+		set_backend_state(be, XenbusStateInitWait);
 		break;
 
 	case XenbusStateInitialised:
 		break;
 
 	case XenbusStateConnected:
-		if (dev->state == XenbusStateConnected)
-			break;
-		backend_create_xenvif(be);
-		if (be->vif)
-			connect(be);
+		set_backend_state(be, XenbusStateConnected);
 		break;
 
 	case XenbusStateClosing:
-		if (be->vif)
-			kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
-		disconnect_backend(dev);
-		xenbus_switch_state(dev, XenbusStateClosing);
+		set_backend_state(be, XenbusStateClosing);
 		break;
 
 	case XenbusStateClosed:
-		xenbus_switch_state(dev, XenbusStateClosed);
+		set_backend_state(be, XenbusStateClosed);
 		if (xenbus_dev_is_online(dev))
 			break;
 		/* fall through if not online */
 	case XenbusStateUnknown:
+		set_backend_state(be, XenbusStateClosed);
 		device_unregister(&dev->dev);
 		break;
 
@@ -347,7 +447,9 @@
 	if (IS_ERR(str))
 		return;
 	if (len == sizeof("connected")-1 && !memcmp(str, "connected", len)) {
-		xenbus_switch_state(be->dev, XenbusStateConnected);
+		/* Complete any pending state change */
+		xenbus_switch_state(be->dev, be->state);
+
 		/* Not interested in this watch anymore. */
 		unregister_hotplug_status_watch(be);
 	}
@@ -377,12 +479,8 @@
 	err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch,
 				   hotplug_status_changed,
 				   "%s/%s", dev->nodename, "hotplug-status");
-	if (err) {
-		/* Switch now, since we can't do a watch. */
-		xenbus_switch_state(dev, XenbusStateConnected);
-	} else {
+	if (!err)
 		be->have_hotplug_status_watch = 1;
-	}
 
 	netif_wake_queue(be->vif->dev);
 }
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 1db10141..0c01b8e 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -276,8 +276,7 @@
 			break;
 		}
 
-		__skb_fill_page_desc(skb, 0, page, 0, 0);
-		skb_shinfo(skb)->nr_frags = 1;
+		skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
 		__skb_queue_tail(&np->rx_batch, skb);
 	}
 
@@ -822,7 +821,6 @@
 				  struct sk_buff_head *list)
 {
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
-	int nr_frags = shinfo->nr_frags;
 	RING_IDX cons = np->rx.rsp_cons;
 	struct sk_buff *nskb;
 
@@ -831,19 +829,21 @@
 			RING_GET_RESPONSE(&np->rx, ++cons);
 		skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
 
-		__skb_fill_page_desc(skb, nr_frags,
-				     skb_frag_page(nfrag),
-				     rx->offset, rx->status);
+		if (shinfo->nr_frags == MAX_SKB_FRAGS) {
+			unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
 
-		skb->data_len += rx->status;
+			BUG_ON(pull_to <= skb_headlen(skb));
+			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
+		}
+		BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);
+
+		skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
+				rx->offset, rx->status, PAGE_SIZE);
 
 		skb_shinfo(nskb)->nr_frags = 0;
 		kfree_skb(nskb);
-
-		nr_frags++;
 	}
 
-	shinfo->nr_frags = nr_frags;
 	return cons;
 }
 
@@ -929,7 +929,8 @@
 	while ((skb = __skb_dequeue(rxq)) != NULL) {
 		int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
 
-		__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
+		if (pull_to > skb_headlen(skb))
+			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
 
 		/* Ethernet work: Delayed to here as it peeks the header. */
 		skb->protocol = eth_type_trans(skb, dev);
@@ -1015,16 +1016,10 @@
 		skb_shinfo(skb)->frags[0].page_offset = rx->offset;
 		skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
 		skb->data_len = rx->status;
+		skb->len += rx->status;
 
 		i = xennet_fill_frags(np, skb, &tmpq);
 
-		/*
-                 * Truesize is the actual allocation size, even if the
-                 * allocation is only partially used.
-                 */
-		skb->truesize += PAGE_SIZE * skb_shinfo(skb)->nr_frags;
-		skb->len += skb->data_len;
-
 		if (rx->flags & XEN_NETRXF_csum_blank)
 			skb->ip_summed = CHECKSUM_PARTIAL;
 		else if (rx->flags & XEN_NETRXF_data_validated)
diff --git a/drivers/ntb/ntb_hw.c b/drivers/ntb/ntb_hw.c
index 2dacd19..b9bf8b5 100644
--- a/drivers/ntb/ntb_hw.c
+++ b/drivers/ntb/ntb_hw.c
@@ -78,6 +78,8 @@
 	BWD_HW,
 };
 
+static struct dentry *debugfs_dir;
+
 /* Translate memory window 0,1 to BAR 2,4 */
 #define MW_TO_BAR(mw)	(mw * 2 + 2)
 
@@ -531,9 +533,9 @@
 	}
 
 	if (val & SNB_PPD_DEV_TYPE)
-		ndev->dev_type = NTB_DEV_DSD;
-	else
 		ndev->dev_type = NTB_DEV_USD;
+	else
+		ndev->dev_type = NTB_DEV_DSD;
 
 	ndev->reg_ofs.pdb = ndev->reg_base + SNB_PDOORBELL_OFFSET;
 	ndev->reg_ofs.pdb_mask = ndev->reg_base + SNB_PDBMSK_OFFSET;
@@ -547,7 +549,7 @@
 	if (ndev->conn_type == NTB_CONN_B2B) {
 		ndev->reg_ofs.sdb = ndev->reg_base + SNB_B2B_DOORBELL_OFFSET;
 		ndev->reg_ofs.spad_write = ndev->reg_base + SNB_B2B_SPAD_OFFSET;
-		ndev->limits.max_spads = SNB_MAX_SPADS;
+		ndev->limits.max_spads = SNB_MAX_B2B_SPADS;
 	} else {
 		ndev->reg_ofs.sdb = ndev->reg_base + SNB_SDOORBELL_OFFSET;
 		ndev->reg_ofs.spad_write = ndev->reg_base + SNB_SPAD_OFFSET;
@@ -644,10 +646,16 @@
 		rc = -ENODEV;
 	}
 
+	if (rc)
+		return rc;
+
+	dev_info(&ndev->pdev->dev, "Device Type = %s\n",
+		 ndev->dev_type == NTB_DEV_USD ? "USD/DSP" : "DSD/USP");
+
 	/* Enable Bus Master and Memory Space on the secondary side */
 	writew(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER, ndev->reg_ofs.spci_cmd);
 
-	return rc;
+	return 0;
 }
 
 static void ntb_device_free(struct ntb_device *ndev)
@@ -992,6 +1000,28 @@
 	kfree(ndev->db_cb);
 }
 
+static void ntb_setup_debugfs(struct ntb_device *ndev)
+{
+	if (!debugfs_initialized())
+		return;
+
+	if (!debugfs_dir)
+		debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
+
+	ndev->debugfs_dir = debugfs_create_dir(pci_name(ndev->pdev),
+					       debugfs_dir);
+}
+
+static void ntb_free_debugfs(struct ntb_device *ndev)
+{
+	debugfs_remove_recursive(ndev->debugfs_dir);
+
+	if (debugfs_dir && simple_empty(debugfs_dir)) {
+		debugfs_remove_recursive(debugfs_dir);
+		debugfs_dir = NULL;
+	}
+}
+
 static int ntb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	struct ntb_device *ndev;
@@ -1004,6 +1034,7 @@
 	ndev->pdev = pdev;
 	ndev->link_status = NTB_LINK_DOWN;
 	pci_set_drvdata(pdev, ndev);
+	ntb_setup_debugfs(ndev);
 
 	rc = pci_enable_device(pdev);
 	if (rc)
@@ -1100,6 +1131,7 @@
 err1:
 	pci_disable_device(pdev);
 err:
+	ntb_free_debugfs(ndev);
 	kfree(ndev);
 
 	dev_err(&pdev->dev, "Error loading %s module\n", KBUILD_MODNAME);
@@ -1129,6 +1161,7 @@
 	iounmap(ndev->reg_base);
 	pci_release_selected_regions(pdev, NTB_BAR_MASK);
 	pci_disable_device(pdev);
+	ntb_free_debugfs(ndev);
 	kfree(ndev);
 }
 
diff --git a/drivers/ntb/ntb_hw.h b/drivers/ntb/ntb_hw.h
index 3a3038c..6a4f56f 100644
--- a/drivers/ntb/ntb_hw.h
+++ b/drivers/ntb/ntb_hw.h
@@ -127,6 +127,8 @@
 	unsigned char link_status;
 	struct delayed_work hb_timer;
 	unsigned long last_ts;
+
+	struct dentry *debugfs_dir;
 };
 
 /**
@@ -155,6 +157,20 @@
 	return ndev->pdev;
 }
 
+/**
+ * ntb_query_debugfs() - return the debugfs pointer
+ * @ndev: pointer to ntb_device instance
+ *
+ * Given the ntb pointer, return the debugfs directory pointer for the NTB
+ * hardware device
+ *
+ * RETURNS: a pointer to the debugfs directory
+ */
+static inline struct dentry *ntb_query_debugfs(struct ntb_device *ndev)
+{
+	return ndev->debugfs_dir;
+}
+
 struct ntb_device *ntb_register_transport(struct pci_dev *pdev,
 					  void *transport);
 void ntb_unregister_transport(struct ntb_device *ndev);
diff --git a/drivers/ntb/ntb_regs.h b/drivers/ntb/ntb_regs.h
index 5bfa8c0..96209b4 100644
--- a/drivers/ntb/ntb_regs.h
+++ b/drivers/ntb/ntb_regs.h
@@ -53,8 +53,8 @@
 #define NTB_LINK_WIDTH_MASK	0x03f0
 
 #define SNB_MSIX_CNT		4
-#define SNB_MAX_SPADS		16
-#define SNB_MAX_COMPAT_SPADS	8
+#define SNB_MAX_B2B_SPADS	16
+#define SNB_MAX_COMPAT_SPADS	16
 /* Reserve the uppermost bit for link interrupt */
 #define SNB_MAX_DB_BITS		15
 #define SNB_DB_BITS_PER_VEC	5
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index f8d7081..c308915 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -157,7 +157,6 @@
 	bool transport_link;
 	struct delayed_work link_work;
 	struct work_struct link_cleanup;
-	struct dentry *debugfs_dir;
 };
 
 enum {
@@ -824,12 +823,12 @@
 	qp->tx_max_frame = min(transport_mtu, tx_size / 2);
 	qp->tx_max_entry = tx_size / qp->tx_max_frame;
 
-	if (nt->debugfs_dir) {
+	if (ntb_query_debugfs(nt->ndev)) {
 		char debugfs_name[4];
 
 		snprintf(debugfs_name, 4, "qp%d", qp_num);
 		qp->debugfs_dir = debugfs_create_dir(debugfs_name,
-						     nt->debugfs_dir);
+						 ntb_query_debugfs(nt->ndev));
 
 		qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
 							qp->debugfs_dir, qp,
@@ -857,11 +856,6 @@
 	if (!nt)
 		return -ENOMEM;
 
-	if (debugfs_initialized())
-		nt->debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
-	else
-		nt->debugfs_dir = NULL;
-
 	nt->ndev = ntb_register_transport(pdev, nt);
 	if (!nt->ndev) {
 		rc = -EIO;
@@ -907,7 +901,6 @@
 err1:
 	ntb_unregister_transport(nt->ndev);
 err:
-	debugfs_remove_recursive(nt->debugfs_dir);
 	kfree(nt);
 	return rc;
 }
@@ -921,16 +914,16 @@
 	nt->transport_link = NTB_LINK_DOWN;
 
 	/* verify that all the qp's are freed */
-	for (i = 0; i < nt->max_qps; i++)
+	for (i = 0; i < nt->max_qps; i++) {
 		if (!test_bit(i, &nt->qp_bitmap))
 			ntb_transport_free_queue(&nt->qps[i]);
+		debugfs_remove_recursive(nt->qps[i].debugfs_dir);
+	}
 
 	ntb_bus_remove(nt);
 
 	cancel_delayed_work_sync(&nt->link_work);
 
-	debugfs_remove_recursive(nt->debugfs_dir);
-
 	ntb_unregister_event_callback(nt->ndev);
 
 	pdev = ntb_query_pdev(nt->ndev);
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 04da786..7c8221d 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -106,8 +106,12 @@
 
 static int of_bus_pci_match(struct device_node *np)
 {
-	/* "vci" is for the /chaos bridge on 1st-gen PCI powermacs */
-	return !strcmp(np->type, "pci") || !strcmp(np->type, "vci");
+	/*
+	 * "vci" is for the /chaos bridge on 1st-gen PCI powermacs
+	 * "ht" is hypertransport
+	 */
+	return !strcmp(np->type, "pci") || !strcmp(np->type, "vci") ||
+		!strcmp(np->type, "ht");
 }
 
 static void of_bus_pci_count_cells(struct device_node *np,
diff --git a/drivers/of/base.c b/drivers/of/base.c
index a6f584a..1d10b4e 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -1629,6 +1629,7 @@
 		ap = dt_alloc(sizeof(*ap) + len + 1, 4);
 		if (!ap)
 			continue;
+		memset(ap, 0, sizeof(*ap) + len + 1);
 		ap->alias = start;
 		of_alias_add(ap, np, id, start, len);
 	}
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 99a2f78..8c5b334 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -392,6 +392,8 @@
 	mem = (unsigned long)
 		dt_alloc(size + 4, __alignof__(struct device_node));
 
+	memset((void *)mem, 0, size);
+
 	((__be32 *)mem)[size / 4] = cpu_to_be32(0xdeadbeef);
 
 	pr_debug("  unflattening %lx...\n", mem);
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
index e79e006..9ee04b4 100644
--- a/drivers/parisc/iosapic.c
+++ b/drivers/parisc/iosapic.c
@@ -811,18 +811,28 @@
 	return pcidev->irq;
 }
 
-static struct iosapic_info *first_isi = NULL;
+static struct iosapic_info *iosapic_list;
 
 #ifdef CONFIG_64BIT
-int iosapic_serial_irq(int num)
+int iosapic_serial_irq(struct parisc_device *dev)
 {
-	struct iosapic_info *isi = first_isi;
-	struct irt_entry *irte = NULL;  /* only used if PAT PDC */
+	struct iosapic_info *isi;
+	struct irt_entry *irte;
 	struct vector_info *vi;
-	int isi_line;	/* line used by device */
+	int cnt;
+	int intin;
+
+	intin = (dev->mod_info >> 24) & 15;
 
 	/* lookup IRT entry for isi/slot/pin set */
-	irte = &irt_cell[num];
+	for (cnt = 0; cnt < irt_num_entry; cnt++) {
+		irte = &irt_cell[cnt];
+		if (COMPARE_IRTE_ADDR(irte, dev->mod0) &&
+		    irte->dest_iosapic_intin == intin)
+			break;
+	}
+	if (cnt >= irt_num_entry)
+		return 0; /* no irq found, force polling */
 
 	DBG_IRT("iosapic_serial_irq(): irte %p %x %x %x %x %x %x %x %x\n",
 		irte,
@@ -834,11 +844,17 @@
 		irte->src_seg_id,
 		irte->dest_iosapic_intin,
 		(u32) irte->dest_iosapic_addr);
-	isi_line = irte->dest_iosapic_intin;
+
+	/* search for iosapic */
+	for (isi = iosapic_list; isi; isi = isi->isi_next)
+		if (isi->isi_hpa == dev->mod0)
+			break;
+	if (!isi)
+		return 0; /* no iosapic found, force polling */
 
 	/* get vector info for this input line */
-	vi = isi->isi_vector + isi_line;
-	DBG_IRT("iosapic_serial_irq:  line %d vi 0x%p\n", isi_line, vi);
+	vi = isi->isi_vector + intin;
+	DBG_IRT("iosapic_serial_irq:  line %d vi 0x%p\n", iosapic_intin, vi);
 
 	/* If this IRQ line has already been setup, skip it */
 	if (vi->irte)
@@ -941,8 +957,8 @@
 		vip->irqline = (unsigned char) cnt;
 		vip->iosapic = isi;
 	}
-	if (!first_isi)
-		first_isi = isi;
+	isi->isi_next = iosapic_list;
+	iosapic_list = isi;
 	return isi;
 }
 
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index 1f05913..19f6f70 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -613,6 +613,54 @@
 	return 0;	/* truncation successful */
 }
 
+/*
+ * extend_lmmio_len: extend lmmio range to maximum length
+ *
+ * This is needed at least on C8000 systems to get the ATI FireGL card
+ * working. On other systems we will currently not extend the lmmio space.
+ */
+static unsigned long
+extend_lmmio_len(unsigned long start, unsigned long end, unsigned long lba_len)
+{
+	struct resource *tmp;
+
+	pr_debug("LMMIO mismatch: PAT length = 0x%lx, MASK register = 0x%lx\n",
+		end - start, lba_len);
+
+	lba_len = min(lba_len+1, 256UL*1024*1024); /* limit to 256 MB */
+
+	pr_debug("LBA: lmmio_space [0x%lx-0x%lx] - original\n", start, end);
+
+	if (boot_cpu_data.cpu_type < mako) {
+		pr_info("LBA: Not a C8000 system - not extending LMMIO range.\n");
+		return end;
+	}
+
+	end += lba_len;
+	if (end < start) /* fix overflow */
+		end = -1ULL;
+
+	pr_debug("LBA: lmmio_space [0x%lx-0x%lx] - current\n", start, end);
+
+	/* first overlap */
+	for (tmp = iomem_resource.child; tmp; tmp = tmp->sibling) {
+		pr_debug("LBA: testing %pR\n", tmp);
+		if (tmp->start == start)
+			continue; /* ignore ourself */
+		if (tmp->end < start)
+			continue;
+		if (tmp->start > end)
+			continue;
+		if (end >= tmp->start)
+			end = tmp->start - 1;
+	}
+
+	pr_info("LBA: lmmio_space [0x%lx-0x%lx] - new\n", start, end);
+
+	/* return new end */
+	return end;
+}
+
 #else
 #define truncate_pat_collision(r,n)  (0)
 #endif
@@ -994,6 +1042,14 @@
 		case PAT_LMMIO:
 			/* used to fix up pre-initialized MEM BARs */
 			if (!lba_dev->hba.lmmio_space.flags) {
+				unsigned long lba_len;
+
+				lba_len = ~READ_REG32(lba_dev->hba.base_addr
+						+ LBA_LMMIO_MASK);
+				if ((p->end - p->start) != lba_len)
+					p->end = extend_lmmio_len(p->start,
+						p->end, lba_len);
+
 				sprintf(lba_dev->hba.lmmio_name,
 						"PCI%02x LMMIO",
 						(int)lba_dev->hba.bus_num.start);
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 0c3efcf..5651527 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -34,6 +34,7 @@
 # Some architectures use the generic PCI setup functions
 #
 obj-$(CONFIG_X86) += setup-bus.o
+obj-$(CONFIG_ATOM_SOC_POWER) += pci-atom_soc.o
 obj-$(CONFIG_ALPHA) += setup-bus.o setup-irq.o
 obj-$(CONFIG_ARM) += setup-bus.o setup-irq.o
 obj-$(CONFIG_UNICORE32) += setup-bus.o setup-irq.o
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index aac7a40..0e0d0f7 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -92,7 +92,14 @@
 	if (ret)
 		presence = 0;
 
-	list_for_each_entry_safe(dev, temp, &parent->devices, bus_list) {
+	/*
+	 * Stopping an SR-IOV PF device removes all the associated VFs,
+	 * which will update the bus->devices list and confuse the
+	 * iterator.  Therefore, iterate in reverse so we remove the VFs
+	 * first, then the PF.  We do the same in pci_stop_bus_device().
+	 */
+	list_for_each_entry_safe_reverse(dev, temp, &parent->devices,
+					 bus_list) {
 		pci_dev_get(dev);
 		if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE && presence) {
 			pci_read_config_byte(dev, PCI_BRIDGE_CONTROL, &bctl);
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index c93071d..a971a6f 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -92,6 +92,8 @@
 	pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_DID, &virtfn->device);
 	pci_setup_device(virtfn);
 	virtfn->dev.parent = dev->dev.parent;
+	virtfn->physfn = pci_dev_get(dev);
+	virtfn->is_virtfn = 1;
 
 	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
 		res = dev->resource + PCI_IOV_RESOURCES + i;
@@ -113,9 +115,6 @@
 	pci_device_add(virtfn, virtfn->bus);
 	mutex_unlock(&iov->dev->sriov->lock);
 
-	virtfn->physfn = pci_dev_get(dev);
-	virtfn->is_virtfn = 1;
-
 	rc = pci_bus_add_device(virtfn);
 	sprintf(buf, "virtfn%u", id);
 	rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf);
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index e4b1fb2..0eab3a3 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -47,6 +47,9 @@
 	if (event != ACPI_NOTIFY_DEVICE_WAKE || !pci_dev)
 		return;
 
+	if (pci_dev->pme_poll)
+		pci_dev->pme_poll = false;
+
 	if (pci_dev->current_state == PCI_D3cold) {
 		pci_wakeup_event(pci_dev);
 		pm_runtime_resume(&pci_dev->dev);
@@ -57,9 +60,6 @@
 	if (pci_dev->pme_support)
 		pci_check_pme_status(pci_dev);
 
-	if (pci_dev->pme_poll)
-		pci_dev->pme_poll = false;
-
 	pci_wakeup_event(pci_dev);
 	pm_runtime_resume(&pci_dev->dev);
 
@@ -317,13 +317,20 @@
 /* ACPI bus type */
 static int acpi_pci_find_device(struct device *dev, acpi_handle *handle)
 {
-	struct pci_dev * pci_dev;
-	u64	addr;
+	struct pci_dev *pci_dev = to_pci_dev(dev);
+	bool is_bridge;
+	u64 addr;
 
-	pci_dev = to_pci_dev(dev);
+	/*
+	 * pci_is_bridge() is not suitable here, because pci_dev->subordinate
+	 * is set only after acpi_pci_find_device() has been called for the
+	 * given device.
+	 */
+	is_bridge = pci_dev->hdr_type == PCI_HEADER_TYPE_BRIDGE
+			|| pci_dev->hdr_type == PCI_HEADER_TYPE_CARDBUS;
 	/* Please ref to ACPI spec for the syntax of _ADR */
 	addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn);
-	*handle = acpi_get_child(DEVICE_ACPI_HANDLE(dev->parent), addr);
+	*handle = acpi_find_child(ACPI_HANDLE(dev->parent), addr, is_bridge);
 	if (!*handle)
 		return -ENODEV;
 	return 0;
diff --git a/drivers/pci/pci-atom_soc.c b/drivers/pci/pci-atom_soc.c
new file mode 100644
index 0000000..ee7ea6d
--- /dev/null
+++ b/drivers/pci/pci-atom_soc.c
@@ -0,0 +1,77 @@
+/*
+ * pci-atom_soc.c - register Intel MID PCI plaform ops
+ *
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/init.h>
+#include <linux/intel_mid_pm.h>
+#include <linux/kernel.h>
+
+#include "pci.h"
+
+static bool mid_pci_power_manageable(struct pci_dev *dev)
+{
+	return true;
+}
+
+static pci_power_t mid_pci_choose_state(struct pci_dev *pdev)
+{
+	return PCI_D3hot;
+}
+
+static int mid_pci_sleep_wake(struct pci_dev *dev, bool enable)
+{
+	return 0;
+}
+
+static int mid_pci_run_wake(struct pci_dev *dev, bool enable)
+{
+	return 0;
+}
+
+static struct pci_platform_pm_ops mid_pci_platform_pm = {
+	.is_manageable = mid_pci_power_manageable,
+	.choose_state = mid_pci_choose_state,
+	.sleep_wake = mid_pci_sleep_wake,
+	.run_wake = mid_pci_run_wake,
+	.set_state = pmu_pci_set_power_state,
+	.choose_state = pmu_pci_choose_state,
+};
+
+/**
+ * mid_pci_init - It registers callback function for all the PCI devices
+ * for platform specific device power on/shutdown acticities.
+ */
+static int __init mid_pci_init(void)
+{
+	if (boot_cpu_data.x86 != 6)
+		return 0;
+
+	/*
+	 * n.b. this model check does not uniquely identify the platform,
+	 * and additional checks are necessary inside the pmu driver
+	 */
+	switch (boot_cpu_data.x86_model) {
+	case INTEL_ATOM_MRFLD:
+	case INTEL_ATOM_MOORFLD:
+		pci_set_platform_pm(&mid_pci_platform_pm);
+		break;
+	}
+
+	return 0;
+}
+arch_initcall(mid_pci_init);
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 79277fb..d5e20d0 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -994,7 +994,6 @@
 	pci_dev->state_saved = false;
 	pci_dev->no_d3cold = false;
 	error = pm->runtime_suspend(dev);
-	suspend_report_result(pm->runtime_suspend, error);
 	if (error)
 		return error;
 	if (!pci_dev->d3cold_allowed)
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index a899d8b..ef00a08 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -59,7 +59,19 @@
 	if (delay < pci_pm_d3_delay)
 		delay = pci_pm_d3_delay;
 
-	msleep(delay);
+	if (delay) {
+		/*
+		* convert delay from ms to us
+		* if oops in progress, interrupts are disabled
+		* so do not call usleep that reenables interrupts
+		* but udelay that does not reenable interrupts
+		*/
+		delay = 1000*delay;
+		if (oops_in_progress)
+			udelay(delay);
+		else
+			usleep_range(delay-10, delay+10);
+	}
 }
 
 #ifdef CONFIG_PCI_DOMAINS
@@ -562,8 +574,11 @@
 	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
 		pci_dev_d3_sleep(dev);
 	else if (state == PCI_D2 || dev->current_state == PCI_D2)
+#ifdef CONFIG_ATOM_SOC_POWER
+		; /* On Intel mid platforms pci delays are handled by SCU */
+#else
 		udelay(PCI_PM_D2_DELAY);
-
+#endif
 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
 	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
 	if (dev->current_state != state && printk_ratelimit())
@@ -672,7 +687,12 @@
 		 * because have already delayed for the bridge.
 		 */
 		if (dev->runtime_d3cold) {
-			msleep(dev->d3cold_delay);
+			/*
+			 * msleep(0) will actually sleep for 1 jiffy.
+			 * if d3cold_delay is 0 we don't want to sleep at all.
+			*/
+			if (dev->d3cold_delay > 0)
+				msleep(dev->d3cold_delay);
 			/*
 			 * When powering on a bridge from D3cold, the
 			 * whole hierarchy may be powered on into
@@ -761,10 +781,6 @@
 		 */
 		return 0;
 
-	/* Check if we're already there */
-	if (dev->current_state == state)
-		return 0;
-
 	__pci_start_power_transition(dev, state);
 
 	/* This device is quirked not to be put into D3, so
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 31063ac..7f75fe6 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -425,7 +425,17 @@
  */
 int pcie_port_device_suspend(struct device *dev)
 {
-	return device_for_each_child(dev, NULL, suspend_iter);
+	struct pci_dev *pdev = to_pci_dev(dev);
+	int ret;
+	ret = device_for_each_child(dev, NULL, suspend_iter);
+	if (ret)
+		return ret;
+
+	ret = pci_save_state(pdev);
+	if (ret)
+		return ret;
+
+	return pci_set_power_state(pdev, PCI_D3hot);
 }
 
 static int resume_iter(struct device *dev, void *data)
@@ -442,11 +452,19 @@
 }
 
 /**
- * pcie_port_device_suspend - resume port services associated with a PCIe port
+ * pcie_port_device_resume - resume port services associated with a PCIe port
  * @dev: PCI Express port to handle
  */
 int pcie_port_device_resume(struct device *dev)
 {
+	struct pci_dev *pdev = to_pci_dev(dev);
+	int ret;
+	ret = pci_set_power_state(pdev, PCI_D0);
+	if (ret)
+		return ret;
+
+	pci_restore_state(pdev);
+
 	return device_for_each_child(dev, NULL, resume_iter);
 }
 #endif /* PM */
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 70f10fa..ea37072 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1703,12 +1703,16 @@
 	bridge->dev.release = pci_release_bus_bridge_dev;
 	dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(b), bus);
 	error = pcibios_root_bridge_prepare(bridge);
-	if (error)
-		goto bridge_dev_reg_err;
+	if (error) {
+		kfree(bridge);
+		goto err_out;
+	}
 
 	error = device_register(&bridge->dev);
-	if (error)
-		goto bridge_dev_reg_err;
+	if (error) {
+		put_device(&bridge->dev);
+		goto err_out;
+	}
 	b->bridge = get_device(&bridge->dev);
 	device_enable_async_suspend(b->bridge);
 	pci_set_bus_of_node(b);
@@ -1764,8 +1768,6 @@
 class_dev_reg_err:
 	put_device(&bridge->dev);
 	device_unregister(&bridge->dev);
-bridge_dev_reg_err:
-	kfree(bridge);
 err_out:
 	kfree(b);
 	return NULL;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 7d68aee..df4655c 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1022,6 +1022,8 @@
 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE, quirk_amd_ide_mode);
 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE, quirk_amd_ide_mode);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x7900, quirk_amd_ide_mode);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, 0x7900, quirk_amd_ide_mode);
 
 /*
  *	Serverworks CSB5 IDE does not fully support native mode
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index d254e23..64a7de2 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -300,6 +300,47 @@
 	}
 }
 
+static unsigned long pci_fail_res_type_mask(struct list_head *fail_head)
+{
+	struct pci_dev_resource *fail_res;
+	unsigned long mask = 0;
+
+	/* check failed type */
+	list_for_each_entry(fail_res, fail_head, list)
+		mask |= fail_res->flags;
+
+	/*
+	 * one pref failed resource will set IORESOURCE_MEM,
+	 * as we can allocate pref in non-pref range.
+	 * Will release all assigned non-pref sibling resources
+	 * according to that bit.
+	 */
+	return mask & (IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH);
+}
+
+static bool pci_need_to_release(unsigned long mask, struct resource *res)
+{
+	if (res->flags & IORESOURCE_IO)
+		return !!(mask & IORESOURCE_IO);
+
+	/* check pref at first */
+	if (res->flags & IORESOURCE_PREFETCH) {
+		if (mask & IORESOURCE_PREFETCH)
+			return true;
+		/* count pref if its parent is non-pref */
+		else if ((mask & IORESOURCE_MEM) &&
+			 !(res->parent->flags & IORESOURCE_PREFETCH))
+			return true;
+		else
+			return false;
+	}
+
+	if (res->flags & IORESOURCE_MEM)
+		return !!(mask & IORESOURCE_MEM);
+
+	return false;	/* should not get here */
+}
+
 static void __assign_resources_sorted(struct list_head *head,
 				 struct list_head *realloc_head,
 				 struct list_head *fail_head)
@@ -312,11 +353,24 @@
 	 *  if could do that, could get out early.
 	 *  if could not do that, we still try to assign requested at first,
 	 *    then try to reassign add_size for some resources.
+	 *
+	 * Separate three resource type checking if we need to release
+	 * assigned resource after requested + add_size try.
+	 *	1. if there is io port assign fail, will release assigned
+	 *	   io port.
+	 *	2. if there is pref mmio assign fail, release assigned
+	 *	   pref mmio.
+	 *	   if assigned pref mmio's parent is non-pref mmio and there
+	 *	   is non-pref mmio assign fail, will release that assigned
+	 *	   pref mmio.
+	 *	3. if there is non-pref mmio assign fail or pref mmio
+	 *	   assigned fail, will release assigned non-pref mmio.
 	 */
 	LIST_HEAD(save_head);
 	LIST_HEAD(local_fail_head);
 	struct pci_dev_resource *save_res;
-	struct pci_dev_resource *dev_res;
+	struct pci_dev_resource *dev_res, *tmp_res;
+	unsigned long fail_type;
 
 	/* Check if optional add_size is there */
 	if (!realloc_head || list_empty(realloc_head))
@@ -348,6 +402,19 @@
 		return;
 	}
 
+	/* check failed type */
+	fail_type = pci_fail_res_type_mask(&local_fail_head);
+	/* remove not need to be released assigned res from head list etc */
+	list_for_each_entry_safe(dev_res, tmp_res, head, list)
+		if (dev_res->res->parent &&
+		    !pci_need_to_release(fail_type, dev_res->res)) {
+			/* remove it from realloc_head list */
+			remove_from_list(realloc_head, dev_res->res);
+			remove_from_list(&save_head, dev_res->res);
+			list_del(&dev_res->list);
+			kfree(dev_res);
+		}
+
 	free_list(&local_fail_head);
 	/* Release assigned resource */
 	list_for_each_entry(dev_res, head, list)
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index 966abc6..f7197a7 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -678,10 +678,9 @@
 	if (!pcifront_dev) {
 		dev_info(&pdev->xdev->dev, "Installing PCI frontend\n");
 		pcifront_dev = pdev;
-	} else {
-		dev_err(&pdev->xdev->dev, "PCI frontend already installed!\n");
+	} else
 		err = -EEXIST;
-	}
+
 	spin_unlock(&pcifront_dev_lock);
 
 	if (!err && !swiotlb_nr_tbl()) {
@@ -848,7 +847,7 @@
 		goto out;
 
 	err = pcifront_connect_and_init_dma(pdev);
-	if (err) {
+	if (err && err != -EEXIST) {
 		xenbus_dev_fatal(pdev->xdev, err,
 				 "Error setting up PCI Frontend");
 		goto out;
diff --git a/drivers/pcmcia/at91_cf.c b/drivers/pcmcia/at91_cf.c
index 01463c7..1b2c631 100644
--- a/drivers/pcmcia/at91_cf.c
+++ b/drivers/pcmcia/at91_cf.c
@@ -100,9 +100,9 @@
 		int vcc	= gpio_is_valid(cf->board->vcc_pin);
 
 		*sp = SS_DETECT | SS_3VCARD;
-		if (!rdy || gpio_get_value(rdy))
+		if (!rdy || gpio_get_value(cf->board->irq_pin))
 			*sp |= SS_READY;
-		if (!vcc || gpio_get_value(vcc))
+		if (!vcc || gpio_get_value(cf->board->vcc_pin))
 			*sp |= SS_POWERON;
 	} else
 		*sp = 0;
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index 5d7529e..314e5e8 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -325,7 +325,7 @@
 
 static unsigned at91_mux_get_pullup(void __iomem *pio, unsigned pin)
 {
-	return (readl_relaxed(pio + PIO_PUSR) >> pin) & 0x1;
+	return !((readl_relaxed(pio + PIO_PUSR) >> pin) & 0x1);
 }
 
 static void at91_mux_set_pullup(void __iomem *pio, unsigned mask, bool on)
@@ -445,7 +445,7 @@
 
 static bool at91_mux_pio3_get_pulldown(void __iomem *pio, unsigned pin)
 {
-	return (__raw_readl(pio + PIO_PPDSR) >> pin) & 0x1;
+	return !((__raw_readl(pio + PIO_PPDSR) >> pin) & 0x1);
 }
 
 static void at91_mux_pio3_set_pulldown(void __iomem *pio, unsigned mask, bool is_on)
diff --git a/drivers/platform/olpc/olpc-ec.c b/drivers/platform/olpc/olpc-ec.c
index 0f9f859..f911952 100644
--- a/drivers/platform/olpc/olpc-ec.c
+++ b/drivers/platform/olpc/olpc-ec.c
@@ -330,7 +330,7 @@
 	return platform_driver_register(&olpc_ec_plat_driver);
 }
 
-module_init(olpc_ec_init_module);
+arch_initcall(olpc_ec_init_module);
 
 MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>");
 MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 8577261..e339587 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -652,6 +652,21 @@
 	  some embedded Intel x86 platforms. This is not needed for PC-type
 	  machines.
 
+choice
+	prompt "IPC access mode"
+	depends on INTEL_SCU_IPC
+	default INTEL_SCU_IPC_INTR_MODE
+	---help---
+	Select the desired access mode for IPC call.
+
+config INTEL_SCU_IPC_INTR_MODE
+	bool "Intel SCU IPC interrupt mode"
+
+config INTEL_SCU_IPC_POLL_MODE
+	bool "Intel SCU IPC polling mode"
+
+endchoice
+
 config INTEL_SCU_IPC_UTIL
 	tristate "Intel SCU IPC utility driver"
 	depends on INTEL_SCU_IPC
@@ -676,12 +691,36 @@
 
 	  If unsure, say N.
 
-config INTEL_MFLD_THERMAL
-       tristate "Thermal driver for Intel Medfield platform"
-       depends on MFD_INTEL_MSIC && THERMAL
-       help
-         Say Y here to enable thermal driver support for the  Intel Medfield
-         platform.
+config INTEL_PMIC_CHARGER
+	tristate "pmic charger driver config"
+	default y
+	---help---
+	  This driver handles the USBDET interrupt on CLVP for USB connect/disconnect.
+	  With the powering off the USB PHY for D3 if nothing attached, there is no interrupt
+	  from USB PHY for USB connect, we should use the UDBDET interrupt for USB connect.
+
+	  If unsure, say N.
+
+config INTEL_FG_HELPER
+	tristate "Intel Fuel gauge helper"
+	help
+	  This driver exposes a device file interface to store and restore
+          Fuel gauge configuration data.
+          Any fuel gauge driver can use the interfaces exposed by this
+          driver to store and retrieve their configuration data.
+
+          If unsure, say N.
+
+config INTEL_MODEM_NVRAM
+	tristate "modem NVRAM driver for Intel CTP/MERR platforms"
+	default n
+	---help---
+	This driver adds NVRAM support for telephony use.
+	It provides SYSFS entries to allow any user application to read/write
+	in this zone.
+	The driver also ensure that there is no buffer overflow.
+
+	If unsure, say N.
 
 config INTEL_IPS
 	tristate "Intel Intelligent Power Sharing"
@@ -789,4 +828,13 @@
 	  a paravirtualized device provided by QEMU; it lets a virtual machine
 	  (guest) communicate panic events to the host.
 
+config INTEL_SCU_FLIS
+	bool "scu flis driver config"
+	depends on INTEL_SCU_IPC
+	default y
+	help
+	  This driver builds the SCU Flis Access Sysfs Interfaces.
+	  We could read write the flis address and configure the
+	  pin pull up/down using these interfaces.
+
 endif # X86_PLATFORM_DEVICES
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index ef0ec74..6f00efc 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -2,6 +2,10 @@
 # Makefile for linux/drivers/platform/x86
 # x86 Platform-Specific Drivers
 #
+CFLAGS_intel_scu_fw_update.o := -Werror
+CFLAGS_intel_scu_ipcutil.o := -Werror
+CFLAGS_intel_modem_nvram.o := -Werror
+
 obj-$(CONFIG_ASUS_LAPTOP)	+= asus-laptop.o
 obj-$(CONFIG_ASUS_WMI)		+= asus-wmi.o
 obj-$(CONFIG_ASUS_NB_WMI)	+= asus-nb-wmi.o
@@ -36,20 +40,26 @@
 obj-$(CONFIG_ACPI_TOSHIBA)	+= toshiba_acpi.o
 
 obj-$(CONFIG_TOSHIBA_BT_RFKILL)	+= toshiba_bluetooth.o
-obj-$(CONFIG_INTEL_SCU_IPC)	+= intel_scu_ipc.o
+obj-$(CONFIG_INTEL_SCU_IPC)	+= intel_scu_ipc.o intel_scu_pmic.o intel_scu_mip.o intel_scu_fw_update.o
 obj-$(CONFIG_INTEL_SCU_IPC_UTIL) += intel_scu_ipcutil.o
-obj-$(CONFIG_INTEL_MFLD_THERMAL) += intel_mid_thermal.o
+obj-$(CONFIG_INTEL_MODEM_NVRAM)	+= intel_modem_nvram.o
+obj-$(CONFIG_INTEL_SCU_FLIS)	+= intel_scu_flis.o
 obj-$(CONFIG_INTEL_IPS)		+= intel_ips.o
+obj-$(CONFIG_INTEL_SOC_PMC)	+= intel_soc_pmc.o
 obj-$(CONFIG_GPIO_INTEL_PMIC)	+= intel_pmic_gpio.o
 obj-$(CONFIG_XO1_RFKILL)	+= xo1-rfkill.o
 obj-$(CONFIG_XO15_EBOOK)	+= xo15-ebook.o
 obj-$(CONFIG_IBM_RTL)		+= ibm_rtl.o
 obj-$(CONFIG_SAMSUNG_LAPTOP)	+= samsung-laptop.o
 obj-$(CONFIG_MXM_WMI)		+= mxm-wmi.o
+obj-$(CONFIG_INTEL_CRYSTALCOVE_PWRSRC)  += intel_crystalcove_pwrsrc.o
 obj-$(CONFIG_INTEL_MID_POWER_BUTTON)	+= intel_mid_powerbtn.o
+obj-$(CONFIG_INTEL_PMIC_CHARGER)	+= intel_pmic_charger.o
 obj-$(CONFIG_INTEL_OAKTRAIL)	+= intel_oaktrail.o
 obj-$(CONFIG_SAMSUNG_Q10)	+= samsung-q10.o
 obj-$(CONFIG_APPLE_GMUX)	+= apple-gmux.o
 obj-$(CONFIG_CHROMEOS_LAPTOP)	+= chromeos_laptop.o
 
 obj-$(CONFIG_PVPANIC)           += pvpanic.o
+obj-$(CONFIG_INTEL_FG_HELPER)	+= intel_fg_helper.o
+obj-$(CONFIG_INTEL_MID_PMIC)		+= dc_xpwr_pwrsrc.o
diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c
index f59683a..669a254 100644
--- a/drivers/platform/x86/intel_mid_powerbtn.c
+++ b/drivers/platform/x86/intel_mid_powerbtn.c
@@ -21,56 +21,98 @@
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/slab.h>
+#include <linux/device.h>
 #include <linux/platform_device.h>
 #include <linux/input.h>
-#include <linux/mfd/intel_msic.h>
+#include <linux/io.h>
+#include <linux/rpmsg.h>
+#include <linux/async.h>
+#include <asm/intel_mid_powerbtn.h>
+#include <asm/intel_scu_pmic.h>
+#include <asm/intel_mid_rpmsg.h>
 
 #define DRIVER_NAME "msic_power_btn"
 
-#define MSIC_PB_LEVEL	(1 << 3) /* 1 - release, 0 - press */
+struct mid_pb_priv {
+	struct input_dev *input;
+	int irq;
+	void __iomem *pb_stat;
+	u16 pb_level;
+	u16 irq_lvl1_mask;
+	bool irq_ack;
+};
 
-/*
- * MSIC document ti_datasheet defines the 1st bit reg 0x21 is used to mask
- * power button interrupt
- */
-#define MSIC_PWRBTNM    (1 << 0)
-
-static irqreturn_t mfld_pb_isr(int irq, void *dev_id)
+static inline int pb_clear_bits(u16 addr, u8 mask)
 {
-	struct input_dev *input = dev_id;
-	int ret;
+	return intel_scu_ipc_update_register(addr, 0, mask);
+}
+
+static irqreturn_t mid_pb_isr(int irq, void *dev_id)
+{
+	struct mid_pb_priv *priv = dev_id;
 	u8 pbstat;
 
-	ret = intel_msic_reg_read(INTEL_MSIC_PBSTATUS, &pbstat);
-	dev_dbg(input->dev.parent, "PB_INT status= %d\n", pbstat);
+	pbstat = readb(priv->pb_stat);
+	dev_dbg(&priv->input->dev, "pbstat: 0x%x\n", pbstat);
 
-	if (ret < 0) {
-		dev_err(input->dev.parent, "Read error %d while reading"
-			       " MSIC_PB_STATUS\n", ret);
-	} else {
-		input_event(input, EV_KEY, KEY_POWER,
-			       !(pbstat & MSIC_PB_LEVEL));
-		input_sync(input);
-	}
+	input_event(priv->input, EV_KEY, KEY_POWER, !(pbstat & priv->pb_level));
+	input_sync(priv->input);
+
+	if (pbstat & priv->pb_level)
+		pr_info("[%s] power button released\n", priv->input->name);
+	else
+		pr_info("[%s] power button pressed\n", priv->input->name);
+
+	return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t mid_pb_threaded_isr(int irq, void *dev_id)
+{
+	struct mid_pb_priv *priv = dev_id;
+
+	if (priv->irq_ack)
+		pb_clear_bits(priv->irq_lvl1_mask, MSIC_PWRBTNM);
 
 	return IRQ_HANDLED;
 }
 
-static int mfld_pb_probe(struct platform_device *pdev)
+static int mid_pb_probe(struct platform_device *pdev)
 {
 	struct input_dev *input;
-	int irq = platform_get_irq(pdev, 0);
-	int error;
+	struct mid_pb_priv *priv;
+	int irq;
+	int ret;
+	struct intel_msic_power_btn_platform_data *pdata;
 
+	if (pdev == NULL)
+		return -ENODEV;
+
+	pdata = pdev->dev.platform_data;
+	if (pdata == NULL) {
+		dev_err(&pdev->dev, "No power button platform data\n");
+		return -EINVAL;
+	}
+
+	dev_info(&pdev->dev, "Probed mid powerbutton devivce\n");
+
+	irq = platform_get_irq(pdev, 0);
 	if (irq < 0)
 		return -EINVAL;
 
+	priv = kzalloc(sizeof(struct mid_pb_priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
 	input = input_allocate_device();
 	if (!input) {
-		dev_err(&pdev->dev, "Input device allocation error\n");
+		kfree(priv);
 		return -ENOMEM;
 	}
 
+	priv->input = input;
+	priv->irq = irq;
+	platform_set_drvdata(pdev, priv);
+
 	input->name = pdev->name;
 	input->phys = "power-button/input0";
 	input->id.bustype = BUS_HOST;
@@ -78,71 +120,159 @@
 
 	input_set_capability(input, EV_KEY, KEY_POWER);
 
-	error = request_threaded_irq(irq, NULL, mfld_pb_isr, IRQF_NO_SUSPEND,
-			DRIVER_NAME, input);
-	if (error) {
-		dev_err(&pdev->dev, "Unable to request irq %d for mfld power"
-				"button\n", irq);
-		goto err_free_input;
+	priv->pb_stat = ioremap(pdata->pbstat, MSIC_PB_LEN);
+	if (!priv->pb_stat) {
+		ret = -ENOMEM;
+		goto fail;
 	}
 
-	error = input_register_device(input);
-	if (error) {
-		dev_err(&pdev->dev, "Unable to register input dev, error "
-				"%d\n", error);
-		goto err_free_irq;
+	ret = input_register_device(input);
+	if (ret) {
+		dev_err(&pdev->dev,
+			"unable to register input dev, error %d\n", ret);
+		goto out_iounmap;
 	}
 
-	platform_set_drvdata(pdev, input);
+	priv->pb_level = pdata->pb_level;
+	priv->irq_lvl1_mask = pdata->irq_lvl1_mask;
 
-	/*
-	 * SCU firmware might send power button interrupts to IA core before
+	/* Unmask the PBIRQ and MPBIRQ on Tangier */
+	if (pdata->irq_ack) {
+		pdata->irq_ack(pdata);
+		priv->irq_ack = true;
+	}
+
+	ret = request_threaded_irq(priv->irq, mid_pb_isr, mid_pb_threaded_isr,
+		IRQF_NO_SUSPEND, DRIVER_NAME, priv);
+
+	if (ret) {
+		dev_err(&pdev->dev,
+			"unable to request irq %d for power button\n", irq);
+		goto out_unregister_input;
+	}
+
+	/* SCU firmware might send power button interrupts to IA core before
 	 * kernel boots and doesn't get EOI from IA core. The first bit of
-	 * MSIC reg 0x21 is kept masked, and SCU firmware doesn't send new
+	 * MSIC lvl1 mask reg is kept masked, and SCU firmware doesn't send new
 	 * power interrupt to Android kernel. Unmask the bit when probing
 	 * power button in kernel.
-	 * There is a very narrow race between irq handler and power button
-	 * initialization. The race happens rarely. So we needn't worry
-	 * about it.
 	 */
-	error = intel_msic_reg_update(INTEL_MSIC_IRQLVL1MSK, 0, MSIC_PWRBTNM);
-	if (error) {
-		dev_err(&pdev->dev, "Unable to clear power button interrupt, "
-				"error: %d\n", error);
-		goto err_free_irq;
-	}
+	pb_clear_bits(priv->irq_lvl1_mask, MSIC_PWRBTNM);
 
 	return 0;
 
-err_free_irq:
-	free_irq(irq, input);
-err_free_input:
-	input_free_device(input);
-	return error;
-}
-
-static int mfld_pb_remove(struct platform_device *pdev)
-{
-	struct input_dev *input = platform_get_drvdata(pdev);
-	int irq = platform_get_irq(pdev, 0);
-
-	free_irq(irq, input);
+out_unregister_input:
 	input_unregister_device(input);
+	input = NULL;
+out_iounmap:
+	iounmap(priv->pb_stat);
+fail:
 	platform_set_drvdata(pdev, NULL);
+	input_free_device(input);
+	kfree(priv);
+	return ret;
+}
+
+static int mid_pb_remove(struct platform_device *pdev)
+{
+	struct mid_pb_priv *priv = platform_get_drvdata(pdev);
+
+	iounmap(priv->pb_stat);
+	free_irq(priv->irq, priv);
+	platform_set_drvdata(pdev, NULL);
+	input_unregister_device(priv->input);
+	kfree(priv);
 
 	return 0;
 }
 
-static struct platform_driver mfld_pb_driver = {
+static const struct platform_device_id mid_pb_table[] = {
+	{"mid_powerbtn", 1},
+};
+
+static struct platform_driver mid_pb_driver = {
 	.driver = {
 		.name = DRIVER_NAME,
 		.owner = THIS_MODULE,
 	},
-	.probe	= mfld_pb_probe,
-	.remove	= mfld_pb_remove,
+	.probe	= mid_pb_probe,
+	.remove	= mid_pb_remove,
+	.id_table = mid_pb_table,
 };
 
-module_platform_driver(mfld_pb_driver);
+static int __init mid_pb_module_init(void)
+{
+	return platform_driver_register(&mid_pb_driver);
+}
+
+static void  mid_pb_module_exit(void)
+{
+	platform_driver_unregister(&mid_pb_driver);
+}
+
+/* RPMSG related functionality */
+
+static int mid_pb_rpmsg_probe(struct rpmsg_channel *rpdev)
+{
+	int ret = 0;
+	if (rpdev == NULL) {
+		pr_err("rpmsg channel not created\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	dev_info(&rpdev->dev, "Probed mid_pb rpmsg device\n");
+
+	ret = mid_pb_module_init();
+out:
+	return ret;
+}
+
+
+static void mid_pb_rpmsg_remove(struct rpmsg_channel *rpdev)
+{
+	mid_pb_module_exit();
+	dev_info(&rpdev->dev, "Removed mid_pb rpmsg device\n");
+}
+
+static void mid_pb_rpmsg_cb(struct rpmsg_channel *rpdev, void *data,
+					int len, void *priv, u32 src)
+{
+	dev_warn(&rpdev->dev, "unexpected, message\n");
+
+	print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+		       data, len,  true);
+}
+
+static struct rpmsg_device_id mid_pb_id_table[] = {
+	{ .name	= "rpmsg_mid_powerbtn" },
+	{ },
+};
+MODULE_DEVICE_TABLE(rpmsg, mid_pb_id_table);
+
+
+static struct rpmsg_driver mid_pb_rpmsg_driver = {
+	.drv.name	= DRIVER_NAME,
+	.drv.owner	= THIS_MODULE,
+	.probe		= mid_pb_rpmsg_probe,
+	.callback	= mid_pb_rpmsg_cb,
+	.remove		= mid_pb_rpmsg_remove,
+	.id_table	= mid_pb_id_table,
+};
+
+static int __init mid_pb_rpmsg_init(void)
+{
+	return register_rpmsg_driver(&mid_pb_rpmsg_driver);
+}
+
+static void __exit mid_pb_rpmsg_exit(void)
+{
+	return unregister_rpmsg_driver(&mid_pb_rpmsg_driver);
+}
+
+late_initcall(mid_pb_rpmsg_init);
+
+module_exit(mid_pb_rpmsg_exit);
 
 MODULE_AUTHOR("Hong Liu <hong.liu@intel.com>");
 MODULE_DESCRIPTION("Intel Medfield Power Button Driver");
diff --git a/drivers/platform/x86/intel_modem_nvram.c b/drivers/platform/x86/intel_modem_nvram.c
new file mode 100644
index 0000000..2e0e5f2
--- /dev/null
+++ b/drivers/platform/x86/intel_modem_nvram.c
@@ -0,0 +1,395 @@
+/*
+ * Clovertrail / Merrifield NVRAM implementation for modem
+ *
+ * Copyright (C) 2013 Intel Corp
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/kobject.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+#include <asm/intel_mid_rpmsg.h>
+#include <asm/intel_scu_ipc.h>
+#include <linux/platform_data/intel_mid_remoteproc.h>
+
+#define NVRAM_MAX_SIZE	240	/* NVRAM maximum size (in bytes) for MRFLD. */
+
+/* NVRAM access */
+static u32 nvram_size;
+static phys_addr_t nvram_addr;
+
+static int platform_type;	/* Identifies the platform. */
+static void *nvram_ptr;
+
+#define DRIVER_NAME "modem_nvram"
+
+static struct rpmsg_instance *modem_nvram_instance;
+
+/* size interface */
+static ssize_t size_show(struct kobject *kobj, struct kobj_attribute *attr,
+			char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "%d\n", nvram_size);
+}
+static struct kobj_attribute size_attribute =
+	__ATTR(size, S_IRUSR|S_IRGRP, size_show, NULL);
+
+/* dump interface */
+static ssize_t dump_show(struct kobject *kobj, struct kobj_attribute *attr,
+			char *buf)
+{
+	void __iomem *nv_base;
+
+	if (((platform_type == INTEL_MID_CPU_CHIP_TANGIER) ||
+		(platform_type == INTEL_MID_CPU_CHIP_ANNIEDALE)) &&
+		(nvram_addr == 0)) {
+		memcpy(buf, nvram_ptr, nvram_size);
+	} else {
+		nv_base = ioremap_nocache(nvram_addr, nvram_size);
+		if (nv_base != NULL) {
+			memcpy(buf, nv_base, nvram_size);
+			iounmap(nv_base);
+		} else
+			pr_err("%s : ioremap error\n", __func__);
+	}
+
+	pr_debug("%s : %d NVRAM bytes dumped\n", __func__, nvram_size);
+
+	return nvram_size;
+}
+
+static ssize_t dump_store(struct kobject *kobj, struct kobj_attribute *attr,
+			 const char *buf, size_t count)
+{
+	u32 sptr_dw_mask;
+	int dptr_reg;
+	void __iomem *nv_base;
+	int ret;
+
+	if (count == 0)
+		return 0;
+
+	if (((platform_type == INTEL_MID_CPU_CHIP_TANGIER) ||
+		(platform_type == INTEL_MID_CPU_CHIP_ANNIEDALE)) &&
+		(nvram_addr == 0)) {
+		count = min((size_t)nvram_size, count);
+		memcpy(nvram_ptr, buf, count);
+	} else {
+		nv_base = ioremap_nocache(nvram_addr, nvram_size);
+		if (nv_base != NULL) {
+			count = min((size_t)nvram_size, count);
+			memcpy(nv_base, buf, count);
+
+			if ((platform_type == INTEL_MID_CPU_CHIP_TANGIER) ||
+			(platform_type == INTEL_MID_CPU_CHIP_ANNIEDALE)) {
+				sptr_dw_mask = 0xFFFFFFFF;
+				/* Modem data lies on region 4, 5, 6 and 7 */
+				for (dptr_reg = 4; dptr_reg <= 7; dptr_reg++) {
+					pr_info("%s : ipc_write_osnib update osnib region %d\n",
+							__func__, dptr_reg);
+					ret = rpmsg_send_raw_command(
+						modem_nvram_instance,
+						RP_WRITE_OSNIB,
+						0,
+						NULL,
+						NULL,
+						0,
+						0,
+						sptr_dw_mask,
+						dptr_reg);
+					if (ret < 0) {
+						pr_err("%s : ipc_write_osnib failed (%d)!!\n",
+								__func__, ret);
+					}
+				}
+			} else {
+				ret = rpmsg_send_simple_command(
+						modem_nvram_instance,
+						IPCMSG_STORE_NV_DATA,
+						0);
+				if (ret)
+					pr_err("%s : rpmsg_send_simple_command failed (%d)\n",
+								__func__, ret);
+			}
+			iounmap(nv_base);
+		} else
+			pr_err("%s : ioremap error\n", __func__);
+	}
+
+	pr_info("%s : %zd bytes stored in NVRAM\n", __func__, count);
+
+	return count;
+}
+
+static ssize_t dump_clear(void)
+{
+	int dptr_reg;
+	u32 sptr_dw_mask;
+	void __iomem *nv_base;
+	int ret;
+
+	if (((platform_type == INTEL_MID_CPU_CHIP_TANGIER) ||
+		(platform_type == INTEL_MID_CPU_CHIP_ANNIEDALE)) &&
+		(nvram_addr == 0)) {
+		memset(nvram_ptr, 0, nvram_size);
+	} else {
+		nv_base = ioremap_nocache(nvram_addr, nvram_size);
+		if (nv_base != NULL) {
+			memset(nv_base, 0, nvram_size);
+			if ((platform_type == INTEL_MID_CPU_CHIP_TANGIER) ||
+			(platform_type == INTEL_MID_CPU_CHIP_ANNIEDALE)) {
+				sptr_dw_mask = 0xFFFFFFFF;
+				/* Modem data lies on regions 4, 5, 6 and 7 */
+				for (dptr_reg = 4; dptr_reg <= 7; dptr_reg++) {
+					ret = rpmsg_send_raw_command(
+						modem_nvram_instance,
+						RP_WRITE_OSNIB,
+						0, NULL, NULL, 0, 0,
+						sptr_dw_mask, dptr_reg);
+					if (ret < 0) {
+						pr_err("%s : ipc_write_osnib failed (%d)!!\n",
+								__func__, ret);
+					}
+				}
+			} else {
+				ret = rpmsg_send_simple_command(
+							modem_nvram_instance,
+							IPCMSG_STORE_NV_DATA,
+							0);
+
+				if (ret)
+					pr_err("%s : rpmsg_send_simple_command failed (%d)\n",
+								__func__, ret);
+			}
+			iounmap(nv_base);
+		} else
+			pr_err("%s : ioremap error\n", __func__);
+	}
+
+	pr_info("%s : NVRAM cleared\n", __func__);
+
+	return 0;
+}
+
+static struct kobj_attribute dump_attribute =
+	__ATTR(dump, S_IRGRP|S_IWGRP, dump_show, dump_store);
+
+/* clear interface */
+static ssize_t clear_store(struct kobject *kobj, struct kobj_attribute *attr,
+			 const char *buf, size_t count)
+{
+	pr_debug("%s : clearing NVRAM buffer", __func__);
+	dump_clear();
+	return count;
+}
+static struct kobj_attribute clear_attribute =
+	__ATTR(clear, S_IWGRP, NULL, clear_store);
+
+static struct attribute *nvram_attrs[] = {
+	&size_attribute.attr,
+	&dump_attribute.attr,
+	&clear_attribute.attr,
+	NULL,
+};
+
+static struct attribute_group nvram_attr_group = {
+	.attrs = nvram_attrs,
+};
+
+static int modem_nvram_reboot_notify(struct notifier_block *notifier,
+				     unsigned long what, void *data)
+{
+	/* Always take the same action : clear the zone */
+	/* The NVMRAM will be cleared on a clean shutdown, */
+	/* whatever the reason. Its goal is to keep data on spurious shutdown*/
+	/* like Watchdog reboot */
+	switch (what) {
+	case SYS_RESTART:
+	case SYS_HALT:
+	case SYS_POWER_OFF:
+		pr_info("%s : Clearing NVRAM on reboot notification.",
+								__func__);
+		dump_clear();
+		break;
+	default:
+		pr_err("%s : Unknown reboot notification. Clearing NVRAM.",
+								__func__);
+		dump_clear();
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static int modem_nvram_rpmsg_probe(struct rpmsg_channel *rpdev)
+{
+	int ret = 0;
+
+	if (rpdev == NULL) {
+		pr_err("%s : rpmsg channel not created\n", __func__);
+		ret = -ENODEV;
+		goto out;
+	}
+
+	dev_info(&rpdev->dev, "Probed modem NVRAM rpmsg device\n");
+
+	/* Allocate rpmsg instance for driver*/
+	alloc_rpmsg_instance(rpdev, &modem_nvram_instance);
+	if (!modem_nvram_instance) {
+		dev_err(&rpdev->dev, "kzalloc modem nvram instance failed\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+	/* Initialize rpmsg instance */
+	init_rpmsg_instance(modem_nvram_instance);
+out:
+	return ret;
+}
+
+static void modem_nvram_rpmsg_remove(struct rpmsg_channel *rpdev)
+{
+	free_rpmsg_instance(rpdev, &modem_nvram_instance);
+	dev_info(&rpdev->dev, "Removed modem NVRAM rpmsg device\n");
+}
+
+static void modem_nvram_rpmsg_cb(struct rpmsg_channel *rpdev, void *data,
+					int len, void *priv, u32 src)
+{
+	dev_warn(&rpdev->dev, "unexpected, message\n");
+
+	print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+		       data, len,  true);
+}
+
+static struct rpmsg_device_id modem_nvram_rpmsg_id_table[] = {
+	{ .name	= "rpmsg_modem_nvram" },
+	{ },
+};
+
+MODULE_DEVICE_TABLE(rpmsg, modem_nvram_rpmsg_id_table);
+
+static struct rpmsg_driver modem_nvram_rpmsg = {
+	.drv.name	= KBUILD_MODNAME,
+	.drv.owner	= THIS_MODULE,
+	.id_table	= modem_nvram_rpmsg_id_table,
+	.probe		= modem_nvram_rpmsg_probe,
+	.callback	= modem_nvram_rpmsg_cb,
+	.remove		= modem_nvram_rpmsg_remove,
+};
+
+static struct notifier_block modem_nvram_reboot = {
+	.notifier_call	= modem_nvram_reboot_notify,
+	.next		= NULL,
+	.priority	= 0,
+};
+
+static struct kobject *modem_nvram_kobj;
+
+static int __init modem_nvram_init(void)
+{
+	int retval;
+
+	retval = register_rpmsg_driver(&modem_nvram_rpmsg);
+	if (retval) {
+		pr_err("%s : register_rpmsg_driver error (%d)",
+						__func__, retval);
+		return retval;
+	}
+
+	/* get NVRAM parameters */
+
+	platform_type = intel_mid_identify_cpu();
+
+	nvram_size = intel_scu_ipc_get_nvram_size();
+	nvram_addr = intel_scu_ipc_get_nvram_addr();
+
+	if (((platform_type == INTEL_MID_CPU_CHIP_TANGIER) ||
+		(platform_type == INTEL_MID_CPU_CHIP_ANNIEDALE)) &&
+		(nvram_addr == 0)) {
+		pr_info("%s : Using local NVRAM storage\n",
+				__func__);
+		nvram_size = NVRAM_MAX_SIZE;
+		nvram_ptr = kzalloc(nvram_size, GFP_KERNEL);
+		if (nvram_ptr == NULL) {
+			pr_err("%s : failed to allocate memory for nvram buffer!\n",
+								__func__);
+			retval = -ENOMEM;
+			goto exit;
+		}
+		pr_info("Modem NVRAM: PTR: 0x%p\n", nvram_ptr);
+	} else {
+		pr_info("Modem NVRAM: ADDR: %pa\n", &nvram_addr);
+	}
+
+	pr_info("Modem NVRAM: SIZE: 0x%x\n", nvram_size);
+
+	if ((((platform_type == INTEL_MID_CPU_CHIP_TANGIER) ||
+		(platform_type == INTEL_MID_CPU_CHIP_ANNIEDALE)) ||
+		(nvram_addr != 0)) && (nvram_size > 0)) {
+		if (register_reboot_notifier(&modem_nvram_reboot))
+			pr_err("%s : can't register reboot_notifier\n",
+								__func__);
+		modem_nvram_kobj = kobject_create_and_add(DRIVER_NAME,
+								kernel_kobj);
+		if (!modem_nvram_kobj) {
+			retval = -ENOMEM;
+			goto error;
+		}
+
+		retval = sysfs_create_group(modem_nvram_kobj,
+					&nvram_attr_group);
+		if (retval) {
+			retval = -ENODEV;
+			kobject_put(modem_nvram_kobj);
+			goto error;
+		}
+
+	} else {
+		pr_err("NVRAM not initialized. Aborting.\n");
+		retval = -ENODEV;
+		goto error;
+	}
+
+	return 0;
+
+error:
+	unregister_reboot_notifier(&modem_nvram_reboot);
+exit:
+	unregister_rpmsg_driver(&modem_nvram_rpmsg);
+	return retval;
+}
+
+static void __exit modem_nvram_exit(void)
+{
+	if ((platform_type == INTEL_MID_CPU_CHIP_TANGIER) ||
+		(platform_type == INTEL_MID_CPU_CHIP_ANNIEDALE))
+		kfree(nvram_ptr);
+
+	kobject_put(modem_nvram_kobj);
+	unregister_rpmsg_driver(&modem_nvram_rpmsg);
+	unregister_reboot_notifier(&modem_nvram_reboot);
+	sysfs_remove_group(modem_nvram_kobj, &nvram_attr_group);
+}
+
+module_init(modem_nvram_init);
+module_exit(modem_nvram_exit);
+
+MODULE_DESCRIPTION("Intel modem NVRAM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/intel_pmic_charger.c b/drivers/platform/x86/intel_pmic_charger.c
new file mode 100644
index 0000000..1becdea
--- /dev/null
+++ b/drivers/platform/x86/intel_pmic_charger.c
@@ -0,0 +1,215 @@
+/* Cloverview Plus PMIC Charger (USBDET interrupt) driver
+ * Copyright (C) 2013 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/wakelock.h>
+#include <linux/rpmsg.h>
+#include <asm/intel_scu_ipc.h>
+#include <asm/intel_scu_pmic.h>
+#include <asm/intel_mid_rpmsg.h>
+#include <linux/platform_data/intel_mid_remoteproc.h>
+#include <linux/reboot.h>
+
+/* Wake lock to prevent platform from going to
+	S3 when USBDET interrupt Trigger */
+static struct wake_lock wakelock;
+
+#define DRIVER_NAME "pmic_charger"
+#define MSIC_SPWRSRCINT 0x192
+#define MSIC_SUSBDET_MASK_BIT	0x2
+#define MSIC_SBATTDET_MASK_BIT  0x1
+
+static irqreturn_t pmic_charger_thread_handler(int irq, void *devid)
+{
+	int retval = 0;
+	uint8_t spwrsrcint;
+	struct device *dev = (struct device *)devid;
+
+	retval = intel_scu_ipc_ioread8(MSIC_SPWRSRCINT, &spwrsrcint);
+	if (retval) {
+		dev_err(dev, "IPC Failed to read %d\n", retval);
+		return retval;
+	}
+
+	if ((spwrsrcint & MSIC_SUSBDET_MASK_BIT) == 0) {
+		if (wake_lock_active(&wakelock))
+			wake_unlock(&wakelock);
+	}
+
+	/* Shutdown upon battery removal */
+	if ((spwrsrcint & MSIC_SBATTDET_MASK_BIT) == 0) {
+		dev_info(dev, "battery removal shutdown\n");
+		kernel_power_off();
+	}
+
+	dev_info(dev, "pmic charger interrupt: %d\n", irq);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t pmic_charger_irq_handler(int irq, void *devid)
+{
+	if (!wake_lock_active(&wakelock))
+		wake_lock(&wakelock);
+
+	return IRQ_WAKE_THREAD;
+}
+
+static int pmic_charger_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	int irq = platform_get_irq(pdev, 0);
+	int ret;
+
+	/* Initialize the wakelock */
+	wake_lock_init(&wakelock, WAKE_LOCK_SUSPEND, "pmic_charger_wakelock");
+
+	/* Register a handler for USBDET interrupt */
+	ret = request_threaded_irq(irq, pmic_charger_irq_handler,
+			pmic_charger_thread_handler,
+			IRQF_TRIGGER_FALLING | IRQF_NO_SUSPEND,
+			"pmic_usbdet_interrupt", dev);
+	if (ret) {
+		dev_info(dev, "register USBDET IRQ with error %d\n", ret);
+		return ret;
+	}
+
+	dev_info(dev, "registered USBDET IRQ %d\n", irq);
+
+	return 0;
+}
+
+static int pmic_charger_remove(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	int irq = platform_get_irq(pdev, 0);
+	wake_lock_destroy(&wakelock);
+	free_irq(irq, dev);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int pmic_charger_suspend(struct device *dev)
+{
+	dev_dbg(dev, "pmic charger suspend\n");
+	return 0;
+}
+
+static int pmic_charger_resume(struct device *dev)
+{
+	dev_dbg(dev, "pmic charger resume\n");
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops pmic_charger_pm_ops = {
+	.suspend                = pmic_charger_suspend,
+	.resume                 = pmic_charger_resume,
+};
+
+static struct platform_driver pmic_charger_driver = {
+	.driver = {
+		.name		= DRIVER_NAME,
+		.owner		= THIS_MODULE,
+		.pm		= &pmic_charger_pm_ops,
+	},
+	.probe		= pmic_charger_probe,
+	.remove = pmic_charger_remove,
+};
+
+static int __init pmic_charger_module_init(void)
+{
+	return platform_driver_register(&pmic_charger_driver);
+}
+
+static void pmic_charger_module_exit(void)
+{
+	platform_driver_unregister(&pmic_charger_driver);
+}
+
+/* RPMSG related functionality */
+static int pmic_charger_rpmsg_probe(struct rpmsg_channel *rpdev)
+{
+	int ret = 0;
+
+	if (rpdev == NULL) {
+		pr_err("rpmsg channel not created\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	dev_info(&rpdev->dev, "Probed pmic_charger rpmsg device\n");
+
+	ret = pmic_charger_module_init();
+
+out:
+	return ret;
+}
+
+static void pmic_charger_rpmsg_remove(struct rpmsg_channel *rpdev)
+{
+	pmic_charger_module_exit();
+	dev_info(&rpdev->dev, "Removed pmic_charger rpmsg device\n");
+}
+
+static void pmic_charger_rpmsg_cb(struct rpmsg_channel *rpdev, void *data,
+					int len, void *priv, u32 src)
+{
+	dev_warn(&rpdev->dev, "unexpected, message\n");
+
+	print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+		       data, len,  true);
+}
+
+static struct rpmsg_device_id pmic_charger_rpmsg_id_table[] = {
+	{ .name	= "rpmsg_pmic_charger" },
+	{ },
+};
+MODULE_DEVICE_TABLE(rpmsg, pmic_charger_rpmsg_id_table);
+
+static struct rpmsg_driver pmic_charger_rpmsg = {
+	.drv.name	= KBUILD_MODNAME,
+	.drv.owner	= THIS_MODULE,
+	.id_table	= pmic_charger_rpmsg_id_table,
+	.probe		= pmic_charger_rpmsg_probe,
+	.callback	= pmic_charger_rpmsg_cb,
+	.remove		= pmic_charger_rpmsg_remove,
+};
+
+static int __init pmic_charger_rpmsg_init(void)
+{
+	return register_rpmsg_driver(&pmic_charger_rpmsg);
+}
+
+static void __exit pmic_charger_rpmsg_exit(void)
+{
+	return unregister_rpmsg_driver(&pmic_charger_rpmsg);
+}
+
+module_init(pmic_charger_rpmsg_init);
+module_exit(pmic_charger_rpmsg_exit);
+
+MODULE_AUTHOR("Dongsheng Zhang <dongsheng.zhang@intel.com>");
+MODULE_AUTHOR("Rapaka, Naveen <naveen.rapaka@intel.com>");
+MODULE_DESCRIPTION("Intel Pmic charger Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel_pmic_gpio.c b/drivers/platform/x86/intel_pmic_gpio.c
index 6f4b728..c2d271a 100644
--- a/drivers/platform/x86/intel_pmic_gpio.c
+++ b/drivers/platform/x86/intel_pmic_gpio.c
@@ -31,7 +31,7 @@
 #include <linux/init.h>
 #include <linux/io.h>
 #include <linux/gpio.h>
-#include <asm/intel_scu_ipc.h>
+#include <asm/intel_scu_pmic.h>
 #include <linux/device.h>
 #include <linux/intel_pmic_gpio.h>
 #include <linux/platform_device.h>
diff --git a/drivers/platform/x86/intel_scu_flis.c b/drivers/platform/x86/intel_scu_flis.c
new file mode 100644
index 0000000..7902fd1
--- /dev/null
+++ b/drivers/platform/x86/intel_scu_flis.c
@@ -0,0 +1,780 @@
+/* intel_scu_flis.c SCU FLIS INTERFACES
+ *
+ * Copyright (c) 2012,  Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/rpmsg.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_scu_ipc.h>
+#include <asm/intel_scu_flis.h>
+#include <asm/intel_mid_rpmsg.h>
+#include <asm/intel_mid_remoteproc.h>
+#include <linux/platform_data/intel_mid_remoteproc.h>
+
+static struct rpmsg_instance *flis_instance;
+
+static u32 shim_flis_addr;
+static u32 shim_offset;
+static u32 shim_data;
+static char shim_ops[OPS_STR_LEN];
+
+static u32 param_type;	/* flis param type: PULL/PIN DIRECTION/OPEN_DRAIN */
+static u32 param_value;	/* value of certain flis param */
+static unsigned int pin_name;
+static char ops[OPS_STR_LEN];
+
+static int platform;
+
+struct intel_scu_flis_info {
+	struct pinstruct_t *pin_t;
+	struct pin_mmio_flis_t *mmio_flis_t;
+	int pin_num;
+	int initialized;
+	void *flis_base;
+	u32 flis_paddr;
+	bool shim_access;
+};
+
+static struct intel_scu_flis_info flis_info;
+
+static DEFINE_SPINLOCK(mmio_flis_lock);
+
+u32 get_flis_value(u32 offset)
+{
+	struct intel_scu_flis_info *isfi = &flis_info;
+	u32 __iomem *mem;
+
+	if (!isfi->initialized || !isfi->flis_base)
+		return -ENODEV;
+
+	mem = (void __iomem *)(isfi->flis_base + offset);
+
+	return readl(mem);
+}
+EXPORT_SYMBOL(get_flis_value);
+
+void set_flis_value(u32 value, u32 offset)
+{
+	struct intel_scu_flis_info *isfi = &flis_info;
+	u32 __iomem *mem;
+	unsigned long flags;
+
+	if (!isfi->initialized || !isfi->flis_base)
+		return;
+
+	/*
+	 * There is one security region for Merrifield FLIS, which
+	 * are read only to OS side. Use IPC when write access is needed.
+	 */
+	if ((platform == INTEL_MID_CPU_CHIP_TANGIER ||
+		platform == INTEL_MID_CPU_CHIP_ANNIEDALE)
+			&& offset >= I2C_FLIS_START
+			&& offset <= I2C_FLIS_END) {
+		/* IPC call should not be called in atomic context */
+		might_sleep();
+		rpmsg_send_generic_raw_command(RP_INDIRECT_WRITE, 0,
+					(u8 *)&value, 4,
+					NULL, 0,
+					isfi->flis_paddr + offset, 0);
+
+	} else {
+		mem = (void __iomem *)(isfi->flis_base + offset);
+		spin_lock_irqsave(&mmio_flis_lock, flags);
+		writel(value, mem);
+		spin_unlock_irqrestore(&mmio_flis_lock, flags);
+	}
+}
+EXPORT_SYMBOL(set_flis_value);
+
+/* directly write to flis address */
+int intel_scu_ipc_write_shim(u32 data, u32 flis_addr, u32 offset)
+{
+	struct intel_scu_flis_info *isfi = &flis_info;
+	int ret;
+	u32 ipc_wbuf[3];
+
+	if (!isfi->shim_access)
+		return -EINVAL;
+
+	/* offset 0xff means the flis is reserved, just return 0*/
+	if (offset == 0xFF)
+		return 0;
+
+	ipc_wbuf[0] = flis_addr; /* wbuf[0]: flis address */
+	ipc_wbuf[1] = offset;	/* wbuf[1]: register offset */
+	ipc_wbuf[2] = data;	/* wbuf[2]: data */
+
+	ret = rpmsg_send_command(flis_instance,	IPCMSG_SHIM_CONFIG,
+				IPC_CMD_SHIM_WR, (u8 *)ipc_wbuf, NULL, 12, 0);
+	if (ret)
+		pr_err("%s: failed to write shim, flis addr: 0x%x, offset: 0x%x\n",
+			__func__, flis_addr, offset);
+
+	return ret;
+}
+EXPORT_SYMBOL(intel_scu_ipc_write_shim);
+
+/* directly read from flis address */
+int intel_scu_ipc_read_shim(u32 *data, u32 flis_addr, u32 offset)
+{
+	struct intel_scu_flis_info *isfi = &flis_info;
+	int ret;
+	u32 ipc_wbuf[2];
+
+	if (!isfi->shim_access)
+		return -EINVAL;
+
+	/* offset 0xff means the flis is reserved, just return 0 */
+	if (offset == 0xFF)
+		return 0;
+
+	ipc_wbuf[0] = flis_addr;
+	ipc_wbuf[1] = offset;
+
+	ret = rpmsg_send_command(flis_instance,	IPCMSG_SHIM_CONFIG,
+				IPC_CMD_SHIM_RD, (u8 *)ipc_wbuf, data, 8, 1);
+	if (ret)
+		pr_err("%s: failed to read shim, flis addr: 0x%x, offset: 0x%x\n",
+			__func__, flis_addr, offset);
+
+	return ret;
+}
+EXPORT_SYMBOL(intel_scu_ipc_read_shim);
+
+int intel_scu_ipc_update_shim(u32 data, u32 mask, u32 flis_addr, u32 offset)
+{
+	struct intel_scu_flis_info *isfi = &flis_info;
+	u32 tmp = 0;
+	int ret;
+
+	if (!isfi->shim_access)
+		return -EINVAL;
+
+	ret = intel_scu_ipc_read_shim(&tmp, flis_addr, offset);
+	if (ret) {
+		pr_err("read shim failed, addr = 0x%x, off = 0x%x\n",
+			flis_addr, offset);
+		return ret;
+	}
+
+	tmp &= ~mask;
+	tmp |= (data & mask);
+
+	ret = intel_scu_ipc_write_shim(tmp, flis_addr, offset);
+	if (ret) {
+		pr_err("write shim failed, addr = 0x%x, off = 0x%x\n",
+			flis_addr, offset);
+		return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(intel_scu_ipc_update_shim);
+
+/**
+ * config_pin_flis -- configure pin mux,
+ *		      pull direction and strength and open-drain enable.
+ *
+ * @name: pin name
+ * @param: flis param
+ * @val: value to be set
+ *
+ * example:
+ * config pull up/down:
+ *	config_pin_flis(i2s_2_clk, PULL, UP_20K);
+ *	config_pin_flis(i2s_2_clk, PULL, DOWN_20K);
+ *
+ * config pin mux:
+ *	config_pin_flis(i2s_2_clk, MUX, MUX_EN_INPUT_EN);
+ *	config_pin_flis(i2s_2_clk, MUX, INPUT_EN);
+ *	config_pin_flis(i2s_2_clk, MUX, MUX_EN_OUTPUT_EN);
+ *	config_pin_flis(i2s_2_clk, MUX, OUTPUT_EN);
+ *
+ * config pin open-drain:
+ *	config_pin_flis(i2s_2_clk, OPEN_DRAIN, OD_ENABLE);
+ *	config_pin_flis(i2s_2_clk, OPEN_DRAIN, OD_DISABLE);
+ *
+ */
+int config_pin_flis(unsigned int name, enum flis_param_t param, u32 val)
+{
+	u32 flis_addr, off, data, mask;
+	int ret;
+	int pos;
+	struct intel_scu_flis_info *isfi = &flis_info;
+	struct pin_mmio_flis_t *mmft;
+	u32 old_val;
+
+	if (!isfi->initialized)
+		return -ENODEV;
+
+	if (name < 0 || name >= isfi->pin_num)
+		return -EINVAL;
+
+	if (platform == INTEL_MID_CPU_CHIP_CLOVERVIEW) {
+		/* Check if the pin is configurable */
+		if (isfi->pin_t[name].valid == false)
+			return -EINVAL;
+
+		flis_addr = isfi->pin_t[name].bus_address;
+
+		switch (param) {
+		case PULL:
+			off = isfi->pin_t[name].pullup_offset;
+			pos = isfi->pin_t[name].pullup_lsb_pos;
+			mask = (PULL_MASK << pos);
+			break;
+		case MUX:
+			off = isfi->pin_t[name].direction_offset;
+			pos = isfi->pin_t[name].direction_lsb_pos;
+			mask = (MUX_MASK << pos);
+			break;
+		case OPEN_DRAIN:
+			off = isfi->pin_t[name].open_drain_offset;
+			pos = isfi->pin_t[name].open_drain_bit;
+			mask = (OPEN_DRAIN_MASK << pos);
+			break;
+		default:
+			pr_err("Please specify valid flis param\n");
+			return -EINVAL;
+		}
+
+		data = (val << pos);
+		pr_debug("addr = 0x%x, off = 0x%x, pos = %d, mask = 0x%x, data = 0x%x\n",
+				flis_addr, off, pos, mask, data);
+
+		ret = intel_scu_ipc_update_shim(data, mask, flis_addr, off);
+		if (ret) {
+			pr_err("update shim failed\n");
+			return ret;
+		}
+	} else if (platform == INTEL_MID_CPU_CHIP_TANGIER ||
+		platform == INTEL_MID_CPU_CHIP_ANNIEDALE) {
+		mmft = isfi->mmio_flis_t;
+		off = mmft[name].offset;
+
+		/* Check if the FLIS is writable by mmio access */
+		if (!(mmft[name].access_ctrl & writable))
+			return -EINVAL;
+
+		old_val = get_flis_value(off);
+
+		switch (param) {
+		case PULL:
+			mask = PULL_MASK;
+			break;
+		case MUX:
+			mask = MUX_MASK;
+			break;
+		case OPEN_DRAIN:
+			mask = OPEN_DRAIN_MASK;
+			break;
+		default:
+			pr_err("Please specify valid flis param\n");
+			return -EINVAL;
+		}
+
+		set_flis_value((old_val & ~mask) | val, off);
+
+	} else
+		return -EINVAL;
+
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(config_pin_flis);
+
+int get_pin_flis(unsigned int name, enum flis_param_t param, u32 *val)
+{
+	u32 flis_addr, off;
+	u32 data = 0;
+	int ret;
+	int pos;
+	u32 mask;
+	struct intel_scu_flis_info *isfi = &flis_info;
+	struct pin_mmio_flis_t *mmft;
+	u32 old_val;
+
+	if (!isfi->initialized)
+		return -ENODEV;
+
+	if (name < 0 || name >= isfi->pin_num)
+		return -EINVAL;
+
+	if (platform == INTEL_MID_CPU_CHIP_CLOVERVIEW) {
+		if (isfi->pin_t[name].valid == false)
+			return -EINVAL;
+
+		flis_addr = isfi->pin_t[name].bus_address;
+
+		switch (param) {
+		case PULL:
+			off = isfi->pin_t[name].pullup_offset;
+			pos = isfi->pin_t[name].pullup_lsb_pos;
+			mask = PULL_MASK;
+			break;
+		case MUX:
+			off = isfi->pin_t[name].direction_offset;
+			pos = isfi->pin_t[name].direction_lsb_pos;
+			mask = MUX_MASK;
+			break;
+		case OPEN_DRAIN:
+			off = isfi->pin_t[name].open_drain_offset;
+			pos = isfi->pin_t[name].open_drain_bit;
+			mask = OPEN_DRAIN_MASK;
+			break;
+		default:
+			pr_err("Please specify valid flis param\n");
+			return -EINVAL;
+		}
+
+		ret = intel_scu_ipc_read_shim(&data, flis_addr, off);
+		if (ret) {
+			pr_err("read shim failed, addr = 0x%x, off = 0x%x\n",
+				flis_addr, off);
+			return ret;
+		}
+
+		*val = (data >> pos) & mask;
+
+		pr_debug("read: data = 0x%x, val = 0x%x\n", data, *val);
+	} else if (platform == INTEL_MID_CPU_CHIP_TANGIER ||
+		platform == INTEL_MID_CPU_CHIP_ANNIEDALE) {
+		mmft = isfi->mmio_flis_t;
+		off = mmft[name].offset;
+
+		old_val = get_flis_value(off);
+
+		switch (param) {
+		case PULL:
+			mask = PULL_MASK;
+			break;
+		case MUX:
+			mask = MUX_MASK;
+			break;
+		case OPEN_DRAIN:
+			mask = OPEN_DRAIN_MASK;
+			break;
+		default:
+			pr_err("Please specify valid flis param\n");
+			return -EINVAL;
+		}
+
+		*val = (old_val & mask);
+
+	} else
+		return -EINVAL;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(get_pin_flis);
+
+static void flis_generic_store(const char *buf, int type)
+{
+	u32 tmp;
+	int ret;
+
+	/* use decimal for pin number */
+	if (type == DBG_PIN_NAME)
+		ret = sscanf(buf, "%d", &tmp);
+	else
+		ret = sscanf(buf, "%x", &tmp);
+
+	if (ret != 1)
+		return;
+
+	switch (type) {
+	case DBG_SHIM_FLIS_ADDR:
+		shim_flis_addr = tmp;
+		break;
+	case DBG_SHIM_OFFSET:
+		shim_offset = tmp;
+		break;
+	case DBG_SHIM_DATA:
+		shim_data = tmp;
+		break;
+	case DBG_PARAM_VAL:
+		param_value = tmp;
+		break;
+	case DBG_PARAM_TYPE:
+		param_type = tmp;
+		break;
+	case DBG_PIN_NAME:
+		pin_name = tmp;
+		break;
+	default:
+		break;
+	}
+}
+
+#ifdef CONFIG_X86_CTP
+static ssize_t shim_flis_addr_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	flis_generic_store(buf, DBG_SHIM_FLIS_ADDR);
+	return size;
+}
+
+static ssize_t shim_flis_addr_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%x\n", shim_flis_addr);
+}
+#endif
+
+static ssize_t shim_offset_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	flis_generic_store(buf, DBG_SHIM_OFFSET);
+	return size;
+}
+
+static ssize_t shim_offset_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%x\n", shim_offset);
+}
+
+static ssize_t shim_data_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	flis_generic_store(buf, DBG_SHIM_DATA);
+	return size;
+}
+
+static ssize_t shim_data_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%x\n", shim_data);
+}
+
+static ssize_t shim_ops_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	int ret;
+
+	memset(shim_ops, 0, sizeof(shim_ops));
+
+	ret = sscanf(buf, "%9s", shim_ops);
+	if (ret != 1)
+		return -EINVAL;
+
+	ret = 0;
+	if (!strncmp("get", shim_ops, OPS_STR_LEN)) {
+		if (platform == INTEL_MID_CPU_CHIP_CLOVERVIEW) {
+			ret = intel_scu_ipc_read_shim(&shim_data,
+					shim_flis_addr,	shim_offset);
+		} else {
+			/* use the same variable name to be compatible */
+			shim_data = get_flis_value(shim_offset);
+		}
+	} else if (!strncmp("set", shim_ops, OPS_STR_LEN)) {
+		if (platform == INTEL_MID_CPU_CHIP_CLOVERVIEW) {
+			ret = intel_scu_ipc_write_shim(shim_data,
+					shim_flis_addr,	shim_offset);
+		} else {
+			set_flis_value(shim_data, shim_offset);
+		}
+	} else {
+		dev_err(dev, "Not supported ops\n");
+		ret = -EINVAL;
+	}
+
+	if (ret) {
+		dev_err(dev, "get/set flis error, ret = %d\n", ret);
+		return ret;
+	}
+
+	return size;
+}
+
+static ssize_t param_val_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%x\n", param_value);
+}
+
+static ssize_t param_val_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	flis_generic_store(buf, DBG_PARAM_VAL);
+	return size;
+}
+
+static ssize_t flis_param_type_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", param_type);
+}
+
+static ssize_t flis_param_type_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	flis_generic_store(buf, DBG_PARAM_TYPE);
+	return size;
+}
+
+static ssize_t pinname_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", pin_name);
+}
+
+static ssize_t pinname_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	flis_generic_store(buf, DBG_PIN_NAME);
+	return size;
+}
+
+static ssize_t ops_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	int ret;
+
+	memset(ops, 0, sizeof(ops));
+
+	ret = sscanf(buf, "%9s", ops);
+	if (ret != 1) {
+		dev_err(dev, "input error\n");
+		return -EINVAL;
+	}
+
+	if (!strncmp("get", ops, OPS_STR_LEN))
+		ret = get_pin_flis(pin_name, param_type, &param_value);
+	else if (!strncmp("set", ops, OPS_STR_LEN))
+		ret = config_pin_flis(pin_name, param_type, param_value);
+	else {
+		dev_err(dev, "wrong ops\n");
+		ret = -EINVAL;
+	}
+
+	if (ret) {
+		dev_err(dev, "Access flis error, ret = %d\n", ret);
+		return ret;
+	}
+
+	return size;
+}
+
+#ifdef CONFIG_X86_CTP
+static DEVICE_ATTR(flis_addr, S_IRUSR|S_IWUSR,
+		shim_flis_addr_show, shim_flis_addr_store);
+#endif
+static DEVICE_ATTR(offset, S_IRUSR|S_IWUSR,
+		shim_offset_show, shim_offset_store);
+static DEVICE_ATTR(data, S_IRUSR|S_IWUSR, shim_data_show, shim_data_store);
+static DEVICE_ATTR(flis_ops, S_IWUSR, NULL, shim_ops_store);
+
+static struct attribute *flis_attrs[] = {
+#ifdef CONFIG_X86_CTP
+	&dev_attr_flis_addr.attr,
+#endif
+	&dev_attr_offset.attr,
+	&dev_attr_data.attr,
+	&dev_attr_flis_ops.attr,
+	NULL,
+};
+
+static struct attribute_group flis_attr_group = {
+	.name = "flis_debug",
+	.attrs = flis_attrs,
+};
+
+static DEVICE_ATTR(pin_name, S_IRUSR|S_IWUSR, pinname_show, pinname_store);
+static DEVICE_ATTR(flis_param, S_IRUSR|S_IWUSR, flis_param_type_show,
+						flis_param_type_store);
+static DEVICE_ATTR(val, S_IRUSR|S_IWUSR, param_val_show, param_val_store);
+static DEVICE_ATTR(ops, S_IWUSR, NULL, ops_store);
+
+static struct attribute *pin_config_attrs[] = {
+	&dev_attr_pin_name.attr,
+	&dev_attr_flis_param.attr,
+	&dev_attr_val.attr,
+	&dev_attr_ops.attr,
+	NULL,
+};
+
+static struct attribute_group pin_config_attr_group = {
+	.name = "pin_config_debug",
+	.attrs = pin_config_attrs,
+};
+
+static int scu_flis_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct intel_scu_flis_info *isfi = &flis_info;
+	struct intel_scu_flis_platform_data *pdata = pdev->dev.platform_data;
+
+	if (!pdata) {
+		dev_err(&pdev->dev, "No platform data\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	platform = intel_mid_identify_cpu();
+	isfi->pin_t = pdata->pin_t;
+	isfi->pin_num = pdata->pin_num;
+	isfi->shim_access = pdata->shim_access;
+	isfi->mmio_flis_t = pdata->mmio_flis_t;
+	if (pdata->mmio_flis_t && pdata->flis_base) {
+		isfi->flis_paddr = pdata->flis_base;
+		isfi->flis_base = ioremap_nocache(pdata->flis_base,
+					pdata->flis_len);
+		if (!isfi->flis_base) {
+			dev_err(&pdev->dev, "error mapping flis base\n");
+			ret = -EFAULT;
+			goto out;
+		}
+	}
+
+	if ((isfi->pin_t || isfi->mmio_flis_t)&&isfi->pin_num)
+		isfi->initialized = 1;
+
+	ret = sysfs_create_group(&pdev->dev.kobj, &flis_attr_group);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to create flis sysfs interface\n");
+		goto err1;
+	}
+
+	ret = sysfs_create_group(&pdev->dev.kobj, &pin_config_attr_group);
+	if (ret) {
+		dev_err(&pdev->dev,
+				"Failed to create pin config sysfs interface\n");
+		goto err2;
+	}
+
+	dev_info(&pdev->dev, "scu flis probed\n");
+	return 0;
+
+err2:
+	sysfs_remove_group(&pdev->dev.kobj, &flis_attr_group);
+err1:
+	if (pdata->flis_base)
+		iounmap(isfi->flis_base);
+out:
+	isfi->initialized = 0;
+	return ret;
+}
+
+static int scu_flis_remove(struct platform_device *pdev)
+{
+	sysfs_remove_group(&pdev->dev.kobj, &pin_config_attr_group);
+	sysfs_remove_group(&pdev->dev.kobj, &flis_attr_group);
+
+	return 0;
+}
+
+static struct platform_driver scu_flis_driver = {
+	.driver = {
+		   .name = "intel_scu_flis",
+		   .owner = THIS_MODULE,
+		   },
+	.probe = scu_flis_probe,
+	.remove = scu_flis_remove,
+};
+
+static int scu_flis_module_init(void)
+{
+	return platform_driver_register(&scu_flis_driver);
+}
+
+static void scu_flis_module_exit(void)
+{
+	platform_driver_unregister(&scu_flis_driver);
+}
+
+static int flis_rpmsg_probe(struct rpmsg_channel *rpdev)
+{
+	int ret = 0;
+
+	if (rpdev == NULL) {
+		pr_err("rpmsg channel not created\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	dev_info(&rpdev->dev, "Probed flis rpmsg device\n");
+
+	/* Allocate rpmsg instance for flis*/
+	ret = alloc_rpmsg_instance(rpdev, &flis_instance);
+	if (!flis_instance) {
+		dev_err(&rpdev->dev, "kzalloc flis instance failed\n");
+		goto out;
+	}
+
+	/* Initialize rpmsg instance */
+	init_rpmsg_instance(flis_instance);
+
+	ret = scu_flis_module_init();
+	if (ret)
+		free_rpmsg_instance(rpdev, &flis_instance);
+
+out:
+	return ret;
+}
+
+static void flis_rpmsg_remove(struct rpmsg_channel *rpdev)
+{
+	scu_flis_module_exit();
+	free_rpmsg_instance(rpdev, &flis_instance);
+	dev_info(&rpdev->dev, "Removed flis rpmsg device\n");
+}
+
+static void flis_rpmsg_cb(struct rpmsg_channel *rpdev, void *data,
+					int len, void *priv, u32 src)
+{
+	dev_warn(&rpdev->dev, "unexpected, message\n");
+
+	print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+		       data, len,  true);
+}
+
+static struct rpmsg_device_id flis_rpmsg_id_table[] = {
+	{ .name	= "rpmsg_flis" },
+	{ },
+};
+MODULE_DEVICE_TABLE(rpmsg, flis_rpmsg_id_table);
+
+static struct rpmsg_driver flis_rpmsg = {
+	.drv.name	= KBUILD_MODNAME,
+	.drv.owner	= THIS_MODULE,
+	.id_table	= flis_rpmsg_id_table,
+	.probe		= flis_rpmsg_probe,
+	.callback	= flis_rpmsg_cb,
+	.remove		= flis_rpmsg_remove,
+};
+
+static int __init flis_rpmsg_init(void)
+{
+	return register_rpmsg_driver(&flis_rpmsg);
+}
+
+static void __exit flis_rpmsg_exit(void)
+{
+	return unregister_rpmsg_driver(&flis_rpmsg);
+}
+
+fs_initcall(flis_rpmsg_init);
+module_exit(flis_rpmsg_exit);
+
+MODULE_AUTHOR("Ning Li <ning.li@intel.com>");
+MODULE_DESCRIPTION("Intel FLIS Access Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel_scu_fw_update.c b/drivers/platform/x86/intel_scu_fw_update.c
new file mode 100644
index 0000000..74f85ea
--- /dev/null
+++ b/drivers/platform/x86/intel_scu_fw_update.c
@@ -0,0 +1,1387 @@
+/*
+ * fw_update.c - Intel SCU Firmware Update Driver
+ *
+ * Copyright (C) 2012 Intel Corporation
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/vmalloc.h>
+#include <linux/rpmsg.h>
+#include <linux/intel_mid_pm.h>
+#include <asm/intel_scu_ipc.h>
+#include <asm/intel_scu_pmic.h>
+#include <asm/intel_mid_rpmsg.h>
+#include <asm/intel-mid.h>
+#include <asm/msr.h>
+#include <asm/proto.h>
+
+/* Medfield & Cloverview firmware update.
+ * The flow and communication between IA and SCU has changed for
+ * Medfield firmware update. For more details, please refer to
+ * Firmware Arch Spec.
+ * Below macros and structs apply for medfield firmware update
+ */
+
+#define IPC_CMD_FW_UPDATE_GO	0x02
+
+#define MAX_FW_CHUNK		(128*1024)
+#define IFWX_CHUNK_SIZE		(96*1024)
+
+#define SRAM_ADDR		0xFFFC0000
+#define MAILBOX_ADDR		0xFFFE0000
+
+#define SCU_FLAG_OFFSET		8
+#define IA_FLAG_OFFSET		12
+
+#define MIP_HEADER_OFFSET	0
+#define SUCP_OFFSET		0x1D8000
+#define VEDFW_OFFSET		0x1A6000
+
+#define DNX_HDR_LEN		24
+#define FUPH_HDR_LEN		36
+
+#define DNX_IMAGE	"DXBL"
+#define FUPH_HDR_SIZE	"RUPHS"
+#define FUPH		"RUPH"
+#define MIP		"DMIP"
+#define IFWI		"IFW"
+#define LOWER_128K	"LOFW"
+#define UPPER_128K	"HIFW"
+#define PSFW1		"PSFW1"
+#define PSFW2		"PSFW2"
+#define SSFW		"SSFW"
+#define SUCP		"SuCP"
+#define VEDFW		"VEDFW"
+#define UPDATE_DONE	"HLT$"
+#define UPDATE_ABORT	"HLT0"
+#define UPDATE_ERROR	"ER"
+
+#define MAX_LEN_IFW	4
+#define MAX_LEN_PSFW	7
+#define MAX_LEN_SSFW	6
+#define MAX_LEN_SUCP	6
+#define MAX_LEN_VEDFW	7
+
+#define FUPH_MIP_OFFSET		0x04
+#define FUPH_IFWI_OFFSET	0x08
+#define FUPH_PSFW1_OFFSET	0x0c
+#define FUPH_PSFW2_OFFSET	0x10
+#define FUPH_SSFW_OFFSET	0x14
+#define FUPH_SUCP_OFFSET	0x18
+#define FUPH_VEDFW_OFFSET	0x1c
+
+#define DNX_MAX_SIZE	(128*1024)
+#define IFWI_MAX_SIZE	(3*1024*1024)
+#define FOTA_MEM_SIZE	(4*1024*1024)
+
+#define DNX_SIZE_OFFSET	0
+#define GP_FLAG_OFFSET	4
+#define XOR_CHK_OFFSET	20
+
+#define GPF_BIT32	1
+#define FUPH_STR	"UPH$"
+#define FUPH_MAX_LEN	36
+#define SKIP_BYTES	8
+
+static struct kobject *scu_fw_update_kobj;
+static struct rpmsg_instance *fw_update_instance;
+
+/* Modified IA-SCU mailbox for medfield firmware update. */
+struct ia_scu_mailbox {
+	char mail[8];
+	u32 scu_flag;
+	u32 ia_flag;
+};
+
+/* Structure to parse input from firmware-update application. */
+struct fw_ud {
+	u8 *fw_file_data;
+	u32 fsize;
+	u8 *dnx_hdr;
+	u8 *dnx_file_data;
+	u32 dnx_size;
+	u32 fuph_hdr_len;
+};
+
+struct mfld_fw_update {
+	void __iomem *sram;
+	void __iomem *mailbox;
+	u32 wscu;
+	u32 wia;
+	char mb_status[8];
+};
+
+/* Holds size parameters read from fuph header */
+struct fuph_hdr_attrs {
+	u32 mip_size;
+	u32 ifwi_size;
+	u32 psfw1_size;
+	u32 psfw2_size;
+	u32 ssfw_size;
+	u32 sucp_size;
+	u32 vedfw_size;
+};
+
+enum mailbox_status {
+	MB_DONE,
+	MB_CONTINUE,
+	MB_ERROR
+};
+
+/* Misc. firmware components that are part of integrated firmware */
+struct misc_fw {
+	const char *fw_type;
+	u8 str_len;
+};
+
+/* lock used to prevent multiple calls to fw update sysfs interface */
+static DEFINE_MUTEX(fwud_lock);
+
+static char err_buf[50];
+static u8 *pending_data;
+
+struct fw_update_info {
+	struct device *dev;
+	struct fw_ud *fwud_pending;
+};
+
+/* Used to store firmware version. */
+#define FW_VERSION_SIZE		16
+#define FW_VERSION_MAX_SIZE	36
+static u8 fw_version_raw_data[FW_VERSION_MAX_SIZE] = { 0 };
+
+static u8 pmic_nvm_version;
+
+static struct fw_update_info fui;
+
+static struct misc_fw misc_fw_table[] = {
+	{ .fw_type = IFWI, .str_len  = MAX_LEN_IFW },
+	{ .fw_type = PSFW1, .str_len  = MAX_LEN_PSFW },
+	{ .fw_type = SSFW, .str_len  = MAX_LEN_SSFW },
+	{ .fw_type = PSFW2, .str_len  = MAX_LEN_PSFW },
+	{ .fw_type = SUCP, .str_len  = MAX_LEN_SUCP },
+	{ .fw_type = VEDFW, .str_len  = MAX_LEN_VEDFW }
+};
+
+static int alloc_fota_mem_early;
+
+int __init alloc_mem_fota_early_flag(char *p)
+{
+	alloc_fota_mem_early = 1;
+	return 0;
+}
+early_param("alloc_fota_mem_early", alloc_mem_fota_early_flag);
+
+/*
+ * IA will wait in busy-state, and poll mailbox, to check
+ * if SCU is done processing.
+ * If it has to wait for more than a second, it will exit with
+ * error code.
+ */
+static int busy_wait(struct mfld_fw_update *mfld_fw_upd)
+{
+	u32 count = 0;
+	u32 flag;
+
+	flag = mfld_fw_upd->wscu;
+
+	while (ioread32(mfld_fw_upd->mailbox + SCU_FLAG_OFFSET) != flag
+		&& count < 500) {
+		/* There are synchronization issues between IA and SCU */
+		mb();
+		/* FIXME: we must use mdelay currently */
+		mdelay(10);
+		count++;
+	}
+
+	if (ioread32(mfld_fw_upd->mailbox + SCU_FLAG_OFFSET) != flag) {
+		dev_err(fui.dev, "IA-waited and quitting\n");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+/* This function will
+ * 1)copy firmware chunk from user-space to kernel-space.
+ * 2) Copy from kernel-space to shared SRAM.
+ * 3) Write to mailbox.
+ * 4) And wait for SCU to process that firmware chunk.
+ * Returns 0 on success, and < 0 for failure.
+ */
+static int process_fw_chunk(u8 *fws, u8 *userptr, u32 chunklen,
+					struct mfld_fw_update *mfld_fw_upd)
+{
+	memcpy(fws, userptr, chunklen);
+
+	/* IA copy to sram */
+	memcpy_toio(mfld_fw_upd->sram, fws, chunklen);
+
+	/* There are synchronization issues between IA and SCU */
+	mb();
+	mfld_fw_upd->wia = !(mfld_fw_upd->wia);
+	iowrite32(mfld_fw_upd->wia, mfld_fw_upd->mailbox + IA_FLAG_OFFSET);
+
+	mb();
+	dev_dbg(fui.dev, "wrote ia_flag=%d\n",
+		 ioread32(mfld_fw_upd->mailbox + IA_FLAG_OFFSET));
+
+	mfld_fw_upd->wscu = !mfld_fw_upd->wscu;
+	return busy_wait(mfld_fw_upd);
+}
+
+/*
+ * This function will check mailbox status flag, and return state of mailbox.
+ */
+static enum mailbox_status check_mb_status(struct mfld_fw_update *mfld_fw_upd)
+{
+
+	enum mailbox_status mb_state;
+
+	/* There are synchronization issues between IA and SCU */
+	mb();
+
+	memcpy_fromio(mfld_fw_upd->mb_status, mfld_fw_upd->mailbox, 8);
+
+	if (!strncmp(mfld_fw_upd->mb_status, UPDATE_ERROR,
+					sizeof(UPDATE_ERROR) - 1) ||
+		!strncmp(mfld_fw_upd->mb_status, UPDATE_ABORT,
+					sizeof(UPDATE_ABORT) - 1)) {
+		dev_dbg(fui.dev,
+			"mailbox error=%s\n", mfld_fw_upd->mb_status);
+		return MB_ERROR;
+	} else {
+		mb_state = (!strncmp(mfld_fw_upd->mb_status, UPDATE_DONE,
+			sizeof(UPDATE_DONE) - 1)) ? MB_DONE : MB_CONTINUE;
+		dev_dbg(fui.dev,
+			"mailbox pass=%s, mb_state=%d\n",
+			mfld_fw_upd->mb_status, mb_state);
+	}
+
+	return mb_state;
+}
+
+/* Helper function used to calculate length and offset.  */
+int helper_for_calc_offset_length(struct fw_ud *fw_ud_ptr, char *scu_req,
+			void **offset, u32 *len, struct fuph_hdr_attrs *fuph,
+			const char *fw_type)
+{
+	unsigned long chunk_no;
+	u32 chunk_rem;
+	u32 max_chunk_cnt;
+	u32 fw_size;
+	u32 fw_offset;
+	u32 max_fw_chunk_size = MAX_FW_CHUNK;
+
+	if (!strncmp(fw_type, IFWI, strlen(IFWI))) {
+
+		if (kstrtoul(scu_req + strlen(IFWI), 10, &chunk_no) < 0)
+			return -EINVAL;
+
+		/* On CTP, IFWx starts from IFW1, not IFW0, thus adjust the
+		 * chunk_no to make '*offset' point to the correct address.
+		 * Besides, the size of each IFWx chunk is 96k, not 128k
+		 */
+		chunk_no = chunk_no - 1;
+		fw_size = fuph->ifwi_size;
+		fw_offset = fuph->mip_size;
+		max_fw_chunk_size = IFWX_CHUNK_SIZE;
+	} else if (!strncmp(fw_type, PSFW1, strlen(PSFW1))) {
+
+		if (kstrtoul(scu_req + strlen(PSFW1), 10, &chunk_no) < 0)
+			return -EINVAL;
+
+		fw_size = fuph->psfw1_size;
+		fw_offset = fuph->mip_size + fuph->ifwi_size;
+	} else if (!strncmp(fw_type, PSFW2, strlen(PSFW2))) {
+
+		if (kstrtoul(scu_req + strlen(PSFW2), 10, &chunk_no) < 0)
+			return -EINVAL;
+
+		fw_size = fuph->psfw2_size;
+		fw_offset = fuph->mip_size + fuph->ifwi_size +
+				fuph->psfw1_size + fuph->ssfw_size;
+	} else if (!strncmp(fw_type, SSFW, strlen(SSFW))) {
+
+		if (kstrtoul(scu_req + strlen(SSFW), 10, &chunk_no) < 0)
+			return -EINVAL;
+
+		fw_size = fuph->ssfw_size;
+		fw_offset = fuph->mip_size + fuph->ifwi_size +
+				fuph->psfw1_size;
+	} else if (!strncmp(fw_type, SUCP, strlen(SUCP))) {
+
+		if (kstrtoul(scu_req + strlen(SUCP), 10, &chunk_no) < 0)
+			return -EINVAL;
+
+		fw_size = fuph->sucp_size;
+		fw_offset = SUCP_OFFSET;
+	} else if (!strncmp(fw_type, VEDFW, strlen(VEDFW))) {
+
+		if (kstrtoul(scu_req + strlen(VEDFW), 10, &chunk_no) < 0)
+			return -EINVAL;
+
+		fw_size = fuph->vedfw_size;
+		fw_offset = VEDFW_OFFSET;
+	} else
+		return -EINVAL;
+
+	chunk_rem = fw_size % max_fw_chunk_size;
+	max_chunk_cnt = (fw_size/max_fw_chunk_size) + (chunk_rem ? 1 : 0);
+
+	dev_dbg(fui.dev,
+		"str=%s,chunk_no=%lx, chunk_rem=%d,max_chunk_cnt=%d\n",
+		fw_type, chunk_no, chunk_rem, max_chunk_cnt);
+
+	if ((chunk_no + 1) > max_chunk_cnt)
+		return -EINVAL;
+
+	/* Note::Logic below will make sure, that we get right length if input
+	 is 128K or multiple. */
+	*len = (chunk_no == (max_chunk_cnt - 1)) ?
+		(chunk_rem ? chunk_rem : max_fw_chunk_size) : max_fw_chunk_size;
+
+	*offset = fw_ud_ptr->fw_file_data + fw_offset +
+		chunk_no * max_fw_chunk_size;
+
+	return 0;
+}
+
+/*
+ * This api calculates offset and length depending on type of firmware chunk
+ * requested by SCU. Note: Intent is to follow the architecture such that,
+ * SCU controls the flow, and IA simply hands out, what is requested by SCU.
+ * IA will simply follow SCU's commands, unless SCU requests for something
+ * IA cannot give. TODO:That will be a special error case, need to figure out
+ * how to handle that.
+ */
+int calc_offset_and_length(struct fw_ud *fw_ud_ptr, char *scu_req,
+			void **offset, u32 *len, struct fuph_hdr_attrs *fuph)
+{
+	u8 cnt;
+
+	if (!strncmp(DNX_IMAGE, scu_req, strlen(scu_req))) {
+		*offset = fw_ud_ptr->dnx_file_data;
+		*len = fw_ud_ptr->dnx_size;
+		return 0;
+	} else if (!strncmp(FUPH, scu_req, strlen(scu_req))) {
+		*offset = fw_ud_ptr->fw_file_data + fw_ud_ptr->fsize
+				- fw_ud_ptr->fuph_hdr_len;
+		*len = fw_ud_ptr->fuph_hdr_len;
+		return 0;
+	} else if (!strncmp(MIP, scu_req, strlen(scu_req))) {
+		*offset = fw_ud_ptr->fw_file_data + MIP_HEADER_OFFSET;
+		*len = fuph->mip_size;
+		return 0;
+	} else if (!strncmp(LOWER_128K, scu_req, strlen(scu_req))) {
+		*offset = fw_ud_ptr->fw_file_data + fuph->mip_size;
+		*len = MAX_FW_CHUNK;
+		return 0;
+	} else if (!strncmp(UPPER_128K, scu_req, strlen(scu_req))) {
+		*offset = fw_ud_ptr->fw_file_data
+				+ fuph->mip_size + MAX_FW_CHUNK;
+		*len = MAX_FW_CHUNK;
+		return 0;
+	} else {
+		for (cnt = 0; cnt < ARRAY_SIZE(misc_fw_table); cnt++) {
+
+			if (!strncmp(misc_fw_table[cnt].fw_type, scu_req,
+					strlen(misc_fw_table[cnt].fw_type))) {
+
+				if (strlen(scu_req) ==
+						misc_fw_table[cnt].str_len) {
+
+					if (helper_for_calc_offset_length
+						(fw_ud_ptr, scu_req,
+						offset, len, fuph,
+						misc_fw_table[cnt].fw_type) < 0)
+						goto error_case;
+
+					dev_dbg(fui.dev,
+					"\nmisc fw type=%s, len=%u,offset=%p",
+					misc_fw_table[cnt].fw_type, *len,
+					*offset);
+
+					return 0;
+
+				} else
+					goto error_case;
+			}
+		}
+
+	}
+
+	dev_dbg(fui.dev, "Unexpected mailbox request from scu\n");
+
+error_case:
+	/* TODO::Need to test this error case..and see how SCU reacts
+	* and how IA handles
+	* subsequent error response and whether exit is graceful...
+	*/
+
+	dev_dbg(fui.dev, "error case,respond back to SCU..\n");
+	dev_dbg(fui.dev, "scu_req=%s\n", scu_req);
+	*len = 0;
+	*offset = 0;
+
+	return -EINVAL;
+}
+
+/**
+ * intel_scu_ipc_medfw_upgrade - Medfield Firmware update utility
+ *
+ * The flow and communication between IA and SCU has changed for
+ * Medfield firmware update. So we have a different api below
+ * to support Medfield firmware update.
+ *
+ * On success returns 0, for failure , returns < 0.
+ */
+static int intel_scu_ipc_medfw_upgrade(void)
+{
+	struct fw_ud *fw_ud_param = fui.fwud_pending;
+	struct mfld_fw_update	mfld_fw_upd;
+	u8 *fw_file_data = NULL;
+	u8 *fws = NULL;
+	u8 *fuph_start = NULL;
+	int ret_val = 0;
+
+	struct fuph_hdr_attrs fuph;
+	u32 length = 0;
+	void *offset;
+	enum mailbox_status mb_state;
+
+	/* set all devices in d0i0 before IFWI upgrade */
+	if (unlikely(pmu_set_devices_in_d0i0())) {
+		pr_debug("pmu: failed to set all devices in d0i0...\n");
+		BUG();
+	}
+
+	if (reboot_force)
+		rpmsg_global_lock();
+
+	mfld_fw_upd.wscu = 0;
+	mfld_fw_upd.wia = 0;
+	memset(mfld_fw_upd.mb_status, 0, sizeof(char) * 8);
+
+	fw_file_data = fw_ud_param->fw_file_data;
+	mfld_fw_upd.sram = ioremap_nocache(SRAM_ADDR, MAX_FW_CHUNK);
+	if (mfld_fw_upd.sram == NULL) {
+		dev_err(fui.dev, "unable to map sram\n");
+		ret_val = -ENOMEM;
+		goto out_unlock;
+	}
+
+	mfld_fw_upd.mailbox = ioremap_nocache(MAILBOX_ADDR,
+					sizeof(struct ia_scu_mailbox));
+
+	if (mfld_fw_upd.mailbox == NULL) {
+		dev_err(fui.dev, "unable to map the mailbox\n");
+		ret_val = -ENOMEM;
+		goto unmap_sram;
+	}
+
+	/*IA initializes both IAFlag and SCUFlag to zero */
+	iowrite32(0, mfld_fw_upd.mailbox + SCU_FLAG_OFFSET);
+	iowrite32(0, mfld_fw_upd.mailbox + IA_FLAG_OFFSET);
+	memset_io(mfld_fw_upd.mailbox, 0, 8);
+
+	fws = kmalloc(MAX_FW_CHUNK, GFP_KERNEL);
+	if (fws == NULL) {
+		ret_val = -ENOMEM;
+		goto unmap_mb;
+	}
+
+	/* fuph header start */
+	fuph_start = fw_ud_param->fw_file_data + (fw_ud_param->fsize - 1)
+					- (fw_ud_param->fuph_hdr_len - 1);
+
+	/* Convert sizes in DWORDS to number of bytes. */
+	fuph.mip_size = (*((u32 *)(fuph_start + FUPH_MIP_OFFSET)))*4;
+	fuph.ifwi_size = (*((u32 *)(fuph_start + FUPH_IFWI_OFFSET)))*4;
+	fuph.psfw1_size = (*((u32 *)(fuph_start + FUPH_PSFW1_OFFSET)))*4;
+	fuph.psfw2_size = (*((u32 *)(fuph_start + FUPH_PSFW2_OFFSET)))*4;
+	fuph.ssfw_size = (*((u32 *)(fuph_start + FUPH_SSFW_OFFSET)))*4;
+	fuph.sucp_size = (*((u32 *)(fuph_start + FUPH_SUCP_OFFSET)))*4;
+
+	if (fw_ud_param->fuph_hdr_len == FUPH_HDR_LEN) {
+		fuph.vedfw_size =
+				(*((u32 *)(fuph_start + FUPH_VEDFW_OFFSET)))*4;
+	} else
+		fuph.vedfw_size = 0;
+
+	dev_dbg(fui.dev,
+		"ln=%d, mi=%d, if=%d, ps1=%d, ps2=%d, sfw=%d, sucp=%d, vd=%d\n",
+		fw_ud_param->fuph_hdr_len, fuph.mip_size, fuph.ifwi_size,
+		fuph.psfw1_size, fuph.psfw2_size, fuph.ssfw_size,
+		fuph.sucp_size,	fuph.vedfw_size);
+
+	/* TODO_SK::There is just
+	 *  1 write required from IA side for DFU.
+	 *  So commenting this-out, until it gets confirmed */
+	/*ipc_command(IPC_CMD_FW_UPDATE_READY); */
+
+	/*1. DNX SIZE HEADER   */
+	memcpy(fws, fw_ud_param->dnx_hdr, DNX_HDR_LEN);
+
+	memcpy_toio(mfld_fw_upd.sram, fws, DNX_HDR_LEN);
+
+	/* There are synchronization issues between IA and SCU */
+	mb();
+
+	/* Write cmd to trigger an interrupt to SCU for firmware update*/
+	if (reboot_force)
+		ret_val = rpmsg_send_simple_command(fw_update_instance,
+					    IPCMSG_FW_UPDATE,
+					    IPC_CMD_FW_UPDATE_GO);
+	else
+		ret_val = intel_scu_ipc_raw_cmd(IPCMSG_FW_UPDATE,
+				IPC_CMD_FW_UPDATE_GO, NULL, 0, NULL, 0, 0, 0);
+
+	if (ret_val) {
+		dev_err(fui.dev, "IPC_CMD_FW_UPDATE_GO failed\n");
+		goto term;
+	}
+
+	mfld_fw_upd.wscu = !mfld_fw_upd.wscu;
+
+	if (busy_wait(&mfld_fw_upd) < 0) {
+		ret_val = -1;
+		goto term;
+	}
+
+	/* TODO:Add a count for iteration, based on sizes of security firmware,
+	 * so that we determine finite number of iterations to loop thro.
+	 * That way at the very least, we can atleast control the number
+	 * of iterations, and prevent infinite looping if there are any bugs.
+	 * The only catch being for B0, SCU will request twice for each firmware
+	 * chunk, since its writing to 2 partitions.
+	 * TODO::Investigate if we need to increase timeout for busy_wait,
+	 * since SCU is now writing to 2 partitions.
+	 */
+
+	while ((mb_state = check_mb_status(&mfld_fw_upd)) != MB_DONE) {
+
+		if (mb_state == MB_ERROR) {
+			dev_dbg(fui.dev, "check_mb_status,error\n");
+			ret_val = -1;
+			goto term;
+		}
+
+		if (!strncmp(mfld_fw_upd.mb_status, FUPH_HDR_SIZE,
+				strlen(FUPH_HDR_SIZE))) {
+			iowrite32(fw_ud_param->fuph_hdr_len, mfld_fw_upd.sram);
+			/* There are synchronization issues between IA-SCU */
+			mb();
+			dev_dbg(fui.dev,
+				"copied fuph hdr size=%d\n",
+				ioread32(mfld_fw_upd.sram));
+			mfld_fw_upd.wia = !mfld_fw_upd.wia;
+			iowrite32(mfld_fw_upd.wia, mfld_fw_upd.mailbox +
+				IA_FLAG_OFFSET);
+			dev_dbg(fui.dev, "ia_flag=%d\n",
+				ioread32(mfld_fw_upd.mailbox + IA_FLAG_OFFSET));
+			mb();
+			mfld_fw_upd.wscu = !mfld_fw_upd.wscu;
+
+			if (busy_wait(&mfld_fw_upd) < 0) {
+				ret_val = -1;
+				goto term;
+			}
+
+			continue;
+		}
+
+		if (calc_offset_and_length(fw_ud_param, mfld_fw_upd.mb_status,
+					&offset, &length, &fuph) < 0) {
+			dev_err(fui.dev,
+			"calc_offset_and_length_error,error\n");
+			ret_val = -1;
+			goto term;
+		}
+
+		if ((process_fw_chunk(fws, offset, length,
+				      &mfld_fw_upd)) != 0) {
+			dev_err(fui.dev,
+			"Error processing fw chunk=%s\n",
+			mfld_fw_upd.mb_status);
+			ret_val = -1;
+			goto term;
+		} else
+			dev_dbg(fui.dev,
+				"PASS processing fw chunk=%s\n",
+				mfld_fw_upd.mb_status);
+	}
+	ret_val = intel_scu_ipc_check_status();
+
+term:
+	kfree(fws);
+unmap_mb:
+	iounmap(mfld_fw_upd.mailbox);
+unmap_sram:
+	iounmap(mfld_fw_upd.sram);
+out_unlock:
+	if (reboot_force)
+		rpmsg_global_unlock();
+
+	return ret_val;
+}
+
+static void cur_err(const char *err_info)
+{
+	strncpy(err_buf, err_info, sizeof(err_buf) - 1);
+}
+
+static ssize_t write_dnx(struct file *file, struct kobject *kobj,
+	struct bin_attribute *attr, char *buf, loff_t off, size_t count)
+{
+	int ret;
+
+	mutex_lock(&fwud_lock);
+
+	if (!pending_data) {
+		pending_data = vmalloc(FOTA_MEM_SIZE);
+		if (NULL == pending_data) {
+			cur_err("alloc fota memory by sysfs failed\n");
+			ret = -ENOMEM;
+			goto end;
+		}
+	}
+
+	fui.fwud_pending->dnx_file_data = pending_data + IFWI_MAX_SIZE;
+
+	if (unlikely(off >= DNX_MAX_SIZE)) {
+		fui.fwud_pending->dnx_file_data = NULL;
+		cur_err("too large dnx binary stream!");
+		ret = -EFBIG;
+		goto end;
+	}
+
+	memcpy(fui.fwud_pending->dnx_file_data + off, buf, count);
+
+	if (!off)
+		fui.fwud_pending->dnx_size = count;
+	else
+		fui.fwud_pending->dnx_size += count;
+
+	mutex_unlock(&fwud_lock);
+	return count;
+
+end:
+	mutex_unlock(&fwud_lock);
+	return ret;
+}
+
+/* Parses from the end of IFWI, and looks for UPH$,
+ * to determine length of FUPH header
+ */
+static int find_fuph_header_len(unsigned int *len,
+		unsigned char *file_data, unsigned int file_size)
+{
+	int ret = -EINVAL;
+	unsigned char *temp;
+	unsigned int cnt = 0;
+
+	if (!len || !file_data || !file_size) {
+		dev_err(fui.dev, "find_fuph_header_len: Invalid inputs\n");
+		return ret;
+	}
+
+	/* Skipping the checksum at the end, and moving to the
+	 * start of the last add-on firmware size in fuph.
+	 */
+	temp = file_data + file_size - SKIP_BYTES;
+
+	while (cnt <= FUPH_MAX_LEN) {
+		if (!strncmp(temp, FUPH_STR, sizeof(FUPH_STR) - 1)) {
+			pr_info("Fuph_hdr_len=%d\n", cnt + SKIP_BYTES);
+			*len = cnt + SKIP_BYTES;
+			ret = 0;
+			break;
+		}
+		temp -= 4;
+		cnt += 4;
+	}
+
+	return ret;
+}
+
+static ssize_t write_ifwi(struct file *file, struct kobject *kobj,
+	struct bin_attribute *attr, char *buf, loff_t off, size_t count)
+{
+	int ret;
+
+	mutex_lock(&fwud_lock);
+
+	if (!pending_data) {
+		pending_data = vmalloc(FOTA_MEM_SIZE);
+		if (NULL == pending_data) {
+			cur_err("alloc fota memory by sysfs failed\n");
+			ret = -ENOMEM;
+			goto end;
+		}
+	}
+
+	fui.fwud_pending->fw_file_data = pending_data;
+
+	if (unlikely(off >= IFWI_MAX_SIZE)) {
+		fui.fwud_pending->fw_file_data = NULL;
+		cur_err("too large ifwi binary stream!\n");
+		ret = -EFBIG;
+		goto end;
+	}
+
+	memcpy(fui.fwud_pending->fw_file_data + off, buf, count);
+
+	if (!off)
+		fui.fwud_pending->fsize = count;
+	else
+		fui.fwud_pending->fsize += count;
+
+	mutex_unlock(&fwud_lock);
+	return count;
+
+end:
+	mutex_unlock(&fwud_lock);
+	return ret;
+}
+
+/*
+ * intel_scu_fw_prepare - prepare dnx_hdr and fuph
+ *
+ * This function will be invoked at reboot, when DNX and IFWI data are ready.
+ */
+static int intel_scu_fw_prepare(struct fw_ud *fwud_pending)
+{
+	unsigned int size;
+	unsigned int gpFlags = 0;
+	unsigned int xorcs;
+	unsigned char dnxSH[DNX_HDR_LEN] = { 0 };
+
+	mutex_lock(&fwud_lock);
+
+	size = fui.fwud_pending->dnx_size;
+
+	/* Set GPFlags parameter */
+	gpFlags = gpFlags | (GPF_BIT32 << 31);
+	xorcs = (size ^ gpFlags);
+
+	memcpy((dnxSH + DNX_SIZE_OFFSET), (unsigned char *)(&size), 4);
+	memcpy((dnxSH + GP_FLAG_OFFSET), (unsigned char *)(&gpFlags), 4);
+	memcpy((dnxSH + XOR_CHK_OFFSET), (unsigned char *)(&xorcs), 4);
+
+	/* assign the last DNX_HDR_LEN bytes memory to dnx header */
+	fui.fwud_pending->dnx_hdr = pending_data + FOTA_MEM_SIZE - DNX_HDR_LEN;
+
+	/* directly memcpy to dnx_hdr */
+	memcpy(fui.fwud_pending->dnx_hdr, dnxSH, DNX_HDR_LEN);
+
+	if (find_fuph_header_len(&(fui.fwud_pending->fuph_hdr_len),
+			fui.fwud_pending->fw_file_data,
+			fui.fwud_pending->fsize) < 0) {
+		dev_err(fui.dev, "Error with FUPH header\n");
+		mutex_unlock(&fwud_lock);
+		return -EINVAL;
+	}
+
+	dev_dbg(fui.dev, "fupd_hdr_len=%d, fsize=%d, dnx_size=%d",
+		fui.fwud_pending->fuph_hdr_len,	fui.fwud_pending->fsize,
+		fui.fwud_pending->dnx_size);
+
+	mutex_unlock(&fwud_lock);
+	return 0;
+}
+
+int intel_scu_ipc_fw_update(void)
+{
+	int ret = 0;
+
+	/* jump fw upgrade process when fota memory not allocated
+	 * or when user cancels update
+	 * or when one of dnx and ifwi is not written
+	 * or when failure happens in writing one of dnx and ifwi
+	 */
+	if (!pending_data || !fui.fwud_pending ||
+		!fui.fwud_pending->dnx_file_data ||
+		!fui.fwud_pending->fw_file_data) {
+		pr_info("Jump FW upgrade process\n");
+		goto end;
+	}
+
+	ret = intel_scu_fw_prepare(fui.fwud_pending);
+	if (ret) {
+		dev_err(fui.dev, "intel_scu_fw_prepare failed\n");
+		goto end;
+	}
+
+	ret = intel_scu_ipc_medfw_upgrade();
+	if (ret)
+		dev_err(fui.dev, "intel_scu_ipc_medfw_upgrade failed\n");
+
+end:
+	return ret;
+}
+EXPORT_SYMBOL(intel_scu_ipc_fw_update);
+
+static ssize_t fw_version_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	int data_to_copy, i;
+	int used = 0;
+
+	if (intel_mid_identify_cpu() > INTEL_MID_CPU_CHIP_CLOVERVIEW)
+		data_to_copy = FW_VERSION_MAX_SIZE;
+	else
+		data_to_copy = FW_VERSION_SIZE;
+
+	for (i = 0; i < data_to_copy; i++)
+		used += snprintf(buf + used, PAGE_SIZE - used, "%x ",
+				fw_version_raw_data[i]);
+
+	return used;
+}
+
+/*
+ * Read IFWI version
+ */
+
+#define INTE_SCU_FW_BUF_LENGTH              256
+#define INTE_SCU_IPC_FW_VERSION_LENGTH      16
+
+#define INTE_SCU_IPC_FW_REVISION_MAJ_REG    15
+#define INTE_SCU_IPC_FW_REVISION_MIN_REG    14
+#define INTE_SCU_IPC_SCU_RT_FW_REVISION_MAJ_REG    1
+#define INTE_SCU_IPC_SCU_RT_FW_REVISION_MIN_REG    0
+#define INTE_SCU_IPC_PUNIT_FW_REVISION_MAJ_REG     5
+#define INTE_SCU_IPC_PUNIT_FW_REVISION_MIN_REG     4
+#define INTE_SCU_IPC_IA32_FW_REVISION_MAJ_REG      7
+#define INTE_SCU_IPC_IA32_FW_REVISION_MIN_REG      6
+#define INTE_SCU_IPC_SUPP_IA32_FW_REVISION_MAJ_REG 9
+#define INTE_SCU_IPC_SUPP_IA32_FW_REVISION_MIN_REG 8
+#define INTE_SCU_IPC_VALHOOKS_FW_REVISION_MAJ_REG  11
+#define INTE_SCU_IPC_VALHOOKS_FW_REVISION_MIN_REG  10
+
+#define INTE_SCU_IPC_SCU_BS_FW_REVISION_EXT_MIN_REG    0
+#define INTE_SCU_IPC_SCU_BS_FW_REVISION_EXT_MAJ_REG    2
+#define INTE_SCU_IPC_SCU_RT_FW_REVISION_EXT_MIN_REG    4
+#define INTE_SCU_IPC_SCU_RT_FW_REVISION_EXT_MAJ_REG    6
+#define INTE_SCU_IPC_IA32_FW_REVISION_EXT_MIN_REG      8
+#define INTE_SCU_IPC_IA32_FW_REVISION_EXT_MAJ_REG      10
+#define INTE_SCU_IPC_VALHOOKS_FW_REVISION_EXT_MIN_REG  12
+#define INTE_SCU_IPC_VALHOOKS_FW_REVISION_EXT_MAJ_REG  14
+
+#define INTE_SCU_IPC_FW_REVISION_EXT_MIN_REG           0
+#define INTE_SCU_IPC_FW_REVISION_EXT_MAJ_REG           2
+#define INTE_SCU_IPC_CHAABI_FW_REVISION_EXT_MIN_REG    4
+#define INTE_SCU_IPC_CHAABI_FW_REVISION_EXT_MAJ_REG    6
+#define INTE_SCU_IPC_MIA_FW_REVISION_EXT_MIN_REG       8
+#define INTE_SCU_IPC_MIA_FW_REVISION_EXT_MAJ_REG       10
+#define INTE_PUNIT_FW_REVISION_EXT_MIN_REG             12
+#define INTE_PUNIT_FW_REVISION_EXT_MAJ_REG             14
+#define INTE_UCODE_FW_REVISION_EXT_MIN_REG             16
+#define INTE_UCODE_FW_REVISION_EXT_MAJ_REG             18
+
+#define INTE_SCU_FW_OFFS	0
+#define INTE_SCU_FW_EXT_OFFS	1
+
+struct scu_ipc_version {
+	unsigned int    count;    /* length of version info */
+	unsigned char   data[FW_VERSION_MAX_SIZE]; /* version data */
+	char            scu_bs[FW_VERSION_SIZE];
+	char            scu_rt[FW_VERSION_SIZE];
+	char            ia32fw[FW_VERSION_SIZE];
+	char            supp_ia32fw[FW_VERSION_SIZE];
+	char            valhooks[FW_VERSION_SIZE];
+	char            ifwi[FW_VERSION_SIZE];
+	char            chaabi[FW_VERSION_SIZE];
+	char            mia[FW_VERSION_SIZE];
+	char            punit[FW_VERSION_SIZE];
+	char            ucode[FW_VERSION_SIZE];
+};
+
+struct scu_ipc_version version;
+
+static void format_rev_4_digit(struct scu_ipc_version ver, int vers_ext, char *buf,
+		int pos_maj, int pos_min)
+{
+	int offs;
+
+	if (vers_ext)
+		offs = FW_VERSION_SIZE;
+	else
+		offs = 0;
+	snprintf(buf, FW_VERSION_SIZE, "%.4X.%.4X",
+		ver.data[offs + pos_maj + 1] << 8 | ver.data[offs + pos_maj],
+		ver.data[offs + pos_min + 1] << 8 | ver.data[offs + pos_min]);
+}
+
+static void format_rev_2_digit(struct scu_ipc_version ver, char *buf,
+		int pos_maj, int pos_min)
+{
+	snprintf(buf, FW_VERSION_SIZE, "%.2X.%.2X",
+		ver.data[pos_maj], ver.data[pos_min]);
+}
+
+static void read_ifwi_version(void)
+{
+	bool ifwi_rev_ext = false;
+
+	/* Check how to read components versions. For example, with older */
+	/* CPU, major/minor version is coded on 2 digits only.            */
+	if (intel_mid_identify_cpu() > INTEL_MID_CPU_CHIP_CLOVERVIEW)
+		ifwi_rev_ext = true;
+
+	version.count = FW_VERSION_SIZE;
+	memcpy(version.data, fw_version_raw_data, FW_VERSION_SIZE);
+
+	if (ifwi_rev_ext) {
+		memcpy(version.data + FW_VERSION_SIZE,
+			fw_version_raw_data + FW_VERSION_SIZE,
+			FW_VERSION_MAX_SIZE - FW_VERSION_SIZE);
+
+		format_rev_4_digit(version, INTE_SCU_FW_OFFS, version.scu_bs,
+				INTE_SCU_IPC_SCU_BS_FW_REVISION_EXT_MAJ_REG,
+				INTE_SCU_IPC_SCU_BS_FW_REVISION_EXT_MIN_REG);
+		pr_info("SCU BS Version: %s\n", version.scu_bs);
+
+		format_rev_4_digit(version, INTE_SCU_FW_OFFS, version.scu_rt,
+				INTE_SCU_IPC_SCU_RT_FW_REVISION_EXT_MAJ_REG,
+				INTE_SCU_IPC_SCU_RT_FW_REVISION_EXT_MIN_REG);
+		pr_info("SCU RT Version: %s\n", version.scu_rt);
+
+		format_rev_4_digit(version, INTE_SCU_FW_OFFS, version.ia32fw,
+				INTE_SCU_IPC_IA32_FW_REVISION_EXT_MAJ_REG,
+				INTE_SCU_IPC_IA32_FW_REVISION_EXT_MIN_REG);
+		pr_info("IA32FW Version: %s\n", version.ia32fw);
+
+		format_rev_4_digit(version, INTE_SCU_FW_OFFS, version.valhooks,
+				INTE_SCU_IPC_VALHOOKS_FW_REVISION_EXT_MAJ_REG,
+				INTE_SCU_IPC_VALHOOKS_FW_REVISION_EXT_MIN_REG);
+		pr_info("ValHooks Version: %s\n", version.valhooks);
+
+		format_rev_4_digit(version, INTE_SCU_FW_EXT_OFFS, version.ifwi,
+				INTE_SCU_IPC_FW_REVISION_EXT_MAJ_REG,
+				INTE_SCU_IPC_FW_REVISION_EXT_MIN_REG);
+		pr_info("IFWI Version: %s\n", version.ifwi);
+
+		format_rev_4_digit(version, INTE_SCU_FW_EXT_OFFS, version.chaabi,
+				INTE_SCU_IPC_CHAABI_FW_REVISION_EXT_MAJ_REG,
+				INTE_SCU_IPC_CHAABI_FW_REVISION_EXT_MIN_REG);
+		pr_info("CHAABI Version: %s\n", version.chaabi);
+
+		format_rev_4_digit(version, INTE_SCU_FW_EXT_OFFS, version.mia,
+				INTE_SCU_IPC_MIA_FW_REVISION_EXT_MAJ_REG,
+				INTE_SCU_IPC_MIA_FW_REVISION_EXT_MIN_REG);
+		pr_info("mIA Version: %s\n", version.mia);
+
+		format_rev_4_digit(version, INTE_SCU_FW_EXT_OFFS, version.punit,
+				INTE_PUNIT_FW_REVISION_EXT_MAJ_REG,
+				INTE_PUNIT_FW_REVISION_EXT_MIN_REG);
+		pr_info("PUnit Version: %s\n", version.punit);
+
+		format_rev_4_digit(version, INTE_SCU_FW_EXT_OFFS, version.ucode,
+				INTE_UCODE_FW_REVISION_EXT_MAJ_REG,
+				INTE_UCODE_FW_REVISION_EXT_MIN_REG);
+		pr_info("uCode Version: %s\n", version.ucode);
+	} else {
+		format_rev_2_digit(version, version.ifwi,
+				INTE_SCU_IPC_FW_REVISION_MAJ_REG,
+				INTE_SCU_IPC_FW_REVISION_MIN_REG);
+		pr_info("IFWI Version: %s\n", version.ifwi);
+
+		format_rev_2_digit(version, version.scu_rt,
+				INTE_SCU_IPC_SCU_RT_FW_REVISION_MAJ_REG,
+				INTE_SCU_IPC_SCU_RT_FW_REVISION_MIN_REG);
+		pr_info("SCU Version: %s\n", version.scu_rt);
+
+		format_rev_2_digit(version, version.punit,
+				INTE_SCU_IPC_PUNIT_FW_REVISION_MAJ_REG,
+				INTE_SCU_IPC_PUNIT_FW_REVISION_MIN_REG);
+		pr_info("PUnit Version: %s\n", version.punit);
+
+		format_rev_2_digit(version, version.ia32fw,
+				INTE_SCU_IPC_IA32_FW_REVISION_MAJ_REG,
+				INTE_SCU_IPC_IA32_FW_REVISION_MIN_REG);
+		pr_info("IA32FW Version: %s\n", version.ia32fw);
+
+		format_rev_2_digit(version, version.supp_ia32fw,
+				INTE_SCU_IPC_SUPP_IA32_FW_REVISION_MAJ_REG,
+				INTE_SCU_IPC_SUPP_IA32_FW_REVISION_MIN_REG);
+		pr_info("SUPP IA32FW Version: %s\n", version.supp_ia32fw);
+
+		format_rev_2_digit(version, version.valhooks,
+				INTE_SCU_IPC_VALHOOKS_FW_REVISION_MAJ_REG,
+				INTE_SCU_IPC_VALHOOKS_FW_REVISION_MIN_REG);
+		pr_info("ValHooks Version: %s\n", version.valhooks);
+	}
+	return;
+}
+
+#define MSR_PUNIT_VERSION_ADDR 0x667
+#define MSR_UCODE_VERSION_ADDR 0x8b
+
+static int fw_version_info(void)
+{
+	int ret;
+	u32 low, high;
+
+	memset(fw_version_raw_data, 0, FW_VERSION_MAX_SIZE);
+
+	ret = rpmsg_send_command(fw_update_instance, IPCMSG_FW_REVISION, 0,
+				NULL, (u32 *)fw_version_raw_data, 0, 4);
+	if (ret < 0) {
+		cur_err("Error getting fw version");
+		return -EINVAL;
+	}
+
+	if (intel_mid_identify_cpu() > INTEL_MID_CPU_CHIP_CLOVERVIEW) {
+		ret = rpmsg_send_command(fw_update_instance,
+			IPCMSG_FW_REVISION, 1, NULL,
+			(u32 *)(fw_version_raw_data + FW_VERSION_SIZE), 0, 4);
+		if (ret < 0) {
+			cur_err("Error getting fw version");
+			return -EINVAL;
+		}
+		/* PUnit version is not availabe in SMIP, we get it with a
+		   machine-specific register read, same for uCode */
+		rdmsr(MSR_PUNIT_VERSION_ADDR, low, high);
+		*(u32 *)(fw_version_raw_data + FW_VERSION_SIZE +
+				INTE_PUNIT_FW_REVISION_EXT_MIN_REG) = low;
+		rdmsr(MSR_UCODE_VERSION_ADDR, low, high);
+		*(u32 *)(fw_version_raw_data + FW_VERSION_SIZE +
+				INTE_UCODE_FW_REVISION_EXT_MIN_REG) = high;
+	}
+
+	read_ifwi_version();
+
+	if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_ANNIEDALE) {
+		ret = intel_scu_ipc_ioread8(0x6E08, &pmic_nvm_version);
+		if (ret < 0) {
+			cur_err("Error getting PMIC NVM version");
+			return -EINVAL;
+		}
+		pr_info("PMIC NVM Version: %.2X\n", pmic_nvm_version);
+	}
+	return 0;
+}
+
+static ssize_t sys_version_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	if (intel_mid_identify_cpu() > INTEL_MID_CPU_CHIP_CLOVERVIEW) {
+		if (strcmp(attr->attr.name, "chaabi_version") == 0)
+			return snprintf(buf, PAGE_SIZE, "%s\n",
+					version.chaabi);
+		if (strcmp(attr->attr.name, "mia_version") == 0)
+			return snprintf(buf, PAGE_SIZE, "%s\n",
+					version.mia);
+		if (strcmp(attr->attr.name, "scu_bs_version") == 0)
+			return snprintf(buf, PAGE_SIZE, "%s\n",
+					version.scu_bs);
+		if (strcmp(attr->attr.name, "ucode_version") == 0)
+			return snprintf(buf, PAGE_SIZE, "%s\n",
+					version.ucode);
+	} else {
+		if (strcmp(attr->attr.name, "supp_ia32fw_version") == 0)
+			return snprintf(buf, PAGE_SIZE, "%s\n", version.supp_ia32fw);
+	}
+
+	if (strcmp(attr->attr.name, "punit_version") == 0)
+		return snprintf(buf, PAGE_SIZE, "%s\n", version.punit);
+	if (strcmp(attr->attr.name, "ifwi_version") == 0)
+		return snprintf(buf, PAGE_SIZE, "%s\n", version.ifwi);
+	if (strcmp(attr->attr.name, "scu_version") == 0)
+		return snprintf(buf, PAGE_SIZE, "%s\n", version.scu_rt);
+	if (strcmp(attr->attr.name, "ia32fw_version") == 0)
+		return snprintf(buf, PAGE_SIZE, "%s\n", version.ia32fw);
+	if (strcmp(attr->attr.name, "valhooks_version") == 0)
+		return snprintf(buf, PAGE_SIZE, "%s\n", version.valhooks);
+
+	pr_err("component version not found\n");
+	return 0;
+}
+
+static ssize_t pmic_nvm_version_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%.2X\n", pmic_nvm_version);
+}
+
+static ssize_t last_error_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%s\n", err_buf);
+}
+
+static ssize_t cancel_update_store(struct kobject *kobj,
+		struct kobj_attribute *attr, const char *buf, size_t size)
+{
+	int value;
+
+	if (sscanf(buf, "%d", &value) != 1) {
+		cur_err("One argument is needed\n");
+		return -EINVAL;
+	}
+
+	if (value == 1) {
+		mutex_lock(&fwud_lock);
+		fui.fwud_pending->fw_file_data = NULL;
+		fui.fwud_pending->dnx_file_data = NULL;
+		mutex_unlock(&fwud_lock);
+	} else {
+		cur_err("input '1' to cancel fw upgrade\n");
+		return -EINVAL;
+	}
+
+	return size;
+}
+
+#define __BIN_ATTR(_name, _mode, _size, _read, _write) { \
+	.attr = {.name = __stringify(_name), .mode = _mode },	\
+	.size	= _size,					\
+	.read	= _read,					\
+	.write	= _write,					\
+}
+
+#define BIN_ATTR(_name, _mode, _size, _read, _write) \
+struct bin_attribute bin_attr_##_name =	\
+	__BIN_ATTR(_name, _mode, _size, _read, _write)
+
+#define KOBJ_FW_UPDATE_ATTR(_name, _mode, _show, _store) \
+	struct kobj_attribute _name##_attr = __ATTR(_name, _mode, _show, _store)
+
+static KOBJ_FW_UPDATE_ATTR(cancel_update, S_IWUSR, NULL, cancel_update_store);
+static KOBJ_FW_UPDATE_ATTR(fw_version, S_IRUSR, fw_version_show, NULL);
+static KOBJ_FW_UPDATE_ATTR(ifwi_version, S_IRUSR, sys_version_show, NULL);
+static KOBJ_FW_UPDATE_ATTR(chaabi_version, S_IRUSR, sys_version_show, NULL);
+static KOBJ_FW_UPDATE_ATTR(mia_version, S_IRUSR, sys_version_show, NULL);
+static KOBJ_FW_UPDATE_ATTR(scu_bs_version, S_IRUSR, sys_version_show, NULL);
+static KOBJ_FW_UPDATE_ATTR(scu_version, S_IRUSR, sys_version_show, NULL);
+static KOBJ_FW_UPDATE_ATTR(punit_version, S_IRUSR, sys_version_show, NULL);
+static KOBJ_FW_UPDATE_ATTR(ia32fw_version, S_IRUSR, sys_version_show, NULL);
+static KOBJ_FW_UPDATE_ATTR(supp_ia32fw_version, S_IRUSR, sys_version_show, NULL);
+static KOBJ_FW_UPDATE_ATTR(valhooks_version, S_IRUSR, sys_version_show, NULL);
+static KOBJ_FW_UPDATE_ATTR(ucode_version, S_IRUSR, sys_version_show, NULL);
+
+static KOBJ_FW_UPDATE_ATTR(last_error, S_IRUSR, last_error_show, NULL);
+static KOBJ_FW_UPDATE_ATTR(pmic_nvm_version, S_IRUSR, pmic_nvm_version_show, NULL);
+static BIN_ATTR(dnx, S_IWUSR, DNX_MAX_SIZE, NULL, write_dnx);
+static BIN_ATTR(ifwi, S_IWUSR, IFWI_MAX_SIZE, NULL, write_ifwi);
+
+static struct attribute *fw_update_attrs[] = {
+	&cancel_update_attr.attr,
+	&fw_version_attr.attr,
+	&ifwi_version_attr.attr,
+	&chaabi_version_attr.attr,
+	&mia_version_attr.attr,
+	&scu_bs_version_attr.attr,
+	&scu_version_attr.attr,
+	&punit_version_attr.attr,
+	&ia32fw_version_attr.attr,
+	&supp_ia32fw_version_attr.attr,
+	&valhooks_version_attr.attr,
+	&ucode_version_attr.attr,
+	&pmic_nvm_version_attr.attr,
+	&last_error_attr.attr,
+	NULL,
+};
+
+static struct attribute_group fw_update_attr_group = {
+	.name = "fw_info",
+	.attrs = fw_update_attrs,
+};
+
+static int intel_fw_update_sysfs_create(struct kobject *kobj)
+{
+	int ret;
+
+	ret = sysfs_create_group(kobj, &fw_update_attr_group);
+	if (ret) {
+		dev_err(fui.dev, "Unable to export sysfs interface\n");
+		goto out;
+	}
+
+	ret = sysfs_create_bin_file(kobj, &bin_attr_dnx);
+	if (ret) {
+		dev_err(fui.dev, "Unable to create dnx bin file\n");
+		goto err_dnx_bin;
+	}
+
+	ret = sysfs_create_bin_file(kobj, &bin_attr_ifwi);
+	if (ret) {
+		dev_err(fui.dev, "Unable to create ifwi bin file\n");
+		goto err_ifwi_bin;
+	}
+
+	return 0;
+
+err_ifwi_bin:
+	sysfs_remove_bin_file(kobj, &bin_attr_dnx);
+err_dnx_bin:
+	sysfs_remove_group(kobj, &fw_update_attr_group);
+out:
+	return ret;
+}
+
+static void intel_fw_update_sysfs_remove(struct kobject *kobj)
+{
+	sysfs_remove_bin_file(kobj, &bin_attr_ifwi);
+	sysfs_remove_bin_file(kobj, &bin_attr_dnx);
+	sysfs_remove_group(kobj, &fw_update_attr_group);
+}
+
+static int fw_update_rpmsg_probe(struct rpmsg_channel *rpdev)
+{
+	int ret;
+	struct fw_update_info *fu_info = &fui;
+
+	if (rpdev == NULL) {
+		pr_err("fw_update rpmsg channel not created\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	dev_info(&rpdev->dev, "Probed fw_update rpmsg device\n");
+
+	/* Allocate rpmsg instance for fw_update*/
+	ret = alloc_rpmsg_instance(rpdev, &fw_update_instance);
+	if (!fw_update_instance) {
+		dev_err(&rpdev->dev, "kzalloc fw_update instance failed\n");
+		goto out;
+	}
+	/* Initialize rpmsg instance */
+	init_rpmsg_instance(fw_update_instance);
+
+	fu_info->dev = &rpdev->dev;
+
+	fui.fwud_pending = kzalloc(sizeof(struct fw_ud), GFP_KERNEL);
+	if (NULL == fui.fwud_pending) {
+		ret = -ENOMEM;
+		dev_err(fui.dev, "alloc fwud_pending memory failed\n");
+		goto err_fwud_pending;
+	}
+
+	scu_fw_update_kobj = kobject_create_and_add("fw_update", kernel_kobj);
+	if (!scu_fw_update_kobj) {
+		ret = -ENOMEM;
+		dev_err(fui.dev, "create kobject failed\n");
+		goto err_kobj;
+	}
+
+	ret = intel_fw_update_sysfs_create(scu_fw_update_kobj);
+	if (ret) {
+		dev_err(fui.dev, "creating fw update sysfs failed\n");
+		goto err_free_fwud;
+	}
+
+	dev_info(&rpdev->dev, "Getting current fw version\n");
+	ret = fw_version_info();
+	if (ret) {
+		dev_err(fui.dev, "cannot get current fw version\n");
+		goto err_sysfs;
+	}
+
+	/* If alloc_fota_mem_early flag is set, allocate FOTA_MEM_SIZE
+	 * bytes memory.
+	 * reserve the first contiguous IFWI_MAX_SIZE bytes for IFWI,
+	 * the next contiguous DNX_MAX_SIZE bytes are reserved for DNX,
+	 * the last DNX_HDR_LEN bytes for DNX Header
+	 */
+	if (alloc_fota_mem_early) {
+		pending_data = vmalloc(FOTA_MEM_SIZE);
+		if (NULL == pending_data) {
+			ret = -ENOMEM;
+			dev_err(fui.dev, "early alloc fota memory failed\n");
+			goto err_sysfs;
+		}
+	}
+
+	return 0;
+
+err_sysfs:
+	intel_fw_update_sysfs_remove(scu_fw_update_kobj);
+err_free_fwud:
+	kobject_put(scu_fw_update_kobj);
+err_kobj:
+	kfree(fui.fwud_pending);
+	fui.fwud_pending = NULL;
+err_fwud_pending:
+	free_rpmsg_instance(rpdev, &fw_update_instance);
+out:
+	return ret;
+}
+
+static void fw_update_rpmsg_remove(struct rpmsg_channel *rpdev)
+{
+	free_rpmsg_instance(rpdev, &fw_update_instance);
+	intel_fw_update_sysfs_remove(scu_fw_update_kobj);
+	kobject_put(scu_fw_update_kobj);
+
+	vfree(pending_data);
+	pending_data = NULL;
+	kfree(fui.fwud_pending);
+	fui.fwud_pending = NULL;
+}
+
+static void fw_update_rpmsg_cb(struct rpmsg_channel *rpdev, void *data,
+					int len, void *priv, u32 src)
+{
+	dev_warn(&rpdev->dev, "unexpected, message\n");
+
+	print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+		       data, len,  true);
+}
+
+static struct rpmsg_device_id fw_update_rpmsg_id_table[] = {
+	{ .name	= "rpmsg_fw_update" },
+	{ },
+};
+MODULE_DEVICE_TABLE(rpmsg, fw_update_rpmsg_id_table);
+
+static struct rpmsg_driver fw_update_rpmsg = {
+	.drv.name	= KBUILD_MODNAME,
+	.drv.owner	= THIS_MODULE,
+	.id_table	= fw_update_rpmsg_id_table,
+	.probe		= fw_update_rpmsg_probe,
+	.callback	= fw_update_rpmsg_cb,
+	.remove		= fw_update_rpmsg_remove,
+};
+
+static int __init fw_update_module_init(void)
+{
+	return register_rpmsg_driver(&fw_update_rpmsg);
+}
+
+static void __exit fw_update_module_exit(void)
+{
+	unregister_rpmsg_driver(&fw_update_rpmsg);
+}
+
+module_init(fw_update_module_init);
+module_exit(fw_update_module_exit);
+
+MODULE_AUTHOR("Sreedhara DS <sreedhara.ds@intel.com>");
+MODULE_AUTHOR("Ning Li <ning.li@intel.com>");
+MODULE_DESCRIPTION("Intel SCU Firmware Update Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
index 9215ed7..d1f6a62 100644
--- a/drivers/platform/x86/intel_scu_ipc.c
+++ b/drivers/platform/x86/intel_scu_ipc.c
@@ -23,22 +23,68 @@
 #include <linux/pm.h>
 #include <linux/pci.h>
 #include <linux/interrupt.h>
-#include <linux/sfi.h>
 #include <linux/module.h>
-#include <asm/mrst.h>
+#include <asm/intel-mid.h>
 #include <asm/intel_scu_ipc.h>
+#include <linux/pm_qos.h>
+#include <linux/intel_mid_pm.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/sched.h>
+#include <linux/atomic.h>
+#include <linux/notifier.h>
+#include <linux/suspend.h>
+#include <linux/wakelock.h>
 
-/* IPC defines the following message types */
-#define IPCMSG_WATCHDOG_TIMER 0xF8 /* Set Kernel Watchdog Threshold */
-#define IPCMSG_BATTERY        0xEF /* Coulomb Counter Accumulator */
-#define IPCMSG_FW_UPDATE      0xFE /* Firmware update */
-#define IPCMSG_PCNTRL         0xFF /* Power controller unit read/write */
-#define IPCMSG_FW_REVISION    0xF4 /* Get firmware revision */
+enum {
+	SCU_IPC_LINCROFT,
+	SCU_IPC_PENWELL,
+	SCU_IPC_CLOVERVIEW,
+	SCU_IPC_TANGIER,
+};
 
-/* Command id associated with message IPCMSG_PCNTRL */
-#define IPC_CMD_PCNTRL_W      0 /* Register write */
-#define IPC_CMD_PCNTRL_R      1 /* Register read */
-#define IPC_CMD_PCNTRL_M      2 /* Register read-modify-write */
+/* intel scu ipc driver data*/
+struct intel_scu_ipc_pdata_t {
+	u32 ipc_base;
+	u32 i2c_base;
+	u32 ipc_len;
+	u32 i2c_len;
+};
+
+static struct intel_scu_ipc_pdata_t intel_scu_ipc_pdata[] = {
+	[SCU_IPC_LINCROFT] = {
+		.ipc_base = 0xff11c000,
+		.i2c_base = 0xff12b000,
+		.ipc_len = 0x100,
+		.i2c_len = 0x10,
+	},
+	[SCU_IPC_PENWELL] = {
+		.ipc_base = 0xff11c000,
+		.i2c_base = 0xff12b000,
+		.ipc_len = 0x100,
+		.i2c_len = 0x10,
+	},
+	[SCU_IPC_CLOVERVIEW] = {
+		.ipc_base = 0xff11c000,
+		.i2c_base = 0xff12b000,
+		.ipc_len = 0x100,
+		.i2c_len = 0x10,
+	},
+	[SCU_IPC_TANGIER] = {
+		.ipc_base = 0xff009000,
+		.i2c_base  = 0xff00d000,
+		.ipc_len  = 0x100,
+		.i2c_len = 0x10,
+	},
+};
+static int  scu_ipc_pm_callback(struct notifier_block *nb,
+					unsigned long action,
+					void *ignored);
+
+static struct notifier_block scu_ipc_pm_notifier = {
+	.notifier_call = scu_ipc_pm_callback,
+	.priority = 1,
+};
 
 /*
  * IPC register summary
@@ -58,37 +104,87 @@
  *    message handler is called within firmware.
  */
 
-#define IPC_BASE_ADDR     0xFF11C000	/* IPC1 base register address */
-#define IPC_MAX_ADDR      0x100		/* Maximum IPC regisers */
-#define IPC_WWBUF_SIZE    20		/* IPC Write buffer Size */
-#define IPC_RWBUF_SIZE    20		/* IPC Read buffer Size */
-#define IPC_I2C_BASE      0xFF12B000	/* I2C control register base address */
-#define IPC_I2C_MAX_ADDR  0x10		/* Maximum I2C regisers */
+#define IPC_STATUS_ADDR		0X04
+#define IPC_SPTR_ADDR		0x08
+#define IPC_DPTR_ADDR		0x0C
+#define IPC_READ_BUFFER		0x90
+#define IPC_WRITE_BUFFER	0x80
+#define IPC_IOC			0x100
 
-static int ipc_probe(struct pci_dev *dev, const struct pci_device_id *id);
-static void ipc_remove(struct pci_dev *pdev);
-
-struct intel_scu_ipc_dev {
+struct intel_ipc_controller {
 	struct pci_dev *pdev;
 	void __iomem *ipc_base;
 	void __iomem *i2c_base;
+	int ioc;
+	int cmd;
+	struct completion cmd_complete;
 };
 
-static struct intel_scu_ipc_dev  ipcdev; /* Only one for now */
+static struct intel_ipc_controller  ipcdev; /* Only one for now */
 
-static int platform;		/* Platform type */
+static int platform; /* Platform type */
 
-/*
- * IPC Read Buffer (Read Only):
- * 16 byte buffer for receiving data from SCU, if IPC command
- * processing results in response data
- */
-#define IPC_READ_BUFFER		0x90
+static char *ipc_err_sources[] = {
+	[IPC_ERR_NONE] =
+		"no error",
+	[IPC_ERR_CMD_NOT_SUPPORTED] =
+		"command not supported",
+	[IPC_ERR_CMD_NOT_SERVICED] =
+		"command not serviced",
+	[IPC_ERR_UNABLE_TO_SERVICE] =
+		"unable to service",
+	[IPC_ERR_CMD_INVALID] =
+		"command invalid",
+	[IPC_ERR_CMD_FAILED] =
+		"command failed",
+	[IPC_ERR_EMSECURITY] =
+		"Invalid Battery",
+	[IPC_ERR_UNSIGNEDKERNEL] =
+		"Unsigned kernel",
+};
 
 #define IPC_I2C_CNTRL_ADDR	0
 #define I2C_DATA_ADDR		0x04
 
-static DEFINE_MUTEX(ipclock); /* lock used to prevent multiple call to SCU */
+static struct wake_lock ipc_wake_lock;
+
+/* PM Qos struct */
+static struct pm_qos_request *qos;
+
+/* Suspend status*/
+static bool suspend_status;
+static DEFINE_MUTEX(scu_suspend_lock);
+
+/* Suspend status get */
+bool suspend_in_progress(void)
+{
+	return suspend_status;
+}
+
+/* Suspend status set */
+void set_suspend_status(bool status)
+{
+	mutex_lock(&scu_suspend_lock);
+	suspend_status = status;
+	mutex_unlock(&scu_suspend_lock);
+}
+
+/* IPC PM notifier callback */
+static int scu_ipc_pm_callback(struct notifier_block *nb,
+					unsigned long action,
+					void *ignored)
+{
+	switch (action) {
+	case PM_SUSPEND_PREPARE:
+		set_suspend_status(true);
+		return NOTIFY_OK;
+	case PM_POST_SUSPEND:
+		set_suspend_status(false);
+		return NOTIFY_OK;
+	}
+
+	return NOTIFY_DONE;
+}
 
 /*
  * Command Register (Write Only):
@@ -96,9 +192,18 @@
  * Format:
  * |rfu2(8) | size(8) | command id(4) | rfu1(3) | ioc(1) | command(8)|
  */
-static inline void ipc_command(u32 cmd) /* Send ipc command */
+void intel_scu_ipc_send_command(u32 cmd) /* Send ipc command */
 {
-	writel(cmd, ipcdev.ipc_base);
+	ipcdev.cmd = cmd;
+	INIT_COMPLETION(ipcdev.cmd_complete);
+
+	if (system_state == SYSTEM_RUNNING && !suspend_in_progress()) {
+		ipcdev.ioc = 1;
+		writel(cmd | IPC_IOC, ipcdev.ipc_base);
+	} else {
+		ipcdev.ioc = 0;
+		writel(cmd, ipcdev.ipc_base);
+	}
 }
 
 /*
@@ -108,7 +213,7 @@
  */
 static inline void ipc_data_writel(u32 data, u32 offset) /* Write ipc data */
 {
-	writel(data, ipcdev.ipc_base + 0x80 + offset);
+	writel(data, ipcdev.ipc_base + IPC_WRITE_BUFFER + offset);
 }
 
 /*
@@ -119,9 +224,9 @@
  * |rfu3(8)|error code(8)|initiator id(8)|cmd id(4)|rfu1(2)|error(1)|busy(1)|
  */
 
-static inline u8 ipc_read_status(void)
+static inline u32 ipc_read_status(void)
 {
-	return __raw_readl(ipcdev.ipc_base + 0x04);
+	return __raw_readl(ipcdev.ipc_base + IPC_STATUS_ADDR);
 }
 
 static inline u8 ipc_data_readb(u32 offset) /* Read ipc byte data */
@@ -134,243 +239,78 @@
 	return readl(ipcdev.ipc_base + IPC_READ_BUFFER + offset);
 }
 
-static inline int busy_loop(void) /* Wait till scu status is busy */
+int intel_scu_ipc_check_status(void)
 {
-	u32 status = 0;
-	u32 loop_count = 0;
+	int i;
+	int ret = 0;
+	int status;
+	int loop_count = 3000000;
+
+	if (ipcdev.ioc && (system_state == SYSTEM_RUNNING) &&
+			(!suspend_in_progress()) &&
+			!in_interrupt() && !irqs_disabled()) {
+		if (0 == wait_for_completion_timeout(
+				&ipcdev.cmd_complete, 3 * HZ))
+			ret = -ETIMEDOUT;
+	} else {
+		while ((ipc_read_status() & 1) && --loop_count)
+			udelay(1);
+		if (loop_count == 0)
+			ret = -ETIMEDOUT;
+	}
 
 	status = ipc_read_status();
-	while (status & 1) {
-		udelay(1); /* scu processing time is in few u secods */
-		status = ipc_read_status();
-		loop_count++;
-		/* break if scu doesn't reset busy bit after huge retry */
-		if (loop_count > 100000) {
-			dev_err(&ipcdev.pdev->dev, "IPC timed out");
-			return -ETIMEDOUT;
-		}
-	}
-	if ((status >> 1) & 1)
-		return -EIO;
+	if (ret == -ETIMEDOUT)
+		dev_err(&ipcdev.pdev->dev,
+			"IPC timed out, IPC_STS=0x%x, IPC_CMD=0x%x\n",
+			status, ipcdev.cmd);
 
-	return 0;
-}
-
-/* Read/Write power control(PMIC in Langwell, MSIC in PenWell) registers */
-static int pwr_reg_rdwr(u16 *addr, u8 *data, u32 count, u32 op, u32 id)
-{
-	int nc;
-	u32 offset = 0;
-	int err;
-	u8 cbuf[IPC_WWBUF_SIZE] = { };
-	u32 *wbuf = (u32 *)&cbuf;
-
-	mutex_lock(&ipclock);
-
-	memset(cbuf, 0, sizeof(cbuf));
-
-	if (ipcdev.pdev == NULL) {
-		mutex_unlock(&ipclock);
-		return -ENODEV;
+	if (status & 0x2) {
+		ret = -EIO;
+		i = (status >> 16) & 0xFF;
+		if (i < ARRAY_SIZE(ipc_err_sources))
+			dev_err(&ipcdev.pdev->dev,
+				"IPC failed: %s, IPC_STS=0x%x, IPC_CMD=0x%x\n",
+				ipc_err_sources[i], status, ipcdev.cmd);
+		else
+			dev_err(&ipcdev.pdev->dev,
+				"IPC failed: unknown error, IPC_STS=0x%x, "
+				"IPC_CMD=0x%x\n", status, ipcdev.cmd);
+		if ((i == IPC_ERR_UNSIGNEDKERNEL) || (i == IPC_ERR_EMSECURITY))
+			ret = -EACCES;
 	}
 
-	for (nc = 0; nc < count; nc++, offset += 2) {
-		cbuf[offset] = addr[nc];
-		cbuf[offset + 1] = addr[nc] >> 8;
-	}
-
-	if (id == IPC_CMD_PCNTRL_R) {
-		for (nc = 0, offset = 0; nc < count; nc++, offset += 4)
-			ipc_data_writel(wbuf[nc], offset);
-		ipc_command((count*2) << 16 |  id << 12 | 0 << 8 | op);
-	} else if (id == IPC_CMD_PCNTRL_W) {
-		for (nc = 0; nc < count; nc++, offset += 1)
-			cbuf[offset] = data[nc];
-		for (nc = 0, offset = 0; nc < count; nc++, offset += 4)
-			ipc_data_writel(wbuf[nc], offset);
-		ipc_command((count*3) << 16 |  id << 12 | 0 << 8 | op);
-	} else if (id == IPC_CMD_PCNTRL_M) {
-		cbuf[offset] = data[0];
-		cbuf[offset + 1] = data[1];
-		ipc_data_writel(wbuf[0], 0); /* Write wbuff */
-		ipc_command(4 << 16 |  id << 12 | 0 << 8 | op);
-	}
-
-	err = busy_loop();
-	if (id == IPC_CMD_PCNTRL_R) { /* Read rbuf */
-		/* Workaround: values are read as 0 without memcpy_fromio */
-		memcpy_fromio(cbuf, ipcdev.ipc_base + 0x90, 16);
-		for (nc = 0; nc < count; nc++)
-			data[nc] = ipc_data_readb(nc);
-	}
-	mutex_unlock(&ipclock);
-	return err;
+	return ret;
 }
 
-/**
- *	intel_scu_ipc_ioread8		-	read a word via the SCU
- *	@addr: register on SCU
- *	@data: return pointer for read byte
- *
- *	Read a single register. Returns 0 on success or an error code. All
- *	locking between SCU accesses is handled for the caller.
- *
- *	This function may sleep.
- */
-int intel_scu_ipc_ioread8(u16 addr, u8 *data)
+void intel_scu_ipc_lock(void)
 {
-	return pwr_reg_rdwr(&addr, data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
-}
-EXPORT_SYMBOL(intel_scu_ipc_ioread8);
+	/* Prevent C-states beyond C6 */
+	pm_qos_update_request(qos, CSTATE_EXIT_LATENCY_S0i1 - 1);
 
-/**
- *	intel_scu_ipc_ioread16		-	read a word via the SCU
- *	@addr: register on SCU
- *	@data: return pointer for read word
- *
- *	Read a register pair. Returns 0 on success or an error code. All
- *	locking between SCU accesses is handled for the caller.
- *
- *	This function may sleep.
- */
-int intel_scu_ipc_ioread16(u16 addr, u16 *data)
+	/* Prevent S3 */
+	mutex_lock(&scu_suspend_lock);
+
+	if (!suspend_in_progress())
+		wake_lock(&ipc_wake_lock);
+}
+EXPORT_SYMBOL_GPL(intel_scu_ipc_lock);
+
+void intel_scu_ipc_unlock(void)
 {
-	u16 x[2] = {addr, addr + 1 };
-	return pwr_reg_rdwr(x, (u8 *)data, 2, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
+	/* Re-enable S3 */
+	if (!suspend_in_progress())
+		wake_unlock(&ipc_wake_lock);
+
+	mutex_unlock(&scu_suspend_lock);
+
+	/* Re-enable Deeper C-states beyond C6 */
+	pm_qos_update_request(qos, PM_QOS_DEFAULT_VALUE);
 }
-EXPORT_SYMBOL(intel_scu_ipc_ioread16);
+EXPORT_SYMBOL_GPL(intel_scu_ipc_unlock);
 
 /**
- *	intel_scu_ipc_ioread32		-	read a dword via the SCU
- *	@addr: register on SCU
- *	@data: return pointer for read dword
- *
- *	Read four registers. Returns 0 on success or an error code. All
- *	locking between SCU accesses is handled for the caller.
- *
- *	This function may sleep.
- */
-int intel_scu_ipc_ioread32(u16 addr, u32 *data)
-{
-	u16 x[4] = {addr, addr + 1, addr + 2, addr + 3};
-	return pwr_reg_rdwr(x, (u8 *)data, 4, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
-}
-EXPORT_SYMBOL(intel_scu_ipc_ioread32);
-
-/**
- *	intel_scu_ipc_iowrite8		-	write a byte via the SCU
- *	@addr: register on SCU
- *	@data: byte to write
- *
- *	Write a single register. Returns 0 on success or an error code. All
- *	locking between SCU accesses is handled for the caller.
- *
- *	This function may sleep.
- */
-int intel_scu_ipc_iowrite8(u16 addr, u8 data)
-{
-	return pwr_reg_rdwr(&addr, &data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
-}
-EXPORT_SYMBOL(intel_scu_ipc_iowrite8);
-
-/**
- *	intel_scu_ipc_iowrite16		-	write a word via the SCU
- *	@addr: register on SCU
- *	@data: word to write
- *
- *	Write two registers. Returns 0 on success or an error code. All
- *	locking between SCU accesses is handled for the caller.
- *
- *	This function may sleep.
- */
-int intel_scu_ipc_iowrite16(u16 addr, u16 data)
-{
-	u16 x[2] = {addr, addr + 1 };
-	return pwr_reg_rdwr(x, (u8 *)&data, 2, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
-}
-EXPORT_SYMBOL(intel_scu_ipc_iowrite16);
-
-/**
- *	intel_scu_ipc_iowrite32		-	write a dword via the SCU
- *	@addr: register on SCU
- *	@data: dword to write
- *
- *	Write four registers. Returns 0 on success or an error code. All
- *	locking between SCU accesses is handled for the caller.
- *
- *	This function may sleep.
- */
-int intel_scu_ipc_iowrite32(u16 addr, u32 data)
-{
-	u16 x[4] = {addr, addr + 1, addr + 2, addr + 3};
-	return pwr_reg_rdwr(x, (u8 *)&data, 4, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
-}
-EXPORT_SYMBOL(intel_scu_ipc_iowrite32);
-
-/**
- *	intel_scu_ipc_readvv		-	read a set of registers
- *	@addr: register list
- *	@data: bytes to return
- *	@len: length of array
- *
- *	Read registers. Returns 0 on success or an error code. All
- *	locking between SCU accesses is handled for the caller.
- *
- *	The largest array length permitted by the hardware is 5 items.
- *
- *	This function may sleep.
- */
-int intel_scu_ipc_readv(u16 *addr, u8 *data, int len)
-{
-	return pwr_reg_rdwr(addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
-}
-EXPORT_SYMBOL(intel_scu_ipc_readv);
-
-/**
- *	intel_scu_ipc_writev		-	write a set of registers
- *	@addr: register list
- *	@data: bytes to write
- *	@len: length of array
- *
- *	Write registers. Returns 0 on success or an error code. All
- *	locking between SCU accesses is handled for the caller.
- *
- *	The largest array length permitted by the hardware is 5 items.
- *
- *	This function may sleep.
- *
- */
-int intel_scu_ipc_writev(u16 *addr, u8 *data, int len)
-{
-	return pwr_reg_rdwr(addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
-}
-EXPORT_SYMBOL(intel_scu_ipc_writev);
-
-
-/**
- *	intel_scu_ipc_update_register	-	r/m/w a register
- *	@addr: register address
- *	@bits: bits to update
- *	@mask: mask of bits to update
- *
- *	Read-modify-write power control unit register. The first data argument
- *	must be register value and second is mask value
- *	mask is a bitmap that indicates which bits to update.
- *	0 = masked. Don't modify this bit, 1 = modify this bit.
- *	returns 0 on success or an error code.
- *
- *	This function may sleep. Locking between SCU accesses is handled
- *	for the caller.
- */
-int intel_scu_ipc_update_register(u16 addr, u8 bits, u8 mask)
-{
-	u8 data[2] = { bits, mask };
-	return pwr_reg_rdwr(&addr, data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_M);
-}
-EXPORT_SYMBOL(intel_scu_ipc_update_register);
-
-/**
- *	intel_scu_ipc_simple_command	-	send a simple command
+ *	intel_scu_ipc_simple_command - send a simple command
  *	@cmd: command
  *	@sub: sub type
  *
@@ -385,117 +325,115 @@
 {
 	int err;
 
-	mutex_lock(&ipclock);
-	if (ipcdev.pdev == NULL) {
-		mutex_unlock(&ipclock);
+	if (ipcdev.pdev == NULL)
 		return -ENODEV;
-	}
-	ipc_command(sub << 12 | cmd);
-	err = busy_loop();
-	mutex_unlock(&ipclock);
+
+	intel_scu_ipc_lock();
+	intel_scu_ipc_send_command(sub << 12 | cmd);
+	err = intel_scu_ipc_check_status();
+	intel_scu_ipc_unlock();
 	return err;
 }
 EXPORT_SYMBOL(intel_scu_ipc_simple_command);
 
 /**
- *	intel_scu_ipc_command	-	command with data
- *	@cmd: command
- *	@sub: sub type
- *	@in: input data
- *	@inlen: input length in dwords
- *	@out: output data
- *	@outlein: output length in dwords
+ * intel_scu_ipc_raw_cmd - raw ipc command with data
+ * @cmd: command
+ * @sub: sub type
+ * @in: input data
+ * @inlen: input length in bytes
+ * @out: output data
+ * @outlen: output length in dwords
+ * @sptr: data writing to SPTR register
+ * @dptr: data writing to DPTR register
  *
- *	Issue a command to the SCU which involves data transfers. Do the
- *	data copies under the lock but leave it for the caller to interpret
+ * Issue a command to the SCU which involves data transfers. Do the
+ * data copies under the lock but leave it for the caller to interpret
+ * Note: This function should be called with the holding of ipclock
  */
-
-int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen,
-							u32 *out, int outlen)
+int intel_scu_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out,
+		u32 outlen, u32 dptr, u32 sptr)
 {
 	int i, err;
+	u32 wbuf[4] = { 0 };
 
-	mutex_lock(&ipclock);
-	if (ipcdev.pdev == NULL) {
-		mutex_unlock(&ipclock);
+	if (ipcdev.pdev == NULL)
 		return -ENODEV;
+
+	if (inlen > 16)
+		return -EINVAL;
+
+	memcpy(wbuf, in, inlen);
+
+	writel(dptr, ipcdev.ipc_base + IPC_DPTR_ADDR);
+	writel(sptr, ipcdev.ipc_base + IPC_SPTR_ADDR);
+
+	/**
+	 * SRAM controller doesn't support 8bit write, it only supports
+	 * 32bit write, so we have to write into the WBUF in 32bit,
+	 * and SCU FW will use the inlen to determine the actual input
+	 * data length in the WBUF.
+	 */
+	for (i = 0; i < ((inlen + 3) / 4); i++)
+		ipc_data_writel(wbuf[i], 4 * i);
+
+	/**
+	 * Watchdog IPC command is an exception here using double word
+	 * as the unit of input data size because of historical reasons
+	 * and SCU FW is doing so.
+	 */
+	if ((cmd & 0xFF) == IPCMSG_WATCHDOG_TIMER)
+		inlen = (inlen + 3) / 4;
+	/*
+	 *  In case of 3 pmic writes or read-modify-writes
+	 *  there are holes in the middle of the buffer which are
+	 *  ignored by SCU. These bytes should not be included into
+	 *  size of the ipc msg. Holes are as follows:
+	 *  write: wbuf[6 & 7]
+	 *  read-modifu-write: wbuf[6 & 7 & 11]
+	 */
+	else if ((cmd & 0xFF) == IPCMSG_PCNTRL) {
+		if (sub == IPC_CMD_PCNTRL_W && inlen == 11)
+			inlen -= 2;
+		else if (sub == IPC_CMD_PCNTRL_M && inlen == 15)
+			inlen -= 3;
 	}
-
-	for (i = 0; i < inlen; i++)
-		ipc_data_writel(*in++, 4 * i);
-
-	ipc_command((inlen << 16) | (sub << 12) | cmd);
-	err = busy_loop();
+	intel_scu_ipc_send_command((inlen << 16) | (sub << 12) | cmd);
+	err = intel_scu_ipc_check_status();
 
 	for (i = 0; i < outlen; i++)
 		*out++ = ipc_data_readl(4 * i);
 
-	mutex_unlock(&ipclock);
 	return err;
 }
-EXPORT_SYMBOL(intel_scu_ipc_command);
+EXPORT_SYMBOL_GPL(intel_scu_ipc_raw_cmd);
 
-/*I2C commands */
-#define IPC_I2C_WRITE 1 /* I2C Write command */
-#define IPC_I2C_READ  2 /* I2C Read command */
-
-/**
- *	intel_scu_ipc_i2c_cntrl		-	I2C read/write operations
- *	@addr: I2C address + command bits
- *	@data: data to read/write
- *
- *	Perform an an I2C read/write operation via the SCU. All locking is
- *	handled for the caller. This function may sleep.
- *
- *	Returns an error code or 0 on success.
- *
- *	This has to be in the IPC driver for the locking.
- */
-int intel_scu_ipc_i2c_cntrl(u32 addr, u32 *data)
+int intel_scu_ipc_command(u32 cmd, u32 sub, u8 *in, u32 inlen,
+		u32 *out, u32 outlen)
 {
-	u32 cmd = 0;
-
-	mutex_lock(&ipclock);
-	if (ipcdev.pdev == NULL) {
-		mutex_unlock(&ipclock);
-		return -ENODEV;
-	}
-	cmd = (addr >> 24) & 0xFF;
-	if (cmd == IPC_I2C_READ) {
-		writel(addr, ipcdev.i2c_base + IPC_I2C_CNTRL_ADDR);
-		/* Write not getting updated without delay */
-		mdelay(1);
-		*data = readl(ipcdev.i2c_base + I2C_DATA_ADDR);
-	} else if (cmd == IPC_I2C_WRITE) {
-		writel(*data, ipcdev.i2c_base + I2C_DATA_ADDR);
-		mdelay(1);
-		writel(addr, ipcdev.i2c_base + IPC_I2C_CNTRL_ADDR);
-	} else {
-		dev_err(&ipcdev.pdev->dev,
-			"intel_scu_ipc: I2C INVALID_CMD = 0x%x\n", cmd);
-
-		mutex_unlock(&ipclock);
-		return -EIO;
-	}
-	mutex_unlock(&ipclock);
-	return 0;
+	int ret;
+	intel_scu_ipc_lock();
+	ret = intel_scu_ipc_raw_cmd(cmd, sub, in, inlen, out, outlen, 0, 0);
+	intel_scu_ipc_unlock();
+	return ret;
 }
-EXPORT_SYMBOL(intel_scu_ipc_i2c_cntrl);
+EXPORT_SYMBOL_GPL(intel_scu_ipc_command);
 
 /*
  * Interrupt handler gets called when ioc bit of IPC_COMMAND_REG set to 1
  * When ioc bit is set to 1, caller api must wait for interrupt handler called
- * which in turn unlocks the caller api. Currently this is not used
+ * which in turn unlocks the caller api.
  *
  * This is edge triggered so we need take no action to clear anything
  */
 static irqreturn_t ioc(int irq, void *dev_id)
 {
+	complete(&ipcdev.cmd_complete);
 	return IRQ_HANDLED;
 }
 
 /**
- *	ipc_probe	-	probe an Intel SCU IPC
+ *	ipc_probe - probe an Intel SCU IPC
  *	@dev: the PCI device matching
  *	@id: entry in the match table
  *
@@ -504,12 +442,16 @@
  */
 static int ipc_probe(struct pci_dev *dev, const struct pci_device_id *id)
 {
-	int err;
+	int err, pid;
+	struct intel_scu_ipc_pdata_t *pdata;
 	resource_size_t pci_resource;
 
 	if (ipcdev.pdev)		/* We support only one SCU */
 		return -EBUSY;
 
+	pid = id->driver_data;
+	pdata = &intel_scu_ipc_pdata[pid];
+
 	ipcdev.pdev = pci_dev_get(dev);
 
 	err = pci_enable_device(dev);
@@ -524,14 +466,17 @@
 	if (!pci_resource)
 		return -ENOMEM;
 
-	if (request_irq(dev->irq, ioc, 0, "intel_scu_ipc", &ipcdev))
+	init_completion(&ipcdev.cmd_complete);
+
+	if (request_irq(dev->irq, ioc, IRQF_NO_SUSPEND, "intel_scu_ipc",
+		&ipcdev))
 		return -EBUSY;
 
-	ipcdev.ipc_base = ioremap_nocache(IPC_BASE_ADDR, IPC_MAX_ADDR);
+	ipcdev.ipc_base = ioremap_nocache(pdata->ipc_base, pdata->ipc_len);
 	if (!ipcdev.ipc_base)
 		return -ENOMEM;
 
-	ipcdev.i2c_base = ioremap_nocache(IPC_I2C_BASE, IPC_I2C_MAX_ADDR);
+	ipcdev.i2c_base = ioremap_nocache(pdata->i2c_base, pdata->i2c_len);
 	if (!ipcdev.i2c_base) {
 		iounmap(ipcdev.ipc_base);
 		return -ENOMEM;
@@ -543,7 +488,7 @@
 }
 
 /**
- *	ipc_remove	-	remove a bound IPC device
+ *	ipc_remove - remove a bound IPC device
  *	@pdev: PCI device
  *
  *	In practice the SCU is not removable but this function is also
@@ -564,7 +509,10 @@
 }
 
 static DEFINE_PCI_DEVICE_TABLE(pci_ids) = {
-	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x082a)},
+	{PCI_VDEVICE(INTEL, 0x080e), SCU_IPC_PENWELL},
+	{PCI_VDEVICE(INTEL, 0x082a), SCU_IPC_LINCROFT},
+	{PCI_VDEVICE(INTEL, 0x08ea), SCU_IPC_CLOVERVIEW},
+	{PCI_VDEVICE(INTEL, 0x11a0), SCU_IPC_TANGIER},
 	{ 0,}
 };
 MODULE_DEVICE_TABLE(pci, pci_ids);
@@ -576,17 +524,29 @@
 	.remove = ipc_remove,
 };
 
-
-static int __init intel_scu_ipc_init(void)
+static int intel_scu_ipc_init(void)
 {
-	platform = mrst_identify_cpu();
+	platform = intel_mid_identify_cpu();
 	if (platform == 0)
 		return -ENODEV;
+
+	qos = kzalloc(sizeof(struct pm_qos_request), GFP_KERNEL);
+	if (!qos)
+		return -ENOMEM;
+
+	pm_qos_add_request(qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
+
+	register_pm_notifier(&scu_ipc_pm_notifier);
+
+	wake_lock_init(&ipc_wake_lock, WAKE_LOCK_SUSPEND, "intel_scu_ipc");
+
 	return  pci_register_driver(&ipc_driver);
 }
 
 static void __exit intel_scu_ipc_exit(void)
 {
+	pm_qos_remove_request(qos);
+
 	pci_unregister_driver(&ipc_driver);
 }
 
@@ -594,5 +554,5 @@
 MODULE_DESCRIPTION("Intel SCU IPC driver");
 MODULE_LICENSE("GPL");
 
-module_init(intel_scu_ipc_init);
+fs_initcall(intel_scu_ipc_init);
 module_exit(intel_scu_ipc_exit);
diff --git a/drivers/platform/x86/intel_scu_ipcutil.c b/drivers/platform/x86/intel_scu_ipcutil.c
index 02bc5a6..b55cf8c 100644
--- a/drivers/platform/x86/intel_scu_ipcutil.c
+++ b/drivers/platform/x86/intel_scu_ipcutil.c
@@ -3,6 +3,8 @@
  *
  * (C) Copyright 2008-2010 Intel Corporation
  * Author: Sreedhara DS (sreedhara.ds@intel.com)
+ * (C) Copyright 2010 Intel Corporation
+ * Author: Sudha Krishnakumar (sudha.krishnakumar@intel.com)
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -22,22 +24,438 @@
 #include <linux/uaccess.h>
 #include <linux/slab.h>
 #include <linux/init.h>
+#include <linux/miscdevice.h>
+#include <linux/io.h>
+#include <linux/rpmsg.h>
 #include <asm/intel_scu_ipc.h>
+#include <asm/intel_scu_pmic.h>
+#include <asm/intel_scu_ipcutil.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_mid_rpmsg.h>
+#include <linux/platform_data/intel_mid_remoteproc.h>
+#include <linux/pm_runtime.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
 
-static int major;
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+#endif
 
-/* ioctl commnds */
-#define	INTE_SCU_IPC_REGISTER_READ	0
-#define INTE_SCU_IPC_REGISTER_WRITE	1
-#define INTE_SCU_IPC_REGISTER_UPDATE	2
+#define MAX_FW_SIZE 264192
 
-struct scu_ipc_data {
-	u32     count;  /* No. of registers */
-	u16     addr[5]; /* Register addresses */
-	u8      data[5]; /* Register data */
-	u8      mask; /* Valid for read-modify-write */
+#define PMIT_RESET1_OFFSET		14
+#define PMIT_RESET2_OFFSET		15
+
+#define IPC_RESIDENCY_CMD_ID_START	0
+#define IPC_RESIDENCY_CMD_ID_DUMP	2
+
+#define SRAM_ADDR_S0IX_RESIDENCY	0xFFFF71E0
+#define ALL_RESIDENCY_DATA_SIZE		12
+
+#define DUMP_OSNIB
+
+#define OSHOB_EXTEND_DESC_SIZE	52  /* OSHOB header+osnib+oem info: 52 bytes.*/
+
+#define OSHOB_HEADER_MAGIC_SIZE	4   /* Size (bytes) of magic number in OSHOB */
+				    /* header.                               */
+
+#define OSHOB_MAGIC_NUMBER	"$OH$"	/* If found when reading the first   */
+					/* 4 bytes of the OSOHB zone, it     */
+					/* means that the new extended OSHOB */
+					/* is going to be used.              */
+
+#define OSHOB_REV_MAJ_DEFAULT	0	/* Default revision number of OSHOB. */
+#define OSHOB_REV_MIN_DEFAULT	1	/* If 0.1 the default OSHOB is used  */
+					/* instead of the extended one.      */
+
+/* Defines for the SCU buffer included in OSHOB structure. */
+#define OSHOB_SCU_BUF_BASE_DW_SIZE	1   /* In dwords. By default SCU     */
+					    /* buffer size is 1 dword.       */
+
+#define OSHOB_SCU_BUF_MRFLD_DW_SIZE (4*OSHOB_SCU_BUF_BASE_DW_SIZE)
+					    /* In dwords. On Merrifield the  */
+					    /* SCU trace buffer size is      */
+					    /* 4 dwords.                     */
+#define OSHOB_DEF_FABRIC_ERR_MRFLD_SIZE   50	/* In DWORDS. For Merrifield.*/
+					/* Fabric error log size (in DWORDS).*/
+					/* From offs 0x44 to 0x10C.          */
+					/* Used in default OSHOB.            */
+
+#define OSNIB_SIZE		32	/* Size (bytes) of the default OSNIB.*/
+
+#define OSNIB_INTEL_RSVD_SIZE	24	/* Size (bytes) of Intel RESERVED in */
+					/* OSNIB.                            */
+#define OSNIB_OEM_RSVD_SIZE	96	/* Size (bytes) of OEM RESERVED      */
+					/* in OSNIB.                         */
+
+#define OSNIB_NVRAM_SIZE	128	/* Size (bytes) of NVRAM             */
+					/* in OSNIB.                         */
+
+#define OSHOB_DEF_FABRIC_ERR_SIZE   50	/* In DWORDS.                        */
+					/* Fabric error log size (in DWORDS).*/
+					/* From offs 0x44 to 0x10C.          */
+					/* Used in default OSHOB.            */
+
+#define OSHOB_FABRIC_ERROR1_SIZE  12    /* 1st part of Fabric error dump.    */
+					/* Used in extended OSHOB.           */
+
+#define OSHOB_FABRIC_ERROR2_SIZE  9     /* 2nd part of Fabric error dump.    */
+					/* Used in extended OSHOB.           */
+
+#define OSHOB_RESERVED_DEBUG_SIZE 5     /* Reserved for debug                */
+
+/* Size (bytes) of the default OSHOB structure. Includes the default OSNIB   */
+/* size.                                                                     */
+#define OSHOB_SIZE	(68 + (4*OSHOB_SCU_BUF_BASE_DW_SIZE) + \
+			    (4*OSHOB_DEF_FABRIC_ERR_SIZE))	/* In bytes. */
+
+#define OSHOB_MRFLD_SIZE (68 + (4*OSHOB_SCU_BUF_MRFLD_DW_SIZE) + \
+			    (4*OSHOB_DEF_FABRIC_ERR_MRFLD_SIZE))/* In bytes. */
+
+/* SCU buffer size is give in dwords. So it is x4 here to get the total      */
+/* number of bytes.                                                          */
+
+#define SCU_TRACE_HEADER_SIZE    16     /* SCU trace header                  */
+
+#define CHAABI_DEBUG_DATA_SIZE   5      /* Reserved for chaabi debug         */
+
+#define OSHOB_RESERVED_SIZE      184    /* Reserved                          */
+
+
+struct chip_reset_event {
+	int id;
+	const char *reset_ev1_name;
+	const char *reset_ev2_name;
 };
 
+static struct chip_reset_event chip_reset_events[] = {
+	{ INTEL_MID_CPU_CHIP_ANNIEDALE, "RESETSRC0", "RESETSRC1" },
+	{ INTEL_MID_CPU_CHIP_TANGIER, "RESETSRC0", "RESETSRC1" },
+	{ INTEL_MID_CPU_CHIP_CLOVERVIEW, "RESETIRQ1", "RESETIRQ2" },
+	{ INTEL_MID_CPU_CHIP_PENWELL, "RESETIRQ1", "RESETIRQ2" },
+};
+
+struct osnib_target_os {
+	const char *target_os_name;
+	int id;
+};
+
+static struct osnib_target_os osnib_target_oses[] = {
+	{ "main", SIGNED_MOS_ATTR },
+	{ "charging", SIGNED_COS_ATTR  },
+	{ "recovery", SIGNED_RECOVERY_ATTR },
+	{ "fastboot", SIGNED_POS_ATTR },
+	{ "factory", SIGNED_FACTORY_ATTR },
+	{ "factory2", SIGNED_FACTORY2_ATTR },
+	{ "bootoneshot", SIGNED_BOOTONESHOOT_ATTR },
+};
+
+
+struct osnib_wake_src {
+	u8 id;
+	const char *wakesrc_name;
+};
+
+static struct osnib_wake_src osnib_wake_srcs[] = {
+	{ WAKE_BATT_INSERT, "battery inserted" },
+	{ WAKE_PWR_BUTTON_PRESS, "power button pressed" },
+	{ WAKE_RTC_TIMER, "rtc timer" },
+	{ WAKE_USB_CHRG_INSERT, "usb charger inserted" },
+	{ WAKE_RESERVED, "reserved" },
+	{ WAKE_REAL_RESET, "real reset" },
+	{ WAKE_COLD_BOOT, "cold boot" },
+	{ WAKE_UNKNOWN, "unknown" },
+	{ WAKE_KERNEL_WATCHDOG_RESET, "kernel watchdog reset" },
+	{ WAKE_SECURITY_WATCHDOG_RESET, "security watchdog reset" },
+	{ WAKE_WATCHDOG_COUNTER_EXCEEDED, "watchdog counter exceeded" },
+	{ WAKE_POWER_SUPPLY_DETECTED, "power supply detected" },
+	{ WAKE_FASTBOOT_BUTTONS_COMBO, "fastboot combo" },
+	{ WAKE_NO_MATCHING_OSIP_ENTRY, "no matching osip entry" },
+	{ WAKE_CRITICAL_BATTERY, "critical battery" },
+	{ WAKE_INVALID_CHECKSUM, "invalid checksum" },
+	{ WAKE_FORCED_RESET, "forced reset"},
+	{ WAKE_ACDC_CHRG_INSERT, "ac charger inserted" },
+	{ WAKE_PMIC_WATCHDOG_RESET, "pmic watchdog reset" },
+	{ WAKE_PLATFORM_WATCHDOG_RESET, "HWWDT reset platform" },
+	{ WAKE_SC_WATCHDOG_RESET, "HWWDT reset SC" },
+	{ WAKE_KERNEL_PANIC, "kernel panic" },
+};
+
+
+/* OSNIB allocation. */
+struct scu_ipc_osnib {
+	u8 target_mode;        /* Target mode.                      */
+	u8 wd_count;           /* Software watchdog.                */
+	u8 alarm;              /* RTC alarm.                        */
+	u8 wakesrc;            /* WAKESRC.                          */
+	u8 reset_ev1;          /* RESETIRQ1 or RESETSRC0.           */
+	u8 reset_ev2;          /* RESETIRQ2 or RESETSRC1.           */
+	u8 spare;              /* Spare.                            */
+	u8 intel_reserved[OSNIB_INTEL_RSVD_SIZE]; /* INTEL RESERVED */
+			       /* (offsets 7 to 30).                */
+	u8 checksum;           /* CHECKSUM.                         */
+	u8 oem_reserved[OSNIB_OEM_RSVD_SIZE];     /* OEM RESERVED   */
+			       /* (offsets 32 to 127).              */
+	u8 nvram[OSNIB_NVRAM_SIZE];               /* NVRAM          */
+			       /* (offsets 128 to 255).             */
+};
+
+/* Default OSHOB allocation. */
+struct scu_ipc_oshob {
+	u32 scutxl;             /* SCUTxl offset position.      */
+	u32 iatxl;              /* IATxl offset.                */
+	u32 bocv;               /* BOCV offset.                 */
+	u8 osnibr[OSNIB_SIZE];  /* OSNIB area offset.           */
+	u32 pmit;               /* PMIT offset.                 */
+	u32 pemmcmhki;          /* PeMMCMHKI offset.            */
+	u32 osnibw_ptr;         /* OSNIB Write at offset 0x34.  */
+	u32 fab_err_log[OSHOB_DEF_FABRIC_ERR_SIZE]; /* Fabric   */
+				/* error log buffer.            */
+};
+
+/* Extended OSHOB allocation. version 1.3 */
+struct scu_ipc_oshob_extend {
+	u32 magic;              /* MAGIC number.                           */
+	u8  rev_major;          /* Revision major.                         */
+	u8  rev_minor;          /* Revision minor.                         */
+	u16 oshob_size;         /* OSHOB size.                             */
+	u32 nvram_addr;         /* NVRAM phys addres                       */
+	u32 scutxl;             /* SCUTxl offset position.                 */
+				/* If on MRFLD platform, next param may be */
+				/* shifted by                              */
+				/* (OSHOB_SCU_BUF_MRFLD_DW_SIZE - 1) bytes.*/
+	u32 iatxl;              /* IATxl.                                  */
+	u32 bocv;               /* BOCV.                                   */
+
+	u16 intel_size;         /* Intel size (in OSNIB area).             */
+	u16 oem_size;           /* OEM size (of OEM area).                 */
+	u32 r_intel_ptr;        /* Read Intel pointer.                     */
+	u32 w_intel_ptr;        /* Write Intel pointer.                    */
+	u32 r_oem_ptr;          /* Read OEM pointer.                       */
+	u32 w_oem_ptr;          /* Write OEM pointer.                      */
+
+	u32 pmit;               /* PMIT.                       */
+	u32 pemmcmhki;          /* PeMMCMHKI.                  */
+
+	/* OSHOB as defined for CLOVERVIEW */
+	u32 nvram_size;         /* NVRAM max size in bytes     */
+	u32 fabricerrlog1[OSHOB_FABRIC_ERROR1_SIZE]; /* fabric error data */
+	u8  vrtc_alarm_dow;     /* Alarm sync                  */
+	u8  vrtc_alarm_dom;     /* Alarm sync                  */
+	u8  vrtc_alarm_month;   /* Alarm sync                  */
+	u8  vrtc_alarm_year;    /* Alarm sync                  */
+	u32 reserved_debug[OSHOB_RESERVED_DEBUG_SIZE];/* Reserved Debug data */
+	u32 reserved2;          /* Reserved                    */
+	u32 fabricerrlog2[OSHOB_FABRIC_ERROR2_SIZE]; /* fabric error data2 */
+	u32 sculogbufferaddr;   /* phys addr of scu log buffer   */
+	u32 sculogbuffersize;   /* size of scu log buffer      */
+};
+
+/* Extended OSHOB allocation. version 1.4. */
+struct scu_ipc_oshob_extend_v14 {
+	u32 magic;              /* MAGIC number.                           */
+	u8  rev_major;          /* Revision major.                         */
+	u8  rev_minor;          /* Revision minor.                         */
+	u16 oshob_size;         /* OSHOB size.                             */
+
+	u32 scutxl;             /* SCUTxl offset position.                 */
+				/* If on MRFLD platform, next param may be */
+				/* shifted by                              */
+				/* (OSHOB_SCU_BUF_MRFLD_DW_SIZE - 1) bytes.*/
+	u32 iatxl;              /* IATxl.                                  */
+	u32 bocv;               /* BOCV.                                   */
+
+	u32 osnib_ptr;          /* The unique OSNIB pointer.               */
+
+	u32 pmit;               /* PMIT.                                   */
+	u8  scutraceheader[SCU_TRACE_HEADER_SIZE];   /* SCU trace header   */
+	u32 fabricerrlog[OSHOB_DEF_FABRIC_ERR_SIZE]; /* fabric error data  */
+	u32 chaabidebugdata[CHAABI_DEBUG_DATA_SIZE]; /* fabric error data  */
+	u32 pmuemergency;       /* pmu emergency                           */
+	u32 sculogbufferaddr;   /* scu log buffer address                  */
+	u32 sculogbuffersize;   /* size of scu log buffer                  */
+	u32 oshob_reserved[OSHOB_RESERVED_SIZE];     /* oshob reserved     */
+};
+
+struct scu_ipc_oshob_info {
+	u8      oshob_majrev;   /* Major revision number of OSHOB structure. */
+	u8      oshob_minrev;   /* Minor revision number of OSHOB structure. */
+	u16     oshob_size;     /* Total size (bytes) of OSHOB structure.    */
+	u32     scu_trace[OSHOB_SCU_BUF_BASE_DW_SIZE*4]; /* SCU trace buffer.*/
+				/* Set to max SCU buffer size (dwords) to    */
+				/* adapt to MRFLD. On other platforms, only  */
+				/* the first dword is stored and read.       */
+	u32     ia_trace;       /* IA trace buffer.                          */
+	u16     osnib_size;     /* Total size (bytes) of OSNIB structure.    */
+	u16     oemnib_size;    /* Total size (bytes) of OEMNIB area.        */
+	u32     scu_trace_size; /* SCU extended trace buffer size            */
+	u32     nvram_size;     /* NV ram size in bytes                      */
+
+	phys_addr_t   oshob_base;     /* Base address of OSHOB. Use ioremap  */
+				      /* to remap for access.                */
+	phys_addr_t   osnibr_ptr;     /* Pointer to Intel read zone.         */
+	phys_addr_t   osnibw_ptr;     /* Pointer to Intel write zone.        */
+	phys_addr_t   oemnibr_ptr;    /* Pointer to OEM read zone.           */
+	phys_addr_t   oemnibw_ptr;    /* Pointer to OEM write zone.          */
+	phys_addr_t   scu_trace_buf;  /* SCU extended trace buffer           */
+	phys_addr_t   nvram_addr;     /* NV ram phys addr                    */
+
+	int (*scu_ipc_write_osnib)(u8 *data, int len, int offset);
+	int (*scu_ipc_read_osnib)(u8 *data, int len, int offset);
+
+	int platform_type;     /* Identifies the platform (list of supported */
+			       /* platforms is given in intel-mid.h).        */
+
+	u16 offs_add;          /* The additional shift bytes to consider     */
+			       /* giving the offset at which the OSHOB params*/
+			       /* will be read. If MRFLD it must be set to   */
+			       /* take into account the extra SCU dwords.    */
+
+};
+
+/* Structure for OSHOB info */
+struct scu_ipc_oshob_info *oshob_info;
+
+static struct rpmsg_instance *ipcutil_instance;
+
+/* Mode for Audio clock */
+static DEFINE_MUTEX(osc_clk0_lock);
+static unsigned int osc_clk0_mode;
+
+int intel_scu_ipc_osc_clk(u8 clk, unsigned int khz)
+{
+	/* SCU IPC COMMAND(osc clk on/off) definition:
+	 * ipc_wbuf[0] = clock to act on {0, 1, 2, 3}
+	 * ipc_wbuf[1] =
+	 * bit 0 - 1:on  0:off
+	 * bit 1 - if 1, read divider setting from bits 3:2 as follows:
+	 * bit [3:2] - 00: clk/1, 01: clk/2, 10: clk/4, 11: reserved
+	 */
+	unsigned int base_freq;
+	unsigned int div;
+	u8 ipc_wbuf[2];
+	int ipc_ret;
+
+	if (clk > 3)
+		return -EINVAL;
+
+	ipc_wbuf[0] = clk;
+	ipc_wbuf[1] = 0;
+	if (khz) {
+#ifdef CONFIG_CTP_CRYSTAL_38M4
+		base_freq = 38400;
+#else
+		base_freq = 19200;
+#endif
+		div = fls(base_freq / khz) - 1;
+		if (div >= 3 || (1 << div) * khz != base_freq)
+			return -EINVAL;	/* Allow only exact frequencies */
+		ipc_wbuf[1] = 0x03 | (div << 2);
+	}
+
+	ipc_ret = rpmsg_send_command(ipcutil_instance,
+		RP_OSC_CLK_CTRL, 0, ipc_wbuf, NULL, 2, 0);
+	if (ipc_ret != 0)
+		pr_err("%s: failed to set osc clk(%d) output\n", __func__, clk);
+
+	return ipc_ret;
+}
+EXPORT_SYMBOL_GPL(intel_scu_ipc_osc_clk);
+
+/*
+ * OSC_CLK_AUDIO is connected to the MSIC as well as Audience, so it should be
+ * turned on if any one of them requests it to be on and it should be turned off
+ * only if no one needs it on.
+ */
+int intel_scu_ipc_set_osc_clk0(unsigned int enable, enum clk0_mode mode)
+{
+	int ret = 0, clk_enable;
+	static const unsigned int clk_khz = 19200;
+
+	pr_info("set_clk0 request %s for Mode 0x%x\n",
+				enable ? "ON" : "OFF", mode);
+	mutex_lock(&osc_clk0_lock);
+	if (mode == CLK0_QUERY) {
+		ret = osc_clk0_mode;
+		goto out;
+	}
+	if (enable) {
+		/* if clock is already on, just add new user */
+		if (osc_clk0_mode) {
+			osc_clk0_mode |= mode;
+			goto out;
+		}
+		osc_clk0_mode |= mode;
+		pr_info("set_clk0: enabling clk, mode 0x%x\n", osc_clk0_mode);
+		clk_enable = 1;
+	} else {
+		osc_clk0_mode &= ~mode;
+		pr_info("set_clk0: disabling clk, mode 0x%x\n", osc_clk0_mode);
+		/* others using the clock, cannot turn it of */
+		if (osc_clk0_mode)
+			goto out;
+		clk_enable = 0;
+	}
+	pr_info("configuring OSC_CLK_AUDIO now\n");
+	ret = intel_scu_ipc_osc_clk(OSC_CLK_AUDIO, clk_enable ? clk_khz : 0);
+out:
+	mutex_unlock(&osc_clk0_lock);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(intel_scu_ipc_set_osc_clk0);
+
+#define MSIC_VPROG1_CTRL        0xD6
+#define MSIC_VPROG2_CTRL        0xD7
+
+#define MSIC_VPROG2_ON          0x36 /*1.200V and Auto mode*/
+#define MSIC_VPROG1_ON          0xF6 /*2.800V and Auto mode*/
+#define MSIC_VPROG_OFF          0x24 /*1.200V and OFF*/
+
+/* Defines specific of MRFLD platform (CONFIG_X86_MRFLD). */
+#define MSIC_VPROG1_MRFLD_CTRL	0xAC
+#define MSIC_VPROG2_MRFLD_CTRL	0xAD
+#define MSIC_VPROG3_MRFLD_CTRL	0xAE
+
+#define MSIC_VPROG1_MRFLD_ON	0xC1	/* 2.80V and Auto mode */
+#define MSIC_VPROG2_MRFLD_ON	0xC1	/* 2.80V and Auto mode */
+#define MSIC_VPROG3_MRFLD_ON	0x01	/* 1.05V and Auto mode */
+#define MSIC_VPROG_MRFLD_OFF	0	/* OFF */
+/* End of MRFLD specific.*/
+
+/* Helpers to turn on/off msic vprog1, vprog2 and vprog3 */
+int intel_scu_ipc_msic_vprog1(int on)
+{
+	if ((oshob_info->platform_type == INTEL_MID_CPU_CHIP_TANGIER) ||
+		(oshob_info->platform_type == INTEL_MID_CPU_CHIP_ANNIEDALE))
+		return intel_scu_ipc_iowrite8(MSIC_VPROG1_MRFLD_CTRL,
+			on ? MSIC_VPROG1_MRFLD_ON : MSIC_VPROG_MRFLD_OFF);
+	else
+		return intel_scu_ipc_iowrite8(MSIC_VPROG1_CTRL,
+			on ? MSIC_VPROG1_ON : MSIC_VPROG_OFF);
+}
+EXPORT_SYMBOL_GPL(intel_scu_ipc_msic_vprog1);
+
+int intel_scu_ipc_msic_vprog2(int on)
+{
+	if ((oshob_info->platform_type == INTEL_MID_CPU_CHIP_TANGIER) ||
+		(oshob_info->platform_type == INTEL_MID_CPU_CHIP_ANNIEDALE))
+		return intel_scu_ipc_iowrite8(MSIC_VPROG2_MRFLD_CTRL,
+			on ? MSIC_VPROG2_MRFLD_ON : MSIC_VPROG_MRFLD_OFF);
+	else
+		return intel_scu_ipc_iowrite8(MSIC_VPROG2_CTRL,
+			on ? MSIC_VPROG2_ON : MSIC_VPROG_OFF);
+}
+EXPORT_SYMBOL_GPL(intel_scu_ipc_msic_vprog2);
+
+int intel_scu_ipc_msic_vprog3(int on)
+{
+	if (oshob_info->platform_type == INTEL_MID_CPU_CHIP_ANNIEDALE)
+		return intel_scu_ipc_iowrite8(MSIC_VPROG3_MRFLD_CTRL,
+			on ? MSIC_VPROG3_MRFLD_ON : MSIC_VPROG_MRFLD_OFF);
+	return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(intel_scu_ipc_msic_vprog3);
+
 /**
  *	scu_reg_access		-	implement register access ioctls
  *	@cmd: command we are doing (read/write/update)
@@ -49,22 +467,169 @@
 
 static int scu_reg_access(u32 cmd, struct scu_ipc_data  *data)
 {
-	int count = data->count;
+	int ret;
 
-	if (count == 0 || count == 3 || count > 4)
+	if (data->count == 0 || data->count > 5)
 		return -EINVAL;
 
 	switch (cmd) {
-	case INTE_SCU_IPC_REGISTER_READ:
-		return intel_scu_ipc_readv(data->addr, data->data, count);
-	case INTE_SCU_IPC_REGISTER_WRITE:
-		return intel_scu_ipc_writev(data->addr, data->data, count);
-	case INTE_SCU_IPC_REGISTER_UPDATE:
-		return intel_scu_ipc_update_register(data->addr[0],
-						    data->data[0], data->mask);
+	case INTEL_SCU_IPC_REGISTER_READ:
+		ret = intel_scu_ipc_readv(data->addr, data->data, data->count);
+		break;
+	case INTEL_SCU_IPC_REGISTER_WRITE:
+		ret = intel_scu_ipc_writev(data->addr, data->data, data->count);
+		break;
+	case INTEL_SCU_IPC_REGISTER_UPDATE:
+		ret = intel_scu_ipc_update_register(data->addr[0],
+							data->data[0],
+							data->mask);
+		break;
 	default:
 		return -ENOTTY;
 	}
+	return ret;
+}
+
+#define check_pmdb_sub_cmd(x)	(x == PMDB_SUB_CMD_R_OTPCTL || \
+		x == PMDB_SUB_CMD_R_WMDB || x == PMDB_SUB_CMD_W_WMDB || \
+		x == PMDB_SUB_CMD_R_OTPDB || x == PMDB_SUB_CMD_W_OTPDB)
+#define pmdb_sub_cmd_is_read(x)	(x == PMDB_SUB_CMD_R_OTPCTL || \
+		x == PMDB_SUB_CMD_R_WMDB || x == PMDB_SUB_CMD_R_OTPDB)
+
+static int check_pmdb_buffer(struct scu_ipc_pmdb_buffer *p_buf)
+{
+	int size;
+
+	switch (p_buf->sub) {
+	case PMDB_SUB_CMD_R_WMDB:
+	case PMDB_SUB_CMD_W_WMDB:
+		size = PMDB_WMDB_SIZE;
+		break;
+	case PMDB_SUB_CMD_R_OTPDB:
+	case PMDB_SUB_CMD_W_OTPDB:
+		size = PMDB_OTPDB_SIZE;
+		break;
+	case PMDB_SUB_CMD_R_OTPCTL:
+		size = PMDB_OTPCTL_SIZE;
+		break;
+	default:
+		size = 0;
+	}
+
+	return check_pmdb_sub_cmd(p_buf->sub) &&
+		(p_buf->count + p_buf->offset < size) &&
+		(p_buf->count % 4 == 0);
+}
+
+/**
+ *	scu_pmdb_access	-	access PMDB data through SCU IPC cmds
+ *	@p_buf: PMDB access buffer, it describe the data to write/read.
+ *		p_buf->sub - SCU IPC sub cmd of PMDB access,
+ *			this sub cmd distinguish different componet
+ *			in PMDB which to be accessd. (WMDB, OTPDB, OTPCTL)
+ *		p_buf->count - access data's count;
+ *		p_buf->offset - access data's offset for each component in PMDB;
+ *		p_buf->data - data to write/read.
+ *
+ *	Write/read data to/from PMDB.
+ *
+ */
+static int scu_pmdb_access(struct scu_ipc_pmdb_buffer *p_buf)
+{
+	int i, offset, ret = -EINVAL;
+	u8 *p_data;
+
+	if (!check_pmdb_buffer(p_buf)) {
+		pr_err("Invalid PMDB buffer!\n");
+		return -EINVAL;
+	}
+
+	/* 1. we use rpmsg_send_raw_command() IPC cmd interface
+	 *    to access PMDB data. Each call of rpmsg_send_raw_command()
+	 *    can only access at most PMDB_ACCESS_SIZE bytes' data.
+	 * 2. There are two kinds of pmdb sub commands, read command
+	 *    and write command. For read command, we must transport
+	 *    in and out buffer to rpmsg_send_raw_command(), because
+	 *    in buffer length is pass as access length which must
+	 *    be transported to SCU.
+	 */
+	p_data = p_buf->data;
+	offset = p_buf->offset;
+	for (i = 0; i < p_buf->count/PMDB_ACCESS_SIZE; i++) {
+		if (pmdb_sub_cmd_is_read(p_buf->sub))
+			ret = rpmsg_send_raw_command(ipcutil_instance,
+					RP_PMDB, p_buf->sub,
+					p_data, (u32 *)p_data,
+					PMDB_ACCESS_SIZE, PMDB_ACCESS_SIZE / 4,
+					0, offset);
+		else
+			ret = rpmsg_send_raw_command(ipcutil_instance,
+					RP_PMDB, p_buf->sub,
+					p_data, NULL, PMDB_ACCESS_SIZE,
+					0, 0, offset);
+		if (ret < 0) {
+			pr_err("intel_scu_ipc_raw_cmd failed!\n");
+			return ret;
+		}
+		offset += PMDB_ACCESS_SIZE;
+		p_data += PMDB_ACCESS_SIZE;
+	}
+	if (p_buf->count % PMDB_ACCESS_SIZE > 0) {
+		if (pmdb_sub_cmd_is_read(p_buf->sub))
+			ret = rpmsg_send_raw_command(ipcutil_instance,
+					RP_PMDB, p_buf->sub,
+					p_data, (u32 *)p_data,
+					p_buf->count % PMDB_ACCESS_SIZE,
+					(p_buf->count % PMDB_ACCESS_SIZE) / 4,
+					0, offset);
+		else
+			ret = rpmsg_send_raw_command(ipcutil_instance,
+					RP_PMDB, p_buf->sub,
+					p_data, NULL,
+					p_buf->count % PMDB_ACCESS_SIZE,
+					0, 0, offset);
+		if (ret < 0) {
+			pr_err("intel_scu_ipc_raw_cmd failed!\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int do_pmdb_user_buf_access(void __user *argp)
+{
+	int ret;
+	struct scu_ipc_pmdb_buffer *p_buf;
+
+	p_buf = kzalloc(sizeof(struct scu_ipc_pmdb_buffer), GFP_KERNEL);
+	if (p_buf == NULL) {
+		pr_err("failed to allocate memory for pmdb buffer!\n");
+		return -ENOMEM;
+	}
+
+	ret = copy_from_user(p_buf, argp, sizeof(struct scu_ipc_pmdb_buffer));
+	if (ret < 0) {
+		pr_err("copy from user failed!!\n");
+		goto err;
+	}
+
+	ret = scu_pmdb_access(p_buf);
+	if (ret < 0) {
+		pr_err("scu_pmdb_access error!\n");
+		goto err;
+	}
+
+	if (pmdb_sub_cmd_is_read(p_buf->sub)) {
+		ret = copy_to_user(argp + 3 * sizeof(u32),
+					p_buf->data, p_buf->count);
+		if (ret < 0)
+			pr_err("copy to user failed!!\n");
+	}
+
+err:
+	kfree(p_buf);
+	return ret;
 }
 
 /**
@@ -78,43 +643,2473 @@
 static long scu_ipc_ioctl(struct file *fp, unsigned int cmd,
 							unsigned long arg)
 {
-	int ret;
+	int ret = -EINVAL;
 	struct scu_ipc_data  data;
 	void __user *argp = (void __user *)arg;
 
-	if (!capable(CAP_SYS_RAWIO))
+	/* Only IOCTL cmd allowed to pass through without capability check */
+	/* is getting fw version info, all others need to check to prevent */
+	/* arbitrary access to all sort of bit of the hardware exposed here*/
+
+	if ((cmd != INTEL_SCU_IPC_FW_REVISION_GET &&
+		cmd != INTEL_SCU_IPC_FW_REVISION_EXT_GET &&
+		cmd != INTEL_SCU_IPC_S0IX_RESIDENCY) &&
+		!capable(CAP_SYS_RAWIO))
 		return -EPERM;
 
-	if (copy_from_user(&data, argp, sizeof(struct scu_ipc_data)))
-		return -EFAULT;
-	ret = scu_reg_access(cmd, &data);
+	switch (cmd) {
+	case INTEL_SCU_IPC_S0IX_RESIDENCY:
+	{
+		void __iomem *s0ix_residencies_addr;
+		u8 dump_results[ALL_RESIDENCY_DATA_SIZE] = {0};
+		u32 cmd_id;
+
+		if (copy_from_user(&cmd_id, argp, sizeof(u32))) {
+			pr_err("copy from user failed!!\n");
+			return -EFAULT;
+		}
+
+		/* Check get residency counter valid cmd range */
+
+		if (cmd_id > IPC_RESIDENCY_CMD_ID_DUMP) {
+			pr_err("invalid si0x residency sub-cmd id!\n");
+			return -EINVAL;
+		}
+
+		ret = rpmsg_send_simple_command(ipcutil_instance,
+					RP_S0IX_COUNTER, cmd_id);
+
+		if (ret < 0) {
+			pr_err("ipc_get_s0ix_counter failed!\n");
+			return ret;
+		}
+
+		if (cmd_id == IPC_RESIDENCY_CMD_ID_DUMP) {
+			s0ix_residencies_addr = ioremap_nocache(
+				SRAM_ADDR_S0IX_RESIDENCY,
+				ALL_RESIDENCY_DATA_SIZE);
+
+			if (!s0ix_residencies_addr) {
+				pr_err("ioremap SRAM address failed!!\n");
+				return -EFAULT;
+			}
+
+			memcpy(&dump_results[0], s0ix_residencies_addr,
+				ALL_RESIDENCY_DATA_SIZE);
+
+			iounmap(s0ix_residencies_addr);
+			ret = copy_to_user(argp, &dump_results[0],
+					ALL_RESIDENCY_DATA_SIZE);
+		}
+
+		break;
+	}
+	case INTEL_SCU_IPC_READ_RR_FROM_OSNIB:
+	{
+		u8 reboot_reason;
+		ret = intel_scu_ipc_read_osnib_rr(&reboot_reason);
+		if (ret < 0)
+			return ret;
+		ret = copy_to_user(argp, &reboot_reason, 1);
+		break;
+	}
+	case INTEL_SCU_IPC_WRITE_RR_TO_OSNIB:
+	{
+		u8 data;
+
+		ret = copy_from_user(&data, (u8 *)arg, 1);
+		if (ret < 0) {
+			pr_err("copy from user failed!!\n");
+			return ret;
+		}
+		ret = intel_scu_ipc_write_osnib_rr(data);
+		break;
+	}
+	case INTEL_SCU_IPC_WRITE_ALARM_FLAG_TO_OSNIB:
+	{
+		u8 flag, data;
+		ret = copy_from_user(&flag, (u8 *)arg, 1);
+		if (ret < 0) {
+			pr_err("copy from user failed!!\n");
+			return ret;
+		}
+
+		ret = oshob_info->scu_ipc_read_osnib(
+				&data,
+				1,
+				offsetof(struct scu_ipc_osnib, alarm));
+
+		if (ret < 0)
+			return ret;
+		if (flag) {
+			data = data | 0x1; /* set alarm flag */
+			pr_info("scu_ipc_ioctl: set alarm flag\n");
+		} else {
+			data = data & 0xFE; /* clear alarm flag */
+			pr_info("scu_ipc_ioctl: clear alarm flag\n");
+		}
+
+		ret = oshob_info->scu_ipc_write_osnib(
+				&data,
+				1,
+				offsetof(struct scu_ipc_osnib, alarm));
+
+		break;
+	}
+	case INTEL_SCU_IPC_READ_VBATTCRIT:
+	{
+		u32 value = 0;
+
+		pr_info("cmd = INTEL_SCU_IPC_READ_VBATTCRIT");
+		ret = intel_scu_ipc_read_mip((u8 *)&value, 4, 0x318, 1);
+		if (ret < 0)
+			return ret;
+		pr_info("VBATTCRIT VALUE = %x\n", value);
+		ret = copy_to_user(argp, &value, 4);
+		break;
+	}
+	case INTEL_SCU_IPC_FW_REVISION_GET:
+	case INTEL_SCU_IPC_FW_REVISION_EXT_GET:
+	{
+		struct scu_ipc_version version;
+
+		if (copy_from_user(&version, argp, sizeof(u32)))
+			return -EFAULT;
+
+		if (version.count > 16)
+			return -EINVAL;
+
+		ret = rpmsg_send_command(ipcutil_instance, RP_GET_FW_REVISION,
+			cmd & 0x1, NULL, (u32 *)version.data, 0, 4);
+		if (ret < 0)
+			return ret;
+
+		if (copy_to_user(argp + sizeof(u32),
+					version.data, version.count))
+			ret = -EFAULT;
+		break;
+	}
+	case INTEL_SCU_IPC_OSC_CLK_CNTL:
+	{
+		struct osc_clk_t osc_clk;
+
+		if (copy_from_user(&osc_clk, argp, sizeof(struct osc_clk_t)))
+			return -EFAULT;
+
+		ret = intel_scu_ipc_osc_clk(osc_clk.id, osc_clk.khz);
+		if (ret)
+			pr_err("%s: failed to set osc clk\n", __func__);
+
+		break;
+	}
+	case INTEL_SCU_IPC_PMDB_ACCESS:
+	{
+		ret = do_pmdb_user_buf_access(argp);
+
+		break;
+	}
+	default:
+		if (copy_from_user(&data, argp, sizeof(struct scu_ipc_data)))
+			return -EFAULT;
+		ret = scu_reg_access(cmd, &data);
+		if (ret < 0)
+			return ret;
+		if (copy_to_user(argp, &data, sizeof(struct scu_ipc_data)))
+			return -EFAULT;
+		return 0;
+	}
+
+	return ret;
+}
+
+phys_addr_t intel_scu_ipc_get_oshob_base(void)
+{
+	if (oshob_info == NULL)
+		return 0;
+
+	return oshob_info->oshob_base;
+}
+EXPORT_SYMBOL_GPL(intel_scu_ipc_get_oshob_base);
+
+int intel_scu_ipc_get_oshob_size(void)
+{
+	if (oshob_info == NULL)
+		return 0;
+
+	return oshob_info->oshob_size;
+}
+EXPORT_SYMBOL_GPL(intel_scu_ipc_get_oshob_size);
+
+int intel_scu_ipc_read_oshob(u8 *data, int len, int offset)
+{
+	int ret = 0, i;
+	void __iomem *oshob_addr;
+	u8 *ptr = data;
+
+	oshob_addr = ioremap_nocache(
+				    oshob_info->oshob_base,
+				    oshob_info->oshob_size);
+
+	if (!oshob_addr) {
+		pr_err("ipc_read_oshob: addr ioremap failed!\n");
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	for (i = 0; i < len; i = i+1) {
+		*ptr = readb(oshob_addr + offset + i);
+		pr_debug("addr(remapped)=%p, offset=%2x, value=%2x\n",
+			(oshob_addr + i),
+			offset + i, *ptr);
+		ptr++;
+	}
+
+	iounmap(oshob_addr);
+exit:
+	return ret;
+}
+
+EXPORT_SYMBOL_GPL(intel_scu_ipc_read_oshob);
+
+/* This function is used for the default OSNIB. */
+int intel_scu_ipc_read_osnib(u8 *data, int len, int offset)
+{
+	int i, ret = 0;
+	phys_addr_t osnibw_ptr;
+	u8 *ptr, check = 0;
+	u16 struct_offs;
+	void __iomem *oshob_addr, *osnibr_addr, *osnibw_addr;
+
+	pr_debug("OSHOB base addr value is %pa\n", &oshob_info->oshob_base);
+	oshob_addr = ioremap_nocache(oshob_info->oshob_base,
+				     oshob_info->oshob_size);
+	if (!oshob_addr) {
+		pr_err("ioremap failed!\n");
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	struct_offs = offsetof(struct scu_ipc_oshob, osnibr) +
+			    oshob_info->offs_add;
+	osnibr_addr = oshob_addr + struct_offs;
+
+	if (!osnibr_addr) {
+		pr_err("Bad osnib address!\n");
+		ret = -EFAULT;
+		iounmap(oshob_addr);
+		goto exit;
+	}
+
+	pr_debug("OSNIB read addr (remapped) is %p\n", osnibr_addr);
+
+	/* Make a chksum verification for osnib */
+	for (i = 0; i < oshob_info->osnib_size; i++)
+		check += readb(osnibr_addr + i);
+	if (check) {
+		pr_err("WARNING!!! osnib chksum verification faild, reset all osnib data!\n");
+		struct_offs = offsetof(struct scu_ipc_oshob, osnibw_ptr) +
+				    oshob_info->offs_add;
+		osnibw_ptr = readl(oshob_addr + struct_offs);
+		osnibw_addr = ioremap_nocache(
+					osnibw_ptr, oshob_info->osnib_size);
+		if (osnibw_addr) {
+			for (i = 0; i < oshob_info->osnib_size; i++)
+				writeb(0, osnibw_addr + i);
+			rpmsg_send_raw_command(ipcutil_instance,
+				RP_WRITE_OSNIB, 0,
+				NULL, NULL, 0, 0,
+				0xFFFFFFFF, 0);
+			iounmap(osnibw_addr);
+		}
+	}
+
+	ptr = data;
+	for (i = 0; i < len; i++) {
+		*ptr = readb(osnibr_addr + offset + i);
+		pr_debug("addr(remapped)=%p offset=%2x, value=%2x\n",
+			(osnibr_addr+offset+i), offset+i, *ptr);
+		ptr++;
+	}
+
+	iounmap(oshob_addr);
+exit:
+	return ret;
+}
+
+static u32 invalid_checksum;
+
+/* This function is used for the default OSNIB. */
+int intel_scu_ipc_write_osnib(u8 *data, int len, int offset)
+{
+	int i;
+	int ret = 0;
+	phys_addr_t osnibw_ptr;
+	u8 osnib_data[oshob_info->osnib_size];
+	u8 check = 0, chksum = 0;
+	u16 struct_offs;
+	void __iomem *oshob_addr, *osnibw_addr, *osnibr_addr;
+
+	pr_debug("OSHOB base addr value is %pa\n", &oshob_info->oshob_base);
+
+	rpmsg_global_lock();
+
+	oshob_addr = ioremap_nocache(oshob_info->oshob_base,
+				     oshob_info->oshob_size);
+	if (!oshob_addr) {
+		pr_err("ioremap failed!\n");
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	/*Dump osnib data for generate chksum */
+	struct_offs = offsetof(struct scu_ipc_oshob, osnibr) +
+			    oshob_info->offs_add;
+	osnibr_addr = oshob_addr + struct_offs;
+
+	pr_debug("OSNIB read addr (remapped) in OSHOB at %p\n",
+						osnibr_addr);
+
+	for (i = 0; i < oshob_info->osnib_size; i++) {
+		osnib_data[i] = readb(osnibr_addr + i);
+		check += osnib_data[i];
+	}
+	memcpy(osnib_data + offset, data, len);
+
+	if (check) {
+		pr_err("WARNING!!! OSNIB data chksum verification FAILED!\n");
+	} else {
+		/* generate chksum */
+		for (i = 0; i < oshob_info->osnib_size - 1; i++)
+			chksum += osnib_data[i];
+		osnib_data[oshob_info->osnib_size - 1] = ~chksum + 1;
+
+		if (invalid_checksum)
+			osnib_data[oshob_info->osnib_size - 1] = ~chksum;
+	}
+
+	struct_offs = offsetof(struct scu_ipc_oshob, osnibw_ptr) +
+			    oshob_info->offs_add;
+	osnibw_ptr = readl(oshob_addr + struct_offs);
+	if (osnibw_ptr == 0) { /* workaround here for BZ 2914 */
+		osnibw_ptr = 0xFFFF3400;
+		pr_err("ERR: osnibw ptr from oshob is 0, manually set it here\n");
+	}
+
+	pr_debug("POSNIB write address: %pa\n", &osnibw_ptr);
+
+	osnibw_addr = ioremap_nocache(osnibw_ptr, oshob_info->osnib_size);
+	if (!osnibw_addr) {
+		pr_err("ioremap failed!\n");
+		ret = -ENOMEM;
+		goto unmap_oshob_addr;
+	}
+
+	for (i = 0; i < oshob_info->osnib_size; i++)
+		writeb(*(osnib_data + i), (osnibw_addr + i));
+
+	ret = rpmsg_send_raw_command(ipcutil_instance,
+			RP_WRITE_OSNIB, 0,
+			NULL, NULL, 0, 0,
+			0xFFFFFFFF, 0);
 	if (ret < 0)
-		return ret;
-	if (copy_to_user(argp, &data, sizeof(struct scu_ipc_data)))
+		pr_err("ipc_write_osnib failed!!\n");
+
+	iounmap(osnibw_addr);
+
+unmap_oshob_addr:
+	iounmap(oshob_addr);
+exit:
+	rpmsg_global_unlock();
+
+	return ret;
+}
+
+/* This function is used for the extended OSHOB/OSNIB. */
+int intel_scu_ipc_read_osnib_extend(u8 *data, int len, int offset)
+{
+	int i, ret = 0;
+	u8 *ptr, check = 0;
+	void __iomem *oshob_addr, *osnibr_addr, *osnibw_addr;
+	u32 sptr_dw_mask;
+
+	oshob_addr = ioremap_nocache(oshob_info->oshob_base,
+				     oshob_info->oshob_size);
+	if (!oshob_addr) {
+		pr_err("ipc_read_osnib_extend: ioremap failed!\n");
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	pr_debug("ipc_read_osnib_extend: OSNIB addr=%pa size %d\n",
+		 &oshob_info->osnibr_ptr, oshob_info->osnib_size);
+
+	osnibr_addr = ioremap_nocache(oshob_info->osnibr_ptr,
+				      oshob_info->osnib_size);
+
+	if (!osnibr_addr) {
+		pr_err("ipc_read_osnib_extend: ioremap of osnib failed!\n");
+		ret = -ENOMEM;
+		goto unmap_oshob_addr;
+	}
+
+	/* Make a chksum verification for osnib */
+	for (i = 0; i < oshob_info->osnib_size; i++)
+		check += readb(osnibr_addr + i);
+
+	if (check) {
+		pr_err("ipc_read_osnib_extend: WARNING!!! osnib chksum verification faild, reset all osnib data!\n");
+		pr_debug("ipc_read_osnib_extend: remap osnibw ptr addr=%pa size %d\n",
+			&oshob_info->osnibw_ptr, oshob_info->osnib_size);
+
+		osnibw_addr = ioremap_nocache(oshob_info->osnibw_ptr,
+					      oshob_info->osnib_size);
+		if (!osnibw_addr) {
+			pr_err("ipc_read_osnib_extend: cannot remap osnib write ptr\n");
+			goto unmap_oshob_addr;
+		}
+
+		for (i = 0; i < oshob_info->osnib_size; i++)
+			writeb(0, osnibw_addr + i);
+
+		/* Send command. The mask to be written identifies which      */
+		/* double words of the OSNIB osnib_size bytes will be written.*/
+		/* So the mask is coded on 4 bytes.                           */
+		sptr_dw_mask = 0xFFFFFFFF;
+		if (!oops_in_progress)
+			rpmsg_send_raw_command(ipcutil_instance,
+				RP_WRITE_OSNIB,
+				0, NULL, NULL, 0, 0, sptr_dw_mask, 0);
+		else
+			intel_scu_ipc_raw_cmd(RP_WRITE_OSNIB, 0,
+					NULL, 0, NULL, 0,
+					0, sptr_dw_mask);
+
+		iounmap(osnibw_addr);
+	}
+
+	ptr = data;
+	pr_debug("ipc_read_osnib_extend: OSNIB content:\n");
+	for (i = 0; i < len; i++) {
+		*ptr = readb(osnibr_addr + offset + i);
+		pr_debug("addr(remapped)=%p, offset=%2x, value=%2x\n",
+			(osnibr_addr+offset+i), offset+i, *ptr);
+		ptr++;
+	}
+
+	iounmap(osnibr_addr);
+
+unmap_oshob_addr:
+	iounmap(oshob_addr);
+exit:
+	return ret;
+}
+
+/* This function is used for the extended OSHOB/OSNIB. */
+int intel_scu_ipc_write_osnib_extend(u8 *data, int len, int offset)
+{
+	int i;
+	int ret = 0;
+	u8 *posnib_data, *ptr;
+	u8 check = 0, chksum = 0;
+	void __iomem *oshob_addr, *osnibw_addr, *osnibr_addr;
+	u32 sptr_dw_mask;
+
+	if (!oops_in_progress)
+		rpmsg_global_lock();
+
+	pr_debug("ipc_write_osnib_extend: remap OSHOB addr %pa size %d\n",
+		 &oshob_info->oshob_base, oshob_info->oshob_size);
+
+	oshob_addr = ioremap_nocache(oshob_info->oshob_base,
+				     oshob_info->oshob_size);
+	if (!oshob_addr) {
+		pr_err("ipc_write_osnib_extend: ioremap failed!\n");
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	osnibr_addr = ioremap_nocache(oshob_info->osnibr_ptr,
+				      oshob_info->osnib_size);
+
+	if (!osnibr_addr) {
+		pr_err("ipc_write_osnib_extend: ioremap of osnib failed!\n");
+		ret = -ENOMEM;
+		goto unmap_oshob_addr;
+	}
+
+	/* Dump osnib data for generate chksum */
+	posnib_data = kzalloc(oshob_info->osnib_size, GFP_KERNEL);
+
+	if (posnib_data == NULL) {
+		pr_err("ipc_write_osnib_extend: The buffer for getting OSNIB is NULL\n");
+		ret = -EFAULT;
+		iounmap(osnibr_addr);
+		goto unmap_oshob_addr;
+	}
+
+	ptr = posnib_data;
+	for (i = 0; i < oshob_info->osnib_size; i++) {
+		*ptr = readb(osnibr_addr + i);
+		check += *ptr;
+		ptr++;
+	}
+
+	memcpy(posnib_data + offset, data, len);
+
+	if (check) {
+		pr_err("ipc_write_osnib_extend: WARNING!!! OSNIB data chksum verification FAILED!\n");
+	} else {
+		/* generate chksum.  */
+		pr_debug("ipc_write_osnib_extend: generating checksum\n");
+		for (i = 0; i < oshob_info->osnib_size - 1; i++)
+			chksum += *(posnib_data + i);
+		/* Fill checksum at the CHECKSUM offset place in OSNIB. */
+		*(posnib_data +
+		    offsetof(struct scu_ipc_osnib, checksum)) = ~chksum + 1;
+
+		if (invalid_checksum)
+			*(posnib_data +
+			    offsetof(struct scu_ipc_osnib, checksum)) = ~chksum;
+	}
+
+	pr_debug("ipc_write_osnib_extend: osnibw ptr addr=%pa size %d\n",
+		 &oshob_info->osnibw_ptr, oshob_info->osnib_size);
+
+	osnibw_addr = ioremap_nocache(oshob_info->osnibw_ptr,
+				      oshob_info->osnib_size);
+	if (!osnibw_addr) {
+		pr_err("scu_ipc_write_osnib_extend: ioremap failed!\n");
+		ret = -ENOMEM;
+		goto exit_osnib;
+	}
+
+	for (i = 0; i < oshob_info->osnib_size; i++)
+		writeb(*(posnib_data + i), (osnibw_addr + i));
+
+	/* Send command. The mask to be written identifies which            */
+	/* double words of the OSNIB osnib_size bytes will be written.*/
+	/* So the mask is coded on 4 bytes.                                 */
+	sptr_dw_mask = 0xFFFFFFFF;
+	if (!oops_in_progress)
+		ret = rpmsg_send_raw_command(ipcutil_instance,
+			RP_WRITE_OSNIB, 0, NULL, NULL,
+			0, 0, sptr_dw_mask, 0);
+	else
+		ret = intel_scu_ipc_raw_cmd(
+			RP_WRITE_OSNIB, 0,
+			NULL, 0,
+			NULL, 0,
+			0, sptr_dw_mask);
+
+	if (ret < 0)
+		pr_err("scu_ipc_write_osnib_extend: ipc_write_osnib failed!!\n");
+
+	iounmap(osnibw_addr);
+
+exit_osnib:
+	iounmap(osnibr_addr);
+
+	kfree(posnib_data);
+
+unmap_oshob_addr:
+	iounmap(oshob_addr);
+exit:
+	if (!oops_in_progress)
+		rpmsg_global_unlock();
+
+	return ret;
+}
+
+/*
+ * This writes the reboot reason in the OSNIB (factor and avoid any overlap)
+ */
+int intel_scu_ipc_write_osnib_rr(u8 rr)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(osnib_target_oses); i++) {
+		if (osnib_target_oses[i].id == rr) {
+			pr_info("intel_scu_ipc_write_osnib_rr: reboot reason: %s\n",
+				osnib_target_oses[i].target_os_name);
+			return oshob_info->scu_ipc_write_osnib(
+				&rr,
+				1,
+				offsetof(struct scu_ipc_osnib, target_mode));
+		}
+	}
+
+	pr_warn("intel_scu_ipc_write_osnib_rr: reboot reason [0x%x] not found\n",
+			rr);
+	return -1;
+}
+EXPORT_SYMBOL_GPL(intel_scu_ipc_write_osnib_rr);
+
+/*
+ * This reads the reboot reason from the OSNIB (factor)
+ */
+int intel_scu_ipc_read_osnib_rr(u8 *rr)
+{
+	pr_debug("intel_scu_ipc_read_osnib_rr: read reboot reason\n");
+	return oshob_info->scu_ipc_read_osnib(
+			rr,
+			1,
+			offsetof(struct scu_ipc_osnib, target_mode));
+}
+EXPORT_SYMBOL_GPL(intel_scu_ipc_read_osnib_rr);
+
+
+int intel_scu_ipc_read_oshob_extend_param(void __iomem *poshob_addr)
+{
+	u16 struct_offs;
+	int buff_size;
+
+	/* Get defined OSNIB space size. */
+	oshob_info->osnib_size = readw(
+			    poshob_addr +
+			    offsetof(struct scu_ipc_oshob_extend, intel_size));
+
+	if (oshob_info->osnib_size == 0) {
+		pr_err("ipc_read_oshob_extend_param: OSNIB size is null!\n");
 		return -EFAULT;
+	}
+
+	/* Get defined OEM space size. */
+	oshob_info->oemnib_size = readw(
+			    poshob_addr +
+			    offsetof(struct scu_ipc_oshob_extend, oem_size));
+
+	if (oshob_info->oemnib_size == 0) {
+		pr_err("ipc_read_oshob_extend_param: OEMNIB size is null!\n");
+		return -EFAULT;
+	}
+
+	/* Set SCU and IA trace buffers. Size calculated in bytes here. */
+	if ((oshob_info->platform_type == INTEL_MID_CPU_CHIP_TANGIER) ||
+		(oshob_info->platform_type == INTEL_MID_CPU_CHIP_ANNIEDALE))
+		buff_size = OSHOB_SCU_BUF_MRFLD_DW_SIZE*4;
+	else
+		buff_size = OSHOB_SCU_BUF_BASE_DW_SIZE*4;
+
+	intel_scu_ipc_read_oshob(
+		(u8 *)(oshob_info->scu_trace),
+		buff_size,
+		offsetof(struct scu_ipc_oshob_extend, scutxl));
+
+	struct_offs = offsetof(struct scu_ipc_oshob_extend, iatxl) +
+			    oshob_info->offs_add;
+	oshob_info->ia_trace = readl(poshob_addr + struct_offs);
+
+	/* Set pointers */
+	struct_offs = offsetof(struct scu_ipc_oshob_extend, r_intel_ptr) +
+			    oshob_info->offs_add;
+	oshob_info->osnibr_ptr = readl(poshob_addr + struct_offs);
+
+	if (!oshob_info->osnibr_ptr) {
+		pr_err("ipc_read_oshob_extend_param: R_INTEL_POINTER is NULL!\n");
+		return -ENOMEM;
+	}
+
+	struct_offs = offsetof(struct scu_ipc_oshob_extend, w_intel_ptr) +
+			    oshob_info->offs_add;
+	oshob_info->osnibw_ptr = readl(poshob_addr + struct_offs);
+
+	if (oshob_info->osnibw_ptr == 0) {
+		/* workaround here for BZ 2914 */
+		oshob_info->osnibw_ptr = 0xFFFF3400;
+		pr_err(
+		    "ipc_read_oshob_extend_param: ERR: osnibw from oshob is 0, manually set it here\n");
+	}
+
+	pr_info("(extend oshob) osnib read ptr = %pa\n",
+		&oshob_info->osnibr_ptr);
+	pr_info("(extend oshob) osnib write ptr = %pa\n",
+		&oshob_info->osnibw_ptr);
+
+	struct_offs = offsetof(struct scu_ipc_oshob_extend, r_oem_ptr) +
+			    oshob_info->offs_add;
+	oshob_info->oemnibr_ptr = readl(poshob_addr + struct_offs);
+
+	if (!oshob_info->oemnibr_ptr) {
+		pr_err("ipc_read_oshob_extend_param: R_OEM_POINTER is NULL!\n");
+		return -ENOMEM;
+	}
+
+	struct_offs = offsetof(struct scu_ipc_oshob_extend, w_oem_ptr) +
+			    oshob_info->offs_add;
+	oshob_info->oemnibw_ptr = readl(poshob_addr + struct_offs);
+
+	if (!oshob_info->oemnibw_ptr) {
+		pr_err("ipc_read_oshob_extend_param: W_OEM_POINTER is NULL!\n");
+		return -ENOMEM;
+	}
+
+	oshob_info->scu_ipc_write_osnib =
+					&intel_scu_ipc_write_osnib_extend;
+	oshob_info->scu_ipc_read_osnib =
+					&intel_scu_ipc_read_osnib_extend;
+
+	pr_info(
+		"Using extended oshob structure size = %d bytes\n",
+		oshob_info->oshob_size);
+	pr_info(
+		"OSNIB Intel size = %d bytes OEMNIB size = %d bytes\n",
+		oshob_info->osnib_size, oshob_info->oemnib_size);
+
+	if (oshob_info->platform_type == INTEL_MID_CPU_CHIP_CLOVERVIEW) {
+		if ((oshob_info->oshob_majrev >= 1) &&
+		    (oshob_info->oshob_minrev >= 1)) {
+			/* CLVP and correct version of the oshob. */
+			oshob_info->scu_trace_buf =
+				readl(poshob_addr +
+				      offsetof(struct scu_ipc_oshob_extend,
+					       sculogbufferaddr));
+			oshob_info->scu_trace_size =
+				readl(poshob_addr +
+				      offsetof(struct scu_ipc_oshob_extend,
+					       sculogbuffersize));
+		}
+		if ((oshob_info->oshob_majrev >= 1) &&
+		    (oshob_info->oshob_minrev >= 3)) {
+			/* CLVP and correct version of the oshob. */
+			oshob_info->nvram_addr =
+				readl(poshob_addr +
+				      offsetof(struct scu_ipc_oshob_extend,
+					       nvram_addr));
+			oshob_info->nvram_size =
+				readl(poshob_addr +
+				      offsetof(struct scu_ipc_oshob_extend,
+					       nvram_size));
+		}
+	}
 	return 0;
 }
 
+int intel_scu_ipc_read_oshob_extend_param_v14(void __iomem *poshob_addr)
+{
+	u16 struct_offs;
+	int buff_size;
+
+	/* set intel OSNIB space size. */
+	oshob_info->osnib_size = OSNIB_SIZE;
+
+	/* set OEM OSNIB space size. */
+	oshob_info->oemnib_size = OSNIB_OEM_RSVD_SIZE;
+
+	/* Set SCU and IA trace buffers. Size calculated in bytes here. */
+	if (oshob_info->platform_type == INTEL_MID_CPU_CHIP_TANGIER)
+		buff_size = OSHOB_SCU_BUF_MRFLD_DW_SIZE*4;
+	else
+		buff_size = OSHOB_SCU_BUF_BASE_DW_SIZE*4;
+
+	intel_scu_ipc_read_oshob(
+		(u8 *)(oshob_info->scu_trace),
+		buff_size,
+		offsetof(struct scu_ipc_oshob_extend_v14, scutxl));
+
+	struct_offs = offsetof(struct scu_ipc_oshob_extend_v14, iatxl) +
+			    oshob_info->offs_add;
+	oshob_info->ia_trace = readl(poshob_addr + struct_offs);
+
+	/* Set pointers */
+	struct_offs = offsetof(struct scu_ipc_oshob_extend_v14, osnib_ptr) +
+			    oshob_info->offs_add;
+	oshob_info->osnibr_ptr = readl(poshob_addr + struct_offs);
+
+	if (!oshob_info->osnibr_ptr) {
+		pr_err("ipc_read_oshob_extend_param_v14: R_INTEL_POINTER is NULL!\n");
+		return -ENOMEM;
+	}
+
+	/* write and read pointer are the same */
+	oshob_info->osnibw_ptr = oshob_info->osnibr_ptr;
+
+	pr_info("(latest extend oshob) osnib ptr = %pa\n",
+		&oshob_info->osnibr_ptr);
+
+	/* OEM NIB point at offset OSNIB_SIZE */
+	oshob_info->oemnibr_ptr = oshob_info->osnibr_ptr + OSNIB_SIZE;
+
+	/* write and read pinter are the same */
+	oshob_info->oemnibw_ptr = oshob_info->oemnibr_ptr;
+
+	/* we use tha same function for all extended OSHOB structure */
+	oshob_info->scu_ipc_write_osnib =
+					&intel_scu_ipc_write_osnib_extend;
+	oshob_info->scu_ipc_read_osnib =
+					&intel_scu_ipc_read_osnib_extend;
+
+	pr_info(
+		"Using latest extended oshob structure size = %d bytes\n",
+		oshob_info->oshob_size);
+	pr_info(
+		"OSNIB Intel size = %d bytes OEMNIB size = %d bytes\n",
+		oshob_info->osnib_size, oshob_info->oemnib_size);
+
+	struct_offs = offsetof(struct scu_ipc_oshob_extend_v14,
+			    sculogbufferaddr) + oshob_info->offs_add;
+	oshob_info->scu_trace_buf = readl(poshob_addr + struct_offs);
+
+	struct_offs = offsetof(struct scu_ipc_oshob_extend_v14,
+			    sculogbuffersize) + oshob_info->offs_add;
+	oshob_info->scu_trace_size = readl(poshob_addr + struct_offs);
+
+	/* NVRAM after Intel and OEM OSNIB */
+	oshob_info->nvram_addr = oshob_info->oemnibr_ptr + OSNIB_OEM_RSVD_SIZE;
+	oshob_info->nvram_size = OSNIB_NVRAM_SIZE;
+
+	return 0;
+}
+
+int intel_scu_ipc_read_oshob_def_param(void __iomem *poshob_addr)
+{
+	u16 struct_offs;
+	int ret = 0;
+	int buff_size;
+
+	oshob_info->oshob_majrev = OSHOB_REV_MAJ_DEFAULT;
+	oshob_info->oshob_minrev = OSHOB_REV_MIN_DEFAULT;
+	oshob_info->osnib_size = OSNIB_SIZE;
+	oshob_info->oemnib_size = 0;
+
+	/* Set OSHOB total size */
+	if ((oshob_info->platform_type == INTEL_MID_CPU_CHIP_TANGIER) ||
+		(oshob_info->platform_type == INTEL_MID_CPU_CHIP_ANNIEDALE))
+		oshob_info->oshob_size = OSHOB_MRFLD_SIZE;
+	else
+		oshob_info->oshob_size = OSHOB_SIZE;
+
+	/* Set SCU and IA trace buffers. Size calculated in bytes here. */
+	if ((oshob_info->platform_type == INTEL_MID_CPU_CHIP_TANGIER) ||
+		(oshob_info->platform_type == INTEL_MID_CPU_CHIP_ANNIEDALE))
+		buff_size = OSHOB_SCU_BUF_MRFLD_DW_SIZE*4;
+	else
+		buff_size = OSHOB_SCU_BUF_BASE_DW_SIZE*4;
+
+	ret = intel_scu_ipc_read_oshob(
+		(u8 *)(oshob_info->scu_trace),
+		buff_size,
+		offsetof(struct scu_ipc_oshob, scutxl));
+
+	if (ret != 0) {
+		pr_err("Cannot get scutxl data from OSHOB\n");
+		return ret;
+	}
+
+	struct_offs = offsetof(struct scu_ipc_oshob, iatxl) +
+			    oshob_info->offs_add;
+	oshob_info->ia_trace = readl(poshob_addr + struct_offs);
+
+	oshob_info->scu_ipc_write_osnib =
+					&intel_scu_ipc_write_osnib;
+	oshob_info->scu_ipc_read_osnib =
+					&intel_scu_ipc_read_osnib;
+
+	struct_offs = offsetof(struct scu_ipc_oshob, osnibr) +
+			    oshob_info->offs_add;
+	oshob_info->osnibr_ptr = (unsigned long)(poshob_addr + struct_offs);
+
+	pr_info("Using default oshob structure size = %d bytes\n",
+		oshob_info->oshob_size);
+
+	pr_debug("Using default oshob structure OSNIB read ptr %pa\n",
+		 &oshob_info->osnibr_ptr);
+
+	return ret;
+}
+
+int intel_scu_ipc_read_oshob_info(void)
+{
+	int i, ret = 0;
+	u32 oshob_base = 0;
+	void __iomem *oshob_addr;
+	unsigned char oshob_magic[4];
+
+	/* Notice that SCU still returns address coded in 4 bytes. */
+	ret = rpmsg_send_command(ipcutil_instance,
+				 RP_GET_HOBADDR, 0, NULL, &oshob_base, 0, 1);
+
+	if (ret < 0) {
+		pr_err("ipc_read_oshob cmd failed!!\n");
+		goto exit;
+	}
+
+	/* At this stage, we still don't know which OSHOB type (default or  */
+	/* extended) can be used, and the size of resource to be remapped   */
+	/* depends on the type of OSHOB structure to be used.               */
+	/* So just remap the minimum size to get the needed bytes of the    */
+	/* OSHOB zone.                                                      */
+	oshob_addr = ioremap_nocache(oshob_base, OSHOB_EXTEND_DESC_SIZE);
+
+	if (!oshob_addr) {
+		pr_err("oshob addr ioremap failed!\n");
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	pr_info("(oshob) base addr = 0x%x\n", oshob_base);
+
+	/* Store base address. */
+	oshob_info->oshob_base = oshob_base;
+
+	oshob_info->platform_type = intel_mid_identify_cpu();
+
+	/*
+	 * Buffer is allocated using kmalloc. Memory is not initialized and
+	 * these fields are not updated in all the branches.
+	 */
+	oshob_info->scu_trace_buf = 0;
+	oshob_info->scu_trace_size = 0;
+	oshob_info->nvram_addr = 0;
+	oshob_info->nvram_size = 0;
+
+	if ((oshob_info->platform_type == INTEL_MID_CPU_CHIP_TANGIER) ||
+		(oshob_info->platform_type == INTEL_MID_CPU_CHIP_ANNIEDALE)) {
+		pr_info("(oshob) identified platform = INTEL_MID_CPU_CHIP_TANGIER|ANNIEDALE\n");
+
+		/* By default we already have 1 dword reserved in the OSHOB */
+		/* structures for SCU buffer. For Merrifield, SCU size to   */
+		/* consider is OSHOB_SCU_BUF_MRFLD_DW_SIZE dwords. So with  */
+		/* Merrifield, when calculating structures offsets, we have */
+		/* to add (OSHOB_SCU_BUF_MRFLD_DW_SIZE - 1) dwords, with    */
+		/* the offsets calculated in bytes.                         */
+		oshob_info->offs_add = (OSHOB_SCU_BUF_MRFLD_DW_SIZE - 1)*4;
+	} else
+		oshob_info->offs_add = 0;
+
+	pr_debug("(oshob) additional offset = 0x%x\n", oshob_info->offs_add);
+
+	/* Extract magic number that will help identifying the good OSHOB  */
+	/* that is going to be used.                                       */
+	for (i = 0; i < OSHOB_HEADER_MAGIC_SIZE; i = i+1)
+		oshob_magic[i] = readb(oshob_addr + i);
+
+	pr_debug("(oshob) OSHOB magic = %x %x %x %x\n",
+		oshob_magic[0], oshob_magic[1], oshob_magic[2], oshob_magic[3]);
+
+	if (strncmp(oshob_magic, OSHOB_MAGIC_NUMBER,
+		    OSHOB_HEADER_MAGIC_SIZE) == 0) {
+		/* Get OSHOB version and size which are commoon to all */
+		/* extended OSHOB structure. */
+		oshob_info->oshob_majrev = readb(oshob_addr +
+			offsetof(struct scu_ipc_oshob_extend, rev_major));
+		oshob_info->oshob_minrev = readb(oshob_addr +
+			offsetof(struct scu_ipc_oshob_extend, rev_minor));
+		oshob_info->oshob_size = readw(oshob_addr +
+			offsetof(struct scu_ipc_oshob_extend, oshob_size));
+
+		pr_info("(oshob) oshob version = %x.%x\n",
+			oshob_info->oshob_majrev, oshob_info->oshob_minrev);
+
+		if ((oshob_info->oshob_majrev >= 1) &&
+		    (oshob_info->oshob_minrev >= 4)) {
+			if (intel_scu_ipc_read_oshob_extend_param_v14(
+					oshob_addr) != 0) {
+				ret = -EFAULT;
+				goto unmap_oshob;
+			}
+		} else {
+			if (intel_scu_ipc_read_oshob_extend_param(
+					oshob_addr) != 0) {
+				ret = -EFAULT;
+				goto unmap_oshob;
+			}
+		}
+
+		if ((oshob_info->platform_type == INTEL_MID_CPU_CHIP_TANGIER) ||
+		(oshob_info->platform_type == INTEL_MID_CPU_CHIP_ANNIEDALE)) {
+			pr_info("(extend oshob) SCU buffer size is %d bytes\n",
+				OSHOB_SCU_BUF_MRFLD_DW_SIZE*4);
+		} else {
+			pr_debug("(extend oshob) SCU buffer size is %d bytes\n",
+				OSHOB_SCU_BUF_BASE_DW_SIZE*4);
+		}
+	} else {
+		ret = intel_scu_ipc_read_oshob_def_param(oshob_addr);
+
+		if ((oshob_info->platform_type == INTEL_MID_CPU_CHIP_TANGIER) ||
+		(oshob_info->platform_type == INTEL_MID_CPU_CHIP_ANNIEDALE)) {
+			pr_info("(default oshob) SCU buffer size is %d bytes\n",
+				OSHOB_SCU_BUF_MRFLD_DW_SIZE*4);
+		} else {
+			pr_debug("(default oshob) SCU buffer size is %d bytes\n",
+				OSHOB_SCU_BUF_BASE_DW_SIZE*4);
+		}
+	}
+
+unmap_oshob:
+	iounmap(oshob_addr);
+
+exit:
+	return ret;
+}
+
+/*
+ * This writes the OEMNIB buffer in the internal RAM of the SCU.
+ */
+int intel_scu_ipc_write_oemnib(u8 *oemnib, int len, int offset)
+{
+	int i;
+	int ret = 0;
+	void __iomem *oshob_addr, *oemnibw_addr;
+	u32 sptr_dw_mask;
+
+	if (oemnib == NULL) {
+		pr_err("ipc_write_oemnib: passed buffer for writting OEMNIB is NULL\n");
+		return -EINVAL;
+	}
+
+	rpmsg_global_lock();
+
+	pr_debug("ipc_write_oemnib: OSHOB addr %pa size %d\n",
+		&oshob_info->oshob_base, oshob_info->oshob_size);
+
+	oshob_addr = ioremap_nocache(oshob_info->oshob_base,
+				     oshob_info->oshob_size);
+	if (!oshob_addr) {
+		pr_err("ipc_write_oemnib: ioremap failed!\n");
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	if ((len == 0) || (len > oshob_info->oemnib_size)) {
+		pr_err(
+			"ipc_write_oemnib: bad OEMNIB data length (%d) to write (max=%d bytes)\n",
+			    len, oshob_info->oemnib_size);
+		ret = -EINVAL;
+		goto unmap_oshob_addr;
+	}
+
+	/* offset shall start at 0 from the OEMNIB base address and shall */
+	/* not exceed the OEMNIB allowed size.                            */
+	if ((offset < 0) || (offset >= oshob_info->oemnib_size) ||
+	    (len + offset > oshob_info->oemnib_size)) {
+		pr_err(
+			"ipc_write_oemnib: Bad OEMNIB data offset/len for writing (offset=%d , len=%d)\n",
+			offset, len);
+		ret = -EINVAL;
+		goto unmap_oshob_addr;
+	}
+
+	pr_debug("ipc_write_oemnib: POEMNIB oemnibw ptr %pa size %d\n",
+		&oshob_info->oemnibw_ptr, oshob_info->oemnib_size);
+
+	oemnibw_addr = ioremap_nocache(oshob_info->oemnibw_ptr,
+				       oshob_info->oemnib_size);
+	if (!oemnibw_addr) {
+		pr_err("ipc_write_oemnib: ioremap failed!\n");
+		ret = -ENOMEM;
+		goto unmap_oshob_addr;
+	}
+
+	for (i = 0; i < len; i++)
+		writeb(*(oemnib + i), (oemnibw_addr + offset + i));
+
+	/* Send command. The mask to be written identifies which double */
+	/* words of the OSNIB oemnib_size bytes will be written.        */
+	/* So the mask is coded on 4 bytes.                             */
+	sptr_dw_mask = 0xFFFFFFFF;
+	if ((oshob_info->oshob_majrev >= 1) &&
+	    (oshob_info->oshob_minrev >= 4)) {
+		sptr_dw_mask = 0xFFFFFFFF;
+		/* OEM NIB lies on region 1, 2, and 3 */
+		ret = rpmsg_send_raw_command(ipcutil_instance,
+			RP_WRITE_OSNIB, 0, NULL, NULL,
+			0, 0, sptr_dw_mask, 1);
+		if (ret < 0) {
+			pr_err("ipc_write_oemnib: ipc_write_osnib failed!!\n");
+			goto unmap_oemnibw_addr;
+		}
+		ret = rpmsg_send_raw_command(ipcutil_instance,
+			RP_WRITE_OSNIB, 0, NULL, NULL,
+			0, 0, sptr_dw_mask, 2);
+		if (ret < 0) {
+			pr_err("ipc_write_oemnib: ipc_write_osnib failed!!\n");
+			goto unmap_oemnibw_addr;
+		}
+		ret = rpmsg_send_raw_command(ipcutil_instance,
+			RP_WRITE_OSNIB, 0, NULL, NULL,
+			0, 0, sptr_dw_mask, 3);
+		if (ret < 0) {
+			pr_err("ipc_write_oemnib: ipc_write_osnib failed!!\n");
+			goto unmap_oemnibw_addr;
+		}
+	} else {
+		ret = rpmsg_send_raw_command(ipcutil_instance,
+			RP_WRITE_OEMNIB, 0, NULL, NULL,
+			0, 0, sptr_dw_mask, 0);
+		if (ret < 0) {
+			pr_err("ipc_write_oemnib: ipc_write_osnib failed!!\n");
+			goto unmap_oemnibw_addr;
+		}
+	}
+
+unmap_oemnibw_addr:
+	iounmap(oemnibw_addr);
+
+unmap_oshob_addr:
+	iounmap(oshob_addr);
+exit:
+	rpmsg_global_unlock();
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(intel_scu_ipc_write_oemnib);
+
+#ifdef DUMP_OSNIB
+/*
+ * This reads the PMIT from the OSHOB (pointer to interrupt tree)
+ */
+static int intel_scu_ipc_read_oshob_it_tree(u32 *ptr)
+{
+	u16 struct_offs;
+
+	pr_debug("intel_scu_ipc_read_oshob_it_tree: read IT tree\n");
+
+	if ((oshob_info->oshob_majrev == OSHOB_REV_MAJ_DEFAULT) &&
+	    (oshob_info->oshob_minrev == OSHOB_REV_MIN_DEFAULT)) {
+		struct_offs = offsetof(struct scu_ipc_oshob, pmit) +
+				    oshob_info->offs_add;
+	} else if ((oshob_info->oshob_majrev >= 1) &&
+		   (oshob_info->oshob_minrev >= 4)) {
+		struct_offs = offsetof(struct scu_ipc_oshob_extend_v14, pmit) +
+				    oshob_info->offs_add;
+	} else {
+		struct_offs = offsetof(struct scu_ipc_oshob_extend, pmit) +
+				    oshob_info->offs_add;
+	}
+	return intel_scu_ipc_read_oshob(
+			(u8 *) ptr,
+			4,
+			struct_offs);
+}
+#endif
+
+/*
+ * This reads the RESETIRQ1 or RESETSRC0 from the OSNIB
+ */
+#ifdef DUMP_OSNIB
+static int intel_scu_ipc_read_osnib_reset_ev1(u8 *rev1)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(chip_reset_events); i++) {
+		if (chip_reset_events[i].id == oshob_info->platform_type) {
+			pr_debug(
+				"intel_scu_ipc_read_osnib_rst_ev1: read %s\n",
+				chip_reset_events[i].reset_ev1_name);
+
+			return oshob_info->scu_ipc_read_osnib(
+				    rev1,
+				    1,
+				    offsetof(struct scu_ipc_osnib, reset_ev1));
+		}
+	}
+
+	pr_err("intel_scu_ipc_read_osnib_reset_ev1: param not found\n");
+	return -EFAULT;
+}
+#endif
+
+/*
+ * This reads the RESETIRQ2 or RESETSRC1 from the OSNIB
+ */
+#ifdef DUMP_OSNIB
+static int intel_scu_ipc_read_osnib_reset_ev2(u8 *rev2)
+{
+	int i;
+
+	for (i = 0 ; i < ARRAY_SIZE(chip_reset_events); i++) {
+		if (chip_reset_events[i].id == oshob_info->platform_type) {
+			pr_debug(
+				"intel_scu_ipc_read_osnib_rst_ev2: read %s\n",
+				chip_reset_events[i].reset_ev2_name);
+
+			return oshob_info->scu_ipc_read_osnib(
+				rev2,
+				1,
+				offsetof(struct scu_ipc_osnib, reset_ev2));
+		}
+	}
+
+	pr_err("intel_scu_ipc_read_osnib_reset_ev2: param not found\n");
+	return -EFAULT;
+}
+#endif
+
+/*
+ * This reads the WD from the OSNIB
+ */
+int intel_scu_ipc_read_osnib_wd(u8 *wd)
+{
+	pr_debug("intel_scu_ipc_read_osnib_wd: read WATCHDOG\n");
+
+	return oshob_info->scu_ipc_read_osnib(
+			wd,
+			1,
+			offsetof(struct scu_ipc_osnib, wd_count));
+}
+
+/*
+ * This writes the WD in the OSNIB
+ */
+int intel_scu_ipc_write_osnib_wd(u8 *wd)
+{
+	pr_info("intel_scu_ipc_write_osnib_wd: write WATCHDOG %x\n", *wd);
+
+	return oshob_info->scu_ipc_write_osnib(
+			wd,
+			1,
+			offsetof(struct scu_ipc_osnib, wd_count));
+}
+EXPORT_SYMBOL_GPL(intel_scu_ipc_write_osnib_wd);
+
+/*
+ * Get SCU trace buffer physical address if available
+ */
+phys_addr_t intel_scu_ipc_get_scu_trace_buffer(void)
+{
+	if (oshob_info == NULL)
+		return 0;
+	return oshob_info->scu_trace_buf;
+}
+EXPORT_SYMBOL_GPL(intel_scu_ipc_get_scu_trace_buffer);
+
+/*
+ * Get SCU trace buffer size
+ */
+u32 intel_scu_ipc_get_scu_trace_buffer_size(void)
+{
+	if (oshob_info == NULL)
+		return 0;
+	return oshob_info->scu_trace_size;
+}
+EXPORT_SYMBOL_GPL(intel_scu_ipc_get_scu_trace_buffer_size);
+
+/*
+ * Get nvram size
+ */
+u32 intel_scu_ipc_get_nvram_size(void)
+{
+	if (oshob_info == NULL)
+		return 0;
+	return oshob_info->nvram_size;
+}
+EXPORT_SYMBOL_GPL(intel_scu_ipc_get_nvram_size);
+
+/*
+ * Get nvram addr
+ */
+phys_addr_t intel_scu_ipc_get_nvram_addr(void)
+{
+	if (oshob_info == NULL)
+		return 0;
+	return oshob_info->nvram_addr;
+}
+EXPORT_SYMBOL_GPL(intel_scu_ipc_get_nvram_addr);
+
+/*
+ * Get SCU fabric error buffer1 offset
+ */
+u32 intel_scu_ipc_get_fabricerror_buf1_offset(void)
+{
+	if (oshob_info == NULL)
+		return 0;
+
+	if (oshob_info->platform_type == INTEL_MID_CPU_CHIP_CLOVERVIEW)
+		return offsetof(struct scu_ipc_oshob_extend, fabricerrlog1);
+	else if ((oshob_info->platform_type == INTEL_MID_CPU_CHIP_TANGIER) ||
+		(oshob_info->platform_type == INTEL_MID_CPU_CHIP_ANNIEDALE))
+		if ((oshob_info->oshob_majrev >= 1) &&
+		    (oshob_info->oshob_minrev >= 4)) {
+			return offsetof(struct scu_ipc_oshob_extend_v14,
+					fabricerrlog) + oshob_info->offs_add;
+		} else {
+			return offsetof(struct scu_ipc_oshob,
+					fab_err_log) + oshob_info->offs_add;
+		}
+	else {
+		pr_err("scu_ipc_get_fabricerror_buf_offset: platform not recognized!\n");
+		return 0;
+	}
+}
+
+/*
+ * Get SCU fabric error buffer2 offset
+ */
+u32 intel_scu_ipc_get_fabricerror_buf2_offset(void)
+{
+	if (oshob_info == NULL)
+		return 0;
+
+	if (oshob_info->platform_type == INTEL_MID_CPU_CHIP_CLOVERVIEW)
+		return offsetof(struct scu_ipc_oshob_extend, fabricerrlog2);
+	else {
+		pr_warn("scu_ipc_get_fabricerror_buf2_offset: not supported for this platform!\n");
+		return 0;
+	}
+}
+
+
+/*
+ * This reads the ALARM from the OSNIB
+ */
+#ifdef DUMP_OSNIB
+static int intel_scu_ipc_read_osnib_alarm(u8 *alarm)
+{
+	pr_debug("intel_scu_ipc_read_osnib_alarm: read ALARM\n");
+
+	return oshob_info->scu_ipc_read_osnib(
+			alarm,
+			1,
+			offsetof(struct scu_ipc_osnib, alarm));
+}
+#endif
+
+/*
+ * This reads the WAKESRC from the OSNIB
+ */
+#ifdef DUMP_OSNIB
+#define WD_KERNEL_PANIC_BIT 0x08 /* bit 3 to set kernel panic */
+static int intel_scu_ipc_read_osnib_wakesrc(u8 *wksrc)
+{
+	u8 wd;
+	int ret;
+
+	pr_debug("intel_scu_ipc_read_osnib_wakesrc: read WAKESRC\n");
+
+	ret = oshob_info->scu_ipc_read_osnib(
+			wksrc,
+			1,
+			offsetof(struct scu_ipc_osnib, wakesrc));
+
+	/* Check kernel panic wake source */
+	if (!ret && *wksrc == WAKE_REAL_RESET) {
+		ret = intel_scu_ipc_read_osnib_wd(&wd);
+
+		if (!ret && (wd & WD_KERNEL_PANIC_BIT)) {
+			*wksrc = WAKE_KERNEL_PANIC;
+
+			wd &= (~WD_KERNEL_PANIC_BIT);
+			ret = intel_scu_ipc_write_osnib_wd(&wd);
+			ret |= oshob_info->scu_ipc_write_osnib(
+				wksrc,
+				1,
+				offsetof(struct scu_ipc_osnib, wakesrc));
+
+			pr_debug("%s: wksrc set WAKE_KERNEL_PANIC ret:%d\n",
+					__func__, ret);
+		}
+	}
+
+	return ret;
+}
+#endif
+
+
+#define OEMNIB_BUF_DESC_LEN	4096
+
+#ifdef CONFIG_DEBUG_FS
+static int intel_scu_ipc_oshob_stat(struct seq_file *m, void *unused)
+{
+	void __iomem *osnib;
+	int i, count;
+	int ret = 0;
+
+	u32 value;
+	if ((oshob_info->oshob_majrev == OSHOB_REV_MAJ_DEFAULT) &&
+	     (oshob_info->oshob_minrev == OSHOB_REV_MIN_DEFAULT)) {
+		seq_printf(m, "DEFAULT OSHOB\n");
+		seq_printf(m, "OSHOB size : %d\n", oshob_info->oshob_size);
+		if ((oshob_info->platform_type == INTEL_MID_CPU_CHIP_TANGIER) ||
+		(oshob_info->platform_type == INTEL_MID_CPU_CHIP_ANNIEDALE)) {
+			seq_printf(m, "SCU trace : ");
+
+			for (i = 0; i < OSHOB_SCU_BUF_MRFLD_DW_SIZE; i++)
+				seq_printf(m, "%x ", oshob_info->scu_trace[i]);
+
+			seq_printf(m, "\n");
+		} else
+			seq_printf(m, "SCU trace : %x\n",
+					oshob_info->scu_trace[0]);
+
+		seq_printf(m, "IA trace  : %x\n", oshob_info->ia_trace);
+	} else {
+		seq_printf(m, "EXTENDED OSHOB v%d.%d\n",
+						oshob_info->oshob_majrev,
+						oshob_info->oshob_minrev);
+		seq_printf(m, "OSHOB size : %d\n\n", oshob_info->oshob_size);
+		if ((oshob_info->platform_type == INTEL_MID_CPU_CHIP_TANGIER) ||
+		(oshob_info->platform_type == INTEL_MID_CPU_CHIP_ANNIEDALE)) {
+			seq_printf(m, "SCU trace : ");
+
+			for (i = 0; i < OSHOB_SCU_BUF_MRFLD_DW_SIZE; i++)
+				seq_printf(m, "%x ", oshob_info->scu_trace[i]);
+
+			seq_printf(m, "\n");
+		} else
+			seq_printf(m, "SCU trace : %x\n",
+					oshob_info->scu_trace[0]);
+
+		seq_printf(m, "IA trace  : %x\n\n", oshob_info->ia_trace);
+
+		seq_printf(m, "OSNIB size : %d\n", oshob_info->osnib_size);
+		seq_printf(m, "OSNIB  read address  : %pa\n",
+			   &oshob_info->osnibr_ptr);
+		seq_printf(m, "OSNIB  write address : %pa\n",
+			   &oshob_info->osnibw_ptr);
+		/* Dump OSNIB */
+		osnib = ioremap_nocache(oshob_info->osnibr_ptr,
+						oshob_info->osnib_size);
+		if (!osnib) {
+			pr_err("Cannot remap OSNIB\n");
+			ret = -ENOMEM;
+			return ret;
+		}
+
+		i = 0;
+		count = 0; /* used for fancy presentation */
+		while (i < oshob_info->osnib_size) {
+			if (count%4 == 0) {
+				phys_addr_t cur = oshob_info->osnibr_ptr+i;
+				seq_printf(m, "\nOSNIB[%pa] ", &cur);
+			}
+
+			value = readl(osnib+i);
+			seq_printf(m, "%08x ", value);
+			i += 4;
+			count++;
+		}
+		seq_printf(m, "\n\n");
+		iounmap(osnib);
+
+		seq_printf(m, "OEMNIB size : %d\n", oshob_info->oemnib_size);
+		seq_printf(m, "OEMNIB read address  : %pa\n",
+			   &oshob_info->oemnibr_ptr);
+		seq_printf(m, "OEMNIB write address : %pa\n",
+			   &oshob_info->oemnibw_ptr);
+		seq_printf(m, "\n\n");
+	}
+	return 0;
+}
+
+static int intel_scu_ipc_oemnib_stat(struct seq_file *m, void *unused)
+{
+	void __iomem *oemnib;
+	int i, count;
+	u32 value;
+
+	/* Dump OEMNIB */
+	oemnib = ioremap_nocache(oshob_info->oemnibr_ptr,
+				oshob_info->oemnib_size);
+
+	if (!oemnib) {
+		pr_err("Cannot remap OEMNIB\n");
+		return -ENOMEM;
+	}
+
+	i = 0;
+	count = 0; /* used for fancy presentation */
+	while (i < oshob_info->oemnib_size) {
+		if (count%4 == 0) {
+			phys_addr_t cur = oshob_info->oemnibr_ptr+i;
+			seq_printf(m, "\nOEMNIB[%pa] ", &cur);
+		}
+
+		value = readl(oemnib+i);
+		seq_printf(m, "%08x ", value);
+		i += 4;
+		count++;
+	}
+	seq_printf(m, "\n\n");
+	iounmap(oemnib);
+
+	return 0;
+}
+
+static int intel_scu_ipc_oshob_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, intel_scu_ipc_oshob_stat, NULL);
+}
+
+static int intel_scu_ipc_oemnib_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, intel_scu_ipc_oemnib_stat, NULL);
+}
+
+
+/*
+*	debugfs interface: the "oemnib_write" stores the OEMNIB part of OSNIB,
+*       starting at offset ppos.
+*/
+static ssize_t intel_scu_ipc_oemnib_write(struct file *file,
+					  const char __user *buf,
+					    size_t count, loff_t *ppos)
+{
+	int ret, i;
+	u8 *posnib_data, *ptr;
+	char *ptrchar, *temp;
+
+	if ((oshob_info->oshob_majrev == OSHOB_REV_MAJ_DEFAULT) &&
+	    (oshob_info->oshob_minrev == OSHOB_REV_MIN_DEFAULT)) {
+		/* OEMNIB only usable with extended OSHOB structure. */
+		pr_err(
+		"Write OEMNIB: OEMNIB only usable with extended OSHOB structure.\n");
+		return -EFAULT;
+	}
+
+	pr_info("Write OEMNIB: number bytes = %zd\n", count);
+
+	/* Note: when the string is passed through debugfs interface, the  */
+	/* real count value includes the end of line \n. So we must take   */
+	/* care to consider count - 1 as the real number of OEM bytes.     */
+
+	if (buf == NULL) {
+		pr_err("Write OEMNIB: The passed OEMNIB buffer is NULL\n");
+		return -EINVAL;
+	}
+
+	if (count == 0) {
+		pr_err("Write OEMNIB: The OEMNIB data length to write is NULL\n");
+		return -EINVAL;
+	}
+
+	posnib_data = kzalloc(count - 1, GFP_KERNEL);
+
+	if (posnib_data == NULL) {
+		pr_err("Write OEMNIB: Cannot allocate buffer for writting OEMNIB\n");
+		return -ENOMEM;
+	}
+
+	memset(posnib_data, 0, count - 1);
+
+	temp = kzalloc(count - 1, GFP_KERNEL);
+
+	if (temp == NULL) {
+		pr_err(
+		"Write OEMNIB: Cannot allocate temp buffer for writting OEMNIB\n");
+		return -ENOMEM;
+	}
+
+	memset(temp, 0, count - 1);
+
+	if (copy_from_user(temp, buf, count - 1)) {
+		pr_err(
+		"Write OEMNIB: Cannot transfer from user buf to OEMNIB buf\n");
+		kfree(posnib_data);
+		return -EFAULT;
+	}
+
+	ptrchar = temp;
+	ptr = posnib_data;
+
+	for (i = 0; i <= count - 1; i++) {
+		if (*ptrchar >= '0' && *ptrchar <= '9')
+			*ptr = *ptrchar - '0';
+		if (*ptrchar >= 'A' && *ptrchar <= 'F')
+			*ptr = *ptrchar - 'A' + 10;
+		if (*ptrchar >= 'a' && *ptrchar <= 'f')
+			*ptr = *ptrchar - 'a' + 10;
+
+		ptrchar++;
+		ptr++;
+	}
+
+	ret = intel_scu_ipc_write_oemnib(posnib_data, count - 1, *ppos);
+
+	if (ret < 0) {
+		pr_err("Write OEMNIB: ipc write of OEMNIB failed!!\n");
+		kfree(posnib_data);
+		return ret;
+	}
+
+	kfree(posnib_data);
+	kfree(temp);
+
+	pr_info("Write OEMNIB: OEMNIB updated: count=%zd bytes\n", count);
+
+	return count;
+}
+
+/* Attach the debugfs operations methods */
+static const struct file_operations scu_ipc_oemnib_fops = {
+	.owner = THIS_MODULE,
+	.open = intel_scu_ipc_oemnib_open,
+	.read = seq_read,
+	.write = intel_scu_ipc_oemnib_write,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static const struct file_operations scu_ipc_oshob_fops = {
+	.owner = THIS_MODULE,
+	.open = intel_scu_ipc_oshob_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static struct dentry *scu_ipc_oemnib_dir;
+static struct dentry *scu_ipc_oemnib_file;
+static struct dentry *scu_ipc_oshob_file;
+
+/*
+*	debugfs interface: init interface.
+*/
+static int intel_mid_scu_ipc_oemnib_debugfs_init(void)
+{
+	/* Create debugfs directory /sys/kernel/debug/intel_scu_oshob */
+	scu_ipc_oemnib_dir = debugfs_create_dir("intel_scu_oshob", NULL);
+
+	if (!scu_ipc_oemnib_dir) {
+		pr_err("cannot create OSHOB debugfs directory\n");
+		return -1;
+	}
+
+	/* Add operations /sys/kernel/debug/intel_scu_oshob to control */
+	/* the OEM.                                                     */
+	scu_ipc_oemnib_file = debugfs_create_file("oemnib_debug",
+				S_IFREG | S_IRUSR | S_IWUSR,
+				scu_ipc_oemnib_dir,
+				NULL, &scu_ipc_oemnib_fops);
+
+	if (!scu_ipc_oemnib_file) {
+		pr_err("cannot create OEMNIB debugfs file\n");
+		debugfs_remove(scu_ipc_oemnib_dir);
+		return -1;
+	}
+
+	/* Add operations /sys/kernel/debug/intel_scu_oshob to debug OSHOB */
+	/* content.                                                         */
+	scu_ipc_oshob_file = debugfs_create_file("oshob_dump",
+				S_IFREG | S_IRUSR | S_IWUSR,
+				scu_ipc_oemnib_dir, NULL, &scu_ipc_oshob_fops);
+
+	if (!scu_ipc_oshob_file) {
+		pr_err("cannot create OSHOB debugfs file\n");
+		debugfs_remove_recursive(scu_ipc_oemnib_dir);
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+*	debugfs interface: exit interface.
+*/
+static void intel_mid_scu_ipc_oemnib_debugfs_exit(void)
+{
+	debugfs_remove_recursive(scu_ipc_oemnib_dir);
+}
+
+#define IPC_CMD_RXTX_BUF_SIZE 16
+#define IPC_CMD_INPUT_ENTRY_SIZE 16
+
+struct scu_ipc_cmd {
+	u32 sptr;
+	u32 dptr;
+	u8 cmd;
+	u8 cmdid;
+	u8 wbuf[IPC_CMD_RXTX_BUF_SIZE];
+	u32 rbuf[IPC_CMD_RXTX_BUF_SIZE / sizeof(u32)];
+	u8 inlen;
+	u8 outlen;
+};
+
+static struct scu_ipc_cmd ipc_cmd;
+
+static ssize_t intel_scu_ipc_trigger_write(struct file *file,
+					  const char __user *buf,
+					    size_t count, loff_t *ppos)
+{
+	int ret;
+
+	if (ipc_cmd.inlen > IPC_CMD_RXTX_BUF_SIZE ||
+	    ipc_cmd.outlen > IPC_CMD_RXTX_BUF_SIZE / sizeof(u32)) {
+		pr_err("Given RX/TX length is too big");
+		return -EFAULT;
+	}
+
+	ret = rpmsg_send_generic_raw_command(ipc_cmd.cmd, ipc_cmd.cmdid,
+					     ipc_cmd.wbuf, ipc_cmd.inlen,
+					     ipc_cmd.rbuf, ipc_cmd.outlen,
+					     ipc_cmd.dptr, ipc_cmd.sptr);
+	if (ret) {
+		pr_err("Failed to send ipc command");
+		return ret;
+	}
+
+	return count;
+}
+
+static int intel_scu_ipc_rwbuf_show(struct seq_file *m, void *unused)
+{
+	int i, ret = 0;
+	u8 *buf = (u8 *)m->private;
+
+	for (i = 0; i < IPC_CMD_RXTX_BUF_SIZE; i++) {
+		ret = seq_printf(m, "%02d:0x%02x\n", i, buf[i]);
+		if (ret) {
+			pr_err("Failed to perform sequential print");
+			return ret;
+		}
+	}
+
+	return ret;
+}
+
+static int intel_scu_ipc_rbuf_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, intel_scu_ipc_rwbuf_show, &ipc_cmd.rbuf);
+}
+
+static int intel_scu_ipc_wbuf_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, intel_scu_ipc_rwbuf_show, &ipc_cmd.wbuf);
+}
+
+static ssize_t intel_scu_ipc_wbuf_write(struct file *file,
+					  const char __user *buf,
+					    size_t count, loff_t *ppos)
+{
+	int ret;
+	unsigned long idx, val;
+	char tmp[IPC_CMD_INPUT_ENTRY_SIZE] = {0}; /* "01:0xff" */
+
+	if (!count || count > sizeof(tmp))
+		return -EFAULT;
+
+	ret = copy_from_user(&tmp, buf, count);
+	if (ret) {
+		pr_err("Failed to copy from user space");
+		return ret;
+	}
+
+	tmp[2] = 0; /* "01\0" */
+	ret = kstrtoul(tmp, 10, &idx);
+	if (ret) {
+		pr_err("Given index is invalid");
+		return -EFAULT;
+	}
+	if (idx + 1 > IPC_CMD_RXTX_BUF_SIZE || idx < 0) {
+		pr_err("Given index is out of range. Should be 0...15");
+		return -EFAULT;
+	}
+
+	tmp[7] = 0; /* "01\00xff\0" */
+	ret = kstrtoul(&tmp[3], 16, &val);
+	if (ret)
+		return -EFAULT;
+	if (val > 0xff || val < 0x00)
+		return -EFAULT;
+
+	ipc_cmd.wbuf[idx] = val;
+
+	return count;
+}
+
+static const struct file_operations scu_ipc_trigger_fops = {
+	.owner = THIS_MODULE,
+	.write = intel_scu_ipc_trigger_write,
+};
+
+static const struct file_operations scu_ipc_rbuf_fops = {
+	.owner = THIS_MODULE,
+	.open = intel_scu_ipc_rbuf_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static const struct file_operations scu_ipc_wbuf_fops = {
+	.owner = THIS_MODULE,
+	.open = intel_scu_ipc_wbuf_open,
+	.read = seq_read,
+	.write = intel_scu_ipc_wbuf_write,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static struct dentry *scu_ipc_cmd_dir, *scu_ipc_cmd_file,
+	*scu_ipc_cmdid_file, *scu_ipc_trigger_file, *scu_ipc_rbuf_file,
+	*scu_ipc_wbuf_file, *scu_ipc_sptr_file, *scu_ipc_dptr_file,
+	*scu_ipc_inlen_file, *scu_ipc_outlen_file;
+
+static int intel_mid_scu_ipc_cmd_debugfs_init(void)
+{
+	scu_ipc_cmd_dir = debugfs_create_dir("intel_scu_ipc_cmd", NULL);
+	if (!scu_ipc_cmd_dir) {
+		pr_err("cannot create ipc cmd debugfs directory\n");
+		return -ENOMEM;
+	}
+
+	scu_ipc_cmd_file = debugfs_create_x8("ipc_cmd", S_IWUSR | S_IRUSR,
+						scu_ipc_cmd_dir, &ipc_cmd.cmd);
+	if (!scu_ipc_cmd_file) {
+		pr_err("cannot create ipc cmd debugfs file\n");
+		goto err;
+	}
+
+	scu_ipc_cmdid_file = debugfs_create_x8("ipc_cmdid", S_IWUSR | S_IRUSR,
+						scu_ipc_cmd_dir,
+					       &ipc_cmd.cmdid);
+	if (!scu_ipc_cmdid_file) {
+		pr_err("cannot create ipc cmdid debugfs file\n");
+		goto err;
+	}
+
+	scu_ipc_trigger_file = debugfs_create_file("ipc_trigger", S_IWUSR,
+						   scu_ipc_cmd_dir, NULL,
+						   &scu_ipc_trigger_fops);
+	if (!scu_ipc_trigger_file) {
+		pr_err("cannot create ipc trigger debugfs file\n");
+		goto err;
+	}
+
+	scu_ipc_wbuf_file = debugfs_create_file("ipc_wbuf", S_IWUSR | S_IRUSR,
+						scu_ipc_cmd_dir, NULL,
+						&scu_ipc_wbuf_fops);
+	if (!scu_ipc_wbuf_file) {
+		pr_err("cannot create ipc wbuf debugfs file\n");
+		goto err;
+	}
+
+	scu_ipc_rbuf_file = debugfs_create_file("ipc_rbuf", S_IWUSR | S_IRUSR,
+						scu_ipc_cmd_dir, NULL,
+						&scu_ipc_rbuf_fops);
+	if (!scu_ipc_rbuf_file) {
+		pr_err("cannot create ipc rbuf debugfs file\n");
+		goto err;
+	}
+
+	scu_ipc_sptr_file = debugfs_create_x32("ipc_sptr", S_IWUSR | S_IRUSR,
+					       scu_ipc_cmd_dir, &ipc_cmd.sptr);
+	if (!scu_ipc_sptr_file) {
+		pr_err("cannot create ipc sptr debugfs file\n");
+		goto err;
+	}
+
+	scu_ipc_dptr_file = debugfs_create_x32("ipc_dptr", S_IWUSR | S_IRUSR,
+					       scu_ipc_cmd_dir, &ipc_cmd.dptr);
+	if (!scu_ipc_dptr_file) {
+		pr_err("cannot create ipc dptr debugfs file\n");
+		goto err;
+	}
+
+	scu_ipc_inlen_file = debugfs_create_u8("ipc_inlen", S_IWUSR | S_IRUSR,
+					     scu_ipc_cmd_dir, &ipc_cmd.inlen);
+	if (!scu_ipc_inlen_file) {
+		pr_err("cannot create ipc inlen debugfs file\n");
+		goto err;
+	}
+
+	scu_ipc_outlen_file = debugfs_create_u8("ipc_outlen", S_IWUSR | S_IRUSR,
+					     scu_ipc_cmd_dir, &ipc_cmd.outlen);
+	if (!scu_ipc_outlen_file) {
+		pr_err("cannot create ipc outlen debugfs file\n");
+		goto err;
+	}
+
+	return 0;
+
+err:
+	debugfs_remove_recursive(scu_ipc_cmd_dir);
+	return -ENOMEM;
+}
+
+static void intel_mid_scu_ipc_cmd_debugfs_exit(void)
+{
+	debugfs_remove_recursive(scu_ipc_cmd_dir);
+}
+
+#ifdef DUMP_OSNIB
+
+static ssize_t intel_scu_ipc_osnib_read_reset_event(
+	struct file *file, char __user *buf,
+	size_t count, loff_t *ppos)
+{
+	loff_t pos = *ppos;
+	u8 rev[1] = {0};
+	int ret, i;
+
+	if (pos > 0)
+		return 0;
+
+	for (i = 0; i < ARRAY_SIZE(chip_reset_events); i++) {
+		if (chip_reset_events[i].id == oshob_info->platform_type) {
+			if (strcmp(
+				file->f_path.dentry->d_name.name,
+				chip_reset_events[i].reset_ev1_name) == 0)
+				ret = intel_scu_ipc_read_osnib_reset_ev1(rev);
+			else
+				ret = intel_scu_ipc_read_osnib_reset_ev2(rev);
+
+			if (ret != 0) {
+				pr_err("%s: cannot read %s, ret=%d",
+					__func__,
+					file->f_path.dentry->d_name.name,
+					ret);
+				return ret;
+			}
+
+			/*
+			*  buf is allocated by the kernel (4ko) and we will
+			*  never write more than 6 bytes so no need to check
+			*/
+			ret = sprintf(buf, "0x%x\n", rev[0]);
+			if (ret < 0) {
+				pr_err(
+					"%s: cannot convert the value, ret = %d",
+					__func__,
+					ret);
+				return ret;
+			}
+
+			*ppos += ret;
+			return ret;
+		}
+	}
+
+	pr_err("%s: param not found\n", __func__);
+	return -EFAULT;
+}
+
+/* Attach the debugfs operations methods */
+static const struct file_operations scu_ipc_osnib_reset_event_fops = {
+	.owner = THIS_MODULE,
+	.read  = intel_scu_ipc_osnib_read_reset_event,
+};
+
+static struct dentry *scu_ipc_osnib_dir;
+static struct dentry *scu_ipc_osnib_file_reset_ev1;
+static struct dentry *scu_ipc_osnib_file_reset_ev2;
+
+static ssize_t intel_scu_ipc_osnib_read_checksum(
+	struct file *file, char __user *buf,
+	size_t count, loff_t *ppos)
+{
+	loff_t pos = *ppos;
+	u8 checksum = 0;
+	int ret;
+
+	if (pos > 0)
+		return 0;
+
+	ret = oshob_info->scu_ipc_read_osnib(
+	    &checksum,
+	    1,
+	    offsetof(struct scu_ipc_osnib, checksum));
+
+	if (ret != 0) {
+		pr_err("%s: cannot read CHECKSUM, ret=%d", __func__, ret);
+		return ret;
+	}
+
+	/*
+	*  buf is allocated by the kernel (4ko) and we will
+	*  never write more than 6 bytes so no need to check
+	*/
+	ret = sprintf(buf, "0x%x\n", checksum);
+	if (ret < 0) {
+		pr_err("%s: cannot convert the value, ret = %d", __func__, ret);
+		return ret;
+	}
+
+	*ppos += ret;
+	return ret;
+}
+
+static const struct file_operations scu_ipc_osnib_checksum_fops = {
+	.owner = THIS_MODULE,
+	.read  = intel_scu_ipc_osnib_read_checksum,
+};
+
+static struct dentry *scu_ipc_osnib_file_checksum;
+
+/*
+*	debugfs interface: init interface.
+*/
+static int intel_mid_scu_ipc_osnib_debugfs_init(void)
+{
+	int i;
+	int ret = 0;
+	bool found = false;
+
+	/* Create debugfs directory /sys/kernel/debug/intel_scu_osnib */
+	scu_ipc_osnib_dir = debugfs_create_dir("intel_scu_osnib", NULL);
+
+	if (!scu_ipc_osnib_dir) {
+		pr_err("%s: cannot create OSNIB debugfs directory\n", __func__);
+		return -1;
+	}
+
+	scu_ipc_osnib_file_checksum = debugfs_create_file(
+		"CHECKSUM",
+		S_IFREG | S_IRUSR,
+		scu_ipc_osnib_dir,
+		NULL,
+		&scu_ipc_osnib_checksum_fops);
+
+	if (!scu_ipc_osnib_file_checksum) {
+		pr_err("%s: cannot create CHECKSUM debugfs file\n", __func__);
+		ret = -1;
+	}
+
+	if (!debugfs_create_bool("invalid_checksum", S_IFREG | S_IRUSR | S_IWUSR,
+		scu_ipc_osnib_dir, &invalid_checksum)) {
+		pr_err("%s: cannot create invalid_checksum debugfs file\n", __func__);
+		ret = -1;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(chip_reset_events); i++) {
+		if (chip_reset_events[i].id == oshob_info->platform_type) {
+
+			scu_ipc_osnib_file_reset_ev1 = debugfs_create_file(
+					chip_reset_events[i].reset_ev1_name,
+					S_IFREG | S_IRUSR,
+					scu_ipc_osnib_dir,
+					NULL, &scu_ipc_osnib_reset_event_fops);
+
+			if (!scu_ipc_osnib_file_reset_ev1) {
+				pr_err("%s: cannot create %s debugfs file\n",
+					__func__,
+					chip_reset_events[i].reset_ev1_name);
+				ret = -1;
+			}
+
+			scu_ipc_osnib_file_reset_ev2 = debugfs_create_file(
+					chip_reset_events[i].reset_ev2_name,
+					S_IFREG | S_IRUSR,
+					scu_ipc_osnib_dir,
+					NULL, &scu_ipc_osnib_reset_event_fops);
+
+			if (!scu_ipc_osnib_file_reset_ev2) {
+				pr_err("%s: cannot create %s debugfs file\n",
+					__func__,
+					chip_reset_events[i].reset_ev1_name);
+				ret = -1;
+			}
+
+			found = true;
+			break;
+		}
+	}
+
+	if (!found) {
+		pr_err("%s: param not found\n", __func__);
+		ret = -EFAULT;
+	}
+
+	return ret;
+}
+
+/*
+*	debugfs interface: exit interface.
+*/
+static void intel_mid_scu_ipc_osnib_debugfs_exit(void)
+{
+	debugfs_remove_recursive(scu_ipc_osnib_dir);
+}
+
+#endif /* DUMP_OSNIB */
+
+#endif /* CONFIG_DEBUG_FS */
+
 static const struct file_operations scu_ipc_fops = {
 	.unlocked_ioctl = scu_ipc_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl   = scu_ipc_ioctl,
+#endif
 };
 
-static int __init ipc_module_init(void)
-{
-	major = register_chrdev(0, "intel_mid_scu", &scu_ipc_fops);
-	if (major < 0)
-		return major;
+static struct miscdevice scu_ipcutil = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "mid_ipc",
+	.fops = &scu_ipc_fops,
+};
 
-	return 0;
+static int oshob_init(void)
+{
+	int ret, i;
+	u16 struct_offs;
+
+#ifdef DUMP_OSNIB
+	u8 rr, reset_ev1, reset_ev2, wd, alarm, wakesrc, *ptr;
+	int rr_found = 0, wksrc_found = 0;
+	u32 pmit, scu_trace[OSHOB_SCU_BUF_BASE_DW_SIZE*4], ia_trace;
+	int buff_size;
+#endif
+
+	/* Identify the type and size of OSHOB to be used. */
+	ret = intel_scu_ipc_read_oshob_info();
+
+	if (ret != 0) {
+		pr_err("Cannot init ipc module: oshob info not read\n");
+		goto exit;
+	}
+
+#ifdef DUMP_OSNIB
+	/* Dumping reset events from the interrupt tree */
+	ret = intel_scu_ipc_read_oshob_it_tree(&pmit);
+
+	if (ret != 0) {
+		pr_err("Cannot read interrupt tree\n");
+		goto exit;
+	}
+
+	ptr = ioremap_nocache(pmit + PMIT_RESET1_OFFSET, 2);
+
+	if (!ptr) {
+		pr_err("Cannot remap PMIT\n");
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	pr_debug("PMIT addr 0x%8x remapped to 0x%p\n", pmit, ptr);
+
+	reset_ev1 = readb(ptr);
+	reset_ev2 = readb(ptr+1);
+	for (i = 0; i < ARRAY_SIZE(chip_reset_events); i++) {
+		if (chip_reset_events[i].id == oshob_info->platform_type) {
+			pr_warn("[BOOT] %s=0x%02x %s=0x%02x (PMIT interrupt tree)\n",
+				chip_reset_events[i].reset_ev1_name,
+				reset_ev1,
+				chip_reset_events[i].reset_ev2_name,
+				reset_ev2);
+		}
+	}
+	iounmap(ptr);
+
+	/* Dumping OSHOB content */
+	if ((oshob_info->oshob_majrev == OSHOB_REV_MAJ_DEFAULT) &&
+	    (oshob_info->oshob_minrev == OSHOB_REV_MIN_DEFAULT)) {
+		/* Use default OSHOB here. Calculate in bytes here. */
+		if ((oshob_info->platform_type == INTEL_MID_CPU_CHIP_TANGIER) ||
+		(oshob_info->platform_type == INTEL_MID_CPU_CHIP_ANNIEDALE))
+			buff_size = OSHOB_SCU_BUF_MRFLD_DW_SIZE*4;
+		else
+			buff_size = OSHOB_SCU_BUF_BASE_DW_SIZE*4;
+
+		ret = intel_scu_ipc_read_oshob(
+			(u8 *)(scu_trace),
+			buff_size,
+			offsetof(struct scu_ipc_oshob, scutxl));
+
+		if (ret != 0) {
+			pr_err("Cannot read SCU data\n");
+			goto exit;
+		}
+
+		struct_offs = offsetof(struct scu_ipc_oshob, iatxl) +
+				oshob_info->offs_add;
+		ret = intel_scu_ipc_read_oshob(
+			    (u8 *)(&ia_trace),
+			    4,
+			    struct_offs);
+
+		if (ret != 0) {
+			pr_err("Cannot read IA data\n");
+			goto exit;
+		}
+	    } else {
+		/* Use extended OSHOB here. Calculate in bytes here. */
+		if ((oshob_info->platform_type == INTEL_MID_CPU_CHIP_TANGIER) ||
+		(oshob_info->platform_type == INTEL_MID_CPU_CHIP_ANNIEDALE))
+			buff_size = OSHOB_SCU_BUF_MRFLD_DW_SIZE*4;
+		else
+			buff_size = OSHOB_SCU_BUF_BASE_DW_SIZE*4;
+
+		if ((oshob_info->oshob_majrev >= 1) &&
+		    (oshob_info->oshob_minrev >= 4)) {
+			ret = intel_scu_ipc_read_oshob(
+				(u8 *)(scu_trace),
+				buff_size,
+				offsetof(struct scu_ipc_oshob_extend_v14,
+								scutxl));
+		} else {
+			ret = intel_scu_ipc_read_oshob(
+				(u8 *)(scu_trace),
+				buff_size,
+				offsetof(struct scu_ipc_oshob_extend, scutxl));
+		}
+
+		if (ret != 0) {
+			pr_err("Cannot read SCU data\n");
+			goto exit;
+		}
+
+		if ((oshob_info->oshob_majrev >= 1) &&
+		    (oshob_info->oshob_minrev >= 4)) {
+			struct_offs = offsetof(struct scu_ipc_oshob_extend_v14,
+						iatxl) + oshob_info->offs_add;
+		} else {
+			struct_offs = offsetof(struct scu_ipc_oshob_extend,
+						iatxl) + oshob_info->offs_add;
+		}
+
+		ret = intel_scu_ipc_read_oshob(
+				(u8 *)(&ia_trace),
+				4,
+				struct_offs);
+
+		if (ret != 0) {
+			pr_err("Cannot read IA data\n");
+			goto exit;
+		}
+	}
+
+	if ((oshob_info->platform_type == INTEL_MID_CPU_CHIP_TANGIER) ||
+		(oshob_info->platform_type == INTEL_MID_CPU_CHIP_ANNIEDALE)) {
+		for (i = 0; i < OSHOB_SCU_BUF_MRFLD_DW_SIZE; i++)
+			pr_warn("[BOOT] SCU_TR[%d]=0x%08x\n", i, scu_trace[i]);
+	} else
+		pr_warn("[BOOT] SCU_TR=0x%08x (oshob)\n", scu_trace[0]);
+
+	pr_warn("[BOOT] IA_TR=0x%08x (oshob)\n", ia_trace);
+
+	/* Dumping OSNIB content */
+	ret = 0;
+	ret |= intel_scu_ipc_read_osnib_rr(&rr);
+	ret |= intel_scu_ipc_read_osnib_reset_ev1(&reset_ev1);
+	ret |= intel_scu_ipc_read_osnib_reset_ev2(&reset_ev2);
+	ret |= intel_scu_ipc_read_osnib_wd(&wd);
+	ret |= intel_scu_ipc_read_osnib_alarm(&alarm);
+	ret |= intel_scu_ipc_read_osnib_wakesrc(&wakesrc);
+
+	if (ret) {
+		pr_err("Cannot read OSNIB content\n");
+		goto exit;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(osnib_target_oses); i++) {
+		if (osnib_target_oses[i].id == rr) {
+			pr_warn("[BOOT] RR=[%s] WD=0x%02x ALARM=0x%02x (osnib)\n",
+				osnib_target_oses[i].target_os_name, wd, alarm);
+			rr_found++;
+			break;
+		}
+	}
+
+	if (!rr_found)
+		pr_warn("[BOOT] RR=[UNKNOWN 0x%02x] WD=0x%02x ALARM=0x%02x (osnib)\n",
+			rr, wd, alarm);
+
+	pr_warn("[BOOT] WD[3..0] bits %scleared by IA FW (osnib)\n",
+		(wd & 0x0F) ? "NOT " : "");
+
+	for (i = 0; i < ARRAY_SIZE(osnib_wake_srcs); i++) {
+		if (osnib_wake_srcs[i].id == wakesrc) {
+			pr_warn("[BOOT] WAKESRC=[%s] (osnib)\n",
+				osnib_wake_srcs[i].wakesrc_name);
+			wksrc_found++;
+			break;
+		}
+	}
+
+	if (!wksrc_found)
+		pr_warn("[BOOT] WAKESRC=[UNKNOWN 0x%02x] (osnib)\n", wakesrc);
+
+	for (i = 0; i < ARRAY_SIZE(chip_reset_events); i++) {
+		if (chip_reset_events[i].id == oshob_info->platform_type) {
+			pr_warn("[BOOT] %s=0x%02x %s=0x%02x (osnib)\n",
+				chip_reset_events[i].reset_ev1_name,
+				reset_ev1,
+				chip_reset_events[i].reset_ev2_name,
+				reset_ev2);
+			break;
+		}
+	}
+
+#endif /* DUMP_OSNIB */
+
+#ifdef CONFIG_DEBUG_FS
+	if (oshob_info->oshob_majrev != OSHOB_REV_MAJ_DEFAULT) {
+		/* OEMNIB only usable with extended OSHOB structure. */
+		ret = intel_mid_scu_ipc_oemnib_debugfs_init();
+
+		if (ret != 0) {
+			pr_err("Cannot register OEMNIB interface to debugfs\n");
+			goto exit;
+		} else {
+			pr_info("OEMNIB interface registered to debugfs\n");
+		}
+	}
+	ret = intel_mid_scu_ipc_cmd_debugfs_init();
+	if (ret) {
+		pr_err("Cannot register ipc cmd interface to debugfs");
+		goto exit;
+	}
+
+#ifdef DUMP_OSNIB
+	if (intel_mid_scu_ipc_osnib_debugfs_init() != 0)
+		pr_err("Problem when register OSNIB interface to debugfs\n");
+	else
+		pr_info("OSNIB interface registered to debugfs\n");
+#endif /* DUMP_OSNIB */
+#endif /* CONFIG_DEBUG_FS */
+
+exit:
+	return ret;
 }
 
-static void __exit ipc_module_exit(void)
+#ifdef DUMP_OSNIB
+static int intel_ipcutil_panic_handler(struct notifier_block *this,
+					unsigned long event, void *unused)
 {
-	unregister_chrdev(major, "intel_mid_scu");
+	u8 wd;
+
+	int ret;
+
+	pr_debug("%s: Set kernel panic reason to OSNIB\n", __func__);
+
+	ret = intel_scu_ipc_read_osnib_wd(&wd);
+	if (ret) {
+		pr_err("Fail reading kernel panic bit\n");
+		goto out;
+	}
+
+	wd |= WD_KERNEL_PANIC_BIT;
+
+	ret = intel_scu_ipc_write_osnib_wd(&wd);
+	if (ret)
+		pr_err("Fail setting kernel panic bit\n");
+out:
+	return NOTIFY_OK;
+}
+#endif
+
+#ifdef DUMP_OSNIB
+static struct notifier_block intel_ipcutil_panic_notifier = {
+	.notifier_call	= intel_ipcutil_panic_handler,
+	.next		= NULL,
+	.priority	= INT_MAX
+};
+#endif
+
+static int ipcutil_rpmsg_probe(struct rpmsg_channel *rpdev)
+{
+	int ret = 0;
+
+#ifdef DUMP_OSNIB
+	ret = atomic_notifier_chain_register(
+		&panic_notifier_list,
+		&intel_ipcutil_panic_notifier);
+	if (ret) {
+		pr_err("Failed to register notifier!\n");
+		goto err;
+	}
+#endif
+	oshob_info = kmalloc(sizeof(struct scu_ipc_oshob_info), GFP_KERNEL);
+	if (oshob_info == NULL) {
+		pr_err(
+		"Cannot init ipc module: oshob info struct not allocated\n");
+		return -ENOMEM;
+	}
+
+	if (rpdev == NULL) {
+		pr_err("ipcutil rpmsg channel not created\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	dev_info(&rpdev->dev, "Probed ipcutil rpmsg device\n");
+
+	/* Allocate rpmsg instance for mip*/
+	ret = alloc_rpmsg_instance(rpdev, &ipcutil_instance);
+	if (!ipcutil_instance) {
+		dev_err(&rpdev->dev, "kzalloc ipcutil instance failed\n");
+		goto out;
+	}
+
+	/* Initialize rpmsg instance */
+	init_rpmsg_instance(ipcutil_instance);
+
+	ret = oshob_init();
+	if (ret)
+		goto misc_err;
+
+	ret = misc_register(&scu_ipcutil);
+	if (ret) {
+		pr_err("misc register failed\n");
+		goto misc_err;
+	}
+
+	return ret;
+
+misc_err:
+	free_rpmsg_instance(rpdev, &ipcutil_instance);
+out:
+	kfree(oshob_info);
+
+#ifdef DUMP_OSNIB
+	ret = atomic_notifier_chain_unregister(&panic_notifier_list,
+					&intel_ipcutil_panic_notifier);
+err:
+#endif
+	return ret;
 }
 
-module_init(ipc_module_init);
-module_exit(ipc_module_exit);
+static void ipcutil_rpmsg_remove(struct rpmsg_channel *rpdev)
+{
+#ifdef CONFIG_DEBUG_FS
+	if (oshob_info->oshob_majrev != OSHOB_REV_MAJ_DEFAULT) {
+		/* OEMNIB only usable with extended OSHOB structure. */
+		/* unregister from debugfs.                     */
+		intel_mid_scu_ipc_oemnib_debugfs_exit();
+	}
+	intel_mid_scu_ipc_cmd_debugfs_exit();
+
+#ifdef DUMP_OSNIB
+	intel_mid_scu_ipc_osnib_debugfs_exit();
+#endif /* DUMP_OSNIB */
+#endif /* CONFIG_DEBUG_FS */
+
+	kfree(oshob_info);
+
+	/* unregister scu_ipc_ioctl from sysfs. */
+	misc_deregister(&scu_ipcutil);
+	free_rpmsg_instance(rpdev, &ipcutil_instance);
+#ifdef DUMP_OSNIB
+	atomic_notifier_chain_unregister(&panic_notifier_list,
+					&intel_ipcutil_panic_notifier);
+#endif
+}
+
+static void ipcutil_rpmsg_cb(struct rpmsg_channel *rpdev, void *data,
+					int len, void *priv, u32 src)
+{
+	dev_warn(&rpdev->dev, "unexpected, message\n");
+
+	print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+		       data, len,  true);
+}
+
+static struct rpmsg_device_id ipcutil_rpmsg_id_table[] = {
+	{ .name	= "rpmsg_ipc_util" },
+	{ },
+};
+MODULE_DEVICE_TABLE(rpmsg, ipcutil_rpmsg_id_table);
+
+static struct rpmsg_driver ipcutil_rpmsg = {
+	.drv.name	= KBUILD_MODNAME,
+	.drv.owner	= THIS_MODULE,
+	.id_table	= ipcutil_rpmsg_id_table,
+	.probe		= ipcutil_rpmsg_probe,
+	.callback	= ipcutil_rpmsg_cb,
+	.remove		= ipcutil_rpmsg_remove,
+};
+
+static int __init ipcutil_rpmsg_init(void)
+{
+	return register_rpmsg_driver(&ipcutil_rpmsg);
+}
+
+static void __exit ipcutil_rpmsg_exit(void)
+{
+	unregister_rpmsg_driver(&ipcutil_rpmsg);
+}
+
+rootfs_initcall(ipcutil_rpmsg_init);
+module_exit(ipcutil_rpmsg_exit);
 
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("Utility driver for intel scu ipc");
diff --git a/drivers/platform/x86/intel_scu_mip.c b/drivers/platform/x86/intel_scu_mip.c
new file mode 100644
index 0000000..6befefc
--- /dev/null
+++ b/drivers/platform/x86/intel_scu_mip.c
@@ -0,0 +1,768 @@
+/*
+ * intel_scu_mip.c: Driver for the Intel scu mip and umip access
+ *
+ * (C) Copyright 2012 Intel Corporation
+ * Author: Shijie Zhang (shijie.zhang@intel.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/delay.h>
+#include <linux/rpmsg.h>
+#include <linux/blkdev.h>
+#include <linux/pagemap.h>
+#include <asm/intel_scu_ipc.h>
+#include <asm/intel_mip.h>
+#include <asm/intel_mid_rpmsg.h>
+#include <linux/platform_data/intel_mid_remoteproc.h>
+
+#define DRIVER_NAME "intel_scu_mip"
+
+#define IPC_MIP_BASE     0xFFFD8000	/* sram base address for mip accessing*/
+#define IPC_MIP_MAX_ADDR 0x1000
+
+#define KOBJ_MIP_ATTR(_name, _mode, _show, _store) \
+	struct kobj_attribute _name##_attr = __ATTR(_name, _mode, _show, _store)
+
+static struct kobject *scu_mip_kobj;
+static struct rpmsg_instance *mip_instance;
+static struct scu_mip_platform_data *pdata;
+
+static void __iomem *intel_mip_base;
+#define SECTOR_SIZE			512
+#define UMIP_TOTAL_CHKSUM_ENTRY		126
+#define UMIP_HEADER_HEADROOM_SECTOR	1
+#define UMIP_HEADER_SECTOR		0
+#define UMIP_HEADER_CHKSUM_ADDR		7
+#define UMIP_START_CHKSUM_ADDR		8
+#define UMIP_TOTAL_HEADER_SECTOR_NO	2
+
+#define UMIP_BLKDEVICE			"mmcblk0boot0"
+
+static int xorblock(u32 *buf, u32 size)
+{
+	u32 cs = 0;
+
+	size >>= 2;
+	while (size--)
+		cs ^= *buf++;
+
+	return cs;
+}
+
+static u8 dword_to_byte_chksum(u32 dw)
+{
+	int n = 0;
+	u32 cs = dw;
+	for (n = 0; n < 3; n++) {
+		dw >>= 8;
+		cs ^= dw;
+	}
+
+	return (u8)cs;
+}
+
+static u8 calc_checksum(void *_buf, int size)
+{
+	int i;
+	u8 checksum = 0, *buf = (u8 *)_buf;
+
+	for (i = 0; i < size; i++)
+		checksum = checksum ^ (buf[i]);
+
+	return checksum;
+}
+
+static int mmcblk0boot0_match(struct device *dev, const void *data)
+{
+	if (strcmp(dev_name(dev), UMIP_BLKDEVICE) == 0)
+		return 1;
+
+	return 0;
+}
+
+static struct block_device *get_emmc_bdev(void)
+{
+	struct block_device *bdev;
+	struct device *emmc_disk;
+
+	emmc_disk = class_find_device(&block_class, NULL, NULL,
+					mmcblk0boot0_match);
+	if (emmc_disk == 0) {
+		pr_err("emmc not found!\n");
+		return NULL;
+	}
+
+	/* partition 0 means raw disk */
+	bdev = bdget_disk(dev_to_disk(emmc_disk), 0);
+	if (bdev == NULL) {
+		dev_err(emmc_disk, "unable to get disk\n");
+		return NULL;
+	}
+
+	/* Note: this bdev ref will be freed after first
+	 * bdev_get/bdev_put cycle
+	 */
+
+	return bdev;
+}
+
+
+static int read_mip(u8 *data, int len, int offset, int issigned)
+{
+	int ret;
+	u32 sptr, dptr, cmd, cmdid, data_off;
+
+	dptr = offset;
+	sptr = (len + 3) / 4;
+
+	cmdid = issigned ? IPC_CMD_SMIP_RD : IPC_CMD_UMIP_RD;
+	cmd = 4 << 16 | cmdid << 12 | IPCMSG_MIP_ACCESS;
+
+	ret = rpmsg_send_raw_command(mip_instance, cmd, 0, NULL,
+		(u32 *)&data_off, 0, 1, sptr, dptr);
+
+
+	if (!ret)
+		memcpy(data, intel_mip_base + data_off, len);
+
+	return ret;
+}
+
+int intel_scu_ipc_read_mip(u8 *data, int len, int offset, int issigned)
+{
+	int ret = 0;
+	Sector sect;
+	struct block_device *bdev;
+	char *buffer = NULL;
+	int *holderId = NULL;
+	int sect_no, remainder;
+
+	/* Only SMIP read for Cloverview is supported */
+	if ((intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_CLOVERVIEW)
+			&& (issigned != 1)) { /* CTP read UMIP from eMMC */
+
+		/* Opening the mmcblk0boot0 */
+		bdev = get_emmc_bdev();
+		if (bdev == NULL) {
+			pr_err("%s: get_emmc failed!\n", __func__);
+			return -ENODEV;
+		}
+
+		/* make sure the block device is open read only */
+		ret = blkdev_get(bdev, FMODE_READ, holderId);
+		if (ret < 0) {
+			pr_err("%s: blk_dev_get failed!\n", __func__);
+			return -ret;
+		}
+
+		/* Get sector number of where data located */
+		sect_no = offset / SECTOR_SIZE;
+		remainder = offset % SECTOR_SIZE;
+		buffer = read_dev_sector(bdev, sect_no +
+					UMIP_HEADER_HEADROOM_SECTOR, &sect);
+
+		/* Shouldn't need to access UMIP sector 0/1 */
+		if (sect_no < UMIP_TOTAL_HEADER_SECTOR_NO) {
+			pr_err("invalid umip offset\n");
+			ret = -EINVAL;
+			goto bd_put;
+		} else if (data == NULL || buffer == NULL) {
+			pr_err("buffer is empty\n");
+			ret = -ENODEV;
+			goto bd_put;
+		} else if (len > (SECTOR_SIZE - remainder)) {
+			pr_err("not enough data to read\n");
+			ret = -EINVAL;
+			goto bd_put;
+		}
+
+		memcpy(data, buffer + remainder, len);
+bd_put:
+		if (buffer)
+			put_dev_sector(sect);
+
+		blkdev_put(bdev, FMODE_READ);
+		return ret;
+	} else {
+
+		if (!intel_mip_base)
+			return -ENODEV;
+
+		if (offset + len > IPC_MIP_MAX_ADDR)
+			return -EINVAL;
+
+		rpmsg_global_lock();
+		ret = read_mip(data, len, offset, issigned);
+		rpmsg_global_unlock();
+
+		return ret;
+	}
+}
+EXPORT_SYMBOL(intel_scu_ipc_read_mip);
+
+int get_smip_property_by_name(enum platform_prop pp)
+{
+	u8 data[SMIP_MAX_PROP_LEN];
+	int i, val, ret;
+	struct smip_platform_prop prop[SMIP_NUM_CONFIG_PROPS];
+
+	if (!pdata->smip_prop)
+		return -EINVAL;
+
+	for (i = 0; i < SMIP_NUM_CONFIG_PROPS; i++)
+		prop[i] = pdata->smip_prop[i];
+
+	/* Read the property requested by the caller */
+	ret = intel_scu_ipc_read_mip(data, prop[pp].len, prop[pp].offset, 1);
+	if (ret)
+		return ret;
+
+	/* Adjust the bytes according to the length and return the int */
+	val = data[0];
+	for (i = 1; i < prop[pp].len; i++)
+		val = val << 8 | data[i];
+
+	/* If the requested property is a bit field, return that bit value */
+	if (prop[pp].is_bit_field)
+		val &= prop[pp].mask;
+
+	return val;
+}
+EXPORT_SYMBOL(get_smip_property_by_name);
+
+int intel_scu_ipc_write_umip(u8 *data, int len, int offset)
+{
+	int i, ret = 0, offset_align;
+	int remainder, len_align = 0;
+	u32 dptr, sptr, cmd;
+	u8 cs, tbl_cs = 0, *buf = NULL;
+	Sector sect;
+	struct block_device *bdev;
+	char *buffer = NULL;
+	int *holderId = NULL;
+	int sect_no;
+	u8 checksum;
+
+	if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_CLOVERVIEW) {
+
+		/* Opening the mmcblk0boot0 */
+		bdev = get_emmc_bdev();
+		if (bdev == NULL) {
+			pr_err("%s: get_emmc failed!\n", __func__);
+			return -ENODEV;
+		}
+
+		/* make sure the block device is open rw */
+		ret = blkdev_get(bdev, FMODE_READ|FMODE_WRITE, holderId);
+		if (ret < 0) {
+			pr_err("%s: blk_dev_get failed!\n", __func__);
+			return -ret;
+		}
+
+		/* get memmap of the UMIP header */
+		sect_no = offset / SECTOR_SIZE;
+		remainder = offset % SECTOR_SIZE;
+		buffer = read_dev_sector(bdev, sect_no +
+					UMIP_HEADER_HEADROOM_SECTOR, &sect);
+
+		/* Shouldn't need to access UMIP sector 0/1 */
+		if (sect_no < UMIP_TOTAL_HEADER_SECTOR_NO) {
+			pr_err("invalid umip offset\n");
+			ret = -EINVAL;
+			goto bd_put;
+		} else if (data == NULL || buffer == NULL) {
+			pr_err("buffer is empty\n");
+			ret = -ENODEV;
+			goto bd_put;
+		} else if (len > (SECTOR_SIZE - remainder)) {
+			pr_err("too much data to write\n");
+			ret = -EINVAL;
+			goto bd_put;
+		}
+
+		lock_page(sect.v);
+		memcpy(buffer + remainder, data, len);
+		checksum = calc_checksum(buffer, SECTOR_SIZE);
+
+		set_page_dirty(sect.v);
+		unlock_page(sect.v);
+		sync_blockdev(bdev);
+		put_dev_sector(sect);
+
+		/*
+		 * Updating the checksum, sector 0 (starting from UMIP
+		 * offset 0x08), we maintains 4 bytes for tracking each of
+		 * sector changes individually. For example, the dword at
+		 * offset 0x08 is used to checksum data integrity of sector
+		 * number 2, and so on so forth. It's worthnoting that only
+		 * the first byte in each 4 bytes stores checksum.
+		 * For detail, please check CTP FAS UMIP header definition
+		 */
+
+		buffer = read_dev_sector(bdev, UMIP_HEADER_SECTOR +
+					UMIP_HEADER_HEADROOM_SECTOR, &sect);
+
+		if (buffer == NULL) {
+			pr_err("buffer is empty\n");
+			ret = -ENODEV;
+			goto bd_put;
+		}
+
+		lock_page(sect.v);
+		memcpy(buffer + 4 * (sect_no - UMIP_TOTAL_HEADER_SECTOR_NO) +
+			UMIP_START_CHKSUM_ADDR, &checksum, 1/* one byte */);
+
+		/* Change UMIP prologue chksum to zero */
+		*(buffer + UMIP_HEADER_CHKSUM_ADDR) = 0;
+
+		for (i = 0; i < UMIP_TOTAL_CHKSUM_ENTRY; i++) {
+			tbl_cs ^= *(u8 *)(buffer + 4 * i +
+					UMIP_START_CHKSUM_ADDR);
+		}
+
+		/* Finish up with re-calcuating UMIP prologue checksum */
+		cs = dword_to_byte_chksum(xorblock((u32 *)buffer,
+							SECTOR_SIZE));
+
+		*(buffer + UMIP_HEADER_CHKSUM_ADDR) = tbl_cs ^ cs;
+
+		set_page_dirty(sect.v);
+		unlock_page(sect.v);
+		sync_blockdev(bdev);
+bd_put:
+		if (buffer)
+			put_dev_sector(sect);
+
+		blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
+		return ret;
+	} else {
+
+		if (!intel_mip_base)
+			return -ENODEV;
+
+		if (offset + len > IPC_MIP_MAX_ADDR)
+			return -EINVAL;
+
+		rpmsg_global_lock();
+
+		offset_align = offset & (~0x3);
+		len_align = (len + (offset - offset_align) + 3) & (~0x3);
+
+		if (len != len_align) {
+			buf = kzalloc(len_align, GFP_KERNEL);
+			if (!buf) {
+				pr_err("Alloc memory failed\n");
+				ret = -ENOMEM;
+				goto fail;
+			}
+			ret = read_mip(buf, len_align, offset_align, 0);
+			if (ret)
+				goto fail;
+			memcpy(buf + offset - offset_align, data, len);
+		} else {
+			buf = data;
+		}
+
+		dptr = offset_align;
+		sptr = len_align / 4;
+		cmd = IPC_CMD_UMIP_WR << 12 | IPCMSG_MIP_ACCESS;
+
+		memcpy(intel_mip_base, buf, len_align);
+
+		ret = rpmsg_send_raw_command(mip_instance, cmd, 0, NULL,
+			NULL, 0, 0, sptr, dptr);
+
+fail:
+		if (buf && len_align != len)
+			kfree(buf);
+
+		rpmsg_global_unlock();
+
+		return ret;
+	}
+}
+EXPORT_SYMBOL(intel_scu_ipc_write_umip);
+
+
+#define MAX_DATA_NR 8
+#define MIP_CMD_LEN 11
+
+enum {
+	MIP_DBG_DATA,
+	MIP_DBG_LEN,
+	MIP_DBG_OFFSET,
+	MIP_DBG_ISSIGNED,
+	MIP_DBG_ERROR,
+};
+
+static u8 mip_data[MAX_DATA_NR];
+static int valid_data_nr;
+static int mip_len;
+static int mip_offset;
+static int mip_issigned;
+static int mip_dbg_error;
+static char mip_cmd[MIP_CMD_LEN];
+
+static ssize_t mip_generic_show(char *buf, int type, int *data)
+{
+	int i;
+	ssize_t ret = 0;
+
+	switch (type) {
+	case MIP_DBG_DATA:
+		for (i = 0; i < valid_data_nr; i++) {
+			ret += snprintf(buf + ret, PAGE_SIZE - ret,
+					"data[%d]: %#x\n",
+					i, mip_data[i]);
+		}
+		break;
+	case MIP_DBG_LEN:
+		ret = snprintf(buf, PAGE_SIZE, "len: %d\n", *data);
+		break;
+	case MIP_DBG_OFFSET:
+		ret = snprintf(buf, PAGE_SIZE, "offset: %#x\n", *data);
+		break;
+	case MIP_DBG_ISSIGNED:
+		ret = snprintf(buf, PAGE_SIZE, "issigned: %d\n", *data);
+		break;
+	case MIP_DBG_ERROR:
+		ret = snprintf(buf, PAGE_SIZE, "error: %d\n", *data);
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+static void mip_generic_store(const char *buf, int type, int *data)
+{
+	int i, ret;
+
+	if (type == MIP_DBG_DATA) {
+		u32 t[MAX_DATA_NR];
+
+		valid_data_nr = 0;
+		memset(mip_data, 0, sizeof(mip_data));
+
+		ret = sscanf(buf, "%x %x %x %x %x %x %x %x", &t[0], &t[1],
+				&t[2], &t[3], &t[4], &t[5], &t[6], &t[7]);
+		if (ret == 0 || ret > MAX_DATA_NR) {
+			mip_dbg_error = -EINVAL;
+			return;
+		} else {
+			for (i = 0; i < ret; i++)
+				mip_data[i] = (u8)t[i];
+			valid_data_nr = ret;
+		}
+	} else {
+		*data = 0;
+		switch (type) {
+		case MIP_DBG_OFFSET:
+			ret = sscanf(buf, "%x", data);
+			break;
+		case MIP_DBG_LEN:
+		case MIP_DBG_ISSIGNED:
+			ret = sscanf(buf, "%d", data);
+			break;
+		default:
+			ret = -1;
+			break;
+		}
+	}
+
+	if (ret)
+		mip_dbg_error = 0;
+	else
+		mip_dbg_error = -EINVAL;
+
+	return;
+}
+
+static ssize_t mip_data_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	return mip_generic_show(buf, MIP_DBG_DATA, NULL);
+}
+
+static ssize_t mip_data_store(struct kobject *kobj,
+		struct kobj_attribute *attr, const char *buf, size_t size)
+{
+	mip_generic_store(buf, MIP_DBG_DATA, NULL);
+	return size;
+}
+
+static ssize_t mip_len_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	return mip_generic_show(buf, MIP_DBG_LEN, &mip_len);
+}
+
+static ssize_t mip_len_store(struct kobject *kobj,
+		struct kobj_attribute *attr, const char *buf, size_t size)
+{
+	mip_generic_store(buf, MIP_DBG_LEN, &mip_len);
+	return size;
+}
+
+static ssize_t mip_offset_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	return mip_generic_show(buf, MIP_DBG_OFFSET, &mip_offset);
+}
+
+static ssize_t mip_offset_store(struct kobject *kobj,
+		struct kobj_attribute *attr, const char *buf, size_t size)
+{
+	mip_generic_store(buf, MIP_DBG_OFFSET, &mip_offset);
+	return size;
+}
+
+static ssize_t mip_issigned_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	return mip_generic_show(buf, MIP_DBG_ISSIGNED, &mip_issigned);
+}
+
+static ssize_t mip_issigned_store(struct kobject *kobj,
+		struct kobj_attribute *attr, const char *buf, size_t size)
+{
+	mip_generic_store(buf, MIP_DBG_ISSIGNED, &mip_issigned);
+	return size;
+}
+
+static ssize_t mip_error_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	return mip_generic_show(buf, MIP_DBG_ERROR, &mip_dbg_error);
+}
+
+static ssize_t mip_cmd_store(struct kobject *kobj,
+		struct kobj_attribute *attr, const char *buf, size_t size)
+{
+
+	int ret;
+
+	memset(mip_cmd, 0, sizeof(mip_cmd));
+
+	ret = sscanf(buf, "%10s", mip_cmd);
+	if (ret == 0) {
+		mip_dbg_error = -EINVAL;
+		goto end;
+	}
+
+	if (!strncmp("read_mip", mip_cmd, MIP_CMD_LEN)) {
+		memset(mip_data, 0, sizeof(mip_data));
+		ret = intel_scu_ipc_read_mip(mip_data, mip_len, mip_offset,
+				mip_issigned);
+		if (!ret)
+			valid_data_nr = mip_len;
+
+	} else if (!strncmp("write_umip", mip_cmd, MIP_CMD_LEN)) {
+		if (mip_len == valid_data_nr) {
+			ret = intel_scu_ipc_write_umip(mip_data, mip_len,
+					mip_offset);
+		} else
+			goto error;
+	} else
+		goto error;
+
+	if (ret)
+		goto error;
+	else
+		goto end;
+
+error:
+	mip_dbg_error = -EINVAL;
+
+end:
+	return size;
+}
+
+static KOBJ_MIP_ATTR(data, S_IRUSR|S_IWUSR, mip_data_show, mip_data_store);
+static KOBJ_MIP_ATTR(len, S_IRUSR|S_IWUSR, mip_len_show, mip_len_store);
+static KOBJ_MIP_ATTR(offset, S_IRUSR|S_IWUSR, mip_offset_show,
+		mip_offset_store);
+static KOBJ_MIP_ATTR(issigned, S_IRUSR|S_IWUSR, mip_issigned_show,
+		mip_issigned_store);
+static KOBJ_MIP_ATTR(cmd, S_IWUSR, NULL, mip_cmd_store);
+static KOBJ_MIP_ATTR(error, S_IRUSR, mip_error_show, NULL);
+
+static struct attribute *mip_attrs[] = {
+	&data_attr.attr,
+	&len_attr.attr,
+	&offset_attr.attr,
+	&issigned_attr.attr,
+	&cmd_attr.attr,
+	&error_attr.attr,
+	NULL,
+};
+
+static struct attribute_group mip_attr_group = {
+	.name = "mip_debug",
+	.attrs = mip_attrs,
+};
+
+static int scu_mip_probe(struct platform_device *pdev)
+{
+	if (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_PENWELL) {
+		if (!pdev->dev.platform_data)
+			return -EINVAL;
+		pdata =
+		(struct scu_mip_platform_data *)pdev->dev.platform_data;
+	}
+	return 0;
+}
+
+static int scu_mip_remove(struct platform_device *pdev)
+{
+	platform_set_drvdata(pdev, NULL);
+	return 0;
+}
+
+static const struct platform_device_id scu_mip_table[] = {
+		{DRIVER_NAME, 1 },
+};
+
+static struct platform_driver scu_mip_driver = {
+	.driver = {
+		.name = DRIVER_NAME,
+		.owner = THIS_MODULE,
+	},
+	.probe = scu_mip_probe,
+	.remove = scu_mip_remove,
+	.id_table = scu_mip_table,
+};
+
+static int __init scu_mip_init(void)
+{
+	return platform_driver_register(&scu_mip_driver);
+}
+
+static void scu_mip_exit(void)
+{
+	platform_driver_unregister(&scu_mip_driver);
+}
+
+static int mip_rpmsg_probe(struct rpmsg_channel *rpdev)
+{
+	int ret = 0;
+
+	if (rpdev == NULL) {
+		pr_err("rpmsg channel not created\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	dev_info(&rpdev->dev, "Probed mip rpmsg device\n");
+
+	/* Allocate rpmsg instance for mip*/
+	ret = alloc_rpmsg_instance(rpdev, &mip_instance);
+	if (!mip_instance) {
+		dev_err(&rpdev->dev, "kzalloc mip instance failed\n");
+		goto out;
+	}
+	/* Initialize rpmsg instance */
+	init_rpmsg_instance(mip_instance);
+
+	/* Init mip base */
+	intel_mip_base = ioremap_nocache(IPC_MIP_BASE, IPC_MIP_MAX_ADDR);
+	if (!intel_mip_base) {
+		ret = -ENOMEM;
+		goto rpmsg_err;
+	}
+
+	/* Create debugfs for mip regs */
+	scu_mip_kobj = kobject_create_and_add(mip_attr_group.name,
+						kernel_kobj);
+
+	if (!scu_mip_kobj) {
+		ret = -ENOMEM;
+		goto mip_base_err;
+	}
+
+	ret = sysfs_create_group(scu_mip_kobj, &mip_attr_group);
+
+	if (ret) {
+		kobject_put(scu_mip_kobj);
+		goto mip_base_err;
+	}
+
+	ret = scu_mip_init();
+	goto out;
+mip_base_err:
+	iounmap(intel_mip_base);
+rpmsg_err:
+	free_rpmsg_instance(rpdev, &mip_instance);
+out:
+	return ret;
+}
+
+static void mip_rpmsg_remove(struct rpmsg_channel *rpdev)
+{
+	scu_mip_exit();
+	iounmap(intel_mip_base);
+	free_rpmsg_instance(rpdev, &mip_instance);
+	sysfs_remove_group(scu_mip_kobj, &mip_attr_group);
+	kobject_put(scu_mip_kobj);
+	dev_info(&rpdev->dev, "Removed mip rpmsg device\n");
+}
+
+static void mip_rpmsg_cb(struct rpmsg_channel *rpdev, void *data,
+					int len, void *priv, u32 src)
+{
+	dev_warn(&rpdev->dev, "unexpected, message\n");
+
+	print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+		       data, len,  true);
+}
+
+static struct rpmsg_device_id mip_rpmsg_id_table[] = {
+	{ .name	= "rpmsg_mip" },
+	{ },
+};
+MODULE_DEVICE_TABLE(rpmsg, mip_rpmsg_id_table);
+
+static struct rpmsg_driver mip_rpmsg = {
+	.drv.name	= KBUILD_MODNAME,
+	.drv.owner	= THIS_MODULE,
+	.id_table	= mip_rpmsg_id_table,
+	.probe		= mip_rpmsg_probe,
+	.callback	= mip_rpmsg_cb,
+	.remove		= mip_rpmsg_remove,
+};
+
+static int __init mip_rpmsg_init(void)
+{
+	if ((intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_PENWELL)
+		&& (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_CLOVERVIEW))
+		return -EINVAL;
+
+	return register_rpmsg_driver(&mip_rpmsg);
+}
+
+#ifdef MODULE
+module_init(mip_rpmsg_init);
+#else
+fs_initcall_sync(mip_rpmsg_init);
+#endif
+
+static void __exit mip_rpmsg_exit(void)
+{
+	return unregister_rpmsg_driver(&mip_rpmsg);
+}
+module_exit(mip_rpmsg_exit);
+
+MODULE_AUTHOR("Shijie Zhang <shijie.zhang@intel.com>");
+MODULE_DESCRIPTION("Intel SCU MIP driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel_scu_pmic.c b/drivers/platform/x86/intel_scu_pmic.c
new file mode 100644
index 0000000..517010b
--- /dev/null
+++ b/drivers/platform/x86/intel_scu_pmic.c
@@ -0,0 +1,477 @@
+/*
+ * pmic.c - Intel MSIC Driver
+ *
+ * Copyright (C) 2012 Intel Corporation
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * Author: Bin Yang <bin.yang@intel.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/rpmsg.h>
+#include <asm/intel_scu_pmic.h>
+#include <asm/intel_scu_ipc.h>
+#include <asm/intel_mid_rpmsg.h>
+#include <linux/platform_data/intel_mid_remoteproc.h>
+
+#define IPC_WWBUF_SIZE    20
+#define IPC_RWBUF_SIZE    20
+
+static struct kobject *scu_pmic_kobj;
+static struct rpmsg_instance *pmic_instance;
+
+static int pwr_reg_rdwr(u16 *addr, u8 *data, u32 count, u32 cmd, u32 sub)
+{
+	int i, err, inlen = 0, outlen = 0;
+
+	u8 wbuf[IPC_WWBUF_SIZE] = {};
+	u8 rbuf[IPC_RWBUF_SIZE] = {};
+
+	memset(wbuf, 0, sizeof(wbuf));
+
+	for (i = 0; i < count; i++) {
+		wbuf[inlen++] = addr[i] & 0xff;
+		wbuf[inlen++] = (addr[i] >> 8) & 0xff;
+	}
+
+	if (sub == IPC_CMD_PCNTRL_R) {
+		outlen = count > 0 ? ((count - 1) / 4) + 1 : 0;
+	} else if (sub == IPC_CMD_PCNTRL_W) {
+		if (count == 3)
+			inlen += 2;
+
+		for (i = 0; i < count; i++)
+			wbuf[inlen++] = data[i] & 0xff;
+
+		if (count == 3)
+			inlen -= 2;
+
+		outlen = 0;
+	} else if (sub == IPC_CMD_PCNTRL_M) {
+		wbuf[inlen++] = data[0] & 0xff;
+		wbuf[inlen++] = data[1] & 0xff;
+		outlen = 0;
+	} else
+		pr_err("IPC command not supported\n");
+
+	err = rpmsg_send_command(pmic_instance, cmd, sub, wbuf,
+			(u32 *)rbuf, inlen, outlen);
+
+	if (sub == IPC_CMD_PCNTRL_R) {
+		for (i = 0; i < count; i++)
+			data[i] = rbuf[i];
+	}
+
+	return err;
+}
+
+int intel_scu_ipc_ioread8(u16 addr, u8 *data)
+{
+	return pwr_reg_rdwr(&addr, data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
+}
+EXPORT_SYMBOL(intel_scu_ipc_ioread8);
+
+int intel_scu_ipc_iowrite8(u16 addr, u8 data)
+{
+	return pwr_reg_rdwr(&addr, &data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
+}
+EXPORT_SYMBOL(intel_scu_ipc_iowrite8);
+
+int intel_scu_ipc_iowrite32(u16 addr, u32 data)
+{
+	u16 x[4] = {addr, addr + 1, addr + 2, addr + 3};
+	return pwr_reg_rdwr(x, (u8 *)&data, 4, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
+}
+EXPORT_SYMBOL(intel_scu_ipc_iowrite32);
+
+int intel_scu_ipc_readv(u16 *addr, u8 *data, int len)
+{
+	if (len < 1 || len > 8)
+		return -EINVAL;
+
+	return pwr_reg_rdwr(addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
+}
+EXPORT_SYMBOL(intel_scu_ipc_readv);
+
+int intel_scu_ipc_writev(u16 *addr, u8 *data, int len)
+{
+	if (len < 1 || len > 4)
+		return -EINVAL;
+
+	return pwr_reg_rdwr(addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
+}
+EXPORT_SYMBOL(intel_scu_ipc_writev);
+
+int intel_scu_ipc_update_register(u16 addr, u8 bits, u8 mask)
+{
+	u8 data[2] = { bits, mask };
+	return pwr_reg_rdwr(&addr, data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_M);
+}
+EXPORT_SYMBOL(intel_scu_ipc_update_register);
+
+/* pmic sysfs for debug */
+
+#define MAX_PMIC_REG_NR 4
+#define PMIC_OPS_LEN 10
+
+enum {
+	PMIC_DBG_ADDR,
+	PMIC_DBG_BITS,
+	PMIC_DBG_DATA,
+	PMIC_DBG_MASK,
+};
+
+static char *pmic_msg_format[] = {
+	"addr[%d]: %#x\n",
+	"bits[%d]: %#x\n",
+	"data[%d]: %#x\n",
+	"mask[%d]: %#x\n",
+};
+
+static u16 pmic_reg_addr[MAX_PMIC_REG_NR];
+static u8 pmic_reg_bits[MAX_PMIC_REG_NR];
+static u8 pmic_reg_data[MAX_PMIC_REG_NR];
+static u8 pmic_reg_mask[MAX_PMIC_REG_NR];
+static int valid_addr_nr;
+static int valid_bits_nr;
+static int valid_data_nr;
+static int valid_mask_nr;
+static char pmic_ops[PMIC_OPS_LEN];
+
+static int pmic_dbg_error;
+
+static ssize_t pmic_generic_show(char *buf, int valid, u8 *array, int type)
+{
+	int i, buf_size;
+	ssize_t ret = 0;
+
+	switch (type) {
+	case PMIC_DBG_ADDR:
+		for (i = 0; i < valid; i++) {
+			buf_size = PAGE_SIZE - ret;
+			ret += snprintf(buf + ret, buf_size,
+					pmic_msg_format[type],
+					i, pmic_reg_addr[i]);
+		}
+		break;
+	case PMIC_DBG_BITS:
+	case PMIC_DBG_DATA:
+	case PMIC_DBG_MASK:
+		for (i = 0; i < valid; i++) {
+			buf_size = PAGE_SIZE - ret;
+			ret += snprintf(buf + ret, buf_size,
+					pmic_msg_format[type],
+					i, array[i]);
+		}
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+static void pmic_generic_store(const char *buf, int *valid, u8 *array, int type)
+{
+	u32 tmp[MAX_PMIC_REG_NR];
+	int i, ret;
+
+	ret = sscanf(buf, "%x %x %x %x", &tmp[0], &tmp[1], &tmp[2], &tmp[3]);
+	if (ret == 0 || ret > MAX_PMIC_REG_NR) {
+		*valid = 0;
+		pmic_dbg_error = -EINVAL;
+		return;
+	}
+
+	*valid = ret;
+
+	switch (type) {
+	case PMIC_DBG_ADDR:
+		memset(pmic_reg_addr, 0, sizeof(pmic_reg_addr));
+		for (i = 0; i < ret; i++)
+			pmic_reg_addr[i] = (u16)tmp[i];
+		break;
+	case PMIC_DBG_BITS:
+	case PMIC_DBG_DATA:
+	case PMIC_DBG_MASK:
+		memset(array, 0, sizeof(*array) * MAX_PMIC_REG_NR);
+		for (i = 0; i < ret; i++)
+			array[i] = (u8)tmp[i];
+		break;
+	default:
+		break;
+	}
+}
+
+static ssize_t pmic_addr_show(struct kobject *kobj, struct kobj_attribute *attr,
+				char *buf)
+{
+	return pmic_generic_show(buf, valid_addr_nr, NULL, PMIC_DBG_ADDR);
+}
+
+static ssize_t pmic_addr_store(struct kobject *kobj,
+		struct kobj_attribute *attr, const char *buf, size_t size)
+{
+	pmic_generic_store(buf, &valid_addr_nr, NULL, PMIC_DBG_ADDR);
+	return size;
+}
+
+static ssize_t pmic_bits_show(struct kobject *kobj, struct kobj_attribute *attr,
+				char *buf)
+{
+	return pmic_generic_show(buf, valid_bits_nr, pmic_reg_bits,
+			PMIC_DBG_BITS);
+}
+static ssize_t pmic_bits_store(struct kobject *kobj,
+		struct kobj_attribute *attr, const char *buf, size_t size)
+{
+	pmic_generic_store(buf, &valid_bits_nr, pmic_reg_bits, PMIC_DBG_BITS);
+	return size;
+}
+
+static ssize_t pmic_data_show(struct kobject *kobj, struct kobj_attribute *attr,
+				char *buf)
+{
+	return pmic_generic_show(buf, valid_data_nr, pmic_reg_data,
+			PMIC_DBG_DATA);
+}
+
+static ssize_t pmic_data_store(struct kobject *kobj,
+		struct kobj_attribute *attr, const char *buf, size_t size)
+{
+	pmic_generic_store(buf, &valid_data_nr, pmic_reg_data, PMIC_DBG_DATA);
+	return size;
+}
+
+static ssize_t pmic_mask_show(struct kobject *kobj, struct kobj_attribute *attr,
+				char *buf)
+{
+	return pmic_generic_show(buf, valid_mask_nr, pmic_reg_mask,
+			PMIC_DBG_MASK);
+}
+
+static ssize_t pmic_mask_store(struct kobject *kobj,
+		struct kobj_attribute *attr, const char *buf, size_t size)
+{
+	pmic_generic_store(buf, &valid_mask_nr, pmic_reg_mask, PMIC_DBG_MASK);
+	return size;
+}
+
+static ssize_t pmic_ops_store(struct kobject *kobj,
+		struct kobj_attribute *attr, const char *buf, size_t size)
+{
+	int i, ret;
+
+	memset(pmic_ops, 0, sizeof(pmic_ops));
+
+	ret = sscanf(buf, "%9s", pmic_ops);
+	if (ret == 0) {
+		pmic_dbg_error = -EINVAL;
+		goto end;
+	}
+
+	if (valid_addr_nr <= 0) {
+		pmic_dbg_error = -EINVAL;
+		goto end;
+	}
+
+	if (!strncmp("read", pmic_ops, PMIC_OPS_LEN)) {
+		valid_data_nr = valid_addr_nr;
+		for (i = 0; i < valid_addr_nr; i++) {
+			ret = intel_scu_ipc_ioread8(pmic_reg_addr[i],
+					&pmic_reg_data[i]);
+			if (ret) {
+				pmic_dbg_error = ret;
+				goto end;
+			}
+		}
+	} else if (!strncmp("write", pmic_ops, PMIC_OPS_LEN)) {
+		if (valid_addr_nr == valid_data_nr) {
+			for (i = 0; i < valid_addr_nr; i++) {
+				ret = intel_scu_ipc_iowrite8(pmic_reg_addr[i],
+						pmic_reg_data[i]);
+				if (ret) {
+					pmic_dbg_error = ret;
+					goto end;
+				}
+			}
+		} else {
+			pmic_dbg_error = -EINVAL;
+			goto end;
+		}
+	} else if (!strncmp("update", pmic_ops, PMIC_OPS_LEN)) {
+		if (valid_addr_nr == valid_mask_nr &&
+				valid_mask_nr == valid_bits_nr) {
+			for (i = 0; i < valid_addr_nr; i++) {
+				ret = intel_scu_ipc_update_register(
+						pmic_reg_addr[i],
+						pmic_reg_bits[i],
+						pmic_reg_mask[i]);
+				if (ret) {
+					pmic_dbg_error = ret;
+					goto end;
+				}
+			}
+		} else {
+			pmic_dbg_error = -EINVAL;
+			goto end;
+		}
+	} else {
+		pmic_dbg_error = -EINVAL;
+		goto end;
+	}
+
+		pmic_dbg_error = 0;
+
+end:
+	return size;
+}
+
+static ssize_t pmic_show_error(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", pmic_dbg_error);
+}
+
+static KOBJ_PMIC_ATTR(addr, S_IRUSR|S_IWUSR, pmic_addr_show, pmic_addr_store);
+static KOBJ_PMIC_ATTR(bits, S_IRUSR|S_IWUSR, pmic_bits_show, pmic_bits_store);
+static KOBJ_PMIC_ATTR(data, S_IRUSR|S_IWUSR, pmic_data_show, pmic_data_store);
+static KOBJ_PMIC_ATTR(mask, S_IRUSR|S_IWUSR, pmic_mask_show, pmic_mask_store);
+static KOBJ_PMIC_ATTR(ops, S_IWUSR, NULL, pmic_ops_store);
+static KOBJ_PMIC_ATTR(error, S_IRUSR, pmic_show_error, NULL);
+
+static struct attribute *pmic_attrs[] = {
+	&addr_attr.attr,
+	&bits_attr.attr,
+	&data_attr.attr,
+	&mask_attr.attr,
+	&ops_attr.attr,
+	&error_attr.attr,
+	NULL,
+};
+
+static struct attribute_group pmic_attr_group = {
+	.name = "pmic_debug",
+	.attrs = pmic_attrs,
+};
+
+static int pmic_rpmsg_probe(struct rpmsg_channel *rpdev)
+{
+	int ret = 0;
+
+	if (rpdev == NULL) {
+		pr_err("rpmsg channel not created\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	dev_info(&rpdev->dev, "Probed pmic rpmsg device\n");
+
+	/* Allocate rpmsg instance for pmic*/
+	ret = alloc_rpmsg_instance(rpdev, &pmic_instance);
+	if (!pmic_instance) {
+		dev_err(&rpdev->dev, "kzalloc pmic instance failed\n");
+		goto out;
+	}
+	/* Initialize rpmsg instance */
+	init_rpmsg_instance(pmic_instance);
+
+	/* Create debugfs for pmic regs */
+	scu_pmic_kobj = kobject_create_and_add(pmic_attr_group.name,
+						kernel_kobj);
+
+	if (!scu_pmic_kobj) {
+		ret = -ENOMEM;
+		goto rpmsg_err;
+	}
+
+	ret = sysfs_create_group(scu_pmic_kobj, &pmic_attr_group);
+
+	if (ret) {
+		kobject_put(scu_pmic_kobj);
+		goto rpmsg_err;
+	}
+
+	goto out;
+
+rpmsg_err:
+	free_rpmsg_instance(rpdev, &pmic_instance);
+out:
+	return ret;
+}
+
+static void pmic_rpmsg_remove(struct rpmsg_channel *rpdev)
+{
+	free_rpmsg_instance(rpdev, &pmic_instance);
+	sysfs_remove_group(scu_pmic_kobj, &pmic_attr_group);
+	kobject_put(scu_pmic_kobj);
+	dev_info(&rpdev->dev, "Removed pmic rpmsg device\n");
+}
+
+static void pmic_rpmsg_cb(struct rpmsg_channel *rpdev, void *data,
+					int len, void *priv, u32 src)
+{
+	dev_warn(&rpdev->dev, "unexpected, message\n");
+
+	print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+		       data, len,  true);
+}
+
+static struct rpmsg_device_id pmic_rpmsg_id_table[] = {
+	{ .name	= "rpmsg_pmic" },
+	{ },
+};
+MODULE_DEVICE_TABLE(rpmsg, pmic_rpmsg_id_table);
+
+static struct rpmsg_driver pmic_rpmsg = {
+	.drv.name	= KBUILD_MODNAME,
+	.drv.owner	= THIS_MODULE,
+	.id_table	= pmic_rpmsg_id_table,
+	.probe		= pmic_rpmsg_probe,
+	.callback	= pmic_rpmsg_cb,
+	.remove		= pmic_rpmsg_remove,
+};
+
+static int __init pmic_rpmsg_init(void)
+{
+	return register_rpmsg_driver(&pmic_rpmsg);
+}
+
+#ifdef MODULE
+module_init(pmic_rpmsg_init);
+#else
+fs_initcall_sync(pmic_rpmsg_init);
+#endif
+
+static void __exit pmic_rpmsg_exit(void)
+{
+	return unregister_rpmsg_driver(&pmic_rpmsg);
+}
+module_exit(pmic_rpmsg_exit);
+
+MODULE_AUTHOR("Bin Yang<bin.yang@intel.com>");
+MODULE_DESCRIPTION("Intel PMIC Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 7b8979c..6175f24 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -8,6 +8,36 @@
 
 if POWER_SUPPLY
 
+config POWER_SUPPLY_CHARGER
+        bool "Power Supply Charger"
+        select EXTCON
+        select POWER_SUPPLY_BATTID
+        help
+          Say Y here to enable the power supply charger framework. Charger
+          framework supports charging in a generic way. This allows the charger
+          drivers to keep the charging logic outside and the charger driver
+          just need to abstract the charger hardware
+
+config POWER_SUPPLY_CHARGING_ALGO_PSE
+        bool "PSE compliant charging algorithm"
+        help
+          Say Y here to select PSE compliant charging algorithm. As per PSE
+          standard the battery characteristics and thereby the charging rates
+          can vary on different temperature zones. This config will enable PSE
+          compliant charging algorithm with maintenance charging support. The
+          algorithm can be selected by the charging framework based on the type
+          of the battery charging profile.
+
+        depends on POWER_SUPPLY_CHARGER
+
+config POWER_SUPPLY_BATTID
+        bool "Power Supply Battery Identification Framework"
+        help
+         Say Y here to enable the power supply battery idnetification
+         framework. The framework would allow different battery identification
+         drivers to interface with power supply subsystem. Also it allows consumer
+         drivers to register for notification from the power_supply subsystem.
+
 config POWER_SUPPLY_DEBUG
 	bool "Power supply debug"
 	help
@@ -334,14 +364,6 @@
 	  You'll need this driver to charge batteries on e.g. Nokia
 	  RX-51/N900.
 
-config CHARGER_SMB347
-	tristate "Summit Microelectronics SMB347 Battery Charger"
-	depends on I2C
-	select REGMAP_I2C
-	help
-	  Say Y to include support for Summit Microelectronics SMB347
-	  Battery Charger.
-
 config CHARGER_TPS65090
 	tristate "TPS65090 battery charger driver"
 	depends on MFD_TPS65090
@@ -362,6 +384,16 @@
 	  Say Y to enable support for the battery and AC power in the
 	  Goldfish emulator.
 
+config DC_XPWR_BATTERY
+	bool "Dollar Cove(Xpower) PMIC Fuel Gauge driver"
+	help
+	  Say Y here to enable support for Dollar Cove(Xpower) PMIC Fuel Gauge.
+
+config DC_XPWR_CHARGER
+	bool "Dollar Cove(Xpower) PMIC Charger driver"
+	help
+	  Say Y here to enable support for Dollar Cove(Xpower) PMIC Charger.
+
 source "drivers/power/reset/Kconfig"
 
 endif # POWER_SUPPLY
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index 653bf6c..bbac97b 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -1,12 +1,15 @@
 ccflags-$(CONFIG_POWER_SUPPLY_DEBUG) := -DDEBUG
 
-power_supply-y				:= power_supply_core.o
-power_supply-$(CONFIG_SYSFS)		+= power_supply_sysfs.o
-power_supply-$(CONFIG_LEDS_TRIGGERS)	+= power_supply_leds.o
+power_supply-y					:= power_supply_core.o
+power_supply-$(CONFIG_SYSFS)			+= power_supply_sysfs.o
+power_supply-$(CONFIG_LEDS_TRIGGERS)		+= power_supply_leds.o
+power_supply-$(CONFIG_POWER_SUPPLY_CHARGER)	+= power_supply_charger.o
+power_supply-$(CONFIG_POWER_SUPPLY_BATTID)	+= battery_id.o
 
 obj-$(CONFIG_POWER_SUPPLY)	+= power_supply.o
 obj-$(CONFIG_GENERIC_ADC_BATTERY)	+= generic-adc-battery.o
 
+obj-$(CONFIG_POWER_SUPPLY_CHARGING_ALGO_PSE)	+= charging_algo_pse.o
 obj-$(CONFIG_PDA_POWER)		+= pda_power.o
 obj-$(CONFIG_APM_POWER)		+= apm_power.o
 obj-$(CONFIG_MAX8925_POWER)	+= max8925_power.o
@@ -51,6 +54,7 @@
 obj-$(CONFIG_CHARGER_MAX8998)	+= max8998_charger.o
 obj-$(CONFIG_CHARGER_BQ2415X)	+= bq2415x_charger.o
 obj-$(CONFIG_POWER_AVS)		+= avs/
-obj-$(CONFIG_CHARGER_SMB347)	+= smb347-charger.o
 obj-$(CONFIG_CHARGER_TPS65090)	+= tps65090-charger.o
 obj-$(CONFIG_POWER_RESET)	+= reset/
+obj-$(CONFIG_DC_XPWR_BATTERY)	+= dc_xpwr_battery.o
+obj-$(CONFIG_DC_XPWR_CHARGER)	+= dc_xpwr_charger.o
diff --git a/drivers/power/battery_id.c b/drivers/power/battery_id.c
new file mode 100644
index 0000000..13425b6
--- /dev/null
+++ b/drivers/power/battery_id.c
@@ -0,0 +1,68 @@
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/notifier.h>
+#include <linux/power/battery_id.h>
+
+ATOMIC_NOTIFIER_HEAD(batt_id_notifier);
+
+static struct ps_batt_chg_prof *batt_property;
+static int batt_status;
+
+int batt_id_reg_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_register(&batt_id_notifier, nb);
+}
+EXPORT_SYMBOL_GPL(batt_id_reg_notifier);
+
+void batt_id_unreg_notifier(struct notifier_block *nb)
+{
+	atomic_notifier_chain_unregister(&batt_id_notifier, nb);
+}
+EXPORT_SYMBOL_GPL(batt_id_unreg_notifier);
+
+
+/**
+ * battery_prop_changed - Update properties when  battery connection status
+ *                        changes
+ * @battery_conn_stat : The current connection status of battery
+ * @batt_prop : Address of the ps_batt_chg_prof structure with the updated
+ *              values passed from the calling function
+ *
+ * Whenever the battery connection status changes this function will be called
+ * to indicate a change in the status and to update the status and value of
+ * properties
+ */
+void battery_prop_changed(int battery_conn_stat,
+			struct ps_batt_chg_prof *batt_prop)
+{
+	if (batt_status != battery_conn_stat) {
+		if (battery_conn_stat == POWER_SUPPLY_BATTERY_INSERTED)
+			batt_property = batt_prop;
+		else
+			batt_property = NULL;
+
+		batt_status = battery_conn_stat;
+	}
+
+	atomic_notifier_call_chain(&batt_id_notifier,
+			0, &(batt_property));
+
+}
+EXPORT_SYMBOL_GPL(battery_prop_changed);
+
+/**
+ * get_batt_prop - Get the battery connection status and updated properties
+ * @batt_prop : battery properties structure copied to this address
+ */
+int get_batt_prop(struct ps_batt_chg_prof *batt_prop)
+{
+	if (batt_property)
+		memcpy(batt_prop, batt_property,
+			sizeof(struct ps_batt_chg_prof));
+	else
+		return -ENOMEM;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(get_batt_prop);
diff --git a/drivers/power/charger-manager.c b/drivers/power/charger-manager.c
index fefc39f..98de1dd 100644
--- a/drivers/power/charger-manager.c
+++ b/drivers/power/charger-manager.c
@@ -450,7 +450,7 @@
 	strncpy(env_str, event, UEVENT_BUF_SIZE);
 	kobject_uevent(&cm->dev->kobj, KOBJ_CHANGE);
 
-	dev_info(cm->dev, event);
+	dev_info(cm->dev, "%s", event);
 }
 
 /**
diff --git a/drivers/power/charging_algo_pse.c b/drivers/power/charging_algo_pse.c
new file mode 100644
index 0000000..d84d9cd
--- /dev/null
+++ b/drivers/power/charging_algo_pse.c
@@ -0,0 +1,191 @@
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/power_supply.h>
+#include <linux/thermal.h>
+#include <linux/power/battery_id.h>
+#include "power_supply.h"
+#include "power_supply_charger.h"
+
+/* 98% of CV is considered as voltage to detect Full */
+#define FULL_CV_MIN 98
+
+/* Offset to exit from maintenance charging. In maintenance charging
+*  if the volatge is less than the (maintenance_lower_threshold -
+*  MAINT_EXIT_OFFSET) then system can switch to normal charging
+*/
+#define MAINT_EXIT_OFFSET 50  /* mV */
+
+static int get_tempzone(struct ps_pse_mod_prof *pse_mod_bprof,
+		int temp)
+{
+
+	int i = 0;
+	int temp_range_cnt = min_t(u16, pse_mod_bprof->temp_mon_ranges,
+					BATT_TEMP_NR_RNG);
+
+	if ((temp < pse_mod_bprof->temp_low_lim) ||
+		(temp > pse_mod_bprof->temp_mon_range[0].temp_up_lim))
+		return -EINVAL;
+
+	for (i = 0; i < temp_range_cnt; ++i)
+		if (temp > pse_mod_bprof->temp_mon_range[i].temp_up_lim)
+			break;
+	return i-1;
+}
+
+static inline bool __is_battery_full
+	(long volt, long cur, long iterm, unsigned long cv)
+{
+	pr_devel("%s:current=%ld pse_mod_bprof->chrg_term_ma =%ld voltage_now=%ld full_cond=%lu",
+			__func__, cur, iterm, volt * 100, (FULL_CV_MIN * cv));
+
+	return ((cur > 0) && (cur <= iterm) &&
+	((volt * 100)  >= (FULL_CV_MIN * cv)));
+
+}
+
+static inline bool is_battery_full(struct batt_props bat_prop,
+		struct ps_pse_mod_prof *pse_mod_bprof, unsigned long cv)
+{
+
+	int i;
+	/* Software full detection. Check the battery charge current to detect
+	*  battery Full. The voltage also verified to avoid false charge
+	*  full detection.
+	*/
+	pr_devel("%s:current=%ld pse_mod_bprof->chrg_term_ma =%d bat_prop.voltage_now=%ld full_cond=%ld",
+		__func__, bat_prop.current_now, (pse_mod_bprof->chrg_term_ma),
+		bat_prop.voltage_now * 100, (FULL_CV_MIN * cv));
+
+	for (i = (MAX_CUR_VOLT_SAMPLES - 1); i >= 0; --i) {
+
+		if (!(__is_battery_full(bat_prop.voltage_now_cache[i],
+				bat_prop.current_now_cache[i],
+				pse_mod_bprof->chrg_term_ma, cv)))
+			return false;
+	}
+
+	return true;
+}
+
+static int  pse_get_bat_thresholds(struct ps_batt_chg_prof  bprof,
+			struct psy_batt_thresholds *bat_thresh)
+{
+	struct ps_pse_mod_prof *pse_mod_bprof =
+			(struct ps_pse_mod_prof *) bprof.batt_prof;
+
+	if ((bprof.chrg_prof_type != PSE_MOD_CHRG_PROF) || (!pse_mod_bprof))
+		return -EINVAL;
+
+	bat_thresh->iterm = pse_mod_bprof->chrg_term_ma;
+	bat_thresh->temp_min = pse_mod_bprof->temp_low_lim;
+	bat_thresh->temp_max = pse_mod_bprof->temp_mon_range[0].temp_up_lim;
+
+	return 0;
+}
+
+static enum psy_algo_stat pse_get_next_cc_cv(struct batt_props bat_prop,
+	struct ps_batt_chg_prof  bprof, unsigned long *cc, unsigned long *cv)
+{
+	int tzone;
+	struct ps_pse_mod_prof *pse_mod_bprof =
+			(struct ps_pse_mod_prof *) bprof.batt_prof;
+	enum psy_algo_stat algo_stat = bat_prop.algo_stat;
+	int maint_exit_volt;
+
+	*cc = *cv = 0;
+
+	/* If STATUS is discharging, assume that charger is not connected.
+	*  If charger is not connected, no need to take any action.
+	*  If charge profile type is not PSE_MOD_CHRG_PROF or the charge profile
+	*  is not present, no need to take any action.
+	*/
+
+	pr_devel("%s:battery status = %ld algo_status=%d\n",
+			__func__, bat_prop.status, algo_stat);
+
+	if ((bprof.chrg_prof_type != PSE_MOD_CHRG_PROF) || (!pse_mod_bprof))
+		return PSY_ALGO_STAT_NOT_CHARGE;
+
+	tzone = get_tempzone(pse_mod_bprof, bat_prop.temperature);
+
+	if (tzone < 0)
+		return PSY_ALGO_STAT_NOT_CHARGE;
+
+	/* Change the algo status to not charging, if battery is
+	*  not really charging or less than maintenance exit threshold.
+	*  This way algorithm can switch to normal
+	*  charging if current status is full/maintenace
+	*/
+	maint_exit_volt = pse_mod_bprof->
+			temp_mon_range[tzone].maint_chrg_vol_ll -
+				MAINT_EXIT_OFFSET;
+
+	if ((bat_prop.status == POWER_SUPPLY_STATUS_DISCHARGING) ||
+		(bat_prop.status == POWER_SUPPLY_STATUS_NOT_CHARGING) ||
+			bat_prop.voltage_now < maint_exit_volt) {
+		algo_stat = PSY_ALGO_STAT_NOT_CHARGE;
+	}
+
+	/* read cc and cv based on temperature and algorithm status*/
+	if (algo_stat == PSY_ALGO_STAT_FULL ||
+			algo_stat == PSY_ALGO_STAT_MAINT) {
+
+		/* if status is full and voltage is lower than maintenance lower
+		*  threshold change status to maintenenance
+		*/
+
+		if (algo_stat == PSY_ALGO_STAT_FULL) {
+			*cv = pse_mod_bprof->temp_mon_range
+					[tzone].full_chrg_vol;
+			*cc = pse_mod_bprof->temp_mon_range
+					[tzone].full_chrg_cur;
+		}
+
+		if (algo_stat == PSY_ALGO_STAT_FULL && (bat_prop.voltage_now <=
+			pse_mod_bprof->temp_mon_range[tzone].maint_chrg_vol_ll))
+				algo_stat = PSY_ALGO_STAT_MAINT;
+
+		/* Read maintenance CC and CV */
+		if (algo_stat == PSY_ALGO_STAT_MAINT) {
+			*cv = pse_mod_bprof->temp_mon_range
+					[tzone].maint_chrg_vol_ul;
+			*cc = pse_mod_bprof->temp_mon_range
+					[tzone].maint_chrg_cur;
+		}
+	} else {
+		*cv = pse_mod_bprof->temp_mon_range[tzone].full_chrg_vol;
+		*cc = pse_mod_bprof->temp_mon_range[tzone].full_chrg_cur;
+		algo_stat = PSY_ALGO_STAT_CHARGE;
+	}
+
+	if (bat_prop.voltage_now > *cv) {
+		algo_stat = PSY_ALGO_STAT_NOT_CHARGE;
+		return algo_stat;
+	}
+
+	if (algo_stat == PSY_ALGO_STAT_FULL)
+		return algo_stat;
+
+	if (is_battery_full(bat_prop, pse_mod_bprof, *cv))
+		algo_stat = PSY_ALGO_STAT_FULL;
+
+	return algo_stat;
+}
+
+static int __init pse_algo_init(void)
+{
+	struct charging_algo pse_algo;
+	pse_algo.chrg_prof_type = PSE_MOD_CHRG_PROF;
+	pse_algo.name = "pse_algo";
+	pse_algo.get_next_cc_cv = pse_get_next_cc_cv;
+	pse_algo.get_batt_thresholds = pse_get_bat_thresholds;
+	power_supply_register_charging_algo(&pse_algo);
+	return 0;
+}
+
+module_init(pse_algo_init);
diff --git a/drivers/power/max17042_battery.c b/drivers/power/max17042_battery.c
index d664ef5..4d276d0 100644
--- a/drivers/power/max17042_battery.c
+++ b/drivers/power/max17042_battery.c
@@ -1,5 +1,5 @@
 /*
- * Fuel gauge driver for Maxim 17042 / 8966 / 8997
+ * max17042_battery.c - Fuel gauge driver for Maxim 17042 / 8966 / 8997
  *  Note that Maxim 8966 and 8997 are mfd and this is its subdevice.
  *
  * Copyright (C) 2011 Samsung Electronics
@@ -22,102 +22,864 @@
  * This driver is based on max17040_battery.c
  */
 
-#include <linux/init.h>
 #include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
 #include <linux/slab.h>
 #include <linux/i2c.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/pm.h>
-#include <linux/mod_devicetable.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/pm_runtime.h>
 #include <linux/power_supply.h>
 #include <linux/power/max17042_battery.h>
-#include <linux/of.h>
+#include <linux/reboot.h>
+#include <linux/delay.h>
+#include <linux/notifier.h>
+#include <linux/miscdevice.h>
+#include <linux/atomic.h>
+#include <linux/acpi.h>
+#include <linux/acpi_gpio.h>
 
 /* Status register bits */
-#define STATUS_POR_BIT         (1 << 1)
-#define STATUS_BST_BIT         (1 << 3)
-#define STATUS_VMN_BIT         (1 << 8)
-#define STATUS_TMN_BIT         (1 << 9)
-#define STATUS_SMN_BIT         (1 << 10)
-#define STATUS_BI_BIT          (1 << 11)
-#define STATUS_VMX_BIT         (1 << 12)
-#define STATUS_TMX_BIT         (1 << 13)
-#define STATUS_SMX_BIT         (1 << 14)
-#define STATUS_BR_BIT          (1 << 15)
+#define STATUS_MASK		0xFF0A
+#define STATUS_POR_BIT		(1 << 1)
+#define STATUS_BST_BIT		(1 << 3)
+#define STATUS_VMN_BIT		(1 << 8)
+#define STATUS_TMN_BIT		(1 << 9)
+#define STATUS_SMN_BIT		(1 << 10)
+#define STATUS_BI_BIT		(1 << 11)
+#define STATUS_VMX_BIT		(1 << 12)
+#define STATUS_TMX_BIT		(1 << 13)
+#define STATUS_SMX_BIT		(1 << 14)
+#define STATUS_BR_BIT		(1 << 15)
 
-/* Interrupt mask bits */
+#define MAX17042_IC_VERSION	0x0092
+#define MAX17050_IC_VERSION	0x00AC
+
+/* Vmax disabled, Vmin disabled */
+#define VOLT_DEF_MAX_MIN_THRLD  0xFF00
+
+/* Vmax disabled, Vmin set to 3300mV */
+#define VOLT_MIN_THRLD_ENBL	0xFFA5
+
+/* Tmax disabled, Tmin disabled */
+#define TEMP_DEF_MAX_MIN_THRLD  0x7F80
+
+/* SoCmax disabled, SoCmin can be set to 15%, 4% and 1%.
+ * INT will trigger when the thresholds are voilated.
+ */
+#define SOC_DEF_MAX_MIN1_THRLD	0xFF0E
+#define SOC_DEF_MAX_MIN2_THRLD	0xFF04
+#define SOC_DEF_MAX_MIN3_THRLD	0xFF01
+
+/* SOC threshold for 1% interrupt */
+#define SOC_INTR_S0_THR		1
+
+#define MISCCFG_CONFIG_REPSOC	0x0000
+#define MISCCFG_CONFIG_VFSOC	0x0003
+
+/* low battery notification warning level */
+#define SOC_WARNING_LEVEL1	14
+#define SOC_WARNING_LEVEL2	4
+#define SOC_SHUTDOWN_LEVEL	1
+
+#define CONFIG_BER_BIT_ENBL	(1 << 0)
+#define CONFIG_BEI_BIT_ENBL	(1 << 1)
 #define CONFIG_ALRT_BIT_ENBL	(1 << 2)
-#define STATUS_INTR_SOCMIN_BIT	(1 << 10)
-#define STATUS_INTR_SOCMAX_BIT	(1 << 14)
+#define CONFIG_VSTICKY_BIT_SET	(1 << 12)
+#define CONFIG_TSTICKY_BIT_SET	(1 << 13)
+#define CONFIG_SSTICKY_BIT_SET	(1 << 14)
+#define CONFIG_ALP_BIT_ENBL	(1 << 11)
+#define CONFIG_TEX_BIT_ENBL	(1 << 8)
 
 #define VFSOC0_LOCK		0x0000
 #define VFSOC0_UNLOCK		0x0080
-#define MODEL_UNLOCK1	0X0059
-#define MODEL_UNLOCK2	0X00C4
-#define MODEL_LOCK1		0X0000
-#define MODEL_LOCK2		0X0000
+#define FG_MODEL_UNLOCK1	0X0059
+#define FG_MODEL_UNLOCK2	0X00C4
+#define FG_MODEL_LOCK1		0X0000
+#define FG_MODEL_LOCK2		0X0000
 
 #define dQ_ACC_DIV	0x4
 #define dP_ACC_100	0x1900
 #define dP_ACC_200	0x3200
 
-#define MAX17042_IC_VERSION	0x0092
-#define MAX17047_IC_VERSION	0x00AC	/* same for max17050 */
+#define	NTC_47K_TGAIN	0xE4E4
+#define	NTC_47K_TOFF	0x2F1D
+
+#define BATT_CHRG_FULL_DES		1550000
+#define MAX17042_VOLT_CONV_FCTR		625
+#define MAX17042_CURR_CONV_FCTR		156
+#define MAX17042_CHRG_CONV_FCTR		500
+
+#define MAX17042_TEMP_SIGN_MASK		0x8000
+
+#define MAX17042_MAX_MEM	(0xFF + 1)
+
+#define MAX17042_MODEL_MUL_FACTOR(a, b)	((a * 100) / b)
+#define MAX17042_MODEL_DIV_FACTOR(a, b)	((a * b) / 100)
+
+#define CONSTANT_TEMP_IN_POWER_SUPPLY	350
+#define POWER_SUPPLY_VOLT_MIN_THRESHOLD	3500000
+#define BATTERY_VOLT_MIN_THRESHOLD	3400000
+
+#define CYCLES_ROLLOVER_CUTOFF		0x00FF
+#define MAX17042_DEF_RO_LRNCFG		0x0076
+
+#define MAX17042_CGAIN_DISABLE		0x0000
+#define MAX17042_EN_VOLT_FG		0x0007
+#define MAX17042_CFG_INTR_SOCVF		0x0003
+
+/* Vempty value set to 2500mV */
+#define MAX17042_DEF_VEMPTY_VAL		0x7D5A
+
+#define MAX17042_SIGN_INDICATOR		0x8000
+
+#define SHUTDOWN_DEF_FG_MASK_BIT	(1 << 0)
+#define SHUTDOWN_OCV_MASK_BIT		(1 << 1)
+#define SHUTDOWN_LOWBATT_MASK_BIT	(1 << 2)
+
+#define BYTE_VALUE			1
+#define WORD_VALUE			0
+
+/* Time interval to write temperature values from host, if needed (in milliseconds) */
+#define TEMP_WRITE_INTERVAL		120000
+enum max17042_register {
+	MAX17042_STATUS		= 0x00,
+	MAX17042_VALRT_Th	= 0x01,
+	MAX17042_TALRT_Th	= 0x02,
+	MAX17042_SALRT_Th	= 0x03,
+	MAX17042_AtRate		= 0x04,
+	MAX17042_RepCap		= 0x05,
+	MAX17042_RepSOC		= 0x06,
+	MAX17042_Age		= 0x07,
+	MAX17042_TEMP		= 0x08,
+	MAX17042_VCELL		= 0x09,
+	MAX17042_Current	= 0x0A,
+	MAX17042_AvgCurrent	= 0x0B,
+	MAX17042_Qresidual	= 0x0C,
+	MAX17042_SOC		= 0x0D,
+	MAX17042_AvSOC		= 0x0E,
+	MAX17042_RemCap		= 0x0F,
+	MAX17042_FullCAP	= 0x10,
+	MAX17042_TTE		= 0x11,
+	MAX17042_V_empty	= 0x12,
+
+	MAX17042_RSLOW		= 0x14,
+
+	MAX17042_AvgTA		= 0x16,
+	MAX17042_Cycles		= 0x17,
+	MAX17042_DesignCap	= 0x18,
+	MAX17042_AvgVCELL	= 0x19,
+	MAX17042_MinMaxTemp	= 0x1A,
+	MAX17042_MinMaxVolt	= 0x1B,
+	MAX17042_MinMaxCurr	= 0x1C,
+	MAX17042_CONFIG		= 0x1D,
+	MAX17042_ICHGTerm	= 0x1E,
+	MAX17042_AvCap		= 0x1F,
+	MAX17042_ManName	= 0x20,
+	MAX17042_DevName	= 0x21,
+	MAX17042_DevChem	= 0x22,
+	MAX17042_FullCAPNom	= 0x23,
+
+	MAX17042_TempNom	= 0x24,
+	MAX17042_TempCold	= 0x25,
+	MAX17042_TempHot	= 0x26,
+	MAX17042_AIN		= 0x27,
+	MAX17042_LearnCFG	= 0x28,
+	MAX17042_SHFTCFG	= 0x29,
+	MAX17042_RelaxCFG	= 0x2A,
+	MAX17042_MiscCFG	= 0x2B,
+	MAX17042_TGAIN		= 0x2C,
+	MAx17042_TOFF		= 0x2D,
+	MAX17042_CGAIN		= 0x2E,
+	MAX17042_COFF		= 0x2F,
+
+	MAX17042_SOCempty	= 0x33,
+	MAX17042_T_empty	= 0x34,
+	MAX17042_FullCAP0	= 0x35,
+
+	MAX17042_LAvg_empty	= 0x36,
+	MAX17042_FCTC		= 0x37,
+	MAX17042_RCOMP0		= 0x38,
+	MAX17042_TempCo		= 0x39,
+	MAX17042_ETC		= 0x3A,
+	MAX17042_K_empty0	= 0x3B,
+	MAX17042_TaskPeriod	= 0x3C,
+	MAX17042_FSTAT		= 0x3D,
+
+	MAX17042_SHDNTIMER	= 0x3F,
+
+	MAX17042_dQacc		= 0x45,
+	MAX17042_dPacc		= 0x46,
+	MAX17042_VFSOC0         = 0x48,
+	MAX17042_VFRemCap	= 0x4A,
+
+	MAX17042_QH		= 0x4D,
+	MAX17042_QL		= 0x4E,
+
+	MAX17042_VFSOC0Enable	= 0x60,
+	MAX17042_MLOCKReg1	= 0x62,
+	MAX17042_MLOCKReg2	= 0x63,
+	MAX17042_MODELChrTbl	= 0x80,
+	MAX17042_OCV		= 0xEE,
+	MAX17042_OCVInternal	= 0xFB,
+	MAX17042_VFSOC		= 0xFF,
+
+};
+
+/* Registers specific to max17047/50 */
+enum max17050_register {
+	MAX17050_QRTbl00	= 0x12,
+	MAX17050_FullSOCThr	= 0x13,
+	MAX17050_QRTbl10	= 0x22,
+	MAX17050_QRTbl20	= 0x32,
+	MAX17050_V_empty	= 0x3A,
+	MAX17050_QRTbl30	= 0x42,
+};
+
+#define DRV_NAME "max170xx_battery"
+
+enum max170xx_chip_type {MAX17042, MAX17050};
+
+/* No of times we should retry on -EAGAIN error */
+#define NR_RETRY_CNT	3
+
+/* No of times we should process interrupt reasons @irq handler */
+/* Probably all values >1 are ok, Normally It just goes once thought
+ * all bits and everything is handled. Also chips seems to limit
+ * interrupts to ~3/s, so we have ~300ms to process, until we will
+ * miss interrupt. What ever value it's, it doesn't have any
+ * performance impact. */
+#define NR_RETRY_INT	3
+
+/* No of times we should reset I2C lines */
+#define NR_I2C_RESET_CNT	8
+
+#define VBATT_MAX 4200000	/* 4200mV */
+#define VBATT_MIN 3400000	/* 3400mV */
+
+#define VBATT_MIN_OFFSET	100 /* 100mV from VMMIN */
+#define VBATT_MAX_OFFSET	50 /* 50mV from VMAX */
+#define VALERT_VOLT_OFFSET	20 /* each bit corresponds to 20mV */
+
+#if 0
+/* default fuel gauge cell data for debug purpose only */
+static uint16_t cell_char_tbl[] = {
+	/* Data to be written from 0x80h */
+	0xA250, 0xB720, 0xB800, 0xB880, 0xB920, 0xBA00, 0xBA60, 0xBBF0,
+	0xBCF0, 0xBE50, 0xC060, 0xC2D0, 0xC520, 0xC750, 0xCA00, 0xD090,
+	/* Data to be written from 0x90h */
+	0x0120, 0x1C80, 0x0470, 0x0440, 0x0100, 0x5500, 0x0960, 0x2410,
+	0x2250, 0x15F0, 0x0BD0, 0x0D00, 0x0B00, 0x0BB0, 0x08A0, 0x08A0,
+	/* Data to be written from 0xA0h */
+	0x0100, 0x0100, 0x0100, 0x0100, 0x0100, 0x0100, 0x0100, 0x0100,
+	0x0100, 0x0100, 0x0100, 0x0100, 0x0100, 0x0100, 0x0100, 0x0100,
+};
+#endif
 
 struct max17042_chip {
 	struct i2c_client *client;
-	struct power_supply battery;
 	enum max170xx_chip_type chip_type;
+	struct power_supply battery;
 	struct max17042_platform_data *pdata;
-	struct work_struct work;
-	int    init_complete;
+	struct mutex batt_lock;
+	struct mutex init_lock;
+
+	int present;
+	int status;
+	int health;
+	int technology;
+	int charge_full_des;
+
+	struct work_struct	init_worker;
+	struct work_struct	evt_worker;
+	struct delayed_work	temp_worker;
+
+	bool plat_rebooting;
+	/*
+	 * user space can disable default shutdown
+	 * methods set by platform.
+	 */
+	int	disable_shdwn_methods;
+
+	/*
+	 * user space can set this variable to report constant
+	 * batery temperature for conformence testing.
+	 */
+	bool	enable_fake_temp;
+	int	extra_resv_cap;
+	int	voltage_max;
+	int	model_algo_factor;
+};
+
+/* Sysfs entry for disable shutdown methods from user space */
+static ssize_t override_shutdown_methods(struct device *device,
+			       struct device_attribute *attr, const char *buf,
+			       size_t count);
+static ssize_t get_shutdown_methods(struct device *device,
+			       struct device_attribute *attr, char *buf);
+static DEVICE_ATTR(disable_shutdown_methods, S_IRUGO | S_IWUSR,
+	get_shutdown_methods, override_shutdown_methods);
+
+/* Sysfs entry to enter shutdown voltage from user space */
+static int shutdown_volt;
+static ssize_t set_shutdown_voltage(struct device *device,
+				struct device_attribute *attr, const char *buf,
+				size_t count);
+static ssize_t get_shutdown_voltage_set_by_user(struct device *device,
+				struct device_attribute *attr, char *buf);
+static DEVICE_ATTR(shutdown_voltage, S_IRUGO | S_IWUSR,
+	get_shutdown_voltage_set_by_user, set_shutdown_voltage);
+
+/*
+ * Sysfs entry to report fake battery temperature. This
+ * interface is needed to support conformence testing
+ */
+static ssize_t set_fake_temp_enable(struct device *device,
+			       struct device_attribute *attr, const char *buf,
+			       size_t count);
+static ssize_t get_fake_temp_enable(struct device *device,
+			       struct device_attribute *attr, char *buf);
+static DEVICE_ATTR(enable_fake_temp, S_IRUGO | S_IWUSR,
+	get_fake_temp_enable, set_fake_temp_enable);
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *max17042_dbgfs_root;
+static char max17042_dbg_regs[MAX17042_MAX_MEM][4];
+#endif
+
+static int max17042_reboot_callback(struct notifier_block *nfb,
+					unsigned long event, void *data);
+
+static struct notifier_block max17042_reboot_notifier_block = {
+	.notifier_call = max17042_reboot_callback,
+	.priority = 0,
+};
+
+static bool is_battery_online(struct max17042_chip *chip);
+static void configure_interrupts(struct max17042_chip *chip);
+/* Set SOC threshold in S3 state */
+static void set_soc_intr_thresholds_s3(struct max17042_chip *chip);
+/* Set SOC threshold to offset percentage in S0 state */
+static void set_soc_intr_thresholds_s0(struct max17042_chip *chip, int offset);
+static void save_runtime_params(struct max17042_chip *chip);
+static void set_chip_config(struct max17042_chip *chip);
+static u16 fg_vfSoc;
+static bool fake_batt_full;
+static struct max17042_config_data *fg_conf_data;
+static struct i2c_client *max17042_client;
+
+
+atomic_t fopen_count;
+
+static void update_runtime_params(struct max17042_chip *chip);
+static int read_batt_pack_temp(struct max17042_chip *chip, int *temp);
+
+/* Voltage-Capacity lookup function to get
+ * capacity value against a given voltage */
+static unsigned int voltage_capacity_lookup(unsigned int val)
+{
+	unsigned int max = VBATT_MAX / 1000;
+	unsigned int min = VBATT_MIN / 1000;
+	unsigned int capacity;
+	unsigned int total_diff;
+	unsigned int val_diff;
+
+	if (val > max)
+		return 100;
+
+	if (val < min)
+		return 0;
+
+	total_diff = max - min;
+	val_diff = max - val;
+
+	capacity = (total_diff - val_diff) * 100 / total_diff;
+
+	return capacity;
+}
+
+static int max17042_property_is_privileged_read(struct power_supply *psy,
+						enum power_supply_property psp)
+{
+	switch (psp) {
+	case POWER_SUPPLY_PROP_MODEL_NAME:
+	case POWER_SUPPLY_PROP_SERIAL_NUMBER:
+		return 1;
+	default:
+		break;
+	}
+	return 0;
+}
+
+static int dev_file_open(struct inode *i, struct file *f)
+{
+	if (atomic_read(&fopen_count))
+		return -EBUSY;
+	atomic_inc(&fopen_count);
+	return 0;
+}
+
+static int dev_file_close(struct inode *i, struct file *f)
+{
+	atomic_dec(&fopen_count);
+	return 0;
+}
+
+static ssize_t dev_file_read(struct file *f, char __user *buf,
+			size_t len, loff_t *off)
+{
+	struct max17042_chip *chip = i2c_get_clientdata(max17042_client);
+	int ret;
+
+	if (!chip->pdata->is_init_done) {
+		dev_err(&max17042_client->dev,
+			"MAX17042 is not initialized.\n");
+		return -ECANCELED;
+	}
+
+	update_runtime_params(chip);
+
+	if (sizeof(*fg_conf_data) > len)
+		return -EINVAL;
+
+	ret = copy_to_user(buf, fg_conf_data, sizeof(*fg_conf_data));
+	if (!ret)
+		return sizeof(*fg_conf_data);
+
+	return -EINVAL;
+}
+
+static ssize_t dev_file_write(struct file *f, const char __user *buf,
+			size_t len, loff_t *off)
+{
+	struct max17042_chip *chip = i2c_get_clientdata(max17042_client);
+
+	if (chip->pdata->is_init_done) {
+		dev_err(&max17042_client->dev,
+			"Already initialized.So ignoring new set of data\n");
+		return -ECANCELED;
+	}
+
+	if (len > sizeof(*fg_conf_data))
+		return -EINVAL;
+
+	if (copy_from_user(fg_conf_data, buf, len))
+		return -EINVAL;
+
+	set_chip_config(chip);
+
+	if (chip->pdata->is_init_done) {
+		dev_info(&max17042_client->dev,
+				"MAX17042 initialized successfully\n");
+		fg_conf_data->config_init = 0x1;
+	}
+
+	/* Return no. of bytes written */
+	return len;
+}
+
+static const struct file_operations helper_fops = {
+	.owner = THIS_MODULE,
+	.open = &dev_file_open,
+	.release = &dev_file_close,
+	.read = &dev_file_read,
+	.write = &dev_file_write,
+};
+
+static struct miscdevice fg_helper = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "max170xx",
+	.fops = &helper_fops,
+};
+
+static enum power_supply_property max17042_battery_props[] = {
+	POWER_SUPPLY_PROP_PRESENT,
+	POWER_SUPPLY_PROP_STATUS,
+	POWER_SUPPLY_PROP_HEALTH,
+	POWER_SUPPLY_PROP_TECHNOLOGY,
+	POWER_SUPPLY_PROP_VOLTAGE_NOW,
+	POWER_SUPPLY_PROP_VOLTAGE_AVG,
+	POWER_SUPPLY_PROP_VOLTAGE_OCV,
+	POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
+	POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
+	POWER_SUPPLY_PROP_CURRENT_NOW,
+	POWER_SUPPLY_PROP_CURRENT_AVG,
+	POWER_SUPPLY_PROP_CAPACITY,
+	POWER_SUPPLY_PROP_TEMP,
+	POWER_SUPPLY_PROP_TEMP_ALERT_MIN,
+	POWER_SUPPLY_PROP_TEMP_ALERT_MAX,
+	POWER_SUPPLY_PROP_CHARGE_NOW,
+	POWER_SUPPLY_PROP_CHARGE_FULL,
+	POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+	POWER_SUPPLY_PROP_CHARGE_COUNTER,
+	POWER_SUPPLY_PROP_MODEL_NAME,
+	POWER_SUPPLY_PROP_SERIAL_NUMBER,
 };
 
 static int max17042_write_reg(struct i2c_client *client, u8 reg, u16 value)
 {
-	int ret = i2c_smbus_write_word_data(client, reg, value);
+	int ret, i;
+	struct max17042_chip *chip = i2c_get_clientdata(client);
+
+	/* if the shutdown or reboot sequence started
+	 * then block the access to maxim registers as chip
+	 * cannot be recovered from broken i2c transactions
+	 */
+	if (chip->plat_rebooting) {
+		dev_warn(&client->dev, "rebooting is in progress\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < NR_RETRY_CNT; i++) {
+		ret = i2c_smbus_write_word_data(client, reg, value);
+		if (ret == -EAGAIN || ret == -ETIMEDOUT)
+			continue;
+		else
+			break;
+	}
 
 	if (ret < 0)
-		dev_err(&client->dev, "%s: err %d\n", __func__, ret);
+		dev_err(&client->dev, "I2C SMbus Write error:%d\n", ret);
 
 	return ret;
 }
 
 static int max17042_read_reg(struct i2c_client *client, u8 reg)
 {
-	int ret = i2c_smbus_read_word_data(client, reg);
+	int ret, i;
+	struct max17042_chip *chip = i2c_get_clientdata(client);
+
+	/* if the shutdown or reboot sequence started
+	 * then block the access to maxim registers as chip
+	 * cannot be recovered from broken i2c transactions
+	 */
+	if (chip->plat_rebooting) {
+		dev_warn(&client->dev, "rebooting is in progress\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < NR_RETRY_CNT; i++) {
+		ret = i2c_smbus_read_word_data(client, reg);
+		if (ret == -EAGAIN || ret == -ETIMEDOUT)
+			continue;
+		else
+			break;
+	}
 
 	if (ret < 0)
-		dev_err(&client->dev, "%s: err %d\n", __func__, ret);
+		dev_err(&client->dev, "I2C SMbus Read error:%d\n", ret);
 
 	return ret;
 }
 
-static void max17042_set_reg(struct i2c_client *client,
-			     struct max17042_reg_data *data, int size)
+/*
+ * max17042 chip has few registers which could get modified by the
+ * chip as well during its fuel gauge learning process. So we need
+ * to do a write verify on those registers and if the write fails
+ * then we have to retry.
+ */
+static int max17042_write_verify_reg(struct i2c_client *client,
+						u8 reg, u16 value)
 {
-	int i;
+	int ret, i;
 
-	for (i = 0; i < size; i++)
-		max17042_write_reg(client, data[i].addr, data[i].data);
+	for (i = 0; i < NR_RETRY_CNT; i++) {
+		/* Write the value to register */
+		ret = max17042_write_reg(client, reg, value);
+		if (ret < 0)
+			continue;
+		/* Read the value from register */
+		ret = max17042_read_reg(client, reg);
+		if (ret < 0)
+			continue;
+		/* compare the both the values */
+		if (value != ret)
+			dev_err(&client->dev,
+				"write verify failed on Register:0x%x\n", reg);
+		else
+			break;
+	}
+
+	return ret;
 }
 
-static enum power_supply_property max17042_battery_props[] = {
-	POWER_SUPPLY_PROP_PRESENT,
-	POWER_SUPPLY_PROP_CYCLE_COUNT,
-	POWER_SUPPLY_PROP_VOLTAGE_MAX,
-	POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
-	POWER_SUPPLY_PROP_VOLTAGE_NOW,
-	POWER_SUPPLY_PROP_VOLTAGE_AVG,
-	POWER_SUPPLY_PROP_VOLTAGE_OCV,
-	POWER_SUPPLY_PROP_CAPACITY,
-	POWER_SUPPLY_PROP_CHARGE_FULL,
-	POWER_SUPPLY_PROP_CHARGE_COUNTER,
-	POWER_SUPPLY_PROP_TEMP,
-	POWER_SUPPLY_PROP_CURRENT_NOW,
-	POWER_SUPPLY_PROP_CURRENT_AVG,
-};
+static int max17042_reg_read_modify(struct i2c_client *client, u8 reg,
+							u16 val, int bit_set)
+{
+	int ret;
+
+	ret = max17042_read_reg(client, reg);
+	if (ret < 0)
+		return ret;
+
+	if (bit_set)
+		ret |= val;
+	else
+		ret &= (~val);
+
+	ret = max17042_write_reg(client, reg, ret);
+	return ret;
+}
+
+static irqreturn_t max17042_intr_handler(int id, void *dev)
+{
+	return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t max17042_thread_handler(int id, void *dev)
+{
+	struct max17042_chip *chip = dev;
+	struct device *device = &chip->client->dev;
+	int stat, temp, val, count = 0;
+	u16 processed, ignored, config;
+
+	pm_runtime_get_sync(device);
+
+	/* read current configuration */
+	val = max17042_read_reg(chip->client, MAX17042_CONFIG);
+	if (val < 0)
+		config = fg_conf_data->cfg;
+	else
+		config = val;
+
+	stat = max17042_read_reg(chip->client, MAX17042_STATUS);
+	do {
+		dev_dbg(device, "%s: Status-val: 0x%x\n", __func__, stat);
+		if (stat < 0) {
+			dev_err(device,
+				"max17042-INTR: status read failed:%d\n", stat);
+			pm_runtime_put_sync(device);
+			return IRQ_HANDLED;
+		}
+
+		processed = 0;
+		ignored = 0;
+
+		if ((stat & STATUS_VMN_BIT) || (stat & STATUS_VMX_BIT)) {
+			dev_info(device, "VOLT threshold INTR\n");
+			/* nothing yet */
+			if (stat & STATUS_VMN_BIT) {
+				if (config & CONFIG_VSTICKY_BIT_SET)
+					processed |= STATUS_VMN_BIT;
+				else
+					ignored |= STATUS_VMN_BIT;
+			}
+			if (stat & STATUS_VMX_BIT) {
+				if (config & CONFIG_VSTICKY_BIT_SET)
+					processed |= STATUS_VMX_BIT;
+				else
+					ignored |= STATUS_VMX_BIT;
+			}
+		}
+
+		if ((stat & STATUS_SMN_BIT) || (stat & STATUS_SMX_BIT)) {
+			dev_info(device, "SOC threshold INTR\n");
+			/* Actual processing is done in evt_worker */
+			/* so we might get interrupt again or miss */
+			if (stat & STATUS_SMN_BIT) {
+				if (config & CONFIG_SSTICKY_BIT_SET)
+					processed |= STATUS_SMN_BIT;
+				else
+					ignored |= STATUS_SMN_BIT;
+			}
+			if (stat & STATUS_SMX_BIT) {
+				if (config & CONFIG_SSTICKY_BIT_SET)
+					processed |= STATUS_SMX_BIT;
+				else
+					ignored |= STATUS_SMX_BIT;
+			}
+		}
+
+		if (stat & STATUS_BR_BIT) {
+			dev_info(device, "Battery removed INTR\n");
+			if ((config & CONFIG_BER_BIT_ENBL) &&
+			    (stat & STATUS_BST_BIT)) {
+				dev_warn(device, "battery unplugged\n");
+				mutex_lock(&chip->batt_lock);
+				chip->present = 0;
+				mutex_unlock(&chip->batt_lock);
+				kernel_power_off();
+			}
+			processed |= STATUS_BR_BIT;
+		}
+
+		if ((stat & STATUS_TMN_BIT) || (stat & STATUS_TMX_BIT)) {
+			val = read_batt_pack_temp(chip, &temp);
+			if (val) {
+				dev_warn(device, "Can't read temp: %d\n", val);
+			} else {
+				val = max17042_read_reg(chip->client,
+							MAX17042_TALRT_Th);
+				dev_info(device,
+					"Thermal threshold INTR: %d (%d, %d)\n",
+					 temp, (int8_t)(val & 0xff),
+					 (int8_t)(val >> 8));
+			}
+			if (stat & STATUS_TMN_BIT) {
+				if (config & CONFIG_TSTICKY_BIT_SET)
+					processed |= STATUS_TMN_BIT;
+				else
+					ignored |= STATUS_TMN_BIT;
+			}
+			if (stat & STATUS_TMX_BIT) {
+				if (config & CONFIG_TSTICKY_BIT_SET)
+					processed |= STATUS_TMX_BIT;
+				else
+					ignored |= STATUS_TMX_BIT;
+			}
+		}
+
+		if (stat & STATUS_POR_BIT) {
+			dev_info(device, "Power On Reset event\n");
+			ignored |= STATUS_POR_BIT;
+		}
+
+		if (stat & STATUS_BST_BIT)
+			ignored |= STATUS_BST_BIT;
+
+		if (stat & STATUS_BI_BIT) {
+			dev_info(device, "Battery Insert INTR\n");
+			/* nothing yet */
+			processed |= STATUS_BI_BIT;
+		}
+
+		/* clear int */
+		max17042_reg_read_modify(chip->client, MAX17042_STATUS,
+					 processed, 0);
+
+		stat = max17042_read_reg(chip->client, MAX17042_STATUS);
+	} while ((stat & STATUS_MASK & ~ignored) && (count++ < NR_RETRY_INT));
+
+	/* update battery status and health */
+	schedule_work(&chip->evt_worker);
+	pm_runtime_put_sync(device);
+	if (count >= NR_RETRY_INT) {
+		dev_err(device, "%s: can't process all IRQ reasons: 0x%x\n",
+			__func__, stat);
+		/* desperate */
+		max17042_write_reg(max17042_client, MAX17042_STATUS, 0x0000);
+	}
+	return IRQ_HANDLED;
+}
+
+static short adjust_sign_value(int value, int is_byte)
+{
+	short result, temp = (short)value;
+	if (temp & MAX17042_SIGN_INDICATOR) {
+
+		if (is_byte) {
+			result = (~temp) >> 8;
+			result &= 0xff;
+		} else {
+			result = ~temp;
+		}
+
+		result++;
+		result *= -1;
+	} else {
+		if (is_byte)
+			result = temp >> 8;
+		else
+			result = temp;
+	}
+
+	return result;
+}
+
+static int read_batt_pack_temp(struct max17042_chip *chip, int *temp)
+{
+	int ret;
+	u16 val;
+
+	/* Read battery pack temperature */
+	if (chip->pdata->battery_pack_temp) {
+		ret = chip->pdata->battery_pack_temp(temp);
+		if (ret < 0)
+			goto temp_read_err;
+
+		/* Convert the temperature to 2's complement form.
+		 * Most significant byte contains the decimal
+		 * equivalent of the data */
+		if (fg_conf_data->cfg & CONFIG_TEX_BIT_ENBL) {
+			if (*temp < 0) {
+				val = (*temp + 0xff + 1);
+				val <<= 8;
+			} else {
+				val = *temp;
+				val <<= 8;
+			}
+			ret = max17042_write_reg(chip->client,
+							MAX17042_TEMP, val);
+			if (ret < 0)
+				dev_err(&chip->client->dev,
+					"Temp write to maxim failed:%d", ret);
+		}
+	} else {
+		ret = max17042_read_reg(chip->client, MAX17042_TEMP);
+		if (ret < 0)
+			goto temp_read_err;
+
+		/* MAX17042_TEMP register gives the signed
+		 * value and we are ignoring the lower byte
+		 * which represents the decimal point */
+
+		*temp = adjust_sign_value(ret, BYTE_VALUE);
+	}
+	return 0;
+
+temp_read_err:
+	dev_err(&chip->client->dev, "BP Temp read error:%d", ret);
+	return ret;
+}
+
+static int max17042_set_property(struct power_supply *psy,
+				    enum power_supply_property psp,
+				    const union power_supply_propval *val)
+{
+	struct max17042_chip *chip = container_of(psy,
+				struct max17042_chip, battery);
+	int ret = 0;
+	int8_t temp;
+
+	mutex_lock(&chip->batt_lock);
+	switch (psp) {
+	case POWER_SUPPLY_PROP_STATUS:
+		chip->status = val->intval;
+		break;
+	case POWER_SUPPLY_PROP_TEMP_ALERT_MIN:
+		ret = max17042_read_reg(chip->client, MAX17042_TALRT_Th);
+		if (ret < 0)
+			break;
+		temp = val->intval / 10; /* 0.1C prop to 1C reg */
+		/* Force that min is under max */
+		if (temp >= (int8_t)(ret >> 8))
+			temp = (int8_t)(ret >> 8) - 1;
+		ret = (ret & 0xff00) + (uint8_t)temp;
+		ret = max17042_write_reg(chip->client, MAX17042_TALRT_Th, ret);
+		break;
+	case POWER_SUPPLY_PROP_TEMP_ALERT_MAX:
+		ret = max17042_read_reg(chip->client, MAX17042_TALRT_Th);
+		if (ret < 0)
+			break;
+		temp = val->intval / 10; /* 0.1C prop to 1C reg */
+		/* Force that max is over min */
+		if (temp <= (int8_t)(ret & 0xff))
+			temp = (int8_t)(ret & 0xff) + 1;
+		ret = (temp << 8) + (ret & 0xff);
+		ret = max17042_write_reg(chip->client, MAX17042_TALRT_Th, ret);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	mutex_unlock(&chip->batt_lock);
+
+	return ret;
+}
 
 static int max17042_get_property(struct power_supply *psy,
 			    enum power_supply_property psp,
@@ -125,465 +887,615 @@
 {
 	struct max17042_chip *chip = container_of(psy,
 				struct max17042_chip, battery);
-	int ret;
+	short int cur;
+	int volt_ocv, ret, batt_temp, batt_vmin;
 
-	if (!chip->init_complete)
-		return -EAGAIN;
-
+	mutex_lock(&chip->batt_lock);
 	switch (psp) {
+	case POWER_SUPPLY_PROP_STATUS:
+		/*
+		 * status is being read from external
+		 * module so check for error case before
+		 * assigning to intval.
+		 */
+		if (chip->status < 0) {
+			ret = chip->status;
+			goto ps_prop_read_err;
+		} else {
+			val->intval = chip->status;
+		}
+		break;
+	case POWER_SUPPLY_PROP_HEALTH:
+		/*
+		 * health is being read from external
+		 * module so check for error case before
+		 * assigning to intval.
+		 */
+		if (chip->health < 0) {
+			ret = chip->health;
+			goto ps_prop_read_err;
+		} else {
+			val->intval = chip->health;
+		}
+		break;
 	case POWER_SUPPLY_PROP_PRESENT:
-		ret = max17042_read_reg(chip->client, MAX17042_STATUS);
+		val->intval = chip->present;
+		break;
+	case POWER_SUPPLY_PROP_TECHNOLOGY:
+		val->intval = chip->technology;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+		val->intval = chip->charge_full_des;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_NOW:
+		ret = max17042_read_reg(chip->client, MAX17042_RepCap);
 		if (ret < 0)
-			return ret;
+			goto ps_prop_read_err;
+		val->intval = ret * MAX17042_CHRG_CONV_FCTR;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_FULL:
+		ret = max17042_read_reg(chip->client, MAX17042_FullCAP);
+		if (ret < 0)
+			goto ps_prop_read_err;
+		val->intval = ret * MAX17042_CHRG_CONV_FCTR;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_COUNTER:
+		ret = max17042_read_reg(chip->client, MAX17042_QH);
+		if (ret < 0)
+			goto ps_prop_read_err;
+		val->intval = ret * MAX17042_CHRG_CONV_FCTR;
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_NOW:
+		ret = max17042_read_reg(chip->client, MAX17042_Current);
+		if (ret < 0)
+			goto ps_prop_read_err;
 
-		if (ret & MAX17042_STATUS_BattAbsent)
-			val->intval = 0;
+		cur = adjust_sign_value(ret, WORD_VALUE);
+
+		if (fg_conf_data->rsense)
+			val->intval = (cur * MAX17042_CURR_CONV_FCTR)
+						/ fg_conf_data->rsense;
 		else
-			val->intval = 1;
+			val->intval = cur * MAX17042_CURR_CONV_FCTR;
 		break;
-	case POWER_SUPPLY_PROP_CYCLE_COUNT:
-		ret = max17042_read_reg(chip->client, MAX17042_Cycles);
+	case POWER_SUPPLY_PROP_CURRENT_AVG:
+		ret = max17042_read_reg(chip->client, MAX17042_AvgCurrent);
 		if (ret < 0)
-			return ret;
+			goto ps_prop_read_err;
 
-		val->intval = ret;
+		cur = adjust_sign_value(ret, WORD_VALUE);
+
+		if (fg_conf_data->rsense)
+			val->intval = (cur * MAX17042_CURR_CONV_FCTR)
+						/ fg_conf_data->rsense;
+		else
+			val->intval = cur * MAX17042_CURR_CONV_FCTR;
 		break;
-	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
-		ret = max17042_read_reg(chip->client, MAX17042_MinMaxVolt);
+	case POWER_SUPPLY_PROP_TEMP:
+		if (!chip->pdata->enable_current_sense ||
+				chip->enable_fake_temp) {
+			val->intval = CONSTANT_TEMP_IN_POWER_SUPPLY;
+			break;
+		}
+		ret = read_batt_pack_temp(chip, &batt_temp);
 		if (ret < 0)
-			return ret;
-
-		val->intval = ret >> 8;
-		val->intval *= 20000; /* Units of LSB = 20mV */
+			goto ps_prop_read_err;
+		/*
+		 * Temperature is measured in units of degrees celcius, the
+		 * power_supply class measures temperature in tenths of degrees
+		 * celsius.
+		 */
+		val->intval = batt_temp * 10;
+		break;
+	case POWER_SUPPLY_PROP_TEMP_ALERT_MIN:
+		ret = max17042_read_reg(chip->client, MAX17042_TALRT_Th);
+		if (ret < 0)
+			goto ps_prop_read_err;
+		val->intval = ((int8_t)(ret & 0xff)) * 10; /* 0.1C */
+		break;
+	case POWER_SUPPLY_PROP_TEMP_ALERT_MAX:
+		ret = max17042_read_reg(chip->client, MAX17042_TALRT_Th);
+		if (ret < 0)
+			goto ps_prop_read_err;
+		val->intval = ((int8_t)(ret >> 8)) * 10; /* 0.1C */
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+		ret = max17042_read_reg(chip->client, MAX17042_VCELL);
+		if (ret < 0)
+			goto ps_prop_read_err;
+		val->intval = (ret >> 3) * MAX17042_VOLT_CONV_FCTR;
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_AVG:
+		ret = max17042_read_reg(chip->client, MAX17042_AvgVCELL);
+		if (ret < 0)
+			goto ps_prop_read_err;
+		val->intval = (ret >> 3) * MAX17042_VOLT_CONV_FCTR;
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_OCV:
+		ret = max17042_read_reg(chip->client, MAX17042_OCVInternal);
+		if (ret < 0)
+			goto ps_prop_read_err;
+		val->intval = (ret >> 3) * MAX17042_VOLT_CONV_FCTR;
 		break;
 	case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
 		if (chip->chip_type == MAX17042)
 			ret = max17042_read_reg(chip->client, MAX17042_V_empty);
 		else
-			ret = max17042_read_reg(chip->client, MAX17047_V_empty);
+			ret = max17042_read_reg(chip->client, MAX17050_V_empty);
 		if (ret < 0)
-			return ret;
-
-		val->intval = ret >> 7;
-		val->intval *= 10000; /* Units of LSB = 10mV */
+			goto ps_prop_read_err;
+		val->intval = (ret >> 7) * 10000; /* Units of LSB = 10mV */
 		break;
-	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
-		ret = max17042_read_reg(chip->client, MAX17042_VCELL);
-		if (ret < 0)
-			return ret;
-
-		val->intval = ret * 625 / 8;
-		break;
-	case POWER_SUPPLY_PROP_VOLTAGE_AVG:
-		ret = max17042_read_reg(chip->client, MAX17042_AvgVCELL);
-		if (ret < 0)
-			return ret;
-
-		val->intval = ret * 625 / 8;
-		break;
-	case POWER_SUPPLY_PROP_VOLTAGE_OCV:
-		ret = max17042_read_reg(chip->client, MAX17042_OCVInternal);
-		if (ret < 0)
-			return ret;
-
-		val->intval = ret * 625 / 8;
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+		val->intval = chip->voltage_max;
 		break;
 	case POWER_SUPPLY_PROP_CAPACITY:
-		ret = max17042_read_reg(chip->client, MAX17042_RepSOC);
-		if (ret < 0)
-			return ret;
-
-		val->intval = ret >> 8;
-		break;
-	case POWER_SUPPLY_PROP_CHARGE_FULL:
-		ret = max17042_read_reg(chip->client, MAX17042_FullCAP);
-		if (ret < 0)
-			return ret;
-
-		val->intval = ret * 1000 / 2;
-		break;
-	case POWER_SUPPLY_PROP_CHARGE_COUNTER:
-		ret = max17042_read_reg(chip->client, MAX17042_QH);
-		if (ret < 0)
-			return ret;
-
-		val->intval = ret * 1000 / 2;
-		break;
-	case POWER_SUPPLY_PROP_TEMP:
-		ret = max17042_read_reg(chip->client, MAX17042_TEMP);
-		if (ret < 0)
-			return ret;
-
-		val->intval = ret;
-		/* The value is signed. */
-		if (val->intval & 0x8000) {
-			val->intval = (0x7fff & ~val->intval) + 1;
-			val->intval *= -1;
+		/*
+		 * WA added to support power supply voltage
+		 * variations b/w supply and FG readings.
+		 */
+		if (fake_batt_full) {
+			val->intval = 100;
+			break;
 		}
-		/* The value is converted into deci-centigrade scale */
-		/* Units of LSB = 1 / 256 degree Celsius */
-		val->intval = val->intval * 10 / 256;
-		break;
-	case POWER_SUPPLY_PROP_CURRENT_NOW:
-		if (chip->pdata->enable_current_sense) {
-			ret = max17042_read_reg(chip->client, MAX17042_Current);
-			if (ret < 0)
-				return ret;
 
-			val->intval = ret;
-			if (val->intval & 0x8000) {
-				/* Negative */
-				val->intval = ~val->intval & 0x7fff;
-				val->intval++;
-				val->intval *= -1;
-			}
-			val->intval *= 1562500 / chip->pdata->r_sns;
-		} else {
-			return -EINVAL;
-		}
-		break;
-	case POWER_SUPPLY_PROP_CURRENT_AVG:
-		if (chip->pdata->enable_current_sense) {
+		/* Voltage Based shutdown method to avoid modem crash */
+		if (chip->pdata->is_volt_shutdown) {
 			ret = max17042_read_reg(chip->client,
-						MAX17042_AvgCurrent);
+						MAX17042_OCVInternal);
 			if (ret < 0)
-				return ret;
+				goto ps_prop_read_err;
+			volt_ocv = (ret >> 3) * MAX17042_VOLT_CONV_FCTR;
 
-			val->intval = ret;
-			if (val->intval & 0x8000) {
-				/* Negative */
-				val->intval = ~val->intval & 0x7fff;
-				val->intval++;
-				val->intval *= -1;
+			/* Get the minimum voltage thereshold */
+			if (shutdown_volt)
+				batt_vmin = shutdown_volt;
+			else if (chip->pdata->get_vmin_threshold)
+				batt_vmin = chip->pdata->get_vmin_threshold();
+			else
+				batt_vmin = BATTERY_VOLT_MIN_THRESHOLD;
+
+			if (volt_ocv <= batt_vmin) {
+				/* if user disables OCV shutdown method
+				 * report 1% capcity so that platform
+				 * will not get shutdown.
+				 */
+				if (chip->disable_shdwn_methods &
+						SHUTDOWN_OCV_MASK_BIT)
+					val->intval = 1;
+				else
+					val->intval = 0;
+				break;
 			}
-			val->intval *= 1562500 / chip->pdata->r_sns;
-		} else {
-			return -EINVAL;
+
 		}
+
+		/* Check for LOW Battery Shutdown mechanism is enabled */
+		if (chip->pdata->is_lowbatt_shutdown &&
+			(chip->health == POWER_SUPPLY_HEALTH_DEAD)) {
+			/* if user disables LOWBATT INT shutdown method
+			 * report 1% capcity so that platform
+			 * will not get shutdown.
+			 */
+			if (chip->disable_shdwn_methods &
+					SHUTDOWN_LOWBATT_MASK_BIT)
+				val->intval = 1;
+			else
+				val->intval = 0;
+			break;
+		}
+
+		/* If current sensing is not enabled then read the
+		 * voltage based fuel gauge register for SOC */
+		if (chip->pdata->enable_current_sense) {
+			ret = max17042_read_reg(chip->client, MAX17042_RepSOC);
+			if (ret < 0)
+				goto ps_prop_read_err;
+			val->intval = ret >> 8;
+			/* Check if MSB of lower byte is set
+			 * then round off the SOC to higher digit
+			 */
+			if ((ret & 0x80) && val->intval)
+				val->intval += 1;
+		} else {
+			ret = max17042_read_reg(chip->client, MAX17042_VCELL);
+			if (ret < 0)
+				goto ps_prop_read_err;
+
+			ret = (ret >> 3) * MAX17042_VOLT_CONV_FCTR / 1000;
+			val->intval = voltage_capacity_lookup(ret);
+		}
+
+		if (val->intval > 100)
+			val->intval = 100;
+
+		/* if user disables default FG shutdown method
+		 * report 1% capcity so that platform
+		 * will not get shutdown.
+		 */
+		if ((val->intval == 0) && (chip->disable_shdwn_methods &
+				SHUTDOWN_DEF_FG_MASK_BIT))
+			val->intval = 1;
+		break;
+	case POWER_SUPPLY_PROP_MODEL_NAME:
+		if (!strncmp(chip->pdata->battid, "UNKNOWNB", 8))
+			val->strval = chip->pdata->battid;
+		else
+			val->strval = chip->pdata->model_name;
+		break;
+	case POWER_SUPPLY_PROP_SERIAL_NUMBER:
+		val->strval = chip->pdata->serial_num;
 		break;
 	default:
+		mutex_unlock(&chip->batt_lock);
 		return -EINVAL;
 	}
+
+	mutex_unlock(&chip->batt_lock);
+	return 0;
+
+ps_prop_read_err:
+	mutex_unlock(&chip->batt_lock);
+	return ret;
+}
+
+static void dump_fg_conf_data(struct max17042_chip *chip)
+{
+	int i;
+
+	dev_info(&chip->client->dev, "size:%x\n", fg_conf_data->size);
+	dev_info(&chip->client->dev, "table_type:%x\n",
+					fg_conf_data->table_type);
+	dev_info(&chip->client->dev, "config_init:%x\n",
+					fg_conf_data->config_init);
+	dev_info(&chip->client->dev, "rcomp0:%x\n", fg_conf_data->rcomp0);
+	dev_info(&chip->client->dev, "tempCo:%x\n", fg_conf_data->tempCo);
+	dev_info(&chip->client->dev, "kempty0:%x\n", fg_conf_data->kempty0);
+	dev_info(&chip->client->dev, "full_cap:%x\n", fg_conf_data->full_cap);
+	dev_info(&chip->client->dev, "cycles:%x\n", fg_conf_data->cycles);
+	dev_info(&chip->client->dev, "full_capnom:%x\n",
+						fg_conf_data->full_capnom);
+	dev_info(&chip->client->dev, "qrtbl00:%x\n", fg_conf_data->qrtbl00);
+	dev_info(&chip->client->dev, "qrtbl10:%x\n", fg_conf_data->qrtbl10);
+	dev_info(&chip->client->dev, "qrtbl20:%x\n", fg_conf_data->qrtbl20);
+	dev_info(&chip->client->dev, "qrtbl30:%x\n", fg_conf_data->qrtbl30);
+	dev_info(&chip->client->dev, "full_soc_thr:%x\n",
+						fg_conf_data->full_soc_thr);
+	dev_info(&chip->client->dev, "vempty:%x\n", fg_conf_data->vempty);
+
+	dev_info(&chip->client->dev, "soc_empty:%x\n",
+						fg_conf_data->soc_empty);
+	dev_info(&chip->client->dev, "ichgt_term:%x\n",
+						fg_conf_data->ichgt_term);
+	dev_info(&chip->client->dev, "design_cap:%x\n",
+						fg_conf_data->design_cap);
+	dev_info(&chip->client->dev, "etc:%x\n", fg_conf_data->etc);
+	dev_info(&chip->client->dev, "rsense:%x\n", fg_conf_data->rsense);
+	dev_info(&chip->client->dev, "cfg:%x\n", fg_conf_data->cfg);
+	dev_info(&chip->client->dev, "learn_cfg:%x\n",
+						fg_conf_data->learn_cfg);
+	dev_info(&chip->client->dev, "filter_cfg:%x\n",
+						fg_conf_data->filter_cfg);
+	dev_info(&chip->client->dev, "relax_cfg:%x\n", fg_conf_data->relax_cfg);
+
+	for (i = 0; i < CELL_CHAR_TBL_SAMPLES; i++)
+		dev_info(&chip->client->dev, "%x, ",
+				fg_conf_data->cell_char_tbl[i]);
+	dev_info(&chip->client->dev, "\n");
+}
+
+static void enable_soft_POR(struct max17042_chip *chip)
+{
+	u16 val = 0x0000;
+
+	max17042_write_reg(chip->client, MAX17042_MLOCKReg1, val);
+	max17042_write_reg(chip->client, MAX17042_MLOCKReg2, val);
+	max17042_write_reg(chip->client, MAX17042_STATUS, val);
+
+	val = max17042_read_reg(chip->client, MAX17042_MLOCKReg1);
+	if (val)
+		dev_err(&chip->client->dev, "MLOCKReg1 read failed\n");
+
+	val = max17042_read_reg(chip->client, MAX17042_MLOCKReg2);
+	if (val)
+		dev_err(&chip->client->dev, "MLOCKReg2 read failed\n");
+
+	val = max17042_read_reg(chip->client, MAX17042_STATUS);
+	if (val)
+		dev_err(&chip->client->dev, "STATUS read failed\n");
+
+	/* send POR command */
+	max17042_write_reg(chip->client, MAX17042_VFSOC0Enable, 0x000F);
+	mdelay(2);
+
+	val = max17042_read_reg(chip->client, MAX17042_STATUS);
+	if (val & STATUS_POR_BIT)
+		dev_info(&chip->client->dev, "SoftPOR done!\n");
+	else
+		dev_err(&chip->client->dev, "SoftPOR failed\n");
+}
+
+static int write_characterization_data(struct max17042_chip *chip)
+{
+	uint16_t cell_data[CELL_CHAR_TBL_SAMPLES];
+	uint16_t temp_data[CELL_CHAR_TBL_SAMPLES];
+	int i;
+	u8 addr;
+
+	memset(cell_data, 0x0, sizeof(cell_data));
+	/* Unlock model access */
+	max17042_write_reg(chip->client, MAX17042_MLOCKReg1, FG_MODEL_UNLOCK1);
+	max17042_write_reg(chip->client, MAX17042_MLOCKReg2, FG_MODEL_UNLOCK2);
+	addr = MAX17042_MODELChrTbl;
+
+	/* write the 48 words */
+	for (i = 0; i < CELL_CHAR_TBL_SAMPLES; i++)
+		max17042_write_reg(chip->client, addr + i,
+				fg_conf_data->cell_char_tbl[i]);
+
+	/* read the 48 words */
+	for (i = 0; i < CELL_CHAR_TBL_SAMPLES; i++)
+		cell_data[i] = max17042_read_reg(chip->client, addr + i);
+
+	/* compare the data */
+	if (memcmp(cell_data, fg_conf_data->cell_char_tbl, sizeof(cell_data))) {
+		dev_err(&chip->client->dev, "%s write failed\n", __func__);
+		for (i = 0; i < CELL_CHAR_TBL_SAMPLES; i++)
+			dev_err(&chip->client->dev, "0x%x,0x%x\n", cell_data[i],
+						fg_conf_data->cell_char_tbl[i]);
+		/* Lock Model access regs */
+		max17042_write_reg(chip->client, MAX17042_MLOCKReg1,
+								FG_MODEL_LOCK1);
+		max17042_write_reg(chip->client, MAX17042_MLOCKReg2,
+								FG_MODEL_LOCK2);
+		return -EIO;
+	}
+
+	memset(temp_data, 0x0, sizeof(temp_data));
+	/* Lock Model access regs */
+	max17042_write_reg(chip->client, MAX17042_MLOCKReg1, FG_MODEL_LOCK1);
+	max17042_write_reg(chip->client, MAX17042_MLOCKReg2, FG_MODEL_LOCK2);
+
+	/* read the 48 words */
+	for (i = 0; i < CELL_CHAR_TBL_SAMPLES; i++)
+		cell_data[i] = max17042_read_reg(chip->client, addr + i);
+
+	/* compare the data */
+	if (memcmp(cell_data, temp_data, sizeof(temp_data))) {
+		dev_err(&chip->client->dev, "%s verify failed\n", __func__);
+		for (i = 0; i < CELL_CHAR_TBL_SAMPLES; i++)
+			dev_err(&chip->client->dev, "0x%x, ", cell_data[i]);
+		dev_err(&chip->client->dev, "\n");
+		return -EIO;
+	}
+
 	return 0;
 }
 
-static int max17042_write_verify_reg(struct i2c_client *client,
-				u8 reg, u16 value)
+static void configure_learncfg(struct max17042_chip *chip)
 {
-	int retries = 8;
-	int ret;
-	u16 read_value;
 
-	do {
-		ret = i2c_smbus_write_word_data(client, reg, value);
-		read_value =  max17042_read_reg(client, reg);
-		if (read_value != value) {
-			ret = -EIO;
-			retries--;
-		}
-	} while (retries && read_value != value);
+	u16 cycles;
 
-	if (ret < 0)
-		dev_err(&client->dev, "%s: err %d\n", __func__, ret);
-
-	return ret;
+	/*assigning cycles value from restored data*/
+	cycles = fg_conf_data->cycles;
+	if ((cycles >= CYCLES_ROLLOVER_CUTOFF) &&
+				(chip->chip_type == MAX17042))
+		max17042_write_verify_reg(chip->client, MAX17042_LearnCFG,
+						MAX17042_DEF_RO_LRNCFG);
+	else
+		max17042_write_reg(chip->client, MAX17042_LearnCFG,
+						fg_conf_data->learn_cfg);
 }
 
-static inline void max17042_override_por(
-	struct i2c_client *client, u8 reg, u16 value)
+static void write_config_regs(struct max17042_chip *chip)
 {
-	if (value)
-		max17042_write_reg(client, reg, value);
+	max17042_write_reg(chip->client, MAX17042_CONFIG, fg_conf_data->cfg);
+	configure_learncfg(chip);
+
+	max17042_write_reg(chip->client, MAX17042_SHFTCFG,
+						fg_conf_data->filter_cfg);
+	max17042_write_reg(chip->client, MAX17042_RelaxCFG,
+						fg_conf_data->relax_cfg);
+	if (chip->chip_type == MAX17050)
+		max17042_write_reg(chip->client, MAX17050_FullSOCThr,
+					fg_conf_data->full_soc_thr);
 }
 
-static inline void max10742_unlock_model(struct max17042_chip *chip)
+static void write_custom_regs(struct max17042_chip *chip)
 {
-	struct i2c_client *client = chip->client;
-	max17042_write_reg(client, MAX17042_MLOCKReg1, MODEL_UNLOCK1);
-	max17042_write_reg(client, MAX17042_MLOCKReg2, MODEL_UNLOCK2);
-}
-
-static inline void max10742_lock_model(struct max17042_chip *chip)
-{
-	struct i2c_client *client = chip->client;
-	max17042_write_reg(client, MAX17042_MLOCKReg1, MODEL_LOCK1);
-	max17042_write_reg(client, MAX17042_MLOCKReg2, MODEL_LOCK2);
-}
-
-static inline void max17042_write_model_data(struct max17042_chip *chip,
-					u8 addr, int size)
-{
-	struct i2c_client *client = chip->client;
-	int i;
-	for (i = 0; i < size; i++)
-		max17042_write_reg(client, addr + i,
-				chip->pdata->config_data->cell_char_tbl[i]);
-}
-
-static inline void max17042_read_model_data(struct max17042_chip *chip,
-					u8 addr, u16 *data, int size)
-{
-	struct i2c_client *client = chip->client;
-	int i;
-
-	for (i = 0; i < size; i++)
-		data[i] = max17042_read_reg(client, addr + i);
-}
-
-static inline int max17042_model_data_compare(struct max17042_chip *chip,
-					u16 *data1, u16 *data2, int size)
-{
-	int i;
-
-	if (memcmp(data1, data2, size)) {
-		dev_err(&chip->client->dev, "%s compare failed\n", __func__);
-		for (i = 0; i < size; i++)
-			dev_info(&chip->client->dev, "0x%x, 0x%x",
-				data1[i], data2[i]);
-		dev_info(&chip->client->dev, "\n");
-		return -EINVAL;
-	}
-	return 0;
-}
-
-static int max17042_init_model(struct max17042_chip *chip)
-{
-	int ret;
-	int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl);
-	u16 *temp_data;
-
-	temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL);
-	if (!temp_data)
-		return -ENOMEM;
-
-	max10742_unlock_model(chip);
-	max17042_write_model_data(chip, MAX17042_MODELChrTbl,
-				table_size);
-	max17042_read_model_data(chip, MAX17042_MODELChrTbl, temp_data,
-				table_size);
-
-	ret = max17042_model_data_compare(
-		chip,
-		chip->pdata->config_data->cell_char_tbl,
-		temp_data,
-		table_size);
-
-	max10742_lock_model(chip);
-	kfree(temp_data);
-
-	return ret;
-}
-
-static int max17042_verify_model_lock(struct max17042_chip *chip)
-{
-	int i;
-	int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl);
-	u16 *temp_data;
-	int ret = 0;
-
-	temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL);
-	if (!temp_data)
-		return -ENOMEM;
-
-	max17042_read_model_data(chip, MAX17042_MODELChrTbl, temp_data,
-				table_size);
-	for (i = 0; i < table_size; i++)
-		if (temp_data[i])
-			ret = -EINVAL;
-
-	kfree(temp_data);
-	return ret;
-}
-
-static void max17042_write_config_regs(struct max17042_chip *chip)
-{
-	struct max17042_config_data *config = chip->pdata->config_data;
-
-	max17042_write_reg(chip->client, MAX17042_CONFIG, config->config);
-	max17042_write_reg(chip->client, MAX17042_LearnCFG, config->learn_cfg);
-	max17042_write_reg(chip->client, MAX17042_FilterCFG,
-			config->filter_cfg);
-	max17042_write_reg(chip->client, MAX17042_RelaxCFG, config->relax_cfg);
-	if (chip->chip_type == MAX17047)
-		max17042_write_reg(chip->client, MAX17047_FullSOCThr,
-						config->full_soc_thresh);
-}
-
-static void  max17042_write_custom_regs(struct max17042_chip *chip)
-{
-	struct max17042_config_data *config = chip->pdata->config_data;
-
 	max17042_write_verify_reg(chip->client, MAX17042_RCOMP0,
-				config->rcomp0);
+						fg_conf_data->rcomp0);
 	max17042_write_verify_reg(chip->client, MAX17042_TempCo,
-				config->tcompc0);
+						fg_conf_data->tempCo);
 	max17042_write_verify_reg(chip->client, MAX17042_ICHGTerm,
-				config->ichgt_term);
+						fg_conf_data->ichgt_term);
+	/* adjust Temperature gain and offset */
+	max17042_write_reg(chip->client,
+			MAX17042_TGAIN, chip->pdata->tgain);
+	max17042_write_reg(chip->client,
+			MAx17042_TOFF, chip->pdata->toff);
+
 	if (chip->chip_type == MAX17042) {
-		max17042_write_reg(chip->client, MAX17042_EmptyTempCo,
-					config->empty_tempco);
+		max17042_write_reg(chip->client, MAX17042_ETC,
+						fg_conf_data->etc);
 		max17042_write_verify_reg(chip->client, MAX17042_K_empty0,
-					config->kempty0);
-	} else {
-		max17042_write_verify_reg(chip->client, MAX17047_QRTbl00,
-						config->qrtbl00);
-		max17042_write_verify_reg(chip->client, MAX17047_QRTbl10,
-						config->qrtbl10);
-		max17042_write_verify_reg(chip->client, MAX17047_QRTbl20,
-						config->qrtbl20);
-		max17042_write_verify_reg(chip->client, MAX17047_QRTbl30,
-						config->qrtbl30);
+						fg_conf_data->kempty0);
+		max17042_write_verify_reg(chip->client, MAX17042_SOCempty,
+						fg_conf_data->soc_empty);
+		max17042_write_verify_reg(chip->client, MAX17042_V_empty,
+						MAX17042_DEF_VEMPTY_VAL);
+
+	} else {	/* chip type max17050 */
+		max17042_write_verify_reg(chip->client, MAX17050_V_empty,
+							fg_conf_data->vempty);
+		max17042_write_verify_reg(chip->client, MAX17050_QRTbl00,
+			fg_conf_data->qrtbl00 + chip->extra_resv_cap);
+		max17042_write_verify_reg(chip->client, MAX17050_QRTbl10,
+			fg_conf_data->qrtbl10 + chip->extra_resv_cap);
+		max17042_write_verify_reg(chip->client, MAX17050_QRTbl20,
+			fg_conf_data->qrtbl20 + chip->extra_resv_cap);
+		max17042_write_verify_reg(chip->client, MAX17050_QRTbl30,
+			fg_conf_data->qrtbl30 + chip->extra_resv_cap);
 	}
 }
 
-static void max17042_update_capacity_regs(struct max17042_chip *chip)
+static void update_capacity_regs(struct max17042_chip *chip)
 {
-	struct max17042_config_data *config = chip->pdata->config_data;
-
 	max17042_write_verify_reg(chip->client, MAX17042_FullCAP,
-				config->fullcap);
-	max17042_write_reg(chip->client, MAX17042_DesignCap,
-			config->design_cap);
+			MAX17042_MODEL_MUL_FACTOR(fg_conf_data->full_cap,
+				chip->model_algo_factor)
+					* fg_conf_data->rsense);
 	max17042_write_verify_reg(chip->client, MAX17042_FullCAPNom,
-				config->fullcapnom);
+			MAX17042_MODEL_MUL_FACTOR(fg_conf_data->full_cap,
+				chip->model_algo_factor)
+					* fg_conf_data->rsense);
+	max17042_write_reg(chip->client, MAX17042_DesignCap,
+			MAX17042_MODEL_MUL_FACTOR(fg_conf_data->full_cap,
+				chip->model_algo_factor)
+					* fg_conf_data->rsense);
 }
 
-static void max17042_reset_vfsoc0_reg(struct max17042_chip *chip)
+static void reset_vfsoc0_reg(struct max17042_chip *chip)
 {
-	u16 vfSoc;
-
-	vfSoc = max17042_read_reg(chip->client, MAX17042_VFSOC);
+	fg_vfSoc = max17042_read_reg(chip->client, MAX17042_VFSOC);
 	max17042_write_reg(chip->client, MAX17042_VFSOC0Enable, VFSOC0_UNLOCK);
-	max17042_write_verify_reg(chip->client, MAX17042_VFSOC0, vfSoc);
+	max17042_write_verify_reg(chip->client, MAX17042_VFSOC0, fg_vfSoc);
 	max17042_write_reg(chip->client, MAX17042_VFSOC0Enable, VFSOC0_LOCK);
 }
 
-static void max17042_load_new_capacity_params(struct max17042_chip *chip)
+static void load_new_capacity_params(struct max17042_chip *chip, bool is_por)
 {
-	u16 full_cap0, rep_cap, dq_acc, vfSoc;
-	u32 rem_cap;
+	u16 rem_cap, rep_cap, dq_acc;
 
-	struct max17042_config_data *config = chip->pdata->config_data;
+	if (is_por) {
+		/* fg_vfSoc needs to shifted by 8 bits to get the
+		 * perc in 1% accuracy, to get the right rem_cap multiply
+		 * full_cap by model multiplication factor,fg_vfSoc
+		 * and divide by 100
+		 */
+		rem_cap = ((fg_vfSoc >> 8) *
+			(u32)(MAX17042_MODEL_MUL_FACTOR
+				(fg_conf_data->full_cap,
+					chip->model_algo_factor))) / 100;
 
-	full_cap0 = max17042_read_reg(chip->client, MAX17042_FullCAP0);
-	vfSoc = max17042_read_reg(chip->client, MAX17042_VFSOC);
+		max17042_write_verify_reg(chip->client,
+					MAX17042_RemCap, rem_cap);
 
-	/* fg_vfSoc needs to shifted by 8 bits to get the
-	 * perc in 1% accuracy, to get the right rem_cap multiply
-	 * full_cap0, fg_vfSoc and devide by 100
-	 */
-	rem_cap = ((vfSoc >> 8) * full_cap0) / 100;
-	max17042_write_verify_reg(chip->client, MAX17042_RemCap, (u16)rem_cap);
+		rep_cap = rem_cap;
 
-	rep_cap = (u16)rem_cap;
-	max17042_write_verify_reg(chip->client, MAX17042_RepCap, rep_cap);
+		max17042_write_verify_reg(chip->client,
+					MAX17042_RepCap, rep_cap);
+	}
 
 	/* Write dQ_acc to 200% of Capacity and dP_acc to 200% */
-	dq_acc = config->fullcap / dQ_ACC_DIV;
+	dq_acc = MAX17042_MODEL_MUL_FACTOR(fg_conf_data->full_cap,
+			chip->model_algo_factor) / dQ_ACC_DIV;
 	max17042_write_verify_reg(chip->client, MAX17042_dQacc, dq_acc);
 	max17042_write_verify_reg(chip->client, MAX17042_dPacc, dP_ACC_200);
 
 	max17042_write_verify_reg(chip->client, MAX17042_FullCAP,
-			config->fullcap);
+			fg_conf_data->full_cap
+			* fg_conf_data->rsense);
 	max17042_write_reg(chip->client, MAX17042_DesignCap,
-			config->design_cap);
+			MAX17042_MODEL_MUL_FACTOR(fg_conf_data->full_cap,
+			chip->model_algo_factor)
+			* fg_conf_data->rsense);
 	max17042_write_verify_reg(chip->client, MAX17042_FullCAPNom,
-			config->fullcapnom);
+			MAX17042_MODEL_MUL_FACTOR(fg_conf_data->full_cap,
+			chip->model_algo_factor)
+			* fg_conf_data->rsense);
 	/* Update SOC register with new SOC */
-	max17042_write_reg(chip->client, MAX17042_RepSOC, vfSoc);
+	max17042_write_reg(chip->client, MAX17042_RepSOC, fg_vfSoc);
 }
 
-/*
- * Block write all the override values coming from platform data.
- * This function MUST be called before the POR initialization proceedure
- * specified by maxim.
- */
-static inline void max17042_override_por_values(struct max17042_chip *chip)
+static void update_runtime_params(struct max17042_chip *chip)
 {
-	struct i2c_client *client = chip->client;
-	struct max17042_config_data *config = chip->pdata->config_data;
 
-	max17042_override_por(client, MAX17042_TGAIN, config->tgain);
-	max17042_override_por(client, MAx17042_TOFF, config->toff);
-	max17042_override_por(client, MAX17042_CGAIN, config->cgain);
-	max17042_override_por(client, MAX17042_COFF, config->coff);
-
-	max17042_override_por(client, MAX17042_VALRT_Th, config->valrt_thresh);
-	max17042_override_por(client, MAX17042_TALRT_Th, config->talrt_thresh);
-	max17042_override_por(client, MAX17042_SALRT_Th,
-			config->soc_alrt_thresh);
-	max17042_override_por(client, MAX17042_CONFIG, config->config);
-	max17042_override_por(client, MAX17042_SHDNTIMER, config->shdntimer);
-
-	max17042_override_por(client, MAX17042_DesignCap, config->design_cap);
-	max17042_override_por(client, MAX17042_ICHGTerm, config->ichgt_term);
-
-	max17042_override_por(client, MAX17042_AtRate, config->at_rate);
-	max17042_override_por(client, MAX17042_LearnCFG, config->learn_cfg);
-	max17042_override_por(client, MAX17042_FilterCFG, config->filter_cfg);
-	max17042_override_por(client, MAX17042_RelaxCFG, config->relax_cfg);
-	max17042_override_por(client, MAX17042_MiscCFG, config->misc_cfg);
-	max17042_override_por(client, MAX17042_MaskSOC, config->masksoc);
-
-	max17042_override_por(client, MAX17042_FullCAP, config->fullcap);
-	max17042_override_por(client, MAX17042_FullCAPNom, config->fullcapnom);
-	if (chip->chip_type == MAX17042)
-		max17042_override_por(client, MAX17042_SOC_empty,
-						config->socempty);
-	max17042_override_por(client, MAX17042_LAvg_empty, config->lavg_empty);
-	max17042_override_por(client, MAX17042_dQacc, config->dqacc);
-	max17042_override_por(client, MAX17042_dPacc, config->dpacc);
-
-	if (chip->chip_type == MAX17042)
-		max17042_override_por(client, MAX17042_V_empty, config->vempty);
-	else
-		max17042_override_por(client, MAX17047_V_empty, config->vempty);
-	max17042_override_por(client, MAX17042_TempNom, config->temp_nom);
-	max17042_override_por(client, MAX17042_TempLim, config->temp_lim);
-	max17042_override_por(client, MAX17042_FCTC, config->fctc);
-	max17042_override_por(client, MAX17042_RCOMP0, config->rcomp0);
-	max17042_override_por(client, MAX17042_TempCo, config->tcompc0);
-	if (chip->chip_type) {
-		max17042_override_por(client, MAX17042_EmptyTempCo,
-					config->empty_tempco);
-		max17042_override_por(client, MAX17042_K_empty0,
-					config->kempty0);
-	}
-}
-
-static int max17042_init_chip(struct max17042_chip *chip)
-{
-	int ret;
-	int val;
-
-	max17042_override_por_values(chip);
-	/* After Power up, the MAX17042 requires 500mS in order
-	 * to perform signal debouncing and initial SOC reporting
+	fg_conf_data->rcomp0 = max17042_read_reg(chip->client,
+							MAX17042_RCOMP0);
+	fg_conf_data->tempCo = max17042_read_reg(chip->client,
+							MAX17042_TempCo);
+	/*
+	 * Save only the original qrtbl register values ignoring the
+	 * additionally reserved capacity. We deal with reserved
+	 * capacity while restoring.
 	 */
-	msleep(500);
+	if (chip->chip_type == MAX17050) {
+		fg_conf_data->qrtbl00 = max17042_read_reg(chip->client,
+			MAX17050_QRTbl00) - chip->extra_resv_cap;
+		fg_conf_data->qrtbl10 = max17042_read_reg(chip->client,
+			MAX17050_QRTbl10) - chip->extra_resv_cap;
+		fg_conf_data->qrtbl20 = max17042_read_reg(chip->client,
+			MAX17050_QRTbl20) - chip->extra_resv_cap;
+		fg_conf_data->qrtbl30 = max17042_read_reg(chip->client,
+			MAX17050_QRTbl30) - chip->extra_resv_cap;
+	}
 
-	/* Initialize configaration */
-	max17042_write_config_regs(chip);
+	fg_conf_data->full_capnom = max17042_read_reg(chip->client,
+							MAX17042_FullCAPNom);
+	fg_conf_data->full_cap = max17042_read_reg(chip->client,
+							MAX17042_FullCAP);
+	if (fg_conf_data->rsense) {
+		fg_conf_data->full_capnom = MAX17042_MODEL_DIV_FACTOR(
+			fg_conf_data->full_capnom, chip->model_algo_factor)
+					/ fg_conf_data->rsense;
+
+		fg_conf_data->full_cap /= fg_conf_data->rsense;
+	}
+	fg_conf_data->cycles = max17042_read_reg(chip->client,
+							MAX17042_Cycles);
+
+	/* Dump data before saving */
+	dump_fg_conf_data(chip);
+}
+
+static void save_runtime_params(struct max17042_chip *chip)
+{
+	int size, retval;
+
+	dev_dbg(&chip->client->dev, "%s\n", __func__);
+
+	if (!chip->pdata->save_config_data || !chip->pdata->is_init_done)
+		return ;
+
+	update_runtime_params(chip);
+
+	size = sizeof(*fg_conf_data) - sizeof(fg_conf_data->cell_char_tbl);
+	retval = chip->pdata->save_config_data(DRV_NAME, fg_conf_data, size);
+	if (retval < 0) {
+		dev_err(&chip->client->dev, "%s failed\n", __func__);
+		return ;
+	}
+
+}
+
+static int init_max17042_chip(struct max17042_chip *chip)
+{
+	int ret = 0, val;
+	bool is_por;
+
+	val = max17042_read_reg(chip->client, MAX17042_STATUS);
+	dev_info(&chip->client->dev, "Status reg: %x\n", val);
+
+	if (val & STATUS_POR_BIT)
+		is_por = true;
+	else
+		is_por = false;
+
+	/* Initialize configuration */
+	write_config_regs(chip);
 
 	/* write cell characterization data */
-	ret = max17042_init_model(chip);
-	if (ret) {
-		dev_err(&chip->client->dev, "%s init failed\n",
-			__func__);
-		return -EIO;
-	}
+	ret = write_characterization_data(chip);
+	if (ret < 0)
+		return ret;
 
-	ret = max17042_verify_model_lock(chip);
-	if (ret) {
-		dev_err(&chip->client->dev, "%s lock verify failed\n",
-			__func__);
-		return -EIO;
-	}
 	/* write custom parameters */
-	max17042_write_custom_regs(chip);
+	write_custom_regs(chip);
 
 	/* update capacity params */
-	max17042_update_capacity_regs(chip);
+	update_capacity_regs(chip);
 
 	/* delay must be atleast 350mS to allow VFSOC
 	 * to be calculated from the new configuration
@@ -591,187 +1503,802 @@
 	msleep(350);
 
 	/* reset vfsoc0 reg */
-	max17042_reset_vfsoc0_reg(chip);
+	reset_vfsoc0_reg(chip);
+
+	/* advance to coulomb counter mode */
+	max17042_write_verify_reg(chip->client,
+			MAX17042_Cycles, fg_conf_data->cycles);
 
 	/* load new capacity params */
-	max17042_load_new_capacity_params(chip);
+	load_new_capacity_params(chip, is_por);
 
-	/* Init complete, Clear the POR bit */
-	val = max17042_read_reg(chip->client, MAX17042_STATUS);
-	max17042_write_reg(chip->client, MAX17042_STATUS,
-			val & (~STATUS_POR_BIT));
-	return 0;
-}
-
-static void max17042_set_soc_threshold(struct max17042_chip *chip, u16 off)
-{
-	u16 soc, soc_tr;
-
-	/* program interrupt thesholds such that we should
-	 * get interrupt for every 'off' perc change in the soc
-	 */
-	soc = max17042_read_reg(chip->client, MAX17042_RepSOC) >> 8;
-	soc_tr = (soc + off) << 8;
-	soc_tr |= (soc - off);
-	max17042_write_reg(chip->client, MAX17042_SALRT_Th, soc_tr);
-}
-
-static irqreturn_t max17042_thread_handler(int id, void *dev)
-{
-	struct max17042_chip *chip = dev;
-	u16 val;
-
-	val = max17042_read_reg(chip->client, MAX17042_STATUS);
-	if ((val & STATUS_INTR_SOCMIN_BIT) ||
-		(val & STATUS_INTR_SOCMAX_BIT)) {
-		dev_info(&chip->client->dev, "SOC threshold INTR\n");
-		max17042_set_soc_threshold(chip, 1);
+	if (is_por) {
+		/* Init complete, Clear the POR bit */
+		val = max17042_read_reg(chip->client, MAX17042_STATUS);
+		max17042_write_reg(chip->client, MAX17042_STATUS,
+						val & (~STATUS_POR_BIT));
 	}
 
-	power_supply_changed(&chip->battery);
-	return IRQ_HANDLED;
+	/* reset FullCap to non inflated value */
+	max17042_write_verify_reg(chip->client, MAX17042_FullCAP,
+			fg_conf_data->full_cap * fg_conf_data->rsense);
+
+	return ret;
+}
+
+static void reset_max17042(struct max17042_chip *chip)
+{
+	/* do soft power reset */
+	enable_soft_POR(chip);
+
+	/* After Power up, the MAX17042 requires 500mS in order
+	 * to perform signal debouncing and initial SOC reporting
+	 */
+	msleep(500);
+
+	max17042_write_reg(chip->client, MAX17042_CONFIG, 0x2210);
+
+	/* adjust Temperature gain and offset */
+	max17042_write_reg(chip->client, MAX17042_TGAIN, NTC_47K_TGAIN);
+	max17042_write_reg(chip->client, MAx17042_TOFF, NTC_47K_TOFF);
+}
+
+static void max17042_restore_conf_data(struct max17042_chip *chip)
+{
+	int retval = 0, size;
+
+	/* return if lock already acquired */
+	if (!mutex_trylock(&chip->init_lock))
+		return;
+
+	if (!chip->pdata->is_init_done && chip->pdata->restore_config_data) {
+		retval = chip->pdata->restore_config_data(DRV_NAME,
+					fg_conf_data, sizeof(*fg_conf_data));
+
+		if (retval == -ENXIO) {		/* no device found */
+			dev_err(&chip->client->dev, "device not found\n");
+			chip->pdata->is_init_done = 1;
+			chip->pdata->save_config_data = NULL;
+		} else if (retval < 0) {	/* device not ready */
+			dev_warn(&chip->client->dev, "device not ready\n");
+		} else {			/* device ready */
+			set_chip_config(chip);
+			/* mark the dirty byte in non-volatile memory */
+			if (!fg_conf_data->config_init && retval >= 0) {
+				fg_conf_data->config_init = 0x1;
+				size = sizeof(*fg_conf_data) -
+					sizeof(fg_conf_data->cell_char_tbl);
+				retval = chip->pdata->save_config_data(
+					DRV_NAME, fg_conf_data, size);
+				if (retval < 0)
+					dev_err(&chip->client->dev,
+						"%s failed\n", __func__);
+			}
+		}
+	}
+			if (chip->pdata->is_volt_shutdown_enabled)
+				chip->pdata->is_volt_shutdown =
+				chip->pdata->is_volt_shutdown_enabled();
+
+			if (chip->pdata->is_lowbatt_shutdown_enabled)
+				chip->pdata->is_lowbatt_shutdown =
+				chip->pdata->is_lowbatt_shutdown_enabled();
+
+	mutex_unlock(&chip->init_lock);
+}
+
+static void set_chip_config(struct max17042_chip *chip)
+{
+	int val, retval;
+
+	/* Dump data after restoring */
+	dump_fg_conf_data(chip);
+
+	val = max17042_read_reg(chip->client, MAX17042_STATUS);
+	dev_info(&chip->client->dev, "Status reg: %x\n", val);
+	if (!fg_conf_data->config_init || (val & STATUS_POR_BIT)) {
+		dev_info(&chip->client->dev, "Config data should be loaded\n");
+		if (chip->pdata->reset_chip)
+			reset_max17042(chip);
+		retval = init_max17042_chip(chip);
+		if (retval < 0) {
+			dev_err(&chip->client->dev, "maxim chip init failed\n");
+			reset_max17042(chip);
+			chip->pdata->save_config_data = NULL;
+		}
+	}
+	if (fg_conf_data->cfg & CONFIG_TEX_BIT_ENBL)
+		schedule_delayed_work(&chip->temp_worker, 0);
+	chip->pdata->is_init_done = 1;
+	configure_interrupts(chip);
+
+	/* multiply with 1000 to align with linux power supply sub system */
+	chip->charge_full_des = (fg_conf_data->design_cap / 2) * 1000;
 }
 
 static void max17042_init_worker(struct work_struct *work)
 {
 	struct max17042_chip *chip = container_of(work,
-				struct max17042_chip, work);
-	int ret;
+				struct max17042_chip, init_worker);
 
-	/* Initialize registers according to values from the platform data */
-	if (chip->pdata->enable_por_init && chip->pdata->config_data) {
-		ret = max17042_init_chip(chip);
-		if (ret)
-			return;
-	}
-
-	chip->init_complete = 1;
+	dev_info(&chip->client->dev, "%s\n", __func__);
+	max17042_restore_conf_data(chip);
 }
 
-#ifdef CONFIG_OF
-static struct max17042_platform_data *
-max17042_get_pdata(struct device *dev)
+static void max17042_temp_worker(struct work_struct *w)
 {
-	struct device_node *np = dev->of_node;
-	u32 prop;
-	struct max17042_platform_data *pdata;
+	struct delayed_work *work = to_delayed_work(w);
+	struct max17042_chip *chip = container_of(work,
+				struct max17042_chip, temp_worker);
+	int temp;
+	read_batt_pack_temp(chip, &temp);
+	schedule_delayed_work(&chip->temp_worker, TEMP_WRITE_INTERVAL);
+}
 
-	if (!np)
-		return dev->platform_data;
+/* Set the SOC threshold interrupt to offset percentage in S0 state */
+static void set_soc_intr_thresholds_s0(struct max17042_chip *chip, int offset)
+{
+	u16 soc_tr;
+	int soc, ret;
 
-	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
-	if (!pdata)
-		return NULL;
-
-	/*
-	 * Require current sense resistor value to be specified for
-	 * current-sense functionality to be enabled at all.
+	/* program interrupt thesholds such that we should
+	 * get interrupt for every 'offset' perc change in the soc
 	 */
-	if (of_property_read_u32(np, "maxim,rsns-microohm", &prop) == 0) {
-		pdata->r_sns = prop;
-		pdata->enable_current_sense = true;
+	ret = max17042_read_reg(chip->client, MAX17042_RepSOC);
+	if (ret < 0) {
+		dev_err(&chip->client->dev,
+			"maxim RepSOC read failed:%d\n", ret);
+		return ;
+	}
+	soc = ret >> 8;
+
+	/* if upper threshold exceeds 100% then stop
+	 * the interrupt for upper thresholds */
+	 if ((soc + offset) > 100)
+		soc_tr = 0xff << 8;
+	else
+		soc_tr = (soc + offset) << 8;
+
+	/* if lower threshold falls
+	 * below 1% limit it to 1% */
+	if ((soc - offset) < 1)
+		soc_tr |= 1;
+	else
+		soc_tr |= soc;
+
+	dev_info(&chip->client->dev,
+		"soc perc: soc: %d, offset: %d\n", soc, offset);
+	ret = max17042_write_reg(chip->client, MAX17042_SALRT_Th, soc_tr);
+	if (ret < 0)
+		dev_err(&chip->client->dev,
+			"SOC threshold write to maxim fail:%d", ret);
+}
+
+static void set_soc_intr_thresholds_s3(struct max17042_chip *chip)
+{
+	int ret, val, soc;
+
+	if (chip->pdata->enable_current_sense)
+		ret = max17042_read_reg(chip->client, MAX17042_RepSOC);
+	else
+		ret = max17042_read_reg(chip->client, MAX17042_VFSOC);
+	if (ret < 0) {
+		dev_err(&chip->client->dev,
+			"maxim RepSOC read failed:%d\n", ret);
+		return ;
+	}
+	val = ret;
+	soc = val >> 8;
+	/* Check if MSB of lower byte is set
+	 * then round off the SOC to higher digit
+	 */
+	if (val & 0x80)
+		soc += 1;
+
+	/* If soc > 15% set the alert threshold to 15%
+	 * else if soc > 4% set the threshold to 4%
+	 * else set it to 1%
+	 */
+	if (soc > SOC_WARNING_LEVEL1)
+		val = SOC_DEF_MAX_MIN1_THRLD;
+	else if (soc > SOC_WARNING_LEVEL2)
+		val = SOC_DEF_MAX_MIN2_THRLD;
+	else
+		val = SOC_DEF_MAX_MIN3_THRLD;
+
+	max17042_write_reg(chip->client, MAX17042_SALRT_Th, val);
+}
+
+static int max17042_get_batt_health(void)
+{
+	struct max17042_chip *chip = i2c_get_clientdata(max17042_client);
+	int vavg, temp, ret;
+	int stat;
+
+	if (!chip->pdata->valid_battery) {
+		dev_err(&chip->client->dev, "Invalid battery detected");
+		return POWER_SUPPLY_HEALTH_UNKNOWN;
 	}
 
-	return pdata;
+	ret = read_batt_pack_temp(chip, &temp);
+	if (ret < 0) {
+		dev_err(&chip->client->dev,
+			"battery pack temp read fail:%d", ret);
+		return POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+	}
+	if ((temp <= chip->pdata->temp_min_lim) ||
+			(temp >= chip->pdata->temp_max_lim)) {
+		dev_info(&chip->client->dev,
+			"Battery Over Temp condition Detected:%d\n", temp);
+		return POWER_SUPPLY_HEALTH_OVERHEAT;
+	}
+
+	stat = max17042_read_reg(chip->client, MAX17042_STATUS);
+	if (stat < 0) {
+		dev_err(&chip->client->dev, "error reading status register");
+		return POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+	}
+
+	ret = max17042_read_reg(chip->client, MAX17042_AvgVCELL);
+	if (ret < 0) {
+		dev_err(&chip->client->dev, "Vavg read fail:%d", ret);
+		return POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+	}
+	/* get the voltage to milli volts */
+	vavg = ((ret >> 3) * MAX17042_VOLT_CONV_FCTR) / 1000;
+	if (vavg < chip->pdata->volt_min_lim) {
+		dev_info(&chip->client->dev,
+			"Low Battery condition Detected:%d\n", vavg);
+		return POWER_SUPPLY_HEALTH_DEAD;
+	}
+	if (vavg > chip->pdata->volt_max_lim + VBATT_MAX_OFFSET) {
+		dev_info(&chip->client->dev,
+			"Battery Over Voltage condition Detected:%d\n", vavg);
+		return POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+	}
+	if (stat & STATUS_VMX_BIT) {
+		dev_info(&chip->client->dev,
+			"Battery Over Voltage condition Detected:%d\n", vavg);
+		return POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+	}
+
+	return POWER_SUPPLY_HEALTH_GOOD;
+}
+
+static void max17042_evt_worker(struct work_struct *work)
+{
+	struct max17042_chip *chip = container_of(work,
+			  struct max17042_chip, evt_worker);
+	int status = 0, health;
+
+	pm_runtime_get_sync(&chip->client->dev);
+
+	/* get the battery status */
+	if (chip->pdata->battery_status)
+		status = chip->pdata->battery_status();
+
+	/* get the battery health */
+	if (chip->pdata->battery_health)
+		health = chip->pdata->battery_health();
+	else
+		health = max17042_get_batt_health();
+
+	mutex_lock(&chip->batt_lock);
+	if (chip->pdata->battery_status)
+		chip->status = status;
+	chip->health = health;
+	mutex_unlock(&chip->batt_lock);
+
+	/* Init maxim chip if it is not already initialized */
+	if (!chip->pdata->is_init_done &&
+	!chip->pdata->file_sys_storage_enabled)
+		schedule_work(&chip->init_worker);
+
+	power_supply_changed(&chip->battery);
+	/* If charging is stopped and there is a sudden drop in SOC below
+	 * minimum threshold currently set, we'll not get further interrupts.
+	 * This call to set thresholds, will take care of this scenario.
+	 */
+	if (chip->pdata->soc_intr_mode_enabled)
+		set_soc_intr_thresholds_s0(chip, SOC_INTR_S0_THR);
+	pm_runtime_put_sync(&chip->client->dev);
+}
+
+static void max17042_external_power_changed(struct power_supply *psy)
+{
+	struct max17042_chip *chip = container_of(psy,
+			struct max17042_chip, battery);
+	schedule_work(&chip->evt_worker);
+}
+
+static bool is_battery_online(struct max17042_chip *chip)
+{
+	int val;
+	bool online = false;
+
+	val = max17042_read_reg(chip->client, MAX17042_STATUS);
+	if (val < 0) {
+		dev_info(&chip->client->dev, "i2c read error\n");
+		return online;
+	}
+
+	/* check battery present bit */
+	if (val & STATUS_BST_BIT)
+		online = false;
+	else
+		online = true;
+
+	return online;
+}
+
+static void init_battery_props(struct max17042_chip *chip)
+{
+	chip->present = 1;
+	chip->status = POWER_SUPPLY_STATUS_UNKNOWN;
+	chip->health = POWER_SUPPLY_HEALTH_UNKNOWN;
+	chip->technology = chip->pdata->technology;
+	chip->charge_full_des = BATT_CHRG_FULL_DES;
+}
+
+#ifdef CONFIG_DEBUG_FS
+/**
+ * max17042_show - debugfs: show the state of an endpoint.
+ * @seq: The seq_file to write data to.
+ * @unused: not used
+ *
+ * This debugfs entry shows the content of the register
+ * given in the data parameter.
+*/
+static int max17042_show(struct seq_file *seq, void *unused)
+{
+	u16 val;
+	long addr;
+
+	if (kstrtol((char *)seq->private, 16, &addr))
+		return -EINVAL;
+
+	val = max17042_read_reg(max17042_client, addr);
+	seq_printf(seq, "%x\n", val);
+
+	return 0;
+}
+
+static int max17042_dbgfs_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, max17042_show, inode->i_private);
+}
+
+static const struct file_operations max17042_dbgfs_fops = {
+	.owner		= THIS_MODULE,
+	.open		= max17042_dbgfs_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static void max17042_create_debugfs(struct max17042_chip *chip)
+{
+	int i;
+	struct dentry *entry;
+
+	max17042_dbgfs_root = debugfs_create_dir(DRV_NAME, NULL);
+	if (IS_ERR(max17042_dbgfs_root)) {
+		dev_warn(&chip->client->dev, "DEBUGFS DIR create failed\n");
+		return ;
+	}
+
+	for (i = 0; i < MAX17042_MAX_MEM; i++) {
+		sprintf((char *)&max17042_dbg_regs[i], "%x", i);
+		entry = debugfs_create_file(
+					(const char *)&max17042_dbg_regs[i],
+					S_IRUGO,
+					max17042_dbgfs_root,
+					&max17042_dbg_regs[i],
+					&max17042_dbgfs_fops);
+		if (IS_ERR(entry)) {
+			debugfs_remove_recursive(max17042_dbgfs_root);
+			max17042_dbgfs_root = NULL;
+			dev_warn(&chip->client->dev,
+					"DEBUGFS entry Create failed\n");
+			return ;
+		}
+	}
+}
+static inline void max17042_remove_debugfs(struct max17042_chip *chip)
+{
+	if (max17042_dbgfs_root)
+		debugfs_remove_recursive(max17042_dbgfs_root);
 }
 #else
-static struct max17042_platform_data *
-max17042_get_pdata(struct device *dev)
+static inline void max17042_create_debugfs(struct max17042_chip *chip)
 {
-	return dev->platform_data;
+}
+static inline void max17042_remove_debugfs(struct max17042_chip *chip)
+{
 }
 #endif
+/**
+ * override_shutdown_methods - sysfs to set disable_shdwn_methods
+ * Parameter as define by sysfs interface
+ * Context: can sleep
+ *
+ */
+static ssize_t override_shutdown_methods(struct device *dev,
+			       struct device_attribute *attr, const char *buf,
+			       size_t count)
+{
+	struct max17042_chip *chip = dev_get_drvdata(dev);
+	unsigned long value;
 
+	if (kstrtoul(buf, 10, &value))
+		return -EINVAL;
+
+	if (value > (SHUTDOWN_DEF_FG_MASK_BIT |
+			SHUTDOWN_OCV_MASK_BIT |
+			SHUTDOWN_LOWBATT_MASK_BIT))
+		return -EINVAL;
+
+	chip->disable_shdwn_methods = value;
+	return count;
+}
+
+/**
+ * get_shutdown_methods - sysfs get disable_shdwn_methods
+ * Parameter as define by sysfs interface
+ * Context: can sleep
+ *
+ */
+static ssize_t get_shutdown_methods(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	struct max17042_chip *chip = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%d\n", chip->disable_shdwn_methods);
+}
+
+/**
+ * get_shutdown_voltage_set_by_user - get function for sysfs shutdown_voltage
+ * Parameters as defined by sysfs interface
+ */
+static ssize_t get_shutdown_voltage_set_by_user(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", shutdown_volt);
+}
+
+/**
+ * set_shutdown_voltage - set function for sysfs shutdown_voltage
+ * Parameters as defined by sysfs interface
+ * shutdown_volt can take the values between 3.4V to 4.2V
+ */
+static ssize_t set_shutdown_voltage(struct device *dev,
+				struct device_attribute *attr, const char *buf,
+				size_t count)
+{
+	unsigned long value;
+	if (kstrtoul(buf, 10, &value))
+		return -EINVAL;
+	if ((value < VBATT_MIN) || (value > VBATT_MAX))
+		return -EINVAL;
+	shutdown_volt = value;
+	return count;
+}
+
+/**
+ * set_fake_temp_enable - sysfs to set enable_fake_temp
+ * Parameter as define by sysfs interface
+ */
+static ssize_t set_fake_temp_enable(struct device *dev,
+			       struct device_attribute *attr, const char *buf,
+			       size_t count)
+{
+	struct max17042_chip *chip = dev_get_drvdata(dev);
+	unsigned long value;
+
+	if (kstrtoul(buf, 10, &value))
+		return -EINVAL;
+
+	/* allow only 0 or 1 */
+	if (value > 1)
+		return -EINVAL;
+
+	if (value)
+		chip->enable_fake_temp = true;
+	else
+		chip->enable_fake_temp = false;
+
+	return count;
+}
+
+/**
+ * get_fake_temp_enable - sysfs get enable_fake_temp
+ * Parameter as define by sysfs interface
+ * Context: can sleep
+ */
+static ssize_t get_fake_temp_enable(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	struct max17042_chip *chip = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%d\n", chip->enable_fake_temp);
+}
+
+static void configure_interrupts(struct max17042_chip *chip)
+{
+	int ret;
+	unsigned int edge_type;
+	int vmax, vmin, reg_val;
+
+	/* set SOC-alert threshold sholds to lowest value */
+	max17042_write_reg(chip->client, MAX17042_SALRT_Th,
+					SOC_DEF_MAX_MIN3_THRLD);
+
+	/* enable Alerts for SOCRep */
+	if (chip->pdata->enable_current_sense)
+		max17042_write_reg(chip->client, MAX17042_MiscCFG,
+						MISCCFG_CONFIG_REPSOC);
+	else
+		max17042_write_reg(chip->client, MAX17042_MiscCFG,
+						MISCCFG_CONFIG_VFSOC);
+
+	/* disable the T-alert sticky bit */
+	max17042_reg_read_modify(chip->client, MAX17042_CONFIG,
+					CONFIG_TSTICKY_BIT_SET, 0);
+
+	/* Setting V-alrt threshold register to default values */
+	if (chip->pdata->en_vmax_intr) {
+		vmax = chip->pdata->volt_max_lim + VBATT_MAX_OFFSET;
+		vmin = chip->pdata->volt_min_lim - VBATT_MIN_OFFSET;
+		reg_val = ((vmax / VALERT_VOLT_OFFSET) << 8) |
+				(vmin / VALERT_VOLT_OFFSET);
+		max17042_write_reg(chip->client, MAX17042_VALRT_Th, reg_val);
+	} else {
+		max17042_write_reg(chip->client, MAX17042_VALRT_Th,
+					VOLT_DEF_MAX_MIN_THRLD);
+	}
+
+	/* Setting T-alrt threshold register to default values */
+	max17042_write_reg(chip->client, MAX17042_TALRT_Th,
+					TEMP_DEF_MAX_MIN_THRLD);
+
+	/* clear BI bit */
+	max17042_reg_read_modify(chip->client, MAX17042_STATUS,
+						STATUS_BI_BIT, 0);
+	/* clear BR bit */
+	max17042_reg_read_modify(chip->client, MAX17042_STATUS,
+						STATUS_BR_BIT, 0);
+
+	/* get interrupt edge type from ALP pin */
+	if (fg_conf_data->cfg & CONFIG_ALP_BIT_ENBL)
+		edge_type = IRQF_TRIGGER_RISING;
+	else
+		edge_type = IRQF_TRIGGER_FALLING;
+
+	/* register interrupt */
+	ret = request_threaded_irq(chip->client->irq,
+					max17042_intr_handler,
+					max17042_thread_handler,
+					edge_type,
+					DRV_NAME, chip);
+	if (ret) {
+		dev_warn(&chip->client->dev,
+			"cannot get IRQ:%d\n", chip->client->irq);
+		chip->client->irq = -1;
+	} else {
+		dev_info(&chip->client->dev, "IRQ No:%d\n", chip->client->irq);
+	}
+
+	/* enable interrupts */
+	max17042_reg_read_modify(chip->client, MAX17042_CONFIG,
+						CONFIG_ALRT_BIT_ENBL, 1);
+
+	/* set the Interrupt threshold register for soc */
+	if (chip->pdata->soc_intr_mode_enabled)
+		set_soc_intr_thresholds_s0(chip, SOC_INTR_S0_THR);
+
+	/*
+	 * recheckthe battery present status to
+	 * make sure we didn't miss any battery
+	 * removal event and power off if battery
+	 * is removed/unplugged.
+	 */
+	if ((fg_conf_data->cfg & CONFIG_BER_BIT_ENBL) &&
+		!is_battery_online(chip)) {
+		dev_warn(&chip->client->dev, "battery NOT present\n");
+		mutex_lock(&chip->batt_lock);
+		chip->present = 0;
+		mutex_unlock(&chip->batt_lock);
+		kernel_power_off();
+	}
+}
+
+#ifdef CONFIG_ACPI
+extern void *max17042_platform_data(void *info);
+#endif
 static int max17042_probe(struct i2c_client *client,
 			const struct i2c_device_id *id)
 {
 	struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
 	struct max17042_chip *chip;
-	int ret;
-	int reg;
+	int ret, i;
 
-	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA))
+#ifdef CONFIG_ACPI
+	int gpio;
+	struct acpi_gpio_info gpio_info;
+
+	client->dev.platform_data = max17042_platform_data(NULL);
+	gpio = acpi_get_gpio_by_index(&client->dev, 0, &gpio_info);
+	client->irq = gpio_to_irq(gpio);
+	ret = gpio_request_one(gpio, GPIOF_IN, client->name);
+	if (ret < 0) {
+		dev_warn(&client->dev, "gpio request failed.");
 		return -EIO;
-
-	chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
-	if (!chip)
-		return -ENOMEM;
-
-	chip->client = client;
-	chip->pdata = max17042_get_pdata(&client->dev);
-	if (!chip->pdata) {
-		dev_err(&client->dev, "no platform data provided\n");
-		return -EINVAL;
 	}
+#endif
+	if (!client->dev.platform_data) {
+		dev_err(&client->dev, "Platform Data is NULL");
+		return -EFAULT;
+	}
+
+	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA)) {
+		dev_err(&client->dev,
+				"SM bus doesn't support DWORD transactions\n");
+		return -EIO;
+	}
+
+	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+	if (!chip) {
+		dev_err(&client->dev, "mem alloc failed\n");
+		return -ENOMEM;
+	}
+
+	fg_conf_data = kzalloc(sizeof(*fg_conf_data), GFP_KERNEL);
+	if (!fg_conf_data) {
+		dev_err(&client->dev, "mem alloc failed\n");
+		kfree(chip);
+		return -ENOMEM;
+	}
+	chip->client = client;
+	chip->pdata = client->dev.platform_data;
+	/* LSB offset for qrtbl registers is 0.25%
+	 * ie, 0x04 = 1% reserved capacity
+	 */
+	chip->extra_resv_cap = 4 * chip->pdata->resv_cap;
+
+	if (chip->pdata->get_vmax_threshold)
+		chip->voltage_max = chip->pdata->get_vmax_threshold();
+	else
+		chip->voltage_max = VBATT_MAX;
+
+	if (chip->pdata->fg_algo_model)
+		chip->model_algo_factor = chip->pdata->fg_algo_model;
+	else
+		chip->model_algo_factor = 100;
 
 	i2c_set_clientdata(client, chip);
+	max17042_client = client;
 
 	ret = max17042_read_reg(chip->client, MAX17042_DevName);
+	if (ret < 0 && chip->pdata->reset_i2c_lines) {
+		dev_warn(&client->dev, "reset i2c device:%d\n", ret);
+		for (i = 0; i < NR_I2C_RESET_CNT; i++) {
+			chip->pdata->reset_i2c_lines();
+			ret = max17042_read_reg(chip->client, MAX17042_DevName);
+			if (ret < 0)
+				dev_warn(&client->dev,
+						"reset i2c device:%d\n", ret);
+			else
+				break;
+		}
+	}
+
 	if (ret == MAX17042_IC_VERSION) {
-		dev_dbg(&client->dev, "chip type max17042 detected\n");
+		dev_info(&client->dev, "chip type max17042 detected\n");
 		chip->chip_type = MAX17042;
-	} else if (ret == MAX17047_IC_VERSION) {
-		dev_dbg(&client->dev, "chip type max17047/50 detected\n");
-		chip->chip_type = MAX17047;
+	} else if (ret == MAX17050_IC_VERSION) {
+		dev_info(&client->dev, "chip type max17047/50 detected\n");
+		chip->chip_type = MAX17050;
 	} else {
 		dev_err(&client->dev, "device version mismatch: %x\n", ret);
+		kfree(chip);
+		kfree(fg_conf_data);
 		return -EIO;
 	}
 
-	chip->battery.name		= "max170xx_battery";
+	/* init battery properties */
+	init_battery_props(chip);
+	INIT_WORK(&chip->init_worker, max17042_init_worker);
+	INIT_WORK(&chip->evt_worker, max17042_evt_worker);
+	INIT_DEFERRABLE_WORK(&chip->temp_worker, max17042_temp_worker);
+
+	mutex_init(&chip->batt_lock);
+	mutex_init(&chip->init_lock);
+
+	/* disable the Alert pin before setting thresholds */
+	max17042_reg_read_modify(client, MAX17042_CONFIG,
+						CONFIG_ALRT_BIT_ENBL, 0);
+
+	if (chip->pdata->enable_current_sense) {
+		dev_info(&chip->client->dev, "current sensing enabled\n");
+		/* Initialize the chip with battery config data */
+		max17042_restore_conf_data(chip);
+	} else {
+
+		dev_info(&chip->client->dev, "current sensing NOT enabled\n");
+		/* incase of invalid battery no need to init the FG chip */
+		chip->pdata->is_init_done = 1;
+		/* disable coulomb counter based fuel gauging */
+		max17042_write_reg(chip->client, MAX17042_CGAIN,
+						MAX17042_CGAIN_DISABLE);
+		/* Enable voltage based Fuel Gauging */
+		max17042_write_reg(chip->client, MAX17042_LearnCFG,
+						MAX17042_EN_VOLT_FG);
+		/* configure interrupts for SOCvf */
+		max17042_write_reg(chip->client, MAX17042_MiscCFG,
+						MAX17042_CFG_INTR_SOCVF);
+	}
+
+	chip->technology = chip->pdata->technology;
+
+	if (chip->chip_type == MAX17042)
+		chip->battery.name = "max17042_battery";
+	else
+		chip->battery.name = "max17047_battery";
 	chip->battery.type		= POWER_SUPPLY_TYPE_BATTERY;
 	chip->battery.get_property	= max17042_get_property;
+	chip->battery.set_property	= max17042_set_property;
+	chip->battery.property_is_privileged_read =
+					max17042_property_is_privileged_read;
+	chip->battery.external_power_changed = max17042_external_power_changed;
 	chip->battery.properties	= max17042_battery_props;
 	chip->battery.num_properties	= ARRAY_SIZE(max17042_battery_props);
 
-	/* When current is not measured,
-	 * CURRENT_NOW and CURRENT_AVG properties should be invisible. */
-	if (!chip->pdata->enable_current_sense)
-		chip->battery.num_properties -= 2;
-
-	if (chip->pdata->r_sns == 0)
-		chip->pdata->r_sns = MAX17042_DEFAULT_SNS_RESISTOR;
-
-	if (chip->pdata->init_data)
-		max17042_set_reg(client, chip->pdata->init_data,
-				chip->pdata->num_init_data);
-
-	if (!chip->pdata->enable_current_sense) {
-		max17042_write_reg(client, MAX17042_CGAIN, 0x0000);
-		max17042_write_reg(client, MAX17042_MiscCFG, 0x0003);
-		max17042_write_reg(client, MAX17042_LearnCFG, 0x0007);
-	}
-
 	ret = power_supply_register(&client->dev, &chip->battery);
 	if (ret) {
 		dev_err(&client->dev, "failed: power supply register\n");
+		kfree(chip);
+		kfree(fg_conf_data);
 		return ret;
 	}
 
-	if (client->irq) {
-		ret = request_threaded_irq(client->irq, NULL,
-						max17042_thread_handler,
-						IRQF_TRIGGER_FALLING,
-						chip->battery.name, chip);
-		if (!ret) {
-			reg =  max17042_read_reg(client, MAX17042_CONFIG);
-			reg |= CONFIG_ALRT_BIT_ENBL;
-			max17042_write_reg(client, MAX17042_CONFIG, reg);
-			max17042_set_soc_threshold(chip, 1);
-		} else {
-			client->irq = 0;
-			dev_err(&client->dev, "%s(): cannot get IRQ\n",
-				__func__);
-		}
-	}
+	/* Init Runtime PM State */
+	pm_runtime_put_noidle(&chip->client->dev);
+	pm_schedule_suspend(&chip->client->dev, MSEC_PER_SEC);
 
-	reg = max17042_read_reg(chip->client, MAX17042_STATUS);
-	if (reg & STATUS_POR_BIT) {
-		INIT_WORK(&chip->work, max17042_init_worker);
-		schedule_work(&chip->work);
-	} else {
-		chip->init_complete = 1;
-	}
+	/* In case of power supply register INT now
+	 * else the INT will registered after chip init.
+	 */
+	if (!chip->pdata->enable_current_sense)
+		configure_interrupts(chip);
+
+	if (chip->pdata->file_sys_storage_enabled)
+		misc_register(&fg_helper);
+
+	/* Create debugfs for maxim registers */
+	max17042_create_debugfs(chip);
+
+	/* create sysfs file to disable shutdown methods */
+	ret = device_create_file(&client->dev,
+			&dev_attr_disable_shutdown_methods);
+	if (ret)
+		dev_warn(&client->dev, "cannot create sysfs entry\n");
+
+	/* create sysfs file to enter shutdown voltage */
+	ret = device_create_file(&client->dev,
+			&dev_attr_shutdown_voltage);
+	if (ret)
+		dev_warn(&client->dev, "cannot create sysfs entry\n");
+
+	/* create sysfs file to enable fake battery temperature */
+	ret = device_create_file(&client->dev,
+			&dev_attr_enable_fake_temp);
+	if (ret)
+		dev_warn(&client->dev, "cannot create sysfs entry\n");
+
+	/* Register reboot notifier callback */
+	if (!chip->pdata->file_sys_storage_enabled)
+		register_reboot_notifier(&max17042_reboot_notifier_block);
+	schedule_work(&chip->evt_worker);
 
 	return 0;
 }
@@ -780,9 +2307,20 @@
 {
 	struct max17042_chip *chip = i2c_get_clientdata(client);
 
-	if (client->irq)
+	if (chip->pdata->file_sys_storage_enabled)
+		misc_deregister(&fg_helper);
+	else
+		unregister_reboot_notifier(&max17042_reboot_notifier_block);
+	device_remove_file(&client->dev, &dev_attr_disable_shutdown_methods);
+	device_remove_file(&client->dev, &dev_attr_shutdown_voltage);
+	device_remove_file(&client->dev, &dev_attr_enable_fake_temp);
+	max17042_remove_debugfs(chip);
+	if (client->irq > 0)
 		free_irq(client->irq, chip);
 	power_supply_unregister(&chip->battery);
+	pm_runtime_get_noresume(&chip->client->dev);
+	kfree(chip);
+	kfree(fg_conf_data);
 	return 0;
 }
 
@@ -792,13 +2330,31 @@
 	struct max17042_chip *chip = dev_get_drvdata(dev);
 
 	/*
-	 * disable the irq and enable irq_wake
-	 * capability to the interrupt line.
+	 * disable irq here doesn't mean max17042 interrupt
+	 * can't wake up system. max17042 interrupt is triggered
+	 * by GPIO pin, which is always active.
+	 * When resume callback calls enable_irq, kernel
+	 * would deliver the buffered interrupt (if it has) to
+	 * driver.
 	 */
-	if (chip->client->irq) {
+	if (chip->client->irq > 0) {
+		/* set SOC alert thresholds */
+		set_soc_intr_thresholds_s3(chip);
+		/* setting Vmin(3300mV) threshold to wake the
+		 * platfrom in under low battery conditions */
+		max17042_write_reg(chip->client, MAX17042_VALRT_Th,
+					VOLT_MIN_THRLD_ENBL);
 		disable_irq(chip->client->irq);
 		enable_irq_wake(chip->client->irq);
 	}
+	if (fg_conf_data->cfg & CONFIG_TEX_BIT_ENBL)
+		cancel_delayed_work_sync(&chip->temp_worker);
+
+	/* max17042 IC automatically goes into shutdown mode
+	 * if the SCL and SDA were held low for more than
+	 * timeout of SHDNTIMER register value
+	 */
+	dev_dbg(&chip->client->dev, "max17042 suspend\n");
 
 	return 0;
 }
@@ -806,56 +2362,156 @@
 static int max17042_resume(struct device *dev)
 {
 	struct max17042_chip *chip = dev_get_drvdata(dev);
+	int vmax, vmin, reg_val;
 
-	if (chip->client->irq) {
-		disable_irq_wake(chip->client->irq);
+	if (chip->client->irq > 0) {
+		/* Setting V-alrt threshold register to default values */
+		if (chip->pdata->en_vmax_intr) {
+			vmax = chip->pdata->volt_max_lim +
+					VBATT_MAX_OFFSET;
+			vmin = chip->pdata->volt_min_lim -
+					VBATT_MIN_OFFSET;
+			reg_val = ((vmax / VALERT_VOLT_OFFSET) << 8) |
+					(vmin / VALERT_VOLT_OFFSET);
+			max17042_write_reg(chip->client, MAX17042_VALRT_Th,
+						reg_val);
+		} else {
+			max17042_write_reg(chip->client, MAX17042_VALRT_Th,
+					VOLT_DEF_MAX_MIN_THRLD);
+		}
+		/* set SOC-alert threshold sholds to lowest value */
+		max17042_write_reg(chip->client, MAX17042_SALRT_Th,
+					SOC_DEF_MAX_MIN3_THRLD);
 		enable_irq(chip->client->irq);
-		/* re-program the SOC thresholds to 1% change */
-		max17042_set_soc_threshold(chip, 1);
+		disable_irq_wake(chip->client->irq);
 	}
+	/* update battery status and health */
+	schedule_work(&chip->evt_worker);
+	if (fg_conf_data->cfg & CONFIG_TEX_BIT_ENBL)
+		schedule_delayed_work(&chip->temp_worker, 0);
+
+	/* max17042 IC automatically wakes up if any edge
+	 * on SDCl or SDA if we set I2CSH of CONFG reg
+	 */
+	dev_dbg(&chip->client->dev, "max17042 resume\n");
 
 	return 0;
 }
-
-static const struct dev_pm_ops max17042_pm_ops = {
-	.suspend	= max17042_suspend,
-	.resume		= max17042_resume,
-};
-
-#define MAX17042_PM_OPS (&max17042_pm_ops)
 #else
-#define MAX17042_PM_OPS NULL
+#define max17042_suspend NULL
+#define max17042_resume NULL
 #endif
 
-#ifdef CONFIG_OF
-static const struct of_device_id max17042_dt_match[] = {
-	{ .compatible = "maxim,max17042" },
-	{ .compatible = "maxim,max17047" },
-	{ .compatible = "maxim,max17050" },
-	{ },
-};
-MODULE_DEVICE_TABLE(of, max17042_dt_match);
+#ifdef CONFIG_PM_RUNTIME
+static int max17042_runtime_suspend(struct device *dev)
+{
+
+	dev_dbg(dev, "%s called\n", __func__);
+	return 0;
+}
+
+static int max17042_runtime_resume(struct device *dev)
+{
+	dev_dbg(dev, "%s called\n", __func__);
+	return 0;
+}
+
+static int max17042_runtime_idle(struct device *dev)
+{
+
+	dev_dbg(dev, "%s called\n", __func__);
+	return 0;
+}
+#else
+#define max17042_runtime_suspend	NULL
+#define max17042_runtime_resume		NULL
+#define max17042_runtime_idle		NULL
 #endif
 
 static const struct i2c_device_id max17042_id[] = {
 	{ "max17042", 0 },
 	{ "max17047", 1 },
 	{ "max17050", 2 },
-	{ }
+	{ "MAX17042", 0 },
+	{ "MAX17047", 1 },
+	{ "MAX17050", 2 },
+	{ },
 };
 MODULE_DEVICE_TABLE(i2c, max17042_id);
 
+static const struct dev_pm_ops max17042_pm_ops = {
+	.suspend		= max17042_suspend,
+	.resume			= max17042_resume,
+	.runtime_suspend	= max17042_runtime_suspend,
+	.runtime_resume		= max17042_runtime_resume,
+	.runtime_idle		= max17042_runtime_idle,
+};
+
+#ifdef CONFIG_ACPI
+static struct acpi_device_id max17042_acpi_match[] = {
+	{"MAX17047", 0},
+	{}
+};
+MODULE_DEVICE_TABLE(acpi, max17042_acpi_match);
+
+#endif
+
 static struct i2c_driver max17042_i2c_driver = {
 	.driver	= {
-		.name	= "max17042",
-		.of_match_table = of_match_ptr(max17042_dt_match),
-		.pm	= MAX17042_PM_OPS,
+		.name	= DRV_NAME,
+		.owner	= THIS_MODULE,
+		.pm	= &max17042_pm_ops,
+#ifdef CONFIG_ACPI
+		.acpi_match_table = ACPI_PTR(max17042_acpi_match),
+#endif
 	},
 	.probe		= max17042_probe,
 	.remove		= max17042_remove,
 	.id_table	= max17042_id,
 };
-module_i2c_driver(max17042_i2c_driver);
+
+static int max17042_reboot_callback(struct notifier_block *nfb,
+					unsigned long event, void *data)
+{
+	struct max17042_chip *chip = i2c_get_clientdata(max17042_client);
+
+	if (chip->pdata->enable_current_sense)
+		save_runtime_params(chip);
+
+	/* if the shutdown or reboot sequence started
+	 * then block the access to maxim registers as chip
+	 * cannot be recovered from broken i2c transactions
+	 */
+	mutex_lock(&chip->batt_lock);
+	chip->plat_rebooting = true;
+	mutex_unlock(&chip->batt_lock);
+
+	return NOTIFY_OK;
+}
+
+static int __init max17042_init(void)
+{
+	return i2c_add_driver(&max17042_i2c_driver);
+}
+#ifdef CONFIG_ACPI
+late_initcall(max17042_init);
+#else
+module_init(max17042_init);
+#endif
+
+static void __exit max17042_exit(void)
+{
+	i2c_del_driver(&max17042_i2c_driver);
+}
+module_exit(max17042_exit);
+
+int __init set_fake_batt_full(char *p)
+{
+	fake_batt_full = true;
+	return 0;
+}
+
+early_param("fake_batt_full", set_fake_batt_full);
 
 MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
 MODULE_DESCRIPTION("MAX17042 Fuel Gauge");
diff --git a/drivers/power/power_supply.h b/drivers/power/power_supply.h
index cc439fd..d2b3a53 100644
--- a/drivers/power/power_supply.h
+++ b/drivers/power/power_supply.h
@@ -40,3 +40,24 @@
 static inline void power_supply_remove_triggers(struct power_supply *psy) {}
 
 #endif /* CONFIG_LEDS_TRIGGERS */
+#ifdef CONFIG_POWER_SUPPLY_CHARGER
+
+extern void power_supply_trigger_charging_handler(struct power_supply *psy);
+extern int power_supply_register_charger(struct power_supply *psy);
+extern int power_supply_unregister_charger(struct power_supply *psy);
+extern int psy_charger_throttle_charger(struct power_supply *psy,
+					unsigned long state);
+
+#else
+
+static inline void
+	power_supply_trigger_charging_handler(struct power_supply *psy) { }
+static inline int power_supply_register_charger(struct power_supply *psy)
+{ return 0; }
+static inline int power_supply_unregister_charger(struct power_supply *psy)
+{ return 0; }
+static inline int psy_charger_throttle_charger(struct power_supply *psy,
+					unsigned long state)
+{ return 0; }
+
+#endif
diff --git a/drivers/power/power_supply_charger.c b/drivers/power/power_supply_charger.c
new file mode 100644
index 0000000..508d628
--- /dev/null
+++ b/drivers/power/power_supply_charger.c
@@ -0,0 +1,1143 @@
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/power_supply.h>
+#include <linux/thermal.h>
+#include <linux/extcon.h>
+#include <linux/power/battery_id.h>
+#include <linux/notifier.h>
+#include <linux/usb/otg.h>
+#include "power_supply.h"
+#include "power_supply_charger.h"
+
+struct work_struct notifier_work;
+#define MAX_CHARGER_COUNT 5
+
+static LIST_HEAD(algo_list);
+
+struct power_supply_charger {
+	bool is_cable_evt_reg;
+	/*cache battery and charger properties */
+	struct list_head chrgr_cache_lst;
+	struct list_head batt_cache_lst;
+	struct list_head evt_queue;
+	struct work_struct algo_trigger_work;
+	struct mutex evt_lock;
+	wait_queue_head_t wait_chrg_enable;
+};
+
+struct charger_cable {
+	struct work_struct work;
+	struct notifier_block nb;
+	struct extcon_chrgr_cbl_props cable_props;
+	enum extcon_cable_name extcon_cable_type;
+	enum power_supply_charger_cable_type psy_cable_type;
+	struct extcon_specific_cable_nb extcon_dev;
+	struct extcon_dev *edev;
+};
+
+static struct power_supply_charger psy_chrgr;
+
+static struct charger_cable cable_list[] = {
+	{
+	 .psy_cable_type = POWER_SUPPLY_CHARGER_TYPE_USB_SDP,
+	 .extcon_cable_type = EXTCON_SDP,
+	 },
+	{
+	 .psy_cable_type = POWER_SUPPLY_CHARGER_TYPE_USB_CDP,
+	 .extcon_cable_type = EXTCON_CDP,
+	 },
+	{
+	 .psy_cable_type = POWER_SUPPLY_CHARGER_TYPE_USB_DCP,
+	 .extcon_cable_type = EXTCON_DCP,
+	 },
+	{
+	 .psy_cable_type = POWER_SUPPLY_CHARGER_TYPE_USB_ACA,
+	 .extcon_cable_type = EXTCON_ACA,
+	 },
+	{
+	 .psy_cable_type = POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK,
+	 .extcon_cable_type = EXTCON_ACA,
+	 },
+	{
+	 .psy_cable_type = POWER_SUPPLY_CHARGER_TYPE_SE1,
+	 .extcon_cable_type = EXTCON_TA,
+	 },
+	{
+	 .psy_cable_type = POWER_SUPPLY_CHARGER_TYPE_AC,
+	 .extcon_cable_type = EXTCON_AC,
+	 },
+};
+
+static int get_supplied_by_list(struct power_supply *psy,
+				struct power_supply *psy_lst[]);
+
+static int handle_cable_notification(struct notifier_block *nb,
+				   unsigned long event, void *data);
+struct usb_phy *otg_xceiver;
+struct notifier_block nb = {
+		   .notifier_call = handle_cable_notification,
+		};
+static void configure_chrgr_source(struct charger_cable *cable_lst);
+
+struct charger_cable *get_cable(unsigned long usb_chrgr_type)
+{
+
+	switch (usb_chrgr_type) {
+	case POWER_SUPPLY_CHARGER_TYPE_USB_SDP:
+		return &cable_list[0];
+	case POWER_SUPPLY_CHARGER_TYPE_USB_CDP:
+		return &cable_list[1];
+	case POWER_SUPPLY_CHARGER_TYPE_USB_DCP:
+		return &cable_list[2];
+	case POWER_SUPPLY_CHARGER_TYPE_USB_ACA:
+		return &cable_list[3];
+	case POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK:
+		return &cable_list[4];
+	case POWER_SUPPLY_CHARGER_TYPE_AC:
+		return &cable_list[6];
+	case POWER_SUPPLY_CHARGER_TYPE_SE1:
+		return &cable_list[5];
+	}
+
+	return NULL;
+}
+
+
+static void notifier_event_worker(struct work_struct *work)
+{
+	configure_chrgr_source(cable_list);
+}
+
+static int process_cable_props(struct power_supply_cable_props *cap)
+{
+
+	struct charger_cable *cable = NULL;
+
+	pr_info("%s: event:%d, type:%d, ma:%d\n",
+		__func__, cap->chrg_evt, cap->chrg_type, cap->ma);
+
+	cable = get_cable(cap->chrg_type);
+	if (!cable) {
+
+		pr_err("%s:Error in getting charger cable\n", __func__);
+		return -EINVAL;
+	}
+
+	switch (cap->chrg_evt) {
+	case POWER_SUPPLY_CHARGER_EVENT_CONNECT:
+	case POWER_SUPPLY_CHARGER_EVENT_RESUME:
+		cable->cable_props.cable_stat = EXTCON_CHRGR_CABLE_CONNECTED;
+		break;
+	case POWER_SUPPLY_CHARGER_EVENT_UPDATE:
+		cable->cable_props.cable_stat = EXTCON_CHRGR_CABLE_UPDATED;
+		break;
+	case POWER_SUPPLY_CHARGER_EVENT_DISCONNECT:
+		cable->cable_props.cable_stat = EXTCON_CHRGR_CABLE_DISCONNECTED;
+		break;
+	case POWER_SUPPLY_CHARGER_EVENT_SUSPEND:
+		cable->cable_props.cable_stat = EXTCON_CHRGR_CABLE_SUSPENDED;
+		break;
+	default:
+		pr_err("%s:Invalid cable event\n", __func__);
+		return -EINVAL;
+	}
+
+	cable->cable_props.ma = cap->ma;
+	schedule_work(&notifier_work);
+
+	return 0;
+
+}
+
+static int handle_cable_notification(struct notifier_block *nb,
+				   unsigned long event, void *data)
+{
+	struct power_supply_cable_props cap;
+
+	memcpy(&cap, data, sizeof(struct power_supply_cable_props));
+
+	if (event != USB_EVENT_CHARGER && event != POWER_SUPPLY_CABLE_EVENT)
+		return NOTIFY_DONE;
+
+	process_cable_props(&cap);
+
+	return NOTIFY_OK;
+}
+
+static int register_notifier(void)
+{
+	int retval;
+
+	otg_xceiver = usb_get_phy(USB_PHY_TYPE_USB2);
+	if (!otg_xceiver) {
+		pr_err("failure to get otg transceiver\n");
+		retval = -EIO;
+		goto notifier_reg_failed;
+	}
+	retval = usb_register_notifier(otg_xceiver, &nb);
+	if (retval) {
+		pr_err("failure to register otg notifier\n");
+		goto notifier_reg_failed;
+	}
+
+	retval = power_supply_reg_notifier(&nb);
+	if (retval) {
+		pr_err("failure to register power_supply notifier\n");
+		goto notifier_reg_failed;
+
+	}
+
+	INIT_WORK(&notifier_work, notifier_event_worker);
+
+	return 0;
+
+notifier_reg_failed:
+	return retval;
+}
+
+static int charger_cable_notifier(struct notifier_block *nb,
+				  unsigned long event, void *ptr);
+static void charger_cable_event_worker(struct work_struct *work);
+struct charging_algo *power_supply_get_charging_algo
+		(struct power_supply *, struct ps_batt_chg_prof *);
+
+static void init_charger_cables(struct charger_cable *cable_lst, int count)
+{
+	struct charger_cable *cable;
+	struct extcon_chrgr_cbl_props cable_props;
+	const char *cable_name;
+	struct power_supply_cable_props cap;
+
+	register_notifier();
+
+	while (--count) {
+		cable = cable_lst++;
+		/* initialize cable instance */
+		INIT_WORK(&cable->work, charger_cable_event_worker);
+		cable->nb.notifier_call = charger_cable_notifier;
+		cable->cable_props.cable_stat = EXTCON_CHRGR_CABLE_DISCONNECTED;
+		cable->cable_props.ma = 0;
+		cable_name = extcon_cable_name[cable->extcon_cable_type];
+
+		if (extcon_register_interest(&cable->extcon_dev,
+				NULL, cable_name, &cable->nb))
+				continue;
+
+		cable->edev = cable->extcon_dev.edev;
+
+		if (!cable->edev)
+			continue;
+
+		if (cable->edev->get_cable_properties(cable_name,
+						      (void *)&cable_props)) {
+			continue;
+
+		} else if (cable_props.cable_stat !=
+			   cable->cable_props.cable_stat) {
+			cable->cable_props.cable_stat = cable_props.cable_stat;
+			cable->cable_props.ma = cable_props.ma;
+		}
+	}
+
+	if (!otg_get_chrg_status(otg_xceiver, &cap))
+		process_cable_props(&cap);
+
+}
+
+static inline int is_charging_can_be_enabled(struct power_supply *psy)
+{
+	int health;
+
+	health = HEALTH(psy);
+	if (IS_BATTERY(psy)) {
+		return (health == POWER_SUPPLY_HEALTH_GOOD) ||
+				(health == POWER_SUPPLY_HEALTH_DEAD);
+	} else {
+		return
+	((CURRENT_THROTTLE_ACTION(psy) != PSY_THROTTLE_DISABLE_CHARGER) &&
+	(CURRENT_THROTTLE_ACTION(psy) != PSY_THROTTLE_DISABLE_CHARGING) &&
+	(INLMT(psy) >= 100) && (health == POWER_SUPPLY_HEALTH_GOOD));
+	}
+}
+
+static inline void get_cur_chrgr_prop(struct power_supply *psy,
+				      struct charger_props *chrgr_prop)
+{
+	chrgr_prop->is_charging = IS_CHARGING_ENABLED(psy);
+	chrgr_prop->name = psy->name;
+	chrgr_prop->online = IS_ONLINE(psy);
+	chrgr_prop->present = IS_PRESENT(psy);
+	chrgr_prop->cable = CABLE_TYPE(psy);
+	chrgr_prop->health = HEALTH(psy);
+	chrgr_prop->tstamp = get_jiffies_64();
+
+}
+
+static inline int get_chrgr_prop_cache(struct power_supply *psy,
+				       struct charger_props *chrgr_cache)
+{
+
+	struct charger_props *chrgr_prop;
+	int ret = -ENODEV;
+
+	list_for_each_entry(chrgr_prop, &psy_chrgr.chrgr_cache_lst, node) {
+		if (!strcmp(chrgr_prop->name, psy->name)) {
+			memcpy(chrgr_cache, chrgr_prop, sizeof(*chrgr_cache));
+			ret = 0;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+static void dump_charger_props(struct charger_props *props)
+{
+	pr_devel("%s:name=%s present=%d is_charging=%d health=%d online=%d cable=%lu tstamp=%lu\n",
+		__func__, props->name, props->present, props->is_charging,
+		props->health, props->online, props->cable, props->tstamp);
+}
+
+static void dump_battery_props(struct batt_props *props)
+{
+	pr_devel("%s:name=%s voltage_now=%ld current_now=%ld temperature=%d status=%ld health=%d tstamp=%lu algo_stat=%d ",
+		__func__, props->name, props->voltage_now, props->current_now,
+		props->temperature, props->status, props->health,
+		props->tstamp, props->algo_stat);
+}
+
+static inline void cache_chrgr_prop(struct charger_props *chrgr_prop_new)
+{
+
+	struct charger_props *chrgr_cache;
+
+	list_for_each_entry(chrgr_cache, &psy_chrgr.chrgr_cache_lst, node) {
+		if (!strcmp(chrgr_cache->name, chrgr_prop_new->name))
+			goto update_props;
+	}
+
+	chrgr_cache = kzalloc(sizeof(*chrgr_cache), GFP_KERNEL);
+	if (chrgr_cache == NULL) {
+		pr_err("%s:Error in allocating memory\n", __func__);
+		return;
+	}
+
+	INIT_LIST_HEAD(&chrgr_cache->node);
+	list_add_tail(&chrgr_cache->node, &psy_chrgr.chrgr_cache_lst);
+
+	chrgr_cache->name = chrgr_prop_new->name;
+
+update_props:
+	chrgr_cache->is_charging = chrgr_prop_new->is_charging;
+	chrgr_cache->online = chrgr_prop_new->online;
+	chrgr_cache->health = chrgr_prop_new->health;
+	chrgr_cache->present = chrgr_prop_new->present;
+	chrgr_cache->cable = chrgr_prop_new->cable;
+	chrgr_cache->tstamp = chrgr_prop_new->tstamp;
+}
+
+
+static inline bool is_chrgr_prop_changed(struct power_supply *psy)
+{
+	struct charger_props chrgr_prop_cache, chrgr_prop;
+
+	get_cur_chrgr_prop(psy, &chrgr_prop);
+	/* Get cached battery property. If no cached property available
+	 *  then cache the new property and return true
+	 */
+	if (get_chrgr_prop_cache(psy, &chrgr_prop_cache)) {
+		cache_chrgr_prop(&chrgr_prop);
+		return true;
+	}
+
+	dump_charger_props(&chrgr_prop);
+	dump_charger_props(&chrgr_prop_cache);
+
+	if (!IS_CHARGER_PROP_CHANGED(chrgr_prop, chrgr_prop_cache))
+		return false;
+
+	cache_chrgr_prop(&chrgr_prop);
+	return true;
+}
+static void cache_successive_samples(long *sample_array, long new_sample)
+{
+	int i;
+
+	for (i = 0; i < MAX_CUR_VOLT_SAMPLES - 1; ++i)
+		*(sample_array + i) = *(sample_array + i + 1);
+
+	*(sample_array + i) = new_sample;
+}
+
+static inline void cache_bat_prop(struct batt_props *bat_prop_new, bool force)
+{
+
+	struct batt_props *bat_cache;
+
+	/* Find entry in cache list. If an entry is located update
+	 * the existing entry else create new entry in the list */
+	list_for_each_entry(bat_cache, &psy_chrgr.batt_cache_lst, node) {
+		if (!strcmp(bat_cache->name, bat_prop_new->name))
+			goto update_props;
+	}
+
+	bat_cache = kzalloc(sizeof(*bat_cache), GFP_KERNEL);
+	if (bat_cache == NULL) {
+		pr_err("%s:Error in allocating memory\n", __func__);
+		return;
+	}
+	INIT_LIST_HEAD(&bat_cache->node);
+	list_add_tail(&bat_cache->node, &psy_chrgr.batt_cache_lst);
+
+	bat_cache->name = bat_prop_new->name;
+
+update_props:
+	if (time_after(bat_prop_new->tstamp,
+		(bat_cache->tstamp + DEF_CUR_VOLT_SAMPLE_JIFF)) || force ||
+						bat_cache->tstamp == 0) {
+		cache_successive_samples(bat_cache->voltage_now_cache,
+						bat_prop_new->voltage_now);
+		cache_successive_samples(bat_cache->current_now_cache,
+						bat_prop_new->current_now);
+		bat_cache->tstamp = bat_prop_new->tstamp;
+	}
+
+	bat_cache->voltage_now = bat_prop_new->voltage_now;
+	bat_cache->current_now = bat_prop_new->current_now;
+	bat_cache->health = bat_prop_new->health;
+
+	bat_cache->temperature = bat_prop_new->temperature;
+	bat_cache->status = bat_prop_new->status;
+	bat_cache->algo_stat = bat_prop_new->algo_stat;
+	bat_cache->throttle_state = bat_prop_new->throttle_state;
+}
+
+static inline int get_bat_prop_cache(struct power_supply *psy,
+				     struct batt_props *bat_cache)
+{
+	struct batt_props *bat_prop;
+	int ret = -ENODEV;
+
+	list_for_each_entry(bat_prop, &psy_chrgr.batt_cache_lst, node) {
+		if (!strcmp(bat_prop->name, psy->name)) {
+			memcpy(bat_cache, bat_prop, sizeof(*bat_cache));
+			ret = 0;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+static inline void get_cur_bat_prop(struct power_supply *psy,
+				    struct batt_props *bat_prop)
+{
+	struct batt_props bat_prop_cache;
+	int ret;
+
+	bat_prop->name = psy->name;
+	bat_prop->voltage_now = VOLTAGE_OCV(psy) / 1000;
+	bat_prop->current_now = CURRENT_NOW(psy) / 1000;
+	bat_prop->temperature = TEMPERATURE(psy) / 10;
+	bat_prop->status = STATUS(psy);
+	bat_prop->health = HEALTH(psy);
+	bat_prop->tstamp = get_jiffies_64();
+	bat_prop->throttle_state = CURRENT_THROTTLE_STATE(psy);
+
+	/* Populate cached algo data to new profile */
+	ret = get_bat_prop_cache(psy, &bat_prop_cache);
+	if (!ret)
+		bat_prop->algo_stat = bat_prop_cache.algo_stat;
+}
+
+static inline bool is_batt_prop_changed(struct power_supply *psy)
+{
+	struct batt_props bat_prop_cache, bat_prop;
+
+	/* Get cached battery property. If no cached property available
+	 *  then cache the new property and return true
+	 */
+	get_cur_bat_prop(psy, &bat_prop);
+	if (get_bat_prop_cache(psy, &bat_prop_cache)) {
+		cache_bat_prop(&bat_prop, false);
+		return true;
+	}
+
+	dump_battery_props(&bat_prop);
+	dump_battery_props(&bat_prop_cache);
+
+	if (!IS_BAT_PROP_CHANGED(bat_prop, bat_prop_cache))
+		return false;
+
+	cache_bat_prop(&bat_prop, false);
+	return true;
+}
+
+static inline bool is_supplied_to_has_ext_pwr_changed(struct power_supply *psy)
+{
+	int i;
+	struct power_supply *psb;
+	bool is_pwr_changed_defined = true;
+
+	for (i = 0; i < psy->num_supplicants; i++) {
+		psb =
+		    power_supply_get_by_name(psy->
+					     supplied_to[i]);
+		if (psb && !psb->external_power_changed)
+			is_pwr_changed_defined &= false;
+	}
+
+	return is_pwr_changed_defined;
+
+}
+
+static inline bool is_supplied_by_changed(struct power_supply *psy)
+{
+	int cnt;
+	struct power_supply *chrgr_lst[MAX_CHARGER_COUNT];
+
+	cnt = get_supplied_by_list(psy, chrgr_lst);
+	while (cnt--) {
+		if ((IS_CHARGER(chrgr_lst[cnt])) &&
+			is_chrgr_prop_changed(chrgr_lst[cnt]))
+			return true;
+	}
+
+	return false;
+}
+
+static inline bool is_trigger_charging_algo(struct power_supply *psy)
+{
+	/* trigger charging alorithm if battery or
+	 * charger properties are changed. Also no need to
+	 * invoke algorithm for power_supply_changed from
+	 * charger, if all supplied_to has the ext_port_changed defined.
+	 * On invoking the ext_port_changed the supplied to can send
+	 * power_supplied_changed event.
+	 */
+
+	if ((IS_CHARGER(psy) && !is_supplied_to_has_ext_pwr_changed(psy)) &&
+			is_chrgr_prop_changed(psy))
+		return true;
+
+	if ((IS_BATTERY(psy)) && (is_batt_prop_changed(psy) ||
+				is_supplied_by_changed(psy)))
+		return true;
+
+	return false;
+}
+
+static int get_supplied_by_list(struct power_supply *psy,
+				struct power_supply *psy_lst[])
+{
+	struct class_dev_iter iter;
+	struct device *dev;
+	struct power_supply *pst;
+	int cnt = 0, i, j;
+
+	if (!IS_BATTERY(psy))
+		return 0;
+
+	/* Identify chargers which are supplying power to the battery */
+	class_dev_iter_init(&iter, power_supply_class, NULL, NULL);
+	while ((dev = class_dev_iter_next(&iter))) {
+		pst = (struct power_supply *)dev_get_drvdata(dev);
+		if (!IS_CHARGER(pst))
+			continue;
+		for (i = 0; i < pst->num_supplicants; i++) {
+			if (!strcmp(pst->supplied_to[i], psy->name))
+				psy_lst[cnt++] = pst;
+		}
+	}
+	class_dev_iter_exit(&iter);
+
+	if (cnt <= 1)
+		return cnt;
+
+	/*sort based on priority. 0 has the highest priority  */
+	for (i = 0; i < cnt; ++i)
+		for (j = 0; j < cnt; ++j)
+			if (PRIORITY(psy_lst[j]) > PRIORITY(psy_lst[i]))
+				swap(psy_lst[j], psy_lst[i]);
+
+	return cnt;
+}
+
+static int get_battery_status(struct power_supply *psy)
+{
+	int cnt, status, ret;
+	struct power_supply *chrgr_lst[MAX_CHARGER_COUNT];
+	struct batt_props bat_prop;
+	int health;
+
+	if (!IS_BATTERY(psy))
+		return -EINVAL;
+
+	ret = get_bat_prop_cache(psy, &bat_prop);
+	if (ret)
+		return ret;
+
+	status = POWER_SUPPLY_STATUS_DISCHARGING;
+	cnt = get_supplied_by_list(psy, chrgr_lst);
+
+
+	while (cnt--) {
+		if (IS_PRESENT(chrgr_lst[cnt]))
+			status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+
+		if (is_charging_can_be_enabled(chrgr_lst[cnt]) &&
+				(IS_HEALTH_GOOD(chrgr_lst[cnt]))) {
+			health = HEALTH(psy);
+			if ((health == POWER_SUPPLY_HEALTH_GOOD) ||
+				(health == POWER_SUPPLY_HEALTH_DEAD)) {
+				/* do charging with Good / Dead battery */
+				if ((bat_prop.algo_stat ==
+							PSY_ALGO_STAT_FULL) ||
+					(bat_prop.algo_stat ==
+							PSY_ALGO_STAT_MAINT))
+					status = POWER_SUPPLY_STATUS_FULL;
+				else if (IS_CHARGING_ENABLED(chrgr_lst[cnt]))
+					status = POWER_SUPPLY_STATUS_CHARGING;
+			}
+		}
+	}
+	pr_devel("%s: Set status=%d for %s\n", __func__, status, psy->name);
+
+	return status;
+}
+
+static void update_charger_online(struct power_supply *psy)
+{
+	if (IS_CHARGER_ENABLED(psy))
+		set_charger_online(psy, 1);
+	else
+		set_charger_online(psy, 0);
+}
+
+static inline void cache_cur_batt_prop_force(struct power_supply *psb)
+{
+	struct batt_props bat_prop;
+
+	if (!IS_BATTERY(psb))
+		return;
+
+	get_cur_bat_prop(psb, &bat_prop);
+	cache_bat_prop(&bat_prop, true);
+}
+
+static void update_sysfs(struct power_supply *psy)
+{
+	int i, cnt;
+	struct power_supply *psb;
+	struct power_supply *chrgr_lst[MAX_CHARGER_COUNT];
+
+	if (IS_BATTERY(psy)) {
+		/* set charger online */
+		cnt = get_supplied_by_list(psy, chrgr_lst);
+		while (cnt--) {
+			if (!IS_PRESENT(chrgr_lst[cnt]))
+				continue;
+
+			update_charger_online(psy);
+		}
+		/* set battery status */
+		if (set_battery_status(psy, get_battery_status(psy)))
+			/* forcefully cache the battery properties */
+			cache_cur_batt_prop_force(psy);
+	} else {
+		/*set charger online */
+		update_charger_online(psy);
+		/*set battery status */
+		for (i = 0; i < psy->num_supplicants; i++) {
+			psb =
+			    power_supply_get_by_name(psy->
+						     supplied_to[i]);
+			if (psb && IS_BATTERY(psb) && IS_PRESENT(psb))
+				if (set_battery_status(psb,
+					get_battery_status(psb)))
+					/*
+					 * forcefully cache the battery
+					 * properties
+					 */
+					cache_cur_batt_prop_force(psy);
+		}
+	}
+}
+
+static int trigger_algo(struct power_supply *psy)
+{
+	unsigned long cc = 0, cv = 0, cc_min;
+	struct power_supply *chrgr_lst[MAX_CHARGER_COUNT];
+	struct batt_props bat_prop;
+	struct charging_algo *algo;
+	struct ps_batt_chg_prof chrg_profile;
+	int cnt;
+
+	if (psy->type != POWER_SUPPLY_TYPE_BATTERY)
+		return 0;
+
+	if (get_batt_prop(&chrg_profile)) {
+		pr_err("%s:Error in getting charge profile\n", __func__);
+		return -EINVAL;
+	}
+
+
+	get_bat_prop_cache(psy, &bat_prop);
+
+	algo = power_supply_get_charging_algo(psy, &chrg_profile);
+	if (!algo) {
+		pr_err("%s:Error in getting charging algo!!\n", __func__);
+		return -EINVAL;
+	}
+
+	bat_prop.algo_stat = algo->get_next_cc_cv(bat_prop,
+						chrg_profile, &cc, &cv);
+
+	pr_info("%s:Algo_status:%d\n", __func__, bat_prop.algo_stat);
+
+	cache_bat_prop(&bat_prop, false);
+
+	if (!cc || !cv)
+		return -ENODATA;
+
+	/* CC needs to be updated for all chargers which are supplying
+	 *  power to this battery to ensure that the sum of CCs of all
+	 * chargers are never more than the CC selected by the algo.
+	 * The CC is set based on the charger priority.
+	 */
+	cnt = get_supplied_by_list(psy, chrgr_lst);
+
+	while (cnt--) {
+		if (!IS_PRESENT(chrgr_lst[cnt]))
+			continue;
+
+		cc_min = min_t(unsigned long, MAX_CC(chrgr_lst[cnt]), cc);
+		if (cc_min < 0)
+			cc_min = 0;
+		cc -= cc_min;
+		set_cc(chrgr_lst[cnt], cc_min);
+		set_cv(chrgr_lst[cnt], cv);
+	}
+
+	if ((bat_prop.algo_stat == PSY_ALGO_STAT_NOT_CHARGE) ||
+		(bat_prop.algo_stat == PSY_ALGO_STAT_FULL))
+		return -EOPNOTSUPP;
+
+	return 0;
+}
+
+static inline void wait_for_charging_enabled(struct power_supply *psy)
+{
+	wait_event_timeout(psy_chrgr.wait_chrg_enable,
+			(IS_CHARGING_ENABLED(psy)), HZ);
+}
+
+static inline void enable_supplied_by_charging
+		(struct power_supply *psy, bool is_enable)
+{
+	struct power_supply *chrgr_lst[MAX_CHARGER_COUNT];
+	int cnt;
+
+	if (psy->type != POWER_SUPPLY_TYPE_BATTERY)
+		return;
+	/* Get list of chargers supplying power to this battery and
+	 * disable charging for all chargers
+	 */
+	cnt = get_supplied_by_list(psy, chrgr_lst);
+	if (cnt == 0)
+		return;
+	while (cnt--) {
+		if (!IS_PRESENT(chrgr_lst[cnt]))
+			continue;
+		if (is_enable && is_charging_can_be_enabled(chrgr_lst[cnt]) &&
+				is_charging_can_be_enabled(psy)) {
+			enable_charging(chrgr_lst[cnt]);
+			wait_for_charging_enabled(chrgr_lst[cnt]);
+		} else
+			disable_charging(chrgr_lst[cnt]);
+	}
+}
+
+static void __power_supply_trigger_charging_handler(struct power_supply *psy)
+{
+	int i;
+	struct power_supply *psb = NULL;
+
+	mutex_lock(&psy_chrgr.evt_lock);
+
+	if (is_trigger_charging_algo(psy)) {
+
+		if (IS_BATTERY(psy)) {
+			if (trigger_algo(psy))
+				enable_supplied_by_charging(psy, false);
+			else
+				enable_supplied_by_charging(psy, true);
+		} else if (IS_CHARGER(psy)) {
+			for (i = 0; i < psy->num_supplicants; i++) {
+				psb =
+				    power_supply_get_by_name(psy->
+							     supplied_to[i]);
+
+				if (psb && IS_BATTERY(psb) && IS_PRESENT(psb)) {
+					if (trigger_algo(psb)) {
+						disable_charging(psy);
+						break;
+					} else if (is_charging_can_be_enabled
+								(psy)) {
+						enable_charging(psy);
+						wait_for_charging_enabled(psy);
+					}
+				}
+			}
+		}
+		update_sysfs(psy);
+		power_supply_changed(psy);
+	}
+	mutex_unlock(&psy_chrgr.evt_lock);
+
+}
+
+static int __trigger_charging_handler(struct device *dev, void *data)
+{
+	struct power_supply *psy = dev_get_drvdata(dev);
+
+	__power_supply_trigger_charging_handler(psy);
+
+	return 0;
+}
+
+static void trigger_algo_psy_class(struct work_struct *work)
+{
+	class_for_each_device(power_supply_class, NULL, NULL,
+			__trigger_charging_handler);
+}
+
+static bool is_cable_connected(void)
+{
+	int i;
+	struct charger_cable *cable;
+
+	for (i = 0; i < ARRAY_SIZE(cable_list); ++i) {
+		cable = cable_list + i;
+		if (IS_CABLE_ACTIVE(cable->cable_props.cable_stat))
+			return true;
+	}
+	return false;
+}
+
+void power_supply_trigger_charging_handler(struct power_supply *psy)
+{
+	if (!psy_chrgr.is_cable_evt_reg || !is_cable_connected())
+		return;
+
+	wake_up(&psy_chrgr.wait_chrg_enable);
+
+	if (psy)
+		__power_supply_trigger_charging_handler(psy);
+	else
+		schedule_work(&psy_chrgr.algo_trigger_work);
+
+}
+EXPORT_SYMBOL(power_supply_trigger_charging_handler);
+
+static inline int get_battery_thresholds(struct power_supply *psy,
+	struct psy_batt_thresholds *bat_thresh)
+{
+	struct charging_algo *algo;
+	struct ps_batt_chg_prof chrg_profile;
+
+	/* FIXME: Get iterm only for supplied_to arguments*/
+	if (get_batt_prop(&chrg_profile)) {
+		pr_err("%s:Error in getting charge profile\n", __func__);
+		return -EINVAL;
+	}
+
+	algo = power_supply_get_charging_algo(psy, &chrg_profile);
+	if (!algo) {
+		pr_err("%s:Error in getting charging algo!!\n", __func__);
+		return -EINVAL;
+	}
+
+	if (algo->get_batt_thresholds) {
+		algo->get_batt_thresholds(chrg_profile, bat_thresh);
+	} else {
+		pr_err("%s:Error in getting battery thresholds from: %s\n",
+			__func__, algo->name);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int select_chrgr_cable(struct device *dev, void *data)
+{
+	struct power_supply *psy = dev_get_drvdata(dev);
+	struct charger_cable *cable, *max_ma_cable = NULL;
+	struct charger_cable *cable_lst = (struct charger_cable *)data;
+	int max_ma = -1, i;
+
+	if (!IS_CHARGER(psy))
+		return 0;
+
+	mutex_lock(&psy_chrgr.evt_lock);
+
+	/* get cable with maximum capability */
+	for (i = 0; i < ARRAY_SIZE(cable_list); ++i) {
+		cable = cable_lst + i;
+		if ((!IS_CABLE_ACTIVE(cable->cable_props.cable_stat)) ||
+		    (!IS_SUPPORTED_CABLE(psy, cable->psy_cable_type)))
+			continue;
+
+		if ((int)cable->cable_props.ma > max_ma) {
+			max_ma_cable = cable;
+			max_ma = cable->cable_props.ma;
+		}
+	}
+
+	/* no cable connected. disable charging */
+	if (!max_ma_cable) {
+		if ((IS_CHARGER_ENABLED(psy) || IS_CHARGING_ENABLED(psy))) {
+			disable_charging(psy);
+			disable_charger(psy);
+		}
+		set_cc(psy, 0);
+		set_cv(psy, 0);
+		set_inlmt(psy, 0);
+
+		/* set present and online as 0 */
+		set_present(psy, 0);
+		update_charger_online(psy);
+
+		switch_cable(psy, POWER_SUPPLY_CHARGER_TYPE_NONE);
+
+		mutex_unlock(&psy_chrgr.evt_lock);
+		power_supply_changed(psy);
+		return 0;
+	}
+
+	/* cable type changed.New cable connected or existing cable
+	 * capabilities changed.switch cable and enable charger and charging
+	 */
+	set_present(psy, 1);
+
+	if (CABLE_TYPE(psy) != max_ma_cable->psy_cable_type)
+		switch_cable(psy, max_ma_cable->psy_cable_type);
+
+	if (IS_CHARGER_CAN_BE_ENABLED(psy) &&
+			(max_ma_cable->cable_props.ma >= 100)) {
+		struct psy_batt_thresholds bat_thresh;
+		memset(&bat_thresh, 0, sizeof(bat_thresh));
+		enable_charger(psy);
+
+		update_charger_online(psy);
+
+		set_inlmt(psy, max_ma_cable->cable_props.ma);
+		if (!get_battery_thresholds(psy, &bat_thresh)) {
+			if (!ITERM(psy))
+				SET_ITERM(psy, bat_thresh.iterm);
+			SET_MIN_TEMP(psy, bat_thresh.temp_min);
+			SET_MAX_TEMP(psy, bat_thresh.temp_max);
+		}
+
+	} else {
+		set_inlmt(psy, max_ma_cable->cable_props.ma);
+		disable_charger(psy);
+		update_charger_online(psy);
+	}
+
+	mutex_unlock(&psy_chrgr.evt_lock);
+	power_supply_trigger_charging_handler(NULL);
+	/* Cable status is same as previous. No action to be taken */
+	return 0;
+
+}
+
+static void configure_chrgr_source(struct charger_cable *cable_lst)
+{
+	class_for_each_device(power_supply_class, NULL,
+			      cable_lst, select_chrgr_cable);
+}
+
+static void charger_cable_event_worker(struct work_struct *work)
+{
+	struct charger_cable *cable =
+	    container_of(work, struct charger_cable, work);
+	struct extcon_chrgr_cbl_props cable_props;
+
+	if (cable->edev->
+	    get_cable_properties(extcon_cable_name[cable->extcon_cable_type],
+				 (void *)&cable_props)) {
+		pr_err("%s:Error in getting cable(%s) properties from extcon device(%s)",
+			__func__, extcon_cable_name[cable->extcon_cable_type],
+				cable->edev->name);
+		return;
+	} else {
+		if (cable_props.cable_stat != cable->cable_props.cable_stat) {
+			cable->cable_props.cable_stat = cable_props.cable_stat;
+			cable->cable_props.ma = cable_props.ma;
+			configure_chrgr_source(cable_list);
+		}
+	}
+
+}
+
+static int charger_cable_notifier(struct notifier_block *nb,
+				  unsigned long stat, void *ptr)
+{
+	struct charger_cable *cable =
+	    container_of(nb, struct charger_cable, nb);
+
+	schedule_work(&cable->work);
+
+	return NOTIFY_DONE | NOTIFY_STOP_MASK;
+}
+
+int psy_charger_throttle_charger(struct power_supply *psy,
+					unsigned long state)
+{
+	int ret = 0;
+
+	if (!IS_PRESENT(psy))
+		return 0;
+
+	if (state < 0 || state > MAX_THROTTLE_STATE(psy))
+		return -EINVAL;
+
+	mutex_lock(&psy_chrgr.evt_lock);
+
+	switch THROTTLE_ACTION(psy, state)
+	{
+
+		case PSY_THROTTLE_DISABLE_CHARGER:
+			SET_MAX_CC(psy, 0);
+			disable_charger(psy);
+			break;
+		case PSY_THROTTLE_DISABLE_CHARGING:
+			SET_MAX_CC(psy, 0);
+			disable_charging(psy);
+			break;
+		case PSY_THROTTLE_CC_LIMIT:
+			SET_MAX_CC(psy, THROTTLE_CC_VALUE(psy, state));
+			break;
+		case PSY_THROTTLE_INPUT_LIMIT:
+			set_inlmt(psy, THROTTLE_CC_VALUE(psy, state));
+			break;
+		default:
+			pr_err("%s:Invalid throttle action for %s\n",
+						__func__, psy->name);
+			ret = -EINVAL;
+			break;
+	}
+	mutex_unlock(&psy_chrgr.evt_lock);
+
+	/* Configure the driver based on new state */
+	if (!ret)
+		configure_chrgr_source(cable_list);
+	return ret;
+}
+EXPORT_SYMBOL(psy_charger_throttle_charger);
+
+int power_supply_register_charger(struct power_supply *psy)
+{
+	int ret = 0;
+
+	if (!psy_chrgr.is_cable_evt_reg) {
+		mutex_init(&psy_chrgr.evt_lock);
+		init_waitqueue_head(&psy_chrgr.wait_chrg_enable);
+		init_charger_cables(cable_list, ARRAY_SIZE(cable_list));
+		INIT_LIST_HEAD(&psy_chrgr.chrgr_cache_lst);
+		INIT_LIST_HEAD(&psy_chrgr.batt_cache_lst);
+		INIT_WORK(&psy_chrgr.algo_trigger_work, trigger_algo_psy_class);
+		psy_chrgr.is_cable_evt_reg = true;
+	}
+	return ret;
+}
+EXPORT_SYMBOL(power_supply_register_charger);
+
+static inline void flush_charger_context(struct power_supply *psy)
+{
+	struct charger_props *chrgr_prop, *tmp;
+
+	list_for_each_entry_safe(chrgr_prop, tmp,
+				&psy_chrgr.chrgr_cache_lst, node) {
+		if (!strcmp(chrgr_prop->name, psy->name)) {
+			list_del(&chrgr_prop->node);
+			kfree(chrgr_prop);
+		}
+	}
+}
+int power_supply_unregister_charger(struct power_supply *psy)
+{
+	flush_charger_context(psy);
+	return 0;
+}
+EXPORT_SYMBOL(power_supply_unregister_charger);
+
+int power_supply_register_charging_algo(struct charging_algo *algo)
+{
+	struct charging_algo *algo_new;
+
+	algo_new = kzalloc(sizeof(*algo_new), GFP_KERNEL);
+	if (algo_new == NULL) {
+		pr_err("%s: Error allocating memory for algo!!", __func__);
+		return -ENOMEM;
+	}
+	memcpy(algo_new, algo, sizeof(*algo_new));
+
+	list_add_tail(&algo_new->node, &algo_list);
+	return 0;
+}
+EXPORT_SYMBOL(power_supply_register_charging_algo);
+
+int power_supply_unregister_charging_algo(struct charging_algo *algo)
+{
+	struct charging_algo *algo_l, *tmp;
+
+	list_for_each_entry_safe(algo_l, tmp, &algo_list, node) {
+		if (!strcmp(algo_l->name, algo->name)) {
+			list_del(&algo_l->node);
+			kfree(algo_l);
+		}
+	}
+	return 0;
+
+}
+EXPORT_SYMBOL(power_supply_unregister_charging_algo);
+
+#if 0
+static struct charging_algo *get_charging_algo_byname(char *algo_name)
+{
+	struct charging_algo *algo;
+
+	list_for_each_entry(algo, &algo_list, node) {
+		if (!strcmp(algo->name, algo_name))
+			return algo;
+	}
+
+	return NULL;
+}
+#endif
+
+static struct charging_algo *get_charging_algo_by_type
+		(enum batt_chrg_prof_type chrg_prof_type)
+{
+	struct charging_algo *algo;
+
+	list_for_each_entry(algo, &algo_list, node) {
+		if (algo->chrg_prof_type == chrg_prof_type)
+			return algo;
+	}
+
+	return NULL;
+}
+
+struct charging_algo *power_supply_get_charging_algo
+	(struct power_supply *psy, struct ps_batt_chg_prof *batt_prof)
+{
+	return get_charging_algo_by_type(batt_prof->chrg_prof_type);
+
+}
+EXPORT_SYMBOL_GPL(power_supply_get_charging_algo);
diff --git a/drivers/power/power_supply_charger.h b/drivers/power/power_supply_charger.h
new file mode 100644
index 0000000..63d4b88
--- /dev/null
+++ b/drivers/power/power_supply_charger.h
@@ -0,0 +1,245 @@
+
+#ifndef __POWER_SUPPLY_CHARGER_H__
+
+#define __POWER_SUPPLY_CHARGER_H__
+#include <linux/power/battery_id.h>
+#include <linux/power_supply.h>
+
+#define MAX_CUR_VOLT_SAMPLES 3
+#define DEF_CUR_VOLT_SAMPLE_JIFF (30*HZ)
+
+enum psy_algo_stat {
+	PSY_ALGO_STAT_UNKNOWN,
+	PSY_ALGO_STAT_NOT_CHARGE,
+	PSY_ALGO_STAT_CHARGE,
+	PSY_ALGO_STAT_FULL,
+	PSY_ALGO_STAT_MAINT,
+};
+
+struct batt_props {
+	struct list_head node;
+	const char *name;
+	long voltage_now;
+	long voltage_now_cache[MAX_CUR_VOLT_SAMPLES];
+	long current_now;
+	long current_now_cache[MAX_CUR_VOLT_SAMPLES];
+	int temperature;
+	long status;
+	unsigned long tstamp;
+	enum psy_algo_stat algo_stat;
+	int health;
+	int throttle_state;
+};
+
+struct charger_props {
+	struct list_head node;
+	const char *name;
+	bool present;
+	bool is_charging;
+	int health;
+	bool online;
+	unsigned long cable;
+	unsigned long tstamp;
+};
+
+struct psy_batt_thresholds {
+	int temp_min;
+	int temp_max;
+	unsigned int iterm;
+};
+
+struct charging_algo {
+	struct list_head node;
+	unsigned int chrg_prof_type;
+	char *name;
+	enum psy_algo_stat (*get_next_cc_cv)(struct batt_props,
+			struct ps_batt_chg_prof, unsigned long *cc,
+			unsigned long *cv);
+	int (*get_batt_thresholds)(struct ps_batt_chg_prof,
+			struct psy_batt_thresholds *bat_thr);
+};
+
+
+extern int power_supply_register_charging_algo(struct charging_algo *);
+extern int power_supply_unregister_charging_algo(struct charging_algo *);
+
+static inline int set_ps_int_property(struct power_supply *psy,
+				      enum power_supply_property psp,
+				      int prop_val)
+{
+
+	union power_supply_propval val;
+
+	val.intval = prop_val;
+	return psy->set_property(psy, psp, &val);
+}
+
+static inline int get_ps_int_property(struct power_supply *psy,
+				      enum power_supply_property psp)
+{
+	union power_supply_propval val;
+
+	val.intval = 0;
+
+	psy->get_property(psy, psp, &val);
+	return val.intval;
+}
+/* Define a TTL for some properies to optimize the frequency of
+* algorithm calls. This can be used by properties which will be changed
+* very frequently (eg. current, volatge..)
+*/
+#define PROP_TTL (HZ*10)
+#define enable_charging(psy) \
+		({if ((CABLE_TYPE(psy) != POWER_SUPPLY_CHARGER_TYPE_NONE) &&\
+			!IS_CHARGING_ENABLED(psy)) { \
+		enable_charger(psy); \
+		set_ps_int_property(psy, POWER_SUPPLY_PROP_ENABLE_CHARGING,\
+					true); } })
+#define disable_charging(psy) \
+		set_ps_int_property(psy,\
+				POWER_SUPPLY_PROP_ENABLE_CHARGING, false);
+
+#define enable_charger(psy) \
+		set_ps_int_property(psy, POWER_SUPPLY_PROP_ENABLE_CHARGER, true)
+#define disable_charger(psy) \
+		({  disable_charging(psy); \
+			set_ps_int_property(psy,\
+				POWER_SUPPLY_PROP_ENABLE_CHARGER, false); })
+
+#define set_cc(psy, cc) \
+		set_ps_int_property(psy, POWER_SUPPLY_PROP_CHARGE_CURRENT, cc)
+
+#define set_cv(psy, cv) \
+		set_ps_int_property(psy, POWER_SUPPLY_PROP_CHARGE_VOLTAGE, cv)
+
+#define set_inlmt(psy, inlmt) \
+		set_ps_int_property(psy, POWER_SUPPLY_PROP_INLMT, inlmt)
+
+#define set_present(psy, present) \
+		set_ps_int_property(psy, POWER_SUPPLY_PROP_PRESENT, present)
+
+#define SET_MAX_CC(psy, max_cc) \
+		set_ps_int_property(psy,\
+				POWER_SUPPLY_PROP_MAX_CHARGE_CURRENT, max_cc)
+#define SET_ITERM(psy, iterm) \
+		set_ps_int_property(psy,\
+				POWER_SUPPLY_PROP_CHARGE_TERM_CUR, iterm)
+#define SET_MAX_TEMP(psy, temp) \
+		set_ps_int_property(psy,\
+				POWER_SUPPLY_PROP_MAX_TEMP, temp)
+#define SET_MIN_TEMP(psy, temp) \
+		set_ps_int_property(psy,\
+				POWER_SUPPLY_PROP_MIN_TEMP, temp)
+#define switch_cable(psy, new_cable) \
+		set_ps_int_property(psy,\
+				POWER_SUPPLY_PROP_CABLE_TYPE, new_cable)
+
+#define HEALTH(psy) \
+		get_ps_int_property(psy, POWER_SUPPLY_PROP_HEALTH)
+#define CV(psy) \
+		get_ps_int_property(psy, POWER_SUPPLY_PROP_CHARGE_VOLTAGE)
+#define CC(psy) \
+		get_ps_int_property(psy, POWER_SUPPLY_PROP_CHARGE_CURRENT)
+#define INLMT(psy) \
+		get_ps_int_property(psy, POWER_SUPPLY_PROP_INLMT)
+#define MAX_CC(psy) \
+		get_ps_int_property(psy, POWER_SUPPLY_PROP_MAX_CHARGE_CURRENT)
+#define MAX_CV(psy) \
+		get_ps_int_property(psy, POWER_SUPPLY_PROP_MAX_CHARGE_VOLTAGE)
+#define VOLTAGE_NOW(psy) \
+		get_ps_int_property(psy, POWER_SUPPLY_PROP_VOLTAGE_NOW)
+#define VOLTAGE_OCV(psy) \
+		get_ps_int_property(psy, POWER_SUPPLY_PROP_VOLTAGE_OCV)
+#define CURRENT_NOW(psy) \
+		get_ps_int_property(psy, POWER_SUPPLY_PROP_CURRENT_NOW)
+#define STATUS(psy) \
+		get_ps_int_property(psy, POWER_SUPPLY_PROP_STATUS)
+#define TEMPERATURE(psy) \
+		get_ps_int_property(psy, POWER_SUPPLY_PROP_TEMP)
+#define BATTERY_TYPE(psy) \
+		get_ps_int_property(psy, POWER_SUPPLY_PROP_TECHNOLOGY)
+#define PRIORITY(psy) \
+		get_ps_int_property(psy, POWER_SUPPLY_PROP_PRIORITY)
+#define CABLE_TYPE(psy) \
+		get_ps_int_property(psy, POWER_SUPPLY_PROP_CABLE_TYPE)
+#define ONLINE(psy) \
+		get_ps_int_property(psy, POWER_SUPPLY_PROP_ONLINE)
+#define INLMT(psy) \
+		get_ps_int_property(psy, POWER_SUPPLY_PROP_INLMT)
+#define ITERM(psy) \
+		get_ps_int_property(psy, POWER_SUPPLY_PROP_CHARGE_TERM_CUR)
+
+#define IS_CHARGING_ENABLED(psy) \
+		get_ps_int_property(psy, POWER_SUPPLY_PROP_ENABLE_CHARGING)
+#define IS_CHARGER_ENABLED(psy) \
+		get_ps_int_property(psy, POWER_SUPPLY_PROP_ENABLE_CHARGER)
+#define IS_BATTERY(psy) (psy->type == POWER_SUPPLY_TYPE_BATTERY)
+#define IS_CHARGER(psy) (psy->type == POWER_SUPPLY_TYPE_USB ||\
+				psy->type == POWER_SUPPLY_TYPE_USB_CDP || \
+			psy->type == POWER_SUPPLY_TYPE_USB_DCP ||\
+			psy->type == POWER_SUPPLY_TYPE_USB_ACA)
+#define IS_ONLINE(psy) \
+		(get_ps_int_property(psy, POWER_SUPPLY_PROP_ONLINE) == 1)
+#define IS_PRESENT(psy) \
+		(get_ps_int_property(psy, POWER_SUPPLY_PROP_PRESENT) == 1)
+#define IS_SUPPORTED_CABLE(psy, cable_type) \
+		(psy->supported_cables & cable_type)
+#define IS_CABLE_ACTIVE(status) \
+	((status != EXTCON_CHRGR_CABLE_DISCONNECTED))
+
+#define IS_CHARGER_PROP_CHANGED(prop, cache_prop)\
+	((cache_prop.online != prop.online) || \
+	(cache_prop.present != prop.present) || \
+	(cache_prop.is_charging != prop.is_charging) || \
+	(cache_prop.health != prop.health))
+
+#define IS_BAT_PROP_CHANGED(bat_prop, bat_cache)\
+	((bat_cache.voltage_now != bat_prop.voltage_now) || \
+	 (time_after64((__u64)bat_prop.tstamp, \
+		       (__u64)(bat_cache.tstamp + PROP_TTL)) && \
+	  ((bat_cache.current_now != bat_prop.current_now) || \
+	   (bat_cache.voltage_now != bat_prop.voltage_now))) || \
+	 (bat_cache.temperature != bat_prop.temperature) || \
+	 (bat_cache.health != bat_prop.health) || \
+	(bat_cache.throttle_state != bat_prop.throttle_state))
+
+#define THROTTLE_ACTION(psy, state)\
+		(((psy->throttle_states)+state)->throttle_action)
+
+#define MAX_THROTTLE_STATE(psy)\
+		((psy->num_throttle_states))
+
+#define CURRENT_THROTTLE_STATE(psy)\
+		(get_ps_int_property(psy,\
+			POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT))
+
+#define CURRENT_THROTTLE_ACTION(psy)\
+		THROTTLE_ACTION(psy, CURRENT_THROTTLE_STATE(psy))
+
+#define THROTTLE_CC_VALUE(psy, state)\
+		(((psy->throttle_states)+state)->throttle_val)
+
+#define IS_CHARGER_CAN_BE_ENABLED(psy) \
+	(CURRENT_THROTTLE_ACTION(psy) != PSY_THROTTLE_DISABLE_CHARGER)
+
+#define IS_HEALTH_GOOD(psy)\
+	(HEALTH(psy) == POWER_SUPPLY_HEALTH_GOOD)
+
+static inline int set_battery_status(struct power_supply *psy, int status)
+{
+	if (STATUS(psy) != status) {
+		set_ps_int_property(psy, POWER_SUPPLY_PROP_STATUS, status);
+		return true;
+	}
+	return false;
+}
+
+static inline void set_charger_online(struct power_supply *psy, int online)
+{
+
+	if (ONLINE(psy) != online)
+		set_ps_int_property(psy, POWER_SUPPLY_PROP_ONLINE, online);
+
+}
+
+#endif
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
index 082d3c2..5704893 100644
--- a/drivers/power/power_supply_core.c
+++ b/drivers/power/power_supply_core.c
@@ -19,13 +19,24 @@
 #include <linux/power_supply.h>
 #include <linux/thermal.h>
 #include "power_supply.h"
+#include "power_supply_charger.h"
 
 /* exported for the APM Power driver, APM emulation */
 struct class *power_supply_class;
 EXPORT_SYMBOL_GPL(power_supply_class);
 
+ATOMIC_NOTIFIER_HEAD(power_supply_notifier);
+EXPORT_SYMBOL_GPL(power_supply_notifier);
+
 static struct device_type power_supply_dev_type;
 
+static struct mutex ps_chrg_evt_lock;
+
+static struct power_supply_charger_cap power_supply_chrg_cap = {
+		.chrg_evt	= POWER_SUPPLY_CHARGER_EVENT_DISCONNECT,
+		.chrg_type	= POWER_SUPPLY_TYPE_USB,
+		.mA		= 0	/* 0 mA */
+};
 static bool __power_supply_is_supplied_by(struct power_supply *supplier,
 					 struct power_supply *supply)
 {
@@ -80,9 +91,13 @@
 
 		class_for_each_device(power_supply_class, NULL, psy,
 				      __power_supply_changed_work);
+		power_supply_trigger_charging_handler(psy);
 
 		power_supply_update_leds(psy);
 
+		atomic_notifier_call_chain(&power_supply_notifier,
+				POWER_SUPPLY_PROP_CHANGED, psy);
+
 		kobject_uevent(&psy->dev->kobj, KOBJ_CHANGE);
 		spin_lock_irqsave(&psy->changed_lock, flags);
 	}
@@ -95,6 +110,11 @@
 {
 	unsigned long flags;
 
+	if (psy == NULL) {
+		power_supply_trigger_charging_handler(psy);
+		return;
+	}
+
 	dev_dbg(psy->dev, "%s\n", __func__);
 
 	spin_lock_irqsave(&psy->changed_lock, flags);
@@ -105,6 +125,37 @@
 }
 EXPORT_SYMBOL_GPL(power_supply_changed);
 
+static int __power_supply_charger_event(struct device *dev, void *data)
+{
+	struct power_supply_charger_cap *cap =
+				(struct power_supply_charger_cap *)data;
+	struct power_supply *psy = dev_get_drvdata(dev);
+
+	if (psy->charging_port_changed)
+		psy->charging_port_changed(psy, cap);
+
+	return 0;
+}
+
+void power_supply_charger_event(struct power_supply_charger_cap cap)
+{
+	class_for_each_device(power_supply_class, NULL, &cap,
+				      __power_supply_charger_event);
+
+	mutex_lock(&ps_chrg_evt_lock);
+	memcpy(&power_supply_chrg_cap, &cap, sizeof(power_supply_chrg_cap));
+	mutex_unlock(&ps_chrg_evt_lock);
+}
+EXPORT_SYMBOL_GPL(power_supply_charger_event);
+
+void power_supply_query_charger_caps(struct power_supply_charger_cap *cap)
+{
+	mutex_lock(&ps_chrg_evt_lock);
+	memcpy(cap, &power_supply_chrg_cap, sizeof(power_supply_chrg_cap));
+	mutex_unlock(&ps_chrg_evt_lock);
+}
+EXPORT_SYMBOL_GPL(power_supply_query_charger_caps);
+
 #ifdef CONFIG_OF
 #include <linux/of.h>
 
@@ -288,12 +339,12 @@
 	unsigned int count = 0;
 
 	error = class_for_each_device(power_supply_class, NULL, &count,
-				      __power_supply_is_system_supplied);
+					__power_supply_is_system_supplied);
 
 	/*
-	 * If no power class device was found at all, most probably we are
-	 * running on a desktop system, so assume we are on mains power.
-	 */
+	* If no power class device was found at all, most probably we are
+	* running on a desktop system, so assume we are on mains power.
+	*/
 	if (count == 0)
 		return 1;
 
@@ -301,6 +352,30 @@
 }
 EXPORT_SYMBOL_GPL(power_supply_is_system_supplied);
 
+static int __power_supply_is_battery_connected(struct device *dev, void *data)
+{
+	union power_supply_propval ret = {0,};
+	struct power_supply *psy = dev_get_drvdata(dev);
+
+	if (psy->type == POWER_SUPPLY_TYPE_BATTERY) {
+		if (psy->get_property(psy, POWER_SUPPLY_PROP_PRESENT, &ret))
+			return 0;
+		if (ret.intval)
+			return ret.intval;
+	}
+	return 0;
+}
+
+int power_supply_is_battery_connected(void)
+{
+	int error;
+
+	error = class_for_each_device(power_supply_class, NULL, NULL,
+					__power_supply_is_battery_connected);
+	return error;
+}
+EXPORT_SYMBOL_GPL(power_supply_is_battery_connected);
+
 int power_supply_set_battery_charged(struct power_supply *psy)
 {
 	if (psy->type == POWER_SUPPLY_TYPE_BATTERY && psy->set_charged) {
@@ -341,15 +416,28 @@
 	kfree(dev);
 }
 
+int power_supply_reg_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_register(&power_supply_notifier, nb);
+}
+EXPORT_SYMBOL_GPL(power_supply_reg_notifier);
+
+void power_supply_unreg_notifier(struct notifier_block *nb)
+{
+	atomic_notifier_chain_unregister(&power_supply_notifier, nb);
+}
+EXPORT_SYMBOL_GPL(power_supply_unreg_notifier);
+
 #ifdef CONFIG_THERMAL
 static int power_supply_read_temp(struct thermal_zone_device *tzd,
-		unsigned long *temp)
+				  long *temp)
 {
 	struct power_supply *psy;
 	union power_supply_propval val;
 	int ret;
 
-	WARN_ON(tzd == NULL);
+	if (WARN_ON(tzd == NULL))
+		return -EINVAL;
 	psy = tzd->devdata;
 	ret = psy->get_property(psy, POWER_SUPPLY_PROP_TEMP, &val);
 
@@ -396,6 +484,8 @@
 	union power_supply_propval val;
 	int ret;
 
+	if (WARN_ON(tcd == NULL))
+		return -EINVAL;
 	psy = tcd->devdata;
 	ret = psy->get_property(psy,
 		POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX, &val);
@@ -412,6 +502,8 @@
 	union power_supply_propval val;
 	int ret;
 
+	if (WARN_ON(tcd == NULL))
+		return -EINVAL;
 	psy = tcd->devdata;
 	ret = psy->get_property(psy,
 		POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, &val);
@@ -428,11 +520,22 @@
 	union power_supply_propval val;
 	int ret;
 
+	if (WARN_ON(tcd == NULL))
+		return -EINVAL;
+
 	psy = tcd->devdata;
+
+	ret = psy->get_property(psy,
+		POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX, &val);
+	if (state >= val.intval)
+		return -EINVAL;
+
 	val.intval = state;
 	ret = psy->set_property(psy,
 		POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, &val);
 
+	psy_charger_throttle_charger(psy, state);
+
 	return ret;
 }
 
@@ -538,10 +641,16 @@
 	if (rc)
 		goto create_triggers_failed;
 
+	if (IS_CHARGER(psy))
+		rc = power_supply_register_charger(psy);
+	if (rc)
+		goto charger_register_failed;
+
 	power_supply_changed(psy);
 
 	goto success;
 
+charger_register_failed:
 create_triggers_failed:
 	psy_unregister_cooler(psy);
 register_cooler_failed:
@@ -562,6 +671,8 @@
 {
 	cancel_work_sync(&psy->changed_work);
 	sysfs_remove_link(&psy->dev->kobj, "powers");
+	if (IS_CHARGER(psy))
+		power_supply_unregister_charger(psy);
 	power_supply_remove_triggers(psy);
 	psy_unregister_cooler(psy);
 	psy_unregister_thermal(psy);
@@ -578,6 +689,7 @@
 
 	power_supply_class->dev_uevent = power_supply_uevent;
 	power_supply_init_attrs(&power_supply_dev_type);
+	mutex_init(&ps_chrg_evt_lock);
 
 	return 0;
 }
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index 1f7d79b..4602e54 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -44,7 +44,7 @@
 					  struct device_attribute *attr,
 					  char *buf) {
 	static char *type_text[] = {
-		"Unknown", "Battery", "UPS", "Mains", "USB",
+		"Unknown", "Battery", "UPS", "Mains", "USB", "USB",
 		"USB_DCP", "USB_CDP", "USB_ACA"
 	};
 	static char *status_text[] = {
@@ -168,8 +168,14 @@
 	POWER_SUPPLY_ATTR(constant_charge_current_max),
 	POWER_SUPPLY_ATTR(constant_charge_voltage),
 	POWER_SUPPLY_ATTR(constant_charge_voltage_max),
+	POWER_SUPPLY_ATTR(charge_current_limit),
 	POWER_SUPPLY_ATTR(charge_control_limit),
 	POWER_SUPPLY_ATTR(charge_control_limit_max),
+	POWER_SUPPLY_ATTR(charge_current),
+	POWER_SUPPLY_ATTR(max_charge_current),
+	POWER_SUPPLY_ATTR(charge_voltage),
+	POWER_SUPPLY_ATTR(max_charge_voltage),
+	POWER_SUPPLY_ATTR(input_cur_limit),
 	POWER_SUPPLY_ATTR(energy_full_design),
 	POWER_SUPPLY_ATTR(energy_empty_design),
 	POWER_SUPPLY_ATTR(energy_full),
@@ -183,6 +189,8 @@
 	POWER_SUPPLY_ATTR(temp),
 	POWER_SUPPLY_ATTR(temp_alert_min),
 	POWER_SUPPLY_ATTR(temp_alert_max),
+	POWER_SUPPLY_ATTR(max_temp),
+	POWER_SUPPLY_ATTR(min_temp),
 	POWER_SUPPLY_ATTR(temp_ambient),
 	POWER_SUPPLY_ATTR(temp_ambient_alert_min),
 	POWER_SUPPLY_ATTR(temp_ambient_alert_max),
@@ -191,6 +199,11 @@
 	POWER_SUPPLY_ATTR(time_to_full_now),
 	POWER_SUPPLY_ATTR(time_to_full_avg),
 	POWER_SUPPLY_ATTR(type),
+	POWER_SUPPLY_ATTR(charge_term_cur),
+	POWER_SUPPLY_ATTR(enable_charging),
+	POWER_SUPPLY_ATTR(enable_charger),
+	POWER_SUPPLY_ATTR(cable_type),
+	POWER_SUPPLY_ATTR(priority),
 	POWER_SUPPLY_ATTR(scope),
 	/* Local extensions */
 	POWER_SUPPLY_ATTR(usb_hc),
@@ -223,6 +236,9 @@
 		int property = psy->properties[i];
 
 		if (property == attrno) {
+			if (psy->property_is_privileged_read &&
+			psy->property_is_privileged_read(psy, property) > 0)
+				mode = S_IRUSR | S_IRGRP;
 			if (psy->property_is_writeable &&
 			    psy->property_is_writeable(psy, property) > 0)
 				mode |= S_IWUSR;
diff --git a/drivers/rapidio/switches/idt_gen2.c b/drivers/rapidio/switches/idt_gen2.c
index 809b7a3..5d3b0f0 100644
--- a/drivers/rapidio/switches/idt_gen2.c
+++ b/drivers/rapidio/switches/idt_gen2.c
@@ -15,6 +15,8 @@
 #include <linux/rio_drv.h>
 #include <linux/rio_ids.h>
 #include <linux/delay.h>
+
+#include <asm/page.h>
 #include "../rio.h"
 
 #define LOCAL_RTE_CONF_DESTID_SEL	0x010070
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 8bb2644..27b22de 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -514,5 +514,9 @@
 	  This driver provides support for the voltage regulators on the
 	  AS3711 PMIC
 
-endif
+config REGULATOR_PMIC_BASIN_COVE
+        tristate "PMIC Basin Cove voltage regulator"
+        help
+          This driver controls intel Basin Cove pmic voltage output regulator
 
+endif
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 47a34ff..a89b879 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -2,6 +2,8 @@
 # Makefile for regulator drivers.
 #
 
+CFLAGS_pmic_basin_cove.o		:= -Werror
+CFLAGS_pmic_crystal_cove.0		:= -Werror
 
 obj-$(CONFIG_REGULATOR) += core.o dummy.o fixed-helper.o
 obj-$(CONFIG_OF) += of_regulator.o
@@ -48,6 +50,8 @@
 obj-$(CONFIG_REGULATOR_TPS51632) += tps51632-regulator.o
 obj-$(CONFIG_REGULATOR_PCAP) += pcap-regulator.o
 obj-$(CONFIG_REGULATOR_PCF50633) += pcf50633-regulator.o
+obj-$(CONFIG_REGULATOR_PMIC_BASIN_COVE) += pmic_basin_cove.o
+obj-$(CONFIG_REGULATOR_CRYSTAL_COVE) += pmic_crystal_cove.o
 obj-$(CONFIG_REGULATOR_RC5T583)  += rc5t583-regulator.o
 obj-$(CONFIG_REGULATOR_S2MPS11) += s2mps11.o
 obj-$(CONFIG_REGULATOR_S5M8767) += s5m8767.o
diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c
index e5c03b5..937cffb 100644
--- a/drivers/regulator/fixed.c
+++ b/drivers/regulator/fixed.c
@@ -267,7 +267,7 @@
 {
 	return platform_driver_register(&regulator_fixed_voltage_driver);
 }
-subsys_initcall(regulator_fixed_voltage_init);
+fs_initcall_sync(regulator_fixed_voltage_init);
 
 static void __exit regulator_fixed_voltage_exit(void)
 {
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index d4d377c..97d3774 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -63,4 +63,15 @@
 	  It's safe to say n here if you're not interested in multimedia
 	  offloading.
 
+config INTEL_MID_REMOTEPROC
+	tristate "Intel MID remoteproc support"
+	depends on X86
+	select REMOTEPROC
+	select RPMSG
+	help
+	  Say y to support Intel MID's remote processors core driver
+	  and SCU driver.
+	  Please say y here if you want to enable x86 remoteproc core
+	  driver support.
+
 endmenu
diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile
index ac2ff75..efb0eb8 100644
--- a/drivers/remoteproc/Makefile
+++ b/drivers/remoteproc/Makefile
@@ -10,3 +10,4 @@
 obj-$(CONFIG_OMAP_REMOTEPROC)		+= omap_remoteproc.o
 obj-$(CONFIG_STE_MODEM_RPROC)	 	+= ste_modem_rproc.o
 obj-$(CONFIG_DA8XX_REMOTEPROC)		+= da8xx_remoteproc.o
+obj-$(CONFIG_INTEL_MID_REMOTEPROC)	+= intel_mid_rproc_scu.o intel_mid_rproc_core.o
diff --git a/drivers/remoteproc/intel_mid_rproc_core.c b/drivers/remoteproc/intel_mid_rproc_core.c
new file mode 100644
index 0000000..dc30ba1f
--- /dev/null
+++ b/drivers/remoteproc/intel_mid_rproc_core.c
@@ -0,0 +1,269 @@
+/*
+ * INTEL MID Remote Processor Core driver
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/remoteproc.h>
+#include <linux/rpmsg.h>
+#include <linux/slab.h>
+#include <linux/virtio_ring.h>
+#include <linux/virtio_ids.h>
+#include <linux/platform_data/intel_mid_remoteproc.h>
+
+#include "intel_mid_rproc_core.h"
+#include "remoteproc_internal.h"
+
+#define RPMSG_NS_ADDR	53
+
+/**
+ * rpmsg_ns_alloc() - allocate a name service annoucement structure
+ * @name: name of remote service
+ * @id: rproc type
+ * @addr: address of remote service
+ */
+struct rpmsg_ns_info *rpmsg_ns_alloc(const char *name, int id, u32 addr)
+{
+	struct rpmsg_ns_info *ns_info;
+
+	ns_info = kzalloc(sizeof(struct rpmsg_ns_info), GFP_KERNEL);
+	if (ns_info) {
+		strcpy(ns_info->name, name);
+		ns_info->type = id;
+		ns_info->addr = addr;
+		ns_info->flags = RPMSG_NS_CREATE;
+	}
+
+	return ns_info;
+};
+EXPORT_SYMBOL_GPL(rpmsg_ns_alloc);
+
+/**
+ * rpmsg_ns_add_to_list() -- add a name service node to the global list
+ * @info: name service node
+ */
+void rpmsg_ns_add_to_list(struct rpmsg_ns_info *info,
+				struct rpmsg_ns_list *nslist)
+{
+	mutex_lock(&nslist->lock);
+	list_add_tail(&info->node, &nslist->list);
+	mutex_unlock(&nslist->lock);
+}
+EXPORT_SYMBOL_GPL(rpmsg_ns_add_to_list);
+
+/**
+ * free_rpmsg_ns() -- free rpmsg name service node
+ * @info: name service node
+ */
+void free_rpmsg_ns(struct rpmsg_ns_info *info)
+{
+	kfree(info);
+}
+
+/**
+ * rpmsg_ns_del_list() -- free rpmsg name service list
+ */
+void rpmsg_ns_del_list(struct rpmsg_ns_list *nslist)
+{
+	struct rpmsg_ns_info *info, *next;
+	mutex_lock(&nslist->lock);
+	list_for_each_entry_safe(info, next, &nslist->list, node) {
+		list_del(&info->node);
+		free_rpmsg_ns(info);
+	}
+	mutex_unlock(&nslist->lock);
+}
+EXPORT_SYMBOL_GPL(rpmsg_ns_del_list);
+
+/**
+ * find_rvdev() - find the rproc state of a supported virtio device
+ * @rproc: rproc handle
+ * @id: virtio device id
+ */
+struct rproc_vdev *find_rvdev(struct rproc *rproc, int id)
+{
+	struct rproc_vdev *rvdev;
+
+	list_for_each_entry(rvdev, &rproc->rvdevs, node)
+		if (rvdev->vdev.id.device == id)
+			return rvdev;
+
+	return NULL;
+}
+
+/*
+ * Since we could not get vring structure directly from rproc_vring
+ * structure, we have to create two local vrings and identify them
+ * by matching with rproc_vrings.
+ * @id: virtio device id.
+ * Currently one rproc_vdev is supported by firmware, and the id is
+ * VIRTIO_ID_RPMSG (declared in linux/virtio_ids.h).
+ */
+int find_vring_index(struct rproc *rproc, int vqid, int id)
+{
+	struct rproc_vdev *rvdev;
+	struct device *dev = rproc->dev.parent;
+	int vring_idx = 0;
+
+	rvdev = find_rvdev(rproc, id);
+	if (rvdev == NULL) {
+		dev_err(dev, "virtio device not found\n");
+		return -EINVAL;
+	}
+
+	while (vring_idx < RVDEV_NUM_VRINGS) {
+		if (rvdev->vring[vring_idx].notifyid == vqid)
+			break;
+		vring_idx++;
+	}
+
+	/* no match found? there's a problem */
+	if (vring_idx == RVDEV_NUM_VRINGS) {
+		dev_err(dev, "Can not find vring\n");
+		return -EINVAL;
+	}
+
+	return vring_idx;
+}
+
+void intel_mid_rproc_vring_init(struct rproc *rproc,
+			struct vring *vring, enum local_vring_idx id)
+{
+	int align, len;
+	void *addr;
+	struct rproc_vdev *rvdev;
+	struct device *dev = rproc->dev.parent;
+
+	rvdev = find_rvdev(rproc, VIRTIO_ID_RPMSG);
+	if (rvdev == NULL) {
+		dev_err(dev, "virtio device not found\n");
+		return;
+	}
+
+	addr = rvdev->vring[id].va;
+	align = rvdev->vring[id].align;
+	len = rvdev->vring[id].len;
+	vring_init(vring, len, addr, align);
+}
+
+/**
+ * intel_mid_rproc_vq_interrupt() - inform a vq interrupt to rproc
+ *				    after vq buffers are handled
+ * @rproc: rproc handle
+ * @msg: vq notify id
+ */
+void intel_mid_rproc_vq_interrupt(struct rproc *rproc, int msg)
+{
+	struct device *dev = rproc->dev.parent;
+
+	if (rproc_vq_interrupt(rproc, msg) == IRQ_NONE)
+		dev_err(dev, "no message was found in vqid %d\n", msg);
+}
+
+/**
+ * intel_mid_rproc_msg_handle() - generic interface as a vq buffer handle
+ *				  during rpmsg transaction
+ * @iproc: intel mid rproc data
+ */
+int intel_mid_rproc_msg_handle(struct intel_mid_rproc *iproc)
+{
+	int ret;
+	struct vring *r_vring, *s_vring;
+	void *r_virt_addr, *s_virt_addr;
+	u16 r_idx, s_idx;
+	u64 r_dma_addr, s_dma_addr;
+	u32 r_len, s_len;
+
+	r_vring = &iproc->rx_vring;
+	s_vring = &iproc->tx_vring;
+
+	r_idx = iproc->r_vring_last_used & (r_vring->num - 1);
+	s_idx = iproc->s_vring_last_used & (s_vring->num - 1);
+
+	r_dma_addr = r_vring->desc[r_idx].addr;
+	s_dma_addr = s_vring->desc[s_idx].addr;
+
+	r_virt_addr = phys_to_virt(r_dma_addr);
+	s_virt_addr = phys_to_virt(s_dma_addr);
+
+	ret = iproc->rproc_rpmsg_handle(r_virt_addr, s_virt_addr,
+						&r_len, &s_len);
+
+	r_vring->used->ring[r_idx].id = r_idx;
+	r_vring->used->ring[r_idx].len = r_len;
+	r_vring->used->idx++;
+
+	s_vring->used->ring[s_idx].id = s_idx;
+	s_vring->used->ring[s_idx].len = s_len;
+	s_vring->used->idx++;
+
+	iproc->r_vring_last_used++;
+	iproc->s_vring_last_used++;
+
+	return ret;
+}
+
+/**
+ * Remoteproc side rx buffer handler during name service creation.
+ * @iproc: intel mid rproc data
+ * @ns_info: name service info
+ *
+ * After remote processor receives name service messages, it needs to
+ * update the elements of its virtio device's rx virtqueue buffer
+ * before next rpmsg transaction.
+ * Here we have this function simulating the above effect.
+ */
+int intel_mid_rproc_ns_handle(struct intel_mid_rproc *iproc,
+				struct rpmsg_ns_info *ns_info)
+{
+	u16 index;
+	u32 len;
+	u64 dma_addr;
+	void *virt_addr;
+
+	struct vring *r_vring;
+	struct rpmsg_hdr *msg;
+	struct rpmsg_ns_msg *nsm;
+
+	if (ns_info == NULL) {
+		pr_err("ns_info = NULL\n");
+		return -ENODEV;
+	}
+
+	r_vring = &iproc->rx_vring;
+
+	index = iproc->r_vring_last_used & (r_vring->num - 1);
+
+	len = sizeof(*msg) + sizeof(*nsm);
+
+	dma_addr = r_vring->desc[index].addr;
+	virt_addr = phys_to_virt(dma_addr);
+
+	msg = (struct rpmsg_hdr *)virt_addr;
+	nsm = (struct rpmsg_ns_msg *)(virt_addr + sizeof(*msg));
+
+	nsm->addr = ns_info->addr;
+	nsm->flags = ns_info->flags;
+	strncpy(nsm->name, ns_info->name, RPMSG_NAME_SIZE);
+
+	msg->len = sizeof(*nsm);
+	msg->src = nsm->addr;
+	msg->dst = RPMSG_NS_ADDR;
+
+	r_vring->used->ring[index].id = index;
+	r_vring->used->ring[index].len = len;
+	r_vring->used->idx++;
+
+	iproc->r_vring_last_used++;
+
+	return 0;
+}
diff --git a/drivers/remoteproc/intel_mid_rproc_core.h b/drivers/remoteproc/intel_mid_rproc_core.h
new file mode 100644
index 0000000..bfe6f6c
--- /dev/null
+++ b/drivers/remoteproc/intel_mid_rproc_core.h
@@ -0,0 +1,82 @@
+/*
+ * INTEL MID Remote Processor Core Head File
+ *
+ * Copyright (C) 2012 Intel, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+typedef int (*rpmsg_handle_t)(void *rx_buf, void *tx_buf,
+				u32 *r_len, u32 *s_len);
+
+int scu_ipc_rpmsg_handle(void *rx_buf, void *tx_buf, u32 *r_len, u32 *s_len);
+int psh_ipc_rpmsg_handle(void *rx_buf, void *tx_buf, u32 *len);
+
+#define RPROC_FW_LOADING_TIMEOUT	(3 * HZ)
+#define IPROC_NAME_SIZE 20
+
+/**
+ * struct intel_mid_rproc - intel mid remote processor
+ * @ns_enabled: name service enabled flag
+ * @name: rproc name
+ * @type: rproc type
+ * @r_vring_last_used: last used index of rx vring
+ * @s_vring_last_used: last used index of tx vring
+ * @rproc: rproc handle
+ * @rx_vring: rproc rx vring
+ * @tx_vring: rproc tx vring
+ * @ns_info: loop cursor when creating ns channels
+ * @rproc_rpmsg_handle: rproc private rpmsg handle
+ */
+struct intel_mid_rproc {
+	bool ns_enabled;
+	char name[IPROC_NAME_SIZE];
+	u32 type;
+	u32 r_vring_last_used;
+	u32 s_vring_last_used;
+	struct rproc *rproc;
+	struct vring rx_vring;
+	struct vring tx_vring;
+	struct rpmsg_ns_info *ns_info;
+	rpmsg_handle_t rproc_rpmsg_handle;
+};
+
+enum local_vring_idx {
+	RX_VRING,
+	TX_VRING,
+};
+
+extern void intel_mid_rproc_vq_interrupt(struct rproc *rproc, int msg);
+extern int intel_mid_rproc_msg_handle(struct intel_mid_rproc *iproc);
+extern int intel_mid_rproc_ns_handle(struct intel_mid_rproc *iproc,
+					struct rpmsg_ns_info *ns_info);
+
+extern struct rproc_vdev *find_rvdev(struct rproc *rproc, int id);
+extern int find_vring_index(struct rproc *rproc, int vqid, int id);
+extern void intel_mid_rproc_vring_init(struct rproc *rproc,
+			struct vring *vring, enum local_vring_idx id);
+
+extern void rpmsg_ns_del_list(struct rpmsg_ns_list *nslist);
+
+/* Please do NOT use these APIs to send ipc commands,
+ * use rpmsg commands defined in <asm/intel_mid_rpmsg.h>
+ */
+extern void intel_scu_ipc_send_command(u32 cmd);
+
+/* Issue commands to the SCU with or without data */
+extern int intel_scu_ipc_simple_command(int cmd, int sub);
+extern int intel_scu_ipc_command(u32 cmd, u32 sub, u8 *in, u32 inlen,
+		u32 *out, u32 outlen);
+extern int intel_scu_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen,
+		u32 *out, u32 outlen, u32 dptr, u32 sptr);
+
+/* IPC locking */
+extern void intel_scu_ipc_lock(void);
+extern void intel_scu_ipc_unlock(void);
diff --git a/drivers/remoteproc/intel_mid_rproc_scu.c b/drivers/remoteproc/intel_mid_rproc_scu.c
new file mode 100644
index 0000000..2706283
--- /dev/null
+++ b/drivers/remoteproc/intel_mid_rproc_scu.c
@@ -0,0 +1,444 @@
+/*
+ * INTEL MID Remote Processor - SCU driver
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/remoteproc.h>
+#include <linux/delay.h>
+#include <linux/rpmsg.h>
+#include <linux/slab.h>
+#include <linux/virtio_ring.h>
+#include <linux/virtio_ids.h>
+#include <linux/platform_data/intel_mid_remoteproc.h>
+
+#include <asm/intel_scu_ipc.h>
+#include <asm/scu_ipc_rpmsg.h>
+#include <asm/intel-mid.h>
+
+#include "intel_mid_rproc_core.h"
+#include "remoteproc_internal.h"
+
+static struct rpmsg_ns_list *nslist;
+
+
+static int scu_ipc_command(void *tx_buf)
+{
+	struct tx_ipc_msg *tx_msg;
+	int ret = 0;
+
+	tx_msg = (struct tx_ipc_msg *)tx_buf;
+
+	ret = intel_scu_ipc_command(tx_msg->cmd, tx_msg->sub,
+				tx_msg->in, tx_msg->inlen,
+				tx_msg->out, tx_msg->outlen);
+	return ret;
+}
+
+static int scu_ipc_raw_command(void *tx_buf)
+{
+	struct tx_ipc_msg *tx_msg;
+	int ret = 0;
+
+	tx_msg = (struct tx_ipc_msg *)tx_buf;
+
+	intel_scu_ipc_lock();
+	ret = intel_scu_ipc_raw_cmd(tx_msg->cmd, tx_msg->sub,
+				tx_msg->in, tx_msg->inlen,
+				tx_msg->out, tx_msg->outlen,
+				tx_msg->dptr, tx_msg->sptr);
+	intel_scu_ipc_unlock();
+
+	return ret;
+}
+
+static int scu_ipc_simple_command(void *tx_buf)
+{
+	struct tx_ipc_msg *tx_msg;
+	int ret = 0;
+
+	tx_msg = (struct tx_ipc_msg *)tx_buf;
+
+	ret = intel_scu_ipc_simple_command(tx_msg->cmd, tx_msg->sub);
+
+	return ret;
+}
+
+static void scu_ipc_send_command(void *tx_buf)
+{
+	struct tx_ipc_msg *tx_msg;
+
+	tx_msg = (struct tx_ipc_msg *)tx_buf;
+	intel_scu_ipc_send_command(tx_msg->sub << 12 | tx_msg->cmd);
+}
+
+static int scu_ipc_fw_command(void *tx_buf)
+{
+	struct tx_ipc_msg *tx_msg;
+	int ret = 0;
+
+	tx_msg = (struct tx_ipc_msg *)tx_buf;
+
+	switch (tx_msg->cmd) {
+	case RP_GET_FW_REVISION:
+		ret = scu_ipc_command(tx_buf);
+		break;
+	case RP_FW_UPDATE:
+		/* Only scu_ipc_send_command works for fw update */
+		scu_ipc_send_command(tx_buf);
+		break;
+	default:
+		pr_info("Command %x not supported\n", tx_msg->cmd);
+		break;
+	};
+
+	return ret;
+}
+
+static int scu_ipc_util_command(void *tx_buf)
+{
+	struct tx_ipc_msg *tx_msg;
+	int ret = 0;
+
+	tx_msg = (struct tx_ipc_msg *)tx_buf;
+
+	switch (tx_msg->cmd) {
+	case RP_GET_FW_REVISION:
+	case RP_GET_HOBADDR:
+	case RP_OSC_CLK_CTRL:
+		ret = scu_ipc_command(tx_buf);
+		break;
+	case RP_S0IX_COUNTER:
+		ret = scu_ipc_simple_command(tx_buf);
+		break;
+	case RP_WRITE_OSNIB:
+		ret = scu_ipc_raw_command(tx_buf);
+		break;
+	default:
+		pr_info("Command %x not supported\n", tx_msg->cmd);
+		break;
+	};
+
+	return ret;
+}
+
+static int scu_ipc_vrtc_command(void *tx_buf)
+{
+	struct tx_ipc_msg *tx_msg;
+	int ret = 0;
+
+	tx_msg = (struct tx_ipc_msg *)tx_buf;
+
+	switch (tx_msg->cmd) {
+	case RP_GET_HOBADDR:
+		ret = scu_ipc_command(tx_buf);
+		break;
+	case RP_VRTC:
+		ret = scu_ipc_simple_command(tx_buf);
+		break;
+	default:
+		pr_info("Command %x not supported\n", tx_msg->cmd);
+		break;
+	};
+
+	return ret;
+}
+
+static int scu_ipc_fw_logging_command(void *tx_buf)
+{
+	struct tx_ipc_msg *tx_msg;
+	int ret = 0;
+
+	tx_msg = (struct tx_ipc_msg *)tx_buf;
+
+	switch (tx_msg->cmd) {
+	case RP_GET_HOBADDR:
+	case RP_SCULOG_TRACE:
+		ret = scu_ipc_command(tx_buf);
+		break;
+	case RP_CLEAR_FABERROR:
+	case RP_SCULOG_CTRL:
+		ret = scu_ipc_simple_command(tx_buf);
+		break;
+	default:
+		pr_info("Command %x not supported\n", tx_msg->cmd);
+		break;
+	};
+
+	return ret;
+}
+
+/**
+ * scu_ipc_rpmsg_handle() - scu rproc specified ipc rpmsg handle
+ * @rx_buf: rx buffer to be add
+ * @tx_buf: tx buffer to be get
+ * @r_len: rx buffer length
+ * @s_len: tx buffer length
+ */
+int scu_ipc_rpmsg_handle(void *rx_buf, void *tx_buf, u32 *r_len, u32 *s_len)
+{
+	struct rpmsg_hdr *tx_hdr, *tmp_hdr;
+	struct tx_ipc_msg *tx_msg;
+	struct rx_ipc_msg *tmp_msg;
+	int ret = 0;
+
+	*r_len = sizeof(struct rpmsg_hdr) + sizeof(struct rx_ipc_msg);
+	*s_len = sizeof(struct rpmsg_hdr) + sizeof(struct tx_ipc_msg);
+
+	/* get tx_msg and send scu ipc command */
+	tx_hdr = (struct rpmsg_hdr *)tx_buf;
+	tx_msg = (struct tx_ipc_msg *)(tx_buf + sizeof(*tx_hdr));
+
+	tmp_hdr = (struct rpmsg_hdr *)rx_buf;
+	tmp_msg = (struct rx_ipc_msg *)tmp_hdr->data;
+
+	switch (tx_hdr->dst) {
+	case RP_PMIC_ACCESS:
+	case RP_FLIS_ACCESS:
+	case RP_IPC_COMMAND:
+		tmp_msg->status = scu_ipc_command(tx_msg);
+		break;
+	case RP_SET_WATCHDOG:
+		if ((intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER) ||
+			(intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_ANNIEDALE))
+			tmp_msg->status = scu_ipc_raw_command(tx_msg);
+		else
+			tmp_msg->status = scu_ipc_command(tx_msg);
+		break;
+	case RP_MIP_ACCESS:
+	case RP_IPC_RAW_COMMAND:
+		tmp_msg->status = scu_ipc_raw_command(tx_msg);
+		break;
+	case RP_IPC_SIMPLE_COMMAND:
+		tmp_msg->status = scu_ipc_simple_command(tx_msg);
+		break;
+	case RP_IPC_UTIL:
+		tmp_msg->status = scu_ipc_util_command(tx_msg);
+		break;
+	case RP_FW_ACCESS:
+		tmp_msg->status = scu_ipc_fw_command(tx_msg);
+		break;
+	case RP_VRTC:
+		tmp_msg->status = scu_ipc_vrtc_command(tx_msg);
+		break;
+	case RP_FW_LOGGING:
+		tmp_msg->status = scu_ipc_fw_logging_command(tx_msg);
+		break;
+	default:
+		tmp_msg->status = 0;
+		pr_info("Command %x not supported yet\n", tx_hdr->dst);
+		break;
+	};
+
+	/* prepare rx buffer, switch src and dst */
+	tmp_hdr->src = tx_hdr->dst;
+	tmp_hdr->dst = tx_hdr->src;
+
+	tmp_hdr->flags = tx_hdr->flags;
+	tmp_hdr->len = sizeof(struct rx_ipc_msg);
+
+	return ret;
+}
+
+/* kick a virtqueue */
+static void intel_rproc_scu_kick(struct rproc *rproc, int vqid)
+{
+	int idx;
+	int ret;
+	struct intel_mid_rproc *iproc;
+	struct rproc_vdev *rvdev;
+	struct device *dev = rproc->dev.parent;
+	static unsigned long ns_info_all_received;
+
+	iproc = (struct intel_mid_rproc *)rproc->priv;
+
+	/*
+	 * Remote processor virtqueue being kicked.
+	 * This part simulates remote processor handling messages.
+	 */
+	idx = find_vring_index(rproc, vqid, VIRTIO_ID_RPMSG);
+
+	switch (idx) {
+	case RX_VRING:
+		if (iproc->ns_enabled && !ns_info_all_received) {
+			/* push messages with ns_info for ALL available
+			name services in the list (nslist) into
+			rx buffers. */
+			list_for_each_entry_continue(iproc->ns_info,
+				&nslist->list, node) {
+				ret = intel_mid_rproc_ns_handle(iproc,
+					iproc->ns_info);
+				if (ret) {
+					dev_err(dev, "ns handle error\n");
+					return;
+				}
+			}
+
+			ns_info_all_received = 1;
+			intel_mid_rproc_vq_interrupt(rproc, vqid);
+		}
+		break;
+
+	case TX_VRING:
+
+		dev_dbg(dev, "remote processor got the message ...\n");
+		intel_mid_rproc_msg_handle(iproc);
+		intel_mid_rproc_vq_interrupt(rproc, vqid);
+
+		/*
+		 * After remoteproc handles the message, it calls
+		 * the receive callback.
+		 * TODO: replace this part with real remote processor
+		 * operation.
+		 */
+		rvdev = find_rvdev(rproc, VIRTIO_ID_RPMSG);
+		if (rvdev)
+			intel_mid_rproc_vq_interrupt(rproc,
+				rvdev->vring[RX_VRING].notifyid);
+		else
+			WARN(1, "%s: can't find given rproc state\n", __func__);
+		break;
+
+	default:
+		dev_err(dev, "invalid vring index\n");
+		break;
+	}
+}
+
+/* power up the remote processor */
+static int intel_rproc_scu_start(struct rproc *rproc)
+{
+	struct intel_mid_rproc *iproc;
+
+	pr_info("Started intel scu remote processor\n");
+	iproc = (struct intel_mid_rproc *)rproc->priv;
+	intel_mid_rproc_vring_init(rproc, &iproc->rx_vring, RX_VRING);
+	intel_mid_rproc_vring_init(rproc, &iproc->tx_vring, TX_VRING);
+
+	return 0;
+}
+
+/* power off the remote processor */
+static int intel_rproc_scu_stop(struct rproc *rproc)
+{
+	pr_info("Stopped intel scu remote processor\n");
+	return 0;
+}
+
+static struct rproc_ops intel_rproc_scu_ops = {
+	.start		= intel_rproc_scu_start,
+	.stop		= intel_rproc_scu_stop,
+	.kick		= intel_rproc_scu_kick,
+};
+
+static int intel_rproc_scu_probe(struct platform_device *pdev)
+{
+	struct intel_mid_rproc_pdata *pdata = pdev->dev.platform_data;
+	struct intel_mid_rproc *iproc;
+	struct rproc *rproc;
+	int ret;
+
+	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+	if (ret) {
+		dev_err(pdev->dev.parent, "dma_set_coherent_mask: %d\n", ret);
+		return ret;
+	}
+
+	rproc = rproc_alloc(&pdev->dev, pdata->name, &intel_rproc_scu_ops,
+				pdata->firmware, sizeof(*iproc));
+	if (!rproc)
+		return -ENOMEM;
+
+	iproc = rproc->priv;
+	iproc->rproc = rproc;
+	nslist = pdata->nslist;
+
+	platform_set_drvdata(pdev, rproc);
+
+	ret = rproc_add(rproc);
+	if (ret)
+		goto free_rproc;
+
+	/*
+	 * Temporarily follow the rproc framework to load firmware
+	 * TODO: modify remoteproc code according to X86 architecture
+	 */
+	if (0 == wait_for_completion_timeout(&rproc->firmware_loading_complete,
+		RPROC_FW_LOADING_TIMEOUT)) {
+		dev_err(pdev->dev.parent, "fw loading not complete\n");
+		goto free_rproc;
+	}
+
+	/* Initialize intel_rproc_scu private data */
+	strncpy(iproc->name, pdev->id_entry->name, sizeof(iproc->name) - 1);
+	iproc->type = pdev->id_entry->driver_data;
+	iproc->r_vring_last_used = 0;
+	iproc->s_vring_last_used = 0;
+	iproc->ns_enabled = true;
+	iproc->rproc_rpmsg_handle = scu_ipc_rpmsg_handle;
+	iproc->ns_info = list_entry(&nslist->list,
+			struct rpmsg_ns_info, node);
+
+	return 0;
+
+free_rproc:
+	rproc_put(rproc);
+	return ret;
+}
+
+static int intel_rproc_scu_remove(struct platform_device *pdev)
+{
+	struct rproc *rproc = platform_get_drvdata(pdev);
+
+	if (nslist)
+		rpmsg_ns_del_list(nslist);
+
+	rproc_del(rproc);
+	rproc_put(rproc);
+
+	return 0;
+}
+
+static const struct platform_device_id intel_rproc_scu_id_table[] = {
+	{ "intel_rproc_scu", RPROC_SCU },
+	{ },
+};
+
+static struct platform_driver intel_rproc_scu_driver = {
+	.probe = intel_rproc_scu_probe,
+	.remove = intel_rproc_scu_remove,
+	.driver = {
+		.name = "intel_rproc_scu",
+		.owner = THIS_MODULE,
+	},
+	.id_table = intel_rproc_scu_id_table,
+};
+
+static int __init intel_rproc_scu_init(void)
+{
+	return platform_driver_register(&intel_rproc_scu_driver);
+}
+
+static void __exit intel_rproc_scu_exit(void)
+{
+	platform_driver_unregister(&intel_rproc_scu_driver);
+}
+
+subsys_initcall(intel_rproc_scu_init);
+module_exit(intel_rproc_scu_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Ning Li<ning.li@intel.com>");
+MODULE_DESCRIPTION("INTEL MID Remoteproc Core driver");
diff --git a/drivers/rpmsg/Kconfig b/drivers/rpmsg/Kconfig
index 69a2193..620fef5 100644
--- a/drivers/rpmsg/Kconfig
+++ b/drivers/rpmsg/Kconfig
@@ -6,4 +6,13 @@
 	select VIRTIO
 	select VIRTUALIZATION
 
+config RPMSG_IPC
+	tristate "Build rpmsg ipc driver"
+	depends on RPMSG
+	help
+	  Build an rpmsg ipc driver, which demonstrates how IA
+	  communicates with remote processor through IPC rpmsg
+	  over the rpmsg bus. It register a rpmsg driver matched
+	  with the rpmsg device created in remoteproc framework.
+
 endmenu
diff --git a/drivers/rpmsg/Makefile b/drivers/rpmsg/Makefile
index 7617fcb..d8f5030 100644
--- a/drivers/rpmsg/Makefile
+++ b/drivers/rpmsg/Makefile
@@ -1 +1,2 @@
 obj-$(CONFIG_RPMSG)	+= virtio_rpmsg_bus.o
+obj-$(CONFIG_RPMSG_IPC)	+= intel_mid_rpmsg.o
diff --git a/drivers/rpmsg/intel_mid_rpmsg.c b/drivers/rpmsg/intel_mid_rpmsg.c
new file mode 100644
index 0000000..83ec775
--- /dev/null
+++ b/drivers/rpmsg/intel_mid_rpmsg.c
@@ -0,0 +1,448 @@
+/*
+ * rpmsg_mid_rpmsg.c - Intel RPMSG Driver
+ *
+ * Copyright (C) 2012 Intel Corporation
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/rpmsg.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/cdev.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <linux/platform_data/intel_mid_remoteproc.h>
+
+#include <asm/intel_mid_rpmsg.h>
+
+/* Instance for generic kernel IPC calls */
+static struct rpmsg_device_data rpmsg_ddata[RPMSG_IPC_COMMAND_TYPE_NUM] = {
+	[RPMSG_IPC_COMMAND] = {
+		.name = "rpmsg_ipc_command",
+		.rpdev = NULL,	/* initialized during driver probe */
+		.rpmsg_instance = NULL, /* initialized during driver probe */
+	},
+	[RPMSG_IPC_SIMPLE_COMMAND] = {
+		.name = "rpmsg_ipc_simple_command",
+		.rpdev = NULL,
+		.rpmsg_instance = NULL,
+	},
+	[RPMSG_IPC_RAW_COMMAND] = {
+		.name = "rpmsg_ipc_raw_command",
+		.rpdev = NULL,
+		.rpmsg_instance = NULL,
+	},
+};
+
+/* Providing rpmsg ipc generic interfaces.
+ * Modules can call these API directly without registering rpmsg driver.
+ *
+ * The arg list is the same as intel_scu_ipc_command(),
+ * so simply change intel_scu_ipc_command() to rpmsg_send_generic_command()
+ */
+int rpmsg_send_generic_command(u32 cmd, u32 sub,
+				u8 *in, u32 inlen,
+				u32 *out, u32 outlen)
+{
+	struct rpmsg_instance *rpmsg_ipc_instance =
+		rpmsg_ddata[RPMSG_IPC_COMMAND].rpmsg_instance;
+
+	return rpmsg_send_command(rpmsg_ipc_instance, cmd, sub,
+					in, out, inlen, outlen);
+}
+EXPORT_SYMBOL(rpmsg_send_generic_command);
+
+int rpmsg_send_generic_simple_command(u32 cmd, u32 sub)
+{
+	struct rpmsg_instance *rpmsg_ipc_instance =
+		rpmsg_ddata[RPMSG_IPC_SIMPLE_COMMAND].rpmsg_instance;
+
+	return rpmsg_send_simple_command(rpmsg_ipc_instance, cmd, sub);
+}
+EXPORT_SYMBOL(rpmsg_send_generic_simple_command);
+
+int rpmsg_send_generic_raw_command(u32 cmd, u32 sub,
+				   u8 *in, u32 inlen,
+				   u32 *out, u32 outlen,
+				   u32 dptr, u32 sptr)
+{
+	struct rpmsg_instance *rpmsg_ipc_instance =
+		rpmsg_ddata[RPMSG_IPC_RAW_COMMAND].rpmsg_instance;
+
+	return rpmsg_send_raw_command(rpmsg_ipc_instance, cmd, sub,
+					in, out, inlen, outlen, sptr, dptr);
+}
+EXPORT_SYMBOL(rpmsg_send_generic_raw_command);
+
+/* Global lock for rpmsg framework */
+static struct rpmsg_lock global_lock = {
+	.lock = __MUTEX_INITIALIZER(global_lock.lock),
+	.locked_prev = 0,
+	.pending = ATOMIC_INIT(0),
+};
+
+#define is_global_locked_prev		(global_lock.locked_prev)
+#define get_global_locked_prev()	(global_lock.locked_prev++)
+#define put_global_locked_prev()	(global_lock.locked_prev--)
+#define global_locked_by_current	(global_lock.lock.owner == current)
+
+void rpmsg_global_lock(void)
+{
+	atomic_inc(&global_lock.pending);
+	mutex_lock(&global_lock.lock);
+}
+EXPORT_SYMBOL(rpmsg_global_lock);
+
+void rpmsg_global_unlock(void)
+{
+	mutex_unlock(&global_lock.lock);
+	if (!atomic_dec_and_test(&global_lock.pending))
+		schedule();
+}
+EXPORT_SYMBOL(rpmsg_global_unlock);
+
+static void rpmsg_lock(void)
+{
+	if (!mutex_trylock(&global_lock.lock)) {
+		if (global_locked_by_current)
+			get_global_locked_prev();
+		else
+			rpmsg_global_lock();
+	} else
+		atomic_inc(&global_lock.pending);
+}
+
+static void rpmsg_unlock(void)
+{
+	if (!is_global_locked_prev)
+		rpmsg_global_unlock();
+	else
+		put_global_locked_prev();
+}
+
+int rpmsg_send_command(struct rpmsg_instance *instance, u32 cmd,
+						u32 sub, u8 *in,
+						u32 *out, u32 inlen,
+						u32 outlen)
+{
+	int ret = 0;
+
+	if (!instance) {
+		pr_err("%s: Instance is NULL\n", __func__);
+		return -EFAULT;
+	}
+
+	/* Hold global rpmsg lock */
+	rpmsg_lock();
+
+	mutex_lock(&instance->instance_lock);
+
+	/* Prepare Tx buffer */
+	instance->tx_msg->cmd = cmd;
+	instance->tx_msg->sub = sub;
+	instance->tx_msg->in = in;
+	instance->tx_msg->out = out;
+	instance->tx_msg->inlen = inlen;
+	instance->tx_msg->outlen = outlen;
+
+	/* Preapre Rx buffer */
+	mutex_lock(&instance->rx_lock);
+	instance->rx_msg->status = -1;
+	mutex_unlock(&instance->rx_lock);
+	INIT_COMPLETION(instance->reply_arrived);
+
+	/* Send message to remote processor(SCU) using rpdev channel */
+	ret = rpmsg_send_offchannel(
+					instance->rpdev,
+					instance->endpoint->addr,
+					instance->rpdev->dst,
+					instance->tx_msg,
+					sizeof(*instance->tx_msg)
+					);
+	if (ret) {
+		dev_err(&instance->rpdev->dev, "%s failed: %d\n",
+						 __func__, ret);
+		goto end;
+	}
+
+	if (0 == wait_for_completion_timeout(&instance->reply_arrived,
+						RPMSG_TX_TIMEOUT)) {
+		dev_err(&instance->rpdev->dev,
+				"timeout: %d\n", ret);
+		ret = -ETIMEDOUT;
+		goto end;
+	}
+
+	mutex_lock(&instance->rx_lock);
+	ret = instance->rx_msg->status;
+	mutex_unlock(&instance->rx_lock);
+end:
+	mutex_unlock(&instance->instance_lock);
+	rpmsg_unlock();
+
+	return ret;
+}
+EXPORT_SYMBOL(rpmsg_send_command);
+
+int rpmsg_send_raw_command(struct rpmsg_instance *instance, u32 cmd,
+						u32 sub, u8 *in,
+						u32 *out, u32 inlen,
+						u32 outlen, u32 sptr,
+						u32 dptr)
+{
+	int ret = 0;
+
+	if (!instance) {
+		pr_err("%s: Instance is NULL\n", __func__);
+		return -EFAULT;
+	}
+
+	mutex_lock(&instance->instance_lock);
+	instance->tx_msg->sptr = sptr;
+	instance->tx_msg->dptr = dptr;
+	mutex_unlock(&instance->instance_lock);
+
+	ret = rpmsg_send_command(instance, cmd, sub, in, out, inlen, outlen);
+
+	return ret;
+}
+EXPORT_SYMBOL(rpmsg_send_raw_command);
+
+int rpmsg_send_simple_command(struct rpmsg_instance *instance, u32 cmd,
+						u32 sub)
+{
+	int ret;
+
+	ret = rpmsg_send_command(instance, cmd, sub, NULL, NULL, 0, 0);
+
+	return ret;
+}
+EXPORT_SYMBOL(rpmsg_send_simple_command);
+
+static void rpmsg_recv_cb(struct rpmsg_channel *rpdev, void *data,
+					int len, void *priv, u32 src)
+{
+#ifdef DEBUG_RPMSG_MSG
+	static int rx_count;
+#endif
+	struct rpmsg_instance *instance = priv;
+
+	if (len != sizeof(struct rx_ipc_msg)) {
+		dev_warn(&rpdev->dev, "%s, incorrect msg length\n", __func__);
+		return;
+	}
+
+#ifdef DEBUG_RPMSG_MSG
+	dev_info(&rpdev->dev, "incoming msg %d (src: 0x%x)\n", ++rx_count, src);
+
+	print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+		       data, len,  true);
+#endif
+
+	mutex_lock(&instance->rx_lock);
+
+	memcpy(instance->rx_msg, data, len);
+
+	mutex_unlock(&instance->rx_lock);
+
+	complete(&instance->reply_arrived);
+
+}
+
+int alloc_rpmsg_instance(struct rpmsg_channel *rpdev,
+				struct rpmsg_instance **pInstance)
+{
+	int ret = 0;
+	struct rpmsg_instance *instance;
+
+	dev_info(&rpdev->dev, "Allocating rpmsg_instance\n");
+
+	instance = kzalloc(sizeof(*instance), GFP_KERNEL);
+	if (!instance) {
+		ret = -ENOMEM;
+		dev_err(&rpdev->dev, "kzalloc rpmsg_instance failed\n");
+		goto alloc_out;
+	}
+
+	instance->rpdev = rpdev;
+
+	instance->tx_msg = kzalloc(sizeof(struct tx_ipc_msg), GFP_KERNEL);
+	if (!instance->tx_msg) {
+		ret = -ENOMEM;
+		dev_err(&rpdev->dev, "kzalloc instance tx_msg failed\n");
+		goto error_tx_msg_create;
+	}
+
+	instance->rx_msg = kzalloc(sizeof(struct rx_ipc_msg), GFP_KERNEL);
+	if (!instance->rx_msg) {
+		ret = -ENOMEM;
+		dev_err(&rpdev->dev, "kzalloc instance rx_msg failed\n");
+		goto error_rx_msg_create;
+	}
+
+	instance->endpoint = rpmsg_create_ept(rpdev, rpmsg_recv_cb,
+							instance,
+							RPMSG_ADDR_ANY);
+	if (!instance->endpoint) {
+		dev_err(&rpdev->dev, "create instance endpoint failed\n");
+		ret = -ENOMEM;
+		goto error_endpoint_create;
+	}
+
+	goto alloc_out;
+
+error_endpoint_create:
+	kfree(instance->rx_msg);
+	instance->rx_msg = NULL;
+error_rx_msg_create:
+	kfree(instance->tx_msg);
+	instance->tx_msg = NULL;
+error_tx_msg_create:
+	kfree(instance);
+	instance = NULL;
+alloc_out:
+	*pInstance = instance;
+	return ret;
+
+}
+EXPORT_SYMBOL(alloc_rpmsg_instance);
+
+void free_rpmsg_instance(struct rpmsg_channel *rpdev,
+				struct rpmsg_instance **pInstance)
+{
+	struct rpmsg_instance *instance = *pInstance;
+
+	mutex_lock(&instance->instance_lock);
+	rpmsg_destroy_ept(instance->endpoint);
+	kfree(instance->tx_msg);
+	instance->tx_msg = NULL;
+	kfree(instance->rx_msg);
+	instance->rx_msg = NULL;
+	mutex_unlock(&instance->instance_lock);
+	kfree(instance);
+	*pInstance = NULL;
+	dev_info(&rpdev->dev, "Freeing rpmsg device\n");
+}
+EXPORT_SYMBOL(free_rpmsg_instance);
+
+void init_rpmsg_instance(struct rpmsg_instance *instance)
+{
+	init_completion(&instance->reply_arrived);
+	mutex_init(&instance->instance_lock);
+	mutex_init(&instance->rx_lock);
+}
+EXPORT_SYMBOL(init_rpmsg_instance);
+
+static int rpmsg_ipc_probe(struct rpmsg_channel *rpdev)
+{
+	int ret = 0;
+	int i;
+	struct rpmsg_device_data *ddata = rpmsg_ddata;
+
+	if (rpdev == NULL) {
+		pr_err("rpmsg channel %s not created\n", rpdev->id.name);
+		ret = -ENODEV;
+		goto out;
+	}
+
+	dev_info(&rpdev->dev, "Probed rpmsg_ipc device %s\n", rpdev->id.name);
+
+	for (i = RPMSG_IPC_COMMAND; i < RPMSG_IPC_COMMAND_TYPE_NUM; i++) {
+		if (!strncmp(rpdev->id.name, ddata[i].name, RPMSG_NAME_SIZE)) {
+
+			/* Allocate rpmsg instance for kernel IPC calls*/
+			ret = alloc_rpmsg_instance(rpdev,
+					&ddata[i].rpmsg_instance);
+			if (!ddata[i].rpmsg_instance) {
+				dev_err(&rpdev->dev,
+					"alloc rpmsg instance failed\n");
+				goto out;
+			}
+
+			/* Initialize rpmsg instance */
+			init_rpmsg_instance(ddata[i].rpmsg_instance);
+
+			ddata[i].rpdev = rpdev;
+			break;
+		}
+	}
+
+out:
+	return ret;
+}
+
+static void rpmsg_ipc_remove(struct rpmsg_channel *rpdev)
+{
+	int i;
+	struct rpmsg_device_data *ddata = rpmsg_ddata;
+
+	for (i = RPMSG_IPC_COMMAND; i < RPMSG_IPC_COMMAND_TYPE_NUM; i++) {
+		if (!strncmp(rpdev->id.name, ddata[i].name, RPMSG_NAME_SIZE)) {
+			free_rpmsg_instance(rpdev, &ddata[i].rpmsg_instance);
+			break;
+		}
+	}
+	dev_info(&rpdev->dev, "Removed rpmsg_ipc device\n");
+}
+
+static void rpmsg_ipc_cb(struct rpmsg_channel *rpdev, void *data,
+					int len, void *priv, u32 src)
+{
+	dev_warn(&rpdev->dev, "unexpected, message\n");
+
+	print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+		       data, len,  true);
+}
+
+static struct rpmsg_device_id rpmsg_ipc_id_table[] = {
+	{ .name	= "rpmsg_ipc_command" },
+	{ .name	= "rpmsg_ipc_simple_command" },
+	{ .name	= "rpmsg_ipc_raw_command" },
+	{ },
+};
+MODULE_DEVICE_TABLE(rpmsg, rpmsg_ipc_id_table);
+
+static struct rpmsg_driver rpmsg_ipc = {
+	.drv.name	= KBUILD_MODNAME,
+	.drv.owner	= THIS_MODULE,
+	.id_table	= rpmsg_ipc_id_table,
+	.probe		= rpmsg_ipc_probe,
+	.callback	= rpmsg_ipc_cb,
+	.remove		= rpmsg_ipc_remove,
+};
+
+static int __init rpmsg_ipc_init(void)
+{
+	return register_rpmsg_driver(&rpmsg_ipc);
+}
+subsys_initcall(rpmsg_ipc_init);
+
+static void __exit rpmsg_ipc_exit(void)
+{
+	return unregister_rpmsg_driver(&rpmsg_ipc);
+}
+module_exit(rpmsg_ipc_exit);
+
+MODULE_AUTHOR("Ning Li<ning.li@intel.com>");
+MODULE_DESCRIPTION("Intel IPC RPMSG Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
index b6135d4..e7c5927 100644
--- a/drivers/rpmsg/virtio_rpmsg_bus.c
+++ b/drivers/rpmsg/virtio_rpmsg_bus.c
@@ -749,8 +749,10 @@
 	dev_dbg(dev, "TX From 0x%x, To 0x%x, Len %d, Flags %d, Reserved %d\n",
 					msg->src, msg->dst, msg->len,
 					msg->flags, msg->reserved);
-	print_hex_dump(KERN_DEBUG, "rpmsg_virtio TX: ", DUMP_PREFIX_NONE, 16, 1,
+#if defined(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
+	print_hex_dump_debug("rpmsg_virtio TX: ", DUMP_PREFIX_NONE, 16, 1,
 					msg, sizeof(*msg) + msg->len, true);
+#endif
 
 	sg_init_one(&sg, msg, sizeof(*msg) + len);
 
@@ -786,8 +788,10 @@
 	dev_dbg(dev, "From: 0x%x, To: 0x%x, Len: %d, Flags: %d, Reserved: %d\n",
 					msg->src, msg->dst, msg->len,
 					msg->flags, msg->reserved);
-	print_hex_dump(KERN_DEBUG, "rpmsg_virtio RX: ", DUMP_PREFIX_NONE, 16, 1,
+#if defined(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
+	print_hex_dump_debug("rpmsg_virtio RX: ", DUMP_PREFIX_NONE, 16, 1,
 					msg, sizeof(*msg) + msg->len, true);
+#endif
 
 	/*
 	 * We currently use fixed-sized buffers, so trivially sanitize
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index b983813..fe9640c 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -614,6 +614,20 @@
 	  This driver can also be built as a module. If so, the module
 	  will be called rtc-cmos.
 
+config RTC_DRV_CMOS_WAKEUP_FROM_LPSTATES
+	tristate "PC-style 'CMOS' can wakeup from low power states"
+	depends on RTC_DRV_CMOS
+	help
+	  Say "yes" to have rtc-cmos driver capability to wakeup from low
+	  power states (S3 and S4/5).
+
+config RTC_DRV_CMOS_DAYOFMONTH_ALARM
+	tristate "PC-style 'CMOS' supports day of month for alarms"
+	depends on RTC_DRV_CMOS_WAKEUP_FROM_LPSTATES
+	help
+	  Say "yes" to have rtc-cmos driver support day of month for
+	  alarms through REG_D register.
+
 config RTC_DRV_VRTC
 	tristate "Virtual RTC for Intel MID platforms"
 	depends on X86_INTEL_MID
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index f1cb706..d13b1c7 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -681,8 +681,10 @@
 	hpet_set_periodic_freq(cmos_rtc.rtc->irq_freq);
 	CMOS_WRITE(RTC_REF_CLCK_32KHZ | 0x06, RTC_FREQ_SELECT);
 
+#ifndef CONFIG_RTC_DRV_CMOS_WAKEUP_FROM_LPSTATES
 	/* disable irqs */
 	cmos_irq_disable(&cmos_rtc, RTC_PIE | RTC_AIE | RTC_UIE);
+#endif
 
 	rtc_control = CMOS_READ(RTC_CONTROL);
 
@@ -975,6 +977,30 @@
 	device_init_wakeup(dev, 1);
 }
 
+#elif defined(CONFIG_RTC_DRV_CMOS_WAKEUP_FROM_LPSTATES)
+
+#ifdef	CONFIG_RTC_DRV_CMOS_DAYOFMONTH_ALARM
+static struct cmos_rtc_board_info cmos_wakeup_rtc_info;
+#endif
+
+static void cmos_wake_setup(struct device *dev)
+{
+#ifdef	CONFIG_RTC_DRV_CMOS_DAYOFMONTH_ALARM
+	/* add day of month capability for alarms */
+	cmos_wakeup_rtc_info.rtc_day_alarm = RTC_REG_D;
+	cmos_wakeup_rtc_info.rtc_mon_alarm = 0;
+	cmos_wakeup_rtc_info.rtc_century = 0;
+
+	cmos_wakeup_rtc_info.wake_on = NULL;
+	cmos_wakeup_rtc_info.wake_off = NULL;
+
+	dev->platform_data = &cmos_wakeup_rtc_info;
+#endif
+
+	/* RTC always wakes from S1/S2/S3, and often S4/STD */
+	device_init_wakeup(dev, 1);
+}
+
 #else
 
 static void cmos_wake_setup(struct device *dev)
@@ -1120,10 +1146,12 @@
 
 static void cmos_platform_shutdown(struct platform_device *pdev)
 {
+#ifndef CONFIG_RTC_DRV_CMOS_WAKEUP_FROM_LPSTATES
 	if (system_state == SYSTEM_POWER_OFF && !cmos_poweroff(&pdev->dev))
 		return;
 
 	cmos_do_shutdown();
+#endif
 }
 
 /* work with hotplug and coldplug */
diff --git a/drivers/rtc/rtc-max77686.c b/drivers/rtc/rtc-max77686.c
index 771812d..3bb9401 100644
--- a/drivers/rtc/rtc-max77686.c
+++ b/drivers/rtc/rtc-max77686.c
@@ -240,9 +240,9 @@
 	}
 
 	alrm->pending = 0;
-	ret = regmap_read(info->max77686->regmap, MAX77686_REG_STATUS1, &val);
+	ret = regmap_read(info->max77686->regmap, MAX77686_REG_STATUS2, &val);
 	if (ret < 0) {
-		dev_err(info->dev, "%s:%d fail to read status1 reg(%d)\n",
+		dev_err(info->dev, "%s:%d fail to read status2 reg(%d)\n",
 				__func__, __LINE__, ret);
 		goto out;
 	}
diff --git a/drivers/rtc/rtc-rv3029c2.c b/drivers/rtc/rtc-rv3029c2.c
index 5032c24..9100a34 100644
--- a/drivers/rtc/rtc-rv3029c2.c
+++ b/drivers/rtc/rtc-rv3029c2.c
@@ -310,7 +310,7 @@
 		dev_dbg(&client->dev, "alarm IRQ armed\n");
 	} else {
 		/* disable AIE irq */
-		ret = rv3029c2_rtc_i2c_alarm_set_irq(client, 1);
+		ret = rv3029c2_rtc_i2c_alarm_set_irq(client, 0);
 		if (ret)
 			return ret;
 
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index f6adde4..3743ac9 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -3,7 +3,7 @@
  *
  * Module interface and handling of zfcp data structures.
  *
- * Copyright IBM Corp. 2002, 2010
+ * Copyright IBM Corp. 2002, 2013
  */
 
 /*
@@ -23,6 +23,7 @@
  *            Christof Schmitt
  *            Martin Petermann
  *            Sven Schuetz
+ *            Steffen Maier
  */
 
 #define KMSG_COMPONENT "zfcp"
@@ -415,6 +416,8 @@
 	adapter->dma_parms.max_segment_size = ZFCP_QDIO_SBALE_LEN;
 	adapter->ccw_device->dev.dma_parms = &adapter->dma_parms;
 
+	adapter->stat_read_buf_num = FSF_STATUS_READS_RECOM;
+
 	if (!zfcp_scsi_adapter_register(adapter))
 		return adapter;
 
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 4133ab6..8e8f353 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -102,10 +102,13 @@
 
 	if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
 		zfcp_erp_action_dismiss(&port->erp_action);
-	else
-		shost_for_each_device(sdev, port->adapter->scsi_host)
+	else {
+		spin_lock(port->adapter->scsi_host->host_lock);
+		__shost_for_each_device(sdev, port->adapter->scsi_host)
 			if (sdev_to_zfcp(sdev)->port == port)
 				zfcp_erp_action_dismiss_lun(sdev);
+		spin_unlock(port->adapter->scsi_host->host_lock);
+	}
 }
 
 static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
@@ -592,9 +595,11 @@
 {
 	struct scsi_device *sdev;
 
-	shost_for_each_device(sdev, port->adapter->scsi_host)
+	spin_lock(port->adapter->scsi_host->host_lock);
+	__shost_for_each_device(sdev, port->adapter->scsi_host)
 		if (sdev_to_zfcp(sdev)->port == port)
 			_zfcp_erp_lun_reopen(sdev, clear, id, 0);
+	spin_unlock(port->adapter->scsi_host->host_lock);
 }
 
 static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act)
@@ -1435,8 +1440,10 @@
 		atomic_set_mask(common_mask, &port->status);
 	read_unlock_irqrestore(&adapter->port_list_lock, flags);
 
-	shost_for_each_device(sdev, adapter->scsi_host)
+	spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
+	__shost_for_each_device(sdev, adapter->scsi_host)
 		atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status);
+	spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
 }
 
 /**
@@ -1470,11 +1477,13 @@
 	}
 	read_unlock_irqrestore(&adapter->port_list_lock, flags);
 
-	shost_for_each_device(sdev, adapter->scsi_host) {
+	spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
+	__shost_for_each_device(sdev, adapter->scsi_host) {
 		atomic_clear_mask(common_mask, &sdev_to_zfcp(sdev)->status);
 		if (clear_counter)
 			atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
 	}
+	spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
 }
 
 /**
@@ -1488,16 +1497,19 @@
 {
 	struct scsi_device *sdev;
 	u32 common_mask = mask & ZFCP_COMMON_FLAGS;
+	unsigned long flags;
 
 	atomic_set_mask(mask, &port->status);
 
 	if (!common_mask)
 		return;
 
-	shost_for_each_device(sdev, port->adapter->scsi_host)
+	spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
+	__shost_for_each_device(sdev, port->adapter->scsi_host)
 		if (sdev_to_zfcp(sdev)->port == port)
 			atomic_set_mask(common_mask,
 					&sdev_to_zfcp(sdev)->status);
+	spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
 }
 
 /**
@@ -1512,6 +1524,7 @@
 	struct scsi_device *sdev;
 	u32 common_mask = mask & ZFCP_COMMON_FLAGS;
 	u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED;
+	unsigned long flags;
 
 	atomic_clear_mask(mask, &port->status);
 
@@ -1521,13 +1534,15 @@
 	if (clear_counter)
 		atomic_set(&port->erp_counter, 0);
 
-	shost_for_each_device(sdev, port->adapter->scsi_host)
+	spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
+	__shost_for_each_device(sdev, port->adapter->scsi_host)
 		if (sdev_to_zfcp(sdev)->port == port) {
 			atomic_clear_mask(common_mask,
 					  &sdev_to_zfcp(sdev)->status);
 			if (clear_counter)
 				atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
 		}
+	spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
 }
 
 /**
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index c7e148f..9152999 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -3,7 +3,7 @@
  *
  * Implementation of FSF commands.
  *
- * Copyright IBM Corp. 2002, 2010
+ * Copyright IBM Corp. 2002, 2013
  */
 
 #define KMSG_COMPONENT "zfcp"
@@ -483,12 +483,8 @@
 
 	fc_host_port_name(shost) = nsp->fl_wwpn;
 	fc_host_node_name(shost) = nsp->fl_wwnn;
-	fc_host_port_id(shost) = ntoh24(bottom->s_id);
-	fc_host_speed(shost) =
-		zfcp_fsf_convert_portspeed(bottom->fc_link_speed);
 	fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
 
-	adapter->hydra_version = bottom->adapter_type;
 	adapter->timer_ticks = bottom->timer_interval & ZFCP_FSF_TIMER_INT_MASK;
 	adapter->stat_read_buf_num = max(bottom->status_read_buf_num,
 					 (u16)FSF_STATUS_READS_RECOM);
@@ -496,6 +492,19 @@
 	if (fc_host_permanent_port_name(shost) == -1)
 		fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
 
+	zfcp_scsi_set_prot(adapter);
+
+	/* no error return above here, otherwise must fix call chains */
+	/* do not evaluate invalid fields */
+	if (req->qtcb->header.fsf_status == FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE)
+		return 0;
+
+	fc_host_port_id(shost) = ntoh24(bottom->s_id);
+	fc_host_speed(shost) =
+		zfcp_fsf_convert_portspeed(bottom->fc_link_speed);
+
+	adapter->hydra_version = bottom->adapter_type;
+
 	switch (bottom->fc_topology) {
 	case FSF_TOPO_P2P:
 		adapter->peer_d_id = ntoh24(bottom->peer_d_id);
@@ -517,8 +526,6 @@
 		return -EIO;
 	}
 
-	zfcp_scsi_set_prot(adapter);
-
 	return 0;
 }
 
@@ -563,8 +570,14 @@
 		fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
 		adapter->hydra_version = 0;
 
+		/* avoids adapter shutdown to be able to recognize
+		 * events such as LINK UP */
+		atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
+				&adapter->status);
 		zfcp_fsf_link_down_info_eval(req,
 			&qtcb->header.fsf_status_qual.link_down_info);
+		if (zfcp_fsf_exchange_config_evaluate(req))
+			return;
 		break;
 	default:
 		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh3");
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 665e3cf..de0598e 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -224,11 +224,9 @@
 
 static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
 {
-	spin_lock_irq(&qdio->req_q_lock);
 	if (atomic_read(&qdio->req_q_free) ||
 	    !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
 		return 1;
-	spin_unlock_irq(&qdio->req_q_lock);
 	return 0;
 }
 
@@ -246,9 +244,8 @@
 {
 	long ret;
 
-	spin_unlock_irq(&qdio->req_q_lock);
-	ret = wait_event_interruptible_timeout(qdio->req_q_wq,
-			       zfcp_qdio_sbal_check(qdio), 5 * HZ);
+	ret = wait_event_interruptible_lock_irq_timeout(qdio->req_q_wq,
+		       zfcp_qdio_sbal_check(qdio), qdio->req_q_lock, 5 * HZ);
 
 	if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
 		return -EIO;
@@ -262,7 +259,6 @@
 		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1");
 	}
 
-	spin_lock_irq(&qdio->req_q_lock);
 	return -EIO;
 }
 
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 7b31e3f..7b35364 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -3,7 +3,7 @@
  *
  * Interface to Linux SCSI midlayer.
  *
- * Copyright IBM Corp. 2002, 2010
+ * Copyright IBM Corp. 2002, 2013
  */
 
 #define KMSG_COMPONENT "zfcp"
@@ -311,8 +311,12 @@
 	.proc_name		 = "zfcp",
 	.can_queue		 = 4096,
 	.this_id		 = -1,
-	.sg_tablesize		 = 1, /* adjusted later */
-	.max_sectors		 = 8, /* adjusted later */
+	.sg_tablesize		 = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
+				     * ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2),
+				   /* GCD, adjusted later */
+	.max_sectors		 = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
+				     * ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2) * 8,
+				   /* GCD, adjusted later */
 	.dma_boundary		 = ZFCP_QDIO_SBALE_LEN - 1,
 	.cmd_per_lun		 = 1,
 	.use_clustering		 = 1,
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 86af29f..1348fa4 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1353,7 +1353,6 @@
 	tristate "Emulex LightPulse Fibre Channel Support"
 	depends on PCI && SCSI
 	select SCSI_FC_ATTRS
-	select GENERIC_CSUM
 	select CRC_T10DIF
 	help
           This lpfc driver supports the Emulex LightPulse
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 408a42e..f0d432c 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -771,6 +771,8 @@
 static int aac_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
 {
 	struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
+	if (!capable(CAP_SYS_RAWIO))
+		return -EPERM;
 	return aac_compat_do_ioctl(dev, cmd, (unsigned long)arg);
 }
 
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 0f56d8d..7e17107 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -93,6 +93,9 @@
 			int send_it = 0;
 			extern int aac_sync_mode;
 
+			src_writel(dev, MUnit.ODR_C, bellbits);
+			src_readl(dev, MUnit.ODR_C);
+
 			if (!aac_sync_mode) {
 				src_writel(dev, MUnit.ODR_C, bellbits);
 				src_readl(dev, MUnit.ODR_C);
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index 34552bf..55548dc 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -530,7 +530,7 @@
 static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
 			     struct esp_lun_data *lp)
 {
-	if (!ent->tag[0]) {
+	if (!ent->orig_tag[0]) {
 		/* Non-tagged, slot already taken?  */
 		if (lp->non_tagged_cmd)
 			return -EBUSY;
@@ -564,9 +564,9 @@
 			return -EBUSY;
 	}
 
-	BUG_ON(lp->tagged_cmds[ent->tag[1]]);
+	BUG_ON(lp->tagged_cmds[ent->orig_tag[1]]);
 
-	lp->tagged_cmds[ent->tag[1]] = ent;
+	lp->tagged_cmds[ent->orig_tag[1]] = ent;
 	lp->num_tagged++;
 
 	return 0;
@@ -575,9 +575,9 @@
 static void esp_free_lun_tag(struct esp_cmd_entry *ent,
 			     struct esp_lun_data *lp)
 {
-	if (ent->tag[0]) {
-		BUG_ON(lp->tagged_cmds[ent->tag[1]] != ent);
-		lp->tagged_cmds[ent->tag[1]] = NULL;
+	if (ent->orig_tag[0]) {
+		BUG_ON(lp->tagged_cmds[ent->orig_tag[1]] != ent);
+		lp->tagged_cmds[ent->orig_tag[1]] = NULL;
 		lp->num_tagged--;
 	} else {
 		BUG_ON(lp->non_tagged_cmd != ent);
@@ -667,6 +667,8 @@
 			ent->tag[0] = 0;
 			ent->tag[1] = 0;
 		}
+		ent->orig_tag[0] = ent->tag[0];
+		ent->orig_tag[1] = ent->tag[1];
 
 		if (esp_alloc_lun_tag(ent, lp) < 0)
 			continue;
diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h
index 28e22ac..cd68805 100644
--- a/drivers/scsi/esp_scsi.h
+++ b/drivers/scsi/esp_scsi.h
@@ -271,6 +271,7 @@
 #define ESP_CMD_FLAG_AUTOSENSE	0x04 /* Doing automatic REQUEST_SENSE */
 
 	u8			tag[2];
+	u8			orig_tag[2];
 
 	u8			status;
 	u8			message;
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index 9bb020a..0d30ca8 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -491,6 +491,7 @@
 	struct isci_tmf           tmf;
 	int                       ret = TMF_RESP_FUNC_FAILED;
 	unsigned long             flags;
+	int                       target_done_already = 0;
 
 	/* Get the isci_request reference from the task.  Note that
 	 * this check does not depend on the pending request list
@@ -505,9 +506,11 @@
 	/* If task is already done, the request isn't valid */
 	if (!(task->task_state_flags & SAS_TASK_STATE_DONE) &&
 	    (task->task_state_flags & SAS_TASK_AT_INITIATOR) &&
-	    old_request)
+	    old_request) {
 		idev = isci_get_device(task->dev->lldd_dev);
-
+		target_done_already = test_bit(IREQ_COMPLETE_IN_TARGET,
+					       &old_request->flags);
+	}
 	spin_unlock(&task->task_state_lock);
 	spin_unlock_irqrestore(&ihost->scic_lock, flags);
 
@@ -561,7 +564,7 @@
 
 	if (task->task_proto == SAS_PROTOCOL_SMP ||
 	    sas_protocol_ata(task->task_proto) ||
-	    test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags) ||
+	    target_done_already ||
 	    test_bit(IDEV_GONE, &idev->flags)) {
 
 		spin_unlock_irqrestore(&ihost->scic_lock, flags);
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 3a9ddae..9b60dc1 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -3508,11 +3508,21 @@
 		break;
 	}
 
-	/*
-	 * We expect the FW state to be READY
-	 */
-	if (megasas_transition_to_ready(instance, 0))
-		goto fail_ready_state;
+	if (megasas_transition_to_ready(instance, 0)) {
+		atomic_set(&instance->fw_reset_no_pci_access, 1);
+		instance->instancet->adp_reset
+			(instance, instance->reg_set);
+		atomic_set(&instance->fw_reset_no_pci_access, 0);
+		dev_info(&instance->pdev->dev,
+			"megasas: FW restarted successfully from %s!\n",
+			__func__);
+
+		/*waitting for about 30 second before retry*/
+		ssleep(30);
+
+		if (megasas_transition_to_ready(instance, 0))
+			goto fail_ready_state;
+	}
 
 	/* Check if MSI-X is supported while in ready state */
 	msix_enable = (instance->instancet->read_fw_status_reg(reg_set) &
@@ -4852,10 +4862,12 @@
 				    sense, sense_handle);
 	}
 
-	for (i = 0; i < ioc->sge_count && kbuff_arr[i]; i++) {
-		dma_free_coherent(&instance->pdev->dev,
-				    kern_sge32[i].length,
-				    kbuff_arr[i], kern_sge32[i].phys_addr);
+	for (i = 0; i < ioc->sge_count; i++) {
+		if (kbuff_arr[i])
+			dma_free_coherent(&instance->pdev->dev,
+					  kern_sge32[i].length,
+					  kbuff_arr[i],
+					  kern_sge32[i].phys_addr);
 	}
 
 	megasas_return_cmd(instance, cmd);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index bcb23d2..c76b18b 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -80,10 +80,6 @@
 module_param(msix_disable, int, 0);
 MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
 
-static int missing_delay[2] = {-1, -1};
-module_param_array(missing_delay, int, NULL, 0);
-MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay");
-
 static int mpt2sas_fwfault_debug;
 MODULE_PARM_DESC(mpt2sas_fwfault_debug, " enable detection of firmware fault "
 	"and halt firmware - (default=0)");
@@ -2199,7 +2195,7 @@
 }
 
 /**
- * _base_update_missing_delay - change the missing delay timers
+ * mpt2sas_base_update_missing_delay - change the missing delay timers
  * @ioc: per adapter object
  * @device_missing_delay: amount of time till device is reported missing
  * @io_missing_delay: interval IO is returned when there is a missing device
@@ -2210,8 +2206,8 @@
  * delay, as well as the io missing delay. This should be called at driver
  * load time.
  */
-static void
-_base_update_missing_delay(struct MPT2SAS_ADAPTER *ioc,
+void
+mpt2sas_base_update_missing_delay(struct MPT2SAS_ADAPTER *ioc,
 	u16 device_missing_delay, u8 io_missing_delay)
 {
 	u16 dmd, dmd_new, dmd_orignal;
@@ -4407,9 +4403,6 @@
 	if (r)
 		goto out_free_resources;
 
-	if (missing_delay[0] != -1 && missing_delay[1] != -1)
-		_base_update_missing_delay(ioc, missing_delay[0],
-		    missing_delay[1]);
 	ioc->non_operational_loop = 0;
 
 	return 0;
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index 4caaac1..1130197 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -1055,6 +1055,9 @@
 
 void mpt2sas_halt_firmware(struct MPT2SAS_ADAPTER *ioc);
 
+void mpt2sas_base_update_missing_delay(struct MPT2SAS_ADAPTER *ioc,
+	u16 device_missing_delay, u8 io_missing_delay);
+
 int mpt2sas_port_enable(struct MPT2SAS_ADAPTER *ioc);
 
 /* scsih shared API */
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index c6bdc92..8dbe500 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -101,6 +101,10 @@
 module_param(max_sectors, ushort, 0);
 MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767  default=32767");
 
+static int missing_delay[2] = {-1, -1};
+module_param_array(missing_delay, int, NULL, 0);
+MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay");
+
 /* scsi-mid layer global parmeter is max_report_luns, which is 511 */
 #define MPT2SAS_MAX_LUN (16895)
 static int max_lun = MPT2SAS_MAX_LUN;
@@ -3994,11 +3998,7 @@
 			else
 				mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
 		} else
-/* MPI Revision I (UNIT = 0xA) - removed MPI2_SCSIIO_CONTROL_UNTAGGED */
-/*			mpi_control |= MPI2_SCSIIO_CONTROL_UNTAGGED;
- */
-			mpi_control |= (0x500);
-
+			mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
 	} else
 		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
 	/* Make sure Device is not raid volume.
@@ -7303,7 +7303,9 @@
 	case MPT2SAS_PORT_ENABLE_COMPLETE:
 		ioc->start_scan = 0;
 
-
+		if (missing_delay[0] != -1 && missing_delay[1] != -1)
+			mpt2sas_base_update_missing_delay(ioc, missing_delay[0],
+				missing_delay[1]);
 
 		dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "port enable: complete "
 		    "from worker thread\n", ioc->name));
diff --git a/drivers/scsi/mpt3sas/Makefile b/drivers/scsi/mpt3sas/Makefile
index 4c1d2e7..efb0c4c 100644
--- a/drivers/scsi/mpt3sas/Makefile
+++ b/drivers/scsi/mpt3sas/Makefile
@@ -1,5 +1,5 @@
 # mpt3sas makefile
-obj-m += mpt3sas.o
+obj-$(CONFIG_SCSI_MPT3SAS) += mpt3sas.o
 mpt3sas-y +=  mpt3sas_base.o     \
 		mpt3sas_config.o \
 		mpt3sas_scsih.o      \
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index dcbf7c8..f8c4b85 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -1273,6 +1273,7 @@
 	struct MPT3SAS_DEVICE *sas_device_priv_data;
 	struct scsi_target *starget;
 	struct _raid_device *raid_device;
+	struct _sas_device *sas_device;
 	unsigned long flags;
 
 	sas_device_priv_data = kzalloc(sizeof(struct scsi_device), GFP_KERNEL);
@@ -1301,6 +1302,19 @@
 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
 	}
 
+	if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
+		spin_lock_irqsave(&ioc->sas_device_lock, flags);
+		sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+					sas_target_priv_data->sas_address);
+		if (sas_device && (sas_device->starget == NULL)) {
+			sdev_printk(KERN_INFO, sdev,
+			"%s : sas_device->starget set to starget @ %d\n",
+				__func__, __LINE__);
+			sas_device->starget = starget;
+		}
+		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+	}
+
 	return 0;
 }
 
@@ -6392,7 +6406,7 @@
 	    handle))) {
 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
 		    MPI2_IOCSTATUS_MASK;
-		if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
 			break;
 		handle = le16_to_cpu(sas_device_pg0.DevHandle);
 		device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
@@ -6494,7 +6508,7 @@
 	    &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
 		    MPI2_IOCSTATUS_MASK;
-		if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
 			break;
 		handle = le16_to_cpu(volume_pg1.DevHandle);
 
@@ -6518,7 +6532,7 @@
 		    phys_disk_num))) {
 			ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
 			    MPI2_IOCSTATUS_MASK;
-			if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+			if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
 				break;
 			phys_disk_num = pd_pg0.PhysDiskNum;
 			handle = le16_to_cpu(pd_pg0.DevHandle);
@@ -6597,7 +6611,7 @@
 
 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
 		    MPI2_IOCSTATUS_MASK;
-		if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
 			break;
 
 		handle = le16_to_cpu(expander_pg0.DevHandle);
@@ -6742,8 +6756,6 @@
 	    MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
 		    MPI2_IOCSTATUS_MASK;
-		if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
-			break;
 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
 			pr_info(MPT3SAS_FMT "\tbreak from expander scan: " \
 			    "ioc_status(0x%04x), loginfo(0x%08x)\n",
@@ -6787,8 +6799,6 @@
 	    phys_disk_num))) {
 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
 		    MPI2_IOCSTATUS_MASK;
-		if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
-			break;
 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
 			pr_info(MPT3SAS_FMT "\tbreak from phys disk scan: "\
 			    "ioc_status(0x%04x), loginfo(0x%08x)\n",
@@ -6854,8 +6864,6 @@
 	    &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
 		    MPI2_IOCSTATUS_MASK;
-		if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
-			break;
 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
 			pr_info(MPT3SAS_FMT "\tbreak from volume scan: " \
 			    "ioc_status(0x%04x), loginfo(0x%08x)\n",
@@ -6914,8 +6922,6 @@
 	    handle))) {
 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
 		    MPI2_IOCSTATUS_MASK;
-		if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
-			break;
 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
 			pr_info(MPT3SAS_FMT "\tbreak from end device scan:"\
 			    " ioc_status(0x%04x), loginfo(0x%08x)\n",
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index 1e3879d..0665f9c 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -2899,7 +2899,7 @@
 	 * reset SCSI bus
 	 */
 	nsp32_write1(base, SCSI_BUS_CONTROL, BUSCTL_RST);
-	udelay(RESET_HOLD_TIME);
+	mdelay(RESET_HOLD_TIME / 1000);
 	nsp32_write1(base, SCSI_BUS_CONTROL, 0);
 	for(i = 0; i < 5; i++) {
 		intrdat = nsp32_read2(base, IRQ_STATUS); /* dummy read */
diff --git a/drivers/scsi/osd/osd_uld.c b/drivers/scsi/osd/osd_uld.c
index 0fab6b5..9d86947 100644
--- a/drivers/scsi/osd/osd_uld.c
+++ b/drivers/scsi/osd/osd_uld.c
@@ -485,7 +485,7 @@
 	oud->class_dev.class = &osd_uld_class;
 	oud->class_dev.parent = dev;
 	oud->class_dev.release = __remove;
-	error = dev_set_name(&oud->class_dev, disk->disk_name);
+	error = dev_set_name(&oud->class_dev, "%s", disk->disk_name);
 	if (error) {
 		OSD_ERR("dev_set_name failed => %d\n", error);
 		goto err_put_cdev;
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 69dd49c..ce3f129d 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -221,7 +221,7 @@
 	pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_interrupt		= 0x01;
 	for (i = 0; i < PM8001_MAX_INB_NUM; i++) {
 		pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt	=
-			PM8001_MPI_QUEUE | (64 << 16) | (0x00<<30);
+			PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x00<<30);
 		pm8001_ha->inbnd_q_tbl[i].upper_base_addr	=
 			pm8001_ha->memoryMap.region[IB + i].phys_addr_hi;
 		pm8001_ha->inbnd_q_tbl[i].lower_base_addr	=
@@ -247,7 +247,7 @@
 	}
 	for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) {
 		pm8001_ha->outbnd_q_tbl[i].element_size_cnt	=
-			PM8001_MPI_QUEUE | (64 << 16) | (0x01<<30);
+			PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x01<<30);
 		pm8001_ha->outbnd_q_tbl[i].upper_base_addr	=
 			pm8001_ha->memoryMap.region[OB + i].phys_addr_hi;
 		pm8001_ha->outbnd_q_tbl[i].lower_base_addr	=
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 302514d..e1c4896 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -275,7 +275,7 @@
 
 	for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) {
 		pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt	=
-			PM8001_MPI_QUEUE | (64 << 16) | (0x00<<30);
+			PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x00<<30);
 		pm8001_ha->inbnd_q_tbl[i].upper_base_addr	=
 			pm8001_ha->memoryMap.region[IB + i].phys_addr_hi;
 		pm8001_ha->inbnd_q_tbl[i].lower_base_addr	=
@@ -301,7 +301,7 @@
 	}
 	for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) {
 		pm8001_ha->outbnd_q_tbl[i].element_size_cnt	=
-			PM8001_MPI_QUEUE | (64 << 16) | (0x01<<30);
+			PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x01<<30);
 		pm8001_ha->outbnd_q_tbl[i].upper_base_addr	=
 			pm8001_ha->memoryMap.region[OB + i].phys_addr_hi;
 		pm8001_ha->outbnd_q_tbl[i].lower_base_addr	=
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 15e4080..51cd27a 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -419,6 +419,8 @@
 			    __constant_cpu_to_le16(CF_SIMPLE_TAG);
 			break;
 		}
+	} else {
+		cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);
 	}
 
 	/* Load SCSI command packet. */
@@ -1308,11 +1310,11 @@
 		    fcp_cmnd->task_attribute = TSK_ORDERED;
 		    break;
 		default:
-		    fcp_cmnd->task_attribute = 0;
+		    fcp_cmnd->task_attribute = TSK_SIMPLE;
 		    break;
 		}
 	} else {
-		fcp_cmnd->task_attribute = 0;
+		fcp_cmnd->task_attribute = TSK_SIMPLE;
 	}
 
 	cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
@@ -1527,7 +1529,12 @@
 		case ORDERED_QUEUE_TAG:
 			cmd_pkt->task = TSK_ORDERED;
 			break;
+		default:
+		    cmd_pkt->task = TSK_SIMPLE;
+		    break;
 		}
+	} else {
+		cmd_pkt->task = TSK_SIMPLE;
 	}
 
 	/* Load SCSI command packet. */
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 2c0d0ec..eaa808e 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -1031,6 +1031,9 @@
 {
 	int i, result;
 
+	if (sdev->skip_vpd_pages)
+		goto fail;
+
 	/* Ask for all the pages supported by this device */
 	result = scsi_vpd_inquiry(sdev, buf, 0, buf_len);
 	if (result)
@@ -1070,8 +1073,8 @@
  * @opcode:	opcode for command to look up
  *
  * Uses the REPORT SUPPORTED OPERATION CODES to look up the given
- * opcode. Returns 0 if RSOC fails or if the command opcode is
- * unsupported. Returns 1 if the device claims to support the command.
+ * opcode. Returns -EINVAL if RSOC fails, 0 if the command opcode is
+ * unsupported and 1 if the device claims to support the command.
  */
 int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer,
 		       unsigned int len, unsigned char opcode)
@@ -1081,7 +1084,7 @@
 	int result;
 
 	if (sdev->no_report_opcodes || sdev->scsi_level < SCSI_SPC_3)
-		return 0;
+		return -EINVAL;
 
 	memset(cmd, 0, 16);
 	cmd[0] = MAINTENANCE_IN;
@@ -1097,7 +1100,7 @@
 	if (result && scsi_sense_valid(&sshdr) &&
 	    sshdr.sense_key == ILLEGAL_REQUEST &&
 	    (sshdr.asc == 0x20 || sshdr.asc == 0x24) && sshdr.ascq == 0x00)
-		return 0;
+		return -EINVAL;
 
 	if ((buffer[1] & 3) == 3) /* Command supported */
 		return 1;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 86d5220..6b9f526 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -434,6 +434,8 @@
 	list_splice_init(&shost->starved_list, &starved_list);
 
 	while (!list_empty(&starved_list)) {
+		struct request_queue *slq;
+
 		/*
 		 * As long as shost is accepting commands and we have
 		 * starved queues, call blk_run_queue. scsi_request_fn
@@ -456,11 +458,25 @@
 			continue;
 		}
 
-		spin_unlock(shost->host_lock);
-		spin_lock(sdev->request_queue->queue_lock);
-		__blk_run_queue(sdev->request_queue);
-		spin_unlock(sdev->request_queue->queue_lock);
-		spin_lock(shost->host_lock);
+		/*
+		 * Once we drop the host lock, a racing scsi_remove_device()
+		 * call may remove the sdev from the starved list and destroy
+		 * it and the queue.  Mitigate by taking a reference to the
+		 * queue and never touching the sdev again after we drop the
+		 * host lock.  Note: if __scsi_remove_device() invokes
+		 * blk_cleanup_queue() before the queue is run from this
+		 * function then blk_run_queue() will return immediately since
+		 * blk_cleanup_queue() marks the queue with QUEUE_FLAG_DYING.
+		 */
+		slq = sdev->request_queue;
+		if (!blk_get_queue(slq))
+			continue;
+		spin_unlock_irqrestore(shost->host_lock, flags);
+
+		blk_run_queue(slq);
+		blk_put_queue(slq);
+
+		spin_lock_irqsave(shost->host_lock, flags);
 	}
 	/* put any unprocessed entries back */
 	list_splice(&starved_list, &shost->starved_list);
@@ -1554,7 +1570,7 @@
 			break;
 
 		if (unlikely(!scsi_device_online(sdev))) {
-			sdev_printk(KERN_ERR, sdev,
+			sdev_printk_ratelimited(KERN_ERR, sdev,
 				    "rejecting I/O to offline device\n");
 			scsi_kill_request(req, q);
 			continue;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index c1c5552..734a29a 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -142,7 +142,7 @@
 	char *buffer_data;
 	struct scsi_mode_data data;
 	struct scsi_sense_hdr sshdr;
-	const char *temp = "temporary ";
+	static const char temp[] = "temporary ";
 	int len;
 
 	if (sdp->type != TYPE_DISK)
@@ -442,8 +442,10 @@
 
 	if (max == 0)
 		sdp->no_write_same = 1;
-	else if (max <= SD_MAX_WS16_BLOCKS)
+	else if (max <= SD_MAX_WS16_BLOCKS) {
+		sdp->no_write_same = 0;
 		sdkp->max_ws_blocks = max;
+	}
 
 	sd_config_write_same(sdkp);
 
@@ -740,7 +742,6 @@
 {
 	struct request_queue *q = sdkp->disk->queue;
 	unsigned int logical_block_size = sdkp->device->sector_size;
-	unsigned int blocks = 0;
 
 	if (sdkp->device->no_write_same) {
 		sdkp->max_ws_blocks = 0;
@@ -752,18 +753,20 @@
 	 * blocks per I/O unless the device explicitly advertises a
 	 * bigger limit.
 	 */
-	if (sdkp->max_ws_blocks == 0)
-		sdkp->max_ws_blocks = SD_MAX_WS10_BLOCKS;
-
-	if (sdkp->ws16 || sdkp->max_ws_blocks > SD_MAX_WS10_BLOCKS)
-		blocks = min_not_zero(sdkp->max_ws_blocks,
-				      (u32)SD_MAX_WS16_BLOCKS);
-	else
-		blocks = min_not_zero(sdkp->max_ws_blocks,
-				      (u32)SD_MAX_WS10_BLOCKS);
+	if (sdkp->max_ws_blocks > SD_MAX_WS10_BLOCKS)
+		sdkp->max_ws_blocks = min_not_zero(sdkp->max_ws_blocks,
+						   (u32)SD_MAX_WS16_BLOCKS);
+	else if (sdkp->ws16 || sdkp->ws10 || sdkp->device->no_report_opcodes)
+		sdkp->max_ws_blocks = min_not_zero(sdkp->max_ws_blocks,
+						   (u32)SD_MAX_WS10_BLOCKS);
+	else {
+		sdkp->device->no_write_same = 1;
+		sdkp->max_ws_blocks = 0;
+	}
 
 out:
-	blk_queue_max_write_same_sectors(q, blocks * (logical_block_size >> 9));
+	blk_queue_max_write_same_sectors(q, sdkp->max_ws_blocks *
+					 (logical_block_size >> 9));
 }
 
 /**
@@ -825,10 +828,17 @@
 
 static void sd_unprep_fn(struct request_queue *q, struct request *rq)
 {
+	struct scsi_cmnd *SCpnt = rq->special;
+
 	if (rq->cmd_flags & REQ_DISCARD) {
 		free_page((unsigned long)rq->buffer);
 		rq->buffer = NULL;
 	}
+	if (SCpnt->cmnd != rq->cmd) {
+		mempool_free(SCpnt->cmnd, sd_cdb_pool);
+		SCpnt->cmnd = NULL;
+		SCpnt->cmd_len = 0;
+	}
 }
 
 /**
@@ -1707,21 +1717,6 @@
 	if (rq_data_dir(SCpnt->request) == READ && scsi_prot_sg_count(SCpnt))
 		sd_dif_complete(SCpnt, good_bytes);
 
-	if (scsi_host_dif_capable(sdkp->device->host, sdkp->protection_type)
-	    == SD_DIF_TYPE2_PROTECTION && SCpnt->cmnd != SCpnt->request->cmd) {
-
-		/* We have to print a failed command here as the
-		 * extended CDB gets freed before scsi_io_completion()
-		 * is called.
-		 */
-		if (result)
-			scsi_print_command(SCpnt);
-
-		mempool_free(SCpnt->cmnd, sd_cdb_pool);
-		SCpnt->cmnd = NULL;
-		SCpnt->cmd_len = 0;
-	}
-
 	return good_bytes;
 }
 
@@ -2414,14 +2409,9 @@
 			}
 		}
 
-		if (modepage == 0x3F) {
-			sd_printk(KERN_ERR, sdkp, "No Caching mode page "
-				  "present\n");
-			goto defaults;
-		} else if ((buffer[offset] & 0x3f) != modepage) {
-			sd_printk(KERN_ERR, sdkp, "Got wrong page\n");
-			goto defaults;
-		}
+		sd_printk(KERN_ERR, sdkp, "No Caching mode page found\n");
+		goto defaults;
+
 	Page_found:
 		if (modepage == 8) {
 			sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0);
@@ -2635,9 +2625,24 @@
 
 static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
 {
-	if (scsi_report_opcode(sdkp->device, buffer, SD_BUF_SIZE,
-			       WRITE_SAME_16))
+	struct scsi_device *sdev = sdkp->device;
+
+	if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, INQUIRY) < 0) {
+		sdev->no_report_opcodes = 1;
+
+		/* Disable WRITE SAME if REPORT SUPPORTED OPERATION
+		 * CODES is unsupported and the device has an ATA
+		 * Information VPD page (SAT).
+		 */
+		if (!scsi_get_vpd_page(sdev, 0x89, buffer, SD_BUF_SIZE))
+			sdev->no_write_same = 1;
+	}
+
+	if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME_16) == 1)
 		sdkp->ws16 = 1;
+
+	if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME) == 1)
+		sdkp->ws10 = 1;
 }
 
 static int sd_try_extended_inquiry(struct scsi_device *sdp)
@@ -2838,6 +2843,7 @@
 		gd->events |= DISK_EVENT_MEDIA_CHANGE;
 	}
 
+	blk_pm_runtime_init(sdp->request_queue, dev);
 	add_disk(gd);
 	if (sdkp->capacity)
 		sd_dif_config_host(sdkp);
@@ -2846,7 +2852,6 @@
 
 	sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
 		  sdp->removable ? "removable " : "");
-	blk_pm_runtime_init(sdp->request_queue, dev);
 	scsi_autopm_put_device(sdp);
 	put_device(&sdkp->dev);
 }
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 2386aeb..7a049de 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -84,6 +84,7 @@
 	unsigned	lbpws : 1;
 	unsigned	lbpws10 : 1;
 	unsigned	lbpvpd : 1;
+	unsigned	ws10 : 1;
 	unsigned	ws16 : 1;
 };
 #define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev)
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 2168258..74b88ef 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -751,7 +751,7 @@
 
 		vscsi->affinity_hint_set = true;
 	} else {
-		for (i = 0; i < vscsi->num_queues - VIRTIO_SCSI_VQ_BASE; i++)
+		for (i = 0; i < vscsi->num_queues; i++)
 			virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1);
 
 		vscsi->affinity_hint_set = false;
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 92a9345..1c0ce9e 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -198,6 +198,15 @@
 	  This enables using the Freescale i.MX SPI controllers in master
 	  mode.
 
+config SPI_INTEL_MID_SSP
+	tristate "SSP SPI controller driver for Intel MID platforms (EXPERIMENTAL)"
+	depends on SPI_MASTER && INTEL_MID_DMAC
+	help
+	  This is the unified SSP SPI slave controller driver for the Intel
+	  MID platforms (Moorestown, Medfield, Clovertrial and
+	  Merrifield), primarily used to implement a SPI host controller
+	  driver over a SSP host controller.
+
 config SPI_LM70_LLP
 	tristate "Parallel port adapter for LM70 eval board (DEVELOPMENT)"
 	depends on PARPORT
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 33f9c09..7d70a7b 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -7,7 +7,7 @@
 # small core, mostly translating board-specific
 # config declarations into driver model code
 obj-$(CONFIG_SPI_MASTER)		+= spi.o
-obj-$(CONFIG_SPI_SPIDEV)		+= spidev.o
+obj-$(CONFIG_SPI_SPIDEV)		+= spidev.o spidev_info.o
 
 # SPI master controller drivers (bus)
 obj-$(CONFIG_SPI_ALTERA)		+= spi-altera.o
@@ -35,6 +35,7 @@
 obj-$(CONFIG_SPI_FSL_SPI)		+= spi-fsl-spi.o
 obj-$(CONFIG_SPI_GPIO)			+= spi-gpio.o
 obj-$(CONFIG_SPI_IMX)			+= spi-imx.o
+obj-$(CONFIG_SPI_INTEL_MID_SSP)		+= intel_mid_ssp_spi.o
 obj-$(CONFIG_SPI_LM70_LLP)		+= spi-lm70llp.o
 obj-$(CONFIG_SPI_MPC512x_PSC)		+= spi-mpc512x-psc.o
 obj-$(CONFIG_SPI_MPC52xx_PSC)		+= spi-mpc52xx-psc.o
diff --git a/drivers/spi/intel_mid_ssp_spi.c b/drivers/spi/intel_mid_ssp_spi.c
new file mode 100644
index 0000000..e4bb724
--- /dev/null
+++ b/drivers/spi/intel_mid_ssp_spi.c
@@ -0,0 +1,1650 @@
+/*
+ * intel_mid_ssp_spi.c
+ * This driver supports Bulverde SSP core used on Intel MID platforms
+ * It supports SSP of Moorestown & Medfield platforms and handles clock
+ * slave & master modes.
+ *
+ * Copyright (c) 2010, Intel Corporation.
+ *  Ken Mills <ken.k.mills@intel.com>
+ *  Sylvain Centelles <sylvain.centelles@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+/*
+ * Note:
+ *
+ * Supports DMA and non-interrupt polled transfers.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/highmem.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/intel_mid_dma.h>
+#include <linux/pm_qos.h>
+#include <linux/pm_runtime.h>
+#include <linux/completion.h>
+#include <linux/acpi.h>
+#include <asm/intel-mid.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/intel_mid_ssp_spi.h>
+
+#define DRIVER_NAME "intel_mid_ssp_spi_unified"
+
+MODULE_AUTHOR("Ken Mills");
+MODULE_DESCRIPTION("Bulverde SSP core SPI contoller");
+MODULE_LICENSE("GPL");
+
+static int ssp_timing_wr;
+
+#ifdef DUMP_RX
+static void dump_trailer(const struct device *dev, char *buf, int len, int sz)
+{
+	int tlen1 = (len < sz ? len : sz);
+	int tlen2 =  ((len - sz) > sz) ? sz : (len - sz);
+	unsigned char *p;
+	static char msg[MAX_SPI_TRANSFER_SIZE];
+
+	memset(msg, '\0', sizeof(msg));
+	p = buf;
+	while (p < buf + tlen1)
+		sprintf(msg, "%s%02x", msg, (unsigned int)*p++);
+
+	if (tlen2 > 0) {
+		sprintf(msg, "%s .....", msg);
+		p = (buf+len) - tlen2;
+		while (p < buf + len)
+			sprintf(msg, "%s%02x", msg, (unsigned int)*p++);
+	}
+
+	dev_info(dev, "DUMP: %p[0:%d ... %d:%d]:%s", buf, tlen1 - 1,
+		   len-tlen2, len - 1, msg);
+}
+#endif
+
+static inline u8 ssp_cfg_get_mode(u8 ssp_cfg)
+{
+	if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER ||
+	    intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_ANNIEDALE)
+		return (ssp_cfg) & 0x03;
+	else
+		return (ssp_cfg) & 0x07;
+}
+
+static inline u8 ssp_cfg_get_spi_bus_nb(u8 ssp_cfg)
+{
+	if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER ||
+	    intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_ANNIEDALE)
+		return ((ssp_cfg) >> 2) & 0x07;
+	else
+		return ((ssp_cfg) >> 3) & 0x07;
+}
+
+static inline u8 ssp_cfg_is_spi_slave(u8 ssp_cfg)
+{
+	if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER ||
+	    intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_ANNIEDALE)
+		return (ssp_cfg) & 0x20;
+	else
+		return (ssp_cfg) & 0x40;
+}
+
+static inline u32 is_tx_fifo_empty(struct ssp_drv_context *sspc)
+{
+	u32 sssr;
+	sssr = read_SSSR(sspc->ioaddr);
+	if ((sssr & SSSR_TFL_MASK) || (sssr & SSSR_TNF) == 0)
+		return 0;
+	else
+		return 1;
+}
+
+static inline u32 is_rx_fifo_empty(struct ssp_drv_context *sspc)
+{
+	return ((read_SSSR(sspc->ioaddr) & SSSR_RNE) == 0);
+}
+
+static inline void disable_interface(struct ssp_drv_context *sspc)
+{
+	void *reg = sspc->ioaddr;
+	write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
+}
+
+static inline void disable_triggers(struct ssp_drv_context *sspc)
+{
+	void *reg = sspc->ioaddr;
+	write_SSCR1(read_SSCR1(reg) & ~sspc->cr1_sig, reg);
+}
+
+
+static void flush(struct ssp_drv_context *sspc)
+{
+	void *reg = sspc->ioaddr;
+	u32 i = 0;
+
+	/* If the transmit fifo is not empty, reset the interface. */
+	if (!is_tx_fifo_empty(sspc)) {
+		dev_err(&sspc->pdev->dev, "TX FIFO not empty. Reset of SPI IF");
+		disable_interface(sspc);
+		return;
+	}
+
+	dev_dbg(&sspc->pdev->dev, " SSSR=%x\r\n", read_SSSR(reg));
+	while (!is_rx_fifo_empty(sspc) && (i < SPI_FIFO_SIZE + 1)) {
+		read_SSDR(reg);
+		i++;
+	}
+	WARN(i > 0, "%d words flush occured\n", i);
+
+	return;
+}
+
+static int null_writer(struct ssp_drv_context *sspc)
+{
+	void *reg = sspc->ioaddr;
+	u8 n_bytes = sspc->n_bytes;
+
+	if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
+		|| (sspc->tx == sspc->tx_end))
+		return 0;
+
+	write_SSDR(0, reg);
+	sspc->tx += n_bytes;
+
+	return 1;
+}
+
+static int null_reader(struct ssp_drv_context *sspc)
+{
+	void *reg = sspc->ioaddr;
+	u8 n_bytes = sspc->n_bytes;
+
+	while ((read_SSSR(reg) & SSSR_RNE)
+		&& (sspc->rx < sspc->rx_end)) {
+		read_SSDR(reg);
+		sspc->rx += n_bytes;
+	}
+
+	return sspc->rx == sspc->rx_end;
+}
+
+static int u8_writer(struct ssp_drv_context *sspc)
+{
+	void *reg = sspc->ioaddr;
+	if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
+		|| (sspc->tx == sspc->tx_end))
+		return 0;
+
+	write_SSDR(*(u8 *)(sspc->tx), reg);
+	++sspc->tx;
+
+	return 1;
+}
+
+static int u8_reader(struct ssp_drv_context *sspc)
+{
+	void *reg = sspc->ioaddr;
+	while ((read_SSSR(reg) & SSSR_RNE)
+		&& (sspc->rx < sspc->rx_end)) {
+		*(u8 *)(sspc->rx) = read_SSDR(reg);
+		++sspc->rx;
+	}
+
+	return sspc->rx == sspc->rx_end;
+}
+
+static int u16_writer(struct ssp_drv_context *sspc)
+{
+	void *reg = sspc->ioaddr;
+	if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
+		|| (sspc->tx == sspc->tx_end))
+		return 0;
+
+	write_SSDR(*(u16 *)(sspc->tx), reg);
+	sspc->tx += 2;
+
+	return 1;
+}
+
+static int u16_reader(struct ssp_drv_context *sspc)
+{
+	void *reg = sspc->ioaddr;
+	while ((read_SSSR(reg) & SSSR_RNE) && (sspc->rx < sspc->rx_end)) {
+		*(u16 *)(sspc->rx) = read_SSDR(reg);
+		sspc->rx += 2;
+	}
+
+	return sspc->rx == sspc->rx_end;
+}
+
+static int u32_writer(struct ssp_drv_context *sspc)
+{
+	void *reg = sspc->ioaddr;
+	if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
+		|| (sspc->tx == sspc->tx_end))
+		return 0;
+
+	write_SSDR(*(u32 *)(sspc->tx), reg);
+	sspc->tx += 4;
+
+	return 1;
+}
+
+static int u32_reader(struct ssp_drv_context *sspc)
+{
+	void *reg = sspc->ioaddr;
+	while ((read_SSSR(reg) & SSSR_RNE) && (sspc->rx < sspc->rx_end)) {
+		*(u32 *)(sspc->rx) = read_SSDR(reg);
+		sspc->rx += 4;
+	}
+
+	return sspc->rx == sspc->rx_end;
+}
+
+static bool chan_filter(struct dma_chan *chan, void *param)
+{
+	struct ssp_drv_context *sspc = param;
+	bool ret = false;
+
+	if (!sspc->dmac1)
+		return ret;
+
+	if (chan->device->dev == &sspc->dmac1->dev)
+		ret = true;
+
+	return ret;
+}
+
+/**
+ * unmap_dma_buffers() - Unmap the DMA buffers used during the last transfer.
+ * @sspc:	Pointer to the private driver context
+ */
+static void unmap_dma_buffers(struct ssp_drv_context *sspc)
+{
+	struct device *dev = &sspc->pdev->dev;
+
+	if (!sspc->dma_mapped)
+		return;
+	dma_unmap_single(dev, sspc->rx_dma, sspc->len, PCI_DMA_FROMDEVICE);
+	dma_unmap_single(dev, sspc->tx_dma, sspc->len, PCI_DMA_TODEVICE);
+	sspc->dma_mapped = 0;
+}
+
+/**
+ * intel_mid_ssp_spi_dma_done() - End of DMA transfer callback
+ * @arg:	Pointer to the data provided at callback registration
+ *
+ * This function is set as callback for both RX and TX DMA transfers. The
+ * RX or TX 'done' flag is set acording to the direction of the ended
+ * transfer. Then, if both RX and TX flags are set, it means that the
+ * transfer job is completed.
+ */
+static void intel_mid_ssp_spi_dma_done(void *arg)
+{
+	struct callback_param *cb_param = (struct callback_param *)arg;
+	struct ssp_drv_context *sspc = cb_param->drv_context;
+	struct device *dev = &sspc->pdev->dev;
+	void *reg = sspc->ioaddr;
+
+	if (cb_param->direction == TX_DIRECTION) {
+		dma_sync_single_for_cpu(dev, sspc->tx_dma,
+			sspc->len, DMA_TO_DEVICE);
+		sspc->txdma_done = 1;
+	} else {
+		sspc->rxdma_done = 1;
+		dma_sync_single_for_cpu(dev, sspc->rx_dma,
+			sspc->len, DMA_FROM_DEVICE);
+	}
+
+	dev_dbg(dev, "DMA callback for direction %d [RX done:%d] [TX done:%d]\n",
+		cb_param->direction, sspc->rxdma_done,
+		sspc->txdma_done);
+
+	if (sspc->txdma_done && sspc->rxdma_done) {
+		/* Clear Status Register */
+		write_SSSR(sspc->clear_sr, reg);
+		dev_dbg(dev, "DMA done\n");
+		/* Disable Triggers to DMA or to CPU*/
+		disable_triggers(sspc);
+		unmap_dma_buffers(sspc);
+
+		queue_work(sspc->dma_wq, &sspc->complete_work);
+	}
+}
+
+/**
+ * intel_mid_ssp_spi_dma_init() - Initialize DMA
+ * @sspc:	Pointer to the private driver context
+ *
+ * This function is called at driver setup phase to allocate DMA
+ * ressources.
+ */
+static void intel_mid_ssp_spi_dma_init(struct ssp_drv_context *sspc)
+{
+	struct intel_mid_dma_slave *rxs, *txs;
+	struct dma_slave_config *ds;
+	dma_cap_mask_t mask;
+	struct device *dev = &sspc->pdev->dev;
+	unsigned int device_id;
+
+	/* Configure RX channel parameters */
+	rxs = &sspc->dmas_rx;
+	ds = &rxs->dma_slave;
+
+	ds->direction = DMA_FROM_DEVICE;
+	rxs->hs_mode = LNW_DMA_HW_HS;
+	rxs->cfg_mode = LNW_DMA_PER_TO_MEM;
+	ds->dst_addr_width = sspc->n_bytes;
+	ds->src_addr_width = sspc->n_bytes;
+
+	if (sspc->quirks & QUIRKS_PLATFORM_MRFL)
+		rxs->device_instance = sspc->master->bus_num;
+	else
+		rxs->device_instance = 0;
+
+	/* Use a DMA burst according to the FIFO thresholds */
+	if (sspc->rx_fifo_threshold == 8) {
+		ds->src_maxburst = LNW_DMA_MSIZE_8;
+		ds->dst_maxburst = LNW_DMA_MSIZE_8;
+	} else if (sspc->rx_fifo_threshold == 4) {
+		ds->src_maxburst = LNW_DMA_MSIZE_4;
+		ds->dst_maxburst = LNW_DMA_MSIZE_4;
+	} else {
+		ds->src_maxburst = LNW_DMA_MSIZE_1;
+		ds->dst_maxburst = LNW_DMA_MSIZE_1;
+	}
+
+	/* Configure TX channel parameters */
+	txs = &sspc->dmas_tx;
+	ds = &txs->dma_slave;
+
+	ds->direction = DMA_TO_DEVICE;
+	txs->hs_mode = LNW_DMA_HW_HS;
+	txs->cfg_mode = LNW_DMA_MEM_TO_PER;
+	ds->src_addr_width = sspc->n_bytes;
+	ds->dst_addr_width = sspc->n_bytes;
+
+	if (sspc->quirks & QUIRKS_PLATFORM_MRFL)
+		txs->device_instance = sspc->master->bus_num;
+	else
+		txs->device_instance = 0;
+
+	/* Use a DMA burst according to the FIFO thresholds */
+	if (sspc->rx_fifo_threshold == 8) {
+		ds->src_maxburst = LNW_DMA_MSIZE_8;
+		ds->dst_maxburst = LNW_DMA_MSIZE_8;
+	} else if (sspc->rx_fifo_threshold == 4) {
+		ds->src_maxburst = LNW_DMA_MSIZE_4;
+		ds->dst_maxburst = LNW_DMA_MSIZE_4;
+	} else {
+		ds->src_maxburst = LNW_DMA_MSIZE_1;
+		ds->dst_maxburst = LNW_DMA_MSIZE_1;
+	}
+
+	/* Nothing more to do if already initialized */
+	if (sspc->dma_initialized)
+		return;
+
+	/* Use DMAC1 */
+	if (sspc->quirks & QUIRKS_PLATFORM_MRFL)
+		device_id = PCI_MRFL_DMAC_ID;
+
+	sspc->dmac1 = pci_get_device(PCI_VENDOR_ID_INTEL, device_id, NULL);
+	if (!sspc->dmac1) {
+		dev_err(dev, "Can't find DMAC1");
+		return;
+	}
+
+	if (sspc->quirks & QUIRKS_SRAM_ADDITIONAL_CPY) {
+		sspc->virt_addr_sram_rx = ioremap_nocache(SRAM_BASE_ADDR,
+				2 * MAX_SPI_TRANSFER_SIZE);
+		if (sspc->virt_addr_sram_rx)
+			sspc->virt_addr_sram_tx = sspc->virt_addr_sram_rx +
+							MAX_SPI_TRANSFER_SIZE;
+		else
+			dev_err(dev, "Virt_addr_sram_rx is null\n");
+	}
+
+	/* 1. Allocate rx channel */
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_MEMCPY, mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	sspc->rxchan = dma_request_channel(mask, chan_filter, sspc);
+	if (!sspc->rxchan)
+		goto err_exit;
+
+	sspc->rxchan->private = rxs;
+
+	/* 2. Allocate tx channel */
+	dma_cap_set(DMA_SLAVE, mask);
+	dma_cap_set(DMA_MEMCPY, mask);
+
+	sspc->txchan = dma_request_channel(mask, chan_filter, sspc);
+	if (!sspc->txchan)
+		goto free_rxchan;
+	else
+		sspc->txchan->private = txs;
+
+	/* set the dma done bit to 1 */
+	sspc->txdma_done = 1;
+	sspc->rxdma_done = 1;
+
+	sspc->tx_param.drv_context  = sspc;
+	sspc->tx_param.direction = TX_DIRECTION;
+	sspc->rx_param.drv_context  = sspc;
+	sspc->rx_param.direction = RX_DIRECTION;
+
+	sspc->dma_initialized = 1;
+	return;
+
+free_rxchan:
+	dma_release_channel(sspc->rxchan);
+err_exit:
+	dev_err(dev, "Error : DMA Channel Not available\n");
+
+	if (sspc->quirks & QUIRKS_SRAM_ADDITIONAL_CPY)
+		iounmap(sspc->virt_addr_sram_rx);
+
+	pci_dev_put(sspc->dmac1);
+	return;
+}
+
+/**
+ * intel_mid_ssp_spi_dma_exit() - Release DMA ressources
+ * @sspc:	Pointer to the private driver context
+ */
+static void intel_mid_ssp_spi_dma_exit(struct ssp_drv_context *sspc)
+{
+	dma_release_channel(sspc->txchan);
+	dma_release_channel(sspc->rxchan);
+
+	if (sspc->quirks & QUIRKS_SRAM_ADDITIONAL_CPY)
+		iounmap(sspc->virt_addr_sram_rx);
+
+	pci_dev_put(sspc->dmac1);
+}
+
+/**
+ * dma_transfer() - Initiate a DMA transfer
+ * @sspc:	Pointer to the private driver context
+ */
+static void dma_transfer(struct ssp_drv_context *sspc)
+{
+	dma_addr_t ssdr_addr;
+	struct dma_async_tx_descriptor *txdesc = NULL, *rxdesc = NULL;
+	struct dma_chan *txchan, *rxchan;
+	enum dma_ctrl_flags flag;
+	struct device *dev = &sspc->pdev->dev;
+
+	/* get Data Read/Write address */
+	ssdr_addr = (dma_addr_t)(sspc->paddr + 0x10);
+
+	if (sspc->tx_dma)
+		sspc->txdma_done = 0;
+
+	if (sspc->rx_dma)
+		sspc->rxdma_done = 0;
+
+	/* 2. prepare the RX dma transfer */
+	txchan = sspc->txchan;
+	rxchan = sspc->rxchan;
+
+	flag = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
+
+	if (likely(sspc->quirks & QUIRKS_DMA_USE_NO_TRAIL)) {
+		/* Since the DMA is configured to do 32bits access */
+		/* to/from the DDR, the DMA transfer size must be  */
+		/* a multiple of 4 bytes                           */
+		sspc->len_dma_rx = sspc->len & ~(4 - 1);
+		sspc->len_dma_tx = sspc->len_dma_rx;
+
+		/* In Rx direction, TRAIL Bytes are handled by memcpy */
+		if (sspc->rx_dma &&
+			(sspc->len_dma_rx >
+				sspc->rx_fifo_threshold * sspc->n_bytes))
+			sspc->len_dma_rx = TRUNCATE(sspc->len_dma_rx,
+				sspc->rx_fifo_threshold * sspc->n_bytes);
+		else if (!sspc->rx_dma)
+			dev_err(dev, "ERROR : rx_dma is null\r\n");
+	} else {
+		/* TRAIL Bytes are handled by DMA */
+		if (sspc->rx_dma) {
+			sspc->len_dma_rx = sspc->len;
+			sspc->len_dma_tx = sspc->len;
+		} else
+			dev_err(dev, "ERROR : sspc->rx_dma is null!\n");
+	}
+
+	sspc->dmas_rx.dma_slave.src_addr = ssdr_addr;
+	rxchan->device->device_control(rxchan, DMA_SLAVE_CONFIG,
+		(unsigned long)&(sspc->dmas_rx.dma_slave));
+	dma_sync_single_for_device(dev, sspc->rx_dma,
+		sspc->len, DMA_FROM_DEVICE);
+
+	rxdesc = rxchan->device->device_prep_dma_memcpy
+		(rxchan,			/* DMA Channel */
+		sspc->rx_dma,			/* DAR */
+		ssdr_addr,			/* SAR */
+		sspc->len_dma_rx,		/* Data Length */
+		flag);					/* Flag */
+
+	if (rxdesc) {
+		rxdesc->callback = intel_mid_ssp_spi_dma_done;
+		rxdesc->callback_param = &sspc->rx_param;
+	} else {
+		dev_dbg(dev, "rxdesc is null! (len_dma_rx:%zu)\n",
+			sspc->len_dma_rx);
+		sspc->rxdma_done = 1;
+	}
+
+	/* 3. prepare the TX dma transfer */
+	sspc->dmas_tx.dma_slave.dst_addr = ssdr_addr;
+	txchan->device->device_control(txchan, DMA_SLAVE_CONFIG,
+		(unsigned long)&(sspc->dmas_tx.dma_slave));
+	dma_sync_single_for_device(dev, sspc->tx_dma,
+		sspc->len, DMA_TO_DEVICE);
+
+	if (sspc->tx_dma) {
+		txdesc = txchan->device->device_prep_dma_memcpy
+			(txchan,			/* DMA Channel */
+			ssdr_addr,			/* DAR */
+			sspc->tx_dma,			/* SAR */
+			sspc->len_dma_tx,		/* Data Length */
+			flag);				/* Flag */
+		if (txdesc) {
+			txdesc->callback = intel_mid_ssp_spi_dma_done;
+			txdesc->callback_param = &sspc->tx_param;
+		} else {
+			dev_dbg(dev, "txdesc is null! (len_dma_tx:%zu)\n",
+				sspc->len_dma_tx);
+			sspc->txdma_done = 1;
+		}
+	} else {
+		dev_err(dev, "ERROR : sspc->tx_dma is null!\n");
+		return;
+	}
+
+	dev_dbg(dev, "DMA transfer len:%zu len_dma_tx:%zu len_dma_rx:%zu\n",
+		sspc->len, sspc->len_dma_tx, sspc->len_dma_rx);
+
+	if (rxdesc || txdesc) {
+		if (rxdesc) {
+			dev_dbg(dev, "Firing DMA RX channel\n");
+			rxdesc->tx_submit(rxdesc);
+		}
+		if (txdesc) {
+			dev_dbg(dev, "Firing DMA TX channel\n");
+			txdesc->tx_submit(txdesc);
+		}
+	} else {
+		struct callback_param cb_param;
+		cb_param.drv_context = sspc;
+		dev_dbg(dev, "Bypassing DMA transfer\n");
+		intel_mid_ssp_spi_dma_done(&cb_param);
+	}
+}
+
+/**
+ * map_dma_buffers() - Map DMA buffer before a transfer
+ * @sspc:	Pointer to the private drivzer context
+ */
+static int map_dma_buffers(struct ssp_drv_context *sspc)
+{
+	struct device *dev = &sspc->pdev->dev;
+
+	if (unlikely(sspc->dma_mapped)) {
+		dev_err(dev, "ERROR : DMA buffers already mapped\n");
+		return 0;
+	}
+	if (unlikely(sspc->quirks & QUIRKS_SRAM_ADDITIONAL_CPY)) {
+		/* Copy sspc->tx into sram_tx */
+		memcpy_toio(sspc->virt_addr_sram_tx, sspc->tx, sspc->len);
+#ifdef DUMP_RX
+		dump_trailer(&sspc->pdev->dev, sspc->tx, sspc->len, 16);
+#endif
+		sspc->rx_dma = SRAM_RX_ADDR;
+		sspc->tx_dma = SRAM_TX_ADDR;
+	} else {
+		/* no QUIRKS_SRAM_ADDITIONAL_CPY */
+		if (unlikely(sspc->dma_mapped))
+			return 1;
+
+		sspc->tx_dma = dma_map_single(dev, sspc->tx, sspc->len,
+						PCI_DMA_TODEVICE);
+		if (unlikely(dma_mapping_error(dev, sspc->tx_dma))) {
+			dev_err(dev, "ERROR : tx dma mapping failed\n");
+			return 0;
+		}
+
+		sspc->rx_dma = dma_map_single(dev, sspc->rx, sspc->len,
+						PCI_DMA_FROMDEVICE);
+		if (unlikely(dma_mapping_error(dev, sspc->rx_dma))) {
+			dma_unmap_single(dev, sspc->tx_dma,
+				sspc->len, DMA_TO_DEVICE);
+			dev_err(dev, "ERROR : rx dma mapping failed\n");
+			return 0;
+		}
+	}
+	return 1;
+}
+
+/**
+ * drain_trail() - Handle trailing bytes of a transfer
+ * @sspc:	Pointer to the private driver context
+ *
+ * This function handles the trailing bytes of a transfer for the case
+ * they are not handled by the DMA.
+ */
+void drain_trail(struct ssp_drv_context *sspc)
+{
+	struct device *dev = &sspc->pdev->dev;
+	void *reg = sspc->ioaddr;
+
+	if (sspc->len != sspc->len_dma_rx) {
+		dev_dbg(dev, "Handling trailing bytes. SSSR:%08x\n",
+			read_SSSR(reg));
+		sspc->rx += sspc->len_dma_rx;
+		sspc->tx += sspc->len_dma_tx;
+
+		while ((sspc->tx != sspc->tx_end) ||
+			(sspc->rx != sspc->rx_end)) {
+			sspc->read(sspc);
+			sspc->write(sspc);
+		}
+	}
+}
+
+/**
+ * sram_to_ddr_cpy() - Copy data from Langwell SDRAM to DDR
+ * @sspc:	Pointer to the private driver context
+ */
+static void sram_to_ddr_cpy(struct ssp_drv_context *sspc)
+{
+	u32 length = sspc->len;
+
+	if ((sspc->quirks & QUIRKS_DMA_USE_NO_TRAIL)
+		&& (sspc->len > sspc->rx_fifo_threshold * sspc->n_bytes))
+		length = TRUNCATE(sspc->len,
+			sspc->rx_fifo_threshold * sspc->n_bytes);
+
+	memcpy_fromio(sspc->rx, sspc->virt_addr_sram_rx, length);
+}
+
+static void int_transfer_complete(struct ssp_drv_context *sspc)
+{
+	void *reg = sspc->ioaddr;
+	struct spi_message *msg;
+	struct device *dev = &sspc->pdev->dev;
+
+	if (unlikely(sspc->quirks & QUIRKS_USE_PM_QOS))
+		pm_qos_update_request(&sspc->pm_qos_req,
+					PM_QOS_DEFAULT_VALUE);
+
+	if (unlikely(sspc->quirks & QUIRKS_SRAM_ADDITIONAL_CPY))
+		sram_to_ddr_cpy(sspc);
+
+	if (likely(sspc->quirks & QUIRKS_DMA_USE_NO_TRAIL))
+		drain_trail(sspc);
+	else
+		/* Stop getting Time Outs */
+		write_SSTO(0, reg);
+
+	sspc->cur_msg->status = 0;
+	sspc->cur_msg->actual_length = sspc->len;
+
+#ifdef DUMP_RX
+	dump_trailer(dev, sspc->rx, sspc->len, 16);
+#endif
+
+	if (sspc->cs_control)
+		sspc->cs_control(CS_DEASSERT);
+
+	dev_dbg(dev, "End of transfer. SSSR:%08X\n", read_SSSR(reg));
+	msg = sspc->cur_msg;
+	if (likely(msg->complete))
+		msg->complete(msg->context);
+	complete(&sspc->msg_done);
+}
+
+static void int_transfer_complete_work(struct work_struct *work)
+{
+	struct ssp_drv_context *sspc = container_of(work,
+				struct ssp_drv_context, complete_work);
+
+	int_transfer_complete(sspc);
+}
+
+static void poll_transfer_complete(struct ssp_drv_context *sspc)
+{
+	struct spi_message *msg;
+
+	/* Update total byte transfered return count actual bytes read */
+	sspc->cur_msg->actual_length += sspc->len - (sspc->rx_end - sspc->rx);
+
+	sspc->cur_msg->status = 0;
+	if (sspc->cs_control)
+		sspc->cs_control(CS_DEASSERT);
+
+	msg = sspc->cur_msg;
+	if (likely(msg->complete))
+		msg->complete(msg->context);
+	complete(&sspc->msg_done);
+}
+
+/**
+ * ssp_int() - Interrupt handler
+ * @irq
+ * @dev_id
+ *
+ * The SSP interrupt is not used for transfer which are handled by
+ * DMA or polling: only under/over run are catched to detect
+ * broken transfers.
+ */
+static irqreturn_t ssp_int(int irq, void *dev_id)
+{
+	struct ssp_drv_context *sspc = dev_id;
+	void *reg = sspc->ioaddr;
+	struct device *dev = &sspc->pdev->dev;
+	u32 status = read_SSSR(reg);
+
+	/* It should never be our interrupt since SSP will */
+	/* only trigs interrupt for under/over run.*/
+	if (likely(!(status & sspc->mask_sr)))
+		return IRQ_NONE;
+
+	if (status & SSSR_ROR || status & SSSR_TUR) {
+		dev_err(dev, "--- SPI ROR or TUR occurred : SSSR=%x\n",	status);
+		WARN_ON(1);
+		if (status & SSSR_ROR)
+			dev_err(dev, "we have Overrun\n");
+		if (status & SSSR_TUR)
+			dev_err(dev, "we have Underrun\n");
+	}
+
+	/* We can fall here when not using DMA mode */
+	if (!sspc->cur_msg) {
+		disable_interface(sspc);
+		disable_triggers(sspc);
+	}
+	/* clear status register */
+	write_SSSR(sspc->clear_sr, reg);
+	return IRQ_HANDLED;
+}
+
+static void poll_transfer(unsigned long data)
+{
+	struct ssp_drv_context *sspc = (void *)data;
+
+	bool delay = false;
+
+	if (sspc->tx)
+		while (sspc->tx != sspc->tx_end) {
+			/* [REVERT ME] Tangier simulator requires a delay */
+			if (delay)
+				udelay(10);
+			if (ssp_timing_wr) {
+				int timeout = 100;
+				/* It is used as debug UART on Tangier. Since
+				   baud rate = 115200, it needs at least 312us
+				   for one word transferring. Becuase of silicon
+				   issue, it MUST check SFIFOL here instead of
+				   TNF. It is the workaround for A0 stepping*/
+				while (--timeout &&
+					((read_SFIFOL(sspc->ioaddr)) & 0xFFFF))
+					udelay(10);
+			}
+			sspc->write(sspc);
+			sspc->read(sspc);
+		}
+
+	while (!sspc->read(sspc))
+		cpu_relax();
+
+	poll_transfer_complete(sspc);
+}
+
+/**
+ * start_bitbanging() - Clock synchronization by bit banging
+ * @sspc:	Pointer to private driver context
+ *
+ * This clock synchronization will be removed as soon as it is
+ * handled by the SCU.
+ */
+static void start_bitbanging(struct ssp_drv_context *sspc)
+{
+	u32 sssr;
+	u32 count = 0;
+	u32 cr0;
+	void *i2c_reg = sspc->I2C_ioaddr;
+	struct device *dev = &sspc->pdev->dev;
+	void *reg = sspc->ioaddr;
+	struct chip_data *chip = spi_get_ctldata(sspc->cur_msg->spi);
+	cr0 = chip->cr0;
+
+	dev_warn(dev, "In %s : Starting bit banging\n",
+		__func__);
+	if (read_SSSR(reg) & SSP_NOT_SYNC)
+		dev_warn(dev, "SSP clock desynchronized.\n");
+	if (!(read_SSCR0(reg) & SSCR0_SSE))
+		dev_warn(dev, "in SSCR0, SSP disabled.\n");
+
+	dev_dbg(dev, "SSP not ready, start CLK sync\n");
+
+	write_SSCR0(cr0 & ~SSCR0_SSE, reg);
+	write_SSPSP(0x02010007, reg);
+
+	write_SSTO(chip->timeout, reg);
+	write_SSCR0(cr0, reg);
+
+	/*
+	*  This routine uses the DFx block to override the SSP inputs
+	*  and outputs allowing us to bit bang SSPSCLK. On Langwell,
+	*  we have to generate the clock to clear busy.
+	*/
+	write_I2CDATA(0x3, i2c_reg);
+	udelay(I2C_ACCESS_USDELAY);
+	write_I2CCTRL(0x01070034, i2c_reg);
+	udelay(I2C_ACCESS_USDELAY);
+	write_I2CDATA(0x00000099, i2c_reg);
+	udelay(I2C_ACCESS_USDELAY);
+	write_I2CCTRL(0x01070038, i2c_reg);
+	udelay(I2C_ACCESS_USDELAY);
+	sssr = read_SSSR(reg);
+
+	/* Bit bang the clock until CSS clears */
+	while ((sssr & 0x400000) && (count < MAX_BITBANGING_LOOP)) {
+		write_I2CDATA(0x2, i2c_reg);
+		udelay(I2C_ACCESS_USDELAY);
+		write_I2CCTRL(0x01070034, i2c_reg);
+		udelay(I2C_ACCESS_USDELAY);
+		write_I2CDATA(0x3, i2c_reg);
+		udelay(I2C_ACCESS_USDELAY);
+		write_I2CCTRL(0x01070034, i2c_reg);
+		udelay(I2C_ACCESS_USDELAY);
+		sssr = read_SSSR(reg);
+		count++;
+	}
+	if (count >= MAX_BITBANGING_LOOP)
+		dev_err(dev, "ERROR in %s : infinite loop on bit banging. Aborting\n",
+								__func__);
+
+	dev_dbg(dev, "---Bit bang count=%d\n", count);
+
+	write_I2CDATA(0x0, i2c_reg);
+	udelay(I2C_ACCESS_USDELAY);
+	write_I2CCTRL(0x01070038, i2c_reg);
+}
+
+static unsigned int ssp_get_clk_div(struct ssp_drv_context *sspc, int speed)
+{
+	if (sspc->quirks & QUIRKS_PLATFORM_MRFL)
+		return max(25000000 / speed, 4) - 1;
+	else
+		return max(100000000 / speed, 4) - 1;
+}
+
+/**
+ * transfer() - Start a SPI transfer
+ * @spi:	Pointer to the spi_device struct
+ * @msg:	Pointer to the spi_message struct
+ */
+static int transfer(struct spi_device *spi, struct spi_message *msg)
+{
+	struct ssp_drv_context *sspc = spi_master_get_devdata(spi->master);
+	unsigned long flags;
+
+	msg->actual_length = 0;
+	msg->status = -EINPROGRESS;
+	spin_lock_irqsave(&sspc->lock, flags);
+	list_add_tail(&msg->queue, &sspc->queue);
+	if (!sspc->suspended)
+		queue_work(sspc->workqueue, &sspc->pump_messages);
+	spin_unlock_irqrestore(&sspc->lock, flags);
+
+	return 0;
+}
+
+static int handle_message(struct ssp_drv_context *sspc)
+{
+	struct chip_data *chip = NULL;
+	struct spi_transfer *transfer = NULL;
+	void *reg = sspc->ioaddr;
+	u32 cr1;
+	struct device *dev = &sspc->pdev->dev;
+	struct spi_message *msg = sspc->cur_msg;
+	u32 clk_div;
+
+	chip = spi_get_ctldata(msg->spi);
+
+	/* We handle only one transfer message since the protocol module has to
+	   control the out of band signaling. */
+	transfer = list_entry(msg->transfers.next, struct spi_transfer,
+					transfer_list);
+
+	/* Check transfer length */
+	if (unlikely((transfer->len > MAX_SPI_TRANSFER_SIZE) ||
+		(transfer->len == 0))) {
+		dev_warn(dev, "transfer length null or greater than %d\n",
+			MAX_SPI_TRANSFER_SIZE);
+		dev_warn(dev, "length = %d\n", transfer->len);
+		msg->status = -EINVAL;
+
+		if (msg->complete)
+			msg->complete(msg->context);
+		complete(&sspc->msg_done);
+		return 0;
+	}
+
+	/* Flush any remaining data (in case of failed previous transfer) */
+	flush(sspc);
+
+	sspc->tx  = (void *)transfer->tx_buf;
+	sspc->rx  = (void *)transfer->rx_buf;
+	sspc->len = transfer->len;
+	sspc->write = chip->write;
+	sspc->read = chip->read;
+	sspc->cs_control = chip->cs_control;
+	sspc->cs_change = transfer->cs_change;
+
+	if (likely(chip->dma_enabled)) {
+		sspc->dma_mapped = map_dma_buffers(sspc);
+		if (unlikely(!sspc->dma_mapped))
+			return 0;
+	} else {
+		sspc->write = sspc->tx ? chip->write : null_writer;
+		sspc->read  = sspc->rx ? chip->read : null_reader;
+	}
+	sspc->tx_end = sspc->tx + transfer->len;
+	sspc->rx_end = sspc->rx + transfer->len;
+
+	write_SSSR(sspc->clear_sr, reg);
+
+	/* setup the CR1 control register */
+	cr1 = chip->cr1 | sspc->cr1_sig;
+
+	if (likely(sspc->quirks & QUIRKS_DMA_USE_NO_TRAIL)) {
+		/* in case of len smaller than burst size, adjust the RX     */
+		/* threshold. All other cases will use the default threshold */
+		/* value. The RX fifo threshold must be aligned with the DMA */
+		/* RX transfer size, which may be limited to a multiple of 4 */
+		/* bytes due to 32bits DDR access.                           */
+		if  (sspc->len / sspc->n_bytes <= sspc->rx_fifo_threshold) {
+			u32 rx_fifo_threshold;
+
+			rx_fifo_threshold = (sspc->len & ~(4 - 1)) /
+						sspc->n_bytes;
+			cr1 &= ~(SSCR1_RFT);
+			cr1 |= SSCR1_RxTresh(rx_fifo_threshold) & SSCR1_RFT;
+		} else
+			write_SSTO(chip->timeout, reg);
+	}
+
+	dev_dbg(dev, "transfer len:%zd  n_bytes:%d  cr0:%x  cr1:%x",
+		sspc->len, sspc->n_bytes, chip->cr0, cr1);
+
+	/* first set CR1 */
+	write_SSCR1(cr1, reg);
+
+	if ((intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER) ||
+		(intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_ANNIEDALE))
+		write_SSFS((1 << chip->chip_select), reg);
+
+	/* recalculate the frequency for each transfer */
+	clk_div = ssp_get_clk_div(sspc, transfer->speed_hz);
+	chip->cr0 |= clk_div << 8;
+
+	/* Do bitbanging only if SSP not-enabled or not-synchronized */
+	if (unlikely(((read_SSSR(reg) & SSP_NOT_SYNC) ||
+		(!(read_SSCR0(reg) & SSCR0_SSE))) &&
+		(sspc->quirks & QUIRKS_BIT_BANGING))) {
+			start_bitbanging(sspc);
+	} else {
+		/* (re)start the SSP */
+		if (ssp_timing_wr) {
+			dev_dbg(dev, "original cr0 before reset:%x",
+				chip->cr0);
+			/*we should not disable TUM and RIM interrup*/
+			write_SSCR0(0x0000000F, reg);
+			chip->cr0 &= ~(SSCR0_SSE);
+			dev_dbg(dev, "reset ssp:cr0:%x", chip->cr0);
+			write_SSCR0(chip->cr0, reg);
+			chip->cr0 |= SSCR0_SSE;
+			dev_dbg(dev, "reset ssp:cr0:%x", chip->cr0);
+			write_SSCR0(chip->cr0, reg);
+		} else
+			write_SSCR0(chip->cr0, reg);
+	}
+
+	if (sspc->cs_control)
+		sspc->cs_control(CS_ASSERT);
+
+	if (likely(chip->dma_enabled)) {
+		if (unlikely(sspc->quirks & QUIRKS_USE_PM_QOS))
+			pm_qos_update_request(&sspc->pm_qos_req,
+				MIN_EXIT_LATENCY);
+		dma_transfer(sspc);
+	} else
+		tasklet_schedule(&sspc->poll_transfer);
+
+	return 0;
+}
+
+static void pump_messages(struct work_struct *work)
+{
+	struct ssp_drv_context *sspc =
+		container_of(work, struct ssp_drv_context, pump_messages);
+	struct device *dev = &sspc->pdev->dev;
+	unsigned long flags;
+	struct spi_message *msg;
+
+	pm_runtime_get_sync(dev);
+	spin_lock_irqsave(&sspc->lock, flags);
+	while (!list_empty(&sspc->queue)) {
+		if (sspc->suspended)
+			break;
+		msg = list_entry(sspc->queue.next, struct spi_message, queue);
+		list_del_init(&msg->queue);
+		sspc->cur_msg = msg;
+		spin_unlock_irqrestore(&sspc->lock, flags);
+		INIT_COMPLETION(sspc->msg_done);
+		handle_message(sspc);
+		wait_for_completion(&sspc->msg_done);
+		spin_lock_irqsave(&sspc->lock, flags);
+		sspc->cur_msg = NULL;
+	}
+	spin_unlock_irqrestore(&sspc->lock, flags);
+	pm_runtime_put(dev);
+}
+
+/**
+ * setup() - Driver setup procedure
+ * @spi:	Pointeur to the spi_device struct
+ */
+static int setup(struct spi_device *spi)
+{
+	struct intel_mid_ssp_spi_chip *chip_info = NULL;
+	struct chip_data *chip;
+	struct ssp_drv_context *sspc =
+		spi_master_get_devdata(spi->master);
+	u32 tx_fifo_threshold;
+	u32 burst_size;
+	u32 clk_div;
+
+	if (!spi->bits_per_word)
+		spi->bits_per_word = DFLT_BITS_PER_WORD;
+
+	if ((spi->bits_per_word < MIN_BITS_PER_WORD
+		|| spi->bits_per_word > MAX_BITS_PER_WORD))
+		return -EINVAL;
+
+	chip = spi_get_ctldata(spi);
+	if (!chip) {
+		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
+		if (!chip) {
+			dev_err(&spi->dev,
+			"failed setup: can't allocate chip data\n");
+			return -ENOMEM;
+		}
+	}
+	chip->cr0 = SSCR0_Motorola | SSCR0_DataSize(spi->bits_per_word > 16 ?
+		spi->bits_per_word - 16 : spi->bits_per_word)
+			| SSCR0_SSE
+			| (spi->bits_per_word > 16 ? SSCR0_EDSS : 0);
+
+	/* protocol drivers may change the chip settings, so...  */
+	/* if chip_info exists, use it                           */
+	chip_info = spi->controller_data;
+
+	/* chip_info isn't always needed */
+	chip->cr1 = 0;
+	if (chip_info) {
+		burst_size = chip_info->burst_size;
+		if (burst_size > IMSS_FIFO_BURST_8)
+			burst_size = DFLT_FIFO_BURST_SIZE;
+
+		chip->timeout = chip_info->timeout;
+
+		if (chip_info->enable_loopback)
+			chip->cr1 |= SSCR1_LBM;
+
+		chip->dma_enabled = chip_info->dma_enabled;
+		chip->cs_control = chip_info->cs_control;
+
+	} else {
+		/* if no chip_info provided by protocol driver, */
+		/* set default values                           */
+		dev_info(&spi->dev, "setting default chip values\n");
+
+		burst_size = DFLT_FIFO_BURST_SIZE;
+		chip->dma_enabled = 1;
+		if (sspc->quirks & QUIRKS_DMA_USE_NO_TRAIL)
+			chip->timeout = 0;
+		else
+			chip->timeout = DFLT_TIMEOUT_VAL;
+	}
+	/* Set FIFO thresholds according to burst_size */
+	if (burst_size == IMSS_FIFO_BURST_8)
+		sspc->rx_fifo_threshold = 8;
+	else if (burst_size == IMSS_FIFO_BURST_4)
+		sspc->rx_fifo_threshold = 4;
+	else
+		sspc->rx_fifo_threshold = 1;
+	/*FIXME:this is workaround.
+	On MRST, in DMA mode, it is very strang that RX fifo can't reach
+	burst size.*/
+	if (sspc->quirks & QUIRKS_PLATFORM_MRFL && chip->dma_enabled)
+		sspc->rx_fifo_threshold = 1;
+	tx_fifo_threshold = SPI_FIFO_SIZE - sspc->rx_fifo_threshold;
+	chip->cr1 |= (SSCR1_RxTresh(sspc->rx_fifo_threshold) &
+		SSCR1_RFT) | (SSCR1_TxTresh(tx_fifo_threshold) & SSCR1_TFT);
+
+	sspc->dma_mapped = 0;
+
+	/* setting phase and polarity. spi->mode comes from boardinfo */
+	if ((spi->mode & SPI_CPHA) != 0)
+		chip->cr1 |= SSCR1_SPH;
+	if ((spi->mode & SPI_CPOL) != 0)
+		chip->cr1 |= SSCR1_SPO;
+
+	if (sspc->quirks & QUIRKS_SPI_SLAVE_CLOCK_MODE)
+		/* set slave mode */
+		chip->cr1 |= SSCR1_SCLKDIR | SSCR1_SFRMDIR;
+	chip->cr1 |= SSCR1_SCFR;        /* clock is not free running */
+
+	dev_dbg(&spi->dev, "%d bits/word, mode %d\n",
+		spi->bits_per_word, spi->mode & 0x3);
+	if (spi->bits_per_word <= 8) {
+		chip->n_bytes = 1;
+		chip->read = u8_reader;
+		chip->write = u8_writer;
+	} else if (spi->bits_per_word <= 16) {
+		chip->n_bytes = 2;
+		chip->read = u16_reader;
+		chip->write = u16_writer;
+	} else if (spi->bits_per_word <= 32) {
+		if (!ssp_timing_wr)
+			chip->cr0 |= SSCR0_EDSS;
+		chip->n_bytes = 4;
+		chip->read = u32_reader;
+		chip->write = u32_writer;
+	} else {
+		dev_err(&spi->dev, "invalid wordsize\n");
+		return -EINVAL;
+	}
+
+	if ((sspc->quirks & QUIRKS_SPI_SLAVE_CLOCK_MODE) == 0) {
+		chip->speed_hz = spi->max_speed_hz;
+		clk_div = ssp_get_clk_div(sspc, chip->speed_hz);
+		chip->cr0 |= clk_div << 8;
+		dev_dbg(&spi->dev, "spi->max_speed_hz:%d clk_div:%x cr0:%x",
+			spi->max_speed_hz, clk_div, chip->cr0);
+	}
+	chip->bits_per_word = spi->bits_per_word;
+	chip->chip_select = spi->chip_select;
+
+	spi_set_ctldata(spi, chip);
+
+	/* setup of sspc members that will not change across transfers */
+	sspc->n_bytes = chip->n_bytes;
+
+	if (chip->dma_enabled) {
+		intel_mid_ssp_spi_dma_init(sspc);
+		sspc->cr1_sig = SSCR1_TSRE | SSCR1_RSRE;
+		sspc->mask_sr = SSSR_ROR | SSSR_TUR;
+		if (sspc->quirks & QUIRKS_DMA_USE_NO_TRAIL)
+			sspc->cr1_sig |= SSCR1_TRAIL;
+	} else {
+		sspc->cr1_sig = SSCR1_TINTE;
+		sspc->mask_sr = SSSR_ROR | SSSR_TUR | SSSR_TINT;
+	}
+	sspc->clear_sr = SSSR_TUR | SSSR_ROR | SSSR_TINT;
+
+	return 0;
+}
+
+/**
+ * cleanup() - Driver cleanup procedure
+ * @spi:	Pointer to the spi_device struct
+ */
+static void cleanup(struct spi_device *spi)
+{
+	struct chip_data *chip = spi_get_ctldata(spi);
+	struct ssp_drv_context *sspc =
+		spi_master_get_devdata(spi->master);
+
+	if (sspc->dma_initialized)
+		intel_mid_ssp_spi_dma_exit(sspc);
+
+	/* Remove the PM_QOS request */
+	if (sspc->quirks & QUIRKS_USE_PM_QOS)
+		pm_qos_remove_request(&sspc->pm_qos_req);
+
+	kfree(chip);
+	spi_set_ctldata(spi, NULL);
+}
+
+/**
+ * intel_mid_ssp_spi_probe() - Driver probe procedure
+ * @pdev:	Pointer to the pci_dev struct
+ * @ent:	Pointer to the pci_device_id struct
+ */
+static int intel_mid_ssp_spi_probe(struct pci_dev *pdev,
+	const struct pci_device_id *ent)
+{
+	struct device *dev = &pdev->dev;
+	struct spi_master *master;
+	struct ssp_drv_context *sspc = 0;
+	int status;
+	u32 iolen = 0;
+	u8 ssp_cfg;
+	int pos;
+	void __iomem *syscfg_ioaddr;
+	unsigned long syscfg;
+
+	/* Check if the SSP we are probed for has been allocated */
+	/* to operate as SPI. This information is retreived from */
+	/* the field adid of the Vendor-Specific PCI capability  */
+	/* which is used as a configuration register.            */
+	pos = pci_find_capability(pdev, PCI_CAP_ID_VNDR);
+	if (pos > 0) {
+		pci_read_config_byte(pdev,
+			pos + VNDR_CAPABILITY_ADID_OFFSET,
+			&ssp_cfg);
+	} else {
+		dev_info(dev, "No Vendor Specific PCI capability\n");
+		goto err_abort_probe;
+	}
+
+	if (ssp_cfg_get_mode(ssp_cfg) != SSP_CFG_SPI_MODE_ID) {
+		dev_info(dev, "Unsupported SSP mode (%02xh)\n", ssp_cfg);
+		goto err_abort_probe;
+	}
+
+	dev_info(dev, "found PCI SSP controller (ID: %04xh:%04xh cfg: %02xh)\n",
+		pdev->vendor, pdev->device, ssp_cfg);
+
+	status = pci_enable_device(pdev);
+	if (status)
+		return status;
+
+	/* Allocate Slave with space for sspc and null dma buffer */
+	master = spi_alloc_master(dev, sizeof(struct ssp_drv_context));
+
+	if (!master) {
+		dev_err(dev, "cannot alloc spi_slave\n");
+		status = -ENOMEM;
+		goto err_free_0;
+	}
+
+	sspc = spi_master_get_devdata(master);
+	sspc->master = master;
+
+	sspc->pdev = pdev;
+	sspc->quirks = ent->driver_data;
+
+	/* Set platform & configuration quirks */
+	if (sspc->quirks & QUIRKS_PLATFORM_MRST) {
+		/* Apply bit banging workarround on MRST */
+		sspc->quirks |= QUIRKS_BIT_BANGING;
+		/* MRST slave mode workarrounds */
+		if (ssp_cfg_is_spi_slave(ssp_cfg))
+			sspc->quirks |= QUIRKS_USE_PM_QOS |
+					QUIRKS_SRAM_ADDITIONAL_CPY;
+	}
+	sspc->quirks |= QUIRKS_DMA_USE_NO_TRAIL;
+	if (ssp_cfg_is_spi_slave(ssp_cfg))
+		sspc->quirks |= QUIRKS_SPI_SLAVE_CLOCK_MODE;
+
+	master->mode_bits = SPI_CPOL | SPI_CPHA;
+	master->bus_num = ssp_cfg_get_spi_bus_nb(ssp_cfg);
+	master->num_chipselect = 4;
+	master->cleanup = cleanup;
+	master->setup = setup;
+	master->transfer = transfer;
+	sspc->dma_wq = create_workqueue("intel_mid_ssp_spi");
+	INIT_WORK(&sspc->complete_work, int_transfer_complete_work);
+
+	sspc->dma_initialized = 0;
+	sspc->suspended = 0;
+	sspc->cur_msg = NULL;
+
+	/* get basic io resource and map it */
+	sspc->paddr = pci_resource_start(pdev, 0);
+	iolen = pci_resource_len(pdev, 0);
+
+	status = pci_request_region(pdev, 0, dev_name(&pdev->dev));
+	if (status)
+		goto err_free_1;
+
+	sspc->ioaddr = ioremap_nocache(sspc->paddr, iolen);
+	if (!sspc->ioaddr) {
+		status = -ENOMEM;
+		goto err_free_2;
+	}
+	dev_dbg(dev, "paddr = : %08lx", sspc->paddr);
+	dev_dbg(dev, "ioaddr = : %p\n", sspc->ioaddr);
+	dev_dbg(dev, "attaching to IRQ: %04x\n", pdev->irq);
+	dev_dbg(dev, "quirks = : %08lx\n", sspc->quirks);
+
+	if (sspc->quirks & QUIRKS_BIT_BANGING) {
+		/* Bit banging on the clock is done through */
+		/* DFT which is available through I2C.      */
+		/* get base address of I2C_Serbus registers */
+		sspc->I2C_paddr = 0xff12b000;
+		sspc->I2C_ioaddr = ioremap_nocache(sspc->I2C_paddr, 0x10);
+		if (!sspc->I2C_ioaddr) {
+			status = -ENOMEM;
+			goto err_free_3;
+		}
+	}
+
+	/* Attach to IRQ */
+	sspc->irq = pdev->irq;
+	status = request_irq(sspc->irq, ssp_int, IRQF_SHARED,
+		"intel_mid_ssp_spi", sspc);
+
+	if ((intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER) ||
+		(intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_ANNIEDALE)) {
+		disable_irq_nosync(sspc->irq);
+		ssp_timing_wr = 1;
+	}
+
+	if (status < 0) {
+		dev_err(&pdev->dev, "can not get IRQ\n");
+		goto err_free_4;
+	}
+
+	if (sspc->quirks & QUIRKS_PLATFORM_MDFL) {
+		/* get base address of DMA selector. */
+		syscfg = sspc->paddr - SYSCFG;
+		syscfg_ioaddr = ioremap_nocache(syscfg, 0x10);
+		if (!syscfg_ioaddr) {
+			status = -ENOMEM;
+			goto err_free_5;
+		}
+		iowrite32(ioread32(syscfg_ioaddr) | 2, syscfg_ioaddr);
+	}
+
+	INIT_LIST_HEAD(&sspc->queue);
+	init_completion(&sspc->msg_done);
+	spin_lock_init(&sspc->lock);
+	tasklet_init(&sspc->poll_transfer, poll_transfer, (unsigned long)sspc);
+	INIT_WORK(&sspc->pump_messages, pump_messages);
+	sspc->workqueue = create_singlethread_workqueue(dev_name(&pdev->dev));
+
+	/* Register with the SPI framework */
+	dev_info(dev, "register with SPI framework (bus spi%d)\n",
+			master->bus_num);
+
+	status = spi_register_master(master);
+	if (status) {
+		dev_err(dev, "problem registering spi\n");
+		goto err_free_5;
+	}
+
+	pci_set_drvdata(pdev, sspc);
+
+	/* Create the PM_QOS request */
+	if (sspc->quirks & QUIRKS_USE_PM_QOS)
+		pm_qos_add_request(&sspc->pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
+				PM_QOS_DEFAULT_VALUE);
+
+	pm_runtime_put_noidle(&pdev->dev);
+	pm_runtime_allow(&pdev->dev);
+
+	return status;
+
+err_free_5:
+	free_irq(sspc->irq, sspc);
+err_free_4:
+	iounmap(sspc->I2C_ioaddr);
+err_free_3:
+	iounmap(sspc->ioaddr);
+err_free_2:
+	pci_release_region(pdev, 0);
+err_free_1:
+	spi_master_put(master);
+err_free_0:
+	pci_disable_device(pdev);
+
+	return status;
+err_abort_probe:
+	dev_info(dev, "Abort probe for SSP %04xh:%04xh\n",
+		pdev->vendor, pdev->device);
+	return -ENODEV;
+}
+
+/**
+ * intel_mid_ssp_spi_remove() - driver remove procedure
+ * @pdev:	Pointer to the pci_dev struct
+ */
+static void intel_mid_ssp_spi_remove(struct pci_dev *pdev)
+{
+	struct ssp_drv_context *sspc = pci_get_drvdata(pdev);
+
+	if (!sspc)
+		return;
+
+	pm_runtime_forbid(&pdev->dev);
+	pm_runtime_get_noresume(&pdev->dev);
+
+	if (sspc->dma_wq)
+		destroy_workqueue(sspc->dma_wq);
+	if (sspc->workqueue)
+		destroy_workqueue(sspc->workqueue);
+
+	/* Release IRQ */
+	free_irq(sspc->irq, sspc);
+
+	if (sspc->ioaddr)
+		iounmap(sspc->ioaddr);
+	if (sspc->quirks & QUIRKS_BIT_BANGING && sspc->I2C_ioaddr)
+		iounmap(sspc->I2C_ioaddr);
+
+	/* disconnect from the SPI framework */
+	if (sspc->master)
+		spi_unregister_master(sspc->master);
+
+	pci_set_drvdata(pdev, NULL);
+	pci_release_region(pdev, 0);
+	pci_disable_device(pdev);
+
+	return;
+}
+
+static int intel_mid_ssp_spi_plat_probe(struct platform_device *pdev)
+{
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+	pm_runtime_allow(&pdev->dev);
+
+	return 0;
+}
+
+static int intel_mid_ssp_spi_plat_remove(struct platform_device *pdev)
+{
+	pm_runtime_forbid(&pdev->dev);
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int intel_mid_ssp_spi_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct ssp_drv_context *sspc = pci_get_drvdata(pdev);
+	unsigned long flags;
+	int loop = 26;
+
+	dev_dbg(dev, "suspend\n");
+
+	spin_lock_irqsave(&sspc->lock, flags);
+	sspc->suspended = 1;
+	/*
+	 * If there is one msg being handled, wait 500ms at most,
+	 * if still not done, return busy
+	 */
+	while (sspc->cur_msg && --loop) {
+		spin_unlock_irqrestore(&sspc->lock, flags);
+		msleep(20);
+		spin_lock_irqsave(&sspc->lock, flags);
+		if (!loop)
+			sspc->suspended = 0;
+	}
+	spin_unlock_irqrestore(&sspc->lock, flags);
+
+	if (loop)
+		return 0;
+	else
+		return -EBUSY;
+}
+
+static int intel_mid_ssp_spi_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct ssp_drv_context *sspc = pci_get_drvdata(pdev);
+
+	dev_dbg(dev, "resume\n");
+	spin_lock(&sspc->lock);
+	sspc->suspended = 0;
+	if (!list_empty(&sspc->queue))
+		queue_work(sspc->workqueue, &sspc->pump_messages);
+	spin_unlock(&sspc->lock);
+	return 0;
+}
+
+static int intel_mid_ssp_spi_runtime_suspend(struct device *dev)
+{
+	dev_dbg(dev, "runtime suspend called\n");
+	return 0;
+}
+
+static int intel_mid_ssp_spi_runtime_resume(struct device *dev)
+{
+	dev_dbg(dev, "runtime resume called\n");
+	return 0;
+}
+
+static int intel_mid_ssp_spi_runtime_idle(struct device *dev)
+{
+	int err;
+
+	dev_dbg(dev, "runtime idle called\n");
+	if (system_state == SYSTEM_BOOTING)
+		/* if SSP SPI UART is set as default console and earlyprintk
+		 * is enabled, it cannot shutdown SSP controller during booting.
+		 */
+		err = pm_schedule_suspend(dev, 30000);
+	else
+		err = pm_schedule_suspend(dev, 500);
+
+	return err;
+}
+#else
+#define intel_mid_ssp_spi_suspend NULL
+#define intel_mid_ssp_spi_resume NULL
+#define intel_mid_ssp_spi_runtime_suspend NULL
+#define intel_mid_ssp_spi_runtime_resume NULL
+#define intel_mid_ssp_spi_runtime_idle NULL
+#endif /* CONFIG_PM */
+
+
+static DEFINE_PCI_DEVICE_TABLE(pci_ids) = {
+	/* MRFL SSP5 */
+	{ PCI_VDEVICE(INTEL, 0x1194), QUIRKS_PLATFORM_MRFL},
+	{},
+};
+
+static const struct dev_pm_ops intel_mid_ssp_spi_pm_ops = {
+	.suspend = intel_mid_ssp_spi_suspend,
+	.resume = intel_mid_ssp_spi_resume,
+	.runtime_suspend = intel_mid_ssp_spi_runtime_suspend,
+	.runtime_resume = intel_mid_ssp_spi_runtime_resume,
+	.runtime_idle = intel_mid_ssp_spi_runtime_idle,
+};
+
+static const struct dev_pm_ops intel_mid_ssp_spi_plat_pm_ops = {
+	.runtime_suspend = intel_mid_ssp_spi_runtime_suspend,
+	.runtime_resume = intel_mid_ssp_spi_runtime_resume,
+	.runtime_idle = intel_mid_ssp_spi_runtime_idle,
+};
+
+static struct pci_driver intel_mid_ssp_spi_driver = {
+	.name =		DRIVER_NAME,
+	.id_table =	pci_ids,
+	.probe =	intel_mid_ssp_spi_probe,
+	.remove =	intel_mid_ssp_spi_remove,
+	.driver =	{
+		.pm	= &intel_mid_ssp_spi_pm_ops,
+	},
+};
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id intel_mid_ssp_spi_acpi_ids[] = {
+	{ "8086228E", 0},
+	{ }
+};
+MODULE_DEVICE_TABLE(acpi, intel_mid_ssp_spi_acpi_ids);
+#endif
+
+static struct platform_driver intel_mid_ssp_spi_plat_driver = {
+	.remove		= intel_mid_ssp_spi_plat_remove,
+	.driver		= {
+		.name	= DRIVER_NAME,
+		.owner	= THIS_MODULE,
+/* Disable PM only when kgdb(poll mode uart) is enabled */
+#if defined(CONFIG_PM) && !defined(CONFIG_CONSOLE_POLL)
+		.pm     = &intel_mid_ssp_spi_plat_pm_ops,
+#endif
+#ifdef CONFIG_ACPI
+		.acpi_match_table = ACPI_PTR(intel_mid_ssp_spi_acpi_ids),
+#endif
+	},
+};
+
+static int __init intel_mid_ssp_spi_init(void)
+{
+	return pci_register_driver(&intel_mid_ssp_spi_driver);
+}
+
+late_initcall(intel_mid_ssp_spi_init);
+
+static void __exit intel_mid_ssp_spi_exit(void)
+{
+	pci_unregister_driver(&intel_mid_ssp_spi_driver);
+}
+
+module_exit(intel_mid_ssp_spi_exit);
+
+static int __init intel_mid_ssp_spi_plat_init(void)
+{
+	return platform_driver_probe(&intel_mid_ssp_spi_plat_driver, intel_mid_ssp_spi_plat_probe);
+}
+
+late_initcall(intel_mid_ssp_spi_plat_init);
+
+static void __exit intel_mid_ssp_spi_plat_exit(void)
+{
+	platform_driver_unregister(&intel_mid_ssp_spi_plat_driver);
+}
+
+module_exit(intel_mid_ssp_spi_plat_exit);
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 50b13c9..df0aacc 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -610,7 +610,7 @@
 		else
 			buf = (void *)t->tx_buf;
 		t->tx_dma = dma_map_single(&spi->dev, buf,
-				t->len, DMA_FROM_DEVICE);
+				t->len, DMA_TO_DEVICE);
 		if (!t->tx_dma) {
 			ret = -EFAULT;
 			goto err_tx_map;
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
index b9f0192..54700a2 100644
--- a/drivers/spi/spi-dw-mid.c
+++ b/drivers/spi/spi-dw-mid.c
@@ -22,7 +22,6 @@
 #include <linux/interrupt.h>
 #include <linux/slab.h>
 #include <linux/spi/spi.h>
-#include <linux/types.h>
 
 #include "spi-dw.h"
 
@@ -55,6 +54,8 @@
 	dws->dmac = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0813, NULL);
 	if (!dws->dmac)
 		dws->dmac = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0827, NULL);
+	if (!dws->dmac)
+		dws->dmac = pci_get_device(PCI_VENDOR_ID_INTEL, 0x08EF, NULL);
 
 	dma_cap_zero(mask);
 	dma_cap_set(DMA_SLAVE, mask);
@@ -111,8 +112,11 @@
 {
 	struct dma_async_tx_descriptor *txdesc = NULL, *rxdesc = NULL;
 	struct dma_chan *txchan, *rxchan;
-	struct dma_slave_config txconf, rxconf;
+	struct dma_slave_config *txconf, *rxconf;
 	u16 dma_ctrl = 0;
+	enum dma_ctrl_flags flag;
+	struct device *dev = &dws->master->dev;
+	struct intel_mid_dma_slave *rxs, *txs;
 
 	/* 1. setup DMA related registers */
 	if (cs_change) {
@@ -131,51 +135,65 @@
 	txchan = dws->txchan;
 	rxchan = dws->rxchan;
 
+	txs = txchan->private;
+	rxs = rxchan->private;
+
+	txconf = &txs->dma_slave;
+	rxconf = &rxs->dma_slave;
+
+	flag = DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP | DMA_CTRL_ACK;
+
 	/* 2. Prepare the TX dma transfer */
-	txconf.direction = DMA_MEM_TO_DEV;
-	txconf.dst_addr = dws->dma_addr;
-	txconf.dst_maxburst = LNW_DMA_MSIZE_16;
-	txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-	txconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
-	txconf.device_fc = false;
+	txconf->direction = DMA_MEM_TO_DEV;
+	txconf->dst_addr = dws->dma_addr;
+	txconf->src_maxburst = LNW_DMA_MSIZE_16;
+	txconf->dst_maxburst = LNW_DMA_MSIZE_16;
+	txconf->src_addr_width = dws->dma_width;
+	txconf->dst_addr_width = dws->dma_width;
+	txconf->device_fc = false;
 
 	txchan->device->device_control(txchan, DMA_SLAVE_CONFIG,
-				       (unsigned long) &txconf);
+				       (unsigned long) txconf);
 
-	memset(&dws->tx_sgl, 0, sizeof(dws->tx_sgl));
-	dws->tx_sgl.dma_address = dws->tx_dma;
-	dws->tx_sgl.length = dws->len;
-
-	txdesc = dmaengine_prep_slave_sg(txchan,
-				&dws->tx_sgl,
-				1,
-				DMA_MEM_TO_DEV,
-				DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP);
-	txdesc->callback = dw_spi_dma_done;
-	txdesc->callback_param = dws;
+	txdesc = txchan->device->device_prep_dma_memcpy
+		(txchan,			/* DMA Channel */
+		dws->dma_addr,			/* DAR */
+		dws->tx_dma,			/* SAR */
+		dws->len,			/* Data Length */
+		flag);
+	if (txdesc) {
+		txdesc->callback = dw_spi_dma_done;
+		txdesc->callback_param = dws;
+	} else {
+		dev_err(dev, "ERROR: prepare txdesc failed\n");
+		return -EINVAL;
+	}
 
 	/* 3. Prepare the RX dma transfer */
-	rxconf.direction = DMA_DEV_TO_MEM;
-	rxconf.src_addr = dws->dma_addr;
-	rxconf.src_maxburst = LNW_DMA_MSIZE_16;
-	rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-	rxconf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
-	rxconf.device_fc = false;
+	rxconf->direction = DMA_DEV_TO_MEM;
+	rxconf->src_addr = dws->dma_addr;
+	rxconf->src_maxburst = LNW_DMA_MSIZE_16;
+	rxconf->dst_maxburst = LNW_DMA_MSIZE_16;
+	rxconf->dst_addr_width = dws->dma_width;
+	rxconf->src_addr_width = dws->dma_width;
+	rxconf->device_fc = false;
 
 	rxchan->device->device_control(rxchan, DMA_SLAVE_CONFIG,
-				       (unsigned long) &rxconf);
+				       (unsigned long) rxconf);
 
-	memset(&dws->rx_sgl, 0, sizeof(dws->rx_sgl));
-	dws->rx_sgl.dma_address = dws->rx_dma;
-	dws->rx_sgl.length = dws->len;
-
-	rxdesc = dmaengine_prep_slave_sg(rxchan,
-				&dws->rx_sgl,
-				1,
-				DMA_DEV_TO_MEM,
-				DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP);
-	rxdesc->callback = dw_spi_dma_done;
-	rxdesc->callback_param = dws;
+	rxdesc = rxchan->device->device_prep_dma_memcpy
+		(rxchan,			/* DMA Channel */
+		dws->rx_dma,			/* DAR */
+		dws->dma_addr,			/* SAR */
+		dws->len,			/* Data Length */
+		flag);
+	if (rxdesc) {
+		rxdesc->callback = dw_spi_dma_done;
+		rxdesc->callback_param = dws;
+	} else {
+		dev_err(dev, "ERROR: prepare rxdesc failed\n");
+		return -EINVAL;
+	}
 
 	/* rx must be started before tx due to spi instinct */
 	rxdesc->tx_submit(rxdesc);
@@ -183,10 +201,41 @@
 	return 0;
 }
 
+static int mid_spi_dma_suspend(struct dw_spi *dws)
+{
+	struct dma_chan *txchan, *rxchan;
+
+	txchan = dws->txchan;
+	rxchan = dws->rxchan;
+
+	txchan->device->device_control(txchan, DMA_TERMINATE_ALL, 0);
+	rxchan->device->device_control(rxchan, DMA_TERMINATE_ALL, 0);
+
+	txchan->device->device_control(txchan, DMA_PAUSE, 0);
+	rxchan->device->device_control(rxchan, DMA_PAUSE, 0);
+
+	return 0;
+}
+
+static int mid_spi_dma_resume(struct dw_spi *dws)
+{
+	struct dma_chan *txchan, *rxchan;
+
+	txchan = dws->txchan;
+	rxchan = dws->rxchan;
+
+	txchan->device->device_control(txchan, DMA_RESUME, 0);
+	rxchan->device->device_control(rxchan, DMA_RESUME, 0);
+
+	return 0;
+}
+
 static struct dw_spi_dma_ops mid_dma_ops = {
 	.dma_init	= mid_spi_dma_init,
 	.dma_exit	= mid_spi_dma_exit,
 	.dma_transfer	= mid_spi_dma_transfer,
+	.dma_suspend	= mid_spi_dma_suspend,
+	.dma_resume	= mid_spi_dma_resume,
 };
 #endif
 
@@ -201,12 +250,12 @@
 #define CLK_SPI_CDIV_MASK	0x00000e00
 #define CLK_SPI_DISABLE_OFFSET	8
 
-int dw_spi_mid_init(struct dw_spi *dws)
+int dw_spi_mid_init(struct dw_spi *dws, int bus_num)
 {
 	void __iomem *clk_reg;
 	u32 clk_cdiv;
 
-	clk_reg = ioremap_nocache(MRST_CLK_SPI0_REG, 16);
+	clk_reg = ioremap_nocache(MRST_CLK_SPI0_REG + bus_num * 4, 16);
 	if (!clk_reg)
 		return -ENOMEM;
 
diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
index 6055c8d..c2c0756 100644
--- a/drivers/spi/spi-dw-pci.c
+++ b/drivers/spi/spi-dw-pci.c
@@ -20,6 +20,7 @@
 #include <linux/interrupt.h>
 #include <linux/pci.h>
 #include <linux/slab.h>
+#include <linux/pm_runtime.h>
 #include <linux/spi/spi.h>
 #include <linux/module.h>
 
@@ -72,7 +73,7 @@
 	}
 
 	dws->parent_dev = &pdev->dev;
-	dws->bus_num = 0;
+	dws->bus_num = ent->driver_data;
 	dws->num_cs = 4;
 	dws->irq = pdev->irq;
 
@@ -80,11 +81,9 @@
 	 * Specific handling for Intel MID paltforms, like dma setup,
 	 * clock rate, FIFO depth.
 	 */
-	if (pdev->device == 0x0800) {
-		ret = dw_spi_mid_init(dws);
-		if (ret)
-			goto err_unmap;
-	}
+	ret = dw_spi_mid_init(dws, ent->driver_data);
+	if (ret)
+		goto err_unmap;
 
 	ret = dw_spi_add_host(dws);
 	if (ret)
@@ -92,6 +91,11 @@
 
 	/* PCI hook and SPI hook use the same drv data */
 	pci_set_drvdata(pdev, dwpci);
+
+	pm_suspend_ignore_children(&pdev->dev, true);
+	pm_runtime_put_noidle(&pdev->dev);
+	pm_runtime_allow(&pdev->dev);
+
 	return 0;
 
 err_unmap:
@@ -111,6 +115,8 @@
 
 	pci_set_drvdata(pdev, NULL);
 	dw_spi_remove_host(&dwpci->dws);
+	pm_runtime_forbid(&pdev->dev);
+	pm_runtime_get_noresume(&pdev->dev);
 	iounmap(dwpci->dws.regs);
 	pci_release_region(pdev, 0);
 	kfree(dwpci);
@@ -118,8 +124,9 @@
 }
 
 #ifdef CONFIG_PM
-static int spi_suspend(struct pci_dev *pdev, pm_message_t state)
+static int spi_suspend(struct device *dev)
 {
+	struct pci_dev *pdev = to_pci_dev(dev);
 	struct dw_spi_pci *dwpci = pci_get_drvdata(pdev);
 	int ret;
 
@@ -128,12 +135,13 @@
 		return ret;
 	pci_save_state(pdev);
 	pci_disable_device(pdev);
-	pci_set_power_state(pdev, pci_choose_state(pdev, state));
+	pci_set_power_state(pdev, PCI_D3hot);
 	return ret;
 }
 
-static int spi_resume(struct pci_dev *pdev)
+static int spi_resume(struct device *dev)
 {
+	struct pci_dev *pdev = to_pci_dev(dev);
 	struct dw_spi_pci *dwpci = pci_get_drvdata(pdev);
 	int ret;
 
@@ -144,27 +152,93 @@
 		return ret;
 	return dw_spi_resume_host(&dwpci->dws);
 }
+
+static int spi_dw_pci_runtime_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct dw_spi_pci *dwpci = pci_get_drvdata(pdev);
+
+	dev_dbg(dev, "PCI runtime suspend called\n");
+	return dw_spi_suspend_host(&dwpci->dws);
+}
+
+static int spi_dw_pci_runtime_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct dw_spi_pci *dwpci = pci_get_drvdata(pdev);
+
+	dev_dbg(dev, "pci_runtime_resume called\n");
+	return dw_spi_resume_host(&dwpci->dws);
+}
+
+static int spi_dw_pci_runtime_idle(struct device *dev)
+{
+	int err;
+
+	dev_dbg(dev, "pci_runtime_idle called\n");
+	if (system_state == SYSTEM_BOOTING)
+		/* if SPI UART is set as default console and earlyprintk
+		 * is enabled, it cannot shutdown SPI controller during booting.
+		 */
+		err = pm_schedule_suspend(dev, 30000);
+	else
+		err = pm_schedule_suspend(dev, 500);
+
+	if (err != 0)
+		return 0;
+
+	return -EBUSY;
+}
+
 #else
 #define spi_suspend	NULL
 #define spi_resume	NULL
+#define spi_dw_pci_runtime_suspend NULL
+#define spi_dw_pci_runtime_resume NULL
+#define spi_dw_pci_runtime_idle NULL
 #endif
 
 static DEFINE_PCI_DEVICE_TABLE(pci_ids) = {
-	/* Intel MID platform SPI controller 0 */
-	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
+	/* Intel Medfield platform SPI controller 1 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800), .driver_data = 0 },
+	/* Intel Cloverview platform SPI controller 1 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08E1), .driver_data = 0 },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08EE), .driver_data = 1 },
+	/* Intel EVx platform SPI controller 1 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0812), .driver_data = 2 },
 	{},
 };
 
+static const struct dev_pm_ops dw_spi_pm_ops = {
+	.suspend = spi_suspend,
+	.resume = spi_resume,
+	.runtime_suspend = spi_dw_pci_runtime_suspend,
+	.runtime_resume = spi_dw_pci_runtime_resume,
+	.runtime_idle = spi_dw_pci_runtime_idle,
+};
+
 static struct pci_driver dw_spi_driver = {
 	.name =		DRIVER_NAME,
 	.id_table =	pci_ids,
 	.probe =	spi_pci_probe,
 	.remove =	spi_pci_remove,
-	.suspend =	spi_suspend,
-	.resume	=	spi_resume,
+	.driver =	{
+		.pm	= &dw_spi_pm_ops,
+	},
 };
 
-module_pci_driver(dw_spi_driver);
+static int __init mrst_spi_init(void)
+{
+	return pci_register_driver(&dw_spi_driver);
+}
+
+static void __exit mrst_spi_exit(void)
+{
+	pci_unregister_driver(&dw_spi_driver);
+}
+
+module_init(mrst_spi_init);
+module_exit(mrst_spi_exit);
 
 MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>");
 MODULE_DESCRIPTION("PCI interface driver for DW SPI Core");
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index c1abc06..ce044c5 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -23,6 +23,7 @@
 #include <linux/highmem.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
+#include <linux/pm_runtime.h>
 #include <linux/spi/spi.h>
 
 #include "spi-dw.h"
@@ -63,6 +64,12 @@
 };
 
 #ifdef CONFIG_DEBUG_FS
+static int spi_show_regs_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
 #define SPI_REGS_BUFSIZE	1024
 static ssize_t  spi_show_regs(struct file *file, char __user *user_buf,
 				size_t count, loff_t *ppos)
@@ -122,7 +129,7 @@
 
 static const struct file_operations mrst_spi_regs_ops = {
 	.owner		= THIS_MODULE,
-	.open		= simple_open,
+	.open		= spi_show_regs_open,
 	.read		= spi_show_regs,
 	.llseek		= default_llseek,
 };
@@ -191,7 +198,7 @@
 	u16 txw = 0;
 
 	while (max--) {
-		/* Set the tx word if the transfer's original "tx" is not null */
+		/* Set the txw if the transfer's original "tx" is not null */
 		if (dws->tx_end - dws->len) {
 			if (dws->n_bytes == 1)
 				txw = *(u8 *)(dws->tx);
@@ -256,7 +263,40 @@
 	if (dws->cur_transfer->rx_dma)
 		dws->rx_dma = dws->cur_transfer->rx_dma;
 
+	/* map dma buffer if it's not mapped */
+	if (!dws->tx_dma) {
+		dws->tx_dma = dma_map_single(NULL, dws->tx,
+				dws->len, DMA_TO_DEVICE);
+		if (dma_mapping_error(NULL, dws->tx_dma)) {
+			pr_err("map tx dma buffer failed\n");
+			goto err1;
+		}
+	}
+
+	if (!dws->rx_dma) {
+		dws->rx_dma = dma_map_single(NULL, dws->rx,
+				dws->len, DMA_FROM_DEVICE);
+		if (dma_mapping_error(NULL, dws->rx_dma)) {
+			pr_err("map rx dma buffer failed\n");
+			goto err2;
+		}
+	}
+
 	return 1;
+
+err2:
+	dma_unmap_single(NULL, dws->tx_dma, dws->len, DMA_TO_DEVICE);
+err1:
+	dws->cur_msg->is_dma_mapped = 0;
+	return 0;
+}
+
+static void unmap_dma_buffers(struct dw_spi *dws)
+{
+	dma_unmap_single(NULL, dws->rx_dma,
+				dws->len, DMA_FROM_DEVICE);
+	dma_unmap_single(NULL, dws->tx_dma,
+				dws->len, DMA_TO_DEVICE);
 }
 
 /* Caller already set message->status; dma and pio irqs are blocked */
@@ -267,7 +307,12 @@
 	struct spi_message *msg;
 
 	spin_lock_irqsave(&dws->lock, flags);
+
+	if (dws->dma_mapped)
+		unmap_dma_buffers(dws);
+
 	msg = dws->cur_msg;
+	list_del_init(&dws->cur_msg->queue);
 	dws->cur_msg = NULL;
 	dws->cur_transfer = NULL;
 	dws->prev_chip = dws->cur_chip;
@@ -312,6 +357,7 @@
 		giveback(dws);
 	} else
 		tasklet_schedule(&dws->pump_transfers);
+
 }
 EXPORT_SYMBOL_GPL(dw_spi_xfer_done);
 
@@ -324,7 +370,7 @@
 		dw_readw(dws, DW_SPI_TXOICR);
 		dw_readw(dws, DW_SPI_RXOICR);
 		dw_readw(dws, DW_SPI_RXUICR);
-		int_error_stop(dws, "interrupt_transfer: fifo overrun/underrun");
+		int_error_stop(dws, "interrupt_transfer: fifo over/underrun");
 		return IRQ_HANDLED;
 	}
 
@@ -505,7 +551,8 @@
 		txint_level = dws->fifo_len / 2;
 		txint_level = (templen > txint_level) ? txint_level : templen;
 
-		imask |= SPI_INT_TXEI | SPI_INT_TXOI | SPI_INT_RXUI | SPI_INT_RXOI;
+		imask |= SPI_INT_TXEI | SPI_INT_TXOI | SPI_INT_RXUI
+			| SPI_INT_RXOI;
 		dws->transfer_handler = interrupt_transfer;
 	}
 
@@ -515,7 +562,8 @@
 	 *	2. clk_div is changed
 	 *	3. control value changes
 	 */
-	if (dw_readw(dws, DW_SPI_CTRL0) != cr0 || cs_change || clk_div || imask) {
+	if (dw_readw(dws, DW_SPI_CTRL0) != cr0 || cs_change
+			|| clk_div || imask) {
 		spi_enable_chip(dws, 0);
 
 		if (dw_readw(dws, DW_SPI_CTRL0) != cr0)
@@ -555,23 +603,19 @@
 		container_of(work, struct dw_spi, pump_messages);
 	unsigned long flags;
 
+	pm_runtime_get_sync(dws->parent_dev);
+
 	/* Lock queue and check for queue work */
 	spin_lock_irqsave(&dws->lock, flags);
-	if (list_empty(&dws->queue) || dws->run == QUEUE_STOPPED) {
-		dws->busy = 0;
-		spin_unlock_irqrestore(&dws->lock, flags);
-		return;
-	}
+	if (list_empty(&dws->queue) || dws->run == QUEUE_STOPPED)
+		goto exit;
 
 	/* Make sure we are not already running a message */
-	if (dws->cur_msg) {
-		spin_unlock_irqrestore(&dws->lock, flags);
-		return;
-	}
+	if (dws->cur_msg)
+		goto exit;
 
 	/* Extract head of queue */
 	dws->cur_msg = list_entry(dws->queue.next, struct spi_message, queue);
-	list_del_init(&dws->cur_msg->queue);
 
 	/* Initial message state*/
 	dws->cur_msg->state = START_STATE;
@@ -583,8 +627,9 @@
 	/* Mark as busy and launch transfers */
 	tasklet_schedule(&dws->pump_transfers);
 
-	dws->busy = 1;
+exit:
 	spin_unlock_irqrestore(&dws->lock, flags);
+	pm_runtime_put_sync(dws->parent_dev);
 }
 
 /* spi_device use this to queue in their spi_msg */
@@ -595,29 +640,13 @@
 
 	spin_lock_irqsave(&dws->lock, flags);
 
-	if (dws->run == QUEUE_STOPPED) {
-		spin_unlock_irqrestore(&dws->lock, flags);
-		return -ESHUTDOWN;
-	}
-
 	msg->actual_length = 0;
 	msg->status = -EINPROGRESS;
 	msg->state = START_STATE;
 
 	list_add_tail(&msg->queue, &dws->queue);
 
-	if (dws->run == QUEUE_RUNNING && !dws->busy) {
-
-		if (dws->cur_transfer || dws->cur_msg)
-			queue_work(dws->workqueue,
-					&dws->pump_messages);
-		else {
-			/* If no other data transaction in air, just go */
-			spin_unlock_irqrestore(&dws->lock, flags);
-			pump_messages(&dws->pump_messages);
-			return 0;
-		}
-	}
+	queue_work(dws->workqueue, &dws->pump_messages);
 
 	spin_unlock_irqrestore(&dws->lock, flags);
 	return 0;
@@ -696,13 +725,12 @@
 	kfree(chip);
 }
 
-static int init_queue(struct dw_spi *dws)
+static int dw_spi_init_queue(struct dw_spi *dws)
 {
 	INIT_LIST_HEAD(&dws->queue);
 	spin_lock_init(&dws->lock);
 
 	dws->run = QUEUE_STOPPED;
-	dws->busy = 0;
 
 	tasklet_init(&dws->pump_transfers,
 			pump_transfers,	(unsigned long)dws);
@@ -716,13 +744,13 @@
 	return 0;
 }
 
-static int start_queue(struct dw_spi *dws)
+static int dw_spi_start_queue(struct dw_spi *dws)
 {
 	unsigned long flags;
 
 	spin_lock_irqsave(&dws->lock, flags);
 
-	if (dws->run == QUEUE_RUNNING || dws->busy) {
+	if (dws->run == QUEUE_RUNNING) {
 		spin_unlock_irqrestore(&dws->lock, flags);
 		return -EBUSY;
 	}
@@ -739,32 +767,27 @@
 	return 0;
 }
 
-static int stop_queue(struct dw_spi *dws)
+int dw_spi_stop_queue(struct dw_spi *dws)
 {
 	unsigned long flags;
-	unsigned limit = 50;
 	int status = 0;
 
 	spin_lock_irqsave(&dws->lock, flags);
-	dws->run = QUEUE_STOPPED;
-	while ((!list_empty(&dws->queue) || dws->busy) && limit--) {
-		spin_unlock_irqrestore(&dws->lock, flags);
-		msleep(10);
-		spin_lock_irqsave(&dws->lock, flags);
-	}
-
-	if (!list_empty(&dws->queue) || dws->busy)
+	if (!list_empty(&dws->queue))
 		status = -EBUSY;
+	else
+		dws->run = QUEUE_STOPPED;
 	spin_unlock_irqrestore(&dws->lock, flags);
 
 	return status;
 }
+EXPORT_SYMBOL_GPL(dw_spi_stop_queue);
 
-static int destroy_queue(struct dw_spi *dws)
+static int dw_spi_destroy_queue(struct dw_spi *dws)
 {
 	int status;
 
-	status = stop_queue(dws);
+	status = dw_spi_stop_queue(dws);
 	if (status != 0)
 		return status;
 	destroy_workqueue(dws->workqueue);
@@ -772,11 +795,10 @@
 }
 
 /* Restart the controller, disable all interrupts, clean rx fifo */
-static void spi_hw_init(struct dw_spi *dws)
+static void dw_spi_hw_init(struct dw_spi *dws)
 {
 	spi_enable_chip(dws, 0);
 	spi_mask_intr(dws, 0xff);
-	spi_enable_chip(dws, 1);
 
 	/*
 	 * Try to detect the FIFO depth if not set by interface driver,
@@ -793,6 +815,8 @@
 		dws->fifo_len = (fifo == 257) ? 0 : fifo;
 		dw_writew(dws, DW_SPI_TXFLTR, 0);
 	}
+
+	spi_enable_chip(dws, 1);
 }
 
 int dw_spi_add_host(struct dw_spi *dws)
@@ -831,7 +855,7 @@
 	master->transfer = dw_spi_transfer;
 
 	/* Basic HW init */
-	spi_hw_init(dws);
+	dw_spi_hw_init(dws);
 
 	if (dws->dma_ops && dws->dma_ops->dma_init) {
 		ret = dws->dma_ops->dma_init(dws);
@@ -842,12 +866,12 @@
 	}
 
 	/* Initial and start queue */
-	ret = init_queue(dws);
+	ret = dw_spi_init_queue(dws);
 	if (ret) {
 		dev_err(&master->dev, "problem initializing queue\n");
 		goto err_diable_hw;
 	}
-	ret = start_queue(dws);
+	ret = dw_spi_start_queue(dws);
 	if (ret) {
 		dev_err(&master->dev, "problem starting queue\n");
 		goto err_diable_hw;
@@ -864,7 +888,7 @@
 	return 0;
 
 err_queue_alloc:
-	destroy_queue(dws);
+	dw_spi_destroy_queue(dws);
 	if (dws->dma_ops && dws->dma_ops->dma_exit)
 		dws->dma_ops->dma_exit(dws);
 err_diable_hw:
@@ -886,7 +910,7 @@
 	mrst_spi_debugfs_remove(dws);
 
 	/* Remove the queue */
-	status = destroy_queue(dws);
+	status = dw_spi_destroy_queue(dws);
 	if (status != 0)
 		dev_err(&dws->master->dev, "dw_spi_remove: workqueue will not "
 			"complete, message memory not freed\n");
@@ -907,11 +931,16 @@
 {
 	int ret = 0;
 
-	ret = stop_queue(dws);
+	ret = dw_spi_stop_queue(dws);
 	if (ret)
 		return ret;
+
 	spi_enable_chip(dws, 0);
 	spi_set_clk(dws, 0);
+
+	if (dws->dma_inited)
+		dws->dma_ops->dma_suspend(dws);
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(dw_spi_suspend_host);
@@ -920,10 +949,14 @@
 {
 	int ret;
 
-	spi_hw_init(dws);
-	ret = start_queue(dws);
+	if (dws->dma_inited)
+		dws->dma_ops->dma_resume(dws);
+
+	dw_spi_hw_init(dws);
+	ret = dw_spi_start_queue(dws);
 	if (ret)
 		dev_err(&dws->master->dev, "fail to start queue (%d)\n", ret);
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(dw_spi_resume_host);
diff --git a/drivers/spi/spi-dw.h b/drivers/spi/spi-dw.h
index 9c57c07..2aaccb7 100644
--- a/drivers/spi/spi-dw.h
+++ b/drivers/spi/spi-dw.h
@@ -87,6 +87,8 @@
 	int (*dma_init)(struct dw_spi *dws);
 	void (*dma_exit)(struct dw_spi *dws);
 	int (*dma_transfer)(struct dw_spi *dws, int cs_change);
+	int (*dma_suspend)(struct dw_spi *dws);
+	int (*dma_resume)(struct dw_spi *dws);
 };
 
 struct dw_spi {
@@ -111,7 +113,6 @@
 	struct work_struct	pump_messages;
 	spinlock_t		lock;
 	struct list_head	queue;
-	int			busy;
 	int			run;
 
 	/* Message Transfer pump */
@@ -236,7 +237,9 @@
 extern int dw_spi_suspend_host(struct dw_spi *dws);
 extern int dw_spi_resume_host(struct dw_spi *dws);
 extern void dw_spi_xfer_done(struct dw_spi *dws);
+extern int dw_spi_stop_queue(struct dw_spi *dws);
 
 /* platform related setup */
-extern int dw_spi_mid_init(struct dw_spi *dws); /* Intel MID platforms */
+/* Intel MID platforms */
+extern int dw_spi_mid_init(struct dw_spi *dws, int bus_num);
 #endif /* DW_SPI_HEADER_H */
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index 0021fc4..95943c8 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -252,7 +252,7 @@
 		/*
 		 * ... otherwise, take it from spi->controller_data
 		 */
-		cs = (unsigned int) spi->controller_data;
+		cs = (unsigned int) (uintptr_t) spi->controller_data;
 	}
 
 	if (!spi->controller_state) {
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 48b396f..b031de5 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -69,6 +69,8 @@
 #define LPSS_TX_HITHRESH_DFLT	224
 
 /* Offset from drv_data->lpss_base */
+#define PRV_CLK_PARAMS		0x00
+#define SSP_RESETS		0x04
 #define SSP_REG			0x0c
 #define SPI_CS_CONTROL		0x18
 #define SPI_CS_CONTROL_SW_MODE	BIT(0)
@@ -96,6 +98,37 @@
 	writel(value, drv_data->lpss_base + offset);
 }
 
+static void lpss_ssp_restore(struct driver_data *drv_data)
+{
+	u32 update_bit, param;
+	u32 m = 1, n = 2;
+	u32 value, orig;
+
+	if (!is_lpss_ssp(drv_data))
+		return;
+
+	/* Reset LPSS SSP Controller */
+	__lpss_ssp_write_priv(drv_data, SSP_RESETS, 0x0);
+	usleep_range(10, 100);
+	__lpss_ssp_write_priv(drv_data, SSP_RESETS, 0x3);
+	usleep_range(10, 100);
+
+	/* Setting the clock divisor */
+	update_bit = 1 << 31;
+	param = (m << 1) | (n << 16) | 0x1;
+	__lpss_ssp_write_priv(drv_data, PRV_CLK_PARAMS, param | update_bit);
+	drv_data->max_clk_rate = 50000000;
+	dev_dbg(&drv_data->pdev->dev, "ssp_clk=%dMHz\n", (100*m/n));
+
+	/* Enable software chip select control */
+	value = SPI_CS_CONTROL_SW_MODE | SPI_CS_CONTROL_CS_HIGH;
+	__lpss_ssp_write_priv(drv_data, SPI_CS_CONTROL, value);
+
+	/* Enable multiblock DMA transfers */
+	if (drv_data->master_info->enable_dma)
+		__lpss_ssp_write_priv(drv_data, SSP_REG, 1);
+}
+
 /*
  * lpss_ssp_setup - perform LPSS SSP specific setup
  * @drv_data: pointer to the driver private data
@@ -137,13 +170,8 @@
 	/* Now set the LPSS base */
 	drv_data->lpss_base = drv_data->ioaddr + offset;
 
-	/* Enable software chip select control */
-	value = SPI_CS_CONTROL_SW_MODE | SPI_CS_CONTROL_CS_HIGH;
-	__lpss_ssp_write_priv(drv_data, SPI_CS_CONTROL, value);
-
-	/* Enable multiblock DMA transfers */
-	if (drv_data->master_info->enable_dma)
-		__lpss_ssp_write_priv(drv_data, SSP_REG, 1);
+	/* Init LPSS private register bits */
+	lpss_ssp_restore(drv_data);
 }
 
 static void lpss_ssp_cs_control(struct driver_data *drv_data, bool enable)
@@ -1119,6 +1147,8 @@
 static struct acpi_device_id pxa2xx_spi_acpi_match[] = {
 	{ "INT33C0", 0 },
 	{ "INT33C1", 0 },
+	{ "80860F0E", 0},
+	{ "8086228E", 0},
 	{ },
 };
 MODULE_DEVICE_TABLE(acpi, pxa2xx_spi_acpi_match);
@@ -1208,6 +1238,9 @@
 		goto out_error_master_alloc;
 	}
 
+	/* Fixme: disable dma, only enable pio mode */
+	platform_info->enable_dma = 0;
+
 	/* Setup DMA if requested */
 	drv_data->tx_channel = -1;
 	drv_data->rx_channel = -1;
@@ -1224,6 +1257,8 @@
 
 	drv_data->max_clk_rate = clk_get_rate(ssp->clk);
 
+	lpss_ssp_setup(drv_data);
+
 	/* Load default SSP configuration */
 	write_SSCR0(0, drv_data->ioaddr);
 	write_SSCR1(SSCR1_RxTresh(RX_THRESH_DFLT) |
@@ -1237,8 +1272,6 @@
 		write_SSTO(0, drv_data->ioaddr);
 	write_SSPSP(0, drv_data->ioaddr);
 
-	lpss_ssp_setup(drv_data);
-
 	tasklet_init(&drv_data->pump_transfers, pump_transfers,
 		     (unsigned long)drv_data);
 
@@ -1340,6 +1373,9 @@
 	/* Enable the SSP clock */
 	clk_prepare_enable(ssp->clk);
 
+	/* Restore LPSS private register bits */
+	lpss_ssp_restore(drv_data);
+
 	/* Start the queue running */
 	status = spi_master_resume(drv_data->master);
 	if (status != 0) {
@@ -1365,6 +1401,10 @@
 	struct driver_data *drv_data = dev_get_drvdata(dev);
 
 	clk_prepare_enable(drv_data->ssp->clk);
+
+	/* Restore LPSS private register bits */
+	lpss_ssp_restore(drv_data);
+
 	return 0;
 }
 #endif
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 32b7bb1..170a130 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -366,7 +366,11 @@
 	}
 
 	/* Set the bus ID string */
-	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
+	if (!strcmp(spi->modalias, "spidev"))
+		dev_set_name(&spi->dev, "spidev%u.%u", spi->master->bus_num,
+			spi->chip_select);
+	else
+		dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
 			spi->chip_select);
 
 
diff --git a/drivers/spi/spidev_info.c b/drivers/spi/spidev_info.c
new file mode 100644
index 0000000..07cc122
--- /dev/null
+++ b/drivers/spi/spidev_info.c
@@ -0,0 +1,92 @@
+/*
+ * SPI debugfs interface for spidev register
+ *
+ * Copyright (C) 2014, Intel Corporation
+ * Authors: Huiquan Zhong <huiquan.zhong@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/spi/spi.h>
+
+static struct spi_board_info spidev_info = {
+	.modalias = "spidev",
+	.max_speed_hz = 1000000,
+	.bus_num = 1,
+	.chip_select = 0,
+	.mode = SPI_MODE_0,
+};
+
+static int spidev_debug_open(struct inode *inode, struct file *filp)
+{
+	filp->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t spidev_debug_write(struct file *filp, const char __user *ubuf,
+		size_t cnt, loff_t *ppos)
+{
+	char buf[32];
+	ssize_t buf_size;
+	char *start = buf;
+	unsigned int bus_num, cs_num;
+
+	if (*ppos < 0 || !cnt)
+		return -EINVAL;
+
+	buf_size = min(cnt, (sizeof(buf)-1));
+
+	if (copy_from_user(buf, ubuf, buf_size))
+		return -EFAULT;
+	buf[buf_size] = 0;
+
+	while (*start == ' ')
+		start++;
+	bus_num = simple_strtoul(start, &start, 10);
+
+	while (*start == ' ')
+		start++;
+	if (kstrtouint(start, 10, &cs_num))
+		return -EINVAL;
+
+	spidev_info.bus_num = bus_num;
+	spidev_info.chip_select = cs_num;
+
+	spi_register_board_info(&spidev_info, 1);
+
+	return buf_size;
+}
+
+static const struct file_operations spidev_debug_fops = {
+	.open		= spidev_debug_open,
+	.write		= spidev_debug_write,
+	.llseek		= generic_file_llseek,
+};
+
+struct dentry *spidev_node;
+static __init int spidev_debug_init(void)
+{
+
+	spidev_node = debugfs_create_file("spidev_node", S_IFREG | S_IWUSR,
+				NULL, NULL, &spidev_debug_fops);
+	if (!spidev_node) {
+		pr_err("Failed to create spidev_node debug file\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static __exit void spidev_debug_exit(void)
+{
+	debugfs_remove(spidev_node);
+}
+module_init(spidev_debug_init);
+module_exit(spidev_debug_exit);
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 25c8bff..7b200b3 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -70,6 +70,8 @@
 
 source "drivers/staging/sep/Kconfig"
 
+source "drivers/staging/sep54/Kconfig"
+
 source "drivers/staging/iio/Kconfig"
 
 source "drivers/staging/wlags49_h2/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index f9d86a4..5dd1465 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -29,6 +29,7 @@
 obj-$(CONFIG_VT6656)		+= vt6656/
 obj-$(CONFIG_VME_BUS)		+= vme/
 obj-$(CONFIG_DX_SEP)            += sep/
+obj-$(CONFIG_DX_SEP54)          += sep54/
 obj-$(CONFIG_IIO)		+= iio/
 obj-$(CONFIG_WLAGS49_H2)	+= wlags49_h2/
 obj-$(CONFIG_WLAGS49_H25)	+= wlags49_h25/
diff --git a/drivers/staging/android/alarm-dev.c b/drivers/staging/android/alarm-dev.c
new file mode 100644
index 0000000..3110033
--- /dev/null
+++ b/drivers/staging/android/alarm-dev.c
@@ -0,0 +1,456 @@
+/* drivers/rtc/alarm-dev.c
+ *
+ * Copyright (C) 2007-2009 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/time.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include <linux/alarmtimer.h>
+#include "android_alarm.h"
+
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+#endif
+
+#define ANDROID_ALARM_PRINT_INFO (1U << 0)
+#define ANDROID_ALARM_PRINT_IO (1U << 1)
+#define ANDROID_ALARM_PRINT_INT (1U << 2)
+
+static int debug_mask = ANDROID_ALARM_PRINT_INFO;
+module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+#define alarm_dbg(debug_level_mask, fmt, ...)				\
+do {									\
+	if (debug_mask & ANDROID_ALARM_PRINT_##debug_level_mask)	\
+		pr_info(fmt, ##__VA_ARGS__);				\
+} while (0)
+
+#define ANDROID_ALARM_WAKEUP_MASK ( \
+	ANDROID_ALARM_RTC_WAKEUP_MASK | \
+	ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK | \
+	ANDROID_ALARM_POWER_OFF_WAKEUP_MASK)
+
+static int alarm_opened;
+static DEFINE_SPINLOCK(alarm_slock);
+static struct wakeup_source alarm_wake_lock;
+static DECLARE_WAIT_QUEUE_HEAD(alarm_wait_queue);
+static uint32_t alarm_pending;
+static uint32_t alarm_enabled;
+static uint32_t wait_pending;
+
+struct devalarm {
+	union {
+		struct hrtimer hrt;
+		struct alarm alrm;
+	} u;
+	enum android_alarm_type type;
+};
+
+static struct devalarm alarms[ANDROID_ALARM_TYPE_COUNT];
+
+
+static int is_wakeup(enum android_alarm_type type)
+{
+	return (type == ANDROID_ALARM_RTC_WAKEUP ||
+		type == ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP ||
+		type == ANDROID_ALARM_POWER_OFF_WAKEUP);
+}
+
+
+static void devalarm_start(struct devalarm *alrm, ktime_t exp)
+{
+	if (is_wakeup(alrm->type))
+		alarm_start(&alrm->u.alrm, exp);
+	else
+		hrtimer_start(&alrm->u.hrt, exp, HRTIMER_MODE_ABS);
+}
+
+
+static int devalarm_try_to_cancel(struct devalarm *alrm)
+{
+	if (is_wakeup(alrm->type))
+		return alarm_try_to_cancel(&alrm->u.alrm);
+	return hrtimer_try_to_cancel(&alrm->u.hrt);
+}
+
+static void devalarm_cancel(struct devalarm *alrm)
+{
+	if (is_wakeup(alrm->type))
+		alarm_cancel(&alrm->u.alrm);
+	else
+		hrtimer_cancel(&alrm->u.hrt);
+}
+
+static void alarm_clear(enum android_alarm_type alarm_type)
+{
+	uint32_t alarm_type_mask = 1U << alarm_type;
+	unsigned long flags;
+
+	spin_lock_irqsave(&alarm_slock, flags);
+	alarm_dbg(IO, "alarm %d clear\n", alarm_type);
+	devalarm_try_to_cancel(&alarms[alarm_type]);
+	if (alarm_pending) {
+		alarm_pending &= ~alarm_type_mask;
+		if (!alarm_pending && !wait_pending)
+			__pm_relax(&alarm_wake_lock);
+	}
+	alarm_enabled &= ~alarm_type_mask;
+	spin_unlock_irqrestore(&alarm_slock, flags);
+
+}
+
+static void alarm_set(enum android_alarm_type alarm_type,
+							struct timespec *ts)
+{
+	uint32_t alarm_type_mask = 1U << alarm_type;
+	unsigned long flags;
+
+	spin_lock_irqsave(&alarm_slock, flags);
+	alarm_dbg(IO, "alarm %d set %ld.%09ld\n",
+			alarm_type, ts->tv_sec, ts->tv_nsec);
+	alarm_enabled |= alarm_type_mask;
+	devalarm_start(&alarms[alarm_type], timespec_to_ktime(*ts));
+	spin_unlock_irqrestore(&alarm_slock, flags);
+}
+
+static int alarm_wait(void)
+{
+	unsigned long flags;
+	int rv = 0;
+
+	spin_lock_irqsave(&alarm_slock, flags);
+	alarm_dbg(IO, "alarm wait\n");
+	if (!alarm_pending && wait_pending) {
+		__pm_relax(&alarm_wake_lock);
+		wait_pending = 0;
+	}
+	spin_unlock_irqrestore(&alarm_slock, flags);
+
+	rv = wait_event_interruptible(alarm_wait_queue, alarm_pending);
+	if (rv)
+		return rv;
+
+	spin_lock_irqsave(&alarm_slock, flags);
+	rv = alarm_pending;
+	wait_pending = 1;
+	alarm_pending = 0;
+	spin_unlock_irqrestore(&alarm_slock, flags);
+
+	return rv;
+}
+
+static int alarm_set_rtc(struct timespec *ts)
+{
+	struct rtc_time new_rtc_tm;
+	struct rtc_device *rtc_dev;
+	unsigned long flags;
+	int rv = 0;
+
+	rtc_time_to_tm(ts->tv_sec, &new_rtc_tm);
+	rtc_dev = alarmtimer_get_rtcdev();
+	rv = do_settimeofday(ts);
+	if (rv < 0)
+		return rv;
+	if (rtc_dev)
+		rv = rtc_set_time(rtc_dev, &new_rtc_tm);
+
+	spin_lock_irqsave(&alarm_slock, flags);
+	alarm_pending |= ANDROID_ALARM_TIME_CHANGE_MASK;
+	wake_up(&alarm_wait_queue);
+	spin_unlock_irqrestore(&alarm_slock, flags);
+
+	return rv;
+}
+
+static int alarm_get_time(enum android_alarm_type alarm_type,
+							struct timespec *ts)
+{
+	int rv = 0;
+
+	switch (alarm_type) {
+	case ANDROID_ALARM_RTC_WAKEUP:
+	case ANDROID_ALARM_RTC:
+	case ANDROID_ALARM_POWER_OFF_WAKEUP:
+		getnstimeofday(ts);
+		break;
+	case ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP:
+	case ANDROID_ALARM_ELAPSED_REALTIME:
+		get_monotonic_boottime(ts);
+		break;
+	case ANDROID_ALARM_SYSTEMTIME:
+		ktime_get_ts(ts);
+		break;
+	default:
+		rv = -EINVAL;
+	}
+	return rv;
+}
+
+static long alarm_do_ioctl(struct file *file, unsigned int cmd,
+							struct timespec *ts)
+{
+	int rv = 0;
+	unsigned long flags;
+	enum android_alarm_type alarm_type = ANDROID_ALARM_IOCTL_TO_TYPE(cmd);
+
+	if (alarm_type >= ANDROID_ALARM_TYPE_COUNT)
+		return -EINVAL;
+
+	if (ANDROID_ALARM_BASE_CMD(cmd) != ANDROID_ALARM_GET_TIME(0)) {
+		if ((file->f_flags & O_ACCMODE) == O_RDONLY)
+			return -EPERM;
+		if (file->private_data == NULL &&
+		    cmd != ANDROID_ALARM_SET_RTC) {
+			spin_lock_irqsave(&alarm_slock, flags);
+			if (alarm_opened) {
+				spin_unlock_irqrestore(&alarm_slock, flags);
+				return -EBUSY;
+			}
+			alarm_opened = 1;
+			file->private_data = (void *)1;
+			spin_unlock_irqrestore(&alarm_slock, flags);
+		}
+	}
+
+	switch (ANDROID_ALARM_BASE_CMD(cmd)) {
+	case ANDROID_ALARM_CLEAR(0):
+		alarm_clear(alarm_type);
+		break;
+	case ANDROID_ALARM_SET(0):
+		alarm_set(alarm_type, ts);
+		break;
+	case ANDROID_ALARM_SET_AND_WAIT(0):
+		alarm_set(alarm_type, ts);
+		/* fall though */
+	case ANDROID_ALARM_WAIT:
+		rv = alarm_wait();
+		break;
+	case ANDROID_ALARM_SET_RTC:
+		rv = alarm_set_rtc(ts);
+		break;
+	case ANDROID_ALARM_GET_TIME(0):
+		rv = alarm_get_time(alarm_type, ts);
+		break;
+
+	default:
+		rv = -EINVAL;
+	}
+	return rv;
+}
+
+static long alarm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+
+	struct timespec ts;
+	long rv = 0;
+
+
+	switch (ANDROID_ALARM_BASE_CMD(cmd)) {
+	case ANDROID_ALARM_SET_AND_WAIT(0):
+	case ANDROID_ALARM_SET(0):
+	case ANDROID_ALARM_SET_RTC:
+		if (copy_from_user(&ts, (void __user *)arg, sizeof(ts)))
+			return -EFAULT;
+		break;
+	}
+
+	rv = alarm_do_ioctl(file, cmd, &ts);
+	if (rv)
+		return rv;
+
+	switch (ANDROID_ALARM_BASE_CMD(cmd)) {
+	case ANDROID_ALARM_GET_TIME(0):
+		if (copy_to_user((void __user *)arg, &ts, sizeof(ts)))
+			return -EFAULT;
+		break;
+	}
+
+	return 0;
+}
+#ifdef CONFIG_COMPAT
+static long alarm_compat_ioctl(struct file *file, unsigned int cmd,
+							unsigned long arg)
+{
+
+	struct timespec ts;
+	long rv;
+
+	switch (ANDROID_ALARM_BASE_CMD(cmd)) {
+	case ANDROID_ALARM_SET_AND_WAIT_COMPAT(0):
+	case ANDROID_ALARM_SET_COMPAT(0):
+	case ANDROID_ALARM_SET_RTC_COMPAT:
+		if (compat_get_timespec(&ts, (void __user *)arg))
+			return -EFAULT;
+		/* fall through */
+	case ANDROID_ALARM_GET_TIME_COMPAT(0):
+		cmd = ANDROID_ALARM_COMPAT_TO_NORM(cmd);
+		break;
+	}
+
+	rv = alarm_do_ioctl(file, cmd, &ts);
+	if (rv)
+		return rv;
+
+	switch (ANDROID_ALARM_BASE_CMD(cmd)) {
+	case ANDROID_ALARM_GET_TIME(0): /* NOTE: we modified cmd above */
+		if (compat_put_timespec(&ts, (void __user *)arg))
+			return -EFAULT;
+		break;
+	}
+
+	return 0;
+}
+#endif
+
+static int alarm_open(struct inode *inode, struct file *file)
+{
+	file->private_data = NULL;
+	return 0;
+}
+
+static int alarm_release(struct inode *inode, struct file *file)
+{
+	int i;
+	unsigned long flags;
+
+	spin_lock_irqsave(&alarm_slock, flags);
+	if (file->private_data) {
+		for (i = 0; i < ANDROID_ALARM_TYPE_COUNT; i++) {
+			uint32_t alarm_type_mask = 1U << i;
+			if (alarm_enabled & alarm_type_mask) {
+				alarm_dbg(INFO,
+					  "%s: clear alarm, pending %d\n",
+					  __func__,
+					  !!(alarm_pending & alarm_type_mask));
+				alarm_enabled &= ~alarm_type_mask;
+			}
+			spin_unlock_irqrestore(&alarm_slock, flags);
+			devalarm_cancel(&alarms[i]);
+			spin_lock_irqsave(&alarm_slock, flags);
+		}
+		if (alarm_pending | wait_pending) {
+			if (alarm_pending)
+				alarm_dbg(INFO, "%s: clear pending alarms %x\n",
+					  __func__, alarm_pending);
+			__pm_relax(&alarm_wake_lock);
+			wait_pending = 0;
+			alarm_pending = 0;
+		}
+		alarm_opened = 0;
+	}
+	spin_unlock_irqrestore(&alarm_slock, flags);
+	return 0;
+}
+
+static void devalarm_triggered(struct devalarm *alarm)
+{
+	unsigned long flags;
+	uint32_t alarm_type_mask = 1U << alarm->type;
+
+	alarm_dbg(INT, "%s: type %d\n", __func__, alarm->type);
+	spin_lock_irqsave(&alarm_slock, flags);
+	if (alarm_enabled & alarm_type_mask) {
+		__pm_wakeup_event(&alarm_wake_lock, 5000); /* 5secs */
+		alarm_enabled &= ~alarm_type_mask;
+		alarm_pending |= alarm_type_mask;
+		wake_up(&alarm_wait_queue);
+	}
+	spin_unlock_irqrestore(&alarm_slock, flags);
+}
+
+
+static enum hrtimer_restart devalarm_hrthandler(struct hrtimer *hrt)
+{
+	struct devalarm *devalrm = container_of(hrt, struct devalarm, u.hrt);
+
+	devalarm_triggered(devalrm);
+	return HRTIMER_NORESTART;
+}
+
+static enum alarmtimer_restart devalarm_alarmhandler(struct alarm *alrm,
+							ktime_t now)
+{
+	struct devalarm *devalrm = container_of(alrm, struct devalarm, u.alrm);
+
+	devalarm_triggered(devalrm);
+	return ALARMTIMER_NORESTART;
+}
+
+
+static const struct file_operations alarm_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = alarm_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl   = alarm_ioctl,
+#endif
+	.open = alarm_open,
+	.release = alarm_release,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = alarm_compat_ioctl,
+#endif
+};
+
+static struct miscdevice alarm_device = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "alarm",
+	.fops = &alarm_fops,
+};
+
+static int __init alarm_dev_init(void)
+{
+	int err;
+	int i;
+
+	err = misc_register(&alarm_device);
+	if (err)
+		return err;
+
+	alarm_init(&alarms[ANDROID_ALARM_RTC_WAKEUP].u.alrm,
+			ALARM_REALTIME, devalarm_alarmhandler);
+	alarm_init(&alarms[ANDROID_ALARM_POWER_OFF_WAKEUP].u.alrm,
+			ALARM_REALTIME_OFF, devalarm_alarmhandler);
+	hrtimer_init(&alarms[ANDROID_ALARM_RTC].u.hrt,
+			CLOCK_REALTIME, HRTIMER_MODE_ABS);
+	alarm_init(&alarms[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP].u.alrm,
+			ALARM_BOOTTIME, devalarm_alarmhandler);
+	hrtimer_init(&alarms[ANDROID_ALARM_ELAPSED_REALTIME].u.hrt,
+			CLOCK_BOOTTIME, HRTIMER_MODE_ABS);
+	hrtimer_init(&alarms[ANDROID_ALARM_SYSTEMTIME].u.hrt,
+			CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+
+	for (i = 0; i < ANDROID_ALARM_TYPE_COUNT; i++) {
+		alarms[i].type = i;
+		if (!is_wakeup(i))
+			alarms[i].u.hrt.function = devalarm_hrthandler;
+	}
+
+	wakeup_source_init(&alarm_wake_lock, "alarm");
+	return 0;
+}
+
+static void  __exit alarm_dev_exit(void)
+{
+	misc_deregister(&alarm_device);
+	wakeup_source_trash(&alarm_wake_lock);
+}
+
+module_init(alarm_dev_init);
+module_exit(alarm_dev_exit);
+
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index 69c2168..2a78681 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -27,7 +27,7 @@
 #include "ion_priv.h"
 
 static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
-				     __GFP_NORETRY) & ~__GFP_WAIT;
+				     __GFP_NORETRY | __GFP_NO_KSWAPD) & ~__GFP_WAIT;
 static gfp_t low_order_gfp_flags  = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN);
 static const unsigned int orders[] = {8, 4, 0};
 static const int num_orders = ARRAY_SIZE(orders);
diff --git a/drivers/staging/android/logger.c b/drivers/staging/android/logger.c
new file mode 100644
index 0000000..fcb0475
--- /dev/null
+++ b/drivers/staging/android/logger.c
@@ -0,0 +1,919 @@
+/*
+ * drivers/misc/logger.c
+ *
+ * A Logging Subsystem
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ *
+ * Robert Love <rlove@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "logger: " fmt
+
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/vmalloc.h>
+#include <linux/aio.h>
+#include "logger.h"
+
+#include <asm/ioctls.h>
+
+/**
+ * struct logger_log - represents a specific log, such as 'main' or 'radio'
+ * @buffer:	The actual ring buffer
+ * @misc:	The "misc" device representing the log
+ * @wq:	The wait queue for @readers
+ * @readers:	This log's readers
+ * @mutex:	The mutex that protects the @buffer
+ * @w_off:	The current write head offset
+ * @head:	The head, or location that readers start reading at.
+ * @size:	The size of the log
+ * @logs:	The list of log channels
+ * @plugins:    The list of plugins (to export traces to different outputs)
+ *
+ * This structure lives from module insertion until module removal, so it does
+ * not need additional reference counting. The structure is protected by the
+ * mutex 'mutex'.
+ */
+struct logger_log {
+	unsigned char		*buffer;
+	struct miscdevice	misc;
+	wait_queue_head_t	wq;
+	struct list_head	readers;
+	struct mutex		mutex;
+	size_t			w_off;
+	size_t			head;
+	size_t			size;
+	struct list_head	logs;
+	struct list_head        plugins;
+};
+
+static LIST_HEAD(log_list);
+
+
+/**
+ * struct logger_reader - a logging device open for reading
+ * @log:	The associated log
+ * @list:	The associated entry in @logger_log's list
+ * @r_off:	The current read head offset.
+ * @r_all:	Reader can read all entries
+ * @r_ver:	Reader ABI version
+ *
+ * This object lives from open to release, so we don't need additional
+ * reference counting. The structure is protected by log->mutex.
+ */
+struct logger_reader {
+	struct logger_log	*log;
+	struct list_head	list;
+	size_t			r_off;
+	bool			r_all;
+	int			r_ver;
+};
+
+/* logger_offset - returns index 'n' into the log via (optimized) modulus */
+static size_t logger_offset(struct logger_log *log, size_t n)
+{
+	return n & (log->size - 1);
+}
+
+
+/*
+ * file_get_log - Given a file structure, return the associated log
+ *
+ * This isn't aesthetic. We have several goals:
+ *
+ *	1) Need to quickly obtain the associated log during an I/O operation
+ *	2) Readers need to maintain state (logger_reader)
+ *	3) Writers need to be very fast (open() should be a near no-op)
+ *
+ * In the reader case, we can trivially go file->logger_reader->logger_log.
+ * For a writer, we don't want to maintain a logger_reader, so we just go
+ * file->logger_log. Thus what file->private_data points at depends on whether
+ * or not the file was opened for reading. This function hides that dirtiness.
+ */
+static inline struct logger_log *file_get_log(struct file *file)
+{
+	if (file->f_mode & FMODE_READ) {
+		struct logger_reader *reader = file->private_data;
+		return reader->log;
+	} else
+		return file->private_data;
+}
+
+/*
+ * get_entry_header - returns a pointer to the logger_entry header within
+ * 'log' starting at offset 'off'. A temporary logger_entry 'scratch' must
+ * be provided. Typically the return value will be a pointer within
+ * 'logger->buf'.  However, a pointer to 'scratch' may be returned if
+ * the log entry spans the end and beginning of the circular buffer.
+ */
+static struct logger_entry *get_entry_header(struct logger_log *log,
+		size_t off, struct logger_entry *scratch)
+{
+	size_t len = min(sizeof(struct logger_entry), log->size - off);
+	if (len != sizeof(struct logger_entry)) {
+		memcpy(((void *) scratch), log->buffer + off, len);
+		memcpy(((void *) scratch) + len, log->buffer,
+			sizeof(struct logger_entry) - len);
+		return scratch;
+	}
+
+	return (struct logger_entry *) (log->buffer + off);
+}
+
+/*
+ * get_entry_msg_len - Grabs the length of the message of the entry
+ * starting from from 'off'.
+ *
+ * An entry length is 2 bytes (16 bits) in host endian order.
+ * In the log, the length does not include the size of the log entry structure.
+ * This function returns the size including the log entry structure.
+ *
+ * Caller needs to hold log->mutex.
+ */
+static __u32 get_entry_msg_len(struct logger_log *log, size_t off)
+{
+	struct logger_entry scratch;
+	struct logger_entry *entry;
+
+	entry = get_entry_header(log, off, &scratch);
+	return entry->len;
+}
+
+static size_t get_user_hdr_len(int ver)
+{
+	if (ver < 2)
+		return sizeof(struct user_logger_entry_compat);
+	else
+		return sizeof(struct logger_entry);
+}
+
+static ssize_t copy_header_to_user(int ver, struct logger_entry *entry,
+					 char __user *buf)
+{
+	void *hdr;
+	size_t hdr_len;
+	struct user_logger_entry_compat v1;
+
+	if (ver < 2) {
+		v1.len      = entry->len;
+		v1.__pad    = 0;
+		v1.pid      = entry->pid;
+		v1.tid      = entry->tid;
+		v1.sec      = entry->sec;
+		v1.nsec     = entry->nsec;
+		hdr         = &v1;
+		hdr_len     = sizeof(struct user_logger_entry_compat);
+	} else {
+		hdr         = entry;
+		hdr_len     = sizeof(struct logger_entry);
+	}
+
+	return copy_to_user(buf, hdr, hdr_len);
+}
+
+/*
+ * do_read_log_to_user - reads exactly 'count' bytes from 'log' into the
+ * user-space buffer 'buf'. Returns 'count' on success.
+ *
+ * Caller must hold log->mutex.
+ */
+static ssize_t do_read_log_to_user(struct logger_log *log,
+				   struct logger_reader *reader,
+				   char __user *buf,
+				   size_t count)
+{
+	struct logger_entry scratch;
+	struct logger_entry *entry;
+	size_t len;
+	size_t msg_start;
+
+	/*
+	 * First, copy the header to userspace, using the version of
+	 * the header requested
+	 */
+	entry = get_entry_header(log, reader->r_off, &scratch);
+	if (copy_header_to_user(reader->r_ver, entry, buf))
+		return -EFAULT;
+
+	count -= get_user_hdr_len(reader->r_ver);
+	buf += get_user_hdr_len(reader->r_ver);
+	msg_start = logger_offset(log,
+		reader->r_off + sizeof(struct logger_entry));
+
+	/*
+	 * We read from the msg in two disjoint operations. First, we read from
+	 * the current msg head offset up to 'count' bytes or to the end of
+	 * the log, whichever comes first.
+	 */
+	len = min(count, log->size - msg_start);
+	if (copy_to_user(buf, log->buffer + msg_start, len))
+		return -EFAULT;
+
+	/*
+	 * Second, we read any remaining bytes, starting back at the head of
+	 * the log.
+	 */
+	if (count != len)
+		if (copy_to_user(buf + len, log->buffer, count - len))
+			return -EFAULT;
+
+	reader->r_off = logger_offset(log, reader->r_off +
+		sizeof(struct logger_entry) + count);
+
+	return count + get_user_hdr_len(reader->r_ver);
+}
+
+/*
+ * get_next_entry_by_uid - Starting at 'off', returns an offset into
+ * 'log->buffer' which contains the first entry readable by 'euid'
+ */
+static size_t get_next_entry_by_uid(struct logger_log *log,
+		size_t off, kuid_t euid)
+{
+	while (off != log->w_off) {
+		struct logger_entry *entry;
+		struct logger_entry scratch;
+		size_t next_len;
+
+		entry = get_entry_header(log, off, &scratch);
+
+		if (uid_eq(entry->euid, euid))
+			return off;
+
+		next_len = sizeof(struct logger_entry) + entry->len;
+		off = logger_offset(log, off + next_len);
+	}
+
+	return off;
+}
+
+/*
+ * logger_read - our log's read() method
+ *
+ * Behavior:
+ *
+ *	- O_NONBLOCK works
+ *	- If there are no log entries to read, blocks until log is written to
+ *	- Atomically reads exactly one log entry
+ *
+ * Will set errno to EINVAL if read
+ * buffer is insufficient to hold next entry.
+ */
+static ssize_t logger_read(struct file *file, char __user *buf,
+			   size_t count, loff_t *pos)
+{
+	struct logger_reader *reader = file->private_data;
+	struct logger_log *log = reader->log;
+	ssize_t ret;
+	DEFINE_WAIT(wait);
+
+start:
+	while (1) {
+		mutex_lock(&log->mutex);
+
+		prepare_to_wait(&log->wq, &wait, TASK_INTERRUPTIBLE);
+
+		ret = (log->w_off == reader->r_off);
+		mutex_unlock(&log->mutex);
+		if (!ret)
+			break;
+
+		if (file->f_flags & O_NONBLOCK) {
+			ret = -EAGAIN;
+			break;
+		}
+
+		if (signal_pending(current)) {
+			ret = -EINTR;
+			break;
+		}
+
+		schedule();
+	}
+
+	finish_wait(&log->wq, &wait);
+	if (ret)
+		return ret;
+
+	mutex_lock(&log->mutex);
+
+	if (!reader->r_all)
+		reader->r_off = get_next_entry_by_uid(log,
+			reader->r_off, current_euid());
+
+	/* is there still something to read or did we race? */
+	if (unlikely(log->w_off == reader->r_off)) {
+		mutex_unlock(&log->mutex);
+		goto start;
+	}
+
+	/* get the size of the next entry */
+	ret = get_user_hdr_len(reader->r_ver) +
+		get_entry_msg_len(log, reader->r_off);
+	if (count < ret) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* get exactly one entry from the log */
+	ret = do_read_log_to_user(log, reader, buf, ret);
+
+out:
+	mutex_unlock(&log->mutex);
+
+	return ret;
+}
+
+/*
+ * get_next_entry - return the offset of the first valid entry at least 'len'
+ * bytes after 'off'.
+ *
+ * Caller must hold log->mutex.
+ */
+static size_t get_next_entry(struct logger_log *log, size_t off, size_t len)
+{
+	size_t count = 0;
+
+	do {
+		size_t nr = sizeof(struct logger_entry) +
+			get_entry_msg_len(log, off);
+		off = logger_offset(log, off + nr);
+		count += nr;
+	} while (count < len);
+
+	return off;
+}
+
+/*
+ * is_between - is a < c < b, accounting for wrapping of a, b, and c
+ *    positions in the buffer
+ *
+ * That is, if a<b, check for c between a and b
+ * and if a>b, check for c outside (not between) a and b
+ *
+ * |------- a xxxxxxxx b --------|
+ *               c^
+ *
+ * |xxxxx b --------- a xxxxxxxxx|
+ *    c^
+ *  or                    c^
+ */
+static inline int is_between(size_t a, size_t b, size_t c)
+{
+	if (a < b) {
+		/* is c between a and b? */
+		if (a < c && c <= b)
+			return 1;
+	} else {
+		/* is c outside of b through a? */
+		if (c <= b || a < c)
+			return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * fix_up_readers - walk the list of all readers and "fix up" any who were
+ * lapped by the writer; also do the same for the default "start head".
+ * We do this by "pulling forward" the readers and start head to the first
+ * entry after the new write head.
+ *
+ * The caller needs to hold log->mutex.
+ */
+static void fix_up_readers(struct logger_log *log, size_t len)
+{
+	size_t old = log->w_off;
+	size_t new = logger_offset(log, old + len);
+	struct logger_reader *reader;
+
+	if (is_between(old, new, log->head))
+		log->head = get_next_entry(log, log->head, len);
+
+	list_for_each_entry(reader, &log->readers, list)
+		if (is_between(old, new, reader->r_off))
+			reader->r_off = get_next_entry(log, reader->r_off, len);
+}
+
+/*
+ * do_write_log - writes 'len' bytes from 'buf' to 'log'
+ *
+ * The caller needs to hold log->mutex.
+ */
+static void do_write_log(struct logger_log *log, const void *buf, size_t count)
+{
+	size_t len;
+
+	len = min(count, log->size - log->w_off);
+	memcpy(log->buffer + log->w_off, buf, len);
+
+	if (count != len)
+		memcpy(log->buffer, buf + len, count - len);
+
+	log->w_off = logger_offset(log, log->w_off + count);
+
+}
+
+/*
+ * do_write_log_user - writes 'len' bytes from the user-space buffer 'buf' to
+ * the log 'log'
+ *
+ * The caller needs to hold log->mutex.
+ *
+ * Returns 'count' on success, negative error code on failure.
+ */
+static ssize_t do_write_log_from_user(struct logger_log *log,
+				      const void __user *buf, size_t count)
+{
+	size_t len;
+
+	len = min(count, log->size - log->w_off);
+	if (len && copy_from_user(log->buffer + log->w_off, buf, len))
+		return -EFAULT;
+
+	if (count != len)
+		if (copy_from_user(log->buffer, buf + len, count - len))
+			/*
+			 * Note that by not updating w_off, this abandons the
+			 * portion of the new entry that *was* successfully
+			 * copied, just above.  This is intentional to avoid
+			 * message corruption from missing fragments.
+			 */
+			return -EFAULT;
+
+	log->w_off = logger_offset(log, log->w_off + count);
+
+	return count;
+}
+
+/*
+ * logger_aio_write - our write method, implementing support for write(),
+ * writev(), and aio_write(). Writes are our fast path, and we try to optimize
+ * them above all else.
+ */
+static ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov,
+			 unsigned long nr_segs, loff_t ppos)
+{
+	struct logger_log *log = file_get_log(iocb->ki_filp);
+	size_t orig;
+	struct logger_entry header;
+	struct timespec now;
+	ssize_t ret = 0;
+	unsigned long num_segs = nr_segs;
+	struct logger_plugin *plugin;
+
+	now = current_kernel_time();
+
+	header.pid = current->tgid;
+	header.tid = current->pid;
+	header.sec = now.tv_sec;
+	header.nsec = now.tv_nsec;
+	header.euid = current_euid();
+	header.len = min_t(size_t, iocb->ki_left, LOGGER_ENTRY_MAX_PAYLOAD);
+	header.hdr_size = sizeof(struct logger_entry);
+
+	/* null writes succeed, return zero */
+	if (unlikely(!header.len))
+		return 0;
+
+	mutex_lock(&log->mutex);
+
+	orig = log->w_off;
+
+	/*
+	 * Fix up any readers, pulling them forward to the first readable
+	 * entry after (what will be) the new write offset. We do this now
+	 * because if we partially fail, we can end up with clobbered log
+	 * entries that encroach on readable buffer.
+	 */
+	fix_up_readers(log, sizeof(struct logger_entry) + header.len);
+
+	do_write_log(log, &header, sizeof(struct logger_entry));
+
+	while (nr_segs-- > 0) {
+		size_t len;
+		ssize_t nr;
+
+		/* figure out how much of this vector we can keep */
+		len = min_t(size_t, iov->iov_len, header.len - ret);
+
+		/* send this segment's payload to the different plugins */
+		list_for_each_entry(plugin, &log->plugins, list)
+			plugin->write_seg(iov->iov_base, len,
+				true, /* from_user */
+				(nr_segs + 1 == num_segs), /* start of msg ? */
+				(nr_segs == 0), /* end of msg ? */
+				plugin->data); /* call-back data */
+
+		/* write out this segment's payload to the log's buffer */
+		nr = do_write_log_from_user(log, iov->iov_base, len);
+		if (unlikely(nr < 0)) {
+			log->w_off = orig;
+			list_for_each_entry(plugin, &log->plugins, list)
+				plugin->write_seg_recover(plugin->data);
+			mutex_unlock(&log->mutex);
+			return nr;
+		}
+
+		iov++;
+		ret += nr;
+	}
+
+	mutex_unlock(&log->mutex);
+
+	/* wake up any blocked readers */
+	wake_up_interruptible(&log->wq);
+
+	return ret;
+}
+
+static struct logger_log *get_log_from_minor(int minor)
+{
+	struct logger_log *log;
+
+	list_for_each_entry(log, &log_list, logs)
+		if (log->misc.minor == minor)
+			return log;
+	return NULL;
+}
+
+static struct logger_log *get_log_from_name(const char *name)
+{
+	struct logger_log *log;
+
+	list_for_each_entry(log, &log_list, logs)
+		if (strncmp(log->misc.name, name, strlen(name)) == 0)
+			return log;
+
+	return NULL;
+}
+
+/*
+ * logger_open - the log's open() file operation
+ *
+ * Note how near a no-op this is in the write-only case. Keep it that way!
+ */
+static int logger_open(struct inode *inode, struct file *file)
+{
+	struct logger_log *log;
+	int ret;
+
+	ret = nonseekable_open(inode, file);
+	if (ret)
+		return ret;
+
+	log = get_log_from_minor(MINOR(inode->i_rdev));
+	if (!log)
+		return -ENODEV;
+
+	if (file->f_mode & FMODE_READ) {
+		struct logger_reader *reader;
+
+		reader = kmalloc(sizeof(struct logger_reader), GFP_KERNEL);
+		if (!reader)
+			return -ENOMEM;
+
+		reader->log = log;
+		reader->r_ver = 1;
+		reader->r_all = in_egroup_p(inode->i_gid) ||
+			capable(CAP_SYSLOG);
+
+		INIT_LIST_HEAD(&reader->list);
+
+		mutex_lock(&log->mutex);
+		reader->r_off = log->head;
+		list_add_tail(&reader->list, &log->readers);
+		mutex_unlock(&log->mutex);
+
+		file->private_data = reader;
+	} else
+		file->private_data = log;
+
+	return 0;
+}
+
+/*
+ * logger_release - the log's release file operation
+ *
+ * Note this is a total no-op in the write-only case. Keep it that way!
+ */
+static int logger_release(struct inode *ignored, struct file *file)
+{
+	if (file->f_mode & FMODE_READ) {
+		struct logger_reader *reader = file->private_data;
+		struct logger_log *log = reader->log;
+
+		mutex_lock(&log->mutex);
+		list_del(&reader->list);
+		mutex_unlock(&log->mutex);
+
+		kfree(reader);
+	}
+
+	return 0;
+}
+
+/*
+ * logger_poll - the log's poll file operation, for poll/select/epoll
+ *
+ * Note we always return POLLOUT, because you can always write() to the log.
+ * Note also that, strictly speaking, a return value of POLLIN does not
+ * guarantee that the log is readable without blocking, as there is a small
+ * chance that the writer can lap the reader in the interim between poll()
+ * returning and the read() request.
+ */
+static unsigned int logger_poll(struct file *file, poll_table *wait)
+{
+	struct logger_reader *reader;
+	struct logger_log *log;
+	unsigned int ret = POLLOUT | POLLWRNORM;
+
+	if (!(file->f_mode & FMODE_READ))
+		return ret;
+
+	reader = file->private_data;
+	log = reader->log;
+
+	poll_wait(file, &log->wq, wait);
+
+	mutex_lock(&log->mutex);
+	if (!reader->r_all)
+		reader->r_off = get_next_entry_by_uid(log,
+			reader->r_off, current_euid());
+
+	if (log->w_off != reader->r_off)
+		ret |= POLLIN | POLLRDNORM;
+	mutex_unlock(&log->mutex);
+
+	return ret;
+}
+
+static long logger_set_version(struct logger_reader *reader, void __user *arg)
+{
+	int version;
+	if (copy_from_user(&version, arg, sizeof(int)))
+		return -EFAULT;
+
+	if ((version < 1) || (version > 2))
+		return -EINVAL;
+
+	reader->r_ver = version;
+	return 0;
+}
+
+static long logger_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct logger_log *log = file_get_log(file);
+	struct logger_reader *reader;
+	long ret = -EINVAL;
+	void __user *argp = (void __user *) arg;
+
+	mutex_lock(&log->mutex);
+
+	switch (cmd) {
+	case LOGGER_GET_LOG_BUF_SIZE:
+		ret = log->size;
+		break;
+	case LOGGER_GET_LOG_LEN:
+		if (!(file->f_mode & FMODE_READ)) {
+			ret = -EBADF;
+			break;
+		}
+		reader = file->private_data;
+		if (log->w_off >= reader->r_off)
+			ret = log->w_off - reader->r_off;
+		else
+			ret = (log->size - reader->r_off) + log->w_off;
+		break;
+	case LOGGER_GET_NEXT_ENTRY_LEN:
+		if (!(file->f_mode & FMODE_READ)) {
+			ret = -EBADF;
+			break;
+		}
+		reader = file->private_data;
+
+		if (!reader->r_all)
+			reader->r_off = get_next_entry_by_uid(log,
+				reader->r_off, current_euid());
+
+		if (log->w_off != reader->r_off)
+			ret = get_user_hdr_len(reader->r_ver) +
+				get_entry_msg_len(log, reader->r_off);
+		else
+			ret = 0;
+		break;
+	case LOGGER_FLUSH_LOG:
+		if (!(file->f_mode & FMODE_WRITE)) {
+			ret = -EBADF;
+			break;
+		}
+		if (!(in_egroup_p(file->f_dentry->d_inode->i_gid) ||
+				capable(CAP_SYSLOG))) {
+			ret = -EPERM;
+			break;
+		}
+		list_for_each_entry(reader, &log->readers, list)
+			reader->r_off = log->w_off;
+		log->head = log->w_off;
+		ret = 0;
+		break;
+	case LOGGER_GET_VERSION:
+		if (!(file->f_mode & FMODE_READ)) {
+			ret = -EBADF;
+			break;
+		}
+		reader = file->private_data;
+		ret = reader->r_ver;
+		break;
+	case LOGGER_SET_VERSION:
+		if (!(file->f_mode & FMODE_READ)) {
+			ret = -EBADF;
+			break;
+		}
+		reader = file->private_data;
+		ret = logger_set_version(reader, argp);
+		break;
+	}
+
+	mutex_unlock(&log->mutex);
+
+	return ret;
+}
+
+static const struct file_operations logger_fops = {
+	.owner = THIS_MODULE,
+	.read = logger_read,
+	.aio_write = logger_aio_write,
+	.poll = logger_poll,
+	.unlocked_ioctl = logger_ioctl,
+	.compat_ioctl = logger_ioctl,
+	.open = logger_open,
+	.release = logger_release,
+};
+
+/*
+ * Log size must must be a power of two, and greater than
+ * (LOGGER_ENTRY_MAX_PAYLOAD + sizeof(struct logger_entry)).
+ */
+static int __init create_log(char *log_name, int size)
+{
+	int ret = 0;
+	struct logger_log *log;
+	unsigned char *buffer;
+
+	buffer = vmalloc(size);
+	if (buffer == NULL)
+		return -ENOMEM;
+
+	log = kzalloc(sizeof(struct logger_log), GFP_KERNEL);
+	if (log == NULL) {
+		ret = -ENOMEM;
+		goto out_free_buffer;
+	}
+	log->buffer = buffer;
+
+	log->misc.minor = MISC_DYNAMIC_MINOR;
+	log->misc.name = kstrdup(log_name, GFP_KERNEL);
+	if (log->misc.name == NULL) {
+		ret = -ENOMEM;
+		goto out_free_log;
+	}
+
+	log->misc.fops = &logger_fops;
+	log->misc.parent = NULL;
+
+	init_waitqueue_head(&log->wq);
+	INIT_LIST_HEAD(&log->readers);
+	INIT_LIST_HEAD(&log->plugins);
+	mutex_init(&log->mutex);
+	log->w_off = 0;
+	log->head = 0;
+	log->size = size;
+
+	INIT_LIST_HEAD(&log->logs);
+	list_add_tail(&log->logs, &log_list);
+
+	/* finally, initialize the misc device for this log */
+	ret = misc_register(&log->misc);
+	if (unlikely(ret)) {
+		pr_err("failed to register misc device for log '%s'!\n",
+				log->misc.name);
+		goto out_free_log;
+	}
+
+	pr_info("created %luK log '%s'\n",
+		(unsigned long) log->size >> 10, log->misc.name);
+
+	return 0;
+
+out_free_log:
+	kfree(log);
+
+out_free_buffer:
+	vfree(buffer);
+	return ret;
+}
+
+static int __init logger_init(void)
+{
+	int ret;
+
+	ret = create_log(LOGGER_LOG_MAIN, 256*1024);
+	if (unlikely(ret))
+		goto out;
+
+	ret = create_log(LOGGER_LOG_EVENTS, 256*1024);
+	if (unlikely(ret))
+		goto out;
+
+	ret = create_log(LOGGER_LOG_RADIO, 256*1024);
+	if (unlikely(ret))
+		goto out;
+
+	ret = create_log(LOGGER_LOG_SYSTEM, 256*1024);
+	if (unlikely(ret))
+		goto out;
+
+out:
+	return ret;
+}
+
+static void __exit logger_exit(void)
+{
+	struct logger_log *current_log, *next_log;
+
+	list_for_each_entry_safe(current_log, next_log, &log_list, logs) {
+		/* we have to delete all the entry inside log_list */
+		misc_deregister(&current_log->misc);
+		vfree(current_log->buffer);
+		kfree(current_log->misc.name);
+		list_del(&current_log->logs);
+		kfree(current_log);
+	}
+}
+
+device_initcall(logger_init);
+module_exit(logger_exit);
+
+#include "logger_kernel.c"
+
+/**
+ * @logger_add_plugin() - adds a plugin to a given log
+ *
+ * @plugin: The @logger_plugin to be added
+ * @name:   The name of the targetted log
+ */
+void logger_add_plugin(struct logger_plugin *plugin, const char *name)
+{
+	struct logger_log *log = get_log_from_name(name);
+
+	if ((plugin == NULL) || (log == NULL))
+		return;
+
+	mutex_lock(&log->mutex);
+	list_add_tail(&plugin->list, &log->plugins);
+	plugin->init(plugin->data);
+	mutex_unlock(&log->mutex);
+}
+EXPORT_SYMBOL(logger_add_plugin);
+
+/**
+ * @logger_remove_plugin() - removes a plugin from a given log
+ *
+ * @plugin: The @logger_plugin to be removed
+ * @name:   The name of the targetted log
+ */
+void logger_remove_plugin(struct logger_plugin *plugin, const char *name)
+{
+	struct logger_log *log = get_log_from_name(name);
+
+	if ((plugin == NULL) || (log == NULL))
+		return;
+
+	mutex_lock(&log->mutex);
+	plugin->exit(plugin->data);
+	list_del(&plugin->list);
+	mutex_unlock(&log->mutex);
+}
+EXPORT_SYMBOL(logger_remove_plugin);
+
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Robert Love, <rlove@google.com>");
+MODULE_DESCRIPTION("Android Logger");
diff --git a/drivers/staging/android/logger.h b/drivers/staging/android/logger.h
new file mode 100644
index 0000000..8b0694b
--- /dev/null
+++ b/drivers/staging/android/logger.h
@@ -0,0 +1,123 @@
+/* include/linux/logger.h
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ * Author: Robert Love <rlove@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_LOGGER_H
+#define _LINUX_LOGGER_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/**
+ * struct user_logger_entry_compat - defines a single entry that is given to a logger
+ * @len:	The length of the payload
+ * @__pad:	Two bytes of padding that appear to be required
+ * @pid:	The generating process' process ID
+ * @tid:	The generating process' thread ID
+ * @sec:	The number of seconds that have elapsed since the Epoch
+ * @nsec:	The number of nanoseconds that have elapsed since @sec
+ * @msg:	The message that is to be logged
+ *
+ * The userspace structure for version 1 of the logger_entry ABI.
+ * This structure is returned to userspace unless the caller requests
+ * an upgrade to a newer ABI version.
+ */
+struct user_logger_entry_compat {
+	__u16		len;
+	__u16		__pad;
+	__s32		pid;
+	__s32		tid;
+	__s32		sec;
+	__s32		nsec;
+	char		msg[0];
+};
+
+/**
+ * struct logger_entry - defines a single entry that is given to a logger
+ * @len:	The length of the payload
+ * @hdr_size:	sizeof(struct logger_entry_v2)
+ * @pid:	The generating process' process ID
+ * @tid:	The generating process' thread ID
+ * @sec:	The number of seconds that have elapsed since the Epoch
+ * @nsec:	The number of nanoseconds that have elapsed since @sec
+ * @euid:	Effective UID of logger
+ * @msg:	The message that is to be logged
+ *
+ * The structure for version 2 of the logger_entry ABI.
+ * This structure is returned to userspace if ioctl(LOGGER_SET_VERSION)
+ * is called with version >= 2
+ */
+struct logger_entry {
+	__u16		len;
+	__u16		hdr_size;
+	__s32		pid;
+	__s32		tid;
+	__s32		sec;
+	__s32		nsec;
+	kuid_t		euid;
+	char		msg[0];
+};
+
+/**
+ * struct logger_plugin - defines a plugin for a given log, allowing to
+ * export the trace messages to different outputs (e.g. PTI)
+ * @list:      The associated entry in @logger_log's list
+ * @init:      Pointer to an init function that is called when plugin is added
+ * @exit:      Pointer to an exit function that is called when plugin is removed
+ * @write:     Pointer to a write function, used to write a complete msg
+ * @write_seg: Pointer to a write_seg function, used to write a segment of msg
+ * @write_seg_recover: Pointer to a recovery function, called in case of error
+ *                     during writev operation
+ * @data:      Callback data
+ */
+struct logger_plugin {
+	struct list_head list;
+	void (*init) (void * /* callback data */);
+	void (*exit) (void * /* callback data */);
+	void (*write) (unsigned char * /* msg to write */,
+		       unsigned int /* length */,
+		       bool /* from user ? */,
+		       void * /* callback data */);
+	void (*write_seg) (void * /* msg segment to write */,
+			   unsigned int /* length */,
+			   bool /* from user ? */,
+			   bool /* start of msg ? */,
+			   bool /* end of msg ? */,
+			   void * /* callback data*/);
+	void (*write_seg_recover) (void * /* callback data */);
+	void *data;
+};
+
+
+#define LOGGER_LOG_RADIO	"log_radio"	/* radio-related messages */
+#define LOGGER_LOG_EVENTS	"log_events"	/* system/hardware events */
+#define LOGGER_LOG_SYSTEM	"log_system"	/* system/framework messages */
+#define LOGGER_LOG_MAIN	"log_main"	/* everything else */
+
+#define LOGGER_ENTRY_MAX_PAYLOAD	4076
+
+#define __LOGGERIO	0xAE
+
+#define LOGGER_GET_LOG_BUF_SIZE		_IO(__LOGGERIO, 1) /* size of log */
+#define LOGGER_GET_LOG_LEN		_IO(__LOGGERIO, 2) /* used log len */
+#define LOGGER_GET_NEXT_ENTRY_LEN	_IO(__LOGGERIO, 3) /* next entry len */
+#define LOGGER_FLUSH_LOG		_IO(__LOGGERIO, 4) /* flush log */
+#define LOGGER_GET_VERSION		_IO(__LOGGERIO, 5) /* abi version */
+#define LOGGER_SET_VERSION		_IO(__LOGGERIO, 6) /* abi version */
+
+void logger_add_plugin(struct logger_plugin *plugin, const char *name);
+void logger_remove_plugin(struct logger_plugin *plugin, const char *name);
+
+#endif /* _LINUX_LOGGER_H */
diff --git a/drivers/staging/android/logger_kernel.c b/drivers/staging/android/logger_kernel.c
new file mode 100644
index 0000000..fdc9f6acf
--- /dev/null
+++ b/drivers/staging/android/logger_kernel.c
@@ -0,0 +1,308 @@
+/*
+ * drivers/misc/logger_kernel.c
+ *
+ * A Kernel Logging Subsystem
+ *
+ * Copyright (C) 2013 Intel, Inc.
+ *
+ * This porgram is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/console.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/hardirq.h>
+#include <asm/ioctls.h>
+#include "logger.h"
+#include "logger_kernel.h"
+
+static DEFINE_SPINLOCK(log_lock);
+static struct work_struct write_console_wq;
+
+/*
+ * copy_log_to_log1 - flush the remaining bytes of 'log' into 'log1'
+ *
+ * The caller needs to lock the kernel or/and hold log->mutex
+ */
+void copy_log_to_log1(struct logger_log *log,
+					struct logger_log *log1)
+{
+	struct logger_reader *reader;
+	size_t len, ret;
+
+	list_for_each_entry(reader, &log->readers, list)
+		while (log->w_off != reader->r_off) {
+
+			ret = sizeof(struct logger_entry) +
+				get_entry_msg_len(log, reader->r_off);
+
+			fix_up_readers(log1, ret);
+
+			/*
+			 * We read from the log in two disjoint operations.
+			 * First, we read from the current read head offset
+			 * up to 'count' bytes or to the end of the log,
+			 * whichever comes first.
+			 */
+			len = min(ret, log->size - reader->r_off);
+			do_write_log(log1, log->buffer + reader->r_off, len);
+
+			/*
+			 * Second, we read any remaining bytes, starting back at
+			 * the head of the log.
+			 */
+			if (ret != len)
+				do_write_log(log1, log->buffer, ret - len);
+
+			reader->r_off = logger_offset(log, reader->r_off + ret);
+		}
+}
+
+static void flush_to_bottom_log(struct logger_log *log,
+					const char *buf, unsigned int count)
+{
+	struct logger_entry header;
+	char extendedtag[8] = "\4KERNEL\0";
+	u64 ts;
+	unsigned long rem_nsec;
+	unsigned long flags;
+	struct logger_plugin *plugin;
+	struct timespec boottime;
+
+	ts = local_clock();
+	rem_nsec = do_div(ts, 1000000000);
+
+	getboottime(&boottime);
+
+	header.pid = current->tgid;
+	header.tid = task_pid_nr(current);
+	header.sec = boottime.tv_sec + ts;
+	header.nsec = boottime.tv_nsec + rem_nsec;
+	header.euid = current_euid();
+
+	/* length is computed like this:
+	 * 1 byte for the log priority (harcoded to 4 meaning INFO)
+	 * 6 bytes for the tag string (harcoded to KERNEL)
+	 * 1 byte added at the end of the tag required by logcat
+	 * the length of the buf added into the kernel log buffer
+	 * 1 byte added at the end of the buf required by logcat
+	 */
+	header.len = min_t(size_t, sizeof(extendedtag) + count + 1,
+					LOGGER_ENTRY_MAX_PAYLOAD);
+	header.hdr_size = sizeof(struct logger_entry);
+
+	/* null writes succeed, return zero */
+	if (unlikely(!header.len))
+		return;
+
+	if (oops_in_progress) {
+		if (!spin_trylock_irqsave(&log_lock, flags))
+			return;
+	} else
+		spin_lock_irqsave(&log_lock, flags);
+
+	fix_up_readers(log, sizeof(struct logger_entry) + header.len);
+
+	do_write_log(log, &header, sizeof(struct logger_entry));
+	do_write_log(log, &extendedtag, sizeof(extendedtag));
+	do_write_log(log, buf, header.len - sizeof(extendedtag) - 1);
+
+	/* send this segment's payload to the plugins */
+	list_for_each_entry(plugin, &log->plugins, list)
+		plugin->write_seg((void *)buf,
+				  header.len - sizeof(extendedtag) - 1,
+				  false, /* not from user */
+				  true,  /* start of msg */
+				  true,  /* end of msg */
+				  plugin->data);
+
+	/* the write offset is updated to add the final extra byte */
+	log->w_off = logger_offset(log, log->w_off + 1);
+
+	spin_unlock_irqrestore(&log_lock, flags);
+};
+
+
+/*
+ * update_log_from_bottom - copy bottom log buffer into a log buffer
+ */
+static void update_log_from_bottom(struct logger_log *log_orig,
+					struct logger_log *log_dst)
+{
+	unsigned long flags;
+
+	mutex_lock(&log_dst->mutex);
+	spin_lock_irqsave(&log_lock, flags);
+
+	copy_log_to_log1(log_orig, log_dst);
+
+	spin_unlock_irqrestore(&log_lock, flags);
+	mutex_unlock(&log_dst->mutex);
+
+	/* wake up any blocked readers */
+	wake_up_interruptible(&log_dst->wq);
+}
+
+/*
+ * write_console - a write method for kernel logs
+ */
+static void write_console(struct work_struct *work)
+{
+	struct logger_log *log_bot = get_log_from_name(LOGGER_LOG_KERNEL_BOT);
+	struct logger_log *log_kernel = get_log_from_name(LOGGER_LOG_KERNEL);
+
+	update_log_from_bottom(log_bot, log_kernel);
+}
+
+static void
+logger_console_write(struct console *console, const char *s, unsigned int count)
+{
+	struct logger_log *log_bot = get_log_from_name(LOGGER_LOG_KERNEL_BOT);
+	struct logger_log *log_kernel = get_log_from_name(LOGGER_LOG_KERNEL);
+
+	if (!log_bot)
+		return;
+
+	flush_to_bottom_log(log_bot, s, count);
+
+	if (unlikely(!log_kernel))
+		return;
+	if (unlikely(!keventd_up()))
+		return;
+	if (!oops_in_progress && !in_nmi())
+		schedule_work(&write_console_wq);
+}
+
+/* logger console uses CON_IGNORELEVEL that provides a way to ignore
+ * the log level set in the kernel command line
+ */
+
+static struct console logger_console = {
+	.name	= "logk",
+	.write	= logger_console_write,
+	.flags	= CON_PRINTBUFFER | CON_IGNORELEVEL,
+	.index	= -1,
+};
+
+/*
+ * Kernel bottom buffer must be allocated in console init phase to ensure
+ * that the first kernel logs will be retrieved when the kernel log device
+ * will be registered. The bottom buffer is not registered
+ */
+static int __init create_console_log(char *log_name, int size)
+{
+	int ret = 0;
+	struct logger_log *log;
+	unsigned char *buffer;
+
+	buffer = vmalloc(size);
+	if (buffer == NULL)
+		return -ENOMEM;
+
+	log = kzalloc(sizeof(struct logger_log), GFP_KERNEL);
+	if (log == NULL) {
+		ret = -ENOMEM;
+		goto out_free_buffer;
+	}
+	log->buffer = buffer;
+
+	log->misc.minor = MISC_DYNAMIC_MINOR;
+	log->misc.name = kstrdup(log_name, GFP_KERNEL);
+	if (log->misc.name == NULL) {
+		ret = -ENOMEM;
+		goto out_free_log;
+	}
+
+	init_waitqueue_head(&log->wq);
+	INIT_LIST_HEAD(&log->readers);
+	INIT_LIST_HEAD(&log->plugins);
+	mutex_init(&log->mutex);
+	log->w_off = 0;
+	log->head = 0;
+	log->size = size;
+
+	INIT_LIST_HEAD(&log->logs);
+	list_add_tail(&log->logs, &log_list);
+
+	pr_info("created %luK log '%s'\n",
+		(unsigned long) log->size >> 10, log->misc.name);
+
+	return 0;
+
+out_free_log:
+	kfree(log);
+
+out_free_buffer:
+	vfree(buffer);
+	return ret;
+}
+
+static int init_log_reader(const char *name)
+{
+	struct logger_reader *reader;
+	struct logger_log *log = get_log_from_name(name);
+
+	if (!log)
+		return -ENODEV;
+
+	reader = kmalloc(sizeof(struct logger_reader), GFP_KERNEL);
+	if (!reader)
+		return -ENOMEM;
+
+	reader->log = log;
+
+	INIT_LIST_HEAD(&reader->list);
+
+	mutex_lock(&log->mutex);
+	reader->r_off = log->head;
+	list_add_tail(&reader->list, &log->readers);
+	mutex_unlock(&log->mutex);
+
+	return 0;
+}
+
+static int __init logger_console_init(void)
+{
+	int ret;
+
+	INIT_WORK(&write_console_wq, write_console);
+
+	ret = create_console_log(LOGGER_LOG_KERNEL_BOT, 256 * 1024);
+	if (unlikely(ret))
+		goto out;
+
+	ret = init_log_reader(LOGGER_LOG_KERNEL_BOT);
+	if (unlikely(ret))
+		goto out;
+
+	register_console(&logger_console);
+	pr_info("register logcat console\n");
+out:
+	return ret;
+}
+
+console_initcall(logger_console_init);
+
+static int __init logger_kernel_init(void)
+{
+	int ret;
+	if (!(logger_console.flags & CON_ENABLED))
+		return 0;
+
+	ret = create_log(LOGGER_LOG_KERNEL, 256*1024);
+	return ret;
+}
+device_initcall(logger_kernel_init);
diff --git a/drivers/staging/android/logger_kernel.h b/drivers/staging/android/logger_kernel.h
new file mode 100644
index 0000000..45605b8
--- /dev/null
+++ b/drivers/staging/android/logger_kernel.h
@@ -0,0 +1,25 @@
+/*
+ * logger_kernel.h - Kernel Logger Subsytem
+ *
+ *  Copyright (C) Intel 2013
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_LOGGER_KERNEL_H
+#define _LINUX_LOGGER_KERNEL_H
+
+#include <linux/types.h>
+#include "logger.h"
+
+#define LOGGER_LOG_KERNEL	"log_kernel"   /* kernel-related messages */
+#define LOGGER_LOG_KERNEL_BOT	"log_kern_bot" /* system/hardware events */
+
+#endif /* _LINUX_LOGGER_KERNEL_H */
diff --git a/drivers/staging/android/logger_pti.c b/drivers/staging/android/logger_pti.c
new file mode 100644
index 0000000..2e8a622
--- /dev/null
+++ b/drivers/staging/android/logger_pti.c
@@ -0,0 +1,303 @@
+/*
+ * logger_pti.c - logger messages redirection to PTI
+ *
+ *  Copyright (C) Intel 2010
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * To active logger to PTI messages, configure 'out' parameter
+ * of 'logger_pti' module in sysfs with one or more values
+ *  # echo "main,system" > /sys/module/logger_pti/parameters/out
+ *
+ * To active logger to PTI messages from boot, add this
+ * commandline parameter to the boot commandline
+ *  logger_pti.out=main,system
+ *
+ * Possible log buffers are : main, system, radio, events, kernel
+ * See logger.h if others.
+ */
+
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/pti.h>
+#include "logger.h"
+#include "logger_kernel.h"
+
+struct pti_plugin {
+	char *log_name;
+	bool enabled;
+	struct logger_plugin *plugin;
+	struct pti_masterchannel *mc;
+	struct list_head list;
+};
+
+static LIST_HEAD(plugin_list);
+
+/**
+ * @logger_pti_init() - this callback function is called by logger.c
+ * when a plug-in is added (via a call to logger_add_plugin)
+ *
+ * @cb_data: callback data for the plug-in (in our case it is a pointer
+ *           to the pti_plugin structure
+ */
+static void logger_pti_init(void *cb_data)
+{
+	struct pti_plugin *pti_plugin;
+
+	if (unlikely(cb_data == NULL))
+		return;
+
+	/* Channel-ID is reserved at plug-in initialization.
+	 * Each plug-in (associated to a given logger) is associated
+	 * with one channel-ID.
+	 */
+	pti_plugin = (struct pti_plugin *)cb_data;
+	pti_plugin->mc = pti_request_masterchannel(1, pti_plugin->log_name);
+}
+
+/**
+ * @logger_pti_exit() - this callback function is called by logger.c
+ * when a plug-in is removed (via a call to logger_remove_plugin)
+ *
+ * @cb_data: callback data for the plug-in (in our case it is a pointer
+ *           to the pti_plugin structure
+ */
+static void logger_pti_exit(void *cb_data)
+{
+	struct pti_plugin *pti_plugin;
+
+	if (unlikely(cb_data == NULL))
+		return;
+
+	/* Release channel-ID when removing the plug-in */
+	pti_plugin = (struct pti_plugin *)cb_data;
+	pti_release_masterchannel(pti_plugin->mc);
+}
+
+/**
+ * @logger_pti_write_seg() - this callback function is called by logger.c
+ * when writing a segment of message (logger_aio_write)
+ *
+ * @buf:      data to be written (message segment)
+ * @len:      length of the data to be written
+ * @from_usr: true if data is from user-space
+ * @som:      Start Of Message indication
+ * @eom:      End Of Message indication
+ * @cb_data:  callback data for the plug-in (in our case it is a pointer
+ *            to the pti_plugin structure
+ */
+static void logger_pti_write_seg(void *buf, unsigned int len,
+				 bool from_usr, bool som, bool eom,
+				 void *cb_data)
+{
+	struct pti_plugin *pti_plugin = (struct pti_plugin *)cb_data;
+
+	if (unlikely(pti_plugin == NULL))
+		return;
+
+	if (from_usr) {
+		char *tmp_buf = kmalloc(len, GFP_KERNEL);
+		if ((!tmp_buf) || copy_from_user(tmp_buf, buf, len)) {
+			kfree(tmp_buf);
+			return;
+		}
+		pti_writedata(pti_plugin->mc, (u8 *)tmp_buf, len, eom);
+		kfree(tmp_buf);
+	} else
+		pti_writedata(pti_plugin->mc, (u8 *)buf, len, eom);
+}
+
+/**
+ * @logger_pti_write_seg_recover() - this callback function is called
+ * by logger.c when an issue is encountered while writing a segmented
+ * message (logger_aio_write)
+ *
+ * @cb_data: callback data for the plug-in (in our case it is a pointer
+ *           to the pti_plugin structure
+ */
+static void logger_pti_write_seg_recover(void *cb_data)
+{
+	/* An issue has occured in logger_aio_write function.
+	 * To avoid messing up the STP flow, force the End Of Message
+	 * indication by writing a zero byte.
+	 */
+	__u8 data = 0x00;
+	struct pti_plugin *pti_plugin;
+
+	if (unlikely(cb_data == NULL))
+		return;
+
+	pti_plugin = (struct pti_plugin *)cb_data;
+	pti_writedata(pti_plugin->mc, &data, 1, true);
+}
+
+/**
+ * @create_pti_plugin() - creates a @pti_plugin for a given logger
+ *
+ * @name: logger's name
+ */
+static int create_pti_plugin(const char *name)
+{
+	int ret = 0;
+	struct logger_plugin *log_plugin;
+	struct pti_plugin *pti_plugin;
+
+	log_plugin = kzalloc(sizeof(struct logger_plugin), GFP_KERNEL);
+	if (log_plugin == NULL)
+		return -ENOMEM;
+
+	pti_plugin = kzalloc(sizeof(struct pti_plugin), GFP_KERNEL);
+	if (pti_plugin == NULL) {
+		ret = -ENOMEM;
+		goto out_free_log_plugin;
+	}
+
+	pti_plugin->log_name = kstrdup(name, GFP_KERNEL);
+	pti_plugin->enabled = false;
+	pti_plugin->plugin = log_plugin;
+
+	log_plugin->init = logger_pti_init;
+	log_plugin->exit = logger_pti_exit;
+	log_plugin->write_seg = logger_pti_write_seg;
+	log_plugin->write_seg_recover = logger_pti_write_seg_recover;
+	log_plugin->data = (void *)pti_plugin;
+
+	list_add_tail(&pti_plugin->list, &plugin_list);
+
+	return 0;
+
+out_free_log_plugin:
+	kfree(log_plugin);
+	return ret;
+}
+
+static int __init init_logger_pti(void)
+{
+	int ret;
+
+	ret = create_pti_plugin(LOGGER_LOG_RADIO);
+	if (unlikely(ret))
+		goto out;
+
+	ret = create_pti_plugin(LOGGER_LOG_EVENTS);
+	if (unlikely(ret))
+		goto out;
+
+	ret = create_pti_plugin(LOGGER_LOG_SYSTEM);
+	if (unlikely(ret))
+		goto out;
+
+	ret = create_pti_plugin(LOGGER_LOG_MAIN);
+	if (unlikely(ret))
+		goto out;
+
+	ret = create_pti_plugin(LOGGER_LOG_KERNEL_BOT);
+	if (unlikely(ret))
+		goto out;
+
+	return 0;
+
+out:
+	return ret;
+}
+
+static void __exit exit_logger_pti(void)
+{
+	struct pti_plugin *current_plugin, *next_plugin;
+
+	list_for_each_entry_safe(current_plugin, next_plugin,
+				 &plugin_list, list) {
+		kfree(current_plugin->log_name);
+		kfree(current_plugin->plugin);
+		list_del(&current_plugin->list);
+		kfree(current_plugin);
+	}
+}
+
+module_init(init_logger_pti)
+module_exit(exit_logger_pti)
+
+/*
+ * set_out - 'out' parameter set function from 'logger_pti' module
+ *
+ * called when writing to 'out' parameter from 'logger_pti' module in sysfs
+ */
+static int set_out(const char *val, struct kernel_param *kp)
+{
+	const char *name;
+	struct pti_plugin *plugin;
+
+	list_for_each_entry(plugin, &plugin_list, list) {
+		name = plugin->log_name;
+
+		/* remove "log_" in the log_name string */
+		name += 4;
+
+		/* hack: user asks for "kernel", but the
+		 * plugin is actually associated to "kern_bot" logger
+		 */
+		if (!strcmp(name, "kern_bot"))
+		    name = "kernel";
+
+		if (strstr(val, name)) {
+			if (plugin->enabled == false) {
+				logger_add_plugin(plugin->plugin,
+						  plugin->log_name);
+				plugin->enabled = true;
+			}
+		} else if (plugin->enabled == true) {
+			logger_remove_plugin(plugin->plugin, plugin->log_name);
+			plugin->enabled = false;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * get_out - 'out' parameter get function from 'logger_pti' module
+ *
+ * called when reading 'out' parameter from 'logger_pti' module in sysfs
+ */
+static int get_out(char *buffer, struct kernel_param *kp)
+{
+	const char *name;
+	const char *k = ",";
+	struct pti_plugin *plugin;
+
+	list_for_each_entry(plugin, &plugin_list, list) {
+		if (plugin->enabled == true) {
+			name = plugin->log_name;
+
+			/* remove "log_" in the log_name string */
+			name += 4;
+
+			/* hack: if plugin is associated to "kern_bot" logger,
+			 * user actually wants to see "kernel"
+			 */
+			if (!strcmp(name, "kern_bot"))
+			    name = "kernel";
+
+			strcat(buffer, name);
+			strcat(buffer, k);
+		}
+	}
+	buffer[strlen(buffer)-1] = '\0';
+
+	return strlen(buffer);
+}
+
+module_param_call(out, set_out, get_out, NULL, 0644);
+MODULE_PARM_DESC(out,
+		 "configure logger to pti [main|events|radio|system|kernel]");
+
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index 9acb8bf..b84e892 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -122,11 +122,16 @@
 		if (!p)
 			continue;
 
-		if (test_tsk_thread_flag(p, TIF_MEMDIE) &&
-		    time_before_eq(jiffies, lowmem_deathpending_timeout)) {
-			task_unlock(p);
-			rcu_read_unlock();
-			return 0;
+		if (test_tsk_thread_flag(p, TIF_MEMDIE)) {
+			if (time_before_eq(jiffies,
+				lowmem_deathpending_timeout)) {
+				task_unlock(p);
+				rcu_read_unlock();
+				return 0;
+			} else {
+				task_unlock(p);
+				continue;
+			}
 		}
 		oom_score_adj = p->signal->oom_score_adj;
 		if (oom_score_adj < min_score_adj) {
diff --git a/drivers/staging/android/sync.c b/drivers/staging/android/sync.c
index 61e6249..5eadfc5 100644
--- a/drivers/staging/android/sync.c
+++ b/drivers/staging/android/sync.c
@@ -186,11 +186,11 @@
 
 void sync_pt_free(struct sync_pt *pt)
 {
+	sync_timeline_remove_pt(pt);
+
 	if (pt->parent->ops->free_pt)
 		pt->parent->ops->free_pt(pt);
 
-	sync_timeline_remove_pt(pt);
-
 	kref_put(&pt->parent->kref, sync_timeline_free);
 
 	kfree(pt);
@@ -254,7 +254,6 @@
 static struct sync_fence *sync_fence_alloc(const char *name)
 {
 	struct sync_fence *fence;
-	unsigned long flags;
 
 	fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL);
 	if (fence == NULL)
@@ -274,10 +273,6 @@
 
 	init_waitqueue_head(&fence->wq);
 
-	spin_lock_irqsave(&sync_fence_list_lock, flags);
-	list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
-	spin_unlock_irqrestore(&sync_fence_list_lock, flags);
-
 	return fence;
 
 err:
@@ -285,6 +280,15 @@
 	return NULL;
 }
 
+static inline void sync_fence_add_to_list(struct sync_fence *fence)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sync_fence_list_lock, flags);
+	list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
+	spin_unlock_irqrestore(&sync_fence_list_lock, flags);
+}
+
 /* TODO: implement a create which takes more that one sync_pt */
 struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
 {
@@ -301,6 +305,7 @@
 	list_add(&pt->pt_list, &fence->pt_list_head);
 	sync_pt_activate(pt);
 
+	sync_fence_add_to_list(fence);
 	/*
 	 * signal the fence in case pt was activated before
 	 * sync_pt_activate(pt) was called
@@ -475,6 +480,7 @@
 		sync_pt_activate(pt);
 	}
 
+	sync_fence_add_to_list(fence);
 	/*
 	 * signal the fence in case one of it's pts were activated before
 	 * they were activated
diff --git a/drivers/staging/android/uapi/android_alarm.h b/drivers/staging/android/uapi/android_alarm.h
new file mode 100644
index 0000000..33ba589
--- /dev/null
+++ b/drivers/staging/android/uapi/android_alarm.h
@@ -0,0 +1,65 @@
+/* drivers/staging/android/uapi/android_alarm.h
+ *
+ * Copyright (C) 2006-2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_ANDROID_ALARM_H
+#define _UAPI_LINUX_ANDROID_ALARM_H
+
+#include <linux/ioctl.h>
+#include <linux/time.h>
+
+enum android_alarm_type {
+	/* return code bit numbers or set alarm arg */
+	ANDROID_ALARM_RTC_WAKEUP,
+	ANDROID_ALARM_RTC,
+	ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
+	ANDROID_ALARM_ELAPSED_REALTIME,
+	ANDROID_ALARM_SYSTEMTIME,
+	ANDROID_ALARM_POWER_OFF_WAKEUP,
+
+	ANDROID_ALARM_TYPE_COUNT,
+
+	/* return code bit numbers */
+	/* ANDROID_ALARM_TIME_CHANGE = 16 */
+};
+
+enum android_alarm_return_flags {
+	ANDROID_ALARM_RTC_WAKEUP_MASK = 1U << ANDROID_ALARM_RTC_WAKEUP,
+	ANDROID_ALARM_RTC_MASK = 1U << ANDROID_ALARM_RTC,
+	ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK =
+				1U << ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
+	ANDROID_ALARM_ELAPSED_REALTIME_MASK =
+				1U << ANDROID_ALARM_ELAPSED_REALTIME,
+	ANDROID_ALARM_SYSTEMTIME_MASK = 1U << ANDROID_ALARM_SYSTEMTIME,
+	ANDROID_ALARM_POWER_OFF_WAKEUP_MASK =
+				1U << ANDROID_ALARM_POWER_OFF_WAKEUP,
+	ANDROID_ALARM_TIME_CHANGE_MASK = 1U << 16
+};
+
+/* Disable alarm */
+#define ANDROID_ALARM_CLEAR(type)           _IO('a', 0 | ((type) << 4))
+
+/* Ack last alarm and wait for next */
+#define ANDROID_ALARM_WAIT                  _IO('a', 1)
+
+#define ALARM_IOW(c, type, size)            _IOW('a', (c) | ((type) << 4), size)
+/* Set alarm */
+#define ANDROID_ALARM_SET(type)             ALARM_IOW(2, type, struct timespec)
+#define ANDROID_ALARM_SET_AND_WAIT(type)    ALARM_IOW(3, type, struct timespec)
+#define ANDROID_ALARM_GET_TIME(type)        ALARM_IOW(4, type, struct timespec)
+#define ANDROID_ALARM_SET_RTC               _IOW('a', 5, struct timespec)
+#define ANDROID_ALARM_BASE_CMD(cmd)         (cmd & ~(_IOC(0, 0, 0xf0, 0)))
+#define ANDROID_ALARM_IOCTL_TO_TYPE(cmd)    (_IOC_NR(cmd) >> 4)
+
+#endif
diff --git a/drivers/staging/android/uapi/ashmem.h b/drivers/staging/android/uapi/ashmem.h
index 13df42d..dcce503 100644
--- a/drivers/staging/android/uapi/ashmem.h
+++ b/drivers/staging/android/uapi/ashmem.h
@@ -36,9 +36,9 @@
 
 #define ASHMEM_SET_NAME		_IOW(__ASHMEMIOC, 1, char[ASHMEM_NAME_LEN])
 #define ASHMEM_GET_NAME		_IOR(__ASHMEMIOC, 2, char[ASHMEM_NAME_LEN])
-#define ASHMEM_SET_SIZE		_IOW(__ASHMEMIOC, 3, size_t)
+#define ASHMEM_SET_SIZE		_IOW(__ASHMEMIOC, 3, __u32)
 #define ASHMEM_GET_SIZE		_IO(__ASHMEMIOC, 4)
-#define ASHMEM_SET_PROT_MASK	_IOW(__ASHMEMIOC, 5, unsigned long)
+#define ASHMEM_SET_PROT_MASK	_IOW(__ASHMEMIOC, 5, __u32)
 #define ASHMEM_GET_PROT_MASK	_IO(__ASHMEMIOC, 6)
 #define ASHMEM_PIN		_IOW(__ASHMEMIOC, 7, struct ashmem_pin)
 #define ASHMEM_UNPIN		_IOW(__ASHMEMIOC, 8, struct ashmem_pin)
diff --git a/drivers/staging/bcm/Bcmchar.c b/drivers/staging/bcm/Bcmchar.c
index 35641e5..8fa64d9 100644
--- a/drivers/staging/bcm/Bcmchar.c
+++ b/drivers/staging/bcm/Bcmchar.c
@@ -1960,6 +1960,7 @@
 
 		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Called IOCTL_BCM_GET_DEVICE_DRIVER_INFO\n");
 
+		memset(&DevInfo, 0, sizeof(DevInfo));
 		DevInfo.MaxRDMBufferSize = BUFFER_4K;
 		DevInfo.u32DSDStartOffset = EEPROM_CALPARAM_START;
 		DevInfo.u32RxAlignmentCorrection = 0;
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index 924c54c..0ae406a 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -1401,22 +1401,19 @@
 		DPRINTK("subdevice busy\n");
 		return -EBUSY;
 	}
-	s->busy = file;
 
 	/* make sure channel/gain list isn't too long */
 	if (cmd.chanlist_len > s->len_chanlist) {
 		DPRINTK("channel/gain list too long %u > %d\n",
 			cmd.chanlist_len, s->len_chanlist);
-		ret = -EINVAL;
-		goto cleanup;
+		return -EINVAL;
 	}
 
 	/* make sure channel/gain list isn't too short */
 	if (cmd.chanlist_len < 1) {
 		DPRINTK("channel/gain list too short %u < 1\n",
 			cmd.chanlist_len);
-		ret = -EINVAL;
-		goto cleanup;
+		return -EINVAL;
 	}
 
 	async->cmd = cmd;
@@ -1426,8 +1423,7 @@
 	    kmalloc(async->cmd.chanlist_len * sizeof(int), GFP_KERNEL);
 	if (!async->cmd.chanlist) {
 		DPRINTK("allocation failed\n");
-		ret = -ENOMEM;
-		goto cleanup;
+		return -ENOMEM;
 	}
 
 	if (copy_from_user(async->cmd.chanlist, user_chanlist,
@@ -1479,6 +1475,9 @@
 
 	comedi_set_subdevice_runflags(s, ~0, SRF_USER | SRF_RUNNING);
 
+	/* set s->busy _after_ setting SRF_RUNNING flag to avoid race with
+	 * comedi_read() or comedi_write() */
+	s->busy = file;
 	ret = s->do_cmd(dev, s);
 	if (ret == 0)
 		return 0;
@@ -1693,6 +1692,7 @@
 			   void *file)
 {
 	struct comedi_subdevice *s;
+	int ret;
 
 	if (arg >= dev->n_subdevices)
 		return -EINVAL;
@@ -1709,7 +1709,11 @@
 	if (s->busy != file)
 		return -EBUSY;
 
-	return do_cancel(dev, s);
+	ret = do_cancel(dev, s);
+	if (comedi_get_subdevice_runflags(s) & SRF_USER)
+		wake_up_interruptible(&s->async->wait_head);
+
+	return ret;
 }
 
 /*
@@ -2041,11 +2045,13 @@
 
 		if (!comedi_is_subdevice_running(s)) {
 			if (count == 0) {
+				mutex_lock(&dev->mutex);
 				if (comedi_is_subdevice_in_error(s))
 					retval = -EPIPE;
 				else
 					retval = 0;
 				do_become_nonbusy(dev, s);
+				mutex_unlock(&dev->mutex);
 			}
 			break;
 		}
@@ -2144,11 +2150,13 @@
 
 		if (n == 0) {
 			if (!comedi_is_subdevice_running(s)) {
+				mutex_lock(&dev->mutex);
 				do_become_nonbusy(dev, s);
 				if (comedi_is_subdevice_in_error(s))
 					retval = -EPIPE;
 				else
 					retval = 0;
+				mutex_unlock(&dev->mutex);
 				break;
 			}
 			if (file->f_flags & O_NONBLOCK) {
@@ -2186,9 +2194,11 @@
 		buf += n;
 		break;		/* makes device work like a pipe */
 	}
-	if (comedi_is_subdevice_idle(s) &&
-	    async->buf_read_count - async->buf_write_count == 0) {
-		do_become_nonbusy(dev, s);
+	if (comedi_is_subdevice_idle(s)) {
+		mutex_lock(&dev->mutex);
+		if (async->buf_read_count - async->buf_write_count == 0)
+			do_become_nonbusy(dev, s);
+		mutex_unlock(&dev->mutex);
 	}
 	set_current_state(TASK_RUNNING);
 	remove_wait_queue(&async->wait_head, &wait);
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index 06d190f..4a2b042 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -464,7 +464,7 @@
 		ret = comedi_device_postconfig(dev);
 	if (ret < 0) {
 		comedi_device_detach(dev);
-		module_put(dev->driver->module);
+		module_put(driv->module);
 	}
 	/* On success, the driver module count has been incremented. */
 	return ret;
diff --git a/drivers/staging/comedi/drivers/dt282x.c b/drivers/staging/comedi/drivers/dt282x.c
index 90f2de9..f4c1e99 100644
--- a/drivers/staging/comedi/drivers/dt282x.c
+++ b/drivers/staging/comedi/drivers/dt282x.c
@@ -269,8 +269,9 @@
 			}					\
 			udelay(5);				\
 		}						\
-		if (_i)						\
+		if (_i) {					\
 			b					\
+		}						\
 	} while (0)
 
 static int prep_ai_dma(struct comedi_device *dev, int chan, int size);
diff --git a/drivers/staging/comedi/drivers/ni_65xx.c b/drivers/staging/comedi/drivers/ni_65xx.c
index 3f71f0f..05eb6fe 100644
--- a/drivers/staging/comedi/drivers/ni_65xx.c
+++ b/drivers/staging/comedi/drivers/ni_65xx.c
@@ -383,28 +383,23 @@
 {
 	const struct ni_65xx_board *board = comedi_board(dev);
 	struct ni_65xx_private *devpriv = dev->private;
-	unsigned base_bitfield_channel;
-	const unsigned max_ports_per_bitfield = 5;
+	int base_bitfield_channel;
 	unsigned read_bits = 0;
-	unsigned j;
+	int last_port_offset = ni_65xx_port_by_channel(s->n_chan - 1);
+	int port_offset;
 
 	base_bitfield_channel = CR_CHAN(insn->chanspec);
-	for (j = 0; j < max_ports_per_bitfield; ++j) {
-		const unsigned port_offset =
-			ni_65xx_port_by_channel(base_bitfield_channel) + j;
-		const unsigned port =
-			sprivate(s)->base_port + port_offset;
-		unsigned base_port_channel;
+	for (port_offset = ni_65xx_port_by_channel(base_bitfield_channel);
+	     port_offset <= last_port_offset; port_offset++) {
+		unsigned port = sprivate(s)->base_port + port_offset;
+		int base_port_channel = port_offset * ni_65xx_channels_per_port;
 		unsigned port_mask, port_data, port_read_bits;
-		int bitshift;
-		if (port >= ni_65xx_total_num_ports(board))
+		int bitshift = base_port_channel - base_bitfield_channel;
+
+		if (bitshift >= 32)
 			break;
-		base_port_channel = port_offset * ni_65xx_channels_per_port;
 		port_mask = data[0];
 		port_data = data[1];
-		bitshift = base_port_channel - base_bitfield_channel;
-		if (bitshift >= 32 || bitshift <= -32)
-			break;
 		if (bitshift > 0) {
 			port_mask >>= bitshift;
 			port_data >>= bitshift;
diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c
index 163c638..972a072 100644
--- a/drivers/staging/iio/adc/mxs-lradc.c
+++ b/drivers/staging/iio/adc/mxs-lradc.c
@@ -234,7 +234,6 @@
 {
 	struct mxs_lradc *lradc = iio_priv(iio_dev);
 	int ret;
-	unsigned long mask;
 
 	if (m != IIO_CHAN_INFO_RAW)
 		return -EINVAL;
@@ -243,12 +242,6 @@
 	if (chan->channel > LRADC_MAX_TOTAL_CHANS)
 		return -EINVAL;
 
-	/* Validate the channel if it doesn't intersect with reserved chans. */
-	bitmap_set(&mask, chan->channel, 1);
-	ret = iio_validate_scan_mask_onehot(iio_dev, &mask);
-	if (ret)
-		return -EINVAL;
-
 	/*
 	 * See if there is no buffered operation in progess. If there is, simply
 	 * bail out. This can be improved to support both buffered and raw IO at
@@ -661,12 +654,13 @@
 {
 	int ret;
 	struct iio_trigger *trig;
+	struct mxs_lradc *lradc = iio_priv(iio);
 
 	trig = iio_trigger_alloc("%s-dev%i", iio->name, iio->id);
 	if (trig == NULL)
 		return -ENOMEM;
 
-	trig->dev.parent = iio->dev.parent;
+	trig->dev.parent = lradc->dev;
 	iio_trigger_set_drvdata(trig, iio);
 	trig->ops = &mxs_lradc_trigger_ops;
 
@@ -676,15 +670,17 @@
 		return ret;
 	}
 
-	iio->trig = trig;
+	lradc->trig = trig;
 
 	return 0;
 }
 
 static void mxs_lradc_trigger_remove(struct iio_dev *iio)
 {
-	iio_trigger_unregister(iio->trig);
-	iio_trigger_free(iio->trig);
+	struct mxs_lradc *lradc = iio_priv(iio);
+
+	iio_trigger_unregister(lradc->trig);
+	iio_trigger_free(lradc->trig);
 }
 
 static int mxs_lradc_buffer_preenable(struct iio_dev *iio)
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
index 6455305..a532ca5 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/staging/imx-drm/imx-drm-core.c
@@ -681,6 +681,7 @@
 
 	return i;
 }
+EXPORT_SYMBOL_GPL(imx_drm_encoder_get_mux_id);
 
 /*
  * imx_drm_remove_encoder - remove an encoder
diff --git a/drivers/staging/line6/pcm.c b/drivers/staging/line6/pcm.c
index 02f77d7..a7856ba 100644
--- a/drivers/staging/line6/pcm.c
+++ b/drivers/staging/line6/pcm.c
@@ -385,8 +385,11 @@
 */
 static void pcm_disconnect_substream(struct snd_pcm_substream *substream)
 {
-	if (substream->runtime && snd_pcm_running(substream))
+	if (substream->runtime && snd_pcm_running(substream)) {
+		snd_pcm_stream_lock_irq(substream);
 		snd_pcm_stop(substream, SNDRV_PCM_STATE_DISCONNECTED);
+		snd_pcm_stream_unlock_irq(substream);
+	}
 }
 
 /*
diff --git a/drivers/staging/ozwpan/ozcdev.c b/drivers/staging/ozwpan/ozcdev.c
index 27d0666..224ccff 100644
--- a/drivers/staging/ozwpan/ozcdev.c
+++ b/drivers/staging/ozwpan/ozcdev.c
@@ -153,6 +153,9 @@
 	struct oz_app_hdr *app_hdr;
 	struct oz_serial_ctx *ctx;
 
+	if (count > sizeof(ei->data) - sizeof(*elt) - sizeof(*app_hdr))
+		return -EINVAL;
+
 	spin_lock_bh(&g_cdev.lock);
 	pd = g_cdev.active_pd;
 	if (pd)
diff --git a/drivers/staging/sb105x/sb_pci_mp.c b/drivers/staging/sb105x/sb_pci_mp.c
index cd94f6c..b90e96b 100644
--- a/drivers/staging/sb105x/sb_pci_mp.c
+++ b/drivers/staging/sb105x/sb_pci_mp.c
@@ -1063,7 +1063,7 @@
 
 static int mp_get_count(struct sb_uart_state *state, struct serial_icounter_struct *icnt)
 {
-	struct serial_icounter_struct icount;
+	struct serial_icounter_struct icount = {};
 	struct sb_uart_icount cnow;
 	struct sb_uart_port *port = state->port;
 
diff --git a/drivers/staging/sep54/Kconfig b/drivers/staging/sep54/Kconfig
new file mode 100644
index 0000000..d502943
--- /dev/null
+++ b/drivers/staging/sep54/Kconfig
@@ -0,0 +1,13 @@
+config DX_SEP54
+	tristate "Discretix SEP driver (CC54)"
+	depends on PCI && MMC
+	select CRYPTO_BLKCIPHER
+	help
+	  Discretix SEP driver for CC54; used for the security processor subsystem
+	  on board the Intel Mobile Internet Device and adds SEP availability
+	  to the kernel crypto infrastructure
+
+	  The driver's name is sep_driver.
+
+	  If unsure, select N.
+
diff --git a/drivers/staging/sep54/Makefile b/drivers/staging/sep54/Makefile
new file mode 100644
index 0000000..beac089
--- /dev/null
+++ b/drivers/staging/sep54/Makefile
@@ -0,0 +1,14 @@
+obj-$(CONFIG_DX_SEP54) += sep54.o
+sep54-objs := dx_driver.o sep_init.o crypto_ctx_mgr.o sep_sysfs.o \
+                   desc_mgr.o lli_mgr.o crypto_api.o sep_request_mgr.o \
+                   sep_power.o sepapp.o crypto_hwk.o sepfs.o
+
+ifeq ($(CONFIG_COMPAT),y)
+	sep54-objs += sep_compat_ioctl.o
+endif
+
+ccflags-y += -DSEP_SUPPORT_SHA=256 -DCONFIG_NOT_COHERENT_CACHE -DSEP_HWK_UNIT_TEST
+
+ifeq ($(CONFIG_PM_RUNTIME),y)
+	ccflags-y += -DSEP_RUNTIME_PM
+endif
diff --git a/drivers/staging/sep54/crypto_api.c b/drivers/staging/sep54/crypto_api.c
new file mode 100644
index 0000000..9a3bc7f
--- /dev/null
+++ b/drivers/staging/sep54/crypto_api.c
@@ -0,0 +1,1527 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+/* \file cryto_api.c - Implementation of wrappers for Linux Crypto API */
+#define SEP_LOG_CUR_COMPONENT SEP_LOG_MASK_CRYPTO_API
+
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <crypto/algapi.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <linux/workqueue.h>
+#include "dx_driver_abi.h"
+#include "dx_driver.h"
+#include "sep_power.h"
+#include "crypto_ctx_mgr.h"
+#include "crypto_api.h"
+#include "sepapp.h"
+#include "sep_applets.h"
+#include "dx_sepapp_kapi.h"
+
+#include <linux/sched.h>
+
+#define CRYPTO_API_QID 0
+/* Priority assigned to our algorithms implementation */
+#define DX_CRYPTO_PRIO (300 + (100 * CRYPTO_API_QID))
+
+#define SYMCIPHER_ALG_NAME_LEN 8	/* Format: "mod(alg)" */
+#define SYMCIPHER_ALG_NAME_MODE_OFFSET 0
+#define SYMCIPHER_ALG_NAME_MODE_SIZE 3
+#define SYMCIPHER_ALG_NAME_ALG_OFFSET 4
+#define SYMCIPHER_ALG_NAME_ALG_SIZE 3
+
+#define CMD_DO_CRYPTO 7
+#define DISK_ENC_APP_UUID "INTEL DISK ENC01"
+
+/**
+ * struct async_digest_req_ctx - Context for async. digest algorithms requests
+ * @host_ctx:	Host crypto context allocated per request
+ * @result:	Where to copy the digest result.
+ *		When NULL the result is retained in the sep_ctx until "final"
+ *		and this field holds the pointer to its location.
+ * @async_req:	The generic async request context for completion notification
+ */
+struct async_digest_req_ctx {
+	union {
+		struct host_crypto_ctx_hash hash_ctx;
+		struct host_crypto_ctx_mac mac_ctx;
+	} host_ctx;
+	u8 *result;
+	struct async_req_ctx async_req;
+};
+
+/* Client context for the Crypto API operations */
+/* To be initialized by sep_setup */
+static struct sep_client_ctx crypto_api_ctx;
+static struct dma_pool *sep_ctx_pool;
+
+/* Functions from the main driver code that are shared with this module */
+int prepare_data_for_sep(struct sep_op_ctx *op_ctx,
+			 u8 __user *data_in,
+			 struct scatterlist *sgl_in,
+			 u8 __user *data_out,
+			 struct scatterlist *sgl_out,
+			 u32 data_in_size,
+			 enum crypto_data_intent data_intent);
+
+/* Local (static) functions */
+static void release_symcipher_ctx(struct sep_op_ctx *op_ctx,
+				  u8 *iv_crypto);
+
+/****************************************/
+/* Block cipher algorithms declarations */
+/****************************************/
+static int symcipher_set_key(struct crypto_ablkcipher *tfm,
+			     const u8 *key, unsigned int keylen);
+static int symcipher_encrypt(struct ablkcipher_request *req);
+static int symcipher_decrypt(struct ablkcipher_request *req);
+static int symcipher_ctx_init(struct crypto_tfm *tfm);
+static void crypto_ctx_cleanup(struct crypto_tfm *tfm);
+
+/* Template for block ciphers */
+static struct crypto_alg blkcipher_algs_base = {
+	.cra_priority = DX_CRYPTO_PRIO,
+	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_ctxsize = sizeof(struct host_crypto_ctx_sym_cipher),
+	.cra_alignmask = 0,	/* Cannot use this due to bug in kernel */
+	.cra_type = &crypto_ablkcipher_type,
+	.cra_module = THIS_MODULE,
+	.cra_u = {
+		  .ablkcipher = {
+				 .setkey = symcipher_set_key,
+				 .encrypt = symcipher_encrypt,
+				 .decrypt = symcipher_decrypt}
+		  },
+	.cra_init = symcipher_ctx_init,
+	.cra_exit = crypto_ctx_cleanup
+};
+
+/* Block cipher specific attributes */
+static struct crypto_alg dx_ablkcipher_algs[] = {
+	{			/* xxx(aes) */
+	 .cra_name = "xxx(aes)",
+	 .cra_driver_name = MODULE_NAME "-aes-xxx",
+	 .cra_blocksize = SEP_AES_BLOCK_SIZE,
+	 .cra_u = {
+		   .ablkcipher = {
+				  .min_keysize = SEP_AES_128_BIT_KEY_SIZE,
+				  .max_keysize = SEP_AES_256_BIT_KEY_SIZE,
+				  .ivsize = SEP_AES_IV_SIZE}
+		   }
+	 },
+#ifdef USE_SEP54_AES
+	{			/* ecb(aes) */
+	 .cra_name = "ecb(aes)",
+	 .cra_driver_name = MODULE_NAME "-aes-ecb",
+	 .cra_blocksize = SEP_AES_BLOCK_SIZE,
+	 .cra_u = {
+		   .ablkcipher = {
+				  .min_keysize = SEP_AES_128_BIT_KEY_SIZE,
+				  .max_keysize = SEP_AES_256_BIT_KEY_SIZE,
+				  .ivsize = SEP_AES_IV_SIZE}
+		   }
+	 },
+	{			/* cbc(aes) */
+	 .cra_name = "cbc(aes)",
+	 .cra_driver_name = MODULE_NAME "-aes-cbc",
+	 .cra_blocksize = SEP_AES_BLOCK_SIZE,
+	 .cra_u = {
+		   .ablkcipher = {
+				  .min_keysize = SEP_AES_128_BIT_KEY_SIZE,
+				  .max_keysize = SEP_AES_256_BIT_KEY_SIZE,
+				  .ivsize = SEP_AES_IV_SIZE}
+		   }
+	 },
+	{			/* ctr(aes) */
+	 .cra_name = "ctr(aes)",
+	 .cra_driver_name = MODULE_NAME "-aes-ctr",
+	 .cra_blocksize = SEP_AES_BLOCK_SIZE,
+	 .cra_u = {
+		   .ablkcipher = {
+				  .min_keysize = SEP_AES_128_BIT_KEY_SIZE,
+				  .max_keysize = SEP_AES_256_BIT_KEY_SIZE,
+				  .ivsize = SEP_AES_IV_SIZE}
+		   }
+	 },
+	{			/* xts(aes) */
+	 .cra_name = "xts(aes)",
+	 .cra_driver_name = MODULE_NAME "-aes-xts",
+	 .cra_blocksize = SEP_AES_BLOCK_SIZE,
+	 .cra_u = {
+			/* AES-XTS uses two keys, so the key size is doubled */
+		   .ablkcipher = {
+				  .min_keysize = SEP_AES_128_BIT_KEY_SIZE * 2,
+				  .max_keysize = SEP_AES_256_BIT_KEY_SIZE * 2,
+				  .ivsize = SEP_AES_IV_SIZE}
+		   }
+	 },
+#endif /* USE_SEP54_AES */
+	{			/* ecb(des) */
+	 .cra_name = "ecb(des)",
+	 .cra_driver_name = MODULE_NAME "-des-ecb",
+	 .cra_blocksize = SEP_DES_BLOCK_SIZE,
+	 .cra_u = {
+		   .ablkcipher = {
+				  .min_keysize = SEP_DES_ONE_KEY_SIZE,
+				  .max_keysize = SEP_DES_ONE_KEY_SIZE,
+				  .ivsize = SEP_DES_IV_SIZE}
+		   }
+	 },
+	{			/* cbc(des) */
+	 .cra_name = "cbc(des)",
+	 .cra_driver_name = MODULE_NAME "-des-cbc",
+	 .cra_blocksize = SEP_DES_BLOCK_SIZE,
+	 .cra_u = {
+		   .ablkcipher = {
+				  .min_keysize = SEP_DES_ONE_KEY_SIZE,
+				  .max_keysize = SEP_DES_ONE_KEY_SIZE,
+				  .ivsize = SEP_DES_IV_SIZE}
+		   }
+	 },
+	{			/* ecb(des3_ede) */
+	 .cra_name = "ecb(des3_ede)",
+	 .cra_driver_name = MODULE_NAME "-des3-ecb",
+	 .cra_blocksize = SEP_DES_BLOCK_SIZE,
+	 .cra_u = {
+		   .ablkcipher = {
+				  .min_keysize = SEP_DES_TRIPLE_KEY_SIZE,
+				  .max_keysize = SEP_DES_TRIPLE_KEY_SIZE,
+				  .ivsize = SEP_DES_IV_SIZE}
+		   }
+	 },
+	{			/* cbc(des3_ede) */
+	 .cra_name = "cbc(des3_ede)",
+	 .cra_driver_name = MODULE_NAME "-des3-cbc",
+	 .cra_blocksize = SEP_DES_BLOCK_SIZE,
+	 .cra_u = {
+		   .ablkcipher = {
+				  .min_keysize = SEP_DES_TRIPLE_KEY_SIZE,
+				  .max_keysize = SEP_DES_TRIPLE_KEY_SIZE,
+				  .ivsize = SEP_DES_IV_SIZE}
+		   }
+	 }
+};				/* ablkcipher_algs[] */
+
+#define DX_ABLKCIPHER_NUM \
+	(sizeof(dx_ablkcipher_algs) / sizeof(struct crypto_alg))
+
+static const enum dxdi_sym_cipher_type dx_algs_cipher_types[] = {
+	DXDI_SYMCIPHER_AES_XXX,
+#ifdef USE_SEP54_AES
+	DXDI_SYMCIPHER_AES_ECB,
+	DXDI_SYMCIPHER_AES_CBC,
+	DXDI_SYMCIPHER_AES_CTR,
+	DXDI_SYMCIPHER_AES_XTS,
+#endif
+	DXDI_SYMCIPHER_DES_ECB,
+	DXDI_SYMCIPHER_DES_CBC,
+	DXDI_SYMCIPHER_DES_ECB,
+	DXDI_SYMCIPHER_DES_CBC,
+};
+
+/*********************************************/
+/* Digest (hash/MAC) algorithms declarations */
+/*********************************************/
+static int digest_tfm_init(struct crypto_tfm *tfm);
+static int digest_init(struct ahash_request *req);
+static int digest_update(struct ahash_request *req);
+static int digest_final(struct ahash_request *req);
+static int digest_finup(struct ahash_request *req);
+static int digest_integrated(struct ahash_request *req);
+static int mac_setkey(struct crypto_ahash *tfm,
+		      const u8 *key,
+		      unsigned int keylen) __attribute__((unused));
+
+/* Save set key in tfm ctx */
+struct mac_key_data {
+	u32 key_size;	/* In octets */
+	u8 key[DXDI_MAC_KEY_SIZE_MAX];
+};
+
+/* Description of a digest (hash/MAC) algorithm */
+struct dx_digest_alg {
+	enum dxdi_hash_type hash_type;
+	enum dxdi_mac_type mac_type;
+	struct ahash_alg ahash;
+};
+
+/* Common attributes for all the digest (hash/MAC) algorithms */
+static struct ahash_alg digest_algs_base = {
+	.init = digest_init,
+	.update = digest_update,
+	.final = digest_final,
+	.finup = digest_finup,
+	.digest = digest_integrated,
+	.halg.base = {
+		      .cra_type = &crypto_ahash_type,
+		      .cra_priority = DX_CRYPTO_PRIO,
+		      .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+		      .cra_alignmask = 0,
+		      .cra_module = THIS_MODULE,
+		      .cra_init = digest_tfm_init}
+};
+
+/* Algorithm specific attributes */
+static struct dx_digest_alg dx_digest_algs[] = {
+#ifdef USE_SEP54_AHASH
+	{			/* sha1 */
+	 .hash_type = DXDI_HASH_SHA1,
+	 .mac_type = DXDI_MAC_NONE,
+	 .ahash = {
+		   .halg.base = {
+				 .cra_name = "sha1",
+				 .cra_driver_name = MODULE_NAME "-sha1",
+				 .cra_blocksize = SHA1_BLOCK_SIZE},
+		   .halg.digestsize = SHA1_DIGEST_SIZE,
+		   .halg.statesize = SHA1_BLOCK_SIZE}
+	 },
+	{			/* sha224 */
+	 .hash_type = DXDI_HASH_SHA224,
+	 .mac_type = DXDI_MAC_NONE,
+	 .ahash = {
+		   .halg.base = {
+				 .cra_name = "sha224",
+				 .cra_driver_name = MODULE_NAME "-sha224",
+				 .cra_blocksize = SHA224_BLOCK_SIZE},
+		   .halg.digestsize = SHA224_DIGEST_SIZE,
+		   .halg.statesize = SHA224_BLOCK_SIZE}
+	 },
+	{			/* sha256 */
+	 .hash_type = DXDI_HASH_SHA256,
+	 .mac_type = DXDI_MAC_NONE,
+	 .ahash = {
+		   .halg.base = {
+				 .cra_name = "sha256",
+				 .cra_driver_name = MODULE_NAME "-sha256",
+				 .cra_blocksize = SHA256_BLOCK_SIZE},
+		   .halg.digestsize = SHA256_DIGEST_SIZE,
+		   .halg.statesize = SHA256_BLOCK_SIZE}
+	 },
+	{			/* hmac(sha1) */
+	 .hash_type = DXDI_HASH_SHA1,
+	 .mac_type = DXDI_MAC_HMAC,
+	 .ahash = {
+		   .setkey = mac_setkey,
+		   .halg.base = {
+				 .cra_name = "hmac(sha1)",
+				 .cra_driver_name = MODULE_NAME "-hmac-sha1",
+				 .cra_blocksize = SHA1_BLOCK_SIZE,
+				 .cra_ctxsize = sizeof(struct mac_key_data)
+				 },
+		   .halg.digestsize = SHA1_DIGEST_SIZE,
+		   .halg.statesize = SHA1_BLOCK_SIZE}
+	 },
+	{			/* hmac(sha224) */
+	 .hash_type = DXDI_HASH_SHA224,
+	 .mac_type = DXDI_MAC_HMAC,
+	 .ahash = {
+		   .setkey = mac_setkey,
+		   .halg.base = {
+				 .cra_name = "hmac(sha224)",
+				 .cra_driver_name = MODULE_NAME "-hmac-sha224",
+				 .cra_blocksize = SHA224_BLOCK_SIZE,
+				 .cra_ctxsize = sizeof(struct mac_key_data)
+				 },
+		   .halg.digestsize = SHA224_DIGEST_SIZE,
+		   .halg.statesize = SHA224_BLOCK_SIZE}
+	 },
+	{			/* hmac(sha256) */
+	 .hash_type = DXDI_HASH_SHA256,
+	 .mac_type = DXDI_MAC_HMAC,
+	 .ahash = {
+		   .setkey = mac_setkey,
+		   .halg.base = {
+				 .cra_name = "hmac(sha256)",
+				 .cra_driver_name = MODULE_NAME "-hmac-sha256",
+				 .cra_blocksize = SHA256_BLOCK_SIZE,
+				 .cra_ctxsize = sizeof(struct mac_key_data)
+				 },
+		   .halg.digestsize = SHA256_DIGEST_SIZE,
+		   .halg.statesize = SHA256_BLOCK_SIZE}
+	 },
+#ifdef USE_SEP54_AES
+	{			/* xcbc(aes) */
+	 .hash_type = DXDI_HASH_NONE,
+	 .mac_type = DXDI_MAC_AES_XCBC_MAC,
+	 .ahash = {
+		   .setkey = mac_setkey,
+		   .halg.base = {
+				 .cra_name = "xcbc(aes)",
+				 .cra_driver_name = MODULE_NAME "-aes-xcbc",
+				 .cra_blocksize = SEP_AES_BLOCK_SIZE,
+				 .cra_ctxsize = sizeof(struct mac_key_data)
+				 },
+		   .halg.digestsize = SEP_AES_BLOCK_SIZE,
+		   .halg.statesize = SEP_AES_BLOCK_SIZE}
+	 },
+	{			/* cmac(aes) */
+	 .hash_type = DXDI_HASH_NONE,
+	 .mac_type = DXDI_MAC_AES_CMAC,
+	 .ahash = {
+		   .setkey = mac_setkey,
+		   .halg.base = {
+				 .cra_name = "cmac(aes)",
+				 .cra_driver_name = MODULE_NAME "-aes-cmac",
+				 .cra_blocksize = SEP_AES_BLOCK_SIZE,
+				 .cra_ctxsize = sizeof(struct mac_key_data)
+				 },
+		   .halg.digestsize = SEP_AES_BLOCK_SIZE,
+		   .halg.statesize = SEP_AES_BLOCK_SIZE}
+	 }
+#endif /* USE_SEP54_AES */
+#endif /* USE_SEP54_AHASH */
+};				/*dx_ahash_algs[] */
+
+#define DX_DIGEST_NUM \
+	(sizeof(dx_digest_algs) / sizeof(struct dx_digest_alg))
+
+static void crypto_ctx_cleanup(struct crypto_tfm *tfm)
+{
+	struct host_crypto_ctx *host_ctx_p = crypto_tfm_ctx(tfm);
+	struct device *mydev = crypto_api_ctx.drv_data->sep_data->dev;
+	struct client_crypto_ctx_info _ctx_info;
+	struct client_crypto_ctx_info *ctx_info = &_ctx_info;
+	int rc;
+
+	pr_debug("Cleaning context @%p for %s\n",
+		      host_ctx_p, crypto_tfm_alg_name(tfm));
+
+	rc = ctxmgr_map_kernel_ctx(ctx_info, mydev, host_ctx_p->alg_class,
+				   (struct host_crypto_ctx *)host_ctx_p, NULL,
+				   0);
+	if (rc != 0) {
+		pr_err("Failed mapping context @%p (rc=%d)\n",
+			    host_ctx_p, rc);
+		return;
+	}
+
+	/* New TEE method */
+	if (!memcmp(crypto_tfm_alg_name(tfm), "xxx(aes)", 8)) {
+		if (dx_sepapp_session_close(host_ctx_p->sctx,
+						host_ctx_p->sess_id))
+			BUG(); /* TODO */
+		dx_sepapp_context_free(host_ctx_p->sctx);
+	}
+
+	ctxmgr_set_ctx_state(ctx_info, CTX_STATE_UNINITIALIZED);
+
+	ctxmgr_unmap_kernel_ctx(ctx_info);
+}
+
+/**
+ * dispatch_crypto_op() - Dispatch (async.) CRYPTO_OP descriptor operation
+ * @op_ctx:		Operation context
+ * @may_backlog:	If software queue is full, may be put in backlog queue
+ * @do_init:		Initialize given crypto context
+ * @proc_mode:		Processing mode code
+ * @keep_in_cache:	Retain crypto context in cache after dispatching req.
+ *
+ * Returns -EINPROGRESS on success to dispatch into the SW desc. q.
+ * Returns -EBUSY if may_backlog==true and the descriptor was enqueued in the
+ * the backlog queue.
+ * Returns -ENOMEM if queue is full and cannot enqueue in the backlog queue
+ */
+static int dispatch_crypto_op(struct sep_op_ctx *op_ctx, bool may_backlog,
+			      bool do_init, enum sep_proc_mode proc_mode,
+			      bool keep_in_cache)
+{
+	struct queue_drvdata *drvdata = op_ctx->client_ctx->drv_data;
+	struct client_crypto_ctx_info *ctx_info = &op_ctx->ctx_info;
+	int sep_ctx_load_req;
+	struct crypto_ctx_uid ctx_id = ctxmgr_get_ctx_id(ctx_info);
+	int rc;
+	struct sep_sw_desc desc;
+
+	/* Start critical section -
+	   cache allocation must be coupled to descriptor enqueue */
+	mutex_lock(&drvdata->desc_queue_sequencer);
+#ifdef SEP_RUNTIME_PM
+	dx_sep_pm_runtime_get();
+#endif
+	ctxmgr_set_sep_cache_idx(ctx_info,
+				 ctxmgr_sep_cache_alloc(drvdata->sep_cache,
+							ctx_id,
+							&sep_ctx_load_req));
+	desc_q_pack_crypto_op_desc(&desc, op_ctx, sep_ctx_load_req, do_init,
+				   proc_mode);
+	/* op_state must be updated before dispatching descriptor */
+	rc = desc_q_enqueue(drvdata->desc_queue, &desc, may_backlog);
+	if (unlikely(IS_DESCQ_ENQUEUE_ERR(rc)))
+		op_ctx->error_info = DXDI_ERROR_NO_RESOURCE;
+	if ((!keep_in_cache) || unlikely(IS_DESCQ_ENQUEUE_ERR(rc)))
+		ctxmgr_sep_cache_invalidate(drvdata->sep_cache, ctx_id,
+					    CRYPTO_CTX_ID_SINGLE_MASK);
+#ifdef SEP_RUNTIME_PM
+	dx_sep_pm_runtime_put();
+#endif
+	mutex_unlock(&drvdata->desc_queue_sequencer);
+	return rc;
+}
+
+/**
+ * process_digest_fin() - Process finilization event for hash operation
+ *
+ * @digest_req:	The async. digest request context
+ *
+ */
+static void process_digest_fin(struct async_digest_req_ctx *digest_req)
+{
+	u8 digest_size;
+	u8 *digest_ptr;
+	struct sep_op_ctx *op_ctx = &digest_req->async_req.op_ctx;
+#ifdef DEBUG
+	struct crypto_ahash *ahash_tfm;
+#endif
+
+	if (op_ctx->op_type == SEP_OP_CRYPTO_FINI) {
+		/* Handle digest copy back to "result" */
+		digest_size =
+		    ctxmgr_get_digest_or_mac_ptr(&op_ctx->ctx_info,
+						 &digest_ptr);
+		if (unlikely(digest_ptr == NULL)) {
+			pr_err("Failed fetching digest/MAC\n");
+			return;
+		}
+		if (digest_req->result != NULL)
+			memcpy(digest_req->result, digest_ptr, digest_size);
+		else	/* Save pointer to result (to be copied on "final") */
+			digest_req->result = digest_ptr;
+#ifdef DEBUG
+		dump_byte_array("digest", digest_req->result, digest_size);
+		ahash_tfm =
+		    crypto_ahash_reqtfm(container_of
+					(digest_req->async_req.initiating_req,
+					 struct ahash_request, base));
+		if (digest_size != crypto_ahash_digestsize(ahash_tfm))
+			pr_err("Read digest of %u B. Expected %u B.\n",
+				    digest_size,
+				    crypto_ahash_digestsize(ahash_tfm));
+#endif
+	}
+	crypto_op_completion_cleanup(op_ctx);
+	ctxmgr_unmap_kernel_ctx(&op_ctx->ctx_info);
+}
+
+static void dx_crypto_api_handle_op_completion(struct work_struct *work)
+{
+	struct async_req_ctx *areq_ctx =
+	    container_of(work, struct async_req_ctx, comp_work);
+	struct sep_op_ctx *op_ctx = &areq_ctx->op_ctx;
+	struct ablkcipher_request *ablkcipher_req;
+	struct crypto_async_request *initiating_req = areq_ctx->initiating_req;
+	int err = 0;
+	u8 *req_info_p;/* For state persistency in caller's context (IV) */
+
+	pr_debug("req=%p op_ctx=%p\n", initiating_req, op_ctx);
+	if (op_ctx == NULL) {
+		pr_err("Invalid work context (%p)\n", work);
+		return;
+	}
+
+	if (op_ctx->op_state == USER_OP_COMPLETED) {
+
+		if (unlikely(op_ctx->error_info != 0)) {
+			pr_err("SeP crypto-op failed (sep_rc=0x%08X)\n",
+				    op_ctx->error_info);
+		}
+		switch (crypto_tfm_alg_type(initiating_req->tfm)) {
+		case CRYPTO_ALG_TYPE_ABLKCIPHER:
+			/* Resolve to "info" (IV, etc.) for given alg_type */
+			crypto_op_completion_cleanup(op_ctx);
+			ablkcipher_req = (struct ablkcipher_request *)
+			    container_of(initiating_req,
+					 struct ablkcipher_request, base);
+			req_info_p = ablkcipher_req->info;
+			release_symcipher_ctx(op_ctx, req_info_p);
+			break;
+		case CRYPTO_ALG_TYPE_AHASH:
+			process_digest_fin(container_of(areq_ctx,
+					struct async_digest_req_ctx,
+					async_req));
+			break;
+		default:
+			pr_err("Unsupported alg_type (%d)\n",
+				    crypto_tfm_alg_type(initiating_req->tfm));
+		}
+		/* Save ret_code info before cleaning op_ctx */
+		err = -(op_ctx->error_info);
+		if (unlikely(err == -EINPROGRESS)) {
+			/* SeP error code collides with EINPROGRESS */
+			pr_err("Invalid SeP error code 0x%08X\n",
+				    op_ctx->error_info);
+			err = -EINVAL;	/* fallback */
+		}
+		op_ctx_fini(op_ctx);
+	} else if (op_ctx->op_state == USER_OP_INPROC) {
+		/* Report with the callback the dispatch from backlog to
+		   the actual processing in the SW descriptors queue
+		   (Returned -EBUSY when the request was dispatched) */
+		err = -EINPROGRESS;
+	} else {
+		pr_err("Invalid state (%d) for op_ctx %p\n",
+			    op_ctx->op_state, op_ctx);
+		BUG();
+	}
+	if (likely(initiating_req->complete != NULL))
+		initiating_req->complete(initiating_req, err);
+	else
+		pr_err("Async. operation has no completion callback.\n");
+}
+
+/****************************************************/
+/* Block cipher algorithms                          */
+/****************************************************/
+
+/**
+ * get_symcipher_tfm_cipher_type() - Get cipher type of given symcipher
+ *					transform
+ * @tfm:
+ *
+ * Returns enum dxdi_sym_cipher_type (DXDI_SYMCIPHER_NONE for invalid)
+ */
+static enum dxdi_sym_cipher_type get_symcipher_tfm_cipher_type(struct crypto_tfm
+							       *tfm)
+{
+	const int alg_index = tfm->__crt_alg - dx_ablkcipher_algs;
+
+	if ((alg_index < 0) || (alg_index >= DX_ABLKCIPHER_NUM)) {
+		pr_err("Unknown alg: %s\n", crypto_tfm_alg_name(tfm));
+		return DXDI_SYMCIPHER_NONE;
+	}
+
+	return dx_algs_cipher_types[alg_index];
+}
+
+static int symcipher_ctx_init(struct crypto_tfm *tfm)
+{
+	struct ablkcipher_tfm *ablktfm = &tfm->crt_ablkcipher;
+	struct host_crypto_ctx_sym_cipher *host_ctx_p = crypto_tfm_ctx(tfm);
+	struct device *mydev = crypto_api_ctx.drv_data->sep_data->dev;
+	struct client_crypto_ctx_info _ctx_info;
+	struct client_crypto_ctx_info *ctx_info = &_ctx_info;
+	enum dxdi_sym_cipher_type cipher_type =
+	    get_symcipher_tfm_cipher_type(tfm);
+	int rc;
+
+	pr_debug("Initializing context @%p for %s (%d)\n",
+		      host_ctx_p, crypto_tfm_alg_name(tfm), cipher_type);
+#ifdef SEP_RUNTIME_PM
+	dx_sep_pm_runtime_get();
+#endif
+	ablktfm->reqsize += sizeof(struct async_req_ctx);
+	rc = ctxmgr_map_kernel_ctx(ctx_info, mydev, ALG_CLASS_SYM_CIPHER,
+				   (struct host_crypto_ctx *)host_ctx_p, NULL,
+				   0);
+	if (rc != 0) {
+		pr_err("Failed mapping context (rc=%d)\n", rc);
+#ifdef SEP_RUNTIME_PM
+		dx_sep_pm_runtime_put();
+#endif
+		return rc;
+	}
+	/* Allocate a new Crypto context ID */
+	ctxmgr_set_ctx_id(ctx_info, alloc_crypto_ctx_id(&crypto_api_ctx));
+	rc = ctxmgr_init_symcipher_ctx_no_props(ctx_info, cipher_type);
+	if (unlikely(rc != 0)) {
+		pr_err("Failed initializing context\n");
+		ctxmgr_set_ctx_state(ctx_info, CTX_STATE_UNINITIALIZED);
+	} else {
+		ctxmgr_set_ctx_state(ctx_info, CTX_STATE_PARTIAL_INIT);
+	}
+	ctxmgr_unmap_kernel_ctx(ctx_info);
+
+	/* New TEE method */
+	if (!memcmp(crypto_tfm_alg_name(tfm), "xxx(aes)", 8)) {
+		u8 uuid[16] = DISK_ENC_APP_UUID;
+		enum dxdi_sep_module ret_origin;
+
+		host_ctx_p->sctx = dx_sepapp_context_alloc();
+		if (unlikely(!host_ctx_p->sctx)) {
+			rc = -ENOMEM;
+			goto init_end;
+		}
+
+		rc = dx_sepapp_session_open(host_ctx_p->sctx,
+				uuid, 0, NULL, NULL, &host_ctx_p->sess_id,
+				&ret_origin);
+		if (unlikely(rc != 0))
+			dx_sepapp_context_free(host_ctx_p->sctx);
+	}
+
+init_end:
+#ifdef SEP_RUNTIME_PM
+	dx_sep_pm_runtime_put();
+#endif
+	return rc;
+}
+
+/**
+ * symcipher_set_key() - Set key for given symmetric cipher context
+ * @tfm:
+ * @key:
+ * @keylen:
+ *
+ * Set key for given symmetric cipher context
+ * Setting a key implies initialization of the context
+ * Returns int
+ */
+static int symcipher_set_key(struct crypto_ablkcipher *tfm,
+			     const u8 *key, unsigned int keylen)
+{
+	struct host_crypto_ctx_sym_cipher *host_ctx_p =
+	    crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm));
+	struct device *mydev = crypto_api_ctx.drv_data->sep_data->dev;
+	struct client_crypto_ctx_info _ctx_info;
+	struct client_crypto_ctx_info *ctx_info = &_ctx_info;
+	enum dxdi_sym_cipher_type cipher_type =
+	    get_symcipher_tfm_cipher_type(crypto_ablkcipher_tfm(tfm));
+	u32 tfm_flags = crypto_ablkcipher_get_flags(tfm);
+	int rc;
+
+	if (cipher_type == DXDI_SYMCIPHER_NONE)
+		return -EINVAL;
+
+	if (keylen > DXDI_SYM_KEY_SIZE_MAX) {
+		pr_err("keylen=%u > %u\n", keylen, DXDI_SYM_KEY_SIZE_MAX);
+		tfm_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+		crypto_ablkcipher_set_flags(tfm, tfm_flags);
+		return -EINVAL;
+	}
+
+	pr_debug("alg=%s (%d) , keylen=%u\n",
+		      crypto_tfm_alg_name(crypto_ablkcipher_tfm(tfm)),
+		      cipher_type, keylen);
+
+	rc = ctxmgr_map_kernel_ctx(ctx_info, mydev, ALG_CLASS_SYM_CIPHER,
+				   (struct host_crypto_ctx *)host_ctx_p, NULL,
+				   0);
+	if (unlikely(rc != 0)) {
+		pr_err("Failed mapping context (rc=%d)\n", rc);
+		return rc;
+	}
+
+	if (ctxmgr_get_ctx_state(ctx_info) == CTX_STATE_UNINITIALIZED) {
+		pr_err("Invoked for uninitialized context @%p\n",
+			    host_ctx_p);
+		rc = -EINVAL;
+	} else {		/* Modify algorithm key */
+		rc = ctxmgr_set_symcipher_key(ctx_info, keylen, key);
+		if (rc != 0) {
+			if (rc == -EINVAL) {
+				pr_info("Invalid keylen=%u\n", keylen);
+				tfm_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+			} else if (rc == -EPERM) {
+				pr_info("Invalid/weak key\n");
+				tfm_flags |= CRYPTO_TFM_RES_WEAK_KEY;
+			} else {
+				pr_err("Unknown key setting error (%d)\n",
+					    rc);
+			}
+		}
+	}
+
+	ctxmgr_unmap_kernel_ctx(ctx_info);
+	crypto_ablkcipher_set_flags(tfm, tfm_flags);
+	return rc;
+}
+
+/**
+ * prepare_symcipher_ctx_for_processing() - Prepare crypto context resources
+ *						before dispatching an operation
+ *
+ * @op_ctx:	The associate operation context (from async req ctx)
+ * @host_ctx_p:	The host context to use (from tfm)
+ * @iv_crypto:	A pointer to IV (from req->info)
+ * @direction:	Requested cipher direction
+ */
+static int prepare_symcipher_ctx_for_processing(struct sep_op_ctx *op_ctx,
+						struct
+						host_crypto_ctx_sym_cipher
+						*host_ctx_p,
+						u8 *iv_crypto,
+						enum dxdi_cipher_direction
+						direction)
+{
+	struct client_crypto_ctx_info *ctx_info = &op_ctx->ctx_info;
+	struct device *mydev = crypto_api_ctx.drv_data->sep_data->dev;
+	struct sep_ctx_cache_entry *sep_ctx_p;
+	dma_addr_t sep_ctx_dma_addr;
+	int rc;
+
+	sep_ctx_p = dma_pool_alloc(sep_ctx_pool, GFP_KERNEL, &sep_ctx_dma_addr);
+	if (sep_ctx_p == NULL) {
+		pr_err("Failed allocating SeP context buffer\n");
+		return -ENOMEM;
+	}
+	rc = ctxmgr_map_kernel_ctx(ctx_info, mydev,
+				   ALG_CLASS_SYM_CIPHER,
+				   (struct host_crypto_ctx *)host_ctx_p,
+				   sep_ctx_p, sep_ctx_dma_addr);
+	if (unlikely(rc != 0)) {
+		pr_err("Failed mapping context (rc=%d)\n", rc);
+	} else {
+		ctxmgr_set_symcipher_iv(ctx_info, iv_crypto);
+		rc = ctxmgr_set_symcipher_direction(ctx_info, direction);
+		if (unlikely(rc != 0)) {
+			pr_err("Failed setting direction %d (rc=%d)\n",
+				    direction, rc);
+		}
+	}
+
+	if (unlikely(rc != 0)) {
+		/* Invalidate context on error */
+		ctxmgr_set_ctx_state(ctx_info, CTX_STATE_UNINITIALIZED);
+		ctxmgr_unmap_kernel_ctx(ctx_info);
+		dma_pool_free(sep_ctx_pool, sep_ctx_p, sep_ctx_dma_addr);
+#ifdef DEBUG
+	} else {		/* Context was changed by host */
+		ctxmgr_dump_sep_ctx(ctx_info);
+#endif
+		/* No need to dma_sync - sep_ctx is dma coherent mem. */
+	}
+
+	return rc;
+}
+
+/**
+ * release_symcipher_ctx() - Sync. back crypto context (IV) to desc->info)
+ *				to be able to track
+ * @op_ctx:	The associated operation context (from async req ctx)
+ * @iv_crypto:	The Crypto API IV buffer (req->info)
+ *
+ * Sync. back crypto context (IV) to desc->info) to be able to track
+ * IV changes, then unmap the context.
+ */
+static void release_symcipher_ctx(struct sep_op_ctx *op_ctx,
+				  u8 *iv_crypto)
+{
+	struct client_crypto_ctx_info *ctx_info = &op_ctx->ctx_info;
+	u8 iv_sep_ctx[DXDI_AES_BLOCK_SIZE];
+	u8 iv_size = DXDI_AES_BLOCK_SIZE;	/* Init. to max. */
+	struct sep_ctx_cache_entry *sep_ctx_p = ctx_info->sep_ctx_kptr;
+	dma_addr_t sep_ctx_dma_addr = ctx_info->sep_ctx_dma_addr;
+	int rc;
+
+	if (iv_crypto != NULL) {	/* Save IV (block state) */
+		rc = ctxmgr_get_symcipher_iv(ctx_info, NULL, iv_sep_ctx,
+					     &iv_size);
+		if (likely(rc == 0)) {
+			if (iv_size > 0) {
+				if (unlikely(iv_crypto == NULL)) {
+					pr_err(
+						    "iv_crypto==NULL when iv_size==%u\n",
+						    iv_size);
+				} else {
+					memcpy(iv_crypto, iv_sep_ctx, iv_size);
+				}
+			}
+		} else {
+			pr_err("Fail: getting IV information for ctx@%p\n",
+				    ctx_info->ctx_kptr);
+		}
+	}
+
+	ctxmgr_unmap_kernel_ctx(ctx_info);
+	dma_pool_free(sep_ctx_pool, sep_ctx_p, sep_ctx_dma_addr);
+}
+
+/**
+ * symcipher_process() - Process (encrypt/decrypt) data for given block-cipher
+ *			algorithm
+ * @req:	The async. request structure
+ * @direction:	Cipher operation direction
+ *
+ */
+static int symcipher_process(struct ablkcipher_request *req,
+			     enum dxdi_cipher_direction direction)
+{
+	struct async_req_ctx *areq_ctx = ablkcipher_request_ctx(req);
+	struct sep_op_ctx *op_ctx = &areq_ctx->op_ctx;
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct host_crypto_ctx_sym_cipher *host_ctx_p =
+	    crypto_ablkcipher_ctx(tfm);
+	int rc;
+
+	pr_debug("alg=%s %scrypt (req=%p, op_ctx=%p, host_ctx=%p)\n",
+		      crypto_tfm_alg_name(crypto_ablkcipher_tfm(tfm)),
+		      direction == DXDI_CDIR_ENC ? "en" : "de",
+		      req, op_ctx, host_ctx_p);
+
+	/* Initialize async. req. context */
+	areq_ctx->initiating_req = &req->base;
+
+	/* Old method */
+	if (memcmp(crypto_tfm_alg_name(crypto_ablkcipher_tfm(tfm)),
+			"xxx(aes)", 8)) {
+		INIT_WORK(&areq_ctx->comp_work,
+				dx_crypto_api_handle_op_completion);
+		op_ctx_init(op_ctx, &crypto_api_ctx);
+		op_ctx->op_type = SEP_OP_CRYPTO_PROC;
+		op_ctx->comp_work = &areq_ctx->comp_work;
+
+		rc = prepare_symcipher_ctx_for_processing(op_ctx,
+				host_ctx_p, req->info,
+				direction);
+		if (unlikely(rc != 0)) {
+			op_ctx_fini(op_ctx);
+			return rc;
+		}
+
+		rc = prepare_data_for_sep(op_ctx, NULL, req->src,
+					  NULL, req->dst,
+					  req->nbytes, CRYPTO_DATA_TEXT);
+		if (unlikely(rc != 0)) {
+			SEP_LOG_ERR(
+				    "Failed preparing DMA buffers (rc=%d, err_info=0x%08X\n)\n",
+				    rc, op_ctx->error_info);
+			if (op_ctx->error_info == DXDI_ERROR_INVAL_DATA_SIZE) {
+				SEP_LOG_ERR("Invalid data unit size %u\n",
+						req->nbytes);
+				req->base.flags |=
+						CRYPTO_TFM_RES_BAD_BLOCK_LEN;
+			}
+		} else {		/* Initiate processing */
+			/* Async. block cipher op. cannot reuse cache entry
+			   bacause the IV is set on every operation. Invalidate
+			   before releasing the sequencer (that's "async"
+			   invalidation) */
+			rc = dispatch_crypto_op(op_ctx,
+					req->base.
+					flags & CRYPTO_TFM_REQ_MAY_BACKLOG,
+					true /*init. */ , SEP_PROC_MODE_PROC_T,
+					false /*cache */);
+		}
+		if (unlikely(IS_DESCQ_ENQUEUE_ERR(rc))) { /* Dispatch failure */
+			crypto_op_completion_cleanup(op_ctx);
+			release_symcipher_ctx(op_ctx, req->info);
+			op_ctx_fini(op_ctx);
+		}
+	} else {
+		/* New method vith TEE api */
+		struct dxdi_sepapp_kparams *cmd_params =
+			kzalloc(sizeof(struct dxdi_sepapp_kparams), GFP_KERNEL);
+		enum dxdi_sep_module ret_origin;
+		struct scatterlist sg_iv;
+		u8 iv[SEP_AES_IV_SIZE];
+
+		if (cmd_params == NULL)
+			return -ENOMEM;
+
+		memcpy(iv, req->info, SEP_AES_IV_SIZE);
+		sg_init_one(&sg_iv, iv, SEP_AES_IV_SIZE);
+
+		cmd_params->params_types[0] = DXDI_SEPAPP_PARAM_VAL;
+		cmd_params->params[0].val.data[0] = direction;
+		cmd_params->params[0].val.data[1] = 0;
+		cmd_params->params[0].val.copy_dir = DXDI_DATA_TO_DEVICE;
+
+		cmd_params->params_types[1] = DXDI_SEPAPP_PARAM_MEMREF;
+		cmd_params->params[1].kmemref.dma_direction =
+					DXDI_DATA_TO_DEVICE;
+		cmd_params->params[1].kmemref.sgl = &sg_iv;
+		cmd_params->params[1].kmemref.nbytes = SEP_AES_IV_SIZE;
+
+		cmd_params->params_types[2] = DXDI_SEPAPP_PARAM_MEMREF;
+		cmd_params->params[2].kmemref.dma_direction =
+					DXDI_DATA_TO_DEVICE;
+		cmd_params->params[2].kmemref.sgl = req->src;
+		cmd_params->params[2].kmemref.nbytes = req->nbytes;
+
+		cmd_params->params_types[3] = DXDI_SEPAPP_PARAM_MEMREF;
+		cmd_params->params[3].kmemref.dma_direction =
+					DXDI_DATA_FROM_DEVICE;
+		cmd_params->params[3].kmemref.sgl = req->dst;
+		cmd_params->params[3].kmemref.nbytes = req->nbytes;
+
+		rc = async_sepapp_command_invoke(host_ctx_p->sctx,
+					host_ctx_p->sess_id, CMD_DO_CRYPTO,
+					cmd_params, &ret_origin, areq_ctx);
+	}
+
+	return rc;
+}
+
+/**
+ * symcipher_encrypt() - Encrypt for given sym-cipher context
+ * @req: The async. request structure
+ *
+ */
+static int symcipher_encrypt(struct ablkcipher_request *req)
+{
+	return symcipher_process(req, DXDI_CDIR_ENC);
+}
+
+/**
+ * symcipher_encrypt() - Decrypt for given sym-cipher context
+ * @req: The async. request structure
+ *
+ */
+static int symcipher_decrypt(struct ablkcipher_request *req)
+{
+	return symcipher_process(req, DXDI_CDIR_DEC);
+}
+
+static int ablkcipher_algs_init(void)
+{
+	int i, rc;
+	/* scratchpad to build crypto_alg from template + alg.specific data */
+	struct crypto_alg alg_spad;
+
+	/* Create block cipher algorithms from base + specs via scratchpad */
+	for (i = 0; i < DX_ABLKCIPHER_NUM; i++) {
+		/* Get base template */
+		memcpy(&alg_spad, &blkcipher_algs_base,
+		       sizeof(struct crypto_alg));
+		/* Get alg. specific attributes over base */
+		strcpy(alg_spad.cra_name, dx_ablkcipher_algs[i].cra_name);
+		strcpy(alg_spad.cra_driver_name,
+		       dx_ablkcipher_algs[i].cra_driver_name);
+		alg_spad.cra_blocksize = dx_ablkcipher_algs[i].cra_blocksize;
+		alg_spad.cra_u.ablkcipher.min_keysize =
+		    dx_ablkcipher_algs[i].cra_u.ablkcipher.min_keysize;
+		alg_spad.cra_u.ablkcipher.max_keysize =
+		    dx_ablkcipher_algs[i].cra_u.ablkcipher.max_keysize;
+		alg_spad.cra_u.ablkcipher.ivsize =
+		    dx_ablkcipher_algs[i].cra_u.ablkcipher.ivsize;
+		/* Copy scratchpad to real entry */
+		memcpy(&dx_ablkcipher_algs[i], &alg_spad,
+		       sizeof(struct crypto_alg));
+		/* The list must be initialized in place (pointers based) */
+		INIT_LIST_HEAD(&dx_ablkcipher_algs[i].cra_list);
+	}
+
+	/* Register algs */
+	pr_debug("Registering CryptoAPI blkciphers:\n");
+	for (i = 0, rc = 0; (i < DX_ABLKCIPHER_NUM) && (rc == 0); i++) {
+		pr_debug("%d. %s (__crt_alg=%p)\n", i,
+			      dx_ablkcipher_algs[i].cra_name,
+			      &dx_ablkcipher_algs[i]);
+		rc = crypto_register_alg(&dx_ablkcipher_algs[i]);
+		if (rc != 0)
+			break;
+	}
+	/* Failure: cleanup algorithms that already registered */
+	if (rc != 0) {
+		pr_err("Failed registering %s\n",
+			    dx_ablkcipher_algs[i].cra_name);
+		if (i > 0)
+			for (; i >= 0; i--)
+				crypto_unregister_alg(
+						&dx_ablkcipher_algs[i]);
+	}
+	return rc;
+}
+
+static void ablkcipher_algs_exit(void)
+{
+	int i;
+
+	for (i = 0; i < DX_ABLKCIPHER_NUM; i++)
+		crypto_unregister_alg(&dx_ablkcipher_algs[i]);
+}
+
+/****************************************************/
+/* Digest (hash/MAC) algorithms                     */
+/****************************************************/
+
+static struct dx_digest_alg *get_digest_alg(struct crypto_tfm *tfm)
+{
+	struct hash_alg_common *halg_common =
+	    container_of(tfm->__crt_alg, struct hash_alg_common, base);
+	struct ahash_alg *this_ahash =
+	    container_of(halg_common, struct ahash_alg, halg);
+	struct dx_digest_alg *this_digest_alg =
+	    container_of(this_ahash, struct dx_digest_alg, ahash);
+	int alg_index = this_digest_alg - dx_digest_algs;
+
+	/* Verify that the tfm is valid (inside our dx_digest_algs array) */
+	if ((alg_index < 0) || (alg_index >= DX_DIGEST_NUM)) {
+		pr_err("Invalid digest tfm @%p\n", tfm);
+		return NULL;
+	}
+	return this_digest_alg;
+}
+
+/**
+ * prepare_digest_context_for_processing() - Prepare the crypto context of
+ *	async. hash/mac operation. Initialize context if requested.
+ *
+ * @req:		Crypto request context
+ * @do_init:		When "true" the given context is initialized
+ */
+static int prepare_digest_context_for_processing(struct ahash_request *req,
+						 bool do_init)
+{
+	struct crypto_ahash *ahash_tfm = crypto_ahash_reqtfm(req);
+	struct crypto_tfm *tfm = &ahash_tfm->base;
+	struct dx_digest_alg *digest_alg = get_digest_alg(tfm);
+	struct mac_key_data *key_data = crypto_tfm_ctx(tfm);/* For MACS only */
+	struct async_digest_req_ctx *req_ctx = ahash_request_ctx(req);
+	struct device *mydev = crypto_api_ctx.drv_data->sep_data->dev;
+	struct sep_op_ctx *op_ctx = &req_ctx->async_req.op_ctx;
+	struct client_crypto_ctx_info *ctx_info;
+	enum dxdi_mac_type mac_type;
+	struct dxdi_mac_props mac_props;	/* For MAC init. */
+#ifdef DEBUG
+	enum host_ctx_state ctx_state;
+#endif
+	int error_info;
+	int rc;
+
+	if (unlikely(digest_alg == NULL))
+		return -EINVAL;
+
+	pr_debug("op_ctx=%p op_state=%d\n", op_ctx, op_ctx->op_state);
+	ctx_info = &op_ctx->ctx_info;
+	mac_type = digest_alg->mac_type;
+
+	if (!do_init) {
+		/* Verify given request context was initialized */
+		if (req_ctx->async_req.initiating_req == NULL) {
+			pr_err(
+				    "Invoked for uninitialized async. req. context\n");
+			return -EINVAL;
+		}
+		/* Verify this request context that is not in use */
+		if (op_ctx->op_state != USER_OP_NOP) {
+			pr_err("Invoked for context in use!\n");
+			return -EINVAL;
+			/*
+			 * We do not return -EBUSY because this is a valid
+			 * return code for async crypto operations that
+			 * indicates the given request was actually dispatched.
+			 */
+		}
+	}
+	op_ctx_init(op_ctx, &crypto_api_ctx);
+	op_ctx->comp_work = &req_ctx->async_req.comp_work;
+	rc = ctxmgr_map_kernel_ctx(ctx_info, mydev,
+				   (mac_type != DXDI_MAC_NONE) ?
+				   ALG_CLASS_MAC : ALG_CLASS_HASH,
+				   (struct host_crypto_ctx *)&req_ctx->host_ctx,
+				   NULL, 0);
+	if (rc != 0) {
+		pr_err("Failed mapping context (rc=%d)\n", rc);
+		return rc;
+	}
+	if (do_init) {
+		/* Allocate a new Crypto context ID */
+		ctxmgr_set_ctx_id(ctx_info,
+				  alloc_crypto_ctx_id(&crypto_api_ctx));
+		if (mac_type == DXDI_MAC_NONE) {	/* Hash alg. */
+			rc = ctxmgr_init_hash_ctx(ctx_info,
+						  digest_alg->hash_type,
+						  &error_info);
+		} else {	/* MAC */
+			mac_props.mac_type = mac_type;
+			mac_props.key_size = key_data->key_size;
+			memcpy(mac_props.key, key_data->key,
+			       key_data->key_size);
+			if (mac_type == DXDI_MAC_HMAC)
+				mac_props.alg_specific.hmac.hash_type =
+				    digest_alg->hash_type;
+			rc = ctxmgr_init_mac_ctx(ctx_info, &mac_props,
+						 &error_info);
+		}
+		if (unlikely(rc != 0)) {
+			pr_err("Failed initializing context\n");
+			ctxmgr_set_ctx_state(ctx_info, CTX_STATE_UNINITIALIZED);
+		} else {
+			ctxmgr_set_ctx_state(ctx_info, CTX_STATE_PARTIAL_INIT);
+			/* Init. the async. request context */
+			req_ctx->async_req.initiating_req = &req->base;
+			INIT_WORK(&req_ctx->async_req.comp_work,
+				  dx_crypto_api_handle_op_completion);
+			req_ctx->result = NULL;
+		}
+#ifdef DEBUG
+	} else {		/* Should have been initialized before */
+		ctx_state = ctxmgr_get_ctx_state(ctx_info);
+		if (ctx_state != CTX_STATE_INITIALIZED) {
+			pr_err("Invoked for context in state %d!\n",
+				    ctx_state);
+			rc = -EINVAL;
+		}
+#endif		 /*DEBUG*/
+	}
+	if (likely(rc == 0)) {
+#ifdef DEBUG
+		ctxmgr_dump_sep_ctx(ctx_info);
+#endif
+		/* Flush sep_ctx out of host cache */
+		ctxmgr_sync_sep_ctx(ctx_info, mydev);
+	}
+	return rc;
+}
+
+static int digest_req_dispatch(struct ahash_request *req,
+			       bool do_init, bool is_last,
+			       struct scatterlist *src, unsigned int nbytes)
+{
+	struct crypto_ahash *ahash_tfm = crypto_ahash_reqtfm(req);
+	struct dx_digest_alg *digest_alg = get_digest_alg(&ahash_tfm->base);
+	struct async_digest_req_ctx *req_ctx =
+	    (struct async_digest_req_ctx *)ahash_request_ctx(req);
+	struct sep_op_ctx *op_ctx = &req_ctx->async_req.op_ctx;
+	struct client_crypto_ctx_info *ctx_info = &op_ctx->ctx_info;
+	int rc;
+
+	if (digest_alg == NULL)
+		return -EINVAL;
+	if ((!do_init) && (req_ctx->result != NULL)) {
+		/* already finalized (AES based MACs) */
+		if (unlikely(nbytes > 0)) {
+			pr_err("Invoked with %u B after finalized\n",
+				    nbytes);
+			return -EINVAL;
+		}
+		if (is_last) {
+			/* Fetch saved result */
+			memcpy(req->result, req_ctx->result,
+			       SEP_AES_BLOCK_SIZE);
+			return 0;
+		}
+	}
+	rc = prepare_digest_context_for_processing(req, do_init);
+	if (unlikely(rc != 0))
+		return rc;
+	/* Preapre req_ctx->result */
+	if (is_last) {		/* Plain finalization */
+		req_ctx->result = req->result;
+	} else if (((digest_alg->mac_type == DXDI_MAC_AES_XCBC_MAC) ||
+		    (digest_alg->mac_type == DXDI_MAC_AES_CMAC)) &&
+		   (!IS_MULT_OF(nbytes, SEP_AES_BLOCK_SIZE))) {
+		/* Handle special case of AES based MAC update when not AES
+		   block multiple --> dispatch as final update */
+		is_last = true;
+		/* req_Ctx->result remains NULL. This would cause setting
+		   it to the result location in the sep context,
+		   when completed */
+	}
+
+	op_ctx->op_type = is_last ? SEP_OP_CRYPTO_FINI :
+	    do_init ? SEP_OP_CRYPTO_INIT : SEP_OP_CRYPTO_PROC;
+	if (op_ctx->op_type != SEP_OP_CRYPTO_INIT) {
+		rc = prepare_data_for_sep(op_ctx, NULL, src, NULL, NULL, nbytes,
+					  is_last ? CRYPTO_DATA_TEXT_FINALIZE :
+					  CRYPTO_DATA_TEXT);
+		if (rc == -ENOTBLK) {
+			/* Data was accumulated but less than a hash block */
+			/* Complete operation immediately */
+			rc = 0;
+			goto digest_proc_exit;
+		}
+		if (unlikely(rc != 0)) {
+			pr_err("Failed mapping client DMA buffer.\n");
+			goto digest_proc_exit;
+		}
+	}
+	rc = dispatch_crypto_op(op_ctx,
+				req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG,
+				do_init,
+				is_last ? SEP_PROC_MODE_FIN : do_init ?
+				SEP_PROC_MODE_NOP : SEP_PROC_MODE_PROC_T,
+				true /*cache */);
+	if (unlikely(IS_DESCQ_ENQUEUE_ERR(rc))) {
+		pr_err("Failed dispatching CRYPTO_OP (rc=%d)\n", rc);
+		crypto_op_completion_cleanup(op_ctx);
+		ctxmgr_unmap_kernel_ctx(ctx_info);
+		op_ctx_fini(op_ctx);
+	}
+	return rc;
+/* Exit when there is no pending request (error or not enough data) */
+ digest_proc_exit:
+	ctxmgr_unmap_kernel_ctx(ctx_info);
+	op_ctx_fini(op_ctx);
+	return rc;
+}
+
+static int digest_init(struct ahash_request *req)
+{
+	pr_debug("\n");
+	return digest_req_dispatch(req, true, false, NULL, 0);
+}
+
+static int digest_update(struct ahash_request *req)
+{
+	pr_debug("nbytes=%u\n", req->nbytes);
+	if (req->nbytes == 0)
+		return 0;	/* Nothing to do (but valid for 0 data MACs */
+
+	return digest_req_dispatch(req, false, false, req->src, req->nbytes);
+}
+
+static int digest_final(struct ahash_request *req)
+{
+	pr_debug("\n");
+	return digest_req_dispatch(req, false, true, NULL, 0);
+}
+
+static int digest_finup(struct ahash_request *req)
+{
+	pr_debug("nbytes=%u\n", req->nbytes);
+	return digest_req_dispatch(req, false, true, req->src, req->nbytes);
+}
+
+static int digest_integrated(struct ahash_request *req)
+{
+	pr_debug("nbytes=%u\n", req->nbytes);
+	return digest_req_dispatch(req, true, true, req->src, req->nbytes);
+}
+
+/**
+ * do_hash_sync() - Do integrated hash operation synchronously
+ *
+ * @hash_type:		The hash type used for this HMAC
+ * @data_in:		The input data
+ * @data_len:		Size data_in in bytes
+ * @digest:		The hash result
+ * @digest_len_p:	Returned digest size
+ *
+ * This function is used to shorten long HMAC keys.
+ */
+static int do_hash_sync(enum dxdi_hash_type hash_type,
+			const u8 *data_in, unsigned int data_len,
+			u8 *digest, unsigned int *digest_len_p)
+{
+	int rc;
+	struct queue_drvdata *drvdata = crypto_api_ctx.drv_data;
+	struct host_crypto_ctx_hash host_ctx;
+	struct sep_op_ctx op_ctx;
+	struct client_crypto_ctx_info *ctx_info_p = &op_ctx.ctx_info;
+	struct scatterlist din_sgl;
+
+	op_ctx_init(&op_ctx, &crypto_api_ctx);
+	rc = ctxmgr_map_kernel_ctx(ctx_info_p, drvdata->sep_data->dev,
+				   ALG_CLASS_HASH,
+				   (struct host_crypto_ctx *)&host_ctx, NULL,
+				   0);
+	if (rc != 0) {
+		pr_err("Failed mapping crypto context (rc=%d)\n", rc);
+		op_ctx_fini(&op_ctx);
+		return rc;
+	}
+	/* Allocate a new Crypto context ID */
+	ctxmgr_set_ctx_id(ctx_info_p, alloc_crypto_ctx_id(op_ctx.client_ctx));
+	/* Algorithm class specific initialization */
+	rc = ctxmgr_init_hash_ctx(ctx_info_p, hash_type, &op_ctx.error_info);
+	if (rc != 0)
+		goto unmap_ctx_and_exit;
+	ctxmgr_set_ctx_state(ctx_info_p, CTX_STATE_PARTIAL_INIT);
+	op_ctx.op_type = SEP_OP_CRYPTO_FINI;	/* Integrated is also fin. */
+	sg_init_one(&din_sgl, data_in, data_len);
+	rc = prepare_data_for_sep(&op_ctx, NULL, &din_sgl, NULL, NULL,
+				  data_len, CRYPTO_DATA_TEXT_FINALIZE);
+	if (rc != 0)
+		goto unmap_ctx_and_exit;
+
+#ifdef DEBUG
+	ctxmgr_dump_sep_ctx(ctx_info_p);
+#endif
+	/* Flush sep_ctx out of host cache */
+	ctxmgr_sync_sep_ctx(ctx_info_p, drvdata->sep_data->dev);
+	rc = dispatch_crypto_op(&op_ctx, true, true, SEP_PROC_MODE_FIN, false);
+	if (likely(!IS_DESCQ_ENQUEUE_ERR(rc))) {
+		rc = 0;	/* Clear valid return code from dispatch_crypto_op */
+		wait_for_completion(&op_ctx.ioctl_op_compl);
+		if (likely(op_ctx.error_info == 0)) {
+			*digest_len_p =
+			    ctxmgr_get_digest_or_mac(ctx_info_p, digest);
+		}
+	}
+	crypto_op_completion_cleanup(&op_ctx);
+
+ unmap_ctx_and_exit:
+	ctxmgr_unmap_kernel_ctx(ctx_info_p);
+	return rc;
+}
+
+static int mac_setkey(struct crypto_ahash *tfm,
+		      const u8 *key, unsigned int keylen)
+{
+	struct dx_digest_alg *digest_alg = get_digest_alg(&tfm->base);
+	u32 tfm_flags = crypto_ahash_get_flags(tfm);
+	struct mac_key_data *key_data = crypto_tfm_ctx(&tfm->base);
+	int rc = 0;
+
+	if (unlikely(digest_alg == NULL))
+		return -EINVAL;
+	if (unlikely(digest_alg->mac_type == DXDI_MAC_NONE)) {
+		pr_err("Given algorithm which is not MAC\n");
+		return -EINVAL;
+	}
+	/* Pre-process HMAC key if larger than hash block size */
+	if ((digest_alg->hash_type != DXDI_HASH_NONE) &&
+	    (keylen > digest_alg->ahash.halg.base.cra_blocksize)) {
+		rc = do_hash_sync(digest_alg->hash_type, key, keylen,
+				  key_data->key, &key_data->key_size);
+		if (unlikely(rc != 0))
+			pr_err("Failed digesting key of %u bytes\n",
+			       keylen);
+		if (key_data->key_size != digest_alg->ahash.halg.digestsize)
+			pr_err("Returned digest size is %u != %u (expected)\n",
+			       key_data->key_size,
+			       digest_alg->ahash.halg.digestsize);
+	} else {		/* No need to digest the key */
+		/* Verify that the key size for AES based MACs is not too
+		   large. */
+		if ((digest_alg->hash_type == DXDI_HASH_NONE) &&
+		    (keylen > SEP_AES_KEY_SIZE_MAX)) {
+			pr_err("Invalid key size %u for %s\n",
+			       keylen,
+			       digest_alg->ahash.halg.base.cra_name);
+			tfm_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+			crypto_ahash_set_flags(tfm, tfm_flags);
+			rc = -EINVAL;
+		} else {
+			key_data->key_size = keylen;
+			memcpy(&key_data->key, key, keylen);
+		}
+	}
+
+	return rc;
+}
+
+/**
+ * digest_tfm_init() - Initialize tfm with our reqsize (to accomodate context)
+ *	(cra_init entry point)
+ * @tfm:
+ */
+static int digest_tfm_init(struct crypto_tfm *tfm)
+{
+	struct crypto_ahash *ahash_tfm =
+	    container_of(tfm, struct crypto_ahash, base);
+
+	ahash_tfm->reqsize = sizeof(struct async_digest_req_ctx);
+	return 0;
+}
+
+static int digest_algs_init(void)
+{
+	int i, rc;
+
+	/* Create hash algorithms from base + specs via scratchpad */
+	for (i = 0; i < DX_DIGEST_NUM; i++) {
+		/* Apply template values into given algorithms */
+		dx_digest_algs[i].ahash.init = digest_algs_base.init;
+		dx_digest_algs[i].ahash.update = digest_algs_base.update;
+		dx_digest_algs[i].ahash.final = digest_algs_base.final;
+		dx_digest_algs[i].ahash.finup = digest_algs_base.finup;
+		dx_digest_algs[i].ahash.digest = digest_algs_base.digest;
+		dx_digest_algs[i].ahash.halg.base.cra_type =
+		    digest_algs_base.halg.base.cra_type;
+		dx_digest_algs[i].ahash.halg.base.cra_priority =
+		    digest_algs_base.halg.base.cra_priority;
+		dx_digest_algs[i].ahash.halg.base.cra_flags =
+		    digest_algs_base.halg.base.cra_flags;
+		dx_digest_algs[i].ahash.halg.base.cra_module =
+		    digest_algs_base.halg.base.cra_module;
+		dx_digest_algs[i].ahash.halg.base.cra_init =
+		    digest_algs_base.halg.base.cra_init;
+		INIT_LIST_HEAD(&dx_digest_algs[i].ahash.halg.base.cra_list);
+	}
+
+	/* Register algs */
+	pr_debug("Registering CryptoAPI digest algorithms:\n");
+	for (i = 0, rc = 0; (i < DX_DIGEST_NUM) && (rc == 0); i++) {
+		pr_debug("%d. %s (__crt_alg=%p)\n", i,
+			      dx_digest_algs[i].ahash.halg.base.cra_name,
+			      &dx_digest_algs[i].ahash);
+		rc = crypto_register_ahash(&dx_digest_algs[i].ahash);
+		if (rc != 0)
+			break;
+	}
+	if (unlikely(rc != 0)) {
+		/* Failure: cleanup algorithms that already registered */
+		pr_err("Failed registering %s\n",
+			    dx_digest_algs[i].ahash.halg.base.cra_name);
+		if (i > 0)
+			for (; i >= 0; i--)
+				crypto_unregister_ahash(
+						&dx_digest_algs[i].ahash);
+	}
+	return rc;
+}
+
+static void digest_algs_exit(void)
+{
+	int i;
+
+	for (i = 0; i < DX_DIGEST_NUM; i++)
+		crypto_unregister_ahash(&dx_digest_algs[i].ahash);
+}
+
+/****************************************************/
+int dx_crypto_api_init(struct sep_drvdata *drvdata)
+{
+	/* Init. return code of each init. function to know which one to
+	   cleanup (only those with rc==0) */
+	int rc_ablkcipher_init = -EINVAL;
+	int rc_digest_init = -EINVAL;
+	int rc;
+
+	init_client_ctx(drvdata->queue + CRYPTO_API_QID, &crypto_api_ctx);
+
+	sep_ctx_pool = dma_pool_create("dx_sep_ctx",
+				       crypto_api_ctx.drv_data->sep_data->dev,
+				       sizeof(struct sep_ctx_cache_entry),
+				       L1_CACHE_BYTES, 0);
+	if (sep_ctx_pool == NULL) {
+		pr_err("Failed allocating pool for SeP contexts\n");
+		rc = -ENOMEM;
+		goto init_error;
+	}
+	rc_ablkcipher_init = ablkcipher_algs_init();
+	if (unlikely(rc_ablkcipher_init != 0)) {
+		rc = rc_ablkcipher_init;
+		goto init_error;
+	}
+	rc_digest_init = digest_algs_init();
+	if (unlikely(rc_digest_init != 0)) {
+		rc = rc_digest_init;
+		goto init_error;
+	}
+
+	return 0;
+
+ init_error:
+	if (rc_ablkcipher_init == 0)
+		ablkcipher_algs_exit();
+	if (sep_ctx_pool != NULL)
+		dma_pool_destroy(sep_ctx_pool);
+	cleanup_client_ctx(drvdata->queue + CRYPTO_API_QID, &crypto_api_ctx);
+	return rc;
+}
+
+void dx_crypto_api_fini(void)
+{
+	digest_algs_exit();
+	ablkcipher_algs_exit();
+	dma_pool_destroy(sep_ctx_pool);
+	cleanup_client_ctx(crypto_api_ctx.drv_data, &crypto_api_ctx);
+}
diff --git a/drivers/staging/sep54/crypto_api.h b/drivers/staging/sep54/crypto_api.h
new file mode 100644
index 0000000..67a546c
--- /dev/null
+++ b/drivers/staging/sep54/crypto_api.h
@@ -0,0 +1,64 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+
+/* \file cryto_api.h
+   Definitions of Linux Crypto API shared with the main driver code
+ */
+
+#ifndef __CRYPTO_API_H__
+#define __CRYPTO_API_H__
+
+#include <linux/crypto.h>
+#include <crypto/algapi.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <linux/workqueue.h>
+
+/**
+ * struct async_req_ctx - Context for async. request (__ctx of request)
+ * @op_ctx:		SeP operation context
+ * @initiating_req:	The initiating crypto request
+ * @comp_work:		Completion work handler
+ */
+struct async_req_ctx {
+	struct sep_op_ctx op_ctx;
+	struct crypto_async_request *initiating_req;
+	struct work_struct comp_work;
+};
+
+/* Crypto-API init. entry point (to be used by sep_setup) */
+int dx_crypto_api_init(struct sep_drvdata *drvdata);
+void dx_crypto_api_fini(void);
+
+int hwk_init(void);
+void hwk_fini(void);
+
+#endif /*__CRYPTO_API_H__*/
+
+
+
diff --git a/drivers/staging/sep54/crypto_ctx_mgr.c b/drivers/staging/sep54/crypto_ctx_mgr.c
new file mode 100644
index 0000000..4894221
--- /dev/null
+++ b/drivers/staging/sep54/crypto_ctx_mgr.c
@@ -0,0 +1,2565 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+/*! \file
+ * This source file implements crypto context management services.
+ */
+
+#include <linux/device.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#define SEP_LOG_CUR_COMPONENT SEP_LOG_MASK_CTX_MGR
+#include "sep_log.h"
+#include "dx_driver.h"
+#include "crypto_ctx_mgr.h"
+
+struct ctxmgr_cache_entry {
+	struct crypto_ctx_uid ctx_id;/* Allocated context ID or CTX_INVALID_ID */
+	unsigned long lru_time;	/* Monotonically incrementing counter for LRU */
+};
+
+struct sep_ctx_cache {
+	unsigned long lru_clk;	/* Virtual clock counter */
+	/* The virtual clock counter is incremented for each entry
+	   allocation/reuse in order to provide LRU info */
+	int cache_size;		/* Num. of cache entries */
+	struct ctxmgr_cache_entry entries[1];	/*embedded entries */
+	/* The "entries" element is only a start point for an array with
+	   cache_size entries that starts in this location */
+};
+
+/* DES weak keys checking data */
+static const u8 des_key_parity[] = {
+	8, 1, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 2, 8, 0, 8, 8, 0, 8, 0, 0, 8,
+	    8,
+	0, 0, 8, 0, 8, 8, 3,
+	0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0, 8, 0, 0, 8, 0, 8, 8, 0,
+	    0,
+	8, 8, 0, 8, 0, 0, 8,
+	0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0, 8, 0, 0, 8, 0, 8, 8, 0,
+	    0,
+	8, 8, 0, 8, 0, 0, 8,
+	8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8, 0, 8, 8, 0, 8, 0, 0, 8,
+	    8,
+	0, 0, 8, 0, 8, 8, 0,
+	0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0, 8, 0, 0, 8, 0, 8, 8, 0,
+	    0,
+	8, 8, 0, 8, 0, 0, 8,
+	8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8, 0, 8, 8, 0, 8, 0, 0, 8,
+	    8,
+	0, 0, 8, 0, 8, 8, 0,
+	8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8, 0, 8, 8, 0, 8, 0, 0, 8,
+	    8,
+	0, 0, 8, 0, 8, 8, 0,
+	4, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0, 8, 5, 0, 8, 0, 8, 8, 0,
+	    0,
+	8, 8, 0, 8, 0, 6, 8,
+};
+
+/**
+ * ctxmgr_get_ctx_size() - Get host context size for given algorithm class
+ * @alg_class:	 Queries algorithm class
+ *
+ * Returns size_t Size in bytes of host context
+ */
+size_t ctxmgr_get_ctx_size(enum crypto_alg_class alg_class)
+{
+	switch (alg_class) {
+	case ALG_CLASS_SYM_CIPHER:
+		return sizeof(struct host_crypto_ctx_sym_cipher);
+	case ALG_CLASS_AUTH_ENC:
+		return sizeof(struct host_crypto_ctx_auth_enc);
+	case ALG_CLASS_MAC:
+		return sizeof(struct host_crypto_ctx_mac);
+	case ALG_CLASS_HASH:
+		return sizeof(struct host_crypto_ctx_hash);
+	default:
+		return 0;
+	}
+}
+
+static size_t get_sep_ctx_offset(enum crypto_alg_class alg_class)
+{
+	switch (alg_class) {
+	case ALG_CLASS_SYM_CIPHER:
+		return offsetof(struct host_crypto_ctx_sym_cipher, sep_ctx);
+	case ALG_CLASS_AUTH_ENC:
+		return offsetof(struct host_crypto_ctx_auth_enc, sep_ctx);
+	case ALG_CLASS_MAC:
+		return offsetof(struct host_crypto_ctx_mac, sep_ctx);
+	case ALG_CLASS_HASH:
+		return offsetof(struct host_crypto_ctx_hash, sep_ctx);
+	default:
+		pr_err("Invalid algorith class = %d\n", alg_class);
+		return 0;
+	}
+
+}
+
+/**
+ * get_hash_digest_size() - Get hash digest size (in octets) of given (SeP)
+ *				hash mode
+ * @hash_mode:
+ *
+ * Returns u32 Digest size in octets (0 if unknown hash mode)
+ */
+static u32 get_hash_digest_size(enum sep_hash_mode hash_mode)
+{
+	switch (hash_mode) {
+	case SEP_HASH_SHA1:
+		return 160 >> 3;	/* 160 bit */
+	case SEP_HASH_SHA224:
+		return 224 >> 3;
+	case SEP_HASH_SHA256:
+		return 256 >> 3;
+	case SEP_HASH_SHA384:
+		return 384 >> 3;
+	case SEP_HASH_SHA512:
+		return 512 >> 3;
+	default:
+		pr_err("Unknown hash mode %d\n", hash_mode);
+	}
+	return 0;
+}
+
+static u16 get_hash_block_size(enum dxdi_hash_type hash_type)
+{
+
+	switch (hash_type) {
+	case DXDI_HASH_MD5:
+		pr_err("MD5 not supported\n");
+		break;
+	case DXDI_HASH_SHA1:
+	case DXDI_HASH_SHA224:
+	case DXDI_HASH_SHA256:
+		return 512 >> 3;
+	case DXDI_HASH_SHA384:
+	case DXDI_HASH_SHA512:
+		return 1024 >> 3;
+	default:
+		pr_err("Invalid hash type %d", hash_type);
+	}
+	return 0;
+}
+
+enum sep_hash_mode get_sep_hash_mode(enum dxdi_hash_type hash_type)
+{
+	switch (hash_type) {
+	case DXDI_HASH_MD5:
+		pr_err("MD5 not supported\n");
+		return SEP_HASH_NULL;
+	case DXDI_HASH_SHA1:
+		return SEP_HASH_SHA1;
+	case DXDI_HASH_SHA224:
+		return SEP_HASH_SHA224;
+	case DXDI_HASH_SHA256:
+		return SEP_HASH_SHA256;
+	case DXDI_HASH_SHA384:
+		return SEP_HASH_SHA384;
+	case DXDI_HASH_SHA512:
+		return SEP_HASH_SHA512;
+	default:
+		pr_err("Invalid hash type=%d\n", hash_type);
+		return SEP_HASH_NULL;
+	}
+}
+
+/**
+ * ctxmgr_map_user_ctx() - Map given user context to kernel space + DMA
+ * @ctx_info:	 User context info structure
+ * @mydev:	 Associated device context
+ * @alg_class:	If !ALG_CLASS_NONE, consider context of given class for
+ *		size validation (used in uninitialized context mapping)
+ *		When set to ALG_CLASS_NONE, the alg_class field of the
+ *		host_crypto_ctx is used to verify mapped buffer size.
+ * @user_ctx_ptr: Pointer to user space crypto context
+ *
+ * Returns int 0 for success
+ */
+int ctxmgr_map_user_ctx(struct client_crypto_ctx_info *ctx_info,
+			struct device *mydev,
+			enum crypto_alg_class alg_class,
+			u32 __user *user_ctx_ptr)
+{
+	const unsigned long offset_in_page =
+	    ((unsigned long)user_ctx_ptr & ~PAGE_MASK);
+	const unsigned long dist_from_page_end = PAGE_SIZE - offset_in_page;
+	size_t ctx_size = ctxmgr_get_ctx_size(alg_class);
+	int pages_mapped;
+	int rc;
+
+#ifdef DEBUG
+	if (ctx_info->user_ptr != NULL) {
+		pr_err("User context already mapped to 0x%p\n",
+			    ctx_info->user_ptr);
+		return -EINVAL;
+	}
+#endif
+	ctx_info->dev = mydev;
+
+	/* If unknown class, verify that it is at least host_ctx size */
+	/* (so we can access the alg_class field in it) */
+	ctx_size = (ctx_size == 0) ? sizeof(struct host_crypto_ctx) : ctx_size;
+	if (dist_from_page_end < ctx_size) {
+		pr_err("Given user context that crosses a page (0x%p)\n",
+			    user_ctx_ptr);
+		return -EINVAL;
+	}
+
+	down_read(&current->mm->mmap_sem);
+	pages_mapped = get_user_pages(current, current->mm,
+				      (unsigned long)user_ctx_ptr, 1, 1, 0,
+				      &ctx_info->ctx_page, 0);
+	up_read(&current->mm->mmap_sem);
+	if (pages_mapped < 1) {
+		pr_err("Failed getting user page\n");
+		return -ENOMEM;
+	}
+
+	/* Set pointer to correct offset in mapped page */
+	ctx_info->ctx_kptr = (struct host_crypto_ctx *)
+	    ((unsigned long)ctx_info->ctx_page |
+	     ((unsigned long)user_ctx_ptr & ~PAGE_MASK));
+
+	ctx_info->ctx_kptr = kzalloc(PAGE_SIZE, GFP_KERNEL);
+	if (ctx_info->ctx_kptr == NULL) {
+		SEP_LOG_ERR("Memory allocation failed\n");
+		return -ENOMEM;
+	}
+
+	if (alg_class == ALG_CLASS_NONE) {
+		size_t host_ctx_size = sizeof(struct host_crypto_ctx);
+		/* Copy common header to get the alg class */
+		if (copy_from_user(ctx_info->ctx_kptr,
+				user_ctx_ptr, host_ctx_size)) {
+			pr_err("Copy from user failed\n");
+			rc = -EINVAL;
+			goto copy_from_user_failed;
+		}
+		/* Verify actual context size with class saved in context */
+		alg_class = ctx_info->ctx_kptr->alg_class;
+		ctx_size = ctxmgr_get_ctx_size(alg_class);
+		if (ctx_size == 0) {	/* Unknown class */
+			pr_err("Unknown alg class\n");
+			rc = -EINVAL;
+			goto unknown_alg_class;
+		}
+		if (dist_from_page_end < ctx_size) {
+			pr_err(
+				    "Given user context that crosses a page (0x%p)\n",
+				    user_ctx_ptr);
+			rc = -EINVAL;
+			goto ctx_cross_page;
+		}
+		/* Copy rest of the context when we know the actual size */
+		if (copy_from_user((u8 *)ctx_info->ctx_kptr + host_ctx_size,
+				(u8 *)user_ctx_ptr + host_ctx_size,
+				ctx_size - host_ctx_size)) {
+			pr_err("Copy from user failed\n");
+			rc = -EINVAL;
+			goto copy_from_user_failed;
+		}
+	}
+
+	/* Map sep_ctx */
+	ctx_info->sep_ctx_kptr = (struct sep_ctx_cache_entry *)
+	    (((unsigned long)ctx_info->ctx_kptr) +
+	     get_sep_ctx_offset(alg_class));
+	ctx_info->sep_ctx_dma_addr = dma_map_single(mydev,
+						    (void *)ctx_info->
+						    sep_ctx_kptr,
+						    sizeof(struct
+							   sep_ctx_cache_entry),
+						    DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(mydev, ctx_info->sep_ctx_dma_addr)) {
+		pr_err("Mapping sep_ctx for DMA failed");
+		rc = -ENOMEM;
+		goto sep_ctx_dma_map_failed;
+	}
+
+	ctx_info->sep_cache_idx = -1;
+
+	ctx_info->user_ptr = user_ctx_ptr;
+
+	return 0;
+
+ sep_ctx_dma_map_failed:
+ copy_from_user_failed:
+ ctx_cross_page:
+ unknown_alg_class:
+	kfree(ctx_info->ctx_kptr);
+	ctx_info->ctx_kptr = NULL;
+	page_cache_release(ctx_info->ctx_page);
+	ctx_info->ctx_page = NULL;
+	return rc;
+}
+
+/**
+ * ctxmgr_unmap_user_ctx() - Unmap given currently mapped user context
+ * @ctx_info:	 User context info structure
+ */
+void ctxmgr_unmap_user_ctx(struct client_crypto_ctx_info *ctx_info)
+{
+	size_t ctx_size;
+
+	if (ctx_info->ctx_kptr == NULL) {
+		/* This is a valid case since we invoke this function in some
+		   error cases without knowing if context was mapped or not */
+		pr_debug("Context not mapped\n");
+		return;
+	}
+
+	ctx_size = ctxmgr_get_ctx_size(ctx_info->ctx_kptr->alg_class);
+
+	dma_unmap_single(ctx_info->dev, ctx_info->sep_ctx_dma_addr,
+			 sizeof(struct sep_ctx_cache_entry), DMA_BIDIRECTIONAL);
+	ctx_info->sep_ctx_dma_addr = 0;
+
+	if (copy_to_user(ctx_info->user_ptr, ctx_info->ctx_kptr, ctx_size))
+		pr_err("Copy to user failed\n");
+
+	kfree(ctx_info->ctx_kptr);
+	ctx_info->ctx_kptr = NULL;
+
+	if (!PageReserved(ctx_info->ctx_page))
+		SetPageDirty(ctx_info->ctx_page);
+	page_cache_release(ctx_info->ctx_page);
+	ctx_info->ctx_page = NULL;
+
+	ctx_info->sep_cache_idx = -1;
+
+	ctx_info->user_ptr = NULL;
+
+}
+
+/**
+ * ctxmgr_map_kernel_ctx() - Map given kernel context + clone SeP context into
+ *				Privately allocated DMA buffer
+ *				(required for async. ops. on the same context)
+ * @ctx_info:	Client crypto context info structure
+ * @mydev:	Associated device context
+ * @alg_class:	If !ALG_CLASS_NONE, consider context of given class for
+ *		size validation (used in uninitialized context mapping)
+ *		When set to ALG_CLASS_NONE, the alg_class field of the
+ *		host_crypto_ctx is used to verify mapped buffer size.
+ * @kernel_ctx_p:	Pointer to kernel space crypto context
+ * @sep_ctx_p:	Pointer to (private) SeP context. If !NULL the embedded sep
+ *		context is copied into this buffer.
+ *		Set to NULL to use the one embedded in host_crypto_ctx.
+ * @sep_ctx_dma_addr:	DMA address of private SeP context (if sep_ctx_p!=NULL)
+ *
+ * Returns int 0 for success
+ */
+int ctxmgr_map_kernel_ctx(struct client_crypto_ctx_info *ctx_info,
+			  struct device *mydev,
+			  enum crypto_alg_class alg_class,
+			  struct host_crypto_ctx *kernel_ctx_p,
+			  struct sep_ctx_cache_entry *sep_ctx_p,
+			  dma_addr_t sep_ctx_dma_addr)
+{
+	int rc = 0;
+	size_t embedded_sep_ctx_offset = get_sep_ctx_offset(alg_class);
+	struct sep_ctx_cache_entry *embedded_sep_ctx_p =
+	    (struct sep_ctx_cache_entry *)(((unsigned long)kernel_ctx_p) +
+					   embedded_sep_ctx_offset);
+
+	if (embedded_sep_ctx_offset == 0)
+		return -EINVAL;
+	if (sep_ctx_p == NULL)	/* Default context is the one inside */
+		sep_ctx_p = embedded_sep_ctx_p;
+
+	pr_debug("kernel_ctx_p=%p\n", kernel_ctx_p);
+
+	ctx_info->dev = mydev;
+	/* These fields are only relevant for user space context mapping */
+	ctx_info->user_ptr = NULL;
+	ctx_info->ctx_page = NULL;
+	ctx_info->ctx_kptr = kernel_ctx_p;
+	ctx_info->sep_ctx_kptr = sep_ctx_p;
+	/* We assume that the CryptoAPI context of kernel is allocated using
+	   the slab pools, thus aligned to one of the standard blocks and
+	   does not cross page boundary (It is required to be physically
+	   contiguous for SeP DMA access) */
+	if ((((unsigned long)sep_ctx_p + sizeof(struct sep_ctx_cache_entry))
+	     >> PAGE_SHIFT) != ((unsigned long)sep_ctx_p >> PAGE_SHIFT)) {
+		pr_err("SeP context cross page boundary start=0x%lx len=0x%zX\n",
+		       (unsigned long)sep_ctx_p,
+		       sizeof(struct sep_ctx_cache_entry));
+		return -EINVAL;
+	}
+
+	/* Map sep_ctx if embedded in given host context */
+	/* (otherwise, assumed to be cache coherent DMA buffer) */
+	if (sep_ctx_p == embedded_sep_ctx_p) {
+		ctx_info->sep_ctx_dma_addr =
+				dma_map_single(mydev, sep_ctx_p,
+					       sizeof(struct
+						      sep_ctx_cache_entry),
+					       DMA_BIDIRECTIONAL);
+		if (dma_mapping_error(mydev, ctx_info->sep_ctx_dma_addr)) {
+			pr_err("Mapping sep_ctx for DMA failed");
+			rc = -ENOMEM;
+		}
+	} else {
+		ctx_info->sep_ctx_dma_addr = sep_ctx_dma_addr;
+		/* Clone base context into external SeP context buffer */
+		memcpy(sep_ctx_p, embedded_sep_ctx_p,
+		       sizeof(struct sep_ctx_cache_entry));
+	}
+
+	ctx_info->sep_cache_idx = -1;
+
+	return rc;
+}
+
+/**
+ * ctxmgr_unmap_kernel_ctx() - Unmap given currently mapped kernel context
+ * @ctx_info:	 User context info structure
+ */
+void ctxmgr_unmap_kernel_ctx(struct client_crypto_ctx_info *ctx_info)
+{
+	struct sep_ctx_cache_entry *embedded_sep_ctx_p;
+	size_t embedded_sep_ctx_offset;
+
+	if (ctx_info == NULL) {
+		pr_err("Context not mapped\n");
+		return;
+	}
+
+#ifdef DEBUG
+	if (ctx_info->ctx_kptr == NULL) {
+		/* This is a valid case since we invoke this function in some
+		   error cases without knowing if context was mapped or not */
+		pr_debug("Context not mapped\n");
+		return;
+	}
+#endif
+
+	embedded_sep_ctx_offset =
+	    get_sep_ctx_offset(ctx_info->ctx_kptr->alg_class);
+
+#ifdef DEBUG
+	if (embedded_sep_ctx_offset == 0) {
+		pr_err("Invalid algorithm class\n");
+		return;
+	}
+#endif
+
+	pr_debug("kernel_ctx_ptr=%p\n", ctx_info->ctx_kptr);
+	embedded_sep_ctx_p = (struct sep_ctx_cache_entry *)
+	    (((unsigned long)ctx_info->ctx_kptr) + embedded_sep_ctx_offset);
+
+	if (embedded_sep_ctx_p == ctx_info->sep_ctx_kptr) {
+		dma_unmap_single(ctx_info->dev, ctx_info->sep_ctx_dma_addr,
+				 sizeof(struct sep_ctx_cache_entry),
+				 DMA_BIDIRECTIONAL);
+	}
+
+	ctx_info->sep_ctx_kptr = NULL;
+	ctx_info->sep_ctx_dma_addr = 0;
+	ctx_info->ctx_kptr = NULL;
+	ctx_info->sep_cache_idx = -1;
+	ctx_info->dev = NULL;
+}
+
+/**
+ * get_blk_rem_buf() - Get a pointer to the (hash) block remainder buffer
+ *			structure
+ * @ctx_info:
+ *
+ * Returns struct hash_block_remainder*
+ */
+static struct hash_block_remainder *get_blk_rem_buf(struct
+						    client_crypto_ctx_info
+						    *ctx_info)
+{
+	struct host_crypto_ctx_hash *hash_ctx_p =
+	    (struct host_crypto_ctx_hash *)ctx_info->ctx_kptr;
+	struct host_crypto_ctx_mac *mac_ctx_p =
+	    (struct host_crypto_ctx_mac *)ctx_info->ctx_kptr;
+	struct hash_block_remainder *blk_rem_p = NULL;
+
+#ifdef DEBUG
+	if (ctx_info->ctx_kptr == NULL) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+	if ((hash_ctx_p->alg_class != ALG_CLASS_HASH) &&
+	    ctxmgr_get_mac_type(ctx_info) != DXDI_MAC_HMAC) {
+		pr_err("Not a hash/HMAC context\n");
+		SEP_DRIVER_BUG();
+	}
+#endif
+
+	/* Get the correct block remainder buffer structure */
+	if (hash_ctx_p->alg_class == ALG_CLASS_HASH)
+		blk_rem_p = &hash_ctx_p->hash_tail;
+	else			/* HMAC */
+		blk_rem_p = &mac_ctx_p->hmac_tail;
+
+	return blk_rem_p;
+}
+
+/**
+ * ctxmgr_map2dev_hash_tail() - Map hash data tail buffer in the host context
+ *				for DMA to device
+ * @ctx_info:
+ * @mydev:
+ *
+ * Returns int 0 on success
+ */
+int ctxmgr_map2dev_hash_tail(struct client_crypto_ctx_info *ctx_info,
+			     struct device *mydev)
+{
+	struct hash_block_remainder *blk_rem_p = get_blk_rem_buf(ctx_info);
+
+	if (blk_rem_p->size > 0) {
+		ctx_info->hash_tail_dma_addr = dma_map_single(mydev,
+							      (void *)
+							      blk_rem_p->data,
+							      blk_rem_p->size,
+							      DMA_TO_DEVICE);
+		if (dma_mapping_error(mydev, ctx_info->hash_tail_dma_addr)) {
+			pr_err("Mapping hash_tail for DMA failed");
+			return -ENOMEM;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * ctxmgr_unmap2dev_hash_tail() - Unmap hash data tail buffer from DMA to device
+ * @ctx_info:
+ * @mydev:
+ *
+ */
+void ctxmgr_unmap2dev_hash_tail(struct client_crypto_ctx_info *ctx_info,
+				struct device *mydev)
+{
+	struct hash_block_remainder *blk_rem_p = get_blk_rem_buf(ctx_info);
+
+	if (blk_rem_p->size > 0) {
+		dma_unmap_single(mydev, ctx_info->hash_tail_dma_addr,
+				 blk_rem_p->size, DMA_TO_DEVICE);
+	}
+}
+
+/**
+ * ctxmgr_set_ctx_state() - Set context state
+ * @ctx_info:	User context info structure
+ * @state:	State to set in context
+ *
+ * Returns void
+ */
+void ctxmgr_set_ctx_state(struct client_crypto_ctx_info *ctx_info,
+			  const enum host_ctx_state state)
+{
+
+#ifdef DEBUG
+	if (ctx_info->ctx_kptr == NULL) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+#endif
+	ctx_info->ctx_kptr->state = state;
+}
+
+/**
+ * ctxmgr_get_ctx_state() - Get context state
+ * @ctx_info:	 User context info structure
+ *
+ * Returns Current context state
+ */
+enum host_ctx_state ctxmgr_get_ctx_state(const struct client_crypto_ctx_info
+					 *ctx_info)
+{
+
+#ifdef DEBUG
+	if (ctx_info->ctx_kptr == NULL) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+#endif
+	return ctx_info->ctx_kptr->state;
+}
+
+/**
+ * ctxmgr_set_ctx_id() - Allocate unique ID for (initialized) user context
+ * @ctx_info:	 Client crypto context info structure
+ * @ctx_id:	 The unique ID allocated for given context
+ *
+ * Allocate unique ID for (initialized) user context
+ * (Assumes invoked within session mutex so no need for counter protection)
+ */
+void ctxmgr_set_ctx_id(struct client_crypto_ctx_info *ctx_info,
+		       const struct crypto_ctx_uid ctx_id)
+{
+
+#ifdef DEBUG
+	if (ctx_info->ctx_kptr == NULL) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+	pr_debug("ctx_id=0x%16llX for ctx@0x%p\n",
+		      ctx_id, (ctx_info->user_ptr == NULL) ?
+		      (void *)ctx_info->ctx_kptr : (void *)ctx_info->user_ptr);
+#endif
+	memcpy(&ctx_info->ctx_kptr->uid, &ctx_id,
+		sizeof(struct crypto_ctx_uid));
+}
+
+/**
+ * ctxmgr_get_ctx_id() - Return the unique ID for current user context
+ * @ctx_info:	 User context info structure
+ *
+ * Returns Allocated ID (or CTX_ID_INVALID if none)
+ */
+struct crypto_ctx_uid ctxmgr_get_ctx_id(struct client_crypto_ctx_info *ctx_info)
+{
+
+#ifdef DEBUG
+	if (ctx_info->ctx_kptr == NULL) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+#endif
+	return ctx_info->ctx_kptr->uid;
+}
+
+/**
+ * ctxmgr_get_session_id() - Return the session ID of given context ID
+ * @ctx_info:
+ *
+ * Return the session ID of given context ID
+ * This may be used to validate ID and verify that it was not tampered
+ * in a manner that can allow access to a session of another process
+ * Returns u32
+ */
+u64 ctxmgr_get_session_id(struct client_crypto_ctx_info *ctx_info)
+{
+#ifdef DEBUG
+	if (ctx_info->ctx_kptr == NULL) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+#endif
+	/* session ID is the 32 MS-bits of the context ID */
+	return ctx_info->ctx_kptr->uid.addr;
+}
+
+/**
+ * ctxmgr_get_alg_class() - Get algorithm class of context
+ * @ctx_info:	 User context info structure
+ *
+ * Returns Current algorithm class of context
+ */
+enum crypto_alg_class ctxmgr_get_alg_class(const struct client_crypto_ctx_info
+					   *ctx_info)
+{
+
+#ifdef DEBUG
+	if (ctx_info->ctx_kptr == NULL) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+#endif
+	return ctx_info->ctx_kptr->alg_class;
+}
+
+/**
+ * ctxmgr_get_crypto_blk_size() - Get the crypto-block length of given context
+ *					in octets
+ * @ctx_info:	 User context info structure
+ *
+ * Returns u32 Cypto-block size in bytes, 0 if invalid/unsupported alg.
+ */
+u32 ctxmgr_get_crypto_blk_size(struct client_crypto_ctx_info *ctx_info)
+{
+	u32 cblk_size = 0;
+#ifdef DEBUG
+	if (ctx_info->ctx_kptr == NULL) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+#endif
+
+	switch (ctx_info->ctx_kptr->alg_class) {
+
+	case ALG_CLASS_SYM_CIPHER:{
+			enum dxdi_sym_cipher_type cipher_type =
+			    ((struct host_crypto_ctx_sym_cipher *)
+			     ctx_info->ctx_kptr)->props.cipher_type;
+			if ((cipher_type >= _DXDI_SYMCIPHER_AES_FIRST) &&
+			    (cipher_type <= _DXDI_SYMCIPHER_AES_LAST)) {
+				cblk_size = SEP_AES_BLOCK_SIZE;
+			} else
+			    if (((cipher_type >= _DXDI_SYMCIPHER_DES_FIRST) &&
+				 (cipher_type <= _DXDI_SYMCIPHER_DES_LAST)) ||
+					    ((cipher_type >=
+					      _DXDI_SYMCIPHER_C2_FIRST) &&
+					     (cipher_type <=
+					      _DXDI_SYMCIPHER_C2_LAST))) {
+				/* DES and C2 have the same block size */
+				cblk_size = SEP_DES_BLOCK_SIZE;
+			} else {
+				pr_err("Invalid sym.cipher type %d",
+					    cipher_type);
+			}
+			break;	/*ALG_CLASS_SYM_CIPHER */
+		}
+
+	case ALG_CLASS_AUTH_ENC:{
+			enum dxdi_auth_enc_type ae_type =
+			    ((struct host_crypto_ctx_auth_enc *)
+			     ctx_info->ctx_kptr)->props.ae_type;
+			if (ae_type == DXDI_AUTHENC_AES_CCM)
+				cblk_size = SEP_AES_BLOCK_SIZE;
+			else
+				pr_err("Invalid auth.enc. type %d",
+					    ae_type);
+			break;
+		}
+
+	case ALG_CLASS_MAC:{
+			struct host_crypto_ctx_mac *ctx_p =
+			    ((struct host_crypto_ctx_mac *)ctx_info->ctx_kptr);
+			const enum dxdi_mac_type mac_type =
+			    ctx_p->props.mac_type;
+			switch (mac_type) {	/* switch for block size */
+			case DXDI_MAC_HMAC:
+				cblk_size =
+				    get_hash_block_size(ctx_p->props.
+							alg_specific.hmac.
+							hash_type);
+				break;
+			case DXDI_MAC_AES_CMAC:
+			case DXDI_MAC_AES_XCBC_MAC:
+			case DXDI_MAC_AES_MAC:
+				cblk_size = SEP_AES_BLOCK_SIZE;
+				break;
+			default:
+				pr_err("Invalid MAC type %d\n",
+					    ctx_p->props.mac_type);
+			}
+			break;	/* ALG_CLASS_MAC */
+		}
+
+	case ALG_CLASS_HASH:{
+			enum dxdi_hash_type hash_type =
+			    ((struct host_crypto_ctx_hash *)
+			     ctx_info->ctx_kptr)->hash_type;
+			cblk_size = get_hash_block_size(hash_type);
+			break;
+		}
+
+	default:
+		pr_err("Invalid algorithm class %d\n",
+			    ctx_info->ctx_kptr->alg_class);
+
+	}			/*switch alg_class */
+
+	return cblk_size;
+}
+
+/**
+ * ctxmgr_is_valid_adata_size() - Validate additional/associated data for
+ * @ctx_info:
+ * @adata_size:
+ *
+ * Validate additional/associated data for
+ * auth/enc algs.
+ * Returns bool
+ */
+bool ctxmgr_is_valid_adata_size(struct client_crypto_ctx_info *ctx_info,
+				unsigned long adata_size)
+{
+	struct host_crypto_ctx_auth_enc *host_ctx_p =
+	    (struct host_crypto_ctx_auth_enc *)ctx_info->ctx_kptr;
+
+	if (ctx_info->ctx_kptr->alg_class != ALG_CLASS_AUTH_ENC)
+		return false;
+
+	if ((adata_size != host_ctx_p->props.adata_size) || (adata_size == 0))
+		return false;
+
+	return true;
+}
+
+/**
+ * ctxmgr_is_valid_data_unit_size() - Validate given data unit for given
+ *					alg./mode
+ * @ctx_info:
+ * @data_unit_size:
+ * @is_finalize:
+ *
+ * Returns bool true is valid.
+ */
+bool ctxmgr_is_valid_size(struct client_crypto_ctx_info *ctx_info,
+				    unsigned long data_unit_size,
+				    bool is_finalize)
+{
+	if (!is_finalize && (data_unit_size == 0)) {
+		/* None allow 0 data for intermediate processing blocks */
+		pr_err("Given 0 B for intermediate processing!");
+		return false;
+	}
+
+	switch (ctx_info->ctx_kptr->alg_class) {
+
+	case ALG_CLASS_SYM_CIPHER:{
+		struct host_crypto_ctx_sym_cipher *host_ctx_p =
+		    (struct host_crypto_ctx_sym_cipher *)ctx_info->
+		    ctx_kptr;
+		struct sep_ctx_cipher *aes_ctx_p;
+
+		switch (host_ctx_p->props.cipher_type) {
+		case DXDI_SYMCIPHER_AES_XTS:
+			if (host_ctx_p->props.alg_specific.aes_xts.
+			    data_unit_size == 0) {
+				/* Initialize on first data unit if not
+				   provided by the user */
+				if (data_unit_size < 32) {
+					pr_err(
+						"AES-XTS data unit size too small (%lu). Must be at least 32B\n",
+						data_unit_size);
+					return false;
+				}
+				host_ctx_p->props.alg_specific.aes_xts.
+				    data_unit_size = data_unit_size;
+				aes_ctx_p = (struct sep_ctx_cipher *)
+				    &(host_ctx_p->sep_ctx);
+				aes_ctx_p->data_unit_size =
+				    cpu_to_le32(host_ctx_p->
+						props.
+						alg_specific.aes_xts.
+						data_unit_size);
+				break;
+			} else if (((is_finalize) &&
+				    (data_unit_size > 0)) ||
+					    (!is_finalize)) {
+				/* finalize which is not empty must be
+				   consistent with prev. data unit */
+				if (host_ctx_p->props.
+				    alg_specific.aes_xts.
+				    data_unit_size != data_unit_size) {
+					pr_err("Data unit mismatch. was %u. now %lu.\n",
+					       host_ctx_p->props.alg_specific.
+					       aes_xts.data_unit_size,
+					       data_unit_size);
+					return false;
+				}
+			}
+			break;
+		case DXDI_SYMCIPHER_AES_CTR:
+			if (!is_finalize) {	/* !finalize */
+				if (!IS_MULT_OF(data_unit_size,
+						SEP_AES_BLOCK_SIZE)) {
+					pr_err(
+						"Data unit size (%lu) is not AES block multiple\n",
+						data_unit_size);
+					return false;
+				}
+			}
+			break;
+		case DXDI_SYMCIPHER_AES_ECB:
+		case DXDI_SYMCIPHER_AES_CBC:
+			if (!IS_MULT_OF
+			    (data_unit_size, SEP_AES_BLOCK_SIZE)) {
+				pr_err(
+					"Data unit size (%lu) is not AES block multiple\n",
+					data_unit_size);
+				return false;
+			}
+			break;
+		case DXDI_SYMCIPHER_DES_ECB:
+		case DXDI_SYMCIPHER_DES_CBC:
+			if (!IS_MULT_OF
+			    (data_unit_size, SEP_DES_BLOCK_SIZE)) {
+				pr_err(
+					"Data unit size (%lu) is not DES block multiple\n",
+					data_unit_size);
+				return false;
+			}
+			break;
+		case DXDI_SYMCIPHER_C2_ECB:
+		case DXDI_SYMCIPHER_C2_CBC:
+			if (!IS_MULT_OF
+			    (data_unit_size, SEP_C2_BLOCK_SIZE)) {
+				pr_err(
+					"Data unit size (%lu) is not C2 block multiple\n",
+					data_unit_size);
+				return false;
+			}
+			break;
+		default:
+			pr_err("Invalid cipher type %d\n",
+				    host_ctx_p->props.cipher_type);
+			return false;
+		}
+
+		break;	/*ALG_CLASS_SYM_CIPHER */
+	}
+
+	case ALG_CLASS_AUTH_ENC:{
+		enum dxdi_auth_enc_type ae_type =
+		    ((struct host_crypto_ctx_auth_enc *)
+		     ctx_info->ctx_kptr)->props.ae_type;
+		if (ae_type == DXDI_AUTHENC_AES_CCM) {
+			if (!is_finalize) {	/* !finalize */
+				if (!IS_MULT_OF(data_unit_size,
+						SEP_AES_BLOCK_SIZE)) {
+					pr_err(
+						    "Data unit size (%lu) is not AES block multiple\n",
+						    data_unit_size);
+					return false;
+				}
+			}
+		} else {
+			pr_err("Invalid auth.enc. type %d",
+				    ae_type);
+			return false;
+		}
+		break;
+	}
+
+	case ALG_CLASS_MAC:{
+		struct host_crypto_ctx_mac *ctx_p =
+		    ((struct host_crypto_ctx_mac *)ctx_info->ctx_kptr);
+		const enum dxdi_mac_type mac_type =
+		    ctx_p->props.mac_type;
+		switch (mac_type) {	/* switch for block size */
+		case DXDI_MAC_HMAC:
+			break;	/* Any data unit size is allowed */
+		case DXDI_MAC_AES_CMAC:
+		case DXDI_MAC_AES_XCBC_MAC:
+			if (!is_finalize) {
+				if (!IS_MULT_OF(data_unit_size,
+						SEP_AES_BLOCK_SIZE)) {
+					pr_err(
+						    "Data unit size (%lu) is not AES block multiple\n",
+						    data_unit_size);
+					return false;
+				}
+			}
+			break;
+		case DXDI_MAC_AES_MAC:
+			if (!IS_MULT_OF
+			    (data_unit_size, SEP_AES_BLOCK_SIZE)) {
+				pr_err(
+					    "Data unit size (%lu) is not AES block multiple\n",
+					    data_unit_size);
+				return false;
+			}
+			break;
+		default:
+			pr_err("Invalid MAC type %d\n",
+				    ctx_p->props.mac_type);
+		}
+
+		ctx_p->client_data_size += data_unit_size;
+		break;	/* ALG_CLASS_MAC */
+	}
+
+	case ALG_CLASS_HASH:{
+		break;	/* Any data unit size is allowed for hash */
+	}
+
+	default:
+		pr_err("Invalid algorithm class %d\n",
+			    ctx_info->ctx_kptr->alg_class);
+
+	}			/*switch alg_class */
+
+	return true;		/* passed validations */
+}
+
+/**
+ * ctxmgr_get_sym_cipher_type() - Returns the sym cipher specific type.
+ * @ctx_info:	 The context info object of the sym cipher alg.
+ *
+ * Returns enum dxdi_sym_cipher_type The sym cipher type.
+ */
+enum dxdi_sym_cipher_type ctxmgr_get_sym_cipher_type(const struct
+						     client_crypto_ctx_info
+						     *ctx_info)
+{
+#ifdef DEBUG
+	if (ctx_info->ctx_kptr == NULL) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+#endif
+
+	return ((struct host_crypto_ctx_sym_cipher *)
+		ctx_info->ctx_kptr)->props.cipher_type;
+}
+
+/**
+ * ctxmgr_get_mac_type() - Returns the mac specific type.
+ * @ctx_info:	 The context info object of the mac alg.
+ *
+ * Returns enum dxdi_mac_type The mac type.
+ */
+enum dxdi_mac_type ctxmgr_get_mac_type(const struct client_crypto_ctx_info
+				       *ctx_info)
+{
+	struct host_crypto_ctx_mac *mac_ctx =
+	    (struct host_crypto_ctx_mac *)ctx_info->ctx_kptr;
+#ifdef DEBUG
+	if (ctx_info->ctx_kptr == NULL) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+#endif
+
+	if (mac_ctx->alg_class == ALG_CLASS_MAC)
+		return mac_ctx->props.mac_type;
+	else
+		return DXDI_MAC_NONE;
+}
+
+/**
+ * ctxmgr_get_hash_type() - Returns the hash specific type.
+ * @ctx_info:	 The context info object of the hash alg.
+ *
+ * Returns dxdi_hash_type The hash type.
+ */
+enum dxdi_hash_type ctxmgr_get_hash_type(const struct client_crypto_ctx_info
+					 *ctx_info)
+{
+#ifdef DEBUG
+	if (ctx_info->ctx_kptr == NULL) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+#endif
+
+	return ((struct host_crypto_ctx_hash *)ctx_info->ctx_kptr)->hash_type;
+}
+
+/**
+ * ctxmgr_save_hash_blk_remainder() - Save hash block tail data in given
+ *	context. The data is taken from the save4next chunk of given client
+ *	buffer.
+ * @ctx_info:	 Client context info structure (HASH's or HMAC's)
+ * @client_dma_buf_p:	A client DMA buffer object. Data is taken from the
+ *			save4next chunk of this buffer.
+ * @append_data:	When true, given data is appended to existing
+ *
+ * Returns 0 on success
+ */
+int ctxmgr_save_hash_blk_remainder(struct client_crypto_ctx_info *ctx_info,
+				   struct client_dma_buffer *client_dma_buf_p,
+				   bool append_data)
+{
+	u16 copy_offset;
+	int rc;
+	struct hash_block_remainder *blk_rem_p = get_blk_rem_buf(ctx_info);
+
+	copy_offset = append_data ? blk_rem_p->size : 0;
+	rc = llimgr_copy_from_client_buf_save4next(client_dma_buf_p,
+						   blk_rem_p->data +
+						   copy_offset,
+						   HASH_BLK_SIZE_MAX -
+						   copy_offset);
+	if (likely(rc >= 0)) {	/* rc is the num. of bytes copied */
+		blk_rem_p->size = copy_offset + rc;
+		pr_debug("Accumalted %u B at offset %u\n",
+			      rc, copy_offset);
+		rc = 0;	/* Caller of this function expects 0 on success */
+	} else {
+		pr_err("Failed copying hash block tail from user\n");
+	}
+	return rc;
+}
+
+/**
+ * ctxmgr_get_hash_blk_remainder_buf() - Get DMA info for hash block remainder
+ *	buffer from given context
+ * @ctx_info:			User context info structure
+ * @hash_blk_tail_dma_p:	Returned tail buffer DMA address
+ *
+ * Note: This function must be invoked only when tail_buf is mapped2dev
+ *	(using ctxmgr_map2dev_hash_tail)
+ * Returns u16 Number of valid bytes/octets in tail buffer
+ */
+u16 ctxmgr_get_hash_blk_remainder_buf(struct client_crypto_ctx_info *
+					   ctx_info,
+					   dma_addr_t *
+					   hash_blk_remainder_dma_p)
+{
+	struct hash_block_remainder *blk_rem_p = get_blk_rem_buf(ctx_info);
+
+	*hash_blk_remainder_dma_p = ctx_info->hash_tail_dma_addr;
+	return blk_rem_p->size;
+}
+
+/**
+ * ctxmgr_get_digest_or_mac() - Get the digest/MAC result when applicable
+ * @ctx_info:		User context info structure
+ * @digest_or_mac:	Pointer to digest/MAC buffer
+ *
+ * Returns Returned digest/MAC size
+ */
+u32 ctxmgr_get_digest_or_mac(struct client_crypto_ctx_info *ctx_info,
+				  u8 *digest_or_mac)
+{
+	u8 *digest_mac_source;
+	u32 digest_or_mac_size;
+
+	digest_or_mac_size = ctxmgr_get_digest_or_mac_ptr(ctx_info,
+							  &digest_mac_source);
+	if (digest_mac_source != NULL)
+		memcpy(digest_or_mac, digest_mac_source, digest_or_mac_size);
+	return digest_or_mac_size;
+}
+
+/**
+ * ctxmgr_get_digest_or_mac_ptr() - Get the digest/MAC pointer in SeP context
+ * @ctx_info:		User context info structure
+ * @digest_or_mac_pp:	Returned pointer to digest/MAC buffer
+ *
+ * Returns Returned digest/MAC size
+ * This function may be used for the caller to reference the result instead
+ * of always copying
+ */
+u32 ctxmgr_get_digest_or_mac_ptr(struct client_crypto_ctx_info *ctx_info,
+				      u8 **digest_or_mac_pp)
+{
+	struct sep_ctx_cache_entry *sep_ctx_p;
+	u32 digest_or_mac_size = 0;
+
+#ifdef DEBUG
+	if (ctx_info->ctx_kptr == NULL) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+#endif
+	*digest_or_mac_pp = NULL;	/* default */
+	sep_ctx_p = ctx_info->sep_ctx_kptr;
+	switch (le32_to_cpu(sep_ctx_p->alg)) {
+	case SEP_CRYPTO_ALG_HMAC:
+	/* HMAC context holds the MAC (digest) in the same place as
+	   HASH context */
+	case SEP_CRYPTO_ALG_HASH:{
+		struct sep_ctx_hash *hash_ctx_p =
+		    (struct sep_ctx_hash *)sep_ctx_p;
+		digest_or_mac_size =
+		    get_hash_digest_size(le32_to_cpu(hash_ctx_p->mode));
+		*digest_or_mac_pp = hash_ctx_p->digest;
+		break;
+	}
+	case SEP_CRYPTO_ALG_AES:{
+		struct sep_ctx_cipher *aes_ctx_p =
+		    (struct sep_ctx_cipher *)sep_ctx_p;
+		switch (le32_to_cpu(aes_ctx_p->mode)) {
+		case SEP_CIPHER_CBC_MAC:
+		case SEP_CIPHER_XCBC_MAC:
+		case SEP_CIPHER_CMAC:
+			digest_or_mac_size = SEP_AES_BLOCK_SIZE;
+		/* The AES MACs are returned in the block_state field */
+			*digest_or_mac_pp = aes_ctx_p->block_state;
+			break;
+		default:
+			break;	/* No MAC for others */
+		}
+		break;
+	}
+	case SEP_CRYPTO_ALG_AEAD:{
+		struct sep_ctx_aead *aead_ctx_p =
+		    (struct sep_ctx_aead *)sep_ctx_p;
+
+		if (le32_to_cpu(aead_ctx_p->mode) == SEP_CIPHER_CCM) {
+			digest_or_mac_size =
+			    le32_to_cpu(aead_ctx_p->tag_size);
+			*digest_or_mac_pp = aead_ctx_p->mac_state;
+		} else {
+			pr_err(
+				    "Invalid mode (%d) for SEP_CRYPTO_ALG_AEAD\n",
+				    le32_to_cpu(aead_ctx_p->mode));
+		}
+		break;
+	}
+	default:
+		;		/* No MAC/digest for the other algorithms */
+	}
+
+	return digest_or_mac_size;
+}
+
+static int set_sep_ctx_alg_mode(struct client_crypto_ctx_info *ctx_info,
+				const enum dxdi_sym_cipher_type cipher_type)
+{
+	struct sep_ctx_cipher *aes_ctx_p;
+	struct sep_ctx_cipher *des_ctx_p;
+	struct sep_ctx_c2 *c2_ctx_p;
+
+	if (ctx_info == NULL) {
+		pr_err("Context not mapped\n");
+		return -EINVAL;
+	}
+
+	aes_ctx_p = (struct sep_ctx_cipher *)ctx_info->sep_ctx_kptr;
+	des_ctx_p = (struct sep_ctx_cipher *)ctx_info->sep_ctx_kptr;
+	c2_ctx_p = (struct sep_ctx_c2 *)ctx_info->sep_ctx_kptr;
+
+	switch (cipher_type) {
+	case DXDI_SYMCIPHER_AES_ECB:
+	case DXDI_SYMCIPHER_AES_CBC:
+	case DXDI_SYMCIPHER_AES_CTR:
+	case DXDI_SYMCIPHER_AES_XTS:
+		aes_ctx_p->alg = cpu_to_le32(SEP_CRYPTO_ALG_AES);
+		break;
+	case DXDI_SYMCIPHER_DES_ECB:
+	case DXDI_SYMCIPHER_DES_CBC:
+		des_ctx_p->alg = cpu_to_le32(SEP_CRYPTO_ALG_DES);
+		break;
+	case DXDI_SYMCIPHER_C2_ECB:
+	case DXDI_SYMCIPHER_C2_CBC:
+		c2_ctx_p->alg = cpu_to_le32(SEP_CRYPTO_ALG_C2);
+		break;
+	case DXDI_SYMCIPHER_RC4:
+		return -ENOSYS;	/* Not supported via this API (only MSGs) */
+	default:
+		return -EINVAL;
+	}
+
+	/* mode specific initializations */
+	switch (cipher_type) {
+	case DXDI_SYMCIPHER_AES_ECB:
+		aes_ctx_p->mode = cpu_to_le32(SEP_CIPHER_ECB);
+		break;
+	case DXDI_SYMCIPHER_AES_CBC:
+		aes_ctx_p->mode = cpu_to_le32(SEP_CIPHER_CBC);
+		break;
+	case DXDI_SYMCIPHER_AES_CTR:
+		aes_ctx_p->mode = cpu_to_le32(SEP_CIPHER_CTR);
+		break;
+	case DXDI_SYMCIPHER_AES_XTS:
+		aes_ctx_p->mode = cpu_to_le32(SEP_CIPHER_XTS);
+		break;
+	case DXDI_SYMCIPHER_DES_ECB:
+		des_ctx_p->mode = cpu_to_le32(SEP_CIPHER_ECB);
+		break;
+	case DXDI_SYMCIPHER_DES_CBC:
+		des_ctx_p->mode = cpu_to_le32(SEP_CIPHER_CBC);
+		break;
+	case DXDI_SYMCIPHER_C2_ECB:
+		c2_ctx_p->mode = cpu_to_le32(SEP_C2_ECB);
+		break;
+	case DXDI_SYMCIPHER_C2_CBC:
+		c2_ctx_p->mode = cpu_to_le32(SEP_C2_CBC);
+		break;
+	case DXDI_SYMCIPHER_RC4:
+		return -ENOSYS;	/* Not supported via this API (only RPC) */
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * ctxmgr_set_symcipher_iv_user() - Set IV of symcipher context given in
+ *					user space pointer
+ * @user_ctx_ptr:	 A user space pointer to the host context
+ * @iv_ptr:	 The IV to set
+ *
+ * Returns int
+ */
+int ctxmgr_set_symcipher_iv_user(u32 __user *user_ctx_ptr,
+				 u8 *iv_ptr)
+{
+	struct host_crypto_ctx_sym_cipher __user *host_ctx_p =
+	    (struct host_crypto_ctx_sym_cipher __user *)user_ctx_ptr;
+	struct sep_ctx_cipher *aes_ctx_p =
+	    (struct sep_ctx_cipher *)&host_ctx_p->sep_ctx;
+	enum dxdi_sym_cipher_type cipher_type;
+
+	/* Copy cypher type from user context */
+	if (copy_from_user(&cipher_type, &host_ctx_p->props.cipher_type,
+			   sizeof(enum dxdi_sym_cipher_type))) {
+		pr_err("Failed reading input parameters");
+		return -EFAULT;
+	}
+
+	if ((cipher_type != DXDI_SYMCIPHER_AES_CBC) &&
+	    (cipher_type != DXDI_SYMCIPHER_AES_CTR) &&
+	    (cipher_type != DXDI_SYMCIPHER_AES_XTS)) {
+		return -EINVAL;
+	}
+
+	if (copy_to_user(aes_ctx_p->block_state, iv_ptr, SEP_AES_IV_SIZE) ||
+	    copy_to_user(host_ctx_p->props.alg_specific.aes_cbc.iv, iv_ptr,
+		     SEP_AES_IV_SIZE)) {
+		pr_err("Failed writing input parameters");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/**
+ * ctxmgr_get_symcipher_iv_user() - Read current "IV"
+ * @user_ctx_ptr:	 A user space pointer to the host context
+ * @iv_ptr:	 Where to return the read IV
+ *
+ * Read current "IV" (block state - not the actual set IV during initialization)
+ * This function works directly over a user space context
+ * Returns int
+ */
+int ctxmgr_get_symcipher_iv_user(u32 __user *user_ctx_ptr,
+				 u8 *iv_ptr)
+{
+	struct host_crypto_ctx_sym_cipher __user *host_ctx_p =
+	    (struct host_crypto_ctx_sym_cipher __user *)user_ctx_ptr;
+	struct sep_ctx_cipher *aes_ctx_p =
+	    (struct sep_ctx_cipher *)&host_ctx_p->sep_ctx;
+	enum dxdi_sym_cipher_type cipher_type;
+
+	/* Copy cypher type from user context */
+	if (copy_from_user(&cipher_type, &host_ctx_p->props.cipher_type,
+			   sizeof(enum dxdi_sym_cipher_type))) {
+		pr_err("Failed reading input parameters");
+		return -EFAULT;
+	}
+
+	if ((cipher_type != DXDI_SYMCIPHER_AES_CBC) &&
+	    (cipher_type != DXDI_SYMCIPHER_AES_CTR) &&
+	    (cipher_type != DXDI_SYMCIPHER_AES_XTS)) {
+		return -EINVAL;
+	}
+
+	if (copy_from_user(iv_ptr, aes_ctx_p->block_state, SEP_AES_IV_SIZE)) {
+		pr_err("Failed reading input parameters");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/**
+ * get_symcipher_iv_info() - Return the IV location in given context
+ *				(based on symcipher type)
+ * @ctx_info:
+ * @host_ctx_iv:	Returned IV pointer in host props field
+ * @sep_ctx_iv:		Returned IV state block in sep context
+ * @iv_size:	Size of IV in bytes (0 if IV is not applicable for this alg.)
+ *
+ */
+static void get_symcipher_iv_info(struct client_crypto_ctx_info *ctx_info,
+				  u8 **host_ctx_iv, u8 **sep_ctx_iv,
+				  unsigned long *iv_size)
+{
+	struct host_crypto_ctx_sym_cipher *host_ctx_p =
+	    (struct host_crypto_ctx_sym_cipher *)ctx_info->ctx_kptr;
+
+	switch (host_ctx_p->props.cipher_type) {
+	case DXDI_SYMCIPHER_AES_CBC:
+	case DXDI_SYMCIPHER_AES_CTR:
+	case DXDI_SYMCIPHER_AES_XTS:
+		*sep_ctx_iv = ((struct sep_ctx_cipher *)
+			       ctx_info->sep_ctx_kptr)->block_state;
+		*iv_size = SEP_AES_IV_SIZE;
+		break;
+	case DXDI_SYMCIPHER_DES_CBC:
+		*sep_ctx_iv = ((struct sep_ctx_cipher *)
+			       ctx_info->sep_ctx_kptr)->block_state;
+		*iv_size = SEP_DES_IV_SIZE;
+		break;
+	default:
+		*sep_ctx_iv = NULL;
+		*iv_size = 0;
+	}
+
+	switch (host_ctx_p->props.cipher_type) {
+	case DXDI_SYMCIPHER_AES_CBC:
+		*host_ctx_iv = host_ctx_p->props.alg_specific.aes_cbc.iv;
+		break;
+	case DXDI_SYMCIPHER_AES_CTR:
+		*host_ctx_iv = host_ctx_p->props.alg_specific.aes_ctr.cntr;
+		break;
+	case DXDI_SYMCIPHER_AES_XTS:
+		*host_ctx_iv =
+		    host_ctx_p->props.alg_specific.aes_xts.init_tweak;
+		break;
+	case DXDI_SYMCIPHER_DES_CBC:
+		*host_ctx_iv = host_ctx_p->props.alg_specific.des_cbc.iv;
+		break;
+	default:
+		*host_ctx_iv = NULL;
+	}
+
+}
+
+/**
+ * ctxmgr_set_symcipher_iv() - Set IV for given block symcipher algorithm
+ * @ctx_info:	Context to update
+ * @iv:		New IV
+ *
+ * Returns int 0 if changed IV, -EINVAL for error
+ * (given cipher type does not have IV)
+ */
+int ctxmgr_set_symcipher_iv(struct client_crypto_ctx_info *ctx_info,
+			    u8 *iv)
+{
+	u8 *host_ctx_iv;
+	u8 *sep_ctx_iv;
+	unsigned long iv_size = 0;
+
+	get_symcipher_iv_info(ctx_info, &host_ctx_iv, &sep_ctx_iv, &iv_size);
+	if (iv_size > 0 && host_ctx_iv != NULL && sep_ctx_iv != NULL) {
+		memcpy(sep_ctx_iv, iv, iv_size);
+		memcpy(host_ctx_iv, iv, iv_size);
+	}
+
+	return (iv_size > 0) ? 0 : -EINVAL;
+}
+
+/**
+ * ctxmgr_get_symcipher_iv() - Return given cipher IV
+ * @ctx_info:	Context to query
+ * @iv_user:	The IV given by the user on last ctxmgr_set_symcipher_iv
+ * @iv_current:	The current IV state block
+ * @iv_size_p:	[I/O] The given buffers size and returns actual IV size
+ *
+ * Return given cipher IV - Original IV given by user and current state "IV"
+ * The given IV buffers must be large enough to accomodate the IVs
+ * Returns int 0 on success, -ENOMEM if given iv_size is too small
+ */
+int ctxmgr_get_symcipher_iv(struct client_crypto_ctx_info *ctx_info,
+			    u8 *iv_user, u8 *iv_current,
+			    u8 *iv_size_p)
+{
+	u8 *host_ctx_iv;
+	u8 *sep_ctx_iv;
+	unsigned long iv_size;
+	int rc = 0;
+
+	get_symcipher_iv_info(ctx_info, &host_ctx_iv, &sep_ctx_iv, &iv_size);
+	if (iv_size > 0) {
+		if (*iv_size_p < iv_size) {
+			rc = -ENOMEM;
+		} else {
+			if (iv_current != NULL && sep_ctx_iv != NULL)
+				memcpy(iv_current, sep_ctx_iv, iv_size);
+			if (iv_user != NULL && host_ctx_iv != NULL)
+				memcpy(iv_user, host_ctx_iv, iv_size);
+		}
+	}
+
+	/* Always return IV size for informational purposes */
+	*iv_size_p = iv_size;
+	return rc;
+}
+
+/**
+ * ctxmgr_set_symcipher_direction() - Set the operation direction for given
+ *					symcipher context
+ * @ctx_info:		Context to update
+ * @dxdi_direction:	Requested cipher direction
+ *
+ * Returns int 0 on success
+ */
+int ctxmgr_set_symcipher_direction(struct client_crypto_ctx_info *ctx_info,
+				   enum dxdi_cipher_direction dxdi_direction)
+{
+	struct host_crypto_ctx_sym_cipher *host_ctx_p =
+	    (struct host_crypto_ctx_sym_cipher *)ctx_info->ctx_kptr;
+	enum sep_crypto_direction sep_direction;
+	struct sep_ctx_cipher *aes_ctx_p;
+	struct sep_ctx_cipher *des_ctx_p;
+	struct sep_ctx_c2 *c2_ctx_p;
+
+	/* Translate direction from driver ABI to SeP ABI */
+	if (dxdi_direction == DXDI_CDIR_ENC) {
+		sep_direction = SEP_CRYPTO_DIRECTION_ENCRYPT;
+	} else if (dxdi_direction == DXDI_CDIR_DEC) {
+		sep_direction = SEP_CRYPTO_DIRECTION_DECRYPT;
+	} else {
+		pr_err("Invalid direction=%d\n", dxdi_direction);
+		return -EINVAL;
+	}
+
+	switch (host_ctx_p->props.cipher_type) {
+	case DXDI_SYMCIPHER_AES_ECB:
+	case DXDI_SYMCIPHER_AES_CBC:
+	case DXDI_SYMCIPHER_AES_CTR:
+	case DXDI_SYMCIPHER_AES_XTS:
+		aes_ctx_p = (struct sep_ctx_cipher *)ctx_info->sep_ctx_kptr;
+		aes_ctx_p->direction = cpu_to_le32(sep_direction);
+		break;
+
+	case DXDI_SYMCIPHER_DES_ECB:
+	case DXDI_SYMCIPHER_DES_CBC:
+		des_ctx_p = (struct sep_ctx_cipher *)ctx_info->sep_ctx_kptr;
+		des_ctx_p->direction = cpu_to_le32(sep_direction);
+		break;
+
+	case DXDI_SYMCIPHER_C2_ECB:
+	case DXDI_SYMCIPHER_C2_CBC:
+		c2_ctx_p = (struct sep_ctx_c2 *)ctx_info->sep_ctx_kptr;
+		c2_ctx_p->direction = cpu_to_le32(sep_direction);
+		break;
+
+	case DXDI_SYMCIPHER_RC4:
+		pr_err("Invoked for RC4!\n");
+		return -ENOSYS;	/* Not supported via this API (only RPC) */
+	default:
+		pr_err("Invalid symcipher type %d\n",
+			    host_ctx_p->props.cipher_type);
+		return -EINVAL;
+	}
+
+	host_ctx_p->props.direction = dxdi_direction;
+
+	return 0;
+}
+
+/**
+ * ctxmgr_get_symcipher_direction() - Return the operation direction of given
+ *					symcipher context
+ * @ctx_info:	Context to query
+ *
+ * Returns enum dxdi_cipher_direction (<0 on error)
+ */
+enum dxdi_cipher_direction ctxmgr_get_symcipher_direction(struct
+							  client_crypto_ctx_info
+							  *ctx_info)
+{
+	struct host_crypto_ctx_sym_cipher *host_ctx_p =
+	    (struct host_crypto_ctx_sym_cipher *)ctx_info->ctx_kptr;
+	enum sep_crypto_direction sep_direction;
+	enum dxdi_cipher_direction dxdi_direction;
+	struct sep_ctx_cipher *aes_ctx_p;
+	struct sep_ctx_cipher *des_ctx_p;
+	struct sep_ctx_c2 *c2_ctx_p;
+
+	switch (host_ctx_p->props.cipher_type) {
+	case DXDI_SYMCIPHER_AES_ECB:
+	case DXDI_SYMCIPHER_AES_CBC:
+	case DXDI_SYMCIPHER_AES_CTR:
+	case DXDI_SYMCIPHER_AES_XTS:
+		aes_ctx_p = (struct sep_ctx_cipher *)ctx_info->sep_ctx_kptr;
+		sep_direction = le32_to_cpu(aes_ctx_p->direction);
+		break;
+
+	case DXDI_SYMCIPHER_DES_ECB:
+	case DXDI_SYMCIPHER_DES_CBC:
+		des_ctx_p = (struct sep_ctx_cipher *)ctx_info->sep_ctx_kptr;
+		sep_direction = le32_to_cpu(des_ctx_p->direction);
+		break;
+
+	case DXDI_SYMCIPHER_C2_ECB:
+	case DXDI_SYMCIPHER_C2_CBC:
+		c2_ctx_p = (struct sep_ctx_c2 *)ctx_info->sep_ctx_kptr;
+		sep_direction = le32_to_cpu(c2_ctx_p->direction);
+		break;
+
+	case DXDI_SYMCIPHER_RC4:
+		pr_err("Invoked for RC4!\n");
+		return -ENOSYS;	/* Not supported via this API (only RPC) */
+	default:
+		pr_err("Invalid symcipher type %d\n",
+			    host_ctx_p->props.cipher_type);
+		return -EINVAL;
+	}
+
+	/* Translate direction from driver ABI to SeP ABI */
+	if (sep_direction == SEP_CRYPTO_DIRECTION_ENCRYPT) {
+		dxdi_direction = DXDI_CDIR_ENC;
+	} else if (sep_direction == SEP_CRYPTO_DIRECTION_DECRYPT) {
+		dxdi_direction = DXDI_CDIR_DEC;
+	} else {
+		pr_err("Invalid (sep) direction=%d\n", sep_direction);
+		dxdi_direction = -EINVAL;
+	}
+
+	return dxdi_direction;
+}
+
+/*
+ * RFC2451: Weak DES key check
+ * Returns: 1 (weak), 0 (not weak)
+ */
+/**
+ * is_weak_des_key() - Validate DES weak keys per RFC2451 (section 2.3)
+ * @key:
+ *
+ * @keylen: 8 or 24 bytes
+ *
+ *
+ * Validate DES weak keys per RFC2451 (section 2.3)
+ * (weak key validation based on DX_CRYPTO_DES.c of cc52_crypto)
+ * Returns bool "true" for weak keys
+ */
+static bool is_weak_des_key(const u8 *key, unsigned int keylen)
+{
+	u32 n, w;
+	u64 *k1, *k2, *k3;	/* For 3DES/2DES checks */
+
+	if (keylen > 8) {/* For 3DES/2DES only validate no key repeatition */
+		k1 = (u64 *)key;
+		k2 = k1 + 1;
+		if (*k1 == *k2)
+			return true;
+		if (keylen > 16) {	/* 3DES */
+			k3 = k2 + 1;
+			if (*k2 == *k3)
+				return true;
+		}
+	}
+
+	/* Only for single-DES, check weak keys */
+	n = des_key_parity[key[0]];
+	n <<= 4;
+	n |= des_key_parity[key[1]];
+	n <<= 4;
+	n |= des_key_parity[key[2]];
+	n <<= 4;
+	n |= des_key_parity[key[3]];
+	n <<= 4;
+	n |= des_key_parity[key[4]];
+	n <<= 4;
+	n |= des_key_parity[key[5]];
+	n <<= 4;
+	n |= des_key_parity[key[6]];
+	n <<= 4;
+	n |= des_key_parity[key[7]];
+	w = 0x88888888L;
+
+	/* 1 in 10^10 keys passes this test */
+	if (!((n - (w >> 3)) & w)) {
+		switch (n) {
+		case 0x11111111:
+		case 0x13131212:
+		case 0x14141515:
+		case 0x16161616:
+		case 0x31312121:
+		case 0x33332222:
+		case 0x34342525:
+		case 0x36362626:
+		case 0x41415151:
+		case 0x43435252:
+		case 0x44445555:
+		case 0x46465656:
+		case 0x61616161:
+		case 0x63636262:
+		case 0x64646565:
+		case 0x66666666:
+			return true;
+		}
+	}
+	return false;
+}
+
+/**
+ * ctxmgr_set_symcipher_key() - Set a symcipher context key
+ * @ctx_info:	Context to update
+ * @key_size:	Size of key in bytes
+ * @key:	New key pointer
+ *
+ * Set a symcipher context key
+ * After invoking this function the context should be reinitialized by SeP
+ * (set its state to "partial init" if not done in this sequence)
+ * Returns int 0 on success, -EINVAL Invalid key len, -EPERM Forbidden/weak key
+ */
+int ctxmgr_set_symcipher_key(struct client_crypto_ctx_info *ctx_info,
+			     u8 key_size, const u8 *key)
+{
+	struct sep_ctx_cipher *aes_ctx_p = NULL;
+	struct sep_ctx_cipher *des_ctx_p = NULL;
+	struct sep_ctx_c2 *c2_ctx_p = NULL;
+	struct dxdi_sym_cipher_props *props =
+	    &((struct host_crypto_ctx_sym_cipher *)ctx_info->ctx_kptr)->props;
+
+	/* Update the respective sep context fields if valid */
+	switch (props->cipher_type) {
+	case DXDI_SYMCIPHER_AES_ECB:
+	case DXDI_SYMCIPHER_AES_CBC:
+	case DXDI_SYMCIPHER_AES_CTR:
+	case DXDI_SYMCIPHER_AES_XTS:
+		aes_ctx_p = (struct sep_ctx_cipher *)ctx_info->sep_ctx_kptr;
+		/* Validate key size before copying */
+		if (props->cipher_type == DXDI_SYMCIPHER_AES_XTS) {
+			/* XTS has two keys of either 128b or 256b */
+			if ((key_size != 32) && (key_size != 64)) {
+				pr_err(
+					"Invalid key size for AES-XTS (%u bits)\n",
+					key_size * 8);
+				return -EINVAL;
+			}
+			/* Divide by two (we have two keys of the same size) */
+			key_size >>= 1;
+			/* copy second half of the double-key as XEX-key */
+			memcpy(aes_ctx_p->xex_key, key + key_size, key_size);
+			/* Always clear data_unit_size on key change
+			   (Assumes new data source with possibly different
+			   data unit size). The actual data unit size
+			   would be concluded on the next data processing. */
+			props->alg_specific.aes_xts.data_unit_size = 0;
+			aes_ctx_p->data_unit_size = cpu_to_le32(0);
+		} else {	/* AES engine support 128b/192b/256b keys */
+			if ((key_size != 16) &&
+			    (key_size != 24) && (key_size != 32)) {
+				pr_err(
+					"Invalid key size for AES (%u bits)\n",
+					key_size * 8);
+				return -EINVAL;
+			}
+		}
+		memcpy(aes_ctx_p->key, key, key_size);
+		aes_ctx_p->key_size = cpu_to_le32(key_size);
+		break;
+
+	case DXDI_SYMCIPHER_DES_ECB:
+	case DXDI_SYMCIPHER_DES_CBC:
+		if (is_weak_des_key(key, key_size)) {
+			pr_info("Weak DES key.\n");
+			return -EPERM;
+		}
+		des_ctx_p = (struct sep_ctx_cipher *)ctx_info->sep_ctx_kptr;
+		des_ctx_p->key_size = cpu_to_le32(key_size);
+		if ((key_size != 8) && (key_size != 16) && (key_size != 24)) {
+			/* Avoid copying a key too large */
+			pr_err("Invalid key size for DES (%u bits)\n",
+				    key_size * 8);
+			return -EINVAL;
+		}
+		memcpy(des_ctx_p->key, key, key_size);
+		break;
+
+	case DXDI_SYMCIPHER_C2_ECB:
+	case DXDI_SYMCIPHER_C2_CBC:
+		c2_ctx_p = (struct sep_ctx_c2 *)ctx_info->sep_ctx_kptr;
+		c2_ctx_p->key_size = cpu_to_le32(key_size);
+		if (key_size != SEP_C2_KEY_SIZE_MAX) {
+			/* Avoid copying a key too large */
+			pr_err("Invalid key size for C2 (%u bits)\n",
+				    key_size * 8);
+			return -EINVAL;
+		}
+		memcpy(c2_ctx_p->key, key, key_size);
+		break;
+
+	case DXDI_SYMCIPHER_RC4:
+		return -ENOSYS;	/* Not supported via this API (only MSGs) */
+	default:
+		return -EINVAL;
+	}
+
+	/* If reached here then all validations passed */
+	/* Update in props of host context */
+	memcpy(props->key, key, key_size);
+	props->key_size = key_size;
+
+	return 0;
+}
+
+/**
+ * ctxmgr_init_symcipher_ctx_no_props() - Initialize symcipher context when full
+ *					props are not available, yet.
+ * @ctx_info:		Context to init.
+ * @cipher_type:	Cipher type for context
+ *
+ * Initialize symcipher context when full props are not available, yet.
+ * Later set_key and set_iv may update the context.
+ * Returns int 0 on success
+ */
+int ctxmgr_init_symcipher_ctx_no_props(struct client_crypto_ctx_info *ctx_info,
+				       enum dxdi_sym_cipher_type cipher_type)
+{
+	struct host_crypto_ctx_sym_cipher *host_ctx_p =
+	    (struct host_crypto_ctx_sym_cipher *)ctx_info->ctx_kptr;
+
+	/* Initialize host context part with just cipher type */
+	host_ctx_p->alg_class = ALG_CLASS_SYM_CIPHER;
+	memset(&(host_ctx_p->props), 0, sizeof(struct dxdi_sym_cipher_props));
+	host_ctx_p->props.cipher_type = cipher_type;
+	host_ctx_p->is_encrypted = false;
+
+	/* Clear SeP context before setting specific fields */
+	/* This helps in case SeP-FW assumes zero on uninitialized fields */
+	memset(ctx_info->sep_ctx_kptr, 0, sizeof(struct sep_ctx_cache_entry));
+
+	/* with only cipher_type we can initialize just the alg/mode fields */
+	return set_sep_ctx_alg_mode(ctx_info, cipher_type);
+}
+
+/**
+ * ctxmgr_init_symcipher_ctx() - Initialize symCipher context based on given
+ *				properties.
+ * @ctx_info:	 User context mapping info.
+ * @props:	 The initialization properties
+ * @postpone_init:	Return "true" if INIT on SeP should be postponed
+ *			to first processing (e.g, in AES-XTS)
+ * @error_info:	Error info
+ *
+ * Returns 0 on success, otherwise on error
+ */
+int ctxmgr_init_symcipher_ctx(struct client_crypto_ctx_info *ctx_info,
+			      struct dxdi_sym_cipher_props *props,
+			      bool *postpone_init, u32 *error_info)
+{
+	struct host_crypto_ctx_sym_cipher *host_ctx_p =
+	    (struct host_crypto_ctx_sym_cipher *)ctx_info->ctx_kptr;
+	struct sep_ctx_cipher *aes_ctx_p = NULL;
+	struct sep_ctx_cipher *des_ctx_p = NULL;
+	int rc;
+
+#ifdef DEBUG
+	if (ctx_info->ctx_kptr == NULL) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+#endif
+
+	*postpone_init = false;	/* default */
+	*error_info = DXDI_ERROR_NULL;	/* assume no error */
+
+	/* Initialize host context part */
+	host_ctx_p->alg_class = ALG_CLASS_SYM_CIPHER;
+	memcpy(&(host_ctx_p->props), props,
+	       sizeof(struct dxdi_sym_cipher_props));
+	host_ctx_p->is_encrypted = false;
+
+	/* Clear SeP context before setting specific fields */
+	/* This helps in case SeP-FW assumes zero on uninitialized fields */
+	memset(&(host_ctx_p->sep_ctx), 0, sizeof(host_ctx_p->sep_ctx));
+
+	rc = set_sep_ctx_alg_mode(ctx_info, props->cipher_type);
+	if (rc != 0) {
+		*error_info = DXDI_ERROR_INVAL_MODE;
+		return rc;
+	}
+
+	rc = ctxmgr_set_symcipher_direction(ctx_info, props->direction);
+	if (rc != 0) {
+		*error_info = DXDI_ERROR_INVAL_DIRECTION;
+		return rc;
+	}
+
+	rc = ctxmgr_set_symcipher_key(ctx_info, props->key_size, props->key);
+	if (rc != 0) {
+		*error_info = DXDI_ERROR_INVAL_KEY_SIZE;
+		return rc;
+	}
+
+	/* mode specific initializations */
+	switch (props->cipher_type) {
+	case DXDI_SYMCIPHER_AES_CBC:
+		aes_ctx_p = (struct sep_ctx_cipher *)ctx_info->sep_ctx_kptr;
+		memcpy(aes_ctx_p->block_state, props->alg_specific.aes_cbc.iv,
+		       SEP_AES_IV_SIZE);
+		break;
+	case DXDI_SYMCIPHER_AES_CTR:
+		aes_ctx_p = (struct sep_ctx_cipher *)ctx_info->sep_ctx_kptr;
+		memcpy(aes_ctx_p->block_state, props->alg_specific.aes_ctr.cntr,
+		       SEP_AES_IV_SIZE);
+		break;
+	case DXDI_SYMCIPHER_AES_XTS:
+		aes_ctx_p = (struct sep_ctx_cipher *)ctx_info->sep_ctx_kptr;
+		memcpy(aes_ctx_p->block_state,
+		       props->alg_specific.aes_xts.init_tweak, SEP_AES_IV_SIZE);
+		aes_ctx_p->data_unit_size =
+		    cpu_to_le32(props->alg_specific.aes_xts.data_unit_size);
+		/* update in context because was cleared by
+		   ctxmgr_set_symcipher_key */
+		host_ctx_p->props.alg_specific.aes_xts.data_unit_size =
+		    props->alg_specific.aes_xts.data_unit_size;
+		if (props->alg_specific.aes_xts.data_unit_size == 0)
+			*postpone_init = true;
+		break;
+	case DXDI_SYMCIPHER_DES_CBC:
+		des_ctx_p = (struct sep_ctx_cipher *)ctx_info->sep_ctx_kptr;
+		memcpy(des_ctx_p->block_state, props->alg_specific.des_cbc.iv,
+		       SEP_DES_IV_SIZE);
+		break;
+	case DXDI_SYMCIPHER_C2_CBC:
+		/*C2 reset interval is not supported, yet */
+		*error_info = DXDI_ERROR_UNSUP;
+		return -ENOSYS;
+	case DXDI_SYMCIPHER_RC4:
+		*error_info = DXDI_ERROR_UNSUP;
+		return -ENOSYS;	/* Not supported via this API (only MSGs) */
+	default:
+		break;	/* No specific initializations for other modes */
+	}
+
+	return 0;
+}
+
+/**
+ * ctxmgr_init_auth_enc_ctx() - Initialize Authenticated Encryption class
+ *				context
+ * @ctx_info:
+ * @props:
+ * @error_info:	Error info
+ *
+ * Returns int
+ */
+int ctxmgr_init_auth_enc_ctx(struct client_crypto_ctx_info *ctx_info,
+			     struct dxdi_auth_enc_props *props,
+			     u32 *error_info)
+{
+	struct host_crypto_ctx_auth_enc *host_ctx_p =
+	    (struct host_crypto_ctx_auth_enc *)ctx_info->ctx_kptr;
+	struct sep_ctx_aead *aead_ctx_p = NULL;
+	enum sep_crypto_direction direction;
+
+#ifdef DEBUG
+	if (ctx_info->ctx_kptr == NULL) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+#endif
+	*error_info = DXDI_ERROR_NULL;
+
+	/* Initialize host context part */
+	host_ctx_p->alg_class = ALG_CLASS_AUTH_ENC;
+	host_ctx_p->is_adata_processed = 0;
+	memcpy(&(host_ctx_p->props), props, sizeof(struct dxdi_auth_enc_props));
+	host_ctx_p->is_encrypted = false;
+
+	/* Clear SeP context before setting specific fields */
+	/* This helps in case SeP-FW assumes zero on uninitialized fields */
+	memset(&(host_ctx_p->sep_ctx), 0, sizeof(host_ctx_p->sep_ctx));
+
+	/* Translate direction from driver ABI to SeP ABI */
+	if (props->direction == DXDI_CDIR_ENC) {
+		direction = SEP_CRYPTO_DIRECTION_ENCRYPT;
+	} else if (props->direction == DXDI_CDIR_DEC) {
+		direction = SEP_CRYPTO_DIRECTION_DECRYPT;
+	} else {
+		pr_err("Invalid direction=%d\n", props->direction);
+		*error_info = DXDI_ERROR_INVAL_DIRECTION;
+		return -EINVAL;
+	}
+
+	/* initialize SEP context */
+	aead_ctx_p = (struct sep_ctx_aead *)&(host_ctx_p->sep_ctx);
+	aead_ctx_p->alg = cpu_to_le32(SEP_CRYPTO_ALG_AEAD);
+	aead_ctx_p->direction = cpu_to_le32(direction);
+	aead_ctx_p->header_size = cpu_to_le32(props->adata_size);
+	if (props->nonce_size > SEP_AES_BLOCK_SIZE) {
+		pr_err("Invalid nonce size=%u\n", aead_ctx_p->nonce_size);
+		*error_info = DXDI_ERROR_INVAL_NONCE_SIZE;
+		return -EINVAL;
+	}
+	aead_ctx_p->nonce_size = cpu_to_le32(props->nonce_size);
+	memcpy(aead_ctx_p->nonce, props->nonce, props->nonce_size);
+	if (props->tag_size > SEP_AES_BLOCK_SIZE) {
+		pr_err("Invalid tag_size size=%u\n", aead_ctx_p->tag_size);
+		*error_info = DXDI_ERROR_INVAL_TAG_SIZE;
+		return -EINVAL;
+	}
+	aead_ctx_p->tag_size = cpu_to_le32(props->tag_size);
+	aead_ctx_p->text_size = cpu_to_le32(props->text_size);
+	if ((props->key_size != 16) &&
+	    (props->key_size != 24) && (props->key_size != 32)) {
+		pr_err("Invalid key size for AEAD (%u bits)\n",
+			    props->key_size * 8);
+		*error_info = DXDI_ERROR_INVAL_KEY_SIZE;
+		return -EINVAL;
+	}
+	memcpy(aead_ctx_p->key, props->key, props->key_size);
+	aead_ctx_p->key_size = cpu_to_le32(props->key_size);
+
+	/* mode specific initializations */
+	switch (props->ae_type) {
+	case DXDI_AUTHENC_AES_CCM:
+		aead_ctx_p->mode = cpu_to_le32(SEP_CIPHER_CCM);
+		break;
+	case DXDI_AUTHENC_AES_GCM:
+		*error_info = DXDI_ERROR_UNSUP;
+		return -ENOSYS;	/* Not supported */
+	default:
+		*error_info = DXDI_ERROR_UNSUP;
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * ctxmgr_init_mac_ctx() - Initialize context for MAC algorithm
+ * @ctx_info:
+ * @props:
+ * @error_info:	Error info
+ *
+ * Returns int
+ */
+int ctxmgr_init_mac_ctx(struct client_crypto_ctx_info *ctx_info,
+			struct dxdi_mac_props *props, u32 *error_info)
+{
+	struct host_crypto_ctx_mac *host_ctx_p =
+	    (struct host_crypto_ctx_mac *)ctx_info->ctx_kptr;
+	struct sep_ctx_cipher *aes_ctx_p = NULL;
+	struct sep_ctx_hmac *hmac_ctx_p = NULL;
+	enum dxdi_hash_type hash_type;
+	enum sep_hash_mode hash_mode;
+
+#ifdef DEBUG
+	if (ctx_info->ctx_kptr == NULL) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+#endif
+
+	*error_info = DXDI_ERROR_NULL;
+
+	/* Initialize host context part */
+	host_ctx_p->alg_class = ALG_CLASS_MAC;
+	host_ctx_p->client_data_size = 0;
+	memcpy(&(host_ctx_p->props), props, sizeof(struct dxdi_mac_props));
+	host_ctx_p->hmac_tail.size = 0;
+	host_ctx_p->is_encrypted = false;
+
+	/* Clear SeP context before setting specific fields */
+	/* This helps in case SeP-FW assumes zero on uninitialized fields */
+	memset(&(host_ctx_p->sep_ctx), 0, sizeof(host_ctx_p->sep_ctx));
+
+	switch (props->mac_type) {
+	case DXDI_MAC_HMAC:
+		hmac_ctx_p = (struct sep_ctx_hmac *)&(host_ctx_p->sep_ctx);
+		hmac_ctx_p->alg = cpu_to_le32(SEP_CRYPTO_ALG_HMAC);
+		hash_type = props->alg_specific.hmac.hash_type;
+		hash_mode = get_sep_hash_mode(hash_type);
+		if (hash_mode == SEP_HASH_NULL) {
+			*error_info = DXDI_ERROR_INVAL_MODE;
+			return -EINVAL;
+		}
+		if (get_hash_block_size(hash_type) > SEP_HMAC_BLOCK_SIZE_MAX) {
+			pr_err(
+				    "Given hash type (%d) is not supported for HMAC\n",
+				    hash_type);
+			*error_info = DXDI_ERROR_UNSUP;
+			return -EINVAL;
+		}
+		hmac_ctx_p->mode = cpu_to_le32(hash_mode);
+		hmac_ctx_p->k0_size = cpu_to_le32(props->key_size);
+		if (props->key_size > SEP_HMAC_BLOCK_SIZE_MAX) {
+			pr_err("Invalid key size %u bits\n",
+				    props->key_size * 8);
+			*error_info = DXDI_ERROR_INVAL_KEY_SIZE;
+			return -EINVAL;
+		}
+		memcpy(&(hmac_ctx_p->k0), props->key, props->key_size);
+		break;
+
+	case DXDI_MAC_AES_MAC:
+	case DXDI_MAC_AES_CMAC:
+	case DXDI_MAC_AES_XCBC_MAC:
+		aes_ctx_p = (struct sep_ctx_cipher *)&(host_ctx_p->sep_ctx);
+		aes_ctx_p->alg = cpu_to_le32(SEP_CRYPTO_ALG_AES);
+		aes_ctx_p->direction =
+		    cpu_to_le32(SEP_CRYPTO_DIRECTION_ENCRYPT);
+		aes_ctx_p->key_size = cpu_to_le32(props->key_size);
+		if ((props->key_size > SEP_AES_KEY_SIZE_MAX) ||
+		    ((props->mac_type == DXDI_MAC_AES_XCBC_MAC) &&
+		     (props->key_size != SEP_AES_128_BIT_KEY_SIZE))) {
+			/* Avoid copying a key too large */
+			pr_err("Invalid key size for MAC (%u bits)\n",
+				    props->key_size * 8);
+			*error_info = DXDI_ERROR_INVAL_KEY_SIZE;
+			return -EINVAL;
+		}
+		memcpy(aes_ctx_p->key, props->key, props->key_size);
+		break;
+
+	default:
+		*error_info = DXDI_ERROR_UNSUP;
+		return -EINVAL;
+	}
+
+	/* AES mode specific initializations */
+	switch (props->mac_type) {
+	case DXDI_MAC_AES_MAC:
+		aes_ctx_p->mode = cpu_to_le32(SEP_CIPHER_CBC_MAC);
+		memcpy(aes_ctx_p->block_state, props->alg_specific.aes_mac.iv,
+		       SEP_AES_IV_SIZE);
+		break;
+	case DXDI_MAC_AES_CMAC:
+		aes_ctx_p->mode = cpu_to_le32(SEP_CIPHER_CMAC);
+		break;
+	case DXDI_MAC_AES_XCBC_MAC:
+		aes_ctx_p->mode = cpu_to_le32(SEP_CIPHER_XCBC_MAC);
+		break;
+	default:
+		/* Invalid type was already handled in previous "switch" */
+		break;
+	}
+
+	return 0;
+}
+
+/**
+ * ctxmgr_init_hash_ctx() - Initialize hash context
+ * @ctx_info:	 User context mapping info.
+ * @hash_type:	 Assigned hash type
+ * @error_info:	Error info
+ *
+ * Returns int 0 on success, -EINVAL, -ENOSYS
+ */
+int ctxmgr_init_hash_ctx(struct client_crypto_ctx_info *ctx_info,
+			 enum dxdi_hash_type hash_type, u32 *error_info)
+{
+	struct host_crypto_ctx_hash *host_ctx_p =
+	    (struct host_crypto_ctx_hash *)ctx_info->ctx_kptr;
+	struct sep_ctx_hash *sep_ctx_p;
+	enum sep_hash_mode hash_mode;
+
+#ifdef DEBUG
+	if (ctx_info->ctx_kptr == NULL) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+#endif
+
+	*error_info = DXDI_ERROR_NULL;
+
+	/* Limit to hash types supported by SeP */
+	if ((hash_type != DXDI_HASH_SHA1) &&
+	    (hash_type != DXDI_HASH_SHA224) &&
+	    (hash_type != DXDI_HASH_SHA256)) {
+		pr_err("Unsupported hash type %d\n", hash_type);
+		*error_info = DXDI_ERROR_UNSUP;
+		return -ENOSYS;
+	}
+
+	/* Initialize host context part */
+	host_ctx_p->alg_class = ALG_CLASS_HASH;
+	host_ctx_p->hash_type = hash_type;
+	host_ctx_p->hash_tail.size = 0;
+	host_ctx_p->is_encrypted = false;
+
+	/* Initialize SeP/FW context part */
+	sep_ctx_p = (struct sep_ctx_hash *)&(host_ctx_p->sep_ctx);
+	/* Clear SeP context before setting specific fields */
+	/* This helps in case SeP-FW assumes zero on uninitialized fields */
+	memset(sep_ctx_p, 0, sizeof(struct sep_ctx_hash));
+	sep_ctx_p->alg = cpu_to_le32(SEP_CRYPTO_ALG_HASH);
+	hash_mode = get_sep_hash_mode(hash_type);
+	if (hash_mode == SEP_HASH_NULL) {
+		*error_info = DXDI_ERROR_INVAL_MODE;
+		return -EINVAL;
+	}
+	sep_ctx_p->mode = cpu_to_le32(hash_mode);
+
+	return 0;
+}
+
+/**
+ * ctxmgr_set_sep_cache_idx() - Set the index of this context in the sep_cache
+ * @ctx_info:	 User context info structure
+ * @sep_cache_idx:	 The allocated index in SeP cache for this context
+ *
+ * Returns void
+ */
+void ctxmgr_set_sep_cache_idx(struct client_crypto_ctx_info *ctx_info,
+			      int sep_cache_idx)
+{
+#ifdef DEBUG
+	if (ctx_info->ctx_kptr == NULL) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+#endif
+	ctx_info->sep_cache_idx = sep_cache_idx;
+}
+
+/**
+ * ctxmgr_get_sep_cache_idx() - Get the index of this context in the sep_cache
+ * @ctx_info:	 User context info structure
+ *
+ * Returns The allocated index in SeP cache for this context
+ */
+int ctxmgr_get_sep_cache_idx(struct client_crypto_ctx_info *ctx_info)
+{
+#ifdef DEBUG
+	if (ctx_info->ctx_kptr == NULL) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+#endif
+	return ctx_info->sep_cache_idx;
+}
+
+#ifdef DEBUG
+static void dump_sep_aes_ctx(struct sep_ctx_cipher *ctx_p)
+{
+	pr_debug("Alg.=AES , Mode=%d , Direction=%d , Key size=%d\n",
+		      le32_to_cpu(ctx_p->mode),
+		      le32_to_cpu(ctx_p->direction),
+		      le32_to_cpu(ctx_p->key_size));
+	dump_byte_array("Key", ctx_p->key, le32_to_cpu(ctx_p->key_size));
+	dump_byte_array("block_state",
+			ctx_p->block_state, sizeof(ctx_p->block_state));
+	if (le32_to_cpu(ctx_p->mode) == SEP_CIPHER_XTS) {
+		pr_debug("data_unit_size=%u\n",
+			      le32_to_cpu(ctx_p->data_unit_size));
+		dump_byte_array("XEX-Key",
+				ctx_p->xex_key, le32_to_cpu(ctx_p->key_size));
+	}
+}
+
+static void dump_sep_aead_ctx(struct sep_ctx_aead *ctx_p)
+{
+	pr_debug(
+		      "Alg.=AEAD, Mode=%d, Direction=%d, Key size=%d, header size=%d, nonce size=%d, tag size=%d, text size=%d\n",
+		      le32_to_cpu(ctx_p->mode),
+		      le32_to_cpu(ctx_p->direction),
+		      le32_to_cpu(ctx_p->key_size),
+		      le32_to_cpu(ctx_p->header_size),
+		      le32_to_cpu(ctx_p->nonce_size),
+		      le32_to_cpu(ctx_p->tag_size),
+		      le32_to_cpu(ctx_p->text_size));
+	dump_byte_array("Key", ctx_p->key, le32_to_cpu(ctx_p->key_size));
+	dump_byte_array("block_state",
+			ctx_p->block_state, sizeof(ctx_p->block_state));
+	dump_byte_array("mac_state",
+			ctx_p->mac_state, sizeof(ctx_p->mac_state));
+	dump_byte_array("nonce", ctx_p->nonce, le32_to_cpu(ctx_p->nonce_size));
+}
+
+static void dump_sep_des_ctx(struct sep_ctx_cipher *ctx_p)
+{
+	pr_debug("Alg.=DES, Mode=%d, Direction=%d, Key size=%d\n",
+		      le32_to_cpu(ctx_p->mode),
+		      le32_to_cpu(ctx_p->direction),
+		      le32_to_cpu(ctx_p->key_size));
+	dump_byte_array("Key", ctx_p->key, le32_to_cpu(ctx_p->key_size));
+	dump_byte_array("IV", ctx_p->block_state, SEP_DES_IV_SIZE);
+}
+
+static void dump_sep_c2_ctx(struct sep_ctx_c2 *ctx_p)
+{
+	pr_debug("Alg.=C2, Mode=%d, Direction=%d, KeySz=%d, ResetInt.=%d",
+		      le32_to_cpu(ctx_p->mode),
+		      le32_to_cpu(ctx_p->direction),
+		      le32_to_cpu(ctx_p->key_size),
+		      0 /* reset_interval (CBC) not implemented, yet */);
+	dump_byte_array("Key", ctx_p->key, le32_to_cpu(ctx_p->key_size));
+}
+
+static const char *hash_mode_str(enum sep_hash_mode mode)
+{
+	switch (mode) {
+	case SEP_HASH_SHA1:
+		return "SHA1";
+	case SEP_HASH_SHA224:
+		return "SHA224";
+	case SEP_HASH_SHA256:
+		return "SHA256";
+	case SEP_HASH_SHA384:
+		return "SHA384";
+	case SEP_HASH_SHA512:
+		return "SHA512";
+	default:
+		return "(unknown)";
+	}
+}
+
+static void dump_sep_hash_ctx(struct sep_ctx_hash *ctx_p)
+{
+	pr_debug("Alg.=Hash , Mode=%s\n",
+		      hash_mode_str(le32_to_cpu(ctx_p->mode)));
+}
+
+static void dump_sep_hmac_ctx(struct sep_ctx_hmac *ctx_p)
+{
+	/* Alg./Mode of HMAC is identical to HASH */
+	pr_debug("Alg.=HMAC , Mode=%s\n",
+		      hash_mode_str(le32_to_cpu(ctx_p->mode)));
+	pr_debug("K0 size = %u B\n", le32_to_cpu(ctx_p->k0_size));
+	dump_byte_array("K0", ctx_p->k0, le32_to_cpu(ctx_p->k0_size));
+}
+
+/**
+ * ctxmgr_dump_sep_ctx() - Dump SeP context data
+ * @ctx_info:
+ *
+ */
+void ctxmgr_dump_sep_ctx(const struct client_crypto_ctx_info *ctx_info)
+{
+	struct sep_ctx_cache_entry *sep_ctx_p = ctx_info->sep_ctx_kptr;
+	enum sep_crypto_alg alg;
+	int ctx_idx;
+
+	/* For combined mode call recursively for each sub-context */
+	if (ctx_info->ctx_kptr->alg_class == ALG_CLASS_COMBINED) {
+		for (ctx_idx = 0; (ctx_info[ctx_idx].ctx_kptr != NULL) &&
+		     (ctx_idx < DXDI_COMBINED_NODES_MAX); ctx_idx++) {
+			ctxmgr_dump_sep_ctx(&ctx_info[ctx_idx]);
+		}
+		return;
+	}
+
+	alg = (enum sep_crypto_alg)le32_to_cpu(sep_ctx_p->alg);
+
+	pr_debug("SeP crypto context at %p: Algorithm=%d\n",
+		      sep_ctx_p, alg);
+	switch (alg) {
+	case SEP_CRYPTO_ALG_NULL:
+		break;		/* Nothing to dump */
+	case SEP_CRYPTO_ALG_AES:
+		dump_sep_aes_ctx((struct sep_ctx_cipher *)sep_ctx_p);
+		break;
+	case SEP_CRYPTO_ALG_AEAD:
+		dump_sep_aead_ctx((struct sep_ctx_aead *)sep_ctx_p);
+		break;
+	case SEP_CRYPTO_ALG_DES:
+		dump_sep_des_ctx((struct sep_ctx_cipher *)sep_ctx_p);
+		break;
+	case SEP_CRYPTO_ALG_C2:
+		dump_sep_c2_ctx((struct sep_ctx_c2 *)sep_ctx_p);
+		break;
+	case SEP_CRYPTO_ALG_HASH:
+		dump_sep_hash_ctx((struct sep_ctx_hash *)sep_ctx_p);
+		break;
+	case SEP_CRYPTO_ALG_HMAC:
+		dump_sep_hmac_ctx((struct sep_ctx_hmac *)sep_ctx_p);
+		break;
+	default:
+		pr_debug("(Unsupported algorithm dump - %d)\n", alg);
+	}
+
+}
+#endif
+
+/**
+ * ctxmgr_sync_sep_ctx() - Sync. SeP context to device (flush from cache...)
+ * @ctx_info:	 User context info structure
+ * @mydev:	 Associated device context
+ *
+ * Returns void
+ */
+void ctxmgr_sync_sep_ctx(const struct client_crypto_ctx_info *ctx_info,
+			 struct device *mydev)
+{
+	size_t embedded_sep_ctx_offset =
+	    get_sep_ctx_offset(ctx_info->ctx_kptr->alg_class);
+	struct sep_ctx_cache_entry *embedded_sep_ctx_p =
+	    (struct sep_ctx_cache_entry *)
+	    (((unsigned long)ctx_info->ctx_kptr) + embedded_sep_ctx_offset);
+
+#ifdef DEBUG
+	if (ctx_info->sep_ctx_dma_addr == 0) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+	if (embedded_sep_ctx_offset == 0)
+		pr_err("Invalid alg. class for algorithm\n");
+#endif
+
+	/* Only the embedded SeP context requires sync (it is in user memory)
+	   Otherwise, it is a cache coherent DMA buffer.                      */
+	if (ctx_info->sep_ctx_kptr == embedded_sep_ctx_p) {
+		dma_sync_single_for_device(mydev, ctx_info->sep_ctx_dma_addr,
+					   SEP_CTX_SIZE, DMA_BIDIRECTIONAL);
+	}
+}
+
+/**
+ * ctxmgr_get_sep_ctx_dma_addr() - Return DMA address of SeP (FW) area of
+ *					the context
+ * @ctx_info:	 User context info structure
+ *
+ * Returns DMA address of SeP (FW) area of the context
+ */
+dma_addr_t ctxmgr_get_sep_ctx_dma_addr(const struct client_crypto_ctx_info
+				       *ctx_info)
+{
+#ifdef DEBUG
+	if (ctx_info->sep_ctx_dma_addr == 0) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+#endif
+	return ctx_info->sep_ctx_dma_addr;
+}
+
+/**
+ * ctxmgr_sep_cache_create() - Create a SeP (FW) cache manager of given num.
+ *				of entries
+ * @num_of_entries:	 = Number of entries available in cache
+ *
+ * Returns void * handle (NULL on failure)
+ */
+void *ctxmgr_sep_cache_create(int num_of_entries)
+{
+	struct sep_ctx_cache *new_cache;
+	int i;
+
+	/* Allocate the sep_ctx_cache + additional entries beyond the one
+	 * that is included in the sep_ctx_cache structure */
+	new_cache = kmalloc(sizeof(struct sep_ctx_cache) +
+			    (num_of_entries - 1) *
+			    sizeof(struct ctxmgr_cache_entry), GFP_KERNEL);
+	if (new_cache == NULL) {
+		pr_err("Failed allocating SeP cache of %d entries\n",
+			    num_of_entries);
+		return NULL;
+	}
+
+	/* Initialize */
+	for (i = 0; i < num_of_entries; i++)
+		new_cache->entries[i].ctx_id.addr = CTX_ID_INVALID;
+
+	new_cache->cache_size = num_of_entries;
+
+	new_cache->lru_clk = 0;
+
+	return (void *)new_cache;
+}
+
+/**
+ * ctxmgr_sep_cache_destroy() - Destory SeP (FW) cache manager object
+ * @sep_cache:	 = The cache object
+ *
+ * Returns void
+ */
+void ctxmgr_sep_cache_destroy(void *sep_cache)
+{
+	struct sep_ctx_cache *this_cache = (struct sep_ctx_cache *)sep_cache;
+
+	kfree(this_cache);
+}
+
+/**
+ * ctxmgr_sep_cache_get_size() - Get cache size (entries count)
+ * @sep_cache:
+ *
+ * Returns int Number of cache entries available
+ */
+int ctxmgr_sep_cache_get_size(void *sep_cache)
+{
+	struct sep_ctx_cache *this_cache = sep_cache;
+
+	return this_cache->cache_size;
+}
+
+/**
+ * ctxmgr_sep_cache_alloc() - Allocate a cache entry of given SeP context cache
+ * @sep_cache:	 The cache object
+ * @ctx_id:	 The host crypto. context ID
+ * @load_required_p:	Pointed int is set to !0 if a cache load is required
+ *			(i.e., if item already loaded in cache it would be 0)
+ *
+ * Returns cache index
+ */
+int ctxmgr_sep_cache_alloc(void *sep_cache,
+			   struct crypto_ctx_uid ctx_id, int *load_required_p)
+{
+	struct sep_ctx_cache *this_cache = (struct sep_ctx_cache *)sep_cache;
+	int i;
+	int chosen_idx = 0;	/* first candidate... */
+
+	*load_required_p = 1;	/* until found assume a load is required */
+
+	/* First search for given ID or free/older entry  */
+	for (i = 0; i < this_cache->cache_size; i++) {
+		if (this_cache->entries[i].ctx_id.addr == ctx_id.addr
+		    && this_cache->entries[i].ctx_id.cntr == ctx_id.cntr) {
+			/* if found */
+			chosen_idx = i;
+			*load_required_p = 0;
+			break;
+		}
+		/* else... if no free entry, replace candidate with invalid
+		   or older entry */
+		if (this_cache->entries[chosen_idx].ctx_id.addr
+			!= CTX_ID_INVALID) {
+			if ((this_cache->entries[i].ctx_id.addr
+				== CTX_ID_INVALID) ||
+			    (this_cache->entries[chosen_idx].lru_time >
+			     this_cache->entries[i].lru_time)) {
+				/* Found free OR older entry */
+				chosen_idx = i;
+			}
+		}
+	}
+
+	/* Record allocation + update LRU "timestamp" */
+	this_cache->entries[chosen_idx].ctx_id.addr = ctx_id.addr;
+	this_cache->entries[chosen_idx].ctx_id.cntr = ctx_id.cntr;
+	this_cache->entries[chosen_idx].lru_time = this_cache->lru_clk++;
+
+#ifdef DEBUG
+	if (this_cache->lru_clk == 0xFFFFFFFF) {
+		pr_err("Reached lru_clk limit!\n");
+		SEP_DRIVER_BUG();
+		/* If this limit is found to be a practical real life
+		   case, a few workarounds may be used:
+		   1. Use a larger (64b) lru_clk
+		   2. Invalidate the whole cache before wrapping to 0
+		   3. Ignore this case - old contexts would persist over newer
+		   until they are all FINALIZEd and invalidated "manually".
+		   4. "shift down" existing timestamps so the lowest would be 0
+		   5. "pack" timestamps to be 0,1,2,... based on exisitng order
+		   and set lru_clk to the largest (which is the num. of
+		   entries)
+		 */
+	}
+#endif
+
+	return chosen_idx;
+}
+
+/**
+ * ctxmgr_sep_cache_invalidate() - Invalidate cache entry for given context ID
+ * @sep_cache:	 The cache object
+ * @ctx_id:	 The host crypto. context ID
+ * @id_mask:	 A bit mask to be used when comparing the ID
+ *                (to be used for a set of entries from the same client)
+ *
+ * Returns void
+ */
+void ctxmgr_sep_cache_invalidate(void *sep_cache,
+				 struct crypto_ctx_uid ctx_id,
+				 u64 id_mask)
+{
+	struct sep_ctx_cache *this_cache = (struct sep_ctx_cache *)sep_cache;
+	int i;
+
+	/* Search for given ID */
+	for (i = 0; i < this_cache->cache_size; i++) {
+		if ((this_cache->entries[i].ctx_id.addr) == ctx_id.addr) {
+			/* When invalidating single, check also counter */
+			if (id_mask == CRYPTO_CTX_ID_SINGLE_MASK
+			    && this_cache->entries[i].ctx_id.cntr
+			       != ctx_id.cntr)
+				continue;
+			this_cache->entries[i].ctx_id.addr = CTX_ID_INVALID;
+		}
+	}
+
+}
diff --git a/drivers/staging/sep54/crypto_ctx_mgr.h b/drivers/staging/sep54/crypto_ctx_mgr.h
new file mode 100644
index 0000000..a65eaf2
--- /dev/null
+++ b/drivers/staging/sep54/crypto_ctx_mgr.h
@@ -0,0 +1,694 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+#ifndef _CRYPTO_CTX_MGR_H_
+#define _CRYPTO_CTX_MGR_H_
+
+#include "sep_ctx.h"
+#include "dx_driver_abi.h"
+#include "lli_mgr.h"
+
+/* The largest hash block size is for SHA512 - 1024 bits */
+#define HASH_BLK_SIZE_MAX (1024>>3)	/*octets */
+
+/* Unique ID for a user context */
+/* This value is made unique by concatenating session ptr (in kernel)
+   with global counter incremented on each INIT phase                */
+#define CTX_ID_INVALID ((u64)0)
+
+enum host_ctx_state {
+	CTX_STATE_UNINITIALIZED = 0,
+	/* When a context is unitialized is can be any "garbage" since
+	   the context buffer is given from the user... */
+	CTX_STATE_INITIALIZED = 0x10000001,
+	/* INITIALIZED = Initialized */
+	CTX_STATE_PARTIAL_INIT = 0x10101011,
+	/* PARTIAL_INIT = Init. was done only on host. INIT on SeP was postponed
+	   - Requires INIT on next SeP operations. */
+};
+
+struct crypto_ctx_uid {
+	u64 addr;
+	u32 cntr;
+};
+
+/* Algorithm family/class enumeration */
+enum crypto_alg_class {
+	ALG_CLASS_NONE = 0,
+	ALG_CLASS_SYM_CIPHER,
+	ALG_CLASS_AUTH_ENC,
+	ALG_CLASS_MAC,
+	ALG_CLASS_HASH,
+	ALG_CLASS_COMBINED,
+	ALG_CLASS_MAX = ALG_CLASS_HASH
+};
+
+/* The common fields at start of a user context strcuture */
+#define HOST_CTX_COMMON						\
+		struct crypto_ctx_uid uid;					\
+		/* To hold CTX_VALID_SIG when initialized */	\
+		enum host_ctx_state state;			\
+		/*determine the context specification*/		\
+		enum crypto_alg_class alg_class;		\
+		/* Cast the whole struct to matching user_ctx_* struct */ \
+	/* When is_encrypted==true the props are not initialized and */\
+	/* the contained sep_ctx is encrypted (when created in SeP)  */\
+	/* Algorithm properties are encrypted */		\
+		bool is_encrypted; \
+		u32 sess_id; \
+		struct sep_client_ctx *sctx
+
+/* SeP context segment of a context */
+#ifdef CONFIG_NOT_COHERENT_CACHE
+/* Allocate cache line bytes before/after sep context to assure
+   its cache line does not enter the cache during its mapping to SeP */
+#define SEP_CTX_SEGMENT                                                        \
+	u8 reserved_before[L1_CACHE_BYTES];			       \
+	struct sep_ctx_cache_entry sep_ctx;                                    \
+	u8 reserved_after[L1_CACHE_BYTES]
+#else
+/* Cache is coherent - no need for "protection" margins */
+#define SEP_CTX_SEGMENT \
+	struct sep_ctx_cache_entry sep_ctx
+#endif
+
+/* Generic host context*/
+struct host_crypto_ctx {
+	HOST_CTX_COMMON;
+};
+
+/* user_ctx specification for symettric ciphers */
+struct host_crypto_ctx_sym_cipher {
+	HOST_CTX_COMMON;
+	struct dxdi_sym_cipher_props props;
+	 SEP_CTX_SEGMENT;
+};
+
+/* user_ctx specification for authenticated encryption */
+struct host_crypto_ctx_auth_enc {
+	HOST_CTX_COMMON;
+	bool is_adata_processed;/* flag indicates if adata was processed */
+	struct dxdi_auth_enc_props props;
+	 SEP_CTX_SEGMENT;
+};
+
+/* Host data for hash block remainders */
+struct hash_block_remainder {
+	u16 size;		/* Octets available in data_blk_tail */
+	u8 data[HASH_BLK_SIZE_MAX] __aligned(8);
+	/* data_blk_tail holds remainder of user data because the HW requires
+	   integral hash blocks but for last data block.
+	   We take it aligned to 8 in order to optimize HW access to it via
+	   the 64bit AXI bus */
+};
+
+/* user_ctx specification for MAC algorithms */
+struct host_crypto_ctx_mac {
+	HOST_CTX_COMMON;
+	u64 client_data_size;	/* Sum. up the data processed so far */
+	struct dxdi_mac_props props;
+	struct hash_block_remainder hmac_tail;
+	 SEP_CTX_SEGMENT;
+};
+
+/* user_ctx specification for Hash algorithms */
+struct host_crypto_ctx_hash {
+	HOST_CTX_COMMON;
+	enum dxdi_hash_type hash_type;
+	struct hash_block_remainder hash_tail;
+	 SEP_CTX_SEGMENT;
+};
+
+/**
+ * struct client_crypto_ctx_info - Meta data on the client application crypto
+ *					context buffer and its mapping
+ * @dev:	Device context of the context (DMA) mapping.
+ * @user_ptr:	address of current context in user space (if no kernel op.)
+ * @ctx_page:	Mapped user page where user_ptr is located
+ * @ctx_kptr:	Mapping context to kernel VA
+ * @sep_ctx_kptr:	Kernel VA of SeP context portion of the host context
+ *			(for async. operations, this may be outside of host
+ *			 context)
+ * @sep_ctx_dma_addr:	DMA address of SeP context
+ * @hash_tail_dma_addr:	DMA of host_ctx_hash:data_blk_tail
+ * @sep_cache_idx:	if >=0, saves the allocated sep cache entry index
+ */
+struct client_crypto_ctx_info {
+	struct device *dev;
+	u32 __user *user_ptr;	/* */
+	struct page *ctx_page;
+	struct host_crypto_ctx *ctx_kptr;
+	struct sep_ctx_cache_entry *sep_ctx_kptr;
+	dma_addr_t sep_ctx_dma_addr;
+	dma_addr_t hash_tail_dma_addr;
+	int sep_cache_idx;
+
+	int sess_id;
+	struct sep_client_ctx *sctx;
+};
+/* Macro to initialize the context info structure */
+#define USER_CTX_INFO_INIT(ctx_info_p)	\
+do {					\
+	memset((ctx_info_p), 0, sizeof(struct client_crypto_ctx_info)); \
+	(ctx_info_p)->sep_cache_idx = -1; /* 0 is a valid entry idx */ \
+} while (0)
+
+#define SEP_CTX_CACHE_NULL_HANDLE NULL
+
+/**
+ * ctxmgr_get_ctx_size() - Get host context size for given algorithm class
+ * @alg_class:	 Queries algorithm class
+ *
+ * Returns size_t Size in bytes of host context
+ */
+size_t ctxmgr_get_ctx_size(enum crypto_alg_class alg_class);
+
+/**
+ * ctxmgr_map_user_ctx() - Map given user context to kernel space + DMA
+ * @ctx_info:	 User context info structure
+ * @mydev:	 Associated device context
+ * @alg_class:	If !ALG_CLASS_NONE, consider context of given class for
+ *		size validation (used in uninitialized context mapping)
+ *		When set to ALG_CLASS_NONE, the alg_class field of the
+ *		host_ctx is used to verify mapped buffer size.
+ * @user_ctx_ptr: Pointer to user space context
+ *
+ * Returns int 0 for success
+ */
+int ctxmgr_map_user_ctx(struct client_crypto_ctx_info *ctx_info,
+			struct device *mydev,
+			enum crypto_alg_class alg_class,
+			u32 __user *user_ctx_ptr);
+
+/**
+ * ctxmgr_unmap_user_ctx() - Unmap given currently mapped user context
+ * @ctx_info:	 User context info structure
+ */
+void ctxmgr_unmap_user_ctx(struct client_crypto_ctx_info *ctx_info);
+
+/**
+ * ctxmgr_map_kernel_ctx() - Map given kernel context + clone SeP context into
+ *				Privately allocated DMA buffer
+ *				(required for async. ops. on the same context)
+ * @ctx_info:	Client crypto context info structure
+ * @mydev:	Associated device context
+ * @alg_class:	If !ALG_CLASS_NONE, consider context of given class for
+ *		size validation (used in uninitialized context mapping)
+ *		When set to ALG_CLASS_NONE, the alg_class field of the
+ *		host_crypto_ctx is used to verify mapped buffer size.
+ * @kernel_ctx_p:	Pointer to kernel space crypto context
+ * @sep_ctx_p:	Pointer to (private) SeP context. If !NULL the embedded sep
+ *		context is copied into this buffer.
+ *		Set to NULL to use the one embedded in host_crypto_ctx.
+ * @sep_ctx_dma_addr:	DMA address of private SeP context (if sep_ctx_p!=NULL)
+ *
+ * Returns int 0 for success
+ */
+int ctxmgr_map_kernel_ctx(struct client_crypto_ctx_info *ctx_info,
+			  struct device *mydev,
+			  enum crypto_alg_class alg_class,
+			  struct host_crypto_ctx *kernel_ctx_p,
+			  struct sep_ctx_cache_entry *sep_ctx_p,
+			  dma_addr_t sep_ctx_dma_addr);
+
+/**
+ * ctxmgr_unmap_kernel_ctx() - Unmap given currently mapped kernel context
+ *				(was mapped with map_kernel_ctx)
+ * @ctx_info:	 User context info structure
+ */
+void ctxmgr_unmap_kernel_ctx(struct client_crypto_ctx_info *ctx_info);
+
+/**
+ * ctxmgr_map2dev_hash_tail() - Map hash data tail buffer in the host context
+ *				for DMA to device
+ * @ctx_info:
+ * @mydev:
+ *
+ * Returns int 0 on success
+ */
+int ctxmgr_map2dev_hash_tail(struct client_crypto_ctx_info *ctx_info,
+			     struct device *mydev);
+/**
+ * ctxmgr_unmap2dev_hash_tail() - Unmap hash data tail buffer from DMA tp device
+ * @ctx_info:
+ * @mydev:
+ *
+ */
+void ctxmgr_unmap2dev_hash_tail(struct client_crypto_ctx_info *ctx_info,
+				struct device *mydev);
+
+/**
+ * ctxmgr_set_ctx_state() - Set context state
+ * @ctx_info:	 User context info structure
+ * @state:	    State to set in context
+ *
+ * Returns void
+ */
+void ctxmgr_set_ctx_state(struct client_crypto_ctx_info *ctx_info,
+			  const enum host_ctx_state state);
+
+/**
+ * ctxmgr_get_ctx_state() - Set context state
+ * @ctx_info:	 User context info structure
+ *
+ * Returns Current context state
+ */
+enum host_ctx_state ctxmgr_get_ctx_state(const struct client_crypto_ctx_info
+					 *ctx_info);
+
+/**
+ * ctxmgr_set_ctx_id() - Allocate unique ID for (initialized) user context
+ * @ctx_info:	 Client crypto context info structure
+ * @ctx_id:	 The unique ID allocated for given context
+ *
+ * Allocate unique ID for (initialized) user context
+ * (Assumes invoked within session mutex so no need for counter protection)
+ */
+void ctxmgr_set_ctx_id(struct client_crypto_ctx_info *ctx_info,
+		       const struct crypto_ctx_uid ctx_id);
+
+/**
+ * ctxmgr_get_ctx_id() - Return the unique ID for current user context
+ * @ctx_info:	 User context info structure
+ *
+ * Returns Allocated ID (or CTX_INVALID_ID if none)
+ */
+struct crypto_ctx_uid ctxmgr_get_ctx_id(struct client_crypto_ctx_info
+					*ctx_info);
+
+/**
+ * ctxmgr_get_session_id() - Return the session ID of given context ID
+ * @ctx_info:
+ *
+ * Return the session ID of given context ID
+ * This may be used to validate ID and verify that it was not tampered
+ * in a manner that can allow access to a session of another process
+ * Returns u32
+ */
+u64 ctxmgr_get_session_id(struct client_crypto_ctx_info *ctx_info);
+
+/**
+ * ctxmgr_get_alg_class() - Get algorithm class of context
+ *				(set during _init_ of a context)
+ * @ctx_info:	 User context info structure
+ *
+ * Returns Current algorithm class of context
+ */
+enum crypto_alg_class ctxmgr_get_alg_class(const struct client_crypto_ctx_info
+					   *ctx_info);
+
+/**
+ * ctxmgr_get_crypto_blk_size() - Get the crypto-block length of given context
+ *					in octets
+ * @ctx_info:	 User context info structure
+ *
+ * Returns u32 Cypto-block size in bytes, 0 if invalid/unsupported alg.
+ */
+u32 ctxmgr_get_crypto_blk_size(struct client_crypto_ctx_info *ctx_info);
+
+/**
+ * ctxmgr_is_valid_adata_size() - Validate additional/associated data for
+ * @ctx_info:
+ * @adata_size:
+ *
+ * Validate additional/associated data for
+ * auth/enc algs.
+ * Returns bool
+ */
+bool ctxmgr_is_valid_adata_size(struct client_crypto_ctx_info *ctx_info,
+				unsigned long adata_size);
+
+/**
+ * ctxmgr_is_valid_size() - Validate given data unit for given alg./mode
+ * @ctx_info:
+ * @data_unit_size:
+ * @is_finalize:
+ *
+ * Returns bool true is valid.
+ */
+bool ctxmgr_is_valid_size(struct client_crypto_ctx_info *ctx_info,
+				    unsigned long data_unit_size,
+				    bool is_finalize);
+
+/**
+ * ctxmgr_get_sym_cipher_type() - Returns the sym cipher specific type.
+ * @ctx_info:	 The context info object of the sym cipher alg.
+ *
+ * Returns enum dxdi_sym_cipher_type The sym cipher type.
+ */
+enum dxdi_sym_cipher_type ctxmgr_get_sym_cipher_type(const struct
+						     client_crypto_ctx_info
+						     *ctx_info);
+
+/**
+ * ctxmgr_get_mac_type() - Returns the mac specific type.
+ * @ctx_info:	 The context info object of the mac alg.
+ *
+ * Returns enum host_crypto_ctx_mac The mac type.
+ */
+enum dxdi_mac_type ctxmgr_get_mac_type(const struct client_crypto_ctx_info
+				       *ctx_info);
+
+/**
+ * ctxmgr_get_hash_type() - Returns the hash specific type.
+ * @ctx_info:	 The context info object of the hash alg.
+ *
+ * Returns enum dxdi_hash_type The hash type.
+ */
+enum dxdi_hash_type ctxmgr_get_hash_type(const struct client_crypto_ctx_info
+					 *ctx_info);
+
+/**
+ * ctxmgr_get_mac_type() - Returns the hash specific type.
+ * @ctx_info:	 The context info object of the hash alg.
+ *
+ * Returns enum dxdi_mac_type The hash type.
+ */
+enum dxdi_mac_type ctxmgr_get_mac_type(const struct client_crypto_ctx_info
+				       *ctx_info);
+
+/**
+ * ctxmgr_save_hash_blk_remainder() - Save hash block tail data in given
+ *	context. The data is taken from the save4next chunk of given client
+ *	buffer.
+ * @ctx_info:	 Client context info structure (HASH's or HMAC's)
+ * @client_dma_buf_p:	A client DMA buffer object. Data is taken from the
+ *			save4next chunk of this buffer.
+ * @append_data:	When true, given data is appended to existing
+ *
+ * Returns 0 on success
+ */
+int ctxmgr_save_hash_blk_remainder(struct client_crypto_ctx_info *ctx_info,
+				   struct client_dma_buffer *client_dma_buf_p,
+				   bool append_data);
+
+/**
+ * ctxmgr_get_hash_blk_remainder_buf() - Get DMA info for hash block remainder
+ *	buffer from given context
+ * @ctx_info:			User context info structure
+ * @hash_blk_tail_dma_p:	Returned tail buffer DMA address
+ *
+ * Note: This function must be invoked only when tail_buf is mapped2dev
+ *	(using ctxmgr_map2dev_hash_tail)
+ * Returns u16 Number of valid bytes/octets in tail buffer
+ */
+u16 ctxmgr_get_hash_blk_remainder_buf(struct client_crypto_ctx_info
+					   *ctx_info,
+					   dma_addr_t *
+					   hash_blk_remainder_dma_p);
+
+/**
+ * ctxmgr_get_digest_or_mac() - Get the digest/MAC result when applicable
+ * @ctx_info:		User context info structure
+ * @digest_or_mac:	Pointer to digest/MAC buffer
+ *
+ * Returns Returned digest/MAC size
+ */
+u32 ctxmgr_get_digest_or_mac(struct client_crypto_ctx_info *ctx_info,
+				  u8 *digest_or_mac);
+
+/**
+ * ctxmgr_get_digest_or_mac_ptr() - Get the digest/MAC pointer in SeP context
+ * @ctx_info:		User context info structure
+ * @digest_or_mac_pp:	Returned pointer to digest/MAC buffer
+ *
+ * Returns Returned digest/MAC size
+ * This function may be used for the caller to reference the result instead
+ * of always copying
+ */
+u32 ctxmgr_get_digest_or_mac_ptr(struct client_crypto_ctx_info *ctx_info,
+				      u8 **digest_or_mac_pp);
+
+/**
+ * ctxmgr_set_symcipher_iv_user() - Set IV of symcipher context given in
+ *					user space pointer
+ * @user_ctx_ptr:	 A user space pointer to the host context
+ * @iv_ptr:	 The IV to set
+ *
+ * Returns int
+ */
+int ctxmgr_set_symcipher_iv_user(u32 __user *user_ctx_ptr,
+				 u8 *iv_ptr);
+
+/**
+ * ctxmgr_get_symcipher_iv_user() - Read current "IV"
+ * @user_ctx_ptr:	A user space pointer to the host context
+ * @iv_ptr:		Where to return the read IV
+ *
+ * Read current "IV" (block state - not the actual set IV during initialization)
+ * This function works directly over a user space context
+ * Returns int
+ */
+int ctxmgr_get_symcipher_iv_user(u32 __user *user_ctx_ptr,
+				 u8 *iv_ptr);
+
+/**
+ * ctxmgr_set_symcipher_iv() - Set IV for given block symcipher algorithm
+ * @ctx_info:	Context to update
+ * @iv:		New IV
+ *
+ * Returns int 0 if changed IV, -EINVAL for error
+ * (given cipher type does not have IV)
+ */
+int ctxmgr_set_symcipher_iv(struct client_crypto_ctx_info *ctx_info,
+			    u8 *iv);
+
+/**
+ * ctxmgr_get_symcipher_iv() - Return given cipher IV
+ * @ctx_info:	Context to query
+ * @iv_user:	The IV given by the user on last ctxmgr_set_symcipher_iv
+ * @iv_current:	The current IV state block
+ * @iv_size_p:	[I/O] The given buffers size and returns actual IV size
+ *
+ * Return given cipher IV - Original IV given by user and current state "IV"
+ * The given IV buffers must be large enough to accomodate the IVs
+ * Returns int 0 on success, -ENOMEM if given iv_size is too small
+ */
+int ctxmgr_get_symcipher_iv(struct client_crypto_ctx_info *ctx_info,
+			    u8 *iv_user, u8 *iv_current,
+			    u8 *iv_size_p);
+
+/**
+ * ctxmgr_set_symcipher_direction() - Set the operation direction for given
+ *					symcipher context
+ * @ctx_info:		Context to update
+ * @dxdi_direction:	Requested cipher direction
+ *
+ * Returns int 0 on success
+ */
+int ctxmgr_set_symcipher_direction(struct client_crypto_ctx_info *ctx_info,
+				   enum dxdi_cipher_direction dxdi_direction);
+
+/**
+ * ctxmgr_get_symcipher_direction() - Return the operation direction of given
+ *					symcipher context
+ * @ctx_info:	Context to query
+ *
+ * Returns enum dxdi_cipher_direction (<0 on error)
+ */
+enum dxdi_cipher_direction ctxmgr_get_symcipher_direction(struct
+							  client_crypto_ctx_info
+							  *ctx_info);
+
+/**
+ * ctxmgr_set_symcipher_key() - Set a symcipher context key
+ * @ctx_info:	Context to update
+ * @key_size:	Size of key in bytes
+ * @key:	New key pointer
+ *
+ * Set a symcipher context key
+ * After invoking this function the context should be reinitialized by SeP
+ * (set its state to "partial init" if not done in this sequence)
+ * Returns int 0 on success, -EINVAL Invalid key len, -EPERM Forbidden/weak key
+ */
+int ctxmgr_set_symcipher_key(struct client_crypto_ctx_info *ctx_info,
+			     u8 key_size, const u8 *key);
+
+/**
+ * ctxmgr_init_symcipher_ctx_no_props() - Initialize symcipher context when full
+ *					props are not available, yet.
+ * @ctx_info:		Context to init.
+ * @cipher_type:	Cipher type for context
+ *
+ * Initialize symcipher context when full props are not available, yet.
+ * Later set_key and set_iv may update the context.
+ * Returns int 0 on success
+ */
+int ctxmgr_init_symcipher_ctx_no_props(struct client_crypto_ctx_info *ctx_info,
+				       enum dxdi_sym_cipher_type cipher_type);
+
+/**
+ * ctxmgr_init_symcipher_ctx() - Initialize symCipher context based on given
+ *				properties.
+ * @ctx_info:	 User context mapping info.
+ * @props:	 The initialization properties
+ * @postpone_init:	Return "true" if INIT on SeP should be postponed
+ *			to first processing (e.g, in AES-XTS)
+ * @error_info:	Error info
+ *
+ * Returns 0 on success, otherwise on error
+ */
+int ctxmgr_init_symcipher_ctx(struct client_crypto_ctx_info *ctx_info,
+			      struct dxdi_sym_cipher_props *props,
+			      bool *postpone_init, u32 *error_info);
+
+/**
+ * ctxmgr_init_auth_enc_ctx() - Initialize Authenticated Encryption class
+ *				context
+ * @ctx_info:
+ * @props:
+ * @error_info:	Error info
+ *
+ * Returns int
+ */
+int ctxmgr_init_auth_enc_ctx(struct client_crypto_ctx_info *ctx_info,
+			     struct dxdi_auth_enc_props *props,
+			     u32 *error_info);
+
+/**
+ * ctxmgr_init_mac_ctx() - Initialize context for MAC algorithm
+ * @ctx_info:
+ * @props:
+ * @error_info:	Error info
+ *
+ * Returns int
+ */
+int ctxmgr_init_mac_ctx(struct client_crypto_ctx_info *ctx_info,
+			struct dxdi_mac_props *props, u32 *error_info);
+
+/**
+ * ctxmgr_init_hash_ctx() - Initialize hash context
+ * @ctx_info:	 User context mapping info.
+ * @hash_type:	 Assigned hash type
+ * @error_info:	Error info
+ *
+ * Returns int 0 on success, -EINVAL, -ENOSYS
+ */
+int ctxmgr_init_hash_ctx(struct client_crypto_ctx_info *ctx_info,
+			 enum dxdi_hash_type hash_type, u32 *error_info);
+
+/**
+ * ctxmgr_set_sep_cache_idx() - Set the index of this context in the sep_cache
+ * @ctx_info:	 User context info structure
+ * @sep_cache_idx:	 The allocated index in SeP cache for this context
+ *
+ * Returns void
+ */
+void ctxmgr_set_sep_cache_idx(struct client_crypto_ctx_info *ctx_info,
+			      int sep_cache_idx);
+
+/**
+ * ctxmgr_get_sep_cache_idx() - Get the index of this context in the sep_cache
+ * @ctx_info:	 User context info structure
+ *
+ * Returns The allocated index in SeP cache for this context
+ */
+int ctxmgr_get_sep_cache_idx(struct client_crypto_ctx_info *ctx_info);
+
+#ifdef DEBUG
+/**
+ * ctxmgr_dump_sep_ctx() - Dump SeP context data
+ * @ctx_info:
+ *
+ */
+void ctxmgr_dump_sep_ctx(const struct client_crypto_ctx_info *ctx_info);
+#else
+#define ctxmgr_dump_sep_ctx(ctx_info) do {} while (0)
+#endif /*DEBUG*/
+/**
+ * ctxmgr_sync_sep_ctx() - Sync. SeP context to device (flush from cache...)
+ * @ctx_info:	 User context info structure
+ * @mydev:	 Associated device context
+ *
+ * Returns void
+ */
+void ctxmgr_sync_sep_ctx(const struct client_crypto_ctx_info *ctx_info,
+			 struct device *mydev);
+
+/**
+ * ctxmgr_get_sep_ctx_dma_addr() - Return DMA address of SeP (FW) area of the context
+ * @ctx_info:	 User context info structure
+ *
+ * Returns DMA address of SeP (FW) area of the context
+ */
+dma_addr_t ctxmgr_get_sep_ctx_dma_addr(const struct client_crypto_ctx_info
+				       *ctx_info);
+
+/******************************
+ * SeP context cache functions
+ ******************************/
+
+/**
+ * ctxmgr_sep_cache_create() - Create a SeP (FW) cache manager of given num. of
+ *				entries
+ * @num_of_entries:	 = Number of entries available in cache
+ *
+ * Returns void * handle (NULL on failure)
+ */
+void *ctxmgr_sep_cache_create(int num_of_entries);
+
+/**
+ * ctxmgr_sep_cache_destroy() - Destory SeP (FW) cache manager object
+ * @sep_cache:	 = The cache object
+ *
+ * Returns void
+ */
+void ctxmgr_sep_cache_destroy(void *sep_cache);
+
+/**
+ * ctxmgr_sep_cache_get_size() - Get cache size (entries count)
+ * @sep_cache:
+ *
+ * Returns int Number of cache entries available
+ */
+int ctxmgr_sep_cache_get_size(void *sep_cache);
+
+/**
+ * ctxmgr_sep_cache_alloc() - Allocate a cache entry of given SeP context cache
+ * @sep_cache:	 The cache object
+ * @ctx_id:	 The user context ID
+ * @load_required_p:	Pointed int is set to !0 if a cache load is required
+ *			(i.e., if item already loaded in cache it would be 0)
+ *
+ * Returns cache index
+ */
+int ctxmgr_sep_cache_alloc(void *sep_cache,
+			   struct crypto_ctx_uid ctx_id, int *load_required_p);
+
+/**
+ * ctxmgr_sep_cache_invalidate() - Invalidate cache entry for given context ID
+ * @sep_cache:	The cache object
+ * @ctx_id:	The host crypto. context ID
+ * @id_mask:	A bit mask to be used when comparing the ID
+ *		(to be used for a set of entries from the same client)
+ *
+ * Returns void
+ */
+void ctxmgr_sep_cache_invalidate(void *sep_cache,
+				 struct crypto_ctx_uid ctx_id,
+				 u64 id_mask);
+
+#endif /*_CRYPTO_CTX_MGR_H_*/
diff --git a/drivers/staging/sep54/crypto_hwk.c b/drivers/staging/sep54/crypto_hwk.c
new file mode 100644
index 0000000..32d999c
--- /dev/null
+++ b/drivers/staging/sep54/crypto_hwk.c
@@ -0,0 +1,419 @@
+/*
+ *  Copyright(c) 2012-2013 Intel Corporation. All rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ *  more details.
+ *
+ *  You should have received a copy of the GNU General Public License along with
+ *  this program; if not, write to the Free Software Foundation, Inc., 59
+ *  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ */
+
+#define pr_fmt(fmt) "sep_hwk: " fmt
+
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <crypto/algapi.h>
+#include <linux/highmem.h>
+
+#include "dx_driver.h"
+#include "sep_sysfs.h"
+#include "sep_power.h"
+#include "dx_sepapp_kapi.h"
+
+#define HWK_APP_UUID "INTEL HWK 000001"
+#define HWK_CMD_CRYPTO 8
+
+struct hwk_context {
+	struct sep_client_ctx *sctx;
+	u32 sess_id, key_id;
+};
+
+static inline void hwk_pm_runtime_get(void)
+{
+#ifdef SEP_RUNTIME_PM
+	dx_sep_pm_runtime_get();
+#endif
+}
+
+static inline void hwk_pm_runtime_put(void)
+{
+#ifdef SEP_RUNTIME_PM
+	dx_sep_pm_runtime_put();
+#endif
+}
+
+static int hwk_ctx_init(struct crypto_tfm *tfm)
+{
+	struct hwk_context *hctx = crypto_tfm_ctx(tfm);
+	u8 uuid[16] = HWK_APP_UUID;
+	enum dxdi_sep_module ret;
+	int rc;
+
+	hwk_pm_runtime_get();
+
+	hctx->sctx = dx_sepapp_context_alloc();
+	if (!hctx->sctx) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	pr_debug("%s: opening session\n", __func__);
+	rc = dx_sepapp_session_open(hctx->sctx, uuid, 0, NULL, NULL,
+				    &hctx->sess_id, &ret);
+	pr_debug("%s: %d: %p %d\n", __func__, rc, hctx->sctx, hctx->sess_id);
+	if (rc != 0)
+		dx_sepapp_context_free(hctx->sctx);
+
+out:
+	hwk_pm_runtime_put();
+
+	return rc;
+}
+
+static void hwk_ctx_cleanup(struct crypto_tfm *tfm)
+{
+	struct hwk_context *hctx = crypto_tfm_ctx(tfm);
+
+	pr_debug("%s: %p %d\n", __func__, hctx->sctx, hctx->sess_id);
+	if (dx_sepapp_session_close(hctx->sctx, hctx->sess_id))
+		BUG();
+	dx_sepapp_context_free(hctx->sctx);
+	pr_debug("%s: session closed\n", __func__);
+}
+
+static int hwk_set_key(struct crypto_ablkcipher *tfm,
+			     const u8 *key, unsigned int keylen)
+{
+	struct hwk_context *hctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm));
+
+	hctx->key_id = *((u32 *)key);
+	pr_debug("%s: key_id=%d\n", __func__, hctx->key_id);
+	return 0;
+}
+
+#if defined(HWK_ST_DUMP_BUF) || defined(HWK_DUMP_BUF)
+static void hwk_dump_buf(u8 *buf, const char *buf_name, int len)
+{
+	int i;
+
+	for (i = 0; i < len; i++) {
+		if (i % 64 == 0)
+			printk("\n%s: ", buf_name);
+		printk("%02x", buf[i]);
+	}
+	printk("\n");
+}
+#endif
+
+#ifdef HWK_DUMP_BUF
+static void hwk_dump_sg(struct scatterlist *sg, const char *buf_name)
+{
+	u8 *buf = kmap(sg_page(sg));
+	hwk_dump_buf(buf + sg->offset, buf_name, sg->length);
+	kunmap(sg_page(sg));
+}
+#endif
+
+static int hwk_process(struct ablkcipher_request *req, bool encrypt)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct hwk_context *hctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm));
+	struct dxdi_sepapp_kparams p;
+	enum dxdi_sep_module ret_origin;
+	struct scatterlist iv_sg;
+	struct page *iv_page;
+	int rc;
+
+	iv_page = virt_to_page(req->info);
+	sg_init_table(&iv_sg, 1);
+	sg_set_page(&iv_sg, iv_page, SEP_AES_IV_SIZE,
+		    (unsigned long)req->info % PAGE_SIZE);
+
+#ifdef HWK_DUMP_BUF
+	hwk_dump_buf(req->info, "iv", SEP_AES_IV_SIZE);
+	hwk_dump_sg(&iv_sg, "iv");
+	hwk_dump_sg(req->src, "src");
+#endif
+
+	memset(&p, 0, sizeof(p));
+
+	p.params_types[0] = DXDI_SEPAPP_PARAM_VAL;
+	p.params[0].val.data[0] = hctx->key_id;
+	p.params[0].val.data[1] = req->nbytes | (encrypt << 16);
+	p.params[0].val.copy_dir = DXDI_DATA_TO_DEVICE;
+
+	p.params_types[1] = DXDI_SEPAPP_PARAM_MEMREF;
+	p.params[1].kmemref.dma_direction = DXDI_DATA_TO_DEVICE;
+	p.params[1].kmemref.sgl = &iv_sg;
+	p.params[1].kmemref.nbytes = SEP_AES_IV_SIZE;
+
+	p.params_types[2] = DXDI_SEPAPP_PARAM_MEMREF;
+	p.params[2].kmemref.dma_direction = DXDI_DATA_TO_DEVICE;
+	p.params[2].kmemref.sgl = req->src;
+	p.params[2].kmemref.nbytes = req->nbytes;
+
+	p.params_types[3] = DXDI_SEPAPP_PARAM_MEMREF;
+	p.params[3].kmemref.dma_direction = DXDI_DATA_FROM_DEVICE;
+	p.params[3].kmemref.sgl = req->dst;
+	p.params[3].kmemref.nbytes = req->nbytes;
+
+	pr_debug("%s: size=%d dir=%d\n", __func__, req->nbytes, encrypt);
+	rc = dx_sepapp_command_invoke(hctx->sctx, hctx->sess_id,
+				      HWK_CMD_CRYPTO, &p, &ret_origin);
+	pr_debug("%s: done: %d\n", __func__, rc);
+
+	if (rc != 0) {
+		pr_err("%s: error invoking command %d: %x (ret_origin= %x)\n",
+			__func__, HWK_CMD_CRYPTO, rc, ret_origin);
+		return -EINVAL;
+	}
+
+#ifdef HWK_DUMP_BUF
+	hwk_dump_sg(req->dst, "dst");
+#endif
+
+	return rc;
+}
+
+static int hwk_encrypt(struct ablkcipher_request *req)
+{
+	return hwk_process(req, true);
+}
+
+static int hwk_decrypt(struct ablkcipher_request *req)
+{
+	return hwk_process(req, false);
+}
+
+static struct crypto_alg hwk_alg = {
+	.cra_priority = 0,
+	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_ctxsize = sizeof(struct hwk_context),
+	.cra_alignmask = 0, /* Cannot use this due to bug in kernel */
+	.cra_type = &crypto_ablkcipher_type,
+	.cra_module = THIS_MODULE,
+	.cra_name = "cbchk(aes)",
+	.cra_driver_name = MODULE_NAME "-aes-cbchk",
+	.cra_blocksize = SEP_AES_BLOCK_SIZE,
+	.cra_u = {
+		.ablkcipher = {
+			.min_keysize = SEP_AES_256_BIT_KEY_SIZE,
+			.max_keysize = SEP_AES_256_BIT_KEY_SIZE,
+			.ivsize = SEP_AES_IV_SIZE,
+			.setkey = hwk_set_key,
+			.encrypt = hwk_encrypt,
+			.decrypt = hwk_decrypt,
+		},
+	},
+	.cra_init = hwk_ctx_init,
+	.cra_exit = hwk_ctx_cleanup
+};
+
+int hwk_init(void)
+{
+	int rc = crypto_register_alg(&hwk_alg);
+	if (rc != 0)
+		pr_err("failed to register %s\n", hwk_alg.cra_name);
+	return rc;
+}
+
+void hwk_fini(void)
+{
+	crypto_unregister_alg(&hwk_alg);
+}
+
+#ifdef SEP_HWK_UNIT_TEST
+enum hwk_self_test {
+	HWK_ST_NOT_STARTED = 0,
+	HWK_ST_RUNNING,
+	HWK_ST_SUCCESS,
+	HWK_ST_ERROR
+};
+
+static enum hwk_self_test hwk_st_status = HWK_ST_NOT_STARTED;
+static const char * const hwk_st_strings[] = {
+	"not started",
+	"running",
+	"success",
+	"error"
+};
+
+ssize_t sys_hwk_st_show(struct kobject *kobj, struct kobj_attribute *attr,
+			char *buf)
+{
+
+	return sprintf(buf, "%s\n", hwk_st_strings[hwk_st_status]);
+}
+
+struct hwk_st_op_result {
+	struct completion completion;
+	int rc;
+};
+
+static void hwk_st_op_complete(struct crypto_async_request *req, int rc)
+{
+	struct hwk_st_op_result *hr = req->data;
+
+	if (rc == -EINPROGRESS)
+		return;
+
+	hr->rc = rc;
+	complete(&hr->completion);
+}
+
+static int hwk_st_do_op(struct ablkcipher_request *req, struct page *src,
+		struct page *dst, bool enc)
+{
+	struct scatterlist src_sg, dst_sg;
+	struct hwk_st_op_result hr = { .rc = 0 };
+	char iv[SEP_AES_IV_SIZE] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+				     12, 13, 14, 15 };
+	int ret;
+
+	init_completion(&hr.completion);
+	ablkcipher_request_set_callback(req,
+			CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+			hwk_st_op_complete, &hr);
+
+	sg_init_table(&src_sg, 1);
+	sg_set_page(&src_sg, src, PAGE_SIZE, 0);
+	sg_init_table(&dst_sg, 1);
+	sg_set_page(&dst_sg, dst, PAGE_SIZE, 0);
+	ablkcipher_request_set_crypt(req, &src_sg, &dst_sg, PAGE_SIZE, iv);
+
+	pr_info("%s: submiting %s op..\n", __func__, enc ? "enc" : "dec");
+	if (enc)
+		ret = crypto_ablkcipher_encrypt(req);
+	else
+		ret = crypto_ablkcipher_decrypt(req);
+	pr_info("%s: op submitted\n", __func__);
+	if (ret == -EINPROGRESS || ret == -EBUSY) {
+		wait_for_completion(&hr.completion);
+		ret = hr.rc;
+	}
+	pr_info("%s: op completed\n", __func__);
+
+	return ret;
+}
+
+ssize_t sys_hwk_st_start(struct kobject *kobj, struct kobj_attribute *attr,
+			 const char *buf, size_t count)
+{
+	struct ablkcipher_request *req;
+	struct page *src, *enc, *dec;
+	struct crypto_ablkcipher *acipher;
+	char *tmp, *tmp2, *tmp3;
+	int ret = -EINVAL, i;
+	u32 hwk_id;
+
+	if (hwk_st_status == HWK_ST_RUNNING)
+		return count;
+
+	hwk_st_status = HWK_ST_RUNNING;
+
+	ret = kstrtouint(buf, 10, &hwk_id);
+	if (ret) {
+		pr_err("bad hardware key id: %d\n", ret);
+		goto out;
+	}
+
+	ret = -ENOMEM;
+	src = alloc_page(GFP_KERNEL);
+	if (!src) {
+		pr_err("failed to allocate src page\n");
+		goto out;
+	}
+	enc = alloc_page(GFP_KERNEL);
+	if (!enc) {
+		pr_err("failed to allocate enc page\n");
+		goto out_free_src;
+	}
+	dec = alloc_page(GFP_KERNEL);
+	if (!dec) {
+		pr_err("failed to allocate dec page\n");
+		goto out_free_enc;
+	}
+
+	acipher = crypto_alloc_ablkcipher("cbchk(aes)", 0, 0);
+	if (IS_ERR(acipher)) {
+		pr_err("error allocating cipher: %ld\n", PTR_ERR(acipher));
+		ret = -EINVAL;
+		goto out_free_dec;
+	}
+
+	tmp = kmap(src);
+	for (i = 0; i < PAGE_SIZE; i++)
+		tmp[i] = i;
+	kunmap(src);
+
+	crypto_ablkcipher_set_flags(acipher, CRYPTO_TFM_REQ_WEAK_KEY);
+
+	pr_debug("setting hardware key %d\n", hwk_id);
+	ret = crypto_ablkcipher_setkey(acipher, (u8 *)&hwk_id, sizeof(hwk_id));
+	if (ret) {
+		pr_err("error setting hardware key: %d\n", ret);
+		goto out_free_cipher;
+	}
+
+	req = ablkcipher_request_alloc(acipher, GFP_NOFS);
+	if (!req) {
+		ret = -EINVAL;
+		pr_err("failed to allocate cipher request\n");
+		goto out_free_cipher;
+	}
+
+
+	ret = hwk_st_do_op(req, src, enc, true);
+	if (ret) {
+		pr_err("encryption failed: %d\n", ret);
+		goto out_free_req;
+	}
+
+	ret = hwk_st_do_op(req, enc, dec, false);
+	if (ret) {
+		pr_err("decryption failed: %d\n", ret);
+		goto out_free_req;
+	}
+
+	tmp = kmap(src); tmp2 = kmap(enc); tmp3 = kmap(dec);
+#ifdef HWK_ST_DUMP_BUF
+	hwk_dump_buf(tmp, "src", PAGE_SIZE);
+	hwk_dump_buf(tmp2, "enc", PAGE_SIZE);
+	hwk_dump_buf(tmp3, "dec", PAGE_SIZE);
+#endif
+	for (i = 0; i < PAGE_SIZE; i++) {
+		if (tmp[i] != tmp3[i]) {
+			ret = -EINVAL;
+			break;
+		}
+	}
+	kunmap(src); kunmap(enc); kunmap(dec);
+
+	if (ret)
+		pr_err("dec != src\n");
+
+out_free_req:
+	ablkcipher_request_free(req);
+out_free_cipher:
+	crypto_free_ablkcipher(acipher);
+out_free_dec:
+	__free_pages(dec, 0);
+out_free_enc:
+	__free_pages(enc, 0);
+out_free_src:
+	__free_pages(src, 0);
+out:
+	if (ret)
+		hwk_st_status = HWK_ST_ERROR;
+	else
+		hwk_st_status = HWK_ST_SUCCESS;
+	return count;
+}
+#endif
diff --git a/drivers/staging/sep54/desc_mgr.c b/drivers/staging/sep54/desc_mgr.c
new file mode 100644
index 0000000..c8b7e7a
--- /dev/null
+++ b/drivers/staging/sep54/desc_mgr.c
@@ -0,0 +1,1216 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+#define SEP_LOG_CUR_COMPONENT SEP_LOG_MASK_DESC_MGR
+
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include "dx_driver.h"
+#include "dx_bitops.h"
+#include "sep_log.h"
+#include "sep_sw_desc.h"
+#include "crypto_ctx_mgr.h"
+#include "sep_sysfs.h"
+/* Registers definitions from shared/hw/include */
+#include "dx_reg_base_host.h"
+#include "dx_host.h"
+#define DX_CC_HOST_VIRT	/* must be defined before including dx_cc_regs.h */
+#include "dx_cc_regs.h"
+#include "sep_power.h"
+#include "desc_mgr.h"
+
+/* Queue buffer log(size in bytes) */
+#define SEP_SW_DESC_Q_MEM_SIZE_LOG 12	/*4KB */
+#define SEP_SW_DESC_Q_MEM_SIZE (1 << SEP_SW_DESC_Q_MEM_SIZE_LOG)
+#define WORD_SIZE_LOG 2		/*32b=4B=2^2 */
+/* Number of entries (descriptors in a queue) */
+#define SEP_DESC_Q_ENTRIES_NUM_LOG \
+	(SEP_SW_DESC_Q_MEM_SIZE_LOG - WORD_SIZE_LOG - SEP_SW_DESC_WORD_SIZE_LOG)
+#define SEP_DESC_Q_ENTRIES_NUM (1 << SEP_DESC_Q_ENTRIES_NUM_LOG)
+#define SEP_DESC_Q_ENTRIES_MASK BITMASK(SEP_DESC_Q_ENTRIES_NUM_LOG)
+
+/* This watermark is used to initiate dispatching after the queue entered
+   the FULL state in order to avoid interrupts flooding at SeP */
+#define SEP_DESC_Q_WATERMARK_MARGIN ((SEP_DESC_Q_ENTRIES_NUM)/4)
+
+/* convert from descriptor counters index in descriptors array */
+#define GET_DESC_IDX(cntr) ((cntr) & SEP_DESC_Q_ENTRIES_MASK)
+#define GET_DESC_PTR(q_p, idx)				\
+	((struct sep_sw_desc *)((q_p)->q_base_p +	\
+				(idx << SEP_SW_DESC_WORD_SIZE_LOG)))
+#define GET_Q_PENDING_DESCS(q_p) ((q_p)->sent_cntr - (q_p)->completed_cntr)
+#define GET_Q_FREE_DESCS(q_p) \
+	 (SEP_DESC_Q_ENTRIES_NUM - GET_Q_PENDING_DESCS(q_p))
+#define IS_Q_FULL(q_p) (GET_Q_FREE_DESCS(q_p) == 0)
+
+/* LUT for GPRs registers offsets (to be added to cc_regs_base) */
+static const unsigned long host_to_sep_gpr_offset[] = {
+	DX_CC_REG_OFFSET(HOST, HOST_SEP_GPR0),
+	DX_CC_REG_OFFSET(HOST, HOST_SEP_GPR1),
+	DX_CC_REG_OFFSET(HOST, HOST_SEP_GPR2),
+	DX_CC_REG_OFFSET(HOST, HOST_SEP_GPR3),
+};
+
+static const unsigned long sep_to_host_gpr_offset[] = {
+	DX_CC_REG_OFFSET(HOST, SEP_HOST_GPR0),
+	DX_CC_REG_OFFSET(HOST, SEP_HOST_GPR1),
+	DX_CC_REG_OFFSET(HOST, SEP_HOST_GPR2),
+	DX_CC_REG_OFFSET(HOST, SEP_HOST_GPR3),
+};
+
+/**
+ * struct descs_backlog_item - Item of the queue descs_backlog_queue
+ */
+struct descs_backlog_item {
+	struct list_head list;
+	struct sep_sw_desc desc;
+};
+
+/**
+ * struct descs_backlog_queue - Queue of backlog descriptors
+ * @list:		List head item
+ * @cur_q_len:		Current number of entries in the backlog_q
+ * @backlog_items_pool:	Memory pool for allocating elements of this queue
+ * @backlog_items_pool_name:	Pool name string for kmem_cache object
+ */
+struct descs_backlog_queue {
+	struct list_head list;
+	unsigned int cur_q_len;
+	struct kmem_cache *backlog_items_pool;
+	char backlog_items_pool_name[24];
+};
+
+/**
+ * struct desc_q - Descriptor queue object
+ * @qid:		The associated software queue ID
+ * @qstate:		Operational state of the queue
+ * @gpr_to_sep:		Pointer to host-to-sep GPR for this queue (requests)
+ * @gpr_from_sep:	Pointer to sep-to-host GPR for this queue (completion)
+ * @qlock:		Protect data structure in non-interrupt context
+ * @q_base_p:		The base address of the descriptors cyclic queue buffer
+ * @q_base_dma:		The DMA address for q_base_p
+ * @sent_cntr:		Sent descriptors counter
+ * @completed_cntr:	Completed descriptors counter as reported by SeP
+ * @idle_jiffies:	jiffies value when the queue became idle (empty)
+ * @backlog_q:		Queue of backlog descriptors - pending to be dispatched
+ *			into the descriptors queue (were not dispatched because
+ *			it was full or in "sleep" state)
+ * @backlog_work:	Work task for handling/equeuing backlog descriptors
+ * @enqueue_time:	Array to save descriptor start [ns] per descriptor
+ */
+struct desc_q {
+	int qid;
+	enum desc_q_state qstate;
+	void __iomem *gpr_to_sep;
+	void __iomem *gpr_from_sep;
+	struct queue_drvdata *drvdata;
+	struct mutex qlock;
+	u32 *q_base_p;
+	dma_addr_t q_base_dma;
+	u32 sent_cntr;
+	u32 completed_cntr;
+	unsigned long idle_jiffies;
+	struct descs_backlog_queue backlog_q;
+	struct work_struct backlog_work;
+	unsigned long long *enqueue_time;
+};
+
+static uintptr_t cookies[SEP_DESC_Q_ENTRIES_NUM];
+DEFINE_MUTEX(cookie_lock);
+
+u32 add_cookie(uintptr_t op_ctx)
+{
+	u32 i;
+
+	mutex_lock(&cookie_lock);
+	for (i = 0; i < SEP_DESC_Q_ENTRIES_NUM; i++) {
+		if (cookies[i] == 0) {
+			cookies[i] = op_ctx;
+			break;
+		}
+	}
+	mutex_unlock(&cookie_lock);
+
+	return i;
+}
+
+void delete_cookie(u32 index)
+{
+	mutex_lock(&cookie_lock);
+	cookies[index] = 0;
+	mutex_unlock(&cookie_lock);
+}
+
+void delete_context(uintptr_t op_ctx)
+{
+	u32 i;
+
+	mutex_lock(&cookie_lock);
+	for (i = 0; i < 128; i++) {
+		if (cookies[i] == op_ctx) {
+			cookies[i] = 0;
+			break;
+		}
+	}
+	mutex_unlock(&cookie_lock);
+}
+
+uintptr_t get_cookie(u32 index)
+{
+	return cookies[index];
+}
+
+#ifdef DEBUG
+static void dump_desc(const struct sep_sw_desc *desc_p);
+#else
+#define dump_desc(desc_p) do {} while (0)
+#endif /*DEBUG*/
+static int backlog_q_init(struct desc_q *q_p);
+static void backlog_q_cleanup(struct desc_q *q_p);
+static void backlog_q_process(struct work_struct *work);
+
+/**
+ * desc_q_create() - Create descriptors queue object
+ * @qid:	 The queue ID (index)
+ * @drvdata:	 The associated queue driver data
+ *
+ * Returns Allocated queue object handle (DESC_Q_INVALID_HANDLE for failure)
+ */
+void *desc_q_create(int qid, struct queue_drvdata *drvdata)
+{
+	struct device *dev = drvdata->sep_data->dev;
+	void __iomem *cc_regs_base = drvdata->sep_data->cc_base;
+	struct desc_q *new_q_p;
+
+	new_q_p = kzalloc(sizeof(struct desc_q), GFP_KERNEL);
+	if (unlikely(new_q_p == NULL)) {
+		pr_err("Q%d: Failed allocating %zu B for new_q\n",
+			    qid, sizeof(struct desc_q));
+		goto desc_q_create_failed;
+	}
+
+	/* Initialize fields */
+	mutex_init(&new_q_p->qlock);
+	new_q_p->drvdata = drvdata;
+	new_q_p->qid = qid;
+	new_q_p->gpr_to_sep = cc_regs_base + host_to_sep_gpr_offset[qid];
+	new_q_p->gpr_from_sep = cc_regs_base + sep_to_host_gpr_offset[qid];
+	new_q_p->sent_cntr = 0;
+	new_q_p->completed_cntr = 0;
+	new_q_p->idle_jiffies = jiffies;
+
+	new_q_p->q_base_p = dma_alloc_coherent(dev, SEP_SW_DESC_Q_MEM_SIZE,
+					       &new_q_p->q_base_dma,
+					       GFP_KERNEL);
+	if (unlikely(new_q_p->q_base_p == NULL)) {
+		pr_err("Q%d: Failed allocating %d B for desc buffer\n",
+			    qid, SEP_SW_DESC_Q_MEM_SIZE);
+		goto desc_q_create_failed;
+	}
+
+	new_q_p->enqueue_time = kmalloc(SEP_DESC_Q_ENTRIES_NUM *
+					sizeof(u64), GFP_KERNEL);
+	if (new_q_p->enqueue_time == NULL) {
+		pr_err("Q%d: Failed allocating time stats array\n", qid);
+		goto desc_q_create_failed;
+	}
+
+	if (backlog_q_init(new_q_p) != 0) {
+		pr_err("Q%d: Failed creating backlog queue\n", qid);
+		goto desc_q_create_failed;
+	}
+	INIT_WORK(&new_q_p->backlog_work, backlog_q_process);
+
+	/* Initialize respective GPR before SeP would be initialized.
+	   Required because the GPR may be non-zero as a result of CC-init
+	   sequence leftovers */
+	WRITE_REGISTER(new_q_p->gpr_to_sep, new_q_p->sent_cntr);
+
+	new_q_p->qstate = DESC_Q_ACTIVE;
+	return (void *)new_q_p;
+
+	/* Error cases cleanup */
+ desc_q_create_failed:
+	if (new_q_p != NULL) {
+		kfree(new_q_p->enqueue_time);
+		if (new_q_p->q_base_p != NULL)
+			dma_free_coherent(dev, SEP_SW_DESC_Q_MEM_SIZE,
+					  new_q_p->q_base_p,
+					  new_q_p->q_base_dma);
+		mutex_destroy(&new_q_p->qlock);
+		kfree(new_q_p);
+	}
+	return DESC_Q_INVALID_HANDLE;
+}
+
+/**
+ * desc_q_destroy() - Destroy descriptors queue object (free resources)
+ * @q_h:	 The queue object handle
+ *
+ */
+void desc_q_destroy(void *q_h)
+{
+	struct desc_q *q_p = (struct desc_q *)q_h;
+	struct device *dev = q_p->drvdata->sep_data->dev;
+
+	if (q_p->sent_cntr != q_p->completed_cntr) {
+		pr_err(
+			    "Q%d: destroyed while there are outstanding descriptors\n",
+			    q_p->qid);
+	}
+	backlog_q_cleanup(q_p);
+	kfree(q_p->enqueue_time);
+	dma_free_coherent(dev, SEP_SW_DESC_Q_MEM_SIZE,
+			  q_p->q_base_p, q_p->q_base_dma);
+	mutex_destroy(&q_p->qlock);
+	kfree(q_p);
+}
+
+/**
+ * desc_q_set_state() - Set queue state (SLEEP or ACTIVE)
+ * @q_h:	The queue object handle
+ * @state:	The requested state
+ */
+int desc_q_set_state(void *q_h, enum desc_q_state state)
+{
+	struct desc_q *q_p = (struct desc_q *)q_h;
+	int rc = 0;
+
+#ifdef DEBUG
+	if ((q_p->qstate != DESC_Q_ACTIVE) && (q_p->qstate != DESC_Q_ASLEEP)) {
+		pr_err("Q%d is in invalid state: %d\n",
+			    q_p->qid, q_p->qstate);
+		return -EINVAL;
+	}
+#endif
+	mutex_lock(&q_p->qlock);
+	switch (state) {
+	case DESC_Q_ASLEEP:
+		if (q_p->qstate != DESC_Q_ASLEEP) {
+			/* If not already in this state */
+			if (desc_q_is_idle(q_h, NULL))
+				q_p->qstate = DESC_Q_ASLEEP;
+			else
+				rc = -EBUSY;
+		}		/* else: already asleep */
+		break;
+	case DESC_Q_ACTIVE:
+		if (q_p->qstate != DESC_Q_ACTIVE) {
+			/* Initiate enqueue from backlog if any is pending */
+			if (q_p->backlog_q.cur_q_len > 0)
+				(void)schedule_work(&q_p->backlog_work);
+			else	/* Empty --> Back to idle state */
+				q_p->idle_jiffies = jiffies;
+			q_p->qstate = DESC_Q_ACTIVE;
+		}		/* else: already active */
+		break;
+	default:
+		pr_err("Invalid requested state: %d\n", state);
+		rc = -EINVAL;
+	}
+	mutex_unlock(&q_p->qlock);
+	return rc;
+}
+
+/**
+ * desc_q_get_state() - Get queue state
+ * @q_h:	The queue object handle
+ */
+enum desc_q_state desc_q_get_state(void *q_h)
+{
+	struct desc_q *q_p = (struct desc_q *)q_h;
+	return q_p->qstate;
+}
+
+/**
+ * desc_q_is_idle() - Report if given queue is active but empty/idle.
+ * @q_h:		The queue object handle
+ * @idle_jiffies_p:	Return jiffies at which the queue became idle
+ */
+bool desc_q_is_idle(void *q_h, unsigned long *idle_jiffies_p)
+{
+	struct desc_q *q_p = (struct desc_q *)q_h;
+	if (idle_jiffies_p != NULL)
+		*idle_jiffies_p = q_p->idle_jiffies;
+	/* No need to lock the queue - returned information is "fluid" anyway */
+	return ((q_p->qstate == DESC_Q_ACTIVE) &&
+		(GET_Q_PENDING_DESCS(q_p) == 0) &&
+		(q_p->backlog_q.cur_q_len == 0));
+}
+
+/**
+ * desc_q_reset() - Reset sent/completed counters of queue
+ * @q_h:	The queue object handle
+ *
+ * This function should be invoked only when the queue is in ASLEEP state
+ * after the transition of SeP to sleep state completed.
+ * Returns -EBUSY if the queue is not in the correct state for reset.
+ */
+int desc_q_reset(void *q_h)
+{
+	struct desc_q *q_p = (struct desc_q *)q_h;
+	int rc = 0;
+
+	mutex_lock(&q_p->qlock);
+	if ((q_p->qstate == DESC_Q_ASLEEP) && (GET_Q_PENDING_DESCS(q_p) == 0)) {
+		q_p->sent_cntr = 0;
+		q_p->completed_cntr = 0;
+	} else {
+		pr_err("Invoked when queue is not ASLEEP\n");
+		rc = -EBUSY;
+	}
+	mutex_unlock(&q_p->qlock);
+	return rc;
+}
+
+/**
+ * dispatch_sw_desc() - Copy given descriptor into next free entry in the
+ *			descriptors queue and signal SeP.
+ *
+ * @q_p:	Desc. queue context
+ * @desc_p:	The descriptor to dispatch
+ *
+ * This function should be called with qlock locked (non-interrupt context)
+ * and only if queue is not full (i.e., this function does not validate
+ * queue utilization)
+ */
+static inline void dispatch_sw_desc(struct desc_q *q_p,
+				    struct sep_sw_desc *desc_p)
+{
+	const u32 desc_idx = GET_DESC_IDX(q_p->sent_cntr);
+
+	dump_desc(desc_p);
+	preempt_disable_notrace();
+	q_p->enqueue_time[desc_idx] = sched_clock();	/* Save start time */
+	preempt_enable_notrace();
+	/* copy descriptor to free entry in queue */
+	SEP_SW_DESC_COPY_TO_SEP(GET_DESC_PTR(q_p, desc_idx), desc_p);
+	q_p->sent_cntr++;
+}
+
+/**
+ * desc_q_enqueue_sleep_req() - Enqueue SLEEP_REQ descriptor
+ * @q_h:	The queue object handle
+ * @op_ctx:	The operation context for this descriptor
+ * This function may be invoked only when the queue is in ASLEEP state
+ * (assuming SeP is still active).
+ * If the queue is not in ASLEEP state this function would return -EBUSY.
+ */
+int desc_q_enqueue_sleep_req(void *q_h, struct sep_op_ctx *op_ctx)
+{
+	struct desc_q *q_p = (struct desc_q *)q_h;
+	struct sep_sw_desc desc;
+	int rc = 0;
+
+	SEP_SW_DESC_INIT(&desc);
+	SEP_SW_DESC_SET(&desc, TYPE, SEP_SW_DESC_TYPE_SLEEP_REQ);
+	SEP_SW_DESC_SET_COOKIE(&desc, op_ctx);
+
+	mutex_lock(&q_p->qlock);
+	if (q_p->qstate == DESC_Q_ASLEEP) {
+		op_ctx->op_state = USER_OP_INPROC;
+		/* In ASLEEP state the queue assumed to be empty... */
+		dispatch_sw_desc(q_p, &desc);
+		WRITE_REGISTER(q_p->gpr_to_sep, q_p->sent_cntr);
+		pr_debug("Sent SLEEP_REQ\n");
+	} else {
+		rc = -EBUSY;
+	}
+	mutex_unlock(&q_p->qlock);
+	return rc;
+}
+
+static int backlog_q_init(struct desc_q *q_p)
+{
+	struct descs_backlog_queue *backlog_q_p = &q_p->backlog_q;
+	int rc = 0;
+
+	snprintf(backlog_q_p->backlog_items_pool_name,
+		 sizeof(backlog_q_p->backlog_items_pool_name),
+		 "dx_sep_backlog%d", q_p->qid);
+	backlog_q_p->backlog_items_pool =
+	    kmem_cache_create(backlog_q_p->backlog_items_pool_name,
+			      sizeof(struct descs_backlog_item),
+			      sizeof(u32), 0, NULL);
+	if (unlikely(backlog_q_p->backlog_items_pool == NULL)) {
+		pr_err("Q%d: Failed allocating backlog_items_pool\n",
+			    q_p->qid);
+		rc = -ENOMEM;
+	} else {
+		INIT_LIST_HEAD(&backlog_q_p->list);
+		backlog_q_p->cur_q_len = 0;
+	}
+	return rc;
+}
+
+static void backlog_q_cleanup(struct desc_q *q_p)
+{
+	struct descs_backlog_queue *backlog_q_p = &q_p->backlog_q;
+
+	if (backlog_q_p->cur_q_len > 0) {
+		pr_err("Q%d: Cleanup while have %u pending items!",
+			    q_p->qid, backlog_q_p->cur_q_len);
+		/* TODO: Handle freeing of pending items? */
+	}
+	kmem_cache_destroy(backlog_q_p->backlog_items_pool);
+}
+
+/**
+ * backlog_q_enqueue() - Enqueue given descriptor for postponed processing
+ *				(e.g., in case of full desc_q)
+ *
+ * @q_p:	Desc. queue object
+ * @desc_p:	Descriptor to enqueue
+ *
+ * Caller must call this function with the qlock locked (non-interrupt context
+ * only)
+ */
+static int backlog_q_enqueue(struct desc_q *q_p, struct sep_sw_desc *desc_p)
+{
+	struct descs_backlog_queue *backlog_q_p = &q_p->backlog_q;
+	struct sep_op_ctx *op_ctx = SEP_SW_DESC_GET_COOKIE(desc_p);
+	struct descs_backlog_item *new_q_item;
+
+	pr_debug("->backlog(op_ctx=%p):\n", op_ctx);
+	dump_desc(desc_p);
+
+	new_q_item =
+	    kmem_cache_alloc(backlog_q_p->backlog_items_pool, GFP_KERNEL);
+	if (unlikely(new_q_item == NULL)) {
+		pr_err("Failed allocating descs_queue_item");
+		op_ctx->op_state = USER_OP_NOP;
+		return -ENOMEM;
+	}
+	op_ctx->op_state = USER_OP_PENDING;
+	memcpy(&new_q_item->desc, desc_p, sizeof(struct sep_sw_desc));
+	list_add_tail(&new_q_item->list, &backlog_q_p->list);
+	op_ctx->backlog_descs_cntr++;
+	backlog_q_p->cur_q_len++;
+	return 0;
+}
+
+/**
+ * backlog_q_dequeue() - Dequeue from pending descriptors queue and dispatch
+ *			into the SW-q the first pending descriptor
+ *
+ * @q_p:	Desc. queue object
+ *
+ * This function must be called with qlock locked and only if there is free
+ * space in the given descriptor queue.
+ * It returns 0 on success and -ENOMEM if there is no pending request
+ */
+static int backlog_q_dequeue(struct desc_q *q_p)
+{
+	struct descs_backlog_queue *backlog_q_p = &q_p->backlog_q;
+	struct descs_backlog_item *first_item;
+	struct sep_sw_desc *desc_p;
+	struct sep_op_ctx *op_ctx;
+
+	if (list_empty(&backlog_q_p->list))
+		return -ENOMEM;
+	/* Remove from the first item from the list but keep the item */
+	first_item = list_first_entry(&backlog_q_p->list,
+				      struct descs_backlog_item, list);
+	list_del(&first_item->list);
+	backlog_q_p->cur_q_len--;
+	/* Process/dispatch the descriptor to the SW-q. */
+	desc_p = &first_item->desc;
+	dump_desc(desc_p);
+	op_ctx = SEP_SW_DESC_GET_COOKIE(desc_p);
+	if (unlikely(op_ctx == NULL)) {
+		pr_err("Invalid desc - COOKIE is NULL\n");
+		return -EINVAL;
+	}
+	pr_debug("backlog(op_ctx=%p)->descQ:\n", op_ctx);
+	dispatch_sw_desc(q_p, desc_p);
+	op_ctx->backlog_descs_cntr--;
+	/* Now we can free the list item */
+	kmem_cache_free(backlog_q_p->backlog_items_pool, first_item);
+	if (op_ctx->backlog_descs_cntr == 0) {
+		/* All the operation descriptors reached the SW-q. */
+		op_ctx->op_state = USER_OP_INPROC;
+		if (op_ctx->comp_work != NULL)
+			/* Async. (CryptoAPI) */
+			/* Invoke completion callback directly because
+			   we are already in work_queue context and we wish
+			   to assure this state update (EINPROGRESS)
+			   is delivered before the request is completed */
+			op_ctx->comp_work->func(op_ctx->comp_work);
+	}
+	return 0;
+}
+
+/**
+ * backlog_q_process() - Handler for dispatching backlog descriptors
+ *			into the SW desc.Q when possible (dispatched from
+ *			the completion interrupt handler)
+ *
+ * @work:	The work context
+ */
+static void backlog_q_process(struct work_struct *work)
+{
+	int descs_to_enqueue;
+	struct desc_q *q_p = container_of(work, struct desc_q, backlog_work);
+
+	mutex_lock(&q_p->qlock);
+	if (q_p->qstate == DESC_Q_ACTIVE) {	/* Avoid on ASLEEP state */
+		descs_to_enqueue = GET_Q_FREE_DESCS(q_p);
+		/* Not more than pending descriptors */
+		if (descs_to_enqueue > q_p->backlog_q.cur_q_len)
+			descs_to_enqueue = q_p->backlog_q.cur_q_len;
+		pr_debug("Q%d: Dispatching %d descs. from pendQ\n",
+			      q_p->qid, descs_to_enqueue);
+		while (descs_to_enqueue > 0) {
+			/* From backlog queue to SW descriptors queue */
+			if (!backlog_q_dequeue(q_p))
+				descs_to_enqueue--;
+			else
+				break;
+		}
+		/* Signal SeP once of all new descriptors
+		   (interrupt coalescing) */
+		WRITE_REGISTER(q_p->gpr_to_sep, q_p->sent_cntr);
+	}
+	mutex_unlock(&q_p->qlock);
+}
+
+/**
+ * desc_q_get_info4sep() - Get queue address and size to be used in FW init
+ *				phase
+ * @q_h:	 The queue object handle
+ * @base_addr_p:	 Base address return parameter
+ * @size_p:	 Queue size (in bytes) return parameter
+ *
+ */
+void desc_q_get_info4sep(void *q_h,
+			 dma_addr_t *base_addr_p, unsigned long *size_p)
+{
+	struct desc_q *q_p = (struct desc_q *)q_h;
+
+	*base_addr_p = q_p->q_base_dma;
+	*size_p = SEP_SW_DESC_Q_MEM_SIZE;
+}
+
+/**
+ * desc_q_enqueue() - Enqueue given descriptor in given queue
+ * @q_h:		The queue object handle
+ * @desc_p:		Pointer to descriptor
+ * @may_backlog:	When "true" and descQ is full or ASLEEP, may enqueue
+ *			the given desc. in the backlog queue.
+ *			When "false", any of the above cases would cause
+ *			returning -ENOMEM.
+ *
+ * The function updates the op_ctx->op_state accoring to its results.
+ * Returns -EINPROGRESS on success to dispatch into the SW desc. q.
+ * Returns -EBUSY if may_backlog==true and the descriptor was enqueued in the
+ * the backlog queue.
+ * Returns -ENOMEM if queue is full and cannot enqueue in the backlog queue
+ */
+int desc_q_enqueue(void *q_h, struct sep_sw_desc *desc_p, bool may_backlog)
+{
+	struct desc_q *q_p = (struct desc_q *)q_h;
+	struct sep_op_ctx *op_ctx = SEP_SW_DESC_GET_COOKIE(desc_p);
+	int rc;
+
+	mutex_lock(&q_p->qlock);
+#ifdef SEP_RUNTIME_PM
+	dx_sep_pm_runtime_get();
+#endif
+
+	if (IS_Q_FULL(q_p) ||	/* Queue is full */
+	    (q_p->backlog_q.cur_q_len > 0) ||	/* or already have pending d. */
+	    (q_p->qstate == DESC_Q_ASLEEP)) {	/* or in sleep state */
+		if (may_backlog) {
+			pr_debug("Enqueuing desc. to queue@%s\n",
+				 q_p->qstate == DESC_Q_ASLEEP ?
+					 "ASLEEP" : "FULL");
+			rc = backlog_q_enqueue(q_p, desc_p);
+			if (unlikely(rc != 0)) {
+				pr_err("Failed enqueuing desc. to queue@%s\n",
+				       q_p->qstate == DESC_Q_ASLEEP ?
+					       "ASLEEP" : "FULL");
+			} else {
+				rc = -EBUSY;
+			}
+		} else {
+			pr_debug("Q%d: %s and may not backlog.\n",
+				 q_p->qid,
+				 q_p->qstate == DESC_Q_ASLEEP ?
+					 "ASLEEP" : "FULL");
+			rc = -ENOMEM;
+		}
+
+	} else {		/* Can dispatch to actual descriptors queue */
+		op_ctx->op_state = USER_OP_INPROC;
+		dispatch_sw_desc(q_p, desc_p);
+		/* Signal SeP of new descriptors */
+		WRITE_REGISTER(q_p->gpr_to_sep, q_p->sent_cntr);
+		pr_debug("Q#%d: Sent SwDesc #%u (op_ctx=%p)\n",
+			      q_p->qid, q_p->sent_cntr, op_ctx);
+		rc = -EINPROGRESS;
+	}
+
+#ifdef SEP_RUNTIME_PM
+	dx_sep_pm_runtime_put();
+#endif
+	mutex_unlock(&q_p->qlock);
+
+	return rc;		/* Enqueued to desc. queue */
+}
+
+/**
+ * desc_q_mark_invalid_cookie() - Mark given cookie as invalid in case marked as
+ *					completed after a timeout
+ * @q_h:	 Descriptor queue handle
+ * @cookie:	 Invalidate descriptors with this cookie
+ *
+ * Mark given cookie as invalid in case marked as completed after a timeout
+ * Invoke this before releasing the op_ctx object.
+ * There is no race with the interrupt because the op_ctx (cookie) is still
+ * valid when invoking this function.
+ */
+void desc_q_mark_invalid_cookie(void *q_h, void *cookie)
+{
+	struct desc_q *q_p = (struct desc_q *)q_h;
+	struct sep_sw_desc desc;
+	u32 cur_desc_cntr, cur_desc_idx;
+	unsigned int drop_cnt = 0;
+
+	mutex_lock(&q_p->qlock);
+
+	for (cur_desc_cntr = q_p->completed_cntr;
+	     cur_desc_cntr < q_p->sent_cntr; cur_desc_cntr++) {
+		/* Mark all outstanding of given cookie as invalid with NULL */
+		cur_desc_idx = GET_DESC_IDX(cur_desc_cntr);
+		/* Copy descriptor to spad (endianess fix-up) */
+		/* TODO: Optimize to avoid full copy back...  */
+		/* (we only need the cookie) */
+		SEP_SW_DESC_COPY_FROM_SEP(&desc,
+					  GET_DESC_PTR(q_p, cur_desc_idx));
+		if (SEP_SW_DESC_GET_COOKIE(&desc) == cookie) {
+			SEP_SW_DESC_SET_COOKIE(&desc, (uintptr_t *)0);	/* Invalidate */
+			SEP_SW_DESC_COPY_TO_SEP(GET_DESC_PTR(q_p, cur_desc_idx),
+						&desc);
+			pr_debug("Invalidated desc at desc_cnt=%u\n",
+				      cur_desc_idx);
+			drop_cnt++;
+		}
+	}
+
+	mutex_unlock(&q_p->qlock);
+
+	if (drop_cnt > 0)
+		pr_warn("Invalidated %u descriptors of cookie=0x%p\n",
+			drop_cnt, cookie);
+
+}
+
+/**
+ * desc_q_process_completed() - Dequeue and process any completed descriptors in
+ *				the queue
+ * @q_h:	 The queue object handle
+ *
+ * Dequeue and process any completed descriptors in the queue
+ * (This function assumes non-reentrancy since it is invoked from
+ *  either interrupt handler or in workqueue context)
+ */
+void desc_q_process_completed(void *q_h)
+{
+	struct desc_q *q_p = (struct desc_q *)q_h;
+	struct sep_op_ctx *op_ctx;
+	struct sep_sw_desc desc;
+	enum sep_sw_desc_type desc_type;
+	struct sep_op_ctx *cookie;
+	u32 ret_code;
+	u32 desc_idx;
+	u32 new_completed_cntr;
+
+	new_completed_cntr = READ_REGISTER(q_p->gpr_from_sep);
+	/* Sanity check for read GPR value (must be between sent and completed).
+	   This arithmetic is cyclic so should work even after the counter
+	   wraps around. */
+	if ((q_p->sent_cntr - new_completed_cntr) >
+	    (q_p->sent_cntr - q_p->completed_cntr)) {
+		/* More completions than outstanding descriptors ?! */
+		pr_err(
+			    "sent_cntr=0x%08X completed_cntr=0x%08X gpr=0x%08X\n",
+			    q_p->sent_cntr, q_p->completed_cntr,
+			    new_completed_cntr);
+		return;		/*SEP_DRIVER_BUG() */
+		/* This is a (sep) bug case that is not supposed to happen,
+		   but we must verify this to avoid accessing stale descriptor
+		   data (which may cause system memory corruption).
+		   Returning to the caller may result in interrupt loss, but we
+		   prefer losing a completion and blocking the caller forever
+		   than invoking BUG() that would crash the whole system and may
+		   even loose the error log message. This would give a chance
+		   for a subsequent pending descriptor completion recover this
+		   case or in the worst case let the system administrator
+		   understand what is going on and let her perform a graceful
+		   reboot. */
+	}
+
+	while (new_completed_cntr > q_p->completed_cntr) {
+
+		desc_idx = GET_DESC_IDX(q_p->completed_cntr);
+		/* Copy descriptor to spad (endianess fix-up) */
+		/* TODO: Optimize to avoid full copy back...  */
+		/* (we only need type the fields: type, retcode, cookie) */
+		SEP_SW_DESC_COPY_FROM_SEP(&desc, GET_DESC_PTR(q_p, desc_idx));
+		desc_type = SEP_SW_DESC_GET(&desc, TYPE);
+		cookie = SEP_SW_DESC_GET_COOKIE(&desc);
+		ret_code = SEP_SW_DESC_GET(&desc, RET_CODE);
+		sysfs_update_sep_stats(q_p->qid, desc_type,
+				       q_p->enqueue_time[desc_idx],
+				       sched_clock());
+		q_p->completed_cntr++;	/* prepare for next */
+		pr_debug("type=%u retcode=0x%08X cookie=0x%p",
+			 desc_type, ret_code, cookie);
+		if (cookie == 0) {
+			/* Probably late completion on invalidated cookie */
+			pr_err("Got completion with NULL cookie\n");
+			continue;
+		}
+
+		op_ctx = (struct sep_op_ctx *)cookie;
+		if (desc_type == SEP_SW_DESC_TYPE_APP_REQ) {/* Applet Req. */
+			/* "internal error" flag is currently available only
+			   in this descriptor type. */
+			op_ctx->internal_error =
+			    SEP_SW_DESC_GET4TYPE(&desc, APP_REQ, INTERNAL_ERR);
+			/* Get session ID for SESSION_OPEN case */
+			op_ctx->session_ctx->sep_session_id =
+			    SEP_SW_DESC_GET4TYPE(&desc, APP_REQ, SESSION_ID);
+		}
+
+#ifdef DEBUG
+		if (op_ctx->pending_descs_cntr > MAX_PENDING_DESCS)
+			pr_err("Invalid num of pending descs %d\n",
+				    op_ctx->pending_descs_cntr);
+#endif
+		/* pending descriptors counter (apply for transactions composed
+		   of more than a single descriptor) */
+		op_ctx->pending_descs_cntr--;
+		/* Update associated operation context and notify it */
+		op_ctx->error_info |= ret_code;
+		if (op_ctx->pending_descs_cntr == 0) {
+			op_ctx->op_state = USER_OP_COMPLETED;
+			if (op_ctx->comp_work != NULL)	/* Async. (CryptoAPI) */
+				(void)schedule_work(op_ctx->comp_work);
+			else	/* Sync. (IOCTL or dx_sepapp_ API) */
+				complete(&(op_ctx->ioctl_op_compl));
+		}
+	}			/* while(new_completed_cntr) */
+
+	/* Dispatch pending requests */
+	/* if any pending descs. & utilization is below watermark & !ASLEEP */
+	if ((q_p->backlog_q.cur_q_len > 0) &&
+	    (GET_Q_FREE_DESCS(q_p) > SEP_DESC_Q_WATERMARK_MARGIN) &&
+	    (q_p->qstate != DESC_Q_ASLEEP)) {
+		(void)schedule_work(&q_p->backlog_work);
+	} else if (desc_q_is_idle(q_h, NULL)) {
+		q_p->idle_jiffies = jiffies;
+	}
+
+}
+
+/**
+ * desq_q_pack_debug_desc() - Create a debug descriptor in given buffer
+ * @desc_p:	 The descriptor buffer
+ * @op_ctx:	 The operation context
+ *
+ * TODO: Get additional debug descriptors (in addition to loopback)
+ *
+ */
+void desq_q_pack_debug_desc(struct sep_sw_desc *desc_p,
+			    struct sep_op_ctx *op_ctx)
+{
+	SEP_SW_DESC_INIT(desc_p);
+	SEP_SW_DESC_SET(desc_p, TYPE, SEP_SW_DESC_TYPE_DEBUG);
+	SEP_SW_DESC_SET_COOKIE(desc_p, op_ctx);
+}
+
+/**
+ * desc_q_pack_crypto_op_desc() - Pack a CRYPTO_OP descriptor in given
+ *				descriptor buffer
+ * @desc_p:	 The descriptor buffer
+ * @op_ctx:	 The operation context
+ * @sep_ctx_load_req:	 Context load request flag
+ * @sep_ctx_init_req:	 Context initialize request flag
+ * @proc_mode:	 Descriptor processing mode
+ *
+ */
+void desc_q_pack_crypto_op_desc(struct sep_sw_desc *desc_p,
+				struct sep_op_ctx *op_ctx,
+				int sep_ctx_load_req, int sep_ctx_init_req,
+				enum sep_proc_mode proc_mode)
+{
+	u32 xlli_addr;
+	u16 xlli_size;
+	u16 table_count;
+
+	SEP_SW_DESC_INIT(desc_p);
+	SEP_SW_DESC_SET(desc_p, TYPE, SEP_SW_DESC_TYPE_CRYPTO_OP);
+	SEP_SW_DESC_SET_COOKIE(desc_p, op_ctx);
+
+	SEP_SW_DESC_SET4TYPE(desc_p, CRYPTO_OP, FW_CACHE_IDX,
+			     ctxmgr_get_sep_cache_idx(&op_ctx->ctx_info));
+	SEP_SW_DESC_SET4TYPE(desc_p, CRYPTO_OP, L, sep_ctx_load_req);
+	SEP_SW_DESC_SET4TYPE(desc_p, CRYPTO_OP, HCB_ADDR,
+			     ctxmgr_get_sep_ctx_dma_addr(&op_ctx->ctx_info));
+	SEP_SW_DESC_SET4TYPE(desc_p, CRYPTO_OP, I, sep_ctx_init_req);
+	SEP_SW_DESC_SET4TYPE(desc_p, CRYPTO_OP, PROC_MODE, proc_mode);
+
+	if (proc_mode != SEP_PROC_MODE_NOP) {	/* no need for IFT/OFT in NOP */
+		/* IFT details */
+		llimgr_get_mlli_desc_info(&op_ctx->ift,
+					  &xlli_addr, &xlli_size, &table_count);
+		SEP_SW_DESC_SET4TYPE(desc_p, CRYPTO_OP, IFT_ADDR, xlli_addr);
+		SEP_SW_DESC_SET4TYPE(desc_p, CRYPTO_OP, IFT_SIZE, xlli_size);
+		SEP_SW_DESC_SET4TYPE(desc_p, CRYPTO_OP, IFT_NUM, table_count);
+
+		/* OFT details */
+		llimgr_get_mlli_desc_info(&op_ctx->oft,
+					  &xlli_addr, &xlli_size, &table_count);
+		SEP_SW_DESC_SET4TYPE(desc_p, CRYPTO_OP, OFT_ADDR, xlli_addr);
+		SEP_SW_DESC_SET4TYPE(desc_p, CRYPTO_OP, OFT_SIZE, xlli_size);
+		SEP_SW_DESC_SET4TYPE(desc_p, CRYPTO_OP, OFT_NUM, table_count);
+	}
+}
+
+/**
+ * desc_q_pack_combined_op_desc() - Pack a COMBINED_OP descriptor in given
+ *					descriptor buffer
+ * @desc_p:	 The descriptor buffer
+ * @op_ctx:	 The operation context
+ * @sep_ctx_load_req:	 Context load request flag
+ * @sep_ctx_init_req:	 Context initialize request flag
+ * @proc_mode:	 Descriptor processing mode
+ * @cfg_scheme:	 The SEP format configuration scheme claimed by the user
+ *
+ */
+void desc_q_pack_combined_op_desc(struct sep_sw_desc *desc_p,
+				  struct sep_op_ctx *op_ctx,
+				  int sep_ctx_load_req, int sep_ctx_init_req,
+				  enum sep_proc_mode proc_mode,
+				  u32 cfg_scheme)
+{
+	u32 xlli_addr;
+	u16 xlli_size;
+	u16 table_count;
+
+	SEP_SW_DESC_INIT(desc_p);
+	SEP_SW_DESC_SET(desc_p, TYPE, SEP_SW_DESC_TYPE_COMBINED_OP);
+	SEP_SW_DESC_SET_COOKIE(desc_p, op_ctx);
+
+	SEP_SW_DESC_SET4TYPE(desc_p, COMBINED_OP, L, sep_ctx_load_req);
+	SEP_SW_DESC_SET4TYPE(desc_p, COMBINED_OP, CONFIG_SCHEME, cfg_scheme);
+	SEP_SW_DESC_SET4TYPE(desc_p, COMBINED_OP, I, sep_ctx_init_req);
+	SEP_SW_DESC_SET4TYPE(desc_p, COMBINED_OP, PROC_MODE, proc_mode);
+
+	if (proc_mode != SEP_PROC_MODE_NOP) {	/* no need for IFT/OFT in NOP */
+		/* IFT details */
+		llimgr_get_mlli_desc_info(&op_ctx->ift,
+					  &xlli_addr, &xlli_size, &table_count);
+		SEP_SW_DESC_SET4TYPE(desc_p, CRYPTO_OP, IFT_ADDR, xlli_addr);
+		SEP_SW_DESC_SET4TYPE(desc_p, CRYPTO_OP, IFT_SIZE, xlli_size);
+		SEP_SW_DESC_SET4TYPE(desc_p, CRYPTO_OP, IFT_NUM, table_count);
+
+		/* OFT details */
+		llimgr_get_mlli_desc_info(&op_ctx->oft,
+					  &xlli_addr, &xlli_size, &table_count);
+		SEP_SW_DESC_SET4TYPE(desc_p, CRYPTO_OP, OFT_ADDR, xlli_addr);
+		SEP_SW_DESC_SET4TYPE(desc_p, CRYPTO_OP, OFT_SIZE, xlli_size);
+		SEP_SW_DESC_SET4TYPE(desc_p, CRYPTO_OP, OFT_NUM, table_count);
+	}
+}
+
+/**
+ * desc_q_pack_load_op_desc() - Pack a LOAD_OP descriptor in given descriptor
+ *				buffer
+ * @desc_p:	 The descriptor buffer
+ * @op_ctx:	 The operation context
+ * @sep_ctx_load_req:	 Context load request flag
+ *
+ */
+void desc_q_pack_load_op_desc(struct sep_sw_desc *desc_p,
+			      struct sep_op_ctx *op_ctx, int *sep_ctx_load_req)
+{
+	struct client_crypto_ctx_info *ctx_info_p = &(op_ctx->ctx_info);
+	u32 *p = (u32 *)desc_p;
+	int idx;
+
+	SEP_SW_DESC_INIT(desc_p);
+	SEP_SW_DESC_SET(desc_p, TYPE, SEP_SW_DESC_TYPE_LOAD_OP);
+	SEP_SW_DESC_SET_COOKIE(desc_p, op_ctx);
+
+	for (idx = 0; idx < SEP_MAX_COMBINED_ENGINES; idx++, ctx_info_p++) {
+		BITFIELD_SET(p[SEP_SW_DESC_LOAD_OP_FW_CACHE_IDX_WORD_OFFSET],
+			     SEP_SW_DESC_LOAD_OP_FW_CACHE_IDX_BIT_OFFSET(idx),
+			     SEP_SW_DESC_LOAD_OP_FW_CACHE_IDX_BIT_SIZE,
+			     (ctx_info_p->ctx_kptr == NULL) ? (-1) :
+			     ctxmgr_get_sep_cache_idx(ctx_info_p));
+
+		BITFIELD_SET(p[SEP_SW_DESC_LOAD_OP_HCB_ADDR_WORD_OFFSET(idx)],
+			     SEP_SW_DESC_LOAD_OP_HCB_ADDR_BIT_OFFSET,
+			     SEP_SW_DESC_LOAD_OP_HCB_ADDR_BIT_SIZE,
+			     (ctx_info_p->ctx_kptr == NULL) ? 0 :
+			     (ctxmgr_get_sep_ctx_dma_addr(ctx_info_p) >>
+			      SEP_SW_DESC_LOAD_OP_HCB_ADDR_BIT_OFFSET));
+		/* shiffting DMA address due to the "L" bit in the
+		   LS bit */
+
+		BITFIELD_SET(p[SEP_SW_DESC_LOAD_OP_L_WORD_OFFSET(idx)],
+			     SEP_SW_DESC_LOAD_OP_L_BIT_OFFSET,
+			     SEP_SW_DESC_LOAD_OP_L_BIT_SIZE,
+			     sep_ctx_load_req[idx]);
+	}
+}
+
+/**
+ * desc_q_pack_rpc_desc() - Pack the RPC (message) descriptor type
+ * @desc_p:	 The descriptor buffer
+ * @op_ctx:	 The operation context
+ * @agent_id:	 RPC agent (API) ID
+ * @func_id:	 Function ID (index)
+ * @rpc_msg_size:	 Size of RPC parameters message buffer
+ * @rpc_msg_dma_addr:	 DMA address of RPC parameters message buffer
+ *
+ */
+void desc_q_pack_rpc_desc(struct sep_sw_desc *desc_p,
+			  struct sep_op_ctx *op_ctx,
+			  u16 agent_id,
+			  u16 func_id,
+			  unsigned long rpc_msg_size,
+			  dma_addr_t rpc_msg_dma_addr)
+{
+	SEP_SW_DESC_INIT(desc_p);
+	SEP_SW_DESC_SET(desc_p, TYPE, SEP_SW_DESC_TYPE_RPC_MSG);
+	SEP_SW_DESC_SET_COOKIE(desc_p, op_ctx);
+
+#ifdef DEBUG
+	/* Verify that given agent_id is not too large for AGENT_ID field */
+	if (agent_id >= (1 << SEP_SW_DESC_RPC_MSG_AGENT_ID_BIT_SIZE)) {
+		pr_err(
+			    "Given agent_id=%d is too large for AGENT_ID field. Value truncated!",
+			    agent_id);
+	}
+#endif
+	SEP_SW_DESC_SET4TYPE(desc_p, RPC_MSG, AGENT_ID, agent_id);
+	SEP_SW_DESC_SET4TYPE(desc_p, RPC_MSG, FUNC_ID, func_id);
+	SEP_SW_DESC_SET4TYPE(desc_p, RPC_MSG, HMB_SIZE, rpc_msg_size);
+	SEP_SW_DESC_SET4TYPE(desc_p, RPC_MSG, HMB_ADDR, rpc_msg_dma_addr);
+}
+
+/**
+ * desc_q_pack_app_req_desc() - Pack the Applet Request descriptor
+ * @desc_p:	The descriptor buffer
+ * @op_ctx:	The operation context
+ * @req_type:	The Applet request type
+ * @session_id:	Session ID - Required only for SESSION_CLOSE and
+ *		COMMAND_INVOKE requests
+ * @inparams_addr:	DMA address of the "In Params." structure for the
+ *			request.
+ *
+ */
+void desc_q_pack_app_req_desc(struct sep_sw_desc *desc_p,
+			      struct sep_op_ctx *op_ctx,
+			      enum sepapp_req_type req_type,
+			      u16 session_id, dma_addr_t inparams_addr)
+{
+	SEP_SW_DESC_INIT(desc_p);
+	SEP_SW_DESC_SET(desc_p, TYPE, SEP_SW_DESC_TYPE_APP_REQ);
+	SEP_SW_DESC_SET_COOKIE(desc_p, op_ctx);
+
+	SEP_SW_DESC_SET4TYPE(desc_p, APP_REQ, REQ_TYPE, req_type);
+	SEP_SW_DESC_SET4TYPE(desc_p, APP_REQ, SESSION_ID, session_id);
+	SEP_SW_DESC_SET4TYPE(desc_p, APP_REQ, IN_PARAMS_ADDR, inparams_addr);
+}
+
+/**
+ * crypto_proc_mode_to_str() - Convert from crypto_proc_mode to string
+ * @proc_mode:	 The proc_mode enumeration value
+ *
+ * Returns A string description of the processing mode (NULL if invalid mode)
+ */
+const char *crypto_proc_mode_to_str(enum sep_proc_mode proc_mode)
+{
+	switch (proc_mode) {
+	case SEP_PROC_MODE_NOP:
+		return "NOP";
+	case SEP_PROC_MODE_PROC_T:
+		return "PROC_T";
+	case SEP_PROC_MODE_FIN:
+		return "FIN";
+	case SEP_PROC_MODE_PROC_A:
+		return "PROC_A";
+	default:
+		return "?";
+	}
+}
+
+#ifdef DEBUG
+static void dump_crypto_op_desc(const struct sep_sw_desc *desc_p)
+{
+	pr_debug("CRYPTO_OP::%s (type=%lu,cookie=0x%08lX)\n",
+		crypto_proc_mode_to_str(SEP_SW_DESC_GET4TYPE
+				(desc_p, CRYPTO_OP, PROC_MODE)),
+				SEP_SW_DESC_GET(desc_p, TYPE),
+				(uintptr_t)SEP_SW_DESC_GET_COOKIE(desc_p));
+
+	pr_debug("HCB=0x%08lX @ FwIdx=%lu %s%s\n",
+		 SEP_SW_DESC_GET4TYPE(desc_p, CRYPTO_OP, HCB_ADDR),
+		 SEP_SW_DESC_GET4TYPE(desc_p, CRYPTO_OP, FW_CACHE_IDX),
+		 SEP_SW_DESC_GET4TYPE(desc_p, CRYPTO_OP, L) ? "(load)" : "",
+		 SEP_SW_DESC_GET4TYPE(desc_p, CRYPTO_OP, I) ? "(init)" : "");
+
+	pr_debug("IFT: addr=0x%08lX , size=0x%08lX , tbl_num=%lu\n",
+		 SEP_SW_DESC_GET4TYPE(desc_p, CRYPTO_OP, IFT_ADDR),
+		 SEP_SW_DESC_GET4TYPE(desc_p, CRYPTO_OP, IFT_SIZE),
+		 SEP_SW_DESC_GET4TYPE(desc_p, CRYPTO_OP, IFT_NUM));
+
+	pr_debug("OFT: addr=0x%08lX , size=0x%08lX , tbl_num=%lu\n",
+		 SEP_SW_DESC_GET4TYPE(desc_p, CRYPTO_OP, OFT_ADDR),
+		 SEP_SW_DESC_GET4TYPE(desc_p, CRYPTO_OP, OFT_SIZE),
+		 SEP_SW_DESC_GET4TYPE(desc_p, CRYPTO_OP, OFT_NUM));
+
+	pr_debug("0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
+		 ((u32 *)desc_p)[0], ((u32 *)desc_p)[1],
+		 ((u32 *)desc_p)[2], ((u32 *)desc_p)[3],
+		 ((u32 *)desc_p)[4], ((u32 *)desc_p)[5],
+		 ((u32 *)desc_p)[6], ((u32 *)desc_p)[7]);
+}
+
+static void dump_load_op_desc(const struct sep_sw_desc *desc_p)
+{
+	u32 *p = (u32 *)desc_p;
+	u32 hcb, cache_idx, is_load;
+	int idx;
+
+	pr_debug("LOAD_OP (type=%lu,cookie=0x%08lX)\n",
+		SEP_SW_DESC_GET(desc_p, TYPE),
+		(uintptr_t)SEP_SW_DESC_GET_COOKIE(desc_p));
+
+	for (idx = 0; idx < SEP_MAX_COMBINED_ENGINES; idx++) {
+		cache_idx =
+		    BITFIELD_GET(p
+				 [SEP_SW_DESC_LOAD_OP_FW_CACHE_IDX_WORD_OFFSET],
+				 SEP_SW_DESC_LOAD_OP_FW_CACHE_IDX_BIT_OFFSET
+				 (idx),
+				 SEP_SW_DESC_LOAD_OP_FW_CACHE_IDX_BIT_SIZE);
+
+		hcb =
+		    BITFIELD_GET(p
+				 [SEP_SW_DESC_LOAD_OP_HCB_ADDR_WORD_OFFSET
+				 (idx)],
+				 SEP_SW_DESC_LOAD_OP_HCB_ADDR_BIT_OFFSET,
+				 SEP_SW_DESC_LOAD_OP_HCB_ADDR_BIT_SIZE);
+
+		is_load =
+		    BITFIELD_GET(p[SEP_SW_DESC_LOAD_OP_L_WORD_OFFSET(idx)],
+				 SEP_SW_DESC_LOAD_OP_L_BIT_OFFSET,
+				 SEP_SW_DESC_LOAD_OP_L_BIT_SIZE);
+
+		pr_debug("[%d] HCB=0x%08X FwIdx=%u %s\n",
+			      idx, hcb, cache_idx,
+			      is_load ? "(load)" : "(do not load)");
+	}
+}
+
+static void dump_combined_op_desc(const struct sep_sw_desc *desc_p)
+{
+	pr_debug("COMBINED_OP::%s (type=%lu,cookie=0x%08lX)\n",
+		crypto_proc_mode_to_str(SEP_SW_DESC_GET4TYPE
+				(desc_p, COMBINED_OP, PROC_MODE)),
+				SEP_SW_DESC_GET(desc_p, TYPE),
+				(uintptr_t)SEP_SW_DESC_GET_COOKIE(desc_p));
+
+	pr_debug("SCHEME=0x%08lX %s%s\n",
+		 SEP_SW_DESC_GET4TYPE(desc_p, COMBINED_OP, CONFIG_SCHEME),
+		 SEP_SW_DESC_GET4TYPE(desc_p, COMBINED_OP, L) ? "(load)" : "",
+		 SEP_SW_DESC_GET4TYPE(desc_p, COMBINED_OP, I) ? "(init)" : "");
+
+	pr_debug("IFT: addr=0x%08lX , size=0x%08lX , tbl_num=%lu\n",
+		 SEP_SW_DESC_GET4TYPE(desc_p, COMBINED_OP, IFT_ADDR),
+		 SEP_SW_DESC_GET4TYPE(desc_p, COMBINED_OP, IFT_SIZE),
+		 SEP_SW_DESC_GET4TYPE(desc_p, COMBINED_OP, IFT_NUM));
+
+	pr_debug("OFT: addr=0x%08lX , size=0x%08lX , tbl_num=%lu\n",
+		 SEP_SW_DESC_GET4TYPE(desc_p, COMBINED_OP, OFT_ADDR),
+		 SEP_SW_DESC_GET4TYPE(desc_p, COMBINED_OP, OFT_SIZE),
+		 SEP_SW_DESC_GET4TYPE(desc_p, COMBINED_OP, OFT_NUM));
+}
+
+static void dump_rpc_msg_desc(const struct sep_sw_desc *desc_p)
+{
+	pr_debug(
+		      "RPC_MSG: agentId=%lu, funcId=%lu, HmbAddr=0x%08lX, HmbSize=%lu\n",
+		      SEP_SW_DESC_GET4TYPE(desc_p, RPC_MSG, AGENT_ID),
+		      SEP_SW_DESC_GET4TYPE(desc_p, RPC_MSG, FUNC_ID),
+		      SEP_SW_DESC_GET4TYPE(desc_p, RPC_MSG, HMB_ADDR),
+		      SEP_SW_DESC_GET4TYPE(desc_p, RPC_MSG, HMB_SIZE));
+}
+
+static void dump_app_req_desc(const struct sep_sw_desc *desc_p)
+{
+	pr_debug(
+		      "APP_REQ: reqType=%lu, sessionId=%lu, InParamsAddr=0x%08lX\n",
+		      SEP_SW_DESC_GET4TYPE(desc_p, APP_REQ, REQ_TYPE),
+		      SEP_SW_DESC_GET4TYPE(desc_p, APP_REQ, SESSION_ID),
+		      SEP_SW_DESC_GET4TYPE(desc_p, APP_REQ, IN_PARAMS_ADDR));
+}
+
+static void dump_desc(const struct sep_sw_desc *desc_p)
+{				/* dump descriptor based on its type */
+	switch (SEP_SW_DESC_GET(desc_p, TYPE)) {
+	case SEP_SW_DESC_TYPE_NULL:
+		pr_debug("NULL descriptor type.\n");
+		break;
+	case SEP_SW_DESC_TYPE_CRYPTO_OP:
+		dump_crypto_op_desc(desc_p);
+		break;
+	case SEP_SW_DESC_TYPE_LOAD_OP:
+		dump_load_op_desc(desc_p);
+		break;
+	case SEP_SW_DESC_TYPE_COMBINED_OP:
+		dump_combined_op_desc(desc_p);
+		break;
+	case SEP_SW_DESC_TYPE_RPC_MSG:
+		dump_rpc_msg_desc(desc_p);
+		break;
+	case SEP_SW_DESC_TYPE_APP_REQ:
+		dump_app_req_desc(desc_p);
+		break;
+	case SEP_SW_DESC_TYPE_DEBUG:
+		pr_debug("DEBUG descriptor type.\n");
+		break;
+	default:
+		pr_warn("Unknown descriptor type = %lu\n",
+			     SEP_SW_DESC_GET(desc_p, TYPE));
+	}
+}
+#endif /*DEBUG*/
diff --git a/drivers/staging/sep54/desc_mgr.h b/drivers/staging/sep54/desc_mgr.h
new file mode 100644
index 0000000..c549a5a
--- /dev/null
+++ b/drivers/staging/sep54/desc_mgr.h
@@ -0,0 +1,276 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+/*! \file desc_mgr.h
+    Descriptor manager API and associated data structures
+*/
+
+#ifndef _DESC_MGR_H_
+#define _DESC_MGR_H_
+
+#include "sep_sw_desc.h"
+
+/* Max. from Combined mode operation */
+#define MAX_PENDING_DESCS 2
+
+/* Evaluate desc_q_enqueue() return code (EINPROGRESS and EBUSY are ok) */
+#define IS_DESCQ_ENQUEUE_ERR(_rc) ((_rc != -EINPROGRESS) && (_rc != -EBUSY))
+
+#define DESC_Q_INVALID_HANDLE NULL
+
+/* Opaque structure - accessed using SEP_SW_DESC_* macros */
+struct sep_sw_desc {
+	u32 data[SEP_SW_DESC_WORD_SIZE];
+};
+
+enum desc_q_state {
+	DESC_Q_UNINITIALIZED,	/* Before initializing the queue */
+	DESC_Q_ACTIVE,		/* Queue has pending requests    */
+	DESC_Q_ASLEEP		/* Queue is in "sleep" state (cannot accept
+				   additional requests - descs should be
+				   enqueued in descs backlog queue) */
+};
+
+/* Declare like this to avoid cyclic inclusion with dx_cc54_driver.h */
+struct sep_op_ctx;
+struct queue_drvdata;
+struct sep_app_session;
+
+/**
+ * desc_q_create() - Create descriptors queue object
+ * @qid:	 The queue ID (index)
+ * @drvdata:	 The associated queue driver data
+ *
+ * Returns Allocated queue object handle (DESC_Q_INVALID_HANDLE for failure)
+ */
+void *desc_q_create(int qid, struct queue_drvdata *drvdata);
+
+/**
+ * desc_q_destroy() - Destroy descriptors queue object (free resources)
+ * @q_h: The queue object handle
+ */
+void desc_q_destroy(void *q_h);
+
+/**
+ * desc_q_set_state() - Set queue state (SLEEP or ACTIVE)
+ * @q_h:	The queue object handle
+ * @state:	The requested state
+ */
+int desc_q_set_state(void *q_h, enum desc_q_state state);
+
+/**
+ * desc_q_get_state() - Get queue state
+ * @q_h:	The queue object handle
+ */
+enum desc_q_state desc_q_get_state(void *q_h);
+
+/**
+ * desc_q_is_idle() - Report if given queue is active but empty/idle.
+ * @q_h:		The queue object handle
+ * @idle_jiffies_p:	Return jiffies at which the queue became idle
+ */
+bool desc_q_is_idle(void *q_h, unsigned long *idle_jiffies_p);
+
+/**
+ * desc_q_reset() - Reset sent/completed counters of queue
+ * @q_h:	The queue object handle
+ *
+ * This function should be invoked only when the queue is in ASLEEP state
+ * after the transition of SeP to sleep state completed.
+ * Returns -EBUSY if the queue is not in the correct state for reset.
+ */
+int desc_q_reset(void *q_h);
+
+/**
+ * desc_q_enqueue_sleep_req() - Enqueue SLEEP_REQ descriptor
+ * @q_h:	The queue object handle
+ * @op_ctx:	The operation context for this descriptor
+ */
+int desc_q_enqueue_sleep_req(void *q_h, struct sep_op_ctx *op_ctx);
+
+/**
+ * desc_q_get_info4sep()- Get queue address and size to be used in FW init phase
+ * @q_h:		The queue object handle
+ * @base_addr_p:	Base address return parameter
+ * @size_p:		Queue size (in bytes) return parameter
+ */
+void desc_q_get_info4sep(void *q_h,
+			 dma_addr_t *base_addr_p, unsigned long *size_p);
+
+/**
+ * desc_q_enqueue() - Enqueue given descriptor in given queue
+ * @q_h:		The queue object handle
+ * @desc_p:		Pointer to descriptor
+ * @may_backlog:	When "true" and descQ is full or ASLEEP, may enqueue
+ *			the given desc. in the backlog queue.
+ *			When "false", any of the above cases would cause
+ *			returning -ENOMEM.
+ *
+ * The function updates the op_ctx->op_state accoring to its results.
+ * Returns -EINPROGRESS on success to dispatch into the SW desc. q.
+ * Returns -EBUSY if may_backlog==true and the descriptor was enqueued in the
+ * the backlog queue.
+ * Returns -ENOMEM if queue is full and cannot enqueue in the backlog queue
+ */
+int desc_q_enqueue(void *q_h, struct sep_sw_desc *desc_p, bool may_backlog);
+
+/*!
+ * Mark given cookie as invalid in case marked as completed after a timeout
+ * Invoke this before releasing the op_ctx object.
+ * There is no race with the interrupt because the client_ctx (cookie) is still
+ * valid when invoking this function.
+ *
+ * \param q_h Descriptor queue handle
+ * \param cookie Invalidate descriptors with this cookie
+ */
+void desc_q_mark_invalid_cookie(void *q_h, void *cookie);
+
+/*!
+ * Dequeue and process any completed descriptors in the queue
+ * (This function assumes non-reentrancy since it is invoked from
+ *  either interrupt handler or in workqueue context)
+ *
+ * \param q_h The queue object handle
+ *
+ */
+void desc_q_process_completed(void *q_h);
+
+/*!
+ * Create a debug descriptor in given buffer
+ *
+ * \param desc_p The descriptor buffer
+ * \param op_ctx The operation context
+ * TODO: Get additional debug descriptors (in addition to loopback)
+ */
+void desq_q_pack_debug_desc(struct sep_sw_desc *desc_p,
+			    struct sep_op_ctx *op_ctx);
+
+/*!
+ * Pack a CRYPTO_OP descriptor in given descriptor buffer
+ *
+ * \param desc_p The descriptor buffer
+ * \param op_ctx The operation context
+ * \param sep_ctx_load_req Context load request flag
+ * \param sep_ctx_init_req Context initialize request flag
+ * \param proc_mode Descriptor processing mode
+ */
+void desc_q_pack_crypto_op_desc(struct sep_sw_desc *desc_p,
+				struct sep_op_ctx *op_ctx,
+				int sep_ctx_load_req, int sep_ctx_init_req,
+				enum sep_proc_mode proc_mode);
+
+/*!
+ * Pack a COMBINED_OP descriptor in given descriptor buffer
+ *
+ * \param desc_p The descriptor buffer
+ * \param op_ctx The operation context
+ * \param sep_ctx_load_req Context load request flag
+ * \param sep_ctx_init_req Context initialize request flag
+ * \param proc_mode Descriptor processing mode
+ * \param cfg_scheme The SEP format configuration scheme claimed by the user
+ */
+void desc_q_pack_combined_op_desc(struct sep_sw_desc *desc_p,
+				  struct sep_op_ctx *op_ctx,
+				  int sep_ctx_load_req, int sep_ctx_init_req,
+				  enum sep_proc_mode proc_mode,
+				  u32 cfg_scheme);
+/*!
+ * Pack a LOAD_OP descriptor in given descriptor buffer
+ *
+ * \param desc_p The descriptor buffer
+ * \param op_ctx The operation context
+ * \param sep_ctx_load_req Context load request flag
+ */
+void desc_q_pack_load_op_desc(struct sep_sw_desc *desc_p,
+			      struct sep_op_ctx *op_ctx, int *sep_ctx_load_req);
+
+/*!
+ * Pack the RPC (message) descriptor type
+ *
+ *
+ * \param desc_p The descriptor buffer
+ * \param op_ctx The operation context
+ * \param agent_id RPC agent (API) ID
+ * \param func_id Function ID (index)
+ * \param rpc_msg_size Size of RPC parameters message buffer
+ * \param rpc_msg_dma_addr DMA address of RPC parameters message buffer
+ */
+void desc_q_pack_rpc_desc(struct sep_sw_desc *desc_p,
+			  struct sep_op_ctx *op_ctx,
+			  u16 agent_id,
+			  u16 func_id,
+			  unsigned long rpc_msg_size,
+			  dma_addr_t rpc_msg_dma_addr);
+
+/*!
+ * Pack the Applet Request descriptor
+ *
+ * \param desc_p The descriptor buffer
+ * \param op_ctx The operation context
+ * \param req_type The Applet request type
+ * \param session_id Session ID - Required only for SESSION_CLOSE and
+ *                   COMMAND_INVOKE requests
+ * \param inparams_addr DMA address of the "In Params." structure for the
+ *                      request.
+ */
+void desc_q_pack_app_req_desc(struct sep_sw_desc *desc_p,
+			      struct sep_op_ctx *op_ctx,
+			      enum sepapp_req_type req_type,
+			      u16 session_id, dma_addr_t inparams_addr);
+
+/*!
+ * Convert from crypto_proc_mode to string
+ *
+ * \param proc_mode The proc_mode enumeration value
+ *
+ * \return A string description of the processing mode
+ */
+const char *crypto_proc_mode_to_str(enum sep_proc_mode proc_mode);
+
+inline u32 add_cookie(uintptr_t op_ctx);
+
+inline void delete_cookie(u32 index);
+
+inline void delete_context(uintptr_t op_ctx);
+
+inline uintptr_t get_cookie(u32 index);
+
+#define SEP_SW_DESC_GET_COOKIE(desc_p)\
+	((struct sep_op_ctx *)get_cookie(((u32 *)desc_p)[SEP_SW_DESC_COOKIE_WORD_OFFSET]))
+
+#define SEP_SW_DESC_SET_COOKIE(desc_p, op_ctx) \
+do {\
+	u32 __ctx_ptr__ = 0;\
+	if (op_ctx == NULL) {\
+		delete_cookie(((u32 *)desc_p)[SEP_SW_DESC_COOKIE_WORD_OFFSET]);\
+	} else {\
+		__ctx_ptr__ = add_cookie((uintptr_t)op_ctx);\
+	} \
+	memcpy(((u32 *)desc_p) + SEP_SW_DESC_COOKIE_WORD_OFFSET,\
+	&__ctx_ptr__, sizeof(u32));\
+} while (0)
+
+#endif /*_DESC_MGR_H_*/
diff --git a/drivers/staging/sep54/dx_bitops.h b/drivers/staging/sep54/dx_bitops.h
new file mode 100644
index 0000000..fd9524c
--- /dev/null
+++ b/drivers/staging/sep54/dx_bitops.h
@@ -0,0 +1,58 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+/*!
+ * \file dx_bitops.h
+ * Bit fields operations macros.
+ */
+#ifndef _DX_BITOPS_H_
+#define _DX_BITOPS_H_
+
+#define BITMASK(mask_size) (((mask_size) < 32) ?	\
+	((1UL << (mask_size)) - 1) : 0xFFFFFFFFUL)
+#define BITMASK_AT(mask_size, mask_offset) (BITMASK(mask_size) << (mask_offset))
+
+#define BITFIELD_GET(word, bit_offset, bit_size)	\
+	(((word) >> (bit_offset)) & BITMASK(bit_size))
+#define BITFIELD_SET(word, bit_offset, bit_size, new_val) \
+	(word = ((word) & ~BITMASK_AT(bit_size, bit_offset)) |		\
+		(((new_val) & BITMASK(bit_size)) << (bit_offset)))
+
+/* Is val aligned to "align" ("align" must be power of 2) */
+#ifndef IS_ALIGNED
+#define IS_ALIGNED(val, align)	(((u32)(val) & ((align) - 1)) == 0)
+#endif
+
+#define SWAP_ENDIAN(word)		\
+	(((word) >> 24) | (((word) & 0x00FF0000) >> 8) | \
+	(((word) & 0x0000FF00) << 8) | (((word) & 0x000000FF) << 24))
+
+/* Is val a multiple of "mult" ("mult" must be power of 2) */
+#define IS_MULT(val, mult)	(((val) & ((mult) - 1)) == 0)
+
+#define IS_NULL_ADDR(adr)	(!(adr))
+
+#endif /*_DX_BITOPS_H_*/
diff --git a/drivers/staging/sep54/dx_cc_defs.h b/drivers/staging/sep54/dx_cc_defs.h
new file mode 100644
index 0000000..0bdd38d
--- /dev/null
+++ b/drivers/staging/sep54/dx_cc_defs.h
@@ -0,0 +1,42 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+#ifndef _DX_CC_DEFS_H_
+#define _DX_CC_DEFS_H
+
+#define DX_INT32_MAX 0x7FFFFFFFL
+
+enum dx_crypto_key_type {
+	DX_USER_KEY = 0,
+	DX_ROOT_KEY = 1,
+	DX_PROVISIONING_KEY = 2,
+	DX_XOR_HDCP_KEY = 3,
+	DX_APPLET_KEY = 4,
+	DX_SESSION_KEY = 5,
+	DX_END_OF_KEYS = DX_INT32_MAX
+};
+
+#endif
diff --git a/drivers/staging/sep54/dx_cc_regs.h b/drivers/staging/sep54/dx_cc_regs.h
new file mode 100644
index 0000000..2f91bd6
--- /dev/null
+++ b/drivers/staging/sep54/dx_cc_regs.h
@@ -0,0 +1,162 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+/*!
+ * \file dx_cc_regs.h
+ * \brief Macro definitions for accessing Dx CryptoCell register space
+ *
+ * For SeP code define DX_CC_SEP
+ * For Host physical/direct registers access define DX_CC_HOST
+ * For Host virtual mapping of registers define DX_CC_HOST_VIRT
+ */
+
+#ifndef _DX_CC_REGS_H_
+#define _DX_CC_REGS_H_
+
+#include "dx_bitops.h"
+
+/* Include register base addresses data */
+#if defined(DX_CC_SEP)
+#include "dx_reg_base_sep.h"
+
+#elif defined(DX_CC_HOST) || defined(DX_CC_HOST_VIRT) || defined(DX_CC_TEE)
+#include "dx_reg_base_host.h"
+
+#else
+#error Define either DX_CC_SEP or DX_CC_HOST or DX_CC_HOST_VIRT
+#endif
+
+/* CC registers address calculation */
+#if defined(DX_CC_SEP)
+#define DX_CC_REG_ADDR(unit_name, reg_name)            \
+	 (DX_BASE_CC_PERIF + DX_BASE_ ## unit_name + \
+	  DX_ ## reg_name ## _REG_OFFSET)
+
+/* In host macros we ignore the unit_name because all offsets are from base */
+#elif defined(DX_CC_HOST)
+#define DX_CC_REG_ADDR(unit_name, reg_name)            \
+	(DX_BASE_CC + DX_ ## reg_name ## _REG_OFFSET)
+
+#elif defined(DX_CC_TEE)
+#define DX_CC_REG_ADDR(unit_name, reg_name)            \
+	(DX_BASE_CC + DX_ ## reg_name ## _REG_OFFSET)
+
+#elif defined(DX_CC_HOST_VIRT)
+#define DX_CC_REG_ADDR(cc_base_virt, unit_name, reg_name) \
+	(((unsigned long)(cc_base_virt)) + DX_ ## reg_name ## _REG_OFFSET)
+
+#endif
+
+/* Register Offset macros (from registers base address in host) */
+#if defined(DX_CC_HOST) || defined(DX_CC_HOST_VIRT)
+
+#define DX_CC_REG_OFFSET(reg_domain, reg_name)               \
+	(DX_ ## reg_domain ## _ ## reg_name ## _REG_OFFSET)
+
+/* Indexed GPR offset macros - note the (not original) preprocessor tricks...*/
+/* (Using the macro without the "_" prefix is allowed with another macro      *
+ *  as the gpr_idx) */
+#define _SEP_HOST_GPR_REG_OFFSET(gpr_idx) \
+	DX_CC_REG_OFFSET(HOST, SEP_HOST_GPR ## gpr_idx)
+#define SEP_HOST_GPR_REG_OFFSET(gpr_idx) _SEP_HOST_GPR_REG_OFFSET(gpr_idx)
+#define _HOST_SEP_GPR_REG_OFFSET(gpr_idx) \
+	DX_CC_REG_OFFSET(HOST, HOST_SEP_GPR ## gpr_idx)
+#define HOST_SEP_GPR_REG_OFFSET(gpr_idx) _HOST_SEP_GPR_REG_OFFSET(gpr_idx)
+
+/* GPR IRQ bit mask by GPR index */
+#define _SEP_HOST_GPR_IRQ_MASK(gpr_idx) \
+	(1 << DX_HOST_IRR_SEP_HOST_GPR ## gpr_idx ## _INT_BIT_SHIFT)
+#define SEP_HOST_GPR_IRQ_MASK(gpr_idx) _SEP_HOST_GPR_IRQ_MASK(gpr_idx)
+
+#elif defined(DX_CC_SEP)
+
+#define DX_CC_REG_OFFSET(unit_name, reg_name)               \
+	(DX_BASE_ ## unit_name + DX_ ## reg_name ## _REG_OFFSET)
+
+/* Indexed GPR address macros - note the (not original) preprocessor tricks...*/
+/* (Using the macro without the "_" prefix is allowed with another macro      *
+ *  as the gpr_idx) */
+#define _SEP_HOST_GPR_REG_ADDR(gpr_idx) \
+	DX_CC_REG_ADDR(SEP_RGF, SEP_SEP_HOST_GPR ## gpr_idx)
+#define SEP_HOST_GPR_REG_ADDR(gpr_idx) _SEP_HOST_GPR_REG_ADDR(gpr_idx)
+#define _HOST_SEP_GPR_REG_ADDR(gpr_idx) \
+	DX_CC_REG_ADDR(SEP_RGF, SEP_HOST_SEP_GPR ## gpr_idx)
+#define HOST_SEP_GPR_REG_ADDR(gpr_idx) _HOST_SEP_GPR_REG_ADDR(gpr_idx)
+
+#elif defined(DX_CC_TEE)
+
+#define DX_CC_REG_OFFSET(unit_name, reg_name) \
+	(DX_BASE_ ## unit_name + DX_ ## reg_name ## _REG_OFFSET)
+
+#else
+#error "Undef exec domain,not DX_CC_SEP, DX_CC_HOST, DX_CC_HOST_VIRT, DX_CC_TEE"
+#endif
+
+/* Registers address macros for ENV registers (development FPGA only) */
+#ifdef DX_BASE_ENV_REGS
+
+#if defined(DX_CC_HOST)
+#define DX_ENV_REG_ADDR(reg_name) \
+	(DX_BASE_ENV_REGS + DX_ENV_ ## reg_name ## _REG_OFFSET)
+
+#elif defined(DX_CC_HOST_VIRT)
+/* The OS driver resource address space covers the ENV registers, too */
+/* Since DX_BASE_ENV_REGS is given in absolute address, we calc. the distance */
+#define DX_ENV_REG_ADDR(cc_base_virt, reg_name) \
+	(((cc_base_virt) + (DX_BASE_ENV_REGS - DX_BASE_CC)) + \
+	 DX_ENV_ ## reg_name ## _REG_OFFSET)
+
+#endif
+
+#endif				/*DX_BASE_ENV_REGS */
+
+/* Bit fields access */
+#define DX_CC_REG_FLD_GET(unit_name, reg_name, fld_name, reg_val)	      \
+	(DX_ ## reg_name ## _ ## fld_name ## _BIT_SIZE == 0x20 ?	      \
+	reg_val /* Optimization for 32b fields */ :			      \
+	BITFIELD_GET(reg_val, DX_ ## reg_name ## _ ## fld_name ## _BIT_SHIFT, \
+		     DX_ ## reg_name ## _ ## fld_name ## _BIT_SIZE))
+
+#define DX_CC_REG_FLD_SET(                                               \
+	unit_name, reg_name, fld_name, reg_shadow_var, new_fld_val)      \
+do {                                                                     \
+	if (DX_ ## reg_name ## _ ## fld_name ## _BIT_SIZE == 0x20)       \
+		reg_shadow_var = new_fld_val; /* Optimization for 32b fields */\
+	else                                                             \
+		BITFIELD_SET(reg_shadow_var,                             \
+			DX_ ## reg_name ## _ ## fld_name ## _BIT_SHIFT,  \
+			DX_ ## reg_name ## _ ## fld_name ## _BIT_SIZE,   \
+			new_fld_val);                                    \
+} while (0)
+
+/* Usage example:
+   u32 reg_shadow = READ_REGISTER(DX_CC_REG_ADDR(CRY_KERNEL,AES_CONTROL));
+   DX_CC_REG_FLD_SET(CRY_KERNEL,AES_CONTROL,NK_KEY0,reg_shadow, 3);
+   DX_CC_REG_FLD_SET(CRY_KERNEL,AES_CONTROL,NK_KEY1,reg_shadow, 1);
+   WRITE_REGISTER(DX_CC_REG_ADDR(CRY_KERNEL,AES_CONTROL), reg_shadow);
+ */
+
+#endif /*_DX_CC_REGS_H_*/
diff --git a/drivers/staging/sep54/dx_dev_defs.h b/drivers/staging/sep54/dx_dev_defs.h
new file mode 100644
index 0000000..1626df8
--- /dev/null
+++ b/drivers/staging/sep54/dx_dev_defs.h
@@ -0,0 +1,67 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+/** @file: dx_dev_defs.h
+ * Device specific definitions for the CC device driver */
+
+#ifndef _DX_DEV_DEFS_H_
+#define _DX_DEV_DEFS_H_
+
+#define DRIVER_NAME MODULE_NAME
+
+/* OpenFirmware matches */
+#define DX_DEV_OF_MATCH  {                        \
+	{.name = "dx_cc54" },                     \
+	{.compatible = "xlnx,plbv46-cc-1.00.c",}, \
+	{}                                        \
+}
+
+/* Firmware images file names (for request_firmware) */
+#define RESIDENT_IMAGE_NAME DRIVER_NAME "-resident.bin"
+#define CACHE_IMAGE_NAME DRIVER_NAME "-cache.bin"
+#define VRL_IMAGE_NAME DRIVER_NAME "-Primary_VRL.bin"
+
+/* OTP index of verification hash for key in VRL */
+#define VRL_KEY_INDEX 0
+
+#define ICACHE_SIZE_LOG2_DEFAULT 20	/* 1MB */
+#define DCACHE_SIZE_LOG2_DEFAULT 20	/* 1MB */
+
+#define EXPECTED_FW_VER 0x01000000
+
+/* The known SeP clock frequency in MHz (30 MHz on Virtex-5 FPGA) */
+/* Comment this line if SeP frequency is already initialized in CC_INIT ext. */
+/*#define SEP_FREQ_MHZ 30*/
+
+/* Number of SEP descriptor queues */
+#define SEP_MAX_NUM_OF_DESC_Q  2
+
+/* Maximum number of registered memory buffers per user context */
+#define MAX_REG_MEMREF_PER_CLIENT_CTX 16
+
+/* Maximum number of SeP Applets session per client context */
+#define MAX_SEPAPP_SESSION_PER_CLIENT_CTX 16
+
+#endif				/* _DX_DEV_DEFS_H_ */
diff --git a/drivers/staging/sep54/dx_driver.c b/drivers/staging/sep54/dx_driver.c
new file mode 100644
index 0000000..6c55c1d
--- /dev/null
+++ b/drivers/staging/sep54/dx_driver.c
@@ -0,0 +1,4916 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+#define SEP_LOG_CUR_COMPONENT SEP_LOG_MASK_MAIN
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/fcntl.h>
+#include <linux/poll.h>
+#include <linux/proc_fs.h>
+#include <linux/mutex.h>
+#include <linux/sysctl.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/platform_device.h>
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+/* cache.h required for L1_CACHE_ALIGN() and cache_line_size() */
+#include <linux/cache.h>
+#include <asm/byteorder.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+#include <linux/pagemap.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/kthread.h>
+#include <linux/genhd.h>
+#include <linux/mmc/card.h>
+
+#include <generated/autoconf.h>
+#if defined(DEBUG) && defined(CONFIG_KGDB)
+/* For setup_break option */
+#include <linux/kgdb.h>
+#endif
+
+#include "sep_log.h"
+#include "sep_init.h"
+#include "desc_mgr.h"
+#include "lli_mgr.h"
+#include "sep_sysfs.h"
+#include "dx_driver_abi.h"
+#include "crypto_ctx_mgr.h"
+#include "crypto_api.h"
+#include "sep_request.h"
+#include "dx_sep_kapi.h"
+#if MAX_SEPAPP_SESSION_PER_CLIENT_CTX > 0
+#include "sepapp.h"
+#endif
+#include "sep_power.h"
+#include "sep_request_mgr.h"
+#include "sep_applets.h"
+
+/* Registers definitions from shared/hw/include */
+#include "dx_reg_base_host.h"
+#include "dx_host.h"
+#ifdef DX_BASE_ENV_REGS
+#include "dx_env.h"
+#endif
+#define DX_CC_HOST_VIRT	/* must be defined before including dx_cc_regs.h */
+#include "dx_cc_regs.h"
+#include "dx_init_cc_abi.h"
+#include "dx_driver.h"
+
+#ifdef CONFIG_COMPAT
+#include "sep_compat_ioctl.h"
+#endif
+
+#if SEPAPP_UUID_SIZE != DXDI_SEPAPP_UUID_SIZE
+#error Size mismatch of SEPAPP_UUID_SIZE and DXDI_SEPAPP_UUID_SIZE
+#endif
+
+#define DEVICE_NAME_PREFIX  "dx_sep_q"
+#define SEP_DEVICES         SEP_MAX_NUM_OF_DESC_Q
+
+#define DRIVER_NAME     MODULE_NAME
+#define SEP_DEVICES     SEP_MAX_NUM_OF_DESC_Q
+
+#ifdef SEP_PRINTF
+#define SEP_PRINTF_H2S_GPR_OFFSET \
+	HOST_SEP_GPR_REG_OFFSET(DX_SEP_HOST_PRINTF_GPR_IDX)
+#define SEP_PRINTF_S2H_GPR_OFFSET \
+	SEP_HOST_GPR_REG_OFFSET(DX_SEP_HOST_PRINTF_GPR_IDX)
+/* Ack is allocated only upper 24 bits */
+#define SEP_PRINTF_ACK_MAX 0xFFFFFF
+/* Sync. host-SeP value */
+#define SEP_PRINTF_ACK_SYNC_VAL SEP_PRINTF_ACK_MAX
+#endif
+
+static struct class *sep_class;
+
+int q_num;			/* Initialized to 0 */
+module_param(q_num, int, 0444);
+MODULE_PARM_DESC(q_num, "Num. of active queues 1-2");
+
+int sep_log_level = SEP_BASE_LOG_LEVEL;
+module_param(sep_log_level, int, 0644);
+MODULE_PARM_DESC(sep_log_level, "Log level: min ERR = 0, max TRACE = 4");
+
+int sep_log_mask = SEP_LOG_MASK_ALL;
+module_param(sep_log_mask, int, 0644);
+MODULE_PARM_DESC(sep_log_mask, "Log components mask");
+
+int disable_linux_crypto;
+module_param(disable_linux_crypto, int, 0444);
+MODULE_PARM_DESC(disable_linux_crypto,
+		 "Set to !0 to disable registration with Linux CryptoAPI");
+
+/* Parameters to override default sep cache memories reserved pages */
+#ifdef ICACHE_SIZE_LOG2_DEFAULT
+#include "dx_init_cc_defs.h"
+int icache_size_log2 = ICACHE_SIZE_LOG2_DEFAULT;
+module_param(icache_size_log2, int, 0444);
+MODULE_PARM_DESC(icache_size_log2, "Size of Icache memory in log2(bytes)");
+
+int dcache_size_log2 = DCACHE_SIZE_LOG2_DEFAULT;
+module_param(dcache_size_log2, int, 0444);
+MODULE_PARM_DESC(icache_size_log2, "Size of Dcache memory in log2(bytes)");
+#endif
+
+#ifdef SEP_BACKUP_BUF_SIZE
+int sep_backup_buf_size = SEP_BACKUP_BUF_SIZE;
+module_param(sep_backup_buf_size, int, 0444);
+MODULE_PARM_DESC(sep_backup_buf_size,
+		 "Size of backup buffer of SeP context (for warm-boot)");
+#endif
+
+/* Interrupt mask assigned to GPRs */
+/* Used for run time lookup, where SEP_HOST_GPR_IRQ_MASK() cannot be used */
+static const u32 gpr_interrupt_mask[] = {
+	SEP_HOST_GPR_IRQ_MASK(0),
+	SEP_HOST_GPR_IRQ_MASK(1),
+	SEP_HOST_GPR_IRQ_MASK(2),
+	SEP_HOST_GPR_IRQ_MASK(3),
+	SEP_HOST_GPR_IRQ_MASK(4),
+	SEP_HOST_GPR_IRQ_MASK(5),
+	SEP_HOST_GPR_IRQ_MASK(6),
+	SEP_HOST_GPR_IRQ_MASK(7)
+};
+
+u32 __iomem *security_cfg_reg;
+
+#ifdef DEBUG
+void dump_byte_array(const char *name, const u8 *the_array,
+		     unsigned long size)
+{
+	int i, line_offset = 0;
+	const u8 *cur_byte;
+	char line_buf[80];
+
+	line_offset = snprintf(line_buf, sizeof(line_buf), "%s[%lu]: ",
+			       name, size);
+
+	for (i = 0, cur_byte = the_array;
+	     (i < size) && (line_offset < sizeof(line_buf)); i++, cur_byte++) {
+		line_offset += snprintf(line_buf + line_offset,
+					sizeof(line_buf) - line_offset,
+					"%02X ", *cur_byte);
+		if (line_offset > 75) {	/* Cut before line end */
+			pr_debug("%s\n", line_buf);
+			line_offset = 0;
+		}
+	}
+
+	if (line_offset > 0)	/* Dump remainding line */
+		pr_debug("%s\n", line_buf);
+
+}
+
+void dump_word_array(const char *name, const u32 *the_array,
+		     unsigned long size_in_words)
+{
+	int i, line_offset = 0;
+	const u32 *cur_word;
+	char line_buf[80];
+
+	line_offset = snprintf(line_buf, sizeof(line_buf), "%s[%lu]: ",
+			       name, size_in_words);
+
+	for (i = 0, cur_word = the_array;
+	     (i < size_in_words) && (line_offset < sizeof(line_buf));
+	     i++, cur_word++) {
+		line_offset += snprintf(line_buf + line_offset,
+					sizeof(line_buf) - line_offset,
+					"%08X ", *cur_word);
+		if (line_offset > 70) {	/* Cut before line end */
+			pr_debug("%s\n", line_buf);
+			line_offset = 0;
+		}
+	}
+
+	if (line_offset > 0)	/* Dump remainding line */
+		pr_debug("%s\n", line_buf);
+}
+
+#endif /*DEBUG*/
+/**** SeP descriptor operations implementation functions *****/
+/* (send descriptor, wait for completion, process result)    */
+/**
+ * send_crypto_op_desc() - Pack crypto op. descriptor and send
+ * @op_ctx:
+ * @sep_ctx_load_req:	Flag if context loading is required
+ * @sep_ctx_init_req:	Flag if context init is required
+ * @proc_mode:		Processing mode
+ *
+ * On failure desc_type is retained so process_desc_completion cleans up
+ * resources anyway (error_info denotes failure to send/complete)
+ * Returns int 0 on success
+ */
+static int send_crypto_op_desc(struct sep_op_ctx *op_ctx,
+			       int sep_ctx_load_req, int sep_ctx_init_req,
+			       enum sep_proc_mode proc_mode)
+{
+	const struct queue_drvdata *drvdata = op_ctx->client_ctx->drv_data;
+	struct sep_sw_desc desc;
+	int rc;
+
+	desc_q_pack_crypto_op_desc(&desc, op_ctx,
+				   sep_ctx_load_req, sep_ctx_init_req,
+				   proc_mode);
+	/* op_state must be updated before dispatching descriptor */
+	rc = desc_q_enqueue(drvdata->desc_queue, &desc, true);
+	if (unlikely(IS_DESCQ_ENQUEUE_ERR(rc))) {
+		/* Desc. sending failed - "signal" process_desc_completion */
+		op_ctx->error_info = DXDI_ERROR_INTERNAL;
+	} else {
+		rc = 0;
+	}
+
+	return rc;
+}
+
+/**
+ * send_combined_op_desc() - Pack combined or/and load operation descriptor(s)
+ * @op_ctx:
+ * @sep_ctx_load_req: Flag if context loading is required
+ * @sep_ctx_init_req: Flag if context init is required
+ * @proc_mode: Processing mode
+ * @cfg_scheme: The SEP format configuration scheme claimed by the user
+ *
+ * On failure desc_type is retained so process_desc_completion cleans up
+ * resources anyway (error_info denotes failure to send/complete)
+ * Returns int 0 on success
+ */
+static int send_combined_op_desc(struct sep_op_ctx *op_ctx,
+				 int *sep_ctx_load_req, int sep_ctx_init_req,
+				 enum sep_proc_mode proc_mode,
+				 u32 cfg_scheme)
+{
+	const struct queue_drvdata *drvdata = op_ctx->client_ctx->drv_data;
+	struct sep_sw_desc desc;
+	int rc;
+
+	/* transaction of two descriptors */
+	op_ctx->pending_descs_cntr = 2;
+
+	/* prepare load descriptor of combined associated contexts */
+	desc_q_pack_load_op_desc(&desc, op_ctx, sep_ctx_load_req);
+
+	/* op_state must be updated before dispatching descriptor */
+	rc = desc_q_enqueue(drvdata->desc_queue, &desc, true);
+	if (unlikely(IS_DESCQ_ENQUEUE_ERR(rc))) {
+		/* Desc. sending failed - "signal" process_desc_completion */
+		op_ctx->error_info = DXDI_ERROR_NO_RESOURCE;
+		goto send_combined_op_exit;
+	}
+
+	/* prepare crypto descriptor for the combined scheme operation */
+	desc_q_pack_combined_op_desc(&desc, op_ctx, 0/* contexts already loaded
+						      * in prior descriptor */ ,
+				     sep_ctx_init_req, proc_mode, cfg_scheme);
+	rc = desc_q_enqueue(drvdata->desc_queue, &desc, true);
+	if (unlikely(IS_DESCQ_ENQUEUE_ERR(rc))) {
+		/*invalidate first descriptor (if still pending) */
+		desc_q_mark_invalid_cookie(drvdata->desc_queue, (void *)op_ctx);
+		/* Desc. sending failed - "signal" process_desc_completion */
+		op_ctx->error_info = DXDI_ERROR_NO_RESOURCE;
+	} else {
+		rc = 0;
+	}
+
+ send_combined_op_exit:
+	return rc;
+}
+
+/**
+ * register_client_memref() - Register given client memory buffer reference
+ * @client_ctx:		User context data
+ * @user_buf_ptr:	Buffer address in user space. NULL if sgl!=NULL.
+ * @sgl:		Scatter/gather list (for kernel buffers)
+ *			NULL if user_buf_ptr!=NULL.
+ * @buf_size:		Buffer size in bytes
+ * @dma_direction:	DMA direction
+ *
+ * Returns int >= is the registered memory reference ID, <0 for error
+ */
+int register_client_memref(struct sep_client_ctx *client_ctx,
+			   u8 __user *user_buf_ptr,
+			   struct scatterlist *sgl,
+			   const unsigned long buf_size,
+			   const enum dma_data_direction dma_direction)
+{
+	int free_memref_idx, rc;
+	struct registered_memref *regmem_p;
+
+	if (unlikely((user_buf_ptr != NULL) && (sgl != NULL))) {
+		pr_err("Both user_buf_ptr and sgl are given!\n");
+		return -EINVAL;
+	}
+
+	/* Find free entry in user_memref */
+	for (free_memref_idx = 0, regmem_p = &client_ctx->reg_memrefs[0];
+	     free_memref_idx < MAX_REG_MEMREF_PER_CLIENT_CTX;
+	     free_memref_idx++, regmem_p++) {
+		mutex_lock(&regmem_p->buf_lock);
+		if (regmem_p->ref_cnt == 0)
+			break;	/* found free entry */
+		mutex_unlock(&regmem_p->buf_lock);
+	}
+	if (unlikely(free_memref_idx == MAX_REG_MEMREF_PER_CLIENT_CTX)) {
+		pr_warn("No free entry for user memory registration\n");
+		free_memref_idx = -ENOMEM;/* Negative error code as index */
+	} else {
+		pr_debug("Allocated memref_idx=%d (regmem_p=%p)\n",
+			      free_memref_idx, regmem_p);
+		regmem_p->ref_cnt = 1;	/* Capture entry */
+		/* Lock user pages for DMA and save pages info.
+		 * (prepare for MLLI) */
+		rc = llimgr_register_client_dma_buf(client_ctx->drv_data->
+						    sep_data->llimgr,
+						    user_buf_ptr, sgl, buf_size,
+						    0, dma_direction,
+						    &regmem_p->dma_obj);
+		if (unlikely(rc < 0)) {
+			/* Release entry */
+			regmem_p->ref_cnt = 0;
+			free_memref_idx = rc;
+		}
+		mutex_unlock(&regmem_p->buf_lock);
+	}
+
+	/* Return user_memref[] entry index as the memory reference ID */
+	return free_memref_idx;
+}
+
+/**
+ * free_user_memref() - Free resources associated with a user memory reference
+ * @client_ctx:	 User context data
+ * @memref_idx:	 Index of the user memory reference
+ *
+ * Free resources associated with a user memory reference
+ * (The referenced memory may be locked user pages or allocated DMA-coherent
+ *  memory mmap'ed to the user space)
+ * Returns int !0 on failure (memref still in use or unknown)
+ */
+int free_client_memref(struct sep_client_ctx *client_ctx,
+		       int memref_idx)
+{
+	struct registered_memref *regmem_p =
+	    &client_ctx->reg_memrefs[memref_idx];
+	int rc = 0;
+
+	if (!IS_VALID_MEMREF_IDX(memref_idx)) {
+		pr_err("Invalid memref ID %d\n", memref_idx);
+		return -EINVAL;
+	}
+
+	mutex_lock(&regmem_p->buf_lock);
+
+	if (likely(regmem_p->ref_cnt == 1)) {
+		/* TODO: support case of allocated DMA-coherent buffer */
+		llimgr_deregister_client_dma_buf(client_ctx->drv_data->
+						 sep_data->llimgr,
+						 &regmem_p->dma_obj);
+		regmem_p->ref_cnt = 0;
+	} else if (unlikely(regmem_p->ref_cnt == 0)) {
+		pr_err("Invoked for free memref ID=%d\n", memref_idx);
+		rc = -EINVAL;
+	} else {		/* ref_cnt > 1 */
+		pr_err(
+			    "BUSY/Invalid memref to release: ref_cnt=%d, user_buf_ptr=%p\n",
+			    regmem_p->ref_cnt, regmem_p->dma_obj.user_buf_ptr);
+		rc = -EBUSY;
+	}
+
+	mutex_unlock(&regmem_p->buf_lock);
+
+	return rc;
+}
+
+/**
+ * acquire_dma_obj() - Get the memref object of given memref_idx and increment
+ *			reference count of it
+ * @client_ctx:	Associated client context
+ * @memref_idx:	Required registered memory reference ID (index)
+ *
+ * Get the memref object of given memref_idx and increment reference count of it
+ * The returned object must be released by invoking release_dma_obj() before
+ * the object (memref) may be freed.
+ * Returns struct user_dma_buffer The memref object or NULL for invalid
+ */
+struct client_dma_buffer *acquire_dma_obj(struct sep_client_ctx *client_ctx,
+					  int memref_idx)
+{
+	struct registered_memref *regmem_p =
+	    &client_ctx->reg_memrefs[memref_idx];
+	struct client_dma_buffer *rc;
+
+	if (!IS_VALID_MEMREF_IDX(memref_idx)) {
+		pr_err("Invalid memref ID %d\n", memref_idx);
+		return NULL;
+	}
+
+	mutex_lock(&regmem_p->buf_lock);
+	if (regmem_p->ref_cnt < 1) {
+		pr_err("Invalid memref (ID=%d, ref_cnt=%d)\n",
+			    memref_idx, regmem_p->ref_cnt);
+		rc = NULL;
+	} else {
+		regmem_p->ref_cnt++;
+		rc = &regmem_p->dma_obj;
+	}
+	mutex_unlock(&regmem_p->buf_lock);
+
+	return rc;
+}
+
+/**
+ * release_dma_obj() - Release memref object taken with get_memref_obj
+ *			(Does not free!)
+ * @client_ctx:	Associated client context
+ * @dma_obj:	The DMA object returned from acquire_dma_obj()
+ *
+ * Returns void
+ */
+void release_dma_obj(struct sep_client_ctx *client_ctx,
+		     struct client_dma_buffer *dma_obj)
+{
+	struct registered_memref *regmem_p;
+	int memref_idx;
+
+	if (dma_obj == NULL)	/* Probably failed on acquire_dma_obj */
+		return;
+	/* Verify valid container */
+	memref_idx = DMA_OBJ_TO_MEMREF_IDX(client_ctx, dma_obj);
+	if (!IS_VALID_MEMREF_IDX(memref_idx)) {
+		pr_err("Given DMA object is not registered\n");
+		return;
+	}
+	/* Get container */
+	regmem_p = &client_ctx->reg_memrefs[memref_idx];
+	mutex_lock(&regmem_p->buf_lock);
+	if (regmem_p->ref_cnt < 2) {
+		pr_err("Invalid memref (ref_cnt=%d, user_buf_ptr=%p)\n",
+			    regmem_p->ref_cnt, regmem_p->dma_obj.user_buf_ptr);
+	} else {
+		regmem_p->ref_cnt--;
+	}
+	mutex_unlock(&regmem_p->buf_lock);
+}
+
+/**
+ * crypto_op_completion_cleanup() - Cleanup CRYPTO_OP descriptor operation
+ *					resources after completion
+ * @op_ctx:
+ *
+ * Returns int
+ */
+int crypto_op_completion_cleanup(struct sep_op_ctx *op_ctx)
+{
+	struct sep_client_ctx *client_ctx = op_ctx->client_ctx;
+	struct queue_drvdata *drvdata = client_ctx->drv_data;
+	void *llimgr = drvdata->sep_data->llimgr;
+	struct client_crypto_ctx_info *ctx_info_p = &(op_ctx->ctx_info);
+	enum sep_op_type op_type = op_ctx->op_type;
+	u32 error_info = op_ctx->error_info;
+	u32 ctx_info_idx;
+	bool data_in_place;
+	const enum crypto_alg_class alg_class =
+	    ctxmgr_get_alg_class(&op_ctx->ctx_info);
+
+	/* Resources cleanup on data processing operations (PROC/FINI) */
+	if (op_type & (SEP_OP_CRYPTO_PROC | SEP_OP_CRYPTO_FINI)) {
+		if ((alg_class == ALG_CLASS_HASH) ||
+		    (ctxmgr_get_mac_type(ctx_info_p) == DXDI_MAC_HMAC)) {
+			/* Unmap what was mapped in prepare_data_for_sep() */
+			ctxmgr_unmap2dev_hash_tail(ctx_info_p,
+						   drvdata->sep_data->dev);
+			/* Save last data block tail (remainder of crypto-block)
+			 * or clear that buffer after it was used if there is
+			 * no new block remainder data */
+			ctxmgr_save_hash_blk_remainder(ctx_info_p,
+						       &op_ctx->din_dma_obj,
+						       false);
+		}
+		data_in_place = llimgr_is_same_mlli_tables(llimgr,
+							   &op_ctx->ift,
+							   &op_ctx->oft);
+		/* First free IFT resources */
+		llimgr_destroy_mlli(llimgr, &op_ctx->ift);
+		llimgr_deregister_client_dma_buf(llimgr, &op_ctx->din_dma_obj);
+		/* Free OFT resources */
+		if (data_in_place) {
+			/* OFT already destroyed as IFT. Just clean it. */
+			MLLI_TABLES_LIST_INIT(&op_ctx->oft);
+			CLEAN_DMA_BUFFER_INFO(&op_ctx->din_dma_obj);
+		} else {	/* OFT resources cleanup */
+			llimgr_destroy_mlli(llimgr, &op_ctx->oft);
+			llimgr_deregister_client_dma_buf(llimgr,
+							 &op_ctx->dout_dma_obj);
+		}
+	}
+
+	for (ctx_info_idx = 0;
+	     ctx_info_idx < op_ctx->ctx_info_num;
+	     ctx_info_idx++, ctx_info_p++) {
+		if ((op_type & SEP_OP_CRYPTO_FINI) || (error_info != 0)) {
+			/* If this was a finalizing descriptor, or any error,
+			 * invalidate from cache */
+			ctxmgr_sep_cache_invalidate(drvdata->sep_cache,
+					ctxmgr_get_ctx_id(ctx_info_p),
+					CRYPTO_CTX_ID_SINGLE_MASK);
+		}
+		/* Update context state */
+		if ((op_type & SEP_OP_CRYPTO_FINI) ||
+		    ((op_type & SEP_OP_CRYPTO_INIT) && (error_info != 0))) {
+			/* If this was a finalizing descriptor,
+			 * or a failing initializing descriptor: */
+			ctxmgr_set_ctx_state(ctx_info_p,
+					CTX_STATE_UNINITIALIZED);
+		} else if (op_type & SEP_OP_CRYPTO_INIT)
+			ctxmgr_set_ctx_state(ctx_info_p,
+					CTX_STATE_INITIALIZED);
+	}
+
+	return 0;
+}
+
+/**
+ * wait_for_sep_op_result() - Wait for outstanding SeP operation to complete and
+ *				fetch SeP ret-code
+ * @op_ctx:
+ *
+ * Wait for outstanding SeP operation to complete and fetch SeP ret-code
+ * into op_ctx->sep_ret_code
+ * Returns int
+ */
+int wait_for_sep_op_result(struct sep_op_ctx *op_ctx)
+{
+#ifdef DEBUG
+	if (unlikely(op_ctx->op_state == USER_OP_NOP)) {
+		pr_err("Operation context is inactive!\n");
+		op_ctx->error_info = DXDI_ERROR_FATAL;
+		return -EINVAL;
+	}
+#endif
+
+	/* wait until crypto operation is completed.
+	 * We cannot timeout this operation because hardware operations may
+	 * be still pending on associated data buffers.
+	 * Only system reboot can take us out of this abnormal state in a safe
+	 * manner (avoiding data corruption) */
+	wait_for_completion(&(op_ctx->ioctl_op_compl));
+#ifdef DEBUG
+	if (unlikely(op_ctx->op_state != USER_OP_COMPLETED)) {
+		pr_err(
+			    "Op. state is not COMPLETED after getting completion event (op_ctx=0x%p, op_state=%d)\n",
+			    op_ctx, op_ctx->op_state);
+		dump_stack();	/*SEP_DRIVER_BUG(); */
+		op_ctx->error_info = DXDI_ERROR_FATAL;
+		return -EINVAL;
+	}
+#endif
+
+	return 0;
+}
+
+/**
+ * get_num_of_ctx_info() - Count the number of valid contexts assigned for the
+ *				combined operation.
+ * @config:	 The user configuration scheme array
+ *
+ * Returns int
+ */
+static int get_num_of_ctx_info(struct dxdi_combined_props *config)
+{
+	int valid_ctx_n;
+
+	for (valid_ctx_n = 0;
+	     (valid_ctx_n < DXDI_COMBINED_NODES_MAX) &&
+	     (config->node_props[valid_ctx_n].context != NULL)
+	     ; valid_ctx_n++) {
+		/*NOOP*/
+	}
+
+	return valid_ctx_n;
+}
+
+/***** Driver Interface implementation functions *****/
+
+/**
+ * format_sep_combined_cfg_scheme() - Encode the user configuration scheme to
+ *					SeP format.
+ * @config:	 The user configuration scheme array
+ * @op_ctx:	 Operation context
+ *
+ * Returns u32
+ */
+static u32 format_sep_combined_cfg_scheme(struct dxdi_combined_props
+					       *config,
+					       struct sep_op_ctx *op_ctx)
+{
+	enum dxdi_input_engine_type engine_src = DXDI_INPUT_NULL;
+	enum sep_engine_type engine_type = SEP_ENGINE_NULL;
+	enum crypto_alg_class alg_class;
+	u32 sep_cfg_scheme = 0;	/* the encoded config scheme */
+	struct client_crypto_ctx_info *ctx_info_p = &op_ctx->ctx_info;
+	enum dxdi_sym_cipher_type symc_type;
+	int eng_idx, done = 0;
+	int prev_direction = -1;
+
+	/* encode engines connections into SEP format */
+	for (eng_idx = 0;
+	     (eng_idx < DXDI_COMBINED_NODES_MAX) && (!done);
+	     eng_idx++, ctx_info_p++) {
+
+		/* set engine source */
+		engine_src = config->node_props[eng_idx].eng_input;
+
+		/* set engine type */
+		if (config->node_props[eng_idx].context != NULL) {
+			int dir;
+			alg_class = ctxmgr_get_alg_class(ctx_info_p);
+			switch (alg_class) {
+			case ALG_CLASS_HASH:
+				engine_type = SEP_ENGINE_HASH;
+				break;
+			case ALG_CLASS_SYM_CIPHER:
+				symc_type =
+				    ctxmgr_get_sym_cipher_type(ctx_info_p);
+				if ((symc_type == DXDI_SYMCIPHER_AES_ECB) ||
+				    (symc_type == DXDI_SYMCIPHER_AES_CBC) ||
+				    (symc_type == DXDI_SYMCIPHER_AES_CTR))
+					engine_type = SEP_ENGINE_AES;
+				else
+					engine_type = SEP_ENGINE_NULL;
+
+				dir =
+				    (int)
+				    ctxmgr_get_symcipher_direction(ctx_info_p);
+				if (prev_direction == -1) {
+					prev_direction = dir;
+				} else {
+					/* Only decrypt->encrypt operation */
+					if (!
+					    (prev_direction ==
+					     SEP_CRYPTO_DIRECTION_DECRYPT &&
+					     (dir ==
+					      SEP_CRYPTO_DIRECTION_ENCRYPT))) {
+						pr_err(
+						    "Invalid direction combination %s->%s\n",
+						    prev_direction ==
+						    SEP_CRYPTO_DIRECTION_DECRYPT
+						    ? "DEC" : "ENC",
+						    dir ==
+						    SEP_CRYPTO_DIRECTION_DECRYPT
+						    ? "DEC" : "ENC");
+						op_ctx->error_info =
+						    DXDI_ERROR_INVAL_DIRECTION;
+					}
+				}
+				break;
+			default:
+				engine_type = SEP_ENGINE_NULL;
+				break;	/*unsupported alg class */
+			}
+		} else if (engine_src != DXDI_INPUT_NULL) {
+			/* incase engine source is not NULL and NULL sub-context
+			 * is passed then DOUT is -DOUT type */
+			engine_type = SEP_ENGINE_DOUT;
+			/* exit after props set */
+			done = 1;
+		} else {
+			/* both context pointer & input type are
+			 * NULL -we're done */
+			break;
+		}
+
+		sep_comb_eng_props_set(&sep_cfg_scheme, eng_idx,
+					  engine_src, engine_type);
+	}
+
+	return sep_cfg_scheme;
+}
+
+/**
+ * init_crypto_context() - Initialize host crypto context
+ * @op_ctx:
+ * @context_buf:
+ * @alg_class:
+ * @props:	Pointer to configuration properties which match given alg_class:
+ *		ALG_CLASS_SYM_CIPHER: struct dxdi_sym_cipher_props
+ *		ALG_CLASS_AUTH_ENC: struct dxdi_auth_enc_props
+ *		ALG_CLASS_MAC: struct dxdi_mac_props
+ *		ALG_CLASS_HASH: enum dxdi_hash_type
+ *
+ * Returns int 0 if operation executed in SeP.
+ * See error_info for actual results.
+ */
+static int init_crypto_context(struct sep_op_ctx *op_ctx,
+			       u32 __user *context_buf,
+			       enum crypto_alg_class alg_class, void *props)
+{
+	int rc;
+	int sep_cache_load_req;
+	struct queue_drvdata *drvdata = op_ctx->client_ctx->drv_data;
+	bool postpone_init = false;
+
+	rc = ctxmgr_map_user_ctx(&op_ctx->ctx_info, drvdata->sep_data->dev,
+				 alg_class, context_buf);
+	if (rc != 0) {
+		op_ctx->error_info = DXDI_ERROR_BAD_CTX;
+		return rc;
+	}
+	op_ctx->op_type = SEP_OP_CRYPTO_INIT;
+	ctxmgr_set_ctx_state(&op_ctx->ctx_info, CTX_STATE_UNINITIALIZED);
+	/* Allocate a new Crypto context ID */
+	ctxmgr_set_ctx_id(&op_ctx->ctx_info,
+			  alloc_crypto_ctx_id(op_ctx->client_ctx));
+
+	switch (alg_class) {
+	case ALG_CLASS_SYM_CIPHER:
+		rc = ctxmgr_init_symcipher_ctx(&op_ctx->ctx_info,
+					       (struct dxdi_sym_cipher_props *)
+					       props, &postpone_init,
+					       &op_ctx->error_info);
+		break;
+	case ALG_CLASS_AUTH_ENC:
+		rc = ctxmgr_init_auth_enc_ctx(&op_ctx->ctx_info,
+					      (struct dxdi_auth_enc_props *)
+					      props, &op_ctx->error_info);
+		break;
+	case ALG_CLASS_MAC:
+		rc = ctxmgr_init_mac_ctx(&op_ctx->ctx_info,
+					 (struct dxdi_mac_props *)props,
+					 &op_ctx->error_info);
+		break;
+	case ALG_CLASS_HASH:
+		rc = ctxmgr_init_hash_ctx(&op_ctx->ctx_info,
+					  *((enum dxdi_hash_type *)props),
+					  &op_ctx->error_info);
+		break;
+	default:
+		pr_err("Invalid algorithm class %d\n", alg_class);
+		op_ctx->error_info = DXDI_ERROR_UNSUP;
+		rc = -EINVAL;
+	}
+	if (rc != 0)
+		goto ctx_init_exit;
+
+	/* After inialization above we are partially init. missing SeP init. */
+	ctxmgr_set_ctx_state(&op_ctx->ctx_info, CTX_STATE_PARTIAL_INIT);
+#ifdef DEBUG
+	ctxmgr_dump_sep_ctx(&op_ctx->ctx_info);
+#endif
+
+	/* If not all the init. information is available at this time
+	 * we postpone INIT in SeP to processing phase */
+	if (postpone_init) {
+		pr_debug("Init. postponed to processing phase\n");
+		ctxmgr_unmap_user_ctx(&op_ctx->ctx_info);
+		/* must be valid on "success" */
+		op_ctx->error_info = DXDI_ERROR_NULL;
+		return 0;
+	}
+
+	/* Flush out of host cache */
+	ctxmgr_sync_sep_ctx(&op_ctx->ctx_info, drvdata->sep_data->dev);
+	/* Start critical section -
+	 * cache allocation must be coupled to descriptor enqueue */
+	rc = mutex_lock_interruptible(&drvdata->desc_queue_sequencer);
+	if (rc != 0) {
+		pr_err("Failed locking descQ sequencer[%u]\n",
+		       op_ctx->client_ctx->qid);
+		op_ctx->error_info = DXDI_ERROR_NO_RESOURCE;
+		goto ctx_init_exit;
+	}
+
+	ctxmgr_set_sep_cache_idx(&op_ctx->ctx_info,
+				 ctxmgr_sep_cache_alloc(drvdata->sep_cache,
+							ctxmgr_get_ctx_id
+							(&op_ctx->ctx_info),
+							&sep_cache_load_req));
+	if (!sep_cache_load_req)
+		pr_err("New context already in SeP cache?!");
+
+	rc = send_crypto_op_desc(op_ctx,
+				 1 /*always load on init */ , 1 /*INIT*/,
+				 SEP_PROC_MODE_NOP);
+
+	mutex_unlock(&drvdata->desc_queue_sequencer);
+	if (likely(rc == 0))
+		rc = wait_for_sep_op_result(op_ctx);
+
+ ctx_init_exit:
+	/* Cleanup resources and update context state */
+	crypto_op_completion_cleanup(op_ctx);
+	ctxmgr_unmap_user_ctx(&op_ctx->ctx_info);
+
+	return rc;
+}
+
+/**
+ * map_ctx_for_proc() - Map previously initialized crypto context before data
+ *			processing
+ * @op_ctx:
+ * @context_buf:
+ * @ctx_state:	 Returned current context state
+ *
+ * Returns int
+ */
+static int map_ctx_for_proc(struct sep_client_ctx *client_ctx,
+			    struct client_crypto_ctx_info *ctx_info,
+			    u32 __user *context_buf,
+			    enum host_ctx_state *ctx_state_p)
+{
+	int rc;
+	struct queue_drvdata *drvdata = client_ctx->drv_data;
+
+	/* default state in case of error */
+	*ctx_state_p = CTX_STATE_UNINITIALIZED;
+
+	rc = ctxmgr_map_user_ctx(ctx_info, drvdata->sep_data->dev,
+				 ALG_CLASS_NONE, context_buf);
+	if (rc != 0) {
+		pr_err("Failed mapping context\n");
+		return rc;
+	}
+	if (ctxmgr_get_session_id(ctx_info) != (uintptr_t) client_ctx) {
+		pr_err("Context ID is not associated with this session\n");
+		rc = -EINVAL;
+	}
+	if (rc == 0)
+		*ctx_state_p = ctx_info->ctx_kptr->state;
+
+#ifdef DEBUG
+	ctxmgr_dump_sep_ctx(ctx_info);
+	/* Flush out of host cache */
+	ctxmgr_sync_sep_ctx(ctx_info, drvdata->sep_data->dev);
+#endif
+	if (rc != 0)
+		ctxmgr_unmap_user_ctx(ctx_info);
+
+	return rc;
+}
+
+/**
+ * init_combined_context() - Initialize Combined context
+ * @op_ctx:
+ * @config: Pointer to configuration scheme to be validate by the SeP
+ *
+ * Returns int 0 if operation executed in SeP.
+ * See error_info for actual results.
+ */
+static int init_combined_context(struct sep_op_ctx *op_ctx,
+				 struct dxdi_combined_props *config)
+{
+	struct queue_drvdata *drvdata = op_ctx->client_ctx->drv_data;
+	struct client_crypto_ctx_info *ctx_info_p = &op_ctx->ctx_info;
+	int sep_ctx_load_req[DXDI_COMBINED_NODES_MAX];
+	enum host_ctx_state ctx_state;
+	int rc, ctx_idx, ctx_mapped_n = 0;
+
+	op_ctx->op_type = SEP_OP_CRYPTO_INIT;
+
+	/* no context to load -clear buffer */
+	memset(sep_ctx_load_req, 0, sizeof(sep_ctx_load_req));
+
+	/* set the number of active contexts */
+	op_ctx->ctx_info_num = get_num_of_ctx_info(config);
+
+	/* map each context in the configuration scheme */
+	for (ctx_idx = 0; ctx_idx < op_ctx->ctx_info_num;
+	     ctx_idx++, ctx_info_p++) {
+		/* context already initialized by the user */
+		rc = map_ctx_for_proc(op_ctx->client_ctx, ctx_info_p,
+				      config->node_props[ctx_idx].context,
+				      /*user ctx */ &ctx_state);
+		if (rc != 0) {
+			op_ctx->error_info = DXDI_ERROR_BAD_CTX;
+			goto ctx_init_exit;
+		}
+
+		ctx_mapped_n++;	/*ctx mapped successfully */
+
+		/* context must be initialzed */
+		if (ctx_state != CTX_STATE_INITIALIZED) {
+			pr_err(
+				    "Given context [%d] in invalid state for processing -%d\n",
+				    ctx_idx, ctx_state);
+			op_ctx->error_info = DXDI_ERROR_BAD_CTX;
+			rc = -EINVAL;
+			goto ctx_init_exit;
+		}
+	}
+
+	ctx_info_p = &op_ctx->ctx_info;
+
+	/* Start critical section -
+	 * cache allocation must be coupled to descriptor enqueue */
+	rc = mutex_lock_interruptible(&drvdata->desc_queue_sequencer);
+	if (rc == 0) {		/* Coupled sequence */
+		for (ctx_idx = 0; ctx_idx < op_ctx->ctx_info_num;
+		     ctx_idx++, ctx_info_p++) {
+			ctxmgr_set_sep_cache_idx(ctx_info_p,
+						 ctxmgr_sep_cache_alloc
+						 (drvdata->sep_cache,
+						  ctxmgr_get_ctx_id(ctx_info_p),
+						  &sep_ctx_load_req[ctx_idx]));
+		}
+
+		rc = send_combined_op_desc(op_ctx,
+					   sep_ctx_load_req /*nothing to load */
+					   ,
+					   1 /*INIT*/, SEP_PROC_MODE_NOP,
+					   0 /*no scheme in init */);
+
+		mutex_unlock(&drvdata->desc_queue_sequencer);
+	} else {
+		pr_err("Failed locking descQ sequencer[%u]\n",
+			    op_ctx->client_ctx->qid);
+		op_ctx->error_info = DXDI_ERROR_NO_RESOURCE;
+	}
+	if (rc == 0)
+		rc = wait_for_sep_op_result(op_ctx);
+
+	crypto_op_completion_cleanup(op_ctx);
+
+ ctx_init_exit:
+	ctx_info_p = &op_ctx->ctx_info;
+
+	for (ctx_idx = 0; ctx_idx < ctx_mapped_n; ctx_idx++, ctx_info_p++)
+		ctxmgr_unmap_user_ctx(ctx_info_p);
+
+	return rc;
+}
+
+/**
+ * prepare_adata_for_sep() - Generate MLLI tables for additional/associated data
+ *				(input only)
+ * @op_ctx:	 Operation context
+ * @adata_in:
+ * @adata_in_size:
+ *
+ * Returns int
+ */
+static inline int prepare_adata_for_sep(struct sep_op_ctx *op_ctx,
+					u8 __user *adata_in,
+					u32 adata_in_size)
+{
+	struct sep_client_ctx *client_ctx = op_ctx->client_ctx;
+	void *llimgr = client_ctx->drv_data->sep_data->llimgr;
+	struct dma_pool *spad_buf_pool =
+	    client_ctx->drv_data->sep_data->spad_buf_pool;
+	struct mlli_tables_list *ift_p = &op_ctx->ift;
+	unsigned long a0_buf_size;
+	u8 *a0_buf_p;
+	int rc = 0;
+
+	if (adata_in == NULL) {	/*data_out required for this alg_class */
+		pr_err("adata_in==NULL for authentication\n");
+		op_ctx->error_info = DXDI_ERROR_INVAL_DIN_PTR;
+		return -EINVAL;
+	}
+
+	op_ctx->spad_buf_p = dma_pool_alloc(spad_buf_pool,
+					    GFP_KERNEL,
+					    &op_ctx->spad_buf_dma_addr);
+	if (unlikely(op_ctx->spad_buf_p == NULL)) {
+		pr_err("Failed allocating from spad_buf_pool for A0\n");
+		return -ENOMEM;
+	}
+	a0_buf_p = op_ctx->spad_buf_p;
+
+	/* format A0 (the first 2 words in the first block) */
+	if (adata_in_size < ((1UL << 16) - (1UL << 8))) {
+		a0_buf_size = 2;
+
+		a0_buf_p[0] = (adata_in_size >> 8) & 0xFF;
+		a0_buf_p[1] = adata_in_size & 0xFF;
+	} else {
+		a0_buf_size = 6;
+
+		a0_buf_p[0] = 0xFF;
+		a0_buf_p[1] = 0xFE;
+		a0_buf_p[2] = (adata_in_size >> 24) & 0xFF;
+		a0_buf_p[3] = (adata_in_size >> 16) & 0xFF;
+		a0_buf_p[4] = (adata_in_size >> 8) & 0xFF;
+		a0_buf_p[5] = adata_in_size & 0xFF;
+	}
+
+	/* Create IFT (MLLI table) */
+	rc = llimgr_register_client_dma_buf(llimgr,
+					    adata_in, NULL, adata_in_size, 0,
+					    DMA_TO_DEVICE,
+					    &op_ctx->din_dma_obj);
+	if (likely(rc == 0)) {
+		rc = llimgr_create_mlli(llimgr, ift_p, DMA_TO_DEVICE,
+					&op_ctx->din_dma_obj,
+					op_ctx->spad_buf_dma_addr, a0_buf_size);
+		if (unlikely(rc != 0)) {
+			llimgr_deregister_client_dma_buf(llimgr,
+							 &op_ctx->din_dma_obj);
+		}
+	}
+
+	return rc;
+}
+
+/**
+ * prepare_cipher_data_for_sep() - Generate MLLI tables for cipher algorithms
+ *				(input + output)
+ * @op_ctx:	 Operation context
+ * @data_in:	User space pointer for input data (NULL for kernel data)
+ * @sgl_in:	Kernel buffers s/g list for input data (NULL for user data)
+ * @data_out:	User space pointer for output data (NULL for kernel data)
+ * @sgl_out:	Kernel buffers s/g list for output data (NULL for user data)
+ * @data_in_size:
+ *
+ * Returns int
+ */
+static inline int prepare_cipher_data_for_sep(struct sep_op_ctx *op_ctx,
+					      u8 __user *data_in,
+					      struct scatterlist *sgl_in,
+					      u8 __user *data_out,
+					      struct scatterlist *sgl_out,
+					      u32 data_in_size)
+{
+	struct sep_client_ctx *client_ctx = op_ctx->client_ctx;
+	void *llimgr = client_ctx->drv_data->sep_data->llimgr;
+	const bool is_data_inplace =
+	    data_in != NULL ? (data_in == data_out) : (sgl_in == sgl_out);
+	const enum dma_data_direction din_dma_direction =
+	    is_data_inplace ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+	int rc;
+
+	/* Check parameters */
+	if (data_in_size == 0) {	/* No data to prepare */
+		return 0;
+	}
+	if ((data_out == NULL) && (sgl_out == NULL)) {
+		/* data_out required for this alg_class */
+		pr_err("data_out/sgl_out==NULL for enc/decryption\n");
+		op_ctx->error_info = DXDI_ERROR_INVAL_DOUT_PTR;
+		return -EINVAL;
+	}
+
+	/* Avoid partial overlapping of data_in with data_out */
+	if (!is_data_inplace)
+		if (data_in != NULL) {	/* User space buffer */
+			if (((data_in < data_out) &&
+			     ((data_in + data_in_size) > data_out)) ||
+			    ((data_out < data_in) &&
+			     ((data_out + data_in_size) > data_in))) {
+				pr_err("Buffers partially overlap!\n");
+				op_ctx->error_info =
+				    DXDI_ERROR_DIN_DOUT_OVERLAP;
+				return -EINVAL;
+			}
+		}
+	/* else: TODO - scan s/g lists for overlapping */
+
+	/* Create IFT + OFT (MLLI tables) */
+	rc = llimgr_register_client_dma_buf(llimgr,
+					    data_in, sgl_in, data_in_size, 0,
+					    din_dma_direction,
+					    &op_ctx->din_dma_obj);
+	if (likely(rc == 0)) {
+		rc = llimgr_create_mlli(llimgr, &op_ctx->ift, din_dma_direction,
+					&op_ctx->din_dma_obj, 0, 0);
+	}
+	if (likely(rc == 0)) {
+		if (is_data_inplace) {
+			/* Mirror IFT data in OFT */
+			op_ctx->dout_dma_obj = op_ctx->din_dma_obj;
+			op_ctx->oft = op_ctx->ift;
+		} else {	/* Create OFT */
+			rc = llimgr_register_client_dma_buf(llimgr,
+							    data_out, sgl_out,
+							    data_in_size, 0,
+							    DMA_FROM_DEVICE,
+							    &op_ctx->
+							    dout_dma_obj);
+			if (likely(rc == 0)) {
+				rc = llimgr_create_mlli(llimgr, &op_ctx->oft,
+							DMA_FROM_DEVICE,
+							&op_ctx->dout_dma_obj,
+							0, 0);
+			}
+		}
+	}
+
+	if (unlikely(rc != 0)) {	/* Failure cleanup */
+		/* No output MLLI to free in error case */
+		if (!is_data_inplace) {
+			llimgr_deregister_client_dma_buf(llimgr,
+							 &op_ctx->dout_dma_obj);
+		}
+		llimgr_destroy_mlli(llimgr, &op_ctx->ift);
+		llimgr_deregister_client_dma_buf(llimgr, &op_ctx->din_dma_obj);
+	}
+
+	return rc;
+}
+
+/**
+ * prepare_hash_data_for_sep() - Prepare data for hash operation
+ * @op_ctx:		Operation context
+ * @is_finalize:	Is hash finialize operation (last)
+ * @data_in:		Pointer to user buffer OR...
+ * @sgl_in:		Gather list for kernel buffers
+ * @data_in_size:	Data size in bytes
+ *
+ * Returns 0 on success
+ */
+static int prepare_hash_data_for_sep(struct sep_op_ctx *op_ctx,
+				     bool is_finalize,
+				     u8 __user *data_in,
+				     struct scatterlist *sgl_in,
+				     u32 data_in_size)
+{
+	struct sep_client_ctx *client_ctx = op_ctx->client_ctx;
+	struct queue_drvdata *drvdata = client_ctx->drv_data;
+	void *llimgr = drvdata->sep_data->llimgr;
+	u32 crypto_blk_size;
+	/* data size for processing this op. (incl. prev. block remainder) */
+	u32 data_size4hash;
+	/* amount of data_in to process in this op. */
+	u32 data_in_save4next;
+	u16 last_hash_blk_tail_size;
+	dma_addr_t last_hash_blk_tail_dma;
+	int rc;
+
+	if ((data_in != NULL) && (sgl_in != NULL)) {
+		pr_err("Given valid data_in+sgl_in!\n");
+		return -EINVAL;
+	}
+
+	/*Hash block size required in order to buffer block remainders */
+	crypto_blk_size = ctxmgr_get_crypto_blk_size(&op_ctx->ctx_info);
+	if (crypto_blk_size == 0) {	/* Unsupported algorithm?... */
+		op_ctx->error_info = DXDI_ERROR_UNSUP;
+		return -ENOSYS;
+	}
+
+	/* Map for DMA the last block tail, if any */
+	rc = ctxmgr_map2dev_hash_tail(&op_ctx->ctx_info,
+				      drvdata->sep_data->dev);
+	if (rc != 0) {
+		pr_err("Failed mapping hash data tail buffer\n");
+		return rc;
+	}
+	last_hash_blk_tail_size =
+	    ctxmgr_get_hash_blk_remainder_buf(&op_ctx->ctx_info,
+					      &last_hash_blk_tail_dma);
+	data_size4hash = data_in_size + last_hash_blk_tail_size;
+	if (!is_finalize) {
+		/* Not last. Round to hash block size. */
+		data_size4hash = (data_size4hash & ~(crypto_blk_size - 1));
+		/* On the last hash op. take all that's left */
+	}
+	data_in_save4next = (data_size4hash > 0) ?
+	    data_in_size - (data_size4hash - last_hash_blk_tail_size) :
+	    data_in_size;
+	rc = llimgr_register_client_dma_buf(llimgr,
+					    data_in, sgl_in, data_in_size,
+					    data_in_save4next, DMA_TO_DEVICE,
+					    &op_ctx->din_dma_obj);
+	if (unlikely(rc != 0)) {
+		pr_err("Failed registering client buffer (rc=%d)\n", rc);
+	} else {
+		if ((!is_finalize) && (data_size4hash == 0)) {
+			/* Not enough for even one hash block
+			 * (all saved for next) */
+			ctxmgr_unmap2dev_hash_tail(&op_ctx->ctx_info,
+						   drvdata->sep_data->dev);
+			/* Append to existing tail buffer */
+			rc = ctxmgr_save_hash_blk_remainder(&op_ctx->ctx_info,
+							    &op_ctx->
+							    din_dma_obj,
+							    true /*append */);
+			if (rc == 0)	/* signal: not even one block */
+				rc = -ENOTBLK;
+		} else {
+			rc = llimgr_create_mlli(llimgr, &op_ctx->ift,
+						DMA_TO_DEVICE,
+						&op_ctx->din_dma_obj,
+						last_hash_blk_tail_dma,
+						last_hash_blk_tail_size);
+		}
+	}
+
+	if (unlikely(rc != 0)) {	/* Failed (or -ENOTBLK) */
+		/* No harm if we invoke deregister if it was not registered */
+		llimgr_deregister_client_dma_buf(llimgr, &op_ctx->din_dma_obj);
+		/* Unmap hash block tail buffer */
+		ctxmgr_unmap2dev_hash_tail(&op_ctx->ctx_info,
+					   drvdata->sep_data->dev);
+	}
+	/* Hash block remainder would be copied into tail buffer only after
+	 * operation completion, because this buffer is still in use for
+	 * current operation */
+
+	return rc;
+}
+
+/**
+ * prepare_mac_data_for_sep() - Prepare data memory objects for (AES) MAC
+ *				operations
+ * @op_ctx:
+ * @data_in:		Pointer to user buffer OR...
+ * @sgl_in:		Gather list for kernel buffers
+ * @data_in_size:
+ *
+ * Returns int
+ */
+static inline int prepare_mac_data_for_sep(struct sep_op_ctx *op_ctx,
+					   u8 __user *data_in,
+					   struct scatterlist *sgl_in,
+					   u32 data_in_size)
+{
+	struct sep_client_ctx *client_ctx = op_ctx->client_ctx;
+	void *llimgr = client_ctx->drv_data->sep_data->llimgr;
+	int rc;
+
+	rc = llimgr_register_client_dma_buf(llimgr,
+					    data_in, sgl_in, data_in_size, 0,
+					    DMA_TO_DEVICE,
+					    &op_ctx->din_dma_obj);
+	if (likely(rc == 0)) {
+		rc = llimgr_create_mlli(llimgr, &op_ctx->ift,
+					DMA_TO_DEVICE, &op_ctx->din_dma_obj, 0,
+					0);
+		if (rc != 0) {
+			llimgr_deregister_client_dma_buf(llimgr,
+							 &op_ctx->din_dma_obj);
+		}
+	}
+
+	return rc;
+}
+
+/**
+ * prepare_data_for_sep() - Prepare data for processing by SeP
+ * @op_ctx:
+ * @data_in:	User space pointer for input data (NULL for kernel data)
+ * @sgl_in:	Kernel buffers s/g list for input data (NULL for user data)
+ * @data_out:	User space pointer for output data (NULL for kernel data)
+ * @sgl_out:	Kernel buffers s/g list for output data (NULL for user data)
+ * @data_in_size:	 data_in buffer size (and data_out's if not NULL)
+ * @data_intent:	 the purpose of the given data
+ *
+ * Prepare data for processing by SeP
+ * (common flow for sep_proc_dblk and sep_fin_proc) .
+ * Returns int
+ */
+int prepare_data_for_sep(struct sep_op_ctx *op_ctx,
+			 u8 __user *data_in,
+			 struct scatterlist *sgl_in,
+			 u8 __user *data_out,
+			 struct scatterlist *sgl_out,
+			 u32 data_in_size,
+			 enum crypto_data_intent data_intent)
+{
+	int rc;
+	enum crypto_alg_class alg_class;
+
+	if (data_intent == CRYPTO_DATA_ADATA) {
+		/* additional/associated data */
+		if (!ctxmgr_is_valid_adata_size(&op_ctx->ctx_info,
+						data_in_size)) {
+			op_ctx->error_info = DXDI_ERROR_INVAL_DATA_SIZE;
+			return -EINVAL;
+		}
+	} else {
+		/* cipher/text data */
+		if (!ctxmgr_is_valid_size(&op_ctx->ctx_info,
+					  data_in_size,
+					  (data_intent ==
+					   CRYPTO_DATA_TEXT_FINALIZE))) {
+			op_ctx->error_info = DXDI_ERROR_INVAL_DATA_SIZE;
+			return -EINVAL;
+		}
+	}
+
+	pr_debug("data_in=0x%p/0x%p data_out=0x%p/0x%p data_in_size=%uB\n",
+		      data_in, sgl_in, data_out, sgl_out, data_in_size);
+
+	alg_class = ctxmgr_get_alg_class(&op_ctx->ctx_info);
+	pr_debug("alg_class = %d\n", alg_class);
+	switch (alg_class) {
+	case ALG_CLASS_SYM_CIPHER:
+		rc = prepare_cipher_data_for_sep(op_ctx,
+						 data_in, sgl_in, data_out,
+						 sgl_out, data_in_size);
+		break;
+	case ALG_CLASS_AUTH_ENC:
+		if (data_intent == CRYPTO_DATA_ADATA) {
+			struct host_crypto_ctx_auth_enc *aead_ctx_p =
+			    (struct host_crypto_ctx_auth_enc *)op_ctx->ctx_info.
+			    ctx_kptr;
+
+			if (!aead_ctx_p->is_adata_processed) {
+				rc = prepare_adata_for_sep(op_ctx,
+							   data_in,
+							   data_in_size);
+				/* no more invocation to adata process
+				 * is allowed */
+				aead_ctx_p->is_adata_processed = 1;
+			} else {
+				/* additional data may be processed
+				 * only once */
+				return -EPERM;
+			}
+		} else {
+			rc = prepare_cipher_data_for_sep(op_ctx,
+							 data_in, sgl_in,
+							 data_out, sgl_out,
+							 data_in_size);
+		}
+		break;
+	case ALG_CLASS_MAC:
+	case ALG_CLASS_HASH:
+		if ((alg_class == ALG_CLASS_MAC) &&
+		    (ctxmgr_get_mac_type(&op_ctx->ctx_info) != DXDI_MAC_HMAC)) {
+			/* Handle all MACs but HMAC */
+			if (data_in_size == 0) {	/* No data to prepare */
+				rc = 0;
+				break;
+			}
+#if 0
+			/* ignore checking the user out pointer due to crys api
+			 * limitation */
+			if (data_out != NULL) {
+				pr_err("data_out!=NULL for MAC\n");
+				return -EINVAL;
+			}
+#endif
+			rc = prepare_mac_data_for_sep(op_ctx, data_in, sgl_in,
+						      data_in_size);
+			break;
+		}
+
+		/* else: HASH or HMAC require the same handling */
+		rc = prepare_hash_data_for_sep(op_ctx,
+					       (data_intent ==
+						CRYPTO_DATA_TEXT_FINALIZE),
+					       data_in, sgl_in, data_in_size);
+		break;
+
+	default:
+		pr_err("Invalid algorithm class %d in context\n",
+			    alg_class);
+		/* probably context was corrupted since init. phase */
+		op_ctx->error_info = DXDI_ERROR_BAD_CTX;
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+/**
+ * prepare_combined_data_for_sep() - Prepare combined data for processing by SeP
+ * @op_ctx:
+ * @data_in:
+ * @data_out:
+ * @data_in_size:	 data_in buffer size (and data_out's if not NULL)
+ * @data_intent:	 the purpose of the given data
+ *
+ * Prepare combined data for processing by SeP
+ * (common flow for sep_proc_dblk and sep_fin_proc) .
+ * Returns int
+ */
+static int prepare_combined_data_for_sep(struct sep_op_ctx *op_ctx,
+					 u8 __user *data_in,
+					 u8 __user *data_out,
+					 u32 data_in_size,
+					 enum crypto_data_intent data_intent)
+{
+	int rc;
+
+	if (data_intent == CRYPTO_DATA_TEXT) {
+		/* restrict data unit size to the max block size multiple */
+		if (!IS_MULT_OF(data_in_size, SEP_HASH_BLOCK_SIZE_MAX)) {
+			pr_err(
+				    "Data unit size (%u) is not HASH block multiple\n",
+				    data_in_size);
+			op_ctx->error_info = DXDI_ERROR_INVAL_DATA_SIZE;
+			return -EINVAL;
+		}
+	} else if (data_intent == CRYPTO_DATA_TEXT_FINALIZE) {
+		/* user may finalize with zero or AES block size multiple */
+		if (!IS_MULT_OF(data_in_size, SEP_AES_BLOCK_SIZE)) {
+			pr_err("Data size (%u), not AES block multiple\n",
+				    data_in_size);
+			op_ctx->error_info = DXDI_ERROR_INVAL_DATA_SIZE;
+			return -EINVAL;
+		}
+	}
+
+	pr_debug("data_in=0x%08lX data_out=0x%08lX data_in_size=%uB\n",
+		      (unsigned long)data_in, (unsigned long)data_out,
+		      data_in_size);
+
+	pr_debug("alg_class = COMBINED\n");
+	if (data_out == NULL)
+		rc = prepare_mac_data_for_sep(op_ctx,
+					      data_in, NULL, data_in_size);
+	else
+		rc = prepare_cipher_data_for_sep(op_ctx,
+						 data_in, NULL, data_out, NULL,
+						 data_in_size);
+
+	return rc;
+}
+
+/**
+ * sep_proc_dblk() - Process data block
+ * @op_ctx:
+ * @context_buf:
+ * @data_block_type:
+ * @data_in:
+ * @data_out:
+ * @data_in_size:
+ *
+ * Returns int
+ */
+static int sep_proc_dblk(struct sep_op_ctx *op_ctx,
+			 u32 __user *context_buf,
+			 enum dxdi_data_block_type data_block_type,
+			 u8 __user *data_in,
+			 u8 __user *data_out, u32 data_in_size)
+{
+	struct queue_drvdata *drvdata = op_ctx->client_ctx->drv_data;
+	int rc;
+	int sep_cache_load_required;
+	int sep_ctx_init_required = 0;
+	enum crypto_alg_class alg_class;
+	enum host_ctx_state ctx_state;
+
+	if (data_in_size == 0) {
+		pr_err("Got empty data_in\n");
+		op_ctx->error_info = DXDI_ERROR_INVAL_DATA_SIZE;
+		return -EINVAL;
+	}
+
+	rc = map_ctx_for_proc(op_ctx->client_ctx, &op_ctx->ctx_info,
+			      context_buf, &ctx_state);
+	if (rc != 0) {
+		op_ctx->error_info = DXDI_ERROR_BAD_CTX;
+		return rc;
+	}
+	op_ctx->op_type = SEP_OP_CRYPTO_PROC;
+	if (ctx_state == CTX_STATE_PARTIAL_INIT) {
+		/* case of postponed sep context init. */
+		sep_ctx_init_required = 1;
+		op_ctx->op_type |= SEP_OP_CRYPTO_INIT;
+	} else if (ctx_state != CTX_STATE_INITIALIZED) {
+		pr_err("Context in invalid state for processing %d\n",
+			    ctx_state);
+		op_ctx->error_info = DXDI_ERROR_BAD_CTX;
+		rc = -EINVAL;
+		goto unmap_ctx_and_return;
+	}
+	alg_class = ctxmgr_get_alg_class(&op_ctx->ctx_info);
+
+	rc = prepare_data_for_sep(op_ctx, data_in, NULL, data_out, NULL,
+				  data_in_size,
+				  (data_block_type ==
+				   DXDI_DATA_TYPE_TEXT ? CRYPTO_DATA_TEXT :
+				   CRYPTO_DATA_ADATA));
+	if (rc != 0) {
+		if (rc == -ENOTBLK) {
+			/* Did not accumulate even a single hash block */
+			/* The data_in already copied to context, in addition
+			 * to existing data. Report as success with no op. */
+			op_ctx->error_info = DXDI_ERROR_NULL;
+			rc = 0;
+		}
+		goto unmap_ctx_and_return;
+	}
+
+	if (sep_ctx_init_required) {
+		/* If this flag is set it implies that we have updated
+		 * parts of the sep_ctx structure during data preparation -
+		 * need to sync. context to memory (from host cache...) */
+#ifdef DEBUG
+		ctxmgr_dump_sep_ctx(&op_ctx->ctx_info);
+#endif
+		ctxmgr_sync_sep_ctx(&op_ctx->ctx_info, drvdata->sep_data->dev);
+	}
+
+	/* Start critical section -
+	 * cache allocation must be coupled to descriptor enqueue */
+	rc = mutex_lock_interruptible(&drvdata->desc_queue_sequencer);
+	if (rc == 0) {		/* coupled sequence */
+		ctxmgr_set_sep_cache_idx(&op_ctx->ctx_info,
+			 ctxmgr_sep_cache_alloc(drvdata->
+						sep_cache,
+						ctxmgr_get_ctx_id
+						(&op_ctx->ctx_info),
+						&sep_cache_load_required));
+		rc = send_crypto_op_desc(op_ctx, sep_cache_load_required,
+					 sep_ctx_init_required,
+					 (data_block_type ==
+					  DXDI_DATA_TYPE_TEXT ?
+					  SEP_PROC_MODE_PROC_T :
+					  SEP_PROC_MODE_PROC_A));
+		mutex_unlock(&drvdata->desc_queue_sequencer);
+	} else {
+		pr_err("Failed locking descQ sequencer[%u]\n",
+			    op_ctx->client_ctx->qid);
+		op_ctx->error_info = DXDI_ERROR_NO_RESOURCE;
+	}
+	if (rc == 0)
+		rc = wait_for_sep_op_result(op_ctx);
+
+	crypto_op_completion_cleanup(op_ctx);
+
+ unmap_ctx_and_return:
+	ctxmgr_unmap_user_ctx(&op_ctx->ctx_info);
+
+	return rc;
+}
+
+/**
+ * sep_fin_proc() - Finalize processing of given context with given (optional)
+ *			data
+ * @op_ctx:
+ * @context_buf:
+ * @data_in:
+ * @data_out:
+ * @data_in_size:
+ * @digest_or_mac_p:
+ * @digest_or_mac_size_p:
+ *
+ * Returns int
+ */
+static int sep_fin_proc(struct sep_op_ctx *op_ctx,
+			u32 __user *context_buf,
+			u8 __user *data_in,
+			u8 __user *data_out,
+			u32 data_in_size,
+			u8 *digest_or_mac_p,
+			u8 *digest_or_mac_size_p)
+{
+	struct queue_drvdata *drvdata = op_ctx->client_ctx->drv_data;
+	int rc;
+	int sep_cache_load_required;
+	int sep_ctx_init_required = 0;
+	enum host_ctx_state ctx_state;
+
+	rc = map_ctx_for_proc(op_ctx->client_ctx, &op_ctx->ctx_info,
+			      context_buf, &ctx_state);
+	if (rc != 0) {
+		op_ctx->error_info = DXDI_ERROR_BAD_CTX;
+		return rc;
+	}
+	if (ctx_state == CTX_STATE_PARTIAL_INIT) {
+		/* case of postponed sep context init. */
+		sep_ctx_init_required = 1;
+	} else if (ctx_state != CTX_STATE_INITIALIZED) {
+		pr_err("Context in invalid state for finalizing %d\n",
+			    ctx_state);
+		op_ctx->error_info = DXDI_ERROR_BAD_CTX;
+		rc = -EINVAL;
+		goto data_prepare_err;
+	}
+
+	op_ctx->op_type = SEP_OP_CRYPTO_FINI;
+
+	rc = prepare_data_for_sep(op_ctx, data_in, NULL, data_out, NULL,
+				  data_in_size, CRYPTO_DATA_TEXT_FINALIZE);
+	if (rc != 0)
+		goto data_prepare_err;
+
+	if (sep_ctx_init_required) {
+		/* If this flag is set it implies that we have updated
+		 * parts of the sep_ctx structure during data preparation -
+		 * need to sync. context to memory (from host cache...) */
+#ifdef DEBUG
+		ctxmgr_dump_sep_ctx(&op_ctx->ctx_info);
+#endif
+		ctxmgr_sync_sep_ctx(&op_ctx->ctx_info, drvdata->sep_data->dev);
+	}
+
+	/* Start critical section -
+	 * cache allocation must be coupled to descriptor enqueue */
+	rc = mutex_lock_interruptible(&drvdata->desc_queue_sequencer);
+	if (rc == 0) {		/* Coupled sequence */
+		ctxmgr_set_sep_cache_idx(&op_ctx->ctx_info,
+				 ctxmgr_sep_cache_alloc(drvdata->sep_cache,
+					ctxmgr_get_ctx_id(&op_ctx->ctx_info),
+					&sep_cache_load_required));
+		rc = send_crypto_op_desc(op_ctx, sep_cache_load_required,
+					 sep_ctx_init_required,
+					 SEP_PROC_MODE_FIN);
+		mutex_unlock(&drvdata->desc_queue_sequencer);
+	} else {
+		pr_err("Failed locking descQ sequencer[%u]\n",
+			    op_ctx->client_ctx->qid);
+		op_ctx->error_info = DXDI_ERROR_NO_RESOURCE;
+	}
+	if (rc == 0)
+		rc = wait_for_sep_op_result(op_ctx);
+
+	if ((rc == 0) && (op_ctx->error_info == 0)) {
+		/* Digest or MAC are embedded in the SeP/FW context */
+		*digest_or_mac_size_p =
+		    ctxmgr_get_digest_or_mac(&op_ctx->ctx_info,
+					     digest_or_mac_p);
+		/* If above is not applicable for given algorithm,
+		 *digest_or_mac_size_p would be set to 0          */
+	} else {
+		/* Nothing valid in digest_or_mac_p */
+		*digest_or_mac_size_p = 0;
+	}
+
+	crypto_op_completion_cleanup(op_ctx);
+
+ data_prepare_err:
+	ctxmgr_unmap_user_ctx(&op_ctx->ctx_info);
+
+	return rc;
+}
+
+/**
+ * sep_combined_proc_dblk() - Process Combined operation block
+ * @op_ctx:
+ * @config:
+ * @data_in:
+ * @data_out:
+ * @data_in_size:
+ *
+ * Returns int
+ */
+static int sep_combined_proc_dblk(struct sep_op_ctx *op_ctx,
+				  struct dxdi_combined_props *config,
+				  u8 __user *data_in,
+				  u8 __user *data_out,
+				  u32 data_in_size)
+{
+	int rc;
+	int ctx_idx, ctx_mapped_n = 0;
+	int sep_cache_load_required[DXDI_COMBINED_NODES_MAX];
+	enum host_ctx_state ctx_state;
+	struct queue_drvdata *drvdata = op_ctx->client_ctx->drv_data;
+	struct client_crypto_ctx_info *ctx_info_p = &(op_ctx->ctx_info);
+	u32 cfg_scheme;
+
+	if (data_in_size == 0) {
+		pr_err("Got empty data_in\n");
+		op_ctx->error_info = DXDI_ERROR_INVAL_DATA_SIZE;
+		return -EINVAL;
+	}
+
+	/* assume nothing to load */
+	memset(sep_cache_load_required, 0, sizeof(sep_cache_load_required));
+
+	/* set the number of active contexts */
+	op_ctx->ctx_info_num = get_num_of_ctx_info(config);
+
+	/* map each context in the configuration scheme */
+	for (ctx_idx = 0; ctx_idx < op_ctx->ctx_info_num;
+	     ctx_idx++, ctx_info_p++) {
+		/* context already initialized by the user */
+		rc = map_ctx_for_proc(op_ctx->client_ctx,
+				      ctx_info_p,
+				      config->node_props[ctx_idx].context,
+				      /*user ctx */ &ctx_state);
+		if (rc != 0) {
+			op_ctx->error_info = DXDI_ERROR_BAD_CTX;
+			goto unmap_ctx_and_return;
+		}
+
+		ctx_mapped_n++;	/*ctx mapped successfully */
+
+		/* context must be initialzed */
+		if (ctx_state != CTX_STATE_INITIALIZED) {
+			pr_err(
+				    "Given context [%d] in invalid state for processing -%d\n",
+				    ctx_idx, ctx_state);
+			op_ctx->error_info = DXDI_ERROR_BAD_CTX;
+			rc = -EINVAL;
+			goto unmap_ctx_and_return;
+		}
+	}
+
+	op_ctx->op_type = SEP_OP_CRYPTO_PROC;
+
+	/* Construct SeP combined scheme */
+	cfg_scheme = format_sep_combined_cfg_scheme(config, op_ctx);
+	pr_debug("SeP Config. Scheme: 0x%08X\n", cfg_scheme);
+
+	rc = prepare_combined_data_for_sep(op_ctx, data_in, data_out,
+					   data_in_size, CRYPTO_DATA_TEXT);
+	if (unlikely(rc != 0)) {
+		SEP_LOG_ERR(
+			    "Failed preparing DMA buffers (rc=%d, err_info=0x%08X\n)\n",
+			    rc, op_ctx->error_info);
+		return rc;
+	}
+
+	ctx_info_p = &(op_ctx->ctx_info);
+
+	/* Start critical section -
+	 * cache allocation must be coupled to descriptor enqueue */
+	rc = mutex_lock_interruptible(&drvdata->desc_queue_sequencer);
+	if (rc == 0) {		/* coupled sequence */
+		for (ctx_idx = 0; ctx_idx < op_ctx->ctx_info_num;
+		     ctx_idx++, ctx_info_p++) {
+			ctxmgr_set_sep_cache_idx(ctx_info_p,
+						 ctxmgr_sep_cache_alloc
+						 (drvdata->sep_cache,
+						  ctxmgr_get_ctx_id(ctx_info_p),
+						  &sep_cache_load_required
+						  [ctx_idx]));
+		}
+
+		rc = send_combined_op_desc(op_ctx,
+					   sep_cache_load_required, 0 /*INIT*/,
+					   SEP_PROC_MODE_PROC_T, cfg_scheme);
+		mutex_unlock(&drvdata->desc_queue_sequencer);
+	} else {
+		pr_err("Failed locking descQ sequencer[%u]\n",
+			    op_ctx->client_ctx->qid);
+		op_ctx->error_info = DXDI_ERROR_NO_RESOURCE;
+	}
+	if (rc == 0)
+		rc = wait_for_sep_op_result(op_ctx);
+
+	crypto_op_completion_cleanup(op_ctx);
+
+ unmap_ctx_and_return:
+	ctx_info_p = &(op_ctx->ctx_info);
+
+	for (ctx_idx = 0; ctx_idx < ctx_mapped_n; ctx_idx++, ctx_info_p++)
+		ctxmgr_unmap_user_ctx(ctx_info_p);
+
+	return rc;
+}
+
+/**
+ * sep_combined_fin_proc() - Finalize Combined processing
+ *			     with given (optional) data
+ * @op_ctx:
+ * @config:
+ * @data_in:
+ * @data_out:
+ * @data_in_size:
+ * @auth_data_p:
+ * @auth_data_size_p:
+ *
+ * Returns int
+ */
+static int sep_combined_fin_proc(struct sep_op_ctx *op_ctx,
+				 struct dxdi_combined_props *config,
+				 u8 __user *data_in,
+				 u8 __user *data_out,
+				 u32 data_in_size,
+				 u8 *auth_data_p,
+				 u8 *auth_data_size_p)
+{
+	int rc;
+	int ctx_idx, ctx_mapped_n = 0;
+	int sep_cache_load_required[DXDI_COMBINED_NODES_MAX];
+	enum host_ctx_state ctx_state;
+	struct queue_drvdata *drvdata = op_ctx->client_ctx->drv_data;
+	struct client_crypto_ctx_info *ctx_info_p = &(op_ctx->ctx_info);
+	u32 cfg_scheme;
+
+	/* assume nothing to load */
+	memset(sep_cache_load_required, 0, sizeof(sep_cache_load_required));
+
+	/* set the number of active contexts */
+	op_ctx->ctx_info_num = get_num_of_ctx_info(config);
+
+	/* map each context in the configuration scheme */
+	for (ctx_idx = 0; ctx_idx < op_ctx->ctx_info_num;
+	     ctx_idx++, ctx_info_p++) {
+		/* context already initialized by the user */
+		rc = map_ctx_for_proc(op_ctx->client_ctx,
+				      ctx_info_p,
+				      config->node_props[ctx_idx].context,
+				      /*user ctx */ &ctx_state);
+		if (rc != 0) {
+			op_ctx->error_info = DXDI_ERROR_BAD_CTX;
+			goto unmap_ctx_and_return;
+		}
+
+		ctx_mapped_n++;	/*ctx mapped successfully */
+
+		/* context must be initialzed */
+		if (ctx_state != CTX_STATE_INITIALIZED) {
+			pr_err(
+				    "Given context [%d] in invalid state for processing -%d\n",
+				    ctx_idx, ctx_state);
+			rc = -EINVAL;
+			op_ctx->error_info = DXDI_ERROR_BAD_CTX;
+			goto unmap_ctx_and_return;
+		}
+	}
+
+	op_ctx->op_type = SEP_OP_CRYPTO_FINI;
+
+	/* Construct SeP combined scheme */
+	cfg_scheme = format_sep_combined_cfg_scheme(config, op_ctx);
+	pr_debug("SeP Config. Scheme: 0x%08X\n", cfg_scheme);
+
+	rc = prepare_combined_data_for_sep(op_ctx, data_in, data_out,
+					   data_in_size,
+					   CRYPTO_DATA_TEXT_FINALIZE);
+	if (rc != 0)
+		goto unmap_ctx_and_return;
+
+	ctx_info_p = &(op_ctx->ctx_info);
+
+	/* Start critical section -
+	 * cache allocation must be coupled to descriptor enqueue */
+	rc = mutex_lock_interruptible(&drvdata->desc_queue_sequencer);
+	if (rc == 0) {		/* Coupled sequence */
+		for (ctx_idx = 0; ctx_idx < op_ctx->ctx_info_num;
+		     ctx_idx++, ctx_info_p++) {
+			ctxmgr_set_sep_cache_idx(ctx_info_p,
+						 ctxmgr_sep_cache_alloc
+						 (drvdata->sep_cache,
+						  ctxmgr_get_ctx_id(ctx_info_p),
+						  &sep_cache_load_required
+						  [ctx_idx]));
+		}
+		rc = send_combined_op_desc(op_ctx,
+					   sep_cache_load_required, 0 /*INIT*/,
+					   SEP_PROC_MODE_FIN, cfg_scheme);
+
+		mutex_unlock(&drvdata->desc_queue_sequencer);
+	} else {
+		pr_err("Failed locking descQ sequencer[%u]\n",
+			    op_ctx->client_ctx->qid);
+		op_ctx->error_info = DXDI_ERROR_NO_RESOURCE;
+	}
+	if (rc == 0)
+		rc = wait_for_sep_op_result(op_ctx);
+
+	if (auth_data_size_p != NULL) {
+		if ((rc == 0) && (op_ctx->error_info == 0)) {
+			ctx_info_p = &(op_ctx->ctx_info);
+			ctx_info_p += op_ctx->ctx_info_num - 1;
+
+			/* Auth data embedded in the last SeP/FW context */
+			*auth_data_size_p =
+			    ctxmgr_get_digest_or_mac(ctx_info_p,
+						     auth_data_p);
+		} else {	/* Failure */
+			*auth_data_size_p = 0;	/* Nothing valid */
+		}
+	}
+
+	crypto_op_completion_cleanup(op_ctx);
+
+ unmap_ctx_and_return:
+	ctx_info_p = &(op_ctx->ctx_info);
+
+	for (ctx_idx = 0; ctx_idx < ctx_mapped_n; ctx_idx++, ctx_info_p++)
+		ctxmgr_unmap_user_ctx(ctx_info_p);
+
+	return rc;
+}
+
+/**
+ * process_combined_integrated() - Integrated processing of
+ *				   Combined data (init+proc+fin)
+ * @op_ctx:
+ * @props:	 Initialization properties (see init_context)
+ * @data_in:
+ * @data_out:
+ * @data_in_size:
+ * @auth_data_p:
+ * @auth_data_size_p:
+ *
+ * Returns int
+ */
+static int process_combined_integrated(struct sep_op_ctx *op_ctx,
+				       struct dxdi_combined_props *config,
+				       u8 __user *data_in,
+				       u8 __user *data_out,
+				       u32 data_in_size,
+				       u8 *auth_data_p,
+				       u8 *auth_data_size_p)
+{
+	int rc;
+	int ctx_idx, ctx_mapped_n = 0;
+	int sep_cache_load_required[DXDI_COMBINED_NODES_MAX];
+	enum host_ctx_state ctx_state;
+	struct queue_drvdata *drvdata = op_ctx->client_ctx->drv_data;
+	struct client_crypto_ctx_info *ctx_info_p = &(op_ctx->ctx_info);
+	u32 cfg_scheme;
+
+	/* assume nothing to load */
+	memset(sep_cache_load_required, 0, sizeof(sep_cache_load_required));
+
+	/* set the number of active contexts */
+	op_ctx->ctx_info_num = get_num_of_ctx_info(config);
+
+	/* map each context in the configuration scheme */
+	for (ctx_idx = 0; ctx_idx < op_ctx->ctx_info_num;
+	     ctx_idx++, ctx_info_p++) {
+		/* context already initialized by the user */
+		rc = map_ctx_for_proc(op_ctx->client_ctx,
+				      ctx_info_p,
+				      config->node_props[ctx_idx].context,
+				      &ctx_state);
+		if (rc != 0) {
+			op_ctx->error_info = DXDI_ERROR_BAD_CTX;
+			goto unmap_ctx_and_return;
+		}
+
+		ctx_mapped_n++;	/*ctx mapped successfully */
+
+		if (ctx_state != CTX_STATE_INITIALIZED) {
+			pr_err(
+				    "Given context [%d] in invalid state for processing 0x%08X\n",
+				    ctx_idx, ctx_state);
+			op_ctx->error_info = DXDI_ERROR_BAD_CTX;
+			rc = -EINVAL;
+			goto unmap_ctx_and_return;
+		}
+	}
+
+	op_ctx->op_type = SEP_OP_CRYPTO_FINI;
+	/* reconstruct combined scheme */
+	cfg_scheme = format_sep_combined_cfg_scheme(config, op_ctx);
+	pr_debug("SeP Config. Scheme: 0x%08X\n", cfg_scheme);
+
+	rc = prepare_combined_data_for_sep(op_ctx, data_in, data_out,
+					   data_in_size,
+					   CRYPTO_DATA_TEXT_FINALIZE /*last */
+					   );
+	if (rc != 0)
+		goto unmap_ctx_and_return;
+
+	ctx_info_p = &(op_ctx->ctx_info);
+
+	/* Start critical section -
+	 * cache allocation must be coupled to descriptor enqueue */
+	rc = mutex_lock_interruptible(&drvdata->desc_queue_sequencer);
+	if (rc == 0) {		/* Coupled sequence */
+		for (ctx_idx = 0; ctx_idx < op_ctx->ctx_info_num;
+		     ctx_idx++, ctx_info_p++) {
+			ctxmgr_set_sep_cache_idx(ctx_info_p,
+						 ctxmgr_sep_cache_alloc
+						 (drvdata->sep_cache,
+						  ctxmgr_get_ctx_id(ctx_info_p),
+						  &sep_cache_load_required
+						  [ctx_idx]));
+		}
+		rc = send_combined_op_desc(op_ctx,
+					   sep_cache_load_required, 1 /*INIT*/,
+					   SEP_PROC_MODE_FIN, cfg_scheme);
+
+		mutex_unlock(&drvdata->desc_queue_sequencer);
+	} else {
+		pr_err("Failed locking descQ sequencer[%u]\n",
+			    op_ctx->client_ctx->qid);
+		op_ctx->error_info = DXDI_ERROR_NO_RESOURCE;
+	}
+	if (rc == 0)
+		rc = wait_for_sep_op_result(op_ctx);
+
+	if (auth_data_size_p != NULL) {
+		if ((rc == 0) && (op_ctx->error_info == 0)) {
+			ctx_info_p = &(op_ctx->ctx_info);
+			ctx_info_p += op_ctx->ctx_info_num - 1;
+
+			/* Auth data embedded in the last SeP/FW context */
+			*auth_data_size_p =
+			    ctxmgr_get_digest_or_mac(ctx_info_p,
+						     auth_data_p);
+		} else {	/* Failure */
+			*auth_data_size_p = 0;	/* Nothing valid */
+		}
+	}
+
+	crypto_op_completion_cleanup(op_ctx);
+
+ unmap_ctx_and_return:
+	ctx_info_p = &(op_ctx->ctx_info);
+
+	for (ctx_idx = 0; ctx_idx < ctx_mapped_n; ctx_idx++, ctx_info_p++)
+		ctxmgr_unmap_user_ctx(ctx_info_p);
+
+	return rc;
+}
+
+/**
+ * process_integrated() - Integrated processing of data (init+proc+fin)
+ * @op_ctx:
+ * @context_buf:
+ * @alg_class:
+ * @props:	 Initialization properties (see init_context)
+ * @data_in:
+ * @data_out:
+ * @data_in_size:
+ * @digest_or_mac_p:
+ * @digest_or_mac_size_p:
+ *
+ * Returns int
+ */
+static int process_integrated(struct sep_op_ctx *op_ctx,
+			      u32 __user *context_buf,
+			      enum crypto_alg_class alg_class,
+			      void *props,
+			      u8 __user *data_in,
+			      u8 __user *data_out,
+			      u32 data_in_size,
+			      u8 *digest_or_mac_p,
+			      u8 *digest_or_mac_size_p)
+{
+	int rc;
+	int sep_cache_load_required;
+	bool postpone_init;
+	struct queue_drvdata *drvdata = op_ctx->client_ctx->drv_data;
+
+	rc = ctxmgr_map_user_ctx(&op_ctx->ctx_info, drvdata->sep_data->dev,
+				 alg_class, context_buf);
+	if (rc != 0) {
+		op_ctx->error_info = DXDI_ERROR_BAD_CTX;
+		return rc;
+	}
+	ctxmgr_set_ctx_state(&op_ctx->ctx_info, CTX_STATE_UNINITIALIZED);
+	/* Allocate a new Crypto context ID */
+	ctxmgr_set_ctx_id(&op_ctx->ctx_info,
+			  alloc_crypto_ctx_id(op_ctx->client_ctx));
+
+	/* Algorithm class specific initialization */
+	switch (alg_class) {
+	case ALG_CLASS_SYM_CIPHER:
+		rc = ctxmgr_init_symcipher_ctx(&op_ctx->ctx_info,
+					       (struct dxdi_sym_cipher_props *)
+					       props, &postpone_init,
+					       &op_ctx->error_info);
+		/* postpone_init would be ignored because this is an integrated
+		 * operation - all required data would be updated in the
+		 * context before the descriptor is sent */
+		break;
+	case ALG_CLASS_AUTH_ENC:
+		rc = ctxmgr_init_auth_enc_ctx(&op_ctx->ctx_info,
+					      (struct dxdi_auth_enc_props *)
+					      props, &op_ctx->error_info);
+		break;
+	case ALG_CLASS_MAC:
+		rc = ctxmgr_init_mac_ctx(&op_ctx->ctx_info,
+					 (struct dxdi_mac_props *)props,
+					 &op_ctx->error_info);
+		break;
+	case ALG_CLASS_HASH:
+		rc = ctxmgr_init_hash_ctx(&op_ctx->ctx_info,
+					  *((enum dxdi_hash_type *)props),
+					  &op_ctx->error_info);
+		break;
+	default:
+		pr_err("Invalid algorithm class %d\n", alg_class);
+		op_ctx->error_info = DXDI_ERROR_UNSUP;
+		rc = -EINVAL;
+	}
+	if (rc != 0)
+		goto unmap_ctx_and_exit;
+
+	ctxmgr_set_ctx_state(&op_ctx->ctx_info, CTX_STATE_PARTIAL_INIT);
+	op_ctx->op_type = SEP_OP_CRYPTO_FINI;	/* Integrated is also fin. */
+	rc = prepare_data_for_sep(op_ctx, data_in, NULL, data_out, NULL,
+				  data_in_size,
+				  CRYPTO_DATA_TEXT_FINALIZE /*last */);
+	if (rc != 0)
+		goto unmap_ctx_and_exit;
+
+#ifdef DEBUG
+	ctxmgr_dump_sep_ctx(&op_ctx->ctx_info);
+#endif
+	/* Flush sep_ctx out of host cache */
+	ctxmgr_sync_sep_ctx(&op_ctx->ctx_info, drvdata->sep_data->dev);
+
+	/* Start critical section -
+	 * cache allocation must be coupled to descriptor enqueue */
+	rc = mutex_lock_interruptible(&drvdata->desc_queue_sequencer);
+	if (rc == 0) {
+		/* Allocate SeP context cache entry */
+		ctxmgr_set_sep_cache_idx(&op_ctx->ctx_info,
+				 ctxmgr_sep_cache_alloc(drvdata->sep_cache,
+					ctxmgr_get_ctx_id(&op_ctx->ctx_info),
+					&sep_cache_load_required));
+		if (!sep_cache_load_required)
+			pr_err("New context already in SeP cache?!");
+		/* Send descriptor with combined load+init+fin */
+		rc = send_crypto_op_desc(op_ctx, 1 /*load */ , 1 /*INIT*/,
+					 SEP_PROC_MODE_FIN);
+		mutex_unlock(&drvdata->desc_queue_sequencer);
+
+	} else {		/* failed acquiring mutex */
+		pr_err("Failed locking descQ sequencer[%u]\n",
+			    op_ctx->client_ctx->qid);
+		op_ctx->error_info = DXDI_ERROR_NO_RESOURCE;
+	}
+
+	if (rc == 0)
+		rc = wait_for_sep_op_result(op_ctx);
+
+	if (digest_or_mac_size_p != NULL) {
+		if ((rc == 0) && (op_ctx->error_info == 0)) {
+			/* Digest or MAC are embedded in the SeP/FW context */
+			*digest_or_mac_size_p =
+			    ctxmgr_get_digest_or_mac(&op_ctx->ctx_info,
+						     digest_or_mac_p);
+		} else {	/* Failure */
+			*digest_or_mac_size_p = 0;	/* Nothing valid */
+		}
+	}
+
+	/* Hash tail buffer is never used/mapped in integrated op -->
+	 * no need to unmap */
+
+	crypto_op_completion_cleanup(op_ctx);
+
+ unmap_ctx_and_exit:
+	ctxmgr_unmap_user_ctx(&op_ctx->ctx_info);
+
+	return rc;
+}
+
+/**
+ * process_integrated_auth_enc() - Integrated processing of aead
+ * @op_ctx:
+ * @context_buf:
+ * @alg_class:
+ * @props:
+ * @data_header:
+ * @data_in:
+ * @data_out:
+ * @data_header_size:
+ * @data_in_size:
+ * @mac_p:
+ * @mac_size_p:
+ *
+ * Integrated processing of authenticate & encryption of data
+ * (init+proc_a+proc_t+fin)
+ * Returns int
+ */
+static int process_integrated_auth_enc(struct sep_op_ctx *op_ctx,
+				       u32 __user *context_buf,
+				       enum crypto_alg_class alg_class,
+				       void *props,
+				       u8 __user *data_header,
+				       u8 __user *data_in,
+				       u8 __user *data_out,
+				       u32 data_header_size,
+				       u32 data_in_size,
+				       u8 *mac_p, u8 *mac_size_p)
+{
+	int rc;
+	int sep_cache_load_required;
+	struct queue_drvdata *drvdata = op_ctx->client_ctx->drv_data;
+
+	rc = ctxmgr_map_user_ctx(&op_ctx->ctx_info, drvdata->sep_data->dev,
+				 alg_class, context_buf);
+	if (rc != 0) {
+		op_ctx->error_info = DXDI_ERROR_BAD_CTX;
+		goto integ_ae_exit;
+	}
+
+	ctxmgr_set_ctx_state(&op_ctx->ctx_info, CTX_STATE_UNINITIALIZED);
+	/* Allocate a new Crypto context ID */
+	ctxmgr_set_ctx_id(&op_ctx->ctx_info,
+			  alloc_crypto_ctx_id(op_ctx->client_ctx));
+
+	/* initialization */
+	rc = ctxmgr_init_auth_enc_ctx(&op_ctx->ctx_info,
+				      (struct dxdi_auth_enc_props *)props,
+				      &op_ctx->error_info);
+	if (rc != 0) {
+		ctxmgr_unmap_user_ctx(&op_ctx->ctx_info);
+		op_ctx->error_info = DXDI_ERROR_BAD_CTX;
+		goto integ_ae_exit;
+	}
+
+	ctxmgr_set_ctx_state(&op_ctx->ctx_info, CTX_STATE_PARTIAL_INIT);
+	/* Op. type is to init. the context and process Adata */
+	op_ctx->op_type = SEP_OP_CRYPTO_INIT | SEP_OP_CRYPTO_PROC;
+	/* prepare additional/assoc data */
+	rc = prepare_data_for_sep(op_ctx, data_header, NULL, NULL, NULL,
+				  data_header_size, CRYPTO_DATA_ADATA);
+	if (rc != 0) {
+		ctxmgr_unmap_user_ctx(&op_ctx->ctx_info);
+		goto integ_ae_exit;
+	}
+#ifdef DEBUG
+	ctxmgr_dump_sep_ctx(&op_ctx->ctx_info);
+#endif
+	/* Flush sep_ctx out of host cache */
+	ctxmgr_sync_sep_ctx(&op_ctx->ctx_info, drvdata->sep_data->dev);
+
+	/* Start critical section -
+	 * cache allocation must be coupled to descriptor enqueue */
+	rc = mutex_lock_interruptible(&drvdata->desc_queue_sequencer);
+	if (rc == 0) {
+		/* Allocate SeP context cache entry */
+		ctxmgr_set_sep_cache_idx(&op_ctx->ctx_info,
+			 ctxmgr_sep_cache_alloc(drvdata->sep_cache,
+					ctxmgr_get_ctx_id(&op_ctx->ctx_info),
+					&sep_cache_load_required));
+		if (!sep_cache_load_required)
+			pr_err("New context already in SeP cache?!");
+		/* Send descriptor with combined load+init+fin */
+		rc = send_crypto_op_desc(op_ctx, 1 /*load */ , 1 /*INIT*/,
+					 SEP_PROC_MODE_PROC_A);
+		mutex_unlock(&drvdata->desc_queue_sequencer);
+
+	} else {		/* failed acquiring mutex */
+		pr_err("Failed locking descQ sequencer[%u]\n",
+			    op_ctx->client_ctx->qid);
+		op_ctx->error_info = DXDI_ERROR_NO_RESOURCE;
+	}
+
+	/* set status and cleanup last descriptor */
+	if (rc == 0)
+		rc = wait_for_sep_op_result(op_ctx);
+	crypto_op_completion_cleanup(op_ctx);
+	ctxmgr_unmap_user_ctx(&op_ctx->ctx_info);
+
+	/* process text data only on adata success */
+	if ((rc == 0) && (op_ctx->error_info == 0)) {/* Init+Adata succeeded */
+		/* reset pending descriptor and preserve operation
+		 * context for the finalize phase */
+		op_ctx->pending_descs_cntr = 1;
+		/* process & finalize operation with entire user data */
+		rc = sep_fin_proc(op_ctx, context_buf, data_in,
+				  data_out, data_in_size, mac_p, mac_size_p);
+	}
+
+ integ_ae_exit:
+	return rc;
+}
+
+/**
+ * dxdi_data_dir_to_dma_data_dir() - Convert from DxDI DMA direction type to
+ *					Linux kernel DMA direction type
+ * @dxdi_dir:	 DMA direction in DxDI encoding
+ *
+ * Returns enum dma_data_direction
+ */
+enum dma_data_direction dxdi_data_dir_to_dma_data_dir(enum dxdi_data_direction
+						      dxdi_dir)
+{
+	switch (dxdi_dir) {
+	case DXDI_DATA_BIDIR:
+		return DMA_BIDIRECTIONAL;
+	case DXDI_DATA_TO_DEVICE:
+		return DMA_TO_DEVICE;
+	case DXDI_DATA_FROM_DEVICE:
+		return DMA_FROM_DEVICE;
+	default:
+		return DMA_NONE;
+	}
+}
+
+/**
+ * dispatch_sep_rpc() - Dispatch a SeP RPC descriptor and process results
+ * @op_ctx:
+ * @agent_id:
+ * @func_id:
+ * @mem_refs:
+ * @rpc_params_size:
+ * @rpc_params:
+ *
+ * Returns int
+ */
+static int dispatch_sep_rpc(struct sep_op_ctx *op_ctx,
+			    u16 agent_id,
+			    u16 func_id,
+			    struct dxdi_memref mem_refs[],
+			    unsigned long rpc_params_size,
+			    struct seprpc_params __user *rpc_params)
+{
+	int i, rc = 0;
+	struct sep_client_ctx *client_ctx = op_ctx->client_ctx;
+	struct queue_drvdata *drvdata = client_ctx->drv_data;
+	enum dma_data_direction dma_dir;
+	unsigned int num_of_mem_refs;
+	int memref_idx;
+	struct client_dma_buffer *local_dma_objs[SEP_RPC_MAX_MEMREF_PER_FUNC];
+	struct mlli_tables_list mlli_tables[SEP_RPC_MAX_MEMREF_PER_FUNC];
+	struct sep_sw_desc desc;
+	struct seprpc_params *rpc_msg_p;
+
+	/* Verify RPC message size */
+	if (unlikely(SEP_RPC_MAX_MSG_SIZE < rpc_params_size)) {
+		pr_err("Given rpc_params is too big (%lu B)\n",
+		       rpc_params_size);
+		return -EINVAL;
+	}
+
+	op_ctx->spad_buf_p = dma_pool_alloc(drvdata->sep_data->spad_buf_pool,
+					    GFP_KERNEL,
+					    &op_ctx->spad_buf_dma_addr);
+	if (unlikely(op_ctx->spad_buf_p == NULL)) {
+		pr_err("Fail: alloc from spad_buf_pool for RPC message\n");
+		return -ENOMEM;
+	}
+	rpc_msg_p = (struct seprpc_params *)op_ctx->spad_buf_p;
+
+	/* Copy params to DMA buffer of message */
+	rc = copy_from_user(rpc_msg_p, rpc_params, rpc_params_size);
+	if (rc) {
+		pr_err("Fail: copy RPC message from user at 0x%p, rc=%d\n",
+			    rpc_params, rc);
+		return -EFAULT;
+	}
+	/* Get num. of memory references in host endianess */
+	num_of_mem_refs = le32_to_cpu(rpc_msg_p->num_of_memrefs);
+
+	/* Handle user memory references - prepare DMA buffers */
+	if (unlikely(num_of_mem_refs > SEP_RPC_MAX_MEMREF_PER_FUNC)) {
+		pr_err("agent_id=%d func_id=%d: Invalid # of memref %u\n",
+			    agent_id, func_id, num_of_mem_refs);
+		return -EINVAL;
+	}
+	for (i = 0; i < num_of_mem_refs; i++) {
+		/* Init. tables lists for proper cleanup */
+		MLLI_TABLES_LIST_INIT(mlli_tables + i);
+		local_dma_objs[i] = NULL;
+	}
+	for (i = 0; i < num_of_mem_refs; i++) {
+		pr_debug(
+			"memref[%d]: id=%d dma_dir=%d start/offset 0x%08x size %u\n",
+			i, mem_refs[i].ref_id, mem_refs[i].dma_direction,
+			mem_refs[i].start_or_offset, mem_refs[i].size);
+
+		/* convert DMA direction to enum dma_data_direction */
+		dma_dir =
+		    dxdi_data_dir_to_dma_data_dir(mem_refs[i].dma_direction);
+		if (unlikely(dma_dir == DMA_NONE)) {
+			pr_err(
+				    "agent_id=%d func_id=%d: Invalid DMA direction (%d) for memref %d\n",
+				    agent_id, func_id,
+				    mem_refs[i].dma_direction, i);
+			rc = -EINVAL;
+			break;
+		}
+		/* Temporary memory registration if needed */
+		if (IS_VALID_MEMREF_IDX(mem_refs[i].ref_id)) {
+			memref_idx = mem_refs[i].ref_id;
+			if (unlikely(mem_refs[i].start_or_offset != 0)) {
+				pr_err(
+					    "Offset in memref is not supported for RPC.\n");
+				rc = -EINVAL;
+				break;
+			}
+		} else {
+			memref_idx = register_client_memref(client_ctx,
+							    (u8 __user *)
+							    (uintptr_t)
+							    mem_refs[i].
+							    start_or_offset,
+							    NULL,
+							    mem_refs[i].size,
+							    dma_dir);
+			if (unlikely(!IS_VALID_MEMREF_IDX(memref_idx))) {
+				pr_err("Fail: temp memory registration\n");
+				rc = -ENOMEM;
+				break;
+			}
+		}
+		/* MLLI table creation */
+		local_dma_objs[i] = acquire_dma_obj(client_ctx, memref_idx);
+		if (unlikely(local_dma_objs[i] == NULL)) {
+			pr_err("Failed acquiring DMA objects.\n");
+			rc = -ENOMEM;
+			break;
+		}
+		if (unlikely(local_dma_objs[i]->buf_size != mem_refs[i].size)) {
+			pr_err("RPC: Partial memory ref not supported.\n");
+			rc = -EINVAL;
+			break;
+		}
+		rc = llimgr_create_mlli(drvdata->sep_data->llimgr,
+					mlli_tables + i, dma_dir,
+					local_dma_objs[i], 0, 0);
+		if (unlikely(rc != 0))
+			break;
+		llimgr_mlli_to_seprpc_memref(&(mlli_tables[i]),
+					     &(rpc_msg_p->memref[i]));
+	}
+
+	op_ctx->op_type = SEP_OP_RPC;
+	if (rc == 0) {
+		/* Pack SW descriptor */
+		desc_q_pack_rpc_desc(&desc, op_ctx, agent_id, func_id,
+				     rpc_params_size,
+				     op_ctx->spad_buf_dma_addr);
+		op_ctx->op_state = USER_OP_INPROC;
+		/* Enqueue descriptor */
+		rc = desc_q_enqueue(drvdata->desc_queue, &desc, true);
+		if (likely(!IS_DESCQ_ENQUEUE_ERR(rc)))
+			rc = 0;
+	}
+
+	if (likely(rc == 0))
+		rc = wait_for_sep_op_result(op_ctx);
+	else
+		op_ctx->error_info = DXDI_ERROR_NO_RESOURCE;
+
+	/* Process descriptor completion */
+	if ((rc == 0) && (op_ctx->error_info == 0)) {
+		/* Copy back RPC message buffer */
+		rc = copy_to_user(rpc_params, rpc_msg_p, rpc_params_size);
+		if (rc) {
+			pr_err(
+				    "Failed copying back RPC parameters/message to user at 0x%p (rc=%d)\n",
+				    rpc_params, rc);
+			rc = -EFAULT;
+		}
+	}
+	op_ctx->op_state = USER_OP_NOP;
+	for (i = 0; i < num_of_mem_refs; i++) {
+		/* Can call for all - unitialized mlli tables would have
+		 * table_count == 0 */
+		llimgr_destroy_mlli(drvdata->sep_data->llimgr, mlli_tables + i);
+		if (local_dma_objs[i] != NULL) {
+			release_dma_obj(client_ctx, local_dma_objs[i]);
+			memref_idx =
+			    DMA_OBJ_TO_MEMREF_IDX(client_ctx,
+						  local_dma_objs[i]);
+			if (memref_idx != mem_refs[i].ref_id) {
+				/* Memory reference was temp. registered */
+				(void)free_client_memref(client_ctx,
+							 memref_idx);
+			}
+		}
+	}
+
+	return rc;
+}
+
+#if defined(SEP_PRINTF) && defined(DEBUG)
+/* Replace component mask */
+#undef SEP_LOG_CUR_COMPONENT
+#define SEP_LOG_CUR_COMPONENT SEP_LOG_MASK_SEP_PRINTF
+void sep_printf_handler(struct sep_drvdata *drvdata)
+{
+	int cur_ack_cntr;
+	u32 gpr_val;
+	int i;
+
+	/* Reduce interrupts by polling until no more characters */
+	/* Loop for at most a line - to avoid inifite looping in wq ctx */
+	for (i = 0; i < SEP_PRINTF_LINE_SIZE; i++) {
+
+		gpr_val = READ_REGISTER(drvdata->cc_base +
+					SEP_PRINTF_S2H_GPR_OFFSET);
+		cur_ack_cntr = gpr_val >> 8;
+		/*
+		 * ack as soon as possible (data is already in local variable)
+		 * let SeP push one more character until we finish processing
+		 */
+		WRITE_REGISTER(drvdata->cc_base + SEP_PRINTF_H2S_GPR_OFFSET,
+			       cur_ack_cntr);
+#if 0
+		pr_debug("%d. GPR=0x%08X (cur_ack=0x%08X , last=0x%08X)\n",
+			      i, gpr_val, cur_ack_cntr, drvdata->last_ack_cntr);
+#endif
+		if (cur_ack_cntr == drvdata->last_ack_cntr)
+			break;
+
+		/* Identify lost characters case */
+		if (cur_ack_cntr >
+		    ((drvdata->last_ack_cntr + 1) & SEP_PRINTF_ACK_MAX)) {
+			/* NULL terminate */
+			drvdata->line_buf[drvdata->cur_line_buf_offset] = 0;
+			if (sep_log_mask & SEP_LOG_CUR_COMPONENT)
+				pr_info("SeP(lost %d): %s",
+				       cur_ack_cntr - drvdata->last_ack_cntr
+				       - 1, drvdata->line_buf);
+			drvdata->cur_line_buf_offset = 0;
+		}
+		drvdata->last_ack_cntr = cur_ack_cntr;
+
+		drvdata->line_buf[drvdata->cur_line_buf_offset] =
+		    gpr_val & 0xFF;
+
+		/* Is end of line? */
+		if ((drvdata->line_buf[drvdata->cur_line_buf_offset] == '\n') ||
+		    (drvdata->line_buf[drvdata->cur_line_buf_offset] == 0) ||
+		    (drvdata->cur_line_buf_offset == (SEP_PRINTF_LINE_SIZE - 1))
+		    ) {
+			/* NULL terminate */
+			drvdata->line_buf[drvdata->cur_line_buf_offset + 1] = 0;
+			if (sep_log_mask & SEP_LOG_CUR_COMPONENT)
+				pr_info("SeP: %s", drvdata->line_buf);
+			drvdata->cur_line_buf_offset = 0;
+		} else {
+			drvdata->cur_line_buf_offset++;
+		}
+
+	}
+
+}
+
+/* Restore component mask */
+#undef SEP_LOG_CUR_COMPONENT
+#define SEP_LOG_CUR_COMPONENT SEP_LOG_MASK_MAIN
+#endif				/*SEP_PRINTF */
+
+static int sep_interrupt_process(struct sep_drvdata *drvdata)
+{
+	u32 cause_reg = 0;
+	int i;
+
+	/* read the interrupt status */
+	cause_reg = READ_REGISTER(drvdata->cc_base +
+				  DX_CC_REG_OFFSET(HOST, IRR));
+
+	if (cause_reg == 0) {
+		/* pr_debug("Got interrupt with empty cause_reg\n"); */
+		return IRQ_NONE;
+	}
+#if 0
+	pr_debug("cause_reg=0x%08X gpr5=0x%08X\n", cause_reg,
+		      READ_REGISTER(drvdata->cc_base +
+				    SEP_PRINTF_S2H_GPR_OFFSET));
+#endif
+	/* clear interrupt */
+	WRITE_REGISTER(drvdata->cc_base + DX_CC_REG_OFFSET(HOST, ICR),
+		       cause_reg);
+
+#ifdef SEP_PRINTF
+	if (cause_reg & SEP_HOST_GPR_IRQ_MASK(DX_SEP_HOST_PRINTF_GPR_IDX)) {
+#ifdef DEBUG
+		sep_printf_handler(drvdata);
+#else				/* Just ack to SeP so it does not stall */
+		WRITE_REGISTER(drvdata->cc_base + SEP_PRINTF_H2S_GPR_OFFSET,
+			       READ_REGISTER(drvdata->cc_base +
+					     SEP_PRINTF_S2H_GPR_OFFSET) >> 8);
+#endif
+		/* handled */
+		cause_reg &= ~SEP_HOST_GPR_IRQ_MASK(DX_SEP_HOST_PRINTF_GPR_IDX);
+	}
+#endif
+
+	if (cause_reg & SEP_HOST_GPR_IRQ_MASK(DX_SEP_STATE_GPR_IDX)) {
+		dx_sep_state_change_handler(drvdata);
+		cause_reg &= ~SEP_HOST_GPR_IRQ_MASK(DX_SEP_STATE_GPR_IDX);
+	}
+
+	if (cause_reg & SEP_HOST_GPR_IRQ_MASK(DX_SEP_REQUEST_GPR_IDX)) {
+		if (drvdata->
+		    irq_mask & SEP_HOST_GPR_IRQ_MASK(DX_SEP_REQUEST_GPR_IDX)) {
+			dx_sep_req_handler(drvdata);
+		}
+
+		cause_reg &= ~SEP_HOST_GPR_IRQ_MASK(DX_SEP_REQUEST_GPR_IDX);
+	}
+
+	/* Check interrupt flag for each queue */
+	for (i = 0; cause_reg && i < drvdata->num_of_desc_queues; i++) {
+		if (cause_reg & gpr_interrupt_mask[i]) {
+			desc_q_process_completed(drvdata->queue[i].desc_queue);
+			cause_reg &= ~gpr_interrupt_mask[i];
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+#ifdef SEP_INTERRUPT_BY_TIMER
+static void sep_timer(unsigned long arg)
+{
+	struct sep_drvdata *drvdata = (struct sep_drvdata *)arg;
+
+	(void)sep_interrupt_process(drvdata);
+
+	mod_timer(&drvdata->delegate, jiffies + msecs_to_jiffies(10));
+}
+#else
+irqreturn_t sep_interrupt(int irq, void *dev_id)
+{
+	struct sep_drvdata *drvdata =
+	    (struct sep_drvdata *)dev_get_drvdata((struct device *)dev_id);
+
+	if (drvdata->sep_suspended) {
+		WARN(1, "sep_interrupt rise in suspend!");
+		return IRQ_HANDLED;
+	}
+
+	return sep_interrupt_process(drvdata);
+}
+#endif
+
+/***** IOCTL commands handlers *****/
+
+static int sep_ioctl_get_ver_major(unsigned long arg)
+{
+	u32 __user *ver_p = (u32 __user *)arg;
+	const u32 ver_major = DXDI_VER_MAJOR;
+
+	return put_user(ver_major, ver_p);
+}
+
+static int sep_ioctl_get_ver_minor(unsigned long arg)
+{
+	u32 __user *ver_p = (u32 __user *)arg;
+	const u32 ver_minor = DXDI_VER_MINOR;
+
+	return put_user(ver_minor, ver_p);
+}
+
+static int sep_ioctl_get_sym_cipher_ctx_size(unsigned long arg)
+{
+	struct dxdi_get_sym_cipher_ctx_size_params __user *user_params =
+	    (struct dxdi_get_sym_cipher_ctx_size_params __user *)arg;
+	enum dxdi_sym_cipher_type sym_cipher_type;
+	const u32 ctx_size = ctxmgr_get_ctx_size(ALG_CLASS_SYM_CIPHER);
+	int err;
+
+	err = __get_user(sym_cipher_type, &(user_params->sym_cipher_type));
+	if (err)
+		return err;
+
+	if (((sym_cipher_type >= _DXDI_SYMCIPHER_AES_FIRST) &&
+	     (sym_cipher_type <= _DXDI_SYMCIPHER_AES_LAST)) ||
+	    ((sym_cipher_type >= _DXDI_SYMCIPHER_DES_FIRST) &&
+	     (sym_cipher_type <= _DXDI_SYMCIPHER_DES_LAST)) ||
+	    ((sym_cipher_type >= _DXDI_SYMCIPHER_C2_FIRST) &&
+	     (sym_cipher_type <= _DXDI_SYMCIPHER_C2_LAST))
+	    ) {
+		pr_debug("sym_cipher_type=%u\n", sym_cipher_type);
+		return put_user(ctx_size, &(user_params->ctx_size));
+	} else {
+		pr_err("Invalid cipher type=%u\n", sym_cipher_type);
+		return -EINVAL;
+	}
+}
+
+static int sep_ioctl_get_auth_enc_ctx_size(unsigned long arg)
+{
+	struct dxdi_get_auth_enc_ctx_size_params __user *user_params =
+	    (struct dxdi_get_auth_enc_ctx_size_params __user *)arg;
+	enum dxdi_auth_enc_type ae_type;
+	const u32 ctx_size = ctxmgr_get_ctx_size(ALG_CLASS_AUTH_ENC);
+	int err;
+
+	err = __get_user(ae_type, &(user_params->ae_type));
+	if (err)
+		return err;
+
+	if ((ae_type == DXDI_AUTHENC_NONE) || (ae_type > DXDI_AUTHENC_MAX)) {
+		pr_err("Invalid auth-enc. type=%u\n", ae_type);
+		return -EINVAL;
+	}
+
+	pr_debug("A.E. type=%u\n", ae_type);
+	return put_user(ctx_size, &(user_params->ctx_size));
+}
+
+static int sep_ioctl_get_mac_ctx_size(unsigned long arg)
+{
+	struct dxdi_get_mac_ctx_size_params __user *user_params =
+	    (struct dxdi_get_mac_ctx_size_params __user *)arg;
+	enum dxdi_mac_type mac_type;
+	const u32 ctx_size = ctxmgr_get_ctx_size(ALG_CLASS_MAC);
+	int err;
+
+	err = __get_user(mac_type, &(user_params->mac_type));
+	if (err)
+		return err;
+
+	if ((mac_type == DXDI_MAC_NONE) || (mac_type > DXDI_MAC_MAX)) {
+		pr_err("Invalid MAC type=%u\n", mac_type);
+		return -EINVAL;
+	}
+
+	pr_debug("MAC type=%u\n", mac_type);
+	return put_user(ctx_size, &(user_params->ctx_size));
+}
+
+static int sep_ioctl_get_hash_ctx_size(unsigned long arg)
+{
+	struct dxdi_get_hash_ctx_size_params __user *user_params =
+	    (struct dxdi_get_hash_ctx_size_params __user *)arg;
+	enum dxdi_hash_type hash_type;
+	const u32 ctx_size = ctxmgr_get_ctx_size(ALG_CLASS_HASH);
+	int err;
+
+	err = __get_user(hash_type, &(user_params->hash_type));
+	if (err)
+		return err;
+
+	if ((hash_type == DXDI_HASH_NONE) || (hash_type > DXDI_HASH_MAX)) {
+		pr_err("Invalid hash type=%u\n", hash_type);
+		return -EINVAL;
+	}
+
+	pr_debug("hash type=%u\n", hash_type);
+	return put_user(ctx_size, &(user_params->ctx_size));
+}
+
+static int sep_ioctl_sym_cipher_init(struct sep_client_ctx *client_ctx,
+				     unsigned long arg)
+{
+	struct dxdi_sym_cipher_init_params __user *user_init_params =
+			(struct dxdi_sym_cipher_init_params __user *)arg;
+	struct dxdi_sym_cipher_init_params init_params;
+	struct sep_op_ctx op_ctx;
+	/* Calculate size of input parameters part */
+	const unsigned long input_size =
+		offsetof(struct dxdi_sym_cipher_init_params, error_info);
+	int rc;
+
+	/* access permissions to arg was already checked in sep_ioctl */
+	if (__copy_from_user(&init_params, user_init_params, input_size)) {
+		pr_err("Failed reading input parameters");
+		return -EFAULT;
+	}
+
+	op_ctx_init(&op_ctx, client_ctx);
+
+	rc = init_crypto_context(&op_ctx, init_params.context_buf,
+				 ALG_CLASS_SYM_CIPHER, &(init_params.props));
+
+	/* Even on SeP error the function above
+	 * returns 0 (operation completed with no host side errors) */
+	put_user(op_ctx.error_info, &(user_init_params->error_info));
+
+	op_ctx_fini(&op_ctx);
+
+	return rc;
+}
+
+static int sep_ioctl_auth_enc_init(struct sep_client_ctx *client_ctx,
+				   unsigned long arg)
+{
+	struct dxdi_auth_enc_init_params __user *user_init_params =
+	    (struct dxdi_auth_enc_init_params __user *)arg;
+	struct dxdi_auth_enc_init_params init_params;
+	struct sep_op_ctx op_ctx;
+	/* Calculate size of input parameters part */
+	const unsigned long input_size =
+	    offsetof(struct dxdi_sym_cipher_init_params, error_info);
+	int rc;
+
+	/* access permissions to arg was already checked in sep_ioctl */
+	if (__copy_from_user(&init_params, user_init_params, input_size)) {
+		pr_err("Failed reading input parameters");
+		return -EFAULT;
+	}
+
+	op_ctx_init(&op_ctx, client_ctx);
+	rc = init_crypto_context(&op_ctx, init_params.context_buf,
+				 ALG_CLASS_AUTH_ENC, &(init_params.props));
+	/* Even on SeP error the function above
+	 * returns 0 (operation completed with no host side errors) */
+	put_user(op_ctx.error_info, &(user_init_params->error_info));
+
+	op_ctx_fini(&op_ctx);
+
+	return rc;
+}
+
+static int sep_ioctl_mac_init(struct sep_client_ctx *client_ctx,
+			      unsigned long arg)
+{
+	struct dxdi_mac_init_params __user *user_init_params =
+	    (struct dxdi_mac_init_params __user *)arg;
+	struct dxdi_mac_init_params init_params;
+	struct sep_op_ctx op_ctx;
+	/* Calculate size of input parameters part */
+	const unsigned long input_size =
+	    offsetof(struct dxdi_mac_init_params, error_info);
+	int rc;
+
+	/* access permissions to arg was already checked in sep_ioctl */
+	if (__copy_from_user(&init_params, user_init_params, input_size)) {
+		pr_err("Failed reading input parameters");
+		return -EFAULT;
+	}
+
+	op_ctx_init(&op_ctx, client_ctx);
+	rc = init_crypto_context(&op_ctx, init_params.context_buf,
+				 ALG_CLASS_MAC, &(init_params.props));
+	/* Even on SeP error the function above
+	 * returns 0 (operation completed with no host side errors) */
+	put_user(op_ctx.error_info, &(user_init_params->error_info));
+
+	op_ctx_fini(&op_ctx);
+
+	return rc;
+}
+
+static int sep_ioctl_hash_init(struct sep_client_ctx *client_ctx,
+			       unsigned long arg)
+{
+	struct dxdi_hash_init_params __user *user_init_params =
+	    (struct dxdi_hash_init_params __user *)arg;
+	struct dxdi_hash_init_params init_params;
+	struct sep_op_ctx op_ctx;
+	/* Calculate size of input parameters part */
+	const unsigned long input_size =
+	    offsetof(struct dxdi_hash_init_params, error_info);
+	int rc;
+
+	/* access permissions to arg was already checked in sep_ioctl */
+	if (__copy_from_user(&init_params, user_init_params, input_size)) {
+		pr_err("Failed reading input parameters");
+		return -EFAULT;
+	}
+
+	op_ctx_init(&op_ctx, client_ctx);
+	rc = init_crypto_context(&op_ctx, init_params.context_buf,
+				 ALG_CLASS_HASH, &(init_params.hash_type));
+	/* Even on SeP error the function above
+	 * returns 0 (operation completed with no host side errors) */
+	put_user(op_ctx.error_info, &(user_init_params->error_info));
+
+	op_ctx_fini(&op_ctx);
+
+	return rc;
+}
+
+static int sep_ioctl_proc_dblk(struct sep_client_ctx *client_ctx,
+			       unsigned long arg)
+{
+	struct dxdi_process_dblk_params __user *user_dblk_params =
+	    (struct dxdi_process_dblk_params __user *)arg;
+	struct dxdi_process_dblk_params dblk_params;
+	struct sep_op_ctx op_ctx;
+	/* Calculate size of input parameters part */
+	const unsigned long input_size =
+	    offsetof(struct dxdi_process_dblk_params, error_info);
+	int rc;
+
+	/* access permissions to arg was already checked in sep_ioctl */
+	if (__copy_from_user(&dblk_params, user_dblk_params, input_size)) {
+		pr_err("Failed reading input parameters");
+		return -EFAULT;
+	}
+
+	op_ctx_init(&op_ctx, client_ctx);
+	rc = sep_proc_dblk(&op_ctx, dblk_params.context_buf,
+			   dblk_params.data_block_type,
+			   dblk_params.data_in, dblk_params.data_out,
+			   dblk_params.data_in_size);
+	/* Even on SeP error the function above
+	 * returns 0 (operation completed with no host side errors) */
+	put_user(op_ctx.error_info, &(user_dblk_params->error_info));
+
+	op_ctx_fini(&op_ctx);
+
+	return rc;
+}
+
+static int sep_ioctl_fin_proc(struct sep_client_ctx *client_ctx,
+			      unsigned long arg)
+{
+	struct dxdi_fin_process_params __user *user_fin_params =
+	    (struct dxdi_fin_process_params __user *)arg;
+	struct dxdi_fin_process_params fin_params;
+	struct sep_op_ctx op_ctx;
+	/* Calculate size of input parameters part */
+	const unsigned long input_size =
+	    offsetof(struct dxdi_fin_process_params, digest_or_mac);
+	int rc;
+
+	/* access permissions to arg was already checked in sep_ioctl */
+	if (__copy_from_user(&fin_params, user_fin_params, input_size)) {
+		pr_err("Failed reading input parameters");
+		return -EFAULT;
+	}
+
+	op_ctx_init(&op_ctx, client_ctx);
+	rc = sep_fin_proc(&op_ctx, fin_params.context_buf,
+			  fin_params.data_in, fin_params.data_out,
+			  fin_params.data_in_size,
+			  fin_params.digest_or_mac,
+			  &(fin_params.digest_or_mac_size));
+	/* Even on SeP error the function above
+	 * returns 0 (operation completed with no host side errors) */
+	if (rc == 0) {
+		/* Always copy back digest/mac size + error_info */
+		/* (that's the reason for keeping them together)  */
+		rc = put_user(fin_params.digest_or_mac_size,
+			      &user_fin_params->digest_or_mac_size);
+		rc += put_user(fin_params.error_info,
+				 &user_fin_params->error_info);
+
+		/* We always need to copy back the digest/mac size (even if 0)
+		 * in order to indicate validity of digest_or_mac buffer */
+	}
+	if ((rc == 0) && (op_ctx.error_info == 0) &&
+	    (fin_params.digest_or_mac_size > 0)) {
+		if (likely(fin_params.digest_or_mac_size <=
+			   DXDI_DIGEST_SIZE_MAX)) {
+			/* Copy back digest/mac if valid */
+			rc = copy_to_user(&(user_fin_params->digest_or_mac),
+					    fin_params.digest_or_mac,
+					    fin_params.digest_or_mac_size);
+		} else {	/* Invalid digest/mac size! */
+			pr_err("Got invalid digest/MAC size = %u",
+				    fin_params.digest_or_mac_size);
+			op_ctx.error_info = DXDI_ERROR_INVAL_DATA_SIZE;
+			rc = -EINVAL;
+		}
+	}
+
+	/* Even on SeP error the function above
+	 * returns 0 (operation completed with no host side errors) */
+	put_user(op_ctx.error_info, &(user_fin_params->error_info));
+
+	op_ctx_fini(&op_ctx);
+
+	return rc;
+}
+
+static int sep_ioctl_combined_init(struct sep_client_ctx *client_ctx,
+				   unsigned long arg)
+{
+	struct dxdi_combined_init_params __user *user_init_params =
+	    (struct dxdi_combined_init_params __user *)arg;
+	struct dxdi_combined_init_params init_params;
+	struct sep_op_ctx op_ctx;
+	/* Calculate size of input parameters part */
+	const unsigned long input_size =
+	    offsetof(struct dxdi_combined_init_params, error_info);
+	int rc;
+
+	/* access permissions to arg was already checked in sep_ioctl */
+	if (__copy_from_user(&init_params, user_init_params, input_size)) {
+		pr_err("Failed reading input parameters");
+		return -EFAULT;
+	}
+
+	op_ctx_init(&op_ctx, client_ctx);
+	rc = init_combined_context(&op_ctx, &(init_params.props));
+	/* Even on SeP error the function above
+	 * returns 0 (operation completed with no host side errors) */
+	put_user(op_ctx.error_info, &(user_init_params->error_info));
+
+	op_ctx_fini(&op_ctx);
+
+	return rc;
+}
+
+static int sep_ioctl_combined_proc_dblk(struct sep_client_ctx *client_ctx,
+					unsigned long arg)
+{
+	struct dxdi_combined_proc_dblk_params __user *user_dblk_params =
+	    (struct dxdi_combined_proc_dblk_params __user *)arg;
+	struct dxdi_combined_proc_dblk_params dblk_params;
+	struct sep_op_ctx op_ctx;
+	/* Calculate size of input parameters part */
+	const unsigned long input_size =
+	    offsetof(struct dxdi_combined_proc_dblk_params, error_info);
+	int rc;
+
+	/* access permissions to arg was already checked in sep_ioctl */
+	if (__copy_from_user(&dblk_params, user_dblk_params, input_size)) {
+		pr_err("Failed reading input parameters");
+		return -EFAULT;
+	}
+
+	op_ctx_init(&op_ctx, client_ctx);
+	rc = sep_combined_proc_dblk(&op_ctx, &dblk_params.props,
+				    dblk_params.data_in, dblk_params.data_out,
+				    dblk_params.data_in_size);
+	/* Even on SeP error the function above
+	 * returns 0 (operation completed with no host side errors) */
+	put_user(op_ctx.error_info, &(user_dblk_params->error_info));
+
+	op_ctx_fini(&op_ctx);
+
+	return rc;
+}
+
+static int sep_ioctl_combined_fin_proc(struct sep_client_ctx *client_ctx,
+				       unsigned long arg)
+{
+	struct dxdi_combined_proc_params __user *user_fin_params =
+	    (struct dxdi_combined_proc_params __user *)arg;
+	struct dxdi_combined_proc_params fin_params;
+	struct sep_op_ctx op_ctx;
+	/* Calculate size of input parameters part */
+	const unsigned long input_size =
+	    offsetof(struct dxdi_combined_proc_params, error_info);
+	int rc;
+
+	/* access permissions to arg was already checked in sep_ioctl */
+	if (__copy_from_user(&fin_params, user_fin_params, input_size)) {
+		pr_err("Failed reading input parameters");
+		return -EFAULT;
+	}
+
+	op_ctx_init(&op_ctx, client_ctx);
+	rc = sep_combined_fin_proc(&op_ctx, &fin_params.props,
+				   fin_params.data_in, fin_params.data_out,
+				   fin_params.data_in_size,
+				   fin_params.auth_data,
+				   &(fin_params.auth_data_size));
+
+	if (rc == 0) {
+		/* Always copy back digest size + error_info */
+		/* (that's the reason for keeping them together)  */
+		rc = put_user(fin_params.auth_data_size,
+			      &user_fin_params->auth_data_size);
+		rc += put_user(fin_params.error_info,
+				 &user_fin_params->error_info);
+
+		/* We always need to copy back the digest size (even if 0)
+		 * in order to indicate validity of digest buffer */
+	}
+
+	if ((rc == 0) && (op_ctx.error_info == 0)) {
+		if (likely((fin_params.auth_data_size > 0) &&
+			   (fin_params.auth_data_size <=
+			    DXDI_DIGEST_SIZE_MAX))) {
+			/* Copy back auth if valid */
+			rc = copy_to_user(&(user_fin_params->auth_data),
+					    fin_params.auth_data,
+					    fin_params.auth_data_size);
+		}
+	}
+
+	/* Even on SeP error the function above
+	 * returns 0 (operation completed with no host side errors) */
+	put_user(op_ctx.error_info, &(user_fin_params->error_info));
+
+	op_ctx_fini(&op_ctx);
+
+	return rc;
+}
+
+static int sep_ioctl_combined_proc(struct sep_client_ctx *client_ctx,
+				   unsigned long arg)
+{
+	struct dxdi_combined_proc_params __user *user_params =
+	    (struct dxdi_combined_proc_params __user *)arg;
+	struct dxdi_combined_proc_params params;
+	struct sep_op_ctx op_ctx;
+	/* Calculate size of input parameters part */
+	const unsigned long input_size =
+	    offsetof(struct dxdi_combined_proc_params, error_info);
+	int rc;
+
+	/* access permissions to arg was already checked in sep_ioctl */
+	if (__copy_from_user(&params, user_params, input_size)) {
+		pr_err("Failed reading input parameters");
+		return -EFAULT;
+	}
+
+	op_ctx_init(&op_ctx, client_ctx);
+	rc = process_combined_integrated(&op_ctx, &(params.props),
+					 params.data_in, params.data_out,
+					 params.data_in_size, params.auth_data,
+					 &(params.auth_data_size));
+
+	if (rc == 0) {
+		/* Always copy back digest size + error_info */
+		/* (that's the reason for keeping them together)  */
+		rc = put_user(params.auth_data_size,
+			      &user_params->auth_data_size);
+		rc += put_user(params.error_info, &user_params->error_info);
+
+		/* We always need to copy back the digest size (even if 0)
+		 * in order to indicate validity of digest buffer */
+	}
+
+	if ((rc == 0) && (op_ctx.error_info == 0)) {
+		if (likely((params.auth_data_size > 0) &&
+			   (params.auth_data_size <= DXDI_DIGEST_SIZE_MAX))) {
+			/* Copy back auth if valid */
+			rc = copy_to_user(&(user_params->auth_data),
+					  params.auth_data,
+					  params.auth_data_size);
+		}
+	}
+
+	/* Even on SeP error the function above
+	 * returns 0 (operation completed with no host side errors) */
+	put_user(op_ctx.error_info, &(user_params->error_info));
+
+	op_ctx_fini(&op_ctx);
+
+	return rc;
+}
+
+static int sep_ioctl_sym_cipher_proc(struct sep_client_ctx *client_ctx,
+				     unsigned long arg)
+{
+	struct dxdi_sym_cipher_proc_params __user *user_params =
+	    (struct dxdi_sym_cipher_proc_params __user *)arg;
+	struct dxdi_sym_cipher_proc_params params;
+	struct sep_op_ctx op_ctx;
+	/* Calculate size of input parameters part */
+	const unsigned long input_size =
+	    offsetof(struct dxdi_sym_cipher_proc_params, error_info);
+	int rc;
+
+	/* access permissions to arg was already checked in sep_ioctl */
+	if (__copy_from_user(&params, user_params, input_size)) {
+		pr_err("Failed reading input parameters");
+		return -EFAULT;
+	}
+
+	op_ctx_init(&op_ctx, client_ctx);
+	rc = process_integrated(&op_ctx, params.context_buf,
+				ALG_CLASS_SYM_CIPHER, &(params.props),
+				params.data_in, params.data_out,
+				params.data_in_size, NULL, NULL);
+
+	/* Even on SeP error the function above
+	 * returns 0 (operation completed with no host side errors) */
+	put_user(op_ctx.error_info, &(user_params->error_info));
+
+	op_ctx_fini(&op_ctx);
+
+	return rc;
+}
+
+static int sep_ioctl_auth_enc_proc(struct sep_client_ctx *client_ctx,
+				   unsigned long arg)
+{
+	struct dxdi_auth_enc_proc_params __user *user_params =
+	    (struct dxdi_auth_enc_proc_params __user *)arg;
+	struct dxdi_auth_enc_proc_params params;
+	struct sep_op_ctx op_ctx;
+	/* Calculate size of input parameters part */
+	const unsigned long input_size =
+	    offsetof(struct dxdi_auth_enc_proc_params, tag);
+	int rc;
+	u8 tag_size;
+
+	/* access permissions to arg was already checked in sep_ioctl */
+	if (__copy_from_user(&params, user_params, input_size)) {
+		pr_err("Failed reading input parameters");
+		return -EFAULT;
+	}
+
+	op_ctx_init(&op_ctx, client_ctx);
+
+	if (params.props.adata_size == 0) {
+		/* without assoc data we can optimize for one descriptor
+		 * sequence */
+		rc = process_integrated(&op_ctx, params.context_buf,
+					ALG_CLASS_AUTH_ENC, &(params.props),
+					params.text_data, params.data_out,
+					params.props.text_size, params.tag,
+					&tag_size);
+	} else {
+		/* Integrated processing with auth. enc. algorithms with
+		 * Additional-Data requires special two-descriptors flow */
+		rc = process_integrated_auth_enc(&op_ctx, params.context_buf,
+						 ALG_CLASS_AUTH_ENC,
+						 &(params.props), params.adata,
+						 params.text_data,
+						 params.data_out,
+						 params.props.adata_size,
+						 params.props.text_size,
+						 params.tag, &tag_size);
+
+	}
+
+	if ((rc == 0) && (tag_size != params.props.tag_size)) {
+		pr_warn(
+			"Tag result size different than requested (%u != %u)\n",
+			tag_size, params.props.tag_size);
+	}
+
+	if ((rc == 0) && (op_ctx.error_info == 0) && (tag_size > 0)) {
+		if (likely(tag_size <= DXDI_DIGEST_SIZE_MAX)) {
+			/* Copy back digest/mac if valid */
+			rc = __copy_to_user(&(user_params->tag), params.tag,
+					    tag_size);
+		} else {	/* Invalid digest/mac size! */
+			pr_err("Got invalid tag size = %u", tag_size);
+			op_ctx.error_info = DXDI_ERROR_INVAL_DATA_SIZE;
+			rc = -EINVAL;
+		}
+	}
+
+	/* Even on SeP error the function above
+	 * returns 0 (operation completed with no host side errors) */
+	put_user(op_ctx.error_info, &(user_params->error_info));
+
+	op_ctx_fini(&op_ctx);
+
+	return rc;
+}
+
+static int sep_ioctl_mac_proc(struct sep_client_ctx *client_ctx,
+			      unsigned long arg)
+{
+	struct dxdi_mac_proc_params __user *user_params =
+	    (struct dxdi_mac_proc_params __user *)arg;
+	struct dxdi_mac_proc_params params;
+	struct sep_op_ctx op_ctx;
+	/* Calculate size of input parameters part */
+	const unsigned long input_size =
+	    offsetof(struct dxdi_mac_proc_params, mac);
+	int rc;
+
+	/* access permissions to arg was already checked in sep_ioctl */
+	if (__copy_from_user(&params, user_params, input_size)) {
+		pr_err("Failed reading input parameters");
+		return -EFAULT;
+	}
+
+	op_ctx_init(&op_ctx, client_ctx);
+	rc = process_integrated(&op_ctx, params.context_buf,
+				ALG_CLASS_MAC, &(params.props),
+				params.data_in, NULL, params.data_in_size,
+				params.mac, &(params.mac_size));
+
+	if (rc == 0) {
+		/* Always copy back mac size + error_info */
+		/* (that's the reason for keeping them together)  */
+		rc = put_user(params.mac_size, &user_params->mac_size);
+		rc += put_user(params.error_info, &user_params->error_info);
+
+		/* We always need to copy back the mac size (even if 0)
+		 * in order to indicate validity of mac buffer */
+	}
+
+	if ((rc == 0) && (op_ctx.error_info == 0)) {
+		if (likely((params.mac_size > 0) &&
+			   (params.mac_size <= DXDI_DIGEST_SIZE_MAX))) {
+			/* Copy back mac if valid */
+			rc = copy_to_user(&(user_params->mac), params.mac,
+					  params.mac_size);
+		} else {	/* Invalid mac size! */
+			pr_err("Got invalid MAC size = %u",
+				    params.mac_size);
+			op_ctx.error_info = DXDI_ERROR_INVAL_DATA_SIZE;
+			rc = -EINVAL;
+		}
+	}
+
+	/* Even on SeP error the function above
+	 * returns 0 (operation completed with no host side errors) */
+	put_user(op_ctx.error_info, &(user_params->error_info));
+
+	op_ctx_fini(&op_ctx);
+
+	return rc;
+}
+
+static int sep_ioctl_hash_proc(struct sep_client_ctx *client_ctx,
+			       unsigned long arg)
+{
+	struct dxdi_hash_proc_params __user *user_params =
+	    (struct dxdi_hash_proc_params __user *)arg;
+	struct dxdi_hash_proc_params params;
+	struct sep_op_ctx op_ctx;
+	/* Calculate size of input parameters part */
+	const unsigned long input_size =
+	    offsetof(struct dxdi_hash_proc_params, digest);
+	int rc;
+
+	/* access permissions to arg was already checked in sep_ioctl */
+	if (__copy_from_user(&params, user_params, input_size)) {
+		pr_err("Failed reading input parameters");
+		return -EFAULT;
+	}
+
+	op_ctx_init(&op_ctx, client_ctx);
+	rc = process_integrated(&op_ctx, params.context_buf,
+				ALG_CLASS_HASH, &(params.hash_type),
+				params.data_in, NULL, params.data_in_size,
+				params.digest, &(params.digest_size));
+
+	if (rc == 0) {
+		/* Always copy back digest size + error_info */
+		/* (that's the reason for keeping them together)  */
+		rc = put_user(params.digest_size, &user_params->digest_size);
+		rc += put_user(params.error_info, &user_params->error_info);
+
+		/* We always need to copy back the digest size (even if 0)
+		 * in order to indicate validity of digest buffer */
+	}
+
+	if ((rc == 0) && (op_ctx.error_info == 0)) {
+		if (likely((params.digest_size > 0) &&
+			   (params.digest_size <= DXDI_DIGEST_SIZE_MAX))) {
+			/* Copy back mac if valid */
+			rc = copy_to_user(&(user_params->digest),
+					  params.digest, params.digest_size);
+		} else {	/* Invalid digest size! */
+			pr_err("Got invalid digest size = %u",
+				    params.digest_size);
+			op_ctx.error_info = DXDI_ERROR_INVAL_DATA_SIZE;
+			rc = -EINVAL;
+		}
+	}
+
+	/* Even on SeP error the function above
+	 * returns 0 (operation completed with no host side errors) */
+	put_user(op_ctx.error_info, &(user_params->error_info));
+
+	op_ctx_fini(&op_ctx);
+
+	return rc;
+}
+
+static int sep_ioctl_sep_rpc(struct sep_client_ctx *client_ctx,
+			     unsigned long arg)
+{
+
+	struct dxdi_sep_rpc_params __user *user_params =
+	    (struct dxdi_sep_rpc_params __user *)arg;
+	struct dxdi_sep_rpc_params params;
+	struct sep_op_ctx op_ctx;
+	/* Calculate size of input parameters part */
+	const unsigned long input_size =
+	    offsetof(struct dxdi_sep_rpc_params, error_info);
+	int rc;
+
+	/* access permissions to arg was already checked in sep_ioctl */
+	if (__copy_from_user(&params, user_params, input_size)) {
+		pr_err("Failed reading input parameters");
+		return -EFAULT;
+	}
+
+	op_ctx_init(&op_ctx, client_ctx);
+	rc = dispatch_sep_rpc(&op_ctx, params.agent_id, params.func_id,
+			      params.mem_refs, params.rpc_params_size,
+			      params.rpc_params);
+
+	put_user(op_ctx.error_info, &(user_params->error_info));
+
+	op_ctx_fini(&op_ctx);
+
+	return rc;
+}
+
+#if MAX_SEPAPP_SESSION_PER_CLIENT_CTX > 0
+static int sep_ioctl_register_mem4dma(struct sep_client_ctx *client_ctx,
+				      unsigned long arg)
+{
+
+	struct dxdi_register_mem4dma_params __user *user_params =
+	    (struct dxdi_register_mem4dma_params __user *)arg;
+	struct dxdi_register_mem4dma_params params;
+	/* Calculate size of input parameters part */
+	const unsigned long input_size =
+	    offsetof(struct dxdi_register_mem4dma_params, memref_id);
+	enum dma_data_direction dma_dir;
+	int rc;
+
+	/* access permissions to arg was already checked in sep_ioctl */
+	if (__copy_from_user(&params, user_params, input_size)) {
+		pr_err("Failed reading input parameters");
+		return -EFAULT;
+	}
+
+	/* convert DMA direction to enum dma_data_direction */
+	dma_dir = dxdi_data_dir_to_dma_data_dir(params.memref.dma_direction);
+	if (unlikely(dma_dir == DMA_NONE)) {
+		pr_err("Invalid DMA direction (%d)\n",
+			    params.memref.dma_direction);
+		rc = -EINVAL;
+	} else {
+		params.memref_id = register_client_memref(client_ctx,
+							  (u8 __user *)
+							  (uintptr_t)
+							  params.memref.
+							  start_or_offset, NULL,
+							  params.memref.size,
+							  dma_dir);
+		if (unlikely(!IS_VALID_MEMREF_IDX(params.memref_id))) {
+			rc = -ENOMEM;
+		} else {
+			rc = put_user(params.memref_id,
+				      &(user_params->memref_id));
+			if (rc != 0)	/* revert if failed __put_user */
+				(void)free_client_memref(client_ctx,
+							 params.memref_id);
+		}
+	}
+
+	return rc;
+}
+
+static int sep_ioctl_free_mem4dma(struct sep_client_ctx *client_ctx,
+				  unsigned long arg)
+{
+	struct dxdi_free_mem4dma_params __user *user_params =
+	    (struct dxdi_free_mem4dma_params __user *)arg;
+	int memref_id;
+	int err;
+
+	/* access permissions to arg was already checked in sep_ioctl */
+	err = __get_user(memref_id, &user_params->memref_id);
+	if (err) {
+		pr_err("Failed reading input parameter\n");
+		return -EFAULT;
+	}
+
+	return free_client_memref(client_ctx, memref_id);
+}
+#endif
+
+static int sep_ioctl_set_iv(struct sep_client_ctx *client_ctx,
+			    unsigned long arg)
+{
+	struct dxdi_aes_iv_params *user_params =
+	    (struct dxdi_aes_iv_params __user *)arg;
+	struct dxdi_aes_iv_params params;
+	struct host_crypto_ctx_sym_cipher *host_context =
+	    (struct host_crypto_ctx_sym_cipher *)user_params->context_buf;
+	struct crypto_ctx_uid uid;
+	int err;
+
+	/* Copy ctx uid from user context */
+	if (copy_from_user(&uid, &host_context->uid,
+			   sizeof(struct crypto_ctx_uid))) {
+		pr_err("Failed reading input parameters");
+		return -EFAULT;
+	}
+
+	/* Copy IV from user context */
+	if (__copy_from_user(&params, user_params,
+			     sizeof(struct dxdi_aes_iv_params))) {
+		pr_err("Failed reading input parameters");
+		return -EFAULT;
+	}
+	err =
+	    ctxmgr_set_symcipher_iv_user(user_params->context_buf,
+					 params.iv_ptr);
+	if (err != 0)
+		return err;
+
+	ctxmgr_sep_cache_invalidate(client_ctx->drv_data->sep_cache,
+				    uid, CRYPTO_CTX_ID_SINGLE_MASK);
+
+	return 0;
+}
+
+static int sep_ioctl_get_iv(struct sep_client_ctx *client_ctx,
+			    unsigned long arg)
+{
+	struct dxdi_aes_iv_params *user_params =
+	    (struct dxdi_aes_iv_params __user *)arg;
+	struct dxdi_aes_iv_params params;
+	int err;
+
+	/* copy context ptr from user */
+	if (__copy_from_user(&params, user_params, sizeof(u32))) {
+		pr_err("Failed reading input parameters");
+		return -EFAULT;
+	}
+
+	err =
+	    ctxmgr_get_symcipher_iv_user(user_params->context_buf,
+					 params.iv_ptr);
+	if (err != 0)
+		return err;
+
+	if (copy_to_user(user_params, &params,
+	    sizeof(struct dxdi_aes_iv_params))) {
+		pr_err("Failed writing input parameters");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/****** Driver entry points (open, release, ioctl, etc.) ******/
+
+/**
+ * init_client_ctx() - Initialize a client context object given
+ * @drvdata:	Queue driver context
+ * @client_ctx:
+ *
+ * Returns void
+ */
+void init_client_ctx(struct queue_drvdata *drvdata,
+		     struct sep_client_ctx *client_ctx)
+{
+	int i;
+	const unsigned int qid = drvdata - (drvdata->sep_data->queue);
+
+	/* Initialize user data structure */
+	client_ctx->qid = qid;
+	client_ctx->drv_data = drvdata;
+	atomic_set(&client_ctx->uid_cntr, 0);
+#if MAX_SEPAPP_SESSION_PER_CLIENT_CTX > 0
+	/* Initialize sessions */
+	for (i = 0; i < MAX_SEPAPP_SESSION_PER_CLIENT_CTX; i++) {
+		mutex_init(&client_ctx->sepapp_sessions[i].session_lock);
+		client_ctx->sepapp_sessions[i].sep_session_id =
+		    SEP_SESSION_ID_INVALID;
+		/* The rest of the fields are 0/NULL from kzalloc */
+	}
+#endif
+	/* Initialize memrefs */
+	for (i = 0; i < MAX_REG_MEMREF_PER_CLIENT_CTX; i++) {
+		mutex_init(&client_ctx->reg_memrefs[i].buf_lock);
+		/* The rest of the fields are 0/NULL from kzalloc */
+	}
+
+	init_waitqueue_head(&client_ctx->memref_wq);
+	client_ctx->memref_cnt = 0;
+}
+
+/**
+ * sep_open() - "open" device file entry point.
+ * @inode:
+ * @file:
+ *
+ * Returns int
+ */
+static int sep_open(struct inode *inode, struct file *file)
+{
+	struct queue_drvdata *drvdata;
+	struct sep_client_ctx *client_ctx;
+	unsigned int qid;
+
+	drvdata = container_of(inode->i_cdev, struct queue_drvdata, cdev);
+
+	if (imajor(inode) != MAJOR(drvdata->sep_data->devt_base)) {
+		pr_err("Invalid major device num=%d\n", imajor(inode));
+		return -ENOENT;
+	}
+	qid = iminor(inode) - MINOR(drvdata->sep_data->devt_base);
+	if (qid >= drvdata->sep_data->num_of_desc_queues) {
+		pr_err("Invalid minor device num=%d\n", iminor(inode));
+		return -ENOENT;
+	}
+#ifdef DEBUG
+	/* The qid based on the minor device number must match the offset
+	 * of given drvdata in the queues array of the sep_data context */
+	if (qid != (drvdata - (drvdata->sep_data->queue))) {
+		pr_err("qid=%d but drvdata index is %d\n",
+			    qid, (drvdata - (drvdata->sep_data->queue)));
+		return -EINVAL;
+	}
+#endif
+	pr_debug("qid=%d\n", qid);
+
+	client_ctx = kzalloc(sizeof(*client_ctx), GFP_KERNEL);
+	if (client_ctx == NULL)
+		return -ENOMEM;
+
+	init_client_ctx(drvdata, client_ctx);
+
+	file->private_data = client_ctx;
+
+	return 0;
+}
+
+void cleanup_client_ctx(struct queue_drvdata *drvdata,
+			struct sep_client_ctx *client_ctx)
+{
+	int memref_id;
+#if MAX_SEPAPP_SESSION_PER_CLIENT_CTX > 0
+	struct sep_op_ctx op_ctx;
+	int session_id;
+	struct crypto_ctx_uid uid;
+
+	/* Free any Applet session left open */
+	for (session_id = 0; session_id < MAX_SEPAPP_SESSION_PER_CLIENT_CTX;
+	     session_id++) {
+		if (IS_VALID_SESSION_CTX
+		    (&client_ctx->sepapp_sessions[session_id])) {
+			pr_debug("Closing session ID=%d\n", session_id);
+			op_ctx_init(&op_ctx, client_ctx);
+			sepapp_session_close(&op_ctx, session_id);
+			/* Note: There is never a problem with the session's
+			 * ref_cnt because when "release" is invoked there
+			 * are no pending IOCTLs, so ref_cnt is at most 1 */
+			op_ctx_fini(&op_ctx);
+		}
+		mutex_destroy(&client_ctx->sepapp_sessions[session_id].
+			      session_lock);
+	}
+#endif				/*MAX_SEPAPP_SESSION_PER_CLIENT_CTX > 0 */
+
+	/* Free registered user memory references */
+	for (memref_id = 0; memref_id < MAX_REG_MEMREF_PER_CLIENT_CTX;
+	     memref_id++) {
+		if (client_ctx->reg_memrefs[memref_id].ref_cnt > 0) {
+			pr_debug("Freeing user memref ID=%d\n", memref_id);
+			(void)free_client_memref(client_ctx, memref_id);
+		}
+		/* There is no problem with memref ref_cnt because when
+		 * "release" is invoked there are no pending IOCTLs,
+		 * so ref_cnt is at most 1                             */
+		mutex_destroy(&client_ctx->reg_memrefs[memref_id].buf_lock);
+	}
+
+	/* Invalidate any outstanding descriptors associated with this
+	 * client_ctx */
+	desc_q_mark_invalid_cookie(drvdata->desc_queue, (void *)client_ctx);
+
+	uid.addr = ((u64) (unsigned long)client_ctx);
+	uid.cntr = 0;
+
+	/* Invalidate any crypto context cache entry associated with this client
+	 * context before freeing context data object that may be reused.
+	 * This assures retaining of UIDs uniqueness (and makes sense since all
+	 * associated contexts does not exist anymore) */
+	ctxmgr_sep_cache_invalidate(drvdata->sep_cache, uid,
+				    CRYPTO_CTX_ID_CLIENT_MASK);
+}
+
+static int sep_release(struct inode *inode, struct file *file)
+{
+	struct sep_client_ctx *client_ctx = file->private_data;
+	struct queue_drvdata *drvdata = client_ctx->drv_data;
+
+	cleanup_client_ctx(drvdata, client_ctx);
+
+	kfree(client_ctx);
+
+	return 0;
+}
+
+static ssize_t
+sep_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
+{
+	pr_debug("Invoked for %zu bytes", count);
+	return -ENOSYS;		/* nothing to read... IOCTL only */
+}
+
+/*!
+ * The SeP device does not support read/write
+ * We use it for debug purposes. Currently loopback descriptor is sent given
+ * number of times. Usage example: echo 10 > /dev/dx_sep_q0
+ * TODO: Move this functionality to sysfs?
+ *
+ * \param filp
+ * \param buf
+ * \param count
+ * \param ppos
+ *
+ * \return ssize_t
+ */
+static ssize_t
+sep_write(struct file *filp, const char __user *buf, size_t count, loff_t *ppos)
+{
+#ifdef DEBUG
+	struct sep_sw_desc desc;
+	struct sep_client_ctx *client_ctx = filp->private_data;
+	struct sep_op_ctx op_ctx;
+	unsigned int loop_times = 1;
+	unsigned int i;
+	int rc = 0;
+	char tmp_buf[80];
+
+	if (count > 79)
+		return -ENOMEM;	/* Avoid buffer overflow */
+	memcpy(tmp_buf, buf, count);	/* Copy to NULL terminate */
+	tmp_buf[count] = 0;	/* NULL terminate */
+
+	/*pr_debug("Invoked for %u bytes", count); */
+	sscanf(buf, "%u", &loop_times);
+	pr_debug("Loopback X %u...\n", loop_times);
+
+	op_ctx_init(&op_ctx, client_ctx);
+	/* prepare loopback descriptor */
+	desq_q_pack_debug_desc(&desc, &op_ctx);
+
+	/* Perform loopback for given times */
+	for (i = 0; i < loop_times; i++) {
+		op_ctx.op_state = USER_OP_INPROC;
+		rc = desc_q_enqueue(client_ctx->drv_data->desc_queue, &desc,
+				    true);
+		if (unlikely(IS_DESCQ_ENQUEUE_ERR(rc))) {
+			pr_err("Failed sending desc. %u\n", i);
+			break;
+		}
+		rc = wait_for_sep_op_result(&op_ctx);
+		if (rc != 0) {
+			pr_err("Failed completion of desc. %u\n", i);
+			break;
+		}
+		op_ctx.op_state = USER_OP_NOP;
+	}
+
+	op_ctx_fini(&op_ctx);
+
+	pr_debug("Completed loopback of %u desc.\n", i);
+
+	return count;		/* Noting to write for this device... */
+#else /* DEBUG */
+	pr_debug("Invoked for %zu bytes", count);
+	return -ENOSYS;		/* nothing to write... IOCTL only */
+#endif /* DEBUG */
+}
+
+/*!
+ * IOCTL entry point
+ *
+ * \param filp
+ * \param cmd
+ * \param arg
+ *
+ * \return int
+ * \retval 0 Operation succeeded (but SeP return code may indicate an error)
+ * \retval -ENOTTY  : Unknown IOCTL command
+ * \retval -ENOSYS  : Unsupported/not-implemented (known) operation
+ * \retval -EINVAL  : Invalid parameters
+ * \retval -EFAULT  : Bad pointers for given user memory space
+ * \retval -EPERM   : Not enough permissions for given command
+ * \retval -ENOMEM,-EAGAIN: when not enough resources available for given op.
+ * \retval -EIO     : SeP HW error or another internal error
+ *                    (probably operation timed out or unexpected behavior)
+ */
+long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	struct sep_client_ctx *client_ctx = filp->private_data;
+	unsigned long long ioctl_start, ioctl_end;
+	int err = 0;
+
+	preempt_disable_notrace();
+	ioctl_start = sched_clock();
+	preempt_enable_notrace();
+
+	/* Verify IOCTL command: magic + number */
+	if (_IOC_TYPE(cmd) != DXDI_IOC_MAGIC) {
+		pr_err("Invalid IOCTL type=%u", _IOC_TYPE(cmd));
+		return -ENOTTY;
+	}
+	if (_IOC_NR(cmd) > DXDI_IOC_NR_MAX) {
+		pr_err("IOCTL NR=%u out of range for ABI ver.=%u.%u",
+			    _IOC_NR(cmd), DXDI_VER_MAJOR, DXDI_VER_MINOR);
+		return -ENOTTY;
+	}
+
+	/* Verify permissions on parameters pointer (arg) */
+	if (_IOC_DIR(cmd) & _IOC_READ)
+		err = !access_ok(ACCESS_WRITE,
+				 (void __user *)arg, _IOC_SIZE(cmd));
+	else if (_IOC_DIR(cmd) & _IOC_WRITE)
+		err = !access_ok(ACCESS_READ,
+				 (void __user *)arg, _IOC_SIZE(cmd));
+	if (err)
+		return -EFAULT;
+
+#ifdef SEP_RUNTIME_PM
+	dx_sep_pm_runtime_get();
+#endif
+
+	switch (_IOC_NR(cmd)) {
+		/* Version info. commands */
+	case DXDI_IOC_NR_GET_VER_MAJOR:
+		pr_debug("DXDI_IOC_NR_GET_VER_MAJOR\n");
+		err = sep_ioctl_get_ver_major(arg);
+		break;
+	case DXDI_IOC_NR_GET_VER_MINOR:
+		pr_debug("DXDI_IOC_NR_GET_VER_MINOR\n");
+		err = sep_ioctl_get_ver_minor(arg);
+		break;
+		/* Context size queries */
+	case DXDI_IOC_NR_GET_SYMCIPHER_CTX_SIZE:
+		pr_debug("DXDI_IOC_NR_GET_SYMCIPHER_CTX_SIZE\n");
+		err = sep_ioctl_get_sym_cipher_ctx_size(arg);
+		break;
+	case DXDI_IOC_NR_GET_AUTH_ENC_CTX_SIZE:
+		pr_debug("DXDI_IOC_NR_GET_AUTH_ENC_CTX_SIZE\n");
+		err = sep_ioctl_get_auth_enc_ctx_size(arg);
+		break;
+	case DXDI_IOC_NR_GET_MAC_CTX_SIZE:
+		pr_debug("DXDI_IOC_NR_GET_MAC_CTX_SIZE\n");
+		err = sep_ioctl_get_mac_ctx_size(arg);
+		break;
+	case DXDI_IOC_NR_GET_HASH_CTX_SIZE:
+		pr_debug("DXDI_IOC_NR_GET_HASH_CTX_SIZE\n");
+		err = sep_ioctl_get_hash_ctx_size(arg);
+		break;
+		/* Init context commands */
+	case DXDI_IOC_NR_SYMCIPHER_INIT:
+		pr_debug("DXDI_IOC_NR_SYMCIPHER_INIT\n");
+		err = sep_ioctl_sym_cipher_init(client_ctx, arg);
+		break;
+	case DXDI_IOC_NR_AUTH_ENC_INIT:
+		pr_debug("DXDI_IOC_NR_AUTH_ENC_INIT\n");
+		err = sep_ioctl_auth_enc_init(client_ctx, arg);
+		break;
+	case DXDI_IOC_NR_MAC_INIT:
+		pr_debug("DXDI_IOC_NR_MAC_INIT\n");
+		err = sep_ioctl_mac_init(client_ctx, arg);
+		break;
+	case DXDI_IOC_NR_HASH_INIT:
+		pr_debug("DXDI_IOC_NR_HASH_INIT\n");
+		err = sep_ioctl_hash_init(client_ctx, arg);
+		break;
+	case DXDI_IOC_NR_COMBINED_INIT:
+		pr_debug("DXDI_IOC_NR_COMBINED_INIT\n");
+		err = sep_ioctl_combined_init(client_ctx, arg);
+		break;
+		/* Processing commands */
+	case DXDI_IOC_NR_PROC_DBLK:
+		pr_debug("DXDI_IOC_NR_PROC_DBLK\n");
+		err = sep_ioctl_proc_dblk(client_ctx, arg);
+		break;
+	case DXDI_IOC_NR_COMBINED_PROC_DBLK:
+		pr_debug("DXDI_IOC_NR_COMBINED_PROC_DBLK\n");
+		err = sep_ioctl_combined_proc_dblk(client_ctx, arg);
+		break;
+	case DXDI_IOC_NR_FIN_PROC:
+		pr_debug("DXDI_IOC_NR_FIN_PROC\n");
+		err = sep_ioctl_fin_proc(client_ctx, arg);
+		break;
+	case DXDI_IOC_NR_COMBINED_PROC_FIN:
+		pr_debug("DXDI_IOC_NR_COMBINED_PROC_FIN\n");
+		err = sep_ioctl_combined_fin_proc(client_ctx, arg);
+		break;
+		/* "Integrated" processing operations */
+	case DXDI_IOC_NR_SYMCIPHER_PROC:
+		pr_debug("DXDI_IOC_NR_SYMCIPHER_PROC\n");
+		err = sep_ioctl_sym_cipher_proc(client_ctx, arg);
+		break;
+	case DXDI_IOC_NR_AUTH_ENC_PROC:
+		pr_debug("DXDI_IOC_NR_AUTH_ENC_PROC\n");
+		err = sep_ioctl_auth_enc_proc(client_ctx, arg);
+		break;
+	case DXDI_IOC_NR_MAC_PROC:
+		pr_debug("DXDI_IOC_NR_MAC_PROC\n");
+		err = sep_ioctl_mac_proc(client_ctx, arg);
+		break;
+	case DXDI_IOC_NR_HASH_PROC:
+		pr_debug("DXDI_IOC_NR_HASH_PROC\n");
+		err = sep_ioctl_hash_proc(client_ctx, arg);
+		break;
+	case DXDI_IOC_NR_COMBINED_PROC:
+		pr_debug("DXDI_IOC_NR_COMBINED_PROC\n");
+		err = sep_ioctl_combined_proc(client_ctx, arg);
+		break;
+		/* SeP RPC */
+	case DXDI_IOC_NR_SEP_RPC:
+		err = sep_ioctl_sep_rpc(client_ctx, arg);
+		break;
+#if MAX_SEPAPP_SESSION_PER_CLIENT_CTX > 0
+		/* Memory registation */
+	case DXDI_IOC_NR_REGISTER_MEM4DMA:
+		pr_debug("DXDI_IOC_NR_REGISTER_MEM4DMA\n");
+		err = sep_ioctl_register_mem4dma(client_ctx, arg);
+		break;
+	case DXDI_IOC_NR_ALLOC_MEM4DMA:
+		pr_err("DXDI_IOC_NR_ALLOC_MEM4DMA: Not supported, yet");
+		err = -ENOTTY;
+		break;
+	case DXDI_IOC_NR_FREE_MEM4DMA:
+		pr_debug("DXDI_IOC_NR_FREE_MEM4DMA\n");
+		err = sep_ioctl_free_mem4dma(client_ctx, arg);
+		break;
+	case DXDI_IOC_NR_SEPAPP_SESSION_OPEN:
+		pr_debug("DXDI_IOC_NR_SEPAPP_SESSION_OPEN\n");
+		err = sep_ioctl_sepapp_session_open(client_ctx, arg);
+		break;
+	case DXDI_IOC_NR_SEPAPP_SESSION_CLOSE:
+		pr_debug("DXDI_IOC_NR_SEPAPP_SESSION_CLOSE\n");
+		err = sep_ioctl_sepapp_session_close(client_ctx, arg);
+		break;
+	case DXDI_IOC_NR_SEPAPP_COMMAND_INVOKE:
+		pr_debug("DXDI_IOC_NR_SEPAPP_COMMAND_INVOKE\n");
+		err = sep_ioctl_sepapp_command_invoke(client_ctx, arg);
+		break;
+#endif
+	case DXDI_IOC_NR_SET_IV:
+		pr_debug("DXDI_IOC_NR_SET_IV\n");
+		err = sep_ioctl_set_iv(client_ctx, arg);
+		break;
+	case DXDI_IOC_NR_GET_IV:
+		pr_debug("DXDI_IOC_NR_GET_IV\n");
+		err = sep_ioctl_get_iv(client_ctx, arg);
+		break;
+	default:/* Not supposed to happen - we already tested for NR range */
+		pr_err("bad IOCTL cmd 0x%08X\n", cmd);
+		err = -ENOTTY;
+	}
+
+	/* Update stats per IOCTL command */
+	if (err == 0) {
+		preempt_disable_notrace();
+		ioctl_end = sched_clock();
+		preempt_enable_notrace();
+		sysfs_update_drv_stats(client_ctx->qid, _IOC_NR(cmd),
+				       ioctl_start, ioctl_end);
+	}
+
+#ifdef SEP_RUNTIME_PM
+	dx_sep_pm_runtime_put();
+#endif
+
+	return err;
+}
+
+static const struct file_operations sep_fops = {
+	.owner = THIS_MODULE,
+	.open = sep_open,
+	.release = sep_release,
+	.read = sep_read,
+	.write = sep_write,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = sep_compat_ioctl,
+#else
+	.unlocked_ioctl = sep_ioctl,
+#endif
+};
+
+/**
+ * get_q_cache_size() - Get the number of entries to allocate for the SeP/FW
+ *			cache of given queue
+ * @drvdata:	 Driver context
+ * @qid:	 The queue to allocate for
+ *
+ * Get the number of entries to allocate for the SeP/FW cache of given queue
+ * The function assumes that num_of_desc_queues and num_of_sep_cache_entries
+ * are already initialized in drvdata.
+ * Returns Number of cache entries to allocate
+ */
+static int get_q_cache_size(struct sep_drvdata *drvdata, int qid)
+{
+	/* Simple allocation - divide evenly among queues */
+	/* consider prefering higher priority queues...   */
+	return drvdata->num_of_sep_cache_entries / drvdata->num_of_desc_queues;
+}
+
+/**
+ * enable_descq_interrupt() - Enable interrupt for given queue (GPR)
+ * @drvdata:
+ * @qid:
+ *
+ * Returns int
+ */
+static void enable_descq_interrupt(struct sep_drvdata *drvdata, int qid)
+{
+
+	/* clear pending interrupts in GPRs of SW-queues
+	 * (leftovers from init writes to GPRs..) */
+	WRITE_REGISTER(drvdata->cc_base + DX_CC_REG_OFFSET(HOST, ICR),
+		       gpr_interrupt_mask[qid]);
+
+	drvdata->irq_mask |= gpr_interrupt_mask[qid];
+	/* set IMR register */
+	WRITE_REGISTER(drvdata->cc_base + DX_CC_REG_OFFSET(HOST, IMR),
+		       ~drvdata->irq_mask);
+}
+
+/**
+ * alloc_host_mem_for_sep() - Allocate memory pages for sep icache/dcache
+ *	or for SEP backup memory in case there is not SEP cache memory.
+ *
+ * @drvdata:
+ *
+ * Currently using alloc_pages to allocate the pages.
+ * Consider using CMA feature for the memory allocation
+ */
+static int alloc_host_mem_for_sep(struct sep_drvdata *drvdata)
+{
+#ifdef CACHE_IMAGE_NAME
+	int i;
+	const int icache_sizes_enum2log[] = DX_CC_ICACHE_SIZE_ENUM2LOG;
+
+	pr_debug("icache_size=%uKB dcache_size=%uKB\n",
+		 1 << (icache_size_log2 - 10),
+		 1 << (dcache_size_log2 - 10));
+
+	/* Verify validity of chosen cache memory sizes */
+	if ((dcache_size_log2 > DX_CC_INIT_D_CACHE_MAX_SIZE_LOG2) ||
+	    (dcache_size_log2 < DX_CC_INIT_D_CACHE_MIN_SIZE_LOG2)) {
+		pr_err("Requested Dcache size (%uKB) is invalid\n",
+		       1 << (dcache_size_log2 - 10));
+		return -EINVAL;
+	}
+	/* Icache size must be one of values defined for this device */
+	for (i = 0; i < sizeof(icache_sizes_enum2log) / sizeof(int); i++)
+		if ((icache_size_log2 == icache_sizes_enum2log[i]) &&
+		    (icache_sizes_enum2log[i] >= 0))
+			/* Found valid value */
+			break;
+	if (unlikely(i == sizeof(icache_sizes_enum2log))) {
+		pr_err("Requested Icache size (%uKB) is invalid\n",
+		       1 << (icache_size_log2 - 10));
+	}
+	drvdata->icache_size_log2 = icache_size_log2;
+	/* Allocate pages suitable for 32bit DMA and out of cache (cold) */
+	drvdata->icache_pages = alloc_pages(GFP_KERNEL | GFP_DMA32 | __GFP_COLD,
+					    icache_size_log2 - PAGE_SHIFT);
+	if (drvdata->icache_pages == NULL) {
+		pr_err("Failed allocating %uKB for Icache\n",
+			    1 << (icache_size_log2 - 10));
+		return -ENOMEM;
+	}
+	drvdata->dcache_size_log2 = dcache_size_log2;
+	/* same as for icache */
+	drvdata->dcache_pages = alloc_pages(GFP_KERNEL | GFP_DMA32 | __GFP_COLD,
+					    dcache_size_log2 - PAGE_SHIFT);
+	if (drvdata->dcache_pages == NULL) {
+		pr_err("Failed allocating %uKB for Dcache\n",
+		       1 << (dcache_size_log2 - 10));
+		__free_pages(drvdata->icache_pages,
+			     drvdata->icache_size_log2 - PAGE_SHIFT);
+		return -ENOMEM;
+	}
+#elif defined(SEP_BACKUP_BUF_SIZE)
+	/* This size is not enforced by power of two, so we use
+	 * alloc_pages_exact() */
+	drvdata->sep_backup_buf = alloc_pages_exact(SEP_BACKUP_BUF_SIZE,
+						    GFP_KERNEL | GFP_DMA32 |
+						    __GFP_COLD);
+	if (unlikely(drvdata->sep_backup_buf == NULL)) {
+		pr_err("Failed allocating %d B for SEP backup buffer\n",
+		       SEP_BACKUP_BUF_SIZE);
+		return -ENOMEM;
+	}
+	drvdata->sep_backup_buf_size = SEP_BACKUP_BUF_SIZE;
+#endif
+	return 0;
+}
+
+/**
+ * free_host_mem_for_sep() - Free the memory resources allocated by
+ *	alloc_host_mem_for_sep()
+ *
+ * @drvdata:
+ */
+static void free_host_mem_for_sep(struct sep_drvdata *drvdata)
+{
+#ifdef CACHE_IMAGE_NAME
+	if (drvdata->dcache_pages != NULL) {
+		__free_pages(drvdata->dcache_pages,
+			     drvdata->dcache_size_log2 - PAGE_SHIFT);
+		drvdata->dcache_pages = NULL;
+	}
+	if (drvdata->icache_pages != NULL) {
+		__free_pages(drvdata->icache_pages,
+			     drvdata->icache_size_log2 - PAGE_SHIFT);
+		drvdata->icache_pages = NULL;
+	}
+#elif defined(SEP_BACKUP_BUF_SIZE)
+	if (drvdata->sep_backup_buf != NULL) {
+		free_pages_exact(drvdata->sep_backup_buf,
+				 drvdata->sep_backup_buf_size);
+		drvdata->sep_backup_buf_size = 0;
+		drvdata->sep_backup_buf = NULL;
+	}
+#endif
+}
+
+static int emmc_match(struct device *dev, const void *data)
+{
+	if (strcmp(dev_name(dev), data) == 0)
+		return 1;
+	return 0;
+}
+
+static int mmc_blk_rpmb_req_handle(struct mmc_ioc_rpmb_req *req)
+{
+#define EMMC_BLK_NAME   "mmcblk0rpmb"
+
+	struct device *emmc = NULL;
+
+	if (!req)
+		return -EINVAL;
+
+	emmc = class_find_device(&block_class, NULL, EMMC_BLK_NAME, emmc_match);
+	if (!emmc) {
+		pr_err("eMMC reg failed\n");
+		return -ENODEV;
+	}
+
+	return mmc_rpmb_req_handle(emmc, req);
+}
+
+static int rpmb_agent(void *unused)
+{
+#define AGENT_TIMEOUT_MS (1000 * 60 * 5) /* 5 minutes */
+
+#define AUTH_DAT_WR_REQ 0x0003
+#define AUTH_DAT_RD_REQ 0x0004
+
+#define RPMB_FRAME_LENGTH      512
+#define RPMB_MAC_KEY_LENGTH     32
+#define RPMB_NONCE_LENGTH       16
+#define RPMB_DATA_LENGTH       256
+#define RPMB_STUFFBYTES_LENGTH 196
+#define RPMB_COUNTER_LENGTH      4
+#define RPMB_ADDR_LENGTH         2
+#define RPMB_BLKCNT_LENGTH       2
+#define RPMB_RESULT_LENGTH       2
+#define RPMB_RSPREQ_LENGTH       2
+
+#define RPMB_STUFFBYTES_OFFSET 0
+#define RPMB_MAC_KEY_OFFSET   (RPMB_STUFFBYTES_OFFSET + RPMB_STUFFBYTES_LENGTH)
+#define RPMB_DATA_OFFSET      (RPMB_MAC_KEY_OFFSET + RPMB_MAC_KEY_LENGTH)
+#define RPMB_NONCE_OFFSET     (RPMB_DATA_OFFSET + RPMB_DATA_LENGTH)
+#define RPMB_COUNTER_OFFSET   (RPMB_NONCE_OFFSET + RPMB_NONCE_LENGTH)
+#define RPMB_ADDR_OFFSET      (RPMB_COUNTER_OFFSET + RPMB_COUNTER_LENGTH)
+#define RPMB_BLKCNT_OFFSET    (RPMB_ADDR_OFFSET + RPMB_ADDR_LENGTH)
+#define RPMB_RESULT_OFFSET    (RPMB_BLKCNT_OFFSET + RPMB_BLKCNT_LENGTH)
+#define RPMB_RSPREQ_OFFSET    (RPMB_RESULT_OFFSET + RPMB_RESULT_LENGTH)
+
+	int ret = 0;
+	u32 tmp = 0;
+	u32 max_buf_size = 0;
+	u8 in_buf[RPMB_FRAME_LENGTH];
+	u8 *out_buf = NULL;
+	u32 in_buf_size = RPMB_FRAME_LENGTH;
+	/* structure to pass to the eMMC driver's RPMB API */
+	struct mmc_ioc_rpmb_req req2emmc;
+
+	ret = dx_sep_req_register_agent(RPMB_AGENT_ID, &max_buf_size);
+	if (ret) {
+		pr_err("REG FAIL %d\n", ret);
+		return -EINVAL;
+	}
+
+	out_buf = kmalloc(RPMB_FRAME_LENGTH, GFP_KERNEL);
+	if (!out_buf) {
+		pr_err("MALLOC FAIL\n");
+		return -ENOMEM;
+	}
+
+	while (1) {
+
+		/* Block until called by SEP */
+		pr_debug("RPMB AGENT BLOCKED\n");
+		ret = dx_sep_req_wait_for_request(RPMB_AGENT_ID,
+				in_buf, &in_buf_size);
+		if (ret) {
+			pr_err("WAIT FAILED %d\n", ret);
+			break;
+		}
+
+		pr_debug("RPMB AGENT UNBLOCKED\n");
+
+		/* Process request */
+		memset(&req2emmc, 0x00, sizeof(struct mmc_ioc_rpmb_req));
+
+		/* Copy from incoming buffer into variables and swap
+		 * endianess if needed */
+		req2emmc.addr = *((u16 *)(in_buf+RPMB_ADDR_OFFSET));
+		req2emmc.addr = be16_to_cpu(req2emmc.addr);
+		/* As we are supporting only one block transfers */
+		req2emmc.blk_cnt = 1;
+		req2emmc.data = in_buf+RPMB_DATA_OFFSET;
+		req2emmc.mac = in_buf+RPMB_MAC_KEY_OFFSET;
+		req2emmc.nonce = in_buf+RPMB_NONCE_OFFSET;
+		req2emmc.result = (u16 *)(in_buf+RPMB_RESULT_OFFSET);
+		req2emmc.type = *((u16 *)(in_buf+RPMB_RSPREQ_OFFSET));
+		req2emmc.type = be16_to_cpu(req2emmc.type);
+		req2emmc.wc = (u32 *)(in_buf+RPMB_COUNTER_OFFSET);
+		*req2emmc.wc = be32_to_cpu(*req2emmc.wc);
+
+		/* Send request to eMMC driver */
+		ret = mmc_blk_rpmb_req_handle(&req2emmc);
+		if (ret) {
+			pr_err("mmc_blk_rpmb_req_handle fail %d", ret);
+			/* If access to eMMC driver failed send back
+			 * artificial error */
+			req2emmc.type = 0x0008;
+		}
+
+		/* Rebuild RPMB from response */
+		memset(out_buf, 0, RPMB_FRAME_LENGTH);
+
+		if (req2emmc.type == AUTH_DAT_RD_REQ) {
+			pr_debug("READ OPERATION RETURN\n");
+			memcpy(out_buf+RPMB_DATA_OFFSET,
+					req2emmc.data,  RPMB_DATA_LENGTH);
+			memcpy(out_buf+RPMB_NONCE_OFFSET,
+					req2emmc.nonce, RPMB_NONCE_LENGTH);
+
+			out_buf[RPMB_BLKCNT_OFFSET]   = req2emmc.blk_cnt >> 8;
+			out_buf[RPMB_BLKCNT_OFFSET+1] = req2emmc.blk_cnt;
+		} else {
+			pr_debug("WRITE OPERATION RETURN\n");
+			memcpy(&tmp, req2emmc.wc, RPMB_COUNTER_LENGTH);
+			tmp = cpu_to_be32(tmp);
+			memcpy(out_buf+RPMB_COUNTER_OFFSET,
+					&tmp, RPMB_COUNTER_LENGTH);
+		}
+
+		memcpy(out_buf+RPMB_MAC_KEY_OFFSET,
+				req2emmc.mac,    RPMB_MAC_KEY_LENGTH);
+		memcpy(out_buf+RPMB_RESULT_OFFSET,
+				req2emmc.result, RPMB_RESULT_LENGTH);
+
+		memcpy(out_buf+RPMB_RSPREQ_OFFSET,
+				&req2emmc.type, RPMB_RSPREQ_LENGTH);
+		out_buf[RPMB_ADDR_OFFSET]   = req2emmc.addr >> 8;
+		out_buf[RPMB_ADDR_OFFSET+1] = req2emmc.addr;
+
+		/* Send response */
+		ret = dx_sep_req_send_response(RPMB_AGENT_ID,
+				out_buf, RPMB_FRAME_LENGTH);
+		if (ret) {
+			pr_err("dx_sep_req_send_response fail %d", ret);
+			break;
+		}
+	}
+
+	kfree(out_buf);
+
+	return ret;
+}
+
+static int sep_setup(struct device *dev,
+		     const struct resource *regs_res,
+		     struct resource *r_irq)
+{
+	dev_t devt;
+	struct sep_drvdata *drvdata = NULL;
+	enum dx_sep_state sep_state;
+	int rc = 0;
+	int i;
+	/* Create kernel thread for RPMB agent */
+	static struct task_struct *rpmb_thread;
+	char thread_name[] = "rpmb_agent";
+	int sess_id = 0;
+	enum dxdi_sep_module ret_origin;
+	struct sep_client_ctx *sctx = NULL;
+	u8 uuid[16] = DEFAULT_APP_UUID;
+
+	pr_info("Discretix %s Driver initializing...\n", DRIVER_NAME);
+
+	drvdata = kzalloc(sizeof(struct sep_drvdata), GFP_KERNEL);
+	if (unlikely(drvdata == NULL)) {
+		pr_err("Unable to allocate device private record\n");
+		rc = -ENOMEM;
+		goto failed0;
+	}
+	dev_set_drvdata(dev, (void *)drvdata);
+
+	if (!regs_res) {
+		pr_err("Couldn't get registers resource\n");
+		rc = -EFAULT;
+		goto failed1;
+	}
+
+	if (q_num > SEP_MAX_NUM_OF_DESC_Q) {
+		pr_err(
+			    "Requested number of queues (%u) is out of range must be no more than %u\n",
+			    q_num, SEP_MAX_NUM_OF_DESC_Q);
+		rc = -EINVAL;
+		goto failed1;
+	}
+
+	/* TODO: Verify number of queues also with SeP capabilities */
+	/* Initialize objects arrays for proper cleanup in case of error */
+	for (i = 0; i < SEP_MAX_NUM_OF_DESC_Q; i++) {
+		drvdata->queue[i].desc_queue = DESC_Q_INVALID_HANDLE;
+		drvdata->queue[i].sep_cache = SEP_CTX_CACHE_NULL_HANDLE;
+	}
+
+	drvdata->mem_start = regs_res->start;
+	drvdata->mem_end = regs_res->end;
+	drvdata->mem_size = regs_res->end - regs_res->start + 1;
+
+	if (!request_mem_region(drvdata->mem_start,
+				drvdata->mem_size, DRIVER_NAME)) {
+		pr_err("Couldn't lock memory region at %Lx\n",
+			    (unsigned long long)regs_res->start);
+		rc = -EBUSY;
+		goto failed1;
+	}
+
+	/* create a mask in the lower 4 GB of memory */
+	if (!dma_set_mask(dev, DMA_BIT_MASK(32)))
+		dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+	else
+		pr_warn("sep54: No suitble DMA available\n");
+
+	drvdata->dev = dev;
+
+	drvdata->cc_base = ioremap(drvdata->mem_start, drvdata->mem_size);
+	if (drvdata->cc_base == NULL) {
+		pr_err("ioremap() failed\n");
+		goto failed2;
+	}
+
+	pr_info("regbase_phys=0x%p..0x%p\n", &drvdata->mem_start,
+		&drvdata->mem_end);
+	pr_info("regbase_virt=0x%p\n", drvdata->cc_base);
+
+#ifdef DX_BASE_ENV_REGS
+	pr_info("FPGA ver. = UNKNOWN\n");
+	/* TODO: verify FPGA version against expected version */
+#endif
+
+#ifdef SEP_PRINTF
+	/* Sync. host to SeP initialization counter */
+	/* After seting the interrupt mask, the interrupt from the GPR would
+	 * trigger host_printf_handler to ack this value */
+	drvdata->last_ack_cntr = SEP_PRINTF_ACK_SYNC_VAL;
+#endif
+
+	dx_sep_power_init(drvdata);
+
+	/* Interrupt handler setup */
+#ifdef SEP_INTERRUPT_BY_TIMER
+	init_timer(&drvdata->delegate);
+	drvdata->delegate.function = sep_timer;
+	drvdata->delegate.data = (unsigned long)drvdata;
+	mod_timer(&drvdata->delegate, jiffies);
+
+#else				/* IRQ handler setup */
+	/* Initialize IMR (mask) before registering interrupt handler */
+	/* Enable only state register interrupt */
+	drvdata->irq_mask = SEP_HOST_GPR_IRQ_MASK(DX_SEP_STATE_GPR_IDX);
+#ifdef SEP_PRINTF
+	/* Enable interrupt from host_printf GPR */
+	drvdata->irq_mask |= SEP_HOST_GPR_IRQ_MASK(DX_SEP_HOST_PRINTF_GPR_IDX);
+#endif
+	WRITE_REGISTER(drvdata->cc_base + DX_CC_REG_OFFSET(HOST, IMR),
+		       ~drvdata->irq_mask);
+	/* The GPRs interrupts are set only after sep_init is done to avoid
+	 * "garbage" interrupts as a result of the FW init process */
+	drvdata->irq = r_irq->start;
+	rc = request_irq(drvdata->irq, sep_interrupt,
+			 IRQF_SHARED, DRIVER_NAME, drvdata->dev);
+	if (unlikely(rc != 0)) {
+		pr_err("Could not allocate interrupt %d\n", drvdata->irq);
+		goto failed3;
+	}
+	pr_info("%s at 0x%p mapped to interrupt %d\n",
+		DRIVER_NAME, drvdata->cc_base, drvdata->irq);
+
+#endif				/*SEP_INTERRUPT_BY_TIMER */
+
+	/* SeP FW initialization sequence */
+	/* Cold boot before creating descQ objects */
+	sep_state = GET_SEP_STATE(drvdata);
+	if (sep_state != DX_SEP_STATE_DONE_COLD_BOOT) {
+		pr_debug("sep_state=0x%08X\n", sep_state);
+		/* If INIT_CC was not done externally, take care of it here */
+		rc = alloc_host_mem_for_sep(drvdata);
+		if (unlikely(rc != 0))
+			goto failed4;
+		rc = sepinit_do_cc_init(drvdata);
+		if (unlikely(rc != 0))
+			goto failed5;
+	}
+	sepinit_get_fw_props(drvdata);
+	if (drvdata->fw_ver != EXPECTED_FW_VER) {
+		pr_warn("Expected FW version %u.%u.%u but got %u.%u.%u\n",
+			     VER_MAJOR(EXPECTED_FW_VER),
+			     VER_MINOR(EXPECTED_FW_VER),
+			     VER_PATCH(EXPECTED_FW_VER),
+			     VER_MAJOR(drvdata->fw_ver),
+			     VER_MINOR(drvdata->fw_ver),
+			     VER_PATCH(drvdata->fw_ver));
+	}
+
+	if (q_num > drvdata->num_of_desc_queues) {
+		pr_err(
+			    "Requested number of queues (%u) is greater than SEP could support (%u)\n",
+			    q_num, drvdata->num_of_desc_queues);
+		rc = -EINVAL;
+		goto failed5;
+	}
+
+	if (q_num == 0) {
+		if (drvdata->num_of_desc_queues > SEP_MAX_NUM_OF_DESC_Q) {
+			pr_info(
+				     "The SEP number of queues (%u) is greater than the driver could support (%u)\n",
+				     drvdata->num_of_desc_queues,
+				     SEP_MAX_NUM_OF_DESC_Q);
+			q_num = SEP_MAX_NUM_OF_DESC_Q;
+		} else {
+			q_num = drvdata->num_of_desc_queues;
+		}
+	}
+	drvdata->num_of_desc_queues = q_num;
+
+	pr_info("q_num=%d\n", drvdata->num_of_desc_queues);
+
+	rc = dx_sep_req_init(drvdata);
+	if (unlikely(rc != 0))
+		goto failed6;
+
+	/* Create descriptor queues objects - must be before
+	 *  sepinit_set_fw_init_params to assure GPRs from host are reset */
+	for (i = 0; i < drvdata->num_of_desc_queues; i++) {
+		drvdata->queue[i].sep_data = drvdata;
+		mutex_init(&drvdata->queue[i].desc_queue_sequencer);
+		drvdata->queue[i].desc_queue =
+		    desc_q_create(i, &drvdata->queue[i]);
+		if (drvdata->queue[i].desc_queue == DESC_Q_INVALID_HANDLE) {
+			pr_err("Unable to allocate desc_q object (%d)\n", i);
+			rc = -ENOMEM;
+			goto failed7;
+		}
+	}
+
+	/* Create context cache management objects */
+	for (i = 0; i < drvdata->num_of_desc_queues; i++) {
+		const int num_of_cache_entries = get_q_cache_size(drvdata, i);
+		if (num_of_cache_entries < 1) {
+			pr_err("No SeP cache entries were assigned for qid=%d",
+			       i);
+			rc = -ENOMEM;
+			goto failed7;
+		}
+		drvdata->queue[i].sep_cache =
+		    ctxmgr_sep_cache_create(num_of_cache_entries);
+		if (drvdata->queue[i].sep_cache == SEP_CTX_CACHE_NULL_HANDLE) {
+			pr_err("Unable to allocate SeP cache object (%d)\n", i);
+			rc = -ENOMEM;
+			goto failed7;
+		}
+	}
+
+	rc = sepinit_do_fw_init(drvdata);
+	if (unlikely(rc != 0))
+		goto failed7;
+
+	drvdata->llimgr = llimgr_create(drvdata->dev, drvdata->mlli_table_size);
+	if (drvdata->llimgr == LLIMGR_NULL_HANDLE) {
+		pr_err("Failed creating LLI-manager object\n");
+		rc = -ENOMEM;
+		goto failed7;
+	}
+
+	drvdata->spad_buf_pool = dma_pool_create("dx_sep_rpc_msg", drvdata->dev,
+						 USER_SPAD_SIZE,
+						 L1_CACHE_BYTES, 0);
+	if (drvdata->spad_buf_pool == NULL) {
+		pr_err("Failed allocating DMA pool for RPC messages\n");
+		rc = -ENOMEM;
+		goto failed8;
+	}
+
+	/* Add character device nodes */
+	rc = alloc_chrdev_region(&drvdata->devt_base, 0, SEP_DEVICES,
+				 DRIVER_NAME);
+	if (unlikely(rc != 0))
+		goto failed9;
+	pr_debug("Allocated %u chrdevs at %u:%u\n", SEP_DEVICES,
+		 MAJOR(drvdata->devt_base), MINOR(drvdata->devt_base));
+	for (i = 0; i < drvdata->num_of_desc_queues; i++) {
+		devt = MKDEV(MAJOR(drvdata->devt_base),
+			     MINOR(drvdata->devt_base) + i);
+		cdev_init(&drvdata->queue[i].cdev, &sep_fops);
+		drvdata->queue[i].cdev.owner = THIS_MODULE;
+		rc = cdev_add(&drvdata->queue[i].cdev, devt, 1);
+		if (unlikely(rc != 0)) {
+			pr_err("cdev_add() failed for q%d\n", i);
+			goto failed9;
+		}
+		drvdata->queue[i].dev = device_create(sep_class, dev, devt,
+						      &drvdata->queue[i],
+						      "%s%d",
+						      DEVICE_NAME_PREFIX, i);
+		drvdata->queue[i].devt = devt;
+	}
+
+	rc = sep_setup_sysfs(&(dev->kobj), drvdata);
+	if (unlikely(rc != 0))
+		goto failed9;
+
+#ifndef SEP_INTERRUPT_BY_TIMER
+	/* Everything is ready - enable interrupts of desc-Qs */
+	for (i = 0; i < drvdata->num_of_desc_queues; i++)
+		enable_descq_interrupt(drvdata, i);
+#endif
+
+	/* Enable sep request interrupt handling */
+	dx_sep_req_enable(drvdata);
+
+	/* Init DX Linux crypto module */
+	if (!disable_linux_crypto) {
+		rc = dx_crypto_api_init(drvdata);
+		if (unlikely(rc != 0))
+			goto failed10;
+		rc = hwk_init();
+		if (unlikely(rc != 0))
+			goto failed10;
+	}
+#if MAX_SEPAPP_SESSION_PER_CLIENT_CTX > 0
+	dx_sepapp_init(drvdata);
+#endif
+
+	rpmb_thread = kthread_create(rpmb_agent, NULL, thread_name);
+	if (!rpmb_thread) {
+		pr_err("RPMB agent thread create fail");
+		goto failed10;
+	} else {
+		wake_up_process(rpmb_thread);
+	}
+
+	/* Inform SEP RPMB driver that it can enable RPMB access again */
+	sctx = dx_sepapp_context_alloc();
+	if (unlikely(!sctx))
+		goto failed10;
+
+	rc = dx_sepapp_session_open(sctx, uuid, 0, NULL, NULL, &sess_id,
+				    &ret_origin);
+	if (unlikely(rc != 0))
+		goto failed11;
+	rc = dx_sepapp_command_invoke(sctx, sess_id, CMD_RPMB_ENABLE, NULL,
+				      &ret_origin);
+	if (unlikely(rc != 0))
+		goto failed11;
+
+	rc = dx_sepapp_session_close(sctx, sess_id);
+	if (unlikely(rc != 0))
+		goto failed11;
+
+	dx_sepapp_context_free(sctx);
+
+	return 0;
+
+/* Error cases cleanup */
+ failed11:
+	dx_sepapp_context_free(sctx);
+ failed10:
+	/* Disable interrupts */
+	WRITE_REGISTER(drvdata->cc_base + DX_CC_REG_OFFSET(HOST, IMR), ~0);
+	sep_free_sysfs();
+ failed9:
+	dma_pool_destroy(drvdata->spad_buf_pool);
+ failed8:
+	llimgr_destroy(drvdata->llimgr);
+ failed7:
+	for (i = 0; i < drvdata->num_of_desc_queues; i++) {
+		if (drvdata->queue[i].devt) {
+			cdev_del(&drvdata->queue[i].cdev);
+			device_destroy(sep_class, drvdata->queue[i].devt);
+		}
+
+		if (drvdata->queue[i].sep_cache != SEP_CTX_CACHE_NULL_HANDLE) {
+			ctxmgr_sep_cache_destroy(drvdata->queue[i].sep_cache);
+			drvdata->queue[i].sep_cache = SEP_CTX_CACHE_NULL_HANDLE;
+		}
+
+		if (drvdata->queue[i].desc_queue != DESC_Q_INVALID_HANDLE) {
+			desc_q_destroy(drvdata->queue[i].desc_queue);
+			drvdata->queue[i].desc_queue = DESC_Q_INVALID_HANDLE;
+			mutex_destroy(&drvdata->queue[i].desc_queue_sequencer);
+		}
+	}
+
+ failed6:
+	dx_sep_req_fini(drvdata);
+ failed5:
+	free_host_mem_for_sep(drvdata);
+ failed4:
+#ifdef SEP_INTERRUPT_BY_TIMER
+	del_timer_sync(&drvdata->delegate);
+#else
+	free_irq(drvdata->irq, dev);
+#endif
+ failed3:
+	dx_sep_power_exit();
+	iounmap(drvdata->cc_base);
+ failed2:
+	release_mem_region(regs_res->start, drvdata->mem_size);
+ failed1:
+	kfree(drvdata);
+ failed0:
+
+	return rc;
+}
+
+static void sep_pci_remove(struct pci_dev *pdev)
+{
+	struct sep_drvdata *drvdata =
+	    (struct sep_drvdata *)dev_get_drvdata(&pdev->dev);
+	int i;
+
+	if (!drvdata)
+		return;
+	dx_sep_req_fini(drvdata);
+	if (!disable_linux_crypto) {
+		dx_crypto_api_fini();
+		hwk_fini();
+	}
+	/* Disable interrupts */
+	WRITE_REGISTER(drvdata->cc_base + DX_CC_REG_OFFSET(HOST, IMR), ~0);
+
+#ifdef SEP_RUNTIME_PM
+	pm_runtime_get_noresume(&pdev->dev);
+	pm_runtime_forbid(&pdev->dev);
+#endif
+
+	for (i = 0; i < drvdata->num_of_desc_queues; i++) {
+		if (drvdata->queue[i].desc_queue != DESC_Q_INVALID_HANDLE) {
+			desc_q_destroy(drvdata->queue[i].desc_queue);
+			mutex_destroy(&drvdata->queue[i].desc_queue_sequencer);
+		}
+		if (drvdata->queue[i].sep_cache != NULL)
+			ctxmgr_sep_cache_destroy(drvdata->queue[i].sep_cache);
+		cdev_del(&drvdata->queue[i].cdev);
+		device_destroy(sep_class, drvdata->queue[i].devt);
+	}
+
+	dma_pool_destroy(drvdata->spad_buf_pool);
+	drvdata->spad_buf_pool = NULL;
+	llimgr_destroy(drvdata->llimgr);
+	drvdata->llimgr = LLIMGR_NULL_HANDLE;
+	free_host_mem_for_sep(drvdata);
+#ifdef SEP_INTERRUPT_BY_TIMER
+	del_timer_sync(&drvdata->delegate);
+#else
+	free_irq(drvdata->irq, &pdev->dev);
+#endif
+	dx_sep_power_exit();
+	iounmap(drvdata->cc_base);
+	release_mem_region(drvdata->mem_start, drvdata->mem_size);
+	sep_free_sysfs();
+	kfree(drvdata);
+	dev_set_drvdata(&pdev->dev, NULL);
+	pci_dev_put(pdev);
+}
+
+static DEFINE_PCI_DEVICE_TABLE(sep_pci_id_tbl) = {
+	{
+	PCI_DEVICE(PCI_VENDOR_ID_INTEL, MRLD_SEP_PCI_DEVICE_ID)}, {
+	0}
+};
+
+MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
+
+/**
+ *	sep_probe - probe a matching PCI device
+ *	@pdev: pci_device
+ *	@end: pci_device_id
+ *
+ *	Attempt to set up and configure a SEP device that has been
+ *	discovered by the PCI layer.
+ */
+static int sep_pci_probe(struct pci_dev *pdev,
+			 const struct pci_device_id *ent)
+{
+	int error;
+	struct resource res;
+	struct resource r_irq;
+
+	security_cfg_reg = ioremap_nocache(SECURITY_CFG_ADDR, 4);
+	if (security_cfg_reg == NULL) {
+		dev_err(&pdev->dev, "ioremap of security_cfg_reg failed\n");
+		return -ENOMEM;
+	}
+
+	/* Enable Chaabi */
+	error = pci_enable_device(pdev);
+	if (error) {
+		dev_err(&pdev->dev, "error enabling SEP device\n");
+		goto end;
+	}
+
+	/* Fill resource variables */
+	res.start = pci_resource_start(pdev, 0);
+
+#ifdef PCI_REGION_BUG		/* TODO for wrong sep address bug */
+	res.start += 0x8000;
+#endif
+
+	if (!res.start) {
+		dev_warn(&pdev->dev, "Error getting register start\n");
+		error = -ENODEV;
+		goto disable_pci;
+	}
+
+	res.end = pci_resource_end(pdev, 0);
+	if (!res.end) {
+		dev_warn(&pdev->dev, "Error getting register end\n");
+		error = -ENODEV;
+		goto disable_pci;
+	}
+
+	/* Fill irq resource variable */
+	r_irq.start = pdev->irq;
+
+	/* Use resource variables */
+	error = sep_setup(&pdev->dev, &res, &r_irq);
+	if (error)
+		goto disable_pci;
+
+	pdev = pci_dev_get(pdev);
+
+#ifdef SEP_RUNTIME_PM
+	pm_runtime_put_noidle(&pdev->dev);
+	pm_runtime_allow(&pdev->dev);
+	pm_runtime_set_autosuspend_delay(&pdev->dev, SEP_AUTOSUSPEND_DELAY);
+	pm_runtime_mark_last_busy(&pdev->dev);
+	pm_runtime_use_autosuspend(&pdev->dev);
+#endif
+
+	return 0;
+
+ disable_pci:
+	pci_disable_device(pdev);
+ end:
+	iounmap(security_cfg_reg);
+
+	return error;
+}
+
+#if defined(CONFIG_PM_RUNTIME) && defined(SEP_RUNTIME_PM)
+static int sep_runtime_suspend(struct device *dev)
+{
+	int ret;
+	int count = 0;
+	u32 val;
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct sep_drvdata *drvdata =
+	    (struct sep_drvdata *)dev_get_drvdata(dev);
+
+	ret = dx_sep_power_state_set(DX_SEP_POWER_HIBERNATED);
+	if (ret) {
+		pr_err("%s failed! ret = %d\n", __func__, ret);
+		return ret;
+	}
+
+	/*poll for chaabi_powerdown_en bit in SECURITY_CFG*/
+	while (count < SEP_TIMEOUT) {
+		val = readl(security_cfg_reg);
+		if (val & PWR_DWN_ENB_MASK)
+			break;
+		usleep_range(40, 60);
+		count++;
+	}
+	if (count >= SEP_TIMEOUT) {
+		dev_err(&pdev->dev,
+			"SEP: timed out waiting for chaabi_powerdown_en\n");
+		WARN_ON(1);
+		/*Let's continue to suspend as chaabi is not stable*/
+	}
+
+	disable_irq(pdev->irq);
+	drvdata->sep_suspended = 1;
+
+	return ret;
+}
+
+static int sep_runtime_resume(struct device *dev)
+{
+	int ret;
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct sep_drvdata *drvdata =
+	    (struct sep_drvdata *)dev_get_drvdata(dev);
+
+	drvdata->sep_suspended = 0;
+	enable_irq(pdev->irq);
+	ret = dx_sep_power_state_set(DX_SEP_POWER_ACTIVE);
+	WARN(ret, "%s failed! ret = %d\n", __func__, ret);
+
+	/*
+	 * sep device might return to ACTIVE in time.
+	 * As sep device is not stable, we choose return 0
+	 * in case it blocks s3.
+	 */
+	return 0;
+}
+
+static int sep_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct sep_drvdata *drvdata =
+	    (struct sep_drvdata *)dev_get_drvdata(dev);
+	int ret = 0;
+	int count = 0;
+	u32 val;
+
+	ret = dx_sep_power_state_set(DX_SEP_POWER_HIBERNATED);
+	if (ret) {
+		pr_err("%s failed! ret = %d\n", __func__, ret);
+		return ret;
+	}
+
+	/*poll for chaabi_powerdown_en bit in SECURITY_CFG*/
+	while (count < SEP_TIMEOUT) {
+		val = readl(security_cfg_reg);
+		if (val & PWR_DWN_ENB_MASK)
+			break;
+		usleep_range(40, 60);
+		count++;
+	}
+	if (count >= SEP_TIMEOUT) {
+		dev_err(dev,
+			"SEP: timed out waiting for chaabi_powerdown_en\n");
+		WARN_ON(1);
+		/*Let's continue to suspend as chaabi is not stable*/
+	}
+
+	disable_irq(pdev->irq);
+	drvdata->sep_suspended = 1;
+
+	pci_save_state(pdev);
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, PCI_D3hot);
+
+	return ret;
+}
+
+static int sep_resume(struct device *dev)
+{
+	int ret = 0;
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct sep_drvdata *drvdata =
+	    (struct sep_drvdata *)dev_get_drvdata(dev);
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	ret = pci_enable_device(pdev);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "SEP: pci_enable_device failed\n");
+		return ret;
+	}
+
+	drvdata->sep_suspended = 0;
+	enable_irq(pdev->irq);
+
+	ret = dx_sep_power_state_set(DX_SEP_POWER_ACTIVE);
+	WARN(ret, "%s failed! ret = %d\n", __func__, ret);
+
+	/*
+	 * sep device might return to ACTIVE in time.
+	 * As sep device is not stable, we choose return 0
+	 * in case it blocks s3.
+	 */
+	return 0;
+}
+
+static const struct dev_pm_ops sep_pm_ops = {
+	.runtime_suspend = sep_runtime_suspend,
+	.runtime_resume = sep_runtime_resume,
+	.suspend = sep_suspend,
+	.resume = sep_resume,
+};
+#endif /* CONFIG_PM_RUNTIME && SEP_RUNTIME_PM */
+
+/* Field for registering driver to PCI device */
+static struct pci_driver sep_pci_driver = {
+#if defined(CONFIG_PM_RUNTIME) && defined(SEP_RUNTIME_PM)
+	.driver = {
+		.pm = &sep_pm_ops,
+	},
+#endif /* CONFIG_PM_RUNTIME && SEP_RUNTIME_PM */
+	.name = DRIVER_NAME,
+	.id_table = sep_pci_id_tbl,
+	.probe = sep_pci_probe,
+	.remove = sep_pci_remove
+};
+
+static int __init sep_module_init(void)
+{
+	int rc;
+
+	sep_class = class_create(THIS_MODULE, "sep_ctl");
+
+	/* Register PCI device */
+	rc = pci_register_driver(&sep_pci_driver);
+	if (rc) {
+		class_destroy(sep_class);
+		return rc;
+	}
+
+	return 0;		/*success */
+}
+
+static void __exit sep_module_cleanup(void)
+{
+	pci_unregister_driver(&sep_pci_driver);
+	class_destroy(sep_class);
+}
+
+/* Entry points  */
+module_init(sep_module_init);
+module_exit(sep_module_cleanup);
+/* Module description */
+MODULE_DESCRIPTION("Discretix " DRIVER_NAME " Driver");
+MODULE_VERSION("0.7");
+MODULE_AUTHOR("Discretix");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/sep54/dx_driver.h b/drivers/staging/sep54/dx_driver.h
new file mode 100644
index 0000000..154161a
--- /dev/null
+++ b/drivers/staging/sep54/dx_driver.h
@@ -0,0 +1,597 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+#ifndef _DX_DRIVER_H_
+#define _DX_DRIVER_H_
+
+#include <generated/autoconf.h>
+#include <linux/types.h>
+#include <linux/cdev.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/completion.h>
+#include <linux/export.h>
+#include <linux/semaphore.h>
+
+#define DX_CC_HOST_VIRT	/* must be defined before including dx_cc_regs.h */
+#include "dx_cc_regs.h"
+#include "dx_reg_common.h"
+#include "dx_host.h"
+#include "sep_log.h"
+#include "sep_rpc.h"
+#include "crypto_ctx_mgr.h"
+#include "desc_mgr.h"
+#include "lli_mgr.h"
+#include "dx_driver_abi.h"
+#include "dx_dev_defs.h"
+
+/* Control printf's from SeP via GPR.
+ * Keep this macro defined as long as SeP code uses host_printf
+ * (otherwise, SeP would stall waiting for host to ack characters)
+ */
+#define SEP_PRINTF
+/* Note: If DEBUG macro is undefined, SeP prints would not be printed
+ * but the host driver would still ack the characters.                */
+
+#define MODULE_NAME "sep54"
+
+/* PCI ID's */
+#define MRLD_SEP_PCI_DEVICE_ID 0x1198
+
+#define VER_MAJOR(ver)  ((ver) >> 24)
+#define VER_MINOR(ver)  (((ver) >> 16) & 0xFF)
+#define VER_PATCH(ver)  (((ver) >> 8) & 0xFF)
+#define VER_INTERNAL(ver) ((ver) & 0xFF)
+
+#define SECURITY_CFG_ADDR	0xFF03A01C
+#define PWR_DWN_ENB_MASK	0x20
+#define SEP_TIMEOUT		50000
+#define SEP_POWERON_TIMEOUT     10000
+#define SEP_SLEEP_ENABLE 5
+
+#define SEP_AUTOSUSPEND_DELAY 5000
+
+/* GPR that holds SeP state */
+#define SEP_STATE_GPR_OFFSET SEP_HOST_GPR_REG_OFFSET(DX_SEP_STATE_GPR_IDX)
+/* In case of a change in GPR7 (state) we dump also GPR6 */
+#define SEP_STATUS_GPR_OFFSET SEP_HOST_GPR_REG_OFFSET(DX_SEP_STATUS_GPR_IDX)
+
+/* User memref index access macros */
+#define IS_VALID_MEMREF_IDX(idx) \
+	(((idx) >= 0) && ((idx) < MAX_REG_MEMREF_PER_CLIENT_CTX))
+#define INVALIDATE_MEMREF_IDX(idx) ((idx) = DXDI_MEMREF_ID_NULL)
+
+/* Session context access macros - must be invoked with mutex acquired */
+#define SEP_SESSION_ID_INVALID 0xFFFF
+#define IS_VALID_SESSION_CTX(session_ctx_p) \
+	 (((session_ctx_p)->ref_cnt > 0) && \
+	 ((session_ctx_p)->sep_session_id != SEP_SESSION_ID_INVALID))
+#define INVALIDATE_SESSION_CTX(session_ctx_p) do {              \
+	session_ctx_p->sep_session_id = SEP_SESSION_ID_INVALID; \
+	session_ctx_p->ref_cnt = 0;                             \
+} while (0)
+/* Session index access macros */
+/* Index is considered valid even if the pointed session context is not.
+   One should use IS_VALID_SESSION_CTX to verify the validity of the context. */
+#define IS_VALID_SESSION_IDX(idx) \
+	 (((idx) >= 0) && ((idx) < MAX_SEPAPP_SESSION_PER_CLIENT_CTX))
+#define INVALIDATE_SESSION_IDX(idx) ((idx) = DXDI_SEPAPP_SESSION_INVALID)
+
+/*
+   Size of DMA-coherent scratchpad buffer allocated per client_ctx context
+   Currently, this buffer is used for 3 purposes:
+   1. SeP RPC messages.
+   2. SeP Applets messages.
+   3. AES-CCM A0 (prepend) data.
+*/
+#define USER_SPAD_SIZE SEP_RPC_MAX_MSG_SIZE
+#if (SEP_RPC_MAX_MSG_SIZE >= (1 << SEP_SW_DESC_RPC_MSG_HMB_SIZE_BIT_SIZE))
+#error SEP_RPC_MAX_MSG_SIZE too large for HMB_SIZE field
+#endif
+
+/* Get the memref ID/index for given dma_obj (struct user_dma_buffer) */
+#define DMA_OBJ_TO_MEMREF_IDX(client_ctx, the_dma_obj)                 \
+	(container_of(the_dma_obj, struct registered_memref, dma_obj) - \
+		(client_ctx)->reg_memrefs)
+
+/* Crypto context IDs masks (to be used with ctxmgr_sep_cache_invalidate) */
+#define CRYPTO_CTX_ID_SINGLE_MASK 0xFFFFFFFFFFFFFFFFULL
+#define CRYPTO_CTX_ID_CLIENT_SHIFT 32
+#define CRYPTO_CTX_ID_CLIENT_MASK (0xFFFFFFFFULL << CRYPTO_CTX_ID_CLIENT_SHIFT)
+
+/* Return 'true' if val is a multiple of given blk_size */
+/* blk_size must be a power of 2 */
+#define IS_MULT_OF(val, blk_size)  ((val & (blk_size - 1)) == 0)
+
+struct sep_drvdata;
+
+/**
+ * struct queue_drvdata - Data for a specific SeP queue
+ * @desc_queue:	The associated descriptor queue object
+ * @desc_queue_sequencer: Mutex to assure sequence of operations associated
+ *			  with desc. queue enqueue
+ * @sep_cache:	SeP context cache management object
+ * @cdev:	Assocated character device
+ * @devt:	Associated device
+ * @sep_data:	Associated SeP driver context
+ */
+struct queue_drvdata {
+	void *desc_queue;
+	struct mutex desc_queue_sequencer;
+	void *sep_cache;
+	struct device *dev;
+	struct cdev cdev;
+	dev_t devt;
+	struct sep_drvdata *sep_data;
+};
+
+/**
+ * struct sep_drvdata - SeP driver private data context
+ * @mem_start:	phys. address of the control registers
+ * @mem_end:	phys. address of the control registers
+ * @mem_size:	Control registers memory range size (mem_end - mem_start)
+ * @cc_base:	virt address of the CC registers
+ * @irq:	device IRQ number
+ * @irq_mask:	Interrupt mask
+ * @rom_ver:	SeP ROM version
+ * @fw_ver:	SeP loaded firmware version
+ * @icache_size_log2:	Icache memory size in power of 2 bytes
+ * @dcache_size_log2:	Dcache memory size in power of 2 bytes
+ * @icache_pages:	Pages allocated for Icache
+ * @dcache_pages:	Pages allocated for Dcache
+ * @sep_backup_buf_size:	Size of host memory allocated for sep context
+ * @sep_backup_buf:		Buffer allocated for sep context backup
+ * @sepinit_params_buf_dma:	DMA address of sepinit_params_buf_p
+ * @spad_buf_pool:	DMA pool for RPC messages or scratch pad use
+ * @mlli_table_size:	bytes as set by sepinit_get_fw_props
+ * @llimgr:	LLI-manager object handle
+ * @num_of_desc_queues:	Actual number of (active) queues
+ * @num_of_sep_cache_entries:	Available SeP/FW cache entries
+ * @devt_base:	Allocated char.dev. major/minor (with alloc_chrdev_region)
+ * @dev:	Device context
+ * @queue:	Array of objects for each SeP SW-queue
+ * @last_ack_cntr:	The last counter value ACK'ed over the SEP_PRINTF GPR
+ * @cur_line_buf_offset:	Offset in line_buf
+ * @line_buf:	A buffer to accumulate SEP_PRINTF characters up to EOL
+ * @delegate:	Timer to initiate GPRs polling for interrupt-less system
+ */
+struct sep_drvdata {
+	resource_size_t mem_start;
+	resource_size_t mem_end;
+	resource_size_t mem_size;
+	void __iomem *cc_base;
+	unsigned int irq;
+	u32 irq_mask;
+	u32 rom_ver;
+	u32 fw_ver;
+#ifdef CACHE_IMAGE_NAME
+	u8 icache_size_log2;
+	u8 dcache_size_log2;
+	struct page *icache_pages;
+	struct page *dcache_pages;
+#elif defined(SEP_BACKUP_BUF_SIZE)
+	unsigned long sep_backup_buf_size;
+	void *sep_backup_buf;
+#endif
+	int sep_suspended;
+	struct dma_pool *spad_buf_pool;
+	unsigned long mlli_table_size;
+	void *llimgr;
+	unsigned int num_of_desc_queues;
+	int num_of_sep_cache_entries;
+	dev_t devt_base;
+	struct device *dev;
+	struct queue_drvdata queue[SEP_MAX_NUM_OF_DESC_Q];
+
+#ifdef SEP_PRINTF
+	int last_ack_cntr;
+	int cur_line_buf_offset;
+#define SEP_PRINTF_LINE_SIZE 100
+	char line_buf[SEP_PRINTF_LINE_SIZE + 1];
+#endif
+
+#ifdef SEP_INTERRUPT_BY_TIMER
+	struct timer_list delegate;
+#endif
+};
+
+/* Enumerate the session operational state */
+enum user_op_state {
+	USER_OP_NOP = 0,	/* No operation is in processing */
+	USER_OP_PENDING,	/* Operation waiting to enter the desc. queue */
+	USER_OP_INPROC,		/* Operation is in process       */
+	USER_OP_COMPLETED	/* Operation completed, waiting for "read" */
+};
+
+/* Enumerate the data operation types */
+enum crypto_data_intent {
+	CRYPTO_DATA_NULL = 0,
+	CRYPTO_DATA_TEXT,	/* plain/cipher text */
+	CRYPTO_DATA_TEXT_FINALIZE,
+	CRYPTO_DATA_ADATA,	/* Additional/Associated data for AEAD */
+	CRYPTO_DATA_MAX = CRYPTO_DATA_ADATA,
+};
+
+/* SeP Applet session data */
+struct sep_app_session {
+	struct mutex session_lock;	/* Protect updates in entry */
+	/* Reference count on session (initialized to 1 on opening) */
+	u16 ref_cnt;
+	u16 sep_session_id;
+};
+
+/**
+ * struct registered_memref - Management information for registered memory
+ * @buf_lock:	Mutex on buffer state changes (ref. count, etc.)
+ * @ref_cnt:	Reference count for protecting freeing while in use.
+ * @dma_obj:	The client DMA object container for the registered mem.
+ */
+struct registered_memref {
+	struct mutex buf_lock;
+	unsigned int ref_cnt;
+	struct client_dma_buffer dma_obj;
+};
+
+struct async_ctx_info {
+	struct dxdi_sepapp_params *dxdi_params;
+	struct dxdi_sepapp_kparams *dxdi_kparams;
+	struct sepapp_client_params *sw_desc_params;
+	struct client_dma_buffer *local_dma_objs[SEPAPP_MAX_PARAMS];
+	struct mlli_tables_list mlli_tables[SEPAPP_MAX_PARAMS];
+	int session_id;
+};
+
+/*
+ * struct sep_client_ctx - SeP client application context allocated per each
+ *                         open()
+ * @drv_data:	Associated queue driver context
+ * @qid:	Priority queue ID
+ * @uid_cntr:	Persistent unique ID counter to be used for crypto context UIDs
+ *		allocation
+ * @user_memrefs:	Registered user DMA memory buffers
+ * @sepapp_sessions:	SeP Applet client sessions
+ */
+struct sep_client_ctx {
+	struct queue_drvdata *drv_data;
+	unsigned int qid;
+	atomic_t uid_cntr;
+	struct registered_memref reg_memrefs[MAX_REG_MEMREF_PER_CLIENT_CTX];
+	struct sep_app_session
+	    sepapp_sessions[MAX_SEPAPP_SESSION_PER_CLIENT_CTX];
+
+	wait_queue_head_t memref_wq;
+	int memref_cnt;
+	struct mutex memref_lock;
+};
+
+/**
+ * sep_op_type - Flags to describe dispatched type of sep_op_ctx
+ * The flags may be combined when applicable (primarily for CRYPTO_OP desc.).
+ * Because operations maybe asynchronous, we cannot set operation type in the
+ * crypto context.
+ */
+enum sep_op_type {
+	SEP_OP_NULL = 0,
+	SEP_OP_CRYPTO_INIT = 1,	/* CRYPTO_OP::Init. */
+	SEP_OP_CRYPTO_PROC = (1 << 1),	/* CRYPTO_OP::Process */
+	SEP_OP_CRYPTO_FINI = (1 << 2),	/* CRYPTO_OP::Finalize (integrated) */
+	SEP_OP_RPC = (1 << 3),	/* RPC_MSG */
+	SEP_OP_APP = (1 << 4),	/* APP_REQ */
+	SEP_OP_SLEEP = (1 << 7)	/* SLEEP_REQ */
+};
+
+/*
+ * struct sep_op_ctx - A SeP operation context.
+ * @client_ctx:	The client context associated with this operation
+ * @session_ctx:	For SEP_SW_DESC_TYPE_APP_REQ we need the session context
+ *			(otherwise NULL)
+ * @op_state:	Operation progress state indicator
+ * @ioctl_op_compl:	Operation completion signaling object for IOCTLs
+ *			(updated by desc_mgr on completion)
+ * @comp_work:	Async. completion work. NULL for IOCTLs.
+ * @op_ret_code: The operation return code from SeP (valid on desc. completion)
+ * @internal_error:	Mark that return code (error) is from the SeP FW
+ *			infrastructure and not from the requested operation.
+ *			Currently, this is used only for Applet Manager errors.
+ * @ctx_info:	Current Context. If there are more than one context (such as
+ *		in combined alg.) use (&ctx_info)[] into _ctx_info[].
+ * @_ctx_info:	Extension of ctx_info for additional contexts associated with
+ *		current operation (combined op.)
+ * @ctx_info_num:	number of active ctx_info in (&ctx_info)[]
+ * @pending_descs_cntr:	Pending SW descriptor associated with this operation.
+ *			(Number of descriptor completions required to complete
+ *			this operation)
+ * @backlog_descs_cntr:	Descriptors of this operation enqueued in the backlog q.
+ * @ift:	Input data MLLI table object
+ * @oft:	Output data MLLI table object
+ * @ift_dma_obj:	Temporary user memory registrations for IFT
+ * @oft_dma_obj:	Temporary user memory registrations for OFT
+ * @spad_buf_p:	Scratchpad DMA buffer for different temp. buffers required
+ *		during a specific operation: (allocated from rpc_msg_pool)
+ *		- SeP RPC message buffer         or
+ *		- AES-CCM A0 scratchpad buffers  or
+ *		- Next IV for AES-CBC, AES-CTR, DES-CBC
+ * @spad_buf_dma_addr:	DMA address of spad_buf_p
+ * @next_hash_blk_tail_size:	The tail of data_in which is a remainder of a
+ *				block size. We use this info to copy the
+ *				remainder data to the context after block
+ *				processing completion (saved from
+ *				prepare_data_for_sep).
+ *
+ * Retains the operation status and associated resources while operation in
+ * progress. This object provides threads concurrency support since each thread
+ * may work on different instance of this object, within the scope of the same
+ * client (process) context.
+*/
+struct sep_op_ctx {
+	struct sep_client_ctx *client_ctx;
+	struct sep_app_session *session_ctx;
+	enum sep_op_type op_type;
+	enum user_op_state op_state;
+	struct completion ioctl_op_compl;
+	struct work_struct *comp_work;
+	u32 error_info;
+	bool internal_error;
+	struct client_crypto_ctx_info ctx_info;
+	struct client_crypto_ctx_info _ctx_info[DXDI_COMBINED_NODES_MAX - 1];
+	u8 ctx_info_num;
+	u8 pending_descs_cntr;
+	u8 backlog_descs_cntr;
+	/* Client memory resources for (sym.) crypto-ops */
+	struct mlli_tables_list ift;
+	struct mlli_tables_list oft;
+	struct client_dma_buffer din_dma_obj;
+	struct client_dma_buffer dout_dma_obj;
+	void *spad_buf_p;
+	dma_addr_t spad_buf_dma_addr;
+
+	struct async_ctx_info async_info;
+};
+
+/***************************/
+/* SeP registers access    */
+/***************************/
+/* "Raw" read version to be used in IS_CC_BUS_OPEN and in READ_REGISTER */
+#define _READ_REGISTER(_addr) __raw_readl(        \
+	(const volatile void __iomem *)(_addr))
+/* The device register space is considered accessible when expected
+   device signature value is read from HOST_CC_SIGNATURE register   */
+#define IS_CC_BUS_OPEN(drvdata)                                           \
+	(_READ_REGISTER(DX_CC_REG_ADDR(drvdata->cc_base,                  \
+		CRY_KERNEL, HOST_CC_SIGNATURE)) == DX_DEV_SIGNATURE)
+/*FIXME: Temporary w/a to HW problem with registers reading - double-read */
+#define READ_REGISTER(_addr) \
+	({(void)_READ_REGISTER(_addr); _READ_REGISTER(_addr); })
+#define WRITE_REGISTER(_addr, _data)  __raw_writel(_data, \
+	(volatile void __iomem *)(_addr))
+#define GET_SEP_STATE(drvdata)                                           \
+	(IS_CC_BUS_OPEN(drvdata) ?                                       \
+		READ_REGISTER(drvdata->cc_base + SEP_STATE_GPR_OFFSET) : \
+		DX_SEP_STATE_OFF)
+
+#ifdef DEBUG
+void dump_byte_array(const char *name, const u8 *the_array,
+		     unsigned long size);
+void dump_word_array(const char *name, const u32 *the_array,
+		     unsigned long size_in_words);
+#else
+#define dump_byte_array(name, the_array, size) do {} while (0)
+#define dump_word_array(name, the_array, size_in_words) do {} while (0)
+#endif
+
+
+/**
+ * alloc_crypto_ctx_id() - Allocate unique ID for crypto context
+ * @client_ctx:	 The client context object
+ *
+ */
+static inline struct crypto_ctx_uid alloc_crypto_ctx_id(
+	struct sep_client_ctx *client_ctx)
+{
+	struct crypto_ctx_uid uid;
+
+	/* Assuming 32 bit atomic counter is large enough to never wrap
+	 * during a lifetime of a process...
+	 * Someone would laugh (or cry) on this one day */
+#ifdef DEBUG
+	if (atomic_read(&client_ctx->uid_cntr) == 0xFFFFFFFF) {
+		pr_err("uid_cntr overflow for client_ctx=%p\n",
+			    client_ctx);
+		BUG();
+	}
+#endif
+
+	uid.addr = (uintptr_t)client_ctx;
+	uid.cntr = (u32)atomic_inc_return(&client_ctx->uid_cntr);
+
+	return uid;
+}
+
+/**
+ * op_ctx_init() - Initialize an operation context
+ * @op_ctx:	 The allocated struct sep_op_ctx (may be on caller's stack)
+ * @client_ctx:	 The "parent" client context
+ *
+ */
+static inline void op_ctx_init(struct sep_op_ctx *op_ctx,
+			       struct sep_client_ctx *client_ctx)
+{
+	int i;
+	struct client_crypto_ctx_info *ctx_info_p = &(op_ctx->ctx_info);
+
+	pr_debug("op_ctx=%p\n", op_ctx);
+	memset(op_ctx, 0, sizeof(struct sep_op_ctx));
+	op_ctx->client_ctx = client_ctx;
+	op_ctx->ctx_info_num = 1;	/*assume a signle context operation */
+	op_ctx->pending_descs_cntr = 1;	/* assume a single desc. transcation */
+	init_completion(&(op_ctx->ioctl_op_compl));
+	MLLI_TABLES_LIST_INIT(&(op_ctx->ift));
+	MLLI_TABLES_LIST_INIT(&(op_ctx->oft));
+	for (i = 0; i < DXDI_COMBINED_NODES_MAX; i++, ctx_info_p++)
+		USER_CTX_INFO_INIT(ctx_info_p);
+}
+
+/**
+ * op_ctx_fini() - Finalize op_ctx (free associated resources before freeing
+ *		memory)
+ * @op_ctx:	The op_ctx initialized with op_ctx_init
+ *
+ * Returns void
+ */
+static inline void op_ctx_fini(struct sep_op_ctx *op_ctx)
+{
+	pr_debug("op_ctx=%p\n", op_ctx);
+	if (op_ctx->spad_buf_p != NULL)
+		dma_pool_free(op_ctx->client_ctx->drv_data->sep_data->
+			      spad_buf_pool, op_ctx->spad_buf_p,
+			      op_ctx->spad_buf_dma_addr);
+
+	delete_context((uintptr_t)op_ctx);
+	memset(op_ctx, 0, sizeof(struct sep_op_ctx));
+}
+
+/**
+ * init_client_ctx() - Initialize a client context object given
+ * @drvdata:	Queue driver context
+ * @client_ctx:
+ *
+ * Returns void
+ */
+void init_client_ctx(struct queue_drvdata *drvdata,
+		     struct sep_client_ctx *client_ctx);
+
+void cleanup_client_ctx(struct queue_drvdata *drvdata,
+			struct sep_client_ctx *client_ctx);
+
+/**
+ * register_client_memref() - Register given client memory buffer reference
+ * @client_ctx:		User context data
+ * @user_buf_ptr:	Buffer address in user space. NULL if sgl!=NULL.
+ * @sgl:		Scatter/gather list (for kernel buffers)
+ *			NULL if user_buf_ptr!=NULL.
+ * @buf_size:		Buffer size in bytes
+ * @dma_direction:	DMA direction
+ *
+ * Returns int >= is the registered memory reference ID, <0 for error
+ */
+int register_client_memref(struct sep_client_ctx *client_ctx,
+			   u8 __user *user_buf_ptr,
+			   struct scatterlist *sgl,
+			   const unsigned long buf_size,
+			   const enum dma_data_direction dma_direction);
+
+/**
+ * free_client_memref() - Free resources associated with a client mem. reference
+ * @client_ctx:	 User context data
+ * @memref_idx:	 Index of the user memory reference
+ *
+ * Free resources associated with a user memory reference
+ * (The referenced memory may be locked user pages or allocated DMA-coherent
+ *  memory mmap'ed to the user space)
+ * Returns int !0 on failure (memref still in use or unknown)
+ */
+int free_client_memref(struct sep_client_ctx *client_ctx,
+		       int memref_idx);
+
+/**
+ * acquire_dma_obj() - Get the memref object of given memref_idx and increment
+ *			reference count of it
+ * @client_ctx:	Associated client context
+ * @memref_idx:	Required registered memory reference ID (index)
+ *
+ * Get the memref object of given memref_idx and increment reference count of it
+ * The returned object must be released by invoking release_dma_obj() before
+ * the object (memref) may be freed.
+ * Returns struct user_dma_buffer The memref object or NULL for invalid
+ */
+struct client_dma_buffer *acquire_dma_obj(struct sep_client_ctx *client_ctx,
+					  int memref_idx);
+
+/**
+ * release_dma_obj() - Release memref object taken with get_memref_obj
+ *			(Does not free!)
+ * @client_ctx:	Associated client context
+ * @dma_obj:	The DMA object returned from acquire_dma_obj()
+ *
+ * Returns void
+ */
+void release_dma_obj(struct sep_client_ctx *client_ctx,
+		     struct client_dma_buffer *dma_obj);
+
+/**
+ * dxdi_data_dir_to_dma_data_dir() - Convert from DxDI DMA direction type to
+ *					Linux kernel DMA direction type
+ * @dxdi_dir:	 DMA direction in DxDI encoding
+ *
+ * Returns enum dma_data_direction
+ */
+enum dma_data_direction dxdi_data_dir_to_dma_data_dir(enum dxdi_data_direction
+						      dxdi_dir);
+
+/**
+ * wait_for_sep_op_result() - Wait for outstanding SeP operation to complete and
+ *				fetch SeP ret-code
+ * @op_ctx:
+ *
+ * Wait for outstanding SeP operation to complete and fetch SeP ret-code
+ * into op_ctx->sep_ret_code
+ * Returns int
+ */
+int wait_for_sep_op_result(struct sep_op_ctx *op_ctx);
+
+/**
+ * crypto_op_completion_cleanup() - Cleanup CRYPTO_OP descriptor operation
+ *					resources after completion
+ * @op_ctx:
+ *
+ * Returns int
+ */
+int crypto_op_completion_cleanup(struct sep_op_ctx *op_ctx);
+
+
+/*!
+ * IOCTL entry point
+ *
+ * \param filp
+ * \param cmd
+ * \param arg
+ *
+ * \return int
+ * \retval 0 Operation succeeded (but SeP return code may indicate an error)
+ * \retval -ENOTTY  : Unknown IOCTL command
+ * \retval -ENOSYS  : Unsupported/not-implemented (known) operation
+ * \retval -EINVAL  : Invalid parameters
+ * \retval -EFAULT  : Bad pointers for given user memory space
+ * \retval -EPERM   : Not enough permissions for given command
+ * \retval -ENOMEM,-EAGAIN: when not enough resources available for given op.
+ * \retval -EIO     : SeP HW error or another internal error
+ *                    (probably operation timed out or unexpected behavior)
+ */
+long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+
+#endif				/* _DX_DRIVER_H_ */
diff --git a/drivers/staging/sep54/dx_driver_abi.h b/drivers/staging/sep54/dx_driver_abi.h
new file mode 100644
index 0000000..4c86eab
--- /dev/null
+++ b/drivers/staging/sep54/dx_driver_abi.h
@@ -0,0 +1,657 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+#ifndef __SEP_DRIVER_ABI_H__
+#define __SEP_DRIVER_ABI_H__
+
+#ifdef __KERNEL__
+#include <linux/types.h>
+#ifndef INT32_MAX
+#define INT32_MAX 0x7FFFFFFFL
+#endif
+#else
+/* For inclusion in user space library */
+#include <stdint.h>
+#endif
+
+#include <linux/ioctl.h>
+#include <linux/errno.h>
+#include "sep_rpc.h"
+
+/* Proprietary error code for unexpected internal errors */
+#define EBUG 999
+
+/****************************/
+/**** IOCTL return codes ****/
+/*****************************************************************************
+ ENOTTY  : Unknown IOCTL command					     *
+ ENOSYS  : Unsupported/not-implemented (known) operation		     *
+ EINVAL  : Invalid parameters                                                *
+ EFAULT  : Bad pointers for given user memory space                          *
+ EPERM   : Not enough permissions for given command                          *
+ ENOMEM,EAGAIN: when not enough resources available for given op.            *
+ EIO     : SeP HW error or another internal error (probably operation timed  *
+	   out or unexpected behavior)                                       *
+ EBUG    : Driver bug found ("assertion") - see system log                   *
+*****************************************************************************/
+
+/****** IOCTL commands ********/
+/* The magic number appears free in Documentation/ioctl/ioctl-number.txt */
+#define DXDI_IOC_MAGIC 0xD1
+
+/* IOCTL ordinal numbers */
+/*(for backward compatibility, add new ones only at end of list!) */
+enum dxdi_ioc_nr {
+	/* Version info. commands */
+	DXDI_IOC_NR_GET_VER_MAJOR = 0,
+	DXDI_IOC_NR_GET_VER_MINOR = 1,
+	/* Context size queries */
+	DXDI_IOC_NR_GET_SYMCIPHER_CTX_SIZE = 2,
+	DXDI_IOC_NR_GET_AUTH_ENC_CTX_SIZE = 3,
+	DXDI_IOC_NR_GET_MAC_CTX_SIZE = 4,
+	DXDI_IOC_NR_GET_HASH_CTX_SIZE = 5,
+	/* Init context commands */
+	DXDI_IOC_NR_SYMCIPHER_INIT = 7,
+	DXDI_IOC_NR_AUTH_ENC_INIT = 8,
+	DXDI_IOC_NR_MAC_INIT = 9,
+	DXDI_IOC_NR_HASH_INIT = 10,
+	/* Processing commands */
+	DXDI_IOC_NR_PROC_DBLK = 12,
+	DXDI_IOC_NR_FIN_PROC = 13,
+	/* "Integrated" processing operations */
+	DXDI_IOC_NR_SYMCIPHER_PROC = 14,
+	DXDI_IOC_NR_AUTH_ENC_PROC = 15,
+	DXDI_IOC_NR_MAC_PROC = 16,
+	DXDI_IOC_NR_HASH_PROC = 17,
+	/* SeP RPC */
+	DXDI_IOC_NR_SEP_RPC = 19,
+	/* Memory registration */
+	DXDI_IOC_NR_REGISTER_MEM4DMA = 20,
+	DXDI_IOC_NR_ALLOC_MEM4DMA = 21,
+	DXDI_IOC_NR_FREE_MEM4DMA = 22,
+	/* SeP Applets API */
+	DXDI_IOC_NR_SEPAPP_SESSION_OPEN = 23,
+	DXDI_IOC_NR_SEPAPP_SESSION_CLOSE = 24,
+	DXDI_IOC_NR_SEPAPP_COMMAND_INVOKE = 25,
+	/* Combined mode */
+	DXDI_IOC_NR_COMBINED_INIT = 26,
+	DXDI_IOC_NR_COMBINED_PROC_DBLK = 27,
+	DXDI_IOC_NR_COMBINED_PROC_FIN = 28,
+	DXDI_IOC_NR_COMBINED_PROC = 29,
+
+	/* AES IV set/get API */
+	DXDI_IOC_NR_SET_IV = 30,
+	DXDI_IOC_NR_GET_IV = 31,
+	DXDI_IOC_NR_MAX = DXDI_IOC_NR_GET_IV
+};
+
+/* In case error is not DXDI_RET_ESEP these are the
+*  errors embedded in the error info field "error_info" */
+enum dxdi_error_info {
+	DXDI_ERROR_NULL = 0,
+	DXDI_ERROR_BAD_CTX = 1,
+	DXDI_ERROR_UNSUP = 2,
+	DXDI_ERROR_INVAL_MODE = 3,
+	DXDI_ERROR_INVAL_DIRECTION = 4,
+	DXDI_ERROR_INVAL_KEY_SIZE = 5,
+	DXDI_ERROR_INVAL_NONCE_SIZE = 6,
+	DXDI_ERROR_INVAL_TAG_SIZE = 7,
+	DXDI_ERROR_INVAL_DIN_PTR = 8,
+	DXDI_ERROR_INVAL_DOUT_PTR = 9,
+	DXDI_ERROR_INVAL_DATA_SIZE = 10,
+	DXDI_ERROR_DIN_DOUT_OVERLAP = 11,
+	DXDI_ERROR_INTERNAL = 12,
+	DXDI_ERROR_NO_RESOURCE = 13,
+	DXDI_ERROR_FATAL = 14,
+	DXDI_ERROR_INFO_RESERVE32B = INT32_MAX
+};
+
+/* ABI Version info. */
+#define DXDI_VER_MAJOR 1
+#define DXDI_VER_MINOR DXDI_IOC_NR_MAX
+
+/******************************/
+/* IOCTL commands definitions */
+/******************************/
+/* Version info. commands */
+#define DXDI_IOC_GET_VER_MAJOR _IOR(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_GET_VER_MAJOR, u32)
+#define DXDI_IOC_GET_VER_MINOR _IOR(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_GET_VER_MINOR, u32)
+/* Context size queries */
+#define DXDI_IOC_GET_SYMCIPHER_CTX_SIZE _IORW(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_GET_SYMCIPHER_CTX_SIZE,\
+		struct dxdi_get_sym_cipher_ctx_size_params)
+#define DXDI_IOC_GET_AUTH_ENC_CTX_SIZE _IORW(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_GET_AUTH_ENC_CTX_SIZE,\
+		struct dxdi_get_auth_enc_ctx_size_params)
+#define DXDI_IOC_GET_MAC_CTX_SIZE _IORW(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_GET_MAC_CTX_SIZE,\
+		struct dxdi_get_mac_ctx_size_params)
+#define DXDI_IOC_GET_HASH_CTX_SIZE _IORW(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_GET_HASH_CTX_SIZE,\
+		struct dxdi_get_hash_ctx_size_params)
+/* Init. Sym. Crypto. */
+#define DXDI_IOC_SYMCIPHER_INIT _IORW(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_SYMCIPHER_INIT, struct dxdi_sym_cipher_init_params)
+#define DXDI_IOC_AUTH_ENC_INIT _IORW(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_AUTH_ENC_INIT, struct dxdi_auth_enc_init_params)
+#define DXDI_IOC_MAC_INIT _IORW(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_MAC_INIT, struct dxdi_mac_init_params)
+#define DXDI_IOC_HASH_INIT _IORW(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_HASH_INIT, struct dxdi_hash_init_params)
+
+/* Sym. Crypto. Processing commands */
+#define DXDI_IOC_PROC_DBLK _IORW(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_PROC_DBLK, struct dxdi_process_dblk_params)
+#define DXDI_IOC_FIN_PROC _IORW(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_FIN_PROC, struct dxdi_fin_process_params)
+
+/* Integrated Sym. Crypto. */
+#define DXDI_IOC_SYMCIPHER_PROC _IORW(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_SYMCIPHER_PROC, struct dxdi_sym_cipher_proc_params)
+#define DXDI_IOC_AUTH_ENC_PROC _IORW(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_AUTH_ENC_PROC, struct dxdi_auth_enc_proc_params)
+#define DXDI_IOC_MAC_PROC _IORW(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_MAC_PROC, struct dxdi_mac_proc_params)
+#define DXDI_IOC_HASH_PROC _IORW(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_HASH_PROC, struct dxdi_hash_proc_params)
+
+/* AES Initial Vector set/get */
+#define DXDI_IOC_SET_IV _IORW(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_SET_IV, struct dxdi_aes_iv_params)
+#define DXDI_IOC_GET_IV _IORW(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_GET_IV, struct dxdi_aes_iv_params)
+
+/* Combined mode  */
+#define DXDI_IOC_COMBINED_INIT _IORW(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_COMBINED_INIT,\
+		struct dxdi_combined_init_params)
+#define DXDI_IOC_COMBINED_PROC_DBLK _IORW(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_COMBINED_PROC_DBLK,\
+		struct dxdi_combined_proc_dblk_params)
+#define DXDI_IOC_COMBINED_PROC_FIN _IORW(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_COMBINED_PROC_FIN,\
+		struct dxdi_combined_proc_params)
+#define DXDI_IOC_COMBINED_PROC _IORW(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_COMBINED_PROC,\
+		struct dxdi_combined_proc_params)
+
+/* SeP RPC */
+#define DXDI_IOC_SEP_RPC _IORW(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_SEP_RPC, struct dxdi_sep_rpc_params)
+/* Memory registration */
+#define DXDI_IOC_REGISTER_MEM4DMA _IORW(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_REGISTER_MEM4DMA, \
+		struct dxdi_register_mem4dma_params)
+#define DXDI_IOC_ALLOC_MEM4DMA _IORW(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_ALLOC_MEM4DMA, \
+		struct dxdi_alloc_mem4dma_params)
+#define DXDI_IOC_FREE_MEM4DMA _IORW(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_FREE_MEM4DMA, \
+		struct dxdi_free_mem4dma_params)
+/* SeP Applets API */
+#define DXDI_IOC_SEPAPP_SESSION_OPEN _IORW(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_SEPAPP_SESSION_OPEN, \
+		struct dxdi_sepapp_session_open_params)
+#define DXDI_IOC_SEPAPP_SESSION_CLOSE _IORW(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_SEPAPP_SESSION_CLOSE, \
+		struct dxdi_sepapp_session_close_params)
+#define DXDI_IOC_SEPAPP_COMMAND_INVOKE _IORW(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_SEPAPP_COMMAND_INVOKE, \
+		struct dxdi_sepapp_command_invoke_params)
+
+/*** ABI constants ***/
+/* Max. symmetric crypto key size (512b) */
+#define DXDI_SYM_KEY_SIZE_MAX 64	/*octets */
+/* Max. MAC key size (applicable to HMAC-SHA512) */
+#define DXDI_MAC_KEY_SIZE_MAX 128	/*octets */
+/* AES IV/Counter size (128b) */
+#define DXDI_AES_BLOCK_SIZE 16	/*octets */
+/* DES IV size (64b) */
+#define DXDI_DES_BLOCK_SIZE 8	/*octets */
+/* Max. Nonce size */
+#define DXDI_NONCE_SIZE_MAX 16	/*octets */
+/* Max. digest size */
+#define DXDI_DIGEST_SIZE_MAX 64	/*octets */
+/* Max. nodes */
+#define DXDI_COMBINED_NODES_MAX 4
+#define DXDI_AES_IV_SIZE DXDI_AES_BLOCK_SIZE
+
+/*** ABI data types ***/
+
+enum dxdi_cipher_direction {
+	DXDI_CDIR_ENC = 0,
+	DXDI_CDIR_DEC = 1,
+	DXDI_CDIR_MAX = DXDI_CDIR_DEC,
+	DXDI_CDIR_RESERVE32B = INT32_MAX
+};
+
+enum dxdi_sym_cipher_type {
+	DXDI_SYMCIPHER_NONE = 0,
+	_DXDI_SYMCIPHER_AES_FIRST = 1,
+	DXDI_SYMCIPHER_AES_XXX = _DXDI_SYMCIPHER_AES_FIRST,
+	DXDI_SYMCIPHER_AES_ECB = _DXDI_SYMCIPHER_AES_FIRST + 1,
+	DXDI_SYMCIPHER_AES_CBC = _DXDI_SYMCIPHER_AES_FIRST + 2,
+	DXDI_SYMCIPHER_AES_CTR = _DXDI_SYMCIPHER_AES_FIRST + 3,
+	DXDI_SYMCIPHER_AES_XTS = _DXDI_SYMCIPHER_AES_FIRST + 4,
+	_DXDI_SYMCIPHER_AES_LAST = DXDI_SYMCIPHER_AES_XTS,
+	_DXDI_SYMCIPHER_DES_FIRST = 0x11,
+	DXDI_SYMCIPHER_DES_ECB = _DXDI_SYMCIPHER_DES_FIRST,
+	DXDI_SYMCIPHER_DES_CBC = _DXDI_SYMCIPHER_DES_FIRST + 1,
+	_DXDI_SYMCIPHER_DES_LAST = DXDI_SYMCIPHER_DES_CBC,
+	_DXDI_SYMCIPHER_C2_FIRST = 0x21,
+	DXDI_SYMCIPHER_C2_ECB = _DXDI_SYMCIPHER_C2_FIRST,
+	DXDI_SYMCIPHER_C2_CBC = _DXDI_SYMCIPHER_C2_FIRST + 1,
+	_DXDI_SYMCIPHER_C2_LAST = DXDI_SYMCIPHER_C2_CBC,
+	DXDI_SYMCIPHER_RC4 = 0x31,	/* Supported in message API only */
+	DXDI_SYMCIPHER_RESERVE32B = INT32_MAX
+};
+
+enum dxdi_auth_enc_type {
+	DXDI_AUTHENC_NONE = 0,
+	DXDI_AUTHENC_AES_CCM = 1,
+	DXDI_AUTHENC_AES_GCM = 2,
+	DXDI_AUTHENC_MAX = DXDI_AUTHENC_AES_GCM,
+	DXDI_AUTHENC_RESERVE32B = INT32_MAX
+};
+
+enum dxdi_mac_type {
+	DXDI_MAC_NONE = 0,
+	DXDI_MAC_HMAC = 1,
+	DXDI_MAC_AES_MAC = 2,
+	DXDI_MAC_AES_CMAC = 3,
+	DXDI_MAC_AES_XCBC_MAC = 4,
+	DXDI_MAC_MAX = DXDI_MAC_AES_XCBC_MAC,
+	DXDI_MAC_RESERVE32B = INT32_MAX
+};
+
+enum dxdi_hash_type {
+	DXDI_HASH_NONE = 0,
+	DXDI_HASH_MD5 = 1,
+	DXDI_HASH_SHA1 = 2,
+	DXDI_HASH_SHA224 = 3,
+	DXDI_HASH_SHA256 = 4,
+	DXDI_HASH_SHA384 = 5,
+	DXDI_HASH_SHA512 = 6,
+	DXDI_HASH_MAX = DXDI_HASH_SHA512,
+	DXDI_HASH_RESERVE32B = INT32_MAX
+};
+
+enum dxdi_data_block_type {
+	DXDI_DATA_TYPE_NULL = 0,
+	DXDI_DATA_TYPE_TEXT = 1,/* Plain/cipher text */
+	DXDI_DATA_TYPE_ADATA = 2,/* Additional/Associated data for AEAD */
+	DXDI_DATA_TYPE_MAX = DXDI_DATA_TYPE_ADATA,
+	DXDI_DATA_TYPE_RESERVE32B = INT32_MAX
+};
+
+enum dxdi_input_engine_type {
+	DXDI_INPUT_NULL = 0,	/* no input */
+	DXDI_INPUT_ENGINE_1 = 1,
+	DXID_INPUT_ENGINE_2 = 2,
+	DXDI_INPUT_DIN = 15,	/* input from DIN */
+	DXDI_INPUT_ENGINE_RESERVE32B = INT32_MAX,
+};
+
+#pragma pack(push)
+#pragma pack(4) /* Force to 32 bit alignment */
+/* Properties of specific ciphers */
+/* (for use in alg_specific union of dxdi_cipher_props) */
+struct dxdi_des_cbc_props {
+	u8 iv[DXDI_DES_BLOCK_SIZE];
+};
+struct dxdi_aes_cbc_props {
+	u8 iv[DXDI_AES_BLOCK_SIZE];
+};
+struct dxdi_aes_ctr_props {
+	u8 cntr[DXDI_AES_BLOCK_SIZE];
+};
+struct dxdi_aes_xts_props {
+	u8 init_tweak[DXDI_AES_BLOCK_SIZE];
+	u32 data_unit_size;
+};
+struct dxdi_c2_cbc_props {
+	u32 reset_interval;
+};
+
+struct dxdi_sym_cipher_props {
+	enum dxdi_sym_cipher_type cipher_type;
+	enum dxdi_cipher_direction direction;
+	u8 key_size;	/* In octets */
+	u8 key[DXDI_SYM_KEY_SIZE_MAX];
+	union {			/* cipher specific properties */
+		struct dxdi_des_cbc_props des_cbc;
+		struct dxdi_aes_cbc_props aes_cbc;
+		struct dxdi_aes_ctr_props aes_ctr;
+		struct dxdi_aes_xts_props aes_xts;
+		struct dxdi_c2_cbc_props c2_cbc;
+		u32 __assure_32b_union_alignment;
+		/* Reserve space for future extension? */
+	} alg_specific;
+};
+
+struct dxdi_auth_enc_props {
+	enum dxdi_auth_enc_type ae_type;
+	enum dxdi_cipher_direction direction;
+	u32 adata_size;	/* In octets */
+	u32 text_size;	/* In octets */
+	u8 key_size;	/* In octets */
+	u8 nonce_size;	/* In octets */
+	u8 tag_size;	/* In octets */
+	u8 key[DXDI_SYM_KEY_SIZE_MAX];
+	u8 nonce[DXDI_NONCE_SIZE_MAX];
+};
+
+/* Properties specific for HMAC */
+/* (for use in properties union of dxdi_mac_props) */
+struct dxdi_hmac_props {
+	enum dxdi_hash_type hash_type;
+};
+
+struct dxdi_aes_mac_props {
+	u8 iv[DXDI_AES_BLOCK_SIZE];
+};
+
+struct dxdi_mac_props {
+	enum dxdi_mac_type mac_type;
+	u32 key_size;	/* In octets */
+	u8 key[DXDI_MAC_KEY_SIZE_MAX];
+	union {			/* Union of algorithm specific properties */
+		struct dxdi_hmac_props hmac;
+		struct dxdi_aes_mac_props aes_mac;
+		u32 __assure_32b_union_alignment;
+		/* Reserve space for future extension? */
+	} alg_specific;
+};
+
+/* Combined mode props */
+struct dxdi_combined_node_props {
+	u32 *context;
+	enum dxdi_input_engine_type eng_input;
+};
+
+struct dxdi_combined_props {
+	struct dxdi_combined_node_props node_props[DXDI_COMBINED_NODES_MAX];
+};
+
+/*** IOCTL commands parameters structures ***/
+
+struct dxdi_get_sym_cipher_ctx_size_params {
+	enum dxdi_sym_cipher_type sym_cipher_type;	/*[in] */
+	u32 ctx_size;	/*[out] */
+};
+
+struct dxdi_get_auth_enc_ctx_size_params {
+	enum dxdi_auth_enc_type ae_type;	/*[in] */
+	u32 ctx_size;	/*[out] */
+};
+
+struct dxdi_get_mac_ctx_size_params {
+	enum dxdi_mac_type mac_type;	/*[in] */
+	u32 ctx_size;	/*[out] */
+};
+
+struct dxdi_get_hash_ctx_size_params {
+	enum dxdi_hash_type hash_type;	/*[in] */
+	u32 ctx_size;	/*[out] */
+};
+
+/* Init params */
+struct dxdi_sym_cipher_init_params {
+	u32 *context_buf;	/*[in] */
+	struct dxdi_sym_cipher_props props;	/*[in] */
+	u32 error_info;	/*[out] */
+};
+
+struct dxdi_auth_enc_init_params {
+	u32 *context_buf;	/*[in] */
+	struct dxdi_auth_enc_props props;	/*[in] */
+	u32 error_info;	/*[out] */
+};
+
+struct dxdi_mac_init_params {
+	u32 *context_buf;	/*[in] */
+	struct dxdi_mac_props props;	/*[in] */
+	u32 error_info;	/*[out] */
+};
+
+struct dxdi_hash_init_params {
+	u32 *context_buf;	/*[in] */
+	enum dxdi_hash_type hash_type;	/*[in] */
+	u32 error_info;	/*[out] */
+};
+
+/* Processing params */
+struct dxdi_process_dblk_params {
+	u32 *context_buf;	/*[in] */
+	u8 *data_in;	/*[in] */
+	u8 *data_out;	/*[in] */
+	enum dxdi_data_block_type data_block_type;	/*[in] */
+	u32 data_in_size;	/*[in] */
+	u32 error_info;	/*[out] */
+};
+
+struct dxdi_fin_process_params {
+	u32 *context_buf;	/*[in] */
+	u8 *data_in;	/*[in] */
+	u8 *data_out;	/*[in] */
+	u32 data_in_size;	/*[in] (octets) */
+	u8 digest_or_mac[DXDI_DIGEST_SIZE_MAX];	/*[out] */
+	u8 digest_or_mac_size;	/*[out] (octets) */
+	u32 error_info;	/*[out] */
+};
+
+struct dxdi_sym_cipher_proc_params {
+	u32 *context_buf;	/*[in] */
+	struct dxdi_sym_cipher_props props;	/*[in] */
+	u8 *data_in;	/*[in] */
+	u8 *data_out;	/*[in] */
+	u32 data_in_size;	/*[in] (octets) */
+	u32 error_info;	/*[out] */
+};
+
+struct dxdi_auth_enc_proc_params {
+	u32 *context_buf;	/*[in] */
+	struct dxdi_auth_enc_props props;	/*[in] */
+	u8 *adata;		/*[in] */
+	u8 *text_data;	/*[in] */
+	u8 *data_out;	/*[in] */
+	u8 tag[DXDI_DIGEST_SIZE_MAX];	/*[out] */
+	u32 error_info;	/*[out] */
+};
+
+struct dxdi_mac_proc_params {
+	u32 *context_buf;	/*[in] */
+	struct dxdi_mac_props props;	/*[in] */
+	u8 *data_in;	/*[in] */
+	u32 data_in_size;	/*[in] (octets) */
+	u8 mac[DXDI_DIGEST_SIZE_MAX];	/*[out] */
+	u8 mac_size;	/*[out] (octets) */
+	u32 error_info;	/*[out] */
+};
+
+struct dxdi_hash_proc_params {
+	u32 *context_buf;	/*[in] */
+	enum dxdi_hash_type hash_type;	/*[in] */
+	u8 *data_in;	/*[in] */
+	u32 data_in_size;	/*[in] (octets) */
+	u8 digest[DXDI_DIGEST_SIZE_MAX];	/*[out] */
+	u8 digest_size;	/*[out] (octets) */
+	u32 error_info;	/*[out] */
+};
+
+struct dxdi_aes_iv_params {
+	u32 *context_buf;	/*[in] */
+	u8 iv_ptr[DXDI_AES_IV_SIZE];	/*[in]/[out] */
+};
+
+/* Combined params */
+struct dxdi_combined_init_params {
+	struct dxdi_combined_props props;	/*[in] */
+	u32 error_info;	/*[out] */
+};
+
+struct dxdi_combined_proc_dblk_params {
+	struct dxdi_combined_props props;	/*[in] */
+	u8 *data_in;	/*[in] */
+	u8 *data_out;	/*[out] */
+	u32 data_in_size;	/*[in] (octets) */
+	u32 error_info;	/*[out] */
+};
+
+/* the structure used in finalize and integrated processing */
+struct dxdi_combined_proc_params {
+	struct dxdi_combined_props props;	/*[in] */
+	u8 *data_in;	/*[in] */
+	u8 *data_out;	/*[out] */
+	u32 data_in_size;	/*[in] (octets) */
+	u8 auth_data[DXDI_DIGEST_SIZE_MAX];	/*[out] */
+	u8 auth_data_size;	/*[out] (octets) */
+	u32 error_info;	/*[out] */
+};
+
+/**************************************/
+/* Memory references and registration */
+/**************************************/
+
+enum dxdi_data_direction {
+	DXDI_DATA_NULL = 0,
+	DXDI_DATA_TO_DEVICE = 1,
+	DXDI_DATA_FROM_DEVICE = (1 << 1),
+	DXDI_DATA_BIDIR = (DXDI_DATA_TO_DEVICE | DXDI_DATA_FROM_DEVICE)
+};
+
+/* Reference to pre-registered memory */
+#define DXDI_MEMREF_ID_NULL -1
+
+struct dxdi_memref {
+	enum dxdi_data_direction dma_direction;
+	/* Memory reference ID - DXDI_MEMREF_ID_NULL if not registered */
+	int ref_id;
+	/* Start address of a non-registered memory or offset within a
+	 * registered memory */
+	u32 start_or_offset;
+	/* Size in bytes of non-registered buffer or size of chunk within a
+	 * registered buffer */
+	u32 size;
+};
+
+struct dxdi_register_mem4dma_params {
+	struct dxdi_memref memref;	/*[in] */
+	int memref_id;	/*[out] */
+};
+
+struct dxdi_alloc_mem4dma_params {
+	u32 size;	/*[in] */
+	int memref_id;	/*[out] */
+};
+
+struct dxdi_free_mem4dma_params {
+	int memref_id;	/*[in] */
+};
+
+/***********/
+/* SeP-RPC */
+/***********/
+struct dxdi_sep_rpc_params {
+	u16 agent_id;	/*[in] */
+	u16 func_id;	/*[in] */
+	struct dxdi_memref mem_refs[SEP_RPC_MAX_MEMREF_PER_FUNC]; /*[in] */
+	u32 rpc_params_size;	/*[in] */
+	struct seprpc_params *rpc_params;	/*[in] */
+	/* rpc_params to be copied into kernel DMA buffer */
+	enum seprpc_retcode error_info;	/*[out] */
+};
+
+/***************/
+/* SeP Applets */
+/***************/
+
+enum dxdi_sepapp_param_type {
+	DXDI_SEPAPP_PARAM_NULL = 0,
+	DXDI_SEPAPP_PARAM_VAL = 1,
+	DXDI_SEPAPP_PARAM_MEMREF = 2,
+	DXDI_SEPAPP_PARAM_RESERVE32B = 0x7FFFFFFF
+};
+
+struct dxdi_val_param {
+	enum dxdi_data_direction copy_dir;	/* Copy direction */
+	u32 data[2];
+};
+
+#define SEP_APP_PARAMS_MAX 4
+
+struct dxdi_sepapp_params {
+	enum dxdi_sepapp_param_type params_types[SEP_APP_PARAMS_MAX];
+	union {
+		struct dxdi_val_param val;
+		struct dxdi_memref memref;
+	} params[SEP_APP_PARAMS_MAX];
+};
+
+/* SeP modules ID for returnOrigin */
+enum dxdi_sep_module {
+	DXDI_SEP_MODULE_NULL = 0,
+	DXDI_SEP_MODULE_HOST_DRIVER = 1,
+	DXDI_SEP_MODULE_SW_QUEUE = 2,	/* SW-queue task: Inc. desc. parsers */
+	DXDI_SEP_MODULE_APP_MGR = 3,	/* Applet Manager */
+	DXDI_SEP_MODULE_APP = 4,	/* Applet */
+	DXDI_SEP_MODULE_RPC_AGENT = 5,	/* Down to RPC parsers */
+	DXDI_SEP_MODULE_SYM_CRYPTO = 6,	/* Symmetric crypto driver */
+	DXDI_SEP_MODULE_RESERVE32B = 0x7FFFFFFF
+};
+
+#define DXDI_SEPAPP_UUID_SIZE 16
+
+#define DXDI_SEPAPP_SESSION_INVALID (-1)
+
+struct dxdi_sepapp_session_open_params {
+	u8 app_uuid[DXDI_SEPAPP_UUID_SIZE];	/*[in] */
+	u32 auth_method;	/*[in] */
+	u32 auth_data[3];	/*[in] */
+	struct dxdi_sepapp_params app_auth_data;	/*[in/out] */
+	int session_id;	/*[out] */
+	enum dxdi_sep_module sep_ret_origin;	/*[out] */
+	u32 error_info;	/*[out] */
+};
+
+struct dxdi_sepapp_session_close_params {
+	int session_id;	/*[in] */
+};
+
+struct dxdi_sepapp_command_invoke_params {
+	u8 app_uuid[DXDI_SEPAPP_UUID_SIZE];	/*[in] */
+	int session_id;	/*[in] */
+	u32 command_id;	/*[in] */
+	struct dxdi_sepapp_params command_params;	/*[in/out] */
+	enum dxdi_sep_module sep_ret_origin;	/*[out] */
+	u32 error_info;	/*[out] */
+};
+
+#pragma pack(pop)
+
+#endif /*__SEP_DRIVER_ABI_H__*/
diff --git a/drivers/staging/sep54/dx_env.h b/drivers/staging/sep54/dx_env.h
new file mode 100644
index 0000000..299e3c7
--- /dev/null
+++ b/drivers/staging/sep54/dx_env.h
@@ -0,0 +1,230 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+#ifndef __DX_ENV_H__
+#define __DX_ENV_H__
+
+/*--------------------------------------*/
+/* BLOCK: ENV_REGS                      */
+/*--------------------------------------*/
+#define DX_ENV_CC_GPI_REG_OFFSET     0x18UL
+#define DX_ENV_CC_GPI_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_GPI_VALUE_BIT_SIZE    0x20UL
+#define DX_ENV_CC_GPO_REG_OFFSET     0x1cUL
+#define DX_ENV_CC_GPO_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_GPO_VALUE_BIT_SIZE    0x20UL
+#define DX_ENV_PKA_DEBUG_MODE_REG_OFFSET     0x24UL
+#define DX_ENV_PKA_DEBUG_MODE_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_PKA_DEBUG_MODE_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_SCAN_MODE_REG_OFFSET     0x30UL
+#define DX_ENV_SCAN_MODE_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_SCAN_MODE_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_CC_ALLOW_SCAN_REG_OFFSET     0x34UL
+#define DX_ENV_CC_ALLOW_SCAN_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_ALLOW_SCAN_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_HOST_CC_EXT_INT_REG_OFFSET     0x38UL
+#define DX_ENV_HOST_CC_EXT_INT_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_HOST_CC_EXT_INT_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_CC_SW_MONITOR_ADDR_REG_OFFSET     0x60UL
+#define DX_ENV_CC_SW_MONITOR_ADDR_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_SW_MONITOR_ADDR_VALUE_BIT_SIZE    0x20UL
+#define DX_ENV_CC_HOST_INT_REG_OFFSET     0x0A0UL
+#define DX_ENV_CC_HOST_INT_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_HOST_INT_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_CC_RST_N_REG_OFFSET     0x0A8UL
+#define DX_ENV_CC_RST_N_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_RST_N_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_RST_OVERRIDE_REG_OFFSET     0x0ACUL
+#define DX_ENV_RST_OVERRIDE_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_RST_OVERRIDE_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_CC_HOST_EXT_ACK_REG_OFFSET     0x0B0UL
+#define DX_ENV_CC_HOST_EXT_ACK_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_HOST_EXT_ACK_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_CC_POR_N_ADDR_REG_OFFSET     0x0E0UL
+#define DX_ENV_CC_POR_N_ADDR_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_POR_N_ADDR_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_CC_WARM_BOOT_REG_OFFSET     0x0E4UL
+#define DX_ENV_CC_WARM_BOOT_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_WARM_BOOT_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_CC_COLD_BOOT_REG_OFFSET     0x0E8UL
+#define DX_ENV_CC_COLD_BOOT_CC_COLD_BOOT_FULL_BIT_SHIFT  0x0UL
+#define DX_ENV_CC_COLD_BOOT_CC_COLD_BOOT_FULL_BIT_SIZE   0x1UL
+#define DX_ENV_CC_COLD_BOOT_CC_COLD_BOOT_SEMI_BIT_SHIFT  0x1UL
+#define DX_ENV_CC_COLD_BOOT_CC_COLD_BOOT_SEMI_BIT_SIZE   0x1UL
+#define DX_ENV_CC_BM_LOWER_BOUND_ADDR_REG_OFFSET     0x0F0UL
+#define DX_ENV_CC_BM_LOWER_BOUND_ADDR_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_BM_LOWER_BOUND_ADDR_VALUE_BIT_SIZE    0x14UL
+#define DX_ENV_CC_BM_UPPER_BOUND_ADDR_REG_OFFSET     0x0F4UL
+#define DX_ENV_CC_BM_UPPER_BOUND_ADDR_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_BM_UPPER_BOUND_ADDR_VALUE_BIT_SIZE    0x14UL
+#define DX_ENV_CC_BM_ENB_ADDR_REG_OFFSET     0x0F8UL
+#define DX_ENV_CC_BM_ENB_ADDR_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_BM_ENB_ADDR_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_CC_COLD_RST_REG_OFFSET     0x0FCUL
+#define DX_ENV_CC_COLD_RST_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_COLD_RST_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_CC_BM_ERR_ACK_ADDR_REG_OFFSET     0x100UL
+#define DX_ENV_CC_BM_ERR_ACK_ADDR_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_BM_ERR_ACK_ADDR_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_BM_CC_ERR_ADDR_REG_OFFSET     0x104UL
+#define DX_ENV_BM_CC_ERR_ADDR_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_BM_CC_ERR_ADDR_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_DUMMY_ADDR_REG_OFFSET     0x108UL
+#define DX_ENV_DUMMY_ADDR_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_DUMMY_ADDR_VALUE_BIT_SIZE    0x20UL
+#define DX_ENV_CLK_STATUS_REG_OFFSET     0x10CUL
+#define DX_ENV_CLK_STATUS_AES_CLK_STATUS_BIT_SHIFT  0x0UL
+#define DX_ENV_CLK_STATUS_AES_CLK_STATUS_BIT_SIZE   0x1UL
+#define DX_ENV_CLK_STATUS_DES_CLK_STATUS_BIT_SHIFT  0x1UL
+#define DX_ENV_CLK_STATUS_DES_CLK_STATUS_BIT_SIZE   0x1UL
+#define DX_ENV_CLK_STATUS_HASH_CLK_STATUS_BIT_SHIFT  0x2UL
+#define DX_ENV_CLK_STATUS_HASH_CLK_STATUS_BIT_SIZE   0x1UL
+#define DX_ENV_CLK_STATUS_PKA_CLK_STATUS_BIT_SHIFT  0x3UL
+#define DX_ENV_CLK_STATUS_PKA_CLK_STATUS_BIT_SIZE   0x1UL
+#define DX_ENV_CLK_STATUS_RC4_CLK_STATUS_BIT_SHIFT  0x4UL
+#define DX_ENV_CLK_STATUS_RC4_CLK_STATUS_BIT_SIZE   0x1UL
+#define DX_ENV_CLK_STATUS_AHB_CLK_STATUS_BIT_SHIFT  0x5UL
+#define DX_ENV_CLK_STATUS_AHB_CLK_STATUS_BIT_SIZE   0x1UL
+#define DX_ENV_CLK_STATUS_RNG_CLK_STATUS_BIT_SHIFT  0x6UL
+#define DX_ENV_CLK_STATUS_RNG_CLK_STATUS_BIT_SIZE   0x1UL
+#define DX_ENV_CLK_STATUS_C2_CLK_STATUS_BIT_SHIFT  0x7UL
+#define DX_ENV_CLK_STATUS_C2_CLK_STATUS_BIT_SIZE   0x1UL
+#define DX_ENV_CLK_STATUS_SEP_CLK_STATUS_BIT_SHIFT  0x8UL
+#define DX_ENV_CLK_STATUS_SEP_CLK_STATUS_BIT_SIZE   0x1UL
+#define DX_ENV_CLK_STATUS_COMM_CLK_STATUS_BIT_SHIFT  0x9UL
+#define DX_ENV_CLK_STATUS_COMM_CLK_STATUS_BIT_SIZE   0x1UL
+#define DX_ENV_COUNTER_CLR_REG_OFFSET     0x118UL
+#define DX_ENV_COUNTER_CLR_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_COUNTER_CLR_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_COUNTER_RD_REG_OFFSET     0x11CUL
+#define DX_ENV_COUNTER_RD_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_COUNTER_RD_VALUE_BIT_SIZE    0x20UL
+#define DX_ENV_CC_SECOND_BM_LOWER_BOUND_ADDR_REG_OFFSET     0x120UL
+#define DX_ENV_CC_SECOND_BM_LOWER_BOUND_ADDR_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_SECOND_BM_LOWER_BOUND_ADDR_VALUE_BIT_SIZE    0x14UL
+#define DX_ENV_CC_SECOND_BM_UPPER_BOUND_ADDR_REG_OFFSET     0x124UL
+#define DX_ENV_CC_SECOND_BM_UPPER_BOUND_ADDR_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_SECOND_BM_UPPER_BOUND_ADDR_VALUE_BIT_SIZE    0x14UL
+#define DX_ENV_CC_SECOND_BM_ENB_ADDR_REG_OFFSET     0x128UL
+#define DX_ENV_CC_SECOND_BM_ENB_ADDR_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_SECOND_BM_ENB_ADDR_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_CC_SECOND_BM_ERR_ACK_ADDR_REG_OFFSET     0x12CUL
+#define DX_ENV_CC_SECOND_BM_ERR_ACK_ADDR_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_SECOND_BM_ERR_ACK_ADDR_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_SECOND_BM_CC_ERR_ADDR_REG_OFFSET     0x130UL
+#define DX_ENV_SECOND_BM_CC_ERR_ADDR_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_SECOND_BM_CC_ERR_ADDR_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_RNG_DEBUG_ENABLE_REG_OFFSET     0x430UL
+#define DX_ENV_RNG_DEBUG_ENABLE_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_RNG_DEBUG_ENABLE_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_CC_WARM_BOOT_FINISHED_REG_OFFSET     0x434UL
+#define DX_ENV_CC_WARM_BOOT_FINISHED_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_WARM_BOOT_FINISHED_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_CC_LCS_REG_OFFSET     0x43CUL
+#define DX_ENV_CC_LCS_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_LCS_VALUE_BIT_SIZE    0x8UL
+#define DX_ENV_CC_IS_CM_DM_SECURE_RMA_REG_OFFSET     0x440UL
+#define DX_ENV_CC_IS_CM_DM_SECURE_RMA_IS_CM_BIT_SHIFT  0x0UL
+#define DX_ENV_CC_IS_CM_DM_SECURE_RMA_IS_CM_BIT_SIZE   0x1UL
+#define DX_ENV_CC_IS_CM_DM_SECURE_RMA_IS_DM_BIT_SHIFT  0x1UL
+#define DX_ENV_CC_IS_CM_DM_SECURE_RMA_IS_DM_BIT_SIZE   0x1UL
+#define DX_ENV_CC_IS_CM_DM_SECURE_RMA_IS_SECURE_BIT_SHIFT  0x2UL
+#define DX_ENV_CC_IS_CM_DM_SECURE_RMA_IS_SECURE_BIT_SIZE   0x1UL
+#define DX_ENV_CC_IS_CM_DM_SECURE_RMA_IS_RMA_BIT_SHIFT  0x3UL
+#define DX_ENV_CC_IS_CM_DM_SECURE_RMA_IS_RMA_BIT_SIZE   0x1UL
+#define DX_ENV_DCU_EN_REG_OFFSET     0x444UL
+#define DX_ENV_DCU_EN_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_DCU_EN_VALUE_BIT_SIZE    0x20UL
+#define DX_ENV_CC_LCS_IS_VALID_REG_OFFSET     0x448UL
+#define DX_ENV_CC_LCS_IS_VALID_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_LCS_IS_VALID_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_CRYPTOKEY_0_REG_OFFSET     0x450UL
+#define DX_ENV_CRYPTOKEY_0_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CRYPTOKEY_0_VALUE_BIT_SIZE    0x20UL
+#define DX_ENV_CRYPTOKEY_1_REG_OFFSET     0x454UL
+#define DX_ENV_CRYPTOKEY_1_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CRYPTOKEY_1_VALUE_BIT_SIZE    0x20UL
+#define DX_ENV_CRYPTOKEY_2_REG_OFFSET     0x458UL
+#define DX_ENV_CRYPTOKEY_2_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CRYPTOKEY_2_VALUE_BIT_SIZE    0x20UL
+#define DX_ENV_CRYPTOKEY_3_REG_OFFSET     0x45CUL
+#define DX_ENV_CRYPTOKEY_3_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CRYPTOKEY_3_VALUE_BIT_SIZE    0x20UL
+#define DX_ENV_CRYPTOKEY_4_REG_OFFSET     0x460UL
+#define DX_ENV_CRYPTOKEY_4_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CRYPTOKEY_4_VALUE_BIT_SIZE    0x20UL
+#define DX_ENV_CRYPTOKEY_5_REG_OFFSET     0x464UL
+#define DX_ENV_CRYPTOKEY_5_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CRYPTOKEY_5_VALUE_BIT_SIZE    0x20UL
+#define DX_ENV_CRYPTOKEY_6_REG_OFFSET     0x468UL
+#define DX_ENV_CRYPTOKEY_6_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CRYPTOKEY_6_VALUE_BIT_SIZE    0x20UL
+#define DX_ENV_CRYPTOKEY_7_REG_OFFSET     0x46CUL
+#define DX_ENV_CRYPTOKEY_7_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CRYPTOKEY_7_VALUE_BIT_SIZE    0x20UL
+#define DX_ENV_POWER_DOWN_REG_OFFSET     0x478UL
+#define DX_ENV_POWER_DOWN_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_POWER_DOWN_VALUE_BIT_SIZE    0x20UL
+#define DX_ENV_POWER_DOWN_EN_REG_OFFSET     0x47CUL
+#define DX_ENV_POWER_DOWN_EN_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_POWER_DOWN_EN_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_OTF_SECURE_BOOT_DONE_REG_OFFSET     0x480UL
+#define DX_ENV_OTF_SECURE_BOOT_DONE_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_OTF_SECURE_BOOT_DONE_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_DCU_H_EN_REG_OFFSET     0x484UL
+#define DX_ENV_DCU_H_EN_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_DCU_H_EN_VALUE_BIT_SIZE    0x20UL
+#define DX_ENV_VERSION_REG_OFFSET     0x488UL
+#define DX_ENV_VERSION_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_VERSION_VALUE_BIT_SIZE    0x20UL
+#define DX_ENV_FUSE_AIB_1K_OFFSET_REG_OFFSET     0x48CUL
+#define DX_ENV_FUSE_AIB_1K_OFFSET_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_FUSE_AIB_1K_OFFSET_VALUE_BIT_SIZE    0x2UL
+/* --------------------------------------*/
+/* BLOCK: ENV_CC_MEMORIES                */
+/* --------------------------------------*/
+#define DX_ENV_FUSE_READY_REG_OFFSET     0x414UL
+#define DX_ENV_FUSE_READY_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_FUSE_READY_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_ROM_BANK_REG_OFFSET     0x420UL
+#define DX_ENV_ROM_BANK_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_ROM_BANK_VALUE_BIT_SIZE    0x2UL
+#define DX_ENV_PERF_RAM_MASTER_REG_OFFSET     0x500UL
+#define DX_ENV_PERF_RAM_MASTER_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_PERF_RAM_MASTER_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_PERF_RAM_ADDR_HIGH4_REG_OFFSET     0x504UL
+#define DX_ENV_PERF_RAM_ADDR_HIGH4_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_PERF_RAM_ADDR_HIGH4_VALUE_BIT_SIZE    0x2UL
+#define DX_ENV_FUSES_RAM_REG_OFFSET     0x800UL
+#define DX_ENV_FUSES_RAM_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_FUSES_RAM_VALUE_BIT_SIZE    0x20UL
+/* --------------------------------------*/
+/* BLOCK: ENV_PERF_RAM_BASE              */
+/* --------------------------------------*/
+#define DX_ENV_PERF_RAM_BASE_REG_OFFSET     0x0UL
+#define DX_ENV_PERF_RAM_BASE_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_PERF_RAM_BASE_VALUE_BIT_SIZE    0x20UL
+
+#endif /*__DX_ENV_H__*/
diff --git a/drivers/staging/sep54/dx_host.h b/drivers/staging/sep54/dx_host.h
new file mode 100644
index 0000000..0ae53b2
--- /dev/null
+++ b/drivers/staging/sep54/dx_host.h
@@ -0,0 +1,398 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+#ifndef __DX_HOST_H__
+#define __DX_HOST_H__
+/* -------------------------------------- */
+/* BLOCK: HOST                            */
+/* -------------------------------------- */
+#define DX_HOST_IRR_REG_OFFSET     0xA00UL
+#define DX_HOST_IRR_SEP_WATCHDOG_BIT_SHIFT  0x0UL
+#define DX_HOST_IRR_SEP_WATCHDOG_BIT_SIZE   0x1UL
+#define DX_HOST_IRR_DSCRPTR_DONE_LOW_INT_BIT_SHIFT  0x2UL
+#define DX_HOST_IRR_DSCRPTR_DONE_LOW_INT_BIT_SIZE   0x1UL
+#define DX_HOST_IRR_HOST_SRAM_VIO_BIT_SHIFT  0x3UL
+#define DX_HOST_IRR_HOST_SRAM_VIO_BIT_SIZE   0x1UL
+#define DX_HOST_IRR_SRAM_TO_DIN_INT_BIT_SHIFT  0x4UL
+#define DX_HOST_IRR_SRAM_TO_DIN_INT_BIT_SIZE   0x1UL
+#define DX_HOST_IRR_DOUT_TO_SRAM_INT_BIT_SHIFT  0x5UL
+#define DX_HOST_IRR_DOUT_TO_SRAM_INT_BIT_SIZE   0x1UL
+#define DX_HOST_IRR_MEM_TO_DIN_INT_BIT_SHIFT  0x6UL
+#define DX_HOST_IRR_MEM_TO_DIN_INT_BIT_SIZE   0x1UL
+#define DX_HOST_IRR_DOUT_TO_MEM_INT_BIT_SHIFT  0x7UL
+#define DX_HOST_IRR_DOUT_TO_MEM_INT_BIT_SIZE   0x1UL
+#define DX_HOST_IRR_AXI_ERR_INT_BIT_SHIFT  0x8UL
+#define DX_HOST_IRR_AXI_ERR_INT_BIT_SIZE   0x1UL
+#define DX_HOST_IRR_PKA_EXP_INT_BIT_SHIFT  0x9UL
+#define DX_HOST_IRR_PKA_EXP_INT_BIT_SIZE   0x1UL
+#define DX_HOST_IRR_RNG_INT_BIT_SHIFT  0xAUL
+#define DX_HOST_IRR_RNG_INT_BIT_SIZE   0x1UL
+#define DX_HOST_IRR_SEP_HOST_GPR0_INT_BIT_SHIFT  0xBUL
+#define DX_HOST_IRR_SEP_HOST_GPR0_INT_BIT_SIZE   0x1UL
+#define DX_HOST_IRR_SEP_HOST_GPR1_INT_BIT_SHIFT  0xCUL
+#define DX_HOST_IRR_SEP_HOST_GPR1_INT_BIT_SIZE   0x1UL
+#define DX_HOST_IRR_SEP_HOST_GPR2_INT_BIT_SHIFT  0xDUL
+#define DX_HOST_IRR_SEP_HOST_GPR2_INT_BIT_SIZE   0x1UL
+#define DX_HOST_IRR_SEP_HOST_GPR3_INT_BIT_SHIFT  0xEUL
+#define DX_HOST_IRR_SEP_HOST_GPR3_INT_BIT_SIZE   0x1UL
+#define DX_HOST_IRR_SEP_HOST_GPR4_INT_BIT_SHIFT  0xFUL
+#define DX_HOST_IRR_SEP_HOST_GPR4_INT_BIT_SIZE   0x1UL
+#define DX_HOST_IRR_SEP_HOST_GPR5_INT_BIT_SHIFT  0x10UL
+#define DX_HOST_IRR_SEP_HOST_GPR5_INT_BIT_SIZE   0x1UL
+#define DX_HOST_IRR_SEP_HOST_GPR6_INT_BIT_SHIFT  0x11UL
+#define DX_HOST_IRR_SEP_HOST_GPR6_INT_BIT_SIZE   0x1UL
+#define DX_HOST_IRR_SEP_HOST_GPR7_INT_BIT_SHIFT  0x12UL
+#define DX_HOST_IRR_SEP_HOST_GPR7_INT_BIT_SIZE   0x1UL
+#define DX_HOST_IRR_DSCRPTR_WATERMARK_INT_BIT_SHIFT  0x13UL
+#define DX_HOST_IRR_DSCRPTR_WATERMARK_INT_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_REG_OFFSET     0xA04UL
+#define DX_HOST_IMR_SEP_WATCHDOG_MASK_BIT_SHIFT  0x0UL
+#define DX_HOST_IMR_SEP_WATCHDOG_MASK_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_NOT_USED_MASK_BIT_SHIFT  0x1UL
+#define DX_HOST_IMR_NOT_USED_MASK_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_DSCRPTR_COMPLETION_MASK_BIT_SHIFT  0x2UL
+#define DX_HOST_IMR_DSCRPTR_COMPLETION_MASK_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_HOST_SRAM_VIO_MASK_BIT_SHIFT  0x3UL
+#define DX_HOST_IMR_HOST_SRAM_VIO_MASK_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_SRAM_TO_DIN_MASK_BIT_SHIFT  0x4UL
+#define DX_HOST_IMR_SRAM_TO_DIN_MASK_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_DOUT_TO_SRAM_MASK_BIT_SHIFT  0x5UL
+#define DX_HOST_IMR_DOUT_TO_SRAM_MASK_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_MEM_TO_DIN_MASK_BIT_SHIFT  0x6UL
+#define DX_HOST_IMR_MEM_TO_DIN_MASK_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_DOUT_TO_MEM_MASK_BIT_SHIFT  0x7UL
+#define DX_HOST_IMR_DOUT_TO_MEM_MASK_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_AXI_ERR_MASK_BIT_SHIFT  0x8UL
+#define DX_HOST_IMR_AXI_ERR_MASK_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_PKA_EXP_MASK_BIT_SHIFT  0x9UL
+#define DX_HOST_IMR_PKA_EXP_MASK_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_RNG_INT_MASK_BIT_SHIFT  0xAUL
+#define DX_HOST_IMR_RNG_INT_MASK_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_SEP_HOST_GPR0_MASK_BIT_SHIFT  0xBUL
+#define DX_HOST_IMR_SEP_HOST_GPR0_MASK_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_SEP_HOST_GPR1_MASK_BIT_SHIFT  0xCUL
+#define DX_HOST_IMR_SEP_HOST_GPR1_MASK_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_SEP_HOST_GPR2_MASK_BIT_SHIFT  0xDUL
+#define DX_HOST_IMR_SEP_HOST_GPR2_MASK_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_SEP_HOST_GPR3_MASK_BIT_SHIFT  0xEUL
+#define DX_HOST_IMR_SEP_HOST_GPR3_MASK_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_SEP_HOST_GPR4_MASK_BIT_SHIFT  0xFUL
+#define DX_HOST_IMR_SEP_HOST_GPR4_MASK_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_SEP_HOST_GPR5_MASK_BIT_SHIFT  0x10UL
+#define DX_HOST_IMR_SEP_HOST_GPR5_MASK_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_SEP_HOST_GPR6_MASK_BIT_SHIFT  0x11UL
+#define DX_HOST_IMR_SEP_HOST_GPR6_MASK_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_SEP_HOST_GPR7_MASK_BIT_SHIFT  0x12UL
+#define DX_HOST_IMR_SEP_HOST_GPR7_MASK_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_DSCRPTR_WATERMARK_MASK0_BIT_SHIFT  0x13UL
+#define DX_HOST_IMR_DSCRPTR_WATERMARK_MASK0_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_DSCRPTR_WATERMARK_MASK1_BIT_SHIFT  0x14UL
+#define DX_HOST_IMR_DSCRPTR_WATERMARK_MASK1_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_CNTX_SWITCH_CNTR_EXPIRED_BIT_SHIFT  0x15UL
+#define DX_HOST_IMR_CNTX_SWITCH_CNTR_EXPIRED_BIT_SIZE   0x1UL
+#define DX_HOST_ICR_REG_OFFSET     0xA08UL
+#define DX_HOST_ICR_SEP_WATCHDOG_CLEAR_BIT_SHIFT  0x0UL
+#define DX_HOST_ICR_SEP_WATCHDOG_CLEAR_BIT_SIZE   0x1UL
+#define DX_HOST_ICR_DSCRPTR_COMPLETION_BIT_SHIFT  0x2UL
+#define DX_HOST_ICR_DSCRPTR_COMPLETION_BIT_SIZE   0x1UL
+#define DX_HOST_ICR_HOST_SRAM_VIO_CLEAR_BIT_SHIFT  0x3UL
+#define DX_HOST_ICR_HOST_SRAM_VIO_CLEAR_BIT_SIZE   0x1UL
+#define DX_HOST_ICR_SRAM_TO_DIN_CLEAR_BIT_SHIFT  0x4UL
+#define DX_HOST_ICR_SRAM_TO_DIN_CLEAR_BIT_SIZE   0x1UL
+#define DX_HOST_ICR_DOUT_TO_SRAM_CLEAR_BIT_SHIFT  0x5UL
+#define DX_HOST_ICR_DOUT_TO_SRAM_CLEAR_BIT_SIZE   0x1UL
+#define DX_HOST_ICR_MEM_TO_DIN_CLEAR_BIT_SHIFT  0x6UL
+#define DX_HOST_ICR_MEM_TO_DIN_CLEAR_BIT_SIZE   0x1UL
+#define DX_HOST_ICR_DOUT_TO_MEM_CLEAR_BIT_SHIFT  0x7UL
+#define DX_HOST_ICR_DOUT_TO_MEM_CLEAR_BIT_SIZE   0x1UL
+#define DX_HOST_ICR_AXI_ERR_CLEAR_BIT_SHIFT  0x8UL
+#define DX_HOST_ICR_AXI_ERR_CLEAR_BIT_SIZE   0x1UL
+#define DX_HOST_ICR_PKA_EXP_CLEAR_BIT_SHIFT  0x9UL
+#define DX_HOST_ICR_PKA_EXP_CLEAR_BIT_SIZE   0x1UL
+#define DX_HOST_ICR_RNG_INT_CLEAR_BIT_SHIFT  0xAUL
+#define DX_HOST_ICR_RNG_INT_CLEAR_BIT_SIZE   0x1UL
+#define DX_HOST_ICR_SEP_HOST_GPR0_CLEAR_BIT_SHIFT  0xBUL
+#define DX_HOST_ICR_SEP_HOST_GPR0_CLEAR_BIT_SIZE   0x1UL
+#define DX_HOST_ICR_SEP_HOST_GPR1_CLEAR_BIT_SHIFT  0xCUL
+#define DX_HOST_ICR_SEP_HOST_GPR1_CLEAR_BIT_SIZE   0x1UL
+#define DX_HOST_ICR_SEP_HOST_GPR2_CLEAR_BIT_SHIFT  0xDUL
+#define DX_HOST_ICR_SEP_HOST_GPR2_CLEAR_BIT_SIZE   0x1UL
+#define DX_HOST_ICR_SEP_HOST_GPR3_CLEAR_BIT_SHIFT  0xEUL
+#define DX_HOST_ICR_SEP_HOST_GPR3_CLEAR_BIT_SIZE   0x1UL
+#define DX_HOST_ICR_SEP_HOST_GPR4_CLEAR_BIT_SHIFT  0xFUL
+#define DX_HOST_ICR_SEP_HOST_GPR4_CLEAR_BIT_SIZE   0x1UL
+#define DX_HOST_ICR_SEP_HOST_GPR5_CLEAR_BIT_SHIFT  0x10UL
+#define DX_HOST_ICR_SEP_HOST_GPR5_CLEAR_BIT_SIZE   0x1UL
+#define DX_HOST_ICR_SEP_HOST_GPR6_CLEAR_BIT_SHIFT  0x11UL
+#define DX_HOST_ICR_SEP_HOST_GPR6_CLEAR_BIT_SIZE   0x1UL
+#define DX_HOST_ICR_SEP_HOST_GPR7_CLEAR_BIT_SHIFT  0x12UL
+#define DX_HOST_ICR_SEP_HOST_GPR7_CLEAR_BIT_SIZE   0x1UL
+#define DX_HOST_ICR_DSCRPTR_WATERMARK_QUEUE0_CLEAR_BIT_SHIFT  0x13UL
+#define DX_HOST_ICR_DSCRPTR_WATERMARK_QUEUE0_CLEAR_BIT_SIZE   0x1UL
+#define DX_HOST_SEP_SRAM_THRESHOLD_REG_OFFSET     0xA10UL
+#define DX_HOST_SEP_SRAM_THRESHOLD_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_SEP_SRAM_THRESHOLD_VALUE_BIT_SIZE    0x10UL
+#define DX_HOST_SEP_BUSY_REG_OFFSET     0xA14UL
+#define DX_HOST_SEP_BUSY_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_SEP_BUSY_VALUE_BIT_SIZE    0x1UL
+#define DX_HOST_SEP_SW_MONITOR_REG_OFFSET     0xA20UL
+#define DX_HOST_SEP_SW_MONITOR_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_SEP_SW_MONITOR_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_CC_SW_RST_REG_OFFSET     0xA40UL
+#define DX_HOST_CC_SW_RST_CC_SW_RST_REQ_BIT_SHIFT  0x0UL
+#define DX_HOST_CC_SW_RST_CC_SW_RST_REQ_BIT_SIZE   0x1UL
+#define DX_HOST_CC_SW_RST_CC_SW_RST_FORCE_BIT_SHIFT  0x1UL
+#define DX_HOST_CC_SW_RST_CC_SW_RST_FORCE_BIT_SIZE   0x1UL
+#define DX_HOST_CC_SW_RST_AXIS_SYSREQ_BIT_SHIFT  0x2UL
+#define DX_HOST_CC_SW_RST_AXIS_SYSREQ_BIT_SIZE   0x1UL
+#define DX_HOST_CC_SW_RST_AXIM_SYSREQ_BIT_SHIFT  0x3UL
+#define DX_HOST_CC_SW_RST_AXIM_SYSREQ_BIT_SIZE   0x1UL
+#define DX_HOST_SEP_HOST_GPR0_REG_OFFSET     0xA80UL
+#define DX_HOST_SEP_HOST_GPR0_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_SEP_HOST_GPR0_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_SEP_HOST_GPR1_REG_OFFSET     0xA88UL
+#define DX_HOST_SEP_HOST_GPR1_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_SEP_HOST_GPR1_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_SEP_HOST_GPR2_REG_OFFSET     0xA90UL
+#define DX_HOST_SEP_HOST_GPR2_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_SEP_HOST_GPR2_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_SEP_HOST_GPR3_REG_OFFSET     0xA98UL
+#define DX_HOST_SEP_HOST_GPR3_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_SEP_HOST_GPR3_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_SEP_HOST_GPR4_REG_OFFSET     0xAA0UL
+#define DX_HOST_SEP_HOST_GPR4_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_SEP_HOST_GPR4_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_SEP_HOST_GPR5_REG_OFFSET     0xAA8UL
+#define DX_HOST_SEP_HOST_GPR5_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_SEP_HOST_GPR5_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_SEP_HOST_GPR6_REG_OFFSET     0xAB0UL
+#define DX_HOST_SEP_HOST_GPR6_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_SEP_HOST_GPR6_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_SEP_HOST_GPR7_REG_OFFSET     0xAB8UL
+#define DX_HOST_SEP_HOST_GPR7_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_SEP_HOST_GPR7_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_HOST_SEP_GPR0_REG_OFFSET     0xA84UL
+#define DX_HOST_HOST_SEP_GPR0_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_HOST_SEP_GPR0_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_HOST_SEP_GPR1_REG_OFFSET     0xA8CUL
+#define DX_HOST_HOST_SEP_GPR1_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_HOST_SEP_GPR1_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_HOST_SEP_GPR2_REG_OFFSET     0xA94UL
+#define DX_HOST_HOST_SEP_GPR2_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_HOST_SEP_GPR2_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_HOST_SEP_GPR3_REG_OFFSET     0xA9CUL
+#define DX_HOST_HOST_SEP_GPR3_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_HOST_SEP_GPR3_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_HOST_SEP_GPR4_REG_OFFSET     0xAA4UL
+#define DX_HOST_HOST_SEP_GPR4_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_HOST_SEP_GPR4_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_HOST_SEP_GPR5_REG_OFFSET     0xAACUL
+#define DX_HOST_HOST_SEP_GPR5_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_HOST_SEP_GPR5_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_HOST_SEP_GPR6_REG_OFFSET     0xAB4UL
+#define DX_HOST_HOST_SEP_GPR6_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_HOST_SEP_GPR6_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_HOST_SEP_GPR7_REG_OFFSET     0xABCUL
+#define DX_HOST_HOST_SEP_GPR7_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_HOST_SEP_GPR7_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_HOST_ENDIAN_REG_OFFSET     0xAD0UL
+#define DX_HOST_HOST_ENDIAN_DIN_ICACHE_END_BIT_SHIFT  0x0UL
+#define DX_HOST_HOST_ENDIAN_DIN_ICACHE_END_BIT_SIZE   0x1UL
+#define DX_HOST_HOST_ENDIAN_DIN_DCAHE_END_BIT_SHIFT  0x1UL
+#define DX_HOST_HOST_ENDIAN_DIN_DCAHE_END_BIT_SIZE   0x1UL
+#define DX_HOST_HOST_ENDIAN_DIN_DD_END_BIT_SHIFT  0x2UL
+#define DX_HOST_HOST_ENDIAN_DIN_DD_END_BIT_SIZE   0x1UL
+#define DX_HOST_HOST_ENDIAN_DIN_DMA_END_BIT_SHIFT  0x3UL
+#define DX_HOST_HOST_ENDIAN_DIN_DMA_END_BIT_SIZE   0x1UL
+#define DX_HOST_HOST_ENDIAN_DOUT_ICACHE_END_BIT_SHIFT  0x4UL
+#define DX_HOST_HOST_ENDIAN_DOUT_ICACHE_END_BIT_SIZE   0x1UL
+#define DX_HOST_HOST_ENDIAN_DOUT_DCACHE_END_BIT_SHIFT  0x5UL
+#define DX_HOST_HOST_ENDIAN_DOUT_DCACHE_END_BIT_SIZE   0x1UL
+#define DX_HOST_HOST_ENDIAN_DOUT_DD_END_BIT_SHIFT  0x6UL
+#define DX_HOST_HOST_ENDIAN_DOUT_DD_END_BIT_SIZE   0x1UL
+#define DX_HOST_HOST_ENDIAN_DOUT_DMA_END_BIT_SHIFT  0x7UL
+#define DX_HOST_HOST_ENDIAN_DOUT_DMA_END_BIT_SIZE   0x1UL
+#define DX_HOST_HOST_ENDIAN_INTENAL_WORD_END_BIT_SHIFT  0x8UL
+#define DX_HOST_HOST_ENDIAN_INTENAL_WORD_END_BIT_SIZE   0x8UL
+#define DX_SRAM_DATA_REG_OFFSET     0xF00UL
+#define DX_SRAM_DATA_VALUE_BIT_SHIFT   0x0UL
+#define DX_SRAM_DATA_VALUE_BIT_SIZE    0x20UL
+#define DX_SRAM_ADDR_REG_OFFSET     0xF04UL
+#define DX_SRAM_ADDR_VALUE_BIT_SHIFT   0x0UL
+#define DX_SRAM_ADDR_VALUE_BIT_SIZE    0xFUL
+#define DX_SRAM_DATA_READY_REG_OFFSET     0xF08UL
+#define DX_SRAM_DATA_READY_VALUE_BIT_SHIFT   0x0UL
+#define DX_SRAM_DATA_READY_VALUE_BIT_SIZE    0x1UL
+#define DX_HOST_RKEK1_0_REG_OFFSET     0xA00UL
+#define DX_HOST_RKEK1_0_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_RKEK1_0_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_RKEK1_1_REG_OFFSET     0xA04UL
+#define DX_HOST_RKEK1_1_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_RKEK1_1_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_RKEK1_2_REG_OFFSET     0xA08UL
+#define DX_HOST_RKEK1_2_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_RKEK1_2_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_RKEK1_3_REG_OFFSET     0xA0CUL
+#define DX_HOST_RKEK1_3_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_RKEK1_3_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_RKEK1_4_REG_OFFSET     0xA10UL
+#define DX_HOST_RKEK1_4_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_RKEK1_4_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_RKEK1_5_REG_OFFSET     0xA14UL
+#define DX_HOST_RKEK1_5_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_RKEK1_5_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_RKEK1_6_REG_OFFSET     0xA18UL
+#define DX_HOST_RKEK1_6_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_RKEK1_6_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_RKEK1_7_REG_OFFSET     0xA1CUL
+#define DX_HOST_RKEK1_7_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_RKEK1_7_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_RKEK1_ECC_REG_OFFSET     0xA20UL
+#define DX_HOST_RKEK1_ECC_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_RKEK1_ECC_VALUE_BIT_SIZE    0x20UL
+#define DX_LCS_REG_REG_OFFSET     0xA24UL
+#define DX_LCS_REG_VALUE_BIT_SHIFT   0x0UL
+#define DX_LCS_REG_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_RKEK2_0_REG_OFFSET     0xA2CUL
+#define DX_HOST_RKEK2_0_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_RKEK2_0_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_RKEK2_1_REG_OFFSET     0xA30UL
+#define DX_HOST_RKEK2_1_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_RKEK2_1_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_RKEK2_2_REG_OFFSET     0xA34UL
+#define DX_HOST_RKEK2_2_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_RKEK2_2_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_RKEK2_3_REG_OFFSET     0xA38UL
+#define DX_HOST_RKEK2_3_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_RKEK2_3_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_RKEK2_4_REG_OFFSET     0xA3CUL
+#define DX_HOST_RKEK2_4_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_RKEK2_4_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_RKEK2_5_REG_OFFSET     0xA40UL
+#define DX_HOST_RKEK2_5_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_RKEK2_5_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_RKEK2_6_REG_OFFSET     0xA44UL
+#define DX_HOST_RKEK2_6_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_RKEK2_6_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_RKEK2_7_REG_OFFSET     0xA48UL
+#define DX_HOST_RKEK2_7_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_RKEK2_7_VALUE_BIT_SIZE    0x20UL
+#define DX_NVM_CC_BOOT_REG_OFFSET     0xAA4UL
+#define DX_NVM_CC_BOOT_LARGE_RKEK_LOCAL_BIT_SHIFT  0x1UL
+#define DX_NVM_CC_BOOT_LARGE_RKEK_LOCAL_BIT_SIZE   0x1UL
+#define DX_NVM_CC_BOOT_HASH_IN_FUSES_LOCAL_BIT_SHIFT  0x2UL
+#define DX_NVM_CC_BOOT_HASH_IN_FUSES_LOCAL_BIT_SIZE   0x1UL
+#define DX_PAU_HOST_CNTR0_REG_OFFSET     0xA50UL
+#define DX_PAU_HOST_CNTR0_VALUE_BIT_SHIFT   0x0UL
+#define DX_PAU_HOST_CNTR0_VALUE_BIT_SIZE    0x20UL
+#define DX_PAU_HOST_CNTR1_REG_OFFSET     0xA54UL
+#define DX_PAU_HOST_CNTR1_VALUE_BIT_SHIFT   0x0UL
+#define DX_PAU_HOST_CNTR1_VALUE_BIT_SIZE    0x20UL
+#define DX_PAU_HOST_CNTR2_REG_OFFSET     0xA58UL
+#define DX_PAU_HOST_CNTR2_VALUE_BIT_SHIFT   0x0UL
+#define DX_PAU_HOST_CNTR2_VALUE_BIT_SIZE    0x20UL
+#define DX_PAU_HOST_CNTR3_REG_OFFSET     0xA5CUL
+#define DX_PAU_HOST_CNTR3_VALUE_BIT_SHIFT   0x0UL
+#define DX_PAU_HOST_CNTR3_VALUE_BIT_SIZE    0x20UL
+#define DX_PAU_HOST_CNTR4_REG_OFFSET     0xA60UL
+#define DX_PAU_HOST_CNTR4_VALUE_BIT_SHIFT   0x0UL
+#define DX_PAU_HOST_CNTR4_VALUE_BIT_SIZE    0x20UL
+#define DX_PAU_HOST_XOR_REG_OFFSET     0xA64UL
+#define DX_PAU_HOST_XOR_VALUE_BIT_SHIFT   0x0UL
+#define DX_PAU_HOST_XOR_VALUE_BIT_SIZE    0x20UL
+#define DX_PAU_HOST_MASK0_REG_OFFSET     0xA68UL
+#define DX_PAU_HOST_MASK0_PAU_HOST_MASK0_BIT_SHIFT  0x0UL
+#define DX_PAU_HOST_MASK0_PAU_HOST_MASK0_BIT_SIZE   0xDUL
+#define DX_PAU_HOST_MASK0_UN_USED_BIT_SHIFT  0xDUL
+#define DX_PAU_HOST_MASK0_UN_USED_BIT_SIZE   0x13UL
+#define DX_PAU_HOST_MASK1_REG_OFFSET     0xA6CUL
+#define DX_PAU_HOST_MASK1_PAU_HOST_MASK1_BIT_SHIFT  0x0UL
+#define DX_PAU_HOST_MASK1_PAU_HOST_MASK1_BIT_SIZE   0x19UL
+#define DX_PAU_HOST_MASK1_UN_USED_BIT_SHIFT  0x19UL
+#define DX_PAU_HOST_MASK1_UN_USED_BIT_SIZE   0x7UL
+#define DX_PAU_HOST_MASK2_REG_OFFSET     0xA70UL
+#define DX_PAU_HOST_MASK2_PAU_HOST_MASK2_BIT_SHIFT  0x0UL
+#define DX_PAU_HOST_MASK2_PAU_HOST_MASK2_BIT_SIZE   0x19UL
+#define DX_PAU_HOST_MASK2_UN_USED_BIT_SHIFT  0x19UL
+#define DX_PAU_HOST_MASK2_UN_USED_BIT_SIZE   0x7UL
+#define DX_PAU_HOST_MASK3_REG_OFFSET     0xA74UL
+#define DX_PAU_HOST_MASK3_PAU_HOST_MASK3_BIT_SHIFT  0x0UL
+#define DX_PAU_HOST_MASK3_PAU_HOST_MASK3_BIT_SIZE   0x1EUL
+#define DX_PAU_HOST_MASK3_UN_USED_BIT_SHIFT  0x1EUL
+#define DX_PAU_HOST_MASK3_UN_USED_BIT_SIZE   0x2UL
+#define DX_PAU_HOST_MASK4_REG_OFFSET     0xA78UL
+#define DX_PAU_HOST_MASK4_PAU_HOST_MASK4_BIT_SHIFT  0x0UL
+#define DX_PAU_HOST_MASK4_PAU_HOST_MASK4_BIT_SIZE   0x1EUL
+#define DX_PAU_HOST_MASK4_UN_USED_BIT_SHIFT  0x1EUL
+#define DX_PAU_HOST_MASK4_UN_USED_BIT_SIZE   0x2UL
+#define DX_PAU_HOST_CONFIG_REG_OFFSET     0xA7CUL
+#define DX_PAU_HOST_CONFIG_WRAPAROUND0_BIT_SHIFT  0x0UL
+#define DX_PAU_HOST_CONFIG_WRAPAROUND0_BIT_SIZE   0x1UL
+#define DX_PAU_HOST_CONFIG_WRAPAROUND1_BIT_SHIFT  0x1UL
+#define DX_PAU_HOST_CONFIG_WRAPAROUND1_BIT_SIZE   0x1UL
+#define DX_PAU_HOST_CONFIG_WRAPAROUND2_BIT_SHIFT  0x2UL
+#define DX_PAU_HOST_CONFIG_WRAPAROUND2_BIT_SIZE   0x1UL
+#define DX_PAU_HOST_CONFIG_WRAPAROUND3_BIT_SHIFT  0x3UL
+#define DX_PAU_HOST_CONFIG_WRAPAROUND3_BIT_SIZE   0x1UL
+#define DX_PAU_HOST_CONFIG_WRAPAROUND4_BIT_SHIFT  0x4UL
+#define DX_PAU_HOST_CONFIG_WRAPAROUND4_BIT_SIZE   0x1UL
+#define DX_PAU_HOST_CONFIG_OVERFLOW_SIGNAL0_BIT_SHIFT  0x5UL
+#define DX_PAU_HOST_CONFIG_OVERFLOW_SIGNAL0_BIT_SIZE   0x1UL
+#define DX_PAU_HOST_CONFIG_OVERFLOW_SIGNAL1_BIT_SHIFT  0x6UL
+#define DX_PAU_HOST_CONFIG_OVERFLOW_SIGNAL1_BIT_SIZE   0x1UL
+#define DX_PAU_HOST_CONFIG_OVERFLOW_SIGNAL2_BIT_SHIFT  0x7UL
+#define DX_PAU_HOST_CONFIG_OVERFLOW_SIGNAL2_BIT_SIZE   0x1UL
+#define DX_PAU_HOST_CONFIG_OVERFLOW_SIGNAL3_BIT_SHIFT  0x8UL
+#define DX_PAU_HOST_CONFIG_OVERFLOW_SIGNAL3_BIT_SIZE   0x1UL
+#define DX_PAU_HOST_CONFIG_OVERFLOW_SIGNAL4_BIT_SHIFT  0x9UL
+#define DX_PAU_HOST_CONFIG_OVERFLOW_SIGNAL4_BIT_SIZE   0x1UL
+#define DX_PAU_HOST_CONFIG_ENABLE_COUNTING_BIT_SHIFT  0xAUL
+#define DX_PAU_HOST_CONFIG_ENABLE_COUNTING_BIT_SIZE   0x1UL
+#define DX_PAU_HOST_CONFIG_AND_COUNETER_EVENT_BIT_SHIFT  0xBUL
+#define DX_PAU_HOST_CONFIG_AND_COUNETER_EVENT_BIT_SIZE   0x1UL
+#define DX_PAU_HOST_CONFIG_ENABLE_COUNTING0_BIT_SHIFT  0xCUL
+#define DX_PAU_HOST_CONFIG_ENABLE_COUNTING0_BIT_SIZE   0x1UL
+#define DX_PAU_HOST_CONFIG_ENABLE_COUNTING1_BIT_SHIFT  0xDUL
+#define DX_PAU_HOST_CONFIG_ENABLE_COUNTING1_BIT_SIZE   0x1UL
+#define DX_PAU_HOST_CONFIG_ENABLE_COUNTING2_BIT_SHIFT  0xEUL
+#define DX_PAU_HOST_CONFIG_ENABLE_COUNTING2_BIT_SIZE   0x1UL
+#define DX_PAU_HOST_CONFIG_ENABLE_COUNTING3_BIT_SHIFT  0xFUL
+#define DX_PAU_HOST_CONFIG_ENABLE_COUNTING3_BIT_SIZE   0x1UL
+#define DX_HOST_REGION_MASK_REG_OFFSET     0xAC4UL
+#define DX_HOST_REGION_MASK_HOST_REGION_SECURED_MASK_BIT_SHIFT  0x0UL
+#define DX_HOST_REGION_MASK_HOST_REGION_SECURED_MASK_BIT_SIZE   0x10UL
+#define DX_HOST_REGION_MASK_HOST_REGION_NON_SECURED_MASK_BIT_SHIFT  0x10UL
+#define DX_HOST_REGION_MASK_HOST_REGION_NON_SECURED_MASK_BIT_SIZE   0x10UL
+#define DX_HOST_REGION_GPRS_MASK_REG_OFFSET     0xAC0UL
+#define DX_HOST_REGION_GPRS_MASK_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_REGION_GPRS_MASK_VALUE_BIT_SIZE    0x8UL
+#define DX_HOST_CC_SW_RESET_ALLOWED_REG_OFFSET     0xA48UL
+#define DX_HOST_CC_SW_RESET_ALLOWED_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_CC_SW_RESET_ALLOWED_VALUE_BIT_SIZE    0x1UL
+#define DX_HOST_CC_SIGNATURE_REG_OFFSET     0xAC8UL
+#define DX_HOST_CC_SIGNATURE_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_CC_SIGNATURE_VALUE_BIT_SIZE    0x20UL
+
+#endif /*__DX_HOST_H__*/
diff --git a/drivers/staging/sep54/dx_init_cc_abi.h b/drivers/staging/sep54/dx_init_cc_abi.h
new file mode 100644
index 0000000..c2c7bcd
--- /dev/null
+++ b/drivers/staging/sep54/dx_init_cc_abi.h
@@ -0,0 +1,204 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+/***************************************************************************
+ *  This file provides the CC-init ABI (SeP-Host binary interface)         *
+ ***************************************************************************/
+
+#ifndef __DX_INIT_CC_ABI_H__
+#define __DX_INIT_CC_ABI_H__
+
+#ifdef __KERNEL__
+#include <linux/types.h>
+#else
+/* For SeP code environment */
+#include <stdint.h>
+#endif
+#include "dx_bitops.h"
+
+#ifndef INT16_MAX
+#define INT16_MAX		(32767)
+#endif
+#ifndef INT32_MAX
+#define INT32_MAX		(2147483647)
+#endif
+
+/***********************************/
+/* SeP to host communication       */
+/***********************************/
+/* GPRs for CC-init state/status from SeP */
+#define DX_SEP_STATE_GPR_IDX               7	/* SeP state */
+#define DX_SEP_STATUS_GPR_IDX              6	/* SeP status */
+/* GPRs used for passing driver init. parameters */
+/* (Valid while in DX_SEP_STATE_DONE_COLD_BOOT) */
+#define DX_SEP_INIT_SEP_PROPS_GPR_IDX      3	/* SEP properties passed to the
+						 * driver (see fields below) */
+#define DX_SEP_INIT_FW_PROPS_GPR_IDX      2	/* FW properties passed to the
+						 * driver (see fields below) */
+#define DX_SEP_INIT_FW_VER_GPR_IDX         1	/* SeP FW images version */
+#define DX_SEP_INIT_ROM_VER_GPR_IDX        0	/* SeP ROM image version */
+
+/* Debugging "stdout" tunnel via GPR5 - see sep_driver_cc54.c for details */
+#define DX_SEP_HOST_PRINTF_GPR_IDX         5
+
+/* Fields in DX_SEP_INIT_FW_PROPS_GPR_IDX */
+/* MLLI table size in bytes */
+#define DX_SEP_INIT_MLLI_TBL_SIZE_BIT_OFFSET	0
+#define DX_SEP_INIT_MLLI_TBL_SIZE_BIT_SIZE	12
+/* Maximum number of work queues supported */
+#define DX_SEP_INIT_NUM_OF_QUEUES_BIT_OFFSET	12
+#define DX_SEP_INIT_NUM_OF_QUEUES_BIT_SIZE	4
+/* Number of availabel context cache entries */
+#define DX_SEP_INIT_CACHE_CTX_SIZE_BIT_OFFSET	16
+#define DX_SEP_INIT_CACHE_CTX_SIZE_BIT_SIZE     8
+
+/* Fields in DX_SEP_INIT_SEP_PROPS_GPR_IDX */
+/* SEP frequency */
+#define DX_SEP_INIT_SEP_FREQUENCY_BIT_OFFSET	0
+#define DX_SEP_INIT_SEP_FREQUENCY_BIT_SIZE	12
+
+/***********************************/
+/* Host to SeP communication       */
+/***********************************/
+/* GPRs for requests from host to SeP */
+#define DX_HOST_REQ_GPR_IDX 7	/* Host-to-SeP requests */
+#define DX_HOST_REQ_PARAM_GPR_IDX 6	/* Host request parameters */
+/* The parameters in GPR6 must be ready before writing the request to GPR7*/
+
+/* MAGIC value of TLV "FIRST" parameter */
+#define DX_FW_INIT_PARAM_FIRST_MAGIC	0x3243F6A8
+
+/* Type-Length word manipulation macros */
+/* Note that all TLV communication assumes little-endian words */
+/* (i.e., responsibility of host driver to convert to LE before passing) */
+#define DX_TL_WORD(type, length)  ((length) << 16 | (type))
+#define DX_TL_GET_TYPE(tl_word)   ((tl_word) & 0xFFFF)
+#define DX_TL_GET_LENGTH(tl_word) ((tl_word) >> 16)
+
+/* Macros for Assembly code */
+#define ASM_DX_SEP_STATE_ILLEGAL_INST   0x100
+#define ASM_DX_SEP_STATE_STACK_OVERFLOW 0x200
+/* SeP states over (SeP-to-host) GPR7*/
+enum dx_sep_state {
+	DX_SEP_STATE_OFF = 0x0,
+	DX_SEP_STATE_FATAL_ERROR = 0x1,
+	DX_SEP_STATE_START_SECURE_BOOT = 0x2,
+	DX_SEP_STATE_PROC_COLD_BOOT = 0x4,
+	DX_SEP_STATE_PROC_WARM_BOOT = 0x8,
+	DX_SEP_STATE_DONE_COLD_BOOT = 0x10,
+	DX_SEP_STATE_DONE_WARM_BOOT = 0x20,
+	/*DX_SEP_STATE_BM_ERR           = 0x40, */
+	/*DX_SEP_STATE_SECOND_BM_ERR    = 0x80, */
+	DX_SEP_STATE_ILLEGAL_INST = ASM_DX_SEP_STATE_ILLEGAL_INST,
+	DX_SEP_STATE_STACK_OVERFLOW = ASM_DX_SEP_STATE_STACK_OVERFLOW,
+	/* Response to DX_HOST_REQ_FW_INIT: */
+	DX_SEP_STATE_DONE_FW_INIT = 0x400,
+	DX_SEP_STATE_PROC_SLEEP_MODE = 0x800,
+	DX_SEP_STATE_DONE_SLEEP_MODE = 0x1000,
+	DX_SEP_STATE_FW_ABORT = 0xBAD0BAD0,
+	DX_SEP_STATE_ROM_ABORT = 0xBAD1BAD1,
+	DX_SEP_STATE_RESERVE32B = INT32_MAX
+};
+
+/* Host requests over (host-to-SeP) GPR7 */
+enum dx_host_req {
+	DX_HOST_REQ_RELEASE_CRYPTO_ENGINES = 0x0,
+	DX_HOST_REQ_ACQUIRE_CRYPTO_ENGINES = 0x2,
+	DX_HOST_REQ_CC_INIT = 0x1,
+	DX_HOST_REQ_FW_INIT = 0x4,
+	DX_HOST_REQ_SEP_SLEEP = 0x8,
+	DX_HOST_REQ_RESERVE32B = INT32_MAX
+};
+
+/* Init. TLV parameters from host to SeP */
+/* Some parameters are used by CC-init flow with DX_HOST_REQ_CC_INIT          *
+ * and the others by the host driver initialization with DX_HOST_REQ_FW_INIT  */
+enum dx_fw_init_tlv_params {
+	DX_FW_INIT_PARAM_NULL = 0,
+	/* Common parameters */
+	DX_FW_INIT_PARAM_FIRST = 1,	/* Param.=FIRST_MAGIC */
+	DX_FW_INIT_PARAM_LAST = 2,	/* Param.=checksum 32b */
+
+	/* CC-init. parameters */
+	DX_FW_INIT_PARAM_DISABLE_MODULES = 3,
+	DX_FW_INIT_PARAM_HOST_AXI_CONFIG = 4,
+	DX_FW_INIT_PARAM_HOST_DEF_APPLET_CONFIG = 5,
+	DX_FW_INIT_PARAM_SEP_FREQ = 6,
+
+	/* Host driver (post-cold-boot) parameters */
+	/* Number of descriptor queues: Length = 1 */
+	DX_FW_INIT_PARAM_NUM_OF_DESC_QS = 0x101,
+	/* The following parameters provide an array with value per queue: */
+	/* Length = num. of queues */
+	DX_FW_INIT_PARAM_DESC_QS_ADDR = 0x102,	/* Queue base addr. */
+	DX_FW_INIT_PARAM_DESC_QS_SIZE = 0x103,	/* Queue size(byte) */
+	/* FW context cache partition (num. of entries) per queue */
+	DX_FW_INIT_PARAM_CTX_CACHE_PART = 0x104,
+	/* sep request module parameters ( msg and response buffers and size */
+	DX_FW_INIT_PARAM_SEP_REQUEST_PARAMS = 0x105,
+	DX_FW_INIT_PARAM_RESERVE16B = INT16_MAX
+};
+
+/* FW-init. error code encoding - GPR6 contents in DX_SEP_STATE_DONE_FW_INIT  */
+/* | 0xE | Param. Type | Error code |  */
+/* |--4--|-----16------|----12------|  */
+#define DX_FW_INIT_ERR_CODE_SIZE 12
+#define DX_FW_INIT_ERR_PARAM_TYPE_SHIFT DX_FW_INIT_ERR_CODE_SIZE
+#define DX_FW_INIT_ERR_PARAM_TYPE_SIZE 16
+#define DX_FW_INIT_ERR_TYPE_SHIFT \
+	(DX_FW_INIT_ERR_PARAM_TYPE_SHIFT + DX_FW_INIT_ERR_PARAM_TYPE_SIZE)
+#define DX_FW_INIT_ERR_TYPE_SIZE 4
+#define DX_FW_INIT_ERR_TYPE_VAL 0xE
+
+#define DX_SEP_REQUEST_PARAM_MSG_LEN		3
+
+/* Build error word to put in status GPR6 */
+#define DX_FW_INIT_ERR_WORD(err_code, param_type)                   \
+	((DX_FW_INIT_ERR_TYPE_VAL << DX_FW_INIT_ERR_TYPE_SHIFT) |   \
+	 ((param_type & BITMASK(DX_FW_INIT_ERR_PARAM_TYPE_SIZE)) << \
+	  DX_FW_INIT_ERR_PARAM_TYPE_SHIFT) |                        \
+	 (err_code))
+/* Parse status of DX_SEP_STATE_DONE_FW_INIT*/
+#define DX_FW_INIT_IS_SUCCESS(status_word) ((err_word) == 0)
+#define DX_FW_INIT_GET_ERR_CODE(status_word) \
+	 ((status_word) & BITMASK(DX_FW_INIT_ERR_CODE_SIZE))
+
+/* FW INIT Error codes */
+/* extract from the status word - GPR6 - using DX_FW_INIT_GET_ERR_CODE() */
+enum dx_fw_init_error_code {
+	DX_FW_INIT_ERR_INVALID_TYPE = 0x001,
+	DX_FW_INIT_ERR_INVALID_LENGTH = 0x002,
+	DX_FW_INIT_ERR_INVALID_VALUE = 0x003,
+	DX_FW_INIT_ERR_PARAM_MISSING = 0x004,
+	DX_FW_INIT_ERR_NOT_SUPPORTED = 0x005,
+	DX_FW_INIT_ERR_RNG_FAILURE = 0x00F,
+	DX_FW_INIT_ERR_MALLOC_FAILURE = 0x0FC,
+	DX_FW_INIT_ERR_INIT_FAILURE = 0x0FD,
+	DX_FW_INIT_ERR_TIMEOUT = 0x0FE,
+	DX_FW_INIT_ERR_GENERAL_FAILURE = 0x0FF
+};
+
+#endif /*__DX_INIT_CC_ABI_H__*/
diff --git a/drivers/staging/sep54/dx_init_cc_defs.h b/drivers/staging/sep54/dx_init_cc_defs.h
new file mode 100644
index 0000000..4524c35
--- /dev/null
+++ b/drivers/staging/sep54/dx_init_cc_defs.h
@@ -0,0 +1,163 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+#ifndef __DX_INIT_CC_DEFS__H__
+#define __DX_INIT_CC_DEFS__H__
+
+/** @file dx_init_cc_defs.h
+*  \brief definitions for the CC54 initialization API
+*
+*  \version
+*  \author avis
+*/
+
+/* message token to sep */
+/* CC_INIT definitions */
+#define DX_CC_INIT_HEAD_MSG_TOKEN		0X544B2FBAUL
+
+/*The below enumerator includes the offsets inside the CC_Init message*/
+/*The last enum is the length of the message */
+enum dx_cc_init_msg_offset {
+	DX_CC_INIT_MSG_TOKEN_OFFSET,
+	DX_CC_INIT_MSG_LENGTH_OFFSET,
+	DX_CC_INIT_MSG_OP_CODE_OFFSET,
+	DX_CC_INIT_MSG_FLAGS_OFFSET,
+	DX_CC_INIT_MSG_RESIDENT_IMAGE_OFFSET,
+	DX_CC_INIT_MSG_I_CACHE_IMAGE_OFFSET,
+	DX_CC_INIT_MSG_I_CACHE_DEST_OFFSET,
+	DX_CC_INIT_MSG_I_CACHE_SIZE_OFFSET,
+	DX_CC_INIT_MSG_D_CACHE_ADDR_OFFSET,
+	DX_CC_INIT_MSG_D_CACHE_SIZE_OFFSET,
+	DX_CC_INIT_MSG_CC_INIT_EXT_ADDR_OFFSET,
+	DX_CC_INIT_MSG_USER_CONFIG_OFFSET,
+	DX_CC_INIT_MSG_VRL_ADDR_OFFSET,
+	DX_CC_INIT_MSG_MAGIC_NUM_OFFSET,
+	DX_CC_INIT_MSG_KEY_INDEX_OFFSET,
+	DX_CC_INIT_MSG_KEY_HASH_0_OFFSET,
+	DX_CC_INIT_MSG_KEY_HASH_1_OFFSET,
+	DX_CC_INIT_MSG_KEY_HASH_2_OFFSET,
+	DX_CC_INIT_MSG_KEY_HASH_3_OFFSET,
+	DX_CC_INIT_MSG_CHECK_SUM_OFFSET,
+	DX_CC_INIT_MSG_LENGTH
+};
+
+/* Set this value if key used in the VRL is to be verified against KEY_HASH
+   fields in the CC_INIT message */
+#define DX_CC_INIT_MSG_VRL_KEY_INDEX_INVALID 0xFFFFFFFF
+
+enum dx_cc_init_msg_icache_size {
+	DX_CC_INIT_MSG_ICACHE_SCR_DISABLE_SIZE,
+	DX_CC_INIT_MSG_ICACHE_SCR_256K_SIZE,
+	DX_CC_INIT_MSG_ICACHE_SCR_1M_SIZE,
+	DX_CC_INIT_MSG_ICACHE_SCR_2M_SIZE,
+	DX_CC_INIT_MSG_ICACHE_SCR_4M_SIZE,
+	DX_CC_INIT_MSG_ICACHE_SCR_INVALID_SIZE
+};
+/* Icache sizes enum to log2 -
+ * Map enum of dx_cc_init_msg_icache_size to log2(size)
+ * (-1) for invalid value. */
+#define DX_CC_ICACHE_SIZE_ENUM2LOG { -1, 18, 20, 21, 22, -1 }
+
+#define DX_CC_INIT_D_CACHE_MIN_SIZE_LOG2 17	/* 128KB */
+#define DX_CC_INIT_D_CACHE_MIN_SIZE (1 << DX_CC_INIT_D_CACHE_MIN_SIZE_LOG2)
+#define DX_CC_INIT_D_CACHE_MAX_SIZE_LOG2 27	/* 128MB */
+#define DX_CC_INIT_D_CACHE_MAX_SIZE (1 << DX_CC_INIT_D_CACHE_MAX_SIZE_LOG2)
+
+/* Bit flags for the CC_Init flags word*/
+/* The CC_Init resident address address is valid (it might be passed via VRL) */
+#define DX_CC_INIT_FLAGS_RESIDENT_ADDR_FLAG		0x00000001
+/* The CC_Init I$ address address is valid (it might be passed via VRL) */
+#define DX_CC_INIT_FLAGS_I_CACHE_ADDR_FLAG		0x00000002
+/* The CC_Init D$ address address is valid (First CC_Init does not config. D$)*/
+#define DX_CC_INIT_FLAGS_D_CACHE_EXIST_FLAG		0x00000004
+/* The CC_Init extension address is valid and should be used */
+#define DX_CC_INIT_FLAGS_INIT_EXT_FLAG			0x00000008
+/* The I$ (and applets) should be encrypted */
+#define DX_CC_INIT_FLAGS_CACHE_ENC_FLAG			0x00000010
+/* The I$ (and applets) should be scrambled */
+#define DX_CC_INIT_FLAGS_CACHE_SCRAMBLE_FLAG		0x00000020
+/* The I$ (and applets) should be copied to new address (Icache address) */
+#define DX_CC_INIT_FLAGS_CACHE_COPY_FLAG		0x00000040
+/* use the magic number in the CC_Init to verify the  VRL */
+#define DX_CC_INIT_FLAGS_MAGIC_NUMBER_FLAG		0x00000080
+
+#define DX_CC_INIT_FLAGS_CACHE_COPY_MASK_FLAG \
+	(DX_CC_INIT_FLAGS_CACHE_ENC_FLAG | \
+	DX_CC_INIT_FLAGS_CACHE_SCRAMBLE_FLAG | \
+	DX_CC_INIT_FLAGS_CACHE_COPY_FLAG)
+
+/*-------------------------------
+  STRUCTURES
+---------------------------------*/
+struct dx_cc_def_applet_msg {
+	u32 cc_flags;
+	u32 icache_image_addr;
+	u32 vrl_addr;
+	u32 magic_num;
+	u32 ver_key_index;
+	u32 hashed_key_val[4];
+};
+
+/**
+ * struct dx_cc_init_msg - used for passing the parameters to the CC_Init API.
+ * The structure is converted in to the CC_Init message
+ * @cc_flags:		Bits flags for different fields in the message
+ * @res_image_addr:	resident image address in the HOST memory
+ * @Icache_image_addr:	I$ image address in the HOST memeory
+ * @Icache_addr:	I$ memory allocation in case the I$ is not placed
+ *			(scramble or encrypted I$)
+ * @Icache_size:	Icache size ( The totoal I$ for Dx image and all
+ *			applets). The size is limited to: 256K,1M,2M and 4M.
+ * @Dcache_addr:	D$ memory allocation in the HOST memory
+ * @Dcache_size:	D$ memory allocation size
+ * @init_ext_addr:	Address of the cc_Init extension message in the HOST
+ * @vrl_addr:		The address of teh VRL in the HOST memory
+ * @magic_num:		Requested VRL magic number
+ * @ver_key_index:	The index of the verification key
+ * @output_buff_addr:	buffer to the HOST memeory, where the secure boot
+ *			process might write the results of the secure boot
+ * @output_buff_size:	size of the out put buffer ( in bytes)
+ * @Hashed_key_val:	the trunked hash value of teh verification key in case
+ *			the OTP keys are not in use
+*/
+
+struct dx_cc_init_msg {
+	u32 cc_flags;
+	u32 res_image_addr;
+	u32 icache_image_addr;
+	u32 icache_addr;
+	enum dx_cc_init_msg_icache_size icache_size;
+	u32 dcache_addr;
+	u32 dcache_size;
+	u32 init_ext_addr;
+	u32 user_config;
+	u32 vrl_addr;
+	u32 magic_num;
+	u32 ver_key_index;
+	u32 hashed_key_val[4];
+};
+
+#endif /*__DX_INIT_CC_DEFS__H__*/
diff --git a/drivers/staging/sep54/dx_reg_base_host.h b/drivers/staging/sep54/dx_reg_base_host.h
new file mode 100644
index 0000000..afe6ea6
--- /dev/null
+++ b/drivers/staging/sep54/dx_reg_base_host.h
@@ -0,0 +1,37 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+#ifndef __DX_REG_BASE_HOST_H__
+#define __DX_REG_BASE_HOST_H__
+
+#define DX_BASE_CC 0x83F00000
+#define DX_BASE_ENV_REGS 0x83F88000
+#define DX_BASE_ENV_CC_MEMORIES 0x83F88000
+#define DX_BASE_ENV_PERF_RAM 0x83F89000
+
+#define DX_BASE_CRY_KERNEL     0x0UL
+#define DX_BASE_ROM     0x83F80000
+
+#endif /*__DX_REG_BASE_HOST_H__*/
diff --git a/drivers/staging/sep54/dx_reg_common.h b/drivers/staging/sep54/dx_reg_common.h
new file mode 100644
index 0000000..dd43153
--- /dev/null
+++ b/drivers/staging/sep54/dx_reg_common.h
@@ -0,0 +1,39 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+#ifndef __DX_REG_COMMON_H__
+#define __DX_REG_COMMON_H__
+
+/* \file dx_reg_common.h
+   This file includes additions to the HW-specific information that is missing
+   from the header files provided by the HW team */
+
+#define DX_ICACHE_SIZE 0x08000000UL
+#define DX_DCACHE_SIZE 0x08000000UL
+#define DX_DEV_SIGNATURE 0xDCC54000UL
+
+#define DX_DD_REGION_MASK_SIZE 25/* Number of bits in direct-access region */
+
+#endif /*__DX_REG_COMMON_H__*/
diff --git a/drivers/staging/sep54/dx_sep_kapi.h b/drivers/staging/sep54/dx_sep_kapi.h
new file mode 100644
index 0000000..cb71f6a
--- /dev/null
+++ b/drivers/staging/sep54/dx_sep_kapi.h
@@ -0,0 +1,39 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+/**
+ * Kernel services API of CC driver:
+ * 1. Host-to-SeP Applet request API.
+ * 2. SeP Request agent API.
+ * 3. Power state control (sleep, warm-boot)
+ */
+#ifndef __DX_SEP_KAPI_H__
+#define __DX_SEP_KAPI_H__
+
+#include "dx_sepapp_kapi.h"
+#include "dx_sep_req_kapi.h"
+#include "dx_sep_power_kapi.h"
+
+#endif /*__DX_SEP_KAPI_H__*/
diff --git a/drivers/staging/sep54/dx_sep_power_kapi.h b/drivers/staging/sep54/dx_sep_power_kapi.h
new file mode 100644
index 0000000..43cb59b
--- /dev/null
+++ b/drivers/staging/sep54/dx_sep_power_kapi.h
@@ -0,0 +1,78 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+/**
+ * Kernel services API of power state control (sleep, warm-boot)
+ */
+#ifndef __DX_SEP_POWER_KAPI_H__
+#define __DX_SEP_POWER_KAPI_H__
+
+#include <linux/types.h>
+
+/******************************************/
+/* Power state control (sleep, warm-boot) */
+/******************************************/
+
+/**
+ * Power states of SeP
+ */
+enum dx_sep_power_state {
+	DX_SEP_POWER_INVALID = -1,	/* SeP is in unexpected (error) state */
+	DX_SEP_POWER_OFF = 0,	/* SeP is assumed to be off (unreachable) */
+	DX_SEP_POWER_BOOT,	/* SeP is in (warm) boot process */
+	DX_SEP_POWER_IDLE,	/* SeP is running but no request is pending */
+	DX_SEP_POWER_ACTIVE,	/* SeP is running and processing */
+	DX_SEP_POWER_HIBERNATED	/* SeP is in hibernated (sleep) state */
+};
+
+/* Prototype for callback on sep state change */
+typedef void (*dx_sep_state_change_callback_t) (unsigned long cookie);
+
+/**
+ * dx_sep_power_state_set() - Change power state of SeP (CC)
+ *
+ * @req_state:	The requested power state (_HIBERNATED or _ACTIVE)
+ *
+ * Request changing of power state to given state and block until transition
+ * is completed.
+ * Requesting _HIBERNATED is allowed only from _ACTIVE state.
+ * Requesting _ACTIVE is allowed only after CC was powered back on (warm boot).
+ * Return codes:
+ * 0 -	Power state change completed.
+ * -EINVAL -	This request is not allowed in current SeP state or req_state
+ *		value is invalid.
+ * -EBUSY -	State change request ignored because SeP is busy (primarily,
+ *		when requesting hibernation while SeP is processing something).
+ * -ETIME -	Request timed out (primarily, when asking for _ACTIVE)
+ */
+int dx_sep_power_state_set(enum dx_sep_power_state req_state);
+
+/**
+ * dx_sep_power_state_get() - Get the current power state of SeP (CC)
+ * @state_jiffies_p:	The "jiffies" value at which given state was detected.
+ */
+enum dx_sep_power_state dx_sep_power_state_get(unsigned long *state_jiffies_p);
+
+#endif /*__DX_SEP_POWER_KAPI_H__*/
diff --git a/drivers/staging/sep54/dx_sep_req_kapi.h b/drivers/staging/sep54/dx_sep_req_kapi.h
new file mode 100644
index 0000000..bde329d
--- /dev/null
+++ b/drivers/staging/sep54/dx_sep_req_kapi.h
@@ -0,0 +1,77 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+/**
+ * Kernel services API of SeP Request agent API.
+ */
+#ifndef __DX_SEP_REQ_KAPI_H__
+#define __DX_SEP_REQ_KAPI_H__
+
+#include <linux/types.h>
+
+/*******************************/
+/* SeP-to-Host request agents  */
+/*******************************/
+
+/**
+ * dx_sep_req_register_agent() - Register an agent
+ * @agent_id: The agent ID
+ * @max_buf_size: A pointer to the max buffer size
+ *
+ * Returns int 0 on success
+ */
+int dx_sep_req_register_agent(u8 agent_id, u32 *max_buf_size);
+
+/**
+ * dx_sep_req_unregister_agent() - Unregister an agent
+ * @agent_id: The agent ID
+ *
+ * Returns int 0 on success
+ */
+int dx_sep_req_unregister_agent(u8 agent_id);
+
+/**
+ * dx_sep_req_wait_for_request() - Wait from an incoming sep request
+ * @agent_id: The agent ID
+ * @sep_req_buf_p: Pointer to the incoming request buffer
+ * @req_buf_size: Pointer to the incoming request size
+ *
+ * Returns int 0 on success
+ */
+int dx_sep_req_wait_for_request(u8 agent_id, u8 *sep_req_buf_p,
+				u32 *req_buf_size);
+
+/**
+ * dx_sep_req_send_response() - Send a response to the sep
+ * @agent_id: The agent ID
+ * @host_resp_buf_p: Pointer to the outgoing response buffer
+ * @resp_buf_size: Pointer to the outgoing response size
+ *
+ * Returns int 0 on success
+ */
+int dx_sep_req_send_response(u8 agent_id, u8 *host_resp_buf_p,
+			     u32 resp_buf_size);
+
+#endif /*__DX_SEP_REQ_KAPI_H__*/
diff --git a/drivers/staging/sep54/dx_sepapp_kapi.h b/drivers/staging/sep54/dx_sepapp_kapi.h
new file mode 100644
index 0000000..c85f6d3
--- /dev/null
+++ b/drivers/staging/sep54/dx_sepapp_kapi.h
@@ -0,0 +1,135 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+/**
+ * Kernel API for Host-to-SeP Applet request API.
+ */
+#ifndef __DX_SEPAPP_KAPI_H__
+#define __DX_SEPAPP_KAPI_H__
+
+#include <linux/types.h>
+#include "dx_driver_abi.h"
+#include "crypto_api.h"
+
+/**
+ * struct dxdi_kmemref - Kernel memory reference
+ * @dma_direction:	Planned DMA direction
+ * @sgl:	Scatter/Gather list of given buffer (NULL if using buf_p)
+ * @nbytes:	Size in bytes of data referenced by "sgl"
+ */
+struct dxdi_kmemref {
+	enum dxdi_data_direction dma_direction;
+	struct scatterlist *sgl;
+	unsigned long nbytes;	/* data size */
+};
+
+/**
+ * dxdi_sepapp_kparams - Kernel parameters description for dx_sepapp_* func.
+ * @params_types:	The type of each paramter in params[] array
+ * @params:		The given parameters description
+ */
+struct dxdi_sepapp_kparams {
+	enum dxdi_sepapp_param_type params_types[SEP_APP_PARAMS_MAX];
+	union {
+		struct dxdi_val_param val;	/* DXDI_SEPAPP_PARAM_VAL */
+		struct dxdi_kmemref kmemref;	/* DXDI_SEPAPP_PARAM_MEMREF */
+	} params[SEP_APP_PARAMS_MAX];
+};
+
+#define DX_SEPAPP_CLIENT_CTX_NULL NULL
+
+/*******************************/
+/* Host-to-SeP Applet requests */
+/*******************************/
+
+/**
+ * dx_sepapp_context_alloc() - Allocate client context for SeP applets ops.
+ * Returns DX_SEPAPP_CLIENT_CTX_NULL on failure.
+ */
+void *dx_sepapp_context_alloc(void);
+
+/**
+ * dx_sepapp_context_free() - Free client context.
+ *
+ * @ctx: Client context to free.
+ */
+void dx_sepapp_context_free(void *ctx);
+
+/**
+ * dx_sepapp_session_open() - Open a session with a SeP applet
+ *
+ * @ctx:		SeP client context
+ * @sepapp_uuid:	Target applet UUID
+ * @auth_method:	Session connection authentication method
+ *			(Currently only 0/Public is supported)
+ * @auth_data:		Pointer to authentication data - Should be NULL
+ * @open_params:	Parameters for session opening
+ * @session_id:		Returned session ID (on success)
+ * @ret_origin:		Return code origin
+ *
+ * If ret_origin is not DXDI_SEP_MODULE_APP (i.e., above the applet), it must
+ * be 0 on success. For DXDI_SEP_MODULE_APP it is an applet-specific return code
+ */
+int dx_sepapp_session_open(void *ctx,
+			   u8 *sepapp_uuid,
+			   u32 auth_method,
+			   void *auth_data,
+			   struct dxdi_sepapp_kparams *open_params,
+			   int *session_id, enum dxdi_sep_module *ret_origin);
+
+/**
+ * dx_sepapp_session_close() - Close a session with an applet
+ *
+ * @ctx:	SeP client context
+ * @session_id: Session ID as returned from dx_sepapp_open_session()
+ *
+ * Return code would be 0 on success
+ */
+int dx_sepapp_session_close(void *ctx, int session_id);
+
+/**
+ * dx_sepapp_command_invoke() - Initiate command in the applet associated with
+ *				given session ID
+ *
+ * @ctx:	SeP client context
+ * @session_id:	The target session ID
+ * @command_id:	The ID of the command to initiate (applet-specific)
+ * @command_params:	The command parameters
+ * @ret_origin:	The origin of the return code
+ */
+int dx_sepapp_command_invoke(void *ctx,
+			     int session_id,
+			     u32 command_id,
+			     struct dxdi_sepapp_kparams *command_params,
+			     enum dxdi_sep_module *ret_origin);
+
+int async_sepapp_command_invoke(void *ctx,
+			     int session_id,
+			     u32 command_id,
+			     struct dxdi_sepapp_kparams *command_params,
+			     enum dxdi_sep_module *ret_origin,
+			     struct async_req_ctx *areq_ctx);
+
+#endif /*__DX_SEPAPP_KAPI_H__*/
diff --git a/drivers/staging/sep54/lli_mgr.c b/drivers/staging/sep54/lli_mgr.c
new file mode 100644
index 0000000..414157e
--- /dev/null
+++ b/drivers/staging/sep54/lli_mgr.c
@@ -0,0 +1,2110 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+ /*!
+  * \file lli_mgr.c
+  * \brief LLI logic: Bulding MLLI tables from user virtual memory buffers
+  */
+
+#define SEP_LOG_CUR_COMPONENT SEP_LOG_MASK_LLI_MGR
+
+#include <linux/slab.h>
+#include <linux/dmapool.h>
+#include <linux/pagemap.h>
+#include "sep_ctx.h"
+#include "dx_driver.h"
+#include "sep_log.h"
+#include "lli_mgr.h"
+
+/* Limitation of DLLI buffer size due to size of "SIZE" field */
+/* Set this to 0 in order to disable DLLI support */
+/*#define DLLI_BUF_LIMIT \
+	((1UL << SEP_SW_DESC_CRYPTO_OP_IFT_SIZE_BIT_SIZE) - 1)*/
+/* For now limit to the size planned for DLLI_AUX_BUF_LIMIT because we
+   must always have both din and dout DLLI or MLLI. When mixing would be
+   supported we can increase this limit to the one in comment, above */
+#define DLLI_BUF_LIMIT 2048
+
+/* The following size defines up to which size we would optimize for DLLI
+   buffer descriptor even for non-contiguous buffers. If not physically
+   contiguous (or not cache aligned on platforms with no cache coherency) an
+   auxilliary buffer would be allocated and the data copied to/from it. */
+#define DLLI_AUX_BUF_LIMIT 2048
+/* Note: The value of DLLI_AUX_BUF_LIMIT is tuned based on emipirical tests
+   on our system so the memcpy overhead does not "consume" the performance
+   benefit of using DLLI */
+
+#if DLLI_AUX_BUF_LIMIT > DLLI_BUF_LIMIT
+#error DLLI_AUX_BUF_LIMIT is too large. May be at most 64KB-1
+#endif
+
+#if (SEP_SUPPORT_SHA > 256)
+#define MAX_CRYPTO_BLOCK_LOG2 7
+#else
+#define MAX_CRYPTO_BLOCK_LOG2 6
+#endif
+#define MAX_CRYPTO_BLOCK_SIZE (1 << MAX_CRYPTO_BLOCK_LOG2)
+#define MAX_CRYPTO_BLOCK_MASK (MAX_CRYPTO_BLOCK_SIZE - 1)
+
+#define SEP_LLI_ENTRY_BYTE_SIZE (SEP_LLI_ENTRY_WORD_SIZE * sizeof(u32))
+
+/* Index of first LLI which encodes data buffer
+   (after "next VA" and "next DMA") */
+#define FIRST_DATA_LLI_INDEX 2
+
+/* Overhead for tables linked list:
+ * one entry for FW linked list + one for host/kernel linked list
+ * (required due to difference between dma_addr and kernel virt. addr.) +
+ * last entry is reserved to the protected entry with the stop bit */
+#define SEP_MLLI_LINK_TO_NEXT_OVERHEAD 3
+
+/* macro to set/get link-to-next virtual address for next MLLI
+ * This macro relies on availability of an extra LLI entry per MLLI table.
+ * It uses the space of the first entry so the "SeP" table start after it.
+ */
+#define SEP_MLLI_SET_NEXT_VA(cur_mlli_p, next_mlli_p) \
+do { \
+	u32 __phys_ptr_ = virt_to_phys(next_mlli_p) & (DMA_BIT_MASK(32));\
+	SEP_LLI_SET(cur_mlli_p , ADDR, __phys_ptr_);\
+} while (0)
+#define SEP_MLLI_SET_NEXT_VA_NULL(cur_mlli_start) \
+		SEP_MLLI_SET_NEXT_VA(mlli_table_p, 0)
+#define SEP_MLLI_GET_NEXT_VA(cur_mlli_start) \
+	SEP_LLI_GET((cur_mlli_start), ADDR) == 0 ? 0 :  ((u32 *)phys_to_virt(SEP_LLI_GET((cur_mlli_start), ADDR)));\
+
+
+#define CACHE_LINE_MASK (L1_CACHE_BYTES - 1)
+
+/* Number of data bytes to gather at last LLI of for end of Din buffer */
+#define DIN_LAST_LLI_GATHER_SIZE 32
+
+/* Select the client buffer amount to copy into aux. buffers (head/tail) */
+#ifdef CONFIG_NOT_COHERENT_CACHE
+#if (DIN_LAST_LLI_GATHER_SIZE > L1_CACHE_BYTES)
+#define EDGE_BUFS_POOL_ITEM_SIZE DIN_LAST_LLI_GATHER_SIZE
+#else
+#define EDGE_BUFS_POOL_ITEM_SIZE L1_CACHE_BYTES
+#endif
+#else	/* Coherent cache - only tail buffer required per CC requirements */
+#define EDGE_BUFS_POOL_ITEM_SIZE DIN_LAST_LLI_GATHER_SIZE
+#endif
+
+/* Similar to for_each_sg but no need for nents - runs until NULL */
+#define for_each_valid_sg(sglist, cur_sge)	\
+	for (cur_sge = (sglist); cur_sge != NULL; cur_sge = sg_next(cur_sge))
+
+/**
+ * struct llimgr_obj - The LLI manager object (exposed as llimgr_h)
+ * @dev:	The associated device context (for DMA operations)
+ * @mlli_cache:	DMA coherent memory pool for the MLLI tables
+ * @edge_bufs_pool:	Pool for auxilliary buffers used instead of user buffer
+ *			start/end. Used for last LLI data mirroring to fulfil
+ *			requirement of 32B on last LLI.
+ *			In case of a non-coherent cache and a data buffer
+ *			which starts/ends unaligned to cache line we should
+ *			allocate external buffer to be used as the first/last
+ *			LLI entry instead of the "tail". This is required to
+ *			avoid cache incoherency due to access by other process
+ *			entities to the cache lines where the data is. This is
+ *			required only for the output buffer where the same cache
+ *			line is accessed by the host processor while SeP should
+ *			DMA output data into it.
+ * @dlli_bufs_pool:	Pool for client buffers up to DLLI_AUX_BUF_LIMIT which
+ *			are not phys. contig. and copied into it in order to be
+ *			physically contiguous, thus suitable for DLLI access.
+ * @max_lli_num:	Maximum LLI entries number in MLLI table.
+ * @max_data_per_mlli:	Maximum bytes of data mapped by each MLLI table.
+ *
+ */
+struct llimgr_obj {
+	struct device *dev;
+	struct dma_pool *mlli_cache;
+	struct dma_pool *edge_bufs_pool;
+	struct dma_pool *dlli_bufs_pool;
+	unsigned int max_lli_num;
+	unsigned long max_data_per_mlli;
+};
+
+/* Iterator state for building the MLLI tables list */
+struct mlli_tables_list_iterator {
+	u32 *prev_mlli_table_p;
+	u32 *cur_mlli_table_p;
+	dma_addr_t cur_mlli_dma_addr;
+	unsigned int next_lli_idx; /* Data LLI (After FIRST_DATA_LLI_INDEX) */
+	unsigned long cur_mlli_accum_data; /* Accumulated in current MLLI */
+};
+
+static void cleanup_mlli_tables_list(struct llimgr_obj *llimgr_p,
+				     struct mlli_tables_list *mlli_tables_ptr,
+				     int is_data_dirty);
+static inline unsigned int get_sgl_nents(struct scatterlist *sgl);
+
+/**
+ * llimgr_create() - Create LLI-manager object
+ * @dev:	 Device context
+ * @mlli_table_size:	 The maximum size of an MLLI table in bytes
+ *
+ * Returns llimgr_h Created object handle or LLIMGR_NULL_HANDLE if failed
+ */
+void *llimgr_create(struct device *dev, unsigned long mlli_table_size)
+{
+	struct llimgr_obj *new_llimgr_p;
+	unsigned int num_of_full_page_llis;
+
+	new_llimgr_p = kmalloc(sizeof(struct llimgr_obj), GFP_KERNEL);
+	if (new_llimgr_p == NULL)
+		return LLIMGR_NULL_HANDLE;
+	new_llimgr_p->dev = dev;
+	/* create dma "coherent" memory pool for MLLI tables */
+	new_llimgr_p->mlli_cache = dma_pool_create("dx_sep_mlli_tables", dev,
+						   mlli_table_size,
+						   L1_CACHE_BYTES, 0);
+	if (new_llimgr_p->mlli_cache == NULL) {
+		pr_err("Failed creating DMA pool for MLLI tables\n");
+		goto create_failed_mlli_pool;
+	}
+
+	/* Create pool for holding buffer "tails" which share cache lines with
+	 * other data buffers */
+	new_llimgr_p->edge_bufs_pool = dma_pool_create("dx_sep_edge_bufs", dev,
+						       EDGE_BUFS_POOL_ITEM_SIZE,
+						       EDGE_BUFS_POOL_ITEM_SIZE,
+						       0);
+	if (new_llimgr_p->edge_bufs_pool == NULL) {
+		pr_err("Failed creating DMA pool for edge buffers\n");
+		goto create_failed_edge_bufs_pool;
+	}
+
+	new_llimgr_p->max_lli_num =
+	    ((mlli_table_size / SEP_LLI_ENTRY_BYTE_SIZE) -
+	     SEP_MLLI_LINK_TO_NEXT_OVERHEAD);
+	num_of_full_page_llis = new_llimgr_p->max_lli_num;
+	num_of_full_page_llis -= 2;/*First and last entries are partial pages */
+	num_of_full_page_llis -= 1;	/* One less for end aux. buffer */
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	num_of_full_page_llis -= 1;	/* One less for start aux. buffer */
+#endif
+	/* Always a multiple of PAGE_SIZE - assures that it is also a
+	 * crypto block multiple. */
+	new_llimgr_p->max_data_per_mlli = num_of_full_page_llis * PAGE_SIZE;
+
+#if DLLI_AUX_BUF_LIMIT > 0
+	new_llimgr_p->dlli_bufs_pool = dma_pool_create("dx_sep_dlli_bufs", dev,
+						       DLLI_AUX_BUF_LIMIT,
+						       DLLI_AUX_BUF_LIMIT, 0);
+	if (new_llimgr_p->dlli_bufs_pool == NULL) {
+		pr_err("Failed creating DMA pool for DLLI buffers\n");
+		goto create_failed_dlli_bufs_pool;
+	}
+#endif
+
+	return new_llimgr_p;
+
+ create_failed_dlli_bufs_pool:
+	dma_pool_destroy(new_llimgr_p->edge_bufs_pool);
+ create_failed_edge_bufs_pool:
+	dma_pool_destroy(new_llimgr_p->mlli_cache);
+ create_failed_mlli_pool:
+	kfree(new_llimgr_p);
+	return LLIMGR_NULL_HANDLE;
+}
+
+/**
+ * llimgr_destroy() - Destroy (free resources of) given LLI-manager object
+ * @llimgr:	 LLI-manager object handle
+ *
+ */
+void llimgr_destroy(void *llimgr)
+{
+	struct llimgr_obj *llimgr_p = (struct llimgr_obj *)llimgr;
+
+#if DLLI_AUX_BUF_LIMIT > 0
+	dma_pool_destroy(llimgr_p->dlli_bufs_pool);
+#endif
+	dma_pool_destroy(llimgr_p->edge_bufs_pool);
+	dma_pool_destroy(llimgr_p->mlli_cache);
+	kfree(llimgr_p);
+}
+
+/*****************************************/
+/* Auxilliary buffers handling functions */
+/*****************************************/
+
+/**
+ * calc_aux_bufs_size() - Calculate required aux. buffers for given user buffer
+ * @buf_start:	A pointer value at buffer start (used to calculate alignment)
+ * @buf_size:	User buffer size in bytes
+ * @data_direction:	DMA direction
+ * @last_blk_with_prelast:	Last crypto block must be in the same LLI and
+ *				pre-last block.
+ * @crypto_block_size:	The Crypto-block size in bytes
+ * @start_aux_buf_size_p:	Returned required aux. buffer size at start
+ * @end_aux_buf_size_p:	Returned required aux. buffers size at end
+ *
+ * Returns void
+ */
+static void calc_aux_bufs_size(const unsigned long buf_start,
+			       unsigned long buf_size,
+			       enum dma_data_direction data_direction,
+			       unsigned long *start_aux_buf_size_p,
+			       unsigned long *end_aux_buf_size_p)
+{
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	const bool is_dout = ((data_direction == DMA_BIDIRECTIONAL) ||
+			      (data_direction == DMA_FROM_DEVICE));
+#endif				/*CONFIG_NOT_COHERENT_CACHE */
+
+	/* Calculate required aux. buffers: cache line tails + last w/prelast */
+	*start_aux_buf_size_p = 0;
+	*end_aux_buf_size_p = 0;
+
+	if (buf_size == 0)
+		return;
+
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	/* start of buffer unaligned to cache line... */
+	if ((is_dout) && (buf_start & CACHE_LINE_MASK)) {
+		*start_aux_buf_size_p =	/* Remainder to end of cache line */
+		    L1_CACHE_BYTES - (buf_start & CACHE_LINE_MASK);
+		/* But not more than buffer size */
+		if (*start_aux_buf_size_p > buf_size)
+			*start_aux_buf_size_p = buf_size;
+	}
+#endif				/*CONFIG_NOT_COHERENT_CACHE */
+
+	/* last 32 B must always be on last LLI entry */
+	/* Put 32 B or the whole buffer if smaller than 32 B */
+	*end_aux_buf_size_p = buf_size >= DIN_LAST_LLI_GATHER_SIZE ?
+	    DIN_LAST_LLI_GATHER_SIZE : buf_size;
+	if ((*end_aux_buf_size_p + *start_aux_buf_size_p) > buf_size) {
+		/* End aux. buffer covers part of
+		 * start aux. buffer - leave only remainder in
+		 * start aux. buffer. */
+		*start_aux_buf_size_p = buf_size - *end_aux_buf_size_p;
+	}
+#ifdef DEBUG
+	if (((*end_aux_buf_size_p + *start_aux_buf_size_p) > buf_size) ||
+	    (*start_aux_buf_size_p > EDGE_BUFS_POOL_ITEM_SIZE) ||
+	    (*end_aux_buf_size_p > EDGE_BUFS_POOL_ITEM_SIZE)) {
+		pr_err(
+			    "Invalid aux. buffer sizes: buf_size=%lu B, start_aux=%lu B, end_aux=%lu B\n",
+			    buf_size, *start_aux_buf_size_p,
+			    *end_aux_buf_size_p);
+	} else {
+		pr_debug
+		    ("buf_size=%lu B, start_aux=%lu B, end_aux=%lu B\n",
+		     buf_size, *start_aux_buf_size_p, *end_aux_buf_size_p);
+	}
+#endif
+}
+
+#ifdef DEBUG
+static void dump_client_buf_pages(const struct client_dma_buffer
+				  *client_dma_buf_p)
+{
+	int i;
+	struct scatterlist *sgentry;
+
+	if (client_dma_buf_p->user_buf_ptr != NULL) {
+		pr_debug(
+			      "Client DMA buffer %p maps %lu B over %d pages at user_ptr=0x%p (dma_dir=%d):\n",
+			      client_dma_buf_p, client_dma_buf_p->buf_size,
+			      client_dma_buf_p->num_of_pages,
+			      client_dma_buf_p->user_buf_ptr,
+			      client_dma_buf_p->dma_direction);
+	} else {
+		pr_debug("Client DMA buffer %p maps %lu B (dma_dir=%d):\n",
+			      client_dma_buf_p, client_dma_buf_p->buf_size,
+			      client_dma_buf_p->dma_direction);
+	}
+
+	if (client_dma_buf_p->user_pages != NULL) {
+		pr_debug("%d user_pages:\n",
+			      client_dma_buf_p->num_of_pages);
+		for (i = 0; i < client_dma_buf_p->num_of_pages; i++) {
+			pr_debug("%d. phys_addr=0x%08lX\n", i,
+				      page_to_pfn(client_dma_buf_p->
+						  user_pages[i]) << PAGE_SHIFT);
+		}
+	}
+#if 0
+	pr_debug("sg_head:\n");
+	i = 0;
+	for_each_valid_sg(client_dma_buf_p->sg_head, sgentry) {
+		pr_debug("%d. phys_addr=0x%08llX len=0x%08X\n", i,
+			      sg_phys(sgentry), sgentry->length);
+		i++;
+	}
+#endif
+	pr_debug("sg_main:\n");
+	i = 0;
+	for_each_valid_sg(client_dma_buf_p->sg_main, sgentry) {
+		pr_debug("%d. dma_addr=0x%08llX len=0x%08X\n", i,
+			(long long unsigned int)sg_dma_address(sgentry),
+			sg_dma_len(sgentry));
+		i++;
+	}
+	pr_debug("sg_tail:\n");
+	i = 0;
+	for_each_valid_sg(client_dma_buf_p->sg_tail, sgentry) {
+		pr_debug("%d. phys_addr=0x%08llX len=0x%08X\n", i,
+				(long long unsigned int)sg_phys(sgentry),
+				sgentry->length);
+		i++;
+	}
+	pr_debug("sg_save4next:\n");
+	i = 0;
+	for_each_valid_sg(client_dma_buf_p->sg_save4next, sgentry) {
+		pr_debug("%d. phys_addr=0x%08llX len=0x%08X\n", i,
+				(long long unsigned int)sg_phys(sgentry),
+				sgentry->length);
+		i++;
+	}
+
+}
+#endif /*DEBUG*/
+/**
+ * create_sg_list() - Allocate/create S/G list for given page array,
+ * @page_array:	 The source pages array
+ * @offset_in_first_page:	 Offset in bytes in the first page of page_array
+ * @sg_data_size:	 Number of bytes to include in the create S/G list
+ * @new_sg_list_p:	 The allocated S/G list buffer
+ * @next_page_p:	 The next page to map (for incremental list creation)
+ * @next_page_offset_p:	 The offset to start in next page to map
+ *	(The list is allocated by this func. and should be freed by the caller)
+ *
+ * Allocate/create S/G list for given page array,
+ * starting at given offset in the first page spanning across given sg_data_size
+ * Returns int 0 for success
+ */
+static int create_sg_list(struct page **pages_array,
+			  unsigned long offset_in_first_page,
+			  unsigned long sg_data_size,
+			  struct scatterlist **new_sg_list_p,
+			  struct page ***next_page_p,
+			  unsigned long *next_page_offset_p)
+{
+	const unsigned long end_offset =
+	    offset_in_first_page + sg_data_size - 1;
+	const unsigned long num_of_sg_ents = (end_offset >> PAGE_SHIFT) + 1;
+	const unsigned long size_of_first_page = (num_of_sg_ents == 1) ?
+	    sg_data_size : (PAGE_SIZE - offset_in_first_page);
+	const unsigned long size_of_last_page = (end_offset & ~PAGE_MASK) + 1;
+	struct scatterlist *cur_sge;
+	int i;
+
+	if (sg_data_size == 0) {	/* Empty S/G list */
+		*new_sg_list_p = NULL;
+		*next_page_p = pages_array;
+		*next_page_offset_p = offset_in_first_page;
+		return 0;
+	}
+
+	*new_sg_list_p =
+	    kmalloc(sizeof(struct scatterlist) * num_of_sg_ents, GFP_KERNEL);
+	if (unlikely(*new_sg_list_p == NULL)) {
+		pr_err("Failed allocating sglist array for %lu entries\n",
+			    num_of_sg_ents);
+		return -ENOMEM;
+	}
+
+	/* Set default for next table assuming full pages */
+	*next_page_p = pages_array + num_of_sg_ents;
+	*next_page_offset_p = 0;
+
+	sg_init_table(*new_sg_list_p, num_of_sg_ents);
+	cur_sge = *new_sg_list_p;
+	/* First page is partial
+	 * - May start in middle of page
+	 * - May end in middle of page if single page */
+	sg_set_page(cur_sge, pages_array[0],
+		    size_of_first_page, offset_in_first_page);
+	/* Handle following (whole) pages, but last (which may be partial) */
+	for (i = 1; i < (num_of_sg_ents - 1); i++) {
+		cur_sge = sg_next(cur_sge);
+		if (unlikely(cur_sge == NULL)) {
+			pr_err(
+				    "Reached end of sgl before (%d) num_of_sg_ents (%lu)\n",
+				    i, num_of_sg_ents);
+			kfree(*new_sg_list_p);
+			*new_sg_list_p = NULL;
+			return -EINVAL;
+		}
+		sg_set_page(cur_sge, pages_array[i], PAGE_SIZE, 0);
+	}
+	/* Handle last (partial?) page */
+	if (num_of_sg_ents > 1) {
+		/* only if was not handled already as first */
+		cur_sge = sg_next(cur_sge);
+		if (unlikely(cur_sge == NULL)) {
+			pr_err(
+				    "Cannot put last page in given num_of_sg_ents (%lu)\n",
+				    num_of_sg_ents);
+			kfree(*new_sg_list_p);
+			*new_sg_list_p = NULL;
+			return -EINVAL;
+		}
+		sg_set_page(cur_sge,
+			    pages_array[num_of_sg_ents - 1],
+			    size_of_last_page, 0);
+		if (size_of_last_page < PAGE_SIZE) {
+			(*next_page_p)--; /* Last page was not fully consumed */
+			*next_page_offset_p = size_of_last_page;
+		}
+	} else {		/* First was last */
+		if ((offset_in_first_page + size_of_first_page) < PAGE_SIZE) {
+			(*next_page_p)--; /* Page was not fully consumed */
+			*next_page_offset_p =
+			    (offset_in_first_page + size_of_first_page);
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * split_sg_list() - Split SG list at given offset.
+ * @sgl_to_split:	The SG list to split
+ * @split_sg_list:	Returned new SG list which starts with second half
+ *			of split entry.
+ * @split_offset:	Size in bytes of entry part to leave on original list.
+ *
+ * Split SG list at given offset.
+ * The entry is shortened at given length and the remainder is added to
+ * a new SG entry that is chained to the original list following.
+ * Returns int 0 on success
+ */
+static int split_sg_list(struct scatterlist *sgl_to_split,
+			 struct scatterlist **split_sg_list,
+			 unsigned long split_offset)
+{
+	struct scatterlist *cur_sge = sgl_to_split;
+
+	/* Scan list until consuming enough for first part to fit in cur_sge */
+	while ((cur_sge != NULL) && (split_offset > cur_sge->length)) {
+		split_offset -= cur_sge->length;
+		cur_sge = sg_next(cur_sge);
+	}
+	/* After the loop above, split_offset is actually the offset within
+	 * cur_sge */
+
+	if (cur_sge == NULL)
+		return -ENOMEM;	/* SG list too short for given first_part_len */
+
+	if (split_offset < cur_sge->length) {
+		/* Split entry */
+		*split_sg_list =
+		    kmalloc(sizeof(struct scatterlist) * 2, GFP_KERNEL);
+		if (*split_sg_list == NULL) {
+			pr_err("Failed allocating SGE for split entry\n");
+			return -ENOMEM;
+		}
+		sg_init_table(*split_sg_list, 2);
+		sg_set_page(*split_sg_list, sg_page(cur_sge),
+			    cur_sge->length - split_offset,
+			    cur_sge->offset + split_offset);
+		/* Link to second SGE */
+		sg_chain(*split_sg_list, 2, sg_next(cur_sge));
+		cur_sge->length = split_offset;
+	} else {		/* Split at entry boundary */
+		*split_sg_list = sg_next(cur_sge);
+		sg_mark_end(cur_sge);
+	}
+
+	return 0;
+}
+
+/**
+ * link_sg_lists() - Link back split S/G list
+ * @first_sgl:	The first chunk s/g list
+ * @second_sgl:	The second chunk s/g list
+ *
+ * Returns Unified lists list head
+ */
+static struct scatterlist *link_sg_lists(struct scatterlist *first_sgl,
+					 struct scatterlist *second_sgl)
+{
+	struct scatterlist *second_sgl_second = NULL;
+	struct scatterlist *first_sgl_last;
+	struct scatterlist *cur_sge;
+
+	if (first_sgl == NULL)
+		return second_sgl;	/* Second list is the "unified" list */
+	if (second_sgl == NULL)
+		return first_sgl;	/* Nothing to link back */
+	/* Seek end of first s/g list */
+	first_sgl_last = NULL;	/* To save last s/g entry */
+	for_each_valid_sg(first_sgl, cur_sge)
+	    first_sgl_last = cur_sge;
+	if ((sg_page(first_sgl_last) == sg_page(second_sgl)) &&
+	    ((first_sgl_last->offset + first_sgl_last->length) ==
+	     second_sgl->offset)) {
+		/* Case of entry split */
+		/* Restore first entry length */
+		first_sgl_last->length += second_sgl->length;
+		/* Save before freeing */
+		second_sgl_second = sg_next(second_sgl);
+		kfree(second_sgl);
+	}
+	/* This entry was allocated by split_sg_list */
+	/* else, list was split on entry boundary */
+	if (second_sgl_second != NULL) {
+		/*
+		 * Restore link to following entries
+		 * Clear chain termination flag to link back to next sge
+		 * Unfortunately there is no direct function to do this
+		 * so we rely on implementation detail (all flags cleared)
+		 */
+		first_sgl_last->page_link =
+		    (unsigned long)sg_page(first_sgl_last);
+	}
+	return first_sgl;
+}
+
+/**
+ * cleanup_client_dma_buf() - Cleanup client_dma_buf resources (S/G lists,
+ *				pages array, aux. bufs)
+ * @client_dma_buf_p:
+
+ *
+ * Returns void
+ */
+static void cleanup_client_dma_buf(struct llimgr_obj *llimgr_p,
+				   struct client_dma_buffer *client_dma_buf_p)
+{
+	struct page *cur_page;
+	int i;
+	const bool is_outbuf =
+	    (client_dma_buf_p->dma_direction == DMA_FROM_DEVICE) ||
+	    (client_dma_buf_p->dma_direction == DMA_BIDIRECTIONAL);
+
+	/* User space buffer */
+	if (client_dma_buf_p->user_buf_ptr != NULL) {
+#ifdef CONFIG_NOT_COHERENT_CACHE
+		if (client_dma_buf_p->sg_head != NULL)
+			kfree(client_dma_buf_p->sg_head);
+#endif
+		if (client_dma_buf_p->sg_main != NULL)
+			kfree(client_dma_buf_p->sg_main);
+		if (client_dma_buf_p->sg_tail != NULL)
+			kfree(client_dma_buf_p->sg_tail);
+		if (client_dma_buf_p->sg_save4next != NULL)
+			kfree(client_dma_buf_p->sg_save4next);
+		/* Unmap pages that were mapped/locked */
+		if (client_dma_buf_p->user_pages != NULL) {
+			for (i = 0; i < client_dma_buf_p->num_of_pages; i++) {
+				cur_page = client_dma_buf_p->user_pages[i];
+				/* Mark dirty for pages written by HW/DMA */
+				if (is_outbuf && !PageReserved(cur_page))
+					SetPageDirty(cur_page);
+				page_cache_release(cur_page);	/* Unlock */
+			}
+			kfree(client_dma_buf_p->user_pages);
+		}
+
+	} else {
+		/* (kernel) given s/g list */
+		/* Fix S/G list back to what was given */
+#ifdef CONFIG_NOT_COHERENT_CACHE
+		if (client_dma_buf_p->sg_head != NULL) {
+			if (client_dma_buf_p->sg_main != NULL) {
+				client_dma_buf_p->sg_main =
+				    link_sg_lists(client_dma_buf_p->sg_head,
+						  client_dma_buf_p->sg_main);
+			} else {
+				client_dma_buf_p->sg_tail =
+				    link_sg_lists(client_dma_buf_p->sg_head,
+						  client_dma_buf_p->sg_tail);
+			}
+			/* Linked to next */
+			client_dma_buf_p->sg_head = NULL;
+		}
+#endif
+		client_dma_buf_p->sg_tail =
+		    link_sg_lists(client_dma_buf_p->sg_main,
+				  client_dma_buf_p->sg_tail);
+		client_dma_buf_p->sg_save4next =
+		    link_sg_lists(client_dma_buf_p->sg_tail,
+				  client_dma_buf_p->sg_save4next);
+	}
+
+	/* Free aux. buffers */
+	if (client_dma_buf_p->buf_end_aux_buf_va != NULL) {
+		if (client_dma_buf_p->buf_end_aux_buf_size <=
+		    EDGE_BUFS_POOL_ITEM_SIZE) {
+			dma_pool_free(llimgr_p->edge_bufs_pool,
+				      client_dma_buf_p->buf_end_aux_buf_va,
+				      client_dma_buf_p->buf_end_aux_buf_dma);
+		} else {	/* From DLLI buffers pool */
+			dma_pool_free(llimgr_p->dlli_bufs_pool,
+				      client_dma_buf_p->buf_end_aux_buf_va,
+				      client_dma_buf_p->buf_end_aux_buf_dma);
+		}
+	}
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	if (client_dma_buf_p->buf_start_aux_buf_va != NULL)
+		dma_pool_free(llimgr_p->edge_bufs_pool,
+			      client_dma_buf_p->buf_start_aux_buf_va,
+			      client_dma_buf_p->buf_start_aux_buf_dma);
+#endif
+	CLEAN_DMA_BUFFER_INFO(client_dma_buf_p);
+}
+
+/**
+ * is_sgl_phys_contig() - Check if given scatterlist is physically contig.
+ *
+ * @sgl:		Checked scatter/gather list
+ * @data_size_p:	Size of phys. contig. portion of given sgl
+ */
+static bool is_sgl_phys_contig(struct scatterlist *sgl,
+			       unsigned long *data_size_p)
+{
+	struct scatterlist *cur_sge = sgl;
+	struct scatterlist *next_sge;
+
+	*data_size_p = 0;
+	for (cur_sge = sgl; cur_sge != NULL; cur_sge = next_sge) {
+		(*data_size_p) += cur_sge->length;
+		next_sge = sg_next(cur_sge);
+		if ((next_sge != NULL) &&
+		    /* Check proximity of current entry to next entry */
+		    ((page_to_phys(sg_page(cur_sge)) + cur_sge->length) !=
+		     (page_to_phys(sg_page(next_sge)) + next_sge->offset))) {
+			/* End of cur_sge does not reach start of next_sge */
+			return false;
+		}
+	}
+	/* If we passed the loop then data is phys. contig. */
+	return true;
+}
+
+/**
+ * is_pages_phys_contig() - Check if given pages are phys. contig.
+ *
+ * @pages_list:		Array of pages
+ * @num_of_pages:	Number of pages in provided pages_list
+ */
+static bool is_pages_phys_contig(struct page *pages_list[],
+				 unsigned int num_of_pages)
+{
+	int i;
+
+	for (i = 0; i < (num_of_pages - 1); i++) {
+		if ((page_to_phys(pages_list[i]) + PAGE_SIZE) !=
+		    page_to_phys(pages_list[i + 1]))
+			return false;
+	}
+	/* If reached here then all pages are following each other */
+	return true;
+}
+
+/**
+ * user_buf_to_client_dma_buf() - Apply given user_buf_ptr (user space buffer)
+ *				into client_dma_buffer
+ * @llimgr_p:
+ * @client_dma_buf_p:	 Client DMA object
+ *
+ * Apply given user_buf_ptr (user space buffer) into client_dma_buffer
+ * This function should be invoked after caller has set in the given
+ * client_dma_buf_p object the user_buf_ptr, buf_size and dma_direction
+ * Returns int 0 for success
+ */
+static int user_buf_to_client_dma_buf(struct llimgr_obj *llimgr_p,
+				      struct client_dma_buffer
+				      *client_dma_buf_p)
+{
+	u8 __user const *user_buf_ptr = client_dma_buf_p->user_buf_ptr;
+	unsigned long buf_size = client_dma_buf_p->buf_size;
+	const enum dma_data_direction dma_direction =
+	    client_dma_buf_p->dma_direction;
+	unsigned long buf_end = (unsigned long)user_buf_ptr + buf_size - 1;
+	const int num_of_pages =
+	    (buf_end >> PAGE_SHIFT) -
+	    ((unsigned long)user_buf_ptr >> PAGE_SHIFT) + 1;
+	const unsigned long offset_in_first_page =
+	    (unsigned long)user_buf_ptr & ~PAGE_MASK;
+	const bool is_inbuf = (dma_direction == DMA_TO_DEVICE) ||
+	    (dma_direction == DMA_BIDIRECTIONAL);
+	const bool is_outbuf = (dma_direction == DMA_FROM_DEVICE) ||
+	    (dma_direction == DMA_BIDIRECTIONAL);
+	unsigned long head_buf_size = 0, tail_buf_size = 0, main_buf_size = 0;
+	struct page **cur_page_p;
+	unsigned long cur_page_offset;
+	int rc = 0;
+
+	/* Verify permissions */
+	if (is_inbuf && !access_ok(ACCESS_READ, user_buf_ptr, buf_size)) {
+		pr_err("No read access to data buffer at %p\n",
+			    user_buf_ptr);
+		return -EFAULT;
+	}
+	if (is_outbuf && !access_ok(ACCESS_WRITE, user_buf_ptr, buf_size)) {
+		pr_err("No write access to data buffer at %p\n",
+			    user_buf_ptr);
+		return -EFAULT;
+	}
+	client_dma_buf_p->user_pages =
+	    kmalloc(sizeof(struct page *)*num_of_pages, GFP_KERNEL);
+	if (unlikely(client_dma_buf_p->user_pages == NULL)) {
+		pr_err("Failed allocating user_pages array for %d pages\n",
+			    num_of_pages);
+		return -ENOMEM;
+	}
+	/* Get user pages structure (also increment ref. count... lock) */
+	client_dma_buf_p->num_of_pages = get_user_pages_fast((unsigned long)
+							     user_buf_ptr,
+							     num_of_pages,
+							     is_outbuf,
+							     client_dma_buf_p->
+							     user_pages);
+	if (client_dma_buf_p->num_of_pages != num_of_pages) {
+		pr_warn(
+			     "Failed to lock all user pages (locked %d, requested lock = %d)\n",
+			     client_dma_buf_p->num_of_pages, num_of_pages);
+		rc = -ENOMEM;
+	}
+	/* Leave only currently processed data (remainder in sg_save4next */
+	buf_size -= client_dma_buf_p->save4next_size;
+	buf_end -= client_dma_buf_p->save4next_size;
+	/* Decide on type of mapping: MLLI, DLLI or DLLI after copy */
+	if (buf_size <= DLLI_BUF_LIMIT) {
+		/* Check if possible to map buffer directly as DLLI */
+		if (
+#ifdef CONFIG_NOT_COHERENT_CACHE
+			   /* For systems with incoherent cache the buffer
+			    * must be cache line aligned to be cosidered
+			    * for this case
+			    */
+			   (((unsigned long)user_buf_ptr & CACHE_LINE_MASK) ==
+			    0) &&
+				((buf_end & CACHE_LINE_MASK) ==
+				 CACHE_LINE_MASK) &&
+#endif
+			   is_pages_phys_contig(client_dma_buf_p->user_pages,
+						client_dma_buf_p->
+						num_of_pages)) {
+			pr_debug(
+				      "Mapping user buffer @%p (0x%08lX B) to DLLI directly\n",
+				      client_dma_buf_p->user_buf_ptr, buf_size);
+			main_buf_size = buf_size;/* Leave 0 for tail_buf_size */
+			/* 0 for the tail buffer indicates that we use this
+			 * optimization, because in any other case there must
+			 * be some data in the tail buffer (if buf_size>0) */
+		} else if (buf_size <= DLLI_AUX_BUF_LIMIT) {
+			pr_debug(
+				      "Mapping user buffer @%p (0x%08lX B) to DLLI via aux. buffer\n",
+				      client_dma_buf_p->user_buf_ptr, buf_size);
+			tail_buf_size = buf_size;
+			/* All data goes to "tail" in order to piggy-back over
+			 * the aux. buffers logic for copying data in/out of the
+			 * temp. DLLI DMA buffer */
+		}
+	}
+	if ((main_buf_size + tail_buf_size) == 0) {
+		/* If none of the optimizations was applied... */
+		calc_aux_bufs_size((unsigned long)user_buf_ptr, buf_size,
+				   dma_direction, &head_buf_size,
+				   &tail_buf_size);
+		main_buf_size = buf_size - head_buf_size - tail_buf_size;
+	}
+
+	/* Create S/G list */
+	cur_page_p = client_dma_buf_p->user_pages;
+	cur_page_offset = offset_in_first_page;
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	if (likely(rc == 0)) {
+		/* Create S/G list for head (aux.) buffer */
+		rc = create_sg_list(cur_page_p, cur_page_offset,
+				    head_buf_size, &client_dma_buf_p->sg_head,
+				    &cur_page_p, &cur_page_offset);
+	}
+#endif
+	/* Create S/G list for buffer "body" - to be used for DMA */
+	if (likely(rc == 0)) {
+		rc = create_sg_list(cur_page_p, cur_page_offset,
+				    main_buf_size, &client_dma_buf_p->sg_main,
+				    &cur_page_p, &cur_page_offset);
+	}
+	/* Create S/G list for tail (aux.) buffer */
+	if (likely(rc == 0)) {
+		rc = create_sg_list(cur_page_p, cur_page_offset,
+				    tail_buf_size, &client_dma_buf_p->sg_tail,
+				    &cur_page_p, &cur_page_offset);
+	}
+	/* Create S/G list for save4next buffer */
+	if (likely(rc == 0)) {
+		rc = create_sg_list(cur_page_p, cur_page_offset,
+				    client_dma_buf_p->save4next_size,
+				    &client_dma_buf_p->sg_save4next,
+				    &cur_page_p, &cur_page_offset);
+	}
+
+	if (unlikely(rc != 0)) {
+		cleanup_client_dma_buf(llimgr_p, client_dma_buf_p);
+	} else {
+		/* Save head/tail sizes */
+#ifdef CONFIG_NOT_COHERENT_CACHE
+		client_dma_buf_p->buf_start_aux_buf_size = head_buf_size;
+#endif
+		client_dma_buf_p->buf_end_aux_buf_size = tail_buf_size;
+	}
+
+	return rc;
+}
+
+/**
+ * client_sgl_to_client_dma_buf() - Create head/main/tail sg lists from given
+ *					sg list
+ * @llimgr_p:
+ * @sgl:
+ * @client_dma_buf_p:
+ *
+ * Returns int
+ */
+static int client_sgl_to_client_dma_buf(struct llimgr_obj *llimgr_p,
+					struct scatterlist *sgl,
+					struct client_dma_buffer
+					*client_dma_buf_p)
+{
+	const unsigned long buf_size = client_dma_buf_p->buf_size -
+	    client_dma_buf_p->save4next_size;
+	const enum dma_data_direction dma_direction =
+	    client_dma_buf_p->dma_direction;
+	unsigned long sgl_phys_contig_size;	/* Phys. contig. part size */
+	unsigned long head_buf_size = 0, tail_buf_size = 0;
+	unsigned long main_buf_size = 0;
+	unsigned long last_sgl_size = 0;
+	struct scatterlist *last_sgl = NULL;	/* sgl to split of save4next */
+	int rc;
+
+	pr_debug("sgl=%p nbytes=%lu save4next=%lu client_dma_buf=%p\n",
+		      sgl, client_dma_buf_p->buf_size,
+		      client_dma_buf_p->save4next_size, client_dma_buf_p);
+
+	if (buf_size == 0) {	/* all goes to save4next (if anything) */
+		client_dma_buf_p->sg_save4next = sgl;
+		return 0;
+	}
+
+	/* Decide on type of mapping: MLLI, DLLI or DLLI after copy */
+	if (buf_size <= DLLI_BUF_LIMIT) {
+		/* Check if possible to map buffer directly as DLLI */
+		if (
+#ifdef CONFIG_NOT_COHERENT_CACHE
+			   /*
+			    * For systems with incoherent cache the
+			    * buffer must be cache line aligned to be
+			    * cosidered for this case
+			    */
+			   ((sgl->offset & CACHE_LINE_MASK) == 0) &&
+			   (((sgl->offset + buf_size) &
+			     CACHE_LINE_MASK) == 0) &&
+#endif
+			   is_sgl_phys_contig(sgl, &sgl_phys_contig_size)) {
+			pr_debug(
+				      "Mapping sgl buffer (0x%08lX B) to DLLI directly\n",
+				      buf_size);
+			main_buf_size = buf_size;
+			/* Leave 0 for tail_buf_size
+			 * 0 for the tail buffer indicates that we use this
+			 * optimization, because in any other case there must
+			 * be some data in the tail buffer (if buf_size>0)
+			 */
+		} else if (buf_size <= DLLI_AUX_BUF_LIMIT) {
+			pr_debug(
+				      "Mapping sgl buffer (0x%08lX B) to DLLI via aux. buffer\n",
+				      buf_size);
+			tail_buf_size = buf_size;
+			/* All data goes to "tail" in order to piggy-back
+			 * over the aux. buffers logic for copying data
+			 * in/out of the temp. DLLI DMA buffer
+			 */
+		}
+	}
+	if ((main_buf_size + tail_buf_size) == 0) {
+		/* If none of the optimizations was applied... */
+		/* Use first SG entry for start alignment */
+		calc_aux_bufs_size((unsigned long)sgl->offset, buf_size,
+				   dma_direction, &head_buf_size,
+				   &tail_buf_size);
+		main_buf_size = buf_size - head_buf_size - tail_buf_size;
+	}
+
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	if (head_buf_size > 0) {
+		client_dma_buf_p->sg_head = sgl;
+		rc = split_sg_list(client_dma_buf_p->sg_head,
+				   &client_dma_buf_p->sg_main, head_buf_size);
+		if (unlikely(rc != 0)) {
+			pr_err("Failed splitting sg_head-sg_main\n");
+			cleanup_client_dma_buf(llimgr_p, client_dma_buf_p);
+			return rc;
+		}
+		last_sgl_size = head_buf_size;
+		last_sgl = client_dma_buf_p->sg_head;
+	} else
+#endif
+		/* Initilize sg_main to given sgl */
+		client_dma_buf_p->sg_main = sgl;
+
+	if (tail_buf_size > 0) {
+		if (main_buf_size > 0) {
+			rc = split_sg_list(client_dma_buf_p->sg_main,
+					   &client_dma_buf_p->sg_tail,
+					   main_buf_size);
+			if (unlikely(rc != 0)) {
+				pr_err("Fail:splitting sg_main-sg_tail\n");
+				cleanup_client_dma_buf(llimgr_p,
+						       client_dma_buf_p);
+				return rc;
+			}
+		} else {	/* All data moved to sg_tail */
+			client_dma_buf_p->sg_tail = client_dma_buf_p->sg_main;
+			client_dma_buf_p->sg_main = NULL;
+		}
+		last_sgl_size = tail_buf_size;
+		last_sgl = client_dma_buf_p->sg_tail;
+	} else if (main_buf_size > 0) {	/* main only */
+		last_sgl_size = main_buf_size;
+		last_sgl = client_dma_buf_p->sg_main;
+	}
+
+	/* Save head/tail sizes */
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	client_dma_buf_p->buf_start_aux_buf_size = head_buf_size;
+#endif
+	client_dma_buf_p->buf_end_aux_buf_size = tail_buf_size;
+
+	if (client_dma_buf_p->save4next_size > 0) {
+		if (last_sgl != NULL) {
+			rc = split_sg_list(last_sgl,
+					   &client_dma_buf_p->sg_save4next,
+					   last_sgl_size);
+			if (unlikely(rc != 0)) {
+				pr_err("Failed splitting sg_save4next\n");
+				cleanup_client_dma_buf(llimgr_p,
+						       client_dma_buf_p);
+				return rc;
+			}
+		} else {	/* Whole buffer goes to save4next */
+			client_dma_buf_p->sg_save4next = sgl;
+		}
+	}
+	return 0;
+}
+
+/**
+ * llimgr_register_client_dma_buf() - Register given client buffer for DMA
+ *					operation.
+ * @llimgr:	 The LLI manager object handle
+ * @user_buf_ptr:	 Pointer in user space of the user buffer
+ * @sgl:	Client provided s/g list. user_buf_ptr is assumed NULL if this
+ *		list is given (!NULL).
+ * @buf_size:	 The user buffer size in bytes (incl. save4next). May be 0.
+ * @save4next_size:	Amount from buffer end to save for next op.
+ *			(split into seperate sgl). May be 0.
+ * @dma_direction:	The DMA direction this buffer would be used for
+ * @client_dma_buf_p:	Pointer to the user DMA buffer "object"
+ *
+ * Register given client buffer for DMA operation.
+ * If user_buf_ptr!=NULL and sgl==NULL it locks the user pages and creates
+ * head/main/tail s/g lists. If sgl!=NULL is splits it into head/main/tail
+ * s/g lists.
+ * Returns 0 for success
+ */
+int llimgr_register_client_dma_buf(void *llimgr,
+				   u8 __user *user_buf_ptr,
+				   struct scatterlist *sgl,
+				   const unsigned long buf_size,
+				   const unsigned long save4next_size,
+				   const enum dma_data_direction dma_direction,
+				   struct client_dma_buffer *client_dma_buf_p)
+{
+	struct llimgr_obj *llimgr_p = (struct llimgr_obj *)llimgr;
+	int rc, tmp;
+
+	CLEAN_DMA_BUFFER_INFO(client_dma_buf_p);
+
+	if (buf_size == 0) {	/* Handle empty buffer */
+		pr_debug("buf_size == 0\n");
+		return 0;
+	}
+
+	if ((user_buf_ptr == NULL) && (sgl == NULL)) {
+		pr_err("NULL user_buf_ptr/sgl\n");
+		return -EINVAL;
+	}
+	if ((user_buf_ptr != NULL) && (sgl != NULL)) {
+		pr_err("Provided with dual buffer info (both user+sgl)\n");
+		return -EINVAL;
+	}
+
+	/* Init. basic/common attributes */
+	client_dma_buf_p->user_buf_ptr = user_buf_ptr;
+	client_dma_buf_p->buf_size = buf_size;
+	client_dma_buf_p->save4next_size = save4next_size;
+	client_dma_buf_p->dma_direction = dma_direction;
+
+	if (user_buf_ptr != NULL) {
+		rc = user_buf_to_client_dma_buf(llimgr_p, client_dma_buf_p);
+	} else {
+		rc = client_sgl_to_client_dma_buf(llimgr_p, sgl,
+						  client_dma_buf_p);
+	}
+	if (unlikely(rc != 0))
+		return rc;
+	/* Since sg_main may be large and we need its nents for each
+	 * dma_map_sg/dma_unmap_sg operation, we count its nents once and
+	 * save the result.
+	 * (for the other sgl's in the object we can count when accessed) */
+	client_dma_buf_p->sg_main_nents =
+	    get_sgl_nents(client_dma_buf_p->sg_main);
+
+	/* Allocate auxilliary buffers for sg_head/sg_tail copies */
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	if ((likely(rc == 0)) &&
+	    (client_dma_buf_p->buf_start_aux_buf_size > 0)) {
+		client_dma_buf_p->buf_start_aux_buf_va =
+		    dma_pool_alloc(llimgr_p->edge_bufs_pool, GFP_KERNEL,
+				   &client_dma_buf_p->buf_start_aux_buf_dma);
+		if (unlikely(client_dma_buf_p->buf_start_aux_buf_va == NULL)) {
+			pr_err("Fail alloc from edge_bufs_pool, head\n");
+			rc = -ENOMEM;
+		} else {
+			pr_debug("start_aux: va=%p dma=0x%08llX\n",
+				      client_dma_buf_p->buf_start_aux_buf_va,
+				      client_dma_buf_p->buf_start_aux_buf_dma);
+		}
+	}
+#endif
+	if ((likely(rc == 0)) && (client_dma_buf_p->buf_end_aux_buf_size > 0)) {
+#ifdef DEBUG
+		if (client_dma_buf_p->buf_end_aux_buf_size >
+				DLLI_AUX_BUF_LIMIT) {
+			pr_err("end_aux_buf size too large = 0x%08lX\n",
+				    client_dma_buf_p->buf_end_aux_buf_size);
+			return -EINVAL;
+		}
+#endif
+		if (client_dma_buf_p->buf_end_aux_buf_size <=
+		    EDGE_BUFS_POOL_ITEM_SIZE) {
+			client_dma_buf_p->buf_end_aux_buf_va =
+			    dma_pool_alloc(llimgr_p->edge_bufs_pool, GFP_KERNEL,
+					   &client_dma_buf_p->
+					   buf_end_aux_buf_dma);
+		} else {
+			/* Allocate from the dedicated DLLI buffers pool */
+			client_dma_buf_p->buf_end_aux_buf_va =
+			    dma_pool_alloc(llimgr_p->dlli_bufs_pool, GFP_KERNEL,
+					   &client_dma_buf_p->
+					   buf_end_aux_buf_dma);
+		}
+		if (unlikely(client_dma_buf_p->buf_end_aux_buf_va == NULL)) {
+			pr_err("Fail:allocating from aux. buf for tail\n");
+			rc = -ENOMEM;
+		} else {
+			pr_debug("end_aux: va=%p dma=0x%08llX\n",
+				client_dma_buf_p->buf_end_aux_buf_va,
+				(long long unsigned int)
+				client_dma_buf_p->buf_end_aux_buf_dma);
+		}
+	}
+
+	/* Map the main sglist (head+tail would not be used for DMA) */
+	if (likely(rc == 0) && (client_dma_buf_p->sg_main != NULL)) {
+		tmp = dma_map_sg(llimgr_p->dev, client_dma_buf_p->sg_main,
+				 client_dma_buf_p->sg_main_nents,
+				 dma_direction);
+		if (unlikely(tmp == 0)) {
+			pr_err("dma_map_sg failed\n");
+			rc = -ENOMEM;
+		}
+	}
+
+#ifdef DEBUG
+	if (likely(rc == 0))
+		dump_client_buf_pages(client_dma_buf_p);
+#endif
+
+	if (unlikely(rc != 0)) {	/* Error cases cleanup */
+		cleanup_client_dma_buf(llimgr_p, client_dma_buf_p);
+	}
+
+	return rc;
+}
+
+/**
+ * llimgr_deregister_client_dma_buf() - Unmap given user DMA buffer
+ * @llimgr:
+
+ * @client_dma_buf_p:	 User DMA buffer object
+ *
+ * Unmap given user DMA buffer (flush and unlock pages)
+ * (this function can handle client_dma_buffer of size 0)
+ */
+void llimgr_deregister_client_dma_buf(void *llimgr,
+				      struct client_dma_buffer
+				      *client_dma_buf_p)
+{
+	struct llimgr_obj *llimgr_p = (struct llimgr_obj *)llimgr;
+
+	/* Cleanup DMA mappings */
+	if (client_dma_buf_p->sg_main != NULL) {
+		dma_unmap_sg(llimgr_p->dev, client_dma_buf_p->sg_main,
+			     client_dma_buf_p->sg_main_nents,
+			     client_dma_buf_p->dma_direction);
+	}
+	cleanup_client_dma_buf(llimgr_p, client_dma_buf_p);
+}
+
+/**
+ * get_sgl_nents() - Get (count) the number of entries in given s/g list
+ *	(used in order to be able to invoke sg_copy_to/from_buffer)
+ * @sgl:	Counted s/g list entries
+ */
+static inline unsigned int get_sgl_nents(struct scatterlist *sgl)
+{
+	int cnt = 0;
+	struct scatterlist *cur_sge;
+	for_each_valid_sg(sgl, cur_sge)
+	    cnt++;
+	return cnt;
+}
+
+/**
+ * copy_to_from_aux_buf() - Copy to/from given aux.buffer from/to given s/g list
+ * @to_buf:	 "TRUE" for copying to the given buffer from sgl
+ * @sgl:	 The S/G list data source/target
+ * @to_buf:	 Target/source buffer
+ * @buf_len:	 Buffer length
+ *
+ * Returns int
+ */
+static inline int copy_to_from_aux_buf(bool to_buf,
+				       struct scatterlist *sgl, void *buf_p,
+				       size_t buf_len)
+{
+	size_t copied_cnt;
+	unsigned int nents = get_sgl_nents(sgl);
+
+	if (to_buf)
+		copied_cnt = sg_copy_to_buffer(sgl, nents, buf_p, buf_len);
+	else
+		copied_cnt = sg_copy_from_buffer(sgl, nents, buf_p, buf_len);
+
+	if (copied_cnt < buf_len) {
+		pr_err("Failed copying %s buf of %zu B\n",
+			    to_buf ? "to" : "from", buf_len);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/**
+ * sync_client_dma_buf() - Sync. pages before DMA to device at given offset in
+ *				the user buffer
+ * @dev:	 Associated device structure
+ * @client_dma_buf_p:	 The user DMA buffer object
+ * @for_device:	 Set to "true" to sync before device DMA op.
+ * @dma_direction:	 DMA direction for sync.
+ *
+ * Returns int 0 for success
+ */
+static int sync_client_dma_buf(struct device *dev,
+			       struct client_dma_buffer *client_dma_buf_p,
+			       const bool for_device,
+			       const enum dma_data_direction dma_direction)
+{
+	const bool is_from_device = ((dma_direction == DMA_BIDIRECTIONAL) ||
+				     (dma_direction == DMA_FROM_DEVICE));
+	const bool is_to_device = ((dma_direction == DMA_BIDIRECTIONAL) ||
+				   (dma_direction == DMA_TO_DEVICE));
+	int rc;
+
+	pr_debug("DMA buf %p (0x%08lX B) for %s\n",
+		      client_dma_buf_p, client_dma_buf_p->buf_size,
+		      for_device ? "device" : "cpu");
+
+	if (for_device) {
+		/* Copy out aux. buffers if required */
+		/* We should copy before dma_sync_sg_for_device */
+		if (is_to_device) {
+#ifdef CONFIG_NOT_COHERENT_CACHE
+			if (client_dma_buf_p->sg_head != NULL) {
+				rc = copy_to_from_aux_buf(true,
+						  client_dma_buf_p->sg_head,
+						  client_dma_buf_p->
+						  buf_start_aux_buf_va,
+						  client_dma_buf_p->
+						  buf_start_aux_buf_size);
+				if (rc != 0)
+					return rc;
+			}
+#endif
+			if (client_dma_buf_p->sg_tail != NULL) {
+				rc = copy_to_from_aux_buf(true,
+						  client_dma_buf_p->sg_tail,
+						  client_dma_buf_p->
+						  buf_end_aux_buf_va,
+						  client_dma_buf_p->
+						  buf_end_aux_buf_size);
+				if (rc != 0)
+					return rc;
+			}
+		}
+		if (client_dma_buf_p->sg_main != NULL) {
+			dma_sync_sg_for_device(dev, client_dma_buf_p->sg_main,
+					       client_dma_buf_p->sg_main_nents,
+					       dma_direction);
+		}
+
+	} else {		/* for CPU */
+		if (client_dma_buf_p->sg_main != NULL) {
+			dma_sync_sg_for_cpu(dev, client_dma_buf_p->sg_main,
+					    client_dma_buf_p->sg_main_nents,
+					    dma_direction);
+		}
+		/* Copy back from aux. buffers */
+		/* We should copy after dma_sync_sg_for_cpu */
+		if (is_from_device) {
+#ifdef CONFIG_NOT_COHERENT_CACHE
+			if (client_dma_buf_p->sg_head != NULL) {
+				rc = copy_to_from_aux_buf(false,
+						  client_dma_buf_p->sg_head,
+						  client_dma_buf_p->
+						  buf_start_aux_buf_va,
+						  client_dma_buf_p->
+						  buf_start_aux_buf_size);
+				if (rc != 0)
+					return rc;
+			}
+#endif
+			if (client_dma_buf_p->sg_tail != NULL) {
+				rc = copy_to_from_aux_buf(false,
+						  client_dma_buf_p->sg_tail,
+						  client_dma_buf_p->
+						  buf_end_aux_buf_va,
+						  client_dma_buf_p->
+						  buf_end_aux_buf_size);
+				if (rc != 0)
+					return rc;
+			}
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * llimgr_copy_from_client_buf_save4next() - Copy from the sg_save4next chunk
+ *	of the client DMA buffer to given buffer.
+ *	Used to save hash block remainder.
+ *
+ * @client_dma_buf_p:	The client DMA buffer with the save4next chunk
+ * @to_buf:		Target buffer to copy to.
+ * @buf_len:		Given buffer length (to avoid buffer overflow)
+ *
+ * Returns number of bytes copied or -ENOMEM if given buffer is too small
+ */
+int llimgr_copy_from_client_buf_save4next(struct client_dma_buffer
+					  *client_dma_buf_p, u8 *to_buf,
+					  unsigned long buf_len)
+{
+	int copied_cnt;
+	struct scatterlist *sgl = client_dma_buf_p->sg_save4next;
+	unsigned int nents;
+
+	if (buf_len < client_dma_buf_p->save4next_size) {
+		pr_err("Invoked for copying %lu B to a buffer of %lu B\n",
+			    client_dma_buf_p->save4next_size, buf_len);
+		copied_cnt = -ENOMEM;
+	} else {
+		nents = get_sgl_nents(sgl);
+		if (nents > 0)
+			copied_cnt = sg_copy_to_buffer(sgl, nents,
+						       to_buf, buf_len);
+		else		/* empty */
+			copied_cnt = 0;
+	}
+	return copied_cnt;
+}
+
+#ifdef DEBUG
+/**
+ * dump_mlli_table() - Dump given MLLI table
+ * @table_start_p:	 Pointer to allocated table buffer
+ * @dma_addr:	 The table's DMA address as given to SeP
+ * @table_size:	 The table size in bytes
+ *
+ */
+static void dump_mlli_table(u32 *table_start_p, dma_addr_t dma_addr,
+			    unsigned long table_size)
+{
+	u32 lli_spad[SEP_LLI_ENTRY_WORD_SIZE];
+	int i;
+	u32 *cur_entry_p;
+	unsigned int num_of_entries = table_size / SEP_LLI_ENTRY_BYTE_SIZE;
+
+	pr_debug("MLLI table at %p (dma_addr=0x%08X) with %u ent.:\n",
+		      table_start_p, (unsigned int)dma_addr, num_of_entries);
+
+	for (i = 0, cur_entry_p = table_start_p + SEP_LLI_ENTRY_WORD_SIZE;
+	     i < num_of_entries; cur_entry_p += SEP_LLI_ENTRY_WORD_SIZE, i++) {
+		/* LE to BE... */
+		SEP_LLI_COPY_FROM_SEP(lli_spad, cur_entry_p);
+		/*
+		 * pr_debug("%02d: addr=0x%08lX , size=0x%08lX\n  %s\n", i,
+		 * SEP_LLI_GET(lli_spad, ADDR),
+		 * SEP_LLI_GET(lli_spad, SIZE),
+		 */
+		pr_debug("%02d: [0x%08X,0x%08X] %s\n", i,
+			      lli_spad[0], lli_spad[1],
+			      i == 0 ? "(next table)" : "");
+	}
+}
+
+/**
+ * llimgr_dump_mlli_tables_list() - Dump all the MLLI tables in a given tables
+ *					list
+ * @mlli_tables_list_p:	 Pointer to tables list structure
+ *
+ */
+void llimgr_dump_mlli_tables_list(struct mlli_tables_list *mlli_tables_list_p)
+{
+	u32 lli_spad[SEP_LLI_ENTRY_WORD_SIZE];
+	u32 *cur_table_p;
+	u32 *next_table_p;
+	u32 *link_entry_p;
+	unsigned long next_table_size;
+	dma_addr_t next_table_dma;
+	u16 table_count = 0;
+
+	/* This loop uses "cur_table_p" as the previous table that
+	 * was already dumped */
+	for (cur_table_p = mlli_tables_list_p->link_to_first_table;
+	     cur_table_p != NULL; cur_table_p = next_table_p, table_count++) {
+
+		if (table_count > mlli_tables_list_p->table_count) {
+			pr_err(
+				"MLLI tables list has more tables than table_cnt=%u. Stopping dump.\n",
+				mlli_tables_list_p->table_count);
+			break;
+		}
+
+		/* The SeP link entry is second in the buffer */
+		link_entry_p = cur_table_p + SEP_LLI_ENTRY_WORD_SIZE;
+		SEP_LLI_COPY_FROM_SEP(lli_spad, link_entry_p);/* LE to BE... */
+		next_table_p = SEP_MLLI_GET_NEXT_VA(cur_table_p);
+		next_table_dma = SEP_LLI_GET(lli_spad, ADDR);
+		next_table_size = SEP_LLI_GET(lli_spad, SIZE);
+		if (next_table_p != NULL)
+			dump_mlli_table(next_table_p,
+					next_table_dma, next_table_size);
+
+	}
+
+}
+#endif /*DEBUG*/
+/*****************************************/
+/* MLLI tables construction functions    */
+/*****************************************/
+/**
+ * set_dlli() - Set given mlli object to function as DLLI descriptor
+ *
+ * @mlli_tables_list_p:	The associated "MLLI" tables list
+ * @dlli_addr:	The DMA address for the DLLI
+ * @dlli_size:	The size in bytes of referenced data
+ *
+ * The MLLI tables list object represents DLLI when its table_count is 0
+ * and the "link_to_first_table" actually points to the data.
+ */
+static inline void set_dlli(struct mlli_tables_list *mlli_tables_list_p,
+			    u32 dlli_addr, u16 dlli_size)
+{
+	u32 lli_spad[SEP_LLI_ENTRY_WORD_SIZE];	/* LLI scratchpad */
+
+	SEP_LLI_INIT(lli_spad);
+	SEP_LLI_SET(lli_spad, ADDR, dlli_addr);
+	SEP_LLI_SET(lli_spad, SIZE, dlli_size);
+	SEP_LLI_COPY_TO_SEP(mlli_tables_list_p->link_to_first_table +
+			    SEP_LLI_ENTRY_WORD_SIZE, lli_spad);
+	mlli_tables_list_p->table_count = 0;
+}
+
+/**
+ * set_last_lli() - Set "Last" bit on last LLI data entry
+ * @last_lli_p:
+ *
+ */
+static inline void set_last_lli(u32 *last_lli_p)
+{
+	u32 lli_spad[SEP_LLI_ENTRY_WORD_SIZE];	/* LLI scratchpad */
+
+	SEP_LLI_COPY_FROM_SEP(lli_spad, last_lli_p);
+	SEP_LLI_SET(lli_spad, LAST, 1);
+	SEP_LLI_COPY_TO_SEP(last_lli_p, lli_spad);
+}
+
+/**
+ * set_last_table() - Set table link to next as NULL (last table).
+ * @mlli_table_p:
+ *
+ */
+static inline void set_last_table(u32 *mlli_table_p)
+{
+	u32 lli_spad[SEP_LLI_ENTRY_WORD_SIZE];	/* LLI scratchpad */
+
+	/* Set SeP link entry */
+	SEP_LLI_INIT(lli_spad);
+	SEP_LLI_SET(lli_spad, FIRST, 1);
+	SEP_LLI_SET(lli_spad, LAST, 1);
+	/* The rest of the field are zero from SEP_LLI_INIT */
+	SEP_LLI_COPY_TO_SEP(mlli_table_p, lli_spad);
+	/* Set NULL for next VA */
+	SEP_MLLI_SET_NEXT_VA_NULL(mlli_table_p);
+}
+
+static inline void link_to_prev_mlli(struct mlli_tables_list_iterator
+				     *mlli_iter_p)
+{
+	u32 lli_spad[SEP_LLI_ENTRY_WORD_SIZE];	/* LLI scratchpad */
+	const u32 cur_mlli_table_size =
+	    (mlli_iter_p->next_lli_idx + 1) * SEP_LLI_ENTRY_BYTE_SIZE;
+	/* +1 for link entry at table start */
+
+	SEP_LLI_INIT(lli_spad);
+	SEP_LLI_SET(lli_spad, ADDR, mlli_iter_p->cur_mlli_dma_addr);
+	SEP_LLI_SET(lli_spad, SIZE, cur_mlli_table_size);
+	SEP_LLI_SET(lli_spad, FIRST, 1);
+	SEP_LLI_SET(lli_spad, LAST, 1);
+	SEP_LLI_COPY_TO_SEP(mlli_iter_p->prev_mlli_table_p +
+			    SEP_LLI_ENTRY_WORD_SIZE, lli_spad);
+	SEP_MLLI_SET_NEXT_VA(mlli_iter_p->prev_mlli_table_p,
+			     mlli_iter_p->cur_mlli_table_p);
+}
+
+/**
+ * terminate_mlli_tables_list() - "NULL" terminate the MLLI tables list and link
+ *				to previous table if any.
+ * @mlli_iter_p:	 MLLI tables list iterator
+ *
+ */
+static inline void terminate_mlli_tables_list(struct mlli_tables_list_iterator
+					      *mlli_iter_p)
+{
+	u32 *last_lli_p = mlli_iter_p->cur_mlli_table_p +
+	    ((FIRST_DATA_LLI_INDEX + mlli_iter_p->next_lli_idx - 1) *
+	     SEP_LLI_ENTRY_WORD_SIZE);
+
+	if (mlli_iter_p->prev_mlli_table_p != NULL)
+		link_to_prev_mlli(mlli_iter_p);
+
+	if (mlli_iter_p->cur_mlli_table_p != NULL) {
+		set_last_lli(last_lli_p);
+		set_last_table(mlli_iter_p->cur_mlli_table_p);
+	}
+
+}
+
+static int alloc_next_mlli(struct llimgr_obj *llimgr_p,
+			   struct mlli_tables_list *mlli_tables_list_p,
+			   struct mlli_tables_list_iterator *mlli_iter_p)
+{
+	u32 *last_lli_p = mlli_iter_p->cur_mlli_table_p +
+	    ((FIRST_DATA_LLI_INDEX + mlli_iter_p->next_lli_idx - 1) *
+	     SEP_LLI_ENTRY_WORD_SIZE);
+
+	if (mlli_iter_p->prev_mlli_table_p != NULL) {
+		/* "prev == NULL" means that we are on the stub link entry. */
+		/* If we have "prev" it means that we already have one table */
+		link_to_prev_mlli(mlli_iter_p);
+		set_last_lli(last_lli_p);
+	}
+
+	mlli_iter_p->prev_mlli_table_p = mlli_iter_p->cur_mlli_table_p;
+
+	/* Allocate MLLI table buffer from the pool */
+	mlli_iter_p->cur_mlli_table_p =
+	    dma_pool_alloc(llimgr_p->mlli_cache, GFP_KERNEL,
+			   &mlli_iter_p->cur_mlli_dma_addr);
+	if (mlli_iter_p->cur_mlli_table_p == NULL) {
+		pr_err("Failed allocating MLLI table\n");
+		return -ENOMEM;
+	}
+
+	/* Set DMA addr to the table start from SeP perpective */
+	mlli_iter_p->cur_mlli_dma_addr += SEP_LLI_ENTRY_BYTE_SIZE;
+	mlli_iter_p->next_lli_idx = 0;
+	mlli_iter_p->cur_mlli_accum_data = 0;
+	/* Set as last until linked to next (for keeping a valid tables list) */
+	set_last_table(mlli_iter_p->cur_mlli_table_p);
+
+	mlli_tables_list_p->table_count++;
+
+	return 0;
+}
+
+/**
+ * append_lli_to_mlli() - Set current LLI info and progress to next entry
+ * @mlli_iter_p:
+ * @dma_addr:
+ * @dma_size:
+ *
+ * Returns void
+ */
+static inline void append_lli_to_mlli(struct mlli_tables_list_iterator
+				      *mlli_iter_p, dma_addr_t dma_addr,
+				      u32 data_size)
+{
+	u32 lli_spad[SEP_LLI_ENTRY_WORD_SIZE];	/* LLI scratchpad */
+	u32 *next_lli_p;
+
+	next_lli_p = mlli_iter_p->cur_mlli_table_p +
+	    ((FIRST_DATA_LLI_INDEX + mlli_iter_p->next_lli_idx) *
+	     SEP_LLI_ENTRY_WORD_SIZE);
+	/* calc. includes first link entry */
+	/* Create LLI entry */
+	SEP_LLI_INIT(lli_spad);
+	SEP_LLI_SET(lli_spad, ADDR, dma_addr);
+	SEP_LLI_SET(lli_spad, SIZE, data_size);
+	SEP_LLI_COPY_TO_SEP(next_lli_p, lli_spad);
+
+	mlli_iter_p->next_lli_idx++;
+	mlli_iter_p->cur_mlli_accum_data += data_size;
+}
+
+/**
+ * append_data_to_mlli() - Append given DMA data chunk to given MLLI tables list
+ * @llimgr_p:
+ * @mlli_tables_list_p:
+ * @mlli_iter_p:
+ * @dma_addr:	 LLI entry ADDR
+ * @dma_size:	 LLI entry SIZE
+ *
+ * Append given DMA data chunk to given MLLI tables list.
+ * Based on given iterator the LLI entry may be added in the current table
+ * or if table end reached, a new table would be allocated.
+ * In the latter case the previous MLLI table would be linked to current.
+ * If not all data fits into current table limit, it would be split in to
+ * last LLI in this table and first LLI in the next table (assuming overall
+ * DMA data is not more than max_data_per_mlli)
+ * Returns int 0 for success
+ */
+static int append_data_to_mlli(struct llimgr_obj *llimgr_p,
+			       struct mlli_tables_list *mlli_tables_list_p,
+			       struct mlli_tables_list_iterator *mlli_iter_p,
+			       dma_addr_t data_dma_addr, u32 data_size)
+{
+	u32 remaining_data_for_mlli;
+	int rc;
+
+#ifdef DEBUG
+	if (data_size > llimgr_p->max_data_per_mlli) {
+		pr_err(
+			    "Given data size (%uB) is too large for MLLI (%luB)\n",
+			    data_size, llimgr_p->max_data_per_mlli);
+		return -EINVAL;
+	}
+#endif
+
+	if (mlli_iter_p->next_lli_idx >= llimgr_p->max_lli_num) {
+		/* Reached end of current MLLI table */
+		rc = alloc_next_mlli(llimgr_p, mlli_tables_list_p, mlli_iter_p);
+		if (rc != 0)
+			return rc;
+	}
+
+	remaining_data_for_mlli =
+	    llimgr_p->max_data_per_mlli - mlli_iter_p->cur_mlli_accum_data;
+
+	if (data_size > remaining_data_for_mlli) {
+		/* This chunk does not fit in this table */
+		if (remaining_data_for_mlli > 0) {/* Space left in this MLLI */
+			/* Add to this table first "half" of the chunk */
+			append_lli_to_mlli(mlli_iter_p, data_dma_addr,
+					   remaining_data_for_mlli);
+			pr_debug("Splitting SG of %uB to %uB+%uB\n",
+				      data_size, remaining_data_for_mlli,
+				      data_size - remaining_data_for_mlli);
+			/* Set the remainder to be pushed in the new table */
+			data_dma_addr += remaining_data_for_mlli;
+			data_size -= remaining_data_for_mlli;
+		}
+		rc = alloc_next_mlli(llimgr_p, mlli_tables_list_p, mlli_iter_p);
+		if (rc != 0)
+			return rc;
+	}
+
+	append_lli_to_mlli(mlli_iter_p, data_dma_addr, data_size);
+
+	return 0;
+}
+
+/**
+ * init_mlli_tables_list() - Initialize MLLI tables list object with user buffer
+ *				information
+ * @mlli_tables_list_p:
+ * @dma_direction:
+ * @user_buf_ptr:
+ * @buf_size:
+ *
+ * Returns int 0 on success
+ */
+static int init_mlli_tables_list(struct llimgr_obj *llimgr_p,
+				 struct mlli_tables_list *mlli_tables_list_p,
+				 struct client_dma_buffer *client_memref,
+				 enum dma_data_direction dma_direction)
+{
+	const bool is_inbuf = (dma_direction == DMA_TO_DEVICE) ||
+	    (dma_direction == DMA_BIDIRECTIONAL);
+	const bool is_outbuf = (dma_direction == DMA_FROM_DEVICE) ||
+	    (dma_direction == DMA_BIDIRECTIONAL);
+	const bool is_memref_inbuf =
+	    (client_memref->dma_direction == DMA_TO_DEVICE) ||
+	    (client_memref->dma_direction == DMA_BIDIRECTIONAL);
+	const bool is_memref_outbuf =
+	    (client_memref->dma_direction == DMA_FROM_DEVICE) ||
+	    (client_memref->dma_direction == DMA_BIDIRECTIONAL);
+	int rc;
+
+#ifdef DEBUG
+	/* Verify that given MLLI tables list is "clean" */
+	if (mlli_tables_list_p->user_memref != NULL) {
+		pr_err("Got \"dirty\" MLLI tables list!\n");
+		return -EINVAL;
+	}
+#endif	 /*DEBUG*/
+	    MLLI_TABLES_LIST_INIT(mlli_tables_list_p);
+	if (client_memref->buf_size > 0) {
+		/* Validate buffer access permissions */
+		if (is_inbuf && !is_memref_inbuf) {
+			pr_err("No read access (%d) to user buffer @ %p\n",
+				    client_memref->dma_direction,
+				    client_memref->user_buf_ptr);
+			return -EFAULT;
+		}
+		if (is_outbuf && !is_memref_outbuf) {
+			pr_err("No write access (%d), data buffer @ %p\n",
+				    client_memref->dma_direction,
+				    client_memref->user_buf_ptr);
+			return -EFAULT;
+		}
+
+	}
+	rc = sync_client_dma_buf(llimgr_p->dev,
+				 client_memref, true /*for device */ ,
+				 dma_direction);
+	if (likely(rc == 0)) {
+		/* Init. these fields only if the operations above succeeded */
+		mlli_tables_list_p->user_memref = client_memref;
+		mlli_tables_list_p->data_direction = dma_direction;
+	}
+	return rc;
+}
+
+/**
+ * cleanup_mlli_tables_list() - Cleanup MLLI tables resources
+ * @llimgr_p:	 LLI-manager pointer
+ * @mlli_table_p:	The MLLI tables list object
+ * @is_data_dirty:	If true (!0) the (output) data pages are marked as dirty
+ *
+ * Cleanup MLLI tables resources
+ * This function may be invoked for partially constructed MLLI tables list
+ * as tests for existence of released resources before trying to release.
+ */
+static void cleanup_mlli_tables_list(struct llimgr_obj *llimgr_p,
+				     struct mlli_tables_list
+				     *mlli_tables_list_p, int is_data_dirty)
+{
+	dma_addr_t cur_mlli_dma_addr;
+	dma_addr_t next_mlli_dma_addr;
+	u32 *cur_mlli_p;
+	u32 *next_mlli_p;
+	u32 *link_entry_p;
+	u32 lli_spad[SEP_LLI_ENTRY_WORD_SIZE];
+
+	pr_debug("mlli_tables_list_p=%p user_memref=%p table_count=%u\n",
+		      mlli_tables_list_p, mlli_tables_list_p->user_memref,
+		      mlli_tables_list_p->table_count);
+	/* Initialize to the first MLLI table */
+	if (mlli_tables_list_p->table_count > 0) {
+		cur_mlli_p =
+		    SEP_MLLI_GET_NEXT_VA(mlli_tables_list_p->
+					 link_to_first_table);
+		link_entry_p =
+		    mlli_tables_list_p->link_to_first_table +
+		    SEP_LLI_ENTRY_WORD_SIZE;
+		/* LE to BE... */
+		SEP_LLI_COPY_FROM_SEP(lli_spad, link_entry_p);
+		/* Actual allocation DMA address is one entry before
+		 * saved address */
+		cur_mlli_dma_addr =
+		    SEP_LLI_GET(lli_spad, ADDR) - SEP_LLI_ENTRY_BYTE_SIZE;
+	} else {
+		/*DLLI*/ cur_mlli_p = NULL;
+		/* Skip the cleanup loop below */
+	}
+
+	/* Cleanup MLLI tables */
+	while (cur_mlli_p != NULL) {
+		pr_debug("Freeing MLLI table buffer at %p (%08llX)\n",
+			cur_mlli_p, (long long unsigned int)cur_mlli_dma_addr);
+		/* The link entry follows the first entry that holds next VA */
+		link_entry_p = cur_mlli_p + SEP_LLI_ENTRY_WORD_SIZE;
+		SEP_LLI_COPY_FROM_SEP(lli_spad, link_entry_p);/* LE to BE... */
+		/* Save link pointers before freeing the table */
+		next_mlli_p = SEP_MLLI_GET_NEXT_VA(cur_mlli_p);
+		/* Actual allocation DMA address is one entry before
+		 * saved address */
+		next_mlli_dma_addr =
+		    SEP_LLI_GET(lli_spad, ADDR) - SEP_LLI_ENTRY_BYTE_SIZE;
+		dma_pool_free(llimgr_p->mlli_cache,
+			      cur_mlli_p, cur_mlli_dma_addr);
+
+		cur_mlli_p = next_mlli_p;
+		cur_mlli_dma_addr = next_mlli_dma_addr;
+	}
+
+	if ((is_data_dirty) && (mlli_tables_list_p->user_memref != NULL))
+		sync_client_dma_buf(llimgr_p->dev,
+				    mlli_tables_list_p->user_memref,
+				    false /*for CPU */ ,
+				    mlli_tables_list_p->data_direction);
+
+	/* Clear traces (pointers) of released resources */
+	MLLI_TABLES_LIST_INIT(mlli_tables_list_p);
+
+}
+
+/**
+ * process_as_dlli() - Consider given MLLI request as DLLI and update the MLLI
+ *			object if possible. Otherwise return error.
+ *
+ * @mlli_tables_p:	Assciated MLLI object with client memref set.
+ * @prepend_data:	Optional prepend data
+ * @prepend_data_size:	Optional prepend data size
+ */
+static int process_as_dlli(struct mlli_tables_list *mlli_tables_p,
+			   dma_addr_t prepend_data,
+			   unsigned long prepend_data_size)
+{
+	struct client_dma_buffer *memref = mlli_tables_p->user_memref;
+	u32 dma_size = memref->buf_size - memref->save4next_size;
+
+	/* Prepend data only (or 0 data) case */
+	if (memref->buf_size == 0) {
+		/* Handle 0-sized buffer or prepend_data only */
+		set_dlli(mlli_tables_p, prepend_data, prepend_data_size);
+		return 0;
+	}
+
+	/* Cannot concatenate prepend_data to client data with DLLI */
+	if (prepend_data_size > 0)
+		return -EINVAL;
+
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	/* None of the DLLI cases is possible with cache line alignment buf. */
+	if (memref->sg_head != NULL)
+		return -EINVAL;
+#endif
+
+	/* Physically contiguous buffer case */
+	if (memref->sg_tail == NULL) {
+		/* If no sg_tail it is an indication that sg_main is phys.
+		 * contiguous - DLLI directly to client buffer */
+		set_dlli(mlli_tables_p, sg_dma_address(memref->sg_main),
+			 dma_size);
+		return 0;
+	}
+
+	/* Small buffer copied to aux. buffer */
+	if (memref->sg_main == NULL) {
+		/* If not sg_main (i.e., only sg_tail) we can
+		 * DLLI to the aux. buf. */
+		set_dlli(mlli_tables_p, memref->buf_end_aux_buf_dma, dma_size);
+		return 0;
+	}
+
+	return -EINVAL;		/* Not suitable for DLLI */
+}
+
+/**
+ * llimgr_create_mlli() - Create MLLI tables list for given user buffer
+ * @llimgr:
+ * @mlli_tables_p:	 A pointer to MLLI tables list object
+ * @dma_direction:	 The DMA direction of data flow
+ * @user_memref:	 User DMA memory reference (locked pages, etc.)
+ * @prepend_data:	 DMA address of data buffer to prepend before user data
+ * @prepend_data_size:	 Size of prepend_data (0 if none)
+ *
+ * Returns int 0 on success
+ */
+int llimgr_create_mlli(void *llimgr,
+		       struct mlli_tables_list *mlli_tables_p,
+		       enum dma_data_direction dma_direction,
+		       struct client_dma_buffer *client_memref,
+		       dma_addr_t prepend_data, unsigned long prepend_data_size)
+{
+	struct llimgr_obj *llimgr_p = (struct llimgr_obj *)llimgr;
+	unsigned long remaining_main_data;
+	unsigned int remaining_main_sg_ents;
+	struct mlli_tables_list_iterator mlli_iter;
+	unsigned long client_dma_size;
+	unsigned long cur_sge_len = 0;
+	dma_addr_t cur_sge_addr;
+	/* cur_lli_index initialized to end of "virtual" link table */
+	struct scatterlist *cur_sg_entry;
+	int rc;
+
+	/* client_memref must exist even if no user data (buf_size == 0), i.e.,
+	 * just prepend_data. */
+	if (client_memref == NULL) {
+		pr_err("Client memref is NULL.\n");
+		return -EINVAL;
+	}
+
+	client_dma_size =
+		client_memref->buf_size - client_memref->save4next_size;
+
+	SEP_LOG_TRACE(
+		      "buf @ 0x%08lX, size=0x%08lX B, prepend_size=0x%08lX B, dma_dir=%d\n",
+		      (unsigned long)client_memref->user_buf_ptr,
+		      client_memref->buf_size, prepend_data_size,
+		      dma_direction);
+
+	rc = init_mlli_tables_list(llimgr_p,
+				   mlli_tables_p, client_memref, dma_direction);
+	if (unlikely(rc != 0))
+		return rc;	/* No resources to cleanup */
+
+	rc = process_as_dlli(mlli_tables_p, prepend_data, prepend_data_size);
+	if (rc == 0)		/* Mapped as DLLI */
+		return 0;
+	rc = 0;			/* In case checked below before updating */
+
+	/* Initialize local state to empty list */
+	mlli_iter.prev_mlli_table_p = NULL;
+	mlli_iter.cur_mlli_table_p = mlli_tables_p->link_to_first_table;
+	/* "First" table is the stub in struct mlli_tables_list, so we
+	 * mark it as "full" by setting next_lli_idx to maximum */
+	mlli_iter.next_lli_idx = llimgr_p->max_lli_num;
+	mlli_iter.cur_mlli_accum_data = 0;
+
+	if (prepend_data_size > 0) {
+		rc = append_data_to_mlli(llimgr_p, mlli_tables_p, &mlli_iter,
+					 prepend_data, prepend_data_size);
+		if (unlikely(rc != 0)) {
+			pr_err("Fail: add LLI entry for prepend_data\n");
+			goto mlli_create_exit;	/* do cleanup */
+		}
+	}
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	if (client_memref->buf_start_aux_buf_size > 0) {
+		rc = append_data_to_mlli(llimgr_p, mlli_tables_p, &mlli_iter,
+					 client_memref->buf_start_aux_buf_dma,
+					 client_memref->buf_start_aux_buf_size);
+		if (unlikely(rc != 0)) {
+			pr_err("Fail: add LLI entry for start_aux_buf\n");
+			goto mlli_create_exit;	/* do cleanup */
+		}
+	}
+#endif
+
+	/* Calculate amount of "main" data before last LLI */
+	/* (round to crypto block multiple to avoid having MLLI table
+	 * which is not last with non-crypto-block multiple */
+	remaining_main_data =
+	    (prepend_data_size + client_dma_size -
+	     client_memref->buf_end_aux_buf_size) & ~MAX_CRYPTO_BLOCK_MASK;
+	/* Now remove the data outside of the main buffer */
+	remaining_main_data -= prepend_data_size;
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	remaining_main_data -= client_memref->buf_start_aux_buf_size;
+#endif
+	cur_sg_entry = client_memref->sg_main;
+	remaining_main_sg_ents = client_memref->sg_main_nents;
+
+	/* construct MLLI tables for sg_main list */
+	for (cur_sg_entry = client_memref->sg_main,
+	     remaining_main_sg_ents = client_memref->sg_main_nents;
+	     cur_sg_entry != NULL;
+	     cur_sg_entry = sg_next(cur_sg_entry), remaining_main_sg_ents--) {
+		/* Get current S/G entry length */
+		cur_sge_len = sg_dma_len(cur_sg_entry);
+		cur_sge_addr = sg_dma_address(cur_sg_entry);
+
+		/* Reached end of "main" data which is multiple of largest
+		 * crypto block? (consider split to next/last table).
+		 * (Check if needs to skip to next table for 2nd half) */
+		if ((remaining_main_data > 0) &&
+		    (cur_sge_len >=
+		     remaining_main_data) /*last "main" data */ &&
+		    /* NOT at end of table (i.e.,starting a new table anyway) */
+		    (llimgr_p->max_lli_num > mlli_iter.next_lli_idx) &&
+		    /* Checks if remainig entries don't fit into this table */
+		    (remaining_main_sg_ents -
+		     ((cur_sge_len == remaining_main_data) ? 1 : 0) +
+		     ((client_memref->buf_end_aux_buf_size > 0) ? 1 : 0)) >
+		    (llimgr_p->max_lli_num - mlli_iter.next_lli_idx)) {
+			/* "tail" would be in next/last table */
+			/* Add last LLI for "main" data */
+			rc = append_data_to_mlli(llimgr_p, mlli_tables_p,
+						 &mlli_iter, cur_sge_addr,
+						 remaining_main_data);
+			if (unlikely(rc != 0)) {
+				pr_err(
+					    "Failed adding LLI entry for sg_main (last).\n");
+				goto mlli_create_exit;	/* do cleanup */
+			}
+			cur_sge_len -= remaining_main_data;
+			cur_sge_addr += remaining_main_data;
+			/* Skip to next MLLI for tail data */
+			rc = alloc_next_mlli(llimgr_p, mlli_tables_p,
+					     &mlli_iter);
+			if (unlikely(rc != 0)) {
+				pr_err("Fail add MLLI table for tail.\n");
+				goto mlli_create_exit;	/* do cleanup */
+			}
+		}
+
+		if (likely(cur_sge_len > 0)) {
+			/* When entry is split to next table, this would append
+			 * the second half of it. */
+			rc = append_data_to_mlli(llimgr_p, mlli_tables_p,
+						 &mlli_iter, cur_sge_addr,
+						 cur_sge_len);
+			if (unlikely(rc != 0)) {
+				pr_err("Fail add LLI entry for sg_main\n");
+				goto mlli_create_exit;	/* do cleanup */
+			}
+		}
+	}			/*for */
+
+	if (remaining_main_sg_ents > 0) {
+		pr_err("Remaining sg_ents>0 after end of S/G list!\n");
+		rc = -EINVAL;
+		goto mlli_create_exit;	/* do cleanup */
+	}
+
+	/* Append end aux. buffer */
+	if (client_memref->buf_end_aux_buf_size > 0) {
+		rc = append_data_to_mlli(llimgr_p, mlli_tables_p, &mlli_iter,
+					 client_memref->buf_end_aux_buf_dma,
+					 client_memref->buf_end_aux_buf_size);
+		if (unlikely(rc != 0)) {
+			pr_err("Fail: add LLI entry for end_aux_buf\n");
+			goto mlli_create_exit;	/* do cleanup */
+		}
+	}
+
+	terminate_mlli_tables_list(&mlli_iter);
+	pr_debug("MLLI %u tables (rc=%d):\n",
+		      mlli_tables_p->table_count, rc);
+	llimgr_dump_mlli_tables_list(mlli_tables_p);
+
+ mlli_create_exit:
+	if (rc != 0) {
+		/* The MLLI tables list are always consistent at bail-out points
+		 * so we can use the simple cleanup function.                 */
+		cleanup_mlli_tables_list(llimgr_p, mlli_tables_p, 0);
+	}
+
+	return rc;
+
+}
+
+/**
+ * llimgr_destroy_mlli() - Cleanup resources of given MLLI tables list object
+ *				(if has any tables)
+ * @llimgr:
+ * @mlli_tables_p:
+ *
+ */
+void llimgr_destroy_mlli(void *llimgr,
+			 struct mlli_tables_list *mlli_tables_p)
+{
+	struct llimgr_obj *llimgr_p = (struct llimgr_obj *)llimgr;
+	const bool is_dirty =
+	    (mlli_tables_p->data_direction == DMA_BIDIRECTIONAL) ||
+	    (mlli_tables_p->data_direction == DMA_FROM_DEVICE);
+
+	cleanup_mlli_tables_list(llimgr_p, mlli_tables_p, is_dirty);
+}
+
+/**
+ * llimgr_mlli_to_seprpc_memref() - Convert given MLLI tables list into a SeP
+ *					RPC memory reference format
+ * @mlli_tables_p:	 The source MLLI table
+ * @memref_p:	 The destination RPC memory reference
+ *
+ */
+void llimgr_mlli_to_seprpc_memref(struct mlli_tables_list *mlli_tables_p,
+				  struct seprpc_memref *memref_p)
+{
+	u32 xlli_addr;
+	u16 xlli_size;
+	u16 table_count;
+
+	llimgr_get_mlli_desc_info(mlli_tables_p,
+				  &xlli_addr, &xlli_size, &table_count);
+
+	memref_p->ref_type = cpu_to_le32(table_count > 0 ?
+					 SEPRPC_MEMREF_MLLI :
+					 SEPRPC_MEMREF_DLLI);
+	memref_p->location = cpu_to_le32(xlli_addr);
+	memref_p->size = cpu_to_le32(xlli_size);
+	memref_p->count = cpu_to_le32(table_count);
+}
+
+/**
+ * llimgr_get_mlli_desc_info() - Get the MLLI info required for a descriptor.
+ *
+ * @mlli_tables_p:	The source MLLI table
+ * @first_table_addr_p:	First table DMA address or data DMA address for DLLI
+ * @first_table_size_p:	First table size in bytes or data size for DLLI
+ * @num_of_tables:	Number of MLLI tables in the list (0 for DLLI)
+ *
+ * In case of DLLI, first_table_* refers to the client DMA buffer (DLLI info.)
+ */
+void llimgr_get_mlli_desc_info(struct mlli_tables_list *mlli_tables_p,
+			       u32 *first_table_addr_p,
+			       u16 *first_table_size_p,
+			       u16 *num_of_tables_p)
+{
+	u32 link_lli_spad[SEP_LLI_ENTRY_WORD_SIZE];
+	u32 *first_mlli_link_p;
+
+	first_mlli_link_p = mlli_tables_p->link_to_first_table +
+	    SEP_LLI_ENTRY_WORD_SIZE;
+	SEP_LLI_COPY_FROM_SEP(link_lli_spad, first_mlli_link_p);
+	/* Descriptor are read by direct-access which takes care of
+	 * swapping from host endianess to SeP endianess, so we need
+	 * to revert the endiness in the link LLI entry */
+	*first_table_addr_p = SEP_LLI_GET(link_lli_spad, ADDR);
+	*first_table_size_p = SEP_LLI_GET(link_lli_spad, SIZE);
+	*num_of_tables_p = mlli_tables_p->table_count;
+}
diff --git a/drivers/staging/sep54/lli_mgr.h b/drivers/staging/sep54/lli_mgr.h
new file mode 100644
index 0000000..ab4bec9
--- /dev/null
+++ b/drivers/staging/sep54/lli_mgr.h
@@ -0,0 +1,291 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+/*!                                                                           *
+ * \file lli_mgr.h                                                            *
+ * \brief LLI logic: API definition                                           *
+ *                                                                            */
+
+#ifndef __LLI_MGR_H__
+#define __LLI_MGR_H__
+
+#include <linux/dma-mapping.h>
+#include <linux/device.h>
+#include "sep_lli.h"
+
+#define LLIMGR_NULL_HANDLE NULL
+
+/* Using this macros assures correct initialization in case of future changes */
+#define MLLI_TABLES_LIST_INIT(mlli_tables_list_ptr) do {		\
+	memset((mlli_tables_list_ptr), 0,				\
+	       sizeof(struct mlli_tables_list));			\
+} while (0)		/* Executed once */
+
+/**
+ * llimgr_is_same_mlli_tables() - Tell if given tables list are pointing to the
+ *					same tables list
+ * @llimgr:
+ * @mlli_tables1:	 First MLLI tables list object
+ * @mlli_tables2:	 Second MLLI tables list object
+ *
+ * Tell if given tables list are pointing to the same tables list
+ * (used to identify in-place operation)
+ */
+#define llimgr_is_same_mlli_tables(llimgr, mlli_tables1, mlli_tables2)   \
+	((mlli_tables1)->user_memref == (mlli_tables2)->user_memref)
+
+/* Clean struct client_dma_buffer buffer info */
+#define CLEAN_DMA_BUFFER_INFO(_client_dma_buf_p) \
+	memset(_client_dma_buf_p, 0, sizeof(struct client_dma_buffer))
+
+/**
+ * struct client_dma_buffer - Client DMA buffer object
+ * @buf_size: buffer size in bytes
+ * @user_buf_ptr:	Pointer to start of buffer in user space. May be NULL
+ *			for buf_size==0 or if the user is the kernel (given
+ *			scatterlist)
+ * @num_of_pages:	Number of pages in user_pages array
+ * @user_pages:		Locked user pages (for user space buffer)
+ * @dma_direction:	DMA direction over given buffer mapping
+ * @sg_head:		S/G list of buffer header (memcopied)
+ * @sg_main:		S/G list of buffer body (for DMA)
+ * @sg_tail:		S/G list of buffer tail (memcopied)
+ * @sg_main_nents:	Num. of S/G entries for sg_main
+ * @sg_save4next:	S/G list of buffer chunk past "tail" for copying to
+ *			side buffer for next operation (for hash block remains)
+ * @sg_save4next_nents:	Num. of S/G entries for sg_save4next
+ * @save4next_size:	Size of data in sg_save4next
+ * @buf_end_aux_buf_va:	Tail aux. buffer virtual address
+ * @buf_end_aux_buf_dma:	DMA address of buf_end_aux_buf_va
+ * @buf_end_aux_buf_size: Number of bytes copied in tail aux. buffer
+ * @buf_start_aux_buf_va:	Header aux. buffer virtual address
+ * @buf_start_aux_buf_dma:	DMA address of buf_start_aux_buf_va
+ * @buf_start_aux_buf_size: Number of bytes copied in header aux. buffer
+ */
+struct client_dma_buffer {
+	unsigned long buf_size;
+	/* User buffer info. */
+	u8 __user *user_buf_ptr;
+	int num_of_pages;
+	struct page **user_pages;
+
+	/*
+	 * DMA mapping info (either created for user space pages
+	 * or retrieved from kernel client)
+	 */
+	enum dma_data_direction dma_direction;
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	struct scatterlist *sg_head;
+#endif
+	struct scatterlist *sg_main;
+	struct scatterlist *sg_tail;
+	struct scatterlist *sg_save4next;
+
+	unsigned int sg_main_nents;
+	unsigned long save4next_size;
+	/* Auxilliary driver buffer for sg_head and sg_tail copies */
+	void *buf_end_aux_buf_va;
+	dma_addr_t buf_end_aux_buf_dma;
+	unsigned long buf_end_aux_buf_size;
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	/* Currently only cache line at buffer start requires aux. buffers */
+	void *buf_start_aux_buf_va;
+	dma_addr_t buf_start_aux_buf_dma;
+	unsigned long buf_start_aux_buf_size;
+#endif				/*CONFIG_NOT_COHERENT_CACHE */
+
+};
+
+/**
+ * struct mlli_tables_list - MLLI tables list object
+ * @link_to_first_table:	"link" to first table (one extra entry for link
+ *				to first table we are using LLI entry in order
+ *				to implement the list head using the same data
+ *				structure used to link subsequent tables. This
+ *				helps avoiding special code for the first table
+ *				linking.
+ * @table_count:	The total number of fragmented tables. table_count is
+ *			only 16b following the size allocated for it in the
+ *			SW descriptor.
+ * @user_memref:	Referenced client DMA buffer
+ * @data_direction:	This operation DMA direction
+ *
+ */
+struct mlli_tables_list {
+	u32 link_to_first_table[2 * SEP_LLI_ENTRY_WORD_SIZE];
+	u16 table_count;
+	struct client_dma_buffer *user_memref;
+	enum dma_data_direction data_direction;
+};
+
+/**
+ * checks if the given MLLI tables list object points to a Direct LLI buffer.
+ * @mlli_tables_p:	a pointer to a table list
+ *
+ * Returns true if DLLI buffer, false otherwise. If mlli_tables_p points to
+ * an empty data buffer this function will return true
+ */
+static inline bool llimgr_mlli_is_dlli(struct mlli_tables_list *mlli_tables_p)
+{
+	return (mlli_tables_p->user_memref != NULL) &&
+	    (mlli_tables_p->table_count == 0);
+}
+
+/**
+ * llimgr_create() - Create LLI-manager object
+ * @dev:	 Device context
+ * @mlli_table_size:	 The maximum size of an MLLI table in bytes
+ *
+ * Returns llimgr_h Created object handle or LLIMGR_NULL_HANDLE if failed
+ */
+void *llimgr_create(struct device *dev, unsigned long mlli_table_size);
+
+/**
+ * llimgr_destroy() - Destroy (free resources of) given LLI-manager object
+ * @llimgr:	 LLI-manager object handle
+ *
+ */
+void llimgr_destroy(void *llimgr);
+
+/**
+ * llimgr_register_client_dma_buf() - Register given client buffer for DMA
+ *					operation.
+ * @llimgr:	 The LLI manager object handle
+ * @user_buf_ptr:	 Pointer in user space of the user buffer
+ * @sgl:	Client provided s/g list. user_buf_ptr is assumed NULL if this
+ *		list is given (!NULL).
+ * @buf_size:	 The user buffer size in bytes (incl. save4next). May be 0.
+ * @save4next_size:	Amount from buffer end to save for next op.
+ *			(split into seperate sgl). May be 0.
+ * @dma_direction:	The DMA direction this buffer would be used for
+ * @client_dma_buf_p:	Pointer to the user DMA buffer "object"
+ *
+ * Register given client buffer for DMA operation.
+ * If user_buf_ptr!=NULL and sgl==NULL it locks the user pages and creates
+ * head/main/tail s/g lists. If sgl!=NULL is splits it into head/main/tail
+ * s/g lists.
+ * Returns 0 for success
+ */
+int llimgr_register_client_dma_buf(void *llimgr,
+				   u8 __user *user_buf_ptr,
+				   struct scatterlist *sgl,
+				   const unsigned long buf_size,
+				   const unsigned long save4next_size,
+				   const enum dma_data_direction dma_direction,
+				   struct client_dma_buffer *client_dma_buf_p);
+
+/**
+ * llimgr_deregister_client_dma_buf() - Unmap given user DMA buffer
+ *					(flush and unlock pages)
+ * @llimgr:
+
+ * @client_dma_buf_p:	 User DMA buffer object
+ *
+ */
+void llimgr_deregister_client_dma_buf(void *llimgr,
+				      struct client_dma_buffer
+				      *client_dma_buf_p);
+
+/**
+ * llimgr_copy_from_client_buf_save4next() - Copy from the sg_save4next chunk
+ *	of the client DMA buffer to given buffer.
+ *	Used to save hash block remainder.
+ *
+ * @client_dma_buf_p:	The client DMA buffer with the save4next chunk
+ * @to_buf:		Target buffer to copy to.
+ * @buf_len:		Given buffer length (to avoid buffer overflow)
+ *
+ * Returns number of bytes copied or -ENOMEM if given buffer is too small
+ */
+int llimgr_copy_from_client_buf_save4next(struct client_dma_buffer
+					  *client_dma_buf_p, u8 *to_buf,
+					  unsigned long buf_len);
+
+/**
+ * llimgr_create_mlli() - Create MLLI tables list for given user buffer
+ * @llimgr:
+ * @mlli_tables_p:	 A pointer to MLLI tables list object
+ * @dma_direction:	 The DMA direction of data flow
+ * @user_memref:	 User DMA memory reference (locked pages, etc.)
+ * @prepend_data:	 DMA address of data buffer to prepend before user data
+ * @prepend_data_size:	 Size of prepend_data (0 if none)
+ *
+ * Returns int 0 on success
+ */
+int llimgr_create_mlli(void *llimgr,
+		       struct mlli_tables_list *mlli_tables_p,
+		       enum dma_data_direction dma_direction,
+		       struct client_dma_buffer *user_memref,
+		       dma_addr_t prepend_data,
+		       unsigned long prepend_data_size);
+
+/**
+ * llimgr_destroy_mlli() - Cleanup resources of given MLLI tables list object
+ *				(if has any tables)
+ * @llimgr:
+ * @mlli_tables_p:
+ *
+ */
+void llimgr_destroy_mlli(void *llimgr,
+			 struct mlli_tables_list *mlli_tables_p);
+
+/**
+ * llimgr_mlli_to_seprpc_memref() - Convert given MLLI tables list into a
+ *					SeP RPC memory reference format
+ * @mlli_tables_p:	 The source MLLI table
+ * @memref_p:	 The destination RPC memory reference
+ *
+ */
+void llimgr_mlli_to_seprpc_memref(struct mlli_tables_list *mlli_tables_p,
+				  struct seprpc_memref *memref_p);
+
+/**
+ * llimgr_get_mlli_desc_info() - Get the MLLI info required for a descriptor.
+ *
+ * @mlli_tables_p:	The source MLLI table
+ * @first_table_addr_p:	First table DMA address or data DMA address for DLLI
+ * @first_table_size_p:	First table size in bytes or data size for DLLI
+ * @num_of_tables:	Number of MLLI tables in the list (0 for DLLI)
+ *
+ * In case of DLLI, first_table_* refers to the client DMA buffer (DLLI info.)
+ */
+void llimgr_get_mlli_desc_info(struct mlli_tables_list *mlli_tables_p,
+			       u32 *first_table_addr_p,
+			       u16 *first_table_size_p,
+			       u16 *num_of_tables_p);
+
+#ifdef DEBUG
+/**
+ * llimgr_dump_mlli_tables_list() - Dump all the MLLI tables in a given tables
+ *					list
+ * @mlli_tables_list_p:	 Pointer to tables list structure
+ *
+ */
+void llimgr_dump_mlli_tables_list(struct mlli_tables_list *mlli_tables_list_p);
+#else
+#define llimgr_dump_mlli_tables_list(mlli_tables_list_p) do {} while (0)
+#endif /*DEBUG*/
+#endif /*__LLI_MGR_H__*/
diff --git a/drivers/staging/sep54/sep_applets.h b/drivers/staging/sep54/sep_applets.h
new file mode 100644
index 0000000..e2e7d14
--- /dev/null
+++ b/drivers/staging/sep54/sep_applets.h
@@ -0,0 +1,38 @@
+/*
+ *  sep_applets.h - Security Processor applet definitions
+ *
+ *  Copyright(c) 2012-2013 Intel Corporation. All rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ *  more details.
+ *
+ *  You should have received a copy of the GNU General Public License along with
+ *  this program; if not, write to the Free Software Foundation, Inc., 59
+ *  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ */
+#ifndef _SEP_APPLETS_H
+#define _SEP_APPLETS_H
+
+/* Kernel side threads (Agents as DX calls them) */
+#define RPMB_AGENT_ID            0
+
+/* Applet UUIDs */
+#define DEFAULT_APP_UUID { 0x00, 0xDE, 0xFA, 0x01, 0xDE, 0xFA, 0x02, 0xDE, \
+	0xFA, 0x03, 0xDE, 0xFA, 0x04, 0xDE, 0xFA, 0xFF }
+
+#define HDCP_APP_UUID { 0x10, 0x21, 0x32, 0x43, 0x54, 0x65, 0x76, 0x87,     \
+	0x98, 0xA9, 0xBA, 0xCB, 0xDC, 0xED, 0xFE, 0x0F }
+
+#define CMD_RPMB_ENABLE          1
+#define CMD_IMAGE_VERIFY         3
+#define CMD_DRM_ENABLE_IED       0x9000
+#define CMD_DRM_DISABLE_IED      0x9001
+#define HDCP_RX_HDMI_STATUS 0x80000080
+
+#endif /* _SEP_APPLETS_H_ */
diff --git a/drivers/staging/sep54/sep_compat_ioctl.c b/drivers/staging/sep54/sep_compat_ioctl.c
new file mode 100644
index 0000000..d5ab36f
--- /dev/null
+++ b/drivers/staging/sep54/sep_compat_ioctl.c
@@ -0,0 +1,1066 @@
+/*
+ * Copyright (C) 2013  Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/compat.h>
+#include <linux/compiler.h>
+#include <linux/uaccess.h>
+#include <linux/printk.h>
+#include "sep_compat_ioctl.h"
+#include "dx_driver_abi.h"
+#include "dx_dev_defs.h"
+#include "sepapp.h"
+
+typedef int sep_ioctl_compat_t(struct file *filp, unsigned int cmd,
+			       unsigned long arg);
+
+static int compat_sep_ioctl_get_ver_major(struct file *filp, unsigned int cmd,
+					  unsigned long arg)
+{
+	return sep_ioctl(filp, cmd, arg);
+}
+
+static int compat_sep_ioctl_get_ver_minor(struct file *filp, unsigned int cmd,
+					  unsigned long arg)
+{
+	return sep_ioctl(filp, cmd, arg);
+}
+
+static int compat_sep_ioctl_get_sym_cipher_ctx_size(struct file *filp,
+						    unsigned int cmd,
+						    unsigned long arg)
+{
+	return sep_ioctl(filp, cmd, arg);
+}
+
+static int compat_sep_ioctl_get_auth_enc_ctx_size(struct file *filp,
+						  unsigned int cmd,
+						  unsigned long arg)
+{
+	return sep_ioctl(filp, cmd, arg);
+}
+
+static int compat_sep_ioctl_get_mac_ctx_size(struct file *filp,
+					     unsigned int cmd,
+					     unsigned long arg)
+{
+	return sep_ioctl(filp, cmd, arg);
+}
+static int compat_sep_ioctl_get_hash_ctx_size(struct file *filp,
+					      unsigned int cmd,
+					      unsigned long arg)
+{
+	return sep_ioctl(filp, cmd, arg);
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct sym_cipher_init_params_32 {
+	u32 context_buf;	/*[in] */
+	struct dxdi_sym_cipher_props props;	/*[in] */
+	u32 error_info;	/*[out] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_sym_cipher_init(struct file *filp, unsigned int cmd,
+					    unsigned long arg)
+{
+	struct sym_cipher_init_params_32 init_32;
+	struct dxdi_sym_cipher_init_params __user *init_params;
+	int ret;
+
+	if (copy_from_user(&init_32, (void __user *)arg, sizeof(init_32)))
+		return -EFAULT;
+
+	init_params = compat_alloc_user_space(sizeof(*init_params));
+	if (!access_ok(VERIFY_WRITE, init_params, sizeof(*init_params)))
+		return -EFAULT;
+
+	if (__put_user((void __user *)(unsigned long)init_32.context_buf,
+		       &init_params->context_buf)
+	    || copy_to_user(&init_params->props, &init_32.props,
+			    sizeof(init_32.props)))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)init_params);
+
+	if (__get_user(init_32.error_info, &init_params->error_info))
+		return -EFAULT;
+
+	if (put_user(init_32.error_info,
+		       &((struct sym_cipher_init_params_32 *)arg)->error_info))
+		return -EFAULT;
+
+	return ret;
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct auth_enc_init_params_32 {
+	u32 context_buf; /*[in] */
+	struct dxdi_auth_enc_props props; /*[in] */
+	u32 error_info; /*[out] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_auth_enc_init(struct file *filp, unsigned int cmd,
+					  unsigned long arg)
+{
+	struct auth_enc_init_params_32 up32;
+	struct dxdi_auth_enc_init_params __user *init_params;
+	int ret;
+
+	if (copy_from_user(&up32, (void __user *)arg, sizeof(up32)))
+		return -EFAULT;
+
+	init_params = compat_alloc_user_space(sizeof(*init_params));
+	if (!access_ok(VERIFY_WRITE, init_params, sizeof(*init_params)))
+		return -EFAULT;
+
+	if (__put_user((void __user *)(unsigned long)up32.context_buf,
+		       &init_params->context_buf)
+	    || copy_to_user(&init_params->props, &up32.props,
+			    sizeof(up32.props)))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)init_params);
+
+	if (__get_user(up32.error_info, &init_params->error_info))
+		return -EFAULT;
+
+	if (put_user(up32.error_info,
+		       &((struct auth_enc_init_params_32 *)arg)->error_info))
+		return -EFAULT;
+
+	return ret;
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct mac_init_params_32 {
+	u32 context_buf; /*[in] */
+	struct dxdi_mac_props props;  /*[in] */
+	u32 error_info; /*[out] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_mac_init(struct file *filp, unsigned int cmd,
+				     unsigned long arg)
+{
+	struct mac_init_params_32 up32;
+	struct dxdi_mac_init_params __user *init_params;
+	int ret;
+
+	if (copy_from_user(&up32, (void __user *)arg, sizeof(up32)))
+		return -EFAULT;
+
+	init_params = compat_alloc_user_space(sizeof(*init_params));
+	if (!access_ok(VERIFY_WRITE, init_params, sizeof(*init_params)))
+		return -EFAULT;
+
+	if (__put_user((void __user *)(unsigned long)up32.context_buf,
+		       &init_params->context_buf)
+	    || copy_to_user(&init_params->props, &up32.props,
+			    sizeof(up32.props)))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)init_params);
+
+	if (__get_user(up32.error_info, &init_params->error_info))
+		return -EFAULT;
+
+	if (put_user(up32.error_info,
+		       &((struct mac_init_params_32 *)arg)->error_info))
+		return -EFAULT;
+
+	return ret;
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct hash_init_params_32 {
+	u32 context_buf; /*[in] */
+	enum dxdi_hash_type hash_type;  /*[in] */
+	u32 error_info; /*[out] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_hash_init(struct file *filp, unsigned int cmd,
+				      unsigned long arg)
+{
+	struct hash_init_params_32 up32;
+	struct dxdi_hash_init_params __user *init_params;
+	int ret;
+
+	if (copy_from_user(&up32, (void __user *)arg, sizeof(up32)))
+		return -EFAULT;
+
+	init_params = compat_alloc_user_space(sizeof(*init_params));
+	if (!access_ok(VERIFY_WRITE, init_params, sizeof(*init_params)))
+		return -EFAULT;
+
+	if (__put_user((void __user *)(unsigned long)up32.context_buf,
+		       &init_params->context_buf)
+	    || __put_user(up32.hash_type, &init_params->hash_type))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)init_params);
+
+	if (__get_user(up32.error_info, &init_params->error_info))
+		return -EFAULT;
+
+	if (put_user(up32.error_info,
+		       &((struct hash_init_params_32 *)arg)->error_info))
+		return -EFAULT;
+
+	return ret;
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct process_dblk_params_32 {
+	u32 context_buf; /*[in] */
+	u32 data_in;  /*[in] */
+	u32 data_out; /*[in] */
+	enum dxdi_data_block_type data_block_type;  /*[in] */
+	u32 data_in_size; /*[in] */
+	u32 error_info; /*[out] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_proc_dblk(struct file *filp, unsigned int cmd,
+				      unsigned long arg)
+{
+	struct process_dblk_params_32 up32;
+	struct dxdi_process_dblk_params __user *dblk_params;
+	int ret;
+
+	if (copy_from_user(&up32, (void __user *)arg, sizeof(up32)))
+		return -EFAULT;
+
+	dblk_params = compat_alloc_user_space(sizeof(*dblk_params));
+	if (!access_ok(VERIFY_WRITE, dblk_params, sizeof(*dblk_params)))
+		return -EFAULT;
+
+	if (__put_user((void __user *)(unsigned long)up32.context_buf,
+		       &dblk_params->context_buf)
+	    || __put_user((void __user *)(unsigned long)up32.data_in,
+			  &dblk_params->data_in)
+	    || __put_user((void __user *)(unsigned long)up32.data_out,
+			  &dblk_params->data_out)
+	    || __put_user(up32.data_block_type, &dblk_params->data_block_type)
+	    || __put_user(up32.data_in_size, &dblk_params->data_in_size))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)dblk_params);
+
+	if (__get_user(up32.error_info, &dblk_params->error_info))
+		return -EFAULT;
+
+	if (put_user(up32.error_info,
+		       &((struct process_dblk_params_32 *)arg)->error_info))
+		return -EFAULT;
+
+	return ret;
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct fin_process_params_32 {
+	u32 context_buf; /*[in] */
+	u32 data_in;  /*[in] */
+	u32 data_out; /*[in] */
+	u32 data_in_size; /*[in] (octets) */
+	u8 digest_or_mac[DXDI_DIGEST_SIZE_MAX]; /*[out] */
+	u8 digest_or_mac_size;  /*[out] (octets) */
+	u32 error_info; /*[out] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_fin_proc(struct file *filp, unsigned int cmd,
+				     unsigned long arg)
+{
+	struct fin_process_params_32 up32;
+	struct dxdi_fin_process_params __user *fin_params;
+	int ret;
+
+	if (copy_from_user(&up32, (void __user *)arg, sizeof(up32)))
+		return -EFAULT;
+
+	fin_params = compat_alloc_user_space(sizeof(*fin_params));
+	if (!access_ok(VERIFY_WRITE, fin_params, sizeof(*fin_params)))
+		return -EFAULT;
+
+	if (__put_user((void __user *)(unsigned long)up32.context_buf,
+		       &fin_params->context_buf)
+	    || __put_user((void __user *)(unsigned long)up32.data_in,
+			  &fin_params->data_in)
+	    || __put_user((void __user *)(unsigned long)up32.data_out,
+			  &fin_params->data_out)
+	    || __put_user(up32.data_in_size, &fin_params->data_in_size))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)fin_params);
+
+	if (copy_from_user(up32.digest_or_mac, fin_params->digest_or_mac,
+			   DXDI_DIGEST_SIZE_MAX)
+	    || __get_user(up32.digest_or_mac_size,
+			  &fin_params->digest_or_mac_size)
+	    || __get_user(up32.error_info, &fin_params->error_info))
+		return -EFAULT;
+
+	if (copy_to_user((void __user *)arg, &up32, sizeof(up32)))
+		return -EFAULT;
+
+	return ret;
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct combined_init_params_32 {
+	struct dxdi_combined_props props; /*[in] */
+	u32 error_info; /*[out] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_combined_init(struct file *filp, unsigned int cmd,
+					  unsigned long arg)
+{
+	struct combined_init_params_32 up32;
+	struct dxdi_combined_init_params __user *init_params;
+	int ret;
+
+	if (copy_from_user(&up32, (void __user *)arg, sizeof(up32)))
+		return -EFAULT;
+
+	init_params = compat_alloc_user_space(sizeof(*init_params));
+	if (!access_ok(VERIFY_WRITE, init_params, sizeof(*init_params)))
+		return -EFAULT;
+
+	if (copy_to_user(&init_params->props, &up32.props, sizeof(up32.props)))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)init_params);
+
+	if (__get_user(up32.error_info, &init_params->error_info))
+		return -EFAULT;
+
+	if (put_user(up32.error_info,
+		       &((struct combined_init_params_32 *)arg)->error_info))
+		return -EFAULT;
+
+	return ret;
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct combined_proc_dblk_params_32 {
+	struct dxdi_combined_props props; /*[in] */
+	u32 data_in;  /*[in] */
+	u32 data_out; /*[out] */
+	u32 data_in_size; /*[in] (octets) */
+	u32 error_info; /*[out] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_combined_proc_dblk(struct file *filp,
+					       unsigned int cmd,
+					       unsigned long arg)
+{
+	struct combined_proc_dblk_params_32 up32;
+	struct dxdi_combined_proc_dblk_params __user *blk_params;
+	int ret;
+
+	if (copy_from_user(&up32, (void __user *)arg, sizeof(up32)))
+		return -EFAULT;
+
+	blk_params = compat_alloc_user_space(sizeof(*blk_params));
+	if (!access_ok(VERIFY_WRITE, blk_params, sizeof(*blk_params)))
+		return -EFAULT;
+
+	if (copy_to_user(&blk_params->props, &up32.props,
+			 sizeof(up32.props))
+	    || __put_user((void __user *)(unsigned long)up32.data_in,
+			  &blk_params->data_in)
+	    || __put_user((void __user *)(unsigned long)up32.data_out,
+			  &blk_params->data_out)
+	    || __put_user(up32.data_in_size, &blk_params->data_in_size))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)blk_params);
+
+	if (__get_user(up32.error_info, &blk_params->error_info))
+		return -EFAULT;
+
+	if (put_user(up32.error_info,
+		       &((struct combined_proc_dblk_params_32 *)arg)->error_info))
+		return -EFAULT;
+
+	return ret;
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct combined_proc_params_32 {
+	struct dxdi_combined_props props; /*[in] */
+	u32 data_in;  /*[in] */
+	u32 data_out; /*[out] */
+	u32 data_in_size; /*[in] (octets) */
+	u8 auth_data[DXDI_DIGEST_SIZE_MAX]; /*[out] */
+	u8 auth_data_size;  /*[out] (octets) */
+	u32 error_info; /*[out] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_combined_fin_proc(struct file *filp,
+					      unsigned int cmd,
+					      unsigned long arg)
+{
+	struct combined_proc_params_32 up32;
+	struct dxdi_combined_proc_params __user *user_params;
+	int ret;
+
+	if (copy_from_user(&up32, (void __user *)arg, sizeof(up32)))
+		return -EFAULT;
+
+	user_params = compat_alloc_user_space(sizeof(*user_params));
+	if (!access_ok(VERIFY_WRITE, user_params, sizeof(*user_params)))
+		return -EFAULT;
+
+	if (copy_to_user(&user_params->props, &up32.props,
+			 sizeof(up32.props))
+	    || __put_user((void __user *)(unsigned long)up32.data_in,
+			  &user_params->data_in)
+	    || __put_user((void __user *)(unsigned long)up32.data_out,
+			  &user_params->data_out)
+	    || __put_user(up32.data_in_size, &user_params->data_in_size))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)user_params);
+
+	if (copy_from_user(up32.auth_data, user_params->auth_data,
+			   DXDI_DIGEST_SIZE_MAX)
+	    || __get_user(up32.auth_data_size,
+			  &user_params->auth_data_size)
+	    || __get_user(up32.error_info, &user_params->error_info))
+		return -EFAULT;
+
+	if (copy_to_user((void __user *)arg, &up32, sizeof(up32)))
+		return -EFAULT;
+
+	return ret;
+}
+
+static int compat_sep_ioctl_combined_proc(struct file *filp,
+					  unsigned int cmd, unsigned long arg)
+{
+	return compat_sep_ioctl_combined_fin_proc(filp, cmd, arg);
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct sym_cipher_proc_params_32 {
+	u32 context_buf; /*[in] */
+	struct dxdi_sym_cipher_props props; /*[in] */
+	u32 data_in;  /*[in] */
+	u32 data_out; /*[in] */
+	u32 data_in_size; /*[in] (octets) */
+	u32 error_info; /*[out] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_sym_cipher_proc(struct file *filp, unsigned int cmd,
+					    unsigned long arg)
+{
+	struct sym_cipher_proc_params_32 up32;
+	struct dxdi_sym_cipher_proc_params __user *user_params;
+	int ret;
+
+	if (copy_from_user(&up32, (void __user *)arg, sizeof(up32)))
+		return -EFAULT;
+
+	user_params = compat_alloc_user_space(sizeof(*user_params));
+	if (!access_ok(VERIFY_WRITE, user_params, sizeof(*user_params)))
+		return -EFAULT;
+
+	if (__put_user((void __user *)(unsigned long)up32.context_buf,
+		       &user_params->context_buf)
+	    || copy_to_user(&user_params->props, &up32.props,
+			    sizeof(up32.props))
+	    || __put_user((void __user *)(unsigned long)up32.data_in,
+			  &user_params->data_in)
+	    || __put_user((void __user *)(unsigned long)up32.data_out,
+			  &user_params->data_out)
+	    || __put_user(up32.data_in_size, &user_params->data_in_size))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)user_params);
+
+	if (__get_user(up32.error_info, &user_params->error_info))
+		return -EFAULT;
+
+	if (put_user(up32.error_info,
+		       &((struct sym_cipher_proc_params_32 *)arg)->error_info))
+		return -EFAULT;
+
+	return ret;
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct auth_enc_proc_params_32 {
+	u32 context_buf; /*[in] */
+	struct dxdi_auth_enc_props props; /*[in] */
+	u32 adata;    /*[in] */
+	u32 text_data;  /*[in] */
+	u32 data_out; /*[in] */
+	u8 tag[DXDI_DIGEST_SIZE_MAX]; /*[out] */
+	u32 error_info; /*[out] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_auth_enc_proc(struct file *filp, unsigned int cmd,
+					  unsigned long arg)
+{
+	struct auth_enc_proc_params_32 up32;
+	struct dxdi_auth_enc_proc_params __user *user_params;
+	int ret;
+
+	if (copy_from_user(&up32, (void __user *)arg, sizeof(up32)))
+		return -EFAULT;
+
+	user_params = compat_alloc_user_space(sizeof(*user_params));
+	if (!access_ok(VERIFY_WRITE, user_params, sizeof(*user_params)))
+		return -EFAULT;
+
+	if (__put_user((void __user *)(unsigned long)up32.context_buf,
+		       &user_params->context_buf)
+	    || copy_to_user(&user_params->props, &up32.props,
+			    sizeof(up32.props))
+	    || __put_user((void __user *)(unsigned long)up32.adata,
+			  &user_params->adata)
+	    || __put_user((void __user *)(unsigned long)up32.text_data,
+			  &user_params->text_data)
+	    || __put_user((void __user *)(unsigned long)up32.data_out,
+			  &user_params->data_out))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)user_params);
+
+	if (copy_from_user(up32.tag, user_params->tag,
+			   DXDI_DIGEST_SIZE_MAX)
+	    || __get_user(up32.error_info, &user_params->error_info))
+		return -EFAULT;
+
+	if (copy_to_user((void __user *)arg, &up32, sizeof(up32)))
+		return -EFAULT;
+
+	return ret;
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct mac_proc_params_32 {
+	u32 context_buf; /*[in] */
+	struct dxdi_mac_props props;  /*[in] */
+	u32 data_in;  /*[in] */
+	u32 data_in_size; /*[in] (octets) */
+	u8 mac[DXDI_DIGEST_SIZE_MAX]; /*[out] */
+	u8 mac_size;  /*[out] (octets) */
+	u32 error_info; /*[out] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_mac_proc(struct file *filp, unsigned int cmd,
+				     unsigned long arg)
+{
+	struct mac_proc_params_32 up32;
+	struct dxdi_mac_proc_params __user *user_params;
+	int ret;
+
+	if (copy_from_user(&up32, (void __user *)arg, sizeof(up32)))
+		return -EFAULT;
+
+	user_params = compat_alloc_user_space(sizeof(*user_params));
+	if (!access_ok(VERIFY_WRITE, user_params, sizeof(*user_params)))
+		return -EFAULT;
+
+	if (__put_user((void __user *)(unsigned long)up32.context_buf,
+		       &user_params->context_buf)
+	    || copy_to_user(&user_params->props, &up32.props,
+			 sizeof(up32.props))
+	    || __put_user((void __user *)(unsigned long)up32.data_in,
+			  &user_params->data_in)
+	    || __put_user(up32.data_in_size, &user_params->data_in_size))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)user_params);
+
+	if (copy_from_user(up32.mac, user_params->mac, DXDI_DIGEST_SIZE_MAX)
+	    || __get_user(up32.mac_size, &user_params->mac_size)
+	    || __get_user(up32.error_info, &user_params->error_info))
+		return -EFAULT;
+
+	if (copy_to_user((void __user *)arg, &up32, sizeof(up32)))
+		return -EFAULT;
+
+	return ret;
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct hash_proc_params_32 {
+	u32 context_buf; /*[in] */
+	enum dxdi_hash_type hash_type;  /*[in] */
+	u32 data_in;  /*[in] */
+	u32 data_in_size; /*[in] (octets) */
+	u8 digest[DXDI_DIGEST_SIZE_MAX];  /*[out] */
+	u8 digest_size; /*[out] (octets) */
+	u32 error_info; /*[out] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_hash_proc(struct file *filp, unsigned int cmd,
+				      unsigned long arg)
+{
+	struct hash_proc_params_32 up32;
+	struct dxdi_hash_proc_params __user *user_params;
+	int ret;
+
+	if (copy_from_user(&up32, (void __user *)arg, sizeof(up32)))
+		return -EFAULT;
+
+	user_params = compat_alloc_user_space(sizeof(*user_params));
+	if (!access_ok(VERIFY_WRITE, user_params, sizeof(*user_params)))
+		return -EFAULT;
+
+	if (__put_user((void __user *)(unsigned long)up32.context_buf,
+		       &user_params->context_buf)
+	    || __put_user(up32.hash_type, &user_params->hash_type)
+	    || __put_user((void __user *)(unsigned long)up32.data_in,
+			  &user_params->data_in)
+	    || __put_user(up32.data_in_size, &user_params->data_in_size))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)user_params);
+
+	if (copy_from_user(up32.digest, user_params->digest,
+			   DXDI_DIGEST_SIZE_MAX)
+	    || __get_user(up32.digest_size, &user_params->digest_size)
+	    || __get_user(up32.error_info, &user_params->error_info))
+		return -EFAULT;
+
+	if (copy_to_user((void __user *)arg, &up32, sizeof(up32)))
+		return -EFAULT;
+
+	return ret;
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct sep_rpc_params_32 {
+	u16 agent_id; /*[in] */
+	u16 func_id;  /*[in] */
+	struct dxdi_memref mem_refs[SEP_RPC_MAX_MEMREF_PER_FUNC]; /*[in] */
+	u32 rpc_params_size;  /*[in] */
+	u32 rpc_params; /*[in] */
+	/* rpc_params to be copied into kernel DMA buffer */
+	enum seprpc_retcode error_info; /*[out] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_sep_rpc(struct file *filp, unsigned int cmd,
+				    unsigned long arg)
+{
+	struct sep_rpc_params_32 up32;
+	struct dxdi_sep_rpc_params __user *user_params;
+
+	int ret;
+
+	if (copy_from_user(&up32, (void __user *)arg, sizeof(up32)))
+		return -EFAULT;
+
+	user_params = compat_alloc_user_space(sizeof(*user_params));
+	if (!access_ok(VERIFY_WRITE, user_params, sizeof(*user_params)))
+		return -EFAULT;
+
+	if (__put_user(up32.agent_id, &user_params->agent_id)
+	    || __put_user(up32.func_id, &user_params->func_id)
+	    || copy_to_user(user_params->mem_refs, up32.mem_refs,
+			    sizeof(struct dxdi_memref)
+			    * SEP_RPC_MAX_MEMREF_PER_FUNC)
+	    || __put_user(up32.rpc_params_size, &user_params->rpc_params_size)
+	    || __put_user((void __user *)(unsigned long)up32.rpc_params,
+			 &user_params->rpc_params))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)user_params);
+
+	if (__get_user(up32.error_info, &user_params->error_info))
+		return -EFAULT;
+
+	if (put_user(up32.error_info,
+		       &((struct sep_rpc_params_32 *)arg)->error_info))
+		return -EFAULT;
+
+	return ret;
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct register_mem4dma_params_32 {
+	struct dxdi_memref memref;  /*[in] */
+	int memref_id;  /*[out] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_register_mem4dma(struct file *filp,
+					     unsigned int cmd,
+					     unsigned long arg)
+{
+	struct register_mem4dma_params_32 up32;
+	struct dxdi_register_mem4dma_params __user *user_params;
+	int ret;
+
+	if (copy_from_user(&up32, (void __user *)arg, sizeof(up32)))
+		return -EFAULT;
+
+	user_params = compat_alloc_user_space(sizeof(*user_params));
+	if (!access_ok(VERIFY_WRITE, user_params, sizeof(*user_params)))
+		return -EFAULT;
+
+	if (copy_to_user(&user_params->memref, &up32.memref,
+		sizeof(struct dxdi_memref)))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)user_params);
+	if (ret)
+		return ret;
+
+	if (__get_user(up32.memref_id, &user_params->memref_id))
+		return -EFAULT;
+
+	if (copy_to_user((void __user *)arg, &up32, sizeof(up32)))
+		return -EFAULT;
+
+	return ret;
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct alloc_mem4dma_params_32 {
+	u32 size; /*[in] */
+	int memref_id;  /*[out] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_alloc_mem4dma(struct file *filp, unsigned int cmd,
+					  unsigned long arg)
+{
+	struct alloc_mem4dma_params_32 up32;
+	struct dxdi_alloc_mem4dma_params __user *user_params;
+	int ret;
+
+	if (copy_from_user(&up32, (void __user *)arg, sizeof(up32)))
+		return -EFAULT;
+
+	user_params = compat_alloc_user_space(sizeof(*user_params));
+	if (!access_ok(VERIFY_WRITE, user_params, sizeof(*user_params)))
+		return -EFAULT;
+
+	if (__put_user(up32.size, &user_params->size))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)user_params);
+	if (ret)
+		return ret;
+
+	if (__get_user(up32.memref_id, &user_params->memref_id))
+		return -EFAULT;
+
+	if (copy_to_user((void __user *)arg, &up32, sizeof(up32)))
+		return -EFAULT;
+
+	return ret;
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct free_mem4dma_params_32 {
+	int memref_id;  /*[in] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_free_mem4dma(struct file *filp, unsigned int cmd,
+					 unsigned long arg)
+{
+	return sep_ioctl(filp, cmd, arg);
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct aes_iv_params_32 {
+	u32 context_buf; /*[in] */
+	u8 iv_ptr[DXDI_AES_IV_SIZE];  /*[in]/[out] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_set_iv(struct file *filp, unsigned int cmd,
+				   unsigned long arg)
+{
+	struct aes_iv_params_32 up32;
+	struct dxdi_aes_iv_params *user_params;
+	int ret;
+
+	if (copy_from_user(&up32, (void __user *)arg, sizeof(up32)))
+		return -EFAULT;
+
+	user_params = compat_alloc_user_space(sizeof(*user_params));
+	if (!access_ok(VERIFY_WRITE, user_params, sizeof(*user_params)))
+		return -EFAULT;
+
+	if (__put_user((void __user *)(unsigned long)up32.context_buf,
+		      &user_params->context_buf)
+	    || __copy_to_user(user_params->iv_ptr, up32.iv_ptr,
+			      DXDI_AES_IV_SIZE))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)user_params);
+
+	return ret;
+}
+
+static int compat_sep_ioctl_get_iv(struct file *filp, unsigned int cmd,
+				   unsigned long arg)
+{
+	struct aes_iv_params_32 up32;
+	struct dxdi_aes_iv_params *user_params;
+	int ret;
+
+	if (copy_from_user(&up32, (void __user *)arg, sizeof(up32)))
+		return -EFAULT;
+
+	user_params = compat_alloc_user_space(sizeof(*user_params));
+	if (!access_ok(VERIFY_WRITE, user_params, sizeof(*user_params)))
+		return -EFAULT;
+
+	if (__put_user((void __user *)(unsigned long)up32.context_buf,
+		       &user_params->context_buf)
+	    || __copy_to_user(&user_params->iv_ptr, &up32.iv_ptr,
+			      DXDI_AES_IV_SIZE))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)user_params);
+
+	if (ret)
+		return ret;
+
+	if (__copy_from_user(up32.iv_ptr, user_params->iv_ptr,
+			     DXDI_AES_IV_SIZE))
+		return -EFAULT;
+
+	if (copy_to_user((void __user *)arg, &up32, sizeof(up32)))
+		return -EFAULT;
+
+	return ret;
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct sepapp_session_open_params_32 {
+	u8 app_uuid[DXDI_SEPAPP_UUID_SIZE]; /*[in] */
+	u32 auth_method;  /*[in] */
+	u32 auth_data[3]; /*[in] */
+	struct dxdi_sepapp_params app_auth_data;  /*[in/out] */
+	int session_id; /*[out] */
+	enum dxdi_sep_module sep_ret_origin;  /*[out] */
+	u32 error_info; /*[out] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_sepapp_session_open(struct file *filp,
+						unsigned int cmd,
+						unsigned long arg)
+{
+	struct sepapp_session_open_params_32 up32;
+	struct dxdi_sepapp_session_open_params __user *user_params;
+	int ret;
+
+	if (copy_from_user(&up32, (void __user *)arg, sizeof(up32)))
+		return -EFAULT;
+
+	user_params = compat_alloc_user_space(sizeof(*user_params));
+	if (!access_ok(VERIFY_WRITE, user_params, sizeof(*user_params)))
+		return -EFAULT;
+
+	if (copy_to_user(&user_params->app_uuid[0], &up32.app_uuid[0],
+			 DXDI_SEPAPP_UUID_SIZE)
+	    || __put_user(up32.auth_method, &user_params->auth_method)
+	    || copy_to_user(user_params->auth_data, &up32.auth_data,
+			    3 * sizeof(u32))
+	    || copy_to_user(&user_params->app_auth_data, &up32.app_auth_data,
+			    sizeof(struct dxdi_sepapp_params)))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)user_params);
+	if (ret)
+		return ret;
+
+	if (__copy_from_user(&up32.app_auth_data, &user_params->app_auth_data,
+			     sizeof(struct dxdi_sepapp_params))
+	    || __get_user(up32.session_id, &user_params->session_id)
+	    || __get_user(up32.sep_ret_origin, &user_params->sep_ret_origin)
+	    || __get_user(up32.error_info, &user_params->error_info))
+		return -EFAULT;
+
+	if (copy_to_user((void __user *)arg, &up32, sizeof(up32)))
+		return -EFAULT;
+
+	return 0;
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct sepapp_session_close_params_32 {
+	int session_id; /*[in] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_sepapp_session_close(struct file *filp,
+						 unsigned int cmd,
+						 unsigned long arg)
+{
+	return sep_ioctl(filp, cmd, arg);
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct sepapp_command_invoke_params_32 {
+	u8 app_uuid[DXDI_SEPAPP_UUID_SIZE]; /*[in] */
+	int session_id; /*[in] */
+	u32 command_id; /*[in] */
+	struct dxdi_sepapp_params command_params; /*[in/out] */
+	enum dxdi_sep_module sep_ret_origin;  /*[out] */
+	u32 error_info; /*[out] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_sepapp_command_invoke(struct file *filp,
+						  unsigned int cmd,
+						  unsigned long arg)
+{
+	struct sepapp_command_invoke_params_32 up32;
+	struct dxdi_sepapp_command_invoke_params __user *user_params;
+	int ret;
+
+	if (copy_from_user(&up32, (void __user *)arg, sizeof(up32)))
+		return -EFAULT;
+
+	user_params = compat_alloc_user_space(sizeof(*user_params));
+	if (!access_ok(VERIFY_WRITE, user_params, sizeof(*user_params)))
+		return -EFAULT;
+
+	if (copy_to_user(&user_params->app_uuid,
+			 &up32.app_uuid,
+			 DXDI_SEPAPP_UUID_SIZE)
+	    || __put_user(up32.session_id, &user_params->session_id)
+	    || __put_user(up32.command_id, &user_params->command_id)
+	    || copy_to_user(&user_params->command_params,
+			    &up32.command_params,
+			    sizeof(struct dxdi_sepapp_params)))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)user_params);
+	if (ret)
+		return ret;
+
+
+	if (__get_user(up32.sep_ret_origin, &user_params->sep_ret_origin)
+	    || __copy_from_user(&up32.command_params,
+				&user_params->command_params,
+				sizeof(struct dxdi_sepapp_params))
+	    || __get_user(up32.error_info, &user_params->error_info))
+		return -EFAULT;
+
+	if (copy_to_user((void __user *)arg, &up32, sizeof(up32)))
+		return -EFAULT;
+
+	return ret;
+}
+
+static sep_ioctl_compat_t *sep_compat_ioctls[] = {
+	/* Version info. commands */
+	[DXDI_IOC_NR_GET_VER_MAJOR] = compat_sep_ioctl_get_ver_major,
+	[DXDI_IOC_NR_GET_VER_MINOR] = compat_sep_ioctl_get_ver_minor,
+	/* Context size queries */
+	[DXDI_IOC_NR_GET_SYMCIPHER_CTX_SIZE] = compat_sep_ioctl_get_sym_cipher_ctx_size,
+	[DXDI_IOC_NR_GET_AUTH_ENC_CTX_SIZE] = compat_sep_ioctl_get_auth_enc_ctx_size,
+	[DXDI_IOC_NR_GET_MAC_CTX_SIZE] = compat_sep_ioctl_get_mac_ctx_size,
+	[DXDI_IOC_NR_GET_HASH_CTX_SIZE] = compat_sep_ioctl_get_hash_ctx_size,
+	/* Init context commands */
+	[DXDI_IOC_NR_SYMCIPHER_INIT] = compat_sep_ioctl_sym_cipher_init,
+	[DXDI_IOC_NR_AUTH_ENC_INIT] = compat_sep_ioctl_auth_enc_init,
+	[DXDI_IOC_NR_MAC_INIT] = compat_sep_ioctl_mac_init,
+	[DXDI_IOC_NR_HASH_INIT] = compat_sep_ioctl_hash_init,
+	/* Processing commands */
+	[DXDI_IOC_NR_PROC_DBLK] = compat_sep_ioctl_proc_dblk,
+	[DXDI_IOC_NR_FIN_PROC] = compat_sep_ioctl_fin_proc,
+	/* "Integrated" processing operations */
+	[DXDI_IOC_NR_SYMCIPHER_PROC] = compat_sep_ioctl_sym_cipher_proc,
+	[DXDI_IOC_NR_AUTH_ENC_PROC] = compat_sep_ioctl_auth_enc_proc,
+	[DXDI_IOC_NR_MAC_PROC] = compat_sep_ioctl_mac_proc,
+	[DXDI_IOC_NR_HASH_PROC] = compat_sep_ioctl_hash_proc,
+	/* SeP RPC */
+	[DXDI_IOC_NR_SEP_RPC] = compat_sep_ioctl_sep_rpc,
+	/* Memory registration */
+	[DXDI_IOC_NR_REGISTER_MEM4DMA] = compat_sep_ioctl_register_mem4dma,
+	[DXDI_IOC_NR_ALLOC_MEM4DMA] = compat_sep_ioctl_alloc_mem4dma,
+	[DXDI_IOC_NR_FREE_MEM4DMA] = compat_sep_ioctl_free_mem4dma,
+	/* SeP Applets API */
+	[DXDI_IOC_NR_SEPAPP_SESSION_OPEN] = compat_sep_ioctl_sepapp_session_open,
+	[DXDI_IOC_NR_SEPAPP_SESSION_CLOSE] = compat_sep_ioctl_sepapp_session_close,
+	[DXDI_IOC_NR_SEPAPP_COMMAND_INVOKE] = compat_sep_ioctl_sepapp_command_invoke,
+	/* Combined mode */
+	[DXDI_IOC_NR_COMBINED_INIT] = compat_sep_ioctl_combined_init,
+	[DXDI_IOC_NR_COMBINED_PROC_DBLK] = compat_sep_ioctl_combined_proc_dblk,
+	[DXDI_IOC_NR_COMBINED_PROC_FIN] = compat_sep_ioctl_combined_fin_proc,
+	[DXDI_IOC_NR_COMBINED_PROC] = compat_sep_ioctl_combined_proc,
+
+	/* AES IV set/get API */
+	[DXDI_IOC_NR_SET_IV] = compat_sep_ioctl_set_iv,
+	[DXDI_IOC_NR_GET_IV] = compat_sep_ioctl_get_iv,
+};
+
+long sep_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	unsigned int nr = _IOC_NR(cmd);
+	sep_ioctl_compat_t *callback = NULL;
+	int ret;
+
+	pr_debug("Calling the IOCTL compat %d\n", nr);
+
+	if (nr < ARRAY_SIZE(sep_compat_ioctls))
+		callback = sep_compat_ioctls[nr];
+
+	if (callback != NULL)
+		ret = (*callback) (filp, cmd, arg);
+	else
+		ret = sep_ioctl(filp, cmd, arg);
+
+	return ret;
+}
diff --git a/drivers/staging/sep54/sep_compat_ioctl.h b/drivers/staging/sep54/sep_compat_ioctl.h
new file mode 100644
index 0000000..ba66665
--- /dev/null
+++ b/drivers/staging/sep54/sep_compat_ioctl.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2013  Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+
+#ifndef __SEP_COMPAT_IOCTL_H__
+#define __SEP_COMPAT_IOCTL_H__
+
+#include <linux/fs.h>
+
+/**
+ * \brief drm_compat_ioctl
+ *
+ * \param filp
+ * \param cmd
+ * \param arg
+ * \return 0 on success or a negtive number on failure
+ */
+long sep_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+
+#endif
diff --git a/drivers/staging/sep54/sep_ctx.h b/drivers/staging/sep54/sep_ctx.h
new file mode 100644
index 0000000..3282b54
--- /dev/null
+++ b/drivers/staging/sep54/sep_ctx.h
@@ -0,0 +1,296 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+#ifndef _SEP_CTX_H_
+#define _SEP_CTX_H_
+#ifdef __KERNEL__
+#include <linux/types.h>
+#define INT32_MAX 0x7FFFFFFFL
+#else
+#include <stdint.h>
+#endif
+
+#include "dx_cc_defs.h"
+
+#ifndef max
+#define max(a, b) ((a) > (b) ? (a) : (b))
+#define min(a, b) ((a) < (b) ? (a) : (b))
+#endif
+
+/* SeP context size */
+#define SEP_CTX_SIZE_LOG2 7
+#define SEP_CTX_SIZE (1<<SEP_CTX_SIZE_LOG2)
+#define SEP_CTX_SIZE_WORDS (SEP_CTX_SIZE >> 2)
+
+#define SEP_DES_IV_SIZE 8
+#define SEP_DES_BLOCK_SIZE 8
+
+#define SEP_DES_ONE_KEY_SIZE 8
+#define SEP_DES_DOUBLE_KEY_SIZE 16
+#define SEP_DES_TRIPLE_KEY_SIZE 24
+#define SEP_DES_KEY_SIZE_MAX SEP_DES_TRIPLE_KEY_SIZE
+
+#define SEP_AES_IV_SIZE 16
+#define SEP_AES_IV_SIZE_WORDS (SEP_AES_IV_SIZE >> 2)
+
+#define SEP_AES_BLOCK_SIZE 16
+#define SEP_AES_BLOCK_SIZE_WORDS 4
+
+#define SEP_AES_128_BIT_KEY_SIZE 16
+#define SEP_AES_128_BIT_KEY_SIZE_WORDS	(SEP_AES_128_BIT_KEY_SIZE >> 2)
+#define SEP_AES_192_BIT_KEY_SIZE 24
+#define SEP_AES_192_BIT_KEY_SIZE_WORDS	(SEP_AES_192_BIT_KEY_SIZE >> 2)
+#define SEP_AES_256_BIT_KEY_SIZE 32
+#define SEP_AES_256_BIT_KEY_SIZE_WORDS	(SEP_AES_256_BIT_KEY_SIZE >> 2)
+#define SEP_AES_KEY_SIZE_MAX			SEP_AES_256_BIT_KEY_SIZE
+#define SEP_AES_KEY_SIZE_WORDS_MAX		(SEP_AES_KEY_SIZE_MAX >> 2)
+
+#define SEP_SHA1_DIGEST_SIZE 20
+#define SEP_SHA224_DIGEST_SIZE 28
+#define SEP_SHA256_DIGEST_SIZE 32
+#define SEP_SHA384_DIGEST_SIZE 48
+#define SEP_SHA512_DIGEST_SIZE 64
+#define SEP_SHA1024_DIGEST_SIZE 128
+
+#define SEP_SHA1_BLOCK_SIZE 64
+#define SEP_SHA224_BLOCK_SIZE 64
+#define SEP_SHA256_BLOCK_SIZE 64
+#define SEP_SHA1_224_256_BLOCK_SIZE 64
+#define SEP_SHA384_BLOCK_SIZE 128
+#define SEP_SHA512_BLOCK_SIZE 128
+#define SEP_SHA1024_BLOCK_SIZE 128
+
+#if (SEP_SUPPORT_SHA > 256)
+#define SEP_DIGEST_SIZE_MAX SEP_SHA512_DIGEST_SIZE
+#define SEP_HASH_BLOCK_SIZE_MAX SEP_SHA512_BLOCK_SIZE	/*1024b */
+#else				/* Only up to SHA256 */
+#define SEP_DIGEST_SIZE_MAX SEP_SHA256_DIGEST_SIZE
+#define SEP_HASH_BLOCK_SIZE_MAX SEP_SHA256_BLOCK_SIZE	/*256 */
+#endif
+
+#define SEP_HMAC_BLOCK_SIZE_MAX SEP_HASH_BLOCK_SIZE_MAX
+
+#define SEP_RC4_KEY_SIZE_MIN 1
+#define SEP_RC4_KEY_SIZE_MAX 20
+#define SEP_RC4_STATE_SIZE 264
+
+#define SEP_C2_KEY_SIZE_MAX 16
+#define SEP_C2_BLOCK_SIZE 8
+
+#define SEP_ALG_MAX_BLOCK_SIZE SEP_HASH_BLOCK_SIZE_MAX
+
+#define SEP_MAX_COMBINED_ENGINES 4
+
+#define SEP_MAX_CTX_SIZE (max(sizeof(struct sep_ctx_rc4), \
+				sizeof(struct sep_ctx_cache_entry)))
+enum sep_engine_type {
+	SEP_ENGINE_NULL = 0,
+	SEP_ENGINE_AES = 1,
+	SEP_ENGINE_DES = 2,
+	SEP_ENGINE_HASH = 3,
+	SEP_ENGINE_RC4 = 4,
+	SEP_ENGINE_DOUT = 5,
+	SEP_ENGINE_RESERVE32B = INT32_MAX,
+};
+
+enum sep_crypto_alg {
+	SEP_CRYPTO_ALG_NULL = -1,
+	SEP_CRYPTO_ALG_AES = 0,
+	SEP_CRYPTO_ALG_DES = 1,
+	SEP_CRYPTO_ALG_HASH = 2,
+	SEP_CRYPTO_ALG_RC4 = 3,
+	SEP_CRYPTO_ALG_C2 = 4,
+	SEP_CRYPTO_ALG_HMAC = 5,
+	SEP_CRYPTO_ALG_AEAD = 6,
+	SEP_CRYPTO_ALG_BYPASS = 7,
+	SEP_CRYPTO_ALG_COMBINED = 8,
+	SEP_CRYPTO_ALG_NUM = 9,
+	SEP_CRYPTO_ALG_RESERVE32B = INT32_MAX
+};
+
+enum sep_crypto_direction {
+	SEP_CRYPTO_DIRECTION_NULL = -1,
+	SEP_CRYPTO_DIRECTION_ENCRYPT = 0,
+	SEP_CRYPTO_DIRECTION_DECRYPT = 1,
+	SEP_CRYPTO_DIRECTION_DECRYPT_ENCRYPT = 3,
+	SEP_CRYPTO_DIRECTION_RESERVE32B = INT32_MAX
+};
+
+enum sep_cipher_mode {
+	SEP_CIPHER_NULL_MODE = -1,
+	SEP_CIPHER_ECB = 0,
+	SEP_CIPHER_CBC = 1,
+	SEP_CIPHER_CTR = 2,
+	SEP_CIPHER_CBC_MAC = 3,
+	SEP_CIPHER_XTS = 4,
+	SEP_CIPHER_XCBC_MAC = 5,
+	SEP_CIPHER_CMAC = 7,
+	SEP_CIPHER_CCM = 8,
+	SEP_CIPHER_RESERVE32B = INT32_MAX
+};
+
+enum sep_hash_mode {
+	SEP_HASH_NULL = -1,
+	SEP_HASH_SHA1 = 0,
+	SEP_HASH_SHA256 = 1,
+	SEP_HASH_SHA224 = 2,
+	SEP_HASH_MODE_NUM = 3,
+
+	/* Unsupported */
+	SEP_HASH_SHA512 = 3,
+	SEP_HASH_SHA384 = 4,
+	SEP_HASH_RESERVE32B = INT32_MAX
+};
+
+enum sep_hash_hw_mode {
+	SEP_HASH_HW_SHA1 = 1,
+	SEP_HASH_HW_SHA256 = 2,
+	SEP_HASH_HW_SHA224 = 10,
+	SEP_HASH_HW_SHA512 = 4,
+	SEP_HASH_HW_SHA384 = 12,
+	SEP_HASH_HW_RESERVE32B = INT32_MAX
+};
+
+enum sep_c2_mode {
+	SEP_C2_NULL = -1,
+	SEP_C2_ECB = 0,
+	SEP_C2_CBC = 1,
+	SEP_C2_RESERVE32B = INT32_MAX
+};
+
+/*******************************************************************/
+/***************** DESCRIPTOR BASED CONTEXTS ***********************/
+/*******************************************************************/
+
+ /* Generic context ("super-class") */
+struct sep_ctx_generic {
+	enum sep_crypto_alg alg;
+} __attribute__ ((__may_alias__));
+
+/* Cache context entry ("sub-class") */
+struct sep_ctx_cache_entry {
+	enum sep_crypto_alg alg;
+	u32 reserved[SEP_CTX_SIZE_WORDS - 1];
+};
+
+struct sep_ctx_c2 {
+	enum sep_crypto_alg alg;	/* SEP_CRYPTO_ALG_C2 */
+	enum sep_c2_mode mode;
+	enum sep_crypto_direction direction;
+	/* reserve to end of allocated context size */
+	u32 key_size;	/* numeric value in bytes */
+	u8 key[SEP_C2_KEY_SIZE_MAX];
+	u8 reserved[SEP_CTX_SIZE - 4 * sizeof(u32) -
+			 SEP_C2_KEY_SIZE_MAX];
+};
+
+struct sep_ctx_hash {
+	enum sep_crypto_alg alg;	/* SEP_CRYPTO_ALG_HASH */
+	enum sep_hash_mode mode;
+	u8 digest[SEP_DIGEST_SIZE_MAX];
+	/* reserve to end of allocated context size */
+	u8 reserved[SEP_CTX_SIZE - 2 * sizeof(u32) -
+			 SEP_DIGEST_SIZE_MAX];
+};
+
+/* !!!! sep_ctx_hmac should have the same structure as sep_ctx_hash except
+   k0, k0_size fields */
+struct sep_ctx_hmac {
+	enum sep_crypto_alg alg;	/* SEP_CRYPTO_ALG_HMAC */
+	enum sep_hash_mode mode;
+	u8 digest[SEP_DIGEST_SIZE_MAX];
+	u8 k0[SEP_HMAC_BLOCK_SIZE_MAX];
+	u32 k0_size;
+	/* reserve to end of allocated context size */
+	u8 reserved[SEP_CTX_SIZE - 3 * sizeof(u32) -
+			 SEP_DIGEST_SIZE_MAX - SEP_HMAC_BLOCK_SIZE_MAX];
+};
+
+struct sep_ctx_cipher {
+	enum sep_crypto_alg alg;	/* SEP_CRYPTO_ALG_AES */
+	enum sep_cipher_mode mode;
+	enum sep_crypto_direction direction;
+	enum dx_crypto_key_type crypto_key_type;
+	u32 key_size;	/* numeric value in bytes   */
+	u32 data_unit_size;	/* required for XTS */
+	/* block_state is the AES engine block state.
+	 *  It is used by the host to pass IV or counter at initialization.
+	 *  It is used by SeP for intermediate block chaining state and for
+	 *  returning MAC algorithms results.           */
+	u8 block_state[SEP_AES_BLOCK_SIZE];
+	u8 key[SEP_AES_KEY_SIZE_MAX];
+	u8 xex_key[SEP_AES_KEY_SIZE_MAX];
+	/* reserve to end of allocated context size */
+	u32 reserved[SEP_CTX_SIZE_WORDS - 6 -
+			  SEP_AES_BLOCK_SIZE / sizeof(u32) - 2 *
+			  (SEP_AES_KEY_SIZE_MAX / sizeof(u32))];
+};
+
+/* authentication and encryption with associated data class */
+struct sep_ctx_aead {
+	enum sep_crypto_alg alg;	/* SEP_CRYPTO_ALG_AES */
+	enum sep_cipher_mode mode;
+	enum sep_crypto_direction direction;
+	u32 key_size;	/* numeric value in bytes   */
+	u32 nonce_size;	/* nonce size (octets) */
+	u32 header_size;	/* finit additional data size (octets) */
+	u32 text_size;	/* finit text data size (octets) */
+	/* mac size, element of {4, 6, 8, 10, 12, 14, 16} */
+	u32 tag_size;
+	/* block_state1/2 is the AES engine block state */
+	u8 block_state[SEP_AES_BLOCK_SIZE];
+	u8 mac_state[SEP_AES_BLOCK_SIZE];	/* MAC result */
+	u8 nonce[SEP_AES_BLOCK_SIZE];	/* nonce buffer */
+	u8 key[SEP_AES_KEY_SIZE_MAX];
+	/* reserve to end of allocated context size */
+	u32 reserved[SEP_CTX_SIZE_WORDS - 8 -
+			  3 * (SEP_AES_BLOCK_SIZE / sizeof(u32)) -
+			  SEP_AES_KEY_SIZE_MAX / sizeof(u32)];
+};
+
+/* crys combined context */
+struct sep_ctx_combined {
+	enum sep_crypto_alg alg;
+	u32 mode;
+	/* array of sub contexts used for the combined operation      *
+	 *  according to the given mode                               */
+	struct sep_ctx_cache_entry *sub_ctx[SEP_MAX_COMBINED_ENGINES];
+	/* store the host contexts addresses (optimization) */
+	u32 host_addr[SEP_MAX_COMBINED_ENGINES];
+};
+
+/*******************************************************************/
+/***************** MESSAGE BASED CONTEXTS **************************/
+/*******************************************************************/
+
+struct sep_ctx_rc4 {
+	enum sep_crypto_alg alg;	/* SEP_CRYPTO_ALG_RC4 */
+	u32 key_size;	/* numeric value in bytes */
+	u8 key[SEP_RC4_KEY_SIZE_MAX];
+	u8 state[SEP_RC4_STATE_SIZE];
+};
+
+#endif				/* _SEP_CTX_H_ */
diff --git a/drivers/staging/sep54/sep_init.c b/drivers/staging/sep54/sep_init.c
new file mode 100644
index 0000000..40d4bce
--- /dev/null
+++ b/drivers/staging/sep54/sep_init.c
@@ -0,0 +1,801 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+/* \file
+   This file implements the SeP FW initialization sequence.
+   This is part of the Discretix CC initalization specifications              */
+
+#define SEP_LOG_CUR_COMPONENT SEP_LOG_MASK_SEP_INIT
+
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+/* Registers definitions from shared/hw/include */
+#include "dx_reg_base_host.h"
+#include "dx_host.h"
+#define DX_CC_HOST_VIRT	/* must be defined before including dx_cc_regs.h */
+#include "dx_cc_regs.h"
+#include "dx_bitops.h"
+#include "sep_log.h"
+#include "dx_driver.h"
+#include "dx_init_cc_abi.h"
+#include "dx_init_cc_defs.h"
+#include "sep_sram_map.h"
+#include "sep_init.h"
+#include "sep_request_mgr.h"
+#include "sep_power.h"
+
+#ifdef DEBUG
+#define FW_INIT_TIMEOUT_SEC     10
+#define COLD_BOOT_TIMEOUT_SEC	10
+#else
+#define FW_INIT_TIMEOUT_SEC     3
+#define COLD_BOOT_TIMEOUT_SEC	3
+#endif
+#define FW_INIT_TIMEOUT_MSEC	(FW_INIT_TIMEOUT_SEC * 1000)
+#define COLD_BOOT_TIMEOUT_MSEC  (COLD_BOOT_TIMEOUT_SEC * 1000)
+
+#define FW_INIT_PARAMS_BUF_LEN		1024
+
+/*** CC_INIT handlers ***/
+
+/**
+ * struct cc_init_ctx - CC init. context
+ * @drvdata:		Associated device driver
+ * @resident_p:		Pointer to resident image buffer
+ * @resident_dma_addr:	DMA address of resident image buffer
+ * @resident_size:	Size in bytes of the resident image
+ * @cache_p:	Pointer to (i)cache image buffer
+ * @cache_dma_addr:	DMA address of the (i)cache image
+ * @cache_size:		Size in bytes if the (i)cache image
+ * @vrl_p:		Pointer to VRL image buffer
+ * @vrl_dma_addr:	DMA address of the VRL
+ * @vrl_size:		Size in bytes of the VRL
+ * @msg_buf:		A buffer for building the CC-Init message
+ */
+struct cc_init_ctx {
+	struct sep_drvdata *drvdata;
+	void *resident_p;
+	dma_addr_t resident_dma_addr;
+	size_t resident_size;
+	void *cache_p;
+	dma_addr_t cache_dma_addr;
+	size_t cache_size;
+	void *vrl_p;
+	dma_addr_t vrl_dma_addr;
+	size_t vrl_size;
+	u32 msg_buf[DX_CC_INIT_MSG_LENGTH];
+};
+
+static void destroy_cc_init_ctx(struct cc_init_ctx *ctx)
+{
+	struct device *mydev = ctx->drvdata->dev;
+
+	if (ctx->vrl_p != NULL)
+		dma_free_coherent(mydev, ctx->vrl_size,
+				  ctx->vrl_p, ctx->vrl_dma_addr);
+	if (ctx->cache_p != NULL)
+		dma_free_coherent(mydev, ctx->cache_size,
+				  ctx->cache_p, ctx->cache_dma_addr);
+	if (ctx->resident_p != NULL)
+		dma_free_coherent(mydev, ctx->resident_size,
+				  ctx->resident_p, ctx->resident_dma_addr);
+	kfree(ctx);
+}
+
+/**
+ * fetch_image() - Fetch CC image using request_firmware mechanism and
+ *	locate it in a DMA coherent buffer.
+ *
+ * @mydev:
+ * @image_name:		Image file name (from /lib/firmware/)
+ * @image_pp:		Allocated image buffer
+ * @image_dma_addr_p:	Allocated image DMA addr
+ * @image_size_p:	Loaded image size
+ */
+static int fetch_image(struct device *mydev, const char *image_name,
+		       void **image_pp, dma_addr_t *image_dma_addr_p,
+		       size_t *image_size_p)
+{
+	const struct firmware *image;
+	int rc;
+
+	rc = request_firmware(&image, image_name, mydev);
+	if (unlikely(rc != 0)) {
+		pr_err("Failed loading image %s (%d)\n", image_name, rc);
+		return -ENODEV;
+	}
+	*image_pp = dma_alloc_coherent(mydev,
+				       image->size, image_dma_addr_p,
+				       GFP_KERNEL);
+	if (unlikely(*image_pp == NULL)) {
+		pr_err("Failed allocating DMA mem. for resident image\n");
+		rc = -ENOMEM;
+	} else {
+		memcpy(*image_pp, image->data, image->size);
+		*image_size_p = image->size;
+	}
+	/* Image copied into the DMA coherent buffer. No need for "firmware" */
+	release_firmware(image);
+	if (likely(rc == 0))
+		pr_debug("%s: %zu Bytes\n", image_name, *image_size_p);
+	return rc;
+}
+
+#ifdef CACHE_IMAGE_NAME
+static enum dx_cc_init_msg_icache_size icache_size_to_enum(u8
+							   icache_size_log2)
+{
+	int i;
+	const int icache_sizes_enum2log[] = DX_CC_ICACHE_SIZE_ENUM2LOG;
+
+	for (i = 0; i < sizeof(icache_sizes_enum2log) / sizeof(int); i++)
+		if ((icache_size_log2 == icache_sizes_enum2log[i]) &&
+		    (icache_sizes_enum2log[i] >= 0))
+			return (enum dx_cc_init_msg_icache_size)i;
+	pr_err("Requested Icache size (%uKB) is invalid\n",
+		    1 << (icache_size_log2 - 10));
+	return DX_CC_INIT_MSG_ICACHE_SCR_INVALID_SIZE;
+}
+#endif
+
+/**
+ * get_cc_init_checksum() - Calculate CC_INIT message checksum
+ *
+ * @msg_p:	Pointer to the message buffer
+ * @length:	Size of message in _bytes_
+ */
+static u32 get_cc_init_checksum(u32 *msg_p)
+{
+	int bytes_remain;
+	u32 sum = 0;
+	u16 *tdata = (u16 *)msg_p;
+
+	for (bytes_remain = DX_CC_INIT_MSG_LENGTH * sizeof(u32);
+	     bytes_remain > 1; bytes_remain -= 2)
+		sum += *tdata++;
+	/*  Add left-over byte, if any */
+	if (bytes_remain > 0)
+		sum += *(u8 *)tdata;
+	/*  Fold 32-bit sum to 16 bits */
+	while ((sum >> 16) != 0)
+		sum = (sum & 0xFFFF) + (sum >> 16);
+	return ~sum & 0xFFFF;
+}
+
+static void build_cc_init_msg(struct cc_init_ctx *init_ctx)
+{
+	u32 *const msg_p = init_ctx->msg_buf;
+	struct sep_drvdata *drvdata = init_ctx->drvdata;
+#ifndef VRL_KEY_INDEX
+	/* Verify VRL key against this truncated hash value */
+	u32 const vrl_key_hash[] = VRL_KEY_HASH;
+#endif
+#ifdef CACHE_IMAGE_NAME
+	enum dx_cc_init_msg_icache_size icache_size_code;
+#endif
+
+	memset(msg_p, 0, DX_CC_INIT_MSG_LENGTH * sizeof(u32));
+	msg_p[DX_CC_INIT_MSG_TOKEN_OFFSET] = DX_CC_INIT_HEAD_MSG_TOKEN;
+	msg_p[DX_CC_INIT_MSG_LENGTH_OFFSET] = DX_CC_INIT_MSG_LENGTH;
+	msg_p[DX_CC_INIT_MSG_OP_CODE_OFFSET] = DX_HOST_REQ_CC_INIT;
+	msg_p[DX_CC_INIT_MSG_FLAGS_OFFSET] =
+	    DX_CC_INIT_FLAGS_RESIDENT_ADDR_FLAG;
+	msg_p[DX_CC_INIT_MSG_RESIDENT_IMAGE_OFFSET] =
+	    init_ctx->resident_dma_addr;
+
+#ifdef CACHE_IMAGE_NAME
+	icache_size_code = icache_size_to_enum(drvdata->icache_size_log2);
+	msg_p[DX_CC_INIT_MSG_FLAGS_OFFSET] |=
+	    DX_CC_INIT_FLAGS_I_CACHE_ADDR_FLAG |
+	    DX_CC_INIT_FLAGS_D_CACHE_EXIST_FLAG |
+	    DX_CC_INIT_FLAGS_CACHE_ENC_FLAG | DX_CC_INIT_FLAGS_CACHE_COPY_FLAG;
+#ifdef DX_CC_INIT_FLAGS_CACHE_SCRAMBLE_FLAG
+	/* Enable scrambling if available */
+	msg_p[DX_CC_INIT_MSG_FLAGS_OFFSET] |=
+	    DX_CC_INIT_FLAGS_CACHE_SCRAMBLE_FLAG;
+#endif
+	msg_p[DX_CC_INIT_MSG_I_CACHE_IMAGE_OFFSET] = init_ctx->cache_dma_addr;
+	msg_p[DX_CC_INIT_MSG_I_CACHE_DEST_OFFSET] =
+	    page_to_phys(drvdata->icache_pages);
+	msg_p[DX_CC_INIT_MSG_I_CACHE_SIZE_OFFSET] = icache_size_code;
+	msg_p[DX_CC_INIT_MSG_D_CACHE_ADDR_OFFSET] =
+	    page_to_phys(drvdata->dcache_pages);
+	msg_p[DX_CC_INIT_MSG_D_CACHE_SIZE_OFFSET] =
+	    1 << drvdata->dcache_size_log2;
+#elif defined(SEP_BACKUP_BUF_SIZE)
+	/* Declare SEP backup buffer resources */
+	msg_p[DX_CC_INIT_MSG_HOST_BUFF_ADDR_OFFSET] =
+	    virt_to_phys(drvdata->sep_backup_buf);
+	msg_p[DX_CC_INIT_MSG_HOST_BUFF_SIZE_OFFSET] =
+	    drvdata->sep_backup_buf_size;
+#endif				/*CACHE_IMAGE_NAME */
+
+	msg_p[DX_CC_INIT_MSG_VRL_ADDR_OFFSET] = init_ctx->vrl_dma_addr;
+	/* Handle VRL key hash */
+#ifdef VRL_KEY_INDEX
+	msg_p[DX_CC_INIT_MSG_KEY_INDEX_OFFSET] = VRL_KEY_INDEX;
+#else	/* Key should be validated against VRL_KEY_HASH */
+	msg_p[DX_CC_INIT_MSG_KEY_INDEX_OFFSET] =
+	    DX_CC_INIT_MSG_VRL_KEY_INDEX_INVALID;
+	msg_p[DX_CC_INIT_MSG_KEY_HASH_0_OFFSET] = vrl_key_hash[0];
+	msg_p[DX_CC_INIT_MSG_KEY_HASH_1_OFFSET] = vrl_key_hash[1];
+	msg_p[DX_CC_INIT_MSG_KEY_HASH_2_OFFSET] = vrl_key_hash[2];
+	msg_p[DX_CC_INIT_MSG_KEY_HASH_3_OFFSET] = vrl_key_hash[3];
+#endif
+
+	msg_p[DX_CC_INIT_MSG_CHECK_SUM_OFFSET] = get_cc_init_checksum(msg_p);
+
+	dump_word_array("CC_INIT", msg_p, DX_CC_INIT_MSG_LENGTH);
+}
+
+/**
+ * create_cc_init_ctx() - Create CC-INIT message and allocate associated
+ *	resources (load FW images, etc.)
+ *
+ * @drvdata:		Device context
+ *
+ * Returns the allocated message context or NULL on failure.
+ */
+struct cc_init_ctx *create_cc_init_ctx(struct sep_drvdata *drvdata)
+{
+	struct cc_init_ctx *init_ctx;
+	struct device *const mydev = drvdata->dev;
+	int rc;
+
+	init_ctx = kzalloc(sizeof(struct cc_init_ctx), GFP_KERNEL);
+	if (unlikely(init_ctx == NULL)) {
+		pr_err("Failed allocating CC-Init. context\n");
+		rc = -ENOMEM;
+		goto create_err;
+	}
+	init_ctx->drvdata = drvdata;
+	rc = fetch_image(mydev, RESIDENT_IMAGE_NAME, &init_ctx->resident_p,
+			 &init_ctx->resident_dma_addr,
+			 &init_ctx->resident_size);
+	if (unlikely(rc != 0))
+		goto create_err;
+#ifdef CACHE_IMAGE_NAME
+	rc = fetch_image(mydev, CACHE_IMAGE_NAME, &init_ctx->cache_p,
+			 &init_ctx->cache_dma_addr, &init_ctx->cache_size);
+	if (unlikely(rc != 0))
+		goto create_err;
+#endif				/*CACHE_IMAGE_NAME */
+	rc = fetch_image(mydev, VRL_IMAGE_NAME, &init_ctx->vrl_p,
+			 &init_ctx->vrl_dma_addr, &init_ctx->vrl_size);
+	if (unlikely(rc != 0))
+		goto create_err;
+	build_cc_init_msg(init_ctx);
+	return init_ctx;
+
+ create_err:
+
+	if (init_ctx != NULL)
+		destroy_cc_init_ctx(init_ctx);
+	return NULL;
+}
+
+/**
+ * sepinit_wait_for_cold_boot_finish() - Wait for SeP to reach cold-boot-finish
+ *					state (i.e., ready for driver-init)
+ * @drvdata:
+ *
+ * Returns int 0 for success, !0 on timeout while waiting for cold-boot-finish
+ */
+static int sepinit_wait_for_cold_boot_finish(struct sep_drvdata *drvdata)
+{
+	enum dx_sep_state cur_state;
+	u32 cur_status;
+	int rc = 0;
+
+	cur_state =
+	    dx_sep_wait_for_state(DX_SEP_STATE_DONE_COLD_BOOT |
+				  DX_SEP_STATE_FATAL_ERROR,
+				  COLD_BOOT_TIMEOUT_MSEC);
+	if (cur_state != DX_SEP_STATE_DONE_COLD_BOOT) {
+		rc = -EIO;
+		cur_status =
+		    READ_REGISTER(drvdata->cc_base + SEP_STATUS_GPR_OFFSET);
+		pr_err(
+			    "Failed waiting for DONE_COLD_BOOT from SeP (state=0x%08X status=0x%08X)\n",
+			    cur_state, cur_status);
+	}
+
+	return rc;
+}
+
+/**
+ * dispatch_cc_init_msg() - Push given CC_INIT message into SRAM and signal
+ *	SeP to start cold boot sequence
+ *
+ * @drvdata:
+ * @init_cc_msg_p:	A pointer to the message context
+ */
+static int dispatch_cc_init_msg(struct sep_drvdata *drvdata,
+				struct cc_init_ctx *cc_init_ctx_p)
+{
+	int i;
+	u32 is_data_ready;
+	/*
+	 * get the base address of the SRAM and add the offset
+	 * for the CC_Init message
+	 */
+	const u32 msg_target_addr =
+	    READ_REGISTER(DX_CC_REG_ADDR(drvdata->cc_base,
+					 HOST, HOST_SEP_SRAM_THRESHOLD)) +
+	    DX_CC_INIT_MSG_OFFSET_IN_SRAM;
+
+	/* Initialize SRAM access address register for message location */
+	WRITE_REGISTER(DX_CC_REG_ADDR(drvdata->cc_base, SRAM, SRAM_ADDR),
+		       msg_target_addr);
+	/* Write the message word by word to the SEP intenral offset */
+	for (i = 0; i < sizeof(cc_init_ctx_p->msg_buf) / sizeof(u32);
+		i++) {
+		/* write data to SRAM */
+		WRITE_REGISTER(DX_CC_REG_ADDR(drvdata->cc_base,
+					      SRAM, SRAM_DATA),
+			       cc_init_ctx_p->msg_buf[i]);
+		/* wait for write complete */
+		do {
+			is_data_ready = 1 &
+			    READ_REGISTER(DX_CC_REG_ADDR(drvdata->cc_base,
+							 SRAM,
+							 SRAM_DATA_READY));
+		} while (!is_data_ready);
+		/* TODO: Timeout in case something gets broken */
+	}
+	/* Signal SeP: Request CC_INIT */
+	WRITE_REGISTER(drvdata->cc_base +
+		       HOST_SEP_GPR_REG_OFFSET(DX_HOST_REQ_GPR_IDX),
+		       DX_HOST_REQ_CC_INIT);
+	return 0;
+}
+
+/**
+ * sepinit_do_cc_init() - Initiate SeP cold boot sequence and wait for
+ *	its completion.
+ *
+ * @drvdata:
+ *
+ * This function loads the CC firmware and dispatches a CC_INIT request message
+ * Returns int 0 for success
+ */
+int sepinit_do_cc_init(struct sep_drvdata *drvdata)
+{
+	u32 cur_state;
+	struct cc_init_ctx *cc_init_ctx_p;
+	int rc;
+
+	cur_state = dx_sep_wait_for_state(DX_SEP_STATE_START_SECURE_BOOT,
+					  COLD_BOOT_TIMEOUT_MSEC);
+	if (cur_state != DX_SEP_STATE_START_SECURE_BOOT) {
+		pr_err("Bad SeP state = 0x%08X\n", cur_state);
+		return -EIO;
+	}
+#ifdef __BIG_ENDIAN
+	/* Enable byte swapping in DMA operations */
+	WRITE_REGISTER(DX_CC_REG_ADDR(drvdata->cc_base, HOST, HOST_HOST_ENDIAN),
+		       0xCCUL);
+	/* TODO: Define value in device specific header files? */
+#endif
+	cc_init_ctx_p = create_cc_init_ctx(drvdata);
+	if (likely(cc_init_ctx_p != NULL))
+		rc = dispatch_cc_init_msg(drvdata, cc_init_ctx_p);
+	else
+		rc = -ENOMEM;
+	if (likely(rc == 0))
+		rc = sepinit_wait_for_cold_boot_finish(drvdata);
+	if (cc_init_ctx_p != NULL)
+		destroy_cc_init_ctx(cc_init_ctx_p);
+	return rc;
+}
+
+/*** FW_INIT handlers ***/
+
+#ifdef DEBUG
+#define ENUM_CASE_RETURN_STR(enum_name)	(case enum_name: return #enum_name)
+
+static const char *param2str(enum dx_fw_init_tlv_params param_type)
+{
+	switch (param_type) {
+		ENUM_CASE_RETURN_STR(DX_FW_INIT_PARAM_NULL);
+		ENUM_CASE_RETURN_STR(DX_FW_INIT_PARAM_FIRST);
+		ENUM_CASE_RETURN_STR(DX_FW_INIT_PARAM_LAST);
+		ENUM_CASE_RETURN_STR(DX_FW_INIT_PARAM_DISABLE_MODULES);
+		ENUM_CASE_RETURN_STR(DX_FW_INIT_PARAM_HOST_AXI_CONFIG);
+		ENUM_CASE_RETURN_STR(DX_FW_INIT_PARAM_HOST_DEF_APPLET_CONFIG);
+		ENUM_CASE_RETURN_STR(DX_FW_INIT_PARAM_NUM_OF_DESC_QS);
+		ENUM_CASE_RETURN_STR(DX_FW_INIT_PARAM_DESC_QS_ADDR);
+		ENUM_CASE_RETURN_STR(DX_FW_INIT_PARAM_DESC_QS_SIZE);
+		ENUM_CASE_RETURN_STR(DX_FW_INIT_PARAM_CTX_CACHE_PART);
+		ENUM_CASE_RETURN_STR(DX_FW_INIT_PARAM_SEP_REQUEST_PARAMS);
+		ENUM_CASE_RETURN_STR(DX_FW_INIT_PARAM_SEP_FREQ);
+	default:
+		return "(unknown param.)";
+	}
+}
+
+static void dump_fwinit_params(struct sep_drvdata *drvdata,
+			       u32 *fw_init_params_buf_p)
+{
+#define LINE_BUF_LEN 90		/* increased to hold values for 2 queues */
+	const u32 last_tl_word = DX_TL_WORD(DX_FW_INIT_PARAM_LAST, 1);
+	u32 tl_word;
+	u16 type, len;
+	u32 *cur_buf_p;
+	unsigned int i = 0;
+	char line_buf[LINE_BUF_LEN];
+	unsigned int line_offset;
+
+	pr_debug("Dx SeP fw_init params dump:\n");
+	cur_buf_p = fw_init_params_buf_p;
+	do {
+		tl_word = le32_to_cpu(*cur_buf_p);
+		cur_buf_p++;
+		type = DX_TL_GET_TYPE(tl_word);
+		len = DX_TL_GET_LENGTH(tl_word);
+
+		if ((cur_buf_p + len - fw_init_params_buf_p) >
+		    (FW_INIT_PARAMS_BUF_LEN / sizeof(u32))) {
+			pr_err
+			    ("LAST parameter not found up to buffer end\n");
+			break;
+		}
+
+		line_offset = snprintf(line_buf, LINE_BUF_LEN,
+				       "Type=0x%04X (%s), Length=%u , Val={",
+				       type, param2str(type), len);
+		for (i = 0; i < len; i++) {
+			/*
+			 * 11 is length of printed value
+			 * (formatted with 0x%08X in the
+			 * next call to snprintf
+			 */
+			if (line_offset + 11 >= LINE_BUF_LEN) {
+				pr_debug("%s\n", line_buf);
+				line_offset = 0;
+			}
+			line_offset += snprintf(line_buf + line_offset,
+						LINE_BUF_LEN - line_offset,
+						"0x%08X,",
+						le32_to_cpu(*cur_buf_p));
+			cur_buf_p++;
+		}
+		pr_debug("%s}\n", line_buf);
+	} while (tl_word != last_tl_word);
+}
+#else
+#define dump_fwinit_params(drvdata, fw_init_params_buf_p) do {} while (0)
+#endif /*DEBUG*/
+/**
+ * add_fwinit_param() - Add TLV parameter for FW-init.
+ * @tlv_buf:	 The base of the TLV parameters buffers
+ * @idx_p:	 (in/out): Current tlv_buf word index
+ * @checksum_p:	 (in/out): 32bit checksum for TLV array
+ * @type:	 Parameter type
+ * @length:	 Parameter length (size in words of "values")
+ * @values:	 Values array ("length" values)
+ *
+ * Returns void
+ */
+static void add_fwinit_param(u32 *tlv_buf, u32 *idx_p,
+			     u32 *checksum_p,
+			     enum dx_fw_init_tlv_params type, u16 length,
+			     const u32 *values)
+{
+	const u32 tl_word = DX_TL_WORD(type, length);
+	int i;
+
+#ifdef DEBUG
+	/* Verify that we have enough space for LAST param. after this param. */
+	if ((*idx_p + 1 + length + 2) > (FW_INIT_PARAMS_BUF_LEN / 4)) {
+		pr_err("tlv_buf size limit reached!\n");
+		SEP_DRIVER_BUG();
+	}
+#endif
+
+	/* Add type-length word */
+	tlv_buf[(*idx_p)++] = cpu_to_le32(tl_word);
+	*checksum_p += tl_word;
+
+	/* Add values */
+	for (i = 0; i < length; i++) {
+		/* Add value words if any. TL-word is counted as first... */
+		tlv_buf[(*idx_p)++] = cpu_to_le32(values[i]);
+		*checksum_p += values[i];
+	}
+}
+
+/**
+ * init_fwinit_param_list() - Initialize TLV parameters list
+ * @tlv_buf:	The pointer to the TLV list array buffer
+ * @idx_p:	The pointer to the variable that would maintain current
+ *		position in the tlv_array
+ * @checksum_p:	The pointer to the variable that would accumulate the
+ *		TLV array checksum
+ *
+ * Returns void
+ */
+static void init_fwinit_param_list(u32 *tlv_buf, u32 *idx_p,
+				   u32 *checksum_p)
+{
+	const u32 magic = DX_FW_INIT_PARAM_FIRST_MAGIC;
+	/* Initialize index and checksum variables */
+	*idx_p = 0;
+	*checksum_p = 0;
+	/* Start with FIRST_MAGIC parameter */
+	add_fwinit_param(tlv_buf, idx_p, checksum_p,
+			 DX_FW_INIT_PARAM_FIRST, 1, &magic);
+}
+
+/**
+ * terminate_fwinit_param_list() - Terminate the TLV parameters list with
+ *				LAST/checksum parameter
+ * @tlv_buf:	The pointer to the TLV list array buffer
+ * @idx_p:	The pointer to the variable that would maintain current
+ *		position in the tlv_array
+ * @checksum_p:	The pointer to the variable that would accumulate the
+ *		TLV array checksum
+ *
+ * Returns void
+ */
+static void terminate_fwinit_param_list(u32 *tlv_buf, u32 *idx_p,
+					u32 *checksum_p)
+{
+	const u32 tl_word = DX_TL_WORD(DX_FW_INIT_PARAM_LAST, 1);
+
+	tlv_buf[(*idx_p)++] = cpu_to_le32(tl_word);
+	*checksum_p += tl_word;	/* Last TL word is included in checksum */
+	tlv_buf[(*idx_p)++] = cpu_to_le32(~(*checksum_p));
+}
+
+static int create_fwinit_command(struct sep_drvdata *drvdata,
+				 u32 **fw_init_params_buf_pp,
+				 dma_addr_t *fw_init_params_dma_p)
+{
+	u32 idx;
+	u32 checksum = 0;
+#ifdef SEP_FREQ_MHZ
+	u32 sep_freq = SEP_FREQ_MHZ;
+#endif
+	dma_addr_t q_base_dma;
+	unsigned long q_size;
+	/* arrays for queue parameters values */
+	u32 qs_base_dma[SEP_MAX_NUM_OF_DESC_Q];
+	u32 qs_size[SEP_MAX_NUM_OF_DESC_Q];
+	u32 qs_ctx_size[SEP_MAX_NUM_OF_DESC_Q];
+	u32 qs_ctx_size_total;
+	u32 qs_num = drvdata->num_of_desc_queues;
+	u32 sep_request_params[DX_SEP_REQUEST_PARAM_MSG_LEN];
+	int i;
+	int rc;
+
+	/* For klocwork, add extra check */
+	if (qs_num > SEP_MAX_NUM_OF_DESC_Q) {
+		pr_err("Max number of desc queues (%d) exceeded (%d)\n",
+		       qs_num, SEP_MAX_NUM_OF_DESC_Q);
+		return -EINVAL;
+	}
+
+	/* allocate coherent working buffer */
+	*fw_init_params_buf_pp = dma_alloc_coherent(drvdata->dev,
+						    FW_INIT_PARAMS_BUF_LEN,
+						    fw_init_params_dma_p,
+						    GFP_KERNEL);
+	pr_debug("fw_init_params_dma=0x%08lX fw_init_params_va=0x%p\n",
+		      (unsigned long)*fw_init_params_dma_p,
+		      *fw_init_params_buf_pp);
+	if (*fw_init_params_buf_pp == NULL) {
+		pr_err("Unable to allocate coherent workspace buffer\n");
+		return -ENOMEM;
+	}
+
+	init_fwinit_param_list(*fw_init_params_buf_pp, &idx, &checksum);
+
+#ifdef SEP_FREQ_MHZ
+	add_fwinit_param(*fw_init_params_buf_pp, &idx, &checksum,
+			 DX_FW_INIT_PARAM_SEP_FREQ, 1, &sep_freq);
+#endif
+
+	/* No need to validate number of queues - already validated in
+	 * sep_setup() */
+
+	add_fwinit_param(*fw_init_params_buf_pp, &idx, &checksum,
+			 DX_FW_INIT_PARAM_NUM_OF_DESC_QS, 1, &qs_num);
+
+	/* Fetch per-queue information */
+	qs_ctx_size_total = 0;
+	for (i = 0; i < qs_num; i++) {
+		desc_q_get_info4sep(drvdata->queue[i].desc_queue,
+				    &q_base_dma, &q_size);
+		/* Data is first fetched into q_base_dma and q_size because
+		 * return value is of different type than u32 */
+		qs_base_dma[i] = q_base_dma;
+		qs_size[i] = q_size;
+		qs_ctx_size[i] =
+		    ctxmgr_sep_cache_get_size(drvdata->queue[i].sep_cache);
+		if ((qs_base_dma[i] == 0) || (qs_size[i] == 0) ||
+		    (qs_ctx_size[i] == 0)) {
+			pr_err(
+				    "Invalid queue %d resources: base=0x%08X size=%u ctx_cache_size=%u\n",
+				    i, qs_base_dma[i], qs_size[i],
+				    qs_ctx_size[i]);
+			rc = -EINVAL;
+			goto fwinit_error;
+		}
+		qs_ctx_size_total += qs_ctx_size[i];
+	}
+
+	if (qs_ctx_size_total > drvdata->num_of_sep_cache_entries) {
+		pr_err("Too many context cache entries allocated(%u>%u)\n",
+			    qs_ctx_size_total,
+			    drvdata->num_of_sep_cache_entries);
+		rc = -EINVAL;
+		goto fwinit_error;
+	}
+
+	add_fwinit_param(*fw_init_params_buf_pp, &idx, &checksum,
+			 DX_FW_INIT_PARAM_DESC_QS_ADDR, qs_num, qs_base_dma);
+	add_fwinit_param(*fw_init_params_buf_pp, &idx, &checksum,
+			 DX_FW_INIT_PARAM_DESC_QS_SIZE, qs_num, qs_size);
+	add_fwinit_param(*fw_init_params_buf_pp, &idx, &checksum,
+			 DX_FW_INIT_PARAM_CTX_CACHE_PART, qs_num, qs_ctx_size);
+
+	/* Prepare sep request params */
+	dx_sep_req_get_sep_init_params(sep_request_params);
+	add_fwinit_param(*fw_init_params_buf_pp, &idx, &checksum,
+			 DX_FW_INIT_PARAM_SEP_REQUEST_PARAMS,
+			 DX_SEP_REQUEST_PARAM_MSG_LEN, sep_request_params);
+
+	terminate_fwinit_param_list(*fw_init_params_buf_pp, &idx, &checksum);
+
+	return 0;
+
+ fwinit_error:
+	dma_free_coherent(drvdata->dev, FW_INIT_PARAMS_BUF_LEN,
+			  *fw_init_params_buf_pp, *fw_init_params_dma_p);
+	return rc;
+}
+
+static void destroy_fwinit_command(struct sep_drvdata *drvdata,
+				   u32 *fw_init_params_buf_p,
+				   dma_addr_t fw_init_params_dma)
+{
+	/* release TLV parameters buffer */
+	dma_free_coherent(drvdata->dev, FW_INIT_PARAMS_BUF_LEN,
+			  fw_init_params_buf_p, fw_init_params_dma);
+}
+
+/**
+ * sepinit_get_fw_props() - Get the FW properties (version, cache size, etc.)
+ *				as given in the
+ * @drvdata:	 Context where to fill retrieved data
+ *
+ * Get the FW properties (version, cache size, etc.) as given in the
+ * respective GPRs.
+ * This function should be called only after sepinit_wait_for_cold_boot_finish
+ */
+void sepinit_get_fw_props(struct sep_drvdata *drvdata)
+{
+
+	u32 init_fw_props;
+	/* SeP ROM version */
+	drvdata->rom_ver = READ_REGISTER(drvdata->cc_base +
+					 SEP_HOST_GPR_REG_OFFSET
+					 (DX_SEP_INIT_ROM_VER_GPR_IDX));
+	drvdata->fw_ver =
+	    READ_REGISTER(drvdata->cc_base +
+			  SEP_HOST_GPR_REG_OFFSET(DX_SEP_INIT_FW_VER_GPR_IDX));
+	mdelay(100);		/* TODO for kernel hang bug */
+	init_fw_props = READ_REGISTER(drvdata->cc_base +
+				      SEP_HOST_GPR_REG_OFFSET
+				      (DX_SEP_INIT_FW_PROPS_GPR_IDX));
+
+	drvdata->num_of_desc_queues =
+	    BITFIELD_GET(init_fw_props,
+			 DX_SEP_INIT_NUM_OF_QUEUES_BIT_OFFSET,
+			 DX_SEP_INIT_NUM_OF_QUEUES_BIT_SIZE);
+	drvdata->num_of_sep_cache_entries =
+	    BITFIELD_GET(init_fw_props,
+			 DX_SEP_INIT_CACHE_CTX_SIZE_BIT_OFFSET,
+			 DX_SEP_INIT_CACHE_CTX_SIZE_BIT_SIZE);
+	drvdata->mlli_table_size =
+	    BITFIELD_GET(init_fw_props,
+			 DX_SEP_INIT_MLLI_TBL_SIZE_BIT_OFFSET,
+			 DX_SEP_INIT_MLLI_TBL_SIZE_BIT_SIZE);
+
+	pr_info("ROM Ver.=0x%08X , FW Ver.=0x%08X\n"
+		     "SEP queues=%u, Ctx.Cache#ent.=%u , MLLIsize=%lu B\n",
+		     drvdata->rom_ver, drvdata->fw_ver,
+		     drvdata->num_of_desc_queues,
+		     drvdata->num_of_sep_cache_entries,
+		     drvdata->mlli_table_size);
+}
+
+/**
+ * sepinit_wait_for_fw_init_done() - Wait for FW initialization to complete
+ * @drvdata:
+ *
+ * Wait for FW initialization to complete
+ * This function should be invoked after sepinit_set_fw_init_params
+ * Returns int
+ */
+static int sepinit_wait_for_fw_init_done(struct sep_drvdata *drvdata)
+{
+	enum dx_sep_state cur_state;
+	u32 cur_status;
+	int rc = 0;
+
+	cur_state =
+	    dx_sep_wait_for_state(DX_SEP_STATE_DONE_FW_INIT |
+				  DX_SEP_STATE_FATAL_ERROR,
+				  FW_INIT_TIMEOUT_MSEC);
+	if (cur_state != DX_SEP_STATE_DONE_FW_INIT) {
+		rc = -EIO;
+		cur_status =
+		    READ_REGISTER(drvdata->cc_base + SEP_STATUS_GPR_OFFSET);
+		pr_err(
+			    "Failed waiting for DONE_FW_INIT from SeP (state=0x%08X status=0x%08X)\n",
+			    cur_state, cur_status);
+	} else {
+		pr_info("DONE_FW_INIT\n");
+	}
+
+	return rc;
+}
+
+/**
+ * sepinit_do_fw_init() - Initialize SeP FW
+ * @drvdata:
+ *
+ * Provide SeP FW with initialization parameters and wait for DONE_FW_INIT.
+ *
+ * Returns int 0 on success
+ */
+int sepinit_do_fw_init(struct sep_drvdata *drvdata)
+{
+	int rc;
+	u32 *fw_init_params_buf_p;
+	dma_addr_t fw_init_params_dma;
+
+	rc = create_fwinit_command(drvdata,
+				   &fw_init_params_buf_p, &fw_init_params_dma);
+	if (rc != 0)
+		return rc;
+	dump_fwinit_params(drvdata, fw_init_params_buf_p);
+	/* Write the physical address of the FW init parameters buffer */
+	WRITE_REGISTER(drvdata->cc_base +
+		       HOST_SEP_GPR_REG_OFFSET(DX_HOST_REQ_PARAM_GPR_IDX),
+		       fw_init_params_dma);
+	/* Initiate FW-init */
+	WRITE_REGISTER(drvdata->cc_base +
+		       HOST_SEP_GPR_REG_OFFSET(DX_HOST_REQ_GPR_IDX),
+		       DX_HOST_REQ_FW_INIT);
+	rc = sepinit_wait_for_fw_init_done(drvdata);
+	destroy_fwinit_command(drvdata,
+			       fw_init_params_buf_p, fw_init_params_dma);
+	return rc;
+}
diff --git a/drivers/staging/sep54/sep_init.h b/drivers/staging/sep54/sep_init.h
new file mode 100644
index 0000000..fb5a8ce
--- /dev/null
+++ b/drivers/staging/sep54/sep_init.h
@@ -0,0 +1,63 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+#ifndef __SEP_INIT_H__
+#define __SEP_INIT_H__
+
+#include "dx_driver.h"
+
+/**
+ * sepinit_do_cc_init() - Initiate SeP cold boot sequence and wait for
+ *	its completion.
+ *
+ * @drvdata:
+ *
+ * This function loads the CC firmware and dispatches an CC_INIT request message
+ * Returns int 0 for success
+ */
+int sepinit_do_cc_init(struct sep_drvdata *drvdata);
+
+/**
+ * sepinit_get_fw_props() - Get the FW properties (version, cache size, etc.)
+ *	after completing cold boot
+ * @drvdata:	 Context where to fill retrieved data
+ *
+ * This function should be called only after sepinit_do_cc_init completes
+ * successfully.
+ */
+void sepinit_get_fw_props(struct sep_drvdata *drvdata);
+
+/**
+ * sepinit_do_fw_init() - Initialize SeP FW
+ * @drvdata:
+ *
+ * Provide SeP FW with initialization parameters and wait for DONE_FW_INIT.
+ *
+ * Returns int 0 on success
+ */
+int sepinit_do_fw_init(struct sep_drvdata *drvdata);
+
+#endif /*__SEP_INIT_H__*/
diff --git a/drivers/staging/sep54/sep_init_cc_errors.h b/drivers/staging/sep54/sep_init_cc_errors.h
new file mode 100644
index 0000000..3c16d16
--- /dev/null
+++ b/drivers/staging/sep54/sep_init_cc_errors.h
@@ -0,0 +1,84 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+#ifndef SEP_INIT_CC_ERROR_H
+#define SEP_INIT_CC_ERROR_H
+
+#include "sep_error.h"
+
+	/*! \file lcs.h
+	 * \brief This file containes lcs definitions
+	 */
+#define DX_INIT_CC_OK DX_SEP_OK
+/* DX_INIT_CC_MODULE_ERROR_BASE - 0xE0004000 */
+#define DX_CC_INIT_MSG_CS_ERROR	\
+	(DX_INIT_CC_MODULE_ERROR_BASE + 0x1)
+#define DX_CC_INIT_MSG_WRONG_TOKEN_ERROR \
+	(DX_INIT_CC_MODULE_ERROR_BASE + 0x2)
+#define DX_CC_INIT_MSG_WRONG_OP_CODE_ERROR \
+	(DX_INIT_CC_MODULE_ERROR_BASE + 0x3)
+#define DX_CC_INIT_MSG_WRONG_RESIDENT_ADDR_ERROR \
+	(DX_INIT_CC_MODULE_ERROR_BASE + 0x4)
+#define DX_CC_INIT_MSG_WRONG_I_CACHE_ADDR_ERROR \
+	(DX_INIT_CC_MODULE_ERROR_BASE + 0x5)
+#define DX_CC_INIT_MSG_WRONG_I_CACHE_DEST_ERROR \
+	(DX_INIT_CC_MODULE_ERROR_BASE + 0x6)
+#define DX_CC_INIT_MSG_WRONG_D_CACHE_ADDR_ERROR \
+	(DX_INIT_CC_MODULE_ERROR_BASE + 0x7)
+#define DX_CC_INIT_MSG_WRONG_D_CACHE_SIZE_ERROR \
+	(DX_INIT_CC_MODULE_ERROR_BASE + 0x8)
+#define DX_CC_INIT_MSG_WRONG_INIT_EXT_ADDR_ERROR \
+	(DX_INIT_CC_MODULE_ERROR_BASE + 0x9)
+#define DX_CC_INIT_MSG_WRONG_VRL_ADDR_ERROR \
+	(DX_INIT_CC_MODULE_ERROR_BASE + 0xA)
+#define DX_CC_INIT_MSG_WRONG_MAGIC_NUM_ERROR \
+	(DX_INIT_CC_MODULE_ERROR_BASE + 0xB)
+#define DX_CC_INIT_MSG_WRONG_OUTPUT_BUFF_ADDR_ERROR \
+	(DX_INIT_CC_MODULE_ERROR_BASE + 0xC)
+#define DX_CC_INIT_MSG_WRONG_OUTPUT_BUFF_SIZE_ERROR \
+	(DX_INIT_CC_MODULE_ERROR_BASE + 0xD)
+#define DX_RESERVED_0_ERROR \
+	(DX_INIT_CC_MODULE_ERROR_BASE + 0xE)
+
+/* DX_INIT_CC_EXT_MODULE_ERROR_BASE - 0xE0005000 */
+#define DX_CC_INIT_EXT_FIRST_PARAM_ERRR \
+	(DX_INIT_CC_EXT_MODULE_ERROR_BASE + 0x1)
+#define DX_CC_INIT_EXT_WRONG_LAST_PARAM_LENGTH_ERRR \
+	(DX_INIT_CC_EXT_MODULE_ERROR_BASE + 0x2)
+#define DX_CC_INIT_EXT_WRONG_CHECKSUM_VALUE_ERRR \
+	(DX_INIT_CC_EXT_MODULE_ERROR_BASE + 0x3)
+#define DX_CC_INIT_EXT_WRONG_DISABLE_MODULE_LENGTH_ERRR \
+	(DX_INIT_CC_EXT_MODULE_ERROR_BASE + 0x4)
+#define DX_CC_INIT_EXT_WRONG_AXI_CONFIG_LENGTH_ERRR \
+	(DX_INIT_CC_EXT_MODULE_ERROR_BASE + 0x5)
+#define DX_CC_INIT_EXT_WRONG_PARAM_ERRR \
+	(DX_INIT_CC_EXT_MODULE_ERROR_BASE + 0x6)
+#define DX_CC_INIT_EXT_EXCEED_MAX_PARAM_PARAM_ERRR \
+	(DX_INIT_CC_EXT_MODULE_ERROR_BASE + 0x7)
+#define DX_CC_INIT_EXT_WRONG_SEP_FREQ_LENGTH_ERRR \
+	(DX_INIT_CC_EXT_MODULE_ERROR_BASE + 0x8)
+
+#endif
diff --git a/drivers/staging/sep54/sep_lli.h b/drivers/staging/sep54/sep_lli.h
new file mode 100644
index 0000000..b778f4f
--- /dev/null
+++ b/drivers/staging/sep54/sep_lli.h
@@ -0,0 +1,86 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+#ifndef _SEP_LLI_H_
+#define _SEP_LLI_H_
+#ifdef __KERNEL__
+#include <linux/types.h>
+#else
+#include <stdint.h>
+#endif
+#include "dx_bitops.h"
+
+#define SEP_LLI_GET(lli_p, lli_field) BITFIELD_GET(                            \
+		((u32 *)(lli_p))[SEP_LLI_ ## lli_field ## _WORD_OFFSET],  \
+		SEP_LLI_ ## lli_field ## _BIT_OFFSET,			       \
+		SEP_LLI_ ## lli_field ## _BIT_SIZE)
+#define SEP_LLI_SET(lli_p, lli_field, new_val) BITFIELD_SET(                   \
+		((u32 *)(lli_p))[SEP_LLI_ ## lli_field ## _WORD_OFFSET],  \
+		SEP_LLI_ ## lli_field ## _BIT_OFFSET,			       \
+		SEP_LLI_ ## lli_field ## _BIT_SIZE,			       \
+		new_val)
+
+#define SEP_LLI_INIT(lli_p)  do { \
+	((u32 *)(lli_p))[0] = 0; \
+	((u32 *)(lli_p))[1] = 0; \
+} while (0)
+
+/* Copy local LLI scratchpad to SeP LLI buffer */
+#define SEP_LLI_COPY_TO_SEP(sep_lli_p, host_lli_p) do {             \
+	int i;                                                      \
+	for (i = 0; i < SEP_LLI_ENTRY_WORD_SIZE; i++)               \
+		((u32 *)(sep_lli_p))[i] =                      \
+			cpu_to_le32(((u32 *)(host_lli_p))[i]); \
+} while (0)
+/* and vice-versa */
+#define SEP_LLI_COPY_FROM_SEP(host_lli_p, sep_lli_p) do {                 \
+	int i;                                                            \
+		for (i = 0; i < SEP_LLI_ENTRY_WORD_SIZE; i++)             \
+			((u32 *)(host_lli_p))[i] =                   \
+				le32_to_cpu(((u32 *)(sep_lli_p))[i]);\
+} while (0)
+
+/* Size of entry */
+#define SEP_LLI_ENTRY_WORD_SIZE 2
+#define SEP_LLI_ENTRY_BYTE_SIZE (SEP_LLI_ENTRY_WORD_SIZE * sizeof(u32))
+
+/* (DMA) Address: ADDR */
+#define SEP_LLI_ADDR_WORD_OFFSET 0
+#define SEP_LLI_ADDR_BIT_OFFSET 0
+#define SEP_LLI_ADDR_BIT_SIZE 32
+/* Size: SIZE */
+#define SEP_LLI_SIZE_WORD_OFFSET 1
+#define SEP_LLI_SIZE_BIT_OFFSET 0
+#define SEP_LLI_SIZE_BIT_SIZE 30
+/* First/Last LLI entries bit marks: FIRST, LAST */
+#define SEP_LLI_FIRST_WORD_OFFSET 1
+#define SEP_LLI_FIRST_BIT_OFFSET 30
+#define SEP_LLI_FIRST_BIT_SIZE 1
+#define SEP_LLI_LAST_WORD_OFFSET 1
+#define SEP_LLI_LAST_BIT_OFFSET 31
+#define SEP_LLI_LAST_BIT_SIZE 1
+
+#endif /*_SEP_LLI_H_*/
diff --git a/drivers/staging/sep54/sep_log.h b/drivers/staging/sep54/sep_log.h
new file mode 100644
index 0000000..fd7a9ae
--- /dev/null
+++ b/drivers/staging/sep54/sep_log.h
@@ -0,0 +1,131 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+
+
+#ifndef _SEP_LOG__H_
+#define _SEP_LOG__H_
+
+/* Define different "BUG()" behavior in DEBUG mode */
+#ifdef DEBUG
+/* It is easier to attach a debugger without causing the exception of "BUG()" */
+#define SEP_DRIVER_BUG() do {dump_stack(); while (1); } while (0)
+#else
+#define SEP_DRIVER_BUG() BUG()
+#endif
+
+/* SeP log levels (to be used in sep_log_level and SEP_BASE_LOG_LEVEL) */
+#define SEP_LOG_LEVEL_ERR       0
+#define SEP_LOG_LEVEL_WARN      1
+#define SEP_LOG_LEVEL_INFO      2
+#define SEP_LOG_LEVEL_DEBUG     3
+#define SEP_LOG_LEVEL_TRACE     4
+
+/* SeP log components (to be used in sep_log_mask and SEP_LOG_CUR_COMPONENT) */
+#define SEP_LOG_MASK_MAIN        1 /* dx_driver.c */
+#define SEP_LOG_MASK_LLI_MGR     (1<<1)
+#define SEP_LOG_MASK_CTX_MGR     (1<<2)
+#define SEP_LOG_MASK_DESC_MGR    (1<<3)
+#define SEP_LOG_MASK_SYSFS       (1<<4)
+#define SEP_LOG_MASK_SEP_INIT    (1<<5)
+#define SEP_LOG_MASK_CRYPTO_API  (1<<6)
+#define SEP_LOG_MASK_SEP_REQUEST (1<<7)
+#define SEP_LOG_MASK_SEP_POWER   (1<<8)
+#define SEP_LOG_MASK_SEP_APP     (1<<9)
+#define SEP_LOG_MASK_SEP_PRINTF  (1<<31)
+#define SEP_LOG_MASK_ALL        (SEP_LOG_MASK_MAIN | SEP_LOG_MASK_SEP_INIT |\
+	SEP_LOG_MASK_LLI_MGR | SEP_LOG_MASK_CTX_MGR | SEP_LOG_MASK_DESC_MGR |\
+	SEP_LOG_MASK_SYSFS | SEP_LOG_MASK_CRYPTO_API |\
+	SEP_LOG_MASK_SEP_REQUEST | SEP_LOG_MASK_SEP_POWER |\
+	SEP_LOG_MASK_SEP_APP | SEP_LOG_MASK_SEP_PRINTF)
+
+
+/* This printk wrapper masks maps log level to KERN_* levels and masks prints *
+   from specific components at run time based on SEP_LOG_CUR_COMPONENT and    *
+*  sep_log_component_mask.                                                    */
+#define MODULE_PRINTK(level, format, ...) do {			\
+	if (sep_log_mask & SEP_LOG_CUR_COMPONENT)		\
+		printk(level MODULE_NAME ":%s: " format,	\
+			__func__, ##__VA_ARGS__);		\
+} while (0)
+
+extern int sep_log_level;
+extern int sep_log_mask;
+
+
+/* change this to set the preferred log level */
+#ifdef DEBUG
+#define SEP_BASE_LOG_LEVEL SEP_LOG_LEVEL_TRACE
+#else
+#define SEP_BASE_LOG_LEVEL SEP_LOG_LEVEL_WARN
+#endif
+
+#define SEP_LOG_ERR(format, ...) \
+	MODULE_PRINTK(KERN_ERR, format, ##__VA_ARGS__);
+
+#if (SEP_BASE_LOG_LEVEL >= SEP_LOG_LEVEL_WARN)
+#define SEP_LOG_WARN(format, ...) do {				\
+	if (sep_log_level >= SEP_LOG_LEVEL_WARN)		\
+		MODULE_PRINTK(KERN_WARNING, format,		\
+		##__VA_ARGS__);					\
+} while (0)
+#else
+#define SEP_LOG_WARN(format, arg...) do {} while (0)
+#endif
+
+#if (SEP_BASE_LOG_LEVEL >= SEP_LOG_LEVEL_INFO)
+#define SEP_LOG_INFO(format, ...) do {				\
+	if (sep_log_level >= SEP_LOG_LEVEL_INFO)		\
+		MODULE_PRINTK(KERN_INFO, format, ##__VA_ARGS__); \
+} while (0)
+#else
+#define SEP_LOG_INFO(format, arg...) do {} while (0)
+#endif
+
+#if (SEP_BASE_LOG_LEVEL >= SEP_LOG_LEVEL_DEBUG)
+#define SEP_LOG_DEBUG(format, ...) do {				\
+	if (sep_log_level >= SEP_LOG_LEVEL_DEBUG)		\
+		MODULE_PRINTK(KERN_DEBUG, format, ##__VA_ARGS__);\
+} while (0)
+#else
+#define SEP_LOG_DEBUG(format, arg...) do {} while (0)
+#endif
+
+#if (SEP_BASE_LOG_LEVEL >= SEP_LOG_LEVEL_TRACE)
+#define SEP_LOG_TRACE(format, ...) do {				\
+	if (sep_log_level >= SEP_LOG_LEVEL_TRACE)		\
+		MODULE_PRINTK(KERN_DEBUG, "<trace> " format,	\
+			      ##__VA_ARGS__);			\
+} while (0)
+#else
+#define SEP_LOG_TRACE(format, arg...) do {} while (0)
+#endif
+
+#undef pr_fmt
+#define pr_fmt(fmt)     KBUILD_MODNAME ": %s:%d: " fmt, __func__, __LINE__
+
+#endif
+
diff --git a/drivers/staging/sep54/sep_power.c b/drivers/staging/sep54/sep_power.c
new file mode 100644
index 0000000..d007907
--- /dev/null
+++ b/drivers/staging/sep54/sep_power.c
@@ -0,0 +1,431 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+/**
+ * This file implements the power state control functions for SeP/CC
+ */
+
+#define SEP_LOG_CUR_COMPONENT SEP_LOG_MASK_SEP_POWER
+#include <linux/workqueue.h>
+#include <linux/pm_runtime.h>
+/*#include <linux/export.h>*/
+#define DX_CC_HOST_VIRT	/* must be defined before including dx_cc_regs.h */
+#include "dx_driver.h"
+#include "dx_cc_regs.h"
+#include "dx_init_cc_abi.h"
+#include "sep_sw_desc.h"
+#include "dx_sep_kapi.h"
+#include <linux/delay.h>
+
+#define SEP_STATE_CHANGE_TIMEOUT_MSEC 2500
+/**
+ * struct sep_power_control - Control data for power state change operations
+ * @drvdata:		The associated driver context
+ * @state_changed:	Completion object to signal state change
+ * @last_state:		Recorded last state
+ * @state_jiffies:	jiffies at recorded last state
+ *
+ * last_state and state_jiffies are volatile because may be updated in
+ * the interrupt context while tested in _state_get function.
+ */
+struct sep_power_control {
+	struct sep_drvdata *drvdata;
+	struct completion state_changed;
+	volatile enum dx_sep_state last_state;
+	volatile unsigned long state_jiffies;
+};
+
+/* Global context for power management */
+static struct sep_power_control power_control;
+
+static const char *power_state_str(enum dx_sep_power_state pstate)
+{
+	switch (pstate) {
+	case DX_SEP_POWER_INVALID:
+		return "INVALID";
+	case DX_SEP_POWER_OFF:
+		return "OFF";
+	case DX_SEP_POWER_BOOT:
+		return "BOOT";
+	case DX_SEP_POWER_IDLE:
+		return "IDLE";
+	case DX_SEP_POWER_ACTIVE:
+		return "ACTIVE";
+	case DX_SEP_POWER_HIBERNATED:
+		return "HIBERNATED";
+	}
+	return "(unknown)";
+}
+
+/**
+ * dx_sep_state_change_handler() - Interrupt handler for SeP state changes
+ * @drvdata:	Associated driver context
+ */
+void dx_sep_state_change_handler(struct sep_drvdata *drvdata)
+{
+	pr_warn("State=0x%08X Status/RetCode=0x%08X\n",
+		     READ_REGISTER(drvdata->cc_base + SEP_STATE_GPR_OFFSET),
+		     READ_REGISTER(drvdata->cc_base + SEP_STATUS_GPR_OFFSET));
+	power_control.state_jiffies = jiffies;
+	power_control.last_state = GET_SEP_STATE(drvdata);
+	complete(&power_control.state_changed);
+}
+
+/**
+ * dx_sep_wait_for_state() - Wait for SeP to reach one of the states reflected
+ *				with given state mask
+ * @state_mask:		The OR of expected SeP states
+ * @timeout_msec:	Timeout of waiting for the state (in millisec.)
+ *
+ * Returns the state reached. In case of wait timeout the returned state
+ * may not be one of the expected states.
+ */
+enum dx_sep_state dx_sep_wait_for_state(u32 state_mask, int timeout_msec)
+{
+	int wait_jiffies = msecs_to_jiffies(timeout_msec);
+	enum dx_sep_state sep_state;
+
+	do {
+		/* Poll for state transition completion or failure */
+		/* Arm for next state change before reading current state */
+		INIT_COMPLETION(power_control.state_changed);
+		sep_state = GET_SEP_STATE(power_control.drvdata);
+		if ((sep_state & state_mask) || (wait_jiffies == 0))
+			/* It's a match or wait timed out */
+			break;
+		wait_jiffies =
+		    wait_for_completion_timeout(&power_control.state_changed,
+						wait_jiffies);
+	} while (1);
+
+	return sep_state;
+}
+
+void dx_sep_pm_runtime_get(void)
+{
+	pm_runtime_get_sync(power_control.drvdata->dev);
+}
+
+void dx_sep_pm_runtime_put(void)
+{
+	pm_runtime_mark_last_busy(power_control.drvdata->dev);
+	pm_runtime_put_autosuspend(power_control.drvdata->dev);
+}
+
+/**
+ * set_desc_qs_state() - Modify states of all Desc. queues
+ *
+ * @state:	Requested new state
+ */
+static int set_desc_qs_state(enum desc_q_state state)
+{
+	int i, rc;
+
+	for (i = 0, rc = 0;
+	     (i < power_control.drvdata->num_of_desc_queues) && (rc == 0); i++)
+		rc = desc_q_set_state(power_control.drvdata->queue[i].
+				      desc_queue, state);
+	if (unlikely(rc != 0))
+		/* Error - revert state of queues that were already changed */
+		for (i--; i >= 0; i--)
+			desc_q_set_state(power_control.drvdata->queue[i].
+					 desc_queue,
+					 (state ==
+					  DESC_Q_ASLEEP) ? DESC_Q_ACTIVE :
+					 DESC_Q_ASLEEP);
+	return rc;
+}
+
+static bool is_desc_qs_active(void)
+{
+	int i;
+	enum desc_q_state qstate;
+	bool is_all_qs_active = true;
+
+	for (i = 0; i < power_control.drvdata->num_of_desc_queues; i++) {
+		qstate =
+		    desc_q_get_state(power_control.drvdata->queue[i].
+				     desc_queue);
+		if ((qstate != DESC_Q_ACTIVE)) {
+			is_all_qs_active = false;
+			break;
+		}
+	}
+	return is_all_qs_active;
+}
+
+static bool is_desc_qs_idle(unsigned long *idle_jiffies_p)
+{
+	int i;
+	unsigned long this_q_idle_jiffies;
+	bool is_all_qs_idle = true;
+
+	*idle_jiffies_p = 0;	/* Max. of both queues if both idle */
+
+	for (i = 0; i < power_control.drvdata->num_of_desc_queues; i++) {
+		if (!desc_q_is_idle(power_control.drvdata->queue[i].desc_queue,
+				    &this_q_idle_jiffies)) {
+			is_all_qs_idle = false;
+			break;
+		}
+		if (this_q_idle_jiffies > *idle_jiffies_p)
+			*idle_jiffies_p = this_q_idle_jiffies;
+	}
+	return is_all_qs_idle;
+}
+
+/**
+ * reset_desc_qs() - Initiate clearing of desc. queue counters
+ * This function must be called only when transition to hibernation state
+ * is completed successfuly, i.e., the desc. queue is empty and asleep
+ */
+static void reset_desc_qs(void)
+{
+	int i;
+
+	for (i = 0; i < power_control.drvdata->num_of_desc_queues; i++)
+		(void)desc_q_reset(power_control.drvdata->queue[i].desc_queue);
+}
+
+static int process_hibernation_req(void)
+{
+	enum dx_sep_state sep_state;
+	int rc;
+
+	sep_state = GET_SEP_STATE(power_control.drvdata);
+	/* Already off, no need to send the sleep descriptor */
+	if (sep_state == DX_SEP_STATE_OFF ||
+		sep_state == DX_SEP_STATE_DONE_SLEEP_MODE)
+		return 0;
+
+	if (sep_state != DX_SEP_STATE_DONE_FW_INIT ||
+		!(is_desc_qs_active())) {
+		pr_err("Requested hibernation while SeP state=0x%08X\n",
+			    sep_state);
+		return -EINVAL;
+	}
+	rc = set_desc_qs_state(DESC_Q_ASLEEP);
+	if (unlikely(rc != 0)) {
+		pr_err("Failed moving queues to SLEEP state (%d)\n", rc);
+		return rc;
+	}
+	/* Write value SEP_SLEEP_ENABLE command to GPR7 to initialize
+	   sleep sequence */
+	WRITE_REGISTER(power_control.drvdata->cc_base +
+			DX_CC_REG_OFFSET(HOST, HOST_SEP_GPR7),
+			SEP_SLEEP_ENABLE);
+	/* Process state change */
+	sep_state = dx_sep_wait_for_state(DX_SEP_STATE_DONE_SLEEP_MODE,
+					SEP_STATE_CHANGE_TIMEOUT_MSEC);
+	switch (sep_state) {
+	case DX_SEP_STATE_DONE_SLEEP_MODE:
+		break;
+	case DX_SEP_STATE_DONE_FW_INIT:
+		pr_err("Transition to SLEEP mode aborted.\n");
+		rc = -EBUSY;
+		break;
+	case DX_SEP_STATE_PROC_SLEEP_MODE:
+		pr_err("Stuck in processing of SLEEP req.\n");
+		rc = -ETIME;
+		break;
+	default:
+		pr_err(
+			"Unexpected SeP state after SLEEP request: 0x%08X\n",
+			sep_state);
+		rc = -EINVAL;
+	}
+	if (unlikely(rc != 0)) {
+		sep_state = GET_SEP_STATE(power_control.drvdata);
+		if (sep_state == DX_SEP_STATE_DONE_FW_INIT)
+			/* Revert queues state on failure */
+			/* (if remained on active state)  */
+			set_desc_qs_state(DESC_Q_ACTIVE);
+	} else {
+		reset_desc_qs();
+	}
+
+	return rc;
+}
+
+static int process_activate_req(void)
+{
+	enum dx_sep_state sep_state;
+	int rc;
+	int count = 0;
+
+	sep_state = GET_SEP_STATE(power_control.drvdata);
+	if ((sep_state == DX_SEP_STATE_DONE_FW_INIT) && is_desc_qs_active()) {
+		pr_info("Requested activation when in active state\n");
+		return 0;	/* Already in this state */
+	}
+
+	/* make sure Sep is not off before restore IMR */
+	if (sep_state == DX_SEP_STATE_OFF) {
+		while (count < SEP_POWERON_TIMEOUT) {
+			sep_state = GET_SEP_STATE(power_control.drvdata);
+			if (sep_state != DX_SEP_STATE_OFF)
+				break;
+			usleep_range(50, 150);
+			count++;
+		}
+		if (count >= SEP_TIMEOUT) {
+			pr_info("Timeout while waiting SEP poweron\n");
+			return -ETIME;
+		}
+
+	}
+	/* SeP may have been reset - restore IMR if SeP is not off */
+	/* This must be done before dx_sep_wait_for_state() */
+	WRITE_REGISTER(power_control.drvdata->cc_base +
+		       DX_CC_REG_OFFSET(HOST, IMR),
+		       ~power_control.drvdata->irq_mask);
+	/* Nothing to initiate - just wait for FW_INIT_DONE */
+	sep_state = dx_sep_wait_for_state(DX_SEP_STATE_DONE_FW_INIT,
+					  SEP_STATE_CHANGE_TIMEOUT_MSEC);
+	if (sep_state == DX_SEP_STATE_DONE_FW_INIT)
+		rc = set_desc_qs_state(DESC_Q_ACTIVE);
+	else {
+		pr_info("Timeout while waiting SEP wakeup\n");
+		rc = -ETIME;	/* Timed out waiting */
+	}
+
+	return rc;
+}
+
+/**
+ * dx_sep_power_state_set() - Change power state of SeP (CC)
+ *
+ * @req_state:	The requested power state (_HIBERNATED or _ACTIVE)
+ *
+ * Request changing of power state to given state and block until transition
+ * is completed.
+ * Requesting _HIBERNATED is allowed only from _ACTIVE state.
+ * Requesting _ACTIVE is allowed only after CC was powered back on (warm boot).
+ * Return codes:
+ * 0 -	Power state change completed.
+ * -EINVAL -	This request is not allowed in current SeP state or req_state
+ *		value is invalid.
+ * -EBUSY -	State change request ignored because SeP is busy (primarily,
+ *		when requesting hibernation while SeP is processing something).
+ * -ETIME -	Request timed out (primarily, when asking for _ACTIVE)
+ */
+int dx_sep_power_state_set(enum dx_sep_power_state req_state)
+{
+	int rc = 0;
+
+	switch (req_state) {
+	case DX_SEP_POWER_HIBERNATED:
+		rc = process_hibernation_req();
+		break;
+	case DX_SEP_POWER_IDLE:
+	case DX_SEP_POWER_ACTIVE:
+		rc = process_activate_req();
+		break;
+	default:
+		pr_err("Invalid state to request (%s)\n",
+			    power_state_str(req_state));
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(dx_sep_power_state_set);
+
+/**
+ * dx_sep_power_state_get() - Get the current power state of SeP (CC)
+ * @state_jiffies_p:	The "jiffies" value at which given state was detected.
+ */
+enum dx_sep_power_state dx_sep_power_state_get(unsigned long *state_jiffies_p)
+{
+	enum dx_sep_state sep_state;
+	enum dx_sep_power_state rc;
+	unsigned long idle_jiffies;
+
+	sep_state = GET_SEP_STATE(power_control.drvdata);
+	if (sep_state != power_control.last_state) {
+		/* Probably off or after warm-boot with lost IMR */
+		/* Recover last_state */
+		power_control.last_state = sep_state;
+		power_control.state_jiffies = jiffies;
+	}
+	if (state_jiffies_p != NULL)
+		*state_jiffies_p = power_control.state_jiffies;
+	switch (sep_state) {
+	case DX_SEP_STATE_PROC_WARM_BOOT:
+	 /*FALLTRHOUGH*/ case DX_SEP_STATE_DONE_WARM_BOOT:
+		rc = DX_SEP_POWER_BOOT;
+		break;
+	case DX_SEP_STATE_DONE_FW_INIT:
+		if (is_desc_qs_active()) {
+			if (is_desc_qs_idle(&idle_jiffies)) {
+				rc = DX_SEP_POWER_IDLE;
+				if (state_jiffies_p != NULL)
+					*state_jiffies_p = idle_jiffies;
+			} else {
+				rc = DX_SEP_POWER_ACTIVE;
+			}
+		} else {
+			/* SeP was woken up but dx_sep_power_state_set was not
+			 * invoked to activate the queues */
+			rc = DX_SEP_POWER_BOOT;
+		}
+		break;
+	case DX_SEP_STATE_PROC_SLEEP_MODE:
+		/* Report as active until actually asleep */
+		rc = DX_SEP_POWER_ACTIVE;
+		break;
+	case DX_SEP_STATE_DONE_SLEEP_MODE:
+		rc = DX_SEP_POWER_HIBERNATED;
+		break;
+	case DX_SEP_STATE_OFF:
+		rc = DX_SEP_POWER_OFF;
+		break;
+	case DX_SEP_STATE_FATAL_ERROR:
+	default:/* Any state not supposed to happen for the driver */
+		rc = DX_SEP_POWER_INVALID;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(dx_sep_power_state_get);
+
+/**
+ * dx_sep_power_init() - Init resources for this module
+ */
+void dx_sep_power_init(struct sep_drvdata *drvdata)
+{
+	power_control.drvdata = drvdata;
+	init_completion(&power_control.state_changed);
+	/* Init. recorded last state */
+	power_control.last_state = GET_SEP_STATE(drvdata);
+	power_control.state_jiffies = jiffies;
+}
+
+/**
+ * dx_sep_power_exit() - Cleanup resources for this module
+ */
+void dx_sep_power_exit(void)
+{
+}
diff --git a/drivers/staging/sep54/sep_power.h b/drivers/staging/sep54/sep_power.h
new file mode 100644
index 0000000..b92bd65
--- /dev/null
+++ b/drivers/staging/sep54/sep_power.h
@@ -0,0 +1,60 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+#ifndef __SEP_POWER_H__
+#define __SEP_POWER_H__
+
+/**
+ * dx_sep_power_init() - Init resources for this module
+ */
+void dx_sep_power_init(struct sep_drvdata *drvdata);
+
+/**
+ * dx_sep_power_exit() - Cleanup resources for this module
+ */
+void dx_sep_power_exit(void);
+
+/**
+ * dx_sep_state_change_handler() - Interrupt handler for SeP state changes
+ * @drvdata:	Associated driver context
+ */
+void dx_sep_state_change_handler(struct sep_drvdata *drvdata);
+
+/**
+ * dx_sep_wait_for_state() - Wait for SeP to reach one of the states reflected
+ *				with given state mask
+ * @state_mask:		The OR of expected SeP states
+ * @timeout_msec:	Timeout of waiting for the state (in millisec.)
+ *
+ * Returns the state reached. In case of wait timeout the returned state
+ * may not be one of the expected states.
+ */
+u32 dx_sep_wait_for_state(u32 state_mask, int timeout_msec);
+
+void dx_sep_pm_runtime_get(void);
+
+void dx_sep_pm_runtime_put(void);
+
+#endif				/* __SEP_POWER_H__ */
diff --git a/drivers/staging/sep54/sep_request.h b/drivers/staging/sep54/sep_request.h
new file mode 100644
index 0000000..dc1f5f0
--- /dev/null
+++ b/drivers/staging/sep54/sep_request.h
@@ -0,0 +1,96 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+#ifndef _SEP_REQUEST_H_
+#define _SEP_REQUEST_H_
+#ifdef __KERNEL__
+#include <linux/types.h>
+#else
+#include <stdint.h>
+#endif
+#include "dx_bitops.h"
+
+#define DX_SEP_REQUEST_GPR_IDX 3
+
+#define DX_SEP_REQUEST_4KB_MASK 0xFFF
+#define DX_SEP_REQUEST_MIN_BUF_SIZE (4*1024)
+#define DX_SEP_REQUEST_MAX_BUF_SIZE (32*1024)
+
+/* Protocol error codes */
+#define DX_SEP_REQUEST_SUCCESS 0x00
+#define DX_SEP_REQUEST_OUT_OF_SYNC_ERR 0x01
+#define DX_SEP_REQUEST_INVALID_REQ_SIZE_ERR 0x02
+#define DX_SEP_REQUEST_INVALID_AGENT_ID_ERR 0x03
+
+/* Sep Request GPR3 format (Sep to Host) */
+#define DX_SEP_REQUEST_AGENT_ID_BIT_OFFSET 0
+#define DX_SEP_REQUEST_AGENT_ID_BIT_SIZE 8
+#define DX_SEP_REQUEST_COUNTER_BIT_OFFSET 8
+#define DX_SEP_REQUEST_COUNTER_BIT_SIZE 8
+#define DX_SEP_REQUEST_REQ_LEN_BIT_OFFSET 16
+#define DX_SEP_REQUEST_REQ_LEN_BIT_SIZE 16
+
+/* Sep Request GPR3 format (Host to Sep) */
+#define DX_SEP_REQUEST_RETURN_CODE_BIT_OFFSET 0
+#define DX_SEP_REQUEST_RETURN_CODE_BIT_SIZE 8
+/* #define DX_SEP_REQUEST_COUNTER_BIT_OFFSET 8 */
+/* #define DX_SEP_REQUEST_COUNTER_BIT_SIZE 8 */
+#define DX_SEP_REQUEST_RESP_LEN_BIT_OFFSET 16
+#define DX_SEP_REQUEST_RESP_LEN_BIT_SIZE 16
+
+/* Get/Set macros */
+#define SEP_REQUEST_GET_AGENT_ID(gpr) BITFIELD_GET(                           \
+	(gpr), DX_SEP_REQUEST_AGENT_ID_BIT_OFFSET,                            \
+	DX_SEP_REQUEST_AGENT_ID_BIT_SIZE)
+#define SEP_REQUEST_SET_AGENT_ID(gpr, val) BITFIELD_SET(                      \
+	(gpr), DX_SEP_REQUEST_AGENT_ID_BIT_OFFSET,                            \
+	DX_SEP_REQUEST_AGENT_ID_BIT_SIZE, (val))
+#define SEP_REQUEST_GET_RETURN_CODE(gpr) BITFIELD_GET(                        \
+	(gpr), DX_SEP_REQUEST_RETURN_CODE_BIT_OFFSET,                         \
+	DX_SEP_REQUEST_RETURN_CODE_BIT_SIZE)
+#define SEP_REQUEST_SET_RETURN_CODE(gpr, val) BITFIELD_SET(                   \
+	(gpr), DX_SEP_REQUEST_RETURN_CODE_BIT_OFFSET,                         \
+	DX_SEP_REQUEST_RETURN_CODE_BIT_SIZE, (val))
+#define SEP_REQUEST_GET_COUNTER(gpr) BITFIELD_GET(                            \
+	(gpr), DX_SEP_REQUEST_COUNTER_BIT_OFFSET,                             \
+	DX_SEP_REQUEST_COUNTER_BIT_SIZE)
+#define SEP_REQUEST_SET_COUNTER(gpr, val) BITFIELD_SET(                       \
+	(gpr), DX_SEP_REQUEST_COUNTER_BIT_OFFSET,                             \
+	DX_SEP_REQUEST_COUNTER_BIT_SIZE, (val))
+#define SEP_REQUEST_GET_REQ_LEN(gpr) BITFIELD_GET(                            \
+	(gpr), DX_SEP_REQUEST_REQ_LEN_BIT_OFFSET,                             \
+	DX_SEP_REQUEST_REQ_LEN_BIT_SIZE)
+#define SEP_REQUEST_SET_REQ_LEN(gpr, val) BITFIELD_SET(                       \
+	(gpr), DX_SEP_REQUEST_REQ_LEN_BIT_OFFSET,                             \
+	DX_SEP_REQUEST_REQ_LEN_BIT_SIZE, (val))
+#define SEP_REQUEST_GET_RESP_LEN(gpr) BITFIELD_GET(                           \
+	(gpr), DX_SEP_REQUEST_RESP_LEN_BIT_OFFSET,                            \
+	DX_SEP_REQUEST_RESP_LEN_BIT_SIZE)
+#define SEP_REQUEST_SET_RESP_LEN(gpr, val) BITFIELD_SET(                      \
+	(gpr), DX_SEP_REQUEST_RESP_LEN_BIT_OFFSET,                            \
+	DX_SEP_REQUEST_RESP_LEN_BIT_SIZE, (val))
+
+#endif /*_SEP_REQUEST_H_*/
diff --git a/drivers/staging/sep54/sep_request_mgr.c b/drivers/staging/sep54/sep_request_mgr.c
new file mode 100644
index 0000000..3cdd77f
--- /dev/null
+++ b/drivers/staging/sep54/sep_request_mgr.c
@@ -0,0 +1,503 @@
+/*******************************************************************
+ * (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+ * This software is protected by copyright, international           *
+ * treaties and patents, and distributed under multiple licenses.   *
+ * Any use of this Software as part of the Discretix CryptoCell or  *
+ * Packet Engine products requires a commercial license.            *
+ * Copies of this Software that are distributed with the Discretix  *
+ * CryptoCell or Packet Engine product drivers, may be used in      *
+ * accordance with a commercial license, or at the user's option,   *
+ * used and redistributed under the terms and conditions of the GNU *
+ * General Public License ("GPL") version 2, as published by the    *
+ * Free Software Foundation.                                        *
+ * This program is distributed in the hope that it will be useful,  *
+ * but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+ * See the GNU General Public License version 2 for more details.   *
+ * You should have received a copy of the GNU General Public        *
+ * License version 2 along with this Software; if not, please write *
+ * to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+ * 330, Boston, MA 02111-1307, USA.                                 *
+ * Any copy or reproduction of this Software, as permitted under    *
+ * the GNU General Public License version 2, must include this      *
+ * Copyright Notice as well as any other notices provided under     *
+ * the said license.                                                *
+ ********************************************************************/
+
+#define SEP_LOG_CUR_COMPONENT SEP_LOG_MASK_SEP_REQUEST
+
+#include <linux/sched.h>
+/*#include <linux/export.h>*/
+#include "dx_driver.h"
+#include "dx_bitops.h"
+#include "sep_log.h"
+#include "dx_reg_base_host.h"
+#include "dx_host.h"
+#define DX_CC_HOST_VIRT	/* must be defined before including dx_cc_regs.h */
+#include "dx_cc_regs.h"
+#include "sep_request.h"
+#include "sep_request_mgr.h"
+#include "dx_sep_kapi.h"
+
+/* The request/response coherent buffer size */
+#define DX_SEP_REQUEST_BUF_SIZE (4*1024)
+#if (DX_SEP_REQUEST_BUF_SIZE < DX_SEP_REQUEST_MIN_BUF_SIZE)
+#error DX_SEP_REQUEST_BUF_SIZE too small
+#endif
+#if (DX_SEP_REQUEST_BUF_SIZE > DX_SEP_REQUEST_MAX_BUF_SIZE)
+#error DX_SEP_REQUEST_BUF_SIZE too big
+#endif
+#if (DX_SEP_REQUEST_BUF_SIZE & DX_SEP_REQUEST_4KB_MASK)
+#error DX_SEP_REQUEST_BUF_SIZE must be a 4KB multiple
+#endif
+
+/* The maximum number of sep request agents */
+/* Valid IDs are 0 to (DX_SEP_REQUEST_MAX_AGENTS-1) */
+#define DX_SEP_REQUEST_MAX_AGENTS 4
+
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
+
+/* Sep Request state object */
+static struct {
+	u8 *sep_req_buf_p;
+	dma_addr_t sep_req_buf_dma;
+	u8 *host_resp_buf_p;
+	dma_addr_t host_resp_buf_dma;
+	u8 req_counter;
+	wait_queue_head_t agent_event[DX_SEP_REQUEST_MAX_AGENTS];
+	bool agent_valid[DX_SEP_REQUEST_MAX_AGENTS];
+	bool agent_busy[DX_SEP_REQUEST_MAX_AGENTS];
+	bool request_pending;
+	u32 *sep_host_gpr_adr;
+	u32 *host_sep_gpr_adr;
+} sep_req_state;
+
+/* TODO:
+   1) request_pending should use agent id instead of a global flag
+   2) agent id 0 should be changed to non-valid
+   3) Change sep request params for sep init to [] array instead pointer
+   4) Consider usage of a mutex for syncing all access to the state
+*/
+
+/**
+ * dx_sep_req_handler() - SeP request interrupt handler
+ * @drvdata: The driver private info
+ */
+void dx_sep_req_handler(struct sep_drvdata *drvdata)
+{
+	u8 agent_id;
+	u32 gpr_val;
+	u32 sep_req_error = DX_SEP_REQUEST_SUCCESS;
+	u32 counter_val;
+	u32 req_len;
+
+	/* Read GPR3 value */
+	gpr_val = READ_REGISTER(drvdata->cc_base +
+				SEP_HOST_GPR_REG_OFFSET
+				(DX_SEP_REQUEST_GPR_IDX));
+
+	/* Parse the new gpr value */
+	counter_val = SEP_REQUEST_GET_COUNTER(gpr_val);
+	agent_id = SEP_REQUEST_GET_AGENT_ID(gpr_val);
+	req_len = SEP_REQUEST_GET_REQ_LEN(gpr_val);
+
+	/* Increase the req_counter value in the state structure */
+	sep_req_state.req_counter++;
+
+	if (unlikely(counter_val != sep_req_state.req_counter))
+		/* Verify new req_counter value is equal to the req_counter
+		 * value from the state. If not, proceed to critical error flow
+		 * below with error code SEP_REQUEST_OUT_OF_SYNC_ERR. */
+		sep_req_error = DX_SEP_REQUEST_OUT_OF_SYNC_ERR;
+	else if (unlikely((agent_id >= DX_SEP_REQUEST_MAX_AGENTS) ||
+			  (!sep_req_state.agent_valid[agent_id])))
+		/* Verify that the SeP Req Agent ID is registered in the LUT, if
+		 * not proceed to critical error flow below with error code
+		 * SEP_REQUEST_INVALID_AGENT_ID_ERR. */
+		sep_req_error = DX_SEP_REQUEST_INVALID_AGENT_ID_ERR;
+	else if (unlikely(req_len > DX_SEP_REQUEST_BUF_SIZE))
+		/* Verify the request length is not bigger than the maximum
+		 * allocated request buffer size. In bigger, proceed to critical
+		 * error flow below with the SEP_REQUEST_INVALID_REQ_SIZE_ERR
+		 * error code. */
+		sep_req_error = DX_SEP_REQUEST_INVALID_REQ_SIZE_ERR;
+
+	if (likely(sep_req_error == DX_SEP_REQUEST_SUCCESS)) {
+		/* Signal the wake up event according to the LUT */
+		sep_req_state.agent_busy[agent_id] = true;
+		wake_up_interruptible(&sep_req_state.agent_event[agent_id]);
+	} else {
+		/* Critical error flow */
+
+		/* Build the new GPR3 value out of the req_counter from the
+		 * state, the error condition and zero response length value */
+		gpr_val = 0;
+		SEP_REQUEST_SET_COUNTER(gpr_val, sep_req_state.req_counter);
+		SEP_REQUEST_SET_RESP_LEN(gpr_val, 0);
+		SEP_REQUEST_SET_RETURN_CODE(gpr_val, sep_req_error);
+		WRITE_REGISTER(drvdata->cc_base +
+			       HOST_SEP_GPR_REG_OFFSET(DX_SEP_REQUEST_GPR_IDX),
+			       gpr_val);
+	}
+}
+
+/**
+ * dx_sep_req_register_agent() - Register an agent
+ * @agent_id: The agent ID
+ * @max_buf_size: A pointer to the max buffer size
+ *
+ * Returns int 0 on success
+ */
+int dx_sep_req_register_agent(u8 agent_id, u32 *max_buf_size)
+{
+	pr_debug("Register SeP Request agent (id=%d)\n", agent_id);
+
+	/* Validate agent ID is in range */
+	if (agent_id >= DX_SEP_REQUEST_MAX_AGENTS) {
+		pr_err("Invalid agent ID\n");
+		return -EINVAL;
+	}
+
+	/* Verify SeP Req Agent ID is not valid */
+	if (sep_req_state.agent_valid[agent_id] == true) {
+		pr_err("Agent already registered\n");
+		return -EINVAL;
+	}
+
+	/* Verify max_buf_size pointer is not NULL */
+	if (max_buf_size == NULL) {
+		pr_err("max_buf_size is NULL\n");
+		return -EINVAL;
+	}
+
+	/* Set "agent_valid" field to TRUE */
+	sep_req_state.agent_valid[agent_id] = true;
+
+	/* Return the request/response max buffer size */
+	*max_buf_size = DX_SEP_REQUEST_BUF_SIZE;
+
+	return 0;
+}
+EXPORT_SYMBOL(dx_sep_req_register_agent);
+
+/**
+ * dx_sep_req_unregister_agent() - Unregsiter an agent
+ * @agent_id: The agent ID
+ *
+ * Returns int 0 on success
+ */
+int dx_sep_req_unregister_agent(u8 agent_id)
+{
+	pr_debug("Unregsiter SeP Request agent (id=%d)\n", agent_id);
+
+	/* Validate agent ID is in range */
+	if (agent_id >= DX_SEP_REQUEST_MAX_AGENTS) {
+		pr_err("Invalid agent ID\n");
+		return -EINVAL;
+	}
+
+	/* Verify SeP Req Agent ID is valid */
+	if (sep_req_state.agent_valid[agent_id] == false) {
+		pr_err("Agent not registered\n");
+		return -EINVAL;
+	}
+
+	/* Verify SeP agent is not busy */
+	if (sep_req_state.agent_busy[agent_id] == true) {
+		pr_err("Agent is busy\n");
+		return -EBUSY;
+	}
+
+	/* Set "agent_valid" field to FALSE */
+	sep_req_state.agent_valid[agent_id] = false;
+
+	return 0;
+}
+EXPORT_SYMBOL(dx_sep_req_unregister_agent);
+
+/**
+ * dx_sep_req_wait_for_request() - Wait from an incoming sep request
+ * @agent_id: The agent ID
+ * @sep_req_buf_p: Pointer to the incoming request buffer
+ * @req_buf_size: Pointer to the incoming request size
+ *
+ * Returns int 0 on success
+ */
+int dx_sep_req_wait_for_request(u8 agent_id, u8 *sep_req_buf_p,
+				u32 *req_buf_size)
+{
+	u32 gpr_val;
+	int ret;
+
+	pr_debug("Wait for sep request\n");
+	pr_debug("agent_id=%d sep_req_buf_p=0x%p\n", agent_id, sep_req_buf_p);
+
+	/* Validate agent ID is in range */
+	if (agent_id >= DX_SEP_REQUEST_MAX_AGENTS) {
+		pr_err("Invalid agent ID\n");
+		return -EINVAL;
+	}
+
+	/* Verify SeP Req Agent ID is valid */
+	if (sep_req_state.agent_valid[agent_id] == false) {
+		pr_err("Agent not registered\n");
+		return -EINVAL;
+	}
+
+	/* Verify that another sep request is not pending */
+	if (sep_req_state.request_pending == true) {
+		pr_err("Request pending\n");
+		return -EBUSY;
+	}
+
+	/* Verify sep_req_buf_p pointer is not NULL */
+	if (sep_req_buf_p == NULL) {
+		pr_err("sep_req_buf_p is NULL\n");
+		return -EINVAL;
+	}
+
+	/* Verify req_buf_size pointer is not NULL */
+	if (req_buf_size == NULL) {
+		pr_err("req_buf_size is NULL\n");
+		return -EINVAL;
+	}
+
+	/* Verify *req_buf_size is not zero and not bigger than the
+	 * allocated request buffer */
+	if ((*req_buf_size == 0) || (*req_buf_size > DX_SEP_REQUEST_BUF_SIZE)) {
+		pr_err("Invalid request buffer size\n");
+		return -EINVAL;
+	}
+
+	/* Wait for incoming request */
+	ret = wait_event_interruptible(sep_req_state.agent_event[agent_id],
+			sep_req_state.agent_busy[agent_id] == true);
+	if (ret) {
+		pr_err("Wait event failed %d\n", ret);
+		return ret;
+	}
+
+	sep_req_state.request_pending = true;
+
+	gpr_val = READ_REGISTER(sep_req_state.sep_host_gpr_adr);
+
+	/* If the request length is bigger than the callers' specified
+	 * buffer size, the request is partially copied to the callers'
+	 * buffer (only the first relevant bytes). The caller will not be
+	 * indicated for an error condition in this case. The remaining bytes
+	 * in the callers' request buffer are left as is without clearing.
+	 * If the request length is smaller than the callers' specified buffer
+	 * size, the relevant bytes from the allocated kernel request buffer
+	 * are copied to the callers' request buffer */
+	memcpy(sep_req_buf_p, sep_req_state.sep_req_buf_p,
+	       MIN(*req_buf_size, SEP_REQUEST_GET_REQ_LEN(gpr_val)));
+
+	return 0;
+}
+EXPORT_SYMBOL(dx_sep_req_wait_for_request);
+
+/**
+ * dx_sep_req_send_response() - Send a response to the sep
+ * @agent_id: The agent ID
+ * @host_resp_buf_p: Pointer to the outgoing response buffer
+ * @resp_buf_size: Pointer to the outgoing response size
+ *
+ * Returns int 0 on success
+ */
+int dx_sep_req_send_response(u8 agent_id, u8 *host_resp_buf_p,
+			     u32 resp_buf_size)
+{
+	u32 gpr_val;
+
+	pr_debug("Send host response\n");
+	pr_debug("agent_id=%d host_resp_buf_p=0x%p resp_buf_size=%d\n",
+		 agent_id, host_resp_buf_p, resp_buf_size);
+
+	/* Validate agent ID is in range */
+	if (agent_id >= DX_SEP_REQUEST_MAX_AGENTS) {
+		pr_err("Invalid agent ID\n");
+		return -EINVAL;
+	}
+
+	/* Verify SeP Req Agent ID is valid */
+	if (sep_req_state.agent_valid[agent_id] == false) {
+		pr_err("Agent not registered\n");
+		return -EINVAL;
+	}
+
+	/* Verify SeP agent is busy */
+	if (sep_req_state.agent_busy[agent_id] != true) {
+		pr_err("Agent is not busy\n");
+		return -EBUSY;
+	}
+
+	/* Verify that a sep request is pending */
+	if (sep_req_state.request_pending != true) {
+		pr_err("No requests are pending\n");
+		return -EBUSY;
+	}
+
+	/* Verify host_resp_buf_p pointer is not NULL */
+	if (host_resp_buf_p == NULL) {
+		pr_err("host_resp_buf_p is NULL\n");
+		return -EINVAL;
+	}
+
+	/* Verify resp_buf_size is not zero and not bigger than the allocated
+	 * request buffer */
+	if ((resp_buf_size == 0) || (resp_buf_size > DX_SEP_REQUEST_BUF_SIZE)) {
+		pr_err("Invalid response buffer size\n");
+		return -EINVAL;
+	}
+
+	/* The request is copied from the callers' buffer to the global
+	 * response buffer, only up to the callers' response length */
+	memcpy(sep_req_state.host_resp_buf_p, host_resp_buf_p, resp_buf_size);
+
+	/* Clear the request message buffer */
+	memset(sep_req_state.sep_req_buf_p, 0, DX_SEP_REQUEST_BUF_SIZE);
+
+	/* Clear the response message buffer for all remaining bytes
+	 * beyond the response buffer actual size */
+	memset(sep_req_state.host_resp_buf_p + resp_buf_size, 0,
+	       DX_SEP_REQUEST_BUF_SIZE - resp_buf_size);
+
+	/* This needs to be done before signaling Chaabi because otherwise
+	 * Chaabi is able to send next interrupt while this thread is scheduled
+	 * in wait queue */
+	sep_req_state.agent_busy[agent_id] = false;
+	sep_req_state.request_pending = false;
+
+	/* Build the new GPR3 value out of the req_counter from the state,
+	 * the response length and the DX_SEP_REQUEST_SUCCESS return code.
+	 * Place the value in GPR3. */
+	gpr_val = 0;
+	SEP_REQUEST_SET_COUNTER(gpr_val, sep_req_state.req_counter);
+	SEP_REQUEST_SET_RESP_LEN(gpr_val, resp_buf_size);
+	SEP_REQUEST_SET_RETURN_CODE(gpr_val, DX_SEP_REQUEST_SUCCESS);
+	WRITE_REGISTER(sep_req_state.host_sep_gpr_adr, gpr_val);
+
+	return 0;
+}
+EXPORT_SYMBOL(dx_sep_req_send_response);
+
+/**
+ * dx_sep_req_get_sep_init_params() - Setup sep init params
+ * @sep_request_params: The sep init parameters array
+ */
+void dx_sep_req_get_sep_init_params(u32 *sep_request_params)
+{
+	sep_request_params[0] = (u32) sep_req_state.sep_req_buf_dma;
+	sep_request_params[1] = (u32) sep_req_state.host_resp_buf_dma;
+	sep_request_params[2] = DX_SEP_REQUEST_BUF_SIZE;
+}
+
+/**
+ * dx_sep_req_enable() - Enable the sep request interrupt handling
+ * @drvdata: Driver private data
+ */
+void dx_sep_req_enable(struct sep_drvdata *drvdata)
+{
+	/* clear pending interrupts in GPRs of SEP request
+	 * (leftovers from init writes to GPRs..) */
+	WRITE_REGISTER(drvdata->cc_base + DX_CC_REG_OFFSET(HOST, ICR),
+		       SEP_HOST_GPR_IRQ_MASK(DX_SEP_REQUEST_GPR_IDX));
+
+	/* set IMR register */
+	drvdata->irq_mask |= SEP_HOST_GPR_IRQ_MASK(DX_SEP_REQUEST_GPR_IDX);
+	WRITE_REGISTER(drvdata->cc_base + DX_CC_REG_OFFSET(HOST, IMR),
+		       ~drvdata->irq_mask);
+}
+
+/**
+ * dx_sep_req_init() - Initialize the sep request state
+ * @drvdata: Driver private data
+ */
+int dx_sep_req_init(struct sep_drvdata *drvdata)
+{
+	int i;
+
+	pr_debug("Initialize SeP Request state\n");
+
+	sep_req_state.request_pending = false;
+	sep_req_state.req_counter = 0;
+
+	for (i = 0; i < DX_SEP_REQUEST_MAX_AGENTS; i++) {
+		sep_req_state.agent_valid[i] = false;
+		sep_req_state.agent_busy[i] = false;
+		init_waitqueue_head(&sep_req_state.agent_event[i]);
+	}
+
+	/* allocate coherent request buffer */
+	sep_req_state.sep_req_buf_p = dma_alloc_coherent(drvdata->dev,
+						 DX_SEP_REQUEST_BUF_SIZE,
+						 &sep_req_state.
+						 sep_req_buf_dma,
+						 GFP_KERNEL);
+	pr_debug("sep_req_buf_dma=0x%08X sep_req_buf_p=0x%p size=0x%08X\n",
+		      (u32)sep_req_state.sep_req_buf_dma,
+		      sep_req_state.sep_req_buf_p, DX_SEP_REQUEST_BUF_SIZE);
+
+	if (sep_req_state.sep_req_buf_p == NULL) {
+		pr_err("Unable to allocate coherent request buffer\n");
+		return -ENOMEM;
+	}
+
+	/* Clear the request buffer */
+	memset(sep_req_state.sep_req_buf_p, 0, DX_SEP_REQUEST_BUF_SIZE);
+
+	/* allocate coherent response buffer */
+	sep_req_state.host_resp_buf_p = dma_alloc_coherent(drvdata->dev,
+						   DX_SEP_REQUEST_BUF_SIZE,
+						   &sep_req_state.
+						   host_resp_buf_dma,
+						   GFP_KERNEL);
+	pr_debug(
+		      "host_resp_buf_dma=0x%08X host_resp_buf_p=0x%p size=0x%08X\n",
+		      (u32)sep_req_state.host_resp_buf_dma,
+		      sep_req_state.host_resp_buf_p, DX_SEP_REQUEST_BUF_SIZE);
+
+	if (sep_req_state.host_resp_buf_p == NULL) {
+		pr_err("Unable to allocate coherent response buffer\n");
+		return -ENOMEM;
+	}
+
+	/* Clear the response buffer */
+	memset(sep_req_state.host_resp_buf_p, 0, DX_SEP_REQUEST_BUF_SIZE);
+
+	/* Setup the GPR address */
+	sep_req_state.sep_host_gpr_adr = drvdata->cc_base +
+	    SEP_HOST_GPR_REG_OFFSET(DX_SEP_REQUEST_GPR_IDX);
+
+	sep_req_state.host_sep_gpr_adr = drvdata->cc_base +
+	    HOST_SEP_GPR_REG_OFFSET(DX_SEP_REQUEST_GPR_IDX);
+
+	return 0;
+}
+
+/**
+ * dx_sep_req_fini() - Finalize the sep request state
+ * @drvdata: Driver private data
+ */
+void dx_sep_req_fini(struct sep_drvdata *drvdata)
+{
+	int i;
+
+	pr_debug("Finalize SeP Request state\n");
+
+	sep_req_state.request_pending = false;
+	sep_req_state.req_counter = 0;
+	for (i = 0; i < DX_SEP_REQUEST_MAX_AGENTS; i++) {
+		sep_req_state.agent_valid[i] = false;
+		sep_req_state.agent_busy[i] = false;
+	}
+
+	dma_free_coherent(drvdata->dev, DX_SEP_REQUEST_BUF_SIZE,
+			  sep_req_state.sep_req_buf_p,
+			  sep_req_state.sep_req_buf_dma);
+
+	dma_free_coherent(drvdata->dev, DX_SEP_REQUEST_BUF_SIZE,
+			  sep_req_state.host_resp_buf_p,
+			  sep_req_state.host_resp_buf_dma);
+}
diff --git a/drivers/staging/sep54/sep_request_mgr.h b/drivers/staging/sep54/sep_request_mgr.h
new file mode 100644
index 0000000..6d79651
--- /dev/null
+++ b/drivers/staging/sep54/sep_request_mgr.h
@@ -0,0 +1,63 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+#ifndef _SEP_REQUEST_KERNEL_API_H_
+#define _SEP_REQUEST_KERNEL_API_H_
+
+#include <linux/types.h>
+#include "dx_driver.h"
+
+/**
+ * dx_sep_req_handler() - SeP request interrupt handler
+ * @drvdata: The driver private info
+ */
+void dx_sep_req_handler(struct sep_drvdata *drvdata);
+
+/**
+ * dx_sep_req_get_sep_init_params() - Setup sep init params
+ * @sep_request_params: The sep init parameters array
+ */
+void dx_sep_req_get_sep_init_params(u32 *sep_request_params);
+
+/**
+ * dx_sep_req_enable() - Enable the sep request interrupt handling
+ * @drvdata: Driver private data
+ */
+void dx_sep_req_enable(struct sep_drvdata *drvdata);
+
+/**
+ * dx_sep_req_init() - Initialize the sep request state
+ * @drvdata: Driver private data
+ */
+int dx_sep_req_init(struct sep_drvdata *drvdata);
+
+/**
+ * dx_sep_req_fini() - Finalize the sep request state
+ * @drvdata: Driver private data
+ */
+void dx_sep_req_fini(struct sep_drvdata *drvdata);
+
+#endif /*_SEP_REQUEST_KERNEL_API_H_*/
diff --git a/drivers/staging/sep54/sep_rpc.h b/drivers/staging/sep54/sep_rpc.h
new file mode 100644
index 0000000..3199a14
--- /dev/null
+++ b/drivers/staging/sep54/sep_rpc.h
@@ -0,0 +1,109 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+#ifndef __SEP_RPC_H__
+#define __SEP_RPC_H__
+
+/* SeP RPC infrastructure API */
+#ifdef __KERNEL__
+#include <linux/types.h>
+
+#else
+#include "dx_pal_types.h"
+
+#endif /*__KERNEL__*/
+
+/* Maximum size of SeP RPC message in bytes */
+#define SEP_RPC_MAX_MSG_SIZE 8191
+#define SEP_RPC_MAX_WORKSPACE_SIZE 8191
+
+/* The maximum allowed user memory references per function
+   (CRYS requires only 2, but GPAPI/TEE needs up to 4) */
+#define SEP_RPC_MAX_MEMREF_PER_FUNC 4
+
+/* If this macro is not provided by the includer of this file,
+   the log message would be dropped */
+#ifndef SEP_RPC_LOG
+#define SEP_RPC_LOG(format, ...) do {} while (0)
+#endif
+
+#define SEP_RPC_ASSERT(cond, inval_param_retcode) {\
+	if (!(cond)) {\
+		SEP_RPC_LOG("SEP_RPC_ASSERT: %s\n", #cond);\
+		return inval_param_retcode;\
+	} \
+}
+
+/* NOTE:
+   All data must be in little (SeP) endian */
+
+enum seprpc_retcode {
+	SEPRPC_RET_OK = 0,
+	SEPRPC_RET_ERROR,	/*Generic error code (not one of the others) */
+	SEPRPC_RET_EINVAL_AGENT,	/* Unknown agent ID */
+	SEPRPC_RET_EINVAL_FUNC,	/* Unknown function ID for given agent */
+	SEPRPC_RET_EINVAL,	/* Invalid parameter */
+	SEPRPC_RET_ENORSC,	/* Not enough resources to complete request */
+	SEPRPC_RET_RESERVE32 = 0x7FFFFFFF	/* assure this enum is 32b */
+};
+
+enum seprpc_memref_type {
+	SEPRPC_MEMREF_NULL = 0,	/* Invalid memory reference */
+	SEPRPC_MEMREF_EMBED = 1,/* Data embedded in parameters message */
+	SEPRPC_MEMREF_DLLI = 2,
+	SEPRPC_MEMREF_MLLI = 3,
+	SEPRPC_MEMREF_MAX = SEPRPC_MEMREF_MLLI,
+	SEPRPC_MEMREF_RESERVE32 = 0x7FFFFFFF	/* assure this enum is 32b */
+};
+
+#pragma pack(push)
+#pragma pack(4)
+/* A strcuture to pass host memory reference */
+struct seprpc_memref {
+	enum seprpc_memref_type ref_type;
+	u32 location;
+	u32 size;
+	u32 count;
+	/* SEPRPC_MEMREF_EMBED: location= offset in SepRpc_Params .
+	 * size= data size in bytes. count= N/A */
+	/* SEPRPC_MEMREF_DLLI: location= DMA address of data in host memory.
+	 * size= data size in bytes. count= N/A. */
+	/* SEPRPC_MEMREF_MLLI: location= DMA address of first MLLI table.
+	 * size= size in bytes of first table.
+	 * count= Num. of MLLI tables. */
+};
+
+struct seprpc_params {
+	u32 num_of_memrefs;/* Number of elements in the memRef array */
+	struct seprpc_memref memref[1];
+	/* This array is actually in the size of numOfMemRefs
+	 * (i.e., it is just a placeholder that may be void) */
+	/*   Following this array comes the function-specific parameters */
+} __attribute__ ((__may_alias__));
+
+#pragma pack(pop)
+
+#endif /*__SEP_RPC_H__*/
diff --git a/drivers/staging/sep54/sep_sram_map.h b/drivers/staging/sep54/sep_sram_map.h
new file mode 100644
index 0000000..2c7db45
--- /dev/null
+++ b/drivers/staging/sep54/sep_sram_map.h
@@ -0,0 +1,43 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+
+/* This file contains the definitions of the OTP data that the SEP copies
+into the SRAM during the first boot process */
+
+
+#ifndef _SEP_SRAM_MAP_
+#define _SEP_SRAM_MAP_
+
+#define DX_FIRST_OEM_KEY_OFFSET_IN_SRAM         0x0
+#define DX_SECOND_OEM_KEY_OFFSET_IN_SRAM        0x4
+#define DX_THIRD_OEM_KEY_OFFSET_IN_SRAM         0x8
+#define DX_LCS_OFFSET_IN_SRAM                   0xC
+#define DX_MISC_OFFSET_IN_SRAM                  0xD
+#define DX_CC_INIT_MSG_OFFSET_IN_SRAM		0x100
+#define DX_PKA_MEMORY_OFFSET_IN_SRAM		0x200
+
+#endif /*_GEN_SRAM_MAP_*/
diff --git a/drivers/staging/sep54/sep_sw_desc.h b/drivers/staging/sep54/sep_sw_desc.h
new file mode 100644
index 0000000..8538c4f
--- /dev/null
+++ b/drivers/staging/sep54/sep_sw_desc.h
@@ -0,0 +1,468 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+#ifndef _SEP_SW_DESC_H_
+#define _SEP_SW_DESC_H_
+#ifdef __KERNEL__
+#include <linux/types.h>
+#else
+#include <stdint.h>
+#endif
+#include "dx_bitops.h"
+#include "sep_rpc.h"
+
+/* Common descriptor fields access (type independent) */
+/* To be used with fields: TYPE, RET_CODE, COOKIE     */
+#define SEP_SW_DESC_GET(desc_p, desc_field) BITFIELD_GET(                     \
+	((u32 *)(desc_p))[SEP_SW_DESC_ ## desc_field ## _WORD_OFFSET],   \
+	SEP_SW_DESC_ ## desc_field ## _BIT_OFFSET,			      \
+	SEP_SW_DESC_ ## desc_field ## _BIT_SIZE)
+#define SEP_SW_DESC_SET(desc_p, desc_field, new_val) BITFIELD_SET(            \
+	((u32 *)(desc_p))[SEP_SW_DESC_ ## desc_field ## _WORD_OFFSET],   \
+	SEP_SW_DESC_ ## desc_field ## _BIT_OFFSET,			      \
+	SEP_SW_DESC_ ## desc_field ## _BIT_SIZE,			      \
+	new_val)
+
+/* Type specific descriptor fields access */
+#define SEP_SW_DESC_GET4TYPE(desc_p, desc_type, desc_field) BITFIELD_GET(     \
+	((u32 *)(desc_p))                                                \
+	[SEP_SW_DESC_ ## desc_type ## _ ## desc_field ## _WORD_OFFSET],	      \
+	SEP_SW_DESC_ ## desc_type ## _ ## desc_field ##  _BIT_OFFSET,	      \
+	SEP_SW_DESC_ ## desc_type ## _ ## desc_field ## _BIT_SIZE)
+#define SEP_SW_DESC_SET4TYPE(desc_p, desc_type, desc_field, new_val)          \
+	BITFIELD_SET(							      \
+	((u32 *)(desc_p))                                                \
+	[SEP_SW_DESC_ ## desc_type ## _ ## desc_field ## _WORD_OFFSET],       \
+	SEP_SW_DESC_ ## desc_type ## _ ## desc_field ##  _BIT_OFFSET,	      \
+	SEP_SW_DESC_ ## desc_type ## _ ## desc_field ## _BIT_SIZE, new_val)
+
+#define SEP_SW_DESC_INIT(desc_p) \
+	memset(desc_p, 0, SEP_SW_DESC_WORD_SIZE * sizeof(u32))
+
+/* Total descriptor size in 32b words */
+#define SEP_SW_DESC_WORD_SIZE 8
+#define SEP_SW_DESC_WORD_SIZE_LOG 3
+
+/***********************************/
+/* Common bit fields definitions   */
+/***********************************/
+ /* Descriptor type: TYPE */
+#define SEP_SW_DESC_TYPE_WORD_OFFSET 0
+#define SEP_SW_DESC_TYPE_BIT_OFFSET 0
+#define SEP_SW_DESC_TYPE_BIT_SIZE 4
+/* Descriptor type encoding */
+enum sep_sw_desc_type {
+	SEP_SW_DESC_TYPE_NULL = 0,
+	SEP_SW_DESC_TYPE_CRYPTO_OP = 0x1,
+	SEP_SW_DESC_TYPE_RPC_MSG = 0x2,
+	SEP_SW_DESC_TYPE_APP_REQ = 0x3,
+	SEP_SW_DESC_TYPE_LOAD_OP = 0x4,
+	SEP_SW_DESC_TYPE_COMBINED_OP = 0x5,
+	SEP_SW_DESC_TYPE_SLEEP_REQ = 0x6,
+	SEP_SW_DESC_TYPE_DEBUG = 0xF
+	    /* Only 4 bits - do not extend to 32b */
+};
+
+enum sep_sw_desc_retcode {
+	SEP_SW_DESC_RET_OK = 0,
+	SEP_SW_DESC_RET_EINVAL_DESC_TYPE	/* Invalid descriptor type */
+};
+
+/* Return code: RET_CODE */
+#define SEP_SW_DESC_RET_CODE_WORD_OFFSET 6
+#define SEP_SW_DESC_RET_CODE_BIT_OFFSET 0
+#define SEP_SW_DESC_RET_CODE_BIT_SIZE 32
+
+/* Descriptor cookie: COOKIE */
+#define SEP_SW_DESC_COOKIE_WORD_OFFSET 7
+#define SEP_SW_DESC_COOKIE_BIT_OFFSET 0
+#define SEP_SW_DESC_COOKIE_BIT_SIZE 32
+
+/****************************************/
+/* Crypto-Op descriptor type: CRYPTO_OP */
+/****************************************/
+
+/* L bit: Load cache: L */
+#define SEP_SW_DESC_CRYPTO_OP_L_WORD_OFFSET 0
+#define SEP_SW_DESC_CRYPTO_OP_L_BIT_OFFSET 31
+#define SEP_SW_DESC_CRYPTO_OP_L_BIT_SIZE 1
+
+/* I bit: Initialize context: I */
+#define SEP_SW_DESC_CRYPTO_OP_I_WORD_OFFSET 0
+#define SEP_SW_DESC_CRYPTO_OP_I_BIT_OFFSET 30
+#define SEP_SW_DESC_CRYPTO_OP_I_BIT_SIZE 1
+
+/* Process mode: PROC_MODE */
+#define SEP_SW_DESC_CRYPTO_OP_PROC_MODE_WORD_OFFSET 0
+#define SEP_SW_DESC_CRYPTO_OP_PROC_MODE_BIT_OFFSET 28
+#define SEP_SW_DESC_CRYPTO_OP_PROC_MODE_BIT_SIZE 2
+/* Process mode field options */
+enum sep_proc_mode {
+	SEP_PROC_MODE_NOP = 0,
+	SEP_PROC_MODE_PROC_T = 1,	/* Process (Text data) */
+	SEP_PROC_MODE_FIN = 2,	/* Finalize (optional: with text data) */
+	SEP_PROC_MODE_PROC_A = 3	/* Process (Additional/Auth. data) */
+	    /* Only 2b - do not extend to 32b */
+};
+
+/* SeP/FW Cache Index: FW_CACHE_IDX */
+#define SEP_SW_DESC_CRYPTO_OP_FW_CACHE_IDX_WORD_OFFSET 0
+#define SEP_SW_DESC_CRYPTO_OP_FW_CACHE_IDX_BIT_OFFSET 4
+#define SEP_SW_DESC_CRYPTO_OP_FW_CACHE_IDX_BIT_SIZE 8
+
+/* HCB address: HCB_ADDR */
+#define SEP_SW_DESC_CRYPTO_OP_HCB_ADDR_WORD_OFFSET 3
+#define SEP_SW_DESC_CRYPTO_OP_HCB_ADDR_BIT_OFFSET 0
+#define SEP_SW_DESC_CRYPTO_OP_HCB_ADDR_BIT_SIZE 32
+
+/* IFT: IFT_ADDR, IFT_SIZE, IFT_NUM */
+#define SEP_SW_DESC_CRYPTO_OP_IFT_ADDR_WORD_OFFSET 1
+#define SEP_SW_DESC_CRYPTO_OP_IFT_ADDR_BIT_OFFSET 0
+#define SEP_SW_DESC_CRYPTO_OP_IFT_ADDR_BIT_SIZE 32
+#define SEP_SW_DESC_CRYPTO_OP_IFT_SIZE_WORD_OFFSET 4
+#define SEP_SW_DESC_CRYPTO_OP_IFT_SIZE_BIT_OFFSET 0
+#define SEP_SW_DESC_CRYPTO_OP_IFT_SIZE_BIT_SIZE 16
+#define SEP_SW_DESC_CRYPTO_OP_IFT_NUM_WORD_OFFSET 5
+#define SEP_SW_DESC_CRYPTO_OP_IFT_NUM_BIT_OFFSET 0
+#define SEP_SW_DESC_CRYPTO_OP_IFT_NUM_BIT_SIZE 16
+
+/* OFT: OFT_ADDR, OFT_SIZE, OFT_NUM */
+#define SEP_SW_DESC_CRYPTO_OP_OFT_ADDR_WORD_OFFSET 2
+#define SEP_SW_DESC_CRYPTO_OP_OFT_ADDR_BIT_OFFSET 0
+#define SEP_SW_DESC_CRYPTO_OP_OFT_ADDR_BIT_SIZE 32
+#define SEP_SW_DESC_CRYPTO_OP_OFT_SIZE_WORD_OFFSET 4
+#define SEP_SW_DESC_CRYPTO_OP_OFT_SIZE_BIT_OFFSET 16
+#define SEP_SW_DESC_CRYPTO_OP_OFT_SIZE_BIT_SIZE 16
+#define SEP_SW_DESC_CRYPTO_OP_OFT_NUM_WORD_OFFSET 5
+#define SEP_SW_DESC_CRYPTO_OP_OFT_NUM_BIT_OFFSET 16
+#define SEP_SW_DESC_CRYPTO_OP_OFT_NUM_BIT_SIZE 16
+
+/********************************************/
+/* Combined-Op descriptor type: COMBINED_OP */
+/********************************************/
+
+/* L bit: Load cache: L */
+#define SEP_SW_DESC_COMBINED_OP_L_WORD_OFFSET \
+	SEP_SW_DESC_CRYPTO_OP_L_WORD_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_L_BIT_OFFSET SEP_SW_DESC_CRYPTO_OP_L_BIT_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_L_BIT_SIZE SEP_SW_DESC_CRYPTO_OP_L_BIT_SIZE
+
+/* I bit: Initialize context: I */
+#define SEP_SW_DESC_COMBINED_OP_I_WORD_OFFSET \
+	SEP_SW_DESC_CRYPTO_OP_I_WORD_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_I_BIT_OFFSET SEP_SW_DESC_CRYPTO_OP_I_BIT_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_I_BIT_SIZE SEP_SW_DESC_CRYPTO_OP_I_BIT_SIZE
+
+/* Process mode: PROC_MODE */
+#define SEP_SW_DESC_COMBINED_OP_PROC_MODE_WORD_OFFSET \
+			SEP_SW_DESC_CRYPTO_OP_PROC_MODE_WORD_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_PROC_MODE_BIT_OFFSET \
+			SEP_SW_DESC_CRYPTO_OP_PROC_MODE_BIT_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_PROC_MODE_BIT_SIZE \
+			SEP_SW_DESC_CRYPTO_OP_PROC_MODE_BIT_SIZE
+
+/* Configuration scheme: CONFIG_SCHEME */
+#define SEP_SW_DESC_COMBINED_OP_CONFIG_SCHEME_WORD_OFFSET \
+			SEP_SW_DESC_CRYPTO_OP_HCB_ADDR_WORD_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_CONFIG_SCHEME_BIT_OFFSET \
+			SEP_SW_DESC_CRYPTO_OP_HCB_ADDR_BIT_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_CONFIG_SCHEME_BIT_SIZE \
+			SEP_SW_DESC_CRYPTO_OP_HCB_ADDR_BIT_SIZE
+
+/* IFT: IFT_ADDR, IFT_SIZE, IFT_NUM */
+#define SEP_SW_DESC_COMBINED_OP_IFT_ADDR_WORD_OFFSET \
+			SEP_SW_DESC_CRYPTO_OP_IFT_ADDR_WORD_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_IFT_ADDR_BIT_OFFSET \
+			SEP_SW_DESC_CRYPTO_OP_IFT_ADDR_BIT_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_IFT_ADDR_BIT_SIZE \
+			SEP_SW_DESC_CRYPTO_OP_IFT_ADDR_BIT_SIZE
+#define SEP_SW_DESC_COMBINED_OP_IFT_SIZE_WORD_OFFSET \
+			SEP_SW_DESC_CRYPTO_OP_IFT_SIZE_WORD_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_IFT_SIZE_BIT_OFFSET \
+			SEP_SW_DESC_CRYPTO_OP_IFT_SIZE_BIT_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_IFT_SIZE_BIT_SIZE \
+			SEP_SW_DESC_CRYPTO_OP_IFT_SIZE_BIT_SIZE
+#define SEP_SW_DESC_COMBINED_OP_IFT_NUM_WORD_OFFSET \
+			SEP_SW_DESC_CRYPTO_OP_IFT_NUM_WORD_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_IFT_NUM_BIT_OFFSET \
+			SEP_SW_DESC_CRYPTO_OP_IFT_NUM_BIT_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_IFT_NUM_BIT_SIZE \
+			SEP_SW_DESC_CRYPTO_OP_IFT_NUM_BIT_SIZE
+
+/* OFT: OFT_ADDR, OFT_SIZE, OFT_NUM */
+#define SEP_SW_DESC_COMBINED_OP_OFT_ADDR_WORD_OFFSET \
+			SEP_SW_DESC_CRYPTO_OP_OFT_ADDR_WORD_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_OFT_ADDR_BIT_OFFSET \
+			SEP_SW_DESC_CRYPTO_OP_OFT_ADDR_BIT_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_OFT_ADDR_BIT_SIZE \
+			SEP_SW_DESC_CRYPTO_OP_OFT_ADDR_BIT_SIZE
+#define SEP_SW_DESC_COMBINED_OP_OFT_SIZE_WORD_OFFSET \
+			SEP_SW_DESC_CRYPTO_OP_OFT_SIZE_WORD_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_OFT_SIZE_BIT_OFFSET \
+			SEP_SW_DESC_CRYPTO_OP_OFT_SIZE_BIT_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_OFT_SIZE_BIT_SIZE \
+			SEP_SW_DESC_CRYPTO_OP_OFT_SIZE_BIT_SIZE
+#define SEP_SW_DESC_COMBINED_OP_OFT_NUM_WORD_OFFSET \
+			SEP_SW_DESC_CRYPTO_OP_OFT_NUM_WORD_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_OFT_NUM_BIT_OFFSET \
+			SEP_SW_DESC_CRYPTO_OP_OFT_NUM_BIT_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_OFT_NUM_BIT_SIZE \
+			SEP_SW_DESC_CRYPTO_OP_OFT_NUM_BIT_SIZE
+
+/* combined scheme macros:
+   these set of macros meant for configuration scheme encoding
+   from the user level interface to the SEP combined driver.
+*/
+#define SEP_ENGINE_TYPE_BIT_SHIFT 0
+#define SEP_ENGINE_TYPE_BIT_SIZE 4
+#define SEP_ENGINE_SRC_BIT_SHIFT 4
+#define SEP_ENGINE_SRC_BIT_SIZE 4
+#define SEP_ENGINE_SLOT_BIT_SIZE \
+		(SEP_ENGINE_SRC_BIT_SIZE + SEP_ENGINE_TYPE_BIT_SIZE)
+
+/******************************* MACROS ***********************************/
+#define _sep_comb_eng_pack_item(eng_src, eng_type) \
+		(((eng_src) << SEP_ENGINE_SRC_BIT_SIZE) | \
+		 ((eng_type) << SEP_ENGINE_TYPE_BIT_SHIFT))
+
+#define _sep_comb_eng_pack_n_shift(src, type, slot) \
+		(_sep_comb_eng_pack_item(src, type) << \
+		(slot * SEP_ENGINE_SLOT_BIT_SIZE))
+
+#define sep_comb_eng_props_set(cfg_p, eng_idx, eng_src, eng_type) do { \
+	BITFIELD_SET(*cfg_p, \
+		(eng_idx * SEP_ENGINE_SLOT_BIT_SIZE), \
+		SEP_ENGINE_SLOT_BIT_SIZE, 0); \
+	BITFIELD_SET(*cfg_p, \
+		(eng_idx * SEP_ENGINE_SLOT_BIT_SIZE), \
+		SEP_ENGINE_SLOT_BIT_SIZE, \
+		_sep_comb_eng_pack_item(eng_src, eng_type)); \
+} while (0)
+
+#define sep_comb_eng_props_get(cfg_p, eng_idx, eng_src, eng_type) do { \
+	*(eng_type) = BITFIELD_GET(*cfg_p, \
+				(eng_idx * SEP_ENGINE_SLOT_BIT_SIZE),\
+				SEP_ENGINE_TYPE_BIT_SIZE); \
+	*(eng_src) = BITFIELD_GET(*cfg_p, \
+				(eng_idx * SEP_ENGINE_SLOT_BIT_SIZE) \
+				+ SEP_ENGINE_TYPE_BIT_SIZE, \
+				SEP_ENGINE_SRC_BIT_SIZE); \
+} while (0)
+
+/******************************************/
+/* Message-Op descriptor type: RPC_MSG    */
+/******************************************/
+
+/* Agent ID: AGENT_ID */
+#define SEP_SW_DESC_RPC_MSG_AGENT_ID_WORD_OFFSET 1
+#define SEP_SW_DESC_RPC_MSG_AGENT_ID_BIT_OFFSET 0
+#define SEP_SW_DESC_RPC_MSG_AGENT_ID_BIT_SIZE 8
+
+/* Function ID: FUNC_ID */
+#define SEP_SW_DESC_RPC_MSG_FUNC_ID_WORD_OFFSET 1
+#define SEP_SW_DESC_RPC_MSG_FUNC_ID_BIT_OFFSET 16
+#define SEP_SW_DESC_RPC_MSG_FUNC_ID_BIT_SIZE 16
+
+/* HMB: HMB_ADDR , HMB_SIZE */
+#define SEP_SW_DESC_RPC_MSG_HMB_ADDR_WORD_OFFSET 2
+#define SEP_SW_DESC_RPC_MSG_HMB_ADDR_BIT_OFFSET 0
+#define SEP_SW_DESC_RPC_MSG_HMB_ADDR_BIT_SIZE 32
+#define SEP_SW_DESC_RPC_MSG_HMB_SIZE_WORD_OFFSET 3
+#define SEP_SW_DESC_RPC_MSG_HMB_SIZE_BIT_OFFSET 0
+#define SEP_SW_DESC_RPC_MSG_HMB_SIZE_BIT_SIZE 13
+
+/************************************************/
+/* SeP Applet Request descriptor type: APP_REQ  */
+/************************************************/
+
+/* Request Type: REQ_TYPE */
+#define SEP_SW_DESC_APP_REQ_REQ_TYPE_WORD_OFFSET 0
+#define SEP_SW_DESC_APP_REQ_REQ_TYPE_BIT_OFFSET 4
+#define SEP_SW_DESC_APP_REQ_REQ_TYPE_BIT_SIZE 2
+
+/* Session ID: SESSION_ID */
+#define SEP_SW_DESC_APP_REQ_SESSION_ID_WORD_OFFSET 0
+#define SEP_SW_DESC_APP_REQ_SESSION_ID_BIT_OFFSET 16
+#define SEP_SW_DESC_APP_REQ_SESSION_ID_BIT_SIZE 12
+
+/* Internal error: INTERNAL_ERR */
+#define SEP_SW_DESC_APP_REQ_INTERNAL_ERR_WORD_OFFSET 0
+#define SEP_SW_DESC_APP_REQ_INTERNAL_ERR_BIT_OFFSET 31
+#define SEP_SW_DESC_APP_REQ_INTERNAL_ERR_BIT_SIZE 1
+
+/* In-Params. Buffer Address: IN_PARAMS_ADDR */
+#define SEP_SW_DESC_APP_REQ_IN_PARAMS_ADDR_WORD_OFFSET 1
+#define SEP_SW_DESC_APP_REQ_IN_PARAMS_ADDR_BIT_OFFSET 0
+#define SEP_SW_DESC_APP_REQ_IN_PARAMS_ADDR_BIT_SIZE 32
+
+/* Return codes for APP_REQ descriptor */
+enum sepapp_retcode {
+	SEPAPP_RET_OK = 0,
+	SEPAPP_RET_EINVAL_QUEUE,	/* Request sent on the wrong SW queue */
+	SEPAPP_RET_EINVAL,	/* Invalid parameters in descriptor */
+};
+
+/* REQ_TYPE field encoding */
+enum sepapp_req_type {
+	SEPAPP_REQ_TYPE_SESSION_OPEN = 1,
+	SEPAPP_REQ_TYPE_SESSION_CLOSE = 2,
+	SEPAPP_REQ_TYPE_COMMAND_INVOKE = 3
+};
+
+/** in-params. data types **/
+
+#define SEPAPP_UUID_SIZE 16
+#define SEPAPP_MAX_PARAMS 4
+#define SEPAPP_MAX_AUTH_DATA_SIZE 16	/* For Application UUID case */
+
+enum sepapp_param_type {
+	SEPAPP_PARAM_TYPE_NULL = 0,
+	SEPAPP_PARAM_TYPE_VAL = 1,
+	SEPAPP_PARAM_TYPE_MEMREF = 2
+};
+
+enum sepapp_data_direction {
+	SEPAPP_DIR_NULL = 0,
+	SEPAPP_DIR_IN = 1,
+	SEPAPP_DIR_OUT = (1 << 1),
+	SEPAPP_DIR_INOUT = SEPAPP_DIR_IN | SEPAPP_DIR_OUT
+};
+
+/* Descriptor for "by value" parameter */
+struct sepapp_val_param {
+	/*enum sepapp_data_direction */ u8 dir;
+	u32 data[2];
+};
+
+/* Depends on seprpc data type defined in sep_rpc.h */
+union sepapp_client_param {
+	struct sepapp_val_param val;
+	struct seprpc_memref memref;
+};
+
+struct sepapp_client_params {
+	u8 params_types[SEPAPP_MAX_PARAMS];
+	union sepapp_client_param params[SEPAPP_MAX_PARAMS];
+};
+
+/* In-params. for SESSION_OPEN request type */
+struct sepapp_in_params_session_open {
+	u8 app_uuid[SEPAPP_UUID_SIZE];
+	u32 auth_method;
+	u8 auth_data[SEPAPP_MAX_AUTH_DATA_SIZE];
+	struct sepapp_client_params client_params;
+};
+
+struct sepapp_in_params_command_invoke {
+	u32 command_id;
+	struct sepapp_client_params client_params;
+};
+
+/* Return codes for SLEEP_REQ descriptor */
+enum sepslp_mode_req_retcode {
+	SEPSLP_MODE_REQ_RET_OK = 0,
+	SEPSLP_MODE_REQ_EGEN,	/* general error */
+	SEPSLP_MODE_REQ_EINVAL_QUEUE,	/* Request sent on the wrong SW queue */
+	SEPSLP_MODE_REQ_EBUSY,/* Request sent while desc. queue is not empty */
+	SEPSLP_MODE_REQ_EABORT	/* Applet requested aborting this request */
+};
+
+/****************************************/
+/* Load-Op descriptor type: LOAD_OP */
+/****************************************/
+
+/* SeP/FW Cache Index: FW_CACHE_IDX */
+#define SEP_SW_DESC_LOAD_OP_FW_CACHE_IDX_WORD_OFFSET 1
+#define SEP_SW_DESC_LOAD_OP_FW_CACHE_IDX_BIT_OFFSET(slot) ((slot) * 8)
+#define SEP_SW_DESC_LOAD_OP_FW_CACHE_IDX_BIT_SIZE 8
+
+/* HCB address: HCB_ADDR */
+#define SEP_SW_DESC_LOAD_OP_HCB_ADDR_WORD_OFFSET(slot) ((slot) + 2)
+#define SEP_SW_DESC_LOAD_OP_HCB_ADDR_BIT_OFFSET 1
+#define SEP_SW_DESC_LOAD_OP_HCB_ADDR_BIT_SIZE 31
+
+/* L bit: Load cache: L */
+#define SEP_SW_DESC_LOAD_OP_L_WORD_OFFSET(slot) ((slot) + 2)
+#define SEP_SW_DESC_LOAD_OP_L_BIT_OFFSET 0
+#define SEP_SW_DESC_LOAD_OP_L_BIT_SIZE 1
+
+/*****************************/
+/*** Descriptor copy flows ***/
+/*****************************/
+/* Copy host descriptor scratchpad to descriptor queue buffer */
+#ifdef __BIG_ENDIAN
+
+/* Verify descriptor copy flows assumptions at compile time:
+   assumes "retcode" and "cookie" are the last words */
+#if (SEP_SW_DESC_RET_CODE_WORD_OFFSET != 6)
+#error SW_DESC_RET_CODE location assumption is broken!
+#endif
+#if (SEP_SW_DESC_COOKIE_WORD_OFFSET != 7)
+#error SW_DESC_COOKIE location assumption is broken!
+#endif
+
+#define SEP_SW_DESC_COPY_TO_SEP(queue_desc_p, spad_desc_p) do {	               \
+	u32 *cur_q_desc_word_p = (u32 *)queue_desc_p;                \
+	u32 *cur_spad_desc_word_p = (u32 *)spad_desc_p;              \
+	int i;	                                                               \
+	/* First 6 words are input data to SeP-FW. Must be in SeP endianess:*/ \
+	/* Copy 7th word too in order to init./clear retcode field	    */ \
+	for (i = 0; i <= SEP_SW_DESC_RET_CODE_WORD_OFFSET; i++) {              \
+		*cur_q_desc_word_p = cpu_to_le32(*cur_spad_desc_word_p);       \
+		cur_spad_desc_word_p++;                                        \
+		cur_q_desc_word_p++;                                           \
+	}                                                                      \
+	/* Word 8 is the cookie which is referenced only by the host */        \
+	/* No need to swap endianess */                                        \
+	*cur_q_desc_word_p = *cur_spad_desc_word_p;                            \
+} while (0)
+
+/* and vice-versa */
+#define SEP_SW_DESC_COPY_FROM_SEP(spad_desc_p, queue_desc_p) do {              \
+	u32 *cur_q_desc_word_p = (u32 *)queue_desc_p;                \
+	u32 *cur_spad_desc_word_p = (u32 *)spad_desc_p;              \
+	int i;	                                                               \
+	/* First 6 words are input data to SeP-FW in SeP endianess:*/          \
+	/* Copy 7th word too in order to get retcode field	   */          \
+	for (i = 0; i <= SEP_SW_DESC_RET_CODE_WORD_OFFSET; i++) {              \
+		*cur_spad_desc_word_p = le32_to_cpu(*cur_q_desc_word_p);       \
+		cur_spad_desc_word_p++;                                        \
+		cur_q_desc_word_p++;                                           \
+	}                                                                      \
+	/* Word 8 is the cookie which is referenced only by the host */        \
+	/* No need to swap endianess */                                        \
+	*cur_spad_desc_word_p = *cur_q_desc_word_p;                            \
+} while (0)
+
+#else				/* __LITTLE_ENDIAN - simple memcpy */
+#define SEP_SW_DESC_COPY_TO_SEP(queue_desc_p, spad_desc_p)                     \
+	memcpy(queue_desc_p, spad_desc_p, SEP_SW_DESC_WORD_SIZE<<2)
+
+#define SEP_SW_DESC_COPY_FROM_SEP(spad_desc_p, queue_desc_p)                 \
+	memcpy(spad_desc_p, queue_desc_p, SEP_SW_DESC_WORD_SIZE<<2)
+#endif
+
+#endif /*_SEP_SW_DESC_H_*/
diff --git a/drivers/staging/sep54/sep_sysfs.c b/drivers/staging/sep54/sep_sysfs.c
new file mode 100644
index 0000000..9e6448b
--- /dev/null
+++ b/drivers/staging/sep54/sep_sysfs.c
@@ -0,0 +1,468 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+
+#define SEP_LOG_CUR_COMPONENT SEP_LOG_MASK_SYSFS
+
+#include "dx_driver.h"
+#include "dx_driver_abi.h"
+#include "desc_mgr.h"
+#include "sep_log.h"
+#include "sep_sysfs.h"
+
+#define MAX_QUEUE_NAME_LEN 50
+
+struct sep_stats {
+	spinlock_t stat_lock;
+	unsigned long samples_cnt;	/* Total number of samples */
+	unsigned long long accu_time;/* Accum. samples time (for avg. calc.) */
+	unsigned long long min_time;
+	unsigned long long max_time;
+	/* all times in nano-sec. */
+};
+
+#define DESC_TYPE_NUM (1<<SEP_SW_DESC_TYPE_BIT_SIZE)
+static const char *desc_names[DESC_TYPE_NUM] = {
+	"NULL",			/*SEP_SW_DESC_TYPE_NULL */
+	"CRYPTO",		/*SEP_SW_DESC_TYPE_CRYPTO_OP */
+	"MSG",			/*SEP_SW_DESC_TYPE_MSG_OP */
+	/* Next 12 types are invalid */
+	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+	"DEBUG"			/*SEP_SW_DESC_TYPE_DEBUG */
+};
+
+struct kobj_attribute *queue_size[SEP_MAX_NUM_OF_DESC_Q];
+struct attribute *queue_attrs[SEP_MAX_NUM_OF_DESC_Q];
+
+struct sep_stats drv_lat_stats[SEP_MAX_NUM_OF_DESC_Q][DXDI_IOC_NR_MAX + 1];
+struct sep_stats sep_lat_stats[SEP_MAX_NUM_OF_DESC_Q][DESC_TYPE_NUM];
+
+/*
+ * Structure used to create a directory and its attributes in sysfs
+ */
+struct sys_dir {
+	struct kobject *sys_dir_kobj;
+	struct attribute_group sys_dir_attr_group;
+	struct attribute **sys_dir_attr_list;
+	int num_of_attrs;
+	struct sep_drvdata *drvdata;	/* Associated driver context */
+};
+
+/* directory initialization*/
+static int sys_init_dir(struct sys_dir *sys_dir, struct sep_drvdata *drvdata,
+			struct kobject *parent_dir_kobj,
+			const char *dir_name,
+			struct kobj_attribute *attrs, int num_of_attrs);
+
+/* directory deinitialization */
+static void sys_free_dir(struct sys_dir *sys_dir);
+
+/* top level directory structure */
+struct sys_dir sys_top_dir;
+
+/* queue level directory structures array */
+struct sys_dir sys_queue_dirs[SEP_MAX_NUM_OF_DESC_Q];
+
+/**************************************
+ * Statistics functions section       *
+ **************************************/
+
+static void update_stats(struct sep_stats *stats,
+			 unsigned long long start_ns, unsigned long long end_ns)
+{
+	unsigned long long delta;
+	unsigned long flags;
+
+	spin_lock_irqsave(&(stats->stat_lock), flags);
+
+	delta = end_ns - start_ns;
+	stats->samples_cnt++;
+	stats->accu_time += delta;
+	stats->min_time = min(delta, stats->min_time);
+	stats->max_time = max(delta, stats->max_time);
+
+	spin_unlock_irqrestore(&(stats->stat_lock), flags);
+}
+
+void sysfs_update_drv_stats(unsigned int qid, unsigned int ioctl_cmd_type,
+			    unsigned long long start_ns,
+			    unsigned long long end_ns)
+{
+	if ((qid >= SEP_MAX_NUM_OF_DESC_Q) ||
+	    (ioctl_cmd_type > DXDI_IOC_NR_MAX)) {
+		pr_err("IDs out of range: qid=%d , ioctl_cmd=%d\n",
+			    qid, ioctl_cmd_type);
+		return;
+	}
+
+	update_stats(&(drv_lat_stats[qid][ioctl_cmd_type]), start_ns, end_ns);
+}
+
+void sysfs_update_sep_stats(unsigned int qid, enum sep_sw_desc_type desc_type,
+			    unsigned long long start_ns,
+			    unsigned long long end_ns)
+{
+	if ((qid >= SEP_MAX_NUM_OF_DESC_Q) || (desc_type >= DESC_TYPE_NUM)) {
+		pr_err("IDs out of range: qid=%d , descriptor_type=%d\n",
+			    qid, desc_type);
+		return;
+	}
+	update_stats(&(sep_lat_stats[qid][desc_type]), start_ns, end_ns);
+}
+
+/* compute queue number based by kobject passed to attribute show function */
+static int sys_get_queue_num(struct kobject *kobj, struct sys_dir *dirs)
+{
+	int i;
+
+	for (i = 0; i < SEP_MAX_NUM_OF_DESC_Q; ++i) {
+		if (dirs[i].sys_dir_kobj == kobj)
+			break;
+	}
+
+	return i;
+}
+
+static struct sep_drvdata *sys_get_drvdata(struct kobject *kobj)
+{
+	/* TODO: supporting multiple SeP devices would require avoiding
+	 * global "top_dir" and finding associated "top_dir" by traversing
+	 * up the tree to the kobject which matches one of the top_dir's */
+	return sys_top_dir.drvdata;
+}
+
+/**************************************
+ * Attributes show functions section  *
+ **************************************/
+
+static ssize_t sys_fw_ver_show(struct kobject *kobj,
+			       struct kobj_attribute *attr, char *buf)
+{
+	struct sep_drvdata *drvdata = sys_get_drvdata(kobj);
+	return sprintf(buf,
+		       "ROM_VER=0x%08X\nFW_VER=0x%08X\n",
+		       drvdata->rom_ver, drvdata->fw_ver);
+}
+
+static ssize_t sys_queue_size_show(struct kobject *kobj,
+				   struct kobj_attribute *attr, char *buf)
+{
+
+	return sprintf(buf, "<not supported>\n");
+}
+
+static ssize_t sys_queue_dump_show(struct kobject *kobj,
+				   struct kobj_attribute *attr, char *buf)
+{
+#ifdef DESCQ_DUMP_SUPPORT
+	int i;
+
+	i = sys_get_queue_num(kobj, (struct sys_dir *)&sys_queue_dirs);
+#endif
+
+	return sprintf(buf, "DescQ dump not supported, yet.\n");
+}
+
+/* time from write to read is measured */
+static ssize_t sys_queue_stats_drv_lat_show(struct kobject *kobj,
+					    struct kobj_attribute *attr,
+					    char *buf)
+{
+	u64 min_usec, max_usec, avg_usec;
+	int qid, i;
+	char *cur_buf_pos = buf;
+
+	qid = sys_get_queue_num(kobj, (struct sys_dir *)&sys_queue_dirs);
+
+	cur_buf_pos += sprintf(cur_buf_pos,
+			       "ioctl#\tmin[us]\tavg[us]\tmax[us]\t#samples\n");
+
+	if (qid >= SEP_MAX_NUM_OF_DESC_Q) {
+		pr_err("ID out of range: qid=%d\n", qid);
+		return 0;
+	}
+
+	for (i = 0; i < DXDI_IOC_NR_MAX + 1; i++) {
+		/* Because we are doing 64 bit (long long) division we
+		 * need to explicitly invoke do_div() */
+		if (drv_lat_stats[qid][i].samples_cnt > 0) {
+			min_usec = drv_lat_stats[qid][i].min_time;
+			do_div(min_usec, 1000);	/* result goes into dividend */
+			max_usec = drv_lat_stats[qid][i].max_time;
+			do_div(max_usec, 1000);
+			avg_usec = drv_lat_stats[qid][i].accu_time;
+			do_div(avg_usec, drv_lat_stats[qid][i].samples_cnt);
+			do_div(avg_usec, 1000);
+		} else {
+			min_usec = 0;
+			max_usec = 0;
+			avg_usec = 0;
+		}
+
+		cur_buf_pos += sprintf(cur_buf_pos,
+				       "%u:\t%6llu\t%6llu\t%6llu\t%7lu\n", i,
+				       min_usec, avg_usec, max_usec,
+				       drv_lat_stats[qid][i].samples_cnt);
+	}
+	return cur_buf_pos - buf;
+}
+
+/* time from descriptor enqueue to interrupt is measured */
+static ssize_t sys_queue_stats_sep_lat_show(struct kobject *kobj,
+					    struct kobj_attribute *attr,
+					    char *buf)
+{
+	u64 min_usec, max_usec, avg_usec;
+	int qid, i, buf_len;
+	char *line;
+
+	buf_len = sprintf(buf,
+			  "desc-type\tmin[us]\tavg[us]\tmax[us]\t#samples\n");
+
+	qid = sys_get_queue_num(kobj, (struct sys_dir *)&sys_queue_dirs);
+
+	if (qid >= SEP_MAX_NUM_OF_DESC_Q) {
+		pr_err("ID out of range: qid=%d\n", qid);
+		return 0;
+	}
+
+	line = kzalloc(256 * sizeof(char), GFP_KERNEL);
+
+	if (line == NULL) {
+		pr_err("Memory allocation failed\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < DESC_TYPE_NUM; ++i) {
+		if (desc_names[i] != NULL) {	/*Only if valid desc. type */
+			/* Because we are doing 64 bit (long long) division we*
+			 * need to explicitly invoke do_div() */
+			if (sep_lat_stats[qid][i].samples_cnt > 0) {
+				min_usec = sep_lat_stats[qid][i].min_time;
+				/* result goes into dividend */
+				do_div(min_usec, 1000);
+				max_usec = sep_lat_stats[qid][i].max_time;
+				do_div(max_usec, 1000);
+				avg_usec = sep_lat_stats[qid][i].accu_time;
+				do_div(avg_usec,
+				       sep_lat_stats[qid][i].samples_cnt);
+				do_div(avg_usec, 1000);
+			} else {
+				min_usec = 0;
+				max_usec = 0;
+				avg_usec = 0;
+			}
+
+			buf_len += sprintf(line,
+					   "%s\t\t%6llu\t%6llu\t%6llu\t%7lu\n",
+					   desc_names[i], min_usec, avg_usec,
+					   max_usec,
+					   sep_lat_stats[qid][i].samples_cnt);
+			strcat(buf, line);
+		}
+	}
+
+	kfree(line);
+
+	return buf_len;
+}
+
+/********************************************************
+ *		SYSFS objects				*
+ ********************************************************/
+
+/* TOP LEVEL ATTRIBUTES */
+
+static struct kobj_attribute sys_top_level_attrs[] = {
+	__ATTR(fw_ver, 0444, sys_fw_ver_show, NULL),
+#ifdef SEP_HWK_UNIT_TEST
+	__ATTR(hwk_self_test, 0664, sys_hwk_st_show, sys_hwk_st_start)
+#endif
+};
+
+struct kobj_attribute sys_queue_level_attrs[] = {
+
+	__ATTR(size, 0444, sys_queue_size_show, NULL),
+	__ATTR(dump, 0444, sys_queue_dump_show, NULL),
+	__ATTR(drv_lat, 0444, sys_queue_stats_drv_lat_show, NULL),
+	__ATTR(sep_lat, 0444, sys_queue_stats_sep_lat_show, NULL)
+};
+
+int sys_init_dir(struct sys_dir *sys_dir, struct sep_drvdata *drvdata,
+		 struct kobject *parent_dir_kobj, const char *dir_name,
+		 struct kobj_attribute *attrs, int num_of_attrs)
+{
+	int i;
+
+	memset(sys_dir, 0, sizeof(struct sys_dir));
+
+	sys_dir->drvdata = drvdata;
+
+	/* initialize directory kobject */
+	sys_dir->sys_dir_kobj =
+	    kobject_create_and_add(dir_name, parent_dir_kobj);
+
+	if (!(sys_dir->sys_dir_kobj))
+		return -ENOMEM;
+	/* allocate memory for directory's attributes list */
+	sys_dir->sys_dir_attr_list =
+	    kzalloc(sizeof(struct attribute *)*(num_of_attrs + 1),
+		    GFP_KERNEL);
+
+	if (!(sys_dir->sys_dir_attr_list)) {
+		kobject_put(sys_dir->sys_dir_kobj);
+		return -ENOMEM;
+	}
+
+	sys_dir->num_of_attrs = num_of_attrs;
+
+	/* initialize attributes list */
+	for (i = 0; i < num_of_attrs; ++i)
+		sys_dir->sys_dir_attr_list[i] = &(attrs[i].attr);
+
+	/* last list entry should be NULL */
+	sys_dir->sys_dir_attr_list[num_of_attrs] = NULL;
+
+	sys_dir->sys_dir_attr_group.attrs = sys_dir->sys_dir_attr_list;
+
+	return sysfs_create_group(sys_dir->sys_dir_kobj,
+				  &(sys_dir->sys_dir_attr_group));
+}
+
+void sys_free_dir(struct sys_dir *sys_dir)
+{
+	if (!sys_dir)
+		return;
+
+	kfree(sys_dir->sys_dir_attr_list);
+
+	if (sys_dir->sys_dir_kobj)
+		kobject_put(sys_dir->sys_dir_kobj);
+}
+
+/* free sysfs directory structures */
+void sep_free_sysfs(void)
+{
+	int j;
+
+	for (j = 0; (j < SEP_MAX_NUM_OF_DESC_Q) &&
+	     (sys_queue_dirs[j].sys_dir_kobj != NULL); ++j) {
+		sys_free_dir(&(sys_queue_dirs[j]));
+	}
+
+	if (sys_top_dir.sys_dir_kobj != NULL)
+		sys_free_dir(&sys_top_dir);
+
+}
+
+/* initialize sysfs directories structures */
+int sep_setup_sysfs(struct kobject *sys_dev_kobj, struct sep_drvdata *drvdata)
+{
+	int retval = 0, i, j;
+	char queue_name[MAX_QUEUE_NAME_LEN];
+
+	pr_debug("setup sysfs under %s\n", sys_dev_kobj->name);
+	/* reset statistics */
+	memset(drv_lat_stats, 0, sizeof(drv_lat_stats));
+	memset(sep_lat_stats, 0, sizeof(sep_lat_stats));
+	for (i = 0; i < SEP_MAX_NUM_OF_DESC_Q; i++) {
+		for (j = 0; j < DXDI_IOC_NR_MAX + 1; j++) {
+			spin_lock_init(&drv_lat_stats[i][j].stat_lock);
+			/* set min_time to largest ULL value so first sample
+			 * becomes the minimum. */
+			drv_lat_stats[i][j].min_time = (unsigned long long)-1;
+		}
+		for (j = 0; j < DESC_TYPE_NUM; j++) {
+			spin_lock_init(&sep_lat_stats[i][j].stat_lock);
+			/* set min_time to largest ULL value so first sample
+			 * becomes the minimum. */
+			sep_lat_stats[i][j].min_time = (unsigned long long)-1;
+		}
+	}
+
+	/* zero all directories structures */
+	memset(&sys_top_dir, 0, sizeof(struct sys_dir));
+	memset(&sys_queue_dirs, 0,
+	       sizeof(struct sys_dir) * SEP_MAX_NUM_OF_DESC_Q);
+
+	/* initialize the top directory */
+	retval =
+	    sys_init_dir(&sys_top_dir, drvdata, sys_dev_kobj,
+			 "sep_info", sys_top_level_attrs,
+			 sizeof(sys_top_level_attrs) /
+			 sizeof(struct kobj_attribute));
+
+	if (retval)
+		return -retval;
+
+	/* initialize decriptor queues directories structures */
+	for (i = 0; i < SEP_MAX_NUM_OF_DESC_Q; ++i) {
+
+		sprintf(queue_name, "queue%d", i);
+
+		retval = sys_init_dir(&(sys_queue_dirs[i]), drvdata,
+				      sys_top_dir.sys_dir_kobj, queue_name,
+				      sys_queue_level_attrs,
+				      sizeof(sys_queue_level_attrs) /
+				      sizeof(struct kobj_attribute));
+
+		if (retval)
+			break;
+
+	}
+
+	if (retval)
+		sep_free_sysfs();
+
+	return -retval;
+}
+
+#ifdef SEP_SYSFS_UNIT_TEST
+
+static int __init sep_init(void)
+{
+	int retval;
+
+	pr_info("i am loading...\n");
+
+	retval = sep_setup_sysfs(kernel_kobj);
+
+	return retval;
+}
+
+static void __exit sep_exit(void)
+{
+	sep_free_sysfs();
+	pr_info("i am unloading...\n");
+}
+
+module_init(sep_init);
+module_exit(sep_exit);
+
+#endif
diff --git a/drivers/staging/sep54/sep_sysfs.h b/drivers/staging/sep54/sep_sysfs.h
new file mode 100644
index 0000000..f8e1959
--- /dev/null
+++ b/drivers/staging/sep54/sep_sysfs.h
@@ -0,0 +1,49 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+#ifndef _SEP_SYSFS_H_
+#define _SEP_SYSFS_H_
+
+int sep_setup_sysfs(struct kobject *sys_dev_kobj, struct sep_drvdata *drvdata);
+void sep_free_sysfs(void);
+
+void sysfs_update_drv_stats(unsigned int qid, unsigned int ioctl_cmd_type,
+			    unsigned long long start_ns,
+			    unsigned long long end_ns);
+
+void sysfs_update_sep_stats(unsigned int qid, enum sep_sw_desc_type desc_type,
+			    unsigned long long start_ns,
+			    unsigned long long end_ns);
+
+#ifdef SEP_HWK_UNIT_TEST
+ssize_t sys_hwk_st_show(struct kobject *kobj, struct kobj_attribute *attr,
+			char *buf);
+ssize_t sys_hwk_st_start(struct kobject *kobj, struct kobj_attribute *attr,
+			 const char *buf, size_t count);
+#endif
+
+
+#endif /*_SEP_SYSFS_H_*/
diff --git a/drivers/staging/sep54/sepapp.c b/drivers/staging/sep54/sepapp.c
new file mode 100644
index 0000000..89727cd
--- /dev/null
+++ b/drivers/staging/sep54/sepapp.c
@@ -0,0 +1,1176 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+#define SEP_LOG_CUR_COMPONENT SEP_LOG_MASK_SEP_APP
+
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+/*#include <linux/export.h>*/
+#include "dx_driver.h"
+#include "dx_sepapp_kapi.h"
+#include "sep_applets.h"
+#include "sep_power.h"
+#include "crypto_api.h"
+#include "sepapp.h"
+#include "sepfs.h"
+
+/* Global drvdata to be used by kernel clients via dx_sepapp_ API */
+static struct sep_drvdata *kapps_drvdata;
+
+/**
+ * sepapp_params_cleanup() - Clean resources allocated for SeP Applet paramters
+ * @client_ctx:	 The associated client context for this operation
+ * @dxdi_params:	The client parameters as passed from the DriverInterface
+ * @sw_desc_params:	The client parameters DMA information for SeP
+ *			(required for value copy back)
+ * @local_memref_idx:	Reference to array of SEPAPP_MAX_PARAMS
+ *			memory reference indexes.
+ *			Set for parameters of MEMREF type.
+ * @mlli_table:	Reference to array of SEPAPP_MAX_PARAMS MLLI tables
+ *		objects to be used for MEMREF parameters.
+ *
+ * Clean resources allocated for SeP Applet paramters
+ * (primarily, MLLI tables and temporary registered user memory)
+ * Returns void
+ */
+static void sepapp_params_cleanup(struct sep_client_ctx *client_ctx,
+				  struct dxdi_sepapp_params *dxdi_params,
+				  struct dxdi_sepapp_kparams *dxdi_kparams,
+				  struct sepapp_client_params *sw_desc_params,
+				  struct client_dma_buffer *local_dma_objs[],
+				  struct mlli_tables_list mlli_tables[])
+{
+	int i;
+	int memref_idx;
+	struct queue_drvdata *drvdata = client_ctx->drv_data;
+	enum dxdi_sepapp_param_type *params_types;
+	struct dxdi_val_param *cur_val;
+
+	if (dxdi_params != NULL)
+		params_types = dxdi_params->params_types;
+	else if (dxdi_kparams != NULL)	/* kernel parameters */
+		params_types = dxdi_kparams->params_types;
+	else			/* No parameters - nothing to clean */
+		return;
+
+	for (i = 0; (i < SEPAPP_MAX_PARAMS); i++) {
+		if (params_types[i] == DXDI_SEPAPP_PARAM_MEMREF) {
+			/* Can call for all (unitialized MLLI ignored) */
+			llimgr_destroy_mlli(drvdata->sep_data->llimgr,
+					    mlli_tables + i);
+			memref_idx =
+			    DMA_OBJ_TO_MEMREF_IDX(client_ctx,
+						  local_dma_objs[i]);
+			release_dma_obj(client_ctx, local_dma_objs[i]);
+			if ((local_dma_objs[i] != NULL) &&
+			    (((dxdi_params != NULL) && (memref_idx !=
+							dxdi_params->params[i].
+							memref.ref_id)) ||
+			     (dxdi_kparams != NULL))) {
+				/* There is DMA object to free
+				 * (either user params of temp. reg. or
+				 * kernel params - always temp.) */
+				(void)free_client_memref(client_ctx,
+							 memref_idx);
+			}
+			local_dma_objs[i] = NULL;
+		} else if (params_types[i] == DXDI_SEPAPP_PARAM_VAL) {
+			if (dxdi_params != NULL)
+				cur_val = &dxdi_params->params[i].val;
+			else	/* kernel parameters */
+				cur_val = &dxdi_kparams->params[i].val;
+			if (cur_val->copy_dir & DXDI_DATA_FROM_DEVICE) {
+				/* Copy back output values */
+				cur_val->data[0] =
+				    sw_desc_params->params[i].val.data[0];
+				cur_val->data[1] =
+				    sw_desc_params->params[i].val.data[1];
+			}
+		}
+	}
+}
+
+static int kernel_memref_to_sw_desc_memref(struct dxdi_kmemref *cur_memref,
+					   struct sep_client_ctx *client_ctx,
+					   u8 *sep_memref_type_p,
+					   struct seprpc_memref *sep_memref_p,
+					   struct client_dma_buffer
+					   **local_dma_obj_pp,
+					   struct mlli_tables_list
+					   *mlli_table_p)
+{
+	void *llimgr = client_ctx->drv_data->sep_data->llimgr;
+	enum dma_data_direction dma_dir;
+	int memref_idx;
+	int rc = 0;
+
+	/* convert DMA direction to enum dma_data_direction */
+	dma_dir = dxdi_data_dir_to_dma_data_dir(cur_memref->dma_direction);
+	if (unlikely(dma_dir == DMA_NONE)) {
+		pr_err("Invalid DMA direction (%d) for param.\n",
+			    cur_memref->dma_direction);
+		return -EINVAL;
+	}
+
+	/* For kernel parameters always temp. registration */
+	memref_idx = register_client_memref(client_ctx,
+					    NULL, cur_memref->sgl,
+					    cur_memref->nbytes, dma_dir);
+	if (unlikely(!IS_VALID_MEMREF_IDX(memref_idx))) {
+		pr_err("Failed temp. memory registration (rc=%d)\n",
+			    memref_idx);
+		return -ENOMEM;
+	}
+	*local_dma_obj_pp = acquire_dma_obj(client_ctx, memref_idx);
+	if (*local_dma_obj_pp == NULL)
+		rc = -EINVAL;
+	else
+		/* MLLI table creation */
+		rc = llimgr_create_mlli(llimgr,
+				mlli_table_p, dma_dir, *local_dma_obj_pp, 0, 0);
+
+	if (likely(rc == 0)) {
+		llimgr_mlli_to_seprpc_memref(mlli_table_p, sep_memref_p);
+		*sep_memref_type_p = SEPAPP_PARAM_TYPE_MEMREF;
+	}
+
+	return rc;		/* Cleanup on error in caller */
+}
+
+static int user_memref_to_sw_desc_memref(struct dxdi_memref *cur_memref,
+					 struct sep_client_ctx *client_ctx,
+					 u8 *sep_memref_type_p,
+					 struct seprpc_memref *sep_memref_p,
+					 struct client_dma_buffer
+					 **local_dma_obj_pp,
+					 struct mlli_tables_list *mlli_table_p)
+{
+	void *llimgr = client_ctx->drv_data->sep_data->llimgr;
+	enum dma_data_direction dma_dir;
+	int memref_idx;
+	int rc = 0;
+
+	/* convert DMA direction to enum dma_data_direction */
+	dma_dir = dxdi_data_dir_to_dma_data_dir(cur_memref->dma_direction);
+	if (unlikely(dma_dir == DMA_NONE)) {
+		pr_err("Invalid DMA direction (%d) for param.\n",
+			    cur_memref->dma_direction);
+		return -EINVAL;
+	}
+
+	if (IS_VALID_MEMREF_IDX(cur_memref->ref_id)) {
+		/* Registered mem. */
+		*local_dma_obj_pp =
+		    acquire_dma_obj(client_ctx, cur_memref->ref_id);
+		if (unlikely(*local_dma_obj_pp == NULL)) {
+			pr_err("Failed to acquire DMA obj. at ref_id=%d\n",
+				    cur_memref->ref_id);
+			return -EINVAL;
+		}
+		if ((cur_memref->start_or_offset == 0) &&
+		    (cur_memref->size == (*local_dma_obj_pp)->buf_size)) {
+			/* Whole registered mem. */
+			memref_idx = cur_memref->ref_id;
+		} else {	/* Partial reference */
+			/* Handle as unregistered memory at
+			 * different address/len. */
+			INVALIDATE_MEMREF_IDX(cur_memref->ref_id);
+			cur_memref->start_or_offset += (unsigned long)
+			    (*local_dma_obj_pp)->user_buf_ptr;
+			/* Release base memref - not used */
+			release_dma_obj(client_ctx, *local_dma_obj_pp);
+			*local_dma_obj_pp = NULL;
+		}
+	}
+	/* The following is not "else" of previous, because
+	 * previous block may invalidate ref_id for partial
+	 * reference to cause this temp. registartion. */
+	if (!IS_VALID_MEMREF_IDX(cur_memref->ref_id)) {
+		/* Temp. registration */
+		memref_idx =
+		    register_client_memref(client_ctx,
+					   (u8 __user *)(uintptr_t) cur_memref->
+					   start_or_offset, NULL,
+					   cur_memref->size, dma_dir);
+		if (unlikely(!IS_VALID_MEMREF_IDX(memref_idx))) {
+			pr_err("Failed temp. memory " "registration\n");
+			return -ENOMEM;
+		}
+		*local_dma_obj_pp = acquire_dma_obj(client_ctx, memref_idx);
+	}
+
+	if (*local_dma_obj_pp == NULL)
+		rc = -EINVAL;
+	else
+		/* MLLI table creation */
+		rc = llimgr_create_mlli(llimgr,
+				mlli_table_p, dma_dir, *local_dma_obj_pp, 0, 0);
+
+	if (likely(rc == 0)) {
+		llimgr_mlli_to_seprpc_memref(mlli_table_p, sep_memref_p);
+		*sep_memref_type_p = SEPAPP_PARAM_TYPE_MEMREF;
+	}
+
+	return rc;		/* Cleanup on error in caller */
+}
+
+/**
+ * dxdi_sepapp_params_to_sw_desc_params() - Convert the client input parameters
+ * @client_ctx:	 The associated client context for this operation
+ * @dxdi_params:	The client parameters as passed from the DriverInterface
+ * @sw_desc_params:	The returned client parameters DMA information for SeP
+ * @local_memref_idx:	Reference to array of SEPAPP_MAX_PARAMS
+ *			memory reference indexes.
+ *			Set for parameters of MEMREF type.
+ * @mlli_table:	Reference to array of SEPAPP_MAX_PARAMS MLLI tables
+ *		objects to be used for MEMREF parameters.
+ *
+ * Convert the client input parameters array from dxdi format to the
+ * SW descriptor format while creating the required MLLI tables
+ * Returns int
+ */
+static int dxdi_sepapp_params_to_sw_desc_params(struct sep_client_ctx
+						*client_ctx,
+						struct dxdi_sepapp_params
+						*dxdi_params,
+						struct dxdi_sepapp_kparams
+						*dxdi_kparams,
+						struct sepapp_client_params
+						*sw_desc_params,
+						struct client_dma_buffer
+						*local_dma_objs[],
+						struct mlli_tables_list
+						mlli_tables[])
+{
+	enum dxdi_sepapp_param_type *params_types;
+	struct dxdi_val_param *cur_val;
+	int i;
+	int rc = 0;
+
+	/* Init./clean arrays for proper cleanup in case of failure */
+	for (i = 0; i < SEPAPP_MAX_PARAMS; i++) {
+		MLLI_TABLES_LIST_INIT(mlli_tables + i);
+		local_dma_objs[i] = NULL;
+		sw_desc_params->params_types[i] = SEPAPP_PARAM_TYPE_NULL;
+	}
+
+	if (dxdi_params != NULL)
+		params_types = dxdi_params->params_types;
+	else if (dxdi_kparams != NULL)	/* kernel parameters */
+		params_types = dxdi_kparams->params_types;
+	else	/* No parameters - nothing to beyond initialization (above) */
+		return 0;
+
+	/* Convert each parameter based on its type */
+	for (i = 0; (i < SEPAPP_MAX_PARAMS) && (rc == 0); i++) {
+		switch (params_types[i]) {
+
+		case DXDI_SEPAPP_PARAM_MEMREF:
+			if (dxdi_params != NULL)
+				rc = user_memref_to_sw_desc_memref
+				    (&dxdi_params->params[i].memref, client_ctx,
+				     &sw_desc_params->params_types[i],
+				     &(sw_desc_params->params[i].memref),
+				     local_dma_objs + i, mlli_tables + i);
+			else
+				rc = kernel_memref_to_sw_desc_memref
+				    (&dxdi_kparams->params[i].kmemref,
+				     client_ctx,
+				     &sw_desc_params->params_types[i],
+				     &(sw_desc_params->params[i].memref),
+				     local_dma_objs + i, mlli_tables + i);
+			break;	/* from switch */
+
+		case DXDI_SEPAPP_PARAM_VAL:
+			if (dxdi_params != NULL)
+				cur_val = &dxdi_params->params[i].val;
+			else	/* kernel parameters */
+				cur_val = &dxdi_kparams->params[i].val;
+
+			sw_desc_params->params[i].val.dir = SEPAPP_DIR_NULL;
+			if (cur_val->copy_dir & DXDI_DATA_TO_DEVICE) {
+				sw_desc_params->params[i].val.dir |=
+				    SEPAPP_DIR_IN;
+				sw_desc_params->params[i].val.data[0] =
+				    cur_val->data[0];
+				sw_desc_params->params[i].val.data[1] =
+				    cur_val->data[1];
+			}
+			if (cur_val->copy_dir & DXDI_DATA_FROM_DEVICE) {
+				sw_desc_params->params[i].val.dir |=
+				    SEPAPP_DIR_OUT;
+			}
+			sw_desc_params->params_types[i] = SEPAPP_PARAM_TYPE_VAL;
+			break;	/* from switch */
+
+		case DXDI_SEPAPP_PARAM_NULL:
+			sw_desc_params->params_types[i] =
+			    SEPAPP_PARAM_TYPE_NULL;
+			break;
+
+		default:
+			pr_err(
+				"Invalid parameter type (%d) for #%d\n",
+				params_types[i], i);
+			rc = -EINVAL;
+		}		/*switch */
+	}			/*for parameters */
+
+	/* Cleanup in case of error */
+	if (rc != 0)
+		sepapp_params_cleanup(client_ctx, dxdi_params, dxdi_kparams,
+				      sw_desc_params, local_dma_objs,
+				      mlli_tables);
+	return rc;
+}
+
+/**
+ * sepapp_session_open() - Open a session with given SeP Applet
+ * @op_ctx:
+ * @sepapp_uuid:	 Applet UUID
+ * @auth_method:	 Client authentication method
+ * @auth_data:	 Authentication data
+ * @app_auth_data:	 Applet specific authentication data
+ * @session_id:	 Returned allocated session ID
+ * @sep_ret_origin:	 Origin in SeP of error code
+ *
+ * Returns int
+ */
+static int sepapp_session_open(struct sep_op_ctx *op_ctx,
+			       u8 *sepapp_uuid,
+			       u32 auth_method,
+			       void *auth_data,
+			       struct dxdi_sepapp_params *app_auth_data,
+			       struct dxdi_sepapp_kparams *kapp_auth_data,
+			       int *session_id,
+			       enum dxdi_sep_module *sep_ret_origin)
+{
+	int rc;
+	struct sep_app_session *new_session;
+	struct queue_drvdata *drvdata = op_ctx->client_ctx->drv_data;
+	struct client_dma_buffer *local_dma_objs[SEPAPP_MAX_PARAMS];
+	struct mlli_tables_list mlli_tables[SEPAPP_MAX_PARAMS];
+	struct sep_sw_desc desc;
+	struct sepapp_in_params_session_open *sepapp_msg_p;
+
+	/* Verify that given spad_buf size can accomodate the in_params */
+	BUILD_BUG_ON(sizeof(struct sepapp_in_params_session_open) >
+		     USER_SPAD_SIZE);
+
+	op_ctx->op_type = SEP_OP_APP;
+	*sep_ret_origin = DXDI_SEP_MODULE_HOST_DRIVER;
+
+	op_ctx->spad_buf_p = dma_pool_alloc(drvdata->sep_data->spad_buf_pool,
+					    GFP_KERNEL,
+					    &op_ctx->spad_buf_dma_addr);
+	if (unlikely(op_ctx->spad_buf_p == NULL)) {
+		pr_err(
+			    "Failed allocating from spad_buf_pool for SeP Applet Request message\n");
+		INVALIDATE_SESSION_IDX(*session_id);
+		op_ctx->error_info = DXDI_ERROR_NO_RESOURCE;
+		return -ENOMEM;
+	}
+	sepapp_msg_p =
+	    (struct sepapp_in_params_session_open *)op_ctx->spad_buf_p;
+
+	/* Find free session entry */
+	for ((*session_id) = 0,
+	     new_session = &op_ctx->client_ctx->sepapp_sessions[0];
+	     (*session_id < MAX_SEPAPP_SESSION_PER_CLIENT_CTX);
+	     new_session++, (*session_id)++) {
+		mutex_lock(&new_session->session_lock);
+		if (new_session->ref_cnt == 0)
+			break;
+		mutex_unlock(&new_session->session_lock);
+	}
+	if (*session_id == MAX_SEPAPP_SESSION_PER_CLIENT_CTX) {
+		pr_err(
+			    "Could not allocate session entry. all %u are in use.\n",
+			    MAX_SEPAPP_SESSION_PER_CLIENT_CTX);
+		INVALIDATE_SESSION_IDX(*session_id);
+		op_ctx->error_info = DXDI_ERROR_NO_RESOURCE;
+		return -ENOMEM;
+	}
+
+	new_session->ref_cnt = 1;	/* To be decremented by close_session */
+	/* Invalidate session ID so it cannot ne used until opening the session
+	 * is actually opened */
+	new_session->sep_session_id = SEP_SESSION_ID_INVALID;
+	mutex_unlock(&new_session->session_lock);
+
+	/* Convert parameters to SeP Applet format */
+	rc = dxdi_sepapp_params_to_sw_desc_params(op_ctx->client_ctx,
+						  app_auth_data, kapp_auth_data,
+						  &sepapp_msg_p->client_params,
+						  local_dma_objs, mlli_tables);
+
+	if (likely(rc == 0)) {
+		memcpy(&sepapp_msg_p->app_uuid, sepapp_uuid, SEPAPP_UUID_SIZE);
+		sepapp_msg_p->auth_method = cpu_to_le32(auth_method);
+		/* TODO: Fill msg.auth_data as required for supported methods,
+		 * e.g. client application ID */
+
+		/* Pack SW descriptor */
+		/* Set invalid session ID so in case of error the ID set
+		 * in the session context remains invalid. */
+		desc_q_pack_app_req_desc(&desc, op_ctx,
+					 SEPAPP_REQ_TYPE_SESSION_OPEN,
+					 SEP_SESSION_ID_INVALID,
+					 op_ctx->spad_buf_dma_addr);
+		/* Associate operation with the session */
+		op_ctx->session_ctx = new_session;
+		op_ctx->internal_error = false;
+		rc = desc_q_enqueue(drvdata->desc_queue, &desc, true);
+		if (likely(!IS_DESCQ_ENQUEUE_ERR(rc)))
+			rc = 0;
+	}
+
+	if (likely(rc == 0)) {
+		rc = wait_for_sep_op_result(op_ctx);
+		/* Process descriptor completion */
+		if (likely(rc == 0)) {
+			if ((op_ctx->error_info != 0) &&
+			    (op_ctx->internal_error)) {
+				*sep_ret_origin = DXDI_SEP_MODULE_APP_MGR;
+			} else {	/* Success or error from applet */
+				*sep_ret_origin = DXDI_SEP_MODULE_APP;
+			}
+		} else {	/* Descriptor processing failed */
+			*sep_ret_origin = DXDI_SEP_MODULE_SW_QUEUE;
+			op_ctx->error_info = DXDI_ERROR_INTERNAL;
+		}
+	}
+
+	if (unlikely((rc != 0) || (op_ctx->error_info != 0))) {
+		mutex_lock(&new_session->session_lock);
+		new_session->ref_cnt = 0;
+		mutex_unlock(&new_session->session_lock);
+		INVALIDATE_SESSION_IDX(*session_id);
+	}
+	op_ctx->op_state = USER_OP_NOP;
+	sepapp_params_cleanup(op_ctx->client_ctx, app_auth_data, kapp_auth_data,
+			      &sepapp_msg_p->client_params, local_dma_objs,
+			      mlli_tables);
+
+	return rc;
+}
+
+/**
+ * sepapp_session_close() - Close given SeP Applet context
+ * @op_ctx:
+ * @session_id:
+ *
+ * Returns int
+ */
+int sepapp_session_close(struct sep_op_ctx *op_ctx, int session_id)
+{
+	struct sep_client_ctx *client_ctx = op_ctx->client_ctx;
+	struct sep_app_session *session_ctx =
+	    &client_ctx->sepapp_sessions[session_id];
+	struct queue_drvdata *drvdata = client_ctx->drv_data;
+	struct sep_sw_desc desc;
+	int rc;
+	u16 sep_session_id;
+
+#ifdef SEP_RUNTIME_PM
+	dx_sep_pm_runtime_get();
+#endif
+	if (!IS_VALID_SESSION_IDX(session_id)) {
+		pr_err("Invalid session_id=%d\n", session_id);
+		rc = -EINVAL;
+		goto end;
+	}
+	op_ctx->op_type = SEP_OP_APP;
+
+	mutex_lock(&session_ctx->session_lock);
+
+	if (!IS_VALID_SESSION_CTX(session_ctx)) {
+		mutex_unlock(&session_ctx->session_lock);
+		pr_err("Invalid session ID %d for user %p\n",
+			    session_id, client_ctx);
+		op_ctx->error_info = DXDI_ERROR_BAD_CTX;
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (session_ctx->ref_cnt > 1) {
+		mutex_unlock(&session_ctx->session_lock);
+		pr_err("Invoked while still has pending commands!\n");
+		op_ctx->error_info = DXDI_ERROR_FATAL;
+		rc = -EBUSY;
+		goto end;
+	}
+
+	sep_session_id = session_ctx->sep_session_id;/* save before release */
+	/* Release host resources anyway... */
+	INVALIDATE_SESSION_CTX(session_ctx);
+	mutex_unlock(&session_ctx->session_lock);
+
+	/* Now release session resources on SeP */
+	/* Pack SW descriptor */
+	desc_q_pack_app_req_desc(&desc, op_ctx,
+				 SEPAPP_REQ_TYPE_SESSION_CLOSE, sep_session_id,
+				 0);
+	/* Associate operation with the session */
+	op_ctx->session_ctx = session_ctx;
+	op_ctx->internal_error = false;
+	rc = desc_q_enqueue(drvdata->desc_queue, &desc, true);
+	if (likely(!IS_DESCQ_ENQUEUE_ERR(rc)))
+		rc = wait_for_sep_op_result(op_ctx);
+
+	if (unlikely(rc != 0)) {	/* Not supposed to happen */
+		pr_err(
+			    "Failure is SESSION_CLOSE operation for sep_session_id=%u\n",
+			    session_ctx->sep_session_id);
+		op_ctx->error_info = DXDI_ERROR_FATAL;
+	}
+
+end:
+#ifdef SEP_RUNTIME_PM
+	dx_sep_pm_runtime_put();
+#endif
+	return rc;
+}
+
+static int sepapp_command_invoke(struct sep_op_ctx *op_ctx,
+				 int session_id,
+				 u32 command_id,
+				 struct dxdi_sepapp_params *command_params,
+				 struct dxdi_sepapp_kparams *command_kparams,
+				 enum dxdi_sep_module *sep_ret_origin,
+				 int async)
+{
+	int rc;
+	struct sep_app_session *session_ctx =
+	    &op_ctx->client_ctx->sepapp_sessions[session_id];
+	struct queue_drvdata *drvdata = op_ctx->client_ctx->drv_data;
+	struct sep_sw_desc desc;
+	struct sepapp_in_params_command_invoke *sepapp_msg_p;
+
+	op_ctx->op_type = SEP_OP_APP;
+	/* Verify that given spad_buf size can accomodate the in_params */
+	BUILD_BUG_ON(sizeof(struct sepapp_in_params_command_invoke) >
+		     USER_SPAD_SIZE);
+
+	if (!IS_VALID_SESSION_IDX(session_id)) {
+		pr_err("Invalid session_id=%d\n", session_id);
+		op_ctx->error_info = DXDI_ERROR_NO_RESOURCE;
+		return -EINVAL;
+	}
+	mutex_lock(&session_ctx->session_lock);
+	if (!IS_VALID_SESSION_CTX(session_ctx)) {
+		mutex_unlock(&session_ctx->session_lock);
+		pr_err("Invalid session ID %d for user %p\n",
+			    session_id, op_ctx->client_ctx);
+		op_ctx->error_info = DXDI_ERROR_BAD_CTX;
+		return -EINVAL;
+	}
+	session_ctx->ref_cnt++;	/* Prevent deletion while in use */
+	/* Unlock to allow concurrent session use from different threads */
+	mutex_unlock(&session_ctx->session_lock);
+
+	op_ctx->spad_buf_p = dma_pool_alloc(drvdata->sep_data->spad_buf_pool,
+					    GFP_KERNEL,
+					    &op_ctx->spad_buf_dma_addr);
+	if (unlikely(op_ctx->spad_buf_p == NULL)) {
+		pr_err(
+			    "Failed allocating from spad_buf_pool for SeP Applet Request message\n");
+		op_ctx->error_info = DXDI_ERROR_NO_RESOURCE;
+		rc = -ENOMEM;
+		goto sepapp_command_exit;
+	}
+	sepapp_msg_p =
+	    (struct sepapp_in_params_command_invoke *)op_ctx->spad_buf_p;
+
+	op_ctx->async_info.dxdi_params = command_params;
+	op_ctx->async_info.dxdi_kparams = command_kparams;
+	op_ctx->async_info.sw_desc_params = &sepapp_msg_p->client_params;
+	op_ctx->async_info.session_id = session_id;
+
+	if (async) {
+		wait_event_interruptible(op_ctx->client_ctx->memref_wq,
+			op_ctx->client_ctx->memref_cnt < 1);
+		mutex_lock(&session_ctx->session_lock);
+		op_ctx->client_ctx->memref_cnt++;
+		mutex_unlock(&session_ctx->session_lock);
+	}
+
+	mutex_lock(&drvdata->desc_queue_sequencer);
+	/* Convert parameters to SeP Applet format */
+	rc = dxdi_sepapp_params_to_sw_desc_params(op_ctx->client_ctx,
+					  command_params,
+					  command_kparams,
+					  &sepapp_msg_p->client_params,
+					  op_ctx->async_info.local_dma_objs,
+					  op_ctx->async_info.mlli_tables);
+
+	if (likely(rc == 0)) {
+		sepapp_msg_p->command_id = cpu_to_le32(command_id);
+		desc_q_pack_app_req_desc(&desc, op_ctx,
+					 SEPAPP_REQ_TYPE_COMMAND_INVOKE,
+					 session_ctx->sep_session_id,
+					 op_ctx->spad_buf_dma_addr);
+		/* Associate operation with the session */
+		op_ctx->session_ctx = session_ctx;
+		op_ctx->internal_error = false;
+		op_ctx->op_state = USER_OP_INPROC;
+		rc = desc_q_enqueue(drvdata->desc_queue, &desc, true);
+		if (likely(!IS_DESCQ_ENQUEUE_ERR(rc)))
+			rc = 0;
+
+		if (async && rc != 0) {
+			mutex_lock(&session_ctx->session_lock);
+			op_ctx->client_ctx->memref_cnt--;
+			mutex_unlock(&session_ctx->session_lock);
+		}
+	}
+	mutex_unlock(&drvdata->desc_queue_sequencer);
+
+	if (likely(rc == 0) && !async)
+		rc = wait_for_sep_op_result(op_ctx);
+
+	/* Process descriptor completion */
+	if (likely(rc == 0)) {
+		if ((op_ctx->error_info != 0) && (op_ctx->internal_error)) {
+			*sep_ret_origin = DXDI_SEP_MODULE_APP_MGR;
+		} else {	/* Success or error from applet */
+			*sep_ret_origin = DXDI_SEP_MODULE_APP;
+		}
+	} else {		/* Descriptor processing failed */
+		*sep_ret_origin = DXDI_SEP_MODULE_SW_QUEUE;
+		op_ctx->error_info = DXDI_ERROR_INTERNAL;
+	}
+	if (!async) {
+		op_ctx->op_state = USER_OP_NOP;
+		sepapp_params_cleanup(op_ctx->client_ctx,
+				command_params, command_kparams,
+				&sepapp_msg_p->client_params,
+				op_ctx->async_info.local_dma_objs,
+				op_ctx->async_info.mlli_tables);
+	}
+
+sepapp_command_exit:
+	if (!async) {
+		/* Release session */
+		mutex_lock(&session_ctx->session_lock);
+		session_ctx->ref_cnt--;
+		mutex_unlock(&session_ctx->session_lock);
+	}
+
+	return rc;
+
+}
+
+int sep_ioctl_sepapp_session_open(struct sep_client_ctx *client_ctx,
+				  unsigned long arg)
+{
+	struct dxdi_sepapp_session_open_params __user *user_params =
+	    (struct dxdi_sepapp_session_open_params __user *)arg;
+	struct dxdi_sepapp_session_open_params params;
+	struct sep_op_ctx op_ctx;
+	/* Calculate size of input parameters part */
+	const unsigned long input_size =
+	    offsetof(struct dxdi_sepapp_session_open_params, session_id);
+	int rc;
+
+	/* access permissions to arg was already checked in sep_ioctl */
+	if (__copy_from_user(&params, user_params, input_size)) {
+		pr_err("Failed reading input parameters");
+		return -EFAULT;
+	}
+
+	/* Check the MAC ACL to see if this client is allow to open a session */
+	if (!is_permitted(params.app_uuid, -666))
+		return -EPERM;
+
+	op_ctx_init(&op_ctx, client_ctx);
+	rc = sepapp_session_open(&op_ctx,
+				 params.app_uuid, params.auth_method,
+				 &params.auth_data, &params.app_auth_data, NULL,
+				 &params.session_id, &params.sep_ret_origin);
+
+	/* Copying back from app_auth_data in case of output of "by value"... */
+	if (copy_to_user(&user_params->app_auth_data, &params.app_auth_data,
+			   sizeof(struct dxdi_sepapp_params))
+	    || put_user(params.session_id, &user_params->session_id)
+	    || put_user(params.sep_ret_origin,
+			  &user_params->sep_ret_origin)) {
+		pr_err("Failed writing input parameters");
+		return -EFAULT;
+	}
+
+	put_user(op_ctx.error_info, &(user_params->error_info));
+
+	op_ctx_fini(&op_ctx);
+
+	return rc;
+}
+
+/**
+ * dx_sepapp_session_open() - Open a session with a SeP applet
+ *
+ * @ctx:		SeP client context
+ * @sepapp_uuid:	Target applet UUID
+ * @auth_method:	Session connection authentication method
+ *			(Currently only 0/Public is supported)
+ * @auth_data:		Pointer to authentication data - Should be NULL
+ * @open_params:	Parameters for session opening
+ * @session_id:		Returned session ID (on success)
+ * @ret_origin:		Return code origin
+ *
+ * If ret_origin is not DXDI_SEP_MODULE_APP (i.e., above the applet), it must
+ * be 0 on success. For DXDI_SEP_MODULE_APP it is an applet-specific return code
+ */
+int dx_sepapp_session_open(void *ctx,
+			   u8 *sepapp_uuid,
+			   u32 auth_method,
+			   void *auth_data,
+			   struct dxdi_sepapp_kparams *open_params,
+			   int *session_id, enum dxdi_sep_module *ret_origin)
+{
+	struct sep_client_ctx *client_ctx = (struct sep_client_ctx *)ctx;
+	struct sep_op_ctx op_ctx;
+	int rc;
+
+	op_ctx_init(&op_ctx, client_ctx);
+	rc = sepapp_session_open(&op_ctx,
+				 sepapp_uuid, auth_method, auth_data,
+				 NULL, open_params, session_id, ret_origin);
+	/* If request operation suceeded return the return code from SeP */
+	if (likely(rc == 0))
+		rc = op_ctx.error_info;
+	op_ctx_fini(&op_ctx);
+	return rc;
+}
+EXPORT_SYMBOL(dx_sepapp_session_open);
+
+int sep_ioctl_sepapp_session_close(struct sep_client_ctx *client_ctx,
+				   unsigned long arg)
+{
+	struct dxdi_sepapp_session_close_params __user *user_params =
+	    (struct dxdi_sepapp_session_close_params __user *)arg;
+	int session_id;
+	struct sep_op_ctx op_ctx;
+	int rc;
+
+	/* access permissions to arg was already checked in sep_ioctl */
+	rc = __get_user(session_id, &user_params->session_id);
+	if (rc) {
+		pr_err("Failed reading input parameters");
+		return -EFAULT;
+	}
+
+	op_ctx_init(&op_ctx, client_ctx);
+	rc = sepapp_session_close(&op_ctx, session_id);
+	op_ctx_fini(&op_ctx);
+
+	return rc;
+}
+
+/**
+ * dx_sepapp_session_close() - Close a session with an applet
+ *
+ * @ctx:	SeP client context
+ * @session_id: Session ID as returned from dx_sepapp_open_session()
+ *
+ * Return code would be 0 on success
+ */
+int dx_sepapp_session_close(void *ctx, int session_id)
+{
+	struct sep_client_ctx *client_ctx = (struct sep_client_ctx *)ctx;
+	struct sep_op_ctx op_ctx;
+	int rc;
+
+	op_ctx_init(&op_ctx, client_ctx);
+	rc = sepapp_session_close(&op_ctx, session_id);
+	op_ctx_fini(&op_ctx);
+	return rc;
+}
+EXPORT_SYMBOL(dx_sepapp_session_close);
+
+int sep_ioctl_sepapp_command_invoke(struct sep_client_ctx *client_ctx,
+				    unsigned long arg)
+{
+	struct dxdi_sepapp_command_invoke_params __user *user_params =
+	    (struct dxdi_sepapp_command_invoke_params __user *)arg;
+	struct dxdi_sepapp_command_invoke_params params;
+	struct sep_op_ctx op_ctx;
+	/* Calculate size of input parameters part */
+	const unsigned long input_size =
+	    offsetof(struct dxdi_sepapp_command_invoke_params,
+		     sep_ret_origin);
+	int rc;
+
+	/* access permissions to arg was already checked in sep_ioctl */
+	if (__copy_from_user(&params, user_params, input_size)) {
+		pr_err("Failed reading input parameters");
+		return -EFAULT;
+	}
+
+	/* Check the MAC ACL to see if this client is allow to invoke command */
+	if (!is_permitted(params.app_uuid, params.command_id))
+		return -EPERM;
+
+	op_ctx_init(&op_ctx, client_ctx);
+	rc = sepapp_command_invoke(&op_ctx,
+				   params.session_id, params.command_id,
+				   &params.command_params, NULL,
+				   &params.sep_ret_origin, 0);
+
+	/* Copying back from command_params in case of output of "by value" */
+	if (copy_to_user(&user_params->command_params,
+			   &params.command_params,
+			   sizeof(struct dxdi_sepapp_params))
+	    || put_user(params.sep_ret_origin,
+			  &user_params->sep_ret_origin)) {
+		pr_err("Failed writing input parameters");
+		return -EFAULT;
+	}
+
+	put_user(op_ctx.error_info, &(user_params->error_info));
+
+	op_ctx_fini(&op_ctx);
+	return rc;
+}
+
+/**
+ * dx_sepapp_command_invoke() - Initiate command in the applet associated with
+ *				given session ID
+ *
+ * @ctx:	SeP client context
+ * @session_id:	The target session ID
+ * @command_id:	The ID of the command to initiate (applet-specific)
+ * @command_params:	The command parameters
+ * @ret_origin:	The origin of the return code
+ */
+int dx_sepapp_command_invoke(void *ctx,
+			     int session_id,
+			     u32 command_id,
+			     struct dxdi_sepapp_kparams *command_params,
+			     enum dxdi_sep_module *ret_origin)
+{
+	struct sep_client_ctx *client_ctx = (struct sep_client_ctx *)ctx;
+	struct sep_op_ctx op_ctx;
+	int rc;
+
+	op_ctx_init(&op_ctx, client_ctx);
+	rc = sepapp_command_invoke(&op_ctx, session_id, command_id,
+				   NULL, command_params, ret_origin, 0);
+	/* If request operation suceeded return the return code from SeP */
+	if (likely(rc == 0))
+		rc = op_ctx.error_info;
+	op_ctx_fini(&op_ctx);
+	return rc;
+}
+EXPORT_SYMBOL(dx_sepapp_command_invoke);
+
+static void async_app_handle_op_completion(struct work_struct *work)
+{
+	struct async_req_ctx *areq_ctx =
+	    container_of(work, struct async_req_ctx, comp_work);
+	struct sep_op_ctx *op_ctx = &areq_ctx->op_ctx;
+	struct crypto_async_request *initiating_req = areq_ctx->initiating_req;
+	int err = 0;
+	struct sep_app_session *session_ctx =
+	  &op_ctx->client_ctx->sepapp_sessions[op_ctx->async_info.session_id];
+
+	SEP_LOG_DEBUG("req=%p op_ctx=%p\n", initiating_req, op_ctx);
+	if (op_ctx == NULL) {
+		SEP_LOG_ERR("Invalid work context (%p)\n", work);
+		return;
+	}
+
+	if (op_ctx->op_state == USER_OP_COMPLETED) {
+
+		if (unlikely(op_ctx->error_info != 0)) {
+			SEP_LOG_ERR("SeP crypto-op failed (sep_rc=0x%08X)\n",
+				    op_ctx->error_info);
+		}
+		/* Save ret_code info before cleaning op_ctx */
+		err = -(op_ctx->error_info);
+		if (unlikely(err == -EINPROGRESS)) {
+			/* SeP error code collides with EINPROGRESS */
+			SEP_LOG_ERR("Invalid SeP error code 0x%08X\n",
+				    op_ctx->error_info);
+			err = -EINVAL;	/* fallback */
+		}
+		sepapp_params_cleanup(op_ctx->client_ctx,
+					op_ctx->async_info.dxdi_params,
+					op_ctx->async_info.dxdi_kparams,
+					op_ctx->async_info.sw_desc_params,
+					op_ctx->async_info.local_dma_objs,
+					op_ctx->async_info.mlli_tables);
+
+		mutex_lock(&session_ctx->session_lock);
+		session_ctx->ref_cnt--;
+		mutex_unlock(&session_ctx->session_lock);
+
+		op_ctx->client_ctx->memref_cnt--;
+		wake_up_interruptible(&op_ctx->client_ctx->memref_wq);
+
+		if (op_ctx->async_info.dxdi_kparams != NULL)
+			kfree(op_ctx->async_info.dxdi_kparams);
+		op_ctx_fini(op_ctx);
+	} else if (op_ctx->op_state == USER_OP_INPROC) {
+		/* Report with the callback the dispatch from backlog to
+		   the actual processing in the SW descriptors queue
+		   (Returned -EBUSY when the request was dispatched) */
+		err = -EINPROGRESS;
+	} else {
+		SEP_LOG_ERR("Invalid state (%d) for op_ctx %p\n",
+			    op_ctx->op_state, op_ctx);
+		BUG();
+	}
+
+	if (likely(initiating_req->complete != NULL))
+		initiating_req->complete(initiating_req, err);
+	else
+		SEP_LOG_ERR("Async. operation has no completion callback.\n");
+}
+
+int async_sepapp_command_invoke(void *ctx,
+			     int session_id,
+			     u32 command_id,
+			     struct dxdi_sepapp_kparams *command_params,
+			     enum dxdi_sep_module *ret_origin,
+			     struct async_req_ctx *areq_ctx)
+{
+	struct sep_client_ctx *client_ctx = (struct sep_client_ctx *)ctx;
+	struct sep_op_ctx *op_ctx = &areq_ctx->op_ctx;
+	int rc;
+
+	INIT_WORK(&areq_ctx->comp_work, async_app_handle_op_completion);
+	op_ctx_init(op_ctx, client_ctx);
+	op_ctx->comp_work = &areq_ctx->comp_work;
+	rc = sepapp_command_invoke(op_ctx, session_id, command_id,
+				   NULL, command_params, ret_origin, 1);
+
+	if (rc == 0)
+		return -EINPROGRESS;
+	else
+		return rc;
+}
+
+/**
+ * dx_sepapp_context_alloc() - Allocate client context for SeP applets ops.
+ * Returns DX_SEPAPP_CLIENT_CTX_NULL on failure.
+ */
+void *dx_sepapp_context_alloc(void)
+{
+	struct sep_client_ctx *client_ctx;
+
+	client_ctx = kzalloc(sizeof(struct sep_client_ctx), GFP_KERNEL);
+	if (client_ctx == NULL)
+		return DX_SEPAPP_CLIENT_CTX_NULL;
+
+	/* Always use queue 0 */
+	init_client_ctx(&kapps_drvdata->queue[0], client_ctx);
+
+	return (void *)client_ctx;
+}
+EXPORT_SYMBOL(dx_sepapp_context_alloc);
+
+/**
+ * dx_sepapp_context_free() - Free client context.
+ *
+ * @ctx: Client context to free.
+ *
+ * Returns 0 on success, -EBUSY if resources (sessions) are still allocated
+ */
+void dx_sepapp_context_free(void *ctx)
+{
+	struct sep_client_ctx *client_ctx = (struct sep_client_ctx *)ctx;
+	struct queue_drvdata *drvdata = client_ctx->drv_data;
+
+	cleanup_client_ctx(drvdata, client_ctx);
+	kfree(client_ctx);
+}
+EXPORT_SYMBOL(dx_sepapp_context_free);
+
+void dx_sepapp_init(struct sep_drvdata *drvdata)
+{
+	kapps_drvdata = drvdata;	/* Save for dx_sepapp_ API */
+}
+
+int sepapp_image_verify(u8 *addr, ssize_t size, u32 key_index, u32 magic_num)
+{
+	int sess_id = 0;
+	enum dxdi_sep_module ret_origin;
+	struct sep_client_ctx *sctx = NULL;
+	u8 uuid[16] = DEFAULT_APP_UUID;
+	struct dxdi_sepapp_kparams cmd_params;
+	int rc = 0;
+
+	pr_info("image verify: addr 0x%p size: %zd key_index: 0x%08X magic_num: 0x%08X\n",
+		addr, size, key_index, magic_num);
+
+	cmd_params.params_types[0] = DXDI_SEPAPP_PARAM_VAL;
+	/* addr is already a physical address, so this works on
+	 * a system with <= 4GB RAM.
+	 * TODO revisit this if the physical address of IMR can be higher
+	 */
+	cmd_params.params[0].val.data[0] = (unsigned long)addr & (DMA_BIT_MASK(32));
+	cmd_params.params[0].val.data[1] = 0;
+	cmd_params.params[0].val.copy_dir = DXDI_DATA_TO_DEVICE;
+
+	cmd_params.params_types[1] = DXDI_SEPAPP_PARAM_VAL;
+	cmd_params.params[1].val.data[0] = (u32)size;
+	cmd_params.params[1].val.data[1] = 0;
+	cmd_params.params[1].val.copy_dir = DXDI_DATA_TO_DEVICE;
+
+	cmd_params.params_types[2] = DXDI_SEPAPP_PARAM_VAL;
+	cmd_params.params[2].val.data[0] = key_index;
+	cmd_params.params[2].val.data[1] = 0;
+	cmd_params.params[2].val.copy_dir = DXDI_DATA_TO_DEVICE;
+
+	cmd_params.params_types[3] = DXDI_SEPAPP_PARAM_VAL;
+	cmd_params.params[3].val.data[0] = magic_num;
+	cmd_params.params[3].val.data[1] = 0;
+	cmd_params.params[3].val.copy_dir = DXDI_DATA_TO_DEVICE;
+
+	sctx = dx_sepapp_context_alloc();
+	if (unlikely(!sctx))
+		return -ENOMEM;
+
+#ifdef SEP_RUNTIME_PM
+	dx_sep_pm_runtime_get();
+#endif
+
+	rc = dx_sepapp_session_open(sctx, uuid, 0, NULL, NULL, &sess_id,
+					&ret_origin);
+	if (unlikely(rc != 0))
+		goto failed;
+
+	rc = dx_sepapp_command_invoke(sctx, sess_id, CMD_IMAGE_VERIFY,
+					&cmd_params, &ret_origin);
+
+	dx_sepapp_session_close(sctx, sess_id);
+
+failed:
+#ifdef SEP_RUNTIME_PM
+	dx_sep_pm_runtime_put();
+#endif
+
+	dx_sepapp_context_free(sctx);
+	return rc;
+}
+EXPORT_SYMBOL(sepapp_image_verify);
+
+int sepapp_hdmi_status(u8 status, u8 bksv[5])
+{
+	int sess_id = 0;
+	enum dxdi_sep_module ret_origin;
+	struct sep_client_ctx *sctx = NULL;
+	u8 uuid[16] = HDCP_APP_UUID;
+	struct dxdi_sepapp_kparams cmd_params;
+	int rc = 0;
+
+	pr_info("Hdmi status: status 0x%02x\n", status);
+
+	memset(&cmd_params, 0, sizeof(struct dxdi_sepapp_kparams));
+
+	cmd_params.params_types[0] = DXDI_SEPAPP_PARAM_VAL;
+	cmd_params.params[0].val.data[0] = status;
+	cmd_params.params[0].val.data[1] = 0;
+	cmd_params.params[0].val.copy_dir = DXDI_DATA_TO_DEVICE;
+
+	cmd_params.params_types[1] = DXDI_SEPAPP_PARAM_VAL;
+	memcpy(&cmd_params.params[1].val.data[0], bksv, sizeof(u32));
+	memcpy((uint8_t *)&cmd_params.params[1].val.data[1],
+		&bksv[4], sizeof(u8));
+	cmd_params.params[1].val.copy_dir = DXDI_DATA_TO_DEVICE;
+
+	sctx = dx_sepapp_context_alloc();
+	if (unlikely(!sctx))
+		return -ENOMEM;
+
+#ifdef SEP_RUNTIME_PM
+	dx_sep_pm_runtime_get();
+#endif
+
+	rc = dx_sepapp_session_open(sctx, uuid, 0, NULL, NULL, &sess_id,
+				    &ret_origin);
+	if (unlikely(rc != 0))
+		goto failed;
+
+	rc = dx_sepapp_command_invoke(sctx, sess_id, HDCP_RX_HDMI_STATUS,
+				      &cmd_params, &ret_origin);
+
+	dx_sepapp_session_close(sctx, sess_id);
+
+failed:
+#ifdef SEP_RUNTIME_PM
+	dx_sep_pm_runtime_put();
+#endif
+
+	dx_sepapp_context_free(sctx);
+	return rc;
+}
+EXPORT_SYMBOL(sepapp_hdmi_status);
+
+int sepapp_drm_playback(bool ied_status)
+{
+	int ses_id = 0;
+	enum dxdi_sep_module ret_origin;
+	struct sep_client_ctx *sctx = NULL;
+	u8 uuid[16] = DEFAULT_APP_UUID;
+	int rc = 0;
+	int command;
+
+	pr_debug("Requesting IED status change %s\n", ied_status ? "ON" : "OFF");
+
+	if (ied_status)
+		command = CMD_DRM_ENABLE_IED;
+	else
+		command = CMD_DRM_DISABLE_IED;
+
+	sctx = dx_sepapp_context_alloc();
+	if (unlikely(!sctx))
+		return -ENOMEM;
+
+#ifdef SEP_RUNTIME_PM
+	dx_sep_pm_runtime_get();
+#endif
+
+	rc = dx_sepapp_session_open(sctx, uuid, 0, NULL, NULL, &ses_id,
+				    &ret_origin);
+	if (unlikely(rc != 0))
+		goto failed;
+
+	rc = dx_sepapp_command_invoke(sctx, ses_id, command, NULL, &ret_origin);
+
+	dx_sepapp_session_close(sctx, ses_id);
+
+failed:
+#ifdef SEP_RUNTIME_PM
+	dx_sep_pm_runtime_put();
+#endif
+
+	dx_sepapp_context_free(sctx);
+	return rc;
+}
+EXPORT_SYMBOL(sepapp_drm_playback);
diff --git a/drivers/staging/sep54/sepapp.h b/drivers/staging/sep54/sepapp.h
new file mode 100644
index 0000000..dd02ce3
--- /dev/null
+++ b/drivers/staging/sep54/sepapp.h
@@ -0,0 +1,53 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+/** SeP Applets support module */
+#ifndef _SEPAPP_H_
+#define _SEPAPP_H_
+
+#include "dx_driver_abi.h"
+#include "dx_driver.h"
+
+/* Global drvdata to be used by kernel clients via dx_sepapp_ API */
+int sep_ioctl_sepapp_session_open(struct sep_client_ctx *client_ctx,
+				  unsigned long arg);
+
+int sep_ioctl_sepapp_session_close(struct sep_client_ctx *client_ctx,
+				   unsigned long arg);
+
+int sep_ioctl_sepapp_command_invoke(struct sep_client_ctx *client_ctx,
+				    unsigned long arg);
+
+void dx_sepapp_init(struct sep_drvdata *drvdata);
+
+int sepapp_session_close(struct sep_op_ctx *op_ctx, int session_id);
+
+int sepapp_image_verify(u8 *addr, ssize_t size, u32 key_index, u32 magic_num);
+
+int sepapp_hdmi_status(u8 status, u8 bksv[5]);
+
+int sepapp_drm_playback(bool ied_status);
+
+#endif /*_SEPAPP_H_*/
diff --git a/drivers/staging/sep54/sepfs.c b/drivers/staging/sep54/sepfs.c
new file mode 100644
index 0000000..980395b
--- /dev/null
+++ b/drivers/staging/sep54/sepfs.c
@@ -0,0 +1,534 @@
+/*
+ * Copyright (C) 2013  Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Special thanks to the author of smackfs, for keeping it simple!
+ *
+ *	Casey Schaufler <casey@schaufler-ca.com>
+ */
+
+#include <linux/seq_file.h>
+#include <linux/capability.h>
+#include <linux/rculist.h>
+#include <linux/kernel.h>
+#include <linux/vmalloc.h>
+#include <linux/mutex.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/init.h>
+#include <linux/cred.h>
+#include <linux/uidgid.h>
+#include <linux/sched.h>
+
+#include "sepfs.h"
+
+enum sep_inode {
+	SEP_ROOT_INODE	= 2,
+	SEP_LOAD	= 3,	/* load policy */
+	SEP_LOG         = 4,	/* log file */
+};
+
+/* A struct to contain a single known rule for an open or invoke command */
+struct sep_rule {
+	struct list_head		list;
+	u32				cmd_id;   /* command to be invoked */
+	uid_t                           uid;	  /* allowed user */
+	gid_t                           gid;	  /* allowed group */
+};
+
+/* Rules that apply to an individual TA */
+struct sep_ta_rules {
+	struct list_head                list;
+	u8				uuid[16]; /* uuid of the TA */
+	struct sep_rule                 open;     /* control open session */
+	struct sep_rule                 invoke;   /* control invoke command */
+	struct mutex		        ta_rules_mutex;	/* lock for the rules */
+};
+
+/* All the rules that apply to the Security Engine */
+LIST_HEAD(sep_acl);
+
+/* Create a mutex to protect the policy */
+static DEFINE_MUTEX(sep_acl_mutex);
+
+#define UUID_LEN 16
+#define TMP_SIZE 8
+
+#define SEP_MAGIC 0x37504553 /* SEP7 */
+
+/**
+ * Find the rules for a given TA
+ * @uuid The UUID of the TA
+ * returns the rule list if it exists or NULL if not found
+ */
+static struct sep_ta_rules *get_ta_rules(const u8 *uuid)
+{
+	struct sep_ta_rules *rule;
+
+	list_for_each_entry_rcu(rule, &sep_acl, list) {
+		if (memcmp(rule->uuid, uuid, UUID_LEN) == 0)
+			return rule;
+	}
+
+	return NULL;
+}
+
+/**
+ * Try to find a policy list for a TA, if it does not exist then create it
+ * @uuid the UUID of the TA
+ * @rules [out] A pointer to the rules for the TA
+ * return 0 on sucess, error otherwise
+ */
+static int get_create_ta_rules(u8 *uuid, struct sep_ta_rules **rules)
+{
+	int rc = 0;
+	struct sep_ta_rules *tmp_rules;
+
+	mutex_lock(&sep_acl_mutex);
+
+	tmp_rules = get_ta_rules(uuid);
+	if (tmp_rules == NULL) {
+		/* this is the first rule for this TA */
+		tmp_rules = kzalloc(sizeof(struct sep_ta_rules), GFP_KERNEL);
+		if (tmp_rules == NULL) {
+			rc = -ENOMEM;
+			goto out;
+		}
+
+		memcpy(tmp_rules->uuid, uuid, UUID_LEN);
+
+		INIT_LIST_HEAD(&tmp_rules->open.list);
+		INIT_LIST_HEAD(&tmp_rules->invoke.list);
+
+		mutex_init(&tmp_rules->ta_rules_mutex);
+
+		/* Add to the policy */
+		list_add_rcu(&tmp_rules->list, &sep_acl);
+	}
+
+	*rules = tmp_rules;
+
+out:
+	mutex_unlock(&sep_acl_mutex);
+	return rc;
+}
+
+/**
+ * @brief toByte Convert a Character to its hex int representation
+ * @param c To be converted
+ * @return the value of the char on success, -1 on error
+ */
+static inline char to_byte(const char c)
+{
+	if (c >= '0' && c <= '9')
+		return c - '0';
+	else if (c >= 'A' && c <= 'F')
+		return (c - 'A') + 10;
+	else if (c >= 'a' && c <= 'f')
+		return (c - 'a') + 10;
+	else
+		return -1;
+}
+
+/**
+ * Parse a rule from the load file and create a rule in the policy for it
+ * @data The raw line that has been written to the load file
+ * returns 0 on success
+ */
+static int parse_raw_rule(const char *data)
+{
+	int rc = -EINVAL;
+	struct sep_ta_rules *ta_rules = NULL;
+	struct sep_rule *new_rule;
+	u8 uuid[UUID_LEN];
+	u8 tmp[UUID_LEN * 2 + 1]; /* Hex representation in the file */
+	int i;
+	char ct;
+
+	new_rule = kzalloc(sizeof(*new_rule), GFP_KERNEL);
+	if (new_rule == NULL)
+		return -ENOMEM;
+
+	/*
+	 * overall rule format is
+	 * UUID is in hex representation in the string
+	 * UUID[32] command_id(u32) uid(u32) gid(u32)
+	 */
+	if (sscanf(data, "%32s %d %d %d", tmp, &new_rule->cmd_id,
+		   &new_rule->uid, &new_rule->gid) != 4) {
+		rc = -EINVAL;
+		goto err_out;
+	}
+
+	if (strnlen(tmp, UUID_LEN * 2) < 32) {
+		rc = -EINVAL;
+		goto err_out;
+	}
+
+	/* convert the hex string to a byte array */
+	for (i = 0; i < UUID_LEN; i++) {
+		ct = to_byte(tmp[2 * i]);
+		if (ct == -1) {
+			rc = -EINVAL;
+			goto err_out;
+		}
+		/* set the first nibble of the byte */
+		uuid[i] = ct << 4;
+
+		ct = to_byte(tmp[2 * i + 1]);
+		if (ct == -1) {
+			rc = -EINVAL;
+			goto err_out;
+		}
+
+		/* complete the byte */
+		uuid[i] |= ct & 0x0F;
+	}
+
+	pr_debug("Scanned the rule\n");
+	pr_debug("%pUB %d %d %d\n", uuid,
+		   new_rule->cmd_id, new_rule->uid, new_rule->gid);
+
+	rc = get_create_ta_rules(uuid, &ta_rules);
+	if (rc != 0 || ta_rules == NULL)
+		goto err_out;
+
+	mutex_lock(&ta_rules->ta_rules_mutex);
+
+	/* TODO: we should probably check if there is a duplicate rule */
+	/* append the rule to the appropriate policy config */
+	if (new_rule->cmd_id == RESTRICT_OPEN)
+		list_add_rcu(&new_rule->list, &ta_rules->open.list);
+	else
+		list_add_rcu(&new_rule->list, &ta_rules->invoke.list);
+
+	mutex_unlock(&ta_rules->ta_rules_mutex);
+	goto out;
+
+err_out:
+	kfree(new_rule);
+	pr_err("Error Parsing the rule\n");
+out:
+	return rc;
+}
+
+bool is_permitted(const u8 *uuid, int cmd_id)
+{
+	struct list_head *pos;
+	struct list_head *list;
+	struct sep_rule *element;
+	struct sep_ta_rules *ta_rule;
+	struct group_info *groups_info;
+	bool rule_exists = false;
+
+	/* Allow a priviladged process to bypass the MAC checks */
+	if (capable(CAP_MAC_OVERRIDE))
+		return true;
+
+	ta_rule = get_ta_rules(uuid);
+	if (ta_rule == NULL) {
+#ifdef DRACONIAN
+		/* When enforced a rule must exist that allows access to a
+		 * service.  The rules, therefore, are used to relax the default
+		 * policy of no access.
+		 */
+		return false;
+#else
+		/* By default the policy is permissive, only if a rule exists
+		   for a TA will it be enforced to block access */
+		pr_debug("Allowed because no rule\n");
+		return true;
+#endif
+	}
+
+	/* determine the suplementary groups of the running process */
+	groups_info = get_current_groups();
+
+	if (cmd_id == RESTRICT_OPEN)
+		list = &(&ta_rule->open)->list;
+	else
+		list = &(&ta_rule->invoke)->list;
+
+
+	list_for_each(pos, list) {
+		element = list_entry(pos, struct sep_rule, list);
+		if (element->cmd_id == cmd_id) {
+			rule_exists = true;
+			/* if we have a rule for this command check the perms */
+			if (uid_eq(current_euid(), element->uid) ||
+			    gid_eq(current_egid(), element->gid) ||
+			    groups_search(groups_info, element->gid))
+				return true;
+		}
+	}
+
+#ifdef DRACONIAN
+	/* If no rule exists then there is no access allowed */
+	return false;
+#else
+	/* If there is a matching command id in the rule list and we have gotten
+	 * this far it means that we are not permitted to access the TA/cmd_id
+	 * so return "false".  Otherwise if there is no rule for that command ID
+	 * we default to permissive mode and return "true".
+	 */
+	return (rule_exists == true) ? false : true;
+#endif
+}
+
+/**
+ * Start at the first element of the sep_acl
+ * @sf the file being worked on
+ */
+static void *load_seq_start(struct seq_file *sf, loff_t *pos)
+{
+	if (sf->index == 0)
+		sf->private =  list_first_entry_or_null(&sep_acl,
+							struct sep_ta_rules,
+							list);
+
+	return sf->private;
+}
+
+/**
+ * Interate to the next element in the sep_acl list
+ * @s the file
+ * @v the current element in the sep_acl list
+ */
+static void *load_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+	struct list_head *list = v;
+
+	if (list_is_last(list, &sep_acl)) {
+		s->private = NULL;
+		return NULL;
+	}
+
+	s->private = list->next;
+	return list->next;
+}
+
+/**
+ * Display the entries for a TA's policy
+ * @sf the sequence file
+ * @entry an element on the sep_acl list
+ */
+static int load_seq_show(struct seq_file *sf, void *entry)
+{
+	struct list_head *ta_r_list = entry;
+	struct list_head *pos;
+	struct sep_rule *element;
+	struct sep_ta_rules *ta_rules =
+			list_entry(ta_r_list, struct sep_ta_rules, list);
+
+	list_for_each(pos, &(&ta_rules->open)->list) {
+		element = list_entry(pos, struct sep_rule, list);
+		seq_printf(sf, "%pUB %d %d %d\n", ta_rules->uuid,
+			   element->cmd_id, element->uid, element->gid);
+	}
+
+	list_for_each(pos, &(&ta_rules->invoke)->list) {
+		element = list_entry(pos, struct sep_rule, list);
+		seq_printf(sf, "%pUB %d %d %d\n", ta_rules->uuid,
+			   element->cmd_id, element->uid, element->gid);
+	}
+
+	return 0;
+}
+
+/**
+ * Stop the sequential access to the file, unused
+ */
+static void load_seq_stop(struct seq_file *sf, void *entry)
+{
+}
+
+static const struct seq_operations load_seq_ops = {
+	.start = load_seq_start,
+	.next  = load_seq_next,
+	.show  = load_seq_show,
+	.stop  = load_seq_stop,
+};
+
+/**
+ * The open handler for the load file, when reading use the sequentianl file ops
+ * @inode of the file
+ * @file file descriptor of the opened file
+ */
+static int sep_open_load(struct inode *inode, struct file *file)
+{
+	/* only privilaged processes can interact with the policy */
+	/* TODO Android L does not support init with CAP_MAC_ADMIN */
+	/*if (!capable(CAP_MAC_ADMIN))
+		return -EPERM;
+	*/
+
+	return seq_open(file, &load_seq_ops);
+}
+
+/**
+ * The write handler that is associated with the load file
+ * @file file descriptor of the open file
+ * @buf the userspace data that has been written to the file
+ * @count the amount of data written to the file
+ * @ppos the current offset in the file stream
+ */
+static ssize_t sep_write_load(struct file *file, const char __user *buf,
+			      size_t count, loff_t *ppos)
+{
+	char *data;
+	int rc = -EINVAL;
+	char *rule;
+	int orig_count = count;
+
+	/* only privilaged processes can update the policy */
+	/* TODO Android L does not support init with CAP_MAC_ADMIN */
+	/* if (capable(CAP_MAC_ADMIN) == false)
+		return -EPERM;
+	*/
+
+	if (*ppos != 0)
+		return -EINVAL;
+
+	/* allow for \0 to stringify the data */
+	data = kzalloc(count + 1, GFP_KERNEL);
+	if (data == NULL)
+		return -ENOMEM;
+
+	if (copy_from_user(data, buf, count) != 0) {
+		rc = -EFAULT;
+		goto out;
+	}
+
+	/* all rules should be line based so only take whole lines */
+	while (count > 0 && data[count] != '\n')
+		count--;
+
+	data[count] = '\0';
+
+	while (data != NULL) {
+		rule = strsep(&data, "\n");
+		if (rule == NULL)
+			break;
+
+		rc = parse_raw_rule(rule);
+		if (rc != 0) /* if one rule is mangled continue */
+			pr_err("Failed to read rule\n");
+	}
+
+	/* say that we read the whole file */
+	rc = orig_count;
+
+out:
+	kfree(data);
+	return rc;
+}
+
+static const struct file_operations sep_load_ops = {
+	.open           = sep_open_load,
+	.read		= seq_read,
+	.llseek         = seq_lseek,
+	.write		= sep_write_load,
+	.release        = seq_release,
+};
+
+/**
+ * create the sep file system superblock and add the known entries
+ * @sb: the empty superblock
+ * @data: unused
+ * @silent: unused
+ *
+ * Returns 0 on success
+ */
+static int sep_fill_super(struct super_block *sb, void *data, int silent)
+{
+	int rc;
+	struct inode *root_inode;
+
+	static struct tree_descr sep_files[] = {
+		[SEP_LOAD] = {
+			"load", &sep_load_ops, S_IRUGO|S_IWUSR},
+				/* [SEP_LOG] = {
+				   "log", &sep_log_ops, S_IRUGO|S_IWUSR},*/
+		{""}
+	};
+
+	rc = simple_fill_super(sb, SEP_MAGIC, sep_files);
+	if (rc != 0) {
+		pr_err("%s failed %d while creating inodes\n", __func__, rc);
+		return rc;
+	}
+
+	/* access the inode to ensure it is created */
+	root_inode = sb->s_root->d_inode;
+
+	return 0;
+}
+
+/**
+ * get the sepfs superblock for mounting
+ */
+static struct dentry *sep_mount(struct file_system_type *fs_type,
+				int flags, const char *dev_name, void *data)
+{
+	return mount_single(fs_type, flags, data, sep_fill_super);
+}
+
+static struct file_system_type sep_fs_type = {
+	.name		= "sepfs",
+	.mount		= sep_mount,
+	.kill_sb	= kill_litter_super,
+};
+
+static struct kset *sepfs_kset;
+
+static int sep_init_sysfs(void)
+{
+	sepfs_kset = kset_create_and_add("sepfs", NULL, fs_kobj);
+	if (!sepfs_kset)
+		return -ENOMEM;
+	return 0;
+}
+
+static struct vfsmount *sepfs_mount;
+
+/**
+ * initialize and register sepfs
+ */
+static int __init sep_init_fs(void)
+{
+	int rc;
+
+	rc = sep_init_sysfs();
+	if (rc)
+		pr_err("sysfs mountpoint problem.\n");
+
+	rc = register_filesystem(&sep_fs_type);
+	if (!rc) {
+		pr_err("Mounting\n");
+		sepfs_mount = kern_mount(&sep_fs_type);
+		if (IS_ERR(sepfs_mount)) {
+			pr_err("Failed to mount\n");
+			rc = PTR_ERR(sepfs_mount);
+			sepfs_mount = NULL;
+		}
+	}
+
+	return rc;
+}
+
+device_initcall(sep_init_fs);
diff --git a/drivers/staging/sep54/sepfs.h b/drivers/staging/sep54/sepfs.h
new file mode 100644
index 0000000..e232ff9
--- /dev/null
+++ b/drivers/staging/sep54/sepfs.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2013  Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/* When defined in a policy for the command ID this will lock down access to the
+   entire TA by restricting who can open a session to the TA. */
+#define RESTRICT_OPEN -666
+
+/**
+ * Determine if the currently connected client is allowed to access the
+ * TA, using the specified command
+ * @uuid The TA that is being connected to
+ * @cmd_id The command that is being invoked in the TA,
+ * RESTRICT_OPEN if it is an Open command
+ * return true if access is allowed
+ */
+bool is_permitted(const u8 *uuid, int cmd_id);
diff --git a/drivers/staging/vt6656/iwctl.c b/drivers/staging/vt6656/iwctl.c
index d0cf7d8..8872e0f 100644
--- a/drivers/staging/vt6656/iwctl.c
+++ b/drivers/staging/vt6656/iwctl.c
@@ -1634,6 +1634,9 @@
 	if (pMgmt == NULL)
 		return -EFAULT;
 
+	if (!(pDevice->flags & DEVICE_FLAGS_OPENED))
+		return -ENODEV;
+
 	buf = kzalloc(sizeof(struct viawget_wpa_param), GFP_KERNEL);
 	if (buf == NULL)
 		return -ENOMEM;
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index 3a3fdc5..06b966c 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -1099,6 +1099,8 @@
     memset(pMgmt->abyCurrBSSID, 0, 6);
     pMgmt->eCurrState = WMAC_STATE_IDLE;
 
+	pDevice->flags &= ~DEVICE_FLAGS_OPENED;
+
     device_free_tx_bufs(pDevice);
     device_free_rx_bufs(pDevice);
     device_free_int_bufs(pDevice);
@@ -1110,7 +1112,6 @@
     usb_free_urb(pDevice->pInterruptURB);
 
     BSSvClearNodeDBTable(pDevice, 0);
-    pDevice->flags &=(~DEVICE_FLAGS_OPENED);
 
     DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_close2 \n");
 
diff --git a/drivers/staging/wlags49_h2/wl_priv.c b/drivers/staging/wlags49_h2/wl_priv.c
index c97e0e1..7e10dcd 100644
--- a/drivers/staging/wlags49_h2/wl_priv.c
+++ b/drivers/staging/wlags49_h2/wl_priv.c
@@ -570,6 +570,7 @@
 	ltv_t                   *pLtv;
 	bool_t                  ltvAllocated = FALSE;
 	ENCSTRCT                sEncryption;
+	size_t			len;
 
 #ifdef USE_WDS
 	hcf_16                  hcfPort  = HCF_PORT_0;
@@ -686,7 +687,8 @@
 					break;
 				case CFG_CNF_OWN_NAME:
 					memset(lp->StationName, 0, sizeof(lp->StationName));
-					memcpy((void *)lp->StationName, (void *)&pLtv->u.u8[2], (size_t)pLtv->u.u16[0]);
+					len = min_t(size_t, pLtv->u.u16[0], sizeof(lp->StationName));
+					strlcpy(lp->StationName, &pLtv->u.u8[2], len);
 					pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
 					break;
 				case CFG_CNF_LOAD_BALANCING:
@@ -1783,6 +1785,7 @@
 {
 	struct wl_private *lp = wl_priv(dev);
 	unsigned long flags;
+	size_t len;
 	int         ret = 0;
 	/*------------------------------------------------------------------------*/
 
@@ -1793,8 +1796,8 @@
 	wl_lock(lp, &flags);
 
 	memset(lp->StationName, 0, sizeof(lp->StationName));
-
-	memcpy(lp->StationName, extra, wrqu->data.length);
+	len = min_t(size_t, wrqu->data.length, sizeof(lp->StationName));
+	strlcpy(lp->StationName, extra, len);
 
 	/* Commit the adapter parameters */
 	wl_apply(lp);
diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
index dcceed2..81972fa 100644
--- a/drivers/staging/zcache/zcache-main.c
+++ b/drivers/staging/zcache/zcache-main.c
@@ -1811,10 +1811,12 @@
 #else
 	if (*zcache_comp_name != '\0') {
 		ret = crypto_has_comp(zcache_comp_name, 0, 0);
-		if (!ret)
+		if (!ret) {
 			pr_info("zcache: %s not supported\n",
 					zcache_comp_name);
-		goto out;
+			ret = 1;
+			goto out;
+		}
 	}
 	if (!ret)
 		strcpy(zcache_comp_name, "lzo");
diff --git a/drivers/staging/zram/Makefile b/drivers/staging/zram/Makefile
new file mode 100644
index 0000000..cb0f9ce
--- /dev/null
+++ b/drivers/staging/zram/Makefile
@@ -0,0 +1,3 @@
+zram-y	:=	zram_drv.o
+
+obj-$(CONFIG_ZRAM)	+=	zram.o
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index d7705e5..4c1b8db 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -628,25 +628,18 @@
 }
 
 static int iscsit_add_reject(
+	struct iscsi_conn *conn,
 	u8 reason,
-	int fail_conn,
-	unsigned char *buf,
-	struct iscsi_conn *conn)
+	unsigned char *buf)
 {
 	struct iscsi_cmd *cmd;
-	struct iscsi_reject *hdr;
-	int ret;
 
 	cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
 	if (!cmd)
 		return -1;
 
 	cmd->iscsi_opcode = ISCSI_OP_REJECT;
-	if (fail_conn)
-		cmd->cmd_flags |= ICF_REJECT_FAIL_CONN;
-
-	hdr	= (struct iscsi_reject *) cmd->pdu;
-	hdr->reason = reason;
+	cmd->reject_reason = reason;
 
 	cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
 	if (!cmd->buf_ptr) {
@@ -662,23 +655,16 @@
 	cmd->i_state = ISTATE_SEND_REJECT;
 	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
 
-	ret = wait_for_completion_interruptible(&cmd->reject_comp);
-	if (ret != 0)
-		return -1;
-
-	return (!fail_conn) ? 0 : -1;
+	return -1;
 }
 
-int iscsit_add_reject_from_cmd(
+static int iscsit_add_reject_from_cmd(
+	struct iscsi_cmd *cmd,
 	u8 reason,
-	int fail_conn,
-	int add_to_conn,
-	unsigned char *buf,
-	struct iscsi_cmd *cmd)
+	bool add_to_conn,
+	unsigned char *buf)
 {
 	struct iscsi_conn *conn;
-	struct iscsi_reject *hdr;
-	int ret;
 
 	if (!cmd->conn) {
 		pr_err("cmd->conn is NULL for ITT: 0x%08x\n",
@@ -688,11 +674,7 @@
 	conn = cmd->conn;
 
 	cmd->iscsi_opcode = ISCSI_OP_REJECT;
-	if (fail_conn)
-		cmd->cmd_flags |= ICF_REJECT_FAIL_CONN;
-
-	hdr	= (struct iscsi_reject *) cmd->pdu;
-	hdr->reason = reason;
+	cmd->reject_reason = reason;
 
 	cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
 	if (!cmd->buf_ptr) {
@@ -709,8 +691,6 @@
 
 	cmd->i_state = ISTATE_SEND_REJECT;
 	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
-
-	ret = wait_for_completion_interruptible(&cmd->reject_comp);
 	/*
 	 * Perform the kref_put now if se_cmd has already been setup by
 	 * scsit_setup_scsi_cmd()
@@ -719,12 +699,19 @@
 		pr_debug("iscsi reject: calling target_put_sess_cmd >>>>>>\n");
 		target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
 	}
-	if (ret != 0)
-		return -1;
-
-	return (!fail_conn) ? 0 : -1;
+	return -1;
 }
-EXPORT_SYMBOL(iscsit_add_reject_from_cmd);
+
+static int iscsit_add_reject_cmd(struct iscsi_cmd *cmd, u8 reason,
+				 unsigned char *buf)
+{
+	return iscsit_add_reject_from_cmd(cmd, reason, true, buf);
+}
+
+int iscsit_reject_cmd(struct iscsi_cmd *cmd, u8 reason, unsigned char *buf)
+{
+	return iscsit_add_reject_from_cmd(cmd, reason, false, buf);
+}
 
 /*
  * Map some portion of the allocated scatterlist to an iovec, suitable for
@@ -844,8 +831,8 @@
 	    !(hdr->flags & ISCSI_FLAG_CMD_FINAL)) {
 		pr_err("ISCSI_FLAG_CMD_WRITE & ISCSI_FLAG_CMD_FINAL"
 				" not set. Bad iSCSI Initiator.\n");
-		return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID,
-				1, 1, buf, cmd);
+		return iscsit_add_reject_cmd(cmd,
+					     ISCSI_REASON_BOOKMARK_INVALID, buf);
 	}
 
 	if (((hdr->flags & ISCSI_FLAG_CMD_READ) ||
@@ -865,8 +852,8 @@
 		pr_err("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
 			" set when Expected Data Transfer Length is 0 for"
 			" CDB: 0x%02x. Bad iSCSI Initiator.\n", hdr->cdb[0]);
-		return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID,
-				1, 1, buf, cmd);
+		return iscsit_add_reject_cmd(cmd,
+					     ISCSI_REASON_BOOKMARK_INVALID, buf);
 	}
 done:
 
@@ -875,62 +862,62 @@
 		pr_err("ISCSI_FLAG_CMD_READ and/or ISCSI_FLAG_CMD_WRITE"
 			" MUST be set if Expected Data Transfer Length is not 0."
 			" Bad iSCSI Initiator\n");
-		return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID,
-				1, 1, buf, cmd);
+		return iscsit_add_reject_cmd(cmd,
+					     ISCSI_REASON_BOOKMARK_INVALID, buf);
 	}
 
 	if ((hdr->flags & ISCSI_FLAG_CMD_READ) &&
 	    (hdr->flags & ISCSI_FLAG_CMD_WRITE)) {
 		pr_err("Bidirectional operations not supported!\n");
-		return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID,
-				1, 1, buf, cmd);
+		return iscsit_add_reject_cmd(cmd,
+					     ISCSI_REASON_BOOKMARK_INVALID, buf);
 	}
 
 	if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
 		pr_err("Illegally set Immediate Bit in iSCSI Initiator"
 				" Scsi Command PDU.\n");
-		return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID,
-				1, 1, buf, cmd);
+		return iscsit_add_reject_cmd(cmd,
+					     ISCSI_REASON_BOOKMARK_INVALID, buf);
 	}
 
 	if (payload_length && !conn->sess->sess_ops->ImmediateData) {
 		pr_err("ImmediateData=No but DataSegmentLength=%u,"
 			" protocol error.\n", payload_length);
-		return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR,
-				1, 1, buf, cmd);
+		return iscsit_add_reject_cmd(cmd,
+					     ISCSI_REASON_PROTOCOL_ERROR, buf);
 	}
 
-	if ((be32_to_cpu(hdr->data_length )== payload_length) &&
+	if ((be32_to_cpu(hdr->data_length) == payload_length) &&
 	    (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))) {
 		pr_err("Expected Data Transfer Length and Length of"
 			" Immediate Data are the same, but ISCSI_FLAG_CMD_FINAL"
 			" bit is not set protocol error\n");
-		return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR,
-				1, 1, buf, cmd);
+		return iscsit_add_reject_cmd(cmd,
+					     ISCSI_REASON_PROTOCOL_ERROR, buf);
 	}
 
 	if (payload_length > be32_to_cpu(hdr->data_length)) {
 		pr_err("DataSegmentLength: %u is greater than"
 			" EDTL: %u, protocol error.\n", payload_length,
 				hdr->data_length);
-		return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR,
-				1, 1, buf, cmd);
+		return iscsit_add_reject_cmd(cmd,
+					     ISCSI_REASON_PROTOCOL_ERROR, buf);
 	}
 
 	if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
 		pr_err("DataSegmentLength: %u is greater than"
 			" MaxXmitDataSegmentLength: %u, protocol error.\n",
 			payload_length, conn->conn_ops->MaxXmitDataSegmentLength);
-		return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR,
-				1, 1, buf, cmd);
+		return iscsit_add_reject_cmd(cmd,
+					     ISCSI_REASON_PROTOCOL_ERROR, buf);
 	}
 
 	if (payload_length > conn->sess->sess_ops->FirstBurstLength) {
 		pr_err("DataSegmentLength: %u is greater than"
 			" FirstBurstLength: %u, protocol error.\n",
 			payload_length, conn->sess->sess_ops->FirstBurstLength);
-		return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID,
-				1, 1, buf, cmd);
+		return iscsit_add_reject_cmd(cmd,
+					     ISCSI_REASON_BOOKMARK_INVALID, buf);
 	}
 
 	data_direction = (hdr->flags & ISCSI_FLAG_CMD_WRITE) ? DMA_TO_DEVICE :
@@ -985,9 +972,8 @@
 
 		dr = iscsit_allocate_datain_req();
 		if (!dr)
-			return iscsit_add_reject_from_cmd(
-					ISCSI_REASON_BOOKMARK_NO_RESOURCES,
-					1, 1, buf, cmd);
+			return iscsit_add_reject_cmd(cmd,
+					ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
 
 		iscsit_attach_datain_req(cmd, dr);
 	}
@@ -1015,18 +1001,16 @@
 	cmd->sense_reason = target_setup_cmd_from_cdb(&cmd->se_cmd, hdr->cdb);
 	if (cmd->sense_reason) {
 		if (cmd->sense_reason == TCM_OUT_OF_RESOURCES) {
-			return iscsit_add_reject_from_cmd(
-					ISCSI_REASON_BOOKMARK_NO_RESOURCES,
-					1, 1, buf, cmd);
+			return iscsit_add_reject_cmd(cmd,
+					ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
 		}
 
 		goto attach_cmd;
 	}
 
 	if (iscsit_build_pdu_and_seq_lists(cmd, payload_length) < 0) {
-		return iscsit_add_reject_from_cmd(
-			ISCSI_REASON_BOOKMARK_NO_RESOURCES,
-			1, 1, buf, cmd);
+		return iscsit_add_reject_cmd(cmd,
+				ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
 	}
 
 attach_cmd:
@@ -1068,17 +1052,13 @@
 	 * be acknowledged. (See below)
 	 */
 	if (!cmd->immediate_data) {
-		cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
-		if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
-			if (!cmd->sense_reason)
-				return 0;
-
+		cmdsn_ret = iscsit_sequence_cmd(conn, cmd,
+					(unsigned char *)hdr, hdr->cmdsn);
+		if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
+			return -1;
+		else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
 			target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
 			return 0;
-		} else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) {
-			return iscsit_add_reject_from_cmd(
-				ISCSI_REASON_PROTOCOL_ERROR,
-				1, 0, (unsigned char *)hdr, cmd);
 		}
 	}
 
@@ -1103,7 +1083,9 @@
 	 * iscsit_check_received_cmdsn() in iscsit_get_immediate_data() below.
 	 */
 	if (cmd->sense_reason) {
-		target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
+		if (cmd->reject_reason)
+			return 0;
+
 		return 1;
 	}
 	/*
@@ -1111,10 +1093,8 @@
 	 * the backend memory allocation.
 	 */
 	cmd->sense_reason = transport_generic_new_cmd(&cmd->se_cmd);
-	if (cmd->sense_reason) {
-		target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
+	if (cmd->sense_reason)
 		return 1;
-	}
 
 	return 0;
 }
@@ -1124,6 +1104,7 @@
 iscsit_get_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
 			  bool dump_payload)
 {
+	struct iscsi_conn *conn = cmd->conn;
 	int cmdsn_ret = 0, immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
 	/*
 	 * Special case for Unsupported SAM WRITE Opcodes and ImmediateData=Yes.
@@ -1140,20 +1121,21 @@
 		 * DataCRC, check against ExpCmdSN/MaxCmdSN if
 		 * Immediate Bit is not set.
 		 */
-		cmdsn_ret = iscsit_sequence_cmd(cmd->conn, cmd, hdr->cmdsn);
+		cmdsn_ret = iscsit_sequence_cmd(cmd->conn, cmd,
+					(unsigned char *)hdr, hdr->cmdsn);
+		if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
+			return -1;
 
-		if (cmd->sense_reason) {
-			if (iscsit_dump_data_payload(cmd->conn,
-					cmd->first_burst_len, 1) < 0)
-				return -1;
+		if (cmd->sense_reason || cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
+			int rc;
+
+			rc = iscsit_dump_data_payload(cmd->conn,
+						      cmd->first_burst_len, 1);
+			target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
+			return rc;
 		} else if (cmd->unsolicited_data)
 			iscsit_set_unsoliticed_dataout(cmd);
 
-		if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
-			return iscsit_add_reject_from_cmd(
-				ISCSI_REASON_PROTOCOL_ERROR,
-				1, 0, (unsigned char *)hdr, cmd);
-
 	} else if (immed_ret == IMMEDIATE_DATA_ERL1_CRC_FAILURE) {
 		/*
 		 * Immediate Data failed DataCRC and ERL>=1,
@@ -1184,15 +1166,14 @@
 
 	rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
 	if (rc < 0)
-		return rc;
+		return 0;
 	/*
 	 * Allocation iovecs needed for struct socket operations for
 	 * traditional iSCSI block I/O.
 	 */
 	if (iscsit_allocate_iovecs(cmd) < 0) {
-		return iscsit_add_reject_from_cmd(
-				ISCSI_REASON_BOOKMARK_NO_RESOURCES,
-				1, 0, buf, cmd);
+		return iscsit_add_reject_cmd(cmd,
+				ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
 	}
 	immed_data = cmd->immediate_data;
 
@@ -1283,8 +1264,8 @@
 
 	if (!payload_length) {
 		pr_err("DataOUT payload is ZERO, protocol error.\n");
-		return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
-					buf, conn);
+		return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
+					 buf);
 	}
 
 	/* iSCSI write */
@@ -1301,8 +1282,8 @@
 		pr_err("DataSegmentLength: %u is greater than"
 			" MaxXmitDataSegmentLength: %u\n", payload_length,
 			conn->conn_ops->MaxXmitDataSegmentLength);
-		return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
-					buf, conn);
+		return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
+					 buf);
 	}
 
 	cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt,
@@ -1325,8 +1306,7 @@
 	if (cmd->data_direction != DMA_TO_DEVICE) {
 		pr_err("Command ITT: 0x%08x received DataOUT for a"
 			" NON-WRITE command.\n", cmd->init_task_tag);
-		return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR,
-				1, 0, buf, cmd);
+		return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, buf);
 	}
 	se_cmd = &cmd->se_cmd;
 	iscsit_mod_dataout_timer(cmd);
@@ -1335,8 +1315,7 @@
 		pr_err("DataOut Offset: %u, Length %u greater than"
 			" iSCSI Command EDTL %u, protocol error.\n",
 			hdr->offset, payload_length, cmd->se_cmd.data_length);
-		return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID,
-				1, 0, buf, cmd);
+		return iscsit_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_INVALID, buf);
 	}
 
 	if (cmd->unsolicited_data) {
@@ -1528,7 +1507,7 @@
 
 	rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
 	if (rc < 0)
-		return rc;
+		return 0;
 	else if (!cmd)
 		return 0;
 
@@ -1557,8 +1536,12 @@
 	if (hdr->itt == RESERVED_ITT && !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
 		pr_err("NOPOUT ITT is reserved, but Immediate Bit is"
 			" not set, protocol error.\n");
-		return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
-					buf, conn);
+		if (!cmd)
+			return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
+						 (unsigned char *)hdr);
+
+		return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
+					 (unsigned char *)hdr);
 	}
 
 	if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
@@ -1566,8 +1549,12 @@
 			" greater than MaxXmitDataSegmentLength: %u, protocol"
 			" error.\n", payload_length,
 			conn->conn_ops->MaxXmitDataSegmentLength);
-		return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
-					buf, conn);
+		if (!cmd)
+			return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
+						 (unsigned char *)hdr);
+
+		return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
+					 (unsigned char *)hdr);
 	}
 
 	pr_debug("Got NOPOUT Ping %s ITT: 0x%08x, TTT: 0x%08x,"
@@ -1584,9 +1571,9 @@
 	 */
 	if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
 		if (!cmd)
-			return iscsit_add_reject(
+			return iscsit_reject_cmd(cmd,
 					ISCSI_REASON_BOOKMARK_NO_RESOURCES,
-					1, buf, conn);
+					(unsigned char *)hdr);
 
 		cmd->iscsi_opcode	= ISCSI_OP_NOOP_OUT;
 		cmd->i_state		= ISTATE_SEND_NOPIN;
@@ -1700,15 +1687,14 @@
 			return 0;
 		}
 
-		cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
+		cmdsn_ret = iscsit_sequence_cmd(conn, cmd,
+				(unsigned char *)hdr, hdr->cmdsn);
 		if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
 			ret = 0;
 			goto ping_out;
 		}
 		if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
-			return iscsit_add_reject_from_cmd(
-					ISCSI_REASON_PROTOCOL_ERROR,
-					1, 0, buf, cmd);
+			return -1;
 
 		return 0;
 	}
@@ -1757,8 +1743,8 @@
 	struct se_tmr_req *se_tmr;
 	struct iscsi_tmr_req *tmr_req;
 	struct iscsi_tm *hdr;
-	int out_of_order_cmdsn = 0;
-	int ret;
+	int out_of_order_cmdsn = 0, ret;
+	bool sess_ref = false;
 	u8 function;
 
 	hdr			= (struct iscsi_tm *) buf;
@@ -1782,8 +1768,8 @@
 		pr_err("Task Management Request TASK_REASSIGN not"
 			" issued as immediate command, bad iSCSI Initiator"
 				"implementation\n");
-		return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR,
-					1, 1, buf, cmd);
+		return iscsit_add_reject_cmd(cmd,
+					     ISCSI_REASON_PROTOCOL_ERROR, buf);
 	}
 	if ((function != ISCSI_TM_FUNC_ABORT_TASK) &&
 	    be32_to_cpu(hdr->refcmdsn) != ISCSI_RESERVED_TAG)
@@ -1795,9 +1781,9 @@
 	if (!cmd->tmr_req) {
 		pr_err("Unable to allocate memory for"
 			" Task Management command!\n");
-		return iscsit_add_reject_from_cmd(
-			ISCSI_REASON_BOOKMARK_NO_RESOURCES,
-			1, 1, buf, cmd);
+		return iscsit_add_reject_cmd(cmd,
+					     ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+					     buf);
 	}
 
 	/*
@@ -1814,6 +1800,9 @@
 				      conn->sess->se_sess, 0, DMA_NONE,
 				      MSG_SIMPLE_TAG, cmd->sense_buffer + 2);
 
+		target_get_sess_cmd(conn->sess->se_sess, &cmd->se_cmd, true);
+		sess_ref = true;
+
 		switch (function) {
 		case ISCSI_TM_FUNC_ABORT_TASK:
 			tcm_function = TMR_ABORT_TASK;
@@ -1839,17 +1828,15 @@
 		default:
 			pr_err("Unknown iSCSI TMR Function:"
 			       " 0x%02x\n", function);
-			return iscsit_add_reject_from_cmd(
-				ISCSI_REASON_BOOKMARK_NO_RESOURCES,
-				1, 1, buf, cmd);
+			return iscsit_add_reject_cmd(cmd,
+				ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
 		}
 
 		ret = core_tmr_alloc_req(&cmd->se_cmd, cmd->tmr_req,
 					 tcm_function, GFP_KERNEL);
 		if (ret < 0)
-			return iscsit_add_reject_from_cmd(
-				ISCSI_REASON_BOOKMARK_NO_RESOURCES,
-				1, 1, buf, cmd);
+			return iscsit_add_reject_cmd(cmd,
+				ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
 
 		cmd->tmr_req->se_tmr_req = cmd->se_cmd.se_tmr_req;
 	}
@@ -1908,9 +1895,8 @@
 			break;
 
 		if (iscsit_check_task_reassign_expdatasn(tmr_req, conn) < 0)
-			return iscsit_add_reject_from_cmd(
-					ISCSI_REASON_BOOKMARK_INVALID, 1, 1,
-					buf, cmd);
+			return iscsit_add_reject_cmd(cmd,
+					ISCSI_REASON_BOOKMARK_INVALID, buf);
 		break;
 	default:
 		pr_err("Unknown TMR function: 0x%02x, protocol"
@@ -1928,15 +1914,13 @@
 	spin_unlock_bh(&conn->cmd_lock);
 
 	if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
-		int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
+		int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
 		if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP)
 			out_of_order_cmdsn = 1;
 		else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
 			return 0;
 		else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
-			return iscsit_add_reject_from_cmd(
-					ISCSI_REASON_PROTOCOL_ERROR,
-					1, 0, buf, cmd);
+			return -1;
 	}
 	iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
 
@@ -1956,6 +1940,11 @@
 	 * For connection recovery, this is also the default action for
 	 * TMR TASK_REASSIGN.
 	 */
+	if (sess_ref) {
+		pr_debug("Handle TMR, using sess_ref=true check\n");
+		target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
+	}
+
 	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
 	return 0;
 }
@@ -1981,8 +1970,7 @@
 		pr_err("Unable to accept text parameter length: %u"
 			"greater than MaxXmitDataSegmentLength %u.\n",
 		       payload_length, conn->conn_ops->MaxXmitDataSegmentLength);
-		return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
-					buf, conn);
+		return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, buf);
 	}
 
 	pr_debug("Got Text Request: ITT: 0x%08x, CmdSN: 0x%08x,"
@@ -2084,8 +2072,8 @@
 
 	cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
 	if (!cmd)
-		return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
-					1, buf, conn);
+		return iscsit_add_reject(conn,
+					 ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
 
 	cmd->iscsi_opcode	= ISCSI_OP_TEXT;
 	cmd->i_state		= ISTATE_SEND_TEXTRSP;
@@ -2103,11 +2091,10 @@
 	iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
 
 	if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
-		cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
+		cmdsn_ret = iscsit_sequence_cmd(conn, cmd,
+				(unsigned char *)hdr, hdr->cmdsn);
 		if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
-			return iscsit_add_reject_from_cmd(
-					ISCSI_REASON_PROTOCOL_ERROR,
-					1, 0, buf, cmd);
+			return -1;
 
 		return 0;
 	}
@@ -2292,14 +2279,11 @@
 		if (ret < 0)
 			return ret;
 	} else {
-		cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
-		if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
+		cmdsn_ret = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
+		if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
 			logout_remove = 0;
-		} else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) {
-			return iscsit_add_reject_from_cmd(
-				ISCSI_REASON_PROTOCOL_ERROR,
-				1, 0, buf, cmd);
-		}
+		else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
+			return -1;
 	}
 
 	return logout_remove;
@@ -2323,8 +2307,8 @@
 	if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
 		pr_err("Initiator sent SNACK request while in"
 			" ErrorRecoveryLevel=0.\n");
-		return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
-					buf, conn);
+		return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
+					 buf);
 	}
 	/*
 	 * SNACK_DATA and SNACK_R2T are both 0,  so check which function to
@@ -2348,13 +2332,13 @@
 	case ISCSI_FLAG_SNACK_TYPE_RDATA:
 		/* FIXME: Support R-Data SNACK */
 		pr_err("R-Data SNACK Not Supported.\n");
-		return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
-					buf, conn);
+		return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
+					 buf);
 	default:
 		pr_err("Unknown SNACK type 0x%02x, protocol"
 			" error.\n", hdr->flags & 0x0f);
-		return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
-					buf, conn);
+		return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
+					 buf);
 	}
 
 	return 0;
@@ -2426,14 +2410,14 @@
 				pr_err("Unable to recover from"
 					" Immediate Data digest failure while"
 					" in ERL=0.\n");
-				iscsit_add_reject_from_cmd(
+				iscsit_reject_cmd(cmd,
 						ISCSI_REASON_DATA_DIGEST_ERROR,
-						1, 0, (unsigned char *)hdr, cmd);
+						(unsigned char *)hdr);
 				return IMMEDIATE_DATA_CANNOT_RECOVER;
 			} else {
-				iscsit_add_reject_from_cmd(
+				iscsit_reject_cmd(cmd,
 						ISCSI_REASON_DATA_DIGEST_ERROR,
-						0, 0, (unsigned char *)hdr, cmd);
+						(unsigned char *)hdr);
 				return IMMEDIATE_DATA_ERL1_CRC_FAILURE;
 			}
 		} else {
@@ -3533,6 +3517,7 @@
 		    struct iscsi_reject *hdr)
 {
 	hdr->opcode		= ISCSI_OP_REJECT;
+	hdr->reason		= cmd->reject_reason;
 	hdr->flags		|= ISCSI_FLAG_CMD_FINAL;
 	hton24(hdr->dlength, ISCSI_HDR_LEN);
 	hdr->ffffffff		= cpu_to_be32(0xffffffff);
@@ -3806,18 +3791,11 @@
 	case ISTATE_SEND_STATUS_RECOVERY:
 	case ISTATE_SEND_TEXTRSP:
 	case ISTATE_SEND_TASKMGTRSP:
+	case ISTATE_SEND_REJECT:
 		spin_lock_bh(&cmd->istate_lock);
 		cmd->i_state = ISTATE_SENT_STATUS;
 		spin_unlock_bh(&cmd->istate_lock);
 		break;
-	case ISTATE_SEND_REJECT:
-		if (cmd->cmd_flags & ICF_REJECT_FAIL_CONN) {
-			cmd->cmd_flags &= ~ICF_REJECT_FAIL_CONN;
-			complete(&cmd->reject_comp);
-			goto err;
-		}
-		complete(&cmd->reject_comp);
-		break;
 	default:
 		pr_err("Unknown Opcode: 0x%02x ITT:"
 		       " 0x%08x, i_state: %d on CID: %hu\n",
@@ -3922,8 +3900,7 @@
 	case ISCSI_OP_SCSI_CMD:
 		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
 		if (!cmd)
-			return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
-						1, buf, conn);
+			goto reject;
 
 		ret = iscsit_handle_scsi_cmd(conn, cmd, buf);
 		break;
@@ -3935,16 +3912,14 @@
 		if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
 			cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
 			if (!cmd)
-				return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
-						1, buf, conn);
+				goto reject;
 		}
 		ret = iscsit_handle_nop_out(conn, cmd, buf);
 		break;
 	case ISCSI_OP_SCSI_TMFUNC:
 		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
 		if (!cmd)
-			return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
-						1, buf, conn);
+			goto reject;
 
 		ret = iscsit_handle_task_mgt_cmd(conn, cmd, buf);
 		break;
@@ -3954,8 +3929,7 @@
 	case ISCSI_OP_LOGOUT:
 		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
 		if (!cmd)
-			return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
-						1, buf, conn);
+			goto reject;
 
 		ret = iscsit_handle_logout_cmd(conn, cmd, buf);
 		if (ret > 0)
@@ -3987,6 +3961,8 @@
 	}
 
 	return ret;
+reject:
+	return iscsit_add_reject(conn, ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
 }
 
 int iscsi_target_rx_thread(void *arg)
@@ -4086,8 +4062,8 @@
 		    (!(opcode & ISCSI_OP_LOGOUT)))) {
 			pr_err("Received illegal iSCSI Opcode: 0x%02x"
 			" while in Discovery Session, rejecting.\n", opcode);
-			iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
-					buffer, conn);
+			iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
+					  buffer);
 			goto transport_err;
 		}
 
diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h
index a0050b2..2c437cb 100644
--- a/drivers/target/iscsi/iscsi_target.h
+++ b/drivers/target/iscsi/iscsi_target.h
@@ -15,7 +15,7 @@
 extern int iscsit_reset_np_thread(struct iscsi_np *, struct iscsi_tpg_np *,
 				struct iscsi_portal_group *);
 extern int iscsit_del_np(struct iscsi_np *);
-extern int iscsit_add_reject_from_cmd(u8, int, int, unsigned char *, struct iscsi_cmd *);
+extern int iscsit_reject_cmd(struct iscsi_cmd *cmd, u8, unsigned char *);
 extern void iscsit_set_unsoliticed_dataout(struct iscsi_cmd *);
 extern int iscsit_logout_closesession(struct iscsi_cmd *, struct iscsi_conn *);
 extern int iscsit_logout_closeconnection(struct iscsi_cmd *, struct iscsi_conn *);
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 8d8b3ff..421344d 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -474,7 +474,7 @@
 	if (!capable(CAP_SYS_ADMIN))					\
 		return -EPERM;						\
 									\
-	snprintf(auth->name, PAGE_SIZE, "%s", page);			\
+	snprintf(auth->name, sizeof(auth->name), "%s", page);		\
 	if (!strncmp("NULL", auth->name, 4))				\
 		auth->naf_flags &= ~flags;				\
 	else								\
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
index 60ec4b9..8907dcd 100644
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -132,7 +132,6 @@
 	ICF_CONTIG_MEMORY			= 0x00000020,
 	ICF_ATTACHED_TO_RQUEUE			= 0x00000040,
 	ICF_OOO_CMDSN				= 0x00000080,
-	ICF_REJECT_FAIL_CONN			= 0x00000100,
 };
 
 /* struct iscsi_cmd->i_state */
@@ -366,6 +365,8 @@
 	u8			maxcmdsn_inc;
 	/* Immediate Unsolicited Dataout */
 	u8			unsolicited_data;
+	/* Reject reason code */
+	u8			reject_reason;
 	/* CID contained in logout PDU when opcode == ISCSI_INIT_LOGOUT_CMND */
 	u16			logout_cid;
 	/* Command flags */
@@ -446,7 +447,6 @@
 	struct list_head	datain_list;
 	/* R2T List */
 	struct list_head	cmd_r2t_list;
-	struct completion	reject_comp;
 	/* Timer for DataOUT */
 	struct timer_list	dataout_timer;
 	/* Iovecs for SCSI data payload RX/TX w/ kernel level sockets */
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index dcb199d..08bd878 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -746,13 +746,12 @@
 		if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
 			pr_err("Unable to recover from DataOUT CRC"
 				" failure while ERL=0, closing session.\n");
-			iscsit_add_reject_from_cmd(ISCSI_REASON_DATA_DIGEST_ERROR,
-					1, 0, buf, cmd);
+			iscsit_reject_cmd(cmd, ISCSI_REASON_DATA_DIGEST_ERROR,
+					  buf);
 			return DATAOUT_CANNOT_RECOVER;
 		}
 
-		iscsit_add_reject_from_cmd(ISCSI_REASON_DATA_DIGEST_ERROR,
-				0, 0, buf, cmd);
+		iscsit_reject_cmd(cmd, ISCSI_REASON_DATA_DIGEST_ERROR, buf);
 		return iscsit_dataout_post_crc_failed(cmd, buf);
 	}
 }
@@ -909,6 +908,7 @@
 	wait_for_completion(&conn->conn_wait_comp);
 	complete(&conn->conn_post_wait_comp);
 }
+EXPORT_SYMBOL(iscsit_cause_connection_reinstatement);
 
 void iscsit_fall_back_to_erl0(struct iscsi_session *sess)
 {
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index 40d9dbc..586c268 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -162,9 +162,8 @@
 			" protocol error.\n", cmd->init_task_tag, begrun,
 			(begrun + runlength), cmd->acked_data_sn);
 
-			return iscsit_add_reject_from_cmd(
-					ISCSI_REASON_PROTOCOL_ERROR,
-					1, 0, buf, cmd);
+			return iscsit_reject_cmd(cmd,
+					ISCSI_REASON_PROTOCOL_ERROR, buf);
 	}
 
 	if (runlength) {
@@ -173,8 +172,8 @@
 			" with BegRun: 0x%08x, RunLength: 0x%08x, exceeds"
 			" current R2TSN: 0x%08x, protocol error.\n",
 			cmd->init_task_tag, begrun, runlength, cmd->r2t_sn);
-			return iscsit_add_reject_from_cmd(
-				ISCSI_REASON_BOOKMARK_INVALID, 1, 0, buf, cmd);
+			return iscsit_reject_cmd(cmd,
+					ISCSI_REASON_BOOKMARK_INVALID, buf);
 		}
 		last_r2tsn = (begrun + runlength);
 	} else
@@ -433,8 +432,7 @@
 			" protocol error.\n", cmd->init_task_tag, begrun,
 			(begrun + runlength), cmd->acked_data_sn);
 
-		return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR,
-				1, 0, buf, cmd);
+		return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, buf);
 	}
 
 	/*
@@ -445,14 +443,14 @@
 		pr_err("Initiator requesting BegRun: 0x%08x, RunLength"
 			": 0x%08x greater than maximum DataSN: 0x%08x.\n",
 				begrun, runlength, (cmd->data_sn - 1));
-		return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID,
-				1, 0, buf, cmd);
+		return iscsit_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_INVALID,
+					 buf);
 	}
 
 	dr = iscsit_allocate_datain_req();
 	if (!dr)
-		return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
-				1, 0, buf, cmd);
+		return iscsit_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+					 buf);
 
 	dr->data_sn = dr->begrun = begrun;
 	dr->runlength = runlength;
@@ -1090,7 +1088,7 @@
 
 	ooo_cmdsn = iscsit_allocate_ooo_cmdsn();
 	if (!ooo_cmdsn)
-		return CMDSN_ERROR_CANNOT_RECOVER;
+		return -ENOMEM;
 
 	ooo_cmdsn->cmd			= cmd;
 	ooo_cmdsn->batch_count		= (batch) ?
@@ -1101,10 +1099,10 @@
 
 	if (iscsit_attach_ooo_cmdsn(sess, ooo_cmdsn) < 0) {
 		kmem_cache_free(lio_ooo_cache, ooo_cmdsn);
-		return CMDSN_ERROR_CANNOT_RECOVER;
+		return -ENOMEM;
 	}
 
-	return CMDSN_HIGHER_THAN_EXP;
+	return 0;
 }
 
 static int iscsit_set_dataout_timeout_values(
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 3402241..bc788c5 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -1163,12 +1163,11 @@
 		if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
 			spin_unlock_bh(&np->np_thread_lock);
 			complete(&np->np_restart_comp);
-			if (ret == -ENODEV) {
-				iscsit_put_transport(conn->conn_transport);
-				kfree(conn);
-				conn = NULL;
+			iscsit_put_transport(conn->conn_transport);
+			kfree(conn);
+			conn = NULL;
+			if (ret == -ENODEV)
 				goto out;
-			}
 			/* Get another socket */
 			return 1;
 		}
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 08a3bac..77dad24 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -178,7 +178,6 @@
 	INIT_LIST_HEAD(&cmd->i_conn_node);
 	INIT_LIST_HEAD(&cmd->datain_list);
 	INIT_LIST_HEAD(&cmd->cmd_r2t_list);
-	init_completion(&cmd->reject_comp);
 	spin_lock_init(&cmd->datain_lock);
 	spin_lock_init(&cmd->dataout_timeout_lock);
 	spin_lock_init(&cmd->istate_lock);
@@ -284,13 +283,12 @@
  * Commands may be received out of order if MC/S is in use.
  * Ensure they are executed in CmdSN order.
  */
-int iscsit_sequence_cmd(
-	struct iscsi_conn *conn,
-	struct iscsi_cmd *cmd,
-	__be32 cmdsn)
+int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+			unsigned char *buf, __be32 cmdsn)
 {
-	int ret;
-	int cmdsn_ret;
+	int ret, cmdsn_ret;
+	bool reject = false;
+	u8 reason = ISCSI_REASON_BOOKMARK_NO_RESOURCES;
 
 	mutex_lock(&conn->sess->cmdsn_mutex);
 
@@ -300,9 +298,19 @@
 		ret = iscsit_execute_cmd(cmd, 0);
 		if ((ret >= 0) && !list_empty(&conn->sess->sess_ooo_cmdsn_list))
 			iscsit_execute_ooo_cmdsns(conn->sess);
+		else if (ret < 0) {
+			reject = true;
+			ret = CMDSN_ERROR_CANNOT_RECOVER;
+		}
 		break;
 	case CMDSN_HIGHER_THAN_EXP:
 		ret = iscsit_handle_ooo_cmdsn(conn->sess, cmd, be32_to_cpu(cmdsn));
+		if (ret < 0) {
+			reject = true;
+			ret = CMDSN_ERROR_CANNOT_RECOVER;
+			break;
+		}
+		ret = CMDSN_HIGHER_THAN_EXP;
 		break;
 	case CMDSN_LOWER_THAN_EXP:
 		cmd->i_state = ISTATE_REMOVE;
@@ -310,11 +318,16 @@
 		ret = cmdsn_ret;
 		break;
 	default:
+		reason = ISCSI_REASON_PROTOCOL_ERROR;
+		reject = true;
 		ret = cmdsn_ret;
 		break;
 	}
 	mutex_unlock(&conn->sess->cmdsn_mutex);
 
+	if (reject)
+		iscsit_reject_cmd(cmd, reason, buf);
+
 	return ret;
 }
 EXPORT_SYMBOL(iscsit_sequence_cmd);
@@ -721,7 +734,7 @@
 		 * Fallthrough
 		 */
 	case ISCSI_OP_SCSI_TMFUNC:
-		rc = transport_generic_free_cmd(&cmd->se_cmd, 1);
+		rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown);
 		if (!rc && shutdown && se_cmd && se_cmd->se_sess) {
 			__iscsit_free_cmd(cmd, true, shutdown);
 			target_put_sess_cmd(se_cmd->se_sess, se_cmd);
@@ -737,7 +750,7 @@
 			se_cmd = &cmd->se_cmd;
 			__iscsit_free_cmd(cmd, true, shutdown);
 
-			rc = transport_generic_free_cmd(&cmd->se_cmd, 1);
+			rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown);
 			if (!rc && shutdown && se_cmd->se_sess) {
 				__iscsit_free_cmd(cmd, true, shutdown);
 				target_put_sess_cmd(se_cmd->se_sess, se_cmd);
diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h
index a442265..e4fc34a 100644
--- a/drivers/target/iscsi/iscsi_target_util.h
+++ b/drivers/target/iscsi/iscsi_target_util.h
@@ -13,7 +13,8 @@
 extern struct iscsi_seq *iscsit_get_seq_holder_for_datain(struct iscsi_cmd *, u32);
 extern struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *);
 extern struct iscsi_r2t *iscsit_get_holder_for_r2tsn(struct iscsi_cmd *, u32);
-int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, __be32 cmdsn);
+extern int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+			       unsigned char * ,__be32 cmdsn);
 extern int iscsit_check_unsolicited_dataout(struct iscsi_cmd *, unsigned char *);
 extern struct iscsi_cmd *iscsit_find_cmd_from_itt(struct iscsi_conn *, itt_t);
 extern struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(struct iscsi_conn *,
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index cbe48ab..f608fbc 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -730,7 +730,7 @@
 	if (ret < 0)
 		pr_err("Error writing ALUA metadata file: %s\n", path);
 	fput(file);
-	return ret ? -EIO : 0;
+	return (ret < 0) ? -EIO : 0;
 }
 
 /*
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 3240f2c..04a7493 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -1987,7 +1987,7 @@
 		pr_debug("Error writing APTPL metadata file: %s\n", path);
 	fput(file);
 
-	return ret ? -EIO : 0;
+	return (ret < 0) ? -EIO : 0;
 }
 
 static int
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index e992b27..3250ba2 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -134,10 +134,10 @@
 	 * pSCSI Host ID and enable for phba mode
 	 */
 	sh = scsi_host_lookup(phv->phv_host_id);
-	if (IS_ERR(sh)) {
+	if (!sh) {
 		pr_err("pSCSI: Unable to locate SCSI Host for"
 			" phv_host_id: %d\n", phv->phv_host_id);
-		return PTR_ERR(sh);
+		return -EINVAL;
 	}
 
 	phv->phv_lld_host = sh;
@@ -515,10 +515,10 @@
 			sh = phv->phv_lld_host;
 		} else {
 			sh = scsi_host_lookup(pdv->pdv_host_id);
-			if (IS_ERR(sh)) {
+			if (!sh) {
 				pr_err("pSCSI: Unable to locate"
 					" pdv_host_id: %d\n", pdv->pdv_host_id);
-				return PTR_ERR(sh);
+				return -EINVAL;
 			}
 		}
 	} else {
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 4cb667d..9fabbf7 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -97,9 +97,12 @@
 
 	buf[7] = 0x2; /* CmdQue=1 */
 
-	snprintf(&buf[8], 8, "LIO-ORG");
-	snprintf(&buf[16], 16, "%s", dev->t10_wwn.model);
-	snprintf(&buf[32], 4, "%s", dev->t10_wwn.revision);
+	memcpy(&buf[8], "LIO-ORG ", 8);
+	memset(&buf[16], 0x20, 16);
+	memcpy(&buf[16], dev->t10_wwn.model,
+	       min_t(size_t, strlen(dev->t10_wwn.model), 16));
+	memcpy(&buf[32], dev->t10_wwn.revision,
+	       min_t(size_t, strlen(dev->t10_wwn.revision), 4));
 	buf[4] = 31; /* Set additional length to 31 */
 
 	return 0;
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 5e3c025..409d03b 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -169,4 +169,33 @@
 	  enforce idle time which results in more package C-state residency. The
 	  user interface is exposed via generic thermal framework.
 
+config SOC_THERMAL
+	tristate "SoC Thermal driver"
+	depends on THERMAL
+	help
+	  SoC Thermal driver registers to Generic Thermal Framework.
+	  Exposes SoC DTS and aux trip point values through the framework.
+
+	  Say Y here to enable thermal driver on Intel Merrifield
+	  platform. To load this driver as a module, select M here.
+
+config DEBUG_THERMAL
+	bool "Thermal debug information support"
+	depends on THERMAL
+	help
+	  This enables debug sysfs interfaces/information for Thermal
+	  subsystem.
+
+	  Saying Y here will expose extra sysfs nodes under
+	  /sys/class/thermal/thermal_zoneX/
+
+config INTEL_MOOR_THERMAL
+	tristate "Thermal driver for Intel Moorefield platform"
+	depends on THERMAL && IIO && IIO_BASINCOVE_GPADC
+	help
+	  Say Y here to enable thermal driver on Intel Moorefield platform.
+
+	  To load this driver as a module, select M here. The module
+	  will be called "moor_thermal"
+
 endif
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index c054d41..0667102 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -2,9 +2,18 @@
 # Makefile for sensor chip drivers.
 #
 
+CFLAGS_intel_mrfl_thermal.o := -Werror
+CFLAGS_intel_soc_thermal.o := -Werror
+CFLAGS_thermal_core.o := -Werror
+CFLAGS_intel_moor_thermal.o := -Werror
+
 obj-$(CONFIG_THERMAL)		+= thermal_sys.o
 thermal_sys-y			+= thermal_core.o
 
+obj-$(CONFIG_SENSORS_THERMAL_MRFLD)     += intel_mrfl_thermal.o
+obj-$(CONFIG_SOC_THERMAL)               += intel_soc_thermal.o
+obj-$(CONFIG_INTEL_MOOR_THERMAL) += intel_moor_thermal.o
+
 # governors
 thermal_sys-$(CONFIG_THERMAL_GOV_FAIR_SHARE)	+= fair_share.o
 thermal_sys-$(CONFIG_THERMAL_GOV_STEP_WISE)	+= step_wise.o
diff --git a/drivers/thermal/intel_moor_thermal.c b/drivers/thermal/intel_moor_thermal.c
new file mode 100644
index 0000000..c67cdbd
--- /dev/null
+++ b/drivers/thermal/intel_moor_thermal.c
@@ -0,0 +1,1047 @@
+/*
+ * intel_moor_thermal.c - Intel Moorefield Platform Thermal Driver
+ *
+ * Copyright (C) 2013 Intel Corporation
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	See the GNU
+ * General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * Author: Sumeet R Pawnikar <sumeet.r.pawnikar@intel.com>
+ *
+ * Intel Moorefield platform - Shadycove PMIC: Thermal Monitor
+ * This driver exposes temperature and thresholds through sysfs interface
+ * to user space.
+ */
+
+#define pr_fmt(fmt)  "intel_moor_thermal: " fmt
+
+#include <linux/pm.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/rpmsg.h>
+#include <linux/module.h>
+#include <linux/thermal.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/platform_device.h>
+#include <linux/iio/consumer.h>
+
+#include <asm/intel_scu_pmic.h>
+#include <asm/intel_mid_rpmsg.h>
+#include <asm/intel_mid_thermal.h>
+
+
+#define DRIVER_NAME "scove_thrm"
+
+/* Number of trip points */
+#define MOORE_THERMAL_TRIPS	2
+/* Writabilities of trip points */
+#define MOORE_TRIPS_RW		0x03
+
+/* Number of Thermal sensors on the PMIC */
+#define PMIC_THERMAL_SENSORS	4
+
+/* Registers that govern Thermal Monitoring */
+#define THRMMONCTL      0xB2
+#define STHRMIRQ	0xB0
+#define MIRQLVL1	0x0C
+
+/* Set 10 sec interval between temperature measurements in Active Mode */
+#define THERM_EN_ACTIVE_MODE	(3 << 0)
+/* Set 30 Sec interval between temperature measurements in standby */
+#define THERM_EN_STANDBY_MODE	(3 << 3)
+
+/* PMIC SRAM base address and offset for Thermal register */
+#define PMIC_SRAM_BASE_ADDR	0xFFFFF610
+#define PMIC_SRAM_THRM_OFFSET	0x03
+#define IOMAP_SIZE		0x04
+
+#define PMICALRT	(1 << 3)
+#define SYS2ALRT	(1 << 2)
+#define SYS1ALRT	(1 << 1)
+#define SYS0ALRT	(1 << 0)
+#define THERM_ALRT	(1 << 2)
+#define MTHERM_IRQ	(1 << 2)
+
+/* ADC to Temperature conversion table length */
+#define TABLE_LENGTH	34
+#define TEMP_INTERVAL	5
+
+/* Default _max 85 C */
+#define DEFAULT_MAX_TEMP	85
+#define ALERT_LIMIT		2
+
+/*
+ * LOW event is defined as 0 (implicit)
+ * HIGH event is defined as 1 (implicit)
+ * Hence this event is defined as 2.
+ */
+#define EMUL_TEMP_EVENT         2
+#define TEMP_WRITE_TIMEOUT      (2 * HZ)
+
+/* Constants defined in ShadyCove PMIC spec */
+#define PMIC_DIE_ADC_MIN	53
+#define PMIC_DIE_ADC_MAX	15000
+#define PMIC_DIE_TEMP_MIN       -40
+#define PMIC_DIE_TEMP_MAX       125
+#define ADC_COEFFICIENT         269
+#define TEMP_OFFSET             273150
+
+/* Structure for thermal event notification */
+struct thermal_event {
+	int sensor;	/* Sensor type */
+	int event;	/* Event type: LOW or HIGH */
+};
+
+/* 'enum' of Thermal sensors */
+enum thermal_sensors { SYS0, SYS1, SYS2, PMIC_DIE, _COUNT };
+
+/*
+ * Alert registers store the 'alert' temperature for each sensor,
+ * as 12 bit ADC code. The higher four bits are stored in bits[0:3] of
+ * alert_regs_h. The lower eight bits are stored in alert_regs_l.
+ * The hysteresis value is stored in bits[4:7] of alert_regs_h.
+ * Order: SYS0 SYS1 SYS2 PMIC_DIE
+ *
+ * Thermal Alert has Min and Max registers. Each Min and Max has
+ * High [alert_regs_h] and Low registers [alert_regs_l].
+ *
+ * static const int alert_regs_l[2][4] = {
+ *			SYS0, SYS1, SYS2, PMIC_DIE
+ *	Alert Min ==>	{ 0xB8, 0xBC, 0xC0, 0xC8 },
+ *	Alert Max ==>	{ 0xBA, 0xBE, 0xC2, 0xC8 }
+ *			};
+ */
+static const int alert_regs_h[2][4] = {
+				/* SYS0, SYS1, SYS2, PMIC_DIE */
+	/* Alert Min */		{ 0xB7, 0xBB, 0xBF, 0xC7 },
+	/* Alert Max */		{ 0xB9, 0xBD, 0xC1, 0xC7 },
+				};
+
+/*
+ * ADC code vs Temperature table
+ * This table will be different for different thermistors
+ * Row 0: ADC code
+ * Row 1: Temperature (in degree celsius)
+ */
+static const int adc_code[2][TABLE_LENGTH] = {
+	{19565, 14820, 11335, 8756, 6823, 5365, 4251, 3389, 2722, 2202,
+	1792, 1467, 1208, 1000, 831, 695, 583, 492, 416, 353,
+	301, 259, 223, 192, 167, 145, 127, 111, 97, 86,
+	76, 67, 60, 53},
+	{-40, -35, -30, -25, -20, -15, -10, -5, 0, 5,
+	10, 15, 20, 25, 30, 35, 40, 45, 50, 55,
+	60, 65, 70, 75, 80, 85, 90, 95, 100, 105,
+	110, 115, 120, 125},
+	};
+
+static DEFINE_MUTEX(thrm_update_lock);
+
+struct thermal_device_info {
+	struct intel_mid_thermal_sensor *sensor;
+	struct completion temp_write_complete;
+};
+
+struct thermal_data {
+	struct platform_device *pdev;
+	struct iio_channel *iio_chan;
+	struct thermal_zone_device **tzd;
+	struct intel_mid_thermal_sensor *sensors;
+	unsigned int irq;
+	/* Caching information */
+	unsigned long last_updated;
+	int cached_vals[PMIC_THERMAL_SENSORS];
+	int num_sensors;
+	int num_virtual_sensors;
+	bool is_initialized;
+	void *thrm_addr;
+};
+static struct thermal_data *tdata;
+
+/*
+ * adc_to_pmic_die_temp - calculates the PMIC DIE temperature
+ * @adc_val: received the adc value from gpadc channel
+ *
+ * This function calculates the PMIC DIE temperature as per
+ * the formula given in ShadyCove PMIC spec.
+ * return value: temperature value in `C
+ */
+static inline int adc_to_pmic_die_temp(unsigned int adc_val)
+{
+	/* return temperature in mC */
+	return (ADC_COEFFICIENT * (adc_val - 2047)) - TEMP_OFFSET;
+}
+
+/*
+ * pmic_die_temp_to_adc - calculates the adc value
+ * @temp: received the PMIC DIE temp value in `C
+ *
+ * This function calculates the adc value as per
+ * the formula given in ShadyCove PMIC spec.
+ * return value: adc value
+ */
+
+static inline int pmic_die_temp_to_adc(int temp)
+{
+	/* 'temp' is in C, convert to mC and then do calculations */
+	return (((temp * 1000) + TEMP_OFFSET) / ADC_COEFFICIENT) + 2047;
+}
+
+/*
+ * find_adc_code - searches the ADC code using binary search
+ * @val: value to find in the array
+ *
+ * This function does binary search on an array sorted in 'descending' order.
+ * Can sleep
+ */
+static int find_adc_code(uint16_t val)
+{
+	int left = 0;
+	int right = TABLE_LENGTH - 1;
+	int mid;
+
+	while (left <= right) {
+		mid = (left + right)/2;
+		if (val == adc_code[0][mid] ||
+			(mid > 0 &&
+			val > adc_code[0][mid] &&
+			val < adc_code[0][mid-1]))
+			return mid;
+		else if (val > adc_code[0][mid])
+			right = mid - 1;
+		else if (val < adc_code[0][mid])
+			left = mid + 1;
+	}
+	return -EINVAL;
+}
+
+/*
+ * adc_to_temp - converts the ADC code to temperature in mC
+ * @direct: true if the sensor uses direct conversion
+ * @adc_val: the ADC code to be converted
+ * @tp: temperature return value
+ *
+ * Can sleep
+ */
+static int adc_to_temp(int direct, uint16_t adc_val, unsigned long *tp)
+{
+	int x0, x1, y0, y1;
+	int nr, dr;		/* Numerator & Denominator */
+	int indx;
+	int x = adc_val;
+
+	/* Direct conversion for pmic die temperature */
+	if (direct) {
+		if (adc_val < PMIC_DIE_ADC_MIN || adc_val > PMIC_DIE_ADC_MAX)
+			return -EINVAL;
+
+		*tp = adc_to_pmic_die_temp(adc_val);
+		return 0;
+	}
+
+	indx = find_adc_code(adc_val);
+	if (indx < 0)
+		return -EINVAL;
+
+	if (adc_code[0][indx] == adc_val) {
+		*tp = adc_code[1][indx] * 1000;
+		return 0;
+	}
+
+	/* This is just to satisfy the KW issues #28365, #28369 */
+	if (indx == 0)
+		return -EINVAL;
+
+	/*
+	 * The ADC code is in between two values directly defined in the
+	 * table. So, do linear interpolation to calculate the temperature.
+	 */
+	x0 = adc_code[0][indx];
+	x1 = adc_code[0][indx - 1];
+	y0 = adc_code[1][indx];
+	y1 = adc_code[1][indx - 1];
+
+	/*
+	 * Find y:
+	 * Of course, we can avoid these variables, but keep them
+	 * for readability and maintainability.
+	 */
+	nr = (x-x0)*y1 + (x1-x)*y0;
+	dr =  x1-x0;
+
+	if (!dr)
+		return -EINVAL;
+	/*
+	 * We have to report the temperature in milli degree celsius.
+	 * So, to reduce the loss of precision, do (Nr*1000)/Dr, instead
+	 * of (Nr/Dr)*1000.
+	 */
+	*tp = (nr * 1000)/dr;
+
+	return 0;
+}
+
+/*
+ * temp_to_adc - converts the temperature(in C) to ADC code
+ * @direct: true if the sensor uses direct conversion
+ * @temp: the temperature to be converted
+ * @adc_val: ADC code return value
+ *
+ * Can sleep
+ */
+static int temp_to_adc(int direct, int temp, uint16_t *adc_val)
+{
+	int indx;
+	int x0, x1, y0, y1;
+	int nr, dr;		/* Numerator & Denominator */
+	int x = temp;
+
+	/* Direct conversion for pmic die temperature */
+	if (direct) {
+		if (temp < PMIC_DIE_TEMP_MIN || temp > PMIC_DIE_TEMP_MAX)
+			return -EINVAL;
+
+		*adc_val = pmic_die_temp_to_adc(temp);
+		return 0;
+	}
+
+	if (temp < adc_code[1][0] || temp > adc_code[1][TABLE_LENGTH - 1])
+		return -EINVAL;
+
+	/* Find the 'indx' of this 'temp' in the table */
+	indx = (temp - adc_code[1][0]) / TEMP_INTERVAL;
+
+	if (temp == adc_code[1][indx]) {
+		*adc_val = adc_code[0][indx];
+		return 0;
+	}
+
+	/*
+	 * Temperature is not a multiple of 'TEMP_INTERVAL'. So,
+	 * do linear interpolation to obtain a better ADC code.
+	 */
+	x0 = adc_code[1][indx];
+	x1 = adc_code[1][indx + 1];
+	y0 = adc_code[0][indx];
+	y1 = adc_code[0][indx + 1];
+
+	nr = (x-x0)*y1 + (x1-x)*y0;
+	dr =  x1-x0;
+
+	if (!dr)
+		return -EINVAL;
+
+	*adc_val = nr/dr;
+
+	return 0;
+}
+
+/*
+ * get_adc_value - gets the ADC code from the register
+ * @alert_reg_h: The 'high' register address
+ *
+ * Not protected. Calling function should handle synchronization.
+ * Can sleep
+ */
+static int get_adc_value(uint16_t alert_reg_h)
+{
+	int ret;
+	uint16_t adc_val;
+	uint8_t l, h;
+
+	/* Reading high register address */
+	ret = intel_scu_ipc_ioread8(alert_reg_h, &h);
+	if (ret)
+		goto exit;
+
+	/* Get the address of alert_reg_l */
+	++alert_reg_h;
+
+	/* Reading low register address */
+	ret = intel_scu_ipc_ioread8(alert_reg_h, &l);
+	if (ret)
+		goto exit;
+
+	/* Concatenate 'h' and 'l' to get 12-bit ADC code */
+	adc_val = ((h & 0x0F) << 8) | l;
+
+	return adc_val;
+
+exit:
+	return ret;
+
+}
+
+/*
+ * set_tmax - sets the given 'adc_val' to the 'alert_reg'
+ * @alert_reg: register address
+ * @adc_val: ADC value to be programmed
+ *
+ * Not protected. Calling function should handle synchronization.
+ * Can sleep
+ */
+static int set_tmax(uint16_t alert_reg, uint16_t adc_val)
+{
+	int ret;
+
+	/* Set bits[0:3] of alert_reg_h to bits[8:11] of 'adc_val' */
+	ret = intel_scu_ipc_update_register(alert_reg, (adc_val >> 8), 0x0F);
+	if (ret)
+		return ret;
+
+	/* Extract bits[0:7] of 'adc_val' and write them into alert_reg_l */
+	return intel_scu_ipc_iowrite8(alert_reg + 1, adc_val & 0xFF);
+}
+
+/*
+ * program_tmax - programs a default _max value for each sensor
+ * @dev: device pointer
+ *
+ * Can sleep
+ */
+static int program_tmax(struct device *dev)
+{
+	int i, ret, level;
+	uint16_t pmic_die_val;
+	uint16_t adc_val, val;
+
+	/* ADC code corresponding to max Temp 85 C */
+	ret = temp_to_adc(0, DEFAULT_MAX_TEMP, &adc_val);
+	if (ret)
+		return ret;
+
+	ret = temp_to_adc(1, DEFAULT_MAX_TEMP, &pmic_die_val);
+	if (ret)
+		return ret;
+	/*
+	 * Since this function sets max & min value, do for all sensors even if
+	 * the sensor does not register as a thermal zone.
+	 */
+	for (level = 0; level < ALERT_LIMIT; level++) {
+		for (i = 0; i < PMIC_THERMAL_SENSORS; i++) {
+			val = (i == PMIC_DIE) ? pmic_die_val : adc_val;
+
+			ret = set_tmax(alert_regs_h[level][i], val);
+			if (ret)
+				goto exit_err;
+		}
+	}
+	return ret;
+
+exit_err:
+	dev_err(dev, "set alert %d for channel %d failed:%d\n", level, i, ret);
+	return ret;
+}
+
+static int store_trip_hyst(struct thermal_zone_device *tzd,
+				int trip, long hyst)
+{
+	int ret;
+	uint8_t data;
+	struct thermal_device_info *td_info = tzd->devdata;
+	uint16_t alert_reg = alert_regs_h[trip][td_info->sensor->index];
+
+	/* Hysteresis value is 5 bits wide */
+	if (hyst > 31)
+		return -EINVAL;
+
+	mutex_lock(&thrm_update_lock);
+
+	ret = intel_scu_ipc_ioread8(alert_reg, &data);
+	if (ret)
+		goto ipc_fail;
+
+	/* Set bits [4:7] to value of hyst */
+	data = (data & 0xF) | (hyst << 4);
+
+	ret = intel_scu_ipc_iowrite8(alert_reg, data);
+
+ipc_fail:
+	mutex_unlock(&thrm_update_lock);
+	return ret;
+}
+
+static int show_trip_hyst(struct thermal_zone_device *tzd,
+				int trip, long *hyst)
+{
+	int ret;
+	uint8_t data;
+	struct thermal_device_info *td_info = tzd->devdata;
+	uint16_t alert_reg = alert_regs_h[trip][td_info->sensor->index];
+
+	mutex_lock(&thrm_update_lock);
+
+	ret = intel_scu_ipc_ioread8(alert_reg, &data);
+	if (!ret)
+		*hyst = (data >> 4) & 0x0F; /* Extract bits[4:7] of data */
+
+	mutex_unlock(&thrm_update_lock);
+
+	return ret;
+}
+
+static int store_trip_temp(struct thermal_zone_device *tzd,
+				int trip, long trip_temp)
+{
+	int ret;
+	uint16_t adc_val;
+	struct thermal_device_info *td_info = tzd->devdata;
+	uint16_t alert_reg = alert_regs_h[trip][td_info->sensor->index];
+
+	if (trip_temp < 1000) {
+		dev_err(&tzd->device, "Temperature should be in mC\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&thrm_update_lock);
+
+	/* Convert from mC to C */
+	trip_temp /= 1000;
+
+	ret = temp_to_adc(td_info->sensor->direct, (int)trip_temp, &adc_val);
+	if (ret)
+		goto exit;
+
+	ret =  set_tmax(alert_reg, adc_val);
+exit:
+	mutex_unlock(&thrm_update_lock);
+	return ret;
+}
+
+static int show_trip_temp(struct thermal_zone_device *tzd,
+				int trip, long *trip_temp)
+{
+	int ret = -EINVAL;
+	uint16_t adc_val;
+	struct thermal_device_info *td_info = tzd->devdata;
+	uint16_t alert_reg_h = alert_regs_h[trip][td_info->sensor->index];
+
+	mutex_lock(&thrm_update_lock);
+
+	adc_val = get_adc_value(alert_reg_h);
+	if (adc_val < 0)
+		goto exit;
+
+	ret = adc_to_temp(td_info->sensor->direct, adc_val, trip_temp);
+
+exit:
+	mutex_unlock(&thrm_update_lock);
+	return ret;
+}
+
+static int show_trip_type(struct thermal_zone_device *tzd,
+			int trip, enum thermal_trip_type *trip_type)
+{
+	/* All are passive trip points */
+	*trip_type = THERMAL_TRIP_PASSIVE;
+	return 0;
+}
+
+static int update_temp(struct thermal_zone_device *tzd, long *temp)
+{
+	int ret;
+	struct thermal_device_info *td_info = tzd->devdata;
+	int indx = td_info->sensor->index;
+
+	if (!tdata->iio_chan)
+		return -EINVAL;
+
+	if (!tdata->is_initialized ||
+			time_after(jiffies, tdata->last_updated + HZ)) {
+		ret = iio_read_channel_all_raw(tdata->iio_chan,
+						tdata->cached_vals);
+		if (ret == -ETIMEDOUT) {
+			dev_err(&tzd->device,
+				"ADC sampling failed:%d reading result regs\n",
+				ret);
+		}
+		tdata->last_updated = jiffies;
+		tdata->is_initialized = true;
+	}
+
+	ret = adc_to_temp(td_info->sensor->direct, tdata->cached_vals[indx],
+								temp);
+	return ret;
+}
+
+static int show_emul_temp(struct thermal_zone_device *tzd, long *temp)
+{
+	int ret = 0;
+	char *thermal_event[3];
+	unsigned long timeout;
+	struct thermal_device_info *td_info = tzd->devdata;
+
+	thermal_event[0] = kasprintf(GFP_KERNEL, "NAME=%s", tzd->type);
+	thermal_event[1] = kasprintf(GFP_KERNEL, "EVENT=%d", EMUL_TEMP_EVENT);
+	thermal_event[2] = NULL;
+
+	INIT_COMPLETION(td_info->temp_write_complete);
+	kobject_uevent_env(&tzd->device.kobj, KOBJ_CHANGE, thermal_event);
+
+	timeout = wait_for_completion_timeout(&td_info->temp_write_complete,
+						TEMP_WRITE_TIMEOUT);
+	if (timeout == 0) {
+		/* Waiting timed out */
+		ret = -ETIMEDOUT;
+		goto exit;
+	}
+
+	*temp = tzd->emul_temperature;
+exit:
+	kfree(thermal_event[1]);
+	kfree(thermal_event[0]);
+	return ret;
+}
+
+static int store_emul_temp(struct thermal_zone_device *tzd,
+				unsigned long temp)
+{
+	struct thermal_device_info *td_info = tzd->devdata;
+
+	tzd->emul_temperature = temp;
+	complete(&td_info->temp_write_complete);
+	return 0;
+}
+
+static int show_temp(struct thermal_zone_device *tzd, long *temp)
+{
+	int ret;
+
+	mutex_lock(&thrm_update_lock);
+
+	ret = update_temp(tzd, temp);
+
+	mutex_unlock(&thrm_update_lock);
+	return ret;
+}
+
+static int enable_tm(void)
+{
+	int ret;
+	uint8_t data;
+
+	mutex_lock(&thrm_update_lock);
+
+	ret = intel_scu_ipc_ioread8(THRMMONCTL, &data);
+	if (ret)
+		goto ipc_fail;
+
+	ret = intel_scu_ipc_iowrite8(THRMMONCTL, data |
+					THERM_EN_ACTIVE_MODE |
+					THERM_EN_STANDBY_MODE);
+	if (ret)
+		goto ipc_fail;
+
+ipc_fail:
+	mutex_unlock(&thrm_update_lock);
+	return ret;
+}
+
+static struct thermal_device_info *initialize_sensor(
+				struct intel_mid_thermal_sensor *sensor)
+{
+	struct thermal_device_info *td_info =
+		kzalloc(sizeof(struct thermal_device_info), GFP_KERNEL);
+
+	if (!td_info)
+		return NULL;
+
+	td_info->sensor = sensor;
+
+	init_completion(&td_info->temp_write_complete);
+	return td_info;
+}
+
+static void notify_thermal_event(struct thermal_event te)
+{
+	int ret;
+	long cur_temp;
+	char *thrm_event[4];
+	struct thermal_zone_device *tzd = tdata->tzd[te.sensor];
+
+	/*
+	 * Read the current Temperature and send it to user land;
+	 * so that the user space can avoid a sysfs read.
+	 */
+	ret = update_temp(tzd, &cur_temp);
+	if (ret) {
+		dev_err(&tzd->device, "Cannot update temperature\n");
+		goto exit;
+	}
+	pr_info("Thermal Event: sensor: %s, cur_temp: %ld, event: %d\n",
+				tzd->type, cur_temp, te.event);
+	thrm_event[0] = kasprintf(GFP_KERNEL, "NAME=%s", tzd->type);
+	thrm_event[1] = kasprintf(GFP_KERNEL, "TEMP=%ld", cur_temp);
+	thrm_event[2] = kasprintf(GFP_KERNEL, "EVENT=%d", te.event);
+	thrm_event[3] = NULL;
+
+	kobject_uevent_env(&tzd->device.kobj, KOBJ_CHANGE, thrm_event);
+
+	kfree(thrm_event[2]);
+	kfree(thrm_event[1]);
+	kfree(thrm_event[0]);
+
+exit:
+	return;
+}
+
+static irqreturn_t thermal_intrpt(int irq, void *dev_data)
+{
+	int ret;
+	unsigned int irq_data;
+	uint8_t irq_status;
+	struct thermal_event te;
+	struct thermal_data *tdata = (struct thermal_data *)dev_data;
+
+	if (!tdata)
+		return IRQ_HANDLED;
+
+	mutex_lock(&thrm_update_lock);
+
+	irq_data = ioread8(tdata->thrm_addr + PMIC_SRAM_THRM_OFFSET);
+
+	ret = intel_scu_ipc_ioread8(STHRMIRQ, &irq_status);
+	if (ret)
+		goto ipc_fail;
+
+	dev_dbg(&tdata->pdev->dev, "STHRMIRQ: %.2x\n", irq_status);
+
+	/*
+	 * -1 for invalid interrupt
+	 * 1 for LOW to HIGH temperature alert
+	 * 0 for HIGH to LOW temperature alert
+	 */
+	te.event = -1;
+
+	/* Check which interrupt occured and for what event */
+	if (irq_data & PMICALRT) {
+		te.event = !!(irq_status & PMICALRT);
+		te.sensor = PMIC_DIE;
+	} else if (irq_data & SYS2ALRT) {
+		te.event = !!(irq_status & SYS2ALRT);
+		te.sensor = SYS2;
+	} else if (irq_data & SYS1ALRT) {
+		te.event = !!(irq_status & SYS1ALRT);
+		te.sensor = SYS1;
+	} else if (irq_data & SYS0ALRT) {
+		te.event = !!(irq_status & SYS0ALRT);
+		te.sensor = SYS0;
+	} else {
+		dev_err(&tdata->pdev->dev, "Invalid Interrupt\n");
+		ret = IRQ_HANDLED;
+		goto ipc_fail;
+	}
+
+	if (te.event != -1) {
+		dev_info(&tdata->pdev->dev,
+			"%s interrupt for thermal sensor %d\n",
+			te.event ? "HIGH" : "LOW", te.sensor);
+
+		/* Notify using UEvent */
+		notify_thermal_event(te);
+	}
+
+	/* Unmask Thermal Interrupt bit:2 in the mask register */
+	ret = intel_scu_ipc_iowrite8(MIRQLVL1, MTHERM_IRQ);
+	if (ret)
+		goto ipc_fail;
+
+ipc_fail:
+	mutex_unlock(&thrm_update_lock);
+	/* In case of failure, returning IRQ_HANDLED to avoid
+	repeated invoke of this function */
+	return IRQ_HANDLED;
+}
+
+static struct thermal_zone_device_ops tzd_emul_ops = {
+	.get_temp = show_emul_temp,
+	.set_emul_temp = store_emul_temp,
+};
+
+static struct thermal_zone_device_ops tzd_ops = {
+	.get_temp = show_temp,
+	.get_trip_type = show_trip_type,
+	.get_trip_temp = show_trip_temp,
+	.set_trip_temp = store_trip_temp,
+	.get_trip_hyst = show_trip_hyst,
+	.set_trip_hyst = store_trip_hyst,
+};
+
+static irqreturn_t moor_thermal_intrpt_handler(int irq, void *dev_data)
+{
+	return IRQ_WAKE_THREAD;
+}
+
+static int moor_thermal_probe(struct platform_device *pdev)
+{
+	int i, size, ret;
+	int total_sensors; /* real + virtual sensors */
+	int trips = MOORE_THERMAL_TRIPS;
+	int trips_rw = MOORE_TRIPS_RW;
+	struct intel_mid_thermal_platform_data *pdata;
+
+	pdata = pdev->dev.platform_data;
+	if (!pdata) {
+		dev_err(&pdev->dev, "platform data not found\n");
+		return -EINVAL;
+	}
+
+	tdata = kzalloc(sizeof(struct thermal_data), GFP_KERNEL);
+	if (!tdata) {
+		dev_err(&pdev->dev, "kzalloc failed\n");
+		return -ENOMEM;
+	}
+
+	tdata->pdev = pdev;
+	tdata->num_sensors = pdata->num_sensors;
+	tdata->num_virtual_sensors = pdata->num_virtual_sensors;
+	tdata->sensors = pdata->sensors;
+	ret = platform_get_irq(pdev, 0);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "platform_get_irq failed:%d\n", ret);
+		goto exit_free;
+	}
+	tdata->irq = ret;
+
+	platform_set_drvdata(pdev, tdata);
+
+	total_sensors = tdata->num_sensors;
+#ifdef CONFIG_THERMAL_EMULATION
+	total_sensors += tdata->num_virtual_sensors;
+#endif
+
+	size = sizeof(struct thermal_zone_device *) * total_sensors;
+	tdata->tzd = kzalloc(size, GFP_KERNEL);
+	if (!tdata->tzd) {
+		dev_err(&pdev->dev, "kzalloc failed\n");
+		ret = -ENOMEM;
+		goto exit_free;
+	}
+
+	/* Program a default _max value for each sensor */
+	ret = program_tmax(&pdev->dev);
+	if (ret) {
+		dev_err(&pdev->dev, "Programming _max failed:%d\n", ret);
+		goto exit_tzd;
+	}
+
+	/*
+	 * Register with IIO to sample temperature values.
+	 * Order of the channels obtained from adc:
+	 * "SYSTHERM0", "SYSTHERM1", "SYSTHERM2", "PMICDIE"
+	 */
+	tdata->iio_chan = iio_channel_get_all(&pdev->dev);
+	if (tdata->iio_chan == NULL) {
+		dev_err(&pdev->dev, "tdata->iio_chan is null\n");
+		ret = -EINVAL;
+		goto exit_tzd;
+	}
+
+	/* Check whether we got all the four channels */
+	ret = iio_channel_get_num(tdata->iio_chan);
+	if (ret != PMIC_THERMAL_SENSORS) {
+		dev_err(&pdev->dev, "incorrect number of channels:%d\n", ret);
+		ret = -EFAULT;
+		goto exit_iio;
+	}
+
+	/* Register each sensor with the generic thermal framework */
+	for (i = 0; i < total_sensors; i++) {
+		if (i < tdata->num_sensors) {
+
+			/* PMICDIE has only one trip point while other zones has two */
+			if (i == PMIC_DIE)
+				trips = trips_rw = 1;
+
+			tdata->tzd[i] = thermal_zone_device_register(
+				tdata->sensors[i].name, trips, trips_rw,
+				initialize_sensor(&tdata->sensors[i]),
+				&tzd_ops, NULL, 0, 0);
+		} else {
+			tdata->tzd[i] = thermal_zone_device_register(
+				tdata->sensors[i].name, 0, 0,
+				initialize_sensor(&tdata->sensors[i]),
+				&tzd_emul_ops, NULL, 0, 0);
+		}
+		if (IS_ERR(tdata->tzd[i])) {
+			ret = PTR_ERR(tdata->tzd[i]);
+			dev_err(&pdev->dev,
+				"registering thermal sensor %s failed: %d\n",
+				tdata->sensors[i].name, ret);
+			goto exit_reg;
+		}
+	}
+
+	tdata->thrm_addr = ioremap_nocache(PMIC_SRAM_BASE_ADDR, IOMAP_SIZE);
+	if (!tdata->thrm_addr) {
+		ret = -ENOMEM;
+		dev_err(&pdev->dev, "ioremap_nocache failed\n");
+		goto exit_reg;
+	}
+
+	/* Register for Interrupt Handler */
+	ret = request_threaded_irq(tdata->irq, moor_thermal_intrpt_handler,
+					thermal_intrpt, IRQF_TRIGGER_RISING,
+					DRIVER_NAME, tdata);
+	if (ret) {
+		dev_err(&pdev->dev, "request_threaded_irq failed:%d\n", ret);
+		goto exit_ioremap;
+	}
+
+	/* Enable Thermal Monitoring */
+	ret = enable_tm();
+	if (ret) {
+		dev_err(&pdev->dev, "Enabling TM failed:%d\n", ret);
+		goto exit_irq;
+	}
+
+	return 0;
+
+exit_irq:
+	free_irq(tdata->irq, tdata);
+exit_ioremap:
+	iounmap(tdata->thrm_addr);
+exit_reg:
+	while (--i >= 0)
+		thermal_zone_device_unregister(tdata->tzd[i]);
+exit_iio:
+	iio_channel_release_all(tdata->iio_chan);
+exit_tzd:
+	kfree(tdata->tzd);
+exit_free:
+	kfree(tdata);
+	return ret;
+}
+
+static int moor_thermal_resume(struct device *dev)
+{
+	dev_info(dev, "resume called.\n");
+	return 0;
+}
+
+static int moor_thermal_suspend(struct device *dev)
+{
+	dev_info(dev, "suspend called.\n");
+	return 0;
+}
+
+static int moor_thermal_remove(struct platform_device *pdev)
+{
+	int i, total_sensors;
+	struct thermal_data *tdata = platform_get_drvdata(pdev);
+
+	if (!tdata)
+		return 0;
+
+	total_sensors = tdata->num_sensors;
+
+#ifdef CONFIG_THERMAL_EMULATION
+	total_sensors += tdata->num_virtual_sensors;
+#endif
+
+	for (i = 0; i < total_sensors; i++)
+		thermal_zone_device_unregister(tdata->tzd[i]);
+
+	free_irq(tdata->irq, tdata);
+	iounmap(tdata->thrm_addr);
+	iio_channel_release_all(tdata->iio_chan);
+	kfree(tdata->tzd);
+	kfree(tdata);
+	return 0;
+}
+
+/* Driver initialization and finalization */
+static const struct dev_pm_ops thermal_pm_ops = {
+	.suspend = moor_thermal_suspend,
+	.resume = moor_thermal_resume,
+};
+
+static struct platform_driver moor_thermal_driver = {
+	.driver = {
+		.name = DRIVER_NAME,
+		.owner = THIS_MODULE,
+		.pm = &thermal_pm_ops,
+		},
+	.probe = moor_thermal_probe,
+	.remove = moor_thermal_remove,
+};
+
+static int moor_thermal_module_init(void)
+{
+	return platform_driver_register(&moor_thermal_driver);
+}
+
+static void moor_thermal_module_exit(void)
+{
+	platform_driver_unregister(&moor_thermal_driver);
+}
+
+/* RPMSG related functionality */
+static int moor_thermal_rpmsg_probe(struct rpmsg_channel *rpdev)
+{
+	if (!rpdev) {
+		pr_err("rpmsg channel not created\n");
+		dev_info(&rpdev->dev, "rpmsg channel not created for moor_thermal\n");
+		return -ENODEV;
+	}
+	dev_info(&rpdev->dev, "Probed moor_thermal rpmsg device\n");
+
+	return moor_thermal_module_init();
+}
+
+static void moor_thermal_rpmsg_remove(struct rpmsg_channel *rpdev)
+{
+	moor_thermal_module_exit();
+	dev_info(&rpdev->dev, "Removed moor_thermal rpmsg device\n");
+}
+
+static void moor_thermal_rpmsg_cb(struct rpmsg_channel *rpdev, void *data,
+			int len, void *priv, u32 src)
+{
+	dev_warn(&rpdev->dev, "unexpected, message\n");
+
+	print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+				data, len, true);
+}
+
+static struct rpmsg_device_id moor_thermal_id_table[] = {
+	{ .name = "rpmsg_moor_thermal" },
+	{ },
+};
+
+MODULE_DEVICE_TABLE(rpmsg, moor_thermal_id_table);
+
+static struct rpmsg_driver moor_thermal_rpmsg = {
+	.drv.name	= DRIVER_NAME,
+	.drv.owner	= THIS_MODULE,
+	.probe		= moor_thermal_rpmsg_probe,
+	.callback	= moor_thermal_rpmsg_cb,
+	.remove		= moor_thermal_rpmsg_remove,
+	.id_table	= moor_thermal_id_table,
+};
+
+static int __init moor_thermal_rpmsg_init(void)
+{
+	return register_rpmsg_driver(&moor_thermal_rpmsg);
+}
+
+static void __exit moor_thermal_rpmsg_exit(void)
+{
+	return unregister_rpmsg_driver(&moor_thermal_rpmsg);
+}
+
+module_init(moor_thermal_rpmsg_init);
+module_exit(moor_thermal_rpmsg_exit);
+
+MODULE_AUTHOR("Sumeet Pawnikar<sumeet.r.pawnikar@intel.com>");
+MODULE_DESCRIPTION("Intel Moorefield Platform Thermal Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/thermal/intel_soc_thermal.c b/drivers/thermal/intel_soc_thermal.c
new file mode 100644
index 0000000..beb76bd
--- /dev/null
+++ b/drivers/thermal/intel_soc_thermal.c
@@ -0,0 +1,863 @@
+/*
+ * intel_soc_thermal.c - Intel SoC Platform Thermal Driver
+ *
+ * Copyright (C) 2012 Intel Corporation
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * Author: Shravan B M <shravan.k.b.m@intel.com>
+ *
+ * This driver registers to Thermal framework as SoC zone. It exposes
+ * two SoC DTS temperature with two writeable trip points.
+ */
+
+#define pr_fmt(fmt)  "intel_soc_thermal: " fmt
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/thermal.h>
+#include <linux/seq_file.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <asm/msr.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_mid_thermal.h>
+#include <asm/processor.h>
+
+#define DRIVER_NAME	"soc_thrm"
+
+/* SOC DTS Registers */
+#define SOC_THERMAL_SENSORS	2
+#define SOC_THERMAL_TRIPS	2
+#define SOC_MAX_STATES		4
+#define DTS_ENABLE_REG		0xB0
+#define DTS_ENABLE		0x03
+#define DTS_TRIP_RW		0x03
+
+#define PUNIT_PORT		0x04
+#define PUNIT_TEMP_REG		0xB1
+#define PUNIT_AUX_REG		0xB2
+
+#define TJMAX_TEMP		90
+#define TJMAX_CODE		0x7F
+
+/* Default hysteresis values in C */
+#define DEFAULT_H2C_HYST	1
+#define MAX_HYST		7
+
+/* Power Limit registers */
+#define PKG_TURBO_POWER_LIMIT	0x610
+#define PKG_TURBO_CFG		0x670
+#define MSR_THERM_CFG1		0x673
+#define MSR_THERM_CFG2		0x674
+
+/* PKG_TURBO_PL1 holds PL1 in terms of 32mW */
+#define PL_UNIT_MW		32
+
+/* Magic number symbolising Dynamic Turbo OFF */
+#define DISABLE_DYNAMIC_TURBO	0xB0FF
+
+/* IRQ details */
+#define SOC_DTS_CONTROL		0x80
+#define TRIP_STATUS_RO		0xB3
+#define TRIP_STATUS_RW		0xB4
+/* TE stands for THERMAL_EVENT */
+#define TE_AUX0			0xB5
+#define ENABLE_AUX_INTRPT	0x0F
+#define ENABLE_CPU0		(1 << 16)
+#define RTE_ENABLE		(1 << 9)
+
+static int tjmax_temp;
+static int turbo_floor_reg;
+
+static DEFINE_MUTEX(thrm_update_lock);
+
+struct platform_soc_data {
+	struct thermal_zone_device *tzd[SOC_THERMAL_SENSORS];
+	struct thermal_cooling_device *soc_cdev; /* PL1 control */
+	int irq;
+};
+
+struct cooling_device_info {
+	struct soc_throttle_data *soc_data;
+	/* Lock protecting the soc_cur_state variable */
+	struct mutex lock_state;
+	unsigned long soc_cur_state;
+};
+
+struct thermal_device_info {
+	int sensor_index;
+	struct mutex lock_aux;
+};
+
+static inline u32 read_soc_reg(unsigned int addr)
+{
+	return intel_mid_msgbus_read32(PUNIT_PORT, addr);
+}
+
+static inline void write_soc_reg(unsigned int addr, u32 val)
+{
+	intel_mid_msgbus_write32(PUNIT_PORT, addr, val);
+}
+
+#ifdef CONFIG_DEBUG_FS
+struct dts_regs {
+	char *name;
+	u32 addr;
+} dts_regs[] = {
+	/* Thermal Management Registers */
+	{"PTMC",	0x80},
+	{"TRR0",	0x81},
+	{"TRR1",	0x82},
+	{"TTS",		0x83},
+	{"TELB",	0x84},
+	{"TELT",	0x85},
+	{"GFXT",	0x88},
+	{"VEDT",	0x89},
+	{"VECT",	0x8A},
+	{"VSPT",	0x8B},
+	{"ISPT",	0x8C},
+	{"SWT",		0x8D},
+	/* Trip Event Registers */
+	{"DTSC",	0xB0},
+	{"TRR",		0xB1},
+	{"PTPS",	0xB2},
+	{"PTTS",	0xB3},
+	{"PTTSS",	0xB4},
+	{"TE_AUX0",	0xB5},
+	{"TE_AUX1",	0xB6},
+	{"TE_AUX2",	0xB7},
+	{"TE_AUX3",	0xB8},
+	{"TTE_VRIcc",	0xB9},
+	{"TTE_VRHOT",	0xBA},
+	{"TTE_PROCHOT",	0xBB},
+	{"TTE_SLM0",	0xBC},
+	{"TTE_SLM1",	0xBD},
+	{"BWTE",	0xBE},
+	{"TTE_SWT",	0xBF},
+	/* MSI Message Registers */
+	{"TMA",		0xC0},
+	{"TMD",		0xC1},
+};
+
+/* /sys/kernel/debug/soc_thermal/soc_dts */
+static struct dentry *soc_dts_dent;
+static struct dentry *soc_thermal_dir;
+
+static int soc_dts_debugfs_show(struct seq_file *s, void *unused)
+{
+	int i;
+	u32 val;
+
+	for (i = 0; i < ARRAY_SIZE(dts_regs); i++) {
+		val = read_soc_reg(dts_regs[i].addr);
+		seq_printf(s,
+			"%s[0x%X]	Val: 0x%X\n",
+			dts_regs[i].name, dts_regs[i].addr, val);
+	}
+	return 0;
+}
+
+static int debugfs_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, soc_dts_debugfs_show, NULL);
+}
+
+static const struct file_operations soc_dts_debugfs_fops = {
+	.open           = debugfs_open,
+	.read           = seq_read,
+	.llseek         = seq_lseek,
+	.release        = single_release,
+};
+
+static void create_soc_dts_debugfs(void)
+{
+	int err;
+
+	/* /sys/kernel/debug/soc_thermal/ */
+	soc_thermal_dir = debugfs_create_dir("soc_thermal", NULL);
+	if (IS_ERR(soc_thermal_dir)) {
+		err = PTR_ERR(soc_thermal_dir);
+		pr_err("debugfs_create_dir failed:%d\n", err);
+		return;
+	}
+
+	/* /sys/kernel/debug/soc_thermal/soc_dts */
+	soc_dts_dent = debugfs_create_file("soc_dts", S_IFREG | S_IRUGO,
+					soc_thermal_dir, NULL,
+					&soc_dts_debugfs_fops);
+	if (IS_ERR(soc_dts_dent)) {
+		err = PTR_ERR(soc_dts_dent);
+		debugfs_remove_recursive(soc_thermal_dir);
+		pr_err("debugfs_create_file failed:%d\n", err);
+	}
+}
+
+static void remove_soc_dts_debugfs(void)
+{
+	debugfs_remove_recursive(soc_thermal_dir);
+}
+#else
+static inline void create_soc_dts_debugfs(void) { }
+static inline void remove_soc_dts_debugfs(void) { }
+#endif
+
+static
+struct cooling_device_info *initialize_cdev(struct platform_device *pdev)
+{
+	struct cooling_device_info *cdev_info =
+		kzalloc(sizeof(struct cooling_device_info), GFP_KERNEL);
+	if (!cdev_info)
+		return NULL;
+
+	cdev_info->soc_data = pdev->dev.platform_data;
+	mutex_init(&cdev_info->lock_state);
+	return cdev_info;
+}
+
+static struct thermal_device_info *initialize_sensor(int index)
+{
+	struct thermal_device_info *td_info =
+		kzalloc(sizeof(struct thermal_device_info), GFP_KERNEL);
+
+	if (!td_info)
+		return NULL;
+	td_info->sensor_index = index;
+	mutex_init(&td_info->lock_aux);
+
+	return td_info;
+}
+
+static void initialize_floor_reg_addr(void)
+{
+	struct cpuinfo_x86 *c = &cpu_data(0);
+
+	if (c->x86_model == 0x4a || c->x86_model == 0x5a)
+		turbo_floor_reg = 0xdf;
+	else
+		turbo_floor_reg = 0x2;
+}
+
+static void enable_soc_dts(void)
+{
+	int i;
+	u32 val, eax, edx;
+
+	rdmsr_on_cpu(0, MSR_THERM_CFG1, &eax, &edx);
+
+	/* B[8:10] H2C Hyst */
+	eax = (eax & ~(0x7 << 8)) | (DEFAULT_H2C_HYST << 8);
+
+	/* Set the Hysteresis value */
+	wrmsr_on_cpu(0, MSR_THERM_CFG1, eax, edx);
+
+	/* Enable CPU DTS averaging, set TM2 timeout */
+	rdmsr_on_cpu(0, MSR_THERM_CFG2, &eax, &edx);
+	eax &= ~0x2;
+	eax |= 9 << 12;
+	wrmsr_on_cpu(0, MSR_THERM_CFG2, eax, edx);
+
+	/* Enable the DTS */
+	write_soc_reg(DTS_ENABLE_REG, DTS_ENABLE);
+
+	val = read_soc_reg(SOC_DTS_CONTROL);
+	write_soc_reg(SOC_DTS_CONTROL, val | ENABLE_AUX_INTRPT | ENABLE_CPU0);
+
+	/* Enable Interrupts for all the AUX trips for the DTS */
+	for (i = 0; i < SOC_THERMAL_TRIPS; i++) {
+		val = read_soc_reg(TE_AUX0 + i);
+		write_soc_reg(TE_AUX0 + i, (val | RTE_ENABLE));
+	}
+}
+
+static int show_trip_hyst(struct thermal_zone_device *tzd,
+				int trip, long *hyst)
+{
+	u32 eax, edx;
+	struct thermal_device_info *td_info = tzd->devdata;
+
+	/* Hysteresis is only supported for trip point 0 */
+	if (trip != 0) {
+		*hyst = 0;
+		return 0;
+	}
+
+	mutex_lock(&td_info->lock_aux);
+
+	rdmsr_on_cpu(0, MSR_THERM_CFG1, &eax, &edx);
+
+	/* B[8:10] H2C Hyst, for trip 0. Report hysteresis in mC */
+	*hyst = ((eax >> 8) & 0x7) * 1000;
+
+	mutex_unlock(&td_info->lock_aux);
+	return 0;
+}
+
+static int store_trip_hyst(struct thermal_zone_device *tzd,
+				int trip, long hyst)
+{
+	u32 eax, edx;
+	struct thermal_device_info *td_info = tzd->devdata;
+
+	/* Convert from mC to C */
+	hyst /= 1000;
+
+	if (trip != 0 || hyst < 0 || hyst > MAX_HYST)
+		return -EINVAL;
+
+	mutex_lock(&td_info->lock_aux);
+
+	rdmsr_on_cpu(0, MSR_THERM_CFG1, &eax, &edx);
+
+	/* B[8:10] H2C Hyst */
+	eax = (eax & ~(0x7 << 8)) | (hyst << 8);
+
+	wrmsr_on_cpu(0, MSR_THERM_CFG1, eax, edx);
+
+	mutex_unlock(&td_info->lock_aux);
+	return 0;
+}
+
+static int show_temp(struct thermal_zone_device *tzd, long *temp)
+{
+	struct thermal_device_info *td_info = tzd->devdata;
+	u32 val = read_soc_reg(PUNIT_TEMP_REG);
+
+	/* Extract bits[0:7] or [8:15] using sensor_index */
+	*temp =  (val >> (8 * td_info->sensor_index)) & 0xFF;
+
+	if (*temp == 0)
+		return 0;
+
+	/* Calibrate the temperature */
+	*temp = TJMAX_CODE - *temp + tjmax_temp;
+
+	/* Convert to mC */
+	*temp *= 1000;
+
+	return 0;
+}
+
+static int show_trip_type(struct thermal_zone_device *tzd,
+			int trip, enum thermal_trip_type *trip_type)
+{
+	/* All are passive trip points */
+	*trip_type = THERMAL_TRIP_PASSIVE;
+
+	return 0;
+}
+
+static int show_trip_temp(struct thermal_zone_device *tzd,
+				int trip, long *trip_temp)
+{
+	u32 aux_value = read_soc_reg(PUNIT_AUX_REG);
+
+	/* aux0 b[0:7], aux1 b[8:15], aux2 b[16:23], aux3 b[24:31] */
+	*trip_temp = (aux_value >> (8 * trip)) & 0xFF;
+
+	/* Calibrate the trip point temperature */
+	*trip_temp = tjmax_temp - *trip_temp;
+
+	/* Convert to mC and report */
+	*trip_temp *= 1000;
+
+	return 0;
+}
+
+static int store_trip_temp(struct thermal_zone_device *tzd,
+				int trip, long trip_temp)
+{
+	u32 aux_trip, aux = 0;
+	struct thermal_device_info *td_info = tzd->devdata;
+
+	/* Convert from mC to C */
+	trip_temp /= 1000;
+
+	/* The trip temp is 8 bits wide (unsigned) */
+	if (trip_temp > 255)
+		return -EINVAL;
+
+	/* Assign last byte to unsigned 32 */
+	aux_trip = trip_temp & 0xFF;
+
+	/* Calibrate w.r.t TJMAX_TEMP */
+	aux_trip = tjmax_temp - aux_trip;
+
+	mutex_lock(&td_info->lock_aux);
+	aux = read_soc_reg(PUNIT_AUX_REG);
+	switch (trip) {
+	case 0:
+		/* aux0 bits 0:7 */
+		aux = (aux & 0xFFFFFF00) | (aux_trip << (8 * trip));
+		break;
+	case 1:
+		/* aux1 bits 8:15 */
+		aux = (aux & 0xFFFF00FF) | (aux_trip << (8 * trip));
+		break;
+	}
+	write_soc_reg(PUNIT_AUX_REG, aux);
+
+	mutex_unlock(&td_info->lock_aux);
+
+	return 0;
+}
+
+/* SoC cooling device callbacks */
+static int soc_get_max_state(struct thermal_cooling_device *cdev,
+				unsigned long *state)
+{
+	/* SoC has 4 levels of throttling from 0 to 3 */
+	*state = SOC_MAX_STATES - 1;
+	return 0;
+}
+
+static int soc_get_cur_state(struct thermal_cooling_device *cdev,
+				unsigned long *state)
+{
+	struct cooling_device_info *cdev_info =
+			(struct cooling_device_info *)cdev->devdata;
+
+	mutex_lock(&cdev_info->lock_state);
+	*state = cdev_info->soc_cur_state;
+	mutex_unlock(&cdev_info->lock_state);
+
+	return 0;
+}
+
+static void set_floor_freq(int val)
+{
+	u32 eax;
+
+	eax = read_soc_reg(turbo_floor_reg);
+
+	/* Do not modify if floor freq is controlled by PUNIT */
+	if (!((eax >> 25) & 0x1))
+		return;
+
+	/* Set bits[8:14] of eax to val */
+	eax = (eax & ~(0x7F << 8)) | (val << 8);
+
+	write_soc_reg(turbo_floor_reg, eax);
+}
+
+static int disable_dynamic_turbo(struct cooling_device_info *cdev_info)
+{
+	u32 eax, edx;
+
+	mutex_lock(&cdev_info->lock_state);
+
+	rdmsr_on_cpu(0, PKG_TURBO_CFG, &eax, &edx);
+
+	/* Set bits[0:2] to 0 to enable TjMax Turbo mode */
+	eax = eax & ~0x07;
+
+	/* Set bit[8] to 0 to disable Dynamic Turbo */
+	eax = eax & ~(1 << 8);
+
+	/* Set bits[9:11] to 0 disable Dynamic Turbo Policy */
+	eax = eax & ~(0x07 << 9);
+
+	wrmsr_on_cpu(0, PKG_TURBO_CFG, eax, edx);
+
+	/*
+	 * Now that we disabled Dynamic Turbo, we can
+	 * make the floor frequency ratio also 0.
+	 */
+	set_floor_freq(0);
+
+	cdev_info->soc_cur_state = DISABLE_DYNAMIC_TURBO;
+
+	mutex_unlock(&cdev_info->lock_state);
+	return 0;
+}
+
+static int soc_set_cur_state(struct thermal_cooling_device *cdev,
+				unsigned long state)
+{
+	u32 eax, edx;
+	struct soc_throttle_data *data;
+	struct cooling_device_info *cdev_info =
+			(struct cooling_device_info *)cdev->devdata;
+
+	if (state == DISABLE_DYNAMIC_TURBO)
+		return disable_dynamic_turbo(cdev_info);
+
+	if (state >= SOC_MAX_STATES) {
+		pr_err("Invalid SoC throttle state:%ld\n", state);
+		return -EINVAL;
+	}
+
+	mutex_lock(&cdev_info->lock_state);
+
+	data = &cdev_info->soc_data[state];
+
+	rdmsr_on_cpu(0, PKG_TURBO_POWER_LIMIT, &eax, &edx);
+
+	/* Set bits[0:14] of eax to 'data->power_limit' */
+	eax = (eax & ~0x7FFF) | data->power_limit;
+
+	wrmsr_on_cpu(0, PKG_TURBO_POWER_LIMIT, eax, edx);
+
+	set_floor_freq(data->floor_freq);
+
+	cdev_info->soc_cur_state = state;
+
+	mutex_unlock(&cdev_info->lock_state);
+	return 0;
+}
+
+#ifdef CONFIG_DEBUG_THERMAL
+static int soc_get_force_state_override(struct thermal_cooling_device *cdev,
+					char *buf)
+{
+	int i;
+	int pl1_vals_mw[SOC_MAX_STATES];
+	struct cooling_device_info *cdev_info =
+			(struct cooling_device_info *)cdev->devdata;
+
+	mutex_lock(&cdev_info->lock_state);
+
+	/* PKG_TURBO_PL1 holds PL1 in terms of 32mW. So, multiply by 32 */
+	for (i = 0; i < SOC_MAX_STATES; i++) {
+		pl1_vals_mw[i] =
+			cdev_info->soc_data[i].power_limit * PL_UNIT_MW;
+	}
+
+	mutex_unlock(&cdev_info->lock_state);
+
+	return sprintf(buf, "%d %d %d %d\n", pl1_vals_mw[0], pl1_vals_mw[1],
+					pl1_vals_mw[2], pl1_vals_mw[3]);
+}
+
+static int soc_set_force_state_override(struct thermal_cooling_device *cdev,
+					char *buf)
+{
+	int i, ret;
+	int pl1_vals_mw[SOC_MAX_STATES];
+	unsigned long cur_state;
+	struct cooling_device_info *cdev_info =
+				(struct cooling_device_info *)cdev->devdata;
+
+	/*
+	 * The four space separated values entered via the sysfs node
+	 * override the default values configured through platform data.
+	 */
+	ret = sscanf(buf, "%d %d %d %d", &pl1_vals_mw[0], &pl1_vals_mw[1],
+					&pl1_vals_mw[2], &pl1_vals_mw[3]);
+	if (ret != SOC_MAX_STATES) {
+		pr_err("Invalid values in soc_set_force_state_override\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&cdev_info->lock_state);
+
+	/* PKG_TURBO_PL1 takes PL1 in terms of 32mW. So, divide by 32 */
+	for (i = 0; i < SOC_MAX_STATES; i++) {
+		cdev_info->soc_data[i].power_limit =
+					pl1_vals_mw[i] / PL_UNIT_MW;
+	}
+
+	/* Update the cur_state value of this cooling device */
+	cur_state = cdev_info->soc_cur_state;
+
+	mutex_unlock(&cdev_info->lock_state);
+
+	return soc_set_cur_state(cdev, cur_state);
+}
+#endif
+
+static void notify_thermal_event(struct thermal_zone_device *tzd,
+				long temp, int event, int level)
+{
+	char *thermal_event[5];
+
+	pr_info("Thermal Event: sensor: %s, cur_temp: %ld, event: %d, level: %d\n",
+				tzd->type, temp, event, level);
+
+	thermal_event[0] = kasprintf(GFP_KERNEL, "NAME=%s", tzd->type);
+	thermal_event[1] = kasprintf(GFP_KERNEL, "TEMP=%ld", temp);
+	thermal_event[2] = kasprintf(GFP_KERNEL, "EVENT=%d", event);
+	thermal_event[3] = kasprintf(GFP_KERNEL, "LEVEL=%d", level);
+	thermal_event[4] = NULL;
+
+	kobject_uevent_env(&tzd->device.kobj, KOBJ_CHANGE, thermal_event);
+
+	kfree(thermal_event[3]);
+	kfree(thermal_event[2]);
+	kfree(thermal_event[1]);
+	kfree(thermal_event[0]);
+
+	return;
+}
+
+static int get_max_temp(struct platform_soc_data *pdata, long *cur_temp)
+{
+	int i, ret;
+	long temp;
+
+	/*
+	 * The SoC has two or more DTS placed, to determine the
+	 * temperature of the SoC. The hardware actions are taken
+	 * using T(DTS) which is MAX(T(DTS0), T(DTS1), ... T(DTSn))
+	 *
+	 * Do not report error, as long as we can read at least
+	 * one DTS correctly.
+	 */
+	ret = show_temp(pdata->tzd[0], cur_temp);
+	if (ret)
+		return ret;
+
+	for (i = 1; i < SOC_THERMAL_SENSORS; i++) {
+		ret = show_temp(pdata->tzd[i], &temp);
+		if (ret)
+			goto fail_safe;
+
+		if (temp > *cur_temp)
+			*cur_temp = temp;
+	}
+
+fail_safe:
+	/*
+	 * We have one valid DTS temperature; Use that,
+	 * instead of reporting error.
+	 */
+	return 0;
+}
+
+static irqreturn_t soc_dts_intrpt(int irq, void *dev_data)
+{
+	u32 irq_sts, cur_sts;
+	int i, ret, event, level = -1;
+	long cur_temp;
+	struct thermal_zone_device *tzd;
+	struct platform_soc_data *pdata = (struct platform_soc_data *)dev_data;
+
+	if (!pdata || !pdata->tzd[0])
+		return IRQ_NONE;
+
+	mutex_lock(&thrm_update_lock);
+
+	tzd = pdata->tzd[0];
+
+	irq_sts = read_soc_reg(TRIP_STATUS_RW);
+	cur_sts = read_soc_reg(TRIP_STATUS_RO);
+
+	for (i = 0; i < SOC_THERMAL_TRIPS; i++) {
+		if (irq_sts & (1 << i)) {
+			level = i;
+			event = !!(cur_sts & (1 << i));
+			/* Clear the status bit by writing 1 */
+			irq_sts |= (1 << i);
+			break;
+		}
+	}
+
+	/* level == -1, indicates an invalid event */
+	if (level == -1) {
+		dev_err(&tzd->device, "Invalid event from SoC DTS\n");
+		goto exit;
+	}
+
+	ret = get_max_temp(pdata, &cur_temp);
+	if (ret) {
+		dev_err(&tzd->device, "Cannot read SoC DTS temperature\n");
+		goto exit;
+	}
+
+	/* Notify using UEvent */
+	notify_thermal_event(tzd, cur_temp, event, level);
+
+	/* Clear the status bits */
+	write_soc_reg(TRIP_STATUS_RW, irq_sts);
+
+exit:
+	mutex_unlock(&thrm_update_lock);
+	return IRQ_HANDLED;
+}
+
+static struct thermal_zone_device_ops tzd_ops = {
+	.get_temp = show_temp,
+	.get_trip_type = show_trip_type,
+	.get_trip_temp = show_trip_temp,
+	.set_trip_temp = store_trip_temp,
+	.get_trip_hyst = show_trip_hyst,
+	.set_trip_hyst = store_trip_hyst,
+};
+
+static struct thermal_cooling_device_ops soc_cooling_ops = {
+	.get_max_state = soc_get_max_state,
+	.get_cur_state = soc_get_cur_state,
+	.set_cur_state = soc_set_cur_state,
+#ifdef CONFIG_DEBUG_THERMAL
+	.get_force_state_override = soc_get_force_state_override,
+	.set_force_state_override = soc_set_force_state_override,
+#endif
+};
+
+/*********************************************************************
+ *		Driver initialization and finalization
+ *********************************************************************/
+
+static irqreturn_t soc_dts_intrpt_handler(int irq, void *dev_data)
+{
+	return IRQ_WAKE_THREAD;
+}
+
+static int soc_thermal_probe(struct platform_device *pdev)
+{
+	struct platform_soc_data *pdata;
+	int i, ret;
+	u32 eax, edx;
+	static char *name[SOC_THERMAL_SENSORS] = {"SoC_DTS0", "SoC_DTS1"};
+
+	/*
+	 * Register to configure floor frequency for DT done
+	 * using shadow register for ANN and TNG, Register address
+	 * chosen based on cpu model.[Refer:HSD:4380040]
+	 */
+	initialize_floor_reg_addr();
+
+	pdata = kzalloc(sizeof(struct platform_soc_data), GFP_KERNEL);
+	if (!pdata)
+		return -ENOMEM;
+
+	ret = rdmsr_safe_on_cpu(0, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
+	if (ret) {
+		tjmax_temp = TJMAX_TEMP;
+		dev_err(&pdev->dev, "TjMax read from MSR %x failed, error:%d\n",
+				MSR_IA32_TEMPERATURE_TARGET, ret);
+	} else {
+		tjmax_temp = (eax >> 16) & 0xff;
+		dev_dbg(&pdev->dev, "TjMax is %d degrees C\n", tjmax_temp);
+	}
+
+	/* Register each sensor with the generic thermal framework */
+	for (i = 0; i < SOC_THERMAL_SENSORS; i++) {
+		pdata->tzd[i] = thermal_zone_device_register(name[i],
+					SOC_THERMAL_TRIPS, DTS_TRIP_RW,
+					initialize_sensor(i),
+					&tzd_ops, NULL, 0, 0);
+		if (IS_ERR(pdata->tzd[i])) {
+			ret = PTR_ERR(pdata->tzd[i]);
+			dev_err(&pdev->dev, "tzd register failed: %d\n", ret);
+			goto exit_reg;
+		}
+	}
+
+	/* Register a cooling device for PL1 (power limit) control */
+	pdata->soc_cdev = thermal_cooling_device_register("SoC",
+						initialize_cdev(pdev),
+						&soc_cooling_ops);
+	if (IS_ERR(pdata->soc_cdev)) {
+		ret = PTR_ERR(pdata->soc_cdev);
+		pdata->soc_cdev = NULL;
+		goto exit_reg;
+	}
+
+	platform_set_drvdata(pdev, pdata);
+
+	ret = platform_get_irq(pdev, 0);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "platform_get_irq failed:%d\n", ret);
+		goto exit_cdev;
+	}
+
+	pdata->irq = ret;
+
+	/* Register for Interrupt Handler */
+	ret = request_threaded_irq(pdata->irq, soc_dts_intrpt_handler,
+						soc_dts_intrpt,
+						IRQF_TRIGGER_RISING,
+						DRIVER_NAME, pdata);
+	if (ret) {
+		dev_err(&pdev->dev, "request_threaded_irq failed:%d\n", ret);
+		goto exit_cdev;
+	}
+
+	/* Enable DTS0 and DTS1 */
+	enable_soc_dts();
+
+	create_soc_dts_debugfs();
+
+	return 0;
+
+exit_cdev:
+	thermal_cooling_device_unregister(pdata->soc_cdev);
+exit_reg:
+	while (--i >= 0) {
+		struct thermal_device_info *td_info = pdata->tzd[i]->devdata;
+		kfree(td_info);
+		thermal_zone_device_unregister(pdata->tzd[i]);
+	}
+	platform_set_drvdata(pdev, NULL);
+	kfree(pdata);
+	return ret;
+}
+
+static int soc_thermal_remove(struct platform_device *pdev)
+{
+	int i;
+	struct platform_soc_data *pdata = platform_get_drvdata(pdev);
+
+	/* Unregister each sensor with the generic thermal framework */
+	for (i = 0; i < SOC_THERMAL_SENSORS; i++) {
+		struct thermal_device_info *td_info = pdata->tzd[i]->devdata;
+		kfree(td_info);
+		thermal_zone_device_unregister(pdata->tzd[i]);
+	}
+	thermal_cooling_device_unregister(pdata->soc_cdev);
+	platform_set_drvdata(pdev, NULL);
+	free_irq(pdata->irq, pdata);
+	kfree(pdata);
+
+	remove_soc_dts_debugfs();
+
+	return 0;
+}
+
+static const struct platform_device_id therm_id_table[] = {
+	{ DRIVER_NAME, 1},
+};
+
+static struct platform_driver soc_thermal_driver = {
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = DRIVER_NAME,
+	},
+	.probe = soc_thermal_probe,
+	.remove = soc_thermal_remove,
+	.id_table = therm_id_table,
+};
+
+static int __init soc_thermal_module_init(void)
+{
+	return platform_driver_register(&soc_thermal_driver);
+}
+
+static void __exit soc_thermal_module_exit(void)
+{
+	platform_driver_unregister(&soc_thermal_driver);
+}
+
+module_init(soc_thermal_module_init);
+module_exit(soc_thermal_module_exit);
+
+MODULE_AUTHOR("Shravan B M <shravan.k.b.m@intel.com>");
+MODULE_DESCRIPTION("Intel SoC Thermal Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index d755440..fc7ac1c 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -563,7 +563,7 @@
 {
 	struct thermal_zone_device *tz = to_thermal_zone(dev);
 	int trip, ret;
-	unsigned long temperature;
+	long temperature;
 
 	if (!tz->ops->set_trip_temp)
 		return -EPERM;
@@ -707,6 +707,82 @@
 }
 
 static ssize_t
+slope_store(struct device *dev, struct device_attribute *attr,
+		    const char *buf, size_t count)
+{
+	int ret;
+	long slope;
+	struct thermal_zone_device *tz = to_thermal_zone(dev);
+
+	if (!tz->ops->set_slope)
+		return -EPERM;
+
+	if (kstrtol(buf, 10, &slope))
+		return -EINVAL;
+
+	ret = tz->ops->set_slope(tz, slope);
+	if (ret)
+		return ret;
+
+	return count;
+}
+
+static ssize_t
+slope_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	int ret;
+	long slope;
+	struct thermal_zone_device *tz = to_thermal_zone(dev);
+
+	if (!tz->ops->get_slope)
+		return -EINVAL;
+
+	ret = tz->ops->get_slope(tz, &slope);
+	if (ret)
+		return ret;
+
+	return sprintf(buf, "%ld\n", slope);
+}
+
+static ssize_t
+intercept_store(struct device *dev, struct device_attribute *attr,
+		    const char *buf, size_t count)
+{
+	int ret;
+	long intercept;
+	struct thermal_zone_device *tz = to_thermal_zone(dev);
+
+	if (!tz->ops->set_intercept)
+		return -EPERM;
+
+	if (kstrtol(buf, 10, &intercept))
+		return -EINVAL;
+
+	ret = tz->ops->set_intercept(tz, intercept);
+	if (ret)
+		return ret;
+
+	return count;
+}
+
+static ssize_t
+intercept_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	int ret;
+	long intercept;
+	struct thermal_zone_device *tz = to_thermal_zone(dev);
+
+	if (!tz->ops->get_intercept)
+		return -EINVAL;
+
+	ret = tz->ops->get_intercept(tz, &intercept);
+	if (ret)
+		return ret;
+
+	return sprintf(buf, "%ld\n", intercept);
+}
+
+static ssize_t
 policy_store(struct device *dev, struct device_attribute *attr,
 		    const char *buf, size_t count)
 {
@@ -765,6 +841,9 @@
 static DEVICE_ATTR(temp, 0444, temp_show, NULL);
 static DEVICE_ATTR(mode, 0644, mode_show, mode_store);
 static DEVICE_ATTR(passive, S_IRUGO | S_IWUSR, passive_show, passive_store);
+static DEVICE_ATTR(slope, S_IRUGO | S_IWUSR, slope_show, slope_store);
+static DEVICE_ATTR(intercept,
+		S_IRUGO | S_IWUSR, intercept_show, intercept_store);
 static DEVICE_ATTR(policy, S_IRUGO | S_IWUSR, policy_show, policy_store);
 
 /* sys I/F for cooling device */
@@ -794,6 +873,43 @@
 	return sprintf(buf, "%ld\n", state);
 }
 
+/*
+ * Sysfs to read the mapped values and to override
+ * the default values mapped to each state during runtime.
+ */
+static ssize_t
+thermal_cooling_device_force_state_override_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct thermal_cooling_device *cdev = to_cooling_device(dev);
+
+	return cdev->ops->get_force_state_override(cdev, buf);
+}
+
+static ssize_t
+thermal_cooling_device_force_state_override_store(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t count)
+{
+	int ret;
+	struct thermal_cooling_device *cdev = to_cooling_device(dev);
+
+	ret = cdev->ops->set_force_state_override(cdev, (char *) buf);
+	if (ret)
+		return ret;
+
+	return count;
+}
+
+static ssize_t
+thermal_cooling_device_available_states_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct thermal_cooling_device *cdev = to_cooling_device(dev);
+
+	return cdev->ops->get_available_states(cdev, buf);
+}
+
 static ssize_t
 thermal_cooling_device_cur_state_show(struct device *dev,
 				      struct device_attribute *attr, char *buf)
@@ -836,6 +952,11 @@
 static DEVICE_ATTR(cur_state, 0644,
 		   thermal_cooling_device_cur_state_show,
 		   thermal_cooling_device_cur_state_store);
+static DEVICE_ATTR(force_state_override, 0644,
+		thermal_cooling_device_force_state_override_show,
+		thermal_cooling_device_force_state_override_store);
+static DEVICE_ATTR(available_states, 0444,
+		thermal_cooling_device_available_states_show, NULL);
 
 static ssize_t
 thermal_cooling_device_trip_point_show(struct device *dev,
@@ -1017,7 +1138,7 @@
 		goto free_temp_mem;
 
 	if (tz->ops->get_crit_temp) {
-		unsigned long temperature;
+		long temperature;
 		if (!tz->ops->get_crit_temp(tz, &temperature)) {
 			snprintf(temp->temp_crit.name,
 				 sizeof(temp->temp_crit.name),
@@ -1184,7 +1305,8 @@
 	if (result)
 		goto release_idr;
 
-	sprintf(dev->attr_name, "cdev%d_trip_point", dev->id);
+	snprintf(dev->attr_name, THERMAL_NAME_LENGTH, "cdev%d_trip_point",
+								dev->id);
 	sysfs_attr_init(&dev->attr.attr);
 	dev->attr.attr.name = dev->attr_name;
 	dev->attr.attr.mode = 0444;
@@ -1306,7 +1428,7 @@
 	struct thermal_cooling_device *cdev;
 	int result;
 
-	if (type && strlen(type) >= THERMAL_NAME_LENGTH)
+	if (!type || strlen(type) >= THERMAL_NAME_LENGTH)
 		return ERR_PTR(-EINVAL);
 
 	if (!ops || !ops->get_max_state || !ops->get_cur_state ||
@@ -1339,21 +1461,32 @@
 	}
 
 	/* sys I/F */
-	if (type) {
-		result = device_create_file(&cdev->device, &dev_attr_cdev_type);
-		if (result)
-			goto unregister;
-	}
+	result = device_create_file(&cdev->device, &dev_attr_cdev_type);
+	if (result)
+		goto unregister;
 
 	result = device_create_file(&cdev->device, &dev_attr_max_state);
 	if (result)
-		goto unregister;
+		goto remove_type;
 
 	result = device_create_file(&cdev->device, &dev_attr_cur_state);
 	if (result)
-		goto unregister;
+		goto remove_max_state;
+
+	if (ops->get_force_state_override) {
+		result = device_create_file(&cdev->device,
+					&dev_attr_force_state_override);
+		if (result)
+			goto remove_cur_state;
+	}
 
 	/* Add 'this' new cdev to the global cdev list */
+	if (ops->get_available_states) {
+		result = device_create_file(&cdev->device,
+						&dev_attr_available_states);
+		if (result)
+			goto remove_force_override;
+	}
 	mutex_lock(&thermal_list_lock);
 	list_add(&cdev->node, &thermal_cdev_list);
 	mutex_unlock(&thermal_list_lock);
@@ -1363,6 +1496,16 @@
 
 	return cdev;
 
+remove_force_override:
+	if (cdev->ops->get_force_state_override)
+		device_remove_file(&cdev->device,
+				&dev_attr_force_state_override);
+remove_cur_state:
+	device_remove_file(&cdev->device, &dev_attr_cur_state);
+remove_max_state:
+	device_remove_file(&cdev->device, &dev_attr_max_state);
+remove_type:
+	device_remove_file(&cdev->device, &dev_attr_cdev_type);
 unregister:
 	release_idr(&thermal_cdev_idr, &thermal_idr_lock, cdev->id);
 	device_unregister(&cdev->device);
@@ -1419,11 +1562,15 @@
 
 	mutex_unlock(&thermal_list_lock);
 
-	if (cdev->type[0])
-		device_remove_file(&cdev->device, &dev_attr_cdev_type);
+	device_remove_file(&cdev->device, &dev_attr_cdev_type);
 	device_remove_file(&cdev->device, &dev_attr_max_state);
 	device_remove_file(&cdev->device, &dev_attr_cur_state);
-
+	if (cdev->ops->get_force_state_override)
+		device_remove_file(&cdev->device,
+					&dev_attr_force_state_override);
+	if (cdev->ops->get_available_states)
+		device_remove_file(&cdev->device,
+					&dev_attr_available_states);
 	release_idr(&thermal_cdev_idr, &thermal_idr_lock, cdev->id);
 	device_unregister(&cdev->device);
 	return;
@@ -1615,7 +1762,7 @@
 	int count;
 	int passive = 0;
 
-	if (type && strlen(type) >= THERMAL_NAME_LENGTH)
+	if (!type || strlen(type) >= THERMAL_NAME_LENGTH)
 		return ERR_PTR(-EINVAL);
 
 	if (trips > THERMAL_MAX_TRIPS || trips < 0 || mask >> trips)
@@ -1658,11 +1805,9 @@
 	}
 
 	/* sys I/F */
-	if (type) {
-		result = device_create_file(&tz->device, &dev_attr_type);
-		if (result)
-			goto unregister;
-	}
+	result = device_create_file(&tz->device, &dev_attr_type);
+	if (result)
+		goto unregister;
 
 	result = device_create_file(&tz->device, &dev_attr_temp);
 	if (result)
@@ -1690,10 +1835,25 @@
 			goto unregister;
 	}
 
+	/* Create Sysfs for slope/intercept values */
+	if (tz->ops->get_slope) {
+		result = device_create_file(&tz->device, &dev_attr_slope);
+		if (result)
+			goto unregister;
+	}
+
+	if (tz->ops->get_intercept) {
+		result = device_create_file(&tz->device, &dev_attr_intercept);
+		if (result)
+			goto unregister;
+	}
+
 #ifdef CONFIG_THERMAL_EMULATION
-	result = device_create_file(&tz->device, &dev_attr_emul_temp);
-	if (result)
-		goto unregister;
+	if (tz->ops->set_emul_temp) {
+		result = device_create_file(&tz->device, &dev_attr_emul_temp);
+		if (result)
+			goto unregister;
+	}
 #endif
 	/* Create policy attribute */
 	result = device_create_file(&tz->device, &dev_attr_policy);
@@ -1723,7 +1883,13 @@
 
 	INIT_DELAYED_WORK(&(tz->poll_queue), thermal_zone_device_check);
 
-	thermal_zone_device_update(tz);
+	/*
+	 * Emulation temperature may need user land to provide
+	 * temperature data. In that case, do not try to update
+	 * this 'tzd' during registration.
+	 */
+	if (!tz->ops->set_emul_temp)
+		thermal_zone_device_update(tz);
 
 	if (!result)
 		return tz;
@@ -1784,11 +1950,19 @@
 
 	thermal_zone_device_set_polling(tz, 0);
 
-	if (tz->type[0])
-		device_remove_file(&tz->device, &dev_attr_type);
+	device_remove_file(&tz->device, &dev_attr_type);
 	device_remove_file(&tz->device, &dev_attr_temp);
 	if (tz->ops->get_mode)
 		device_remove_file(&tz->device, &dev_attr_mode);
+	if (tz->ops->get_slope)
+		device_remove_file(&tz->device, &dev_attr_slope);
+	if (tz->ops->get_intercept)
+		device_remove_file(&tz->device, &dev_attr_intercept);
+#ifdef CONFIG_THERMAL_EMULATION
+	if (tz->ops->set_emul_temp)
+		device_remove_file(&tz->device, &dev_attr_emul_temp);
+#endif
+
 	device_remove_file(&tz->device, &dev_attr_policy);
 	remove_trip_attrs(tz);
 	tz->governor = NULL;
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index 682210d..4fc32c8 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -636,6 +636,7 @@
 	.name		= "xenboot",
 	.write		= xenboot_write_console,
 	.flags		= CON_PRINTBUFFER | CON_BOOT | CON_ANYTIME,
+	.index		= -1,
 };
 #endif	/* CONFIG_EARLY_PRINTK */
 
diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c
index 3396eb9..ac27671 100644
--- a/drivers/tty/hvc/hvsi_lib.c
+++ b/drivers/tty/hvc/hvsi_lib.c
@@ -341,8 +341,8 @@
 
 	pr_devel("HVSI@%x:   ... waiting handshake\n", pv->termno);
 
-	/* Try for up to 200s */
-	for (timeout = 0; timeout < 20; timeout++) {
+	/* Try for up to 400ms */
+	for (timeout = 0; timeout < 40; timeout++) {
 		if (pv->established)
 			goto established;
 		if (!hvsi_get_packet(pv))
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 6422390..a5a2be5 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -42,6 +42,8 @@
 #include <linux/sched.h>
 #include <linux/interrupt.h>
 #include <linux/tty.h>
+#include <linux/timer.h>
+#include <linux/wait.h>
 #include <linux/ctype.h>
 #include <linux/mm.h>
 #include <linux/string.h>
@@ -66,11 +68,17 @@
 static int debug;
 module_param(debug, int, 0600);
 
-/* Defaults: these are from the specification */
+#define GSMDBG_VERBOSE_PACKET_REPORT(x) ((x) &  1)
+#define GSMDBG_FORCE_CARRIER(x)         ((x) &  2)
+#define GSMDBG_DATA_FULL_REPORT(x)      ((x) &  4)
+#define GSMDBG_DLCI_STREAM_REPORT(x)    ((x) &  8)
+#define GSMDBG_DLCI_DATA_REPORT(x)      ((x) & 16)
+#define GSMDBG_DATA_LEN_REPORT(x)       ((x) & 32)
 
-#define T1	10		/* 100mS */
-#define T2	34		/* 333mS */
-#define N2	3		/* Retry 3 times */
+/* unit is 1/100 second according to 27.010 spec */
+#define T1	254
+#define T2	255
+#define N2	3
 
 /* Use long timers for testing at low speed with debug on */
 #ifdef DEBUG_TIMING
@@ -78,14 +86,12 @@
 #define T2	200
 #endif
 
-/*
- * Semi-arbitrary buffer size limits. 0710 is normally run with 32-64 byte
- * limits so this is plenty
- */
-#define MAX_MRU 1500
-#define MAX_MTU 1500
+#define MAX_MRU 32768 /* In specification 3GPP TS 27.010, 5.7.2 */
+#define MAX_MTU 32768 /* In specification 3GPP TS 27.010, 5.7.2 */
 #define	GSM_NET_TX_TIMEOUT (HZ*10)
 
+#define TX_SIZE		4096
+
 /**
  *	struct gsm_mux_net	-	network interface
  *	@struct gsm_dlci* dlci
@@ -118,12 +124,7 @@
 
 /*
  *	Each active data link has a gsm_dlci structure associated which ties
- *	the link layer to an optional tty (if the tty side is open). To avoid
- *	complexity right now these are only ever freed up when the mux is
- *	shut down.
- *
- *	At the moment we don't free DLCI objects until the mux is torn down
- *	this avoid object life time issues but might be worth review later.
+ *	the link layer to an optional tty (if the tty side is open).
  */
 
 struct gsm_dlci {
@@ -134,6 +135,11 @@
 #define DLCI_OPENING		1	/* Sending SABM not seen UA */
 #define DLCI_OPEN		2	/* SABM/UA complete */
 #define DLCI_CLOSING		3	/* Sending DISC not seen UA/DM */
+#define DLCI_HANGUP		4	/* HANGUP received  */
+	struct kref ref;		/* freed from port or mux close */
+
+	spinlock_t gsmtty_lock;		/* Process multiple open of gsmtty */
+	int gsmtty_count;
 	struct mutex mutex;
 
 	/* Link layer */
@@ -146,15 +152,19 @@
 	struct kfifo _fifo;	/* For new fifo API porting only */
 	int adaption;		/* Adaption layer in use */
 	int prev_adaption;
+	struct mutex rx_mutex;	/* Mutex when adaption change */
 	u32 modem_rx;		/* Our incoming virtual modem lines */
 	u32 modem_tx;		/* Our outgoing modem lines */
 	int dead;		/* Refuse re-open */
 	/* Flow control */
 	int throttled;		/* Private copy of throttle state */
 	int constipated;	/* Throttle status for outgoing */
+	int need_tty_wakeup;	/* If wakeup of TTY is needed */
 	/* Packetised I/O */
 	struct sk_buff *skb;	/* Frame being sent */
 	struct sk_buff_head skb_list;	/* Queued frames */
+#define TXLIST_THRESH_HI		8192
+#define TXLIST_THRESH_LO		2048
 	/* Data handling callback */
 	void (*data)(struct gsm_dlci *dlci, u8 *data, int len);
 	void (*prev_data)(struct gsm_dlci *dlci, u8 *data, int len);
@@ -194,6 +204,7 @@
 struct gsm_mux {
 	struct tty_struct *tty;		/* The tty our ldisc is bound to */
 	spinlock_t lock;
+	struct mutex mutex;
 	unsigned int num;
 	struct kref ref;
 
@@ -237,6 +248,7 @@
 	unsigned int mtu;
 	int initiator;			/* Did we initiate connection */
 	int dead;			/* Has the mux been shut down */
+	int tty_dead;
 	struct gsm_dlci *dlci[NUM_DLCI];
 	int constipated;		/* Asked by remote to shut up */
 
@@ -257,6 +269,8 @@
 	u8 ftype;		/* UI or UIH */
 	int t1, t2;		/* Timers in 1/100th of a sec */
 	int n2;			/* Retry count */
+	int clocal;		/* CLOCAL default state */
+	int burst;		/* Burst mode support */
 
 	/* Statistics (not currently exposed) */
 	unsigned long bad_fcs;
@@ -460,7 +474,7 @@
 static void gsm_print_packet(const char *hdr, int addr, int cr,
 					u8 control, const u8 *data, int dlen)
 {
-	if (!(debug & 1))
+	if (!GSMDBG_VERBOSE_PACKET_REPORT(debug))
 		return;
 
 	pr_info("%s %d) %c: ", hdr, addr, "RC"[cr]);
@@ -488,7 +502,8 @@
 		if (!(control & 0x01)) {
 			pr_cont("I N(S)%d N(R)%d",
 				(control & 0x0E) >> 1, (control & 0xE0) >> 5);
-		} else switch (control & 0x0F) {
+		} else
+			switch (control & 0x0F) {
 			case RR:
 				pr_cont("RR(%d)", (control & 0xE0) >> 5);
 				break;
@@ -500,7 +515,7 @@
 				break;
 			default:
 				pr_cont("[%02X]", control);
-		}
+			}
 	}
 
 	if (control & PF)
@@ -657,6 +672,7 @@
 								GFP_ATOMIC);
 	if (m == NULL)
 		return NULL;
+
 	m->data = m->buffer + HDR_LEN - 1;	/* Allow for FCS */
 	m->len = len;
 	m->addr = addr;
@@ -699,7 +715,7 @@
 			len = msg->len + 2;
 		}
 
-		if (debug & 4)
+		if (GSMDBG_DATA_FULL_REPORT(debug))
 			print_hex_dump_bytes("gsm_data_kick: ",
 					     DUMP_PREFIX_OFFSET,
 					     gsm->txframe, len);
@@ -711,7 +727,8 @@
 		gsm->tx_bytes -= msg->len;
 		/* For a burst of frames skip the extra SOF within the
 		   burst */
-		skip_sof = 1;
+		if (gsm->burst)
+			skip_sof = 1;
 
 		list_del(&msg->list);
 		kfree(msg);
@@ -803,41 +820,38 @@
 {
 	struct gsm_msg *msg;
 	u8 *dp;
-	int len, total_size, size;
+	int len, size;
 	int h = dlci->adaption - 1;
 
-	total_size = 0;
-	while(1) {
-		len = kfifo_len(dlci->fifo);
-		if (len == 0)
-			return total_size;
+	len = kfifo_len(dlci->fifo);
+	if (len == 0)
+		return 0;
 
-		/* MTU/MRU count only the data bits */
-		if (len > gsm->mtu)
-			len = gsm->mtu;
+	/* MTU/MRU count only the data bits */
+	if (len > gsm->mtu)
+		len = gsm->mtu;
 
-		size = len + h;
+	size = len + h;
 
-		msg = gsm_data_alloc(gsm, dlci->addr, size, gsm->ftype);
-		/* FIXME: need a timer or something to kick this so it can't
-		   get stuck with no work outstanding and no buffer free */
-		if (msg == NULL)
-			return -ENOMEM;
-		dp = msg->data;
-		switch (dlci->adaption) {
-		case 1:	/* Unstructured */
-			break;
-		case 2:	/* Unstructed with modem bits. Always one byte as we never
-			   send inline break data */
-			*dp++ = gsm_encode_modem(dlci);
-			break;
-		}
-		WARN_ON(kfifo_out_locked(dlci->fifo, dp , len, &dlci->lock) != len);
-		__gsm_data_queue(dlci, msg);
-		total_size += size;
+	msg = gsm_data_alloc(gsm, dlci->addr, size, gsm->ftype);
+	/* FIXME: need a timer or something to kick this so it can't
+	   get stuck with no work outstanding and no buffer free */
+	if (msg == NULL)
+		return -ENOMEM;
+	dp = msg->data;
+	switch (dlci->adaption) {
+	case 1:	/* Unstructured */
+		break;
+	case 2:	/* Unstructed with modem bits. Always one byte as we never
+		   send inline break data */
+		*dp += gsm_encode_modem(dlci);
+		len--;
+		break;
 	}
+	WARN_ON(kfifo_out_locked(dlci->fifo, dp , len, &dlci->lock) != len);
+	__gsm_data_queue(dlci, msg);
 	/* Bytes of data we used up */
-	return total_size;
+	return size;
 }
 
 /**
@@ -870,6 +884,8 @@
 		dlci->skb = skb_dequeue_tail(&dlci->skb_list);
 		if (dlci->skb == NULL)
 			return 0;
+		if (skb_queue_len(&dlci->skb_list) < TXLIST_THRESH_LO)
+			netif_wake_queue(dlci->net);
 		first = 1;
 	}
 	len = dlci->skb->len + overhead;
@@ -948,6 +964,10 @@
 			len = gsm_dlci_data_output_framed(gsm, dlci);
 		if (len < 0)
 			break;
+		if (dlci->need_tty_wakeup) {
+			tty_port_tty_wakeup(&dlci->port);
+			dlci->need_tty_wakeup = 0;
+		}
 		/* DLCI empty - try the next */
 		if (len == 0)
 			i++;
@@ -968,10 +988,12 @@
 	unsigned long flags;
 	int sweep;
 
-	if (dlci->constipated) 
+	if (dlci->constipated)
 		return;
 
 	spin_lock_irqsave(&dlci->gsm->tx_lock, flags);
+	if (unlikely(dlci->gsm->dead))
+		goto out;
 	/* If we have nothing running then we need to fire up */
 	sweep = (dlci->gsm->tx_bytes < TX_THRESH_LO);
 	if (dlci->gsm->tx_bytes == 0) {
@@ -981,7 +1003,8 @@
 			gsm_dlci_data_output(dlci->gsm, dlci);
 	}
 	if (sweep)
- 		gsm_dlci_data_sweep(dlci->gsm);
+		gsm_dlci_data_sweep(dlci->gsm);
+out:
 	spin_unlock_irqrestore(&dlci->gsm->tx_lock, flags);
 }
 
@@ -1138,7 +1161,7 @@
 static void gsm_control_rls(struct gsm_mux *gsm, u8 *data, int clen)
 {
 	struct tty_port *port;
-	unsigned int addr = 0 ;
+	unsigned int addr = 0;
 	u8 bits;
 	int len = clen;
 	u8 *dp = data;
@@ -1212,16 +1235,20 @@
 		break;
 	case CMD_FCON:
 		/* Modem can accept data again */
-		gsm->constipated = 0;
 		gsm_control_reply(gsm, CMD_FCON, NULL, 0);
 		/* Kick the link in case it is idling */
 		spin_lock_irqsave(&gsm->tx_lock, flags);
+		gsm->constipated = 0;
 		gsm_data_kick(gsm);
+		if (gsm->tx_bytes < TX_THRESH_LO)
+			gsm_dlci_data_sweep(gsm);
 		spin_unlock_irqrestore(&gsm->tx_lock, flags);
 		break;
 	case CMD_FCOFF:
 		/* Modem wants us to STFU */
+		spin_lock_irqsave(&gsm->tx_lock, flags);
 		gsm->constipated = 1;
+		spin_unlock_irqrestore(&gsm->tx_lock, flags);
 		gsm_control_reply(gsm, CMD_FCOFF, NULL, 0);
 		break;
 	case CMD_MSC:
@@ -1358,12 +1385,17 @@
 	if (ctrl == NULL)
 		return NULL;
 retry:
-	wait_event(gsm->event, gsm->pending_cmd == NULL);
+	wait_event(gsm->event, gsm->pending_cmd == NULL || gsm->dead);
 	spin_lock_irqsave(&gsm->control_lock, flags);
-	if (gsm->pending_cmd != NULL) {
+	if ((gsm->pending_cmd != NULL) && !gsm->dead) {
 		spin_unlock_irqrestore(&gsm->control_lock, flags);
 		goto retry;
 	}
+	if (gsm->dead) {
+		spin_unlock_irqrestore(&gsm->control_lock, flags);
+		kfree(ctrl);
+		return NULL;
+	}
 	ctrl->cmd = command;
 	ctrl->data = data;
 	ctrl->len = clen;
@@ -1387,8 +1419,18 @@
 
 static int gsm_control_wait(struct gsm_mux *gsm, struct gsm_control *control)
 {
+	unsigned long flags;
 	int err;
-	wait_event(gsm->event, control->done == 1);
+	wait_event(gsm->event, control->done == 1 || gsm->dead);
+	if (gsm->dead) {
+		spin_lock_irqsave(&gsm->control_lock, flags);
+		if (control == gsm->pending_cmd) {
+			del_timer(&gsm->t2_timer);
+			gsm->pending_cmd = NULL;
+		}
+		control->error = -ETIMEDOUT;
+		spin_unlock_irqrestore(&gsm->control_lock, flags);
+	}
 	err = control->error;
 	kfree(control);
 	return err;
@@ -1414,7 +1456,7 @@
 static void gsm_dlci_close(struct gsm_dlci *dlci)
 {
 	del_timer(&dlci->t1);
-	if (debug & 8)
+	if (GSMDBG_DLCI_STREAM_REPORT(debug))
 		pr_debug("DLCI %d goes closed.\n", dlci->addr);
 	dlci->state = DLCI_CLOSED;
 	if (dlci->addr != 0) {
@@ -1441,7 +1483,7 @@
 	del_timer(&dlci->t1);
 	/* This will let a tty open continue */
 	dlci->state = DLCI_OPEN;
-	if (debug & 8)
+	if (GSMDBG_DLCI_STREAM_REPORT(debug))
 		pr_debug("DLCI %d goes open.\n", dlci->addr);
 	wake_up(&dlci->gsm->event);
 }
@@ -1517,7 +1559,8 @@
 static void gsm_dlci_begin_close(struct gsm_dlci *dlci)
 {
 	struct gsm_mux *gsm = dlci->gsm;
-	if (dlci->state == DLCI_CLOSED || dlci->state == DLCI_CLOSING)
+	if (dlci->state == DLCI_CLOSED || dlci->state == DLCI_CLOSING ||
+	    dlci->state == DLCI_HANGUP)
 		return;
 	dlci->retries = gsm->n2;
 	dlci->state = DLCI_CLOSING;
@@ -1544,33 +1587,33 @@
 	unsigned int modem = 0;
 	int len = clen;
 
-	if (debug & 16)
-		pr_debug("%d bytes for tty\n", len);
-	switch (dlci->adaption)  {
-	/* Unsupported types */
-	/* Packetised interruptible data */
-	case 4:
-		break;
-	/* Packetised uininterruptible voice/data */
-	case 3:
-		break;
-	/* Asynchronous serial with line state in each frame */
-	case 2:
-		while (gsm_read_ea(&modem, *data++) == 0) {
-			len--;
-			if (len == 0)
-				return;
-		}
-		tty = tty_port_tty_get(port);
-		if (tty) {
-			gsm_process_modem(tty, dlci, modem, clen);
-			tty_kref_put(tty);
-		}
-	/* Line state will go via DLCI 0 controls only */
-	case 1:
-	default:
-		tty_insert_flip_string(port, data, len);
-		tty_flip_buffer_push(port);
+	if (GSMDBG_DLCI_DATA_REPORT(debug))
+		pr_debug("%s: %d bytes for tty\n", __func__, len);
+		switch (dlci->adaption)  {
+		/* Unsupported types */
+		/* Packetised interruptible data */
+		case 4:
+			break;
+		/* Packetised uininterruptible voice/data */
+		case 3:
+			break;
+		/* Asynchronous serial with line state in each frame */
+		case 2:
+			while (gsm_read_ea(&modem, *data++) == 0) {
+				len--;
+				if (len == 0)
+					return;
+			}
+			tty = tty_port_tty_get(port);
+			if (tty) {
+				gsm_process_modem(tty, dlci, modem, clen);
+				tty_kref_put(tty);
+			}
+		/* Line state will go via DLCI 0 controls only */
+		case 1:
+		default:
+			tty_insert_flip_string(port, data, len);
+			tty_flip_buffer_push(port);
 	}
 }
 
@@ -1629,10 +1672,13 @@
 	struct gsm_dlci *dlci = kzalloc(sizeof(struct gsm_dlci), GFP_ATOMIC);
 	if (dlci == NULL)
 		return NULL;
+	mutex_init(&dlci->rx_mutex);
 	spin_lock_init(&dlci->lock);
+	spin_lock_init(&dlci->gsmtty_lock);
+	kref_init(&dlci->ref);
 	mutex_init(&dlci->mutex);
 	dlci->fifo = &dlci->_fifo;
-	if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
+	if (kfifo_alloc(&dlci->_fifo, TX_SIZE, GFP_KERNEL) < 0) {
 		kfree(dlci);
 		return NULL;
 	}
@@ -1647,6 +1693,7 @@
 	dlci->addr = addr;
 	dlci->adaption = gsm->adaption;
 	dlci->state = DLCI_CLOSED;
+	dlci->net = NULL;	/* network not initially created */
 	if (addr)
 		dlci->data = gsm_dlci_data;
 	else
@@ -1704,11 +1751,7 @@
 		gsm_destroy_network(dlci);
 		mutex_unlock(&dlci->mutex);
 
-		/* tty_vhangup needs the tty_lock, so unlock and
-		   relock after doing the hangup. */
-		tty_unlock(tty);
-		tty_vhangup(tty);
-		tty_lock(tty);
+		tty_hangup(tty);
 		tty_port_tty_set(&dlci->port, NULL);
 		tty_kref_put(tty);
 	}
@@ -1740,15 +1783,16 @@
 
 	if ((gsm->control & ~PF) == UI)
 		gsm->fcs = gsm_fcs_add_block(gsm->fcs, gsm->buf, gsm->len);
-	if (gsm->encoding == 0){
-		/* WARNING: gsm->received_fcs is used for gsm->encoding = 0 only.
-		            In this case it contain the last piece of data
-		            required to generate final CRC */
+	if (gsm->encoding == 0) {
+		/* WARNING: gsm->received_fcs is used for
+		   gsm->encoding = 0 only.
+		   In this case it contain the last piece of data
+		   required to generate final CRC */
 		gsm->fcs = gsm_fcs_add(gsm->fcs, gsm->received_fcs);
 	}
 	if (gsm->fcs != GOOD_FCS) {
 		gsm->bad_fcs++;
-		if (debug & 4)
+		if (GSMDBG_DATA_FULL_REPORT(debug))
 			pr_debug("BAD FCS %02x\n", gsm->fcs);
 		return;
 	}
@@ -1822,7 +1866,10 @@
 			gsm_command(gsm, address, DM|PF);
 			return;
 		}
+		/* We must prevent from changing adaption while receiving */
+		mutex_lock(&dlci->rx_mutex);
 		dlci->data(dlci, gsm->buf, gsm->len);
+		mutex_unlock(&dlci->rx_mutex);
 		break;
 	default:
 		goto invalid;
@@ -1912,6 +1959,7 @@
 			gsm->state = GSM_SEARCH;
 			break;
 		}
+		pr_debug("wait for GSM0_SOF, while got 0x%x\n", (u32)c);
 		break;
 	}
 }
@@ -2009,6 +2057,38 @@
 	gsm->io_error++;
 }
 
+
+void gsm_closeall_dlci(struct gsm_mux *gsm)
+{
+	int i;
+	int t;
+	struct gsm_dlci *dlci;
+
+	/* Free up any link layer users */
+	for (i = NUM_DLCI-1; i >= 0; i--) {
+		dlci = gsm->dlci[i];
+		if (dlci) {
+			if (i != 0)
+				gsm_dlci_begin_close(dlci);
+			else {
+				dlci->dead = 1;
+				gsm_dlci_begin_close(dlci);
+				if (dlci->state == DLCI_HANGUP)
+					goto close_this_dlci;
+				t = wait_event_timeout(gsm->event,
+					   dlci->state == DLCI_CLOSED,
+					   gsm->t2 * HZ / 100);
+				if (!t) {
+					pr_info("%s: timeout dlci0 close",
+						__func__);
+close_this_dlci:
+					gsm_dlci_close(dlci);
+				}
+			}
+		}
+	}
+}
+
 /**
  *	gsm_cleanup_mux		-	generic GSM protocol cleanup
  *	@gsm: our mux
@@ -2016,50 +2096,42 @@
  *	Clean up the bits of the mux which are the same for all framing
  *	protocols. Remove the mux from the mux table, stop all the timers
  *	and then shut down each device hanging up the channels as we go.
+ *
+ *	RRG: FIXME: need to validate if starting close of other
+ *		dlci channels is really needed or can we revert to
+ *		upstream code. Need full testing cycle.
  */
 
 void gsm_cleanup_mux(struct gsm_mux *gsm)
 {
 	int i;
-	struct gsm_dlci *dlci = gsm->dlci[0];
 	struct gsm_msg *txq, *ntxq;
-	struct gsm_control *gc;
+	unsigned long flags;
 
 	gsm->dead = 1;
 
 	spin_lock(&gsm_mux_lock);
-	for (i = 0; i < MAX_MUX; i++) {
-		if (gsm_mux[i] == gsm) {
-			gsm_mux[i] = NULL;
-			break;
-		}
-	}
+	gsm_mux[gsm->num] = NULL;
 	spin_unlock(&gsm_mux_lock);
-	WARN_ON(i == MAX_MUX);
 
-	/* In theory disconnecting DLCI 0 is sufficient but for some
-	   modems this is apparently not the case. */
-	if (dlci) {
-		gc = gsm_control_send(gsm, CMD_CLD, NULL, 0);
-		if (gc)
-			gsm_control_wait(gsm, gc);
-	}
 	del_timer_sync(&gsm->t2_timer);
 	/* Now we are sure T2 has stopped */
-	if (dlci) {
-		dlci->dead = 1;
-		gsm_dlci_begin_close(dlci);
-		wait_event_interruptible(gsm->event,
-					dlci->state == DLCI_CLOSED);
-	}
-	/* Free up any link layer users */
-	for (i = 0; i < NUM_DLCI; i++)
+
+	if (!gsm->tty_dead)
+		gsm_closeall_dlci(gsm);
+
+	mutex_lock(&gsm->mutex);
+	for (i = NUM_DLCI-1; i >= 0; i--)
 		if (gsm->dlci[i])
 			gsm_dlci_release(gsm->dlci[i]);
+	mutex_unlock(&gsm->mutex);
+
+	spin_lock_irqsave(&gsm->tx_lock, flags);
 	/* Now wipe the queues */
 	list_for_each_entry_safe(txq, ntxq, &gsm->tx_list, list)
 		kfree(txq);
 	INIT_LIST_HEAD(&gsm->tx_list);
+	spin_unlock_irqrestore(&gsm->tx_lock, flags);
 }
 EXPORT_SYMBOL_GPL(gsm_cleanup_mux);
 
@@ -2111,6 +2183,35 @@
 EXPORT_SYMBOL_GPL(gsm_activate_mux);
 
 /**
+ *	gsm_mux_malloc		-
+ *	@size: bytes to allocate
+ *
+ *	Dedicated for gsm_mux buf/txframe malloc
+ */
+unsigned char *gsm_mux_buf_malloc(unsigned int size)
+{
+	if (size <= PAGE_SIZE)
+		return kmalloc(size, GFP_KERNEL);
+	else
+		return vmalloc(size);
+}
+
+/**
+ *	gsm_mux_buf_free		-
+ *	@size: bytes to free
+ *	@add: addr to free
+ *
+ *	Dedicated for gsm_mux buf/txframe free
+ */
+void gsm_mux_buf_free(unsigned int size, void *addr)
+{
+	if (size <= PAGE_SIZE)
+		kfree(addr);
+	else
+		vfree(addr);
+}
+
+/**
  *	gsm_free_mux		-	free up a mux
  *	@mux: mux to free
  *
@@ -2118,8 +2219,14 @@
  */
 void gsm_free_mux(struct gsm_mux *gsm)
 {
-	kfree(gsm->txframe);
-	kfree(gsm->buf);
+	if (gsm->buf != NULL)
+		gsm_mux_buf_free(gsm->mru + 1, gsm->buf);
+	if (gsm->txframe != NULL) {
+		if (gsm->encoding == 0)
+			gsm_mux_buf_free(gsm->mtu + 1, gsm->txframe);
+		else
+			gsm_mux_buf_free(2 * gsm->mtu + 2, gsm->txframe);
+	}
 	kfree(gsm);
 }
 EXPORT_SYMBOL_GPL(gsm_free_mux);
@@ -2141,6 +2248,13 @@
 	kref_get(&gsm->ref);
 }
 
+/**
+ *	mux_put		-	release a mux
+ *	@mux: mux to release
+ *
+ *	Dispose of allocated resources for a dead mux on release
+ *	from last client.
+ */
 static inline void mux_put(struct gsm_mux *gsm)
 {
 	kref_put(&gsm->ref, gsm_free_muxr);
@@ -2157,18 +2271,8 @@
 	struct gsm_mux *gsm = kzalloc(sizeof(struct gsm_mux), GFP_KERNEL);
 	if (gsm == NULL)
 		return NULL;
-	gsm->buf = kmalloc(MAX_MRU + 1, GFP_KERNEL);
-	if (gsm->buf == NULL) {
-		kfree(gsm);
-		return NULL;
-	}
-	gsm->txframe = kmalloc(2 * MAX_MRU + 2, GFP_KERNEL);
-	if (gsm->txframe == NULL) {
-		kfree(gsm->buf);
-		kfree(gsm);
-		return NULL;
-	}
 	spin_lock_init(&gsm->lock);
+	mutex_init(&gsm->mutex);
 	kref_init(&gsm->ref);
 	INIT_LIST_HEAD(&gsm->tx_list);
 
@@ -2176,11 +2280,15 @@
 	gsm->t2 = T2;
 	gsm->n2 = N2;
 	gsm->ftype = UIH;
+	gsm->initiator = 0;
 	gsm->adaption = 1;
 	gsm->encoding = 1;
 	gsm->mru = 64;	/* Default to encoding 1 so these should be 64 */
 	gsm->mtu = 64;
+	gsm->clocal = 1; /* Ignore CD (DV flags in MSC)*/
+	gsm->burst = 1; /* Support burst mode by default */
 	gsm->dead = 1;	/* Avoid early tty opens */
+	gsm->tty_dead = 0;
 
 	return gsm;
 }
@@ -2198,13 +2306,20 @@
 
 static int gsmld_output(struct gsm_mux *gsm, u8 *data, int len)
 {
+	if (!gsm->tty) {
+		WARN_ON(1);
+		return -ENXIO;
+	}
 	if (tty_write_room(gsm->tty) < len) {
 		set_bit(TTY_DO_WRITE_WAKEUP, &gsm->tty->flags);
 		return -ENOSPC;
 	}
-	if (debug & 4)
-		print_hex_dump_bytes("gsmld_output: ", DUMP_PREFIX_OFFSET,
+	if (GSMDBG_DATA_FULL_REPORT(debug))
+		print_hex_dump_bytes(__func__, DUMP_PREFIX_OFFSET,
 				     data, len);
+	else if (GSMDBG_DATA_LEN_REPORT(debug))
+		printk(KERN_ERR "n_gsm: >> %d bytes\n", len);
+
 	gsm->tty->ops->write(gsm->tty, data, len);
 	return len;
 }
@@ -2270,9 +2385,11 @@
 	char buf[64];
 	char flags;
 
-	if (debug & 4)
-		print_hex_dump_bytes("gsmld_receive: ", DUMP_PREFIX_OFFSET,
+	if (GSMDBG_DATA_FULL_REPORT(debug))
+		print_hex_dump_bytes(__func__, DUMP_PREFIX_OFFSET,
 				     cp, count);
+	else if (GSMDBG_DATA_LEN_REPORT(debug))
+		printk(KERN_ERR "n_gsm: << %d bytes\n", count);
 
 	for (i = count, dp = cp, f = fp; i; i--, dp++) {
 		flags = *f++;
@@ -2325,6 +2442,26 @@
 }
 
 /**
+ *	gsmld_hangup		-	hangup the ldisc for this tty
+ *	@tty: device
+ */
+
+static int gsmld_hangup(struct tty_struct *tty)
+{
+	struct gsm_mux *gsm = tty->disc_data;
+	int i;
+	struct gsm_dlci *dlci;
+
+	for (i = NUM_DLCI-1; i >= 0; i--) {
+		dlci = gsm->dlci[i];
+		if (dlci)
+			dlci->state = DLCI_HANGUP;
+	}
+
+	return 0;
+}
+
+/**
  *	gsmld_close		-	close the ldisc for this tty
  *	@tty: device
  *
@@ -2338,9 +2475,27 @@
 {
 	struct gsm_mux *gsm = tty->disc_data;
 
-	gsmld_detach_gsm(tty, gsm);
+	/* When this fct is called, tty can't be used.
+	 * Here is the 3 cases :
+	 *	- Close of the tty on which the mux is associated
+	 *	  tty->ops->close is called before tty_disc_release
+	 *	- Hangup of the tty on which the mux is assocated
+	 *	  A modem/link problem occurs and modem is not reachable
+	 *	- Replacing LD by another one
+	 *	  tty->receive_room is set to 0, no modem answer
+	 *
+	 * So, no need to send anything to modem, ether it will not
+	 * reach the modem, or the modem can't accept it. Last case,
+	 * we will not receive modem answer and we will timeout.
+	 * To close modem mux, please use ioctl GSMIOC_DEMUX first
+	 */
+	gsm->tty_dead = 1;
 
-	gsmld_flush_buffer(tty);
+	if (gsm->tty) {
+		gsmld_detach_gsm(tty, gsm);
+		gsmld_flush_buffer(tty);
+	}
+
 	/* Do other clean up here */
 	mux_put(gsm);
 }
@@ -2393,9 +2548,8 @@
 	clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
 	spin_lock_irqsave(&gsm->tx_lock, flags);
 	gsm_data_kick(gsm);
-	if (gsm->tx_bytes < TX_THRESH_LO) {
+	if (gsm->tx_bytes < TX_THRESH_LO)
 		gsm_dlci_data_sweep(gsm);
-	}
 	spin_unlock_irqrestore(&gsm->tx_lock, flags);
 }
 
@@ -2517,6 +2671,34 @@
 		need_restart = 1;
 
 	/*
+	 *	gsm_mux.buf allocate dynamically
+	 *	according to ldisc configuration
+	 */
+	if (gsm->buf != NULL)
+		gsm_mux_buf_free(gsm->mru + 1, gsm->buf);
+	gsm->buf = gsm_mux_buf_malloc(c->mru + 1);
+	if (gsm->buf == NULL)
+		return -ENOMEM;
+	/*
+	 *	gsm_mux.txframe allocate dynamically
+	 *	according to ldisc configuration
+	 */
+	if (gsm->txframe != NULL) {
+		if (gsm->encoding == 0)
+			gsm_mux_buf_free(gsm->mtu + 1, gsm->txframe);
+		else
+			gsm_mux_buf_free(2 * gsm->mtu + 2, gsm->txframe);
+	}
+	if (c->encapsulation == 0)
+		gsm->txframe = gsm_mux_buf_malloc(c->mtu + 1);
+	else
+		gsm->txframe = gsm_mux_buf_malloc(2 * c->mtu + 2);
+	if (gsm->txframe == NULL) {
+		gsm_mux_buf_free(c->mru + 1, gsm->buf);
+		return -ENOMEM;
+	}
+
+	/*
 	 *	Close down what is needed, restart and initiate the new
 	 *	configuration
 	 */
@@ -2538,6 +2720,8 @@
 	gsm->encoding = c->encapsulation;
 	gsm->adaption = c->adaption;
 	gsm->n2 = c->n2;
+	gsm->clocal = c->clocal;
+	gsm->burst = c->burst;
 
 	if (c->i == 1)
 		gsm->ftype = UIH;
@@ -2581,6 +2765,8 @@
 		pr_debug("Ftype %d i %d\n", gsm->ftype, c.i);
 		c.mru = gsm->mru;
 		c.mtu = gsm->mtu;
+		c.clocal = gsm->clocal;
+		c.burst = gsm->burst;
 		c.k = 0;
 		if (copy_to_user((void *)arg, &c, sizeof(c)))
 			return -EFAULT;
@@ -2589,11 +2775,23 @@
 		if (copy_from_user(&c, (void *)arg, sizeof(c)))
 			return -EFAULT;
 		return gsmld_config(tty, gsm, &c);
+	case GSMIOC_DEMUX:
+		gsm->dead = 1;
+		gsm_closeall_dlci(gsm);
+		return 0;
 	default:
 		return n_tty_ioctl_helper(tty, file, cmd, arg);
 	}
 }
 
+#ifdef CONFIG_COMPAT
+static long gsmld_compat_ioctl(struct tty_struct *tty, struct file *file,
+			       unsigned int cmd, unsigned long arg)
+{
+	return gsmld_ioctl(tty, file, cmd, arg);
+}
+#endif
+
 /*
  *	Network interface
  *
@@ -2661,6 +2859,8 @@
 	skb_queue_head(&dlci->skb_list, skb);
 	STATS(net).tx_packets++;
 	STATS(net).tx_bytes += skb->len;
+	if (skb_queue_len(&dlci->skb_list) >= TXLIST_THRESH_HI)
+		netif_stop_queue(net);
 	gsm_dlci_data_kick(dlci);
 	/* And tell the kernel when the last transmit started. */
 	net->trans_start = jiffies;
@@ -2672,7 +2872,7 @@
 static void gsm_mux_net_tx_timeout(struct net_device *net)
 {
 	/* Tell syslog we are hosed. */
-	dev_dbg(&net->dev, "Tx timed out.\n");
+	dev_dbg((struct device *)&net->dev, "Tx timed out.\n");
 
 	/* Update statistics */
 	STATS(net).tx_errors++;
@@ -2698,7 +2898,15 @@
 	memcpy(skb_put(skb, size), in_buf, size);
 
 	skb->dev = net;
-	skb->protocol = __constant_htons(ETH_P_IP);
+	/* IP version bit 4 to 7 */
+	switch ((*in_buf) >> 4) {
+	case 4:
+		skb->protocol = htons(ETH_P_IP);
+		break;
+	case 6:
+		skb->protocol = htons(ETH_P_IPV6);
+		break;
+	}
 
 	/* Ship it off to the kernel */
 	netif_rx(skb);
@@ -2719,6 +2927,9 @@
 	return 0;
 }
 
+/*
+ * RRG: FIXME: replace MAX_MTU w/ gsm->mtu which can be smaller
+ */
 static void gsm_mux_net_init(struct net_device *net)
 {
 	static const struct net_device_ops gsm_netdev_ops = {
@@ -2729,13 +2940,13 @@
 		.ndo_get_stats		= gsm_mux_net_get_stats,
 		.ndo_change_mtu		= gsm_change_mtu,
 	};
-
 	net->netdev_ops = &gsm_netdev_ops;
 
 	/* fill in the other fields */
 	net->watchdog_timeo = GSM_NET_TX_TIMEOUT;
 	net->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
 	net->type = ARPHRD_NONE;
+	net->mtu = MAX_MTU;
 	net->tx_queue_len = 10;
 }
 
@@ -2746,14 +2957,16 @@
 	struct gsm_mux_net *mux_net;
 
 	pr_debug("destroy network interface");
-	if (!dlci->net)
-		return;
-	mux_net = (struct gsm_mux_net *)netdev_priv(dlci->net);
-	muxnet_put(mux_net);
+	mutex_lock(&dlci->rx_mutex);
+	if (dlci->net) {
+		netif_tx_disable(dlci->net);
+		mux_net = (struct gsm_mux_net *)netdev_priv(dlci->net);
+		muxnet_put(mux_net);
+	}
+	mutex_unlock(&dlci->rx_mutex);
 }
 
 
-/* caller holds the dlci mutex */
 static int gsm_create_network(struct gsm_dlci *dlci, struct gsm_netconfig *nc)
 {
 	char *netname;
@@ -2768,8 +2981,11 @@
 	if (dlci->adaption > 2)
 		return -EBUSY;
 
-	if (nc->protocol != htons(ETH_P_IP))
+	if (nc->protocol != htons(ETH_P_IP)
+	  && nc->protocol != htons(ETH_P_IPV6)) {
+		pr_err("only IPV4/V6 protocol supported");
 		return -EPROTONOSUPPORT;
+	}
 
 	if (nc->adaption != 3 && nc->adaption != 4)
 		return -EPROTONOSUPPORT;
@@ -2786,6 +3002,8 @@
 		pr_err("alloc_netdev failed");
 		return -ENOMEM;
 	}
+
+	mutex_lock(&dlci->rx_mutex);
 	net->mtu = dlci->gsm->mtu;
 	mux_net = (struct gsm_mux_net *)netdev_priv(net);
 	mux_net->dlci = dlci;
@@ -2798,6 +3016,7 @@
 	dlci->adaption = nc->adaption;
 	dlci->data = gsm_mux_rx_netchar;
 	dlci->net = net;
+	mutex_unlock(&dlci->rx_mutex);
 
 	pr_debug("register netdev");
 	retval = register_netdev(net);
@@ -2816,11 +3035,15 @@
 	.name            = "n_gsm",
 	.open            = gsmld_open,
 	.close           = gsmld_close,
+	.hangup          = gsmld_hangup,
 	.flush_buffer    = gsmld_flush_buffer,
 	.chars_in_buffer = gsmld_chars_in_buffer,
 	.read            = gsmld_read,
 	.write           = gsmld_write,
 	.ioctl           = gsmld_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl    = gsmld_compat_ioctl,
+#endif
 	.poll            = gsmld_poll,
 	.receive_buf     = gsmld_receive_buf,
 	.write_wakeup    = gsmld_write_wakeup
@@ -2830,8 +3053,6 @@
  *	Virtual tty side
  */
 
-#define TX_SIZE		512
-
 static int gsmtty_modem_update(struct gsm_dlci *dlci, u8 brk)
 {
 	u8 modembits[5];
@@ -2858,7 +3079,7 @@
 	/* Not yet open so no carrier info */
 	if (dlci->state != DLCI_OPEN)
 		return 0;
-	if (debug & 2)
+	if (GSMDBG_FORCE_CARRIER(debug))
 		return 1;
 	return dlci->modem_rx & TIOCM_CD;
 }
@@ -2883,6 +3104,29 @@
 	.destruct = gsm_dlci_free,
 };
 
+static void gsmtty_attach_dlci(struct tty_struct *tty, struct gsm_dlci *dlci)
+{
+	spin_lock(&dlci->gsmtty_lock);
+	dlci->gsmtty_count++;
+	spin_unlock(&dlci->gsmtty_lock);
+}
+
+static void gsmtty_detach_dlci(struct tty_struct *tty)
+{
+	struct gsm_dlci *dlci = tty->driver_data;
+	int has_open;
+
+	if (!dlci) {
+		WARN(1, "dlci shouldn't be NULL\n");
+		return;
+	}
+	spin_lock(&dlci->gsmtty_lock);
+	has_open = --dlci->gsmtty_count;
+	if (!has_open)
+		tty_port_tty_set(&dlci->port, NULL);
+	spin_unlock(&dlci->gsmtty_lock);
+}
+
 static int gsmtty_install(struct tty_driver *driver, struct tty_struct *tty)
 {
 	struct gsm_mux *gsm;
@@ -2904,25 +3148,36 @@
 	gsm = gsm_mux[mux];
 	if (gsm->dead)
 		return -EL2HLT;
-	/* If DLCI 0 is not yet fully open return an error. This is ok from a locking
-	   perspective as we don't have to worry about this if DLCI0 is lost */
-	if (gsm->dlci[0] && gsm->dlci[0]->state != DLCI_OPEN) 
+	/* If DLCI 0 is not yet fully open return an error. This is ok from a
+	 * locking perspective as we don't have to worry about this if DLCI0
+	 * is lost */
+	mutex_lock(&gsm->mutex);
+	if (gsm->dlci[0] && gsm->dlci[0]->state != DLCI_OPEN) {
+		mutex_unlock(&gsm->mutex);
 		return -EL2NSYNC;
+	}
 	dlci = gsm->dlci[line];
 	if (dlci == NULL) {
 		alloc = true;
 		dlci = gsm_dlci_alloc(gsm, line);
 	}
-	if (dlci == NULL)
+	if (dlci == NULL) {
+		mutex_unlock(&gsm->mutex);
 		return -ENOMEM;
+	}
 	ret = tty_port_install(&dlci->port, driver, tty);
 	if (ret) {
 		if (alloc)
 			dlci_put(dlci);
+		mutex_unlock(&gsm->mutex);
 		return ret;
 	}
 
+	dlci_get(dlci);
+	dlci_get(gsm->dlci[0]);
+	mux_get(gsm);
 	tty->driver_data = dlci;
+	mutex_unlock(&gsm->mutex);
 
 	return 0;
 }
@@ -2931,19 +3186,51 @@
 {
 	struct gsm_dlci *dlci = tty->driver_data;
 	struct tty_port *port = &dlci->port;
+	struct gsm_mux *gsm = dlci->gsm;
+	struct ktermios save;
+	int t;
 
+	if (dlci->state == DLCI_CLOSING) {
+		/* if we are in blocking mode, wait the end of the closing */
+		if (!(filp->f_flags & O_NONBLOCK)) {
+			t = wait_event_timeout(gsm->event,
+					dlci->state == DLCI_CLOSED,
+					gsm->n2 * gsm->t1 * HZ / 100);
+			if (!t)
+				return -ENXIO;
+		} else
+			return -EAGAIN;
+	}
 	port->count++;
-	dlci_get(dlci);
-	dlci_get(dlci->gsm->dlci[0]);
-	mux_get(dlci->gsm);
 	tty_port_tty_set(port, tty);
+	gsmtty_attach_dlci(tty, dlci);
 
+	/* Perform a change to the CLOCAL state and call into the driver
+	   layer to make it visible. All done with the termios mutex */
+	if (gsm->clocal) {
+		mutex_lock(&tty->termios_mutex);
+		save = tty->termios;
+		tty->termios.c_cflag |= CLOCAL;
+		if (tty->ops->set_termios)
+			tty->ops->set_termios(tty, &save);
+		mutex_unlock(&tty->termios_mutex);
+	}
 	dlci->modem_rx = 0;
 	/* We could in theory open and close before we wait - eg if we get
 	   a DM straight back. This is ok as that will have caused a hangup */
 	set_bit(ASYNCB_INITIALIZED, &port->flags);
 	/* Start sending off SABM messages */
 	gsm_dlci_begin_open(dlci);
+
+	/* Wait for UA */
+	if (!(filp->f_flags & O_NONBLOCK)) {
+		t = wait_event_timeout(gsm->event,
+					dlci->state == DLCI_OPEN,
+					gsm->n2 * gsm->t1 * HZ / 100);
+		if (!t)
+			return -ENXIO;
+	}
+
 	/* And wait for virtual carrier */
 	return tty_port_block_til_ready(port, tty, filp);
 }
@@ -2955,34 +3242,46 @@
 
 	if (dlci == NULL)
 		return;
+
 	if (dlci->state == DLCI_CLOSED)
 		return;
+	gsm = dlci->gsm;
 	mutex_lock(&dlci->mutex);
 	gsm_destroy_network(dlci);
 	mutex_unlock(&dlci->mutex);
-	gsm = dlci->gsm;
-	if (tty_port_close_start(&dlci->port, tty, filp) == 0)
-		goto out;
-	gsm_dlci_begin_close(dlci);
+	if (tty_port_close_start(&dlci->port, tty, filp) == 0) {
+		gsmtty_detach_dlci(tty);
+		return;
+	}
+
+	if (!gsm->tty_dead) {
+		gsm_dlci_begin_close(dlci);
+
+		/* Wait for UA */
+		if (!(filp->f_flags & O_NONBLOCK))
+			wait_event_timeout(gsm->event,
+						dlci->state == DLCI_CLOSED,
+						gsm->n2 * gsm->t1 * HZ / 100);
+	} else
+		gsm_dlci_close(dlci);
 	if (test_bit(ASYNCB_INITIALIZED, &dlci->port.flags)) {
 		if (C_HUPCL(tty))
 			tty_port_lower_dtr_rts(&dlci->port);
 	}
 	tty_port_close_end(&dlci->port, tty);
-	tty_port_tty_set(&dlci->port, NULL);
-out:
-	dlci_put(dlci);
-	dlci_put(gsm->dlci[0]);
-	mux_put(gsm);
+	gsmtty_detach_dlci(tty);
 }
 
 static void gsmtty_hangup(struct tty_struct *tty)
 {
 	struct gsm_dlci *dlci = tty->driver_data;
-	if (dlci->state == DLCI_CLOSED)
+	if (!dlci)
 		return;
 	tty_port_hangup(&dlci->port);
-	gsm_dlci_begin_close(dlci);
+	if (!dlci->gsm->tty_dead)
+		gsm_dlci_begin_close(dlci);
+	else
+		gsm_dlci_close(dlci);
 }
 
 static int gsmtty_write(struct tty_struct *tty, const unsigned char *buf,
@@ -2990,10 +3289,18 @@
 {
 	int sent;
 	struct gsm_dlci *dlci = tty->driver_data;
-	if (dlci->state == DLCI_CLOSED)
-		return -EINVAL;
+
+	if (dlci->state == DLCI_OPENING)
+		return -EAGAIN;
+
+	if (dlci->state != DLCI_OPEN)
+		return -ENXIO;
+
 	/* Stuff the bytes into the fifo queue */
 	sent = kfifo_in_locked(dlci->fifo, buf, len, &dlci->lock);
+	if (!sent)
+		dlci->need_tty_wakeup = 1;
+
 	/* Need to kick the channel */
 	gsm_dlci_data_kick(dlci);
 	return sent;
@@ -3009,16 +3316,23 @@
 
 static int gsmtty_chars_in_buffer(struct tty_struct *tty)
 {
-	struct gsm_dlci *dlci = tty->driver_data;
-	if (dlci->state == DLCI_CLOSED)
-		return -EINVAL;
+	struct gsm_dlci *dlci;
+
+	if (!tty)
+		return 0;
+	else
+		dlci = tty->driver_data;
+
+	if (!dlci)
+		return 0;
+
 	return kfifo_len(dlci->fifo);
 }
 
 static void gsmtty_flush_buffer(struct tty_struct *tty)
 {
 	struct gsm_dlci *dlci = tty->driver_data;
-	if (dlci->state == DLCI_CLOSED)
+	if (!dlci)
 		return;
 	/* Caution needed: If we implement reliable transport classes
 	   then the data being transmitted can't simply be junked once
@@ -3095,6 +3409,14 @@
 	}
 }
 
+#ifdef CONFIG_COMPAT
+static long gsmtty_compat_ioctl(struct tty_struct *tty,
+			unsigned int cmd, unsigned long arg)
+{
+	return gsmtty_ioctl(tty, cmd, arg);
+}
+#endif
+
 static void gsmtty_set_termios(struct tty_struct *tty, struct ktermios *old)
 {
 	struct gsm_dlci *dlci = tty->driver_data;
@@ -3150,6 +3472,18 @@
 	return gsmtty_modem_update(dlci, encode);
 }
 
+static void gsmtty_remove(struct tty_driver *driver, struct tty_struct *tty)
+{
+	struct gsm_dlci *dlci = tty->driver_data;
+	struct gsm_mux *gsm = dlci->gsm;
+
+	dlci_put(dlci);
+	dlci_put(gsm->dlci[0]);
+	mux_put(gsm);
+	tty->driver_data = NULL;
+	tty->port = NULL;
+	driver->ttys[tty->index] = NULL;
+}
 
 /* Virtual ttys for the demux */
 static const struct tty_operations gsmtty_ops = {
@@ -3161,6 +3495,9 @@
 	.chars_in_buffer	= gsmtty_chars_in_buffer,
 	.flush_buffer		= gsmtty_flush_buffer,
 	.ioctl			= gsmtty_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl		= gsmtty_compat_ioctl,
+#endif
 	.throttle		= gsmtty_throttle,
 	.unthrottle		= gsmtty_unthrottle,
 	.set_termios		= gsmtty_set_termios,
@@ -3169,6 +3506,7 @@
 	.tiocmget		= gsmtty_tiocmget,
 	.tiocmset		= gsmtty_tiocmset,
 	.break_ctl		= gsmtty_break_ctl,
+	.remove			= gsmtty_remove,
 };
 
 
@@ -3189,6 +3527,7 @@
 		pr_err("gsm_init: tty allocation failed.\n");
 		return -EINVAL;
 	}
+	gsm_tty_driver->owner	= THIS_MODULE;
 	gsm_tty_driver->driver_name	= "gsmtty";
 	gsm_tty_driver->name		= "gsmtty";
 	gsm_tty_driver->major		= 0;	/* Dynamic */
diff --git a/drivers/tty/serial/8250/8250_gsc.c b/drivers/tty/serial/8250/8250_gsc.c
index bb91b47..2e3ea1a 100644
--- a/drivers/tty/serial/8250/8250_gsc.c
+++ b/drivers/tty/serial/8250/8250_gsc.c
@@ -31,9 +31,8 @@
 	int err;
 
 #ifdef CONFIG_64BIT
-	extern int iosapic_serial_irq(int cellnum);
 	if (!dev->irq && (dev->id.sversion == 0xad))
-		dev->irq = iosapic_serial_irq(dev->mod_index-1);
+		dev->irq = iosapic_serial_irq(dev);
 #endif
 
 	if (!dev->irq) {
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 26e3a97..c52948b 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -4797,10 +4797,6 @@
 		PCI_VENDOR_ID_IBM, 0x0299,
 		0, 0, pbn_b0_bt_2_115200 },
 
-	{	PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9835,
-		0x1000, 0x0012,
-		0, 0, pbn_b0_bt_2_115200 },
-
 	{	PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9901,
 		0xA000, 0x1000,
 		0, 0, pbn_b0_1_115200 },
diff --git a/drivers/tty/serial/arc_uart.c b/drivers/tty/serial/arc_uart.c
index cbf1d15..22f280a 100644
--- a/drivers/tty/serial/arc_uart.c
+++ b/drivers/tty/serial/arc_uart.c
@@ -773,6 +773,6 @@
 module_exit(arc_serial_exit);
 
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("plat-arcfpga/uart");
+MODULE_ALIAS("platform:" DRIVER_NAME);
 MODULE_AUTHOR("Vineet Gupta");
 MODULE_DESCRIPTION("ARC(Synopsys) On-Chip(fpga) serial driver");
diff --git a/drivers/tty/serial/mrst_max3110.c b/drivers/tty/serial/mrst_max3110.c
index 9b6ef20..1578d4f 100644
--- a/drivers/tty/serial/mrst_max3110.c
+++ b/drivers/tty/serial/mrst_max3110.c
@@ -40,9 +40,11 @@
 #include <linux/tty_flip.h>
 #include <linux/serial_core.h>
 #include <linux/serial_reg.h>
+#include <linux/serial_max3110.h>
 
 #include <linux/kthread.h>
 #include <linux/spi/spi.h>
+#include <linux/pm.h>
 
 #include "mrst_max3110.h"
 
@@ -61,12 +63,14 @@
 	struct task_struct *main_thread;
 	struct task_struct *read_thread;
 	struct mutex thread_mutex;
+	struct mutex io_mutex;
 
 	u32 baud;
 	u16 cur_conf;
 	u8 clock;
 	u8 parity, word_7bits;
 	u16 irq;
+	u16 irq_edge_triggered;
 
 	unsigned long uart_flags;
 
@@ -90,6 +94,7 @@
 	struct spi_transfer	x;
 	int ret;
 
+	mutex_lock(&max->io_mutex);
 	spi_message_init(&message);
 	memset(&x, 0, sizeof x);
 	x.len = len;
@@ -104,6 +109,7 @@
 
 	/* Do the i/o */
 	ret = spi_sync(spi, &message);
+	mutex_unlock(&max->io_mutex);
 	return ret;
 }
 
@@ -259,7 +265,6 @@
 	return;
 }
 
-#define WORDS_PER_XFER	128
 static void send_circ_buf(struct uart_max3110 *max,
 				struct circ_buf *xmit)
 {
@@ -268,7 +273,7 @@
 	int i, len, blen, dma_size, left, ret = 0;
 
 
-	dma_size = WORDS_PER_XFER * sizeof(u16) * 2;
+	dma_size = M3110_RX_FIFO_DEPTH * sizeof(u16) * 2;
 	buf = kzalloc(dma_size, GFP_KERNEL | GFP_DMA);
 	if (!buf)
 		return;
@@ -278,7 +283,7 @@
 	while (!uart_circ_empty(xmit)) {
 		left = uart_circ_chars_pending(xmit);
 		while (left) {
-			len = min(left, WORDS_PER_XFER);
+			len = min(left, M3110_RX_FIFO_DEPTH);
 			blen = len * sizeof(u16);
 			memset(ibuf, 0, blen);
 
@@ -414,8 +419,8 @@
 				max->uart_flags || kthread_should_stop());
 
 		mutex_lock(&max->thread_mutex);
-
-		if (test_and_clear_bit(BIT_IRQ_PENDING, &max->uart_flags))
+		if (max->irq_edge_triggered &&
+			test_and_clear_bit(BIT_IRQ_PENDING, &max->uart_flags))
 			max3110_con_receive(max);
 
 		/* first handle console output */
@@ -437,11 +442,15 @@
 {
 	struct uart_max3110 *max = dev_id;
 
-	/* max3110's irq is a falling edge, not level triggered,
-	 * so no need to disable the irq */
+	if (max->irq_edge_triggered) {
+		/* max3110's irq is a falling edge, not level triggered,
+		 * so no need to disable the irq */
 
-	if (!test_and_set_bit(BIT_IRQ_PENDING, &max->uart_flags))
-		wake_up(&max->wq);
+		if (!test_and_set_bit(BIT_IRQ_PENDING, &max->uart_flags))
+			wake_up(&max->wq);
+	} else {
+		max3110_con_receive(max);
+	}
 
 	return IRQ_HANDLED;
 }
@@ -490,20 +499,10 @@
 	/* as we use thread to handle tx/rx, need set low latency */
 	port->state->port.low_latency = 1;
 
-	if (max->irq) {
-		max->read_thread = NULL;
-		ret = request_irq(max->irq, serial_m3110_irq,
-				IRQ_TYPE_EDGE_FALLING, "max3110", max);
-		if (ret) {
-			max->irq = 0;
-			pr_err(PR_FMT "unable to allocate IRQ, polling\n");
-		}  else {
-			/* Enable RX IRQ only */
-			config |= WC_RXA_IRQ_ENABLE;
-		}
-	}
-
-	if (max->irq == 0) {
+	if (max->irq > 0) {
+		/* Enable RX IRQ only */
+		config |= WC_RXA_IRQ_ENABLE;
+	} else {
 		/* If IRQ is disabled, start a read thread for input data */
 		max->read_thread =
 			kthread_run(max3110_read_thread, max, "max3110_read");
@@ -517,8 +516,6 @@
 
 	ret = max3110_out(max, config);
 	if (ret) {
-		if (max->irq)
-			free_irq(max->irq, max);
 		if (max->read_thread)
 			kthread_stop(max->read_thread);
 		max->read_thread = NULL;
@@ -540,9 +537,6 @@
 		max->read_thread = NULL;
 	}
 
-	if (max->irq)
-		free_irq(max->irq, max);
-
 	/* Disable interrupts from this port */
 	config = WC_TAG | WC_SW_SHDI;
 	max3110_out(max, config);
@@ -749,7 +743,8 @@
 	struct spi_device *spi = to_spi_device(dev);
 	struct uart_max3110 *max = spi_get_drvdata(spi);
 
-	disable_irq(max->irq);
+	if (max->irq > 0)
+		disable_irq(max->irq);
 	uart_suspend_port(&serial_m3110_reg, &max->port);
 	max3110_out(max, max->cur_conf | WC_SW_SHDI);
 	return 0;
@@ -762,7 +757,8 @@
 
 	max3110_out(max, max->cur_conf);
 	uart_resume_port(&serial_m3110_reg, &max->port);
-	enable_irq(max->irq);
+	if (max->irq > 0)
+		enable_irq(max->irq);
 	return 0;
 }
 
@@ -780,6 +776,9 @@
 	void *buffer;
 	u16 res;
 	int ret = 0;
+	struct plat_max3110 *pdata = spi->dev.platform_data;
+	if (!pdata)
+		return -EINVAL;
 
 	max = kzalloc(sizeof(*max), GFP_KERNEL);
 	if (!max)
@@ -803,6 +802,7 @@
 	max->irq = (u16)spi->irq;
 
 	mutex_init(&max->thread_mutex);
+	mutex_init(&max->io_mutex);
 
 	max->word_7bits = 0;
 	max->parity = 0;
@@ -840,6 +840,25 @@
 		goto err_kthread;
 	}
 
+	max->irq_edge_triggered = pdata->irq_edge_triggered;
+
+	if (max->irq > 0) {
+		if (max->irq_edge_triggered) {
+			ret = request_irq(max->irq, serial_m3110_irq,
+					IRQ_TYPE_EDGE_FALLING, "max3110", max);
+		} else {
+			ret = request_threaded_irq(max->irq, NULL,
+					serial_m3110_irq,
+					IRQF_ONESHOT, "max3110", max);
+		}
+
+		if (ret) {
+			max->irq = 0;
+			dev_warn(&spi->dev,
+			"unable to allocate IRQ, will use polling method\n");
+		}
+	}
+
 	spi_set_drvdata(spi, max);
 	pmax = max;
 
@@ -867,6 +886,9 @@
 
 	free_page((unsigned long)max->con_xmit.buf);
 
+	if (max->irq)
+		free_irq(max->irq, max);
+
 	if (max->main_thread)
 		kthread_stop(max->main_thread);
 
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index 4f5f161..f85b8e6 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -678,11 +678,18 @@
 
 static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
 {
-	u32 istatus, istat;
+	u32 istat;
 	struct mxs_auart_port *s = context;
 	u32 stat = readl(s->port.membase + AUART_STAT);
 
-	istatus = istat = readl(s->port.membase + AUART_INTR);
+	istat = readl(s->port.membase + AUART_INTR);
+
+	/* ack irq */
+	writel(istat & (AUART_INTR_RTIS
+		| AUART_INTR_TXIS
+		| AUART_INTR_RXIS
+		| AUART_INTR_CTSMIS),
+			s->port.membase + AUART_INTR_CLR);
 
 	if (istat & AUART_INTR_CTSMIS) {
 		uart_handle_cts_change(&s->port, stat & AUART_STAT_CTS);
@@ -702,12 +709,6 @@
 		istat &= ~AUART_INTR_TXIS;
 	}
 
-	writel(istatus & (AUART_INTR_RTIS
-		| AUART_INTR_TXIS
-		| AUART_INTR_RXIS
-		| AUART_INTR_CTSMIS),
-			s->port.membase + AUART_INTR_CLR);
-
 	return IRQ_HANDLED;
 }
 
@@ -850,7 +851,7 @@
 	struct mxs_auart_port *s;
 	struct uart_port *port;
 	unsigned int old_ctrl0, old_ctrl2;
-	unsigned int to = 1000;
+	unsigned int to = 20000;
 
 	if (co->index >= MXS_AUART_PORTS || co->index < 0)
 		return;
@@ -871,18 +872,23 @@
 
 	uart_console_write(port, str, count, mxs_auart_console_putchar);
 
-	/*
-	 * Finally, wait for transmitter to become empty
-	 * and restore the TCR
-	 */
+	/* Finally, wait for transmitter to become empty ... */
 	while (readl(port->membase + AUART_STAT) & AUART_STAT_BUSY) {
+		udelay(1);
 		if (!to--)
 			break;
-		udelay(1);
 	}
 
-	writel(old_ctrl0, port->membase + AUART_CTRL0);
-	writel(old_ctrl2, port->membase + AUART_CTRL2);
+	/*
+	 * ... and restore the TCR if we waited long enough for the transmitter
+	 * to be idle. This might keep the transmitter enabled although it is
+	 * unused, but that is better than to disable it while it is still
+	 * transmitting.
+	 */
+	if (!(readl(port->membase + AUART_STAT) & AUART_STAT_BUSY)) {
+		writel(old_ctrl0, port->membase + AUART_CTRL0);
+		writel(old_ctrl2, port->membase + AUART_CTRL2);
+	}
 
 	clk_disable(s->clk);
 }
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 21a7e17..20e4c94 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -217,6 +217,7 @@
 #define FRI2_64_UARTCLK  64000000 /*  64.0000 MHz */
 #define FRI2_48_UARTCLK  48000000 /*  48.0000 MHz */
 #define NTC1_UARTCLK     64000000 /*  64.0000 MHz */
+#define MINNOW_UARTCLK   50000000 /*  50.0000 MHz */
 
 struct pch_uart_buffer {
 	unsigned char *buf;
@@ -398,6 +399,10 @@
 		    strstr(cmp, "nanoETXexpress-TT")))
 		return NTC1_UARTCLK;
 
+	cmp = dmi_get_system_info(DMI_BOARD_NAME);
+	if (cmp && strstr(cmp, "MinnowBoard"))
+		return MINNOW_UARTCLK;
+
 	return DEFAULT_UARTCLK;
 }
 
@@ -653,11 +658,12 @@
 		dev_warn(port->dev, "Rx overrun: dropping %u bytes\n",
 			 size - room);
 	if (!room)
-		return room;
+		goto out;
 
 	tty_insert_flip_string(tport, sg_virt(&priv->sg_rx), size);
 
 	port->icount.rx += room;
+out:
 	tty_kref_put(tty);
 
 	return room;
@@ -1066,6 +1072,8 @@
 	if (tty == NULL) {
 		for (i = 0; error_msg[i] != NULL; i++)
 			dev_err(&priv->pdev->dev, error_msg[i]);
+	} else {
+		tty_kref_put(tty);
 	}
 }
 
diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
index 9799d04..357a837 100644
--- a/drivers/tty/serial/serial-tegra.c
+++ b/drivers/tty/serial/serial-tegra.c
@@ -726,7 +726,7 @@
 static void tegra_uart_stop_rx(struct uart_port *u)
 {
 	struct tegra_uart_port *tup = to_tegra_uport(u);
-	struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port);
+	struct tty_struct *tty;
 	struct tty_port *port = &u->state->port;
 	struct dma_tx_state state;
 	unsigned long ier;
@@ -738,6 +738,8 @@
 	if (!tup->rx_in_progress)
 		return;
 
+	tty = tty_port_tty_get(&tup->uport.state->port);
+
 	tegra_uart_wait_sym_time(tup, 1); /* wait a character interval */
 
 	ier = tup->ier_shadow;
diff --git a/drivers/tty/serial/vt8500_serial.c b/drivers/tty/serial/vt8500_serial.c
index 1a8bc22..f72b43f 100644
--- a/drivers/tty/serial/vt8500_serial.c
+++ b/drivers/tty/serial/vt8500_serial.c
@@ -559,12 +559,13 @@
 	if (!mmres || !irqres)
 		return -ENODEV;
 
-	if (np)
+	if (np) {
 		port = of_alias_get_id(np, "serial");
 		if (port >= VT8500_MAX_PORTS)
 			port = -1;
-	else
+	} else {
 		port = -1;
+	}
 
 	if (port < 0) {
 		/* calculate the port id */
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index 9121c1f..9144ae5 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -61,7 +61,12 @@
 {
 	struct tty_buffer *p;
 
-	if (port->buf.memory_used + size > 65536)
+	/* This will increase the maximum size of the buffering that is between
+	   the MUX write and the gsmtty read by the client to an amount that is
+	   higher than the maximum size of what the modem can flush through its
+	   NVM code. This is a workaround pending a proper flow control
+	   solution. */
+	if (port->buf.memory_used + size > (6 * 65536))
 		return NULL;
 	p = kmalloc(sizeof(struct tty_buffer) + 2 * size, GFP_ATOMIC);
 	if (p == NULL)
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 6464029..31c4a57 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -850,7 +850,8 @@
 			struct pid *tty_pgrp = tty_get_pgrp(tty);
 			if (tty_pgrp) {
 				kill_pgrp(tty_pgrp, SIGHUP, on_exit);
-				kill_pgrp(tty_pgrp, SIGCONT, on_exit);
+				if (!on_exit)
+					kill_pgrp(tty_pgrp, SIGCONT, on_exit);
 				put_pid(tty_pgrp);
 			}
 		}
@@ -1617,8 +1618,12 @@
 		tty->ops->shutdown(tty);
 	tty_free_termios(tty);
 	tty_driver_remove_tty(tty->driver, tty);
-	tty->port->itty = NULL;
-	cancel_work_sync(&tty->port->buf.work);
+	if (tty->port)
+		tty->port->itty = NULL;
+	if (tty->link)
+		tty->link->port->itty = NULL;
+	if (tty->port)
+		cancel_work_sync(&tty->port->buf.work);
 
 	if (tty->link)
 		tty_kref_put(tty->link);
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
index 3500d41..088b4ca 100644
--- a/drivers/tty/tty_ioctl.c
+++ b/drivers/tty/tty_ioctl.c
@@ -1201,6 +1201,9 @@
 		}
 		return 0;
 	case TCFLSH:
+		retval = tty_check_change(tty);
+		if (retval)
+			return retval;
 		return __tty_perform_flush(tty, arg);
 	default:
 		/* Try the mode commands */
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
index 121aeb9..f597e88 100644
--- a/drivers/tty/tty_port.c
+++ b/drivers/tty/tty_port.c
@@ -256,10 +256,9 @@
 {
 	struct tty_struct *tty = tty_port_tty_get(port);
 
-	if (tty && (!check_clocal || !C_CLOCAL(tty))) {
+	if (tty && (!check_clocal || !C_CLOCAL(tty)))
 		tty_hangup(tty);
-		tty_kref_put(tty);
-	}
+	tty_kref_put(tty);
 }
 EXPORT_SYMBOL_GPL(tty_port_tty_hangup);
 
diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
index a9af1b9a..299d2a1 100644
--- a/drivers/tty/vt/keyboard.c
+++ b/drivers/tty/vt/keyboard.c
@@ -1014,6 +1014,7 @@
 	return leds;
 }
 
+#ifndef CONFIG_ANDROID
 static int kbd_update_leds_helper(struct input_handle *handle, void *data)
 {
 	unsigned char leds = *(unsigned char *)data;
@@ -1027,6 +1028,7 @@
 
 	return 0;
 }
+#endif
 
 /**
  *	vt_get_leds	-	helper for braille console
@@ -1110,6 +1112,7 @@
  * registered yet but we already getting updates from the VT to
  * update led state.
  */
+#ifndef CONFIG_ANDROID
 static void kbd_bh(unsigned long dummy)
 {
 	unsigned char leds;
@@ -1127,6 +1130,7 @@
 }
 
 DECLARE_TASKLET_DISABLED(keyboard_tasklet, kbd_bh, 0);
+#endif
 
 #if defined(CONFIG_X86) || defined(CONFIG_IA64) || defined(CONFIG_ALPHA) ||\
     defined(CONFIG_MIPS) || defined(CONFIG_PPC) || defined(CONFIG_SPARC) ||\
@@ -1393,7 +1397,9 @@
 
 	spin_unlock(&kbd_event_lock);
 
+#ifndef CONFIG_ANDROID
 	tasklet_schedule(&keyboard_tasklet);
+#endif
 	do_poke_blanked_console = 1;
 	schedule_console_callback();
 }
@@ -1465,6 +1471,7 @@
  * Start keyboard handler on the new keyboard by refreshing LED state to
  * match the rest of the system.
  */
+#ifndef CONFIG_ANDROID
 static void kbd_start(struct input_handle *handle)
 {
 	tasklet_disable(&keyboard_tasklet);
@@ -1474,6 +1481,9 @@
 
 	tasklet_enable(&keyboard_tasklet);
 }
+#else
+static void kbd_start(struct input_handle *handle) {}
+#endif
 
 static const struct input_device_id kbd_ids[] = {
 	{
@@ -1520,8 +1530,10 @@
 	if (error)
 		return error;
 
+#ifndef CONFIG_ANDROID
 	tasklet_enable(&keyboard_tasklet);
 	tasklet_schedule(&keyboard_tasklet);
+#endif
 
 	return 0;
 }
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index b645c47..2d57a00 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -630,36 +630,57 @@
 	return 0;
 }
 
-static const struct vm_operations_struct uio_vm_ops = {
+static const struct vm_operations_struct uio_logical_vm_ops = {
 	.open = uio_vma_open,
 	.close = uio_vma_close,
 	.fault = uio_vma_fault,
 };
 
+static int uio_mmap_logical(struct vm_area_struct *vma)
+{
+	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+	vma->vm_ops = &uio_logical_vm_ops;
+	uio_vma_open(vma);
+	return 0;
+}
+
+static const struct vm_operations_struct uio_physical_vm_ops = {
+#ifdef CONFIG_HAVE_IOREMAP_PROT
+	.access = generic_access_phys,
+#endif
+};
+
 static int uio_mmap_physical(struct vm_area_struct *vma)
 {
 	struct uio_device *idev = vma->vm_private_data;
 	int mi = uio_find_mem_index(vma);
+	struct uio_mem *mem;
 	if (mi < 0)
 		return -EINVAL;
+	mem = idev->info->mem + mi;
 
+	if (vma->vm_end - vma->vm_start > mem->size)
+		return -EINVAL;
+
+	vma->vm_ops = &uio_physical_vm_ops;
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
+	/*
+	 * We cannot use the vm_iomap_memory() helper here,
+	 * because vma->vm_pgoff is the map index we looked
+	 * up above in uio_find_mem_index(), rather than an
+	 * actual page offset into the mmap.
+	 *
+	 * So we just do the physical mmap without a page
+	 * offset.
+	 */
 	return remap_pfn_range(vma,
 			       vma->vm_start,
-			       idev->info->mem[mi].addr >> PAGE_SHIFT,
+			       mem->addr >> PAGE_SHIFT,
 			       vma->vm_end - vma->vm_start,
 			       vma->vm_page_prot);
 }
 
-static int uio_mmap_logical(struct vm_area_struct *vma)
-{
-	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
-	vma->vm_ops = &uio_vm_ops;
-	uio_vma_open(vma);
-	return 0;
-}
-
 static int uio_mmap(struct file *filep, struct vm_area_struct *vma)
 {
 	struct uio_listener *listener = filep->private_data;
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 9b1cbcf..1dcdaf9 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -47,6 +47,8 @@
 #include <asm/byteorder.h>
 #include <asm/unaligned.h>
 #include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/debugfs.h>
 
 #include "cdc-acm.h"
 
@@ -57,9 +59,13 @@
 static struct usb_driver acm_driver;
 static struct tty_driver *acm_tty_driver;
 static struct acm *acm_table[ACM_TTY_MINORS];
+static struct dentry *acm_debug_root;
+static struct dentry *acm_debug_data_dump_enable;
+static u32 acm_data_dump_enable;
 
 static DEFINE_MUTEX(acm_table_lock);
 
+static inline int is_hsic_host(struct usb_device *udev);
 /*
  * acm_table accessors
  */
@@ -220,6 +226,7 @@
 {
 	unsigned long flags;
 	struct acm_wb *wb = &acm->wb[wbn];
+	struct delayed_wb *d_wb;
 	int rc;
 
 	spin_lock_irqsave(&acm->write_lock, flags);
@@ -233,12 +240,17 @@
 							acm->susp_count);
 	usb_autopm_get_interface_async(acm->control);
 	if (acm->susp_count) {
-		if (!acm->delayed_wb)
-			acm->delayed_wb = wb;
-		else
+		d_wb = kmalloc(sizeof(struct delayed_wb), GFP_ATOMIC);
+		if (d_wb == NULL) {
+			rc = -ENOMEM;
 			usb_autopm_put_interface_async(acm->control);
+		} else {
+			d_wb->wb = wb;
+			list_add_tail(&d_wb->list, &acm->delayed_wb_list);
+			rc = 0;		/* A white lie */
+		}
 		spin_unlock_irqrestore(&acm->write_lock, flags);
-		return 0;	/* A white lie */
+		return rc;
 	}
 	usb_mark_last_busy(acm->dev);
 
@@ -283,6 +295,57 @@
 }
 
 static DEVICE_ATTR(iCountryCodeRelDate, S_IRUGO, show_country_rel_date, NULL);
+
+
+static ssize_t flow_statistics_show
+(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct usb_interface *intf = to_usb_interface(dev);
+	struct acm *acm = usb_get_intfdata(intf);
+	unsigned long flags;
+	int ret;
+
+	if (!acm)
+		return 0;
+
+	spin_lock_irqsave(&acm->write_lock, flags);
+	ret = sprintf(buf, "ACM%d\tRX packets:%d\t  TX packets:%d\n"
+		"\tRX bytes:%d\t  TX bytes:%d\n",
+		acm->minor, acm->packets_rx, acm->packets_tx,
+		acm->bytes_rx, acm->bytes_tx);
+	spin_unlock_irqrestore(&acm->write_lock, flags);
+
+	return ret;
+}
+
+static ssize_t flow_statistics_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct usb_interface *intf = to_usb_interface(dev);
+	struct acm *acm = usb_get_intfdata(intf);
+	unsigned long flags;
+	u32 tmp;
+
+	if (!acm || size > 2)
+		return -EINVAL;
+
+	if (sscanf(buf, "%d", &tmp) == 1) {
+		if (tmp == 0) {
+			spin_lock_irqsave(&acm->write_lock, flags);
+			acm->bytes_rx = acm->bytes_tx = 0;
+			acm->packets_rx = acm->packets_tx = 0;
+			spin_unlock_irqrestore(&acm->write_lock, flags);
+		}
+		return size;
+	}
+
+	return -1;
+}
+
+
+static DEVICE_ATTR(stats, S_IRUGO | S_IWUSR | S_IROTH, flow_statistics_show,
+	flow_statistics_store);
+
 /*
  * Interrupt handlers for various ACM device responses
  */
@@ -402,6 +465,29 @@
 	return 0;
 }
 
+static void ftrace_dump_acm_data(struct acm *acm, u8 is_out, const void *buf,
+	size_t len)
+{
+	const u8 *ptr = buf;
+	int i, linelen, remaining = len;
+	unsigned char linebuf[32 * 3 + 2 + 32 + 1];
+	int rowsize = 16;
+	int groupsize = 1;
+	bool ascii = true;
+
+	for (i = 0; i < len; i += rowsize) {
+		linelen = min(remaining, rowsize);
+		remaining -= rowsize;
+
+		hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize,
+				   linebuf, sizeof(linebuf), ascii);
+
+		trace_printk("[ACM %02d %s] %.4x: %s\n", acm->minor,
+			is_out == 1 ? "-->" : "<--", i, linebuf);
+	}
+
+}
+
 static void acm_process_read_urb(struct acm *acm, struct urb *urb)
 {
 	if (!urb->actual_length)
@@ -409,6 +495,11 @@
 
 	tty_insert_flip_string(&acm->port, urb->transfer_buffer,
 			urb->actual_length);
+
+	if (is_hsic_host(acm->dev) && acm_data_dump_enable)
+		ftrace_dump_acm_data(acm, 0, urb->transfer_buffer,
+			urb->actual_length);
+
 	tty_flip_buffer_push(&acm->port);
 }
 
@@ -429,10 +520,23 @@
 	usb_mark_last_busy(acm->dev);
 
 	if (urb->status) {
-		dev_dbg(&acm->data->dev, "%s - non-zero urb status: %d\n",
+		dev_dbg(&acm->data->dev,
+			"%s - non-zero urb status: %d, length: %d\n",
+			__func__, urb->status, urb->actual_length);
+		if ((urb->status != -ENOENT) ||
+			(urb->actual_length == 0)) {
+			dev_dbg(&acm->data->dev,
+				"%s - No handling for non-zero urb status: %d\n",
 							__func__, urb->status);
-		return;
+			return;
+		}
 	}
+
+	spin_lock_irqsave(&acm->write_lock, flags);
+	acm->bytes_rx += urb->actual_length;
+	acm->packets_rx++;
+	spin_unlock_irqrestore(&acm->write_lock, flags);
+
 	acm_process_read_urb(acm, urb);
 
 	/* throttle device if requested by tty */
@@ -460,7 +564,14 @@
 			urb->transfer_buffer_length,
 			urb->status);
 
+	if (is_hsic_host(acm->dev) && acm_data_dump_enable
+		&& usb_pipebulk(urb->pipe))
+		ftrace_dump_acm_data(acm, 1, urb->transfer_buffer,
+			urb->actual_length);
+
 	spin_lock_irqsave(&acm->write_lock, flags);
+	acm->bytes_tx += urb->actual_length;
+	acm->packets_tx++;
 	acm_write_done(acm, wb);
 	spin_unlock_irqrestore(&acm->write_lock, flags);
 	schedule_work(&acm->work);
@@ -600,8 +711,11 @@
 		usb_autopm_get_interface(acm->control);
 		acm_set_control(acm, acm->ctrlout = 0);
 		usb_kill_urb(acm->ctrlurb);
-		for (i = 0; i < ACM_NW; i++)
+		acm->transmitting = 0;
+		for (i = 0; i < ACM_NW; i++) {
 			usb_kill_urb(acm->wb[i].urb);
+			acm->wb[i].use = 0;
+		}
 		for (i = 0; i < acm->rx_buflimit; i++)
 			usb_kill_urb(acm->read_urbs[i]);
 		acm->control->needs_remote_wakeup = 0;
@@ -628,6 +742,8 @@
 {
 	struct acm *acm = tty->driver_data;
 	dev_dbg(&acm->control->dev, "%s\n", __func__);
+	/* Set flow_stopped to enable flush buffer*/
+	tty->flow_stopped = 1;
 	tty_port_close(&acm->port, tty, filp);
 }
 
@@ -690,6 +806,30 @@
 	return (ACM_NW - acm_wb_is_avail(acm)) * acm->writesize;
 }
 
+static void acm_tty_flush_buffer(struct tty_struct *tty)
+{
+	struct acm *acm = tty->driver_data;
+	struct acm_wb *wb;
+	struct delayed_wb *d_wb, *nd_wb;
+
+	/* flush delayed write buffer */
+	if (!acm->disconnected) {
+		usb_autopm_get_interface(acm->control);
+		spin_lock_irq(&acm->write_lock);
+		list_for_each_entry_safe(d_wb, nd_wb,
+				&acm->delayed_wb_list, list) {
+			wb = d_wb->wb;
+			list_del(&d_wb->list);
+			kfree(d_wb);
+			spin_unlock_irq(&acm->write_lock);
+			acm_start_wb(acm, wb);
+			spin_lock_irq(&acm->write_lock);
+		}
+		spin_unlock_irq(&acm->write_lock);
+		usb_autopm_put_interface(acm->control);
+	}
+}
+
 static void acm_tty_throttle(struct tty_struct *tty)
 {
 	struct acm *acm = tty->driver_data;
@@ -932,6 +1072,27 @@
 	return 0;
 }
 
+static inline int is_hsic_host(struct usb_device *udev)
+{
+	struct pci_dev   *pdev;
+
+	if (udev == NULL)
+		return -EINVAL;
+
+	pdev = to_pci_dev(udev->bus->controller);
+	if (pdev->device == 0x119D || pdev->device == 0x0f35)
+		return 1;
+	else
+		return 0;
+}
+
+static int is_comneon_modem(struct usb_device *dev)
+{
+	return (le16_to_cpu(dev->descriptor.idVendor) == CTP_MODEM_VID &&
+			le16_to_cpu(dev->descriptor.idProduct) ==
+			CTP_MODEM_PID);
+}
+
 static int acm_probe(struct usb_interface *intf,
 		     const struct usb_device_id *id)
 {
@@ -1269,6 +1430,7 @@
 		snd->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
 		snd->instance = acm;
 	}
+	INIT_LIST_HEAD(&acm->delayed_wb_list);
 
 	usb_set_intfdata(intf, acm);
 
@@ -1332,6 +1494,32 @@
 		goto alloc_fail8;
 	}
 
+	i = device_create_file(&intf->dev, &dev_attr_stats);
+	if (i < 0)
+		goto alloc_fail8;
+
+	/* Enable Runtime-PM for HSIC */
+	if (is_hsic_host(usb_dev)) {
+		dev_dbg(&intf->dev,
+			"Enable autosuspend\n");
+		usb_enable_autosuspend(usb_dev);
+	}
+
+	/* Enable Runtime-PM for CTP Modem */
+	if (is_comneon_modem(usb_dev))
+		usb_enable_autosuspend(usb_dev);
+
+	if (is_hsic_host(usb_dev)) {
+		if (!acm_debug_root)
+			acm_debug_root = debugfs_create_dir("acm",
+				usb_debug_root);
+
+		if (!acm_debug_data_dump_enable)
+			acm_debug_data_dump_enable = debugfs_create_u32(
+				"acm_data_dump_enable",	0644, acm_debug_root,
+				&acm_data_dump_enable);
+	}
+
 	return 0;
 alloc_fail8:
 	if (acm->country_codes) {
@@ -1398,6 +1586,7 @@
 				&dev_attr_iCountryCodeRelDate);
 	}
 	device_remove_file(&acm->control->dev, &dev_attr_bmCapabilities);
+	device_remove_file(&acm->control->dev, &dev_attr_stats);
 	usb_set_intfdata(acm->control, NULL);
 	usb_set_intfdata(acm->data, NULL);
 	mutex_unlock(&acm->mutex);
@@ -1425,6 +1614,11 @@
 		usb_driver_release_interface(&acm_driver, intf == acm->control ?
 					acm->data : acm->control);
 
+	debugfs_remove(acm_debug_data_dump_enable);
+	debugfs_remove(acm_debug_root);
+	acm_debug_data_dump_enable = NULL;
+	acm_debug_root = NULL;
+
 	tty_port_put(&acm->port);
 }
 
@@ -1463,6 +1657,7 @@
 {
 	struct acm *acm = usb_get_intfdata(intf);
 	struct acm_wb *wb;
+	struct delayed_wb *d_wb, *nd_wb;
 	int rv = 0;
 	int cnt;
 
@@ -1478,14 +1673,16 @@
 		rv = usb_submit_urb(acm->ctrlurb, GFP_NOIO);
 
 		spin_lock_irq(&acm->write_lock);
-		if (acm->delayed_wb) {
-			wb = acm->delayed_wb;
-			acm->delayed_wb = NULL;
+		list_for_each_entry_safe(d_wb, nd_wb,
+				&acm->delayed_wb_list, list) {
+			wb = d_wb->wb;
+			list_del(&d_wb->list);
+			kfree(d_wb);
 			spin_unlock_irq(&acm->write_lock);
 			acm_start_wb(acm, wb);
-		} else {
-			spin_unlock_irq(&acm->write_lock);
+			spin_lock_irq(&acm->write_lock);
 		}
+		spin_unlock_irq(&acm->write_lock);
 
 		/*
 		 * delayed error checking because we must
@@ -1743,6 +1940,7 @@
 	.throttle =		acm_tty_throttle,
 	.unthrottle =		acm_tty_unthrottle,
 	.chars_in_buffer =	acm_tty_chars_in_buffer,
+	.flush_buffer =		acm_tty_flush_buffer,
 	.break_ctl =		acm_tty_break_ctl,
 	.set_termios =		acm_tty_set_termios,
 	.tiocmget =		acm_tty_tiocmget,
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index 0f76e4a..722bdfe 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -79,6 +79,11 @@
 	struct acm		*instance;
 };
 
+struct delayed_wb {
+	struct list_head        list;
+	struct acm_wb		*wb;
+};
+
 struct acm {
 	struct usb_device *dev;				/* the corresponding usb device */
 	struct usb_interface *control;			/* control interface */
@@ -117,7 +122,10 @@
 	unsigned int throttled:1;			/* actually throttled */
 	unsigned int throttle_req:1;			/* throttle requested */
 	u8 bInterval;
-	struct acm_wb *delayed_wb;			/* write queued for a device about to be woken */
+	struct list_head delayed_wb_list;		/* delayed wb list */
+
+	unsigned int bytes_rx, bytes_tx;		/* flow statistics */
+	unsigned int packets_rx, packets_tx;
 };
 
 #define CDC_DATA_INTERFACE_TYPE	0x0a
@@ -129,3 +137,6 @@
 #define NOT_A_MODEM			8
 #define NO_DATA_INTERFACE		16
 #define IGNORE_DEVICE			32
+/* CloverView Comneon Modem Device Info */
+#define CTP_MODEM_VID			0x1519
+#define CTP_MODEM_PID			0x0020
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 8a230f0e..d3318a0 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -209,6 +209,7 @@
 static void wdm_int_callback(struct urb *urb)
 {
 	int rv = 0;
+	int responding;
 	int status = urb->status;
 	struct wdm_device *desc;
 	struct usb_cdc_notification *dr;
@@ -262,8 +263,8 @@
 
 	spin_lock(&desc->iuspin);
 	clear_bit(WDM_READ, &desc->flags);
-	set_bit(WDM_RESPONDING, &desc->flags);
-	if (!test_bit(WDM_DISCONNECTING, &desc->flags)
+	responding = test_and_set_bit(WDM_RESPONDING, &desc->flags);
+	if (!responding && !test_bit(WDM_DISCONNECTING, &desc->flags)
 		&& !test_bit(WDM_SUSPENDING, &desc->flags)) {
 		rv = usb_submit_urb(desc->response, GFP_ATOMIC);
 		dev_dbg(&desc->intf->dev, "%s: usb_submit_urb %d",
@@ -685,16 +686,20 @@
 {
 	struct wdm_device *desc = container_of(work, struct wdm_device, rxwork);
 	unsigned long flags;
-	int rv;
+	int rv = 0;
+	int responding;
 
 	spin_lock_irqsave(&desc->iuspin, flags);
 	if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
 		spin_unlock_irqrestore(&desc->iuspin, flags);
 	} else {
+		responding = test_and_set_bit(WDM_RESPONDING, &desc->flags);
 		spin_unlock_irqrestore(&desc->iuspin, flags);
-		rv = usb_submit_urb(desc->response, GFP_KERNEL);
+		if (!responding)
+			rv = usb_submit_urb(desc->response, GFP_KERNEL);
 		if (rv < 0 && rv != -EPERM) {
 			spin_lock_irqsave(&desc->iuspin, flags);
+			clear_bit(WDM_RESPONDING, &desc->flags);
 			if (!test_bit(WDM_DISCONNECTING, &desc->flags))
 				schedule_work(&desc->rxwork);
 			spin_unlock_irqrestore(&desc->iuspin, flags);
diff --git a/drivers/usb/core/buffer.c b/drivers/usb/core/buffer.c
index b0585e6..36138f7 100644
--- a/drivers/usb/core/buffer.c
+++ b/drivers/usb/core/buffer.c
@@ -115,6 +115,11 @@
 		return kmalloc(size, mem_flags);
 	}
 
+	/* we won't use internal SRAM as data payload, we can't get
+	   any benefits from it */
+	if (hcd->has_sram && hcd->sram_no_payload)
+		return dma_alloc_coherent(NULL, size, dma, mem_flags);
+
 	for (i = 0; i < HCD_BUFFER_POOLS; i++) {
 		if (size <= pool_max[i])
 			return dma_pool_alloc(hcd->pool[i], mem_flags, dma);
@@ -141,6 +146,11 @@
 		return;
 	}
 
+	if (hcd->has_sram && hcd->sram_no_payload) {
+		dma_free_coherent(NULL, size, addr, dma);
+		return;
+	}
+
 	for (i = 0; i < HCD_BUFFER_POOLS; i++) {
 		if (size <= pool_max[i]) {
 			dma_pool_free(hcd->pool[i], addr, dma);
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 7199adc..a6b2cab 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -424,7 +424,8 @@
 
 	memcpy(&config->desc, buffer, USB_DT_CONFIG_SIZE);
 	if (config->desc.bDescriptorType != USB_DT_CONFIG ||
-	    config->desc.bLength < USB_DT_CONFIG_SIZE) {
+	    config->desc.bLength < USB_DT_CONFIG_SIZE ||
+	    config->desc.bLength > size) {
 		dev_err(ddev, "invalid descriptor for config index %d: "
 		    "type = 0x%X, length = %d\n", cfgidx,
 		    config->desc.bDescriptorType, config->desc.bLength);
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index c88c4fb..ce773cc 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -742,6 +742,22 @@
 		if ((index & ~USB_DIR_IN) == 0)
 			return 0;
 		ret = findintfep(ps->dev, index);
+		if (ret < 0) {
+			/*
+			 * Some not fully compliant Win apps seem to get
+			 * index wrong and have the endpoint number here
+			 * rather than the endpoint address (with the
+			 * correct direction). Win does let this through,
+			 * so we'll not reject it here but leave it to
+			 * the device to not break KVM. But we warn.
+			 */
+			ret = findintfep(ps->dev, index ^ 0x80);
+			if (ret >= 0)
+				dev_info(&ps->dev->dev,
+					"%s: process %i (%s) requesting ep %02x but needs %02x\n",
+					__func__, task_pid_nr(current),
+					current->comm, index, index ^ 0x80);
+		}
 		if (ret >= 0)
 			ret = checkintf(ps, ret);
 		break;
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 6eab440..d1d4d9f 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -28,6 +28,7 @@
 #include <linux/usb.h>
 #include <linux/usb/quirks.h>
 #include <linux/usb/hcd.h>
+#include <linux/pci.h>
 
 #include "usb.h"
 
@@ -1677,6 +1678,10 @@
 	int			w, i;
 	struct usb_interface	*intf;
 
+#ifdef CONFIG_USB_HCD_HSIC
+	struct pci_dev *pdev  = to_pci_dev(udev->bus->controller);
+#endif
+
 	/* Fail if autosuspend is disabled, or any interfaces are in use, or
 	 * any interface drivers require remote wakeup but it isn't available.
 	 */
@@ -1710,6 +1715,14 @@
 			}
 		}
 	}
+
+#ifdef CONFIG_USB_HCD_HSIC
+	if (pdev->device == 0x119D) {
+		udev->do_remote_wakeup = device_can_wakeup(&udev->dev);
+		return 0;
+	}
+#endif
+
 	if (w && !device_can_wakeup(&udev->dev)) {
 		dev_dbg(&udev->dev, "remote wakeup needed for autosuspend\n");
 		return -EOPNOTSUPP;
diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
index acbfeb0..eda1d66 100644
--- a/drivers/usb/core/generic.c
+++ b/drivers/usb/core/generic.c
@@ -210,8 +210,11 @@
 	/* Non-root devices don't need to do anything for FREEZE or PRETHAW */
 	else if (msg.event == PM_EVENT_FREEZE || msg.event == PM_EVENT_PRETHAW)
 		rc = 0;
-	else
+	else {
 		rc = usb_port_suspend(udev, msg);
+		if (rc == 0)
+			usb_notify_port_suspend(udev);
+	}
 
 	return rc;
 }
@@ -227,8 +230,11 @@
 	 */
 	if (!udev->parent)
 		rc = hcd_bus_resume(udev, msg);
-	else
+	else {
 		rc = usb_port_resume(udev, msg);
+		if (rc == 0)
+			usb_notify_port_resume(udev);
+	}
 	return rc;
 }
 
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index d53547d..bad89a4 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1010,6 +1010,7 @@
 					dev_name(&usb_dev->dev), retval);
 			return retval;
 		}
+		usb_dev->lpm_capable = usb_device_supports_lpm(usb_dev);
 	}
 
 	retval = usb_new_device (usb_dev);
@@ -2136,6 +2137,11 @@
 	usb_lock_device(udev);
 	usb_remote_wakeup(udev);
 	usb_unlock_device(udev);
+	if (HCD_IRQ_DISABLED(hcd)) {
+		/* We can now process IRQs so enable IRQ */
+		clear_bit(HCD_FLAG_IRQ_DISABLED, &hcd->flags);
+		enable_irq(hcd->irq);
+	}
 }
 
 /**
@@ -2223,9 +2229,23 @@
 	 */
 	local_irq_save(flags);
 
-	if (unlikely(HCD_DEAD(hcd) || !HCD_HW_ACCESSIBLE(hcd)))
+	if (unlikely(HCD_DEAD(hcd)))
 		rc = IRQ_NONE;
-	else if (hcd->driver->irq(hcd) == IRQ_NONE)
+	else if (unlikely(!HCD_HW_ACCESSIBLE(hcd))) {
+		if (hcd->has_wakeup_irq) {
+			/*
+			 * We got a wakeup interrupt while the controller was
+			 * suspending or suspended. We can't handle it now, so
+			 * disable the IRQ and resume the root hub (and hence
+			 * the controller too).
+			 */
+			disable_irq_nosync(hcd->irq);
+			set_bit(HCD_FLAG_IRQ_DISABLED, &hcd->flags);
+			usb_hcd_resume_root_hub(hcd);
+			rc = IRQ_HANDLED;
+		} else
+			rc = IRQ_NONE;
+	} else if (hcd->driver->irq(hcd) == IRQ_NONE)
 		rc = IRQ_NONE;
 	else
 		rc = IRQ_HANDLED;
@@ -2338,6 +2358,8 @@
 	hcd->rh_timer.data = (unsigned long) hcd;
 #ifdef CONFIG_PM_RUNTIME
 	INIT_WORK(&hcd->wakeup_work, hcd_resume_work);
+	wake_lock_init(&hcd->wake_lock,
+		WAKE_LOCK_SUSPEND, "hcd_wake_lock");
 #endif
 
 	hcd->driver = driver;
@@ -2386,6 +2408,11 @@
 		kfree(hcd->bandwidth_mutex);
 	else
 		hcd->shared_hcd->shared_hcd = NULL;
+
+#ifdef CONFIG_PM_RUNTIME
+	wake_lock_destroy(&hcd->wake_lock);
+#endif
+
 	kfree(hcd);
 }
 
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index feef935..465a158 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -33,6 +33,10 @@
 
 #include "hub.h"
 
+#ifdef CONFIG_USB_HCD_HSIC
+#include <linux/usb/ehci-tangier-hsic-pci.h>
+#endif
+
 /* if we are in debug mode, always announce new devices */
 #ifdef DEBUG
 #ifndef CONFIG_USB_ANNOUNCE_NEW_DEVICES
@@ -135,7 +139,7 @@
 	return usb_get_intfdata(hdev->actconfig->interface[0]);
 }
 
-static int usb_device_supports_lpm(struct usb_device *udev)
+int usb_device_supports_lpm(struct usb_device *udev)
 {
 	/* USB 2.1 (and greater) devices indicate LPM support through
 	 * their USB 2.0 Extended Capabilities BOS descriptor.
@@ -156,6 +160,11 @@
 				"Power management will be impacted.\n");
 		return 0;
 	}
+
+	/* udev is root hub */
+	if (!udev->parent)
+		return 1;
+
 	if (udev->parent->lpm_capable)
 		return 1;
 
@@ -586,6 +595,13 @@
 	spin_unlock_irqrestore(&hub_event_lock, flags);
 }
 
+void usb_set_change_bits(struct usb_device *hdev, unsigned int port)
+{
+	struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
+
+	set_bit(port, hub->change_bits);
+}
+
 void usb_kick_khubd(struct usb_device *hdev)
 {
 	struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
@@ -668,6 +684,15 @@
 static inline int
 hub_clear_tt_buffer (struct usb_device *hdev, u16 devinfo, u16 tt)
 {
+	/* Need to clear both directions for control ep */
+	if (((devinfo >> 11) & USB_ENDPOINT_XFERTYPE_MASK) ==
+			USB_ENDPOINT_XFER_CONTROL) {
+		int status = usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
+				HUB_CLEAR_TT_BUFFER, USB_RT_PORT,
+				devinfo ^ 0x8000, tt, NULL, 0, 1000);
+		if (status)
+			return status;
+	}
 	return usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
 			       HUB_CLEAR_TT_BUFFER, USB_RT_PORT, devinfo,
 			       tt, NULL, 0, 1000);
@@ -1548,10 +1573,15 @@
 	if (hub->has_indicators && blinkenlights)
 		hub->indicator [0] = INDICATOR_CYCLE;
 
-	for (i = 0; i < hdev->maxchild; i++)
-		if (usb_hub_create_port_device(hub, i + 1) < 0)
+	for (i = 0; i < hdev->maxchild; i++) {
+		ret = usb_hub_create_port_device(hub, i + 1);
+		if (ret < 0) {
 			dev_err(hub->intfdev,
 				"couldn't create port%d device.\n", i + 1);
+			hdev->maxchild = i;
+			goto fail_keep_maxchild;
+		}
+	}
 
 	usb_hub_adjust_deviceremovable(hdev, hub->descriptor);
 
@@ -1559,6 +1589,8 @@
 	return 0;
 
 fail:
+	hdev->maxchild = 0;
+fail_keep_maxchild:
 	dev_err (hub_dev, "config failed, %s (err %d)\n",
 			message, ret);
 	/* hub_disconnect() frees urb and descriptor */
@@ -1840,9 +1872,11 @@
 	struct usb_hub *hub = usb_hub_to_struct_hub(udev);
 	int i;
 
-	for (i = 0; i < udev->maxchild; ++i) {
-		if (hub->ports[i]->child)
-			recursively_mark_NOTATTACHED(hub->ports[i]->child);
+	if (hub) {
+		for (i = 0; i < udev->maxchild; ++i) {
+			if (hub->ports[i]->child)
+				recursively_mark_NOTATTACHED(hub->ports[i]->child);
+		}
 	}
 	if (udev->state == USB_STATE_SUSPENDED)
 		udev->active_duration -= jiffies;
@@ -1987,6 +2021,24 @@
 		hcd->driver->free_dev(hcd, udev);
 }
 
+#ifdef CONFIG_USB_HCD_HSIC
+
+static void hsic_notify(struct usb_device *udev, unsigned action)
+{
+	struct usb_hcd *hcd = bus_to_hcd(udev->bus);
+
+	if (hcd->hsic_notify)
+		hcd->hsic_notify(udev, action);
+}
+
+#else
+
+static inline void hsic_notify(struct usb_device *udev, unsigned action)
+{
+}
+
+#endif
+
 /**
  * usb_disconnect - disconnect a device (usbcore-internal)
  * @pdev: pointer to device being disconnected
@@ -2054,6 +2106,7 @@
 	 * notifier chain (used by usbfs and possibly others).
 	 */
 	device_del(&udev->dev);
+	hsic_notify(udev, USB_DEVICE_REMOVE);
 
 	/* Free the device number and delete the parent's children[]
 	 * (or root_hub) pointer.
@@ -2135,23 +2188,26 @@
 						? "" : "non-");
 
 				/* enable HNP before suspend, it's simpler */
-				if (port1 == bus->otg_port)
+				if (port1 == bus->otg_port) {
 					bus->b_hnp_enable = 1;
-				err = usb_control_msg(udev,
-					usb_sndctrlpipe(udev, 0),
-					USB_REQ_SET_FEATURE, 0,
-					bus->b_hnp_enable
-						? USB_DEVICE_B_HNP_ENABLE
-						: USB_DEVICE_A_ALT_HNP_SUPPORT,
-					0, NULL, 0, USB_CTRL_SET_TIMEOUT);
-				if (err < 0) {
-					/* OTG MESSAGE: report errors here,
-					 * customize to match your product.
-					 */
-					dev_info(&udev->dev,
+					/* don't use A_ALT_HNP_SUPPORT as it is
+					 * obsoleted in OTG2.0 Spec */
+					err = usb_control_msg(udev,
+						usb_sndctrlpipe(udev, 0),
+						USB_REQ_SET_FEATURE, 0,
+						USB_DEVICE_B_HNP_ENABLE,
+						0, NULL, 0,
+						USB_CTRL_SET_TIMEOUT);
+					if (err < 0) {
+						/* OTG MESSAGE: report errors
+						 * here, customize to match
+						 * your product. */
+						dev_info(&udev->dev,
 						"can't set HNP mode: %d\n",
-						err);
-					bus->b_hnp_enable = 0;
+							err);
+						bus->b_hnp_enable = 0;
+					}
+
 				}
 			}
 		}
@@ -2334,6 +2390,7 @@
 	 * notifier chain (used by usbfs and possibly others).
 	 */
 	err = device_add(&udev->dev);
+	hsic_notify(udev, USB_DEVICE_ADD);
 	if (err) {
 		dev_err(&udev->dev, "can't device_add, error %d\n", err);
 		goto fail;
@@ -2846,6 +2903,15 @@
 				USB_CTRL_SET_TIMEOUT);
 }
 
+/* Count of wakeup-enabled devices at or below udev */
+static unsigned wakeup_enabled_descendants(struct usb_device *udev)
+{
+	struct usb_hub *hub = usb_hub_to_struct_hub(udev);
+
+	return udev->do_remote_wakeup +
+			(hub ? hub->wakeup_enabled_descendants : 0);
+}
+
 /*
  * usb_port_suspend - suspend a usb device's upstream port
  * @udev: device that's no longer in active use, not a root hub
@@ -2886,8 +2952,8 @@
  * Linux (2.6) currently has NO mechanisms to initiate that:  no khubd
  * timer, no SRP, no requests through sysfs.
  *
- * If Runtime PM isn't enabled or used, non-SuperSpeed devices really get
- * suspended only when their bus goes into global suspend (i.e., the root
+ * If Runtime PM isn't enabled or used, non-SuperSpeed devices may not get
+ * suspended until their bus goes into global suspend (i.e., the root
  * hub is suspended).  Nevertheless, we change @udev->state to
  * USB_STATE_SUSPENDED as this is the device's "logical" state.  The actual
  * upstream port setting is stored in @udev->port_is_suspended.
@@ -2898,10 +2964,10 @@
 {
 	struct usb_hub	*hub = usb_hub_to_struct_hub(udev->parent);
 	struct usb_port *port_dev = hub->ports[udev->portnum - 1];
-	enum pm_qos_flags_status pm_qos_stat;
 	int		port1 = udev->portnum;
 	int		status;
 	bool		really_suspend = true;
+	bool		wakeup_mutex_locked = false;
 
 	/* enable remote wakeup when appropriate; this lets the device
 	 * wake up the upstream hub (including maybe the root hub).
@@ -2936,7 +3002,7 @@
 					status);
 			/* bail if autosuspend is requested */
 			if (PMSG_IS_AUTO(msg))
-				return status;
+				goto err_wakeup;
 		}
 	}
 
@@ -2945,28 +3011,45 @@
 		usb_set_usb2_hardware_lpm(udev, 0);
 
 	if (usb_disable_ltm(udev)) {
-		dev_err(&udev->dev, "%s Failed to disable LTM before suspend\n.",
-				__func__);
-		return -ENOMEM;
+		dev_err(&udev->dev, "Failed to disable LTM before suspend\n.");
+		status = -ENOMEM;
+		if (PMSG_IS_AUTO(msg))
+			goto err_ltm;
 	}
 	if (usb_unlocked_disable_lpm(udev)) {
-		dev_err(&udev->dev, "%s Failed to disable LPM before suspend\n.",
-				__func__);
-		return -ENOMEM;
+		dev_err(&udev->dev, "Failed to disable LPM before suspend\n.");
+		status = -ENOMEM;
+		if (PMSG_IS_AUTO(msg))
+			goto err_lpm3;
+	}
+
+	/*
+	 * Hold port wakeup mutex before set port suspend if device may
+	 * generate remote wakeup to avoid race condition.
+	 */
+	if (udev->do_remote_wakeup) {
+		mutex_lock(&port_dev->wakeup_mutex);
+		wakeup_mutex_locked = true;
 	}
 
 	/* see 7.1.7.6 */
 	if (hub_is_superspeed(hub->hdev))
 		status = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_U3);
-	else if (PMSG_IS_AUTO(msg))
-		status = set_port_feature(hub->hdev, port1,
-						USB_PORT_FEAT_SUSPEND);
+
 	/*
 	 * For system suspend, we do not need to enable the suspend feature
 	 * on individual USB-2 ports.  The devices will automatically go
 	 * into suspend a few ms after the root hub stops sending packets.
 	 * The USB 2.0 spec calls this "global suspend".
+	 *
+	 * However, many USB hubs have a bug: They don't relay wakeup requests
+	 * from a downstream port if the port's suspend feature isn't on.
+	 * Therefore we will turn on the suspend feature if udev or any of its
+	 * descendants is enabled for remote wakeup.
 	 */
+	else if (PMSG_IS_AUTO(msg) || wakeup_enabled_descendants(udev) > 0)
+		status = set_port_feature(hub->hdev, port1,
+				USB_PORT_FEAT_SUSPEND);
 	else {
 		really_suspend = false;
 		status = 0;
@@ -2974,54 +3057,52 @@
 	if (status) {
 		dev_dbg(hub->intfdev, "can't suspend port %d, status %d\n",
 				port1, status);
-		/* paranoia:  "should not happen" */
-		if (udev->do_remote_wakeup) {
-			if (!hub_is_superspeed(hub->hdev)) {
-				(void) usb_control_msg(udev,
-						usb_sndctrlpipe(udev, 0),
-						USB_REQ_CLEAR_FEATURE,
-						USB_RECIP_DEVICE,
-						USB_DEVICE_REMOTE_WAKEUP, 0,
-						NULL, 0,
-						USB_CTRL_SET_TIMEOUT);
-			} else
-				(void) usb_disable_function_remotewakeup(udev);
 
-		}
-
+		/* Try to enable USB3 LPM and LTM again */
+		usb_unlocked_enable_lpm(udev);
+ err_lpm3:
+		usb_enable_ltm(udev);
+ err_ltm:
 		/* Try to enable USB2 hardware LPM again */
 		if (udev->usb2_hw_lpm_capable == 1)
 			usb_set_usb2_hardware_lpm(udev, 1);
 
-		/* Try to enable USB3 LTM and LPM again */
-		usb_enable_ltm(udev);
-		usb_unlocked_enable_lpm(udev);
+		if (udev->do_remote_wakeup) {
+			if (udev->speed < USB_SPEED_SUPER)
+				usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+						USB_REQ_CLEAR_FEATURE,
+						USB_RECIP_DEVICE,
+						USB_DEVICE_REMOTE_WAKEUP, 0,
+						NULL, 0, USB_CTRL_SET_TIMEOUT);
+			else
+				usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+						USB_REQ_CLEAR_FEATURE,
+						USB_RECIP_INTERFACE,
+						USB_INTRF_FUNC_SUSPEND, 0,
+						NULL, 0, USB_CTRL_SET_TIMEOUT);
+		}
+ err_wakeup:
 
 		/* System sleep transitions should never fail */
 		if (!PMSG_IS_AUTO(msg))
 			status = 0;
 	} else {
-		/* device has up to 10 msec to fully suspend */
 		dev_dbg(&udev->dev, "usb %ssuspend, wakeup %d\n",
 				(PMSG_IS_AUTO(msg) ? "auto-" : ""),
 				udev->do_remote_wakeup);
-		usb_set_device_state(udev, USB_STATE_SUSPENDED);
 		if (really_suspend) {
 			udev->port_is_suspended = 1;
+
+			/* device has up to 10 msec to fully suspend */
 			msleep(10);
 		}
+		usb_set_device_state(udev, USB_STATE_SUSPENDED);
 	}
 
-	/*
-	 * Check whether current status meets the requirement of
-	 * usb port power off mechanism
-	 */
-	pm_qos_stat = dev_pm_qos_flags(&port_dev->dev,
-			PM_QOS_FLAG_NO_POWER_OFF);
-	if (!udev->do_remote_wakeup
-			&& pm_qos_stat != PM_QOS_FLAGS_ALL
-			&& udev->persist_enabled
-			&& !status) {
+	if (wakeup_mutex_locked)
+		mutex_unlock(&port_dev->wakeup_mutex);
+
+	if (status == 0 && !udev->do_remote_wakeup && udev->persist_enabled) {
 		pm_runtime_put_sync(&port_dev->dev);
 		port_dev->did_runtime_put = true;
 	}
@@ -3291,7 +3372,11 @@
 	unsigned		port1;
 	int			status;
 
-	/* Warn if children aren't already suspended */
+	/*
+	 * Warn if children aren't already suspended.
+	 * Also, add up the number of wakeup-enabled descendants.
+	 */
+	hub->wakeup_enabled_descendants = 0;
 	for (port1 = 1; port1 <= hdev->maxchild; port1++) {
 		struct usb_device	*udev;
 
@@ -3301,6 +3386,9 @@
 			if (PMSG_IS_AUTO(msg))
 				return -EBUSY;
 		}
+		if (udev)
+			hub->wakeup_enabled_descendants +=
+					wakeup_enabled_descendants(udev);
 	}
 
 	if (hdev->do_remote_wakeup && hub->quirk_check_port_auto_suspend) {
@@ -3392,6 +3480,9 @@
 	unsigned long long u2_pel;
 	int ret;
 
+	if (udev->state != USB_STATE_CONFIGURED)
+		return 0;
+
 	/* Convert SEL and PEL stored in ns to us */
 	u1_sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
 	u1_pel = DIV_ROUND_UP(udev->u1_params.pel, 1000);
@@ -4308,6 +4399,14 @@
 		"port %d, status %04x, change %04x, %s\n",
 		port1, portstatus, portchange, portspeed(hub, portstatus));
 
+#ifdef CONFIG_PM_RUNTIME
+	/* add 5s time-out wakelock for delay system suspend */
+	wake_lock_timeout(&hcd->wake_lock, 5 * HZ);
+	dev_dbg(hub_dev,
+		"%s add 5s wake_lock for port connect change\n",
+		__func__);
+#endif
+
 	if (hub->has_indicators) {
 		set_port_led(hub, port1, HUB_LED_AUTO);
 		hub->indicator[port1-1] = INDICATOR_AUTO;
@@ -4513,6 +4612,10 @@
 loop_disable:
 		hub_port_disable(hub, port1, 1);
 loop:
+		/* If the hcd was already quiesce,
+		 * we needn't to continue retry. */
+		if (hcd->state == HC_STATE_QUIESCING)
+			return;
 		usb_ep0_reinit(udev);
 		release_devnum(udev);
 		hub_free_dev(udev);
@@ -4561,7 +4664,12 @@
 		msleep(10);
 
 		usb_lock_device(udev);
+		/* hold port mutex before handle remote wakup */
+		if (hub->ports[port - 1])
+			mutex_lock(&hub->ports[port - 1]->wakeup_mutex);
 		ret = usb_remote_wakeup(udev);
+		if (hub->ports[port - 1])
+			mutex_unlock(&hub->ports[port - 1]->wakeup_mutex);
 		usb_unlock_device(udev);
 		if (ret < 0)
 			connect_change = 1;
@@ -4764,7 +4872,8 @@
 					hub->ports[i - 1]->child;
 
 				dev_dbg(hub_dev, "warm reset port %d\n", i);
-				if (!udev) {
+				if (!udev || !(portstatus &
+						USB_PORT_STAT_CONNECTION)) {
 					status = hub_port_reset(hub, i,
 							NULL, HUB_BH_RESET_TIME,
 							true);
@@ -4774,8 +4883,8 @@
 					usb_lock_device(udev);
 					status = usb_reset_device(udev);
 					usb_unlock_device(udev);
+					connect_change = 0;
 				}
-				connect_change = 0;
 			}
 
 			if (connect_change)
@@ -5037,6 +5146,12 @@
 	}
 	parent_hub = usb_hub_to_struct_hub(parent_hdev);
 
+	/* Disable USB2 hardware LPM.
+	 * It will be re-enabled by the enumeration process.
+	 */
+	if (udev->usb2_hw_lpm_enabled == 1)
+		usb_set_usb2_hardware_lpm(udev, 0);
+
 	/* Disable LPM and LTM while we reset the device and reinstall the alt
 	 * settings.  Device-initiated LPM settings, and system exit latency
 	 * settings are cleared when the device is reset, so we have to set
diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h
index 80ab9ee..9d3e4a1 100644
--- a/drivers/usb/core/hub.h
+++ b/drivers/usb/core/hub.h
@@ -59,6 +59,9 @@
 	struct usb_tt		tt;		/* Transaction Translator */
 
 	unsigned		mA_per_port;	/* current for each child */
+#ifdef	CONFIG_PM
+	unsigned		wakeup_enabled_descendants;
+#endif
 
 	unsigned		limited_power:1;
 	unsigned		quiescing:1;
@@ -82,6 +85,7 @@
  * @portnum: port index num based one
  * @power_is_on: port's power state
  * @did_runtime_put: port has done pm_runtime_put().
+ * @wakeup_mutex: mutex for remote wakeup.
  */
 struct usb_port {
 	struct usb_device *child;
@@ -91,6 +95,7 @@
 	u8 portnum;
 	unsigned power_is_on:1;
 	unsigned did_runtime_put:1;
+	struct mutex wakeup_mutex;
 };
 
 #define to_usb_port(_dev) \
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 444d30e..2413714 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1172,8 +1172,12 @@
 			put_device(&dev->actconfig->interface[i]->dev);
 			dev->actconfig->interface[i] = NULL;
 		}
+
+		if (dev->usb2_hw_lpm_enabled == 1)
+			usb_set_usb2_hardware_lpm(dev, 0);
 		usb_unlocked_disable_lpm(dev);
 		usb_disable_ltm(dev);
+
 		dev->actconfig = NULL;
 		if (dev->state == USB_STATE_CONFIGURED)
 			usb_set_device_state(dev, USB_STATE_ADDRESS);
diff --git a/drivers/usb/core/notify.c b/drivers/usb/core/notify.c
index 7728c91..26c1b45 100644
--- a/drivers/usb/core/notify.c
+++ b/drivers/usb/core/notify.c
@@ -58,6 +58,18 @@
 	mutex_unlock(&usbfs_mutex);
 }
 
+void usb_notify_port_suspend(struct usb_device *udev)
+{
+	blocking_notifier_call_chain(&usb_notifier_list,
+			USB_PORT_SUSPEND, udev);
+}
+
+void usb_notify_port_resume(struct usb_device *udev)
+{
+	blocking_notifier_call_chain(&usb_notifier_list,
+			USB_PORT_RESUME, udev);
+}
+
 void usb_notify_add_bus(struct usb_bus *ubus)
 {
 	blocking_notifier_call_chain(&usb_notifier_list, USB_BUS_ADD, ubus);
diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
index b8bad29..84fec24 100644
--- a/drivers/usb/core/port.c
+++ b/drivers/usb/core/port.c
@@ -89,22 +89,19 @@
 	retval = usb_hub_set_port_power(hdev, port1, true);
 	if (port_dev->child && !retval) {
 		/*
-		 * Wait for usb hub port to be reconnected in order to make
-		 * the resume procedure successful.
+		 * Attempt to wait for usb hub port to be reconnected in order
+		 * to make the resume procedure successful.  The device may have
+		 * disconnected while the port was powered off, so ignore the
+		 * return status.
 		 */
 		retval = hub_port_debounce_be_connected(hub, port1);
-		if (retval < 0) {
+		if (retval < 0)
 			dev_dbg(&port_dev->dev, "can't get reconnection after setting port  power on, status %d\n",
 					retval);
-			goto out;
-		}
 		usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_C_ENABLE);
-
-		/* Set return value to 0 if debounce successful */
 		retval = 0;
 	}
 
-out:
 	clear_bit(port1, hub->busy_bits);
 	usb_autopm_put_interface(intf);
 	return retval;
@@ -169,6 +166,7 @@
 	port_dev->dev.groups = port_dev_group;
 	port_dev->dev.type = &usb_port_device_type;
 	dev_set_name(&port_dev->dev, "port%d", port1);
+	mutex_init(&port_dev->wakeup_mutex);
 
 	retval = device_register(&port_dev->dev);
 	if (retval)
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index a635988..01fe362 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -78,6 +78,12 @@
 	{ USB_DEVICE(0x04d8, 0x000c), .driver_info =
 			USB_QUIRK_CONFIG_INTF_STRINGS },
 
+	/* CarrolTouch 4000U */
+	{ USB_DEVICE(0x04e7, 0x0009), .driver_info = USB_QUIRK_RESET_RESUME },
+
+	/* CarrolTouch 4500U */
+	{ USB_DEVICE(0x04e7, 0x0030), .driver_info = USB_QUIRK_RESET_RESUME },
+
 	/* Samsung Android phone modem - ID conflict with SPH-I500 */
 	{ USB_DEVICE(0x04e8, 0x6601), .driver_info =
 			USB_QUIRK_CONFIG_INTF_STRINGS },
@@ -91,6 +97,9 @@
 	/* Alcor Micro Corp. Hub */
 	{ USB_DEVICE(0x058f, 0x9254), .driver_info = USB_QUIRK_RESET_RESUME },
 
+	/* MicroTouch Systems touchscreen */
+	{ USB_DEVICE(0x0596, 0x051e), .driver_info = USB_QUIRK_RESET_RESUME },
+
 	/* appletouch */
 	{ USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME },
 
@@ -124,6 +133,9 @@
 	/* Broadcom BCM92035DGROM BT dongle */
 	{ USB_DEVICE(0x0a5c, 0x2021), .driver_info = USB_QUIRK_RESET_RESUME },
 
+	/* MAYA44USB sound device */
+	{ USB_DEVICE(0x0a92, 0x0091), .driver_info = USB_QUIRK_RESET_RESUME },
+
 	/* Action Semiconductor flash disk */
 	{ USB_DEVICE(0x10d6, 0x2200), .driver_info =
 			USB_QUIRK_STRING_FETCH_255 },
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index aa38db4..134023e 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -497,8 +497,62 @@
 static DEVICE_ATTR(usb2_hardware_lpm, S_IRUGO | S_IWUSR, show_usb2_hardware_lpm,
 			set_usb2_hardware_lpm);
 
+static ssize_t
+show_usb2_lpm_l1_timeout(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	struct usb_device *udev = to_usb_device(dev);
+	return sprintf(buf, "%d\n", udev->l1_params.timeout);
+}
+
+static ssize_t
+set_usb2_lpm_l1_timeout(struct device *dev, struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+	struct usb_device *udev = to_usb_device(dev);
+	u16 timeout;
+
+	if (kstrtou16(buf, 0, &timeout))
+		return -EINVAL;
+
+	udev->l1_params.timeout = timeout;
+
+	return count;
+}
+
+static DEVICE_ATTR(usb2_lpm_l1_timeout, S_IRUGO | S_IWUSR,
+			show_usb2_lpm_l1_timeout, set_usb2_lpm_l1_timeout);
+
+static ssize_t
+show_usb2_lpm_besl(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	struct usb_device *udev = to_usb_device(dev);
+	return sprintf(buf, "%d\n", udev->l1_params.besl);
+}
+
+static ssize_t
+set_usb2_lpm_besl(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct usb_device *udev = to_usb_device(dev);
+	u8 besl;
+
+	if (kstrtou8(buf, 0, &besl) || besl > 15)
+		return -EINVAL;
+
+	udev->l1_params.besl = besl;
+
+	return count;
+}
+
+static DEVICE_ATTR(usb2_lpm_besl, S_IRUGO | S_IWUSR,
+			show_usb2_lpm_besl, set_usb2_lpm_besl);
+
 static struct attribute *usb2_hardware_lpm_attr[] = {
 	&dev_attr_usb2_hardware_lpm.attr,
+	&dev_attr_usb2_lpm_l1_timeout.attr,
+	&dev_attr_usb2_lpm_besl.attr,
 	NULL,
 };
 static struct attribute_group usb2_hardware_lpm_attr_group = {
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index 8238577..2386eba 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -35,6 +35,7 @@
 		unsigned int size);
 extern int usb_get_bos_descriptor(struct usb_device *dev);
 extern void usb_release_bos_descriptor(struct usb_device *dev);
+extern int usb_device_supports_lpm(struct usb_device *udev);
 extern char *usb_cache_string(struct usb_device *udev, int index);
 extern int usb_set_configuration(struct usb_device *dev, int configuration);
 extern int usb_choose_configuration(struct usb_device *udev);
@@ -49,6 +50,7 @@
 }
 
 extern void usb_kick_khubd(struct usb_device *dev);
+extern void usb_set_change_bits(struct usb_device *hdev, unsigned int port);
 extern int usb_match_one_id_intf(struct usb_device *dev,
 				 struct usb_host_interface *intf,
 				 const struct usb_device_id *id);
@@ -179,6 +181,8 @@
 extern void usb_notify_remove_device(struct usb_device *udev);
 extern void usb_notify_add_bus(struct usb_bus *ubus);
 extern void usb_notify_remove_bus(struct usb_bus *ubus);
+extern void usb_notify_port_suspend(struct usb_device *udev);
+extern void usb_notify_port_resume(struct usb_device *udev);
 extern enum usb_port_connect_type
 	usb_get_hub_port_connect_type(struct usb_device *hdev, int port1);
 extern void usb_set_hub_port_connect_type(struct usb_device *hdev, int port1,
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index 757aa18..e3c18fd 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -40,6 +40,78 @@
 
 endchoice
 
+comment "Platform Glue Driver Support"
+
+config USB_DWC3_OMAP
+	tristate "Texas Instruments OMAP5 and similar Platforms"
+	depends on EXTCON
+	default USB_DWC3
+	help
+	  Some platforms from Texas Instruments like OMAP5, DRA7xxx and
+	  AM437x use this IP for USB2/3 functionality.
+
+	  Say 'Y' or 'M' here if you have one such device
+
+config USB_DWC3_EXYNOS
+	tristate "Samsung Exynos Platform"
+	default USB_DWC3
+	help
+	  Recent Exynos5 SoCs ship with one DesignWare Core USB3 IP inside,
+	  say 'Y' or 'M' if you have one such device.
+
+config USB_DWC3_PCI
+	tristate "PCIe-based Platforms"
+	depends on PCI
+	default USB_DWC3
+	help
+	  If you're using the DesignWare Core IP with a PCIe, please say
+	  'Y' or 'M' here.
+
+	  One such PCIe-based platform is Synopsys' PCIe HAPS model of
+	  this IP.
+
+config USB_DWC3_OTG
+	tristate "DWC3 OTG mode support"
+	depends on USB && PCI
+	select USB_OTG
+	help
+	  Say Y here to enable DWC3 OTG driver.
+	  This driver implement OTG framework for DWC3 OTG controller.
+	  Support role switch and charger detection feature. And maintain
+	  one state machine. This driver should be work with platform
+	  speical driver. Because every platform has their own hardware design.
+
+config USB_DWC3_INTEL_MRFL
+	tristate "DWC OTG 3.0 for Intel Merrifield platforms"
+	depends on USB && USB_DWC3_OTG
+	select USB_DWC3_DEVICE_INTEL
+	help
+	  Say Y here to enable DWC3 OTG driver for Intel Merrifield platforms.
+	  It implement OTG feature on DWC3 OTG controller.
+	  Support role switch and charger detection feature.
+	  This driver is must be set if you want to enable host mode on Intel
+	  Merrifield platforms.
+
+config USB_DWC3_DEVICE_INTEL
+	bool "DWC3 Device Mode support on Intel platform"
+	depends on USB_DWC3_OTG
+	help
+	  Support Device mode of DWC3 controller on Intel platform.
+	  It implement device mode feature on DWC3 OTG controller.
+	  This driver is must be set if you want to enable device mode for Intel
+	  platforms(e.g Baytrail and Merrifield)
+
+config USB_DWC3_HOST_INTEL
+	bool "DWC3 Host Mode support on Intel Merrifield platform"
+	depends on USB_ARCH_HAS_XHCI && USB_DWC3_INTEL_MRFL
+	help
+	  Support Host mode of DWC3 controller on Intel Merrifield platform.
+	  It is should be enable with DWC3 INTEL driver. Because Intel platform
+	  use different design with standard USB_DWC3_HOST. So if you want to
+	  enable host mode on Intel platform, then you have to enable this config.
+
+comment "Debugging features"
+
 config USB_DWC3_DEBUG
 	bool "Enable Debugging Messages"
 	help
diff --git a/drivers/usb/dwc3/Makefile b/drivers/usb/dwc3/Makefile
index 0c7ac92..0064564 100644
--- a/drivers/usb/dwc3/Makefile
+++ b/drivers/usb/dwc3/Makefile
@@ -1,6 +1,14 @@
-ccflags-$(CONFIG_USB_DWC3_DEBUG)	:= -DDEBUG
+cflags-$(CONFIG_USB_DWC3_DEBUG)	:= -DDEBUG
 ccflags-$(CONFIG_USB_DWC3_VERBOSE)	+= -DVERBOSE_DEBUG
 
+
+obj-$(CONFIG_USB_DWC3_DEVICE_INTEL)		+= dwc3-device-intel.o
+obj-$(CONFIG_USB_DWC3_INTEL_MRFL)		+= dwc3-intel-mrfl.o
+ifneq ($(CONFIG_DEBUG_FS),)
+	obj-$(CONFIG_USB_DWC3_DEVICE_INTEL)	+= debugfs.o
+endif
+
+ifeq ($(CONFIG_USB_DWC3_DEVICE_INTEL),)
 obj-$(CONFIG_USB_DWC3)			+= dwc3.o
 
 dwc3-y					:= core.o
@@ -16,6 +24,7 @@
 ifneq ($(CONFIG_DEBUG_FS),)
 	dwc3-y				+= debugfs.o
 endif
+endif
 
 ##
 # Platform-specific glue layers go here
@@ -27,15 +36,10 @@
 # the entire driver (with all its glue layers) on several architectures
 # and make sure it compiles fine. This will also help with allmodconfig
 # and allyesconfig builds.
-#
-# The only exception is the PCI glue layer, but that's only because
-# PCI doesn't provide nops if CONFIG_PCI isn't enabled.
 ##
 
-obj-$(CONFIG_USB_DWC3)		+= dwc3-omap.o
-obj-$(CONFIG_USB_DWC3)		+= dwc3-exynos.o
 
 ifneq ($(CONFIG_PCI),)
-	obj-$(CONFIG_USB_DWC3)		+= dwc3-pci.o
+	obj-$(CONFIG_USB_DWC3_OTG)	+= otg.o
 endif
 
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index c35d49d..087eadf 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -49,14 +49,17 @@
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
 #include <linux/of.h>
+#include <linux/pci.h>
 
 #include <linux/usb/otg.h>
 #include <linux/usb/ch9.h>
 #include <linux/usb/gadget.h>
+#include <linux/usb/ulpi.h>
 
 #include "core.h"
 #include "gadget.h"
 #include "io.h"
+#include "otg.h"
 
 #include "debug.h"
 
@@ -256,7 +259,8 @@
 		dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(n), 0);
 		dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(n), 0);
 		dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(n), 0);
-		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(n), 0);
+		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(n),
+			dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(n)));
 	}
 }
 
@@ -297,6 +301,7 @@
 	unsigned long		timeout;
 	u32			reg;
 	int			ret;
+	struct usb_phy		*usb_phy;
 
 	reg = dwc3_readl(dwc->regs, DWC3_GSNPSID);
 	/* This should read as U3 followed by revision number */
@@ -326,6 +331,18 @@
 
 	dwc3_core_soft_reset(dwc);
 
+	/* DCTL core soft reset may cause PHY hang, delay 1 ms and check ulpi */
+	mdelay(1);
+
+	if (!dwc->utmi_phy) {
+		usb_phy = usb_get_phy(USB_PHY_TYPE_USB2);
+		if (usb_phy &&
+			usb_phy_io_read(usb_phy, ULPI_VENDOR_ID_LOW) < 0)
+			dev_err(dwc->dev,
+				"ULPI not working after DCTL soft reset\n");
+		usb_put_phy(usb_phy);
+	}
+
 	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
 	reg &= ~DWC3_GCTL_SCALEDOWN_MASK;
 	reg &= ~DWC3_GCTL_DISSCRAMBLE;
@@ -450,7 +467,7 @@
 	}
 
 	if (IS_ERR(dwc->usb3_phy)) {
-		ret = PTR_ERR(dwc->usb2_phy);
+		ret = PTR_ERR(dwc->usb3_phy);
 
 		/*
 		 * if -ENXIO is returned, it means PHY layer wasn't
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index b69d322..0a89fee 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -51,6 +51,7 @@
 #include <linux/usb/gadget.h>
 
 /* Global constants */
+#define DWC3_SCRATCH_BUF_SIZE	4096
 #define DWC3_EP0_BOUNCE_SIZE	512
 #define DWC3_ENDPOINTS_NUM	32
 #define DWC3_XHCI_RESOURCES_NUM	2
@@ -162,6 +163,7 @@
 
 /* Global Configuration Register */
 #define DWC3_GCTL_PWRDNSCALE(n)	((n) << 19)
+#define DWC3_GCTL_PWRDNSCALE_MASK	DWC3_GCTL_PWRDNSCALE(0x1fff)
 #define DWC3_GCTL_U2RSTECN	(1 << 16)
 #define DWC3_GCTL_RAMCLKSEL(x)	(((x) & DWC3_GCTL_CLK_MASK) << 6)
 #define DWC3_GCTL_CLK_BUS	(0)
@@ -194,6 +196,10 @@
 #define DWC3_GTXFIFOSIZ_TXFDEF(n)	((n) & 0xffff)
 #define DWC3_GTXFIFOSIZ_TXFSTADDR(n)	((n) & 0xffff0000)
 
+/* Global Event Size Registers */
+#define DWC3_GEVNTSIZ_INTMASK		(1 << 31)
+#define DWC3_GEVNTSIZ_SIZE(n)		((n) & 0xffff)
+
 /* Global HWPARAMS1 Register */
 #define DWC3_GHWPARAMS1_EN_PWROPT(n)	(((n) & (3 << 24)) >> 24)
 #define DWC3_GHWPARAMS1_EN_PWROPT_NO	0
@@ -309,6 +315,7 @@
 #define DWC3_DGCMD_SET_LMP		0x01
 #define DWC3_DGCMD_SET_PERIODIC_PAR	0x02
 #define DWC3_DGCMD_XMIT_FUNCTION	0x03
+#define DWC3_DGCMD_SET_SCRATCH_ADDR_LO	0x04
 
 /* These apply for core versions 1.94a and later */
 #define DWC3_DGCMD_SET_SCRATCHPAD_ADDR_LO	0x04
@@ -408,9 +415,11 @@
  * @trb_pool_dma: dma address of @trb_pool
  * @free_slot: next slot which is going to be used
  * @busy_slot: first slot which is owned by HW
+ * @ep_state: endpoint state
  * @desc: usb_endpoint_descriptor pointer
  * @dwc: pointer to DWC controller
  * @flags: endpoint flags (wedged, stalled, ...)
+ * @flags_backup: backup endpoint flags
  * @current_trb: index of current used trb
  * @number: endpoint number (1 - 15)
  * @type: set to bmAttributes & USB_ENDPOINT_XFERTYPE_MASK
@@ -429,16 +438,23 @@
 	dma_addr_t		trb_pool_dma;
 	u32			free_slot;
 	u32			busy_slot;
+	u32			ep_state;
 	const struct usb_ss_ep_comp_descriptor *comp_desc;
 	struct dwc3		*dwc;
 
+	struct ebc_io		*ebc;
+#define DWC3_EP_EBC_OUT_NB	16
+#define DWC3_EP_EBC_IN_NB	17
+
 	unsigned		flags;
+	unsigned		flags_backup;
 #define DWC3_EP_ENABLED		(1 << 0)
 #define DWC3_EP_STALL		(1 << 1)
 #define DWC3_EP_WEDGE		(1 << 2)
 #define DWC3_EP_BUSY		(1 << 4)
 #define DWC3_EP_PENDING_REQUEST	(1 << 5)
 #define DWC3_EP_MISSED_ISOC	(1 << 6)
+#define DWC3_EP_HIBERNATION	(1 << 7)
 
 	/* This last one is specific to EP0 */
 #define DWC3_EP0_DIR_IN		(1 << 31)
@@ -495,6 +511,13 @@
 	DWC3_LINK_STATE_MASK		= 0x0f,
 };
 
+enum dwc3_pm_state {
+	PM_DISCONNECTED = 0,
+	PM_ACTIVE,
+	PM_SUSPENDED,
+	PM_RESUMING,
+};
+
 /* TRB Length, PCM and Status */
 #define DWC3_TRB_SIZE_MASK	(0x00ffffff)
 #define DWC3_TRB_SIZE_LENGTH(n)	((n) & DWC3_TRB_SIZE_MASK)
@@ -600,6 +623,7 @@
 	unsigned		direction:1;
 	unsigned		mapped:1;
 	unsigned		queued:1;
+	unsigned		short_packet:1;
 };
 
 /*
@@ -611,6 +635,22 @@
 };
 
 /**
+ * struct dwc3_hwregs - registers saved when entering hibernation
+ */
+struct dwc3_hwregs {
+	u32	guctl;
+	u32	dcfg;
+	u32	devten;
+	u32	gctl;
+	u32	gusb3pipectl0;
+	u32	gusb2phycfg0;
+	u32	gevntadrlo;
+	u32	gevntadrhi;
+	u32	gevntsiz;
+	u32	grxthrcfg;
+};
+
+/**
  * struct dwc3 - representation of our controller
  * @ctrl_req: usb control request which is used for ep0
  * @ep0_trb: trb which is used for the ctrl_req
@@ -637,6 +677,7 @@
  * @usb3_phy: pointer to USB3 PHY
  * @dcfg: saved contents of DCFG register
  * @gctl: saved contents of GCTL register
+ * @utmi_phy: Add utmi phy interface support
  * @is_selfpowered: true when we are selfpowered
  * @three_stage_setup: set if we perform a three phase setup
  * @ep0_bounced: true when we used bounce buffer
@@ -717,6 +758,7 @@
 #define DWC3_REVISION_240A	0x5533240a
 #define DWC3_REVISION_250A	0x5533250a
 
+	unsigned		utmi_phy:1;
 	unsigned		is_selfpowered:1;
 	unsigned		three_stage_setup:1;
 	unsigned		ep0_bounced:1;
@@ -727,6 +769,7 @@
 	unsigned		needs_fifo_resize:1;
 	unsigned		resize_fifos:1;
 	unsigned		pullups_connected:1;
+	unsigned		quirks_disable_irqthread:1;
 
 	enum dwc3_ep0_next	ep0_next_event;
 	enum dwc3_ep0_state	ep0state;
@@ -748,9 +791,23 @@
 	struct dwc3_hwparams	hwparams;
 	struct dentry		*root;
 	struct debugfs_regset32	*regset;
+	enum dwc3_pm_state	pm_state;
+	u8			is_otg;
+	u8			soft_connected;
 
 	u8			test_mode;
 	u8			test_mode_nr;
+
+	/* delayed work for handling Link State Change */
+	struct delayed_work	link_work;
+
+	u8			is_ebc;
+
+	struct dwc3_scratchpad_array	*scratch_array;
+	dma_addr_t		scratch_array_dma;
+	void			*scratch_buffer[DWC3_MAX_HIBER_SCRATCHBUFS];
+	struct dwc3_hwregs	hwregs;
+	bool			hiber_enabled;
 };
 
 /* -------------------------------------------------------------------------- */
@@ -759,8 +816,8 @@
 
 struct dwc3_event_type {
 	u32	is_devspec:1;
-	u32	type:6;
-	u32	reserved8_31:25;
+	u32	type:7;
+	u32	reserved8_31:24;
 } __packed;
 
 #define DWC3_DEPEVT_XFERCOMPLETE	0x01
@@ -877,6 +934,23 @@
 	struct dwc3_event_gevt		gevt;
 };
 
+struct ebc_io {
+	const char	*name;
+	const char	*epname;
+	u8		epnum;
+	u8		is_ondemand;
+	u8		static_trb_pool_size;
+	struct list_head	list;
+	int		(*init) (void);
+	void		*(*alloc_static_trb_pool) (dma_addr_t *dma_addr);
+	void		(*free_static_trb_pool) (void);
+	int		(*xfer_start) (void);
+	int		(*xfer_stop) (void);
+};
+
+void dwc3_register_io_ebc(struct ebc_io *ebc);
+void dwc3_unregister_io_ebc(struct ebc_io *ebc);
+
 /*
  * DWC3 Features to be used as Driver Data
  */
diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
index 9e9f122..fec9619 100644
--- a/drivers/usb/dwc3/debugfs.c
+++ b/drivers/usb/dwc3/debugfs.c
@@ -638,6 +638,300 @@
 	.release		= single_release,
 };
 
+static int dwc3_hiber_enabled_show(struct seq_file *s, void *unused)
+{
+	struct dwc3		*dwc = s->private;
+
+	if (dwc->hiber_enabled)
+		seq_puts(s, "hibernation enabled\n");
+	else
+		seq_puts(s, "hibernation disabled\n");
+
+	return 0;
+}
+
+static int dwc3_hiber_enabled_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, dwc3_hiber_enabled_show, inode->i_private);
+}
+
+static ssize_t dwc3_hiber_enabled_write(struct file *file,
+		const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	struct seq_file		*s = file->private_data;
+	struct dwc3		*dwc = s->private;
+	char			buf[32];
+	int			enabled = 0;
+
+	if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+		return -EFAULT;
+
+	sscanf(buf, "%d", &enabled);
+	dwc->hiber_enabled = enabled;
+
+	return count;
+}
+
+static const struct file_operations dwc3_hiber_enabled_fops = {
+	.open			= dwc3_hiber_enabled_open,
+	.write			= dwc3_hiber_enabled_write,
+	.read			= seq_read,
+	.llseek			= seq_lseek,
+	.release		= single_release,
+};
+
+static void dwc3_dump_requests(struct seq_file *s, struct list_head *head,
+					const char *list_name)
+{
+	struct dwc3_request	*dreq;
+
+	if (list_empty(head)) {
+		seq_printf(s, "list %s is empty\n", list_name);
+		return;
+	}
+
+	seq_printf(s, "list %s:\n", list_name);
+	list_for_each_entry(dreq, head, list) {
+		struct usb_request *req = &dreq->request;
+
+		seq_printf(s, "usb_request@0x%p: buf@0x%p(dma@0x%llx): len=0x%x: status=%d: actual=0x%x; start_slot=%u: trb@%p: trb_dma@0x%llx\n",
+			req, req->buf, (unsigned long long)req->dma,
+			req->length, req->status, req->actual,
+			dreq->start_slot, dreq->trb,
+			(unsigned long long)dreq->trb_dma);
+	}
+}
+
+static void dwc3_dump_trbs(struct seq_file *s, struct dwc3_ep *dep)
+{
+	struct dwc3_trb	*trb;
+	int i;
+
+	seq_printf(s, "busy_slot = %u, free_slot = %u\n",
+				dep->busy_slot % DWC3_TRB_NUM,
+				dep->free_slot % DWC3_TRB_NUM);
+
+	seq_puts(s, "\t bpl, bph, size, ctrl\n");
+	for (i = 0; i < DWC3_TRB_NUM; i++) {
+		trb = &dep->trb_pool[i];
+		if (i == dep->busy_slot % DWC3_TRB_NUM) {
+			seq_puts(s, "busy_slot--|\n");
+			seq_puts(s, "           \\\n");
+		}
+		if (i == dep->free_slot % DWC3_TRB_NUM) {
+			seq_puts(s, "free_slot--|\n");
+			seq_puts(s, "           \\\n");
+		}
+		seq_printf(s, "trb[%d](dma&0x%llx): %08x, %08x, %08x, %08x\n",
+			i, (unsigned long long) dep->trb_pool_dma + i * sizeof(*trb),
+			trb->bpl, trb->bph, trb->size, trb->ctrl);
+	}
+}
+
+static void dwc3_dump_dev_event(struct seq_file *s, union dwc3_event event)
+{
+	seq_puts(s, "[0]DEV ");
+	seq_printf(s, "[1:7]%s ",
+		event.type.type == DWC3_EVENT_TYPE_DEV ? "TYPE_DEV" :
+		event.type.type == DWC3_EVENT_TYPE_CARKIT ? "TYPE_CARKIT" :
+		"TYPE_I2C");
+
+	switch (event.devt.type) {
+	case DWC3_DEVICE_EVENT_DISCONNECT:
+		seq_puts(s, "[8:11] DISCONNECT ");
+		break;
+	case DWC3_DEVICE_EVENT_RESET:
+		seq_puts(s, "[8:11] RESET ");
+		break;
+	case DWC3_DEVICE_EVENT_CONNECT_DONE:
+		seq_puts(s, "[8:11] CONNECTION_DONE ");
+		break;
+	case DWC3_DEVICE_EVENT_WAKEUP:
+		seq_puts(s, "[8:11] WAKEUP ");
+		break;
+	case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
+		seq_puts(s, "[8:11] LINK_CHANGE ");
+		seq_puts(s, "[16:24] ");
+		seq_printf(s, "%s ",
+			event.devt.event_info & BIT(4) ? "SS" : "HS");
+		switch (event.devt.event_info & DWC3_LINK_STATE_MASK) {
+		case DWC3_LINK_STATE_U0:
+			seq_puts(s, "U0");
+			break;
+		case DWC3_LINK_STATE_U1:
+			seq_puts(s, "U1");
+			break;
+		case DWC3_LINK_STATE_U2:
+			seq_puts(s, "U2");
+			break;
+		case DWC3_LINK_STATE_U3:
+			seq_puts(s, "U3");
+			break;
+		case DWC3_LINK_STATE_SS_DIS:
+			seq_puts(s, "SS_DIS");
+			break;
+		case DWC3_LINK_STATE_RX_DET:
+			seq_puts(s, "RX_DET");
+			break;
+		case DWC3_LINK_STATE_SS_INACT:
+			seq_puts(s, "SS_INACT");
+			break;
+		case DWC3_LINK_STATE_POLL:
+			seq_puts(s, "POLL");
+			break;
+		case DWC3_LINK_STATE_RECOV:
+			seq_puts(s, "RECOV");
+			break;
+		case DWC3_LINK_STATE_HRESET:
+			seq_puts(s, "HRESET");
+			break;
+		case DWC3_LINK_STATE_CMPLY:
+			seq_puts(s, "CMPLY");
+			break;
+		case DWC3_LINK_STATE_LPBK:
+			seq_puts(s, "LPBK");
+			break;
+		case DWC3_LINK_STATE_RESET:
+			seq_puts(s, "RESET");
+			break;
+		case DWC3_LINK_STATE_RESUME:
+			seq_puts(s, "RESUME");
+			break;
+		}
+		seq_printf(s, "(%x) ", event.devt.event_info & DWC3_LINK_STATE_MASK);
+		break;
+	case DWC3_DEVICE_EVENT_HIBER_REQ:
+		seq_puts(s, "[8:11] HIBER_REQ ");
+		break;
+	case DWC3_DEVICE_EVENT_EOPF:
+		seq_puts(s, "[8:11] EOPF ");
+		break;
+	case DWC3_DEVICE_EVENT_SOF:
+		seq_puts(s, "[8:11] SOF ");
+		break;
+	case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
+		seq_puts(s, "[8:11] ERRATIC_ERROR ");
+		break;
+	case DWC3_DEVICE_EVENT_CMD_CMPL:
+		seq_puts(s, "[8:11] COMMAND_COMPLETE ");
+		break;
+	case DWC3_DEVICE_EVENT_OVERFLOW:
+		seq_puts(s, "[8:11] OVERFLOW ");
+		break;
+	default:
+		seq_printf(s, "[8:11] UNKNOWN (%x) ", event.devt.type);
+	}
+}
+
+static void dwc3_dump_ep_event(struct seq_file *s, union dwc3_event event)
+{
+	seq_puts(s, "[0]EP ");
+	seq_printf(s, "[1:5]ep%d ", event.depevt.endpoint_number);
+	seq_printf(s, "[6:9]%s ",
+		dwc3_ep_event_string(event.depevt.endpoint_event));
+
+	switch (event.depevt.endpoint_event) {
+	case DWC3_DEPEVT_XFERCOMPLETE:
+	case DWC3_DEPEVT_XFERINPROGRESS:
+		if (event.depevt.status & DEPEVT_STATUS_BUSERR)
+			seq_puts(s, "[12] BUSERR ");
+		if (event.depevt.status & DEPEVT_STATUS_SHORT)
+			seq_puts(s, "[13] SHORT ");
+		if (event.depevt.status & DEPEVT_STATUS_IOC)
+			seq_puts(s, "[14] IOC ");
+		if (event.depevt.status & DEPEVT_STATUS_LST)
+			seq_puts(s, "[15] LST ");
+		break;
+	case DWC3_DEPEVT_XFERNOTREADY:
+		if ((event.depevt.status & 0x3) == 1)
+			seq_puts(s, "[12:13] Data_Stage ");
+		else if ((event.depevt.status & 0x3) == 2)
+			seq_puts(s, "[12:13] Status_Stage ");
+		if (event.depevt.status & DEPEVT_STATUS_TRANSFER_ACTIVE)
+			seq_puts(s, "[15] XferActive ");
+		else
+			seq_puts(s, "[15] XferNotActive ");
+		break;
+	case DWC3_DEPEVT_EPCMDCMPLT:
+		if (event.depevt.status & BIT(0))
+			seq_puts(s, "[12:15] Invalid Transfer Resource ");
+		break;
+	}
+}
+
+static void dwc3_dump_event_buf(struct seq_file *s, struct dwc3_event_buffer *evt)
+{
+	union dwc3_event	event;
+	int			i;
+
+	seq_printf(s, "evt->buf=0x%p(dma@0x%llx), length=%u, lpos=%u, count=%u, flags=%s\n",
+		evt->buf, (unsigned long long) evt->dma, evt->length,
+		evt->lpos, evt->count,
+		evt->flags & DWC3_EVENT_PENDING ? "pending" : "0");
+
+	for (i = 0; i < evt->length; i += 4) {
+		event.raw = *(u32 *) (evt->buf + i);
+		if (i == evt->lpos) {
+			seq_puts(s, "lpos-------|\n");
+			seq_puts(s, "           \\\n");
+		}
+		seq_printf(s, "event[%d]: %08x ", i, event.raw);
+
+		/* analyze device specific events */
+		if (event.type.is_devspec)
+			dwc3_dump_dev_event(s, event);
+		else
+			dwc3_dump_ep_event(s, event);
+
+		seq_puts(s, "\n");
+	}
+}
+
+static int dwc3_snapshot_show(struct seq_file *s, void *unused)
+{
+	struct dwc3		*dwc = s->private;
+	unsigned long		flags;
+	int			i;
+
+	spin_lock_irqsave(&dwc->lock, flags);
+	for (i = 2; i < DWC3_ENDPOINTS_NUM; i++) {
+		struct dwc3_ep	*dep = dwc->eps[i];
+
+		if (!(dep->flags & DWC3_EP_ENABLED))
+			continue;
+
+		seq_printf(s, "[%s]\n", dep->name);
+		dwc3_dump_requests(s, &dep->request_list, "request_list");
+		dwc3_dump_requests(s, &dep->req_queued, "req_queued");
+		if (!list_empty(&dep->req_queued))
+			dwc3_dump_trbs(s, dep);
+		seq_puts(s, "\n");
+	}
+	dwc3_dump_event_buf(s, dwc->ev_buffs[0]);
+	spin_unlock_irqrestore(&dwc->lock, flags);
+
+	return 0;
+}
+
+static int dwc3_snapshot_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, dwc3_snapshot_show, inode->i_private);
+}
+
+static ssize_t dwc3_snapshot_write(struct file *file,
+		const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	return count;
+}
+
+static const struct file_operations dwc3_snapshot_fops = {
+	.open			= dwc3_snapshot_open,
+	.write			= dwc3_snapshot_write,
+	.read			= seq_read,
+	.llseek			= seq_lseek,
+	.release		= single_release,
+};
+
 int dwc3_debugfs_init(struct dwc3 *dwc)
 {
 	struct dentry		*root;
@@ -692,6 +986,20 @@
 			ret = -ENOMEM;
 			goto err1;
 		}
+
+		file = debugfs_create_file("hiber_enabled", S_IRUGO | S_IWUSR,
+				root, dwc, &dwc3_hiber_enabled_fops);
+		if (!file) {
+			ret = -ENOMEM;
+			goto err1;
+		}
+
+		file = debugfs_create_file("snapshot", S_IRUGO | S_IWUSR, root,
+				dwc, &dwc3_snapshot_fops);
+		if (!file) {
+			ret = -ENOMEM;
+			goto err1;
+		}
 	}
 
 	return 0;
diff --git a/drivers/usb/dwc3/dwc3-device-intel.c b/drivers/usb/dwc3/dwc3-device-intel.c
new file mode 100644
index 0000000..5bee70c
--- /dev/null
+++ b/drivers/usb/dwc3/dwc3-device-intel.c
@@ -0,0 +1,699 @@
+/**
+ * Copyright (C) 2012 Intel Corp.
+ * Author: Jiebing Li
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include <linux/usb/dwc3-intel-mid.h>
+#include <linux/usb/phy.h>
+#include <linux/wakelock.h>
+
+#include "core.h"
+#include "gadget.h"
+#include "io.h"
+#include "otg.h"
+
+#include "debug.h"
+
+#include "core.c"
+#include "ep0.c"
+#include "gadget.c"
+
+/* FLIS register */
+#define APBFC_EXIOTG3_MISC0_REG		0xF90FF85C
+
+/* Global User Control Register Auto Retry bit*/
+#define DWC3_GUCTL_USB_HST_IN_AUTO_RETRY_EN	(1 << 14)
+
+/* Global Configuration Register */
+#define DWC3_GRXTHRCFG_USBRXPKTCNTSEL		(1 << 29)
+#define DWC3_GRXTHRCFG_USBRXPKTCNT(n)		(n << 24)
+#define DWC3_GRXTHRCFG_USBRXPKTCNT_MASK		(0xf << 24)
+#define DWC3_GRXTHRCFG_USBMAXRXBURSTSIZE(n)	(n << 19)
+#define DWC3_GRXTHRCFG_USBMAXRXBURSTSIZE_MASK	(0x1f << 19)
+
+/**
+ * struct dwc3_dev_data - Structure holding platform related
+ *			information
+ * @flis_reg:		FLIS register
+ * @grxthrcfg:		DWC3 GRXTHCFG register
+ */
+struct dwc3_dev_data {
+	struct dwc3		*dwc;
+	void __iomem		*flis_reg;
+	u32			grxthrcfg;
+	struct wake_lock	wake_lock;
+	struct mutex		mutex;
+};
+
+static struct dwc3_dev_data	*_dev_data;
+
+/*
+ * dwc3_disable_multi_packet - set GRXTHRCFG register to disable
+ * reception multi-packet thresholdingfor DWC2.50a.
+ */
+static void dwc3_disable_multi_packet(struct dwc3 *dwc)
+{
+	u32			reg;
+
+	reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
+	_dev_data->grxthrcfg = reg;
+	if (reg) {
+		reg &= ~DWC3_GRXTHRCFG_USBRXPKTCNTSEL;
+		reg &= ~DWC3_GRXTHRCFG_USBRXPKTCNT_MASK;
+		reg &= ~DWC3_GRXTHRCFG_USBMAXRXBURSTSIZE_MASK;
+
+		dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
+	}
+}
+
+/*
+ * dwc3_enable_host_auto_retry - clear Auto Retry Enable bit
+ * for device mode
+ */
+static void dwc3_enable_host_auto_retry(struct dwc3 *dwc, bool enable)
+{
+	u32			reg;
+
+	reg = dwc3_readl(dwc->regs, DWC3_GUCTL);
+
+	if (enable)
+		reg |= DWC3_GUCTL_USB_HST_IN_AUTO_RETRY_EN;
+	else
+		reg &= ~DWC3_GUCTL_USB_HST_IN_AUTO_RETRY_EN;
+
+	dwc3_writel(dwc->regs, DWC3_GUCTL, reg);
+}
+
+static void dwc3_do_extra_change(struct dwc3 *dwc)
+{
+	u32		reg;
+
+	if (dwc->revision == DWC3_REVISION_250A)
+		dwc3_disable_multi_packet(dwc);
+
+	dwc3_enable_host_auto_retry(dwc, false);
+
+	/* the initial/default value of GCTL.PwnDnScale (bit 31:19) is not
+	 * properly set. This affects A0 as well as B0 and would affect how
+	 * ltssm in u3pmu works during suspend state where periodic rx
+	 * termination detect etc need to be performed in U3.
+	 *
+	 * Our suspend clock is 19.2 MHz.
+	 * Hence PwrDnScale = 19200 / 16 = 1200 (= 0x4B0). To account for
+	 * possible jitter of suspend clock and to have margin, I recommend
+	 * it to be set to 1250 (= 0x4E2). Current default value is wrong and
+	 * set to 0x8B0
+	 */
+	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
+	reg &= ~DWC3_GCTL_PWRDNSCALE_MASK;
+	reg |= DWC3_GCTL_PWRDNSCALE(0x4E2);
+	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
+}
+
+static void dwc3_enable_hibernation(struct dwc3 *dwc, bool on)
+{
+	u32 num, reg;
+
+	if (DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1)
+		!= DWC3_GHWPARAMS1_EN_PWROPT_HIB) {
+		dev_err(dwc->dev, "Device Mode Hibernation is not supported\n");
+		return;
+	}
+
+	num = DWC3_GHWPARAMS4_HIBER_SCRATCHBUFS(
+		 dwc->hwparams.hwparams4);
+	if (num != 1)
+		dev_err(dwc->dev, "number of scratchpad buffer: %d\n", num);
+
+	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
+
+	if (on) {
+		dwc3_writel(dwc->regs, DWC3_GCTL,
+				reg | DWC3_GCTL_GBLHIBERNATIONEN);
+
+		dwc3_send_gadget_generic_command(dwc, DWC3_DGCMD_SET_SCRATCH_ADDR_LO,
+				dwc->scratch_array_dma & 0xffffffffU);
+	} else
+		dwc3_writel(dwc->regs, DWC3_GCTL,
+				reg & ~DWC3_GCTL_GBLHIBERNATIONEN);
+}
+
+/*
+ * Re-write irq functions. Not use irq thread. Because irqthread has negative
+ * impact on usb performance, especially for usb network performance, USB3 UDP
+ * download performance will drop from 80MB/s to 40MB/s if irqthread is enabled.
+ */
+static irqreturn_t dwc3_quirks_process_event_buf(struct dwc3 *dwc, u32 buf)
+{
+	struct dwc3_event_buffer *evt;
+	u32 count;
+	int left;
+
+	evt = dwc->ev_buffs[buf];
+
+	count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf));
+	count &= DWC3_GEVNTCOUNT_MASK;
+	if (!count)
+		return IRQ_NONE;
+
+	evt->count = count;
+
+	/* WORKAROUND: Add 4 us delay workaround to A-unit issue in A0 stepping.
+	* Can be removed after B0.
+	*/
+	if (dwc->is_otg && dwc->revision == DWC3_REVISION_210A)
+		udelay(4);
+
+	/* WORKAROUND: Add 4 us delay as moorfield seems to have memory
+	 * inconsistent issue
+	 */
+	udelay(4);
+
+	left = evt->count;
+
+	while (left > 0) {
+		union dwc3_event event;
+
+		event.raw = *(u32 *) (evt->buf + evt->lpos);
+
+		dwc3_process_event_entry(dwc, &event);
+
+		/*
+		* FIXME we wrap around correctly to the next entry as
+		* almost all entries are 4 bytes in size. There is one
+		* entry which has 12 bytes which is a regular entry
+		* followed by 8 bytes data. ATM I don't know how
+		* things are organized if we get next to the a
+		* boundary so I worry about that once we try to handle
+		* that.
+		*/
+		evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
+		left -= 4;
+
+		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4);
+	}
+
+	evt->count = 0;
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t dwc3_quirks_interrupt(int irq, void *_dwc)
+{
+	struct dwc3	*dwc = _dwc;
+	int		i;
+	irqreturn_t	ret = IRQ_NONE;
+
+	spin_lock(&dwc->lock);
+	if (dwc->pm_state != PM_ACTIVE) {
+		if (dwc->pm_state == PM_SUSPENDED) {
+			dev_info(dwc->dev, "u2/u3 pmu is received\n");
+			pm_runtime_get(dwc->dev);
+			dwc->pm_state = PM_RESUMING;
+			ret = IRQ_HANDLED;
+		}
+		goto out;
+	}
+
+	for (i = 0; i < dwc->num_event_buffers; i++) {
+		irqreturn_t status;
+
+		status = dwc3_quirks_process_event_buf(dwc, i);
+		if (status == IRQ_HANDLED)
+			ret = status;
+	}
+
+out:
+	spin_unlock(&dwc->lock);
+
+	return ret;
+}
+
+int dwc3_start_peripheral(struct usb_gadget *g)
+{
+	struct dwc3		*dwc = gadget_to_dwc(g);
+	unsigned long		flags;
+	int			irq;
+	int			ret = 0;
+
+	wake_lock(&_dev_data->wake_lock);
+	pm_runtime_get_sync(dwc->dev);
+
+	mutex_lock(&_dev_data->mutex);
+	spin_lock_irqsave(&dwc->lock, flags);
+
+	if (dwc->gadget_driver && dwc->soft_connected) {
+		dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
+
+		spin_unlock_irqrestore(&dwc->lock, flags);
+		dwc3_core_init(dwc);
+		spin_lock_irqsave(&dwc->lock, flags);
+
+		if (dwc->hiber_enabled)
+			dwc3_enable_hibernation(dwc, true);
+		dwc3_do_extra_change(dwc);
+		dwc3_event_buffers_setup(dwc);
+		ret = dwc3_init_for_enumeration(dwc);
+		if (ret)
+			goto err1;
+
+		dwc3_gadget_run_stop(dwc, 1);
+		if (dwc->hiber_enabled)
+			dwc3_gadget_keep_conn(dwc, 1);
+	}
+
+	dwc->pm_state = PM_ACTIVE;
+
+	spin_unlock_irqrestore(&dwc->lock, flags);
+
+	irq = platform_get_irq(to_platform_device(dwc->dev), 0);
+	if (dwc->quirks_disable_irqthread)
+		ret = request_irq(irq, dwc3_quirks_interrupt,
+				IRQF_SHARED, "dwc3", dwc);
+	else
+		ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
+				IRQF_SHARED, "dwc3", dwc);
+	if (ret) {
+		dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
+				irq, ret);
+		goto err0;
+	}
+	mutex_unlock(&_dev_data->mutex);
+
+	return 0;
+
+err1:
+	spin_unlock_irqrestore(&dwc->lock, flags);
+err0:
+	mutex_unlock(&_dev_data->mutex);
+
+	return ret;
+}
+
+int dwc3_stop_peripheral(struct usb_gadget *g)
+{
+	struct dwc3		*dwc = gadget_to_dwc(g);
+	unsigned long		flags;
+	u8			epnum;
+	int			irq;
+
+	mutex_lock(&_dev_data->mutex);
+	spin_lock_irqsave(&dwc->lock, flags);
+
+	/* Disable hibernation for D0i3cold */
+	dwc3_enable_hibernation(dwc, false);
+
+	dwc3_stop_active_transfers(dwc);
+
+	if (dwc->gadget.speed != USB_SPEED_UNKNOWN) {
+		dwc3_disconnect_gadget(dwc);
+
+		dwc->gadget.speed = USB_SPEED_UNKNOWN;
+	}
+
+	dwc->start_config_issued = false;
+
+	/* Clear Run/Stop bit */
+	dwc3_gadget_run_stop(dwc, 0);
+	dwc3_gadget_keep_conn(dwc, 0);
+
+	for (epnum = 0; epnum < 2; epnum++) {
+		struct dwc3_ep  *dep;
+
+		dep = dwc->eps[epnum];
+
+		if (dep->flags & DWC3_EP_ENABLED)
+			__dwc3_gadget_ep_disable(dep);
+	}
+
+	dwc3_gadget_disable_irq(dwc);
+
+	dwc3_event_buffers_cleanup(dwc);
+
+	if (_dev_data->grxthrcfg && dwc->revision == DWC3_REVISION_250A) {
+		dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, _dev_data->grxthrcfg);
+		_dev_data->grxthrcfg = 0;
+	}
+
+	dwc3_enable_host_auto_retry(dwc, true);
+
+	if (dwc->pm_state != PM_SUSPENDED)
+		pm_runtime_put(dwc->dev);
+
+	dwc->pm_state = PM_DISCONNECTED;
+	spin_unlock_irqrestore(&dwc->lock, flags);
+
+	irq = platform_get_irq(to_platform_device(dwc->dev), 0);
+	free_irq(irq, dwc);
+
+	mutex_unlock(&_dev_data->mutex);
+
+	cancel_delayed_work_sync(&dwc->link_work);
+
+	wake_unlock(&_dev_data->wake_lock);
+
+	return 0;
+}
+
+static int dwc3_device_gadget_pullup(struct usb_gadget *g, int is_on)
+{
+	struct dwc3		*dwc = gadget_to_dwc(g);
+	unsigned long		flags;
+	int			ret;
+
+	/*
+	 * FIXME If pm_state is PM_RESUMING, we should wait for it to
+	 * become PM_ACTIVE before continue. The chance of hitting
+	 * PM_RESUMING is rare, but if so, we'll return directly.
+	 *
+	 * If some gadget reaches here in atomic context,
+	 * pm_runtime_get_sync will cause a sleep problem.
+	 */
+	if (dwc->pm_state == PM_RESUMING) {
+		dev_err(dwc->dev, "%s: PM_RESUMING, return -EIO\n", __func__);
+		return -EIO;
+	}
+
+	if (dwc->pm_state == PM_SUSPENDED) {
+
+		/* WORKAROUND Wait 300 ms and check if the state is still PM_SUSPENDED
+		 * before resuming the controller. This avoids resuming the controller
+		 * during enumeration and causing PHY hangs.
+		 */
+		msleep(300);
+		if (dwc->pm_state == PM_SUSPENDED)
+			pm_runtime_get_sync(dwc->dev);
+	}
+
+	is_on = !!is_on;
+
+	mutex_lock(&_dev_data->mutex);
+
+	spin_lock_irqsave(&dwc->lock, flags);
+	if (dwc->soft_connected == is_on) {
+		spin_unlock_irqrestore(&dwc->lock, flags);
+		goto done;
+	}
+
+	dwc->soft_connected = is_on;
+
+	if (dwc->pm_state == PM_DISCONNECTED) {
+		spin_unlock_irqrestore(&dwc->lock, flags);
+		goto done;
+	}
+
+	if (is_on) {
+		dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
+
+		/* Per dwc3 databook 2.40a section 8.1.9, re-connection
+		 * should follow steps described section 8.1.1 power on
+		 * or soft reset.
+		 */
+		spin_unlock_irqrestore(&dwc->lock, flags);
+		dwc3_core_init(dwc);
+		spin_lock_irqsave(&dwc->lock, flags);
+
+		if (dwc->hiber_enabled)
+			dwc3_enable_hibernation(dwc, true);
+		dwc3_do_extra_change(dwc);
+		dwc3_event_buffers_setup(dwc);
+		dwc3_init_for_enumeration(dwc);
+		ret = dwc3_gadget_run_stop(dwc, 1);
+		if (dwc->hiber_enabled)
+			dwc3_gadget_keep_conn(dwc, 1);
+	} else {
+		u8 epnum;
+
+		for (epnum = 0; epnum < 2; epnum++) {
+			struct dwc3_ep  *dep;
+
+			dep = dwc->eps[epnum];
+
+			if (dep->flags & DWC3_EP_ENABLED)
+				__dwc3_gadget_ep_disable(dep);
+		}
+
+		dwc3_stop_active_transfers(dwc);
+		dwc3_gadget_keep_conn(dwc, 0);
+		ret = dwc3_gadget_run_stop(dwc, 0);
+		dwc3_gadget_disable_irq(dwc);
+
+		/* Clear all OTG events which will confuse host and
+		 * make enumeration failed after pullup with on.
+		 */
+		dwc3_writel(dwc->regs, OCTL, 0);
+		dwc3_writel(dwc->regs, OEVTEN, 0);
+		dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_OTG);
+	}
+
+	spin_unlock_irqrestore(&dwc->lock, flags);
+	mutex_unlock(&_dev_data->mutex);
+
+	return ret;
+
+done:
+	mutex_unlock(&_dev_data->mutex);
+
+	return 0;
+}
+
+static const struct usb_gadget_ops dwc3_device_gadget_ops = {
+	.get_frame		= dwc3_gadget_get_frame,
+	.wakeup			= dwc3_gadget_wakeup,
+	.set_selfpowered	= dwc3_gadget_set_selfpowered,
+	.pullup			= dwc3_device_gadget_pullup,
+	.udc_start		= dwc3_gadget_start,
+	.udc_stop		= dwc3_gadget_stop,
+	.vbus_draw		= dwc3_vbus_draw,
+};
+
+static int dwc3_device_intel_probe(struct platform_device *pdev)
+{
+	struct device_node	*node = pdev->dev.of_node;
+	struct dwc3		*dwc;
+	struct device		*dev = &pdev->dev;
+	int			ret = -ENOMEM;
+	void			*mem;
+	struct intel_dwc_otg_pdata	*otg_data = dev->parent->platform_data;
+
+	struct dwc_device_par	*pdata;
+	struct usb_phy		*usb_phy;
+	struct dwc_otg2		*otg;
+
+	mem = devm_kzalloc(dev, sizeof(*dwc) + DWC3_ALIGN_MASK, GFP_KERNEL);
+	if (!mem) {
+		dev_err(dev, "not enough memory\n");
+		return -ENOMEM;
+	}
+	dwc = PTR_ALIGN(mem, DWC3_ALIGN_MASK + 1);
+	dwc->mem = mem;
+
+	_dev_data = kzalloc(sizeof(*_dev_data), GFP_KERNEL);
+	if (!_dev_data) {
+		dev_err(dev, "not enough memory\n");
+		return -ENOMEM;
+	}
+
+	_dev_data->dwc = dwc;
+
+	pdata = (struct dwc_device_par *)pdev->dev.platform_data;
+	if (!pdata) {
+		dev_err(&pdev->dev, "No platform data for %s.\n",
+				dev_name(&pdev->dev));
+		return -ENODEV;
+	}
+
+	if (node) {
+		dwc->usb2_phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 0);
+		dwc->usb3_phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 1);
+	} else {
+		dwc->usb2_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
+		dwc->usb3_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB3);
+	}
+
+	if (IS_ERR(dwc->usb2_phy)) {
+		ret = PTR_ERR(dwc->usb2_phy);
+
+		/*
+		 * if -ENXIO is returned, it means PHY layer wasn't
+		 * enabled, so it makes no sense to return -EPROBE_DEFER
+		 * in that case, since no PHY driver will ever probe.
+		 */
+		if (ret == -ENXIO)
+			return ret;
+
+		dev_err(dev, "no usb2 phy configured\n");
+		return -EPROBE_DEFER;
+	}
+
+	if (IS_ERR(dwc->usb3_phy)) {
+		ret = PTR_ERR(dwc->usb2_phy);
+
+		/*
+		 * if -ENXIO is returned, it means PHY layer wasn't
+		 * enabled, so it makes no sense to return -EPROBE_DEFER
+		 * in that case, since no PHY driver will ever probe.
+		 */
+		if (ret == -ENXIO)
+			return ret;
+
+		dev_err(dev, "no usb3 phy configured\n");
+		return -EPROBE_DEFER;
+	}
+
+	mutex_init(&_dev_data->mutex);
+	spin_lock_init(&dwc->lock);
+	platform_set_drvdata(pdev, dwc);
+
+	dwc->regs   = pdata->io_addr + DWC3_GLOBALS_REGS_START;
+	dwc->regs_size  = pdata->len - DWC3_GLOBALS_REGS_START;
+	dwc->dev	= dev;
+	if (otg_data && otg_data->usb2_phy_type == USB2_PHY_UTMI)
+		dwc->utmi_phy = 1;
+	if (otg_data)
+		dwc->hiber_enabled = !!otg_data->device_hibernation;
+
+	dev->dma_mask	= dev->parent->dma_mask;
+	dev->dma_parms	= dev->parent->dma_parms;
+	dma_set_coherent_mask(dev, dev->parent->coherent_dma_mask);
+
+	if (!strncmp("super", maximum_speed, 5))
+		dwc->maximum_speed = DWC3_DCFG_SUPERSPEED;
+	else if (!strncmp("high", maximum_speed, 4))
+		dwc->maximum_speed = DWC3_DCFG_HIGHSPEED;
+	else if (!strncmp("full", maximum_speed, 4))
+		dwc->maximum_speed = DWC3_DCFG_FULLSPEED1;
+	else if (!strncmp("low", maximum_speed, 3))
+		dwc->maximum_speed = DWC3_DCFG_LOWSPEED;
+	else
+		dwc->maximum_speed = DWC3_DCFG_SUPERSPEED;
+
+	dwc->needs_fifo_resize = of_property_read_bool(node, "tx-fifo-resize");
+
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_enable(dev);
+	pm_runtime_set_suspended(&pdev->dev);
+	pm_runtime_get_sync(dev);
+	pm_runtime_forbid(dev);
+
+	dwc3_cache_hwparams(dwc);
+	dwc3_core_num_eps(dwc);
+
+	_dev_data->flis_reg =
+		ioremap_nocache(APBFC_EXIOTG3_MISC0_REG, 4);
+
+	ret = dwc3_alloc_event_buffers(dwc, DWC3_EVENT_BUFFERS_SIZE);
+	if (ret) {
+		dev_err(dwc->dev, "failed to allocate event buffers\n");
+		ret = -ENOMEM;
+		goto err0;
+	}
+
+	/*
+	* Not use irq thread, because irqthread has negative impact
+	* on usb performance, especially for usb network performance.
+	*/
+	dwc->quirks_disable_irqthread = 1;
+
+	usb_phy = usb_get_phy(USB_PHY_TYPE_USB2);
+	if (!usb_phy) {
+		dev_err(dev, "failed to get usb2 phy\n");
+		return -ENODEV;
+	}
+	otg = container_of(usb_phy, struct dwc_otg2, usb2_phy);
+	otg->start_device = dwc3_start_peripheral;
+	otg->stop_device = dwc3_stop_peripheral;
+	otg->vbus_draw = dwc3_vbus_draw;
+	usb_put_phy(usb_phy);
+	dwc->is_otg = 1;
+
+	wake_lock_init(&_dev_data->wake_lock,
+			WAKE_LOCK_SUSPEND, "dwc_wake_lock");
+
+	ret = dwc3_gadget_init(dwc);
+	if (ret) {
+		dev_err(dev, "failed to initialize gadget\n");
+		goto err0;
+	}
+	dwc->gadget.ops = &dwc3_device_gadget_ops;
+	dwc->gadget.is_otg = 1;
+
+	dwc->mode = DWC3_MODE_DEVICE;
+
+	ret = dwc3_debugfs_init(dwc);
+	if (ret) {
+		dev_err(dev, "failed to initialize debugfs\n");
+		goto err1;
+	}
+
+	pm_runtime_allow(dev);
+	pm_runtime_put(dev);
+
+	return 0;
+
+err1:
+	dwc3_gadget_exit(dwc);
+
+err0:
+	dwc3_free_event_buffers(dwc);
+
+	return ret;
+}
+
+static int dwc3_device_intel_remove(struct platform_device *pdev)
+{
+	iounmap(_dev_data->flis_reg);
+
+	wake_lock_destroy(&_dev_data->wake_lock);
+
+	dwc3_remove(pdev);
+
+	kfree(_dev_data);
+	_dev_data = NULL;
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_RUNTIME
+static const struct dev_pm_ops dwc3_device_pm_ops = {
+	.runtime_suspend	= dwc3_runtime_suspend,
+	.runtime_resume		= dwc3_runtime_resume,
+};
+#define DWC3_DEVICE_PM_OPS	(&dwc3_device_pm_ops)
+#else
+#define DWC3_DEVICE_PM_OPS	NULL
+#endif
+
+static struct platform_driver dwc3_device_intel_driver = {
+	.probe		= dwc3_device_intel_probe,
+	.remove		= dwc3_device_intel_remove,
+	.driver		= {
+		.name	= "dwc3-device",
+		.of_match_table	= of_match_ptr(of_dwc3_match),
+		.pm	= DWC3_DEVICE_PM_OPS,
+	},
+};
+
+module_platform_driver(dwc3_device_intel_driver);
+
+MODULE_ALIAS("platform:dwc3");
+MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("DesignWare USB3 DRD Controller Driver");
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
deleted file mode 100644
index 8ce9d7f..0000000
--- a/drivers/usb/dwc3/dwc3-exynos.c
+++ /dev/null
@@ -1,232 +0,0 @@
-/**
- * dwc3-exynos.c - Samsung EXYNOS DWC3 Specific Glue layer
- *
- * Copyright (c) 2012 Samsung Electronics Co., Ltd.
- *		http://www.samsung.com
- *
- * Author: Anton Tikhomirov <av.tikhomirov@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/platform_device.h>
-#include <linux/platform_data/dwc3-exynos.h>
-#include <linux/dma-mapping.h>
-#include <linux/clk.h>
-#include <linux/usb/otg.h>
-#include <linux/usb/nop-usb-xceiv.h>
-#include <linux/of.h>
-#include <linux/of_platform.h>
-
-struct dwc3_exynos {
-	struct platform_device	*usb2_phy;
-	struct platform_device	*usb3_phy;
-	struct device		*dev;
-
-	struct clk		*clk;
-};
-
-static int dwc3_exynos_register_phys(struct dwc3_exynos *exynos)
-{
-	struct nop_usb_xceiv_platform_data pdata;
-	struct platform_device	*pdev;
-	int			ret;
-
-	memset(&pdata, 0x00, sizeof(pdata));
-
-	pdev = platform_device_alloc("nop_usb_xceiv", PLATFORM_DEVID_AUTO);
-	if (!pdev)
-		return -ENOMEM;
-
-	exynos->usb2_phy = pdev;
-	pdata.type = USB_PHY_TYPE_USB2;
-
-	ret = platform_device_add_data(exynos->usb2_phy, &pdata, sizeof(pdata));
-	if (ret)
-		goto err1;
-
-	pdev = platform_device_alloc("nop_usb_xceiv", PLATFORM_DEVID_AUTO);
-	if (!pdev) {
-		ret = -ENOMEM;
-		goto err1;
-	}
-
-	exynos->usb3_phy = pdev;
-	pdata.type = USB_PHY_TYPE_USB3;
-
-	ret = platform_device_add_data(exynos->usb3_phy, &pdata, sizeof(pdata));
-	if (ret)
-		goto err2;
-
-	ret = platform_device_add(exynos->usb2_phy);
-	if (ret)
-		goto err2;
-
-	ret = platform_device_add(exynos->usb3_phy);
-	if (ret)
-		goto err3;
-
-	return 0;
-
-err3:
-	platform_device_del(exynos->usb2_phy);
-
-err2:
-	platform_device_put(exynos->usb3_phy);
-
-err1:
-	platform_device_put(exynos->usb2_phy);
-
-	return ret;
-}
-
-static int dwc3_exynos_remove_child(struct device *dev, void *unused)
-{
-	struct platform_device *pdev = to_platform_device(dev);
-
-	platform_device_unregister(pdev);
-
-	return 0;
-}
-
-static int dwc3_exynos_probe(struct platform_device *pdev)
-{
-	struct dwc3_exynos	*exynos;
-	struct clk		*clk;
-	struct device		*dev = &pdev->dev;
-	struct device_node	*node = dev->of_node;
-
-	int			ret = -ENOMEM;
-
-	exynos = devm_kzalloc(dev, sizeof(*exynos), GFP_KERNEL);
-	if (!exynos) {
-		dev_err(dev, "not enough memory\n");
-		goto err1;
-	}
-
-	/*
-	 * Right now device-tree probed devices don't get dma_mask set.
-	 * Since shared usb code relies on it, set it here for now.
-	 * Once we move to full device tree support this will vanish off.
-	 */
-	if (!dev->dma_mask)
-		dev->dma_mask = &dev->coherent_dma_mask;
-	if (!dev->coherent_dma_mask)
-		dev->coherent_dma_mask = DMA_BIT_MASK(32);
-
-	platform_set_drvdata(pdev, exynos);
-
-	ret = dwc3_exynos_register_phys(exynos);
-	if (ret) {
-		dev_err(dev, "couldn't register PHYs\n");
-		goto err1;
-	}
-
-	clk = devm_clk_get(dev, "usbdrd30");
-	if (IS_ERR(clk)) {
-		dev_err(dev, "couldn't get clock\n");
-		ret = -EINVAL;
-		goto err1;
-	}
-
-	exynos->dev	= dev;
-	exynos->clk	= clk;
-
-	clk_prepare_enable(exynos->clk);
-
-	if (node) {
-		ret = of_platform_populate(node, NULL, NULL, dev);
-		if (ret) {
-			dev_err(dev, "failed to add dwc3 core\n");
-			goto err2;
-		}
-	} else {
-		dev_err(dev, "no device node, failed to add dwc3 core\n");
-		ret = -ENODEV;
-		goto err2;
-	}
-
-	return 0;
-
-err2:
-	clk_disable_unprepare(clk);
-err1:
-	return ret;
-}
-
-static int dwc3_exynos_remove(struct platform_device *pdev)
-{
-	struct dwc3_exynos	*exynos = platform_get_drvdata(pdev);
-
-	device_for_each_child(&pdev->dev, NULL, dwc3_exynos_remove_child);
-	platform_device_unregister(exynos->usb2_phy);
-	platform_device_unregister(exynos->usb3_phy);
-
-	clk_disable_unprepare(exynos->clk);
-
-	return 0;
-}
-
-#ifdef CONFIG_OF
-static const struct of_device_id exynos_dwc3_match[] = {
-	{ .compatible = "samsung,exynos5250-dwusb3" },
-	{},
-};
-MODULE_DEVICE_TABLE(of, exynos_dwc3_match);
-#endif
-
-#ifdef CONFIG_PM_SLEEP
-static int dwc3_exynos_suspend(struct device *dev)
-{
-	struct dwc3_exynos *exynos = dev_get_drvdata(dev);
-
-	clk_disable(exynos->clk);
-
-	return 0;
-}
-
-static int dwc3_exynos_resume(struct device *dev)
-{
-	struct dwc3_exynos *exynos = dev_get_drvdata(dev);
-
-	clk_enable(exynos->clk);
-
-	/* runtime set active to reflect active state. */
-	pm_runtime_disable(dev);
-	pm_runtime_set_active(dev);
-	pm_runtime_enable(dev);
-
-	return 0;
-}
-
-static const struct dev_pm_ops dwc3_exynos_dev_pm_ops = {
-	SET_SYSTEM_SLEEP_PM_OPS(dwc3_exynos_suspend, dwc3_exynos_resume)
-};
-
-#define DEV_PM_OPS	(&dwc3_exynos_dev_pm_ops)
-#else
-#define DEV_PM_OPS	NULL
-#endif /* CONFIG_PM_SLEEP */
-
-static struct platform_driver dwc3_exynos_driver = {
-	.probe		= dwc3_exynos_probe,
-	.remove		= dwc3_exynos_remove,
-	.driver		= {
-		.name	= "exynos-dwc3",
-		.of_match_table = of_match_ptr(exynos_dwc3_match),
-		.pm	= DEV_PM_OPS,
-	},
-};
-
-module_platform_driver(dwc3_exynos_driver);
-
-MODULE_ALIAS("platform:exynos-dwc3");
-MODULE_AUTHOR("Anton Tikhomirov <av.tikhomirov@samsung.com>");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("DesignWare USB3 EXYNOS Glue Layer");
diff --git a/drivers/usb/dwc3/dwc3-host-intel.c b/drivers/usb/dwc3/dwc3-host-intel.c
new file mode 100644
index 0000000..60f4538
--- /dev/null
+++ b/drivers/usb/dwc3/dwc3-host-intel.c
@@ -0,0 +1,941 @@
+/*
+ * Copyright (C) 2012 Intel Corp.
+ * Author: Yu Wang
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/wakelock.h>
+#include <linux/usb/otg.h>
+#include <linux/platform_device.h>
+#include <linux/usb/dwc3-intel-mid.h>
+#include "../host/xhci.h"
+#include "core.h"
+#include "otg.h"
+
+#define WAIT_DISC_EVENT_COMPLETE_TIMEOUT 5 /* 100ms */
+#define PORTSC_IO_ADDR 0xf9100430
+#define USBCMD_IO_ADDR 0xf9100020
+
+static int dwc3_start_host(struct usb_hcd *hcd);
+static int dwc3_stop_host(struct usb_hcd *hcd);
+static struct platform_driver dwc3_xhci_driver;
+static int __dwc3_stop_host(struct usb_hcd *hcd);
+static int __dwc3_start_host(struct usb_hcd *hcd);
+static int dwc3_suspend_host(struct usb_hcd *hcd);
+static int dwc3_resume_host(struct usb_hcd *hcd);
+
+static struct dwc3_xhci_hcd {
+	struct wake_lock wakelock;
+	struct xhci_hcd *xhci;
+	struct work_struct reset_hcd;
+	struct work_struct poll_loopback;
+	int is_rx_test;
+	int otg_irqnum;
+	bool host_started;
+	bool comp_test_enable;
+	void __iomem *portsc_mmaddr;
+} dwc3_xhci;
+
+static void dwc3_host_quirks(struct device *dev, struct xhci_hcd *xhci)
+{
+	struct dwc_otg2 *otg = dwc3_get_otg();
+	struct intel_dwc_otg_pdata *data = NULL;
+
+	data = (struct intel_dwc_otg_pdata *)otg->otg_data;
+
+	if (otg && otg->otg_data)
+		data = (struct intel_dwc_otg_pdata *)otg->otg_data;
+
+	if (data && data->utmi_fs_det_wa)
+		xhci->quirks |= XHCI_PORT_RESET;
+
+	/*
+	 * As of now platform drivers don't provide MSI support so we ensure
+	 * here that the generic code does not try to make a pci_dev from our
+	 * dev struct in order to setup MSI
+	 */
+	xhci->quirks |= XHCI_PLAT;
+
+	/*
+	 * Due to some fatal silicon errors, the controller have to do reset
+	 * for make driver continue work.
+	 */
+	xhci->quirks |= XHCI_RESET;
+}
+
+static int dwc3_host_setup(struct usb_hcd *hcd)
+{
+	return xhci_gen_setup(hcd, dwc3_host_quirks);
+}
+
+static int xhci_dwc_bus_resume(struct usb_hcd *hcd)
+{
+	int ret;
+
+	/* before resume bus, delay 1ms to waiting core stable */
+	mdelay(1);
+
+	ret = xhci_bus_resume(hcd);
+	return ret;
+}
+
+static const struct hc_driver xhci_dwc_hc_driver = {
+	.description =		"dwc-xhci",
+	.product_desc =		"xHCI Host Controller",
+	.hcd_priv_size =	sizeof(struct xhci_hcd *),
+
+	/*
+	 * generic hardware linkage
+	 */
+	.irq =			xhci_irq,
+	.flags =		HCD_MEMORY | HCD_USB3 | HCD_SHARED,
+
+	/*
+	 * basic lifecycle operations
+	 */
+	.reset =		dwc3_host_setup,
+	.start =		xhci_run,
+	.stop =			xhci_stop,
+	.shutdown =		xhci_shutdown,
+
+	/*
+	 * managing i/o requests and associated device resources
+	 */
+	.urb_enqueue =		xhci_urb_enqueue,
+	.urb_dequeue =		xhci_urb_dequeue,
+	.alloc_dev =		xhci_alloc_dev,
+	.free_dev =		xhci_free_dev,
+	.alloc_streams =	xhci_alloc_streams,
+	.free_streams =		xhci_free_streams,
+	.add_endpoint =		xhci_add_endpoint,
+	.drop_endpoint =	xhci_drop_endpoint,
+	.endpoint_reset =	xhci_endpoint_reset,
+	.check_bandwidth =	xhci_check_bandwidth,
+	.reset_bandwidth =	xhci_reset_bandwidth,
+	.address_device =	xhci_address_device,
+	.update_hub_device =	xhci_update_hub_device,
+	.reset_device =		xhci_discover_or_reset_device,
+
+	/*
+	 * scheduling support
+	 */
+	.get_frame_number =	xhci_get_frame,
+
+	/* Root hub support */
+	.hub_control =		xhci_hub_control,
+	.hub_status_data =	xhci_hub_status_data,
+	.bus_suspend =		xhci_bus_suspend,
+	.bus_resume =		xhci_dwc_bus_resume,
+};
+
+static int if_usb_devices_connected(struct xhci_hcd *xhci)
+{
+	struct usb_device		*usb_dev;
+	int i, connected_devices = 0;
+
+	if (!xhci)
+		return -EINVAL;
+
+	usb_dev = xhci->main_hcd->self.root_hub;
+	for (i = 1; i <= usb_dev->maxchild; ++i) {
+		if (usb_hub_find_child(usb_dev, i))
+			connected_devices++;
+	}
+
+	usb_dev = xhci->shared_hcd->self.root_hub;
+	for (i = 1; i <= usb_dev->maxchild; ++i) {
+		if (usb_hub_find_child(usb_dev, i))
+			connected_devices++;
+	}
+
+	if (connected_devices)
+		return 1;
+
+	return 0;
+}
+
+/* For USB3 host electronic compliance test. Controller have to enter
+ * Loopback mode for RX test. But controller easier enter compliance
+ * mode by mistake. So driver need to trigger warm reset until enter
+ * loopback mode successful.
+ **/
+static void dwc3_poll_lp(struct work_struct *data)
+{
+	u32 pls, val;
+
+	if (!dwc3_xhci.comp_test_enable)
+		return;
+
+	val = readl(dwc3_xhci.portsc_mmaddr);
+	pls = val & PORT_PLS_MASK;
+
+	if (pls == XDEV_COMP && dwc3_xhci.is_rx_test)
+		writel(val | PORT_WR, dwc3_xhci.portsc_mmaddr);
+
+	if (!dwc3_xhci.is_rx_test || pls == XDEV_LOOPBACK) {
+		iounmap(dwc3_xhci.portsc_mmaddr);
+		return;
+	}
+	else
+		schedule_work(&dwc3_xhci.poll_loopback);
+}
+
+/* Do xHCI driver reinitialize when met fatal errors */
+static void dwc3_host_reset(struct work_struct *data)
+{
+	struct usb_hcd *hcd;
+
+	if (!dwc3_xhci.host_started || !data)
+		return;
+
+	if (!dwc3_xhci.xhci)
+		return;
+	hcd = dwc3_xhci.xhci->main_hcd;
+
+	/* Need hold wakelock to prevent the S3 interrupt
+	 * the reset work.*/
+	wake_lock(&dwc3_xhci.wakelock);
+	__dwc3_stop_host(hcd);
+	__dwc3_start_host(hcd);
+	wake_unlock(&dwc3_xhci.wakelock);
+}
+
+static void dwc_xhci_enable_phy_auto_resume(struct usb_hcd *hcd, bool enable)
+{
+	u32 val;
+
+	val = readl(hcd->regs + GUSB2PHYCFG0);
+	if (enable)
+		val |= GUSB2PHYCFG_ULPI_AUTO_RESUME;
+	else
+		val &= ~GUSB2PHYCFG_ULPI_AUTO_RESUME;
+	writel(val, hcd->regs + GUSB2PHYCFG0);
+}
+
+static void dwc_xhci_enable_phy_suspend(struct usb_hcd *hcd, bool enable)
+{
+	u32 val;
+
+	val = readl(hcd->regs + GUSB3PIPECTL0);
+	if (enable)
+		val |= GUSB3PIPECTL_SUS_EN;
+	else
+		val &= ~GUSB3PIPECTL_SUS_EN;
+	writel(val, hcd->regs + GUSB3PIPECTL0);
+
+	val = readl(hcd->regs + GUSB2PHYCFG0);
+	if (enable)
+		val |= GUSB2PHYCFG_SUS_PHY;
+	else
+		val &= ~GUSB2PHYCFG_SUS_PHY;
+	writel(val, hcd->regs + GUSB2PHYCFG0);
+}
+
+/* Some SS UMS will be enter polling state after plug in with micro A cable.
+ * If trigger warm reset, then link can be rescued to U0.
+ *
+ * This function copy from hub_port_reset function is USB core.
+ */
+static int dwc3_link_issue_wa(struct xhci_hcd *xhci)
+{
+	__le32 __iomem **addr;
+	int delay_time;
+	u32 val, delay = 0;
+
+	addr = dwc3_xhci.xhci->usb3_ports;
+	val = xhci_readl(dwc3_xhci.xhci, addr[0]);
+
+	/* If PORTSC.CCS bit haven't set. We can trigger warm reset
+	 * to double confirm if really have no device or link can't trained to
+	 * U0.
+	 */
+	if (!(val & PORT_CONNECT)) {
+		val |= PORT_WR;
+		xhci_writel(xhci, val, addr[0]);
+		xhci_dbg(xhci, "%s: trigger warm reset\n", __func__);
+	}
+
+	/* Waiting warm reset complete. */
+	for (delay_time = 0; delay_time < 800; delay_time += delay) {
+		msleep(delay);
+		val = xhci_readl(dwc3_xhci.xhci, addr[0]);
+		if (!(val & PORT_RESET))
+			break;
+
+		if (delay_time >= 20)
+			delay = 200;
+	}
+
+	if (val & PORT_RESET)
+		xhci_err(xhci, "%s port reset failed!\n", __func__);
+
+	return 0;
+}
+
+static void dwc_silicon_wa(struct usb_hcd *hcd)
+{
+	void __iomem *addr;
+	u32 val;
+
+	/* Clear GUCTL bit 15 as workaround of DWC controller Bugs
+	 * This Bug cause the xHCI driver does not see any
+	 * transfer complete events for certain EP after exit
+	 * from hibernation mode.*/
+	val = readl(hcd->regs + GUCTL);
+	val &= ~GUCTL_CMDEVADDR;
+	writel(val, hcd->regs + GUCTL);
+
+	/* Disable OTG3-EXI interface by default. It is one
+	 * workaround for silicon BUG. It will cause transfer
+	 * failed on EP#8 of any USB device.
+	 */
+	addr = ioremap_nocache(APBFC_EXIOTG3_MISC0_REG, 4);
+	val = readl(addr);
+	val |= (1 << 3);
+	writel(val, addr);
+	iounmap(addr);
+}
+
+static void dwc_core_reset(struct usb_hcd *hcd)
+{
+	u32 val;
+
+	val = readl(hcd->regs + GCTL);
+	val |= GCTL_CORESOFTRESET;
+	writel(val, hcd->regs + GCTL);
+
+	val = readl(hcd->regs + GUSB3PIPECTL0);
+	val |= GUSB3PIPECTL_PHYSOFTRST;
+	writel(val, hcd->regs + GUSB3PIPECTL0);
+
+	val = readl(hcd->regs + GUSB2PHYCFG0);
+	val |= GUSB2PHYCFG_PHYSOFTRST;
+	writel(val, hcd->regs + GUSB2PHYCFG0);
+
+	msleep(100);
+
+	val = readl(hcd->regs + GUSB3PIPECTL0);
+	val &= ~GUSB3PIPECTL_PHYSOFTRST;
+	writel(val, hcd->regs + GUSB3PIPECTL0);
+
+	val = readl(hcd->regs + GUSB2PHYCFG0);
+	val &= ~GUSB2PHYCFG_PHYSOFTRST;
+	writel(val, hcd->regs + GUSB2PHYCFG0);
+
+	msleep(20);
+
+	val = readl(hcd->regs + GCTL);
+	val &= ~GCTL_CORESOFTRESET;
+	writel(val, hcd->regs + GCTL);
+}
+
+/*
+ * On MERR platform, the suspend clock is 19.2MHz.
+ * Hence PwrDnScale = 19200 / 16 = 1200 (= 0x4B0).
+ * To account for possible jitter of suspend clock and to have margin,
+ * So recommend it to be set to 1250 (= 0x4E2).
+ * */
+static void dwc_set_ssphy_p3_clockrate(struct usb_hcd *hcd)
+{
+	u32 gctl;
+
+	gctl = readl(hcd->regs + GCTL);
+	gctl &= ~GCTL_PWRDNSCALE_MASK;
+	gctl |= GCTL_PWRDNSCALE(0x4E2);
+	writel(gctl, hcd->regs + GCTL);
+}
+
+static ssize_t
+show_pm_get(struct device *_dev, struct device_attribute *attr, char *buf)
+{
+	struct platform_device		*pdev = to_platform_device(_dev);
+	struct usb_hcd		*hcd = platform_get_drvdata(pdev);
+
+	pm_runtime_put(hcd->self.controller);
+	return 0;
+
+}
+static ssize_t store_pm_get(struct device *_dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct platform_device		*pdev = to_platform_device(_dev);
+	struct usb_hcd		*hcd = platform_get_drvdata(pdev);
+
+	pm_runtime_get(hcd->self.controller);
+	return count;
+
+}
+static DEVICE_ATTR(pm_get, S_IRUGO|S_IWUSR|S_IWGRP,
+			show_pm_get, store_pm_get);
+/*
+ * This is for host compliance test
+ * *
+ */
+static ssize_t
+show_host_comp_test(struct device *_dev, struct device_attribute *attr, char *buf)
+{
+	char				*next;
+	unsigned			size, t;
+
+	next = buf;
+	size = PAGE_SIZE;
+
+	t = scnprintf(next, size, "%s\n",
+		(dwc3_xhci.comp_test_enable ? "compliance test enabled, echo 0 to disable"
+		 : "compliance test disabled, echo 1 to enable")
+		);
+	size -= t;
+	next += t;
+
+	return PAGE_SIZE - size;
+}
+
+static ssize_t
+store_host_comp_test(struct device *_dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct platform_device		*pdev = to_platform_device(_dev);
+	struct usb_hcd		*hcd = platform_get_drvdata(pdev);
+
+	void __iomem *addr;
+	u32 val;
+
+	if (count != 2) {
+		dev_err(hcd->self.controller, "return EINVAL\n");
+		return -EINVAL;
+	}
+
+	if (count > 0 && buf[count-1] == '\n')
+		((char *) buf)[count-1] = 0;
+
+	switch (buf[0]) {
+	case 'R':
+			if (!dwc3_xhci.comp_test_enable)
+				break;
+			dwc3_xhci.is_rx_test = 1;
+			dwc3_xhci.portsc_mmaddr = ioremap_nocache(PORTSC_IO_ADDR, 4);
+			if (!dwc3_xhci.portsc_mmaddr) {
+				dev_err(hcd->self.controller,
+						"ioremap failed!\n");
+				return -ENOMEM;
+			}
+			schedule_work(&dwc3_xhci.poll_loopback);
+			break;
+	case 'T':
+			dwc3_xhci.is_rx_test = 0;
+			if (!dwc3_xhci.comp_test_enable)
+				break;
+			break;
+	case '0':
+		if (dwc3_xhci.comp_test_enable) {
+			dev_dbg(hcd->self.controller, "run xHC\n");
+			addr = ioremap_nocache(USBCMD_IO_ADDR, 4);
+			if (!addr) {
+				dev_err(hcd->self.controller,
+						"ioremap failed!\n");
+				return -ENOMEM;
+			}
+			val = readl(addr);
+			val |= CMD_RUN;
+			writel(val, addr);
+			iounmap(addr);
+			pm_runtime_put(hcd->self.controller);
+			wake_unlock(&hcd->wake_lock);
+			dwc3_xhci.comp_test_enable = false;
+		}
+		break;
+	case '1':
+		if (!dwc3_xhci.comp_test_enable) {
+			dev_dbg(hcd->self.controller, "halt xHC\n");
+			wake_lock(&hcd->wake_lock);
+			pm_runtime_get_sync(hcd->self.controller);
+			addr = ioremap_nocache(USBCMD_IO_ADDR, 4);
+			if (!addr) {
+				dev_err(hcd->self.controller,
+						"ioremap failed!\n");
+				return -ENOMEM;
+			}
+			val = readl(addr);
+			val &= ~CMD_RUN;
+			writel(val, addr);
+			iounmap(addr);
+			dwc3_xhci.comp_test_enable = true;
+		}
+		break;
+	default:
+		dev_dbg(hcd->self.controller,
+				"Just support 0(halt)/1(run)\n");
+		return -EINVAL;
+	}
+	return count;
+}
+static DEVICE_ATTR(host_comp_test, S_IRUGO|S_IWUSR|S_IWGRP,
+			show_host_comp_test, store_host_comp_test);
+
+static void dwc_set_host_mode(struct usb_hcd *hcd)
+{
+	writel(0x45801000, hcd->regs + GCTL);
+
+	msleep(20);
+}
+
+static int dwc3_start_host(struct usb_hcd *hcd)
+{
+	dwc3_xhci.host_started = true;
+	__dwc3_start_host(hcd);
+
+	return 0;
+
+}
+
+static int __dwc3_start_host(struct usb_hcd *hcd)
+{
+	int ret = -EINVAL;
+	struct xhci_hcd *xhci;
+	struct usb_hcd *xhci_shared_hcd;
+
+	if (!hcd)
+		return ret;
+
+	if (hcd->rh_registered) {
+		dev_dbg(hcd->self.controller,
+				"%s() - Already registered", __func__);
+		return 0;
+	}
+
+	if (dwc3_xhci.comp_test_enable) {
+		dev_dbg(hcd->self.controller,
+				"%s() - Now is in comp test mode", __func__);
+		return 0;
+	}
+
+	pm_runtime_get_sync(hcd->self.controller);
+
+	dwc_core_reset(hcd);
+	dwc_silicon_wa(hcd);
+	dwc_set_host_mode(hcd);
+	dwc_set_ssphy_p3_clockrate(hcd);
+
+	/* Clear the hcd->flags.
+	 * To prevent incorrect flags set during last time. */
+	hcd->flags = 0;
+
+	ret = usb_add_hcd(hcd, dwc3_xhci.otg_irqnum, IRQF_SHARED);
+	if (ret)
+		return -EINVAL;
+
+	xhci = hcd_to_xhci(hcd);
+	dwc3_xhci.xhci = xhci;
+	xhci->reset_hcd_work = &dwc3_xhci.reset_hcd;
+	xhci->shared_hcd = usb_create_shared_hcd(&xhci_dwc_hc_driver,
+		   hcd->self.controller, dev_name(hcd->self.controller), hcd);
+	if (!xhci->shared_hcd) {
+		ret = -ENOMEM;
+		goto dealloc_usb2_hcd;
+	}
+
+	/* Set the xHCI pointer before xhci_pci_setup() (aka hcd_driver.reset)
+	 * is called by usb_add_hcd().
+	 */
+	*((struct xhci_hcd **) xhci->shared_hcd->hcd_priv) = xhci;
+
+	xhci->shared_hcd->regs = hcd->regs;
+
+	xhci->shared_hcd->rsrc_start = hcd->rsrc_start;
+	xhci->shared_hcd->rsrc_len = hcd->rsrc_len;
+
+	ret = usb_add_hcd(xhci->shared_hcd, dwc3_xhci.otg_irqnum, IRQF_SHARED);
+	if (ret)
+		goto put_usb3_hcd;
+
+	dwc3_link_issue_wa(xhci);
+	pm_runtime_put(hcd->self.controller);
+
+	ret = device_create_file(hcd->self.controller, &dev_attr_pm_get);
+	if (ret < 0)
+		dev_err(hcd->self.controller,
+			"Can't register sysfs attribute: %d\n", ret);
+
+	dwc3_xhci_driver.shutdown = usb_hcd_platform_shutdown;
+
+	return ret;
+
+put_usb3_hcd:
+	if (xhci->shared_hcd) {
+		xhci_shared_hcd = xhci->shared_hcd;
+		usb_remove_hcd(xhci_shared_hcd);
+		usb_put_hcd(xhci_shared_hcd);
+	}
+
+dealloc_usb2_hcd:
+	local_irq_disable();
+	usb_hcd_irq(0, hcd);
+	local_irq_enable();
+	usb_remove_hcd(hcd);
+
+	kfree(xhci);
+	*((struct xhci_hcd **) hcd->hcd_priv) = NULL;
+
+	pm_runtime_put(hcd->self.controller);
+	return ret;
+}
+
+static int __dwc3_stop_host(struct usb_hcd *hcd)
+{
+	int count = 0;
+	u32 data;
+	struct xhci_hcd *xhci;
+	struct usb_hcd *xhci_shared_hcd;
+
+	if (!hcd)
+		return -EINVAL;
+
+	if (dwc3_xhci.comp_test_enable) {
+		dev_dbg(hcd->self.controller,
+				"%s() - Now is in comp test mode", __func__);
+		return 0;
+	}
+
+	xhci = hcd_to_xhci(hcd);
+
+	pm_runtime_get_sync(hcd->self.controller);
+
+	/* Disable hibernation mode for D0i3cold. */
+	data = readl(hcd->regs + GCTL);
+	data &= ~GCTL_GBL_HIBERNATION_EN;
+	writel(data, hcd->regs + GCTL);
+
+	/* When plug out micro A cable, there will be two flows be executed.
+	 * The first one is xHCI controller get disconnect event. The
+	 * second one is PMIC get ID change event. During these events
+	 * handling, they both try to call usb_disconnect. Then met some
+	 * conflicts and cause kernel panic.
+	 * So treat disconnect event as first priority, handle the ID change
+	 * event until disconnect event handled done.*/
+	while (if_usb_devices_connected(xhci)) {
+		msleep(20);
+		if (count++ > WAIT_DISC_EVENT_COMPLETE_TIMEOUT)
+			break;
+	};
+	dwc3_xhci_driver.shutdown = NULL;
+
+	if (xhci->shared_hcd) {
+		xhci_shared_hcd = xhci->shared_hcd;
+		usb_remove_hcd(xhci_shared_hcd);
+		usb_put_hcd(xhci_shared_hcd);
+	}
+
+	usb_remove_hcd(hcd);
+
+	dwc3_xhci.xhci = NULL;
+	kfree(xhci);
+	*((struct xhci_hcd **) hcd->hcd_priv) = NULL;
+
+	dwc_xhci_enable_phy_suspend(hcd, false);
+
+	pm_runtime_put(hcd->self.controller);
+	device_remove_file(hcd->self.controller, &dev_attr_pm_get);
+	return 0;
+}
+
+static int dwc3_stop_host(struct usb_hcd *hcd)
+{
+	struct xhci_hcd *xhci;
+
+	xhci = hcd_to_xhci(hcd);
+	if (!xhci)
+		return -ENODEV;
+
+	dwc3_xhci.host_started = false;
+
+	cancel_work_sync(xhci->reset_hcd_work);
+	__dwc3_stop_host(hcd);
+
+	return 0;
+}
+
+static int xhci_dwc_drv_probe(struct platform_device *pdev)
+{
+	struct dwc_otg2 *otg;
+	struct usb_phy *usb_phy;
+	struct dwc_device_par *pdata;
+	struct usb_hcd *hcd;
+	struct resource *res;
+	int retval = 0;
+	int ret;
+
+	if (usb_disabled())
+		return -ENODEV;
+
+	pr_debug("initializing FSL-SOC USB Controller\n");
+
+	/* Need platform data for setup */
+	pdata = (struct dwc_device_par *)pdev->dev.platform_data;
+	if (!pdata) {
+		dev_err(&pdev->dev,
+			"No platform data for %s.\n", dev_name(&pdev->dev));
+		return -ENODEV;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!res) {
+		dev_err(&pdev->dev,
+			"Found HC with no IRQ. Check %s setup!\n",
+			dev_name(&pdev->dev));
+		return -ENODEV;
+	}
+	dwc3_xhci.otg_irqnum = res->start;
+
+	hcd = usb_create_hcd(&xhci_dwc_hc_driver,
+			&pdev->dev, dev_name(&pdev->dev));
+	if (!hcd) {
+		retval = -ENOMEM;
+		return retval;
+	}
+
+	hcd->regs = pdata->io_addr;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&pdev->dev,
+			"Found HC with no IRQ. Check %s setup!\n",
+			dev_name(&pdev->dev));
+		return -ENODEV;
+	}
+	hcd->rsrc_start = res->start;
+	hcd->rsrc_len = res->end - res->start;
+
+	usb_phy = usb_get_phy(USB_PHY_TYPE_USB2);
+	if (usb_phy)
+		otg_set_host(usb_phy->otg, &hcd->self);
+
+	otg = container_of(usb_phy->otg, struct dwc_otg2, otg);
+	if (otg) {
+		otg->start_host = dwc3_start_host;
+		otg->stop_host = dwc3_stop_host;
+		otg->suspend_host = dwc3_suspend_host;
+		otg->resume_host = dwc3_resume_host;
+	}
+
+	usb_put_phy(usb_phy);
+
+	/* Enable wakeup irq */
+	hcd->has_wakeup_irq = 1;
+	INIT_WORK(&dwc3_xhci.reset_hcd, dwc3_host_reset);
+	INIT_WORK(&dwc3_xhci.poll_loopback, dwc3_poll_lp);
+	wake_lock_init(&dwc3_xhci.wakelock, WAKE_LOCK_SUSPEND,
+			"dwc3_host_wakelock");
+
+	platform_set_drvdata(pdev, hcd);
+	pm_runtime_no_callbacks(hcd->self.controller);
+	pm_runtime_enable(hcd->self.controller);
+	ret = device_create_file(hcd->self.controller, &dev_attr_host_comp_test);
+	if (ret < 0)
+		dev_err(hcd->self.controller,
+			"Can't register sysfs attribute: %d\n", ret);
+
+	return retval;
+}
+
+static int xhci_dwc_drv_remove(struct platform_device *pdev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+	struct usb_phy *usb_phy;
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+	usb_phy = usb_get_phy(USB_PHY_TYPE_USB2);
+	otg_set_host(usb_phy->otg, NULL);
+	usb_put_phy(usb_phy);
+
+	if (xhci)
+		dwc3_stop_host(hcd);
+	usb_put_hcd(hcd);
+
+	pm_runtime_disable(hcd->self.controller);
+	pm_runtime_set_suspended(hcd->self.controller);
+	wake_lock_destroy(&dwc3_xhci.wakelock);
+	return 0;
+}
+
+
+#ifdef CONFIG_PM
+/* dwc_hcd_suspend_common and dwc_hcd_resume_common are refer to
+ * suspend_common and resume_common in usb core.
+ * Because the usb core function just support PCI device.
+ * So re-write them in here to support platform devices.
+ */
+static int dwc_hcd_suspend_common(struct device *dev)
+{
+	struct platform_device		*pdev = to_platform_device(dev);
+	struct usb_hcd		*hcd = platform_get_drvdata(pdev);
+	struct xhci_hcd		*xhci = hcd_to_xhci(hcd);
+	int			retval = 0;
+	u32 data = 0;
+
+	if (!xhci) {
+		dev_dbg(dev, "%s: host already stop!\n", __func__);
+		return 0;
+	}
+
+	/* Root hub suspend should have stopped all downstream traffic,
+	 * and all bus master traffic.  And done so for both the interface
+	 * and the stub usb_device (which we check here).  But maybe it
+	 * didn't; writing sysfs power/state files ignores such rules...
+	 */
+	if (HCD_RH_RUNNING(hcd)) {
+		dev_warn(dev, "Root hub is not suspended\n");
+		return -EBUSY;
+	}
+	if (hcd->shared_hcd) {
+		hcd = hcd->shared_hcd;
+		if (HCD_RH_RUNNING(hcd)) {
+			dev_warn(dev, "Secondary root hub is not suspended\n");
+			return -EBUSY;
+		}
+	}
+
+	if (!HCD_DEAD(hcd)) {
+		/* Optimization: Don't suspend if a root-hub wakeup is
+		 * pending and it would cause the HCD to wake up anyway.
+		 */
+		if (HCD_WAKEUP_PENDING(hcd))
+			return -EBUSY;
+		if (hcd->shared_hcd &&
+				HCD_WAKEUP_PENDING(hcd->shared_hcd))
+			return -EBUSY;
+		if (hcd->state != HC_STATE_SUSPENDED ||
+				xhci->shared_hcd->state != HC_STATE_SUSPENDED)
+			retval = -EINVAL;
+
+		if (!retval) {
+			/* The auto-resume is diabled by default. Need enable it
+			 * if there have valid connection. To ensure that when
+			 * device resumes, host does resume reflect within
+			 * 900 usec as in USB spec.
+			 */
+			if (if_usb_devices_connected(xhci) == 1)
+				dwc_xhci_enable_phy_auto_resume(
+						xhci->main_hcd, true);
+
+			/* Ensure that suspend enable are set for
+			 * USB2 and USB3 PHY
+			 */
+			dwc_xhci_enable_phy_suspend(hcd, true);
+
+			data = readl(hcd->regs + GCTL);
+			data |= GCTL_GBL_HIBERNATION_EN;
+			writel(data, hcd->regs + GCTL);
+			dev_dbg(hcd->self.controller, "set xhci hibernation enable!\n");
+			retval = xhci_suspend(xhci);
+		}
+
+		/* Check again in case wakeup raced with pci_suspend */
+		if ((retval == 0 && HCD_WAKEUP_PENDING(hcd)) ||
+				(retval == 0 && hcd->shared_hcd &&
+				 HCD_WAKEUP_PENDING(hcd->shared_hcd))) {
+			xhci_resume(xhci, false);
+			retval = -EBUSY;
+		}
+		if (retval)
+			return retval;
+	}
+
+	synchronize_irq(dwc3_xhci.otg_irqnum);
+
+	return retval;
+
+}
+
+static int dwc_hcd_resume_common(struct device *dev)
+{
+	struct platform_device		*pdev = to_platform_device(dev);
+	struct usb_hcd		*hcd = platform_get_drvdata(pdev);
+	struct xhci_hcd		*xhci = hcd_to_xhci(hcd);
+	int			retval = 0;
+
+	if (!xhci)
+		return 0;
+
+	if (HCD_RH_RUNNING(hcd) ||
+			(hcd->shared_hcd &&
+			 HCD_RH_RUNNING(hcd->shared_hcd))) {
+		dev_dbg(dev, "can't resume, not suspended!\n");
+		return 0;
+	}
+
+	if (!HCD_DEAD(hcd)) {
+		retval = xhci_resume(xhci, false);
+		if (retval) {
+			dev_err(dev, "PCI post-resume error %d!\n", retval);
+			if (hcd->shared_hcd)
+				usb_hc_died(hcd->shared_hcd);
+			usb_hc_died(hcd);
+		}
+	}
+
+	dev_dbg(dev, "hcd_pci_runtime_resume: %d\n", retval);
+
+	return retval;
+}
+
+static int dwc3_suspend_host(struct usb_hcd *hcd)
+{
+	int retval;
+
+	if (!hcd)
+		return -EINVAL;
+
+	retval = dwc_hcd_suspend_common(hcd->self.controller);
+
+	if (retval)
+		dwc_xhci_enable_phy_auto_resume(
+			hcd, false);
+
+	dev_dbg(hcd->self.controller, "%s: %d\n", __func__, retval);
+	return retval;
+}
+
+static int dwc3_resume_host(struct usb_hcd *hcd)
+{
+	int retval;
+
+	if (!hcd)
+		return -EINVAL;
+
+	dwc_xhci_enable_phy_auto_resume(
+			hcd, false);
+
+	retval = dwc_hcd_resume_common(hcd->self.controller);
+	dev_dbg(hcd->self.controller, "%s: %d\n", __func__, retval);
+
+	return retval;
+}
+#endif
+
+static struct platform_driver dwc3_xhci_driver = {
+	.probe = xhci_dwc_drv_probe,
+	.remove = xhci_dwc_drv_remove,
+	.driver = {
+		.name = "dwc3-host",
+	},
+};
diff --git a/drivers/usb/dwc3/dwc3-intel-mrfl.c b/drivers/usb/dwc3/dwc3-intel-mrfl.c
new file mode 100644
index 0000000..ee4eefc
--- /dev/null
+++ b/drivers/usb/dwc3/dwc3-intel-mrfl.c
@@ -0,0 +1,1278 @@
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/usb/otg.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/freezer.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/version.h>
+
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/dwc3-intel-mid.h>
+#include <linux/iio/consumer.h>
+#include <asm/intel_scu_pmic.h>
+#include "otg.h"
+
+#define VERSION "2.10a"
+
+static int otg_id = -1;
+static int enable_usb_phy(struct dwc_otg2 *otg, bool on_off);
+static int dwc3_intel_notify_charger_type(struct dwc_otg2 *otg,
+		enum power_supply_charger_event event);
+static struct power_supply_cable_props cap_record;
+static int shady_cove_get_id(struct dwc_otg2 *otg);
+
+static int charger_detect_enable(struct dwc_otg2 *otg)
+{
+	struct intel_dwc_otg_pdata *data;
+
+	if (!otg || !otg->otg_data)
+		return 0;
+
+	data = (struct intel_dwc_otg_pdata *)otg->otg_data;
+
+	return data->charger_detect_enable;
+}
+
+static int is_basin_cove(struct dwc_otg2 *otg)
+{
+	struct intel_dwc_otg_pdata *data;
+	if (!otg || !otg->otg_data)
+		return -EINVAL;
+
+	data = (struct intel_dwc_otg_pdata *)otg->otg_data;
+
+	return data->pmic_type == BASIN_COVE;
+}
+
+static int is_utmi_phy(struct dwc_otg2 *otg)
+{
+	if (!otg || !otg->otg_data)
+		return -EINVAL;
+
+	return otg->usb2_phy.intf == USB2_PHY_UTMI;
+}
+
+void dwc3_switch_mode(struct dwc_otg2 *otg, u32 mode)
+{
+	u32 reg;
+
+	reg = otg_read(otg, GCTL);
+	reg &= ~(GCTL_PRT_CAP_DIR_OTG << GCTL_PRT_CAP_DIR_SHIFT);
+	reg |= mode << GCTL_PRT_CAP_DIR_SHIFT;
+	otg_write(otg, GCTL, reg);
+}
+
+
+static int is_hybridvp(struct dwc_otg2 *otg)
+{
+	struct intel_dwc_otg_pdata *data;
+	if (!otg || !otg->otg_data)
+		return -EINVAL;
+
+	data = (struct intel_dwc_otg_pdata *)otg->otg_data;
+
+	return data->is_hvp;
+}
+
+static void usb2phy_eye_optimization(struct dwc_otg2 *otg)
+{
+	void __iomem *addr;
+	struct usb_phy *phy;
+	struct intel_dwc_otg_pdata *data;
+
+	if (!otg || !otg->otg_data)
+		return;
+
+	data = (struct intel_dwc_otg_pdata *)otg->otg_data;
+
+	phy = usb_get_phy(USB_PHY_TYPE_USB2);
+	if (!phy)
+		return;
+
+	if ((data->usb2_phy_type == USB2_PHY_ULPI) && !!data->ulpi_eye_calibration)
+		usb_phy_io_write(phy, data->ulpi_eye_calibration, TUSB1211_VENDOR_SPECIFIC1_SET);
+	else if ((data->usb2_phy_type == USB2_PHY_UTMI) && !!data->utmi_eye_calibration) {
+		addr = ioremap_nocache(UTMI_PHY_USB2PERPORT, 4);
+		if (!addr) {
+			otg_info(otg, "UTMI phy register ioremap failed, use default setup!\n");
+			usb_put_phy(phy);
+			return;
+		}
+		writel(data->utmi_eye_calibration, addr);
+		iounmap(addr);
+	} else
+		otg_info(otg, "usb2 phy eye optimization fail, use default setup!\n");
+
+	usb_put_phy(phy);
+}
+
+
+/* As we use SW mode to do charger detection, need to notify HW
+ * the result SW get, charging port or not */
+static int dwc_otg_charger_hwdet(bool enable)
+{
+	int				retval;
+	struct usb_phy *phy;
+	struct dwc_otg2 *otg = dwc3_get_otg();
+
+	/* Just return if charger detection is not enabled */
+	if (!charger_detect_enable(otg))
+		return 0;
+
+	phy = usb_get_phy(USB_PHY_TYPE_USB2);
+	if (!phy)
+		return -ENODEV;
+
+	if (enable) {
+		retval = usb_phy_io_write(phy, PWCTRL_HWDETECT,
+				TUSB1211_POWER_CONTROL_SET);
+		if (retval)
+			return retval;
+		otg_dbg(otg, "set HWDETECT\n");
+	} else {
+		retval = usb_phy_io_write(phy, PWCTRL_HWDETECT,
+				TUSB1211_POWER_CONTROL_CLR);
+		if (retval)
+			return retval;
+		otg_dbg(otg, "clear HWDETECT\n");
+	}
+	usb_put_phy(phy);
+
+	return 0;
+}
+
+static enum power_supply_charger_cable_type
+			basin_cove_aca_check(struct dwc_otg2 *otg)
+{
+	u8 rarbrc;
+	int ret;
+	enum power_supply_charger_cable_type type =
+		POWER_SUPPLY_CHARGER_TYPE_NONE;
+
+	ret = intel_scu_ipc_update_register(PMIC_USBIDCTRL,
+			USBIDCTRL_ACA_DETEN_D1,
+			USBIDCTRL_ACA_DETEN_D1);
+	if (ret)
+		otg_err(otg, "Fail to enable ACA&ID detection logic\n");
+
+	/* Wait >66.1ms (for TCHGD_SERX_DEB) */
+	msleep(66);
+
+	/* Read decoded RID value */
+	ret = intel_scu_ipc_ioread8(PMIC_USBIDSTS, &rarbrc);
+	if (ret)
+		otg_err(otg, "Fail to read decoded RID value\n");
+	rarbrc &= USBIDSTS_ID_RARBRC_STS(3);
+	rarbrc >>= 1;
+
+	/* If ID_RARBRC_STS==01: ACA-Dock detected
+	 * If ID_RARBRC_STS==00: MHL detected
+	 */
+	if (rarbrc == 1) {
+		/* ACA-Dock */
+		type = POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK;
+	} else if (!rarbrc) {
+		/* MHL */
+		type = POWER_SUPPLY_CHARGER_TYPE_MHL;
+	}
+
+	ret = intel_scu_ipc_update_register(PMIC_USBIDCTRL,
+			USBIDCTRL_ACA_DETEN_D1,
+			0);
+	if (ret)
+		otg_err(otg, "Fail to enable ACA&ID detection logic\n");
+
+	return type;
+}
+
+static enum power_supply_charger_cable_type
+		shady_cove_aca_check(struct dwc_otg2 *otg)
+{
+
+	if (!otg)
+		return POWER_SUPPLY_CHARGER_TYPE_NONE;
+
+	switch (shady_cove_get_id(otg)) {
+	case RID_A:
+		return POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK;
+	case RID_B:
+		return POWER_SUPPLY_CHARGER_TYPE_ACA_B;
+	case RID_C:
+		return POWER_SUPPLY_CHARGER_TYPE_ACA_C;
+	default:
+		return POWER_SUPPLY_CHARGER_TYPE_NONE;
+	}
+}
+
+static enum power_supply_charger_cable_type
+		dwc3_intel_aca_check(struct dwc_otg2 *otg)
+{
+	if (is_basin_cove(otg))
+		return basin_cove_aca_check(otg);
+	else
+		return shady_cove_aca_check(otg);
+}
+
+static ssize_t store_otg_id(struct device *_dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned long flags;
+	struct dwc_otg2 *otg = dwc3_get_otg();
+
+	if (!otg)
+		return 0;
+	if (count != 2) {
+		otg_err(otg, "return EINVAL\n");
+		return -EINVAL;
+	}
+
+	if (count > 0 && buf[count-1] == '\n')
+		((char *) buf)[count-1] = 0;
+
+	switch (buf[0]) {
+	case 'a':
+	case 'A':
+		otg_dbg(otg, "Change ID to A\n");
+		otg->user_events |= USER_ID_A_CHANGE_EVENT;
+		spin_lock_irqsave(&otg->lock, flags);
+		dwc3_wakeup_otg_thread(otg);
+		otg_id = 0;
+		spin_unlock_irqrestore(&otg->lock, flags);
+		return count;
+	case 'b':
+	case 'B':
+		otg_dbg(otg, "Change ID to B\n");
+		otg->user_events |= USER_ID_B_CHANGE_EVENT;
+		spin_lock_irqsave(&otg->lock, flags);
+		dwc3_wakeup_otg_thread(otg);
+		otg_id = 1;
+		spin_unlock_irqrestore(&otg->lock, flags);
+		return count;
+	default:
+		otg_err(otg, "Just support change ID to A!\n");
+		return -EINVAL;
+	}
+
+	return count;
+}
+
+static ssize_t
+show_otg_id(struct device *_dev, struct device_attribute *attr, char *buf)
+{
+	char				*next;
+	unsigned			size, t;
+
+	next = buf;
+	size = PAGE_SIZE;
+
+	t = scnprintf(next, size,
+		"USB OTG ID: %s\n",
+		(otg_id ? "B" : "A")
+		);
+	size -= t;
+	next += t;
+
+	return PAGE_SIZE - size;
+}
+
+static DEVICE_ATTR(otg_id, S_IRUGO|S_IWUSR|S_IWGRP,
+			show_otg_id, store_otg_id);
+
+static void dwc_a_bus_drop(struct usb_phy *x)
+{
+	struct dwc_otg2 *otg = dwc3_get_otg();
+	unsigned long flags;
+
+	if (otg->usb2_phy.vbus_state == VBUS_DISABLED) {
+		spin_lock_irqsave(&otg->lock, flags);
+		otg->user_events |= USER_A_BUS_DROP;
+		dwc3_wakeup_otg_thread(otg);
+		spin_unlock_irqrestore(&otg->lock, flags);
+	}
+}
+
+static void set_sus_phy(struct dwc_otg2 *otg, int bit)
+{
+	u32 data = 0;
+
+	data = otg_read(otg, GUSB2PHYCFG0);
+	if (bit)
+		data |= GUSB2PHYCFG_SUS_PHY;
+	else
+		data &= ~GUSB2PHYCFG_SUS_PHY;
+
+	otg_write(otg, GUSB2PHYCFG0, data);
+
+	data = otg_read(otg, GUSB3PIPECTL0);
+	if (bit)
+		data |= GUSB3PIPECTL_SUS_EN;
+	else
+		data &= ~GUSB3PIPECTL_SUS_EN;
+	otg_write(otg, GUSB3PIPECTL0, data);
+}
+
+/* This function is used to control VUSBPHY or assert/deassert USBRST_N
+ * pin to control usb2 phy enter/exit low power mode.
+ */
+static int control_usb_phy_power(u16 addr, bool on_off)
+{
+	int ret;
+	u8 mask, bits;
+
+	if (addr == PMIC_VLDOCNT)
+		mask = PMIC_VLDOCNT_VUSBPHYEN;
+	else if (addr == PMIC_USBPHYCTRL)
+		mask = PMIC_USBPHYCTRL_D0;
+	else
+		return -EINVAL;
+
+	if (on_off)
+		bits = mask;
+	else
+		bits = 0x00;
+
+	ret = intel_scu_ipc_update_register(addr,
+			bits, mask);
+
+	/* Debounce 10ms for turn on VUSBPHY */
+	if (on_off)
+		usleep_range(10000, 11000);
+
+	return ret;
+}
+
+/* This function will control VUSBPHY to power gate/ungate USBPHY.
+ * If current platform haven't using VUSBPHY, then assert/deassert
+ * USBRST_N pin to make PHY enter reset state.
+ */
+static int enable_usb_phy(struct dwc_otg2 *otg, bool on_off)
+{
+	struct intel_dwc_otg_pdata *data;
+	int ret;
+
+	if (!otg || !otg->otg_data)
+		return -EINVAL;
+
+	data = (struct intel_dwc_otg_pdata *)otg->otg_data;
+	if (data->using_vusbphy)
+		ret = control_usb_phy_power(PMIC_VLDOCNT, on_off);
+	else
+		ret = control_usb_phy_power(PMIC_USBPHYCTRL, on_off);
+
+	if (ret)
+		otg_err(otg, "dwc3 %s usb phy failed\n",
+				on_off ? "enable" : "disable");
+
+	return ret;
+}
+
+
+int dwc3_intel_platform_init(struct dwc_otg2 *otg)
+{
+	int retval;
+	struct intel_dwc_otg_pdata *data;
+
+	data = (struct intel_dwc_otg_pdata *)otg->otg_data;
+
+	/* Init a_bus_drop callback */
+	otg->usb2_phy.a_bus_drop = dwc_a_bus_drop;
+	otg->usb2_phy.vbus_state = VBUS_ENABLED;
+	/* Get usb2 phy type */
+	otg->usb2_phy.intf = data->usb2_phy_type;
+
+	/* Turn off VUSBPHY if it haven't used by USB2 PHY.
+	 * Otherwise, it will consume ~2.6mA(on VSYS) on MOFD.
+	 */
+	if (!data->using_vusbphy) {
+		retval = control_usb_phy_power(PMIC_VLDOCNT, false);
+		if (retval)
+			otg_err(otg, "Fail to turn off VUSBPHY\n");
+	} else if (!is_utmi_phy(otg)) {
+		/* If the current USB2 PHY low power controlled by VUSBPHY. Then
+		 * we need to de-assert USBRST pin to make USB2 PHY always stay
+		 * in active state.
+		 */
+		retval = control_usb_phy_power(PMIC_USBPHYCTRL, true);
+		if (retval)
+			otg_err(otg, "Fail to de-assert USBRST#\n");
+	} else {
+		/* If we are using utmi phy, and through VUSBPHY to do power
+		 * control. Then we need to assert USBRST# for external ULPI phy
+		 * to ask it under inactive state saving power.
+		 */
+		retval = control_usb_phy_power(PMIC_USBPHYCTRL, false);
+		if (retval)
+			otg_err(otg, "Fail to de-assert USBRST#\n");
+	}
+
+	/* Don't let phy go to suspend mode, which
+	 * will cause FS/LS devices enum failed in host mode.
+	 */
+	set_sus_phy(otg, 0);
+
+	retval = device_create_file(otg->dev, &dev_attr_otg_id);
+	if (retval < 0) {
+		otg_dbg(otg,
+			"Can't register sysfs attribute: %d\n", retval);
+		return -ENOMEM;
+	}
+
+	otg_dbg(otg, "\n");
+	otg_write(otg, OEVTEN, 0);
+	otg_write(otg, OCTL, 0);
+
+	dwc3_switch_mode(otg, GCTL_PRT_CAP_DIR_OTG);
+
+	return 0;
+}
+
+/* Disable auto-resume feature for USB2 PHY. This is one
+ * silicon workaround. It will cause fabric timeout error
+ * for LS case after resume from hibernation */
+static void disable_phy_auto_resume(struct dwc_otg2 *otg)
+{
+	u32 data = 0;
+
+	data = otg_read(otg, GUSB2PHYCFG0);
+	data &= ~GUSB2PHYCFG_ULPI_AUTO_RESUME;
+	otg_write(otg, GUSB2PHYCFG0, data);
+}
+
+int basin_cove_get_id(struct dwc_otg2 *otg)
+{
+	int ret, id = RID_UNKNOWN;
+	u8 idsts, pmic_id;
+
+	ret = intel_scu_ipc_update_register(PMIC_USBIDCTRL,
+			USBIDCTRL_ACA_DETEN_D1 | PMIC_USBPHYCTRL_D0,
+			USBIDCTRL_ACA_DETEN_D1 | PMIC_USBPHYCTRL_D0);
+	if (ret)
+		otg_err(otg, "Fail to enable ACA&ID detection logic\n");
+
+	ret = intel_scu_ipc_ioread8(PMIC_USBIDSTS, &idsts);
+	if (ret) {
+		otg_err(otg, "Fail to read id\n");
+		return id;
+	}
+
+	if (idsts & USBIDSTS_ID_FLOAT_STS)
+		id = RID_FLOAT;
+	else if (idsts & USBIDSTS_ID_RARBRC_STS(1))
+		id = RID_A;
+	else if (idsts & USBIDSTS_ID_RARBRC_STS(2))
+		id = RID_B;
+	else if (idsts & USBIDSTS_ID_RARBRC_STS(3))
+		id = RID_C;
+	else {
+		/* PMIC A0 reports ID_GND = 0 for RID_GND but PMIC B0 reports
+		*  ID_GND = 1 for RID_GND
+		*/
+		ret = intel_scu_ipc_ioread8(0x00, &pmic_id);
+		if (ret) {
+			otg_err(otg, "Fail to read PMIC ID register\n");
+		} else if (((pmic_id & VENDOR_ID_MASK) == BASIN_COVE_PMIC_ID) &&
+			((pmic_id & PMIC_MAJOR_REV) == PMIC_A0_MAJOR_REV)) {
+				if (idsts & USBIDSTS_ID_GND)
+					id = RID_GND;
+		} else {
+			if (!(idsts & USBIDSTS_ID_GND))
+				id = RID_GND;
+		}
+	}
+
+	ret = intel_scu_ipc_update_register(PMIC_USBIDCTRL,
+			USBIDCTRL_ACA_DETEN_D1 | PMIC_USBPHYCTRL_D0,
+			0);
+	if (ret)
+		otg_err(otg, "Fail to enable ACA&ID detection logic\n");
+
+	return id;
+}
+
+int shady_cove_get_id(struct dwc_otg2 *otg)
+{
+	u8 schgrirq1;
+	struct iio_channel *chan;
+	int ret, rid, id = RID_UNKNOWN;
+
+	ret = intel_scu_ipc_ioread8(PMIC_SCHGRIRQ1, &schgrirq1);
+	if (ret) {
+		otg_err(otg, "Fail to read id\n");
+		return id;
+	}
+
+	/* PMIC_SCHGRIRQ1_SUSBIDDET bit definition:
+	 * 0 = RID_A/B/C ; 1 = RID_GND ; 2 = RID_FLOAT */
+	if (schgrirq1 & PMIC_SCHGRIRQ1_SUSBIDDET(2))
+		return RID_FLOAT;
+	else if (schgrirq1 & PMIC_SCHGRIRQ1_SUSBIDDET(1))
+		return RID_GND;
+
+	chan = iio_channel_get(NULL, "USBID");
+	if (IS_ERR_OR_NULL(chan)) {
+		otg_err(otg, "%s: Fail to get USBID channel\n", __func__);
+		return id;
+	}
+
+	ret = iio_read_channel_raw(chan, &rid);
+	if (ret) {
+		otg_err(otg, "%s: Fail to read USBID channel", __func__);
+		goto done;
+	}
+
+	if ((rid > 11150) && (rid < 13640))
+		id = RID_A;
+	else if ((rid > 6120) && (rid < 7480))
+		id = RID_B;
+	else if ((rid > 3285) && (rid < 4015))
+		id = RID_C;
+
+done:
+
+	iio_channel_release(chan);
+	return id;
+}
+
+int dwc3_intel_get_id(struct dwc_otg2 *otg)
+{
+	if (is_basin_cove(otg))
+		return basin_cove_get_id(otg);
+	else
+		return shady_cove_get_id(otg);
+}
+
+int dwc3_intel_b_idle(struct dwc_otg2 *otg)
+{
+	u32 gctl, tmp;
+
+	/* Disable hibernation mode by default */
+	gctl = otg_read(otg, GCTL);
+	gctl &= ~GCTL_GBL_HIBERNATION_EN;
+	otg_write(otg, GCTL, gctl);
+
+	/* Reset ADP related registers */
+	otg_write(otg, ADPCFG, 0);
+	otg_write(otg, ADPCTL, 0);
+	otg_write(otg, ADPEVTEN, 0);
+	tmp = otg_read(otg, ADPEVT);
+	otg_write(otg, ADPEVT, tmp);
+
+	otg_write(otg, OCFG, 0);
+	otg_write(otg, OEVTEN, 0);
+	tmp = otg_read(otg, OEVT);
+	otg_write(otg, OEVT, tmp);
+
+	/* Force config to otg mode as default. */
+	dwc3_switch_mode(otg, GCTL_PRT_CAP_DIR_OTG);
+
+	if (!is_hybridvp(otg)) {
+		dwc_otg_charger_hwdet(false);
+		enable_usb_phy(otg, false);
+	}
+
+	mdelay(100);
+
+	return 0;
+}
+
+static int check_vbus_status(struct dwc_otg2 *otg)
+{
+	int ret;
+	u8 schgrirq1;
+
+	ret = intel_scu_ipc_ioread8(PMIC_SCHGRIRQ1, &schgrirq1);
+	if (ret)
+		return -EINVAL;
+
+	return schgrirq1 & PMIC_SCHGRIRQ1_SVBUSDET;
+}
+
+static int dwc3_intel_set_power(struct usb_phy *_otg,
+		unsigned ma)
+{
+	unsigned long flags;
+	struct dwc_otg2 *otg = dwc3_get_otg();
+	struct power_supply_cable_props cap;
+	struct intel_dwc_otg_pdata *data;
+
+	data = (struct intel_dwc_otg_pdata *)otg->otg_data;
+
+	/* On ANN, due the VBUS haven't connect to internal USB PHY. So
+	 * controller can't get disconnect interrupt which depend on vbus drop
+	 * detection.
+	 * So controller will receive early suspend and suspend interrupt. But
+	 * we can detect vbus status to determine current scenario is real
+	 * suspend or vbus drop.
+	 */
+	if (data->detect_vbus_drop && ma == OTG_DEVICE_SUSPEND) {
+		if (!check_vbus_status(otg)) {
+			cap.chrg_type = otg->charging_cap.chrg_type;
+			cap.ma = otg->charging_cap.ma;
+			cap.chrg_evt = POWER_SUPPLY_CHARGER_EVENT_DISCONNECT;
+			atomic_notifier_call_chain(&otg->usb2_phy.notifier,
+						USB_EVENT_CHARGER, &cap);
+			return 0;
+		}
+	}
+
+	if (otg->charging_cap.chrg_type ==
+			POWER_SUPPLY_CHARGER_TYPE_USB_CDP)
+		return 0;
+	else if (otg->charging_cap.chrg_type !=
+			POWER_SUPPLY_CHARGER_TYPE_USB_SDP) {
+		otg_err(otg, "%s: currently, chrg type is not SDP!\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (ma == OTG_DEVICE_SUSPEND) {
+		spin_lock_irqsave(&otg->lock, flags);
+		cap.chrg_type = otg->charging_cap.chrg_type;
+		cap.ma = otg->charging_cap.ma;
+		cap.chrg_evt = POWER_SUPPLY_CHARGER_EVENT_SUSPEND;
+		spin_unlock_irqrestore(&otg->lock, flags);
+
+		/* mA is zero mean D+/D- opened cable.
+		 * If SMIP set, then notify 500mA.
+		 * Otherwise, notify 0mA.
+		*/
+		if (!cap.ma) {
+			if (data->charging_compliance) {
+				cap.ma = 500;
+				cap.chrg_evt =
+					POWER_SUPPLY_CHARGER_EVENT_CONNECT;
+			}
+		/* For standard SDP, if SMIP set, then ignore suspend */
+		} else if (data->charging_compliance)
+			return 0;
+		/* Stander SDP(cap.mA != 0) and SMIP not set.
+		 * Should send 0mA with SUSPEND event
+		 */
+		else
+			cap.ma = 0;
+
+		atomic_notifier_call_chain(&otg->usb2_phy.notifier,
+				USB_EVENT_CHARGER, &cap);
+		otg_dbg(otg, "Notify EM");
+		otg_dbg(otg, "POWER_SUPPLY_CHARGER_EVENT_SUSPEND\n");
+
+		return 0;
+	} else if (ma == OTG_DEVICE_RESUME) {
+		otg_dbg(otg, "Notify EM");
+		otg_dbg(otg, "POWER_SUPPLY_CHARGER_EVENT_CONNECT\n");
+		dwc3_intel_notify_charger_type(otg,
+				POWER_SUPPLY_CHARGER_EVENT_CONNECT);
+
+		return 0;
+	}
+
+	/* For SMIP set case, only need to report 500/900mA */
+	if (data->charging_compliance) {
+		if ((ma != OTG_USB2_500MA) &&
+				(ma != OTG_USB3_900MA))
+			return 0;
+	}
+
+	/* Covert macro to integer number*/
+	switch (ma) {
+	case OTG_USB2_0MA:
+		ma = 0;
+		break;
+	case OTG_USB2_100MA:
+		ma = 100;
+		break;
+	case OTG_USB3_150MA:
+		ma = 150;
+		break;
+	case OTG_USB2_500MA:
+		ma = 500;
+		break;
+	case OTG_USB3_900MA:
+		ma = 900;
+		break;
+	default:
+		otg_err(otg, "Device driver set invalid SDP current value!\n");
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&otg->lock, flags);
+	otg->charging_cap.ma = ma;
+	spin_unlock_irqrestore(&otg->lock, flags);
+
+	dwc3_intel_notify_charger_type(otg,
+			POWER_SUPPLY_CHARGER_EVENT_CONNECT);
+
+	return 0;
+}
+
+int dwc3_intel_enable_vbus(struct dwc_otg2 *otg, int enable)
+{
+	atomic_notifier_call_chain(&otg->usb2_phy.notifier,
+			USB_EVENT_DRIVE_VBUS, &enable);
+
+	return 0;
+}
+
+static int dwc3_intel_notify_charger_type(struct dwc_otg2 *otg,
+		enum power_supply_charger_event event)
+{
+	struct power_supply_cable_props cap;
+	int ret = 0;
+	unsigned long flags;
+
+	if (!charger_detect_enable(otg) &&
+		((otg->charging_cap.chrg_type !=
+		POWER_SUPPLY_CHARGER_TYPE_USB_SDP) ||
+		 event == POWER_SUPPLY_CHARGER_EVENT_DISCONNECT))
+		return 0;
+
+	if (event > POWER_SUPPLY_CHARGER_EVENT_DISCONNECT) {
+		otg_err(otg,
+		"%s: Invalid power_supply_charger_event!\n", __func__);
+		return -EINVAL;
+	}
+
+	if ((otg->charging_cap.chrg_type ==
+			POWER_SUPPLY_CHARGER_TYPE_USB_SDP) &&
+			((otg->charging_cap.ma != 0) &&
+			 (otg->charging_cap.ma != 100) &&
+			 (otg->charging_cap.ma != 150) &&
+			 (otg->charging_cap.ma != 500) &&
+			 (otg->charging_cap.ma != 900))) {
+		otg_err(otg, "%s: invalid SDP current!\n", __func__);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&otg->lock, flags);
+	cap.chrg_type = otg->charging_cap.chrg_type;
+	cap.ma = otg->charging_cap.ma;
+	cap.chrg_evt = event;
+	spin_unlock_irqrestore(&otg->lock, flags);
+
+	atomic_notifier_call_chain(&otg->usb2_phy.notifier, USB_EVENT_CHARGER,
+			&cap);
+
+	return ret;
+}
+
+static enum power_supply_charger_cable_type
+			dwc3_intel_get_charger_type(struct dwc_otg2 *otg)
+{
+	int ret;
+	struct usb_phy *phy;
+	u8 val, vdat_det, chgd_serx_dm;
+	unsigned long timeout, interval;
+	enum power_supply_charger_cable_type type =
+		POWER_SUPPLY_CHARGER_TYPE_NONE;
+
+	if (!charger_detect_enable(otg))
+		return cap_record.chrg_type;
+
+	if (dwc3_intel_get_id(otg) == RID_GND)
+		return POWER_SUPPLY_CHARGER_TYPE_B_DEVICE;
+
+	phy = usb_get_phy(USB_PHY_TYPE_USB2);
+	if (!phy) {
+		otg_err(otg, "Get USB2 PHY failed\n");
+		return POWER_SUPPLY_CHARGER_TYPE_NONE;
+	}
+
+	/* PHY Enable:
+	 * Power on PHY
+	 */
+	enable_usb_phy(otg, true);
+
+	if (is_basin_cove(otg)) {
+		/* Enable ACA:
+		 * Enable ACA & ID detection logic.
+		 */
+		ret = intel_scu_ipc_update_register(PMIC_USBIDCTRL,
+				USBIDCTRL_ACA_DETEN_D1 | PMIC_USBPHYCTRL_D0,
+				USBIDCTRL_ACA_DETEN_D1 | PMIC_USBPHYCTRL_D0);
+		if (ret)
+			otg_err(otg, "Fail to enable ACA&ID detection logic\n");
+	}
+
+	/* DCD Enable: Change OPMODE to 01 (Non-driving),
+	 * TermSel to 0, &
+	 * XcvrSel to 01 (enable FS xcvr)
+	 */
+	usb_phy_io_write(phy, FUNCCTRL_OPMODE(1) | FUNCCTRL_XCVRSELECT(1),
+					TUSB1211_FUNC_CTRL_SET);
+
+	usb_phy_io_write(phy, FUNCCTRL_OPMODE(2) | FUNCCTRL_XCVRSELECT(2)
+					| FUNCCTRL_TERMSELECT,
+					TUSB1211_FUNC_CTRL_CLR);
+
+	/*Enable SW control*/
+	usb_phy_io_write(phy, PWCTRL_SW_CONTROL, TUSB1211_POWER_CONTROL_SET);
+
+	/* Enable IDPSRC */
+	usb_phy_io_write(phy, VS3_CHGD_IDP_SRC_EN,
+			TUSB1211_VENDOR_SPECIFIC3_SET);
+
+	/* Check DCD result, use same polling parameter */
+	timeout = jiffies + msecs_to_jiffies(DATACON_TIMEOUT);
+	interval = DATACON_INTERVAL * 1000; /* us */
+
+	/* DCD Check:
+	 * Delay 66.5 ms. (Note:
+	 * TIDP_SRC_ON + TCHGD_SERX_DEB =
+	 * 347.8us + 66.1ms).
+	 */
+	usleep_range(66500, 67000);
+
+	while (!time_after(jiffies, timeout)) {
+		/* Read DP logic level. */
+		val = usb_phy_io_read(phy, TUSB1211_VENDOR_SPECIFIC4);
+		if (val < 0) {
+			otg_err(otg, "ULPI read error! try again\n");
+			continue;
+		}
+
+		if (!(val & VS4_CHGD_SERX_DP)) {
+			otg_info(otg, "Data contact detected!\n");
+			break;
+		}
+
+		/* Polling interval */
+		usleep_range(interval, interval + 2000);
+	}
+
+	/* Disable DP pullup (Idp_src) */
+	usb_phy_io_write(phy, VS3_CHGD_IDP_SRC_EN,
+			TUSB1211_VENDOR_SPECIFIC3_CLR);
+
+	/* ID Check:
+	 * Check ID pin state.
+	 */
+	val = dwc3_intel_get_id(otg);
+	if (val == RID_GND) {
+		type = POWER_SUPPLY_CHARGER_TYPE_B_DEVICE;
+		goto cleanup;
+	} else if (val != RID_FLOAT) {
+		type = dwc3_intel_aca_check(otg);
+		goto cleanup;
+	}
+
+	/* SE1 Det Enable:
+	 * Read DP/DM logic level. Note: use DEBUG
+	 * because VS4 isn’t enabled in this situation.
+     */
+	val = usb_phy_io_read(phy, TUSB1211_DEBUG);
+	if (val < 0)
+		otg_err(otg, "ULPI read error!\n");
+
+	val &= DEBUG_LINESTATE;
+
+	/* If '11': SE1 detected; goto 'Cleanup'.
+	 * Else: goto 'Pri Det Enable'.
+	 */
+	if (val == 3) {
+		type = POWER_SUPPLY_CHARGER_TYPE_SE1;
+		goto cleanup;
+	}
+
+	/* Pri Det Enable:
+	 * Enable VDPSRC.
+	 */
+	usb_phy_io_write(phy, PWCTRL_DP_VSRC_EN, TUSB1211_POWER_CONTROL_SET);
+
+	/* Wait >106.1ms (40ms for BC
+	 * Tvdpsrc_on, 66.1ms for TI CHGD_SERX_DEB).
+	 */
+	msleep(107);
+
+	/* Pri Det Check:
+	 * Check if DM > VDATREF.
+	 */
+	vdat_det = usb_phy_io_read(phy, TUSB1211_POWER_CONTROL);
+	if (vdat_det < 0)
+		otg_err(otg, "ULPI read error!\n");
+
+	vdat_det &= PWCTRL_VDAT_DET;
+
+	/* Check if DM<VLGC */
+	chgd_serx_dm = usb_phy_io_read(phy, TUSB1211_VENDOR_SPECIFIC4);
+	if (chgd_serx_dm < 0)
+		otg_err(otg, "ULPI read error!\n");
+
+	chgd_serx_dm &= VS4_CHGD_SERX_DM;
+
+	/* If VDAT_DET==0 || CHGD_SERX_DM==1: SDP detected
+	 * If VDAT_DET==1 && CHGD_SERX_DM==0: CDP/DCP
+	 */
+	if (vdat_det == 0 || chgd_serx_dm == 1)
+		type = POWER_SUPPLY_CHARGER_TYPE_USB_SDP;
+
+	/* Disable VDPSRC. */
+	usb_phy_io_write(phy, PWCTRL_DP_VSRC_EN, TUSB1211_POWER_CONTROL_CLR);
+
+	/* If SDP, goto “Cleanup”.
+	 * Else, goto “Sec Det Enable”
+	 */
+	if (type == POWER_SUPPLY_CHARGER_TYPE_USB_SDP)
+		goto cleanup;
+
+	/* Sec Det Enable:
+	 * delay 1ms.
+	 */
+	usleep_range(1000, 1500);
+
+	/* Swap DP & DM */
+	usb_phy_io_write(phy, VS1_DATAPOLARITY, TUSB1211_VENDOR_SPECIFIC1_CLR);
+
+	/* Enable 'VDMSRC'. */
+	usb_phy_io_write(phy, PWCTRL_DP_VSRC_EN, TUSB1211_POWER_CONTROL_SET);
+
+	/* Wait >73ms (40ms for BC Tvdmsrc_on, 33ms for TI TVDPSRC_DEB) */
+	msleep(80);
+
+	/* Sec Det Check:
+	 * Check if DP>VDATREF.
+	 */
+	val = usb_phy_io_read(phy, TUSB1211_POWER_CONTROL);
+	if (val < 0)
+		otg_err(otg, "ULPI read error!\n");
+
+	val &= PWCTRL_VDAT_DET;
+
+	/* If VDAT_DET==0: CDP detected.
+	 * If VDAT_DET==1: DCP detected.
+	 */
+	if (!val)
+		type = POWER_SUPPLY_CHARGER_TYPE_USB_CDP;
+	else
+		type = POWER_SUPPLY_CHARGER_TYPE_USB_DCP;
+
+	/* Disable VDMSRC. */
+	usb_phy_io_write(phy, PWCTRL_DP_VSRC_EN, TUSB1211_POWER_CONTROL_CLR);
+
+	/* Swap DP & DM. */
+	usb_phy_io_write(phy, VS1_DATAPOLARITY, TUSB1211_VENDOR_SPECIFIC1_SET);
+
+cleanup:
+
+	/* If DCP detected, assert VDPSRC. */
+	if (type == POWER_SUPPLY_CHARGER_TYPE_USB_DCP)
+		usb_phy_io_write(phy, PWCTRL_SW_CONTROL | PWCTRL_DP_VSRC_EN,
+				TUSB1211_POWER_CONTROL_SET);
+
+	usb_put_phy(phy);
+
+	switch (type) {
+	case POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK:
+	case POWER_SUPPLY_CHARGER_TYPE_ACA_A:
+	case POWER_SUPPLY_CHARGER_TYPE_ACA_B:
+	case POWER_SUPPLY_CHARGER_TYPE_ACA_C:
+	case POWER_SUPPLY_CHARGER_TYPE_USB_DCP:
+	case POWER_SUPPLY_CHARGER_TYPE_USB_CDP:
+	case POWER_SUPPLY_CHARGER_TYPE_SE1:
+		dwc_otg_charger_hwdet(true);
+		break;
+	default:
+		break;
+	};
+
+	return type;
+}
+
+static int dwc3_intel_handle_notification(struct notifier_block *nb,
+		unsigned long event, void *data)
+{
+	int state;
+	unsigned long flags, valid_chrg_type;
+	struct dwc_otg2 *otg = dwc3_get_otg();
+	struct power_supply_cable_props *cap;
+
+	if (!otg)
+		return NOTIFY_BAD;
+
+	valid_chrg_type = POWER_SUPPLY_CHARGER_TYPE_USB_SDP |
+		POWER_SUPPLY_CHARGER_TYPE_USB_CDP |
+		POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK;
+
+	spin_lock_irqsave(&otg->lock, flags);
+	switch (event) {
+	case USB_EVENT_ID:
+		otg->otg_events |= OEVT_CONN_ID_STS_CHNG_EVNT;
+		state = NOTIFY_OK;
+		break;
+	case USB_EVENT_VBUS:
+		/* WA for EM driver which should not sent VBUS event
+		 * if UTMI PHY selected. */
+		if (!charger_detect_enable(otg)) {
+			state = NOTIFY_OK;
+			goto done;
+		}
+
+		if (*(int *)data) {
+			otg->otg_events |= OEVT_B_DEV_SES_VLD_DET_EVNT;
+			otg->otg_events &= ~OEVT_A_DEV_SESS_END_DET_EVNT;
+		} else {
+			otg->otg_events |= OEVT_A_DEV_SESS_END_DET_EVNT;
+			otg->otg_events &= ~OEVT_B_DEV_SES_VLD_DET_EVNT;
+		}
+		state = NOTIFY_OK;
+		break;
+	case USB_EVENT_CHARGER:
+		if (charger_detect_enable(otg)) {
+			state = NOTIFY_DONE;
+			goto done;
+		}
+		cap = (struct power_supply_cable_props *)data;
+		if (!(cap->chrg_type & valid_chrg_type)) {
+			otg_err(otg, "Ignore invalid charger type!\n");
+			state = NOTIFY_DONE;
+			goto done;
+		}
+
+		/* Ignore the events which send by USB driver itself. */
+		if (cap->chrg_evt == POWER_SUPPLY_CHARGER_EVENT_CONNECT)
+			if (cap_record.chrg_type == POWER_SUPPLY_CHARGER_TYPE_USB_SDP) {
+				state = NOTIFY_DONE;
+				goto done;
+			}
+
+		if (cap->chrg_evt == POWER_SUPPLY_CHARGER_EVENT_CONNECT) {
+			otg->otg_events |= OEVT_B_DEV_SES_VLD_DET_EVNT;
+			otg->otg_events &= ~OEVT_A_DEV_SESS_END_DET_EVNT;
+
+			cap_record.chrg_type = cap->chrg_type;
+			cap_record.ma = cap->ma;
+			cap_record.chrg_evt = cap->chrg_evt;
+		} else if (cap->chrg_evt ==
+				POWER_SUPPLY_CHARGER_EVENT_DISCONNECT) {
+			otg->otg_events |= OEVT_A_DEV_SESS_END_DET_EVNT;
+			otg->otg_events &= ~OEVT_B_DEV_SES_VLD_DET_EVNT;
+
+			cap_record.chrg_type = POWER_SUPPLY_CHARGER_TYPE_NONE;
+			cap_record.ma = 0;
+			cap_record.chrg_evt =
+				POWER_SUPPLY_CHARGER_EVENT_DISCONNECT;
+		}
+
+		if (cap->chrg_type == POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK)
+			otg->otg_events |= OEVT_CONN_ID_STS_CHNG_EVNT;
+
+		state = NOTIFY_OK;
+		break;
+	default:
+		otg_dbg(otg, "DWC OTG Notify unknow notify message\n");
+		state = NOTIFY_DONE;
+	}
+
+	dwc3_wakeup_otg_thread(otg);
+done:
+	spin_unlock_irqrestore(&otg->lock, flags);
+
+	return state;
+
+}
+
+int dwc3_intel_prepare_start_host(struct dwc_otg2 *otg)
+{
+	dwc3_switch_mode(otg, GCTL_PRT_CAP_DIR_HOST);
+
+	if (!is_hybridvp(otg)) {
+		enable_usb_phy(otg, true);
+		usb2phy_eye_optimization(otg);
+		disable_phy_auto_resume(otg);
+	}
+
+	return 0;
+}
+
+int dwc3_intel_prepare_start_peripheral(struct dwc_otg2 *otg)
+{
+	if (!is_hybridvp(otg)) {
+		enable_usb_phy(otg, true);
+		usb2phy_eye_optimization(otg);
+		disable_phy_auto_resume(otg);
+	}
+
+	return 0;
+}
+
+int dwc3_intel_suspend(struct dwc_otg2 *otg)
+{
+	int ret;
+	struct usb_phy *phy;
+	struct pci_dev *pci_dev;
+	struct usb_hcd *hcd = NULL;
+	pci_power_t state = PCI_D3cold;
+
+	if (!otg)
+		return 0;
+
+	hcd = container_of(otg->otg.host, struct usb_hcd, self);
+
+	pci_dev = to_pci_dev(otg->dev);
+
+	if (otg->state == DWC_STATE_A_HOST &&
+			otg->suspend_host) {
+		/* Check if USB2 ULPI PHY is hang via access its internal
+		 * registers. If hang, then do hard reset before enter
+		 * hibernation mode. Otherwise, the USB2 PHY can't enter
+		 * suspended state which will blocking U2PMU can't get ready
+		 * then can't enter D0i3hot forever in SCU FW.
+		 */
+		if (!is_utmi_phy(otg)) {
+			phy = usb_get_phy(USB_PHY_TYPE_USB2);
+			if (!phy)
+				return -ENODEV;
+			if (usb_phy_io_read(phy, ULPI_VENDOR_ID_LOW) < 0) {
+				enable_usb_phy(otg, 0);
+				enable_usb_phy(otg, 1);
+			}
+			usb_put_phy(phy);
+		}
+
+		ret = otg->suspend_host(hcd);
+		if (ret) {
+			otg_err(otg, "dwc3-host enter suspend faield: %d\n", ret);
+			return ret;
+		}
+	}
+
+	if (otg->state == DWC_STATE_B_PERIPHERAL ||
+			otg->state == DWC_STATE_A_HOST)
+		state = PCI_D3hot;
+
+	set_sus_phy(otg, 1);
+
+	if (pci_save_state(pci_dev)) {
+		otg_err(otg, "pci_save_state failed!\n");
+		return -EIO;
+	}
+
+	pci_disable_device(pci_dev);
+	if ((state == PCI_D3cold) && is_utmi_phy(otg)) {
+		/* Important!!  Whenever the VUSBPHY rail is disabled, SW
+		 * must assert USBRST# to isolate the SOC’s DP/DM pins from the
+		 * outside world.  There is a risk of damage to the SOC if a
+		 * peripheral were to bias DP/DM to 3.3V when the SOC is
+		 * unpowered. */
+		ret = intel_scu_ipc_update_register(PMIC_USBPHYCTRL,
+				0x0, USBPHYRSTB);
+		if (ret)
+			otg_err(otg, "%s: ipc update failed\n", __func__);
+	}
+	pci_set_power_state(pci_dev, state);
+	return 0;
+}
+
+int dwc3_intel_resume(struct dwc_otg2 *otg)
+{
+	struct pci_dev *pci_dev;
+	struct usb_hcd *hcd = NULL;
+	u32 data;
+	int ret;
+
+	if (!otg)
+		return 0;
+
+	hcd = container_of(otg->otg.host, struct usb_hcd, self);
+	/* After resume from D0i3cold. The UTMI PHY D+ drive issue
+	 * reproduced due to all setting be reseted. So switch to OTG
+	 * mode avoid D+ drive too early.
+	 */
+	if ((otg->state == DWC_STATE_B_IDLE ||
+		otg->state == DWC_STATE_CHARGING ||
+		otg->state == DWC_STATE_WAIT_VBUS_FALL ||
+		otg->state == DWC_STATE_WAIT_VBUS_RAISE) &&
+			is_utmi_phy(otg)) {
+		/* Reconnect DP/DM between Pmic and SOC for support host
+		 * and device mode. */
+		ret = intel_scu_ipc_update_register(PMIC_USBPHYCTRL,
+				USBPHYRSTB, USBPHYRSTB);
+		if (ret)
+			otg_err(otg, "%s: ipc update failed\n", __func__);
+
+		otg_write(otg, OEVTEN, 0);
+		otg_write(otg, OCTL, 0);
+		dwc3_switch_mode(otg, GCTL_PRT_CAP_DIR_OTG);
+	}
+
+	/* This is one SCU WA. SCU should set GUSB2PHYCFG0
+	 * bit 4 for ULPI setting. But SCU haven't do that.
+	 * So do WA first until SCU fix.
+	 */
+	data = otg_read(otg, GUSB2PHYCFG0);
+	if (is_utmi_phy(otg))
+		data &= ~(1 << 4);
+	else
+		data |= (1 << 4);
+	otg_write(otg, GUSB2PHYCFG0, data);
+
+	pci_dev = to_pci_dev(otg->dev);
+
+	/* From synopsys spec 12.2.11.
+	 * Software cannot access memory-mapped I/O space
+	 * for 10ms. Delay 5 ms here should be enough. Too
+	 * long a delay causes hibernation exit failure.
+	 */
+	mdelay(5);
+
+	pci_restore_state(pci_dev);
+	if (pci_enable_device(pci_dev) < 0) {
+		otg_err(otg, "pci_enable_device failed.\n");
+		return -EIO;
+	}
+
+	set_sus_phy(otg, 0);
+
+	/* Delay 1ms waiting PHY clock debounce.
+	 * Without this debounce, will met fabric error randomly.
+	 **/
+	mdelay(1);
+
+	if (otg->state == DWC_STATE_A_HOST &&
+			otg->resume_host)
+		otg->resume_host(hcd);
+
+
+	return 0;
+}
+
+struct dwc3_otg_hw_ops dwc3_intel_otg_pdata = {
+	.mode = DWC3_DRD,
+	.bus = DWC3_PCI,
+	.get_id = dwc3_intel_get_id,
+	.b_idle = dwc3_intel_b_idle,
+	.set_power = dwc3_intel_set_power,
+	.enable_vbus = dwc3_intel_enable_vbus,
+	.platform_init = dwc3_intel_platform_init,
+	.get_charger_type = dwc3_intel_get_charger_type,
+	.otg_notifier_handler = dwc3_intel_handle_notification,
+	.prepare_start_peripheral = dwc3_intel_prepare_start_peripheral,
+	.prepare_start_host = dwc3_intel_prepare_start_host,
+	.notify_charger_type = dwc3_intel_notify_charger_type,
+
+	.suspend = dwc3_intel_suspend,
+	.resume = dwc3_intel_resume,
+};
+
+static int __init dwc3_intel_init(void)
+{
+	return dwc3_otg_register(&dwc3_intel_otg_pdata);
+}
+module_init(dwc3_intel_init);
+
+static void __exit dwc3_intel_exit(void)
+{
+	dwc3_otg_unregister(&dwc3_intel_otg_pdata);
+}
+module_exit(dwc3_intel_exit);
+
+MODULE_AUTHOR("Wang Yu <yu.y.wang@intel.com>");
+MODULE_DESCRIPTION("DWC3 Intel OTG Driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(VERSION);
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
deleted file mode 100644
index 34638b9..0000000
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ /dev/null
@@ -1,481 +0,0 @@
-/**
- * dwc3-omap.c - OMAP Specific Glue layer
- *
- * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
- *
- * Authors: Felipe Balbi <balbi@ti.com>,
- *	    Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- *    to endorse or promote products derived from this software without
- *    specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2, as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/platform_device.h>
-#include <linux/platform_data/dwc3-omap.h>
-#include <linux/usb/dwc3-omap.h>
-#include <linux/pm_runtime.h>
-#include <linux/dma-mapping.h>
-#include <linux/ioport.h>
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/of_platform.h>
-
-#include <linux/usb/otg.h>
-
-/*
- * All these registers belong to OMAP's Wrapper around the
- * DesignWare USB3 Core.
- */
-
-#define USBOTGSS_REVISION			0x0000
-#define USBOTGSS_SYSCONFIG			0x0010
-#define USBOTGSS_IRQ_EOI			0x0020
-#define USBOTGSS_IRQSTATUS_RAW_0		0x0024
-#define USBOTGSS_IRQSTATUS_0			0x0028
-#define USBOTGSS_IRQENABLE_SET_0		0x002c
-#define USBOTGSS_IRQENABLE_CLR_0		0x0030
-#define USBOTGSS_IRQSTATUS_RAW_1		0x0034
-#define USBOTGSS_IRQSTATUS_1			0x0038
-#define USBOTGSS_IRQENABLE_SET_1		0x003c
-#define USBOTGSS_IRQENABLE_CLR_1		0x0040
-#define USBOTGSS_UTMI_OTG_CTRL			0x0080
-#define USBOTGSS_UTMI_OTG_STATUS		0x0084
-#define USBOTGSS_MMRAM_OFFSET			0x0100
-#define USBOTGSS_FLADJ				0x0104
-#define USBOTGSS_DEBUG_CFG			0x0108
-#define USBOTGSS_DEBUG_DATA			0x010c
-
-/* SYSCONFIG REGISTER */
-#define USBOTGSS_SYSCONFIG_DMADISABLE		(1 << 16)
-
-/* IRQ_EOI REGISTER */
-#define USBOTGSS_IRQ_EOI_LINE_NUMBER		(1 << 0)
-
-/* IRQS0 BITS */
-#define USBOTGSS_IRQO_COREIRQ_ST		(1 << 0)
-
-/* IRQ1 BITS */
-#define USBOTGSS_IRQ1_DMADISABLECLR		(1 << 17)
-#define USBOTGSS_IRQ1_OEVT			(1 << 16)
-#define USBOTGSS_IRQ1_DRVVBUS_RISE		(1 << 13)
-#define USBOTGSS_IRQ1_CHRGVBUS_RISE		(1 << 12)
-#define USBOTGSS_IRQ1_DISCHRGVBUS_RISE		(1 << 11)
-#define USBOTGSS_IRQ1_IDPULLUP_RISE		(1 << 8)
-#define USBOTGSS_IRQ1_DRVVBUS_FALL		(1 << 5)
-#define USBOTGSS_IRQ1_CHRGVBUS_FALL		(1 << 4)
-#define USBOTGSS_IRQ1_DISCHRGVBUS_FALL		(1 << 3)
-#define USBOTGSS_IRQ1_IDPULLUP_FALL		(1 << 0)
-
-/* UTMI_OTG_CTRL REGISTER */
-#define USBOTGSS_UTMI_OTG_CTRL_DRVVBUS		(1 << 5)
-#define USBOTGSS_UTMI_OTG_CTRL_CHRGVBUS		(1 << 4)
-#define USBOTGSS_UTMI_OTG_CTRL_DISCHRGVBUS	(1 << 3)
-#define USBOTGSS_UTMI_OTG_CTRL_IDPULLUP		(1 << 0)
-
-/* UTMI_OTG_STATUS REGISTER */
-#define USBOTGSS_UTMI_OTG_STATUS_SW_MODE	(1 << 31)
-#define USBOTGSS_UTMI_OTG_STATUS_POWERPRESENT	(1 << 9)
-#define USBOTGSS_UTMI_OTG_STATUS_TXBITSTUFFENABLE (1 << 8)
-#define USBOTGSS_UTMI_OTG_STATUS_IDDIG		(1 << 4)
-#define USBOTGSS_UTMI_OTG_STATUS_SESSEND	(1 << 3)
-#define USBOTGSS_UTMI_OTG_STATUS_SESSVALID	(1 << 2)
-#define USBOTGSS_UTMI_OTG_STATUS_VBUSVALID	(1 << 1)
-
-struct dwc3_omap {
-	/* device lock */
-	spinlock_t		lock;
-
-	struct device		*dev;
-
-	int			irq;
-	void __iomem		*base;
-
-	u32			utmi_otg_status;
-
-	u32			dma_status:1;
-};
-
-static struct dwc3_omap		*_omap;
-
-static inline u32 dwc3_omap_readl(void __iomem *base, u32 offset)
-{
-	return readl(base + offset);
-}
-
-static inline void dwc3_omap_writel(void __iomem *base, u32 offset, u32 value)
-{
-	writel(value, base + offset);
-}
-
-int dwc3_omap_mailbox(enum omap_dwc3_vbus_id_status status)
-{
-	u32			val;
-	struct dwc3_omap	*omap = _omap;
-
-	if (!omap)
-		return -EPROBE_DEFER;
-
-	switch (status) {
-	case OMAP_DWC3_ID_GROUND:
-		dev_dbg(omap->dev, "ID GND\n");
-
-		val = dwc3_omap_readl(omap->base, USBOTGSS_UTMI_OTG_STATUS);
-		val &= ~(USBOTGSS_UTMI_OTG_STATUS_IDDIG
-				| USBOTGSS_UTMI_OTG_STATUS_VBUSVALID
-				| USBOTGSS_UTMI_OTG_STATUS_SESSEND);
-		val |= USBOTGSS_UTMI_OTG_STATUS_SESSVALID
-				| USBOTGSS_UTMI_OTG_STATUS_POWERPRESENT;
-		dwc3_omap_writel(omap->base, USBOTGSS_UTMI_OTG_STATUS, val);
-		break;
-
-	case OMAP_DWC3_VBUS_VALID:
-		dev_dbg(omap->dev, "VBUS Connect\n");
-
-		val = dwc3_omap_readl(omap->base, USBOTGSS_UTMI_OTG_STATUS);
-		val &= ~USBOTGSS_UTMI_OTG_STATUS_SESSEND;
-		val |= USBOTGSS_UTMI_OTG_STATUS_IDDIG
-				| USBOTGSS_UTMI_OTG_STATUS_VBUSVALID
-				| USBOTGSS_UTMI_OTG_STATUS_SESSVALID
-				| USBOTGSS_UTMI_OTG_STATUS_POWERPRESENT;
-		dwc3_omap_writel(omap->base, USBOTGSS_UTMI_OTG_STATUS, val);
-		break;
-
-	case OMAP_DWC3_ID_FLOAT:
-	case OMAP_DWC3_VBUS_OFF:
-		dev_dbg(omap->dev, "VBUS Disconnect\n");
-
-		val = dwc3_omap_readl(omap->base, USBOTGSS_UTMI_OTG_STATUS);
-		val &= ~(USBOTGSS_UTMI_OTG_STATUS_SESSVALID
-				| USBOTGSS_UTMI_OTG_STATUS_VBUSVALID
-				| USBOTGSS_UTMI_OTG_STATUS_POWERPRESENT);
-		val |= USBOTGSS_UTMI_OTG_STATUS_SESSEND
-				| USBOTGSS_UTMI_OTG_STATUS_IDDIG;
-		dwc3_omap_writel(omap->base, USBOTGSS_UTMI_OTG_STATUS, val);
-		break;
-
-	default:
-		dev_dbg(omap->dev, "ID float\n");
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(dwc3_omap_mailbox);
-
-static irqreturn_t dwc3_omap_interrupt(int irq, void *_omap)
-{
-	struct dwc3_omap	*omap = _omap;
-	u32			reg;
-
-	spin_lock(&omap->lock);
-
-	reg = dwc3_omap_readl(omap->base, USBOTGSS_IRQSTATUS_1);
-
-	if (reg & USBOTGSS_IRQ1_DMADISABLECLR) {
-		dev_dbg(omap->dev, "DMA Disable was Cleared\n");
-		omap->dma_status = false;
-	}
-
-	if (reg & USBOTGSS_IRQ1_OEVT)
-		dev_dbg(omap->dev, "OTG Event\n");
-
-	if (reg & USBOTGSS_IRQ1_DRVVBUS_RISE)
-		dev_dbg(omap->dev, "DRVVBUS Rise\n");
-
-	if (reg & USBOTGSS_IRQ1_CHRGVBUS_RISE)
-		dev_dbg(omap->dev, "CHRGVBUS Rise\n");
-
-	if (reg & USBOTGSS_IRQ1_DISCHRGVBUS_RISE)
-		dev_dbg(omap->dev, "DISCHRGVBUS Rise\n");
-
-	if (reg & USBOTGSS_IRQ1_IDPULLUP_RISE)
-		dev_dbg(omap->dev, "IDPULLUP Rise\n");
-
-	if (reg & USBOTGSS_IRQ1_DRVVBUS_FALL)
-		dev_dbg(omap->dev, "DRVVBUS Fall\n");
-
-	if (reg & USBOTGSS_IRQ1_CHRGVBUS_FALL)
-		dev_dbg(omap->dev, "CHRGVBUS Fall\n");
-
-	if (reg & USBOTGSS_IRQ1_DISCHRGVBUS_FALL)
-		dev_dbg(omap->dev, "DISCHRGVBUS Fall\n");
-
-	if (reg & USBOTGSS_IRQ1_IDPULLUP_FALL)
-		dev_dbg(omap->dev, "IDPULLUP Fall\n");
-
-	dwc3_omap_writel(omap->base, USBOTGSS_IRQSTATUS_1, reg);
-
-	reg = dwc3_omap_readl(omap->base, USBOTGSS_IRQSTATUS_0);
-	dwc3_omap_writel(omap->base, USBOTGSS_IRQSTATUS_0, reg);
-
-	spin_unlock(&omap->lock);
-
-	return IRQ_HANDLED;
-}
-
-static int dwc3_omap_remove_core(struct device *dev, void *c)
-{
-	struct platform_device *pdev = to_platform_device(dev);
-
-	platform_device_unregister(pdev);
-
-	return 0;
-}
-
-static void dwc3_omap_enable_irqs(struct dwc3_omap *omap)
-{
-	u32			reg;
-
-	/* enable all IRQs */
-	reg = USBOTGSS_IRQO_COREIRQ_ST;
-	dwc3_omap_writel(omap->base, USBOTGSS_IRQENABLE_SET_0, reg);
-
-	reg = (USBOTGSS_IRQ1_OEVT |
-			USBOTGSS_IRQ1_DRVVBUS_RISE |
-			USBOTGSS_IRQ1_CHRGVBUS_RISE |
-			USBOTGSS_IRQ1_DISCHRGVBUS_RISE |
-			USBOTGSS_IRQ1_IDPULLUP_RISE |
-			USBOTGSS_IRQ1_DRVVBUS_FALL |
-			USBOTGSS_IRQ1_CHRGVBUS_FALL |
-			USBOTGSS_IRQ1_DISCHRGVBUS_FALL |
-			USBOTGSS_IRQ1_IDPULLUP_FALL);
-
-	dwc3_omap_writel(omap->base, USBOTGSS_IRQENABLE_SET_1, reg);
-}
-
-static void dwc3_omap_disable_irqs(struct dwc3_omap *omap)
-{
-	/* disable all IRQs */
-	dwc3_omap_writel(omap->base, USBOTGSS_IRQENABLE_SET_1, 0x00);
-	dwc3_omap_writel(omap->base, USBOTGSS_IRQENABLE_SET_0, 0x00);
-}
-
-static u64 dwc3_omap_dma_mask = DMA_BIT_MASK(32);
-
-static int dwc3_omap_probe(struct platform_device *pdev)
-{
-	struct device_node	*node = pdev->dev.of_node;
-
-	struct dwc3_omap	*omap;
-	struct resource		*res;
-	struct device		*dev = &pdev->dev;
-
-	int			ret = -ENOMEM;
-	int			irq;
-
-	int			utmi_mode = 0;
-
-	u32			reg;
-
-	void __iomem		*base;
-
-	if (!node) {
-		dev_err(dev, "device node not found\n");
-		return -EINVAL;
-	}
-
-	omap = devm_kzalloc(dev, sizeof(*omap), GFP_KERNEL);
-	if (!omap) {
-		dev_err(dev, "not enough memory\n");
-		return -ENOMEM;
-	}
-
-	platform_set_drvdata(pdev, omap);
-
-	irq = platform_get_irq(pdev, 0);
-	if (irq < 0) {
-		dev_err(dev, "missing IRQ resource\n");
-		return -EINVAL;
-	}
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		dev_err(dev, "missing memory base resource\n");
-		return -EINVAL;
-	}
-
-	base = devm_ioremap_nocache(dev, res->start, resource_size(res));
-	if (!base) {
-		dev_err(dev, "ioremap failed\n");
-		return -ENOMEM;
-	}
-
-	spin_lock_init(&omap->lock);
-
-	omap->dev	= dev;
-	omap->irq	= irq;
-	omap->base	= base;
-	dev->dma_mask	= &dwc3_omap_dma_mask;
-
-	/*
-	 * REVISIT if we ever have two instances of the wrapper, we will be
-	 * in big trouble
-	 */
-	_omap	= omap;
-
-	pm_runtime_enable(dev);
-	ret = pm_runtime_get_sync(dev);
-	if (ret < 0) {
-		dev_err(dev, "get_sync failed with err %d\n", ret);
-		return ret;
-	}
-
-	reg = dwc3_omap_readl(omap->base, USBOTGSS_UTMI_OTG_STATUS);
-
-	of_property_read_u32(node, "utmi-mode", &utmi_mode);
-
-	switch (utmi_mode) {
-	case DWC3_OMAP_UTMI_MODE_SW:
-		reg |= USBOTGSS_UTMI_OTG_STATUS_SW_MODE;
-		break;
-	case DWC3_OMAP_UTMI_MODE_HW:
-		reg &= ~USBOTGSS_UTMI_OTG_STATUS_SW_MODE;
-		break;
-	default:
-		dev_dbg(dev, "UNKNOWN utmi mode %d\n", utmi_mode);
-	}
-
-	dwc3_omap_writel(omap->base, USBOTGSS_UTMI_OTG_STATUS, reg);
-
-	/* check the DMA Status */
-	reg = dwc3_omap_readl(omap->base, USBOTGSS_SYSCONFIG);
-	omap->dma_status = !!(reg & USBOTGSS_SYSCONFIG_DMADISABLE);
-
-	ret = devm_request_irq(dev, omap->irq, dwc3_omap_interrupt, 0,
-			"dwc3-omap", omap);
-	if (ret) {
-		dev_err(dev, "failed to request IRQ #%d --> %d\n",
-				omap->irq, ret);
-		return ret;
-	}
-
-	dwc3_omap_enable_irqs(omap);
-
-	ret = of_platform_populate(node, NULL, NULL, dev);
-	if (ret) {
-		dev_err(&pdev->dev, "failed to create dwc3 core\n");
-		return ret;
-	}
-
-	return 0;
-}
-
-static int dwc3_omap_remove(struct platform_device *pdev)
-{
-	struct dwc3_omap	*omap = platform_get_drvdata(pdev);
-
-	dwc3_omap_disable_irqs(omap);
-	pm_runtime_put_sync(&pdev->dev);
-	pm_runtime_disable(&pdev->dev);
-	device_for_each_child(&pdev->dev, NULL, dwc3_omap_remove_core);
-
-	return 0;
-}
-
-static const struct of_device_id of_dwc3_match[] = {
-	{
-		.compatible =	"ti,dwc3"
-	},
-	{ },
-};
-MODULE_DEVICE_TABLE(of, of_dwc3_match);
-
-#ifdef CONFIG_PM_SLEEP
-static int dwc3_omap_prepare(struct device *dev)
-{
-	struct dwc3_omap	*omap = dev_get_drvdata(dev);
-
-	dwc3_omap_disable_irqs(omap);
-
-	return 0;
-}
-
-static void dwc3_omap_complete(struct device *dev)
-{
-	struct dwc3_omap	*omap = dev_get_drvdata(dev);
-
-	dwc3_omap_enable_irqs(omap);
-}
-
-static int dwc3_omap_suspend(struct device *dev)
-{
-	struct dwc3_omap	*omap = dev_get_drvdata(dev);
-
-	omap->utmi_otg_status = dwc3_omap_readl(omap->base,
-			USBOTGSS_UTMI_OTG_STATUS);
-
-	return 0;
-}
-
-static int dwc3_omap_resume(struct device *dev)
-{
-	struct dwc3_omap	*omap = dev_get_drvdata(dev);
-
-	dwc3_omap_writel(omap->base, USBOTGSS_UTMI_OTG_STATUS,
-			omap->utmi_otg_status);
-
-	pm_runtime_disable(dev);
-	pm_runtime_set_active(dev);
-	pm_runtime_enable(dev);
-
-	return 0;
-}
-
-static const struct dev_pm_ops dwc3_omap_dev_pm_ops = {
-	.prepare	= dwc3_omap_prepare,
-	.complete	= dwc3_omap_complete,
-
-	SET_SYSTEM_SLEEP_PM_OPS(dwc3_omap_suspend, dwc3_omap_resume)
-};
-
-#define DEV_PM_OPS	(&dwc3_omap_dev_pm_ops)
-#else
-#define DEV_PM_OPS	NULL
-#endif /* CONFIG_PM_SLEEP */
-
-static struct platform_driver dwc3_omap_driver = {
-	.probe		= dwc3_omap_probe,
-	.remove		= dwc3_omap_remove,
-	.driver		= {
-		.name	= "omap-dwc3",
-		.of_match_table	= of_dwc3_match,
-		.pm	= DEV_PM_OPS,
-	},
-};
-
-module_platform_driver(dwc3_omap_driver);
-
-MODULE_ALIAS("platform:omap-dwc3");
-MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_DESCRIPTION("DesignWare USB3 OMAP Glue Layer");
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
deleted file mode 100644
index eba9e2b..0000000
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ /dev/null
@@ -1,264 +0,0 @@
-/**
- * dwc3-pci.c - PCI Specific glue layer
- *
- * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
- *
- * Authors: Felipe Balbi <balbi@ti.com>,
- *	    Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- *    to endorse or promote products derived from this software without
- *    specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2, as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/pci.h>
-#include <linux/platform_device.h>
-
-#include <linux/usb/otg.h>
-#include <linux/usb/nop-usb-xceiv.h>
-
-/* FIXME define these in <linux/pci_ids.h> */
-#define PCI_VENDOR_ID_SYNOPSYS		0x16c3
-#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3	0xabcd
-
-struct dwc3_pci {
-	struct device		*dev;
-	struct platform_device	*dwc3;
-	struct platform_device	*usb2_phy;
-	struct platform_device	*usb3_phy;
-};
-
-static int dwc3_pci_register_phys(struct dwc3_pci *glue)
-{
-	struct nop_usb_xceiv_platform_data pdata;
-	struct platform_device	*pdev;
-	int			ret;
-
-	memset(&pdata, 0x00, sizeof(pdata));
-
-	pdev = platform_device_alloc("nop_usb_xceiv", 0);
-	if (!pdev)
-		return -ENOMEM;
-
-	glue->usb2_phy = pdev;
-	pdata.type = USB_PHY_TYPE_USB2;
-
-	ret = platform_device_add_data(glue->usb2_phy, &pdata, sizeof(pdata));
-	if (ret)
-		goto err1;
-
-	pdev = platform_device_alloc("nop_usb_xceiv", 1);
-	if (!pdev) {
-		ret = -ENOMEM;
-		goto err1;
-	}
-
-	glue->usb3_phy = pdev;
-	pdata.type = USB_PHY_TYPE_USB3;
-
-	ret = platform_device_add_data(glue->usb3_phy, &pdata, sizeof(pdata));
-	if (ret)
-		goto err2;
-
-	ret = platform_device_add(glue->usb2_phy);
-	if (ret)
-		goto err2;
-
-	ret = platform_device_add(glue->usb3_phy);
-	if (ret)
-		goto err3;
-
-	return 0;
-
-err3:
-	platform_device_del(glue->usb2_phy);
-
-err2:
-	platform_device_put(glue->usb3_phy);
-
-err1:
-	platform_device_put(glue->usb2_phy);
-
-	return ret;
-}
-
-static int dwc3_pci_probe(struct pci_dev *pci,
-		const struct pci_device_id *id)
-{
-	struct resource		res[2];
-	struct platform_device	*dwc3;
-	struct dwc3_pci		*glue;
-	int			ret = -ENOMEM;
-	struct device		*dev = &pci->dev;
-
-	glue = devm_kzalloc(dev, sizeof(*glue), GFP_KERNEL);
-	if (!glue) {
-		dev_err(dev, "not enough memory\n");
-		return -ENOMEM;
-	}
-
-	glue->dev = dev;
-
-	ret = pci_enable_device(pci);
-	if (ret) {
-		dev_err(dev, "failed to enable pci device\n");
-		return -ENODEV;
-	}
-
-	pci_set_power_state(pci, PCI_D0);
-	pci_set_master(pci);
-
-	ret = dwc3_pci_register_phys(glue);
-	if (ret) {
-		dev_err(dev, "couldn't register PHYs\n");
-		return ret;
-	}
-
-	dwc3 = platform_device_alloc("dwc3", PLATFORM_DEVID_AUTO);
-	if (!dwc3) {
-		dev_err(dev, "couldn't allocate dwc3 device\n");
-		ret = -ENOMEM;
-		goto err1;
-	}
-
-	memset(res, 0x00, sizeof(struct resource) * ARRAY_SIZE(res));
-
-	res[0].start	= pci_resource_start(pci, 0);
-	res[0].end	= pci_resource_end(pci, 0);
-	res[0].name	= "dwc_usb3";
-	res[0].flags	= IORESOURCE_MEM;
-
-	res[1].start	= pci->irq;
-	res[1].name	= "dwc_usb3";
-	res[1].flags	= IORESOURCE_IRQ;
-
-	ret = platform_device_add_resources(dwc3, res, ARRAY_SIZE(res));
-	if (ret) {
-		dev_err(dev, "couldn't add resources to dwc3 device\n");
-		goto err1;
-	}
-
-	pci_set_drvdata(pci, glue);
-
-	dma_set_coherent_mask(&dwc3->dev, dev->coherent_dma_mask);
-
-	dwc3->dev.dma_mask = dev->dma_mask;
-	dwc3->dev.dma_parms = dev->dma_parms;
-	dwc3->dev.parent = dev;
-	glue->dwc3 = dwc3;
-
-	ret = platform_device_add(dwc3);
-	if (ret) {
-		dev_err(dev, "failed to register dwc3 device\n");
-		goto err3;
-	}
-
-	return 0;
-
-err3:
-	pci_set_drvdata(pci, NULL);
-	platform_device_put(dwc3);
-err1:
-	pci_disable_device(pci);
-
-	return ret;
-}
-
-static void dwc3_pci_remove(struct pci_dev *pci)
-{
-	struct dwc3_pci	*glue = pci_get_drvdata(pci);
-
-	platform_device_unregister(glue->dwc3);
-	platform_device_unregister(glue->usb2_phy);
-	platform_device_unregister(glue->usb3_phy);
-	pci_set_drvdata(pci, NULL);
-	pci_disable_device(pci);
-}
-
-static DEFINE_PCI_DEVICE_TABLE(dwc3_pci_id_table) = {
-	{
-		PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS,
-				PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3),
-	},
-	{  }	/* Terminating Entry */
-};
-MODULE_DEVICE_TABLE(pci, dwc3_pci_id_table);
-
-#ifdef CONFIG_PM
-static int dwc3_pci_suspend(struct device *dev)
-{
-	struct pci_dev	*pci = to_pci_dev(dev);
-
-	pci_disable_device(pci);
-
-	return 0;
-}
-
-static int dwc3_pci_resume(struct device *dev)
-{
-	struct pci_dev	*pci = to_pci_dev(dev);
-	int		ret;
-
-	ret = pci_enable_device(pci);
-	if (ret) {
-		dev_err(dev, "can't re-enable device --> %d\n", ret);
-		return ret;
-	}
-
-	pci_set_master(pci);
-
-	return 0;
-}
-
-static const struct dev_pm_ops dwc3_pci_dev_pm_ops = {
-	SET_SYSTEM_SLEEP_PM_OPS(dwc3_pci_suspend, dwc3_pci_resume)
-};
-
-#define DEV_PM_OPS	(&dwc3_pci_dev_pm_ops)
-#else
-#define DEV_PM_OPS	NULL
-#endif /* CONFIG_PM */
-
-static struct pci_driver dwc3_pci_driver = {
-	.name		= "dwc3-pci",
-	.id_table	= dwc3_pci_id_table,
-	.probe		= dwc3_pci_probe,
-	.remove		= dwc3_pci_remove,
-	.driver		= {
-		.pm	= DEV_PM_OPS,
-	},
-};
-
-MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_DESCRIPTION("DesignWare USB3 PCI Glue Layer");
-
-module_pci_driver(dwc3_pci_driver);
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 5acbb94..f352d70 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -417,6 +417,9 @@
 			if (dwc->speed != DWC3_DSTS_SUPERSPEED)
 				return -EINVAL;
 
+			if (dwc->is_ebc)
+				break;
+
 			reg = dwc3_readl(dwc->regs, DWC3_DCTL);
 			if (set)
 				reg |= DWC3_DCTL_INITU1ENA;
@@ -431,6 +434,9 @@
 			if (dwc->speed != DWC3_DSTS_SUPERSPEED)
 				return -EINVAL;
 
+			if (dwc->is_ebc)
+				break;
+
 			reg = dwc3_readl(dwc->regs, DWC3_DCTL);
 			if (set)
 				reg |= DWC3_DCTL_INITU2ENA;
@@ -449,8 +455,19 @@
 			if (!set)
 				return -EINVAL;
 
-			dwc->test_mode_nr = wIndex >> 8;
-			dwc->test_mode = true;
+			/* Stall SRP/HNP test modes */
+			switch (wIndex >> 8) {
+			case TEST_J:
+			case TEST_K:
+			case TEST_SE0_NAK:
+			case TEST_PACKET:
+			case TEST_FORCE_EN:
+				dwc->test_mode_nr = wIndex >> 8;
+				dwc->test_mode = true;
+				break;
+			default:
+				return -EINVAL;
+			}
 			break;
 		default:
 			return -EINVAL;
@@ -560,9 +577,12 @@
 			 * Enable transition to U1/U2 state when
 			 * nothing is pending from application.
 			 */
-			reg = dwc3_readl(dwc->regs, DWC3_DCTL);
-			reg |= (DWC3_DCTL_ACCEPTU1ENA | DWC3_DCTL_ACCEPTU2ENA);
-			dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+			if (!dwc->is_ebc) {
+				reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+				reg |= (DWC3_DCTL_ACCEPTU1ENA
+					| DWC3_DCTL_ACCEPTU2ENA);
+				dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+			}
 
 			dwc->resize_fifos = true;
 			dev_dbg(dwc->dev, "resize fifos flag SET\n");
@@ -776,6 +796,9 @@
 
 	dwc->ep0_next_event = DWC3_EP0_NRDY_STATUS;
 
+	if (list_empty(&ep0->request_list))
+		return;
+
 	r = next_request(&ep0->request_list);
 	ur = &r->request;
 
@@ -1016,6 +1039,25 @@
 			return;
 		}
 
+		/*
+		 * Per databook, if an XferNotready(Data) is received after
+		 * XferComplete(Data), one possible reason is host is trying
+		 * to complete data stage by moving a 0-length packet.
+		 *
+		 * REVISIT in case of other cases
+		 */
+		if (dwc->ep0_next_event == DWC3_EP0_NRDY_STATUS) {
+			u32		size = 0;
+			struct dwc3_ep *dep = dwc->eps[event->endpoint_number];
+
+			if (dep->number == 0)
+				size = dep->endpoint.maxpacket;
+
+			dwc3_ep0_start_trans(dwc, dep->number,
+				dwc->ctrl_req_addr, size,
+				DWC3_TRBCTL_CONTROL_DATA);
+		}
+
 		break;
 
 	case DEPEVT_STATUS_CONTROL_STATUS:
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index b5e5b35..4ee8746 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -49,10 +49,16 @@
 
 #include <linux/usb/ch9.h>
 #include <linux/usb/gadget.h>
+#include <linux/usb/phy.h>
+#include <linux/usb/otg.h>
+#include <linux/usb/ulpi.h>
 
 #include "core.h"
 #include "gadget.h"
 #include "io.h"
+#include "otg.h"
+
+static LIST_HEAD(ebc_io_ops);
 
 /**
  * dwc3_gadget_set_test_mode - Enables USB2 Test Modes
@@ -267,7 +273,7 @@
 
 	if (dwc->ep0_bounced && dep->number == 0)
 		dwc->ep0_bounced = false;
-	else
+	else if (!dep->ebc)
 		usb_gadget_unmap_request(&dwc->gadget, &req->request,
 				req->direction);
 
@@ -337,7 +343,7 @@
 		unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
 {
 	struct dwc3_ep		*dep = dwc->eps[ep];
-	u32			timeout = 500;
+	u32			timeout = 5000;
 	u32			reg;
 
 	dev_vdbg(dwc->dev, "%s: cmd '%s' params %08x %08x %08x\n",
@@ -388,9 +394,13 @@
 	if (dep->number == 0 || dep->number == 1)
 		return 0;
 
-	dep->trb_pool = dma_alloc_coherent(dwc->dev,
-			sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
-			&dep->trb_pool_dma, GFP_KERNEL);
+	if (dep->ebc)
+		dep->trb_pool = dep->ebc->alloc_static_trb_pool(
+				&dep->trb_pool_dma);
+	else
+		dep->trb_pool = dma_alloc_coherent(dwc->dev,
+				sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
+				&dep->trb_pool_dma, GFP_KERNEL);
 	if (!dep->trb_pool) {
 		dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
 				dep->name);
@@ -404,7 +414,11 @@
 {
 	struct dwc3		*dwc = dep->dwc;
 
-	dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
+	if (dep->ebc)
+		dep->ebc->free_static_trb_pool();
+	else
+		dma_free_coherent(dwc->dev,
+			sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
 			dep->trb_pool, dep->trb_pool_dma);
 
 	dep->trb_pool = NULL;
@@ -437,35 +451,58 @@
 static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
 		const struct usb_endpoint_descriptor *desc,
 		const struct usb_ss_ep_comp_descriptor *comp_desc,
-		bool ignore)
+		bool ignore, u32 cfg_action)
 {
 	struct dwc3_gadget_ep_cmd_params params;
 
 	memset(&params, 0x00, sizeof(params));
 
 	params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
-		| DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));
+		| DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc))
+		| cfg_action;
 
-	/* Burst size is only needed in SuperSpeed mode */
-	if (dwc->gadget.speed == USB_SPEED_SUPER) {
-		u32 burst = dep->endpoint.maxburst - 1;
+	if (dep->ebc) {
+		if (dwc->gadget.speed == USB_SPEED_SUPER) {
+			u32 burst = 0;
 
-		params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
-	}
+			params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
+		}
 
-	if (ignore)
 		params.param0 |= DWC3_DEPCFG_IGN_SEQ_NUM;
 
-	params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
-		| DWC3_DEPCFG_XFER_NOT_READY_EN;
+		params.param1 = DWC3_DEPCFG_EBC_MODE_EN;
 
-	if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
-		params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
-			| DWC3_DEPCFG_STREAM_EVENT_EN;
-		dep->stream_capable = true;
+		if (dep->ebc->is_ondemand)
+			params.param1 |= DWC3_DEPCFG_XFER_NOT_READY_EN;
+
+		dep->stream_capable = false;
+	} else {
+		/* Burst size is only needed in SuperSpeed mode */
+		if (dwc->gadget.speed == USB_SPEED_SUPER) {
+			/* In case a function forgets to set maxburst, maxburst
+			 * may be still 0, and we shouldn't minus 1 for it.
+			 */
+			u32 burst = dep->endpoint.maxburst ?
+					dep->endpoint.maxburst - 1 : 0;
+
+			params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
+		}
+
+		if (ignore)
+			params.param0 |= DWC3_DEPCFG_IGN_SEQ_NUM;
+
+		params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
+			| DWC3_DEPCFG_XFER_NOT_READY_EN;
+
+		if (usb_ss_max_streams(comp_desc) &&
+				usb_endpoint_xfer_bulk(desc)) {
+			params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
+				| DWC3_DEPCFG_STREAM_EVENT_EN;
+			dep->stream_capable = true;
+		}
 	}
 
-	if (usb_endpoint_xfer_isoc(desc))
+	if (usb_endpoint_xfer_isoc(desc) || usb_endpoint_is_bulk_out(desc))
 		params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
 
 	/*
@@ -488,6 +525,56 @@
 		dep->interval = 1 << (desc->bInterval - 1);
 	}
 
+	if (cfg_action == DWC3_DEPCFG_ACTION_RESTORE)
+		params.param2 = dep->ep_state;
+
+	return dwc3_send_gadget_ep_cmd(dwc, dep->number,
+			DWC3_DEPCMD_SETEPCONFIG, &params);
+}
+
+static int dwc3_gadget_update_ebc_ep_config(struct dwc3 *dwc,
+		struct dwc3_ep *dep,
+		const struct usb_endpoint_descriptor *desc,
+		const struct usb_ss_ep_comp_descriptor *comp_desc,
+		bool ignore_nrdy)
+{
+	u16	maxp;
+	struct dwc3_gadget_ep_cmd_params params;
+
+	if (!dep->ebc)
+		return -EINVAL;
+
+	memset(&params, 0x00, sizeof(params));
+
+	maxp = usb_endpoint_maxp(desc);
+
+	params.param0 =	DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
+		| DWC3_DEPCFG_MAX_PACKET_SIZE(maxp)
+		| DWC3_DEPCFG_ACTION_MODIFY;
+
+	if (dwc->gadget.speed == USB_SPEED_SUPER) {
+		u32 burst = 0;
+
+		params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
+	}
+	params.param0 |= DWC3_DEPCFG_IGN_SEQ_NUM;
+	params.param1 = DWC3_DEPCFG_EBC_MODE_EN;
+
+	if (!ignore_nrdy)
+		params.param1 |= DWC3_DEPCFG_XFER_NOT_READY_EN;
+
+	dep->stream_capable = false;
+
+	params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);
+
+	if (dep->direction)
+		params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
+
+	if (desc->bInterval) {
+		params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
+		dep->interval = 1 << (desc->bInterval - 1);
+	}
+
 	return dwc3_send_gadget_ep_cmd(dwc, dep->number,
 			DWC3_DEPCMD_SETEPCONFIG, &params);
 }
@@ -526,7 +613,8 @@
 			return ret;
 	}
 
-	ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, ignore);
+	ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, ignore,
+				DWC3_DEPCFG_ACTION_INIT);
 	if (ret)
 		return ret;
 
@@ -547,6 +635,9 @@
 		reg |= DWC3_DALEPENA_EP(dep->number);
 		dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
 
+		if (dep->ebc)
+			dwc->is_ebc = 1;
+
 		if (!usb_endpoint_xfer_isoc(desc))
 			return 0;
 
@@ -566,13 +657,13 @@
 	return 0;
 }
 
-static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum);
+static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, int forcerm);
 static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
 {
 	struct dwc3_request		*req;
 
 	if (!list_empty(&dep->req_queued)) {
-		dwc3_stop_active_transfer(dwc, dep->number);
+		dwc3_stop_active_transfer(dwc, dep->number, 1);
 
 		/* - giveback all requests to gadget driver */
 		while (!list_empty(&dep->req_queued)) {
@@ -600,8 +691,16 @@
 static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
 {
 	struct dwc3		*dwc = dep->dwc;
+	struct ebc_io		*ebc = dep->ebc;
 	u32			reg;
 
+	if (ebc) {
+		dwc->is_ebc = 0;
+
+		if (ebc->is_ondemand && ebc->xfer_stop)
+			ebc->xfer_stop();
+	}
+
 	dwc3_remove_requests(dwc, dep);
 
 	reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
@@ -614,6 +713,10 @@
 	dep->type = 0;
 	dep->flags = 0;
 
+	/* set normal endpoint maxpacket to default value */
+	if (dep->number > 1)
+		dep->endpoint.maxpacket = 1024;
+
 	return 0;
 }
 
@@ -700,7 +803,8 @@
 	dep = to_dwc3_ep(ep);
 	dwc = dep->dwc;
 
-	if (!(dep->flags & DWC3_EP_ENABLED)) {
+	if (!(dep->flags & DWC3_EP_ENABLED) &&
+		dep->flags != DWC3_EP_HIBERNATION) {
 		dev_WARN_ONCE(dwc->dev, true, "%s is already disabled\n",
 				dep->name);
 		return 0;
@@ -751,7 +855,8 @@
  */
 static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
 		struct dwc3_request *req, dma_addr_t dma,
-		unsigned length, unsigned last, unsigned chain, unsigned node)
+		unsigned length, unsigned last, unsigned chain,
+		unsigned node, unsigned csp)
 {
 	struct dwc3		*dwc = dep->dwc;
 	struct dwc3_trb		*trb;
@@ -761,10 +866,6 @@
 			length, last ? " last" : "",
 			chain ? " chain" : "");
 
-	/* Skip the LINK-TRB on ISOC */
-	if (((dep->free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
-			usb_endpoint_xfer_isoc(dep->endpoint.desc))
-		dep->free_slot++;
 
 	trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
 
@@ -776,6 +877,10 @@
 	}
 
 	dep->free_slot++;
+	/* Skip the LINK-TRB on ISOC */
+	if (((dep->free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
+			usb_endpoint_xfer_isoc(dep->endpoint.desc))
+		dep->free_slot++;
 
 	trb->size = DWC3_TRB_SIZE_LENGTH(length);
 	trb->bpl = lower_32_bits(dma);
@@ -818,6 +923,12 @@
 	if (chain)
 		trb->ctrl |= DWC3_TRB_CTRL_CHN;
 
+	if (csp) {
+		trb->ctrl |= DWC3_TRB_CTRL_CSP;
+		trb->ctrl |= DWC3_TRB_CTRL_IOC;
+	}
+
+
 	if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
 		trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id);
 
@@ -882,7 +993,8 @@
 	}
 
 	/* The last TRB is a link TRB, not used for xfer */
-	if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->endpoint.desc))
+	if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
+		(dep->free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1)
 		return;
 
 	list_for_each_entry_safe(req, n, &dep->request_list, list) {
@@ -918,12 +1030,14 @@
 					chain = false;
 
 				dwc3_prepare_one_trb(dep, req, dma, length,
-						last_one, chain, i);
+						last_one, chain, i, false);
 
 				if (last_one)
 					break;
 			}
 		} else {
+			unsigned csp = false;
+
 			dma = req->request.dma;
 			length = req->request.length;
 			trbs_left--;
@@ -935,8 +1049,13 @@
 			if (list_is_last(&req->list, &dep->request_list))
 				last_one = 1;
 
+			/* For bulk-out ep, if req is the short packet and
+			 * not the last one, enable CSP. */
+			if (req->short_packet && !last_one)
+				csp = true;
+
 			dwc3_prepare_one_trb(dep, req, dma, length,
-					last_one, false, 0);
+					last_one, false, 0, csp);
 
 			if (last_one)
 				break;
@@ -944,6 +1063,115 @@
 	}
 }
 
+/*
+ * dwc3_prepare_ebc_trbs - setup TRBs from DvC endpoint requests
+ * @dep: endpoint for which requests are being prepared
+ * @starting: true if the endpoint is idle and no requests are queued.
+ *
+ * The functions goes through the requests list and setups TRBs for the
+ * transfers.
+ */
+static void dwc3_prepare_ebc_trbs(struct dwc3_ep *dep,
+		bool starting)
+{
+	struct dwc3_request	*req, *n;
+	struct dwc3_trb		*trb_st_hw;
+	struct dwc3_trb		*trb_link;
+	struct dwc3_trb		*trb;
+	u32			trbs_left;
+	u32			trbs_num;
+	u32			trbs_mask;
+
+	/* BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);*/
+	trbs_num = dep->ebc->static_trb_pool_size;
+	trbs_mask = trbs_num - 1;
+
+	/* the first request must not be queued */
+	trbs_left = (dep->busy_slot - dep->free_slot) & trbs_mask;
+	/*
+	 * if busy & slot are equal than it is either full or empty. If we are
+	 * starting to proceed requests then we are empty. Otherwise we ar
+	 * full and don't do anything
+	 */
+	if (!trbs_left) {
+		if (!starting)
+			return;
+		trbs_left = trbs_num;
+		dep->busy_slot = 0;
+		dep->free_slot = 0;
+	}
+
+	/* The tailed TRB is a link TRB, not used for xfer */
+	if ((trbs_left <= 1))
+		return;
+
+	list_for_each_entry_safe(req, n, &dep->request_list, list) {
+		unsigned int last_one = 0;
+		unsigned int cur_slot;
+
+		/* revisit: dont use specific TRB buffer for Debug class? */
+		trb = &dep->trb_pool[dep->free_slot & trbs_mask];
+		cur_slot = dep->free_slot;
+		dep->free_slot++;
+
+		/* Skip the LINK-TRB */
+		if (((cur_slot & trbs_mask) == trbs_num - 1))
+			continue;
+
+		dwc3_gadget_move_request_queued(req);
+		trbs_left--;
+
+		/* Is our TRB pool empty? */
+		if (!trbs_left)
+			last_one = 1;
+		/* Is this the last request? */
+		if (list_empty(&dep->request_list))
+			last_one = 1;
+
+		req->trb = trb;
+		req->trb_dma = dwc3_trb_dma_offset(dep, trb);
+
+		trb->size = DWC3_TRB_SIZE_LENGTH(req->request.length);
+		trb->bpl = lower_32_bits(req->request.dma);
+		trb->bph = upper_32_bits(req->request.dma);
+
+		switch (usb_endpoint_type(dep->endpoint.desc)) {
+		case USB_ENDPOINT_XFER_BULK:
+			trb->ctrl = DWC3_TRBCTL_NORMAL;
+			break;
+
+		case USB_ENDPOINT_XFER_CONTROL:
+		case USB_ENDPOINT_XFER_ISOC:
+		case USB_ENDPOINT_XFER_INT:
+		default:
+			/*
+			 * This is only possible with faulty memory because we
+			 * checked it already :)
+			 */
+			BUG();
+		}
+
+		trb->ctrl |= DWC3_TRB_CTRL_HWO | DWC3_TRB_CTRL_CHN;
+
+		if (last_one) {
+			if (trbs_left >= 1) {
+				trb_st_hw = &dep->trb_pool[0];
+
+				trb_link = &dep->trb_pool[dep->free_slot &
+						trbs_mask];
+				trb_link->bpl = lower_32_bits(
+					dwc3_trb_dma_offset(dep, trb_st_hw));
+				trb_link->bph = upper_32_bits(
+					dwc3_trb_dma_offset(dep, trb_st_hw));
+				trb_link->ctrl = DWC3_TRBCTL_LINK_TRB;
+				trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
+				trb_link->size = 0;
+			}
+			break;
+		}
+	}
+}
+
 static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
 		int start_new)
 {
@@ -964,8 +1192,11 @@
 	 * new requests as we try to set the IOC bit only on the last request.
 	 */
 	if (start_new) {
-		if (list_empty(&dep->req_queued))
-			dwc3_prepare_trbs(dep, start_new);
+		if (dep->ebc)
+			dwc3_prepare_ebc_trbs(dep, start_new);
+		else
+			if (list_empty(&dep->req_queued))
+				dwc3_prepare_trbs(dep, start_new);
 
 		/* req points to the first request which will be sent */
 		req = next_request(&dep->req_queued);
@@ -1002,8 +1233,9 @@
 		 * here and stop, unmap, free and del each of the linked
 		 * requests instead of what we do now.
 		 */
-		usb_gadget_unmap_request(&dwc->gadget, &req->request,
-				req->direction);
+		if (!dep->ebc)
+			usb_gadget_unmap_request(&dwc->gadget, &req->request,
+					req->direction);
 		list_del(&req->list);
 		return ret;
 	}
@@ -1016,6 +1248,26 @@
 		WARN_ON_ONCE(!dep->resource_index);
 	}
 
+	if (dep->ebc) {
+		if (dep->ebc->is_ondemand == 1) {
+			ret = dwc3_gadget_update_ebc_ep_config(dwc, dep,
+				dep->endpoint.desc, dep->comp_desc, true);
+
+			if (ret < 0) {
+				dev_dbg(dwc->dev,
+					"DEPCFG command failed on %s\n",
+					dep->name);
+				return ret;
+			}
+			dev_dbg(dwc->dev,
+				"successfully udpated DEPCFG command on %s\n",
+				dep->name);
+		}
+
+		if (dep->ebc->xfer_start)
+			dep->ebc->xfer_start();
+	}
+
 	return 0;
 }
 
@@ -1058,6 +1310,29 @@
 	req->direction		= dep->direction;
 	req->epnum		= dep->number;
 
+	/* specific handling for debug class */
+	if (dep->ebc) {
+		list_add_tail(&req->list, &dep->request_list);
+
+		if ((dep->ebc->is_ondemand == 1) &&
+			(!(dep->flags & DWC3_EP_PENDING_REQUEST))) {
+			dev_dbg(dwc->dev, "%s: delayed to kick ebc transfers\n",
+				dep->name);
+			return 0;
+		}
+
+		if (dep->flags & DWC3_EP_BUSY) {
+			dwc3_stop_active_transfer(dwc, dep->number, 1);
+			dep->flags = DWC3_EP_ENABLED;
+		}
+
+		ret = __dwc3_gadget_kick_transfer(dep, 0, true);
+		if (ret)
+			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
+					dep->name);
+		return ret;
+	}
+
 	/*
 	 * We only add to our list of requests now and
 	 * start consuming the list once we get XferNotReady
@@ -1097,7 +1372,7 @@
 		 */
 		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
 			if (list_empty(&dep->req_queued)) {
-				dwc3_stop_active_transfer(dwc, dep->number);
+				dwc3_stop_active_transfer(dwc, dep->number, 1);
 				dep->flags = DWC3_EP_ENABLED;
 			}
 			return 0;
@@ -1141,16 +1416,34 @@
 
 	int				ret;
 
+	spin_lock_irqsave(&dwc->lock, flags);
 	if (!dep->endpoint.desc) {
 		dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
 				request, ep->name);
+		spin_unlock_irqrestore(&dwc->lock, flags);
 		return -ESHUTDOWN;
 	}
 
+	if (!dwc->soft_connected) {
+		dev_dbg(dwc->dev, "request %p queued when pullup is disabled\n",
+				request);
+		spin_unlock_irqrestore(&dwc->lock, flags);
+		return -ESHUTDOWN;
+	}
 	dev_vdbg(dwc->dev, "queing request %p to %s length %d\n",
 			request, ep->name, request->length);
 
-	spin_lock_irqsave(&dwc->lock, flags);
+	/* pad OUT endpoint buffer to MaxPacketSize per databook requirement*/
+	req->short_packet = false;
+	if (!IS_ALIGNED(request->length, ep->desc->wMaxPacketSize)
+		&& !(dep->number & 1) && (dep->number != DWC3_EP_EBC_OUT_NB)) {
+		request->length = roundup(request->length,
+					(u32) ep->desc->wMaxPacketSize);
+		/* set flag for bulk-out short request */
+		if (usb_endpoint_is_bulk_out(dep->endpoint.desc))
+			req->short_packet = true;
+	}
+
 	ret = __dwc3_gadget_ep_queue(dep, req);
 	spin_unlock_irqrestore(&dwc->lock, flags);
 
@@ -1183,10 +1476,10 @@
 		}
 		if (r == req) {
 			/* wait until it is processed */
-			dwc3_stop_active_transfer(dwc, dep->number);
+			dwc3_stop_active_transfer(dwc, dep->number, 1);
 			goto out1;
 		}
-		dev_err(dwc->dev, "request %p was not queued to %s\n",
+		dev_info(dwc->dev, "request %p was not queued to %s\n",
 				request, ep->name);
 		ret = -EINVAL;
 		goto out0;
@@ -1410,13 +1703,44 @@
 	return 0;
 }
 
-static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on)
+static int __dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on)
 {
 	u32			reg;
 	u32			timeout = 500;
 
 	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
-	if (is_on) {
+	if (is_on)
+		reg |= DWC3_DCTL_RUN_STOP;
+	else
+		reg &= ~DWC3_DCTL_RUN_STOP;
+	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+
+	do {
+		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
+		if (is_on) {
+			if (!(reg & DWC3_DSTS_DEVCTRLHLT))
+				break;
+		} else {
+			if (reg & DWC3_DSTS_DEVCTRLHLT)
+				break;
+		}
+		timeout--;
+		if (!timeout)
+			return -ETIMEDOUT;
+		udelay(1);
+	} while (1);
+
+	return 0;
+}
+
+static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on)
+{
+	u32			reg;
+	u32			timeout = 500;
+	struct usb_phy		*usb_phy;
+
+	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+	if (is_on && !dwc->pullups_connected) {
 		if (dwc->revision <= DWC3_REVISION_187A) {
 			reg &= ~DWC3_DCTL_TRGTULST_MASK;
 			reg |= DWC3_DCTL_TRGTULST_RX_DET;
@@ -1426,10 +1750,22 @@
 			reg &= ~DWC3_DCTL_KEEP_CONNECT;
 		reg |= DWC3_DCTL_RUN_STOP;
 		dwc->pullups_connected = true;
-	} else {
+	} else if (!is_on && dwc->pullups_connected) {
 		reg &= ~DWC3_DCTL_RUN_STOP;
 		dwc->pullups_connected = false;
-	}
+
+		/* WORKAROUND: reset PHY via FUNC_CTRL before disconnect
+		 * to avoid PHY hang
+		 */
+		if (!dwc->utmi_phy) {
+			usb_phy = usb_get_phy(USB_PHY_TYPE_USB2);
+			if (usb_phy)
+				usb_phy_io_write(usb_phy,
+					0x6D, ULPI_FUNC_CTRL);
+			usb_put_phy(usb_phy);
+		}
+	} else
+		return 0;
 
 	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
 
@@ -1480,6 +1816,7 @@
 			DWC3_DEVTEN_EVNTOVERFLOWEN |
 			DWC3_DEVTEN_CMDCMPLTEN |
 			DWC3_DEVTEN_ERRTICERREN |
+			DWC3_DEVTEN_HIBERNATIONREQEVTEN |
 			DWC3_DEVTEN_WKUPEVTEN |
 			DWC3_DEVTEN_ULSTCNGEN |
 			DWC3_DEVTEN_CONNECTDONEEN |
@@ -1498,28 +1835,12 @@
 static irqreturn_t dwc3_interrupt(int irq, void *_dwc);
 static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc);
 
-static int dwc3_gadget_start(struct usb_gadget *g,
-		struct usb_gadget_driver *driver)
+static int dwc3_init_for_enumeration(struct dwc3 *dwc)
 {
-	struct dwc3		*dwc = gadget_to_dwc(g);
 	struct dwc3_ep		*dep;
-	unsigned long		flags;
 	int			ret = 0;
-	int			irq;
 	u32			reg;
 
-	spin_lock_irqsave(&dwc->lock, flags);
-
-	if (dwc->gadget_driver) {
-		dev_err(dwc->dev, "%s is already bound to %s\n",
-				dwc->gadget.name,
-				dwc->gadget_driver->driver.name);
-		ret = -EBUSY;
-		goto err0;
-	}
-
-	dwc->gadget_driver	= driver;
-
 	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
 	reg &= ~(DWC3_DCFG_SPEED_MASK);
 
@@ -1542,6 +1863,7 @@
 		reg |= dwc->maximum_speed;
 	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
 
+	dwc->is_ebc = 0;
 	dwc->start_config_issued = false;
 
 	/* Start with SuperSpeed Default */
@@ -1551,41 +1873,92 @@
 	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
 	if (ret) {
 		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
-		goto err0;
+		return ret;
 	}
 
 	dep = dwc->eps[1];
 	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
 	if (ret) {
 		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
-		goto err1;
+		goto err0;
 	}
 
 	/* begin to receive SETUP packets */
 	dwc->ep0state = EP0_SETUP_PHASE;
 	dwc3_ep0_out_start(dwc);
 
-	irq = platform_get_irq(to_platform_device(dwc->dev), 0);
-	ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
-			IRQF_SHARED | IRQF_ONESHOT, "dwc3", dwc);
-	if (ret) {
-		dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
-				irq, ret);
+	dwc3_gadget_enable_irq(dwc);
+
+	return 0;
+err0:
+	__dwc3_gadget_ep_disable(dwc->eps[0]);
+
+	return ret;
+}
+
+static int dwc3_gadget_start(struct usb_gadget *g,
+		struct usb_gadget_driver *driver)
+{
+	struct dwc3		*dwc = gadget_to_dwc(g);
+	unsigned long		flags;
+	int			ret = 0;
+	int			irq = 0;
+	struct usb_phy		*usb_phy;
+
+	if (dwc->is_otg) {
+		usb_phy = usb_get_phy(USB_PHY_TYPE_USB2);
+		if (!usb_phy) {
+			dev_err(dwc->dev, "OTG driver not available\n");
+			ret = -ENODEV;
+			goto err0;
+		}
+
+		otg_set_peripheral(usb_phy->otg, &dwc->gadget);
+		usb_put_phy(usb_phy);
+	} else {
+		irq = platform_get_irq(to_platform_device(dwc->dev), 0);
+		ret = request_threaded_irq(irq, dwc3_interrupt,
+				dwc3_thread_interrupt, IRQF_SHARED,
+				"dwc3", dwc);
+		if (ret) {
+			dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
+					irq, ret);
+			goto err0;
+		}
+	}
+
+	spin_lock_irqsave(&dwc->lock, flags);
+
+	if (dwc->gadget_driver) {
+		dev_err(dwc->dev, "%s is already bound to %s\n",
+				dwc->gadget.name,
+				dwc->gadget_driver->driver.name);
+		ret = -EBUSY;
 		goto err1;
 	}
 
-	dwc3_gadget_enable_irq(dwc);
+	dwc->gadget_driver	= driver;
+
+	if (!dwc->is_otg) {
+		ret = dwc3_init_for_enumeration(dwc);
+		if (ret)
+			goto err2;
+	}
 
 	spin_unlock_irqrestore(&dwc->lock, flags);
 
 	return 0;
 
-err1:
-	__dwc3_gadget_ep_disable(dwc->eps[0]);
+err2:
+	dwc->gadget_driver = NULL;
 
-err0:
+err1:
 	spin_unlock_irqrestore(&dwc->lock, flags);
 
+	if (!dwc->is_otg)
+		free_irq(irq, dwc);
+
+err0:
 	return ret;
 }
 
@@ -1599,9 +1972,6 @@
 	spin_lock_irqsave(&dwc->lock, flags);
 
 	dwc3_gadget_disable_irq(dwc);
-	irq = platform_get_irq(to_platform_device(dwc->dev), 0);
-	free_irq(irq, dwc);
-
 	__dwc3_gadget_ep_disable(dwc->eps[0]);
 	__dwc3_gadget_ep_disable(dwc->eps[1]);
 
@@ -1609,9 +1979,57 @@
 
 	spin_unlock_irqrestore(&dwc->lock, flags);
 
+	irq = platform_get_irq(to_platform_device(dwc->dev), 0);
+	free_irq(irq, dwc);
+
 	return 0;
 }
 
+static int __dwc3_vbus_draw(struct dwc3 *dwc, unsigned ma)
+{
+	int		ret;
+	struct usb_phy	*usb_phy;
+
+	usb_phy = usb_get_phy(USB_PHY_TYPE_USB2);
+	if (!usb_phy) {
+		dev_err(dwc->dev, "OTG driver not available\n");
+		return -ENODEV;
+	}
+
+	ret = usb_phy_set_power(usb_phy, ma);
+	usb_put_phy(usb_phy);
+
+	return ret;
+}
+
+static int dwc3_vbus_draw(struct usb_gadget *g, unsigned ma)
+{
+	unsigned	ma_otg = 0;
+	struct dwc3	*dwc = gadget_to_dwc(g);
+
+	dev_dbg(dwc->dev, "otg_set_power: %d mA\n", ma);
+
+	switch (ma) {
+	case USB3_I_MAX_OTG:
+		ma_otg = OTG_USB3_900MA;
+		break;
+	case USB3_I_UNIT_OTG:
+		ma_otg = OTG_USB3_150MA;
+		break;
+	case USB2_I_MAX_OTG:
+		ma_otg = OTG_USB2_500MA;
+		break;
+	case USB2_I_UNIT_OTG:
+		ma_otg = OTG_USB2_100MA;
+		break;
+	default:
+		dev_err(dwc->dev,
+			"wrong charging current reported: %dmA\n", ma);
+	}
+
+	return __dwc3_vbus_draw(dwc, ma_otg);
+}
+
 static const struct usb_gadget_ops dwc3_gadget_ops = {
 	.get_frame		= dwc3_gadget_get_frame,
 	.wakeup			= dwc3_gadget_wakeup,
@@ -1627,6 +2045,7 @@
 		u8 num, u32 direction)
 {
 	struct dwc3_ep			*dep;
+	struct ebc_io			*ebc, *n;
 	u8				i;
 
 	for (i = 0; i < num; i++) {
@@ -1649,6 +2068,17 @@
 		dep->endpoint.name = dep->name;
 		dep->direction = (epnum & 1);
 
+		list_for_each_entry_safe(ebc, n, &ebc_io_ops, list) {
+			if (epnum == ebc->epnum) {
+				dep->ebc = ebc;
+				if (ebc->init)
+					if (ebc->init() == -ENODEV)
+						dev_err(dwc->dev,
+						"debug class init fail %d\n",
+						epnum);
+			}
+		}
+
 		if (epnum == 0 || epnum == 1) {
 			dep->endpoint.maxpacket = 512;
 			dep->endpoint.maxburst = 1;
@@ -1838,7 +2268,7 @@
 			break;
 	} while (1);
 
-	if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
+	if (dep->endpoint.desc && usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
 			list_empty(&dep->req_queued)) {
 		if (list_empty(&dep->request_list)) {
 			/*
@@ -1849,7 +2279,7 @@
 			 */
 			dep->flags = DWC3_EP_PENDING_REQUEST;
 		} else {
-			dwc3_stop_active_transfer(dwc, dep->number);
+			dwc3_stop_active_transfer(dwc, dep->number, 1);
 			dep->flags = DWC3_EP_ENABLED;
 		}
 		return 1;
@@ -1868,6 +2298,12 @@
 	unsigned		status = 0;
 	int			clean_busy;
 
+	if (!(dep->flags & DWC3_EP_ENABLED)) {
+		dev_warn(dwc->dev, "%s: %s event on disabled ep\n", dep->name,
+			dwc3_ep_event_string(event->endpoint_event));
+		return;
+	}
+
 	if (event->status & DEPEVT_STATUS_BUSERR)
 		status = -ECONNRESET;
 
@@ -1933,8 +2369,9 @@
 		dwc3_endpoint_transfer_complete(dwc, dep, event, 1);
 		break;
 	case DWC3_DEPEVT_XFERINPROGRESS:
-		if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
-			dev_dbg(dwc->dev, "%s is not an Isochronous endpoint\n",
+		if ((!usb_endpoint_xfer_isoc(dep->endpoint.desc)) &&
+			(!usb_endpoint_xfer_bulk(dep->endpoint.desc))) {
+			dev_dbg(dwc->dev, "%s is not an Isochronous/bulk endpoint\n",
 					dep->name);
 			return;
 		}
@@ -1999,7 +2436,7 @@
 	}
 }
 
-static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum)
+static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, int forcerm)
 {
 	struct dwc3_ep *dep;
 	struct dwc3_gadget_ep_cmd_params params;
@@ -2008,6 +2445,31 @@
 
 	dep = dwc->eps[epnum];
 
+	if (dep->ebc) {
+		if (dep->ebc->is_ondemand == 1) {
+			ret = dwc3_gadget_update_ebc_ep_config(dwc, dep,
+				dep->endpoint.desc, dep->comp_desc, false);
+			if (ret < 0) {
+				dev_dbg(dwc->dev,
+					"DEPCFG failed on %s\n",
+					dep->name);
+				return;
+			}
+			dev_dbg(dwc->dev,
+				"successfully udpated DEPCFG command on %s\n",
+				dep->name);
+		}
+
+		if (dep->ebc->xfer_stop)
+			dep->ebc->xfer_stop();
+		else
+			dev_dbg(dwc->dev, "%s xfer_stop() NULL\n", dep->name);
+	}
+
+	if (usb_endpoint_xfer_isoc(dep->endpoint.desc))
+		dep->resource_index = dwc3_gadget_ep_get_transfer_index(dwc,
+				dep->number);
+
 	if (!dep->resource_index)
 		return;
 
@@ -2031,8 +2493,10 @@
 	 */
 
 	cmd = DWC3_DEPCMD_ENDTRANSFER;
-	cmd |= DWC3_DEPCMD_HIPRI_FORCERM | DWC3_DEPCMD_CMDIOC;
+	cmd |= DWC3_DEPCMD_CMDIOC;
 	cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
+	if (forcerm)
+		cmd |= DWC3_DEPCMD_HIPRI_FORCERM;
 	memset(&params, 0, sizeof(params));
 	ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
 	WARN_ON_ONCE(ret);
@@ -2132,6 +2596,18 @@
 	dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
 }
 
+static void link_state_change_work(struct work_struct *data)
+{
+	struct dwc3 *dwc = container_of((struct delayed_work *)data,
+			struct dwc3, link_work);
+
+	if (dwc->link_state == DWC3_LINK_STATE_U3 ||
+		dwc->pm_state == PM_SUSPENDED) {
+		dev_info(dwc->dev, "device suspended; notify OTG\n");
+		__dwc3_vbus_draw(dwc, OTG_DEVICE_SUSPEND);
+	}
+}
+
 static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
 {
 	u32			reg;
@@ -2291,6 +2767,10 @@
 		break;
 	}
 
+	/* Follow OTG2.0 spec. Unconfigured state, max charging cap should
+	 * not exceed 2.5ma. */
+	__dwc3_vbus_draw(dwc, OTG_USB2_0MA);
+
 	/* Enable USB2 LPM Capability */
 
 	if ((dwc->revision > DWC3_REVISION_194A)
@@ -2318,16 +2798,20 @@
 	}
 
 	dep = dwc->eps[0];
-	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true);
+	ret = dwc3_gadget_set_ep_config(dwc, dep,
+			&dwc3_gadget_ep0_desc, NULL, false,
+			DWC3_DEPCFG_ACTION_MODIFY);
 	if (ret) {
-		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
+		dev_err(dwc->dev, "failed to update %s\n", dep->name);
 		return;
 	}
 
 	dep = dwc->eps[1];
-	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true);
+	ret = dwc3_gadget_set_ep_config(dwc, dep,
+			&dwc3_gadget_ep0_desc, NULL, false,
+			DWC3_DEPCFG_ACTION_MODIFY);
 	if (ret) {
-		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
+		dev_err(dwc->dev, "failed to update %s\n", dep->name);
 		return;
 	}
 
@@ -2344,6 +2828,9 @@
 {
 	dev_vdbg(dwc->dev, "%s\n", __func__);
 
+	dev_info(dwc->dev, "device resumed; notify OTG\n");
+	__dwc3_vbus_draw(dwc, OTG_DEVICE_RESUME);
+
 	/*
 	 * TODO take core out of low power mode when that's
 	 * implemented.
@@ -2433,12 +2920,26 @@
 
 	dwc->link_state = next;
 
+	if (next ==  DWC3_LINK_STATE_U3)
+		schedule_delayed_work(
+			&dwc->link_work, msecs_to_jiffies(1000));
+
 	dev_vdbg(dwc->dev, "%s link %d\n", __func__, dwc->link_state);
 }
 
+static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc)
+{
+	dev_vdbg(dwc->dev, "%s\n", __func__);
+
+	if (dwc->hiber_enabled)
+		pm_runtime_put(dwc->dev);
+}
+
 static void dwc3_gadget_interrupt(struct dwc3 *dwc,
 		const struct dwc3_event_devt *event)
 {
+	u32	reg;
+
 	switch (event->type) {
 	case DWC3_DEVICE_EVENT_DISCONNECT:
 		dwc3_gadget_disconnect_interrupt(dwc);
@@ -2455,6 +2956,9 @@
 	case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
 		dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
 		break;
+	case DWC3_DEVICE_EVENT_HIBER_REQ:
+		dwc3_gadget_hibernation_interrupt(dwc);
+		break;
 	case DWC3_DEVICE_EVENT_EOPF:
 		dev_vdbg(dwc->dev, "End of Periodic Frame\n");
 		break;
@@ -2463,6 +2967,14 @@
 		break;
 	case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
 		dev_vdbg(dwc->dev, "Erratic Error\n");
+
+		/* Controller may generate too many Erratic Errors Messages,
+		 * Disable it until we find a way to recover the failure.
+		 */
+		reg = dwc3_readl(dwc->regs, DWC3_DEVTEN);
+		reg &= ~DWC3_DEVTEN_ERRTICERREN;
+		dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
+		dev_info(dwc->dev, "Erratic Error Event disabled\n");
 		break;
 	case DWC3_DEVICE_EVENT_CMD_CMPL:
 		dev_vdbg(dwc->dev, "Command Complete\n");
@@ -2499,6 +3011,7 @@
 	struct dwc3 *dwc = _dwc;
 	unsigned long flags;
 	irqreturn_t ret = IRQ_NONE;
+	u32 reg;
 	int i;
 
 	spin_lock_irqsave(&dwc->lock, flags);
@@ -2538,6 +3051,11 @@
 		evt->count = 0;
 		evt->flags &= ~DWC3_EVENT_PENDING;
 		ret = IRQ_HANDLED;
+
+		/* Unmask interrupt */
+		reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(i));
+		reg &= ~DWC3_GEVNTSIZ_INTMASK;
+		dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(i), reg);
 	}
 
 	spin_unlock_irqrestore(&dwc->lock, flags);
@@ -2549,6 +3067,7 @@
 {
 	struct dwc3_event_buffer *evt;
 	u32 count;
+	u32 reg;
 
 	evt = dwc->ev_buffs[buf];
 
@@ -2560,6 +3079,17 @@
 	evt->count = count;
 	evt->flags |= DWC3_EVENT_PENDING;
 
+	/* WORKAROUND: Add 4 us delay workaround to A-unit issue in A0 stepping.
+	 * Can be removed after B0.
+	 */
+	if (dwc->is_otg && dwc->revision == DWC3_REVISION_210A)
+		udelay(4);
+
+	/* Mask interrupt */
+	reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf));
+	reg |= DWC3_GEVNTSIZ_INTMASK;
+	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);
+
 	return IRQ_WAKE_THREAD;
 }
 
@@ -2570,6 +3100,15 @@
 	irqreturn_t			ret = IRQ_NONE;
 
 	spin_lock(&dwc->lock);
+	if (dwc->pm_state != PM_ACTIVE) {
+		if (dwc->pm_state == PM_SUSPENDED) {
+			dev_info(dwc->dev, "u2/u3 pmu is received\n");
+			pm_runtime_get(dwc->dev);
+			dwc->pm_state = PM_RESUMING;
+			ret = IRQ_HANDLED;
+		}
+		goto out;
+	}
 
 	for (i = 0; i < dwc->num_event_buffers; i++) {
 		irqreturn_t status;
@@ -2579,6 +3118,7 @@
 			ret = status;
 	}
 
+out:
 	spin_unlock(&dwc->lock);
 
 	return ret;
@@ -2595,6 +3135,23 @@
 	u32					reg;
 	int					ret;
 
+	dwc->scratch_array = dma_alloc_coherent(dwc->dev,
+			sizeof(*dwc->scratch_array),
+			&dwc->scratch_array_dma, GFP_KERNEL);
+	if (!dwc->scratch_array) {
+		dev_err(dwc->dev, "failed to allocate scratch_arrary\n");
+		return -ENOMEM;
+	}
+
+	dwc->scratch_buffer[0] = dma_alloc_coherent(dwc->dev,
+			DWC3_SCRATCH_BUF_SIZE,
+			&dwc->scratch_array->dma_adr[0], GFP_KERNEL);
+	if (!dwc->scratch_buffer[0]) {
+		dev_err(dwc->dev, "failed to allocate scratch_buffer\n");
+		ret = -ENOMEM;
+		goto err;
+	}
+
 	dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
 			&dwc->ctrl_req_addr, GFP_KERNEL);
 	if (!dwc->ctrl_req) {
@@ -2631,8 +3188,11 @@
 	dwc->gadget.max_speed		= USB_SPEED_SUPER;
 	dwc->gadget.speed		= USB_SPEED_UNKNOWN;
 	dwc->gadget.sg_supported	= true;
+	dwc->gadget.quirk_ep_out_aligned_size = true;
 	dwc->gadget.name		= "dwc3-gadget";
 
+	INIT_DELAYED_WORK(&dwc->link_work, link_state_change_work);
+
 	/*
 	 * REVISIT: Here we should clear all pending IRQs to be
 	 * sure we're starting from a well known location.
@@ -2679,6 +3239,13 @@
 			dwc->ctrl_req, dwc->ctrl_req_addr);
 
 err0:
+	dma_free_coherent(dwc->dev,
+			DWC3_SCRATCH_BUF_SIZE, dwc->scratch_buffer[0],
+			(dma_addr_t)dwc->scratch_array->dma_adr[0]);
+
+err:
+	dma_free_coherent(dwc->dev, sizeof(*dwc->scratch_array),
+			dwc->scratch_array, dwc->scratch_array_dma);
 	return ret;
 }
 
@@ -2700,6 +3267,13 @@
 
 	dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
 			dwc->ctrl_req, dwc->ctrl_req_addr);
+
+	dma_free_coherent(dwc->dev,
+			DWC3_SCRATCH_BUF_SIZE, dwc->scratch_buffer[0],
+			(dma_addr_t)dwc->scratch_array->dma_adr[0]);
+
+	dma_free_coherent(dwc->dev, sizeof(*dwc->scratch_array),
+			dwc->scratch_array, dwc->scratch_array_dma);
 }
 
 int dwc3_gadget_prepare(struct dwc3 *dwc)
@@ -2760,3 +3334,359 @@
 err0:
 	return ret;
 }
+
+void dwc3_register_io_ebc(struct ebc_io *ebc)
+{
+	list_add_tail(&ebc->list, &ebc_io_ops);
+}
+
+void dwc3_unregister_io_ebc(struct ebc_io *ebc)
+{
+	list_del(&ebc->list);
+}
+
+#ifdef CONFIG_PM_RUNTIME
+static void dwc3_gadget_get_ep_state(struct dwc3 *dwc, struct dwc3_ep *dep)
+{
+	struct	dwc3_gadget_ep_cmd_params params;
+	int	ret;
+
+	dev_vdbg(dwc->dev, "%s\n", __func__);
+
+	memset(&params, 0, sizeof(params));
+	ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
+			DWC3_DEPCMD_GETEPSTATE, &params);
+	WARN_ON_ONCE(ret);
+
+	dep->ep_state = dwc3_readl(dwc->regs, DWC3_DEPCMDPAR2(dep->number));
+}
+
+static void dwc3_cache_hwregs(struct dwc3 *dwc)
+{
+	struct dwc3_hwregs	*regs = &dwc->hwregs;
+
+	dev_vdbg(dwc->dev, "%s\n", __func__);
+
+	regs->guctl = dwc3_readl(dwc->regs, DWC3_GUCTL);
+	regs->grxthrcfg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
+	regs->dcfg = dwc3_readl(dwc->regs, DWC3_DCFG);
+	regs->devten = dwc3_readl(dwc->regs, DWC3_DEVTEN);
+	regs->gctl = dwc3_readl(dwc->regs, DWC3_GCTL);
+	regs->gusb3pipectl0 = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
+	regs->gusb2phycfg0 = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+	regs->gevntadrlo = dwc3_readl(dwc->regs, DWC3_GEVNTADRLO(0));
+	regs->gevntadrhi = dwc3_readl(dwc->regs, DWC3_GEVNTADRHI(0));
+	regs->gevntsiz = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0));
+}
+
+static void dwc3_restore_hwregs(struct dwc3 *dwc)
+{
+	struct dwc3_hwregs	*regs = &dwc->hwregs;
+
+	dev_vdbg(dwc->dev, "%s\n", __func__);
+
+	dwc3_writel(dwc->regs, DWC3_GUCTL, regs->guctl);
+	dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, regs->grxthrcfg);
+	dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), regs->gusb3pipectl0);
+	dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), regs->gusb2phycfg0);
+	dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(0), regs->gevntadrlo);
+	dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(0), regs->gevntadrhi);
+	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), regs->gevntsiz);
+	dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), 0);
+	dwc3_writel(dwc->regs, DWC3_DCFG, regs->dcfg);
+	dwc3_writel(dwc->regs, DWC3_DEVTEN, regs->devten);
+	dwc3_writel(dwc->regs, DWC3_GCTL, regs->gctl);
+}
+
+static int dwc3_gadget_controller_save_state(struct dwc3 *dwc)
+{
+	u32			reg;
+	u32			timeout = 1000;
+
+	dev_vdbg(dwc->dev, "---> %s()\n", __func__);
+
+	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+
+	reg |= DWC3_DCTL_CSS;
+	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+
+	do {
+		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
+		if (!(reg & DWC3_DSTS_SSS))
+				return 0;
+
+		timeout--;
+		if (!timeout)
+			return -ETIMEDOUT;
+		udelay(1);
+	} while (1);
+
+	dev_vdbg(dwc->dev, "<--- %s()\n", __func__);
+}
+
+static int dwc3_gadget_controller_restore_state(struct dwc3 *dwc)
+{
+	u32			reg;
+	u32			timeout = 1000;
+
+	dev_vdbg(dwc->dev, "---> %s()\n", __func__);
+
+	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+
+	reg |= DWC3_DCTL_CRS;
+	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+
+	do {
+		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
+		if (!(reg & DWC3_DSTS_RSS))
+				return 0;
+
+		timeout--;
+		if (!timeout)
+			return -ETIMEDOUT;
+		udelay(1);
+	} while (1);
+
+	dev_vdbg(dwc->dev, "<--- %s()\n", __func__);
+}
+
+void dwc3_gadget_keep_conn(struct dwc3 *dwc, int is_on)
+{
+	u32         reg;
+
+	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+	if (is_on)
+		reg |= DWC3_DCTL_KEEP_CONNECT;
+	else
+		reg &= ~DWC3_DCTL_KEEP_CONNECT;
+	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+}
+
+int dwc3_runtime_suspend(struct device *device)
+{
+	struct dwc3			*dwc;
+	struct platform_device		*pdev;
+	unsigned long			flags;
+	u32				epnum;
+	struct dwc3_ep			*dep;
+
+	pdev = to_platform_device(device);
+	dwc = platform_get_drvdata(pdev);
+
+	if (!dwc || !dwc->hiber_enabled)
+		return 0;
+
+	dev_vdbg(dwc->dev, "---> %s()\n", __func__);
+
+	spin_lock_irqsave(&dwc->lock, flags);
+
+	if (dwc->pm_state != PM_ACTIVE) {
+		spin_unlock_irqrestore(&dwc->lock, flags);
+		return 0;
+	}
+
+
+	for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
+		dep = dwc->eps[epnum];
+		if (!(dep->flags & DWC3_EP_ENABLED))
+			continue;
+
+		dep->flags_backup = dep->flags;
+		if (dep->flags & DWC3_EP_BUSY)
+			dwc3_stop_active_transfer(dwc, epnum, 0);
+
+		dwc3_gadget_get_ep_state(dwc, dep);
+
+		dep->flags = DWC3_EP_HIBERNATION;
+	}
+
+	__dwc3_gadget_run_stop(dwc, 0);
+	dwc3_gadget_keep_conn(dwc, 1);
+
+	dwc3_cache_hwregs(dwc);
+
+	dwc3_gadget_disable_irq(dwc);
+	dwc3_event_buffers_cleanup(dwc);
+
+	dwc3_gadget_controller_save_state(dwc);
+
+	dwc->pm_state = PM_SUSPENDED;
+
+	spin_unlock_irqrestore(&dwc->lock, flags);
+
+	schedule_delayed_work(&dwc->link_work, msecs_to_jiffies(1000));
+	dev_info(dwc->dev, "suspended\n");
+	dev_vdbg(dwc->dev, "<--- %s()\n", __func__);
+
+	return 0;
+}
+
+int dwc3_runtime_resume(struct device *device)
+{
+	struct dwc3			*dwc;
+	struct platform_device		*pdev;
+	unsigned long			flags;
+	int				ret;
+	u32				epnum;
+	u32				timeout = 500;
+	u32				reg;
+	u8				link_state;
+	struct dwc3_ep			*dep;
+
+	pdev = to_platform_device(device);
+	dwc = platform_get_drvdata(pdev);
+
+	if (!dwc || !dwc->hiber_enabled)
+		return 0;
+
+	dev_vdbg(dwc->dev, "---> %s()\n", __func__);
+
+	spin_lock_irqsave(&dwc->lock, flags);
+
+	if (dwc->pm_state == PM_ACTIVE ||
+		dwc->pm_state == PM_DISCONNECTED) {
+		spin_unlock_irqrestore(&dwc->lock, flags);
+		return 0;
+	}
+
+	dwc3_send_gadget_generic_command(dwc, DWC3_DGCMD_SET_SCRATCH_ADDR_LO,
+		dwc->scratch_array_dma & 0xffffffffU);
+
+	dwc3_gadget_controller_restore_state(dwc);
+
+	dwc3_restore_hwregs(dwc);
+
+	dep = dwc->eps[0];
+	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
+	if (ret) {
+		dev_err(dwc->dev, "failed to enable %s during runtime resume\n",
+			dep->name);
+		goto err0;
+	}
+
+	dep = dwc->eps[1];
+	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
+	if (ret) {
+		dev_err(dwc->dev, "failed to enable %s during runtime resume\n",
+			dep->name);
+		goto err1;
+	}
+
+	for (epnum = 0; epnum < 2; epnum++) {
+		struct dwc3_gadget_ep_cmd_params params;
+
+		dep = dwc->eps[epnum];
+		if (dep->flags_backup & DWC3_EP_BUSY) {
+			dwc->ep0_trb->ctrl |= DWC3_TRB_CTRL_HWO;
+
+			memset(&params, 0, sizeof(params));
+			params.param0 = upper_32_bits(dwc->ep0_trb_addr);
+			params.param1 = lower_32_bits(dwc->ep0_trb_addr);
+
+			ret = dwc3_send_gadget_ep_cmd(dwc, epnum,
+					DWC3_DEPCMD_STARTTRANSFER, &params);
+			WARN_ON_ONCE(ret);
+		}
+
+		dep->flags = dep->flags_backup;
+		dep->flags_backup = 0;
+	}
+
+	__dwc3_gadget_run_stop(dwc, 1);
+	dwc3_gadget_keep_conn(dwc, 1);
+
+	do {
+		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
+		if (!(reg & DWC3_DSTS_DCNRD))
+				break;
+
+		timeout--;
+		if (!timeout)
+			break;
+		udelay(1);
+	} while (1);
+
+	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
+	link_state = DWC3_DSTS_USBLNKST(reg);
+	switch (link_state) {
+	case DWC3_LINK_STATE_U3:
+	case DWC3_LINK_STATE_RESUME:
+		dwc3_gadget_conndone_interrupt(dwc);
+
+		for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
+			dep = dwc->eps[epnum];
+			if (!(dep->flags_backup & DWC3_EP_ENABLED))
+				continue;
+			if (dep->endpoint.desc)
+				dwc3_gadget_set_ep_config(dwc,
+					dep, dep->endpoint.desc, dep->comp_desc,
+					false, DWC3_DEPCFG_ACTION_RESTORE);
+
+			dwc3_gadget_set_xfer_resource(dwc, dep);
+
+			reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
+			reg |= DWC3_DALEPENA_EP(epnum);
+			dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
+
+			if (dep->flags_backup & DWC3_EP_STALL)
+				__dwc3_gadget_ep_set_halt(dep, 1);
+
+			if (dep->flags_backup & DWC3_EP_BUSY) {
+				struct dwc3_request			*req;
+				struct dwc3_gadget_ep_cmd_params	params;
+
+				req = next_request(&dep->req_queued);
+				if (!req)
+					break;
+				req->trb->ctrl |= DWC3_TRB_CTRL_HWO;
+				memset(&params, 0, sizeof(params));
+				params.param0 = upper_32_bits(req->trb_dma);
+				params.param1 = lower_32_bits(req->trb_dma);
+
+				ret = dwc3_send_gadget_ep_cmd(dwc, epnum,
+						DWC3_DEPCMD_STARTTRANSFER,
+						&params);
+				WARN_ON_ONCE(ret);
+
+			}
+
+			dep->flags = dep->flags_backup;
+			dep->flags_backup = 0;
+		}
+
+		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+		reg |= DWC3_DCTL_ULSTCHNG_RECOVERY;
+		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+
+		break;
+	case DWC3_LINK_STATE_RESET:
+		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+		reg |= DWC3_DCTL_ULSTCHNG_RECOVERY;
+		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+
+		break;
+	default:
+		/* wait for USB Reset or Connect Done event */
+		break;
+	}
+
+	dwc->pm_state = PM_ACTIVE;
+
+	spin_unlock_irqrestore(&dwc->lock, flags);
+
+	__dwc3_vbus_draw(dwc, OTG_DEVICE_RESUME);
+	dev_info(dwc->dev, "resumed\n");
+	dev_vdbg(dwc->dev, "<--- %s()\n", __func__);
+	return 0;
+
+err1:
+	__dwc3_gadget_ep_disable(dwc->eps[0]);
+
+err0:
+	spin_unlock_irqrestore(&dwc->lock, flags);
+
+	return ret;
+}
+#else
+void dwc3_gadget_keep_conn(struct dwc3 *dwc, int is_on) {}
+#endif
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
index 99e6d72..a691b7b 100644
--- a/drivers/usb/dwc3/gadget.h
+++ b/drivers/usb/dwc3/gadget.h
@@ -47,6 +47,12 @@
 #define to_dwc3_ep(ep)		(container_of(ep, struct dwc3_ep, endpoint))
 #define gadget_to_dwc(g)	(container_of(g, struct dwc3, gadget))
 
+/* max power consumption of the device from the bus */
+#define USB3_I_MAX_OTG		896
+#define USB3_I_UNIT_OTG		144
+#define USB2_I_MAX_OTG		500
+#define USB2_I_UNIT_OTG		100
+
 /* DEPCFG parameter 1 */
 #define DWC3_DEPCFG_INT_NUM(n)		((n) << 0)
 #define DWC3_DEPCFG_XFER_COMPLETE_EN	(1 << 8)
@@ -54,6 +60,7 @@
 #define DWC3_DEPCFG_XFER_NOT_READY_EN	(1 << 10)
 #define DWC3_DEPCFG_FIFO_ERROR_EN	(1 << 11)
 #define DWC3_DEPCFG_STREAM_EVENT_EN	(1 << 13)
+#define DWC3_DEPCFG_EBC_MODE_EN		(1 << 15)
 #define DWC3_DEPCFG_BINTERVAL_M1(n)	((n) << 16)
 #define DWC3_DEPCFG_STREAM_CAPABLE	(1 << 24)
 #define DWC3_DEPCFG_EP_NUMBER(n)	((n) << 25)
diff --git a/drivers/usb/dwc3/otg.c b/drivers/usb/dwc3/otg.c
new file mode 100644
index 0000000..eca13b4
--- /dev/null
+++ b/drivers/usb/dwc3/otg.c
@@ -0,0 +1,1531 @@
+/*
+ * otg.c - Designware USB3 DRD Controller OTG driver
+ *
+ * Authors: Wang Yu <yu.y.wang@intel.com>
+ *		Synopsys inc
+ *
+ * Description:
+ *
+ * This driver is developed base on dwc_otg3.c which provided by
+ * Synopsys company. Yu removed some unused features from it,
+ * for example: HNP/SRP/ADP support. Because haven't use these features
+ * so far. And add charger detection support into the state machine.
+ * Support SDP/CDP/DCP/Micro-ACA/ACA-Dock and SE1 USB charger type.
+ *
+ * Beside that, make all hardware dependence as arguments, which need
+ * vendor to implemented by themselves. For example: VBus drive, USB ID
+ * pin value and so on.
+ *
+ * To enable this OTG driver, user have to call dwc3_otg_register API to
+ * regiter one dwc3_otg_hw_ops object which include all hardware
+ * dependent code.
+ *
+ * License:
+ * Below declaration is copy from Synopsys DWC3 SW 2.10a released README.txt.
+ *
+ * IMPORTANT:
+ *
+ * Synopsys SS USB3 Linux Driver Software and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/usb/otg.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/freezer.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/version.h>
+#include <linux/wakelock.h>
+
+#include "otg.h"
+
+#define VERSION "2.10a"
+
+struct dwc3_otg_hw_ops *dwc3_otg_pdata;
+struct dwc_device_par *platform_par;
+
+static struct mutex lock;
+static struct wake_lock wakelock;
+static const char driver_name[] = "dwc3_otg";
+static struct dwc_otg2 *the_transceiver;
+static void dwc_otg_remove(struct pci_dev *pdev);
+
+
+static inline struct dwc_otg2 *xceiv_to_dwc_otg2(struct usb_otg *x)
+{
+	return container_of(x, struct dwc_otg2, otg);
+}
+
+struct dwc_otg2 *dwc3_get_otg(void)
+{
+	return the_transceiver;
+}
+EXPORT_SYMBOL_GPL(dwc3_get_otg);
+
+/* Caller must hold otg->lock */
+void dwc3_wakeup_otg_thread(struct dwc_otg2 *otg)
+{
+	if (!otg->main_thread)
+		return;
+
+	otg_dbg(otg, "\n");
+	/* Tell the main thread that something has happened */
+	otg->main_wakeup_needed = 1;
+	wake_up_interruptible(&otg->main_wq);
+}
+EXPORT_SYMBOL_GPL(dwc3_wakeup_otg_thread);
+
+static int sleep_main_thread_timeout(struct dwc_otg2 *otg, int msecs)
+{
+	signed long jiffies;
+	int rc = msecs;
+
+	if (otg->state == DWC_STATE_EXIT) {
+		otg_dbg(otg, "Main thread exiting\n");
+		rc = -EINTR;
+		goto done;
+	}
+
+	if (signal_pending(current)) {
+		otg_dbg(otg, "Main thread signal pending\n");
+		rc = -EINTR;
+		goto done;
+	}
+	if (otg->main_wakeup_needed) {
+		otg_dbg(otg, "Main thread wakeup needed\n");
+		rc = msecs;
+		goto done;
+	}
+
+	jiffies = msecs_to_jiffies(msecs);
+	rc = wait_event_freezable_timeout(otg->main_wq,
+					otg->main_wakeup_needed,
+					jiffies);
+
+	if (otg->state == DWC_STATE_EXIT) {
+		otg_dbg(otg, "Main thread exiting\n");
+		rc = -EINTR;
+		goto done;
+	}
+
+	if (rc > 0)
+		rc = jiffies_to_msecs(rc);
+
+done:
+	otg->main_wakeup_needed = 0;
+	return rc;
+}
+
+static int sleep_main_thread(struct dwc_otg2 *otg)
+{
+	int rc = 0;
+
+	do {
+		rc = sleep_main_thread_timeout(otg, 5000);
+	} while (rc == 0);
+
+	return rc;
+}
+
+static void get_and_clear_events(struct dwc_otg2 *otg,
+				u32 otg_mask,
+				u32 user_mask,
+				u32 *otg_events,
+				u32 *user_events)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&otg->lock, flags);
+
+	if (otg_events) {
+		if (otg->otg_events & otg_mask) {
+			*otg_events = otg->otg_events;
+			otg->otg_events &= ~otg_mask;
+		} else
+			*otg_events = 0;
+	}
+
+	if (user_events) {
+		if (otg->user_events & user_mask) {
+			*user_events = otg->user_events;
+			otg->user_events &= ~user_mask;
+		} else
+			*user_events = 0;
+	}
+
+	spin_unlock_irqrestore(&otg->lock, flags);
+}
+
+static int check_event(struct dwc_otg2 *otg,
+		u32 otg_mask,
+		u32 user_mask,
+		u32 *otg_events,
+		u32 *user_events)
+{
+	get_and_clear_events(otg, otg_mask, user_mask,
+			otg_events, user_events);
+
+	otg_dbg(otg, "Event occurred:");
+
+	if (otg_events && (*otg_events & otg_mask)) {
+		otg_dbg(otg, "otg_events=0x%x, otg_mask=0x%x",
+				*otg_events, otg_mask);
+		return 1;
+	}
+
+	if (user_events && (*user_events & user_mask)) {
+		otg_dbg(otg, "user_events=0x%x, user_mask=0x%x",
+				*user_events, user_mask);
+		return 1;
+	}
+
+	return 0;
+}
+
+static int sleep_until_event(struct dwc_otg2 *otg,
+			u32 otg_mask, u32 user_mask,
+			u32 *otg_events, u32 *user_events,
+			int timeout)
+{
+	int rc = 0;
+
+	pm_runtime_mark_last_busy(otg->dev);
+	pm_runtime_put_autosuspend(otg->dev);
+	/* Wait until it occurs, or timeout, or interrupt. */
+	if (timeout) {
+		otg_dbg(otg, "Waiting for event (timeout=%d)...\n", timeout);
+		rc = sleep_main_thread_until_condition_timeout(otg,
+				check_event(otg, otg_mask,
+				user_mask, otg_events, user_events), timeout);
+	} else {
+		otg_dbg(otg, "Waiting for event (no timeout)...\n");
+		rc = sleep_main_thread_until_condition(otg,
+				check_event(otg, otg_mask,
+					user_mask, otg_events, user_events));
+	}
+	pm_runtime_get_sync(otg->dev);
+
+	/* Disable the events */
+	otg_write(otg, OEVTEN, 0);
+	otg_write(otg, ADPEVTEN, 0);
+
+	otg_dbg(otg, "Woke up rc=%d\n", rc);
+
+	return rc;
+}
+
+
+static int start_host(struct dwc_otg2 *otg)
+{
+	int ret = 0;
+	struct usb_hcd *hcd = NULL;
+
+	otg_dbg(otg, "\n");
+
+	if (!otg->otg.host) {
+		otg_err(otg, "Haven't set host yet!\n");
+		return -ENODEV;
+	}
+
+	if (dwc3_otg_pdata->prepare_start_host)
+		ret = dwc3_otg_pdata->prepare_start_host(otg);
+
+	/* Start host driver */
+	hcd = container_of(otg->otg.host, struct usb_hcd, self);
+	ret = otg->start_host(hcd);
+
+	return ret;
+}
+
+static int stop_host(struct dwc_otg2 *otg)
+{
+	int ret = -1;
+	struct usb_hcd *hcd = NULL;
+
+	otg_dbg(otg, "\n");
+
+	hcd = container_of(otg->otg.host, struct usb_hcd, self);
+	if (otg->otg.host)
+		ret = otg->stop_host(hcd);
+
+	if (dwc3_otg_pdata->after_stop_host)
+		ret = dwc3_otg_pdata->after_stop_host(otg);
+
+	return ret;
+}
+
+static void start_peripheral(struct dwc_otg2 *otg)
+{
+	struct usb_gadget *gadget;
+	int ret;
+
+	if (dwc3_otg_pdata->prepare_start_peripheral)
+		ret = dwc3_otg_pdata->prepare_start_peripheral(otg);
+
+	gadget = otg->otg.gadget;
+	if (!gadget) {
+		otg_err(otg, "Haven't set gadget yet!\n");
+		return;
+	}
+
+	otg->start_device(gadget);
+}
+
+static void stop_peripheral(struct dwc_otg2 *otg)
+{
+	struct usb_gadget *gadget = otg->otg.gadget;
+	int ret;
+
+	if (!gadget)
+		return;
+
+	otg->stop_device(gadget);
+
+	if (dwc3_otg_pdata->after_stop_peripheral)
+		ret = dwc3_otg_pdata->after_stop_peripheral(otg);
+}
+
+static int get_id(struct dwc_otg2 *otg)
+{
+	if (dwc3_otg_pdata->get_id)
+		return dwc3_otg_pdata->get_id(otg);
+	return RID_UNKNOWN;
+}
+
+static int dwc_otg_notify_charger_type(struct dwc_otg2 *otg,
+		enum power_supply_charger_event event)
+{
+	if (dwc3_otg_pdata->notify_charger_type)
+		return dwc3_otg_pdata->notify_charger_type(otg, event);
+
+	return 0;
+}
+
+static int dwc_otg_get_chrg_status(struct usb_phy *x, void *data)
+{
+	unsigned long flags;
+	struct power_supply_cable_props *cap =
+		(struct power_supply_cable_props *)data;
+	struct dwc_otg2 *otg = the_transceiver;
+
+	if (!x)
+		return -ENODEV;
+
+	if (!data)
+		return -EINVAL;
+
+	spin_lock_irqsave(&otg->lock, flags);
+	cap->chrg_type = otg->charging_cap.chrg_type;
+	cap->chrg_evt = otg->charging_cap.chrg_evt;
+	cap->ma = otg->charging_cap.ma;
+	spin_unlock_irqrestore(&otg->lock, flags);
+
+	return 0;
+}
+
+static int dwc_otg_enable_vbus(struct dwc_otg2 *otg, int enable)
+{
+	if (dwc3_otg_pdata->enable_vbus)
+		return dwc3_otg_pdata->enable_vbus(otg, enable);
+
+	return -EINVAL;
+}
+
+static int is_self_powered_b_device(struct dwc_otg2 *otg)
+{
+	return get_id(otg) == RID_GND;
+}
+
+static enum dwc_otg_state do_wait_vbus_raise(struct dwc_otg2 *otg)
+{
+	int ret;
+	u32 otg_events = 0;
+	u32 user_events = 0;
+	u32 otg_mask = 0;
+	u32 user_mask = 0;
+
+	otg_mask = OEVT_B_DEV_SES_VLD_DET_EVNT;
+
+	ret = sleep_until_event(otg, otg_mask,
+			user_mask, &otg_events,
+			&user_events, VBUS_TIMEOUT);
+	if (ret < 0)
+		return DWC_STATE_EXIT;
+
+	if (otg_events & OEVT_B_DEV_SES_VLD_DET_EVNT) {
+		otg_dbg(otg, "OEVT_B_SES_VLD_EVT\n");
+		return DWC_STATE_CHARGER_DETECTION;
+	}
+
+	/* timeout*/
+	if (!ret)
+		return DWC_STATE_A_HOST;
+
+	return DWC_STATE_B_IDLE;
+}
+
+static enum dwc_otg_state do_wait_vbus_fall(struct dwc_otg2 *otg)
+{
+	int ret;
+
+	u32 otg_events = 0;
+	u32 user_events = 0;
+	u32 otg_mask = 0;
+	u32 user_mask = 0;
+
+	otg_mask = OEVT_A_DEV_SESS_END_DET_EVNT;
+
+	ret = sleep_until_event(otg, otg_mask,
+			user_mask, &otg_events,
+			&user_events, VBUS_TIMEOUT);
+	if (ret < 0)
+		return DWC_STATE_EXIT;
+
+	if (otg_events & OEVT_A_DEV_SESS_END_DET_EVNT) {
+		otg_dbg(otg, "OEVT_A_DEV_SESS_END_DET_EVNT\n");
+		if (otg->charging_cap.chrg_type ==
+				POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK)
+			dwc_otg_notify_charger_type(otg,
+				POWER_SUPPLY_CHARGER_EVENT_DISCONNECT);
+		return DWC_STATE_B_IDLE;
+	}
+
+	/* timeout*/
+	if (!ret) {
+		otg_err(otg, "Haven't get VBus drop event! Maybe something wrong\n");
+		return DWC_STATE_B_IDLE;
+	}
+
+	return DWC_STATE_INVALID;
+}
+
+static enum dwc_otg_state do_charging(struct dwc_otg2 *otg)
+{
+	int ret;
+	u32 otg_events = 0;
+	u32 user_events = 0;
+	u32 otg_mask = 0;
+	u32 user_mask = 0;
+
+	otg_mask = OEVT_A_DEV_SESS_END_DET_EVNT;
+
+	if (dwc3_otg_pdata->do_charging)
+		dwc3_otg_pdata->do_charging(otg);
+
+	ret = sleep_until_event(otg, otg_mask,
+			user_mask, &otg_events,
+			&user_events, 0);
+	if (ret < 0)
+		return DWC_STATE_EXIT;
+
+	if (otg_events & OEVT_A_DEV_SESS_END_DET_EVNT) {
+		otg_dbg(otg, "OEVT_A_DEV_SESS_END_DET_EVNT\n");
+		dwc_otg_notify_charger_type(otg,
+				POWER_SUPPLY_CHARGER_EVENT_DISCONNECT);
+		return DWC_STATE_B_IDLE;
+	}
+
+	return DWC_STATE_INVALID;
+}
+
+static enum power_supply_charger_cable_type
+		get_charger_type(struct dwc_otg2 *otg)
+{
+	if (dwc3_otg_pdata->get_charger_type)
+		return dwc3_otg_pdata->get_charger_type(otg);
+
+	return POWER_SUPPLY_CHARGER_TYPE_NONE;
+}
+
+static enum dwc_otg_state do_charger_detection(struct dwc_otg2 *otg)
+{
+	enum dwc_otg_state state = DWC_STATE_INVALID;
+	enum power_supply_charger_cable_type charger =
+			POWER_SUPPLY_CHARGER_TYPE_NONE;
+	unsigned long flags, ma = 0;
+
+	charger = get_charger_type(otg);
+	switch (charger) {
+	case POWER_SUPPLY_CHARGER_TYPE_ACA_A:
+	case POWER_SUPPLY_CHARGER_TYPE_ACA_B:
+	case POWER_SUPPLY_CHARGER_TYPE_ACA_C:
+		otg_err(otg, "Ignore micro ACA charger.\n");
+		charger = POWER_SUPPLY_CHARGER_TYPE_NONE;
+		break;
+	case POWER_SUPPLY_CHARGER_TYPE_USB_SDP:
+	case POWER_SUPPLY_CHARGER_TYPE_USB_CDP:
+		state = DWC_STATE_B_PERIPHERAL;
+		break;
+	case POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK:
+	case POWER_SUPPLY_CHARGER_TYPE_B_DEVICE:
+		state = DWC_STATE_A_HOST;
+		break;
+	case POWER_SUPPLY_CHARGER_TYPE_USB_DCP:
+	case POWER_SUPPLY_CHARGER_TYPE_SE1:
+		state = DWC_STATE_CHARGING;
+		break;
+	case POWER_SUPPLY_CHARGER_TYPE_NONE:
+	default:
+		if (is_self_powered_b_device(otg)) {
+			state = DWC_STATE_A_HOST;
+			charger = POWER_SUPPLY_CHARGER_TYPE_B_DEVICE;
+			break;
+		}
+	};
+
+	switch (charger) {
+	case POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK:
+	case POWER_SUPPLY_CHARGER_TYPE_ACA_A:
+	case POWER_SUPPLY_CHARGER_TYPE_ACA_B:
+	case POWER_SUPPLY_CHARGER_TYPE_ACA_C:
+	case POWER_SUPPLY_CHARGER_TYPE_USB_DCP:
+	case POWER_SUPPLY_CHARGER_TYPE_USB_CDP:
+	case POWER_SUPPLY_CHARGER_TYPE_SE1:
+		ma = 1500;
+		break;
+	case POWER_SUPPLY_CHARGER_TYPE_USB_SDP:
+	case POWER_SUPPLY_CHARGER_TYPE_B_DEVICE:
+		break;
+	default:
+		otg_err(otg, "Charger type is not valid to notify battery\n");
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&otg->lock, flags);
+	otg->charging_cap.chrg_type = charger;
+	otg->charging_cap.ma = ma;
+	spin_unlock_irqrestore(&otg->lock, flags);
+
+	switch (charger) {
+	case POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK:
+	case POWER_SUPPLY_CHARGER_TYPE_USB_DCP:
+	case POWER_SUPPLY_CHARGER_TYPE_USB_CDP:
+	case POWER_SUPPLY_CHARGER_TYPE_SE1:
+		if (dwc_otg_notify_charger_type(otg,
+					POWER_SUPPLY_CHARGER_EVENT_CONNECT) < 0)
+			otg_err(otg, "Notify battery type failed!\n");
+		break;
+	case POWER_SUPPLY_CHARGER_TYPE_USB_SDP:
+	/* SDP is complicate, it will be handle in set_power */
+	default:
+		break;
+	}
+
+	return state;
+}
+
+static enum dwc_otg_state do_connector_id_status(struct dwc_otg2 *otg)
+{
+	int ret, id;
+	unsigned long flags;
+	u32 events = 0, user_events = 0;
+	u32 otg_mask = 0, user_mask = 0;
+
+	otg_dbg(otg, "\n");
+	spin_lock_irqsave(&otg->lock, flags);
+	otg->charging_cap.chrg_type = POWER_SUPPLY_CHARGER_TYPE_NONE;
+	otg->charging_cap.ma = 0;
+	otg->charging_cap.chrg_evt = POWER_SUPPLY_CHARGER_EVENT_DISCONNECT;
+	spin_unlock_irqrestore(&otg->lock, flags);
+
+stay_b_idle:
+	otg_mask = OEVT_CONN_ID_STS_CHNG_EVNT |
+			OEVT_B_DEV_SES_VLD_DET_EVNT;
+
+	user_mask = USER_ID_B_CHANGE_EVENT |
+				USER_ID_A_CHANGE_EVENT;
+
+	if (dwc3_otg_pdata->b_idle)
+		dwc3_otg_pdata->b_idle(otg);
+
+	ret = sleep_until_event(otg, otg_mask,
+			user_mask, &events,
+			&user_events, 0);
+	if (ret < 0)
+		return DWC_STATE_EXIT;
+
+	if (events & OEVT_B_DEV_SES_VLD_DET_EVNT) {
+		otg_dbg(otg, "OEVT_B_DEV_SES_VLD_DET_EVNT\n");
+		return DWC_STATE_CHARGER_DETECTION;
+	}
+
+	if (events & OEVT_CONN_ID_STS_CHNG_EVNT) {
+		otg_dbg(otg, "OEVT_CONN_ID_STS_CHNG_EVNT\n");
+
+		/* Prevent user fast plug out after plug in.
+		 * It will cause the first ID change event lost.
+		 * So need to check real ID currently.
+		 */
+		id = get_id(otg);
+		if (id == RID_FLOAT) {
+			otg_dbg(otg, "Stay DWC_STATE_INIT\n");
+			goto stay_b_idle;
+		} else if (id == RID_GND)
+			return DWC_STATE_A_HOST;
+		else
+			return DWC_STATE_CHARGER_DETECTION;
+	}
+
+	if (user_events & USER_ID_A_CHANGE_EVENT) {
+		otg_dbg(otg, "events is user id A change\n");
+		return DWC_STATE_A_HOST;
+	}
+
+	if (user_events & USER_ID_B_CHANGE_EVENT) {
+		otg_dbg(otg, "events is user id B change\n");
+		return DWC_STATE_B_PERIPHERAL;
+	}
+
+	return DWC_STATE_B_IDLE;
+}
+
+static enum dwc_otg_state do_a_host(struct dwc_otg2 *otg)
+{
+	int rc = 0;
+	u32 otg_events, user_events, otg_mask, user_mask;
+	int id = RID_UNKNOWN;
+	unsigned long flags;
+
+	/* If Battery low and connected charger is not ACA-DOCK.
+	 * Then stop trying to start host mode. */
+	if ((otg->usb2_phy.vbus_state == VBUS_DISABLED) &&
+			(otg->charging_cap.chrg_type !=
+			POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK)) {
+		otg_uevent_trigger(&otg->usb2_phy);
+		return DWC_STATE_B_IDLE;
+	}
+
+	if (otg->charging_cap.chrg_type !=
+			POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK) {
+		dwc_otg_enable_vbus(otg, 1);
+
+		/* meant receive vbus valid event*/
+		if (do_wait_vbus_raise(otg) == DWC_STATE_A_HOST)
+			otg_err(otg, "Drive VBUS maybe fail!\n");
+	}
+
+	rc = start_host(otg);
+	if (rc < 0) {
+		stop_host(otg);
+		otg_err(otg, "start_host failed!");
+		return DWC_STATE_INVALID;
+	}
+
+stay_host:
+	otg_events = 0;
+	user_events = 0;
+
+	user_mask = USER_A_BUS_DROP |
+				USER_ID_B_CHANGE_EVENT;
+	otg_mask = OEVT_CONN_ID_STS_CHNG_EVNT;
+
+	rc = sleep_until_event(otg,
+			otg_mask, user_mask,
+			&otg_events, &user_events, 0);
+	if (rc < 0) {
+		stop_host(otg);
+		return DWC_STATE_EXIT;
+	}
+
+	/* Higher priority first */
+	if (otg_events & OEVT_A_DEV_SESS_END_DET_EVNT) {
+		otg_dbg(otg, "OEVT_A_DEV_SESS_END_DET_EVNT\n");
+
+		/* ACA-Dock plug out */
+		if (otg->charging_cap.chrg_type ==
+				POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK)
+			dwc_otg_notify_charger_type(otg,
+					POWER_SUPPLY_CHARGER_EVENT_DISCONNECT);
+		else
+			dwc_otg_enable_vbus(otg, 0);
+
+		stop_host(otg);
+		return DWC_STATE_B_IDLE;
+	}
+
+	if (user_events & USER_A_BUS_DROP) {
+		/* Due to big consume by DUT, even ACA-Dock connected,
+		 * the battery capability still maybe decrease. For this
+		 * case, still save host mode. Because DUT haven't drive VBus.*/
+		if (otg->charging_cap.chrg_type ==
+				POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK)
+			goto stay_host;
+
+		dwc_otg_enable_vbus(otg, 0);
+		stop_host(otg);
+		return DWC_STATE_B_IDLE;
+	}
+
+	if (otg_events & OEVT_CONN_ID_STS_CHNG_EVNT) {
+		otg_dbg(otg, "OEVT_CONN_ID_STS_CHNG_EVNT\n");
+		id = get_id(otg);
+
+		/* Plug out ACA_DOCK/USB device */
+		if (id == RID_FLOAT) {
+			if (otg->charging_cap.chrg_type ==
+					POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK) {
+				/* ACA_DOCK plug out, receive
+				 * id change prior to vBus change
+				 */
+				dwc_otg_notify_charger_type(otg,
+					POWER_SUPPLY_CHARGER_EVENT_DISCONNECT);
+
+				stop_host(otg);
+			} else {
+				/* Normal USB device plug out */
+				spin_lock_irqsave(&otg->lock, flags);
+				otg->charging_cap.chrg_type =
+					POWER_SUPPLY_CHARGER_TYPE_NONE;
+				spin_unlock_irqrestore(&otg->lock, flags);
+
+				stop_host(otg);
+				dwc_otg_enable_vbus(otg, 0);
+			}
+		} else if (id == RID_GND || id == RID_A) {
+			otg_dbg(otg, "Stay DWC_STATE_A_HOST!!\n");
+			/* Prevent user fast plug in after plug out.
+			 * It will cause the first ID change event lost.
+			 * So need to check real ID currently.
+			 */
+			goto stay_host;
+		} else {
+			otg_err(otg, "Meet invalid charger cases!");
+			spin_lock_irqsave(&otg->lock, flags);
+			otg->charging_cap.chrg_type =
+				POWER_SUPPLY_CHARGER_TYPE_NONE;
+			spin_unlock_irqrestore(&otg->lock, flags);
+
+			stop_host(otg);
+		}
+		return DWC_STATE_WAIT_VBUS_FALL;
+	}
+
+	/* Higher priority first */
+	if (user_events & USER_ID_B_CHANGE_EVENT) {
+		otg_dbg(otg, "USER_ID_B_CHANGE_EVENT\n");
+		stop_host(otg);
+		otg->user_events |= USER_ID_B_CHANGE_EVENT;
+		return DWC_STATE_B_IDLE;
+	}
+
+	/* Invalid state */
+	return DWC_STATE_INVALID;
+}
+
+static int do_b_peripheral(struct dwc_otg2 *otg)
+{
+	int rc = 0;
+	u32 otg_mask, user_mask, otg_events, user_events;
+
+	otg_mask = 0;
+	user_mask = 0;
+	otg_events = 0;
+	user_events = 0;
+
+	otg_mask = OEVT_A_DEV_SESS_END_DET_EVNT;
+	user_mask = USER_ID_A_CHANGE_EVENT;
+
+	rc = sleep_until_event(otg,
+			otg_mask, user_mask,
+			&otg_events, &user_events, 0);
+	if (rc < 0)
+		return DWC_STATE_EXIT;
+
+	if (otg_events & OEVT_A_DEV_SESS_END_DET_EVNT) {
+		otg_dbg(otg, "OEVT_A_DEV_SESS_END_DET_EVNT\n");
+		dwc_otg_notify_charger_type(otg,
+				POWER_SUPPLY_CHARGER_EVENT_DISCONNECT);
+		return DWC_STATE_B_IDLE;
+	}
+
+	if (user_events & USER_ID_A_CHANGE_EVENT) {
+		otg_dbg(otg, "USER_ID_A_CHANGE_EVENT\n");
+		otg->user_events |= USER_ID_A_CHANGE_EVENT;
+		return DWC_STATE_B_IDLE;
+	}
+
+	return DWC_STATE_INVALID;
+}
+
+/* Charger driver may send ID change and VBus change event to OTG driver.
+ * This is like IRQ handler, just the event source is from charger driver.
+ * Because on Merrifield platform, the ID line and VBus line are connect to
+ * PMic which can make USB controller and PHY power off to save power.
+ */
+static int dwc_otg_handle_notification(struct notifier_block *nb,
+		unsigned long event, void *data)
+{
+	if (dwc3_otg_pdata->otg_notifier_handler) {
+		/* hold wakelock for a while to block S3, avoid missing
+		 * events if S3 entry during notification handling */
+		wake_lock_timeout(&wakelock, msecs_to_jiffies(300));
+		return dwc3_otg_pdata->otg_notifier_handler(nb, event, data);
+	}
+
+	return NOTIFY_DONE;
+}
+
+int otg_main_thread(void *data)
+{
+	struct dwc_otg2 *otg = (struct dwc_otg2 *)data;
+
+	/* Allow the thread to be killed by a signal, but set the signal mask
+	 * to block everything but INT, TERM, KILL, and USR1. */
+	allow_signal(SIGINT);
+	allow_signal(SIGTERM);
+	allow_signal(SIGKILL);
+	allow_signal(SIGUSR1);
+
+	pm_runtime_get_sync(otg->dev);
+
+	/* Allow the thread to be frozen */
+	set_freezable();
+
+	otg_dbg(otg, "Thread running\n");
+	while (otg->state != DWC_STATE_TERMINATED) {
+		int next = DWC_STATE_B_IDLE;
+		otg_dbg(otg, "\n\n\nMain thread entering state\n");
+
+		switch (otg->state) {
+		case DWC_STATE_B_IDLE:
+			otg_dbg(otg, "DWC_STATE_B_IDLE\n");
+			next = do_connector_id_status(otg);
+			break;
+		case DWC_STATE_CHARGER_DETECTION:
+			otg_dbg(otg, "DWC_STATE_CHARGER_DETECTION\n");
+			next = do_charger_detection(otg);
+			break;
+		case DWC_STATE_WAIT_VBUS_FALL:
+			otg_dbg(otg, "DWC_STATE_WAIT_VBUS_FALL\n");
+			next = do_wait_vbus_fall(otg);
+			break;
+		case DWC_STATE_CHARGING:
+			otg_dbg(otg, "DWC_STATE_CHARGING\n");
+			next = do_charging(otg);
+			break;
+		case DWC_STATE_A_HOST:
+			otg_dbg(otg, "DWC_STATE_A_HOST\n");
+			next = do_a_host(otg);
+			break;
+		case DWC_STATE_B_PERIPHERAL:
+			otg_dbg(otg, "DWC_STATE_B_PERIPHERAL\n");
+			start_peripheral(otg);
+			next = do_b_peripheral(otg);
+
+			stop_peripheral(otg);
+			break;
+		case DWC_STATE_EXIT:
+			otg_dbg(otg, "DWC_STATE_EXIT\n");
+			next = DWC_STATE_TERMINATED;
+			break;
+		case DWC_STATE_INVALID:
+			otg_dbg(otg, "DWC_STATE_INVALID!!!\n");
+		default:
+			otg_dbg(otg, "Unknown State %d, sleeping...\n",
+					otg->state);
+			sleep_main_thread(otg);
+			break;
+		}
+
+		otg->prev = otg->state;
+		otg->state = next;
+	}
+
+	pm_runtime_mark_last_busy(otg->dev);
+	pm_runtime_put_autosuspend(otg->dev);
+	otg->main_thread = NULL;
+	otg_dbg(otg, "OTG main thread exiting....\n");
+
+	return 0;
+}
+
+static void start_main_thread(struct dwc_otg2 *otg)
+{
+	enum dwc3_otg_mode mode = dwc3_otg_pdata->mode;
+	bool children_ready = false;
+
+	mutex_lock(&lock);
+
+	if ((mode == DWC3_DEVICE_ONLY) &&
+			otg->otg.gadget)
+		children_ready = true;
+
+	if ((mode == DWC3_HOST_ONLY) &&
+			otg->otg.host)
+		children_ready = true;
+
+	if ((mode == DWC3_DRD) &&
+			otg->otg.host && otg->otg.gadget)
+		children_ready = true;
+
+	if (!otg->main_thread && children_ready) {
+		otg_dbg(otg, "Starting OTG main thread\n");
+		otg->main_thread = kthread_create(otg_main_thread, otg, "otg");
+		wake_up_process(otg->main_thread);
+	}
+	mutex_unlock(&lock);
+}
+
+static void stop_main_thread(struct dwc_otg2 *otg)
+{
+	mutex_lock(&lock);
+	if (otg->main_thread) {
+		otg_dbg(otg, "Stopping OTG main thread\n");
+		otg->state = DWC_STATE_EXIT;
+		dwc3_wakeup_otg_thread(otg);
+	}
+	mutex_unlock(&lock);
+}
+
+static int dwc_otg2_set_peripheral(struct usb_otg *x,
+		struct usb_gadget *gadget)
+{
+	struct dwc_otg2 *otg;
+
+	if (!x)
+		return -ENODEV;
+
+	otg = xceiv_to_dwc_otg2(x);
+	otg_dbg(otg, "\n");
+
+	if (!gadget) {
+		otg->otg.gadget = NULL;
+		stop_main_thread(otg);
+		return -ENODEV;
+	}
+
+	otg->otg.gadget = gadget;
+	otg->usb2_phy.state = OTG_STATE_B_IDLE;
+	start_main_thread(otg);
+	return 0;
+}
+
+static int dwc_otg2_set_host(struct usb_otg *x, struct usb_bus *host)
+{
+	struct dwc_otg2 *otg;
+
+	if (!x)
+		return -ENODEV;
+
+	otg = xceiv_to_dwc_otg2(x);
+	otg_dbg(otg, "\n");
+
+	if (!host) {
+		otg->otg.host = NULL;
+		stop_main_thread(otg);
+		return -ENODEV;
+	}
+
+	otg->otg.host = host;
+	start_main_thread(otg);
+	return 0;
+}
+
+static int ulpi_read(struct usb_phy *phy, u32 reg)
+{
+	struct dwc_otg2 *otg = container_of(phy, struct dwc_otg2, usb2_phy);
+	u32 val32 = 0, count = 200;
+	u8 val, tmp;
+
+	if (phy->intf != USB2_PHY_ULPI)
+		return -ENODEV;
+
+	reg &= 0xFF;
+
+	while (count) {
+		if (otg_read(otg, GUSB2PHYACC0) & GUSB2PHYACC0_VSTSBSY)
+			udelay(5);
+		else
+			break;
+
+		count--;
+	}
+
+	if (!count) {
+		otg_err(otg, "USB2 PHY always busy!!\n");
+		return -EBUSY;
+	}
+
+	count = 200;
+	/* Determine if use extend registers access */
+	if (reg & EXTEND_ULPI_REGISTER_ACCESS_MASK) {
+		otg_dbg(otg, "Access extend registers 0x%x\n", reg);
+		val32 = GUSB2PHYACC0_NEWREGREQ
+			| GUSB2PHYACC0_REGADDR(ULPI_ACCESS_EXTENDED)
+			| GUSB2PHYACC0_VCTRL(reg);
+	} else {
+		otg_dbg(otg, "Access normal registers 0x%x\n", reg);
+		val32 = GUSB2PHYACC0_NEWREGREQ | GUSB2PHYACC0_REGADDR(reg)
+			| GUSB2PHYACC0_VCTRL(0x00);
+	}
+	otg_write(otg, GUSB2PHYACC0, val32);
+
+	while (count) {
+		if (otg_read(otg, GUSB2PHYACC0) & GUSB2PHYACC0_VSTSDONE) {
+			val = otg_read(otg, GUSB2PHYACC0) &
+				  GUSB2PHYACC0_REGDATA_MASK;
+			otg_dbg(otg, "%s - reg 0x%x data 0x%x\n",
+					__func__, reg, val);
+			goto cleanup;
+		}
+
+		count--;
+	}
+
+	otg_err(otg, "%s read PHY data failed.\n", __func__);
+
+	return -ETIMEDOUT;
+
+cleanup:
+	/* Clear GUSB2PHYACC0[16:21] before return.
+	 * Otherwise, it will cause PHY can't in workable
+	 * state. This is one dwc3 controller silicon bug. */
+	tmp = otg_read(otg, GUSB2PHYACC0);
+	otg_write(otg, GUSB2PHYACC0, tmp &
+			~GUSB2PHYACC0_REGADDR(0x3F));
+	return val;
+}
+
+static int ulpi_write(struct usb_phy *phy, u32 val, u32 reg)
+{
+	struct dwc_otg2 *otg = container_of(phy, struct dwc_otg2, usb2_phy);
+	u32 val32 = 0, count = 200;
+	u8 tmp;
+
+	if (phy->intf != USB2_PHY_ULPI)
+		return -ENODEV;
+
+	val &= 0xFF;
+	reg &= 0xFF;
+
+	while (count) {
+		if (otg_read(otg, GUSB2PHYACC0) & GUSB2PHYACC0_VSTSBSY)
+			udelay(5);
+		else
+			break;
+
+		count--;
+	}
+
+	if (!count) {
+		otg_err(otg, "USB2 PHY always busy!!\n");
+		return -EBUSY;
+	}
+
+	count = 200;
+
+	if (reg & EXTEND_ULPI_REGISTER_ACCESS_MASK) {
+		otg_dbg(otg, "Access extend registers 0x%x\n", reg);
+		val32 = GUSB2PHYACC0_NEWREGREQ
+			| GUSB2PHYACC0_REGADDR(ULPI_ACCESS_EXTENDED)
+			| GUSB2PHYACC0_VCTRL(reg)
+			| GUSB2PHYACC0_REGWR | GUSB2PHYACC0_REGDATA(val);
+	} else {
+		otg_dbg(otg, "Access normal registers 0x%x\n", reg);
+		val32 = GUSB2PHYACC0_NEWREGREQ
+			| GUSB2PHYACC0_REGADDR(reg)
+			| GUSB2PHYACC0_REGWR
+			| GUSB2PHYACC0_REGDATA(val);
+	}
+	otg_write(otg, GUSB2PHYACC0, val32);
+
+	while (count) {
+		if (otg_read(otg, GUSB2PHYACC0) & GUSB2PHYACC0_VSTSDONE) {
+			otg_dbg(otg, "%s - reg 0x%x data 0x%x write done\n",
+					__func__, reg, val);
+			goto cleanup;
+		}
+
+		count--;
+	}
+
+	otg_err(otg, "%s read PHY data failed.\n", __func__);
+
+	return -ETIMEDOUT;
+
+cleanup:
+	/* Clear GUSB2PHYACC0[16:21] before return.
+	 * Otherwise, it will cause PHY can't in workable
+	 * state. This is one dwc3 controller silicon bug. */
+	tmp = otg_read(otg, GUSB2PHYACC0);
+	otg_write(otg, GUSB2PHYACC0, tmp &
+			~GUSB2PHYACC0_REGADDR(0x3F));
+	return 0;
+}
+
+static struct usb_phy_io_ops dwc_otg_io_ops = {
+	.read = ulpi_read,
+	.write = ulpi_write,
+};
+
+static struct dwc_otg2 *dwc3_otg_alloc(struct device *dev)
+{
+	struct dwc_otg2 *otg = NULL;
+	struct usb_phy *usb_phy;
+	int retval;
+
+	otg = kzalloc(sizeof(*otg), GFP_KERNEL);
+	if (!otg) {
+		dev_err(dev, "Alloc otg failed\n");
+		return NULL;
+	}
+
+	the_transceiver = otg;
+	otg->otg_data = dev->platform_data;
+
+	usb_phy = &otg->usb2_phy;
+	otg->otg.phy = usb_phy;
+	otg->usb2_phy.otg = &otg->otg;
+
+	otg->dev		= dev;
+	otg->usb3_phy.dev		= otg->dev;
+	otg->usb3_phy.label		= "dwc-usb3-phy";
+	otg->usb3_phy.state		= OTG_STATE_UNDEFINED;
+	otg->usb3_phy.otg	= &otg->otg;
+	otg->usb2_phy.dev		= otg->dev;
+	otg->usb2_phy.label		= "dwc-usb2-phy";
+	otg->usb2_phy.state		= OTG_STATE_UNDEFINED;
+	otg->usb2_phy.set_power	= dwc3_otg_pdata->set_power;
+	otg->usb2_phy.get_chrg_status	= dwc_otg_get_chrg_status;
+	otg->usb2_phy.io_ops = &dwc_otg_io_ops;
+	otg->usb2_phy.otg	= &otg->otg;
+	otg->otg.set_host	= dwc_otg2_set_host;
+	otg->otg.set_peripheral	= dwc_otg2_set_peripheral;
+	ATOMIC_INIT_NOTIFIER_HEAD(&otg->usb2_phy.notifier);
+	ATOMIC_INIT_NOTIFIER_HEAD(&otg->usb3_phy.notifier);
+
+	otg->state = DWC_STATE_B_IDLE;
+	spin_lock_init(&otg->lock);
+	init_waitqueue_head(&otg->main_wq);
+
+	/* Register otg notifier to monitor ID and VBus change events */
+	otg->nb.notifier_call = dwc_otg_handle_notification;
+	usb_register_notifier(&otg->usb2_phy, &otg->nb);
+
+	otg_dbg(otg, "Version: %s\n", VERSION);
+	retval = usb_add_phy(&otg->usb2_phy, USB_PHY_TYPE_USB2);
+	if (retval) {
+		otg_err(otg, "can't register transceiver, err: %d\n",
+			retval);
+		goto err1;
+	}
+
+	retval = usb_add_phy(&otg->usb3_phy, USB_PHY_TYPE_USB3);
+	if (retval) {
+		otg_err(otg, "can't register transceiver, err: %d\n",
+			retval);
+		goto err2;
+	}
+
+	return otg;
+
+err2:
+	usb_remove_phy(&otg->usb2_phy);
+
+err1:
+	kfree(otg);
+	otg = NULL;
+
+	return otg;
+}
+
+static int dwc3_otg_create_children(struct dwc_otg2 *otg,
+		struct resource *res, int num)
+{
+	struct platform_device *dwc_host, *dwc_gadget;
+	enum dwc3_otg_mode mode = dwc3_otg_pdata->mode;
+	int retval = 0, i;
+
+	if (!otg || !res)
+		return -EINVAL;
+
+	if (num != 2)
+		return -EINVAL;
+
+	dwc_host = dwc_gadget = NULL;
+
+	for (i = 0; i < 2; i++) {
+		if (res[i].flags == IORESOURCE_MEM) {
+			otg->usb2_phy.io_priv = ioremap_nocache(
+				res[i].start, res[i].end - res[i].start);
+			if (!otg->usb2_phy.io_priv) {
+				otg_err(otg, "dwc3 otg ioremap failed\n");
+				return -ENOMEM;
+			}
+			break;
+		}
+	}
+
+	/* resource have no mem io resource */
+	if (!otg->usb2_phy.io_priv)
+		return -EINVAL;
+
+	platform_par = kzalloc(sizeof(*platform_par), GFP_KERNEL);
+	if (!platform_par) {
+		otg_err(otg, "alloc dwc_device_par failed\n");
+		goto err1;
+	}
+
+	platform_par->io_addr = otg->usb2_phy.io_priv;
+	platform_par->len = res[i].end - res[i].start;
+
+	if (mode == DWC3_DEVICE_ONLY)
+		goto device_only;
+
+	dwc_host = platform_device_alloc(DWC3_HOST_NAME,
+			HOST_DEVID);
+	if (!dwc_host) {
+		otg_err(otg, "couldn't allocate dwc3 host device\n");
+		goto err2;
+	}
+
+	retval = platform_device_add_resources(dwc_host, res, num);
+	if (retval) {
+		otg_err(otg, "couldn't add resources to dwc3 device\n");
+		goto err3;
+	}
+
+	platform_device_add_data(dwc_host, platform_par,
+			sizeof(struct dwc_device_par));
+
+	dwc_host->dev.dma_mask = otg->dev->dma_mask;
+	dwc_host->dev.dma_parms = otg->dev->dma_parms;
+	dwc_host->dev.parent = otg->dev;
+
+	retval = platform_device_add(dwc_host);
+	if (retval)	{
+		otg_err(otg, "failed to register dwc3 host\n");
+		goto err1;
+	}
+
+	otg->host = dwc_host;
+
+	if (mode != DWC3_DRD)
+		return 0;
+
+device_only:
+	dwc_gadget = platform_device_alloc(DWC3_DEVICE_NAME,
+			GADGET_DEVID);
+	if (!dwc_gadget) {
+		otg_err(otg, "couldn't allocate dwc3 device\n");
+		goto err3;
+	}
+
+	retval = platform_device_add_resources(dwc_gadget,
+				res, num);
+	if (retval) {
+		otg_err(otg, "couldn't add resources to dwc3 device\n");
+		goto err3;
+	}
+
+	dwc_gadget->dev.dma_mask = otg->dev->dma_mask;
+	dwc_gadget->dev.dma_parms = otg->dev->dma_parms;
+	dwc_gadget->dev.parent = otg->dev;
+
+	platform_device_add_data(dwc_gadget, platform_par,
+			sizeof(struct dwc_device_par));
+	retval = platform_device_add(dwc_gadget);
+	if (retval) {
+		otg_err(otg, "failed to register dwc3 gadget\n");
+		goto err3;
+	}
+	otg->gadget = dwc_gadget;
+
+	return 0;
+
+err3:
+	if (mode == DWC3_DRD)
+		platform_device_unregister(dwc_host);
+
+err2:
+	kfree(platform_par);
+
+err1:
+	iounmap(otg->usb2_phy.io_priv);
+
+	return retval;
+}
+
+#ifdef CONFIG_PCI
+
+static int dwc_otg_probe(struct pci_dev *pdev,
+			const struct pci_device_id *id)
+{
+	int retval = 0;
+	struct resource		res[2];
+	struct dwc_otg2 *otg = NULL;
+	unsigned long resource, len;
+
+	if (!dwc3_otg_pdata)
+		return -ENODEV;
+
+	if (pci_enable_device(pdev) < 0) {
+		dev_err(&pdev->dev, "pci device enable failed\n");
+		return -ENODEV;
+	}
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_set_master(pdev);
+
+	otg = dwc3_otg_alloc(&pdev->dev);
+	if (!otg) {
+		dev_err(&pdev->dev, "dwc3 otg init failed\n");
+		goto err;
+	}
+
+	/* control register: BAR 0 */
+	resource = pci_resource_start(pdev, 0);
+	len = pci_resource_len(pdev, 0);
+	if (!request_mem_region(resource, len, driver_name)) {
+		otg_err(otg, "Request memory region failed\n");
+		retval = -EBUSY;
+		goto err;
+	}
+
+	otg_dbg(otg, "dwc otg pci resouce: 0x%lu, len: 0x%lu\n",
+			resource, len);
+	otg_dbg(otg, "vendor: 0x%x, device: 0x%x\n",
+			pdev->vendor, pdev->device);
+
+	memset(res, 0x00, sizeof(struct resource) * ARRAY_SIZE(res));
+
+	res[0].start	= pci_resource_start(pdev, 0);
+	res[0].end	= pci_resource_end(pdev, 0);
+	res[0].name	= "dwc_usb3_io";
+	res[0].flags	= IORESOURCE_MEM;
+
+	res[1].start	= pdev->irq;
+	res[1].name	= "dwc_usb3_irq";
+	res[1].flags	= IORESOURCE_IRQ;
+
+	retval = dwc3_otg_create_children(otg, res, ARRAY_SIZE(res));
+	if (retval) {
+		otg_err(otg, "dwc3 otg create alloc children failed\n");
+		goto err;
+	}
+
+	otg->irqnum = pdev->irq;
+
+	wake_lock_init(&wakelock, WAKE_LOCK_SUSPEND, "dwc_otg_wakelock");
+
+	if (dwc3_otg_pdata->platform_init) {
+		retval = dwc3_otg_pdata->platform_init(otg);
+		if (retval)
+			goto err;
+	}
+
+	pm_runtime_set_autosuspend_delay(&pdev->dev, 100);
+	pm_runtime_use_autosuspend(&pdev->dev);
+	pm_runtime_allow(&pdev->dev);
+	pm_runtime_mark_last_busy(otg->dev);
+	pm_runtime_put_autosuspend(&pdev->dev);
+
+	return 0;
+
+err:
+	if (the_transceiver)
+		dwc_otg_remove(pdev);
+
+	return retval;
+}
+
+static void dwc_otg_remove(struct pci_dev *pdev)
+{
+	struct dwc_otg2 *otg = the_transceiver;
+	int resource, len;
+
+	if (otg->gadget)
+		platform_device_unregister(otg->gadget);
+	if (otg->host)
+		platform_device_unregister(otg->host);
+
+	wake_lock_destroy(&wakelock);
+
+	pm_runtime_forbid(&pdev->dev);
+	pm_runtime_set_suspended(&pdev->dev);
+
+	kfree(platform_par);
+	iounmap(otg->usb2_phy.io_priv);
+
+	usb_remove_phy(&otg->usb2_phy);
+	usb_remove_phy(&otg->usb3_phy);
+	kfree(otg);
+	otg = NULL;
+
+	resource = pci_resource_start(pdev, 0);
+	len = pci_resource_len(pdev, 0);
+	release_mem_region(resource, len);
+
+	pci_disable_device(pdev);
+
+	the_transceiver = NULL;
+}
+
+static void dwc_otg_shutdown(struct pci_dev *pdev)
+{
+	struct dwc_otg2 *otg = the_transceiver;
+
+	/* stop main thread, ignore notification events */
+	stop_main_thread(otg);
+
+	pci_disable_device(pdev);
+}
+
+static int dwc_otg_runtime_idle(struct device *dev)
+{
+	if (dwc3_otg_pdata->idle)
+		return dwc3_otg_pdata->idle(the_transceiver);
+
+	return 0;
+}
+
+static int dwc_otg_runtime_suspend(struct device *dev)
+{
+	if (dwc3_otg_pdata->suspend)
+		return dwc3_otg_pdata->suspend(the_transceiver);
+
+	return 0;
+}
+
+static int dwc_otg_runtime_resume(struct device *dev)
+{
+	if (dwc3_otg_pdata->resume)
+		return dwc3_otg_pdata->resume(the_transceiver);
+	return 0;
+}
+
+static int dwc_otg_suspend(struct device *dev)
+{
+	if (dwc3_otg_pdata->suspend)
+		return dwc3_otg_pdata->suspend(the_transceiver);
+	return 0;
+}
+
+static int dwc_otg_resume(struct device *dev)
+{
+	if (dwc3_otg_pdata->resume)
+		return dwc3_otg_pdata->resume(the_transceiver);
+	return 0;
+}
+
+static const struct dev_pm_ops dwc_usb_otg_pm_ops = {
+	.runtime_suspend = dwc_otg_runtime_suspend,
+	.runtime_resume	= dwc_otg_runtime_resume,
+	.runtime_idle = dwc_otg_runtime_idle,
+	.suspend = dwc_otg_suspend,
+	.resume	= dwc_otg_resume
+};
+
+static DEFINE_PCI_DEVICE_TABLE(pci_ids) = {
+	{ PCI_DEVICE_CLASS(((PCI_CLASS_SERIAL_USB << 8) | 0x20), ~0),
+		.vendor = PCI_VENDOR_ID_INTEL,
+		.device = PCI_DEVICE_ID_DWC,
+	},
+	{ PCI_DEVICE_CLASS(((PCI_CLASS_SERIAL_USB << 8) | 0x80), ~0),
+		.vendor = PCI_VENDOR_ID_INTEL,
+		.device = PCI_DEVICE_ID_DWC,
+	},
+	{ /* end: all zeroes */ }
+};
+
+static struct pci_driver dwc_otg_pci_driver = {
+	.name =		(char *) driver_name,
+	.id_table =	pci_ids,
+	.probe =	dwc_otg_probe,
+	.remove =	dwc_otg_remove,
+	.shutdown = dwc_otg_shutdown,
+	.driver = {
+		.name = (char *) driver_name,
+		.pm = &dwc_usb_otg_pm_ops,
+		.owner = THIS_MODULE,
+	},
+};
+#endif
+
+int dwc3_otg_register(struct dwc3_otg_hw_ops *pdata)
+{
+	int retval;
+
+	if (!pdata)
+		return -EINVAL;
+
+	if (dwc3_otg_pdata)
+		return -EBUSY;
+
+	dwc3_otg_pdata = pdata;
+
+#ifdef CONFIG_PCI
+	retval = pci_register_driver(&dwc_otg_pci_driver);
+#endif
+	mutex_init(&lock);
+
+	return retval;
+}
+EXPORT_SYMBOL_GPL(dwc3_otg_register);
+
+int dwc3_otg_unregister(struct dwc3_otg_hw_ops *pdata)
+{
+	if (!pdata)
+		return -EINVAL;
+
+	if (dwc3_otg_pdata != pdata)
+		return -EINVAL;
+
+	dwc3_otg_pdata = NULL;
+
+#ifdef CONFIG_PCI
+	pci_unregister_driver(&dwc_otg_pci_driver);
+#endif
+	mutex_destroy(&lock);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(dwc3_otg_unregister);
+
+static int __init dwc_otg_init(void)
+{
+	return 0;
+}
+module_init(dwc_otg_init);
+
+static void __exit dwc_otg_exit(void)
+{
+}
+module_exit(dwc_otg_exit);
+
+MODULE_AUTHOR("Synopsys, Inc and Wang Yu <yu.y.wang@intel.com>");
+MODULE_DESCRIPTION("DWC3 OTG Driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION("1.0");
diff --git a/drivers/usb/dwc3/otg.h b/drivers/usb/dwc3/otg.h
new file mode 100644
index 0000000..01d7e22
--- /dev/null
+++ b/drivers/usb/dwc3/otg.h
@@ -0,0 +1,438 @@
+/*
+ * Intel Penwell USB OTG transceiver driver
+ * Copyright (C) 2009 - 2010, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifndef __DWC3_OTG_H
+#define __DWC3_OTG_H
+
+#include <linux/usb.h>
+#include <linux/device.h>
+#include <linux/compiler.h>
+#include <linux/power_supply.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/hcd.h>
+#include <linux/usb/ulpi.h>
+
+
+struct dwc_device_par {
+	void __iomem *io_addr;
+	int len;
+};
+
+#define DWC3_DEVICE_NAME "dwc3-device"
+#define DWC3_HOST_NAME "dwc3-host"
+#define GADGET_DEVID 1
+#define HOST_DEVID 2
+#define DRIVER_VERSION "0.1"
+
+#ifdef CONFIG_USB_DWC3_OTG_DEBUG
+#define DWC_OTG_DEBUG 1
+#else
+#define DWC_OTG_DEBUG 0
+#endif
+
+#define otg_dbg(d, fmt, args...)  \
+	do { dev_dbg((d)->dev, \
+			"%s(): " fmt , __func__, ## args); } while (0)
+#define otg_vdbg(d, fmt, args...)  \
+	do { if (DWC_OTG_DEBUG) dev_dbg((d)->dev, \
+			"%s(): " fmt , __func__, ## args); } while (0)
+#define otg_err(d, fmt, args...)  \
+	do { dev_err((d)->dev, \
+			"%s(): " fmt , __func__, ## args); } while (0)
+#define otg_warn(d, fmt, args...)  \
+	do { dev_warn((d)->dev, \
+			"%s(): " fmt , __func__, ## args); } while (0)
+#define otg_info(d, fmt, args...)  \
+	do { dev_info((d)->dev, \
+			"%s(): " fmt , __func__, ## args); } while (0)
+
+#ifdef DEBUG
+#define otg_write(o, reg, val)	do { \
+		otg_dbg(o, "OTG_WRITE: reg=0x%05x, val=0x%08x\n", reg, val); \
+		writel(val, ((void *)((o)->usb2_phy.io_priv)) + reg);	\
+	} while (0)
+
+#define otg_read(o, reg) ({ \
+		u32 __r; \
+		__r = readl(((void *)((o)->usb2_phy.io_priv)) + reg);	\
+		otg_dbg(o, "OTG_READ: reg=0x%05x, val=0x%08x\n", reg, __r); \
+		__r;							\
+	})
+#else
+#define otg_write(o, reg, val) \
+		writel(val, ((void *)((o)->usb2_phy.io_priv)) + reg);
+
+#define otg_read(o, reg)	({ \
+		readl(((void *)((o)->usb2_phy.io_priv)) + reg); \
+	})
+#endif
+
+#define GUSB2PHYCFG0				0xc200
+#define GUSB2PHYCFG_SUS_PHY                     0x40
+#define GUSB2PHYCFG_PHYSOFTRST (1 << 31)
+#define GUSB2PHYCFG_ULPI_AUTO_RESUME (1 << 15)
+
+#define EXTEND_ULPI_REGISTER_ACCESS_MASK	0xC0
+#define GUSB2PHYACC0	0xc280
+#define GUSB2PHYACC0_DISULPIDRVR  (1 << 26)
+#define GUSB2PHYACC0_NEWREGREQ  (1 << 25)
+#define GUSB2PHYACC0_VSTSDONE  (1 << 24)
+#define GUSB2PHYACC0_VSTSBSY  (1 << 23)
+#define GUSB2PHYACC0_REGWR  (1 << 22)
+#define GUSB2PHYACC0_REGADDR(v)  ((v & 0x3F) << 16)
+#define GUSB2PHYACC0_EXTREGADDR(v)  ((v & 0x3F) << 8)
+#define GUSB2PHYACC0_VCTRL(v)  ((v & 0xFF) << 8)
+#define GUSB2PHYACC0_REGDATA(v)  (v & 0xFF)
+#define GUSB2PHYACC0_REGDATA_MASK  0xFF
+
+#define GUSB3PIPECTL0                           0xc2c0
+#define GUSB3PIPECTL_SUS_EN                     0x20000
+#define GUSB3PIPE_DISRXDETP3                    (1 << 28)
+#define GUSB3PIPECTL_PHYSOFTRST (1 << 31)
+
+#define GHWPARAMS6				0xc158
+#define GHWPARAMS6_SRP_SUPPORT_ENABLED		0x0400
+#define GHWPARAMS6_HNP_SUPPORT_ENABLED		0x0800
+#define GHWPARAMS6_ADP_SUPPORT_ENABLED		0x1000
+
+#define GUCTL 0xC12C
+#define GUCTL_CMDEVADDR		(1 << 15)
+
+#define GCTL 0xc110
+#define GCTL_PRT_CAP_DIR 0x3000
+#define GCTL_PRT_CAP_DIR_SHIFT 12
+#define GCTL_PRT_CAP_DIR_HOST 1
+#define GCTL_PRT_CAP_DIR_DEV 2
+#define GCTL_PRT_CAP_DIR_OTG 3
+#define GCTL_GBL_HIBERNATION_EN 0x2
+#define GCTL_CORESOFTRESET (1 << 11)
+#define GCTL_PWRDNSCALE(x) (x << 19)
+#define GCTL_PWRDNSCALE_MASK (0x1fff << 19)
+
+#define OCFG					0xcc00
+#define OCFG_SRP_CAP				0x01
+#define OCFG_SRP_CAP_SHIFT			0
+#define OCFG_HNP_CAP				0x02
+#define OCFG_HNP_CAP_SHIFT			1
+#define OCFG_OTG_VERSION			0x04
+#define OCFG_OTG_VERSION_SHIFT			2
+
+#define GCTL					0xc110
+#define OCTL					0xcc04
+#define OCTL_HST_SET_HNP_EN			0x01
+#define OCTL_HST_SET_HNP_EN_SHIFT		0
+#define OCTL_DEV_SET_HNP_EN			0x02
+#define OCTL_DEV_SET_HNP_EN_SHIFT		1
+#define OCTL_TERM_SEL_DL_PULSE			0x04
+#define OCTL_TERM_SEL_DL_PULSE_SHIFT		2
+#define OCTL_SES_REQ				0x08
+#define OCTL_SES_REQ_SHIFT			3
+#define OCTL_HNP_REQ				0x10
+#define OCTL_HNP_REQ_SHIFT			4
+#define OCTL_PRT_PWR_CTL			0x20
+#define OCTL_PRT_PWR_CTL_SHIFT			5
+#define OCTL_PERI_MODE				0x40
+#define OCTL_PERI_MODE_SHIFT			6
+
+#define OEVT					0xcc08
+#define OEVT_ERR				0x00000001
+#define OEVT_ERR_SHIFT				0
+#define OEVT_SES_REQ_SCS			0x00000002
+#define OEVT_SES_REQ_SCS_SHIFT			1
+#define OEVT_HST_NEG_SCS			0x00000004
+#define OEVT_HST_NEG_SCS_SHIFT			2
+#define OEVT_B_SES_VLD_EVT			0x00000008
+#define OEVT_B_SES_VLD_EVT_SHIFT		3
+#define OEVT_B_DEV_VBUS_CHNG_EVNT		0x00000100
+#define OEVT_B_DEV_VBUS_CHNG_EVNT_SHIFT		8
+#define OEVT_B_DEV_SES_VLD_DET_EVNT		0x00000200
+#define OEVT_B_DEV_SES_VLD_DET_EVNT_SHIFT	9
+#define OEVT_B_DEV_HNP_CHNG_EVNT		0x00000400
+#define OEVT_B_DEV_HNP_CHNG_EVNT_SHIFT		10
+#define OEVT_B_DEV_B_HOST_END_EVNT		0x00000800
+#define OEVT_B_DEV_B_HOST_END_EVNT_SHIFT	11
+#define OEVT_A_DEV_SESS_END_DET_EVNT		0x00010000
+#define OEVT_A_DEV_SESS_END_DET_EVNT_SHIFT	16
+#define OEVT_A_DEV_SRP_DET_EVNT			0x00020000
+#define OEVT_A_DEV_SRP_DET_EVNT_SHIFT		17
+#define OEVT_A_DEV_HNP_CHNG_EVNT		0x00040000
+#define OEVT_A_DEV_HNP_CHNG_EVNT_SHIFT		18
+#define OEVT_A_DEV_HOST_EVNT			0x00080000
+#define OEVT_A_DEV_HOST_EVNT_SHIFT		19
+#define OEVT_A_DEV_B_DEV_HOST_END_EVNT		0x00100000
+#define OEVT_A_DEV_B_DEV_HOST_END_EVNT_SHIFT	20
+#define OEVT_HOST_ROLE_REQ_INIT_EVNT            0x00400000
+#define OEVT_HOST_ROLE_REQ_INIT_EVNT_SHIFT      22
+#define OEVT_HOST_ROLE_REQ_CONFIRM_EVNT         0x00800000
+#define OEVT_HOST_ROLE_REQ_CONFIRM_EVNT_SHIFT   23
+#define OEVT_CONN_ID_STS_CHNG_EVNT		0x01000000
+#define OEVT_CONN_ID_STS_CHNG_EVNT_SHIFT	24
+#define OEVT_DEV_MOD_EVNT			0x80000000
+#define OEVT_DEV_MOD_EVNT_SHIFT			31
+
+#define OEVTEN					0xcc0c
+
+#define OEVT_ALL (OEVT_CONN_ID_STS_CHNG_EVNT | \
+		OEVT_HOST_ROLE_REQ_INIT_EVNT | \
+		OEVT_HOST_ROLE_REQ_CONFIRM_EVNT | \
+		OEVT_A_DEV_B_DEV_HOST_END_EVNT | \
+		OEVT_A_DEV_HOST_EVNT | \
+		OEVT_A_DEV_HNP_CHNG_EVNT | \
+		OEVT_A_DEV_SRP_DET_EVNT | \
+		OEVT_A_DEV_SESS_END_DET_EVNT | \
+		OEVT_B_DEV_B_HOST_END_EVNT | \
+		OEVT_B_DEV_HNP_CHNG_EVNT | \
+		OEVT_B_DEV_SES_VLD_DET_EVNT | \
+		OEVT_B_DEV_VBUS_CHNG_EVNT)
+
+#define OSTS					0xcc10
+#define OSTS_CONN_ID_STS			0x0001
+#define OSTS_CONN_ID_STS_SHIFT			0
+#define OSTS_A_SES_VLD				0x0002
+#define OSTS_A_SES_VLD_SHIFT			1
+#define OSTS_B_SES_VLD				0x0004
+#define OSTS_B_SES_VLD_SHIFT			2
+#define OSTS_XHCI_PRT_PWR			0x0008
+#define OSTS_XHCI_PRT_PWR_SHIFT			3
+#define OSTS_PERIP_MODE				0x0010
+#define OSTS_PERIP_MODE_SHIFT			4
+#define OSTS_OTG_STATES				0x0f00
+#define OSTS_OTG_STATE_SHIFT			8
+
+#define ADPCFG					0xcc20
+#define ADPCFG_PRB_DSCHGS			0x0c000000
+#define ADPCFG_PRB_DSCHG_SHIFT			26
+#define ADPCFG_PRB_DELTAS			0x30000000
+#define ADPCFG_PRB_DELTA_SHIFT			28
+#define ADPCFG_PRB_PERS				0xc0000000
+#define ADPCFG_PRB_PER_SHIFT			30
+
+#define ADPCTL					0xcc24
+#define ADPCTL_WB				0x01000000
+#define ADPCTL_WB_SHIFT				24
+#define ADPCTL_ADP_RES				0x02000000
+#define ADPCTL_ADP_RES_SHIFT			25
+#define ADPCTL_ADP_EN				0x04000000
+#define ADPCTL_ADP_EN_SHIFT			26
+#define ADPCTL_ENA_SNS				0x08000000
+#define ADPCTL_ENA_SNS_SHIFT			27
+#define ADPCTL_ENA_PRB				0x10000000
+#define ADPCTL_ENA_PRB_SHIFT			28
+
+#define ADPEVT					0xcc28
+#define ADPEVT_RTIM_EVNTS			0x000007ff
+#define ADPEVT_RTIM_EVNT_SHIFT			0
+#define ADPEVT_ADP_RST_CMPLT_EVNT		0x02000000
+#define ADPEVT_ADP_RST_CMPLT_EVNT_SHIFT		25
+#define ADPEVT_ADP_TMOUT_EVNT			0x04000000
+#define ADPEVT_ADP_TMOUT_EVNT_SHIFT		26
+#define ADPEVT_ADP_SNS_EVNT			0x08000000
+#define ADPEVT_ADP_SNS_EVNT_SHIFT		27
+#define ADPEVT_ADP_PRB_EVNT			0x10000000
+#define ADPEVT_ADP_PRB_EVNT_SHIFT		28
+
+#define ADPEVTEN				0xcc2c
+#define ADPEVTEN_ACC_DONE_EN			0x01000000
+#define ADPEVTEN_ACC_DONE_EN_SHIFT		24
+#define ADPEVTEN_ADP_RST_CMPLT_EVNT_EN		0x02000000
+#define ADPEVTEN_ADP_RST_CMPLT_EVNT_EN_SHIFT	25
+#define ADPEVTEN_ADP_TMOUT_EVNT_EN		0x04000000
+#define ADPEVTEN_ADP_TMOUT_EVNT_EN_SHIFT	26
+#define ADPEVTEN_ADP_SNS_EVNT_EN		0x08000000
+#define ADPEVTEN_ADP_SNS_EVNT_EN_SHIFT		27
+#define ADPEVTEN_ADP_PRB_EVNT_EN		0x10000000
+#define ADPEVTEN_ADP_PRB_EVNT_EN_SHIFT		28
+
+#define RID_A		0x01
+#define RID_B		0x02
+#define RID_C		0x03
+#define RID_FLOAT	0x04
+#define RID_GND		0x05
+#define RID_UNKNOWN	0x00
+
+/** The states for the OTG driver */
+enum dwc_otg_state {
+	DWC_STATE_INVALID = -1,
+
+	/** The initial state, check the connector
+	 * id status and determine what mode
+	 * (A-device or B-device) to operate in. */
+	DWC_STATE_B_IDLE = 0,
+
+	/* A-Host states */
+	DWC_STATE_A_PROBE,
+	DWC_STATE_A_HOST,
+	DWC_STATE_A_HNP_INIT,
+
+	/* A-Peripheral states */
+	DWC_STATE_A_PERIPHERAL,
+
+	/* B-Peripheral states */
+	DWC_STATE_B_SENSE,
+	DWC_STATE_B_PROBE,
+	DWC_STATE_B_PERIPHERAL,
+	DWC_STATE_B_HNP_INIT,
+
+	/* B-Host states */
+	DWC_STATE_B_HOST,
+
+	/* RSP */
+	DWC_STATE_B_RSP_INIT,
+
+	/* USB charger detection */
+	DWC_STATE_CHARGER_DETECTION,
+
+	/* VBUS */
+	DWC_STATE_WAIT_VBUS_RAISE,
+	DWC_STATE_WAIT_VBUS_FALL,
+
+	/* Charging*/
+	DWC_STATE_CHARGING,
+
+	/* Exit */
+	DWC_STATE_EXIT,
+	DWC_STATE_TERMINATED
+};
+
+/** The main structure to keep track of OTG driver state. */
+struct dwc_otg2 {
+	/** OTG transceiver */
+	struct usb_otg	otg;
+	struct usb_phy	usb2_phy;
+	struct usb_phy	usb3_phy;
+	struct device		*dev;
+	int irqnum;
+
+	int main_wakeup_needed;
+	struct task_struct *main_thread;
+	wait_queue_head_t main_wq;
+
+	spinlock_t lock;
+
+	/* Events */
+	u32 otg_events;
+	u32 user_events;
+
+	/** User space ID switch event */
+#define USER_ID_A_CHANGE_EVENT 0x01
+#define USER_ID_B_CHANGE_EVENT 0x02
+	/** a_bus_drop event from userspace */
+#define USER_A_BUS_DROP 0x40
+
+	/* States */
+	enum dwc_otg_state prev;
+	enum dwc_otg_state state;
+	struct platform_device *host;
+	struct platform_device *gadget;
+
+	/* Charger detection */
+	struct power_supply_cable_props charging_cap;
+	struct notifier_block nb;
+
+	/* Interfaces between host/device driver */
+	int (*start_host) (struct usb_hcd *hcd);
+	int (*stop_host) (struct usb_hcd *hcd);
+	int (*start_device)(struct usb_gadget *);
+	int (*stop_device)(struct usb_gadget *);
+	int (*vbus_draw) (struct usb_gadget *, unsigned ma);
+
+	/* host driver suspend/resume flow callback which
+	 * need host driver to register them if need call by
+	 * otg driver.*/
+	int (*suspend_host) (struct usb_hcd *hcd);
+	int (*resume_host) (struct usb_hcd *hcd);
+
+	/* Vendor driver private date */
+	void *otg_data;
+};
+
+#define sleep_main_thread_until_condition_timeout(otg, condition, msecs) ({ \
+		int __timeout = msecs;				\
+		while (!(condition)) {				\
+			otg_dbg(otg, "  ... sleeping for %d\n", __timeout); \
+			__timeout = sleep_main_thread_timeout(otg, __timeout); \
+			if (__timeout <= 0) {			\
+				break;				\
+			}					\
+		}						\
+		__timeout;					\
+	})
+
+#define sleep_main_thread_until_condition(otg, condition) ({	\
+		int __rc = 0;					\
+		do {						\
+			__rc = sleep_main_thread_until_condition_timeout(otg, \
+			condition, 50000); \
+		} while (__rc == 0);				\
+		__rc;						\
+	})
+
+#define VBUS_TIMEOUT	20
+#define PCI_DEVICE_ID_DWC 0x119E
+
+enum dwc3_otg_mode {
+	DWC3_DEVICE_ONLY,
+	DWC3_HOST_ONLY,
+	DWC3_DRD,
+};
+
+enum driver_bus_type {
+	DWC3_PLAT,
+	DWC3_PCI,
+};
+
+struct dwc3_otg_hw_ops {
+	enum dwc3_otg_mode mode;
+	enum driver_bus_type bus;
+
+	int (*set_power)(struct usb_phy *_otg, unsigned ma);
+	int (*platform_init)(struct dwc_otg2 *otg);
+	int (*otg_notifier_handler)(struct notifier_block *nb,
+			unsigned long event, void *data);
+	int (*prepare_start_peripheral)(struct dwc_otg2 *otg);
+	int (*prepare_start_host)(struct dwc_otg2 *otg);
+	int (*after_stop_peripheral)(struct dwc_otg2 *otg);
+	int (*after_stop_host)(struct dwc_otg2 *otg);
+	int (*b_idle)(struct dwc_otg2 *otg);
+	int (*do_charging)(struct dwc_otg2 *otg);
+	int (*notify_charger_type)(struct dwc_otg2 *otg,
+			enum power_supply_charger_event event);
+	enum power_supply_charger_cable_type
+		(*get_charger_type)(struct dwc_otg2 *otg);
+	int (*enable_vbus)(struct dwc_otg2 *otg, int enable);
+	int (*get_id)(struct dwc_otg2 *otg);
+
+	int (*idle)(struct dwc_otg2 *otg);
+	int (*suspend)(struct dwc_otg2 *otg);
+	int (*resume)(struct dwc_otg2 *otg);
+};
+
+#define OTG_USB2_0MA				0xfff0
+#define OTG_USB2_100MA				0xfff1
+#define OTG_USB3_150MA				0xfff2
+#define OTG_USB2_500MA				0xfff3
+#define OTG_USB3_900MA				0xfff4
+#define OTG_DEVICE_SUSPEND			0xfffe
+#define OTG_DEVICE_RESUME			0xffff
+
+void dwc3_wakeup_otg_thread(struct dwc_otg2 *otg);
+struct dwc_otg2 *dwc3_get_otg(void);
+int dwc3_otg_register(struct dwc3_otg_hw_ops *pdata);
+int dwc3_otg_unregister(struct dwc3_otg_hw_ops *pdata);
+#endif /* __DWC3_OTG_H */
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 9de2eb2..b558d1c 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -192,6 +192,21 @@
 	help
 	   Faraday usb device controller FUSB300 driver
 
+config USB_LANGWELL
+	boolean "Intel Langwell/Penwell USB Device Controller"
+	depends on PCI
+	select USB_GADGET_LANGWELL
+	help
+	   Intel Langwell/Penwell USB Device Controller is a High-Speed USB
+	   On-The-Go device controller.
+
+	   The number of programmable endpoints is different through
+	   controller revision.
+
+	   Say "y" to link the driver statically, or "m" to build a
+	   dynamically linked module called "langwell_udc" and force all
+	   gadget drivers to also be dynamically linked.
+
 config USB_OMAP
 	tristate "OMAP USB Device Controller"
 	depends on ARCH_OMAP1
@@ -488,6 +503,20 @@
 
 endmenu
 
+config USB_LANGWELLUDC_OTG
+	boolean "Intel Langwell/Penwell USB Device Controller OTG support"
+	depends on USB_LANGWELL
+	help
+	   OTG support from UDC driver's point of view, this will decide if
+	   USB OTG descriptor is needed or not from gadget.
+
+# Selected by LANGWELL UDC driver
+config USB_GADGET_LANGWELL
+	bool
+	depends on USB_LANGWELL
+
+
+
 #
 # USB Gadget Drivers
 #
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index 0ec50ae..0c719e6 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -28,6 +28,7 @@
 obj-$(CONFIG_USB_FSL_QE)	+= fsl_qe_udc.o
 obj-$(CONFIG_USB_S3C_HSOTG)	+= s3c-hsotg.o
 obj-$(CONFIG_USB_S3C_HSUDC)	+= s3c-hsudc.o
+obj-$(CONFIG_USB_LANGWELL)	+= langwell_udc.o
 obj-$(CONFIG_USB_LPC32XX)	+= lpc32xx_udc.o
 obj-$(CONFIG_USB_EG20T)		+= pch_udc.o
 obj-$(CONFIG_USB_MV_UDC)	+= mv_udc.o
diff --git a/drivers/usb/gadget/android.c b/drivers/usb/gadget/android.c
index 61c5e58..bd96126 100644
--- a/drivers/usb/gadget/android.c
+++ b/drivers/usb/gadget/android.c
@@ -40,6 +40,8 @@
 #include "f_rndis.c"
 #include "rndis.c"
 #include "u_ether.c"
+#include "f_dvc_dfx.c"
+#include "f_dvc_trace.c"
 
 MODULE_AUTHOR("Mike Lockwood");
 MODULE_DESCRIPTION("Android Composite USB Driver");
@@ -152,11 +154,22 @@
 	.bNumConfigurations   = 1,
 };
 
+static struct usb_otg_descriptor otg_descriptor = {
+	.bLength              = sizeof(otg_descriptor),
+	.bDescriptorType      = USB_DT_OTG,
+	.bcdOTG               = 0x0200, /* version 2.0 */
+};
+
+const struct usb_descriptor_header *otg_desc[] = {
+	(struct usb_descriptor_header *) &otg_descriptor,
+	NULL,
+};
+
 static struct usb_configuration android_config_driver = {
 	.label		= "android",
 	.unbind		= android_unbind_config,
 	.bConfigurationValue = 1,
-	.bmAttributes	= USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
+	.bmAttributes	= USB_CONFIG_ATT_ONE,
 	.MaxPower	= 500, /* 500ma */
 };
 
@@ -207,6 +220,13 @@
 
 	if (dev->disable_depth++ == 0) {
 		usb_gadget_disconnect(cdev->gadget);
+
+		/* As connection is disconnected here,
+		 * any flying request should be completed or potential
+		 * request should be added within 50ms.
+		 */
+		 msleep(50);
+
 		/* Cancel pending control requests */
 		usb_ep_dequeue(cdev->gadget->ep0, cdev->req);
 		usb_remove_config(cdev, &android_config_driver);
@@ -452,16 +472,6 @@
 	return ret;
 }
 
-static void acm_function_unbind_config(struct android_usb_function *f,
-				       struct usb_configuration *c)
-{
-	int i;
-	struct acm_function_config *config = f->config;
-
-	for (i = 0; i < config->instances_on; i++)
-		usb_remove_function(c, config->f_acm[i]);
-}
-
 static ssize_t acm_instances_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
@@ -496,7 +506,6 @@
 	.init		= acm_function_init,
 	.cleanup	= acm_function_cleanup,
 	.bind_config	= acm_function_bind_config,
-	.unbind_config	= acm_function_unbind_config,
 	.attributes	= acm_function_attributes,
 };
 
@@ -547,6 +556,13 @@
 	return mtp_ctrlrequest(cdev, c);
 }
 
+static int ptp_function_ctrlrequest(struct android_usb_function *f,
+					struct usb_composite_dev *cdev,
+					const struct usb_ctrlrequest *c)
+{
+	return ptp_ctrlrequest(cdev, c);
+}
+
 static struct android_usb_function mtp_function = {
 	.name		= "mtp",
 	.init		= mtp_function_init,
@@ -561,6 +577,7 @@
 	.init		= ptp_function_init,
 	.cleanup	= ptp_function_cleanup,
 	.bind_config	= ptp_function_bind_config,
+	.ctrlrequest	= ptp_function_ctrlrequest,
 };
 
 
@@ -940,6 +957,306 @@
 	.attributes	= audio_source_function_attributes,
 };
 
+struct dvcdfx_function_config {
+	bool	enabled;
+	u8	bFunctionProtocol;
+	u8	bInterfaceProtocol;
+};
+
+static int dvcdfx_function_init(struct android_usb_function *f,
+					struct usb_composite_dev *cdev)
+{
+	f->config = kzalloc(sizeof(struct dvcdfx_function_config), GFP_KERNEL);
+	if (!f->config)
+		return -ENOMEM;
+	return dvc_dfx_setup();
+}
+
+static void dvcdfx_function_cleanup(struct android_usb_function *f)
+{
+	kfree(f->config);
+	f->config = NULL;
+	dvc_dfx_cleanup();
+}
+
+static int dvcdfx_function_bind_config(struct android_usb_function *f,
+						struct usb_configuration *c)
+{
+	struct dvcdfx_function_config *dvcdfx = f->config;
+	if (!dvcdfx) {
+		pr_err("%s: dvcdfx_pdata\n", __func__);
+		return -1;
+	}
+
+	dfx_iad_desc.bFunctionProtocol = dvcdfx->bFunctionProtocol;
+	dfx_interface_desc.bInterfaceProtocol = dvcdfx->bInterfaceProtocol;
+	dfx_data_interface_desc.bInterfaceProtocol = dvcdfx->bInterfaceProtocol;
+
+	return dvc_dfx_bind_config(c);
+}
+
+static int dvcdfx_function_ctrlrequest(struct android_usb_function *f,
+		struct usb_composite_dev *cdev,
+		const struct usb_ctrlrequest *c)
+{
+
+	struct dvcdfx_function_config *dvcdfx = f->config;
+	int ret;
+	ret = dvc_dfx_ctrlrequest(cdev, c);
+	if (ret != -EOPNOTSUPP)
+		dvcdfx->enabled = false;
+	return ret;
+}
+
+static ssize_t dvcdfx_reset_store(struct device *pdev,
+		  struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct android_dev *dev = _android_dev;
+
+	mutex_lock(&dev->mutex);
+	/* reset DvC.Dfx function */
+	android_disable(dev);
+
+	android_enable(dev);
+	mutex_unlock(&dev->mutex);
+
+	return size;
+}
+
+static DEVICE_ATTR(reset_dfx, S_IWUSR, NULL, dvcdfx_reset_store);
+
+static ssize_t dvcdfx_enable_store(struct device *pdev,
+		  struct device_attribute *attr, const char *buf, size_t size)
+{
+	int value, ret;
+	struct android_usb_function *f = dev_get_drvdata(pdev);
+	struct dvcdfx_function_config *dvcdfx = f->config;
+
+	if (!dvcdfx) {
+		pr_err("%s: dvcdfx_pdata\n", __func__);
+		return -ENODEV;
+	}
+
+	if (sscanf(buf, "%d", &value) == 1) {
+		if ((value > 0)  && (!dvcdfx->enabled)) {
+			ret = dvc_dfx_start_transfer(value);
+			if (ret < 0)
+				return -EINVAL;
+
+			dvcdfx->enabled = true;
+			return value;
+
+		} else if ((value == 0) && (dvcdfx->enabled)) {
+			ret = dvc_dfx_disable_transfer();
+			if (ret < 0)
+				return -EINVAL;
+
+			dvcdfx->enabled = false;
+			return value;
+		}
+	}
+	return -EINVAL;
+}
+
+static DEVICE_ATTR(enable_dfx, S_IWUSR, NULL, dvcdfx_enable_store);
+
+static ssize_t dvcdfx_bFunctionProtocol_show(struct device *dev,
+					     struct device_attribute *attr,
+					     char *buf)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+	struct dvcdfx_function_config *config = f->config;
+
+	return sprintf(buf, "%d\n", config->bFunctionProtocol);
+}
+static ssize_t dvcdfx_bFunctionProtocol_store(struct device *dev,
+					      struct device_attribute *attr,
+					      const char *buf, size_t size)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+	struct dvcdfx_function_config *config = f->config;
+	int value;
+
+	if (sscanf(buf, "%d", &value) == 1) {
+		config->bFunctionProtocol = value;
+		config->bInterfaceProtocol = value;
+		return size;
+	}
+	return -1;
+}
+static DEVICE_ATTR(bFunctionProtocol_dfx, S_IRUGO | S_IWUSR,
+		   dvcdfx_bFunctionProtocol_show ,
+		   dvcdfx_bFunctionProtocol_store);
+
+static struct device_attribute *dvcdfx_function_attributes[] = {
+	&dev_attr_reset_dfx,
+	&dev_attr_enable_dfx,
+	&dev_attr_bFunctionProtocol_dfx,
+	NULL
+};
+
+static struct android_usb_function dvcdfx_function = {
+	.name		= "dvcdfx",
+	.init		= dvcdfx_function_init,
+	.cleanup	= dvcdfx_function_cleanup,
+	.bind_config	= dvcdfx_function_bind_config,
+	.ctrlrequest	= dvcdfx_function_ctrlrequest,
+	.attributes	= dvcdfx_function_attributes,
+};
+
+
+struct dvctrace_function_config {
+	bool	enabled;
+	u8	bFunctionProtocol;
+	u8	bInterfaceProtocol;
+};
+
+static int dvctrace_function_init(struct android_usb_function *f,
+				  struct usb_composite_dev *cdev)
+{
+	f->config = kzalloc(sizeof(struct dvctrace_function_config),
+			    GFP_KERNEL);
+	if (!f->config)
+		return -ENOMEM;
+	return dvc_trace_setup();
+}
+
+static void dvctrace_function_cleanup(struct android_usb_function *f)
+{
+	kfree(f->config);
+	f->config = NULL;
+	return dvc_trace_cleanup();
+}
+
+static int dvctrace_function_bind_config(struct android_usb_function *f,
+					 struct usb_configuration *c)
+{
+	struct dvctrace_function_config *dvctrace = f->config;
+	if (!dvctrace) {
+		pr_err("%s: dvctrace_pdata\n", __func__);
+		return -1;
+	}
+
+	dvctrace->enabled = false;
+	trace_iad_desc.bFunctionProtocol = dvctrace->bFunctionProtocol;
+	trace_interface_desc.bInterfaceProtocol = dvctrace->bInterfaceProtocol;
+	trace_data_interface_desc.bInterfaceProtocol =
+		dvctrace->bInterfaceProtocol;
+
+	return dvc_trace_bind_config(c);
+}
+
+static int dvctrace_function_ctrlrequest(struct android_usb_function *f,
+						struct usb_composite_dev *cdev,
+						const struct usb_ctrlrequest *c)
+{
+	struct dvctrace_function_config *dvctrace = f->config;
+	int ret;
+
+	ret = dvc_trace_ctrlrequest(cdev, c);
+	if (!ret)
+		dvctrace->enabled = false;
+	else if (ret > 0)
+		dvctrace->enabled = true;
+	return ret;
+}
+
+static ssize_t dvctrace_reset_store(struct device *pdev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct android_dev *dev = _android_dev;
+
+	mutex_lock(&dev->mutex);
+	/* reset DvC.Trace function */
+	android_disable(dev);
+
+	android_enable(dev);
+	mutex_unlock(&dev->mutex);
+
+	return size;
+}
+
+static DEVICE_ATTR(reset_trace, S_IWUSR, NULL, dvctrace_reset_store);
+
+static ssize_t dvctrace_enable_store(struct device *pdev,
+		  struct device_attribute *attr, const char *buf, size_t size)
+{
+	int value, ret;
+	struct android_usb_function *f = dev_get_drvdata(pdev);
+	struct dvctrace_function_config *dvctrace = f->config;
+
+	if (!dvctrace) {
+		pr_err("%s: dvctrace_pdata\n", __func__);
+		return -ENODEV;
+	}
+
+	if (sscanf(buf, "%d", &value) == 1) {
+		if ((value > 0)  && (!dvctrace->enabled)) {
+			ret = dvc_trace_start_transfer(value);
+			if (ret < 0)
+				return -EINVAL;
+
+			dvctrace->enabled = true;
+			return value;
+
+		} else if ((value == 0) && (dvctrace->enabled)) {
+			ret = dvc_trace_disable_transfer();
+			if (ret < 0)
+				return -EINVAL;
+
+			dvctrace->enabled = false;
+			return value;
+		}
+	}
+	return -EINVAL;
+}
+
+static DEVICE_ATTR(enable_trace, S_IWUSR, NULL, dvctrace_enable_store);
+
+static ssize_t dvctrace_bFunctionProtocol_show(struct device *dev,
+					       struct device_attribute *attr,
+					       char *buf)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+	struct dvctrace_function_config *config = f->config;
+
+	return sprintf(buf, "%d\n", config->bFunctionProtocol);
+}
+static ssize_t dvctrace_bFunctionProtocol_store(struct device *dev,
+						struct device_attribute *attr,
+						const char *buf, size_t size)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+	struct dvctrace_function_config *config = f->config;
+	int value;
+
+	if (sscanf(buf, "%d", &value) == 1) {
+		config->bFunctionProtocol = value;
+		config->bInterfaceProtocol = value;
+		return size;
+	}
+	return -1;
+}
+static DEVICE_ATTR(bFunctionProtocol_trace, S_IRUGO | S_IWUSR,
+		   dvctrace_bFunctionProtocol_show,
+		   dvctrace_bFunctionProtocol_store);
+
+static struct device_attribute *dvctrace_function_attributes[] = {
+	&dev_attr_reset_trace,
+	&dev_attr_enable_trace,
+	&dev_attr_bFunctionProtocol_trace,
+	NULL
+};
+
+static struct android_usb_function dvctrace_function = {
+	.name		= "dvctrace",
+	.init		= dvctrace_function_init,
+	.cleanup	= dvctrace_function_cleanup,
+	.bind_config	= dvctrace_function_bind_config,
+	.ctrlrequest	= dvctrace_function_ctrlrequest,
+	.attributes	= dvctrace_function_attributes,
+};
+
 static int midi_function_init(struct android_usb_function *f,
 					struct usb_composite_dev *cdev)
 {
@@ -1003,6 +1320,8 @@
 	&mass_storage_function,
 	&accessory_function,
 	&audio_source_function,
+	&dvcdfx_function,
+	&dvctrace_function,
 	&midi_function,
 	NULL
 };
@@ -1107,6 +1426,7 @@
 {
 	struct android_usb_function **functions = dev->functions;
 	struct android_usb_function *f;
+
 	while ((f = *functions++)) {
 		if (!strcmp(name, f->name)) {
 			list_add_tail(&f->enabled_list,
@@ -1168,6 +1488,14 @@
 		if (!name)
 			continue;
 
+		if (!strcmp(name, "dvctrace") &&
+		    !dvc_trace_is_enabled())
+			continue;
+
+		if (!strcmp(name, "dvcdfx") &&
+		    !dvc_dfx_is_enabled())
+			continue;
+
 		is_ffs = 0;
 		strlcpy(aliases, dev->ffs_aliases, sizeof(aliases));
 		a = aliases;
@@ -1249,6 +1577,9 @@
 				f->disable(f);
 		}
 		dev->enabled = false;
+	} else if (!enabled && !dev->enabled) {
+		usb_gadget_disconnect(cdev->gadget);
+		dev->enabled = false;
 	} else {
 		pr_err("android_usb: already %s\n",
 				dev->enabled ? "enabled" : "disabled");
@@ -1356,6 +1687,9 @@
 	struct android_dev *dev = _android_dev;
 	int ret = 0;
 
+	if (gadget_is_otg(c->cdev->gadget))
+		c->descriptors = otg_desc;
+
 	ret = android_bind_enabled_functions(dev, c);
 	if (ret)
 		return ret;
@@ -1415,7 +1749,13 @@
 	strings_dev[STRING_SERIAL_IDX].id = id;
 	device_desc.iSerialNumber = id;
 
-	usb_gadget_set_selfpowered(gadget);
+	cdev->reset_string_id = id;
+
+	if (gadget_is_otg(gadget))
+		cdev->otg_desc = &otg_descriptor;
+
+	if (android_config_driver.bmAttributes & USB_CONFIG_ATT_SELFPOWER)
+		usb_gadget_set_selfpowered(gadget);
 	dev->cdev = cdev;
 
 	return 0;
@@ -1468,6 +1808,13 @@
 	spin_lock_irqsave(&cdev->lock, flags);
 	if (!dev->connected) {
 		dev->connected = 1;
+
+		/* set MaxPower as 900mA for SuperSpeed mode */
+		if (gadget->speed == USB_SPEED_SUPER)
+			android_config_driver.MaxPower = 896;
+		else
+			android_config_driver.MaxPower = 500;
+
 		schedule_work(&dev->work);
 	} else if (c->bRequest == USB_REQ_SET_CONFIGURATION &&
 						cdev->config) {
@@ -1525,6 +1872,16 @@
 	return 0;
 }
 
+static void android_free_device(struct android_dev *dev)
+{
+	struct device_attribute **attrs = android_usb_attributes;
+	struct device_attribute *attr;
+
+	while ((attr = *attrs++))
+		device_remove_file(dev->dev, attr);
+
+	device_destroy(android_class, dev->dev->devt);
+}
 
 static int __init init(void)
 {
@@ -1570,6 +1927,7 @@
 err_probe:
 	device_destroy(android_class, dev->dev->devt);
 err_create:
+	android_free_device(dev);
 	kfree(dev);
 err_dev:
 	class_destroy(android_class);
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index f9e3975..6f43058 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -660,6 +660,13 @@
 			descriptors = f->fs_descriptors;
 		}
 
+		if (!descriptors) {
+			INFO(cdev, "%s is not supported\n",
+			     usb_speed_string(gadget->speed));
+			cdev->config = NULL;
+			return -ENODEV;
+		}
+
 		for (; *descriptors; ++descriptors) {
 			struct usb_endpoint_descriptor *ep;
 			int addr;
@@ -831,6 +838,12 @@
 		config->unbind(config);
 			/* may free memory for "c" */
 	}
+
+	/* reset cdev->next_string_id to cdev->reset_string_id
+	 * because "android_usb" driver is working and its
+	 * string descriptor numbers have been allocated
+	 */
+	cdev->next_string_id = cdev->reset_string_id;
 }
 
 /**
@@ -1291,6 +1304,12 @@
 				value = min(w_length, (u16) value);
 			}
 			break;
+		case USB_DT_OTG:
+			if (cdev->otg_desc) {
+				memcpy(req->buf, cdev->otg_desc, w_length);
+				value = w_length;
+			}
+			break;
 		}
 		break;
 
@@ -1436,6 +1455,8 @@
 			break;
 
 		case USB_RECIP_ENDPOINT:
+			if (!cdev->config)
+				break;
 			endp = ((w_index & 0x80) >> 3) | (w_index & 0x0f);
 			list_for_each_entry(f, &cdev->config->functions, list) {
 				if (test_bit(endp, f->endpoints))
@@ -1717,7 +1738,7 @@
 {
 	struct usb_composite_dev	*cdev = get_gadget_data(gadget);
 	struct usb_function		*f;
-	u8				maxpower;
+	u16				maxpower;
 
 	/* REVISIT:  should we have config level
 	 * suspend/resume callbacks?
diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
index c588e8e..ac0e79e 100644
--- a/drivers/usb/gadget/dummy_hcd.c
+++ b/drivers/usb/gadget/dummy_hcd.c
@@ -923,8 +923,9 @@
 	struct dummy_hcd	*dum_hcd = gadget_to_dummy_hcd(g);
 	struct dummy		*dum = dum_hcd->dum;
 
-	dev_dbg(udc_dev(dum), "unregister gadget driver '%s'\n",
-			driver->driver.name);
+	if (driver)
+		dev_dbg(udc_dev(dum), "unregister gadget driver '%s'\n",
+				driver->driver.name);
 
 	dum->driver = NULL;
 
@@ -1000,8 +1001,8 @@
 {
 	struct dummy	*dum = platform_get_drvdata(pdev);
 
-	usb_del_gadget_udc(&dum->gadget);
 	device_remove_file(&dum->gadget.dev, &dev_attr_function);
+	usb_del_gadget_udc(&dum->gadget);
 	return 0;
 }
 
diff --git a/drivers/usb/gadget/epautoconf.c b/drivers/usb/gadget/epautoconf.c
index a777f7b..13a2a20 100644
--- a/drivers/usb/gadget/epautoconf.c
+++ b/drivers/usb/gadget/epautoconf.c
@@ -188,6 +188,8 @@
 	ep->address = desc->bEndpointAddress;
 	return 1;
 }
+EXPORT_SYMBOL_GPL(ep_matches);
+
 
 static struct usb_ep *
 find_ep (struct usb_gadget *gadget, const char *name)
@@ -200,6 +202,7 @@
 	}
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(find_ep);
 
 /**
  * usb_ep_autoconfig_ss() - choose an endpoint matching the ep
@@ -253,7 +256,11 @@
 {
 	struct usb_ep	*ep;
 	u8		type;
+#ifdef CONFIG_USB_DWC3_GADGET
+	u8	       addr;
 
+	addr = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
+#endif
 	type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
 
 	/* First, apply chip-specific "best usage" knowledge.
@@ -301,10 +308,44 @@
 		if (ep && ep_matches(gadget, ep, desc, ep_comp))
 			goto found_ep;
 #endif
+
+#ifdef CONFIG_USB_DWC3_GADGET
+	} else if (gadget_is_middwc3tng(gadget)) {
+		if (addr == 0x1) {
+			/* statically assigned ebc-ep1 in/out  */
+			if ((desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
+			    & USB_DIR_IN)
+				ep = find_ep(gadget, "ep1in");
+			else
+				ep = NULL;
+		} else if (addr == 0x8) {
+			/* statically assigned ebc-ep8 in/out */
+			if ((desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
+			    & USB_DIR_IN)
+				ep = find_ep (gadget, "ep8in");
+			else
+				ep = find_ep (gadget, "ep8out");
+		} else
+			ep = NULL;
+		if (ep && ep_matches(gadget, ep, desc, ep_comp))
+			goto found_ep;
+#endif
+
 	}
 
 	/* Second, look at endpoints until an unclaimed one looks usable */
 	list_for_each_entry (ep, &gadget->ep_list, ep_list) {
+#ifdef CONFIG_USB_DWC3_GADGET
+		/* ep1in and ep8in are reserved for DWC3 device controller */
+		if (!strncmp(ep->name, "ep1in", 5) ||
+		    !strncmp(ep->name, "ep8in", 5))
+			continue;
+		if (gadget_is_middwc3tng(gadget))
+			/* ep1out and ep8out are also reserved */
+			if (!strncmp(ep->name, "ep1out", 6) ||
+			    !strncmp(ep->name, "ep8out", 6))
+				continue;
+#endif
 		if (ep_matches(gadget, ep, desc, ep_comp))
 			goto found_ep;
 	}
diff --git a/drivers/usb/gadget/f_accessory.c b/drivers/usb/gadget/f_accessory.c
index 0237f1e..2c4b64e 100644
--- a/drivers/usb/gadget/f_accessory.c
+++ b/drivers/usb/gadget/f_accessory.c
@@ -38,6 +38,7 @@
 #include <linux/usb.h>
 #include <linux/usb/ch9.h>
 #include <linux/usb/f_accessory.h>
+#include <linux/compat.h>
 
 #define BULK_BUFFER_SIZE    16384
 #define ACC_STRING_SIZE     256
@@ -284,6 +285,11 @@
 	wake_up(&dev->read_wq);
 }
 
+static void acc_complete_ep0(struct usb_ep *ep, struct usb_request *req)
+{
+	pr_debug("acc_complete_ep0\n");
+}
+
 static void acc_complete_set_string(struct usb_ep *ep, struct usb_request *req)
 {
 	struct acc_dev	*dev = ep->driver_data;
@@ -739,6 +745,13 @@
 	return ret;
 }
 
+#ifdef CONFIG_COMPAT
+static long acc_compat_ioctl(struct file *fp, unsigned code, unsigned long value)
+{
+	return acc_ioctl(fp, code, (unsigned long) compat_ptr(value));
+}
+#endif
+
 static int acc_open(struct inode *ip, struct file *fp)
 {
 	printk(KERN_INFO "acc_open\n");
@@ -765,6 +778,9 @@
 	.read = acc_read,
 	.write = acc_write,
 	.unlocked_ioctl = acc_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = acc_compat_ioctl,
+#endif
 	.open = acc_open,
 	.release = acc_release,
 };
@@ -823,6 +839,7 @@
 			dev->start_requested = 1;
 			schedule_delayed_work(
 				&dev->start_work, msecs_to_jiffies(10));
+			cdev->req->complete = acc_complete_ep0;
 			value = 0;
 		} else if (b_request == ACCESSORY_SEND_STRING) {
 			dev->string_index = w_index;
@@ -832,10 +849,13 @@
 		} else if (b_request == ACCESSORY_SET_AUDIO_MODE &&
 				w_index == 0 && w_length == 0) {
 			dev->audio_mode = w_value;
+			cdev->req->complete = acc_complete_ep0;
 			value = 0;
 		} else if (b_request == ACCESSORY_REGISTER_HID) {
+			cdev->req->complete = acc_complete_ep0;
 			value = acc_register_hid(dev, w_value, w_index);
 		} else if (b_request == ACCESSORY_UNREGISTER_HID) {
+			cdev->req->complete = acc_complete_ep0;
 			value = acc_unregister_hid(dev, w_value);
 		} else if (b_request == ACCESSORY_SET_HID_REPORT_DESC) {
 			spin_lock_irqsave(&dev->lock, flags);
@@ -869,6 +889,7 @@
 	} else if (b_requestType == (USB_DIR_IN | USB_TYPE_VENDOR)) {
 		if (b_request == ACCESSORY_GET_PROTOCOL) {
 			*((u16 *)cdev->req->buf) = PROTOCOL_VERSION;
+			cdev->req->complete = acc_complete_ep0;
 			value = sizeof(u16);
 
 			/* clear any string left over from a previous session */
diff --git a/drivers/usb/gadget/f_acm.c b/drivers/usb/gadget/f_acm.c
index 4b7e33e..6279d24 100644
--- a/drivers/usb/gadget/f_acm.c
+++ b/drivers/usb/gadget/f_acm.c
@@ -285,6 +285,7 @@
 	[ACM_CTRL_IDX].s = "CDC Abstract Control Model (ACM)",
 	[ACM_DATA_IDX].s = "CDC ACM Data",
 	[ACM_IAD_IDX ].s = "CDC Serial",
+	{  } /* end of list */
 };
 
 static struct usb_gadget_strings acm_string_table = {
@@ -429,7 +430,8 @@
 		if (acm->notify->driver_data) {
 			VDBG(cdev, "reset acm control interface %d\n", intf);
 			usb_ep_disable(acm->notify);
-		} else {
+		}
+		if (!acm->notify->desc) {
 			VDBG(cdev, "init acm ctrl interface %d\n", intf);
 			if (config_ep_by_speed(cdev->gadget, f, acm->notify))
 				return -EINVAL;
@@ -748,7 +750,6 @@
 	acm->port.send_break = acm_send_break;
 
 	acm->port.func.name = "acm";
-	acm->port.func.strings = acm_strings;
 	/* descriptors are per-instance copies */
 	acm->port.func.bind = acm_bind;
 	acm->port.func.set_alt = acm_set_alt;
diff --git a/drivers/usb/gadget/f_audio_source.c b/drivers/usb/gadget/f_audio_source.c
index 07a1a3c..459fed3 100644
--- a/drivers/usb/gadget/f_audio_source.c
+++ b/drivers/usb/gadget/f_audio_source.c
@@ -353,8 +353,23 @@
 	frames -= audio->frames_sent;
 
 	/* We need to send something to keep the pipeline going */
-	if (frames <= 0)
+	if (frames < FRAMES_PER_MSEC) {
 		frames = FRAMES_PER_MSEC;
+	} else if (frames == 2 * FRAMES_PER_MSEC) {
+		frames = FRAMES_PER_MSEC;
+
+		/* Adjust frames_sent.
+		 *
+		 * "frames" is calulated from kernel time which grows a
+		 * little faster than frames_sent, and we should adjust
+		 * frames_sent to catch up with "frames" to insure the
+		 * jitter won't exceed 2 * FRAMES_PER_MSEC + 1
+		 */
+		audio->frames_sent += FRAMES_PER_MSEC;
+	} else if (frames == 2 * FRAMES_PER_MSEC + 1) {
+		frames = FRAMES_PER_MSEC + 1;
+		audio->frames_sent += FRAMES_PER_MSEC;
+	}
 
 	while (frames > 0) {
 		req = audio_req_get(audio);
@@ -528,11 +543,16 @@
 
 	pr_debug("audio_set_alt intf %d, alt %d\n", intf, alt);
 
-	ret = config_ep_by_speed(cdev->gadget, f, audio->in_ep);
-	if (ret)
-		return ret;
+	if (intf == audio_as_interface_alt_1_desc.bInterfaceNumber &&
+		alt == 1 && !audio->in_ep->driver_data) {
+		ret = config_ep_by_speed(cdev->gadget, f, audio->in_ep);
+		if (ret)
+			return ret;
 
-	usb_ep_enable(audio->in_ep);
+		usb_ep_enable(audio->in_ep);
+		audio->in_ep->driver_data = audio;
+	}
+
 	return 0;
 }
 
@@ -542,6 +562,8 @@
 
 	pr_debug("audio_disable\n");
 	usb_ep_disable(audio->in_ep);
+
+	audio->in_ep->driver_data = NULL;
 }
 
 /*-------------------------------------------------------------------------*/
diff --git a/drivers/usb/gadget/f_dvc_dfx.c b/drivers/usb/gadget/f_dvc_dfx.c
new file mode 100644
index 0000000..ac802eb
--- /dev/null
+++ b/drivers/usb/gadget/f_dvc_dfx.c
@@ -0,0 +1,922 @@
+/*
+ * Gadget Driver for Android DvC.Dfx Debug Capability
+ *
+ * Copyright (C) 2008-2010, Intel Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/usb/debug.h>
+#include <linux/sdm.h>
+#include <asm/intel_soc_debug.h>
+
+#define DFX_RX_REQ_MAX 1
+#define DFX_TX_REQ_MAX 2
+#define DFX_BULK_REQ_SIZE 64
+
+#define CONFIG_BOARD_MRFLD_VV
+
+struct dvc_dfx_dev {
+	struct usb_function function;
+	struct usb_composite_dev *cdev;
+	spinlock_t lock;
+	u8	ctrl_id, data_id;
+
+	struct usb_ep *ep_in;
+	struct usb_ep *ep_out;
+
+	int transfering;
+	int online;
+	int online_ctrl;
+	int online_data;
+	int error;
+
+	atomic_t read_excl;
+	atomic_t write_excl;
+	atomic_t open_excl;
+
+	wait_queue_head_t read_wq;
+	wait_queue_head_t write_wq;
+
+	struct usb_request *rx_req[DFX_RX_REQ_MAX];
+
+	struct list_head tx_idle;
+	struct list_head tx_xfer;
+};
+
+static struct usb_interface_assoc_descriptor dfx_iad_desc = {
+	.bLength		= sizeof(dfx_iad_desc),
+	.bDescriptorType	= USB_DT_INTERFACE_ASSOCIATION,
+	/* .bFirstInterface	= DYNAMIC, */
+	.bInterfaceCount	= 2, /* debug control + data */
+	.bFunctionClass		= USB_CLASS_DEBUG,
+	.bFunctionSubClass	= USB_SUBCLASS_DVC_DFX,
+	/* .bFunctionProtocol	= DC_PROTOCOL_VENDOR, */
+	/* .iFunction		= 0, */
+};
+
+static struct usb_interface_descriptor dfx_interface_desc = {
+	.bLength                = USB_DT_INTERFACE_SIZE,
+	.bDescriptorType        = USB_DT_INTERFACE,
+	.bNumEndpoints          = 0,
+	.bInterfaceClass        = USB_CLASS_DEBUG,
+	.bInterfaceSubClass     = USB_SUBCLASS_DEBUG_CONTROL,
+	/* .bInterfaceProtocol     = DC_PROTOCOL_VENDOR, */
+};
+
+#define DC_DBG_ATTRI_LENGTH	DC_DBG_ATTRI_SIZE(2, 32)
+/* 1 input terminal, 1 output terminal and 1 feature unit */
+#define DC_DBG_TOTAL_LENGTH (DC_DBG_ATTRI_LENGTH)
+
+DECLARE_DC_DEBUG_ATTR_DESCR(DVCD, 2, 32);
+
+static struct DC_DEBUG_ATTR_DESCR(DVCD) dfx_debug_attri_desc = {
+	.bLength		= DC_DBG_ATTRI_LENGTH,
+	.bDescriptorType	= USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype	= DC_DEBUG_ATTRIBUTES,
+	.bcdDC			= __constant_cpu_to_le16(0x0100),
+	.wTotalLength		= __constant_cpu_to_le16(DC_DBG_TOTAL_LENGTH),
+	.bmSupportedFeatures	= 0, /* Debug Event Supported, per SAS */
+	.bControlSize		= 2,
+	.bmControl		= {	/* per SAS */
+		[0]		= 0xFF,
+		[1]		= 0x3F,
+	},
+	.wAuxDataSize		= __constant_cpu_to_le16(0x20),
+/* per SAS v0.3*/
+	.dInputBufferSize	= __constant_cpu_to_le32(0x40),
+	.dOutputBufferSize	= __constant_cpu_to_le32(0x80),
+	.qBaseAddress		= 0, /* revision */
+	.hGlobalID		= { /* revision */
+		[0]		= 0,
+		[1]		= 0,
+	}
+};
+
+static struct usb_interface_descriptor dfx_data_interface_desc = {
+	.bLength                = USB_DT_INTERFACE_SIZE,
+	.bDescriptorType        = USB_DT_INTERFACE,
+	.bAlternateSetting	= 0,
+	.bNumEndpoints          = 2,
+	.bInterfaceClass        = USB_CLASS_DEBUG,
+	.bInterfaceSubClass     = USB_SUBCLASS_DVC_DFX,
+	/* .bInterfaceProtocol     = DC_PROTOCOL_VENDOR, */
+};
+
+static struct usb_endpoint_descriptor dfx_fullspeed_in_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_IN,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor dfx_fullspeed_out_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_OUT,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor dfx_highspeed_in_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_IN,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor dfx_highspeed_out_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_OUT,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor dfx_superspeed_in_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_IN,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = __constant_cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor dfx_superspeed_in_comp_desc = {
+	.bLength		= USB_DT_SS_EP_COMP_SIZE,
+	.bDescriptorType	= USB_DT_SS_ENDPOINT_COMP,
+	.bMaxBurst		= 0,
+	.bmAttributes		= 0,
+};
+
+static struct usb_endpoint_descriptor dfx_superspeed_out_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_OUT,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = __constant_cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor dfx_superspeed_out_comp_desc = {
+	.bLength		= USB_DT_SS_EP_COMP_SIZE,
+	.bDescriptorType	= USB_DT_SS_ENDPOINT_COMP,
+	.bMaxBurst		= 0,
+	.bmAttributes		= 0,
+};
+
+/* no INPUT/OUTPUT CONNECTION and UNIT descriptors for DvC.DFx */
+static struct usb_descriptor_header *fs_dfx_descs[] = {
+	(struct usb_descriptor_header *) &dfx_iad_desc,
+	(struct usb_descriptor_header *) &dfx_data_interface_desc,
+	(struct usb_descriptor_header *) &dfx_fullspeed_in_desc,
+	(struct usb_descriptor_header *) &dfx_fullspeed_out_desc,
+
+	(struct usb_descriptor_header *) &dfx_interface_desc,
+	(struct usb_descriptor_header *) &dfx_debug_attri_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *hs_dfx_descs[] = {
+	(struct usb_descriptor_header *) &dfx_iad_desc,
+	(struct usb_descriptor_header *) &dfx_data_interface_desc,
+	(struct usb_descriptor_header *) &dfx_highspeed_in_desc,
+	(struct usb_descriptor_header *) &dfx_highspeed_out_desc,
+
+	(struct usb_descriptor_header *) &dfx_interface_desc,
+	(struct usb_descriptor_header *) &dfx_debug_attri_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *ss_dfx_descs[] = {
+	(struct usb_descriptor_header *) &dfx_iad_desc,
+	(struct usb_descriptor_header *) &dfx_data_interface_desc,
+	(struct usb_descriptor_header *) &dfx_superspeed_in_desc,
+	(struct usb_descriptor_header *) &dfx_superspeed_in_comp_desc,
+	(struct usb_descriptor_header *) &dfx_superspeed_out_desc,
+	(struct usb_descriptor_header *) &dfx_superspeed_out_comp_desc,
+
+	(struct usb_descriptor_header *) &dfx_interface_desc,
+	(struct usb_descriptor_header *) &dfx_debug_attri_desc,
+	NULL,
+};
+
+/* string descriptors: */
+
+#define DVCDFX_CTRL_IDX	0
+#define DVCDFX_DATA_IDX	1
+#define DVCDFX_IAD_IDX	2
+
+/* static strings, in UTF-8 */
+static struct usb_string dfx_string_defs[] = {
+	[DVCDFX_CTRL_IDX].s = "Debug Sub-Class DvC.DFx (Control)",
+	[DVCDFX_DATA_IDX].s = "Debug Sub-Class DvC.DFx (Data)",
+	[DVCDFX_IAD_IDX].s = "Debug Sub-Class DvC.DFx",
+	{  /* ZEROES END LIST */ },
+};
+
+static struct usb_gadget_strings dfx_string_table = {
+	.language =		0x0409,	/* en-us */
+	.strings =		dfx_string_defs,
+};
+
+static struct usb_gadget_strings *dfx_strings[] = {
+	&dfx_string_table,
+	NULL,
+};
+
+/*-------------------------------------------------------------------------*/
+
+/* temporary variable used between dvc_dfx_open() and dvc_dfx_gadget_bind() */
+static struct dvc_dfx_dev *_dvc_dfx_dev;
+
+static inline struct dvc_dfx_dev *func_to_dvc_dfx(struct usb_function *f)
+{
+	return container_of(f, struct dvc_dfx_dev, function);
+}
+
+static int dvc_dfx_is_enabled(void)
+{
+	if ((!cpu_has_debug_feature(DEBUG_FEATURE_USB3DFX)) ||
+	    (!stm_is_enabled())) {
+		pr_info("%s STM and/or USB3DFX is not enabled\n", __func__);
+		return 0;
+	}
+	return 1;
+}
+
+static struct usb_request *dvc_dfx_request_new(struct usb_ep *ep,
+					       int buffer_size, dma_addr_t dma)
+{
+	struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
+	if (!req)
+		return NULL;
+
+
+	req->dma = dma;
+	/* now allocate buffers for the requests */
+	req->buf = kmalloc(buffer_size, GFP_KERNEL);
+	if (!req->buf) {
+		usb_ep_free_request(ep, req);
+		return NULL;
+	}
+
+	return req;
+}
+
+static void dvc_dfx_request_free(struct usb_request *req, struct usb_ep *ep)
+{
+	if (req) {
+		kfree(req->buf);
+		usb_ep_free_request(ep, req);
+	}
+}
+
+/* add a request to the tail of a list */
+static void dvc_dfx_req_put(struct dvc_dfx_dev *dev, struct list_head *head,
+		struct usb_request *req)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	list_add_tail(&req->list, head);
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/* remove a request from the head of a list */
+static struct usb_request *dvc_dfx_req_get(struct dvc_dfx_dev *dev,
+					   struct list_head *head)
+{
+	unsigned long flags;
+	struct usb_request *req;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	if (list_empty(head)) {
+		req = 0;
+	} else {
+		req = list_first_entry(head, struct usb_request, list);
+		list_del(&req->list);
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+	return req;
+}
+
+static void dvc_dfx_set_disconnected(struct dvc_dfx_dev *dev)
+{
+	dev->transfering = 0;
+}
+
+static void dvc_dfx_complete_in(struct usb_ep *ep, struct usb_request *req)
+{
+	struct dvc_dfx_dev *dev = _dvc_dfx_dev;
+
+	if (req->status != 0)
+		dvc_dfx_set_disconnected(dev);
+
+	dvc_dfx_req_put(dev, &dev->tx_idle, req);
+
+	wake_up(&dev->write_wq);
+}
+
+static void dvc_dfx_complete_out(struct usb_ep *ep, struct usb_request *req)
+{
+	struct dvc_dfx_dev *dev = _dvc_dfx_dev;
+
+	if (req->status != 0)
+		dvc_dfx_set_disconnected(dev);
+	wake_up(&dev->read_wq);
+}
+
+
+static inline int dvc_dfx_lock(atomic_t *excl)
+{
+	if (atomic_inc_return(excl) == 1) {
+		return 0;
+	} else {
+		atomic_dec(excl);
+		return -1;
+	}
+}
+
+static inline void dvc_dfx_unlock(atomic_t *excl)
+{
+	atomic_dec(excl);
+}
+
+static int dfx_create_bulk_endpoints(struct dvc_dfx_dev *dev,
+				     struct usb_endpoint_descriptor *in_desc,
+				     struct usb_endpoint_descriptor *out_desc,
+				     struct usb_ss_ep_comp_descriptor *in_comp_desc,
+				     struct usb_ss_ep_comp_descriptor *out_comp_desc
+	)
+{
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request *req;
+	struct usb_ep *ep;
+	int i;
+
+	pr_debug("%s dev: %p\n", __func__, dev);
+
+	in_desc->bEndpointAddress |= 0x8;
+	ep = usb_ep_autoconfig_ss(cdev->gadget, in_desc, in_comp_desc);
+	if (!ep) {
+		pr_debug("%s for ep_in failed\n", __func__);
+		return -ENODEV;
+	}
+	pr_debug("%s for ep_in got %s\n", __func__, ep->name);
+
+	ep->driver_data = dev;		/* claim the endpoint */
+	dev->ep_in = ep;
+
+	out_desc->bEndpointAddress |= 0x8;
+	ep = usb_ep_autoconfig_ss(cdev->gadget, out_desc, out_comp_desc);
+	if (!ep) {
+		pr_debug("%s for ep_out failed\n", __func__);
+		return -ENODEV;
+	}
+	pr_debug("%s for ep_out got %s\n", __func__, ep->name);
+
+	ep->driver_data = dev;		/* claim the endpoint */
+	dev->ep_out = ep;
+
+	/* now allocate requests for our endpoints */
+	for (i = 0; i < DFX_TX_REQ_MAX; i++) {
+		if (!(i % 2))
+			req = dvc_dfx_request_new(dev->ep_in,
+				DFX_BULK_BUFFER_SIZE,
+				(dma_addr_t)DFX_BULK_IN_BUFFER_ADDR);
+		else
+			req = dvc_dfx_request_new(dev->ep_in,
+				DFX_BULK_BUFFER_SIZE,
+				(dma_addr_t)DFX_BULK_IN_BUFFER_ADDR_2);
+		if (!req)
+			goto fail;
+		req->complete = dvc_dfx_complete_in;
+		dvc_dfx_req_put(dev, &dev->tx_idle, req);
+	}
+	for (i = 0; i < DFX_RX_REQ_MAX; i++) {
+		req = dvc_dfx_request_new(dev->ep_out, DFX_BULK_BUFFER_SIZE,
+			(dma_addr_t)DFX_BULK_OUT_BUFFER_ADDR);
+		if (!req)
+			goto fail;
+		req->complete = dvc_dfx_complete_out;
+		dev->rx_req[i] = req;
+	}
+
+	return 0;
+
+fail:
+	pr_err("%s could not allocate requests\n", __func__);
+	while ((req = dvc_dfx_req_get(dev, &dev->tx_idle)))
+		dvc_dfx_request_free(req, dev->ep_out);
+	for (i = 0; i < DFX_RX_REQ_MAX; i++)
+		dvc_dfx_request_free(dev->rx_req[i], dev->ep_out);
+	return -1;
+}
+
+static ssize_t dvc_dfx_start_transfer(size_t count)
+{
+	struct dvc_dfx_dev *dev = _dvc_dfx_dev;
+	struct usb_request *req = NULL;
+	int r = count, xfer;
+	int ret = -ENODEV;
+
+
+	pr_info("%s start\n", __func__);
+	if (!_dvc_dfx_dev)
+		return ret;
+
+	if (dvc_dfx_lock(&dev->read_excl)
+	    && dvc_dfx_lock(&dev->write_excl))
+		return -EBUSY;
+
+	/* we will block until enumeration completes */
+	while (!(dev->online || dev->error)) {
+		pr_debug("%s waiting for online state\n", __func__);
+		ret = wait_event_interruptible(dev->read_wq,
+				(dev->online || dev->error));
+
+		if (ret < 0) {
+			/* not at CONFIGURED state */
+			pr_info("%s USB not at CONFIGURED\n", __func__);
+			dvc_dfx_unlock(&dev->read_excl);
+			dvc_dfx_unlock(&dev->write_excl);
+			return ret;
+		}
+	}
+
+	/* queue a ep_in endless request */
+	while (r > 0) {
+		if (dev->error) {
+			pr_debug("%s dev->error\n", __func__);
+			r = -EIO;
+			break;
+		}
+
+		if (!dev->online) {
+			pr_debug("%s !dev->online issue\n", __func__);
+			r = -EIO;
+			break;
+		}
+
+		/* get an idle tx request to use */
+		req = 0;
+		ret = wait_event_interruptible(dev->write_wq,
+				dev->error || !dev->online ||
+				(req = dvc_dfx_req_get(dev, &dev->tx_idle)));
+
+		if (ret < 0) {
+			r = ret;
+			break;
+		}
+
+		if (req != 0) {
+			if (count > DFX_BULK_BUFFER_SIZE)
+				xfer = DFX_BULK_BUFFER_SIZE;
+			else
+				xfer = count;
+
+			req->no_interrupt = 1;
+			req->context = &dev->function;
+			req->length = xfer;
+			pr_debug("%s queue tx_idle list req to dev->ep_in\n",
+				__func__);
+			ret = usb_ep_queue(dev->ep_in, req, GFP_ATOMIC);
+			if (ret < 0) {
+				pr_err("%s xfer error %d\n", __func__, ret);
+					dev->error = 1;
+				r = -EIO;
+				break;
+			}
+			pr_debug("%s xfer=%d/%d  queued req/%p\n", __func__,
+				xfer, r, req);
+			dvc_dfx_req_put(dev, &dev->tx_xfer, req);
+			r -= xfer;
+
+			/* zero this so we don't try to free it on error exit */
+			req = 0;
+		}
+	}
+	if (req) {
+		pr_debug("%s req re-added to tx_idle on error\n", __func__);
+		dvc_dfx_req_put(dev, &dev->tx_idle, req);
+	}
+
+	pr_debug("%s rx_req to dev->ep_out\n", __func__);
+	/* queue a ep_out endless request */
+	req = dev->rx_req[0];
+	req->length = DFX_BULK_BUFFER_SIZE;
+	req->no_interrupt = 1;
+	req->context = &dev->function;
+	ret = usb_ep_queue(dev->ep_out, req, GFP_ATOMIC);
+	if (ret < 0) {
+		pr_err("%s failed to queue out req %p (%d)\n",
+		       __func__, req, req->length);
+		r = -EIO;
+	} else {
+		dev->transfering = 1;
+	}
+
+	dvc_dfx_unlock(&dev->read_excl);
+	dvc_dfx_unlock(&dev->write_excl);
+	pr_debug("%s returning\n", __func__);
+	return r;
+}
+
+static int dvc_dfx_disable_transfer(void)
+{
+	struct dvc_dfx_dev *dev = _dvc_dfx_dev;
+	struct usb_request *req;
+	int r = 1;
+	int ret;
+
+
+	pr_info("%s start\n", __func__);
+	if (!_dvc_dfx_dev)
+		return -ENODEV;
+
+	if (dvc_dfx_lock(&dev->read_excl)
+	    && dvc_dfx_lock(&dev->write_excl))
+		return -EBUSY;
+
+	if (dev->error) {
+		pr_debug("%s dev->error\n", __func__);
+		r = -EIO;
+		goto end;
+	}
+
+	if ((!dev->online) || (!dev->transfering)) {
+		pr_debug("%s !dev->online OR !dev->transfering\n", __func__);
+		r = -EIO;
+		goto end;
+	}
+
+	/* get an xfer tx request to use */
+	while ((req = dvc_dfx_req_get(dev, &dev->tx_xfer))) {
+		ret = usb_ep_dequeue(dev->ep_in, req);
+		if (ret < 0) {
+			pr_err("%s dequeue error %d\n", __func__, ret);
+			dev->error = 1;
+			r = -EIO;
+			goto end;
+		}
+		pr_debug("%s dequeued tx req/%p\n", __func__, req);
+	}
+	ret = usb_ep_dequeue(dev->ep_out, dev->rx_req[0]);
+	if (ret < 0) {
+		pr_err("%s dequeue rx error %d\n", __func__, ret);
+		dev->error = 1;
+		r = -EIO;
+		goto end;
+	}
+
+end:
+	dvc_dfx_unlock(&dev->read_excl);
+	dvc_dfx_unlock(&dev->write_excl);
+	return r;
+}
+
+static int dvc_dfx_open(struct inode *ip, struct file *fp)
+{
+	pr_info("%s\n", __func__);
+	if (!_dvc_dfx_dev)
+		return -ENODEV;
+
+	if (dvc_dfx_lock(&_dvc_dfx_dev->open_excl))
+		return -EBUSY;
+
+	fp->private_data = _dvc_dfx_dev;
+
+	/* clear the error latch */
+	_dvc_dfx_dev->error = 0;
+	_dvc_dfx_dev->transfering = 0;
+
+	return 0;
+}
+
+static int dvc_dfx_release(struct inode *ip, struct file *fp)
+{
+	pr_info("%s\n", __func__);
+
+	dvc_dfx_unlock(&_dvc_dfx_dev->open_excl);
+	return 0;
+}
+
+/* file operations for DvC.Dfx device /dev/usb_dvc_dfx */
+static const struct file_operations dvc_dfx_fops = {
+	.owner = THIS_MODULE,
+	.open = dvc_dfx_open,
+	.release = dvc_dfx_release,
+};
+
+static struct miscdevice dvc_dfx_device = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "usb_dvc_dfx",
+	.fops = &dvc_dfx_fops,
+};
+
+static int dvc_dfx_ctrlrequest(struct usb_composite_dev *cdev,
+				const struct usb_ctrlrequest *ctrl)
+{
+	struct dvc_dfx_dev *dev = _dvc_dfx_dev;
+	int	value = -EOPNOTSUPP;
+	u16	w_index = le16_to_cpu(ctrl->wIndex);
+	u16	w_value = le16_to_cpu(ctrl->wValue);
+	u16	w_length = le16_to_cpu(ctrl->wLength);
+
+	pr_debug("%s %02x.%02x v%04x i%04x l%u\n", __func__,
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+
+	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+
+	/* DC_REQUEST_SET_RESET ... stop active transfer */
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+		| DC_REQUEST_SET_RESET:
+		if (w_index != dev->data_id)
+			goto invalid;
+
+		pr_info("%s DC_REQUEST_SET_RESET v%04x i%04x l%u\n", __func__,
+			w_value, w_index, w_length);
+
+		dvc_dfx_disable_transfer();
+		value = 0;
+		break;
+
+	default:
+invalid:
+		pr_debug("unknown class-specific control req "
+			 "%02x.%02x v%04x i%04x l%u\n",
+			 ctrl->bRequestType, ctrl->bRequest,
+			 w_value, w_index, w_length);
+	}
+
+	/* respond with data transfer or status phase? */
+	if (value >= 0) {
+		cdev->req->zero = 0;
+		cdev->req->length = value;
+		value = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
+		if (value < 0)
+			pr_err("%s setup response queue error\n", __func__);
+	}
+
+	/* device either stalls (value < 0) or reports success */
+	return value;
+}
+
+static int
+dvc_dfx_function_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev	*cdev = c->cdev;
+	struct dvc_dfx_dev		*dev = func_to_dvc_dfx(f);
+	int			id;
+	int			ret;
+
+	dev->cdev = cdev;
+	pr_info("%s dev: %p\n", __func__, dev);
+
+	/* allocate interface ID(s) */
+	id = usb_interface_id(c, f);
+	if (id < 0)
+		return id;
+	dev->data_id = id;
+	dfx_data_interface_desc.bInterfaceNumber = id;
+	dfx_iad_desc.bFirstInterface = id;
+
+	id = usb_interface_id(c, f);
+	if (id < 0)
+		return id;
+	dev->ctrl_id = id;
+	dfx_interface_desc.bInterfaceNumber = id;
+
+	/* allocate endpoints */
+	ret = dfx_create_bulk_endpoints(dev, &dfx_fullspeed_in_desc,
+					&dfx_fullspeed_out_desc,
+					&dfx_superspeed_in_comp_desc,
+					&dfx_superspeed_out_comp_desc
+		);
+	if (ret)
+		return ret;
+
+	/* support high speed hardware */
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		dfx_highspeed_in_desc.bEndpointAddress =
+			dfx_fullspeed_in_desc.bEndpointAddress;
+		dfx_highspeed_out_desc.bEndpointAddress =
+			dfx_fullspeed_out_desc.bEndpointAddress;
+	}
+
+	if (gadget_is_superspeed(c->cdev->gadget)) {
+		dfx_superspeed_in_desc.bEndpointAddress =
+			dfx_fullspeed_in_desc.bEndpointAddress;
+
+		dfx_superspeed_out_desc.bEndpointAddress =
+			dfx_fullspeed_out_desc.bEndpointAddress;
+	}
+
+	pr_info("%s speed %s: IN/%s, OUT/%s\n",
+			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+			f->name, dev->ep_in->name, dev->ep_out->name);
+	return 0;
+}
+
+static void
+dvc_dfx_function_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct dvc_dfx_dev	*dev = func_to_dvc_dfx(f);
+	struct usb_request *req;
+	int i;
+
+	dev->online = 0;
+	dev->online_ctrl = 0;
+	dev->online_data = 0;
+	dev->transfering = 0;
+	dev->error = 0;
+
+	dfx_string_defs[DVCDFX_CTRL_IDX].id = 0;
+
+	wake_up(&dev->read_wq);
+
+	for (i = 0; i < DFX_RX_REQ_MAX; i++)
+		dvc_dfx_request_free(dev->rx_req[i], dev->ep_out);
+	while ((req = dvc_dfx_req_get(dev, &dev->tx_idle)))
+		dvc_dfx_request_free(req, dev->ep_in);
+
+}
+
+static int dvc_dfx_function_set_alt(struct usb_function *f,
+		unsigned intf, unsigned alt)
+{
+	struct dvc_dfx_dev	*dev = func_to_dvc_dfx(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	int ret;
+
+	pr_info("%s intf: %d alt: %d\n", __func__, intf, alt);
+	if (intf == dfx_data_interface_desc.bInterfaceNumber) {
+		ret = config_ep_by_speed(cdev->gadget, f, dev->ep_in);
+		if (ret) {
+			pr_err("%s intf: %d alt: %d ep_by_speed in error %d\n",
+				__func__, intf, alt, ret);
+			return ret;
+		}
+		ret = usb_ep_enable(dev->ep_in);
+		if (ret) {
+			pr_err("%s intf: %d alt: %d ep_enable in err %d\n",
+				__func__, intf, alt, ret);
+			return ret;
+		}
+
+		ret = config_ep_by_speed(cdev->gadget, f, dev->ep_out);
+		if (ret) {
+			pr_err("%s intf: %d alt: %d ep_enable out error %d\n",
+				__func__, intf, alt, ret);
+			return ret;
+		}
+
+		ret = usb_ep_enable(dev->ep_out);
+		if (ret) {
+			pr_err("%s intf: %d alt: %d ep_enable out err %d\n",
+				__func__, intf, alt, ret);
+			usb_ep_disable(dev->ep_in);
+			return ret;
+		}
+		dev->online_data = 1;
+	}
+	if (intf == dfx_interface_desc.bInterfaceNumber)
+		dev->online_ctrl = 1;
+
+	if (dev->online_data && dev->online_ctrl) {
+		dev->online = 1;
+		dev->error = 0;
+	}
+
+	/* readers may be blocked waiting for us to go online */
+	wake_up(&dev->read_wq);
+	return 0;
+}
+
+static void dvc_dfx_function_disable(struct usb_function *f)
+{
+	struct dvc_dfx_dev	*dev = func_to_dvc_dfx(f);
+	struct usb_composite_dev	*cdev = dev->cdev;
+
+	pr_info("%s cdev %p\n", __func__, cdev);
+
+	if (dev->transfering)
+		dvc_dfx_disable_transfer();
+
+	dev->online = 0;
+	dev->online_ctrl = 0;
+	dev->online_data = 0;
+	dev->error = 0;
+	usb_ep_disable(dev->ep_in);
+	usb_ep_disable(dev->ep_out);
+
+	/* readers may be blocked waiting for us to go online */
+	wake_up(&dev->read_wq);
+
+	pr_debug("%s disabled\n", dev->function.name);
+}
+
+static int dvc_dfx_bind_config(struct usb_configuration *c)
+{
+	struct dvc_dfx_dev *dev = _dvc_dfx_dev;
+	int status;
+
+	pr_info("%s\n", __func__);
+
+	if (dfx_string_defs[DVCDFX_CTRL_IDX].id == 0) {
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		dfx_string_defs[DVCDFX_CTRL_IDX].id = status;
+
+		dfx_interface_desc.iInterface = status;
+
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		dfx_string_defs[DVCDFX_DATA_IDX].id = status;
+
+		dfx_data_interface_desc.iInterface = status;
+
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		dfx_string_defs[DVCDFX_IAD_IDX].id = status;
+
+		dfx_iad_desc.iFunction = status;
+	}
+
+	dev->cdev = c->cdev;
+	dev->function.name = "dvcdfx";
+	dev->function.fs_descriptors = fs_dfx_descs;
+	dev->function.hs_descriptors = hs_dfx_descs;
+	dev->function.ss_descriptors = ss_dfx_descs;
+	dev->function.strings = dfx_strings;
+	dev->function.bind = dvc_dfx_function_bind;
+	dev->function.unbind = dvc_dfx_function_unbind;
+	dev->function.set_alt = dvc_dfx_function_set_alt;
+	dev->function.disable = dvc_dfx_function_disable;
+
+	return usb_add_function(c, &dev->function);
+}
+
+static int dvc_dfx_setup(void)
+{
+	struct dvc_dfx_dev *dev;
+	int ret;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	spin_lock_init(&dev->lock);
+
+	init_waitqueue_head(&dev->read_wq);
+	init_waitqueue_head(&dev->write_wq);
+
+	INIT_LIST_HEAD(&dev->tx_idle);
+	INIT_LIST_HEAD(&dev->tx_xfer);
+
+	atomic_set(&dev->open_excl, 0);
+	atomic_set(&dev->read_excl, 0);
+	atomic_set(&dev->write_excl, 0);
+
+	_dvc_dfx_dev = dev;
+
+	ret = misc_register(&dvc_dfx_device);
+	if (ret)
+		goto err;
+
+	return 0;
+
+err:
+	kfree(dev);
+	pr_err("DvC.Dfx gadget driver failed to initialize\n");
+	return ret;
+}
+
+static void dvc_dfx_cleanup(void)
+{
+	misc_deregister(&dvc_dfx_device);
+
+	kfree(_dvc_dfx_dev);
+	_dvc_dfx_dev = NULL;
+}
diff --git a/drivers/usb/gadget/f_dvc_trace.c b/drivers/usb/gadget/f_dvc_trace.c
new file mode 100644
index 0000000..ab392dd
--- /dev/null
+++ b/drivers/usb/gadget/f_dvc_trace.c
@@ -0,0 +1,887 @@
+/*
+ * Gadget Driver for Android DvC.Trace Debug Capability
+ *
+ * Copyright (C) 2008-2010, Intel Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/usb/debug.h>
+#include <linux/sdm.h>
+
+#define TRACE_TX_REQ_MAX 3
+
+#define CONFIG_BOARD_MRFLD_VV
+
+struct dvc_trace_dev {
+	struct usb_function function;
+	struct usb_composite_dev *cdev;
+	spinlock_t lock;
+	u8	ctrl_id, data_id;
+	u8	class_id, subclass_id;
+
+	struct usb_ep *ep_in;
+
+	int online;
+	int online_data;
+	int online_ctrl;
+	int transfering;
+	int error;
+
+	atomic_t write_excl;
+	atomic_t open_excl;
+
+	wait_queue_head_t write_wq;
+
+	struct list_head tx_idle;
+	struct list_head tx_xfer;
+};
+
+static struct usb_interface_assoc_descriptor trace_iad_desc = {
+	.bLength		= sizeof(trace_iad_desc),
+	.bDescriptorType	= USB_DT_INTERFACE_ASSOCIATION,
+	/* .bFirstInterface	= DYNAMIC, */
+	.bInterfaceCount	= 2, /* debug control + data */
+	.bFunctionClass		= USB_CLASS_DEBUG,
+	.bFunctionSubClass	= USB_SUBCLASS_DVC_TRACE,
+	/* .bFunctionProtocol	= 0, */
+	/* .iFunction		= DYNAMIC, */
+};
+
+static struct usb_interface_descriptor trace_interface_desc = {
+	.bLength                = USB_DT_INTERFACE_SIZE,
+	.bDescriptorType        = USB_DT_INTERFACE,
+	.bNumEndpoints          = 0,
+	.bInterfaceClass        = USB_CLASS_DEBUG,
+	.bInterfaceSubClass     = USB_SUBCLASS_DEBUG_CONTROL,
+	/* .bInterfaceProtocol     = 0, */
+};
+
+#define DC_DVCTRACE_ATTRI_LENGTH	DC_DBG_ATTRI_SIZE(2, 32)
+/* 1 input terminal, 1 output terminal and 1 feature unit */
+#define DC_DVCTRACE_TOTAL_LENGTH (DC_DVCTRACE_ATTRI_LENGTH \
+	+ DC_OUTPUT_CONNECTION_SIZE \
+	+ DC_OUTPUT_CONNECTION_SIZE \
+	+ DC_DBG_UNIT_SIZE(STM_NB_IN_PINS, 2, 2, 24))
+
+DECLARE_DC_DEBUG_ATTR_DESCR(DVCT, 2, 32);
+
+static struct DC_DEBUG_ATTR_DESCR(DVCT) trace_debug_attri_desc = {
+	.bLength		= DC_DVCTRACE_ATTRI_LENGTH,
+	.bDescriptorType	= USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype	= DC_DEBUG_ATTRIBUTES,
+	.bcdDC			= __constant_cpu_to_le16(0x0100),
+	.wTotalLength	= __constant_cpu_to_le16(DC_DVCTRACE_TOTAL_LENGTH),
+	.bmSupportedFeatures	= 0, /* Debug Event Supported, per SAS */
+	.bControlSize		= 2,
+	.bmControl		= {	/* per SAS */
+		[0]		= 0xFF,
+		[1]		= 0x3F,
+	},
+	.wAuxDataSize		= __constant_cpu_to_le16(0x20),
+	.dInputBufferSize	= __constant_cpu_to_le32(0x00), /* per SAS */
+	.dOutputBufferSize = __constant_cpu_to_le32(TRACE_BULK_BUFFER_SIZE),
+	.qBaseAddress		= 0, /* revision */
+	.hGlobalID		= { /* revision */
+		[0]		= 0,
+		[1]		= 0,
+	}
+};
+
+static struct dc_output_connection_descriptor trace_output_conn_usb_desc = {
+	.bLength		= DC_OUTPUT_CONNECTION_SIZE,
+	.bDescriptorType	= USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype	= DC_OUTPUT_CONNECTION,
+	.bConnectionID		= 0x01, /* USB */
+	.bConnectionType	= DC_CONNECTION_USB,
+	.bAssocConnection	= 0, /* No related input-connection */
+	.wSourceID		= __constant_cpu_to_le16(0x01),
+	/* .iConnection		= DYNAMIC, */
+};
+
+static struct dc_output_connection_descriptor trace_output_conn_pti_desc = {
+	.bLength		= DC_OUTPUT_CONNECTION_SIZE,
+	.bDescriptorType	= USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype	= DC_OUTPUT_CONNECTION,
+	.bConnectionID		= 0, /* PTI */
+	.bConnectionType	= DC_CONNECTION_DEBUG_DATA,
+	.bAssocConnection	= 0, /* No related input-connection */
+	.wSourceID		= __constant_cpu_to_le16(0x01),
+	/* .iConnection		= DYNAMIC, */
+};
+
+#define DC_DVCTRACE_UNIT_LENGTH	DC_DBG_UNIT_SIZE(STM_NB_IN_PINS, 2, 2, 24)
+
+DECLARE_DC_DEBUG_UNIT_DESCRIPTOR(STM_NB_IN_PINS, 2, 2, 24);
+
+static struct DC_DEBUG_UNIT_DESCRIPTOR(STM_NB_IN_PINS, 2, 2, 24)
+		trace_debug_unit_stm_desc = {
+	.bLength		= DC_DVCTRACE_UNIT_LENGTH,
+	.bDescriptorType	= USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype	= DC_DEBUG_UNIT,
+	.bUnitID		= 0x01, /* per SAS */
+/* STM Trace Unit processor: revision */
+	.bDebugUnitType		= DC_UNIT_TYPE_TRACE_PROC,
+ /* STM: Trace compressor controller */
+	.bDebugSubUnitType	= DC_UNIT_SUBTYPE_TRACEZIP,
+	.bAliasUnitID		= 0, /* no associated debug unit */
+	.bNrInPins		= STM_NB_IN_PINS, /* p */
+/* wSourceID  contains STM_NB_IN_PINS elements */
+/*	.wSourceID		= {0}, */
+	.bNrOutPins		= 0x02,	/* q */
+	.dTraceFormat		= {
+		[0]	= __constant_cpu_to_le32(DC_TRACE_MIPI_FORMATED_STPV1),
+		[1]	= __constant_cpu_to_le32(DC_TRACE_MIPI_FORMATED_STPV1),
+	},
+	.dStreamID		= __constant_cpu_to_le32(0xFFFFFFFF),
+	.bControlSize		= 0x02,	/* n */
+	.bmControl		= {
+		[0]		= 0xFF,
+		[1]		= 0x3F,
+	},
+	.wAuxDataSize		= __constant_cpu_to_le16(24), /* m */
+	.qBaseAddress		= 0, /* revision */
+	.hIPID			= {
+		[0]		= 0,
+		[1]		= 0,
+	},
+	/* .iDebugUnitType		= DYNAMIC, */
+};
+
+static struct usb_interface_descriptor trace_data_interface_desc = {
+	.bLength                = USB_DT_INTERFACE_SIZE,
+	.bDescriptorType        = USB_DT_INTERFACE,
+	.bAlternateSetting	= 0,
+	.bNumEndpoints          = 1,
+	.bInterfaceClass        = USB_CLASS_DEBUG,
+	.bInterfaceSubClass     = USB_SUBCLASS_DVC_TRACE,
+	/* .bInterfaceProtocol     = 0, */
+};
+
+static struct usb_endpoint_descriptor trace_fullspeed_in_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_IN,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor trace_highspeed_in_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_IN,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor trace_superspeed_in_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_IN,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = __constant_cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor trace_superspeed_in_comp_desc = {
+	.bLength		= USB_DT_SS_EP_COMP_SIZE,
+	.bDescriptorType	= USB_DT_SS_ENDPOINT_COMP,
+	.bMaxBurst		= 0,
+	.bmAttributes		= 0,
+};
+
+static struct usb_descriptor_header *fs_trace_descs[] = {
+	(struct usb_descriptor_header *) &trace_iad_desc,
+	(struct usb_descriptor_header *) &trace_data_interface_desc,
+	(struct usb_descriptor_header *) &trace_fullspeed_in_desc,
+	(struct usb_descriptor_header *) &trace_interface_desc,
+	(struct usb_descriptor_header *) &trace_debug_attri_desc,
+	(struct usb_descriptor_header *) &trace_output_conn_pti_desc,
+	(struct usb_descriptor_header *) &trace_output_conn_usb_desc,
+	(struct usb_descriptor_header *) &trace_debug_unit_stm_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *hs_trace_descs[] = {
+	(struct usb_descriptor_header *) &trace_iad_desc,
+	(struct usb_descriptor_header *) &trace_data_interface_desc,
+	(struct usb_descriptor_header *) &trace_highspeed_in_desc,
+	(struct usb_descriptor_header *) &trace_interface_desc,
+	(struct usb_descriptor_header *) &trace_debug_attri_desc,
+	(struct usb_descriptor_header *) &trace_output_conn_pti_desc,
+	(struct usb_descriptor_header *) &trace_output_conn_usb_desc,
+	(struct usb_descriptor_header *) &trace_debug_unit_stm_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *ss_trace_descs[] = {
+	(struct usb_descriptor_header *) &trace_iad_desc,
+	(struct usb_descriptor_header *) &trace_data_interface_desc,
+	(struct usb_descriptor_header *) &trace_superspeed_in_desc,
+	(struct usb_descriptor_header *) &trace_superspeed_in_comp_desc,
+	(struct usb_descriptor_header *) &trace_interface_desc,
+	(struct usb_descriptor_header *) &trace_debug_attri_desc,
+	(struct usb_descriptor_header *) &trace_output_conn_pti_desc,
+	(struct usb_descriptor_header *) &trace_output_conn_usb_desc,
+	(struct usb_descriptor_header *) &trace_debug_unit_stm_desc,
+	NULL,
+};
+
+/* string descriptors: */
+#define DVCTRACE_CTRL_IDX	0
+#define DVCTRACE_DATA_IDX	1
+#define DVCTRACE_IAD_IDX	2
+#define DVCTRACE_CONN_PTI_IDX	3
+#define DVCTRACE_CONN_USB_IDX	4
+#define DVCTRACE_UNIT_STM_IDX	5
+
+/* static strings, in UTF-8 */
+static struct usb_string trace_string_defs[] = {
+	[DVCTRACE_CTRL_IDX].s = "Debug Sub-Class DvC.Trace (Control)",
+	[DVCTRACE_DATA_IDX].s = "Debug Sub-Class DvC.Trace (Data)",
+	[DVCTRACE_IAD_IDX].s = "Debug Sub-Class DvC.Trace",
+	[DVCTRACE_CONN_PTI_IDX].s = "MIPI PTIv1 output Connector ",
+	[DVCTRACE_CONN_USB_IDX].s = "USB Device output connector",
+	[DVCTRACE_UNIT_STM_IDX].s = "MIPI STM Debug Unit",
+	{  /* ZEROES END LIST */ },
+};
+
+static struct usb_gadget_strings trace_string_table = {
+	.language =		0x0409,	/* en-us */
+	.strings =		trace_string_defs,
+};
+
+static struct usb_gadget_strings *trace_strings[] = {
+	&trace_string_table,
+	NULL,
+};
+
+/* temporary var used between dvc_trace_open() and dvc_trace_gadget_bind() */
+static struct dvc_trace_dev *_dvc_trace_dev;
+
+static inline struct dvc_trace_dev *func_to_dvc_trace(struct usb_function *f)
+{
+	return container_of(f, struct dvc_trace_dev, function);
+}
+
+static int dvc_trace_is_enabled(void)
+{
+	if (!stm_is_enabled()) {
+		pr_info("%s STM/PTI block is not enabled\n", __func__);
+		return 0;
+	}
+	return 1;
+}
+
+static struct usb_request *dvc_trace_request_new(struct usb_ep *ep,
+					 int buffer_size, dma_addr_t dma)
+{
+	struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
+	if (!req)
+		return NULL;
+
+	req->dma = dma;
+	/* now allocate buffers for the requests */
+	req->buf = kmalloc(buffer_size, GFP_KERNEL);
+	if (!req->buf) {
+		usb_ep_free_request(ep, req);
+		return NULL;
+	}
+
+	return req;
+}
+
+static void dvc_trace_request_free(struct usb_request *req, struct usb_ep *ep)
+{
+	if (req) {
+		kfree(req->buf);
+		usb_ep_free_request(ep, req);
+	}
+}
+
+/* add a request to the tail of a list */
+static void dvc_trace_req_put(struct dvc_trace_dev *dev, struct list_head *head,
+		struct usb_request *req)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	list_add_tail(&req->list, head);
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/* remove a request from the head of a list */
+static struct usb_request *dvc_trace_req_get(struct dvc_trace_dev *dev,
+			struct list_head *head)
+{
+	unsigned long flags;
+	struct usb_request *req;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	if (list_empty(head)) {
+		req = 0;
+	} else {
+		req = list_first_entry(head, struct usb_request, list);
+		list_del(&req->list);
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+	return req;
+}
+
+static void dvc_trace_set_disconnected(struct dvc_trace_dev *dev)
+{
+	dev->transfering = 0;
+}
+
+static void dvc_trace_complete_in(struct usb_ep *ep, struct usb_request *req)
+{
+	struct dvc_trace_dev *dev = _dvc_trace_dev;
+
+	if (req->status != 0)
+		dvc_trace_set_disconnected(dev);
+
+	dvc_trace_req_put(dev, &dev->tx_idle, req);
+
+	wake_up(&dev->write_wq);
+}
+
+static inline int dvc_trace_lock(atomic_t *excl)
+{
+	if (atomic_inc_return(excl) == 1) {
+		return 0;
+	} else {
+		atomic_dec(excl);
+		return -1;
+	}
+}
+
+static inline void dvc_trace_unlock(atomic_t *excl)
+{
+	atomic_dec(excl);
+}
+
+static int trace_create_bulk_endpoints(struct dvc_trace_dev *dev,
+				       struct usb_endpoint_descriptor *in_desc,
+				       struct usb_ss_ep_comp_descriptor *in_comp_desc
+	)
+{
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request *req;
+	struct usb_ep *ep;
+	int i;
+
+	pr_debug("%s dev: %p\n", __func__, dev);
+
+	in_desc->bEndpointAddress |= 0x1;
+	ep = usb_ep_autoconfig_ss(cdev->gadget, in_desc, in_comp_desc);
+	if (!ep) {
+		pr_err("%s usb_ep_autoconfig for ep_in failed\n", __func__);
+		return -ENODEV;
+	}
+	pr_debug("%s usb_ep_autoconfig for ep_in got %s\n", __func__, ep->name);
+
+	ep->driver_data = dev;		/* claim the endpoint */
+	dev->ep_in = ep;
+
+	for (i = 0; i < TRACE_TX_REQ_MAX; i++) {
+		req = dvc_trace_request_new(dev->ep_in, TRACE_BULK_BUFFER_SIZE,
+			(dma_addr_t)TRACE_BULK_IN_BUFFER_ADDR);
+		if (!req)
+			goto fail;
+		req->complete = dvc_trace_complete_in;
+		dvc_trace_req_put(dev, &dev->tx_idle, req);
+		pr_debug("%s req= %p : for %s predefined TRB\n", __func__,
+			req, ep->name);
+	}
+
+	return 0;
+
+fail:
+	pr_err("%s could not allocate requests\n", __func__);
+	return -1;
+}
+
+static ssize_t dvc_trace_start_transfer(size_t count)
+{
+	struct dvc_trace_dev *dev = _dvc_trace_dev;
+	struct usb_request *req = 0;
+	int r = count, xfer;
+	int ret = -EINVAL;
+
+	pr_debug("%s\n", __func__);
+	if (!_dvc_trace_dev)
+		return -ENODEV;
+
+	if (dvc_trace_lock(&dev->write_excl))
+		return -EBUSY;
+
+	/* we will block until enumeration completes */
+	while (!(dev->online || dev->error)) {
+		pr_debug("%s: waiting for online state\n", __func__);
+		ret = wait_event_interruptible(dev->write_wq,
+				(dev->online || dev->error));
+
+		if (ret < 0) {
+			/* not at CONFIGURED state */
+			pr_info("%s !dev->online already\n", __func__);
+			dvc_trace_unlock(&dev->write_excl);
+			return ret;
+		}
+	}
+
+	/* queue a ep_in endless request */
+	while (r > 0) {
+		if (dev->error) {
+			pr_debug("%s dev->error\n", __func__);
+			r = -EIO;
+			break;
+		}
+
+		if (!dev->online) {
+			pr_debug("%s !dev->online\n", __func__);
+			r = -EIO;
+			break;
+		}
+
+		/* get an idle tx request to use */
+		req = 0;
+		ret = wait_event_interruptible(dev->write_wq,
+				dev->error || !dev->online ||
+				(req = dvc_trace_req_get(dev, &dev->tx_idle)));
+
+		if (ret < 0) {
+			r = ret;
+			break;
+		}
+
+		if (req != 0) {
+			if (count > TRACE_BULK_BUFFER_SIZE)
+				xfer = TRACE_BULK_BUFFER_SIZE;
+			else
+				xfer = count;
+			pr_debug("%s queue tx_idle list req to dev->ep_in\n",
+				__func__);
+			req->no_interrupt = 1;
+			req->context = &dev->function;
+			req->length = xfer;
+			ret = usb_ep_queue(dev->ep_in, req, GFP_ATOMIC);
+			if (ret < 0) {
+				pr_err("%s: xfer error %d\n", __func__, ret);
+				dev->error = 1;
+				dev->transfering = 0;
+				r = -EIO;
+				break;
+			}
+			pr_debug("%s: xfer=%d/%d  queued req/%p\n", __func__,
+				xfer, r, req);
+			dvc_trace_req_put(dev, &dev->tx_xfer, req);
+			r -= xfer;
+
+			/* zero this so we don't try to free it on error exit */
+			req = 0;
+		}
+	}
+	if (req) {
+		pr_debug("%s req re-added to tx_idle on error\n", __func__);
+		dvc_trace_req_put(dev, &dev->tx_idle, req);
+	} else {
+		dev->transfering = 1;
+	}
+	dvc_trace_unlock(&dev->write_excl);
+	pr_debug("%s end\n", __func__);
+	return ret;
+}
+
+static int dvc_trace_disable_transfer(void)
+{
+	struct dvc_trace_dev *dev = _dvc_trace_dev;
+	struct usb_request *req = 0;
+	int ret;
+
+	pr_debug("%s\n", __func__);
+	if (!_dvc_trace_dev)
+		return -ENODEV;
+
+	if (dvc_trace_lock(&dev->write_excl))
+		return -EBUSY;
+
+	if (dev->error) {
+		pr_debug("%s dev->error\n", __func__);
+		dvc_trace_unlock(&dev->write_excl);
+		return -EIO;
+	}
+
+	if ((!dev->online) || (!dev->transfering)) {
+		pr_debug("%s !dev->online OR !dev->transfering\n", __func__);
+		dvc_trace_unlock(&dev->write_excl);
+		return -EIO;
+	}
+
+	/* get an xfer tx request to use */
+	while ((req = dvc_trace_req_get(dev, &dev->tx_xfer))) {
+		ret = usb_ep_dequeue(dev->ep_in, req);
+		if (ret < 0) {
+			pr_err("%s: dequeue error %d\n", __func__, ret);
+			dev->error = 1;
+			dvc_trace_unlock(&dev->write_excl);
+			return -EIO;
+		}
+		pr_debug("%s: dequeued req/%p\n", __func__, req);
+	}
+
+	dvc_trace_unlock(&dev->write_excl);
+	return 1;
+}
+
+static int dvc_trace_open(struct inode *ip, struct file *fp)
+{
+	pr_debug("%s\n", __func__);
+	if (!_dvc_trace_dev)
+		return -ENODEV;
+
+	if (dvc_trace_lock(&_dvc_trace_dev->open_excl))
+		return -EBUSY;
+
+	fp->private_data = _dvc_trace_dev;
+
+	/* clear the error latch */
+	_dvc_trace_dev->error = 0;
+	_dvc_trace_dev->transfering = 0;
+
+	return 0;
+}
+
+static int dvc_trace_release(struct inode *ip, struct file *fp)
+{
+	pr_debug("%s\n", __func__);
+
+	dvc_trace_unlock(&_dvc_trace_dev->open_excl);
+	return 0;
+}
+
+/* file operations for DvC.Trace device /dev/usb_dvc_trace */
+static const struct file_operations dvc_trace_fops = {
+	.owner = THIS_MODULE,
+	.open = dvc_trace_open,
+	.release = dvc_trace_release,
+};
+
+static struct miscdevice dvc_trace_device = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "usb_dvc_trace",
+	.fops = &dvc_trace_fops,
+};
+
+static int dvc_trace_ctrlrequest(struct usb_composite_dev *cdev,
+				const struct usb_ctrlrequest *ctrl)
+{
+
+	struct dvc_trace_dev *dev = _dvc_trace_dev;
+	int	value = -EOPNOTSUPP;
+	int	ret;
+	u16	w_index = le16_to_cpu(ctrl->wIndex);
+	u16	w_value = le16_to_cpu(ctrl->wValue);
+	u16	w_length = le16_to_cpu(ctrl->wLength);
+
+	pr_debug("%s %02x.%02x v%04x i%04x l%u\n", __func__,
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+
+	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+
+	/* DC_REQUEST_SET_RESET ... stop active transfer */
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+		| DC_REQUEST_SET_RESET:
+		if (w_index != dev->data_id)
+			goto invalid;
+
+		pr_info("%s DC_REQUEST_SET_RESET v%04x i%04x l%u\n", __func__,
+			 w_value, w_index, w_length);
+
+		dvc_trace_disable_transfer();
+		value = 0;
+		break;
+
+	/* DC_REQUEST_SET_TRACE ... start trace transfer */
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+		| DC_REQUEST_SET_TRACE:
+
+		pr_info("%s DC_REQUEST_SET_TRACE v%04x i%04x l%u\n", __func__,
+			 w_value, w_index, w_length);
+
+		if (!w_index)
+			ret = dvc_trace_disable_transfer();
+		else
+			ret = dvc_trace_start_transfer(4096);
+
+		if (ret < 0)
+			value = -EINVAL;
+		else
+			value = (int) w_index;
+		break;
+
+	default:
+invalid:
+		pr_debug("unknown class-specific control req "
+			 "%02x.%02x v%04x i%04x l%u\n",
+			 ctrl->bRequestType, ctrl->bRequest,
+			 w_value, w_index, w_length);
+	}
+
+	/* respond with data transfer or status phase? */
+	if (value >= 0) {
+		cdev->req->zero = 0;
+		cdev->req->length = value;
+		value = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
+		if (value < 0)
+			pr_err("%s setup response queue error\n", __func__);
+	}
+
+	/* device either stalls (value < 0) or reports success */
+	return value;
+}
+
+static int
+dvc_trace_function_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev	*cdev = c->cdev;
+	struct dvc_trace_dev		*dev = func_to_dvc_trace(f);
+	int			id;
+	int			ret;
+	int status;
+
+	dev->cdev = cdev;
+	pr_debug("%s dev: %p\n", __func__, dev);
+
+	/* maybe allocate device-global string IDs, and patch descriptors */
+	if (trace_string_defs[DVCTRACE_CTRL_IDX].id == 0) {
+		status = usb_string_id(cdev);
+		if (status < 0)
+			return status;
+		trace_string_defs[DVCTRACE_DATA_IDX].id = status;
+		trace_data_interface_desc.iInterface = status;
+
+		status = usb_string_id(cdev);
+		if (status < 0)
+			return status;
+		trace_string_defs[DVCTRACE_CTRL_IDX].id = status;
+		trace_interface_desc.iInterface = status;
+
+		status = usb_string_id(cdev);
+		if (status < 0)
+			return status;
+		trace_string_defs[DVCTRACE_IAD_IDX].id = status;
+		trace_iad_desc.iFunction = status;
+
+		status = usb_string_id(cdev);
+		if (status < 0)
+			return status;
+		trace_string_defs[DVCTRACE_CONN_PTI_IDX].id = status;
+		trace_output_conn_pti_desc.iConnection = status;
+
+		status = usb_string_id(cdev);
+		if (status < 0)
+			return status;
+		trace_string_defs[DVCTRACE_CONN_USB_IDX].id = status;
+		trace_output_conn_usb_desc.iConnection = status;
+
+		status = usb_string_id(cdev);
+		if (status < 0)
+			return status;
+		trace_string_defs[DVCTRACE_UNIT_STM_IDX].id = status;
+		trace_debug_unit_stm_desc.iDebugUnitType = status;
+	}
+
+	/* allocate interface ID(s) */
+	id = usb_interface_id(c, f);
+	if (id < 0)
+		return id;
+	dev->data_id = id;
+	trace_data_interface_desc.bInterfaceNumber = id;
+	trace_iad_desc.bFirstInterface = id;
+
+	id = usb_interface_id(c, f);
+	if (id < 0)
+		return id;
+	dev->ctrl_id = id;
+	trace_interface_desc.bInterfaceNumber = id;
+
+	/* allocate endpoints */
+	ret = trace_create_bulk_endpoints(dev, &trace_fullspeed_in_desc,
+					  &trace_superspeed_in_comp_desc);
+	if (ret)
+		return ret;
+
+	/* support high speed hardware */
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		trace_highspeed_in_desc.bEndpointAddress =
+			trace_fullspeed_in_desc.bEndpointAddress;
+	}
+
+	if (gadget_is_superspeed(c->cdev->gadget)) {
+		trace_superspeed_in_desc.bEndpointAddress =
+			trace_fullspeed_in_desc.bEndpointAddress;
+	}
+
+	pr_debug("%s speed %s: IN/%s\n",
+			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+			f->name, dev->ep_in->name);
+	return 0;
+}
+
+static void
+dvc_trace_function_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct dvc_trace_dev	*dev = func_to_dvc_trace(f);
+	struct usb_request *req;
+
+	dev->online = 0;
+	dev->online_data = 0;
+	dev->online_ctrl = 0;
+	dev->error = 0;
+	trace_string_defs[DVCTRACE_CTRL_IDX].id = 0;
+
+	wake_up(&dev->write_wq);
+
+	while ((req = dvc_trace_req_get(dev, &dev->tx_idle)))
+		dvc_trace_request_free(req, dev->ep_in);
+}
+
+static int dvc_trace_function_set_alt(struct usb_function *f,
+		unsigned intf, unsigned alt)
+{
+	struct dvc_trace_dev	*dev = func_to_dvc_trace(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	int ret;
+
+	pr_debug("%s intf: %d alt: %d\n", __func__, intf, alt);
+
+	if (intf == trace_interface_desc.bInterfaceNumber)
+		dev->online_ctrl = 1;
+
+	if (intf == trace_data_interface_desc.bInterfaceNumber) {
+		ret = config_ep_by_speed(cdev->gadget, f, dev->ep_in);
+		if (ret) {
+			pr_err("%s intf: %d alt: %d ep_by_speed in err %d\n",
+				__func__, intf, alt, ret);
+			return ret;
+		}
+
+		ret = usb_ep_enable(dev->ep_in);
+		if (ret) {
+			pr_err("%s intf: %d alt: %d ep_enable in err %d\n",
+				__func__, intf, alt, ret);
+			return ret;
+		}
+		dev->online_data = 1;
+	}
+
+	if (dev->online_data && dev->online_ctrl) {
+		dev->online = 1;
+		dev->transfering = 0;
+	}
+
+	/* readers may be blocked waiting for us to go online */
+	wake_up(&dev->write_wq);
+	return 0;
+}
+
+static void dvc_trace_function_disable(struct usb_function *f)
+{
+	struct dvc_trace_dev	*dev = func_to_dvc_trace(f);
+	struct usb_composite_dev	*cdev = dev->cdev;
+
+	pr_debug("%s dev %p\n", __func__, cdev);
+
+	if (dev->transfering)
+		dvc_trace_disable_transfer();
+
+	dev->online = 0;
+	dev->online_data = 0;
+	dev->online_ctrl = 0;
+	dev->error = 0;
+	usb_ep_disable(dev->ep_in);
+
+	/* writer may be blocked waiting for us to go online */
+	wake_up(&dev->write_wq);
+
+	pr_debug("%s : %s disabled\n", __func__, dev->function.name);
+}
+
+static int dvc_trace_bind_config(struct usb_configuration *c)
+{
+	struct dvc_trace_dev *dev = _dvc_trace_dev;
+
+	pr_debug("%s\n", __func__);
+
+	dev->cdev = c->cdev;
+	dev->function.name = "dvctrace";
+	dev->function.strings = trace_strings;
+	dev->function.fs_descriptors = fs_trace_descs;
+	dev->function.hs_descriptors = hs_trace_descs;
+	dev->function.ss_descriptors = ss_trace_descs;
+	dev->function.bind = dvc_trace_function_bind;
+	dev->function.unbind = dvc_trace_function_unbind;
+	dev->function.set_alt = dvc_trace_function_set_alt;
+	dev->function.disable = dvc_trace_function_disable;
+
+	return usb_add_function(c, &dev->function);
+}
+
+static int dvc_trace_setup(void)
+{
+	struct dvc_trace_dev *dev;
+	int ret;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	spin_lock_init(&dev->lock);
+
+	INIT_LIST_HEAD(&dev->tx_idle);
+	INIT_LIST_HEAD(&dev->tx_xfer);
+
+	init_waitqueue_head(&dev->write_wq);
+
+	atomic_set(&dev->open_excl, 0);
+	atomic_set(&dev->write_excl, 0);
+
+	_dvc_trace_dev = dev;
+
+	ret = misc_register(&dvc_trace_device);
+	if (ret)
+		goto err;
+
+	return 0;
+
+err:
+	kfree(dev);
+	pr_err("DvC.Trace gadget driver failed to initialize\n");
+	return ret;
+}
+
+static void dvc_trace_cleanup(void)
+{
+	misc_deregister(&dvc_trace_device);
+
+	kfree(_dvc_trace_dev);
+	_dvc_trace_dev = NULL;
+}
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
index 3d132cb..21cf910 100644
--- a/drivers/usb/gadget/f_fs.c
+++ b/drivers/usb/gadget/f_fs.c
@@ -755,76 +755,81 @@
 	struct ffs_epfile *epfile = file->private_data;
 	struct ffs_ep *ep;
 	char *data = NULL;
-	ssize_t ret;
+	ssize_t ret, data_len = 0;
 	int halt;
 
-	goto first_try;
-	do {
-		spin_unlock_irq(&epfile->ffs->eps_lock);
-		mutex_unlock(&epfile->mutex);
+	/* Are we still active? */
+	if (WARN_ON(epfile->ffs->state != FFS_ACTIVE)) {
+		ret = -ENODEV;
+		goto error;
+	}
 
-first_try:
-		/* Are we still active? */
-		if (WARN_ON(epfile->ffs->state != FFS_ACTIVE)) {
-			ret = -ENODEV;
+	/* Wait for endpoint to be enabled */
+	ep = epfile->ep;
+	if (!ep) {
+		if (file->f_flags & O_NONBLOCK) {
+			ret = -EAGAIN;
 			goto error;
 		}
 
-		/* Wait for endpoint to be enabled */
-		ep = epfile->ep;
-		if (!ep) {
-			if (file->f_flags & O_NONBLOCK) {
-				ret = -EAGAIN;
-				goto error;
-			}
-
-			if (wait_event_interruptible(epfile->wait,
-						     (ep = epfile->ep))) {
-				ret = -EINTR;
-				goto error;
-			}
-		}
-
-		/* Do we halt? */
-		halt = !read == !epfile->in;
-		if (halt && epfile->isoc) {
-			ret = -EINVAL;
+		ret = wait_event_interruptible(epfile->wait, (ep = epfile->ep));
+		if (ret) {
+			ret = -EINTR;
 			goto error;
 		}
+	}
 
-		/* Allocate & copy */
-		if (!halt && !data) {
-			data = kzalloc(len, GFP_KERNEL);
-			if (unlikely(!data))
-				return -ENOMEM;
+	/* Do we halt? */
+	halt = !read == !epfile->in;
+	if (halt && epfile->isoc) {
+		ret = -EINVAL;
+		goto error;
+	}
 
-			if (!read &&
-			    unlikely(__copy_from_user(data, buf, len))) {
-				ret = -EFAULT;
-				goto error;
-			}
-		}
-
-		/* We will be using request */
-		ret = ffs_mutex_lock(&epfile->mutex,
-				     file->f_flags & O_NONBLOCK);
-		if (unlikely(ret))
-			goto error;
-
+	/* Allocate & copy */
+	if (!halt) {
 		/*
-		 * We're called from user space, we can use _irq rather then
-		 * _irqsave
+		 * if we _do_ wait above, the epfile->ffs->gadget might be NULL
+		 * before the waiting completes, so do not assign to 'gadget' earlier
 		 */
+		struct usb_gadget *gadget = epfile->ffs->gadget;
+
 		spin_lock_irq(&epfile->ffs->eps_lock);
-
+		/* In the meantime, endpoint got disabled or changed. */
+		if (epfile->ep != ep) {
+			spin_unlock_irq(&epfile->ffs->eps_lock);
+			return -ESHUTDOWN;
+		}
 		/*
-		 * While we were acquiring mutex endpoint got disabled
-		 * or changed?
+		 * Controller may require buffer size to be aligned to
+		 * maxpacketsize of an out endpoint.
 		 */
-	} while (unlikely(epfile->ep != ep));
+		data_len = read ? usb_ep_align_maybe(gadget, ep->ep, len) : len;
+		spin_unlock_irq(&epfile->ffs->eps_lock);
 
-	/* Halt */
-	if (unlikely(halt)) {
+		data = kmalloc(data_len, GFP_KERNEL);
+		if (unlikely(!data))
+			return -ENOMEM;
+
+		if (!read && unlikely(copy_from_user(data, buf, len))) {
+			ret = -EFAULT;
+			goto error;
+		}
+	}
+
+	/* We will be using request */
+	ret = ffs_mutex_lock(&epfile->mutex, file->f_flags & O_NONBLOCK);
+	if (unlikely(ret))
+		goto error;
+
+	spin_lock_irq(&epfile->ffs->eps_lock);
+
+	if (epfile->ep != ep) {
+		/* In the meantime, endpoint got disabled or changed. */
+		ret = -ESHUTDOWN;
+		spin_unlock_irq(&epfile->ffs->eps_lock);
+	} else if (halt) {
+		/* Halt */
 		if (likely(epfile->ep == ep) && !WARN_ON(!ep->ep))
 			usb_ep_set_halt(ep->ep);
 		spin_unlock_irq(&epfile->ffs->eps_lock);
@@ -837,7 +842,7 @@
 		req->context  = &done;
 		req->complete = ffs_epfile_io_complete;
 		req->buf      = data;
-		req->length   = len;
+		req->length   = data_len;
 
 		ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
 
@@ -849,10 +854,20 @@
 			ret = -EINTR;
 			usb_ep_dequeue(ep->ep, req);
 		} else {
+			/*
+			 * XXX We may end up silently droping data here.
+			 * Since data_len (i.e. req->length) may be bigger
+			 * than len (after being rounded up to maxpacketsize),
+			 * we may end up with more data then user space has
+			 * space for.
+			 */
 			ret = ep->status;
-			if (read && ret > 0 &&
-			    unlikely(copy_to_user(buf, data, ret)))
-				ret = -EFAULT;
+			if (read && ret > 0) {
+				ret = min_t(size_t, ret, len);
+
+				if (unlikely(copy_to_user(buf, data, ret)))
+					ret = -EFAULT;
+			}
 		}
 	}
 
@@ -1034,37 +1049,19 @@
 	struct ffs_file_perms perms;
 	umode_t root_mode;
 	const char *dev_name;
-	union {
-		/* set by ffs_fs_mount(), read by ffs_sb_fill() */
-		void *private_data;
-		/* set by ffs_sb_fill(), read by ffs_fs_mount */
-		struct ffs_data *ffs_data;
-	};
+	struct ffs_data *ffs_data;
 };
 
 static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
 {
 	struct ffs_sb_fill_data *data = _data;
 	struct inode	*inode;
-	struct ffs_data	*ffs;
+	struct ffs_data	*ffs = data->ffs_data;
 
 	ENTER();
 
-	/* Initialise data */
-	ffs = ffs_data_new();
-	if (unlikely(!ffs))
-		goto Enomem;
-
 	ffs->sb              = sb;
-	ffs->dev_name        = kstrdup(data->dev_name, GFP_KERNEL);
-	if (unlikely(!ffs->dev_name))
-		goto Enomem;
-	ffs->file_perms      = data->perms;
-	ffs->private_data    = data->private_data;
-
-	/* used by the caller of this function */
-	data->ffs_data       = ffs;
-
+	data->ffs_data       = NULL;
 	sb->s_fs_info        = ffs;
 	sb->s_blocksize      = PAGE_CACHE_SIZE;
 	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
@@ -1080,17 +1077,14 @@
 				  &data->perms);
 	sb->s_root = d_make_root(inode);
 	if (unlikely(!sb->s_root))
-		goto Enomem;
+		return -ENOMEM;
 
 	/* EP0 file */
 	if (unlikely(!ffs_sb_create_file(sb, "ep0", ffs,
 					 &ffs_ep0_operations, NULL)))
-		goto Enomem;
+		return -ENOMEM;
 
 	return 0;
-
-Enomem:
-	return -ENOMEM;
 }
 
 static int ffs_fs_parse_opts(struct ffs_sb_fill_data *data, char *opts)
@@ -1193,6 +1187,7 @@
 	struct dentry *rv;
 	int ret;
 	void *ffs_dev;
+	struct ffs_data	*ffs;
 
 	ENTER();
 
@@ -1200,18 +1195,30 @@
 	if (unlikely(ret < 0))
 		return ERR_PTR(ret);
 
+	ffs = ffs_data_new();
+	if (unlikely(!ffs))
+		return ERR_PTR(-ENOMEM);
+	ffs->file_perms = data.perms;
+
+	ffs->dev_name = kstrdup(dev_name, GFP_KERNEL);
+	if (unlikely(!ffs->dev_name)) {
+		ffs_data_put(ffs);
+		return ERR_PTR(-ENOMEM);
+	}
+
 	ffs_dev = functionfs_acquire_dev_callback(dev_name);
-	if (IS_ERR(ffs_dev))
-		return ffs_dev;
+	if (IS_ERR(ffs_dev)) {
+		ffs_data_put(ffs);
+		return ERR_CAST(ffs_dev);
+	}
+	ffs->private_data = ffs_dev;
+	data.ffs_data = ffs;
 
-	data.dev_name = dev_name;
-	data.private_data = ffs_dev;
 	rv = mount_nodev(t, flags, &data, ffs_sb_fill);
-
-	/* data.ffs_data is set by ffs_sb_fill */
-	if (IS_ERR(rv))
+	if (IS_ERR(rv) && data.ffs_data) {
 		functionfs_release_dev_callback(data.ffs_data);
-
+		ffs_data_put(data.ffs_data);
+	}
 	return rv;
 }
 
@@ -1547,8 +1554,10 @@
 	spin_lock_irqsave(&func->ffs->eps_lock, flags);
 	do {
 		/* pending requests get nuked */
-		if (likely(ep->ep))
+		if (ep->ep && epfile->ep) {
 			usb_ep_disable(ep->ep);
+			ep->ep->driver_data = NULL;
+		}
 		epfile->ep = NULL;
 
 		++ep;
@@ -2100,7 +2109,7 @@
 	if (isHS)
 		func->function.hs_descriptors[(long)valuep] = desc;
 	else
-		func->function.fs_descriptors[(long)valuep]    = desc;
+		func->function.fs_descriptors[(long)valuep] = desc;
 
 	if (!desc || desc->bDescriptorType != USB_DT_ENDPOINT)
 		return 0;
@@ -2237,7 +2246,7 @@
 
 	/* Zero */
 	memset(data->eps, 0, sizeof data->eps);
-	memcpy(data->raw_descs, ffs->raw_descs + 16, sizeof data->raw_descs);
+	memcpy(data->raw_descs, ffs->raw_descs + 16, sizeof(data->raw_descs));
 	memset(data->inums, 0xff, sizeof data->inums);
 	for (ret = ffs->eps_count; ret; --ret)
 		data->eps[ret].num = -1;
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index 97666e8..c35a9ec 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -413,6 +413,7 @@
 /* Caller must hold fsg->lock */
 static void wakeup_thread(struct fsg_common *common)
 {
+	smp_wmb();	/* ensure the write of bh->state is complete */
 	/* Tell the main thread that something has happened */
 	common->thread_wakeup_needed = 1;
 	if (common->thread_task)
@@ -632,6 +633,7 @@
 	}
 	__set_current_state(TASK_RUNNING);
 	common->thread_wakeup_needed = 0;
+	smp_rmb();	/* ensure the latest bh->state is visible */
 	return rc;
 }
 
diff --git a/drivers/usb/gadget/f_mtp.c b/drivers/usb/gadget/f_mtp.c
index 620aeaa..ee93553 100644
--- a/drivers/usb/gadget/f_mtp.c
+++ b/drivers/usb/gadget/f_mtp.c
@@ -36,7 +36,9 @@
 #include <linux/usb/ch9.h>
 #include <linux/usb/f_mtp.h>
 
-#define MTP_BULK_BUFFER_SIZE       16384
+#define MTP_BULK_TX_BUFFER_SIZE       (16384*4)
+#define MTP_BULK_RX_BUFFER_SIZE       (65536*4)
+#define MTP_UDC_LIMITED_SIZE   16384
 #define INTR_BUFFER_SIZE           28
 
 /* String IDs */
@@ -69,6 +71,11 @@
 
 static const char mtp_shortname[] = "mtp_usb";
 
+static unsigned char tx_buffer[TX_REQ_MAX][MTP_BULK_TX_BUFFER_SIZE]
+__cacheline_aligned;
+static unsigned char rx_buffer[RX_REQ_MAX][MTP_BULK_RX_BUFFER_SIZE]
+__cacheline_aligned;
+
 struct mtp_dev {
 	struct usb_function function;
 	struct usb_composite_dev *cdev;
@@ -92,6 +99,8 @@
 	wait_queue_head_t write_wq;
 	wait_queue_head_t intr_wq;
 	struct usb_request *rx_req[RX_REQ_MAX];
+	void *tx_mem[TX_REQ_MAX];
+	void *rx_mem[RX_REQ_MAX];
 	int rx_done;
 
 	/* for processing MTP_SEND_FILE, MTP_RECEIVE_FILE and
@@ -200,6 +209,70 @@
 	NULL,
 };
 
+/* super speed support: */
+static struct usb_endpoint_descriptor mtp_superspeed_in_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_IN,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = __constant_cpu_to_le16(1024),
+};
+
+static struct usb_endpoint_descriptor mtp_superspeed_out_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_OUT,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = __constant_cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor mtp_superspeed_in_comp_desc = {
+	.bLength		= sizeof(mtp_superspeed_in_comp_desc),
+	.bDescriptorType	= USB_DT_SS_ENDPOINT_COMP,
+
+	/* .bMaxBurst		= 0, */
+	/* .bmAttributes	= 0, */
+};
+
+static struct usb_ss_ep_comp_descriptor mtp_superspeed_out_comp_desc = {
+	.bLength		= sizeof(mtp_superspeed_out_comp_desc),
+	.bDescriptorType	= USB_DT_SS_ENDPOINT_COMP,
+
+	/* .bMaxBurst		= 0, */
+	/* .bmAttributes	= 0, */
+};
+
+static struct usb_ss_ep_comp_descriptor mtp_intr_comp_desc = {
+	.bLength		= sizeof(mtp_intr_comp_desc),
+	.bDescriptorType	= USB_DT_SS_ENDPOINT_COMP,
+
+	/* .bMaxBurst		= 0, */
+	/* .bmAttributes	= 0, */
+	.wBytesPerInterval	= __constant_cpu_to_le16(INTR_BUFFER_SIZE),
+};
+
+static struct usb_descriptor_header *ss_mtp_descs[] = {
+	(struct usb_descriptor_header *) &mtp_interface_desc,
+	(struct usb_descriptor_header *) &mtp_superspeed_in_desc,
+	(struct usb_descriptor_header *) &mtp_superspeed_in_comp_desc,
+	(struct usb_descriptor_header *) &mtp_superspeed_out_desc,
+	(struct usb_descriptor_header *) &mtp_superspeed_out_comp_desc,
+	(struct usb_descriptor_header *) &mtp_intr_desc,
+	(struct usb_descriptor_header *) &mtp_intr_comp_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *ss_ptp_descs[] = {
+	(struct usb_descriptor_header *) &ptp_interface_desc,
+	(struct usb_descriptor_header *) &mtp_superspeed_in_desc,
+	(struct usb_descriptor_header *) &mtp_superspeed_in_comp_desc,
+	(struct usb_descriptor_header *) &mtp_superspeed_out_desc,
+	(struct usb_descriptor_header *) &mtp_superspeed_out_comp_desc,
+	(struct usb_descriptor_header *) &mtp_intr_desc,
+	(struct usb_descriptor_header *) &mtp_intr_comp_desc,
+	NULL,
+};
+
 static struct usb_string mtp_string_defs[] = {
 	/* Naming interface "MTP" so libmtp will recognize us */
 	[INTERFACE_STRING_INDEX].s	= "MTP",
@@ -432,16 +505,19 @@
 
 	/* now allocate requests for our endpoints */
 	for (i = 0; i < TX_REQ_MAX; i++) {
-		req = mtp_request_new(dev->ep_in, MTP_BULK_BUFFER_SIZE);
+		req = usb_ep_alloc_request(dev->ep_in, GFP_KERNEL);
 		if (!req)
 			goto fail;
+		req->buf = dev->tx_mem[i];
 		req->complete = mtp_complete_in;
 		mtp_req_put(dev, &dev->tx_idle, req);
 	}
 	for (i = 0; i < RX_REQ_MAX; i++) {
-		req = mtp_request_new(dev->ep_out, MTP_BULK_BUFFER_SIZE);
+		req = usb_ep_alloc_request(dev->ep_out, GFP_KERNEL);
 		if (!req)
 			goto fail;
+		/* link rx_mem buffer to the usb_request */
+		req->buf = dev->rx_mem[i];
 		req->complete = mtp_complete_out;
 		dev->rx_req[i] = req;
 	}
@@ -472,17 +548,14 @@
 
 	DBG(cdev, "mtp_read(%zu)\n", count);
 
-	if (count > MTP_BULK_BUFFER_SIZE)
+	if (count > MTP_BULK_RX_BUFFER_SIZE)
 		return -EINVAL;
 
-	/* we will block until we're online */
-	DBG(cdev, "mtp_read: waiting for online state\n");
-	ret = wait_event_interruptible(dev->read_wq,
-		dev->state != STATE_OFFLINE);
-	if (ret < 0) {
-		r = ret;
-		goto done;
+	if (dev->state == STATE_OFFLINE) {
+		DBG(cdev, "mtp_read: state offline, return\n");
+		return -EIO;
 	}
+
 	spin_lock_irq(&dev->lock);
 	if (dev->state == STATE_CANCELED) {
 		/* report cancelation to userspace */
@@ -592,8 +665,8 @@
 			break;
 		}
 
-		if (count > MTP_BULK_BUFFER_SIZE)
-			xfer = MTP_BULK_BUFFER_SIZE;
+		if (count > MTP_BULK_TX_BUFFER_SIZE)
+			xfer = MTP_BULK_TX_BUFFER_SIZE;
 		else
 			xfer = count;
 		if (xfer && copy_from_user(req->buf, buf, xfer)) {
@@ -680,20 +753,29 @@
 			r = -ECANCELED;
 			break;
 		}
+
+		if (dev->state != STATE_BUSY) {
+			r = -EIO;
+			break;
+		}
+
 		if (!req) {
 			r = ret;
 			break;
 		}
 
-		if (count > MTP_BULK_BUFFER_SIZE)
-			xfer = MTP_BULK_BUFFER_SIZE;
+		if (count > MTP_BULK_TX_BUFFER_SIZE)
+			xfer = MTP_BULK_TX_BUFFER_SIZE;
 		else
 			xfer = count;
 
 		if (hdr_size) {
 			/* prepend MTP data header */
 			header = (struct mtp_data_header *)req->buf;
-			header->length = __cpu_to_le32(count);
+			if (count > 0xffffffffLL)
+				header->length = 0xffffffff;
+			else
+				header->length = __cpu_to_le32(count);
 			header->type = __cpu_to_le16(2); /* data packet */
 			header->command = __cpu_to_le16(dev->xfer_command);
 			header->transaction_id =
@@ -742,7 +824,7 @@
 	struct usb_request *read_req = NULL, *write_req = NULL;
 	struct file *filp;
 	loff_t offset;
-	int64_t count;
+	int64_t count, bytes_received = 0;
 	int ret, cur_buf = 0;
 	int r = 0;
 
@@ -760,8 +842,10 @@
 			read_req = dev->rx_req[cur_buf];
 			cur_buf = (cur_buf + 1) % RX_REQ_MAX;
 
-			read_req->length = (count > MTP_BULK_BUFFER_SIZE
-					? MTP_BULK_BUFFER_SIZE : count);
+			read_req->length = count > MTP_BULK_RX_BUFFER_SIZE ?
+				(bytes_received < 0xFFFFFFFFLL ?
+				MTP_BULK_RX_BUFFER_SIZE : MTP_UDC_LIMITED_SIZE)
+				: count;
 			dev->rx_done = 0;
 			ret = usb_ep_queue(dev->ep_out, read_req, GFP_KERNEL);
 			if (ret < 0) {
@@ -794,6 +878,13 @@
 					usb_ep_dequeue(dev->ep_out, read_req);
 				break;
 			}
+
+			if (dev->state != STATE_BUSY) {
+				r = -EIO;
+				break;
+			}
+
+			bytes_received += read_req->actual;
 			/* if xfer_file_length is 0xFFFFFFFF, then we read until
 			 * we get a zero length packet
 			 */
@@ -953,6 +1044,64 @@
 	return ret;
 }
 
+#ifdef CONFIG_COMPAT
+static long mtp_compat_ioctl(struct file *fp, unsigned code, unsigned long value)
+{
+
+	code = (code == MTP_SEND_FILE_32) ? MTP_SEND_FILE :
+		(code == MTP_RECEIVE_FILE_32) ? MTP_RECEIVE_FILE :
+		(code == MTP_SEND_EVENT_32) ? MTP_SEND_EVENT : MTP_SEND_FILE_WITH_HEADER;
+
+	switch (code) {
+	case MTP_SEND_FILE:
+	case MTP_RECEIVE_FILE:
+	case MTP_SEND_FILE_WITH_HEADER:
+	{
+		struct mtp_file_range __user *mfr64;
+		struct mtp_file_range_32 __user *mfr32;
+		compat_int_t	fd;
+		compat_s64	temp;
+		u16		command;
+		u32		id;
+
+		mfr32 = (struct mtp_file_range_32 __user *)value;
+		mfr64 = compat_alloc_user_space(sizeof(*mfr64));
+		if (get_user(fd, &mfr32->fd) || put_user(fd, &mfr64->fd) ||
+			get_user(temp, &mfr32->offset) || put_user(temp, &mfr64->offset) ||
+			get_user(temp, &mfr32->length) || put_user(temp, &mfr64->length) ||
+			get_user(command, &mfr32->command) || put_user(command, &mfr64->command) ||
+			get_user(id, &mfr32->transaction_id) || put_user(id, &mfr64->transaction_id))
+				return -EFAULT;
+		/* copy mfr64 to value */
+		value = (uintptr_t) mfr64;
+
+		break;
+	}
+	case MTP_SEND_EVENT:
+	{
+		struct mtp_event	__user *event64;
+		struct mtp_event_32	__user *event32;
+		__u32			udata;
+		u32			length;
+
+		event32 = (struct mtp_event_32 __user *)value;
+		event64 = compat_alloc_user_space(sizeof(*event64));
+
+		if (get_user(length, &event32->length) ||
+			put_user(length, &event64->length) ||
+			get_user(udata, &event32->compat_data) ||
+			put_user(compat_ptr(udata), &event64->data))
+			return -EFAULT;
+		/* copy event pointer to value */
+		value = (uintptr_t) event64;
+		break;
+	}
+	}
+
+	return mtp_ioctl(fp, code, (unsigned long) compat_ptr(value));
+}
+#endif
+
 static int mtp_open(struct inode *ip, struct file *fp)
 {
 	printk(KERN_INFO "mtp_open\n");
@@ -981,6 +1130,9 @@
 	.read = mtp_read,
 	.write = mtp_write,
 	.unlocked_ioctl = mtp_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = mtp_compat_ioctl,
+#endif
 	.open = mtp_open,
 	.release = mtp_release,
 };
@@ -1082,6 +1234,76 @@
 	return value;
 }
 
+static int ptp_ctrlrequest(struct usb_composite_dev *cdev,
+				const struct usb_ctrlrequest *ctrl)
+{
+	struct mtp_dev *dev = _mtp_dev;
+	int	value = -EOPNOTSUPP;
+	u16	w_index = le16_to_cpu(ctrl->wIndex);
+	u16	w_value = le16_to_cpu(ctrl->wValue);
+	u16	w_length = le16_to_cpu(ctrl->wLength);
+	unsigned long	flags;
+
+	VDBG(cdev, "ptp_ctrlrequest "\
+			"%02x.%02x v%04x i%04x l%u\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+
+	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) {
+		DBG(cdev, "class request: %d index: %d value: %d length: %d\n",
+			ctrl->bRequest, w_index, w_value, w_length);
+
+		if (ctrl->bRequest == MTP_REQ_CANCEL && w_index == 0
+				&& w_value == 0) {
+			DBG(cdev, "MTP_REQ_CANCEL\n");
+
+			spin_lock_irqsave(&dev->lock, flags);
+			if (dev->state == STATE_BUSY) {
+				dev->state = STATE_CANCELED;
+				wake_up(&dev->read_wq);
+				wake_up(&dev->write_wq);
+			}
+			spin_unlock_irqrestore(&dev->lock, flags);
+
+			/* We need to queue a request to read the remaining
+			 *  bytes, but we don't actually need to look at
+			 * the contents.
+			 */
+			value = w_length;
+		} else if (ctrl->bRequest == MTP_REQ_GET_DEVICE_STATUS
+				&& w_index == 0 && w_value == 0) {
+			struct mtp_device_status *status = cdev->req->buf;
+			status->wLength =
+				__constant_cpu_to_le16(sizeof(*status));
+
+			DBG(cdev, "MTP_REQ_GET_DEVICE_STATUS\n");
+			spin_lock_irqsave(&dev->lock, flags);
+			/* device status is "busy" until we report
+			 * the cancelation to userspace
+			 */
+			if (dev->state == STATE_CANCELED)
+				status->wCode =
+					__cpu_to_le16(MTP_RESPONSE_DEVICE_BUSY);
+			else
+				status->wCode =
+					__cpu_to_le16(MTP_RESPONSE_OK);
+			spin_unlock_irqrestore(&dev->lock, flags);
+			value = sizeof(*status);
+		}
+	}
+
+	/* respond with data transfer or status phase? */
+	if (value >= 0) {
+		int rc;
+		cdev->req->zero = value < w_length;
+		cdev->req->length = value;
+		rc = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
+		if (rc < 0)
+			ERROR(cdev, "%s: response queue error\n", __func__);
+	}
+	return value;
+}
+
 static int
 mtp_function_bind(struct usb_configuration *c, struct usb_function *f)
 {
@@ -1113,7 +1335,24 @@
 			mtp_fullspeed_out_desc.bEndpointAddress;
 	}
 
+	if (gadget_is_superspeed(c->cdev->gadget)) {
+		unsigned        max_in_burst;
+		unsigned        max_out_burst;
+
+		max_in_burst = min_t(unsigned,
+				     MTP_BULK_RX_BUFFER_SIZE / 1024, 15);
+		max_out_burst = min_t(unsigned,
+				      MTP_BULK_TX_BUFFER_SIZE / 1024, 15);
+		mtp_superspeed_in_desc.bEndpointAddress =
+			mtp_fullspeed_in_desc.bEndpointAddress;
+		mtp_superspeed_in_comp_desc.bMaxBurst = max_in_burst;
+		mtp_superspeed_out_desc.bEndpointAddress =
+			mtp_fullspeed_out_desc.bEndpointAddress;
+		mtp_superspeed_out_comp_desc.bMaxBurst = max_out_burst;
+	}
+
 	DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
+			gadget_is_superspeed(c->cdev->gadget) ? "super" :
 			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
 			f->name, dev->ep_in->name, dev->ep_out->name);
 	return 0;
@@ -1126,10 +1365,19 @@
 	struct usb_request *req;
 	int i;
 
+	/* cleanup the work items */
+	flush_workqueue(dev->wq);
+
 	while ((req = mtp_req_get(dev, &dev->tx_idle)))
-		mtp_request_free(req, dev->ep_in);
-	for (i = 0; i < RX_REQ_MAX; i++)
-		mtp_request_free(dev->rx_req[i], dev->ep_out);
+		usb_ep_free_request(dev->ep_in, req);
+	for (i = 0; i < RX_REQ_MAX; i++) {
+		req = dev->rx_req[i];
+		if (req) {
+			/* Set to NULL to avoid UDC touch the rx_mem */
+			req->buf = NULL;
+			usb_ep_free_request(dev->ep_out, req);
+		}
+	}
 	while ((req = mtp_req_get(dev, &dev->intr_idle)))
 		mtp_request_free(req, dev->ep_intr);
 	dev->state = STATE_OFFLINE;
@@ -1218,9 +1466,11 @@
 	if (ptp_config) {
 		dev->function.fs_descriptors = fs_ptp_descs;
 		dev->function.hs_descriptors = hs_ptp_descs;
+		dev->function.ss_descriptors = ss_ptp_descs;
 	} else {
 		dev->function.fs_descriptors = fs_mtp_descs;
 		dev->function.hs_descriptors = hs_mtp_descs;
+		dev->function.ss_descriptors = ss_mtp_descs;
 	}
 	dev->function.bind = mtp_function_bind;
 	dev->function.unbind = mtp_function_unbind;
@@ -1234,6 +1484,7 @@
 {
 	struct mtp_dev *dev;
 	int ret;
+	int i;
 
 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 	if (!dev)
@@ -1253,6 +1504,27 @@
 		ret = -ENOMEM;
 		goto err1;
 	}
+
+	memset(tx_buffer, 0, TX_REQ_MAX * MTP_BULK_TX_BUFFER_SIZE);
+	/* Request memory buffer for TX */
+	for (i = 0; i < TX_REQ_MAX; i++) {
+		dev->tx_mem[i] = tx_buffer[i];
+		if (!dev->tx_mem[i]) {
+			ret = -ENOMEM;
+			goto err2;
+		}
+	}
+
+	memset(rx_buffer, 0, RX_REQ_MAX * MTP_BULK_RX_BUFFER_SIZE);
+	/* Request memory buffer for RX */
+	for (i = 0; i < RX_REQ_MAX; i++) {
+		dev->rx_mem[i] = rx_buffer[i];
+		if (!dev->rx_mem[i]) {
+			ret = -ENOMEM;
+			goto err2;
+		}
+	}
+
 	INIT_WORK(&dev->send_file_work, send_file_work);
 	INIT_WORK(&dev->receive_file_work, receive_file_work);
 
@@ -1260,12 +1532,13 @@
 
 	ret = misc_register(&mtp_device);
 	if (ret)
-		goto err2;
+		goto err3;
 
 	return 0;
 
-err2:
+err3:
 	destroy_workqueue(dev->wq);
+err2:
 err1:
 	_mtp_dev = NULL;
 	kfree(dev);
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
index 5c274f1..5e13051 100644
--- a/drivers/usb/gadget/f_rndis.c
+++ b/drivers/usb/gadget/f_rndis.c
@@ -375,12 +375,81 @@
 
 /*-------------------------------------------------------------------------*/
 
+static struct sk_buff
+*rndis_pskb_copy(struct sk_buff *skb, unsigned int headroom, gfp_t gfp_mask)
+{
+	/*
+	*      Allocate the copy buffer
+	*/
+	struct sk_buff *n;
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+	n = alloc_skb(skb->end, gfp_mask);
+#else
+	n = alloc_skb(skb->end - skb->head, gfp_mask);
+#endif
+	if (!n)
+		goto out;
+
+	/* Set the data pointer */
+	skb_reserve(n, headroom);
+	/* Set the tail pointer and length */
+	skb_put(n, skb_headlen(skb));
+	/* Copy the bytes */
+	skb_copy_from_linear_data(skb, n->data, n->len);
+
+	n->truesize += skb->data_len;
+	n->data_len  = skb->data_len;
+	n->len       = skb->len;
+
+	if (skb_shinfo(skb)->nr_frags) {
+		int i;
+
+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
+			get_page(skb_shinfo(n)->frags[i].page.p);
+		}
+		skb_shinfo(n)->nr_frags = i;
+	}
+
+	if (skb_has_frag_list(skb)) {
+		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
+		skb_clone_fraglist(n);
+	}
+
+	copy_skb_header(n, skb);
+out:
+	return n;
+}
+
+/*Alloc headroom for rndis header. This function ensure that the whole rndis
+* package align on 64B after rndis header is added to the package.
+*/
+static struct sk_buff
+*rndis_skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
+{
+	struct sk_buff *skb2;
+	int delta = headroom - skb_headroom(skb);
+
+
+	if (delta <= 0) {
+		skb2 = rndis_pskb_copy(skb, headroom, GFP_ATOMIC);
+	} else {
+
+		skb2 = skb_clone(skb, GFP_ATOMIC);
+		if (skb2 && pskb_expand_head(skb2, delta, 0, GFP_ATOMIC)) {
+			kfree_skb(skb2);
+			skb2 = NULL;
+		}
+	}
+	return skb2;
+}
 static struct sk_buff *rndis_add_header(struct gether *port,
 					struct sk_buff *skb)
 {
 	struct sk_buff *skb2;
 
-	skb2 = skb_realloc_headroom(skb, sizeof(struct rndis_packet_msg_type));
+	skb2 = rndis_skb_realloc_headroom(skb,
+			sizeof(struct rndis_packet_msg_type));
 	if (skb2)
 		rndis_add_hdr(skb2);
 
@@ -456,9 +525,15 @@
 static void rndis_command_complete(struct usb_ep *ep, struct usb_request *req)
 {
 	struct f_rndis			*rndis = req->context;
+	struct usb_composite_dev	*cdev = rndis->port.func.config->cdev;
 	int				status;
 	rndis_init_msg_type		*buf;
 
+	if (req->status) {
+		DBG(cdev, "RNDIS command completes abnormally status %d\n",
+			req->status);
+		return;
+	}
 	/* received RNDIS command from USB_CDC_SEND_ENCAPSULATED_COMMAND */
 //	spin_lock(&dev->lock);
 	status = rndis_msg_parser(rndis->config, (u8 *) req->buf);
diff --git a/drivers/usb/gadget/gadget_chips.h b/drivers/usb/gadget/gadget_chips.h
index bcd04bc..4c77856 100644
--- a/drivers/usb/gadget/gadget_chips.h
+++ b/drivers/usb/gadget/gadget_chips.h
@@ -16,6 +16,7 @@
 #define __GADGET_CHIPS_H
 
 #include <linux/usb/gadget.h>
+#include <asm/intel-mid.h>
 
 /*
  * NOTICE: the entries below are alphabetical and should be kept
@@ -29,11 +30,15 @@
  */
 #define gadget_is_at91(g)		(!strcmp("at91_udc", (g)->name))
 #define gadget_is_goku(g)		(!strcmp("goku_udc", (g)->name))
+#define gadget_is_middwc3tng(g)		((!strcmp("dwc3-gadget", (g)->name)) && \
+					 (intel_mid_identify_cpu() ==	\
+					  INTEL_MID_CPU_CHIP_TANGIER || \
+					 intel_mid_identify_cpu() ==	\
+					  INTEL_MID_CPU_CHIP_ANNIEDALE))
 #define gadget_is_musbhdrc(g)		(!strcmp("musb-hdrc", (g)->name))
 #define gadget_is_net2280(g)		(!strcmp("net2280", (g)->name))
 #define gadget_is_pxa(g)		(!strcmp("pxa25x_udc", (g)->name))
 #define gadget_is_pxa27x(g)		(!strcmp("pxa27x_udc", (g)->name))
-
 /**
  * gadget_supports_altsettings - return true if altsettings work
  * @gadget: the gadget in question
diff --git a/drivers/usb/gadget/rndis.c b/drivers/usb/gadget/rndis.c
index cb2767d..a1d9865 100644
--- a/drivers/usb/gadget/rndis.c
+++ b/drivers/usb/gadget/rndis.c
@@ -1050,7 +1050,7 @@
 		}
 
 		if (skb->len < sizeof *hdr) {
-			pr_err("invalid rndis pkt: skblen:%u hdr_len:%u",
+			pr_err("invalid rndis pkt: skblen:%u hdr_len:%lu",
 					skb->len, sizeof *hdr);
 			dev_kfree_skb_any(skb);
 			return -EINVAL;
diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
index 14f587e..66a1507 100644
--- a/drivers/usb/gadget/u_ether.c
+++ b/drivers/usb/gadget/u_ether.c
@@ -98,6 +98,12 @@
 module_param(qmult, uint, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(qmult, "queue length multiplier at high/super speed");
 
+/* Add padding space before the NET_IP_ALIGN to ensure the address of data
+ * buffer align on 64B
+ */
+#define DMA_ALIGN_64    64
+#define DMA_IP_ALIGN_PAD   (DMA_ALIGN_64 - NET_IP_ALIGN)
+
 /* for dual-speed hardware, use deeper queues at high/super speed */
 static inline int qlen(struct usb_gadget *gadget)
 {
@@ -233,8 +239,6 @@
 	 */
 	size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
 	size += dev->port_usb->header_len;
-	size += out->maxpacket - 1;
-	size -= size % out->maxpacket;
 
 	if (dev->ul_max_pkts_per_xfer)
 		size *= dev->ul_max_pkts_per_xfer;
@@ -243,7 +247,7 @@
 		size = max_t(size_t, size, dev->port_usb->fixed_out_len);
 
 	DBG(dev, "%s: size: %d\n", __func__, size);
-	skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
+	skb = alloc_skb(size + NET_IP_ALIGN + DMA_IP_ALIGN_PAD, gfp_flags);
 	if (skb == NULL) {
 		DBG(dev, "no rx skb\n");
 		goto enomem;
@@ -253,7 +257,7 @@
 	 * but on at least one, checksumming fails otherwise.  Note:
 	 * RNDIS headers involve variable numbers of LE32 values.
 	 */
-	skb_reserve(skb, NET_IP_ALIGN);
+	skb_reserve(skb, NET_IP_ALIGN + DMA_IP_ALIGN_PAD);
 
 	req->buf = skb->data;
 	req->length = size;
diff --git a/drivers/usb/gadget/udc-core.c b/drivers/usb/gadget/udc-core.c
index 8877771..5ab797f 100644
--- a/drivers/usb/gadget/udc-core.c
+++ b/drivers/usb/gadget/udc-core.c
@@ -23,6 +23,7 @@
 #include <linux/list.h>
 #include <linux/err.h>
 #include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
 
 #include <linux/usb/ch9.h>
 #include <linux/usb/gadget.h>
@@ -101,11 +102,18 @@
 
 /* ------------------------------------------------------------------------- */
 
+static void usb_gadget_state_work(struct work_struct *work)
+{
+	struct usb_gadget	*gadget = work_to_gadget(work);
+
+	sysfs_notify(&gadget->dev.kobj, NULL, "state");
+}
+
 void usb_gadget_set_state(struct usb_gadget *gadget,
 		enum usb_device_state state)
 {
 	gadget->state = state;
-	sysfs_notify(&gadget->dev.kobj, NULL, "status");
+	schedule_work(&gadget->work);
 }
 EXPORT_SYMBOL_GPL(usb_gadget_set_state);
 
@@ -192,6 +200,7 @@
 		goto err1;
 
 	dev_set_name(&gadget->dev, "gadget");
+	INIT_WORK(&gadget->work, usb_gadget_state_work);
 	gadget->dev.parent = parent;
 
 	dma_set_coherent_mask(&gadget->dev, parent->coherent_dma_mask);
@@ -309,6 +318,7 @@
 		usb_gadget_remove_driver(udc);
 
 	kobject_uevent(&udc->dev.kobj, KOBJ_REMOVE);
+	flush_work(&gadget->work);
 	device_unregister(&udc->dev);
 	device_unregister(&gadget->dev);
 }
diff --git a/drivers/usb/gadget/uvc_queue.c b/drivers/usb/gadget/uvc_queue.c
index 7ce27e3..de456a5 100644
--- a/drivers/usb/gadget/uvc_queue.c
+++ b/drivers/usb/gadget/uvc_queue.c
@@ -177,12 +177,16 @@
 
 	mutex_lock(&queue->mutex);
 	ret = vb2_qbuf(&queue->queue, buf);
+	if (ret < 0)
+		goto done;
+
 	spin_lock_irqsave(&queue->irqlock, flags);
 	ret = (queue->flags & UVC_QUEUE_PAUSED) != 0;
 	queue->flags &= ~UVC_QUEUE_PAUSED;
 	spin_unlock_irqrestore(&queue->irqlock, flags);
-	mutex_unlock(&queue->mutex);
 
+done:
+	mutex_unlock(&queue->mutex);
 	return ret;
 }
 
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 344d5e2..8128982 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -132,6 +132,29 @@
 		support both high speed and full speed devices, or high speed
 		devices only.
 
+config USB_EHCI_HCD_SPH
+	bool "EHCI SPH Support"
+	depends on USB_EHCI_HCD && USB_EHCI_PCI
+	default n
+	---help---
+	  Say 'Y' to turn on SPH support for the EHCI host controller driver.
+	  Now SPH is used for debugging and tracing between Application Processor
+	  and Baseband Processor, it can also work as a standard EHCI host that
+	  can connect different devices.
+
+	  If unsure, say N
+
+config USB_HCD_HSIC
+	bool "HSIC support"
+	depends on USB_EHCI_HCD
+	default n
+	---help---
+	  Say 'Y' to turn on HSIC support for the EHCI host controller driver.
+	  HSIC is used for AP and BP IPC. In AP side, it will work as a EHCI host.
+	  Some functionalities like AUX signal and LPM will be supported.
+
+	  If unsure, say N.
+
 config USB_EHCI_FSL
 	bool "Support for Freescale PPC on-chip EHCI USB controller"
 	depends on FSL_SOC
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 3be3df2..3c0a49a 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -130,7 +130,7 @@
 	}
 
 	/* Enable USB controller, 83xx or 8536 */
-	if (pdata->have_sysif_regs)
+	if (pdata->have_sysif_regs && pdata->controller_ver < FSL_USB_VER_1_6)
 		setbits32(hcd->regs + FSL_SOC_USB_CTRL, 0x4);
 
 	/* Don't need to set host mode here. It will be done by tdi_reset() */
@@ -232,15 +232,9 @@
 	case FSL_USB2_PHY_ULPI:
 		if (pdata->have_sysif_regs && pdata->controller_ver) {
 			/* controller version 1.6 or above */
+			clrbits32(non_ehci + FSL_SOC_USB_CTRL, UTMI_PHY_EN);
 			setbits32(non_ehci + FSL_SOC_USB_CTRL,
-					ULPI_PHY_CLK_SEL);
-			/*
-			 * Due to controller issue of PHY_CLK_VALID in ULPI
-			 * mode, we set USB_CTRL_USB_EN before checking
-			 * PHY_CLK_VALID, otherwise PHY_CLK_VALID doesn't work.
-			 */
-			clrsetbits_be32(non_ehci + FSL_SOC_USB_CTRL,
-					UTMI_PHY_EN, USB_CTRL_USB_EN);
+				ULPI_PHY_CLK_SEL | USB_CTRL_USB_EN);
 		}
 		portsc |= PORT_PTS_ULPI;
 		break;
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 246e124..98d7a4f 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -49,6 +49,11 @@
 #include <asm/firmware.h>
 #endif
 
+#ifdef CONFIG_USB_HCD_HSIC
+#include <linux/usb/ehci-tangier-hsic-pci.h>
+#endif
+
+
 /*-------------------------------------------------------------------------*/
 
 /*
@@ -93,8 +98,8 @@
 module_param (log2_irq_thresh, int, S_IRUGO);
 MODULE_PARM_DESC (log2_irq_thresh, "log2 IRQ latency, 1-64 microframes");
 
-/* initial park setting:  slower than hw default */
-static unsigned park = 0;
+/* initial park setting:  hw default */
+static unsigned park = 3;
 module_param (park, uint, S_IRUGO);
 MODULE_PARM_DESC (park, "park setting; 1-3 back-to-back async packets");
 
@@ -236,10 +241,12 @@
  * Reset a non-running (STS_HALT == 1) controller.
  * Must be called with interrupts enabled and the lock not held.
  */
-static int ehci_reset (struct ehci_hcd *ehci)
+int ehci_reset(struct ehci_hcd *ehci)
 {
 	int	retval;
 	u32	command = ehci_readl(ehci, &ehci->regs->command);
+	int	port;
+	u32	temp;
 
 	/* If the EHCI debug controller is active, special care must be
 	 * taken before and after a host controller reset */
@@ -258,6 +265,15 @@
 		ehci_writel(ehci, USBMODE_EX_HC | USBMODE_EX_VBPS,
 				&ehci->regs->usbmode_ex);
 		ehci_writel(ehci, TXFIFO_DEFAULT, &ehci->regs->txfill_tuning);
+
+		/* FIXME: clear ASUS auto PHY low power mode, as we set it
+		 * manually */
+		port = HCS_N_PORTS(ehci->hcs_params);
+		while (port--) {
+			u32 __iomem	*hostpc_reg = &ehci->regs->hostpc[port];
+			temp = ehci_readl(ehci, hostpc_reg);
+			ehci_writel(ehci, temp & ~HOSTPC_ASUS, hostpc_reg);
+		}
 	}
 	if (retval)
 		return retval;
@@ -272,6 +288,7 @@
 			ehci->resuming_ports = 0;
 	return retval;
 }
+EXPORT_SYMBOL_GPL(ehci_reset);
 
 /*
  * Idle the controller (turn off the schedules).
@@ -408,7 +425,7 @@
 /*
  * Called when the ehci_hcd module is removed.
  */
-static void ehci_stop (struct usb_hcd *hcd)
+void ehci_stop(struct usb_hcd *hcd)
 {
 	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
 
@@ -448,6 +465,7 @@
 	dbg_status (ehci, "ehci_stop completed",
 		    ehci_readl(ehci, &ehci->regs->status));
 }
+EXPORT_SYMBOL_GPL(ehci_stop);
 
 /* one-time init, only for memory state */
 static int ehci_init(struct usb_hcd *hcd)
@@ -686,6 +704,9 @@
 	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
 	u32			status, masked_status, pcd_status = 0, cmd;
 	int			bh;
+#ifdef CONFIG_USB_HCD_HSIC
+	struct pci_dev	*pdev = to_pci_dev(hcd->self.controller);
+#endif
 
 	spin_lock (&ehci->lock);
 
@@ -798,6 +819,11 @@
 			ehci_dbg (ehci, "port %d remote wakeup\n", i + 1);
 			usb_hcd_start_port_resume(&hcd->self, i);
 			mod_timer(&hcd->rh_timer, ehci->reset_done[i]);
+
+#ifdef CONFIG_USB_HCD_HSIC
+			if (pdev->device == 0x119d || pdev->device == 0x0f35)
+				count_ipc_stats(0, REMOTE_WAKEUP);
+#endif
 		}
 	}
 
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 9ab4a4d..d36712e 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -200,8 +200,11 @@
 		ehci_writel(ehci, t2, reg);
 	}
 
-	/* enter phy low-power mode again */
-	if (ehci->has_hostpc) {
+	/* enter phy low-power mode again if it's suspending*/
+	/* during remote-wakeup if the phy enters low power mode, the port
+	 * will get disconnected.
+	 */
+	if (ehci->has_hostpc && suspending) {
 		port = HCS_N_PORTS(ehci->hcs_params);
 		while (port--) {
 			u32 __iomem	*hostpc_reg = &ehci->regs->hostpc[port];
@@ -224,7 +227,10 @@
 	int			port;
 	int			mask;
 	int			changed;
-
+	int			rc = 0;
+#ifdef CONFIG_USB_HCD_HSIC
+	struct pci_dev   *pdev;
+#endif
 	ehci_dbg(ehci, "suspend root hub\n");
 
 	if (time_before (jiffies, ehci->next_statechange))
@@ -237,6 +243,13 @@
 	if (ehci->rh_state < EHCI_RH_RUNNING)
 		goto done;
 
+#ifdef CONFIG_USB_HCD_HSIC
+	pdev = to_pci_dev(hcd->self.controller);
+	if (pdev->device == 0x119D)
+		if (device_can_wakeup(&hcd->self.root_hub->dev))
+			hcd->self.root_hub->do_remote_wakeup = 1;
+#endif
+
 	/* Once the controller is stopped, port resumes that are already
 	 * in progress won't complete.  Hence if remote wakeup is enabled
 	 * for the root hub and any ports are in the middle of a resume or
@@ -317,8 +330,13 @@
 	if (ehci->bus_suspended)
 		udelay(150);
 
+	/* if halt ehci, after remote-wakeup, the port get disabled,
+	 * so don't halt ehci here
+	 */
+#if 0
 	/* turn off now-idle HC */
 	ehci_halt (ehci);
+#endif
 
 	spin_lock_irq(&ehci->lock);
 	if (ehci->enabled_hrtimer_events & BIT(EHCI_HRTIMER_POLL_DEAD))
@@ -339,6 +357,11 @@
 	ehci_writel(ehci, mask, &ehci->regs->intr_enable);
 	ehci_readl(ehci, &ehci->regs->intr_enable);
 
+#ifdef CONFIG_USB_OTG
+	if (ehci->has_otg && ehci->otg_suspend)
+		rc = ehci->otg_suspend(hcd);
+#endif
+
  done:
 	ehci->next_statechange = jiffies + msecs_to_jiffies(10);
 	ehci->enabled_hrtimer_events = 0;
@@ -358,6 +381,7 @@
 	u32			power_okay;
 	int			i;
 	unsigned long		resume_needed = 0;
+	int			rc = 0;
 
 	if (time_before (jiffies, ehci->next_statechange))
 		msleep(5);
@@ -387,6 +411,11 @@
 	 */
 	ehci_writel(ehci, 0, &ehci->regs->intr_enable);
 
+	/* if halt ehci in ehci_bus_suspend, after remote-wakeup, the port
+	 * gets disabled, so don't halt ehci in ehci_bus_suspend, and don't need
+	 * to re-start here
+	 */
+#if 0
 	/* re-init operational registers */
 	ehci_writel(ehci, 0, &ehci->regs->segment);
 	ehci_writel(ehci, ehci->periodic_dma, &ehci->regs->frame_list);
@@ -395,6 +424,8 @@
 	/* restore CMD_RUN, framelist size, and irq threshold */
 	ehci->command |= CMD_RUN;
 	ehci_writel(ehci, ehci->command, &ehci->regs->command);
+#endif
+
 	ehci->rh_state = EHCI_RH_RUNNING;
 
 	/*
@@ -481,6 +512,12 @@
 		goto shutdown;
 	ehci_writel(ehci, INTR_MASK, &ehci->regs->intr_enable);
 	(void) ehci_readl(ehci, &ehci->regs->intr_enable);
+
+#ifdef CONFIG_USB_OTG
+	if (ehci->has_otg && ehci->otg_resume)
+		rc = ehci->otg_resume(hcd);
+#endif
+
 	spin_unlock_irq(&ehci->lock);
 
 	return 0;
@@ -858,6 +895,7 @@
 				ehci->reset_done[wIndex] = jiffies
 						+ msecs_to_jiffies(20);
 				usb_hcd_start_port_resume(&hcd->self, wIndex);
+				set_bit(wIndex, &ehci->resuming_ports);
 				/* check the port again */
 				mod_timer(&ehci_to_hcd(ehci)->rh_timer,
 						ehci->reset_done[wIndex]);
@@ -1022,7 +1060,7 @@
 			ehci_writel(ehci, temp | PORT_SUSPEND, status_reg);
 			if (ehci->has_hostpc) {
 				spin_unlock_irqrestore(&ehci->lock, flags);
-				msleep(5);/* 5ms for HCD enter low pwr mode */
+				mdelay(5);/* 5ms for HCD enter low pwr mode */
 				spin_lock_irqsave(&ehci->lock, flags);
 				temp1 = ehci_readl(ehci, hostpc_reg);
 				ehci_writel(ehci, temp1 | HOSTPC_PHCD,
diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
index c369767..ec128bc 100644
--- a/drivers/usb/host/ehci-mxc.c
+++ b/drivers/usb/host/ehci-mxc.c
@@ -184,7 +184,7 @@
 	if (pdata && pdata->exit)
 		pdata->exit(pdev);
 
-	if (pdata->otg)
+	if (pdata && pdata->otg)
 		usb_phy_shutdown(pdata->otg);
 
 	clk_disable_unprepare(priv->usbclk);
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index 16d7150..dda408f 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -187,6 +187,12 @@
 		}
 
 		omap->phy[i] = phy;
+
+		if (pdata->port_mode[i] == OMAP_EHCI_PORT_MODE_PHY) {
+			usb_phy_init(omap->phy[i]);
+			/* bring PHY out of suspend */
+			usb_phy_set_suspend(omap->phy[i], 0);
+		}
 	}
 
 	pm_runtime_enable(dev);
@@ -211,13 +217,14 @@
 	}
 
 	/*
-	 * Bring PHYs out of reset.
+	 * Bring PHYs out of reset for non PHY modes.
 	 * Even though HSIC mode is a PHY-less mode, the reset
 	 * line exists between the chips and can be modelled
 	 * as a PHY device for reset control.
 	 */
 	for (i = 0; i < omap->nports; i++) {
-		if (!omap->phy[i])
+		if (!omap->phy[i] ||
+		     pdata->port_mode[i] == OMAP_EHCI_PORT_MODE_PHY)
 			continue;
 
 		usb_phy_init(omap->phy[i]);
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 595d210..b01314b 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -35,6 +35,131 @@
 #define PCI_DEVICE_ID_INTEL_CE4100_USB	0x2e70
 
 /*-------------------------------------------------------------------------*/
+/* CloverTrail USB SPH and Modem USB Switch Control Flag */
+static unsigned int use_sph;
+module_param(use_sph, uint, S_IRUGO);
+MODULE_PARM_DESC(use_sph, "sph and modem usb switch flag, default disable\n");
+/*
+* for external read access to <usb_sph>
+*/
+unsigned int sph_enabled(void)
+{
+	return use_sph;
+}
+
+/* enable SRAM if sram detected */
+static void sram_init(struct usb_hcd *hcd)
+{
+	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
+	struct pci_dev		*pdev = to_pci_dev(hcd->self.controller);
+	void __iomem		*base = NULL;
+	void __iomem		*addr = NULL;
+
+	if (!hcd->has_sram)
+		return;
+	ehci->sram_addr = pci_resource_start(pdev, 1);
+	ehci->sram_size = pci_resource_len(pdev, 1);
+	ehci_info(ehci, "Found HCD SRAM at %x size:%x\n",
+		ehci->sram_addr, ehci->sram_size);
+
+	if (pci_request_region(pdev, 1, kobject_name(&pdev->dev.kobj))) {
+		ehci_warn(ehci, "SRAM request failed\n");
+		hcd->has_sram = 0;
+		return;
+	} else if (!dma_declare_coherent_memory(&pdev->dev, ehci->sram_addr,
+			ehci->sram_addr, ehci->sram_size, DMA_MEMORY_MAP)) {
+		ehci_warn(ehci, "SRAM DMA declare failed\n");
+		pci_release_region(pdev, 1);
+		hcd->has_sram = 0;
+		return;
+	}
+
+	/* initialize SRAM to 0 to avoid ECC errors during entry into D0 */
+	base = ioremap_nocache(ehci->sram_addr, ehci->sram_size);
+	if (base == NULL) {
+		ehci_warn(ehci, "SRAM init: ioremap failed\n");
+		return;
+	}
+
+	addr = base;
+
+	while (addr < base + ehci->sram_size) {
+		writel(0x0, addr);
+		addr = addr + 4;
+	}
+
+	iounmap(base);
+}
+
+static void sram_deinit(struct usb_hcd *hcd)
+{
+	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
+	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
+
+	if (!hcd->has_sram)
+		return;
+	dma_release_declared_memory(&pdev->dev);
+	pci_release_region(pdev, 1);
+
+	/* If host is suspended, SRAM backup memory should be freed */
+	if (ehci->sram_swap) {
+		vfree(ehci->sram_swap);
+		ehci->sram_swap = NULL;
+	}
+}
+
+static int sram_backup(struct usb_hcd *hcd)
+{
+	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
+	void __iomem		*base;
+	int			offset;
+
+	ehci->sram_swap = vmalloc(ehci->sram_size);
+	if (!ehci->sram_swap) {
+		ehci_warn(ehci, "SRAM backup memory request failed\n");
+		return -ENOMEM;
+	}
+
+	base = ioremap_nocache(ehci->sram_addr, ehci->sram_size);
+	if (!base) {
+		ehci_warn(ehci, "SRAM backeup ioremap fails\n");
+		vfree(ehci->sram_swap);
+		ehci->sram_swap = NULL;
+		return -EFAULT;
+	}
+
+	for (offset = 0; offset < ehci->sram_size; offset += 4)
+		*(u32 *)(ehci->sram_swap + offset) = readl(base + offset);
+
+	iounmap(base);
+
+	return 0;
+}
+
+static int sram_restore(struct usb_hcd *hcd)
+{
+	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
+	void __iomem		*base;
+	int			offset;
+
+	if (!ehci->sram_swap)
+		return -EFAULT;
+
+	base = ioremap_nocache(ehci->sram_addr, ehci->sram_size);
+	if (!base) {
+		ehci_warn(ehci, "SRAM_restore ioremap fails\n");
+		return -EFAULT;
+	}
+
+	for (offset = 0; offset < ehci->sram_size; offset += 4)
+		writel(*(u32 *)(ehci->sram_swap + offset), base + offset);
+
+	iounmap(base);
+	vfree(ehci->sram_swap);
+	ehci->sram_swap = NULL;
+
+	return 0;
+}
 
 /* called after powerup, by probe or system-pm "wakeup" */
 static int ehci_pci_reinit(struct ehci_hcd *ehci, struct pci_dev *pdev)
@@ -114,6 +239,94 @@
 	case PCI_VENDOR_ID_INTEL:
 		if (pdev->device == PCI_DEVICE_ID_INTEL_CE4100_USB)
 			hcd->has_tt = 1;
+		else if (pdev->device == 0x0811 || pdev->device == 0x0829 ||
+				pdev->device == 0xE006) {
+			ehci_info(ehci, "Detected Intel MID OTG HC\n");
+			hcd->has_tt = 1;
+			ehci->has_hostpc = 1;
+#ifdef CONFIG_USB_OTG
+			ehci->has_otg = 1;
+#endif
+			hcd->has_sram = 1;
+			/*
+			 * Disable SRAM for CLVP A0 due to the silicon issue.
+			 */
+			if (pdev->device == 0xE006 && pdev->revision < 0xC) {
+				ehci_info(ehci, "Disable SRAM for CLVP A0\n");
+				hcd->has_sram = 0;
+			}
+
+			hcd->sram_no_payload = 1;
+			sram_init(hcd);
+			} else if (pdev->device == 0x0806) {
+				ehci_info(ehci, "Detected Langwell MPH\n");
+				hcd->has_tt = 1;
+				ehci->has_hostpc = 1;
+				hcd->has_sram = 1;
+				hcd->sram_no_payload = 1;
+				sram_init(hcd);
+			} else if (pdev->device == 0x0829) {
+				ehci_info(ehci, "Detected Penwell OTG HC\n");
+				hcd->has_tt = 1;
+				ehci->has_hostpc = 1;
+		} else if (pdev->device == 0x08F2) {
+#ifdef CONFIG_USB_EHCI_HCD_SPH
+			struct ehci_sph_pdata   *sph_pdata;
+			sph_pdata = pdev->dev.platform_data;
+
+			/* All need to bypass tll mode  */
+			temp = ehci_readl(ehci, hcd->regs + CLV_SPHCFG);
+			temp &= ~CLV_SPHCFG_ULPI1TYPE;
+			ehci_writel(ehci, temp, hcd->regs + CLV_SPHCFG);
+
+			/* Check SPH enabled or not */
+			if (!sph_enabled() || !sph_pdata) {
+				/* ULPI 1 ref-clock switch off */
+				temp = ehci_readl(ehci, hcd->regs + CLV_SPHCFG);
+				temp |= CLV_SPHCFG_REFCKDIS;
+				ehci_writel(ehci, temp, hcd->regs + CLV_SPHCFG);
+
+				/* Set Power state */
+				retval = pci_set_power_state(pdev, PCI_D1);
+				if (retval < 0)
+					ehci_err(ehci,
+						"Set SPH to D1 failed, retval = %d\n",
+						retval);
+
+				ehci_info(ehci, "USB SPH is disabled\n");
+				return -ENODEV;
+				}
+
+			sph_pdata->enabled = sph_enabled();
+
+			ehci_info(ehci, "Detected SPH HC\n");
+			hcd->has_tt = 1;
+			ehci->has_hostpc = 1;
+
+			hcd->has_wakeup_irq = 1;
+
+			temp = ehci_readl(ehci, hcd->regs + CLV_SPH_HOSTPC);
+			temp |= CLV_SPH_HOSTPC_PTS;
+			ehci_writel(ehci, temp, hcd->regs + CLV_SPH_HOSTPC);
+
+			device_set_wakeup_enable(&pdev->dev, true);
+
+			pm_runtime_set_active(&pdev->dev);
+#endif
+		} else if (pdev->device == 0x119D) {
+			ehci_info(ehci, "Detected HSIC HC\n");
+			hcd->has_tt = 1;
+			ehci->has_hostpc = 1;
+
+			hcd->has_wakeup_irq = 1;
+
+			hcd->has_sram = 0;
+			hcd->sram_no_payload = 1;
+			sram_init(hcd);
+
+			device_set_wakeup_enable(&pdev->dev, true);
+			pm_runtime_set_active(&pdev->dev);
+		}
 		break;
 	case PCI_VENDOR_ID_TDI:
 		if (pdev->device == PCI_DEVICE_ID_TDI_EHCI)
@@ -338,11 +551,62 @@
 	}
 }
 
+static int ehci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
+{
+	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
+	unsigned long		flags;
+	int			rc = 0;
+	int			port;
+
+	if (time_before(jiffies, ehci->next_statechange))
+		usleep_range(10000, 12000);
+
+	/* s0i3 may poweroff SRAM, backup the SRAM */
+	if (hcd->has_sram && sram_backup(hcd)) {
+		ehci_warn(ehci, "sram_backup failed\n");
+		return -EPERM;
+	}
+
+	rc = ehci_suspend(hcd, do_wakeup);
+
+	/* Set HOSTPC_PHCD if not set yet to let PHY enter low-power mode */
+	if (ehci->has_hostpc) {
+		usleep_range(5000, 6000);
+		spin_lock_irqsave(&ehci->lock, flags);
+
+		port = HCS_N_PORTS(ehci->hcs_params);
+		while (port--) {
+			u32 __iomem	*hostpc_reg;
+			u32		temp;
+
+			hostpc_reg = &ehci->regs->hostpc[port];
+			temp = ehci_readl(ehci, hostpc_reg);
+
+			if (!(temp & HOSTPC_PHCD))
+				ehci_writel(ehci, temp | HOSTPC_PHCD,
+						hostpc_reg);
+			temp = ehci_readl(ehci, hostpc_reg);
+			ehci_dbg(ehci, "Port %d PHY low-power mode %s\n",
+				port, (temp & HOSTPC_PHCD) ?
+					"succeeded" : "failed");
+		}
+		spin_unlock_irqrestore(&ehci->lock, flags);
+	}
+
+	return rc;
+}
+
 static int ehci_pci_resume(struct usb_hcd *hcd, bool hibernated)
 {
 	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
 	struct pci_dev		*pdev = to_pci_dev(hcd->self.controller);
 
+	/* s0i3 may have poweroff the SRAM, restore it here*/
+	if (hcd->has_sram && sram_restore(hcd)) {
+		ehci_warn(ehci, "sram_restore failed, stop resuming.\n");
+		return -EPERM;
+	}
+
 	/* The BIOS on systems with the Intel Panther Point chipset may or may
 	 * not support xHCI natively.  That means that during system resume, it
 	 * may switch the ports back to EHCI so that users can use their
@@ -369,10 +633,21 @@
 
 #else
 
-#define ehci_suspend		NULL
+#define ehci_pci_suspend	NULL
 #define ehci_pci_resume		NULL
 #endif	/* CONFIG_PM */
 
+/*
+ * Called when the ehci_pci module is removed.
+ */
+static void ehci_pci_stop(struct usb_hcd *hcd)
+{
+	ehci_stop(hcd);
+	/* Release sram when removed */
+	if (hcd->has_sram)
+		sram_deinit(hcd);
+}
+
 static struct hc_driver __read_mostly ehci_pci_hc_driver;
 
 static const struct ehci_driver_overrides pci_overrides __initconst = {
@@ -403,15 +678,32 @@
 	.remove =	usb_hcd_pci_remove,
 	.shutdown = 	usb_hcd_pci_shutdown,
 
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_PM
 	.driver =	{
 		.pm =	&usb_hcd_pci_pm_ops
 	},
 #endif
 };
 
+#ifdef CONFIG_USB_EHCI_HCD_SPH
+#include "ehci-sph-pci.c"
+#define INTEL_MID_SPH_HOST_DRIVER	ehci_sph_driver
+#endif
+
+#if defined(CONFIG_USB_LANGWELL_OTG) || defined(CONFIG_USB_PENWELL_OTG)
+#include "ehci-langwell-pci.c"
+#define INTEL_MID_OTG_HOST_DRIVER	ehci_otg_driver
+#endif
+
+#ifdef CONFIG_USB_HCD_HSIC
+#include "ehci-tangier-hsic-pci.c"
+#define INTEL_MID_HSIC_HOST_DRIVER	ehci_hsic_driver
+#endif
+
 static int __init ehci_pci_init(void)
 {
+	int		retval;
+
 	if (usb_disabled())
 		return -ENODEV;
 
@@ -420,15 +712,82 @@
 	ehci_init_driver(&ehci_pci_hc_driver, &pci_overrides);
 
 	/* Entries for the PCI suspend/resume callbacks are special */
-	ehci_pci_hc_driver.pci_suspend = ehci_suspend;
+	ehci_pci_hc_driver.pci_suspend = ehci_pci_suspend;
 	ehci_pci_hc_driver.pci_resume = ehci_pci_resume;
 
-	return pci_register_driver(&ehci_pci_driver);
+	/* Need to use seprate ehci_stop since need to hanlde sram */
+	ehci_pci_hc_driver.stop = ehci_pci_stop;
+
+#ifdef CONFIG_USB_HCD_HSIC
+	retval = pci_register_driver(&INTEL_MID_HSIC_HOST_DRIVER);
+	if (retval < 0)
+		goto clean0;
+#endif
+
+#ifdef INTEL_MID_OTG_HOST_DRIVER
+	retval = intel_mid_ehci_driver_register(&INTEL_MID_OTG_HOST_DRIVER);
+	if (retval < 0) {
+		pr_err("%s  register otg host driver failed, retval = %d\n",
+				__func__, retval);
+		goto clean1;
+	}
+#endif
+
+#ifdef CONFIG_USB_EHCI_HCD_SPH
+	if (sph_enabled()) {
+		retval = pci_register_driver(&INTEL_MID_SPH_HOST_DRIVER);
+		if (retval < 0) {
+			pr_err("%s  register sph driver failed, retval = %d\n",
+					__func__, retval);
+			goto clean2;
+		}
+	}
+#endif
+
+	retval = pci_register_driver(&ehci_pci_driver);
+	if (retval < 0) {
+		pr_err("%s  register ehci pci driver failed, retval = %d\n",
+				__func__, retval);
+		goto clean3;
+	}
+	return retval;
+
+clean3:
+	pci_unregister_driver(&ehci_pci_driver);
+
+#ifdef CONFIG_USB_EHCI_HCD_SPH
+clean2:
+	pci_unregister_driver(&INTEL_MID_SPH_HOST_DRIVER);
+#endif
+
+#ifdef INTEL_MID_OTG_HOST_DRIVER
+clean1:
+	intel_mid_ehci_driver_unregister(&INTEL_MID_OTG_HOST_DRIVER);
+#endif
+
+#ifdef CONFIG_USB_HCD_HSIC
+clean0:
+	pci_unregister_driver(&INTEL_MID_HSIC_HOST_DRIVER);
+#endif
+	return retval;
 }
 module_init(ehci_pci_init);
 
 static void __exit ehci_pci_cleanup(void)
 {
+#ifdef INTEL_MID_OTG_HOST_DRIVER
+	intel_mid_ehci_driver_unregister(&INTEL_MID_OTG_HOST_DRIVER);
+#endif
+
+#ifdef CONFIG_USB_EHCI_HCD_SPH
+	if (sph_enabled())
+		pci_unregister_driver(&INTEL_MID_SPH_HOST_DRIVER);
+#endif
+
+#ifdef CONFIG_USB_HCD_HSIC
+	pci_unregister_driver(&INTEL_MID_HSIC_HOST_DRIVER);
+#endif
+
 	pci_unregister_driver(&ehci_pci_driver);
 }
 module_exit(ehci_pci_cleanup);
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index f80d033..8e3c878 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -1391,21 +1391,20 @@
 
 		/* Behind the scheduling threshold? */
 		if (unlikely(start < next)) {
+			unsigned now2 = (now - base) & (mod - 1);
 
 			/* USB_ISO_ASAP: Round up to the first available slot */
 			if (urb->transfer_flags & URB_ISO_ASAP)
 				start += (next - start + period - 1) & -period;
 
 			/*
-			 * Not ASAP: Use the next slot in the stream.  If
-			 * the entire URB falls before the threshold, fail.
+			 * Not ASAP: Use the next slot in the stream,
+			 * no matter what.
 			 */
-			else if (start + span - period < next) {
-				ehci_dbg(ehci, "iso urb late %p (%u+%u < %u)\n",
+			else if (start + span - period < now2) {
+				ehci_dbg(ehci, "iso underrun %p (%u+%u < %u)\n",
 						urb, start + base,
-						span - period, next + base);
-				status = -EXDEV;
-				goto fail;
+						span - period, now2 + base);
 			}
 		}
 
diff --git a/drivers/usb/host/ehci-tangier-hsic-pci.c b/drivers/usb/host/ehci-tangier-hsic-pci.c
new file mode 100644
index 0000000..6724fe9
--- /dev/null
+++ b/drivers/usb/host/ehci-tangier-hsic-pci.c
@@ -0,0 +1,1737 @@
+/*
+ * Intel MID Platform Tangier EHCI/HSIC Controller PCI Bus Glue.
+ *
+ * Copyright (c) 2008 - 2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License 2 as published by the
+ * Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/usb/hcd.h>
+#include <linux/wakelock.h>
+#include <linux/lnw_gpio.h>
+#include <linux/gpio.h>
+#include <linux/usb/ehci-tangier-hsic-pci.h>
+#include <asm/intel-mid.h>
+#include <linux/jiffies.h>
+#include <linux/usb.h>
+#include <linux/suspend.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#define INIT_TASK_PID	1
+
+static struct pci_dev	*pci_dev;
+static struct class *hsic_class;
+static struct device *hsic_class_dev;
+
+
+static int ehci_hsic_start_host(struct pci_dev  *pdev);
+static int ehci_hsic_stop_host(struct pci_dev *pdev);
+static int create_device_files(void);
+static int create_class_device_files(void);
+
+static int enabling_disabling;
+static int hsic_enable;
+static struct hsic_tangier_priv hsic;
+
+/* pm_statistics structure, default disabled */
+static unsigned			stats_enable;
+#define IPC_STATS_NUM		8
+static struct ipc_stats		stats[IPC_STATS_NUM];
+
+static const char enabled[] = "enabled";
+static const char disabled[] = "disabled";
+static const char reset[] = "reset";
+
+
+static struct dentry *hsic_debugfs_root;
+static struct dentry *ipc_debug_control;
+static struct dentry *ipc_stats;
+
+#define IPCCOUNT(x, y) \
+	do { if (x) (y)++; } while (0)
+
+void count_ipc_stats(int retval, enum ipc_stats_type type)
+{
+	switch (retval) {
+	case 0:
+		IPCCOUNT(stats_enable, stats[type].success_cnt);
+		break;
+	case -EBUSY:
+		IPCCOUNT(stats_enable, stats[type].ipc_failure[0].fail_cnt);
+		break;
+	case -EINVAL:
+		IPCCOUNT(stats_enable, stats[type].ipc_failure[1].fail_cnt);
+		break;
+	case -ETIMEDOUT:
+		IPCCOUNT(stats_enable, stats[type].ipc_failure[2].fail_cnt);
+		break;
+	case -ESHUTDOWN:
+		IPCCOUNT(stats_enable, stats[type].ipc_failure[3].fail_cnt);
+		break;
+	default:
+		break;
+	}
+}
+
+static void ipc_counter_init(void)
+{
+	int		i, j;
+
+	stats[0].name = "remote_wakeup(inband)";
+	stats[1].name = "remote_wakeup(outband)";
+	stats[2].name = "bus_suspend";
+	stats[3].name = "bus_resume";
+	stats[4].name = "d0i3_entry";
+	stats[5].name = "d0i3_exit";
+	stats[6].name = "d3_entry";
+	stats[7].name = "d3_exit";
+
+	for (i = 0; i < IPC_STATS_NUM; i++) {
+		stats[i].success_cnt = 0;
+		stats[i].ipc_failure[0].name = "EBUSY";
+		stats[i].ipc_failure[1].name = "EINVAL";
+		stats[i].ipc_failure[2].name = "ETIMEDOUT";
+		stats[i].ipc_failure[3].name = "ESHUTDOWN";
+	}
+
+	for (i = 0; i < IPC_STATS_NUM; i++)
+		for (j = 0; j < PM_FAILURE_COUNT; j++)
+			stats[i].ipc_failure[j].fail_cnt = 0;
+}
+
+static int ipc_control_show(struct seq_file *s, void *unused)
+{
+	if (stats_enable)
+		seq_printf(s, "%s\n", enabled);
+	else
+		seq_printf(s, "%s\n", disabled);
+	return 0;
+}
+
+static int ipc_control_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ipc_control_show, inode->i_private);
+}
+
+static ssize_t ipc_control_write(struct file *file,
+	const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	char			buf[32];
+
+	if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+			return -EFAULT;
+
+	if (!strncmp(buf, "on", 2)) {
+		stats_enable = STATS_ENABLE;
+	} else if (!strncmp(buf, "off", 3)) {
+		stats_enable = STATS_DISABLE;
+	} else if (!strncmp(buf, "reset", 5)) {
+		/* reset all counter and then enable stats */
+		ipc_counter_init();
+		stats_enable = STATS_ENABLE;
+	}
+	return count;
+}
+static const struct file_operations ipc_control_fops = {
+	.open                   = ipc_control_open,
+	.write                  = ipc_control_write,
+	.read                   = seq_read,
+	.llseek                 = seq_lseek,
+	.release                = single_release,
+};
+
+static int ipc_stats_show(struct seq_file *s, void *unused)
+{
+	int		i, j;
+
+	seq_puts(s, "USB IPC stats show:\n");
+
+	for (i = 0; i < IPC_STATS_NUM; i++) {
+		seq_printf(s, "status = %s success count = %lu\n",
+				stats[i].name, stats[i].success_cnt);
+		for (j = 0; j < PM_FAILURE_COUNT; j++)
+			seq_printf(s, "failure = %s count = %lu\n",
+				stats[i].ipc_failure[j].name,
+				stats[i].ipc_failure[j].fail_cnt);
+
+		seq_puts(s, "\n");
+	}
+	return 0;
+}
+
+static int ipc_stats_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ipc_stats_show, inode->i_private);
+}
+
+static const struct file_operations ipc_stats_fops = {
+	.open                   = ipc_stats_open,
+	.read                   = seq_read,
+	.llseek                 = seq_lseek,
+	.release                = single_release,
+};
+
+/* Workaround for OSPM, set PMCMD to ask SCU
+ * power gate EHCI controller and DPHY
+ */
+static void hsic_enter_exit_d3(int enter_exit)
+{
+	if (enter_exit) {
+		printk(KERN_CRIT "HSIC Enter D0I3!\n");
+		pci_set_power_state(pci_dev, PCI_D3cold);
+		count_ipc_stats(0, D0I3_ENTRY);
+	} else {
+		printk(KERN_CRIT "HSIC Exit D0I3!\n");
+		pci_set_power_state(pci_dev, PCI_D0);
+		count_ipc_stats(0, D0I3_EXIT);
+	}
+}
+
+#if 0
+static void ehci_hsic_port_power(struct ehci_hcd *ehci, int is_on)
+{
+	unsigned port;
+
+	if (!HCS_PPC(ehci->hcs_params))
+		return;
+
+	dev_dbg(&pci_dev->dev, "...power%s ports...\n", is_on ? "up" : "down");
+	for (port = HCS_N_PORTS(ehci->hcs_params); port > 0; )
+		(void) ehci_hub_control(ehci_to_hcd(ehci),
+				is_on ? SetPortFeature : ClearPortFeature,
+				USB_PORT_FEAT_POWER,
+				port--, NULL, 0);
+	/* Flush those writes */
+	ehci_readl(ehci, &ehci->regs->command);
+}
+#endif
+
+static void ehci_hsic_phy_power(struct ehci_hcd *ehci, int is_low_power)
+{
+	unsigned port;
+
+	port = HCS_N_PORTS(ehci->hcs_params);
+	while (port--) {
+		u32 __iomem	*hostpc_reg;
+		u32		t3;
+
+		hostpc_reg = (u32 __iomem *)((u8 *) ehci->regs
+				+ 0x84 + 4 * port);
+		t3 = ehci_readl(ehci, hostpc_reg);
+		ehci_dbg(ehci, "Port %d phy low-power mode org %08x\n",
+				port, t3);
+
+		if (is_low_power)
+			ehci_writel(ehci, t3 | HOSTPC_PHCD, hostpc_reg);
+		else
+			ehci_writel(ehci, t3 & ~HOSTPC_PHCD, hostpc_reg);
+
+		t3 = ehci_readl(ehci, hostpc_reg);
+		ehci_dbg(ehci, "Port %d phy low-power mode chg %08x\n",
+				port, t3);
+	}
+}
+
+/* Init HSIC AUX GPIO */
+static int hsic_aux_gpio_init(void)
+{
+	int		retval = 0;
+
+	dev_dbg(&pci_dev->dev,
+		"%s---->\n", __func__);
+	hsic.aux_gpio = get_gpio_by_name(HSIC_AUX_GPIO_NAME);
+	if (gpio_is_valid(hsic.aux_gpio)) {
+		retval = gpio_request(hsic.aux_gpio, "hsic_aux");
+		if (retval < 0) {
+			dev_err(&pci_dev->dev,
+				"Request GPIO %d with error %d\n",
+				hsic.aux_gpio, retval);
+			retval = -ENODEV;
+			goto err;
+		}
+	} else {
+		retval = -ENODEV;
+		goto err;
+	}
+
+	dev_dbg(&pci_dev->dev,
+		"%s<----\n", __func__);
+	return retval;
+
+err:
+	gpio_free(hsic.aux_gpio);
+	return retval;
+}
+
+/* Init HSIC AUX2 GPIO as side band remote wakeup source */
+static int hsic_wakeup_gpio_init(void)
+{
+	int		retval = 0;
+
+	dev_dbg(&pci_dev->dev,
+		"%s---->\n", __func__);
+	hsic.wakeup_gpio = get_gpio_by_name(HSIC_WAKEUP_GPIO_NAME);
+	if (gpio_is_valid(hsic.wakeup_gpio)) {
+		retval = gpio_request(hsic.wakeup_gpio, "hsic_wakeup");
+		if (retval < 0) {
+			dev_err(&pci_dev->dev,
+				"Request GPIO %d with error %d\n",
+				hsic.wakeup_gpio, retval);
+			retval = -ENODEV;
+			goto err;
+		}
+	} else {
+		retval = -ENODEV;
+		goto err;
+	}
+
+	gpio_direction_input(hsic.wakeup_gpio);
+	dev_dbg(&pci_dev->dev,
+		"%s<----\n", __func__);
+	return retval;
+
+err:
+	gpio_free(hsic.wakeup_gpio);
+	return retval;
+}
+
+static void hsic_aux_irq_free(void)
+{
+	dev_dbg(&pci_dev->dev,
+		"%s---->\n", __func__);
+	if (hsic.hsic_aux_irq_enable) {
+		hsic.hsic_aux_irq_enable = 0;
+		free_irq(gpio_to_irq(hsic.aux_gpio), &pci_dev->dev);
+	}
+	dev_dbg(&pci_dev->dev,
+		"%s<----\n", __func__);
+	return;
+}
+
+static void hsic_wakeup_irq_free(void)
+{
+	dev_dbg(&pci_dev->dev,
+		"%s---->\n", __func__);
+	if (hsic.hsic_wakeup_irq_enable) {
+		hsic.hsic_wakeup_irq_enable = 0;
+		free_irq(gpio_to_irq(hsic.wakeup_gpio), &pci_dev->dev);
+	}
+	dev_dbg(&pci_dev->dev,
+		"%s<----\n", __func__);
+	return;
+}
+
+/* HSIC AUX GPIO irq handler */
+static irqreturn_t hsic_aux_gpio_irq(int irq, void *data)
+{
+	struct device *dev = data;
+
+	dev_dbg(dev,
+		"%s---> hsic aux gpio request irq: %d\n",
+		__func__, irq);
+
+	if (hsic.hsic_aux_irq_enable == 0) {
+		dev_dbg(dev,
+			"%s---->AUX IRQ is disabled\n", __func__);
+		return IRQ_HANDLED;
+	}
+
+	cancel_delayed_work(&hsic.wakeup_work);
+	if (delayed_work_pending(&hsic.hsic_aux)) {
+		dev_dbg(dev,
+			"%s---->Delayed work pending\n", __func__);
+		return IRQ_HANDLED;
+	}
+
+	hsic.hsic_aux_finish = 0;
+	schedule_delayed_work(&hsic.hsic_aux, 0);
+	dev_dbg(dev,
+		"%s<----\n", __func__);
+
+	return IRQ_HANDLED;
+}
+
+/* HSIC Wakeup GPIO irq handler */
+static irqreturn_t hsic_wakeup_gpio_irq(int irq, void *data)
+{
+	struct device *dev = data;
+
+	dev_dbg(dev,
+		"%s---> hsic wakeup gpio request irq: %d\n",
+		__func__, irq);
+	if (hsic.hsic_wakeup_irq_enable == 0) {
+		dev_dbg(dev,
+			"%s---->Wakeup IRQ is disabled\n", __func__);
+		return IRQ_HANDLED;
+	}
+
+	/* take a wake lock during 25ms, because resume lasts 20ms, after that
+	USB framework will prevent to go in low power if there is traffic */
+	wake_lock_timeout(&hsic.resume_wake_lock, msecs_to_jiffies(25));
+
+	queue_delayed_work(hsic.work_queue, &hsic.wakeup_work, 0);
+	dev_dbg(dev,
+		"%s<----\n", __func__);
+	count_ipc_stats(0, REMOTE_WAKEUP_OOB);
+
+	return IRQ_HANDLED;
+}
+
+static int hsic_aux_irq_init(void)
+{
+	int retval;
+
+	dev_dbg(&pci_dev->dev,
+		"%s---->\n", __func__);
+	if (hsic.hsic_aux_irq_enable) {
+		dev_dbg(&pci_dev->dev,
+			"%s<----AUX IRQ is enabled\n", __func__);
+		return 0;
+	}
+	hsic.hsic_aux_irq_enable = 1;
+	gpio_direction_input(hsic.aux_gpio);
+	retval = request_irq(gpio_to_irq(hsic.aux_gpio),
+			hsic_aux_gpio_irq,
+			IRQF_SHARED | IRQF_TRIGGER_FALLING | IRQF_NO_SUSPEND,
+			"hsic_disconnect_request", &pci_dev->dev);
+	if (retval) {
+		dev_err(&pci_dev->dev,
+			"unable to request irq %i, err: %d\n",
+			gpio_to_irq(hsic.aux_gpio), retval);
+		goto err;
+	}
+
+	lnw_gpio_set_alt(hsic.aux_gpio, 0);
+	dev_dbg(&pci_dev->dev,
+		"%s<----\n", __func__);
+
+	return retval;
+
+err:
+	hsic.hsic_aux_irq_enable = 0;
+	free_irq(gpio_to_irq(hsic.aux_gpio), &pci_dev->dev);
+	return retval;
+}
+
+static int hsic_wakeup_irq_init(void)
+{
+	int retval;
+
+	dev_dbg(&pci_dev->dev,
+		"%s---->\n", __func__);
+	if (hsic.hsic_wakeup_irq_enable) {
+		dev_dbg(&pci_dev->dev,
+			"%s<----Wakeup IRQ is enabled\n", __func__);
+		return 0;
+	}
+	hsic.hsic_wakeup_irq_enable = 1;
+	gpio_direction_input(hsic.wakeup_gpio);
+	retval = request_irq(gpio_to_irq(hsic.wakeup_gpio),
+			hsic_wakeup_gpio_irq,
+			IRQF_SHARED | IRQF_TRIGGER_RISING | IRQF_NO_SUSPEND,
+			"hsic_remote_wakeup_request", &pci_dev->dev);
+	if (retval) {
+		dev_err(&pci_dev->dev,
+			"unable to request irq %i, err: %d\n",
+			gpio_to_irq(hsic.wakeup_gpio), retval);
+		goto err;
+	}
+
+	lnw_gpio_set_alt(hsic.wakeup_gpio, 0);
+	dev_dbg(&pci_dev->dev,
+		"%s<----\n", __func__);
+
+	return retval;
+
+err:
+	hsic.hsic_wakeup_irq_enable = 0;
+	free_irq(gpio_to_irq(hsic.wakeup_gpio), &pci_dev->dev);
+	return retval;
+}
+
+static void s3_wake_lock(void)
+{
+	mutex_lock(&hsic.wlock_mutex);
+	if (hsic.s3_wlock_state == UNLOCKED) {
+		wake_lock(&hsic.s3_wake_lock);
+		hsic.s3_wlock_state = LOCKED;
+	}
+	mutex_unlock(&hsic.wlock_mutex);
+}
+
+static void s3_wake_unlock(void)
+{
+	mutex_lock(&hsic.wlock_mutex);
+	if (hsic.s3_wlock_state == LOCKED) {
+		wake_unlock(&hsic.s3_wake_lock);
+		hsic.s3_wlock_state = UNLOCKED;
+	}
+	mutex_unlock(&hsic.wlock_mutex);
+}
+
+/* the root hub will call this callback when device added/removed */
+static void hsic_notify(struct usb_device *udev, unsigned action)
+{
+	int retval;
+	struct pci_dev *pdev = to_pci_dev(udev->bus->controller);
+
+	/* Ignore and only valid for HSIC. Filter out
+	 * the USB devices added by other USB2 host driver */
+	if (pdev->device != 0x119d)
+		return;
+
+	/* Ignore USB devices on external hub */
+	if (udev->parent && udev->parent->parent)
+		return;
+
+	/* Only valid for hsic port1 */
+	if (udev->portnum == 2) {
+		pr_debug("%s ignore hsic port2\n", __func__);
+		return;
+	}
+
+	switch (action) {
+	case USB_DEVICE_ADD:
+		pr_debug("Notify HSIC add device\n");
+		/* Root hub */
+		if (!udev->parent) {
+			hsic.rh_dev = udev;
+			pr_debug("%s Enable autosuspend\n", __func__);
+			pm_runtime_set_autosuspend_delay(&udev->dev,
+					hsic.bus_inactivityDuration);
+			hsic.autosuspend_enable = 1;
+			usb_enable_autosuspend(udev);
+		} else {
+			/* Modem devices */
+			hsic.modem_dev = udev;
+			pm_runtime_set_autosuspend_delay
+				(&udev->dev, hsic.port_inactivityDuration);
+			udev->persist_enabled = 0;
+
+			if (hsic.remoteWakeup_enable) {
+				pr_debug("%s Modem dev remote wakeup enabled\n",
+						 __func__);
+				device_set_wakeup_capable
+					(&hsic.modem_dev->dev, 1);
+				device_set_wakeup_capable
+					(&hsic.rh_dev->dev, 1);
+			} else {
+				pr_debug("%s Modem dev remote wakeup disabled\n",
+						 __func__);
+				device_set_wakeup_capable
+					(&hsic.modem_dev->dev, 0);
+				device_set_wakeup_capable
+					(&hsic.rh_dev->dev, 0);
+			}
+			pr_debug("%s Disable autosuspend\n", __func__);
+			usb_disable_autosuspend(hsic.modem_dev);
+			hsic.autosuspend_enable = 0;
+
+			pr_debug("%s----> Enable AUX irq\n", __func__);
+			retval = hsic_aux_irq_init();
+			if (retval)
+				dev_err(&pci_dev->dev,
+					"unable to request IRQ\n");
+		}
+		break;
+	case USB_DEVICE_REMOVE:
+		pr_debug("Notify HSIC delete device\n");
+		/* Root hub */
+		if (!udev->parent) {
+			pr_debug("%s rh_dev deleted\n", __func__);
+			hsic.rh_dev = NULL;
+			hsic.autosuspend_enable = 1;
+		} else {
+			/* Modem devices */
+			pr_debug("%s----> modem dev deleted\n", __func__);
+			hsic.modem_dev = NULL;
+		}
+		s3_wake_unlock();
+		break;
+	default:
+		pr_debug("Notify action not supported\n");
+		break ;
+	}
+	return;
+}
+
+static void hsic_port_suspend(struct usb_device *udev)
+{
+	struct pci_dev *pdev = to_pci_dev(udev->bus->controller);
+
+	if (pdev->device != 0x119d)
+		return;
+
+	/* Ignore USB devices on external hub */
+	if (udev->parent && udev->parent->parent)
+		return;
+
+	/* Only valid for hsic port1 */
+	if (udev->portnum == 2) {
+		pr_debug("%s ignore hsic port2\n", __func__);
+		return;
+	}
+
+	/* Modem dev */
+	if (udev->parent) {
+		pr_debug("%s s3 wlock unlocked\n", __func__);
+		s3_wake_unlock();
+	}
+	count_ipc_stats(0, BUS_SUSPEND);
+}
+
+static void hsic_port_resume(struct usb_device *udev)
+{
+	struct pci_dev *pdev = to_pci_dev(udev->bus->controller);
+
+	if (pdev->device != 0x119d)
+		return;
+
+	/* Ignore USB devices on external hub */
+	if (udev->parent && udev->parent->parent)
+		return;
+
+	/* Only valid for hsic port1 */
+	if (udev->portnum == 2) {
+		pr_debug("%s ignore hsic port2\n", __func__);
+		return;
+	}
+
+	/* Modem dev */
+	if ((udev->parent) && (hsic.s3_rt_state != SUSPENDING)) {
+		pr_debug("%s s3 wlock locked\n", __func__);
+		s3_wake_lock();
+	}
+	count_ipc_stats(0, BUS_RESUME);
+}
+
+static int hsic_pm_notify(struct notifier_block *self,
+		unsigned long action, void *dev)
+{
+	switch (action) {
+	case USB_PORT_SUSPEND:
+		hsic_port_suspend(dev);
+		break;
+	case USB_PORT_RESUME:
+		hsic_port_resume(dev);
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static int hsic_s3_entry_notify(struct notifier_block *self,
+		unsigned long action, void *dummy)
+{
+	switch (action) {
+	case PM_SUSPEND_PREPARE:
+		hsic.s3_rt_state = SUSPENDING;
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static void hsic_aux_work(struct work_struct *work)
+{
+	dev_dbg(&pci_dev->dev,
+		"%s---->\n", __func__);
+	if (hsic.modem_dev == NULL) {
+		dev_dbg(&pci_dev->dev,
+			"%s---->Modem not created\n", __func__);
+		return;
+	}
+
+	mutex_lock(&hsic.hsic_mutex);
+	/* Free the aux irq */
+	hsic_aux_irq_free();
+	dev_dbg(&pci_dev->dev,
+		"%s---->AUX IRQ is disabled\n", __func__);
+
+	if (hsic.hsic_stopped == 0)
+		ehci_hsic_stop_host(pci_dev);
+	hsic_enter_exit_d3(1);
+	usleep_range(5000, 6000);
+	hsic_enter_exit_d3(0);
+	ehci_hsic_start_host(pci_dev);
+
+	hsic.autosuspend_enable = 0;
+	usb_disable_autosuspend(hsic.rh_dev);
+
+	hsic.hsic_aux_finish = 1;
+	wake_up(&hsic.aux_wq);
+	mutex_unlock(&hsic.hsic_mutex);
+
+	dev_dbg(&pci_dev->dev,
+		"%s<----\n", __func__);
+	return;
+}
+
+static void wakeup_work(struct work_struct *work)
+{
+	dev_dbg(&pci_dev->dev,
+		"%s---->\n", __func__);
+
+	if (!mutex_trylock(&hsic.hsic_mutex)) {
+		queue_delayed_work(hsic.work_queue, &hsic.wakeup_work,
+				   msecs_to_jiffies(10));
+		return;
+	}
+
+	if (hsic.modem_dev == NULL) {
+		mutex_unlock(&hsic.hsic_mutex);
+		dev_dbg(&pci_dev->dev,
+			"%s---->Modem not created\n", __func__);
+		return;
+	}
+
+	pm_runtime_get_sync(&hsic.modem_dev->dev);
+	/* need some time to wait modem device resume */
+	usleep_range(500, 600);
+	pm_runtime_put_sync(&hsic.modem_dev->dev);
+
+	mutex_unlock(&hsic.hsic_mutex);
+
+	dev_dbg(&pci_dev->dev,
+		"%s<----\n", __func__);
+
+	return;
+}
+
+static int hsic_debugfs_host_resume_show(struct seq_file *s, void *unused)
+{
+	return 0;
+}
+static int hsic_debugfs_host_resume_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, hsic_debugfs_host_resume_show,
+						inode->i_private);
+}
+
+
+static ssize_t hsic_debugfs_host_resume_write(struct file *file,
+		const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	struct seq_file *s = file->private_data;
+	struct usb_hcd *hcd = s->private;
+
+	dev_dbg(hcd->self.controller, "wakeup hsic\n");
+	queue_delayed_work(hsic.work_queue, &hsic.wakeup_work, 0);
+
+	return count;
+}
+
+
+static const struct file_operations hsic_debugfs_host_resume_fops = {
+	.open			= hsic_debugfs_host_resume_open,
+	.read			= seq_read,
+	.write			= hsic_debugfs_host_resume_write,
+	.release		= single_release,
+};
+
+static ssize_t hsic_port_enable_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", hsic_enable);
+}
+
+static ssize_t hsic_port_enable_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	int retval;
+	int org_req;
+
+	if (size > HSIC_ENABLE_SIZE) {
+		dev_dbg(dev, "Invalid, size = %zu\n", size);
+		return -EINVAL;
+	}
+
+	if (sscanf(buf, "%d", &org_req) != 1) {
+		dev_dbg(dev, "Invalid, value\n");
+		return -EINVAL;
+	}
+
+	/* Free the aux irq */
+	hsic_aux_irq_free();
+	dev_dbg(dev,
+		"%s---->AUX IRQ is disabled\n", __func__);
+
+	if (delayed_work_pending(&hsic.hsic_aux)) {
+		dev_dbg(dev,
+			"%s---->Wait for delayed work finish\n",
+			 __func__);
+		retval = wait_event_interruptible(hsic.aux_wq,
+						hsic.hsic_aux_finish);
+		if (retval < 0)
+			return retval;
+
+		if (org_req)
+			return size;
+	}
+
+	mutex_lock(&hsic.hsic_mutex);
+	if (org_req) {
+		dev_dbg(dev, "enable hsic\n");
+		/* add this due to hcd release
+		 doesn't set hcd to NULL */
+		if (hsic.hsic_stopped == 0)
+			ehci_hsic_stop_host(pci_dev);
+		hsic_enter_exit_d3(1);
+		usleep_range(5000, 6000);
+		hsic_enter_exit_d3(0);
+		retval = ehci_hsic_start_host(pci_dev);
+		if (retval < 0) {
+			dev_err(&pci_dev->dev,
+				"start host fail, retval %d\n", retval);
+			mutex_unlock(&hsic.hsic_mutex);
+			return retval;
+		}
+
+		hsic.autosuspend_enable = 0;
+		usb_disable_autosuspend(hsic.rh_dev);
+	} else {
+		dev_dbg(dev, "disable hsic\n");
+
+		/* If device enable auto suspend, disable it before disable hsic */
+		if (hsic.autosuspend_enable) {
+			dev_dbg(dev, "disable pm\n");
+			if (hsic.modem_dev != NULL) {
+				usb_disable_autosuspend(hsic.modem_dev);
+				hsic.autosuspend_enable = 0;
+			}
+			if (hsic.rh_dev != NULL) {
+				usb_disable_autosuspend(hsic.rh_dev);
+				hsic.autosuspend_enable = 0;
+			}
+		}
+
+		/* add this due to hcd release
+		 doesn't set hcd to NULL */
+		if (hsic.hsic_stopped == 0)
+			ehci_hsic_stop_host(pci_dev);
+	}
+
+	mutex_unlock(&hsic.hsic_mutex);
+
+	return size;
+}
+
+static DEVICE_ATTR(hsic_enable, S_IRUGO | S_IWUSR | S_IROTH,
+		hsic_port_enable_show, hsic_port_enable_store);
+
+static ssize_t hsic_port_inactivityDuration_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", hsic.port_inactivityDuration);
+}
+
+static ssize_t hsic_port_inactivityDuration_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	unsigned duration;
+
+	if (size > HSIC_DURATION_SIZE) {
+		dev_dbg(dev, "Invalid, size = %zu\n", size);
+		return -EINVAL;
+	}
+
+	if (sscanf(buf, "%d", &duration) != 1) {
+		dev_dbg(dev, "Invalid, value\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&hsic.hsic_mutex);
+	hsic.port_inactivityDuration = duration;
+	dev_dbg(dev, "port Duration: %d\n",
+		hsic.port_inactivityDuration);
+	if (hsic.modem_dev != NULL) {
+		pm_runtime_set_autosuspend_delay
+		(&hsic.modem_dev->dev, hsic.port_inactivityDuration);
+	}
+
+	mutex_unlock(&hsic.hsic_mutex);
+	return size;
+}
+
+static DEVICE_ATTR(L2_inactivityDuration,
+		S_IRUGO | S_IWUSR | S_IROTH,
+		hsic_port_inactivityDuration_show,
+		 hsic_port_inactivityDuration_store);
+
+/* Interfaces for auto suspend */
+static ssize_t hsic_autosuspend_enable_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", hsic.autosuspend_enable);
+}
+
+static ssize_t hsic_autosuspend_enable_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	int org_req;
+
+	if (size > HSIC_ENABLE_SIZE) {
+		dev_dbg(dev, "Invalid, size = %zu\n", size);
+		return -EINVAL;
+	}
+
+	if (sscanf(buf, "%d", &org_req) != 1) {
+		dev_dbg(dev, "Invalid, value\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&hsic.hsic_mutex);
+	hsic.autosuspend_enable = org_req;
+
+	if (hsic.modem_dev != NULL) {
+		if (hsic.autosuspend_enable == 0) {
+			dev_dbg(dev, "Modem dev autosuspend disable\n");
+			usb_disable_autosuspend(hsic.modem_dev);
+		} else {
+			dev_dbg(dev, "Modem dev autosuspend enable\n");
+			usb_enable_autosuspend(hsic.modem_dev);
+			hsic_wakeup_irq_init();
+		}
+	}
+	if (hsic.rh_dev != NULL) {
+		if (hsic.autosuspend_enable == 0) {
+			dev_dbg(dev, "Port dev autosuspend disable\n");
+			usb_disable_autosuspend(hsic.rh_dev);
+		} else {
+			dev_dbg(dev, "Port dev autosuspend enable\n");
+			usb_enable_autosuspend(hsic.rh_dev);
+		}
+	}
+	mutex_unlock(&hsic.hsic_mutex);
+	return size;
+}
+
+static DEVICE_ATTR(L2_autosuspend_enable, S_IRUGO | S_IWUSR | S_IROTH,
+		hsic_autosuspend_enable_show,
+		 hsic_autosuspend_enable_store);
+
+static ssize_t hsic_pm_enable_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", hsic.autosuspend_enable);
+}
+
+static ssize_t hsic_pm_enable_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	int rc;
+	unsigned int pm_enable;
+
+	if (size > HSIC_ENABLE_SIZE)
+		return -EINVAL;
+
+	if (sscanf(buf, "%u", &pm_enable) != 1) {
+		dev_dbg(dev, "Invalid, value\n");
+		return -EINVAL;
+	}
+
+	/*  pm_enable definition: 0b00 - L1 & L2 disabled, 0b01 - L2 only
+	  *  0b10 - L1 only, 0b11 - L1 + L2 enabled
+	   */
+	switch (pm_enable) {
+	case 0:
+		rc = hsic_autosuspend_enable_store(dev, attr, "0", size);
+		break;
+	case 1:
+		rc = hsic_autosuspend_enable_store(dev, attr, "1", size);
+		break;
+	case 2: /*Reserved for L1 only*/
+		rc = -EINVAL;
+		break;
+	case 3: /* Reserved for L1 + L2*/
+		rc = -EINVAL;
+		break;
+	default:
+		rc = -EINVAL;
+	}
+
+	if (rc == size)
+		return size;
+	else
+		return -EINVAL;
+}
+
+static DEVICE_ATTR(pm_enable, S_IRUGO | S_IWUSR | S_IROTH,
+		hsic_pm_enable_show,
+		 hsic_pm_enable_store);
+
+static ssize_t hsic_bus_inactivityDuration_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", hsic.bus_inactivityDuration);
+}
+
+static ssize_t hsic_bus_inactivityDuration_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	unsigned duration;
+
+	if (size > HSIC_DURATION_SIZE) {
+		dev_dbg(dev, "Invalid, size = %zu\n", size);
+		return -EINVAL;
+	}
+
+	if (sscanf(buf, "%d", &duration) != 1) {
+		dev_dbg(dev, "Invalid, value\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&hsic.hsic_mutex);
+	hsic.bus_inactivityDuration = duration;
+	dev_dbg(dev, "bus Duration: %d\n",
+		hsic.bus_inactivityDuration);
+	if (hsic.rh_dev != NULL)
+		pm_runtime_set_autosuspend_delay
+			(&hsic.rh_dev->dev, hsic.bus_inactivityDuration);
+
+	mutex_unlock(&hsic.hsic_mutex);
+	return size;
+}
+
+static DEVICE_ATTR(bus_inactivityDuration,
+		S_IRUGO | S_IWUSR | S_IROTH,
+		hsic_bus_inactivityDuration_show,
+		 hsic_bus_inactivityDuration_store);
+
+static ssize_t hsic_remoteWakeup_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", hsic.remoteWakeup_enable);
+}
+
+static ssize_t hsic_remoteWakeup_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	int org_req;
+
+	if (size > HSIC_ENABLE_SIZE) {
+		dev_dbg(dev, "Invalid, size = %zu\n", size);
+		return -EINVAL;
+	}
+
+	if (sscanf(buf, "%d", &org_req) != 1) {
+		dev_dbg(dev, "Invalid, value\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&hsic.hsic_mutex);
+	hsic.remoteWakeup_enable = org_req;
+
+	if ((hsic.modem_dev != NULL) &&
+		(hsic.rh_dev != NULL)) {
+		if (hsic.remoteWakeup_enable) {
+			dev_dbg(dev, "Modem dev remote wakeup enabled\n");
+			device_set_wakeup_capable(&hsic.modem_dev->dev, 1);
+			device_set_wakeup_capable(&hsic.rh_dev->dev, 1);
+		} else {
+			dev_dbg(dev, "Modem dev remote wakeup disabled\n");
+			device_set_wakeup_capable(&hsic.modem_dev->dev, 0);
+			device_set_wakeup_capable(&hsic.rh_dev->dev, 0);
+		}
+		pm_runtime_get_sync(&hsic.modem_dev->dev);
+		pm_runtime_put_sync(&hsic.modem_dev->dev);
+	}
+
+	mutex_unlock(&hsic.hsic_mutex);
+	return size;
+}
+
+static DEVICE_ATTR(remoteWakeup, S_IRUGO | S_IWUSR | S_IROTH,
+		hsic_remoteWakeup_show, hsic_remoteWakeup_store);
+
+static int hsic_debugfs_registers_show(struct seq_file *s, void *unused)
+{
+	struct usb_hcd	*hcd = s->private;
+
+	pm_runtime_get_sync(hcd->self.controller);
+
+	seq_printf(s,
+		"\n"
+		"USBCMD = 0x%08x\n"
+		"USBSTS = 0x%08x\n"
+		"USBINTR = 0x%08x\n"
+		"ASYNCLISTADDR = 0x%08x\n"
+		"PORTSC1 = 0x%08x\n"
+		"PORTSC2 = 0x%08x\n"
+		"HOSTPC1 = 0x%08x\n"
+		"HOSTPC2 = 0x%08x\n"
+		"OTGSC = 0x%08x\n"
+		"USBMODE = 0x%08x\n",
+		readl(hcd->regs + 0x30),
+		readl(hcd->regs + 0x34),
+		readl(hcd->regs + 0x38),
+		readl(hcd->regs + 0x48),
+		readl(hcd->regs + 0x74),
+		readl(hcd->regs + 0x78),
+		readl(hcd->regs + 0xb4),
+		readl(hcd->regs + 0xb8),
+		readl(hcd->regs + 0xf4),
+		readl(hcd->regs + 0xf8)
+		);
+
+	pm_runtime_put_sync(hcd->self.controller);
+
+	return 0;
+}
+
+static int hsic_debugfs_registers_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, hsic_debugfs_registers_show, inode->i_private);
+}
+
+static const struct file_operations hsic_debugfs_registers_fops = {
+	.open			= hsic_debugfs_registers_open,
+	.read			= seq_read,
+	.release		= single_release,
+};
+
+static int create_class_device_files(void)
+{
+	int retval;
+
+	hsic_class = class_create(NULL, "hsic");
+
+	if (IS_ERR(hsic_class))
+			return -EFAULT;
+
+	hsic_class_dev = device_create(hsic_class, &pci_dev->dev,
+			MKDEV(0, 0), NULL, "hsic0");
+
+	if (IS_ERR(hsic_class_dev)) {
+		retval = -EFAULT;
+		goto hsic_class_fail;
+	}
+
+	retval = device_create_file(hsic_class_dev, &dev_attr_hsic_enable);
+	if (retval < 0) {
+		dev_dbg(&pci_dev->dev, "error create hsic_enable\n");
+		goto hsic_class_dev_fail;
+	}
+	hsic.autosuspend_enable = 0;
+	retval = device_create_file(hsic_class_dev,
+			 &dev_attr_L2_autosuspend_enable);
+	if (retval < 0) {
+		dev_dbg(&pci_dev->dev, "Error create autosuspend_enable\n");
+		goto hsic_class_dev_fail;
+	}
+
+	hsic.port_inactivityDuration = HSIC_PORT_INACTIVITYDURATION;
+	retval = device_create_file(hsic_class_dev,
+			 &dev_attr_L2_inactivityDuration);
+	if (retval < 0) {
+		dev_dbg(&pci_dev->dev, "Error create port_inactiveDuration\n");
+		goto hsic_class_dev_fail;
+	}
+
+	hsic.bus_inactivityDuration = HSIC_BUS_INACTIVITYDURATION;
+	retval = device_create_file(hsic_class_dev,
+			 &dev_attr_bus_inactivityDuration);
+	if (retval < 0) {
+		dev_dbg(&pci_dev->dev, "Error create bus_inactiveDuration\n");
+		goto hsic_class_dev_fail;
+	}
+
+	hsic.remoteWakeup_enable = HSIC_REMOTEWAKEUP;
+	retval = device_create_file(hsic_class_dev, &dev_attr_remoteWakeup);
+	if (retval < 0) {
+		dev_dbg(&pci_dev->dev, "Error create remoteWakeup\n");
+		goto hsic_class_dev_fail;
+	}
+
+	retval = device_create_file(hsic_class_dev,
+		 &dev_attr_pm_enable);
+
+	if (retval == 0)
+		return retval;
+
+	dev_dbg(&pci_dev->dev, "Error create pm_enable\n");
+
+hsic_class_dev_fail:
+	device_destroy(hsic_class, hsic_class_dev->devt);
+hsic_class_fail:
+	class_destroy(hsic_class);
+
+	return retval;
+}
+
+/* FixMe: create_device_files() need to be removed */
+static int create_device_files()
+{
+	int retval;
+
+	retval = device_create_file(&pci_dev->dev, &dev_attr_hsic_enable);
+	if (retval < 0) {
+		dev_dbg(&pci_dev->dev, "error create hsic_enable\n");
+		goto hsic_enable;
+	}
+	hsic.autosuspend_enable = 0;
+	retval = device_create_file(&pci_dev->dev,
+			 &dev_attr_L2_autosuspend_enable);
+	if (retval < 0) {
+		dev_dbg(&pci_dev->dev, "Error create autosuspend_enable\n");
+		goto autosuspend;
+	}
+
+	hsic.port_inactivityDuration = HSIC_PORT_INACTIVITYDURATION;
+	retval = device_create_file(&pci_dev->dev,
+			 &dev_attr_L2_inactivityDuration);
+	if (retval < 0) {
+		dev_dbg(&pci_dev->dev, "Error create port_inactiveDuration\n");
+		goto port_duration;
+	}
+
+	hsic.bus_inactivityDuration = HSIC_BUS_INACTIVITYDURATION;
+	retval = device_create_file(&pci_dev->dev,
+			 &dev_attr_bus_inactivityDuration);
+	if (retval < 0) {
+		dev_dbg(&pci_dev->dev, "Error create bus_inactiveDuration\n");
+		goto bus_duration;
+	}
+
+	hsic.remoteWakeup_enable = HSIC_REMOTEWAKEUP;
+	retval = device_create_file(&pci_dev->dev, &dev_attr_remoteWakeup);
+	if (retval == 0)
+		return retval;
+
+	dev_dbg(&pci_dev->dev, "Error create remoteWakeup\n");
+
+	device_remove_file(&pci_dev->dev, &dev_attr_bus_inactivityDuration);
+bus_duration:
+	device_remove_file(&pci_dev->dev, &dev_attr_L2_inactivityDuration);
+port_duration:
+	device_remove_file(&pci_dev->dev, &dev_attr_L2_autosuspend_enable);
+autosuspend:
+/* host_resume: */
+	device_remove_file(&pci_dev->dev, &dev_attr_hsic_enable);
+hsic_enable:
+/* hsic_class_fail: */
+
+	return retval;
+}
+
+static void hsic_debugfs_cleanup(void)
+{
+	debugfs_remove_recursive(hsic_debugfs_root);
+	hsic_debugfs_root = NULL;
+}
+
+static int hsic_debugfs_init(struct usb_hcd *hcd)
+{
+	int retval = 0;
+	struct dentry *file;
+
+	if (!hsic_debugfs_root) {
+		hsic_debugfs_root = debugfs_create_dir("hsic", usb_debug_root);
+		if (!hsic_debugfs_root) {
+			retval = -ENOMEM;
+			dev_dbg(hcd->self.controller, "	Error create debugfs root failed !");
+			return retval;
+		}
+		file = debugfs_create_file(
+				"registers",
+				S_IRUGO,
+				hsic_debugfs_root,
+				hcd,
+				&hsic_debugfs_registers_fops);
+		if (!file) {
+			retval = -ENOMEM;
+			dev_dbg(hcd->self.controller, "	Error create debugfs file registers failed !");
+			goto remove_debugfs;
+		}
+		file = debugfs_create_file(
+				"host_resume",
+				S_IRUGO | S_IWUSR | S_IROTH,
+				hsic_debugfs_root,
+				hcd,
+				&hsic_debugfs_host_resume_fops);
+		if (!file) {
+			retval = -ENOMEM;
+			dev_dbg(hcd->self.controller, "	Error create debugfs file host_resume failed !");
+			goto remove_debugfs;
+		}
+		ipc_debug_control = debugfs_create_file("ipc_control", S_IRUGO,
+						hsic_debugfs_root, NULL,
+						&ipc_control_fops);
+		if (!ipc_debug_control) {
+			retval = -ENOENT;
+			goto remove_debugfs;
+		}
+
+		ipc_stats = debugfs_create_file("ipc_stats", S_IRUGO,
+						hsic_debugfs_root, NULL,
+						&ipc_stats_fops);
+		if (!ipc_stats) {
+			retval = -ENOENT;
+			goto remove_debugfs;
+		}
+
+		stats_enable = STATS_DISABLE;
+		ipc_counter_init();
+	}
+
+	if (retval != 0)
+		goto remove_debugfs;
+
+	return retval;
+remove_debugfs:
+	hsic_debugfs_cleanup();
+	return retval;
+}
+
+static int ehci_hsic_probe(struct pci_dev *pdev,
+				const struct pci_device_id *id)
+{
+	struct hc_driver *driver;
+	struct usb_hcd *hcd;
+	struct ehci_hcd *ehci;
+	int irq, retval;
+
+	pr_debug("initializing Intel EHCI HSIC Host Controller\n");
+
+	if (usb_disabled())
+		return -ENODEV;
+
+	if (!id)
+		return -EINVAL;
+
+	pci_dev = pdev;
+	if (pci_enable_device(pdev) < 0)
+		return -ENODEV;
+	pdev->current_state = PCI_D0;
+
+	wake_lock_init(&hsic.resume_wake_lock,
+		WAKE_LOCK_SUSPEND, "hsic_aux2_wlock");
+	wake_lock_init(&hsic.s3_wake_lock,
+			WAKE_LOCK_SUSPEND, "hsic_s3_wlock");
+	hsic.hsic_pm_nb.notifier_call = hsic_pm_notify;
+	usb_register_notify(&hsic.hsic_pm_nb);
+	hsic.hsic_s3_entry_nb.notifier_call = hsic_s3_entry_notify;
+	register_pm_notifier(&hsic.hsic_s3_entry_nb);
+
+	/* we need not call pci_enable_dev since otg transceiver already take
+	 * the control of this device and this probe actaully gets called by
+	 * otg transceiver driver with HNP protocol.
+	 */
+	irq = pdev->irq;
+	if (!pdev->irq) {
+		dev_dbg(&pdev->dev, "No IRQ.\n");
+		retval = -ENODEV;
+		goto disable_pci;
+	}
+
+	driver = (struct hc_driver *)id->driver_data;
+	if (!driver)
+		return -EINVAL;
+
+	/* AUX GPIO init */
+	retval = hsic_aux_gpio_init();
+	if (retval < 0) {
+		dev_err(&pdev->dev, "AUX GPIO init fail\n");
+		retval = -ENODEV;
+		goto disable_pci;
+	}
+
+	/* AUX GPIO init */
+	retval = hsic_wakeup_gpio_init();
+	if (retval < 0) {
+		dev_err(&pdev->dev, "Wakeup GPIO init fail\n");
+		retval = -ENODEV;
+		goto disable_pci;
+	}
+
+	hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
+	if (!hcd) {
+		retval = -ENOMEM;
+		goto disable_pci;
+	}
+
+	ehci = hcd_to_ehci(hcd);
+
+	hcd->rsrc_start = pci_resource_start(pdev, 0);
+	hcd->rsrc_len = pci_resource_len(pdev, 0);
+	if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
+			driver->description)) {
+		dev_dbg(&pdev->dev, "controller already in use\n");
+		retval = -EBUSY;
+		goto clear_companion;
+	}
+
+	hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len);
+	if (hcd->regs == NULL) {
+		dev_dbg(&pdev->dev, "error mapping memory\n");
+		retval = -EFAULT;
+		goto release_mem_region;
+	}
+
+	pci_set_master(pdev);
+
+	if (hsic.hsic_enable_created == 0) {
+		retval = create_device_files();
+		if (retval < 0) {
+			dev_dbg(&pdev->dev, "error create device files\n");
+			goto release_mem_region;
+		}
+
+		retval = create_class_device_files();
+		if (retval < 0) {
+			dev_dbg(&pdev->dev, "error create device files\n");
+			goto release_mem_region;
+		}
+		hsic.hsic_enable_created = 1;
+	}
+
+	if (hsic.hsic_mutex_init == 0) {
+		mutex_init(&hsic.hsic_mutex);
+		mutex_init(&hsic.wlock_mutex);
+		hsic.hsic_mutex_init = 1;
+	}
+
+	if (hsic.aux_wq_init == 0) {
+		init_waitqueue_head(&hsic.aux_wq);
+		hsic.aux_wq_init = 1;
+	}
+
+	hsic.work_queue = create_singlethread_workqueue("hsic");
+	INIT_DELAYED_WORK(&hsic.wakeup_work, wakeup_work);
+	INIT_DELAYED_WORK(&(hsic.hsic_aux), hsic_aux_work);
+
+	hcd->hsic_notify = hsic_notify;
+
+	retval = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED | IRQF_NO_SUSPEND);
+	if (retval != 0)
+		goto unmap_registers;
+	dev_set_drvdata(&pdev->dev, hcd);
+	/* Clear phy low power mode, enable phy clock */
+	ehci_hsic_phy_power(ehci, 0);
+
+	if (pci_dev_run_wake(pdev))
+		pm_runtime_put_noidle(&pdev->dev);
+
+	if (!enabling_disabling) {
+		/* Check here to avoid to call pm_runtime_put_noidle() twice */
+		if (!pci_dev_run_wake(pdev))
+			pm_runtime_put_noidle(&pdev->dev);
+
+		pm_runtime_allow(&pdev->dev);
+	}
+	hsic.hsic_stopped = 0;
+	hsic_enable = 1;
+	hsic.s3_rt_state = RESUMED;
+	s3_wake_lock();
+	hsic_debugfs_init(hcd);
+
+	if (current->pid == INIT_TASK_PID) {
+		dev_info(&pdev->dev, "disable hsic on driver init!\n");
+		ehci_hsic_stop_host(pdev);
+	}
+
+	return retval;
+unmap_registers:
+	destroy_workqueue(hsic.work_queue);
+	if (driver->flags & HCD_MEMORY) {
+		iounmap(hcd->regs);
+release_mem_region:
+		release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+	} else
+		release_region(hcd->rsrc_start, hcd->rsrc_len);
+clear_companion:
+	dev_set_drvdata(&pdev->dev, NULL);
+	usb_put_hcd(hcd);
+disable_pci:
+	pci_disable_device(pdev);
+	dev_err(&pdev->dev, "init %s fail, %d\n", dev_name(&pdev->dev), retval);
+	wake_lock_destroy(&(hsic.resume_wake_lock));
+	wake_lock_destroy(&hsic.s3_wake_lock);
+	return retval;
+}
+
+static void ehci_hsic_remove(struct pci_dev *pdev)
+{
+	struct usb_hcd    *hcd = pci_get_drvdata(pdev);
+	struct ehci_hcd   *ehci = hcd_to_ehci(hcd);
+
+	if (!hcd)
+		return;
+
+	hsic.hsic_stopped = 1;
+	hsic_enable = 0;
+
+	if (pci_dev_run_wake(pdev))
+		pm_runtime_get_noresume(&pdev->dev);
+
+	if (!enabling_disabling) {
+		if (!pci_dev_run_wake(pdev))
+			pm_runtime_get_noresume(&pdev->dev);
+
+		pm_runtime_forbid(&pdev->dev);
+	}
+
+	/* Free the aux irq */
+	hsic_aux_irq_free();
+	hsic_wakeup_irq_free();
+
+	/* Fake an interrupt request in order to give the driver a chance
+	 * to test whether the controller hardware has been removed (e.g.,
+	 * cardbus physical eject).
+	 */
+	local_irq_disable();
+	usb_hcd_irq(0, hcd);
+	local_irq_enable();
+
+	usb_remove_hcd(hcd);
+
+#if 0
+	ehci_hsic_port_power(ehci, 0);
+#endif
+	/* Set phy low power mode, disable phy clock */
+	ehci_hsic_phy_power(ehci, 1);
+
+	if (hcd->driver->flags & HCD_MEMORY) {
+		iounmap(hcd->regs);
+		release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+	} else {
+		release_region(hcd->rsrc_start, hcd->rsrc_len);
+	}
+
+	usb_put_hcd(hcd);
+	gpio_free(hsic.aux_gpio);
+	gpio_free(hsic.wakeup_gpio);
+	pci_disable_device(pdev);
+
+	cancel_delayed_work_sync(&hsic.wakeup_work);
+
+	destroy_workqueue(hsic.work_queue);
+	wake_lock_destroy(&(hsic.resume_wake_lock));
+	wake_lock_destroy(&hsic.s3_wake_lock);
+	usb_unregister_notify(&hsic.hsic_pm_nb);
+	unregister_pm_notifier(&hsic.hsic_s3_entry_nb);
+	hsic_debugfs_cleanup();
+}
+
+static void ehci_hsic_shutdown(struct pci_dev *pdev)
+{
+	struct usb_hcd *hcd;
+
+	mutex_lock(&hsic.hsic_mutex);
+	if (hsic.hsic_stopped == 1) {
+		dev_dbg(&pdev->dev, "hsic stopped return\n");
+		mutex_unlock(&hsic.hsic_mutex);
+		return;
+	}
+	mutex_unlock(&hsic.hsic_mutex);
+
+	dev_dbg(&pdev->dev, "%s --->\n", __func__);
+	hcd = pci_get_drvdata(pdev);
+	if (!hcd)
+		return;
+
+	if (test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags) &&
+			hcd->driver->shutdown) {
+		hcd->driver->shutdown(hcd);
+		pci_disable_device(pdev);
+	}
+	dev_dbg(&pdev->dev, "%s <---\n", __func__);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int tangier_hsic_suspend_noirq(struct device *dev)
+{
+	int	retval;
+
+	mutex_lock(&hsic.hsic_mutex);
+	if (hsic.hsic_stopped == 1) {
+		dev_dbg(dev, "hsic stopped return\n");
+		mutex_unlock(&hsic.hsic_mutex);
+		return 0;
+	}
+
+	dev_dbg(dev, "%s --->\n", __func__);
+	retval = usb_hcd_pci_pm_ops.suspend_noirq(dev);
+	dev_dbg(dev, "%s <--- retval = %d\n", __func__, retval);
+	mutex_unlock(&hsic.hsic_mutex);
+	return retval;
+}
+
+static int tangier_hsic_suspend(struct device *dev)
+{
+	int	retval;
+
+	mutex_lock(&hsic.hsic_mutex);
+	if (hsic.hsic_stopped == 1) {
+		dev_dbg(dev, "hsic stopped return\n");
+		mutex_unlock(&hsic.hsic_mutex);
+		return 0;
+	}
+	mutex_unlock(&hsic.hsic_mutex);
+
+	dev_dbg(dev, "%s --->\n", __func__);
+	retval = usb_hcd_pci_pm_ops.suspend(dev);
+	count_ipc_stats(retval, D3_ENTRY);
+	dev_dbg(dev, "%s <--- retval = %d\n", __func__, retval);
+	return retval;
+}
+
+static int tangier_hsic_resume_noirq(struct device *dev)
+{
+	int	retval;
+
+	mutex_lock(&hsic.hsic_mutex);
+	if (hsic.hsic_stopped == 1) {
+		dev_dbg(dev, "hsic stopped return\n");
+		mutex_unlock(&hsic.hsic_mutex);
+		return 0;
+	}
+
+	dev_dbg(dev, "%s --->\n", __func__);
+	retval = usb_hcd_pci_pm_ops.resume_noirq(dev);
+	hsic.s3_rt_state = RESUMED;
+	dev_dbg(dev, "%s <--- retval = %d\n", __func__, retval);
+	mutex_unlock(&hsic.hsic_mutex);
+	return retval;
+}
+
+static int tangier_hsic_resume(struct device *dev)
+{
+	int	retval;
+
+	mutex_lock(&hsic.hsic_mutex);
+	if (hsic.hsic_stopped == 1) {
+		dev_dbg(dev, "hsic stopped return\n");
+		mutex_unlock(&hsic.hsic_mutex);
+		return 0;
+	}
+	mutex_unlock(&hsic.hsic_mutex);
+
+	dev_dbg(dev, "%s --->\n", __func__);
+	retval = usb_hcd_pci_pm_ops.resume(dev);
+	count_ipc_stats(retval, D3_EXIT);
+	dev_dbg(dev, "%s <--- retval = %d\n", __func__, retval);
+	return retval;
+}
+#else
+#define tangier_hsic_suspend_noirq	NULL
+#define tangier_hsic_suspend		NULL
+#define tangier_hsic_resume_noirq	NULL
+#define tangier_hsic_resume		NULL
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+/* Runtime PM */
+static int tangier_hsic_runtime_suspend(struct device *dev)
+{
+	int	retval;
+
+	if (hsic.hsic_stopped == 1) {
+		dev_dbg(dev, "hsic stopped return\n");
+		return 0;
+	}
+
+	dev_dbg(dev, "%s --->\n", __func__);
+	retval = usb_hcd_pci_pm_ops.runtime_suspend(dev);
+	s3_wake_unlock();
+	count_ipc_stats(retval, D0I3_ENTRY);
+	dev_dbg(dev, "%s <--- retval = %d\n", __func__, retval);
+	return retval;
+}
+
+static int tangier_hsic_runtime_resume(struct device *dev)
+{
+	int			retval;
+
+	if (hsic.hsic_stopped == 1) {
+		dev_dbg(dev, "hsic stopped return\n");
+		return 0;
+	}
+
+	dev_dbg(dev, "%s --->\n", __func__);
+	retval = usb_hcd_pci_pm_ops.runtime_resume(dev);
+	count_ipc_stats(retval, D0I3_EXIT);
+	dev_dbg(dev, "%s <--- retval = %d\n", __func__, retval);
+
+	return retval;
+}
+#else
+#define tangier_hsic_runtime_suspend NULL
+#define tangier_hsic_runtime_resume NULL
+#endif
+
+
+static DEFINE_PCI_DEVICE_TABLE(pci_hsic_ids) = {
+	{
+		.vendor =	0x8086,
+		.device =	0x119D,
+		.subvendor =	PCI_ANY_ID,
+		.subdevice =	PCI_ANY_ID,
+		.driver_data =  (unsigned long) &ehci_pci_hc_driver,
+	},
+	{ /* end: all zeroes */ }
+};
+
+static const struct dev_pm_ops tangier_hsic_pm_ops = {
+	.runtime_suspend = tangier_hsic_runtime_suspend,
+	.runtime_resume = tangier_hsic_runtime_resume,
+	.suspend = tangier_hsic_suspend,
+	.suspend_noirq = tangier_hsic_suspend_noirq,
+	.resume = tangier_hsic_resume,
+	.resume_noirq = tangier_hsic_resume_noirq,
+};
+
+/* Intel HSIC EHCI driver */
+static struct pci_driver ehci_hsic_driver = {
+	.name =	"ehci-intel-hsic",
+	.id_table =	pci_hsic_ids,
+
+	.probe =	ehci_hsic_probe,
+	.remove =	ehci_hsic_remove,
+
+#ifdef CONFIG_PM_SLEEP
+	.driver =	{
+		.pm =	&tangier_hsic_pm_ops
+	},
+#endif
+	.shutdown =	ehci_hsic_shutdown,
+};
+
+
+static int ehci_hsic_start_host(struct pci_dev  *pdev)
+{
+	int		retval;
+
+	pm_runtime_get_sync(&pdev->dev);
+	enabling_disabling = 1;
+	retval = ehci_hsic_probe(pdev, ehci_hsic_driver.id_table);
+	if (retval)
+		dev_dbg(&pdev->dev, "Failed to start host\n");
+	enabling_disabling = 0;
+	pm_runtime_put(&pdev->dev);
+
+	return retval;
+}
+EXPORT_SYMBOL_GPL(ehci_hsic_start_host);
+
+static int ehci_hsic_stop_host(struct pci_dev *pdev)
+{
+	pm_runtime_get_sync(&pdev->dev);
+	enabling_disabling = 1;
+	ehci_hsic_remove(pdev);
+	enabling_disabling = 0;
+	pm_runtime_put(&pdev->dev);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ehci_hsic_stop_host);
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index 7c978b2..857a5b1 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -212,6 +212,18 @@
 	unsigned		has_hostpc:1;
 	unsigned		has_ppcd:1; /* support per-port change bits */
 	u8			sbrn;		/* packed release number */
+	unsigned int		sram_addr;
+	unsigned int		sram_size;
+
+	/* SRAM backup memory */
+	void			*sram_swap;
+
+#ifdef CONFIG_USB_OTG
+	unsigned		has_otg:1;	/* if it is otg host*/
+	/* otg host has additional bus_suspend and bus_resume */
+	int (*otg_suspend)(struct usb_hcd *hcd);
+	int (*otg_resume)(struct usb_hcd *hcd);
+#endif
 
 	/* irq statistics */
 #ifdef EHCI_STATS
@@ -243,6 +255,9 @@
 /*-------------------------------------------------------------------------*/
 
 #include <linux/usb/ehci_def.h>
+#ifdef CONFIG_USB_EHCI_HCD_SPH
+#include <linux/usb/ehci_sph_pci.h>
+#endif
 
 /*-------------------------------------------------------------------------*/
 
@@ -806,4 +821,6 @@
 extern int	ehci_resume(struct usb_hcd *hcd, bool hibernated);
 #endif	/* CONFIG_PM */
 
+extern void ehci_stop(struct usb_hcd *hcd);
+extern int ehci_reset(struct ehci_hcd *ehci);
 #endif /* __LINUX_EHCI_HCD_H */
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index fc627fd..865946c 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -231,31 +231,26 @@
 			frame &= ~(ed->interval - 1);
 			frame |= ed->branch;
 			urb->start_frame = frame;
+			ed->last_iso = frame + ed->interval * (size - 1);
 		}
 	} else if (ed->type == PIPE_ISOCHRONOUS) {
 		u16	next = ohci_frame_no(ohci) + 1;
 		u16	frame = ed->last_iso + ed->interval;
+		u16	length = ed->interval * (size - 1);
 
 		/* Behind the scheduling threshold? */
 		if (unlikely(tick_before(frame, next))) {
 
-			/* USB_ISO_ASAP: Round up to the first available slot */
+			/* URB_ISO_ASAP: Round up to the first available slot */
 			if (urb->transfer_flags & URB_ISO_ASAP) {
 				frame += (next - frame + ed->interval - 1) &
 						-ed->interval;
 
 			/*
-			 * Not ASAP: Use the next slot in the stream.  If
-			 * the entire URB falls before the threshold, fail.
+			 * Not ASAP: Use the next slot in the stream,
+			 * no matter what.
 			 */
 			} else {
-				if (tick_before(frame + ed->interval *
-					(urb->number_of_packets - 1), next)) {
-					retval = -EXDEV;
-					usb_hcd_unlink_urb_from_ep(hcd, urb);
-					goto fail;
-				}
-
 				/*
 				 * Some OHCI hardware doesn't handle late TDs
 				 * correctly.  After retiring them it proceeds
@@ -266,9 +261,16 @@
 				urb_priv->td_cnt = DIV_ROUND_UP(
 						(u16) (next - frame),
 						ed->interval);
+				if (urb_priv->td_cnt >= urb_priv->length) {
+					++urb_priv->td_cnt;	/* Mark it */
+					ohci_dbg(ohci, "iso underrun %p (%u+%u < %u)\n",
+							urb, frame, length,
+							next);
+				}
 			}
 		}
 		urb->start_frame = frame;
+		ed->last_iso = frame + length;
 	}
 
 	/* fill the TDs and link them to the ed; and
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index 951514e..ef6782b 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -371,7 +371,7 @@
 	.remove =	usb_hcd_pci_remove,
 	.shutdown =	usb_hcd_pci_shutdown,
 
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_PM
 	.driver =	{
 		.pm =	&usb_hcd_pci_pm_ops
 	},
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
index 88731b7..37dc837 100644
--- a/drivers/usb/host/ohci-q.c
+++ b/drivers/usb/host/ohci-q.c
@@ -41,8 +41,12 @@
 __releases(ohci->lock)
 __acquires(ohci->lock)
 {
+	struct usb_host_endpoint *ep = urb->ep;
+	struct urb_priv *urb_priv;
+
 	// ASSERT (urb->hcpriv != 0);
 
+ restart:
 	urb_free_priv (ohci, urb->hcpriv);
 	urb->hcpriv = NULL;
 	if (likely(status == -EINPROGRESS))
@@ -79,6 +83,21 @@
 		ohci->hc_control &= ~(OHCI_CTRL_PLE|OHCI_CTRL_IE);
 		ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
 	}
+
+	/*
+	 * An isochronous URB that is sumitted too late won't have any TDs
+	 * (marked by the fact that the td_cnt value is larger than the
+	 * actual number of TDs).  If the next URB on this endpoint is like
+	 * that, give it back now.
+	 */
+	if (!list_empty(&ep->urb_list)) {
+		urb = list_first_entry(&ep->urb_list, struct urb, urb_list);
+		urb_priv = urb->hcpriv;
+		if (urb_priv->td_cnt > urb_priv->length) {
+			status = 0;
+			goto restart;
+		}
+	}
 }
 
 
@@ -545,7 +564,6 @@
 		td->hwCBP = cpu_to_hc32 (ohci, data & 0xFFFFF000);
 		*ohci_hwPSWp(ohci, td, 0) = cpu_to_hc16 (ohci,
 						(data & 0x0FFF) | 0xE000);
-		td->ed->last_iso = info & 0xffff;
 	} else {
 		td->hwCBP = cpu_to_hc32 (ohci, data);
 	}
@@ -994,7 +1012,7 @@
 			urb_priv->td_cnt++;
 
 			/* if URB is done, clean up */
-			if (urb_priv->td_cnt == urb_priv->length) {
+			if (urb_priv->td_cnt >= urb_priv->length) {
 				modified = completed = 1;
 				finish_urb(ohci, urb, 0);
 			}
@@ -1084,7 +1102,7 @@
 	urb_priv->td_cnt++;
 
 	/* If all this urb's TDs are done, call complete() */
-	if (urb_priv->td_cnt == urb_priv->length)
+	if (urb_priv->td_cnt >= urb_priv->length)
 		finish_urb(ohci, urb, status);
 
 	/* clean schedule:  unlink EDs that are no longer busy */
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 4c338ec..af82684 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -18,7 +18,7 @@
 #include <linux/acpi.h>
 #include <linux/dmi.h>
 #include "pci-quirks.h"
-#include "xhci-ext-caps.h"
+#include "xhci.h"
 
 
 #define UHCI_USBLEGSUP		0xc0		/* legacy support */
@@ -314,6 +314,36 @@
 }
 EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_disable);
 
+/* usb_quirk_ignore_comp_plc - If ignore PLC event for
+ * compliance/loopback mode transition.
+ * @ptr: base address of PORTSC egisters to be read.
+ * @ports: number of ports.
+ *
+ * Some xHC controller will generate PLC event when link transfer to
+ * compliance/loopback mode. By design, driver will trigger warm reset
+ * for this case which will interrupt USB3 electronic compliance test.
+ * So if want to avoid it, need to set XHCI_COMP_PLC_QUIRK during driver
+ * initialization.
+ **/
+int usb_quirk_ignore_comp_plc(void __iomem *ptr, int ports)
+{
+	int i;
+	u32 val;
+	__le32 __iomem *addr;
+
+	addr = ptr;
+	for (i = 0; i < ports; i++) {
+		val = readl(addr);
+		if (((val & PORT_PLC) && (val & PORT_PLS_MASK) == XDEV_COMP) ||
+			((val & PORT_PLC) && (val & PORT_PLS_MASK) == XDEV_LOOPBACK))
+			return 1;
+		addr += NUM_PORT_REGS;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(usb_quirk_ignore_comp_plc);
+
 void usb_amd_quirk_pll_enable(void)
 {
 	usb_amd_quirk_pll(0);
@@ -944,6 +974,61 @@
 	iounmap(base);
 }
 
+/* PCI Quirks for Intel MOFD OTG3 controller
+ *
+ * For FS device connection, there have some noise after D+ drive stage.
+ * The USB PHY treat the noise as chirp J which actually haven't get the
+ * chirp J target defined in USB spec (D+ - D- > 300mv). Then controller
+ * trying to do HS negotiation, finally get FS device disconnect after port
+ * reset.
+ *
+ * This is UTMI PHY bug which violate spec for detect chirp J.
+ *
+ * The WA is set UTMI PHY register to avoid the noise.
+ *
+ **/
+#define USB2_COMPBG_ADDR 0xf90b1110
+#define USB2_COMPBG_HSSQREFBEN(x)	((x & 0x3) << 11)
+#define USB2_COMPBG_HSSQREFEN(x)	((x & 0x3) << 13)
+#define USB2_COMPBG_HSSQREFEN_MASK	(0x3 << 13)
+#define USB2_COMPBG_HSSQREFBEN_MASK	(0x3 << 11)
+static void __iomem *usb2_compbg;
+int quirk_intel_xhci_pr_init(bool init)
+{
+
+	if (init) {
+		usb2_compbg = ioremap_nocache(USB2_COMPBG_ADDR, 4);
+		if (!usb2_compbg)
+			return -ENOMEM;
+	} else
+		iounmap(usb2_compbg);
+
+	return 0;
+}
+
+int quirk_intel_xhci_port_reset(struct device *dev, bool post)
+{
+	u32 val;
+	static u32 original_val;
+
+	if (!usb2_compbg)
+		return -ENOMEM;
+
+	if (!post) {
+		dev_warn(dev, "Enable UTMI PHY FS WA\n");
+		val = readl(usb2_compbg);
+		original_val = val;
+		val &= ~(USB2_COMPBG_HSSQREFEN_MASK | USB2_COMPBG_HSSQREFBEN_MASK);
+		val |= USB2_COMPBG_HSSQREFEN(3) | USB2_COMPBG_HSSQREFBEN(1);
+		writel(val, usb2_compbg);
+	} else {
+		dev_warn(dev, "Disable UTMI PHY FS WA\n");
+		writel(original_val, usb2_compbg);
+	}
+
+	return 0;
+}
+
 static void quirk_usb_early_handoff(struct pci_dev *pdev)
 {
 	/* Skip Netlogic mips SoC's internal PCI USB controller.
diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h
index 7f69a39..57d3311 100644
--- a/drivers/usb/host/pci-quirks.h
+++ b/drivers/usb/host/pci-quirks.h
@@ -17,5 +17,8 @@
 static inline void usb_amd_dev_put(void) {}
 static inline void usb_disable_xhci_ports(struct pci_dev *xhci_pdev) {}
 #endif  /* CONFIG_PCI */
+int usb_quirk_ignore_comp_plc(void __iomem *ptr, int ports);
+int quirk_intel_xhci_pr_init(bool init);
+int quirk_intel_xhci_port_reset(struct device *dev, bool post);
 
 #endif  /*  __LINUX_USB_PCI_QUIRKS_H  */
diff --git a/drivers/usb/host/uhci-pci.c b/drivers/usb/host/uhci-pci.c
index c300bd2f7..0f228c4 100644
--- a/drivers/usb/host/uhci-pci.c
+++ b/drivers/usb/host/uhci-pci.c
@@ -293,7 +293,7 @@
 	.remove =	usb_hcd_pci_remove,
 	.shutdown =	uhci_shutdown,
 
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_PM
 	.driver =	{
 		.pm =	&usb_hcd_pci_pm_ops
 	},
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
index 041c6dd..da6f56d 100644
--- a/drivers/usb/host/uhci-q.c
+++ b/drivers/usb/host/uhci-q.c
@@ -1303,7 +1303,7 @@
 		}
 
 		/* Fell behind? */
-		if (uhci_frame_before_eq(frame, next)) {
+		if (!uhci_frame_before_eq(next, frame)) {
 
 			/* USB_ISO_ASAP: Round up to the first available slot */
 			if (urb->transfer_flags & URB_ISO_ASAP)
@@ -1311,13 +1311,17 @@
 						-qh->period;
 
 			/*
-			 * Not ASAP: Use the next slot in the stream.  If
-			 * the entire URB falls before the threshold, fail.
+			 * Not ASAP: Use the next slot in the stream,
+			 * no matter what.
 			 */
 			else if (!uhci_frame_before_eq(next,
 					frame + (urb->number_of_packets - 1) *
 						qh->period))
-				return -EXDEV;
+				dev_dbg(uhci_dev(uhci), "iso underrun %p (%u+%u < %u)\n",
+						urb, frame,
+						(urb->number_of_packets - 1) *
+							qh->period,
+						next);
 		}
 	}
 
diff --git a/drivers/usb/host/xhci-ext-caps.h b/drivers/usb/host/xhci-ext-caps.h
index 377f424..d5c82d4 100644
--- a/drivers/usb/host/xhci-ext-caps.h
+++ b/drivers/usb/host/xhci-ext-caps.h
@@ -20,7 +20,11 @@
  * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 /* Up to 16 ms to halt an HC */
-#define XHCI_MAX_HALT_USEC	(16*1000)
+/*FIXME: Extend 16ms to 200ms. 16ms is not enough for Synopsys controller
+ * resume. And xHCI spec haven't define the timeout about this case. Extend
+ * to 200ms instead of 16ms.
+ */
+#define XHCI_MAX_HALT_USEC	(200*1000)
 /* HC not running - set to 1 when run/stop bit is cleared. */
 #define XHCI_STS_HALT		(1<<0)
 
@@ -71,6 +75,7 @@
 
 /* USB 2.0 xHCI 1.0 hardware LMP capability - section 7.2.2.1.3.2 */
 #define XHCI_HLC               (1 << 19)
+#define XHCI_BLC		(1 << 20)
 
 /* command register values to disable interrupts and halt the HC */
 /* start/stop HC execution - do not write unless HC is halted*/
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 187a3ec..096067f 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -24,6 +24,7 @@
 #include <asm/unaligned.h>
 
 #include "xhci.h"
+#include "pci-quirks.h"
 
 #define	PORT_WAKE_BITS	(PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)
 #define	PORT_RWC_BITS	(PORT_CSC | PORT_PEC | PORT_WRC | PORT_OCC | \
@@ -286,7 +287,7 @@
 		if (virt_dev->eps[i].ring && virt_dev->eps[i].ring->dequeue)
 			xhci_queue_stop_endpoint(xhci, slot_id, i, suspend);
 	}
-	cmd->command_trb = xhci->cmd_ring->enqueue;
+	cmd->command_trb = xhci_find_next_enqueue(xhci->cmd_ring);
 	list_add_tail(&cmd->cmd_list, &virt_dev->cmd_list);
 	xhci_queue_stop_endpoint(xhci, slot_id, 0, suspend);
 	xhci_ring_cmd_db(xhci);
@@ -295,7 +296,7 @@
 	/* Wait for last stop endpoint command to finish */
 	timeleft = wait_for_completion_interruptible_timeout(
 			cmd->completion,
-			USB_CTRL_SET_TIMEOUT);
+			XHCI_CMD_DEFAULT_TIMEOUT);
 	if (timeleft <= 0) {
 		xhci_warn(xhci, "%s while waiting for stop endpoint command\n",
 				timeleft == 0 ? "Timeout" : "Signal");
@@ -548,6 +549,8 @@
 	u16 link_state = 0;
 	u16 wake_mask = 0;
 	u16 timeout = 0;
+	u32 __iomem *status_reg = NULL;
+	u32 i, command, num_ports, selector;
 
 	max_ports = xhci_get_ports(hcd, &port_array);
 	bus_state = &xhci->bus_state[hcd_index(hcd)];
@@ -703,12 +706,15 @@
 			link_state = (wIndex & 0xff00) >> 3;
 		if (wValue == USB_PORT_FEAT_REMOTE_WAKE_MASK)
 			wake_mask = wIndex & 0xff00;
+		selector = wIndex >> 8;
 		/* The MSB of wIndex is the U1/U2 timeout */
 		timeout = (wIndex & 0xff00) >> 8;
 		wIndex &= 0xff;
 		if (!wIndex || wIndex > max_ports)
 			goto error;
 		wIndex--;
+		status_reg = &xhci->op_regs->port_power_base +
+			NUM_PORT_REGS*wIndex;
 		temp = xhci_readl(xhci, port_array[wIndex]);
 		if (temp == 0xffffffff) {
 			retval = -ENODEV;
@@ -823,6 +829,16 @@
 				bus_state->suspended_ports |= 1 << wIndex;
 			break;
 		case USB_PORT_FEAT_POWER:
+			/* FIXME Do not turn on BYT XHCI port 6 power,
+			 * Disable this port's power to disable HSIC hub
+			 */
+			 if ((xhci->quirks & XHCI_PORT_DISABLE_QUIRK) &&
+				(wIndex == 5)) {
+				temp = xhci_readl(xhci, port_array[wIndex]);
+				temp &= ~PORT_POWER;
+				xhci_writel(xhci, temp, port_array[wIndex]);
+				break;
+			}
 			/*
 			 * Turn on ports, even if there isn't per-port switching.
 			 * HC will report connect events even before this is set.
@@ -844,11 +860,26 @@
 			spin_lock_irqsave(&xhci->lock, flags);
 			break;
 		case USB_PORT_FEAT_RESET:
+			if (xhci->quirks & XHCI_PORT_RESET)
+				quirk_intel_xhci_port_reset(hcd->self.controller, false);
+
 			temp = (temp | PORT_RESET);
 			xhci_writel(xhci, temp, port_array[wIndex]);
 
 			temp = xhci_readl(xhci, port_array[wIndex]);
 			xhci_dbg(xhci, "set port reset, actual port %d status  = 0x%x\n", wIndex, temp);
+			if (xhci->quirks & XHCI_PORT_RESET) {
+				int delay_time;
+				spin_unlock_irqrestore(&xhci->lock, flags);
+				for (delay_time = 0; delay_time < 800; delay_time += 10) {
+					if (!(temp & PORT_RESET))
+						break;
+					mdelay(2);
+					temp = xhci_readl(xhci, port_array[wIndex]);
+				}
+				spin_lock_irqsave(&xhci->lock, flags);
+				quirk_intel_xhci_port_reset(hcd->self.controller, true);
+			}
 			break;
 		case USB_PORT_FEAT_REMOTE_WAKE_MASK:
 			xhci_set_remote_wake_mask(xhci, port_array,
@@ -864,21 +895,96 @@
 
 			temp = xhci_readl(xhci, port_array[wIndex]);
 			break;
+		case USB_PORT_FEAT_TEST:
+			if (!selector || selector >= 5 || !status_reg)
+				goto error;
+			/*
+			 * Disable all Device Slots.
+			 */
+			for (i = 0; i < MAX_HC_SLOTS; i++) {
+				if (xhci->dcbaa->dev_context_ptrs[i]) {
+					if (xhci_queue_slot_control(xhci,
+						TRB_DISABLE_SLOT, i)) {
+						xhci_err(xhci,
+						"Disable slot[%d] failed!\n",
+						i);
+						goto error;
+					}
+				xhci_dbg(xhci, "Disable Slot[%d].\n", i);
+				}
+			}
+			/*
+			 *	All ports shall be in the Disable state (PP = 0)
+			 */
+			xhci_dbg(xhci, "Disable all port (PP = 0)\n");
+			num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
+			for (i = 0; i < num_ports; i++) {
+				u32 __iomem *sreg =
+					&xhci->op_regs->port_status_base +
+						NUM_PORT_REGS*i;
+				temp = xhci_readl(xhci, sreg);
+				temp &= ~PORT_POWER;
+				xhci_writel(xhci, temp, sreg);
+			}
+
+			/*	Set the Run/Stop (R/S) bit in the USBCMD
+			 *	register to a '0' and wait for HCHalted(HCH) bit
+			 *	in the USBSTS register, to transition to a '1'.
+			 */
+			xhci_dbg(xhci, "Stop controller\n");
+			command = xhci_readl(xhci, &xhci->op_regs->command);
+			command &= ~CMD_RUN;
+			xhci_writel(xhci, command, &xhci->op_regs->command);
+			if (xhci_handshake(xhci, &xhci->op_regs->status,
+						STS_HALT, STS_HALT, 100*100)) {
+				xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
+				return -ETIMEDOUT;
+			}
+
+			/*
+			 * start to test
+			 */
+			xhci_dbg(xhci, "test case:");
+			switch (selector) {
+			case 1:
+				xhci_dbg(xhci, "TEST_J\n");
+				break;
+			case 2:
+				xhci_dbg(xhci, "TEST_K\n");
+				break;
+			case 3:
+				xhci_dbg(xhci, "TEST_SE0_NAK\n");
+				break;
+			case 4:
+				xhci_dbg(xhci, "TEST_PACKET\n");
+				break;
+			default:
+				xhci_dbg(xhci, "Invalide test case!\n");
+				goto error;
+			}
+			/* prevent controller enters Low power state in Test mode.
+			 * some controller will exit Test mode once enter low power
+			 * mode */
+			pm_runtime_get(hcd->self.controller);
+			temp = xhci_readl(xhci, status_reg);
+			temp |= selector << 28;
+			xhci_writel(xhci, temp, status_reg);
+			break;
 		case USB_PORT_FEAT_U1_TIMEOUT:
 			if (hcd->speed != HCD_USB3)
 				goto error;
-			temp = xhci_readl(xhci, port_array[wIndex] + 1);
+			temp = xhci_readl(xhci, port_array[wIndex] + PORTPMSC);
 			temp &= ~PORT_U1_TIMEOUT_MASK;
 			temp |= PORT_U1_TIMEOUT(timeout);
-			xhci_writel(xhci, temp, port_array[wIndex] + 1);
+			xhci_writel(xhci, temp, port_array[wIndex] + PORTPMSC);
 			break;
 		case USB_PORT_FEAT_U2_TIMEOUT:
 			if (hcd->speed != HCD_USB3)
 				goto error;
-			temp = xhci_readl(xhci, port_array[wIndex] + 1);
+			temp = xhci_readl(xhci, port_array[wIndex] + PORTPMSC);
 			temp &= ~PORT_U2_TIMEOUT_MASK;
 			temp |= PORT_U2_TIMEOUT(timeout);
-			xhci_writel(xhci, temp, port_array[wIndex] + 1);
+			xhci_writel(xhci, temp, port_array[wIndex] + PORTPMSC);
 			break;
 		default:
 			goto error;
@@ -986,6 +1092,10 @@
 	struct xhci_bus_state *bus_state;
 	bool reset_change = false;
 
+	/* Forbid to access register if controller in suspneded */
+	if (!HCD_HW_ACCESSIBLE(hcd))
+		return -ESHUTDOWN;
+
 	max_ports = xhci_get_ports(hcd, &port_array);
 	bus_state = &xhci->bus_state[hcd_index(hcd)];
 
@@ -1092,20 +1202,6 @@
 		t1 = xhci_port_state_to_neutral(t1);
 		if (t1 != t2)
 			xhci_writel(xhci, t2, port_array[port_index]);
-
-		if (hcd->speed != HCD_USB3) {
-			/* enable remote wake up for USB 2.0 */
-			__le32 __iomem *addr;
-			u32 tmp;
-
-			/* Add one to the port status register address to get
-			 * the port power control register address.
-			 */
-			addr = port_array[port_index] + 1;
-			tmp = xhci_readl(xhci, addr);
-			tmp |= PORT_RWE;
-			xhci_writel(xhci, tmp, addr);
-		}
 	}
 	hcd->state = HC_STATE_SUSPENDED;
 	bus_state->next_statechange = jiffies + msecs_to_jiffies(10);
@@ -1160,6 +1256,10 @@
 				xhci_set_link_state(xhci, port_array,
 						port_index, XDEV_RESUME);
 
+				/* need 1ms delay between access to USB2 PORTSC
+				 * and USB3 PORTSC to avoid Fabric Error.
+				 * */
+				mdelay(1);
 				spin_unlock_irqrestore(&xhci->lock, flags);
 				msleep(20);
 				spin_lock_irqsave(&xhci->lock, flags);
@@ -1184,24 +1284,30 @@
 				xhci_ring_device(xhci, slot_id);
 		} else
 			xhci_writel(xhci, temp, port_array[port_index]);
-
-		if (hcd->speed != HCD_USB3) {
-			/* disable remote wake up for USB 2.0 */
-			__le32 __iomem *addr;
-			u32 tmp;
-
-			/* Add one to the port status register address to get
-			 * the port power control register address.
-			 */
-			addr = port_array[port_index] + 1;
-			tmp = xhci_readl(xhci, addr);
-			tmp &= ~PORT_RWE;
-			xhci_writel(xhci, tmp, addr);
-		}
 	}
 
 	(void) xhci_readl(xhci, &xhci->op_regs->command);
 
+	/* This is for wakeup IRQ case. For USB3 remote wakeup for S3 case,
+	 * the wakeup IRQ get immediately which before HCD set accessiable.
+	 * So the IRQ be disabled in usb_hcd_irq. For S3 case, after bus
+	 * resume done, the kernel will call their children directly. This
+	 * will cause children driver resume called before hcd_resume_work.
+	 *
+	 * The USB device corresponding port only be resume to U0/Enabled
+	 * state during xhci driver handle port change event. So this gap
+	 * will cause port haven't resumed when matched usb driver resume
+	 * callback be called. It will cause usb core try to disconnect the
+	 * usb device and cause some unexpected behavior.
+	 *
+	 * So we need to enable IRQ immediately at the end of bus_resume to
+	 * prevent above gap.
+	 **/
+	if (HCD_IRQ_DISABLED(hcd) && hcd->has_wakeup_irq) {
+		clear_bit(HCD_FLAG_IRQ_DISABLED, &hcd->flags);
+		enable_irq(hcd->irq);
+	}
+
 	bus_state->next_statechange = jiffies + msecs_to_jiffies(5);
 	/* re-enable irqs */
 	temp = xhci_readl(xhci, &xhci->op_regs->command);
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index fbf75e5..0cdc20a 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -369,6 +369,10 @@
 		ctx->size += CTX_SIZE(xhci->hcc_params);
 
 	ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma);
+	if (!ctx->bytes) {
+		kfree(ctx);
+		return NULL;
+	}
 	memset(ctx->bytes, 0, ctx->size);
 	return ctx;
 }
@@ -1856,6 +1860,7 @@
 	kfree(xhci->usb3_ports);
 	kfree(xhci->port_array);
 	kfree(xhci->rh_bw);
+	kfree(xhci->ext_caps);
 
 	xhci->page_size = 0;
 	xhci->page_shift = 0;
@@ -2043,7 +2048,7 @@
 }
 
 static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
-		__le32 __iomem *addr, u8 major_revision)
+		__le32 __iomem *addr, u8 major_revision, int max_caps)
 {
 	u32 temp, port_offset, port_count;
 	int i;
@@ -2068,6 +2073,10 @@
 		/* WTF? "Valid values are ‘1’ to MaxPorts" */
 		return;
 
+	/* cache usb2 port capabilities */
+	if (major_revision < 0x03 && xhci->num_ext_caps < max_caps)
+		xhci->ext_caps[xhci->num_ext_caps++] = temp;
+
 	/* Check the host's USB2 LPM capability */
 	if ((xhci->hci_version == 0x96) && (major_revision != 0x03) &&
 			(temp & XHCI_L1C)) {
@@ -2125,10 +2134,11 @@
  */
 static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
 {
-	__le32 __iomem *addr;
-	u32 offset;
+	__le32 __iomem *addr, *tmp_addr;
+	u32 offset, tmp_offset;
 	unsigned int num_ports;
 	int i, j, port_index;
+	int cap_count = 0;
 
 	addr = &xhci->cap_regs->hcc_params;
 	offset = XHCI_HCC_EXT_CAPS(xhci_readl(xhci, addr));
@@ -2161,13 +2171,35 @@
 	 * See section 5.3.6 for offset calculation.
 	 */
 	addr = &xhci->cap_regs->hc_capbase + offset;
+
+	tmp_addr = addr;
+	tmp_offset = offset;
+
+	/* count extended protocol capability entries for later caching */
+	do {
+		u32 cap_id;
+		cap_id = xhci_readl(xhci, tmp_addr);
+		if (XHCI_EXT_CAPS_ID(cap_id) == XHCI_EXT_CAPS_PROTOCOL)
+			cap_count++;
+
+		tmp_offset = XHCI_EXT_CAPS_NEXT(cap_id);
+		tmp_addr += tmp_offset;
+
+	} while (tmp_offset);
+
+	xhci->ext_caps = kzalloc(sizeof(*xhci->ext_caps) * cap_count, flags);
+	if (!xhci->ext_caps)
+		return -ENOMEM;
+
+
 	while (1) {
 		u32 cap_id;
 
 		cap_id = xhci_readl(xhci, addr);
 		if (XHCI_EXT_CAPS_ID(cap_id) == XHCI_EXT_CAPS_PROTOCOL)
 			xhci_add_in_port(xhci, num_ports, addr,
-					(u8) XHCI_EXT_PORT_MAJOR(cap_id));
+					(u8) XHCI_EXT_PORT_MAJOR(cap_id),
+					cap_count);
 		offset = XHCI_EXT_CAPS_NEXT(cap_id);
 		if (!offset || (xhci->num_usb2_ports + xhci->num_usb3_ports)
 				== num_ports)
@@ -2433,6 +2465,8 @@
 	xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
 	xhci_print_ir_set(xhci, 0);
 
+	init_completion(&xhci->enable_slot);
+
 	/*
 	 * XXX: Might need to set the Interrupter Moderation Register to
 	 * something other than the default (~1ms minimum between interrupts).
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index cc24e39..2a0bbd4 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -93,7 +93,6 @@
 	}
 	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
 			pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI) {
-		xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
 		xhci->quirks |= XHCI_EP_LIMIT_QUIRK;
 		xhci->limit_active_eps = 64;
 		xhci->quirks |= XHCI_SW_BW_CHECKING;
@@ -213,6 +212,7 @@
 		usb_remove_hcd(xhci->shared_hcd);
 		usb_put_hcd(xhci->shared_hcd);
 	}
+
 	usb_hcd_pci_remove(dev);
 	kfree(xhci);
 }
@@ -346,7 +346,7 @@
 	/* suspend and resume implemented later */
 
 	.shutdown = 	usb_hcd_pci_shutdown,
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_PM
 	.driver = {
 		.pm = &usb_hcd_pci_pm_ops
 	},
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index df90fe5..37003c8 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -24,7 +24,7 @@
 	 * here that the generic code does not try to make a pci_dev from our
 	 * dev struct in order to setup MSI
 	 */
-	xhci->quirks |= XHCI_BROKEN_MSI;
+	xhci->quirks |= XHCI_PLAT;
 }
 
 /* called during probe() after chip reset completes */
@@ -179,6 +179,7 @@
 
 	usb_remove_hcd(hcd);
 	iounmap(hcd->regs);
+	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
 	usb_put_hcd(hcd);
 	kfree(xhci);
 
@@ -194,12 +195,23 @@
 };
 MODULE_ALIAS("platform:xhci-hcd");
 
+#ifdef CONFIG_USB_DWC3_HOST_INTEL
+#include "../dwc3/dwc3-host-intel.c"
+#endif
+
 int xhci_register_plat(void)
 {
+#ifdef CONFIG_USB_DWC3_HOST_INTEL
+	return platform_driver_register(&dwc3_xhci_driver);
+#endif
 	return platform_driver_register(&usb_xhci_driver);
 }
 
 void xhci_unregister_plat(void)
 {
+#ifdef CONFIG_USB_DWC3_HOST_INTEL
+	platform_driver_unregister(&dwc3_xhci_driver);
+	return;
+#endif
 	platform_driver_unregister(&usb_xhci_driver);
 }
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 1969c00..4bb4f9d 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -66,6 +66,8 @@
 
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
+#include <acpi/acpi.h>
+#include "../../acpi/acpica/achware.h"
 #include "xhci.h"
 
 static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
@@ -122,6 +124,16 @@
 	return TRB_TYPE_LINK_LE32(link->control);
 }
 
+union xhci_trb *xhci_find_next_enqueue(struct xhci_ring *ring)
+{
+	/* Enqueue pointer can be left pointing to the link TRB,
+	 * we must handle that
+	 */
+	if (TRB_TYPE_LINK_LE32(ring->enqueue->link.control))
+		return ring->enq_seg->next->trbs;
+	return ring->enqueue;
+}
+
 /* Updates trb to point to the next TRB in the ring, and updates seg if the next
  * TRB is in a new segment.  This does not skip over link TRBs, and it does not
  * effect the ring dequeue or enqueue pointers.
@@ -280,8 +292,11 @@
 /* Ring the host controller doorbell after placing a command on the ring */
 void xhci_ring_cmd_db(struct xhci_hcd *xhci)
 {
-	if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING))
+	if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING)) {
+		xhci_err(xhci, "xhci->cmd_ring_state(0x%x) not equals to RUNNING\n",
+				xhci->cmd_ring_state);
 		return;
+	}
 
 	xhci_dbg(xhci, "// Ding dong!\n");
 	xhci_writel(xhci, DB_VALUE_HOST, &xhci->dba->doorbell[0]);
@@ -388,6 +403,10 @@
 			spin_unlock_irqrestore(&xhci->lock, flags);
 			usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
 			xhci_dbg(xhci, "xHCI host controller is dead.\n");
+			if (xhci->quirks | XHCI_RESET && xhci->reset_hcd_work) {
+				xhci_dbg(xhci, "Trying to reset xHCI host controller.\n");
+				schedule_work(xhci->reset_hcd_work);
+			}
 			return retval;
 		}
 	}
@@ -434,7 +453,7 @@
 
 	/* A ring has pending URBs if its TD list is not empty */
 	if (!(ep->ep_state & EP_HAS_STREAMS)) {
-		if (!(list_empty(&ep->ring->td_list)))
+		if (ep->ring && !(list_empty(&ep->ring->td_list)))
 			xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
 		return;
 	}
@@ -545,6 +564,7 @@
 	struct xhci_generic_trb *trb;
 	struct xhci_ep_ctx *ep_ctx;
 	dma_addr_t addr;
+	u64 hw_dequeue;
 
 	ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
 			ep_index, stream_id);
@@ -554,52 +574,54 @@
 				stream_id);
 		return;
 	}
-	state->new_cycle_state = 0;
-	xhci_dbg(xhci, "Finding segment containing stopped TRB.\n");
-	state->new_deq_seg = find_trb_seg(cur_td->start_seg,
-			dev->eps[ep_index].stopped_trb,
-			&state->new_cycle_state);
-	if (!state->new_deq_seg) {
-		WARN_ON(1);
-		return;
-	}
 
 	/* Dig out the cycle state saved by the xHC during the stop ep cmd */
 	xhci_dbg(xhci, "Finding endpoint context\n");
 	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
-	state->new_cycle_state = 0x1 & le64_to_cpu(ep_ctx->deq);
+	hw_dequeue = le64_to_cpu(ep_ctx->deq);
+
+	/* Find virtual address and segment of hardware dequeue pointer */
+	state->new_deq_seg = ep_ring->deq_seg;
+	state->new_deq_ptr = ep_ring->dequeue;
+	while (xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr)
+			!= (dma_addr_t)(hw_dequeue & ~0xf)) {
+		next_trb(xhci, ep_ring, &state->new_deq_seg,
+					&state->new_deq_ptr);
+		if (state->new_deq_ptr == ep_ring->dequeue) {
+			WARN_ON(1);
+			return;
+		}
+	}
+	/*
+	 * Find cycle state for last_trb, starting at old cycle state of
+	 * hw_dequeue. If there is only one segment ring, find_trb_seg() will
+	 * return immediately and cannot toggle the cycle state if this search
+	 * wraps around, so add one more toggle manually in that case.
+	 */
+	state->new_cycle_state = hw_dequeue & 0x1;
+	if (ep_ring->first_seg == ep_ring->first_seg->next &&
+			cur_td->last_trb < state->new_deq_ptr)
+		state->new_cycle_state ^= 0x1;
 
 	state->new_deq_ptr = cur_td->last_trb;
 	xhci_dbg(xhci, "Finding segment containing last TRB in TD.\n");
 	state->new_deq_seg = find_trb_seg(state->new_deq_seg,
-			state->new_deq_ptr,
-			&state->new_cycle_state);
+			state->new_deq_ptr, &state->new_cycle_state);
 	if (!state->new_deq_seg) {
 		WARN_ON(1);
 		return;
 	}
 
+	/* Increment to find next TRB after last_trb. Cycle if appropriate. */
 	trb = &state->new_deq_ptr->generic;
 	if (TRB_TYPE_LINK_LE32(trb->field[3]) &&
 	    (trb->field[3] & cpu_to_le32(LINK_TOGGLE)))
 		state->new_cycle_state ^= 0x1;
 	next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
 
-	/*
-	 * If there is only one segment in a ring, find_trb_seg()'s while loop
-	 * will not run, and it will return before it has a chance to see if it
-	 * needs to toggle the cycle bit.  It can't tell if the stalled transfer
-	 * ended just before the link TRB on a one-segment ring, or if the TD
-	 * wrapped around the top of the ring, because it doesn't have the TD in
-	 * question.  Look for the one-segment case where stalled TRB's address
-	 * is greater than the new dequeue pointer address.
-	 */
-	if (ep_ring->first_seg == ep_ring->first_seg->next &&
-			state->new_deq_ptr < dev->eps[ep_index].stopped_trb)
-		state->new_cycle_state ^= 0x1;
+	/* Don't update the ring cycle state for the producer (us). */
 	xhci_dbg(xhci, "Cycle state = 0x%x\n", state->new_cycle_state);
 
-	/* Don't update the ring cycle state for the producer (us). */
 	xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n",
 			state->new_deq_seg);
 	addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
@@ -776,12 +798,16 @@
 	memset(&deq_state, 0, sizeof(deq_state));
 	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
 	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
+	if (!xhci->devs[slot_id]) {
+		xhci_warn(xhci, "Stop endpoint command completion for "
+				"disabled slot\n");
+		return;
+	}
 	ep = &xhci->devs[slot_id]->eps[ep_index];
 
 	if (list_empty(&ep->cancelled_td_list)) {
 		xhci_stop_watchdog_timer_in_irq(xhci, ep);
 		ep->stopped_td = NULL;
-		ep->stopped_trb = NULL;
 		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
 		return;
 	}
@@ -847,8 +873,10 @@
 		/* Otherwise ring the doorbell(s) to restart queued transfers */
 		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
 	}
-	ep->stopped_td = NULL;
-	ep->stopped_trb = NULL;
+
+	/* Clear stopped_td if endpoint is not halted */
+	if (!(ep->ep_state & EP_HALTED))
+		ep->stopped_td = NULL;
 
 	/*
 	 * Drop the lock and complete the URBs in the cancelled TD list.
@@ -989,6 +1017,10 @@
 	xhci_dbg(xhci, "Calling usb_hc_died()\n");
 	usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
 	xhci_dbg(xhci, "xHCI host controller is dead.\n");
+	if (xhci->quirks | XHCI_RESET && xhci->reset_hcd_work) {
+		xhci_dbg(xhci, "Trying to reset xHCI host controller.\n");
+		schedule_work(xhci->reset_hcd_work);
+	}
 }
 
 
@@ -1063,6 +1095,12 @@
 	stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
 	dev = xhci->devs[slot_id];
 
+	if (!dev) {
+		xhci_warn(xhci, "WARN Set TR deq ptr command for "
+				"disabled slot\n");
+		return;
+	}
+
 	ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
 	if (!ep_ring) {
 		xhci_warn(xhci, "WARN Set TR deq ptr command for "
@@ -1144,9 +1182,16 @@
 {
 	int slot_id;
 	unsigned int ep_index;
+	struct xhci_virt_device *dev;
 
 	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
 	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
+	dev = xhci->devs[slot_id];
+	if (!dev) {
+		xhci_warn(xhci, "WARN reset ep command for "
+				"disabled slot\n");
+		return;
+	}
 	/* This command will only fail if the endpoint wasn't halted,
 	 * but we don't care.
 	 */
@@ -1390,6 +1435,12 @@
 			inc_deq(xhci, xhci->cmd_ring);
 			return;
 		}
+		/* There is no command to handle if we get a stop event when the
+		 * command ring is empty, event->cmd_trb points to the next
+		 * unset command
+		 */
+		if (xhci->cmd_ring->dequeue == xhci->cmd_ring->enqueue)
+			return;
 	}
 
 	switch (le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])
@@ -1399,7 +1450,7 @@
 			xhci->slot_id = slot_id;
 		else
 			xhci->slot_id = 0;
-		complete(&xhci->addr_dev);
+		complete(&xhci->enable_slot);
 		break;
 	case TRB_TYPE(TRB_DISABLE_SLOT):
 		if (xhci->devs[slot_id]) {
@@ -1412,6 +1463,11 @@
 		break;
 	case TRB_TYPE(TRB_CONFIG_EP):
 		virt_dev = xhci->devs[slot_id];
+		if (!virt_dev) {
+			xhci_warn(xhci, "TRB_CONFIG_EP cmd completion "
+					"for disabled slot\n");
+			break;
+		}
 		if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
 			break;
 		/*
@@ -1457,13 +1513,25 @@
 		break;
 	case TRB_TYPE(TRB_EVAL_CONTEXT):
 		virt_dev = xhci->devs[slot_id];
+		if (!virt_dev) {
+			xhci_warn(xhci, "TRB_EVAL_CONTEXT cmd completion "
+					"for disabled slot\n");
+			break;
+		}
 		if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
 			break;
 		xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(le32_to_cpu(event->status));
 		complete(&xhci->devs[slot_id]->cmd_completion);
 		break;
 	case TRB_TYPE(TRB_ADDR_DEV):
-		xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(le32_to_cpu(event->status));
+		virt_dev = xhci->devs[slot_id];
+		if (!virt_dev) {
+			xhci_warn(xhci, "TRB_ADDR_DEV cmd completion "
+					"for disabled slot\n");
+			break;
+		}
+		virt_dev->cmd_status =
+			GET_COMP_CODE(le32_to_cpu(event->status));
 		complete(&xhci->addr_dev);
 		break;
 	case TRB_TYPE(TRB_STOP_RING):
@@ -1650,6 +1718,16 @@
 		usb_hcd_resume_root_hub(hcd);
 	}
 
+	/* Some xHC will generate PLC event during compliance test.
+	 * To avoid break compliance test, ignore this interrupt. */
+	if ((xhci->quirks & XHCI_COMP_PLC_QUIRK) &&
+		usb_quirk_ignore_comp_plc(&xhci->op_regs->port_status_base,
+				HCS_MAX_PORTS(xhci->hcs_params1))) {
+		xhci_test_and_clear_bit(xhci, port_array,
+				faked_port_index, PORT_PLC);
+		return;
+	}
+
 	if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
 		xhci_dbg(xhci, "port resume event for port %d\n", port_id);
 
@@ -1659,6 +1737,13 @@
 			goto cleanup;
 		}
 
+#ifdef CONFIG_PM_RUNTIME
+		/* add 5s time-out wakelock for delay system suspend */
+		wake_lock_timeout(&hcd->wake_lock, 5 * HZ);
+		xhci_dbg(xhci,
+			"%s add 5s wake_lock for port connect change\n",
+			__func__);
+#endif
 		if (DEV_SUPERSPEED(temp)) {
 			xhci_dbg(xhci, "remote wake SS port %d\n", port_id);
 			/* Set a flag to say the port signaled remote wakeup,
@@ -1806,14 +1891,12 @@
 	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
 	ep->ep_state |= EP_HALTED;
 	ep->stopped_td = td;
-	ep->stopped_trb = event_trb;
 	ep->stopped_stream = stream_id;
 
 	xhci_queue_reset_ep(xhci, slot_id, ep_index);
 	xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
 
 	ep->stopped_td = NULL;
-	ep->stopped_trb = NULL;
 	ep->stopped_stream = 0;
 
 	xhci_ring_cmd_db(xhci);
@@ -1895,7 +1978,6 @@
 		 * the ring dequeue pointer or take this TD off any lists yet.
 		 */
 		ep->stopped_td = td;
-		ep->stopped_trb = event_trb;
 		return 0;
 	} else {
 		if (trb_comp_code == COMP_STALL) {
@@ -1907,7 +1989,6 @@
 			 * USB class driver clear the stall later.
 			 */
 			ep->stopped_td = td;
-			ep->stopped_trb = event_trb;
 			ep->stopped_stream = ep_ring->stream_id;
 		} else if (xhci_requires_manual_halt_cleanup(xhci,
 					ep_ctx, trb_comp_code)) {
@@ -2255,8 +2336,11 @@
 					*status = 0;
 			}
 		} else {
-			td->urb->actual_length =
-				td->urb->transfer_buffer_length;
+			if (trb_comp_code != COMP_STOP_INVAL)
+				td->urb->actual_length =
+					td->urb->transfer_buffer_length;
+			else
+				td->urb->actual_length = 0;
 			/* Ignore a short packet completion if the
 			 * untransferred length was zero.
 			 */
@@ -2799,6 +2883,51 @@
 	return IRQ_HANDLED;
 }
 
+#ifdef CONFIG_ACPI
+irqreturn_t xhci_byt_pm_irq(int irq, struct usb_hcd *hcd)
+{
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+	u32			gpe_sts;
+	u32			gpe_en;
+	u32			pme_sts;
+
+	/* PME status from PMC side for XHCI */
+	pme_sts = readl(xhci->pmc_base_addr + 0xc0);
+
+	/* GPE_PME status from ACPI register */
+	acpi_hw_register_read(0xf1, &gpe_sts);
+
+	xhci_dbg(xhci, "xhci_pm_irq: pmc_pme_sts = 0x%x, gpe_sts = 0x%x\n",
+			pme_sts, gpe_sts);
+
+	/* 0x2000(bit 13) is PME_B0_STS for XHCI */
+	if (gpe_sts & 0x2000) {
+		if (work_busy(&xhci->pm_check)) {
+			xhci_dbg(xhci, "pm_check work busy\n");
+			return IRQ_HANDLED;
+		}
+
+		/* clear PME_B0 bit in GPE0_EN(0xf2) to disable interrupt */
+		acpi_hw_register_read(0xf2, &gpe_en);
+		gpe_en = gpe_en & (~0x2000);
+		acpi_hw_register_write(0xf2, gpe_en);
+		xhci_dbg(xhci, "clear GPE_EN\n");
+
+		spin_lock(&xhci->lock);
+		if (!xhci->pm_check_flag) {
+			xhci_dbg(xhci, "schedule work to handle PME\n");
+			xhci->pm_check_flag = 1;
+			schedule_work(&xhci->pm_check);
+		}
+		spin_unlock(&xhci->lock);
+
+		return IRQ_HANDLED;
+	}
+
+	return IRQ_NONE;
+}
+#endif
+
 irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd)
 {
 	return xhci_irq(hcd);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index d8f640b..5375410 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -156,6 +156,14 @@
 	u32 state;
 	int ret, i;
 
+	/* If any ports under compliance test, then giveup to reset host */
+	if ((xhci->quirks & XHCI_COMP_PLC_QUIRK) &&
+			usb_quirk_ignore_comp_plc(&xhci->op_regs->port_status_base,
+				HCS_MAX_PORTS(xhci->hcs_params1))) {
+		xhci_dbg(xhci, "xHC under compliance testing, aborting reset\n");
+		return 0;
+	}
+
 	state = xhci_readl(xhci, &xhci->op_regs->status);
 	if ((state & STS_HALT) == 0) {
 		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
@@ -317,6 +325,9 @@
 
 	xhci_free_irq(xhci);
 
+	if (xhci->quirks & XHCI_PLAT)
+		return;
+
 	if (xhci->msix_entries) {
 		pci_disable_msix(pdev);
 		kfree(xhci->msix_entries);
@@ -342,9 +353,14 @@
 static int xhci_try_enable_msi(struct usb_hcd *hcd)
 {
 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
-	struct pci_dev  *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+	struct pci_dev  *pdev;
 	int ret;
 
+	/* The xhci platform device has set up IRQs through usb_add_hcd. */
+	if (xhci->quirks & XHCI_PLAT)
+		return 0;
+
+	pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
 	/*
 	 * Some Fresco Logic host controllers advertise MSI, but fail to
 	 * generate interrupts.  Don't even try to enable MSI.
@@ -522,6 +538,10 @@
 		compliance_mode_recovery_timer_init(xhci);
 	}
 
+	/* initialize port reset environment */
+	if (xhci->quirks & XHCI_PORT_RESET)
+		quirk_intel_xhci_pr_init(true);
+
 	return retval;
 }
 
@@ -535,8 +555,12 @@
 	int temp;
 	u64 temp_64;
 	struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
+	struct usb_hcd		*hcd = xhci_to_hcd(xhci);
 	int i, j;
 
+	if (!HCD_HW_ACCESSIBLE(hcd))
+		return;
+
 	xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies);
 
 	spin_lock_irqsave(&xhci->lock, flags);
@@ -739,6 +763,9 @@
 				__func__);
 	}
 
+	if (xhci->quirks & XHCI_PORT_RESET)
+		quirk_intel_xhci_pr_init(false);
+
 	if (xhci->quirks & XHCI_AMD_PLL_FIX)
 		usb_amd_dev_put();
 
@@ -884,7 +911,13 @@
 	struct usb_hcd		*hcd = xhci_to_hcd(xhci);
 	u32			command;
 
-	if (hcd->state != HC_STATE_SUSPENDED ||
+	/* need to check if xhci->shared_hcd is null to fix one possible
+	 * kernel panic when boot up and xhci not allocate xhci->shared_hcd
+	 * yet, due to autosuspend_delay set to 0 in kernel3.10,after usb1
+	 * finish the probe,xhci runtime suspend will trigger immediately,
+	 * but xhci->shared_hcd may not initialized yet.
+	 */
+	if (hcd->state != HC_STATE_SUSPENDED || !xhci->shared_hcd ||
 			xhci->shared_hcd->state != HC_STATE_SUSPENDED)
 		return -EINVAL;
 
@@ -1171,9 +1204,6 @@
 	}
 
 	xhci = hcd_to_xhci(hcd);
-	if (xhci->xhc_state & XHCI_STATE_HALTED)
-		return -ENODEV;
-
 	if (check_virt_dev) {
 		if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
 			printk(KERN_DEBUG "xHCI %s called with unaddressed "
@@ -1189,6 +1219,9 @@
 		}
 	}
 
+	if (xhci->xhc_state & XHCI_STATE_HALTED)
+		return -ENODEV;
+
 	return 1;
 }
 
@@ -1475,6 +1508,14 @@
 	struct xhci_virt_ep *ep;
 
 	xhci = hcd_to_xhci(hcd);
+	/* Add a 1 ms delay before the stopping of the endpoint.
+	 * This is a workaround to avoid the xHCI controller returning
+	 * event stopped - length invalid when dequeueing a link TRB.
+	 */
+	if (in_interrupt() || irqs_disabled())
+		udelay(1000);
+	else
+		usleep_range(1000, 1001);
 	spin_lock_irqsave(&xhci->lock, flags);
 	/* Make sure the URB hasn't completed or been unlinked already */
 	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
@@ -2587,15 +2628,7 @@
 	if (command) {
 		cmd_completion = command->completion;
 		cmd_status = &command->status;
-		command->command_trb = xhci->cmd_ring->enqueue;
-
-		/* Enqueue pointer can be left pointing to the link TRB,
-		 * we must handle that
-		 */
-		if (TRB_TYPE_LINK_LE32(command->command_trb->link.control))
-			command->command_trb =
-				xhci->cmd_ring->enq_seg->next->trbs;
-
+		command->command_trb = xhci_find_next_enqueue(xhci->cmd_ring);
 		list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
 	} else {
 		cmd_completion = &virt_dev->cmd_completion;
@@ -2603,7 +2636,7 @@
 	}
 	init_completion(cmd_completion);
 
-	cmd_trb = xhci->cmd_ring->dequeue;
+	cmd_trb = xhci_find_next_enqueue(xhci->cmd_ring);
 	if (!ctx_change)
 		ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
 				udev->slot_id, must_succeed);
@@ -2632,10 +2665,21 @@
 				ctx_change == 0 ?
 					"configure endpoint" :
 					"evaluate context");
-		/* cancel the configure endpoint command */
-		ret = xhci_cancel_cmd(xhci, command, cmd_trb);
-		if (ret < 0)
-			return ret;
+		/* When configure ep command met timeout, the Synopsys OTG3 was
+		 * already messed. Then even driver trying to stop cmd ring also
+		 * have no response.
+		 * So give up to stop cmd ring which waste another 10sec timeout
+		 * delay. Reset xHCI directly.
+		 **/
+		if (xhci->quirks | XHCI_RESET && xhci->reset_hcd_work) {
+			xhci_dbg(xhci, "Trying to reset xHCI host controller.\n");
+			schedule_work(xhci->reset_hcd_work);
+		} else {
+			/* cancel the configure endpoint command */
+			ret = xhci_cancel_cmd(xhci, command, cmd_trb);
+			if (ret < 0)
+				return ret;
+		}
 		return -ETIME;
 	}
 
@@ -2894,7 +2938,6 @@
 		xhci_ring_cmd_db(xhci);
 	}
 	virt_ep->stopped_td = NULL;
-	virt_ep->stopped_trb = NULL;
 	virt_ep->stopped_stream = 0;
 	spin_unlock_irqrestore(&xhci->lock, flags);
 
@@ -3388,14 +3431,7 @@
 
 	/* Attempt to submit the Reset Device command to the command ring */
 	spin_lock_irqsave(&xhci->lock, flags);
-	reset_device_cmd->command_trb = xhci->cmd_ring->enqueue;
-
-	/* Enqueue pointer can be left pointing to the link TRB,
-	 * we must handle that
-	 */
-	if (TRB_TYPE_LINK_LE32(reset_device_cmd->command_trb->link.control))
-		reset_device_cmd->command_trb =
-			xhci->cmd_ring->enq_seg->next->trbs;
+	reset_device_cmd->command_trb = xhci_find_next_enqueue(xhci->cmd_ring);
 
 	list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list);
 	ret = xhci_queue_reset_device(xhci, slot_id);
@@ -3411,7 +3447,7 @@
 	/* Wait for the Reset Device command to finish */
 	timeleft = wait_for_completion_interruptible_timeout(
 			reset_device_cmd->completion,
-			USB_CTRL_SET_TIMEOUT);
+			XHCI_CMD_DEFAULT_TIMEOUT);
 	if (timeleft <= 0) {
 		xhci_warn(xhci, "%s while waiting for reset device command\n",
 				timeleft == 0 ? "Timeout" : "Signal");
@@ -3510,6 +3546,17 @@
 	u32 state;
 	int i, ret;
 
+#ifndef CONFIG_USB_DEFAULT_PERSIST
+	struct device *dev = hcd->self.controller;
+	/*
+	 * We called pm_runtime_get_noresume when the device was attached.
+	 * Decrement the counter here to allow controller to runtime suspend
+	 * if no devices remain.
+	 */
+	if (xhci->quirks & XHCI_RESET_ON_RESUME)
+		pm_runtime_put_noidle(dev);
+#endif
+
 	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
 	/* If the host is halted due to driver unload, we still need to free the
 	 * device.
@@ -3525,11 +3572,6 @@
 		del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
 	}
 
-	if (udev->usb2_hw_lpm_enabled) {
-		xhci_set_usb2_hardware_lpm(hcd, udev, 0);
-		udev->usb2_hw_lpm_enabled = 0;
-	}
-
 	spin_lock_irqsave(&xhci->lock, flags);
 	/* Don't disable the slot if the host controller is dead. */
 	state = xhci_readl(xhci, &xhci->op_regs->status);
@@ -3581,13 +3623,30 @@
 int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
 {
 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+#ifndef CONFIG_USB_DEFAULT_PERSIST
+	struct device *dev = hcd->self.controller;
+#endif
 	unsigned long flags;
 	int timeleft;
-	int ret;
+	int ret, count = 0;
 	union xhci_trb *cmd_trb;
 
+	if (xhci->xhc_state & XHCI_STATE_HALTED)
+		return -EFAULT;
+
+	/* Need to wait xhci->cmd_ring_state to be RUNNING before
+	 * issue ENABLE_SLOT command.
+	 */
+	while (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING)) {
+		if (count++ > XHCI_WAIT_CMD_RING_READY_TIMEOUT) {
+			xhci_err(xhci, "%s: cmd ring can't get ready.\n", __func__);
+			return -EFAULT;
+		}
+		msleep(200);
+	}
+
 	spin_lock_irqsave(&xhci->lock, flags);
-	cmd_trb = xhci->cmd_ring->dequeue;
+	cmd_trb = xhci_find_next_enqueue(xhci->cmd_ring);
 	ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
 	if (ret) {
 		spin_unlock_irqrestore(&xhci->lock, flags);
@@ -3598,7 +3657,7 @@
 	spin_unlock_irqrestore(&xhci->lock, flags);
 
 	/* XXX: how much time for xHC slot assignment? */
-	timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
+	timeleft = wait_for_completion_interruptible_timeout(&xhci->enable_slot,
 			XHCI_CMD_DEFAULT_TIMEOUT);
 	if (timeleft <= 0) {
 		xhci_warn(xhci, "%s while waiting for a slot\n",
@@ -3612,6 +3671,11 @@
 		return 0;
 	}
 
+	/* set xhci->slot_id to udev->slot_id just after Enable_Slot
+	 * successful to avoid race condition
+	 */
+	udev->slot_id = xhci->slot_id;
+
 	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
 		spin_lock_irqsave(&xhci->lock, flags);
 		ret = xhci_reserve_host_control_ep_resources(xhci);
@@ -3620,6 +3684,7 @@
 			xhci_warn(xhci, "Not enough host resources, "
 					"active endpoint contexts = %u\n",
 					xhci->num_active_eps);
+			udev->slot_id = 0;
 			goto disable_slot;
 		}
 		spin_unlock_irqrestore(&xhci->lock, flags);
@@ -3628,11 +3693,22 @@
 	 * xhci_discover_or_reset_device(), which may be called as part of
 	 * mass storage driver error handling.
 	 */
-	if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) {
+	if (!xhci_alloc_virt_device(xhci, udev->slot_id, udev, GFP_NOIO)) {
 		xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
+		udev->slot_id = 0;
 		goto disable_slot;
 	}
-	udev->slot_id = xhci->slot_id;
+
+
+#ifndef CONFIG_USB_DEFAULT_PERSIST
+	/*
+	 * If resetting upon resume, we can't put the controller into runtime
+	 * suspend if there is a device attached.
+	 */
+	if (xhci->quirks & XHCI_RESET_ON_RESUME)
+		pm_runtime_get_noresume(dev);
+#endif
+
 	/* Is this a LS or FS device under a HS hub? */
 	/* Hub or peripherial? */
 	return 1;
@@ -3704,7 +3780,7 @@
 	xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
 
 	spin_lock_irqsave(&xhci->lock, flags);
-	cmd_trb = xhci->cmd_ring->dequeue;
+	cmd_trb = xhci_find_next_enqueue(xhci->cmd_ring);
 	ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
 					udev->slot_id);
 	if (ret) {
@@ -3725,6 +3801,13 @@
 	if (timeleft <= 0) {
 		xhci_warn(xhci, "%s while waiting for address device command\n",
 				timeleft == 0 ? "Timeout" : "Signal");
+		xhci_dbg(xhci, "xhci registers:\n");
+		xhci_print_registers(xhci);
+		xhci_dbg(xhci, "Command ring:\n");
+		xhci_debug_ring(xhci, xhci->cmd_ring);
+		xhci_dbg(xhci, "Event ring:\n");
+		xhci_debug_ring(xhci, xhci->event_ring);
+
 		/* cancel the address device command */
 		ret = xhci_cancel_cmd(xhci, NULL, cmd_trb);
 		if (ret < 0)
@@ -3760,6 +3843,14 @@
 		break;
 	}
 	if (ret) {
+		/* When this issue reproduce. The controller was already very
+		 * unstable. And easy to met fabric error. So do soft-reset for
+		 * the controller.
+		 */
+		if (xhci->quirks | XHCI_RESET && xhci->reset_hcd_work) {
+			xhci_dbg(xhci, "Trying to reset xHCI host controller.\n");
+			schedule_work(xhci->reset_hcd_work);
+		}
 		return ret;
 	}
 	temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
@@ -3815,6 +3906,56 @@
 	return raw_port;
 }
 
+/*
+ * Issue an Evaluate Context command to change the Maximum Exit Latency in the
+ * slot context.  If that succeeds, store the new MEL in the xhci_virt_device.
+ */
+static int xhci_change_max_exit_latency(struct xhci_hcd *xhci,
+			struct usb_device *udev, u16 max_exit_latency)
+{
+	struct xhci_virt_device *virt_dev;
+	struct xhci_command *command;
+	struct xhci_input_control_ctx *ctrl_ctx;
+	struct xhci_slot_ctx *slot_ctx;
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	if (max_exit_latency == xhci->devs[udev->slot_id]->current_mel) {
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		return 0;
+	}
+
+	/* Attempt to issue an Evaluate Context command to change the MEL. */
+	virt_dev = xhci->devs[udev->slot_id];
+	command = xhci->lpm_command;
+	xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx);
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	ctrl_ctx = xhci_get_input_control_ctx(xhci, command->in_ctx);
+	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
+	slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
+	slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT));
+	slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency);
+
+	xhci_dbg(xhci, "Set up evaluate context for LPM MEL change.\n");
+	xhci_dbg(xhci, "Slot %u Input Context:\n", udev->slot_id);
+	xhci_dbg_ctx(xhci, command->in_ctx, 0);
+
+	/* Issue and wait for the evaluate context command. */
+	ret = xhci_configure_endpoint(xhci, udev, command,
+			true, true);
+	xhci_dbg(xhci, "Slot %u Output Context:\n", udev->slot_id);
+	xhci_dbg_ctx(xhci, virt_dev->out_ctx, 0);
+
+	if (!ret) {
+		spin_lock_irqsave(&xhci->lock, flags);
+		virt_dev->current_mel = max_exit_latency;
+		spin_unlock_irqrestore(&xhci->lock, flags);
+	}
+	return ret;
+}
+
 #ifdef CONFIG_PM_RUNTIME
 
 /* BESL to HIRD Encoding array for USB2 LPM */
@@ -3855,7 +3996,27 @@
 
 	return besl;
 }
+/* Calculate BESLD, L1 timeout and HIRDM for USB2 PORTHLPMC */
+static int xhci_calculate_usb2_hw_lpm_params(struct usb_device *udev)
+{
+	u32 field;
+	int l1;
+	int besld = 0;
+	int hirdm = 0;
 
+	field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
+
+	/* xHCI l1 is set in steps of 256us, xHCI 1.0 section 5.4.11.2 */
+	l1 = udev->l1_params.timeout / 256;
+
+	/* device has preferred BESLD */
+	if (field & USB_BESL_DEEP_VALID) {
+		besld = USB_GET_BESL_DEEP(field);
+		hirdm = 1;
+	}
+
+	return PORT_BESLD(besld) | PORT_L1_TIMEOUT(l1) | PORT_HIRDM(hirdm);
+}
 static int xhci_usb2_software_lpm_test(struct usb_hcd *hcd,
 					struct usb_device *udev)
 {
@@ -3911,7 +4072,7 @@
 	 * Check device's USB 2.0 extension descriptor to determine whether
 	 * HIRD or BESL shoule be used. See USB2.0 LPM errata.
 	 */
-	pm_addr = port_array[port_num] + 1;
+	pm_addr = port_array[port_num] + PORTPMSC;
 	hird = xhci_calculate_hird_besl(xhci, udev);
 	temp = PORT_L1DS(udev->slot_id) | PORT_HIRD(hird);
 	xhci_writel(xhci, temp, pm_addr);
@@ -3988,11 +4149,12 @@
 {
 	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
 	__le32 __iomem	**port_array;
-	__le32 __iomem	*pm_addr;
-	u32		temp;
+	__le32 __iomem	*pm_addr, *hlpm_addr;
+	u32		pm_val, hlpm_val, field;
 	unsigned int	port_num;
 	unsigned long	flags;
-	int		hird;
+	int		hird, exit_latency;
+	int		ret;
 
 	if (hcd->speed == HCD_USB3 || !xhci->hw_lpm_support ||
 			!udev->lpm_capable)
@@ -4009,40 +4171,125 @@
 
 	port_array = xhci->usb2_ports;
 	port_num = udev->portnum - 1;
-	pm_addr = port_array[port_num] + 1;
-	temp = xhci_readl(xhci, pm_addr);
+	pm_addr = port_array[port_num] + PORTPMSC;
+	pm_val = xhci_readl(xhci, pm_addr);
+	hlpm_addr = port_array[port_num] + PORTHLPMC;
+	field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
 
 	xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
 			enable ? "enable" : "disable", port_num);
 
-	hird = xhci_calculate_hird_besl(xhci, udev);
-
 	if (enable) {
-		temp &= ~PORT_HIRD_MASK;
-		temp |= PORT_HIRD(hird) | PORT_RWE;
-		xhci_writel(xhci, temp, pm_addr);
-		temp = xhci_readl(xhci, pm_addr);
-		temp |= PORT_HLE;
-		xhci_writel(xhci, temp, pm_addr);
+		/* Host supports BESL timeout instead of HIRD */
+		if (udev->usb2_hw_lpm_besl_capable) {
+			/* if device doesn't have a preferred BESL value use a
+			 * default one which works with mixed HIRD and BESL
+			 * systems. See XHCI_DEFAULT_BESL definition in xhci.h
+			 */
+			if ((field & USB_BESL_SUPPORT) &&
+			    (field & USB_BESL_BASELINE_VALID))
+				hird = USB_GET_BESL_BASELINE(field);
+			else
+				hird = udev->l1_params.besl;
+
+			exit_latency = xhci_besl_encoding[hird];
+			spin_unlock_irqrestore(&xhci->lock, flags);
+
+			/* USB 3.0 code dedicate one xhci->lpm_command->in_ctx
+			 * input context for link powermanagement evaluate
+			 * context commands. It is protected by hcd->bandwidth
+			 * mutex and is shared by all devices. We need to set
+			 * the max ext latency in USB 2 BESL LPM as well, so
+			 * use the same mutex and xhci_change_max_exit_latency()
+			 */
+			mutex_lock(hcd->bandwidth_mutex);
+			ret = xhci_change_max_exit_latency(xhci, udev,
+							   exit_latency);
+			mutex_unlock(hcd->bandwidth_mutex);
+
+			if (ret < 0)
+				return ret;
+			spin_lock_irqsave(&xhci->lock, flags);
+
+			hlpm_val = xhci_calculate_usb2_hw_lpm_params(udev);
+			xhci_writel(xhci, hlpm_val, hlpm_addr);
+			/* flush write */
+			xhci_readl(xhci, hlpm_addr);
+		} else
+			hird = xhci_calculate_hird_besl(xhci, udev);
+
+		pm_val &= ~PORT_HIRD_MASK;
+		pm_val |= PORT_HIRD(hird) | PORT_RWE | PORT_L1DS(udev->slot_id);
+		xhci_writel(xhci, pm_val, pm_addr);
+		pm_val = xhci_readl(xhci, pm_addr);
+		pm_val |= PORT_HLE;
+		xhci_writel(xhci, pm_val, pm_addr);
+		/* flush write */
+		xhci_readl(xhci, pm_addr);
 	} else {
-		temp &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK);
-		xhci_writel(xhci, temp, pm_addr);
+		pm_val &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK
+				| PORT_L1DS_MASK);
+		xhci_writel(xhci, pm_val, pm_addr);
+		/* flush write */
+		xhci_readl(xhci, pm_addr);
+		if (udev->usb2_hw_lpm_besl_capable) {
+			spin_unlock_irqrestore(&xhci->lock, flags);
+			mutex_lock(hcd->bandwidth_mutex);
+			xhci_change_max_exit_latency(xhci, udev, 0);
+			mutex_unlock(hcd->bandwidth_mutex);
+			return 0;
+		}
 	}
 
 	spin_unlock_irqrestore(&xhci->lock, flags);
 	return 0;
 }
 
+/* check if a usb2 port supports a given extened capability protocol
+ * only USB2 ports extended protocol capability values are cached.
+ * Return 1 if capability is supported
+ */
+static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port,
+					   unsigned capability)
+{
+	u32 port_offset, port_count;
+	int i;
+
+	for (i = 0; i < xhci->num_ext_caps; i++) {
+		if (xhci->ext_caps[i] & capability) {
+			/* port offsets starts at 1 */
+			port_offset = XHCI_EXT_PORT_OFF(xhci->ext_caps[i]) - 1;
+			port_count = XHCI_EXT_PORT_COUNT(xhci->ext_caps[i]);
+			if (port >= port_offset &&
+				port < port_offset + port_count)
+					return 1;
+		}
+	}
+	return 0;
+}
+
 int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
 {
 	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
 	int		ret;
+	int		portnum = udev->portnum - 1;
+
+	/* Just return if XHCI_LPM_DISABLE_QUIRK set */
+	if (xhci->quirks & XHCI_LPM_DISABLE_QUIRK)
+		return 0;
 
 	ret = xhci_usb2_software_lpm_test(hcd, udev);
 	if (!ret) {
 		xhci_dbg(xhci, "software LPM test succeed\n");
-		if (xhci->hw_lpm_support == 1) {
+		if (xhci->hw_lpm_support == 1 &&
+				xhci_check_usb2_port_capability(xhci,
+				portnum, XHCI_HLC)) {
 			udev->usb2_hw_lpm_capable = 1;
+			udev->l1_params.timeout = XHCI_L1_TIMEOUT;
+			udev->l1_params.besl = XHCI_DEFAULT_BESL;
+			if (xhci_check_usb2_port_capability(xhci, portnum,
+						XHCI_BLC))
+				udev->usb2_hw_lpm_besl_capable = 1;
 			ret = xhci_set_usb2_hardware_lpm(hcd, udev, 1);
 			if (!ret)
 				udev->usb2_hw_lpm_enabled = 1;
@@ -4373,56 +4620,6 @@
 	return timeout;
 }
 
-/*
- * Issue an Evaluate Context command to change the Maximum Exit Latency in the
- * slot context.  If that succeeds, store the new MEL in the xhci_virt_device.
- */
-static int xhci_change_max_exit_latency(struct xhci_hcd *xhci,
-			struct usb_device *udev, u16 max_exit_latency)
-{
-	struct xhci_virt_device *virt_dev;
-	struct xhci_command *command;
-	struct xhci_input_control_ctx *ctrl_ctx;
-	struct xhci_slot_ctx *slot_ctx;
-	unsigned long flags;
-	int ret;
-
-	spin_lock_irqsave(&xhci->lock, flags);
-	if (max_exit_latency == xhci->devs[udev->slot_id]->current_mel) {
-		spin_unlock_irqrestore(&xhci->lock, flags);
-		return 0;
-	}
-
-	/* Attempt to issue an Evaluate Context command to change the MEL. */
-	virt_dev = xhci->devs[udev->slot_id];
-	command = xhci->lpm_command;
-	xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx);
-	spin_unlock_irqrestore(&xhci->lock, flags);
-
-	ctrl_ctx = xhci_get_input_control_ctx(xhci, command->in_ctx);
-	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
-	slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
-	slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT));
-	slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency);
-
-	xhci_dbg(xhci, "Set up evaluate context for LPM MEL change.\n");
-	xhci_dbg(xhci, "Slot %u Input Context:\n", udev->slot_id);
-	xhci_dbg_ctx(xhci, command->in_ctx, 0);
-
-	/* Issue and wait for the evaluate context command. */
-	ret = xhci_configure_endpoint(xhci, udev, command,
-			true, true);
-	xhci_dbg(xhci, "Slot %u Output Context:\n", udev->slot_id);
-	xhci_dbg_ctx(xhci, virt_dev->out_ctx, 0);
-
-	if (!ret) {
-		spin_lock_irqsave(&xhci->lock, flags);
-		virt_dev->current_mel = max_exit_latency;
-		spin_unlock_irqrestore(&xhci->lock, flags);
-	}
-	return ret;
-}
-
 static int calculate_max_exit_latency(struct usb_device *udev,
 		enum usb3_link_state state_changed,
 		u16 hub_encoded_timeout)
@@ -4697,6 +4894,13 @@
 
 	get_quirks(dev, xhci);
 
+	/* In xhci controllers which follow xhci 1.0 spec gives a spurious
+	 * success event after a short transfer. This quirk will ignore such
+	 * spurious event.
+	 */
+	if (xhci->hci_version > 0x96)
+		xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
+
 	/* Make sure the HC is halted. */
 	retval = xhci_halt(xhci);
 	if (retval)
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 77600ce..05fc32e 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -132,6 +132,11 @@
 /* Number of registers per port */
 #define	NUM_PORT_REGS	4
 
+#define PORTSC		0
+#define PORTPMSC	1
+#define PORTLI		2
+#define PORTHLPMC	3
+
 /**
  * struct xhci_op_regs - xHCI Host Controller Operational Registers.
  * @command:		USBCMD - xHC command register
@@ -278,6 +283,8 @@
 #define XDEV_U0		(0x0 << 5)
 #define XDEV_U2		(0x2 << 5)
 #define XDEV_U3		(0x3 << 5)
+#define XDEV_COMP	(0xa << 5)
+#define XDEV_LOOPBACK	(0xb << 5)
 #define XDEV_RESUME	(0xf << 5)
 /* true: port has power (see HCC_PPC) */
 #define PORT_POWER	(1 << 9)
@@ -378,9 +385,30 @@
 #define	PORT_RWE		(1 << 3)
 #define	PORT_HIRD(p)		(((p) & 0xf) << 4)
 #define	PORT_HIRD_MASK		(0xf << 4)
+#define	PORT_L1DS_MASK		(0xff << 8)
 #define	PORT_L1DS(p)		(((p) & 0xff) << 8)
 #define	PORT_HLE		(1 << 16)
 
+/* USB2 Protocol PORTHLPMC */
+#define PORT_HIRDM(p)((p) & 3)
+#define PORT_L1_TIMEOUT(p)(((p) & 0xff) << 2)
+#define PORT_BESLD(p)(((p) & 0xf) << 10)
+
+/* use 512 microseconds as USB2 LPM L1 default timeout. */
+#define XHCI_L1_TIMEOUT		512
+
+/* Set default HIRD/BESL value to 4 (350/400us) for USB2 L1 LPM resume latency.
+ * Safe to use with mixed HIRD and BESL systems (host and device) and is used
+ * by other operating systems.
+ *
+ * XHCI 1.0 errata 8/14/12 Table 13 notes:
+ * "Software should choose xHC BESL/BESLD field values that do not violate a
+ * device's resume latency requirements,
+ * e.g. not program values > '4' if BLC = '1' and a HIRD device is attached,
+ * or not program values < '4' if BLC = '0' and a BESL device is attached.
+ */
+#define XHCI_DEFAULT_BESL	4
+
 /**
  * struct xhci_intr_reg - Interrupt Register Set
  * @irq_pending:	IMAN - Interrupt Management Register.  Used to enable
@@ -837,8 +865,6 @@
 #define EP_GETTING_NO_STREAMS	(1 << 5)
 	/* ----  Related to URB cancellation ---- */
 	struct list_head	cancelled_td_list;
-	/* The TRB that was last reported in a stopped endpoint ring */
-	union xhci_trb		*stopped_trb;
 	struct xhci_td		*stopped_td;
 	unsigned int		stopped_stream;
 	/* Watchdog timer for stop endpoint command to cancel URBs */
@@ -1262,6 +1288,7 @@
 
 /* xHCI command default timeout value */
 #define XHCI_CMD_DEFAULT_TIMEOUT	(5 * HZ)
+#define XHCI_WAIT_CMD_RING_READY_TIMEOUT 10 /* 2s*/
 
 /* command descriptor */
 struct xhci_cd {
@@ -1448,7 +1475,9 @@
 	/* Store LPM test failed devices' information */
 	struct list_head	lpm_failed_devs;
 
-	/* slot enabling and address device helpers */
+	/* slot enabling helpers */
+	struct completion	enable_slot;
+	/* address device helpers */
 	struct completion	addr_dev;
 	int slot_id;
 	/* For USB 3.0 LPM enable/disable. */
@@ -1516,6 +1545,12 @@
 #define XHCI_SPURIOUS_REBOOT	(1 << 13)
 #define XHCI_COMP_MODE_QUIRK	(1 << 14)
 #define XHCI_AVOID_BEI		(1 << 15)
+#define XHCI_PLAT		(1 << 16)
+#define XHCI_PORT_DISABLE_QUIRK	(1 << 17)
+#define XHCI_LPM_DISABLE_QUIRK	(1 << 18)
+#define XHCI_COMP_PLC_QUIRK		(1 << 19)
+#define XHCI_RESET		(1 << 20)
+#define XHCI_PORT_RESET		(1 << 21)
 	unsigned int		num_active_eps;
 	unsigned int		limit_active_eps;
 	/* There are two roothubs to keep track of bus suspend info for */
@@ -1532,11 +1567,18 @@
 	unsigned		sw_lpm_support:1;
 	/* support xHCI 1.0 spec USB2 hardware LPM */
 	unsigned		hw_lpm_support:1;
+	/* cached usb2 extened protocol capabilites */
+	u32			*ext_caps;
+	unsigned int		num_ext_caps;
 	/* Compliance Mode Recovery Data */
 	struct timer_list	comp_mode_recovery_timer;
 	u32			port_status_u0;
 /* Compliance Mode Timer Triggered every 2 seconds */
 #define COMP_MODE_RCVRY_MSECS 2000
+	struct work_struct	pm_check;
+	int			pm_check_flag;
+	void __iomem		*pmc_base_addr;
+	struct work_struct	*reset_hcd_work;
 };
 
 /* convert between an HCD pointer and the corresponding EHCI_HCD */
@@ -1746,6 +1788,7 @@
 int xhci_get_frame(struct usb_hcd *hcd);
 irqreturn_t xhci_irq(struct usb_hcd *hcd);
 irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd);
+irqreturn_t xhci_byt_pm_irq(int irq, struct usb_hcd *hcd);
 int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev);
 void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev);
 int xhci_alloc_tt_info(struct xhci_hcd *xhci,
@@ -1820,6 +1863,7 @@
 		union xhci_trb *cmd_trb);
 void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id,
 		unsigned int ep_index, unsigned int stream_id);
+union xhci_trb *xhci_find_next_enqueue(struct xhci_ring *ring);
 
 /* xHCI roothub code */
 void xhci_set_link_state(struct xhci_hcd *xhci, __le32 __iomem **port_array,
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index a51e7d6..e6b4a65 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -235,3 +235,14 @@
        depends on I2C
        help
          This option enables support for SMSC USB3503 HSIC to USB 2.0 Driver.
+
+config USB_TEST_MODE
+	tristate "USB Test Mode support"
+	depends on USB
+	help
+	  Say Y here if you want to build USB test mode driver in kernel.
+	  This driver implements test mode initiated by test device with
+	  VID 0x1A0A.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called usb_tm.
diff --git a/drivers/usb/misc/Makefile b/drivers/usb/misc/Makefile
index 3e1bd70..9fb82ea 100644
--- a/drivers/usb/misc/Makefile
+++ b/drivers/usb/misc/Makefile
@@ -27,5 +27,6 @@
 obj-$(CONFIG_USB_SEVSEG)		+= usbsevseg.o
 obj-$(CONFIG_USB_YUREX)			+= yurex.o
 obj-$(CONFIG_USB_HSIC_USB3503)		+= usb3503.o
+obj-$(CONFIG_USB_TEST_MODE)		+= usb2_test_mode.o
 
 obj-$(CONFIG_USB_SISUSBVGA)		+= sisusbvga/
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
index c21386e..de98906 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.c
+++ b/drivers/usb/misc/sisusbvga/sisusb.c
@@ -3247,6 +3247,7 @@
 	{ USB_DEVICE(0x0711, 0x0903) },
 	{ USB_DEVICE(0x0711, 0x0918) },
 	{ USB_DEVICE(0x0711, 0x0920) },
+	{ USB_DEVICE(0x0711, 0x0950) },
 	{ USB_DEVICE(0x182d, 0x021c) },
 	{ USB_DEVICE(0x182d, 0x0269) },
 	{ }
diff --git a/drivers/usb/misc/usb2_test_mode.c b/drivers/usb/misc/usb2_test_mode.c
new file mode 100644
index 0000000..ddfcadc
--- /dev/null
+++ b/drivers/usb/misc/usb2_test_mode.c
@@ -0,0 +1,191 @@
+/*
+ * USB2 Test Mode driver
+ * Copyright (C) 2011, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+/* This driver supports USB Host Test Mode initiated by test device with
+ * VID 0x1a0a for USB controller in high-speed.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+
+#include "../core/usb.h"
+
+struct usb_tm_dev {
+	struct usb_device	*udev;
+	struct usb_hcd		*hcd;
+
+#define TBUF_SIZE	256
+	u8			*buf;
+};
+
+static int
+usb_tm_probe(struct usb_interface *intf, const struct usb_device_id *id)
+{
+	struct usb_tm_dev	*dev;
+	int retval, port_num;
+
+	dev_dbg(&intf->dev, "USB test mode is initiated.\n");
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	dev->buf = kmalloc(TBUF_SIZE, GFP_KERNEL);
+	if (!dev->buf) {
+		kfree(dev);
+		return -ENOMEM;
+	}
+
+	dev->udev = usb_get_dev(interface_to_usbdev(intf));
+	dev->hcd = usb_get_hcd(bus_to_hcd(dev->udev->bus));
+	usb_set_intfdata(intf, dev);
+	port_num = dev->udev->portnum & 0xff;
+
+	dev_dbg(&intf->dev, "test mode PID 0x%04x\n",
+		le16_to_cpu(dev->udev->descriptor.idProduct));
+	switch (le16_to_cpu(dev->udev->descriptor.idProduct)) {
+	case 0x0101:
+		/* TEST_SE0_NAK */
+		dev->hcd->driver->hub_control(dev->hcd, SetPortFeature,
+			USB_PORT_FEAT_TEST, 0x300 + port_num, NULL, 0);
+		break;
+	case 0x0102:
+		/* TEST_J */
+		dev->hcd->driver->hub_control(dev->hcd, SetPortFeature,
+			USB_PORT_FEAT_TEST, 0x100 + port_num, NULL, 0);
+		break;
+	case 0x0103:
+		/* TEST_K */
+		dev->hcd->driver->hub_control(dev->hcd, SetPortFeature,
+			USB_PORT_FEAT_TEST, 0x200 + port_num, NULL, 0);
+		break;
+	case 0x0104:
+		/* TEST_PACKET */
+		dev->hcd->driver->hub_control(dev->hcd, SetPortFeature,
+			USB_PORT_FEAT_TEST, 0x400 + port_num, NULL, 0);
+		break;
+	case 0x0106:
+		/* HS_HOST_PORT_SUSPEND_RESUME */
+		msleep(15000);
+		dev->hcd->driver->hub_control(dev->hcd, SetPortFeature,
+			USB_PORT_FEAT_SUSPEND, port_num, NULL, 0);
+		msleep(15000);
+		dev->hcd->driver->hub_control(dev->hcd, ClearPortFeature,
+			USB_PORT_FEAT_SUSPEND, port_num, NULL, 0);
+		break;
+	case 0x0107:
+		/* SINGLE_STEP_GET_DEV_DESC */
+		msleep(15000);
+		retval = usb_control_msg(dev->udev,
+				usb_rcvctrlpipe(dev->udev, 0),
+				USB_REQ_GET_DESCRIPTOR,
+				USB_DIR_IN | USB_RECIP_DEVICE,
+				cpu_to_le16(USB_DT_DEVICE << 8),
+				0, dev->buf,
+				USB_DT_DEVICE_SIZE,
+				USB_CTRL_GET_TIMEOUT);
+		break;
+	case 0x0108:
+		/* SINGLE_STEP_SET_FEATURE */
+
+		/* FIXME */
+		/* set size = 0 to ignore DATA phase */
+		retval = usb_control_msg(dev->udev,
+				usb_rcvctrlpipe(dev->udev, 0),
+				USB_REQ_GET_DESCRIPTOR,
+				USB_DIR_IN | USB_RECIP_DEVICE,
+				cpu_to_le16(USB_DT_DEVICE << 8),
+				0, dev->buf, 0,
+				USB_CTRL_GET_TIMEOUT);
+		msleep(15000);
+		retval = usb_control_msg(dev->udev,
+				usb_rcvctrlpipe(dev->udev, 0),
+				USB_REQ_GET_DESCRIPTOR,
+				USB_DIR_IN | USB_RECIP_DEVICE,
+				cpu_to_le16(USB_DT_DEVICE << 8),
+				0, dev->buf,
+				USB_DT_DEVICE_SIZE,
+				USB_CTRL_GET_TIMEOUT);
+		break;
+	default:
+		dev_info(&intf->dev, "unknown test mode with PID 0x%04x",
+			id->idProduct);
+	}
+
+	return 0;
+}
+
+static void usb_tm_disconnect(struct usb_interface *intf)
+{
+	struct usb_tm_dev	*dev = usb_get_intfdata(intf);
+
+	usb_put_hcd(dev->hcd);
+	usb_put_dev(dev->udev);
+	usb_set_intfdata(intf, NULL);
+	dev_dbg(&intf->dev, "disconnect\n");
+	kfree(dev->buf);
+	kfree(dev);
+}
+
+static const struct usb_device_id id_table[] = {
+	/* USB Test Device */
+	{ .match_flags = USB_DEVICE_ID_MATCH_VENDOR,
+		.idVendor = 0x1A0A,
+		},
+
+	{ }
+};
+MODULE_DEVICE_TABLE(usb, id_table);
+
+static struct usb_driver usb_tm_driver = {
+	.name =		"usb_tm",
+	.id_table =	id_table,
+	.probe =	usb_tm_probe,
+	.disconnect =	usb_tm_disconnect,
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int __init usb_tm_init(void)
+{
+	int result;
+
+	result = usb_register(&usb_tm_driver);
+	if (result)
+		pr_err("usb_tm: usb_register failed. error number %d",
+			result);
+
+	return result;
+}
+module_init(usb_tm_init);
+
+static void __exit usb_tm_exit(void)
+{
+	usb_deregister(&usb_tm_driver);
+}
+module_exit(usb_tm_exit);
+
+MODULE_DESCRIPTION("USB Test Mode Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index 700d572..c6849f2 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -1,6 +1,14 @@
 #
 # Physical Layer USB driver configuration
 #
+config USB_OTG_WAKELOCK
+	bool "Hold a wakelock when USB connected"
+	depends on WAKELOCK
+	select USB_OTG_UTILS
+	help
+	  Select this to automatically hold a wakelock when USB is
+	  connected, preventing suspend.
+
 menuconfig USB_PHY
 	bool "USB Physical Layer drivers"
 	help
@@ -16,14 +24,6 @@
 	  If you're not sure if this applies to you, it probably doesn't;
 	  say N here.
 
-config USB_OTG_WAKELOCK
-	bool "Hold a wakelock when USB connected"
-	depends on WAKELOCK
-	select USB_OTG_UTILS
-	help
-	  Select this to automatically hold a wakelock when USB is
-	  connected, preventing suspend.
-
 if USB_PHY
 
 #
diff --git a/drivers/usb/phy/Makefile b/drivers/usb/phy/Makefile
index c70afde..567a0e7 100644
--- a/drivers/usb/phy/Makefile
+++ b/drivers/usb/phy/Makefile
@@ -30,6 +30,7 @@
 obj-$(CONFIG_USB_MSM_OTG)		+= phy-msm-usb.o
 obj-$(CONFIG_USB_MV_OTG)		+= phy-mv-usb.o
 obj-$(CONFIG_USB_MXS_PHY)		+= phy-mxs-usb.o
+obj-$(CONFIG_USB_PENWELL_OTG)		+= penwell_otg.o
 obj-$(CONFIG_USB_RCAR_PHY)		+= phy-rcar-usb.o
 obj-$(CONFIG_USB_ULPI)			+= phy-ulpi.o
 obj-$(CONFIG_USB_ULPI_VIEWPORT)		+= phy-ulpi-viewport.o
diff --git a/drivers/usb/phy/phy-fsl-usb.h b/drivers/usb/phy/phy-fsl-usb.h
index ca26628..e1859b8 100644
--- a/drivers/usb/phy/phy-fsl-usb.h
+++ b/drivers/usb/phy/phy-fsl-usb.h
@@ -15,7 +15,7 @@
  * 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
-#include "otg_fsm.h"
+#include "phy-fsm-usb.h"
 #include <linux/usb/otg.h>
 #include <linux/ioctl.h>
 
diff --git a/drivers/usb/phy/phy-fsm-usb.c b/drivers/usb/phy/phy-fsm-usb.c
index c520b35..7f45966 100644
--- a/drivers/usb/phy/phy-fsm-usb.c
+++ b/drivers/usb/phy/phy-fsm-usb.c
@@ -29,7 +29,7 @@
 #include <linux/usb/gadget.h>
 #include <linux/usb/otg.h>
 
-#include "phy-otg-fsm.h"
+#include "phy-fsm-usb.h"
 
 /* Change USB protocol when there is a protocol change */
 static int otg_set_protocol(struct otg_fsm *fsm, int protocol)
diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c
index a9984c7..d94569d 100644
--- a/drivers/usb/phy/phy.c
+++ b/drivers/usb/phy/phy.c
@@ -137,10 +137,13 @@
 
 	get_device(phy->dev);
 
-err0:
 	spin_unlock_irqrestore(&phy_lock, flags);
 
 	return phy;
+err0:
+	spin_unlock_irqrestore(&phy_lock, flags);
+
+	return NULL;
 }
 EXPORT_SYMBOL_GPL(usb_get_phy);
 
@@ -309,6 +312,88 @@
 }
 EXPORT_SYMBOL_GPL(usb_put_phy);
 
+static ssize_t
+a_bus_drop_show(struct device *_dev, struct device_attribute *attr, char *buf)
+{
+	unsigned size, len;
+	unsigned char *str;
+	struct usb_phy *_phy;
+
+	_phy = usb_get_phy(USB_PHY_TYPE_USB2);
+	if (!_phy)
+		return -ENODEV;
+
+	switch (_phy->vbus_state) {
+	case VBUS_DISABLED:
+		str = "1\n";
+		break;
+	case VBUS_ENABLED:
+		str = "0\n";
+		break;
+	case UNKNOW_STATE:
+	default:
+		str = "unkown\n";
+		break;
+	}
+
+	size = PAGE_SIZE;
+
+	len = strlen(str);
+	strncpy(buf, str, len);
+	buf[len + 1] = '\0';
+
+	size -= len;
+
+	return PAGE_SIZE - size;
+}
+
+static ssize_t a_bus_drop_store(struct device *_dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct usb_phy *_phy;
+	int trigger;
+
+	_phy = usb_get_phy(USB_PHY_TYPE_USB2);
+	if (!_phy)
+		return -ENODEV;
+
+	if (!buf || !count)
+		return -EINVAL;
+
+	if (!strncmp(buf, "1", strlen("1"))) {
+		trigger = 1;
+		_phy->vbus_state = VBUS_DISABLED;
+	} else if (!strncmp(buf, "0", strlen("0"))) {
+		_phy->vbus_state = VBUS_ENABLED;
+		trigger = 1;
+	} else
+		return -EINVAL;
+
+	if (trigger && _phy->a_bus_drop)
+		_phy->a_bus_drop(_phy);
+
+	return count;
+}
+
+static DEVICE_ATTR(a_bus_drop, S_IRUGO|S_IWUSR|S_IWGRP,
+		a_bus_drop_show, a_bus_drop_store);
+
+void otg_uevent_trigger(struct usb_phy *otg)
+{
+	char *uevent_envp[2] = { "USB_WARNING=HOST_NO_WORK", NULL };
+
+	printk(KERN_INFO"%s: send uevent USB_OTG=HOST_NO_WORK\n", __func__);
+	kobject_uevent_env(&otg->class_dev->kobj, KOBJ_CHANGE, uevent_envp);
+}
+EXPORT_SYMBOL(otg_uevent_trigger);
+
+static struct device_attribute *otg_dev_attributes[] = {
+	&dev_attr_a_bus_drop,
+	NULL,
+};
+
+
+
 /**
  * usb_add_phy - declare the USB PHY
  * @x: the USB phy to be used; or NULL
@@ -323,6 +408,8 @@
 	int		ret = 0;
 	unsigned long	flags;
 	struct usb_phy	*phy;
+	struct device_attribute **attrs = otg_dev_attributes;
+	struct device_attribute *attr;
 
 	if (x->type != USB_PHY_TYPE_UNDEFINED) {
 		dev_err(x->dev, "not accepting initialized PHY %s\n", x->label);
@@ -333,6 +420,7 @@
 
 	list_for_each_entry(phy, &phy_list, head) {
 		if (phy->type == type) {
+			spin_unlock_irqrestore(&phy_lock, flags);
 			ret = -EBUSY;
 			dev_err(x->dev, "transceiver type %s already exists\n",
 						usb_phy_type_string(type));
@@ -343,8 +431,37 @@
 	x->type = type;
 	list_add_tail(&x->head, &phy_list);
 
+	if (type == USB_PHY_TYPE_USB2) {
+		spin_unlock_irqrestore(&phy_lock, flags);
+		x->usb_otg_class = class_create(NULL, "usb_otg");
+		if (IS_ERR(x->usb_otg_class))
+			return -EFAULT;
+
+		x->class_dev = device_create(x->usb_otg_class, x->dev,
+				MKDEV(0, 0), NULL, "otg0");
+		if (IS_ERR(x->class_dev)) {
+			ret = -EFAULT;
+			goto err2;
+		}
+
+		while ((attr = *attrs++)) {
+			ret = device_create_file(x->class_dev, attr);
+			if (ret)
+				goto err1;
+		}
+
+	} else
+		spin_unlock_irqrestore(&phy_lock, flags);
+
+	goto out;
+
+err1:
+	device_destroy(x->usb_otg_class, x->class_dev->devt);
+
+err2:
+	class_destroy(x->usb_otg_class);
+
 out:
-	spin_unlock_irqrestore(&phy_lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(usb_add_phy);
@@ -391,7 +508,14 @@
 	struct usb_phy_bind *phy_bind;
 
 	spin_lock_irqsave(&phy_lock, flags);
+
 	if (x) {
+		if (x->class_dev && x->class_dev->devt)
+			device_destroy(x->usb_otg_class, x->class_dev->devt);
+
+		if (x->usb_otg_class)
+			class_destroy(x->usb_otg_class);
+
 		list_for_each_entry(phy_bind, &phy_bind_list, list)
 			if (phy_bind->phy == x)
 				phy_bind->phy = NULL;
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 2c65955..c90d960 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -53,6 +53,7 @@
 	{ USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
 	{ USB_DEVICE(0x0489, 0xE003) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
 	{ USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */
+	{ USB_DEVICE(0x0846, 0x1100) }, /* NetGear Managed Switch M4100 series, M5300 series, M7100 series */
 	{ USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
 	{ USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
 	{ USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */
@@ -118,6 +119,8 @@
 	{ USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
 	{ USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */
 	{ USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
+	{ USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
+	{ USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
 	{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
 	{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
 	{ USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
@@ -148,6 +151,7 @@
 	{ USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
 	{ USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
 	{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
+	{ USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
 	{ USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
 	{ USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */
 	{ USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 7260ec6..aa3aed5 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -735,9 +735,34 @@
 	{ USB_DEVICE(FTDI_VID, FTDI_NDI_AURORA_SCU_PID),
 		.driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk },
 	{ USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) },
-	{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_SERIAL_VX7_PID) },
-	{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_CT29B_PID) },
-	{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_RTS01_PID) },
+	{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S03_PID) },
+	{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_59_PID) },
+	{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_57A_PID) },
+	{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_57B_PID) },
+	{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_29A_PID) },
+	{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_29B_PID) },
+	{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_29F_PID) },
+	{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_62B_PID) },
+	{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S01_PID) },
+	{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_63_PID) },
+	{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_29C_PID) },
+	{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_81B_PID) },
+	{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_82B_PID) },
+	{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_K5D_PID) },
+	{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_K4Y_PID) },
+	{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_K5G_PID) },
+	{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S05_PID) },
+	{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_60_PID) },
+	{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_61_PID) },
+	{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_62_PID) },
+	{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_63B_PID) },
+	{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_64_PID) },
+	{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_65_PID) },
+	{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_92_PID) },
+	{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_92D_PID) },
+	{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_W5R_PID) },
+	{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_A5R_PID) },
+	{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_PW1_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) },
 	{ USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) },
@@ -881,6 +906,7 @@
 	{ USB_DEVICE(FTDI_VID, FTDI_LUMEL_PD12_PID) },
 	/* Crucible Devices */
 	{ USB_DEVICE(FTDI_VID, FTDI_CT_COMET_PID) },
+	{ USB_DEVICE(FTDI_VID, FTDI_Z3X_PID) },
 	{ },					/* Optional parameter entry */
 	{ }					/* Terminating entry */
 };
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 6dd7925..a7019d1 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -815,11 +815,35 @@
 /*
  * RT Systems programming cables for various ham radios
  */
-#define RTSYSTEMS_VID			0x2100	/* Vendor ID */
-#define RTSYSTEMS_SERIAL_VX7_PID	0x9e52	/* Serial converter for VX-7 Radios using FT232RL */
-#define RTSYSTEMS_CT29B_PID		0x9e54	/* CT29B Radio Cable */
-#define RTSYSTEMS_RTS01_PID		0x9e57	/* USB-RTS01 Radio Cable */
-
+#define RTSYSTEMS_VID		0x2100	/* Vendor ID */
+#define RTSYSTEMS_USB_S03_PID	0x9001	/* RTS-03 USB to Serial Adapter */
+#define RTSYSTEMS_USB_59_PID	0x9e50	/* USB-59 USB to 8 pin plug */
+#define RTSYSTEMS_USB_57A_PID	0x9e51	/* USB-57A USB to 4pin 3.5mm plug */
+#define RTSYSTEMS_USB_57B_PID	0x9e52	/* USB-57B USB to extended 4pin 3.5mm plug */
+#define RTSYSTEMS_USB_29A_PID	0x9e53	/* USB-29A USB to 3.5mm stereo plug */
+#define RTSYSTEMS_USB_29B_PID	0x9e54	/* USB-29B USB to 6 pin mini din */
+#define RTSYSTEMS_USB_29F_PID	0x9e55	/* USB-29F USB to 6 pin modular plug */
+#define RTSYSTEMS_USB_62B_PID	0x9e56	/* USB-62B USB to 8 pin mini din plug*/
+#define RTSYSTEMS_USB_S01_PID	0x9e57	/* USB-RTS01 USB to 3.5 mm stereo plug*/
+#define RTSYSTEMS_USB_63_PID	0x9e58	/* USB-63 USB to 9 pin female*/
+#define RTSYSTEMS_USB_29C_PID	0x9e59	/* USB-29C USB to 4 pin modular plug*/
+#define RTSYSTEMS_USB_81B_PID	0x9e5A	/* USB-81 USB to 8 pin mini din plug*/
+#define RTSYSTEMS_USB_82B_PID	0x9e5B	/* USB-82 USB to 2.5 mm stereo plug*/
+#define RTSYSTEMS_USB_K5D_PID	0x9e5C	/* USB-K5D USB to 8 pin modular plug*/
+#define RTSYSTEMS_USB_K4Y_PID	0x9e5D	/* USB-K4Y USB to 2.5/3.5 mm plugs*/
+#define RTSYSTEMS_USB_K5G_PID	0x9e5E	/* USB-K5G USB to 8 pin modular plug*/
+#define RTSYSTEMS_USB_S05_PID	0x9e5F	/* USB-RTS05 USB to 2.5 mm stereo plug*/
+#define RTSYSTEMS_USB_60_PID	0x9e60	/* USB-60 USB to 6 pin din*/
+#define RTSYSTEMS_USB_61_PID	0x9e61	/* USB-61 USB to 6 pin mini din*/
+#define RTSYSTEMS_USB_62_PID	0x9e62	/* USB-62 USB to 8 pin mini din*/
+#define RTSYSTEMS_USB_63B_PID	0x9e63	/* USB-63 USB to 9 pin female*/
+#define RTSYSTEMS_USB_64_PID	0x9e64	/* USB-64 USB to 9 pin male*/
+#define RTSYSTEMS_USB_65_PID	0x9e65	/* USB-65 USB to 9 pin female null modem*/
+#define RTSYSTEMS_USB_92_PID	0x9e66	/* USB-92 USB to 12 pin plug*/
+#define RTSYSTEMS_USB_92D_PID	0x9e67	/* USB-92D USB to 12 pin plug data*/
+#define RTSYSTEMS_USB_W5R_PID	0x9e68	/* USB-W5R USB to 8 pin modular plug*/
+#define RTSYSTEMS_USB_A5R_PID	0x9e69	/* USB-A5R USB to 8 pin modular plug*/
+#define RTSYSTEMS_USB_PW1_PID	0x9e6A	/* USB-PW1 USB to 8 pin modular plug*/
 
 /*
  * Physik Instrumente
@@ -1283,3 +1307,9 @@
  * Manufacturer: Crucible Technologies
  */
 #define FTDI_CT_COMET_PID	0x8e08
+
+/*
+ * Product: Z3X Box
+ * Manufacturer: Smart GSM Team
+ */
+#define FTDI_Z3X_PID		0x0011
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index 3549d07..07fbdf0 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -2315,7 +2315,7 @@
 	if (d_details == NULL) {
 		dev_err(&serial->dev->dev, "%s - unknown product id %x\n",
 		    __func__, le16_to_cpu(serial->dev->descriptor.idProduct));
-		return 1;
+		return -ENODEV;
 	}
 
 	/* Setup private data for serial driver */
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index f27c621..0f16bf6 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -90,6 +90,7 @@
 	struct list_head        urblist_entry;
 	struct kref             ref_count;
 	struct urb              *urb;
+	struct usb_ctrlrequest	*setup;
 };
 
 enum mos7715_pp_modes {
@@ -271,6 +272,7 @@
 	struct mos7715_parport *mos_parport = urbtrack->mos_parport;
 
 	usb_free_urb(urbtrack->urb);
+	kfree(urbtrack->setup);
 	kfree(urbtrack);
 	kref_put(&mos_parport->ref_count, destroy_mos_parport);
 }
@@ -355,7 +357,6 @@
 	struct urbtracker *urbtrack;
 	int ret_val;
 	unsigned long flags;
-	struct usb_ctrlrequest setup;
 	struct usb_serial *serial = mos_parport->serial;
 	struct usb_device *usbdev = serial->dev;
 
@@ -373,14 +374,20 @@
 		kfree(urbtrack);
 		return -ENOMEM;
 	}
-	setup.bRequestType = (__u8)0x40;
-	setup.bRequest = (__u8)0x0e;
-	setup.wValue = get_reg_value(reg, dummy);
-	setup.wIndex = get_reg_index(reg);
-	setup.wLength = 0;
+	urbtrack->setup = kmalloc(sizeof(*urbtrack->setup), GFP_ATOMIC);
+	if (!urbtrack->setup) {
+		usb_free_urb(urbtrack->urb);
+		kfree(urbtrack);
+		return -ENOMEM;
+	}
+	urbtrack->setup->bRequestType = (__u8)0x40;
+	urbtrack->setup->bRequest = (__u8)0x0e;
+	urbtrack->setup->wValue = cpu_to_le16(get_reg_value(reg, dummy));
+	urbtrack->setup->wIndex = cpu_to_le16(get_reg_index(reg));
+	urbtrack->setup->wLength = 0;
 	usb_fill_control_urb(urbtrack->urb, usbdev,
 			     usb_sndctrlpipe(usbdev, 0),
-			     (unsigned char *)&setup,
+			     (unsigned char *)urbtrack->setup,
 			     NULL, 0, async_complete, urbtrack);
 	kref_init(&urbtrack->ref_count);
 	INIT_LIST_HEAD(&urbtrack->urblist_entry);
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 7e99808..2c1749d 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -183,7 +183,10 @@
 #define LED_ON_MS	500
 #define LED_OFF_MS	500
 
-static int device_type;
+enum mos7840_flag {
+	MOS7840_FLAG_CTRL_BUSY,
+	MOS7840_FLAG_LED_BUSY,
+};
 
 static const struct usb_device_id id_table[] = {
 	{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
@@ -238,9 +241,12 @@
 
 	/* For device(s) with LED indicator */
 	bool has_led;
-	bool led_flag;
 	struct timer_list led_timer1;	/* Timer for LED on */
 	struct timer_list led_timer2;	/* Timer for LED off */
+	struct urb *led_urb;
+	struct usb_ctrlrequest *led_dr;
+
+	unsigned long flags;
 };
 
 /*
@@ -467,10 +473,10 @@
 	case -ESHUTDOWN:
 		/* this urb is terminated, clean up */
 		dev_dbg(dev, "%s - urb shutting down with status: %d\n", __func__, status);
-		return;
+		goto out;
 	default:
 		dev_dbg(dev, "%s - nonzero urb status received: %d\n", __func__, status);
-		return;
+		goto out;
 	}
 
 	dev_dbg(dev, "%s urb buffer size is %d\n", __func__, urb->actual_length);
@@ -483,6 +489,8 @@
 		mos7840_handle_new_msr(mos7840_port, regval);
 	else if (mos7840_port->MsrLsr == 1)
 		mos7840_handle_new_lsr(mos7840_port, regval);
+out:
+	clear_bit_unlock(MOS7840_FLAG_CTRL_BUSY, &mos7840_port->flags);
 }
 
 static int mos7840_get_reg(struct moschip_port *mcs, __u16 Wval, __u16 reg,
@@ -493,6 +501,9 @@
 	unsigned char *buffer = mcs->ctrl_buf;
 	int ret;
 
+	if (test_and_set_bit_lock(MOS7840_FLAG_CTRL_BUSY, &mcs->flags))
+		return -EBUSY;
+
 	dr->bRequestType = MCS_RD_RTYPE;
 	dr->bRequest = MCS_RDREQ;
 	dr->wValue = cpu_to_le16(Wval);	/* 0 */
@@ -504,6 +515,9 @@
 			     mos7840_control_callback, mcs);
 	mcs->control_urb->transfer_buffer_length = 2;
 	ret = usb_submit_urb(mcs->control_urb, GFP_ATOMIC);
+	if (ret)
+		clear_bit_unlock(MOS7840_FLAG_CTRL_BUSY, &mcs->flags);
+
 	return ret;
 }
 
@@ -530,7 +544,7 @@
 				__u16 reg)
 {
 	struct usb_device *dev = mcs->port->serial->dev;
-	struct usb_ctrlrequest *dr = mcs->dr;
+	struct usb_ctrlrequest *dr = mcs->led_dr;
 
 	dr->bRequestType = MCS_WR_RTYPE;
 	dr->bRequest = MCS_WRREQ;
@@ -538,10 +552,10 @@
 	dr->wIndex = cpu_to_le16(reg);
 	dr->wLength = cpu_to_le16(0);
 
-	usb_fill_control_urb(mcs->control_urb, dev, usb_sndctrlpipe(dev, 0),
+	usb_fill_control_urb(mcs->led_urb, dev, usb_sndctrlpipe(dev, 0),
 		(unsigned char *)dr, NULL, 0, mos7840_set_led_callback, NULL);
 
-	usb_submit_urb(mcs->control_urb, GFP_ATOMIC);
+	usb_submit_urb(mcs->led_urb, GFP_ATOMIC);
 }
 
 static void mos7840_set_led_sync(struct usb_serial_port *port, __u16 reg,
@@ -567,7 +581,19 @@
 {
 	struct moschip_port *mcs = (struct moschip_port *) arg;
 
-	mcs->led_flag = false;
+	clear_bit_unlock(MOS7840_FLAG_LED_BUSY, &mcs->flags);
+}
+
+static void mos7840_led_activity(struct usb_serial_port *port)
+{
+	struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
+
+	if (test_and_set_bit_lock(MOS7840_FLAG_LED_BUSY, &mos7840_port->flags))
+		return;
+
+	mos7840_set_led_async(mos7840_port, 0x0301, MODEM_CONTROL_REGISTER);
+	mod_timer(&mos7840_port->led_timer1,
+				jiffies + msecs_to_jiffies(LED_ON_MS));
 }
 
 /*****************************************************************************
@@ -767,14 +793,8 @@
 		return;
 	}
 
-	/* Turn on LED */
-	if (mos7840_port->has_led && !mos7840_port->led_flag) {
-		mos7840_port->led_flag = true;
-		mos7840_set_led_async(mos7840_port, 0x0301,
-					MODEM_CONTROL_REGISTER);
-		mod_timer(&mos7840_port->led_timer1,
-				jiffies + msecs_to_jiffies(LED_ON_MS));
-	}
+	if (mos7840_port->has_led)
+		mos7840_led_activity(port);
 
 	mos7840_port->read_urb_busy = true;
 	retval = usb_submit_urb(mos7840_port->read_urb, GFP_ATOMIC);
@@ -825,18 +845,6 @@
 /************************************************************************/
 /*       D R I V E R  T T Y  I N T E R F A C E  F U N C T I O N S       */
 /************************************************************************/
-#ifdef MCSSerialProbe
-static int mos7840_serial_probe(struct usb_serial *serial,
-				const struct usb_device_id *id)
-{
-
-	/*need to implement the mode_reg reading and updating\
-	   structures usb_serial_ device_type\
-	   (i.e num_ports, num_bulkin,bulkout etc) */
-	/* Also we can update the changes  attach */
-	return 1;
-}
-#endif
 
 /*****************************************************************************
  * mos7840_open
@@ -914,20 +922,20 @@
 	status = mos7840_get_reg_sync(port, mos7840_port->SpRegOffset, &Data);
 	if (status < 0) {
 		dev_dbg(&port->dev, "Reading Spreg failed\n");
-		return -1;
+		goto err;
 	}
 	Data |= 0x80;
 	status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data);
 	if (status < 0) {
 		dev_dbg(&port->dev, "writing Spreg failed\n");
-		return -1;
+		goto err;
 	}
 
 	Data &= ~0x80;
 	status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data);
 	if (status < 0) {
 		dev_dbg(&port->dev, "writing Spreg failed\n");
-		return -1;
+		goto err;
 	}
 	/* End of block to be checked */
 
@@ -936,7 +944,7 @@
 									&Data);
 	if (status < 0) {
 		dev_dbg(&port->dev, "Reading Controlreg failed\n");
-		return -1;
+		goto err;
 	}
 	Data |= 0x08;		/* Driver done bit */
 	Data |= 0x20;		/* rx_disable */
@@ -944,7 +952,7 @@
 				mos7840_port->ControlRegOffset, Data);
 	if (status < 0) {
 		dev_dbg(&port->dev, "writing Controlreg failed\n");
-		return -1;
+		goto err;
 	}
 	/* do register settings here */
 	/* Set all regs to the device default values. */
@@ -955,21 +963,21 @@
 	status = mos7840_set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data);
 	if (status < 0) {
 		dev_dbg(&port->dev, "disabling interrupts failed\n");
-		return -1;
+		goto err;
 	}
 	/* Set FIFO_CONTROL_REGISTER to the default value */
 	Data = 0x00;
 	status = mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data);
 	if (status < 0) {
 		dev_dbg(&port->dev, "Writing FIFO_CONTROL_REGISTER  failed\n");
-		return -1;
+		goto err;
 	}
 
 	Data = 0xcf;
 	status = mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data);
 	if (status < 0) {
 		dev_dbg(&port->dev, "Writing FIFO_CONTROL_REGISTER  failed\n");
-		return -1;
+		goto err;
 	}
 
 	Data = 0x03;
@@ -1114,6 +1122,15 @@
 	/* mos7840_change_port_settings(mos7840_port,old_termios); */
 
 	return 0;
+err:
+	for (j = 0; j < NUM_URBS; ++j) {
+		urb = mos7840_port->write_urb_pool[j];
+		if (!urb)
+			continue;
+		kfree(urb->transfer_buffer);
+		usb_free_urb(urb);
+	}
+	return status;
 }
 
 /*****************************************************************************
@@ -1458,13 +1475,8 @@
 	data1 = urb->transfer_buffer;
 	dev_dbg(&port->dev, "bulkout endpoint is %d\n", port->bulk_out_endpointAddress);
 
-	/* Turn on LED */
-	if (mos7840_port->has_led && !mos7840_port->led_flag) {
-		mos7840_port->led_flag = true;
-		mos7840_set_led_sync(port, MODEM_CONTROL_REGISTER, 0x0301);
-		mod_timer(&mos7840_port->led_timer1,
-				jiffies + msecs_to_jiffies(LED_ON_MS));
-	}
+	if (mos7840_port->has_led)
+		mos7840_led_activity(port);
 
 	/* send it down the pipe */
 	status = usb_submit_urb(urb, GFP_ATOMIC);
@@ -2193,38 +2205,48 @@
 	return 0;
 }
 
-static int mos7840_calc_num_ports(struct usb_serial *serial)
+static int mos7840_probe(struct usb_serial *serial,
+				const struct usb_device_id *id)
 {
-	__u16 data = 0x00;
+	u16 product = le16_to_cpu(serial->dev->descriptor.idProduct);
 	u8 *buf;
-	int mos7840_num_ports;
+	int device_type;
+
+	if (product == MOSCHIP_DEVICE_ID_7810 ||
+		product == MOSCHIP_DEVICE_ID_7820) {
+		device_type = product;
+		goto out;
+	}
 
 	buf = kzalloc(VENDOR_READ_LENGTH, GFP_KERNEL);
-	if (buf) {
-		usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
+	if (!buf)
+		return -ENOMEM;
+
+	usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
 			MCS_RDREQ, MCS_RD_RTYPE, 0, GPIO_REGISTER, buf,
 			VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT);
-		data = *buf;
-		kfree(buf);
-	}
 
-	if (serial->dev->descriptor.idProduct == MOSCHIP_DEVICE_ID_7810 ||
-		serial->dev->descriptor.idProduct == MOSCHIP_DEVICE_ID_7820) {
-		device_type = serial->dev->descriptor.idProduct;
-	} else {
-		/* For a MCS7840 device GPIO0 must be set to 1 */
-		if ((data & 0x01) == 1)
-			device_type = MOSCHIP_DEVICE_ID_7840;
-		else if (mos7810_check(serial))
-			device_type = MOSCHIP_DEVICE_ID_7810;
-		else
-			device_type = MOSCHIP_DEVICE_ID_7820;
-	}
+	/* For a MCS7840 device GPIO0 must be set to 1 */
+	if (buf[0] & 0x01)
+		device_type = MOSCHIP_DEVICE_ID_7840;
+	else if (mos7810_check(serial))
+		device_type = MOSCHIP_DEVICE_ID_7810;
+	else
+		device_type = MOSCHIP_DEVICE_ID_7820;
+
+	kfree(buf);
+out:
+	usb_set_serial_data(serial, (void *)(unsigned long)device_type);
+
+	return 0;
+}
+
+static int mos7840_calc_num_ports(struct usb_serial *serial)
+{
+	int device_type = (unsigned long)usb_get_serial_data(serial);
+	int mos7840_num_ports;
 
 	mos7840_num_ports = (device_type >> 4) & 0x000F;
-	serial->num_bulk_in = mos7840_num_ports;
-	serial->num_bulk_out = mos7840_num_ports;
-	serial->num_ports = mos7840_num_ports;
 
 	return mos7840_num_ports;
 }
@@ -2232,6 +2254,7 @@
 static int mos7840_port_probe(struct usb_serial_port *port)
 {
 	struct usb_serial *serial = port->serial;
+	int device_type = (unsigned long)usb_get_serial_data(serial);
 	struct moschip_port *mos7840_port;
 	int status;
 	int pnum;
@@ -2409,6 +2432,14 @@
 	if (device_type == MOSCHIP_DEVICE_ID_7810) {
 		mos7840_port->has_led = true;
 
+		mos7840_port->led_urb = usb_alloc_urb(0, GFP_KERNEL);
+		mos7840_port->led_dr = kmalloc(sizeof(*mos7840_port->led_dr),
+								GFP_KERNEL);
+		if (!mos7840_port->led_urb || !mos7840_port->led_dr) {
+			status = -ENOMEM;
+			goto error;
+		}
+
 		init_timer(&mos7840_port->led_timer1);
 		mos7840_port->led_timer1.function = mos7840_led_off;
 		mos7840_port->led_timer1.expires =
@@ -2421,8 +2452,6 @@
 			jiffies + msecs_to_jiffies(LED_OFF_MS);
 		mos7840_port->led_timer2.data = (unsigned long)mos7840_port;
 
-		mos7840_port->led_flag = false;
-
 		/* Turn off LED */
 		mos7840_set_led_sync(port, MODEM_CONTROL_REGISTER, 0x0300);
 	}
@@ -2444,6 +2473,8 @@
 	}
 	return 0;
 error:
+	kfree(mos7840_port->led_dr);
+	usb_free_urb(mos7840_port->led_urb);
 	kfree(mos7840_port->dr);
 	kfree(mos7840_port->ctrl_buf);
 	usb_free_urb(mos7840_port->control_urb);
@@ -2464,6 +2495,10 @@
 
 		del_timer_sync(&mos7840_port->led_timer1);
 		del_timer_sync(&mos7840_port->led_timer2);
+
+		usb_kill_urb(mos7840_port->led_urb);
+		usb_free_urb(mos7840_port->led_urb);
+		kfree(mos7840_port->led_dr);
 	}
 	usb_kill_urb(mos7840_port->control_urb);
 	usb_free_urb(mos7840_port->control_urb);
@@ -2490,9 +2525,7 @@
 	.throttle = mos7840_throttle,
 	.unthrottle = mos7840_unthrottle,
 	.calc_num_ports = mos7840_calc_num_ports,
-#ifdef MCSSerialProbe
-	.probe = mos7840_serial_probe,
-#endif
+	.probe = mos7840_probe,
 	.ioctl = mos7840_ioctl,
 	.set_termios = mos7840_set_termios,
 	.break_ctl = mos7840_break,
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index bd4323d..c3d9485 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -81,6 +81,7 @@
 
 #define HUAWEI_VENDOR_ID			0x12D1
 #define HUAWEI_PRODUCT_E173			0x140C
+#define HUAWEI_PRODUCT_E1750			0x1406
 #define HUAWEI_PRODUCT_K4505			0x1464
 #define HUAWEI_PRODUCT_K3765			0x1465
 #define HUAWEI_PRODUCT_K4605			0x14C6
@@ -159,8 +160,6 @@
 #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED	0x9000
 #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED	0x9001
 #define NOVATELWIRELESS_PRODUCT_E362		0x9010
-#define NOVATELWIRELESS_PRODUCT_G1		0xA001
-#define NOVATELWIRELESS_PRODUCT_G1_M		0xA002
 #define NOVATELWIRELESS_PRODUCT_G2		0xA010
 #define NOVATELWIRELESS_PRODUCT_MC551		0xB001
 
@@ -343,17 +342,12 @@
 #define OLIVETTI_VENDOR_ID			0x0b3c
 #define OLIVETTI_PRODUCT_OLICARD100		0xc000
 #define OLIVETTI_PRODUCT_OLICARD145		0xc003
+#define OLIVETTI_PRODUCT_OLICARD200		0xc005
 
 /* Celot products */
 #define CELOT_VENDOR_ID				0x211f
 #define CELOT_PRODUCT_CT680M			0x6801
 
-/* ONDA Communication vendor id */
-#define ONDA_VENDOR_ID       0x1ee8
-
-/* ONDA MT825UP HSDPA 14.2 modem */
-#define ONDA_MT825UP         0x000b
-
 /* Samsung products */
 #define SAMSUNG_VENDOR_ID                       0x04e8
 #define SAMSUNG_PRODUCT_GT_B3730                0x6889
@@ -446,7 +440,8 @@
 
 /* Hyundai Petatel Inc. products */
 #define PETATEL_VENDOR_ID			0x1ff4
-#define PETATEL_PRODUCT_NP10T			0x600e
+#define PETATEL_PRODUCT_NP10T_600A		0x600a
+#define PETATEL_PRODUCT_NP10T_600E		0x600e
 
 /* TP-LINK Incorporated products */
 #define TPLINK_VENDOR_ID			0x2357
@@ -456,6 +451,10 @@
 #define CHANGHONG_VENDOR_ID			0x2077
 #define CHANGHONG_PRODUCT_CH690			0x7001
 
+/* Inovia */
+#define INOVIA_VENDOR_ID			0x20a6
+#define INOVIA_SEW858				0x1105
+
 /* some devices interfaces need special handling due to a number of reasons */
 enum option_blacklist_reason {
 		OPTION_BLACKLIST_NONE = 0,
@@ -573,6 +572,8 @@
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff),
 		.driver_info = (kernel_ulong_t) &net_intf1_blacklist },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1750, 0xff, 0xff, 0xff),
+		.driver_info = (kernel_ulong_t) &net_intf2_blacklist },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1441, USB_CLASS_COMM, 0x02, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1442, USB_CLASS_COMM, 0x02, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff),
@@ -692,6 +693,222 @@
 	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7A) },
 	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7B) },
 	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7C) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x01) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x02) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x03) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x04) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x05) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x06) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0A) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0B) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0D) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0E) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0F) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x10) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x12) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x13) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x14) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x15) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x17) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x18) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x19) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x1A) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x1B) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x1C) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x31) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x32) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x33) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x34) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x35) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x36) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3A) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3B) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3D) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3E) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3F) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x48) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x49) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x4A) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x4B) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x4C) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x61) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x62) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x63) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x64) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x65) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x66) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6A) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6B) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6D) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6E) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6F) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x78) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x79) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7A) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7B) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7C) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x01) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x02) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x03) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x04) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x05) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x06) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0A) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0B) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0D) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0E) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0F) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x10) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x12) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x13) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x14) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x15) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x17) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x18) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x19) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x1A) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x1B) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x1C) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x31) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x32) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x33) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x34) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x35) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x36) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3A) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3B) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3D) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3E) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3F) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x48) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x49) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x4A) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x4B) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x4C) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x61) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x62) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x63) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x64) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x65) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x66) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6A) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6B) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6D) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6E) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6F) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x78) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x79) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7A) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7B) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7C) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x01) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x02) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x03) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x04) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x05) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x06) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0A) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0B) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0D) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0E) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0F) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x10) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x12) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x13) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x14) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x15) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x17) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x18) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x19) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x1A) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x1B) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x1C) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x31) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x32) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x33) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x34) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x35) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x36) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3A) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3B) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3D) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3E) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3F) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x48) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x49) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x4A) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x4B) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x4C) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x61) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x62) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x63) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x64) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x65) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x66) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6A) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6B) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6D) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6E) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6F) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x78) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x79) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7A) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7B) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7C) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x01) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x02) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x03) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x04) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x05) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x06) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0A) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0B) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0D) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0E) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0F) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x10) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x12) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x13) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x14) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x15) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x17) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x18) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x19) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x1A) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x1B) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x1C) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x31) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x32) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x33) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x34) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x35) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x36) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3A) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3B) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3D) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3E) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3F) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x48) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x49) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x4A) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x4B) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x4C) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x61) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x62) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x63) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x64) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x65) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x66) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6A) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6B) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6D) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6E) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6F) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x78) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x79) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7A) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7B) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7C) },
 
 
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
@@ -730,8 +947,6 @@
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC547) },
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED) },
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED) },
-	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1) },
-	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1_M) },
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G2) },
 	/* Novatel Ovation MC551 a.k.a. Verizon USB551L */
 	{ USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) },
@@ -786,6 +1001,7 @@
 	{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) },
 	{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
+	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) },
@@ -821,7 +1037,8 @@
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff),
 		.driver_info = (kernel_ulong_t)&net_intf3_blacklist },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0018, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0019, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0019, 0xff, 0xff, 0xff),
+		.driver_info = (kernel_ulong_t)&net_intf3_blacklist },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0020, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0021, 0xff, 0xff, 0xff),
 		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
@@ -1159,6 +1376,23 @@
 		.driver_info = (kernel_ulong_t)&net_intf2_blacklist },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1426, 0xff, 0xff, 0xff),  /* ZTE MF91 */
 		.driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1545, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1546, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1547, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1565, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1566, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1567, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1589, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1590, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1591, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1592, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1594, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1596, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1598, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1600, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff,
 	  0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_k3765_z_blacklist },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
@@ -1260,8 +1494,10 @@
 
 	{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
 	{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) },
+	{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200),
+		.driver_info = (kernel_ulong_t)&net_intf6_blacklist
+	},
 	{ USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
-	{ USB_DEVICE(ONDA_VENDOR_ID, ONDA_MT825UP) }, /* ONDA MT825UP modem */
 	{ USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
 	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) },
 	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM610) },
@@ -1333,9 +1569,12 @@
 	{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x02, 0x01) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x00, 0x00) },
 	{ USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
-	{ USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T) },
+	{ USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600A) },
+	{ USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600E) },
 	{ USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180),
 	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	{ USB_DEVICE(TPLINK_VENDOR_ID, 0x9000),					/* TP-Link MA260 */
+	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
 	{ USB_DEVICE(CHANGHONG_VENDOR_ID, CHANGHONG_PRODUCT_CH690) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x02, 0x01) },	/* D-Link DWM-156 (variant) */
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x00, 0x00) },	/* D-Link DWM-156 (variant) */
@@ -1343,6 +1582,9 @@
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x00, 0x00) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
+	{ USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
 	{ } /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index bd794b4..c65437c 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -35,7 +35,13 @@
 	{DEVICE_G1K(0x04da, 0x250c)},	/* Panasonic Gobi QDL device */
 	{DEVICE_G1K(0x413c, 0x8172)},	/* Dell Gobi Modem device */
 	{DEVICE_G1K(0x413c, 0x8171)},	/* Dell Gobi QDL device */
-	{DEVICE_G1K(0x1410, 0xa001)},	/* Novatel Gobi Modem device */
+	{DEVICE_G1K(0x1410, 0xa001)},	/* Novatel/Verizon USB-1000 */
+	{DEVICE_G1K(0x1410, 0xa002)},	/* Novatel Gobi Modem device */
+	{DEVICE_G1K(0x1410, 0xa003)},	/* Novatel Gobi Modem device */
+	{DEVICE_G1K(0x1410, 0xa004)},	/* Novatel Gobi Modem device */
+	{DEVICE_G1K(0x1410, 0xa005)},	/* Novatel Gobi Modem device */
+	{DEVICE_G1K(0x1410, 0xa006)},	/* Novatel Gobi Modem device */
+	{DEVICE_G1K(0x1410, 0xa007)},	/* Novatel Gobi Modem device */
 	{DEVICE_G1K(0x1410, 0xa008)},	/* Novatel Gobi QDL device */
 	{DEVICE_G1K(0x0b05, 0x1776)},	/* Asus Gobi Modem device */
 	{DEVICE_G1K(0x0b05, 0x1774)},	/* Asus Gobi QDL device */
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index e581c25..4cc84c0 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -203,6 +203,7 @@
 	{ USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) },
 	{ USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) },
 	{ USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) },
+	{ USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) },
 	{ USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) },
 	{ }
 };
@@ -371,7 +372,7 @@
 	usb_set_serial_data(serial, tdev);
 
 	/* determine device type */
-	if (usb_match_id(serial->interface, ti_id_table_3410))
+	if (serial->type == &ti_1port_device)
 		tdev->td_is_3410 = 1;
 	dev_dbg(&dev->dev, "%s - device type is %s\n", __func__,
 		tdev->td_is_3410 ? "3410" : "5052");
@@ -1536,14 +1537,15 @@
 	char buf[32];
 
 	/* try ID specific firmware first, then try generic firmware */
-	sprintf(buf, "ti_usb-v%04x-p%04x.fw", dev->descriptor.idVendor,
-	    dev->descriptor.idProduct);
+	sprintf(buf, "ti_usb-v%04x-p%04x.fw",
+			le16_to_cpu(dev->descriptor.idVendor),
+			le16_to_cpu(dev->descriptor.idProduct));
 	status = request_firmware(&fw_p, buf, &dev->dev);
 
 	if (status != 0) {
 		buf[0] = '\0';
-		if (dev->descriptor.idVendor == MTS_VENDOR_ID) {
-			switch (dev->descriptor.idProduct) {
+		if (le16_to_cpu(dev->descriptor.idVendor) == MTS_VENDOR_ID) {
+			switch (le16_to_cpu(dev->descriptor.idProduct)) {
 			case MTS_CDMA_PRODUCT_ID:
 				strcpy(buf, "mts_cdma.fw");
 				break;
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index ece326e..db0cf53 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -291,18 +291,18 @@
 			tty_flip_buffer_push(&port->port);
 		} else
 			dev_dbg(dev, "%s: empty read urb received\n", __func__);
-
-		/* Resubmit urb so we continue receiving */
-		err = usb_submit_urb(urb, GFP_ATOMIC);
-		if (err) {
-			if (err != -EPERM) {
-				dev_err(dev, "%s: resubmit read urb failed. (%d)\n", __func__, err);
-				/* busy also in error unless we are killed */
-				usb_mark_last_busy(port->serial->dev);
-			}
-		} else {
+	}
+	/* Resubmit urb so we continue receiving */
+	err = usb_submit_urb(urb, GFP_ATOMIC);
+	if (err) {
+		if (err != -EPERM) {
+			dev_err(dev, "%s: resubmit read urb failed. (%d)\n",
+				__func__, err);
+			/* busy also in error unless we are killed */
 			usb_mark_last_busy(port->serial->dev);
 		}
+	} else {
+		usb_mark_last_busy(port->serial->dev);
 	}
 }
 
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 92b05d9..5db1532 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -211,8 +211,11 @@
 		/*
 		 * Many devices do not respond properly to READ_CAPACITY_16.
 		 * Tell the SCSI layer to try READ_CAPACITY_10 first.
+		 * However some USB 3.0 drive enclosures return capacity
+		 * modulo 2TB. Those must use READ_CAPACITY_16
 		 */
-		sdev->try_rc_10_first = 1;
+		if (!(us->fflags & US_FL_NEEDS_CAP16))
+			sdev->try_rc_10_first = 1;
 
 		/* assume SPC3 or latter devices support sense size > 18 */
 		if (sdev->scsi_level > SCSI_SPC_2)
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 179933528..de32cfa 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -665,6 +665,13 @@
 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
 		US_FL_FIX_INQUIRY ),
 
+/* Submitted by Ren Bigcren <bigcren.ren@sonymobile.com> */
+UNUSUAL_DEV(  0x054c, 0x02a5, 0x0100, 0x0100,
+		"Sony Corp.",
+		"MicroVault Flash Drive",
+		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+		US_FL_NO_READ_CAPACITY_16 ),
+
 /* floppy reports multiple luns */
 UNUSUAL_DEV(  0x055d, 0x2020, 0x0000, 0x0210,
 		"SAMSUNG",
@@ -1918,6 +1925,13 @@
 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
 		US_FL_IGNORE_RESIDUE ),
 
+/* Reported by Oliver Neukum <oneukum@suse.com> */
+UNUSUAL_DEV(  0x174c, 0x55aa, 0x0100, 0x0100,
+		"ASMedia",
+		"AS2105",
+		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+		US_FL_NEEDS_CAP16),
+
 /* Reported by Jesse Feddema <jdfeddema@gmail.com> */
 UNUSUAL_DEV(  0x177f, 0x0400, 0x0000, 0x0000,
 		"Yarvik",
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 5c4fe07..da4c69c 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -818,6 +818,10 @@
 {
 	struct Scsi_Host *host = us_to_host(us);
 
+	/* Kill all transfer first. Otherwise, it will cause scsi-host
+	 * remove blocking about 30sec. */
+	usb_stor_stop_transport(us);
+
 	/* If the device is really gone, cut short reset delays */
 	if (us->pusb_dev->state == USB_STATE_NOTATTACHED) {
 		set_bit(US_FLIDX_DISCONNECTING, &us->dflags);
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
index 6ef94bc..028fc83 100644
--- a/drivers/usb/wusbcore/wa-xfer.c
+++ b/drivers/usb/wusbcore/wa-xfer.c
@@ -1110,6 +1110,12 @@
 	}
 	spin_lock_irqsave(&xfer->lock, flags);
 	rpipe = xfer->ep->hcpriv;
+	if (rpipe == NULL) {
+		pr_debug("%s: xfer id 0x%08X has no RPIPE.  %s",
+			__func__, wa_xfer_id(xfer),
+			"Probably already aborted.\n" );
+		goto out_unlock;
+	}
 	/* Check the delayed list -> if there, release and complete */
 	spin_lock_irqsave(&wa->xfer_list_lock, flags2);
 	if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
@@ -1493,8 +1499,7 @@
 			break;
 		}
 		usb_status = xfer_result->bTransferStatus & 0x3f;
-		if (usb_status == WA_XFER_STATUS_ABORTED
-		    || usb_status == WA_XFER_STATUS_NOT_FOUND)
+		if (usb_status == WA_XFER_STATUS_NOT_FOUND)
 			/* taken care of already */
 			break;
 		xfer_id = xfer_result->dwTransferID;
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index f80d3dd..d6a518c 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -150,6 +150,11 @@
 {
 	kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal);
 	wait_event(ubufs->wait, !atomic_read(&ubufs->kref.refcount));
+}
+
+static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs)
+{
+	vhost_net_ubuf_put_and_wait(ubufs);
 	kfree(ubufs);
 }
 
@@ -302,6 +307,11 @@
 	struct vhost_virtqueue *vq = ubufs->vq;
 	int cnt = atomic_read(&ubufs->kref.refcount);
 
+	/* set len to mark this desc buffers done DMA */
+	vq->heads[ubuf->desc].len = success ?
+		VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
+	vhost_net_ubuf_put(ubufs);
+
 	/*
 	 * Trigger polling thread if guest stopped submitting new buffers:
 	 * in this case, the refcount after decrement will eventually reach 1
@@ -312,10 +322,6 @@
 	 */
 	if (cnt <= 2 || !(cnt % 16))
 		vhost_poll_queue(&vq->poll);
-	/* set len to mark this desc buffers done DMA */
-	vq->heads[ubuf->desc].len = success ?
-		VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
-	vhost_net_ubuf_put(ubufs);
 }
 
 /* Expects to be always run from workqueue - which acts as
@@ -948,7 +954,7 @@
 	mutex_unlock(&vq->mutex);
 
 	if (oldubufs) {
-		vhost_net_ubuf_put_and_wait(oldubufs);
+		vhost_net_ubuf_put_wait_and_free(oldubufs);
 		mutex_lock(&vq->mutex);
 		vhost_zerocopy_signal_used(n, vq);
 		mutex_unlock(&vq->mutex);
@@ -966,7 +972,7 @@
 	rcu_assign_pointer(vq->private_data, oldsock);
 	vhost_net_enable_vq(n, vq);
 	if (ubufs)
-		vhost_net_ubuf_put_and_wait(ubufs);
+		vhost_net_ubuf_put_wait_and_free(ubufs);
 err_ubufs:
 	fput(sock->file);
 err_vq:
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 7014202..962c7e3 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1017,7 +1017,7 @@
 		if (data_direction != DMA_NONE) {
 			ret = vhost_scsi_map_iov_to_sgl(tv_cmd,
 					&vq->iov[data_first], data_num,
-					data_direction == DMA_TO_DEVICE);
+					data_direction == DMA_FROM_DEVICE);
 			if (unlikely(ret)) {
 				vq_err(vq, "Failed to map iov to sgl\n");
 				goto err_free;
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 0669dac..7ad3db0 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -39,6 +39,9 @@
 config HDMI
 	bool
 
+config ITE_HDMI_CEC
+	bool "Enable ITE HDMI CEC"
+
 menuconfig FB
 	tristate "Support for frame buffer devices"
 	---help---
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index 2babdef..995e446 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -6,6 +6,7 @@
 
 obj-$(CONFIG_VGASTATE)            += vgastate.o
 obj-$(CONFIG_HDMI)                += hdmi.o
+obj-$(CONFIG_ITE_HDMI_CEC)        += it8566_hdmi_cec/
 obj-y                             += fb_notify.o
 obj-$(CONFIG_FB)                  += fb.o
 fb-y                              := fbmem.o fbmon.o fbcmap.o fbsysfs.o \
diff --git a/drivers/video/au1100fb.c b/drivers/video/au1100fb.c
index 700cac0..bdc515f 100644
--- a/drivers/video/au1100fb.c
+++ b/drivers/video/au1100fb.c
@@ -361,39 +361,13 @@
 int au1100fb_fb_mmap(struct fb_info *fbi, struct vm_area_struct *vma)
 {
 	struct au1100fb_device *fbdev;
-	unsigned int len;
-	unsigned long start=0, off;
 
 	fbdev = to_au1100fb_device(fbi);
 
-	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) {
-		return -EINVAL;
-	}
-
-	start = fbdev->fb_phys & PAGE_MASK;
-	len = PAGE_ALIGN((start & ~PAGE_MASK) + fbdev->fb_len);
-
-	off = vma->vm_pgoff << PAGE_SHIFT;
-
-	if ((vma->vm_end - vma->vm_start + off) > len) {
-		return -EINVAL;
-	}
-
-	off += start;
-	vma->vm_pgoff = off >> PAGE_SHIFT;
-
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 	pgprot_val(vma->vm_page_prot) |= (6 << 9); //CCA=6
 
-	vma->vm_flags |= VM_IO;
-
-	if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
-				vma->vm_end - vma->vm_start,
-				vma->vm_page_prot)) {
-		return -EAGAIN;
-	}
-
-	return 0;
+	return vm_iomap_memory(vma, fbdev->fb_phys, fbdev->fb_len);
 }
 
 static struct fb_ops au1100fb_ops =
diff --git a/drivers/video/au1200fb.c b/drivers/video/au1200fb.c
index 1b59054..1d02897 100644
--- a/drivers/video/au1200fb.c
+++ b/drivers/video/au1200fb.c
@@ -1233,38 +1233,13 @@
  * method mainly to allow the use of the TLB streaming flag (CCA=6)
  */
 static int au1200fb_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
-
 {
-	unsigned int len;
-	unsigned long start=0, off;
 	struct au1200fb_device *fbdev = info->par;
 
-	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) {
-		return -EINVAL;
-	}
-
-	start = fbdev->fb_phys & PAGE_MASK;
-	len = PAGE_ALIGN((start & ~PAGE_MASK) + fbdev->fb_len);
-
-	off = vma->vm_pgoff << PAGE_SHIFT;
-
-	if ((vma->vm_end - vma->vm_start + off) > len) {
-		return -EINVAL;
-	}
-
-	off += start;
-	vma->vm_pgoff = off >> PAGE_SHIFT;
-
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 	pgprot_val(vma->vm_page_prot) |= _CACHE_MASK; /* CCA=7 */
 
-	vma->vm_flags |= VM_IO;
-
-	return io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
-				  vma->vm_end - vma->vm_start,
-				  vma->vm_page_prot);
-
-	return 0;
+	return vm_iomap_memory(vma, fbdev->fb_phys, fbdev->fb_len);
 }
 
 static void set_global(u_int cmd, struct au1200_lcd_global_regs_t *pdata)
diff --git a/drivers/video/backlight/atmel-pwm-bl.c b/drivers/video/backlight/atmel-pwm-bl.c
index a60d6af..30e4ed5 100644
--- a/drivers/video/backlight/atmel-pwm-bl.c
+++ b/drivers/video/backlight/atmel-pwm-bl.c
@@ -118,7 +118,7 @@
 	.update_status  = atmel_pwm_bl_set_intensity,
 };
 
-static int __init atmel_pwm_bl_probe(struct platform_device *pdev)
+static int atmel_pwm_bl_probe(struct platform_device *pdev)
 {
 	struct backlight_properties props;
 	const struct atmel_pwm_bl_platform_data *pdata;
@@ -203,7 +203,7 @@
 	return retval;
 }
 
-static int __exit atmel_pwm_bl_remove(struct platform_device *pdev)
+static int atmel_pwm_bl_remove(struct platform_device *pdev)
 {
 	struct atmel_pwm_bl *pwmbl = platform_get_drvdata(pdev);
 
@@ -222,10 +222,11 @@
 		.name = "atmel-pwm-bl",
 	},
 	/* REVISIT add suspend() and resume() */
-	.remove = __exit_p(atmel_pwm_bl_remove),
+	.probe = atmel_pwm_bl_probe,
+	.remove = atmel_pwm_bl_remove,
 };
 
-module_platform_driver_probe(atmel_pwm_bl_driver, atmel_pwm_bl_probe);
+module_platform_driver(atmel_pwm_bl_driver);
 
 MODULE_AUTHOR("Hans-Christian egtvedt <hans-christian.egtvedt@atmel.com>");
 MODULE_DESCRIPTION("Atmel PWM backlight driver");
diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c
index 4017833..635d569 100644
--- a/drivers/video/hdmi.c
+++ b/drivers/video/hdmi.c
@@ -22,6 +22,7 @@
  */
 
 #include <linux/bitops.h>
+#include <linux/bug.h>
 #include <linux/errno.h>
 #include <linux/export.h>
 #include <linux/hdmi.h>
@@ -52,7 +53,7 @@
 
 	frame->type = HDMI_INFOFRAME_TYPE_AVI;
 	frame->version = 2;
-	frame->length = 13;
+	frame->length = HDMI_AVI_INFOFRAME_SIZE;
 
 	return 0;
 }
@@ -83,7 +84,7 @@
 	if (size < length)
 		return -ENOSPC;
 
-	memset(buffer, 0, length);
+	memset(buffer, 0, size);
 
 	ptr[0] = frame->type;
 	ptr[1] = frame->version;
@@ -151,7 +152,7 @@
 
 	frame->type = HDMI_INFOFRAME_TYPE_SPD;
 	frame->version = 1;
-	frame->length = 25;
+	frame->length = HDMI_SPD_INFOFRAME_SIZE;
 
 	strncpy(frame->vendor, vendor, sizeof(frame->vendor));
 	strncpy(frame->product, product, sizeof(frame->product));
@@ -185,7 +186,7 @@
 	if (size < length)
 		return -ENOSPC;
 
-	memset(buffer, 0, length);
+	memset(buffer, 0, size);
 
 	ptr[0] = frame->type;
 	ptr[1] = frame->version;
@@ -218,7 +219,7 @@
 
 	frame->type = HDMI_INFOFRAME_TYPE_AUDIO;
 	frame->version = 1;
-	frame->length = 10;
+	frame->length = HDMI_AUDIO_INFOFRAME_SIZE;
 
 	return 0;
 }
@@ -250,7 +251,7 @@
 	if (size < length)
 		return -ENOSPC;
 
-	memset(buffer, 0, length);
+	memset(buffer, 0, size);
 
 	if (frame->channels >= 2)
 		channels = frame->channels - 1;
@@ -307,7 +308,7 @@
 	if (size < length)
 		return -ENOSPC;
 
-	memset(buffer, 0, length);
+	memset(buffer, 0, size);
 
 	ptr[0] = frame->type;
 	ptr[1] = frame->version;
@@ -321,3 +322,45 @@
 	return length;
 }
 EXPORT_SYMBOL(hdmi_vendor_infoframe_pack);
+
+/**
+ * hdmi_infoframe_pack() - write a HDMI infoframe to binary buffer
+ * @frame: HDMI infoframe
+ * @buffer: destination buffer
+ * @size: size of buffer
+ *
+ * Packs the information contained in the @frame structure into a binary
+ * representation that can be written into the corresponding controller
+ * registers. Also computes the checksum as required by section 5.3.5 of
+ * the HDMI 1.4 specification.
+ *
+ * Returns the number of bytes packed into the binary buffer or a negative
+ * error code on failure.
+ */
+ssize_t
+hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer, size_t size)
+{
+	ssize_t length;
+
+	switch (frame->any.type) {
+	case HDMI_INFOFRAME_TYPE_AVI:
+		length = hdmi_avi_infoframe_pack(&frame->avi, buffer, size);
+		break;
+	case HDMI_INFOFRAME_TYPE_SPD:
+		length = hdmi_spd_infoframe_pack(&frame->spd, buffer, size);
+		break;
+	case HDMI_INFOFRAME_TYPE_AUDIO:
+		length = hdmi_audio_infoframe_pack(&frame->audio, buffer, size);
+		break;
+	case HDMI_INFOFRAME_TYPE_VENDOR:
+		length = hdmi_vendor_infoframe_pack(&frame->vendor,
+						    buffer, size);
+		break;
+	default:
+		WARN(1, "Bad infoframe type %d\n", frame->any.type);
+		length = -EINVAL;
+	}
+
+	return length;
+}
+EXPORT_SYMBOL(hdmi_infoframe_pack);
diff --git a/drivers/video/hyperv_fb.c b/drivers/video/hyperv_fb.c
index d4d2c5f..0f3b33c 100644
--- a/drivers/video/hyperv_fb.c
+++ b/drivers/video/hyperv_fb.c
@@ -795,12 +795,21 @@
 }
 
 
+static DEFINE_PCI_DEVICE_TABLE(pci_stub_id_table) = {
+	{
+		.vendor      = PCI_VENDOR_ID_MICROSOFT,
+		.device      = PCI_DEVICE_ID_HYPERV_VIDEO,
+	},
+	{ /* end of list */ }
+};
+
 static const struct hv_vmbus_device_id id_table[] = {
 	/* Synthetic Video Device GUID */
 	{HV_SYNTHVID_GUID},
 	{}
 };
 
+MODULE_DEVICE_TABLE(pci, pci_stub_id_table);
 MODULE_DEVICE_TABLE(vmbus, id_table);
 
 static struct hv_driver hvfb_drv = {
@@ -810,14 +819,43 @@
 	.remove = hvfb_remove,
 };
 
+static int hvfb_pci_stub_probe(struct pci_dev *pdev,
+			       const struct pci_device_id *ent)
+{
+	return 0;
+}
+
+static void hvfb_pci_stub_remove(struct pci_dev *pdev)
+{
+}
+
+static struct pci_driver hvfb_pci_stub_driver = {
+	.name =		KBUILD_MODNAME,
+	.id_table =	pci_stub_id_table,
+	.probe =	hvfb_pci_stub_probe,
+	.remove =	hvfb_pci_stub_remove,
+};
 
 static int __init hvfb_drv_init(void)
 {
-	return vmbus_driver_register(&hvfb_drv);
+	int ret;
+
+	ret = vmbus_driver_register(&hvfb_drv);
+	if (ret != 0)
+		return ret;
+
+	ret = pci_register_driver(&hvfb_pci_stub_driver);
+	if (ret != 0) {
+		vmbus_driver_unregister(&hvfb_drv);
+		return ret;
+	}
+
+	return 0;
 }
 
 static void __exit hvfb_drv_exit(void)
 {
+	pci_unregister_driver(&hvfb_pci_stub_driver);
 	vmbus_driver_unregister(&hvfb_drv);
 }
 
diff --git a/drivers/video/it8566_hdmi_cec/Makefile b/drivers/video/it8566_hdmi_cec/Makefile
new file mode 100644
index 0000000..73a8b67
--- /dev/null
+++ b/drivers/video/it8566_hdmi_cec/Makefile
@@ -0,0 +1,4 @@
+ccflags-y := -Werror
+obj-y += it8566.o
+
+it8566-y := it8566_hdmi_cec.o it8566_fw_flash.o
diff --git a/drivers/video/it8566_hdmi_cec/it8566_fw_flash.c b/drivers/video/it8566_hdmi_cec/it8566_fw_flash.c
new file mode 100644
index 0000000..e67e691
--- /dev/null
+++ b/drivers/video/it8566_hdmi_cec/it8566_fw_flash.c
@@ -0,0 +1,1019 @@
+/*
+ * ITE it8566 HDMI CEC driver
+ *
+ * Copyright(c) 2014 ASUSTek COMPUTER INC. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/fcntl.h>
+#include <linux/uaccess.h>
+#include "it8566_hdmi_cec.h"
+
+#define DEV_NAME_FLASH "it8566_flash_mod"
+static struct i2c_client        *flash_mode_client;
+struct mutex it8566_fw_lock;
+/* fw update ++ */
+static char *fw_bin_path = "/system/etc/firmware/IT8566_CEC.BIN";
+#define FW_FILE_SIZE 65536
+unsigned char *gbuffer;
+unsigned char flash_id[3];
+unsigned int start;
+unsigned int end;
+#define EFLASH_CMD_BYTE_PROGRAM		0x02
+#define EFLASH_CMD_WRITE_DISABLE	0x04
+#define EFLASH_CMD_READ_STATUS		0x05
+#define EFLASH_CMD_WRITE_ENABLE		0x06
+#define EFLASH_CMD_FAST_READ		0x0B
+#define EFLASH_CMD_CHIP_ERASE		0x60
+#define EFLASH_CMD_READ_ID		0x9F
+#define EFLASH_CMD_AAI_WORD_PROGRAM	0xAD
+#define EFLASH_CMD_SECTOR_ERASE		0xD7
+#define CMD_CS_HIGH			0x17
+
+static int load_fw_bin_to_buffer(char *path)
+{
+	int result = 0;
+	struct file *fp = NULL;
+	mm_segment_t old_fs;
+
+	kfree(gbuffer);
+	gbuffer = kzalloc(FW_FILE_SIZE, GFP_KERNEL);
+
+	if (!gbuffer) {
+		dev_dbg(&flash_mode_client->dev,
+			"%s:unable to allocate gbuffer\n", __func__);
+		return -1;
+	}
+
+	dev_info(&flash_mode_client->dev, "open file:%s\n", path);
+	fp = filp_open(path, O_RDONLY , 0);
+
+	if (!IS_ERR_OR_NULL(fp)) {
+		old_fs = get_fs();
+		set_fs(KERNEL_DS);
+		if (fp->f_op != NULL && fp->f_op->read != NULL) {
+			fp->f_op->read(fp, gbuffer, FW_FILE_SIZE, &fp->f_pos);
+		} else {
+			dev_err(&flash_mode_client->dev, "fail to read fw file\n");
+			result = -2;
+		}
+		set_fs(old_fs);
+	} else if (PTR_ERR(fp) == -ENOENT) {
+		dev_err(&flash_mode_client->dev, "fw file not found error\n");
+		result = -3;
+	} else {
+		dev_err(&flash_mode_client->dev, "fw file open error\n");
+		result = -4;
+	}
+
+	if (fp)
+		filp_close(fp, NULL);
+
+	return result;
+}
+
+static int ite_i2c_pre_define_cmd_read(unsigned char cmd1,
+	unsigned int payload_len, unsigned char payload[])
+{
+	int result = 0, err = 0;
+	struct i2c_msg msg[2];
+	unsigned char buf1[2] = {0x18};
+
+	if (!flash_mode_client) {
+		dev_err(&flash_mode_client->dev,
+			"%s:no flash_mode_client\n", __func__);
+		return -1;
+	}
+
+	err = i2c_smbus_write_byte(flash_mode_client, CMD_CS_HIGH);
+	if (err < 0) {
+		dev_err(&flash_mode_client->dev,
+			"%s:i2c transfer FAIL #1: err=%d\n", __func__, err);
+		return -1;
+	}
+
+	/*CMD1*/
+	buf1[1] = cmd1;
+	msg[0].addr = flash_mode_client->addr;
+	msg[0].len = 2;
+	msg[0].flags = 0;
+	msg[0].buf = buf1;
+
+	/*CMD1 Read Payload*/
+	msg[1].addr = flash_mode_client->addr;
+	msg[1].len = payload_len;
+	msg[1].flags = I2C_M_RD;
+	msg[1].buf = payload;
+
+	err = i2c_transfer(flash_mode_client->adapter, msg, 2);
+
+	if (err < 0) {
+		dev_err(&flash_mode_client->dev,
+			"%s:i2c transfer FAIL #2: err=%d\n", __func__, err);
+		return -1;
+	}
+
+	return result;
+}
+
+/*
+  payload under 256 can call this function
+  Write to Read
+
+  S 0x5B [W] [A] 0x17 [A] S 0x5B [W] [A] 0x18 [A] 0x0B [A] S
+  0x5B [R] payload[0] [A] ... payload[N] [NA] [P]
+*/
+int ite_i2c_pre_define_cmd_fastread(unsigned char addr[],
+	unsigned int payload_len, unsigned char payload[])
+{
+	int result = 0, err = 0;
+	struct i2c_msg msg[2];
+	unsigned char buf1[6] = {0x18, 0x0B};      /* 0x0b => Fast Read */
+
+	if (!flash_mode_client) {
+		dev_err(&flash_mode_client->dev,
+			"%s:no flash_mode_client\n", __func__);
+		return -1;
+	}
+
+	err = i2c_smbus_write_byte(flash_mode_client, CMD_CS_HIGH);
+
+	if (err < 0) {
+		dev_err(&flash_mode_client->dev,
+			"%s:i2c transfer FAIL #1: err=%d\n", __func__, err);
+		return -1;
+	}
+
+	buf1[2] = addr[3]; /* Address H */
+	buf1[3] = addr[2]; /* Address M */
+	buf1[4] = addr[1]; /* Address L */
+	buf1[5] = addr[0]; /* Dummy */
+
+	/*CMD1*/
+	msg[0].addr = flash_mode_client->addr;
+	msg[0].len = 6;
+	msg[0].flags = 0;
+	msg[0].buf = buf1;
+
+	/*CMD1 Read Payload*/
+	msg[1].addr = flash_mode_client->addr;
+	msg[1].len = payload_len;
+	msg[1].flags = I2C_M_RD;
+	msg[1].buf = payload;
+
+	err = i2c_transfer(flash_mode_client->adapter, msg, 2);
+
+	if (err < 0) {
+		dev_err(&flash_mode_client->dev,
+			"%s:i2c transfer FAIL #2: err=%d\n", __func__, err);
+		return -1;
+	}
+
+	return result;
+}
+
+/*
+  payload under 256 can call this function
+  S 0x5B [W] [A] 0x17 [A] S 0x5B [W] [A] 0x18 [A] cmd1 [A] payload[0] [A] ...
+  ... payload[N] [NA] [P]
+*/
+static int ite_i2c_pre_define_cmd_write(unsigned char cmd1,
+	unsigned int payload_len, unsigned char payload[])
+{
+	int i, err = 0;
+	unsigned char buf1[256];
+
+	if (!flash_mode_client) {
+		dev_err(&flash_mode_client->dev,
+			"%s:no flash_mode_client\n", __func__);
+		return -1;
+	}
+
+	err = i2c_smbus_write_byte(flash_mode_client, CMD_CS_HIGH);
+
+	if (err < 0) {
+		dev_err(&flash_mode_client->dev,
+			"%s:i2c transfer FAIL #1: err=%d\n", __func__, err);
+		return -1;
+	}
+
+	buf1[0] = cmd1;
+	if (payload_len < 256) {
+		for (i = 0; i < payload_len; i++)
+			buf1[i+1] = payload[i];
+	} else {
+		dev_err(&flash_mode_client->dev,
+			"%s:payload_len over 256\n", __func__);
+		return -1;
+	}
+
+	err = i2c_smbus_write_i2c_block_data(flash_mode_client,
+			0x18, payload_len + 1, buf1);
+	if (err < 0) {
+		dev_err(&flash_mode_client->dev,
+			"%s:i2c transfer FAIL #2: err=%d\n", __func__, err);
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+   payload under 256 can call this function
+   S 0x5B [W] [A] 0x17 [A] S 0x5B [W] [A] 0x18 [A] cmd1 [A] payload[0] [A] ...
+   ... payload[N] [NA] [P]
+*/
+static int ite_i2c_pre_define_cmd_write_with_status(unsigned char cmd1,
+		unsigned int payload_len, unsigned char payload[])
+{
+	int result = 0, i, err = 0;
+	struct i2c_msg msg[2];
+	unsigned char buf1[256];
+	unsigned char cmd_status[2] = {0x18, EFLASH_CMD_READ_STATUS};
+	unsigned char read_status[3];
+
+	/*CS High*/
+	err = i2c_smbus_write_byte(flash_mode_client, CMD_CS_HIGH);
+	if (err < 0) {
+		dev_err(&flash_mode_client->dev,
+			"%s:i2c transfer FAIL #1: err=%d\n", __func__, err);
+		return -1;
+	}
+
+	buf1[0] = cmd1;
+	if (payload_len < 256) {
+		for (i = 0; i < payload_len; i++)
+			buf1[i+1] = payload[i];
+	} else {
+		dev_err(&flash_mode_client->dev,
+			"%s: payload_len over 256\n", __func__);
+		return -1;
+	}
+
+	err = i2c_smbus_write_i2c_block_data(flash_mode_client,
+			0x18, payload_len + 1, buf1);
+	if (err < 0) {
+		dev_err(&flash_mode_client->dev,
+			"%s:i2c transfer FAIL #2: err=%d\n", __func__, err);
+		return -1;
+	}
+
+	/*1st read status
+	CS High*/
+	err = i2c_smbus_write_byte(flash_mode_client, CMD_CS_HIGH);
+	if (err < 0) {
+		dev_err(&flash_mode_client->dev,
+			"%s:i2c transfer FAIL #3: err=%d\n", __func__, err);
+		return -1;
+	}
+
+	/*CMD1 change to EFLASH_CMD_READ_STATUS*/
+	msg[0].addr = flash_mode_client->addr;
+	msg[0].len = 2;
+	msg[0].flags = 0;
+	msg[0].buf = cmd_status;
+
+	/*CMD1 Read Payload*/
+	msg[1].addr = flash_mode_client->addr;
+	msg[1].len = 3;
+	msg[1].flags = I2C_M_RD;
+	msg[1].buf = read_status;
+
+	err = i2c_transfer(flash_mode_client->adapter, msg, 2);
+	if (err < 0) {
+		dev_err(&flash_mode_client->dev,
+			"%s:i2c_transfer FAIL #4: err=%d\n", __func__, err);
+		return -1;
+	}
+
+	result = (int)read_status[2];
+
+	return result;
+}
+
+static int i2ec_readbyte(unsigned int address)
+{
+	int result;
+	unsigned char buf0[2] = {0};
+
+	buf0[0] = (address >> 8) & 0xFF; /*addr[15:8]*/
+	buf0[1] = (address) & 0xFF; /*addr[7:0]*/
+
+	result = i2c_smbus_write_i2c_block_data(flash_mode_client,
+						0x10, 2, buf0);
+	if (result < 0) {
+		dev_err(&flash_mode_client->dev,
+			"%s:i2c transfer FAIL #1: result=%d\n",
+			__func__, result);
+		return -1;
+	}
+
+	result = i2c_smbus_read_byte_data(flash_mode_client, 0x11);
+	if (result < 0) {
+		dev_err(&flash_mode_client->dev,
+			"%s:i2c transfer FAIL #2: result=%d\n",
+			__func__, result);
+		return -1;
+	}
+
+	return result;
+}
+
+static int i2ec_writebyte(unsigned int address, unsigned char data)
+{
+	int err = 0;
+	unsigned char buf0[2] = {0};
+
+	buf0[0] = (address >> 8) & 0xFF; /*addr[15:8]*/
+	buf0[1] = (address) & 0xFF; /*addr[7:0]*/
+
+	err = i2c_smbus_write_i2c_block_data(flash_mode_client, 0x10, 2, buf0);
+	if (err < 0) {
+		dev_err(&flash_mode_client->dev,
+			"%s:i2c transfer FAIL #1: err=%d\n", __func__, err);
+		return -1;
+	}
+
+	/*I2EC WRITE BYTE DATA*/
+	err = i2c_smbus_write_byte_data(flash_mode_client, 0x11, data);
+	if (err < 0) {
+		dev_err(&flash_mode_client->dev,
+			"%s:i2c transfer FAIL #2: err=%d\n", __func__,  err);
+		return -1;
+	}
+
+	return err;
+}
+
+static int cmd_write_enable(void)
+{
+	return ite_i2c_pre_define_cmd_write(EFLASH_CMD_WRITE_ENABLE,
+			0, NULL);
+}
+
+static int cmd_write_disable(void)
+{
+	return ite_i2c_pre_define_cmd_write(EFLASH_CMD_WRITE_DISABLE,
+			0, NULL);
+}
+
+static unsigned char cmd_check_status(void)
+{
+	unsigned char status[2];
+	ite_i2c_pre_define_cmd_read(EFLASH_CMD_READ_STATUS, 2, status);
+	return status[1];
+}
+
+static int cmd_erase_sector(int address)
+{
+
+	int result;
+	unsigned char addr_h, addr_m, addr_l, buf[3];
+	addr_h = (unsigned char)((address >> 16) & 0xFF);
+	addr_m = (unsigned char)((address >> 8) & 0xFF);
+	addr_l = (unsigned char)((address) & 0xFF);
+
+	buf[0] = addr_h;
+	buf[1] = addr_m;
+	buf[2] = addr_l;
+	result = ite_i2c_pre_define_cmd_write(EFLASH_CMD_SECTOR_ERASE, 3, buf);
+	while (cmd_check_status() & 0x1)
+		usleep_range(1000, 2000);
+
+	return result;
+
+}
+
+static int cmd_enter_flash_mode(void)
+{
+	int err = 0;
+	struct i2c_client *cec_client = it8566_get_cec_client();
+
+	if (!cec_client)
+		return -1;
+
+	err = i2c_smbus_write_byte(cec_client, 0xEF);
+	if (err < 0) {
+		dev_err(&flash_mode_client->dev,
+			"%s:i2c transfer FAIL...#1: err=%d\n", __func__, err);
+		return -1;
+	}
+
+	return err;
+}
+
+static void get_flash_id(void)
+{
+	ite_i2c_pre_define_cmd_read(EFLASH_CMD_READ_ID, 3, flash_id);
+}
+
+static int get_ec_ver(char *ec_ver)
+{
+	int err;
+#ifdef DEBUG
+	int i;
+#endif
+	struct i2c_client *cec_client = it8566_get_cec_client();
+
+	if (!cec_client)
+		return -1;
+
+	err = i2c_smbus_read_i2c_block_data(cec_client, 0xEC, 8, ec_ver);
+	if (err < 0) {
+		dev_err(&flash_mode_client->dev,
+			"%s:i2c transfer FAIL #1: err=%d\n", __func__, err);
+		return -1;
+	}
+#ifdef DEBUG
+	dev_dbg(&flash_mode_client->dev, "EC Version:\n");
+	for (i = 0; i < 8; i++)
+		dev_dbg(&flash_mode_client->dev, "%x:\n", ec_ver[i]);
+#endif
+	return err;
+}
+
+static int check_ec_has_been_flash(char *ec_ver)
+{
+	unsigned char addr_18, addr_19;
+
+	if (get_ec_ver(ec_ver) < 0) {
+		/* may flash fw fail before or ec
+		   has not been flash fw yet */
+		addr_18 = i2ec_readbyte(0x18);
+		addr_19 = i2ec_readbyte(0x19);
+
+		dev_info(&flash_mode_client->dev,
+			"%s:SRAM(0x18h)=0x%x, SRAM(0x19h)=0x%x\n",
+			__func__, addr_18, addr_19);
+
+		if ((addr_18 == 0xBB) && (addr_19 == 0xBB)) {
+			/* has been flash fw, but fail before */
+			/* no need enter flash mode*/
+			return 0;
+		} else if ((addr_18 == 0xEC) && (addr_19 == 0xEC)) {
+			/* has been flash fw, but fail before */
+			/* need enter flash mode*/
+			return 1;
+		} else {
+			/* has not been flash fw yet */
+			/* no need enter flash mode*/
+			return -1;
+		}
+	} else {
+		/* means ec has valid fw*/
+		/* need enter flash mode*/
+		return 2;
+	}
+}
+
+static void cmd_erase_all_sector(void)
+{
+	int address;
+	dev_info(&flash_mode_client->dev,
+		"%s: start=0x%x, end=0x%x\n", __func__, start, end);
+	for (address = start; address < end; address += 0x400) {
+		cmd_write_enable();
+		while ((cmd_check_status() & 0x02) != 0x02)
+			usleep_range(1000, 2000);
+		cmd_erase_sector(address); /*per 1k byte*/
+	}
+}
+
+static void do_erase_all(void)
+{
+	cmd_erase_all_sector();
+	cmd_write_disable();
+}
+
+static int do_check(void)
+{
+	int i, result = 0, addr = 0;
+	unsigned char *buffer;
+	unsigned char address[4] = {0}; /* Dummy , L , M , H */
+
+	buffer = kzalloc(FW_FILE_SIZE, GFP_KERNEL);
+
+	if (!buffer) {
+		dev_dbg(&flash_mode_client->dev,
+			"%s:unable to allocate buffer\n", __func__);
+		return -1;
+	}
+
+	dev_info(&flash_mode_client->dev, "Check erase ...\n");
+#ifdef FAST_READ_256_BYTE
+	dev_dbg(&flash_mode_client->dev, "per 256 byte\n");
+	for (i = 0; i < 0x100; i++) {/*per 256 bytes, 256 times*/
+		address[2] = i;
+		ite_i2c_pre_define_cmd_fastread(address, 0x100,
+			&buffer[0x100*i]);
+	}
+#else
+	dev_dbg(&flash_mode_client->dev, "per 32 byte\n");
+	for (i = 0; i < 0x800; i++) {/*per 32 bytes, 2048 times*/
+		addr = 0x20*i;
+		address[1] = (unsigned char)(addr & 0xFF);
+		address[2] = (unsigned char)((addr>>8) & 0xFF);
+		address[3] = (unsigned char)((addr>>16) & 0xFF);
+		ite_i2c_pre_define_cmd_fastread(address, 0x20,
+			&buffer[addr]);
+	}
+#endif
+	dev_info(&flash_mode_client->dev,
+		"start=0x%x, end=0x%x: check if all values are 0xFF...\n",
+		start, end);
+	for (i = start; i < end; i++) {
+		if (buffer[i] != 0xFF) {
+			dev_err(&flash_mode_client->dev,
+				"Check Erase Error on offset[%x] ; EFLASH=%02x\n",
+				i, buffer[i]);
+			result = -1;
+			break;
+		}
+	}
+
+	if (result == 0)
+		dev_info(&flash_mode_client->dev, "Check erase OK!\n");
+
+	kfree(buffer);
+
+	return result;
+}
+
+int do_program(void)
+{
+	int result = 0, i;
+	unsigned char payload[5];/*A2,A1,A0,Data0,Data1*/
+
+	cmd_write_enable();
+	while ((cmd_check_status() & 0x02) != 0x02)
+		usleep_range(1000, 2000);
+
+	payload[0] = (unsigned char)((start >> 16) & 0xFF); /*A2*/
+	payload[1] = (unsigned char)((start >> 8) & 0xFF); /*A1*/
+	payload[2] = (unsigned char)(start & 0xFF); /*A0*/
+
+	payload[3] = gbuffer[start+0]; /*Data 0*/
+	payload[4] = gbuffer[start+1]; /*Data 1*/
+
+	result = ite_i2c_pre_define_cmd_write_with_status(
+			EFLASH_CMD_AAI_WORD_PROGRAM, 5, payload);
+	/*double check status not in busy status*/
+	while (result & 0x01) {
+		usleep_range(1000, 2000);
+		result = cmd_check_status();
+	}
+	/*Combine command & check status to enhance speed*/
+	dev_info(&flash_mode_client->dev, "Program...\n");
+
+	for (i = (2+start); i < end;) {
+		result = ite_i2c_pre_define_cmd_write_with_status(
+				EFLASH_CMD_AAI_WORD_PROGRAM,
+				2, &gbuffer[i]);
+		/*wait Bit0 = 0*/
+		while (result & 0x01) {
+			usleep_range(1000, 2000);
+			result = cmd_check_status();
+		}
+		if ((i%4096) == 0)
+			dev_info(&flash_mode_client->dev,
+				"Program i=0x%04x\n", i);
+
+		i += 2;
+	}
+
+	cmd_write_disable();
+	dev_info(&flash_mode_client->dev, "Program OK!\n");
+
+	return result;
+}
+
+static int do_verify(void)
+{
+	int i, result = 0, addr = 0;
+	unsigned char *buffer;
+	unsigned char address[4] = {0}; /* Dummy , L , M , H */
+
+	buffer = kzalloc(FW_FILE_SIZE, GFP_KERNEL);
+
+	if (!buffer) {
+		dev_dbg(&flash_mode_client->dev,
+			"%s:unable to allocate buffer\n", __func__);
+		kfree(gbuffer);
+		gbuffer = NULL;
+		return -1;
+	}
+
+	dev_info(&flash_mode_client->dev, "Verify ...\n");
+#ifdef FAST_READ_256_BYTE
+	dev_dbg(&flash_mode_client->dev, "per 256 byte\n");
+	for (i = 0; i < 0x100; i++) {
+		address[2] = i;
+		ite_i2c_pre_define_cmd_fastread(address,
+			0x100, &buffer[0x100*i]);
+	}
+#else
+	dev_dbg(&flash_mode_client->dev, "per 32 byte\n");
+	for (i = 0; i < 0x800; i++) {/*per 32 bytes, 2048 times*/
+		addr = 0x20*i;
+		address[1] = (unsigned char)(addr & 0xFF);
+		address[2] = (unsigned char)((addr>>8) & 0xFF);
+		address[3] = (unsigned char)((addr>>16) & 0xFF);
+		ite_i2c_pre_define_cmd_fastread(address,
+			0x20, &buffer[addr]);
+	}
+#endif
+	dev_info(&flash_mode_client->dev,
+		"start=0x%x, end=0x%x: check if all values are equal to fw file...\n",
+		start, end);
+
+	for (i = start; i < end; i++) {
+		if (buffer[i] != gbuffer[i]) {
+			dev_err(&flash_mode_client->dev,
+			"Verify Error on offset[%x] ; file=%02x EFLASH=%02x\n",
+			i, gbuffer[i], buffer[i]);
+
+			result = -1;
+			break;
+		}
+	}
+
+	if (result == 0)
+		dev_info(&flash_mode_client->dev, "Verify OK!\n");
+
+	kfree(buffer);
+
+	kfree(gbuffer);
+	gbuffer = NULL;
+
+	return result;
+}
+
+static void do_reset(void)
+{
+	unsigned char tmp1;
+	tmp1 = i2ec_readbyte(0x1F01);
+	i2ec_writebyte(0x1F01, 0x20);
+	i2ec_writebyte(0x1F07, 0x01);
+	i2ec_writebyte(0x1F01, tmp1);
+}
+
+static int load_fw_and_set_flash_region(int force)
+{
+	unsigned char ec_version[8];
+	unsigned char bin_version[3];
+	int err, i;
+
+	err = load_fw_bin_to_buffer(fw_bin_path);
+	if (err < 0) {
+		dev_err(&flash_mode_client->dev,
+			"%s: load %s fail, abort\n",
+			__func__, fw_bin_path);
+		return -1;
+	}
+
+	err = check_ec_has_been_flash(ec_version);
+
+	if (!force) {
+		if (err < 0) {
+			/* err < 0 means no fw in ic yet, do not
+			   do update since it may brick the ic
+			   if shutdown during flash */
+			dev_err(&flash_mode_client->dev,
+				"it8566 has no fw, abort\n");
+			return 1;
+		}
+
+		if (err < 2) {
+			/* err == 0, 1 means ic has fw flash fail before,
+			   continue to do fw update */
+			dev_info(&flash_mode_client->dev,
+				"it8566 has fw flash fail before\n");
+		} else {
+			/* err == 2 means ic has valid fw inside,
+			   futhur check fw version to see if need
+			   to update */
+			for (i = 0; i < 3; i++)
+				bin_version[i] = gbuffer[0x7f20 + i];
+
+			dev_info(&flash_mode_client->dev,
+				"Check bin ver: %x.%x.%x, current fw ver: %x.%x.%x\n",
+				bin_version[0], bin_version[1], bin_version[2],
+				ec_version[0], ec_version[1], ec_version[2]);
+
+			if (ec_version[0] > bin_version[0] ||
+			    (ec_version[0] == bin_version[0] &&
+			     ec_version[1] > bin_version[1]) ||
+			    (ec_version[0] == bin_version[0] &&
+			     ec_version[1] == bin_version[1] &&
+			     ec_version[2] >= bin_version[2])) {
+				dev_info(&flash_mode_client->dev,
+						"no need to update\n");
+				return 1;
+			}
+		}
+	} else {
+		if (err < 0)
+			dev_info(&flash_mode_client->dev,
+			"it8566 has no fw, do full flash\n");
+		else if (err < 2)
+			dev_info(&flash_mode_client->dev,
+			"it8566 fail to flash fw before, do partial flash\n");
+		else
+			dev_info(&flash_mode_client->dev,
+			"it8566 has fw, do partial flash. Current fw ver: %x.%x.%x\n",
+			ec_version[0], ec_version[1], ec_version[2]);
+	}
+
+	if (err < 0) {
+		start = 0x0;
+		end = 0x10000; /*65536*/
+
+		get_flash_id();
+		if ((flash_id[0] != 0xFF) ||
+			(flash_id[1] != 0xFF) ||
+			(flash_id[2] != 0xFE)) {
+			dev_err(&flash_mode_client->dev,
+			"%s:fail: flash_id:[0]=%d, [1]=%d, [2]=%d\n",
+			__func__, flash_id[0], flash_id[1], flash_id[2]);
+			return -1;
+		}
+	} else {
+		start = 0x2000; /*8192*/
+		end = 0xFC00; /*64512*/
+
+		if (err > 0) {
+			cmd_enter_flash_mode();
+			msleep(20);/*wait for enter flash mode*/
+		}
+
+		/*double check if get flash id ok*/
+		get_flash_id();
+		if ((flash_id[0] != 0xFF) ||
+			(flash_id[1] != 0xFF) ||
+			(flash_id[2] != 0xFE)) {
+			dev_err(&flash_mode_client->dev,
+			"%s:fail: flash_id:[0]=%d, [1]=%d, [2]=%d\n",
+			__func__, flash_id[0], flash_id[1], flash_id[2]);
+			return -1;
+		}
+	}
+	return 0;
+}
+
+#define NUM_FW_UPDATE_RETRY 2
+int it8566_fw_update(int force)
+{
+	int err = 0;
+	int retry = -1;
+	unsigned char ec_version[8];
+
+retry:
+	retry++;
+	if (retry > NUM_FW_UPDATE_RETRY)
+		goto update_fail;
+
+	dev_info(&flash_mode_client->dev, "Update fw..., retry=%d\n", retry);
+
+	err = load_fw_and_set_flash_region(force);
+	if (err < 0)
+		goto retry;
+	else if (err > 0)
+		goto update_none;
+
+	do_erase_all();
+	err = do_check();
+	if (err < 0) {
+		dev_err(&flash_mode_client->dev,
+		"%s:check erase fail...\n", __func__);
+		goto retry;
+	}
+
+	do_program();
+	err = do_verify();
+	if (err < 0) {
+		dev_err(&flash_mode_client->dev,
+		"%s:verify program fail...\n", __func__);
+		goto retry;
+	}
+
+	do_reset();
+	msleep(2000);
+
+	err = get_ec_ver(ec_version);
+	if (err < 0) {
+		dev_err(&flash_mode_client->dev,
+			"%s:get fw version fail...\n", __func__);
+		goto retry;
+	}
+
+	dev_info(&flash_mode_client->dev,
+		"Update fw Success!, Current fw ver: %x.%x.%x\n",
+		ec_version[0], ec_version[1], ec_version[2]);
+
+	/*update success*/
+	return 0;
+
+update_fail:
+	dev_err(&flash_mode_client->dev, "Update fw Fail...\n");
+	kfree(gbuffer);
+	gbuffer = NULL;
+	/*update fail*/
+	return -1;
+update_none:
+	kfree(gbuffer);
+	gbuffer = NULL;
+	/*no need to update*/
+	return 1;
+}
+/* fw update -- */
+
+/* dbg_fw_update ++ */
+static ssize_t dbg_fw_update_write(struct file *file,
+		const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char buf[32];
+	char char_buf[10];
+	int int_buf = 0;
+
+	size_t buf_size = min(count, sizeof(buf)-1);
+
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+	/*buf[buf_size] = 0;*/
+	sscanf(buf, "%s %d", char_buf, &int_buf);
+	dev_dbg(&flash_mode_client->dev,
+		"char_buf=%s, int_buf=%d\n", char_buf, int_buf);
+
+	mutex_lock(&it8566_fw_lock);
+	if (!strcmp(char_buf, "force")) {
+		it8566_fw_update(1);
+	} else if (!strcmp(char_buf, "step")) {
+		switch (int_buf) {
+		case 1:
+			load_fw_and_set_flash_region(1);
+			break;
+		case 2:
+			do_erase_all();
+			break;
+		case 3:
+			do_check();
+			break;
+		case 4:
+			do_program();
+			break;
+		case 5:
+			do_verify();
+			break;
+		case 6:
+			do_reset();
+			break;
+		default:
+			dev_info(&flash_mode_client->dev, "do nothing");
+		}
+	}
+	mutex_unlock(&it8566_fw_lock);
+
+	return buf_size;
+}
+
+static const struct file_operations dbg_fw_update_fops = {
+	.open           = simple_open,
+	.write          = dbg_fw_update_write,
+};
+/* dbg_fw_update -- */
+
+/* dbg_ec_ver ++ */
+static ssize_t dbg_ec_ver_read(struct file *file,
+		char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char buf[32];
+	char v[8];
+
+	if (get_ec_ver(v) < 0) {
+		dev_err(&flash_mode_client->dev,
+			"%s: can't get fw version\n", __func__);
+
+		snprintf(buf, 32, "%s\n",
+				"can't get fw version");
+		return simple_read_from_buffer(user_buf, count, ppos,
+				buf, strlen(buf));
+	}
+	snprintf(buf, 32, "%x.%x.%x\n", v[0], v[1], v[2]);
+	return simple_read_from_buffer(user_buf, count, ppos,
+			buf, strlen(buf));
+}
+
+static const struct file_operations dbg_ec_ver_fops = {
+	.open           = simple_open,
+	.read           = dbg_ec_ver_read,
+};
+/* dbg_ec_ver -- */
+
+static void add_debugfs(void)
+{
+	struct dentry *cecdir, *d;
+
+	cecdir = it8566_get_debugfs_dir();
+	if (!cecdir) {
+		dev_err(&flash_mode_client->dev,
+			"can not create debugfs dir\n");
+		return;
+	}
+	d = debugfs_create_file("fw_update", S_IWUSR, cecdir,
+			NULL, &dbg_fw_update_fops);
+	if (!d) {
+		dev_err(&flash_mode_client->dev,
+			"can not create debugfs fw_update\n");
+		return;
+	}
+	d = debugfs_create_file("fw_version", S_IRUSR , cecdir,
+			NULL, &dbg_ec_ver_fops);
+	if (!d) {
+		dev_err(&flash_mode_client->dev,
+			"can not create debugfs fw_version\n");
+		return;
+	}
+}
+
+static int it8566_flash_probe(struct i2c_client *client,
+		const struct i2c_device_id *id)
+{
+	struct i2c_adapter *adapter;
+
+	dev_info(&client->dev, "%s\n", __func__);
+	adapter = to_i2c_adapter(client->dev.parent);
+
+	if (!i2c_check_functionality(adapter,
+			I2C_FUNC_SMBUS_BYTE_DATA |
+			I2C_FUNC_SMBUS_I2C_BLOCK)) {
+		dev_err(&client->dev,
+			"I2C adapter %s doesn't support"
+			" SMBus BYTE DATA & I2C BLOCK\n",
+			adapter->name);
+		return -EIO;
+	}
+
+	flash_mode_client = client;
+	mutex_init(&it8566_fw_lock);
+
+	add_debugfs();
+
+	return 0;
+}
+
+static int it8566_flash_remove(struct i2c_client *client)
+{
+	mutex_destroy(&it8566_fw_lock);
+	return 0;
+}
+
+static const struct i2c_device_id it8566_flash_id[] = {
+	{DEV_NAME_FLASH, 0},
+	{},
+};
+
+MODULE_DEVICE_TABLE(i2c, it8566_flash_id);
+
+static struct i2c_driver it8566_i2c_flash_driver = {
+	.driver = {
+		.name = DEV_NAME_FLASH,
+	},
+	.probe = it8566_flash_probe,
+	.remove = it8566_flash_remove,
+	.id_table = it8566_flash_id,
+};
+
+static int __init it8566_flash_init(void)
+{
+	return i2c_add_driver(&it8566_i2c_flash_driver);
+}
+
+module_init(it8566_flash_init);
+
+static void __exit it8566_flash_exit(void)
+{
+	i2c_del_driver(&it8566_i2c_flash_driver);
+}
+
+module_exit(it8566_flash_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ASUS IT8566 firmware flash driver");
+MODULE_AUTHOR("Joey SY Chen <joeysy_chen@asus.com>");
diff --git a/drivers/video/it8566_hdmi_cec/it8566_hdmi_cec.c b/drivers/video/it8566_hdmi_cec/it8566_hdmi_cec.c
new file mode 100644
index 0000000..13fd5c7
--- /dev/null
+++ b/drivers/video/it8566_hdmi_cec/it8566_hdmi_cec.c
@@ -0,0 +1,760 @@
+/*
+ * ITE it8566 HDMI CEC driver
+ *
+ * Copyright(c) 2014 ASUSTek COMPUTER INC. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/fcntl.h>
+#include <linux/uaccess.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include <linux/miscdevice.h>
+#include "it8566_hdmi_cec_ioctl.h"
+#include "it8566_hdmi_cec.h"
+
+#define CEC_DEV_NAME		"it8566_hdmi_cec"
+#define CEC_IRQ_GPIO_PIN	55
+/*debug*/
+/*#define CEC_REV_WORK*/
+/*#define DEBUG*/
+
+#define CEC_WRITE_DATA_LEN	17
+#define CEC_READ_DATA_LEN	17
+#define CEC_WRITE_CMD		0x90
+#define CEC_READ_CMD		0x91
+#define CEC_LA_CMD		0x92
+#define CEC_CHECK_STATUS_CMD	0x93
+#define CEC_GET_VERSION_CMD	0x94
+
+#define NUM_CHECK_TX_BUSY	25
+#define NUM_I2C_READ_RETRY	5
+
+static struct i2c_client	*cec_client;
+static struct miscdevice hdmi_cec_device;
+static struct workqueue_struct  *cec_rev_wq;
+static int cec_irq;
+
+
+
+static int cec_i2c_read(unsigned char *to_header, unsigned char *to_opcode,
+		unsigned char to_data[], int *to_opds_len);
+
+static void cec_rev_worker(struct work_struct *work)
+{
+#ifndef CEC_REV_WORK
+	char *envp[2] = { "CEC_MSG_RCEV=1", NULL };
+	/*send event to user*/
+	dev_dbg(&cec_client->dev, "%s:send CEC_MSG_RCEV uevent\n", __func__);
+	kobject_uevent_env(&hdmi_cec_device.this_device->kobj
+			, KOBJ_CHANGE, envp);
+#else
+	unsigned char header, opcode;
+	int op_len;
+	unsigned char data[14] = {0};
+	int i, result;
+
+	dev_info(&cec_client->dev, "%s\n", __func__);
+
+	result = cec_i2c_read(&header, &opcode, data, &op_len);
+	if (result < 0) {
+		dev_err(&cec_client->dev, "%s: cec_i2c_read fail...", __func__);
+		return;
+	}
+	dev_info(&cec_client->dev, "%s: header=0x%x, opcode=0x%x, op_len=0x%x\n"
+			, __func__, header, opcode, op_len);
+	for (i = 0; i < op_len; i++)
+		dev_info(&cec_client->dev, "op[%d]=0x%x", i, data[i]);
+	dev_info(&cec_client->dev, "\n");
+#endif
+}
+static DECLARE_WORK(cec_rev_work, cec_rev_worker);
+
+static irqreturn_t hdmi_cec_irq_handler(int irq, void *ptr_not_use)
+{
+	dev_dbg(&cec_client->dev, "%s: receive cec irq\n", __func__);
+	queue_work(cec_rev_wq, &cec_rev_work);
+	return IRQ_HANDLED;
+}
+
+static inline int it8566_setup_irq(void)
+{
+	int ret;
+
+	ret = gpio_request(CEC_IRQ_GPIO_PIN, "it8566_irq");
+	if (ret) {
+		dev_err(&cec_client->dev,
+			"failed to request GPIO %d\n",
+			CEC_IRQ_GPIO_PIN);
+		return ret;
+	}
+	ret = gpio_direction_input(CEC_IRQ_GPIO_PIN);
+	if (ret) {
+		dev_err(&cec_client->dev,
+			"failed to set GPIO %d as input\n",
+			CEC_IRQ_GPIO_PIN);
+		return ret;
+	}
+
+	cec_irq = gpio_to_irq(CEC_IRQ_GPIO_PIN);
+	dev_info(&cec_client->dev, "IRQ number assigned = %d\n", cec_irq);
+
+	ret = request_irq(cec_irq, hdmi_cec_irq_handler,
+				IRQF_TRIGGER_FALLING,
+				"hdmi_cec_irq", NULL);
+	if (ret) {
+		dev_err(&cec_client->dev,
+			"request_irq %d failed\n", cec_irq);
+		return ret;
+	}
+	return 0;
+}
+
+/*cec transmit/receive ++*/
+static unsigned char check_it8566_status(void)
+{
+	int st = 0;
+
+	if (!cec_client) {
+		dev_err(&cec_client->dev, "no cec_client\n");
+		return 0;
+	}
+
+	st = i2c_smbus_read_byte_data(cec_client, CEC_CHECK_STATUS_CMD);
+
+	if (st < 0) {
+		dev_err(&cec_client->dev,
+			"%s:i2c transfer FAIL: err=%d\n", __func__, st);
+		return RESULT_CEC_BUS_ERR;
+	}
+	dev_dbg(&cec_client->dev, "%s:status=0x%x\n", __func__, st);
+	return (unsigned char)st;
+}
+
+static int cec_i2c_write(unsigned char header, unsigned char opcode,
+		unsigned char opds[], int opds_len)
+{
+	unsigned char tx_data[CEC_WRITE_DATA_LEN] = {0};
+	int i, err;
+
+	if (!cec_client) {
+		dev_err(&cec_client->dev, "no cec_client\n");
+		return -1;
+	}
+
+	if (opds_len > 14) {
+		dev_err(&cec_client->dev, "operands can not exceed 14: %d\n",
+			opds_len);
+		return -1;
+	}
+
+	/*length: header,opcode,data0,data1 ... , region:1~16*/
+	tx_data[0] = opds_len + 2;
+	tx_data[1] = header;
+	tx_data[2] = opcode;
+
+	for (i = 0; i < opds_len; i++)
+		tx_data[i+3] = opds[i];
+
+	for (i = 0; i < CEC_WRITE_DATA_LEN; i++)
+		dev_dbg(&cec_client->dev, "tx_data[%d]= 0x%x\n", i, tx_data[i]);
+
+	err = i2c_smbus_write_i2c_block_data(cec_client, CEC_WRITE_CMD,
+			CEC_WRITE_DATA_LEN, tx_data);
+	if (err < 0) {
+		dev_err(&cec_client->dev,
+			"%s:i2c transfer FAIL: err=%d\n", __func__, err);
+		return -1;
+	}
+
+	return 0;
+}
+
+static unsigned char cec_i2c_write_with_status_check(unsigned char header,
+		unsigned char opcode, unsigned char *body_operads,
+		int body_operads_len)
+{
+		unsigned char ck_st;
+		int chk_tx_busy_cnt = 0;
+		int err;
+		bool is_broadcast, nak;
+
+		/* 1. check if rx busy or bus err*/
+		ck_st = check_it8566_status();
+		if ((ck_st & RESULT_RX_BUSY) == RESULT_RX_BUSY) {
+			dev_info(&cec_client->dev, "check it8566 rx busy, abort cec tx\n");
+			return RESULT_RX_BUSY;
+		} else if ((ck_st & RESULT_CEC_BUS_ERR) == RESULT_CEC_BUS_ERR) {
+			dev_info(&cec_client->dev, "check it8566 tx bus err, abort cec tx\n");
+			return RESULT_CEC_BUS_ERR;
+		}
+
+		/* 2. cec tx */
+		err = cec_i2c_write(header, opcode,
+				body_operads, body_operads_len);
+		if (err < 0)
+			return RESULT_CEC_BUS_ERR;
+
+		/* 3. check tx busy, wait for it8566 done cec transfer */
+		do {
+			/* at least start bit + header */
+			/*about 4.5 + 10*2.4 = 28.5ms */
+			usleep_range(20000, 20500);
+			ck_st = check_it8566_status();
+			chk_tx_busy_cnt++;
+		} while ((ck_st & RESULT_TX_BUSY) == RESULT_TX_BUSY &&
+			  chk_tx_busy_cnt < NUM_CHECK_TX_BUSY);
+
+		/* 4. check cec transfer result */
+		is_broadcast = (header & 0xF) == 0xF;
+		nak = (ck_st & RESULT_CEC_NACK) == RESULT_CEC_NACK;
+		if ((ck_st & RESULT_TX_BUSY) == RESULT_TX_BUSY) {
+			dev_err(&cec_client->dev, "cec tx busy\n");
+			return RESULT_CEC_BUS_ERR;
+		} else if ((ck_st & RESULT_CEC_BUS_ERR) == RESULT_CEC_BUS_ERR) {
+			dev_info(&cec_client->dev, "cec tx bus err\n");
+			return RESULT_CEC_BUS_ERR;
+		} else if ((!is_broadcast && nak) || (is_broadcast && !nak)) {
+			/* For broadcast messages the sense of
+			   the ACK bit is inverted:
+			   A ‘0’ read by the Initiator indicates that
+			   one or more devices have rejected the message */
+			dev_info(&cec_client->dev, "cec tx direct nack / bcst reject\n");
+			return RESULT_CEC_NACK;
+		} else {
+			dev_dbg(&cec_client->dev, "cec tx success\n");
+			return RESULT_TX_SUCCESS;
+		}
+}
+
+/*
+   number of elements of to_data[] passed in must be 14
+*/
+static int cec_i2c_read(unsigned char *to_header, unsigned char *to_opcode,
+		unsigned char to_data[], int *to_opds_len)
+{
+	unsigned char rx_data[CEC_READ_DATA_LEN] = {0};
+	int i, err;
+	int retry = 0;
+
+	if (!cec_client) {
+		dev_err(&cec_client->dev, "no cec_client\n");
+		return -1;
+	}
+
+read_retry:
+	if (retry > NUM_I2C_READ_RETRY) {
+		dev_err(&cec_client->dev, "cec read fail\n");
+		return -1;
+	}
+
+	err = i2c_smbus_read_i2c_block_data(cec_client, CEC_READ_CMD,
+			CEC_READ_DATA_LEN, rx_data);
+	if (err < 0) {
+		dev_err(&cec_client->dev,
+		"%s:i2c transfer FAIL: err=%d, retry=%d\n",
+		__func__, err, retry);
+		retry++;
+		goto read_retry;
+	}
+
+	for (i = 0; i < rx_data[0] + 1; i++)
+		dev_dbg(&cec_client->dev, "rx_data[%d]= 0x%x\n", i, rx_data[i]);
+
+	/*may only receive header*/
+	*to_opds_len = rx_data[0] - 2; /*rx_data[0] region: 1~16*/
+
+	*to_header = rx_data[1];
+
+	if (*to_opds_len >= 0)
+		*to_opcode = rx_data[2];
+
+	for (i = 0; i < *to_opds_len; i++)
+		to_data[i] = rx_data[i+3];
+
+	return 0;
+}
+/*cec transmit/receive --*/
+
+/* set/get LA ++ */
+static int set_logical_address(unsigned char la)
+{
+	int err;
+
+	if (!cec_client) {
+		dev_err(&cec_client->dev, "no cec_client\n");
+		return -1;
+	}
+
+	dev_dbg(&cec_client->dev, "%s:la=0x%x\n", __func__, la);
+
+	err = i2c_smbus_write_byte_data(cec_client,
+			CEC_LA_CMD, la);
+	if (err < 0) {
+		dev_err(&cec_client->dev,
+			"%s:i2c transfer FAIL: err=%d\n", __func__, err);
+		return -1;
+	}
+
+	return 0;
+}
+
+static int get_logical_address(unsigned char *to_la)
+{
+	int la;
+
+	if (!cec_client) {
+		dev_err(&cec_client->dev, "no cec_client\n");
+		return -1;
+	}
+
+	la = i2c_smbus_read_byte_data(cec_client, CEC_LA_CMD);
+	if (la < 0) {
+		dev_err(&cec_client->dev,
+			"%s:i2c transfer FAIL...: err=%d\n", __func__, la);
+		return -1;
+	}
+
+	dev_dbg(&cec_client->dev, "%s:get la=0x%x ---\n", __func__, la);
+	*to_la = (unsigned char)la;
+	return 0;
+}
+/* set/get LA -- */
+
+/* dbg_cec_tx ++ */
+static ssize_t dbg_cec_tx_write(struct file *file,
+		const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char buf[128];
+	char *b;
+	unsigned char user_data[17] = {0};
+	unsigned int d;
+	int offset = 0, i = 0;
+
+	unsigned char user_header = 0, user_opcode = 0;
+	int user_data_len = 0;
+
+	size_t buf_size = min(count, sizeof(buf)-1);
+
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+
+	buf[buf_size] = 0;
+
+	b = buf;
+	while (sscanf(b, "%x%n", &d, &offset) == 1) {
+		b += offset;
+		user_data[i] = (unsigned char)d;
+		i++;
+	}
+
+	user_header = user_data[0];
+	user_opcode = user_data[1];
+	user_data_len = user_data[2];
+
+	if (user_opcode == 0)
+		user_data_len = user_data_len - 1;
+
+	dev_info(&cec_client->dev,
+		"user_header=0x%x, user_opcode=0x%x, user_data_len=%d\n",
+		user_header, user_opcode, user_data_len);
+
+
+	cec_i2c_write_with_status_check(user_header, user_opcode,
+			&user_data[3], user_data_len);
+
+	return buf_size;
+}
+
+static const struct file_operations dbg_cec_tx_fops = {
+	.open           = simple_open,
+	.write          = dbg_cec_tx_write,
+};
+/* dbg_cec_tx -- */
+/* dbg_cec_rx ++ */
+static ssize_t dbg_cec_rx_read(struct file *file,
+		char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char buf[64];
+	int result;
+	unsigned char header, opcode;
+	int len;
+	unsigned char data[14] = {0};
+
+	result = cec_i2c_read(&header, &opcode, data, &len);
+	if (result < 0)
+		goto err;
+
+	dev_dbg(&cec_client->dev, "%s: header=0x%x, opcode=0x%x, len=0x%x,"
+		"data1=0x%x, data2=0x%x, data3=0x%x\n",
+		__func__, header, opcode, len, data[0], data[1], data[3]);
+	snprintf(buf, 64,
+			"header=0x%x, opcode=0x%x, len=0x%x, data1=0x%x, data2=0x%x, data3=0x%x\n",
+			header, opcode, len, data[0], data[1], data[2]);
+	return simple_read_from_buffer(user_buf, count, ppos,
+			buf, strlen(buf));
+err:
+	snprintf(buf, 32, "%s\n",
+			"cec read fail...");
+	return simple_read_from_buffer(user_buf, count, ppos,
+			buf, strlen(buf));
+}
+
+static const struct file_operations dbg_cec_rx_fops = {
+	.open           = simple_open,
+	.read           = dbg_cec_rx_read,
+};
+/* dbg_cec_rx -- */
+
+/* dbg_cec_la ++ */
+static ssize_t dbg_cec_la_write(struct file *file,
+		const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char buf[32];
+	unsigned char la;
+	int result;
+
+	size_t buf_size = min(count, sizeof(buf)-1);
+
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+
+	buf[buf_size] = 0;
+	if (kstrtou8(buf, 0, &la))
+		return -EINVAL;
+
+	dev_info(&cec_client->dev, "set la:0x%x\n", la);
+
+	result = set_logical_address(la);
+	if (result < 0)
+		dev_err(&cec_client->dev, "set la fail\n");
+	return buf_size;
+}
+
+static ssize_t dbg_cec_la_read(struct file *file,
+		char __user *user_buf, size_t count, loff_t *ppos)
+{
+	unsigned char get_la = 0xf;
+	char buf[32];
+	int result;
+
+	result = get_logical_address(&get_la);
+	if (result < 0)
+		dev_err(&cec_client->dev, "get la fail\n");
+
+	dev_info(&cec_client->dev, "get la:0x%x\n", get_la);
+
+	snprintf(buf, 32, "%d\n", get_la);
+
+	return simple_read_from_buffer(user_buf, count, ppos,
+			buf, strlen(buf));
+}
+
+static const struct file_operations dbg_cec_la_fops = {
+	.open           = simple_open,
+	.write           = dbg_cec_la_write,
+	.read            = dbg_cec_la_read,
+};
+/* dbg_cec_la -- */
+
+/* ioctl ++ */
+static long hdmi_cec_ioctl(struct file *file, unsigned int cmd,
+		unsigned long arg)
+{
+	/*compat_ptr() may need*/
+	switch (cmd) {
+	case IT8566_HDMI_CEC_IOCTL_SEND_MESSAGE:
+	{
+		struct it8566_cec_msg cec_msg;
+		int rst;
+#ifdef DEBUG
+		int i;
+#endif
+		unsigned char header = 0, opcode = 0, body_len = 0;
+		unsigned char *body_operads;
+
+		if (copy_from_user(&cec_msg, (void __user *)arg,
+				sizeof(cec_msg))) {
+			dev_err(&cec_client->dev, "get arg error\n");
+			return -EFAULT;
+		}
+#ifdef DEBUG
+		dev_info(&cec_client->dev, "from user: init:0x%x, dest:0x%x, len:0x%x\n",
+				cec_msg.initiator,
+				cec_msg.destination,
+				cec_msg.length);
+		for (i = 0; i < cec_msg.length; i++)
+			dev_info(&cec_client->dev,
+				", body[%d]= 0x%x", i, cec_msg.body[i]);
+		dev_info(&cec_client->dev, "\n");
+#endif
+		header = (cec_msg.initiator & 0xF) << 4 |
+			(cec_msg.destination & 0xF);
+		body_len = cec_msg.length;
+
+		if (body_len > 0)
+			opcode = cec_msg.body[0];
+
+		if (body_len > 1)
+			body_operads = &cec_msg.body[1];
+
+		rst = cec_i2c_write_with_status_check(header, opcode,
+				body_operads, body_len - 1);
+		cec_msg.result = rst;
+
+		if (copy_to_user((void __user *)arg, &cec_msg,
+				sizeof(cec_msg))) {
+			dev_err(&cec_client->dev, "pass arg fail\n");
+			return -EFAULT;
+		}
+		break;
+	}
+	case IT8566_HDMI_CEC_IOCTL_RCEV_MESSAGE:
+	{
+		struct it8566_cec_msg cec_msg;
+		int i, err;
+		unsigned char header = 0, opcode = 0;
+		int body_operads_len = 0;
+		unsigned char body_operads[14] = {0};
+
+		err = cec_i2c_read(&header, &opcode,
+				body_operads, &body_operads_len);
+		if (err) {
+			dev_err(&cec_client->dev, "i2c read err\n");
+			return err;
+		}
+
+		cec_msg.initiator = (header >> 4) & 0xF;
+		cec_msg.destination = header & 0xF;
+		cec_msg.length = body_operads_len + 1; /*region:0~15*/
+		cec_msg.body[0] = opcode;
+		for (i = 0; i < body_operads_len; i++)
+			cec_msg.body[i+1] = body_operads[i];
+#ifdef DEBUG
+		dev_info(&cec_client->dev, "to user: init:0x%x, dest:0x%x, len:0x%x\n",
+				cec_msg.initiator,
+				cec_msg.destination,
+				cec_msg.length);
+		for (i = 0; i < cec_msg.length; i++)
+			dev_info(&cec_client->dev,
+				", body[%d]= 0x%x", i, cec_msg.body[i]);
+		dev_info(&cec_client->dev, "\n");
+#endif
+		if (copy_to_user((void __user *)arg, &cec_msg,
+				sizeof(cec_msg))) {
+			dev_err(&cec_client->dev, "pass arg fail\n");
+			return -EFAULT;
+		}
+		break;
+	}
+	case IT8566_HDMI_CEC_IOCTL_SET_LA:
+	{
+		unsigned char logical_addr;
+		int err;
+
+		if (copy_from_user(&logical_addr, (void __user *)arg,
+				sizeof(unsigned char))) {
+			dev_err(&cec_client->dev, "get arg error\n");
+			return -EFAULT;
+		}
+		dev_dbg(&cec_client->dev,
+			"set logical addr to %d\n", logical_addr);
+
+		err = set_logical_address(logical_addr);
+		if (err < 0)
+			return -EBUSY;
+		break;
+	}
+	case IT8566_HDMI_CEC_IOCTL_FW_UPDATE_IF_NEEDED:
+	{
+		int err;
+
+		/* no need to update: err > 0,
+		 * update success: err = 0,
+		 * update fail: err < 0 */
+		mutex_lock(&it8566_fw_lock);
+		err = it8566_fw_update(0);
+		mutex_unlock(&it8566_fw_lock);
+
+		if (copy_to_user((void __user *)arg, &err,
+				sizeof(int))) {
+			dev_err(&cec_client->dev, "pass arg fail\n");
+			return -EFAULT;
+		}
+		break;
+	}
+	default:
+		dev_err(&cec_client->dev, "%s:unknown cmd=%d\n", __func__, cmd);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int hdmi_cec_open(struct inode *inode, struct file *filp)
+{
+	dev_info(&cec_client->dev, "%s\n", __func__);
+	return nonseekable_open(inode, filp);
+}
+
+static int hdmi_cec_release(struct inode *inode, struct file *file)
+{
+	dev_info(&cec_client->dev, "%s\n", __func__);
+	return 0;
+}
+
+static const struct file_operations hdmi_cec_fops = {
+	.owner = THIS_MODULE,
+	.open  = hdmi_cec_open,
+	.unlocked_ioctl = hdmi_cec_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = hdmi_cec_ioctl,
+#endif
+	.release = hdmi_cec_release
+};
+/* ioctl -- */
+
+struct i2c_client *it8566_get_cec_client(void)
+{
+	return cec_client;
+}
+
+struct dentry *it8566_get_debugfs_dir(void)
+{
+	static struct dentry *d;
+
+	if (!d)
+		d = debugfs_create_dir("it8566_hdmi_cec", NULL);
+
+	return d;
+}
+
+static void add_debugfs(void)
+{
+	struct dentry *cecdir, *d;
+
+	cecdir = it8566_get_debugfs_dir();
+	if (!cecdir) {
+		dev_err(&cec_client->dev, "can not create debugfs dir\n");
+		return;
+	}
+	d = debugfs_create_file("cec_tx", S_IWUSR , cecdir,
+			NULL, &dbg_cec_tx_fops);
+	if (!d) {
+		dev_err(&cec_client->dev, "can not create debugfs cec_tx\n");
+		return;
+	}
+	d = debugfs_create_file("cec_rx", S_IRUSR , cecdir,
+			NULL, &dbg_cec_rx_fops);
+	if (!d) {
+		dev_err(&cec_client->dev, "can not create debugfs cec_rx\n");
+		return;
+	}
+	d = debugfs_create_file("cec_la", S_IWUSR , cecdir,
+			NULL, &dbg_cec_la_fops);
+	if (!d) {
+		dev_err(&cec_client->dev, "can not create debugfs cec_rx\n");
+		return;
+	}
+}
+
+static int it8566_cec_probe(struct i2c_client *client,
+		const struct i2c_device_id *id)
+{
+	struct i2c_adapter *adapter;
+	int ret;
+
+	dev_info(&client->dev, "%s\n", __func__);
+	adapter = to_i2c_adapter(client->dev.parent);
+
+	if (!i2c_check_functionality(adapter,
+			I2C_FUNC_SMBUS_BYTE_DATA |
+			I2C_FUNC_SMBUS_I2C_BLOCK)) {
+		dev_err(&client->dev,
+			"I2C adapter %s doesn't support"
+			" SMBus BYTE DATA & I2C BLOCK\n",
+			adapter->name);
+		return -EIO;
+	}
+
+	cec_client = client;
+
+	ret = it8566_setup_irq();
+	if (ret)
+		return ret;
+
+	/*for ioctl*/
+	hdmi_cec_device.minor = MISC_DYNAMIC_MINOR;
+	hdmi_cec_device.name = "it8566_hdmi_cec";
+	hdmi_cec_device.fops = &hdmi_cec_fops;
+
+	ret = misc_register(&hdmi_cec_device);
+	if (ret) {
+		dev_err(&client->dev,
+			"fail to register misc device\n");
+		return ret;
+	}
+
+	add_debugfs();
+
+	cec_rev_wq = create_workqueue("cec_rev_wq");
+
+	return 0;
+}
+
+static int it8566_cec_remove(struct i2c_client *client)
+{
+	return 0;
+}
+
+static const struct i2c_device_id it8566_cec_id[] = {
+	{CEC_DEV_NAME, 0},
+	{},
+};
+
+MODULE_DEVICE_TABLE(i2c, it8566_cec_id);
+
+static struct i2c_driver it8566_i2c_cec_driver = {
+	.driver = {
+		.name = CEC_DEV_NAME,
+	},
+	.probe = it8566_cec_probe,
+	.remove = it8566_cec_remove,
+	.id_table = it8566_cec_id,
+};
+
+static int __init it8566_cec_init(void)
+{
+	return i2c_add_driver(&it8566_i2c_cec_driver);
+}
+
+module_init(it8566_cec_init);
+
+static void __exit it8566_cec_exit(void)
+{
+	i2c_del_driver(&it8566_i2c_cec_driver);
+}
+
+module_exit(it8566_cec_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ASUS IT8566 HDMI CEC driver");
+MODULE_AUTHOR("Joey SY Chen <joeysy_chen@asus.com>");
diff --git a/drivers/video/it8566_hdmi_cec/it8566_hdmi_cec.h b/drivers/video/it8566_hdmi_cec/it8566_hdmi_cec.h
new file mode 100644
index 0000000..387d383
--- /dev/null
+++ b/drivers/video/it8566_hdmi_cec/it8566_hdmi_cec.h
@@ -0,0 +1,22 @@
+/*
+ * ITE it8566 HDMI CEC driver
+ *
+ * Copyright(c) 2014 ASUSTek COMPUTER INC. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IT8566_HDMI_CEC_H
+#define _IT8566_HDMI_CEC_H
+extern struct i2c_client *it8566_get_cec_client(void);
+extern struct dentry *it8566_get_debugfs_dir(void);
+extern int it8566_fw_update(int force);
+extern struct mutex it8566_fw_lock;
+#endif
diff --git a/drivers/video/it8566_hdmi_cec/it8566_hdmi_cec_ioctl.h b/drivers/video/it8566_hdmi_cec/it8566_hdmi_cec_ioctl.h
new file mode 100644
index 0000000..6ff294c
--- /dev/null
+++ b/drivers/video/it8566_hdmi_cec/it8566_hdmi_cec_ioctl.h
@@ -0,0 +1,58 @@
+/*
+ * ITE it8566 HDMI CEC driver
+ *
+ * Copyright(c) 2014 ASUSTek COMPUTER INC. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IT8566_HDMI_CEC_IOCTL_H
+#define _IT8566_HDMI_CEC_IOCTL_H
+
+/*error code for send cec_msg ioctl*/
+enum {
+	RESULT_SUCCESS = 0,
+	RESULT_NACK = 1,        /* not acknowledged */
+	RESULT_BUSY = 2,        /* bus is busy */
+	RESULT_FAIL = 3,
+};
+
+#define CEC_MESSAGE_BODY_MAX_LENGTH	16 /*should be 14? */
+struct it8566_cec_msg {
+	/* logical address of sender */
+	unsigned short initiator;
+
+	/* logical address of receiver */
+	unsigned short destination;
+
+	/* Length in bytes of body, range [0, CEC_MESSAGE_BODY_MAX_LENGTH] */
+	unsigned short length;
+	unsigned char body[CEC_MESSAGE_BODY_MAX_LENGTH];
+	/* for sent cec result*/
+	unsigned char result;
+};
+
+#define IT8566_HDMI_CEC_IOCTL_SEND_MESSAGE \
+	_IOWR('\x66', 1, struct it8566_cec_msg)
+#define IT8566_HDMI_CEC_IOCTL_RCEV_MESSAGE \
+	_IOR('\x66', 2, struct it8566_cec_msg)
+#define IT8566_HDMI_CEC_IOCTL_SET_LA \
+	_IOW('\x66', 3, unsigned char)
+#define IT8566_HDMI_CEC_IOCTL_FW_UPDATE_IF_NEEDED \
+	_IOR('\x66', 4, int)
+
+/*for it8566 status*/
+#define RESULT_CEC_BUS_ERR	0x1
+#define RESULT_CEC_NACK		(0x1 << 1)
+#define RESULT_TX_BUSY		(0x1 << 2)
+#define RESULT_RX_BUSY		(0x1 << 3)
+#define RESULT_TX_SUCCESS	(0x1 << 7)
+
+#endif /* _IT8566_HDMI_CEC_IOCTL_H */
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index bd3ae32..71af7b5 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -191,7 +191,8 @@
 	 * virtio_has_feature(vdev, VIRTIO_BALLOON_F_MUST_TELL_HOST);
 	 * is true, we *have* to do it in this order
 	 */
-	tell_host(vb, vb->deflate_vq);
+	if (vb->num_pfns != 0)
+		tell_host(vb, vb->deflate_vq);
 	mutex_unlock(&vb->balloon_lock);
 	release_pages_by_pfn(vb->pfns, vb->num_pfns);
 }
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 5217baf..37d58f8 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -607,6 +607,55 @@
 EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
 
 /**
+ * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
+ * @vq: the struct virtqueue we're talking about.
+ *
+ * This re-enables callbacks; it returns current queue state
+ * in an opaque unsigned value. This value should be later tested by
+ * virtqueue_poll, to detect a possible race between the driver checking for
+ * more work, and enabling callbacks.
+ *
+ * Caller must ensure we don't call this with other virtqueue
+ * operations at the same time (except where noted).
+ */
+unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
+{
+	struct vring_virtqueue *vq = to_vvq(_vq);
+	u16 last_used_idx;
+
+	START_USE(vq);
+
+	/* We optimistically turn back on interrupts, then check if there was
+	 * more to do. */
+	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
+	 * either clear the flags bit or point the event index at the next
+	 * entry. Always do both to keep code simple. */
+	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
+	vring_used_event(&vq->vring) = last_used_idx = vq->last_used_idx;
+	END_USE(vq);
+	return last_used_idx;
+}
+EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
+
+/**
+ * virtqueue_poll - query pending used buffers
+ * @vq: the struct virtqueue we're talking about.
+ * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
+ *
+ * Returns "true" if there are pending used buffers in the queue.
+ *
+ * This does not need to be serialized.
+ */
+bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
+{
+	struct vring_virtqueue *vq = to_vvq(_vq);
+
+	virtio_mb(vq->weak_barriers);
+	return (u16)last_used_idx != vq->vring.used->idx;
+}
+EXPORT_SYMBOL_GPL(virtqueue_poll);
+
+/**
  * virtqueue_enable_cb - restart callbacks after disable_cb.
  * @vq: the struct virtqueue we're talking about.
  *
@@ -619,25 +668,8 @@
  */
 bool virtqueue_enable_cb(struct virtqueue *_vq)
 {
-	struct vring_virtqueue *vq = to_vvq(_vq);
-
-	START_USE(vq);
-
-	/* We optimistically turn back on interrupts, then check if there was
-	 * more to do. */
-	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
-	 * either clear the flags bit or point the event index at the next
-	 * entry. Always do both to keep code simple. */
-	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
-	vring_used_event(&vq->vring) = vq->last_used_idx;
-	virtio_mb(vq->weak_barriers);
-	if (unlikely(more_used(vq))) {
-		END_USE(vq);
-		return false;
-	}
-
-	END_USE(vq);
-	return true;
+	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
+	return !virtqueue_poll(_vq, last_used_idx);
 }
 EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
 
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index e89fc31..8b660e7 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -615,6 +615,27 @@
 
 	  To compile this driver as a module, choose M here.
 
+config INTEL_SCU_WATCHDOG_EVO
+	bool "Intel SCU Watchdog Evolution for Mobile Platforms"
+	depends on X86_INTEL_MID
+	---help---
+	  Hardware driver evolution for the watchdog timer built into the Intel
+	  SCU for Intel Mobile Platforms.
+
+	  This driver supports the watchdog evolution implementation in SCU,
+	  available for Merrifield generation.
+
+	  To compile this driver as a module, choose M here.
+
+config DISABLE_SCU_WATCHDOG
+	bool "De-activate Intel SCU Watchdog by cmdline for Mobile Platforms"
+	depends on INTEL_SCU_WATCHDOG || INTEL_SCU_WATCHDOG_EVO
+	---help---
+	  De-activate the watchdog by cmdline for Intel Mobile Platforms.
+	  This allows to use breakpoints without resetting.
+
+	  Only for debug purpose.
+
 config ITCO_WDT
 	tristate "Intel TCO Timer/Watchdog"
 	depends on (X86 || IA64) && PCI
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index a300b94..2d3cdd5 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -110,6 +110,7 @@
 obj-$(CONFIG_MACHZ_WDT) += machzwd.o
 obj-$(CONFIG_SBC_EPX_C3_WATCHDOG) += sbc_epx_c3.o
 obj-$(CONFIG_INTEL_SCU_WATCHDOG) += intel_scu_watchdog.o
+obj-$(CONFIG_INTEL_SCU_WATCHDOG_EVO) += intel_scu_watchdog_evo.o
 
 # M32R Architecture
 
diff --git a/drivers/watchdog/intel_scu_watchdog_evo.c b/drivers/watchdog/intel_scu_watchdog_evo.c
new file mode 100644
index 0000000..0cb7e67
--- /dev/null
+++ b/drivers/watchdog/intel_scu_watchdog_evo.c
@@ -0,0 +1,1482 @@
+/*
+ *      Intel_SCU 0.3:  An Intel SCU IOH Based Watchdog Device
+ *			for Intel part #(s):
+ *				- AF82MP20 PCH
+ *
+ *      Copyright (C) 2009-2013 Intel Corporation. All rights reserved.
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of version 2 of the GNU General
+ *      Public License as published by the Free Software Foundation.
+ *
+ *      This program is distributed in the hope that it will be
+ *      useful, but WITHOUT ANY WARRANTY; without even the implied
+ *      warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ *      PURPOSE.  See the GNU General Public License for more details.
+ *      You should have received a copy of the GNU General Public
+ *      License along with this program; if not, write to the Free
+ *      Software Foundation, Inc., 59 Temple Place - Suite 330,
+ *      Boston, MA  02111-1307, USA.
+ *      The full GNU General Public License is included in this
+ *      distribution in the file called COPYING.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+/* See Documentation/watchdog/intel-scu-watchdog.txt */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/watchdog.h>
+#include <linux/reboot.h>
+#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
+#include <linux/rpmsg.h>
+#include <linux/nmi.h>
+#include <linux/rtc.h>
+#include <asm/intel_scu_ipcutil.h>
+#include <asm/intel_mid_rpmsg.h>
+#include <asm/intel-mid.h>
+#include <asm/proto.h>
+
+#include "intel_scu_watchdog_evo.h"
+
+/* Adjustment flags */
+#define CONFIG_INTEL_SCU_SOFT_LOCKUP
+#define CONFIG_DEBUG_WATCHDOG
+
+/* Defines */
+#define STRING_RESET_TYPE_MAX_LEN   11
+#define STRING_COLD_OFF             "COLD_OFF"
+#define STRING_COLD_RESET           "COLD_RESET"
+#define STRING_COLD_BOOT            "COLD_BOOT"
+
+#define IPC_WATCHDOG 0xF8
+
+enum {
+	SCU_WATCHDOG_START = 0,
+	SCU_WATCHDOG_STOP,
+	SCU_WATCHDOG_KEEPALIVE,
+	SCU_WATCHDOG_SET_ACTION_ON_TIMEOUT
+};
+
+enum {
+	SCU_COLD_OFF_ON_TIMEOUT = 0,
+	SCU_COLD_RESET_ON_TIMEOUT,
+	SCU_COLD_BOOT_ON_TIMEOUT,
+	SCU_DO_NOTHING_ON_TIMEOUT
+};
+
+#ifdef CONFIG_DEBUG_FS
+#define SECURITY_WATCHDOG_ADDR  0xFF222230
+#define STRING_NONE "NONE"
+#endif
+
+/* Statics */
+static int reset_type_to_string(int reset_type, char *string);
+static int string_to_reset_type(const char *string, int *reset_type);
+static struct intel_scu_watchdog_dev watchdog_device;
+static unsigned char osnib_reset = OSNIB_WRITE_VALUE;
+
+/* Module params */
+static bool kicking_active = true;
+#ifdef CONFIG_DEBUG_WATCHDOG
+module_param(kicking_active, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(kicking_active,
+		"Deactivate the kicking will result in a cold reset"
+		"after a while");
+#endif
+
+static bool disable_kernel_watchdog = false;
+#ifdef CONFIG_DISABLE_SCU_WATCHDOG
+/*
+ * Please note that we are using a config CONFIG_DISABLE_SCU_WATCHDOG
+ * because this boot parameter should only be settable in a developement
+ */
+module_param(disable_kernel_watchdog, bool, S_IRUGO);
+MODULE_PARM_DESC(disable_kernel_watchdog,
+		"Disable kernel watchdog"
+		"Set to 0, watchdog started at boot"
+		"and left running; Set to 1; watchdog"
+		"is not started until user space"
+		"watchdog daemon is started; also if the"
+		"timer is started by the iafw firmware, it"
+		"will be disabled upon initialization of this"
+		"driver if disable_kernel_watchdog is set");
+#endif
+
+static int pre_timeout = DEFAULT_PRETIMEOUT;
+module_param(pre_timeout, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(pre_timeout,
+		"Watchdog pre timeout"
+		"Time between interrupt and resetting the system"
+		"The range is from 1 to 160");
+
+static int timeout = DEFAULT_TIMEOUT;
+module_param(timeout, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(timeout,
+		"Default Watchdog timer setting"
+		"Complete cycle time"
+		"The range is from 1 to 170"
+		"This is the time for all keep alives to arrive");
+
+static bool reset_on_release = true;
+
+#ifdef CONFIG_INTEL_SCU_SOFT_LOCKUP
+/*
+ * heartbeats: cpu last kstat.system times
+ * beattime : jiffies at the sample time of heartbeats.
+ * SOFT_LOCK_TIME : some time out in sec after warning interrupt.
+ * dump_softloc_debug : called on SOFT_LOCK_TIME time out after scu
+ *	interrupt to log data to logbuffer and emmc-panic code,
+ *	SOFT_LOCK_TIME needs to be < SCU warn to reset time
+ *	which is currently thats 15 sec.
+ *
+ * The soft lock works be taking a snapshot of kstat_cpu(i).cpustat.system at
+ * the time of the warning interrupt for each cpu.  Then at SOFT_LOCK_TIME the
+ * amount of time spend in system is computed and if its within 10 ms of the
+ * total SOFT_LOCK_TIME on any cpu it will dump the stack on that cpu and then
+ * calls panic.
+ *
+ */
+static u64 heartbeats[NR_CPUS];
+static u64 beattime;
+#define SOFT_LOCK_TIME 10
+static void dump_softlock_debug(unsigned long data);
+DEFINE_TIMER(softlock_timer, dump_softlock_debug, 0, 0);
+
+static struct rpmsg_instance *watchdog_instance;
+
+/* time is about to run out and the scu will reset soon.  quickly
+ * dump debug data to logbuffer and emmc via calling panic before lights
+ * go out.
+ */
+static void smp_dumpstack(void *info)
+{
+	dump_stack();
+}
+
+static void dump_softlock_debug(unsigned long data)
+{
+	int i, reboot;
+	u64 system[NR_CPUS], num_jifs;
+
+	memset(system, 0, NR_CPUS*sizeof(u64));
+
+	num_jifs = jiffies - beattime;
+	for_each_possible_cpu(i) {
+		system[i] = kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM] -
+				heartbeats[i];
+	}
+
+	reboot = 0;
+
+	for_each_possible_cpu(i) {
+		if ((num_jifs - cputime_to_jiffies(system[i])) <
+						msecs_to_jiffies(10)) {
+			WARN(1, "cpu %d wedged\n", i);
+			smp_call_function_single(i, smp_dumpstack, NULL, 1);
+			reboot = 1;
+		}
+	}
+
+	if (reboot) {
+		panic_timeout = 10;
+		trigger_all_cpu_backtrace();
+		panic("Soft lock on CPUs\n");
+	}
+}
+#endif /* CONFIG_INTEL_SCU_SOFT_LOCKUP */
+
+/* Check current timeouts */
+static int check_timeouts(int pre_timeout_time, int timeout_time)
+{
+	if (pre_timeout_time < timeout_time)
+		return 0;
+
+	return -EINVAL;
+}
+
+/* Set the different timeouts needed by the SCU FW and start the
+ * kernel watchdog */
+static int watchdog_set_timeouts_and_start(int pretimeout,
+					   int timeout)
+{
+	int ret, error = 0;
+	struct ipc_wd_start {
+		u32 pretimeout;
+		u32 timeout;
+	} ipc_wd_start = { pretimeout, timeout };
+
+	ret = rpmsg_send_command(watchdog_instance, IPC_WATCHDOG,
+				 SCU_WATCHDOG_START, (u8 *)&ipc_wd_start,
+				 NULL, sizeof(ipc_wd_start), 0);
+	if (ret) {
+		pr_crit("Error configuring and starting watchdog: %d\n",
+			ret);
+		error = -EIO;
+	}
+
+	return error;
+}
+
+/* Provisioning function for future enhancement : allow to fine tune timing
+   according to watchdog action settings */
+static int watchdog_set_appropriate_timeouts(void)
+{
+	pr_debug("Setting shutdown timeouts\n");
+	return watchdog_set_timeouts_and_start(pre_timeout, timeout);
+}
+
+/* Keep alive  */
+static int watchdog_keepalive(void)
+{
+	int ret, error = 0;
+    struct timespec ts;
+    struct rtc_time tm;
+
+    getnstimeofday(&ts);
+    rtc_time_to_tm(ts.tv_sec, &tm);
+    pr_info("%s: at %lld " "(%d-%02d-%02d %02d:%02d:%02d.%09lu UTC)\n",
+            __func__,
+            ktime_to_ns(ktime_get()),
+            tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
+            tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec);
+
+	if (unlikely(!kicking_active)) {
+		/* Close our eyes */
+		pr_err("Transparent kicking\n");
+		return 0;
+	}
+
+	/* Really kick it */
+	ret = rpmsg_send_simple_command(watchdog_instance, IPC_WATCHDOG,
+					SCU_WATCHDOG_KEEPALIVE);
+	if (ret) {
+		pr_crit("Error executing keepalive: %x\n", ret);
+		error = -EIO;
+	}
+
+	return error;
+}
+
+/* stops the timer */
+static int watchdog_stop(void)
+{
+	int ret = 0;
+	int error = 0;
+
+	pr_crit("%s\n", __func__);
+
+	ret = rpmsg_send_simple_command(watchdog_instance, IPC_WATCHDOG,
+					SCU_WATCHDOG_STOP);
+	if (ret) {
+		pr_crit("Error stopping watchdog: %x\n", ret);
+		error = -EIO;
+	}
+
+	watchdog_device.started = false;
+
+	return error;
+}
+
+/* warning interrupt handler */
+static irqreturn_t watchdog_warning_interrupt(int irq, void *dev_id)
+{
+	if (unlikely(!kicking_active))
+		pr_warn("[SHTDWN] WATCHDOG TIMEOUT for test!, %s\n", __func__);
+
+	else
+		pr_warn("[SHTDWN] %s, WATCHDOG TIMEOUT!\n", __func__);
+
+	/* Let's reset the platform after dumping some data */
+	trigger_all_cpu_backtrace();
+
+	/* let the watchdog expire to reset the platform */
+	reboot_force = REBOOT_FORCE_ON;
+	panic("Kernel Watchdog");
+
+	/* This code should not be reached */
+	return IRQ_HANDLED;
+}
+
+/* Program and starts the timer */
+static int watchdog_config_and_start(u32 newtimeout, u32 newpretimeout)
+{
+	int ret;
+
+	timeout = newtimeout;
+	pre_timeout = newpretimeout;
+
+	pr_info("timeout=%ds, pre_timeout=%ds\n", timeout, pre_timeout);
+
+	/* Configure the watchdog */
+	ret = watchdog_set_timeouts_and_start(pre_timeout, timeout);
+	if (ret) {
+		pr_err("%s: Cannot configure the watchdog\n", __func__);
+
+		/* Make sure the watchdog timer is stopped */
+		watchdog_stop();
+		return ret;
+	}
+
+	watchdog_device.started = true;
+
+	return 0;
+}
+
+/* Open */
+static int intel_scu_open(struct inode *inode, struct file *file)
+{
+	/* Set flag to indicate that watchdog device is open */
+	if (test_and_set_bit(0, &watchdog_device.driver_open)) {
+		pr_err("watchdog device is busy\n");
+		return -EBUSY;
+	}
+
+	/* Check for reopen of driver. Reopens are not allowed */
+	if (watchdog_device.driver_closed) {
+		pr_err("watchdog device has been closed\n");
+		return -EPERM;
+	}
+
+	return nonseekable_open(inode, file);
+}
+
+/* Release */
+static int intel_scu_release(struct inode *inode, struct file *file)
+{
+	/*
+	 * This watchdog should not be closed, after the timer
+	 * is started with the WDIPC_SETTIMEOUT ioctl
+	 * If reset_on_release is set this  will cause an
+	 * immediate reset. If reset_on_release is not set, the watchdog
+	 * timer is refreshed for one more interval. At the end
+	 * of that interval, the watchdog timer will reset the system.
+	 */
+
+	if (!test_bit(0, &watchdog_device.driver_open)) {
+		pr_err("intel_scu_release, without open\n");
+		return -ENOTTY;
+	}
+
+	if (!watchdog_device.started) {
+		/* Just close, since timer has not been started */
+		pr_err("Closed, without starting timer\n");
+		return 0;
+	}
+
+	pr_crit("Unexpected close of /dev/watchdog!\n");
+
+	/* Since the timer was started, prevent future reopens */
+	watchdog_device.driver_closed = 1;
+
+	/* Refresh the timer for one more interval */
+	watchdog_keepalive();
+
+	/* Reboot system if requested */
+	if (reset_on_release) {
+		pr_crit("Initiating system reboot.\n");
+		emergency_restart();
+	}
+
+	pr_crit("Immediate Reboot Disabled\n");
+	pr_crit("System will reset when watchdog timer expire!\n");
+
+	return 0;
+}
+
+/* Write */
+static ssize_t intel_scu_write(struct file *file, char const *data, size_t len,
+			      loff_t *ppos)
+{
+	pr_debug("watchdog %s\n", __func__);
+
+	if (watchdog_device.shutdown_flag == true)
+		/* do nothing if we are shutting down */
+		return len;
+
+	if (watchdog_device.started) {
+		/* Watchdog already started, keep it alive */
+		watchdog_keepalive();
+	}
+
+	return len;
+}
+
+/* ioctl */
+static long intel_scu_ioctl(struct file *file, unsigned int cmd,
+			    unsigned long arg)
+{
+	void __user *argp = (void __user *)arg;
+	u32 __user *p = argp;
+	u32 val;
+	int options;
+
+	static const struct watchdog_info ident = {
+		.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
+		/* @todo Get from SCU via ipc_get_scu_fw_version()? */
+		.firmware_version = 0,
+		/* len < 32 */
+		.identity = "Intel_SCU IOH Watchdog"
+	};
+
+	switch (cmd) {
+	case WDIOC_GETSUPPORT:
+		return copy_to_user(argp, &ident,
+				    sizeof(ident)) ? -EFAULT : 0;
+	case WDIOC_GETSTATUS:
+	case WDIOC_GETBOOTSTATUS:
+		return put_user(0, p);
+	case WDIOC_KEEPALIVE:
+		pr_warn("%s: KeepAlive ioctl\n", __func__);
+		if (!watchdog_device.started)
+			return -EINVAL;
+
+		watchdog_keepalive();
+		return 0;
+	case WDIOC_SETPRETIMEOUT:
+		pr_warn("%s: SetPreTimeout ioctl is deprecated\n", __func__);
+
+		if (watchdog_device.started)
+			return -EBUSY;
+
+		/* Timeout to warn */
+		if (get_user(val, p))
+			return -EFAULT;
+
+		pre_timeout = val;
+		return 0;
+	case WDIOC_SETTIMEOUT:
+		pr_warn("%s: SetTimeout ioctl\n", __func__);
+
+		if (get_user(val, p))
+			return -EFAULT;
+
+		timeout = val;
+
+		if (check_timeouts(pre_timeout, timeout)) {
+			pr_warn("%s: Invalid thresholds\n",
+				__func__);
+			return -EINVAL;
+		}
+		if (watchdog_config_and_start(timeout, pre_timeout))
+			return -EINVAL;
+
+		return 0;
+	case WDIOC_GETTIMEOUT:
+		return put_user(timeout, p);
+	case WDIOC_SETOPTIONS:
+		if (get_user(options, p))
+			return -EFAULT;
+
+		if (options & WDIOS_DISABLECARD) {
+			pr_warn("%s: Stopping the watchdog\n", __func__);
+			watchdog_stop();
+			return 0;
+		}
+
+		if (options & WDIOS_ENABLECARD) {
+			pr_warn("%s: Starting the watchdog\n", __func__);
+
+			if (watchdog_device.started)
+				return -EBUSY;
+
+			if (check_timeouts(pre_timeout, timeout)) {
+				pr_warn("%s: Invalid thresholds\n",
+					__func__);
+				return -EINVAL;
+			}
+			if (watchdog_config_and_start(timeout, pre_timeout))
+				return -EINVAL;
+			return 0;
+		}
+		return 0;
+	default:
+		return -ENOTTY;
+	}
+}
+
+static int watchdog_set_reset_type(int reset_type)
+{
+	int ret;
+	struct ipc_wd_on_timeout {
+		u32 reset_type;
+	} ipc_wd_on_timeout = { reset_type };
+
+	ret = rpmsg_send_command(watchdog_instance, IPC_WATCHDOG,
+				 SCU_WATCHDOG_SET_ACTION_ON_TIMEOUT,
+				 (u8 *)&ipc_wd_on_timeout, NULL,
+				 sizeof(ipc_wd_on_timeout), 0);
+	if (ret) {
+		pr_crit("Error setting watchdog action: %d\n", ret);
+		return -EIO;
+	}
+
+	watchdog_device.normal_wd_action = reset_type;
+
+	return 0;
+}
+
+/* Reboot notifier */
+static int reboot_notifier(struct notifier_block *this,
+			   unsigned long code,
+			   void *another_unused)
+{
+	int ret;
+
+	if (code == SYS_RESTART || code == SYS_HALT || code == SYS_POWER_OFF) {
+		pr_warn("Reboot notifier\n");
+
+		if (watchdog_set_appropriate_timeouts())
+			pr_crit("reboot notifier cant set time\n");
+
+		switch (code) {
+		case SYS_RESTART:
+			watchdog_device.reboot_flag = true;
+			ret = watchdog_set_reset_type(
+				watchdog_device.reboot_wd_action);
+			break;
+
+		case SYS_HALT:
+		case SYS_POWER_OFF:
+			ret = watchdog_set_reset_type(
+				watchdog_device.shutdown_wd_action);
+			break;
+		}
+		if (ret)
+			pr_err("%s: could not set reset type\n", __func__);
+
+#ifdef CONFIG_DEBUG_FS
+		/* debugfs entry to generate a BUG during
+		any shutdown/reboot call */
+		if (watchdog_device.panic_reboot_notifier) {
+			/* let the watchdog expire to reset the platform */
+			reboot_force = REBOOT_FORCE_ON;
+			BUG();
+		}
+#endif
+		/* Don't do instant reset on close */
+		reset_on_release = false;
+
+		/* Kick once again */
+		if (disable_kernel_watchdog == false) {
+			ret = watchdog_keepalive();
+			if (ret)
+				pr_warn("%s: no keep alive\n", __func__);
+
+			/* Don't allow any more keep-alives */
+			watchdog_device.shutdown_flag = true;
+		}
+	}
+	return NOTIFY_DONE;
+}
+
+#ifdef CONFIG_DEBUG_FS
+/* This code triggers a Security Watchdog */
+ssize_t write_security(struct file *file, const char __user *buff, size_t count,
+		       loff_t *ppos)
+{
+	int ret = 0;
+	void __iomem *ptr;
+	u32 value;
+
+	ptr = ioremap_nocache(SECURITY_WATCHDOG_ADDR, sizeof(u32));
+
+	if (!ptr) {
+		pr_err("cannot open secwd's debugfile\n");
+		ret = -ENODEV;
+		goto error;
+	}
+	value = readl(ptr); /* trigger */
+
+	pr_err("%s: This code should never be reached but it got %x\n",
+		__func__, (unsigned int)value);
+
+error:
+	return ret;
+}
+
+static const struct file_operations security_watchdog_fops = {
+	.open = nonseekable_open,
+	.write = write_security,
+	.llseek = no_llseek,
+};
+
+static ssize_t kwd_trigger_write(struct file *file, const char __user *buff,
+			     size_t count, loff_t *ppos)
+{
+	pr_debug("kwd_trigger_write\n");
+
+	/* let the watchdog expire to reset the platform */
+	reboot_force = REBOOT_FORCE_ON;
+	BUG();
+	return 0;
+}
+
+static const struct file_operations kwd_trigger_fops = {
+	.open = nonseekable_open,
+	.write = kwd_trigger_write,
+	.llseek = no_llseek,
+};
+
+static int kwd_reset_type_release(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
+static ssize_t kwd_reset_type_read(struct file *file, char __user *buff,
+				   size_t count, loff_t *ppos)
+{
+	ssize_t len;
+	int ret;
+	char str[STRING_RESET_TYPE_MAX_LEN + 1];
+
+	pr_debug("reading reset_type of %x\n",
+		 watchdog_device.normal_wd_action);
+
+	if (*ppos > 0)
+		return 0;
+
+	ret = reset_type_to_string(watchdog_device.normal_wd_action, str);
+	if (ret)
+		return -EINVAL;
+	else {
+		for (len = 0; len < (STRING_RESET_TYPE_MAX_LEN - 1)
+			     && str[len] != '\0'; len++)
+			;
+		str[len++] = '\n';
+		ret = copy_to_user(buff, str, len);
+	}
+
+	*ppos += len;
+	return len;
+}
+
+static ssize_t kwd_reset_type_write(struct file *file, const char __user *buff,
+				    size_t count, loff_t *ppos)
+{
+	char str[STRING_RESET_TYPE_MAX_LEN];
+	unsigned long res;
+	int ret, reset_type;
+
+	if (count > STRING_RESET_TYPE_MAX_LEN) {
+		pr_err("Invalid size: count=%zu\n", count);
+		return -EINVAL;
+	}
+
+	memset(str, 0x00, STRING_RESET_TYPE_MAX_LEN);
+
+	res = copy_from_user((void *)str,
+		(void __user *)buff,
+		(unsigned long)min((unsigned long)(count-1),
+		(unsigned long)(STRING_RESET_TYPE_MAX_LEN-1)));
+
+	if (res) {
+		pr_err("%s: copy to user failed\n", __func__);
+		return -EINVAL;
+	}
+
+	pr_debug("writing reset_type of %s\n", str);
+
+	ret = string_to_reset_type(str, &reset_type);
+	if (ret) {
+		pr_err("Invalid value\n");
+		return -EINVAL;
+	}
+
+	ret = watchdog_set_reset_type(reset_type);
+	if (ret) {
+		pr_err("%s: could not set reset type\n", __func__);
+		return -EINVAL;
+	}
+
+	return count;
+}
+
+static const struct file_operations kwd_reset_type_fops = {
+	.open		= nonseekable_open,
+	.release	= kwd_reset_type_release,
+	.read		= kwd_reset_type_read,
+	.write		= kwd_reset_type_write,
+	.llseek		= no_llseek,
+};
+
+static ssize_t kwd_panic_reboot_read(struct file *file, char __user *buff,
+		size_t count, loff_t *ppos)
+{
+	# define RET_SIZE 3 /* prints only 2 chars : '0' or '1', plus '\n' */
+	char str[RET_SIZE];
+
+	int res;
+
+	if (*ppos > 0)
+		return 0;
+
+	strcpy(str, watchdog_device.panic_reboot_notifier ? "1\n" : "0\n");
+
+	res = copy_to_user(buff, str, RET_SIZE);
+	if (res) {
+		pr_err("%s: copy to user failed\n", __func__);
+		return -EINVAL;
+	}
+
+	*ppos += RET_SIZE-1;
+	return RET_SIZE-1;
+}
+
+
+static ssize_t kwd_panic_reboot_write(struct file *file,
+		const char __user *buff, size_t count, loff_t *ppos)
+{
+	/* whatever is written, simply set flag to TRUE */
+	watchdog_device.panic_reboot_notifier = true;
+
+	return count;
+}
+
+
+static const struct file_operations kwd_panic_reboot_fops = {
+	.open		= nonseekable_open,
+	.read		= kwd_panic_reboot_read,
+	.write		= kwd_panic_reboot_write,
+	.llseek		= no_llseek,
+};
+
+static int remove_debugfs_entries(void)
+{
+struct intel_scu_watchdog_dev *dev = &watchdog_device;
+
+	/* /sys/kernel/debug/watchdog */
+	debugfs_remove_recursive(dev->dfs_wd);
+
+	return 0;
+}
+
+static int create_debugfs_entries(void)
+{
+	struct intel_scu_watchdog_dev *dev = &watchdog_device;
+
+	/* /sys/kernel/debug/watchdog */
+	dev->dfs_wd = debugfs_create_dir("watchdog", NULL);
+	if (!dev->dfs_wd) {
+		pr_err("%s: Error, cannot create main dir\n", __func__);
+		goto error;
+	}
+
+	/* /sys/kernel/debug/watchdog/security_watchdog */
+	dev->dfs_secwd = debugfs_create_dir("security_watchdog", dev->dfs_wd);
+	if (!dev->dfs_secwd) {
+		pr_err("%s: Error, cannot create sec dir\n", __func__);
+		goto error;
+	}
+
+	/* /sys/kernel/debug/watchdog/security_watchdog/trigger */
+	dev->dfs_secwd_trigger = debugfs_create_file("trigger",
+				    S_IFREG | S_IWUSR | S_IWGRP,
+				    dev->dfs_secwd, NULL,
+				    &security_watchdog_fops);
+
+	if (!dev->dfs_secwd_trigger) {
+		pr_err("%s: Error, cannot create sec file\n", __func__);
+		goto error;
+	}
+
+	/* /sys/kernel/debug/watchdog/kernel_watchdog */
+	dev->dfs_kwd = debugfs_create_dir("kernel_watchdog", dev->dfs_wd);
+	if (!dev->dfs_kwd) {
+		pr_err("%s: Error, cannot create kwd dir\n", __func__);
+		goto error;
+	}
+
+	/* /sys/kernel/debug/watchdog/kernel_watchdog/trigger */
+	dev->dfs_kwd_trigger = debugfs_create_file("trigger",
+				    S_IFREG | S_IWUSR | S_IWGRP,
+				    dev->dfs_kwd, NULL,
+				    &kwd_trigger_fops);
+
+	if (!dev->dfs_kwd_trigger) {
+		pr_err("%s: Error, cannot create kwd trigger file\n",
+			__func__);
+		goto error;
+	}
+
+	/* /sys/kernel/debug/watchdog/kernel_watchdog/reset_type */
+	dev->dfs_kwd_reset_type = debugfs_create_file("reset_type",
+				    S_IFREG | S_IRUGO | S_IWUSR | S_IWGRP,
+				    dev->dfs_kwd, NULL,
+				    &kwd_reset_type_fops);
+
+	if (!dev->dfs_kwd_trigger) {
+		pr_err("%s: Error, cannot create kwd trigger file\n",
+			__func__);
+		goto error;
+	}
+
+	/* /sys/kernel/debug/watchdog/kernel_watchdog/panic_reboot_notifier */
+	dev->dfs_kwd_panic_reboot = debugfs_create_file("panic_reboot_notifier",
+					S_IFREG | S_IRUGO | S_IWUSR | S_IWGRP,
+					dev->dfs_kwd, NULL,
+					&kwd_panic_reboot_fops);
+
+	if (!dev->dfs_kwd_panic_reboot) {
+		pr_err("%s: Error, cannot create kwd panic_reboot_notifier file\n",
+			__func__);
+		goto error;
+	}
+
+
+	return 0;
+error:
+	remove_debugfs_entries();
+	return 1;
+}
+#endif  /* CONFIG_DEBUG_FS*/
+
+/* Kernel Interfaces */
+static const struct file_operations intel_scu_fops = {
+	.owner          = THIS_MODULE,
+	.llseek         = no_llseek,
+	.write          = intel_scu_write,
+	.unlocked_ioctl = intel_scu_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= intel_scu_ioctl,
+#endif
+	.open           = intel_scu_open,
+	.release        = intel_scu_release,
+};
+
+/* sysfs entry to disable watchdog */
+#ifdef CONFIG_DISABLE_SCU_WATCHDOG
+static ssize_t disable_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t size)
+{
+	int ret;
+
+	if (!strtobool(buf, &disable_kernel_watchdog)) {
+		if (disable_kernel_watchdog) {
+			ret = watchdog_stop();
+			if (ret)
+				pr_err("cannot disable the timer\n");
+		} else {
+			ret = watchdog_config_and_start(timeout, pre_timeout);
+			if (ret)
+				return -EINVAL;
+		}
+	} else {
+		pr_err("got invalid value\n");
+		return -EINVAL;
+	}
+
+	return size;
+}
+
+static ssize_t disable_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	pr_debug("%s\n", __func__);
+	if (disable_kernel_watchdog)
+		return sprintf(buf, "1\n");
+
+	return sprintf(buf, "0\n");
+}
+
+static DEVICE_ATTR(disable, S_IWUSR | S_IRUGO,
+	disable_show, disable_store);
+
+#endif
+
+#define OSNIB_WDOG_COUNTER_MASK 0xF0
+#define OSNIB_WDOG_COUNTER_SHIFT 4
+#define WDOG_COUNTER_MAX_VALUE   3
+static ssize_t counter_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t size)
+{
+	int ret;
+
+	pr_debug("%s\n", __func__);
+
+	ret = sscanf(buf, "%hhu", &osnib_reset);
+	if (ret != 1) {
+		pr_err(PFX "cannot get counter value\n");
+		if (ret == 0)
+			ret = -EINVAL;
+		return ret;
+	}
+	if (osnib_reset > WDOG_COUNTER_MAX_VALUE)
+		osnib_reset = WDOG_COUNTER_MAX_VALUE;
+	osnib_reset = ((osnib_reset << OSNIB_WDOG_COUNTER_SHIFT) &
+				OSNIB_WDOG_COUNTER_MASK);
+	ret = intel_scu_ipc_write_osnib_wd(&osnib_reset);
+
+	if (ret != 0) {
+		pr_err(PFX "cannot write OSNIB\n");
+		return -EINVAL;
+	}
+
+	return size;
+}
+
+
+static ssize_t counter_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	unsigned char osnib_read = (unsigned char)0;
+	int ret;
+	pr_debug("%s\n", __func__);
+
+	ret = intel_scu_ipc_read_osnib_wd(&osnib_read);
+
+	if (ret != 0)
+		return -EIO;
+
+	return sprintf(buf, "%d\n", (int)((osnib_read & OSNIB_WDOG_COUNTER_MASK)
+						>> OSNIB_WDOG_COUNTER_SHIFT));
+}
+
+static int reset_type_to_string(int reset_type, char *string)
+{
+	switch (reset_type) {
+	case SCU_COLD_BOOT_ON_TIMEOUT:
+		strcpy(string, STRING_COLD_BOOT);
+		break;
+	case SCU_COLD_RESET_ON_TIMEOUT:
+		strcpy(string, STRING_COLD_RESET);
+		break;
+	case SCU_COLD_OFF_ON_TIMEOUT:
+		strcpy(string, STRING_COLD_OFF);
+		break;
+#ifdef CONFIG_DEBUG_FS
+	case SCU_DO_NOTHING_ON_TIMEOUT:
+		/* The IPC command DONOTHING is provided */
+		/* for debug purpose only.               */
+		strcpy(string, STRING_NONE);
+		break;
+#endif
+	default:
+		return 1;
+	}
+
+	return 0;
+}
+
+static int string_to_reset_type(const char *string, int *reset_type)
+{
+	if (!reset_type || !string)
+		return 1;
+
+	if (strncmp(string, STRING_COLD_RESET,
+			sizeof(STRING_COLD_RESET) - 1) == 0) {
+		*reset_type = SCU_COLD_RESET_ON_TIMEOUT;
+		return 0;
+	}
+	if (strncmp(string, STRING_COLD_BOOT,
+			sizeof(STRING_COLD_BOOT) - 1) == 0) {
+		*reset_type = SCU_COLD_BOOT_ON_TIMEOUT;
+		return 0;
+	}
+	if (strncmp(string, STRING_COLD_OFF,
+			sizeof(STRING_COLD_OFF) - 1) == 0) {
+		*reset_type = SCU_COLD_OFF_ON_TIMEOUT;
+		return 0;
+	}
+#ifdef CONFIG_DEBUG_FS
+	if (strncmp(string, STRING_NONE,
+			sizeof(STRING_NONE) - 1) == 0) {
+		*reset_type = SCU_DO_NOTHING_ON_TIMEOUT;
+		return 0;
+	}
+#endif
+	/* We should not be here, this is an error case */
+	pr_debug("Invalid reset type value\n");
+	return 1;
+}
+
+static ssize_t reboot_ongoing_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t size)
+{
+	int ret;
+
+	pr_debug("%s\n", __func__);
+	/* reprogram timeouts. if error : continue */
+	ret = watchdog_set_appropriate_timeouts();
+	if (ret)
+		pr_err("%s: could not set timeouts\n", __func__);
+
+	/* restore reset type */
+	watchdog_set_reset_type(watchdog_device.reboot_wd_action);
+	if (ret) {
+		pr_err("%s: could not set reset type\n", __func__);
+		return -EINVAL;
+	}
+
+	return size;
+}
+
+static ssize_t shutdown_ongoing_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t size)
+{
+	int ret;
+
+	pr_debug("%s\n", __func__);
+	/* reprogram timeouts. if error : continue */
+	ret = watchdog_set_appropriate_timeouts();
+	if (ret)
+		pr_err("%s: could not set timeouts\n", __func__);
+
+	/* restore reset type */
+	watchdog_set_reset_type(watchdog_device.shutdown_wd_action);
+	if (ret) {
+		pr_err("%s: could not set reset type\n", __func__);
+		return -EINVAL;
+	}
+
+	return size;
+}
+
+static ssize_t normal_config_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	if (reset_type_to_string(watchdog_device.normal_wd_action, buf) != 0)
+		return -EINVAL;
+	strcat(buf, "\n");
+	return strlen(buf);
+}
+
+static ssize_t normal_config_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t size)
+{
+	if (string_to_reset_type(buf, &watchdog_device.normal_wd_action) != 0)
+		return -EINVAL;
+	if (watchdog_set_reset_type(watchdog_device.normal_wd_action) != 0)
+		return -EINVAL;
+
+	return size;
+}
+
+static ssize_t reboot_config_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	if (reset_type_to_string(watchdog_device.reboot_wd_action, buf) != 0)
+		return -EINVAL;
+	strcat(buf, "\n");
+	return strlen(buf);
+}
+
+static ssize_t reboot_config_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t size)
+{
+	if (string_to_reset_type(buf, &watchdog_device.reboot_wd_action) != 0)
+		return -EINVAL;
+
+	return size;
+}
+
+static ssize_t shutdown_config_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	if (reset_type_to_string(watchdog_device.shutdown_wd_action, buf) != 0)
+		return -EINVAL;
+	strcat(buf, "\n");
+	return strlen(buf);
+}
+
+static ssize_t shutdown_config_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t size)
+{
+	if (string_to_reset_type(buf, &watchdog_device.shutdown_wd_action) != 0)
+		return -EINVAL;
+
+	return size;
+}
+
+/* Watchdog behavior depending on system phase */
+static DEVICE_ATTR(normal_config, S_IWUSR | S_IRUGO,
+	normal_config_show, normal_config_store);
+static DEVICE_ATTR(reboot_config, S_IWUSR | S_IRUGO,
+	reboot_config_show, reboot_config_store);
+static DEVICE_ATTR(shutdown_config, S_IWUSR | S_IRUGO,
+	shutdown_config_show, shutdown_config_store);
+static DEVICE_ATTR(reboot_ongoing, S_IWUSR,
+	NULL, reboot_ongoing_store);
+static DEVICE_ATTR(shutdown_ongoing, S_IWUSR,
+	NULL, shutdown_ongoing_store);
+
+/* Reset counter watchdog entry */
+static DEVICE_ATTR(counter, S_IWUSR | S_IRUGO,
+	counter_show, counter_store);
+
+
+int create_watchdog_sysfs_files(void)
+{
+	int ret;
+
+#ifdef CONFIG_DISABLE_SCU_WATCHDOG
+	ret = device_create_file(watchdog_device.miscdev.this_device,
+		&dev_attr_disable);
+	if (ret) {
+		pr_warn("cant register dev file for disable\n");
+		return ret;
+	}
+#endif
+
+	ret = device_create_file(watchdog_device.miscdev.this_device,
+		&dev_attr_normal_config);
+	if (ret) {
+		pr_warn("cant register dev file for normal_config\n");
+		return ret;
+	}
+
+	ret = device_create_file(watchdog_device.miscdev.this_device,
+		&dev_attr_reboot_config);
+	if (ret) {
+		pr_warn("cant register dev file for reboot_config\n");
+		return ret;
+	}
+
+	ret = device_create_file(watchdog_device.miscdev.this_device,
+		&dev_attr_shutdown_config);
+	if (ret) {
+		pr_warn("cant register dev file for shutdown_config\n");
+		return ret;
+	}
+
+	ret = device_create_file(watchdog_device.miscdev.this_device,
+		&dev_attr_counter);
+	if (ret) {
+		pr_warn("cant register dev file for counter\n");
+		return ret;
+	}
+
+	ret = device_create_file(watchdog_device.miscdev.this_device,
+		&dev_attr_reboot_ongoing);
+	if (ret) {
+		pr_warn("cant register dev file for reboot_ongoing\n");
+		return ret;
+	}
+
+	ret = device_create_file(watchdog_device.miscdev.this_device,
+		&dev_attr_shutdown_ongoing);
+	if (ret) {
+		pr_warn("cant register dev file for shutdown_ongoing\n");
+		return ret;
+	}
+	return 0;
+}
+
+int remove_watchdog_sysfs_files(void)
+{
+#ifdef CONFIG_DISABLE_SCU_WATCHDOG
+	device_remove_file(watchdog_device.miscdev.this_device,
+		&dev_attr_disable);
+#endif
+	device_remove_file(watchdog_device.miscdev.this_device,
+		&dev_attr_normal_config);
+
+	device_remove_file(watchdog_device.miscdev.this_device,
+		&dev_attr_reboot_config);
+
+	device_remove_file(watchdog_device.miscdev.this_device,
+		&dev_attr_shutdown_config);
+
+	device_remove_file(watchdog_device.miscdev.this_device,
+		&dev_attr_counter);
+
+	device_remove_file(watchdog_device.miscdev.this_device,
+		&dev_attr_reboot_ongoing);
+
+	device_remove_file(watchdog_device.miscdev.this_device,
+		&dev_attr_shutdown_ongoing);
+	return 0;
+}
+
+void set_reboot_force(int watchdog_action)
+{
+	switch (watchdog_action) {
+	case SCU_COLD_RESET_ON_TIMEOUT:
+		reboot_force = REBOOT_FORCE_COLD_RESET;
+		break;
+	case SCU_COLD_BOOT_ON_TIMEOUT:
+		reboot_force = REBOOT_FORCE_COLD_BOOT;
+		break;
+	case SCU_COLD_OFF_ON_TIMEOUT:
+		reboot_force = REBOOT_FORCE_OFF;
+		break;
+	case SCU_DO_NOTHING_ON_TIMEOUT:
+		reboot_force = REBOOT_FORCE_ON;
+		break;
+	default:
+		reboot_force = REBOOT_FORCE_COLD_RESET;
+	}
+}
+
+/* This is the callback function launched when kernel panic() function */
+/* is executed.                                                        */
+static int watchdog_panic_handler(struct notifier_block *this,
+				  unsigned long         event,
+				  void                  *unused)
+{
+	if (disable_kernel_watchdog == true) {
+		reboot_force = REBOOT_FORCE_ON;
+		return NOTIFY_OK;
+	}
+
+	if (reboot_force == REBOOT_FORCE_ON)
+		return NOTIFY_OK;
+
+	if (watchdog_device.reboot_flag == true)
+		set_reboot_force(watchdog_device.reboot_wd_action);
+	else if (watchdog_device.shutdown_flag == true)
+		set_reboot_force(watchdog_device.shutdown_wd_action);
+	else
+		set_reboot_force(watchdog_device.normal_wd_action);
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block watchdog_panic_notifier = {
+	.notifier_call	= watchdog_panic_handler,
+	.next		= NULL,
+	.priority	= 150	/* priority: INT_MAX >= x >= 0 */
+};
+
+/* Init code */
+static int intel_scu_watchdog_init(void)
+{
+	int ret = 0;
+	unsigned int watchdog_irq;
+
+	watchdog_device.normal_wd_action   = SCU_COLD_RESET_ON_TIMEOUT;
+	watchdog_device.reboot_wd_action   = SCU_COLD_RESET_ON_TIMEOUT;
+	watchdog_device.shutdown_wd_action = SCU_COLD_OFF_ON_TIMEOUT;
+
+#ifdef CONFIG_DEBUG_FS
+	watchdog_device.panic_reboot_notifier = false;
+#endif /* CONFIG_DEBUG_FS */
+
+	/* Initially, we are not in shutdown mode */
+	watchdog_device.shutdown_flag = false;
+	watchdog_device.reboot_flag = false;
+
+	/* Check timeouts boot parameter */
+	if (check_timeouts(pre_timeout, timeout)) {
+		pr_err("%s: Invalid timeouts\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Reboot notifier */
+	watchdog_device.reboot_notifier.notifier_call = reboot_notifier;
+	watchdog_device.reboot_notifier.priority = 1;
+	ret = register_reboot_notifier(&watchdog_device.reboot_notifier);
+	if (ret) {
+		pr_crit("cannot register reboot notifier %d\n", ret);
+		goto error_stop_timer;
+	}
+
+	ret = atomic_notifier_chain_register(&panic_notifier_list,
+			&watchdog_panic_notifier);
+	if (ret) {
+		pr_crit("cannot register panic notifier %d\n", ret);
+		goto error_reboot_notifier;
+	}
+
+	/* Do not publish the watchdog device when disable (TO BE REMOVED) */
+	if (!disable_kernel_watchdog) {
+		watchdog_device.miscdev.minor = WATCHDOG_MINOR;
+		watchdog_device.miscdev.name = "watchdog";
+		watchdog_device.miscdev.fops = &intel_scu_fops;
+
+		ret = misc_register(&watchdog_device.miscdev);
+		if (ret) {
+			pr_crit("Cannot register miscdev %d err =%d\n",
+				WATCHDOG_MINOR, ret);
+			goto error_panic_notifier;
+		}
+	}
+
+	/* MSI handler to dump registers */
+	watchdog_irq = sfi_get_watchdog_irq();
+	if (watchdog_irq == 0xff) {
+		pr_err("error: sfi_get_watchdog_irq returned %d\n", watchdog_irq);
+		goto error_misc_register;
+	}
+
+	ret = request_irq(watchdog_irq,
+		watchdog_warning_interrupt,
+		IRQF_SHARED|IRQF_NO_SUSPEND, "watchdog",
+		&watchdog_device);
+	if (ret) {
+		pr_err("error requesting warning irq %d\n",
+		       watchdog_irq);
+		pr_err("error value returned is %d\n", ret);
+		goto error_misc_register;
+	}
+
+#ifdef CONFIG_INTEL_SCU_SOFT_LOCKUP
+	init_timer(&softlock_timer);
+#endif
+
+	if (disable_kernel_watchdog) {
+		pr_err("%s: Disable kernel watchdog\n", __func__);
+
+		/* Make sure timer is stopped */
+		ret = watchdog_stop();
+		if (ret != 0)
+			pr_debug("cant disable timer\n");
+	}
+
+#ifdef CONFIG_DEBUG_FS
+	ret = create_debugfs_entries();
+	if (ret) {
+		pr_err("%s: Error creating debugfs entries\n", __func__);
+		goto error_debugfs_entry;
+	}
+#endif
+
+	watchdog_device.started = false;
+
+	ret = create_watchdog_sysfs_files();
+	if (ret) {
+		pr_err("%s: Error creating debugfs entries\n", __func__);
+		goto error_sysfs_entry;
+	}
+
+	return ret;
+
+error_sysfs_entry:
+	/* Nothing special to do */
+#ifdef CONFIG_DEBUG_FS
+error_debugfs_entry:
+	/* Remove entries done by create function */
+#endif
+
+error_misc_register:
+	misc_deregister(&watchdog_device.miscdev);
+
+error_panic_notifier:
+	atomic_notifier_chain_unregister(&panic_notifier_list,
+						 &watchdog_panic_notifier);
+
+error_reboot_notifier:
+	unregister_reboot_notifier(&watchdog_device.reboot_notifier);
+
+error_stop_timer:
+	watchdog_stop();
+
+	return ret;
+}
+
+static void intel_scu_watchdog_exit(void)
+{
+	int ret = 0;
+
+	remove_watchdog_sysfs_files();
+#ifdef CONFIG_DEBUG_FS
+	remove_debugfs_entries();
+#endif
+
+#ifdef CONFIG_INTEL_SCU_SOFT_LOCKUP
+	del_timer_sync(&softlock_timer);
+#endif
+
+	ret = watchdog_stop();
+	if (ret != 0)
+		pr_err("cant disable timer\n");
+
+	misc_deregister(&watchdog_device.miscdev);
+	unregister_reboot_notifier(&watchdog_device.reboot_notifier);
+	atomic_notifier_chain_unregister(&panic_notifier_list,
+						 &watchdog_panic_notifier);
+}
+
+static int watchdog_rpmsg_probe(struct rpmsg_channel *rpdev)
+{
+	int ret = 0;
+
+	if (rpdev == NULL) {
+		pr_err("rpmsg channel not created\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	dev_info(&rpdev->dev, "Probed watchdog rpmsg device\n");
+
+	/* Allocate rpmsg instance for watchdog*/
+	ret = alloc_rpmsg_instance(rpdev, &watchdog_instance);
+	if (!watchdog_instance) {
+		dev_err(&rpdev->dev, "kzalloc watchdog instance failed\n");
+		goto out;
+	}
+	/* Initialize rpmsg instance */
+	init_rpmsg_instance(watchdog_instance);
+	/* Init scu watchdog */
+	ret = intel_scu_watchdog_init();
+
+	if (ret)
+		free_rpmsg_instance(rpdev, &watchdog_instance);
+out:
+	return ret;
+}
+
+static void watchdog_rpmsg_remove(struct rpmsg_channel *rpdev)
+{
+	intel_scu_watchdog_exit();
+	free_rpmsg_instance(rpdev, &watchdog_instance);
+	dev_info(&rpdev->dev, "Removed watchdog rpmsg device\n");
+}
+
+static void watchdog_rpmsg_cb(struct rpmsg_channel *rpdev, void *data,
+					int len, void *priv, u32 src)
+{
+	dev_warn(&rpdev->dev, "unexpected, message\n");
+
+	print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+		       data, len,  true);
+}
+
+static struct rpmsg_device_id watchdog_rpmsg_id_table[] = {
+	{ .name	= "rpmsg_watchdog" },
+	{ },
+};
+MODULE_DEVICE_TABLE(rpmsg, watchdog_rpmsg_id_table);
+
+static struct rpmsg_driver watchdog_rpmsg = {
+	.drv.name	= KBUILD_MODNAME,
+	.drv.owner	= THIS_MODULE,
+	.id_table	= watchdog_rpmsg_id_table,
+	.probe		= watchdog_rpmsg_probe,
+	.callback	= watchdog_rpmsg_cb,
+	.remove		= watchdog_rpmsg_remove,
+};
+
+static int __init watchdog_rpmsg_init(void)
+{
+	if ((intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER) ||
+		(intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_ANNIEDALE))
+		return register_rpmsg_driver(&watchdog_rpmsg);
+	else {
+		pr_err("%s: watchdog driver: bad platform\n", __func__);
+		return -ENODEV;
+	}
+}
+
+#ifdef MODULE
+module_init(watchdog_rpmsg_init);
+#else
+rootfs_initcall(watchdog_rpmsg_init);
+#endif
+
+static void __exit watchdog_rpmsg_exit(void)
+{
+	return unregister_rpmsg_driver(&watchdog_rpmsg);
+}
+module_exit(watchdog_rpmsg_exit);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_AUTHOR("mark.a.allyn@intel.com");
+MODULE_AUTHOR("yannx.puech@intel.com");
+MODULE_DESCRIPTION("Intel SCU Watchdog Device Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
+MODULE_VERSION(WDT_VER);
diff --git a/drivers/watchdog/intel_scu_watchdog_evo.h b/drivers/watchdog/intel_scu_watchdog_evo.h
new file mode 100644
index 0000000..5119545
--- /dev/null
+++ b/drivers/watchdog/intel_scu_watchdog_evo.h
@@ -0,0 +1,70 @@
+/*
+ *      Intel_SCU 0.3:  An Intel SCU IOH Based Watchdog Device
+ *			for Intel part #(s):
+ *				- AF82MP20 PCH
+ *
+ *      Copyright (C) 2009-2013 Intel Corporation. All rights reserved.
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of version 2 of the GNU General
+ *      Public License as published by the Free Software Foundation.
+ *
+ *      This program is distributed in the hope that it will be
+ *      useful, but WITHOUT ANY WARRANTY; without even the implied
+ *      warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ *      PURPOSE.  See the GNU General Public License for more details.
+ *      You should have received a copy of the GNU General Public
+ *      License along with this program; if not, write to the Free
+ *      Software Foundation, Inc., 59 Temple Place - Suite 330,
+ *      Boston, MA  02111-1307, USA.
+ *      The full GNU General Public License is included in this
+ *      distribution in the file called COPYING.
+ *
+ */
+
+#ifndef __INTEL_SCU_WATCHDOG_H
+#define __INTEL_SCU_WATCHDOG_H
+
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+#endif
+
+#define PFX "intel_scu_watchdog: "
+#define WDT_VER "0.3"
+
+#define RESET_ON_PANIC_TIMEOUT 15
+#define DEFAULT_PRETIMEOUT 75
+#define DEFAULT_TIMEOUT (DEFAULT_PRETIMEOUT + RESET_ON_PANIC_TIMEOUT)
+
+/* Value 0 to reset the reset counter */
+#define OSNIB_WRITE_VALUE 0
+
+struct intel_scu_watchdog_dev {
+	ulong driver_open;
+	ulong driver_closed;
+	bool started;
+	struct notifier_block reboot_notifier;
+	struct miscdevice miscdev;
+	bool shutdown_flag;
+	bool reboot_flag;
+	int reset_type;
+	int normal_wd_action;
+	int reboot_wd_action;
+	int shutdown_wd_action;
+#ifdef CONFIG_DEBUG_FS
+	bool panic_reboot_notifier;
+	struct dentry *dfs_wd;
+	struct dentry *dfs_secwd;
+	struct dentry *dfs_secwd_trigger;
+	struct dentry *dfs_kwd;
+	struct dentry *dfs_kwd_trigger;
+	struct dentry *dfs_kwd_reset_type;
+	struct dentry *dfs_kwd_panic_reboot;
+#endif /* CONFIG_DEBUG_FS */
+};
+
+#endif /* __INTEL_SCU_WATCHDOG_H */
diff --git a/drivers/watchdog/ts72xx_wdt.c b/drivers/watchdog/ts72xx_wdt.c
index b8a9245..9ad2bd3 100644
--- a/drivers/watchdog/ts72xx_wdt.c
+++ b/drivers/watchdog/ts72xx_wdt.c
@@ -310,7 +310,8 @@
 
 	case WDIOC_GETSTATUS:
 	case WDIOC_GETBOOTSTATUS:
-		return put_user(0, p);
+		error = put_user(0, p);
+		break;
 
 	case WDIOC_KEEPALIVE:
 		ts72xx_wdt_kick(wdt);
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 6a6bbe4..1faa130 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -346,7 +346,7 @@
 
 	for_each_possible_cpu(i)
 		memset(per_cpu(cpu_evtchn_mask, i),
-		       (i == 0) ? ~0 : 0, sizeof(*per_cpu(cpu_evtchn_mask, i)));
+		       (i == 0) ? ~0 : 0, NR_EVENT_CHANNELS/8);
 }
 
 static inline void clear_evtchn(int port)
@@ -1492,8 +1492,10 @@
 /* Rebind an evtchn so that it gets delivered to a specific cpu */
 static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
 {
+	struct shared_info *s = HYPERVISOR_shared_info;
 	struct evtchn_bind_vcpu bind_vcpu;
 	int evtchn = evtchn_from_irq(irq);
+	int masked;
 
 	if (!VALID_EVTCHN(evtchn))
 		return -1;
@@ -1510,6 +1512,12 @@
 	bind_vcpu.vcpu = tcpu;
 
 	/*
+	 * Mask the event while changing the VCPU binding to prevent
+	 * it being delivered on an unexpected VCPU.
+	 */
+	masked = sync_test_and_set_bit(evtchn, BM(s->evtchn_mask));
+
+	/*
 	 * If this fails, it usually just indicates that we're dealing with a
 	 * virq or IPI channel, which don't actually need to be rebound. Ignore
 	 * it, but don't do the xenlinux-level rebind in that case.
@@ -1517,6 +1525,9 @@
 	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
 		bind_evtchn_to_cpu(evtchn, tcpu);
 
+	if (!masked)
+		unmask_evtchn(evtchn);
+
 	return 0;
 }
 
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index 45c8efa..34924fb 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -377,18 +377,12 @@
 		if (unbind.port >= NR_EVENT_CHANNELS)
 			break;
 
-		spin_lock_irq(&port_user_lock);
-
 		rc = -ENOTCONN;
-		if (get_port_user(unbind.port) != u) {
-			spin_unlock_irq(&port_user_lock);
+		if (get_port_user(unbind.port) != u)
 			break;
-		}
 
 		disable_irq(irq_from_evtchn(unbind.port));
 
-		spin_unlock_irq(&port_user_lock);
-
 		evtchn_unbind_from_user(u, unbind.port);
 
 		rc = 0;
@@ -488,26 +482,15 @@
 	int i;
 	struct per_user_data *u = filp->private_data;
 
-	spin_lock_irq(&port_user_lock);
-
-	free_page((unsigned long)u->ring);
-
 	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
 		if (get_port_user(i) != u)
 			continue;
 
 		disable_irq(irq_from_evtchn(i));
-	}
-
-	spin_unlock_irq(&port_user_lock);
-
-	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
-		if (get_port_user(i) != u)
-			continue;
-
 		evtchn_unbind_from_user(get_port_user(i), i);
 	}
 
+	free_page((unsigned long)u->ring);
 	kfree(u->name);
 	kfree(u);
 
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 04c1b2d..d5418c1 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -729,9 +729,18 @@
 				  void (*fn)(void *), void *arg, u16 count)
 {
 	unsigned long flags;
+	struct gnttab_free_callback *cb;
+
 	spin_lock_irqsave(&gnttab_list_lock, flags);
-	if (callback->next)
-		goto out;
+
+	/* Check if the callback is already on the list */
+	cb = gnttab_free_callback_list;
+	while (cb) {
+		if (cb == callback)
+			goto out;
+		cb = cb->next;
+	}
+
 	callback->fn = fn;
 	callback->arg = arg;
 	callback->count = count;
diff --git a/firmware/Makefile b/firmware/Makefile
index cbb09ce..6e1dca7 100644
--- a/firmware/Makefile
+++ b/firmware/Makefile
@@ -135,6 +135,7 @@
 fw-shipped-$(CONFIG_USB_VICAM) += vicam/firmware.fw
 fw-shipped-$(CONFIG_VIDEO_CPIA2) += cpia2/stv0672_vp4.bin
 fw-shipped-$(CONFIG_YAM) += yam/1200.bin yam/9600.bin
+fw-shipped-$(CONFIG_INTEL_MID_REMOTEPROC) += intel_mid/intel_mid_remoteproc.fw
 
 fw-shipped-all := $(fw-shipped-y) $(fw-shipped-m) $(fw-shipped-)
 
diff --git a/firmware/intel_mid/intel_mid_remoteproc.fw.ihex b/firmware/intel_mid/intel_mid_remoteproc.fw.ihex
new file mode 100644
index 0000000..998c4f2
--- /dev/null
+++ b/firmware/intel_mid/intel_mid_remoteproc.fw.ihex
@@ -0,0 +1,280 @@
+:100000007F454C4601010100000000000000000097
+:100010000200030001000000240000003400000082
+:10002000F010000000000000340020000100280053
+:1000300003000200040000000000000000000000B7
+:1000400000000000000000000000000007000000A9
+:10005000040000000000000000000000000000009C
+:100060000000000000000000000000000000000090
+:100070000000000000000000000000000000000080
+:100080000000000000000000000000000000000070
+:100090000000000000000000000000000000000060
+:1000A0000000000000000000000000000000000050
+:1000B0000000000000000000000000000000000040
+:1000C0000000000000000000000000000000000030
+:1000D0000000000000000000000000000000000020
+:1000E0000000000000000000000000000000000010
+:1000F0000000000000000000000000000000000000
+:1001000000000000000000000000000000000000EF
+:1001100000000000000000000000000000000000DF
+:1001200000000000000000000000000000000000CF
+:1001300000000000000000000000000000000000BF
+:1001400000000000000000000000000000000000AF
+:10015000000000000000000000000000000000009F
+:10016000000000000000000000000000000000008F
+:10017000000000000000000000000000000000007F
+:10018000000000000000000000000000000000006F
+:10019000000000000000000000000000000000005F
+:1001A000000000000000000000000000000000004F
+:1001B000000000000000000000000000000000003F
+:1001C000000000000000000000000000000000002F
+:1001D000000000000000000000000000000000001F
+:1001E000000000000000000000000000000000000F
+:1001F00000000000000000000000000000000000FF
+:1002000000000000000000000000000000000000EE
+:1002100000000000000000000000000000000000DE
+:1002200000000000000000000000000000000000CE
+:1002300000000000000000000000000000000000BE
+:1002400000000000000000000000000000000000AE
+:10025000000000000000000000000000000000009E
+:10026000000000000000000000000000000000008E
+:10027000000000000000000000000000000000007E
+:10028000000000000000000000000000000000006E
+:10029000000000000000000000000000000000005E
+:1002A000000000000000000000000000000000004E
+:1002B000000000000000000000000000000000003E
+:1002C000000000000000000000000000000000002E
+:1002D000000000000000000000000000000000001E
+:1002E000000000000000000000000000000000000E
+:1002F00000000000000000000000000000000000FE
+:1003000000000000000000000000000000000000ED
+:1003100000000000000000000000000000000000DD
+:1003200000000000000000000000000000000000CD
+:1003300000000000000000000000000000000000BD
+:1003400000000000000000000000000000000000AD
+:10035000000000000000000000000000000000009D
+:10036000000000000000000000000000000000008D
+:10037000000000000000000000000000000000007D
+:10038000000000000000000000000000000000006D
+:10039000000000000000000000000000000000005D
+:1003A000000000000000000000000000000000004D
+:1003B000000000000000000000000000000000003D
+:1003C000000000000000000000000000000000002D
+:1003D000000000000000000000000000000000001D
+:1003E000000000000000000000000000000000000D
+:1003F00000000000000000000000000000000000FD
+:1004000000000000000000000000000000000000EC
+:1004100000000000000000000000000000000000DC
+:1004200000000000000000000000000000000000CC
+:1004300000000000000000000000000000000000BC
+:1004400000000000000000000000000000000000AC
+:10045000000000000000000000000000000000009C
+:10046000000000000000000000000000000000008C
+:10047000000000000000000000000000000000007C
+:10048000000000000000000000000000000000006C
+:10049000000000000000000000000000000000005C
+:1004A000000000000000000000000000000000004C
+:1004B000000000000000000000000000000000003C
+:1004C000000000000000000000000000000000002C
+:1004D000000000000000000000000000000000001C
+:1004E000000000000000000000000000000000000C
+:1004F00000000000000000000000000000000000FC
+:1005000000000000000000000000000000000000EB
+:1005100000000000000000000000000000000000DB
+:1005200000000000000000000000000000000000CB
+:1005300000000000000000000000000000000000BB
+:1005400000000000000000000000000000000000AB
+:10055000000000000000000000000000000000009B
+:10056000000000000000000000000000000000008B
+:10057000000000000000000000000000000000007B
+:10058000000000000000000000000000000000006B
+:10059000000000000000000000000000000000005B
+:1005A000000000000000000000000000000000004B
+:1005B000000000000000000000000000000000003B
+:1005C000000000000000000000000000000000002B
+:1005D000000000000000000000000000000000001B
+:1005E000000000000000000000000000000000000B
+:1005F00000000000000000000000000000000000FB
+:1006000000000000000000000000000000000000EA
+:1006100000000000000000000000000000000000DA
+:1006200000000000000000000000000000000000CA
+:1006300000000000000000000000000000000000BA
+:1006400000000000000000000000000000000000AA
+:10065000000000000000000000000000000000009A
+:10066000000000000000000000000000000000008A
+:10067000000000000000000000000000000000007A
+:10068000000000000000000000000000000000006A
+:10069000000000000000000000000000000000005A
+:1006A000000000000000000000000000000000004A
+:1006B000000000000000000000000000000000003A
+:1006C000000000000000000000000000000000002A
+:1006D000000000000000000000000000000000001A
+:1006E000000000000000000000000000000000000A
+:1006F00000000000000000000000000000000000FA
+:1007000000000000000000000000000000000000E9
+:1007100000000000000000000000000000000000D9
+:1007200000000000000000000000000000000000C9
+:1007300000000000000000000000000000000000B9
+:1007400000000000000000000000000000000000A9
+:100750000000000000000000000000000000000099
+:100760000000000000000000000000000000000089
+:100770000000000000000000000000000000000079
+:100780000000000000000000000000000000000069
+:100790000000000000000000000000000000000059
+:1007A0000000000000000000000000000000000049
+:1007B0000000000000000000000000000000000039
+:1007C0000000000000000000000000000000000029
+:1007D0000000000000000000000000000000000019
+:1007E0000000000000000000000000000000000009
+:1007F00000000000000000000000000000000000F9
+:1008000000000000000000000000000000000000E8
+:1008100000000000000000000000000000000000D8
+:1008200000000000000000000000000000000000C8
+:1008300000000000000000000000000000000000B8
+:1008400000000000000000000000000000000000A8
+:100850000000000000000000000000000000000098
+:100860000000000000000000000000000000000088
+:100870000000000000000000000000000000000078
+:100880000000000000000000000000000000000068
+:100890000000000000000000000000000000000058
+:1008A0000000000000000000000000000000000048
+:1008B0000000000000000000000000000000000038
+:1008C0000000000000000000000000000000000028
+:1008D0000000000000000000000000000000000018
+:1008E0000000000000000000000000000000000008
+:1008F00000000000000000000000000000000000F8
+:1009000000000000000000000000000000000000E7
+:1009100000000000000000000000000000000000D7
+:1009200000000000000000000000000000000000C7
+:1009300000000000000000000000000000000000B7
+:1009400000000000000000000000000000000000A7
+:100950000000000000000000000000000000000097
+:100960000000000000000000000000000000000087
+:100970000000000000000000000000000000000077
+:100980000000000000000000000000000000000067
+:100990000000000000000000000000000000000057
+:1009A0000000000000000000000000000000000047
+:1009B0000000000000000000000000000000000037
+:1009C0000000000000000000000000000000000027
+:1009D0000000000000000000000000000000000017
+:1009E0000000000000000000000000000000000007
+:1009F00000000000000000000000000000000000F7
+:100A000000000000000000000000000000000000E6
+:100A100000000000000000000000000000000000D6
+:100A200000000000000000000000000000000000C6
+:100A300000000000000000000000000000000000B6
+:100A400000000000000000000000000000000000A6
+:100A50000000000000000000000000000000000096
+:100A60000000000000000000000000000000000086
+:100A70000000000000000000000000000000000076
+:100A80000000000000000000000000000000000066
+:100A90000000000000000000000000000000000056
+:100AA0000000000000000000000000000000000046
+:100AB0000000000000000000000000000000000036
+:100AC0000000000000000000000000000000000026
+:100AD0000000000000000000000000000000000016
+:100AE0000000000000000000000000000000000006
+:100AF00000000000000000000000000000000000F6
+:100B000000000000000000000000000000000000E5
+:100B100000000000000000000000000000000000D5
+:100B200000000000000000000000000000000000C5
+:100B300000000000000000000000000000000000B5
+:100B400000000000000000000000000000000000A5
+:100B50000000000000000000000000000000000095
+:100B60000000000000000000000000000000000085
+:100B70000000000000000000000000000000000075
+:100B80000000000000000000000000000000000065
+:100B90000000000000000000000000000000000055
+:100BA0000000000000000000000000000000000045
+:100BB0000000000000000000000000000000000035
+:100BC0000000000000000000000000000000000025
+:100BD0000000000000000000000000000000000015
+:100BE0000000000000000000000000000000000005
+:100BF00000000000000000000000000000000000F5
+:100C000000000000000000000000000000000000E4
+:100C100000000000000000000000000000000000D4
+:100C200000000000000000000000000000000000C4
+:100C300000000000000000000000000000000000B4
+:100C400000000000000000000000000000000000A4
+:100C50000000000000000000000000000000000094
+:100C60000000000000000000000000000000000084
+:100C70000000000000000000000000000000000074
+:100C80000000000000000000000000000000000064
+:100C90000000000000000000000000000000000054
+:100CA0000000000000000000000000000000000044
+:100CB0000000000000000000000000000000000034
+:100CC0000000000000000000000000000000000024
+:100CD0000000000000000000000000000000000014
+:100CE0000000000000000000000000000000000004
+:100CF00000000000000000000000000000000000F4
+:100D000000000000000000000000000000000000E3
+:100D100000000000000000000000000000000000D3
+:100D200000000000000000000000000000000000C3
+:100D300000000000000000000000000000000000B3
+:100D400000000000000000000000000000000000A3
+:100D50000000000000000000000000000000000093
+:100D60000000000000000000000000000000000083
+:100D70000000000000000000000000000000000073
+:100D80000000000000000000000000000000000063
+:100D90000000000000000000000000000000000053
+:100DA0000000000000000000000000000000000043
+:100DB0000000000000000000000000000000000033
+:100DC0000000000000000000000000000000000023
+:100DD0000000000000000000000000000000000013
+:100DE0000000000000000000000000000000000003
+:100DF00000000000000000000000000000000000F3
+:100E000000000000000000000000000000000000E2
+:100E100000000000000000000000000000000000D2
+:100E200000000000000000000000000000000000C2
+:100E300000000000000000000000000000000000B2
+:100E400000000000000000000000000000000000A2
+:100E50000000000000000000000000000000000092
+:100E60000000000000000000000000000000000082
+:100E70000000000000000000000000000000000072
+:100E80000000000000000000000000000000000062
+:100E90000000000000000000000000000000000052
+:100EA0000000000000000000000000000000000042
+:100EB0000000000000000000000000000000000032
+:100EC0000000000000000000000000000000000022
+:100ED0000000000000000000000000000000000012
+:100EE0000000000000000000000000000000000002
+:100EF00000000000000000000000000000000000F2
+:100F000000000000000000000000000000000000E1
+:100F100000000000000000000000000000000000D1
+:100F200000000000000000000000000000000000C1
+:100F300000000000000000000000000000000000B1
+:100F400000000000000000000000000000000000A1
+:100F50000000000000000000000000000000000091
+:100F60000000000000000000000000000000000081
+:100F70000000000000000000000000000000000071
+:100F80000000000000000000000000000000000061
+:100F90000000000000000000000000000000000051
+:100FA0000000000000000000000000000000000041
+:100FB0000000000000000000000000000000000031
+:100FC0000000000000000000000000000000000021
+:100FD0000000000000000000000000000000000011
+:100FE0000000000000000000000000000000000001
+:100FF00000000000000000000000000000000000F1
+:1010000000000000000000000000000000000000E0
+:1010100000000000000000000000000000000000D0
+:1010200000000000000000000000000000000000C0
+:1010300000000000000000000000000000000000B0
+:10104000010000000200000000000000000000009D
+:10105000180000005C000000030000000700000012
+:10106000010000000100000001000000000000007D
+:10107000000200000000000001000000000100006C
+:10108000000000000000000000000000010000005F
+:10109000000100000000000000000000000000004F
+:1010A000000000000000000000020000000000003E
+:1010B0000000000000000000000000000000000030
+:1010C0000000000000000000000000000000000020
+:1010D00000000000002E7368737472746162002E49
+:1010E0007265736F757263655F7461626C65000031
+:1010F00000000000000000000000000000000000F0
+:1011000000000000000000000000000000000000DF
+:1011100000000000000000000B00000007000000BD
+:101120000300000040000000401000009400000098
+:10113000000000000000000020000000000000008F
+:10114000010000000300000000000000000000009B
+:10115000D41000001B000000000000000000000090
+:08116000010000000000000086
+:00000001FF
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index f8a0b0e..3aac8e9 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1415,7 +1415,7 @@
  *   long file_ofs
  * followed by COUNT filenames in ASCII: "FILE1" NUL "FILE2" NUL...
  */
-static void fill_files_note(struct memelfnote *note)
+static int fill_files_note(struct memelfnote *note)
 {
 	struct vm_area_struct *vma;
 	unsigned count, size, names_ofs, remaining, n;
@@ -1430,11 +1430,11 @@
 	names_ofs = (2 + 3 * count) * sizeof(data[0]);
  alloc:
 	if (size >= MAX_FILE_NOTE_SIZE) /* paranoia check */
-		goto err;
+		return -EINVAL;
 	size = round_up(size, PAGE_SIZE);
 	data = vmalloc(size);
 	if (!data)
-		goto err;
+		return -ENOMEM;
 
 	start_end_ofs = data + 2;
 	name_base = name_curpos = ((char *)data) + names_ofs;
@@ -1487,7 +1487,7 @@
 
 	size = name_curpos - (char *)data;
 	fill_note(note, "CORE", NT_FILE, size, data);
- err: ;
+	return 0;
 }
 
 #ifdef CORE_DUMP_USE_REGSET
@@ -1688,8 +1688,8 @@
 	fill_auxv_note(&info->auxv, current->mm);
 	info->size += notesize(&info->auxv);
 
-	fill_files_note(&info->files);
-	info->size += notesize(&info->files);
+	if (fill_files_note(&info->files) == 0)
+		info->size += notesize(&info->files);
 
 	return 1;
 }
@@ -1721,7 +1721,8 @@
 			return 0;
 		if (first && !writenote(&info->auxv, file, foffset))
 			return 0;
-		if (first && !writenote(&info->files, file, foffset))
+		if (first && info->files.data &&
+				!writenote(&info->files, file, foffset))
 			return 0;
 
 		for (i = 1; i < info->thread_notes; ++i)
@@ -1808,6 +1809,7 @@
 
 struct elf_note_info {
 	struct memelfnote *notes;
+	struct memelfnote *notes_files;
 	struct elf_prstatus *prstatus;	/* NT_PRSTATUS */
 	struct elf_prpsinfo *psinfo;	/* NT_PRPSINFO */
 	struct list_head thread_list;
@@ -1898,9 +1900,12 @@
 
 	fill_siginfo_note(info->notes + 2, &info->csigdata, siginfo);
 	fill_auxv_note(info->notes + 3, current->mm);
-	fill_files_note(info->notes + 4);
+	info->numnote = 4;
 
-	info->numnote = 5;
+	if (fill_files_note(info->notes + info->numnote) == 0) {
+		info->notes_files = info->notes + info->numnote;
+		info->numnote++;
+	}
 
 	/* Try to dump the FPU. */
 	info->prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs,
@@ -1962,8 +1967,9 @@
 		kfree(list_entry(tmp, struct elf_thread_status, list));
 	}
 
-	/* Free data allocated by fill_files_note(): */
-	vfree(info->notes[4].data);
+	/* Free data possibly allocated by fill_files_note(): */
+	if (info->notes_files)
+		vfree(info->notes_files->data);
 
 	kfree(info->prstatus);
 	kfree(info->psinfo);
@@ -2046,7 +2052,7 @@
 	struct vm_area_struct *vma, *gate_vma;
 	struct elfhdr *elf = NULL;
 	loff_t offset = 0, dataoff, foffset;
-	struct elf_note_info info;
+	struct elf_note_info info = { };
 	struct elf_phdr *phdr4note = NULL;
 	struct elf_shdr *shdr4extnum = NULL;
 	Elf_Half e_phnum;
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
index 8fb42916..45e944f 100644
--- a/fs/bio-integrity.c
+++ b/fs/bio-integrity.c
@@ -734,7 +734,7 @@
 		mempool_destroy(bs->bio_integrity_pool);
 
 	if (bs->bvec_integrity_pool)
-		mempool_destroy(bs->bio_integrity_pool);
+		mempool_destroy(bs->bvec_integrity_pool);
 }
 EXPORT_SYMBOL(bioset_integrity_free);
 
diff --git a/fs/bio.c b/fs/bio.c
index 94bbc04..5e7507d 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -917,8 +917,8 @@
 		src_p = kmap_atomic(src_bv->bv_page);
 		dst_p = kmap_atomic(dst_bv->bv_page);
 
-		memcpy(dst_p + dst_bv->bv_offset,
-		       src_p + src_bv->bv_offset,
+		memcpy(dst_p + dst_offset,
+		       src_p + src_offset,
 		       bytes);
 
 		kunmap_atomic(dst_p);
@@ -1045,12 +1045,22 @@
 int bio_uncopy_user(struct bio *bio)
 {
 	struct bio_map_data *bmd = bio->bi_private;
-	int ret = 0;
+	struct bio_vec *bvec;
+	int ret = 0, i;
 
-	if (!bio_flagged(bio, BIO_NULL_MAPPED))
-		ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
-				     bmd->nr_sgvecs, bio_data_dir(bio) == READ,
-				     0, bmd->is_our_pages);
+	if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
+		/*
+		 * if we're in a workqueue, the request is orphaned, so
+		 * don't copy into a random user address space, just free.
+		 */
+		if (current->mm)
+			ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
+					     bmd->nr_sgvecs, bio_data_dir(bio) == READ,
+					     0, bmd->is_our_pages);
+		else if (bmd->is_our_pages)
+			bio_for_each_segment_all(bvec, bio, i)
+				__free_page(bvec->bv_page);
+	}
 	bio_free_map_data(bmd);
 	bio_put(bio);
 	return ret;
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 2091db8..85f5c85 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -58,17 +58,24 @@
 			struct backing_dev_info *dst)
 {
 	struct backing_dev_info *old = inode->i_data.backing_dev_info;
+	bool wakeup_bdi = false;
 
 	if (unlikely(dst == old))		/* deadlock avoidance */
 		return;
 	bdi_lock_two(&old->wb, &dst->wb);
 	spin_lock(&inode->i_lock);
 	inode->i_data.backing_dev_info = dst;
-	if (inode->i_state & I_DIRTY)
+	if (inode->i_state & I_DIRTY) {
+		if (bdi_cap_writeback_dirty(dst) && !wb_has_dirty_io(&dst->wb))
+			wakeup_bdi = true;
 		list_move(&inode->i_wb_list, &dst->wb.b_dirty);
+	}
 	spin_unlock(&inode->i_lock);
 	spin_unlock(&old->wb.list_lock);
 	spin_unlock(&dst->wb.list_lock);
+
+	if (wakeup_bdi)
+		bdi_wakeup_thread_delayed(dst);
 }
 
 /* Kill _all_ buffers and pagecache , dirty or not.. */
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 02fae7f..7fb054b 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1089,7 +1089,8 @@
 		btrfs_set_node_ptr_generation(parent, parent_slot,
 					      trans->transid);
 		btrfs_mark_buffer_dirty(parent);
-		tree_mod_log_free_eb(root->fs_info, buf);
+		if (last_ref)
+			tree_mod_log_free_eb(root->fs_info, buf);
 		btrfs_free_tree_block(trans, root, buf, parent_start,
 				      last_ref);
 	}
@@ -1161,8 +1162,8 @@
  * time_seq).
  */
 static void
-__tree_mod_log_rewind(struct extent_buffer *eb, u64 time_seq,
-		      struct tree_mod_elem *first_tm)
+__tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
+		      u64 time_seq, struct tree_mod_elem *first_tm)
 {
 	u32 n;
 	struct rb_node *next;
@@ -1172,6 +1173,7 @@
 	unsigned long p_size = sizeof(struct btrfs_key_ptr);
 
 	n = btrfs_header_nritems(eb);
+	tree_mod_log_read_lock(fs_info);
 	while (tm && tm->seq >= time_seq) {
 		/*
 		 * all the operations are recorded with the operator used for
@@ -1226,6 +1228,7 @@
 		if (tm->index != first_tm->index)
 			break;
 	}
+	tree_mod_log_read_unlock(fs_info);
 	btrfs_set_header_nritems(eb, n);
 }
 
@@ -1274,7 +1277,7 @@
 
 	extent_buffer_get(eb_rewin);
 	btrfs_tree_read_lock(eb_rewin);
-	__tree_mod_log_rewind(eb_rewin, time_seq, tm);
+	__tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
 	WARN_ON(btrfs_header_nritems(eb_rewin) >
 		BTRFS_NODEPTRS_PER_BLOCK(fs_info->tree_root));
 
@@ -1350,7 +1353,7 @@
 		btrfs_set_header_generation(eb, old_generation);
 	}
 	if (tm)
-		__tree_mod_log_rewind(eb, time_seq, tm);
+		__tree_mod_log_rewind(root->fs_info, eb, time_seq, tm);
 	else
 		WARN_ON(btrfs_header_level(eb) != 0);
 	WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(root));
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index df472ab..3b6d20b 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2402,6 +2402,8 @@
 			default:
 				WARN_ON(1);
 			}
+		} else {
+			list_del_init(&locked_ref->cluster);
 		}
 		spin_unlock(&delayed_refs->lock);
 
@@ -2424,7 +2426,6 @@
 		 * list before we release it.
 		 */
 		if (btrfs_delayed_ref_is_head(ref)) {
-			list_del_init(&locked_ref->cluster);
 			btrfs_delayed_ref_unlock(locked_ref);
 			locked_ref = NULL;
 		}
@@ -7298,6 +7299,7 @@
 	int err = 0;
 	int ret;
 	int level;
+	bool root_dropped = false;
 
 	path = btrfs_alloc_path();
 	if (!path) {
@@ -7355,6 +7357,7 @@
 		while (1) {
 			btrfs_tree_lock(path->nodes[level]);
 			btrfs_set_lock_blocking(path->nodes[level]);
+			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
 
 			ret = btrfs_lookup_extent_info(trans, root,
 						path->nodes[level]->start,
@@ -7370,6 +7373,7 @@
 				break;
 
 			btrfs_tree_unlock(path->nodes[level]);
+			path->locks[level] = 0;
 			WARN_ON(wc->refs[level] != 1);
 			level--;
 		}
@@ -7471,12 +7475,22 @@
 		free_extent_buffer(root->commit_root);
 		kfree(root);
 	}
+	root_dropped = true;
 out_end_trans:
 	btrfs_end_transaction_throttle(trans, tree_root);
 out_free:
 	kfree(wc);
 	btrfs_free_path(path);
 out:
+	/*
+	 * So if we need to stop dropping the snapshot for whatever reason we
+	 * need to make sure to add it back to the dead root list so that we
+	 * keep trying to do the work later.  This also cleans up roots if we
+	 * don't have it in the radix (like when we recover after a power fail
+	 * or unmount) so we don't leak memory.
+	 */
+	if (root_dropped == false)
+		btrfs_add_dead_root(root);
 	if (err)
 		btrfs_std_error(root->fs_info, err);
 	return err;
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 5d8c37a..612610f 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -8147,7 +8147,7 @@
 
 
 	/* check for collisions, even if the  name isn't there */
-	ret = btrfs_check_dir_item_collision(root, new_dir->i_ino,
+	ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino,
 			     new_dentry->d_name.name,
 			     new_dentry->d_name.len);
 
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 0f81d67..8dedf40 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -3299,6 +3299,9 @@
 
 	switch (p->cmd) {
 	case BTRFS_IOCTL_DEV_REPLACE_CMD_START:
+		if (root->fs_info->sb->s_flags & MS_RDONLY)
+			return -EROFS;
+
 		if (atomic_xchg(
 			&root->fs_info->mutually_exclusive_operation_running,
 			1)) {
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 4febca4..b3896d5 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -691,6 +691,7 @@
 	int cowonly;
 	int ret;
 	int err = 0;
+	bool need_check = true;
 
 	path1 = btrfs_alloc_path();
 	path2 = btrfs_alloc_path();
@@ -914,6 +915,7 @@
 			cur->bytenr);
 
 		lower = cur;
+		need_check = true;
 		for (; level < BTRFS_MAX_LEVEL; level++) {
 			if (!path2->nodes[level]) {
 				BUG_ON(btrfs_root_bytenr(&root->root_item) !=
@@ -957,14 +959,12 @@
 
 				/*
 				 * add the block to pending list if we
-				 * need check its backrefs. only block
-				 * at 'cur->level + 1' is added to the
-				 * tail of pending list. this guarantees
-				 * we check backrefs from lower level
-				 * blocks to upper level blocks.
+				 * need check its backrefs, we only do this once
+				 * while walking up a tree as we will catch
+				 * anything else later on.
 				 */
-				if (!upper->checked &&
-				    level == cur->level + 1) {
+				if (!upper->checked && need_check) {
+					need_check = false;
 					list_add_tail(&edge->list[UPPER],
 						      &list);
 				} else
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 79bd479..eb84c2d 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -2501,7 +2501,7 @@
 			ret = scrub_extent(sctx, extent_logical, extent_len,
 					   extent_physical, extent_dev, flags,
 					   generation, extent_mirror_num,
-					   extent_physical);
+					   extent_logical - logical + physical);
 			if (ret)
 				goto out;
 
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index ff40f1c..09ea0bd 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -2524,7 +2524,8 @@
 		di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
 		btrfs_dir_item_key_to_cpu(eb, di, &di_key);
 
-		if (di_key.objectid < sctx->send_progress) {
+		if (di_key.type != BTRFS_ROOT_ITEM_KEY &&
+		    di_key.objectid < sctx->send_progress) {
 			ret = 1;
 			goto out;
 		}
@@ -4579,6 +4580,41 @@
 	send_root = BTRFS_I(file_inode(mnt_file))->root;
 	fs_info = send_root->fs_info;
 
+	/*
+	 * This is done when we lookup the root, it should already be complete
+	 * by the time we get here.
+	 */
+	WARN_ON(send_root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE);
+
+	/*
+	 * If we just created this root we need to make sure that the orphan
+	 * cleanup has been done and committed since we search the commit root,
+	 * so check its commit root transid with our otransid and if they match
+	 * commit the transaction to make sure everything is updated.
+	 */
+	down_read(&send_root->fs_info->extent_commit_sem);
+	if (btrfs_header_generation(send_root->commit_root) ==
+	    btrfs_root_otransid(&send_root->root_item)) {
+		struct btrfs_trans_handle *trans;
+
+		up_read(&send_root->fs_info->extent_commit_sem);
+
+		trans = btrfs_attach_transaction_barrier(send_root);
+		if (IS_ERR(trans)) {
+			if (PTR_ERR(trans) != -ENOENT) {
+				ret = PTR_ERR(trans);
+				goto out;
+			}
+			/* ENOENT means theres no transaction */
+		} else {
+			ret = btrfs_commit_transaction(trans, send_root);
+			if (ret)
+				goto out;
+		}
+	} else {
+		up_read(&send_root->fs_info->extent_commit_sem);
+	}
+
 	arg = memdup_user(arg_, sizeof(*arg));
 	if (IS_ERR(arg)) {
 		ret = PTR_ERR(arg);
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index c276ac9..cf68596 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -3728,8 +3728,9 @@
 	}
 
 log_extents:
+	btrfs_release_path(path);
+	btrfs_release_path(dst_path);
 	if (fast_search) {
-		btrfs_release_path(dst_path);
 		ret = btrfs_log_changed_extents(trans, root, inode, dst_path);
 		if (ret) {
 			err = ret;
@@ -3746,8 +3747,6 @@
 	}
 
 	if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) {
-		btrfs_release_path(path);
-		btrfs_release_path(dst_path);
 		ret = log_directory_changes(trans, root, inode, path, dst_path);
 		if (ret) {
 			err = ret;
diff --git a/fs/btrfs/ulist.c b/fs/btrfs/ulist.c
index 7b417e2..b0a523b2 100644
--- a/fs/btrfs/ulist.c
+++ b/fs/btrfs/ulist.c
@@ -205,6 +205,10 @@
 		u64 new_alloced = ulist->nodes_alloced + 128;
 		struct ulist_node *new_nodes;
 		void *old = NULL;
+		int i;
+
+		for (i = 0; i < ulist->nnodes; i++)
+			rb_erase(&ulist->nodes[i].rb_node, &ulist->root);
 
 		/*
 		 * if nodes_alloced == ULIST_SIZE no memory has been allocated
@@ -224,6 +228,17 @@
 
 		ulist->nodes = new_nodes;
 		ulist->nodes_alloced = new_alloced;
+
+		/*
+		 * krealloc actually uses memcpy, which does not copy rb_node
+		 * pointers, so we have to do it ourselves.  Otherwise we may
+		 * be bitten by crashes.
+		 */
+		for (i = 0; i < ulist->nnodes; i++) {
+			ret = ulist_rbtree_insert(ulist, &ulist->nodes[i]);
+			if (ret < 0)
+				return ret;
+		}
 	}
 	ulist->nodes[ulist->nnodes].val = val;
 	ulist->nodes[ulist->nnodes].aux = aux;
diff --git a/fs/buffer.c b/fs/buffer.c
index 37073af..035ae3a 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -522,7 +522,7 @@
 {
 	char b[BDEVNAME_SIZE];
 	while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
-		printk(KERN_WARNING "Emergency Thaw on %s\n",
+		printk_ratelimited(KERN_WARNING "Emergency Thaw on %s\n",
 		       bdevname(sb->s_bdev, b));
 }
 
diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c
index e0b4ef3..a5ce62e 100644
--- a/fs/ceph/ioctl.c
+++ b/fs/ceph/ioctl.c
@@ -196,8 +196,10 @@
 	r = ceph_calc_file_object_mapping(&ci->i_layout, dl.file_offset, len,
 					  &dl.object_no, &dl.object_offset,
 					  &olen);
-	if (r < 0)
+	if (r < 0) {
+		up_read(&osdc->map_sem);
 		return -EIO;
+	}
 	dl.file_offset -= dl.object_offset;
 	dl.object_size = ceph_file_layout_object_size(ci->i_layout);
 	dl.block_size = ceph_file_layout_su(ci->i_layout);
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 9b6b2b6d..be661d8 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -675,17 +675,18 @@
 	if (!ceph_is_valid_xattr(name))
 		return -ENODATA;
 
-	spin_lock(&ci->i_ceph_lock);
-	dout("getxattr %p ver=%lld index_ver=%lld\n", inode,
-	     ci->i_xattrs.version, ci->i_xattrs.index_version);
 
 	/* let's see if a virtual xattr was requested */
 	vxattr = ceph_match_vxattr(inode, name);
 	if (vxattr && !(vxattr->exists_cb && !vxattr->exists_cb(ci))) {
 		err = vxattr->getxattr_cb(ci, value, size);
-		goto out;
+		return err;
 	}
 
+	spin_lock(&ci->i_ceph_lock);
+	dout("getxattr %p ver=%lld index_ver=%lld\n", inode,
+	     ci->i_xattrs.version, ci->i_xattrs.index_version);
+
 	if (__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1) &&
 	    (ci->i_xattrs.index_version >= ci->i_xattrs.version)) {
 		goto get_xattr;
diff --git a/fs/cifs/cifs_unicode.h b/fs/cifs/cifs_unicode.h
index 4fb0974..fe8d627 100644
--- a/fs/cifs/cifs_unicode.h
+++ b/fs/cifs/cifs_unicode.h
@@ -327,14 +327,14 @@
 /*
  * UniStrupr:  Upper case a unicode string
  */
-static inline wchar_t *
-UniStrupr(register wchar_t *upin)
+static inline __le16 *
+UniStrupr(register __le16 *upin)
 {
-	register wchar_t *up;
+	register __le16 *up;
 
 	up = upin;
 	while (*up) {		/* For all characters */
-		*up = UniToupper(*up);
+		*up = cpu_to_le16(UniToupper(le16_to_cpu(*up)));
 		up++;
 	}
 	return upin;		/* Return input pointer */
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index 71436d1..5c807b2 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -389,7 +389,7 @@
 		if (blobptr + attrsize > blobend)
 			break;
 		if (type == NTLMSSP_AV_NB_DOMAIN_NAME) {
-			if (!attrsize)
+			if (!attrsize || attrsize >= CIFS_MAX_DOMAINNAME_LEN)
 				break;
 			if (!ses->domainName) {
 				ses->domainName =
@@ -414,7 +414,7 @@
 	int rc = 0;
 	int len;
 	char nt_hash[CIFS_NTHASH_SIZE];
-	wchar_t *user;
+	__le16 *user;
 	wchar_t *domain;
 	wchar_t *server;
 
@@ -439,7 +439,7 @@
 		return rc;
 	}
 
-	/* convert ses->user_name to unicode and uppercase */
+	/* convert ses->user_name to unicode */
 	len = ses->user_name ? strlen(ses->user_name) : 0;
 	user = kmalloc(2 + (len * 2), GFP_KERNEL);
 	if (user == NULL) {
@@ -448,7 +448,7 @@
 	}
 
 	if (len) {
-		len = cifs_strtoUTF16((__le16 *)user, ses->user_name, len, nls_cp);
+		len = cifs_strtoUTF16(user, ses->user_name, len, nls_cp);
 		UniStrupr(user);
 	} else {
 		memset(user, '\0', 2);
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 4f07f6f..ea3a0b3 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -44,6 +44,7 @@
 #define MAX_TREE_SIZE (2 + MAX_SERVER_SIZE + 1 + MAX_SHARE_SIZE + 1)
 #define MAX_SERVER_SIZE 15
 #define MAX_SHARE_SIZE 80
+#define CIFS_MAX_DOMAINNAME_LEN 256 /* max domain name length */
 #define MAX_USERNAME_SIZE 256	/* reasonable maximum for current servers */
 #define MAX_PASSWORD_SIZE 512	/* max for windows seems to be 256 wide chars */
 
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index e3bc39b..d05a300 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -377,6 +377,7 @@
 		try_to_freeze();
 
 		/* we should try only the port we connected to before */
+		mutex_lock(&server->srv_mutex);
 		rc = generic_ip_connect(server);
 		if (rc) {
 			cifs_dbg(FYI, "reconnect error %d\n", rc);
@@ -388,6 +389,7 @@
 				server->tcpStatus = CifsNeedNegotiate;
 			spin_unlock(&GlobalMid_Lock);
 		}
+		mutex_unlock(&server->srv_mutex);
 	} while (server->tcpStatus == CifsNeedReconnect);
 
 	return rc;
@@ -1662,7 +1664,8 @@
 			if (string == NULL)
 				goto out_nomem;
 
-			if (strnlen(string, 256) == 256) {
+			if (strnlen(string, CIFS_MAX_DOMAINNAME_LEN)
+					== CIFS_MAX_DOMAINNAME_LEN) {
 				printk(KERN_WARNING "CIFS: domain name too"
 						    " long\n");
 				goto cifs_parse_mount_err;
@@ -2288,8 +2291,8 @@
 
 #ifdef CONFIG_KEYS
 
-/* strlen("cifs:a:") + INET6_ADDRSTRLEN + 1 */
-#define CIFSCREDS_DESC_SIZE (7 + INET6_ADDRSTRLEN + 1)
+/* strlen("cifs:a:") + CIFS_MAX_DOMAINNAME_LEN + 1 */
+#define CIFSCREDS_DESC_SIZE (7 + CIFS_MAX_DOMAINNAME_LEN + 1)
 
 /* Populate username and pw fields from keyring if possible */
 static int
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 5699b50..0c2425b 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -491,6 +491,7 @@
 		if (server->ops->close)
 			server->ops->close(xid, tcon, &fid);
 		cifs_del_pending_open(&open);
+		fput(file);
 		rc = -ENOMEM;
 	}
 
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 4d8ba8d..93ac223 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -553,11 +553,10 @@
 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
 	int rc = 0;
 
-	/* we are going to update can_cache_brlcks here - need a write access */
-	down_write(&cinode->lock_sem);
+	down_read(&cinode->lock_sem);
 	if (cinode->can_cache_brlcks) {
-		/* can cache locks - no need to push them */
-		up_write(&cinode->lock_sem);
+		/* can cache locks - no need to relock */
+		up_read(&cinode->lock_sem);
 		return rc;
 	}
 
@@ -568,7 +567,7 @@
 	else
 		rc = tcon->ses->server->ops->push_mand_locks(cfile);
 
-	up_write(&cinode->lock_sem);
+	up_read(&cinode->lock_sem);
 	return rc;
 }
 
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 20efd81..449b6cf 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -558,6 +558,11 @@
 			fattr->cf_mode &= ~(S_IWUGO);
 
 		fattr->cf_nlink = le32_to_cpu(info->NumberOfLinks);
+		if (fattr->cf_nlink < 1) {
+			cifs_dbg(1, "replacing bogus file nlink value %u\n",
+				fattr->cf_nlink);
+			fattr->cf_nlink = 1;
+		}
 	}
 
 	fattr->cf_uid = cifs_sb->mnt_uid;
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 770d5a9..036279c 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -111,6 +111,14 @@
 			return;
 	}
 
+	/*
+	 * If we know that the inode will need to be revalidated immediately,
+	 * then don't create a new dentry for it. We'll end up doing an on
+	 * the wire call either way and this spares us an invalidation.
+	 */
+	if (fattr->cf_flags & CIFS_FATTR_NEED_REVAL)
+		return;
+
 	dentry = d_alloc(parent, name);
 	if (!dentry)
 		return;
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index f230571..8edc9eb 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -198,7 +198,7 @@
 		bytes_ret = 0;
 	} else
 		bytes_ret = cifs_strtoUTF16((__le16 *) bcc_ptr, ses->domainName,
-					    256, nls_cp);
+					    CIFS_MAX_DOMAINNAME_LEN, nls_cp);
 	bcc_ptr += 2 * bytes_ret;
 	bcc_ptr += 2;  /* account for null terminator */
 
@@ -256,8 +256,8 @@
 
 	/* copy domain */
 	if (ses->domainName != NULL) {
-		strncpy(bcc_ptr, ses->domainName, 256);
-		bcc_ptr += strnlen(ses->domainName, 256);
+		strncpy(bcc_ptr, ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
+		bcc_ptr += strnlen(ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
 	} /* else we will send a null domain name
 	     so the server will default to its own domain */
 	*bcc_ptr = 0;
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index 10383d8..4f791e0 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -413,19 +413,76 @@
 }
 
 static bool
-smb2_is_valid_lease_break(char *buffer, struct TCP_Server_Info *server)
+smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
+		    struct smb2_lease_break_work *lw)
+{
+	bool found;
+	__u8 lease_state;
+	struct list_head *tmp;
+	struct cifsFileInfo *cfile;
+	struct cifs_pending_open *open;
+	struct cifsInodeInfo *cinode;
+	int ack_req = le32_to_cpu(rsp->Flags &
+				  SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED);
+
+	lease_state = smb2_map_lease_to_oplock(rsp->NewLeaseState);
+
+	list_for_each(tmp, &tcon->openFileList) {
+		cfile = list_entry(tmp, struct cifsFileInfo, tlist);
+		cinode = CIFS_I(cfile->dentry->d_inode);
+
+		if (memcmp(cinode->lease_key, rsp->LeaseKey,
+							SMB2_LEASE_KEY_SIZE))
+			continue;
+
+		cifs_dbg(FYI, "found in the open list\n");
+		cifs_dbg(FYI, "lease key match, lease break 0x%d\n",
+			 le32_to_cpu(rsp->NewLeaseState));
+
+		smb2_set_oplock_level(cinode, lease_state);
+
+		if (ack_req)
+			cfile->oplock_break_cancelled = false;
+		else
+			cfile->oplock_break_cancelled = true;
+
+		queue_work(cifsiod_wq, &cfile->oplock_break);
+		kfree(lw);
+		return true;
+	}
+
+	found = false;
+	list_for_each_entry(open, &tcon->pending_opens, olist) {
+		if (memcmp(open->lease_key, rsp->LeaseKey,
+			   SMB2_LEASE_KEY_SIZE))
+			continue;
+
+		if (!found && ack_req) {
+			found = true;
+			memcpy(lw->lease_key, open->lease_key,
+			       SMB2_LEASE_KEY_SIZE);
+			lw->tlink = cifs_get_tlink(open->tlink);
+			queue_work(cifsiod_wq, &lw->lease_break);
+		}
+
+		cifs_dbg(FYI, "found in the pending open list\n");
+		cifs_dbg(FYI, "lease key match, lease break 0x%d\n",
+			 le32_to_cpu(rsp->NewLeaseState));
+
+		open->oplock = lease_state;
+	}
+	return found;
+}
+
+static bool
+smb2_is_valid_lease_break(char *buffer)
 {
 	struct smb2_lease_break *rsp = (struct smb2_lease_break *)buffer;
 	struct list_head *tmp, *tmp1, *tmp2;
+	struct TCP_Server_Info *server;
 	struct cifs_ses *ses;
 	struct cifs_tcon *tcon;
-	struct cifsInodeInfo *cinode;
-	struct cifsFileInfo *cfile;
-	struct cifs_pending_open *open;
 	struct smb2_lease_break_work *lw;
-	bool found;
-	int ack_req = le32_to_cpu(rsp->Flags &
-				  SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED);
 
 	lw = kmalloc(sizeof(struct smb2_lease_break_work), GFP_KERNEL);
 	if (!lw)
@@ -438,71 +495,26 @@
 
 	/* look up tcon based on tid & uid */
 	spin_lock(&cifs_tcp_ses_lock);
-	list_for_each(tmp, &server->smb_ses_list) {
-		ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
+	list_for_each(tmp, &cifs_tcp_ses_list) {
+		server = list_entry(tmp, struct TCP_Server_Info, tcp_ses_list);
 
-		spin_lock(&cifs_file_list_lock);
-		list_for_each(tmp1, &ses->tcon_list) {
-			tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
+		list_for_each(tmp1, &server->smb_ses_list) {
+			ses = list_entry(tmp1, struct cifs_ses, smb_ses_list);
 
-			cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
-			list_for_each(tmp2, &tcon->openFileList) {
-				cfile = list_entry(tmp2, struct cifsFileInfo,
-						   tlist);
-				cinode = CIFS_I(cfile->dentry->d_inode);
-
-				if (memcmp(cinode->lease_key, rsp->LeaseKey,
-					   SMB2_LEASE_KEY_SIZE))
-					continue;
-
-				cifs_dbg(FYI, "found in the open list\n");
-				cifs_dbg(FYI, "lease key match, lease break 0x%d\n",
-					 le32_to_cpu(rsp->NewLeaseState));
-
-				smb2_set_oplock_level(cinode,
-				  smb2_map_lease_to_oplock(rsp->NewLeaseState));
-
-				if (ack_req)
-					cfile->oplock_break_cancelled = false;
-				else
-					cfile->oplock_break_cancelled = true;
-
-				queue_work(cifsiod_wq, &cfile->oplock_break);
-
-				spin_unlock(&cifs_file_list_lock);
-				spin_unlock(&cifs_tcp_ses_lock);
-				return true;
-			}
-
-			found = false;
-			list_for_each_entry(open, &tcon->pending_opens, olist) {
-				if (memcmp(open->lease_key, rsp->LeaseKey,
-					   SMB2_LEASE_KEY_SIZE))
-					continue;
-
-				if (!found && ack_req) {
-					found = true;
-					memcpy(lw->lease_key, open->lease_key,
-					       SMB2_LEASE_KEY_SIZE);
-					lw->tlink = cifs_get_tlink(open->tlink);
-					queue_work(cifsiod_wq,
-						   &lw->lease_break);
+			spin_lock(&cifs_file_list_lock);
+			list_for_each(tmp2, &ses->tcon_list) {
+				tcon = list_entry(tmp2, struct cifs_tcon,
+						  tcon_list);
+				cifs_stats_inc(
+				    &tcon->stats.cifs_stats.num_oplock_brks);
+				if (smb2_tcon_has_lease(tcon, rsp, lw)) {
+					spin_unlock(&cifs_file_list_lock);
+					spin_unlock(&cifs_tcp_ses_lock);
+					return true;
 				}
-
-				cifs_dbg(FYI, "found in the pending open list\n");
-				cifs_dbg(FYI, "lease key match, lease break 0x%d\n",
-					 le32_to_cpu(rsp->NewLeaseState));
-
-				open->oplock =
-				  smb2_map_lease_to_oplock(rsp->NewLeaseState);
 			}
-			if (found) {
-				spin_unlock(&cifs_file_list_lock);
-				spin_unlock(&cifs_tcp_ses_lock);
-				return true;
-			}
+			spin_unlock(&cifs_file_list_lock);
 		}
-		spin_unlock(&cifs_file_list_lock);
 	}
 	spin_unlock(&cifs_tcp_ses_lock);
 	kfree(lw);
@@ -528,7 +540,7 @@
 	if (rsp->StructureSize !=
 				smb2_rsp_struct_sizes[SMB2_OPLOCK_BREAK_HE]) {
 		if (le16_to_cpu(rsp->StructureSize) == 44)
-			return smb2_is_valid_lease_break(buffer, server);
+			return smb2_is_valid_lease_break(buffer);
 		else
 			return false;
 	}
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 60ef5f9..50c080a 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -534,8 +534,7 @@
  */
 void debugfs_remove_recursive(struct dentry *dentry)
 {
-	struct dentry *child;
-	struct dentry *parent;
+	struct dentry *child, *next, *parent;
 
 	if (IS_ERR_OR_NULL(dentry))
 		return;
@@ -545,61 +544,37 @@
 		return;
 
 	parent = dentry;
+ down:
 	mutex_lock(&parent->d_inode->i_mutex);
+	list_for_each_entry_safe(child, next, &parent->d_subdirs, d_u.d_child) {
+		if (!debugfs_positive(child))
+			continue;
 
-	while (1) {
-		/*
-		 * When all dentries under "parent" has been removed,
-		 * walk up the tree until we reach our starting point.
-		 */
-		if (list_empty(&parent->d_subdirs)) {
-			mutex_unlock(&parent->d_inode->i_mutex);
-			if (parent == dentry)
-				break;
-			parent = parent->d_parent;
-			mutex_lock(&parent->d_inode->i_mutex);
-		}
-		child = list_entry(parent->d_subdirs.next, struct dentry,
-				d_u.d_child);
- next_sibling:
-
-		/*
-		 * If "child" isn't empty, walk down the tree and
-		 * remove all its descendants first.
-		 */
+		/* perhaps simple_empty(child) makes more sense */
 		if (!list_empty(&child->d_subdirs)) {
 			mutex_unlock(&parent->d_inode->i_mutex);
 			parent = child;
-			mutex_lock(&parent->d_inode->i_mutex);
-			continue;
+			goto down;
 		}
-		__debugfs_remove(child, parent);
-		if (parent->d_subdirs.next == &child->d_u.d_child) {
-			/*
-			 * Try the next sibling.
-			 */
-			if (child->d_u.d_child.next != &parent->d_subdirs) {
-				child = list_entry(child->d_u.d_child.next,
-						   struct dentry,
-						   d_u.d_child);
-				goto next_sibling;
-			}
-
-			/*
-			 * Avoid infinite loop if we fail to remove
-			 * one dentry.
-			 */
-			mutex_unlock(&parent->d_inode->i_mutex);
-			break;
-		}
-		simple_release_fs(&debugfs_mount, &debugfs_mount_count);
+ up:
+		if (!__debugfs_remove(child, parent))
+			simple_release_fs(&debugfs_mount, &debugfs_mount_count);
 	}
 
-	parent = dentry->d_parent;
-	mutex_lock(&parent->d_inode->i_mutex);
-	__debugfs_remove(dentry, parent);
 	mutex_unlock(&parent->d_inode->i_mutex);
-	simple_release_fs(&debugfs_mount, &debugfs_mount_count);
+	child = parent;
+	parent = parent->d_parent;
+	mutex_lock(&parent->d_inode->i_mutex);
+
+	if (child != dentry) {
+		next = list_entry(child->d_u.d_child.next, struct dentry,
+					d_u.d_child);
+		goto up;
+	}
+
+	if (!__debugfs_remove(child, parent))
+		simple_release_fs(&debugfs_mount, &debugfs_mount_count);
+	mutex_unlock(&parent->d_inode->i_mutex);
 }
 EXPORT_SYMBOL_GPL(debugfs_remove_recursive);
 
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index 7d52806..4725a07 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -1149,7 +1149,7 @@
 	struct ecryptfs_msg_ctx *msg_ctx;
 	struct ecryptfs_message *msg = NULL;
 	char *auth_tok_sig;
-	char *payload;
+	char *payload = NULL;
 	size_t payload_len = 0;
 	int rc;
 
@@ -1203,6 +1203,7 @@
 	}
 out:
 	kfree(msg);
+	kfree(payload);
 	return rc;
 }
 
diff --git a/fs/exec.c b/fs/exec.c
index c568bdc..b23604c 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -607,7 +607,7 @@
 		return -ENOMEM;
 
 	lru_add_drain();
-	tlb_gather_mmu(&tlb, mm, 0);
+	tlb_gather_mmu(&tlb, mm, old_start, old_end);
 	if (new_end > old_start) {
 		/*
 		 * when the old and new regions overlap clear from new_end.
@@ -624,7 +624,7 @@
 		free_pgd_range(&tlb, old_start, old_end, new_end,
 			vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
 	}
-	tlb_finish_mmu(&tlb, new_end, old_end);
+	tlb_finish_mmu(&tlb, old_start, old_end);
 
 	/*
 	 * Shrink the vma to just the new range.  Always succeeds.
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
index 692de13..cea8ecf 100644
--- a/fs/ext3/namei.c
+++ b/fs/ext3/namei.c
@@ -576,11 +576,8 @@
 		if (!ext3_check_dir_entry("htree_dirblock_to_tree", dir, de, bh,
 					(block<<EXT3_BLOCK_SIZE_BITS(dir->i_sb))
 						+((char *)de - bh->b_data))) {
-			/* On error, skip the f_pos to the next block. */
-			dir_file->f_pos = (dir_file->f_pos |
-					(dir->i_sb->s_blocksize - 1)) + 1;
-			brelse (bh);
-			return count;
+			/* silently ignore the rest of the block */
+			break;
 		}
 		ext3fs_dirhash(de->name, de->name_len, hinfo);
 		if ((hinfo->hash < start_hash) ||
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 0d8ffe5..5aae297 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -4678,7 +4678,7 @@
 		 * Truncate pagecache after we've waited for commit
 		 * in data=journal mode to make pages freeable.
 		 */
-			truncate_pagecache(inode, oldsize, inode->i_size);
+		truncate_pagecache(inode, oldsize, inode->i_size);
 	}
 	/*
 	 * We want to call ext4_truncate() even if attr->ia_size ==
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 21f2140..a10a6f4 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -4878,14 +4878,6 @@
 		goto restore_opts;
 	}
 
-	if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
-	    test_opt(sb, JOURNAL_CHECKSUM)) {
-		ext4_msg(sb, KERN_ERR, "changing journal_checksum "
-			 "during remount not supported");
-		err = -EINVAL;
-		goto restore_opts;
-	}
-
 	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
 		if (test_opt2(sb, EXPLICIT_DELALLOC)) {
 			ext4_msg(sb, KERN_ERR, "can't mount with "
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 495ca87..791fc29 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -1013,7 +1013,9 @@
 	struct backing_dev_info *bdi = wb->bdi;
 	long pages_written;
 
-	set_worker_desc("flush-%s", dev_name(bdi->dev));
+	if (bdi->dev)
+		set_worker_desc("flush-%s", dev_name(bdi->dev));
+
 	current->flags |= PF_SWAPWRITE;
 
 	if (likely(!current_is_workqueue_rescuer() ||
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index f3f783d..e67b13d 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -1175,6 +1175,8 @@
 			return -EIO;
 		if (reclen > nbytes)
 			break;
+		if (memchr(dirent->name, '/', dirent->namelen) != NULL)
+			return -EIO;
 
 		over = filldir(dstbuf, dirent->name, dirent->namelen,
 			       file->f_pos, dirent->ino, dirent->type);
@@ -1225,13 +1227,29 @@
 		if (name.name[1] == '.' && name.len == 2)
 			return 0;
 	}
+
+	if (invalid_nodeid(o->nodeid))
+		return -EIO;
+	if (!fuse_valid_type(o->attr.mode))
+		return -EIO;
+
 	fc = get_fuse_conn(dir);
 
 	name.hash = full_name_hash(name.name, name.len);
 	dentry = d_lookup(parent, &name);
-	if (dentry && dentry->d_inode) {
+	if (dentry) {
 		inode = dentry->d_inode;
-		if (get_node_id(inode) == o->nodeid) {
+		if (!inode) {
+			d_drop(dentry);
+		} else if (get_node_id(inode) != o->nodeid ||
+			   ((o->attr.mode ^ inode->i_mode) & S_IFMT)) {
+			err = d_invalidate(dentry);
+			if (err)
+				goto out;
+		} else if (is_bad_inode(inode)) {
+			err = -EIO;
+			goto out;
+		} else {
 			struct fuse_inode *fi;
 			fi = get_fuse_inode(inode);
 			spin_lock(&fc->lock);
@@ -1244,9 +1262,6 @@
 			 */
 			goto found;
 		}
-		err = d_invalidate(dentry);
-		if (err)
-			goto out;
 		dput(dentry);
 		dentry = NULL;
 	}
@@ -1261,10 +1276,19 @@
 	if (!inode)
 		goto out;
 
-	alias = d_materialise_unique(dentry, inode);
-	err = PTR_ERR(alias);
-	if (IS_ERR(alias))
-		goto out;
+	if (S_ISDIR(inode->i_mode)) {
+		mutex_lock(&fc->inst_mutex);
+		alias = fuse_d_add_directory(dentry, inode);
+		mutex_unlock(&fc->inst_mutex);
+		err = PTR_ERR(alias);
+		if (IS_ERR(alias)) {
+			iput(inode);
+			goto out;
+		}
+	} else {
+		alias = d_splice_alias(inode, dentry);
+	}
+
 	if (alias) {
 		dput(dentry);
 		dentry = alias;
@@ -1301,6 +1325,8 @@
 			return -EIO;
 		if (reclen > nbytes)
 			break;
+		if (memchr(dirent->name, '/', dirent->namelen) != NULL)
+			return -EIO;
 
 		if (!over) {
 			/* We fill entries into dstbuf only as much as
@@ -1572,6 +1598,7 @@
 		    struct file *file)
 {
 	struct fuse_conn *fc = get_fuse_conn(inode);
+	struct fuse_inode *fi = get_fuse_inode(inode);
 	struct fuse_req *req;
 	struct fuse_setattr_in inarg;
 	struct fuse_attr_out outarg;
@@ -1599,8 +1626,10 @@
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
-	if (is_truncate)
+	if (is_truncate) {
 		fuse_set_nowrite(inode);
+		set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
+	}
 
 	memset(&inarg, 0, sizeof(inarg));
 	memset(&outarg, 0, sizeof(outarg));
@@ -1662,12 +1691,14 @@
 		invalidate_inode_pages2(inode->i_mapping);
 	}
 
+	clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
 	return 0;
 
 error:
 	if (is_truncate)
 		fuse_release_nowrite(inode);
 
+	clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
 	return err;
 }
 
@@ -1731,6 +1762,8 @@
 		fc->no_setxattr = 1;
 		err = -EOPNOTSUPP;
 	}
+	if (!err)
+		fuse_invalidate_attr(inode);
 	return err;
 }
 
@@ -1860,6 +1893,8 @@
 		fc->no_removexattr = 1;
 		err = -EOPNOTSUPP;
 	}
+	if (!err)
+		fuse_invalidate_attr(inode);
 	return err;
 }
 
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 35f2810..4fafb84 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -630,7 +630,8 @@
 	struct fuse_inode *fi = get_fuse_inode(inode);
 
 	spin_lock(&fc->lock);
-	if (attr_ver == fi->attr_version && size < inode->i_size) {
+	if (attr_ver == fi->attr_version && size < inode->i_size &&
+	    !test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
 		fi->attr_version = ++fc->attr_version;
 		i_size_write(inode, size);
 	}
@@ -1033,12 +1034,16 @@
 {
 	struct inode *inode = mapping->host;
 	struct fuse_conn *fc = get_fuse_conn(inode);
+	struct fuse_inode *fi = get_fuse_inode(inode);
 	int err = 0;
 	ssize_t res = 0;
 
 	if (is_bad_inode(inode))
 		return -EIO;
 
+	if (inode->i_size < pos + iov_iter_count(ii))
+		set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
+
 	do {
 		struct fuse_req *req;
 		ssize_t count;
@@ -1074,6 +1079,7 @@
 	if (res > 0)
 		fuse_write_update_size(inode, pos);
 
+	clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
 	fuse_invalidate_attr(inode);
 
 	return res > 0 ? res : err;
@@ -1530,7 +1536,6 @@
 
 	inc_bdi_stat(mapping->backing_dev_info, BDI_WRITEBACK);
 	inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);
-	end_page_writeback(page);
 
 	spin_lock(&fc->lock);
 	list_add(&req->writepages_entry, &fi->writepages);
@@ -1538,6 +1543,8 @@
 	fuse_flush_writepages(inode);
 	spin_unlock(&fc->lock);
 
+	end_page_writeback(page);
+
 	return 0;
 
 err_free:
@@ -2461,6 +2468,7 @@
 {
 	struct fuse_file *ff = file->private_data;
 	struct inode *inode = file->f_inode;
+	struct fuse_inode *fi = get_fuse_inode(inode);
 	struct fuse_conn *fc = ff->fc;
 	struct fuse_req *req;
 	struct fuse_fallocate_in inarg = {
@@ -2478,10 +2486,20 @@
 
 	if (lock_inode) {
 		mutex_lock(&inode->i_mutex);
-		if (mode & FALLOC_FL_PUNCH_HOLE)
-			fuse_set_nowrite(inode);
+		if (mode & FALLOC_FL_PUNCH_HOLE) {
+			loff_t endbyte = offset + length - 1;
+			err = filemap_write_and_wait_range(inode->i_mapping,
+							   offset, endbyte);
+			if (err)
+				goto out;
+
+			fuse_sync_writes(inode);
+		}
 	}
 
+	if (!(mode & FALLOC_FL_KEEP_SIZE))
+		set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
+
 	req = fuse_get_req_nopages(fc);
 	if (IS_ERR(req)) {
 		err = PTR_ERR(req);
@@ -2514,11 +2532,11 @@
 	fuse_invalidate_attr(inode);
 
 out:
-	if (lock_inode) {
-		if (mode & FALLOC_FL_PUNCH_HOLE)
-			fuse_release_nowrite(inode);
+	if (!(mode & FALLOC_FL_KEEP_SIZE))
+		clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
+
+	if (lock_inode)
 		mutex_unlock(&inode->i_mutex);
-	}
 
 	return err;
 }
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index fde7249..5ced199 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -115,6 +115,8 @@
 enum {
 	/** Advise readdirplus  */
 	FUSE_I_ADVISE_RDPLUS,
+	/** An operation changing file size is in progress  */
+	FUSE_I_SIZE_UNSTABLE,
 };
 
 struct fuse_conn;
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index a796d1c..5757c34 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -202,7 +202,8 @@
 	struct timespec old_mtime;
 
 	spin_lock(&fc->lock);
-	if (attr_version != 0 && fi->attr_version > attr_version) {
+	if ((attr_version != 0 && fi->attr_version > attr_version) ||
+	    test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
 		spin_unlock(&fc->lock);
 		return;
 	}
diff --git a/fs/hpfs/map.c b/fs/hpfs/map.c
index 4acb19d..803d3da 100644
--- a/fs/hpfs/map.c
+++ b/fs/hpfs/map.c
@@ -17,7 +17,8 @@
 			 struct quad_buffer_head *qbh, char *id)
 {
 	secno sec;
-	if (hpfs_sb(s)->sb_chk) if (bmp_block * 16384 > hpfs_sb(s)->sb_fs_size) {
+	unsigned n_bands = (hpfs_sb(s)->sb_fs_size + 0x3fff) >> 14;
+	if (hpfs_sb(s)->sb_chk) if (bmp_block >= n_bands) {
 		hpfs_error(s, "hpfs_map_bitmap called with bad parameter: %08x at %s", bmp_block, id);
 		return NULL;
 	}
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
index fc80376..d37f6ff 100644
--- a/fs/hpfs/super.c
+++ b/fs/hpfs/super.c
@@ -560,7 +560,13 @@
 	sbi->sb_cp_table = NULL;
 	sbi->sb_c_bitmap = -1;
 	sbi->sb_max_fwd_alloc = 0xffffff;
-	
+
+	if (sbi->sb_fs_size >= 0x80000000) {
+		hpfs_error(s, "invalid size in superblock: %08x",
+			(unsigned)sbi->sb_fs_size);
+		goto bail4;
+	}
+
 	/* Load bitmap directory */
 	if (!(sbi->sb_bmp_dir = hpfs_load_bitmap_directory(s, le32_to_cpu(superblock->bitmaps))))
 		goto bail4;
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index d9b8aeb..d370549 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -125,8 +125,8 @@
 
 static int isofs_remount(struct super_block *sb, int *flags, char *data)
 {
-	/* we probably want a lot more here */
-	*flags |= MS_RDONLY;
+	if (!(*flags & MS_RDONLY))
+		return -EROFS;
 	return 0;
 }
 
@@ -779,15 +779,6 @@
 	 */
 	s->s_maxbytes = 0x80000000000LL;
 
-	/*
-	 * The CDROM is read-only, has no nodes (devices) on it, and since
-	 * all of the files appear to be owned by root, we really do not want
-	 * to allow suid.  (suid or devices will not show up unless we have
-	 * Rock Ridge extensions)
-	 */
-
-	s->s_flags |= MS_RDONLY /* | MS_NODEV | MS_NOSUID */;
-
 	/* Set this for reference. Its not currently used except on write
 	   which we don't have .. */
 
@@ -1546,6 +1537,9 @@
 static struct dentry *isofs_mount(struct file_system_type *fs_type,
 	int flags, const char *dev_name, void *data)
 {
+	/* We don't support read-write mounts */
+	if (!(flags & MS_RDONLY))
+		return ERR_PTR(-EACCES);
 	return mount_bdev(fs_type, flags, dev_name, data, isofs_fill_super);
 }
 
diff --git a/fs/jfs/jfs_dtree.c b/fs/jfs/jfs_dtree.c
index 0ddbece..c450fdb 100644
--- a/fs/jfs/jfs_dtree.c
+++ b/fs/jfs/jfs_dtree.c
@@ -3047,6 +3047,14 @@
 
 		dir_index = (u32) filp->f_pos;
 
+		/*
+		 * NFSv4 reserves cookies 1 and 2 for . and .. so we add
+		 * the value we return to the vfs is one greater than the
+		 * one we use internally.
+		 */
+		if (dir_index)
+			dir_index--;
+
 		if (dir_index > 1) {
 			struct dir_table_slot dirtab_slot;
 
@@ -3086,7 +3094,7 @@
 			if (p->header.flag & BT_INTERNAL) {
 				jfs_err("jfs_readdir: bad index table");
 				DT_PUTPAGE(mp);
-				filp->f_pos = -1;
+				filp->f_pos = DIREND;
 				return 0;
 			}
 		} else {
@@ -3094,7 +3102,7 @@
 				/*
 				 * self "."
 				 */
-				filp->f_pos = 0;
+				filp->f_pos = 1;
 				if (filldir(dirent, ".", 1, 0, ip->i_ino,
 					    DT_DIR))
 					return 0;
@@ -3102,7 +3110,7 @@
 			/*
 			 * parent ".."
 			 */
-			filp->f_pos = 1;
+			filp->f_pos = 2;
 			if (filldir(dirent, "..", 2, 1, PARENT(ip), DT_DIR))
 				return 0;
 
@@ -3123,24 +3131,25 @@
 		/*
 		 * Legacy filesystem - OS/2 & Linux JFS < 0.3.6
 		 *
-		 * pn = index = 0:	First entry "."
-		 * pn = 0; index = 1:	Second entry ".."
+		 * pn = 0; index = 1:	First entry "."
+		 * pn = 0; index = 2:	Second entry ".."
 		 * pn > 0:		Real entries, pn=1 -> leftmost page
 		 * pn = index = -1:	No more entries
 		 */
 		dtpos = filp->f_pos;
-		if (dtpos == 0) {
+		if (dtpos < 2) {
 			/* build "." entry */
 
+			filp->f_pos = 1;
 			if (filldir(dirent, ".", 1, filp->f_pos, ip->i_ino,
 				    DT_DIR))
 				return 0;
-			dtoffset->index = 1;
+			dtoffset->index = 2;
 			filp->f_pos = dtpos;
 		}
 
 		if (dtoffset->pn == 0) {
-			if (dtoffset->index == 1) {
+			if (dtoffset->index == 2) {
 				/* build ".." entry */
 
 				if (filldir(dirent, "..", 2, filp->f_pos,
@@ -3233,6 +3242,12 @@
 					}
 					jfs_dirent->position = unique_pos++;
 				}
+				/*
+				 * We add 1 to the index because we may
+				 * use a value of 2 internally, and NFSv4
+				 * doesn't like that.
+				 */
+				jfs_dirent->position++;
 			} else {
 				jfs_dirent->position = dtpos;
 				len = min(d_namleft, DTLHDRDATALEN_LEGACY);
diff --git a/fs/jfs/jfs_inode.c b/fs/jfs/jfs_inode.c
index c1a3e60..7f464c5 100644
--- a/fs/jfs/jfs_inode.c
+++ b/fs/jfs/jfs_inode.c
@@ -95,7 +95,7 @@
 
 	if (insert_inode_locked(inode) < 0) {
 		rc = -EINVAL;
-		goto fail_unlock;
+		goto fail_put;
 	}
 
 	inode_init_owner(inode, parent, mode);
@@ -156,7 +156,6 @@
 fail_drop:
 	dquot_drop(inode);
 	inode->i_flags |= S_NOQUOTA;
-fail_unlock:
 	clear_nlink(inode);
 	unlock_new_inode(inode);
 fail_put:
diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c
index 01bfe766..41e491b 100644
--- a/fs/lockd/clntlock.c
+++ b/fs/lockd/clntlock.c
@@ -64,12 +64,17 @@
 				   nlm_init->protocol, nlm_version,
 				   nlm_init->hostname, nlm_init->noresvport,
 				   nlm_init->net);
-	if (host == NULL) {
-		lockd_down(nlm_init->net);
-		return ERR_PTR(-ENOLCK);
-	}
+	if (host == NULL)
+		goto out_nohost;
+	if (host->h_rpcclnt == NULL && nlm_bind_host(host) == NULL)
+		goto out_nobind;
 
 	return host;
+out_nobind:
+	nlmclnt_release_host(host);
+out_nohost:
+	lockd_down(nlm_init->net);
+	return ERR_PTR(-ENOLCK);
 }
 EXPORT_SYMBOL_GPL(nlmclnt_init);
 
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index 9760ecb..acd3947 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -125,14 +125,15 @@
 {
 	struct nlm_args	*argp = &req->a_args;
 	struct nlm_lock	*lock = &argp->lock;
+	char *nodename = req->a_host->h_rpcclnt->cl_nodename;
 
 	nlmclnt_next_cookie(&argp->cookie);
 	memcpy(&lock->fh, NFS_FH(file_inode(fl->fl_file)), sizeof(struct nfs_fh));
-	lock->caller  = utsname()->nodename;
+	lock->caller  = nodename;
 	lock->oh.data = req->a_owner;
 	lock->oh.len  = snprintf(req->a_owner, sizeof(req->a_owner), "%u@%s",
 				(unsigned int)fl->fl_u.nfs_fl.owner->pid,
-				utsname()->nodename);
+				nodename);
 	lock->svid = fl->fl_u.nfs_fl.owner->pid;
 	lock->fl.fl_start = fl->fl_start;
 	lock->fl.fl_end = fl->fl_end;
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index e703318..8ebd3f55 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -939,6 +939,7 @@
 	unsigned long	timeout = MAX_SCHEDULE_TIMEOUT;
 	struct nlm_block *block;
 
+	spin_lock(&nlm_blocked_lock);
 	while (!list_empty(&nlm_blocked) && !kthread_should_stop()) {
 		block = list_entry(nlm_blocked.next, struct nlm_block, b_list);
 
@@ -948,6 +949,7 @@
 			timeout = block->b_when - jiffies;
 			break;
 		}
+		spin_unlock(&nlm_blocked_lock);
 
 		dprintk("nlmsvc_retry_blocked(%p, when=%ld)\n",
 			block, block->b_when);
@@ -957,7 +959,9 @@
 			retry_deferred_block(block);
 		} else
 			nlmsvc_grant_blocked(block);
+		spin_lock(&nlm_blocked_lock);
 	}
+	spin_unlock(&nlm_blocked_lock);
 
 	return timeout;
 }
diff --git a/fs/namespace.c b/fs/namespace.c
index 7b1ca9b..a45ba4f 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1429,7 +1429,7 @@
 			 CL_COPY_ALL | CL_PRIVATE);
 	namespace_unlock();
 	if (IS_ERR(tree))
-		return NULL;
+		return ERR_CAST(tree);
 	return &tree->mnt;
 }
 
diff --git a/fs/nfs/nfs4filelayoutdev.c b/fs/nfs/nfs4filelayoutdev.c
index 661a0f6..678cb89 100644
--- a/fs/nfs/nfs4filelayoutdev.c
+++ b/fs/nfs/nfs4filelayoutdev.c
@@ -797,34 +797,34 @@
 	struct nfs4_file_layout_dsaddr *dsaddr = FILELAYOUT_LSEG(lseg)->dsaddr;
 	struct nfs4_pnfs_ds *ds = dsaddr->ds_list[ds_idx];
 	struct nfs4_deviceid_node *devid = FILELAYOUT_DEVID_NODE(lseg);
-
-	if (filelayout_test_devid_unavailable(devid))
-		return NULL;
+	struct nfs4_pnfs_ds *ret = ds;
 
 	if (ds == NULL) {
 		printk(KERN_ERR "NFS: %s: No data server for offset index %d\n",
 			__func__, ds_idx);
 		filelayout_mark_devid_invalid(devid);
-		return NULL;
+		goto out;
 	}
 	if (ds->ds_clp)
-		return ds;
+		goto out_test_devid;
 
 	if (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) == 0) {
 		struct nfs_server *s = NFS_SERVER(lseg->pls_layout->plh_inode);
 		int err;
 
 		err = nfs4_ds_connect(s, ds);
-		if (err) {
+		if (err)
 			nfs4_mark_deviceid_unavailable(devid);
-			ds = NULL;
-		}
 		nfs4_clear_ds_conn_bit(ds);
 	} else {
 		/* Either ds is connected, or ds is NULL */
 		nfs4_wait_ds_connect(ds);
 	}
-	return ds;
+out_test_devid:
+	if (filelayout_test_devid_unavailable(devid))
+		ret = NULL;
+out:
+	return ret;
 }
 
 module_param(dataserver_retrans, uint, 0644);
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 1fab140..2c37442 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -228,19 +228,8 @@
 	return status;
 }
 
-/*
- * Back channel returns NFS4ERR_DELAY for new requests when
- * NFS4_SESSION_DRAINING is set so there is no work to be done when draining
- * is ended.
- */
-static void nfs4_end_drain_session(struct nfs_client *clp)
+static void nfs4_end_drain_slot_table(struct nfs4_slot_table *tbl)
 {
-	struct nfs4_session *ses = clp->cl_session;
-	struct nfs4_slot_table *tbl;
-
-	if (ses == NULL)
-		return;
-	tbl = &ses->fc_slot_table;
 	if (test_and_clear_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state)) {
 		spin_lock(&tbl->slot_tbl_lock);
 		nfs41_wake_slot_table(tbl);
@@ -248,6 +237,16 @@
 	}
 }
 
+static void nfs4_end_drain_session(struct nfs_client *clp)
+{
+	struct nfs4_session *ses = clp->cl_session;
+
+	if (ses != NULL) {
+		nfs4_end_drain_slot_table(&ses->bc_slot_table);
+		nfs4_end_drain_slot_table(&ses->fc_slot_table);
+	}
+}
+
 /*
  * Signal state manager thread if session fore channel is drained
  */
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 6cd86e0..582321a 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -162,8 +162,8 @@
 	 */
 	memcpy(p, argp->p, avail);
 	/* step to next page */
-	argp->p = page_address(argp->pagelist[0]);
 	argp->pagelist++;
+	argp->p = page_address(argp->pagelist[0]);
 	if (argp->pagelen < PAGE_SIZE) {
 		argp->end = argp->p + (argp->pagelen>>2);
 		argp->pagelen = 0;
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index a6bc8a7..b586699 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -802,9 +802,10 @@
 			flags = O_WRONLY|O_LARGEFILE;
 	}
 	*filp = dentry_open(&path, flags, current_cred());
-	if (IS_ERR(*filp))
+	if (IS_ERR(*filp)) {
 		host_err = PTR_ERR(*filp);
-	else {
+		*filp = NULL;
+	} else {
 		host_err = ima_file_check(*filp, may_flags);
 
 		if (may_flags & NFSD_MAY_64BIT_COOKIE)
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index 0ba6798..da27664 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -94,6 +94,7 @@
 	clear_buffer_nilfs_volatile(bh);
 	clear_buffer_nilfs_checked(bh);
 	clear_buffer_nilfs_redirected(bh);
+	clear_buffer_async_write(bh);
 	clear_buffer_dirty(bh);
 	if (nilfs_page_buffers_clean(page))
 		__nilfs_clear_page_dirty(page);
@@ -429,6 +430,7 @@
 					"discard block %llu, size %zu",
 					(u64)bh->b_blocknr, bh->b_size);
 			}
+			clear_buffer_async_write(bh);
 			clear_buffer_dirty(bh);
 			clear_buffer_nilfs_volatile(bh);
 			clear_buffer_nilfs_checked(bh);
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
index dc9a913..2d8be51 100644
--- a/fs/nilfs2/segbuf.c
+++ b/fs/nilfs2/segbuf.c
@@ -345,8 +345,7 @@
 
 	if (err == -EOPNOTSUPP) {
 		set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
-		bio_put(bio);
-		/* to be detected by submit_seg_bio() */
+		/* to be detected by nilfs_segbuf_submit_bio() */
 	}
 
 	if (!uptodate)
@@ -377,12 +376,12 @@
 	bio->bi_private = segbuf;
 	bio_get(bio);
 	submit_bio(mode, bio);
+	segbuf->sb_nbio++;
 	if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
 		bio_put(bio);
 		err = -EOPNOTSUPP;
 		goto failed;
 	}
-	segbuf->sb_nbio++;
 	bio_put(bio);
 
 	wi->bio = NULL;
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index a5752a58..cbd6618 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -665,7 +665,7 @@
 
 		bh = head = page_buffers(page);
 		do {
-			if (!buffer_dirty(bh))
+			if (!buffer_dirty(bh) || buffer_async_write(bh))
 				continue;
 			get_bh(bh);
 			list_add_tail(&bh->b_assoc_buffers, listp);
@@ -699,7 +699,8 @@
 		for (i = 0; i < pagevec_count(&pvec); i++) {
 			bh = head = page_buffers(pvec.pages[i]);
 			do {
-				if (buffer_dirty(bh)) {
+				if (buffer_dirty(bh) &&
+						!buffer_async_write(bh)) {
 					get_bh(bh);
 					list_add_tail(&bh->b_assoc_buffers,
 						      listp);
@@ -1579,6 +1580,7 @@
 
 		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
 				    b_assoc_buffers) {
+			set_buffer_async_write(bh);
 			if (bh->b_page != bd_page) {
 				if (bd_page) {
 					lock_page(bd_page);
@@ -1592,6 +1594,7 @@
 
 		list_for_each_entry(bh, &segbuf->sb_payload_buffers,
 				    b_assoc_buffers) {
+			set_buffer_async_write(bh);
 			if (bh == segbuf->sb_super_root) {
 				if (bh->b_page != bd_page) {
 					lock_page(bd_page);
@@ -1677,6 +1680,7 @@
 	list_for_each_entry(segbuf, logs, sb_list) {
 		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
 				    b_assoc_buffers) {
+			clear_buffer_async_write(bh);
 			if (bh->b_page != bd_page) {
 				if (bd_page)
 					end_page_writeback(bd_page);
@@ -1686,6 +1690,7 @@
 
 		list_for_each_entry(bh, &segbuf->sb_payload_buffers,
 				    b_assoc_buffers) {
+			clear_buffer_async_write(bh);
 			if (bh == segbuf->sb_super_root) {
 				if (bh->b_page != bd_page) {
 					end_page_writeback(bd_page);
@@ -1755,6 +1760,7 @@
 				    b_assoc_buffers) {
 			set_buffer_uptodate(bh);
 			clear_buffer_dirty(bh);
+			clear_buffer_async_write(bh);
 			if (bh->b_page != bd_page) {
 				if (bd_page)
 					end_page_writeback(bd_page);
@@ -1776,6 +1782,7 @@
 				    b_assoc_buffers) {
 			set_buffer_uptodate(bh);
 			clear_buffer_dirty(bh);
+			clear_buffer_async_write(bh);
 			clear_buffer_delay(bh);
 			clear_buffer_nilfs_volatile(bh);
 			clear_buffer_nilfs_redirected(bh);
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index 6c80083..77cc85d 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -122,6 +122,7 @@
 	metadata->event_len = FAN_EVENT_METADATA_LEN;
 	metadata->metadata_len = FAN_EVENT_METADATA_LEN;
 	metadata->vers = FANOTIFY_METADATA_VERSION;
+	metadata->reserved = 0;
 	metadata->mask = event->mask & FAN_ALL_OUTGOING_EVENTS;
 	metadata->pid = pid_vnr(event->tgid);
 	if (unlikely(event->mask & FAN_Q_OVERFLOW))
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
index 2487116..8460647 100644
--- a/fs/ocfs2/extent_map.c
+++ b/fs/ocfs2/extent_map.c
@@ -781,7 +781,6 @@
 	cpos = map_start >> osb->s_clustersize_bits;
 	mapping_end = ocfs2_clusters_for_bytes(inode->i_sb,
 					       map_start + map_len);
-	mapping_end -= cpos;
 	is_last = 0;
 	while (cpos < mapping_end && !is_last) {
 		u32 fe_flags;
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 2e3ea30..5b8d944 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -6499,6 +6499,16 @@
 	}
 
 	new_oi = OCFS2_I(args->new_inode);
+	/*
+	 * Adjust extent record count to reserve space for extended attribute.
+	 * Inline data count had been adjusted in ocfs2_duplicate_inline_data().
+	 */
+	if (!(new_oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) &&
+	    !(ocfs2_inode_is_fast_symlink(args->new_inode))) {
+		struct ocfs2_extent_list *el = &new_di->id2.i_list;
+		le16_add_cpu(&el->l_count, -(inline_size /
+					sizeof(struct ocfs2_extent_rec)));
+	}
 	spin_lock(&new_oi->ip_lock);
 	new_oi->ip_dyn_features |= OCFS2_HAS_XATTR_FL | OCFS2_INLINE_XATTR_FL;
 	new_di->i_dyn_features = cpu_to_le16(new_oi->ip_dyn_features);
diff --git a/fs/proc/Makefile b/fs/proc/Makefile
index ab30716..61135f5 100644
--- a/fs/proc/Makefile
+++ b/fs/proc/Makefile
@@ -3,6 +3,7 @@
 #
 
 obj-y   += proc.o
+obj-y   += history_record.o
 
 proc-y			:= nommu.o task_nommu.o
 proc-$(CONFIG_MMU)	:= task_mmu.o
diff --git a/fs/proc/base.c b/fs/proc/base.c
index b5553af..2208ff0 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -324,8 +324,25 @@
 			seq_printf(m, "[<%pK>] %pS\n",
 				   (void *)entries[i], (void *)entries[i]);
 		}
-		unlock_trace(task);
+	} else
+		goto out;
+
+	trace.nr_entries	= 0;
+	trace.max_entries	= MAX_STACK_TRACE_DEPTH;
+	trace.entries		= entries;
+	trace.skip		= 0;
+
+	seq_printf(m, "userspace\n");
+
+	save_stack_trace_user_task(task, &trace);
+
+	for (i = 0; i < trace.nr_entries; i++) {
+		if (entries[i] != ULONG_MAX)
+			seq_printf(m, "%p\n", (void *)entries[i]);
 	}
+	unlock_trace(task);
+
+out:
 	kfree(entries);
 
 	return err;
diff --git a/fs/proc/history_record.c b/fs/proc/history_record.c
new file mode 100644
index 0000000..a91818f
--- /dev/null
+++ b/fs/proc/history_record.c
@@ -0,0 +1,138 @@
+#include <linux/proc_fs.h>
+#include <linux/atomic.h>
+#include <linux/printk.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/history_record.h>
+
+static struct saved_history_record all_history_record[SAVED_HISTORY_MAX];
+static atomic_t saved_history_current = ATOMIC_INIT(-1);
+
+/* mapping entry name according type */
+static char *entry_name[] = {
+			" ",
+			"sgx-1",
+			"sgx-2",
+			"pipe",
+			"msvdx_stat",
+			"vdc_stat",
+};
+
+static void history_record_init(struct saved_history_record *record)
+{
+	int cpu;
+
+	cpu = raw_smp_processor_id();
+	record->ts = cpu_clock(cpu);
+	record->record_value.value = 0;
+}
+
+struct saved_history_record *get_new_history_record(void)
+{
+	struct saved_history_record *precord = NULL;
+	int ret = atomic_add_return(1, &saved_history_current);
+	/* in case overflow */
+	if (ret < 0) {
+		atomic_set(&saved_history_current, 0);
+		ret = 0;
+	}
+	precord = &all_history_record[ret%SAVED_HISTORY_MAX];
+	history_record_init(precord);
+	return precord;
+}
+EXPORT_SYMBOL(get_new_history_record);
+
+static void print_saved_history_record(struct saved_history_record *record)
+{
+	unsigned long long ts = record->ts;
+	unsigned long nanosec_rem = do_div(ts, 1000000000);
+
+	printk(KERN_INFO "----\n");
+	switch (record->type) {
+	case 1:
+	case 2:
+		printk(KERN_INFO "name:[%s] ts[%5lu.%06lu] HostIrqCountSample[%u] InterruptCount[%u]\n",
+				entry_name[record->type],
+				(unsigned long)ts,
+				nanosec_rem / 1000,
+				record->record_value.sgx.HostIrqCountSample,
+				record->record_value.sgx.InterruptCount);
+		break;
+
+	case 3:
+		printk(KERN_INFO "name:[%s] ts[%5lu.%06lu] pipe[%u] pipe_stat_val[%#x]\n",
+				entry_name[record->type],
+				(unsigned long)ts,
+				nanosec_rem / 1000,
+				record->record_value.pipe.pipe_nu,
+				record->record_value.pipe.pipe_stat_val);
+		break;
+
+	case 4:
+		printk(KERN_INFO "name:[%s] ts[%5lu.%06lu] msvdx_stat[%#lx]\n",
+				entry_name[record->type],
+				(unsigned long)ts,
+				nanosec_rem / 1000,
+				record->record_value.msvdx_stat);
+		break;
+
+	case 5:
+		printk(KERN_INFO "name:[%s] ts[%5lu.%06lu] vdc_stat[%#lx]\n",
+				entry_name[record->type],
+				(unsigned long)ts,
+				nanosec_rem / 1000,
+				record->record_value.vdc_stat);
+		break;
+
+	default:
+		break;
+
+	}
+}
+
+
+void interrupt_dump_history(void)
+{
+	int i, start;
+	unsigned int total = atomic_read(&saved_history_current);
+
+	start = total % SAVED_HISTORY_MAX;
+	printk(KERN_INFO "<----current timestamp\n");
+	printk(KERN_INFO "start[%d] saved[%d]\n",
+			start, total);
+	for (i = start; i >= 0; i--) {
+		if (i % 10 == 0)
+			schedule();
+		print_saved_history_record(&all_history_record[i]);
+	}
+	for (i = SAVED_HISTORY_MAX - 1; i > start; i--) {
+		if (i % 10 == 0)
+			schedule();
+		print_saved_history_record(&all_history_record[i]);
+	}
+}
+EXPORT_SYMBOL(interrupt_dump_history);
+
+static ssize_t debug_read_history_record(struct file *f,
+		char __user *buf, size_t size, loff_t *off)
+{
+	interrupt_dump_history();
+	return 0;
+}
+
+static const struct file_operations debug_history_proc_fops = {
+           .owner = THIS_MODULE,
+           .read = debug_read_history_record,
+};
+
+static int __init debug_read_history_record_entry(void)
+{
+	struct proc_dir_entry *res = NULL;
+	res = proc_create_data("debug_read_sgx_history", S_IRUGO | S_IWUSR,
+				NULL, &debug_history_proc_fops, NULL);
+	return 0;
+}
+
+device_initcall(debug_read_history_record_entry);
diff --git a/fs/proc/root.c b/fs/proc/root.c
index 85c5018..3df19e6e 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -112,7 +112,8 @@
 		ns = task_active_pid_ns(current);
 		options = data;
 
-		if (!current_user_ns()->may_mount_proc)
+		if (!current_user_ns()->may_mount_proc ||
+		    !ns_capable(ns->user_ns, CAP_SYS_ADMIN))
 			return ERR_PTR(-EPERM);
 	}
 
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index f479cc4..e7e7d26 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -883,14 +883,14 @@
 } pagemap_entry_t;
 
 struct pagemapread {
-	int pos, len;
+	int pos, len;		/* units: PM_ENTRY_BYTES, not bytes */
 	pagemap_entry_t *buffer;
 };
 
 #define PAGEMAP_WALK_SIZE	(PMD_SIZE)
 #define PAGEMAP_WALK_MASK	(PMD_MASK)
 
-#define PM_ENTRY_BYTES      sizeof(u64)
+#define PM_ENTRY_BYTES      sizeof(pagemap_entry_t)
 #define PM_STATUS_BITS      3
 #define PM_STATUS_OFFSET    (64 - PM_STATUS_BITS)
 #define PM_STATUS_MASK      (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
@@ -1129,8 +1129,8 @@
 	if (!count)
 		goto out_task;
 
-	pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
-	pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
+	pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
+	pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY);
 	ret = -ENOMEM;
 	if (!pm.buffer)
 		goto out_task;
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index 4f11f23..72d2c2b 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -206,7 +206,7 @@
 static struct console pstore_console = {
 	.name	= "pstore",
 	.write	= pstore_console_write,
-	.flags	= CON_PRINTBUFFER | CON_ENABLED | CON_ANYTIME,
+	.flags	= CON_PRINTBUFFER | CON_ENABLED | CON_ANYTIME | CON_IGNORELEVEL,
 	.index	= -1,
 };
 
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
index 3a28d46..fc5516b 100644
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -180,6 +180,9 @@
 	/* ECC correction notice */
 	ecc_notice_size = persistent_ram_ecc_string(prz, NULL, 0);
 
+	if (!(size + ecc_notice_size))
+		return 0;
+
 	*buf = kmalloc(size + ecc_notice_size + 1, GFP_KERNEL);
 	if (*buf == NULL)
 		return -ENOMEM;
@@ -432,8 +435,6 @@
 		goto fail_out;
 	}
 
-	if (!is_power_of_2(pdata->mem_size))
-		pdata->mem_size = rounddown_pow_of_two(pdata->mem_size);
 	if (!is_power_of_2(pdata->record_size))
 		pdata->record_size = rounddown_pow_of_two(pdata->record_size);
 	if (!is_power_of_2(pdata->console_size))
diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
index 5933732..3ec3a85 100644
--- a/fs/pstore/ram_core.c
+++ b/fs/pstore/ram_core.c
@@ -33,6 +33,18 @@
 	uint8_t     data[0];
 };
 
+/*
+ * struct persistent_ram_buffer_ctrl
+ *
+ * This structure controls the offset where the pstore is written at, and its
+ * size.  This structure must be in a cacheable memory area, so that atomic
+ * accesses don't lock the RAM bus and trigger starvation.
+ */
+struct persistent_ram_buffer_ctrl {
+	atomic_t    start;
+	atomic_t    size;
+};
+
 #define PERSISTENT_RAM_SIG (0x43474244) /* DBGC */
 
 static inline size_t buffer_size(struct persistent_ram_zone *prz)
@@ -49,15 +61,19 @@
 static inline size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
 {
 	int old;
+	int cur;
 	int new;
 
+	cur = atomic_read(&prz->buffer_ctrl->start);
 	do {
-		old = atomic_read(&prz->buffer->start);
+		old = cur;
 		new = old + a;
 		while (unlikely(new > prz->buffer_size))
 			new -= prz->buffer_size;
-	} while (atomic_cmpxchg(&prz->buffer->start, old, new) != old);
+		cur = atomic_cmpxchg(&prz->buffer_ctrl->start, old, new);
+	} while (cur != old);
 
+	atomic_set(&prz->buffer->start, atomic_read(&prz->buffer_ctrl->start));
 	return old;
 }
 
@@ -65,17 +81,22 @@
 static inline void buffer_size_add(struct persistent_ram_zone *prz, size_t a)
 {
 	size_t old;
+	size_t cur;
 	size_t new;
 
-	if (atomic_read(&prz->buffer->size) == prz->buffer_size)
+	if (atomic_read(&prz->buffer_ctrl->size) == prz->buffer_size)
 		return;
 
+	cur = atomic_read(&prz->buffer_ctrl->size);
 	do {
-		old = atomic_read(&prz->buffer->size);
+		old = cur;
 		new = old + a;
 		if (new > prz->buffer_size)
 			new = prz->buffer_size;
-	} while (atomic_cmpxchg(&prz->buffer->size, old, new) != old);
+		cur = atomic_cmpxchg(&prz->buffer_ctrl->size, old, new);
+	} while (cur != old);
+
+	atomic_set(&prz->buffer->size, atomic_read(&prz->buffer_ctrl->size));
 }
 
 static void notrace persistent_ram_encode_rs8(struct persistent_ram_zone *prz,
@@ -326,10 +347,17 @@
 	prz->old_log_size = 0;
 }
 
+void persistent_ram_sync_ctrl_buffer(struct persistent_ram_zone *prz)
+{
+	atomic_set(&prz->buffer_ctrl->start, atomic_read(&prz->buffer->start));
+	atomic_set(&prz->buffer_ctrl->size, atomic_read(&prz->buffer->size));
+}
+
 void persistent_ram_zap(struct persistent_ram_zone *prz)
 {
 	atomic_set(&prz->buffer->start, 0);
 	atomic_set(&prz->buffer->size, 0);
+	persistent_ram_sync_ctrl_buffer(prz);
 	persistent_ram_update_header_ecc(prz);
 }
 
@@ -344,9 +372,11 @@
 
 	page_start = start - offset_in_page(start);
 	page_count = DIV_ROUND_UP(size + offset_in_page(start), PAGE_SIZE);
-
+#ifdef CONFIG_X86_64
+	prot = pgprot_writecombine(PAGE_KERNEL);
+#else
 	prot = pgprot_noncached(PAGE_KERNEL);
-
+#endif
 	pages = kmalloc(sizeof(struct page *) * page_count, GFP_KERNEL);
 	if (!pages) {
 		pr_err("%s: Failed to allocate array for %u pages\n", __func__,
@@ -401,11 +431,18 @@
 static int persistent_ram_post_init(struct persistent_ram_zone *prz, u32 sig,
 				    struct persistent_ram_ecc_info *ecc_info)
 {
-	int ret;
+	int ret = -ENOMEM;
+
+	prz->buffer_ctrl = kmalloc(sizeof(struct persistent_ram_buffer_ctrl), GFP_KERNEL);
+	if (!prz->buffer_ctrl) {
+		pr_err("persistent_ram_post_init: failed to allocate persistent ram control buffer\n");
+		goto err;
+	}
+	persistent_ram_sync_ctrl_buffer(prz);
 
 	ret = persistent_ram_init_ecc(prz, ecc_info);
 	if (ret)
-		return ret;
+		goto err;
 
 	sig ^= PERSISTENT_RAM_SIG;
 
@@ -431,6 +468,9 @@
 	persistent_ram_zap(prz);
 
 	return 0;
+err:
+	kfree(prz->buffer_ctrl);
+	return ret;
 }
 
 void persistent_ram_free(struct persistent_ram_zone *prz)
@@ -448,6 +488,7 @@
 		prz->vaddr = NULL;
 	}
 	persistent_ram_free_old(prz);
+	kfree(prz->buffer_ctrl);
 	kfree(prz);
 }
 
diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
index 33532f7..1d48974 100644
--- a/fs/reiserfs/procfs.c
+++ b/fs/reiserfs/procfs.c
@@ -19,12 +19,13 @@
 /*
  * LOCKING:
  *
- * We rely on new Alexander Viro's super-block locking.
+ * These guys are evicted from procfs as the very first step in ->kill_sb().
  *
  */
 
-static int show_version(struct seq_file *m, struct super_block *sb)
+static int show_version(struct seq_file *m, void *unused)
 {
+	struct super_block *sb = m->private;
 	char *format;
 
 	if (REISERFS_SB(sb)->s_properties & (1 << REISERFS_3_6)) {
@@ -66,8 +67,9 @@
 #define DJP( x ) le32_to_cpu( jp -> x )
 #define JF( x ) ( r -> s_journal -> x )
 
-static int show_super(struct seq_file *m, struct super_block *sb)
+static int show_super(struct seq_file *m, void *unused)
 {
+	struct super_block *sb = m->private;
 	struct reiserfs_sb_info *r = REISERFS_SB(sb);
 
 	seq_printf(m, "state: \t%s\n"
@@ -128,8 +130,9 @@
 	return 0;
 }
 
-static int show_per_level(struct seq_file *m, struct super_block *sb)
+static int show_per_level(struct seq_file *m, void *unused)
 {
+	struct super_block *sb = m->private;
 	struct reiserfs_sb_info *r = REISERFS_SB(sb);
 	int level;
 
@@ -186,8 +189,9 @@
 	return 0;
 }
 
-static int show_bitmap(struct seq_file *m, struct super_block *sb)
+static int show_bitmap(struct seq_file *m, void *unused)
 {
+	struct super_block *sb = m->private;
 	struct reiserfs_sb_info *r = REISERFS_SB(sb);
 
 	seq_printf(m, "free_block: %lu\n"
@@ -218,8 +222,9 @@
 	return 0;
 }
 
-static int show_on_disk_super(struct seq_file *m, struct super_block *sb)
+static int show_on_disk_super(struct seq_file *m, void *unused)
 {
+	struct super_block *sb = m->private;
 	struct reiserfs_sb_info *sb_info = REISERFS_SB(sb);
 	struct reiserfs_super_block *rs = sb_info->s_rs;
 	int hash_code = DFL(s_hash_function_code);
@@ -261,8 +266,9 @@
 	return 0;
 }
 
-static int show_oidmap(struct seq_file *m, struct super_block *sb)
+static int show_oidmap(struct seq_file *m, void *unused)
 {
+	struct super_block *sb = m->private;
 	struct reiserfs_sb_info *sb_info = REISERFS_SB(sb);
 	struct reiserfs_super_block *rs = sb_info->s_rs;
 	unsigned int mapsize = le16_to_cpu(rs->s_v1.s_oid_cursize);
@@ -291,8 +297,9 @@
 	return 0;
 }
 
-static int show_journal(struct seq_file *m, struct super_block *sb)
+static int show_journal(struct seq_file *m, void *unused)
 {
+	struct super_block *sb = m->private;
 	struct reiserfs_sb_info *r = REISERFS_SB(sb);
 	struct reiserfs_super_block *rs = r->s_rs;
 	struct journal_params *jp = &rs->s_v1.s_journal;
@@ -383,92 +390,24 @@
 	return 0;
 }
 
-/* iterator */
-static int test_sb(struct super_block *sb, void *data)
-{
-	return data == sb;
-}
-
-static int set_sb(struct super_block *sb, void *data)
-{
-	return -ENOENT;
-}
-
-struct reiserfs_seq_private {
-	struct super_block *sb;
-	int (*show) (struct seq_file *, struct super_block *);
-};
-
-static void *r_start(struct seq_file *m, loff_t * pos)
-{
-	struct reiserfs_seq_private *priv = m->private;
-	loff_t l = *pos;
-
-	if (l)
-		return NULL;
-
-	if (IS_ERR(sget(&reiserfs_fs_type, test_sb, set_sb, 0, priv->sb)))
-		return NULL;
-
-	up_write(&priv->sb->s_umount);
-	return priv->sb;
-}
-
-static void *r_next(struct seq_file *m, void *v, loff_t * pos)
-{
-	++*pos;
-	if (v)
-		deactivate_super(v);
-	return NULL;
-}
-
-static void r_stop(struct seq_file *m, void *v)
-{
-	if (v)
-		deactivate_super(v);
-}
-
-static int r_show(struct seq_file *m, void *v)
-{
-	struct reiserfs_seq_private *priv = m->private;
-	return priv->show(m, v);
-}
-
-static const struct seq_operations r_ops = {
-	.start = r_start,
-	.next = r_next,
-	.stop = r_stop,
-	.show = r_show,
-};
-
 static int r_open(struct inode *inode, struct file *file)
 {
-	struct reiserfs_seq_private *priv;
-	int ret = seq_open_private(file, &r_ops,
-				   sizeof(struct reiserfs_seq_private));
-
-	if (!ret) {
-		struct seq_file *m = file->private_data;
-		priv = m->private;
-		priv->sb = proc_get_parent_data(inode);
-		priv->show = PDE_DATA(inode);
-	}
-	return ret;
+	return single_open(file, PDE_DATA(inode),
+				proc_get_parent_data(inode));
 }
 
 static const struct file_operations r_file_operations = {
 	.open = r_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
-	.release = seq_release_private,
-	.owner = THIS_MODULE,
+	.release = single_release,
 };
 
 static struct proc_dir_entry *proc_info_root = NULL;
 static const char proc_info_root_name[] = "fs/reiserfs";
 
 static void add_file(struct super_block *sb, char *name,
-		     int (*func) (struct seq_file *, struct super_block *))
+		     int (*func) (struct seq_file *, void *))
 {
 	proc_create_data(name, 0, REISERFS_SB(sb)->procdir,
 			 &r_file_operations, func);
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 9fc9d56..460f762 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -499,6 +499,7 @@
 static void reiserfs_kill_sb(struct super_block *s)
 {
 	if (REISERFS_SB(s)) {
+		reiserfs_proc_info_done(s);
 		/*
 		 * Force any pending inode evictions to occur now. Any
 		 * inodes to be removed that have extended attributes
@@ -554,8 +555,6 @@
 				 REISERFS_SB(s)->reserved_blocks);
 	}
 
-	reiserfs_proc_info_done(s);
-
 	reiserfs_write_unlock(s);
 	mutex_destroy(&REISERFS_SB(s)->lock);
 	kfree(s->s_fs_info);
diff --git a/fs/seq_file.c b/fs/seq_file.c
index 749b841..d84ca59 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -340,6 +340,8 @@
 				m->read_pos = offset;
 				retval = file->f_pos = offset;
 			}
+		} else {
+			file->f_pos = offset;
 		}
 	}
 	file->f_version = m->version;
diff --git a/fs/statfs.c b/fs/statfs.c
index c219e733..083dc0a 100644
--- a/fs/statfs.c
+++ b/fs/statfs.c
@@ -94,7 +94,7 @@
 
 int fd_statfs(int fd, struct kstatfs *st)
 {
-	struct fd f = fdget(fd);
+	struct fd f = fdget_raw(fd);
 	int error = -EBADF;
 	if (f.file) {
 		error = vfs_statfs(&f.file->f_path, st);
diff --git a/fs/super.c b/fs/super.c
index d86f525..4659bf4 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -331,19 +331,19 @@
  *	and want to turn it into a full-blown active reference.  grab_super()
  *	is called with sb_lock held and drops it.  Returns 1 in case of
  *	success, 0 if we had failed (superblock contents was already dead or
- *	dying when grab_super() had been called).
+ *	dying when grab_super() had been called).  Note that this is only
+ *	called for superblocks not in rundown mode (== ones still on ->fs_supers
+ *	of their type), so increment of ->s_count is OK here.
  */
 static int grab_super(struct super_block *s) __releases(sb_lock)
 {
-	if (atomic_inc_not_zero(&s->s_active)) {
-		spin_unlock(&sb_lock);
-		return 1;
-	}
-	/* it's going away */
 	s->s_count++;
 	spin_unlock(&sb_lock);
-	/* wait for it to die */
 	down_write(&s->s_umount);
+	if ((s->s_flags & MS_BORN) && atomic_inc_not_zero(&s->s_active)) {
+		put_super(s);
+		return 1;
+	}
 	up_write(&s->s_umount);
 	put_super(s);
 	return 0;
@@ -463,11 +463,6 @@
 				destroy_super(s);
 				s = NULL;
 			}
-			down_write(&old->s_umount);
-			if (unlikely(!(old->s_flags & MS_BORN))) {
-				deactivate_locked_super(old);
-				goto retry;
-			}
 			return old;
 		}
 	}
@@ -660,10 +655,10 @@
 		if (hlist_unhashed(&sb->s_instances))
 			continue;
 		if (sb->s_bdev == bdev) {
-			if (grab_super(sb)) /* drops sb_lock */
-				return sb;
-			else
+			if (!grab_super(sb))
 				goto restart;
+			up_write(&sb->s_umount);
+			return sb;
 		}
 	}
 	spin_unlock(&sb_lock);
diff --git a/fs/sysv/super.c b/fs/sysv/super.c
index d0c6a00..eda1095 100644
--- a/fs/sysv/super.c
+++ b/fs/sysv/super.c
@@ -487,6 +487,7 @@
 	sbi->s_sb = sb;
 	sbi->s_block_base = 0;
 	sbi->s_type = FSTYPE_V7;
+	mutex_init(&sbi->s_lock);
 	sb->s_fs_info = sbi;
 	
 	sb_set_blocksize(sb, 512);
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 0d00367..13fd841 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -1412,7 +1412,7 @@
 
 	ubifs_msg("mounted UBI device %d, volume %d, name \"%s\"%s",
 		  c->vi.ubi_num, c->vi.vol_id, c->vi.name,
-		  c->ro_mount ? ", R/O mode" : NULL);
+		  c->ro_mount ? ", R/O mode" : "");
 	x = (long long)c->main_lebs * c->leb_size;
 	y = (long long)c->log_lebs * c->leb_size + c->max_bud_bytes;
 	ubifs_msg("LEB size: %d bytes (%d KiB), min./max. I/O unit sizes: %d bytes/%d bytes",
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 3b1a4be..32f5297 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -631,6 +631,12 @@
 	int error = 0;
 	sync_filesystem(sb);
 
+	if (sbi->s_lvid_bh) {
+		int write_rev = le16_to_cpu(udf_sb_lvidiu(sbi)->minUDFWriteRev);
+		if (write_rev > UDF_MAX_WRITE_VERSION && !(*flags & MS_RDONLY))
+			return -EACCES;
+	}
+
 	uopt.flags = sbi->s_flags;
 	uopt.uid   = sbi->s_uid;
 	uopt.gid   = sbi->s_gid;
@@ -650,12 +656,6 @@
 	sbi->s_dmode = uopt.dmode;
 	write_unlock(&sbi->s_cred_lock);
 
-	if (sbi->s_lvid_bh) {
-		int write_rev = le16_to_cpu(udf_sb_lvidiu(sbi)->minUDFWriteRev);
-		if (write_rev > UDF_MAX_WRITE_VERSION)
-			*flags |= MS_RDONLY;
-	}
-
 	if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
 		goto out_unlock;
 
@@ -844,27 +844,38 @@
 	return 1;
 }
 
+/*
+ * Load primary Volume Descriptor Sequence
+ *
+ * Return <0 on error, 0 on success. -EAGAIN is special meaning next sequence
+ * should be tried.
+ */
 static int udf_load_pvoldesc(struct super_block *sb, sector_t block)
 {
 	struct primaryVolDesc *pvoldesc;
 	struct ustr *instr, *outstr;
 	struct buffer_head *bh;
 	uint16_t ident;
-	int ret = 1;
+	int ret = -ENOMEM;
 
 	instr = kmalloc(sizeof(struct ustr), GFP_NOFS);
 	if (!instr)
-		return 1;
+		return -ENOMEM;
 
 	outstr = kmalloc(sizeof(struct ustr), GFP_NOFS);
 	if (!outstr)
 		goto out1;
 
 	bh = udf_read_tagged(sb, block, block, &ident);
-	if (!bh)
+	if (!bh) {
+		ret = -EAGAIN;
 		goto out2;
+	}
 
-	BUG_ON(ident != TAG_IDENT_PVD);
+	if (ident != TAG_IDENT_PVD) {
+		ret = -EIO;
+		goto out_bh;
+	}
 
 	pvoldesc = (struct primaryVolDesc *)bh->b_data;
 
@@ -890,8 +901,9 @@
 		if (udf_CS0toUTF8(outstr, instr))
 			udf_debug("volSetIdent[] = '%s'\n", outstr->u_name);
 
-	brelse(bh);
 	ret = 0;
+out_bh:
+	brelse(bh);
 out2:
 	kfree(outstr);
 out1:
@@ -948,7 +960,7 @@
 
 		if (mdata->s_mirror_fe == NULL) {
 			udf_err(sb, "Both metadata and mirror metadata inode efe can not found\n");
-			goto error_exit;
+			return -EIO;
 		}
 	}
 
@@ -965,23 +977,18 @@
 			  addr.logicalBlockNum, addr.partitionReferenceNum);
 
 		mdata->s_bitmap_fe = udf_iget(sb, &addr);
-
 		if (mdata->s_bitmap_fe == NULL) {
 			if (sb->s_flags & MS_RDONLY)
 				udf_warn(sb, "bitmap inode efe not found but it's ok since the disc is mounted read-only\n");
 			else {
 				udf_err(sb, "bitmap inode efe not found and attempted read-write mount\n");
-				goto error_exit;
+				return -EIO;
 			}
 		}
 	}
 
 	udf_debug("udf_load_metadata_files Ok\n");
-
 	return 0;
-
-error_exit:
-	return 1;
 }
 
 static void udf_load_fileset(struct super_block *sb, struct buffer_head *bh,
@@ -1070,7 +1077,7 @@
 		if (!map->s_uspace.s_table) {
 			udf_debug("cannot load unallocSpaceTable (part %d)\n",
 				  p_index);
-			return 1;
+			return -EIO;
 		}
 		map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_TABLE;
 		udf_debug("unallocSpaceTable (part %d) @ %ld\n",
@@ -1080,7 +1087,7 @@
 	if (phd->unallocSpaceBitmap.extLength) {
 		struct udf_bitmap *bitmap = udf_sb_alloc_bitmap(sb, p_index);
 		if (!bitmap)
-			return 1;
+			return -ENOMEM;
 		map->s_uspace.s_bitmap = bitmap;
 		bitmap->s_extPosition = le32_to_cpu(
 				phd->unallocSpaceBitmap.extPosition);
@@ -1103,7 +1110,7 @@
 		if (!map->s_fspace.s_table) {
 			udf_debug("cannot load freedSpaceTable (part %d)\n",
 				  p_index);
-			return 1;
+			return -EIO;
 		}
 
 		map->s_partition_flags |= UDF_PART_FLAG_FREED_TABLE;
@@ -1114,7 +1121,7 @@
 	if (phd->freedSpaceBitmap.extLength) {
 		struct udf_bitmap *bitmap = udf_sb_alloc_bitmap(sb, p_index);
 		if (!bitmap)
-			return 1;
+			return -ENOMEM;
 		map->s_fspace.s_bitmap = bitmap;
 		bitmap->s_extPosition = le32_to_cpu(
 				phd->freedSpaceBitmap.extPosition);
@@ -1166,7 +1173,7 @@
 		udf_find_vat_block(sb, p_index, type1_index, blocks - 1);
 	}
 	if (!sbi->s_vat_inode)
-		return 1;
+		return -EIO;
 
 	if (map->s_partition_type == UDF_VIRTUAL_MAP15) {
 		map->s_type_specific.s_virtual.s_start_offset = 0;
@@ -1178,7 +1185,7 @@
 			pos = udf_block_map(sbi->s_vat_inode, 0);
 			bh = sb_bread(sb, pos);
 			if (!bh)
-				return 1;
+				return -EIO;
 			vat20 = (struct virtualAllocationTable20 *)bh->b_data;
 		} else {
 			vat20 = (struct virtualAllocationTable20 *)
@@ -1196,6 +1203,12 @@
 	return 0;
 }
 
+/*
+ * Load partition descriptor block
+ *
+ * Returns <0 on error, 0 on success, -EAGAIN is special - try next descriptor
+ * sequence.
+ */
 static int udf_load_partdesc(struct super_block *sb, sector_t block)
 {
 	struct buffer_head *bh;
@@ -1205,13 +1218,15 @@
 	int i, type1_idx;
 	uint16_t partitionNumber;
 	uint16_t ident;
-	int ret = 0;
+	int ret;
 
 	bh = udf_read_tagged(sb, block, block, &ident);
 	if (!bh)
-		return 1;
-	if (ident != TAG_IDENT_PD)
+		return -EAGAIN;
+	if (ident != TAG_IDENT_PD) {
+		ret = 0;
 		goto out_bh;
+	}
 
 	p = (struct partitionDesc *)bh->b_data;
 	partitionNumber = le16_to_cpu(p->partitionNumber);
@@ -1230,10 +1245,13 @@
 	if (i >= sbi->s_partitions) {
 		udf_debug("Partition (%d) not found in partition map\n",
 			  partitionNumber);
+		ret = 0;
 		goto out_bh;
 	}
 
 	ret = udf_fill_partdesc_info(sb, p, i);
+	if (ret < 0)
+		goto out_bh;
 
 	/*
 	 * Now rescan for VIRTUAL or METADATA partitions when SPARABLE and
@@ -1250,32 +1268,37 @@
 			break;
 	}
 
-	if (i >= sbi->s_partitions)
+	if (i >= sbi->s_partitions) {
+		ret = 0;
 		goto out_bh;
+	}
 
 	ret = udf_fill_partdesc_info(sb, p, i);
-	if (ret)
+	if (ret < 0)
 		goto out_bh;
 
 	if (map->s_partition_type == UDF_METADATA_MAP25) {
 		ret = udf_load_metadata_files(sb, i);
-		if (ret) {
+		if (ret < 0) {
 			udf_err(sb, "error loading MetaData partition map %d\n",
 				i);
 			goto out_bh;
 		}
 	} else {
-		ret = udf_load_vat(sb, i, type1_idx);
-		if (ret)
-			goto out_bh;
 		/*
-		 * Mark filesystem read-only if we have a partition with
-		 * virtual map since we don't handle writing to it (we
-		 * overwrite blocks instead of relocating them).
+		 * If we have a partition with virtual map, we don't handle
+		 * writing to it (we overwrite blocks instead of relocating
+		 * them).
 		 */
-		sb->s_flags |= MS_RDONLY;
-		pr_notice("Filesystem marked read-only because writing to pseudooverwrite partition is not implemented\n");
+		if (!(sb->s_flags & MS_RDONLY)) {
+			ret = -EACCES;
+			goto out_bh;
+		}
+		ret = udf_load_vat(sb, i, type1_idx);
+		if (ret < 0)
+			goto out_bh;
 	}
+	ret = 0;
 out_bh:
 	/* In case loading failed, we handle cleanup in udf_fill_super */
 	brelse(bh);
@@ -1341,11 +1364,11 @@
 	uint16_t ident;
 	struct buffer_head *bh;
 	unsigned int table_len;
-	int ret = 0;
+	int ret;
 
 	bh = udf_read_tagged(sb, block, block, &ident);
 	if (!bh)
-		return 1;
+		return -EAGAIN;
 	BUG_ON(ident != TAG_IDENT_LVD);
 	lvd = (struct logicalVolDesc *)bh->b_data;
 	table_len = le32_to_cpu(lvd->mapTableLength);
@@ -1353,7 +1376,7 @@
 		udf_err(sb, "error loading logical volume descriptor: "
 			"Partition table too long (%u > %lu)\n", table_len,
 			sb->s_blocksize - sizeof(*lvd));
-		ret = 1;
+		ret = -EIO;
 		goto out_bh;
 	}
 
@@ -1397,11 +1420,10 @@
 			} else if (!strncmp(upm2->partIdent.ident,
 						UDF_ID_SPARABLE,
 						strlen(UDF_ID_SPARABLE))) {
-				if (udf_load_sparable_map(sb, map,
-				    (struct sparablePartitionMap *)gpm) < 0) {
-					ret = 1;
+				ret = udf_load_sparable_map(sb, map,
+					(struct sparablePartitionMap *)gpm);
+				if (ret < 0)
 					goto out_bh;
-				}
 			} else if (!strncmp(upm2->partIdent.ident,
 						UDF_ID_METADATA,
 						strlen(UDF_ID_METADATA))) {
@@ -1466,7 +1488,7 @@
 	}
 	if (lvd->integritySeqExt.extLength)
 		udf_load_logicalvolint(sb, leea_to_cpu(lvd->integritySeqExt));
-
+	ret = 0;
 out_bh:
 	brelse(bh);
 	return ret;
@@ -1504,22 +1526,18 @@
 }
 
 /*
- * udf_process_sequence
+ * Process a main/reserve volume descriptor sequence.
+ *   @block		First block of first extent of the sequence.
+ *   @lastblock		Lastblock of first extent of the sequence.
+ *   @fileset		There we store extent containing root fileset
  *
- * PURPOSE
- *	Process a main/reserve volume descriptor sequence.
- *
- * PRE-CONDITIONS
- *	sb			Pointer to _locked_ superblock.
- *	block			First block of first extent of the sequence.
- *	lastblock		Lastblock of first extent of the sequence.
- *
- * HISTORY
- *	July 1, 1997 - Andrew E. Mileski
- *	Written, tested, and released.
+ * Returns <0 on error, 0 on success. -EAGAIN is special - try next descriptor
+ * sequence
  */
-static noinline int udf_process_sequence(struct super_block *sb, long block,
-				long lastblock, struct kernel_lb_addr *fileset)
+static noinline int udf_process_sequence(
+		struct super_block *sb,
+		sector_t block, sector_t lastblock,
+		struct kernel_lb_addr *fileset)
 {
 	struct buffer_head *bh = NULL;
 	struct udf_vds_record vds[VDS_POS_LENGTH];
@@ -1530,6 +1548,7 @@
 	uint32_t vdsn;
 	uint16_t ident;
 	long next_s = 0, next_e = 0;
+	int ret;
 
 	memset(vds, 0, sizeof(struct udf_vds_record) * VDS_POS_LENGTH);
 
@@ -1544,7 +1563,7 @@
 			udf_err(sb,
 				"Block %llu of volume descriptor sequence is corrupted or we could not read it\n",
 				(unsigned long long)block);
-			return 1;
+			return -EAGAIN;
 		}
 
 		/* Process each descriptor (ISO 13346 3/8.3-8.4) */
@@ -1617,14 +1636,19 @@
 	 */
 	if (!vds[VDS_POS_PRIMARY_VOL_DESC].block) {
 		udf_err(sb, "Primary Volume Descriptor not found!\n");
-		return 1;
+		return -EAGAIN;
 	}
-	if (udf_load_pvoldesc(sb, vds[VDS_POS_PRIMARY_VOL_DESC].block))
-		return 1;
+	ret = udf_load_pvoldesc(sb, vds[VDS_POS_PRIMARY_VOL_DESC].block);
+	if (ret < 0)
+		return ret;
 
-	if (vds[VDS_POS_LOGICAL_VOL_DESC].block && udf_load_logicalvol(sb,
-	    vds[VDS_POS_LOGICAL_VOL_DESC].block, fileset))
-		return 1;
+	if (vds[VDS_POS_LOGICAL_VOL_DESC].block) {
+		ret = udf_load_logicalvol(sb,
+					  vds[VDS_POS_LOGICAL_VOL_DESC].block,
+					  fileset);
+		if (ret < 0)
+			return ret;
+	}
 
 	if (vds[VDS_POS_PARTITION_DESC].block) {
 		/*
@@ -1633,19 +1657,27 @@
 		 */
 		for (block = vds[VDS_POS_PARTITION_DESC].block;
 		     block < vds[VDS_POS_TERMINATING_DESC].block;
-		     block++)
-			if (udf_load_partdesc(sb, block))
-				return 1;
+		     block++) {
+			ret = udf_load_partdesc(sb, block);
+			if (ret < 0)
+				return ret;
+		}
 	}
 
 	return 0;
 }
 
+/*
+ * Load Volume Descriptor Sequence described by anchor in bh
+ *
+ * Returns <0 on error, 0 on success
+ */
 static int udf_load_sequence(struct super_block *sb, struct buffer_head *bh,
 			     struct kernel_lb_addr *fileset)
 {
 	struct anchorVolDescPtr *anchor;
-	long main_s, main_e, reserve_s, reserve_e;
+	sector_t main_s, main_e, reserve_s, reserve_e;
+	int ret;
 
 	anchor = (struct anchorVolDescPtr *)bh->b_data;
 
@@ -1663,18 +1695,26 @@
 
 	/* Process the main & reserve sequences */
 	/* responsible for finding the PartitionDesc(s) */
-	if (!udf_process_sequence(sb, main_s, main_e, fileset))
-		return 1;
+	ret = udf_process_sequence(sb, main_s, main_e, fileset);
+	if (ret != -EAGAIN)
+		return ret;
 	udf_sb_free_partitions(sb);
-	if (!udf_process_sequence(sb, reserve_s, reserve_e, fileset))
-		return 1;
-	udf_sb_free_partitions(sb);
-	return 0;
+	ret = udf_process_sequence(sb, reserve_s, reserve_e, fileset);
+	if (ret < 0) {
+		udf_sb_free_partitions(sb);
+		/* No sequence was OK, return -EIO */
+		if (ret == -EAGAIN)
+			ret = -EIO;
+	}
+	return ret;
 }
 
 /*
  * Check whether there is an anchor block in the given block and
  * load Volume Descriptor Sequence if so.
+ *
+ * Returns <0 on error, 0 on success, -EAGAIN is special - try next anchor
+ * block
  */
 static int udf_check_anchor_block(struct super_block *sb, sector_t block,
 				  struct kernel_lb_addr *fileset)
@@ -1686,33 +1726,40 @@
 	if (UDF_QUERY_FLAG(sb, UDF_FLAG_VARCONV) &&
 	    udf_fixed_to_variable(block) >=
 	    sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits)
-		return 0;
+		return -EAGAIN;
 
 	bh = udf_read_tagged(sb, block, block, &ident);
 	if (!bh)
-		return 0;
+		return -EAGAIN;
 	if (ident != TAG_IDENT_AVDP) {
 		brelse(bh);
-		return 0;
+		return -EAGAIN;
 	}
 	ret = udf_load_sequence(sb, bh, fileset);
 	brelse(bh);
 	return ret;
 }
 
-/* Search for an anchor volume descriptor pointer */
-static sector_t udf_scan_anchors(struct super_block *sb, sector_t lastblock,
-				 struct kernel_lb_addr *fileset)
+/*
+ * Search for an anchor volume descriptor pointer.
+ *
+ * Returns < 0 on error, 0 on success. -EAGAIN is special - try next set
+ * of anchors.
+ */
+static int udf_scan_anchors(struct super_block *sb, sector_t *lastblock,
+			    struct kernel_lb_addr *fileset)
 {
 	sector_t last[6];
 	int i;
 	struct udf_sb_info *sbi = UDF_SB(sb);
 	int last_count = 0;
+	int ret;
 
 	/* First try user provided anchor */
 	if (sbi->s_anchor) {
-		if (udf_check_anchor_block(sb, sbi->s_anchor, fileset))
-			return lastblock;
+		ret = udf_check_anchor_block(sb, sbi->s_anchor, fileset);
+		if (ret != -EAGAIN)
+			return ret;
 	}
 	/*
 	 * according to spec, anchor is in either:
@@ -1721,39 +1768,46 @@
 	 *     lastblock
 	 *  however, if the disc isn't closed, it could be 512.
 	 */
-	if (udf_check_anchor_block(sb, sbi->s_session + 256, fileset))
-		return lastblock;
+	ret = udf_check_anchor_block(sb, sbi->s_session + 256, fileset);
+	if (ret != -EAGAIN)
+		return ret;
 	/*
 	 * The trouble is which block is the last one. Drives often misreport
 	 * this so we try various possibilities.
 	 */
-	last[last_count++] = lastblock;
-	if (lastblock >= 1)
-		last[last_count++] = lastblock - 1;
-	last[last_count++] = lastblock + 1;
-	if (lastblock >= 2)
-		last[last_count++] = lastblock - 2;
-	if (lastblock >= 150)
-		last[last_count++] = lastblock - 150;
-	if (lastblock >= 152)
-		last[last_count++] = lastblock - 152;
+	last[last_count++] = *lastblock;
+	if (*lastblock >= 1)
+		last[last_count++] = *lastblock - 1;
+	last[last_count++] = *lastblock + 1;
+	if (*lastblock >= 2)
+		last[last_count++] = *lastblock - 2;
+	if (*lastblock >= 150)
+		last[last_count++] = *lastblock - 150;
+	if (*lastblock >= 152)
+		last[last_count++] = *lastblock - 152;
 
 	for (i = 0; i < last_count; i++) {
 		if (last[i] >= sb->s_bdev->bd_inode->i_size >>
 				sb->s_blocksize_bits)
 			continue;
-		if (udf_check_anchor_block(sb, last[i], fileset))
-			return last[i];
+		ret = udf_check_anchor_block(sb, last[i], fileset);
+		if (ret != -EAGAIN) {
+			if (!ret)
+				*lastblock = last[i];
+			return ret;
+		}
 		if (last[i] < 256)
 			continue;
-		if (udf_check_anchor_block(sb, last[i] - 256, fileset))
-			return last[i];
+		ret = udf_check_anchor_block(sb, last[i] - 256, fileset);
+		if (ret != -EAGAIN) {
+			if (!ret)
+				*lastblock = last[i];
+			return ret;
+		}
 	}
 
 	/* Finally try block 512 in case media is open */
-	if (udf_check_anchor_block(sb, sbi->s_session + 512, fileset))
-		return last[0];
-	return 0;
+	return udf_check_anchor_block(sb, sbi->s_session + 512, fileset);
 }
 
 /*
@@ -1761,54 +1815,59 @@
  * area specified by it. The function expects sbi->s_lastblock to be the last
  * block on the media.
  *
- * Return 1 if ok, 0 if not found.
- *
+ * Return <0 on error, 0 if anchor found. -EAGAIN is special meaning anchor
+ * was not found.
  */
 static int udf_find_anchor(struct super_block *sb,
 			   struct kernel_lb_addr *fileset)
 {
-	sector_t lastblock;
 	struct udf_sb_info *sbi = UDF_SB(sb);
+	sector_t lastblock = sbi->s_last_block;
+	int ret;
 
-	lastblock = udf_scan_anchors(sb, sbi->s_last_block, fileset);
-	if (lastblock)
+	ret = udf_scan_anchors(sb, &lastblock, fileset);
+	if (ret != -EAGAIN)
 		goto out;
 
 	/* No anchor found? Try VARCONV conversion of block numbers */
 	UDF_SET_FLAG(sb, UDF_FLAG_VARCONV);
+	lastblock = udf_variable_to_fixed(sbi->s_last_block);
 	/* Firstly, we try to not convert number of the last block */
-	lastblock = udf_scan_anchors(sb,
-				udf_variable_to_fixed(sbi->s_last_block),
-				fileset);
-	if (lastblock)
+	ret = udf_scan_anchors(sb, &lastblock, fileset);
+	if (ret != -EAGAIN)
 		goto out;
 
+	lastblock = sbi->s_last_block;
 	/* Secondly, we try with converted number of the last block */
-	lastblock = udf_scan_anchors(sb, sbi->s_last_block, fileset);
-	if (!lastblock) {
+	ret = udf_scan_anchors(sb, &lastblock, fileset);
+	if (ret < 0) {
 		/* VARCONV didn't help. Clear it. */
 		UDF_CLEAR_FLAG(sb, UDF_FLAG_VARCONV);
-		return 0;
 	}
 out:
-	sbi->s_last_block = lastblock;
-	return 1;
+	if (ret == 0)
+		sbi->s_last_block = lastblock;
+	return ret;
 }
 
 /*
  * Check Volume Structure Descriptor, find Anchor block and load Volume
- * Descriptor Sequence
+ * Descriptor Sequence.
+ *
+ * Returns < 0 on error, 0 on success. -EAGAIN is special meaning anchor
+ * block was not found.
  */
 static int udf_load_vrs(struct super_block *sb, struct udf_options *uopt,
 			int silent, struct kernel_lb_addr *fileset)
 {
 	struct udf_sb_info *sbi = UDF_SB(sb);
 	loff_t nsr_off;
+	int ret;
 
 	if (!sb_set_blocksize(sb, uopt->blocksize)) {
 		if (!silent)
 			udf_warn(sb, "Bad block size\n");
-		return 0;
+		return -EINVAL;
 	}
 	sbi->s_last_block = uopt->lastblock;
 	if (!uopt->novrs) {
@@ -1829,12 +1888,13 @@
 
 	/* Look for anchor block and load Volume Descriptor Sequence */
 	sbi->s_anchor = uopt->anchor;
-	if (!udf_find_anchor(sb, fileset)) {
-		if (!silent)
+	ret = udf_find_anchor(sb, fileset);
+	if (ret < 0) {
+		if (!silent && ret == -EAGAIN)
 			udf_warn(sb, "No anchor found\n");
-		return 0;
+		return ret;
 	}
-	return 1;
+	return 0;
 }
 
 static void udf_open_lvid(struct super_block *sb)
@@ -1940,7 +2000,7 @@
 
 static int udf_fill_super(struct super_block *sb, void *options, int silent)
 {
-	int ret;
+	int ret = -EINVAL;
 	struct inode *inode = NULL;
 	struct udf_options uopt;
 	struct kernel_lb_addr rootdir, fileset;
@@ -2012,7 +2072,7 @@
 	} else {
 		uopt.blocksize = bdev_logical_block_size(sb->s_bdev);
 		ret = udf_load_vrs(sb, &uopt, silent, &fileset);
-		if (!ret && uopt.blocksize != UDF_DEFAULT_BLOCKSIZE) {
+		if (ret == -EAGAIN && uopt.blocksize != UDF_DEFAULT_BLOCKSIZE) {
 			if (!silent)
 				pr_notice("Rescanning with blocksize %d\n",
 					  UDF_DEFAULT_BLOCKSIZE);
@@ -2022,8 +2082,11 @@
 			ret = udf_load_vrs(sb, &uopt, silent, &fileset);
 		}
 	}
-	if (!ret) {
-		udf_warn(sb, "No partition found (1)\n");
+	if (ret < 0) {
+		if (ret == -EAGAIN) {
+			udf_warn(sb, "No partition found (1)\n");
+			ret = -EINVAL;
+		}
 		goto error_out;
 	}
 
@@ -2041,9 +2104,13 @@
 			udf_err(sb, "minUDFReadRev=%x (max is %x)\n",
 				le16_to_cpu(lvidiu->minUDFReadRev),
 				UDF_MAX_READ_VERSION);
+			ret = -EINVAL;
 			goto error_out;
-		} else if (minUDFWriteRev > UDF_MAX_WRITE_VERSION)
-			sb->s_flags |= MS_RDONLY;
+		} else if (minUDFWriteRev > UDF_MAX_WRITE_VERSION &&
+			   !(sb->s_flags & MS_RDONLY)) {
+			ret = -EACCES;
+			goto error_out;
+		}
 
 		sbi->s_udfrev = minUDFWriteRev;
 
@@ -2055,17 +2122,20 @@
 
 	if (!sbi->s_partitions) {
 		udf_warn(sb, "No partition found (2)\n");
+		ret = -EINVAL;
 		goto error_out;
 	}
 
 	if (sbi->s_partmaps[sbi->s_partition].s_partition_flags &
-			UDF_PART_FLAG_READ_ONLY) {
-		pr_notice("Partition marked readonly; forcing readonly mount\n");
-		sb->s_flags |= MS_RDONLY;
+			UDF_PART_FLAG_READ_ONLY &&
+	    !(sb->s_flags & MS_RDONLY)) {
+		ret = -EACCES;
+		goto error_out;
 	}
 
 	if (udf_find_fileset(sb, &fileset, &rootdir)) {
 		udf_warn(sb, "No fileset found\n");
+		ret = -EINVAL;
 		goto error_out;
 	}
 
@@ -2087,6 +2157,7 @@
 	if (!inode) {
 		udf_err(sb, "Error in udf_iget, block=%d, partition=%d\n",
 		       rootdir.logicalBlockNum, rootdir.partitionReferenceNum);
+		ret = -EIO;
 		goto error_out;
 	}
 
@@ -2094,6 +2165,7 @@
 	sb->s_root = d_make_root(inode);
 	if (!sb->s_root) {
 		udf_err(sb, "Couldn't allocate root dentry\n");
+		ret = -ENOMEM;
 		goto error_out;
 	}
 	sb->s_maxbytes = MAX_LFS_FILESIZE;
@@ -2114,7 +2186,7 @@
 	kfree(sbi);
 	sb->s_fs_info = NULL;
 
-	return -EINVAL;
+	return ret;
 }
 
 void _udf_err(struct super_block *sb, const char *function,
diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c
index 0b8b2a1..eca6f9d 100644
--- a/fs/xfs/xfs_da_btree.c
+++ b/fs/xfs/xfs_da_btree.c
@@ -1223,6 +1223,7 @@
 	/* start with smaller blk num */
 	forward = nodehdr.forw < nodehdr.back;
 	for (i = 0; i < 2; forward = !forward, i++) {
+		struct xfs_da3_icnode_hdr thdr;
 		if (forward)
 			blkno = nodehdr.forw;
 		else
@@ -1235,10 +1236,10 @@
 			return(error);
 
 		node = bp->b_addr;
-		xfs_da3_node_hdr_from_disk(&nodehdr, node);
+		xfs_da3_node_hdr_from_disk(&thdr, node);
 		xfs_trans_brelse(state->args->trans, bp);
 
-		if (count - nodehdr.count >= 0)
+		if (count - thdr.count >= 0)
 			break;	/* fits with at least 25% to spare */
 	}
 	if (i >= 2) {
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index c13c919..f45b2a78 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -455,7 +455,11 @@
 };
 
 /* helper */
-acpi_handle acpi_get_child(acpi_handle, u64);
+acpi_handle acpi_find_child(acpi_handle, u64, bool);
+static inline acpi_handle acpi_get_child(acpi_handle handle, u64 addr)
+{
+	return acpi_find_child(handle, addr, false);
+}
 int acpi_is_root_bridge(acpi_handle);
 struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle);
 #define DEVICE_ACPI_HANDLE(dev) ((acpi_handle)ACPI_HANDLE(dev))
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 454881e..22d497e 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -46,7 +46,7 @@
 
 /* Current ACPICA subsystem version in YYYYMMDD format */
 
-#define ACPI_CA_VERSION                 0x20130328
+#define ACPI_CA_VERSION                 0x20130517
 
 #include <acpi/acconfig.h>
 #include <acpi/actypes.h>
@@ -62,6 +62,7 @@
 extern struct acpi_table_fadt acpi_gbl_FADT;
 extern u8 acpi_gbl_system_awake_and_running;
 extern u8 acpi_gbl_reduced_hardware;	/* ACPI 5.0 */
+extern u8 acpi_gbl_osi_data;
 
 /* Runtime configuration of debug print levels */
 
@@ -80,6 +81,7 @@
 extern u8 acpi_gbl_copy_dsdt_locally;
 extern u8 acpi_gbl_truncate_io_addresses;
 extern u8 acpi_gbl_disable_auto_repair;
+extern u8 acpi_gbl_disable_ssdt_table_load;
 
 /*
  * Hardware-reduced prototypes. All interfaces that use these macros will
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index a64adcc..22b03c9 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -1144,4 +1144,19 @@
 #endif
 };
 
+/* Definitions for _OSI support */
+
+#define ACPI_OSI_WIN_2000               0x01
+#define ACPI_OSI_WIN_XP                 0x02
+#define ACPI_OSI_WIN_XP_SP1             0x03
+#define ACPI_OSI_WINSRV_2003            0x04
+#define ACPI_OSI_WIN_XP_SP2             0x05
+#define ACPI_OSI_WINSRV_2003_SP1        0x06
+#define ACPI_OSI_WIN_VISTA              0x07
+#define ACPI_OSI_WINSRV_2008            0x08
+#define ACPI_OSI_WIN_VISTA_SP1          0x09
+#define ACPI_OSI_WIN_VISTA_SP2          0x0A
+#define ACPI_OSI_WIN_7                  0x0B
+#define ACPI_OSI_WIN_8                  0x0C
+
 #endif				/* __ACTYPES_H__ */
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index bde6469..27f5017 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -25,7 +25,7 @@
  */
 
 #ifndef ARCH_NR_GPIOS
-#define ARCH_NR_GPIOS		256
+#define ARCH_NR_GPIOS		384
 #endif
 
 /*
@@ -118,6 +118,9 @@
 	int			(*set_debounce)(struct gpio_chip *chip,
 						unsigned offset, unsigned debounce);
 
+	void			(*set_pinmux)(int gpio, int alt);
+	int			(*get_pinmux)(int gpio);
+
 	void			(*set)(struct gpio_chip *chip,
 						unsigned offset, int value);
 
diff --git a/include/asm-generic/hugetlb.h b/include/asm-generic/hugetlb.h
index d06079c..99b490b 100644
--- a/include/asm-generic/hugetlb.h
+++ b/include/asm-generic/hugetlb.h
@@ -6,12 +6,12 @@
 	return mk_pte(page, pgprot);
 }
 
-static inline int huge_pte_write(pte_t pte)
+static inline unsigned long huge_pte_write(pte_t pte)
 {
 	return pte_write(pte);
 }
 
-static inline int huge_pte_dirty(pte_t pte)
+static inline unsigned long huge_pte_dirty(pte_t pte)
 {
 	return pte_dirty(pte);
 }
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 13821c3..5672d7e 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -112,7 +112,7 @@
 
 #define HAVE_GENERIC_MMU_GATHER
 
-void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm);
+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end);
 void tlb_flush_mmu(struct mmu_gather *tlb);
 void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
 							unsigned long end);
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index eb58d2d..cf736d6 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -416,6 +416,8 @@
 		*(.text.hot)						\
 		*(.text)						\
 		*(.ref.text)						\
+		*(.text.ssse3)					\
+		*(.rodata.ssse3)				\
 	DEV_KEEP(init.text)						\
 	DEV_KEEP(exit.text)						\
 	CPU_KEEP(init.text)						\
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 63d17ee..2aec6ee 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -55,16 +55,13 @@
 #include <linux/mm.h>
 #include <linux/cdev.h>
 #include <linux/mutex.h>
+#include <linux/io.h>
 #include <linux/slab.h>
 #if defined(__alpha__) || defined(__powerpc__)
 #include <asm/pgtable.h>	/* For pte_wrprotect */
 #endif
-#include <asm/io.h>
 #include <asm/mman.h>
 #include <asm/uaccess.h>
-#ifdef CONFIG_MTRR
-#include <asm/mtrr.h>
-#endif
 #if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
 #include <linux/types.h>
 #include <linux/agp_backend.h>
@@ -96,6 +93,9 @@
 #define DRM_UT_DRIVER		0x02
 #define DRM_UT_KMS		0x04
 #define DRM_UT_PRIME		0x08
+#define DRM_UT_PM		0x20
+#define DRM_UT_TDR      0x10
+
 /*
  * Three debug levels are defined.
  * drm_core, drm_driver, drm_kms
@@ -210,7 +210,6 @@
 		drm_ut_debug_printk(DRM_UT_CORE, DRM_NAME, 		\
 					__func__, fmt, ##args);		\
 	} while (0)
-
 #define DRM_DEBUG_DRIVER(fmt, args...)					\
 	do {								\
 		drm_ut_debug_printk(DRM_UT_DRIVER, DRM_NAME,		\
@@ -226,6 +225,14 @@
 		drm_ut_debug_printk(DRM_UT_PRIME, DRM_NAME,		\
 					__func__, fmt, ##args);		\
 	} while (0)
+#define DRM_DEBUG_PM(fmt, args...)					\
+	do {								\
+		drm_ut_debug_printk(DRM_UT_PM, DRM_NAME,		\
+					__func__, fmt, ##args);		\
+	} while (0)
+#define DRM_DEBUG_TDR(fmt, args...)					\
+		drm_ut_debug_printk(DRM_UT_TDR, DRM_NAME,		\
+					__func__, fmt, ##args);
 #define DRM_LOG(fmt, args...)						\
 	do {								\
 		drm_ut_debug_printk(DRM_UT_CORE, NULL,			\
@@ -250,7 +257,9 @@
 #define DRM_DEBUG_DRIVER(fmt, args...) do { } while (0)
 #define DRM_DEBUG_KMS(fmt, args...)	do { } while (0)
 #define DRM_DEBUG_PRIME(fmt, args...)	do { } while (0)
+#define DRM_DEBUG_TDR(fmt, args...)	do { } while (0)
 #define DRM_DEBUG(fmt, arg...)		 do { } while (0)
+#define DRM_DEBUG_PM(fmt, args...) do { } while (0)
 #define DRM_LOG(fmt, arg...)		do { } while (0)
 #define DRM_LOG_KMS(fmt, args...) do { } while (0)
 #define DRM_LOG_MODE(fmt, arg...) do { } while (0)
@@ -933,12 +942,15 @@
 				struct dma_buf *dma_buf);
 	/* low-level interface used by drm_gem_prime_{import,export} */
 	int (*gem_prime_pin)(struct drm_gem_object *obj);
+	void (*gem_prime_unpin)(struct drm_gem_object *obj);
 	struct sg_table *(*gem_prime_get_sg_table)(struct drm_gem_object *obj);
 	struct drm_gem_object *(*gem_prime_import_sg_table)(
 				struct drm_device *dev, size_t size,
 				struct sg_table *sgt);
 	void *(*gem_prime_vmap)(struct drm_gem_object *obj);
 	void (*gem_prime_vunmap)(struct drm_gem_object *obj, void *vaddr);
+	int (*gem_prime_mmap)(struct drm_gem_object *obj,
+				struct vm_area_struct *vma);
 
 	/* vga arb irq handler */
 	void (*vgaarb_irq)(struct drm_device *dev, bool state);
@@ -1218,6 +1230,11 @@
 	int switch_power_state;
 
 	atomic_t unplugged; /* device has been unplugged or gone away */
+
+	struct mutex halt_mutex;
+	atomic_t halt_count;
+	wait_queue_head_t ioctl_queue;
+	wait_queue_head_t halt_queue;
 };
 
 #define DRM_SWITCH_POWER_ON 0
@@ -1250,37 +1267,8 @@
 {
 	return drm_core_check_feature(dev, DRIVER_USE_MTRR);
 }
-
-#define DRM_MTRR_WC		MTRR_TYPE_WRCOMB
-
-static inline int drm_mtrr_add(unsigned long offset, unsigned long size,
-			       unsigned int flags)
-{
-	return mtrr_add(offset, size, flags, 1);
-}
-
-static inline int drm_mtrr_del(int handle, unsigned long offset,
-			       unsigned long size, unsigned int flags)
-{
-	return mtrr_del(handle, offset, size);
-}
-
 #else
 #define drm_core_has_MTRR(dev) (0)
-
-#define DRM_MTRR_WC		0
-
-static inline int drm_mtrr_add(unsigned long offset, unsigned long size,
-			       unsigned int flags)
-{
-	return 0;
-}
-
-static inline int drm_mtrr_del(int handle, unsigned long offset,
-			       unsigned long size, unsigned int flags)
-{
-	return 0;
-}
 #endif
 
 static inline void drm_device_set_unplugged(struct drm_device *dev)
@@ -1447,12 +1435,19 @@
 extern int drm_control(struct drm_device *dev, void *data,
 		       struct drm_file *file_priv);
 extern int drm_irq_install(struct drm_device *dev);
+extern int drm_irq_install_locked(struct drm_device *dev, int locked);
 extern int drm_irq_uninstall(struct drm_device *dev);
+extern int drm_irq_uninstall_locked(struct drm_device *dev, int locked);
+
+extern void drm_halt(struct drm_device *dev);
+extern int drm_wait_idle(struct drm_device *dev, unsigned timeout);
+extern void drm_continue(struct drm_device *dev);
 
 extern int drm_vblank_init(struct drm_device *dev, int num_crtcs);
 extern int drm_wait_vblank(struct drm_device *dev, void *data,
 			   struct drm_file *filp);
 extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq);
+void drm_clean_pending_vblanks(struct drm_device *dev);
 extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
 extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
 				     struct timeval *vblanktime);
@@ -1630,7 +1625,6 @@
 extern int drm_sysfs_device_add(struct drm_minor *minor);
 extern void drm_sysfs_hotplug_event(struct drm_device *dev);
 extern void drm_sysfs_device_remove(struct drm_minor *minor);
-extern char *drm_get_connector_status_name(enum drm_connector_status status);
 extern int drm_sysfs_connector_add(struct drm_connector *connector);
 extern void drm_sysfs_connector_remove(struct drm_connector *connector);
 
@@ -1648,6 +1642,8 @@
 void drm_gem_object_handle_free(struct drm_gem_object *obj);
 void drm_gem_vm_open(struct vm_area_struct *vma);
 void drm_gem_vm_close(struct vm_area_struct *vma);
+int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
+		     struct vm_area_struct *vma);
 int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
 
 #include <drm/drm_global.h>
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index adb3f9b..cfecf27 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -31,7 +31,7 @@
 #include <linux/idr.h>
 #include <linux/fb.h>
 #include <drm/drm_mode.h>
-
+#include <linux/hdmi.h>
 #include <drm/drm_fourcc.h>
 
 struct drm_device;
@@ -40,7 +40,7 @@
 struct drm_object_properties;
 struct drm_file;
 struct drm_clip_rect;
-
+extern enum hdmi_picture_aspect drm_get_cea_aspect_ratio(u8 vic);
 #define DRM_MODE_OBJECT_CRTC 0xcccccccc
 #define DRM_MODE_OBJECT_CONNECTOR 0xc0c0c0c0
 #define DRM_MODE_OBJECT_ENCODER 0xe0e0e0e0
@@ -115,13 +115,15 @@
 #define DRM_MODE_TYPE_CLOCK_CRTC_C (DRM_MODE_TYPE_CLOCK_C | \
 				    DRM_MODE_TYPE_CRTC_C)
 
-#define DRM_MODE(nm, t, c, hd, hss, hse, ht, hsk, vd, vss, vse, vt, vs, f) \
+#define DRM_MODE(nm, t, c, hd, hss, hse, ht, hsk, vd, vss, vse, vt, vs, f, \
+ar) \
 	.name = nm, .status = 0, .type = (t), .clock = (c), \
 	.hdisplay = (hd), .hsync_start = (hss), .hsync_end = (hse), \
 	.htotal = (ht), .hskew = (hsk), .vdisplay = (vd), \
 	.vsync_start = (vss), .vsync_end = (vse), .vtotal = (vt), \
-	.vscan = (vs), .flags = (f), \
-	.base.type = DRM_MODE_OBJECT_MODE
+	.vscan = (vs), .flags = (f), .vrefresh = 0, \
+	.base.type = DRM_MODE_OBJECT_MODE, \
+	.picture_aspect_ratio = (ar)
 
 #define CRTC_INTERLACE_HALVE_V 0x1 /* halve V values for interlacing */
 
@@ -177,6 +179,7 @@
 
 	int vrefresh;		/* in Hz */
 	int hsync;		/* in kHz */
+	enum hdmi_picture_aspect picture_aspect_ratio;
 };
 
 enum drm_connector_status {
@@ -217,6 +220,7 @@
 	u32 color_formats;
 
 	u8 cea_rev;
+	char *raw_edid; /* if any */
 };
 
 struct drm_framebuffer_funcs {
@@ -339,6 +343,9 @@
 	/* cursor controls */
 	int (*cursor_set)(struct drm_crtc *crtc, struct drm_file *file_priv,
 			  uint32_t handle, uint32_t width, uint32_t height);
+	int (*cursor_set2)(struct drm_crtc *crtc, struct drm_file *file_priv,
+			   uint32_t handle, uint32_t width, uint32_t height,
+			   int32_t hot_x, int32_t hot_y);
 	int (*cursor_move)(struct drm_crtc *crtc, int x, int y);
 
 	/* Set gamma on the CRTC */
@@ -409,6 +416,10 @@
 	/* framebuffer the connector is currently bound to */
 	struct drm_framebuffer *fb;
 
+	/* Temporary tracking of the old fb while a modeset is ongoing. Used
+	 * by drm_mode_set_config_internal to implement correct refcounting. */
+	struct drm_framebuffer *old_fb;
+
 	bool enabled;
 
 	/* Requested mode from modesetting. */
@@ -435,6 +446,7 @@
 	void *helper_private;
 
 	struct drm_object_properties properties;
+	bool panning_en;
 };
 
 
@@ -597,6 +609,7 @@
 	struct drm_display_info display_info;
 	const struct drm_connector_funcs *funcs;
 
+	struct list_head user_modes;
 	struct drm_property_blob *edid_blob_ptr;
 	struct drm_object_properties properties;
 
@@ -636,7 +649,8 @@
 			    int crtc_x, int crtc_y,
 			    unsigned int crtc_w, unsigned int crtc_h,
 			    uint32_t src_x, uint32_t src_y,
-			    uint32_t src_w, uint32_t src_h);
+			    uint32_t src_w, uint32_t src_h,
+				struct drm_pending_vblank_event *e);
 	int (*disable_plane)(struct drm_plane *plane);
 	void (*destroy)(struct drm_plane *plane);
 
@@ -654,11 +668,7 @@
  * @format_count: number of formats supported
  * @crtc: currently bound CRTC
  * @fb: currently bound fb
- * @gamma_size: size of gamma table
- * @gamma_store: gamma correction table
- * @enabled: enabled flag
  * @funcs: helper functions
- * @helper_private: storage for drver layer
  * @properties: property tracking for this plane
  */
 struct drm_plane {
@@ -674,14 +684,7 @@
 	struct drm_crtc *crtc;
 	struct drm_framebuffer *fb;
 
-	/* CRTC gamma size for reporting to userspace */
-	uint32_t gamma_size;
-	uint16_t *gamma_store;
-
-	bool enabled;
-
 	const struct drm_plane_funcs *funcs;
-	void *helper_private;
 
 	struct drm_object_properties properties;
 };
@@ -816,6 +819,7 @@
 	bool poll_enabled;
 	bool poll_running;
 	struct delayed_work output_poll_work;
+	struct delayed_work dpms_work;
 
 	/* pointers to standard properties */
 	struct list_head property_blob_list;
@@ -894,15 +898,17 @@
 			  const uint32_t *formats, uint32_t format_count,
 			  bool priv);
 extern void drm_plane_cleanup(struct drm_plane *plane);
+extern void drm_plane_force_disable(struct drm_plane *plane);
 
 extern void drm_encoder_cleanup(struct drm_encoder *encoder);
 
-extern char *drm_get_connector_name(struct drm_connector *connector);
-extern char *drm_get_dpms_name(int val);
-extern char *drm_get_dvi_i_subconnector_name(int val);
-extern char *drm_get_dvi_i_select_name(int val);
-extern char *drm_get_tv_subconnector_name(int val);
-extern char *drm_get_tv_select_name(int val);
+extern const char *drm_get_connector_name(const struct drm_connector *connector);
+extern const char *drm_get_connector_status_name(enum drm_connector_status status);
+extern const char *drm_get_dpms_name(int val);
+extern const char *drm_get_dvi_i_subconnector_name(int val);
+extern const char *drm_get_dvi_i_select_name(int val);
+extern const char *drm_get_tv_subconnector_name(int val);
+extern const char *drm_get_tv_select_name(int val);
 extern void drm_fb_release(struct drm_file *file_priv);
 extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group);
 extern bool drm_probe_ddc(struct i2c_adapter *adapter);
@@ -994,7 +1000,7 @@
 extern int drm_mode_create_scaling_mode_property(struct drm_device *dev);
 extern int drm_mode_create_dithering_property(struct drm_device *dev);
 extern int drm_mode_create_dirty_info_property(struct drm_device *dev);
-extern char *drm_get_encoder_name(struct drm_encoder *encoder);
+extern const char *drm_get_encoder_name(const struct drm_encoder *encoder);
 
 extern int drm_mode_connector_attach_encoder(struct drm_connector *connector,
 					     struct drm_encoder *encoder);
@@ -1022,6 +1028,8 @@
 			       void *data, struct drm_file *file_priv);
 extern int drm_mode_cursor_ioctl(struct drm_device *dev,
 				void *data, struct drm_file *file_priv);
+extern int drm_mode_cursor2_ioctl(struct drm_device *dev,
+				void *data, struct drm_file *file_priv);
 extern int drm_mode_addfb(struct drm_device *dev,
 			  void *data, struct drm_file *file_priv);
 extern int drm_mode_addfb2(struct drm_device *dev,
@@ -1094,5 +1102,6 @@
 extern int drm_format_plane_cpp(uint32_t format, int plane);
 extern int drm_format_horz_chroma_subsampling(uint32_t format);
 extern int drm_format_vert_chroma_subsampling(uint32_t format);
+extern const char *drm_get_format_name(uint32_t format);
 
 #endif /* __DRM_CRTC_H__ */
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index f43d556..36d33b4 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -109,6 +109,7 @@
 					    struct drm_connector *connector);
 	/* disable encoder when not in use - more explicit than dpms off */
 	void (*disable)(struct drm_encoder *encoder);
+	int (*inuse)(struct drm_encoder *encoder);
 };
 
 /**
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index e8e1417..ae8dbfb 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -342,13 +342,42 @@
 u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
 					  int lane);
 
-#define DP_RECEIVER_CAP_SIZE	0xf
+#define DP_RECEIVER_CAP_SIZE		0xf
+#define EDP_PSR_RECEIVER_CAP_SIZE	2
+
 void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]);
 void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]);
 
 u8 drm_dp_link_rate_to_bw_code(int link_rate);
 int drm_dp_bw_code_to_link_rate(u8 link_bw);
 
+struct edp_sdp_header {
+	u8 HB0; /* Secondary Data Packet ID */
+	u8 HB1; /* Secondary Data Packet Type */
+	u8 HB2; /* 7:5 reserved, 4:0 revision number */
+	u8 HB3; /* 7:5 reserved, 4:0 number of valid data bytes */
+} __packed;
+
+#define EDP_SDP_HEADER_REVISION_MASK		0x1F
+#define EDP_SDP_HEADER_VALID_PAYLOAD_BYTES	0x1F
+
+struct edp_vsc_psr {
+	struct edp_sdp_header sdp_header;
+	u8 DB0; /* Stereo Interface */
+	u8 DB1; /* 0 - PSR State; 1 - Update RFB; 2 - CRC Valid */
+	u8 DB2; /* CRC value bits 7:0 of the R or Cr component */
+	u8 DB3; /* CRC value bits 15:8 of the R or Cr component */
+	u8 DB4; /* CRC value bits 7:0 of the G or Y component */
+	u8 DB5; /* CRC value bits 15:8 of the G or Y component */
+	u8 DB6; /* CRC value bits 7:0 of the B or Cb component */
+	u8 DB7; /* CRC value bits 15:8 of the B or Cb component */
+	u8 DB8_31[24]; /* Reserved */
+} __packed;
+
+#define EDP_VSC_PSR_STATE_ACTIVE	(1<<0)
+#define EDP_VSC_PSR_UPDATE_RFB		(1<<1)
+#define EDP_VSC_PSR_CRC_VALUES_VALID	(1<<2)
+
 static inline int
 drm_dp_max_link_rate(u8 dpcd[DP_RECEIVER_CAP_SIZE])
 {
diff --git a/include/drm/drm_hashtab.h b/include/drm/drm_hashtab.h
index fce2ef3..2ed6f55 100644
--- a/include/drm/drm_hashtab.h
+++ b/include/drm/drm_hashtab.h
@@ -55,7 +55,8 @@
 				     unsigned long seed, int bits, int shift,
 				     unsigned long add);
 extern int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, struct drm_hash_item **item);
-
+extern int drm_ht_find_item_anyused(struct drm_open_hash *ht,
+				struct drm_hash_item **item);
 extern void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key);
 extern int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key);
 extern int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item);
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index 88591ef..b87d05e 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -138,10 +138,7 @@
 /*
  * Basic range manager support (drm_mm.c)
  */
-extern struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
-					       unsigned long start,
-					       unsigned long size,
-					       bool atomic);
+extern int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);
 extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
 						    unsigned long size,
 						    unsigned alignment,
@@ -155,6 +152,7 @@
 						unsigned long start,
 						unsigned long end,
 						int atomic);
+
 static inline struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
 						   unsigned long size,
 						   unsigned alignment)
@@ -177,17 +175,6 @@
 	return drm_mm_get_block_range_generic(parent, size, alignment, 0,
 					      start, end, 0);
 }
-static inline struct drm_mm_node *drm_mm_get_color_block_range(
-						struct drm_mm_node *parent,
-						unsigned long size,
-						unsigned alignment,
-						unsigned long color,
-						unsigned long start,
-						unsigned long end)
-{
-	return drm_mm_get_block_range_generic(parent, size, alignment, color,
-					      start, end, 0);
-}
 static inline struct drm_mm_node *drm_mm_get_block_atomic_range(
 						struct drm_mm_node *parent,
 						unsigned long size,
@@ -255,29 +242,10 @@
 	return drm_mm_search_free_in_range_generic(mm, size, alignment, 0,
 						   start, end, best_match);
 }
-static inline struct drm_mm_node *drm_mm_search_free_color(const struct drm_mm *mm,
-							   unsigned long size,
-							   unsigned alignment,
-							   unsigned long color,
-							   bool best_match)
-{
-	return drm_mm_search_free_generic(mm,size, alignment, color, best_match);
-}
-static inline  struct drm_mm_node *drm_mm_search_free_in_range_color(
-						const struct drm_mm *mm,
-						unsigned long size,
-						unsigned alignment,
-						unsigned long color,
-						unsigned long start,
-						unsigned long end,
-						bool best_match)
-{
-	return drm_mm_search_free_in_range_generic(mm, size, alignment, color,
-						   start, end, best_match);
-}
-extern int drm_mm_init(struct drm_mm *mm,
-		       unsigned long start,
-		       unsigned long size);
+
+extern void drm_mm_init(struct drm_mm *mm,
+			unsigned long start,
+			unsigned long size);
 extern void drm_mm_takedown(struct drm_mm *mm);
 extern int drm_mm_clean(struct drm_mm *mm);
 extern int drm_mm_pre_get(struct drm_mm *mm);
diff --git a/include/drm/drm_os_linux.h b/include/drm/drm_os_linux.h
index 675ddf4..815fafc 100644
--- a/include/drm/drm_os_linux.h
+++ b/include/drm/drm_os_linux.h
@@ -65,22 +65,6 @@
 #define DRM_AGP_KERN            struct no_agp_kern
 #endif
 
-#if !(__OS_HAS_MTRR)
-static __inline__ int mtrr_add(unsigned long base, unsigned long size,
-			       unsigned int type, char increment)
-{
-	return -ENODEV;
-}
-
-static __inline__ int mtrr_del(int reg, unsigned long base, unsigned long size)
-{
-	return -ENODEV;
-}
-
-#define MTRR_TYPE_WRCOMB     1
-
-#endif
-
 /** Other copying of data to kernel space */
 #define DRM_COPY_FROM_USER(arg1, arg2, arg3)		\
 	copy_from_user(arg1, arg2, arg3)
diff --git a/include/drm/drm_rect.h b/include/drm/drm_rect.h
new file mode 100644
index 0000000..d128629
--- /dev/null
+++ b/include/drm/drm_rect.h
@@ -0,0 +1,167 @@
+/*
+ * Copyright (C) 2011-2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef DRM_RECT_H
+#define DRM_RECT_H
+
+/**
+ * DOC: rect utils
+ *
+ * Utility functions to help manage rectangular areas for
+ * clipping, scaling, etc. calculations.
+ */
+
+/**
+ * struct drm_rect - two dimensional rectangle
+ * @x1: horizontal starting coordinate (inclusive)
+ * @x2: horizontal ending coordinate (exclusive)
+ * @y1: vertical starting coordinate (inclusive)
+ * @y2: vertical ending coordinate (exclusive)
+ */
+struct drm_rect {
+	int x1, y1, x2, y2;
+};
+
+/**
+ * drm_rect_adjust_size - adjust the size of the rectangle
+ * @r: rectangle to be adjusted
+ * @dw: horizontal adjustment
+ * @dh: vertical adjustment
+ *
+ * Change the size of rectangle @r by @dw in the horizontal direction,
+ * and by @dh in the vertical direction, while keeping the center
+ * of @r stationary.
+ *
+ * Positive @dw and @dh increase the size, negative values decrease it.
+ */
+static inline void drm_rect_adjust_size(struct drm_rect *r, int dw, int dh)
+{
+	r->x1 -= dw >> 1;
+	r->y1 -= dh >> 1;
+	r->x2 += (dw + 1) >> 1;
+	r->y2 += (dh + 1) >> 1;
+}
+
+/**
+ * drm_rect_translate - translate the rectangle
+ * @r: rectangle to be tranlated
+ * @dx: horizontal translation
+ * @dy: vertical translation
+ *
+ * Move rectangle @r by @dx in the horizontal direction,
+ * and by @dy in the vertical direction.
+ */
+static inline void drm_rect_translate(struct drm_rect *r, int dx, int dy)
+{
+	r->x1 += dx;
+	r->y1 += dy;
+	r->x2 += dx;
+	r->y2 += dy;
+}
+
+/**
+ * drm_rect_downscale - downscale a rectangle
+ * @r: rectangle to be downscaled
+ * @horz: horizontal downscale factor
+ * @vert: vertical downscale factor
+ *
+ * Divide the coordinates of rectangle @r by @horz and @vert.
+ */
+static inline void drm_rect_downscale(struct drm_rect *r, int horz, int vert)
+{
+	r->x1 /= horz;
+	r->y1 /= vert;
+	r->x2 /= horz;
+	r->y2 /= vert;
+}
+
+/**
+ * drm_rect_width - determine the rectangle width
+ * @r: rectangle whose width is returned
+ *
+ * RETURNS:
+ * The width of the rectangle.
+ */
+static inline int drm_rect_width(const struct drm_rect *r)
+{
+	return r->x2 - r->x1;
+}
+
+/**
+ * drm_rect_height - determine the rectangle height
+ * @r: rectangle whose height is returned
+ *
+ * RETURNS:
+ * The height of the rectangle.
+ */
+static inline int drm_rect_height(const struct drm_rect *r)
+{
+	return r->y2 - r->y1;
+}
+
+/**
+ * drm_rect_visible - determine if the the rectangle is visible
+ * @r: rectangle whose visibility is returned
+ *
+ * RETURNS:
+ * %true if the rectangle is visible, %false otherwise.
+ */
+static inline bool drm_rect_visible(const struct drm_rect *r)
+{
+	return drm_rect_width(r) > 0 && drm_rect_height(r) > 0;
+}
+
+/**
+ * drm_rect_equals - determine if two rectangles are equal
+ * @r1: first rectangle
+ * @r2: second rectangle
+ *
+ * RETURNS:
+ * %true if the rectangles are equal, %false otherwise.
+ */
+static inline bool drm_rect_equals(const struct drm_rect *r1,
+				   const struct drm_rect *r2)
+{
+	return r1->x1 == r2->x1 && r1->x2 == r2->x2 &&
+		r1->y1 == r2->y1 && r1->y2 == r2->y2;
+}
+
+bool drm_rect_intersect(struct drm_rect *r, const struct drm_rect *clip);
+bool drm_rect_clip_scaled(struct drm_rect *src, struct drm_rect *dst,
+			  const struct drm_rect *clip,
+			  int hscale, int vscale);
+int drm_rect_calc_hscale(const struct drm_rect *src,
+			 const struct drm_rect *dst,
+			 int min_hscale, int max_hscale);
+int drm_rect_calc_vscale(const struct drm_rect *src,
+			 const struct drm_rect *dst,
+			 int min_vscale, int max_vscale);
+int drm_rect_calc_hscale_relaxed(struct drm_rect *src,
+				 struct drm_rect *dst,
+				 int min_hscale, int max_hscale);
+int drm_rect_calc_vscale_relaxed(struct drm_rect *src,
+				 struct drm_rect *dst,
+				 int min_vscale, int max_vscale);
+void drm_rect_debug_print(const struct drm_rect *r, bool fixed_point);
+
+#endif
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 17b5b59..0bf3398 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -191,6 +191,7 @@
 #define ACPI_VIDEO_BACKLIGHT_DMI_VIDEO			0x0200
 #define ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VENDOR		0x0400
 #define ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VIDEO		0x0800
+#define ACPI_VIDEO_SKIP_BACKLIGHT			0x1000
 
 #if defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE)
 
diff --git a/include/linux/acpi_gpio.h b/include/linux/acpi_gpio.h
index 4c120a1..b5a0bb9 100644
--- a/include/linux/acpi_gpio.h
+++ b/include/linux/acpi_gpio.h
@@ -18,6 +18,8 @@
 int acpi_get_gpio(char *path, int pin);
 int acpi_get_gpio_by_index(struct device *dev, int index,
 			   struct acpi_gpio_info *info);
+int acpi_get_gpio_by_name(struct device *dev, char *name,
+			   struct acpi_gpio_info *info);
 void acpi_gpiochip_request_interrupts(struct gpio_chip *chip);
 void acpi_gpiochip_free_interrupts(struct gpio_chip *chip);
 
@@ -33,6 +35,11 @@
 {
 	return -ENODEV;
 }
+static inline int acpi_get_gpio_by_name(struct device *dev, char *name,
+					 struct acpi_gpio_info *info)
+{
+	return -ENODEV;
+}
 
 static inline void acpi_gpiochip_request_interrupts(struct gpio_chip *chip) { }
 static inline void acpi_gpiochip_free_interrupts(struct gpio_chip *chip) { }
diff --git a/include/linux/alarmtimer.h b/include/linux/alarmtimer.h
index a899402..86e7344 100644
--- a/include/linux/alarmtimer.h
+++ b/include/linux/alarmtimer.h
@@ -9,6 +9,7 @@
 enum alarmtimer_type {
 	ALARM_REALTIME,
 	ALARM_BOOTTIME,
+	ALARM_REALTIME_OFF,
 
 	ALARM_NUMTYPE,
 };
diff --git a/include/linux/atomisp.h b/include/linux/atomisp.h
new file mode 100644
index 0000000..115d471
--- /dev/null
+++ b/include/linux/atomisp.h
@@ -0,0 +1,1125 @@
+/*
+ * Support for Medifield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#ifndef _ATOM_ISP_H
+#define _ATOM_ISP_H
+
+#include <linux/types.h>
+#include <linux/version.h>
+
+/* struct media_device_info.driver_version */
+#define ATOMISP_CSS_VERSION_MASK	0x00ffffff
+#define ATOMISP_CSS_VERSION_15		KERNEL_VERSION(1, 5, 0)
+#define ATOMISP_CSS_VERSION_20		KERNEL_VERSION(2, 0, 0)
+#define ATOMISP_CSS_VERSION_21		KERNEL_VERSION(2, 1, 0)
+
+/* struct media_device_info.hw_revision */
+#define ATOMISP_HW_REVISION_MASK	0x0000ff00
+#define ATOMISP_HW_REVISION_SHIFT	8
+#define ATOMISP_HW_REVISION_ISP2300	0x00
+#define ATOMISP_HW_REVISION_ISP2400	0x10
+#define ATOMISP_HW_REVISION_ISP2401_LEGACY 0x11
+#define ATOMISP_HW_REVISION_ISP2401	0x20
+
+#define ATOMISP_HW_STEPPING_MASK	0x000000ff
+#define ATOMISP_HW_STEPPING_A0		0x00
+#define ATOMISP_HW_STEPPING_B0		0x10
+
+/*ISP binary running mode*/
+#define CI_MODE_PREVIEW		0x8000
+#define CI_MODE_VIDEO		0x4000
+#define CI_MODE_STILL_CAPTURE	0x2000
+#define CI_MODE_CONTINUOUS	0x1000
+#define CI_MODE_NONE		0x0000
+
+#define OUTPUT_MODE_FILE 0x0100
+#define OUTPUT_MODE_TEXT 0x0200
+
+/* Custom format for RAW capture from M10MO 0x3130314d */
+#define V4L2_PIX_FMT_CUSTOM_M10MO_RAW	v4l2_fourcc('M', '1', '0', '1')
+
+/* Custom media bus formats being used in atomisp */
+#define V4L2_MBUS_FMT_CUSTOM_YUV420	0x8001
+#define V4L2_MBUS_FMT_CUSTOM_YVU420	0x8002
+#define V4L2_MBUS_FMT_CUSTOM_YUV422P	0x8003
+#define V4L2_MBUS_FMT_CUSTOM_YUV444	0x8004
+#define V4L2_MBUS_FMT_CUSTOM_NV12	0x8005
+#define V4L2_MBUS_FMT_CUSTOM_NV21	0x8006
+#define V4L2_MBUS_FMT_CUSTOM_NV16	0x8007
+#define V4L2_MBUS_FMT_CUSTOM_YUYV	0x8008
+#define V4L2_MBUS_FMT_CUSTOM_SBGGR16	0x8009
+#define V4L2_MBUS_FMT_CUSTOM_RGB32	0x800a
+
+/* Custom media bus format for M10MO RAW capture */
+#define V4L2_MBUS_FMT_CUSTOM_M10MO_RAW	0x800b
+
+
+/* Configuration used by Bayer noise reduction and YCC noise reduction */
+struct atomisp_nr_config {
+	/* [gain] Strength of noise reduction for Bayer NR (Used by Bayer NR) */
+	unsigned int bnr_gain;
+	/* [gain] Strength of noise reduction for YCC NR (Used by YCC NR) */
+	unsigned int ynr_gain;
+	/* [intensity] Sensitivity of Edge (Used by Bayer NR) */
+	unsigned int direction;
+	/* [intensity] coring threshold for Cb (Used by YCC NR) */
+	unsigned int threshold_cb;
+	/* [intensity] coring threshold for Cr (Used by YCC NR) */
+	unsigned int threshold_cr;
+};
+
+/* Temporal noise reduction configuration */
+struct atomisp_tnr_config {
+	unsigned int gain;	 /* [gain] Strength of NR */
+	unsigned int threshold_y;/* [intensity] Motion sensitivity for Y */
+	unsigned int threshold_uv;/* [intensity] Motion sensitivity for U/V */
+};
+
+/* Histogram. This contains num_elements values of type unsigned int.
+ * The data pointer is a DDR pointer (virtual address).
+ */
+struct atomisp_histogram {
+	unsigned int num_elements;
+	void __user *data;
+};
+
+enum atomisp_ob_mode {
+	atomisp_ob_mode_none,
+	atomisp_ob_mode_fixed,
+	atomisp_ob_mode_raster
+};
+
+/* Optical black level configuration */
+struct atomisp_ob_config {
+	/* Obtical black level mode (Fixed / Raster) */
+	enum atomisp_ob_mode mode;
+	/* [intensity] optical black level for GR (relevant for fixed mode) */
+	unsigned int level_gr;
+	/* [intensity] optical black level for R (relevant for fixed mode) */
+	unsigned int level_r;
+	/* [intensity] optical black level for B (relevant for fixed mode) */
+	unsigned int level_b;
+	/* [intensity] optical black level for GB (relevant for fixed mode) */
+	unsigned int level_gb;
+	/* [BQ] 0..63 start position of OB area (relevant for raster mode) */
+	unsigned short start_position;
+	/* [BQ] start..63 end position of OB area (relevant for raster mode) */
+	unsigned short end_position;
+};
+
+/* Edge enhancement (sharpen) configuration */
+struct atomisp_ee_config {
+	/* [gain] The strength of sharpness. u5_11 */
+	unsigned int gain;
+	/* [intensity] The threshold that divides noises from edge. u8_8 */
+	unsigned int threshold;
+	/* [gain] The strength of sharpness in pell-mell area. u5_11 */
+	unsigned int detail_gain;
+};
+
+struct atomisp_3a_output {
+	int ae_y;
+	int awb_cnt;
+	int awb_gr;
+	int awb_r;
+	int awb_b;
+	int awb_gb;
+	int af_hpf1;
+	int af_hpf2;
+};
+
+enum atomisp_calibration_type {
+	calibration_type1,
+	calibration_type2,
+	calibration_type3
+};
+
+struct atomisp_calibration_group {
+	unsigned int size;
+	unsigned int type;
+	unsigned short *calb_grp_values;
+};
+
+struct atomisp_gc_config {
+	__u16 gain_k1;
+	__u16 gain_k2;
+};
+
+struct atomisp_3a_config {
+	unsigned int ae_y_coef_r;	/* [gain] Weight of R for Y */
+	unsigned int ae_y_coef_g;	/* [gain] Weight of G for Y */
+	unsigned int ae_y_coef_b;	/* [gain] Weight of B for Y */
+	unsigned int awb_lg_high_raw;	/* [intensity]
+					   AWB level gate high for raw */
+	unsigned int awb_lg_low;	/* [intensity] AWB level gate low */
+	unsigned int awb_lg_high;	/* [intensity] AWB level gate high */
+	int af_fir1_coef[7];	/* [factor] AF FIR coefficients of fir1 */
+	int af_fir2_coef[7];	/* [factor] AF FIR coefficients of fir2 */
+};
+
+struct atomisp_dvs_grid_info {
+	uint32_t enable;
+	uint32_t width;
+	uint32_t aligned_width;
+	uint32_t height;
+	uint32_t aligned_height;
+	uint32_t bqs_per_grid_cell;
+	uint32_t num_hor_coefs;
+	uint32_t num_ver_coefs;
+};
+
+struct atomisp_dvs_envelop {
+	unsigned int width;
+	unsigned int height;
+};
+
+struct atomisp_grid_info {
+	uint32_t enable;
+	uint32_t use_dmem;
+	uint32_t has_histogram;
+	uint32_t s3a_width;
+	uint32_t s3a_height;
+	uint32_t aligned_width;
+	uint32_t aligned_height;
+	uint32_t s3a_bqs_per_grid_cell;
+	uint32_t deci_factor_log2;
+	uint32_t elem_bit_depth;
+};
+
+struct atomisp_dis_vector {
+	int x;
+	int y;
+};
+
+
+/** DVS 2.0 Coefficient types. This structure contains 4 pointers to
+ *  arrays that contain the coeffients for each type.
+ */
+struct atomisp_dvs2_coef_types {
+	short __user *odd_real; /**< real part of the odd coefficients*/
+	short __user *odd_imag; /**< imaginary part of the odd coefficients*/
+	short __user *even_real;/**< real part of the even coefficients*/
+	short __user *even_imag;/**< imaginary part of the even coefficients*/
+};
+
+/*
+ * DVS 2.0 Statistic types. This structure contains 4 pointers to
+ * arrays that contain the statistics for each type.
+ */
+struct atomisp_dvs2_stat_types {
+	int __user *odd_real; /**< real part of the odd statistics*/
+	int __user *odd_imag; /**< imaginary part of the odd statistics*/
+	int __user *even_real;/**< real part of the even statistics*/
+	int __user *even_imag;/**< imaginary part of the even statistics*/
+};
+
+struct atomisp_dis_coefficients {
+	struct atomisp_dvs_grid_info grid_info;
+	struct atomisp_dvs2_coef_types hor_coefs;
+	struct atomisp_dvs2_coef_types ver_coefs;
+};
+
+struct atomisp_dvs2_statistics {
+	struct atomisp_dvs_grid_info grid_info;
+	struct atomisp_dvs2_stat_types hor_prod;
+	struct atomisp_dvs2_stat_types ver_prod;
+};
+
+struct atomisp_dis_statistics {
+	struct atomisp_dvs2_statistics dvs2_stat;
+	uint32_t exp_id;
+};
+
+struct atomisp_3a_rgby_output {
+	uint32_t r;
+	uint32_t g;
+	uint32_t b;
+	uint32_t y;
+};
+
+struct atomisp_metadata {
+	void __user *data;
+	uint32_t width;
+	uint32_t height;
+	uint32_t stride; /* in bytes */
+	uint32_t exp_id; /* exposure ID */
+	uint32_t *effective_width; /* mipi packets valid data size */
+};
+
+struct atomisp_ext_isp_ctrl {
+	uint32_t id;
+	uint32_t data;
+};
+
+struct atomisp_3a_statistics {
+	struct atomisp_grid_info  grid_info;
+	struct atomisp_3a_output __user *data;
+	struct atomisp_3a_rgby_output __user *rgby_data;
+	uint32_t exp_id; /* exposure ID */
+};
+
+/**
+ * struct atomisp_cont_capture_conf - continuous capture parameters
+ * @num_captures: number of still images to capture
+ * @skip_frames: number of frames to skip between 2 captures
+ * @offset: offset in ring buffer to start capture
+ *
+ * For example, to capture 1 frame from past, current, and 1 from future
+ * and skip one frame between each capture, parameters would be:
+ * num_captures:3
+ * skip_frames:1
+ * offset:-2
+ */
+
+struct atomisp_cont_capture_conf {
+	int num_captures;
+	unsigned int skip_frames;
+	int offset;
+	__u32 reserved[5];
+};
+
+/* White Balance (Gain Adjust) */
+struct atomisp_wb_config {
+	unsigned int integer_bits;
+	unsigned int gr;	/* unsigned <integer_bits>.<16-integer_bits> */
+	unsigned int r;		/* unsigned <integer_bits>.<16-integer_bits> */
+	unsigned int b;		/* unsigned <integer_bits>.<16-integer_bits> */
+	unsigned int gb;	/* unsigned <integer_bits>.<16-integer_bits> */
+};
+
+/* Color Space Conversion settings */
+struct atomisp_cc_config {
+	unsigned int fraction_bits;
+	int matrix[3 * 3];	/* RGB2YUV Color matrix, signed
+				   <13-fraction_bits>.<fraction_bits> */
+};
+
+/* De pixel noise configuration */
+struct atomisp_de_config {
+	unsigned int pixelnoise;
+	unsigned int c1_coring_threshold;
+	unsigned int c2_coring_threshold;
+};
+
+/* Chroma enhancement */
+struct atomisp_ce_config {
+	unsigned char uv_level_min;
+	unsigned char uv_level_max;
+};
+
+/* Defect pixel correction configuration */
+struct atomisp_dp_config {
+	/* [intensity] The threshold of defect Pixel Correction, representing
+	 * the permissible difference of intensity between one pixel and its
+	 * surrounding pixels. Smaller values result in more frequent pixel
+	 * corrections. u0_16
+	 */
+	unsigned int threshold;
+	/* [gain] The sensitivity of mis-correction. ISP will miss a lot of
+	 * defects if the value is set too large. u8_8
+	 */
+	unsigned int gain;
+	unsigned int gr;
+	unsigned int r;
+	unsigned int b;
+	unsigned int gb;
+};
+
+/* XNR threshold */
+struct atomisp_xnr_config {
+	__u16 threshold;
+};
+
+/* metadata config */
+struct atomisp_metadata_config {
+	uint32_t metadata_height;
+	uint32_t metadata_stride;
+};
+
+struct atomisp_parm {
+	struct atomisp_grid_info info;
+	struct atomisp_dvs_grid_info dvs_grid;
+	struct atomisp_dvs_envelop dvs_envelop;
+	struct atomisp_wb_config wb_config;
+	struct atomisp_cc_config cc_config;
+	struct atomisp_ob_config ob_config;
+	struct atomisp_de_config de_config;
+	struct atomisp_ce_config ce_config;
+	struct atomisp_dp_config dp_config;
+	struct atomisp_nr_config nr_config;
+	struct atomisp_ee_config ee_config;
+	struct atomisp_tnr_config tnr_config;
+	struct atomisp_metadata_config metadata_config;
+};
+
+struct dvs2_bq_resolution {
+	int width_bq;         /* width [BQ] */
+	int height_bq;        /* height [BQ] */
+};
+
+struct atomisp_dvs2_bq_resolutions {
+	/* GDC source image size [BQ] */
+	struct dvs2_bq_resolution source_bq;
+	/* GDC output image size [BQ] */
+	struct dvs2_bq_resolution output_bq;
+	/* GDC effective envelope size [BQ] */
+	struct dvs2_bq_resolution envelope_bq;
+	/* isp pipe filter size [BQ] */
+	struct dvs2_bq_resolution ispfilter_bq;
+	/* GDC shit size [BQ] */
+	struct dvs2_bq_resolution gdc_shift_bq;
+};
+
+struct atomisp_dvs_6axis_config {
+	uint32_t exp_id;
+	uint32_t width_y;
+	uint32_t height_y;
+	uint32_t width_uv;
+	uint32_t height_uv;
+	uint32_t *xcoords_y;
+	uint32_t *ycoords_y;
+	uint32_t *xcoords_uv;
+	uint32_t *ycoords_uv;
+};
+
+struct atomisp_parameters {
+	struct atomisp_wb_config   *wb_config;  /* White Balance config */
+	struct atomisp_cc_config   *cc_config;  /* Color Correction config */
+	struct atomisp_tnr_config  *tnr_config; /* Temporal Noise Reduction */
+	struct atomisp_ecd_config  *ecd_config; /* Eigen Color Demosaicing */
+	struct atomisp_ynr_config  *ynr_config; /* Y(Luma) Noise Reduction */
+	struct atomisp_fc_config   *fc_config;  /* Fringe Control */
+	struct atomisp_cnr_config  *cnr_config; /* Chroma Noise Reduction */
+	struct atomisp_macc_config *macc_config;  /* MACC */
+	struct atomisp_ctc_config  *ctc_config; /* Chroma Tone Control */
+	struct atomisp_aa_config   *aa_config;  /* Anti-Aliasing */
+	struct atomisp_aa_config   *baa_config;  /* Anti-Aliasing */
+	struct atomisp_ce_config   *ce_config;
+	struct atomisp_dvs_6axis_config *dvs_6axis_config;
+	struct atomisp_ob_config   *ob_config;  /* Objective Black config */
+	struct atomisp_dp_config   *dp_config;  /* Dead Pixel config */
+	struct atomisp_nr_config   *nr_config;  /* Noise Reduction config */
+	struct atomisp_ee_config   *ee_config;  /* Edge Enhancement config */
+	struct atomisp_de_config   *de_config;  /* Demosaic config */
+	struct atomisp_gc_config   *gc_config;  /* Gamma Correction config */
+	struct atomisp_anr_config  *anr_config; /* Advanced Noise Reduction */
+	struct atomisp_3a_config   *a3a_config; /* 3A Statistics config */
+	struct atomisp_xnr_config  *xnr_config; /* eXtra Noise Reduction */
+	struct atomisp_dz_config   *dz_config;  /* Digital Zoom */
+	struct atomisp_cc_config *yuv2rgb_cc_config; /* Color
+							Correction config */
+	struct atomisp_cc_config *rgb2yuv_cc_config; /* Color
+							Correction config */
+	struct atomisp_macc_table  *macc_table;
+	struct atomisp_gamma_table *gamma_table;
+	struct atomisp_ctc_table   *ctc_table;
+	struct atomisp_xnr_table   *xnr_table;
+	struct atomisp_rgb_gamma_table *r_gamma_table;
+	struct atomisp_rgb_gamma_table *g_gamma_table;
+	struct atomisp_rgb_gamma_table *b_gamma_table;
+	struct atomisp_vector      *motion_vector; /* For 2-axis DVS */
+	struct atomisp_shading_table *shading_table;
+	struct atomisp_morph_table   *morph_table;
+	struct atomisp_dvs_coefficients *dvs_coefs; /* DVS 1.0 coefficients */
+	struct atomisp_dvs2_coefficients *dvs2_coefs; /* DVS 2.0 coefficients */
+	struct atomisp_capture_config   *capture_config;
+	struct atomisp_anr_thres   *anr_thres;
+};
+
+#define ATOMISP_GAMMA_TABLE_SIZE        1024
+struct atomisp_gamma_table {
+	unsigned short data[ATOMISP_GAMMA_TABLE_SIZE];
+};
+
+/* Morphing table for advanced ISP.
+ * Each line of width elements takes up COORD_TABLE_EXT_WIDTH elements
+ * in memory.
+ */
+#define ATOMISP_MORPH_TABLE_NUM_PLANES  6
+struct atomisp_morph_table {
+	unsigned int enabled;
+
+	unsigned int height;
+	unsigned int width;	/* number of valid elements per line */
+	unsigned short __user *coordinates_x[ATOMISP_MORPH_TABLE_NUM_PLANES];
+	unsigned short __user *coordinates_y[ATOMISP_MORPH_TABLE_NUM_PLANES];
+};
+
+#define ATOMISP_NUM_SC_COLORS	4
+#define ATOMISP_SC_FLAG_QUERY	(1 << 0)
+
+struct atomisp_shading_table {
+	__u32 enable;
+
+	__u32 sensor_width;
+	__u32 sensor_height;
+	__u32 width;
+	__u32 height;
+	__u32 fraction_bits;
+
+	__u16 *data[ATOMISP_NUM_SC_COLORS];
+};
+
+struct atomisp_makernote_info {
+	/* bits 31-16: numerator, bits 15-0: denominator */
+	unsigned int focal_length;
+	/* bits 31-16: numerator, bits 15-0: denominator*/
+	unsigned int f_number_curr;
+	/*
+	* bits 31-24: max f-number numerator
+	* bits 23-16: max f-number denominator
+	* bits 15-8: min f-number numerator
+	* bits 7-0: min f-number denominator
+	*/
+	unsigned int f_number_range;
+};
+
+/* parameter for MACC */
+#define ATOMISP_NUM_MACC_AXES           16
+struct atomisp_macc_table {
+	short data[4 * ATOMISP_NUM_MACC_AXES];
+};
+
+struct atomisp_macc_config {
+	int color_effect;
+	struct atomisp_macc_table table;
+};
+
+/* Parameter for ctc parameter control */
+#define ATOMISP_CTC_TABLE_SIZE          1024
+struct atomisp_ctc_table {
+	unsigned short data[ATOMISP_CTC_TABLE_SIZE];
+};
+
+/* Parameter for overlay image loading */
+struct atomisp_overlay {
+	/* the frame containing the overlay data The overlay frame width should
+	 * be the multiples of 2*ISP_VEC_NELEMS. The overlay frame height
+	 * should be the multiples of 2.
+	 */
+	struct v4l2_framebuffer *frame;
+	/* Y value of overlay background */
+	unsigned char bg_y;
+	/* U value of overlay background */
+	char bg_u;
+	/* V value of overlay background */
+	char bg_v;
+	/* the blending percent of input data for Y subpixels */
+	unsigned char blend_input_perc_y;
+	/* the blending percent of input data for U subpixels */
+	unsigned char blend_input_perc_u;
+	/* the blending percent of input data for V subpixels */
+	unsigned char blend_input_perc_v;
+	/* the blending percent of overlay data for Y subpixels */
+	unsigned char blend_overlay_perc_y;
+	/* the blending percent of overlay data for U subpixels */
+	unsigned char blend_overlay_perc_u;
+	/* the blending percent of overlay data for V subpixels */
+	unsigned char blend_overlay_perc_v;
+	/* the overlay start x pixel position on output frame It should be the
+	   multiples of 2*ISP_VEC_NELEMS. */
+	unsigned int overlay_start_x;
+	/* the overlay start y pixel position on output frame It should be the
+	   multiples of 2. */
+	unsigned int overlay_start_y;
+};
+
+/* Sensor resolution specific data for AE calculation.*/
+struct atomisp_sensor_mode_data {
+	unsigned int coarse_integration_time_min;
+	unsigned int coarse_integration_time_max_margin;
+	unsigned int fine_integration_time_min;
+	unsigned int fine_integration_time_max_margin;
+	unsigned int fine_integration_time_def;
+	unsigned int frame_length_lines;
+	unsigned int line_length_pck;
+	unsigned int read_mode;
+	unsigned int vt_pix_clk_freq_mhz;
+	unsigned int crop_horizontal_start; /* Sensor crop start cord. (x0,y0)*/
+	unsigned int crop_vertical_start;
+	unsigned int crop_horizontal_end; /* Sensor crop end cord. (x1,y1)*/
+	unsigned int crop_vertical_end;
+	unsigned int output_width; /* input size to ISP after binning/scaling */
+	unsigned int output_height;
+	uint8_t binning_factor_x; /* horizontal binning factor used */
+	uint8_t binning_factor_y; /* vertical binning factor used */
+	uint8_t reserved[2];
+};
+
+struct atomisp_exposure {
+	unsigned int integration_time[8];
+	unsigned int shutter_speed[8];
+	unsigned int gain[4];
+	unsigned int aperture;
+};
+
+/* For texture streaming. */
+struct atomisp_bc_video_package {
+	int ioctl_cmd;
+	int device_id;
+	int inputparam;
+	int outputparam;
+};
+
+enum atomisp_focus_hp {
+	ATOMISP_FOCUS_HP_IN_PROGRESS = (1U << 2),
+	ATOMISP_FOCUS_HP_COMPLETE    = (2U << 2),
+	ATOMISP_FOCUS_HP_FAILED      = (3U << 2)
+};
+
+/* Masks */
+#define ATOMISP_FOCUS_STATUS_MOVING           (1U << 0)
+#define ATOMISP_FOCUS_STATUS_ACCEPTS_NEW_MOVE (1U << 1)
+#define ATOMISP_FOCUS_STATUS_HOME_POSITION    (3U << 2)
+
+enum atomisp_camera_port {
+	ATOMISP_CAMERA_PORT_SECONDARY,
+	ATOMISP_CAMERA_PORT_PRIMARY,
+	ATOMISP_CAMERA_PORT_TERTIARY,
+	ATOMISP_CAMERA_NR_PORTS
+};
+
+/* Flash modes. Default is off.
+ * Setting a flash to TORCH or INDICATOR mode will automatically
+ * turn it on. Setting it to FLASH mode will not turn on the flash
+ * until the FLASH_STROBE command is sent. */
+enum atomisp_flash_mode {
+	ATOMISP_FLASH_MODE_OFF,
+	ATOMISP_FLASH_MODE_FLASH,
+	ATOMISP_FLASH_MODE_TORCH,
+	ATOMISP_FLASH_MODE_INDICATOR,
+};
+
+/* Flash statuses, used by atomisp driver to check before starting
+ * flash and after having started flash. */
+enum atomisp_flash_status {
+	ATOMISP_FLASH_STATUS_OK,
+	ATOMISP_FLASH_STATUS_HW_ERROR,
+	ATOMISP_FLASH_STATUS_INTERRUPTED,
+	ATOMISP_FLASH_STATUS_TIMEOUT,
+};
+
+/* Frame status. This is used to detect corrupted frames and flash
+ * exposed frames. Usually, the first 2 frames coming out of the sensor
+ * are corrupted. When using flash, the frame before and the frame after
+ * the flash exposed frame may be partially exposed by flash. The ISP
+ * statistics for these frames should not be used by the 3A library.
+ * The frame status value can be found in the "reserved" field in the
+ * v4l2_buffer struct. */
+enum atomisp_frame_status {
+	ATOMISP_FRAME_STATUS_OK,
+	ATOMISP_FRAME_STATUS_CORRUPTED,
+	ATOMISP_FRAME_STATUS_FLASH_EXPOSED,
+	ATOMISP_FRAME_STATUS_FLASH_PARTIAL,
+	ATOMISP_FRAME_STATUS_FLASH_FAILED,
+};
+
+enum atomisp_acc_type {
+	ATOMISP_ACC_STANDALONE,	/* Stand-alone acceleration */
+	ATOMISP_ACC_OUTPUT,	/* Accelerator stage on output frame */
+	ATOMISP_ACC_VIEWFINDER	/* Accelerator stage on viewfinder frame */
+};
+
+enum atomisp_acc_arg_type {
+	ATOMISP_ACC_ARG_SCALAR_IN,    /* Scalar input argument */
+	ATOMISP_ACC_ARG_SCALAR_OUT,   /* Scalar output argument */
+	ATOMISP_ACC_ARG_SCALAR_IO,    /* Scalar in/output argument */
+	ATOMISP_ACC_ARG_PTR_IN,	     /* Pointer input argument */
+	ATOMISP_ACC_ARG_PTR_OUT,	     /* Pointer output argument */
+	ATOMISP_ACC_ARG_PTR_IO,	     /* Pointer in/output argument */
+	ATOMISP_ARG_PTR_NOFLUSH,  /* Pointer argument will not be flushed */
+	ATOMISP_ARG_PTR_STABLE,   /* Pointer input argument that is stable */
+	ATOMISP_ACC_ARG_FRAME	     /* Frame argument */
+};
+
+/** ISP memories, isp2400 */
+enum atomisp_acc_memory {
+	ATOMISP_ACC_MEMORY_PMEM0 = 0,
+	ATOMISP_ACC_MEMORY_DMEM0,
+	/* for backward compatibility */
+	ATOMISP_ACC_MEMORY_DMEM = ATOMISP_ACC_MEMORY_DMEM0,
+	ATOMISP_ACC_MEMORY_VMEM0,
+	ATOMISP_ACC_MEMORY_VAMEM0,
+	ATOMISP_ACC_MEMORY_VAMEM1,
+	ATOMISP_ACC_MEMORY_VAMEM2,
+	ATOMISP_ACC_MEMORY_HMEM0,
+	ATOMISP_ACC_NR_MEMORY
+};
+
+enum atomisp_ext_isp_id {
+	EXT_ISP_CID_ISO = 0,
+	EXT_ISP_CID_CAPTURE_HDR,
+	EXT_ISP_CID_CAPTURE_LLS,
+	EXT_ISP_CID_FOCUS_MODE,
+	EXT_ISP_CID_FOCUS_EXECUTION,
+	EXT_ISP_CID_TOUCH_POSX,
+	EXT_ISP_CID_TOUCH_POSY,
+	EXT_ISP_CID_CAF_STATUS,
+	EXT_ISP_CID_AF_STATUS,
+	EXT_ISP_CID_GET_AF_MODE,
+	EXT_ISP_CID_CAPTURE_BURST,
+	EXT_ISP_CID_FLASH_MODE,
+	EXT_ISP_CID_ZOOM,
+	EXT_ISP_CID_SHOT_MODE
+};
+
+#define EXT_ISP_FOCUS_MODE_NORMAL	0
+#define EXT_ISP_FOCUS_MODE_MACRO	1
+#define EXT_ISP_FOCUS_MODE_TOUCH_AF	2
+#define EXT_ISP_FOCUS_MODE_PREVIEW_CAF	3
+#define EXT_ISP_FOCUS_MODE_MOVIE_CAF	4
+#define EXT_ISP_FOCUS_MODE_FACE_CAF	5
+#define EXT_ISP_FOCUS_MODE_TOUCH_MACRO	6
+#define EXT_ISP_FOCUS_MODE_TOUCH_CAF	7
+
+#define EXT_ISP_FOCUS_STOP		0
+#define EXT_ISP_FOCUS_SEARCH		1
+#define EXT_ISP_PAN_FOCUSING		2
+
+#define EXT_ISP_CAF_RESTART_CHECK	1
+#define EXT_ISP_CAF_STATUS_FOCUSING	2
+#define EXT_ISP_CAF_STATUS_SUCCESS	3
+#define EXT_ISP_CAF_STATUS_FAIL         4
+
+#define EXT_ISP_AF_STATUS_INVALID	1
+#define EXT_ISP_AF_STATUS_FOCUSING	2
+#define EXT_ISP_AF_STATUS_SUCCESS	3
+#define EXT_ISP_AF_STATUS_FAIL		4
+
+enum atomisp_burst_capture_options {
+	EXT_ISP_BURST_CAPTURE_CTRL_START = 0,
+	EXT_ISP_BURST_CAPTURE_CTRL_STOP
+};
+
+#define EXT_ISP_FLASH_MODE_OFF		0
+#define EXT_ISP_FLASH_MODE_ON		1
+#define EXT_ISP_FLASH_MODE_AUTO		2
+#define EXT_ISP_LED_TORCH_OFF		3
+#define EXT_ISP_LED_TORCH_ON		4
+
+#define EXT_ISP_SHOT_MODE_AUTO		0
+#define EXT_ISP_SHOT_MODE_BEAUTY_FACE	1
+#define EXT_ISP_SHOT_MODE_BEST_PHOTO	2
+#define EXT_ISP_SHOT_MODE_DRAMA		3
+#define EXT_ISP_SHOT_MODE_BEST_FACE	4
+#define EXT_ISP_SHOT_MODE_ERASER	5
+#define EXT_ISP_SHOT_MODE_PANORAMA	6
+#define EXT_ISP_SHOT_MODE_RICH_TONE_HDR	7
+#define EXT_ISP_SHOT_MODE_NIGHT		8
+#define EXT_ISP_SHOT_MODE_SOUND_SHOT	9
+#define EXT_ISP_SHOT_MODE_ANIMATED_PHOTO	10
+#define EXT_ISP_SHOT_MODE_SPORTS	11
+
+struct atomisp_sp_arg {
+	enum atomisp_acc_arg_type type;	/* Type  of SP argument */
+	void                    *value;	/* Value of SP argument */
+	unsigned int             size;	/* Size  of SP argument */
+};
+
+/* Acceleration API */
+
+/* For CSS 1.0 only */
+struct atomisp_acc_fw_arg {
+	unsigned int fw_handle;
+	unsigned int index;
+	void __user *value;
+	size_t size;
+};
+
+/*
+ * Set arguments after first mapping with ATOMISP_IOC_ACC_S_MAPPED_ARG.
+ */
+struct atomisp_acc_s_mapped_arg {
+	unsigned int fw_handle;
+	__u32 memory;			/* one of enum atomisp_acc_memory */
+	size_t length;
+	unsigned long css_ptr;
+};
+
+struct atomisp_acc_fw_abort {
+	unsigned int fw_handle;
+	/* Timeout in us */
+	unsigned int timeout;
+};
+
+struct atomisp_acc_fw_load {
+	unsigned int size;
+	unsigned int fw_handle;
+	void __user *data;
+};
+
+/*
+ * Load firmware to specified pipeline.
+ */
+struct atomisp_acc_fw_load_to_pipe {
+	__u32 flags;			/* Flags, see below for valid values */
+	unsigned int fw_handle;		/* Handle, filled by kernel. */
+	__u32 size;			/* Firmware binary size */
+	void __user *data;		/* Pointer to firmware */
+	__u32 type;			/* Binary type */
+	__u32 reserved[3];		/* Set to zero */
+};
+
+#define ATOMISP_ACC_FW_LOAD_FL_PREVIEW		(1 << 0)
+#define ATOMISP_ACC_FW_LOAD_FL_COPY		(1 << 1)
+#define ATOMISP_ACC_FW_LOAD_FL_VIDEO		(1 << 2)
+#define ATOMISP_ACC_FW_LOAD_FL_CAPTURE		(1 << 3)
+#define ATOMISP_ACC_FW_LOAD_FL_ACC		(1 << 4)
+
+#define ATOMISP_ACC_FW_LOAD_TYPE_NONE		0 /* Normal binary: don't use */
+#define ATOMISP_ACC_FW_LOAD_TYPE_OUTPUT		1 /* Stage on output */
+#define ATOMISP_ACC_FW_LOAD_TYPE_VIEWFINDER	2 /* Stage on viewfinder */
+#define ATOMISP_ACC_FW_LOAD_TYPE_STANDALONE	3 /* Stand-alone acceleration */
+
+struct atomisp_acc_map {
+	__u32 flags;			/* Flags, see list below */
+	__u32 length;			/* Length of data in bytes */
+	void __user *user_ptr;		/* Pointer into user space */
+	unsigned long css_ptr;		/* Pointer into CSS address space */
+	__u32 reserved[4];		/* Set to zero */
+};
+
+#define ATOMISP_MAP_FLAG_NOFLUSH	0x0001	/* Do not flush cache */
+
+/*
+ * V4L2 private internal data interface.
+ * -----------------------------------------------------------------------------
+ * struct v4l2_private_int_data - request private data stored in video device
+ * internal memory.
+ * @size: sanity check to ensure userspace's buffer fits whole private data.
+ *	  If not, kernel will make partial copy (or nothing if @size == 0).
+ *	  @size is always corrected for the minimum necessary if IOCTL returns
+ *	  no error.
+ * @data: pointer to userspace buffer.
+ */
+struct v4l2_private_int_data {
+	__u32 size;
+	void __user *data;
+	__u32 reserved[2];
+};
+
+/*Private IOCTLs for ISP */
+#define ATOMISP_IOC_G_XNR \
+	_IOR('v', BASE_VIDIOC_PRIVATE + 0, int)
+#define ATOMISP_IOC_S_XNR \
+	_IOW('v', BASE_VIDIOC_PRIVATE + 0, int)
+#define ATOMISP_IOC_G_NR \
+	_IOR('v', BASE_VIDIOC_PRIVATE + 1, struct atomisp_nr_config)
+#define ATOMISP_IOC_S_NR \
+	_IOW('v', BASE_VIDIOC_PRIVATE + 1, struct atomisp_nr_config)
+#define ATOMISP_IOC_G_TNR \
+	_IOR('v', BASE_VIDIOC_PRIVATE + 2, struct atomisp_tnr_config)
+#define ATOMISP_IOC_S_TNR \
+	_IOW('v', BASE_VIDIOC_PRIVATE + 2, struct atomisp_tnr_config)
+#define ATOMISP_IOC_G_HISTOGRAM \
+	_IOWR('v', BASE_VIDIOC_PRIVATE + 3, struct atomisp_histogram)
+#define ATOMISP_IOC_S_HISTOGRAM \
+	_IOW('v', BASE_VIDIOC_PRIVATE + 3, struct atomisp_histogram)
+#define ATOMISP_IOC_G_BLACK_LEVEL_COMP \
+	_IOR('v', BASE_VIDIOC_PRIVATE + 4, struct atomisp_ob_config)
+#define ATOMISP_IOC_S_BLACK_LEVEL_COMP \
+	_IOW('v', BASE_VIDIOC_PRIVATE + 4, struct atomisp_ob_config)
+#define ATOMISP_IOC_G_EE \
+	_IOR('v', BASE_VIDIOC_PRIVATE + 5, struct atomisp_ee_config)
+#define ATOMISP_IOC_S_EE \
+	_IOW('v', BASE_VIDIOC_PRIVATE + 5, struct atomisp_ee_config)
+/* Digital Image Stabilization:
+ * 1. get dis statistics: reads DIS statistics from ISP (every frame)
+ * 2. set dis coefficients: set DIS filter coefficients (one time)
+ * 3. set dis motion vecotr: set motion vector (result of DIS, every frame)
+ */
+#define ATOMISP_IOC_G_DIS_STAT \
+	_IOWR('v', BASE_VIDIOC_PRIVATE + 6, struct atomisp_dis_statistics)
+
+#define ATOMISP_IOC_G_DVS2_BQ_RESOLUTIONS \
+	_IOR('v', BASE_VIDIOC_PRIVATE + 6, struct atomisp_dvs2_bq_resolutions)
+
+#define ATOMISP_IOC_S_DIS_COEFS \
+	_IOW('v', BASE_VIDIOC_PRIVATE + 6, struct atomisp_dis_coefficients)
+
+#define ATOMISP_IOC_S_DIS_VECTOR \
+	_IOW('v', BASE_VIDIOC_PRIVATE + 6, struct atomisp_dvs_6axis_config)
+
+#define ATOMISP_IOC_G_3A_STAT \
+	_IOWR('v', BASE_VIDIOC_PRIVATE + 7, struct atomisp_3a_statistics)
+#define ATOMISP_IOC_G_ISP_PARM \
+	_IOR('v', BASE_VIDIOC_PRIVATE + 8, struct atomisp_parm)
+#define ATOMISP_IOC_S_ISP_PARM \
+	_IOW('v', BASE_VIDIOC_PRIVATE + 8, struct atomisp_parm)
+#define ATOMISP_IOC_G_ISP_GAMMA \
+	_IOR('v', BASE_VIDIOC_PRIVATE + 9, struct atomisp_gamma_table)
+#define ATOMISP_IOC_S_ISP_GAMMA \
+	_IOW('v', BASE_VIDIOC_PRIVATE + 9, struct atomisp_gamma_table)
+#define ATOMISP_IOC_G_ISP_GDC_TAB \
+	_IOR('v', BASE_VIDIOC_PRIVATE + 10, struct atomisp_morph_table)
+#define ATOMISP_IOC_S_ISP_GDC_TAB \
+	_IOW('v', BASE_VIDIOC_PRIVATE + 10, struct atomisp_morph_table)
+#define ATOMISP_IOC_ISP_MAKERNOTE \
+	_IOWR('v', BASE_VIDIOC_PRIVATE + 11, struct atomisp_makernote_info)
+
+/* macc parameter control*/
+#define ATOMISP_IOC_G_ISP_MACC \
+	_IOR('v', BASE_VIDIOC_PRIVATE + 12, struct atomisp_macc_config)
+#define ATOMISP_IOC_S_ISP_MACC \
+	_IOW('v', BASE_VIDIOC_PRIVATE + 12, struct atomisp_macc_config)
+
+/* Defect pixel detection & Correction */
+#define ATOMISP_IOC_G_ISP_BAD_PIXEL_DETECTION \
+	_IOR('v', BASE_VIDIOC_PRIVATE + 13, struct atomisp_dp_config)
+#define ATOMISP_IOC_S_ISP_BAD_PIXEL_DETECTION \
+	_IOW('v', BASE_VIDIOC_PRIVATE + 13, struct atomisp_dp_config)
+
+/* False Color Correction */
+#define ATOMISP_IOC_G_ISP_FALSE_COLOR_CORRECTION \
+	_IOR('v', BASE_VIDIOC_PRIVATE + 14, struct atomisp_de_config)
+#define ATOMISP_IOC_S_ISP_FALSE_COLOR_CORRECTION \
+	_IOW('v', BASE_VIDIOC_PRIVATE + 14, struct atomisp_de_config)
+
+/* ctc parameter control */
+#define ATOMISP_IOC_G_ISP_CTC \
+	_IOR('v', BASE_VIDIOC_PRIVATE + 15, struct atomisp_ctc_table)
+#define ATOMISP_IOC_S_ISP_CTC \
+	_IOW('v', BASE_VIDIOC_PRIVATE + 15, struct atomisp_ctc_table)
+
+/* white balance Correction */
+#define ATOMISP_IOC_G_ISP_WHITE_BALANCE \
+	_IOR('v', BASE_VIDIOC_PRIVATE + 16, struct atomisp_wb_config)
+#define ATOMISP_IOC_S_ISP_WHITE_BALANCE \
+	_IOW('v', BASE_VIDIOC_PRIVATE + 16, struct atomisp_wb_config)
+
+/* fpn table loading */
+#define ATOMISP_IOC_S_ISP_FPN_TABLE \
+	_IOW('v', BASE_VIDIOC_PRIVATE + 17, struct v4l2_framebuffer)
+
+/* overlay image loading */
+#define ATOMISP_IOC_G_ISP_OVERLAY \
+	_IOWR('v', BASE_VIDIOC_PRIVATE + 18, struct atomisp_overlay)
+#define ATOMISP_IOC_S_ISP_OVERLAY \
+	_IOW('v', BASE_VIDIOC_PRIVATE + 18, struct atomisp_overlay)
+
+/* bcd driver bridge */
+#define ATOMISP_IOC_CAMERA_BRIDGE \
+	_IOWR('v', BASE_VIDIOC_PRIVATE + 19, struct atomisp_bc_video_package)
+
+/* Sensor resolution specific info for AE */
+#define ATOMISP_IOC_G_SENSOR_MODE_DATA \
+	_IOR('v', BASE_VIDIOC_PRIVATE + 20, struct atomisp_sensor_mode_data)
+
+#define ATOMISP_IOC_S_EXPOSURE \
+	_IOW('v', BASE_VIDIOC_PRIVATE + 21, struct atomisp_exposure)
+
+/* sensor calibration registers group */
+#define ATOMISP_IOC_G_SENSOR_CALIBRATION_GROUP \
+	_IOWR('v', BASE_VIDIOC_PRIVATE + 22, struct atomisp_calibration_group)
+
+/* white balance Correction */
+#define ATOMISP_IOC_G_3A_CONFIG \
+	_IOR('v', BASE_VIDIOC_PRIVATE + 23, struct atomisp_3a_config)
+#define ATOMISP_IOC_S_3A_CONFIG \
+	_IOW('v', BASE_VIDIOC_PRIVATE + 23, struct atomisp_3a_config)
+
+/* Accelerate ioctls */
+#define ATOMISP_IOC_ACC_LOAD \
+	_IOWR('v', BASE_VIDIOC_PRIVATE + 24, struct atomisp_acc_fw_load)
+
+#define ATOMISP_IOC_ACC_UNLOAD \
+	_IOWR('v', BASE_VIDIOC_PRIVATE + 24, unsigned int)
+
+/* For CSS 1.0 only */
+#define ATOMISP_IOC_ACC_S_ARG \
+	_IOW('v', BASE_VIDIOC_PRIVATE + 24, struct atomisp_acc_fw_arg)
+
+#define ATOMISP_IOC_ACC_START \
+	_IOW('v', BASE_VIDIOC_PRIVATE + 24, unsigned int)
+
+#define ATOMISP_IOC_ACC_WAIT \
+	_IOW('v', BASE_VIDIOC_PRIVATE + 25, unsigned int)
+
+#define ATOMISP_IOC_ACC_ABORT \
+	_IOW('v', BASE_VIDIOC_PRIVATE + 25, struct atomisp_acc_fw_abort)
+
+#define ATOMISP_IOC_ACC_DESTAB \
+	_IOW('v', BASE_VIDIOC_PRIVATE + 25, struct atomisp_acc_fw_arg)
+
+/* sensor OTP memory read */
+#define ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA \
+	_IOWR('v', BASE_VIDIOC_PRIVATE + 26, struct v4l2_private_int_data)
+
+/* LCS (shading) table write */
+#define ATOMISP_IOC_S_ISP_SHD_TAB \
+	_IOWR('v', BASE_VIDIOC_PRIVATE + 27, struct atomisp_shading_table)
+
+/* Gamma Correction */
+#define ATOMISP_IOC_G_ISP_GAMMA_CORRECTION \
+	_IOR('v', BASE_VIDIOC_PRIVATE + 28, struct atomisp_gc_config)
+
+#define ATOMISP_IOC_S_ISP_GAMMA_CORRECTION \
+	_IOW('v', BASE_VIDIOC_PRIVATE + 28, struct atomisp_gc_config)
+
+/* motor internal memory read */
+#define ATOMISP_IOC_G_MOTOR_PRIV_INT_DATA \
+	_IOWR('v', BASE_VIDIOC_PRIVATE + 29, struct v4l2_private_int_data)
+
+/*
+ * Ioctls to map and unmap user buffers to CSS address space for acceleration.
+ * User fills fields length and user_ptr and sets other fields to zero,
+ * kernel may modify the flags and sets css_ptr.
+ */
+#define ATOMISP_IOC_ACC_MAP \
+	_IOWR('v', BASE_VIDIOC_PRIVATE + 30, struct atomisp_acc_map)
+
+/* User fills fields length, user_ptr, and css_ptr and zeroes other fields. */
+#define ATOMISP_IOC_ACC_UNMAP \
+	_IOW('v', BASE_VIDIOC_PRIVATE + 30, struct atomisp_acc_map)
+
+#define ATOMISP_IOC_ACC_S_MAPPED_ARG \
+	_IOW('v', BASE_VIDIOC_PRIVATE + 30, struct atomisp_acc_s_mapped_arg)
+
+#define ATOMISP_IOC_ACC_LOAD_TO_PIPE \
+	_IOWR('v', BASE_VIDIOC_PRIVATE + 31, struct atomisp_acc_fw_load_to_pipe)
+
+#define ATOMISP_IOC_S_PARAMETERS \
+	_IOW('v', BASE_VIDIOC_PRIVATE + 32, struct atomisp_parameters)
+
+#define ATOMISP_IOC_S_CONT_CAPTURE_CONFIG \
+	_IOWR('v', BASE_VIDIOC_PRIVATE + 33, struct atomisp_cont_capture_conf)
+
+#define ATOMISP_IOC_G_METADATA \
+	_IOWR('v', BASE_VIDIOC_PRIVATE + 34, struct atomisp_metadata)
+
+#define ATOMISP_IOC_EXT_ISP_CTRL \
+	_IOWR('v', BASE_VIDIOC_PRIVATE + 35, struct atomisp_ext_isp_ctrl)
+
+/*
+ * Reserved ioctls. We have customer implementing it internally.
+ * We can't use both numbers to not cause ABI conflict.
+ * Anyway, those ioctls are hacks and not implemented by us:
+ *
+ * #define ATOMISP_IOC_G_SENSOR_REG \
+ *	_IOW('v', BASE_VIDIOC_PRIVATE + 55, struct atomisp_sensor_regs)
+ * #define ATOMISP_IOC_S_SENSOR_REG \
+ *	_IOW('v', BASE_VIDIOC_PRIVATE + 56, struct atomisp_sensor_regs)
+ */
+
+/*  ISP Private control IDs */
+#define V4L2_CID_ATOMISP_BAD_PIXEL_DETECTION \
+	(V4L2_CID_PRIVATE_BASE + 0)
+#define V4L2_CID_ATOMISP_POSTPROCESS_GDC_CAC \
+	(V4L2_CID_PRIVATE_BASE + 1)
+#define V4L2_CID_ATOMISP_VIDEO_STABLIZATION \
+	(V4L2_CID_PRIVATE_BASE + 2)
+#define V4L2_CID_ATOMISP_FIXED_PATTERN_NR \
+	(V4L2_CID_PRIVATE_BASE + 3)
+#define V4L2_CID_ATOMISP_FALSE_COLOR_CORRECTION \
+	(V4L2_CID_PRIVATE_BASE + 4)
+#define V4L2_CID_ATOMISP_LOW_LIGHT \
+	(V4L2_CID_PRIVATE_BASE + 5)
+
+/* Camera class:
+ * Exposure, Flash and privacy (indicator) light controls, to be upstreamed */
+#define V4L2_CID_CAMERA_LASTP1             (V4L2_CID_CAMERA_CLASS_BASE + 1024)
+
+#define V4L2_CID_FOCAL_ABSOLUTE            (V4L2_CID_CAMERA_LASTP1 + 0)
+#define V4L2_CID_FNUMBER_ABSOLUTE          (V4L2_CID_CAMERA_LASTP1 + 1)
+#define V4L2_CID_FNUMBER_RANGE             (V4L2_CID_CAMERA_LASTP1 + 2)
+
+/* Flash related CIDs, see also:
+ * http://linuxtv.org/downloads/v4l-dvb-apis/extended-controls.html\
+ * #flash-controls */
+
+/* Request a number of flash-exposed frames. The frame status can be
+ * found in the reserved field in the v4l2_buffer struct. */
+#define V4L2_CID_REQUEST_FLASH             (V4L2_CID_CAMERA_LASTP1 + 3)
+/* Query flash driver status. See enum atomisp_flash_status above. */
+#define V4L2_CID_FLASH_STATUS              (V4L2_CID_CAMERA_LASTP1 + 5)
+/* Set the flash mode (see enum atomisp_flash_mode) */
+#define V4L2_CID_FLASH_MODE                (V4L2_CID_CAMERA_LASTP1 + 10)
+
+/* VCM slew control */
+#define V4L2_CID_VCM_SLEW                  (V4L2_CID_CAMERA_LASTP1 + 11)
+/* VCM step time */
+#define V4L2_CID_VCM_TIMEING               (V4L2_CID_CAMERA_LASTP1 + 12)
+
+/* Query Focus Status */
+#define V4L2_CID_FOCUS_STATUS              (V4L2_CID_CAMERA_LASTP1 + 14)
+
+/* Query sensor's binning factor */
+#define V4L2_CID_BIN_FACTOR_HORZ	   (V4L2_CID_CAMERA_LASTP1 + 15)
+#define V4L2_CID_BIN_FACTOR_VERT	   (V4L2_CID_CAMERA_LASTP1 + 16)
+
+/* number of frames to skip at stream start */
+#define V4L2_CID_G_SKIP_FRAMES		   (V4L2_CID_CAMERA_LASTP1 + 17)
+
+/* Query sensor's 2A status */
+#define V4L2_CID_2A_STATUS                 (V4L2_CID_CAMERA_LASTP1 + 18)
+#define V4L2_2A_STATUS_AE_READY            (1 << 0)
+#define V4L2_2A_STATUS_AWB_READY           (1 << 1)
+
+#define V4L2_CID_FMT_AUTO			(V4L2_CID_CAMERA_LASTP1 + 19)
+
+#define V4L2_CID_RUN_MODE			(V4L2_CID_CAMERA_LASTP1 + 20)
+#define ATOMISP_RUN_MODE_VIDEO			1
+#define ATOMISP_RUN_MODE_STILL_CAPTURE		2
+#define ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE	3
+#define ATOMISP_RUN_MODE_PREVIEW		4
+#define ATOMISP_RUN_MODE_SDV			5
+
+#define V4L2_CID_ENABLE_VFPP			(V4L2_CID_CAMERA_LASTP1 + 21)
+#define V4L2_CID_ATOMISP_CONTINUOUS_MODE	(V4L2_CID_CAMERA_LASTP1 + 22)
+#define V4L2_CID_ATOMISP_CONTINUOUS_RAW_BUFFER_SIZE \
+						(V4L2_CID_CAMERA_LASTP1 + 23)
+#define V4L2_CID_ATOMISP_CONTINUOUS_VIEWFINDER \
+						(V4L2_CID_CAMERA_LASTP1 + 24)
+
+#define V4L2_CID_VFPP				(V4L2_CID_CAMERA_LASTP1 + 25)
+#define ATOMISP_VFPP_ENABLE			0
+#define ATOMISP_VFPP_DISABLE_SCALER		1
+#define ATOMISP_VFPP_DISABLE_LOWLAT		2
+
+/* Query real flash status register value */
+#define V4L2_CID_FLASH_STATUS_REGISTER  (V4L2_CID_CAMERA_LASTP1 + 26)
+
+#define V4L2_CID_START_ZSL_CAPTURE	(V4L2_CID_CAMERA_LASTP1 + 27)
+
+#define V4L2_BUF_FLAG_BUFFER_INVALID       0x0400
+#define V4L2_BUF_FLAG_BUFFER_VALID         0x0800
+
+#define V4L2_BUF_TYPE_VIDEO_CAPTURE_ION  (V4L2_BUF_TYPE_PRIVATE + 1024)
+
+#define V4L2_EVENT_ATOMISP_3A_STATS_READY   (V4L2_EVENT_PRIVATE_START + 1)
+#define V4L2_EVENT_ATOMISP_METADATA_READY   (V4L2_EVENT_PRIVATE_START + 2)
+
+/* Nonstandard color effects for V4L2_CID_COLORFX */
+enum {
+	V4L2_COLORFX_SKIN_WHITEN_LOW = 1001,
+	V4L2_COLORFX_SKIN_WHITEN_HIGH = 1002,
+	V4L2_COLORFX_WARM = 1003,
+	V4L2_COLORFX_COLD = 1004,
+	V4L2_COLORFX_WASHED = 1005,
+	V4L2_COLORFX_RED = 1006,
+	V4L2_COLORFX_GREEN = 1007,
+	V4L2_COLORFX_BLUE = 1008,
+	V4L2_COLORFX_PINK = 1009,
+	V4L2_COLORFX_YELLOW = 1010,
+	V4L2_COLORFX_PURPLE = 1011,
+};
+
+#endif /* _ATOM_ISP_H */
diff --git a/include/linux/atomisp_platform.h b/include/linux/atomisp_platform.h
new file mode 100644
index 0000000..720901d
--- /dev/null
+++ b/include/linux/atomisp_platform.h
@@ -0,0 +1,217 @@
+/*
+ * Support for Medifield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+#ifndef ATOMISP_PLATFORM_H_
+#define ATOMISP_PLATFORM_H_
+
+#include <linux/i2c.h>
+#include <linux/sfi.h>
+#include <media/v4l2-subdev.h>
+#include "atomisp.h"
+
+#define MAX_SENSORS_PER_PORT 4
+#define MAX_STREAMS_PER_CHANNEL 2
+
+enum atomisp_bayer_order {
+	atomisp_bayer_order_grbg,
+	atomisp_bayer_order_rggb,
+	atomisp_bayer_order_bggr,
+	atomisp_bayer_order_gbrg
+};
+
+enum atomisp_input_stream_id {
+	ATOMISP_INPUT_STREAM_GENERAL = 0,
+	ATOMISP_INPUT_STREAM_CAPTURE = 0,
+	ATOMISP_INPUT_STREAM_POSTVIEW,
+	ATOMISP_INPUT_STREAM_PREVIEW,
+	ATOMISP_INPUT_STREAM_VIDEO,
+	ATOMISP_INPUT_STREAM_NUM
+};
+
+enum atomisp_input_format {
+	ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY,/* 8 bits per subpixel (legacy) */
+	ATOMISP_INPUT_FORMAT_YUV420_8, /* 8 bits per subpixel */
+	ATOMISP_INPUT_FORMAT_YUV420_10,/* 10 bits per subpixel */
+	ATOMISP_INPUT_FORMAT_YUV420_16,/* 16 bits per subpixel */
+	ATOMISP_INPUT_FORMAT_YUV422_8, /* UYVY..UVYV, 8 bits per subpixel */
+	ATOMISP_INPUT_FORMAT_YUV422_10,/* UYVY..UVYV, 10 bits per subpixel */
+	ATOMISP_INPUT_FORMAT_YUV422_16,/* UYVY..UVYV, 16 bits per subpixel */
+	ATOMISP_INPUT_FORMAT_RGB_444,  /* BGR..BGR, 4 bits per subpixel */
+	ATOMISP_INPUT_FORMAT_RGB_555,  /* BGR..BGR, 5 bits per subpixel */
+	ATOMISP_INPUT_FORMAT_RGB_565,  /* BGR..BGR, 5 bits B and R, 6 bits G */
+	ATOMISP_INPUT_FORMAT_RGB_666,  /* BGR..BGR, 6 bits per subpixel */
+	ATOMISP_INPUT_FORMAT_RGB_888,  /* BGR..BGR, 8 bits per subpixel */
+	ATOMISP_INPUT_FORMAT_RAW_6,    /* RAW data, 6 bits per pixel */
+	ATOMISP_INPUT_FORMAT_RAW_7,    /* RAW data, 7 bits per pixel */
+	ATOMISP_INPUT_FORMAT_RAW_8,    /* RAW data, 8 bits per pixel */
+	ATOMISP_INPUT_FORMAT_RAW_10,   /* RAW data, 10 bits per pixel */
+	ATOMISP_INPUT_FORMAT_RAW_12,   /* RAW data, 12 bits per pixel */
+	ATOMISP_INPUT_FORMAT_RAW_14,   /* RAW data, 14 bits per pixel */
+	ATOMISP_INPUT_FORMAT_RAW_16,   /* RAW data, 16 bits per pixel */
+	ATOMISP_INPUT_FORMAT_BINARY_8, /* Binary byte stream. */
+
+	/* CSI2-MIPI specific format: Generic short packet data. It is used to
+	 * keep the timing information for the opening/closing of shutters,
+	 * triggering of flashes and etc.
+	 */
+	ATOMISP_INPUT_FORMAT_GENERIC_SHORT1,  /* Generic Short Packet Code 1 */
+	ATOMISP_INPUT_FORMAT_GENERIC_SHORT2,  /* Generic Short Packet Code 2 */
+	ATOMISP_INPUT_FORMAT_GENERIC_SHORT3,  /* Generic Short Packet Code 3 */
+	ATOMISP_INPUT_FORMAT_GENERIC_SHORT4,  /* Generic Short Packet Code 4 */
+	ATOMISP_INPUT_FORMAT_GENERIC_SHORT5,  /* Generic Short Packet Code 5 */
+	ATOMISP_INPUT_FORMAT_GENERIC_SHORT6,  /* Generic Short Packet Code 6 */
+	ATOMISP_INPUT_FORMAT_GENERIC_SHORT7,  /* Generic Short Packet Code 7 */
+	ATOMISP_INPUT_FORMAT_GENERIC_SHORT8,  /* Generic Short Packet Code 8 */
+
+	/* CSI2-MIPI specific format: YUV data.
+	 */
+	ATOMISP_INPUT_FORMAT_YUV420_8_SHIFT,  /* YUV420 8-bit (Chroma Shifted
+						 Pixel Sampling) */
+	ATOMISP_INPUT_FORMAT_YUV420_10_SHIFT, /* YUV420 8-bit (Chroma Shifted
+						 Pixel Sampling) */
+
+	/* CSI2-MIPI specific format: Generic long packet data
+	 */
+	ATOMISP_INPUT_FORMAT_EMBEDDED, /* Embedded 8-bit non Image Data */
+
+	/* CSI2-MIPI specific format: User defined byte-based data. For example,
+	 * the data transmitter (e.g. the SoC sensor) can keep the JPEG data as
+	 * the User Defined Data Type 4 and the MPEG data as the
+	 * User Defined Data Type 7.
+	 */
+	ATOMISP_INPUT_FORMAT_USER_DEF1,  /* User defined 8-bit data type 1 */
+	ATOMISP_INPUT_FORMAT_USER_DEF2,  /* User defined 8-bit data type 2 */
+	ATOMISP_INPUT_FORMAT_USER_DEF3,  /* User defined 8-bit data type 3 */
+	ATOMISP_INPUT_FORMAT_USER_DEF4,  /* User defined 8-bit data type 4 */
+	ATOMISP_INPUT_FORMAT_USER_DEF5,  /* User defined 8-bit data type 5 */
+	ATOMISP_INPUT_FORMAT_USER_DEF6,  /* User defined 8-bit data type 6 */
+	ATOMISP_INPUT_FORMAT_USER_DEF7,  /* User defined 8-bit data type 7 */
+	ATOMISP_INPUT_FORMAT_USER_DEF8,  /* User defined 8-bit data type 8 */
+};
+
+enum intel_v4l2_subdev_type {
+	RAW_CAMERA = 1,
+	SOC_CAMERA = 2,
+	CAMERA_MOTOR = 3,
+	LED_FLASH = 4,
+	XENON_FLASH = 5,
+	FILE_INPUT = 6,
+	TEST_PATTERN = 7,
+};
+
+struct intel_v4l2_subdev_id {
+	char name[17];
+	enum intel_v4l2_subdev_type type;
+	enum atomisp_camera_port    port;
+};
+
+struct intel_v4l2_subdev_i2c_board_info {
+	struct i2c_board_info board_info;
+	int i2c_adapter_id;
+};
+
+struct intel_v4l2_subdev_table {
+	struct intel_v4l2_subdev_i2c_board_info v4l2_subdev;
+	enum intel_v4l2_subdev_type type;
+	enum atomisp_camera_port port;
+};
+
+struct atomisp_platform_data {
+	struct intel_v4l2_subdev_table *subdevs;
+	const struct soft_platform_id *spid;
+};
+
+/* Describe the capacities of one single sensor. */
+struct atomisp_sensor_caps {
+	/* The number of streams this sensor can output. */
+	int stream_num;
+};
+
+/* Describe the capacities of sensors connected to one camera port. */
+struct atomisp_camera_caps {
+	/* The number of sensors connected to this camera port. */
+	int sensor_num;
+	/* The capacities of each sensor. */
+	struct atomisp_sensor_caps sensor[MAX_SENSORS_PER_PORT];
+};
+
+/*
+ *  Sensor of external ISP can send multiple steams with different mipi data
+ * type in the same virtual channel. This information needs to come from the
+ * sensor or external ISP
+ */
+struct atomisp_isys_config_info {
+	u8 input_format;
+	u16 width;
+	u16 height;
+};
+
+struct atomisp_input_stream_info {
+	enum atomisp_input_stream_id stream;
+	u8 enable;
+	/* Sensor driver fills ch_id with the id
+	   of the virtual channel. */
+	u8 ch_id;
+	/* Tells how many streams in this virtual channel. If 0 ignore rest
+	 * and the input format will be from mipi_info */
+	u8 isys_configs;
+	/*
+	 * if more isys_configs is more than 0, sensor needs to configure the
+	 * input format differently. width and height can be 0. If width and
+	 * height is not zero, then the corresponsing data needs to be set
+	 */
+	struct atomisp_isys_config_info isys_info[MAX_STREAMS_PER_CHANNEL];
+};
+
+struct camera_sensor_platform_data {
+	int (*gpio_ctrl)(struct v4l2_subdev *subdev, int flag);
+	int (*flisclk_ctrl)(struct v4l2_subdev *subdev, int flag);
+	int (*power_ctrl)(struct v4l2_subdev *subdev, int flag);
+	int (*csi_cfg)(struct v4l2_subdev *subdev, int flag);
+	bool (*low_fps)(void);
+	int (*platform_init)(struct i2c_client *);
+	int (*platform_deinit)(void);
+	char *(*msr_file_name)(void);
+	struct atomisp_camera_caps *(*get_camera_caps)(void);
+	int (*gpio_intr_ctrl)(struct v4l2_subdev *subdev);
+};
+
+struct camera_af_platform_data {
+	int (*power_ctrl)(struct v4l2_subdev *subdev, int flag);
+};
+
+const struct camera_af_platform_data *camera_get_af_platform_data(void);
+
+struct camera_mipi_info {
+	enum atomisp_camera_port        port;
+	unsigned int                    num_lanes;
+	enum atomisp_input_format       input_format;
+	enum atomisp_bayer_order        raw_bayer_order;
+	struct atomisp_sensor_mode_data data;
+	enum atomisp_input_format       metadata_format;
+	uint32_t                        metadata_width;
+	uint32_t                        metadata_height;
+	const uint32_t                  *metadata_effective_width;
+};
+
+extern const struct atomisp_platform_data *atomisp_get_platform_data(void);
+extern const struct atomisp_camera_caps *atomisp_get_default_camera_caps(void);
+
+#endif /* ATOMISP_PLATFORM_H_ */
diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h
index f7f1d71..089743a 100644
--- a/include/linux/balloon_compaction.h
+++ b/include/linux/balloon_compaction.h
@@ -159,6 +159,26 @@
 }
 
 /*
+ * isolated_balloon_page - identify an isolated balloon page on private
+ *			   compaction/migration page lists.
+ *
+ * After a compaction thread isolates a balloon page for migration, it raises
+ * the page refcount to prevent concurrent compaction threads from re-isolating
+ * the same page. For that reason putback_movable_pages(), or other routines
+ * that need to identify isolated balloon pages on private pagelists, cannot
+ * rely on balloon_page_movable() to accomplish the task.
+ */
+static inline bool isolated_balloon_page(struct page *page)
+{
+	/* Already isolated balloon pages, by default, have a raised refcount */
+	if (page_flags_cleared(page) && !page_mapped(page) &&
+	    page_count(page) >= 2)
+		return __is_movable_balloon_page(page);
+
+	return false;
+}
+
+/*
  * balloon_page_insert - insert a page into the balloon's page list and make
  *		         the page->mapping assignment accordingly.
  * @page    : page to be assigned as a 'balloon page'
@@ -243,6 +263,11 @@
 	return false;
 }
 
+static inline bool isolated_balloon_page(struct page *page)
+{
+	return false;
+}
+
 static inline bool balloon_page_isolate(struct page *page)
 {
 	return false;
diff --git a/include/linux/board_asustek.h b/include/linux/board_asustek.h
new file mode 100644
index 0000000..07125f7
--- /dev/null
+++ b/include/linux/board_asustek.h
@@ -0,0 +1,36 @@
+/* include/linux/board_asustek.h
+ *
+ * Copyright (c) 2012-2014, ASUSTek Computer Inc.
+ * Author: Paris Yeh <paris_yeh@asus.com>
+ *	   Hank Lee  <hank_lee@asus.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _INTEL_MID_BOARD_ASUSTEK_H
+#define _INTEL_MID_BOARD_ASUSTEK_H
+
+typedef enum {
+	HW_REV_INVALID = -1,
+	HW_REV_A = 0,
+	HW_REV_B = 1,
+	HW_REV_C = 2,
+	HW_REV_D = 3,
+	HW_REV_E = 4,
+	HW_REV_F = 5,
+	HW_REV_G = 6,
+	HW_REV_H = 7,
+	HW_REV_MAX = 8
+} hw_rev;
+
+hw_rev asustek_get_hw_rev(void);
+
+#endif /* _INTEL_MID_BOARD_ASUSTEK_H */
diff --git a/include/linux/ceph/decode.h b/include/linux/ceph/decode.h
index 379f715..0442c3d 100644
--- a/include/linux/ceph/decode.h
+++ b/include/linux/ceph/decode.h
@@ -160,11 +160,6 @@
 static inline void ceph_encode_timespec(struct ceph_timespec *tv,
 					const struct timespec *ts)
 {
-	BUG_ON(ts->tv_sec < 0);
-	BUG_ON(ts->tv_sec > (__kernel_time_t)U32_MAX);
-	BUG_ON(ts->tv_nsec < 0);
-	BUG_ON(ts->tv_nsec > (long)U32_MAX);
-
 	tv->tv_sec = cpu_to_le32((u32)ts->tv_sec);
 	tv->tv_nsec = cpu_to_le32((u32)ts->tv_nsec);
 }
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index a2bcbd2..a6fc777 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -647,22 +647,60 @@
 	return cgrp->subsys[subsys_id];
 }
 
-/*
- * function to get the cgroup_subsys_state which allows for extra
- * rcu_dereference_check() conditions, such as locks used during the
- * cgroup_subsys::attach() methods.
+/**
+ * task_css_set_check - obtain a task's css_set with extra access conditions
+ * @task: the task to obtain css_set for
+ * @__c: extra condition expression to be passed to rcu_dereference_check()
+ *
+ * A task's css_set is RCU protected, initialized and exited while holding
+ * task_lock(), and can only be modified while holding both cgroup_mutex
+ * and task_lock() while the task is alive.  This macro verifies that the
+ * caller is inside proper critical section and returns @task's css_set.
+ *
+ * The caller can also specify additional allowed conditions via @__c, such
+ * as locks used during the cgroup_subsys::attach() methods.
  */
 #ifdef CONFIG_PROVE_RCU
 extern struct mutex cgroup_mutex;
-#define task_subsys_state_check(task, subsys_id, __c)			\
-	rcu_dereference_check((task)->cgroups->subsys[(subsys_id)],	\
-			      lockdep_is_held(&(task)->alloc_lock) ||	\
-			      lockdep_is_held(&cgroup_mutex) || (__c))
+#define task_css_set_check(task, __c)					\
+	rcu_dereference_check((task)->cgroups,				\
+		lockdep_is_held(&(task)->alloc_lock) ||			\
+		lockdep_is_held(&cgroup_mutex) || (__c))
 #else
-#define task_subsys_state_check(task, subsys_id, __c)			\
-	rcu_dereference((task)->cgroups->subsys[(subsys_id)])
+#define task_css_set_check(task, __c)					\
+	rcu_dereference((task)->cgroups)
 #endif
 
+/**
+ * task_subsys_state_check - obtain css for (task, subsys) w/ extra access conds
+ * @task: the target task
+ * @subsys_id: the target subsystem ID
+ * @__c: extra condition expression to be passed to rcu_dereference_check()
+ *
+ * Return the cgroup_subsys_state for the (@task, @subsys_id) pair.  The
+ * synchronization rules are the same as task_css_set_check().
+ */
+#define task_subsys_state_check(task, subsys_id, __c)			\
+	task_css_set_check((task), (__c))->subsys[(subsys_id)]
+
+/**
+ * task_css_set - obtain a task's css_set
+ * @task: the task to obtain css_set for
+ *
+ * See task_css_set_check().
+ */
+static inline struct css_set *task_css_set(struct task_struct *task)
+{
+	return task_css_set_check(task, false);
+}
+
+/**
+ * task_subsys_state - obtain css for (task, subsys)
+ * @task: the target task
+ * @subsys_id: the target subsystem ID
+ *
+ * See task_subsys_state_check().
+ */
 static inline struct cgroup_subsys_state *
 task_subsys_state(struct task_struct *task, int subsys_id)
 {
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 7f0c1dd..ec1aee4 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -669,6 +669,13 @@
 
 int compat_restore_altstack(const compat_stack_t __user *uss);
 int __compat_save_altstack(compat_stack_t __user *, unsigned long);
+#define compat_save_altstack_ex(uss, sp) do { \
+	compat_stack_t __user *__uss = uss; \
+	struct task_struct *t = current; \
+	put_user_ex(ptr_to_compat((void __user *)t->sas_ss_sp), &__uss->ss_sp); \
+	put_user_ex(sas_ss_flags(sp), &__uss->ss_flags); \
+	put_user_ex(t->sas_ss_size, &__uss->ss_size); \
+} while (0);
 
 asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid,
 						 struct compat_timespec __user *interval);
diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
index 842de22..ded4299 100644
--- a/include/linux/compiler-gcc4.h
+++ b/include/linux/compiler-gcc4.h
@@ -65,6 +65,21 @@
 #define __visible __attribute__((externally_visible))
 #endif
 
+/*
+ * GCC 'asm goto' miscompiles certain code sequences:
+ *
+ *   http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
+ *
+ * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
+ * Fixed in GCC 4.8.2 and later versions.
+ *
+ * (asm goto is automatically volatile - the naming reflects this.)
+ */
+#if GCC_VERSION <= 40801
+# define asm_volatile_goto(x...)	do { asm goto(x); asm (""); } while (0)
+#else
+# define asm_volatile_goto(x...)	do { asm goto(x); } while (0)
+#endif
 
 #ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
 #if GCC_VERSION >= 40400
diff --git a/include/linux/console.h b/include/linux/console.h
index 73bab0f..c9daaef 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -118,6 +118,7 @@
 #define CON_BOOT	(8)
 #define CON_ANYTIME	(16) /* Safe to call when cpu is offline */
 #define CON_BRL		(32) /* Used for a braille device */
+#define CON_IGNORELEVEL	(64) /* Used to ignore log level for a console */
 
 struct console {
 	char	name[16];
diff --git a/include/linux/cpu_cooling.h b/include/linux/cpu_cooling.h
index 282e270..a5d52ee 100644
--- a/include/linux/cpu_cooling.h
+++ b/include/linux/cpu_cooling.h
@@ -41,7 +41,7 @@
  */
 void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev);
 
-unsigned long cpufreq_cooling_get_level(unsigned int, unsigned int);
+unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq);
 #else /* !CONFIG_CPU_THERMAL */
 static inline struct thermal_cooling_device *
 cpufreq_cooling_register(const struct cpumask *clip_cpus)
@@ -54,7 +54,7 @@
 	return;
 }
 static inline
-unsigned long cpufreq_cooling_get_level(unsigned int, unsigned int)
+unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq)
 {
 	return THERMAL_CSTATE_INVALID;
 }
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 8f04062..364f639 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -114,6 +114,7 @@
 };
 
 #ifdef CONFIG_CPU_IDLE
+DECLARE_PER_CPU(int, update_buckets);
 extern void disable_cpuidle(void);
 extern int cpuidle_idle_call(void);
 extern int cpuidle_register_driver(struct cpuidle_driver *drv);
@@ -139,6 +140,7 @@
 extern void cpuidle_unregister_cpu_driver(struct cpuidle_driver *drv, int cpu);
 
 #else
+DECLARE_PER_CPU(int, update_buckets);
 static inline void disable_cpuidle(void) { }
 static inline int cpuidle_idle_call(void) { return -ENODEV; }
 static inline int cpuidle_register_driver(struct cpuidle_driver *drv)
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 3cd3247..ef5bd3f 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -405,13 +405,14 @@
 union map_info *dm_get_mapinfo(struct bio *bio);
 union map_info *dm_get_rq_mapinfo(struct request *rq);
 
+struct queue_limits *dm_get_queue_limits(struct mapped_device *md);
+
 /*
  * Geometry functions.
  */
 int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
 int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);
 
-
 /*-----------------------------------------------------------------
  * Functions for manipulating device-mapper tables.
  *---------------------------------------------------------------*/
diff --git a/include/linux/device.h b/include/linux/device.h
index c0a1261..45e56a0 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -516,6 +516,9 @@
 #define DEVICE_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \
 	struct device_attribute dev_attr_##_name =		\
 		__ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store)
+#define DEVICE_EARLY_SUSPEND_ATTR(_store) \
+	DEVICE_ATTR(early_suspend, S_IWUSR|S_IWGRP, \
+		NULL, _store)
 
 extern int device_create_file(struct device *device,
 			      const struct device_attribute *entry);
diff --git a/include/linux/early_suspend_sysfs.h b/include/linux/early_suspend_sysfs.h
new file mode 100644
index 0000000..fb99d3d
--- /dev/null
+++ b/include/linux/early_suspend_sysfs.h
@@ -0,0 +1,24 @@
+/*
+ * early_suspend_sysfs.h: Early suspend sysfs header file
+ *
+ * (C) Copyright 2013 Intel Corporation
+ * Author: Sathyanarayanan KN(sathyanarayanan.kuppuswamy@intel.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#ifndef _LINUX_EARLY_SUSPEND_SYSFS_H
+#define _LINUX_EARLY_SUSPEND_SYSFS_H
+
+#define EARLY_SUSPEND_STATUS_LEN 1
+#define EARLY_SUSPEND_ON  "1"
+#define EARLY_SUSPEND_OFF "0"
+
+struct device;
+int register_early_suspend_device(struct device *dev);
+void unregister_early_suspend_device(struct device *dev);
+#endif
+
diff --git a/include/linux/earlysuspend.h b/include/linux/earlysuspend.h
new file mode 100644
index 0000000..8343b81
--- /dev/null
+++ b/include/linux/earlysuspend.h
@@ -0,0 +1,56 @@
+/* include/linux/earlysuspend.h
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_EARLYSUSPEND_H
+#define _LINUX_EARLYSUSPEND_H
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/list.h>
+#endif
+
+/* The early_suspend structure defines suspend and resume hooks to be called
+ * when the user visible sleep state of the system changes, and a level to
+ * control the order. They can be used to turn off the screen and input
+ * devices that are not used for wakeup.
+ * Suspend handlers are called in low to high level order, resume handlers are
+ * called in the opposite order. If, when calling register_early_suspend,
+ * the suspend handlers have already been called without a matching call to the
+ * resume handlers, the suspend handler will be called directly from
+ * register_early_suspend. This direct call can violate the normal level order.
+ */
+enum {
+	EARLY_SUSPEND_LEVEL_BLANK_SCREEN = 50,
+	EARLY_SUSPEND_LEVEL_STOP_DRAWING = 100,
+	EARLY_SUSPEND_LEVEL_DISABLE_FB = 150,
+};
+struct early_suspend {
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	struct list_head link;
+	int level;
+	void (*suspend)(struct early_suspend *h);
+	void (*resume)(struct early_suspend *h);
+#endif
+};
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+void register_early_suspend(struct early_suspend *handler);
+void unregister_early_suspend(struct early_suspend *handler);
+#else
+#define register_early_suspend(handler) do { } while (0)
+#define unregister_early_suspend(handler) do { } while (0)
+#endif
+
+#endif
+
diff --git a/include/linux/edac.h b/include/linux/edac.h
index 0b76327..5c6d7fb 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -622,7 +622,7 @@
  */
 struct mem_ctl_info {
 	struct device			dev;
-	struct bus_type			bus;
+	struct bus_type			*bus;
 
 	struct list_head link;	/* for global list of mem_ctl_info structs */
 
@@ -742,4 +742,9 @@
 #endif
 };
 
+/*
+ * Maximum number of memory controllers in the coherent fabric.
+ */
+#define EDAC_MAX_MCS	16
+
 #endif
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index acd0312..306dd8c 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -7,6 +7,7 @@
 #ifdef CONFIG_BLOCK
 
 struct io_cq;
+struct elevator_type;
 
 typedef int (elevator_merge_fn) (struct request_queue *, struct request **,
 				 struct bio *);
@@ -35,7 +36,8 @@
 typedef void (elevator_activate_req_fn) (struct request_queue *, struct request *);
 typedef void (elevator_deactivate_req_fn) (struct request_queue *, struct request *);
 
-typedef int (elevator_init_fn) (struct request_queue *);
+typedef int (elevator_init_fn) (struct request_queue *,
+				struct elevator_type *e);
 typedef void (elevator_exit_fn) (struct elevator_queue *);
 
 struct elevator_ops
@@ -155,6 +157,8 @@
 extern void elevator_exit(struct elevator_queue *);
 extern int elevator_change(struct request_queue *, const char *);
 extern bool elv_rq_merge_ok(struct request *, struct bio *);
+extern struct elevator_queue *elevator_alloc(struct request_queue *,
+					struct elevator_type *);
 
 /*
  * Helper functions.
diff --git a/include/linux/extcon.h b/include/linux/extcon.h
index fcb51c8..53ecf2b 100644
--- a/include/linux/extcon.h
+++ b/include/linux/extcon.h
@@ -26,6 +26,7 @@
 #include <linux/device.h>
 #include <linux/notifier.h>
 #include <linux/sysfs.h>
+#include <linux/kconfig.h>
 
 #define SUPPORTED_CABLE_MAX	32
 #define CABLE_NAME_MAX		30
@@ -55,6 +56,11 @@
 	EXTCON_FAST_CHARGER,
 	EXTCON_SLOW_CHARGER,
 	EXTCON_CHARGE_DOWNSTREAM, /* Charging an external device */
+	EXTCON_SDP,
+	EXTCON_DCP,
+	EXTCON_CDP,
+	EXTCON_ACA,
+	EXTCON_AC,
 	EXTCON_HDMI,
 	EXTCON_MHL,
 	EXTCON_DVI,
@@ -119,6 +125,7 @@
 	/* --- Optional callbacks to override class functions --- */
 	ssize_t	(*print_name)(struct extcon_dev *edev, char *buf);
 	ssize_t	(*print_state)(struct extcon_dev *edev, char *buf);
+	int (*get_cable_properties)(const char *cable_name, void *cable_props);
 
 	/* --- Internal data. Please do not set. --- */
 	struct device	*dev;
@@ -174,6 +181,23 @@
 	unsigned long previous_value;
 };
 
+enum extcon_chrgr_cbl_stat {
+	EXTCON_CHRGR_CABLE_CONNECTED,
+	EXTCON_CHRGR_CABLE_DISCONNECTED,
+	EXTCON_CHRGR_CABLE_SUSPENDED,
+	EXTCON_CHRGR_CABLE_RESUMED,
+	EXTCON_CHRGR_CABLE_UPDATED,
+};
+
+struct extcon_chrgr_cbl_props {
+	enum extcon_chrgr_cbl_stat cable_stat;
+	unsigned long ma;
+};
+
+/* extcon device register notify events */
+#define EXTCON_DEVICE_ADD		0x0001
+#define EXTCON_DEVICE_REMOVE		0x0002
+
 #if IS_ENABLED(CONFIG_EXTCON)
 
 /*
@@ -183,6 +207,9 @@
 extern int extcon_dev_register(struct extcon_dev *edev, struct device *dev);
 extern void extcon_dev_unregister(struct extcon_dev *edev);
 extern struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name);
+extern int extcon_num_of_cable_devs(const char *cable);
+extern void extcon_dev_register_notify(struct notifier_block *nb);
+extern void extcon_dev_unregister_notify(struct notifier_block *nb);
 
 /*
  * get/set/update_state access the 32b encoded state value, which represents
@@ -205,6 +232,7 @@
  */
 extern int extcon_find_cable_index(struct extcon_dev *sdev,
 				   const char *cable_name);
+extern int extcon_find_cable_type(struct extcon_dev *edev, int index);
 extern int extcon_get_cable_state_(struct extcon_dev *edev, int cable_index);
 extern int extcon_set_cable_state_(struct extcon_dev *edev, int cable_index,
 				   bool cable_state);
@@ -245,6 +273,8 @@
 }
 
 static inline void extcon_dev_unregister(struct extcon_dev *edev) { }
+static void extcon_dev_register_notify(struct notifier_block *nb) { }
+static void extcon_dev_unregister_notify(struct notifier_block *nb) { }
 
 static inline u32 extcon_get_state(struct extcon_dev *edev)
 {
@@ -268,6 +298,11 @@
 	return 0;
 }
 
+static int extcon_find_cable_type(struct extcon_dev *edev, int index)
+{
+	return 0;
+}
+
 static inline int extcon_get_cable_state_(struct extcon_dev *edev,
 					  int cable_index)
 {
@@ -297,6 +332,11 @@
 	return NULL;
 }
 
+static inline int extcon_num_of_cable_devs(const char *cable)
+{
+	return 0;
+}
+
 static inline int extcon_register_notifier(struct extcon_dev *edev,
 					   struct notifier_block *nb)
 {
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
index 191501a..217e4b4 100644
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -434,6 +434,7 @@
 	int type;
 	int channel;
 	int speed;
+	bool drop_overflow_headers;
 	size_t header_size;
 	union {
 		fw_iso_callback_t sc;
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 4372658..120d57a 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -78,6 +78,11 @@
 	/* trace_seq for __print_flags() and __print_symbolic() etc. */
 	struct trace_seq	tmp_seq;
 
+	cpumask_var_t		started;
+
+	/* it's true when current open file is snapshot */
+	bool			snapshot;
+
 	/* The below is zeroed out in pipe_read */
 	struct trace_seq	seq;
 	struct trace_entry	*ent;
@@ -90,10 +95,7 @@
 	loff_t			pos;
 	long			idx;
 
-	cpumask_var_t		started;
-
-	/* it's true when current open file is snapshot */
-	bool			snapshot;
+	/* All new field here will be zeroed out in pipe_read */
 };
 
 enum trace_iter_flags {
@@ -332,7 +334,7 @@
 			      const char *name, int offset, int size,
 			      int is_signed, int filter_type);
 extern int trace_add_event_call(struct ftrace_event_call *call);
-extern void trace_remove_event_call(struct ftrace_event_call *call);
+extern int trace_remove_event_call(struct ftrace_event_call *call);
 
 #define is_signed_type(type)	(((type)(-1)) < (type)1)
 
diff --git a/include/linux/gpio_keys.h b/include/linux/gpio_keys.h
index a7e977f..e074680 100644
--- a/include/linux/gpio_keys.h
+++ b/include/linux/gpio_keys.h
@@ -10,6 +10,9 @@
 	int active_low;
 	const char *desc;
 	unsigned int type;	/* input event type (EV_KEY, EV_SW, EV_ABS) */
+#ifdef CONFIG_ACPI
+	int acpi_idx;		/* ACPI gpio index */
+#endif
 	int wakeup;		/* configure the button as a wake-up source */
 	int debounce_interval;	/* debounce ticks interval in msecs */
 	bool can_disable;
diff --git a/include/linux/gsmmux.h b/include/linux/gsmmux.h
index c25e947..9975210 100644
--- a/include/linux/gsmmux.h
+++ b/include/linux/gsmmux.h
@@ -1,8 +1,9 @@
 #ifndef _LINUX_GSMMUX_H
 #define _LINUX_GSMMUX_H
 
-struct gsm_config
-{
+#include <linux/if.h>
+
+struct gsm_config {
 	unsigned int adaption;
 	unsigned int encapsulation;
 	unsigned int initiator;
@@ -14,12 +15,15 @@
 	unsigned int mtu;
 	unsigned int k;
 	unsigned int i;
-	unsigned int unused[8];		/* Padding for expansion without
+	unsigned int clocal;
+	unsigned int burst;
+	unsigned int unused[6];		/* Padding for expansion without
 					   breaking stuff */
 };
 
 #define GSMIOC_GETCONF		_IOR('G', 0, struct gsm_config)
 #define GSMIOC_SETCONF		_IOW('G', 1, struct gsm_config)
+#define GSMIOC_DEMUX		_IO('G', 4)
 
 struct gsm_netconfig {
 	unsigned int adaption;  /* Adaption to use in network mode */
diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h
index 3b58944..bc6743e 100644
--- a/include/linux/hdmi.h
+++ b/include/linux/hdmi.h
@@ -23,6 +23,15 @@
 #define HDMI_SPD_INFOFRAME_SIZE    25
 #define HDMI_AUDIO_INFOFRAME_SIZE  10
 
+#define HDMI_INFOFRAME_SIZE(type)	\
+	(HDMI_INFOFRAME_HEADER_SIZE + HDMI_ ## type ## _INFOFRAME_SIZE)
+
+struct hdmi_any_infoframe {
+	enum hdmi_infoframe_type type;
+	unsigned char version;
+	unsigned char length;
+};
+
 enum hdmi_colorspace {
 	HDMI_COLORSPACE_RGB,
 	HDMI_COLORSPACE_YUV422,
@@ -228,4 +237,15 @@
 ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
 				   void *buffer, size_t size);
 
+union hdmi_infoframe {
+	struct hdmi_any_infoframe any;
+	struct hdmi_avi_infoframe avi;
+	struct hdmi_spd_infoframe spd;
+	struct hdmi_vendor_infoframe vendor;
+	struct hdmi_audio_infoframe audio;
+};
+
+ssize_t
+hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer, size_t size);
+
 #endif /* _DRM_HDMI_H */
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 1f3c5f7..254d56c 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -458,6 +458,7 @@
 	enum hid_type type;						/* device type (mouse, kbd, ...) */
 	unsigned country;						/* HID country */
 	struct hid_report_enum report_enum[HID_REPORT_TYPES];
+	struct work_struct led_work;					/* delayed LED worker */
 
 	struct semaphore driver_lock;					/* protects the current driver, except during input */
 	struct semaphore driver_input_lock;				/* protects the current driver */
@@ -746,6 +747,7 @@
 unsigned int hidinput_count_leds(struct hid_device *hid);
 __s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code);
 void hid_output_report(struct hid_report *report, __u8 *data);
+u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags);
 struct hid_device *hid_allocate_device(void);
 struct hid_report *hid_register_report(struct hid_device *device, unsigned type, unsigned id);
 int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size);
diff --git a/include/linux/history_record.h b/include/linux/history_record.h
new file mode 100644
index 0000000..f10e251
--- /dev/null
+++ b/include/linux/history_record.h
@@ -0,0 +1,33 @@
+#ifndef _LINUX_HISTORY_RECORD_H
+#define _LINUX_HISTORY_RECORD_H
+
+#ifdef __KERNEL__
+
+#define SAVED_HISTORY_MAX 250
+
+struct saved_history_record {
+	unsigned long long ts;
+	unsigned int type;
+	union {
+		struct {
+			unsigned int HostIrqCountSample;
+			unsigned int InterruptCount;
+		} sgx; /*type = 1,2;*/
+
+		struct {
+			unsigned int pipe_nu;
+			unsigned int pipe_stat_val;
+		} pipe; /*type = 3;*/
+
+		unsigned long msvdx_stat; /*type = 4;*/
+		unsigned long vdc_stat; /*type = 5;*/
+		unsigned long value; /*type = 0;*/
+	} record_value;
+};
+
+extern struct saved_history_record *get_new_history_record(void);
+extern void interrupt_dump_history(void);
+
+#endif
+
+#endif
diff --git a/include/linux/hsi/hsi.h b/include/linux/hsi/hsi.h
index 0dca785..f67545c 100644
--- a/include/linux/hsi/hsi.h
+++ b/include/linux/hsi/hsi.h
@@ -65,6 +65,8 @@
 enum {
 	HSI_EVENT_START_RX,
 	HSI_EVENT_STOP_RX,
+	HSI_EVENT_RESUME,
+	HSI_EVENT_SUSPEND,
 };
 
 /**
@@ -129,6 +131,8 @@
 	struct device		device;
 	struct hsi_config	tx_cfg;
 	struct hsi_config	rx_cfg;
+	void			(*hsi_start_rx)(struct hsi_client *cl);
+	void			(*hsi_stop_rx)(struct hsi_client *cl);
 	/* private: */
 	void			(*ehandler)(struct hsi_client *, unsigned long);
 	unsigned int		pclaimed:1;
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 6b4890f..feaf0c7f 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -358,6 +358,17 @@
 	return h - hstates;
 }
 
+pgoff_t __basepage_index(struct page *page);
+
+/* Return page->index in PAGE_SIZE units */
+static inline pgoff_t basepage_index(struct page *page)
+{
+	if (!PageCompound(page))
+		return page->index;
+
+	return __basepage_index(page);
+}
+
 #else	/* CONFIG_HUGETLB_PAGE */
 struct hstate {};
 #define alloc_huge_page_node(h, nid) NULL
@@ -378,6 +389,11 @@
 }
 #define hstate_index_to_shift(index) 0
 #define hstate_index(h) 0
+
+static inline pgoff_t basepage_index(struct page *page)
+{
+	return page->index;
+}
 #endif	/* CONFIG_HUGETLB_PAGE */
 
 #endif /* _LINUX_HUGETLB_H */
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 637fa71d..0b34988 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -79,9 +79,8 @@
 }
 
 #define vlan_tx_tag_present(__skb)	((__skb)->vlan_tci & VLAN_TAG_PRESENT)
-#define vlan_tx_nonzero_tag_present(__skb) \
-	(vlan_tx_tag_present(__skb) && ((__skb)->vlan_tci & VLAN_VID_MASK))
 #define vlan_tx_tag_get(__skb)		((__skb)->vlan_tci & ~VLAN_TAG_PRESENT)
+#define vlan_tx_tag_get_id(__skb)	((__skb)->vlan_tci & VLAN_VID_MASK)
 
 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
 
diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h
index 833926c..14f8aac 100644
--- a/include/linux/iio/consumer.h
+++ b/include/linux/iio/consumer.h
@@ -183,4 +183,7 @@
 int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
 	int *processed, unsigned int scale);
 
+int iio_read_channel_all_raw(struct iio_channel *chan, int *val);
+int iio_channel_get_name(const struct iio_channel *chan, char **chan_name);
+int iio_channel_get_num(const struct iio_channel *chan);
 #endif
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index 8d171f4..2cd7a46 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -13,6 +13,8 @@
 #include <linux/device.h>
 #include <linux/cdev.h>
 #include <linux/iio/types.h>
+#include <linux/iio/consumer.h>
+
 /* IIO TODO LIST */
 /*
  * Provide means of adjusting timer accuracy.
@@ -211,8 +213,8 @@
 static inline bool iio_channel_has_info(const struct iio_chan_spec *chan,
 	enum iio_chan_info_enum type)
 {
-	return (chan->info_mask_separate & type) |
-	       (chan->info_mask_shared_by_type & type);
+	return (chan->info_mask_separate & BIT(type)) |
+	       (chan->info_mask_shared_by_type & BIT(type));
 }
 
 #define IIO_ST(si, rb, sb, sh)						\
@@ -287,6 +289,9 @@
 			int *val2,
 			long mask);
 
+	int (*read_all_raw)(struct iio_channel *chan,
+			int *val);
+
 	int (*write_raw)(struct iio_dev *indio_dev,
 			 struct iio_chan_spec const *chan,
 			 int val,
diff --git a/include/linux/iio/types.h b/include/linux/iio/types.h
index 88bf0f0..2a50d36 100644
--- a/include/linux/iio/types.h
+++ b/include/linux/iio/types.h
@@ -29,6 +29,7 @@
 	IIO_ALTVOLTAGE,
 	IIO_CCT,
 	IIO_PRESSURE,
+	IIO_RESISTANCE,
 };
 
 enum iio_modifier {
diff --git a/include/linux/input.h b/include/linux/input.h
index 82ce323..2fe74bc 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -76,6 +76,8 @@
  *	about absolute axes (current value, min, max, flat, fuzz,
  *	resolution)
  * @key: reflects current state of device's keys/buttons
+ * @key_suspend: reflects state of device's keys/buttons during device
+ *	suspend entry.
  * @led: reflects current state of device's LEDs
  * @snd: reflects current state of sound effects
  * @sw: reflects current state of device's switches
@@ -160,6 +162,7 @@
 	struct input_absinfo *absinfo;
 
 	unsigned long key[BITS_TO_LONGS(KEY_CNT)];
+	unsigned long key_suspend[BITS_TO_LONGS(KEY_CNT)];
 	unsigned long led[BITS_TO_LONGS(LED_CNT)];
 	unsigned long snd[BITS_TO_LONGS(SND_CNT)];
 	unsigned long sw[BITS_TO_LONGS(SW_CNT)];
diff --git a/include/linux/intel_mid_dma.h b/include/linux/intel_mid_dma.h
index 10496bd..47edd7b 100644
--- a/include/linux/intel_mid_dma.h
+++ b/include/linux/intel_mid_dma.h
@@ -29,6 +29,13 @@
 
 #define DMA_PREP_CIRCULAR_LIST		(1 << 10)
 
+#define SST_MAX_DMA_LEN		4095
+#define SST_MAX_DMA_LEN_MRFLD	131071 /* 2^17 - 1 */
+
+#define MRFL_INSTANCE_SPI3	3
+#define MRFL_INSTANCE_SPI5	5
+#define MRFL_INSTANCE_SPI6	6
+
 /*DMA mode configurations*/
 enum intel_mid_dma_mode {
 	LNW_DMA_PER_TO_MEM = 0, /*periphral to memory configuration*/
@@ -73,4 +80,7 @@
 	struct dma_slave_config		dma_slave;
 };
 
+struct device *intel_mid_get_acpi_dma(const char *hid);
+dma_addr_t intel_dma_get_src_addr(struct dma_chan *chan);
+dma_addr_t intel_dma_get_dst_addr(struct dma_chan *chan);
 #endif /*__INTEL_MID_DMA_H__*/
diff --git a/include/linux/intel_mid_pm.h b/include/linux/intel_mid_pm.h
new file mode 100644
index 0000000..031637a
--- /dev/null
+++ b/include/linux/intel_mid_pm.h
@@ -0,0 +1,216 @@
+/*
+ * intel_mid_pm.h
+ * Copyright (c) 2010, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+#include <linux/errno.h>
+
+#ifndef INTEL_MID_PM_H
+#define INTEL_MID_PM_H
+
+#include <asm/intel-mid.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+
+
+/* Chip ID of Intel Atom SOC*/
+#define INTEL_ATOM_MRFLD 0x4a
+#define INTEL_ATOM_MOORFLD 0x5a
+
+static inline int platform_is(u8 model)
+{
+	return (boot_cpu_data.x86_model == model);
+}
+
+/* Register Type definitions */
+#define OSPM_REG_TYPE          0x0
+#define APM_REG_TYPE           0x1
+#define OSPM_MAX_POWER_ISLANDS 16
+#define OSPM_ISLAND_UP         0x0
+#define OSPM_ISLAND_DOWN       0x1
+/*Soft reset*/
+#define OSPM_ISLAND_SR         0x2
+
+/* North complex power islands definitions for APM block*/
+#define APM_GRAPHICS_ISLAND    0x1
+#define APM_VIDEO_DEC_ISLAND   0x2
+#define APM_VIDEO_ENC_ISLAND   0x4
+#define APM_GL3_CACHE_ISLAND   0x8
+#define APM_ISP_ISLAND         0x10
+#define APM_IPH_ISLAND         0x20
+
+/* North complex power islands definitions for OSPM block*/
+#define OSPM_DISPLAY_A_ISLAND  0x2
+#define OSPM_DISPLAY_B_ISLAND  0x80
+#define OSPM_DISPLAY_C_ISLAND  0x100
+#define OSPM_MIPI_ISLAND       0x200
+
+/* North Complex power islands definitions for Tangier */
+#define TNG_ISP_ISLAND		0x1
+/* North Complex Register definitions for Tangier */
+#define	ISP_SS_PM0		0x39
+
+#define C4_STATE_IDX	3
+#define C6_STATE_IDX	4
+#define S0I1_STATE_IDX  5
+#define LPMP3_STATE_IDX 6
+#define S0I3_STATE_IDX  7
+
+#define C4_HINT	(0x30)
+#define C6_HINT	(0x52)
+
+#define CSTATE_EXIT_LATENCY_C1	 1
+#define CSTATE_EXIT_LATENCY_C2	 20
+#define CSTATE_EXIT_LATENCY_C4	 100
+#define CSTATE_EXIT_LATENCY_C6	 140
+
+/* Since entry latency is substantial
+ * put exit_latency = entry+exit latency
+ */
+#define CSTATE_EXIT_LATENCY_S0i1 1200
+#define CSTATE_EXIT_LATENCY_S0i2 2000
+#define CSTATE_EXIT_LATENCY_S0i3 10000
+
+enum s3_parts {
+	PROC_FRZ,
+	DEV_SUS,
+	NB_CPU_OFF,
+	NB_CPU_ON,
+	DEV_RES,
+	PROC_UNFRZ,
+	MAX_S3_PARTS
+};
+
+#ifdef CONFIG_ATOM_SOC_POWER
+#define LOG_PMU_EVENTS
+
+/* Error codes for pmu */
+#define	PMU_SUCCESS			0
+#define PMU_FAILED			-1
+#define PMU_BUSY_STATUS			0
+#define PMU_MODE_ID			1
+#define	SET_MODE			1
+#define	SET_AOAC_S0i1			2
+#define	SET_AOAC_S0i3			3
+#define	SET_LPAUDIO			4
+#define	SET_AOAC_S0i2			7
+
+#define MID_S0I1_STATE         0x60
+#define MID_S0I2_STATE         0x62
+#define MID_LPMP3_STATE        0x62
+#define MID_S0I3_STATE         0x64
+
+#define MID_S0IX_STATE         0xf
+#define MID_S3_STATE           0x1f
+#define MID_FAST_ON_OFF_STATE  0x3f
+
+/* combinations */
+#define MID_LPI1_STATE         0x1f
+#define MID_LPI3_STATE         0x7f
+#define MID_I1I3_STATE         0xff
+
+#define REMOVE_LP_FROM_LPIX    4
+
+/* Power number for MID_POWER */
+#define C0_POWER_USAGE         450
+#define C6_POWER_USAGE         200
+#define LPMP3_POWER_USAGE      130
+#define S0I1_POWER_USAGE       50
+#define S0I3_POWER_USAGE       31
+
+extern unsigned int enable_s3;
+extern unsigned int enable_s0ix;
+
+extern void pmu_s0ix_demotion_stat(int req_state, int grant_state);
+extern unsigned int pmu_get_new_cstate(unsigned int cstate, int *index);
+extern int get_target_platform_state(unsigned long *eax);
+extern int mid_s0ix_enter(int);
+extern int pmu_set_devices_in_d0i0(void);
+extern int pmu_pci_set_power_state(struct pci_dev *pdev, pci_power_t state);
+extern pci_power_t pmu_pci_choose_state(struct pci_dev *pdev);
+
+extern void time_stamp_in_suspend_flow(int mark, bool start);
+extern void time_stamp_for_sleep_state_latency(int sleep_state,
+						bool start, bool entry);
+extern int mid_state_to_sys_state(int mid_state);
+extern void pmu_power_off(void);
+extern void pmu_set_s0ix_complete(void);
+extern bool pmu_is_s0ix_in_progress(void);
+extern int pmu_nc_set_power_state
+	(int islands, int state_type, int reg_type);
+extern int pmu_nc_get_power_state(int island, int reg_type);
+extern void pmu_set_s0i1_disp_vote(bool enable);
+extern int pmu_set_emmc_to_d0i0_atomic(void);
+
+#ifdef LOG_PMU_EVENTS
+extern void pmu_log_ipc(u32 command);
+extern void pmu_log_ipc_irq(void);
+#else
+static inline void pmu_log_ipc(u32 command) { return; };
+static inline void pmu_log_ipc_irq(void) { return; };
+#endif
+extern void dump_nc_power_history(void);
+
+extern bool mid_pmu_is_wake_source(u32 lss_number);
+
+extern void (*nc_report_power_state) (u32, int);
+#else
+
+/*
+ * If CONFIG_ATOM_SOC_POWER is not defined
+ * fall back to C6
+ */
+
+#define MID_S0I1_STATE         C6_HINT
+#define MID_LPMP3_STATE        C6_HINT
+#define MID_S0I3_STATE         C6_HINT
+#define MID_S3_STATE           C6_HINT
+#define MID_FAST_ON_OFF_STATE  C6_HINT
+
+/* Power usage unknown if MID_POWER not defined */
+#define C0_POWER_USAGE         0
+#define C6_POWER_USAGE         0
+#define LPMP3_POWER_USAGE      0
+#define S0I1_POWER_USAGE       0
+#define S0I3_POWER_USAGE       0
+
+#define TEMP_DTS_ID     43
+
+static inline int pmu_nc_set_power_state
+	(int islands, int state_type, int reg_type) { return 0; }
+static inline int pmu_nc_get_power_state(int island, int reg_type) { return 0; }
+static inline void pmu_set_s0i1_disp_vote(bool enable) { return; }
+
+static inline void pmu_set_s0ix_complete(void) { return; }
+static inline bool pmu_is_s0ix_in_progress(void) { return false; };
+static inline unsigned int pmu_get_new_cstate
+			(unsigned int cstate, int *index) { return cstate; };
+
+/*returns function not implemented*/
+static inline void time_stamp_in_suspend_flow(int mark, bool start) {}
+static inline void time_stamp_for_sleep_state_latency(int sleep_state,
+					bool start, bool entry) {}
+static inline int mid_state_to_sys_state(int mid_state) { return 0; }
+
+static inline int pmu_set_devices_in_d0i0(void) { return 0; }
+static inline void pmu_log_ipc(u32 command) { return; };
+static inline void pmu_log_ipc_irq(void) { return; };
+static inline int pmu_set_emmc_to_d0i0_atomic(void) { return -ENOSYS; }
+static inline void pmu_power_off(void) { return; }
+static inline bool mid_pmu_is_wake_source(u32 lss_number) { return false; }
+#endif /* #ifdef CONFIG_ATOM_SOC_POWER */
+
+#endif /* #ifndef INTEL_MID_PM_H */
diff --git a/include/linux/intel_pidv_acpi.h b/include/linux/intel_pidv_acpi.h
new file mode 100644
index 0000000..fd5e7c9
--- /dev/null
+++ b/include/linux/intel_pidv_acpi.h
@@ -0,0 +1,59 @@
+/*
+ * include/linux/intel_pidv_acpi.h
+ *
+ * Copyright (C) 2013 Intel Corp
+ * Author: Vincent Tinelli (vincent.tinelli@intel.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#ifndef _INTEL_PIDV_ACPI_H
+#define _INTEL_PIDV_ACPI_H
+
+#include <linux/acpi.h>
+#ifdef CONFIG_ACPI
+#define ACPI_SIG_PIDV           "PIDV"
+#define revmajor		0
+#define revminor		1
+
+#define pidv_attr(_name) \
+static struct kobj_attribute _name##_attr = {	\
+	.attr   = {				\
+		.name = __stringify(_name),	\
+		.mode = 0440,			\
+	},					\
+	.show   = _name##_show,			\
+}
+
+struct platform_id {
+	u8 part_number[32];
+	u8 ext_id_1[32];
+	u8 ext_id_2[32];
+	u8 uuid[16];
+	u8 iafwBuildid[32];
+	u32 iasvn;
+	u32 secsvn;
+	u32 pdrsvn;
+	u16 iafwrevvalues[4];
+	u16 secrevvalues[4];
+	u16 pdrrevvalues[4];
+};
+
+struct acpi_table_pidv {
+	struct acpi_table_header header;
+	struct platform_id pidv;
+};
+
+#endif
+#endif
diff --git a/include/linux/io.h b/include/linux/io.h
index 069e407..f4f42fa 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -76,4 +76,29 @@
 #define arch_has_dev_port()     (1)
 #endif
 
+/*
+ * Some systems (x86 without PAT) have a somewhat reliable way to mark a
+ * physical address range such that uncached mappings will actually
+ * end up write-combining.  This facility should be used in conjunction
+ * with pgprot_writecombine, ioremap-wc, or set_memory_wc, since it has
+ * no effect if the per-page mechanisms are functional.
+ * (On x86 without PAT, these functions manipulate MTRRs.)
+ *
+ * arch_phys_del_wc(0) or arch_phys_del_wc(any error code) is guaranteed
+ * to have no effect.
+ */
+#ifndef arch_phys_wc_add
+static inline int __must_check arch_phys_wc_add(unsigned long base,
+						unsigned long size)
+{
+	return 0;  /* It worked (i.e. did nothing). */
+}
+
+static inline void arch_phys_wc_del(int handle)
+{
+}
+
+#define arch_phys_wc_add arch_phys_wc_add
+#endif
+
 #endif /* _LINUX_IO_H */
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index c4d870b..19c19a5 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -22,7 +22,7 @@
 	int in_use;
 	unsigned short seq;
 	unsigned short seq_max;
-	struct rw_semaphore rw_mutex;
+	struct rw_semaphore rwsem;
 	struct idr ipcs_idr;
 	int next_id;
 };
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 3d15bbd..5fabaa4 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -104,6 +104,7 @@
 #define IP6SKB_FORWARDED	2
 #define IP6SKB_REROUTED		4
 #define IP6SKB_ROUTERALERT	8
+#define IP6SKB_FRAGMENTED      16
 };
 
 #define IP6CB(skb)	((struct inet6_skb_parm*)((skb)->cb))
diff --git a/include/linux/irq.h b/include/linux/irq.h
index bc4e066..5864ea8 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -94,6 +94,7 @@
 	IRQ_NESTED_THREAD	= (1 << 15),
 	IRQ_NOTHREAD		= (1 << 16),
 	IRQ_PER_CPU_DEVID	= (1 << 17),
+	IRQ_CHAINED		= (1 << 18),
 };
 
 #define IRQF_MODIFY_MASK	\
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index 623325e..82c2638 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -123,6 +123,20 @@
 	return desc->action != NULL;
 }
 
+/* Test to see if the IRQ is chained */
+static inline int irq_is_chained(unsigned int irq)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	return desc->status_use_accessors & IRQ_CHAINED;
+}
+
+/* Test to see if the IRQ is nested_thread */
+static inline int irq_is_nested_thread(unsigned int irq)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	return desc->status_use_accessors & IRQ_NESTED_THREAD;
+}
+
 /* caller has locked the irq_desc and both params are valid */
 static inline void __irq_set_handler_locked(unsigned int irq,
 					    irq_flow_handler_t handler)
diff --git a/include/linux/kbd_kern.h b/include/linux/kbd_kern.h
index b7c8cdc..febedda 100644
--- a/include/linux/kbd_kern.h
+++ b/include/linux/kbd_kern.h
@@ -5,7 +5,9 @@
 #include <linux/interrupt.h>
 #include <linux/keyboard.h>
 
+#ifndef CONFIG_ANDROID
 extern struct tasklet_struct keyboard_tasklet;
+#endif
 
 extern char *func_table[MAX_NR_FUNC];
 extern char func_buf[];
@@ -74,11 +76,15 @@
 extern int set_console(int nr);
 extern void schedule_console_callback(void);
 
+#ifndef CONFIG_ANDROID
 /* FIXME: review locking for vt.c callers */
 static inline void set_leds(void)
 {
 	tasklet_schedule(&keyboard_tasklet);
 }
+#else
+static inline void set_leds(void) {}
+#endif
 
 static inline int vc_kbd_mode(struct kbd_struct * kbd, int flag)
 {
diff --git a/include/linux/kct.h b/include/linux/kct.h
new file mode 100644
index 0000000..fbbe679
--- /dev/null
+++ b/include/linux/kct.h
@@ -0,0 +1,282 @@
+#ifndef KCT_H_
+#  define KCT_H_
+
+#  include <linux/netlink.h>
+
+/*
+ * warning: structures and constants in this header must match the
+ * ones in libc/kernel/common/linux/kct.h, so that information can
+ * be exchange between kernel and userspace throught netlink socket.
+ */
+/* flags to optionally filter events on android property activation */
+#define	EV_FLAGS_PRIORITY_LOW	(1<<0)
+
+#  ifndef MAX_SB_N
+#    define MAX_SB_N 32
+#  endif
+
+#  ifndef MAX_EV_N
+#    define MAX_EV_N 32
+#  endif
+
+#  define NETLINK_CRASHTOOL 27
+#  define ATTCHMT_ALIGN 4U
+
+/* Type of events supported by crashtool */
+enum ct_ev_type {
+	CT_EV_STAT,
+	CT_EV_INFO,
+	CT_EV_ERROR,
+	CT_EV_CRASH,
+	CT_EV_LAST
+};
+
+enum ct_attchmt_type {
+	CT_ATTCHMT_DATA0,
+	CT_ATTCHMT_DATA1,
+	CT_ATTCHMT_DATA2,
+	CT_ATTCHMT_DATA3,
+	CT_ATTCHMT_DATA4,
+	CT_ATTCHMT_DATA5,
+	/* Always add new types after DATA5 */
+	CT_ATTCHMT_BINARY,
+	CT_ATTCHMT_FILELIST
+};
+
+struct ct_attchmt {
+	__u32 size; /* sizeof(data) */
+	enum ct_attchmt_type type;
+	char data[];
+} __aligned(4);
+
+struct ct_event {
+	__u64 timestamp;
+	char submitter_name[MAX_SB_N];
+	char ev_name[MAX_EV_N];
+	enum ct_ev_type type;
+	__u32 attchmt_size; /* sizeof(all_attachments inc. padding) */
+	__u32 flags;
+	struct ct_attchmt attachments[];
+} __aligned(4);
+
+enum kct_nlmsg_type {
+	/* kernel -> userland */
+	KCT_EVENT,
+	/* userland -> kernel */
+	KCT_SET_PID = 4200,
+};
+
+struct kct_packet {
+	struct nlmsghdr nlh;
+	struct ct_event event;
+};
+
+#  define ATTCHMT_ALIGNMENT	4
+
+#  ifndef KCT_ALIGN
+#    define __KCT_ALIGN_MASK(x, mask)    (((x) + (mask)) & ~(mask))
+#    define __KCT_ALIGN(x, a)            __KCT_ALIGN_MASK(x, (typeof(x))(a) - 1)
+#    define KCT_ALIGN(x, a)		     __KCT_ALIGN((x), (a))
+#  endif /* !KCT_ALIGN */
+
+#  define foreach_attchmt(Event, Attchmt)				\
+	if ((Event)->attchmt_size)					\
+		for ((Attchmt) = (Event)->attachments;			\
+		     (Attchmt) < (typeof(Attchmt))(((char *)		\
+				  (Event)->attachments) +               \
+			(Event)->attchmt_size);                         \
+	(Attchmt) = (typeof(Attchmt))KCT_ALIGN(((size_t)(Attchmt)) \
+						     + sizeof(*(Attchmt)) + \
+			      (Attchmt)->size, ATTCHMT_ALIGNMENT))
+
+/*
+ * User should use the macros below rather than those extern functions
+ * directly. Laters' declaration are only to set them __weak so
+ * that the macros works fine.
+ */
+/* Raw API (deprecated) */
+extern struct ct_event *kct_alloc_event(const char *submitter_name,
+					const char *ev_name,
+					enum ct_ev_type ev_type,
+					gfp_t flags, uint eflags) __weak;
+extern int kct_add_attchmt(struct ct_event **ev,
+			   enum ct_attchmt_type at_type,
+			   unsigned int size,
+			   char *data, gfp_t flags)  __weak;
+extern void kct_free_event(struct ct_event *ev) __weak;
+extern int kct_log_event(struct ct_event *ev, gfp_t flags) __weak;
+
+/* API */
+#define MKFN(fn, ...) MKFN_N(fn, ##__VA_ARGS__, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)(__VA_ARGS__)
+#define MKFN_N(fn, n0, n1, n2, n3, n4, n5, n6, n7, n8, n9, n10, n11, n, ...) fn##n
+#define kct_log(...) MKFN(__kct_log_, ##__VA_ARGS__)
+
+#define __kct_log_4(Type, Submitter_name, Ev_name, flags) \
+	do {  if (kct_alloc_event) {	\
+		struct ct_event *__ev =	\
+			kct_alloc_event(Submitter_name, Ev_name, Type, \
+				GFP_ATOMIC, flags); \
+		if (__ev) { \
+			kct_log_event(__ev, GFP_ATOMIC); \
+		} \
+	} } while (0)
+
+#define __kct_log_5(Type, Submitter_name, Ev_name, flags, Data0) \
+	do {  if (kct_alloc_event) {	\
+		struct ct_event *__ev =	\
+			kct_alloc_event(Submitter_name, Ev_name, Type, \
+				GFP_ATOMIC, flags); \
+		if (__ev) { \
+			if (Data0) \
+				kct_add_attchmt(&__ev, CT_ATTCHMT_DATA0, \
+					strlen(Data0) + 1, Data0, GFP_ATOMIC); \
+			kct_log_event(__ev, GFP_ATOMIC); \
+		} \
+	} } while (0)
+
+#define __kct_log_6(Type, Submitter_name, Ev_name, flags, Data0, Data1) \
+	do {  if (kct_alloc_event) {	\
+		struct ct_event *__ev =	\
+			kct_alloc_event(Submitter_name, Ev_name, Type, \
+				GFP_ATOMIC, flags); \
+		if (__ev) { \
+			if (Data0) \
+				kct_add_attchmt(&__ev, CT_ATTCHMT_DATA0, \
+					strlen(Data0) + 1, Data0, GFP_ATOMIC); \
+			if (Data1) \
+				kct_add_attchmt(&__ev, CT_ATTCHMT_DATA1, \
+					strlen(Data1) + 1, Data1, GFP_ATOMIC); \
+			kct_log_event(__ev, GFP_ATOMIC); \
+		} \
+	} } while (0)
+
+#define __kct_log_7(Type, Submitter_name, Ev_name, flags, Data0, Data1, Data2) \
+	do {  if (kct_alloc_event) {	\
+		struct ct_event *__ev =	\
+			kct_alloc_event(Submitter_name, Ev_name, Type, \
+				GFP_ATOMIC, flags); \
+		if (__ev) { \
+			if (Data0) \
+				kct_add_attchmt(&__ev, CT_ATTCHMT_DATA0, \
+					strlen(Data0) + 1, Data0, GFP_ATOMIC); \
+			if (Data1) \
+				kct_add_attchmt(&__ev, CT_ATTCHMT_DATA1, \
+					strlen(Data1) + 1, Data1, GFP_ATOMIC); \
+			if (Data2) \
+				kct_add_attchmt(&__ev, CT_ATTCHMT_DATA2, \
+					strlen(Data2) + 1, Data2, GFP_ATOMIC); \
+			kct_log_event(__ev, GFP_ATOMIC); \
+		} \
+	} } while (0)
+
+#define __kct_log_8(Type, Submitter_name, Ev_name, flags, Data0, Data1, Data2, \
+					Data3) \
+	do {  if (kct_alloc_event) {	\
+		struct ct_event *__ev =	\
+			kct_alloc_event(Submitter_name, Ev_name, Type, \
+				GFP_ATOMIC, flags); \
+		if (__ev) { \
+			if (Data0) \
+				kct_add_attchmt(&__ev, CT_ATTCHMT_DATA0, \
+					strlen(Data0) + 1, Data0, GFP_ATOMIC); \
+			if (Data1) \
+				kct_add_attchmt(&__ev, CT_ATTCHMT_DATA1, \
+					strlen(Data1) + 1, Data1, GFP_ATOMIC); \
+			if (Data2) \
+				kct_add_attchmt(&__ev, CT_ATTCHMT_DATA2, \
+					strlen(Data2) + 1, Data2, GFP_ATOMIC); \
+			if (Data3) \
+				kct_add_attchmt(&__ev, CT_ATTCHMT_DATA3, \
+					strlen(Data3) + 1, Data3, GFP_ATOMIC); \
+			kct_log_event(__ev, GFP_ATOMIC); \
+		} \
+	} } while (0)
+
+	#define __kct_log_9(Type, Submitter_name, Ev_name, flags, Data0, Data1, Data2, \
+					 Data3, Data4) \
+	do {  if (kct_alloc_event) {	\
+		struct ct_event *__ev =	\
+			kct_alloc_event(Submitter_name, Ev_name, Type, \
+				GFP_ATOMIC, flags); \
+		if (__ev) { \
+			if (Data0) \
+				kct_add_attchmt(&__ev, CT_ATTCHMT_DATA0, \
+					strlen(Data0) + 1, Data0, GFP_ATOMIC); \
+			if (Data1) \
+				kct_add_attchmt(&__ev, CT_ATTCHMT_DATA1, \
+					strlen(Data1) + 1, Data1, GFP_ATOMIC); \
+			if (Data2) \
+				kct_add_attchmt(&__ev, CT_ATTCHMT_DATA2, \
+					strlen(Data2) + 1, Data2, GFP_ATOMIC); \
+			if (Data3) \
+				kct_add_attchmt(&__ev, CT_ATTCHMT_DATA3, \
+					strlen(Data3) + 1, Data3, GFP_ATOMIC); \
+			if (Data4) \
+				kct_add_attchmt(&__ev, CT_ATTCHMT_DATA4, \
+					strlen(Data4) + 1, Data4, GFP_ATOMIC); \
+			kct_log_event(__ev, GFP_ATOMIC); \
+		} \
+	} } while (0)
+
+	#define __kct_log_10(Type, Submitter_name, Ev_name, flags, Data0, Data1, Data2, \
+					 Data3, Data4, Data5) \
+	do {  if (kct_alloc_event) {	\
+		struct ct_event *__ev =	\
+			kct_alloc_event(Submitter_name, Ev_name, Type, \
+				GFP_ATOMIC, flags); \
+		if (__ev) { \
+			if (Data0) \
+				kct_add_attchmt(&__ev, CT_ATTCHMT_DATA0, \
+					strlen(Data0) + 1, Data0, GFP_ATOMIC); \
+			if (Data1) \
+				kct_add_attchmt(&__ev, CT_ATTCHMT_DATA1, \
+					strlen(Data1) + 1, Data1, GFP_ATOMIC); \
+			if (Data2) \
+				kct_add_attchmt(&__ev, CT_ATTCHMT_DATA2, \
+					strlen(Data2) + 1, Data2, GFP_ATOMIC); \
+			if (Data3) \
+				kct_add_attchmt(&__ev, CT_ATTCHMT_DATA3, \
+					strlen(Data3) + 1, Data3, GFP_ATOMIC); \
+			if (Data4) \
+				kct_add_attchmt(&__ev, CT_ATTCHMT_DATA4, \
+					strlen(Data4) + 1, Data4, GFP_ATOMIC); \
+			if (Data5) \
+				kct_add_attchmt(&__ev, CT_ATTCHMT_DATA5, \
+					strlen(Data5) + 1, Data5, GFP_ATOMIC); \
+			kct_log_event(__ev, GFP_ATOMIC); \
+		} \
+	} } while (0)
+
+	#define __kct_log_11(Type, Submitter_name, Ev_name, flags, Data0, Data1, Data2, \
+					 Data3, Data4, Data5, filelist) \
+	do {  if (kct_alloc_event) {	\
+		struct ct_event *__ev =	\
+			kct_alloc_event(Submitter_name, Ev_name, Type, \
+				GFP_ATOMIC, flags); \
+		if (__ev) { \
+			if (Data0) \
+			kct_add_attchmt(&__ev, CT_ATTCHMT_DATA0, \
+					strlen(Data0) + 1, Data0, GFP_ATOMIC); \
+			if (Data1) \
+			kct_add_attchmt(&__ev, CT_ATTCHMT_DATA1, \
+					strlen(Data1) + 1, Data1, GFP_ATOMIC); \
+			if (Data2) \
+			kct_add_attchmt(&__ev, CT_ATTCHMT_DATA2, \
+					strlen(Data2) + 1, Data2, GFP_ATOMIC); \
+			if (Data3) \
+			kct_add_attchmt(&__ev, CT_ATTCHMT_DATA3, \
+					strlen(Data3) + 1, Data3, GFP_ATOMIC); \
+			if (Data4) \
+			kct_add_attchmt(&__ev, CT_ATTCHMT_DATA4, \
+					strlen(Data4) + 1, Data4, GFP_ATOMIC); \
+			if (Data5) \
+			kct_add_attchmt(&__ev, CT_ATTCHMT_DATA5, \
+					strlen(Data5) + 1, Data5, GFP_ATOMIC); \
+			if (filelist) \
+			kct_add_attchmt(&__ev, CT_ATTCHMT_FILELIST, \
+					strlen(filelist) + 1, filelist, GFP_ATOMIC); \
+			kct_log_event(__ev, GFP_ATOMIC); \
+		} \
+	} } while (0)
+
+#endif /* !KCT_H_ */
diff --git a/include/linux/libmsrlisthelper.h b/include/linux/libmsrlisthelper.h
new file mode 100644
index 0000000..589f4eae
--- /dev/null
+++ b/include/linux/libmsrlisthelper.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+#ifndef __LIBMSRLISTHELPER_H__
+#define __LIBMSRLISTHELPER_H__
+
+struct i2c_client;
+struct firmware;
+
+extern int load_msr_list(struct i2c_client *client, char *path,
+		const struct firmware **fw);
+extern int apply_msr_data(struct i2c_client *client, const struct firmware *fw);
+extern void release_msr_list(struct i2c_client *client,
+		const struct firmware *fw);
+
+
+#endif /* ifndef __LIBMSRLISTHELPER_H__ */
diff --git a/include/linux/lnw_gpio.h b/include/linux/lnw_gpio.h
new file mode 100644
index 0000000..9cde4d2
--- /dev/null
+++ b/include/linux/lnw_gpio.h
@@ -0,0 +1,13 @@
+#ifndef _H_LANGWELL_GPIO_H
+#define _H_LANGWELL_GPIO_H
+
+enum {
+	LNW_GPIO = 0,
+	LNW_ALT_1 = 1,
+	LNW_ALT_2 = 2,
+	LNW_ALT_3 = 3,
+};
+
+void lnw_gpio_set_alt(int gpio, int alt);
+
+#endif
diff --git a/include/linux/mdm_ctrl.h b/include/linux/mdm_ctrl.h
new file mode 100644
index 0000000..daf1886
--- /dev/null
+++ b/include/linux/mdm_ctrl.h
@@ -0,0 +1,105 @@
+/*
+ * mdm_ctrl.h
+ *
+ * Intel Mobile Communication modem boot driver
+ *
+ * Copyright (C) 2012 Intel Corporation. All rights reserved.
+ *
+ * Contact: Faouaz Tenoutit <faouazx.tenoutit@intel.com>
+ *          Frederic Berat <fredericx.berat@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+#ifndef _MDM_CTRL_H
+#define _MDM_CTRL_H
+
+#include <linux/ioctl.h>
+
+/* Modem state */
+enum {
+	MDM_CTRL_STATE_UNKNOWN			= 0x0000,
+	MDM_CTRL_STATE_OFF			= 0x0001,
+	MDM_CTRL_STATE_COLD_BOOT		= 0x0002,
+	MDM_CTRL_STATE_WARM_BOOT		= 0x0004,
+	MDM_CTRL_STATE_COREDUMP			= 0x0008,
+	MDM_CTRL_STATE_IPC_READY		= 0x0010,
+	MDM_CTRL_STATE_FW_DOWNLOAD_READY	= 0x0020,
+};
+
+/* Backward compatibility with previous patches */
+#define MDM_CTRL_STATE_NONE MDM_CTRL_STATE_UNKNOWN
+
+/* Modem hanging up reasons */
+enum {
+	MDM_CTRL_NO_HU		= 0x00,
+	MDM_CTRL_HU_RESET	= 0x01,
+	MDM_CTRL_HU_COREDUMP	= 0x02,
+};
+
+enum {
+	MDM_TIMER_FLASH_ENABLE,
+	MDM_TIMER_FLASH_DISABLE,
+	MDM_TIMER_DEFAULT
+};
+
+/* Supported Modem IDs*/
+enum mdm_ctrl_mdm_type {
+	MODEM_UNSUP,
+	MODEM_6260,
+	MODEM_6268,
+	MODEM_6360,
+	MODEM_7160,
+	MODEM_7260
+};
+
+/* Type of modem board */
+enum mdm_ctrl_board_type {
+	BOARD_UNSUP,
+	BOARD_AOB,
+	BOARD_NGFF,
+	BOARD_PCIE,
+};
+
+/**
+ * struct mdm_ctrl_cmd - Command parameters
+ *
+ * @timeout: the command timeout duration
+ * @curr_state: the current modem state
+ * @expected_state: the modem state to wait for
+ */
+struct mdm_ctrl_cmd {
+	unsigned int param;
+	unsigned int timeout;
+};
+
+#define MDM_CTRL_MAGIC	0x87 /* FIXME: Revisit */
+
+/* IOCTL commands list */
+#define MDM_CTRL_POWER_OFF		_IO(MDM_CTRL_MAGIC, 0)
+#define MDM_CTRL_POWER_ON		_IO(MDM_CTRL_MAGIC, 1)
+#define MDM_CTRL_WARM_RESET		_IO(MDM_CTRL_MAGIC, 2)
+#define MDM_CTRL_COLD_RESET		_IO(MDM_CTRL_MAGIC, 3)
+#define MDM_CTRL_SET_STATE		_IO(MDM_CTRL_MAGIC, 4)
+#define MDM_CTRL_GET_STATE		_IO(MDM_CTRL_MAGIC, 5)
+#define MDM_CTRL_RESERVED		_IO(MDM_CTRL_MAGIC, 6)
+#define MDM_CTRL_FLASHING_WARM_RESET	_IO(MDM_CTRL_MAGIC, 7)
+#define MDM_CTRL_GET_HANGUP_REASONS	_IO(MDM_CTRL_MAGIC, 8)
+#define MDM_CTRL_CLEAR_HANGUP_REASONS	_IO(MDM_CTRL_MAGIC, 9)
+#define MDM_CTRL_SET_POLLED_STATES	_IO(MDM_CTRL_MAGIC, 10)
+#define MDM_CTRL_SET_BOARD		_IO(MDM_CTRL_MAGIC, 11)
+#define MDM_CTRL_SET_MDM		_IO(MDM_CTRL_MAGIC, 12)
+
+#endif /* _MDM_CTRL_H */
+
diff --git a/include/linux/mdm_ctrl_board.h b/include/linux/mdm_ctrl_board.h
new file mode 100644
index 0000000..cdc5e44
--- /dev/null
+++ b/include/linux/mdm_ctrl_board.h
@@ -0,0 +1,195 @@
+/*
+ * mdm_ctrl_board.h
+ *
+ * Header for the Modem control driver.
+ *
+ * Copyright (C) 2010, 2011 Intel Corporation. All rights reserved.
+ *
+ * Contact: Frederic BERAT <fredericx.berat@intel.com>
+ *          Faouaz TENOUTIT <faouazx.tenoutit@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef __MDM_CTRL_BOARD_H__
+#define __MDM_CTRL_BOARD_H__
+
+#include <asm/intel-mid.h>
+#include <linux/module.h>
+
+#define DEVICE_NAME "modem_control"
+#define DRVNAME "mdm_ctrl"
+
+/* Supported PMIC IDs*/
+enum {
+	PMIC_UNSUP,
+	PMIC_MFLD,
+	PMIC_CLVT,
+	PMIC_MRFL,
+	PMIC_BYT,
+	PMIC_MOOR,
+	PMIC_CHT
+};
+
+/* Supported CPU IDs*/
+enum {
+	CPU_UNSUP,
+	CPU_PWELL,
+	CPU_CLVIEW,
+	CPU_TANGIER,
+	CPU_VVIEW2,
+	CPU_ANNIEDALE,
+	CPU_CHERRYVIEW
+};
+
+struct mdm_ops {
+	int	(*init) (void *data);
+	int	(*cleanup) (void *data);
+	int	(*get_cflash_delay) (void *data);
+	int	(*get_wflash_delay) (void *data);
+	int	(*power_on) (void *data, int gpio_rst, int gpio_pwr);
+	int	(*power_off) (void *data, int gpio_rst);
+	int	(*warm_reset) (void *data, int gpio_rst);
+};
+
+struct cpu_ops {
+	int	(*init) (void *data);
+	int	(*cleanup) (void *data);
+	int	(*get_mdm_state) (void *data);
+	int	(*get_irq_cdump) (void *data);
+	int	(*get_irq_rst) (void *data);
+	int	(*get_gpio_rst) (void *data);
+	int	(*get_gpio_pwr) (void *data);
+};
+
+struct pmic_ops {
+	int	(*init) (void *data);
+	int	(*cleanup) (void *data);
+	int	(*power_on_mdm) (void *data);
+	int	(*power_off_mdm) (void *data);
+	int	(*get_early_pwr_on) (void *data);
+	int	(*get_early_pwr_off) (void *data);
+};
+
+struct mcd_base_info {
+	/* modem infos */
+	int		mdm_ver;
+	struct	mdm_ops mdm;
+	void	*modem_data;
+
+	/* cpu infos */
+	int		cpu_ver;
+	struct	cpu_ops cpu;
+	void	*cpu_data;
+
+	/* pmic infos */
+	int		pmic_ver;
+	struct	pmic_ops pmic;
+	void	*pmic_data;
+
+	/* board type */
+	int		board_type;
+};
+
+struct sfi_to_mdm {
+	char modem_name[SFI_NAME_LEN + 1];
+	int modem_type;
+};
+
+/* GPIO names */
+#define GPIO_RST_OUT	"ifx_mdm_rst_out"
+#define GPIO_PWR_ON	"ifx_mdm_pwr_on"
+#define GPIO_RST_BBN	"ifx_mdm_rst_pmu"
+#define GPIO_CDUMP	"modem-gpio2"
+#define GPIO_CDUMP_MRFL	"MODEM_CORE_DUMP"
+
+/* Retrieve modem parameters on ACPI framework */
+int retrieve_modem_platform_data(struct platform_device *pdev);
+
+int mcd_register_mdm_info(struct mcd_base_info *info,
+			  struct platform_device *pdev);
+
+void mcd_set_mdm(struct mcd_base_info *info, int mdm_ver);
+int mcd_finalize_cpu_data(struct mcd_base_info *mcd_reg_info);
+
+/* struct mcd_cpu_data
+ * @gpio_rst_out: Reset out gpio (self reset indicator)
+ * @gpio_pwr_on: Power on gpio (ON1 - Power up pin)
+ * @gpio_rst_bbn: RST_BB_N gpio (Reset pin)
+ * @gpio_cdump: CORE DUMP indicator
+ * @irq_cdump: CORE DUMP irq
+ * @irq_reset: RST_BB_N irq
+ */
+struct mdm_ctrl_cpu_data {
+	int		entries[4];
+
+	/* GPIOs */
+	char	*gpio_rst_out_name;
+	int		gpio_rst_out;
+	char	*gpio_pwr_on_name;
+	int		gpio_pwr_on;
+	char	*gpio_rst_bbn_name;
+	int		gpio_rst_bbn;
+	char	*gpio_cdump_name;
+	int		 gpio_cdump;
+	char	*gpio_wwan_disable_name;
+	char	*gpio_wake_on_wwan_name;
+
+	/* NGFF specific */
+	int		gpio_wwan_disable;
+
+	/* IRQs */
+	int	irq_cdump;
+	int	irq_reset;
+	int	irq_wake_on_wwan;
+};
+
+/* struct mdm_ctrl_pmic_data
+ * @chipctrl: PMIC base address
+ * @chipctrlon: Modem power on PMIC value
+ * @chipctrloff: Modem power off PMIC value
+ * @early_pwr_on: call to power_on on probe indicator
+ * @early_pwr_off: call to power_off on probe indicator
+ * @pwr_down_duration:Powering down duration (us)
+ */
+struct mdm_ctrl_pmic_data {
+	int		chipctrl;
+	int		chipctrlon;
+	int		chipctrloff;
+	int		chipctrl_mask;
+	bool	early_pwr_on;
+	bool	early_pwr_off;
+	int		pwr_down_duration;
+};
+
+/* struct mdm_ctrl_device_info - Board and modem infos
+ *
+ * @pre_on_delay:Delay before pulse on ON1 (us)
+ * @on_duration:Pulse on ON1 duration (us)
+ * @pre_wflash_delay:Delay before flashing window, after warm_reset (ms)
+ * @pre_cflash_delay:Delay before flashing window, after cold_reset (ms)
+ * @flash_duration:Flashing window durtion (ms)int  Not used ?
+ * @warm_rst_duration:Warm reset duration (ms)
+ */
+struct mdm_ctrl_mdm_data {
+	int	pre_on_delay;
+	int	on_duration;
+	int	pre_wflash_delay;
+	int	pre_cflash_delay;
+	int	flash_duration;
+	int	warm_rst_duration;
+	int	pre_pwr_down_delay;
+};
+#endif				/* __MDM_CTRL_BOARD_H__ */
diff --git a/include/linux/mfd/intel_mid_pmic.h b/include/linux/mfd/intel_mid_pmic.h
new file mode 100644
index 0000000..5f563b3
--- /dev/null
+++ b/include/linux/mfd/intel_mid_pmic.h
@@ -0,0 +1,24 @@
+#ifndef __INTEL_MID_PMIC_H__
+#define __INTEL_MID_PMIC_H__
+
+#ifdef CONFIG_INTEL_MID_PMIC
+int intel_mid_pmic_readb(int reg);
+int intel_mid_pmic_writeb(int reg, u8 val);
+int intel_mid_pmic_setb(int reg, u8 mask);
+int intel_mid_pmic_clearb(int reg, u8 mask);
+int intel_mid_pmic_update(int reg, u8 val, u8 mask);
+int intel_mid_pmic_set_pdata(const char *name, void *data, int len);
+struct device *intel_mid_pmic_dev(void);
+#else
+static inline int intel_mid_pmic_readb(int reg) { return 0; }
+static inline int intel_mid_pmic_writeb(int reg, u8 val) { return 0; }
+static inline int intel_mid_pmic_setb(int reg, u8 mask) { return 0; }
+static inline int intel_mid_pmic_clearb(int reg, u8 mask) { return 0; }
+static inline int intel_mid_pmic_set_pdata(const char *name,
+				void *data, int len) {
+	return 0;
+}
+static inline struct device *intel_mid_pmic_dev(void) { return NULL; }
+#endif
+#endif
+
diff --git a/include/linux/mfd/intel_msic.h b/include/linux/mfd/intel_msic.h
index 439a7a6..0b94093 100644
--- a/include/linux/mfd/intel_msic.h
+++ b/include/linux/mfd/intel_msic.h
@@ -12,6 +12,8 @@
 #ifndef __LINUX_MFD_INTEL_MSIC_H__
 #define __LINUX_MFD_INTEL_MSIC_H__
 
+#include <asm/intel_mid_gpadc.h>
+
 /* ID */
 #define INTEL_MSIC_ID0			0x000	/* RO */
 #define INTEL_MSIC_ID1			0x001	/* RO */
@@ -52,6 +54,15 @@
 #define INTEL_MSIC_PBCONFIG		0x03e
 #define INTEL_MSIC_PBSTATUS		0x03f	/* RO */
 
+/*
+ * MSIC interrupt tree is readable from SRAM at INTEL_MSIC_IRQ_PHYS_BASE.
+ * Since IRQ block starts from address 0x002 we need to substract that from
+ * the actual IRQ status register address.
+ */
+#define MSIC_IRQ_STATUS(x)      (INTEL_MSIC_IRQ_PHYS_BASE + ((x) - 2))
+#define MSIC_IRQ_STATUS_ACCDET  MSIC_IRQ_STATUS(INTEL_MSIC_ACCDET)
+#define MSIC_IRQ_STATUS_OCAUDIO MSIC_IRQ_STATUS(INTEL_MSIC_OCAUDIO)
+
 /* GPIO */
 #define INTEL_MSIC_GPIO0LV7CTLO		0x040
 #define INTEL_MSIC_GPIO0LV6CTLO		0x041
@@ -377,9 +388,41 @@
 /**
  * struct intel_msic_gpio_pdata - platform data for the MSIC GPIO driver
  * @gpio_base: base number for the GPIOs
+ * @ngpio_lv: number of low voltage GPIOs
+ * @ngpio_hv: number of high voltage GPIOs
+ * @gpio0_lv_ctlo: low voltage GPIO0 output control register
+ * @gpio0_lv_ctli: low voltage GPIO0 input control register
+ * @gpio0_hv_ctlo: high voltage GPIO0 output control register
+ * @gpio0_hv_ctli: high voltage GPIO0 input control register
+ * @can_sleep: flag for gpio chip
  */
 struct intel_msic_gpio_pdata {
 	unsigned	gpio_base;
+	int		ngpio_lv;
+	int		ngpio_hv;
+	u16		gpio0_lv_ctlo;
+	u16		gpio0_lv_ctli;
+	u16		gpio0_hv_ctlo;
+	u16		gpio0_hv_ctli;
+	int		can_sleep;
+};
+
+#define DISABLE_VCRIT	0x01
+#define DISABLE_VWARNB	0x02
+#define DISABLE_VWARNA	0x04
+/**
+ * struct intel_msic_vdd_pdata - platform data for the MSIC VDD driver
+ * @msi: MSI number used for VDD interrupts
+ *
+ * The MSIC CTP driver converts @msi into an IRQ number and passes it to
+ * the VDD driver as %IORESOURCE_IRQ.
+ */
+struct intel_msic_vdd_pdata {
+	unsigned	msi;
+	/* is set if device is ctp */
+	u8 is_clvp;
+	/* 1 = VCRIT, 2 = WARNB, 4 = WARNA */
+	u8 disable_unused_comparator;
 };
 
 /**
@@ -429,6 +472,7 @@
 	int				irq[INTEL_MSIC_BLOCK_LAST];
 	struct intel_msic_gpio_pdata	*gpio;
 	struct intel_msic_ocd_pdata	*ocd;
+	struct intel_mid_gpadc_platform_data	*gpadc;
 };
 
 struct intel_msic;
diff --git a/include/linux/mfd/wm8994/pdata.h b/include/linux/mfd/wm8994/pdata.h
index 68e7765..9e96ca7 100644
--- a/include/linux/mfd/wm8994/pdata.h
+++ b/include/linux/mfd/wm8994/pdata.h
@@ -130,6 +130,12 @@
 	int rate;
 };
 
+struct wm8958_custom_config {
+	int format;
+	int rate;
+	int channels;
+};
+
 struct wm8994_pdata {
 	int gpio_base;
 
@@ -182,6 +188,16 @@
 	 */
 	int micdet_delay;
 
+	/* Delay between microphone detect completing and reporting on
+	 * insert (specified in ms)
+	 */
+	int mic_id_delay;
+
+	/* Keep MICBIAS2 high for micb_en_delay, during jack insertion
+	 * removal
+	 */
+	int micb_en_delay;
+
 	/* IRQ for microphone detection if brought out directly as a
 	 * signal.
 	 */
@@ -223,6 +239,9 @@
 	 * lines is mastered.
 	 */
 	int max_channels_clocked[WM8994_NUM_AIF];
+
+	/* custom config for overriding the hw params */
+	struct wm8958_custom_config *custom_cfg;
 };
 
 #endif
diff --git a/include/linux/mfd/wm8994/registers.h b/include/linux/mfd/wm8994/registers.h
index 0535489..68a20a3 100644
--- a/include/linux/mfd/wm8994/registers.h
+++ b/include/linux/mfd/wm8994/registers.h
@@ -2017,6 +2017,7 @@
 #define WM8958_MICB2_DISCH_MASK                 0x0001  /* MICB2_DISCH */
 #define WM8958_MICB2_DISCH_SHIFT                     0  /* MICB2_DISCH */
 #define WM8958_MICB2_DISCH_WIDTH                     1  /* MICB2_DISCH */
+#define WM8958_MICB2_LVL_2P6V			   0x7	/* MICB2_LVL_VAL: 2.6V */
 
 /*
  * R210 (0xD2) - Mic Detect 3
@@ -2668,6 +2669,10 @@
 /*
  * R772 (0x304) - AIF1ADC LRCLK
  */
+#define WM8958_AIF1_LRCLK_INV                   0x1000  /* AIF1_LRCLK_INV */
+#define WM8958_AIF1_LRCLK_INV_MASK              0x1000  /* AIF1_LRCLK_INV */
+#define WM8958_AIF1_LRCLK_INV_SHIFT                 12  /* AIF1_LRCLK_INV */
+#define WM8958_AIF1_LRCLK_INV_WIDTH                  1  /* AIF1_LRCLK_INV */
 #define WM8994_AIF1ADC_LRCLK_DIR                0x0800  /* AIF1ADC_LRCLK_DIR */
 #define WM8994_AIF1ADC_LRCLK_DIR_MASK           0x0800  /* AIF1ADC_LRCLK_DIR */
 #define WM8994_AIF1ADC_LRCLK_DIR_SHIFT              11  /* AIF1ADC_LRCLK_DIR */
@@ -2679,6 +2684,10 @@
 /*
  * R773 (0x305) - AIF1DAC LRCLK
  */
+#define WM8958_AIF1_LRCLK_INV                   0x1000  /* AIF1_LRCLK_INV */
+#define WM8958_AIF1_LRCLK_INV_MASK              0x1000  /* AIF1_LRCLK_INV */
+#define WM8958_AIF1_LRCLK_INV_SHIFT                 12  /* AIF1_LRCLK_INV */
+#define WM8958_AIF1_LRCLK_INV_WIDTH                  1  /* AIF1_LRCLK_INV */
 #define WM8994_AIF1DAC_LRCLK_DIR                0x0800  /* AIF1DAC_LRCLK_DIR */
 #define WM8994_AIF1DAC_LRCLK_DIR_MASK           0x0800  /* AIF1DAC_LRCLK_DIR */
 #define WM8994_AIF1DAC_LRCLK_DIR_SHIFT              11  /* AIF1DAC_LRCLK_DIR */
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index 09c2300..cb35835 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -45,6 +45,7 @@
 #define MAPPER_CTRL_MINOR	236
 #define LOOP_CTRL_MINOR		237
 #define VHOST_NET_MINOR		238
+#define UHID_MINOR		239
 #define MISC_DYNAMIC_MINOR	255
 
 struct device;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 6ba724b..6cba80e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1692,6 +1692,8 @@
 int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
 int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 			unsigned long pfn);
+int vm_insert_pfn_with_pgprot(struct vm_area_struct *vma, unsigned long addr,
+			      unsigned long pfn, pgprot_t pgprot);
 int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
 			unsigned long pfn);
 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 875ba48..93ddd4e 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -338,6 +338,7 @@
 	void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
 #endif
 	unsigned long mmap_base;		/* base of mmap area */
+	unsigned long mmap_legacy_base;         /* base of mmap area in bottom-up allocations */
 	unsigned long task_size;		/* size of task vm space */
 	unsigned long cached_hole_size; 	/* if non-zero, the largest hole below free_area_cache */
 	unsigned long free_area_cache;		/* first hole of size cached_hole_size or larger */
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 6a5c754..60560ce 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -67,6 +67,7 @@
 #define MMC_HIGH_52_MAX_DTR	52000000
 #define MMC_HIGH_DDR_MAX_DTR	52000000
 #define MMC_HS200_MAX_DTR	200000000
+#define MMC_HS400_MAX_DTR	200000000
 	unsigned int		sectors;
 	unsigned int		card_type;
 	unsigned int		hc_erase_size;		/* In sectors */
@@ -83,11 +84,13 @@
 	unsigned int		hpi_cmd;		/* cmd used as HPI */
 	bool			bkops;		/* background support bit */
 	bool			bkops_en;	/* background enable bit */
+	unsigned int		rpmb_size;		/* Units: half sector */
 	unsigned int            data_sector_size;       /* 512 bytes or 4KB */
 	unsigned int            data_tag_unit_size;     /* DATA TAG UNIT size */
 	unsigned int		boot_ro_lock;		/* ro lock support */
 	bool			boot_ro_lockable;
 	u8			raw_exception_status;	/* 54 */
+	u8			part_set_complete;	/* 155 */
 	u8			raw_partition_support;	/* 160 */
 	u8			raw_rpmb_size_mult;	/* 168 */
 	u8			raw_erased_mem_count;	/* 181 */
@@ -107,6 +110,8 @@
 
 	unsigned int            feature_support;
 #define MMC_DISCARD_FEATURE	BIT(0)                  /* CMD38 feature */
+	unsigned int            gpp_sz[4];
+	unsigned int            wpg_sz;
 };
 
 struct sd_scr {
@@ -208,7 +213,7 @@
  */
 #define MMC_NUM_BOOT_PARTITION	2
 #define MMC_NUM_GP_PARTITION	4
-#define MMC_NUM_PHY_PARTITION	6
+#define MMC_NUM_PHY_PARTITION	7
 #define MAX_MMC_PART_NAME_LEN	20
 
 /*
@@ -248,7 +253,9 @@
 #define MMC_CARD_SDXC		(1<<6)		/* card is SDXC */
 #define MMC_CARD_REMOVED	(1<<7)		/* card has been removed */
 #define MMC_STATE_HIGHSPEED_200	(1<<8)		/* card is in HS200 mode */
+#define MMC_STATE_HIGHSPEED_400	(1<<9)		/* card is in HS400 mode */
 #define MMC_STATE_DOING_BKOPS	(1<<10)		/* card is doing BKOPS */
+#define MMC_STATE_NO_DDR50	(1<<11)		/* card cannot do DDR50 */
 	unsigned int		quirks; 	/* card quirks */
 #define MMC_QUIRK_LENIENT_FN0	(1<<0)		/* allow SDIO FN0 writes outside of the VS CCCR range */
 #define MMC_QUIRK_BLKSZ_FOR_BYTE_MODE (1<<1)	/* use func->cur_blksize */
@@ -264,6 +271,7 @@
 #define MMC_QUIRK_LONG_READ_TIME (1<<9)		/* Data read time > CSD says */
 #define MMC_QUIRK_SEC_ERASE_TRIM_BROKEN (1<<10)	/* Skip secure for erase/trim */
 						/* byte mode */
+#define MMC_QUIRK_NON_STD_CIS (1<<11)
 
 	unsigned int		erase_size;	/* erase size in sectors */
  	unsigned int		erase_shift;	/* if erase unit is power 2 */
@@ -294,6 +302,9 @@
 	struct dentry		*debugfs_root;
 	struct mmc_part	part[MMC_NUM_PHY_PARTITION]; /* physical partitions */
 	unsigned int    nr_parts;
+
+	unsigned int		rpmb_max_req;
+	unsigned int            last_max_dtr;
 };
 
 /*
@@ -409,6 +420,7 @@
 #define mmc_card_readonly(c)	((c)->state & MMC_STATE_READONLY)
 #define mmc_card_highspeed(c)	((c)->state & MMC_STATE_HIGHSPEED)
 #define mmc_card_hs200(c)	((c)->state & MMC_STATE_HIGHSPEED_200)
+#define mmc_card_hs400(c)	((c)->state & MMC_STATE_HIGHSPEED_400)
 #define mmc_card_blockaddr(c)	((c)->state & MMC_STATE_BLOCKADDR)
 #define mmc_card_ddr_mode(c)	((c)->state & MMC_STATE_HIGHSPEED_DDR)
 #define mmc_card_uhs(c)		((c)->state & MMC_STATE_ULTRAHIGHSPEED)
@@ -416,11 +428,13 @@
 #define mmc_card_ext_capacity(c) ((c)->state & MMC_CARD_SDXC)
 #define mmc_card_removed(c)	((c) && ((c)->state & MMC_CARD_REMOVED))
 #define mmc_card_doing_bkops(c)	((c)->state & MMC_STATE_DOING_BKOPS)
+#define mmc_card_noddr50(c)	((c)->state & MMC_STATE_NO_DDR50)
 
 #define mmc_card_set_present(c)	((c)->state |= MMC_STATE_PRESENT)
 #define mmc_card_set_readonly(c) ((c)->state |= MMC_STATE_READONLY)
 #define mmc_card_set_highspeed(c) ((c)->state |= MMC_STATE_HIGHSPEED)
 #define mmc_card_set_hs200(c)	((c)->state |= MMC_STATE_HIGHSPEED_200)
+#define mmc_card_set_hs400(c)	((c)->state |= MMC_STATE_HIGHSPEED_400)
 #define mmc_card_set_blockaddr(c) ((c)->state |= MMC_STATE_BLOCKADDR)
 #define mmc_card_set_ddr_mode(c) ((c)->state |= MMC_STATE_HIGHSPEED_DDR)
 #define mmc_card_set_uhs(c) ((c)->state |= MMC_STATE_ULTRAHIGHSPEED)
@@ -429,6 +443,7 @@
 #define mmc_card_set_removed(c) ((c)->state |= MMC_CARD_REMOVED)
 #define mmc_card_set_doing_bkops(c)	((c)->state |= MMC_STATE_DOING_BKOPS)
 #define mmc_card_clr_doing_bkops(c)	((c)->state &= ~MMC_STATE_DOING_BKOPS)
+#define mmc_card_set_noddr50(c)	((c)->state |= MMC_STATE_NO_DDR50)
 
 /*
  * Quirk add/remove for MMC products.
@@ -521,4 +536,8 @@
 extern void mmc_fixup_device(struct mmc_card *card,
 			     const struct mmc_fixup *table);
 
+extern int mmc_rpmb_req_handle(struct device *emmc,
+		struct mmc_ioc_rpmb_req *req);
+
+extern void dis_cache_mmc(struct mmc_card *card, int data);
 #endif /* LINUX_MMC_CARD_H */
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index 39613b9..2e3fc43 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -10,6 +10,8 @@
 
 #include <linux/interrupt.h>
 #include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/mmc/ioctl.h>
 
 struct request;
 struct mmc_data;
@@ -121,6 +123,7 @@
 	unsigned int		sg_len;		/* size of scatter list */
 	struct scatterlist	*sg;		/* I/O scatter list */
 	s32			host_cookie;	/* host private data */
+	dma_addr_t		dmabuf;		/* used in panic mode */
 };
 
 struct mmc_host;
@@ -135,6 +138,34 @@
 	struct mmc_host		*host;
 };
 
+/*
+ * RPMB frame structure for MMC core stack
+ */
+struct mmc_core_rpmb_req {
+	struct mmc_ioc_rpmb_req *req;
+	__u8 *frame;
+	bool ready;
+};
+
+#define RPMB_PROGRAM_KEY       1       /* Program RPMB Authentication Key */
+#define RPMB_GET_WRITE_COUNTER 2       /* Read RPMB write counter */
+#define RPMB_WRITE_DATA		3	/* Write data to RPMB partition */
+#define RPMB_READ_DATA         4       /* Read data from RPMB partition */
+#define RPMB_RESULT_READ       5       /* Read result request */
+#define RPMB_REQ               1       /* RPMB request mark */
+#define RPMB_RESP              (1 << 1)/* RPMB response mark */
+#define RPMB_AVALIABLE_SECTORS 8       /* 4K page size */
+
+#define RPMB_TYPE_BEG          510
+#define RPMB_RES_BEG           508
+#define RPMB_BLKS_BEG          506
+#define RPMB_ADDR_BEG          504
+#define RPMB_WCOUNTER_BEG      500
+
+#define RPMB_NONCE_BEG         484
+#define RPMB_DATA_BEG          228
+#define RPMB_MAC_BEG           196
+
 struct mmc_card;
 struct mmc_async_req;
 
@@ -152,6 +183,13 @@
 extern int __mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int, bool);
 extern int mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int);
 extern int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd);
+extern int mmc_rpmb_partition_ops(struct mmc_core_rpmb_req *,
+		struct mmc_card *);
+extern int mmc_rpmb_pre_frame(struct mmc_core_rpmb_req *, struct mmc_card *);
+extern void mmc_rpmb_post_frame(struct mmc_core_rpmb_req *);
+
+extern int mmc_set_user_wp(struct mmc_card *, unsigned int, unsigned int);
+extern int mmc_wp_status(struct mmc_card *, unsigned int, unsigned int, u8 *);
 
 #define MMC_ERASE_ARG		0x00000000
 #define MMC_SECURE_ERASE_ARG	0x80000000
@@ -205,4 +243,6 @@
 
 extern u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max);
 
+extern int mmc_busy_wait(struct mmc_host *host);
+
 #endif /* LINUX_MMC_CORE_H */
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 88ed02b..83d20c0 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -16,6 +16,7 @@
 #include <linux/device.h>
 #include <linux/fault-inject.h>
 #include <linux/wakelock.h>
+#include <linux/pm_qos.h>
 
 #include <linux/mmc/core.h>
 #include <linux/mmc/pm.h>
@@ -60,6 +61,7 @@
 #define MMC_TIMING_UHS_SDR104	6
 #define MMC_TIMING_UHS_DDR50	7
 #define MMC_TIMING_MMC_HS200	8
+#define MMC_TIMING_MMC_HS400	9
 
 #define MMC_SDR_MODE		0
 #define MMC_1_2V_DDR_MODE	1
@@ -81,6 +83,19 @@
 #define MMC_SET_DRIVER_TYPE_D	3
 };
 
+struct mmc_panic_host;
+
+struct mmc_host_panic_ops {
+	void    (*request)(struct mmc_panic_host *, struct mmc_request *);
+	void	(*prepare)(struct mmc_panic_host *);
+	int     (*setup)(struct mmc_panic_host *);
+	void    (*set_ios)(struct mmc_panic_host *);
+	void    (*dumpregs)(struct mmc_panic_host *);
+	int	(*power_on)(struct mmc_panic_host *);
+	int	(*hold_mutex)(struct mmc_panic_host *);
+	void	(*release_mutex)(struct mmc_panic_host *);
+};
+
 struct mmc_host_ops {
 	/*
 	 * 'enable' is called when the host is claimed and 'disable' is called
@@ -140,6 +155,9 @@
 	int	(*select_drive_strength)(unsigned int max_dtr, int host_drv, int card_drv);
 	void	(*hw_reset)(struct mmc_host *host);
 	void	(*card_event)(struct mmc_host *host);
+	void	(*set_dev_power)(struct mmc_host *, bool);
+	/* Prevent host controller from Auto Clock Gating by busy reading */
+	void	(*busy_wait)(struct mmc_host *mmc, u32 delay);
 };
 
 struct mmc_card;
@@ -196,6 +214,28 @@
 	struct regulator *vqmmc;	/* Optional Vccq supply */
 };
 
+struct mmc_panic_host {
+	/*
+	 * DMA buffer for the log
+	 */
+	dma_addr_t      dmabuf;
+	void            *logbuf;
+	const struct mmc_host_panic_ops *panic_ops;
+	unsigned int            panic_ready;
+	unsigned int            totalsecs;
+	unsigned int            max_blk_size;
+	unsigned int            max_blk_count;
+	unsigned int            max_req_size;
+	unsigned int            blkaddr;
+	unsigned int            caps;
+	unsigned int		caps2;
+	u32                     ocr;            /* the current OCR setting */
+	struct mmc_ios          ios;            /* current io bus settings */
+	struct mmc_card         *card;
+	struct mmc_host         *mmc;
+	void                    *priv;
+};
+
 struct mmc_host {
 	struct device		*parent;
 	struct device		class_dev;
@@ -282,6 +322,17 @@
 #define MMC_CAP2_PACKED_CMD	(MMC_CAP2_PACKED_RD | \
 				 MMC_CAP2_PACKED_WR)
 #define MMC_CAP2_NO_PRESCAN_POWERUP (1 << 14)	/* Don't power up before scan */
+#define MMC_CAP2_INIT_CARD_SYNC	(1 << 15)	/* init card in sync mode */
+#define MMC_CAP2_POLL_R1B_BUSY	(1 << 16)	/* host poll R1B busy*/
+#define MMC_CAP2_RPMBPART_NOACC	(1 << 17)	/* RPMB partition no access */
+#define MMC_CAP2_LED_SUPPORT	(1 << 18)	/* led support */
+#define MMC_CAP2_PWCTRL_POWER	(1 << 19)	/* power control card power */
+#define MMC_CAP2_FIXED_NCRC	(1 << 20)	/* fixed NCRC */
+#define MMC_CAP2_HS200_DIS	(1 << 21)	/* HS200 can be disabled */
+#define MMC_CAP2_HS400_1_8V_DDR	(1 << 22)	/* support HS400 */
+#define MMC_CAP2_HS400_1_2V_DDR	(1 << 23)	/* support HS400 */
+#define MMC_CAP2_HS400		(MMC_CAP2_HS400_1_8V_DDR | \
+				MMC_CAP2_HS400_1_2V_DDR)
 
 	mmc_pm_flag_t		pm_caps;	/* supported pm features */
 
@@ -376,10 +427,17 @@
 		int				num_funcs;
 	} embedded_sdio_data;
 #endif
-
+	struct mmc_panic_host *phost;
+	struct pm_qos_request *qos;
 	unsigned long		private[0] ____cacheline_aligned;
 };
 
+#define SECTOR_SIZE    512
+int mmc_emergency_init(void);
+int mmc_emergency_write(char *, unsigned int);
+void mmc_alloc_panic_host(struct mmc_host *, const struct mmc_host_panic_ops *);
+void mmc_emergency_setup(struct mmc_host *host);
+
 struct mmc_host *mmc_alloc_host(int extra, struct device *);
 int mmc_add_host(struct mmc_host *);
 void mmc_remove_host(struct mmc_host *);
@@ -426,6 +484,8 @@
 void mmc_detect_change(struct mmc_host *, unsigned long delay);
 void mmc_request_done(struct mmc_host *, struct mmc_request *);
 
+int mmc_cache_ctrl(struct mmc_host *, u8);
+
 static inline void mmc_signal_sdio_irq(struct mmc_host *host)
 {
 	host->ops->enable_sdio_irq(host, 0);
@@ -492,6 +552,11 @@
 	return !(host->caps2 & MMC_CAP2_BOOTPART_NOACC);
 }
 
+static inline int mmc_rpmb_partition_access(struct mmc_host *host)
+{
+	return !(host->caps2 & MMC_CAP2_RPMBPART_NOACC);
+}
+
 static inline int mmc_host_uhs(struct mmc_host *host)
 {
 	return host->caps &
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index 50bcde3..69bbffd 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -67,6 +67,7 @@
 #define MMC_SET_WRITE_PROT       28   /* ac   [31:0] data addr   R1b */
 #define MMC_CLR_WRITE_PROT       29   /* ac   [31:0] data addr   R1b */
 #define MMC_SEND_WRITE_PROT      30   /* adtc [31:0] wpdata addr R1  */
+#define MMC_SEND_WRITE_PROT_TYPE 31   /* adtc [31:0] wpdata addr R1  */
 
   /* class 5 */
 #define MMC_ERASE_GROUP_START    35   /* ac   [31:0] data addr   R1  */
@@ -281,6 +282,7 @@
 #define EXT_CSD_EXP_EVENTS_CTRL		56	/* R/W, 2 bytes */
 #define EXT_CSD_DATA_SECTOR_SIZE	61	/* R */
 #define EXT_CSD_GP_SIZE_MULT		143	/* R/W */
+#define EXT_CSD_PART_SET_COMPLETE	155	/* R/W */
 #define EXT_CSD_PARTITION_ATTRIBUTE	156	/* R/W */
 #define EXT_CSD_PARTITION_SUPPORT	160	/* RO */
 #define EXT_CSD_HPI_MGMT		161	/* R/W */
@@ -290,6 +292,7 @@
 #define EXT_CSD_SANITIZE_START		165     /* W */
 #define EXT_CSD_WR_REL_PARAM		166	/* RO */
 #define EXT_CSD_RPMB_MULT		168	/* RO */
+#define EXT_CSD_USER_WP			171	/* R/W */
 #define EXT_CSD_BOOT_WP			173	/* R/W */
 #define EXT_CSD_ERASE_GROUP_DEF		175	/* R/W */
 #define EXT_CSD_PART_CONFIG		179	/* R/W */
@@ -325,6 +328,7 @@
 #define EXT_CSD_POWER_OFF_LONG_TIME	247	/* RO */
 #define EXT_CSD_GENERIC_CMD6_TIME	248	/* RO */
 #define EXT_CSD_CACHE_SIZE		249	/* RO, 4 bytes */
+#define EXT_CSD_PWR_CL_200_DDR_195	253	/* RO, support HS400 */
 #define EXT_CSD_TAG_UNIT_SIZE		498	/* RO */
 #define EXT_CSD_DATA_TAG_SUPPORT	499	/* RO */
 #define EXT_CSD_MAX_PACKED_WRITES	500	/* RO */
@@ -344,9 +348,11 @@
 #define EXT_CSD_BOOT_WP_B_PWR_WP_EN	(0x01)
 
 #define EXT_CSD_PART_CONFIG_ACC_MASK	(0x7)
+#define EXT_CSD_PART_CONFIG_ACC_USER	(0x0)
 #define EXT_CSD_PART_CONFIG_ACC_BOOT0	(0x1)
 #define EXT_CSD_PART_CONFIG_ACC_RPMB	(0x3)
 #define EXT_CSD_PART_CONFIG_ACC_GP0	(0x4)
+#define EXT_CSD_GPP_NUM			(0x4)
 
 #define EXT_CSD_PART_SUPPORT_PART_EN	(0x1)
 
@@ -357,6 +363,7 @@
 #define EXT_CSD_CARD_TYPE_26	(1<<0)	/* Card can run at 26MHz */
 #define EXT_CSD_CARD_TYPE_52	(1<<1)	/* Card can run at 52MHz */
 #define EXT_CSD_CARD_TYPE_MASK	0x3F	/* Mask out reserved bits */
+#define EXT_CSD_CARD_TYPE_MASK_FULL	0xFF	/* Support HS400 */
 #define EXT_CSD_CARD_TYPE_DDR_1_8V  (1<<2)   /* Card can run at 52MHz */
 					     /* DDR mode @1.8V or 3V I/O */
 #define EXT_CSD_CARD_TYPE_DDR_1_2V  (1<<3)   /* Card can run at 52MHz */
@@ -366,6 +373,9 @@
 #define EXT_CSD_CARD_TYPE_SDR_1_8V	(1<<4)	/* Card can run at 200MHz */
 #define EXT_CSD_CARD_TYPE_SDR_1_2V	(1<<5)	/* Card can run at 200MHz */
 						/* SDR mode @1.2V I/O */
+#define EXT_CSD_CARD_TYPE_HS400_1_8V	(1<<6)	/* Card can run at 200MHz */
+#define EXT_CSD_CARD_TYPE_HS400_1_2V	(1<<7)	/* Card can run at 200MHz */
+						/* DDR mode @1.8/1.2v I/O */
 
 #define EXT_CSD_BUS_WIDTH_1	0	/* Card is in 1 bit mode */
 #define EXT_CSD_BUS_WIDTH_4	1	/* Card is in 4 bit mode */
@@ -400,6 +410,7 @@
 #define EXT_CSD_DYNCAP_NEEDED		BIT(1)
 #define EXT_CSD_SYSPOOL_EXHAUSTED	BIT(2)
 #define EXT_CSD_PACKED_FAILURE		BIT(3)
+#define EXT_CSD_PERMANENT_WP		BIT(2)
 
 #define EXT_CSD_PACKED_GENERIC_ERROR	BIT(0)
 #define EXT_CSD_PACKED_INDEXED_ERROR	BIT(1)
diff --git a/include/linux/mmc/sdhci-pci-data.h b/include/linux/mmc/sdhci-pci-data.h
index 8959604..20c36f9 100644
--- a/include/linux/mmc/sdhci-pci-data.h
+++ b/include/linux/mmc/sdhci-pci-data.h
@@ -5,13 +5,32 @@
 
 struct sdhci_pci_data {
 	struct pci_dev	*pdev;
+	void __iomem *flis_addr; /* eMMC0 FLIS registers start address */
 	int		slotno;
 	int		rst_n_gpio; /* Set to -EINVAL if unused */
 	int		cd_gpio;    /* Set to -EINVAL if unused */
+	int		quirks;
+	int		quirks2;
+	int		platform_quirks; /* Platform related quirks */
 	int		(*setup)(struct sdhci_pci_data *data);
 	void		(*cleanup)(struct sdhci_pci_data *data);
+	int		(*power_up)(void *data);
+	void		(*register_embedded_control)(void *dev_id,
+			   void (*virtual_cd)(void *dev_id, int card_present));
+	int		(*flis_check)(void *data, unsigned int host_clk,
+				unsigned int clk);
+	int		(*flis_dump)(void *data);
 };
 
+/* Some Pre-Silicon platform not support all SDHCI HCs of the SoC */
+#define PLFM_QUIRK_NO_HOST_CTRL_HW	(1<<0)
+/* Some Pre-Silicon platform do not support eMMC boot partition access */
+#define PLFM_QUIRK_NO_EMMC_BOOT_PART	(1<<1)
+/* Some Pre-Silicon platform do not support eMMC or SD High Speed */
+#define PLFM_QUIRK_NO_HIGH_SPEED	(1<<2)
+/* For the platform which don't have the SD card slot */
+#define PLFM_QUIRK_NO_SDCARD_SLOT	(1<<3)
+
 extern struct sdhci_pci_data *(*sdhci_pci_get_data)(struct pci_dev *pdev,
 				int slotno);
 
diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h
index b838ffc..ffb9dbe 100644
--- a/include/linux/mmc/sdhci.h
+++ b/include/linux/mmc/sdhci.h
@@ -18,6 +18,7 @@
 #include <linux/mmc/host.h>
 
 struct sdhci_host {
+	void __iomem *gpiobase;
 	/* Data set by hardware interface driver */
 	const char *hw_name;	/* Hardware bus name */
 
@@ -96,9 +97,56 @@
 #define SDHCI_QUIRK2_NO_1_8_V				(1<<2)
 #define SDHCI_QUIRK2_PRESET_VALUE_BROKEN		(1<<3)
 
+/* Intel private quirk2 starts on 15 */
+
+/* V2.0 host controller support DDR50 */
+#define SDHCI_QUIRK2_V2_0_SUPPORT_DDR50			(1<<15)
+/* Controller has bug when enabling Auto CMD23 */
+#define SDHCI_QUIRK2_BROKEN_AUTO_CMD23			(1<<16)
+/* HC Reg High Speed must be set later than HC2 Reg 1.8v Signaling Enable */
+#define SDHCI_QUIRK2_HIGH_SPEED_SET_LATE		(1<<17)
+/* SDR104 broken */
+#define SDHCI_QUIRK2_SDR104_BROKEN			(1<<18)
+/* to allow mmc_detect to detach the bus */
+#define SDHCI_QUIRK2_DISABLE_MMC_CAP_NONREMOVABLE	(1<<19)
+/* avoid detect/rescan/poweoff operations on suspend/resume. */
+#define SDHCI_QUIRK2_ENABLE_MMC_PM_IGNORE_PM_NOTIFY	(1<<20)
+/* Disable eMMC/SD card High speed feature. */
+#define SDHCI_QUIRK2_DISABLE_HIGH_SPEED			(1<<21)
+/* Fake VDD for device */
+#define SDHCI_QUIRK2_FAKE_VDD				(1<<22)
+#define SDHCI_QUIRK2_CARD_CD_DELAY			(1<<24)
+#define SDHCI_QUIRK2_WAIT_FOR_IDLE			(1<<25)
+/* BAD sd cd in HOST IC. This will cause system hang when removing SD */
+#define SDHCI_QUIRK2_BAD_SD_CD				(1<<26)
+#define SDHCI_QUIRK2_POWER_PIN_GPIO_MODE		(1<<27)
+#define SDHCI_QUIRK2_NON_STD_CIS   (1<<29)
+#define SDHCI_QUIRK2_TUNING_POLL			(1<<30)
+
 	int irq;		/* Device IRQ */
 	void __iomem *ioaddr;	/* Mapped address */
 
+	/*
+	 * XXX: SCU/X86 mutex variables base address in shared SRAM
+	 * NOTE: Max size of this struct is 16 bytes
+	 * without shared SRAM re-organization.
+	 */
+	void __iomem *sram_addr;        /* Shared SRAM address */
+
+	void __iomem *rte_addr;	/* IOAPIC RTE register address */
+
+#define DEKKER_EMMC_OWNER_OFFSET        0
+#define DEKKER_IA_REQ_OFFSET            0x04
+#define DEKKER_SCU_REQ_OFFSET           0x08
+/* 0xc offset: state of the emmc chip to SCU. */
+#define DEKKER_EMMC_STATE               0x0c
+#define DEKKER_OWNER_IA                 0
+#define DEKKER_OWNER_SCU                1
+#define DEKKER_EMMC_CHIP_ACTIVE         0
+#define DEKKER_EMMC_CHIP_SUSPENDED      1
+
+	unsigned int	usage_cnt;	/* eMMC mutex usage count */
+
 	const struct sdhci_ops *ops;	/* Low level hw interface */
 
 	struct regulator *vmmc;		/* Power regulator (vmmc) */
@@ -114,6 +162,7 @@
 #endif
 
 	spinlock_t lock;	/* Mutex */
+	spinlock_t dekker_lock; /* eMMC Dekker Mutex lock */
 
 	int flags;		/* Host attributes */
 #define SDHCI_USE_SDMA		(1<<0)	/* Host is SDMA capable */
@@ -128,6 +177,8 @@
 #define SDHCI_SDIO_IRQ_ENABLED	(1<<9)	/* SDIO irq enabled */
 #define SDHCI_HS200_NEEDS_TUNING (1<<10)	/* HS200 needs tuning */
 #define SDHCI_USING_RETUNING_TIMER (1<<11)	/* Host is using a retuning timer for the card */
+#define SDHCI_POWER_CTRL_DEV	(1<<12) /* ctrl dev power */
+#define SDHCI_EXIT_RPM_RESUME (1<<13)	/* Exit from runtime PM resume */
 
 	unsigned int version;	/* SDHCI spec. version */
 
@@ -139,11 +190,13 @@
 	u8 pwr;			/* Current voltage */
 
 	bool runtime_suspended;	/* Host is runtime suspended */
+	bool suspended;		/* Host is suspended */
 
 	struct mmc_request *mrq;	/* Current request */
 	struct mmc_command *cmd;	/* Current command */
 	struct mmc_data *data;	/* Current data request */
 	unsigned int data_early:1;	/* Data finished before cmd */
+	unsigned int r1b_busy_end:1;	/* R1B busy end */
 
 	struct sg_mapping_iter sg_miter;	/* SG state for PIO */
 	unsigned int blocks;	/* remaining PIO blocks */
@@ -176,6 +229,9 @@
 #define SDHCI_TUNING_MODE_1	0
 	struct timer_list	tuning_timer;	/* Timer for tuning */
 
+	unsigned int            gpio_pwr_en;
+	unsigned int            gpio_1p8_en;
+
 	unsigned long private[0] ____cacheline_aligned;
 };
 #endif /* LINUX_MMC_SDHCI_H */
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index 9f03fee..b3933db 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -23,6 +23,8 @@
 /*
  * Vendors and devices.  Sort key: vendor first, device next.
  */
+#define IWL_SDIO_DEVICE_ID_WKP1	0x3160
+#define IWL_SDIO_DEVICE_ID_WKP2	0x7260
 #define SDIO_VENDOR_ID_INTEL			0x0089
 #define SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX	0x1402
 #define SDIO_DEVICE_ID_INTEL_IWMC3200WIFI	0x1403
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index e2c55f2..b923c20 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -456,7 +456,8 @@
 };
 
 struct dmi_strmatch {
-	unsigned char slot;
+	unsigned char slot:7;
+	unsigned char exact_match:1;
 	char substr[79];
 };
 
@@ -474,7 +475,8 @@
  */
 #define dmi_device_id dmi_system_id
 
-#define DMI_MATCH(a, b)	{ a, b }
+#define DMI_MATCH(a, b)	{ .slot = a, .substr = b }
+#define DMI_EXACT_MATCH(a, b)	{ .slot = a, .substr = b, .exact_match = 1 }
 
 #define PLATFORM_NAME_SIZE	20
 #define PLATFORM_MODULE_PREFIX	"platform:"
@@ -586,4 +588,23 @@
 	kernel_ulong_t driver_info;
 };
 
+/* RapidIO */
+
+#define RIO_ANY_ID	0xffff
+
+/**
+ * struct rio_device_id - RIO device identifier
+ * @did: RapidIO device ID
+ * @vid: RapidIO vendor ID
+ * @asm_did: RapidIO assembly device ID
+ * @asm_vid: RapidIO assembly vendor ID
+ *
+ * Identifies a RapidIO device based on both the device/vendor IDs and
+ * the assembly device/vendor IDs.
+ */
+struct rio_device_id {
+	__u16 did, vid;
+	__u16 asm_did, asm_vid;
+};
+
 #endif /* LINUX_MOD_DEVICETABLE_H */
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 433da8a..589b014 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -10,6 +10,7 @@
 #ifndef __LINUX_MUTEX_H
 #define __LINUX_MUTEX_H
 
+#include <asm/current.h>
 #include <linux/list.h>
 #include <linux/spinlock_types.h>
 #include <linux/linkage.h>
diff --git a/include/linux/nbd.h b/include/linux/nbd.h
index 4871170..ae4981e 100644
--- a/include/linux/nbd.h
+++ b/include/linux/nbd.h
@@ -41,6 +41,7 @@
 	u64 bytesize;
 	pid_t pid; /* pid of nbd-client, if attached */
 	int xmit_timeout;
+	int disconnect; /* a disconnect has been requested by user */
 };
 
 #endif
diff --git a/include/linux/nfc/pn544.h b/include/linux/nfc/pn544.h
new file mode 100644
index 0000000..08e1fbe
--- /dev/null
+++ b/include/linux/nfc/pn544.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2010 Trusted Logic S.A.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#define PN544_MAGIC	0xE9
+
+/*
+ * PN544 power control via ioctl
+ * PN544_SET_PWR(0): power off
+ * PN544_SET_PWR(1): power on
+ * PN544_SET_PWR(2): reset and power on with firmware download enabled
+ */
+#define PN544_SET_PWR	_IOW(PN544_MAGIC, 0x01, unsigned int)
+
+struct pn544_i2c_platform_data {
+	int (*request_resources) (struct i2c_client *client);
+	unsigned int irq_gpio;
+	unsigned int ven_gpio;
+	unsigned int firm_gpio;
+	unsigned int max_i2c_xfer_size;
+};
diff --git a/include/linux/panel_psb_drv.h b/include/linux/panel_psb_drv.h
new file mode 100644
index 0000000..5d3837e
--- /dev/null
+++ b/include/linux/panel_psb_drv.h
@@ -0,0 +1,58 @@
+/**************************************************************************
+ * Copyright (c) 2007-2008, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#ifndef _PANEL_PSB_DRV_H_
+#define _PANEL_PSB_DRV_H_
+
+#include <linux/kernel.h>
+#include <linux/sfi.h>
+
+extern int PanelID;
+
+enum panel_type {
+	TPO_CMD,
+	TPO_VID,
+	TMD_CMD,
+	TMD_VID,
+	TMD_6X10_VID,
+	PYR_CMD,
+	PYR_VID,
+	TPO,
+	TMD,
+	PYR,
+	HDMI,
+	JDI_7x12_VID,
+	JDI_7x12_CMD,
+	CMI_7x12_VID,
+	CMI_7x12_CMD,
+	JDI_10x19_VID,
+	JDI_10x19_CMD,
+	JDI_25x16_VID,
+	JDI_25x16_CMD,
+	SHARP_10x19_VID,
+	SHARP_10x19_CMD,
+	SHARP_10x19_DUAL_CMD,
+	SHARP_25x16_VID,
+	SHARP_25x16_CMD,
+	SDC_16x25_CMD,
+	SDC_25x16_CMD,
+	GCT_DETECT
+};
+
+#endif
diff --git a/include/linux/panic_gbuffer.h b/include/linux/panic_gbuffer.h
new file mode 100644
index 0000000..c0580e5
--- /dev/null
+++ b/include/linux/panic_gbuffer.h
@@ -0,0 +1,37 @@
+/*
+ * panic_gbuffer.h
+ *
+ * Copyright (C) 2013 Intel Corp
+ *
+ * Expose a generic buffer header to be passed to the panic handler in
+ * order to dump buffer content in case of kernel panic.
+ *
+ * -----------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#ifndef _LINUX_PANIC_GBUFFER_H
+#define _LINUX_PANIC_GBUFFER_H
+
+struct g_buffer_header {
+	unsigned char *base;
+	size_t size;
+	size_t woff;
+	size_t head;
+};
+
+void panic_set_gbuffer(struct g_buffer_header *gbuffer);
+
+#endif /* _LINUX_PANIC_GBUFFER_H */
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h
index 1704479..bdf737c 100644
--- a/include/linux/pci-acpi.h
+++ b/include/linux/pci-acpi.h
@@ -45,6 +45,12 @@
 void acpi_pci_add_bus(struct pci_bus *bus);
 void acpi_pci_remove_bus(struct pci_bus *bus);
 
+#ifdef CONFIG_INTEL_SOC_PMC
+extern bool acpi_pci_quirk_power_manageable(struct pci_dev *dev);
+extern pci_power_t acpi_pci_quirk_choose_state(struct pci_dev *pdev);
+extern int acpi_pci_quirk_set_state(struct pci_dev *dev, pci_power_t state);
+#endif
+
 #ifdef	CONFIG_ACPI_PCI_SLOT
 void acpi_pci_slot_init(void);
 void acpi_pci_slot_enumerate(struct pci_bus *bus, acpi_handle handle);
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index c129162..06fb7d0 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -518,6 +518,8 @@
 #define PCI_DEVICE_ID_AMD_11H_NB_MISC	0x1303
 #define PCI_DEVICE_ID_AMD_11H_NB_LINK	0x1304
 #define PCI_DEVICE_ID_AMD_15H_M10H_F3	0x1403
+#define PCI_DEVICE_ID_AMD_15H_M30H_NB_F3 0x141d
+#define PCI_DEVICE_ID_AMD_15H_M30H_NB_F4 0x141e
 #define PCI_DEVICE_ID_AMD_15H_NB_F0	0x1600
 #define PCI_DEVICE_ID_AMD_15H_NB_F1	0x1601
 #define PCI_DEVICE_ID_AMD_15H_NB_F2	0x1602
@@ -2544,8 +2546,14 @@
 #define PCI_DEVICE_ID_INTEL_MFD_SDIO2	0x0822
 #define PCI_DEVICE_ID_INTEL_MFD_EMMC0	0x0823
 #define PCI_DEVICE_ID_INTEL_MFD_EMMC1	0x0824
+#define PCI_DEVICE_ID_INTEL_MFD_OTG	0x0829
 #define PCI_DEVICE_ID_INTEL_MRST_SD2	0x084F
 #define PCI_DEVICE_ID_INTEL_I960	0x0960
+#define PCI_DEVICE_ID_INTEL_MRFL_MMC	0x1190
+#define PCI_DEVICE_ID_INTEL_MRFL_DWC3_OTG	0x119E
+#define PCI_DEVICE_ID_INTEL_MOOR_EMMC	0x1490
+#define PCI_DEVICE_ID_INTEL_MOOR_SD	0x1491
+#define PCI_DEVICE_ID_INTEL_MOOR_SDIO	0x1492
 #define PCI_DEVICE_ID_INTEL_I960RM	0x0962
 #define PCI_DEVICE_ID_INTEL_CENTERTON_ILB	0x0c60
 #define PCI_DEVICE_ID_INTEL_8257X_SOL	0x1062
@@ -2857,6 +2865,13 @@
 #define PCI_DEVICE_ID_INTEL_IXP4XX	0x8500
 #define PCI_DEVICE_ID_INTEL_IXP2800	0x9004
 #define PCI_DEVICE_ID_INTEL_S21152BB	0xb152
+#define PCI_DEVICE_ID_INTEL_SST_MRFLD   0x119A
+#define PCI_DEVICE_ID_INTEL_SST_MOOR    0x1495
+#define PCI_DEVICE_ID_INTEL_AUDIO_DMAC0_MOOR	0x1496
+#define PCI_DEVICE_ID_INTEL_GP_DMAC2_MOOR	0x1497
+#define PCI_DEVICE_ID_INTEL_VIBRA_MRFLD 0x11a5
+#define PCI_DEVICE_ID_INTEL_VIBRA_MOOR 0x1498
+
 
 #define PCI_VENDOR_ID_SCALEMP		0x8686
 #define PCI_DEVICE_ID_SCALEMP_VSMP_CTL	0x1010
diff --git a/include/linux/platform_data/intel_mid_remoteproc.h b/include/linux/platform_data/intel_mid_remoteproc.h
new file mode 100644
index 0000000..ad0cd9a
--- /dev/null
+++ b/include/linux/platform_data/intel_mid_remoteproc.h
@@ -0,0 +1,121 @@
+/*
+ * INTEL MID Remote Processor Head File
+ *
+ * Copyright (C) 2012 Intel, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _ASM_INTEL_MID_REMOTEPROC_H
+#define _ASM_INTEL_MID_REMOTEPROC_H
+
+#define RP_IPC_COMMAND		0xA0
+#define RP_IPC_SIMPLE_COMMAND	0xA1
+#define RP_IPC_RAW_COMMAND	0xA2
+
+#define	RP_PMIC_ACCESS		0xFF
+#define	RP_DFU_REQUEST		0xFE
+#define	RP_SET_WATCHDOG		0xF8
+#define	RP_FLIS_ACCESS		0xF5
+#define	RP_GET_FW_REVISION	0xF4
+#define	RP_COLD_BOOT		0xF3
+#define	RP_COLD_RESET		0xF1
+#define	RP_COLD_OFF		0x80
+#define	RP_MIP_ACCESS		0xEC
+#define RP_GET_HOBADDR		0xE5
+#define RP_OSC_CLK_CTRL		0xE6
+#define RP_S0IX_COUNTER		0xE8
+#define RP_WRITE_OSNIB		0xE4
+#define RP_CLEAR_FABERROR	0xE3
+#define RP_SCULOG_CTRL		0xE1
+#define RP_FW_UPDATE		0xFE
+#define RP_VRTC			0xFA
+#define RP_PMDB			0xE0
+#define RP_SCULOG_TRACE		0x90
+#define RP_WRITE_OEMNIB		0xDF	/* Command is used to write OEMNIB */
+					/* data. Used with extended OSHOB  */
+					/* OSNIB only.                     */
+/*
+ * Assigning some temp ids for following devices
+ * TODO: Need to change it to some meaningful
+ *       values.
+ */
+#define RP_PMIC_GPIO		0X02
+#define RP_PMIC_AUDIO		0x03
+#define RP_MSIC_GPIO		0x05
+#define RP_MSIC_AUDIO		0x06
+#define RP_MSIC_OCD		0x07
+#define RP_MSIC_BATTERY		0XEF
+#define RP_MSIC_THERMAL		0x09
+#define RP_MSIC_POWER_BTN	0x10
+#define RP_IPC			0X11
+#define RP_IPC_UTIL		0X12
+#define RP_FW_ACCESS		0X13
+#define RP_UMIP_ACCESS		0x14
+#define RP_OSIP_ACCESS		0x15
+#define RP_MSIC_ADC		0x16
+#define RP_BQ24192		0x17
+#define RP_MSIC_CLV_AUDIO	0x18
+#define RP_PMIC_CCSM		0x19
+#define RP_PMIC_I2C		0x20
+#define RP_MSIC_MRFLD_AUDIO	0x21
+#define RP_MSIC_PWM		0x22
+#define RP_MSIC_KPD_LED		0x23
+#define RP_BCOVE_ADC		0x24
+#define RP_BCOVE_THERMAL	0x25
+#define RP_MRFL_OCD		0x26
+#define RP_FW_LOGGING		0x27
+#define RP_PMIC_CHARGER		0x28
+
+enum rproc_type {
+	RPROC_SCU = 0,
+	RPROC_PSH,
+	RPROC_NUM,
+};
+
+struct rproc_ops;
+struct platform_device;
+struct rpmsg_ns_msg;
+
+struct rpmsg_ns_info {
+	enum rproc_type type;
+	char name[RPMSG_NAME_SIZE];
+	u32 addr;
+	u32 flags;
+	struct list_head node;
+};
+
+struct rpmsg_ns_list {
+	struct list_head list;
+	struct mutex lock;
+};
+
+extern struct rpmsg_ns_info *rpmsg_ns_alloc(const char *name,
+						int id, u32 addr);
+extern void rpmsg_ns_add_to_list(struct rpmsg_ns_info *info,
+					struct rpmsg_ns_list *nslist);
+
+/*
+ * struct intel_mid_rproc_pdata - intel mid remoteproc's platform data
+ * @name: the remoteproc's name
+ * @firmware: name of firmware file to load
+ * @ops: start/stop rproc handlers
+ * @device_enable: handler for enabling a device
+ * @device_shutdown: handler for shutting down a device
+ */
+struct intel_mid_rproc_pdata {
+	const char *name;
+	const char *firmware;
+	const struct rproc_ops *ops;
+	int (*device_enable) (struct platform_device *pdev);
+	int (*device_shutdown) (struct platform_device *pdev);
+	struct rpmsg_ns_list *nslist;
+};
+
+#endif /* _ASM_INTEL_MID_REMOTEPROC_H */
diff --git a/include/linux/platform_data/lp855x.h b/include/linux/platform_data/lp855x.h
index ea32005..82a42ad 100644
--- a/include/linux/platform_data/lp855x.h
+++ b/include/linux/platform_data/lp855x.h
@@ -12,6 +12,10 @@
 #ifndef _LP855X_H
 #define _LP855X_H
 
+extern struct lp855x *lpdata;
+int lp855x_ext_write_byte(u8 reg, u8 data);
+int lp855x_ext_read_byte(u8 reg);
+
 #define BL_CTL_SHFT	(0)
 #define BRT_MODE_SHFT	(1)
 #define BRT_MODE_MASK	(0x06)
@@ -48,6 +52,79 @@
 #define LP8556_COMB2_CONFIG	(LP8556_COMBINED2 << BRT_MODE_SHFT)
 #define LP8556_FAST_CONFIG	BIT(7) /* use it if EPROMs should be maintained
 					  when exiting the low power mode */
+#define	LP8556_5LEDSTR		0x1F  /* 5 led string definition for ffrd8 and bytcr-rvp boards.*/
+
+#define	LP8556_LEDSTREN		0x16
+#define LP8556_CFG98		0x98
+#define LP8556_CFG9E		0x9E
+#define LP8556_CFG0		0xA0
+#define LP8556_CFG1		0xA1
+#define LP8556_CFG2		0xA2
+#define LP8556_CFG3		0xA3
+#define LP8556_CFG4		0xA4
+#define LP8556_CFG5		0xA5
+	#define LP8556_PWM_DRECT_EN	0x80
+	#define LP8556_PWM_DRECT_DIS	0x00
+	#define LP8556_PS_MODE_6P6D	0x00
+	#define LP8556_PS_MODE_5P5D	0x10
+	#define LP8556_PS_MODE_4P4D	0x20
+	#define LP8556_PS_MODE_3P3D	0x30
+	#define LP8556_PS_MODE_2P2D	0x40
+	#define LP8556_PS_MODE_3P6D	0x50
+	#define LP8556_PS_MODE_2P6D	0x60
+	#define LP8556_PS_MODE_1P6D	0x70
+	#define LP8556_PWM_FREQ_4808HZ	0x00
+	#define LP8556_PWM_FREQ_6010HZ	0x01
+	#define LP8556_PWM_FREQ_7212HZ	0x02
+	#define LP8556_PWM_FREQ_8414HZ	0x03
+	#define LP8556_PWM_FREQ_9616HZ	0x04
+	#define LP8556_PWM_FREQ_12020HZ	0x05
+	#define LP8556_PWM_FREQ_13222HZ	0x06
+	#define LP8556_PWM_FREQ_14424HZ	0x07
+	#define LP8556_PWM_FREQ_15626HZ	0x08
+	#define LP8556_PWM_FREQ_16828HZ	0x09
+	#define LP8556_PWM_FREQ_18030HZ	0x0A
+	#define LP8556_PWM_FREQ_19232HZ	0x0B
+	#define LP8556_PWM_FREQ_24040HZ	0x0C
+	#define LP8556_PWM_FREQ_28848HZ	0x0D
+	#define LP8556_PWM_FREQ_33656HZ	0x0E
+	#define LP8556_PWM_FREQ_38464HZ	0x0F
+#define LP8556_CFG6		0xA6
+#define LP8556_CFG7		0xA7
+	#define LP8556_RSRVD_76	0xC0
+	#define LP8556_DRV3_EN	0x20
+	#define LP8556_DRV3_DIS	0x00
+	#define LP8556_DRV2_EN	0x10
+	#define LP8556_DRV2_DIS	0x00
+	#define LP8556_RSRVD_32	0x0C
+	#define LP8556_IBOOST_LIM_0_9A_1_6A	0x00
+	#define LP8556_IBOOST_LIM_1_2A_2_1A	0x01
+	#define LP8556_IBOOST_LIM_1_5A_2_6A	0x02
+	#define LP8556_IBOOST_LIM_1_8A_NA	0x03
+#define LP8556_CFG8		0xA8
+#define LP8556_CFG9		0xA9
+	#define LP8556_VBOOST_MAX_NA_21V	0x40
+	#define LP8556_VBOOST_MAX_NA_25V	0x60
+	#define LP8556_VBOOST_MAX_21V_30V	0x80
+	#define LP8556_VBOOST_MAX_25V_34_5V	0xA0
+	#define LP8556_VBOOST_MAX_30V_39V	0xC0
+	#define LP8556_VBOOST_MAX_34V_43V	0xE0
+	#define LP8556_JUMP_EN			0x10
+	#define LP8556_JUMP_DIS			0x00
+	#define LP8556_JMP_TSHOLD_10P		0x00
+	#define LP8556_JMP_TSHOLD_30P		0x04
+	#define LP8556_JMP_TSHOLD_50P		0x08
+	#define LP8556_JMP_TSHOLD_70P		0x0C
+	#define LP8556_JMP_VOLT_0_5V		0x00
+	#define LP8556_JMP_VOLT_1V		0x01
+	#define LP8556_JMP_VOLT_2V		0x02
+	#define LP8556_JMP_VOLT_4V		0x03
+#define LP8556_CFGA		0xAA
+#define LP8556_CFGB		0xAB
+#define LP8556_CFGC		0xAC
+#define LP8556_CFGD		0xAD
+#define LP8556_CFGE		0xAE
+#define LP8556_CFGF		0xAF
 
 /* CONFIG register - LP8557 */
 #define LP8557_PWM_STANDBY	BIT(7)
diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h
index 5a95013..8770ae4 100644
--- a/include/linux/pm_qos.h
+++ b/include/linux/pm_qos.h
@@ -15,6 +15,7 @@
 	PM_QOS_CPU_DMA_LATENCY,
 	PM_QOS_NETWORK_LATENCY,
 	PM_QOS_NETWORK_THROUGHPUT,
+	PM_QOS_CPU_FREQ_MIN,
 
 	/* insert new class ID */
 	PM_QOS_NUM_CLASSES,
@@ -32,6 +33,7 @@
 #define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE	(2000 * USEC_PER_SEC)
 #define PM_QOS_NETWORK_LAT_DEFAULT_VALUE	(2000 * USEC_PER_SEC)
 #define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE	0
+#define PM_QOS_CPU_FREQ_MIN_DEFAULT_VALUE	0
 #define PM_QOS_DEV_LAT_DEFAULT_VALUE		0
 
 #define PM_QOS_FLAG_NO_POWER_OFF	(1 << 0)
diff --git a/include/linux/power/battery_id.h b/include/linux/power/battery_id.h
new file mode 100644
index 0000000..d604acd
--- /dev/null
+++ b/include/linux/power/battery_id.h
@@ -0,0 +1,79 @@
+#ifndef __BATTERY_ID_H__
+
+#define __BATTERY_ID_H__
+
+enum {
+	POWER_SUPPLY_BATTERY_REMOVED = 0,
+	POWER_SUPPLY_BATTERY_INSERTED,
+};
+
+enum batt_chrg_prof_type {
+	CHRG_PROF_NONE = 0,
+	PSE_MOD_CHRG_PROF,
+};
+
+/* charging profile structure definition */
+struct ps_batt_chg_prof {
+	enum batt_chrg_prof_type chrg_prof_type;
+	void *batt_prof;
+};
+
+/* PSE Modified Algo Structure */
+/* Parameters defining the charging range */
+struct ps_temp_chg_table {
+	/* upper temperature limit for each zone */
+	short int temp_up_lim;
+	/* charge current and voltage */
+	short int full_chrg_vol;
+	short int full_chrg_cur;
+	/* maintenance thresholds */
+	/* maintenance lower threshold. Once battery hits full, charging
+	*  charging will be resumed when battery voltage <= this voltage
+	*/
+	short int maint_chrg_vol_ll;
+	/* Charge current and voltage in maintenance mode */
+	short int maint_chrg_vol_ul;
+	short int maint_chrg_cur;
+} __packed;
+
+
+#define BATTID_STR_LEN		8
+#define BATT_TEMP_NR_RNG	6
+/* Charging Profile */
+struct ps_pse_mod_prof {
+	/* battery id */
+	char batt_id[BATTID_STR_LEN];
+	/* type of battery */
+	u16 battery_type;
+	u16 capacity;
+	u16 voltage_max;
+	/* charge termination current */
+	u16 chrg_term_ma;
+	/* Low battery level voltage */
+	u16 low_batt_mV;
+	/* upper and lower temperature limits on discharging */
+	s8 disch_tmp_ul;
+	s8 disch_tmp_ll;
+	/* number of temperature monitoring ranges */
+	u16 temp_mon_ranges;
+	struct ps_temp_chg_table temp_mon_range[BATT_TEMP_NR_RNG];
+	/* Lowest temperature supported */
+	short int temp_low_lim;
+} __packed;
+
+/*For notification during battery change event*/
+extern struct atomic_notifier_head    batt_id_notifier;
+
+extern void battery_prop_changed(int battery_conn_stat,
+				struct ps_batt_chg_prof *batt_prop);
+#ifdef CONFIG_POWER_SUPPLY_BATTID
+extern int get_batt_prop(struct ps_batt_chg_prof *batt_prop);
+#else
+static inline int get_batt_prop(struct ps_batt_chg_prof *batt_prop)
+{
+	return -ENOMEM;
+}
+#endif
+extern int batt_id_reg_notifier(struct notifier_block *nb);
+extern void batt_id_unreg_notifier(struct notifier_block *nb);
+#endif
diff --git a/include/linux/power/bq24192_charger.h b/include/linux/power/bq24192_charger.h
new file mode 100644
index 0000000..619d7e5
--- /dev/null
+++ b/include/linux/power/bq24192_charger.h
@@ -0,0 +1,145 @@
+/*
+ * bq24192_charger.h - Charger driver for TI BQ24190/191/192/192I
+ *
+ * Copyright (C) 2012 Intel Corporation
+ * Ramakrishna Pallala <ramakrishna.pallala@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/types.h>
+#include <linux/power_supply.h>
+#include <linux/power/battery_id.h>
+
+#ifndef __BQ24192_CHARGER_H_
+#define __BQ24192_CHARGER_H_
+
+#define TEMP_NR_RNG	4
+#define BATTID_STR_LEN	8
+#define RANGE	25
+/* User limits for sysfs charge enable/disable */
+#define USER_SET_CHRG_DISABLE	0
+#define USER_SET_CHRG_LMT1	1
+#define USER_SET_CHRG_LMT2	2
+#define USER_SET_CHRG_LMT3	3
+#define USER_SET_CHRG_NOLMT	4
+
+#define INPUT_CHRG_CURR_0	0
+#define INPUT_CHRG_CURR_100	100
+#define INPUT_CHRG_CURR_500	500
+#define INPUT_CHRG_CURR_950	950
+#define INPUT_CHRG_CURR_1500	1500
+/* Charger Master Temperature Control Register */
+#define MSIC_CHRTMPCTRL         0x18E
+/* Higher Temprature Values*/
+#define CHRTMPCTRL_TMPH_60      (3 << 6)
+#define CHRTMPCTRL_TMPH_55      (2 << 6)
+#define CHRTMPCTRL_TMPH_50      (1 << 6)
+#define CHRTMPCTRL_TMPH_45      (0 << 6)
+
+/* Lower Temprature Values*/
+#define CHRTMPCTRL_TMPL_15      (3 << 4)
+#define CHRTMPCTRL_TMPL_10      (2 << 4)
+#define CHRTMPCTRL_TMPL_05      (1 << 4)
+#define CHRTMPCTRL_TMPL_00      (0 << 4)
+
+enum bq24192_bat_chrg_mode {
+	BATT_CHRG_FULL = 0,
+	BATT_CHRG_NORMAL = 1,
+	BATT_CHRG_MAINT = 2,
+	BATT_CHRG_NONE = 3
+};
+
+
+/*********************************************************************
+ * SFI table entries Structures
+ ********************************************************************/
+/*********************************************************************
+ *		Platform Data Section
+ *********************************************************************/
+/* Battery Thresholds info which need to get from SMIP area */
+struct platform_batt_safety_param {
+	u8 smip_rev;
+	u8 fpo;		/* fixed implementation options */
+	u8 fpo1;	/* fixed implementation options1 */
+	u8 rsys;	/* System Resistance for Fuel gauging */
+
+	/* Minimum voltage necessary to
+	 * be able to safely shut down */
+	short int vbatt_sh_min;
+
+	/* Voltage at which the battery driver
+	 * should report the LEVEL as CRITICAL */
+	short int vbatt_crit;
+
+	short int itc;		/* Charge termination current */
+	short int temp_high;	/* Safe Temp Upper Limit */
+	short int temp_low;	/* Safe Temp lower Limit */
+	u8 brd_id;		/* Unique Board ID */
+} __packed;
+
+/* Parameters defining the range */
+struct platform_temp_mon_table {
+	short int temp_up_lim;
+	short int temp_low_lim;
+	short int rbatt;
+	short int full_chrg_vol;
+	short int full_chrg_cur;
+	short int maint_chrg_vol_ll;
+	short int maint_chrg_vol_ul;
+	short int maint_chrg_cur;
+} __packed;
+
+struct platform_batt_profile {
+	char batt_id[BATTID_STR_LEN];
+	unsigned short int voltage_max;
+	unsigned int capacity;
+	u8 battery_type;
+	u8 temp_mon_ranges;
+	struct platform_temp_mon_table temp_mon_range[TEMP_NR_RNG];
+
+} __packed;
+
+struct bq24192_platform_data {
+	bool slave_mode;
+	short int temp_low_lim;
+	bool sfi_tabl_present;
+	short int safetemp;
+	struct platform_batt_profile batt_profile;
+	struct platform_batt_safety_param safety_param;
+	struct power_supply_throttle *throttle_states;
+	struct ps_batt_chg_prof *chg_profile;
+
+	char **supplied_to;
+	size_t	num_supplicants;
+	size_t num_throttle_states;
+	unsigned long supported_cables;
+
+	/* safety charegr setting */
+	int max_cc;
+	int max_cv;
+	int max_temp;
+	int min_temp;
+
+	/* Function pointers for platform specific initialization */
+	int (*init_platform_data)(void);
+	int (*get_irq_number)(void);
+	int (*query_otg)(void *, void *);
+	int (*drive_vbus)(bool);
+	int (*get_battery_pack_temp)(int *);
+	void (*free_platform_data)(void);
+};
+
+#endif /* __BQ24192_CHARGER_H_ */
diff --git a/include/linux/power/bq24261_charger.h b/include/linux/power/bq24261_charger.h
new file mode 100644
index 0000000..9dee86e
--- /dev/null
+++ b/include/linux/power/bq24261_charger.h
@@ -0,0 +1,58 @@
+/*
+ * bq24261_charger.h: platform data structure for bq24261 driver
+ *
+ * (C) Copyright 2012 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#ifndef __BQ24261_CHARGER_H__
+#define __BQ24261_CHARGER_H__
+
+struct bq24261_plat_data {
+	u32 irq_map;
+	u8 irq_mask;
+	char **supplied_to;
+	size_t num_supplicants;
+	struct power_supply_throttle *throttle_states;
+	size_t num_throttle_states;
+	int safety_timer;
+	int boost_mode_ma;
+	bool is_ts_enabled;
+	bool is_wdt_kick_needed;
+
+	int (*enable_charging) (bool val);
+	int (*enable_charger) (bool val);
+	int (*set_inlmt) (int val);
+	int (*set_cc) (int val);
+	int (*set_cv) (int val);
+	int (*set_iterm) (int val);
+	int (*enable_vbus) (bool val);
+	int (*handle_otgmode) (bool val);
+	/* WA for ShadyCove VBUS removal detect issue */
+	int (*handle_low_supply) (void);
+	void (*dump_master_regs) (void);
+};
+
+extern void bq24261_cv_to_reg(int, u8*);
+extern void bq24261_cc_to_reg(int, u8*);
+extern void bq24261_inlmt_to_reg(int, u8*);
+
+#ifdef CONFIG_BQ24261_CHARGER
+extern int bq24261_get_bat_health(void);
+extern int bq24261_get_bat_status(void);
+#else
+static int __maybe_unused bq24261_get_bat_health(void)
+{
+	return 0;
+}
+static int __maybe_unused bq24261_get_bat_status(void)
+{
+	return 0;
+}
+#endif
+
+#endif
diff --git a/include/linux/power/max17042_battery.h b/include/linux/power/max17042_battery.h
index 89dd84f..1b5ca16 100644
--- a/include/linux/power/max17042_battery.h
+++ b/include/linux/power/max17042_battery.h
@@ -1,5 +1,5 @@
 /*
- * Fuel gauge driver for Maxim 17042 / 8966 / 8997
+ * max17042_battery.h - Fuel gauge driver for Maxim 17042 / 8966 / 8997
  *  Note that Maxim 8966 and 8997 are mfd and this is its subdevice.
  *
  * Copyright (C) 2011 Samsung Electronics
@@ -23,191 +23,100 @@
 #ifndef __MAX17042_BATTERY_H_
 #define __MAX17042_BATTERY_H_
 
-#define MAX17042_STATUS_BattAbsent	(1 << 3)
-#define MAX17042_BATTERY_FULL	(100)
-#define MAX17042_DEFAULT_SNS_RESISTOR	(10000)
+/* No of cell characterization words to be written to max17042 */
+#define CELL_CHAR_TBL_SAMPLES	48
 
-#define MAX17042_CHARACTERIZATION_DATA_SIZE 48
+#define BATTID_LEN		8
+#define MAX_TABLE_NAME_LEN	8
+#define MODEL_NAME_LEN		2
+#define SERIAL_NUM_LEN		6
 
-enum max17042_register {
-	MAX17042_STATUS		= 0x00,
-	MAX17042_VALRT_Th	= 0x01,
-	MAX17042_TALRT_Th	= 0x02,
-	MAX17042_SALRT_Th	= 0x03,
-	MAX17042_AtRate		= 0x04,
-	MAX17042_RepCap		= 0x05,
-	MAX17042_RepSOC		= 0x06,
-	MAX17042_Age		= 0x07,
-	MAX17042_TEMP		= 0x08,
-	MAX17042_VCELL		= 0x09,
-	MAX17042_Current	= 0x0A,
-	MAX17042_AvgCurrent	= 0x0B,
-
-	MAX17042_SOC		= 0x0D,
-	MAX17042_AvSOC		= 0x0E,
-	MAX17042_RemCap		= 0x0F,
-	MAX17042_FullCAP	= 0x10,
-	MAX17042_TTE		= 0x11,
-	MAX17042_V_empty	= 0x12,
-
-	MAX17042_RSLOW		= 0x14,
-
-	MAX17042_AvgTA		= 0x16,
-	MAX17042_Cycles		= 0x17,
-	MAX17042_DesignCap	= 0x18,
-	MAX17042_AvgVCELL	= 0x19,
-	MAX17042_MinMaxTemp	= 0x1A,
-	MAX17042_MinMaxVolt	= 0x1B,
-	MAX17042_MinMaxCurr	= 0x1C,
-	MAX17042_CONFIG		= 0x1D,
-	MAX17042_ICHGTerm	= 0x1E,
-	MAX17042_AvCap		= 0x1F,
-	MAX17042_ManName	= 0x20,
-	MAX17042_DevName	= 0x21,
-
-	MAX17042_FullCAPNom	= 0x23,
-	MAX17042_TempNom	= 0x24,
-	MAX17042_TempLim	= 0x25,
-	MAX17042_TempHot	= 0x26,
-	MAX17042_AIN		= 0x27,
-	MAX17042_LearnCFG	= 0x28,
-	MAX17042_FilterCFG	= 0x29,
-	MAX17042_RelaxCFG	= 0x2A,
-	MAX17042_MiscCFG	= 0x2B,
-	MAX17042_TGAIN		= 0x2C,
-	MAx17042_TOFF		= 0x2D,
-	MAX17042_CGAIN		= 0x2E,
-	MAX17042_COFF		= 0x2F,
-
-	MAX17042_MaskSOC	= 0x32,
-	MAX17042_SOC_empty	= 0x33,
-	MAX17042_T_empty	= 0x34,
-
-	MAX17042_FullCAP0       = 0x35,
-	MAX17042_LAvg_empty	= 0x36,
-	MAX17042_FCTC		= 0x37,
-	MAX17042_RCOMP0		= 0x38,
-	MAX17042_TempCo		= 0x39,
-	MAX17042_EmptyTempCo	= 0x3A,
-	MAX17042_K_empty0	= 0x3B,
-	MAX17042_TaskPeriod	= 0x3C,
-	MAX17042_FSTAT		= 0x3D,
-
-	MAX17042_SHDNTIMER	= 0x3F,
-
-	MAX17042_dQacc		= 0x45,
-	MAX17042_dPacc		= 0x46,
-
-	MAX17042_VFSOC0		= 0x48,
-
-	MAX17042_QH		= 0x4D,
-	MAX17042_QL		= 0x4E,
-
-	MAX17042_VFSOC0Enable	= 0x60,
-	MAX17042_MLOCKReg1	= 0x62,
-	MAX17042_MLOCKReg2	= 0x63,
-
-	MAX17042_MODELChrTbl	= 0x80,
-
-	MAX17042_OCV		= 0xEE,
-
-	MAX17042_OCVInternal	= 0xFB,
-
-	MAX17042_VFSOC		= 0xFF,
-};
-
-/* Registers specific to max17047/50 */
-enum max17047_register {
-	MAX17047_QRTbl00	= 0x12,
-	MAX17047_FullSOCThr	= 0x13,
-	MAX17047_QRTbl10	= 0x22,
-	MAX17047_QRTbl20	= 0x32,
-	MAX17047_V_empty	= 0x3A,
-	MAX17047_QRTbl30	= 0x42,
-};
-
-enum max170xx_chip_type {MAX17042, MAX17047};
-
-/*
- * used for setting a register to a desired value
- * addr : address for a register
- * data : setting value for the register
- */
-struct max17042_reg_data {
-	u8 addr;
-	u16 data;
-};
+/* fuel gauge table type for DV10 platfrom */
+#define MAX17042_TBL_TYPE_DV10	0xff
 
 struct max17042_config_data {
-	/* External current sense resistor value in milli-ohms */
-	u32	cur_sense_val;
+	/*
+	* if config_init is 0, which means new
+	* configuration has been loaded in that case
+	* we need to perform complete init of chip
+	*/
 
-	/* A/D measurement */
-	u16	tgain;		/* 0x2C */
-	u16	toff;		/* 0x2D */
-	u16	cgain;		/* 0x2E */
-	u16	coff;		/* 0x2F */
+	u8	table_type;
+	u16	size;
+	u16	rev;
+	u8	table_name[MAX_TABLE_NAME_LEN];
+	u8      battid[BATTID_LEN];
+	u8	config_init;
 
-	/* Alert / Status */
-	u16	valrt_thresh;	/* 0x01 */
-	u16	talrt_thresh;	/* 0x02 */
-	u16	soc_alrt_thresh;	/* 0x03 */
-	u16	config;		/* 0x01D */
-	u16	shdntimer;	/* 0x03F */
+	u16	rcomp0;
+	u16	tempCo;
+	u16	kempty0;
+	u16	full_cap;
+	u16	cycles;
+	u16	full_capnom;
 
-	/* App data */
-	u16	full_soc_thresh;	/* 0x13 */
-	u16	design_cap;	/* 0x18 */
-	u16	ichgt_term;	/* 0x1E */
+	u16	soc_empty;
+	u16	ichgt_term;
+	u16	design_cap;
+	u16	etc;
+	u16	rsense;
+	u16	cfg;
+	u16	learn_cfg;
+	u16	filter_cfg;
+	u16	relax_cfg;
 
-	/* MG3 config */
-	u16	at_rate;	/* 0x04 */
-	u16	learn_cfg;	/* 0x28 */
-	u16	filter_cfg;	/* 0x29 */
-	u16	relax_cfg;	/* 0x2A */
-	u16	misc_cfg;	/* 0x2B */
-	u16	masksoc;	/* 0x32 */
+	/* config data specific to max17050 */
+	u16	qrtbl00;
+	u16	qrtbl10;
+	u16	qrtbl20;
+	u16	qrtbl30;
+	u16	full_soc_thr;
+	u16	vempty;
 
-	/* MG3 save and restore */
-	u16	fullcap;	/* 0x10 */
-	u16	fullcapnom;	/* 0x23 */
-	u16	socempty;	/* 0x33 */
-	u16	lavg_empty;	/* 0x36 */
-	u16	dqacc;		/* 0x45 */
-	u16	dpacc;		/* 0x46 */
-	u16	qrtbl00;	/* 0x12 */
-	u16	qrtbl10;	/* 0x22 */
-	u16	qrtbl20;	/* 0x32 */
-	u16	qrtbl30;	/* 0x42 */
-
-	/* Cell technology from power_supply.h */
-	u16	cell_technology;
-
-	/* Cell Data */
-	u16	vempty;		/* 0x12 */
-	u16	temp_nom;	/* 0x24 */
-	u16	temp_lim;	/* 0x25 */
-	u16	fctc;		/* 0x37 */
-	u16	rcomp0;		/* 0x38 */
-	u16	tcompc0;	/* 0x39 */
-	u16	empty_tempco;	/* 0x3A */
-	u16	kempty0;	/* 0x3B */
-	u16	cell_char_tbl[MAX17042_CHARACTERIZATION_DATA_SIZE];
+	u16	cell_char_tbl[CELL_CHAR_TBL_SAMPLES];
 } __packed;
 
 struct max17042_platform_data {
-	struct max17042_reg_data *init_data;
-	struct max17042_config_data *config_data;
-	int num_init_data; /* Number of enties in init_data array */
 	bool enable_current_sense;
-	bool enable_por_init; /* Use POR init from Maxim appnote */
+	bool is_init_done;
+	bool is_volt_shutdown;
+	bool is_capacity_shutdown;
+	bool is_lowbatt_shutdown;
+	bool file_sys_storage_enabled;
+	bool soc_intr_mode_enabled;
+	bool reset_chip;
+	bool valid_battery;
+	bool en_vmax_intr;
+	int technology;
+	int fg_algo_model; /* maxim chip algorithm model */
+	char battid[BATTID_LEN + 1];
+	char model_name[MODEL_NAME_LEN + 1];
+	char serial_num[2*SERIAL_NUM_LEN + 1];
 
-	/*
-	 * R_sns in micro-ohms.
-	 * default 10000 (if r_sns = 0) as it is the recommended value by
-	 * the datasheet although it can be changed by board designers.
-	 */
-	unsigned int r_sns;
+	/* battery safety thresholds */
+	int temp_min_lim;	/* in degrees centigrade */
+	int temp_max_lim;	/* in degrees centigrade */
+	int volt_min_lim;	/* milli volts */
+	int volt_max_lim;	/* milli volts */
+	int resv_cap;
+
+	u16 tgain;
+	u16 toff;
+
+	int (*current_sense_enabled)(void);
+	int (*battery_present)(void);
+	int (*battery_health)(void);
+	int (*battery_status)(void);
+	int (*battery_pack_temp)(int *);
+	int (*save_config_data)(const char *name, void *data, int len);
+	int (*restore_config_data)(const char *name, void *data, int len);
+	void (*reset_i2c_lines)(void);
+
+	bool (*is_cap_shutdown_enabled)(void);
+	bool (*is_volt_shutdown_enabled)(void);
+	bool (*is_lowbatt_shutdown_enabled)(void);
+	int (*get_vmin_threshold)(void);
+	int (*get_vmax_threshold)(void);
 };
 
 #endif /* __MAX17042_BATTERY_H_ */
diff --git a/include/linux/power/smb347-charger.h b/include/linux/power/smb347-charger.h
index b3cb20d..3c14ff9 100644
--- a/include/linux/power/smb347-charger.h
+++ b/include/linux/power/smb347-charger.h
@@ -16,6 +16,7 @@
 
 #include <linux/types.h>
 #include <linux/power_supply.h>
+#include <linux/power/battery_id.h>
 
 enum {
 	/* use the default compensation method */
@@ -39,79 +40,103 @@
 	SMB347_CHG_ENABLE_PIN_ACTIVE_HIGH,
 };
 
+/*
+ * Driving VBUS can be controlled by software (via i2c) by pin or by
+ * hardware ID pin autodetection. If set to %SMB347_OTG_CONTROL_DISABLED
+ * the functionality is disabled.
+ *
+ * %SMB347_OTG_CONTROL_DISABLED - don't use OTG at all
+ * %SMB347_OTG_CONTROL_PIN - use ID pin to detect when VBUS should be
+ *			     driven and raise VBUS automatically
+ * %SMB347_OTG_CONTROL_AUTO - Use auto-OTG and RID detection algorithm
+ * %SMB347_OTG_CONTROL_SW - enable OTG VBUS via register when we receive an
+ *			    OTG event from transceiver driver
+ * %SMB347_OTG_CONTROL_SW_PIN - enable OTG VBUS by switching to pin control
+ *				mode when OTG event is received
+ * %SMB347_OTG_CONTROL_SW_AUTO - enable OTG VBUS by switching to auto-OTG
+ *				 mode when OTG event is received
+ */
+enum smb347_otg_control {
+	SMB347_OTG_CONTROL_DISABLED,
+	SMB347_OTG_CONTROL_PIN,
+	SMB347_OTG_CONTROL_AUTO,
+	SMB347_OTG_CONTROL_SW,
+	SMB347_OTG_CONTROL_SW_PIN,
+	SMB347_OTG_CONTROL_SW_AUTO,
+};
+
 /**
  * struct smb347_charger_platform_data - platform data for SMB347 charger
  * @battery_info: Information about the battery
- * @max_charge_current: maximum current (in uA) the battery can be charged
- * @max_charge_voltage: maximum voltage (in uV) the battery can be charged
- * @pre_charge_current: current (in uA) to use in pre-charging phase
  * @termination_current: current (in uA) used to determine when the
  *			 charging cycle terminates
- * @pre_to_fast_voltage: voltage (in uV) treshold used for transitioning to
- *			 pre-charge to fast charge mode
- * @mains_current_limit: maximum input current drawn from AC/DC input (in uA)
- * @usb_hc_current_limit: maximum input high current (in uA) drawn from USB
- *			  input
- * @chip_temp_threshold: die temperature where device starts limiting charge
- *			 current [%100 - %130] (in degree C)
- * @soft_cold_temp_limit: soft cold temperature limit [%0 - %15] (in degree C),
- *			  granularity is 5 deg C.
- * @soft_hot_temp_limit: soft hot temperature limit [%40 - %55] (in degree  C),
- *			 granularity is 5 deg C.
- * @hard_cold_temp_limit: hard cold temperature limit [%-5 - %10] (in degree C),
- *			  granularity is 5 deg C.
- * @hard_hot_temp_limit: hard hot temperature limit [%50 - %65] (in degree C),
- *			 granularity is 5 deg C.
- * @suspend_on_hard_temp_limit: suspend charging when hard limit is hit
- * @soft_temp_limit_compensation: compensation method when soft temperature
- *				  limit is hit
- * @charge_current_compensation: current (in uA) for charging compensation
- *				 current when temperature hits soft limits
  * @use_mains: AC/DC input can be used
  * @use_usb: USB input can be used
- * @use_usb_otg: USB OTG output can be used (not implemented yet)
  * @irq_gpio: GPIO number used for interrupts (%-1 if not used)
  * @enable_control: how charging enable/disable is controlled
  *		    (driver/pin controls)
+ * @otg_control: how OTG VBUS is controlled
  *
- * @use_main, @use_usb, and @use_usb_otg are means to enable/disable
+ * @use_main, @use_usb, and @otg_control are means to enable/disable
  * hardware support for these. This is useful when we want to have for
  * example OTG charging controlled via OTG transceiver driver and not by
  * the SMB347 hardware.
  *
- * Hard and soft temperature limit values are given as described in the
- * device data sheet and assuming NTC beta value is %3750. Even if this is
- * not the case, these values should be used. They can be mapped to the
- * corresponding NTC beta values with the help of table %2 in the data
- * sheet. So for example if NTC beta is %3375 and we want to program hard
- * hot limit to be %53 deg C, @hard_hot_temp_limit should be set to %50.
- *
  * If zero value is given in any of the current and voltage values, the
  * factory programmed default will be used. For soft/hard temperature
  * values, pass in %SMB347_TEMP_USE_DEFAULT instead.
  */
+#define  MAXSMB34x_CONFIG_REG		20
+#define  MAXSMB347_CONFIG_DATA_SIZE	(MAXSMB34x_CONFIG_REG*2)
+
 struct smb347_charger_platform_data {
 	struct power_supply_info battery_info;
-	unsigned int	max_charge_current;
-	unsigned int	max_charge_voltage;
-	unsigned int	pre_charge_current;
-	unsigned int	termination_current;
-	unsigned int	pre_to_fast_voltage;
-	unsigned int	mains_current_limit;
-	unsigned int	usb_hc_current_limit;
-	unsigned int	chip_temp_threshold;
-	int		soft_cold_temp_limit;
-	int		soft_hot_temp_limit;
-	int		hard_cold_temp_limit;
-	int		hard_hot_temp_limit;
-	bool		suspend_on_hard_temp_limit;
-	unsigned int	soft_temp_limit_compensation;
-	unsigned int	charge_current_compensation;
 	bool		use_mains;
 	bool		use_usb;
-	bool		use_usb_otg;
+	bool		show_battery;
+	bool		is_valid_battery;
 	int		irq_gpio;
+	unsigned int	termination_current;
 	enum smb347_chg_enable enable_control;
+	enum smb347_otg_control otg_control;
+	/*
+	 * One time initialized configuration
+	 * register map[offset, value].
+	 */
+	u16	char_config_regs[MAXSMB347_CONFIG_DATA_SIZE];
+
+	char **supplied_to;
+	size_t num_supplicants;
+	size_t num_throttle_states;
+	unsigned long supported_cables;
+	struct power_supply_throttle *throttle_states;
+	struct ps_batt_chg_prof *chg_profile;
+	bool	detect_chg;
+	bool	use_regulator;
+	int	gpio_mux;
 };
 
+#ifdef CONFIG_CHARGER_SMB347
+extern int smb347_get_charging_status(void);
+extern int smb347_enable_charger(void);
+extern int smb347_disable_charger(void);
+extern int smb34x_get_bat_health(void);
+#else
+static int __maybe_unused smb347_get_charging_status(void)
+{
+	return 0;
+}
+static int __maybe_unused smb347_enable_charger(void)
+{
+	return 0;
+}
+static int __maybe_unused smb347_disable_charger(void)
+{
+	return 0;
+}
+int __maybe_unused smb34x_get_bat_health(void)
+{
+	return 0;
+}
 #endif /* SMB347_CHARGER_H */
+#endif
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 03d921f..2cd86f4 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -84,6 +84,14 @@
 	POWER_SUPPLY_SCOPE_DEVICE,
 };
 
+enum {
+	POWER_SUPPLY_CHARGE_CURRENT_LIMIT_ZERO = 0,
+	POWER_SUPPLY_CHARGE_CURRENT_LIMIT_LOW,
+	POWER_SUPPLY_CHARGE_CURRENT_LIMIT_MEDIUM,
+	POWER_SUPPLY_CHARGE_CURRENT_LIMIT_HIGH,
+	POWER_SUPPLY_CHARGE_CURRENT_LIMIT_NONE,
+};
+
 enum power_supply_property {
 	/* Properties of type `int' */
 	POWER_SUPPLY_PROP_STATUS = 0,
@@ -117,8 +125,14 @@
 	POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
 	POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
 	POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX,
+	POWER_SUPPLY_CHARGE_CURRENT_LIMIT,
 	POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT,
 	POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX,
+	POWER_SUPPLY_PROP_CHARGE_CURRENT,
+	POWER_SUPPLY_PROP_MAX_CHARGE_CURRENT,
+	POWER_SUPPLY_PROP_CHARGE_VOLTAGE,
+	POWER_SUPPLY_PROP_MAX_CHARGE_VOLTAGE,
+	POWER_SUPPLY_PROP_INLMT,
 	POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN,
 	POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN,
 	POWER_SUPPLY_PROP_ENERGY_FULL,
@@ -132,6 +146,8 @@
 	POWER_SUPPLY_PROP_TEMP,
 	POWER_SUPPLY_PROP_TEMP_ALERT_MIN,
 	POWER_SUPPLY_PROP_TEMP_ALERT_MAX,
+	POWER_SUPPLY_PROP_MAX_TEMP,
+	POWER_SUPPLY_PROP_MIN_TEMP,
 	POWER_SUPPLY_PROP_TEMP_AMBIENT,
 	POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN,
 	POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX,
@@ -140,6 +156,11 @@
 	POWER_SUPPLY_PROP_TIME_TO_FULL_NOW,
 	POWER_SUPPLY_PROP_TIME_TO_FULL_AVG,
 	POWER_SUPPLY_PROP_TYPE, /* use power_supply.type instead */
+	POWER_SUPPLY_PROP_CHARGE_TERM_CUR,
+	POWER_SUPPLY_PROP_ENABLE_CHARGING,
+	POWER_SUPPLY_PROP_ENABLE_CHARGER,
+	POWER_SUPPLY_PROP_CABLE_TYPE,
+	POWER_SUPPLY_PROP_PRIORITY,
 	POWER_SUPPLY_PROP_SCOPE,
 	/* Local extensions */
 	POWER_SUPPLY_PROP_USB_HC,
@@ -159,17 +180,84 @@
 	POWER_SUPPLY_TYPE_UPS,
 	POWER_SUPPLY_TYPE_MAINS,
 	POWER_SUPPLY_TYPE_USB,		/* Standard Downstream Port */
+	POWER_SUPPLY_TYPE_USB_INVAL,	/* Invalid Standard Downstream Port */
 	POWER_SUPPLY_TYPE_USB_DCP,	/* Dedicated Charging Port */
 	POWER_SUPPLY_TYPE_USB_CDP,	/* Charging Downstream Port */
 	POWER_SUPPLY_TYPE_USB_ACA,	/* Accessory Charger Adapters */
+	POWER_SUPPLY_TYPE_USB_HOST,	/* To support OTG devices */
 };
 
+enum power_supply_charger_event {
+	POWER_SUPPLY_CHARGER_EVENT_CONNECT = 0,
+	POWER_SUPPLY_CHARGER_EVENT_UPDATE,
+	POWER_SUPPLY_CHARGER_EVENT_RESUME,
+	POWER_SUPPLY_CHARGER_EVENT_SUSPEND,
+	POWER_SUPPLY_CHARGER_EVENT_DISCONNECT,
+};
+
+struct power_supply_charger_cap {
+	enum power_supply_charger_event chrg_evt;
+	enum power_supply_type chrg_type;
+	unsigned int mA; /* input current limit */
+};
+
+enum power_supply_charger_cable_type {
+	POWER_SUPPLY_CHARGER_TYPE_NONE = 0,
+	POWER_SUPPLY_CHARGER_TYPE_USB_SDP = 1 << 0,
+	POWER_SUPPLY_CHARGER_TYPE_USB_DCP = 1 << 1,
+	POWER_SUPPLY_CHARGER_TYPE_USB_CDP = 1 << 2,
+	POWER_SUPPLY_CHARGER_TYPE_USB_ACA = 1 << 3,
+	POWER_SUPPLY_CHARGER_TYPE_AC = 1 << 4,
+	POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK = 1 << 5,
+	POWER_SUPPLY_CHARGER_TYPE_ACA_A = 1 << 6,
+	POWER_SUPPLY_CHARGER_TYPE_ACA_B = 1 << 7,
+	POWER_SUPPLY_CHARGER_TYPE_ACA_C = 1 << 8,
+	POWER_SUPPLY_CHARGER_TYPE_SE1 = 1 << 9,
+	POWER_SUPPLY_CHARGER_TYPE_MHL = 1 << 10,
+	POWER_SUPPLY_CHARGER_TYPE_B_DEVICE = 1 << 11,
+};
+
+struct power_supply_cable_props {
+	enum power_supply_charger_event	chrg_evt;
+	enum power_supply_charger_cable_type chrg_type;
+	unsigned int ma;   /* input current limit */
+};
+
+#define POWER_SUPPLY_CHARGER_TYPE_USB \
+	(POWER_SUPPLY_CHARGER_TYPE_USB_SDP | \
+	POWER_SUPPLY_CHARGER_TYPE_USB_DCP | \
+	POWER_SUPPLY_CHARGER_TYPE_USB_CDP | \
+	POWER_SUPPLY_CHARGER_TYPE_USB_ACA | \
+	POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK| \
+	POWER_SUPPLY_CHARGER_TYPE_SE1)
+
 union power_supply_propval {
 	int intval;
 	const char *strval;
 	int64_t int64val;
 };
 
+enum power_supply_notifier_events {
+	POWER_SUPPLY_EVENT_NONE,
+	POWER_SUPPLY_PROP_CHANGED,
+	POWER_SUPPLY_BATTERY_EVENT,
+	POWER_SUPPLY_CABLE_EVENT,
+};
+
+
+enum psy_throttle_action {
+
+	PSY_THROTTLE_DISABLE_CHARGER = 0,
+	PSY_THROTTLE_DISABLE_CHARGING,
+	PSY_THROTTLE_CC_LIMIT,
+	PSY_THROTTLE_INPUT_LIMIT,
+};
+
+struct power_supply_throttle {
+	enum psy_throttle_action throttle_action;
+	unsigned throttle_val;
+};
+
 struct power_supply {
 	const char *name;
 	enum power_supply_type type;
@@ -177,8 +265,10 @@
 	size_t num_properties;
 
 	char **supplied_to;
+	unsigned long supported_cables;
 	size_t num_supplicants;
-
+	struct power_supply_throttle *throttle_states;
+	size_t num_throttle_states;
 	char **supplied_from;
 	size_t num_supplies;
 #ifdef CONFIG_OF
@@ -193,8 +283,12 @@
 			    const union power_supply_propval *val);
 	int (*property_is_writeable)(struct power_supply *psy,
 				     enum power_supply_property psp);
+	int (*property_is_privileged_read)(struct power_supply *psy,
+					enum power_supply_property psp);
 	void (*external_power_changed)(struct power_supply *psy);
 	void (*set_charged)(struct power_supply *psy);
+	void (*charging_port_changed)(struct power_supply *psy,
+				struct power_supply_charger_cap *cap);
 
 	/* For APM emulation, think legacy userspace. */
 	int use_for_apm;
@@ -242,12 +336,19 @@
 	int use_for_apm;
 };
 
+extern struct atomic_notifier_head    power_supply_notifier;
+extern int power_supply_reg_notifier(struct notifier_block *nb);
+extern void power_supply_unreg_notifier(struct notifier_block *nb);
 extern struct power_supply *power_supply_get_by_name(const char *name);
 extern void power_supply_changed(struct power_supply *psy);
 extern int power_supply_am_i_supplied(struct power_supply *psy);
+extern int power_supply_is_battery_connected(void);
 extern int power_supply_set_battery_charged(struct power_supply *psy);
+extern void power_supply_charger_event(struct power_supply_charger_cap cap);
+extern void power_supply_query_charger_caps(struct power_supply_charger_cap
+									*cap);
 
-#ifdef CONFIG_POWER_SUPPLY
+#if defined(CONFIG_POWER_SUPPLY) || defined(CONFIG_POWER_SUPPLY_MODULE)
 extern int power_supply_is_system_supplied(void);
 #else
 static inline int power_supply_is_system_supplied(void) { return -ENOSYS; }
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index 608e60a..2032b9f 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -6,11 +6,9 @@
 
 #include <linux/types.h>
 #include <linux/fs.h>
-
 struct proc_dir_entry;
 
 #ifdef CONFIG_PROC_FS
-
 extern void proc_root_init(void);
 extern void proc_flush_task(struct task_struct *);
 
diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h
index 2c8e5dd..1905d44 100644
--- a/include/linux/pstore_ram.h
+++ b/include/linux/pstore_ram.h
@@ -24,6 +24,7 @@
 #include <linux/init.h>
 
 struct persistent_ram_buffer;
+struct persistent_ram_buffer_ctrl;
 struct rs_control;
 
 struct persistent_ram_ecc_info {
@@ -38,6 +39,7 @@
 	size_t size;
 	void *vaddr;
 	struct persistent_ram_buffer *buffer;
+	struct persistent_ram_buffer_ctrl *buffer_ctrl;
 	size_t buffer_size;
 
 	/* ECC correction */
diff --git a/include/linux/pti.h b/include/linux/pti.h
index b3ea01a..e4769ff 100644
--- a/include/linux/pti.h
+++ b/include/linux/pti.h
@@ -35,7 +35,7 @@
 };
 
 /* the following functions are defined in misc/pti.c */
-void pti_writedata(struct pti_masterchannel *mc, u8 *buf, int count);
+void pti_writedata(struct pti_masterchannel *mc, u8 *buf, int count, bool eom);
 struct pti_masterchannel *pti_request_masterchannel(u8 type,
 						    const char *thread_name);
 void pti_release_masterchannel(struct pti_masterchannel *mc);
diff --git a/include/linux/r69001-ts.h b/include/linux/r69001-ts.h
new file mode 100644
index 0000000..011e3cb
--- /dev/null
+++ b/include/linux/r69001-ts.h
@@ -0,0 +1,122 @@
+/*
+ * R69001 Touchscreen Controller Driver
+ * Header file
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef _R69001_TS_H_
+#define _R69001_TS_H_
+
+#ifdef CONFIG_R69001_DEBUG_SUPPORT
+#include <linux/r69001-ts-debug.h>
+#endif
+
+/* Spec */
+#define MIN_X                       0
+#define MIN_Y                       0
+#define MIN_Z                       0
+#ifdef CONFIG_R69001_RESOLUTION_WIDTH
+#define MAX_X                       CONFIG_R69001_RESOLUTION_WIDTH
+#else
+#define MAX_X                       720
+#endif
+#ifdef CONFIG_R69001_RESOLUTION_HEIGHT
+#define MAX_Y                       CONFIG_R69001_RESOLUTION_HEIGHT
+#else
+#define MAX_Y                       1280
+#endif
+#define MAX_Z                       255
+#define MIN_AREA                    0
+#define MAX_AREA                    10
+#ifdef CONFIG_R69001_MAX_FINGERS
+#define MAX_FINGERS                 CONFIG_R69001_MAX_FINGERS
+#else
+#define MAX_FINGERS                 10
+#endif
+
+/* Scan cycle */
+#ifdef CONFIG_R69001_SCAN_TIME
+#define SCAN_TIME                   CONFIG_R69001_SCAN_TIME
+#else
+#define SCAN_TIME                   16
+#endif
+
+/* Polling interval */
+#ifdef CONFIG_R69001_POLLING_TIME
+#define POLL_INTERVAL               CONFIG_R69001_POLLING_TIME
+#else
+#define POLL_INTERVAL               2
+#endif
+#define POLL_INTERVAL_MAX           1000
+
+/* Number of RX/TX */
+#ifdef CONFIG_R69001_NUM_RX
+#define NUM_RX                      CONFIG_R69001_NUM_RX
+#else
+#define NUM_RX                      21
+#endif
+#ifdef CONFIG_R69001_NUM_TX
+#define NUM_TX                      CONFIG_R69001_NUM_TX
+#else
+#define NUM_TX                      13
+#endif
+
+/* Scan Mode */
+#define R69001_SCAN_MODE_STOP           0x00
+#define R69001_SCAN_MODE_LOW_POWER      0x01
+#define R69001_SCAN_MODE_FULL_SCAN      0x02
+#define R69001_SCAN_MODE_CALIBRATION    0x03
+
+/* Interrupt/Polling mode */
+#define R69001_TS_INTERRUPT_MODE        0
+#define R69001_TS_POLLING_MODE          1
+#define R69001_TS_POLLING_LOW_EDGE_MODE 2
+#define R69001_TS_CALIBRATION_INTERRUPT_MODE 3
+
+struct io_mode {
+	u8 scan;
+	u8 mode;
+	u16 poll_time;
+};
+
+struct io_rxtx_num {
+	u8 rx;
+	u8 tx;
+};
+
+struct io_resolution {
+	u16 x;
+	u16 y;
+};
+
+struct r69001_io_data {
+	struct io_mode mode;
+	struct io_rxtx_num rxtx_num;
+	struct io_resolution resolution;
+};
+
+struct r69001_platform_data {
+	int irq_type;
+	int gpio;
+};
+
+#define R69001_IO   'R'
+
+/* R69001 ioctl commands */
+#define RSP_IOCTL_SET_INT_POLL_MODE _IOW(R69001_IO, 0x01, struct r69001_io_data)
+#define RSP_IOCTL_GET_RXTX_NUM      _IOR(R69001_IO, 0x02, struct r69001_io_data)
+#define RSP_IOCTL_GET_RESOLUTION    _IOR(R69001_IO, 0x03, struct r69001_io_data)
+#define RSP_IOCTL_SET_SCAN_MODE     _IOW(R69001_IO, 0x04, struct r69001_io_data)
+
+#define RSP_IOCTL_SUSPEND           _IOW(R69001_IO, 0x50, struct r69001_io_data)
+#define RSP_IOCTL_RESUME            _IOW(R69001_IO, 0x51, struct r69001_io_data)
+
+#endif /* _R69001_TS_H_ */
diff --git a/include/linux/random.h b/include/linux/random.h
index 3b9377d..6312dd9 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -17,6 +17,7 @@
 extern void get_random_bytes(void *buf, int nbytes);
 extern void get_random_bytes_arch(void *buf, int nbytes);
 void generate_random_uuid(unsigned char uuid_out[16]);
+extern int random_int_secret_init(void);
 
 #ifndef MODULE
 extern const struct file_operations random_fops, urandom_fops;
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index f4b1001..4106721 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -267,8 +267,9 @@
  */
 #define list_first_or_null_rcu(ptr, type, member) \
 	({struct list_head *__ptr = (ptr); \
-	  struct list_head __rcu *__next = list_next_rcu(__ptr); \
-	  likely(__ptr != __next) ? container_of(__next, type, member) : NULL; \
+	  struct list_head *__next = ACCESS_ONCE(__ptr->next); \
+	  likely(__ptr != __next) ? \
+		list_entry_rcu(__next, type, member) : NULL; \
 	})
 
 /**
diff --git a/include/linux/reboot.h b/include/linux/reboot.h
index 23b3630..2ca9ed7 100644
--- a/include/linux/reboot.h
+++ b/include/linux/reboot.h
@@ -10,6 +10,11 @@
 #define SYS_HALT	0x0002	/* Notify of system halt */
 #define SYS_POWER_OFF	0x0003	/* Notify of system power off */
 
+enum reboot_mode {
+	REBOOT_COLD = 0,
+	REBOOT_WARM,
+};
+
 extern int register_reboot_notifier(struct notifier_block *);
 extern int unregister_reboot_notifier(struct notifier_block *);
 
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 02d84e2..98c470ce 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -15,6 +15,8 @@
 
 #include <linux/list.h>
 #include <linux/rbtree.h>
+#include <linux/err.h>
+#include <linux/bug.h>
 
 struct module;
 struct device;
diff --git a/include/linux/regulator/intel_basin_cove_pmic.h b/include/linux/regulator/intel_basin_cove_pmic.h
new file mode 100644
index 0000000..8dcdfa7
--- /dev/null
+++ b/include/linux/regulator/intel_basin_cove_pmic.h
@@ -0,0 +1,59 @@
+/*
+ * intel_basin_cove_pmic.h - Support for Basin Cove pmic VR
+ * Copyright (c) 2012, Intel Corporation.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+#ifndef __INTEL_BASIN_COVE_PMIC_H_
+#define __INTEL_BASIN_COVE_PMIC_H_
+
+struct regulator_init_data;
+
+enum intel_regulator_id {
+	VPROG1,
+	VPROG2,
+	VPROG3,
+};
+
+/* Voltage tables for Regulators */
+static const u16 VPROG1_VSEL_table[] = {
+	1500, 1800, 2500, 2800,
+};
+
+static const u16 VPROG2_VSEL_table[] = {
+	1500, 1800, 2500, 2850,
+};
+
+static const u16 VPROG3_VSEL_table[] = {
+	1050, 1800, 2500, 2800,
+};
+
+/* Slave Address for all regulators */
+#define VPROG1CNT_ADDR	0x0ac
+#define VPROG2CNT_ADDR	0x0ad
+#define VPROG3CNT_ADDR	0x0ae
+/**
+ * intel_pmic_info - platform data for intel pmic
+ * @pmic_reg: pmic register that is to be used for this VR
+ */
+struct intel_pmic_info {
+	struct regulator_init_data *init_data;
+	struct regulator_dev *intel_pmic_rdev;
+	const u16 *table;
+	u16 pmic_reg;
+	u8 table_len;
+};
+
+#endif /* __INTEL_BASIN_COVE_PMIC_H_ */
diff --git a/include/linux/regulator/intel_crystal_cove_pmic.h b/include/linux/regulator/intel_crystal_cove_pmic.h
new file mode 100644
index 0000000..22e3225
--- /dev/null
+++ b/include/linux/regulator/intel_crystal_cove_pmic.h
@@ -0,0 +1,115 @@
+/*
+ * intel_crystal_cove_pmic.h - Support for Basin Cove pmic VR
+ * Copyright (c) 2013, Intel Corporation.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+#ifndef __INTEL_CRYSTAL_COVE_PMIC_H_
+#define __INTEL_CRYSTAL_COVE_PMIC_H_
+
+#include <linux/notifier.h>
+
+/* Slave Address for all regulators */
+#define V2P85SCNT_ADDR	0x065
+#define V2P85SXCNT_ADDR	0x066
+#define V3P3SXCNT_ADDR	0x069
+#define V1P8SCNT_ADDR	0x05c
+#define V1P8SXCNT_ADDR	0x05d
+#define VSYS_SCNT_ADDR	0x06c
+#define V1P0ACNT_ADDR	0x055
+#define V1P8ACNT_ADDR	0x05a
+
+#define CRYSTAL_COVE_REGULATOR_ID_START 1000
+
+struct regulator_init_data;
+
+enum intel_regulator_id {
+	V2P85S = CRYSTAL_COVE_REGULATOR_ID_START,
+	V2P85SX,
+	V3P3SX,
+	V1P8S,
+	V1P8SX,
+	V1P0A,
+	V1P8A,
+	VSYS_S,
+};
+
+struct regulator_info {
+	struct regulator *regulator;
+	struct device *dev;
+};
+
+/* Voltage tables for Regulators */
+static const u16 V2P85S_VSEL_TABLE[] = {
+	2565, 2700, 2850, 2900, 2950, 3000, 3135, 3300,
+};
+
+static const u16 V2P85SX_VSEL_TABLE[] = {
+	2900,
+};
+
+static const u16 V3P3SX_VSEL_TABLE[] = {
+	3332,
+};
+
+static const u16 V1P8S_VSEL_TABLE[] = {
+	1817,
+};
+
+static const u16 V1P8SX_VSEL_TABLE[] = {
+	1817,
+};
+
+static const u16 VSYS_S_VSEL_TABLE[] = {
+	4200,
+};
+
+static const u16 V1P0A_VSEL_TABLE[] = {
+	900, 950, 1000, 1020, 1030, 1050, 1100,
+};
+
+static const u16 V1P8A_VSEL_TABLE[] = {
+	1620, 1710, 1800, 1836, 1854, 1890, 1980,
+};
+
+struct pmic_regulator_gpio_en {
+	int gpio;
+	int init_gpio_state;
+};
+
+/**
+ * intel_pmic_info - platform data for intel pmic
+ * @pmic_reg: pmic register that is to be used for this VR
+ */
+struct intel_pmic_info {
+	struct regulator_init_data *init_data;
+	struct regulator_dev *intel_pmic_rdev;
+	struct pmic_regulator_gpio_en *en_pin;
+	const u16 *table;
+	u16 pmic_reg;
+	u8 table_len;
+};
+
+#ifdef CONFIG_REGULATOR_CRYSTAL_COVE
+extern void vrf_notifier_register(struct notifier_block *n);
+extern void vrf_notifier_unregister(struct notifier_block *n);
+extern void vrf_notifier_call_chain(unsigned int val);
+#else
+static inline void vrf_notifier_register(struct notifier_block *n) {}
+static inline void vrf_notifier_unregister(struct notifier_block *n) {}
+static inline void vrf_notifier_call_chain(unsigned int val) {}
+#endif /* CONFIG_CRYSTAL_COVE */
+
+#endif /* __INTEL_CRYSTAL_COVE_PMIC_H_ */
diff --git a/include/linux/regulator/intel_pmic.h b/include/linux/regulator/intel_pmic.h
new file mode 100644
index 0000000..f972d60
--- /dev/null
+++ b/include/linux/regulator/intel_pmic.h
@@ -0,0 +1,54 @@
+/*
+*Support for intel pmic
+*Copyright (c) 2012, Intel Corporation.
+*This program is free software; you can redistribute it and/or modify
+*it under the terms of the GNU General Public License version 2 as
+*published by the Free Software Foundation.
+*
+*/
+
+struct regulator_init_data;
+
+enum intel_regulator_id {
+	VPROG1,
+	VPROG2,
+	VEMMC1,
+	VEMMC2,
+};
+
+/* Voltage tables for Regulators */
+static const u16 VPROG1_VSEL_table[] = {
+	1200, 1800, 2500, 2800,
+};
+
+static const u16 VPROG2_VSEL_table[] = {
+	1200, 1800, 2500, 2800,
+};
+
+static const u16 VEMMC1_VSEL_table[] = {
+	2850,
+};
+static const u16 VEMMC2_VSEL_table[] = {
+	2850,
+};
+
+static const u16 V180AON_VSEL_table[] = {
+	1800, 1817, 1836, 1854,
+};
+
+/* Slave Address for all regulators */
+#define VPROG1CNT_ADDR	0x0D6
+#define VPROG2CNT_ADDR	0x0D7
+#define VEMMC1CNT_ADDR	0x0D9
+#define VEMMC2CNT_ADDR	0x0DA
+/**
+ * intel_pmic_info - platform data for intel pmic
+ * @pmic_reg: pmic register that is to be used for this VR
+ */
+struct intel_pmic_info {
+	struct regulator_init_data *init_data;
+	struct regulator_dev *intel_pmic_rdev;
+	const u16 *table;
+	u16 pmic_reg;
+	u8 table_len;
+};
diff --git a/include/linux/reservation.h b/include/linux/reservation.h
new file mode 100644
index 0000000..813dae9
--- /dev/null
+++ b/include/linux/reservation.h
@@ -0,0 +1,62 @@
+/*
+ * Header file for reservations for dma-buf and ttm
+ *
+ * Copyright(C) 2011 Linaro Limited. All rights reserved.
+ * Copyright (C) 2012-2013 Canonical Ltd
+ * Copyright (C) 2012 Texas Instruments
+ *
+ * Authors:
+ * Rob Clark <rob.clark@linaro.org>
+ * Maarten Lankhorst <maarten.lankhorst@canonical.com>
+ * Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ *
+ * Based on bo.c which bears the following copyright notice,
+ * but is dual licensed:
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _LINUX_RESERVATION_H
+#define _LINUX_RESERVATION_H
+
+#include <linux/ww_mutex.h>
+
+extern struct ww_class reservation_ww_class;
+
+struct reservation_object {
+	struct ww_mutex lock;
+};
+
+static inline void
+reservation_object_init(struct reservation_object *obj)
+{
+	ww_mutex_init(&obj->lock, &reservation_ww_class);
+}
+
+static inline void
+reservation_object_fini(struct reservation_object *obj)
+{
+	ww_mutex_destroy(&obj->lock);
+}
+
+#endif /* _LINUX_RESERVATION_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 0ae16dd..742d87c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -245,6 +245,10 @@
  */
 extern void show_state_filter(unsigned long state_filter);
 
+#ifdef CONFIG_EMMC_IPANIC
+extern void emmc_ipanic_stream_emmc(void);
+#endif
+
 static inline void show_state(void)
 {
 	show_state_filter(0);
@@ -780,6 +784,12 @@
 #define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */
 #define SD_OVERLAP		0x2000	/* sched_domains of this level overlap */
 
+#ifdef CONFIG_WORKLOAD_CONSOLIDATION
+#define SD_ASYM_CONCURRENCY	0x4000	/* Higher concurrency in front to save power */
+#else
+#define SD_ASYM_CONCURRENCY 0
+#endif
+
 extern int __weak arch_sd_sibiling_asym_packing(void);
 
 struct sched_domain_attr {
@@ -861,6 +871,13 @@
 		struct rcu_head rcu;	/* used during destruction */
 	};
 
+#ifdef CONFIG_WORKLOAD_CONSOLIDATION
+	unsigned int total_groups;
+	unsigned int group_number;
+	unsigned int asym_concurrency;
+	struct sched_group *first_group;	/* ordered by CPU number */
+#endif
+
 	unsigned int span_weight;
 	/*
 	 * Span of all CPUs in this domain.
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index bf8086b..2720f5e 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -36,6 +36,14 @@
 extern unsigned int sysctl_sched_wakeup_granularity;
 extern unsigned int sysctl_sched_child_runs_first;
 
+#ifdef CONFIG_CPU_CONCURRENCY
+extern unsigned long sysctl_concurrency_sum_period;
+extern unsigned long sysctl_concurrency_decay_rate;
+extern int concurrency_decay_rate_handler(
+					struct ctl_table *table, int write, void __user *buffer,
+					size_t *lenp, loff_t *ppos);
+#endif
+
 enum sched_tunable_scaling {
 	SCHED_TUNABLESCALING_NONE,
 	SCHED_TUNABLESCALING_LOG,
diff --git a/include/linux/sdm.h b/include/linux/sdm.h
new file mode 100644
index 0000000..0a75ffb
--- /dev/null
+++ b/include/linux/sdm.h
@@ -0,0 +1,60 @@
+/*
+ *  Copyright (C) Intel 2011
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * The SDM (System Debug Monitor) directs trace data routed from
+ * various parts in the system out through the Intel Tangier PTI port and
+ * out of the mobile device for analysis with a debugging tool
+ * (Lauterbach, Fido). This is part of a solution for the MIPI P1149.7,
+ * compact JTAG, standard and USB Debug-Class
+ *
+ * This header file will allow other parts of the OS to use the
+ * interface to write out it's contents for debugging a mobile system.
+ */
+
+#ifndef SDM_H_
+#define SDM_H_
+
+#ifdef CONFIG_INTEL_PTI_STM
+/* the following functions are defined in drivers/misc/stm.c */
+int stm_kernel_get_out(void);
+int stm_kernel_set_out(int bus_type);
+int stm_is_enabled(void);
+#else
+static inline int stm_kernel_get_out(void) { return -EOPNOTSUPP; };
+static inline int stm_kernel_set_out(int bus_type) { return -EOPNOTSUPP; };
+static inline int stm_is_enabled(void) { return 0; };
+#endif
+
+/* Temporary : To be replace later with dynamic*/
+#define STM_NB_IN_PINS                  0
+
+/* STM output configurations */
+#define STM_PTI_4BIT_LEGACY                    0
+#define STM_PTI_4BIT_NIDNT                     1
+#define STM_PTI_16BIT                          2
+#define STM_PTI_12BIT                          3
+#define STM_PTI_8BIT                           4
+#define STM_USB                                15
+
+/* Buffer configurations */
+#define DFX_BULK_BUFFER_SIZE		64 /* for Tangier A0 */
+#define DFX_BULK_OUT_BUFFER_ADDR	0xF90B0000
+#define DFX_BULK_IN_BUFFER_ADDR		0xF90B0000
+#define DFX_BULK_IN_BUFFER_ADDR_2	0xF90B0400
+
+#define TRACE_BULK_BUFFER_SIZE		65536 /* revision */
+#define TRACE_BULK_IN_BUFFER_ADDR	0xF90A0000 /* revision */
+
+#endif /*SDM_H_*/
+
diff --git a/include/linux/sem.h b/include/linux/sem.h
index 53d4265..976ce3a 100644
--- a/include/linux/sem.h
+++ b/include/linux/sem.h
@@ -12,10 +12,12 @@
 struct sem_array {
 	struct kern_ipc_perm	____cacheline_aligned_in_smp
 				sem_perm;	/* permissions .. see ipc.h */
-	time_t			sem_otime;	/* last semop time */
 	time_t			sem_ctime;	/* last change time */
 	struct sem		*sem_base;	/* ptr to first semaphore in array */
-	struct list_head	sem_pending;	/* pending operations to be processed */
+	struct list_head	pending_alter;	/* pending operations */
+						/* that alter the array */
+	struct list_head	pending_const;	/* pending complex operations */
+						/* that do not alter semvals */
 	struct list_head	list_id;	/* undo requests on this array */
 	int			sem_nsems;	/* no. of semaphores in array */
 	int			complex_count;	/* pending complex operations */
diff --git a/include/linux/serial_max3110.h b/include/linux/serial_max3110.h
new file mode 100644
index 0000000..5470556
--- /dev/null
+++ b/include/linux/serial_max3110.h
@@ -0,0 +1,16 @@
+#ifndef _LINUX_SERIAL_MAX3110_H
+#define _LINUX_SERIAL_MAX3110_H
+
+/**
+ * struct plat_max3110 - MAX3110 SPI UART platform data
+ * @irq_edge_trigger: if IRQ is edge triggered
+ *
+ * You should use this structure in your machine description to specify
+ * how the MAX3110 is connected.
+ *
+ */
+struct plat_max3110 {
+	int irq_edge_triggered;
+};
+
+#endif
diff --git a/include/linux/serial_mfd.h b/include/linux/serial_mfd.h
index 2b071e0..0c0ba99 100644
--- a/include/linux/serial_mfd.h
+++ b/include/linux/serial_mfd.h
@@ -3,6 +3,7 @@
 
 /* HW register offset definition */
 #define UART_FOR	0x08
+#define UART_ABR	0x09
 #define UART_PS		0x0C
 #define UART_MUL	0x0D
 #define UART_DIV	0x0E
@@ -18,8 +19,8 @@
 #define HSU_GBL_INT_BIT_DMA	0x5
 
 #define HSU_GBL_ISR	0x8
-#define HSU_GBL_DMASR	0x400
-#define HSU_GBL_DMAISR	0x404
+#define HSU_GBL_DMASR	0x0
+#define HSU_GBL_DMAISR	0x4
 
 #define HSU_PORT_REG_OFFSET	0x80
 #define HSU_PORT0_REG_OFFSET	0x80
@@ -27,7 +28,7 @@
 #define HSU_PORT2_REG_OFFSET	0x180
 #define HSU_PORT_REG_LENGTH	0x80
 
-#define HSU_DMA_CHANS_REG_OFFSET	0x500
+#define HSU_DMA_CHANS_REG_OFFSET	0x100
 #define HSU_DMA_CHANS_REG_LENGTH	0x40
 
 #define HSU_CH_SR		0x0	/* channel status reg */
diff --git a/include/linux/sfi.h b/include/linux/sfi.h
index fe81791..16637d8 100644
--- a/include/linux/sfi.h
+++ b/include/linux/sfi.h
@@ -72,6 +72,7 @@
 #define SFI_SIG_WAKE		"WAKE"
 #define SFI_SIG_DEVS		"DEVS"
 #define SFI_SIG_GPIO		"GPIO"
+#define SFI_SIG_OEMB		"OEMB"
 
 #define SFI_SIGNATURE_SIZE	4
 #define SFI_OEM_ID_SIZE		6
@@ -85,6 +86,9 @@
 #define SFI_GET_NUM_ENTRIES(ptable, entry_type) \
 	((ptable->header.len - sizeof(struct sfi_table_header)) / \
 	(sizeof(entry_type)))
+
+#define SPID_FRU_SIZE	10
+
 /*
  * Table structures must be byte-packed to match the SFI specification,
  * as they are provided by the BIOS.
@@ -153,6 +157,9 @@
 #define SFI_DEV_TYPE_UART	2
 #define SFI_DEV_TYPE_HSI	3
 #define SFI_DEV_TYPE_IPC	4
+#define SFI_DEV_TYPE_SD		5
+#define SFI_DEV_TYPE_MDM	6
+#define SFI_DEV_TYPE_USB	7
 
 	u8	host_num;	/* attached to host 0, 1...*/
 	u16	addr;
diff --git a/include/linux/signal.h b/include/linux/signal.h
index d897484..2ac423b 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -434,6 +434,14 @@
 int restore_altstack(const stack_t __user *);
 int __save_altstack(stack_t __user *, unsigned long);
 
+#define save_altstack_ex(uss, sp) do { \
+	stack_t __user *__uss = uss; \
+	struct task_struct *t = current; \
+	put_user_ex((void __user *)t->sas_ss_sp, &__uss->ss_sp); \
+	put_user_ex(sas_ss_flags(sp), &__uss->ss_flags); \
+	put_user_ex(t->sas_ss_size, &__uss->ss_size); \
+} while (0);
+
 #ifdef CONFIG_PROC_FS
 struct seq_file;
 extern void render_sigset_t(struct seq_file *, const char *, sigset_t *);
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index dec1748..26e0d75 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -681,6 +681,8 @@
 extern int	       skb_cow_data(struct sk_buff *skb, int tailbits,
 				    struct sk_buff **trailer);
 extern int	       skb_pad(struct sk_buff *skb, int pad);
+extern void copy_skb_header(struct sk_buff *new, const struct sk_buff *old);
+extern void skb_clone_fraglist(struct sk_buff *skb);
 #define dev_kfree_skb(a)	consume_skb(a)
 
 extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
@@ -1308,6 +1310,11 @@
 	return len + skb_headlen(skb);
 }
 
+static inline bool skb_has_frags(const struct sk_buff *skb)
+{
+	return skb_shinfo(skb)->nr_frags;
+}
+
 /**
  * __skb_fill_page_desc - initialise a paged fragment in an skb
  * @skb: buffer containing fragment to be initialised
diff --git a/include/linux/smp.h b/include/linux/smp.h
index c848876..6954541 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -20,6 +20,7 @@
 	struct list_head list;
 	smp_call_func_t func;
 	void *info;
+	int cpu;
 	u16 flags;
 };
 
diff --git a/include/linux/spi/intel_mid_ssp_spi.h b/include/linux/spi/intel_mid_ssp_spi.h
new file mode 100644
index 0000000..2412166
--- /dev/null
+++ b/include/linux/spi/intel_mid_ssp_spi.h
@@ -0,0 +1,350 @@
+/*
+ *  Copyright (C) Intel 2009
+ *  Ken Mills <ken.k.mills@intel.com>
+ *  Sylvain Centelles <sylvain.centelles@intel.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+#ifndef INTEL_MID_SSP_SPI_H_
+#define INTEL_MID_SSP_SPI_H_
+
+#include <linux/intel_mid_dma.h>
+#include <linux/pm_qos.h>
+#include <linux/spi/spi.h>
+#include <linux/interrupt.h>
+#include <linux/wakelock.h>
+#include <linux/completion.h>
+
+#define PCI_MRFL_DMAC_ID	0x11A2
+
+#define SSP_NOT_SYNC 0x400000
+#define MAX_SPI_TRANSFER_SIZE 8192
+#define MAX_BITBANGING_LOOP   10000
+#define SPI_FIFO_SIZE 16
+
+/* PM QoS define */
+#define MIN_EXIT_LATENCY 20
+
+/* SSP assignement configuration from PCI config */
+
+#define SSP_CFG_SPI_MODE_ID		1
+/* adid field offset is 6 inside the vendor specific capability */
+#define VNDR_CAPABILITY_ADID_OFFSET	6
+
+/* Driver's quirk flags */
+/* This workarround bufferizes data in the audio fabric SDRAM from  */
+/* where the DMA transfers will operate. Should be enabled only for */
+/* SPI slave mode.                                                  */
+#define QUIRKS_SRAM_ADDITIONAL_CPY	1
+/* If set the trailing bytes won't be handled by the DMA.           */
+/* Trailing byte feature not fully available.                       */
+#define QUIRKS_DMA_USE_NO_TRAIL		2
+/* If set, the driver will use PM_QOS to reduce the latency         */
+/* introduced by the deeper C-states which may produce over/under   */
+/* run issues. Must be used in slave mode. In master mode, the      */
+/* latency is not critical, but setting this workarround  may       */
+/* improve the SPI throughput.                                      */
+#define QUIRKS_USE_PM_QOS		4
+/* This quirks is set on Moorestown                                 */
+#define QUIRKS_PLATFORM_MRST		8
+/* This quirks is set on Medfield                                   */
+#define QUIRKS_PLATFORM_MDFL		16
+/* If set, the driver will apply the bitbanging workarround needed  */
+/* to enable defective Langwell stepping A SSP. The defective SSP   */
+/* can be enabled only once, and should never be disabled.          */
+#define QUIRKS_BIT_BANGING		32
+/* If set, SPI is in slave clock mode                               */
+#define QUIRKS_SPI_SLAVE_CLOCK_MODE	64
+/* Add more platform here. */
+/* This quirks is set on Baytrail. */
+#define QUIRKS_PLATFORM_BYT		128
+#define QUIRKS_PLATFORM_MRFL		256
+
+/* Uncomment to get RX and TX short dumps after each transfer */
+/* #define DUMP_RX 1 */
+#define MAX_TRAILING_BYTE_RETRY 16
+#define MAX_TRAILING_BYTE_LOOP 100
+#define DELAY_TO_GET_A_WORD 3
+#define DFLT_TIMEOUT_VAL 500
+
+#define DEFINE_SSP_REG(reg, off) \
+static inline u32 read_##reg(void *p) { return __raw_readl(p + (off)); } \
+static inline void write_##reg(u32 v, void *p) { __raw_writel(v, p + (off)); }
+
+#define RX_DIRECTION 0
+#define TX_DIRECTION 1
+
+#define I2C_ACCESS_USDELAY 10
+
+#define DFLT_BITS_PER_WORD 16
+#define MIN_BITS_PER_WORD     4
+#define MAX_BITS_PER_WORD     32
+#define DFLT_FIFO_BURST_SIZE	IMSS_FIFO_BURST_8
+
+#define TRUNCATE(x, a) ((x) & ~((a)-1))
+
+DEFINE_SSP_REG(SSCR0, 0x00)
+DEFINE_SSP_REG(SSCR1, 0x04)
+DEFINE_SSP_REG(SSSR, 0x08)
+DEFINE_SSP_REG(SSITR, 0x0c)
+DEFINE_SSP_REG(SSDR, 0x10)
+DEFINE_SSP_REG(SSTO, 0x28)
+DEFINE_SSP_REG(SSPSP, 0x2c)
+DEFINE_SSP_REG(SSFS, 0x44)
+DEFINE_SSP_REG(SFIFOL, 0x68)
+
+DEFINE_SSP_REG(I2CCTRL, 0x00);
+DEFINE_SSP_REG(I2CDATA, 0x04);
+
+DEFINE_SSP_REG(GPLR1, 0x04);
+DEFINE_SSP_REG(GPDR1, 0x0c);
+DEFINE_SSP_REG(GPSR1, 0x14);
+DEFINE_SSP_REG(GPCR1, 0x1C);
+DEFINE_SSP_REG(GAFR1_U, 0x44);
+
+#define SYSCFG  0x20bc0
+
+#define SRAM_BASE_ADDR 0xfffdc000
+#define SRAM_RX_ADDR   SRAM_BASE_ADDR
+#define SRAM_TX_ADDR  (SRAM_BASE_ADDR + MAX_SPI_TRANSFER_SIZE)
+
+#define SSCR0_DSS   (0x0000000f)     /* Data Size Select (mask) */
+#define SSCR0_DataSize(x)  ((x) - 1)    /* Data Size Select [4..16] */
+#define SSCR0_FRF   (0x00000030)     /* FRame Format (mask) */
+#define SSCR0_Motorola        (0x0 << 4)         /* Motorola's SPI mode */
+#define SSCR0_ECS   (1 << 6) /* External clock select */
+#define SSCR0_SSE   (1 << 7) /* Synchronous Serial Port Enable */
+
+#define SSCR0_SCR   (0x000fff00)      /* Serial Clock Rate (mask) */
+#define SSCR0_SerClkDiv(x) (((x) - 1) << 8) /* Divisor [1..4096] */
+#define SSCR0_EDSS  (1 << 20)           /* Extended data size select */
+#define SSCR0_NCS   (1 << 21)           /* Network clock select */
+#define SSCR0_RIM    (1 << 22)           /* Receive FIFO overrrun int mask */
+#define SSCR0_TUM   (1 << 23)           /* Transmit FIFO underrun int mask */
+#define SSCR0_FRDC (0x07000000)     /* Frame rate divider control (mask) */
+#define SSCR0_SlotsPerFrm(x) (((x) - 1) << 24) /* Time slots per frame */
+#define SSCR0_ADC   (1 << 30)           /* Audio clock select */
+#define SSCR0_MOD  (1 << 31)           /* Mode (normal or network) */
+
+#define SSCR1_RIE    (1 << 0) /* Receive FIFO Interrupt Enable */
+#define SSCR1_TIE     (1 << 1) /* Transmit FIFO Interrupt Enable */
+#define SSCR1_LBM   (1 << 2) /* Loop-Back Mode */
+#define SSCR1_SPO   (1 << 3) /* SSPSCLK polarity setting */
+#define SSCR1_SPH   (1 << 4) /* Motorola SPI SSPSCLK phase setting */
+#define SSCR1_MWDS           (1 << 5) /* Microwire Transmit Data Size */
+#define SSCR1_TFT    (0x000003c0)     /* Transmit FIFO Threshold (mask) */
+#define SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..16] */
+#define SSCR1_RFT    (0x00003c00)     /* Receive FIFO Threshold (mask) */
+#define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..16] */
+
+#define SSSR_TNF		(1 << 2)	/* Tx FIFO Not Full */
+#define SSSR_RNE		(1 << 3)	/* Rx FIFO Not Empty */
+#define SSSR_BSY		(1 << 4)	/* SSP Busy */
+#define SSSR_TFS		(1 << 5)	/* Tx FIFO Service Request */
+#define SSSR_RFS		(1 << 6)	/* Rx FIFO Service Request */
+#define SSSR_ROR		(1 << 7)	/* Rx FIFO Overrun */
+#define SSSR_TFL_MASK           (0x0F << 8)     /* Tx FIFO level field mask */
+#define SSSR_RFL_SHIFT		12		/* Rx FIFO MASK shift */
+#define SSSR_RFL_MASK		(0x0F << SSSR_RFL_SHIFT)/* RxFIFOlevel mask */
+
+#define SSCR0_TIM    (1 << 23)          /* Transmit FIFO Under Run Int Mask */
+#define SSCR0_RIM    (1 << 22)          /* Receive FIFO Over Run int Mask */
+#define SSCR0_NCS    (1 << 21)          /* Network Clock Select */
+#define SSCR0_EDSS   (1 << 20)          /* Extended Data Size Select */
+
+#define SSCR0_TISSP      (1 << 4)  /* TI Sync Serial Protocol */
+#define SSCR0_PSP        (3 << 4)  /* PSP - Programmable Serial Protocol */
+#define SSCR1_TTELP      (1 << 31) /* TXD Tristate Enable Last Phase */
+#define SSCR1_TTE        (1 << 30) /* TXD Tristate Enable */
+#define SSCR1_EBCEI      (1 << 29) /* Enable Bit Count Error interrupt */
+#define SSCR1_SCFR       (1 << 28) /* Slave Clock free Running */
+#define SSCR1_ECRA       (1 << 27) /* Enable Clock Request A */
+#define SSCR1_ECRB       (1 << 26) /* Enable Clock request B */
+#define SSCR1_SCLKDIR    (1 << 25) /* Serial Bit Rate Clock Direction */
+#define SSCR1_SFRMDIR    (1 << 24) /* Frame Direction */
+#define SSCR1_RWOT       (1 << 23) /* Receive Without Transmit */
+#define SSCR1_TRAIL      (1 << 22) /* Trailing Byte */
+#define SSCR1_TSRE       (1 << 21) /* Transmit Service Request Enable */
+#define SSCR1_RSRE       (1 << 20) /* Receive Service Request Enable */
+#define SSCR1_TINTE      (1 << 19) /* Receiver Time-out Interrupt enable */
+#define SSCR1_PINTE      (1 << 18) /* Trailing Byte Interupt Enable */
+#define SSCR1_STRF       (1 << 15) /* Select FIFO or EFWR */
+#define SSCR1_EFWR       (1 << 14) /* Enable FIFO Write/Read */
+#define SSCR1_IFS        (1 << 16) /* Invert Frame Signal */
+
+#define SSSR_BCE         (1 << 23) /* Bit Count Error */
+#define SSSR_CSS         (1 << 22) /* Clock Synchronisation Status */
+#define SSSR_TUR         (1 << 21) /* Transmit FIFO Under Run */
+#define SSSR_EOC         (1 << 20) /* End Of Chain */
+#define SSSR_TINT        (1 << 19) /* Receiver Time-out Interrupt */
+#define SSSR_PINT        (1 << 18) /* Peripheral Trailing Byte Interrupt */
+
+#define SSPSP_FSRT       (1 << 25)   /* Frame Sync Relative Timing */
+#define SSPSP_DMYSTOP(x) ((x) << 23) /* Dummy Stop */
+#define SSPSP_SFRMWDTH(x)((x) << 16) /* Serial Frame Width */
+#define SSPSP_SFRMDLY(x) ((x) << 9)  /* Serial Frame Delay */
+#define SSPSP_DMYSTRT(x) ((x) << 7)  /* Dummy Start */
+#define SSPSP_STRTDLY(x) ((x) << 4)  /* Start Delay */
+#define SSPSP_ETDS       (1 << 3)    /* End of Transfer data State */
+#define SSPSP_SFRMP      (1 << 2)    /* Serial Frame Polarity */
+#define SSPSP_SCMODE(x)  ((x) << 0)  /* Serial Bit Rate Clock Mode */
+
+/*
+ * For testing SSCR1 changes that require SSP restart, basically
+ * everything except the service and interrupt enables
+ */
+
+#define SSCR1_CHANGE_MASK (SSCR1_TTELP | SSCR1_TTE | SSCR1_SCFR \
+				| SSCR1_ECRA | SSCR1_ECRB | SSCR1_SCLKDIR \
+				| SSCR1_SFRMDIR | SSCR1_RWOT | SSCR1_TRAIL \
+				| SSCR1_IFS | SSCR1_STRF | SSCR1_EFWR \
+				| SSCR1_RFT | SSCR1_TFT | SSCR1_MWDS \
+				| SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)
+
+/* add CS control call back feature to give user capability
+to control CS signal by themselves*/
+#define CS_DEASSERT	0
+#define CS_ASSERT		1
+
+struct callback_param {
+	void *drv_context;
+	u32 direction;
+};
+
+struct ssp_drv_context {
+	/* Driver model hookup */
+	struct pci_dev *pdev;
+
+	/* SPI framework hookup */
+	struct spi_master *master;
+
+	/* SSP register addresses */
+	unsigned long paddr;
+	void *ioaddr;
+	int irq;
+
+	/* I2C registers */
+	dma_addr_t I2C_paddr;
+	void *I2C_ioaddr;
+
+	/* SSP masks*/
+	u32 cr1_sig;
+	u32 cr1;
+	u32 clear_sr;
+	u32 mask_sr;
+
+	/* PM_QOS request */
+	struct pm_qos_request pm_qos_req;
+
+	struct tasklet_struct poll_transfer;
+
+	spinlock_t lock;
+	struct wake_lock	 stay_awake;
+	struct workqueue_struct *workqueue;
+	struct work_struct pump_messages;
+	struct list_head queue;
+	struct completion msg_done;
+
+	int suspended;
+
+	/* Current message transfer state info */
+	struct spi_message *cur_msg;
+	size_t len;
+	size_t len_dma_rx;
+	size_t len_dma_tx;
+	void *tx;
+	void *tx_end;
+	void *rx;
+	void *rx_end;
+	bool dma_initialized;
+	int dma_mapped;
+	dma_addr_t rx_dma;
+	dma_addr_t tx_dma;
+	u8 n_bytes;
+	int (*write)(struct ssp_drv_context *sspc);
+	int (*read)(struct ssp_drv_context *sspc);
+
+	struct intel_mid_dma_slave    dmas_tx;
+	struct intel_mid_dma_slave    dmas_rx;
+	struct dma_chan    *txchan;
+	struct dma_chan    *rxchan;
+	struct workqueue_struct *dma_wq;
+	struct work_struct complete_work;
+
+	u8 __iomem *virt_addr_sram_tx;
+	u8 __iomem *virt_addr_sram_rx;
+
+	int txdma_done;
+	int rxdma_done;
+	struct callback_param tx_param;
+	struct callback_param rx_param;
+	struct pci_dev *dmac1;
+
+	unsigned long quirks;
+	u32 rx_fifo_threshold;
+
+	int cs_change;
+	void (*cs_control)(u32 command);
+};
+
+struct chip_data {
+	u32 cr0;
+	u32 cr1;
+	u32 timeout;
+	u8 chip_select;
+	u8 n_bytes;
+	u8 dma_enabled;
+	u8 bits_per_word;
+	u32 speed_hz;
+	int (*write)(struct ssp_drv_context *sspc);
+	int (*read)(struct ssp_drv_context *sspc);
+	void (*cs_control)(u32 command);
+};
+
+
+enum intel_mid_ssp_spi_fifo_burst {
+	IMSS_FIFO_BURST_1,
+	IMSS_FIFO_BURST_4,
+	IMSS_FIFO_BURST_8
+};
+
+/* spi_board_info.controller_data for SPI slave devices,
+ * copied to spi_device.platform_data ... mostly for dma tuning
+ */
+struct intel_mid_ssp_spi_chip {
+	enum intel_mid_ssp_spi_fifo_burst burst_size;
+	u32 timeout;
+	u8 enable_loopback;
+	u8 dma_enabled;
+	void (*cs_control)(u32 command);
+};
+
+#define SPI_DIB_NAME_LEN  16
+#define SPI_DIB_SPEC_INFO_LEN      10
+
+struct spi_dib_header {
+	u32       signature;
+	u32       length;
+	u8         rev;
+	u8         checksum;
+	u8         dib[0];
+} __packed;
+
+#endif /*INTEL_MID_SSP_SPI_H_*/
diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h
index 115b570..6bfc250 100644
--- a/include/linux/stacktrace.h
+++ b/include/linux/stacktrace.h
@@ -23,8 +23,11 @@
 
 #ifdef CONFIG_USER_STACKTRACE_SUPPORT
 extern void save_stack_trace_user(struct stack_trace *trace);
+extern void save_stack_trace_user_task(struct task_struct *task,
+				struct stack_trace *trace);
 #else
 # define save_stack_trace_user(trace)              do { } while (0)
+# define save_stack_trace_user_task(task, trace)   do { } while (0)
 #endif
 
 #else
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 84ca436..9faf0f4 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -130,6 +130,7 @@
 #define RPC_TASK_SOFTCONN	0x0400		/* Fail if can't connect */
 #define RPC_TASK_SENT		0x0800		/* message was sent */
 #define RPC_TASK_TIMEOUT	0x1000		/* fail with ETIMEDOUT on timeout */
+#define RPC_TASK_NOCONNECT	0x2000		/* return ENOTCONN if not connected */
 
 #define RPC_IS_ASYNC(t)		((t)->tk_flags & RPC_TASK_ASYNC)
 #define RPC_IS_SWAPPER(t)	((t)->tk_flags & RPC_TASK_SWAPPER)
diff --git a/include/linux/synaptics_i2c_rmi4.h b/include/linux/synaptics_i2c_rmi4.h
new file mode 100644
index 0000000..b9d284d
--- /dev/null
+++ b/include/linux/synaptics_i2c_rmi4.h
@@ -0,0 +1,73 @@
+/**
+ *
+ * Synaptics Register Mapped Interface (RMI4) I2C Physical Layer Driver.
+ * Copyright (c) 2007-2010, Synaptics Incorporated
+ *
+ * Author: Js HA <js.ha@stericsson.com> for ST-Ericsson
+ * Author: Naveen Kumar G <naveen.gaddipati@stericsson.com> for ST-Ericsson
+ * Copyright 2010 (c) ST-Ericsson AB
+ */
+/*
+ * This file is licensed under the GPL2 license.
+ *
+ *#############################################################################
+ * GPL
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ *
+ *#############################################################################
+ */
+
+#ifndef _SYNAPTICS_RMI4_H_INCLUDED_
+#define _SYNAPTICS_RMI4_H_INCLUDED_
+
+#include <linux/sfi.h>
+
+#define RMI4_S3202_OGS	0
+#define RMI4_S3202_GFF	1
+#define RMI4_S3400_CGS	2
+#define RMI4_S3400_IGZO 3
+
+#define S3202_DEV_ID		"synaptics_3202"
+#define S3402_DEV_ID            "synaptics_3402"
+#define S3400_CGS_DEV_ID	"syn_3400_cgs"
+#define S3400_IGZO_DEV_ID	"syn_3400_igzo"
+
+struct rmi4_touch_calib {
+	bool x_flip;
+	bool y_flip;
+	bool swap_axes;
+	u32 customer_id;
+	char *fw_name;
+	char *key_dev_name;
+};
+
+/**
+ * struct synaptics_rmi4_platform_data - contains the rmi4 platform data
+ * @int_gpio_number: interrupt gpio number
+ * @rst_gpio_umber: reset gpio number
+ * @irq_type: irq type
+ * @x flip: x flip flag
+ * @y flip: y flip flag
+ * @swap_axes: x, y swap flag
+ * @regulator_en: regulator enable flag
+ *
+ * This structure gives platform data for rmi4.
+ */
+struct rmi4_platform_data {
+	int int_gpio_number;
+	int rst_gpio_number;
+	int irq_type;
+	bool regulator_en;
+	char *regulator_name;
+	struct rmi4_touch_calib *calib;
+};
+
+#endif
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index f2c973e..dbcf606 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -818,9 +818,14 @@
 asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, int,
 	       int __user *);
 #else
+#ifdef CONFIG_CLONE_BACKWARDS3
+asmlinkage long sys_clone(unsigned long, unsigned long, int, int __user *,
+			  int __user *, int);
+#else
 asmlinkage long sys_clone(unsigned long, unsigned long, int __user *,
 	       int __user *, int);
 #endif
+#endif
 
 asmlinkage long sys_execve(const char __user *filename,
 		const char __user *const __user *argv,
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index a386a1c..04b08a6 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -110,22 +110,22 @@
 		     struct thermal_cooling_device *);
 	int (*unbind) (struct thermal_zone_device *,
 		       struct thermal_cooling_device *);
-	int (*get_temp) (struct thermal_zone_device *, unsigned long *);
+	int (*get_temp) (struct thermal_zone_device *, long *);
 	int (*get_mode) (struct thermal_zone_device *,
 			 enum thermal_device_mode *);
 	int (*set_mode) (struct thermal_zone_device *,
 		enum thermal_device_mode);
 	int (*get_trip_type) (struct thermal_zone_device *, int,
 		enum thermal_trip_type *);
-	int (*get_trip_temp) (struct thermal_zone_device *, int,
-			      unsigned long *);
-	int (*set_trip_temp) (struct thermal_zone_device *, int,
-			      unsigned long);
-	int (*get_trip_hyst) (struct thermal_zone_device *, int,
-			      unsigned long *);
-	int (*set_trip_hyst) (struct thermal_zone_device *, int,
-			      unsigned long);
-	int (*get_crit_temp) (struct thermal_zone_device *, unsigned long *);
+	int (*get_trip_temp) (struct thermal_zone_device *, int, long *);
+	int (*set_trip_temp) (struct thermal_zone_device *, int, long);
+	int (*get_trip_hyst) (struct thermal_zone_device *, int, long *);
+	int (*set_trip_hyst) (struct thermal_zone_device *, int, long);
+	int (*get_slope) (struct thermal_zone_device *, long *);
+	int (*set_slope) (struct thermal_zone_device *, long);
+	int (*get_intercept) (struct thermal_zone_device *, long *);
+	int (*set_intercept) (struct thermal_zone_device *, long);
+	int (*get_crit_temp) (struct thermal_zone_device *, long *);
 	int (*set_emul_temp) (struct thermal_zone_device *, unsigned long);
 	int (*get_trend) (struct thermal_zone_device *, int,
 			  enum thermal_trend *);
@@ -137,6 +137,12 @@
 	int (*get_max_state) (struct thermal_cooling_device *, unsigned long *);
 	int (*get_cur_state) (struct thermal_cooling_device *, unsigned long *);
 	int (*set_cur_state) (struct thermal_cooling_device *, unsigned long);
+	int (*get_force_state_override) (struct thermal_cooling_device *,
+								char *);
+	int (*set_force_state_override) (struct thermal_cooling_device *,
+								char *);
+	int (*get_available_states) (struct thermal_cooling_device *,
+								char *);
 };
 
 struct thermal_cooling_device {
diff --git a/include/linux/timex.h b/include/linux/timex.h
index b3726e6..dd3edd7 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -141,6 +141,7 @@
 extern void hardpps(const struct timespec *, const struct timespec *);
 
 int read_current_timer(unsigned long *timer_val);
+void ntp_notify_cmos_timer(void);
 
 /* The clock frequency of the i8253/i8254 PIT */
 #define PIT_TICK_RATE 1193182ul
diff --git a/include/linux/topology.h b/include/linux/topology.h
index d3cf0d6..7b5f582 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -84,6 +84,17 @@
  */
 #define ARCH_HAS_SCHED_WAKE_IDLE
 /* Common values for SMT siblings */
+
+#ifdef CONFIG_WORKLOAD_CONSOLIDATION
+#ifndef ASYM_CONCURRENCY_INIT
+#define ASYM_CONCURRENCY_INIT(n) .asym_concurrency = (n),
+#endif
+#else
+#ifndef ASYM_CONCURRENCY_INIT
+#define ASYM_CONCURRENCY_INIT(n)
+#endif
+#endif
+
 #ifndef SD_SIBLING_INIT
 #define SD_SIBLING_INIT (struct sched_domain) {				\
 	.min_interval		= 1,					\
@@ -102,10 +113,12 @@
 				| 0*SD_SERIALIZE			\
 				| 0*SD_PREFER_SIBLING			\
 				| arch_sd_sibling_asym_packing()	\
+				| 0*SD_ASYM_CONCURRENCY			\
 				,					\
 	.last_balance		= jiffies,				\
 	.balance_interval	= 1,					\
 	.smt_gain		= 1178,	/* 15% */			\
+	ASYM_CONCURRENCY_INIT(0)					\
 }
 #endif
 #endif /* CONFIG_SCHED_SMT */
@@ -132,9 +145,11 @@
 				| 0*SD_SHARE_CPUPOWER			\
 				| 1*SD_SHARE_PKG_RESOURCES		\
 				| 0*SD_SERIALIZE			\
+				| 0*SD_ASYM_CONCURRENCY			\
 				,					\
 	.last_balance		= jiffies,				\
 	.balance_interval	= 1,					\
+	ASYM_CONCURRENCY_INIT(0)					\
 }
 #endif
 #endif /* CONFIG_SCHED_MC */
@@ -163,9 +178,11 @@
 				| 0*SD_SHARE_PKG_RESOURCES		\
 				| 0*SD_SERIALIZE			\
 				| 1*SD_PREFER_SIBLING			\
+				| 1*SD_ASYM_CONCURRENCY			\
 				,					\
 	.last_balance		= jiffies,				\
 	.balance_interval	= 1,					\
+	ASYM_CONCURRENCY_INIT(180)					\
 }
 #endif
 
diff --git a/include/linux/usb.h b/include/linux/usb.h
index a0bee5a..d484a3d 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -394,6 +394,22 @@
 };
 
 /*
+ * USB 2.0 Link Power Management (LPM) parameters.
+ */
+struct usb2_lpm_parameters {
+	/* Best effort service latency indicate how long the host will drive
+	 * resume on an exit from L1.
+	 */
+	unsigned int besl;
+
+	/* Timeout value in microseconds for the L1 inactivity (LPM) timer.
+	 * When the timer counts to zero, the parent hub will initiate a LPM
+	 * transition to L1.
+	 */
+	int timeout;
+};
+
+/*
  * USB 3.0 Link Power Management (LPM) parameters.
  *
  * PEL and SEL are USB 3.0 Link PM latencies for device-initiated LPM exit.
@@ -468,6 +484,7 @@
  * @wusb: device is Wireless USB
  * @lpm_capable: device supports LPM
  * @usb2_hw_lpm_capable: device can perform USB2 hardware LPM
+ * @usb2_hw_lpm_besl_capable: device can perform USB2 hardware BESL LPM
  * @usb2_hw_lpm_enabled: USB2 hardware LPM enabled
  * @usb3_lpm_enabled: USB3 hardware LPM enabled
  * @string_langid: language ID for strings
@@ -487,6 +504,7 @@
  *	specific data for the device.
  * @slot_id: Slot ID assigned by xHCI
  * @removable: Device can be physically removed from this port
+ * @l1_params: best effor service latency for USB2 L1 LPM state, and L1 timeout.
  * @u1_params: exit latencies for USB3 U1 LPM state, and hub-initiated timeout.
  * @u2_params: exit latencies for USB3 U2 LPM state, and hub-initiated timeout.
  * @lpm_disable_count: Ref count used by usb_disable_lpm() and usb_enable_lpm()
@@ -538,6 +556,7 @@
 	unsigned wusb:1;
 	unsigned lpm_capable:1;
 	unsigned usb2_hw_lpm_capable:1;
+	unsigned usb2_hw_lpm_besl_capable:1;
 	unsigned usb2_hw_lpm_enabled:1;
 	unsigned usb3_lpm_enabled:1;
 	int string_langid;
@@ -566,6 +585,7 @@
 	struct wusb_dev *wusb_dev;
 	int slot_id;
 	enum usb_device_removable removable;
+	struct usb2_lpm_parameters l1_params;
 	struct usb3_lpm_parameters u1_params;
 	struct usb3_lpm_parameters u2_params;
 	unsigned lpm_disable_count;
@@ -1804,6 +1824,12 @@
 #define USB_DEVICE_REMOVE	0x0002
 #define USB_BUS_ADD		0x0003
 #define USB_BUS_REMOVE		0x0004
+#define USB_PORT_SUSPEND	0x0005
+#define USB_PORT_RESUME		0x0006
+/* For OTG Test mode */
+#define USB_OTG_TESTDEV		0x0011
+#define USB_OTG_TESTDEV_VBUSOFF	0x0012
+
 extern void usb_register_notify(struct notifier_block *nb);
 extern void usb_unregister_notify(struct notifier_block *nb);
 
diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h
index cc25b70..06ff1f9 100644
--- a/include/linux/usb/cdc_ncm.h
+++ b/include/linux/usb/cdc_ncm.h
@@ -125,6 +125,11 @@
 	u16 tx_seq;
 	u16 rx_seq;
 	u16 connected;
+
+	u8 *fragment;
+	u32 fragment_size;
+	u32 fragment_deleted;
+	u32 fragment_recombinated;
 };
 
 extern u8 cdc_ncm_select_altsetting(struct usbnet *dev, struct usb_interface *intf);
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index 5e61589..7ffce74 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -380,6 +380,7 @@
 	struct list_head		gstrings;
 	struct usb_composite_driver	*driver;
 	u8				next_string_id;
+	u8				reset_string_id;
 	char				*def_manufacturer;
 
 	/* the gadget driver won't enable the data pullup
@@ -394,6 +395,9 @@
 
 	/* protects deactivations and delayed_status counts*/
 	spinlock_t			lock;
+
+	/* OTG support */
+	struct usb_otg_descriptor	*otg_desc;
 };
 
 extern int usb_string_id(struct usb_composite_dev *c);
diff --git a/include/linux/usb/debug.h b/include/linux/usb/debug.h
new file mode 100644
index 0000000..2a08e36
--- /dev/null
+++ b/include/linux/usb/debug.h
@@ -0,0 +1,253 @@
+/*
+ * <linux/usb/debug.h> -- USB Debug Class definitions.
+ *
+ * Copyright (C) 2008-2010, Intel Corporation.
+ *
+ * This software is distributed under the terms of the GNU General Public
+ * License ("GPL") version 2, as published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __LINUX_USB_DEBUG_H
+#define __LINUX_USB_DEBUG_H
+
+#include <linux/types.h>
+
+/* Debug Interface Subclass Codes */
+#define USB_SUBCLASS_DVC_GP		0x05
+#define USB_SUBCLASS_DVC_DFX		0x06
+#define USB_SUBCLASS_DVC_TRACE		0x07
+#define USB_SUBCLASS_DEBUG_CONTROL	0x08
+
+/* Debug Interface Function Protocol */
+#define DC_PROTOCOL_VENDOR			0x00
+#define DC_PROTOCOL_LAUTERBACH			0x01
+#define DC_PROTOCOL_ITP			0x02
+
+/* Debug Class-Specific Interface Descriptor Subtypes */
+#define DC_UNDEFINED			0x00
+#define DC_INPUT_CONNECTION		0x01
+#define DC_OUTPUT_CONNECTION		0x02
+#define DC_DEBUG_UNIT			0x03
+#define DC_DEBUG_ATTRIBUTES		0x04 /* revision: per SAS */
+
+/* Debug-Class Input/Output Connection Type */
+#define DC_CONNECTION_USB			0x00
+#define DC_CONNECTION_JTAG			0x01
+#define DC_CONNECTION_DEBUG_DATA_CONTROL	0x02
+#define DC_CONNECTION_DEBUG_DATA		0x03
+#define DC_CONNECTION_DEBUG_CONTROL		0x04
+
+/*
+ * Debug-class (rev 0.88r2) section 4.4.3
+ * Attibute  Descriptor, bmControl
+ */
+#define DC_CTL_SET_CFG_DATA_SG			(1 << 0)
+#define DC_CTL_SET_CFG_DATA			(1 << 1)
+#define DC_CTL_GET_CFG_DATA			(1 << 2)
+#define DC_CTL_SET_CFG_ADDR			(1 << 3)
+#define DC_CTL_GET_CFG_ADDR			(1 << 4)
+#define DC_CTL_SET_ALT_STACK			(1 << 5)
+#define DC_CTL_GET_ALT_STACK			(1 << 6)
+#define DC_CTL_SET_OP_MODE			(1 << 7)
+#define DC_CTL_GET_OP_MODE			(1 << 8)
+#define DC_CTL_SET_TRACE_CFG			(1 << 9)
+#define DC_CTL_GET_TRACE_CFG			(1 << 10)
+#define DC_CTL_SET_BUFF_INFO			(1 << 11)
+#define DC_CTL_GET_BUFF_INFO			(1 << 12)
+#define DC_CTL_SET_RESET			(1 << 13)
+
+/* Debug-class (rev 0.88r2) section 4.4.6
+ * Unit/Input/Output connection Descriptors,
+ * dTraceFormat
+ */
+#define DC_TRACE_NOT_FORMATED_PASSTHROUGH   0x00000000
+#define DC_TRACE_NOT_FORMATED_HEADER        0x00000001
+#define DC_TRACE_NOT_FORMATED_FOOTER        0x00000002
+#define DC_TRACE_NOT_FORMATED_GUID          0x00000005
+#define DC_TRACE_NOT_FORMATED_UTF8          0x00000006
+#define DC_TRACE_INTEL_FORMATED_VENDOR      0x01000000
+#define DC_TRACE_MIPI_FORMATED_STPV1        0x80000000
+#define DC_TRACE_MIPI_FORMATED_STPV2        0x80000001
+#define DC_TRACE_MIPI_FORMATED_TWP          0x80000100
+#define DC_TRACE_MIPI_FORMATED_OST          0x80001000
+#define DC_TRACE_NEXUS_FORMATED             0x81000000
+
+/* Debug-class (rev 0.88r2) section 4.4.6
+ * Unit connection Descriptors, dDebugUnitType
+ */
+#define DC_UNIT_TYPE_DFX		0x00
+#define DC_UNIT_TYPE_SELECT		0x01
+#define DC_UNIT_TYPE_TRACE_ROUTE	0x02
+#define DC_UNIT_TYPE_TRACE_PROC	0x03
+#define DC_UNIT_TYPE_TRACE_GEN		0x04
+#define DC_UNIT_TYPE_TRACE_SINK	0x05
+#define DC_UNIT_TYPE_CONTROL		0x06
+#define DC_UNIT_TYPE_VENDOR		0x40
+
+/* Debug-class (rev 0.88r2) section 4.4.6
+ * Unit connection Descriptors, dDebugUnitSubType
+ */
+#define DC_UNIT_SUBTYPE_NO              0x00
+#define DC_UNIT_SUBTYPE_CPU             0x01
+#define DC_UNIT_SUBTYPE_GFX             0x02
+#define DC_UNIT_SUBTYPE_VIDEO		0x03
+#define DC_UNIT_SUBTYPE_IMAGING	0x04
+#define DC_UNIT_SUBTYPE_AUDIO		0x05
+#define DC_UNIT_SUBTYPE_MODEM		0x06
+#define DC_UNIT_SUBTYPE_BLUETOOTH	0x07
+#define DC_UNIT_SUBTYPE_PWR_MGT	0x08
+#define DC_UNIT_SUBTYPE_SECURITY	0x09
+#define DC_UNIT_SUBTYPE_SENSOR		0x0A
+#define DC_UNIT_SUBTYPE_BUSWATCH	0x0B
+#define DC_UNIT_SUBTYPE_GPS		0x0C
+#define DC_UNIT_SUBTYPE_TRACEZIP	0x0D
+#define DC_UNIT_SUBTYPE_TAPCTL		0x0E
+#define DC_UNIT_SUBTYPE_MEMACC		0x0F
+#define DC_UNIT_SUBTYPE_SWLOGGER	0x40
+#define DC_UNIT_SUBTYPE_SWROUTER	0x41
+#define DC_UNIT_SUBTYPE_SWDRIVER	0x42
+#define DC_UNIT_SUBTYPE_VENDOR		0x80
+
+/* USB DBG requests values */
+#define DC_REQUEST_SET_CONFIG_DATA		0x01
+#define DC_REQUEST_SET_CONFIG_DATA_SINGLE	0x02
+#define DC_REQUEST_SET_CONFIG_ADDRESS		0x03
+#define DC_REQUEST_SET_ALT_STACK		0x04
+#define DC_REQUEST_SET_OPERATING		0x05
+#define DC_REQUEST_SET_TRACE			0x08
+#define DC_REQUEST_SET_BUFFER_INFO		0x09
+#define DC_REQUEST_SET_RESET			0x0A
+#define DC_REQUEST_GET_CONFIG_DATA		0x81
+#define DC_REQUEST_GET_CONFIG_DATA_SINGLE	0x82
+#define DC_REQUEST_GET_CONFIG_ADDRESS		0x83
+#define DC_REQUEST_GET_ALT_STACK		0x84
+#define DC_REQUEST_GET_OPERATING		0x85
+#define DC_REQUEST_GET_TRACE			0x86
+#define DC_REQUEST_GET_INFO			0x87
+#define DC_REQUEST_GET_ERROR			0x88
+#define DC_REQUEST_GET_BUFFER_INFO		0x89
+
+/* Debug-Class Debug-Attributes Descriptor */
+struct dc_debug_attributes_descriptor {
+	__u8  bLength;
+	__u8  bDescriptorType;
+	__u8  bDescriptorSubtype;
+	__le16 bcdDC;
+	__le16 wTotalLength;
+	__u8  bmSupportedFeatures;
+	__u8  bControlSize;		/* n */
+	__u8  bmControl[0];		/* [n] */
+	__le16 wAuxDataSize;		/* m */
+	__le32 dInputBufferSize;
+	__le32 dOutputBufferSize;
+	__le64 qBaseAddress;
+	__le64 hGlobalID[2];
+	__u8  Supplementary[0];		/* [m-32] */
+} __attribute__((__packed__));
+
+#define DC_DEBUG_ATTR_DESCR(name)		 \
+	dc_debug_attributes_descriptor_##name
+
+#define DECLARE_DC_DEBUG_ATTR_DESCR(name, n, m)	\
+struct DC_DEBUG_ATTR_DESCR(name) {		\
+	__u8  bLength;					\
+	__u8  bDescriptorType;				\
+	__u8  bDescriptorSubtype;			\
+	__le16 bcdDC;					\
+	__le16 wTotalLength;				\
+	__u8  bmSupportedFeatures;			\
+	__u8  bControlSize;				\
+	__u8  bmControl[n];				\
+	__le16 wAuxDataSize;				\
+	__le32 dInputBufferSize;			\
+	__le32 dOutputBufferSize;			\
+	__le64 qBaseAddress;				\
+	__le64 hGlobalID[2];				\
+	__u8  Supplementary[m-32];			\
+} __attribute__((__packed__));
+
+#define DC_DBG_ATTRI_SIZE(n, m)		(9 + (n) + 2 + (m))
+
+/* Debug-Class Input Connection Descriptor */
+struct dc_input_connection_descriptor {
+	__u8  bLength;
+	__u8  bDescriptorType;
+	__u8  bDescriptorSubtype;
+	__u8  bConnectionID;
+	__u8  bConnectionType;
+	__u8  bAssocConnection;
+	__u8  iConnection;
+	__le32 dTraceFormat;
+	__le32 dStreamID;
+} __attribute__((__packed__));
+
+#define DC_INPUT_CONNECTION_SIZE	15
+
+/* Debug-Class Output Connection Descriptor */
+struct dc_output_connection_descriptor {
+	__u8  bLength;
+	__u8  bDescriptorType;
+	__u8  bDescriptorSubtype;
+	__u8  bConnectionID;
+	__u8  bConnectionType;
+	__u8  bAssocConnection;
+	__le16 wSourceID;
+	__u8  iConnection;
+} __attribute__((__packed__));
+
+#define DC_OUTPUT_CONNECTION_SIZE	9
+
+/* Debug-Class Debug-Unit Descriptor */
+struct dc_debug_unit_descriptor {
+	__u8  bLength;
+	__u8  bDescriptorType;
+	__u8  bDescriptorSubtype;
+	__u8  bUnitID;
+	__u8  bDebugUnitType;
+	__u8  bDebugSubUnitType;
+	__u8  bAliasUnitID;
+	__u8  bNrInPins;		/* p */
+	__le16 wSourceID[0];		/* [p] */
+	__u8  bNrOutPins;		/* q */
+	__le32 dTraceFormat[0];		/* [q] */
+	__le32 dStreamID;
+	__u8  bControlSize;		/* n */
+	__u8  bmControl[0];		/* [n] */
+	__le16 wAuxDataSize;		/* m */
+	__le64 qBaseAddress;
+	__le64 hIPID[2];
+	__u8  Supplementary[0];		/* [m-24] */
+	__u8  iDebugUnitType;
+} __attribute__((__packed__));
+
+#define DC_DEBUG_UNIT_DESCRIPTOR(p, q, n, m)		\
+	dc_debug_unit_descriptor_##p_##q##n_##m
+
+#define DECLARE_DC_DEBUG_UNIT_DESCRIPTOR(p, q, n, m)	\
+struct DC_DEBUG_UNIT_DESCRIPTOR(p, q, n, m) {		\
+	__u8  bLength;					\
+	__u8  bDescriptorType;				\
+	__u8  bDescriptorSubtype;			\
+	__u8  bUnitID;					\
+	__u8  bDebugUnitType;				\
+	__u8  bDebugSubUnitType;			\
+	__u8  bAliasUnitID;				\
+	__u8  bNrInPins;				\
+	__le16 wSourceID[p];				\
+	__u8  bNrOutPins;				\
+	__le32 dTraceFormat[q];			\
+	__le32 dStreamID;				\
+	__u8  bControlSize;				\
+	__u8  bmControl[n];				\
+	__le16 wAuxDataSize;				\
+	__le64 qBaseAddress;				\
+	__le64 hIPID[2];				\
+	__u8  Supplementary[m-24];			\
+	__u8  iDebugUnitType;				\
+} __attribute__((__packed__));
+
+#define DC_DBG_UNIT_SIZE(p, q, n, m)	\
+(8 + (p * 2) + 1 + (q * 4) + 5 + (n) + 2 + (m) + 1)
+
+#endif /* __LINUX_USB_DEBUG_H */
diff --git a/include/linux/usb/dwc3-intel-mid.h b/include/linux/usb/dwc3-intel-mid.h
new file mode 100644
index 0000000..7bcd41d
--- /dev/null
+++ b/include/linux/usb/dwc3-intel-mid.h
@@ -0,0 +1,245 @@
+/*
+ * Intel Penwell USB OTG transceiver driver
+ * Copyright (C) 2009 - 2010, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifndef __DWC3_INTEL_H
+#define __DWC3_INTEL_H
+
+#include "otg.h"
+
+enum intel_mid_pmic_type {
+	NO_PMIC,
+	SHADY_COVE,
+	BASIN_COVE
+};
+
+struct intel_dwc_otg_pdata {
+	unsigned device_hibernation:1;
+	int is_hvp;
+	enum intel_mid_pmic_type pmic_type;
+	int charger_detect_enable;
+	int gpio_cs;
+	int gpio_reset;
+	int gpio_id;
+	int id;
+	int charging_compliance;
+	struct delayed_work suspend_discon_work;
+	u8 ti_phy_vs1;
+	int sdp_charging;
+	enum usb_phy_intf usb2_phy_type;
+
+	/* USB2 electronic calibration value.
+	 *
+	 * ULPI(TI1211):
+	 * ZHSDRV and IHSTX of VS1 register for TI1211 PHY.
+	 * They impact the eye diagram qulity.
+	 *
+	 * UTMI(Intel):
+	 * USB2PERPORT register.
+	 * D14:  0=full-bit PE; 1=half-bit PE
+	 * D[13:11]:  PE/DE bias (0-to-7)
+	 * D[10:08]:  TX bias (0-to-7)
+	 */
+	int ulpi_eye_calibration;
+	int utmi_eye_calibration;
+
+	/* If the VUSBPHY power rail using for providing
+	 * power for USB PHY. */
+	int using_vusbphy;
+
+	/* WA for PMIC can't detect VBUS drop. */
+	int detect_vbus_drop;
+
+	/* Enable UTMI PHY WA for FS device detection issue */
+	int utmi_fs_det_wa;
+};
+
+/* timeout for disconnect from a suspended host */
+#define SUSPEND_DISCONNECT_TIMEOUT	(HZ * 300)
+
+#define TUSB1211_VENDOR_ID_LO					0x00
+#define TUSB1211_VENDOR_ID_HI					0x01
+#define TUSB1211_PRODUCT_ID_LO					0x02
+#define TUSB1211_PRODUCT_ID_HI					0x03
+#define TUSB1211_FUNC_CTRL						0x04
+#define TUSB1211_FUNC_CTRL_SET					0x05
+#define TUSB1211_FUNC_CTRL_CLR					0x06
+#define TUSB1211_IFC_CTRL						0x07
+#define TUSB1211_IFC_CTRL_SET					0x08
+#define TUSB1211_IFC_CTRL_CLR					0x09
+#define TUSB1211_OTG_CTRL						0x0A
+#define TUSB1211_OTG_CTRL_SET					0x0B
+#define TUSB1211_OTG_CTRL_CLR					0x0C
+#define TUSB1211_USB_INT_EN_RISE				0x0D
+#define TUSB1211_USB_INT_EN_RISE_SET			0x0E
+#define TUSB1211_USB_INT_EN_RISE_CLR			0x0F
+#define TUSB1211_USB_INT_EN_FALL				0x10
+#define TUSB1211_USB_INT_EN_FALL_SET			0x11
+#define TUSB1211_USB_INT_EN_FALL_CLR			0x12
+#define TUSB1211_USB_INT_STS					0x13
+#define TUSB1211_USB_INT_LATCH					0x14
+#define TUSB1211_DEBUG							0x15
+#define TUSB1211_SCRATCH_REG					0x16
+#define TUSB1211_SCRATCH_REG_SET				0x17
+#define TUSB1211_SCRATCH_REG_CLR				0x18
+#define TUSB1211_ACCESS_EXT_REG_SET				0x2F
+
+#define TUSB1211_VENDOR_SPECIFIC1				0x80
+#define TUSB1211_VENDOR_SPECIFIC1_SET			0x81
+#define TUSB1211_VENDOR_SPECIFIC1_CLR			0x82
+#define TUSB1211_POWER_CONTROL					0x3D
+#define TUSB1211_POWER_CONTROL_SET				0x3E
+#define TUSB1211_POWER_CONTROL_CLR				0x3F
+
+#define TUSB1211_VENDOR_SPECIFIC2				0x80
+#define TUSB1211_VENDOR_SPECIFIC2_SET			0x81
+#define TUSB1211_VENDOR_SPECIFIC2_CLR			0x82
+#define TUSB1211_VENDOR_SPECIFIC2_STS			0x83
+#define TUSB1211_VENDOR_SPECIFIC2_LATCH			0x84
+#define TUSB1211_VENDOR_SPECIFIC3				0x85
+#define TUSB1211_VENDOR_SPECIFIC3_SET			0x86
+#define TUSB1211_VENDOR_SPECIFIC3_CLR			0x87
+#define TUSB1211_VENDOR_SPECIFIC4				0x88
+#define TUSB1211_VENDOR_SPECIFIC4_SET			0x89
+#define TUSB1211_VENDOR_SPECIFIC4_CLR			0x8A
+#define TUSB1211_VENDOR_SPECIFIC5				0x8B
+#define TUSB1211_VENDOR_SPECIFIC5_SET			0x8C
+#define TUSB1211_VENDOR_SPECIFIC5_CLR			0x8D
+#define TUSB1211_VENDOR_SPECIFIC6				0x8E
+#define TUSB1211_VENDOR_SPECIFIC6_SET			0x8F
+#define TUSB1211_VENDOR_SPECIFIC6_CLR			0x90
+
+#define VS1_DATAPOLARITY						(1 << 6)
+#define VS1_ZHSDRV(v)					((v & 0x3) << 5)
+#define VS1_IHSTX(v)						 ((v & 0x7))
+
+#define VS2STS_VBUS_MNTR_STS					(1 << 7)
+#define VS2STS_REG3V3IN_MNTR_STS				(1 << 6)
+#define VS2STS_SVLDCONWKB_WDOG_STS				(1 << 5)
+#define VS2STS_ID_FLOAT_STS						(1 << 4)
+#define VS2STS_ID_RARBRC_STS(v)					((v & 0x3) << 2)
+#define VS2STS_BVALID_STS						(1 << 0)
+
+#define VS3_CHGD_IDP_SRC_EN						(1 << 6)
+#define VS3_IDPULLUP_WK_EN						(1 << 5)
+#define VS3_SW_USB_DET							(1 << 4)
+#define VS3_DATA_CONTACT_DET_EN					(1 << 3)
+#define VS3_REG3V3_VSEL(v)					   (v & 0x7)
+
+#define VS4_ACA_DET_EN							(1 << 6)
+#define VS4_RABUSIN_EN							(1 << 5)
+#define VS4_R1KSERIES							(1 << 4)
+#define VS4_PSW_OSOD							(1 << 3)
+#define VS4_PSW_CMOS							(1 << 2)
+#define VS4_CHGD_SERX_DP						(1 << 1)
+#define VS4_CHGD_SERX_DM						(1 << 0)
+
+#define VS5_AUTORESUME_WDOG_EN					(1 << 6)
+#define VS5_ID_FLOAT_EN							(1 << 5)
+#define VS5_ID_RES_EN							(1 << 4)
+#define VS5_SVLDCONWKB_WDOG_EN					(1 << 3)
+#define VS5_VBUS_MNTR_RISE_EN					(1 << 2)
+#define VS5_VBUS_MNTR_FALL_EN					(1 << 1)
+#define VS5_REG3V3IN_MNTR_EN					(1 << 0)
+
+#define DEBUG_LINESTATE                       (0x3 << 0)
+
+#define OTGCTRL_USEEXTVBUS_INDICATOR			(1 << 7)
+#define OTGCTRL_DRVVBUSEXTERNAL					(1 << 6)
+#define OTGCTRL_DRVVBUS							(1 << 5)
+#define OTGCTRL_CHRGVBUS						(1 << 4)
+#define OTGCTRL_DISCHRGVBUS						(1 << 3)
+#define OTGCTRL_DMPULLDOWN						(1 << 2)
+#define OTGCTRL_DPPULLDOWN						(1 << 1)
+#define OTGCTRL_IDPULLUP						(1 << 0)
+
+#define FUNCCTRL_SUSPENDM						(1 << 6)
+#define FUNCCTRL_RESET							(1 << 5)
+#define FUNCCTRL_OPMODE(v)				((v & 0x3) << 3)
+#define FUNCCTRL_TERMSELECT						(1 << 2)
+#define FUNCCTRL_XCVRSELECT(v)					(v & 0x3)
+
+#define PWCTRL_HWDETECT							(1 << 7)
+#define PWCTRL_DP_VSRC_EN						(1 << 6)
+#define PWCTRL_VDAT_DET							(1 << 5)
+#define PWCTRL_DP_WKPU_EN						(1 << 4)
+#define PWCTRL_BVALID_FALL						(1 << 3)
+#define PWCTRL_BVALID_RISE						(1 << 2)
+#define PWCTRL_DET_COMP							(1 << 1)
+#define PWCTRL_SW_CONTROL						(1 << 0)
+
+
+#define PMIC_VLDOCNT                0xAF
+#define PMIC_VLDOCNT_VUSBPHYEN      (1 << 2)
+
+#define PMIC_TLP1ESBS0I1VNNBASE		0X6B
+#define PMIC_I2COVRDADDR			0x59
+#define PMIC_I2COVROFFSET			0x5A
+#define PMIC_USBPHYCTRL				0x30
+#define PMIC_I2COVRWRDATA			0x5B
+#define PMIC_I2COVRCTRL				0x58
+#define PMIC_I2COVRCTL_I2CWR		0x01
+
+#define USBPHYRSTB				(1 << 0)
+#define USBPHYCTRL_D0			(1 << 0)
+#define PMIC_USBIDCTRL				0x19
+#define USBIDCTRL_ACA_DETEN_D1	(1 << 1)
+#define USBIDCTRL_USB_IDEN_D0	(1 << 0)
+#define PMIC_USBIDSTS				0x1A
+#define USBIDSTS_ID_GND			(1 << 0)
+#define USBIDSTS_ID_RARBRC_STS(v)	((v & 0x3)  << 1)
+#define USBIDSTS_ID_FLOAT_STS	(1 << 3)
+#define PMIC_USBPHYCTRL_D0		(1 << 0)
+#define APBFC_EXIOTG3_MISC0_REG			0xF90FF85C
+
+#define DATACON_TIMEOUT		750
+#define DATACON_INTERVAL	10
+#define PCI_DEVICE_ID_DWC 0x119E
+
+#define VENDOR_ID_MASK (0x03 << 6)
+#define BASIN_COVE_PMIC_ID (0x03 << 6)
+
+#define PMIC_MAJOR_REV (0x07 << 3)
+#define PMIC_A0_MAJOR_REV 0x00
+
+/* ShardyCove register */
+#define PMIC_SCHGRIRQ1		0X4F
+#define PMIC_GPADCREQ_REG	0xDC
+#define PMIC_ADCIRQ_REG		0x06
+#define PMIC_USBIDRSLTL		0xEF
+#define PMIC_USBIDRSLTH		0xEE
+#define PMIC_GPADCREQ_ADC_USBID	(1 << 1)
+#define PMIC_ADCIRQ_USBID	(1 << 0)
+#define PMIC_USBIDRSLTL_USBID_L_MASK	(0xFF)
+#define PMIC_USBIDRSLTH_USBID_H_MASK	(0x0F)
+#define PMIC_USBIDRSLTH_USBID_CURSRC_MASK	(0xF0)
+#define PMIC_SCHGRIRQ1_SUSBIDDET(v)	((v & 0x3) << 3)
+#define PMIC_SCHGRIRQ1_SVBUSDET	(1 << 0)
+
+/* SCCB registers */
+#define SCCB_USB_CFG	0xff03a018
+#define SCCB_USB_CFG_SELECT_ULPI	(1 << 14)
+
+/* SMIP address which check if violate BC */
+#define MOFD_SMIP_VIOLATE_BC_ADDR	0xFFFC631B
+#define MERR_SMIP_VIOLATE_BC_ADDR	0xFFFCE717
+#define SMIP_VIOLATE_BC_MASK	0x40
+
+/* UTMI(Intel) PHY USB2PERPORT register */
+#define UTMI_PHY_USB2PERPORT	0xf90B1200
+#endif /* __DWC3_INTEL_H */
diff --git a/include/linux/usb/ehci-tangier-hsic-pci.h b/include/linux/usb/ehci-tangier-hsic-pci.h
new file mode 100644
index 0000000..68ad791
--- /dev/null
+++ b/include/linux/usb/ehci-tangier-hsic-pci.h
@@ -0,0 +1,119 @@
+#ifndef EHCI_TANGIER_HSIC_PCI_H
+#define EHCI_TANGIER_HSIC_PCI_H
+
+#include <linux/notifier.h>
+#include <linux/usb.h>
+#include <linux/wakelock.h>
+
+#define HSIC_AUX_GPIO_NAME       "usb_hsic_aux1"
+#define HSIC_WAKEUP_GPIO_NAME    "usb_hsic_aux2"
+#define HSIC_HUB_RESET_TIME   10
+#define HSIC_ENABLE_SIZE      2
+#define HSIC_DURATION_SIZE    7
+#define PM_BASE		0xff00b000
+#define PM_STS		0x00
+#define PM_CMD		0x04
+
+#define PM_SS0		0x30
+#define PM_SS1		0x34
+#define PM_SS2		0x38
+#define PM_SS3		0x3C
+
+#define PM_SSC0		0x20
+#define PM_SSC1		0x24
+#define PM_SSC2		0x28
+#define PM_SSC3		0x2C
+#define PMU_HW_PEN0 0x108
+#define PMU_HW_PEN1 0x10C
+
+/* Port Inactivity Duratoin is default value for L2 suspend */
+#define HSIC_PORT_INACTIVITYDURATION              500
+/* This is the default value for L2 autosuspend enable */
+#define HSIC_BUS_INACTIVITYDURATION               500
+#define HSIC_REMOTEWAKEUP                         1
+
+enum wlock_state {
+	UNLOCKED,
+	LOCKED
+};
+
+enum s3_state {
+	RESUMED,
+	RESUMING,
+	SUSPENDED,
+	SUSPENDING
+};
+
+/* Failure counter we used for IPC */
+#define PM_FAILURE_COUNT	4
+#define STATS_DISABLE		0
+#define STATS_ENABLE		1
+
+enum ipc_stats_type {
+	REMOTE_WAKEUP,
+	REMOTE_WAKEUP_OOB,
+	BUS_SUSPEND,
+	BUS_RESUME,
+	D0I3_ENTRY,
+	D0I3_EXIT,
+	D3_ENTRY,
+	D3_EXIT
+};
+
+struct ipc_failure {
+	const char	*name;
+	unsigned long	fail_cnt;
+};
+
+struct ipc_stats {
+	const char		*name;
+	unsigned long		success_cnt;
+	struct ipc_failure	ipc_failure[PM_FAILURE_COUNT];
+};
+void count_ipc_stats(int retval, enum ipc_stats_type type);
+
+struct hsic_tangier_priv {
+	struct delayed_work  hsic_aux;
+	wait_queue_head_t    aux_wq;
+	struct mutex         hsic_mutex;
+	struct mutex         wlock_mutex;
+	unsigned             hsic_mutex_init:1;
+	unsigned             aux_wq_init:1;
+	unsigned             hsic_aux_irq_enable:1;
+	unsigned             hsic_wakeup_irq_enable:1;
+	unsigned             hsic_aux_finish:1;
+	unsigned             hsic_enable_created:1;
+	unsigned             hsic_lock_init:1;
+	unsigned             hsic_stopped:1;
+
+	unsigned             remoteWakeup_enable;
+	unsigned             autosuspend_enable;
+	unsigned             aux_gpio;
+	unsigned             wakeup_gpio;
+	unsigned             port_inactivityDuration;
+	unsigned             bus_inactivityDuration;
+	spinlock_t           hsic_lock;
+	/* Root hub device */
+	struct usb_device           *rh_dev;
+	struct usb_device           *modem_dev;
+	struct workqueue_struct     *work_queue;
+	struct delayed_work         wakeup_work;
+	struct notifier_block       hsic_pm_nb;
+	struct notifier_block       hsic_s3_entry_nb;
+	struct wake_lock            resume_wake_lock;
+	struct wake_lock            s3_wake_lock;
+	enum wlock_state            s3_wlock_state;
+	enum s3_state               s3_rt_state;
+};
+
+enum {
+	PROBE,
+	REMOVE
+};
+
+#define HSIC_DPHY_D3_STATE_MASK		0xE00
+#define PM_REGISTER_LENGHT			0x200
+#define PM_SSC0_HSIC_D3_MODE		(0x3 << 16)
+#define PM_STS_DONE					(1 << 0x8)
+#define PMU_HW_PEN0_HSIC_DPHY_MASK	0xE00
+#endif
diff --git a/include/linux/usb/ehci_def.h b/include/linux/usb/ehci_def.h
index daec99a..b773e9b 100644
--- a/include/linux/usb/ehci_def.h
+++ b/include/linux/usb/ehci_def.h
@@ -179,6 +179,7 @@
  */
 	/* HOSTPC: offset 0x84 */
 	u32		hostpc[1];	/* HOSTPC extension */
+#define HOSTPC_ASUS	(1<<0)		/* Auto PHY low power mode */
 #define HOSTPC_PHCD	(1<<22)		/* Phy clock disable */
 #define HOSTPC_PSPD	(3<<25)		/* Port speed detection */
 
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index f1b0dca..0dc17d3 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -22,6 +22,7 @@
 #include <linux/slab.h>
 #include <linux/scatterlist.h>
 #include <linux/types.h>
+#include <linux/workqueue.h>
 #include <linux/usb/ch9.h>
 
 struct usb_ep;
@@ -475,6 +476,7 @@
 
 /**
  * struct usb_gadget - represents a usb slave device
+ * @work: (internal use) Workqueue to be used for sysfs_notify()
  * @ops: Function pointers used to access hardware-specific operations.
  * @ep0: Endpoint zero, used when reading or writing responses to
  *	driver setup() requests
@@ -483,6 +485,11 @@
  * @max_speed: Maximal speed the UDC can handle.  UDC must support this
  *      and all slower speeds.
  * @state: the state we are now (attached, suspended, configured, etc)
+ * @name: Identifies the controller hardware type.  Used in diagnostics
+ *	and sometimes configuration.
+ * @dev: Driver model state for this abstract device.
+ * @out_epnum: last used out ep number
+ * @in_epnum: last used in ep number
  * @sg_supported: true if we can handle scatter-gather
  * @is_otg: True if the USB device port uses a Mini-AB jack, so that the
  *	gadget driver must provide a USB OTG descriptor.
@@ -495,11 +502,8 @@
  *	only supports HNP on a different root port.
  * @b_hnp_enable: OTG device feature flag, indicating that the A-Host
  *	enabled HNP support.
- * @name: Identifies the controller hardware type.  Used in diagnostics
- *	and sometimes configuration.
- * @dev: Driver model state for this abstract device.
- * @out_epnum: last used out ep number
- * @in_epnum: last used in ep number
+ * @quirk_ep_out_aligned_size: epout requires buffer size to be aligned to
+ *	MaxPacketSize.
  *
  * Gadgets have a mostly-portable "gadget driver" implementing device
  * functions, handling all usb configurations and interfaces.  Gadget
@@ -520,6 +524,7 @@
  * device is acting as a B-Peripheral (so is_a_peripheral is false).
  */
 struct usb_gadget {
+	struct work_struct		work;
 	/* readonly to gadget driver */
 	const struct usb_gadget_ops	*ops;
 	struct usb_ep			*ep0;
@@ -527,17 +532,21 @@
 	enum usb_device_speed		speed;
 	enum usb_device_speed		max_speed;
 	enum usb_device_state		state;
+	const char			*name;
+	struct device			dev;
+	unsigned			out_epnum;
+	unsigned			in_epnum;
+
 	unsigned			sg_supported:1;
 	unsigned			is_otg:1;
 	unsigned			is_a_peripheral:1;
 	unsigned			b_hnp_enable:1;
 	unsigned			a_hnp_support:1;
 	unsigned			a_alt_hnp_support:1;
-	const char			*name;
-	struct device			dev;
-	unsigned			out_epnum;
-	unsigned			in_epnum;
+	unsigned			host_request_flag:1;
+	unsigned			quirk_ep_out_aligned_size:1;
 };
+#define work_to_gadget(w)	(container_of((w), struct usb_gadget, work))
 
 static inline void set_gadget_data(struct usb_gadget *gadget, void *data)
 	{ dev_set_drvdata(&gadget->dev, data); }
@@ -554,6 +563,23 @@
 
 
 /**
+ * usb_ep_align_maybe - returns @len aligned to ep's maxpacketsize if gadget
+ *	requires quirk_ep_out_aligned_size, otherwise reguens len.
+ * @g: controller to check for quirk
+ * @ep: the endpoint whose maxpacketsize is used to align @len
+ * @len: buffer size's length to align to @ep's maxpacketsize
+ *
+ * This helper is used in case it's required for any reason to check and maybe
+ * align buffer's size to an ep's maxpacketsize.
+ */
+static inline size_t
+usb_ep_align_maybe(struct usb_gadget *g, struct usb_ep *ep, size_t len)
+{
+	return !g->quirk_ep_out_aligned_size ? len :
+			round_up(len, (size_t)ep->desc->wMaxPacketSize);
+}
+
+/**
  * gadget_is_dualspeed - return true iff the hardware handles high speed
  * @g: controller that might support both high and full speeds
  */
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index f5f5c7d..7545e78 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -22,6 +22,7 @@
 #ifdef __KERNEL__
 
 #include <linux/rwsem.h>
+#include <linux/wakelock.h>
 
 #define MAX_TOPO_LEVEL		6
 
@@ -86,6 +87,7 @@
 	struct urb		*status_urb;	/* the current status urb */
 #ifdef CONFIG_PM_RUNTIME
 	struct work_struct	wakeup_work;	/* for remote wakeup */
+	struct wake_lock	wake_lock;/* for add time-delay */
 #endif
 
 	/*
@@ -110,6 +112,7 @@
 #define HCD_FLAG_WAKEUP_PENDING		4	/* root hub is resuming? */
 #define HCD_FLAG_RH_RUNNING		5	/* root hub is running? */
 #define HCD_FLAG_DEAD			6	/* controller has died? */
+#define HCD_FLAG_IRQ_DISABLED	 7	/* Interrupt was disabled */
 
 	/* The flags can be tested using these macros; they are likely to
 	 * be slightly faster than test_bit().
@@ -120,6 +123,7 @@
 #define HCD_WAKEUP_PENDING(hcd)	((hcd)->flags & (1U << HCD_FLAG_WAKEUP_PENDING))
 #define HCD_RH_RUNNING(hcd)	((hcd)->flags & (1U << HCD_FLAG_RH_RUNNING))
 #define HCD_DEAD(hcd)		((hcd)->flags & (1U << HCD_FLAG_DEAD))
+#define HCD_IRQ_DISABLED(hcd)	((hcd)->flags & (1U << HCD_FLAG_IRQ_DISABLED))
 
 	/* Flags that get set only during HCD registration or removal. */
 	unsigned		rh_registered:1;/* is root hub registered? */
@@ -132,6 +136,9 @@
 	unsigned		wireless:1;	/* Wireless USB HCD */
 	unsigned		authorized_default:1;
 	unsigned		has_tt:1;	/* Integrated TT in root hub */
+	unsigned		has_wakeup_irq:1; /* Can IRQ when suspended */
+	unsigned		has_sram:1;	/* Local SRAM for caching */
+	unsigned		sram_no_payload:1; /* sram not for payload */
 
 	unsigned int		irq;		/* irq allocated */
 	void __iomem		*regs;		/* device memory/io */
@@ -172,6 +179,18 @@
 #define	HC_IS_RUNNING(state) ((state) & __ACTIVE)
 #define	HC_IS_SUSPENDED(state) ((state) & __SUSPEND)
 
+#ifdef CONFIG_USB_OTG
+	/* some otg HCDs need this to get USB_DEVICE_ADD and USB_DEVICE_REMOVE
+	 * from root hub, we do not want to use USB notification chain, since
+	 * it would be a over kill to use high level notification.
+	 */
+	void (*otg_notify) (struct usb_device *udev, unsigned action);
+#endif
+
+#ifdef CONFIG_USB_HCD_HSIC
+	void (*hsic_notify)(struct usb_device *udev, unsigned action);
+#endif
+
 	/* more shared queuing code would be good; it should support
 	 * smarter scheduling, handle transaction translators, etc;
 	 * input size of periodic table to an interrupt scheduler.
@@ -410,7 +429,7 @@
 extern void usb_hcd_pci_remove(struct pci_dev *dev);
 extern void usb_hcd_pci_shutdown(struct pci_dev *dev);
 
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_PM
 extern const struct dev_pm_ops usb_hcd_pci_pm_ops;
 #endif
 #endif /* CONFIG_PCI */
diff --git a/include/linux/usb/phy.h b/include/linux/usb/phy.h
index 6b5978f..47747c2 100644
--- a/include/linux/usb/phy.h
+++ b/include/linux/usb/phy.h
@@ -18,6 +18,7 @@
 	USB_EVENT_ID,           /* id was grounded */
 	USB_EVENT_CHARGER,      /* usb dedicated charger */
 	USB_EVENT_ENUMERATED,   /* gadget driver enumerated */
+	USB_EVENT_DRIVE_VBUS,	/* drive vbus request */
 };
 
 /* associate a type with PHY */
@@ -27,6 +28,12 @@
 	USB_PHY_TYPE_USB3,
 };
 
+/* usb2 phy interface type */
+enum usb_phy_intf {
+	USB2_PHY_ULPI,
+	USB2_PHY_UTMI,
+};
+
 /* OTG defines lots of enumeration states before device reset */
 enum usb_otg_state {
 	OTG_STATE_UNDEFINED = 0,
@@ -51,6 +58,12 @@
 	OTG_STATE_A_VBUS_ERR,
 };
 
+enum vbus_state {
+	UNKNOW_STATE,
+	VBUS_ENABLED,			/* vbus at normal state */
+	VBUS_DISABLED,			/* vbus disabled by a_bus_drop */
+};
+
 struct usb_phy;
 struct usb_otg;
 
@@ -68,6 +81,7 @@
 	unsigned int		 flags;
 
 	enum usb_phy_type	type;
+	enum usb_phy_intf	intf;
 	enum usb_otg_state	state;
 	enum usb_phy_events	last_event;
 
@@ -84,6 +98,11 @@
 	u16			port_status;
 	u16			port_change;
 
+	/* Porivde sysfs interface to userspace for set a_bus_drop argument */
+	struct class *usb_otg_class;
+	struct device *class_dev;
+	int vbus_state;
+
 	/* to support controllers that have multiple transceivers */
 	struct list_head	head;
 
@@ -107,6 +126,14 @@
 			enum usb_device_speed speed);
 	int	(*notify_disconnect)(struct usb_phy *x,
 			enum usb_device_speed speed);
+
+	/* check charger status */
+	int	(*get_chrg_status)(struct usb_phy *x, void *data);
+	/* check ID status */
+	int	(*get_id_status)(struct usb_phy *x, void *data);
+
+	/* for a_bus_drop handler fromed user space */
+	void (*a_bus_drop)(struct usb_phy *phy);
 };
 
 /**
@@ -297,4 +324,24 @@
 		return "UNKNOWN PHY TYPE";
 	}
 }
+
+static inline int
+otg_get_chrg_status(struct usb_phy *x, void *data)
+{
+	if (x && x->get_chrg_status)
+		return x->get_chrg_status(x, data);
+
+	return -ENOTSUPP;
+}
+
+static inline int
+otg_get_id_status(struct usb_phy *x, void *data)
+{
+	if (x && x->get_id_status)
+		return x->get_id_status(x, data);
+
+	return -ENOTSUPP;
+}
+
+void otg_uevent_trigger(struct usb_phy *phy);
 #endif /* __LINUX_USB_PHY_H */
diff --git a/include/linux/usb/xhci-ush-hsic-pci.h b/include/linux/usb/xhci-ush-hsic-pci.h
new file mode 100644
index 0000000..442e313
--- /dev/null
+++ b/include/linux/usb/xhci-ush-hsic-pci.h
@@ -0,0 +1,95 @@
+#ifndef XHCI_USH_HSIC_PCI_h
+#define XHCI_USH_HSIC_PCI_h
+
+#include <linux/usb.h>
+#include <linux/wakelock.h>
+
+/* CHT ID MUX register in USH MMIO */
+#define DUAL_ROLE_CFG0			0x80D8
+#define SW_IDPIN_EN			(1 << 21)
+#define SW_IDPIN			(1 << 20)
+
+#define DUAL_ROLE_CFG1			0x80DC
+#define SUS				(1 << 29)
+
+#define HSIC_HUB_RESET_TIME   10
+#define HSIC_ENABLE_SIZE      2
+#define HSIC_DURATION_SIZE    7
+#define HSIC_DELAY_SIZE       8
+
+#define HSIC_AUTOSUSPEND                     0
+#define HSIC_PORT_INACTIVITYDURATION              500
+#define HSIC_BUS_INACTIVITYDURATION              500
+#define HSIC_REMOTEWAKEUP                       1
+
+#define USH_PCI_ID                     0x0F35
+#define USH_REENUM_DELAY_FFRD8_PR0     600000
+#define USH_REENUM_DELAY               20000
+
+enum wlock_state {
+	UNLOCKED,
+	LOCKED
+};
+
+enum s3_state {
+	RESUMED,
+	RESUMING,
+	SUSPENDED,
+	SUSPENDING
+};
+
+struct ush_hsic_priv {
+	struct delayed_work  hsic_aux;
+	wait_queue_head_t    aux_wq;
+	struct mutex         hsic_mutex;
+	struct mutex         wlock_mutex;
+	unsigned             hsic_mutex_init:1;
+	unsigned             aux_wq_init:1;
+	unsigned             hsic_aux_irq_enable:1;
+	unsigned             hsic_wakeup_irq_enable:1;
+	unsigned             hsic_aux_finish:1;
+	unsigned             hsic_enable_created:1;
+	unsigned             hsic_lock_init:1;
+	unsigned             port_disconnect:1;
+
+	unsigned             remoteWakeup_enable;
+	unsigned             autosuspend_enable;
+	unsigned             aux_gpio;
+	unsigned             wakeup_gpio;
+	unsigned             port_inactivityDuration;
+	unsigned             bus_inactivityDuration;
+	unsigned             reenumeration_delay;
+	spinlock_t           hsic_lock;
+	struct	wake_lock    resume_wake_lock;
+	/* Root hub device */
+	struct usb_device           *rh_dev;
+	struct usb_device           *modem_dev;
+	struct workqueue_struct     *work_queue;
+	struct work_struct          wakeup_work;
+	struct notifier_block       hsicdev_nb;
+	struct notifier_block       hsic_pm_nb;
+	struct notifier_block       hsic_s3_entry_nb;
+	struct wake_lock            s3_wake_lock;
+	enum wlock_state            s3_wlock_state;
+	enum s3_state               s3_rt_state;
+	int		hsic_port_num;
+};
+
+enum {
+	PROBE,
+	REMOVE
+};
+
+struct ush_hsic_pdata {
+	unsigned                has_modem:1;     /* has modem or not */
+	unsigned                enabled:1;       /* enable flag */
+	unsigned		no_power_gate:1; /* no power gating on d3 */
+	int                     aux_gpio;
+	int                     wakeup_gpio;
+	int                     reenum_delay;
+	int			hsic_port_num;
+};
+
+static int hsic_notify(struct notifier_block *self,
+		unsigned long action, void *dev);
+#endif
diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
index bf99cd0..6303568 100644
--- a/include/linux/usb_usual.h
+++ b/include/linux/usb_usual.h
@@ -66,7 +66,9 @@
 	US_FLAG(INITIAL_READ10,	0x00100000)			\
 		/* Initial READ(10) (and others) must be retried */	\
 	US_FLAG(WRITE_CACHE,	0x00200000)			\
-		/* Write Cache status is not available */
+		/* Write Cache status is not available */	\
+	US_FLAG(NEEDS_CAP16,	0x00400000)
+		/* cannot handle READ_CAPACITY_10 */
 
 #define US_FLAG(name, value)	US_FL_##name = value ,
 enum { US_DO_ALL_FLAGS };
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index b6b215f..14105c2 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -23,6 +23,7 @@
 	struct uid_gid_map	projid_map;
 	atomic_t		count;
 	struct user_namespace	*parent;
+	int			level;
 	kuid_t			owner;
 	kgid_t			group;
 	unsigned int		proc_inum;
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 9ff8645..72398ee 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -70,6 +70,10 @@
 
 bool virtqueue_enable_cb(struct virtqueue *vq);
 
+unsigned virtqueue_enable_cb_prepare(struct virtqueue *vq);
+
+bool virtqueue_poll(struct virtqueue *vq, unsigned);
+
 bool virtqueue_enable_cb_delayed(struct virtqueue *vq);
 
 void *virtqueue_detach_unused_buf(struct virtqueue *vq);
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 60280e7..569d33b6 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -805,6 +805,63 @@
 	__ret;								\
 })
 
+#define __wait_event_interruptible_lock_irq_timeout(wq, condition,	\
+						    lock, ret)		\
+do {									\
+	DEFINE_WAIT(__wait);						\
+									\
+	for (;;) {							\
+		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);	\
+		if (condition)						\
+			break;						\
+		if (signal_pending(current)) {				\
+			ret = -ERESTARTSYS;				\
+			break;						\
+		}							\
+		spin_unlock_irq(&lock);					\
+		ret = schedule_timeout(ret);				\
+		spin_lock_irq(&lock);					\
+		if (!ret)						\
+			break;						\
+	}								\
+	finish_wait(&wq, &__wait);					\
+} while (0)
+
+/**
+ * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets true or a timeout elapses.
+ *		The condition is checked under the lock. This is expected
+ *		to be called with the lock taken.
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ * @lock: a locked spinlock_t, which will be released before schedule()
+ *	  and reacquired afterwards.
+ * @timeout: timeout, in jiffies
+ *
+ * The process is put to sleep (TASK_INTERRUPTIBLE) until the
+ * @condition evaluates to true or signal is received. The @condition is
+ * checked each time the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * This is supposed to be called while holding the lock. The lock is
+ * dropped before going to sleep and is reacquired afterwards.
+ *
+ * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
+ * was interrupted by a signal, and the remaining jiffies otherwise
+ * if the condition evaluated to true before the timeout elapsed.
+ */
+#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock,	\
+						  timeout)		\
+({									\
+	int __ret = timeout;						\
+									\
+	if (!(condition))						\
+		__wait_event_interruptible_lock_irq_timeout(		\
+					wq, condition, lock, __ret);	\
+	__ret;								\
+})
+
 
 /*
  * These are the old interfaces to sleep waiting for an event.
diff --git a/include/linux/wlan_plat.h b/include/linux/wlan_plat.h
index 0607a6e..ae895f6 100644
--- a/include/linux/wlan_plat.h
+++ b/include/linux/wlan_plat.h
@@ -26,6 +26,8 @@
 	int (*get_mac_addr)(unsigned char *buf);
 	int (*get_wake_irq)(void);
 	void *(*get_country_code)(char *ccode, u32 flags);
+	char *nvram_id;
+	bool use_fast_irq;
 #ifdef CONFIG_PARTIALRESUME
 #define WIFI_PR_INIT			0
 #define WIFI_PR_NOTIFY_RESUME		1
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 623488f..8888a61 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -602,7 +602,7 @@
 
 #ifdef CONFIG_FREEZER
 extern void freeze_workqueues_begin(void);
-extern bool freeze_workqueues_busy(void);
+extern bool freeze_workqueues_busy(char **busy_wq_name);
 extern void thaw_workqueues(void);
 #endif /* CONFIG_FREEZER */
 
diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h
new file mode 100644
index 0000000..b2c2948
--- /dev/null
+++ b/include/linux/ww_mutex.h
@@ -0,0 +1,379 @@
+/*
+ * Wound/Wait Mutexes: blocking mutual exclusion locks with deadlock avoidance
+ *
+ * Original mutex implementation started by Ingo Molnar:
+ *
+ *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ *
+ * Wound/wait implementation:
+ *  Copyright (C) 2013 Canonical Ltd.
+ *
+ * This file contains the main data structure and API definitions.
+ */
+
+#ifndef __LINUX_WW_MUTEX_H
+#define __LINUX_WW_MUTEX_H
+
+#include <linux/mutex.h>
+#include <linux/debug_locks.h>
+
+struct ww_class {
+	atomic_long_t stamp;
+	struct lock_class_key acquire_key;
+	struct lock_class_key mutex_key;
+	const char *acquire_name;
+	const char *mutex_name;
+};
+
+struct ww_acquire_ctx {
+	struct task_struct *task;
+	unsigned long stamp;
+	unsigned acquired;
+#ifdef CONFIG_DEBUG_MUTEXES
+	unsigned done_acquire;
+	struct ww_class *ww_class;
+	struct ww_mutex *contending_lock;
+#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct lockdep_map dep_map;
+#endif
+#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
+	unsigned deadlock_inject_interval;
+	unsigned deadlock_inject_countdown;
+#endif
+};
+
+struct ww_mutex {
+	struct mutex base;
+	struct ww_acquire_ctx *ctx;
+#ifdef CONFIG_DEBUG_MUTEXES
+	struct ww_class *ww_class;
+#endif
+};
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __WW_CLASS_MUTEX_INITIALIZER(lockname, ww_class) \
+		, .ww_class = &ww_class
+#else
+# define __WW_CLASS_MUTEX_INITIALIZER(lockname, ww_class)
+#endif
+
+#define __WW_CLASS_INITIALIZER(ww_class) \
+		{ .stamp = ATOMIC_LONG_INIT(0) \
+		, .acquire_name = #ww_class "_acquire" \
+		, .mutex_name = #ww_class "_mutex" }
+
+#define __WW_MUTEX_INITIALIZER(lockname, class) \
+		{ .base = { \__MUTEX_INITIALIZER(lockname) } \
+		__WW_CLASS_MUTEX_INITIALIZER(lockname, class) }
+
+#define DEFINE_WW_CLASS(classname) \
+	struct ww_class classname = __WW_CLASS_INITIALIZER(classname)
+
+#define DEFINE_WW_MUTEX(mutexname, ww_class) \
+	struct ww_mutex mutexname = __WW_MUTEX_INITIALIZER(mutexname, ww_class)
+
+/**
+ * ww_mutex_init - initialize the w/w mutex
+ * @lock: the mutex to be initialized
+ * @ww_class: the w/w class the mutex should belong to
+ *
+ * Initialize the w/w mutex to unlocked state and associate it with the given
+ * class.
+ *
+ * It is not allowed to initialize an already locked mutex.
+ */
+static inline void ww_mutex_init(struct ww_mutex *lock,
+				 struct ww_class *ww_class)
+{
+	__mutex_init(&lock->base, ww_class->mutex_name, &ww_class->mutex_key);
+	lock->ctx = NULL;
+#ifdef CONFIG_DEBUG_MUTEXES
+	lock->ww_class = ww_class;
+#endif
+}
+
+/**
+ * ww_acquire_init - initialize a w/w acquire context
+ * @ctx: w/w acquire context to initialize
+ * @ww_class: w/w class of the context
+ *
+ * Initializes an context to acquire multiple mutexes of the given w/w class.
+ *
+ * Context-based w/w mutex acquiring can be done in any order whatsoever within
+ * a given lock class. Deadlocks will be detected and handled with the
+ * wait/wound logic.
+ *
+ * Mixing of context-based w/w mutex acquiring and single w/w mutex locking can
+ * result in undetected deadlocks and is so forbidden. Mixing different contexts
+ * for the same w/w class when acquiring mutexes can also result in undetected
+ * deadlocks, and is hence also forbidden. Both types of abuse will be caught by
+ * enabling CONFIG_PROVE_LOCKING.
+ *
+ * Nesting of acquire contexts for _different_ w/w classes is possible, subject
+ * to the usual locking rules between different lock classes.
+ *
+ * An acquire context must be released with ww_acquire_fini by the same task
+ * before the memory is freed. It is recommended to allocate the context itself
+ * on the stack.
+ */
+static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
+				   struct ww_class *ww_class)
+{
+	ctx->task = current;
+	ctx->stamp = atomic_long_inc_return(&ww_class->stamp);
+	ctx->acquired = 0;
+#ifdef CONFIG_DEBUG_MUTEXES
+	ctx->ww_class = ww_class;
+	ctx->done_acquire = 0;
+	ctx->contending_lock = NULL;
+#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	debug_check_no_locks_freed((void *)ctx, sizeof(*ctx));
+	lockdep_init_map(&ctx->dep_map, ww_class->acquire_name,
+			 &ww_class->acquire_key, 0);
+	mutex_acquire(&ctx->dep_map, 0, 0, _RET_IP_);
+#endif
+#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
+	ctx->deadlock_inject_interval = 1;
+	ctx->deadlock_inject_countdown = ctx->stamp & 0xf;
+#endif
+}
+
+/**
+ * ww_acquire_done - marks the end of the acquire phase
+ * @ctx: the acquire context
+ *
+ * Marks the end of the acquire phase, any further w/w mutex lock calls using
+ * this context are forbidden.
+ *
+ * Calling this function is optional, it is just useful to document w/w mutex
+ * code and clearly designated the acquire phase from actually using the locked
+ * data structures.
+ */
+static inline void ww_acquire_done(struct ww_acquire_ctx *ctx)
+{
+#ifdef CONFIG_DEBUG_MUTEXES
+	lockdep_assert_held(ctx);
+
+	DEBUG_LOCKS_WARN_ON(ctx->done_acquire);
+	ctx->done_acquire = 1;
+#endif
+}
+
+/**
+ * ww_acquire_fini - releases a w/w acquire context
+ * @ctx: the acquire context to free
+ *
+ * Releases a w/w acquire context. This must be called _after_ all acquired w/w
+ * mutexes have been released with ww_mutex_unlock.
+ */
+static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
+{
+#ifdef CONFIG_DEBUG_MUTEXES
+	mutex_release(&ctx->dep_map, 0, _THIS_IP_);
+
+	DEBUG_LOCKS_WARN_ON(ctx->acquired);
+	if (!config_enabled(CONFIG_PROVE_LOCKING))
+		/*
+		 * lockdep will normally handle this,
+		 * but fail without anyway
+		 */
+		ctx->done_acquire = 1;
+
+	if (!config_enabled(CONFIG_DEBUG_LOCK_ALLOC))
+		/* ensure ww_acquire_fini will still fail if called twice */
+		ctx->acquired = ~0U;
+#endif
+}
+
+extern int __must_check __ww_mutex_lock(struct ww_mutex *lock,
+					struct ww_acquire_ctx *ctx);
+extern int __must_check __ww_mutex_lock_interruptible(struct ww_mutex *lock,
+						      struct ww_acquire_ctx *ctx);
+
+/**
+ * ww_mutex_lock - acquire the w/w mutex
+ * @lock: the mutex to be acquired
+ * @ctx: w/w acquire context, or NULL to acquire only a single lock.
+ *
+ * Lock the w/w mutex exclusively for this task.
+ *
+ * Deadlocks within a given w/w class of locks are detected and handled with the
+ * wait/wound algorithm. If the lock isn't immediately avaiable this function
+ * will either sleep until it is (wait case). Or it selects the current context
+ * for backing off by returning -EDEADLK (wound case). Trying to acquire the
+ * same lock with the same context twice is also detected and signalled by
+ * returning -EALREADY. Returns 0 if the mutex was successfully acquired.
+ *
+ * In the wound case the caller must release all currently held w/w mutexes for
+ * the given context and then wait for this contending lock to be available by
+ * calling ww_mutex_lock_slow. Alternatively callers can opt to not acquire this
+ * lock and proceed with trying to acquire further w/w mutexes (e.g. when
+ * scanning through lru lists trying to free resources).
+ *
+ * The mutex must later on be released by the same task that
+ * acquired it. The task may not exit without first unlocking the mutex. Also,
+ * kernel memory where the mutex resides must not be freed with the mutex still
+ * locked. The mutex must first be initialized (or statically defined) before it
+ * can be locked. memset()-ing the mutex to 0 is not allowed. The mutex must be
+ * of the same w/w lock class as was used to initialize the acquire context.
+ *
+ * A mutex acquired with this function must be released with ww_mutex_unlock.
+ */
+static inline int ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+{
+	if (ctx)
+		return __ww_mutex_lock(lock, ctx);
+
+	mutex_lock(&lock->base);
+	return 0;
+}
+
+/**
+ * ww_mutex_lock_interruptible - acquire the w/w mutex, interruptible
+ * @lock: the mutex to be acquired
+ * @ctx: w/w acquire context
+ *
+ * Lock the w/w mutex exclusively for this task.
+ *
+ * Deadlocks within a given w/w class of locks are detected and handled with the
+ * wait/wound algorithm. If the lock isn't immediately avaiable this function
+ * will either sleep until it is (wait case). Or it selects the current context
+ * for backing off by returning -EDEADLK (wound case). Trying to acquire the
+ * same lock with the same context twice is also detected and signalled by
+ * returning -EALREADY. Returns 0 if the mutex was successfully acquired. If a
+ * signal arrives while waiting for the lock then this function returns -EINTR.
+ *
+ * In the wound case the caller must release all currently held w/w mutexes for
+ * the given context and then wait for this contending lock to be available by
+ * calling ww_mutex_lock_slow_interruptible. Alternatively callers can opt to
+ * not acquire this lock and proceed with trying to acquire further w/w mutexes
+ * (e.g. when scanning through lru lists trying to free resources).
+ *
+ * The mutex must later on be released by the same task that
+ * acquired it. The task may not exit without first unlocking the mutex. Also,
+ * kernel memory where the mutex resides must not be freed with the mutex still
+ * locked. The mutex must first be initialized (or statically defined) before it
+ * can be locked. memset()-ing the mutex to 0 is not allowed. The mutex must be
+ * of the same w/w lock class as was used to initialize the acquire context.
+ *
+ * A mutex acquired with this function must be released with ww_mutex_unlock.
+ */
+static inline int __must_check ww_mutex_lock_interruptible(struct ww_mutex *lock,
+							   struct ww_acquire_ctx *ctx)
+{
+	if (ctx)
+		return __ww_mutex_lock_interruptible(lock, ctx);
+	else
+		return mutex_lock_interruptible(&lock->base);
+}
+
+/**
+ * ww_mutex_lock_slow - slowpath acquiring of the w/w mutex
+ * @lock: the mutex to be acquired
+ * @ctx: w/w acquire context
+ *
+ * Acquires a w/w mutex with the given context after a wound case. This function
+ * will sleep until the lock becomes available.
+ *
+ * The caller must have released all w/w mutexes already acquired with the
+ * context and then call this function on the contended lock.
+ *
+ * Afterwards the caller may continue to (re)acquire the other w/w mutexes it
+ * needs with ww_mutex_lock. Note that the -EALREADY return code from
+ * ww_mutex_lock can be used to avoid locking this contended mutex twice.
+ *
+ * It is forbidden to call this function with any other w/w mutexes associated
+ * with the context held. It is forbidden to call this on anything else than the
+ * contending mutex.
+ *
+ * Note that the slowpath lock acquiring can also be done by calling
+ * ww_mutex_lock directly. This function here is simply to help w/w mutex
+ * locking code readability by clearly denoting the slowpath.
+ */
+static inline void
+ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+{
+	int ret;
+#ifdef CONFIG_DEBUG_MUTEXES
+	DEBUG_LOCKS_WARN_ON(!ctx->contending_lock);
+#endif
+	ret = ww_mutex_lock(lock, ctx);
+	(void)ret;
+}
+
+/**
+ * ww_mutex_lock_slow_interruptible - slowpath acquiring of the w/w mutex, interruptible
+ * @lock: the mutex to be acquired
+ * @ctx: w/w acquire context
+ *
+ * Acquires a w/w mutex with the given context after a wound case. This function
+ * will sleep until the lock becomes available and returns 0 when the lock has
+ * been acquired. If a signal arrives while waiting for the lock then this
+ * function returns -EINTR.
+ *
+ * The caller must have released all w/w mutexes already acquired with the
+ * context and then call this function on the contended lock.
+ *
+ * Afterwards the caller may continue to (re)acquire the other w/w mutexes it
+ * needs with ww_mutex_lock. Note that the -EALREADY return code from
+ * ww_mutex_lock can be used to avoid locking this contended mutex twice.
+ *
+ * It is forbidden to call this function with any other w/w mutexes associated
+ * with the given context held. It is forbidden to call this on anything else
+ * than the contending mutex.
+ *
+ * Note that the slowpath lock acquiring can also be done by calling
+ * ww_mutex_lock_interruptible directly. This function here is simply to help
+ * w/w mutex locking code readability by clearly denoting the slowpath.
+ */
+static inline int __must_check
+ww_mutex_lock_slow_interruptible(struct ww_mutex *lock,
+				 struct ww_acquire_ctx *ctx)
+{
+#ifdef CONFIG_DEBUG_MUTEXES
+	DEBUG_LOCKS_WARN_ON(!ctx->contending_lock);
+#endif
+	return ww_mutex_lock_interruptible(lock, ctx);
+}
+
+extern void ww_mutex_unlock(struct ww_mutex *lock);
+
+/**
+ * ww_mutex_trylock - tries to acquire the w/w mutex without acquire context
+ * @lock: mutex to lock
+ *
+ * Trylocks a mutex without acquire context, so no deadlock detection is
+ * possible. Returns 1 if the mutex has been acquired successfully, 0 otherwise.
+ */
+static inline int __must_check ww_mutex_trylock(struct ww_mutex *lock)
+{
+	return mutex_trylock(&lock->base);
+}
+
+/***
+ * ww_mutex_destroy - mark a w/w mutex unusable
+ * @lock: the mutex to be destroyed
+ *
+ * This function marks the mutex uninitialized, and any subsequent
+ * use of the mutex is forbidden. The mutex must not be locked when
+ * this function is called.
+ */
+static inline void ww_mutex_destroy(struct ww_mutex *lock)
+{
+	mutex_destroy(&lock->base);
+}
+
+/**
+ * ww_mutex_is_locked - is the w/w mutex locked
+ * @lock: the mutex to be queried
+ *
+ * Returns 1 if the mutex is locked, 0 if unlocked.
+ */
+static inline bool ww_mutex_is_locked(struct ww_mutex *lock)
+{
+	return mutex_is_locked(&lock->base);
+}
+
+#endif
diff --git a/include/media/lm3559.h b/include/media/lm3559.h
new file mode 100644
index 0000000..2e5986e
--- /dev/null
+++ b/include/media/lm3559.h
@@ -0,0 +1,137 @@
+/*
+ * include/media/lm3559.h
+ *
+ * Copyright (c) 2010-2012 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+#ifndef _LM3559_H_
+#define _LM3559_H_
+
+#include <linux/videodev2.h>
+#include <media/v4l2-subdev.h>
+
+#define LM3559_NAME    "lm3559"
+#define LM3560_NAME    "lm3560"
+
+#define	v4l2_queryctrl_entry_integer(_id, _name,\
+		_minimum, _maximum, _step, \
+		_default_value, _flags)	\
+	{\
+		.id = (_id), \
+		.type = V4L2_CTRL_TYPE_INTEGER, \
+		.name = _name, \
+		.minimum = (_minimum), \
+		.maximum = (_maximum), \
+		.step = (_step), \
+		.default_value = (_default_value),\
+		.flags = (_flags),\
+	}
+#define	v4l2_queryctrl_entry_boolean(_id, _name,\
+		_default_value, _flags)	\
+	{\
+		.id = (_id), \
+		.type = V4L2_CTRL_TYPE_BOOLEAN, \
+		.name = _name, \
+		.minimum = 0, \
+		.maximum = 1, \
+		.step = 1, \
+		.default_value = (_default_value),\
+		.flags = (_flags),\
+	}
+
+#define	s_ctrl_id_entry_integer(_id, _name, \
+		_minimum, _maximum, _step, \
+		_default_value, _flags, \
+		_s_ctrl, _g_ctrl)	\
+	{\
+		.qc = v4l2_queryctrl_entry_integer(_id, _name,\
+				_minimum, _maximum, _step,\
+				_default_value, _flags), \
+		.s_ctrl = _s_ctrl, \
+		.g_ctrl = _g_ctrl, \
+	}
+
+#define	s_ctrl_id_entry_boolean(_id, _name, \
+		_default_value, _flags, \
+		_s_ctrl, _g_ctrl)	\
+	{\
+		.qc = v4l2_queryctrl_entry_boolean(_id, _name,\
+				_default_value, _flags), \
+		.s_ctrl = _s_ctrl, \
+		.g_ctrl = _g_ctrl, \
+	}
+
+/* Value settings for Flash Time-out Duration*/
+#define LM3559_DEFAULT_TIMEOUT          480U
+#define LM3559_DEFAULT_TIMEOUT_SETTING 0x07  /* 480ms */
+#define LM3559_MIN_TIMEOUT              32U
+#define LM3559_MAX_TIMEOUT              1024U
+#define LM3559_TIMEOUT_STEPSIZE         32U
+
+/* Flash modes */
+#define LM3559_MODE_SHUTDOWN            0
+#define LM3559_MODE_INDICATOR           1
+#define LM3559_MODE_TORCH               2
+#define LM3559_MODE_FLASH               3
+
+/* timer delay time */
+#define LM3559_TIMER_DELAY		5
+
+/* Percentage <-> value macros */
+#define LM3559_MIN_PERCENT                   0U
+#define LM3559_MAX_PERCENT                   100U
+#define LM3559_CLAMP_PERCENTAGE(val) \
+	clamp(val, LM3559_MIN_PERCENT, LM3559_MAX_PERCENT)
+/* we add 1 to the value to end up in the range [min..100%]
+ * rather than in the range [0..max] where max < 100 */
+#define LM3559_VALUE_TO_PERCENT(v, step)     ((((v)+1) * (step)) / 100)
+/* we subtract 1 from the percentage to make sure we round down into
+ * the valid range of [0..max] and not [1..max+1] */
+#define LM3559_PERCENT_TO_VALUE(p, step)     ((((p)-1) * 100) / (step))
+
+/* Flash brightness in percentage */
+#define LM3559_FLASH_DEFAULT_BRIGHTNESS		100
+
+/* Torch brightness, input is percentage, output is [0..7] */
+#define LM3559_TORCH_STEP                    1250
+#define LM3559_TORCH_DEFAULT              2
+#define LM3559_TORCH_DEFAULT_BRIGHTNESS \
+	LM3559_VALUE_TO_PERCENT(LM3559_TORCH_DEFAULT, LM3559_TORCH_STEP)
+
+/* Indicator brightness, input is percentage, output is [0..7] */
+#define LM3559_INDICATOR_STEP                1250
+#define LM3559_INDICATOR_DEFAULT          1
+#define LM3559_INDICATOR_DEFAULT_BRIGHTNESS \
+	LM3559_VALUE_TO_PERCENT(LM3559_INDICATOR_DEFAULT, LM3559_INDICATOR_STEP)
+
+/*
+ * lm3559_platform_data - Flash controller platform data
+ */
+struct lm3559_platform_data {
+	int gpio_torch;
+	int gpio_strobe;
+	int gpio_reset;
+
+	unsigned int current_limit;
+	unsigned int envm_tx2;
+	unsigned int tx2_polarity;
+	unsigned int flash_current_limit;
+	unsigned int disable_tx2;
+};
+
+#endif /* _LM3559_H_ */
+
diff --git a/include/media/media-device.h b/include/media/media-device.h
index eaade98..12155a9 100644
--- a/include/media/media-device.h
+++ b/include/media/media-device.h
@@ -45,6 +45,7 @@
  * @entities:	List of registered entities
  * @lock:	Entities list lock
  * @graph_mutex: Entities graph operation lock
+ * @link_notify: Link state change notification callback
  *
  * This structure represents an abstract high-level media device. It allows easy
  * access to entities and provides basic media device-level support. The
@@ -75,10 +76,14 @@
 	/* Serializes graph operations. */
 	struct mutex graph_mutex;
 
-	int (*link_notify)(struct media_pad *source,
-			   struct media_pad *sink, u32 flags);
+	int (*link_notify)(struct media_link *link, u32 flags,
+			   unsigned int notification);
 };
 
+/* Supported link_notify @notification values. */
+#define MEDIA_DEV_NOTIFY_PRE_LINK_CH	0
+#define MEDIA_DEV_NOTIFY_POST_LINK_CH	1
+
 /* media_devnode to media_device */
 #define to_media_device(node) container_of(node, struct media_device, devnode)
 
diff --git a/include/media/media-entity.h b/include/media/media-entity.h
index 0c16f51..11bd9d0 100644
--- a/include/media/media-entity.h
+++ b/include/media/media-entity.h
@@ -23,6 +23,7 @@
 #ifndef _MEDIA_ENTITY_H
 #define _MEDIA_ENTITY_H
 
+#include <linux/kernel.h>
 #include <linux/list.h>
 #include <linux/media.h>
 
diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
index 7343a27..47ada23 100644
--- a/include/media/v4l2-ctrls.h
+++ b/include/media/v4l2-ctrls.h
@@ -22,6 +22,7 @@
 #define _V4L2_CTRLS_H
 
 #include <linux/list.h>
+#include <linux/mutex.h>
 #include <linux/videodev2.h>
 
 /* forward references */
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index 5298d67..d3830a9 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -163,6 +163,10 @@
 	int (*g_std)(struct v4l2_subdev *sd, v4l2_std_id *norm);
 	int (*s_std)(struct v4l2_subdev *sd, v4l2_std_id norm);
 	long (*ioctl)(struct v4l2_subdev *sd, unsigned int cmd, void *arg);
+#ifdef CONFIG_COMPAT
+	long (*compat_ioctl32)(struct v4l2_subdev *sd, unsigned int cmd,
+			       unsigned long arg);
+#endif
 #ifdef CONFIG_VIDEO_ADV_DEBUG
 	int (*g_register)(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg);
 	int (*s_register)(struct v4l2_subdev *sd, const struct v4l2_dbg_register *reg);
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 25d5a98..2cbf0ba 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -73,6 +73,10 @@
 						   const struct in6_addr *addr);
 #endif
 
+bool ipv6_chk_custom_prefix(const struct in6_addr *addr,
+				   const unsigned int prefix_len,
+				   struct net_device *dev);
+
 extern int			ipv6_chk_prefix(const struct in6_addr *addr,
 						struct net_device *dev);
 
@@ -86,6 +90,9 @@
 					       const struct in6_addr *daddr,
 					       unsigned int srcprefs,
 					       struct in6_addr *saddr);
+extern int			__ipv6_get_lladdr(struct inet6_dev *idev,
+						  struct in6_addr *addr,
+						  unsigned char banned_flags);
 extern int			ipv6_get_lladdr(struct net_device *dev,
 						struct in6_addr *addr,
 						unsigned char banned_flags);
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index d84ec07..db43501 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -104,6 +104,7 @@
 enum {
 	HCI_SETUP,
 	HCI_AUTO_OFF,
+	HCI_RFKILLED,
 	HCI_MGMT,
 	HCI_PAIRABLE,
 	HCI_SERVICE_CACHE,
diff --git a/include/net/cipso_ipv4.h b/include/net/cipso_ipv4.h
index a7a683e..a8c2ef6 100644
--- a/include/net/cipso_ipv4.h
+++ b/include/net/cipso_ipv4.h
@@ -290,6 +290,7 @@
 	unsigned char err_offset = 0;
 	u8 opt_len = opt[1];
 	u8 opt_iter;
+	u8 tag_len;
 
 	if (opt_len < 8) {
 		err_offset = 1;
@@ -302,11 +303,12 @@
 	}
 
 	for (opt_iter = 6; opt_iter < opt_len;) {
-		if (opt[opt_iter + 1] > (opt_len - opt_iter)) {
+		tag_len = opt[opt_iter + 1];
+		if ((tag_len == 0) || (opt[opt_iter + 1] > (opt_len - opt_iter))) {
 			err_offset = opt_iter + 1;
 			goto out;
 		}
-		opt_iter += opt[opt_iter + 1];
+		opt_iter += tag_len;
 	}
 
 out:
diff --git a/include/net/dst.h b/include/net/dst.h
index 1f8fd10..e0c97f5 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -477,10 +477,22 @@
 {
 	return dst_orig;
 } 
+
+static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
+{
+	return NULL;
+}
+
 #else
 extern struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
 				     const struct flowi *fl, struct sock *sk,
 				     int flags);
+
+/* skb attached with this dst needs transformation if dst->xfrm is valid */
+static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
+{
+	return dst->xfrm;
+}
 #endif
 
 #endif /* _NET_DST_H */
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index 93024a4..8e0b6c8 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -61,6 +61,7 @@
 	struct list_head	ops_list;	/* private */
 	struct list_head	family_list;	/* private */
 	struct list_head	mcast_groups;	/* private */
+	struct module		*module;
 };
 
 /**
@@ -121,9 +122,24 @@
 	struct list_head	ops_list;
 };
 
-extern int genl_register_family(struct genl_family *family);
-extern int genl_register_family_with_ops(struct genl_family *family,
+extern int __genl_register_family(struct genl_family *family);
+
+static inline int genl_register_family(struct genl_family *family)
+{
+	family->module = THIS_MODULE;
+	return __genl_register_family(family);
+}
+
+extern int __genl_register_family_with_ops(struct genl_family *family,
 	struct genl_ops *ops, size_t n_ops);
+
+static inline int genl_register_family_with_ops(struct genl_family *family,
+	struct genl_ops *ops, size_t n_ops)
+{
+	family->module = THIS_MODULE;
+	return __genl_register_family_with_ops(family, ops, n_ops);
+}
+
 extern int genl_unregister_family(struct genl_family *family);
 extern int genl_register_ops(struct genl_family *, struct genl_ops *ops);
 extern int genl_unregister_ops(struct genl_family *, struct genl_ops *ops);
diff --git a/include/net/ip.h b/include/net/ip.h
index 02fc145..2228b7a 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -258,9 +258,11 @@
 
 extern void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more);
 
-static inline void ip_select_ident(struct iphdr *iph, struct dst_entry *dst, struct sock *sk)
+static inline void ip_select_ident(struct sk_buff *skb, struct dst_entry *dst, struct sock *sk)
 {
-	if (iph->frag_off & htons(IP_DF)) {
+	struct iphdr *iph = ip_hdr(skb);
+
+	if ((iph->frag_off & htons(IP_DF)) && !skb->local_df) {
 		/* This is only to work around buggy Windows95/2000
 		 * VJ compression implementations.  If the ID field
 		 * does not change, they drop every other packet in
@@ -272,9 +274,11 @@
 		__ip_select_ident(iph, dst, 0);
 }
 
-static inline void ip_select_ident_more(struct iphdr *iph, struct dst_entry *dst, struct sock *sk, int more)
+static inline void ip_select_ident_more(struct sk_buff *skb, struct dst_entry *dst, struct sock *sk, int more)
 {
-	if (iph->frag_off & htons(IP_DF)) {
+	struct iphdr *iph = ip_hdr(skb);
+
+	if ((iph->frag_off & htons(IP_DF)) && !skb->local_df) {
 		if (sk && inet_sk(sk)->inet_daddr) {
 			iph->id = htons(inet_sk(sk)->inet_id);
 			inet_sk(sk)->inet_id += 1 + more;
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 2a601e7..665e0ce 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -165,6 +165,7 @@
 static inline void rt6_clean_expires(struct rt6_info *rt)
 {
 	rt->rt6i_flags &= ~RTF_EXPIRES;
+	rt->dst.expires = 0;
 }
 
 static inline void rt6_set_expires(struct rt6_info *rt, unsigned long expires)
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 25b4500..ca32756 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -194,11 +194,9 @@
 	       skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
 }
 
-static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt, struct in6_addr *dest)
+static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt)
 {
-	if (rt->rt6i_flags & RTF_GATEWAY)
-		return &rt->rt6i_gateway;
-	return dest;
+	return &rt->rt6i_gateway;
 }
 
 #endif
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 09b1360..7ac7f91 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -113,7 +113,7 @@
 				   __be32 key);
 
 int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
-		  const struct tnl_ptk_info *tpi, bool log_ecn_error);
+		  const struct tnl_ptk_info *tpi, int hdr_len, bool log_ecn_error);
 int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
 			 struct ip_tunnel_parm *p);
 int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
@@ -141,20 +141,6 @@
 	return INET_ECN_encapsulate(tos, inner);
 }
 
-static inline void tunnel_ip_select_ident(struct sk_buff *skb,
-					  const struct iphdr  *old_iph,
-					  struct dst_entry *dst)
-{
-	struct iphdr *iph = ip_hdr(skb);
-
-	/* Use inner packet iph-id if possible. */
-	if (skb->protocol == htons(ETH_P_IP) && old_iph->id)
-		iph->id	= old_iph->id;
-	else
-		__ip_select_ident(iph, dst,
-				  (skb_shinfo(skb)->gso_segs ?: 1) - 1);
-}
-
 static inline void iptunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	int err;
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 885898a..4e50d36 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -1484,6 +1484,7 @@
 	IEEE80211_HW_SUPPORTS_RC_TABLE			= 1<<24,
 	IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF		= 1<<25,
 	IEEE80211_HW_TIMING_BEACON_ONLY			= 1<<26,
+	IEEE80211_HW_SUPPORTS_HT_CCK_RATES		= 1<<27,
 };
 
 /**
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index 745bf74..5043f8b 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -119,7 +119,7 @@
  * if RFC 3831 IPv6-over-Fibre Channel is ever implemented it may
  * also need a pad of 2.
  */
-static int ndisc_addr_option_pad(unsigned short type)
+static inline int ndisc_addr_option_pad(unsigned short type)
 {
 	switch (type) {
 	case ARPHRD_INFINIBAND: return 2;
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index e7f4e21..63ed1d1 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -682,13 +682,19 @@
 	u64	rate_bps;
 	u32	mult;
 	u16	overhead;
+	u8	linklayer;
 	u8	shift;
 };
 
 static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
 				unsigned int len)
 {
-	return ((u64)(len + r->overhead) * r->mult) >> r->shift;
+	len += r->overhead;
+
+	if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
+		return ((u64)(DIV_ROUND_UP(len,48)*53) * r->mult) >> r->shift;
+
+	return ((u64)len * r->mult) >> r->shift;
 }
 
 extern void psched_ratecfg_precompute(struct psched_ratecfg *r, const struct tc_ratespec *conf);
@@ -699,6 +705,7 @@
 	memset(res, 0, sizeof(*res));
 	res->rate = r->rate_bps >> 3;
 	res->overhead = r->overhead;
+	res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
 }
 
 #endif
diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
index 6ca975b..c2e542b 100644
--- a/include/net/secure_seq.h
+++ b/include/net/secure_seq.h
@@ -3,7 +3,6 @@
 
 #include <linux/types.h>
 
-extern void net_secret_init(void);
 extern __u32 secure_ip_id(__be32 daddr);
 extern __u32 secure_ipv6_id(const __be32 daddr[4]);
 extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
diff --git a/include/net/sock.h b/include/net/sock.h
index 96f8a26..be058cd 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -231,6 +231,7 @@
   *	@sk_wmem_queued: persistent queue size
   *	@sk_forward_alloc: space allocated forward
   *	@sk_allocation: allocation mode
+  *	@sk_pacing_rate: Pacing rate (if supported by transport/packet scheduler)
   *	@sk_sndbuf: size of send buffer in bytes
   *	@sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
   *		   %SO_OOBINLINE settings, %SO_TIMESTAMPING settings
@@ -356,6 +357,7 @@
 	kmemcheck_bitfield_end(flags);
 	int			sk_wmem_queued;
 	gfp_t			sk_allocation;
+	u32			sk_pacing_rate; /* bytes per second */
 	netdev_features_t	sk_route_caps;
 	netdev_features_t	sk_route_nocaps;
 	int			sk_gso_type;
diff --git a/include/net/tcp.h b/include/net/tcp.h
index cc75af4..d8cd476 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -287,6 +287,7 @@
 extern int sysctl_tcp_early_retrans;
 extern int sysctl_tcp_limit_output_bytes;
 extern int sysctl_tcp_challenge_ack_limit;
+extern int sysctl_tcp_min_tso_segs;
 extern int sysctl_tcp_default_init_rwnd;
 
 extern atomic_long_t tcp_memory_allocated;
diff --git a/include/net/udp.h b/include/net/udp.h
index 065f379..ad99eed 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -181,6 +181,7 @@
 extern void udp_err(struct sk_buff *, u32);
 extern int udp_sendmsg(struct kiocb *iocb, struct sock *sk,
 			    struct msghdr *msg, size_t len);
+extern int udp_push_pending_frames(struct sock *sk);
 extern void udp_flush_pending_frames(struct sock *sk);
 extern int udp_rcv(struct sk_buff *skb);
 extern int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index cc64587..22c2a3c 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -7,6 +7,7 @@
 #include <linux/blkdev.h>
 #include <scsi/scsi.h>
 #include <linux/atomic.h>
+#include <linux/ratelimit.h>
 
 struct device;
 struct request_queue;
@@ -226,6 +227,15 @@
 #define sdev_printk(prefix, sdev, fmt, a...)	\
 	dev_printk(prefix, &(sdev)->sdev_gendev, fmt, ##a)
 
+#define sdev_printk_ratelimited(prefix, sdev, fmt, a...)  \
+do {                                                      \
+	static DEFINE_RATELIMIT_STATE(_rs,                       \
+		DEFAULT_RATELIMIT_INTERVAL,                            \
+		DEFAULT_RATELIMIT_BURST);                              \
+	if (__ratelimit(&_rs))                                   \
+		dev_printk(prefix, &(sdev)->sdev_gendev, fmt, ##a);    \
+} while (0)
+
 #define scmd_printk(prefix, scmd, fmt, a...)				\
         (scmd)->request->rq_disk ?					\
 	sdev_printk(prefix, (scmd)->device, "[%s] " fmt,		\
diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
index 9031a26..9307359 100644
--- a/include/sound/compress_driver.h
+++ b/include/sound/compress_driver.h
@@ -32,6 +32,7 @@
 #include <sound/pcm.h>
 
 struct snd_compr_ops;
+struct snd_pcm_substream;
 
 /**
  * struct snd_compr_runtime: runtime stream description
@@ -48,6 +49,8 @@
  *	the ring buffer
  * @total_bytes_transferred: cumulative bytes transferred by offload DSP
  * @sleep: poll sleep
+ * @wait: drain wait queue
+ * @drain_wake: condition for drain wake
  */
 struct snd_compr_runtime {
 	snd_pcm_state_t state;
@@ -59,6 +62,9 @@
 	u64 total_bytes_available;
 	u64 total_bytes_transferred;
 	wait_queue_head_t sleep;
+	wait_queue_head_t wait;
+	unsigned int drain_wake;
+	struct snd_pcm_substream *fe_substream;
 	void *private_data;
 };
 
@@ -157,6 +163,7 @@
 int snd_compress_deregister(struct snd_compr *device);
 int snd_compress_new(struct snd_card *card, int device,
 			int type, struct snd_compr *compr);
+int snd_compr_stop(struct snd_compr_stream *stream);
 
 /* dsp driver callback apis
  * For playback: driver should call snd_compress_fragment_elapsed() to let the
@@ -171,4 +178,12 @@
 	wake_up(&stream->runtime->sleep);
 }
 
+static inline void snd_compr_drain_notify(struct snd_compr_stream *stream)
+{
+	snd_BUG_ON(!stream);
+
+	stream->runtime->drain_wake = 1;
+	wake_up(&stream->runtime->wait);
+}
+
 #endif
diff --git a/include/sound/core.h b/include/sound/core.h
index 5bfe513..971a2ec 100644
--- a/include/sound/core.h
+++ b/include/sound/core.h
@@ -146,6 +146,11 @@
 	struct snd_mixer_oss *mixer_oss;
 	int mixer_oss_change_count;
 #endif
+
+#if IS_ENABLED(CONFIG_SND_EFFECTS_OFFLOAD)
+	struct snd_effect_ops *effect_ops;
+	struct mutex effect_lock;	/* effect lock */
+#endif
 };
 
 #ifdef CONFIG_PM
diff --git a/include/sound/effect_driver.h b/include/sound/effect_driver.h
new file mode 100644
index 0000000..410e5f9
--- /dev/null
+++ b/include/sound/effect_driver.h
@@ -0,0 +1,63 @@
+/*
+ *  effect_driver.h - effect offload driver APIs
+ *
+ *  Copyright (C) 2013 Intel Corporation
+ *  Authors:	Vinod Koul <vinod.koul@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+#ifndef __EFFECT_DRIVER_H
+#define __EFFECT_DRIVER_H
+
+#include <sound/effect_offload.h>
+
+struct snd_effect_ops {
+	int (*create)(struct snd_card *card, struct snd_effect *effect);
+	int (*destroy)(struct snd_card *card, struct snd_effect *effect);
+	int (*set_params)(struct snd_card *card,
+				struct snd_effect_params *params);
+	int (*get_params)(struct snd_card *card,
+				struct snd_effect_params *params);
+	int (*query_num_effects)(struct snd_card *card);
+	int (*query_effect_caps)(struct snd_card *card,
+					struct snd_effect_caps *caps);
+};
+
+#if IS_ENABLED(CONFIG_SND_EFFECTS_OFFLOAD)
+int snd_effect_register(struct snd_card *card, struct snd_effect_ops *ops);
+int snd_effect_deregister(struct snd_card *card);
+#else
+static inline int snd_effect_register(struct snd_card *card,
+					struct snd_effect_ops *ops)
+{
+	return -ENODEV;
+}
+static inline int snd_effect_deregister(struct snd_card *card)
+{
+	return -ENODEV;
+}
+#endif
+
+/* IOCTL fns */
+int snd_ctl_effect_create(struct snd_card *card, void *arg);
+int snd_ctl_effect_destroy(struct snd_card *card, void *arg);
+int snd_ctl_effect_set_params(struct snd_card *card, void *arg);
+int snd_ctl_effect_get_params(struct snd_card *card, void *arg);
+int snd_ctl_effect_query_num_effects(struct snd_card *card, void *arg);
+int snd_ctl_effect_query_effect_caps(struct snd_card *card, void *arg);
+#endif
diff --git a/include/sound/intel_sst_ioctl.h b/include/sound/intel_sst_ioctl.h
new file mode 100644
index 0000000..6025ef9
--- /dev/null
+++ b/include/sound/intel_sst_ioctl.h
@@ -0,0 +1,63 @@
+#ifndef __INTEL_SST_IOCTL_H__
+#define __INTEL_SST_IOCTL_H__
+/*
+ *  intel_sst_ioctl.h - Intel SST Driver for audio engine
+ *
+ *  Copyright (C) 2008-10 Intel Corporation
+ *  Authors:	Vinod Koul <vinod.koul@intel.com>
+ *		Harsha Priya <priya.harsha@intel.com>
+ *		Dharageswari R <dharageswari.r@intel.com>
+ *		KP Jeeja <jeeja.kp@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This file defines all sst ioctls
+ */
+
+/* codec and post/pre processing related info */
+
+#include <linux/types.h>
+
+/* Pre and post processing params structure */
+struct snd_ppp_params {
+	__u8			algo_id;/* Post/Pre processing algorithm ID  */
+	__u8			str_id;	/*Only 5 bits used 0 - 31 are valid*/
+	__u8			enable;	/* 0= disable, 1= enable*/
+	__u8			operation; /* 0 = set_algo, 1 = get_algo */
+	__u32			size;	/*Size of parameters for all blocks*/
+	void			*params;
+} __packed;
+
+struct snd_sst_driver_info {
+	__u32 max_streams;
+};
+
+struct snd_sst_tuning_params {
+	__u8 type;
+	__u8 str_id;
+	__u8 size;
+	__u8 rsvd;
+	__u64 addr;
+} __packed;
+
+/*IOCTL defined here */
+/*SST common ioctls */
+#define SNDRV_SST_DRIVER_INFO	_IOR('L', 0x10, struct snd_sst_driver_info)
+#define SNDRV_SST_SET_ALGO	_IOW('L', 0x30,  struct snd_ppp_params)
+#define SNDRV_SST_GET_ALGO	_IOWR('L', 0x31,  struct snd_ppp_params)
+#define SNDRV_SST_TUNING_PARAMS	_IOW('L', 0x32,  struct snd_sst_tuning_params)
+#endif /* __INTEL_SST_IOCTL_H__ */
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index b48792f..6f01136 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -285,6 +285,7 @@
 	unsigned long hw_ptr_jiffies;	/* Time when hw_ptr is updated */
 	unsigned long hw_ptr_buffer_jiffies; /* buffer time in jiffies */
 	snd_pcm_sframes_t delay;	/* extra delay; typically FIFO size */
+	snd_pcm_sframes_t soc_delay;    /* extra delay; typically delay incurred in soc */
 	u64 hw_ptr_wrap;                /* offset for hw_ptr due to boundary wrap-around */
 
 	/* -- HW params -- */
diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h
index ae9a227..41e9df9 100644
--- a/include/sound/soc-dai.h
+++ b/include/sound/soc-dai.h
@@ -231,8 +231,8 @@
 	struct snd_soc_dai_driver *driver;
 
 	/* DAI runtime info */
-	unsigned int capture_active:1;		/* stream is in use */
-	unsigned int playback_active:1;		/* stream is in use */
+	unsigned int capture_active;		/* cap streams is in use */
+	unsigned int playback_active;		/* pb streams is in use */
 	unsigned int symmetric_rates:1;
 	struct snd_pcm_runtime *runtime;
 	unsigned int active;
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 385c632..f48d25c 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -422,6 +422,8 @@
 int snd_soc_dapm_dai_get_connected_widgets(struct snd_soc_dai *dai, int stream,
 	struct snd_soc_dapm_widget_list **list);
 
+struct snd_soc_codec *snd_soc_dapm_kcontrol_codec(struct snd_kcontrol *kcontrol);
+
 /* dapm widget types */
 enum snd_soc_dapm_type {
 	snd_soc_dapm_input = 0,		/* input pin */
diff --git a/include/sound/soc-dpcm.h b/include/sound/soc-dpcm.h
index 04598f1..a2e15ca 100644
--- a/include/sound/soc-dpcm.h
+++ b/include/sound/soc-dpcm.h
@@ -11,6 +11,7 @@
 #ifndef __LINUX_SND_SOC_DPCM_H
 #define __LINUX_SND_SOC_DPCM_H
 
+#include <linux/slab.h>
 #include <linux/list.h>
 #include <sound/pcm.h>
 
@@ -135,4 +136,25 @@
 int soc_dpcm_debugfs_add(struct snd_soc_pcm_runtime *rtd);
 int soc_dpcm_runtime_update(struct snd_soc_dapm_widget *);
 
+int dpcm_path_get(struct snd_soc_pcm_runtime *fe,
+	int stream, struct snd_soc_dapm_widget_list **list_);
+int dpcm_process_paths(struct snd_soc_pcm_runtime *fe,
+	int stream, struct snd_soc_dapm_widget_list **list, int new);
+int dpcm_be_dai_startup(struct snd_soc_pcm_runtime *fe, int stream);
+int dpcm_be_dai_shutdown(struct snd_soc_pcm_runtime *fe, int stream);
+void dpcm_be_disconnect(struct snd_soc_pcm_runtime *fe, int stream);
+void dpcm_clear_pending_state(struct snd_soc_pcm_runtime *fe, int stream);
+int dpcm_be_dai_hw_free(struct snd_soc_pcm_runtime *fe, int stream);
+int dpcm_be_dai_hw_params(struct snd_soc_pcm_runtime *fe, int tream);
+int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream, int cmd);
+int dpcm_be_dai_prepare(struct snd_soc_pcm_runtime *fe, int stream);
+int dpcm_dapm_stream_event(struct snd_soc_pcm_runtime *fe, int dir,
+	int event);
+
+static inline void dpcm_path_put(struct snd_soc_dapm_widget_list **list)
+{
+	kfree(*list);
+}
+
+
 #endif
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 85c1522..b16d7ee 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -26,6 +26,7 @@
 #include <sound/compress_driver.h>
 #include <sound/control.h>
 #include <sound/ac97_codec.h>
+#include <sound/effect_driver.h>
 
 /*
  * Convenience kcontrol builders
@@ -92,6 +93,12 @@
 		{.reg = xreg, .rreg = xreg, .shift = xshift, \
 		 .rshift = xshift, .min = xmin, .max = xmax, \
 		 .platform_max = xmax, .invert = xinvert} }
+#define SND_SOC_BYTES_EXT(xname, xcount, xhandler_get, xhandler_put) \
+{	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+	.info = snd_soc_info_bytes_ext, \
+	.get = xhandler_get, .put = xhandler_put, \
+	.private_value = (unsigned long)&(struct soc_bytes_ext) \
+		{.max = xcount} }
 #define SOC_DOUBLE(xname, reg, shift_left, shift_right, max, invert) \
 {	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\
 	.info = snd_soc_info_volsw, .get = snd_soc_get_volsw, \
@@ -420,6 +427,21 @@
 		const char *dai_link, int stream);
 struct snd_soc_pcm_runtime *snd_soc_get_pcm_runtime(struct snd_soc_card *card,
 		const char *dai_link);
+#if IS_ENABLED(CONFIG_SND_EFFECTS_OFFLOAD)
+int snd_soc_register_effect(struct snd_soc_card *card,
+				struct snd_effect_ops *ops);
+int snd_soc_unregister_effect(struct snd_soc_card *card);
+#else
+static inline int snd_soc_register_effect(struct snd_soc_card *card,
+					struct snd_effect_ops *ops)
+{
+	return -ENODEV;
+}
+static inline int snd_soc_unregister_effect(struct snd_soc_card *card)
+{
+	return -ENODEV;
+}
+#endif
 
 /* Utility functions to get clock rates from various things */
 int snd_soc_calc_frame_size(int sample_size, int channels, int tdm_slots);
@@ -538,6 +560,8 @@
 	struct snd_ctl_elem_value *ucontrol);
 int snd_soc_put_strobe(struct snd_kcontrol *kcontrol,
 	struct snd_ctl_elem_value *ucontrol);
+int snd_soc_info_bytes_ext(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_info *ucontrol);
 
 /**
  * struct snd_soc_reg_access - Describes whether a given register is
@@ -594,6 +618,7 @@
  * @name:         gpio name
  * @report:       value to report when jack detected
  * @invert:       report presence in low state
+ * @irq_flag:	  Interrupt flags for GPIO-Irq line
  * @debouce_time: debouce time in ms
  * @wake:	  enable as wake source
  * @jack_status_check: callback function which overrides the detection
@@ -608,6 +633,7 @@
 	int invert;
 	int debounce_time;
 	bool wake;
+	unsigned long irq_flags;
 
 	struct snd_soc_jack *jack;
 	struct delayed_work work;
@@ -902,6 +928,7 @@
 	int be_id;	/* optional ID for machine driver BE identification */
 
 	const struct snd_soc_pcm_stream *params;
+	bool dsp_loopback;
 
 	unsigned int dai_fmt;           /* format to set on init */
 
@@ -932,6 +959,10 @@
 	/* machine stream operations */
 	const struct snd_soc_ops *ops;
 	const struct snd_soc_compr_ops *compr_ops;
+
+	/*no of substreams */
+	unsigned int playback_count;
+	unsigned int capture_count;
 };
 
 struct snd_soc_codec_conf {
@@ -1063,6 +1094,7 @@
 
 	/* Dynamic PCM BE runtime data */
 	struct snd_soc_dpcm_runtime dpcm[2];
+	int fe_compr;
 
 	long pmdown_time;
 	unsigned char pop_wait:1;
@@ -1100,6 +1132,10 @@
 	unsigned int regbase, regcount, nbits, invert;
 };
 
+struct soc_bytes_ext {
+	int max;
+};
+
 /* enumerated kcontrol */
 struct soc_enum {
 	unsigned short reg;
diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h
index 23a87d0..c5aade5 100644
--- a/include/target/iscsi/iscsi_transport.h
+++ b/include/target/iscsi/iscsi_transport.h
@@ -34,8 +34,6 @@
 /*
  * From iscsi_target.c
  */
-extern int iscsit_add_reject_from_cmd(u8, int, int, unsigned char *,
-				struct iscsi_cmd *);
 extern int iscsit_setup_scsi_cmd(struct iscsi_conn *, struct iscsi_cmd *,
 				unsigned char *);
 extern void iscsit_set_unsoliticed_dataout(struct iscsi_cmd *);
@@ -67,6 +65,10 @@
  */
 extern void iscsit_increment_maxcmdsn(struct iscsi_cmd *, struct iscsi_session *);
 /*
+ * From iscsi_target_erl0.c
+ */
+extern void iscsit_cause_connection_reinstatement(struct iscsi_conn *, int);
+/*
  * From iscsi_target_erl1.c
  */
 extern void iscsit_stop_dataout_timer(struct iscsi_cmd *);
@@ -80,4 +82,5 @@
  * From iscsi_target_util.c
  */
 extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, gfp_t);
-extern int iscsit_sequence_cmd(struct iscsi_conn *, struct iscsi_cmd *, __be32);
+extern int iscsit_sequence_cmd(struct iscsi_conn *, struct iscsi_cmd *,
+			       unsigned char *, __be32);
diff --git a/include/trace/events/marker_event.h b/include/trace/events/marker_event.h
new file mode 100644
index 0000000..285d3ee
--- /dev/null
+++ b/include/trace/events/marker_event.h
@@ -0,0 +1,32 @@
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM marker_event
+
+#if !defined(_TRACE_MARKER_EVENT_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MARKER_EVENT_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(marker_event,
+	/* TODO */
+	TP_PROTO(const char *text, size_t len),
+
+	TP_ARGS(text, len),
+
+	TP_STRUCT__entry(
+		__dynamic_array(char, msg, len + 1)
+	),
+
+	TP_fast_assign(
+		memcpy(__get_dynamic_array(msg), text, len);
+		((char *)__get_dynamic_array(msg))[len] = 0;
+	),
+
+	TP_printk("%s", __get_str(msg))
+
+);
+#endif /* _TRACE_MARKER_EVENT_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
+
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index 4ec2d3e..a9f672d 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -221,6 +221,34 @@
 
 	TP_ARGS(name, state, cpu_id)
 );
+
+/* Event class which will help in synchronizing the start of the
+*	power measurement with the logs.
+*/
+DECLARE_EVENT_CLASS(trigger,
+
+	TP_PROTO(unsigned short state),
+
+	TP_ARGS(state),
+
+	TP_STRUCT__entry(
+		__field(u16, state)
+	),
+
+	TP_fast_assign(
+		__entry->state = state;
+	),
+
+	TP_printk("state=%lu", (unsigned long)__entry->state)
+);
+
+DEFINE_EVENT(trigger, vibrator,
+
+	TP_PROTO(unsigned short state),
+
+	TP_ARGS(state)
+);
+
 #endif /* _TRACE_POWER_H */
 
 /* This part must be outside protection */
diff --git a/include/trace/events/tp2e.h b/include/trace/events/tp2e.h
new file mode 100644
index 0000000..77d0b08
--- /dev/null
+++ b/include/trace/events/tp2e.h
@@ -0,0 +1,130 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM tp2e
+
+
+#if !defined(_TRACE_TP2E_H)
+enum tp2e_ev_type {
+	TP2E_EV_STAT,
+	TP2E_EV_INFO,
+	TP2E_EV_ERROR,
+	TP2E_EV_CRASH,
+	TP2E_EV_LAST
+};
+
+#endif /* _TRACE_TP2E_H */
+
+
+#if !defined(_TRACE_TP2E_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_TP2E_H
+
+#include <linux/tracepoint.h>
+
+#define NAME_MAX_LEN 16
+#define DATA_MAX_LEN 128
+#define FILELIST_MAX_LEN 256
+
+#define show_tp2e_ev_type(type)			\
+	__print_symbolic(type,				\
+			 { TP2E_EV_STAT, "STAT" },	\
+			 { TP2E_EV_INFO, "INFO" },	\
+			 { TP2E_EV_ERROR, "ERROR" },	\
+			 { TP2E_EV_CRASH, "CRASH" })
+
+DECLARE_EVENT_CLASS(tp2e_generic_class,
+
+	    TP_PROTO(
+		    enum tp2e_ev_type ev_type,
+		    char *submitter_name,
+		    char *ev_name,
+		    char *data0,
+		    char *data1,
+		    char *data2,
+		    char *data3,
+		    char *data4,
+		    char *data5,
+		    char *filelist
+		    ),
+
+	    TP_ARGS(
+		    ev_type, submitter_name, ev_name,
+		    data0, data1, data2, data3, data4, data5, filelist
+		    ),
+
+	    TP_STRUCT__entry(
+		    __field(enum tp2e_ev_type, ev_type)
+		    __array(char, submitter_name, NAME_MAX_LEN)
+		    __array(char, ev_name, NAME_MAX_LEN)
+		    __array(char, data0, DATA_MAX_LEN)
+		    __array(char, data1, DATA_MAX_LEN)
+		    __array(char, data2, DATA_MAX_LEN)
+		    __array(char, data3, DATA_MAX_LEN)
+		    __array(char, data4, DATA_MAX_LEN)
+		    __array(char, data5, DATA_MAX_LEN)
+		    __array(char, filelist, FILELIST_MAX_LEN)
+		    ),
+
+	    TP_fast_assign(
+		    __entry->ev_type = ev_type;
+		    strncpy(__entry->submitter_name, submitter_name, NAME_MAX_LEN);
+		    strncpy(__entry->ev_name, ev_name, NAME_MAX_LEN);
+		    strncpy(__entry->data0, data0, DATA_MAX_LEN);
+		    strncpy(__entry->data1, data1, DATA_MAX_LEN);
+		    strncpy(__entry->data2, data2, DATA_MAX_LEN);
+		    strncpy(__entry->data3, data3, DATA_MAX_LEN);
+		    strncpy(__entry->data4, data4, DATA_MAX_LEN);
+		    strncpy(__entry->data5, data5, DATA_MAX_LEN);
+		    strncpy(__entry->filelist, filelist, FILELIST_MAX_LEN);
+		    ),
+
+	    TP_printk("type=%s submitter_name=%s name=%s data0=%s data1=%s data2=%s data3=%s data4=%s data5=%s",
+		      show_tp2e_ev_type(__entry->ev_type),
+		      __entry->submitter_name, __entry->ev_name,
+		      __entry->data0, __entry->data1, __entry->data2,
+		      __entry->data3, __entry->data4, __entry->data5
+		    )
+	);
+
+DEFINE_EVENT(tp2e_generic_class, tp2e_generic_event,
+	TP_PROTO(
+		enum tp2e_ev_type ev_type,
+		char *submitter_name,
+		char *ev_name,
+		char *data0,
+		char *data1,
+		char *data2,
+		char *data3,
+		char *data4,
+		char *data5,
+		char *filelist
+		),
+
+	TP_ARGS(
+		ev_type, submitter_name, ev_name,
+		data0, data1, data2, data3, data4, data5, filelist
+		)
+	);
+
+DEFINE_EVENT(tp2e_generic_class, tp2e_scu_recov_event,
+	TP_PROTO(
+		enum tp2e_ev_type ev_type,
+		char *submitter_name,
+		char *ev_name,
+		char *data0,
+		char *data1,
+		char *data2,
+		char *data3,
+		char *data4,
+		char *data5,
+		char *filelist
+		),
+
+	TP_ARGS(
+		ev_type, submitter_name, ev_name,
+		data0, data1, data2, data3, data4, data5, filelist
+		)
+	);
+
+#endif /* _TRACE_TP2E_H || TRACE_HEADER_MULTI_READ */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 5a57be6..238a166 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -732,6 +732,7 @@
 #define DRM_IOCTL_MODE_ADDFB2		DRM_IOWR(0xB8, struct drm_mode_fb_cmd2)
 #define DRM_IOCTL_MODE_OBJ_GETPROPERTIES	DRM_IOWR(0xB9, struct drm_mode_obj_get_properties)
 #define DRM_IOCTL_MODE_OBJ_SETPROPERTY	DRM_IOWR(0xBA, struct drm_mode_obj_set_property)
+#define DRM_IOCTL_MODE_CURSOR2		DRM_IOWR(0xBB, struct drm_mode_cursor2)
 
 /**
  * Device specific ioctls should only be in their respective headers
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 090e533..c1d0c62 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -59,12 +59,19 @@
 #define DRM_MODE_FLAG_DBLCLK	(1<<12)
 #define DRM_MODE_FLAG_CLKDIV2	(1<<13)
 
+/*  FIXME - Begin - Added at Intel - move this to a file owned by HDMI code? */
+#define DRM_MODE_FLAG_PAR16_9	(1<<14)
+#define DRM_MODE_FLAG_PAR4_3	(1<<15)
+/*  FIXME - End   - Added at Intel - move this to a file owned by HDMI code? */
+
 /* DPMS flags */
 /* bit compatible with the xorg definitions. */
 #define DRM_MODE_DPMS_ON	0
 #define DRM_MODE_DPMS_STANDBY	1
 #define DRM_MODE_DPMS_SUSPEND	2
 #define DRM_MODE_DPMS_OFF	3
+#define DRM_MODE_DPMS_ASYNC_ON	4
+#define DRM_MODE_DPMS_ASYNC_OFF	5
 
 /* Scaling mode options */
 #define DRM_MODE_SCALE_NONE		0 /* Unmodified timing (display or
@@ -92,6 +99,9 @@
 
 	__u32 flags;
 	__u32 type;
+#if defined(CONFIG_DRM_I915)
+	__u32 picture_aspect_ratio;
+#endif
 	char name[DRM_DISPLAY_MODE_LEN];
 };
 
@@ -139,6 +149,7 @@
 	/* Source values are 16.16 fixed point */
 	__u32 src_x, src_y;
 	__u32 src_h, src_w;
+	__u64 user_data;
 };
 
 struct drm_mode_get_plane {
@@ -164,7 +175,8 @@
 #define DRM_MODE_ENCODER_TMDS	2
 #define DRM_MODE_ENCODER_LVDS	3
 #define DRM_MODE_ENCODER_TVDAC	4
-#define DRM_MODE_ENCODER_VIRTUAL 5
+#define DRM_MODE_ENCODER_DSI	5
+#define DRM_MODE_ENCODER_VIRTUAL 6
 
 struct drm_mode_get_encoder {
 	__u32 encoder_id;
@@ -203,6 +215,7 @@
 #define DRM_MODE_CONNECTOR_TV		13
 #define DRM_MODE_CONNECTOR_eDP		14
 #define DRM_MODE_CONNECTOR_VIRTUAL      15
+#define DRM_MODE_CONNECTOR_DSI		16
 
 struct drm_mode_get_connector {
 
@@ -223,6 +236,8 @@
 	__u32 connection;
 	__u32 mm_width, mm_height; /**< HxW in millimeters */
 	__u32 subpixel;
+
+	__u32 pad;
 };
 
 #define DRM_MODE_PROP_PENDING	(1<<0)
@@ -388,6 +403,19 @@
 	__u32 handle;
 };
 
+struct drm_mode_cursor2 {
+	__u32 flags;
+	__u32 crtc_id;
+	__s32 x;
+	__s32 y;
+	__u32 width;
+	__u32 height;
+	/* driver specific handle */
+	__u32 handle;
+	__s32 hot_x;
+	__s32 hot_y;
+};
+
 struct drm_mode_crtc_lut {
 	__u32 crtc_id;
 	__u32 gamma_size;
diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h
index 321d4ac..fa8b3ad 100644
--- a/include/uapi/drm/radeon_drm.h
+++ b/include/uapi/drm/radeon_drm.h
@@ -979,6 +979,8 @@
 #define RADEON_INFO_RING_WORKING	0x15
 /* SI tile mode array */
 #define RADEON_INFO_SI_TILE_MODE_ARRAY	0x16
+/* query if CP DMA is supported on the compute ring */
+#define RADEON_INFO_SI_CP_DMA_COMPUTE	0x17
 
 
 struct drm_radeon_info {
diff --git a/include/uapi/linux/firewire-cdev.h b/include/uapi/linux/firewire-cdev.h
index d500369..1db453e 100644
--- a/include/uapi/linux/firewire-cdev.h
+++ b/include/uapi/linux/firewire-cdev.h
@@ -215,8 +215,8 @@
  * with the %FW_CDEV_ISO_INTERRUPT bit set, when explicitly requested with
  * %FW_CDEV_IOC_FLUSH_ISO, or when there have been so many completed packets
  * without the interrupt bit set that the kernel's internal buffer for @header
- * is about to overflow.  (In the last case, kernels with ABI version < 5 drop
- * header data up to the next interrupt packet.)
+ * is about to overflow.  (In the last case, ABI versions < 5 drop header data
+ * up to the next interrupt packet.)
  *
  * Isochronous transmit events (context type %FW_CDEV_ISO_CONTEXT_TRANSMIT):
  *
diff --git a/include/uapi/linux/icmpv6.h b/include/uapi/linux/icmpv6.h
index e0133c7..590beda 100644
--- a/include/uapi/linux/icmpv6.h
+++ b/include/uapi/linux/icmpv6.h
@@ -115,6 +115,8 @@
 #define ICMPV6_NOT_NEIGHBOUR		2
 #define ICMPV6_ADDR_UNREACH		3
 #define ICMPV6_PORT_UNREACH		4
+#define ICMPV6_POLICY_FAIL		5
+#define ICMPV6_REJECT_ROUTE		6
 
 /*
  *	Codes for Time Exceeded
diff --git a/include/uapi/linux/if_pppox.h b/include/uapi/linux/if_pppox.h
index 772cf08..87f478b 100644
--- a/include/uapi/linux/if_pppox.h
+++ b/include/uapi/linux/if_pppox.h
@@ -139,11 +139,11 @@
 
 struct pppoe_hdr {
 #if defined(__LITTLE_ENDIAN_BITFIELD)
-	__u8 ver : 4;
 	__u8 type : 4;
+	__u8 ver : 4;
 #elif defined(__BIG_ENDIAN_BITFIELD)
-	__u8 type : 4;
 	__u8 ver : 4;
+	__u8 type : 4;
 #else
 #error	"Please fix <asm/byteorder.h>"
 #endif
diff --git a/include/uapi/linux/mmc/ioctl.h b/include/uapi/linux/mmc/ioctl.h
index 1f5e689..98ad956 100644
--- a/include/uapi/linux/mmc/ioctl.h
+++ b/include/uapi/linux/mmc/ioctl.h
@@ -47,6 +47,19 @@
 
 #define MMC_IOC_CMD _IOWR(MMC_BLOCK_MAJOR, 0, struct mmc_ioc_cmd)
 
+struct mmc_ioc_rpmb_req {
+	__u16 type;                     /* RPMB request type */
+	__u16 *result;                  /* response or request result */
+	__u16 blk_cnt;                  /* Number of blocks(half sector 256B) */
+	__u16 addr;                     /* data address */
+	__u32 *wc;                      /* write counter */
+	__u8 *nonce;                    /* Ramdom number */
+	__u8 *data;                     /* Buffer of the user data */
+	__u8 *mac;                      /* Message Authentication Code */
+};
+
+#define MMC_IOC_RPMB_REQ _IOWR(MMC_BLOCK_MAJOR, 1, struct mmc_ioc_rpmb_req)
+
 /*
  * Since this ioctl is only meant to enhance (and not replace) normal access
  * to the mmc bus device, an upper data transfer limit of MMC_IOC_MAX_BYTES
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index fb104e5..9e59950 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -425,13 +425,15 @@
 	/*
 	 * Control data for the mmap() data buffer.
 	 *
-	 * User-space reading the @data_head value should issue an rmb(), on
-	 * SMP capable platforms, after reading this value -- see
-	 * perf_event_wakeup().
+	 * User-space reading the @data_head value should issue an smp_rmb(),
+	 * after reading this value.
 	 *
 	 * When the mapping is PROT_WRITE the @data_tail value should be
-	 * written by userspace to reflect the last read data. In this case
-	 * the kernel will not over-write unread data.
+	 * written by userspace to reflect the last read data, after issueing
+	 * an smp_mb() to separate the data read from the ->data_tail store.
+	 * In this case the kernel will not over-write unread data.
+	 *
+	 * See perf_output_put_handle() for the data ordering.
 	 */
 	__u64   data_head;		/* head in the data section */
 	__u64	data_tail;		/* user-space written tail */
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index dbd71b0..09d62b92 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -73,9 +73,17 @@
 #define TC_H_ROOT	(0xFFFFFFFFU)
 #define TC_H_INGRESS    (0xFFFFFFF1U)
 
+/* Need to corrospond to iproute2 tc/tc_core.h "enum link_layer" */
+enum tc_link_layer {
+	TC_LINKLAYER_UNAWARE, /* Indicate unaware old iproute2 util */
+	TC_LINKLAYER_ETHERNET,
+	TC_LINKLAYER_ATM,
+};
+#define TC_LINKLAYER_MASK 0x0F /* limit use to lower 4 bits */
+
 struct tc_ratespec {
 	unsigned char	cell_log;
-	unsigned char	__reserved;
+	__u8		linklayer; /* lower 4 bits */
 	unsigned short	overhead;
 	short		cell_align;
 	unsigned short	mpu;
diff --git a/include/uapi/linux/time.h b/include/uapi/linux/time.h
index e75e1b6..4603500 100644
--- a/include/uapi/linux/time.h
+++ b/include/uapi/linux/time.h
@@ -56,6 +56,7 @@
 #define CLOCK_BOOTTIME_ALARM		9
 #define CLOCK_SGI_CYCLE			10	/* Hardware specific */
 #define CLOCK_TAI			11
+#define CLOCK_REALTIME_ALARM_OFF	12
 
 #define MAX_CLOCKS			16
 #define CLOCKS_MASK			(CLOCK_REALTIME | CLOCK_MONOTONIC)
diff --git a/include/uapi/linux/tty.h b/include/uapi/linux/tty.h
index dac199a..1d98cfa 100644
--- a/include/uapi/linux/tty.h
+++ b/include/uapi/linux/tty.h
@@ -34,5 +34,7 @@
 #define N_TI_WL		22	/* for TI's WL BT, FM, GPS combo chips */
 #define N_TRACESINK	23	/* Trace data routing for MIPI P1149.7 */
 #define N_TRACEROUTER	24	/* Trace data routing for MIPI P1149.7 */
+#define N_INTEL_LDISC   25   /* Intel ST Driver*/
+
 
 #endif /* _UAPI_LINUX_TTY_H */
diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h
index aa33fd1..06356ff 100644
--- a/include/uapi/linux/usb/ch9.h
+++ b/include/uapi/linux/usb/ch9.h
@@ -125,6 +125,8 @@
 #define USB_DEVICE_A_HNP_SUPPORT	4	/* (otg) RH port supports HNP */
 #define USB_DEVICE_A_ALT_HNP_SUPPORT	5	/* (otg) other RH port does */
 #define USB_DEVICE_DEBUG_MODE		6	/* (special devices only) */
+#define USB_NTF_HOST_REL		51
+#define USB_B3_RSP_ENABLE		52
 
 /*
  * Test Mode Selectors
@@ -137,6 +139,20 @@
 #define	TEST_FORCE_EN	5
 
 /*
+ * USB OTG 2.0 Test Mode
+ * See OTG 2.0 spec Table 6-8
+ */
+#define TEST_SRP_REQD	6
+#define TEST_HNP_REQD	7
+
+/*
+ * OTG 2.0
+ * Section 6.2 & 6.3
+ */
+#define OTG_STATUS_SELECTOR	0xF000
+
+
+/*
  * New Feature Selectors as added by USB 3.0
  * See USB 3.0 spec Table 9-7
  */
@@ -294,6 +310,7 @@
 #define USB_CLASS_CSCID			0x0b	/* chip+ smart card */
 #define USB_CLASS_CONTENT_SEC		0x0d	/* content security */
 #define USB_CLASS_VIDEO			0x0e
+#define USB_CLASS_DEBUG			0xdc
 #define USB_CLASS_WIRELESS_CONTROLLER	0xe0
 #define USB_CLASS_MISC			0xef
 #define USB_CLASS_APP_SPEC		0xfe
@@ -666,17 +683,19 @@
 
 /*-------------------------------------------------------------------------*/
 
-/* USB_DT_OTG (from OTG 1.0a supplement) */
+/* USB_DT_OTG (from OTG 2.0) */
 struct usb_otg_descriptor {
 	__u8  bLength;
 	__u8  bDescriptorType;
 
 	__u8  bmAttributes;	/* support for HNP, SRP, etc */
+	__le16 bcdOTG;          /* release number, i.e, 2.0 is 0x0200 */
 } __attribute__ ((packed));
 
 /* from usb_otg_descriptor.bmAttributes */
 #define USB_OTG_SRP		(1 << 0)
 #define USB_OTG_HNP		(1 << 1)	/* swap host/device roles */
+#define USB_OTG_ADP		(1 << 2)	/* attachment detection */
 
 /*-------------------------------------------------------------------------*/
 
diff --git a/include/uapi/linux/usb/f_mtp.h b/include/uapi/linux/usb/f_mtp.h
index 5032918..621fee7 100644
--- a/include/uapi/linux/usb/f_mtp.h
+++ b/include/uapi/linux/usb/f_mtp.h
@@ -20,6 +20,7 @@
 
 #include <linux/ioctl.h>
 #include <linux/types.h>
+#include <linux/compat.h>
 
 struct mtp_file_range {
 	/* file descriptor for file to transfer */
@@ -58,4 +59,41 @@
  */
 #define MTP_SEND_FILE_WITH_HEADER  _IOW('M', 4, struct mtp_file_range)
 
+#ifdef CONFIG_COMPAT
+struct mtp_file_range_32 {
+	/* file descriptor for file to transfer */
+	compat_int_t		fd;
+	/* offset in file for start of transfer */
+	compat_s64		offset;
+	/* number of bytes to transfer */
+	compat_s64		length;
+	/* MTP command ID for data header,
+	 * used only for MTP_SEND_FILE_WITH_HEADER
+	 */
+	u16			command;
+	/* MTP transaction ID for data header,
+	 * used only for MTP_SEND_FILE_WITH_HEADER
+	 */
+	u32			transaction_id;
+};
+
+struct mtp_event_32 {
+	/* size of the event */
+	u32		length;
+	compat_caddr_t	compat_data;
+};
+
+/* Sends the specified file range to the host */
+#define MTP_SEND_FILE_32              _IOW('M', 0, struct mtp_file_range_32)
+/* Receives data from the host and writes it to a file.
+ * The file is created if it does not exist.
+ */
+#define MTP_RECEIVE_FILE_32           _IOW('M', 1, struct mtp_file_range_32)
+/* Sends an event to the host via the interrupt endpoint */
+#define MTP_SEND_EVENT_32             _IOW('M', 3, struct mtp_event_32)
+/* Sends the specified file range to the host,
+ * with a 12 byte MTP data packet header at the beginning.
+ */
+#define MTP_SEND_FILE_WITH_HEADER_32  _IOW('M', 4, struct mtp_file_range_32)
+#endif
 #endif /* _UAPI_LINUX_USB_F_MTP_H */
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index f40b41c..560f632 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -1721,6 +1721,7 @@
 #define V4L2_EVENT_EOS				2
 #define V4L2_EVENT_CTRL				3
 #define V4L2_EVENT_FRAME_SYNC			4
+#define V4L2_EVENT_FRAME_END			5
 #define V4L2_EVENT_PRIVATE_START		0x08000000
 
 /* Payload for V4L2_EVENT_VSYNC */
@@ -1748,6 +1749,7 @@
 	__s32 default_value;
 };
 
+/* V4L2_EVENT_FRAME_SYNC or V4L2_EVENT_FRAME_END */
 struct v4l2_event_frame_sync {
 	__u32 frame_sequence;
 };
diff --git a/include/uapi/sound/compress_offload.h b/include/uapi/sound/compress_offload.h
index d630163..21eed48 100644
--- a/include/uapi/sound/compress_offload.h
+++ b/include/uapi/sound/compress_offload.h
@@ -30,7 +30,7 @@
 #include <sound/compress_params.h>
 
 
-#define SNDRV_COMPRESS_VERSION SNDRV_PROTOCOL_VERSION(0, 1, 1)
+#define SNDRV_COMPRESS_VERSION SNDRV_PROTOCOL_VERSION(0, 1, 2)
 /**
  * struct snd_compressed_buffer: compressed buffer
  * @fragment_size: size of buffer fragment in bytes
@@ -67,8 +67,8 @@
 struct snd_compr_tstamp {
 	__u32 byte_offset;
 	__u32 copied_total;
-	snd_pcm_uframes_t pcm_frames;
-	snd_pcm_uframes_t pcm_io_frames;
+	__u32 pcm_frames;
+	__u32 pcm_io_frames;
 	__u32 sampling_rate;
 };
 
@@ -80,7 +80,7 @@
 struct snd_compr_avail {
 	__u64 avail;
 	struct snd_compr_tstamp tstamp;
-};
+} __attribute__((packed));
 
 enum snd_compr_direction {
 	SND_COMPRESS_PLAYBACK = 0,
diff --git a/include/uapi/sound/effect_offload.h b/include/uapi/sound/effect_offload.h
new file mode 100644
index 0000000..0a5626e
--- /dev/null
+++ b/include/uapi/sound/effect_offload.h
@@ -0,0 +1,62 @@
+/*
+ *  effect_offload.h - effect offload header definations
+ *
+ *  Copyright (C) 2013 Intel Corporation
+ *  Authors:	Vinod Koul <vinod.koul@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+#ifndef __EFFECT_OFFLOAD_H
+#define __EFFECT_OFFLOAD_H
+
+#include <linux/types.h>
+
+#define SNDRV_EFFECT_VERSION SNDRV_PROTOCOL_VERSION(0, 1, 0)
+
+struct snd_effect {
+	char uuid[16];  /* effect UUID */
+	u32 device;	/* streaming interface for effect insertion */
+	u32 pos;	/* position of effect to be placed in effect chain */
+	u32 mode;	/* Backend for Global device (Headset/Speaker) */
+} __packed;
+
+struct snd_effect_params {
+	char uuid[16];
+	u32 device;
+	u32 size;	/* size of parameter blob */
+	u64 buffer_ptr;
+} __packed;
+
+struct snd_effect_caps {
+	u32 size;	/* size of buffer to read effect descriptors */
+	u64 buffer_ptr;
+} __packed;
+
+#define SNDRV_CTL_IOCTL_EFFECT_VERSION		_IOR('E', 0x00, int)
+#define SNDRV_CTL_IOCTL_EFFECT_CREATE		_IOW('E', 0x01,\
+						struct snd_effect)
+#define SNDRV_CTL_IOCTL_EFFECT_DESTROY		_IOW('E', 0x02,\
+						struct snd_effect)
+#define SNDRV_CTL_IOCTL_EFFECT_SET_PARAMS	_IOW('E', 0x03,\
+						struct snd_effect_params)
+#define SNDRV_CTL_IOCTL_EFFECT_GET_PARAMS	_IOWR('E', 0x04,\
+						struct snd_effect_params)
+#define SNDRV_CTL_IOCTL_EFFECT_QUERY_NUM	_IOR('E', 0x05, int)
+#define SNDRV_CTL_IOCTL_EFFECT_QUERY_CAPS	_IOWR('E', 0x06,\
+						struct snd_effect_caps)
+#endif
diff --git a/init/main.c b/init/main.c
index 544cccf..2132ffd 100644
--- a/init/main.c
+++ b/init/main.c
@@ -74,6 +74,7 @@
 #include <linux/ptrace.h>
 #include <linux/blkdev.h>
 #include <linux/elevator.h>
+#include <linux/random.h>
 
 #include <asm/io.h>
 #include <asm/bugs.h>
@@ -781,6 +782,7 @@
 	do_ctors();
 	usermodehelper_enable();
 	do_initcalls();
+	random_int_secret_init();
 }
 
 static void __init do_pre_smp_initcalls(void)
diff --git a/ipc/msg.c b/ipc/msg.c
index b2b4667..daa604d 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -70,8 +70,6 @@
 
 #define msg_ids(ns)	((ns)->ids[IPC_MSG_IDS])
 
-#define msg_unlock(msq)		ipc_unlock(&(msq)->q_perm)
-
 static void freeque(struct ipc_namespace *, struct kern_ipc_perm *);
 static int newque(struct ipc_namespace *, struct ipc_params *);
 #ifdef CONFIG_PROC_FS
@@ -141,27 +139,23 @@
 				IPC_MSG_IDS, sysvipc_msg_proc_show);
 }
 
-/*
- * msg_lock_(check_) routines are called in the paths where the rw_mutex
- * is not held.
- */
-static inline struct msg_queue *msg_lock(struct ipc_namespace *ns, int id)
+static inline struct msg_queue *msq_obtain_object(struct ipc_namespace *ns, int id)
 {
-	struct kern_ipc_perm *ipcp = ipc_lock(&msg_ids(ns), id);
+	struct kern_ipc_perm *ipcp = ipc_obtain_object(&msg_ids(ns), id);
 
 	if (IS_ERR(ipcp))
-		return (struct msg_queue *)ipcp;
+		return ERR_CAST(ipcp);
 
 	return container_of(ipcp, struct msg_queue, q_perm);
 }
 
-static inline struct msg_queue *msg_lock_check(struct ipc_namespace *ns,
-						int id)
+static inline struct msg_queue *msq_obtain_object_check(struct ipc_namespace *ns,
+							int id)
 {
-	struct kern_ipc_perm *ipcp = ipc_lock_check(&msg_ids(ns), id);
+	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&msg_ids(ns), id);
 
 	if (IS_ERR(ipcp))
-		return (struct msg_queue *)ipcp;
+		return ERR_CAST(ipcp);
 
 	return container_of(ipcp, struct msg_queue, q_perm);
 }
@@ -185,7 +179,7 @@
  * @ns: namespace
  * @params: ptr to the structure that contains the key and msgflg
  *
- * Called with msg_ids.rw_mutex held (writer)
+ * Called with msg_ids.rwsem held (writer)
  */
 static int newque(struct ipc_namespace *ns, struct ipc_params *params)
 {
@@ -270,8 +264,8 @@
  * removes the message queue from message queue ID IDR, and cleans up all the
  * messages associated with this queue.
  *
- * msg_ids.rw_mutex (writer) and the spinlock for this message queue are held
- * before freeque() is called. msg_ids.rw_mutex remains locked on exit.
+ * msg_ids.rwsem (writer) and the spinlock for this message queue are held
+ * before freeque() is called. msg_ids.rwsem remains locked on exit.
  */
 static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
 {
@@ -281,7 +275,8 @@
 	expunge_all(msq, -EIDRM);
 	ss_wakeup(&msq->q_senders, 1);
 	msg_rmid(ns, msq);
-	msg_unlock(msq);
+	ipc_unlock_object(&msq->q_perm);
+	rcu_read_unlock();
 
 	list_for_each_entry_safe(msg, t, &msq->q_messages, m_list) {
 		atomic_dec(&ns->msg_hdrs);
@@ -292,7 +287,7 @@
 }
 
 /*
- * Called with msg_ids.rw_mutex and ipcp locked.
+ * Called with msg_ids.rwsem and ipcp locked.
  */
 static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
 {
@@ -396,9 +391,9 @@
 }
 
 /*
- * This function handles some msgctl commands which require the rw_mutex
+ * This function handles some msgctl commands which require the rwsem
  * to be held in write mode.
- * NOTE: no locks must be held, the rw_mutex is taken inside this function.
+ * NOTE: no locks must be held, the rwsem is taken inside this function.
  */
 static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd,
 		       struct msqid_ds __user *buf, int version)
@@ -413,31 +408,39 @@
 			return -EFAULT;
 	}
 
-	ipcp = ipcctl_pre_down(ns, &msg_ids(ns), msqid, cmd,
-			       &msqid64.msg_perm, msqid64.msg_qbytes);
-	if (IS_ERR(ipcp))
-		return PTR_ERR(ipcp);
+	down_write(&msg_ids(ns).rwsem);
+	rcu_read_lock();
+
+	ipcp = ipcctl_pre_down_nolock(ns, &msg_ids(ns), msqid, cmd,
+				      &msqid64.msg_perm, msqid64.msg_qbytes);
+	if (IS_ERR(ipcp)) {
+		err = PTR_ERR(ipcp);
+		goto out_unlock1;
+	}
 
 	msq = container_of(ipcp, struct msg_queue, q_perm);
 
 	err = security_msg_queue_msgctl(msq, cmd);
 	if (err)
-		goto out_unlock;
+		goto out_unlock1;
 
 	switch (cmd) {
 	case IPC_RMID:
+		ipc_lock_object(&msq->q_perm);
+		/* freeque unlocks the ipc object and rcu */
 		freeque(ns, ipcp);
 		goto out_up;
 	case IPC_SET:
 		if (msqid64.msg_qbytes > ns->msg_ctlmnb &&
 		    !capable(CAP_SYS_RESOURCE)) {
 			err = -EPERM;
-			goto out_unlock;
+			goto out_unlock1;
 		}
 
+		ipc_lock_object(&msq->q_perm);
 		err = ipc_update_perm(&msqid64.msg_perm, ipcp);
 		if (err)
-			goto out_unlock;
+			goto out_unlock0;
 
 		msq->q_qbytes = msqid64.msg_qbytes;
 
@@ -453,25 +456,23 @@
 		break;
 	default:
 		err = -EINVAL;
+		goto out_unlock1;
 	}
-out_unlock:
-	msg_unlock(msq);
+
+out_unlock0:
+	ipc_unlock_object(&msq->q_perm);
+out_unlock1:
+	rcu_read_unlock();
 out_up:
-	up_write(&msg_ids(ns).rw_mutex);
+	up_write(&msg_ids(ns).rwsem);
 	return err;
 }
 
-SYSCALL_DEFINE3(msgctl, int, msqid, int, cmd, struct msqid_ds __user *, buf)
+static int msgctl_nolock(struct ipc_namespace *ns, int msqid,
+			 int cmd, int version, void __user *buf)
 {
+	int err;
 	struct msg_queue *msq;
-	int err, version;
-	struct ipc_namespace *ns;
-
-	if (msqid < 0 || cmd < 0)
-		return -EINVAL;
-
-	version = ipc_parse_version(&cmd);
-	ns = current->nsproxy->ipc_ns;
 
 	switch (cmd) {
 	case IPC_INFO:
@@ -482,6 +483,7 @@
 
 		if (!buf)
 			return -EFAULT;
+
 		/*
 		 * We must not return kernel stack data.
 		 * due to padding, it's not enough
@@ -497,7 +499,7 @@
 		msginfo.msgmnb = ns->msg_ctlmnb;
 		msginfo.msgssz = MSGSSZ;
 		msginfo.msgseg = MSGSEG;
-		down_read(&msg_ids(ns).rw_mutex);
+		down_read(&msg_ids(ns).rwsem);
 		if (cmd == MSG_INFO) {
 			msginfo.msgpool = msg_ids(ns).in_use;
 			msginfo.msgmap = atomic_read(&ns->msg_hdrs);
@@ -508,12 +510,13 @@
 			msginfo.msgtql = MSGTQL;
 		}
 		max_id = ipc_get_maxid(&msg_ids(ns));
-		up_read(&msg_ids(ns).rw_mutex);
+		up_read(&msg_ids(ns).rwsem);
 		if (copy_to_user(buf, &msginfo, sizeof(struct msginfo)))
 			return -EFAULT;
 		return (max_id < 0) ? 0 : max_id;
 	}
-	case MSG_STAT:	/* msqid is an index rather than a msg queue id */
+
+	case MSG_STAT:
 	case IPC_STAT:
 	{
 		struct msqid64_ds tbuf;
@@ -522,17 +525,25 @@
 		if (!buf)
 			return -EFAULT;
 
+		memset(&tbuf, 0, sizeof(tbuf));
+
+		rcu_read_lock();
 		if (cmd == MSG_STAT) {
-			msq = msg_lock(ns, msqid);
-			if (IS_ERR(msq))
-				return PTR_ERR(msq);
+			msq = msq_obtain_object(ns, msqid);
+			if (IS_ERR(msq)) {
+				err = PTR_ERR(msq);
+				goto out_unlock;
+			}
 			success_return = msq->q_perm.id;
 		} else {
-			msq = msg_lock_check(ns, msqid);
-			if (IS_ERR(msq))
-				return PTR_ERR(msq);
+			msq = msq_obtain_object_check(ns, msqid);
+			if (IS_ERR(msq)) {
+				err = PTR_ERR(msq);
+				goto out_unlock;
+			}
 			success_return = 0;
 		}
+
 		err = -EACCES;
 		if (ipcperms(ns, &msq->q_perm, S_IRUGO))
 			goto out_unlock;
@@ -541,8 +552,6 @@
 		if (err)
 			goto out_unlock;
 
-		memset(&tbuf, 0, sizeof(tbuf));
-
 		kernel_to_ipc64_perm(&msq->q_perm, &tbuf.msg_perm);
 		tbuf.msg_stime  = msq->q_stime;
 		tbuf.msg_rtime  = msq->q_rtime;
@@ -552,22 +561,46 @@
 		tbuf.msg_qbytes = msq->q_qbytes;
 		tbuf.msg_lspid  = msq->q_lspid;
 		tbuf.msg_lrpid  = msq->q_lrpid;
-		msg_unlock(msq);
+		rcu_read_unlock();
+
 		if (copy_msqid_to_user(buf, &tbuf, version))
 			return -EFAULT;
 		return success_return;
 	}
+
+	default:
+		return -EINVAL;
+	}
+
+	return err;
+out_unlock:
+	rcu_read_unlock();
+	return err;
+}
+
+SYSCALL_DEFINE3(msgctl, int, msqid, int, cmd, struct msqid_ds __user *, buf)
+{
+	int version;
+	struct ipc_namespace *ns;
+
+	if (msqid < 0 || cmd < 0)
+		return -EINVAL;
+
+	version = ipc_parse_version(&cmd);
+	ns = current->nsproxy->ipc_ns;
+
+	switch (cmd) {
+	case IPC_INFO:
+	case MSG_INFO:
+	case MSG_STAT:	/* msqid is an index rather than a msg queue id */
+	case IPC_STAT:
+		return msgctl_nolock(ns, msqid, cmd, version, buf);
 	case IPC_SET:
 	case IPC_RMID:
-		err = msgctl_down(ns, msqid, cmd, buf, version);
-		return err;
+		return msgctl_down(ns, msqid, cmd, buf, version);
 	default:
 		return  -EINVAL;
 	}
-
-out_unlock:
-	msg_unlock(msq);
-	return err;
 }
 
 static int testmsg(struct msg_msg *msg, long type, int mode)
@@ -645,22 +678,31 @@
 	msg->m_type = mtype;
 	msg->m_ts = msgsz;
 
-	msq = msg_lock_check(ns, msqid);
+	rcu_read_lock();
+	msq = msq_obtain_object_check(ns, msqid);
 	if (IS_ERR(msq)) {
 		err = PTR_ERR(msq);
-		goto out_free;
+		goto out_unlock1;
 	}
 
+	ipc_lock_object(&msq->q_perm);
+
 	for (;;) {
 		struct msg_sender s;
 
 		err = -EACCES;
 		if (ipcperms(ns, &msq->q_perm, S_IWUGO))
-			goto out_unlock_free;
+			goto out_unlock0;
+
+		/* raced with RMID? */
+		if (msq->q_perm.deleted) {
+			err = -EIDRM;
+			goto out_unlock0;
+		}
 
 		err = security_msg_queue_msgsnd(msq, msg, msgflg);
 		if (err)
-			goto out_unlock_free;
+			goto out_unlock0;
 
 		if (msgsz + msq->q_cbytes <= msq->q_qbytes &&
 				1 + msq->q_qnum <= msq->q_qbytes) {
@@ -670,32 +712,35 @@
 		/* queue full, wait: */
 		if (msgflg & IPC_NOWAIT) {
 			err = -EAGAIN;
-			goto out_unlock_free;
+			goto out_unlock0;
 		}
+
 		ss_add(msq, &s);
 
 		if (!ipc_rcu_getref(msq)) {
 			err = -EIDRM;
-			goto out_unlock_free;
+			goto out_unlock0;
 		}
 
-		msg_unlock(msq);
+		ipc_unlock_object(&msq->q_perm);
+		rcu_read_unlock();
 		schedule();
 
 		ipc_lock_by_ptr(&msq->q_perm);
 		ipc_rcu_putref(msq, ipc_rcu_free);
 		if (msq->q_perm.deleted) {
 			err = -EIDRM;
-			goto out_unlock_free;
+			goto out_unlock0;
 		}
+
 		ss_del(&s);
 
 		if (signal_pending(current)) {
 			err = -ERESTARTNOHAND;
-			goto out_unlock_free;
+			goto out_unlock0;
 		}
-	}
 
+	}
 	msq->q_lspid = task_tgid_vnr(current);
 	msq->q_stime = get_seconds();
 
@@ -711,9 +756,10 @@
 	err = 0;
 	msg = NULL;
 
-out_unlock_free:
-	msg_unlock(msq);
-out_free:
+out_unlock0:
+	ipc_unlock_object(&msq->q_perm);
+out_unlock1:
+	rcu_read_unlock();
 	if (msg != NULL)
 		free_msg(msg);
 	return err;
@@ -800,7 +846,7 @@
 
 static struct msg_msg *find_msg(struct msg_queue *msq, long *msgtyp, int mode)
 {
-	struct msg_msg *msg;
+	struct msg_msg *msg, *found = NULL;
 	long count = 0;
 
 	list_for_each_entry(msg, &msq->q_messages, m_list) {
@@ -809,6 +855,7 @@
 					       *msgtyp, mode)) {
 			if (mode == SEARCH_LESSEQUAL && msg->m_type != 1) {
 				*msgtyp = msg->m_type - 1;
+				found = msg;
 			} else if (mode == SEARCH_NUMBER) {
 				if (*msgtyp == count)
 					return msg;
@@ -818,24 +865,22 @@
 		}
 	}
 
-	return ERR_PTR(-EAGAIN);
+	return found ?: ERR_PTR(-EAGAIN);
 }
 
-
-long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp,
-	       int msgflg,
+long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgflg,
 	       long (*msg_handler)(void __user *, struct msg_msg *, size_t))
 {
-	struct msg_queue *msq;
-	struct msg_msg *msg;
 	int mode;
+	struct msg_queue *msq;
 	struct ipc_namespace *ns;
-	struct msg_msg *copy = NULL;
+	struct msg_msg *msg, *copy = NULL;
 
 	ns = current->nsproxy->ipc_ns;
 
 	if (msqid < 0 || (long) bufsz < 0)
 		return -EINVAL;
+
 	if (msgflg & MSG_COPY) {
 		copy = prepare_copy(buf, min_t(size_t, bufsz, ns->msg_ctlmax));
 		if (IS_ERR(copy))
@@ -843,8 +888,10 @@
 	}
 	mode = convert_mode(&msgtyp, msgflg);
 
-	msq = msg_lock_check(ns, msqid);
+	rcu_read_lock();
+	msq = msq_obtain_object_check(ns, msqid);
 	if (IS_ERR(msq)) {
+		rcu_read_unlock();
 		free_copy(copy);
 		return PTR_ERR(msq);
 	}
@@ -854,10 +901,17 @@
 
 		msg = ERR_PTR(-EACCES);
 		if (ipcperms(ns, &msq->q_perm, S_IRUGO))
-			goto out_unlock;
+			goto out_unlock1;
+
+		ipc_lock_object(&msq->q_perm);
+
+		/* raced with RMID? */
+		if (msq->q_perm.deleted) {
+			msg = ERR_PTR(-EIDRM);
+			goto out_unlock0;
+		}
 
 		msg = find_msg(msq, &msgtyp, mode);
-
 		if (!IS_ERR(msg)) {
 			/*
 			 * Found a suitable message.
@@ -865,7 +919,7 @@
 			 */
 			if ((bufsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) {
 				msg = ERR_PTR(-E2BIG);
-				goto out_unlock;
+				goto out_unlock0;
 			}
 			/*
 			 * If we are copying, then do not unlink message and do
@@ -873,8 +927,9 @@
 			 */
 			if (msgflg & MSG_COPY) {
 				msg = copy_msg(msg, copy);
-				goto out_unlock;
+				goto out_unlock0;
 			}
+
 			list_del(&msg->m_list);
 			msq->q_qnum--;
 			msq->q_rtime = get_seconds();
@@ -883,14 +938,16 @@
 			atomic_sub(msg->m_ts, &ns->msg_bytes);
 			atomic_dec(&ns->msg_hdrs);
 			ss_wakeup(&msq->q_senders, 0);
-			msg_unlock(msq);
-			break;
+
+			goto out_unlock0;
 		}
+
 		/* No message waiting. Wait for a message */
 		if (msgflg & IPC_NOWAIT) {
 			msg = ERR_PTR(-ENOMSG);
-			goto out_unlock;
+			goto out_unlock0;
 		}
+
 		list_add_tail(&msr_d.r_list, &msq->q_receivers);
 		msr_d.r_tsk = current;
 		msr_d.r_msgtype = msgtyp;
@@ -901,8 +958,9 @@
 			msr_d.r_maxsize = bufsz;
 		msr_d.r_msg = ERR_PTR(-EAGAIN);
 		current->state = TASK_INTERRUPTIBLE;
-		msg_unlock(msq);
 
+		ipc_unlock_object(&msq->q_perm);
+		rcu_read_unlock();
 		schedule();
 
 		/* Lockless receive, part 1:
@@ -913,7 +971,7 @@
 		 * Prior to destruction, expunge_all(-EIRDM) changes r_msg.
 		 * Thus if r_msg is -EAGAIN, then the queue not yet destroyed.
 		 * rcu_read_lock() prevents preemption between reading r_msg
-		 * and the spin_lock() inside ipc_lock_by_ptr().
+		 * and acquiring the q_perm.lock in ipc_lock_object().
 		 */
 		rcu_read_lock();
 
@@ -932,32 +990,34 @@
 		 * If there is a message or an error then accept it without
 		 * locking.
 		 */
-		if (msg != ERR_PTR(-EAGAIN)) {
-			rcu_read_unlock();
-			break;
-		}
+		if (msg != ERR_PTR(-EAGAIN))
+			goto out_unlock1;
 
 		/* Lockless receive, part 3:
 		 * Acquire the queue spinlock.
 		 */
-		ipc_lock_by_ptr(&msq->q_perm);
-		rcu_read_unlock();
+		ipc_lock_object(&msq->q_perm);
 
 		/* Lockless receive, part 4:
 		 * Repeat test after acquiring the spinlock.
 		 */
 		msg = (struct msg_msg*)msr_d.r_msg;
 		if (msg != ERR_PTR(-EAGAIN))
-			goto out_unlock;
+			goto out_unlock0;
 
 		list_del(&msr_d.r_list);
 		if (signal_pending(current)) {
 			msg = ERR_PTR(-ERESTARTNOHAND);
-out_unlock:
-			msg_unlock(msq);
-			break;
+			goto out_unlock0;
 		}
+
+		ipc_unlock_object(&msq->q_perm);
 	}
+
+out_unlock0:
+	ipc_unlock_object(&msq->q_perm);
+out_unlock1:
+	rcu_read_unlock();
 	if (IS_ERR(msg)) {
 		free_copy(copy);
 		return PTR_ERR(msg);
diff --git a/ipc/namespace.c b/ipc/namespace.c
index 7ee61bf..aba9a58 100644
--- a/ipc/namespace.c
+++ b/ipc/namespace.c
@@ -81,7 +81,7 @@
 	int next_id;
 	int total, in_use;
 
-	down_write(&ids->rw_mutex);
+	down_write(&ids->rwsem);
 
 	in_use = ids->in_use;
 
@@ -89,11 +89,12 @@
 		perm = idr_find(&ids->ipcs_idr, next_id);
 		if (perm == NULL)
 			continue;
-		ipc_lock_by_ptr(perm);
+		rcu_read_lock();
+		ipc_lock_object(perm);
 		free(ns, perm);
 		total++;
 	}
-	up_write(&ids->rw_mutex);
+	up_write(&ids->rwsem);
 }
 
 static void free_ipc_ns(struct ipc_namespace *ns)
diff --git a/ipc/sem.c b/ipc/sem.c
index 3e76b2a..5a6b362 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -95,8 +95,12 @@
 	int	semval;		/* current value */
 	int	sempid;		/* pid of last operation */
 	spinlock_t	lock;	/* spinlock for fine-grained semtimedop */
-	struct list_head sem_pending; /* pending single-sop operations */
-};
+	struct list_head pending_alter; /* pending single-sop operations */
+					/* that alter the semaphore */
+	struct list_head pending_const; /* pending single-sop operations */
+					/* that do not alter the semaphore*/
+	time_t	sem_otime;	/* candidate for sem_otime */
+} ____cacheline_aligned_in_smp;
 
 /* One queue for each sleeping process in the system. */
 struct sem_queue {
@@ -150,12 +154,15 @@
 #define SEMOPM_FAST	64  /* ~ 372 bytes on stack */
 
 /*
- * linked list protection:
+ * Locking:
  *	sem_undo.id_next,
- *	sem_array.sem_pending{,last},
- *	sem_array.sem_undo: sem_lock() for read/write
+ *	sem_array.complex_count,
+ *	sem_array.pending{_alter,_cont},
+ *	sem_array.sem_undo: global sem_lock() for read/write
  *	sem_undo.proc_next: only "current" is allowed to read/write that field.
  *	
+ *	sem_array.sem_base[i].pending_{const,alter}:
+ *		global or semaphore sem_lock() for read/write
  */
 
 #define sc_semmsl	sem_ctls[0]
@@ -204,71 +211,89 @@
  * Otherwise, lock the entire semaphore array, since we either have
  * multiple semaphores in our own semops, or we need to look at
  * semaphores from other pending complex operations.
- *
- * Carefully guard against sma->complex_count changing between zero
- * and non-zero while we are spinning for the lock. The value of
- * sma->complex_count cannot change while we are holding the lock,
- * so sem_unlock should be fine.
- *
- * The global lock path checks that all the local locks have been released,
- * checking each local lock once. This means that the local lock paths
- * cannot start their critical sections while the global lock is held.
  */
 static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
 			      int nsops)
 {
-	int locknum;
- again:
-	if (nsops == 1 && !sma->complex_count) {
-		struct sem *sem = sma->sem_base + sops->sem_num;
+	struct sem *sem;
 
-		/* Lock just the semaphore we are interested in. */
+	if (nsops != 1) {
+		/* Complex operation - acquire a full lock */
+		ipc_lock_object(&sma->sem_perm);
+
+		/* And wait until all simple ops that are processed
+		 * right now have dropped their locks.
+		 */
+		sem_wait_array(sma);
+		return -1;
+	}
+
+	/*
+	 * Only one semaphore affected - try to optimize locking.
+	 * The rules are:
+	 * - optimized locking is possible if no complex operation
+	 *   is either enqueued or processed right now.
+	 * - The test for enqueued complex ops is simple:
+	 *      sma->complex_count != 0
+	 * - Testing for complex ops that are processed right now is
+	 *   a bit more difficult. Complex ops acquire the full lock
+	 *   and first wait that the running simple ops have completed.
+	 *   (see above)
+	 *   Thus: If we own a simple lock and the global lock is free
+	 *	and complex_count is now 0, then it will stay 0 and
+	 *	thus just locking sem->lock is sufficient.
+	 */
+	sem = sma->sem_base + sops->sem_num;
+
+	if (sma->complex_count == 0) {
+		/*
+		 * It appears that no complex operation is around.
+		 * Acquire the per-semaphore lock.
+		 */
 		spin_lock(&sem->lock);
 
-		/*
-		 * If sma->complex_count was set while we were spinning,
-		 * we may need to look at things we did not lock here.
-		 */
-		if (unlikely(sma->complex_count)) {
-			spin_unlock(&sem->lock);
-			goto lock_array;
-		}
+		/* Then check that the global lock is free */
+		if (!spin_is_locked(&sma->sem_perm.lock)) {
+			/* spin_is_locked() is not a memory barrier */
+			smp_mb();
 
-		/*
-		 * Another process is holding the global lock on the
-		 * sem_array; we cannot enter our critical section,
-		 * but have to wait for the global lock to be released.
-		 */
-		if (unlikely(spin_is_locked(&sma->sem_perm.lock))) {
-			spin_unlock(&sem->lock);
-			spin_unlock_wait(&sma->sem_perm.lock);
-			goto again;
+			/* Now repeat the test of complex_count:
+			 * It can't change anymore until we drop sem->lock.
+			 * Thus: if is now 0, then it will stay 0.
+			 */
+			if (sma->complex_count == 0) {
+				/* fast path successful! */
+				return sops->sem_num;
+			}
 		}
-
-		locknum = sops->sem_num;
-	} else {
-		int i;
-		/*
-		 * Lock the semaphore array, and wait for all of the
-		 * individual semaphore locks to go away.  The code
-		 * above ensures no new single-lock holders will enter
-		 * their critical section while the array lock is held.
-		 */
- lock_array:
-		spin_lock(&sma->sem_perm.lock);
-		for (i = 0; i < sma->sem_nsems; i++) {
-			struct sem *sem = sma->sem_base + i;
-			spin_unlock_wait(&sem->lock);
-		}
-		locknum = -1;
+		spin_unlock(&sem->lock);
 	}
-	return locknum;
+
+	/* slow path: acquire the full lock */
+	ipc_lock_object(&sma->sem_perm);
+
+	if (sma->complex_count == 0) {
+		/* False alarm:
+		 * There is no complex operation, thus we can switch
+		 * back to the fast path.
+		 */
+		spin_lock(&sem->lock);
+		ipc_unlock_object(&sma->sem_perm);
+		return sops->sem_num;
+	} else {
+		/* Not a false alarm, thus complete the sequence for a
+		 * full lock.
+		 */
+		sem_wait_array(sma);
+		return -1;
+	}
 }
 
 static inline void sem_unlock(struct sem_array *sma, int locknum)
 {
 	if (locknum == -1) {
-		spin_unlock(&sma->sem_perm.lock);
+		unmerge_queues(sma);
+		ipc_unlock_object(&sma->sem_perm);
 	} else {
 		struct sem *sem = sma->sem_base + locknum;
 		spin_unlock(&sem->lock);
@@ -276,7 +301,7 @@
 }
 
 /*
- * sem_lock_(check_) routines are called in the paths where the rw_mutex
+ * sem_lock_(check_) routines are called in the paths where the rwsem
  * is not held.
  *
  * The caller holds the RCU read lock.
@@ -341,7 +366,7 @@
  * Without the check/retry algorithm a lockless wakeup is possible:
  * - queue.status is initialized to -EINTR before blocking.
  * - wakeup is performed by
- *	* unlinking the queue entry from sma->sem_pending
+ *	* unlinking the queue entry from the pending list
  *	* setting queue.status to IN_WAKEUP
  *	  This is the notification for the blocked thread that a
  *	  result value is imminent.
@@ -375,7 +400,7 @@
  * @ns: namespace
  * @params: ptr to the structure that contains key, semflg and nsems
  *
- * Called with sem_ids.rw_mutex held (as a writer)
+ * Called with sem_ids.rwsem held (as a writer)
  */
 
 static int newary(struct ipc_namespace *ns, struct ipc_params *params)
@@ -414,12 +439,14 @@
 	sma->sem_base = (struct sem *) &sma[1];
 
 	for (i = 0; i < nsems; i++) {
-		INIT_LIST_HEAD(&sma->sem_base[i].sem_pending);
+		INIT_LIST_HEAD(&sma->sem_base[i].pending_alter);
+		INIT_LIST_HEAD(&sma->sem_base[i].pending_const);
 		spin_lock_init(&sma->sem_base[i].lock);
 	}
 
 	sma->complex_count = 0;
-	INIT_LIST_HEAD(&sma->sem_pending);
+	INIT_LIST_HEAD(&sma->pending_alter);
+	INIT_LIST_HEAD(&sma->pending_const);
 	INIT_LIST_HEAD(&sma->list_id);
 	sma->sem_nsems = nsems;
 	sma->sem_ctime = get_seconds();
@@ -439,7 +466,7 @@
 
 
 /*
- * Called with sem_ids.rw_mutex and ipcp locked.
+ * Called with sem_ids.rwsem and ipcp locked.
  */
 static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg)
 {
@@ -450,7 +477,7 @@
 }
 
 /*
- * Called with sem_ids.rw_mutex and ipcp locked.
+ * Called with sem_ids.rwsem and ipcp locked.
  */
 static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
 				struct ipc_params *params)
@@ -486,12 +513,19 @@
 	return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
 }
 
-/*
- * Determine whether a sequence of semaphore operations would succeed
- * all at once. Return 0 if yes, 1 if need to sleep, else return error code.
+/** perform_atomic_semop - Perform (if possible) a semaphore operation
+ * @sma: semaphore array
+ * @sops: array with operations that should be checked
+ * @nsems: number of sops
+ * @un: undo array
+ * @pid: pid that did the change
+ *
+ * Returns 0 if the operation was possible.
+ * Returns 1 if the operation is impossible, the caller must sleep.
+ * Negative values are error codes.
  */
 
-static int try_atomic_semop (struct sem_array * sma, struct sembuf * sops,
+static int perform_atomic_semop(struct sem_array *sma, struct sembuf *sops,
 			     int nsops, struct sem_undo *un, int pid)
 {
 	int result, sem_op;
@@ -613,60 +647,132 @@
  * update_queue is O(N^2) when it restarts scanning the whole queue of
  * waiting operations. Therefore this function checks if the restart is
  * really necessary. It is called after a previously waiting operation
- * was completed.
+ * modified the array.
+ * Note that wait-for-zero operations are handled without restart.
  */
 static int check_restart(struct sem_array *sma, struct sem_queue *q)
 {
-	struct sem *curr;
-	struct sem_queue *h;
-
-	/* if the operation didn't modify the array, then no restart */
-	if (q->alter == 0)
-		return 0;
-
-	/* pending complex operations are too difficult to analyse */
-	if (sma->complex_count)
+	/* pending complex alter operations are too difficult to analyse */
+	if (!list_empty(&sma->pending_alter))
 		return 1;
 
 	/* we were a sleeping complex operation. Too difficult */
 	if (q->nsops > 1)
 		return 1;
 
-	curr = sma->sem_base + q->sops[0].sem_num;
+	/* It is impossible that someone waits for the new value:
+	 * - complex operations always restart.
+	 * - wait-for-zero are handled seperately.
+	 * - q is a previously sleeping simple operation that
+	 *   altered the array. It must be a decrement, because
+	 *   simple increments never sleep.
+	 * - If there are older (higher priority) decrements
+	 *   in the queue, then they have observed the original
+	 *   semval value and couldn't proceed. The operation
+	 *   decremented to value - thus they won't proceed either.
+	 */
+	return 0;
+}
 
-	/* No-one waits on this queue */
-	if (list_empty(&curr->sem_pending))
-		return 0;
+/**
+ * wake_const_ops(sma, semnum, pt) - Wake up non-alter tasks
+ * @sma: semaphore array.
+ * @semnum: semaphore that was modified.
+ * @pt: list head for the tasks that must be woken up.
+ *
+ * wake_const_ops must be called after a semaphore in a semaphore array
+ * was set to 0. If complex const operations are pending, wake_const_ops must
+ * be called with semnum = -1, as well as with the number of each modified
+ * semaphore.
+ * The tasks that must be woken up are added to @pt. The return code
+ * is stored in q->pid.
+ * The function returns 1 if at least one operation was completed successfully.
+ */
+static int wake_const_ops(struct sem_array *sma, int semnum,
+				struct list_head *pt)
+{
+	struct sem_queue *q;
+	struct list_head *walk;
+	struct list_head *pending_list;
+	int semop_completed = 0;
 
-	/* the new semaphore value */
-	if (curr->semval) {
-		/* It is impossible that someone waits for the new value:
-		 * - q is a previously sleeping simple operation that
-		 *   altered the array. It must be a decrement, because
-		 *   simple increments never sleep.
-		 * - The value is not 0, thus wait-for-zero won't proceed.
-		 * - If there are older (higher priority) decrements
-		 *   in the queue, then they have observed the original
-		 *   semval value and couldn't proceed. The operation
-		 *   decremented to value - thus they won't proceed either.
+	if (semnum == -1)
+		pending_list = &sma->pending_const;
+	else
+		pending_list = &sma->sem_base[semnum].pending_const;
+
+	walk = pending_list->next;
+	while (walk != pending_list) {
+		int error;
+
+		q = container_of(walk, struct sem_queue, list);
+		walk = walk->next;
+
+		error = perform_atomic_semop(sma, q->sops, q->nsops,
+						 q->undo, q->pid);
+
+		if (error <= 0) {
+			/* operation completed, remove from queue & wakeup */
+
+			unlink_queue(sma, q);
+
+			wake_up_sem_queue_prepare(pt, q, error);
+			if (error == 0)
+				semop_completed = 1;
+		}
+	}
+	return semop_completed;
+}
+
+/**
+ * do_smart_wakeup_zero(sma, sops, nsops, pt) - wakeup all wait for zero tasks
+ * @sma: semaphore array
+ * @sops: operations that were performed
+ * @nsops: number of operations
+ * @pt: list head of the tasks that must be woken up.
+ *
+ * do_smart_wakeup_zero() checks all required queue for wait-for-zero
+ * operations, based on the actual changes that were performed on the
+ * semaphore array.
+ * The function returns 1 if at least one operation was completed successfully.
+ */
+static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
+					int nsops, struct list_head *pt)
+{
+	int i;
+	int semop_completed = 0;
+	int got_zero = 0;
+
+	/* first: the per-semaphore queues, if known */
+	if (sops) {
+		for (i = 0; i < nsops; i++) {
+			int num = sops[i].sem_num;
+
+			if (sma->sem_base[num].semval == 0) {
+				got_zero = 1;
+				semop_completed |= wake_const_ops(sma, num, pt);
+			}
+		}
+	} else {
+		/*
+		 * No sops means modified semaphores not known.
+		 * Assume all were changed.
 		 */
-		BUG_ON(q->sops[0].sem_op >= 0);
-		return 0;
+		for (i = 0; i < sma->sem_nsems; i++) {
+			if (sma->sem_base[i].semval == 0) {
+				got_zero = 1;
+				semop_completed |= wake_const_ops(sma, i, pt);
+			}
+		}
 	}
 	/*
-	 * semval is 0. Check if there are wait-for-zero semops.
-	 * They must be the first entries in the per-semaphore queue
+	 * If one of the modified semaphores got 0,
+	 * then check the global queue, too.
 	 */
-	h = list_first_entry(&curr->sem_pending, struct sem_queue, list);
-	BUG_ON(h->nsops != 1);
-	BUG_ON(h->sops[0].sem_num != q->sops[0].sem_num);
+	if (got_zero)
+		semop_completed |= wake_const_ops(sma, -1, pt);
 
-	/* Yes, there is a wait-for-zero semop. Restart */
-	if (h->sops[0].sem_op == 0)
-		return 1;
-
-	/* Again - no-one is waiting for the new value. */
-	return 0;
+	return semop_completed;
 }
 
 
@@ -682,6 +788,8 @@
  * semaphore.
  * The tasks that must be woken up are added to @pt. The return code
  * is stored in q->pid.
+ * The function internally checks if const operations can now succeed.
+ *
  * The function return 1 if at least one semop was completed successfully.
  */
 static int update_queue(struct sem_array *sma, int semnum, struct list_head *pt)
@@ -692,9 +800,9 @@
 	int semop_completed = 0;
 
 	if (semnum == -1)
-		pending_list = &sma->sem_pending;
+		pending_list = &sma->pending_alter;
 	else
-		pending_list = &sma->sem_base[semnum].sem_pending;
+		pending_list = &sma->sem_base[semnum].pending_alter;
 
 again:
 	walk = pending_list->next;
@@ -706,16 +814,15 @@
 
 		/* If we are scanning the single sop, per-semaphore list of
 		 * one semaphore and that semaphore is 0, then it is not
-		 * necessary to scan the "alter" entries: simple increments
+		 * necessary to scan further: simple increments
 		 * that affect only one entry succeed immediately and cannot
 		 * be in the  per semaphore pending queue, and decrements
 		 * cannot be successful if the value is already 0.
 		 */
-		if (semnum != -1 && sma->sem_base[semnum].semval == 0 &&
-				q->alter)
+		if (semnum != -1 && sma->sem_base[semnum].semval == 0)
 			break;
 
-		error = try_atomic_semop(sma, q->sops, q->nsops,
+		error = perform_atomic_semop(sma, q->sops, q->nsops,
 					 q->undo, q->pid);
 
 		/* Does q->sleeper still need to sleep? */
@@ -728,6 +835,7 @@
 			restart = 0;
 		} else {
 			semop_completed = 1;
+			do_smart_wakeup_zero(sma, q->sops, q->nsops, pt);
 			restart = check_restart(sma, q);
 		}
 
@@ -739,6 +847,24 @@
 }
 
 /**
+ * set_semotime(sma, sops) - set sem_otime
+ * @sma: semaphore array
+ * @sops: operations that modified the array, may be NULL
+ *
+ * sem_otime is replicated to avoid cache line trashing.
+ * This function sets one instance to the current time.
+ */
+static void set_semotime(struct sem_array *sma, struct sembuf *sops)
+{
+	if (sops == NULL) {
+		sma->sem_base[0].sem_otime = get_seconds();
+	} else {
+		sma->sem_base[sops[0].sem_num].sem_otime =
+							get_seconds();
+	}
+}
+
+/**
  * do_smart_update(sma, sops, nsops, otime, pt) - optimized update_queue
  * @sma: semaphore array
  * @sops: operations that were performed
@@ -746,8 +872,8 @@
  * @otime: force setting otime
  * @pt: list head of the tasks that must be woken up.
  *
- * do_smart_update() does the required called to update_queue, based on the
- * actual changes that were performed on the semaphore array.
+ * do_smart_update() does the required calls to update_queue and wakeup_zero,
+ * based on the actual changes that were performed on the semaphore array.
  * Note that the function does not do the actual wake-up: the caller is
  * responsible for calling wake_up_sem_queue_do(@pt).
  * It is safe to perform this call after dropping all locks.
@@ -756,52 +882,42 @@
 			int otime, struct list_head *pt)
 {
 	int i;
-	int progress;
 
-	progress = 1;
-retry_global:
-	if (sma->complex_count) {
-		if (update_queue(sma, -1, pt)) {
-			progress = 1;
-			otime = 1;
-			sops = NULL;
-		}
-	}
-	if (!progress)
-		goto done;
+	otime |= do_smart_wakeup_zero(sma, sops, nsops, pt);
 
-	if (!sops) {
-		/* No semops; something special is going on. */
-		for (i = 0; i < sma->sem_nsems; i++) {
-			if (update_queue(sma, i, pt)) {
-				otime = 1;
-				progress = 1;
+	if (!list_empty(&sma->pending_alter)) {
+		/* semaphore array uses the global queue - just process it. */
+		otime |= update_queue(sma, -1, pt);
+	} else {
+		if (!sops) {
+			/*
+			 * No sops, thus the modified semaphores are not
+			 * known. Check all.
+			 */
+			for (i = 0; i < sma->sem_nsems; i++)
+				otime |= update_queue(sma, i, pt);
+		} else {
+			/*
+			 * Check the semaphores that were increased:
+			 * - No complex ops, thus all sleeping ops are
+			 *   decrease.
+			 * - if we decreased the value, then any sleeping
+			 *   semaphore ops wont be able to run: If the
+			 *   previous value was too small, then the new
+			 *   value will be too small, too.
+			 */
+			for (i = 0; i < nsops; i++) {
+				if (sops[i].sem_op > 0) {
+					otime |= update_queue(sma,
+							sops[i].sem_num, pt);
+				}
 			}
 		}
-		goto done_checkretry;
 	}
-
-	/* Check the semaphores that were modified. */
-	for (i = 0; i < nsops; i++) {
-		if (sops[i].sem_op > 0 ||
-			(sops[i].sem_op < 0 &&
-				sma->sem_base[sops[i].sem_num].semval == 0))
-			if (update_queue(sma, sops[i].sem_num, pt)) {
-				otime = 1;
-				progress = 1;
-			}
-	}
-done_checkretry:
-	if (progress) {
-		progress = 0;
-		goto retry_global;
-	}
-done:
 	if (otime)
-		sma->sem_otime = get_seconds();
+		set_semotime(sma, sops);
 }
 
-
 /* The following counts are associated to each semaphore:
  *   semncnt        number of tasks waiting on semval being nonzero
  *   semzcnt        number of tasks waiting on semval being zero
@@ -817,14 +933,14 @@
 	struct sem_queue * q;
 
 	semncnt = 0;
-	list_for_each_entry(q, &sma->sem_base[semnum].sem_pending, list) {
+	list_for_each_entry(q, &sma->sem_base[semnum].pending_alter, list) {
 		struct sembuf * sops = q->sops;
 		BUG_ON(sops->sem_num != semnum);
 		if ((sops->sem_op < 0) && !(sops->sem_flg & IPC_NOWAIT))
 			semncnt++;
 	}
 
-	list_for_each_entry(q, &sma->sem_pending, list) {
+	list_for_each_entry(q, &sma->pending_alter, list) {
 		struct sembuf * sops = q->sops;
 		int nsops = q->nsops;
 		int i;
@@ -843,14 +959,14 @@
 	struct sem_queue * q;
 
 	semzcnt = 0;
-	list_for_each_entry(q, &sma->sem_base[semnum].sem_pending, list) {
+	list_for_each_entry(q, &sma->sem_base[semnum].pending_const, list) {
 		struct sembuf * sops = q->sops;
 		BUG_ON(sops->sem_num != semnum);
 		if ((sops->sem_op == 0) && !(sops->sem_flg & IPC_NOWAIT))
 			semzcnt++;
 	}
 
-	list_for_each_entry(q, &sma->sem_pending, list) {
+	list_for_each_entry(q, &sma->pending_const, list) {
 		struct sembuf * sops = q->sops;
 		int nsops = q->nsops;
 		int i;
@@ -863,8 +979,8 @@
 	return semzcnt;
 }
 
-/* Free a semaphore set. freeary() is called with sem_ids.rw_mutex locked
- * as a writer and the spinlock for this semaphore set hold. sem_ids.rw_mutex
+/* Free a semaphore set. freeary() is called with sem_ids.rwsem locked
+ * as a writer and the spinlock for this semaphore set hold. sem_ids.rwsem
  * remains locked on exit.
  */
 static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
@@ -876,7 +992,7 @@
 	int i;
 
 	/* Free the existing undo structures for this semaphore set.  */
-	assert_spin_locked(&sma->sem_perm.lock);
+	ipc_assert_locked_object(&sma->sem_perm);
 	list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
 		list_del(&un->list_id);
 		spin_lock(&un->ulp->lock);
@@ -888,13 +1004,22 @@
 
 	/* Wake up all pending processes and let them fail with EIDRM. */
 	INIT_LIST_HEAD(&tasks);
-	list_for_each_entry_safe(q, tq, &sma->sem_pending, list) {
+	list_for_each_entry_safe(q, tq, &sma->pending_const, list) {
+		unlink_queue(sma, q);
+		wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
+	}
+
+	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
 		unlink_queue(sma, q);
 		wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
 	}
 	for (i = 0; i < sma->sem_nsems; i++) {
 		struct sem *sem = sma->sem_base + i;
-		list_for_each_entry_safe(q, tq, &sem->sem_pending, list) {
+		list_for_each_entry_safe(q, tq, &sem->pending_const, list) {
+			unlink_queue(sma, q);
+			wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
+		}
+		list_for_each_entry_safe(q, tq, &sem->pending_alter, list) {
 			unlink_queue(sma, q);
 			wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
 		}
@@ -934,6 +1059,21 @@
 	}
 }
 
+static time_t get_semotime(struct sem_array *sma)
+{
+	int i;
+	time_t res;
+
+	res = sma->sem_base[0].sem_otime;
+	for (i = 1; i < sma->sem_nsems; i++) {
+		time_t to = sma->sem_base[i].sem_otime;
+
+		if (to > res)
+			res = to;
+	}
+	return res;
+}
+
 static int semctl_nolock(struct ipc_namespace *ns, int semid,
 			 int cmd, int version, void __user *p)
 {
@@ -960,7 +1100,7 @@
 		seminfo.semmnu = SEMMNU;
 		seminfo.semmap = SEMMAP;
 		seminfo.semume = SEMUME;
-		down_read(&sem_ids(ns).rw_mutex);
+		down_read(&sem_ids(ns).rwsem);
 		if (cmd == SEM_INFO) {
 			seminfo.semusz = sem_ids(ns).in_use;
 			seminfo.semaem = ns->used_sems;
@@ -969,7 +1109,7 @@
 			seminfo.semaem = SEMAEM;
 		}
 		max_id = ipc_get_maxid(&sem_ids(ns));
-		up_read(&sem_ids(ns).rw_mutex);
+		up_read(&sem_ids(ns).rwsem);
 		if (copy_to_user(p, &seminfo, sizeof(struct seminfo))) 
 			return -EFAULT;
 		return (max_id < 0) ? 0: max_id;
@@ -1007,9 +1147,9 @@
 			goto out_unlock;
 
 		kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
-		tbuf.sem_otime  = sma->sem_otime;
-		tbuf.sem_ctime  = sma->sem_ctime;
-		tbuf.sem_nsems  = sma->sem_nsems;
+		tbuf.sem_otime = get_semotime(sma);
+		tbuf.sem_ctime = sma->sem_ctime;
+		tbuf.sem_nsems = sma->sem_nsems;
 		rcu_read_unlock();
 		if (copy_semid_to_user(p, &tbuf, version))
 			return -EFAULT;
@@ -1073,7 +1213,7 @@
 
 	curr = &sma->sem_base[semnum];
 
-	assert_spin_locked(&sma->sem_perm.lock);
+	ipc_assert_locked_object(&sma->sem_perm);
 	list_for_each_entry(un, &sma->list_id, list_id)
 		un->semadj[semnum] = 0;
 
@@ -1202,7 +1342,7 @@
 		for (i = 0; i < nsems; i++)
 			sma->sem_base[i].semval = sem_io[i];
 
-		assert_spin_locked(&sma->sem_perm.lock);
+		ipc_assert_locked_object(&sma->sem_perm);
 		list_for_each_entry(un, &sma->list_id, list_id) {
 			for (i = 0; i < nsems; i++)
 				un->semadj[i] = 0;
@@ -1275,9 +1415,9 @@
 }
 
 /*
- * This function handles some semctl commands which require the rw_mutex
+ * This function handles some semctl commands which require the rwsem
  * to be held in write mode.
- * NOTE: no locks must be held, the rw_mutex is taken inside this function.
+ * NOTE: no locks must be held, the rwsem is taken inside this function.
  */
 static int semctl_down(struct ipc_namespace *ns, int semid,
 		       int cmd, int version, void __user *p)
@@ -1292,42 +1432,46 @@
 			return -EFAULT;
 	}
 
+	down_write(&sem_ids(ns).rwsem);
+	rcu_read_lock();
+
 	ipcp = ipcctl_pre_down_nolock(ns, &sem_ids(ns), semid, cmd,
 				      &semid64.sem_perm, 0);
-	if (IS_ERR(ipcp))
-		return PTR_ERR(ipcp);
+	if (IS_ERR(ipcp)) {
+		err = PTR_ERR(ipcp);
+		goto out_unlock1;
+	}
 
 	sma = container_of(ipcp, struct sem_array, sem_perm);
 
 	err = security_sem_semctl(sma, cmd);
-	if (err) {
-		rcu_read_unlock();
-		goto out_up;
-	}
+	if (err)
+		goto out_unlock1;
 
-	switch(cmd){
+	switch (cmd) {
 	case IPC_RMID:
 		sem_lock(sma, NULL, -1);
+		/* freeary unlocks the ipc object and rcu */
 		freeary(ns, ipcp);
 		goto out_up;
 	case IPC_SET:
 		sem_lock(sma, NULL, -1);
 		err = ipc_update_perm(&semid64.sem_perm, ipcp);
 		if (err)
-			goto out_unlock;
+			goto out_unlock0;
 		sma->sem_ctime = get_seconds();
 		break;
 	default:
-		rcu_read_unlock();
 		err = -EINVAL;
-		goto out_up;
+		goto out_unlock1;
 	}
 
-out_unlock:
+out_unlock0:
 	sem_unlock(sma, -1);
+out_unlock1:
 	rcu_read_unlock();
 out_up:
-	up_write(&sem_ids(ns).rw_mutex);
+	up_write(&sem_ids(ns).rwsem);
 	return err;
 }
 
@@ -1499,7 +1643,7 @@
 	new->semid = semid;
 	assert_spin_locked(&ulp->lock);
 	list_add_rcu(&new->list_proc, &ulp->list_proc);
-	assert_spin_locked(&sma->sem_perm.lock);
+	ipc_assert_locked_object(&sma->sem_perm);
 	list_add(&new->list_id, &sma->list_id);
 	un = new;
 
@@ -1536,7 +1680,6 @@
 	return error;
 }
 
-
 SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
 		unsigned, nsops, const struct timespec __user *, timeout)
 {
@@ -1634,13 +1777,19 @@
 	if (un && un->semid == -1)
 		goto out_unlock_free;
 
-	error = try_atomic_semop (sma, sops, nsops, un, task_tgid_vnr(current));
-	if (error <= 0) {
-		if (alter && error == 0)
+	error = perform_atomic_semop(sma, sops, nsops, un,
+					task_tgid_vnr(current));
+	if (error == 0) {
+		/* If the operation was successful, then do
+		 * the required updates.
+		 */
+		if (alter)
 			do_smart_update(sma, sops, nsops, 1, &tasks);
-
-		goto out_unlock_free;
+		else
+			set_semotime(sma, sops);
 	}
+	if (error <= 0)
+		goto out_unlock_free;
 
 	/* We need to sleep on this operation, so we put the current
 	 * task into the pending queue and go to sleep.
@@ -1656,15 +1805,27 @@
 		struct sem *curr;
 		curr = &sma->sem_base[sops->sem_num];
 
-		if (alter)
-			list_add_tail(&queue.list, &curr->sem_pending);
-		else
-			list_add(&queue.list, &curr->sem_pending);
+		if (alter) {
+			if (sma->complex_count) {
+				list_add_tail(&queue.list,
+						&sma->pending_alter);
+			} else {
+
+				list_add_tail(&queue.list,
+						&curr->pending_alter);
+			}
+		} else {
+			list_add_tail(&queue.list, &curr->pending_const);
+		}
 	} else {
+		if (!sma->complex_count)
+			merge_queues(sma);
+
 		if (alter)
-			list_add_tail(&queue.list, &sma->sem_pending);
+			list_add_tail(&queue.list, &sma->pending_alter);
 		else
-			list_add(&queue.list, &sma->sem_pending);
+			list_add_tail(&queue.list, &sma->pending_const);
+
 		sma->complex_count++;
 	}
 
@@ -1836,7 +1997,7 @@
 		}
 
 		/* remove un from the linked lists */
-		assert_spin_locked(&sma->sem_perm.lock);
+		ipc_assert_locked_object(&sma->sem_perm);
 		list_del(&un->list_id);
 
 		spin_lock(&ulp->lock);
@@ -1885,6 +2046,17 @@
 {
 	struct user_namespace *user_ns = seq_user_ns(s);
 	struct sem_array *sma = it;
+	time_t sem_otime;
+
+	/*
+	 * The proc interface isn't aware of sem_lock(), it calls
+	 * ipc_lock_object() directly (in sysvipc_find_ipc).
+	 * In order to stay compatible with sem_lock(), we must wait until
+	 * all simple semop() calls have left their critical regions.
+	 */
+	sem_wait_array(sma);
+
+	sem_otime = get_semotime(sma);
 
 	return seq_printf(s,
 			  "%10d %10d  %4o %10u %5u %5u %5u %5u %10lu %10lu\n",
@@ -1896,7 +2068,7 @@
 			  from_kgid_munged(user_ns, sma->sem_perm.gid),
 			  from_kuid_munged(user_ns, sma->sem_perm.cuid),
 			  from_kgid_munged(user_ns, sma->sem_perm.cgid),
-			  sma->sem_otime,
+			  sem_otime,
 			  sma->sem_ctime);
 }
 #endif
diff --git a/ipc/shm.c b/ipc/shm.c
index 943e717..c4ba1f3 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -19,6 +19,9 @@
  * namespaces support
  * OpenVZ, SWsoft Inc.
  * Pavel Emelianov <xemul@openvz.org>
+ *
+ * Better ipc lock (kern_ipc_perm.lock) handling
+ * Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013.
  */
 
 #include <linux/slab.h>
@@ -80,8 +83,8 @@
 }
 
 /*
- * Called with shm_ids.rw_mutex (writer) and the shp structure locked.
- * Only shm_ids.rw_mutex remains locked on exit.
+ * Called with shm_ids.rwsem (writer) and the shp structure locked.
+ * Only shm_ids.rwsem remains locked on exit.
  */
 static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
 {
@@ -124,8 +127,28 @@
 				IPC_SHM_IDS, sysvipc_shm_proc_show);
 }
 
+static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
+{
+	struct kern_ipc_perm *ipcp = ipc_obtain_object(&shm_ids(ns), id);
+
+	if (IS_ERR(ipcp))
+		return ERR_CAST(ipcp);
+
+	return container_of(ipcp, struct shmid_kernel, shm_perm);
+}
+
+static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id)
+{
+	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id);
+
+	if (IS_ERR(ipcp))
+		return ERR_CAST(ipcp);
+
+	return container_of(ipcp, struct shmid_kernel, shm_perm);
+}
+
 /*
- * shm_lock_(check_) routines are called in the paths where the rw_mutex
+ * shm_lock_(check_) routines are called in the paths where the rwsem
  * is not necessarily held.
  */
 static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
@@ -141,18 +164,16 @@
 static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
 {
 	rcu_read_lock();
-	spin_lock(&ipcp->shm_perm.lock);
+	ipc_lock_object(&ipcp->shm_perm);
 }
 
-static inline struct shmid_kernel *shm_lock_check(struct ipc_namespace *ns,
-						int id)
+static void shm_rcu_free(struct rcu_head *head)
 {
-	struct kern_ipc_perm *ipcp = ipc_lock_check(&shm_ids(ns), id);
+	struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
+	struct shmid_kernel *shp = ipc_rcu_to_struct(p);
 
-	if (IS_ERR(ipcp))
-		return (struct shmid_kernel *)ipcp;
-
-	return container_of(ipcp, struct shmid_kernel, shm_perm);
+	security_shm_free(shp);
+	ipc_rcu_free(head);
 }
 
 static void shm_rcu_free(struct rcu_head *head)
@@ -191,7 +212,7 @@
  * @ns: namespace
  * @shp: struct to free
  *
- * It has to be called with shp and shm_ids.rw_mutex (writer) locked,
+ * It has to be called with shp and shm_ids.rwsem (writer) locked,
  * but returns with shp unlocked and freed.
  */
 static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
@@ -238,7 +259,7 @@
 	struct shmid_kernel *shp;
 	struct ipc_namespace *ns = sfd->ns;
 
-	down_write(&shm_ids(ns).rw_mutex);
+	down_write(&shm_ids(ns).rwsem);
 	/* remove from the list of attaches of the shm segment */
 	shp = shm_lock(ns, sfd->id);
 	BUG_ON(IS_ERR(shp));
@@ -249,10 +270,10 @@
 		shm_destroy(ns, shp);
 	else
 		shm_unlock(shp);
-	up_write(&shm_ids(ns).rw_mutex);
+	up_write(&shm_ids(ns).rwsem);
 }
 
-/* Called with ns->shm_ids(ns).rw_mutex locked */
+/* Called with ns->shm_ids(ns).rwsem locked */
 static int shm_try_destroy_current(int id, void *p, void *data)
 {
 	struct ipc_namespace *ns = data;
@@ -283,7 +304,7 @@
 	return 0;
 }
 
-/* Called with ns->shm_ids(ns).rw_mutex locked */
+/* Called with ns->shm_ids(ns).rwsem locked */
 static int shm_try_destroy_orphaned(int id, void *p, void *data)
 {
 	struct ipc_namespace *ns = data;
@@ -294,7 +315,7 @@
 	 * We want to destroy segments without users and with already
 	 * exit'ed originating process.
 	 *
-	 * As shp->* are changed under rw_mutex, it's safe to skip shp locking.
+	 * As shp->* are changed under rwsem, it's safe to skip shp locking.
 	 */
 	if (shp->shm_creator != NULL)
 		return 0;
@@ -308,10 +329,10 @@
 
 void shm_destroy_orphaned(struct ipc_namespace *ns)
 {
-	down_write(&shm_ids(ns).rw_mutex);
+	down_write(&shm_ids(ns).rwsem);
 	if (shm_ids(ns).in_use)
 		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
-	up_write(&shm_ids(ns).rw_mutex);
+	up_write(&shm_ids(ns).rwsem);
 }
 
 
@@ -323,10 +344,10 @@
 		return;
 
 	/* Destroy all already created segments, but not mapped yet */
-	down_write(&shm_ids(ns).rw_mutex);
+	down_write(&shm_ids(ns).rwsem);
 	if (shm_ids(ns).in_use)
 		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_current, ns);
-	up_write(&shm_ids(ns).rw_mutex);
+	up_write(&shm_ids(ns).rwsem);
 }
 
 static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
@@ -460,7 +481,7 @@
  * @ns: namespace
  * @params: ptr to the structure that contains key, size and shmflg
  *
- * Called with shm_ids.rw_mutex held as a writer.
+ * Called with shm_ids.rwsem held as a writer.
  */
 
 static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
@@ -552,7 +573,9 @@
 
 	ns->shm_tot += numpages;
 	error = shp->shm_perm.id;
-	shm_unlock(shp);
+
+	ipc_unlock_object(&shp->shm_perm);
+	rcu_read_unlock();
 	return error;
 
 no_id:
@@ -565,7 +588,7 @@
 }
 
 /*
- * Called with shm_ids.rw_mutex and ipcp locked.
+ * Called with shm_ids.rwsem and ipcp locked.
  */
 static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
 {
@@ -576,7 +599,7 @@
 }
 
 /*
- * Called with shm_ids.rw_mutex and ipcp locked.
+ * Called with shm_ids.rwsem and ipcp locked.
  */
 static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
 				struct ipc_params *params)
@@ -689,7 +712,7 @@
 
 /*
  * Calculate and add used RSS and swap pages of a shm.
- * Called with shm_ids.rw_mutex held as a reader
+ * Called with shm_ids.rwsem held as a reader
  */
 static void shm_add_rss_swap(struct shmid_kernel *shp,
 	unsigned long *rss_add, unsigned long *swp_add)
@@ -716,7 +739,7 @@
 }
 
 /*
- * Called with shm_ids.rw_mutex held as a reader
+ * Called with shm_ids.rwsem held as a reader
  */
 static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
 		unsigned long *swp)
@@ -745,9 +768,9 @@
 }
 
 /*
- * This function handles some shmctl commands which require the rw_mutex
+ * This function handles some shmctl commands which require the rwsem
  * to be held in write mode.
- * NOTE: no locks must be held, the rw_mutex is taken inside this function.
+ * NOTE: no locks must be held, the rwsem is taken inside this function.
  */
 static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
 		       struct shmid_ds __user *buf, int version)
@@ -762,58 +785,66 @@
 			return -EFAULT;
 	}
 
-	ipcp = ipcctl_pre_down(ns, &shm_ids(ns), shmid, cmd,
-			       &shmid64.shm_perm, 0);
-	if (IS_ERR(ipcp))
-		return PTR_ERR(ipcp);
+	down_write(&shm_ids(ns).rwsem);
+	rcu_read_lock();
+
+	ipcp = ipcctl_pre_down_nolock(ns, &shm_ids(ns), shmid, cmd,
+				      &shmid64.shm_perm, 0);
+	if (IS_ERR(ipcp)) {
+		err = PTR_ERR(ipcp);
+		goto out_unlock1;
+	}
 
 	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
 
 	err = security_shm_shmctl(shp, cmd);
 	if (err)
-		goto out_unlock;
+		goto out_unlock1;
+
 	switch (cmd) {
 	case IPC_RMID:
+		ipc_lock_object(&shp->shm_perm);
+		/* do_shm_rmid unlocks the ipc object and rcu */
 		do_shm_rmid(ns, ipcp);
 		goto out_up;
 	case IPC_SET:
+		ipc_lock_object(&shp->shm_perm);
 		err = ipc_update_perm(&shmid64.shm_perm, ipcp);
 		if (err)
-			goto out_unlock;
+			goto out_unlock0;
 		shp->shm_ctim = get_seconds();
 		break;
 	default:
 		err = -EINVAL;
+		goto out_unlock1;
 	}
-out_unlock:
-	shm_unlock(shp);
+
+out_unlock0:
+	ipc_unlock_object(&shp->shm_perm);
+out_unlock1:
+	rcu_read_unlock();
 out_up:
-	up_write(&shm_ids(ns).rw_mutex);
+	up_write(&shm_ids(ns).rwsem);
 	return err;
 }
 
-SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
+static int shmctl_nolock(struct ipc_namespace *ns, int shmid,
+			 int cmd, int version, void __user *buf)
 {
+	int err;
 	struct shmid_kernel *shp;
-	int err, version;
-	struct ipc_namespace *ns;
 
-	if (cmd < 0 || shmid < 0) {
-		err = -EINVAL;
-		goto out;
-	}
-
-	version = ipc_parse_version(&cmd);
-	ns = current->nsproxy->ipc_ns;
-
-	switch (cmd) { /* replace with proc interface ? */
-	case IPC_INFO:
-	{
-		struct shminfo64 shminfo;
-
+	/* preliminary security checks for *_INFO */
+	if (cmd == IPC_INFO || cmd == SHM_INFO) {
 		err = security_shm_shmctl(NULL, cmd);
 		if (err)
 			return err;
+	}
+
+	switch (cmd) {
+	case IPC_INFO:
+	{
+		struct shminfo64 shminfo;
 
 		memset(&shminfo, 0, sizeof(shminfo));
 		shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
@@ -824,9 +855,9 @@
 		if(copy_shminfo_to_user (buf, &shminfo, version))
 			return -EFAULT;
 
-		down_read(&shm_ids(ns).rw_mutex);
+		down_read(&shm_ids(ns).rwsem);
 		err = ipc_get_maxid(&shm_ids(ns));
-		up_read(&shm_ids(ns).rw_mutex);
+		up_read(&shm_ids(ns).rwsem);
 
 		if(err<0)
 			err = 0;
@@ -836,19 +867,15 @@
 	{
 		struct shm_info shm_info;
 
-		err = security_shm_shmctl(NULL, cmd);
-		if (err)
-			return err;
-
 		memset(&shm_info, 0, sizeof(shm_info));
-		down_read(&shm_ids(ns).rw_mutex);
+		down_read(&shm_ids(ns).rwsem);
 		shm_info.used_ids = shm_ids(ns).in_use;
 		shm_get_stat (ns, &shm_info.shm_rss, &shm_info.shm_swp);
 		shm_info.shm_tot = ns->shm_tot;
 		shm_info.swap_attempts = 0;
 		shm_info.swap_successes = 0;
 		err = ipc_get_maxid(&shm_ids(ns));
-		up_read(&shm_ids(ns).rw_mutex);
+		up_read(&shm_ids(ns).rwsem);
 		if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
 			err = -EFAULT;
 			goto out;
@@ -863,27 +890,31 @@
 		struct shmid64_ds tbuf;
 		int result;
 
+		rcu_read_lock();
 		if (cmd == SHM_STAT) {
-			shp = shm_lock(ns, shmid);
+			shp = shm_obtain_object(ns, shmid);
 			if (IS_ERR(shp)) {
 				err = PTR_ERR(shp);
-				goto out;
+				goto out_unlock;
 			}
 			result = shp->shm_perm.id;
 		} else {
-			shp = shm_lock_check(ns, shmid);
+			shp = shm_obtain_object_check(ns, shmid);
 			if (IS_ERR(shp)) {
 				err = PTR_ERR(shp);
-				goto out;
+				goto out_unlock;
 			}
 			result = 0;
 		}
+
 		err = -EACCES;
 		if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
 			goto out_unlock;
+
 		err = security_shm_shmctl(shp, cmd);
 		if (err)
 			goto out_unlock;
+
 		memset(&tbuf, 0, sizeof(tbuf));
 		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
 		tbuf.shm_segsz	= shp->shm_segsz;
@@ -893,43 +924,76 @@
 		tbuf.shm_cpid	= shp->shm_cprid;
 		tbuf.shm_lpid	= shp->shm_lprid;
 		tbuf.shm_nattch	= shp->shm_nattch;
-		shm_unlock(shp);
-		if(copy_shmid_to_user (buf, &tbuf, version))
+		rcu_read_unlock();
+
+		if (copy_shmid_to_user(buf, &tbuf, version))
 			err = -EFAULT;
 		else
 			err = result;
 		goto out;
 	}
+	default:
+		return -EINVAL;
+	}
+
+out_unlock:
+	rcu_read_unlock();
+out:
+	return err;
+}
+
+SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
+{
+	struct shmid_kernel *shp;
+	int err, version;
+	struct ipc_namespace *ns;
+
+	if (cmd < 0 || shmid < 0)
+		return -EINVAL;
+
+	version = ipc_parse_version(&cmd);
+	ns = current->nsproxy->ipc_ns;
+
+	switch (cmd) {
+	case IPC_INFO:
+	case SHM_INFO:
+	case SHM_STAT:
+	case IPC_STAT:
+		return shmctl_nolock(ns, shmid, cmd, version, buf);
+	case IPC_RMID:
+	case IPC_SET:
+		return shmctl_down(ns, shmid, cmd, buf, version);
 	case SHM_LOCK:
 	case SHM_UNLOCK:
 	{
 		struct file *shm_file;
 
-		shp = shm_lock_check(ns, shmid);
+		rcu_read_lock();
+		shp = shm_obtain_object_check(ns, shmid);
 		if (IS_ERR(shp)) {
 			err = PTR_ERR(shp);
-			goto out;
+			goto out_unlock1;
 		}
 
 		audit_ipc_obj(&(shp->shm_perm));
+		err = security_shm_shmctl(shp, cmd);
+		if (err)
+			goto out_unlock1;
 
+		ipc_lock_object(&shp->shm_perm);
 		if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
 			kuid_t euid = current_euid();
 			err = -EPERM;
 			if (!uid_eq(euid, shp->shm_perm.uid) &&
 			    !uid_eq(euid, shp->shm_perm.cuid))
-				goto out_unlock;
+				goto out_unlock0;
 			if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK))
-				goto out_unlock;
+				goto out_unlock0;
 		}
 
-		err = security_shm_shmctl(shp, cmd);
-		if (err)
-			goto out_unlock;
-
 		shm_file = shp->shm_file;
 		if (is_file_hugepages(shm_file))
-			goto out_unlock;
+			goto out_unlock0;
 
 		if (cmd == SHM_LOCK) {
 			struct user_struct *user = current_user();
@@ -938,32 +1002,31 @@
 				shp->shm_perm.mode |= SHM_LOCKED;
 				shp->mlock_user = user;
 			}
-			goto out_unlock;
+			goto out_unlock0;
 		}
 
 		/* SHM_UNLOCK */
 		if (!(shp->shm_perm.mode & SHM_LOCKED))
-			goto out_unlock;
+			goto out_unlock0;
 		shmem_lock(shm_file, 0, shp->mlock_user);
 		shp->shm_perm.mode &= ~SHM_LOCKED;
 		shp->mlock_user = NULL;
 		get_file(shm_file);
-		shm_unlock(shp);
+		ipc_unlock_object(&shp->shm_perm);
+		rcu_read_unlock();
 		shmem_unlock_mapping(shm_file->f_mapping);
+
 		fput(shm_file);
-		goto out;
-	}
-	case IPC_RMID:
-	case IPC_SET:
-		err = shmctl_down(ns, shmid, cmd, buf, version);
 		return err;
+	}
 	default:
 		return -EINVAL;
 	}
 
-out_unlock:
-	shm_unlock(shp);
-out:
+out_unlock0:
+	ipc_unlock_object(&shp->shm_perm);
+out_unlock1:
+	rcu_read_unlock();
 	return err;
 }
 
@@ -1031,10 +1094,11 @@
 	 * additional creator id...
 	 */
 	ns = current->nsproxy->ipc_ns;
-	shp = shm_lock_check(ns, shmid);
+	rcu_read_lock();
+	shp = shm_obtain_object_check(ns, shmid);
 	if (IS_ERR(shp)) {
 		err = PTR_ERR(shp);
-		goto out;
+		goto out_unlock;
 	}
 
 	err = -EACCES;
@@ -1045,24 +1109,31 @@
 	if (err)
 		goto out_unlock;
 
+	ipc_lock_object(&shp->shm_perm);
 	path = shp->shm_file->f_path;
 	path_get(&path);
 	shp->shm_nattch++;
 	size = i_size_read(path.dentry->d_inode);
-	shm_unlock(shp);
+	ipc_unlock_object(&shp->shm_perm);
+	rcu_read_unlock();
 
 	err = -ENOMEM;
 	sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
-	if (!sfd)
-		goto out_put_dentry;
+	if (!sfd) {
+		path_put(&path);
+		goto out_nattch;
+	}
 
 	file = alloc_file(&path, f_mode,
 			  is_file_hugepages(shp->shm_file) ?
 				&shm_file_operations_huge :
 				&shm_file_operations);
 	err = PTR_ERR(file);
-	if (IS_ERR(file))
-		goto out_free;
+	if (IS_ERR(file)) {
+		kfree(sfd);
+		path_put(&path);
+		goto out_nattch;
+	}
 
 	file->private_data = sfd;
 	file->f_mapping = shp->shm_file->f_mapping;
@@ -1088,7 +1159,7 @@
 		    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
 			goto invalid;
 	}
-		
+
 	addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate);
 	*raddr = addr;
 	err = 0;
@@ -1103,7 +1174,7 @@
 	fput(file);
 
 out_nattch:
-	down_write(&shm_ids(ns).rw_mutex);
+	down_write(&shm_ids(ns).rwsem);
 	shp = shm_lock(ns, shmid);
 	BUG_ON(IS_ERR(shp));
 	shp->shm_nattch--;
@@ -1111,20 +1182,13 @@
 		shm_destroy(ns, shp);
 	else
 		shm_unlock(shp);
-	up_write(&shm_ids(ns).rw_mutex);
-
-out:
+	up_write(&shm_ids(ns).rwsem);
 	return err;
 
 out_unlock:
-	shm_unlock(shp);
-	goto out;
-
-out_free:
-	kfree(sfd);
-out_put_dentry:
-	path_put(&path);
-	goto out_nattch;
+	rcu_read_unlock();
+out:
+	return err;
 }
 
 SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
@@ -1229,8 +1293,7 @@
 #else /* CONFIG_MMU */
 	/* under NOMMU conditions, the exact address to be destroyed must be
 	 * given */
-	retval = -EINVAL;
-	if (vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
+	if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
 		do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
 		retval = 0;
 	}
diff --git a/ipc/util.c b/ipc/util.c
index 5d2948d..18ccaa4 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -15,6 +15,14 @@
  * Jun 2006 - namespaces ssupport
  *            OpenVZ, SWsoft Inc.
  *            Pavel Emelianov <xemul@openvz.org>
+ *
+ * General sysv ipc locking scheme:
+ *  when doing ipc id lookups, take the ids->rwsem
+ *      rcu_read_lock()
+ *          obtain the ipc object (kern_ipc_perm)
+ *          perform security, capabilities, auditing and permission checks, etc.
+ *          acquire the ipc lock (kern_ipc_perm.lock) throught ipc_lock_object()
+ *             perform data updates (ie: SET, RMID, LOCK/UNLOCK commands)
  */
 
 #include <linux/mm.h>
@@ -119,7 +127,7 @@
  
 void ipc_init_ids(struct ipc_ids *ids)
 {
-	init_rwsem(&ids->rw_mutex);
+	init_rwsem(&ids->rwsem);
 
 	ids->in_use = 0;
 	ids->seq = 0;
@@ -174,7 +182,7 @@
  *	@ids: Identifier set
  *	@key: The key to find
  *	
- *	Requires ipc_ids.rw_mutex locked.
+ *	Requires ipc_ids.rwsem locked.
  *	Returns the LOCKED pointer to the ipc structure if found or NULL
  *	if not.
  *	If key is found ipc points to the owning ipc structure
@@ -197,7 +205,8 @@
 			continue;
 		}
 
-		ipc_lock_by_ptr(ipc);
+		rcu_read_lock();
+		ipc_lock_object(ipc);
 		return ipc;
 	}
 
@@ -208,7 +217,7 @@
  *	ipc_get_maxid 	-	get the last assigned id
  *	@ids: IPC identifier set
  *
- *	Called with ipc_ids.rw_mutex held.
+ *	Called with ipc_ids.rwsem held.
  */
 
 int ipc_get_maxid(struct ipc_ids *ids)
@@ -246,9 +255,8 @@
  *	is returned. The 'new' entry is returned in a locked state on success.
  *	On failure the entry is not locked and a negative err-code is returned.
  *
- *	Called with ipc_ids.rw_mutex held as a writer.
+ *	Called with writer ipc_ids.rwsem held.
  */
- 
 int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
 {
 	kuid_t euid;
@@ -313,9 +321,9 @@
 {
 	int err;
 
-	down_write(&ids->rw_mutex);
+	down_write(&ids->rwsem);
 	err = ops->getnew(ns, params);
-	up_write(&ids->rw_mutex);
+	up_write(&ids->rwsem);
 	return err;
 }
 
@@ -332,7 +340,7 @@
  *
  *	On success, the IPC id is returned.
  *
- *	It is called with ipc_ids.rw_mutex and ipcp->lock held.
+ *	It is called with ipc_ids.rwsem and ipcp->lock held.
  */
 static int ipc_check_perms(struct ipc_namespace *ns,
 			   struct kern_ipc_perm *ipcp,
@@ -377,7 +385,7 @@
 	 * Take the lock as a writer since we are potentially going to add
 	 * a new entry + read locks are not "upgradable"
 	 */
-	down_write(&ids->rw_mutex);
+	down_write(&ids->rwsem);
 	ipcp = ipc_findkey(ids, params->key);
 	if (ipcp == NULL) {
 		/* key not used */
@@ -403,7 +411,7 @@
 		}
 		ipc_unlock(ipcp);
 	}
-	up_write(&ids->rw_mutex);
+	up_write(&ids->rwsem);
 
 	return err;
 }
@@ -414,7 +422,7 @@
  *	@ids: IPC identifier set
  *	@ipcp: ipc perm structure containing the identifier to remove
  *
- *	ipc_ids.rw_mutex (as a writer) and the spinlock for this ID are held
+ *	ipc_ids.rwsem (as a writer) and the spinlock for this ID are held
  *	before this function is called, and remain locked on the exit.
  */
  
@@ -614,7 +622,7 @@
 }
 
 /**
- * ipc_lock - Lock an ipc structure without rw_mutex held
+ * ipc_lock - Lock an ipc structure without rwsem held
  * @ids: IPC identifier set
  * @id: ipc id to look for
  *
@@ -670,22 +678,6 @@
 	return out;
 }
 
-struct kern_ipc_perm *ipc_lock_check(struct ipc_ids *ids, int id)
-{
-	struct kern_ipc_perm *out;
-
-	out = ipc_lock(ids, id);
-	if (IS_ERR(out))
-		return out;
-
-	if (ipc_checkid(out, id)) {
-		ipc_unlock(out);
-		return ERR_PTR(-EIDRM);
-	}
-
-	return out;
-}
-
 /**
  * ipcget - Common sys_*get() code
  * @ns : namsepace
@@ -726,7 +718,7 @@
 }
 
 /**
- * ipcctl_pre_down - retrieve an ipc and check permissions for some IPC_XXX cmd
+ * ipcctl_pre_down_nolock - retrieve an ipc and check permissions for some IPC_XXX cmd
  * @ns:  the ipc namespace
  * @ids:  the table of ids where to look for the ipc
  * @id:   the id of the ipc to retrieve
@@ -739,39 +731,22 @@
  * It must be called without any lock held and
  *  - retrieves the ipc with the given id in the given table.
  *  - performs some audit and permission check, depending on the given cmd
- *  - returns the ipc with both ipc and rw_mutex locks held in case of success
- *    or an err-code without any lock held otherwise.
+ *  - returns a pointer to the ipc object or otherwise, the corresponding error.
+ *
+ * Call holding the both the rwsem and the rcu read lock.
  */
-struct kern_ipc_perm *ipcctl_pre_down(struct ipc_namespace *ns,
-				      struct ipc_ids *ids, int id, int cmd,
-				      struct ipc64_perm *perm, int extra_perm)
-{
-	struct kern_ipc_perm *ipcp;
-
-	ipcp = ipcctl_pre_down_nolock(ns, ids, id, cmd, perm, extra_perm);
-	if (IS_ERR(ipcp))
-		goto out;
-
-	spin_lock(&ipcp->lock);
-out:
-	return ipcp;
-}
-
 struct kern_ipc_perm *ipcctl_pre_down_nolock(struct ipc_namespace *ns,
-					     struct ipc_ids *ids, int id, int cmd,
-					     struct ipc64_perm *perm, int extra_perm)
+					struct ipc_ids *ids, int id, int cmd,
+					struct ipc64_perm *perm, int extra_perm)
 {
 	kuid_t euid;
 	int err = -EPERM;
 	struct kern_ipc_perm *ipcp;
 
-	down_write(&ids->rw_mutex);
-	rcu_read_lock();
-
 	ipcp = ipc_obtain_object_check(ids, id);
 	if (IS_ERR(ipcp)) {
 		err = PTR_ERR(ipcp);
-		goto out_up;
+		goto err;
 	}
 
 	audit_ipc_obj(ipcp);
@@ -782,16 +757,8 @@
 	euid = current_euid();
 	if (uid_eq(euid, ipcp->cuid) || uid_eq(euid, ipcp->uid)  ||
 	    ns_capable(ns->user_ns, CAP_SYS_ADMIN))
-		return ipcp;
-
-out_up:
-	/*
-	 * Unsuccessful lookup, unlock and return
-	 * the corresponding error.
-	 */
-	rcu_read_unlock();
-	up_write(&ids->rw_mutex);
-
+		return ipcp; /* successful lookup */
+err:
 	return ERR_PTR(err);
 }
 
@@ -848,7 +815,8 @@
 		ipc = idr_find(&ids->ipcs_idr, pos);
 		if (ipc != NULL) {
 			*new_pos = pos + 1;
-			ipc_lock_by_ptr(ipc);
+			rcu_read_lock();
+			ipc_lock_object(ipc);
 			return ipc;
 		}
 	}
@@ -886,7 +854,7 @@
 	 * Take the lock - this will be released by the corresponding
 	 * call to stop().
 	 */
-	down_read(&ids->rw_mutex);
+	down_read(&ids->rwsem);
 
 	/* pos < 0 is invalid */
 	if (*pos < 0)
@@ -913,7 +881,7 @@
 
 	ids = &iter->ns->ids[iface->ids];
 	/* Release the lock we took in start() */
-	up_read(&ids->rw_mutex);
+	up_read(&ids->rwsem);
 }
 
 static int sysvipc_proc_show(struct seq_file *s, void *it)
diff --git a/ipc/util.h b/ipc/util.h
index 9f51af5..f2f5036 100644
--- a/ipc/util.h
+++ b/ipc/util.h
@@ -101,10 +101,10 @@
 #define ipcid_to_idx(id) ((id) % SEQ_MULTIPLIER)
 #define ipcid_to_seqx(id) ((id) / SEQ_MULTIPLIER)
 
-/* must be called with ids->rw_mutex acquired for writing */
+/* must be called with ids->rwsem acquired for writing */
 int ipc_addid(struct ipc_ids *, struct kern_ipc_perm *, int);
 
-/* must be called with ids->rw_mutex acquired for reading */
+/* must be called with ids->rwsem acquired for reading */
 int ipc_get_maxid(struct ipc_ids *);
 
 /* must be called with both locks acquired. */
@@ -139,9 +139,6 @@
 struct kern_ipc_perm *ipcctl_pre_down_nolock(struct ipc_namespace *ns,
 					     struct ipc_ids *ids, int id, int cmd,
 					     struct ipc64_perm *perm, int extra_perm);
-struct kern_ipc_perm *ipcctl_pre_down(struct ipc_namespace *ns,
-				      struct ipc_ids *ids, int id, int cmd,
-				      struct ipc64_perm *perm, int extra_perm);
 
 #ifndef CONFIG_ARCH_WANT_IPC_PARSE_VERSION
   /* On IA-64, we always use the "64-bit version" of the IPC structures.  */ 
@@ -167,24 +164,27 @@
 	return uid / SEQ_MULTIPLIER != ipcp->seq;
 }
 
-static inline void ipc_lock_by_ptr(struct kern_ipc_perm *perm)
-{
-	rcu_read_lock();
-	spin_lock(&perm->lock);
-}
-
-static inline void ipc_unlock(struct kern_ipc_perm *perm)
-{
-	spin_unlock(&perm->lock);
-	rcu_read_unlock();
-}
-
 static inline void ipc_lock_object(struct kern_ipc_perm *perm)
 {
 	spin_lock(&perm->lock);
 }
 
-struct kern_ipc_perm *ipc_lock_check(struct ipc_ids *ids, int id);
+static inline void ipc_unlock_object(struct kern_ipc_perm *perm)
+{
+	spin_unlock(&perm->lock);
+}
+
+static inline void ipc_assert_locked_object(struct kern_ipc_perm *perm)
+{
+	assert_spin_locked(&perm->lock);
+}
+
+static inline void ipc_unlock(struct kern_ipc_perm *perm)
+{
+	ipc_unlock_object(perm);
+	rcu_read_unlock();
+}
+
 struct kern_ipc_perm *ipc_obtain_object_check(struct ipc_ids *ids, int id);
 int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids,
 			struct ipc_ops *ops, struct ipc_params *params);
diff --git a/kernel/audit.c b/kernel/audit.c
index 91e53d0..b0122cd 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -64,6 +64,9 @@
 #include <linux/freezer.h>
 #include <linux/tty.h>
 #include <linux/pid_namespace.h>
+#ifdef CONFIG_TRACEPOINT_TO_EVENT
+#include <trace/events/tp2e.h>
+#endif
 
 #include "audit.h"
 
@@ -437,6 +440,20 @@
 		consume_skb(skb);
 }
 
+#ifdef CONFIG_TRACEPOINT_TO_EVENT
+static void audit_tp2e_skb(struct sk_buff *skb)
+{
+	struct nlmsghdr *nlh = nlmsg_hdr(skb);
+	char *data = nlmsg_data(nlh);
+
+	/* for the moment, only SE violation needs to be reported */
+	if ((nlh->nlmsg_type == AUDIT_AVC) || (nlh->nlmsg_type == AUDIT_SELINUX_ERR)) {
+		trace_tp2e_generic_event(TP2E_EV_INFO, "SELinux", "Violation",
+		"", "", "", "", "", data, "");
+	}
+}
+#endif
+
 static int kauditd_thread(void *dummy)
 {
 	set_freezable();
@@ -1117,9 +1134,10 @@
 
 			sleep_time = timeout_start + audit_backlog_wait_time -
 					jiffies;
-			if ((long)sleep_time > 0)
+			if ((long)sleep_time > 0) {
 				wait_for_auditd(sleep_time);
-			continue;
+				continue;
+			}
 		}
 		if (audit_rate_check() && printk_ratelimit())
 			printk(KERN_WARNING
@@ -1668,7 +1686,10 @@
 	} else {
 		struct nlmsghdr *nlh = nlmsg_hdr(ab->skb);
 		nlh->nlmsg_len = ab->skb->len - NLMSG_HDRLEN;
-
+#ifdef CONFIG_TRACEPOINT_TO_EVENT
+		/* function added to create a crashtool event on condition */
+		audit_tp2e_skb(ab->skb);
+#endif
 		if (audit_pid) {
 			skb_queue_tail(&audit_skb_queue, ab->skb);
 			wake_up_interruptible(&kauditd_wait);
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 8dc7ec1..66a5e86 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1993,9 +1993,16 @@
 	do {
 		struct task_and_cgroup ent;
 
+		/*
+		 * @Leader exiting: stop right now to avoid infinite loop
+		 * as it will be removed from the list
+		 */
+		if (leader->flags & PF_EXITING)
+			break;
+
 		/* @tsk either already exited or can't exit until the end */
 		if (tsk->flags & PF_EXITING)
-			continue;
+			goto next;
 
 		/* as per above, nr_threads may decrease, but not increase. */
 		BUG_ON(i >= group_size);
@@ -2003,7 +2010,7 @@
 		ent.cgrp = task_cgroup_from_root(tsk, root);
 		/* nothing to do if this task is already in the cgroup */
 		if (ent.cgrp == cgrp)
-			continue;
+			goto next;
 		/*
 		 * saying GFP_ATOMIC has no effect here because we did prealloc
 		 * earlier, but it's good form to communicate our expectations.
@@ -2011,7 +2018,7 @@
 		retval = flex_array_put(group, i, &ent, GFP_ATOMIC);
 		BUG_ON(retval != 0);
 		i++;
-
+	next:
 		if (!threadgroup)
 			break;
 	} while_each_thread(leader, tsk);
@@ -2815,13 +2822,17 @@
 {
 	LIST_HEAD(pending);
 	struct cgroup *cgrp, *n;
+	struct super_block *sb = ss->root->sb;
 
 	/* %NULL @cfts indicates abort and don't bother if @ss isn't attached */
-	if (cfts && ss->root != &rootnode) {
+	if (cfts && ss->root != &rootnode &&
+	    atomic_inc_not_zero(&sb->s_active)) {
 		list_for_each_entry(cgrp, &ss->root->allcg_list, allcg_node) {
 			dget(cgrp->dentry);
 			list_add_tail(&cgrp->cft_q_node, &pending);
 		}
+	} else {
+		sb = NULL;
 	}
 
 	mutex_unlock(&cgroup_mutex);
@@ -2844,6 +2855,9 @@
 		dput(cgrp->dentry);
 	}
 
+	if (sb)
+		deactivate_super(sb);
+
 	mutex_unlock(&cgroup_cft_mutex);
 }
 
@@ -3773,6 +3787,23 @@
 }
 
 /*
+ * When dput() is called asynchronously, if umount has been done and
+ * then deactivate_super() in cgroup_free_fn() kills the superblock,
+ * there's a small window that vfs will see the root dentry with non-zero
+ * refcnt and trigger BUG().
+ *
+ * That's why we hold a reference before dput() and drop it right after.
+ */
+static void cgroup_dput(struct cgroup *cgrp)
+{
+	struct super_block *sb = cgrp->root->sb;
+
+	atomic_inc(&sb->s_active);
+	dput(cgrp->dentry);
+	deactivate_super(sb);
+}
+
+/*
  * Unregister event and free resources.
  *
  * Gets called from workqueue.
@@ -3792,7 +3823,7 @@
 
 	eventfd_ctx_put(event->eventfd);
 	kfree(event);
-	dput(cgrp->dentry);
+	cgroup_dput(cgrp);
 }
 
 /*
@@ -4077,12 +4108,8 @@
 {
 	struct cgroup_subsys_state *css =
 		container_of(work, struct cgroup_subsys_state, dput_work);
-	struct dentry *dentry = css->cgroup->dentry;
-	struct super_block *sb = dentry->d_sb;
 
-	atomic_inc(&sb->s_active);
-	dput(dentry);
-	deactivate_super(sb);
+	cgroup_dput(css->cgroup);
 }
 
 static void init_cgroup_css(struct cgroup_subsys_state *css,
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 64b3f79..6948e94 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1502,11 +1502,13 @@
 {
 	struct cpuset *cs = cgroup_cs(cgrp);
 	cpuset_filetype_t type = cft->private;
-	int retval = -ENODEV;
+	int retval = 0;
 
 	mutex_lock(&cpuset_mutex);
-	if (!is_cpuset_online(cs))
+	if (!is_cpuset_online(cs)) {
+		retval = -ENODEV;
 		goto out_unlock;
+	}
 
 	switch (type) {
 	case FILE_CPU_EXCLUSIVE:
diff --git a/kernel/events/core.c b/kernel/events/core.c
index b391907..d91833a 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -761,8 +761,18 @@
 {
 	struct perf_event_context *ctx;
 
-	rcu_read_lock();
 retry:
+	/*
+	 * One of the few rules of preemptible RCU is that one cannot do
+	 * rcu_read_unlock() while holding a scheduler (or nested) lock when
+	 * part of the read side critical section was preemptible -- see
+	 * rcu_read_unlock_special().
+	 *
+	 * Since ctx->lock nests under rq->lock we must ensure the entire read
+	 * side critical section is non-preemptible.
+	 */
+	preempt_disable();
+	rcu_read_lock();
 	ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
 	if (ctx) {
 		/*
@@ -778,6 +788,8 @@
 		raw_spin_lock_irqsave(&ctx->lock, *flags);
 		if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
 			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
+			rcu_read_unlock();
+			preempt_enable();
 			goto retry;
 		}
 
@@ -787,6 +799,7 @@
 		}
 	}
 	rcu_read_unlock();
+	preempt_enable();
 	return ctx;
 }
 
@@ -1761,7 +1774,16 @@
 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
 	int err;
 
-	if (WARN_ON_ONCE(!ctx->is_active))
+	/*
+	 * There's a time window between 'ctx->is_active' check
+	 * in perf_event_enable function and this place having:
+	 *   - IRQs on
+	 *   - ctx->lock unlocked
+	 *
+	 * where the task could be killed and 'ctx' deactivated
+	 * by perf_event_exit_task.
+	 */
+	if (!ctx->is_active)
 		return -EINVAL;
 
 	raw_spin_lock(&ctx->lock);
@@ -5281,7 +5303,8 @@
 
 static void perf_swevent_del(struct perf_event *event, int flags)
 {
-	hlist_del_rcu(&event->hlist_entry);
+	if (!hlist_unhashed(&event->hlist_entry))
+		hlist_del_rcu(&event->hlist_entry);
 }
 
 static void perf_swevent_start(struct perf_event *event, int flags)
@@ -6554,6 +6577,9 @@
 	if (err)
 		return err;
 
+	if (attr.__reserved_1)
+		return -EINVAL;
+
 	if (!attr.exclude_kernel) {
 		if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
 			return -EACCES;
@@ -7228,7 +7254,7 @@
 		 * child.
 		 */
 
-		child_ctx = alloc_perf_context(event->pmu, child);
+		child_ctx = alloc_perf_context(parent_ctx->pmu, child);
 		if (!child_ctx)
 			return -ENOMEM;
 
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index cd55144..9c2ddfb 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -87,10 +87,31 @@
 		goto out;
 
 	/*
-	 * Publish the known good head. Rely on the full barrier implied
-	 * by atomic_dec_and_test() order the rb->head read and this
-	 * write.
+	 * Since the mmap() consumer (userspace) can run on a different CPU:
+	 *
+	 *   kernel				user
+	 *
+	 *   READ ->data_tail			READ ->data_head
+	 *   smp_mb()	(A)			smp_rmb()	(C)
+	 *   WRITE $data			READ $data
+	 *   smp_wmb()	(B)			smp_mb()	(D)
+	 *   STORE ->data_head			WRITE ->data_tail
+	 *
+	 * Where A pairs with D, and B pairs with C.
+	 *
+	 * I don't think A needs to be a full barrier because we won't in fact
+	 * write data until we see the store from userspace. So we simply don't
+	 * issue the data WRITE until we observe it. Be conservative for now.
+	 *
+	 * OTOH, D needs to be a full barrier since it separates the data READ
+	 * from the tail WRITE.
+	 *
+	 * For B a WMB is sufficient since it separates two WRITEs, and for C
+	 * an RMB is sufficient since it separates two READs.
+	 *
+	 * See perf_output_begin().
 	 */
+	smp_wmb();
 	rb->user_page->data_head = head;
 
 	/*
@@ -154,9 +175,11 @@
 		 * Userspace could choose to issue a mb() before updating the
 		 * tail pointer. So that all reads will be completed before the
 		 * write is issued.
+		 *
+		 * See perf_output_put_handle().
 		 */
 		tail = ACCESS_ONCE(rb->user_page->data_tail);
-		smp_rmb();
+		smp_mb();
 		offset = head = local_read(&rb->head);
 		head += size;
 		if (unlikely(!perf_output_space(rb, tail, offset, head)))
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index f356974..ad8e1bd 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -1682,12 +1682,10 @@
 		tmp = ri;
 		ri = ri->next;
 		kfree(tmp);
+		utask->depth--;
 
 		if (!chained)
 			break;
-
-		utask->depth--;
-
 		BUG_ON(!ri);
 	}
 
diff --git a/kernel/fork.c b/kernel/fork.c
index 52bde0c..da15b0c 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1235,10 +1235,11 @@
 		return ERR_PTR(-EINVAL);
 
 	/*
-	 * If the new process will be in a different pid namespace
-	 * don't allow the creation of threads.
+	 * If the new process will be in a different pid namespace don't
+	 * allow it to share a thread group or signal handlers with the
+	 * forking task.
 	 */
-	if ((clone_flags & (CLONE_VM|CLONE_NEWPID)) &&
+	if ((clone_flags & (CLONE_SIGHAND | CLONE_NEWPID)) &&
 	    (task_active_pid_ns(current) != current->nsproxy->pid_ns))
 		return ERR_PTR(-EINVAL);
 
@@ -1748,6 +1749,12 @@
 		 int __user *, parent_tidptr,
 		 int __user *, child_tidptr,
 		 int, tls_val)
+#elif defined(CONFIG_CLONE_BACKWARDS3)
+SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp,
+		int, stack_size,
+		int __user *, parent_tidptr,
+		int __user *, child_tidptr,
+		int, tls_val)
 #else
 SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
 		 int __user *, parent_tidptr,
diff --git a/kernel/futex.c b/kernel/futex.c
index 590483b..99de9c4 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -62,6 +62,7 @@
 #include <linux/ptrace.h>
 #include <linux/sched/rt.h>
 #include <linux/freezer.h>
+#include <linux/hugetlb.h>
 
 #include <asm/futex.h>
 
@@ -366,7 +367,7 @@
 	} else {
 		key->both.offset |= FUT_OFF_INODE; /* inode-based key */
 		key->shared.inode = page_head->mapping->host;
-		key->shared.pgoff = page_head->index;
+		key->shared.pgoff = basepage_index(page);
 	}
 
 	get_futex_key_refs(key);
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 3ee4d06..d2553d8 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -246,6 +246,11 @@
 			goto again;
 		}
 		timer->base = new_base;
+	} else {
+		if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
+			cpu = this_cpu;
+			goto again;
+		}
 	}
 	return new_base;
 }
@@ -722,17 +727,20 @@
 	return 1;
 }
 
+static void clock_was_set_work(struct work_struct *work)
+{
+	clock_was_set();
+}
+
+static DECLARE_WORK(hrtimer_work, clock_was_set_work);
+
 /*
- * Called from timekeeping code to reprogramm the hrtimer interrupt
- * device. If called from the timer interrupt context we defer it to
- * softirq context.
+ * Called from timekeeping and resume code to reprogramm the hrtimer
+ * interrupt device on all cpus.
  */
 void clock_was_set_delayed(void)
 {
-	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
-
-	cpu_base->clock_was_set = 1;
-	__raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+	schedule_work(&hrtimer_work);
 }
 
 #else
@@ -781,8 +789,10 @@
 	WARN_ONCE(!irqs_disabled(),
 		  KERN_INFO "hrtimers_resume() called with IRQs enabled!");
 
+	/* Retrigger on the local CPU */
 	retrigger_next_event(NULL);
-	timerfd_clock_was_set();
+	/* And schedule a retrigger for all others */
+	clock_was_set_delayed();
 }
 
 static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer)
@@ -1433,13 +1443,6 @@
 
 static void run_hrtimer_softirq(struct softirq_action *h)
 {
-	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
-
-	if (cpu_base->clock_was_set) {
-		cpu_base->clock_was_set = 0;
-		clock_was_set();
-	}
-
 	hrtimer_peek_ahead_timers();
 }
 
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index cbd97ce..63af23a 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -672,6 +672,7 @@
 		irq_settings_set_noprobe(desc);
 		irq_settings_set_norequest(desc);
 		irq_settings_set_nothread(desc);
+		irq_settings_set_chained(desc);
 		irq_startup(desc, true);
 	}
 out:
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index fa17855..acd0bd7 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -207,6 +207,7 @@
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 	return ret;
 }
+EXPORT_SYMBOL(irq_set_affinity);
 
 int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
 {
@@ -555,9 +556,9 @@
 		return 0;
 
 	if (irq_settings_can_request(desc)) {
-		if (desc->action)
-			if (irqflags & desc->action->flags & IRQF_SHARED)
-				canrequest =1;
+		if (!desc->action ||
+		    irqflags & desc->action->flags & IRQF_SHARED)
+			canrequest = 1;
 	}
 	irq_put_desc_unlock(desc, flags);
 	return canrequest;
@@ -802,8 +803,7 @@
 
 static void wake_threads_waitq(struct irq_desc *desc)
 {
-	if (atomic_dec_and_test(&desc->threads_active) &&
-	    waitqueue_active(&desc->wait_for_threads))
+	if (atomic_dec_and_test(&desc->threads_active))
 		wake_up(&desc->wait_for_threads);
 }
 
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
index 4ea5ced..6378790 100644
--- a/kernel/irq/pm.c
+++ b/kernel/irq/pm.c
@@ -53,6 +53,15 @@
 		if (is_early != want_early)
 			continue;
 
+#ifdef CONFIG_PM_DEBUG
+		if (desc->istate & IRQS_PENDING) {
+			printk(KERN_DEBUG "Wakeup from IRQ %d %s\n",
+				irq,
+				desc->action && desc->action->name ?
+				desc->action->name : "");
+		}
+#endif /* CONFIG_PM_DEBUG */
+
 		raw_spin_lock_irqsave(&desc->lock, flags);
 		__enable_irq(desc, irq, true);
 		raw_spin_unlock_irqrestore(&desc->lock, flags);
diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h
index 1162f10..4ea2f96 100644
--- a/kernel/irq/settings.h
+++ b/kernel/irq/settings.h
@@ -15,6 +15,7 @@
 	_IRQ_NESTED_THREAD	= IRQ_NESTED_THREAD,
 	_IRQ_PER_CPU_DEVID	= IRQ_PER_CPU_DEVID,
 	_IRQF_MODIFY_MASK	= IRQF_MODIFY_MASK,
+	_IRQ_CHAINED		= IRQ_CHAINED,
 };
 
 #define IRQ_PER_CPU		GOT_YOU_MORON
@@ -28,6 +29,7 @@
 #define IRQ_PER_CPU_DEVID	GOT_YOU_MORON
 #undef IRQF_MODIFY_MASK
 #define IRQF_MODIFY_MASK	GOT_YOU_MORON
+#define IRQ_CHAINED		GOT_YOU_MORON
 
 static inline void
 irq_settings_clr_and_set(struct irq_desc *desc, u32 clr, u32 set)
@@ -147,3 +149,8 @@
 {
 	return desc->status_use_accessors & _IRQ_NESTED_THREAD;
 }
+
+static inline bool irq_settings_set_chained(struct irq_desc *desc)
+{
+	return desc->status_use_accessors |= _IRQ_CHAINED;
+}
diff --git a/kernel/module.c b/kernel/module.c
index cab4bce..fa53db8 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2927,7 +2927,6 @@
 {
 	/* Module within temporary copy. */
 	struct module *mod;
-	Elf_Shdr *pcpusec;
 	int err;
 
 	mod = setup_load_info(info, flags);
@@ -2942,17 +2941,10 @@
 	err = module_frob_arch_sections(info->hdr, info->sechdrs,
 					info->secstrings, mod);
 	if (err < 0)
-		goto out;
+		return ERR_PTR(err);
 
-	pcpusec = &info->sechdrs[info->index.pcpu];
-	if (pcpusec->sh_size) {
-		/* We have a special allocation for this section. */
-		err = percpu_modalloc(mod,
-				      pcpusec->sh_size, pcpusec->sh_addralign);
-		if (err)
-			goto out;
-		pcpusec->sh_flags &= ~(unsigned long)SHF_ALLOC;
-	}
+	/* We will do a special allocation for per-cpu sections later. */
+	info->sechdrs[info->index.pcpu].sh_flags &= ~(unsigned long)SHF_ALLOC;
 
 	/* Determine total sizes, and put offsets in sh_entsize.  For now
 	   this is done generically; there doesn't appear to be any
@@ -2963,17 +2955,22 @@
 	/* Allocate and move to the final place */
 	err = move_module(mod, info);
 	if (err)
-		goto free_percpu;
+		return ERR_PTR(err);
 
 	/* Module has been copied to its final place now: return it. */
 	mod = (void *)info->sechdrs[info->index.mod].sh_addr;
 	kmemleak_load_module(mod, info);
 	return mod;
+}
 
-free_percpu:
-	percpu_modfree(mod);
-out:
-	return ERR_PTR(err);
+static int alloc_module_percpu(struct module *mod, struct load_info *info)
+{
+	Elf_Shdr *pcpusec = &info->sechdrs[info->index.pcpu];
+	if (!pcpusec->sh_size)
+		return 0;
+
+	/* We have a special allocation for this section. */
+	return percpu_modalloc(mod, pcpusec->sh_size, pcpusec->sh_addralign);
 }
 
 /* mod is no longer valid after this! */
@@ -3237,6 +3234,11 @@
 	}
 #endif
 
+	/* To avoid stressing percpu allocator, do this once we're unique. */
+	err = alloc_module_percpu(mod, info);
+	if (err)
+		goto unlink_mod;
+
 	/* Now module is in final location, initialize linked lists, etc. */
 	err = module_unload_init(mod);
 	if (err)
diff --git a/kernel/mutex.c b/kernel/mutex.c
index ad53a66..56071e9 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -18,6 +18,7 @@
  * Also see Documentation/mutex-design.txt.
  */
 #include <linux/mutex.h>
+#include <linux/ww_mutex.h>
 #include <linux/sched.h>
 #include <linux/sched/rt.h>
 #include <linux/export.h>
diff --git a/kernel/pid.c b/kernel/pid.c
index 0db3e79..0eb6d8e 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -264,6 +264,7 @@
 		struct pid_namespace *ns = upid->ns;
 		hlist_del_rcu(&upid->pid_chain);
 		switch(--ns->nr_hashed) {
+		case 2:
 		case 1:
 			/* When all that is left in the pid namespace
 			 * is the reaper wake up the reaper.  The reaper
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index f8cc6c4..a2a84ef 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -22,10 +22,48 @@
 	bool
 	default y
 
+config HAS_EARLYSUSPEND
+	bool
+
 config WAKELOCK
 	bool
 	default y
 
+config EARLYSUSPEND
+	bool "Early suspend"
+	depends on WAKELOCK && SUSPEND
+	default y
+	select HAS_EARLYSUSPEND
+	---help---
+	  Call early suspend handlers when the user requested sleep state
+	  changes.
+
+choice
+	prompt "User-space screen access"
+	default FB_EARLYSUSPEND if !FRAMEBUFFER_CONSOLE
+	default CONSOLE_EARLYSUSPEND
+	depends on HAS_EARLYSUSPEND
+
+	config NO_USER_SPACE_SCREEN_ACCESS_CONTROL
+		bool "None"
+
+	config CONSOLE_EARLYSUSPEND
+		bool "Console switch on early-suspend"
+		depends on HAS_EARLYSUSPEND && VT
+		---help---
+		  Register early suspend handler to perform a console switch to
+		  when user-space should stop drawing to the screen and a switch
+		  back when it should resume.
+
+	config FB_EARLYSUSPEND
+		bool "Sysfs interface"
+		depends on HAS_EARLYSUSPEND
+		---help---
+		  Register early suspend handler that notifies and waits for
+		  user-space through sysfs when user-space should stop drawing
+		  to the screen and notifies user-space when it should resume.
+endchoice
+
 config HIBERNATE_CALLBACKS
 	bool
 
diff --git a/kernel/power/Makefile b/kernel/power/Makefile
index 74c713b..a311b7e 100644
--- a/kernel/power/Makefile
+++ b/kernel/power/Makefile
@@ -12,6 +12,9 @@
 obj-$(CONFIG_PM_AUTOSLEEP)	+= autosleep.o
 obj-$(CONFIG_PM_WAKELOCKS)	+= wakelock.o
 obj-$(CONFIG_SUSPEND_TIME)	+= suspend_time.o
+obj-$(CONFIG_EARLYSUSPEND)	+= earlysuspend.o
+obj-$(CONFIG_CONSOLE_EARLYSUSPEND)	+= consoleearlysuspend.o
+obj-$(CONFIG_FB_EARLYSUSPEND)	+= fbearlysuspend.o
 
 obj-$(CONFIG_MAGIC_SYSRQ)	+= poweroff.o
 
diff --git a/kernel/power/autosleep.c b/kernel/power/autosleep.c
index c6422ff..9012ecf 100644
--- a/kernel/power/autosleep.c
+++ b/kernel/power/autosleep.c
@@ -32,7 +32,8 @@
 
 	mutex_lock(&autosleep_lock);
 
-	if (!pm_save_wakeup_count(initial_count)) {
+	if (!pm_save_wakeup_count(initial_count) ||
+		system_state != SYSTEM_RUNNING) {
 		mutex_unlock(&autosleep_lock);
 		goto out;
 	}
diff --git a/kernel/power/console.c b/kernel/power/console.c
index 463aa673..eacb8bd 100644
--- a/kernel/power/console.c
+++ b/kernel/power/console.c
@@ -81,6 +81,7 @@
 	list_for_each_entry(tmp, &pm_vt_switch_list, head) {
 		if (tmp->dev == dev) {
 			list_del(&tmp->head);
+			kfree(tmp);
 			break;
 		}
 	}
diff --git a/kernel/power/earlysuspend.c b/kernel/power/earlysuspend.c
new file mode 100644
index 0000000..66298a1
--- /dev/null
+++ b/kernel/power/earlysuspend.c
@@ -0,0 +1,266 @@
+/* kernel/power/earlysuspend.c
+ *
+ * Copyright (C) 2005-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/earlysuspend.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/rtc.h>
+#include <linux/writeback.h>
+#include <linux/pm_wakeup.h>
+#include <linux/workqueue.h>
+
+#include "power.h"
+
+enum {
+	DEBUG_USER_STATE = 1U << 0,
+	DEBUG_SUSPEND = 1U << 2,
+};
+static int debug_mask = DEBUG_USER_STATE;
+module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+static DEFINE_MUTEX(early_suspend_lock);
+static DEFINE_MUTEX(suspend_lock);
+static LIST_HEAD(early_suspend_handlers);
+static void early_suspend(struct work_struct *work);
+static void late_resume(struct work_struct *work);
+static void try_to_suspend(struct work_struct *work);
+static struct workqueue_struct *early_suspend_wq;
+static struct workqueue_struct *suspend_wq;
+static DECLARE_WORK(early_suspend_work, early_suspend);
+static DECLARE_WORK(late_resume_work, late_resume);
+static DECLARE_WORK(suspend_work, try_to_suspend);
+static DEFINE_SPINLOCK(state_lock);
+enum {
+	SUSPEND_REQUESTED = 0x1,
+	SUSPENDED = 0x2,
+	SUSPEND_REQUESTED_AND_SUSPENDED = SUSPEND_REQUESTED | SUSPENDED,
+};
+static int state;
+static int suspend_state;
+static struct wakeup_source *early_suspend_ws;
+
+void queue_up_early_suspend_work(struct work_struct *work)
+{
+	queue_work(early_suspend_wq, work);
+}
+
+static void try_to_suspend(struct work_struct *work)
+{
+	unsigned int initial_count, final_count;
+
+	if (!pm_get_wakeup_count(&initial_count, true))
+		goto queue_again;
+
+	mutex_lock(&suspend_lock);
+
+	if (!pm_save_wakeup_count(initial_count) ||
+		system_state != SYSTEM_RUNNING) {
+		mutex_unlock(&suspend_lock);
+		goto queue_again;
+	}
+
+	if (suspend_state == PM_SUSPEND_ON) {
+		mutex_unlock(&suspend_lock);
+		return;
+	}
+
+	if (suspend_state >= PM_SUSPEND_MAX)
+		hibernate();
+	else
+		pm_suspend(suspend_state);
+
+	mutex_unlock(&suspend_lock);
+
+	if (!pm_get_wakeup_count(&final_count, false))
+		goto queue_again;
+
+	/*
+	 * If the wakeup occured for an unknown reason, wait to prevent the
+	 * system from trying to suspend and waking up in a tight loop.
+	 */
+	if (final_count == initial_count)
+		schedule_timeout_uninterruptible(HZ / 2);
+
+queue_again:
+	queue_work(suspend_wq, &suspend_work);
+}
+
+void register_early_suspend(struct early_suspend *handler)
+{
+	struct list_head *pos;
+
+	mutex_lock(&early_suspend_lock);
+	list_for_each(pos, &early_suspend_handlers) {
+		struct early_suspend *e;
+		e = list_entry(pos, struct early_suspend, link);
+		if (e->level > handler->level)
+			break;
+	}
+	list_add_tail(&handler->link, pos);
+	if ((state & SUSPENDED) && handler->suspend)
+		handler->suspend(handler);
+	mutex_unlock(&early_suspend_lock);
+}
+EXPORT_SYMBOL(register_early_suspend);
+
+void unregister_early_suspend(struct early_suspend *handler)
+{
+	mutex_lock(&early_suspend_lock);
+	list_del(&handler->link);
+	mutex_unlock(&early_suspend_lock);
+}
+EXPORT_SYMBOL(unregister_early_suspend);
+
+static void early_suspend(struct work_struct *work)
+{
+	struct early_suspend *pos;
+	unsigned long irqflags;
+	int abort = 0;
+
+	mutex_lock(&early_suspend_lock);
+	spin_lock_irqsave(&state_lock, irqflags);
+	if (state == SUSPEND_REQUESTED)
+		state |= SUSPENDED;
+	else
+		abort = 1;
+	spin_unlock_irqrestore(&state_lock, irqflags);
+
+	if (abort) {
+		if (debug_mask & DEBUG_SUSPEND)
+			pr_info("early_suspend: abort, state %d\n", state);
+		mutex_unlock(&early_suspend_lock);
+		goto abort;
+	}
+
+	if (debug_mask & DEBUG_SUSPEND)
+		pr_info("early_suspend: call handlers\n");
+	list_for_each_entry(pos, &early_suspend_handlers, link) {
+		if (pos->suspend != NULL)
+			pos->suspend(pos);
+	}
+	mutex_unlock(&early_suspend_lock);
+
+	if (debug_mask & DEBUG_SUSPEND)
+		pr_info("early_suspend: after call handlers\n");
+	/* just wake up flusher to start write back and don't wait it finished*/
+	wakeup_flusher_threads(0, WB_REASON_SYNC);
+abort:
+	spin_lock_irqsave(&state_lock, irqflags);
+	if (state == SUSPEND_REQUESTED_AND_SUSPENDED)
+		__pm_relax(early_suspend_ws);
+	spin_unlock_irqrestore(&state_lock, irqflags);
+	queue_work(suspend_wq, &suspend_work);
+}
+
+static void late_resume(struct work_struct *work)
+{
+	struct early_suspend *pos;
+	unsigned long irqflags;
+	int abort = 0;
+
+	mutex_lock(&early_suspend_lock);
+	spin_lock_irqsave(&state_lock, irqflags);
+	if (state == SUSPENDED)
+		state &= ~SUSPENDED;
+	else
+		abort = 1;
+	spin_unlock_irqrestore(&state_lock, irqflags);
+
+	if (abort) {
+		if (debug_mask & DEBUG_SUSPEND)
+			pr_info("late_resume: abort, state %d\n", state);
+		goto abort;
+	}
+	if (debug_mask & DEBUG_SUSPEND)
+		pr_info("late_resume: call handlers\n");
+	list_for_each_entry_reverse(pos, &early_suspend_handlers, link)
+		if (pos->resume != NULL)
+			pos->resume(pos);
+	if (debug_mask & DEBUG_SUSPEND)
+		pr_info("late_resume: done\n");
+abort:
+	mutex_unlock(&early_suspend_lock);
+}
+
+void request_suspend_state(suspend_state_t new_state)
+{
+	unsigned long irqflags;
+	int old_sleep;
+	suspend_state_t prev_state;
+
+	mutex_lock(&suspend_lock);
+	prev_state = suspend_state;
+	spin_lock_irqsave(&state_lock, irqflags);
+	old_sleep = state & SUSPEND_REQUESTED;
+	if (debug_mask & DEBUG_USER_STATE) {
+		struct timespec ts;
+		struct rtc_time tm;
+		getnstimeofday(&ts);
+		rtc_time_to_tm(ts.tv_sec, &tm);
+		pr_info("request_suspend_state: %s (%d->%d) at %lld "
+			"(%d-%02d-%02d %02d:%02d:%02d.%09lu UTC)\n",
+			new_state != PM_SUSPEND_ON ? "sleep" : "wakeup",
+			prev_state, new_state,
+			ktime_to_ns(ktime_get()),
+			tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
+			tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec);
+	}
+	if (!old_sleep && new_state != PM_SUSPEND_ON) {
+		state |= SUSPEND_REQUESTED;
+		queue_up_early_suspend_work(&early_suspend_work);
+	} else if (old_sleep && new_state == PM_SUSPEND_ON) {
+		state &= ~SUSPEND_REQUESTED;
+		__pm_stay_awake(early_suspend_ws);
+		queue_up_early_suspend_work(&late_resume_work);
+	}
+	suspend_state = new_state;
+	spin_unlock_irqrestore(&state_lock, irqflags);
+	mutex_unlock(&suspend_lock);
+}
+
+int __init early_suspend_init(void)
+{
+	int ret = 0;
+
+	early_suspend_ws = wakeup_source_register("early_suspend");
+
+	if (!early_suspend_ws) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	early_suspend_wq = alloc_ordered_workqueue("early_suspend", 0);
+
+	if (!early_suspend_wq) {
+		ret = -ENOMEM;
+		goto ws_err;
+	}
+
+	suspend_wq = alloc_ordered_workqueue("auto_suspend", 0);
+
+	if (!suspend_wq) {
+		ret = -ENOMEM;
+		goto es_wq_err;
+	}
+
+	goto out;
+
+es_wq_err:
+	destroy_workqueue(early_suspend_wq);
+ws_err:
+	wakeup_source_unregister(early_suspend_ws);
+out:
+	return ret;
+}
diff --git a/kernel/power/fbearlysuspend.c b/kernel/power/fbearlysuspend.c
new file mode 100644
index 0000000..d391b03
--- /dev/null
+++ b/kernel/power/fbearlysuspend.c
@@ -0,0 +1,153 @@
+/* kernel/power/fbearlysuspend.c
+ *
+ * Copyright (C) 2005-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/earlysuspend.h>
+#include <linux/module.h>
+#include <linux/wait.h>
+
+#include "power.h"
+
+static wait_queue_head_t fb_state_wq;
+static DEFINE_SPINLOCK(fb_state_lock);
+static enum {
+	FB_STATE_STOPPED_DRAWING,
+	FB_STATE_REQUEST_STOP_DRAWING,
+	FB_STATE_DRAWING_OK,
+} fb_state;
+
+/* tell userspace to stop drawing, wait for it to stop */
+static void stop_drawing_early_suspend(struct early_suspend *h)
+{
+	int ret;
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&fb_state_lock, irq_flags);
+	fb_state = FB_STATE_REQUEST_STOP_DRAWING;
+	spin_unlock_irqrestore(&fb_state_lock, irq_flags);
+
+	wake_up_all(&fb_state_wq);
+	ret = wait_event_timeout(fb_state_wq,
+				 fb_state == FB_STATE_STOPPED_DRAWING,
+				 HZ);
+	if (unlikely(fb_state != FB_STATE_STOPPED_DRAWING))
+		pr_warn("stop_drawing_early_suspend: timeout waiting for "
+			   "userspace to stop drawing\n");
+}
+
+/* tell userspace to start drawing */
+static void start_drawing_late_resume(struct early_suspend *h)
+{
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&fb_state_lock, irq_flags);
+	fb_state = FB_STATE_DRAWING_OK;
+	spin_unlock_irqrestore(&fb_state_lock, irq_flags);
+	wake_up(&fb_state_wq);
+}
+
+static struct early_suspend stop_drawing_early_suspend_desc = {
+	.level = EARLY_SUSPEND_LEVEL_STOP_DRAWING,
+	.suspend = stop_drawing_early_suspend,
+	.resume = start_drawing_late_resume,
+};
+
+static ssize_t wait_for_fb_sleep_show(struct kobject *kobj,
+				      struct kobj_attribute *attr, char *buf)
+{
+	char *s = buf;
+	int ret;
+
+	ret = wait_event_interruptible(fb_state_wq,
+				       fb_state != FB_STATE_DRAWING_OK);
+	if (ret && fb_state == FB_STATE_DRAWING_OK)
+		return ret;
+	else
+		s += sprintf(buf, "sleeping");
+	return s - buf;
+}
+
+static ssize_t wait_for_fb_wake_show(struct kobject *kobj,
+				     struct kobj_attribute *attr, char *buf)
+{
+	char *s = buf;
+	int ret;
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&fb_state_lock, irq_flags);
+	if (fb_state == FB_STATE_REQUEST_STOP_DRAWING) {
+		fb_state = FB_STATE_STOPPED_DRAWING;
+		wake_up(&fb_state_wq);
+	}
+	spin_unlock_irqrestore(&fb_state_lock, irq_flags);
+
+	ret = wait_event_interruptible(fb_state_wq,
+				       fb_state == FB_STATE_DRAWING_OK);
+	if (ret && fb_state != FB_STATE_DRAWING_OK)
+		return ret;
+	else
+		s += sprintf(buf, "awake");
+
+	return s - buf;
+}
+
+#define power_ro_attr(_name) \
+static struct kobj_attribute _name##_attr = {	\
+	.attr	= {				\
+		.name = __stringify(_name),	\
+		.mode = 0444,			\
+	},					\
+	.show	= _name##_show,			\
+	.store	= NULL,		\
+}
+
+power_ro_attr(wait_for_fb_sleep);
+power_ro_attr(wait_for_fb_wake);
+
+static struct attribute *g[] = {
+	&wait_for_fb_sleep_attr.attr,
+	&wait_for_fb_wake_attr.attr,
+	NULL,
+};
+
+static struct attribute_group attr_group = {
+	.attrs = g,
+};
+
+static int __init android_power_init(void)
+{
+	int ret;
+
+	init_waitqueue_head(&fb_state_wq);
+	fb_state = FB_STATE_DRAWING_OK;
+
+	ret = sysfs_create_group(power_kobj, &attr_group);
+	if (ret) {
+		pr_err("android_power_init: sysfs_create_group failed\n");
+		return ret;
+	}
+
+	register_early_suspend(&stop_drawing_early_suspend_desc);
+	return 0;
+}
+
+static void  __exit android_power_exit(void)
+{
+	unregister_early_suspend(&stop_drawing_early_suspend_desc);
+	sysfs_remove_group(power_kobj, &attr_group);
+}
+
+module_init(android_power_init);
+module_exit(android_power_exit);
+
diff --git a/kernel/power/main.c b/kernel/power/main.c
index d77663b..aecc58e 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -15,6 +15,7 @@
 #include <linux/workqueue.h>
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
+#include <linux/early_suspend_sysfs.h>
 
 #include "power.h"
 
@@ -277,6 +278,7 @@
 #endif /* CONFIG_PM_SLEEP_DEBUG */
 
 struct kobject *power_kobj;
+struct kobject *early_suspend_kobj;
 
 /**
  *	state - control system power state.
@@ -313,7 +315,11 @@
 static suspend_state_t decode_state(const char *buf, size_t n)
 {
 #ifdef CONFIG_SUSPEND
+#ifdef CONFIG_EARLYSUSPEND
+	suspend_state_t state = PM_SUSPEND_ON;
+#else
 	suspend_state_t state = PM_SUSPEND_MIN;
+#endif
 	const char * const *s;
 #endif
 	char *p;
@@ -351,8 +357,17 @@
 	}
 
 	state = decode_state(buf, n);
-	if (state < PM_SUSPEND_MAX)
+	if (state < PM_SUSPEND_MAX) {
+#ifdef CONFIG_EARLYSUSPEND
+		if (state == PM_SUSPEND_ON || valid_state(state)) {
+			error = 0;
+			request_suspend_state(state);
+		}
+#else
 		error = pm_suspend(state);
+#endif
+
+}
 	else if (state == PM_SUSPEND_MAX)
 		error = hibernate();
 	else
@@ -624,6 +639,22 @@
 static inline int pm_start_workqueue(void) { return 0; }
 #endif
 
+int register_early_suspend_device(struct device *dev)
+{
+	if (!early_suspend_kobj || !dev)
+		return -ENODEV;
+
+	return sysfs_create_link(early_suspend_kobj, &dev->kobj,
+			dev_name(dev));
+}
+EXPORT_SYMBOL(register_early_suspend_device);
+
+void unregister_early_suspend_device(struct device *dev)
+{
+	sysfs_delete_link(early_suspend_kobj, &dev->kobj, dev_name(dev));
+}
+EXPORT_SYMBOL(unregister_early_suspend_device);
+
 static int __init pm_init(void)
 {
 	int error = pm_start_workqueue();
@@ -632,8 +663,13 @@
 	hibernate_image_size_init();
 	hibernate_reserved_size_init();
 	power_kobj = kobject_create_and_add("power", NULL);
+	early_suspend_kobj = kobject_create_and_add("early_suspend",
+					power_kobj);
+	early_suspend_init();
 	if (!power_kobj)
 		return -ENOMEM;
+	if (!early_suspend_kobj)
+		return -ENOMEM;
 	error = sysfs_create_group(power_kobj, &attr_group);
 	if (error)
 		return error;
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 7d4b7ff..79b6de5 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -294,3 +294,11 @@
 extern int pm_wake_unlock(const char *buf);
 
 #endif /* !CONFIG_PM_WAKELOCKS */
+
+#ifdef CONFIG_EARLYSUSPEND
+extern int __init early_suspend_init(void);
+/* kernel/power/earlysuspend.c */
+void request_suspend_state(suspend_state_t state);
+#else
+static inline int early_suspend_init(void) { return 0; }
+#endif
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 87387b9..dc2cb5d 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -34,6 +34,7 @@
 	unsigned int elapsed_msecs;
 	bool wakeup = false;
 	int sleep_usecs = USEC_PER_MSEC;
+	char *busy_wq_name = NULL;
 #ifdef CONFIG_PM_SLEEP
 	char suspend_abort[MAX_SUSPEND_ABORT_LEN];
 #endif
@@ -58,7 +59,7 @@
 		read_unlock(&tasklist_lock);
 
 		if (!user_only) {
-			wq_busy = freeze_workqueues_busy();
+			wq_busy = freeze_workqueues_busy(&busy_wq_name);
 			todo += wq_busy;
 		}
 
@@ -97,9 +98,9 @@
 	} else if (todo) {
 		printk("\n");
 		printk(KERN_ERR "Freezing of tasks failed after %d.%03d seconds"
-		       " (%d tasks refusing to freeze, wq_busy=%d):\n",
+		       " (%d tasks refusing to freeze, wq_busy=%d, wq_name=%s):\n",
 		       elapsed_msecs / 1000, elapsed_msecs % 1000,
-		       todo - wq_busy, wq_busy);
+		       todo - wq_busy, wq_busy, wq_busy ? "" : busy_wq_name);
 
 		read_lock(&tasklist_lock);
 		do_each_thread(g, p) {
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 587ddde..3fbb5ff 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -100,12 +100,26 @@
 	.name = "network_throughput",
 };
 
+static BLOCKING_NOTIFIER_HEAD(cpu_freq_min_notifier);
+static struct pm_qos_constraints cpu_freq_min_constraints = {
+	.list = PLIST_HEAD_INIT(cpu_freq_min_constraints.list),
+	.target_value = PM_QOS_CPU_FREQ_MIN_DEFAULT_VALUE,
+	.default_value = PM_QOS_CPU_FREQ_MIN_DEFAULT_VALUE,
+	.type = PM_QOS_MAX,
+	.notifiers = &cpu_freq_min_notifier,
+};
+static struct pm_qos_object cpu_freq_min_pm_qos = {
+	.constraints = &cpu_freq_min_constraints,
+	.name = "cpu_freq_min",
+};
+
 
 static struct pm_qos_object *pm_qos_array[] = {
 	&null_pm_qos,
 	&cpu_dma_pm_qos,
 	&network_lat_pm_qos,
-	&network_throughput_pm_qos
+	&network_throughput_pm_qos,
+	&cpu_freq_min_pm_qos
 };
 
 static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
@@ -293,6 +307,15 @@
 }
 EXPORT_SYMBOL_GPL(pm_qos_request_active);
 
+static void __pm_qos_update_request(struct pm_qos_request *req,
+			   s32 new_value)
+{
+	if (new_value != req->node.prio)
+		pm_qos_update_target(
+			pm_qos_array[req->pm_qos_class]->constraints,
+			&req->node, PM_QOS_UPDATE_REQ, new_value);
+}
+
 /**
  * pm_qos_work_fn - the timeout handler of pm_qos_update_request_timeout
  * @work: work struct for the delayed work (timeout)
@@ -305,7 +328,7 @@
 						  struct pm_qos_request,
 						  work);
 
-	pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE);
+	__pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE);
 }
 
 /**
@@ -365,6 +388,8 @@
 		pm_qos_update_target(
 			pm_qos_array[req->pm_qos_class]->constraints,
 			&req->node, PM_QOS_UPDATE_REQ, new_value);
+
+	__pm_qos_update_request(req, new_value);
 }
 EXPORT_SYMBOL_GPL(pm_qos_update_request);
 
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 221037a..0458359 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -26,12 +26,18 @@
 #include <linux/syscore_ops.h>
 #include <linux/ftrace.h>
 #include <linux/rtc.h>
+#include <linux/workqueue.h>
 #include <trace/events/power.h>
 #include <linux/wakeup_reason.h>
 
 #include "power.h"
 
+static void do_suspend_sync(struct work_struct *work);
+
 const char *const pm_states[PM_SUSPEND_MAX] = {
+#ifdef CONFIG_EARLYSUSPEND
+	[PM_SUSPEND_ON]		= "on",
+#endif
 	[PM_SUSPEND_FREEZE]	= "freeze",
 	[PM_SUSPEND_STANDBY]	= "standby",
 	[PM_SUSPEND_MEM]	= "mem",
@@ -44,6 +50,9 @@
 	return !!(state > PM_SUSPEND_FREEZE);
 }
 
+static DECLARE_WORK(suspend_sync_work, do_suspend_sync);
+static DECLARE_COMPLETION(suspend_sync_complete);
+
 static DECLARE_WAIT_QUEUE_HEAD(suspend_freeze_wait_head);
 static bool suspend_freeze_wake;
 
@@ -64,6 +73,43 @@
 }
 EXPORT_SYMBOL_GPL(freeze_wake);
 
+static void do_suspend_sync(struct work_struct *work)
+{
+	sys_sync();
+	complete(&suspend_sync_complete);
+}
+
+static bool check_sys_sync(void)
+{
+	while (!wait_for_completion_timeout(&suspend_sync_complete,
+		HZ / 5)) {
+		if (pm_wakeup_pending())
+			return false;
+		/* If sys_sync is doing, and no wakeup pending,
+		 * we can try in loop to wait sys_sync() finish.
+		 */
+	}
+
+	return true;
+}
+
+static bool suspend_sync(void)
+{
+	if (work_busy(&suspend_sync_work)) {
+		/* When last sys_sync() work is still running,
+		 * we need wait for it to be finished.
+		 */
+		if (!check_sys_sync())
+			return false;
+	}
+
+	INIT_COMPLETION(suspend_sync_complete);
+	schedule_work(&suspend_sync_work);
+
+	return check_sys_sync();
+}
+
+
 /**
  * suspend_set_ops - Set the global suspend method table.
  * @ops: Suspend operations to use.
@@ -349,7 +395,11 @@
 		freeze_begin();
 
 	printk(KERN_INFO "PM: Syncing filesystems ... ");
-	sys_sync();
+	if (!suspend_sync()) {
+		printk(KERN_INFO "PM: Suspend aborted for filesystem syncing\n");
+		error = -EBUSY;
+		goto Unlock;
+	}
 	printk("done.\n");
 
 	pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]);
diff --git a/kernel/power/wakelock.c b/kernel/power/wakelock.c
index 8f50de3..c8fba33 100644
--- a/kernel/power/wakelock.c
+++ b/kernel/power/wakelock.c
@@ -9,7 +9,6 @@
  * manipulate wakelocks on Android.
  */
 
-#include <linux/capability.h>
 #include <linux/ctype.h>
 #include <linux/device.h>
 #include <linux/err.h>
@@ -189,9 +188,6 @@
 	size_t len;
 	int ret = 0;
 
-	if (!capable(CAP_BLOCK_SUSPEND))
-		return -EPERM;
-
 	while (*str && !isspace(*str))
 		str++;
 
@@ -235,9 +231,6 @@
 	size_t len;
 	int ret = 0;
 
-	if (!capable(CAP_BLOCK_SUSPEND))
-		return -EPERM;
-
 	len = strlen(buf);
 	if (!len)
 		return -EINVAL;
diff --git a/kernel/printk.c b/kernel/printk.c
index c2f720e..ebecbbe 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -367,6 +367,13 @@
 	log_next_seq++;
 }
 
+/* Clears the ring-buffer */
+void log_buf_clear(void)
+{
+	clear_seq = log_next_seq;
+	clear_idx = log_next_idx;
+}
+
 #ifdef CONFIG_SECURITY_DMESG_RESTRICT
 int dmesg_restrict = 1;
 #else
@@ -1276,8 +1283,6 @@
 
 	trace_console(text, len);
 
-	if (level >= console_loglevel && !ignore_loglevel)
-		return;
 	if (!console_drivers)
 		return;
 
@@ -1291,6 +1296,9 @@
 		if (!cpu_online(smp_processor_id()) &&
 		    !(con->flags & CON_ANYTIME))
 			continue;
+		if ((level >= console_loglevel) &&
+		    (!(con->flags & CON_IGNORELEVEL)) && (!ignore_loglevel))
+			continue;
 		con->write(con, text, len);
 	}
 }
@@ -1357,7 +1365,7 @@
 {
 	int retval = 0, wake = 0;
 
-	if (console_trylock()) {
+	if (!in_nmi() && console_trylock()) {
 		retval = 1;
 
 		/*
@@ -1373,9 +1381,9 @@
 		}
 	}
 	logbuf_cpu = UINT_MAX;
+	raw_spin_unlock(&logbuf_lock);
 	if (wake)
 		up(&console_sem);
-	raw_spin_unlock(&logbuf_lock);
 	return retval;
 }
 
@@ -1536,7 +1544,13 @@
 	}
 
 	lockdep_off();
-	raw_spin_lock(&logbuf_lock);
+	if (unlikely(in_nmi())) {
+		if (!raw_spin_trylock(&logbuf_lock))
+			goto out_restore_lockdep_irqs;
+	} else {
+		raw_spin_lock(&logbuf_lock);
+	}
+
 	logbuf_cpu = this_cpu;
 
 	if (recursion_bug) {
@@ -1636,6 +1650,7 @@
 	if (console_trylock_for_printk(this_cpu))
 		console_unlock();
 
+out_restore_lockdep_irqs:
 	lockdep_on();
 out_restore_irqs:
 	local_irq_restore(flags);
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 8782490..7a03735 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -26,7 +26,11 @@
 #include <linux/hw_breakpoint.h>
 #include <linux/cn_proc.h>
 #include <linux/compat.h>
+#include <linux/module.h>
 
+static int ptrace_can_access;
+module_param_named(ptrace_can_access, ptrace_can_access,\
+	int, S_IRUGO | S_IWUSR | S_IWGRP);
 
 static int ptrace_trapping_sleep_fn(void *flags)
 {
@@ -225,7 +229,10 @@
 static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
 {
 	const struct cred *cred = current_cred(), *tcred;
+	int dumpable = 0;
 
+	if (ptrace_can_access)
+		return 0;
 	/* May we inspect the given task?
 	 * This check is used both for attaching with ptrace
 	 * and for allowing access to sensitive information in /proc.
@@ -234,7 +241,6 @@
 	 * because setting up the necessary parent/child relationship
 	 * or halting the specified task is impossible.
 	 */
-	int dumpable = 0;
 	/* Don't let security modules deny introspection */
 	if (same_thread_group(task, current))
 		return 0;
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index deaf90e..7317e9f 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -17,3 +17,4 @@
 obj-$(CONFIG_SCHEDSTATS) += stats.o
 obj-$(CONFIG_SCHED_DEBUG) += debug.o
 obj-$(CONFIG_CGROUP_CPUACCT) += cpuacct.o
+obj-$(CONFIG_CPU_CONCURRENCY) += consolidation.o
diff --git a/kernel/sched/consolidation.c b/kernel/sched/consolidation.c
new file mode 100644
index 0000000..967220f
--- /dev/null
+++ b/kernel/sched/consolidation.c
@@ -0,0 +1,943 @@
+/*
+ * CPU ConCurrency (CC) is measures the CPU load by averaging
+ * the number of running tasks. Using CC, the scheduler can
+ * evaluate the load of CPUs to improve load balance for power
+ * efficiency without sacrificing performance.
+ *
+ * Copyright (C) 2013 Intel, Inc.,
+ *
+ * Author: Du, Yuyang <yuyang.du@intel.com>
+ *
+ * CPU Workload Consolidation consolidate workload to the smallest
+ * number of CPUs that are capable of handling it. We measure
+ * capability of CPU by CC, then compare it with a threshold,
+ * and finally run the workload on non-shielded CPUs if they are
+ * predicted capable after the consolidation.
+ *
+ * Copyright (C) 2013 Intel, Inc.,
+ *
+ * Author: Rudramuni, Vishwesh M <vishwesh.m.rudramuni@intel.com>
+ *         Du, Yuyang <yuyang.du@intel.com>
+ *
+ */
+
+#ifdef CONFIG_CPU_CONCURRENCY
+
+#include "sched.h"
+
+/*
+ * the sum period of time is 2^26 ns (~64) by default
+ */
+unsigned long sysctl_concurrency_sum_period = 26UL;
+
+/*
+ * the number of sum periods, after which the original
+ * will be reduced/decayed to half
+ */
+unsigned long sysctl_concurrency_decay_rate = 1UL;
+
+/*
+ * the contrib period of time is 2^10 (~1us) by default,
+ * us has better precision than ms, and
+ * 1024 makes use of faster shift than div
+ */
+static unsigned long cc_contrib_period = 10UL;
+
+#ifdef CONFIG_WORKLOAD_CONSOLIDATION
+/*
+ * whether we use concurrency to select cpu to run
+ * the woken up task
+ */
+static unsigned long wc_wakeup = 1UL;
+
+/*
+ * concurrency lower than percentage of this number
+ * is capable of running wakee
+ */
+static unsigned long wc_wakeup_threshold = 80UL;
+
+/*
+ * aggressively push the task even it is hot
+ */
+static unsigned long wc_push_hot_task = 1UL;
+#endif
+
+/*
+ * the concurrency is scaled up for decaying,
+ * thus, concurrency 1 is effectively 2^cc_resolution (1024),
+ * which can be halved by 10 half-life periods
+ */
+static unsigned long cc_resolution = 10UL;
+
+/*
+ * after this number of half-life periods, even
+ * (1>>32)-1 (which is sufficiently large) is less than 1
+ */
+static unsigned long cc_decay_max_pds = 32UL;
+
+static inline unsigned long cc_scale_up(unsigned long c)
+{
+	return c << cc_resolution;
+}
+
+static inline unsigned long cc_scale_down(unsigned long c)
+{
+	return c >> cc_resolution;
+}
+
+/* from nanoseconds to sum periods */
+static inline u64 cc_sum_pds(u64 n)
+{
+	return n >> sysctl_concurrency_sum_period;
+}
+
+/* from sum period to timestamp in ns */
+static inline u64 cc_timestamp(u64 p)
+{
+	return p << sysctl_concurrency_sum_period;
+}
+
+/*
+ * from nanoseconds to contrib periods, because
+ * ns so risky that can overflow cc->contrib
+ */
+static inline u64 cc_contrib_pds(u64 n)
+{
+	return n >> cc_contrib_period;
+}
+
+/*
+ * cc_decay_factor only works for 32bit integer,
+ * cc_decay_factor_x, x indicates the number of periods
+ * as half-life (sysctl_concurrency_decay_rate)
+ */
+static const unsigned long cc_decay_factor_1[] = {
+	0xFFFFFFFF,
+};
+
+static const unsigned long cc_decay_factor_2[] = {
+	0xFFFFFFFF, 0xB504F333,
+};
+
+static const unsigned long cc_decay_factor_4[] = {
+	0xFFFFFFFF, 0xD744FCCA, 0xB504F333, 0x9837F051,
+};
+
+static const unsigned long cc_decay_factor_8[] = {
+	0xFFFFFFFF, 0xEAC0C6E7, 0xD744FCCA, 0xC5672A11,
+	0xB504F333, 0xA5FED6A9, 0x9837F051, 0x8B95C1E3,
+};
+
+/* by default sysctl_concurrency_decay_rate */
+static const unsigned long *cc_decay_factor =
+	cc_decay_factor_1;
+
+/*
+ * cc_decayed_sum depends on cc_resolution (fixed 10),
+ * cc_decayed_sum_x, x indicates the number of periods
+ * as half-life (sysctl_concurrency_decay_rate)
+ */
+static const unsigned long cc_decayed_sum_1[] = {
+	0, 512, 768, 896, 960, 992,
+	1008, 1016, 1020, 1022, 1023,
+};
+
+static const unsigned long cc_decayed_sum_2[] = {
+	0, 724, 1235, 1597, 1853, 2034, 2162, 2252,
+	2316, 2361, 2393, 2416, 2432, 2443, 2451,
+	2457, 2461, 2464, 2466, 2467, 2468, 2469,
+};
+
+static const unsigned long cc_decayed_sum_4[] = {
+	0, 861, 1585, 2193, 2705, 3135, 3497, 3801, 4057,
+	4272, 4453, 4605, 4733, 4840, 4930, 5006, 5070,
+	5124, 5169, 5207, 5239, 5266, 5289, 5308, 5324,
+	5337, 5348, 5358, 5366, 5373, 5379, 5384, 5388,
+	5391, 5394, 5396, 5398, 5400, 5401, 5402, 5403,
+	5404, 5405, 5406,
+};
+
+static const unsigned long cc_decayed_sum_8[] = {
+	0, 939, 1800, 2589, 3313, 3977, 4585, 5143,
+	5655, 6124, 6554, 6949, 7311, 7643, 7947, 8226,
+	8482, 8717, 8932, 9129, 9310, 9476, 9628, 9767,
+	9895, 10012, 10120, 10219, 10309, 10392, 10468, 10538,
+	10602, 10661, 10715, 10764, 10809, 10850, 10888, 10923,
+	10955, 10984, 11011, 11036, 11059, 11080, 11099, 11116,
+	11132, 11147, 11160, 11172, 11183, 11193, 11203, 11212,
+	11220, 11227, 11234, 11240, 11246, 11251, 11256, 11260,
+	11264, 11268, 11271, 11274, 11277, 11280, 11282, 11284,
+	11286, 11288, 11290, 11291, 11292, 11293, 11294, 11295,
+	11296, 11297, 11298, 11299, 11300, 11301, 11302,
+};
+
+/* by default sysctl_concurrency_decay_rate */
+static const unsigned long *cc_decayed_sum = cc_decayed_sum_1;
+
+/*
+ * the last index of cc_decayed_sum array
+ */
+static unsigned long cc_decayed_sum_len =
+	sizeof(cc_decayed_sum_1) / sizeof(cc_decayed_sum_1[0]) - 1;
+
+/*
+ * sysctl handler to update decay rate
+ */
+int concurrency_decay_rate_handler(struct ctl_table *table, int write,
+		void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+	int ret = proc_dointvec(table, write, buffer, lenp, ppos);
+
+	if (ret || !write)
+		return ret;
+
+	switch (sysctl_concurrency_decay_rate) {
+	case 1:
+		cc_decay_factor = cc_decay_factor_1;
+		cc_decayed_sum = cc_decayed_sum_1;
+		cc_decayed_sum_len = sizeof(cc_decayed_sum_1) /
+			sizeof(cc_decayed_sum_1[0]) - 1;
+		break;
+	case 2:
+		cc_decay_factor = cc_decay_factor_2;
+		cc_decayed_sum = cc_decayed_sum_2;
+		cc_decayed_sum_len = sizeof(cc_decayed_sum_2) /
+			sizeof(cc_decayed_sum_2[0]) - 1;
+		break;
+	case 4:
+		cc_decay_factor = cc_decay_factor_4;
+		cc_decayed_sum = cc_decayed_sum_4;
+		cc_decayed_sum_len = sizeof(cc_decayed_sum_4) /
+			sizeof(cc_decayed_sum_4[0]) - 1;
+		break;
+	case 8:
+		cc_decay_factor = cc_decay_factor_8;
+		cc_decayed_sum = cc_decayed_sum_8;
+		cc_decayed_sum_len = sizeof(cc_decayed_sum_8) /
+			sizeof(cc_decayed_sum_8[0]) - 1;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	cc_decay_max_pds *= sysctl_concurrency_decay_rate;
+
+	return 0;
+}
+
+/*
+ * decay concurrency at some decay rate
+ */
+static inline u64 decay_cc(u64 cc, u64 periods)
+{
+	u32 periods_l;
+
+	if (periods <= 0)
+		return cc;
+
+	if (unlikely(periods >= cc_decay_max_pds))
+		return 0;
+
+	/* now period is not too large */
+	periods_l = (u32)periods;
+	if (periods_l >= sysctl_concurrency_decay_rate) {
+		cc >>= periods_l / sysctl_concurrency_decay_rate;
+		periods_l %= sysctl_concurrency_decay_rate;
+	}
+
+	if (!periods_l)
+		return cc;
+
+	cc *= cc_decay_factor[periods_l];
+
+	return cc >> 32;
+}
+
+/*
+ * add missed periods by predefined constants
+ */
+static inline u64 cc_missed_pds(u64 periods)
+{
+	if (periods <= 0)
+		return 0;
+
+	if (periods > cc_decayed_sum_len)
+		periods = cc_decayed_sum_len;
+
+	return cc_decayed_sum[periods];
+}
+
+/*
+ * scale up nr_running, because we decay
+ */
+static inline unsigned long cc_weight(unsigned long nr_running)
+{
+	/*
+	 * scaling factor, this should be tunable
+	 */
+	return cc_scale_up(nr_running);
+}
+
+static inline void
+__update_concurrency(struct rq *rq, u64 now, struct cpu_concurrency_t *cc)
+{
+	u64 sum_pds, sum_pds_s, sum_pds_e;
+	u64 contrib_pds, ts_contrib, contrib_pds_one;
+	u64 sum_now = 0;
+	unsigned long weight;
+	int updated = 0;
+
+	/*
+	 * guarantee contrib_timestamp always >= sum_timestamp,
+	 * and sum_timestamp is at period boundary
+	 */
+	if (now <= cc->sum_timestamp) {
+		cc->sum_timestamp = cc_timestamp(cc_sum_pds(now));
+		cc->contrib_timestamp = now;
+		return;
+	}
+
+	weight = cc_weight(cc->nr_running);
+
+	/* start and end of sum periods */
+	sum_pds_s = cc_sum_pds(cc->sum_timestamp);
+	sum_pds_e = cc_sum_pds(now);
+	sum_pds = sum_pds_e - sum_pds_s;
+	/* number of contrib periods in one sum period */
+	contrib_pds_one = cc_contrib_pds(cc_timestamp(1));
+
+	/*
+	 * if we have passed at least one period,
+	 * we need to do four things:
+	 */
+	if (sum_pds) {
+		/* 1) complete the last period */
+		ts_contrib = cc_timestamp(sum_pds_s + 1);
+		contrib_pds = cc_contrib_pds(ts_contrib);
+		contrib_pds -= cc_contrib_pds(cc->contrib_timestamp);
+
+		if (likely(contrib_pds))
+			cc->contrib += weight * contrib_pds;
+
+		cc->contrib = div64_u64(cc->contrib, contrib_pds_one);
+
+		cc->sum += cc->contrib;
+		cc->contrib = 0;
+
+		/* 2) update/decay them */
+		cc->sum = decay_cc(cc->sum, sum_pds);
+		sum_now = decay_cc(cc->sum, sum_pds - 1);
+
+		/* 3) compensate missed periods if any */
+		sum_pds -= 1;
+		cc->sum += cc->nr_running * cc_missed_pds(sum_pds);
+		sum_now += cc->nr_running * cc_missed_pds(sum_pds - 1);
+		updated = 1;
+
+		/* 4) update contrib timestamp to period boundary */
+		ts_contrib = cc_timestamp(sum_pds_e);
+
+		cc->sum_timestamp = ts_contrib;
+		cc->contrib_timestamp = ts_contrib;
+	}
+
+	/* current period */
+	contrib_pds = cc_contrib_pds(now);
+	contrib_pds -= cc_contrib_pds(cc->contrib_timestamp);
+
+	if (likely(contrib_pds))
+		cc->contrib += weight * contrib_pds;
+
+	/* new nr_running for next update */
+	cc->nr_running = rq->nr_running;
+
+	/*
+	 * we need to account for the current sum period,
+	 * if now has passed 1/2 of sum period, we contribute,
+	 * otherwise, we use the last complete sum period
+	 */
+	contrib_pds = cc_contrib_pds(now - cc->sum_timestamp);
+
+	if (contrib_pds > contrib_pds_one / 2) {
+		sum_now = div64_u64(cc->contrib, contrib_pds);
+		sum_now += cc->sum;
+		updated = 1;
+	}
+
+	if (updated == 1)
+		cc->sum_now = sum_now;
+	cc->contrib_timestamp = now;
+}
+
+void init_cpu_concurrency(struct rq *rq)
+{
+	rq->concurrency.sum = 0;
+	rq->concurrency.sum_now = 0;
+	rq->concurrency.contrib = 0;
+	rq->concurrency.nr_running = 0;
+	rq->concurrency.sum_timestamp = ULLONG_MAX;
+	rq->concurrency.contrib_timestamp = ULLONG_MAX;
+#ifdef CONFIG_WORKLOAD_CONSOLIDATION
+	rq->concurrency.unload = 0;
+#endif
+}
+
+/*
+ * we update cpu concurrency at:
+ * 1) enqueue task, which increases concurrency
+ * 2) dequeue task, which decreases concurrency
+ * 3) periodic scheduler tick, in case no en/dequeue for long
+ * 4) enter and exit idle (necessary?)
+ */
+void update_cpu_concurrency(struct rq *rq)
+{
+	/*
+	 * protected under rq->lock
+	 */
+	struct cpu_concurrency_t *cc = &rq->concurrency;
+	u64 now = rq->clock;
+
+	__update_concurrency(rq, now, cc);
+}
+
+#endif
+
+#ifdef CONFIG_WORKLOAD_CONSOLIDATION
+/*
+ * whether cpu is capable of having more concurrency
+ */
+static int cpu_cc_capable(int cpu)
+{
+	u64 sum = cpu_rq(cpu)->concurrency.sum_now;
+	u64 threshold = cc_weight(1);
+
+	sum *= 100;
+	sum *= cpu_rq(cpu)->cpu_power;
+
+	threshold *= wc_wakeup_threshold;
+	threshold <<= SCHED_POWER_SHIFT;
+
+	if (sum <= threshold)
+		return 1;
+
+	return 0;
+}
+
+/*
+ * we do not select idle, if the cc of the
+ * wakee and waker (in this order) is capable
+ * of handling the wakee task
+ */
+int workload_consolidation_wakeup(int prev, int target)
+{
+	if (!wc_wakeup) {
+		if (idle_cpu(target))
+			return target;
+
+		return nr_cpu_ids;
+	}
+
+	if (idle_cpu(prev) || cpu_cc_capable(prev))
+		return prev;
+
+	if (prev != target && (idle_cpu(target) || cpu_cc_capable(target)))
+		return target;
+
+	return nr_cpu_ids;
+}
+
+static inline u64 sched_group_cc(struct sched_group *sg)
+{
+	u64 sg_cc = 0;
+	int i;
+
+	for_each_cpu(i, sched_group_cpus(sg))
+		sg_cc += cpu_rq(i)->concurrency.sum_now *
+			cpu_rq(i)->cpu_power;
+
+	return sg_cc;
+}
+
+static inline u64 sched_domain_cc(struct sched_domain *sd)
+{
+	struct sched_group *sg = sd->groups;
+	u64 sd_cc = 0;
+
+	do {
+		sd_cc += sched_group_cc(sg);
+		sg = sg->next;
+	} while (sg != sd->groups);
+
+	return sd_cc;
+}
+
+static inline struct sched_group *
+find_lowest_cc_group(struct sched_group *sg, int span)
+{
+	u64 grp_cc, min = ULLONG_MAX;
+	struct sched_group *lowest = NULL;
+	int i;
+
+	for (i = 0; i < span; ++i) {
+		grp_cc = sched_group_cc(sg);
+
+		if (grp_cc < min) {
+			min = grp_cc;
+			lowest = sg;
+		}
+
+		sg = sg->next;
+	}
+
+	return lowest;
+}
+
+static inline u64 __calc_cc_thr(int cpus, unsigned int asym_cc)
+{
+	u64 thr = cpus;
+
+	thr *= cc_weight(1);
+	thr *= asym_cc;
+	thr <<= SCHED_POWER_SHIFT;
+
+	return thr;
+}
+
+/*
+ * can @src_cc of @src_nr cpus be consolidated
+ * to @dst_cc of @dst_nr cpus
+ */
+static inline int
+__can_consolidate_cc(u64 src_cc, int src_nr, u64 dst_cc, int dst_nr)
+{
+	dst_cc *= dst_nr;
+	src_nr -= dst_nr;
+
+	if (unlikely(src_nr <= 0))
+		return 0;
+
+	src_nr = ilog2(src_nr);
+	src_nr += dst_nr;
+	src_cc *= src_nr;
+
+	if (src_cc > dst_cc)
+		return 0;
+
+	return 1;
+}
+
+/*
+ * find the group for asymmetric concurrency
+ * problem to address: traverse sd from top to down
+ */
+struct sched_group *
+workload_consolidation_find_group(struct sched_domain *sd,
+	struct task_struct *p, int this_cpu)
+{
+	int half, sg_weight, ns_half = 0;
+	struct sched_group *sg;
+	u64 sd_cc;
+
+	half = DIV_ROUND_CLOSEST(sd->total_groups, 2);
+	sg_weight = sd->groups->group_weight;
+
+	sd_cc = sched_domain_cc(sd);
+	sd_cc *= 100;
+
+	while (half) {
+		int allowed = 0, i;
+		int cpus = sg_weight * half;
+		u64 threshold = __calc_cc_thr(cpus,
+			sd->asym_concurrency);
+
+		/*
+		 * we did not consider the added cc by this
+		 * wakeup (mostly from fork/exec)
+		 */
+		if (!__can_consolidate_cc(sd_cc, sd->span_weight,
+			threshold, cpus))
+			break;
+
+		sg = sd->first_group;
+		for (i = 0; i < half; ++i) {
+			/* if it has no cpus allowed */
+			if (!cpumask_intersects(sched_group_cpus(sg),
+					tsk_cpus_allowed(p)))
+				continue;
+
+			allowed = 1;
+			sg = sg->next;
+		}
+
+		if (!allowed)
+			break;
+
+		ns_half = half;
+		half /= 2;
+	}
+
+	if (!ns_half)
+		return NULL;
+
+	if (ns_half == 1)
+		return sd->first_group;
+
+	return find_lowest_cc_group(sd->first_group, ns_half);
+}
+
+/*
+ * top_flag_domain - return top sched_domain containing flag.
+ * @cpu:	the cpu whose highest level of sched domain is to
+ *		be returned.
+ * @flag:	the flag to check for the highest sched_domain
+ *		for the given cpu.
+ *
+ * returns the highest sched_domain of a cpu which contains the given flag.
+ * different from highest_flag_domain in that along the domain upward chain
+ * domain may or may not contain the flag.
+ */
+static inline struct sched_domain *top_flag_domain(int cpu, int flag)
+{
+	struct sched_domain *sd, *hsd = NULL;
+
+	for_each_domain(cpu, sd) {
+		if (!(sd->flags & flag))
+			continue;
+		hsd = sd;
+	}
+
+	return hsd;
+}
+
+/*
+ * workload_consolidation_cpu_shielded - return whether @cpu is shielded or not
+ *
+ * traverse downward the sched_domain tree when the sched_domain contains
+ * flag SD_ASYM_CONCURRENCY, each sd may have more than two groups, but
+ * we assume 1) every sched_group has the same weight, 2) every CPU has
+ * the same computing power
+ */
+int workload_consolidation_cpu_shielded(int cpu)
+{
+	struct sched_domain *sd;
+
+	sd = top_flag_domain(cpu, SD_ASYM_CONCURRENCY);
+
+	while (sd) {
+		int half, sg_weight, this_sg_nr;
+		u64 sd_cc;
+
+		if (!(sd->flags & SD_ASYM_CONCURRENCY)) {
+			sd = sd->child;
+			continue;
+		}
+
+		half = DIV_ROUND_CLOSEST(sd->total_groups, 2);
+		sg_weight = sd->groups->group_weight;
+		this_sg_nr = sd->group_number;
+
+		sd_cc = sched_domain_cc(sd);
+		sd_cc *= 100;
+
+		while (half) {
+			int cpus = sg_weight * half;
+			u64 threshold = __calc_cc_thr(cpus,
+				sd->asym_concurrency);
+
+			if (!__can_consolidate_cc(sd_cc, sd->span_weight,
+				threshold, cpus))
+				return 0;
+
+			if (this_sg_nr >= half)
+				return 1;
+
+			half /= 2;
+		}
+
+		sd = sd->child;
+	}
+
+	return 0;
+}
+
+/*
+ * as of now, we have the following assumption
+ * 1) every sched_group has the same weight
+ * 2) every CPU has the same computing power
+ */
+static inline int __nonshielded_groups(struct sched_domain *sd)
+{
+	int half, sg_weight, ret = 0;
+	u64 sd_cc;
+
+	half = DIV_ROUND_CLOSEST(sd->total_groups, 2);
+	sg_weight = sd->groups->group_weight;
+
+	sd_cc = sched_domain_cc(sd);
+	sd_cc *= 100;
+
+	while (half) {
+		int cpus = sg_weight * half;
+		u64 threshold = __calc_cc_thr(cpus,
+			sd->asym_concurrency);
+
+		if (!__can_consolidate_cc(sd_cc, sd->span_weight,
+			threshold, cpus))
+			return ret;
+
+		ret = half;
+		half /= 2;
+	}
+
+	return ret;
+}
+
+static DEFINE_PER_CPU(struct cpumask, nonshielded_cpumask);
+
+/*
+ * workload_consolidation_nonshielded_mask - return the nonshielded cpus in the @mask,
+ * which is unmasked by the shielded cpus
+ *
+ * traverse downward the sched_domain tree when the sched_domain contains
+ * flag SD_ASYM_CONCURRENCY, each sd may have more than two groups
+ */
+void workload_consolidation_nonshielded_mask(int cpu, struct cpumask *mask)
+{
+	struct sched_domain *sd;
+	struct cpumask *pcpu_mask = &per_cpu(nonshielded_cpumask, cpu);
+	int i;
+
+	sd = top_flag_domain(cpu, SD_ASYM_CONCURRENCY);
+
+	if (!sd)
+		return;
+
+	while (sd) {
+		struct sched_group *sg;
+		int this_sg_nr, ns_half;
+
+		if (!(sd->flags & SD_ASYM_CONCURRENCY)) {
+			sd = sd->child;
+			continue;
+		}
+
+		ns_half = __nonshielded_groups(sd);
+
+		if (!ns_half)
+			break;
+
+		cpumask_clear(pcpu_mask);
+		sg = sd->first_group;
+
+		for (i = 0; i < ns_half; ++i) {
+			cpumask_or(pcpu_mask, pcpu_mask,
+				sched_group_cpus(sg));
+			sg = sg->next;
+		}
+
+		cpumask_and(mask, mask, pcpu_mask);
+
+		this_sg_nr = sd->group_number;
+		if (this_sg_nr)
+			break;
+
+		sd = sd->child;
+	}
+}
+
+static int cpu_task_hot(struct task_struct *p, u64 now)
+{
+	s64 delta;
+
+	if (p->sched_class != &fair_sched_class)
+		return 0;
+
+	if (unlikely(p->policy == SCHED_IDLE))
+		return 0;
+
+	if (sysctl_sched_migration_cost == -1)
+		return 1;
+
+	if (sysctl_sched_migration_cost == 0)
+		return 0;
+
+	if (wc_push_hot_task)
+		return 0;
+
+	/*
+	 * buddy candidates are cache hot:
+	 */
+	if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
+			(&p->se == p->se.cfs_rq->next ||
+			 &p->se == p->se.cfs_rq->last)) {
+		return 1;
+	}
+
+	delta = now - p->se.exec_start;
+
+	if (delta < (s64)sysctl_sched_migration_cost)
+		return 1;
+
+	return 0;
+}
+
+static int
+cpu_move_task(struct task_struct *p, struct rq *src_rq, struct rq *dst_rq)
+{
+	/*
+	 * we do not migrate tasks that are:
+	 * 1) running (obviously), or
+	 * 2) cannot be migrated to this CPU due to cpus_allowed, or
+	 * 3) are cache-hot on their current CPU.
+	 */
+	if (!cpumask_test_cpu(dst_rq->cpu, tsk_cpus_allowed(p)))
+		return 0;
+
+	if (task_running(src_rq, p))
+		return 0;
+
+	/*
+	 * aggressive migration if task is cache cold
+	 */
+	if (!cpu_task_hot(p, src_rq->clock_task)) {
+		/*
+		 * move a task
+		 */
+		deactivate_task(src_rq, p, 0);
+		set_task_cpu(p, dst_rq->cpu);
+		activate_task(dst_rq, p, 0);
+		check_preempt_curr(dst_rq, p, 0);
+		return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * __unload_cpu_work is run by src cpu stopper, which pushes running
+ * tasks off src cpu onto dst cpu
+ */
+static int __unload_cpu_work(void *data)
+{
+	struct rq *src_rq = data;
+	int src_cpu = cpu_of(src_rq);
+	struct cpu_concurrency_t *cc = &src_rq->concurrency;
+	struct rq *dst_rq = cpu_rq(cc->dst_cpu);
+
+	struct list_head *tasks = &src_rq->cfs_tasks;
+	struct task_struct *p, *n;
+	int pushed = 0;
+	int nr_migrate_break = 1;
+
+	raw_spin_lock_irq(&src_rq->lock);
+
+	/* make sure the requested cpu hasn't gone down in the meantime */
+	if (unlikely(src_cpu != smp_processor_id() || !cc->unload))
+		goto out_unlock;
+
+	/* Is there any task to move? */
+	if (src_rq->nr_running <= 1)
+		goto out_unlock;
+
+	double_lock_balance(src_rq, dst_rq);
+
+	list_for_each_entry_safe(p, n, tasks, se.group_node) {
+
+		if (!cpu_move_task(p, src_rq, dst_rq))
+			continue;
+
+		pushed++;
+
+		if (pushed >= nr_migrate_break)
+			break;
+	}
+
+	double_unlock_balance(src_rq, dst_rq);
+out_unlock:
+	cc->unload = 0;
+	raw_spin_unlock_irq(&src_rq->lock);
+
+	return 0;
+}
+
+/*
+ * unload src_cpu to dst_cpu
+ */
+static void unload_cpu(int src_cpu, int dst_cpu)
+{
+	unsigned long flags;
+	struct rq *src_rq = cpu_rq(src_cpu);
+	struct cpu_concurrency_t *cc = &src_rq->concurrency;
+	int unload = 0;
+
+	raw_spin_lock_irqsave(&src_rq->lock, flags);
+
+	if (!cc->unload) {
+		cc->unload = 1;
+		cc->dst_cpu = dst_cpu;
+		unload = 1;
+	}
+
+	raw_spin_unlock_irqrestore(&src_rq->lock, flags);
+
+	if (unload)
+		stop_one_cpu_nowait(src_cpu, __unload_cpu_work, src_rq,
+			&cc->unload_work);
+}
+
+static inline int find_lowest_cc_cpu(struct cpumask *mask)
+{
+	u64 cpu_cc, min = ULLONG_MAX;
+	int i, lowest = nr_cpu_ids;
+	struct rq *rq;
+
+	for_each_cpu(i, mask) {
+		rq = cpu_rq(i);
+		cpu_cc = rq->concurrency.sum_now * rq->cpu_power;
+
+		if (cpu_cc < min) {
+			min = cpu_cc;
+			lowest = i;
+		}
+	}
+
+	return lowest;
+}
+
+/*
+ * find the lowest cc cpu in shielded and nonshielded cpus,
+ * aggressively unload the shielded to the nonshielded
+ */
+void workload_consolidation_unload(struct cpumask *nonshielded)
+{
+	int src_cpu = nr_cpu_ids, dst_cpu, i;
+	u64 cpu_cc, min = ULLONG_MAX;
+	struct rq *rq;
+
+	for_each_cpu_not(i, nonshielded) {
+		if (i >= nr_cpu_ids)
+			break;
+
+		rq = cpu_rq(i);
+		if (rq->nr_running <= 0)
+			continue;
+
+		cpu_cc = rq->concurrency.sum_now * rq->cpu_power;
+		if (cpu_cc < min) {
+			min = cpu_cc;
+			src_cpu = i;
+		}
+	}
+
+	if (src_cpu >= nr_cpu_ids)
+		return;
+
+	dst_cpu = find_lowest_cc_cpu(nonshielded);
+	if (dst_cpu >= nr_cpu_ids)
+		return;
+
+	if (src_cpu != dst_cpu)
+		unload_cpu(src_cpu, dst_cpu);
+}
+
+#endif /* CONFIG_WORKLOAD_CONSOLIDATION */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d5c5c98..21ee7f4 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -767,6 +767,7 @@
 	update_rq_clock(rq);
 	sched_info_queued(p);
 	p->sched_class->enqueue_task(rq, p, flags);
+	update_cpu_concurrency(rq);
 }
 
 static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
@@ -774,6 +775,7 @@
 	update_rq_clock(rq);
 	sched_info_dequeued(p);
 	p->sched_class->dequeue_task(rq, p, flags);
+	update_cpu_concurrency(rq);
 }
 
 void activate_task(struct rq *rq, struct task_struct *p, int flags)
@@ -2740,6 +2742,7 @@
 	raw_spin_lock(&rq->lock);
 	update_rq_clock(rq);
 	update_cpu_load_active(rq);
+	update_cpu_concurrency(rq);
 	curr->sched_class->task_tick(rq, curr, 0);
 	raw_spin_unlock(&rq->lock);
 
@@ -4029,6 +4032,7 @@
 {
 	return __sched_setscheduler(p, policy, param, false);
 }
+EXPORT_SYMBOL(sched_setscheduler_nocheck);
 
 static int
 do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
@@ -4690,6 +4694,9 @@
 		touch_nmi_watchdog();
 		if (!state_filter || (p->state & state_filter))
 			sched_show_task(p);
+#ifdef CONFIG_EMMC_IPANIC
+		emmc_ipanic_stream_emmc();
+#endif
 	} while_each_thread(g, p);
 
 	touch_all_softlockup_watchdogs();
@@ -4703,6 +4710,10 @@
 	 */
 	if (!state_filter)
 		debug_show_all_locks();
+
+#ifdef CONFIG_EMMC_IPANIC
+	emmc_ipanic_stream_emmc();
+#endif
 }
 
 void __cpuinit init_idle_bootup_task(struct task_struct *idle)
@@ -5058,7 +5069,11 @@
 static struct ctl_table *
 sd_alloc_ctl_domain_table(struct sched_domain *sd)
 {
+#ifdef CONFIG_WORKLOAD_CONSOLIDATION
+	struct ctl_table *table = sd_alloc_ctl_entry(14);
+#else
 	struct ctl_table *table = sd_alloc_ctl_entry(13);
+#endif
 
 	if (table == NULL)
 		return NULL;
@@ -5088,7 +5103,13 @@
 		sizeof(int), 0644, proc_dointvec_minmax, false);
 	set_table_entry(&table[11], "name", sd->name,
 		CORENAME_MAX_SIZE, 0444, proc_dostring, false);
+#ifdef CONFIG_WORKLOAD_CONSOLIDATION
+	set_table_entry(&table[12], "asym_concurrency", &sd->asym_concurrency,
+		sizeof(int), 0644, proc_dointvec, false);
+	/* &table[13] is terminator */
+#else
 	/* &table[12] is terminator */
+#endif
 
 	return table;
 }
@@ -5663,6 +5684,32 @@
 	per_cpu(sd_llc_id, cpu) = id;
 }
 
+#ifdef CONFIG_WORKLOAD_CONSOLIDATION
+static void update_domain_extra_info(struct sched_domain *sd)
+{
+	while (sd) {
+		int i = 0, j = 0, first, min = INT_MAX;
+		struct sched_group *group;
+
+		group = sd->groups;
+		first = group_first_cpu(group);
+		do {
+			int k = group_first_cpu(group);
+			i += 1;
+			if (k < first)
+				j += 1;
+			if (k < min) {
+				sd->first_group = group;
+				min = k;
+			}
+		} while (group = group->next, group != sd->groups);
+
+		sd->total_groups = i;
+		sd->group_number = j;
+		sd = sd->parent;
+	}
+}
+#endif
 /*
  * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
  * hold the hotplug lock.
@@ -5704,6 +5751,10 @@
 	destroy_sched_domains(tmp, cpu);
 
 	update_top_cache_domain(cpu);
+
+#ifdef CONFIG_WORKLOAD_CONSOLIDATION
+	update_domain_extra_info(sd);
+#endif
 }
 
 /* cpus with isolated domains */
@@ -7040,6 +7091,11 @@
 #endif
 		init_rq_hrtick(rq);
 		atomic_set(&rq->nr_iowait, 0);
+
+		/*
+		 * cpu concurrency init
+		 */
+		init_cpu_concurrency(rq);
 	}
 
 	set_load_weight(&init_task);
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 8e8259e..20c8160 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -569,7 +569,7 @@
 			   struct cputime *prev,
 			   cputime_t *ut, cputime_t *st)
 {
-	cputime_t rtime, stime, utime, total;
+	cputime_t rtime, stime, utime;
 
 	if (vtime_accounting_enabled()) {
 		*ut = curr->utime;
@@ -577,9 +577,6 @@
 		return;
 	}
 
-	stime = curr->stime;
-	total = stime + curr->utime;
-
 	/*
 	 * Tick based cputime accounting depend on random scheduling
 	 * timeslices of a task to be interrupted or not by the timer.
@@ -600,13 +597,19 @@
 	if (prev->stime + prev->utime >= rtime)
 		goto out;
 
-	if (total) {
+	stime = curr->stime;
+	utime = curr->utime;
+
+	if (utime == 0) {
+		stime = rtime;
+	} else if (stime == 0) {
+		utime = rtime;
+	} else {
+		cputime_t total = stime + utime;
+
 		stime = scale_stime((__force u64)stime,
 				    (__force u64)rtime, (__force u64)total);
 		utime = rtime - stime;
-	} else {
-		stime = rtime;
-		utime = 0;
 	}
 
 	/*
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 24acce2..9681d5e 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1572,6 +1572,7 @@
 void idle_enter_fair(struct rq *this_rq)
 {
 	update_rq_runnable_avg(this_rq, 1);
+	update_cpu_concurrency(this_rq);
 }
 
 /*
@@ -1582,6 +1583,7 @@
 void idle_exit_fair(struct rq *this_rq)
 {
 	update_rq_runnable_avg(this_rq, 0);
+	update_cpu_concurrency(this_rq);
 }
 
 #else
@@ -1985,6 +1987,7 @@
 	 */
 	update_entity_load_avg(curr, 1);
 	update_cfs_rq_blocked_load(cfs_rq, 1);
+	update_cfs_shares(cfs_rq);
 
 #ifdef CONFIG_SCHED_HRTICK
 	/*
@@ -3259,6 +3262,10 @@
 
 	/* Traverse only the allowed CPUs */
 	for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
+#ifdef CONFIG_WORKLOAD_CONSOLIDATION
+		if (workload_consolidation_cpu_shielded(i))
+			continue;
+#endif
 		load = weighted_cpuload(i);
 
 		if (load < min_load || (load == min_load && i == this_cpu)) {
@@ -3278,9 +3285,16 @@
 	struct sched_domain *sd;
 	struct sched_group *sg;
 	int i = task_cpu(p);
+#ifdef CONFIG_WORKLOAD_CONSOLIDATION
+	int ret;
 
+	ret = workload_consolidation_wakeup(i, target);
+	if (ret < nr_cpu_ids)
+		return ret;
+#else
 	if (idle_cpu(target))
 		return target;
+#endif
 
 	/*
 	 * If the prevous cpu is cache affine and idle, don't be stupid.
@@ -3374,7 +3388,7 @@
 
 	while (sd) {
 		int load_idx = sd->forkexec_idx;
-		struct sched_group *group;
+		struct sched_group *group = NULL;
 		int weight;
 
 		if (!(sd->flags & sd_flag)) {
@@ -3385,7 +3399,14 @@
 		if (sd_flag & SD_BALANCE_WAKE)
 			load_idx = sd->wake_idx;
 
+#ifdef CONFIG_WORKLOAD_CONSOLIDATION
+		if (sd->flags & SD_ASYM_CONCURRENCY)
+			group = workload_consolidation_find_group(sd, p, cpu);
+
+		if (!group)
+#endif
 		group = find_idlest_group(sd, p, cpu, load_idx);
+
 		if (!group) {
 			sd = sd->child;
 			continue;
@@ -3618,6 +3639,8 @@
 		return NULL;
 
 	do {
+		if (!cfs_rq->rb_leftmost)
+			return NULL;
 		se = pick_next_entity(cfs_rq);
 		set_next_entity(cfs_rq, se);
 		cfs_rq = group_cfs_rq(se);
@@ -5232,6 +5255,10 @@
 	return ld_moved;
 }
 
+#ifdef CONFIG_WORKLOAD_CONSOLIDATION
+static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
+#endif
+
 /*
  * idle_balance is called by schedule() if this_cpu is about to become
  * idle. Attempts to pull tasks from other CPUs.
@@ -5241,6 +5268,9 @@
 	struct sched_domain *sd;
 	int pulled_task = 0;
 	unsigned long next_balance = jiffies + HZ;
+#ifdef CONFIG_WORKLOAD_CONSOLIDATION
+	struct cpumask *nonshielded = __get_cpu_var(local_cpu_mask);
+#endif
 
 	this_rq->idle_stamp = this_rq->clock;
 
@@ -5254,6 +5284,19 @@
 
 	update_blocked_averages(this_cpu);
 	rcu_read_lock();
+
+#ifdef CONFIG_WORKLOAD_CONSOLIDATION
+	cpumask_copy(nonshielded, cpu_active_mask);
+
+	/*
+	 * if we encounter shadowded cpus here, don't do balance on them
+	 */
+	workload_consolidation_nonshielded_mask(this_cpu, nonshielded);
+	if (!cpumask_test_cpu(this_cpu, nonshielded))
+		goto unlock;
+	workload_consolidation_unload(nonshielded);
+#endif
+
 	for_each_domain(this_cpu, sd) {
 		unsigned long interval;
 		int balance = 1;
@@ -5275,6 +5318,9 @@
 			break;
 		}
 	}
+#ifdef CONFIG_WORKLOAD_CONSOLIDATION
+unlock:
+#endif
 	rcu_read_unlock();
 
 	raw_spin_lock(&this_rq->lock);
@@ -5371,10 +5417,45 @@
 
 static inline int find_new_ilb(int call_cpu)
 {
+#ifdef CONFIG_WORKLOAD_CONSOLIDATION
+	struct cpumask *nonshielded = __get_cpu_var(local_cpu_mask);
+	int ilb, weight;
+
+	/*
+	 * Optimize for the case when we have no idle CPUs or only one
+	 * idle CPU. Don't walk the sched_domain hierarchy in such cases
+	 */
+	if (cpumask_weight(nohz.idle_cpus_mask) < 2)
+		return nr_cpu_ids;
+
+	ilb = cpumask_first(nohz.idle_cpus_mask);
+
+	if (ilb < nr_cpu_ids && idle_cpu(ilb)) {
+
+		cpumask_copy(nonshielded, nohz.idle_cpus_mask);
+
+		rcu_read_lock();
+		workload_consolidation_nonshielded_mask(call_cpu, nonshielded);
+		rcu_read_unlock();
+
+		weight = cpumask_weight(nonshielded);
+
+		if (weight < 2)
+			return nr_cpu_ids;
+
+		/*
+		 * get idle load balancer again
+		 */
+		ilb = cpumask_first(nonshielded);
+	    if (ilb < nr_cpu_ids && idle_cpu(ilb))
+			return ilb;
+	}
+#else
 	int ilb = cpumask_first(nohz.idle_cpus_mask);
 
 	if (ilb < nr_cpu_ids && idle_cpu(ilb))
 		return ilb;
+#endif
 
 	return nr_cpu_ids;
 }
@@ -5578,7 +5659,7 @@
  * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
  * rebalancing for all the cpus for whom scheduler ticks are stopped.
  */
-static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
+static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle, struct cpumask *mask)
 {
 	struct rq *this_rq = cpu_rq(this_cpu);
 	struct rq *rq;
@@ -5588,7 +5669,7 @@
 	    !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
 		goto end;
 
-	for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
+	for_each_cpu(balance_cpu, mask) {
 		if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
 			continue;
 
@@ -5634,10 +5715,10 @@
 	if (unlikely(idle_cpu(cpu)))
 		return 0;
 
-       /*
-	* We may be recently in ticked or tickless idle mode. At the first
-	* busy tick after returning from idle, we will update the busy stats.
-	*/
+	/*
+	 * We may be recently in ticked or tickless idle mode. At the first
+	 * busy tick after returning from idle, we will update the busy stats.
+	 */
 	set_cpu_sd_state_busy();
 	nohz_balance_exit_idle(cpu);
 
@@ -5680,7 +5761,7 @@
 	return 1;
 }
 #else
-static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
+static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle, struct cpumask *mask) { }
 #endif
 
 /*
@@ -5693,6 +5774,35 @@
 	struct rq *this_rq = cpu_rq(this_cpu);
 	enum cpu_idle_type idle = this_rq->idle_balance ?
 						CPU_IDLE : CPU_NOT_IDLE;
+#ifdef CONFIG_WORKLOAD_CONSOLIDATION
+	struct cpumask *nonshielded = __get_cpu_var(local_cpu_mask);
+
+	/*
+	 * if we encounter shadowded cpus here, don't do balance on them
+	 */
+	cpumask_copy(nonshielded, cpu_active_mask);
+
+	rcu_read_lock();
+	workload_consolidation_nonshielded_mask(this_cpu, nonshielded);
+	rcu_read_unlock();
+
+	/*
+	 * aggressively unload the shielded cpus to unshielded cpus
+	 */
+	workload_consolidation_unload(nonshielded);
+
+	if (cpumask_test_cpu(this_cpu, nonshielded)) {
+		rebalance_domains(this_cpu, idle);
+
+		/*
+		 * If this cpu has a pending nohz_balance_kick, then do the
+		 * balancing on behalf of the other idle cpus whose ticks are
+		 * stopped.
+		 */
+		cpumask_and(nonshielded, nonshielded, nohz.idle_cpus_mask);
+		nohz_idle_balance(this_cpu, idle, nonshielded);
+	}
+#else
 
 	rebalance_domains(this_cpu, idle);
 
@@ -5701,7 +5811,8 @@
 	 * balancing on behalf of the other idle cpus whose ticks are
 	 * stopped.
 	 */
-	nohz_idle_balance(this_cpu, idle);
+	nohz_idle_balance(this_cpu, idle, nohz.idle_cpus_mask);
+#endif
 }
 
 static inline int on_null_domain(int cpu)
@@ -5778,11 +5889,15 @@
 	cfs_rq = task_cfs_rq(current);
 	curr = cfs_rq->curr;
 
-	if (unlikely(task_cpu(p) != this_cpu)) {
-		rcu_read_lock();
-		__set_task_cpu(p, this_cpu);
-		rcu_read_unlock();
-	}
+	/*
+	 * Not only the cpu but also the task_group of the parent might have
+	 * been changed after parent->se.parent,cfs_rq were copied to
+	 * child->se.parent,cfs_rq. So call __set_task_cpu() to make those
+	 * of child point to valid ones.
+	 */
+	rcu_read_lock();
+	__set_task_cpu(p, this_cpu);
+	rcu_read_unlock();
 
 	update_curr(cfs_rq);
 
@@ -6186,6 +6301,14 @@
 __init void init_sched_fair_class(void)
 {
 #ifdef CONFIG_SMP
+#ifdef CONFIG_WORKLOAD_CONSOLIDATION
+	unsigned int i;
+	for_each_possible_cpu(i) {
+		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
+					GFP_KERNEL, cpu_to_node(i));
+	}
+#endif
+
 	open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
 
 #ifdef CONFIG_NO_HZ_COMMON
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 127a2c4..a4e3533 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1244,6 +1244,9 @@
 	struct task_struct *curr;
 	struct rq *rq;
 	int cpu;
+#ifdef CONFIG_WORKLOAD_CONSOLIDATION
+	int do_find = 0;
+#endif
 
 	cpu = task_cpu(p);
 
@@ -1259,6 +1262,12 @@
 	rcu_read_lock();
 	curr = ACCESS_ONCE(rq->curr); /* unlocked access */
 
+#ifdef CONFIG_WORKLOAD_CONSOLIDATION
+	if (workload_consolidation_cpu_shielded(cpu) &&
+		(p->nr_cpus_allowed > 1))
+		do_find = 1;
+#endif
+
 	/*
 	 * If the current task on @p's runqueue is an RT task, then
 	 * try to see if we can wake this RT task up on another
@@ -1281,10 +1290,18 @@
 	 * This test is optimistic, if we get it wrong the load-balancer
 	 * will have to sort it out.
 	 */
+#ifdef CONFIG_WORKLOAD_CONSOLIDATION
+	if (do_find || (curr && unlikely(rt_task(curr)) &&
+	    (curr->nr_cpus_allowed < 2 ||
+	     curr->prio <= p->prio) &&
+	    (p->nr_cpus_allowed > 1)))
+#else
 	if (curr && unlikely(rt_task(curr)) &&
 	    (curr->nr_cpus_allowed < 2 ||
 	     curr->prio <= p->prio) &&
-	    (p->nr_cpus_allowed > 1)) {
+	    (p->nr_cpus_allowed > 1))
+#endif
+	{
 		int target = find_lowest_rq(p);
 
 		if (target != -1)
@@ -1491,6 +1508,12 @@
 	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
 		return -1; /* No targets found */
 
+#ifdef CONFIG_WORKLOAD_CONSOLIDATION
+	workload_consolidation_nonshielded_mask(this_cpu, lowest_mask);
+	if (!cpumask_weight(lowest_mask))
+		return -1;
+#endif
+
 	/*
 	 * At this point we have built a mask of cpus representing the
 	 * lowest priority tasks in the system.  Now we want to elect
@@ -1718,6 +1741,11 @@
 	if (likely(!rt_overloaded(this_rq)))
 		return 0;
 
+#ifdef CONFIG_WORKLOAD_CONSOLIDATION
+	if (workload_consolidation_cpu_shielded(this_cpu))
+		return 0;
+#endif
+
 	for_each_cpu(cpu, this_rq->rd->rto_mask) {
 		if (this_cpu == cpu)
 			continue;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index ce39224d..6a03e9a 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -387,6 +387,22 @@
 
 #endif /* CONFIG_SMP */
 
+#ifdef CONFIG_CPU_CONCURRENCY
+struct cpu_concurrency_t {
+	u64 sum;
+	u64 sum_now;
+	u64 contrib;
+	u64 sum_timestamp;
+	u64 contrib_timestamp;
+	unsigned long nr_running;
+#ifdef CONFIG_WORKLOAD_CONSOLIDATION
+	int unload;
+	int dst_cpu;
+	struct cpu_stop_work unload_work;
+#endif
+};
+#endif
+
 /*
  * This is the main, per-CPU runqueue data structure.
  *
@@ -521,6 +537,10 @@
 #endif
 
 	struct sched_avg avg;
+
+#ifdef CONFIG_CPU_CONCURRENCY
+	struct cpu_concurrency_t concurrency;
+#endif
 };
 
 static inline int cpu_of(struct rq *rq)
@@ -1058,6 +1078,22 @@
 extern void resched_task(struct task_struct *p);
 extern void resched_cpu(int cpu);
 
+#ifdef CONFIG_CPU_CONCURRENCY
+extern void init_cpu_concurrency(struct rq *rq);
+extern void update_cpu_concurrency(struct rq *rq);
+#ifdef CONFIG_WORKLOAD_CONSOLIDATION
+extern int workload_consolidation_wakeup(int prev, int target);
+extern struct sched_group *
+workload_consolidation_find_group(struct sched_domain *sd, struct task_struct *p, int this_cpu);
+extern void workload_consolidation_unload(struct cpumask *nonshielded);
+extern int workload_consolidation_cpu_shielded(int cpu);
+extern void workload_consolidation_nonshielded_mask(int cpu, struct cpumask *mask);
+#endif
+#else
+static inline void init_cpu_concurrency(struct rq *rq) {}
+static inline void update_cpu_concurrency(struct rq *rq) {}
+#endif
+
 extern struct rt_bandwidth def_rt_bandwidth;
 extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
 
diff --git a/kernel/smp.c b/kernel/smp.c
index 4dba0f7..7e838e0 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -12,6 +12,7 @@
 #include <linux/gfp.h>
 #include <linux/smp.h>
 #include <linux/cpu.h>
+#include <linux/nmi.h>
 
 #include "smpboot.h"
 
@@ -102,14 +103,26 @@
  */
 static void csd_lock_wait(struct call_single_data *csd)
 {
-	while (csd->flags & CSD_FLAG_LOCK)
+	unsigned long timeout = jiffies + 5 * HZ;	/* must be less than Soft & Hard lockup timeouts */
+
+	while (csd->flags & CSD_FLAG_LOCK) {
 		cpu_relax();
+
+		/* Dump useful info in case of deadlock */
+		if (time_after(jiffies, timeout)) {
+			timeout = jiffies + 5 * HZ;
+			pr_emerg("BUG: CPU %d waiting for CSD lock held by CPU %d\n", get_cpu(), csd->cpu);
+			dump_stack();
+			trigger_all_cpu_backtrace();
+		}
+	}
 }
 
-static void csd_lock(struct call_single_data *csd)
+static void csd_lock(int cpu, struct call_single_data *csd)
 {
 	csd_lock_wait(csd);
 	csd->flags |= CSD_FLAG_LOCK;
+	csd->cpu = cpu;
 
 	/*
 	 * prevent CPU from reordering the above assignment
@@ -129,6 +142,7 @@
 	smp_mb();
 
 	csd->flags &= ~CSD_FLAG_LOCK;
+	csd->cpu = -1;
 }
 
 /*
@@ -254,7 +268,7 @@
 			if (!wait)
 				csd = &__get_cpu_var(csd_data);
 
-			csd_lock(csd);
+			csd_lock(cpu, csd);
 
 			csd->func = func;
 			csd->info = info;
@@ -346,7 +360,7 @@
 		csd->func(csd->info);
 		local_irq_restore(flags);
 	} else {
-		csd_lock(csd);
+		csd_lock(cpu, csd);
 		generic_exec_single(cpu, csd, wait);
 	}
 	put_cpu();
@@ -423,7 +437,7 @@
 					&per_cpu(call_single_queue, cpu);
 		unsigned long flags;
 
-		csd_lock(csd);
+		csd_lock(cpu, csd);
 		csd->func = func;
 		csd->info = info;
 
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 3d6833f..787b3a0 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -330,10 +330,19 @@
 
 static inline void invoke_softirq(void)
 {
-	if (!force_irqthreads)
-		__do_softirq();
-	else
+	if (!force_irqthreads) {
+		/*
+		 * We can safely execute softirq on the current stack if
+		 * it is the irq stack, because it should be near empty
+		 * at this stage. But we have no way to know if the arch
+		 * calls irq_exit() on the irq stack. So call softirq
+		 * in its own stack to prevent from any overrun on top
+		 * of a potentially deep task stack.
+		 */
+		do_softirq();
+	} else {
 		wakeup_softirqd();
+	}
 }
 
 static inline void tick_irq_exit(void)
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 2e81f5c..ea66172 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1065,6 +1065,22 @@
 		.proc_handler	= proc_dointvec,
 	},
 #endif
+#ifdef CONFIG_CPU_CONCURRENCY
+	{
+		.procname	= "concurrency_sum_period",
+		.data		= &sysctl_concurrency_sum_period,
+		.maxlen		= sizeof(sysctl_concurrency_sum_period),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
+		.procname	= "concurrency_decay_rate",
+		.data		= &sysctl_concurrency_decay_rate,
+		.maxlen		= sizeof(sysctl_concurrency_decay_rate),
+		.mode		= 0644,
+		.proc_handler	= concurrency_decay_rate_handler,
+	},
+#endif
 	{ }
 };
 
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 3e5cba2..ca63352 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -25,6 +25,8 @@
 #include <linux/posix-timers.h>
 #include <linux/workqueue.h>
 #include <linux/freezer.h>
+#include <linux/reboot.h>
+#include <linux/notifier.h>
 
 /**
  * struct alarm_base - Alarm timer bases
@@ -47,6 +49,14 @@
 
 static struct wakeup_source *ws;
 
+static int alarm_reboot_callback(struct notifier_block *nfb,
+			unsigned long event, void *data);
+
+static struct notifier_block alarm_reboot_notifier_block = {
+	.notifier_call = alarm_reboot_callback,
+	.priority = 0,
+};
+
 #ifdef CONFIG_RTC_CLASS
 /* rtc timer and device for setting alarm wakeups at suspend */
 static struct rtc_timer		rtctimer;
@@ -270,11 +280,92 @@
 		__pm_wakeup_event(ws, MSEC_PER_SEC);
 	return ret;
 }
+
+/**
+ * alarmtimer_resume - Resume time callback
+ * @dev: unused
+ *
+ * We just waked up, no need of rtc timer anymore,
+ * so we can cancel it.
+ */
+static int alarmtimer_resume(struct device *dev)
+{
+	struct rtc_device *rtc;
+
+	rtc = alarmtimer_get_rtcdev();
+	/* If we have no rtcdev, just return */
+	if (!rtc)
+		return 0;
+
+	/* cancel rtc timer if pending */
+	rtc_timer_cancel(rtc, &rtctimer);
+
+	return 0;
+}
+
+static void write_rtc_wakeup(void)
+{
+	struct rtc_time tm;
+	ktime_t min, now;
+	unsigned long flags;
+	struct rtc_device *rtc;
+	struct alarm_base *base = &alarm_bases[ALARM_REALTIME_OFF];
+	struct timerqueue_node *next;
+	ktime_t delta;
+
+	spin_lock_irqsave(&freezer_delta_lock, flags);
+	min = freezer_delta;
+	freezer_delta = ktime_set(0, 0);
+	spin_unlock_irqrestore(&freezer_delta_lock, flags);
+
+	rtc = alarmtimer_get_rtcdev();
+	/* If we have no rtcdev, just return */
+	if (!rtc)
+		return;
+
+	/* Find the soonest timer to expire*/
+
+	spin_lock_irqsave(&base->lock, flags);
+	next = timerqueue_getnext(&base->timerqueue);
+	spin_unlock_irqrestore(&base->lock, flags);
+	if (!next) {
+		rtc_timer_cancel(rtc, &rtctimer);
+		return;
+	}
+
+	delta = ktime_sub(next->expires, base->gettime());
+	if (!min.tv64 || (delta.tv64 < min.tv64))
+		min = delta;
+
+	if (min.tv64 == 0)
+		return;
+
+	/* Setup an rtc timer to fire that far in the future */
+	rtc_timer_cancel(rtc, &rtctimer);
+	rtc_read_time(rtc, &tm);
+	now = rtc_tm_to_ktime(tm);
+	now = ktime_add(now, min);
+
+	/* Set alarm */
+	rtc_timer_start(rtc, &rtctimer, now, ktime_set(0, 0));
+}
+
 #else
 static int alarmtimer_suspend(struct device *dev)
 {
 	return 0;
 }
+
+static int alarmtimer_resume(struct device *dev)
+{
+	return 0;
+}
+
+static void write_rtc_wakeup(void)
+{
+	return;
+}
+
 #endif
 
 static void alarmtimer_freezerset(ktime_t absexp, enum alarmtimer_type type)
@@ -444,6 +535,8 @@
 		return ALARM_REALTIME;
 	if (clockid == CLOCK_BOOTTIME_ALARM)
 		return ALARM_BOOTTIME;
+	if (clockid == CLOCK_REALTIME_ALARM_OFF)
+		return ALARM_REALTIME_OFF;
 	return -1;
 }
 
@@ -767,8 +860,16 @@
 /* Suspend hook structures */
 static const struct dev_pm_ops alarmtimer_pm_ops = {
 	.suspend = alarmtimer_suspend,
+	.resume = alarmtimer_resume,
 };
 
+static int alarm_reboot_callback(struct notifier_block *nfb,
+				unsigned long event, void *data)
+{
+	write_rtc_wakeup();
+	return NOTIFY_OK;
+}
+
 static struct platform_driver alarmtimer_driver = {
 	.driver = {
 		.name = "alarmtimer",
@@ -801,12 +902,15 @@
 
 	posix_timers_register_clock(CLOCK_REALTIME_ALARM, &alarm_clock);
 	posix_timers_register_clock(CLOCK_BOOTTIME_ALARM, &alarm_clock);
+	posix_timers_register_clock(CLOCK_REALTIME_ALARM_OFF, &alarm_clock);
 
 	/* Initialize alarm bases */
 	alarm_bases[ALARM_REALTIME].base_clockid = CLOCK_REALTIME;
 	alarm_bases[ALARM_REALTIME].gettime = &ktime_get_real;
 	alarm_bases[ALARM_BOOTTIME].base_clockid = CLOCK_BOOTTIME;
 	alarm_bases[ALARM_BOOTTIME].gettime = &ktime_get_boottime;
+	alarm_bases[ALARM_REALTIME_OFF].base_clockid = CLOCK_REALTIME;
+	alarm_bases[ALARM_REALTIME_OFF].gettime = &ktime_get_real;
 	for (i = 0; i < ALARM_NUMTYPE; i++) {
 		timerqueue_init_head(&alarm_bases[i].timerqueue);
 		spin_lock_init(&alarm_bases[i].lock);
@@ -826,6 +930,9 @@
 		goto out_drv;
 	}
 	ws = wakeup_source_register("alarmtimer");
+
+	register_reboot_notifier(&alarm_reboot_notifier_block);
+
 	return 0;
 
 out_drv:
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index c6d6400..6a23c6c 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -30,6 +30,54 @@
 /* Protection for the above */
 static DEFINE_RAW_SPINLOCK(clockevents_lock);
 
+static u64 cev_delta2ns(unsigned long latch, struct clock_event_device *evt,
+			bool ismax)
+{
+	u64 clc = (u64) latch << evt->shift;
+	u64 rnd;
+
+	if (unlikely(!evt->mult)) {
+		evt->mult = 1;
+		WARN_ON(1);
+	}
+	rnd = (u64) evt->mult - 1;
+
+	/*
+	 * Upper bound sanity check. If the backwards conversion is
+	 * not equal latch, we know that the above shift overflowed.
+	 */
+	if ((clc >> evt->shift) != (u64)latch)
+		clc = ~0ULL;
+
+	/*
+	 * Scaled math oddities:
+	 *
+	 * For mult <= (1 << shift) we can safely add mult - 1 to
+	 * prevent integer rounding loss. So the backwards conversion
+	 * from nsec to device ticks will be correct.
+	 *
+	 * For mult > (1 << shift), i.e. device frequency is > 1GHz we
+	 * need to be careful. Adding mult - 1 will result in a value
+	 * which when converted back to device ticks can be larger
+	 * than latch by up to (mult - 1) >> shift. For the min_delta
+	 * calculation we still want to apply this in order to stay
+	 * above the minimum device ticks limit. For the upper limit
+	 * we would end up with a latch value larger than the upper
+	 * limit of the device, so we omit the add to stay below the
+	 * device upper boundary.
+	 *
+	 * Also omit the add if it would overflow the u64 boundary.
+	 */
+	if ((~0ULL - clc > rnd) &&
+	    (!ismax || evt->mult <= (1U << evt->shift)))
+		clc += rnd;
+
+	do_div(clc, evt->mult);
+
+	/* Deltas less than 1usec are pointless noise */
+	return clc > 1000 ? clc : 1000;
+}
+
 /**
  * clockevents_delta2ns - Convert a latch value (device ticks) to nanoseconds
  * @latch:	value to convert
@@ -39,20 +87,7 @@
  */
 u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
 {
-	u64 clc = (u64) latch << evt->shift;
-
-	if (unlikely(!evt->mult)) {
-		evt->mult = 1;
-		WARN_ON(1);
-	}
-
-	do_div(clc, evt->mult);
-	if (clc < 1000)
-		clc = 1000;
-	if (clc > KTIME_MAX)
-		clc = KTIME_MAX;
-
-	return clc;
+	return cev_delta2ns(latch, evt, false);
 }
 EXPORT_SYMBOL_GPL(clockevent_delta2ns);
 
@@ -317,8 +352,8 @@
 		sec = 600;
 
 	clockevents_calc_mult_shift(dev, freq, sec);
-	dev->min_delta_ns = clockevent_delta2ns(dev->min_delta_ticks, dev);
-	dev->max_delta_ns = clockevent_delta2ns(dev->max_delta_ticks, dev);
+	dev->min_delta_ns = cev_delta2ns(dev->min_delta_ticks, dev, false);
+	dev->max_delta_ns = cev_delta2ns(dev->max_delta_ticks, dev, true);
 }
 
 /**
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 8f5b3b9..bb22151 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -516,13 +516,13 @@
 	schedule_delayed_work(&sync_cmos_work, timespec_to_jiffies(&next));
 }
 
-static void notify_cmos_timer(void)
+void ntp_notify_cmos_timer(void)
 {
 	schedule_delayed_work(&sync_cmos_work, 0);
 }
 
 #else
-static inline void notify_cmos_timer(void) { }
+void ntp_notify_cmos_timer(void) { }
 #endif
 
 
@@ -687,8 +687,6 @@
 	if (!(time_status & STA_NANO))
 		txc->time.tv_usec /= NSEC_PER_USEC;
 
-	notify_cmos_timer();
-
 	return result;
 }
 
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 20d6fba..297b90b 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -29,6 +29,7 @@
 
 static struct tick_device tick_broadcast_device;
 static cpumask_var_t tick_broadcast_mask;
+static cpumask_var_t tick_broadcast_on;
 static cpumask_var_t tmpmask;
 static DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
 static int tick_broadcast_force;
@@ -123,8 +124,9 @@
  */
 int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
 {
+	struct clock_event_device *bc = tick_broadcast_device.evtdev;
 	unsigned long flags;
-	int ret = 0;
+	int ret;
 
 	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
 
@@ -138,20 +140,59 @@
 		dev->event_handler = tick_handle_periodic;
 		tick_device_setup_broadcast_func(dev);
 		cpumask_set_cpu(cpu, tick_broadcast_mask);
-		tick_broadcast_start_periodic(tick_broadcast_device.evtdev);
+		tick_broadcast_start_periodic(bc);
 		ret = 1;
 	} else {
 		/*
-		 * When the new device is not affected by the stop
-		 * feature and the cpu is marked in the broadcast mask
-		 * then clear the broadcast bit.
+		 * Clear the broadcast bit for this cpu if the
+		 * device is not power state affected.
 		 */
-		if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
-			int cpu = smp_processor_id();
+		if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
 			cpumask_clear_cpu(cpu, tick_broadcast_mask);
-			tick_broadcast_clear_oneshot(cpu);
-		} else {
+		else
 			tick_device_setup_broadcast_func(dev);
+
+		/*
+		 * Clear the broadcast bit if the CPU is not in
+		 * periodic broadcast on state.
+		 */
+		if (!cpumask_test_cpu(cpu, tick_broadcast_on))
+			cpumask_clear_cpu(cpu, tick_broadcast_mask);
+
+		switch (tick_broadcast_device.mode) {
+		case TICKDEV_MODE_ONESHOT:
+			/*
+			 * If the system is in oneshot mode we can
+			 * unconditionally clear the oneshot mask bit,
+			 * because the CPU is running and therefore
+			 * not in an idle state which causes the power
+			 * state affected device to stop. Let the
+			 * caller initialize the device.
+			 */
+			tick_broadcast_clear_oneshot(cpu);
+			ret = 0;
+			break;
+
+		case TICKDEV_MODE_PERIODIC:
+			/*
+			 * If the system is in periodic mode, check
+			 * whether the broadcast device can be
+			 * switched off now.
+			 */
+			if (cpumask_empty(tick_broadcast_mask) && bc)
+				clockevents_shutdown(bc);
+			/*
+			 * If we kept the cpu in the broadcast mask,
+			 * tell the caller to leave the per cpu device
+			 * in shutdown state. The periodic interrupt
+			 * is delivered by the broadcast device.
+			 */
+			ret = cpumask_test_cpu(cpu, tick_broadcast_mask);
+			break;
+		default:
+			/* Nothing to do */
+			ret = 0;
+			break;
 		}
 	}
 	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
@@ -281,6 +322,7 @@
 	switch (*reason) {
 	case CLOCK_EVT_NOTIFY_BROADCAST_ON:
 	case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
+		cpumask_set_cpu(cpu, tick_broadcast_on);
 		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {
 			if (tick_broadcast_device.mode ==
 			    TICKDEV_MODE_PERIODIC)
@@ -290,8 +332,12 @@
 			tick_broadcast_force = 1;
 		break;
 	case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
-		if (!tick_broadcast_force &&
-		    cpumask_test_and_clear_cpu(cpu, tick_broadcast_mask)) {
+		if (tick_broadcast_force)
+			break;
+		cpumask_clear_cpu(cpu, tick_broadcast_on);
+		if (!tick_device_is_functional(dev))
+			break;
+		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_mask)) {
 			if (tick_broadcast_device.mode ==
 			    TICKDEV_MODE_PERIODIC)
 				tick_setup_periodic(dev, 0);
@@ -349,6 +395,7 @@
 
 	bc = tick_broadcast_device.evtdev;
 	cpumask_clear_cpu(cpu, tick_broadcast_mask);
+	cpumask_clear_cpu(cpu, tick_broadcast_on);
 
 	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
 		if (bc && cpumask_empty(tick_broadcast_mask))
@@ -475,7 +522,15 @@
 	if (cpumask_test_cpu(cpu, tick_broadcast_oneshot_mask)) {
 		struct tick_device *td = &per_cpu(tick_cpu_device, cpu);
 
-		clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_ONESHOT);
+		/*
+		 * We might be in the middle of switching over from
+		 * periodic to oneshot. If the CPU has not yet
+		 * switched over, leave the device alone.
+		 */
+		if (td->mode == TICKDEV_MODE_ONESHOT) {
+			clockevents_set_mode(td->evtdev,
+					     CLOCK_EVT_MODE_ONESHOT);
+		}
 	}
 }
 
@@ -792,6 +847,7 @@
 void __init tick_broadcast_init(void)
 {
 	zalloc_cpumask_var(&tick_broadcast_mask, GFP_NOWAIT);
+	zalloc_cpumask_var(&tick_broadcast_on, GFP_NOWAIT);
 	zalloc_cpumask_var(&tmpmask, GFP_NOWAIT);
 #ifdef CONFIG_TICK_ONESHOT
 	zalloc_cpumask_var(&tick_broadcast_oneshot_mask, GFP_NOWAIT);
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 5d3fb10..7ce5e5a 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -194,7 +194,8 @@
 	 * When global broadcasting is active, check if the current
 	 * device is registered as a placeholder for broadcast mode.
 	 * This allows us to handle this x86 misfeature in a generic
-	 * way.
+	 * way. This function also returns !=0 when we keep the
+	 * current active broadcast state for this CPU.
 	 */
 	if (tick_device_uses_broadcast(newdev, cpu))
 		return;
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index baeeb5c8..fcc261c 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -1682,6 +1682,8 @@
 	write_seqcount_end(&timekeeper_seq);
 	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
 
+	ntp_notify_cmos_timer();
+
 	return ret;
 }
 
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index 3bdf283..61ed862 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -265,10 +265,9 @@
 static int timer_list_show(struct seq_file *m, void *v)
 {
 	struct timer_list_iter *iter = v;
-	u64 now = ktime_to_ns(ktime_get());
 
 	if (iter->cpu == -1 && !iter->second_pass)
-		timer_list_header(m, now);
+		timer_list_header(m, iter->now);
 	else if (!iter->second_pass)
 		print_cpu(m, iter->cpu, iter->now);
 #ifdef CONFIG_GENERIC_CLOCKEVENTS
@@ -298,33 +297,41 @@
 	return;
 }
 
+static void *move_iter(struct timer_list_iter *iter, loff_t offset)
+{
+	for (; offset; offset--) {
+		iter->cpu = cpumask_next(iter->cpu, cpu_online_mask);
+		if (iter->cpu >= nr_cpu_ids) {
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
+			if (!iter->second_pass) {
+				iter->cpu = -1;
+				iter->second_pass = true;
+			} else
+				return NULL;
+#else
+			return NULL;
+#endif
+		}
+	}
+	return iter;
+}
+
 static void *timer_list_start(struct seq_file *file, loff_t *offset)
 {
 	struct timer_list_iter *iter = file->private;
 
-	if (!*offset) {
-		iter->cpu = -1;
+	if (!*offset)
 		iter->now = ktime_to_ns(ktime_get());
-	} else if (iter->cpu >= nr_cpu_ids) {
-#ifdef CONFIG_GENERIC_CLOCKEVENTS
-		if (!iter->second_pass) {
-			iter->cpu = -1;
-			iter->second_pass = true;
-		} else
-			return NULL;
-#else
-		return NULL;
-#endif
-	}
-	return iter;
+	iter->cpu = -1;
+	iter->second_pass = false;
+	return move_iter(iter, *offset);
 }
 
 static void *timer_list_next(struct seq_file *file, void *v, loff_t *offset)
 {
 	struct timer_list_iter *iter = file->private;
-	iter->cpu = cpumask_next(iter->cpu, cpu_online_mask);
 	++*offset;
-	return timer_list_start(file, offset);
+	return move_iter(iter, 1);
 }
 
 static void timer_list_stop(struct seq_file *seq, void *v)
diff --git a/kernel/timer.c b/kernel/timer.c
index 15ffdb3..15bc1b4 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -149,9 +149,11 @@
 	/* now that we have rounded, subtract the extra skew again */
 	j -= cpu * 3;
 
-	if (j <= jiffies) /* rounding ate our timeout entirely; */
-		return original;
-	return j;
+	/*
+	 * Make sure j is still in the future. Otherwise return the
+	 * unmodified value.
+	 */
+	return time_is_after_jiffies(j) ? j : original;
 }
 
 /**
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 6c508ff..f23449d 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1416,12 +1416,22 @@
  * the hashes are freed with call_rcu_sched().
  */
 static int
-ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
+ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
 {
 	struct ftrace_hash *filter_hash;
 	struct ftrace_hash *notrace_hash;
 	int ret;
 
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+	/*
+	 * There's a small race when adding ops that the ftrace handler
+	 * that wants regs, may be called without them. We can not
+	 * allow that handler to be called if regs is NULL.
+	 */
+	if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
+		return 0;
+#endif
+
 	filter_hash = rcu_dereference_raw_notrace(ops->filter_hash);
 	notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash);
 
@@ -2134,12 +2144,57 @@
 static unsigned long	ftrace_update_cnt;
 unsigned long		ftrace_update_tot_cnt;
 
-static int ops_traces_mod(struct ftrace_ops *ops)
+static inline int ops_traces_mod(struct ftrace_ops *ops)
 {
-	struct ftrace_hash *hash;
+	/*
+	 * Filter_hash being empty will default to trace module.
+	 * But notrace hash requires a test of individual module functions.
+	 */
+	return ftrace_hash_empty(ops->filter_hash) &&
+		ftrace_hash_empty(ops->notrace_hash);
+}
 
-	hash = ops->filter_hash;
-	return ftrace_hash_empty(hash);
+/*
+ * Check if the current ops references the record.
+ *
+ * If the ops traces all functions, then it was already accounted for.
+ * If the ops does not trace the current record function, skip it.
+ * If the ops ignores the function via notrace filter, skip it.
+ */
+static inline bool
+ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
+{
+	/* If ops isn't enabled, ignore it */
+	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
+		return 0;
+
+	/* If ops traces all mods, we already accounted for it */
+	if (ops_traces_mod(ops))
+		return 0;
+
+	/* The function must be in the filter */
+	if (!ftrace_hash_empty(ops->filter_hash) &&
+	    !ftrace_lookup_ip(ops->filter_hash, rec->ip))
+		return 0;
+
+	/* If in notrace hash, we ignore it too */
+	if (ftrace_lookup_ip(ops->notrace_hash, rec->ip))
+		return 0;
+
+	return 1;
+}
+
+static int referenced_filters(struct dyn_ftrace *rec)
+{
+	struct ftrace_ops *ops;
+	int cnt = 0;
+
+	for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
+		if (ops_references_rec(ops, rec))
+		    cnt++;
+	}
+
+	return cnt;
 }
 
 static int ftrace_update_code(struct module *mod)
@@ -2148,6 +2203,7 @@
 	struct dyn_ftrace *p;
 	cycle_t start, stop;
 	unsigned long ref = 0;
+	bool test = false;
 	int i;
 
 	/*
@@ -2161,9 +2217,12 @@
 
 		for (ops = ftrace_ops_list;
 		     ops != &ftrace_list_end; ops = ops->next) {
-			if (ops->flags & FTRACE_OPS_FL_ENABLED &&
-			    ops_traces_mod(ops))
-				ref++;
+			if (ops->flags & FTRACE_OPS_FL_ENABLED) {
+				if (ops_traces_mod(ops))
+					ref++;
+				else
+					test = true;
+			}
 		}
 	}
 
@@ -2173,12 +2232,16 @@
 	for (pg = ftrace_new_pgs; pg; pg = pg->next) {
 
 		for (i = 0; i < pg->index; i++) {
+			int cnt = ref;
+
 			/* If something went wrong, bail without enabling anything */
 			if (unlikely(ftrace_disabled))
 				return -1;
 
 			p = &pg->records[i];
-			p->flags = ref;
+			if (test)
+				cnt += referenced_filters(p);
+			p->flags = cnt;
 
 			/*
 			 * Do the initial record conversion from mcount jump
@@ -2198,7 +2261,7 @@
 			 * conversion puts the module to the correct state, thus
 			 * passing the ftrace_make_call check.
 			 */
-			if (ftrace_start_up && ref) {
+			if (ftrace_start_up && cnt) {
 				int failed = __ftrace_replace_code(p, 1);
 				if (failed)
 					ftrace_bug(failed, p->ip);
@@ -4188,7 +4251,7 @@
 # define ftrace_shutdown_sysctl()	do { } while (0)
 
 static inline int
-ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
+ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
 {
 	return 1;
 }
@@ -4211,7 +4274,7 @@
 	do_for_each_ftrace_op(op, ftrace_control_list) {
 		if (!(op->flags & FTRACE_OPS_FL_STUB) &&
 		    !ftrace_function_local_disabled(op) &&
-		    ftrace_ops_test(op, ip))
+		    ftrace_ops_test(op, ip, regs))
 			op->func(ip, parent_ip, op, regs);
 	} while_for_each_ftrace_op(op);
 	trace_recursion_clear(TRACE_CONTROL_BIT);
@@ -4244,7 +4307,7 @@
 	 */
 	preempt_disable_notrace();
 	do_for_each_ftrace_op(op, ftrace_ops_list) {
-		if (ftrace_ops_test(op, ip))
+		if (ftrace_ops_test(op, ip, regs))
 			op->func(ip, parent_ip, op, regs);
 	} while_for_each_ftrace_op(op);
 	preempt_enable_notrace();
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 28f007a..b49eb7e 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -40,6 +40,9 @@
 #include <linux/fs.h>
 #include <linux/sched/rt.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/marker_event.h>
+
 #include "trace.h"
 #include "trace_output.h"
 
@@ -193,6 +196,37 @@
 
 LIST_HEAD(ftrace_trace_arrays);
 
+int trace_array_get(struct trace_array *this_tr)
+{
+	struct trace_array *tr;
+	int ret = -ENODEV;
+
+	mutex_lock(&trace_types_lock);
+	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
+		if (tr == this_tr) {
+			tr->ref++;
+			ret = 0;
+			break;
+		}
+	}
+	mutex_unlock(&trace_types_lock);
+
+	return ret;
+}
+
+static void __trace_array_put(struct trace_array *this_tr)
+{
+	WARN_ON(!this_tr->ref);
+	this_tr->ref--;
+}
+
+void trace_array_put(struct trace_array *this_tr)
+{
+	mutex_lock(&trace_types_lock);
+	__trace_array_put(this_tr);
+	mutex_unlock(&trace_types_lock);
+}
+
 int filter_current_check_discard(struct ring_buffer *buffer,
 				 struct ftrace_event_call *call, void *rec,
 				 struct ring_buffer_event *event)
@@ -201,23 +235,43 @@
 }
 EXPORT_SYMBOL_GPL(filter_current_check_discard);
 
-cycle_t ftrace_now(int cpu)
+cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
 {
 	u64 ts;
 
 	/* Early boot up does not have a buffer yet */
-	if (!global_trace.trace_buffer.buffer)
+	if (!buf->buffer)
 		return trace_clock_local();
 
-	ts = ring_buffer_time_stamp(global_trace.trace_buffer.buffer, cpu);
-	ring_buffer_normalize_time_stamp(global_trace.trace_buffer.buffer, cpu, &ts);
+	ts = ring_buffer_time_stamp(buf->buffer, cpu);
+	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
 
 	return ts;
 }
 
+cycle_t ftrace_now(int cpu)
+{
+	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
+}
+
+/**
+ * tracing_is_enabled - Show if global_trace has been disabled
+ *
+ * Shows if the global trace has been enabled or not. It uses the
+ * mirror flag "buffer_disabled" to be used in fast paths such as for
+ * the irqsoff tracer. But it may be inaccurate due to races. If you
+ * need to know the accurate state, use tracing_is_on() which is a little
+ * slower, but accurate.
+ */
 int tracing_is_enabled(void)
 {
-	return tracing_is_on();
+	/*
+	 * For quick access (irqsoff uses this in fast path), just
+	 * return the mirror variable of the state of the ring buffer.
+	 * It's a little racy, but we don't really care.
+	 */
+	smp_rmb();
+	return !global_trace.buffer_disabled;
 }
 
 /*
@@ -240,7 +294,7 @@
 /*
  * trace_types_lock is used to protect the trace_types list.
  */
-static DEFINE_MUTEX(trace_types_lock);
+DEFINE_MUTEX(trace_types_lock);
 
 /*
  * serialize the access of the ring buffer
@@ -330,6 +384,23 @@
 	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
 	TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;
 
+void tracer_tracing_on(struct trace_array *tr)
+{
+	if (tr->trace_buffer.buffer)
+		ring_buffer_record_on(tr->trace_buffer.buffer);
+	/*
+	 * This flag is looked at when buffers haven't been allocated
+	 * yet, or by some tracers (like irqsoff), that just want to
+	 * know if the ring buffer has been disabled, but it can handle
+	 * races of where it gets disabled but we still do a record.
+	 * As the check is in the fast path of the tracers, it is more
+	 * important to be fast than accurate.
+	 */
+	tr->buffer_disabled = 0;
+	/* Make the flag seen by readers */
+	smp_wmb();
+}
+
 /**
  * tracing_on - enable tracing buffers
  *
@@ -338,15 +409,7 @@
  */
 void tracing_on(void)
 {
-	if (global_trace.trace_buffer.buffer)
-		ring_buffer_record_on(global_trace.trace_buffer.buffer);
-	/*
-	 * This flag is only looked at when buffers haven't been
-	 * allocated yet. We don't really care about the race
-	 * between setting this flag and actually turning
-	 * on the buffer.
-	 */
-	global_trace.buffer_disabled = 0;
+	tracer_tracing_on(&global_trace);
 }
 EXPORT_SYMBOL_GPL(tracing_on);
 
@@ -540,6 +603,23 @@
 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
 #endif /* CONFIG_TRACER_SNAPSHOT */
 
+void tracer_tracing_off(struct trace_array *tr)
+{
+	if (tr->trace_buffer.buffer)
+		ring_buffer_record_off(tr->trace_buffer.buffer);
+	/*
+	 * This flag is looked at when buffers haven't been allocated
+	 * yet, or by some tracers (like irqsoff), that just want to
+	 * know if the ring buffer has been disabled, but it can handle
+	 * races of where it gets disabled but we still do a record.
+	 * As the check is in the fast path of the tracers, it is more
+	 * important to be fast than accurate.
+	 */
+	tr->buffer_disabled = 1;
+	/* Make the flag seen by readers */
+	smp_wmb();
+}
+
 /**
  * tracing_off - turn off tracing buffers
  *
@@ -550,26 +630,29 @@
  */
 void tracing_off(void)
 {
-	if (global_trace.trace_buffer.buffer)
-		ring_buffer_record_off(global_trace.trace_buffer.buffer);
-	/*
-	 * This flag is only looked at when buffers haven't been
-	 * allocated yet. We don't really care about the race
-	 * between setting this flag and actually turning
-	 * on the buffer.
-	 */
-	global_trace.buffer_disabled = 1;
+	tracer_tracing_off(&global_trace);
 }
 EXPORT_SYMBOL_GPL(tracing_off);
 
 /**
+ * tracer_tracing_is_on - show real state of ring buffer enabled
+ * @tr : the trace array to know if ring buffer is enabled
+ *
+ * Shows real state of the ring buffer if it is enabled or not.
+ */
+int tracer_tracing_is_on(struct trace_array *tr)
+{
+	if (tr->trace_buffer.buffer)
+		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
+	return !tr->buffer_disabled;
+}
+
+/**
  * tracing_is_on - show state of ring buffers enabled
  */
 int tracing_is_on(void)
 {
-	if (global_trace.trace_buffer.buffer)
-		return ring_buffer_record_is_on(global_trace.trace_buffer.buffer);
-	return !global_trace.buffer_disabled;
+	return tracer_tracing_is_on(&global_trace);
 }
 EXPORT_SYMBOL_GPL(tracing_is_on);
 
@@ -747,9 +830,12 @@
 	if (isspace(ch)) {
 		parser->buffer[parser->idx] = 0;
 		parser->cont = false;
-	} else {
+	} else if (parser->idx < parser->size - 1) {
 		parser->cont = true;
 		parser->buffer[parser->idx++] = ch;
+	} else {
+		ret = -EINVAL;
+		goto out;
 	}
 
 	*ppos += read;
@@ -1120,7 +1206,7 @@
 	/* Make sure all commits have finished */
 	synchronize_sched();
 
-	buf->time_start = ftrace_now(buf->cpu);
+	buf->time_start = buffer_ftrace_now(buf, buf->cpu);
 
 	for_each_online_cpu(cpu)
 		ring_buffer_reset_cpu(buffer, cpu);
@@ -1128,23 +1214,17 @@
 	ring_buffer_record_enable(buffer);
 }
 
-void tracing_reset_current(int cpu)
-{
-	tracing_reset(&global_trace.trace_buffer, cpu);
-}
-
+/* Must have trace_types_lock held */
 void tracing_reset_all_online_cpus(void)
 {
 	struct trace_array *tr;
 
-	mutex_lock(&trace_types_lock);
 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
 		tracing_reset_online_cpus(&tr->trace_buffer);
 #ifdef CONFIG_TRACER_MAX_TRACE
 		tracing_reset_online_cpus(&tr->max_buffer);
 #endif
 	}
-	mutex_unlock(&trace_types_lock);
 }
 
 #define SAVED_CMDLINES 128
@@ -2807,6 +2887,17 @@
 	return 0;
 }
 
+/*
+ * Should be used after trace_array_get(), trace_types_lock
+ * ensures that i_cdev was already initialized.
+ */
+static inline int tracing_get_cpu(struct inode *inode)
+{
+	if (inode->i_cdev) /* See trace_create_cpu_file() */
+		return (long)inode->i_cdev - 1;
+	return RING_BUFFER_ALL_CPUS;
+}
+
 static const struct seq_operations tracer_seq_ops = {
 	.start		= s_start,
 	.next		= s_next,
@@ -2817,8 +2908,7 @@
 static struct trace_iterator *
 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
 {
-	struct trace_cpu *tc = inode->i_private;
-	struct trace_array *tr = tc->tr;
+	struct trace_array *tr = inode->i_private;
 	struct trace_iterator *iter;
 	int cpu;
 
@@ -2859,8 +2949,8 @@
 		iter->trace_buffer = &tr->trace_buffer;
 	iter->snapshot = snapshot;
 	iter->pos = -1;
+	iter->cpu_file = tracing_get_cpu(inode);
 	mutex_init(&iter->mutex);
-	iter->cpu_file = tc->cpu;
 
 	/* Notify the tracer early; before we stop tracing. */
 	if (iter->trace && iter->trace->open)
@@ -2897,8 +2987,6 @@
 		tracing_iter_reset(iter, cpu);
 	}
 
-	tr->ref++;
-
 	mutex_unlock(&trace_types_lock);
 
 	return iter;
@@ -2921,24 +3009,41 @@
 	return 0;
 }
 
+/*
+ * Open and update trace_array ref count.
+ * Must have the current trace_array passed to it.
+ */
+int tracing_open_generic_tr(struct inode *inode, struct file *filp)
+{
+	struct trace_array *tr = inode->i_private;
+
+	if (tracing_disabled)
+		return -ENODEV;
+
+	if (trace_array_get(tr) < 0)
+		return -ENODEV;
+
+	filp->private_data = inode->i_private;
+
+	return 0;
+}
+
 static int tracing_release(struct inode *inode, struct file *file)
 {
+	struct trace_array *tr = inode->i_private;
 	struct seq_file *m = file->private_data;
 	struct trace_iterator *iter;
-	struct trace_array *tr;
 	int cpu;
 
-	if (!(file->f_mode & FMODE_READ))
+	if (!(file->f_mode & FMODE_READ)) {
+		trace_array_put(tr);
 		return 0;
+	}
 
+	/* Writes do not use seq_file */
 	iter = m->private;
-	tr = iter->tr;
-
 	mutex_lock(&trace_types_lock);
 
-	WARN_ON(!tr->ref);
-	tr->ref--;
-
 	for_each_tracing_cpu(cpu) {
 		if (iter->buffer_iter[cpu])
 			ring_buffer_read_finish(iter->buffer_iter[cpu]);
@@ -2950,6 +3055,9 @@
 	if (!iter->snapshot)
 		/* reenable tracing if it was previously enabled */
 		tracing_start_tr(tr);
+
+	__trace_array_put(tr);
+
 	mutex_unlock(&trace_types_lock);
 
 	mutex_destroy(&iter->mutex);
@@ -2957,24 +3065,44 @@
 	kfree(iter->trace);
 	kfree(iter->buffer_iter);
 	seq_release_private(inode, file);
+
 	return 0;
 }
 
+static int tracing_release_generic_tr(struct inode *inode, struct file *file)
+{
+	struct trace_array *tr = inode->i_private;
+
+	trace_array_put(tr);
+	return 0;
+}
+
+static int tracing_single_release_tr(struct inode *inode, struct file *file)
+{
+	struct trace_array *tr = inode->i_private;
+
+	trace_array_put(tr);
+
+	return single_release(inode, file);
+}
+
 static int tracing_open(struct inode *inode, struct file *file)
 {
+	struct trace_array *tr = inode->i_private;
 	struct trace_iterator *iter;
 	int ret = 0;
 
-	/* If this file was open for write, then erase contents */
-	if ((file->f_mode & FMODE_WRITE) &&
-	    (file->f_flags & O_TRUNC)) {
-		struct trace_cpu *tc = inode->i_private;
-		struct trace_array *tr = tc->tr;
+	if (trace_array_get(tr) < 0)
+		return -ENODEV;
 
-		if (tc->cpu == RING_BUFFER_ALL_CPUS)
+	/* If this file was open for write, then erase contents */
+	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
+		int cpu = tracing_get_cpu(inode);
+
+		if (cpu == RING_BUFFER_ALL_CPUS)
 			tracing_reset_online_cpus(&tr->trace_buffer);
 		else
-			tracing_reset(&tr->trace_buffer, tc->cpu);
+			tracing_reset(&tr->trace_buffer, cpu);
 	}
 
 	if (file->f_mode & FMODE_READ) {
@@ -2984,6 +3112,10 @@
 		else if (trace_flags & TRACE_ITER_LATENCY_FMT)
 			iter->iter_flags |= TRACE_FILE_LAT_FMT;
 	}
+
+	if (ret < 0)
+		trace_array_put(tr);
+
 	return ret;
 }
 
@@ -3340,17 +3472,27 @@
 
 static int tracing_trace_options_open(struct inode *inode, struct file *file)
 {
+	struct trace_array *tr = inode->i_private;
+	int ret;
+
 	if (tracing_disabled)
 		return -ENODEV;
 
-	return single_open(file, tracing_trace_options_show, inode->i_private);
+	if (trace_array_get(tr) < 0)
+		return -ENODEV;
+
+	ret = single_open(file, tracing_trace_options_show, inode->i_private);
+	if (ret < 0)
+		trace_array_put(tr);
+
+	return ret;
 }
 
 static const struct file_operations tracing_iter_fops = {
 	.open		= tracing_trace_options_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
-	.release	= single_release,
+	.release	= tracing_single_release_tr,
 	.write		= tracing_trace_options_write,
 };
 
@@ -3874,20 +4016,23 @@
 
 static int tracing_open_pipe(struct inode *inode, struct file *filp)
 {
-	struct trace_cpu *tc = inode->i_private;
-	struct trace_array *tr = tc->tr;
+	struct trace_array *tr = inode->i_private;
 	struct trace_iterator *iter;
 	int ret = 0;
 
 	if (tracing_disabled)
 		return -ENODEV;
 
+	if (trace_array_get(tr) < 0)
+		return -ENODEV;
+
 	mutex_lock(&trace_types_lock);
 
 	/* create a buffer to store the information to pass to userspace */
 	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
 	if (!iter) {
 		ret = -ENOMEM;
+		__trace_array_put(tr);
 		goto out;
 	}
 
@@ -3917,9 +4062,9 @@
 	if (trace_clocks[tr->clock_id].in_ns)
 		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
 
-	iter->cpu_file = tc->cpu;
-	iter->tr = tc->tr;
-	iter->trace_buffer = &tc->tr->trace_buffer;
+	iter->tr = tr;
+	iter->trace_buffer = &tr->trace_buffer;
+	iter->cpu_file = tracing_get_cpu(inode);
 	mutex_init(&iter->mutex);
 	filp->private_data = iter;
 
@@ -3934,6 +4079,7 @@
 fail:
 	kfree(iter->trace);
 	kfree(iter);
+	__trace_array_put(tr);
 	mutex_unlock(&trace_types_lock);
 	return ret;
 }
@@ -3941,6 +4087,7 @@
 static int tracing_release_pipe(struct inode *inode, struct file *file)
 {
 	struct trace_iterator *iter = file->private_data;
+	struct trace_array *tr = inode->i_private;
 
 	mutex_lock(&trace_types_lock);
 
@@ -3954,6 +4101,8 @@
 	kfree(iter->trace);
 	kfree(iter);
 
+	trace_array_put(tr);
+
 	return 0;
 }
 
@@ -4030,7 +4179,7 @@
 		 *
 		 * iter->pos will be 0 if we haven't read anything.
 		 */
-		if (!tracing_is_enabled() && iter->pos)
+		if (!tracing_is_on() && iter->pos)
 			break;
 	}
 
@@ -4091,6 +4240,7 @@
 	memset(&iter->seq, 0,
 	       sizeof(struct trace_iterator) -
 	       offsetof(struct trace_iterator, seq));
+	cpumask_clear(iter->started);
 	iter->pos = -1;
 
 	trace_event_read_lock();
@@ -4291,15 +4441,16 @@
 tracing_entries_read(struct file *filp, char __user *ubuf,
 		     size_t cnt, loff_t *ppos)
 {
-	struct trace_cpu *tc = filp->private_data;
-	struct trace_array *tr = tc->tr;
+	struct inode *inode = file_inode(filp);
+	struct trace_array *tr = inode->i_private;
+	int cpu = tracing_get_cpu(inode);
 	char buf[64];
 	int r = 0;
 	ssize_t ret;
 
 	mutex_lock(&trace_types_lock);
 
-	if (tc->cpu == RING_BUFFER_ALL_CPUS) {
+	if (cpu == RING_BUFFER_ALL_CPUS) {
 		int cpu, buf_size_same;
 		unsigned long size;
 
@@ -4326,7 +4477,7 @@
 		} else
 			r = sprintf(buf, "X\n");
 	} else
-		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, tc->cpu)->entries >> 10);
+		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
 
 	mutex_unlock(&trace_types_lock);
 
@@ -4338,7 +4489,8 @@
 tracing_entries_write(struct file *filp, const char __user *ubuf,
 		      size_t cnt, loff_t *ppos)
 {
-	struct trace_cpu *tc = filp->private_data;
+	struct inode *inode = file_inode(filp);
+	struct trace_array *tr = inode->i_private;
 	unsigned long val;
 	int ret;
 
@@ -4352,8 +4504,7 @@
 
 	/* value is in KB */
 	val <<= 10;
-
-	ret = tracing_resize_ring_buffer(tc->tr, val, tc->cpu);
+	ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
 	if (ret < 0)
 		return ret;
 
@@ -4407,10 +4558,12 @@
 
 	/* disable tracing ? */
 	if (trace_flags & TRACE_ITER_STOP_ON_FREE)
-		tracing_off();
+		tracer_tracing_off(tr);
 	/* resize the ring buffer to 0 */
 	tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
 
+	trace_array_put(tr);
+
 	return 0;
 }
 
@@ -4419,6 +4572,7 @@
 					size_t cnt, loff_t *fpos)
 {
 	unsigned long addr = (unsigned long)ubuf;
+	struct trace_array *tr = filp->private_data;
 	struct ring_buffer_event *event;
 	struct ring_buffer *buffer;
 	struct print_entry *entry;
@@ -4456,6 +4610,9 @@
 	 * pages directly. We then write the data directly into the
 	 * ring buffer.
 	 */
+
+	trace_marker_event(ubuf, cnt);
+
 	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
 
 	/* check if we cross pages */
@@ -4478,7 +4635,7 @@
 
 	local_save_flags(irq_flags);
 	size = sizeof(*entry) + cnt + 2; /* possible \n added */
-	buffer = global_trace.trace_buffer.buffer;
+	buffer = tr->trace_buffer.buffer;
 	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
 					  irq_flags, preempt_count());
 	if (!event) {
@@ -4569,12 +4726,12 @@
 	 * New clock may not be consistent with the previous clock.
 	 * Reset the buffer so that it doesn't have incomparable timestamps.
 	 */
-	tracing_reset_online_cpus(&global_trace.trace_buffer);
+	tracing_reset_online_cpus(&tr->trace_buffer);
 
 #ifdef CONFIG_TRACER_MAX_TRACE
 	if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
 		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
-	tracing_reset_online_cpus(&global_trace.max_buffer);
+	tracing_reset_online_cpus(&tr->max_buffer);
 #endif
 
 	mutex_unlock(&trace_types_lock);
@@ -4586,10 +4743,20 @@
 
 static int tracing_clock_open(struct inode *inode, struct file *file)
 {
+	struct trace_array *tr = inode->i_private;
+	int ret;
+
 	if (tracing_disabled)
 		return -ENODEV;
 
-	return single_open(file, tracing_clock_show, inode->i_private);
+	if (trace_array_get(tr))
+		return -ENODEV;
+
+	ret = single_open(file, tracing_clock_show, inode->i_private);
+	if (ret < 0)
+		trace_array_put(tr);
+
+	return ret;
 }
 
 struct ftrace_buffer_info {
@@ -4601,31 +4768,40 @@
 #ifdef CONFIG_TRACER_SNAPSHOT
 static int tracing_snapshot_open(struct inode *inode, struct file *file)
 {
-	struct trace_cpu *tc = inode->i_private;
+	struct trace_array *tr = inode->i_private;
 	struct trace_iterator *iter;
 	struct seq_file *m;
 	int ret = 0;
 
+	if (trace_array_get(tr) < 0)
+		return -ENODEV;
+
 	if (file->f_mode & FMODE_READ) {
 		iter = __tracing_open(inode, file, true);
 		if (IS_ERR(iter))
 			ret = PTR_ERR(iter);
 	} else {
 		/* Writes still need the seq_file to hold the private data */
+		ret = -ENOMEM;
 		m = kzalloc(sizeof(*m), GFP_KERNEL);
 		if (!m)
-			return -ENOMEM;
+			goto out;
 		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
 		if (!iter) {
 			kfree(m);
-			return -ENOMEM;
+			goto out;
 		}
-		iter->tr = tc->tr;
-		iter->trace_buffer = &tc->tr->max_buffer;
-		iter->cpu_file = tc->cpu;
+		ret = 0;
+
+		iter->tr = tr;
+		iter->trace_buffer = &tr->max_buffer;
+		iter->cpu_file = tracing_get_cpu(inode);
 		m->private = iter;
 		file->private_data = m;
 	}
+out:
+	if (ret < 0)
+		trace_array_put(tr);
 
 	return ret;
 }
@@ -4707,9 +4883,12 @@
 static int tracing_snapshot_release(struct inode *inode, struct file *file)
 {
 	struct seq_file *m = file->private_data;
+	int ret;
+
+	ret = tracing_release(inode, file);
 
 	if (file->f_mode & FMODE_READ)
-		return tracing_release(inode, file);
+		return ret;
 
 	/* If write only, the seq_file is just a stub */
 	if (m)
@@ -4775,34 +4954,38 @@
 };
 
 static const struct file_operations tracing_entries_fops = {
-	.open		= tracing_open_generic,
+	.open		= tracing_open_generic_tr,
 	.read		= tracing_entries_read,
 	.write		= tracing_entries_write,
 	.llseek		= generic_file_llseek,
+	.release	= tracing_release_generic_tr,
 };
 
 static const struct file_operations tracing_total_entries_fops = {
-	.open		= tracing_open_generic,
+	.open		= tracing_open_generic_tr,
 	.read		= tracing_total_entries_read,
 	.llseek		= generic_file_llseek,
+	.release	= tracing_release_generic_tr,
 };
 
 static const struct file_operations tracing_free_buffer_fops = {
+	.open		= tracing_open_generic_tr,
 	.write		= tracing_free_buffer_write,
 	.release	= tracing_free_buffer_release,
 };
 
 static const struct file_operations tracing_mark_fops = {
-	.open		= tracing_open_generic,
+	.open		= tracing_open_generic_tr,
 	.write		= tracing_mark_write,
 	.llseek		= generic_file_llseek,
+	.release	= tracing_release_generic_tr,
 };
 
 static const struct file_operations trace_clock_fops = {
 	.open		= tracing_clock_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
-	.release	= single_release,
+	.release	= tracing_single_release_tr,
 	.write		= tracing_clock_write,
 };
 
@@ -4827,23 +5010,26 @@
 
 static int tracing_buffers_open(struct inode *inode, struct file *filp)
 {
-	struct trace_cpu *tc = inode->i_private;
-	struct trace_array *tr = tc->tr;
+	struct trace_array *tr = inode->i_private;
 	struct ftrace_buffer_info *info;
+	int ret;
 
 	if (tracing_disabled)
 		return -ENODEV;
 
+	if (trace_array_get(tr) < 0)
+		return -ENODEV;
+
 	info = kzalloc(sizeof(*info), GFP_KERNEL);
-	if (!info)
+	if (!info) {
+		trace_array_put(tr);
 		return -ENOMEM;
+	}
 
 	mutex_lock(&trace_types_lock);
 
-	tr->ref++;
-
 	info->iter.tr		= tr;
-	info->iter.cpu_file	= tc->cpu;
+	info->iter.cpu_file	= tracing_get_cpu(inode);
 	info->iter.trace	= tr->current_trace;
 	info->iter.trace_buffer = &tr->trace_buffer;
 	info->spare		= NULL;
@@ -4854,7 +5040,11 @@
 
 	mutex_unlock(&trace_types_lock);
 
-	return nonseekable_open(inode, filp);
+	ret = nonseekable_open(inode, filp);
+	if (ret < 0)
+		trace_array_put(tr);
+
+	return ret;
 }
 
 static unsigned int
@@ -4954,8 +5144,7 @@
 
 	mutex_lock(&trace_types_lock);
 
-	WARN_ON(!iter->tr->ref);
-	iter->tr->ref--;
+	__trace_array_put(iter->tr);
 
 	if (info->spare)
 		ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
@@ -5157,14 +5346,14 @@
 tracing_stats_read(struct file *filp, char __user *ubuf,
 		   size_t count, loff_t *ppos)
 {
-	struct trace_cpu *tc = filp->private_data;
-	struct trace_array *tr = tc->tr;
+	struct inode *inode = file_inode(filp);
+	struct trace_array *tr = inode->i_private;
 	struct trace_buffer *trace_buf = &tr->trace_buffer;
+	int cpu = tracing_get_cpu(inode);
 	struct trace_seq *s;
 	unsigned long cnt;
 	unsigned long long t;
 	unsigned long usec_rem;
-	int cpu = tc->cpu;
 
 	s = kmalloc(sizeof(*s), GFP_KERNEL);
 	if (!s)
@@ -5217,9 +5406,10 @@
 }
 
 static const struct file_operations tracing_stats_fops = {
-	.open		= tracing_open_generic,
+	.open		= tracing_open_generic_tr,
 	.read		= tracing_stats_read,
 	.llseek		= generic_file_llseek,
+	.release	= tracing_release_generic_tr,
 };
 
 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -5408,10 +5598,20 @@
 	return tr->percpu_dir;
 }
 
+static struct dentry *
+trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
+		      void *data, long cpu, const struct file_operations *fops)
+{
+	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
+
+	if (ret) /* See tracing_get_cpu() */
+		ret->d_inode->i_cdev = (void *)(cpu + 1);
+	return ret;
+}
+
 static void
 tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
 {
-	struct trace_array_cpu *data = per_cpu_ptr(tr->trace_buffer.data, cpu);
 	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
 	struct dentry *d_cpu;
 	char cpu_dir[30]; /* 30 characters should be more than enough */
@@ -5427,28 +5627,28 @@
 	}
 
 	/* per cpu trace_pipe */
-	trace_create_file("trace_pipe", 0444, d_cpu,
-			(void *)&data->trace_cpu, &tracing_pipe_fops);
+	trace_create_cpu_file("trace_pipe", 0444, d_cpu,
+				tr, cpu, &tracing_pipe_fops);
 
 	/* per cpu trace */
-	trace_create_file("trace", 0644, d_cpu,
-			(void *)&data->trace_cpu, &tracing_fops);
+	trace_create_cpu_file("trace", 0644, d_cpu,
+				tr, cpu, &tracing_fops);
 
-	trace_create_file("trace_pipe_raw", 0444, d_cpu,
-			(void *)&data->trace_cpu, &tracing_buffers_fops);
+	trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
+				tr, cpu, &tracing_buffers_fops);
 
-	trace_create_file("stats", 0444, d_cpu,
-			(void *)&data->trace_cpu, &tracing_stats_fops);
+	trace_create_cpu_file("stats", 0444, d_cpu,
+				tr, cpu, &tracing_stats_fops);
 
-	trace_create_file("buffer_size_kb", 0444, d_cpu,
-			(void *)&data->trace_cpu, &tracing_entries_fops);
+	trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
+				tr, cpu, &tracing_entries_fops);
 
 #ifdef CONFIG_TRACER_SNAPSHOT
-	trace_create_file("snapshot", 0644, d_cpu,
-			  (void *)&data->trace_cpu, &snapshot_fops);
+	trace_create_cpu_file("snapshot", 0644, d_cpu,
+				tr, cpu, &snapshot_fops);
 
-	trace_create_file("snapshot_raw", 0444, d_cpu,
-			(void *)&data->trace_cpu, &snapshot_raw_fops);
+	trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
+				tr, cpu, &snapshot_raw_fops);
 #endif
 }
 
@@ -5703,15 +5903,10 @@
 	       size_t cnt, loff_t *ppos)
 {
 	struct trace_array *tr = filp->private_data;
-	struct ring_buffer *buffer = tr->trace_buffer.buffer;
 	char buf[64];
 	int r;
 
-	if (buffer)
-		r = ring_buffer_record_is_on(buffer);
-	else
-		r = 0;
-
+	r = tracer_tracing_is_on(tr);
 	r = sprintf(buf, "%d\n", r);
 
 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
@@ -5733,11 +5928,11 @@
 	if (buffer) {
 		mutex_lock(&trace_types_lock);
 		if (val) {
-			ring_buffer_record_on(buffer);
+			tracer_tracing_on(tr);
 			if (tr->current_trace->start)
 				tr->current_trace->start(tr);
 		} else {
-			ring_buffer_record_off(buffer);
+			tracer_tracing_off(tr);
 			if (tr->current_trace->stop)
 				tr->current_trace->stop(tr);
 		}
@@ -5750,9 +5945,10 @@
 }
 
 static const struct file_operations rb_simple_fops = {
-	.open		= tracing_open_generic,
+	.open		= tracing_open_generic_tr,
 	.read		= rb_simple_read,
 	.write		= rb_simple_write,
+	.release	= tracing_release_generic_tr,
 	.llseek		= default_llseek,
 };
 
@@ -5866,8 +6062,10 @@
 		goto out_free_tr;
 
 	ret = event_trace_add_tracer(tr->dir, tr);
-	if (ret)
+	if (ret) {
+		debugfs_remove_recursive(tr->dir);
 		goto out_free_tr;
+	}
 
 	init_tracer_debugfs(tr, tr->dir);
 
@@ -6013,13 +6211,13 @@
 			  tr, &tracing_iter_fops);
 
 	trace_create_file("trace", 0644, d_tracer,
-			(void *)&tr->trace_cpu, &tracing_fops);
+			  tr, &tracing_fops);
 
 	trace_create_file("trace_pipe", 0444, d_tracer,
-			(void *)&tr->trace_cpu, &tracing_pipe_fops);
+			  tr, &tracing_pipe_fops);
 
 	trace_create_file("buffer_size_kb", 0644, d_tracer,
-			(void *)&tr->trace_cpu, &tracing_entries_fops);
+			  tr, &tracing_entries_fops);
 
 	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
 			  tr, &tracing_total_entries_fops);
@@ -6037,11 +6235,11 @@
 			  &trace_clock_fops);
 
 	trace_create_file("tracing_on", 0644, d_tracer,
-			    tr, &rb_simple_fops);
+			  tr, &rb_simple_fops);
 
 #ifdef CONFIG_TRACER_SNAPSHOT
 	trace_create_file("snapshot", 0644, d_tracer,
-			  (void *)&tr->trace_cpu, &snapshot_fops);
+			  tr, &snapshot_fops);
 #endif
 
 	for_each_tracing_cpu(cpu)
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 6c42f18..691cb4f 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -224,6 +224,11 @@
 
 extern struct list_head ftrace_trace_arrays;
 
+extern struct mutex trace_types_lock;
+
+extern int trace_array_get(struct trace_array *tr);
+extern void trace_array_put(struct trace_array *tr);
+
 /*
  * The global tracer (top) should be the first trace array added,
  * but we check the flag anyway.
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 84b1e04..a55be36 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -222,7 +222,10 @@
 void perf_trace_del(struct perf_event *p_event, int flags)
 {
 	struct ftrace_event_call *tp_event = p_event->tp_event;
-	hlist_del_rcu(&p_event->hlist_entry);
+	if (!hlist_unhashed(&p_event->hlist_entry))
+		hlist_del_rcu(&p_event->hlist_entry);
+	else
+		return;
 	tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event);
 }
 
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 27963e2..3d18aad 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -41,6 +41,23 @@
 static struct kmem_cache *field_cachep;
 static struct kmem_cache *file_cachep;
 
+#define SYSTEM_FL_FREE_NAME		(1 << 31)
+
+static inline int system_refcount(struct event_subsystem *system)
+{
+	return system->ref_count & ~SYSTEM_FL_FREE_NAME;
+}
+
+static int system_refcount_inc(struct event_subsystem *system)
+{
+	return (system->ref_count++) & ~SYSTEM_FL_FREE_NAME;
+}
+
+static int system_refcount_dec(struct event_subsystem *system)
+{
+	return (--system->ref_count) & ~SYSTEM_FL_FREE_NAME;
+}
+
 /* Double loops, do not use break, only goto's work */
 #define do_for_each_event_file(tr, file)			\
 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
@@ -97,7 +114,7 @@
 
 	field = kmem_cache_alloc(field_cachep, GFP_TRACE);
 	if (!field)
-		goto err;
+		return -ENOMEM;
 
 	field->name = name;
 	field->type = type;
@@ -114,11 +131,6 @@
 	list_add(&field->link, head);
 
 	return 0;
-
-err:
-	kmem_cache_free(field_cachep, field);
-
-	return -ENOMEM;
 }
 
 int trace_define_field(struct ftrace_event_call *call, const char *type,
@@ -349,8 +361,8 @@
 {
 	struct event_filter *filter = system->filter;
 
-	WARN_ON_ONCE(system->ref_count == 0);
-	if (--system->ref_count)
+	WARN_ON_ONCE(system_refcount(system) == 0);
+	if (system_refcount_dec(system))
 		return;
 
 	list_del(&system->list);
@@ -359,13 +371,15 @@
 		kfree(filter->filter_string);
 		kfree(filter);
 	}
+	if (system->ref_count & SYSTEM_FL_FREE_NAME)
+		kfree(system->name);
 	kfree(system);
 }
 
 static void __get_system(struct event_subsystem *system)
 {
-	WARN_ON_ONCE(system->ref_count == 0);
-	system->ref_count++;
+	WARN_ON_ONCE(system_refcount(system) == 0);
+	system_refcount_inc(system);
 }
 
 static void __get_system_dir(struct ftrace_subsystem_dir *dir)
@@ -379,7 +393,7 @@
 {
 	WARN_ON_ONCE(dir->ref_count == 0);
 	/* If the subsystem is about to be freed, the dir must be too */
-	WARN_ON_ONCE(dir->subsystem->ref_count == 1 && dir->ref_count != 1);
+	WARN_ON_ONCE(system_refcount(dir->subsystem) == 1 && dir->ref_count != 1);
 
 	__put_system(dir->subsystem);
 	if (!--dir->ref_count)
@@ -393,17 +407,55 @@
 	mutex_unlock(&event_mutex);
 }
 
+static void remove_subsystem(struct ftrace_subsystem_dir *dir)
+{
+	if (!dir)
+		return;
+
+	if (!--dir->nr_events) {
+		debugfs_remove_recursive(dir->entry);
+		list_del(&dir->list);
+		__put_system_dir(dir);
+	}
+}
+
+static void *event_file_data(struct file *filp)
+{
+	return ACCESS_ONCE(file_inode(filp)->i_private);
+}
+
+static void remove_event_file_dir(struct ftrace_event_file *file)
+{
+	struct dentry *dir = file->dir;
+	struct dentry *child;
+
+	if (dir) {
+		spin_lock(&dir->d_lock);	/* probably unneeded */
+		list_for_each_entry(child, &dir->d_subdirs, d_u.d_child) {
+			if (child->d_inode)	/* probably unneeded */
+				child->d_inode->i_private = NULL;
+		}
+		spin_unlock(&dir->d_lock);
+
+		debugfs_remove_recursive(dir);
+	}
+
+	list_del(&file->list);
+	remove_subsystem(file->system);
+	kmem_cache_free(file_cachep, file);
+}
+
 /*
  * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
  */
-static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
-				  const char *sub, const char *event, int set)
+static int
+__ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
+			      const char *sub, const char *event, int set)
 {
 	struct ftrace_event_file *file;
 	struct ftrace_event_call *call;
 	int ret = -EINVAL;
 
-	mutex_lock(&event_mutex);
 	list_for_each_entry(file, &tr->events, list) {
 
 		call = file->event_call;
@@ -429,6 +481,17 @@
 
 		ret = 0;
 	}
+
+	return ret;
+}
+
+static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
+				  const char *sub, const char *event, int set)
+{
+	int ret;
+
+	mutex_lock(&event_mutex);
+	ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set);
 	mutex_unlock(&event_mutex);
 
 	return ret;
@@ -623,13 +686,23 @@
 event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
 		  loff_t *ppos)
 {
-	struct ftrace_event_file *file = filp->private_data;
+	struct ftrace_event_file *file;
+	unsigned long flags;
 	char *buf;
 
-	if (file->flags & FTRACE_EVENT_FL_ENABLED) {
-		if (file->flags & FTRACE_EVENT_FL_SOFT_DISABLED)
+	mutex_lock(&event_mutex);
+	file = event_file_data(filp);
+	if (likely(file))
+		flags = file->flags;
+	mutex_unlock(&event_mutex);
+
+	if (!file)
+		return -ENODEV;
+
+	if (flags & FTRACE_EVENT_FL_ENABLED) {
+		if (flags & FTRACE_EVENT_FL_SOFT_DISABLED)
 			buf = "0*\n";
-		else if (file->flags & FTRACE_EVENT_FL_SOFT_MODE)
+		else if (flags & FTRACE_EVENT_FL_SOFT_MODE)
 			buf = "1*\n";
 		else
 			buf = "1\n";
@@ -643,13 +716,10 @@
 event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
 		   loff_t *ppos)
 {
-	struct ftrace_event_file *file = filp->private_data;
+	struct ftrace_event_file *file;
 	unsigned long val;
 	int ret;
 
-	if (!file)
-		return -EINVAL;
-
 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
 	if (ret)
 		return ret;
@@ -661,8 +731,11 @@
 	switch (val) {
 	case 0:
 	case 1:
+		ret = -ENODEV;
 		mutex_lock(&event_mutex);
-		ret = ftrace_event_enable_disable(file, val);
+		file = event_file_data(filp);
+		if (likely(file))
+			ret = ftrace_event_enable_disable(file, val);
 		mutex_unlock(&event_mutex);
 		break;
 
@@ -769,7 +842,7 @@
 
 static void *f_next(struct seq_file *m, void *v, loff_t *pos)
 {
-	struct ftrace_event_call *call = m->private;
+	struct ftrace_event_call *call = event_file_data(m->private);
 	struct ftrace_event_field *field;
 	struct list_head *common_head = &ftrace_common_fields;
 	struct list_head *head = trace_get_fields(call);
@@ -813,6 +886,11 @@
 	loff_t l = 0;
 	void *p;
 
+	/* ->stop() is called even if ->start() fails */
+	mutex_lock(&event_mutex);
+	if (!event_file_data(m->private))
+		return ERR_PTR(-ENODEV);
+
 	/* Start by showing the header */
 	if (!*pos)
 		return (void *)FORMAT_HEADER;
@@ -827,7 +905,7 @@
 
 static int f_show(struct seq_file *m, void *v)
 {
-	struct ftrace_event_call *call = m->private;
+	struct ftrace_event_call *call = event_file_data(m->private);
 	struct ftrace_event_field *field;
 	const char *array_descriptor;
 
@@ -878,6 +956,7 @@
 
 static void f_stop(struct seq_file *m, void *p)
 {
+	mutex_unlock(&event_mutex);
 }
 
 static const struct seq_operations trace_format_seq_ops = {
@@ -889,7 +968,6 @@
 
 static int trace_format_open(struct inode *inode, struct file *file)
 {
-	struct ftrace_event_call *call = inode->i_private;
 	struct seq_file *m;
 	int ret;
 
@@ -898,7 +976,7 @@
 		return ret;
 
 	m = file->private_data;
-	m->private = call;
+	m->private = file;
 
 	return 0;
 }
@@ -906,19 +984,22 @@
 static ssize_t
 event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
 {
-	struct ftrace_event_call *call = filp->private_data;
+	int id = (long)event_file_data(filp);
 	struct trace_seq *s;
 	int r;
 
 	if (*ppos)
 		return 0;
 
+	if (unlikely(!id))
+		return -ENODEV;
+
 	s = kmalloc(sizeof(*s), GFP_KERNEL);
 	if (!s)
 		return -ENOMEM;
 
 	trace_seq_init(s);
-	trace_seq_printf(s, "%d\n", call->event.type);
+	trace_seq_printf(s, "%d\n", id);
 
 	r = simple_read_from_buffer(ubuf, cnt, ppos,
 				    s->buffer, s->len);
@@ -930,21 +1011,28 @@
 event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
 		  loff_t *ppos)
 {
-	struct ftrace_event_call *call = filp->private_data;
+	struct ftrace_event_call *call;
 	struct trace_seq *s;
-	int r;
+	int r = -ENODEV;
 
 	if (*ppos)
 		return 0;
 
 	s = kmalloc(sizeof(*s), GFP_KERNEL);
+
 	if (!s)
 		return -ENOMEM;
 
 	trace_seq_init(s);
 
-	print_event_filter(call, s);
-	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
+	mutex_lock(&event_mutex);
+	call = event_file_data(filp);
+	if (call)
+		print_event_filter(call, s);
+	mutex_unlock(&event_mutex);
+
+	if (call)
+		r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
 
 	kfree(s);
 
@@ -955,9 +1043,9 @@
 event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
 		   loff_t *ppos)
 {
-	struct ftrace_event_call *call = filp->private_data;
+	struct ftrace_event_call *call;
 	char *buf;
-	int err;
+	int err = -ENODEV;
 
 	if (cnt >= PAGE_SIZE)
 		return -EINVAL;
@@ -972,7 +1060,12 @@
 	}
 	buf[cnt] = '\0';
 
-	err = apply_event_filter(call, buf);
+	mutex_lock(&event_mutex);
+	call = event_file_data(filp);
+	if (call)
+		err = apply_event_filter(call, buf);
+	mutex_unlock(&event_mutex);
+
 	free_page((unsigned long) buf);
 	if (err < 0)
 		return err;
@@ -992,6 +1085,7 @@
 	int ret;
 
 	/* Make sure the system still exists */
+	mutex_lock(&trace_types_lock);
 	mutex_lock(&event_mutex);
 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
 		list_for_each_entry(dir, &tr->systems, list) {
@@ -1007,6 +1101,7 @@
 	}
  exit_loop:
 	mutex_unlock(&event_mutex);
+	mutex_unlock(&trace_types_lock);
 
 	if (!system)
 		return -ENODEV;
@@ -1014,9 +1109,17 @@
 	/* Some versions of gcc think dir can be uninitialized here */
 	WARN_ON(!dir);
 
-	ret = tracing_open_generic(inode, filp);
-	if (ret < 0)
+	/* Still need to increment the ref count of the system */
+	if (trace_array_get(tr) < 0) {
 		put_system(dir);
+		return -ENODEV;
+	}
+
+	ret = tracing_open_generic(inode, filp);
+	if (ret < 0) {
+		trace_array_put(tr);
+		put_system(dir);
+	}
 
 	return ret;
 }
@@ -1027,16 +1130,23 @@
 	struct trace_array *tr = inode->i_private;
 	int ret;
 
+	if (trace_array_get(tr) < 0)
+		return -ENODEV;
+
 	/* Make a temporary dir that has no system but points to tr */
 	dir = kzalloc(sizeof(*dir), GFP_KERNEL);
-	if (!dir)
+	if (!dir) {
+		trace_array_put(tr);
 		return -ENOMEM;
+	}
 
 	dir->tr = tr;
 
 	ret = tracing_open_generic(inode, filp);
-	if (ret < 0)
+	if (ret < 0) {
+		trace_array_put(tr);
 		kfree(dir);
+	}
 
 	filp->private_data = dir;
 
@@ -1047,6 +1157,8 @@
 {
 	struct ftrace_subsystem_dir *dir = file->private_data;
 
+	trace_array_put(dir->tr);
+
 	/*
 	 * If dir->subsystem is NULL, then this is a temporary
 	 * descriptor that was made for a trace_array to enable
@@ -1143,6 +1255,7 @@
 
 static int ftrace_event_avail_open(struct inode *inode, struct file *file);
 static int ftrace_event_set_open(struct inode *inode, struct file *file);
+static int ftrace_event_release(struct inode *inode, struct file *file);
 
 static const struct seq_operations show_event_seq_ops = {
 	.start = t_start,
@@ -1170,7 +1283,7 @@
 	.read = seq_read,
 	.write = ftrace_event_write,
 	.llseek = seq_lseek,
-	.release = seq_release,
+	.release = ftrace_event_release,
 };
 
 static const struct file_operations ftrace_enable_fops = {
@@ -1188,7 +1301,6 @@
 };
 
 static const struct file_operations ftrace_event_id_fops = {
-	.open = tracing_open_generic,
 	.read = event_id_read,
 	.llseek = default_llseek,
 };
@@ -1247,6 +1359,15 @@
 	return ret;
 }
 
+static int ftrace_event_release(struct inode *inode, struct file *file)
+{
+	struct trace_array *tr = inode->i_private;
+
+	trace_array_put(tr);
+
+	return seq_release(inode, file);
+}
+
 static int
 ftrace_event_avail_open(struct inode *inode, struct file *file)
 {
@@ -1260,12 +1381,19 @@
 {
 	const struct seq_operations *seq_ops = &show_set_event_seq_ops;
 	struct trace_array *tr = inode->i_private;
+	int ret;
+
+	if (trace_array_get(tr) < 0)
+		return -ENODEV;
 
 	if ((file->f_mode & FMODE_WRITE) &&
 	    (file->f_flags & O_TRUNC))
 		ftrace_clear_events(tr);
 
-	return ftrace_event_open(inode, file, seq_ops);
+	ret = ftrace_event_open(inode, file, seq_ops);
+	if (ret < 0)
+		trace_array_put(tr);
+	return ret;
 }
 
 static struct event_subsystem *
@@ -1279,7 +1407,15 @@
 		return NULL;
 
 	system->ref_count = 1;
-	system->name = name;
+
+	/* Only allocate if dynamic (kprobes and modules) */
+	if (!core_kernel_data((unsigned long)name)) {
+		system->ref_count |= SYSTEM_FL_FREE_NAME;
+		system->name = kstrdup(name, GFP_KERNEL);
+		if (!system->name)
+			goto out_free;
+	} else
+		system->name = name;
 
 	system->filter = NULL;
 
@@ -1292,6 +1428,8 @@
 	return system;
 
  out_free:
+	if (system->ref_count & SYSTEM_FL_FREE_NAME)
+		kfree(system->name);
 	kfree(system);
 	return NULL;
 }
@@ -1410,8 +1548,8 @@
 
 #ifdef CONFIG_PERF_EVENTS
 	if (call->event.type && call->class->reg)
-		trace_create_file("id", 0444, file->dir, call,
-		 		  id);
+		trace_create_file("id", 0444, file->dir,
+				  (void *)(long)call->event.type, id);
 #endif
 
 	/*
@@ -1436,33 +1574,16 @@
 	return 0;
 }
 
-static void remove_subsystem(struct ftrace_subsystem_dir *dir)
-{
-	if (!dir)
-		return;
-
-	if (!--dir->nr_events) {
-		debugfs_remove_recursive(dir->entry);
-		list_del(&dir->list);
-		__put_system_dir(dir);
-	}
-}
-
 static void remove_event_from_tracers(struct ftrace_event_call *call)
 {
 	struct ftrace_event_file *file;
 	struct trace_array *tr;
 
 	do_for_each_event_file_safe(tr, file) {
-
 		if (file->event_call != call)
 			continue;
 
-		list_del(&file->list);
-		debugfs_remove_recursive(file->dir);
-		remove_subsystem(file->system);
-		kmem_cache_free(file_cachep, file);
-
+		remove_event_file_dir(file);
 		/*
 		 * The do_for_each_event_file_safe() is
 		 * a double loop. After finding the call for this
@@ -1591,6 +1712,7 @@
 int trace_add_event_call(struct ftrace_event_call *call)
 {
 	int ret;
+	mutex_lock(&trace_types_lock);
 	mutex_lock(&event_mutex);
 
 	ret = __register_event(call, NULL);
@@ -1598,11 +1720,13 @@
 		__add_event_to_tracers(call, NULL);
 
 	mutex_unlock(&event_mutex);
+	mutex_unlock(&trace_types_lock);
 	return ret;
 }
 
 /*
- * Must be called under locking both of event_mutex and trace_event_sem.
+ * Must be called under locking of trace_types_lock, event_mutex and
+ * trace_event_sem.
  */
 static void __trace_remove_event_call(struct ftrace_event_call *call)
 {
@@ -1611,14 +1735,47 @@
 	destroy_preds(call);
 }
 
-/* Remove an event_call */
-void trace_remove_event_call(struct ftrace_event_call *call)
+static int probe_remove_event_call(struct ftrace_event_call *call)
 {
+	struct trace_array *tr;
+	struct ftrace_event_file *file;
+
+#ifdef CONFIG_PERF_EVENTS
+	if (call->perf_refcount)
+		return -EBUSY;
+#endif
+	do_for_each_event_file(tr, file) {
+		if (file->event_call != call)
+			continue;
+		/*
+		 * We can't rely on ftrace_event_enable_disable(enable => 0)
+		 * we are going to do, FTRACE_EVENT_FL_SOFT_MODE can suppress
+		 * TRACE_REG_UNREGISTER.
+		 */
+		if (file->flags & FTRACE_EVENT_FL_ENABLED)
+			return -EBUSY;
+		break;
+	} while_for_each_event_file();
+
+	__trace_remove_event_call(call);
+
+	return 0;
+}
+
+/* Remove an event_call */
+int trace_remove_event_call(struct ftrace_event_call *call)
+{
+	int ret;
+
+	mutex_lock(&trace_types_lock);
 	mutex_lock(&event_mutex);
 	down_write(&trace_event_sem);
-	__trace_remove_event_call(call);
+	ret = probe_remove_event_call(call);
 	up_write(&trace_event_sem);
 	mutex_unlock(&event_mutex);
+	mutex_unlock(&trace_types_lock);
+
+	return ret;
 }
 
 #define for_each_event(event, start, end)			\
@@ -1762,6 +1919,7 @@
 {
 	struct module *mod = data;
 
+	mutex_lock(&trace_types_lock);
 	mutex_lock(&event_mutex);
 	switch (val) {
 	case MODULE_STATE_COMING:
@@ -1772,6 +1930,7 @@
 		break;
 	}
 	mutex_unlock(&event_mutex);
+	mutex_unlock(&trace_types_lock);
 
 	return 0;
 }
@@ -2188,12 +2347,8 @@
 {
 	struct ftrace_event_file *file, *next;
 
-	list_for_each_entry_safe(file, next, &tr->events, list) {
-		list_del(&file->list);
-		debugfs_remove_recursive(file->dir);
-		remove_subsystem(file->system);
-		kmem_cache_free(file_cachep, file);
-	}
+	list_for_each_entry_safe(file, next, &tr->events, list)
+		remove_event_file_dir(file);
 }
 
 static void
@@ -2329,11 +2484,11 @@
 
 int event_trace_del_tracer(struct trace_array *tr)
 {
-	/* Disable any running events */
-	__ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
-
 	mutex_lock(&event_mutex);
 
+	/* Disable any running events */
+	__ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);
+
 	down_write(&trace_event_sem);
 	__trace_remove_event_dirs(tr);
 	debugfs_remove_recursive(tr->event_dir);
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index e1b653f..0a1edc6 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -631,17 +631,15 @@
 	free_page((unsigned long) buf);
 }
 
+/* caller must hold event_mutex */
 void print_event_filter(struct ftrace_event_call *call, struct trace_seq *s)
 {
-	struct event_filter *filter;
+	struct event_filter *filter = call->filter;
 
-	mutex_lock(&event_mutex);
-	filter = call->filter;
 	if (filter && filter->filter_string)
 		trace_seq_printf(s, "%s\n", filter->filter_string);
 	else
 		trace_seq_printf(s, "none\n");
-	mutex_unlock(&event_mutex);
 }
 
 void print_subsystem_event_filter(struct event_subsystem *system,
@@ -1835,23 +1833,22 @@
 	return err;
 }
 
+/* caller must hold event_mutex */
 int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
 {
 	struct event_filter *filter;
-	int err = 0;
-
-	mutex_lock(&event_mutex);
+	int err;
 
 	if (!strcmp(strstrip(filter_string), "0")) {
 		filter_disable(call);
 		filter = call->filter;
 		if (!filter)
-			goto out_unlock;
+			return 0;
 		RCU_INIT_POINTER(call->filter, NULL);
 		/* Make sure the filter is not being used */
 		synchronize_sched();
 		__free_filter(filter);
-		goto out_unlock;
+		return 0;
 	}
 
 	err = create_filter(call, filter_string, true, &filter);
@@ -1878,8 +1875,6 @@
 			__free_filter(tmp);
 		}
 	}
-out_unlock:
-	mutex_unlock(&event_mutex);
 
 	return err;
 }
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index b19d065..2aefbee 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -373,7 +373,7 @@
 	struct trace_array_cpu *data;
 	unsigned long flags;
 
-	if (likely(!tracer_enabled))
+	if (!tracer_enabled || !tracing_is_enabled())
 		return;
 
 	cpu = raw_smp_processor_id();
@@ -416,7 +416,7 @@
 	else
 		return;
 
-	if (!tracer_enabled)
+	if (!tracer_enabled || !tracing_is_enabled())
 		return;
 
 	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 9f46e98..64abc8c 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -90,7 +90,7 @@
 }
 
 static int register_probe_event(struct trace_probe *tp);
-static void unregister_probe_event(struct trace_probe *tp);
+static int unregister_probe_event(struct trace_probe *tp);
 
 static DEFINE_MUTEX(probe_lock);
 static LIST_HEAD(probe_list);
@@ -281,6 +281,8 @@
 static int
 disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
 {
+	struct ftrace_event_file **old = NULL;
+	int wait = 0;
 	int ret = 0;
 
 	mutex_lock(&probe_enable_lock);
@@ -314,10 +316,7 @@
 		}
 
 		rcu_assign_pointer(tp->files, new);
-
-		/* Make sure the probe is done with old files */
-		synchronize_sched();
-		kfree(old);
+		wait = 1;
 	} else
 		tp->flags &= ~TP_FLAG_PROFILE;
 
@@ -326,11 +325,25 @@
 			disable_kretprobe(&tp->rp);
 		else
 			disable_kprobe(&tp->rp.kp);
+		wait = 1;
 	}
 
  out_unlock:
 	mutex_unlock(&probe_enable_lock);
 
+	if (wait) {
+		/*
+		 * Synchronize with kprobe_trace_func/kretprobe_trace_func
+		 * to ensure disabled (all running handlers are finished).
+		 * This is not only for kfree(), but also the caller,
+		 * trace_remove_event_call() supposes it for releasing
+		 * event_call related objects, which will be accessed in
+		 * the kprobe_trace_func/kretprobe_trace_func.
+		 */
+		synchronize_sched();
+		kfree(old);	/* Ignored if link == NULL */
+	}
+
 	return ret;
 }
 
@@ -398,9 +411,12 @@
 	if (trace_probe_is_enabled(tp))
 		return -EBUSY;
 
+	/* Will fail if probe is being used by ftrace or perf */
+	if (unregister_probe_event(tp))
+		return -EBUSY;
+
 	__unregister_trace_probe(tp);
 	list_del(&tp->list);
-	unregister_probe_event(tp);
 
 	return 0;
 }
@@ -679,7 +695,9 @@
 	/* TODO: Use batch unregistration */
 	while (!list_empty(&probe_list)) {
 		tp = list_entry(probe_list.next, struct trace_probe, list);
-		unregister_trace_probe(tp);
+		ret = unregister_trace_probe(tp);
+		if (ret)
+			goto end;
 		free_trace_probe(tp);
 	}
 
@@ -1312,11 +1330,15 @@
 	return ret;
 }
 
-static void unregister_probe_event(struct trace_probe *tp)
+static int unregister_probe_event(struct trace_probe *tp)
 {
+	int ret;
+
 	/* tp->event is unregistered in trace_remove_event_call() */
-	trace_remove_event_call(&tp->call);
-	kfree(tp->call.print_fmt);
+	ret = trace_remove_event_call(&tp->call);
+	if (!ret)
+		kfree(tp->call.print_fmt);
+	return ret;
 }
 
 /* Make a debugfs interface for controlling probe points */
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 8f2ac73..322e164 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -306,6 +306,8 @@
 	struct syscall_metadata *sys_data;
 	struct ring_buffer_event *event;
 	struct ring_buffer *buffer;
+	unsigned long irq_flags;
+	int pc;
 	int syscall_nr;
 	int size;
 
@@ -321,9 +323,12 @@
 
 	size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;
 
+	local_save_flags(irq_flags);
+	pc = preempt_count();
+
 	buffer = tr->trace_buffer.buffer;
 	event = trace_buffer_lock_reserve(buffer,
-			sys_data->enter_event->event.type, size, 0, 0);
+			sys_data->enter_event->event.type, size, irq_flags, pc);
 	if (!event)
 		return;
 
@@ -333,7 +338,8 @@
 
 	if (!filter_current_check_discard(buffer, sys_data->enter_event,
 					  entry, event))
-		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
+		trace_current_buffer_unlock_commit(buffer, event,
+						   irq_flags, pc);
 }
 
 static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
@@ -343,6 +349,8 @@
 	struct syscall_metadata *sys_data;
 	struct ring_buffer_event *event;
 	struct ring_buffer *buffer;
+	unsigned long irq_flags;
+	int pc;
 	int syscall_nr;
 
 	syscall_nr = trace_get_syscall_nr(current, regs);
@@ -355,9 +363,13 @@
 	if (!sys_data)
 		return;
 
+	local_save_flags(irq_flags);
+	pc = preempt_count();
+
 	buffer = tr->trace_buffer.buffer;
 	event = trace_buffer_lock_reserve(buffer,
-			sys_data->exit_event->event.type, sizeof(*entry), 0, 0);
+			sys_data->exit_event->event.type, sizeof(*entry),
+			irq_flags, pc);
 	if (!event)
 		return;
 
@@ -367,7 +379,8 @@
 
 	if (!filter_current_check_discard(buffer, sys_data->exit_event,
 					  entry, event))
-		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
+		trace_current_buffer_unlock_commit(buffer, event,
+						   irq_flags, pc);
 }
 
 static int reg_event_syscall_enter(struct ftrace_event_file *file,
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 32494fb0..6fd72b7 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -70,7 +70,7 @@
 	(sizeof(struct probe_arg) * (n)))
 
 static int register_uprobe_event(struct trace_uprobe *tu);
-static void unregister_uprobe_event(struct trace_uprobe *tu);
+static int unregister_uprobe_event(struct trace_uprobe *tu);
 
 static DEFINE_MUTEX(uprobe_lock);
 static LIST_HEAD(uprobe_list);
@@ -164,11 +164,17 @@
 }
 
 /* Unregister a trace_uprobe and probe_event: call with locking uprobe_lock */
-static void unregister_trace_uprobe(struct trace_uprobe *tu)
+static int unregister_trace_uprobe(struct trace_uprobe *tu)
 {
+	int ret;
+
+	ret = unregister_uprobe_event(tu);
+	if (ret)
+		return ret;
+
 	list_del(&tu->list);
-	unregister_uprobe_event(tu);
 	free_trace_uprobe(tu);
+	return 0;
 }
 
 /* Register a trace_uprobe and probe_event */
@@ -181,9 +187,12 @@
 
 	/* register as an event */
 	old_tp = find_probe_event(tu->call.name, tu->call.class->system);
-	if (old_tp)
+	if (old_tp) {
 		/* delete old event */
-		unregister_trace_uprobe(old_tp);
+		ret = unregister_trace_uprobe(old_tp);
+		if (ret)
+			goto end;
+	}
 
 	ret = register_uprobe_event(tu);
 	if (ret) {
@@ -256,6 +265,8 @@
 		group = UPROBE_EVENT_SYSTEM;
 
 	if (is_delete) {
+		int ret;
+
 		if (!event) {
 			pr_info("Delete command needs an event name.\n");
 			return -EINVAL;
@@ -269,9 +280,9 @@
 			return -ENOENT;
 		}
 		/* delete an event */
-		unregister_trace_uprobe(tu);
+		ret = unregister_trace_uprobe(tu);
 		mutex_unlock(&uprobe_lock);
-		return 0;
+		return ret;
 	}
 
 	if (argc < 2) {
@@ -283,8 +294,10 @@
 		return -EINVAL;
 	}
 	arg = strchr(argv[1], ':');
-	if (!arg)
+	if (!arg) {
+		ret = -EINVAL;
 		goto fail_address_parse;
+	}
 
 	*arg++ = '\0';
 	filename = argv[1];
@@ -406,16 +419,20 @@
 	return ret;
 }
 
-static void cleanup_all_probes(void)
+static int cleanup_all_probes(void)
 {
 	struct trace_uprobe *tu;
+	int ret = 0;
 
 	mutex_lock(&uprobe_lock);
 	while (!list_empty(&uprobe_list)) {
 		tu = list_entry(uprobe_list.next, struct trace_uprobe, list);
-		unregister_trace_uprobe(tu);
+		ret = unregister_trace_uprobe(tu);
+		if (ret)
+			break;
 	}
 	mutex_unlock(&uprobe_lock);
+	return ret;
 }
 
 /* Probes listing interfaces */
@@ -460,8 +477,13 @@
 
 static int probes_open(struct inode *inode, struct file *file)
 {
-	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
-		cleanup_all_probes();
+	int ret;
+
+	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
+		ret = cleanup_all_probes();
+		if (ret)
+			return ret;
+	}
 
 	return seq_open(file, &probes_seq_op);
 }
@@ -968,12 +990,17 @@
 	return ret;
 }
 
-static void unregister_uprobe_event(struct trace_uprobe *tu)
+static int unregister_uprobe_event(struct trace_uprobe *tu)
 {
+	int ret;
+
 	/* tu->event is unregistered in trace_remove_event_call() */
-	trace_remove_event_call(&tu->call);
+	ret = trace_remove_event_call(&tu->call);
+	if (ret)
+		return ret;
 	kfree(tu->call.print_fmt);
 	tu->call.print_fmt = NULL;
+	return 0;
 }
 
 /* Make a trace interface for controling probe points */
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index d8c30db..9064b91 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -62,6 +62,9 @@
 	kgid_t group = new->egid;
 	int ret;
 
+	if (parent_ns->level > 32)
+		return -EUSERS;
+
 	/*
 	 * Verify that we can not violate the policy of which files
 	 * may be accessed that is specified by the root directory,
@@ -92,6 +95,7 @@
 	atomic_set(&ns->count, 1);
 	/* Leave the new->user_ns reference with the new user namespace. */
 	ns->parent = parent_ns;
+	ns->level = parent_ns->level + 1;
 	ns->owner = owner;
 	ns->group = group;
 
@@ -105,16 +109,21 @@
 int unshare_userns(unsigned long unshare_flags, struct cred **new_cred)
 {
 	struct cred *cred;
+	int err = -ENOMEM;
 
 	if (!(unshare_flags & CLONE_NEWUSER))
 		return 0;
 
 	cred = prepare_creds();
-	if (!cred)
-		return -ENOMEM;
+	if (cred) {
+		err = create_user_ns(cred);
+		if (err)
+			put_cred(cred);
+		else
+			*new_cred = cred;
+	}
 
-	*new_cred = cred;
-	return create_user_ns(cred);
+	return err;
 }
 
 void free_user_ns(struct user_namespace *ns)
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index e092e5a..28ad69a 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -314,8 +314,10 @@
 		if (__this_cpu_read(hard_watchdog_warn) == true)
 			return;
 
-		if (hardlockup_panic)
+		if (hardlockup_panic) {
+			trigger_all_cpu_backtrace();
 			panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
+		}
 		else
 			WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);
 
@@ -401,8 +403,10 @@
 		else
 			dump_stack();
 
-		if (softlockup_panic)
+		if (softlockup_panic) {
+			trigger_all_cpu_backtrace();
 			panic("softlockup: hung tasks");
+		}
 		__this_cpu_write(soft_watchdog_warn, true);
 	} else
 		__this_cpu_write(soft_watchdog_warn, false);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index ee8e29a..59847fd 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2188,6 +2188,15 @@
 		dump_stack();
 	}
 
+	/*
+	 * The following prevents a kworker from hogging CPU on !PREEMPT
+	 * kernels, where a requeueing work item waiting for something to
+	 * happen could deadlock with stop_machine as such work item could
+	 * indefinitely requeue itself while all other CPUs are trapped in
+	 * stop_machine.
+	 */
+	cond_resched();
+
 	spin_lock_irq(&pool->lock);
 
 	/* clear cpu intensive status */
@@ -3398,6 +3407,12 @@
 {
 	to->nice = from->nice;
 	cpumask_copy(to->cpumask, from->cpumask);
+	/*
+	 * Unlike hash and equality test, this function doesn't ignore
+	 * ->no_numa as it is used for both pool and wq attrs.  Instead,
+	 * get_unbound_pool() explicitly clears ->no_numa after copying.
+	 */
+	to->no_numa = from->no_numa;
 }
 
 /* hash value of the content of @attr */
@@ -3565,6 +3580,12 @@
 	lockdep_set_subclass(&pool->lock, 1);	/* see put_pwq() */
 	copy_workqueue_attrs(pool->attrs, attrs);
 
+	/*
+	 * no_numa isn't a worker_pool attribute, always clear it.  See
+	 * 'struct workqueue_attrs' comments for detail.
+	 */
+	pool->attrs->no_numa = false;
+
 	/* if cpumask is contained inside a NUMA node, we belong to that node */
 	if (wq_numa_enabled) {
 		for_each_node(node) {
@@ -4800,7 +4821,7 @@
  * %true if some freezable workqueues are still busy.  %false if freezing
  * is complete.
  */
-bool freeze_workqueues_busy(void)
+bool freeze_workqueues_busy(char **busy_wq_name)
 {
 	bool busy = false;
 	struct workqueue_struct *wq;
@@ -4822,6 +4843,7 @@
 			WARN_ON_ONCE(pwq->nr_active < 0);
 			if (pwq->nr_active) {
 				busy = true;
+				*busy_wq_name = wq->name;
 				rcu_read_unlock_sched();
 				goto out_unlock;
 			}
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 86ed239..d317c1a 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1285,7 +1285,7 @@
 	depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
 	depends on !X86_64
 	select STACKTRACE
-	select FRAME_POINTER if !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
+	select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
 	help
 	  Provide stacktrace filter for fault-injection capabilities
 
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 52e5abb..3eb875d 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -24,6 +24,7 @@
 #include <linux/socket.h>
 #include <linux/skbuff.h>
 #include <linux/netlink.h>
+#include <linux/suspend.h>
 #include <net/sock.h>
 #include <net/net_namespace.h>
 
@@ -41,6 +42,21 @@
 /* This lock protects uevent_seqnum and uevent_sock_list */
 static DEFINE_MUTEX(uevent_sock_mutex);
 
+#ifdef CONFIG_PM_SLEEP
+struct uevent_buffered {
+	struct kobject *kobj;
+	struct kobj_uevent_env *env;
+	const char *action;
+	char *devpath;
+	char *subsys;
+	struct list_head buffer_list;
+};
+
+static DEFINE_MUTEX(uevent_buffer_mutex);
+static bool uevent_buffer;
+static LIST_HEAD(uevent_buffer_list);
+#endif
+
 /* the strings here must match the enum in include/linux/kobject.h */
 static const char *kobject_actions[] = {
 	[KOBJ_ADD] =		"add",
@@ -118,6 +134,89 @@
 	return 0;
 }
 
+static int kobject_deliver_uevent(struct kobject *kobj,
+				  struct kobj_uevent_env *env,
+				  const char *action_string,
+				  const char *devpath,
+				  const char *subsystem)
+{
+	int retval, i;
+#ifdef CONFIG_NET
+	struct uevent_sock *ue_sk;
+#endif
+
+	mutex_lock(&uevent_sock_mutex);
+	/* we will send an event, so request a new sequence number */
+	retval = add_uevent_var(env, "SEQNUM=%llu",
+				 (unsigned long long) ++uevent_seqnum);
+	if (retval) {
+		mutex_unlock(&uevent_sock_mutex);
+		return -1;
+	}
+
+#if defined(CONFIG_NET)
+	/* send netlink message */
+	list_for_each_entry(ue_sk, &uevent_sock_list, list) {
+		struct sock *uevent_sock = ue_sk->sk;
+		struct sk_buff *skb;
+		size_t len;
+
+		if (!netlink_has_listeners(uevent_sock, 1))
+			continue;
+
+		/* allocate message with the maximum possible size */
+		len = strlen(action_string) + strlen(devpath) + 2;
+		skb = alloc_skb(len + env->buflen, GFP_KERNEL);
+		if (skb) {
+			char *scratch;
+
+			/* add header */
+			scratch = skb_put(skb, len);
+			sprintf(scratch, "%s@%s", action_string, devpath);
+
+			/* copy keys to our continuous event payload buffer */
+			for (i = 0; i < env->envp_idx; i++) {
+				len = strlen(env->envp[i]) + 1;
+				scratch = skb_put(skb, len);
+				strcpy(scratch, env->envp[i]);
+			}
+
+			NETLINK_CB(skb).dst_group = 1;
+			retval = netlink_broadcast_filtered(uevent_sock, skb,
+							    0, 1, GFP_KERNEL,
+							    kobj_bcast_filter,
+							    kobj);
+			/* ENOBUFS should be handled in userspace */
+			if (retval == -ENOBUFS || retval == -ESRCH)
+				retval = 0;
+		} else
+			retval = -ENOMEM;
+	}
+#endif
+	mutex_unlock(&uevent_sock_mutex);
+
+	/* call uevent_helper, usually only enabled during early boot */
+	if (uevent_helper[0] && !kobj_usermode_filter(kobj)) {
+		char *argv[3];
+
+		argv[0] = uevent_helper;
+		argv[1] = (char *) subsystem;
+		argv[2] = NULL;
+		retval = add_uevent_var(env, "HOME=/");
+		if (retval)
+			return -1;
+		retval = add_uevent_var(env,
+					"PATH=/sbin:/bin:/user/sbin:/usr/bin");
+		if (retval)
+			return -1;
+
+		retval = call_usermodehelper(argv[0], argv,
+					     env->envp, UMH_WAIT_EXEC);
+	}
+
+	return 0;
+}
+
 /**
  * kobject_uevent_env - send an uevent with environmental data
  *
@@ -140,9 +239,6 @@
 	const struct kset_uevent_ops *uevent_ops;
 	int i = 0;
 	int retval = 0;
-#ifdef CONFIG_NET
-	struct uevent_sock *ue_sk;
-#endif
 
 	pr_debug("kobject: '%s' (%p): %s\n",
 		 kobject_name(kobj), kobj, __func__);
@@ -244,73 +340,48 @@
 	else if (action == KOBJ_REMOVE)
 		kobj->state_remove_uevent_sent = 1;
 
-	mutex_lock(&uevent_sock_mutex);
-	/* we will send an event, so request a new sequence number */
-	retval = add_uevent_var(env, "SEQNUM=%llu", (unsigned long long)++uevent_seqnum);
-	if (retval) {
-		mutex_unlock(&uevent_sock_mutex);
-		goto exit;
-	}
+#ifdef CONFIG_PM_SLEEP
+	/*
+	 * Delivery of skb's to userspace processes waiting via
+	 * EPOLLWAKEUP will abort suspend.  Buffer events emitted when
+	 * there is no unfrozen userspace to receive them.
+	 */
+	mutex_lock(&uevent_buffer_mutex);
+	if (uevent_buffer) {
+		struct uevent_buffered *ub;
+		ub = kmalloc(sizeof(*ub), GFP_KERNEL);
+		if (!ub) {
+			mutex_unlock(&uevent_buffer_mutex);
+			goto exit;
+		}
 
-#if defined(CONFIG_NET)
-	/* send netlink message */
-	list_for_each_entry(ue_sk, &uevent_sock_list, list) {
-		struct sock *uevent_sock = ue_sk->sk;
-		struct sk_buff *skb;
-		size_t len;
+		ub->kobj = kobj;
+		ub->env = env;
+		ub->action = action_string;
+		ub->devpath = kstrdup(devpath, GFP_KERNEL);
+		ub->subsys = kstrdup(subsystem, GFP_KERNEL);
 
-		if (!netlink_has_listeners(uevent_sock, 1))
-			continue;
-
-		/* allocate message with the maximum possible size */
-		len = strlen(action_string) + strlen(devpath) + 2;
-		skb = alloc_skb(len + env->buflen, GFP_KERNEL);
-		if (skb) {
-			char *scratch;
-
-			/* add header */
-			scratch = skb_put(skb, len);
-			sprintf(scratch, "%s@%s", action_string, devpath);
-
-			/* copy keys to our continuous event payload buffer */
-			for (i = 0; i < env->envp_idx; i++) {
-				len = strlen(env->envp[i]) + 1;
-				scratch = skb_put(skb, len);
-				strcpy(scratch, env->envp[i]);
-			}
-
-			NETLINK_CB(skb).dst_group = 1;
-			retval = netlink_broadcast_filtered(uevent_sock, skb,
-							    0, 1, GFP_KERNEL,
-							    kobj_bcast_filter,
-							    kobj);
-			/* ENOBUFS should be handled in userspace */
-			if (retval == -ENOBUFS || retval == -ESRCH)
-				retval = 0;
-		} else
+		if (!ub->devpath || !ub->subsys) {
+			kfree(ub->devpath);
+			/* kfree(ub->action);  not free'd as action_string is on the stack */
+			kfree(ub->subsys);
+			kfree(ub);
 			retval = -ENOMEM;
+			mutex_unlock(&uevent_buffer_mutex);
+			goto exit;
+		}
+
+		kobject_get(kobj);
+		list_add(&ub->buffer_list, &uevent_buffer_list);
+		env = NULL;
 	}
+	mutex_unlock(&uevent_buffer_mutex);
 #endif
-	mutex_unlock(&uevent_sock_mutex);
 
-	/* call uevent_helper, usually only enabled during early boot */
-	if (uevent_helper[0] && !kobj_usermode_filter(kobj)) {
-		char *argv [3];
-
-		argv [0] = uevent_helper;
-		argv [1] = (char *)subsystem;
-		argv [2] = NULL;
-		retval = add_uevent_var(env, "HOME=/");
-		if (retval)
+	if (env)
+		if (kobject_deliver_uevent(kobj, env, action_string, devpath,
+					   subsystem))
 			goto exit;
-		retval = add_uevent_var(env,
-					"PATH=/sbin:/bin:/usr/sbin:/usr/bin");
-		if (retval)
-			goto exit;
-
-		retval = call_usermodehelper(argv[0], argv,
-					     env->envp, UMH_WAIT_EXEC);
-	}
 
 exit:
 	kfree(devpath);
@@ -415,6 +486,34 @@
 	kfree(ue_sk);
 }
 
+#ifdef CONFIG_PM_SLEEP
+int uevent_buffer_pm_notify(struct notifier_block *nb,
+			    unsigned long action, void *data)
+{
+	mutex_lock(&uevent_buffer_mutex);
+	if (action == PM_SUSPEND_PREPARE) {
+		uevent_buffer = true;
+	} else if (action == PM_POST_SUSPEND) {
+		struct uevent_buffered *ub, *tmp;
+		list_for_each_entry_safe(ub, tmp, &uevent_buffer_list,
+					 buffer_list) {
+			kobject_deliver_uevent(ub->kobj, ub->env, ub->action,
+					       ub->devpath, ub->subsys);
+			list_del(&ub->buffer_list);
+			kobject_put(ub->kobj);
+			kfree(ub->env);
+			kfree(ub->devpath);
+			kfree(ub->subsys);
+			kfree(ub);
+		}
+
+		uevent_buffer = false;
+	}
+	mutex_unlock(&uevent_buffer_mutex);
+	return 0;
+}
+#endif
+
 static struct pernet_operations uevent_net_ops = {
 	.init	= uevent_net_init,
 	.exit	= uevent_net_exit,
@@ -422,6 +521,9 @@
 
 static int __init kobject_uevent_init(void)
 {
+#ifdef CONFIG_PM_SLEEP
+	pm_notifier(uevent_buffer_pm_notify, 0);
+#endif
 	return register_pernet_subsys(&uevent_net_ops);
 }
 
diff --git a/lib/list_debug.c b/lib/list_debug.c
index c24c2f7..97f5ed2 100644
--- a/lib/list_debug.c
+++ b/lib/list_debug.c
@@ -12,6 +12,41 @@
 #include <linux/kernel.h>
 #include <linux/rculist.h>
 
+#ifdef CONFIG_X86_64
+static unsigned long count_bits(unsigned long value)
+{
+	value = value - ((value >> 1) & 0x5555555555555555);
+	value = (value & 0x3333333333333333) + ((value >> 2) & 0x3333333333333333);
+	return (((value + (value >> 4)) & 0xF0F0F0F0F0F0F0F) * 0x101010101010101) >> 56;
+}
+#else
+static unsigned long count_bits(unsigned long value)
+{
+	value = value - ((value >> 1) & 0x55555555);
+	value = (value & 0x33333333) + ((value >> 2) & 0x33333333);
+	return (((value + (value >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24;
+}
+#endif
+
+inline int check_list_corruption(void *ptr1, void *ptr2, void *ptr3, const char *func_name,
+		const char *ptr1_name, const char *ptr2_name, const char *ptr3_name) {
+	unsigned long delta_bits = (unsigned long)ptr1 ^ (unsigned long)ptr2;
+	if (!delta_bits)
+		return 0;
+
+	if (count_bits(delta_bits)  < 3) {
+		/* less than 3 bits differ; probably a bit flip...*/
+		panic("Bit flip in %s: value %p should be %p\n", func_name, ptr2, ptr1);
+	}
+
+	return WARN(1,
+		"%s corruption. %s should be "
+		"%s (%p), but was %p. (%s=%p).\n",
+		func_name, ptr1_name, ptr2_name, ptr2, ptr1, ptr3_name, ptr3);
+}
+
+
+
 /*
  * Insert a new entry between two known consecutive entries.
  *
@@ -23,14 +58,8 @@
 			      struct list_head *prev,
 			      struct list_head *next)
 {
-	WARN(next->prev != prev,
-		"list_add corruption. next->prev should be "
-		"prev (%p), but was %p. (next=%p).\n",
-		prev, next->prev, next);
-	WARN(prev->next != next,
-		"list_add corruption. prev->next should be "
-		"next (%p), but was %p. (prev=%p).\n",
-		next, prev->next, prev);
+	check_list_corruption(next->prev, prev, next, __func__, "next->prev", "prev", "next");
+	check_list_corruption(prev->next, next, prev, __func__, "prev->next", "next", "prev");
 	WARN(new == prev || new == next,
 	     "list_add double add: new=%p, prev=%p, next=%p.\n",
 	     new, prev, next);
@@ -54,12 +83,10 @@
 	    WARN(prev == LIST_POISON2,
 		"list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
 		entry, LIST_POISON2) ||
-	    WARN(prev->next != entry,
-		"list_del corruption. prev->next should be %p, "
-		"but was %p\n", entry, prev->next) ||
-	    WARN(next->prev != entry,
-		"list_del corruption. next->prev should be %p, "
-		"but was %p\n", entry, next->prev))
+	    check_list_corruption(next->prev, entry, next,
+		__func__, "next->prev", "entry", "next") ||
+	    check_list_corruption(prev->next, entry, prev,
+		__func__, "prev->next", "entry", "prev"))
 		return;
 
 	__list_del(prev, next);
@@ -86,12 +113,8 @@
 void __list_add_rcu(struct list_head *new,
 		    struct list_head *prev, struct list_head *next)
 {
-	WARN(next->prev != prev,
-		"list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
-		prev, next->prev, next);
-	WARN(prev->next != next,
-		"list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
-		next, prev->next, prev);
+	check_list_corruption(next->prev, prev, next, __func__, "next->prev", "prev", "next");
+	check_list_corruption(prev->next, next, prev, __func__, "prev->next", "next", "prev");
 	new->next = next;
 	new->prev = prev;
 	rcu_assign_pointer(list_next_rcu(prev), new);
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index a1cf8ca..3e7df38 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -529,7 +529,8 @@
 		miter->__offset += miter->consumed;
 		miter->__remaining -= miter->consumed;
 
-		if (miter->__flags & SG_MITER_TO_SG)
+		if ((miter->__flags & SG_MITER_TO_SG) &&
+		    !PageSlab(miter->page))
 			flush_kernel_dcache_page(miter->page);
 
 		if (miter->__flags & SG_MITER_ATOMIC) {
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index 0374a59..d2f2869 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -164,9 +164,9 @@
 	if (!debug_locks_off())
 		return;
 
-	printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
+	printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p, m:oc:o %u %u %p\n",
 		msg, raw_smp_processor_id(), current->comm,
-		task_pid_nr(current), lock);
+		task_pid_nr(current), lock, lock->magic, lock->owner_cpu, lock->owner);
 	dump_stack();
 }
 
diff --git a/mm/bounce.c b/mm/bounce.c
index c9f0a43..5a7d58f 100644
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -204,6 +204,8 @@
 	struct bio_vec *to, *from;
 	unsigned i;
 
+	if (force)
+		goto bounce;
 	bio_for_each_segment(from, *bio_orig, i)
 		if (page_to_pfn(from->bv_page) > queue_bounce_pfn(q))
 			goto bounce;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 362c329..c403a74 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1288,64 +1288,90 @@
 int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 				unsigned long addr, pmd_t pmd, pmd_t *pmdp)
 {
+	struct anon_vma *anon_vma = NULL;
 	struct page *page;
 	unsigned long haddr = addr & HPAGE_PMD_MASK;
+	int page_nid = -1, this_nid = numa_node_id();
 	int target_nid;
-	int current_nid = -1;
-	bool migrated;
+	bool page_locked;
+	bool migrated = false;
 
 	spin_lock(&mm->page_table_lock);
 	if (unlikely(!pmd_same(pmd, *pmdp)))
 		goto out_unlock;
 
 	page = pmd_page(pmd);
-	get_page(page);
-	current_nid = page_to_nid(page);
+	page_nid = page_to_nid(page);
 	count_vm_numa_event(NUMA_HINT_FAULTS);
-	if (current_nid == numa_node_id())
+	if (page_nid == this_nid)
 		count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
 
+	/*
+	 * Acquire the page lock to serialise THP migrations but avoid dropping
+	 * page_table_lock if at all possible
+	 */
+	page_locked = trylock_page(page);
 	target_nid = mpol_misplaced(page, vma, haddr);
 	if (target_nid == -1) {
-		put_page(page);
-		goto clear_pmdnuma;
+		/* If the page was locked, there are no parallel migrations */
+		if (page_locked)
+			goto clear_pmdnuma;
+
+		/*
+		 * Otherwise wait for potential migrations and retry. We do
+		 * relock and check_same as the page may no longer be mapped.
+		 * As the fault is being retried, do not account for it.
+		 */
+		spin_unlock(&mm->page_table_lock);
+		wait_on_page_locked(page);
+		page_nid = -1;
+		goto out;
 	}
 
-	/* Acquire the page lock to serialise THP migrations */
+	/* Page is misplaced, serialise migrations and parallel THP splits */
+	get_page(page);
 	spin_unlock(&mm->page_table_lock);
-	lock_page(page);
+	if (!page_locked)
+		lock_page(page);
+	anon_vma = page_lock_anon_vma_read(page);
 
 	/* Confirm the PTE did not while locked */
 	spin_lock(&mm->page_table_lock);
 	if (unlikely(!pmd_same(pmd, *pmdp))) {
 		unlock_page(page);
 		put_page(page);
+		page_nid = -1;
 		goto out_unlock;
 	}
-	spin_unlock(&mm->page_table_lock);
 
-	/* Migrate the THP to the requested node */
+	/*
+	 * Migrate the THP to the requested node, returns with page unlocked
+	 * and pmd_numa cleared.
+	 */
+	spin_unlock(&mm->page_table_lock);
 	migrated = migrate_misplaced_transhuge_page(mm, vma,
 				pmdp, pmd, addr, page, target_nid);
-	if (!migrated)
-		goto check_same;
+	if (migrated)
+		page_nid = target_nid;
 
-	task_numa_fault(target_nid, HPAGE_PMD_NR, true);
-	return 0;
-
-check_same:
-	spin_lock(&mm->page_table_lock);
-	if (unlikely(!pmd_same(pmd, *pmdp)))
-		goto out_unlock;
+	goto out;
 clear_pmdnuma:
+	BUG_ON(!PageLocked(page));
 	pmd = pmd_mknonnuma(pmd);
 	set_pmd_at(mm, haddr, pmdp, pmd);
 	VM_BUG_ON(pmd_numa(*pmdp));
 	update_mmu_cache_pmd(vma, addr, pmdp);
+	unlock_page(page);
 out_unlock:
 	spin_unlock(&mm->page_table_lock);
-	if (current_nid != -1)
-		task_numa_fault(current_nid, HPAGE_PMD_NR, false);
+
+out:
+	if (anon_vma)
+		page_unlock_anon_vma_read(anon_vma);
+
+	if (page_nid != -1)
+		task_numa_fault(page_nid, HPAGE_PMD_NR, migrated);
+
 	return 0;
 }
 
@@ -2286,6 +2312,8 @@
 		goto out;
 
 	vma = find_vma(mm, address);
+	if (!vma)
+		goto out;
 	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
 	hend = vma->vm_end & HPAGE_PMD_MASK;
 	if (address < hstart || address + HPAGE_PMD_SIZE > hend)
@@ -2697,6 +2725,7 @@
 
 	mmun_start = haddr;
 	mmun_end   = haddr + HPAGE_PMD_SIZE;
+again:
 	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
 	spin_lock(&mm->page_table_lock);
 	if (unlikely(!pmd_trans_huge(*pmd))) {
@@ -2719,7 +2748,14 @@
 	split_huge_page(page);
 
 	put_page(page);
-	BUG_ON(pmd_trans_huge(*pmd));
+
+	/*
+	 * We don't always have down_write of mmap_sem here: a racing
+	 * do_huge_pmd_wp_page() might have copied-on-write to another
+	 * huge page before our split_huge_page() got the anon_vma lock.
+	 */
+	if (unlikely(pmd_trans_huge(*pmd)))
+		goto again;
 }
 
 void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address,
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index e2bfbf7..7c5eb85 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -690,6 +690,23 @@
 }
 EXPORT_SYMBOL_GPL(PageHuge);
 
+pgoff_t __basepage_index(struct page *page)
+{
+	struct page *page_head = compound_head(page);
+	pgoff_t index = page_index(page_head);
+	unsigned long compound_idx;
+
+	if (!PageHuge(page_head))
+		return page_index(page);
+
+	if (compound_order(page_head) >= MAX_ORDER)
+		compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
+	else
+		compound_idx = page - page_head;
+
+	return (index << compound_order(page_head)) + compound_idx;
+}
+
 static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
 {
 	struct page *page;
@@ -2473,7 +2490,7 @@
 
 	mm = vma->vm_mm;
 
-	tlb_gather_mmu(&tlb, mm, 0);
+	tlb_gather_mmu(&tlb, mm, start, end);
 	__unmap_hugepage_range(&tlb, vma, start, end, ref_page);
 	tlb_finish_mmu(&tlb, start, end);
 }
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 6a9a0c3..703e942 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3186,11 +3186,11 @@
 	if (!s->memcg_params)
 		return -ENOMEM;
 
-	INIT_WORK(&s->memcg_params->destroy,
-			kmem_cache_destroy_work_func);
 	if (memcg) {
 		s->memcg_params->memcg = memcg;
 		s->memcg_params->root_cache = root_cache;
+		INIT_WORK(&s->memcg_params->destroy,
+				kmem_cache_destroy_work_func);
 	} else
 		s->memcg_params->is_root_cache = true;
 
@@ -5584,7 +5584,13 @@
 	const struct mem_cgroup_threshold *_a = a;
 	const struct mem_cgroup_threshold *_b = b;
 
-	return _a->threshold - _b->threshold;
+	if (_a->threshold > _b->threshold)
+		return 1;
+
+	if (_a->threshold < _b->threshold)
+		return -1;
+
+	return 0;
 }
 
 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
@@ -6297,16 +6303,6 @@
 
 	error = memcg_init_kmem(memcg, &mem_cgroup_subsys);
 	mutex_unlock(&memcg_create_mutex);
-	if (error) {
-		/*
-		 * We call put now because our (and parent's) refcnts
-		 * are already in place. mem_cgroup_put() will internally
-		 * call __mem_cgroup_free, so return directly
-		 */
-		mem_cgroup_put(memcg);
-		if (parent->use_hierarchy)
-			mem_cgroup_put(parent);
-	}
 	return error;
 }
 
diff --git a/mm/memory.c b/mm/memory.c
index 61a262b..09cb2e1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -211,14 +211,15 @@
  *	tear-down from @mm. The @fullmm argument is used when @mm is without
  *	users and we're going to destroy the full address space (exit/execve).
  */
-void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
 {
 	tlb->mm = mm;
 
-	tlb->fullmm     = fullmm;
+	/* Is it from 0 to ~0? */
+	tlb->fullmm     = !(start | (end+1));
 	tlb->need_flush_all = 0;
-	tlb->start	= -1UL;
-	tlb->end	= 0;
+	tlb->start	= start;
+	tlb->end	= end;
 	tlb->need_flush = 0;
 	tlb->local.next = NULL;
 	tlb->local.nr   = 0;
@@ -258,8 +259,6 @@
 {
 	struct mmu_gather_batch *batch, *next;
 
-	tlb->start = start;
-	tlb->end   = end;
 	tlb_flush_mmu(tlb);
 
 	/* keep the page table cache within bounds */
@@ -1203,13 +1202,23 @@
 	 * and page-free while holding it.
 	 */
 	if (force_flush) {
+		unsigned long old_end;
+
 		force_flush = 0;
 
-#ifdef HAVE_GENERIC_MMU_GATHER
-		tlb->start = addr;
-		tlb->end = end;
-#endif
+		/*
+		 * Flush the TLB just for the previous segment,
+		 * then update the range to be the remaining
+		 * TLB range.
+		 */
+		old_end = tlb->end;
+		tlb->end = addr;
+
 		tlb_flush_mmu(tlb);
+
+		tlb->start = addr;
+		tlb->end = old_end;
+
 		if (addr != end)
 			goto again;
 	}
@@ -1396,7 +1405,7 @@
 	unsigned long end = start + size;
 
 	lru_add_drain();
-	tlb_gather_mmu(&tlb, mm, 0);
+	tlb_gather_mmu(&tlb, mm, start, end);
 	update_hiwater_rss(mm);
 	mmu_notifier_invalidate_range_start(mm, start, end);
 	for ( ; vma && vma->vm_start < end; vma = vma->vm_next)
@@ -1422,7 +1431,7 @@
 	unsigned long end = address + size;
 
 	lru_add_drain();
-	tlb_gather_mmu(&tlb, mm, 0);
+	tlb_gather_mmu(&tlb, mm, address, end);
 	update_hiwater_rss(mm);
 	mmu_notifier_invalidate_range_start(mm, address, end);
 	unmap_single_vma(&tlb, vma, address, end, details);
@@ -2224,6 +2233,26 @@
 }
 EXPORT_SYMBOL(vm_insert_pfn);
 
+int vm_insert_pfn_with_pgprot(struct vm_area_struct *vma, unsigned long addr,
+			      unsigned long pfn, pgprot_t pgprot)
+{
+	/*
+	 * Technically, architectures with pte_special can avoid all these
+	 * restrictions (same for remap_pfn_range).  However we would like
+	 * consistency in testing and feature parity among all, so we should
+	 * try to keep these invariants in place for everybody.
+	 */
+	BUG_ON((vma->vm_flags & VM_PFNMAP) == 0);
+	BUG_ON((vma->vm_flags & VM_MIXEDMAP));
+	BUG_ON(is_cow_mapping(vma->vm_flags));
+
+	if (addr < vma->vm_start || addr >= vma->vm_end)
+		return -EFAULT;
+
+	return insert_pfn(vma, addr, pfn, pgprot);
+}
+EXPORT_SYMBOL(vm_insert_pfn_with_pgprot);
+
 int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
 			unsigned long pfn)
 {
@@ -3516,12 +3545,12 @@
 }
 
 int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
-				unsigned long addr, int current_nid)
+				unsigned long addr, int page_nid)
 {
 	get_page(page);
 
 	count_vm_numa_event(NUMA_HINT_FAULTS);
-	if (current_nid == numa_node_id())
+	if (page_nid == numa_node_id())
 		count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
 
 	return mpol_misplaced(page, vma, addr);
@@ -3532,7 +3561,7 @@
 {
 	struct page *page = NULL;
 	spinlock_t *ptl;
-	int current_nid = -1;
+	int page_nid = -1;
 	int target_nid;
 	bool migrated = false;
 
@@ -3562,15 +3591,10 @@
 		return 0;
 	}
 
-	current_nid = page_to_nid(page);
-	target_nid = numa_migrate_prep(page, vma, addr, current_nid);
+	page_nid = page_to_nid(page);
+	target_nid = numa_migrate_prep(page, vma, addr, page_nid);
 	pte_unmap_unlock(ptep, ptl);
 	if (target_nid == -1) {
-		/*
-		 * Account for the fault against the current node if it not
-		 * being replaced regardless of where the page is located.
-		 */
-		current_nid = numa_node_id();
 		put_page(page);
 		goto out;
 	}
@@ -3578,11 +3602,11 @@
 	/* Migrate to the requested node */
 	migrated = migrate_misplaced_page(page, target_nid);
 	if (migrated)
-		current_nid = target_nid;
+		page_nid = target_nid;
 
 out:
-	if (current_nid != -1)
-		task_numa_fault(current_nid, 1, migrated);
+	if (page_nid != -1)
+		task_numa_fault(page_nid, 1, migrated);
 	return 0;
 }
 
@@ -3597,7 +3621,6 @@
 	unsigned long offset;
 	spinlock_t *ptl;
 	bool numa = false;
-	int local_nid = numa_node_id();
 
 	spin_lock(&mm->page_table_lock);
 	pmd = *pmdp;
@@ -3620,9 +3643,10 @@
 	for (addr = _addr + offset; addr < _addr + PMD_SIZE; pte++, addr += PAGE_SIZE) {
 		pte_t pteval = *pte;
 		struct page *page;
-		int curr_nid = local_nid;
+		int page_nid = -1;
 		int target_nid;
-		bool migrated;
+		bool migrated = false;
+
 		if (!pte_present(pteval))
 			continue;
 		if (!pte_numa(pteval))
@@ -3644,25 +3668,19 @@
 		if (unlikely(page_mapcount(page) != 1))
 			continue;
 
-		/*
-		 * Note that the NUMA fault is later accounted to either
-		 * the node that is currently running or where the page is
-		 * migrated to.
-		 */
-		curr_nid = local_nid;
-		target_nid = numa_migrate_prep(page, vma, addr,
-					       page_to_nid(page));
-		if (target_nid == -1) {
+		page_nid = page_to_nid(page);
+		target_nid = numa_migrate_prep(page, vma, addr, page_nid);
+		pte_unmap_unlock(pte, ptl);
+		if (target_nid != -1) {
+			migrated = migrate_misplaced_page(page, target_nid);
+			if (migrated)
+				page_nid = target_nid;
+		} else {
 			put_page(page);
-			continue;
 		}
 
-		/* Migrate to the requested node */
-		pte_unmap_unlock(pte, ptl);
-		migrated = migrate_misplaced_page(page, target_nid);
-		if (migrated)
-			curr_nid = target_nid;
-		task_numa_fault(curr_nid, 1, migrated);
+		if (page_nid != -1)
+			task_numa_fault(page_nid, 1, migrated);
 
 		pte = pte_offset_map_lock(mm, pmdp, addr, &ptl);
 	}
@@ -4065,6 +4083,7 @@
 
 	return len;
 }
+EXPORT_SYMBOL_GPL(generic_access_phys);
 #endif
 
 /*
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index bf12a3f..28fb178 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -732,7 +732,10 @@
 		if (prev) {
 			vma = prev;
 			next = vma->vm_next;
-			continue;
+			if (mpol_equal(vma_policy(vma), new_pol))
+				continue;
+			/* vma_merge() joined vma && vma->next, case 8 */
+			goto replace;
 		}
 		if (vma->vm_start != vmstart) {
 			err = split_vma(vma->vm_mm, vma, vmstart, 1);
@@ -744,6 +747,7 @@
 			if (err)
 				goto out;
 		}
+ replace:
 		err = vma_replace_policy(vma, new_pol);
 		if (err)
 			goto out;
diff --git a/mm/migrate.c b/mm/migrate.c
index 6f0c244..bf436c1 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -103,7 +103,7 @@
 		list_del(&page->lru);
 		dec_zone_page_state(page, NR_ISOLATED_ANON +
 				page_is_file_cache(page));
-		if (unlikely(balloon_page_movable(page)))
+		if (unlikely(isolated_balloon_page(page)))
 			balloon_page_putback(page);
 		else
 			putback_lru_page(page);
@@ -1710,12 +1710,12 @@
 		unlock_page(new_page);
 		put_page(new_page);		/* Free it */
 
-		unlock_page(page);
+		/* Retake the callers reference and putback on LRU */
+		get_page(page);
 		putback_lru_page(page);
-
-		count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
-		isolated = 0;
-		goto out;
+		mod_zone_page_state(page_zone(page),
+			 NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR);
+		goto out_fail;
 	}
 
 	/*
@@ -1732,9 +1732,9 @@
 	entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
 	entry = pmd_mkhuge(entry);
 
-	page_add_new_anon_rmap(new_page, vma, haddr);
-
+	pmdp_clear_flush(vma, haddr, pmd);
 	set_pmd_at(mm, haddr, pmd, entry);
+	page_add_new_anon_rmap(new_page, vma, haddr);
 	update_mmu_cache_pmd(vma, address, &entry);
 	page_remove_rmap(page);
 	/*
@@ -1753,7 +1753,6 @@
 	count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR);
 	count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR);
 
-out:
 	mod_zone_page_state(page_zone(page),
 			NR_ISOLATED_ANON + page_lru,
 			-HPAGE_PMD_NR);
@@ -1762,6 +1761,10 @@
 out_fail:
 	count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
 out_dropref:
+	entry = pmd_mknonnuma(entry);
+	set_pmd_at(mm, haddr, pmd, entry);
+	update_mmu_cache_pmd(vma, address, &entry);
+
 	unlock_page(page);
 	put_page(page);
 	return 0;
diff --git a/mm/mmap.c b/mm/mmap.c
index 93f936d..97d81e4 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -877,7 +877,7 @@
 		if (next->anon_vma)
 			anon_vma_merge(vma, next);
 		mm->map_count--;
-		vma_set_policy(vma, vma_policy(next));
+		mpol_put(vma_policy(next));
 		kmem_cache_free(vm_area_cachep, next);
 		/*
 		 * In mprotect's case 6 (see comments on vma_merge),
@@ -2375,7 +2375,7 @@
 	struct mmu_gather tlb;
 
 	lru_add_drain();
-	tlb_gather_mmu(&tlb, mm, 0);
+	tlb_gather_mmu(&tlb, mm, start, end);
 	update_hiwater_rss(mm);
 	unmap_vmas(&tlb, vma, start, end);
 	free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
@@ -2754,7 +2754,7 @@
 
 	lru_add_drain();
 	flush_cache_mm(mm);
-	tlb_gather_mmu(&tlb, mm, 1);
+	tlb_gather_mmu(&tlb, mm, 0, -1);
 	/* update_hiwater_rss(mm) here? but nobody should be looking */
 	/* Use -1 here to ensure all VMAs in the mm are unmapped */
 	unmap_vmas(&tlb, vma, 0, -1);
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 94d50b7..5d80adf 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -145,7 +145,7 @@
 				split_huge_page_pmd(vma, addr, pmd);
 			else if (change_huge_pmd(vma, pmd, addr, newprot,
 						 prot_numa)) {
-				pages += HPAGE_PMD_NR;
+				pages++;
 				continue;
 			}
 			/* fall through */
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 2cccf9b..f156ec3 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1104,11 +1104,11 @@
 	return 1;
 }
 
-static long bdi_max_pause(struct backing_dev_info *bdi,
-			  unsigned long bdi_dirty)
+static unsigned long bdi_max_pause(struct backing_dev_info *bdi,
+				   unsigned long bdi_dirty)
 {
-	long bw = bdi->avg_write_bandwidth;
-	long t;
+	unsigned long bw = bdi->avg_write_bandwidth;
+	unsigned long t;
 
 	/*
 	 * Limit pause time for small memory systems. If sleeping for too long
@@ -1120,7 +1120,7 @@
 	t = bdi_dirty / (1 + bw / roundup_pow_of_two(1 + HZ / 8));
 	t++;
 
-	return min_t(long, t, MAX_PAUSE);
+	return min_t(unsigned long, t, MAX_PAUSE);
 }
 
 static long bdi_min_pause(struct backing_dev_info *bdi,
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a6bd1d8..5f59be3 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6191,6 +6191,10 @@
 		list_del(&page->lru);
 		rmv_page_order(page);
 		zone->free_area[order].nr_free--;
+#ifdef CONFIG_HIGHMEM
+		if (PageHighMem(page))
+			totalhigh_pages -= 1 << order;
+#endif
 		for (i = 0; i < (1 << order); i++)
 			SetPageReserved((page+i));
 		pfn += (1 << order);
diff --git a/mm/readahead.c b/mm/readahead.c
index 829a77c..192c53a 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -401,6 +401,7 @@
 		   unsigned long req_size)
 {
 	unsigned long max = max_sane_readahead(ra->ra_pages);
+	pgoff_t prev_offset;
 
 	/*
 	 * start of file
@@ -452,8 +453,11 @@
 
 	/*
 	 * sequential cache miss
+	 * trivial case: (offset - prev_offset) == 1
+	 * unaligned reads: (offset - prev_offset) == 0
 	 */
-	if (offset - (ra->prev_pos >> PAGE_CACHE_SHIFT) <= 1UL)
+	prev_offset = (unsigned long long)ra->prev_pos >> PAGE_CACHE_SHIFT;
+	if (offset - prev_offset <= 1UL)
 		goto initial_readahead;
 
 	/*
diff --git a/mm/slab.c b/mm/slab.c
index 8ccd296..bd88411 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -565,7 +565,7 @@
 	if (slab_state < UP)
 		return;
 
-	for (i = 1; i < PAGE_SHIFT + MAX_ORDER; i++) {
+	for (i = 1; i <= KMALLOC_SHIFT_HIGH; i++) {
 		struct kmem_cache_node *n;
 		struct kmem_cache *cache = kmalloc_caches[i];
 
diff --git a/mm/slab.h b/mm/slab.h
index f96b49e..4d6d836 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -162,6 +162,8 @@
 
 static inline struct kmem_cache *cache_from_memcg(struct kmem_cache *s, int idx)
 {
+	if (!s->memcg_params)
+		return NULL;
 	return s->memcg_params->memcg_caches[idx];
 }
 
diff --git a/mm/slub.c b/mm/slub.c
index 57707f0..0ed34d1 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1052,6 +1052,7 @@
 	return 1;
 
 bad:
+	panic("Memory corruption detected in %s\n", __func__);
 	if (PageSlab(page)) {
 		/*
 		 * If this is a slab page then lets do the best we can
@@ -1118,6 +1119,7 @@
 	return n;
 
 fail:
+	panic("Memory corruption detected in %s\n", __func__);
 	slab_unlock(page);
 	spin_unlock_irqrestore(&n->list_lock, *flags);
 	slab_fix(s, "Object at 0x%p not freed", object);
diff --git a/mm/swap.c b/mm/swap.c
index dfd7d71..9f2225f 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -31,6 +31,7 @@
 #include <linux/memcontrol.h>
 #include <linux/gfp.h>
 #include <linux/uio.h>
+#include <linux/hugetlb.h>
 
 #include "internal.h"
 
@@ -78,6 +79,19 @@
 
 static void put_compound_page(struct page *page)
 {
+	/*
+	 * hugetlbfs pages cannot be split from under us.  If this is a
+	 * hugetlbfs page, check refcount on head page and release the page if
+	 * the refcount becomes zero.
+	 */
+	if (PageHuge(page)) {
+		page = compound_head(page);
+		if (put_page_testzero(page))
+			__put_compound_page(page);
+
+		return;
+	}
+
 	if (unlikely(PageTail(page))) {
 		/* __split_huge_page_refcount can run under us */
 		struct page *page_head = compound_trans_head(page);
@@ -181,38 +195,51 @@
 	 * proper PT lock that already serializes against
 	 * split_huge_page().
 	 */
-	unsigned long flags;
 	bool got = false;
-	struct page *page_head = compound_trans_head(page);
+	struct page *page_head;
 
-	if (likely(page != page_head && get_page_unless_zero(page_head))) {
+	/*
+	 * If this is a hugetlbfs page it cannot be split under us.  Simply
+	 * increment refcount for the head page.
+	 */
+	if (PageHuge(page)) {
+		page_head = compound_head(page);
+		atomic_inc(&page_head->_count);
+		got = true;
+	} else {
+		unsigned long flags;
 
-		/* Ref to put_compound_page() comment. */
-		if (PageSlab(page_head)) {
+		page_head = compound_trans_head(page);
+		if (likely(page != page_head &&
+					get_page_unless_zero(page_head))) {
+
+			/* Ref to put_compound_page() comment. */
+			if (PageSlab(page_head)) {
+				if (likely(PageTail(page))) {
+					__get_page_tail_foll(page, false);
+					return true;
+				} else {
+					put_page(page_head);
+					return false;
+				}
+			}
+
+			/*
+			 * page_head wasn't a dangling pointer but it
+			 * may not be a head page anymore by the time
+			 * we obtain the lock. That is ok as long as it
+			 * can't be freed from under us.
+			 */
+			flags = compound_lock_irqsave(page_head);
+			/* here __split_huge_page_refcount won't run anymore */
 			if (likely(PageTail(page))) {
 				__get_page_tail_foll(page, false);
-				return true;
-			} else {
-				put_page(page_head);
-				return false;
+				got = true;
 			}
+			compound_unlock_irqrestore(page_head, flags);
+			if (unlikely(!got))
+				put_page(page_head);
 		}
-
-		/*
-		 * page_head wasn't a dangling pointer but it
-		 * may not be a head page anymore by the time
-		 * we obtain the lock. That is ok as long as it
-		 * can't be freed from under us.
-		 */
-		flags = compound_lock_irqsave(page_head);
-		/* here __split_huge_page_refcount won't run anymore */
-		if (likely(PageTail(page))) {
-			__get_page_tail_foll(page, false);
-			got = true;
-		}
-		compound_unlock_irqrestore(page_head, flags);
-		if (unlikely(!got))
-			put_page(page_head);
 	}
 	return got;
 }
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 766ba2c..497787a 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -389,12 +389,12 @@
 		addr = ALIGN(first->va_end, align);
 		if (addr < vstart)
 			goto nocache;
-		if (addr + size - 1 < addr)
+		if (addr + size < addr)
 			goto overflow;
 
 	} else {
 		addr = ALIGN(vstart, align);
-		if (addr + size - 1 < addr)
+		if (addr + size < addr)
 			goto overflow;
 
 		n = vmap_area_root.rb_node;
@@ -421,7 +421,7 @@
 		if (addr + cached_hole_size < first->va_start)
 			cached_hole_size = first->va_start - addr;
 		addr = ALIGN(first->va_end, align);
-		if (addr + size - 1 < addr)
+		if (addr + size < addr)
 			goto overflow;
 
 		if (list_is_last(&first->list, &vmap_area_list))
@@ -1282,6 +1282,7 @@
 	vunmap_page_range(addr, end);
 	flush_tlb_kernel_range(addr, end);
 }
+EXPORT_SYMBOL_GPL(unmap_kernel_range);
 
 int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
 {
@@ -1420,6 +1421,7 @@
 	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
 				  NUMA_NO_NODE, GFP_KERNEL, caller);
 }
+EXPORT_SYMBOL_GPL(get_vm_area_caller);
 
 /**
  *	find_vm_area  -  find a continuous kernel virtual area
@@ -1439,6 +1441,7 @@
 
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(find_vm_area);
 
 /**
  *	remove_vm_area  -  find and remove a continuous kernel virtual area
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 80c0b17..78baaa8 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -49,6 +49,7 @@
 #include <asm/div64.h>
 
 #include <linux/swapops.h>
+#include <linux/balloon_compaction.h>
 
 #include "internal.h"
 
@@ -167,7 +168,6 @@
 
 	down_read(&shrinker_rwsem);
 	list_for_each_entry(shrinker, &shrinker_list, list) {
-		char name[64];
 		int num_objs;
 
 		num_objs = shrinker->shrink(shrinker, &sc);
@@ -1022,7 +1022,8 @@
 	LIST_HEAD(clean_pages);
 
 	list_for_each_entry_safe(page, next, page_list, lru) {
-		if (page_is_file_cache(page) && !PageDirty(page)) {
+		if (page_is_file_cache(page) && !PageDirty(page) &&
+		    !isolated_balloon_page(page)) {
 			ClearPageActive(page);
 			list_move(&page->lru, &clean_pages);
 		}
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 8a15eaa..4a78c4d 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -9,7 +9,7 @@
 {
 	struct sk_buff *skb = *skbp;
 	__be16 vlan_proto = skb->vlan_proto;
-	u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;
+	u16 vlan_id = vlan_tx_tag_get_id(skb);
 	struct net_device *vlan_dev;
 	struct vlan_pcpu_stats *rx_stats;
 
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 3a8c8fd..1cd3d2a 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -73,6 +73,8 @@
 {
 	struct vlan_priority_tci_mapping *mp;
 
+	smp_rmb(); /* coupled with smp_wmb() in vlan_dev_set_egress_priority() */
+
 	mp = vlan_dev_priv(dev)->egress_priority_map[(skb->priority & 0xF)];
 	while (mp) {
 		if (mp->priority == skb->priority) {
@@ -249,6 +251,11 @@
 	np->next = mp;
 	np->priority = skb_prio;
 	np->vlan_qos = vlan_qos;
+	/* Before inserting this element in hash table, make sure all its fields
+	 * are committed to memory.
+	 * coupled with smp_rmb() in vlan_dev_get_egress_qos_mask()
+	 */
+	smp_wmb();
 	vlan->egress_priority_map[skb_prio & 0xF] = np;
 	if (vlan_qos)
 		vlan->nr_egress_mappings++;
diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
index 3091297..c7e634a 100644
--- a/net/8021q/vlan_netlink.c
+++ b/net/8021q/vlan_netlink.c
@@ -171,7 +171,7 @@
 
 	return nla_total_size(2) +	/* IFLA_VLAN_PROTOCOL */
 	       nla_total_size(2) +	/* IFLA_VLAN_ID */
-	       sizeof(struct ifla_vlan_flags) + /* IFLA_VLAN_FLAGS */
+	       nla_total_size(sizeof(struct ifla_vlan_flags)) + /* IFLA_VLAN_FLAGS */
 	       vlan_qos_map_size(vlan->nr_ingress_mappings) +
 	       vlan_qos_map_size(vlan->nr_egress_mappings);
 }
diff --git a/net/9p/trans_common.c b/net/9p/trans_common.c
index de8df95..2ee3879 100644
--- a/net/9p/trans_common.c
+++ b/net/9p/trans_common.c
@@ -24,11 +24,11 @@
  */
 void p9_release_pages(struct page **pages, int nr_pages)
 {
-	int i = 0;
-	while (pages[i] && nr_pages--) {
-		put_page(pages[i]);
-		i++;
-	}
+	int i;
+
+	for (i = 0; i < nr_pages; i++)
+		if (pages[i])
+			put_page(pages[i]);
 }
 EXPORT_SYMBOL(p9_release_pages);
 
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 51aafd6..f1cb1f5 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -61,6 +61,7 @@
 	batadv_recv_handler_init();
 
 	batadv_iv_init();
+	batadv_nc_init();
 
 	batadv_event_workqueue = create_singlethread_workqueue("bat_events");
 
@@ -138,7 +139,7 @@
 	if (ret < 0)
 		goto err;
 
-	ret = batadv_nc_init(bat_priv);
+	ret = batadv_nc_mesh_init(bat_priv);
 	if (ret < 0)
 		goto err;
 
@@ -163,7 +164,7 @@
 	batadv_vis_quit(bat_priv);
 
 	batadv_gw_node_purge(bat_priv);
-	batadv_nc_free(bat_priv);
+	batadv_nc_mesh_free(bat_priv);
 	batadv_dat_free(bat_priv);
 	batadv_bla_free(bat_priv);
 
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
index e84629e..f97aeee 100644
--- a/net/batman-adv/network-coding.c
+++ b/net/batman-adv/network-coding.c
@@ -35,6 +35,20 @@
 				       struct batadv_hard_iface *recv_if);
 
 /**
+ * batadv_nc_init - one-time initialization for network coding
+ */
+int __init batadv_nc_init(void)
+{
+	int ret;
+
+	/* Register our packet type */
+	ret = batadv_recv_handler_register(BATADV_CODED,
+					   batadv_nc_recv_coded_packet);
+
+	return ret;
+}
+
+/**
  * batadv_nc_start_timer - initialise the nc periodic worker
  * @bat_priv: the bat priv with all the soft interface information
  */
@@ -45,10 +59,10 @@
 }
 
 /**
- * batadv_nc_init - initialise coding hash table and start house keeping
+ * batadv_nc_mesh_init - initialise coding hash table and start house keeping
  * @bat_priv: the bat priv with all the soft interface information
  */
-int batadv_nc_init(struct batadv_priv *bat_priv)
+int batadv_nc_mesh_init(struct batadv_priv *bat_priv)
 {
 	bat_priv->nc.timestamp_fwd_flush = jiffies;
 	bat_priv->nc.timestamp_sniffed_purge = jiffies;
@@ -70,11 +84,6 @@
 	batadv_hash_set_lock_class(bat_priv->nc.coding_hash,
 				   &batadv_nc_decoding_hash_lock_class_key);
 
-	/* Register our packet type */
-	if (batadv_recv_handler_register(BATADV_CODED,
-					 batadv_nc_recv_coded_packet) < 0)
-		goto err;
-
 	INIT_DELAYED_WORK(&bat_priv->nc.work, batadv_nc_worker);
 	batadv_nc_start_timer(bat_priv);
 
@@ -1722,12 +1731,11 @@
 }
 
 /**
- * batadv_nc_free - clean up network coding memory
+ * batadv_nc_mesh_free - clean up network coding memory
  * @bat_priv: the bat priv with all the soft interface information
  */
-void batadv_nc_free(struct batadv_priv *bat_priv)
+void batadv_nc_mesh_free(struct batadv_priv *bat_priv)
 {
-	batadv_recv_handler_unregister(BATADV_CODED);
 	cancel_delayed_work_sync(&bat_priv->nc.work);
 
 	batadv_nc_purge_paths(bat_priv, bat_priv->nc.coding_hash, NULL);
diff --git a/net/batman-adv/network-coding.h b/net/batman-adv/network-coding.h
index 4fa6d0c..bd4295f 100644
--- a/net/batman-adv/network-coding.h
+++ b/net/batman-adv/network-coding.h
@@ -22,8 +22,9 @@
 
 #ifdef CONFIG_BATMAN_ADV_NC
 
-int batadv_nc_init(struct batadv_priv *bat_priv);
-void batadv_nc_free(struct batadv_priv *bat_priv);
+int batadv_nc_init(void);
+int batadv_nc_mesh_init(struct batadv_priv *bat_priv);
+void batadv_nc_mesh_free(struct batadv_priv *bat_priv);
 void batadv_nc_update_nc_node(struct batadv_priv *bat_priv,
 			      struct batadv_orig_node *orig_node,
 			      struct batadv_orig_node *orig_neigh_node,
@@ -47,12 +48,17 @@
 
 #else /* ifdef CONFIG_BATMAN_ADV_NC */
 
-static inline int batadv_nc_init(struct batadv_priv *bat_priv)
+static inline int batadv_nc_init(void)
 {
 	return 0;
 }
 
-static inline void batadv_nc_free(struct batadv_priv *bat_priv)
+static inline int batadv_nc_mesh_init(struct batadv_priv *bat_priv)
+{
+	return 0;
+}
+
+static inline void batadv_nc_mesh_free(struct batadv_priv *bat_priv)
 {
 	return;
 }
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index ace5e55..7c88f5f 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -1123,7 +1123,11 @@
 		goto done;
 	}
 
-	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
+	/* Check for rfkill but allow the HCI setup stage to proceed
+	 * (which in itself doesn't cause any RF activity).
+	 */
+	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) &&
+	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
 		ret = -ERFKILL;
 		goto done;
 	}
@@ -1545,10 +1549,13 @@
 
 	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
 
-	if (!blocked)
-		return 0;
-
-	hci_dev_do_close(hdev);
+	if (blocked) {
+		set_bit(HCI_RFKILLED, &hdev->dev_flags);
+		if (!test_bit(HCI_SETUP, &hdev->dev_flags))
+			hci_dev_do_close(hdev);
+	} else {
+		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
+}
 
 	return 0;
 }
@@ -1570,9 +1577,13 @@
 		return;
 	}
 
-	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
+	if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
+		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
+		hci_dev_do_close(hdev);
+	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
 		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
 				   HCI_AUTO_OFF_TIMEOUT);
+	}
 
 	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
 		mgmt_index_added(hdev);
@@ -2241,6 +2252,9 @@
 		}
 	}
 
+	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
+		set_bit(HCI_RFKILLED, &hdev->dev_flags);
+
 	set_bit(HCI_SETUP, &hdev->dev_flags);
 
 	if (hdev->dev_type != HCI_AMP)
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 1cd9075..49d5c94 100755
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -3623,7 +3623,11 @@
 	cp.handle = cpu_to_le16(conn->handle);
 
 	if (ltk->authenticated)
-		conn->sec_level = BT_SECURITY_HIGH;
+		conn->pending_sec_level = BT_SECURITY_HIGH;
+	else
+		conn->pending_sec_level = BT_SECURITY_MEDIUM;
+
+	conn->enc_key_size = ltk->enc_size;
 
 	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
 
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 940f5ac..de030f5 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -231,17 +231,22 @@
 
 static int hidp_send_report(struct hidp_session *session, struct hid_report *report)
 {
-	unsigned char buf[32], hdr;
-	int rsize;
+	unsigned char hdr;
+	u8 *buf;
+	int rsize, ret;
 
-	rsize = ((report->size - 1) >> 3) + 1 + (report->id > 0);
-	if (rsize > sizeof(buf))
+	buf = hid_alloc_report_buf(report, GFP_ATOMIC);
+	if (!buf)
 		return -EIO;
 
 	hid_output_report(report, buf);
 	hdr = HIDP_TRANS_DATA | HIDP_DATA_RTYPE_OUPUT;
 
-	return hidp_send_intr_message(session, hdr, buf, rsize);
+	rsize = ((report->size - 1) >> 3) + 1 + (report->id > 0);
+	ret = hidp_send_intr_message(session, hdr, buf, rsize);
+
+	kfree(buf);
+	return ret;
 }
 
 static int hidp_get_raw_report(struct hid_device *hid,
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index ebfa444..84dd783 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -161,7 +161,7 @@
 	if (!pv)
 		return;
 
-	for_each_set_bit_from(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) {
+	for_each_set_bit_from(vid, pv->vlan_bitmap, VLAN_N_VID) {
 		f = __br_fdb_get(br, br->dev->dev_addr, vid);
 		if (f && f->is_local && !f->dst)
 			fdb_delete(br, f);
@@ -725,7 +725,7 @@
 		/* VID was specified, so use it. */
 		err = __br_fdb_add(ndm, p, addr, nlh_flags, vid);
 	} else {
-		if (!pv || bitmap_empty(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN)) {
+		if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID)) {
 			err = __br_fdb_add(ndm, p, addr, nlh_flags, 0);
 			goto out;
 		}
@@ -734,7 +734,7 @@
 		 * specify a VLAN.  To be nice, add/update entry for every
 		 * vlan on this port.
 		 */
-		for_each_set_bit(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) {
+		for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
 			err = __br_fdb_add(ndm, p, addr, nlh_flags, vid);
 			if (err)
 				goto out;
@@ -812,7 +812,7 @@
 
 		err = __br_fdb_delete(p, addr, vid);
 	} else {
-		if (!pv || bitmap_empty(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN)) {
+		if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID)) {
 			err = __br_fdb_delete(p, addr, 0);
 			goto out;
 		}
@@ -822,7 +822,7 @@
 		 * vlan on this port.
 		 */
 		err = -ENOENT;
-		for_each_set_bit(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) {
+		for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
 			err &= __br_fdb_delete(p, addr, vid);
 		}
 	}
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index d6448e3..d82058f 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1185,7 +1185,7 @@
 		max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
 		if (max_delay)
 			group = &mld->mld_mca;
-	} else if (skb->len >= sizeof(*mld2q)) {
+	} else {
 		if (!pskb_may_pull(skb, sizeof(*mld2q))) {
 			err = -EINVAL;
 			goto out;
@@ -1193,7 +1193,8 @@
 		mld2q = (struct mld2_query *)icmp6_hdr(skb);
 		if (!mld2q->mld2q_nsrcs)
 			group = &mld2q->mld2q_mca;
-		max_delay = mld2q->mld2q_mrc ? MLDV2_MRC(ntohs(mld2q->mld2q_mrc)) : 1;
+
+		max_delay = max(msecs_to_jiffies(MLDV2_MRC(ntohs(mld2q->mld2q_mrc))), 1UL);
 	}
 
 	if (!group)
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 8e3abf5..06873e8 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -128,7 +128,7 @@
 		else
 			pv = br_get_vlan_info(br);
 
-		if (!pv || bitmap_empty(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN))
+		if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID))
 			goto done;
 
 		af = nla_nest_start(skb, IFLA_AF_SPEC);
@@ -136,7 +136,7 @@
 			goto nla_put_failure;
 
 		pvid = br_get_pvid(pv);
-		for_each_set_bit(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) {
+		for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
 			vinfo.vid = vid;
 			vinfo.flags = 0;
 			if (vid == pvid)
@@ -203,7 +203,7 @@
 	       struct net_device *dev, u32 filter_mask)
 {
 	int err = 0;
-	struct net_bridge_port *port = br_port_get_rcu(dev);
+	struct net_bridge_port *port = br_port_get_rtnl(dev);
 
 	/* not a bridge port and  */
 	if (!port && !(filter_mask & RTEXT_FILTER_BRVLAN))
@@ -443,7 +443,7 @@
 	struct net_port_vlans *pv;
 
 	if (br_port_exists(dev))
-		pv = nbp_get_vlan_info(br_port_get_rcu(dev));
+		pv = nbp_get_vlan_info(br_port_get_rtnl(dev));
 	else if (dev->priv_flags & IFF_EBRIDGE)
 		pv = br_get_vlan_info((struct net_bridge *)netdev_priv(dev));
 	else
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index d2c043a..e696833 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -183,13 +183,10 @@
 
 static inline struct net_bridge_port *br_port_get_rcu(const struct net_device *dev)
 {
-	struct net_bridge_port *port =
-			rcu_dereference_rtnl(dev->rx_handler_data);
-
-	return br_port_exists(dev) ? port : NULL;
+	return rcu_dereference(dev->rx_handler_data);
 }
 
-static inline struct net_bridge_port *br_port_get_rtnl(struct net_device *dev)
+static inline struct net_bridge_port *br_port_get_rtnl(const struct net_device *dev)
 {
 	return br_port_exists(dev) ?
 		rtnl_dereference(dev->rx_handler_data) : NULL;
@@ -714,6 +711,7 @@
 extern void br_init_port(struct net_bridge_port *p);
 extern void br_become_designated_port(struct net_bridge_port *p);
 
+extern void __br_set_forward_delay(struct net_bridge *br, unsigned long t);
 extern int br_set_forward_delay(struct net_bridge *br, unsigned long x);
 extern int br_set_hello_time(struct net_bridge *br, unsigned long x);
 extern int br_set_max_age(struct net_bridge *br, unsigned long x);
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index 1c0a50f..3c86f05 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -209,7 +209,7 @@
 	p->designated_age = jiffies - bpdu->message_age;
 
 	mod_timer(&p->message_age_timer, jiffies
-		  + (p->br->max_age - bpdu->message_age));
+		  + (bpdu->max_age - bpdu->message_age));
 }
 
 /* called under bridge lock */
@@ -544,18 +544,27 @@
 
 }
 
-int br_set_forward_delay(struct net_bridge *br, unsigned long val)
+void __br_set_forward_delay(struct net_bridge *br, unsigned long t)
 {
-	unsigned long t = clock_t_to_jiffies(val);
-
-	if (br->stp_enabled != BR_NO_STP &&
-	    (t < BR_MIN_FORWARD_DELAY || t > BR_MAX_FORWARD_DELAY))
-		return -ERANGE;
-
-	spin_lock_bh(&br->lock);
 	br->bridge_forward_delay = t;
 	if (br_is_root_bridge(br))
 		br->forward_delay = br->bridge_forward_delay;
+}
+
+int br_set_forward_delay(struct net_bridge *br, unsigned long val)
+{
+	unsigned long t = clock_t_to_jiffies(val);
+	int err = -ERANGE;
+
+	spin_lock_bh(&br->lock);
+	if (br->stp_enabled != BR_NO_STP &&
+	    (t < BR_MIN_FORWARD_DELAY || t > BR_MAX_FORWARD_DELAY))
+		goto unlock;
+
+	__br_set_forward_delay(br, t);
+	err = 0;
+
+unlock:
 	spin_unlock_bh(&br->lock);
-	return 0;
+	return err;
 }
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index d45e760..656a6f3 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -129,6 +129,14 @@
 	char *envp[] = { NULL };
 
 	r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
+
+	spin_lock_bh(&br->lock);
+
+	if (br->bridge_forward_delay < BR_MIN_FORWARD_DELAY)
+		__br_set_forward_delay(br, BR_MIN_FORWARD_DELAY);
+	else if (br->bridge_forward_delay > BR_MAX_FORWARD_DELAY)
+		__br_set_forward_delay(br, BR_MAX_FORWARD_DELAY);
+
 	if (r == 0) {
 		br->stp_enabled = BR_USER_STP;
 		br_debug(br, "userspace STP started\n");
@@ -137,10 +145,10 @@
 		br_debug(br, "using kernel STP\n");
 
 		/* To start timers on any ports left in blocking */
-		spin_lock_bh(&br->lock);
 		br_port_state_selection(br);
-		spin_unlock_bh(&br->lock);
 	}
+
+	spin_unlock_bh(&br->lock);
 }
 
 static void br_stp_stop(struct net_bridge *br)
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index bd58b45..9a9ffe7 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -108,7 +108,7 @@
 
 	clear_bit(vid, v->vlan_bitmap);
 	v->num_vlans--;
-	if (bitmap_empty(v->vlan_bitmap, BR_VLAN_BITMAP_LEN)) {
+	if (bitmap_empty(v->vlan_bitmap, VLAN_N_VID)) {
 		if (v->port_idx)
 			rcu_assign_pointer(v->parent.port->vlan_info, NULL);
 		else
@@ -122,7 +122,7 @@
 {
 	smp_wmb();
 	v->pvid = 0;
-	bitmap_zero(v->vlan_bitmap, BR_VLAN_BITMAP_LEN);
+	bitmap_zero(v->vlan_bitmap, VLAN_N_VID);
 	if (v->port_idx)
 		rcu_assign_pointer(v->parent.port->vlan_info, NULL);
 	else
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index 2bd4b58..0f45522 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -293,9 +293,10 @@
 
 		count = cfctrl_cancel_req(&cfctrl->serv.layer,
 						user_layer);
-		if (count != 1)
+		if (count != 1) {
 			pr_err("Could not remove request (%d)", count);
 			return -ENODEV;
+		}
 	}
 	return 0;
 }
diff --git a/net/ceph/auth_none.c b/net/ceph/auth_none.c
index 925ca58..8c93fa8 100644
--- a/net/ceph/auth_none.c
+++ b/net/ceph/auth_none.c
@@ -39,6 +39,11 @@
 	return xi->starting;
 }
 
+static int build_request(struct ceph_auth_client *ac, void *buf, void *end)
+{
+	return 0;
+}
+
 /*
  * the generic auth code decode the global_id, and we carry no actual
  * authenticate state, so nothing happens here.
@@ -106,6 +111,7 @@
 	.destroy = destroy,
 	.is_authenticated = is_authenticated,
 	.should_authenticate = should_authenticate,
+	.build_request = build_request,
 	.handle_reply = handle_reply,
 	.create_authorizer = ceph_auth_none_create_authorizer,
 	.destroy_authorizer = ceph_auth_none_destroy_authorizer,
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 3a246a6..46ec767 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -2130,6 +2130,8 @@
 			dout("osdc_start_request failed map, "
 				" will retry %lld\n", req->r_tid);
 			rc = 0;
+		} else {
+			__unregister_request(osdc, req);
 		}
 		goto out_unlock;
 	}
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index 603ddd9..dbd9a47 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -1129,7 +1129,7 @@
 
 	/* pg_temp? */
 	pgid.seed = ceph_stable_mod(pgid.seed, pool->pg_num,
-				    pool->pgp_num_mask);
+				    pool->pg_num_mask);
 	pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid);
 	if (pg) {
 		*num = pg->len;
diff --git a/net/compat.c b/net/compat.c
index f0a1ba6..8903258 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -71,6 +71,8 @@
 	    __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
 	    __get_user(kmsg->msg_flags, &umsg->msg_flags))
 		return -EFAULT;
+	if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
+		return -EINVAL;
 	kmsg->msg_name = compat_ptr(tmp1);
 	kmsg->msg_iov = compat_ptr(tmp2);
 	kmsg->msg_control = compat_ptr(tmp3);
diff --git a/net/core/dev.c b/net/core/dev.c
index faebb39..7ddbb31 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3513,8 +3513,15 @@
 		}
 	}
 
-	if (vlan_tx_nonzero_tag_present(skb))
-		skb->pkt_type = PACKET_OTHERHOST;
+	if (unlikely(vlan_tx_tag_present(skb))) {
+		if (vlan_tx_tag_get_id(skb))
+			skb->pkt_type = PACKET_OTHERHOST;
+		/* Note: we might in the future use prio bits
+		 * and set skb->priority like in vlan_do_receive()
+		 * For the time being, just ignore Priority Code Point
+		 */
+		skb->vlan_tci = 0;
+	}
 
 	/* deliver only exact match when indicated */
 	null_or_dev = deliver_exact ? skb->dev : NULL;
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 00ee068..f97101b 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -40,7 +40,7 @@
 		struct iphdr _iph;
 ip:
 		iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
-		if (!iph)
+		if (!iph || iph->ihl < 5)
 			return false;
 
 		if (ip_is_fragment(iph))
@@ -149,8 +149,8 @@
 	if (poff >= 0) {
 		__be32 *ports, _ports;
 
-		nhoff += poff;
-		ports = skb_header_pointer(skb, nhoff, sizeof(_ports), &_ports);
+		ports = skb_header_pointer(skb, nhoff + poff,
+					   sizeof(_ports), &_ports);
 		if (ports)
 			flow->ports = *ports;
 	}
@@ -345,14 +345,9 @@
 		if (new_index < 0)
 			new_index = skb_tx_hash(dev, skb);
 
-		if (queue_index != new_index && sk) {
-			struct dst_entry *dst =
-				    rcu_dereference_check(sk->sk_dst_cache, 1);
-
-			if (dst && skb_dst(skb) == dst)
-				sk_tx_queue_set(sk, queue_index);
-
-		}
+		if (queue_index != new_index && sk &&
+		    rcu_access_pointer(sk->sk_dst_cache))
+			sk_tx_queue_set(sk, new_index);
 
 		queue_index = new_index;
 	}
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 703cd2a..c7c7c86 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -231,7 +231,7 @@
 				   we must kill timers etc. and move
 				   it to safe state.
 				 */
-				skb_queue_purge(&n->arp_queue);
+				__skb_queue_purge(&n->arp_queue);
 				n->arp_queue_len_bytes = 0;
 				n->output = neigh_blackhole;
 				if (n->nud_state & NUD_VALID)
@@ -286,7 +286,7 @@
 	if (!n)
 		goto out_entries;
 
-	skb_queue_head_init(&n->arp_queue);
+	__skb_queue_head_init(&n->arp_queue);
 	rwlock_init(&n->lock);
 	seqlock_init(&n->ha_lock);
 	n->updated	  = n->used = now;
@@ -708,7 +708,9 @@
 	if (neigh_del_timer(neigh))
 		pr_warn("Impossible event\n");
 
-	skb_queue_purge(&neigh->arp_queue);
+	write_lock_bh(&neigh->lock);
+	__skb_queue_purge(&neigh->arp_queue);
+	write_unlock_bh(&neigh->lock);
 	neigh->arp_queue_len_bytes = 0;
 
 	if (dev->netdev_ops->ndo_neigh_destroy)
@@ -858,7 +860,7 @@
 		neigh->ops->error_report(neigh, skb);
 		write_lock(&neigh->lock);
 	}
-	skb_queue_purge(&neigh->arp_queue);
+	__skb_queue_purge(&neigh->arp_queue);
 	neigh->arp_queue_len_bytes = 0;
 }
 
@@ -1213,7 +1215,7 @@
 
 			write_lock_bh(&neigh->lock);
 		}
-		skb_queue_purge(&neigh->arp_queue);
+		__skb_queue_purge(&neigh->arp_queue);
 		neigh->arp_queue_len_bytes = 0;
 	}
 out:
@@ -1446,16 +1448,18 @@
 		atomic_set(&p->refcnt, 1);
 		p->reachable_time =
 				neigh_rand_reach_time(p->base_reachable_time);
-
-		if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
-			kfree(p);
-			return NULL;
-		}
-
 		dev_hold(dev);
 		p->dev = dev;
 		write_pnet(&p->net, hold_net(net));
 		p->sysctl_table = NULL;
+
+		if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
+			release_net(net);
+			dev_put(dev);
+			kfree(p);
+			return NULL;
+		}
+
 		write_lock_bh(&tbl->lock);
 		p->next		= tbl->parms.next;
 		tbl->parms.next = p;
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index cec074b..b04f738 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -550,7 +550,7 @@
 		return;
 
 	proto = ntohs(eth_hdr(skb)->h_proto);
-	if (proto == ETH_P_IP) {
+	if (proto == ETH_P_ARP) {
 		struct arphdr *arp;
 		unsigned char *arp_ptr;
 		/* No arp on this interface */
@@ -1289,15 +1289,14 @@
 
 void netpoll_cleanup(struct netpoll *np)
 {
-	if (!np->dev)
-		return;
-
 	rtnl_lock();
+	if (!np->dev)
+		goto out;
 	__netpoll_cleanup(np);
-	rtnl_unlock();
-
 	dev_put(np->dev);
 	np->dev = NULL;
+out:
+	rtnl_unlock();
 }
 EXPORT_SYMBOL(netpoll_cleanup);
 
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index a08bd2b..fd01eca 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -2142,7 +2142,7 @@
 	/* If aging addresses are supported device will need to
 	 * implement its own handler for this.
 	 */
-	if (ndm->ndm_state & NUD_PERMANENT) {
+	if (!(ndm->ndm_state & NUD_PERMANENT)) {
 		pr_info("%s: FDB only supports static addresses\n", dev->name);
 		return -EINVAL;
 	}
@@ -2374,7 +2374,7 @@
 	struct nlattr *extfilt;
 	u32 filter_mask = 0;
 
-	extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct rtgenmsg),
+	extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg),
 				  IFLA_EXT_MASK);
 	if (extfilt)
 		filter_mask = nla_get_u32(extfilt);
diff --git a/net/core/scm.c b/net/core/scm.c
index 03795d0..b4da80b 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -54,7 +54,7 @@
 		return -EINVAL;
 
 	if ((creds->pid == task_tgid_vnr(current) ||
-	     ns_capable(current->nsproxy->pid_ns->user_ns, CAP_SYS_ADMIN)) &&
+	     ns_capable(task_active_pid_ns(current)->user_ns, CAP_SYS_ADMIN)) &&
 	    ((uid_eq(uid, cred->uid)   || uid_eq(uid, cred->euid) ||
 	      uid_eq(uid, cred->suid)) || nsown_capable(CAP_SETUID)) &&
 	    ((gid_eq(gid, cred->gid)   || gid_eq(gid, cred->egid) ||
diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
index 6a2f13c..8d9d05e 100644
--- a/net/core/secure_seq.c
+++ b/net/core/secure_seq.c
@@ -10,12 +10,27 @@
 
 #include <net/secure_seq.h>
 
-static u32 net_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned;
+#if IS_ENABLED(CONFIG_IPV6) || IS_ENABLED(CONFIG_INET)
+#define NET_SECRET_SIZE (MD5_MESSAGE_BYTES / 4)
 
-void net_secret_init(void)
+static u32 net_secret[NET_SECRET_SIZE] ____cacheline_aligned;
+
+static void net_secret_init(void)
 {
-	get_random_bytes(net_secret, sizeof(net_secret));
+	u32 tmp;
+	int i;
+
+	if (likely(net_secret[0]))
+		return;
+
+	for (i = NET_SECRET_SIZE; i > 0;) {
+		do {
+			get_random_bytes(&tmp, sizeof(tmp));
+		} while (!tmp);
+		cmpxchg(&net_secret[--i], 0, tmp);
+	}
 }
+#endif
 
 #ifdef CONFIG_INET
 static u32 seq_scale(u32 seq)
@@ -42,6 +57,7 @@
 	u32 hash[MD5_DIGEST_WORDS];
 	u32 i;
 
+	net_secret_init();
 	memcpy(hash, saddr, 16);
 	for (i = 0; i < 4; i++)
 		secret[i] = net_secret[i] + (__force u32)daddr[i];
@@ -63,6 +79,7 @@
 	u32 hash[MD5_DIGEST_WORDS];
 	u32 i;
 
+	net_secret_init();
 	memcpy(hash, saddr, 16);
 	for (i = 0; i < 4; i++)
 		secret[i] = net_secret[i] + (__force u32) daddr[i];
@@ -82,6 +99,7 @@
 {
 	u32 hash[MD5_DIGEST_WORDS];
 
+	net_secret_init();
 	hash[0] = (__force __u32) daddr;
 	hash[1] = net_secret[13];
 	hash[2] = net_secret[14];
@@ -96,6 +114,7 @@
 {
 	__u32 hash[4];
 
+	net_secret_init();
 	memcpy(hash, daddr, 16);
 	md5_transform(hash, net_secret);
 
@@ -107,6 +126,7 @@
 {
 	u32 hash[MD5_DIGEST_WORDS];
 
+	net_secret_init();
 	hash[0] = (__force u32)saddr;
 	hash[1] = (__force u32)daddr;
 	hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
@@ -121,6 +141,7 @@
 {
 	u32 hash[MD5_DIGEST_WORDS];
 
+	net_secret_init();
 	hash[0] = (__force u32)saddr;
 	hash[1] = (__force u32)daddr;
 	hash[2] = (__force u32)dport ^ net_secret[14];
@@ -140,6 +161,7 @@
 	u32 hash[MD5_DIGEST_WORDS];
 	u64 seq;
 
+	net_secret_init();
 	hash[0] = (__force u32)saddr;
 	hash[1] = (__force u32)daddr;
 	hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
@@ -164,6 +186,7 @@
 	u64 seq;
 	u32 i;
 
+	net_secret_init();
 	memcpy(hash, saddr, 16);
 	for (i = 0; i < 4; i++)
 		secret[i] = net_secret[i] + daddr[i];
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 1c1738c..f2cb585 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -492,13 +492,14 @@
 	skb_drop_list(&skb_shinfo(skb)->frag_list);
 }
 
-static void skb_clone_fraglist(struct sk_buff *skb)
+void skb_clone_fraglist(struct sk_buff *skb)
 {
 	struct sk_buff *list;
 
 	skb_walk_frags(skb, list)
 		skb_get(list);
 }
+EXPORT_SYMBOL(skb_clone_fraglist);
 
 static void skb_free_head(struct sk_buff *skb)
 {
@@ -913,7 +914,7 @@
 	skb->inner_mac_header += off;
 }
 
-static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
+void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 {
 #ifndef NET_SKBUFF_DATA_USES_OFFSET
 	/*
@@ -931,6 +932,7 @@
 	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
 	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
 }
+EXPORT_SYMBOL(copy_skb_header);
 
 static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
 {
diff --git a/net/core/sock.c b/net/core/sock.c
index d6d024c..6565431 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2271,6 +2271,7 @@
 
 	sk->sk_stamp = ktime_set(-1L, 0);
 
+	sk->sk_pacing_rate = ~0U;
 	/*
 	 * Before updating sk_refcnt, we must commit prior changes to memory
 	 * (Documentation/RCU/rculist_nulls.txt for details)
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index cfdb46a..2ff093b 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -20,7 +20,9 @@
 #include <net/sock.h>
 #include <net/net_ratelimit.h>
 
+static int zero = 0;
 static int one = 1;
+static int ushort_max = USHRT_MAX;
 
 #ifdef CONFIG_RPS
 static int rps_sock_flow_sysctl(ctl_table *table, int write,
@@ -204,7 +206,9 @@
 		.data		= &init_net.core.sysctl_somaxconn,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec
+		.extra1		= &zero,
+		.extra2		= &ushort_max,
+		.proc_handler	= proc_dointvec_minmax
 	},
 	{ }
 };
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 9c61f9c..6cf9f77 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -135,6 +135,7 @@
 
 		if (dst)
 			dst->ops->redirect(dst, sk, skb);
+		goto out;
 	}
 
 	if (type == ICMPV6_PKT_TOOBIG) {
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 109017e..f022e0e 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -276,10 +276,8 @@
 		get_random_bytes(&rnd, sizeof(rnd));
 	} while (rnd == 0);
 
-	if (cmpxchg(&inet_ehash_secret, 0, rnd) == 0) {
+	if (cmpxchg(&inet_ehash_secret, 0, rnd) == 0)
 		get_random_bytes(&ipv6_hash_secret, sizeof(ipv6_hash_secret));
-		net_secret_init();
-	}
 }
 EXPORT_SYMBOL(build_ehash_secret);
 
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index b326e67..e392355 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -772,7 +772,7 @@
 		ci = nla_data(tb[IFA_CACHEINFO]);
 		if (!ci->ifa_valid || ci->ifa_prefered > ci->ifa_valid) {
 			err = -EINVAL;
-			goto errout;
+			goto errout_free;
 		}
 		*pvalid_lft = ci->ifa_valid;
 		*pprefered_lft = ci->ifa_prefered;
@@ -780,6 +780,8 @@
 
 	return ifa;
 
+errout_free:
+	inet_free_ifa(ifa);
 errout:
 	return ERR_PTR(err);
 }
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 49616fe..6e8a13d 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -71,7 +71,6 @@
 #include <linux/init.h>
 #include <linux/list.h>
 #include <linux/slab.h>
-#include <linux/prefetch.h>
 #include <linux/export.h>
 #include <net/net_namespace.h>
 #include <net/ip.h>
@@ -1761,10 +1760,8 @@
 			if (!c)
 				continue;
 
-			if (IS_LEAF(c)) {
-				prefetch(rcu_dereference_rtnl(p->child[idx]));
+			if (IS_LEAF(c))
 				return (struct leaf *) c;
-			}
 
 			/* Rescan start scanning in new node */
 			p = (struct tnode *) c;
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index d8c2327..089b4af 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -343,7 +343,7 @@
 	pip->saddr    = fl4.saddr;
 	pip->protocol = IPPROTO_IGMP;
 	pip->tot_len  = 0;	/* filled in later */
-	ip_select_ident(pip, &rt->dst, NULL);
+	ip_select_ident(skb, &rt->dst, NULL);
 	((u8 *)&pip[1])[0] = IPOPT_RA;
 	((u8 *)&pip[1])[1] = 4;
 	((u8 *)&pip[1])[2] = 0;
@@ -687,7 +687,7 @@
 	iph->daddr    = dst;
 	iph->saddr    = fl4.saddr;
 	iph->protocol = IPPROTO_IGMP;
-	ip_select_ident(iph, &rt->dst, NULL);
+	ip_select_ident(skb, &rt->dst, NULL);
 	((u8 *)&iph[1])[0] = IPOPT_RA;
 	((u8 *)&iph[1])[1] = 4;
 	((u8 *)&iph[1])[2] = 0;
@@ -709,7 +709,7 @@
 
 	in_dev->mr_gq_running = 0;
 	igmpv3_send_report(in_dev, NULL);
-	__in_dev_put(in_dev);
+	in_dev_put(in_dev);
 }
 
 static void igmp_ifc_timer_expire(unsigned long data)
@@ -721,7 +721,7 @@
 		in_dev->mr_ifc_count--;
 		igmp_ifc_start_timer(in_dev, IGMP_Unsolicited_Report_Interval);
 	}
-	__in_dev_put(in_dev);
+	in_dev_put(in_dev);
 }
 
 static void igmp_ifc_event(struct in_device *in_dev)
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 6af375a..c95848d 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -287,7 +287,7 @@
 			if (unlikely(!INET_TW_MATCH(sk, net, acookie,
 						    saddr, daddr, ports,
 						    dif))) {
-				sock_put(sk);
+				inet_twsk_put(inet_twsk(sk));
 				goto begintw;
 			}
 			goto out;
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index 000e3d2..33d5537 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -32,8 +32,8 @@
  *  At the moment of writing this notes identifier of IP packets is generated
  *  to be unpredictable using this code only for packets subjected
  *  (actually or potentially) to defragmentation.  I.e. DF packets less than
- *  PMTU in size uses a constant ID and do not use this code (see
- *  ip_select_ident() in include/net/ip.h).
+ *  PMTU in size when local fragmentation is disabled use a constant ID and do
+ *  not use this code (see ip_select_ident() in include/net/ip.h).
  *
  *  Route cache entries hold references to our nodes.
  *  New cache entries get references via lookup by destination IP address in
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 2a83591..64e4e98 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -335,7 +335,7 @@
 				  iph->saddr, iph->daddr, tpi.key);
 
 	if (tunnel) {
-		ip_tunnel_rcv(tunnel, skb, &tpi, log_ecn_error);
+		ip_tunnel_rcv(tunnel, skb, &tpi, hdr_len, log_ecn_error);
 		return 0;
 	}
 	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
@@ -503,10 +503,11 @@
 
 	if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
 		return -EFAULT;
-	if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
-	    p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)) ||
-	    ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING))) {
-		return -EINVAL;
+	if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
+		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
+		    p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)) ||
+		    ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING)))
+			return -EINVAL;
 	}
 	p.i_flags = gre_flags_to_tnl_flags(p.i_flags);
 	p.o_flags = gre_flags_to_tnl_flags(p.o_flags);
@@ -571,7 +572,7 @@
 	if (daddr)
 		memcpy(&iph->daddr, daddr, 4);
 	if (iph->daddr)
-		return t->hlen;
+		return t->hlen + sizeof(*iph);
 
 	return -(t->hlen + sizeof(*iph));
 }
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 3da817b..15e3e68 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -190,10 +190,7 @@
 {
 	struct net *net = dev_net(skb->dev);
 
-	__skb_pull(skb, ip_hdrlen(skb));
-
-	/* Point into the IP datagram, just past the header. */
-	skb_reset_transport_header(skb);
+	__skb_pull(skb, skb_network_header_len(skb));
 
 	rcu_read_lock();
 	{
@@ -437,6 +434,8 @@
 		goto drop;
 	}
 
+	skb->transport_header = skb->network_header + iph->ihl*4;
+
 	/* Remove any debris in the socket control block */
 	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
 
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 8e20e94..91e3b15 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -148,7 +148,7 @@
 	iph->daddr    = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
 	iph->saddr    = saddr;
 	iph->protocol = sk->sk_protocol;
-	ip_select_ident(iph, &rt->dst, sk);
+	ip_select_ident(skb, &rt->dst, sk);
 
 	if (opt && opt->opt.optlen) {
 		iph->ihl += opt->opt.optlen>>2;
@@ -394,7 +394,7 @@
 		ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0);
 	}
 
-	ip_select_ident_more(iph, &rt->dst, sk,
+	ip_select_ident_more(skb, &rt->dst, sk,
 			     (skb_shinfo(skb)->gso_segs ?: 1) - 1);
 
 	skb->priority = sk->sk_priority;
@@ -844,7 +844,7 @@
 		csummode = CHECKSUM_PARTIAL;
 
 	cork->length += length;
-	if (((length > mtu) || (skb && skb_is_gso(skb))) &&
+	if (((length > mtu) || (skb && skb_has_frags(skb))) &&
 	    (sk->sk_protocol == IPPROTO_UDP) &&
 	    (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len) {
 		err = ip_ufo_append_data(sk, queue, getfrag, from, length,
@@ -1324,7 +1324,7 @@
 	else
 		ttl = ip_select_ttl(inet, &rt->dst);
 
-	iph = (struct iphdr *)skb->data;
+	iph = ip_hdr(skb);
 	iph->version = 4;
 	iph->ihl = 5;
 	iph->tos = inet->tos;
@@ -1332,7 +1332,7 @@
 	iph->ttl = ttl;
 	iph->protocol = sk->sk_protocol;
 	ip_copy_addrs(iph, fl4);
-	ip_select_ident(iph, &rt->dst, sk);
+	ip_select_ident(skb, &rt->dst, sk);
 
 	if (opt) {
 		iph->ihl += opt->optlen>>2;
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 7fa8f08..46dcf32 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -402,7 +402,7 @@
 }
 
 int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
-		  const struct tnl_ptk_info *tpi, bool log_ecn_error)
+		  const struct tnl_ptk_info *tpi, int hdr_len, bool log_ecn_error)
 {
 	struct pcpu_tstats *tstats;
 	const struct iphdr *iph = ip_hdr(skb);
@@ -413,7 +413,7 @@
 	skb->protocol = tpi->proto;
 
 	skb->mac_header = skb->network_header;
-	__pskb_pull(skb, tunnel->hlen);
+	__pskb_pull(skb, hdr_len);
 	skb_postpull_rcsum(skb, skb_transport_header(skb), tunnel->hlen);
 #ifdef CONFIG_NET_IPGRE_BROADCAST
 	if (ipv4_is_multicast(iph->daddr)) {
@@ -486,6 +486,53 @@
 }
 EXPORT_SYMBOL_GPL(ip_tunnel_rcv);
 
+static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
+			    struct rtable *rt, __be16 df)
+{
+	struct ip_tunnel *tunnel = netdev_priv(dev);
+	int pkt_size = skb->len - tunnel->hlen - dev->hard_header_len;
+	int mtu;
+
+	if (df)
+		mtu = dst_mtu(&rt->dst) - dev->hard_header_len
+					- sizeof(struct iphdr) - tunnel->hlen;
+	else
+		mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
+
+	if (skb_dst(skb))
+		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
+
+	if (skb->protocol == htons(ETH_P_IP)) {
+		if (!skb_is_gso(skb) &&
+		    (df & htons(IP_DF)) && mtu < pkt_size) {
+			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
+			return -E2BIG;
+		}
+	}
+#if IS_ENABLED(CONFIG_IPV6)
+	else if (skb->protocol == htons(ETH_P_IPV6)) {
+		struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);
+
+		if (rt6 && mtu < dst_mtu(skb_dst(skb)) &&
+			   mtu >= IPV6_MIN_MTU) {
+			if ((tunnel->parms.iph.daddr &&
+			    !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
+			    rt6->rt6i_dst.plen == 128) {
+				rt6->rt6i_flags |= RTF_MODIFIED;
+				dst_metric_set(skb_dst(skb), RTAX_MTU, mtu);
+			}
+		}
+
+		if (!skb_is_gso(skb) && mtu >= IPV6_MIN_MTU &&
+					mtu < pkt_size) {
+			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+			return -E2BIG;
+		}
+	}
+#endif
+	return 0;
+}
+
 void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 		    const struct iphdr *tnl_params)
 {
@@ -499,7 +546,6 @@
 	struct net_device *tdev;	/* Device to other host */
 	unsigned int max_headroom;	/* The extra header space needed */
 	__be32 dst;
-	int mtu;
 
 	inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
 
@@ -579,50 +625,11 @@
 		goto tx_error;
 	}
 
-	df = tnl_params->frag_off;
 
-	if (df)
-		mtu = dst_mtu(&rt->dst) - dev->hard_header_len
-					- sizeof(struct iphdr);
-	else
-		mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
-
-	if (skb_dst(skb))
-		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
-
-	if (skb->protocol == htons(ETH_P_IP)) {
-		df |= (inner_iph->frag_off&htons(IP_DF));
-
-		if (!skb_is_gso(skb) &&
-		    (inner_iph->frag_off&htons(IP_DF)) &&
-		     mtu < ntohs(inner_iph->tot_len)) {
-			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
-			ip_rt_put(rt);
-			goto tx_error;
-		}
+	if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off)) {
+		ip_rt_put(rt);
+		goto tx_error;
 	}
-#if IS_ENABLED(CONFIG_IPV6)
-	else if (skb->protocol == htons(ETH_P_IPV6)) {
-		struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);
-
-		if (rt6 && mtu < dst_mtu(skb_dst(skb)) &&
-		    mtu >= IPV6_MIN_MTU) {
-			if ((tunnel->parms.iph.daddr &&
-			    !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
-			    rt6->rt6i_dst.plen == 128) {
-				rt6->rt6i_flags |= RTF_MODIFIED;
-				dst_metric_set(skb_dst(skb), RTAX_MTU, mtu);
-			}
-		}
-
-		if (!skb_is_gso(skb) && mtu >= IPV6_MIN_MTU &&
-		    mtu < skb->len) {
-			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
-			ip_rt_put(rt);
-			goto tx_error;
-		}
-	}
-#endif
 
 	if (tunnel->err_count > 0) {
 		if (time_before(jiffies,
@@ -646,15 +653,19 @@
 			ttl = ip4_dst_hoplimit(&rt->dst);
 	}
 
+	df = tnl_params->frag_off;
+	if (skb->protocol == htons(ETH_P_IP))
+		df |= (inner_iph->frag_off&htons(IP_DF));
+
 	max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct iphdr)
 					       + rt->dst.header_len;
-	if (max_headroom > dev->needed_headroom) {
+	if (max_headroom > dev->needed_headroom)
 		dev->needed_headroom = max_headroom;
-		if (skb_cow_head(skb, dev->needed_headroom)) {
-			dev->stats.tx_dropped++;
-			dev_kfree_skb(skb);
-			return;
-		}
+
+	if (skb_cow_head(skb, dev->needed_headroom)) {
+		dev->stats.tx_dropped++;
+		dev_kfree_skb(skb);
+		return;
 	}
 
 	skb_dst_drop(skb);
@@ -675,7 +686,7 @@
 	iph->daddr	=	fl4.daddr;
 	iph->saddr	=	fl4.saddr;
 	iph->ttl	=	ttl;
-	tunnel_ip_select_ident(skb, inner_iph, &rt->dst);
+	__ip_select_ident(iph, &rt->dst, (skb_shinfo(skb)->gso_segs ?: 1) - 1);
 
 	iptunnel_xmit(skb, dev);
 	return;
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index c118f6b..0656041 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -285,8 +285,17 @@
 	tunnel = vti_tunnel_lookup(dev_net(skb->dev), iph->saddr, iph->daddr);
 	if (tunnel != NULL) {
 		struct pcpu_tstats *tstats;
+		u32 oldmark = skb->mark;
+		int ret;
 
-		if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
+
+		/* temporarily mark the skb with the tunnel o_key, to
+		 * only match policies with this mark.
+		 */
+		skb->mark = be32_to_cpu(tunnel->parms.o_key);
+		ret = xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb);
+		skb->mark = oldmark;
+		if (!ret)
 			return -1;
 
 		tstats = this_cpu_ptr(tunnel->dev->tstats);
@@ -295,7 +304,6 @@
 		tstats->rx_bytes += skb->len;
 		u64_stats_update_end(&tstats->syncp);
 
-		skb->mark = 0;
 		secpath_reset(skb);
 		skb->dev = tunnel->dev;
 		return 1;
@@ -327,7 +335,7 @@
 
 	memset(&fl4, 0, sizeof(fl4));
 	flowi4_init_output(&fl4, tunnel->parms.link,
-			   be32_to_cpu(tunnel->parms.i_key), RT_TOS(tos),
+			   be32_to_cpu(tunnel->parms.o_key), RT_TOS(tos),
 			   RT_SCOPE_UNIVERSE,
 			   IPPROTO_IPIP, 0,
 			   dst, tiph->saddr, 0, 0);
@@ -606,17 +614,10 @@
 	struct iphdr *iph = &tunnel->parms.iph;
 	struct vti_net *ipn = net_generic(dev_net(dev), vti_net_id);
 
-	tunnel->dev = dev;
-	strcpy(tunnel->parms.name, dev->name);
-
 	iph->version		= 4;
 	iph->protocol		= IPPROTO_IPIP;
 	iph->ihl		= 5;
 
-	dev->tstats = alloc_percpu(struct pcpu_tstats);
-	if (!dev->tstats)
-		return -ENOMEM;
-
 	dev_hold(dev);
 	rcu_assign_pointer(ipn->tunnels_wc[0], tunnel);
 	return 0;
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 77bfcce..f5cc7b3 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -195,7 +195,7 @@
 	if (tunnel) {
 		if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
 			goto drop;
-		return ip_tunnel_rcv(tunnel, skb, &tpi, log_ecn_error);
+		return ip_tunnel_rcv(tunnel, skb, &tpi, 0, log_ecn_error);
 	}
 
 	return -1;
@@ -240,11 +240,13 @@
 	if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
 		return -EFAULT;
 
-	if (p.iph.version != 4 || p.iph.protocol != IPPROTO_IPIP ||
-			p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)))
-		return -EINVAL;
-	if (p.i_key || p.o_key || p.i_flags || p.o_flags)
-		return -EINVAL;
+	if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
+		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_IPIP ||
+		    p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)))
+			return -EINVAL;
+	}
+
+	p.i_key = p.o_key = p.i_flags = p.o_flags = 0;
 	if (p.iph.ttl)
 		p.iph.frag_off |= htons(IP_DF);
 
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 2c538ec..9e10d89 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1658,7 +1658,7 @@
 	iph->protocol	=	IPPROTO_IPIP;
 	iph->ihl	=	5;
 	iph->tot_len	=	htons(skb->len);
-	ip_select_ident(iph, skb_dst(skb), NULL);
+	ip_select_ident(skb, skb_dst(skb), NULL);
 	ip_send_check(iph);
 
 	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 32d894f..c106921 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -295,6 +295,21 @@
 }
 EXPORT_SYMBOL_GPL(ping_close);
 
+void ping_set_saddr(struct sock *sk, struct sockaddr *saddr)
+{
+	if (saddr->sa_family == AF_INET) {
+		struct inet_sock *isk = inet_sk(sk);
+		struct sockaddr_in *addr = (struct sockaddr_in *) saddr;
+		isk->inet_rcv_saddr = isk->inet_saddr = addr->sin_addr.s_addr;
+#if IS_ENABLED(CONFIG_IPV6)
+	} else if (saddr->sa_family == AF_INET6) {
+		struct sockaddr_in6 *addr = (struct sockaddr_in6 *) saddr;
+		struct ipv6_pinfo *np = inet6_sk(sk);
+		np->rcv_saddr = np->saddr = addr->sin6_addr;
+#endif
+	}
+}
+
 /* Checks the bind address and possibly modifies sk->sk_bound_dev_if. */
 int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
 			 struct sockaddr *uaddr, int addr_len) {
@@ -356,8 +371,7 @@
 				return -ENODEV;
 			}
 		}
-		has_addr = pingv6_ops.ipv6_chk_addr(net, &addr->sin6_addr, dev,
-						    scoped);
+		has_addr = ipv6_chk_addr(net, &addr->sin6_addr, dev, scoped);
 		rcu_read_unlock();
 
 		if (!(isk->freebind || isk->transparent || has_addr ||
@@ -373,21 +387,6 @@
 	return 0;
 }
 
-void ping_set_saddr(struct sock *sk, struct sockaddr *saddr)
-{
-	if (saddr->sa_family == AF_INET) {
-		struct inet_sock *isk = inet_sk(sk);
-		struct sockaddr_in *addr = (struct sockaddr_in *) saddr;
-		isk->inet_rcv_saddr = isk->inet_saddr = addr->sin_addr.s_addr;
-#if IS_ENABLED(CONFIG_IPV6)
-	} else if (saddr->sa_family == AF_INET6) {
-		struct sockaddr_in6 *addr = (struct sockaddr_in6 *) saddr;
-		struct ipv6_pinfo *np = inet6_sk(sk);
-		np->rcv_saddr = np->saddr = addr->sin6_addr;
-#endif
-	}
-}
-
 void ping_clear_saddr(struct sock *sk, int dif)
 {
 	sk->sk_bound_dev_if = dif;
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index b828733..0b41ccd 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -387,7 +387,7 @@
 		iph->check   = 0;
 		iph->tot_len = htons(length);
 		if (!iph->id)
-			ip_select_ident(iph, &rt->dst, NULL);
+			ip_select_ident(skb, &rt->dst, NULL);
 
 		iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
 	}
@@ -571,7 +571,8 @@
 	flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
 			   RT_SCOPE_UNIVERSE,
 			   inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
-			   inet_sk_flowi_flags(sk) | FLOWI_FLAG_CAN_SLEEP,
+			   inet_sk_flowi_flags(sk) | FLOWI_FLAG_CAN_SLEEP |
+			    (inet->hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
 			   daddr, saddr, 0, 0,
 			   sock_i_uid(sk));
 
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 42cd979..84d003f 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2029,7 +2029,7 @@
 							      RT_SCOPE_LINK);
 			goto make_route;
 		}
-		if (fl4->saddr) {
+		if (!fl4->saddr) {
 			if (ipv4_is_multicast(fl4->daddr))
 				fl4->saddr = inet_select_addr(dev_out, 0,
 							      fl4->flowi4_scope);
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index cc5fa7d..2c707a9 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -29,6 +29,7 @@
 static int zero;
 static int one = 1;
 static int four = 4;
+static int gso_max_segs = GSO_MAX_SEGS;
 static int tcp_retr1_max = 255;
 static int ip_local_port_range_min[] = { 1, 1 };
 static int ip_local_port_range_max[] = { 65535, 65535 };
@@ -36,6 +37,8 @@
 static int tcp_adv_win_scale_max = 31;
 static int ip_ttl_min = 1;
 static int ip_ttl_max = 255;
+static int tcp_syn_retries_min = 1;
+static int tcp_syn_retries_max = MAX_TCP_SYNCNT;
 static int ip_ping_group_range_min[] = { 0, 0 };
 static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
 
@@ -346,7 +349,9 @@
 		.data		= &sysctl_tcp_syn_retries,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &tcp_syn_retries_min,
+		.extra2		= &tcp_syn_retries_max
 	},
 	{
 		.procname	= "tcp_synack_retries",
@@ -764,6 +769,15 @@
 		.extra2		= &four,
 	},
 	{
+		.procname	= "tcp_min_tso_segs",
+		.data		= &sysctl_tcp_min_tso_segs,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &zero,
+		.extra2		= &gso_max_segs,
+	},
+	{
 		.procname       = "tcp_default_init_rwnd",
 		.data           = &sysctl_tcp_default_init_rwnd,
 		.maxlen         = sizeof(int),
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index e66b8a8..399e2cd 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -286,6 +286,8 @@
 
 int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
 
+int sysctl_tcp_min_tso_segs __read_mostly = 2;
+
 struct percpu_counter tcp_orphan_count;
 EXPORT_SYMBOL_GPL(tcp_orphan_count);
 
@@ -790,12 +792,28 @@
 	xmit_size_goal = mss_now;
 
 	if (large_allowed && sk_can_gso(sk)) {
-		xmit_size_goal = ((sk->sk_gso_max_size - 1) -
-				  inet_csk(sk)->icsk_af_ops->net_header_len -
-				  inet_csk(sk)->icsk_ext_hdr_len -
-				  tp->tcp_header_len);
+		u32 gso_size, hlen;
 
-		/* TSQ : try to have two TSO segments in flight */
+		/* Maybe we should/could use sk->sk_prot->max_header here ? */
+		hlen = inet_csk(sk)->icsk_af_ops->net_header_len +
+		       inet_csk(sk)->icsk_ext_hdr_len +
+		       tp->tcp_header_len;
+
+		/* Goal is to send at least one packet per ms,
+		 * not one big TSO packet every 100 ms.
+		 * This preserves ACK clocking and is consistent
+		 * with tcp_tso_should_defer() heuristic.
+		 */
+		gso_size = sk->sk_pacing_rate / (2 * MSEC_PER_SEC);
+		gso_size = max_t(u32, gso_size,
+				 sysctl_tcp_min_tso_segs * mss_now);
+
+		xmit_size_goal = min_t(u32, gso_size,
+				       sk->sk_gso_max_size - 1 - hlen);
+
+		/* TSQ : try to have at least two segments in flight
+		 * (one in NIC TX ring, another in Qdisc)
+		 */
 		xmit_size_goal = min_t(u32, xmit_size_goal,
 				       sysctl_tcp_limit_output_bytes >> 1);
 
@@ -1122,6 +1140,13 @@
 					goto wait_for_memory;
 
 				/*
+				 * All packets are restored as if they have
+				 * already been sent.
+				 */
+				if (tp->repair)
+					TCP_SKB_CB(skb)->when = tcp_time_stamp;
+
+				/*
 				 * Check whether we can use HW checksum.
 				 */
 				if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
@@ -2453,10 +2478,11 @@
 	case TCP_THIN_DUPACK:
 		if (val < 0 || val > 1)
 			err = -EINVAL;
-		else
+		else {
 			tp->thin_dupack = val;
 			if (tp->thin_dupack)
 				tcp_disable_early_retrans(tp);
+		}
 		break;
 
 	case TCP_REPAIR:
@@ -3527,7 +3553,7 @@
 
 	struct in_addr *in;
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-	struct in6_addr *in6;
+	struct in6_addr *in6 = NULL;
 #endif
 	if (family == AF_INET) {
 		in = &((struct sockaddr_in *)addr)->sin_addr;
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index a9077f4..b6ae92a 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -206,8 +206,8 @@
  */
 static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
 {
-	u64 offs;
-	u32 delta, t, bic_target, max_cnt;
+	u32 delta, bic_target, max_cnt;
+	u64 offs, t;
 
 	ca->ack_cnt++;	/* count the number of ACKs */
 
@@ -250,9 +250,11 @@
 	 * if the cwnd < 1 million packets !!!
 	 */
 
+	t = (s32)(tcp_time_stamp - ca->epoch_start);
+	t += msecs_to_jiffies(ca->delay_min >> 3);
 	/* change the unit from HZ to bictcp_HZ */
-	t = ((tcp_time_stamp + msecs_to_jiffies(ca->delay_min>>3)
-	      - ca->epoch_start) << BICTCP_HZ) / HZ;
+	t <<= BICTCP_HZ;
+	do_div(t, HZ);
 
 	if (t < ca->bic_K)		/* t - K */
 		offs = ca->bic_K - t;
@@ -414,7 +416,7 @@
 		return;
 
 	/* Discard delay samples right after fast recovery */
-	if ((s32)(tcp_time_stamp - ca->epoch_start) < HZ)
+	if (ca->epoch_start && (s32)(tcp_time_stamp - ca->epoch_start) < HZ)
 		return;
 
 	delay = (rtt_us << 3) / USEC_PER_MSEC;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 4189682..c4dac9e 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -700,6 +700,34 @@
 	}
 }
 
+/* Set the sk_pacing_rate to allow proper sizing of TSO packets.
+ * Note: TCP stack does not yet implement pacing.
+ * FQ packet scheduler can be used to implement cheap but effective
+ * TCP pacing, to smooth the burst on large writes when packets
+ * in flight is significantly lower than cwnd (or rwin)
+ */
+static void tcp_update_pacing_rate(struct sock *sk)
+{
+	const struct tcp_sock *tp = tcp_sk(sk);
+	u64 rate;
+
+	/* set sk_pacing_rate to 200 % of current rate (mss * cwnd / srtt) */
+	rate = (u64)tp->mss_cache * 2 * (HZ << 3);
+
+	rate *= max(tp->snd_cwnd, tp->packets_out);
+
+	/* Correction for small srtt : minimum srtt being 8 (1 jiffy << 3),
+	 * be conservative and assume srtt = 1 (125 us instead of 1.25 ms)
+	 * We probably need usec resolution in the future.
+	 * Note: This also takes care of possible srtt=0 case,
+	 * when tcp_rtt_estimator() was not yet called.
+	 */
+	if (tp->srtt > 8 + 2)
+		do_div(rate, tp->srtt);
+
+	sk->sk_pacing_rate = min_t(u64, rate, ~0U);
+}
+
 /* Calculate rto without backoff.  This is the second half of Van Jacobson's
  * routine referred to above.
  */
@@ -1265,7 +1293,10 @@
 		tp->lost_cnt_hint -= tcp_skb_pcount(prev);
 	}
 
-	TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(prev)->tcp_flags;
+	TCP_SKB_CB(prev)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
+	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
+		TCP_SKB_CB(prev)->end_seq++;
+
 	if (skb == tcp_highest_sack(sk))
 		tcp_advance_highest_sack(sk, skb);
 
@@ -3315,7 +3346,7 @@
 			tcp_init_cwnd_reduction(sk, true);
 			tcp_set_ca_state(sk, TCP_CA_CWR);
 			tcp_end_cwnd_reduction(sk);
-			tcp_set_ca_state(sk, TCP_CA_Open);
+			tcp_try_keep_open(sk);
 			NET_INC_STATS_BH(sock_net(sk),
 					 LINUX_MIB_TCPLOSSPROBERECOVERY);
 		}
@@ -3331,7 +3362,7 @@
 	u32 ack_seq = TCP_SKB_CB(skb)->seq;
 	u32 ack = TCP_SKB_CB(skb)->ack_seq;
 	bool is_dupack = false;
-	u32 prior_in_flight;
+	u32 prior_in_flight, prior_cwnd = tp->snd_cwnd, prior_rtt = tp->srtt;
 	u32 prior_fackets;
 	int prior_packets = tp->packets_out;
 	int prior_sacked = tp->sacked_out;
@@ -3439,6 +3470,8 @@
 
 	if (icsk->icsk_pending == ICSK_TIME_RETRANS)
 		tcp_schedule_loss_probe(sk);
+	if (tp->srtt != prior_rtt || tp->snd_cwnd != prior_cwnd)
+		tcp_update_pacing_rate(sk);
 	return 1;
 
 no_queue:
@@ -3599,7 +3632,10 @@
 		++ptr;
 		tp->rx_opt.rcv_tsval = ntohl(*ptr);
 		++ptr;
-		tp->rx_opt.rcv_tsecr = ntohl(*ptr) - tp->tsoffset;
+		if (*ptr)
+			tp->rx_opt.rcv_tsecr = ntohl(*ptr) - tp->tsoffset;
+		else
+			tp->rx_opt.rcv_tsecr = 0;
 		return true;
 	}
 	return false;
@@ -3624,7 +3660,7 @@
 	}
 
 	tcp_parse_options(skb, &tp->rx_opt, 1, NULL);
-	if (tp->rx_opt.saw_tstamp)
+	if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
 		tp->rx_opt.rcv_tsecr -= tp->tsoffset;
 
 	return true;
@@ -4466,7 +4502,8 @@
 		 *   overlaps to the next one.
 		 */
 		if (!tcp_hdr(skb)->syn && !tcp_hdr(skb)->fin &&
-		    (tcp_win_from_space(skb->truesize) > skb->len ||
+		    ((tcp_win_from_space(skb->truesize) > skb->len &&
+				!before(start, TCP_SKB_CB(skb)->seq)) ||
 		     before(TCP_SKB_CB(skb)->seq, start))) {
 			end_of_skbs = false;
 			break;
@@ -5377,7 +5414,7 @@
 	int saved_clamp = tp->rx_opt.mss_clamp;
 
 	tcp_parse_options(skb, &tp->rx_opt, 0, &foc);
-	if (tp->rx_opt.saw_tstamp)
+	if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
 		tp->rx_opt.rcv_tsecr -= tp->tsoffset;
 
 	if (th->ack) {
@@ -5734,6 +5771,8 @@
 				} else
 					tcp_init_metrics(sk);
 
+				tcp_update_pacing_rate(sk);
+
 				/* Prevent spurious tcp_cwnd_restart() on
 				 * first data packet.
 				 */
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 084eac6..0441adf 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -976,6 +976,9 @@
 static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
 				 unsigned int mss_now)
 {
+	/* Make sure we own this skb before messing gso_size/gso_segs */
+	WARN_ON_ONCE(skb_cloned(skb));
+
 	if (skb->len <= mss_now || !sk_can_gso(sk) ||
 	    skb->ip_summed == CHECKSUM_NONE) {
 		/* Avoid the costly divide in the normal
@@ -1057,9 +1060,7 @@
 	if (nsize < 0)
 		nsize = 0;
 
-	if (skb_cloned(skb) &&
-	    skb_is_nonlinear(skb) &&
-	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+	if (skb_unclone(skb, GFP_ATOMIC))
 		return -ENOMEM;
 
 	/* Get a new skb... force flag on. */
@@ -1622,7 +1623,7 @@
 
 	/* If a full-sized TSO skb can be sent, do it. */
 	if (limit >= min_t(unsigned int, sk->sk_gso_max_size,
-			   sk->sk_gso_max_segs * tp->mss_cache))
+			   tp->xmit_size_goal_segs * tp->mss_cache))
 		goto send_now;
 
 	/* Middle in queue won't get any more data, full sendable already? */
@@ -2328,6 +2329,8 @@
 		int oldpcount = tcp_skb_pcount(skb);
 
 		if (unlikely(oldpcount > 1)) {
+			if (skb_unclone(skb, GFP_ATOMIC))
+				return -ENOMEM;
 			tcp_init_tso_segs(sk, skb, cur_mss);
 			tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb));
 		}
@@ -2663,7 +2666,7 @@
 	int tcp_header_size;
 	int mss;
 
-	skb = alloc_skb(MAX_TCP_HEADER + 15, sk_gfp_atomic(sk, GFP_ATOMIC));
+	skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
 	if (unlikely(!skb)) {
 		dst_release(dst);
 		return NULL;
@@ -2807,6 +2810,8 @@
 
 	if (likely(!tp->repair))
 		tp->rcv_nxt = 0;
+	else
+		tp->rcv_tstamp = tcp_time_stamp;
 	tp->rcv_wup = tp->rcv_nxt;
 	tp->copied_seq = tp->rcv_nxt;
 
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 35ab330..af03108 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -799,7 +799,7 @@
 /*
  * Push out all pending data as one UDP datagram. Socket is locked.
  */
-static int udp_push_pending_frames(struct sock *sk)
+int udp_push_pending_frames(struct sock *sk)
 {
 	struct udp_sock  *up = udp_sk(sk);
 	struct inet_sock *inet = inet_sk(sk);
@@ -818,6 +818,7 @@
 	up->pending = 0;
 	return err;
 }
+EXPORT_SYMBOL(udp_push_pending_frames);
 
 int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 		size_t len)
diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c
index eb1dd4d..b5663c3 100644
--- a/net/ipv4/xfrm4_mode_tunnel.c
+++ b/net/ipv4/xfrm4_mode_tunnel.c
@@ -117,7 +117,7 @@
 
 	top_iph->frag_off = (flags & XFRM_STATE_NOPMTUDISC) ?
 		0 : (XFRM_MODE_SKB_CB(skb)->frag_off & htons(IP_DF));
-	ip_select_ident(top_iph, dst->child, NULL);
+	ip_select_ident(skb, dst->child, NULL);
 
 	top_iph->ttl = ip4_dst_hoplimit(dst->child);
 
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index da81f97..cd2339a 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1128,12 +1128,10 @@
 	if (ifp->flags & IFA_F_OPTIMISTIC)
 		addr_flags |= IFA_F_OPTIMISTIC;
 
-	ift = !max_addresses ||
-	      ipv6_count_addresses(idev) < max_addresses ?
-		ipv6_add_addr(idev, &addr, tmp_plen,
-			      ipv6_addr_type(&addr)&IPV6_ADDR_SCOPE_MASK,
-			      addr_flags) : NULL;
-	if (IS_ERR_OR_NULL(ift)) {
+	ift = ipv6_add_addr(idev, &addr, tmp_plen,
+			    ipv6_addr_type(&addr)&IPV6_ADDR_SCOPE_MASK,
+			    addr_flags);
+	if (IS_ERR(ift)) {
 		in6_ifa_put(ifp);
 		in6_dev_put(idev);
 		pr_info("%s: retry temporary address regeneration\n", __func__);
@@ -1484,6 +1482,23 @@
 }
 EXPORT_SYMBOL(ipv6_dev_get_saddr);
 
+int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
+		      unsigned char banned_flags)
+{
+	struct inet6_ifaddr *ifp;
+	int err = -EADDRNOTAVAIL;
+
+	list_for_each_entry(ifp, &idev->addr_list, if_list) {
+		if (ifp->scope == IFA_LINK &&
+		    !(ifp->flags & banned_flags)) {
+			*addr = ifp->addr;
+			err = 0;
+			break;
+		}
+	}
+	return err;
+}
+
 int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
 		    unsigned char banned_flags)
 {
@@ -1493,17 +1508,8 @@
 	rcu_read_lock();
 	idev = __in6_dev_get(dev);
 	if (idev) {
-		struct inet6_ifaddr *ifp;
-
 		read_lock_bh(&idev->lock);
-		list_for_each_entry(ifp, &idev->addr_list, if_list) {
-			if (ifp->scope == IFA_LINK &&
-			    !(ifp->flags & banned_flags)) {
-				*addr = ifp->addr;
-				err = 0;
-				break;
-			}
-		}
+		err = __ipv6_get_lladdr(idev, addr, banned_flags);
 		read_unlock_bh(&idev->lock);
 	}
 	rcu_read_unlock();
@@ -1565,6 +1571,33 @@
 	return false;
 }
 
+/* Compares an address/prefix_len with addresses on device @dev.
+ * If one is found it returns true.
+ */
+bool ipv6_chk_custom_prefix(const struct in6_addr *addr,
+	const unsigned int prefix_len, struct net_device *dev)
+{
+	struct inet6_dev *idev;
+	struct inet6_ifaddr *ifa;
+	bool ret = false;
+
+	rcu_read_lock();
+	idev = __in6_dev_get(dev);
+	if (idev) {
+		read_lock_bh(&idev->lock);
+		list_for_each_entry(ifa, &idev->addr_list, if_list) {
+			ret = ipv6_prefix_equal(addr, &ifa->addr, prefix_len);
+			if (ret)
+				break;
+		}
+		read_unlock_bh(&idev->lock);
+	}
+	rcu_read_unlock();
+
+	return ret;
+}
+EXPORT_SYMBOL(ipv6_chk_custom_prefix);
+
 int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev)
 {
 	struct inet6_dev *idev;
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
index f083a58..b30ad37 100644
--- a/net/ipv6/addrlabel.c
+++ b/net/ipv6/addrlabel.c
@@ -251,38 +251,36 @@
 /* add a label */
 static int __ip6addrlbl_add(struct ip6addrlbl_entry *newp, int replace)
 {
+	struct hlist_node *n;
+	struct ip6addrlbl_entry *last = NULL, *p = NULL;
 	int ret = 0;
 
-	ADDRLABEL(KERN_DEBUG "%s(newp=%p, replace=%d)\n",
-			__func__,
-			newp, replace);
+	ADDRLABEL(KERN_DEBUG "%s(newp=%p, replace=%d)\n", __func__, newp,
+		  replace);
 
-	if (hlist_empty(&ip6addrlbl_table.head)) {
-		hlist_add_head_rcu(&newp->list, &ip6addrlbl_table.head);
-	} else {
-		struct hlist_node *n;
-		struct ip6addrlbl_entry *p = NULL;
-		hlist_for_each_entry_safe(p, n,
-					  &ip6addrlbl_table.head, list) {
-			if (p->prefixlen == newp->prefixlen &&
-			    net_eq(ip6addrlbl_net(p), ip6addrlbl_net(newp)) &&
-			    p->ifindex == newp->ifindex &&
-			    ipv6_addr_equal(&p->prefix, &newp->prefix)) {
-				if (!replace) {
-					ret = -EEXIST;
-					goto out;
-				}
-				hlist_replace_rcu(&p->list, &newp->list);
-				ip6addrlbl_put(p);
-				goto out;
-			} else if ((p->prefixlen == newp->prefixlen && !p->ifindex) ||
-				   (p->prefixlen < newp->prefixlen)) {
-				hlist_add_before_rcu(&newp->list, &p->list);
+	hlist_for_each_entry_safe(p, n,	&ip6addrlbl_table.head, list) {
+		if (p->prefixlen == newp->prefixlen &&
+		    net_eq(ip6addrlbl_net(p), ip6addrlbl_net(newp)) &&
+		    p->ifindex == newp->ifindex &&
+		    ipv6_addr_equal(&p->prefix, &newp->prefix)) {
+			if (!replace) {
+				ret = -EEXIST;
 				goto out;
 			}
+			hlist_replace_rcu(&p->list, &newp->list);
+			ip6addrlbl_put(p);
+			goto out;
+		} else if ((p->prefixlen == newp->prefixlen && !p->ifindex) ||
+			   (p->prefixlen < newp->prefixlen)) {
+			hlist_add_before_rcu(&newp->list, &p->list);
+			goto out;
 		}
-		hlist_add_after_rcu(&p->list, &newp->list);
+		last = p;
 	}
+	if (last)
+		hlist_add_after_rcu(&last->list, &newp->list);
+	else
+		hlist_add_head_rcu(&newp->list, &ip6addrlbl_table.head);
 out:
 	if (!ret)
 		ip6addrlbl_table.seq++;
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 07a7d65..8d67900 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -162,12 +162,6 @@
 		off += optlen;
 		len -= optlen;
 	}
-	/* This case will not be caught by above check since its padding
-	 * length is smaller than 7:
-	 * 1 byte NH + 1 byte Length + 6 bytes Padding
-	 */
-	if ((padlen == 6) && ((off - skb_network_header_len(skb)) == 8))
-		goto bad;
 
 	if (len == 0)
 		return true;
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 12b1a94..45a7e02 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -946,6 +946,14 @@
 		.err	= ECONNREFUSED,
 		.fatal	= 1,
 	},
+	{	/* POLICY_FAIL */
+		.err	= EACCES,
+		.fatal	= 1,
+	},
+	{	/* REJECT_ROUTE	*/
+		.err	= EACCES,
+		.fatal	= 1,
+	},
 };
 
 int icmpv6_err_convert(u8 type, u8 code, int *err)
@@ -957,7 +965,7 @@
 	switch (type) {
 	case ICMPV6_DEST_UNREACH:
 		fatal = 1;
-		if (code <= ICMPV6_PORT_UNREACH) {
+		if (code < ARRAY_SIZE(tab_unreach)) {
 			*err  = tab_unreach[code].err;
 			fatal = tab_unreach[code].fatal;
 		}
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index 32b4a16..066640e 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -116,7 +116,7 @@
 			}
 			if (unlikely(!INET6_TW_MATCH(sk, net, saddr, daddr,
 						     ports, dif))) {
-				sock_put(sk);
+				inet_twsk_put(inet_twsk(sk));
 				goto begintw;
 			}
 			goto out;
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 192dd1a..9c06ecb 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -632,6 +632,12 @@
 	return ln;
 }
 
+static inline bool rt6_qualify_for_ecmp(struct rt6_info *rt)
+{
+	return (rt->rt6i_flags & (RTF_GATEWAY|RTF_ADDRCONF|RTF_DYNAMIC)) ==
+	       RTF_GATEWAY;
+}
+
 /*
  *	Insert routing information in a node.
  */
@@ -646,6 +652,7 @@
 	int add = (!info->nlh ||
 		   (info->nlh->nlmsg_flags & NLM_F_CREATE));
 	int found = 0;
+	bool rt_can_ecmp = rt6_qualify_for_ecmp(rt);
 
 	ins = &fn->leaf;
 
@@ -691,9 +698,8 @@
 			 * To avoid long list, we only had siblings if the
 			 * route have a gateway.
 			 */
-			if (rt->rt6i_flags & RTF_GATEWAY &&
-			    !(rt->rt6i_flags & RTF_EXPIRES) &&
-			    !(iter->rt6i_flags & RTF_EXPIRES))
+			if (rt_can_ecmp &&
+			    rt6_qualify_for_ecmp(iter))
 				rt->rt6i_nsiblings++;
 		}
 
@@ -715,7 +721,8 @@
 		/* Find the first route that have the same metric */
 		sibling = fn->leaf;
 		while (sibling) {
-			if (sibling->rt6i_metric == rt->rt6i_metric) {
+			if (sibling->rt6i_metric == rt->rt6i_metric &&
+			    rt6_qualify_for_ecmp(sibling)) {
 				list_add_tail(&rt->rt6i_siblings,
 					      &sibling->rt6i_siblings);
 				break;
@@ -818,9 +825,9 @@
 	fn = fib6_add_1(root, &rt->rt6i_dst.addr, sizeof(struct in6_addr),
 			rt->rt6i_dst.plen, offsetof(struct rt6_info, rt6i_dst),
 			allow_create, replace_required);
-
 	if (IS_ERR(fn)) {
 		err = PTR_ERR(fn);
+		fn = NULL;
 		goto out;
 	}
 
@@ -986,14 +993,22 @@
 
 			if (ipv6_prefix_equal(&key->addr, args->addr, key->plen)) {
 #ifdef CONFIG_IPV6_SUBTREES
-				if (fn->subtree)
-					fn = fib6_lookup_1(fn->subtree, args + 1);
+				if (fn->subtree) {
+					struct fib6_node *sfn;
+					sfn = fib6_lookup_1(fn->subtree,
+							    args + 1);
+					if (!sfn)
+						goto backtrack;
+					fn = sfn;
+				}
 #endif
-				if (!fn || fn->fn_flags & RTN_RTINFO)
+				if (fn->fn_flags & RTN_RTINFO)
 					return fn;
 			}
 		}
-
+#ifdef CONFIG_IPV6_SUBTREES
+backtrack:
+#endif
 		if (fn->fn_flags & RTN_ROOT)
 			break;
 
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index ecd6073..1f9a1a5 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -620,7 +620,7 @@
 	struct ip6_tnl *tunnel = netdev_priv(dev);
 	struct net_device *tdev;    /* Device to other host */
 	struct ipv6hdr  *ipv6h;     /* Our new IP header */
-	unsigned int max_headroom;  /* The extra header space needed */
+	unsigned int max_headroom = 0; /* The extra header space needed */
 	int    gre_hlen;
 	struct ipv6_tel_txoption opt;
 	int    mtu;
@@ -693,7 +693,7 @@
 			tunnel->err_count = 0;
 	}
 
-	max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + dst->header_len;
+	max_headroom += LL_RESERVED_SPACE(tdev) + gre_hlen + dst->header_len;
 
 	if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
 	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index d5d20cd..878f802 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -130,7 +130,7 @@
 	}
 
 	rcu_read_lock_bh();
-	nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
+	nexthop = rt6_nexthop((struct rt6_info *)dst);
 	neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
 	if (unlikely(!neigh))
 		neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
@@ -898,7 +898,7 @@
 	 */
 	rt = (struct rt6_info *) *dst;
 	rcu_read_lock_bh();
-	n = __ipv6_neigh_lookup_noref(rt->dst.dev, rt6_nexthop(rt, &fl6->daddr));
+	n = __ipv6_neigh_lookup_noref(rt->dst.dev, rt6_nexthop(rt));
 	err = n && !(n->nud_state & NUD_VALID) ? -EINVAL : 0;
 	rcu_read_unlock_bh();
 
@@ -1039,6 +1039,8 @@
 	 * udp datagram
 	 */
 	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
+		struct frag_hdr fhdr;
+
 		skb = sock_alloc_send_skb(sk,
 			hh_len + fragheaderlen + transhdrlen + 20,
 			(flags & MSG_DONTWAIT), &err);
@@ -1059,12 +1061,6 @@
 
 		skb->ip_summed = CHECKSUM_PARTIAL;
 		skb->csum = 0;
-	}
-
-	err = skb_append_datato_frags(sk,skb, getfrag, from,
-				      (length - transhdrlen));
-	if (!err) {
-		struct frag_hdr fhdr;
 
 		/* Specify the length of each IPv6 datagram fragment.
 		 * It has to be a multiple of 8.
@@ -1075,15 +1071,10 @@
 		ipv6_select_ident(&fhdr, rt);
 		skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
 		__skb_queue_tail(&sk->sk_write_queue, skb);
-
-		return 0;
 	}
-	/* There is not enough support do UPD LSO,
-	 * so follow normal path
-	 */
-	kfree_skb(skb);
 
-	return err;
+	return skb_append_datato_frags(sk, skb, getfrag, from,
+				       (length - transhdrlen));
 }
 
 static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src,
@@ -1098,11 +1089,12 @@
 	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
 }
 
-static void ip6_append_data_mtu(int *mtu,
+static void ip6_append_data_mtu(unsigned int *mtu,
 				int *maxfraglen,
 				unsigned int fragheaderlen,
 				struct sk_buff *skb,
-				struct rt6_info *rt)
+				struct rt6_info *rt,
+				bool pmtuprobe)
 {
 	if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
 		if (skb == NULL) {
@@ -1114,7 +1106,9 @@
 			 * this fragment is not first, the headers
 			 * space is regarded as data space.
 			 */
-			*mtu = dst_mtu(rt->dst.path);
+			*mtu = min(*mtu, pmtuprobe ?
+				   rt->dst.dev->mtu :
+				   dst_mtu(rt->dst.path));
 		}
 		*maxfraglen = ((*mtu - fragheaderlen) & ~7)
 			      + fragheaderlen - sizeof(struct frag_hdr);
@@ -1131,11 +1125,10 @@
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct inet_cork *cork;
 	struct sk_buff *skb, *skb_prev = NULL;
-	unsigned int maxfraglen, fragheaderlen;
+	unsigned int maxfraglen, fragheaderlen, mtu;
 	int exthdrlen;
 	int dst_exthdrlen;
 	int hh_len;
-	int mtu;
 	int copy;
 	int err;
 	int offset = 0;
@@ -1248,27 +1241,27 @@
 	 * --yoshfuji
 	 */
 
-	cork->length += length;
-	if (length > mtu) {
-		int proto = sk->sk_protocol;
-		if (dontfrag && (proto == IPPROTO_UDP || proto == IPPROTO_RAW)){
-			ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen);
-			return -EMSGSIZE;
-		}
-
-		if (proto == IPPROTO_UDP &&
-		    (rt->dst.dev->features & NETIF_F_UFO)) {
-
-			err = ip6_ufo_append_data(sk, getfrag, from, length,
-						  hh_len, fragheaderlen,
-						  transhdrlen, mtu, flags, rt);
-			if (err)
-				goto error;
-			return 0;
-		}
+	if ((length > mtu) && dontfrag && (sk->sk_protocol == IPPROTO_UDP ||
+					   sk->sk_protocol == IPPROTO_RAW)) {
+		ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen);
+		return -EMSGSIZE;
 	}
 
-	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
+	skb = skb_peek_tail(&sk->sk_write_queue);
+	cork->length += length;
+	if (((length > mtu) ||
+	     (skb && skb_has_frags(skb))) &&
+	    (sk->sk_protocol == IPPROTO_UDP) &&
+	    (rt->dst.dev->features & NETIF_F_UFO)) {
+		err = ip6_ufo_append_data(sk, getfrag, from, length,
+					  hh_len, fragheaderlen,
+					  transhdrlen, mtu, flags, rt);
+		if (err)
+			goto error;
+		return 0;
+	}
+
+	if (!skb)
 		goto alloc_new_skb;
 
 	while (length > 0) {
@@ -1292,7 +1285,9 @@
 			/* update mtu and maxfraglen if necessary */
 			if (skb == NULL || skb_prev == NULL)
 				ip6_append_data_mtu(&mtu, &maxfraglen,
-						    fragheaderlen, skb, rt);
+						    fragheaderlen, skb, rt,
+						    np->pmtudisc ==
+						    IPV6_PMTUDISC_PROBE);
 
 			skb_prev = skb;
 
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 1e55866..0516ebb 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1646,9 +1646,9 @@
 
 	if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
 	    nla_put(skb, IFLA_IPTUN_LOCAL, sizeof(struct in6_addr),
-		    &parm->raddr) ||
-	    nla_put(skb, IFLA_IPTUN_REMOTE, sizeof(struct in6_addr),
 		    &parm->laddr) ||
+	    nla_put(skb, IFLA_IPTUN_REMOTE, sizeof(struct in6_addr),
+		    &parm->raddr) ||
 	    nla_put_u8(skb, IFLA_IPTUN_TTL, parm->hop_limit) ||
 	    nla_put_u8(skb, IFLA_IPTUN_ENCAP_LIMIT, parm->encap_limit) ||
 	    nla_put_be32(skb, IFLA_IPTUN_FLOWINFO, parm->flowinfo) ||
@@ -1732,6 +1732,7 @@
 	if (!ip6n->fb_tnl_dev)
 		goto err_alloc_dev;
 	dev_net_set(ip6n->fb_tnl_dev, net);
+	ip6n->fb_tnl_dev->rtnl_link_ops = &ip6_link_ops;
 
 	err = ip6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
 	if (err < 0)
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 7a3bd3b..2cdb704 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -259,10 +259,12 @@
 {
 	struct mr6_table *mrt, *next;
 
+	rtnl_lock();
 	list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) {
 		list_del(&mrt->list);
 		ip6mr_free_table(mrt);
 	}
+	rtnl_unlock();
 	fib_rules_unregister(net->ipv6.mr6_rules_ops);
 }
 #else
@@ -289,7 +291,10 @@
 
 static void __net_exit ip6mr_rules_exit(struct net *net)
 {
+	rtnl_lock();
 	ip6mr_free_table(net->ipv6.mrt6);
+	net->ipv6.mrt6 = NULL;
+	rtnl_unlock();
 }
 #endif
 
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index bfa6cc3..952eaed 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1343,8 +1343,9 @@
 	hdr->daddr = *daddr;
 }
 
-static struct sk_buff *mld_newpack(struct net_device *dev, int size)
+static struct sk_buff *mld_newpack(struct inet6_dev *idev, int size)
 {
+	struct net_device *dev = idev->dev;
 	struct net *net = dev_net(dev);
 	struct sock *sk = net->ipv6.igmp_sk;
 	struct sk_buff *skb;
@@ -1369,7 +1370,7 @@
 
 	skb_reserve(skb, hlen);
 
-	if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
+	if (__ipv6_get_lladdr(idev, &addr_buf, IFA_F_TENTATIVE)) {
 		/* <draft-ietf-magma-mld-source-05.txt>:
 		 * use unspecified address as the source address
 		 * when a valid link-local address is not available.
@@ -1465,7 +1466,7 @@
 	struct mld2_grec *pgr;
 
 	if (!skb)
-		skb = mld_newpack(dev, dev->mtu);
+		skb = mld_newpack(pmc->idev, dev->mtu);
 	if (!skb)
 		return NULL;
 	pgr = (struct mld2_grec *)skb_put(skb, sizeof(struct mld2_grec));
@@ -1485,7 +1486,8 @@
 static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
 	int type, int gdeleted, int sdeleted)
 {
-	struct net_device *dev = pmc->idev->dev;
+	struct inet6_dev *idev = pmc->idev;
+	struct net_device *dev = idev->dev;
 	struct mld2_report *pmr;
 	struct mld2_grec *pgr = NULL;
 	struct ip6_sf_list *psf, *psf_next, *psf_prev, **psf_list;
@@ -1514,7 +1516,7 @@
 		    AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
 			if (skb)
 				mld_sendpack(skb);
-			skb = mld_newpack(dev, dev->mtu);
+			skb = mld_newpack(idev, dev->mtu);
 		}
 	}
 	first = 1;
@@ -1541,7 +1543,7 @@
 				pgr->grec_nsrcs = htons(scount);
 			if (skb)
 				mld_sendpack(skb);
-			skb = mld_newpack(dev, dev->mtu);
+			skb = mld_newpack(idev, dev->mtu);
 			first = 1;
 			scount = 0;
 		}
@@ -1596,8 +1598,8 @@
 	struct sk_buff *skb = NULL;
 	int type;
 
+	read_lock_bh(&idev->lock);
 	if (!pmc) {
-		read_lock_bh(&idev->lock);
 		for (pmc=idev->mc_list; pmc; pmc=pmc->next) {
 			if (pmc->mca_flags & MAF_NOREPORT)
 				continue;
@@ -1609,7 +1611,6 @@
 			skb = add_grec(skb, pmc, type, 0, 0);
 			spin_unlock_bh(&pmc->mca_lock);
 		}
-		read_unlock_bh(&idev->lock);
 	} else {
 		spin_lock_bh(&pmc->mca_lock);
 		if (pmc->mca_sfcount[MCAST_EXCLUDE])
@@ -1619,6 +1620,7 @@
 		skb = add_grec(skb, pmc, type, 0, 0);
 		spin_unlock_bh(&pmc->mca_lock);
 	}
+	read_unlock_bh(&idev->lock);
 	if (skb)
 		mld_sendpack(skb);
 }
@@ -2156,7 +2158,7 @@
 
 	idev->mc_gq_running = 0;
 	mld_send_report(idev, NULL);
-	__in6_dev_put(idev);
+	in6_dev_put(idev);
 }
 
 static void mld_ifc_timer_expire(unsigned long data)
@@ -2169,7 +2171,7 @@
 		if (idev->mc_ifc_count)
 			mld_ifc_start_timer(idev, idev->mc_maxdelay);
 	}
-	__in6_dev_put(idev);
+	in6_dev_put(idev);
 }
 
 static void mld_ifc_event(struct inet6_dev *idev)
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index ca4ffcc..060a044 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -372,14 +372,11 @@
 	int tlen = dev->needed_tailroom;
 	struct sock *sk = dev_net(dev)->ipv6.ndisc_sk;
 	struct sk_buff *skb;
-	int err;
 
-	skb = sock_alloc_send_skb(sk,
-				  hlen + sizeof(struct ipv6hdr) + len + tlen,
-				  1, &err);
+	skb = alloc_skb(hlen + sizeof(struct ipv6hdr) + len + tlen, GFP_ATOMIC);
 	if (!skb) {
-		ND_PRINTK(0, err, "ndisc: %s failed to allocate an skb, err=%d\n",
-			  __func__, err);
+		ND_PRINTK(0, err, "ndisc: %s failed to allocate an skb\n",
+			  __func__);
 		return NULL;
 	}
 
@@ -389,6 +386,11 @@
 	skb_reserve(skb, hlen + sizeof(struct ipv6hdr));
 	skb_reset_transport_header(skb);
 
+	/* Manually assign socket ownership as we avoid calling
+	 * sock_alloc_send_pskb() to bypass wmem buffer limits
+	 */
+	skb_set_owner_w(skb, sk);
+
 	return skb;
 }
 
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 790d9f4..1aeb473 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -490,6 +490,7 @@
 	ipv6_hdr(head)->payload_len = htons(payload_len);
 	ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn);
 	IP6CB(head)->nhoff = nhoff;
+	IP6CB(head)->flags |= IP6SKB_FRAGMENTED;
 
 	/* Yes, and fold redundant checksum back. 8) */
 	if (head->ip_summed == CHECKSUM_COMPLETE)
@@ -524,6 +525,9 @@
 	struct net *net = dev_net(skb_dst(skb)->dev);
 	int evicted;
 
+	if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED)
+		goto fail_hdr;
+
 	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);
 
 	/* Jumbo payload inhibits frag. header */
@@ -544,6 +548,7 @@
 				 ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS);
 
 		IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
+		IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
 		return 1;
 	}
 
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 28a664d..7b71541 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -65,6 +65,12 @@
 #include <linux/sysctl.h>
 #endif
 
+enum rt6_nud_state {
+	RT6_NUD_FAIL_HARD = -2,
+	RT6_NUD_FAIL_SOFT = -1,
+	RT6_NUD_SUCCEED = 1
+};
+
 static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
 				    const struct in6_addr *dest);
 static struct dst_entry	*ip6_dst_check(struct dst_entry *dst, u32 cookie);
@@ -466,6 +472,24 @@
 }
 
 #ifdef CONFIG_IPV6_ROUTER_PREF
+struct __rt6_probe_work {
+	struct work_struct work;
+	struct in6_addr target;
+	struct net_device *dev;
+};
+
+static void rt6_probe_deferred(struct work_struct *w)
+{
+	struct in6_addr mcaddr;
+	struct __rt6_probe_work *work =
+		container_of(w, struct __rt6_probe_work, work);
+
+	addrconf_addr_solict_mult(&work->target, &mcaddr);
+	ndisc_send_ns(work->dev, NULL, &work->target, &mcaddr, NULL);
+	dev_put(work->dev);
+	kfree(w);
+}
+
 static void rt6_probe(struct rt6_info *rt)
 {
 	struct neighbour *neigh;
@@ -489,17 +513,23 @@
 
 	if (!neigh ||
 	    time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) {
-		struct in6_addr mcaddr;
-		struct in6_addr *target;
+		struct __rt6_probe_work *work;
 
-		if (neigh) {
+		work = kmalloc(sizeof(*work), GFP_ATOMIC);
+
+		if (neigh && work)
 			neigh->updated = jiffies;
-			write_unlock(&neigh->lock);
-		}
 
-		target = (struct in6_addr *)&rt->rt6i_gateway;
-		addrconf_addr_solict_mult(target, &mcaddr);
-		ndisc_send_ns(rt->dst.dev, NULL, target, &mcaddr, NULL);
+		if (neigh)
+			write_unlock(&neigh->lock);
+
+		if (work) {
+			INIT_WORK(&work->work, rt6_probe_deferred);
+			work->target = rt->rt6i_gateway;
+			dev_hold(rt->dst.dev);
+			work->dev = rt->dst.dev;
+			schedule_work(&work->work);
+		}
 	} else {
 out:
 		write_unlock(&neigh->lock);
@@ -526,26 +556,29 @@
 	return 0;
 }
 
-static inline bool rt6_check_neigh(struct rt6_info *rt)
+static inline enum rt6_nud_state rt6_check_neigh(struct rt6_info *rt)
 {
 	struct neighbour *neigh;
-	bool ret = false;
+	enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;
 
 	if (rt->rt6i_flags & RTF_NONEXTHOP ||
 	    !(rt->rt6i_flags & RTF_GATEWAY))
-		return true;
+		return RT6_NUD_SUCCEED;
 
 	rcu_read_lock_bh();
 	neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
 	if (neigh) {
 		read_lock(&neigh->lock);
 		if (neigh->nud_state & NUD_VALID)
-			ret = true;
+			ret = RT6_NUD_SUCCEED;
 #ifdef CONFIG_IPV6_ROUTER_PREF
 		else if (!(neigh->nud_state & NUD_FAILED))
-			ret = true;
+			ret = RT6_NUD_SUCCEED;
 #endif
 		read_unlock(&neigh->lock);
+	} else {
+		ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
+		      RT6_NUD_SUCCEED : RT6_NUD_FAIL_SOFT;
 	}
 	rcu_read_unlock_bh();
 
@@ -559,43 +592,52 @@
 
 	m = rt6_check_dev(rt, oif);
 	if (!m && (strict & RT6_LOOKUP_F_IFACE))
-		return -1;
+		return RT6_NUD_FAIL_HARD;
 #ifdef CONFIG_IPV6_ROUTER_PREF
 	m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2;
 #endif
-	if (!rt6_check_neigh(rt) && (strict & RT6_LOOKUP_F_REACHABLE))
-		return -1;
+	if (strict & RT6_LOOKUP_F_REACHABLE) {
+		int n = rt6_check_neigh(rt);
+		if (n < 0)
+			return n;
+	}
 	return m;
 }
 
 static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
-				   int *mpri, struct rt6_info *match)
+				   int *mpri, struct rt6_info *match,
+				   bool *do_rr)
 {
 	int m;
+	bool match_do_rr = false;
 
 	if (rt6_check_expired(rt))
 		goto out;
 
 	m = rt6_score_route(rt, oif, strict);
-	if (m < 0)
+	if (m == RT6_NUD_FAIL_SOFT && !IS_ENABLED(CONFIG_IPV6_ROUTER_PREF)) {
+		match_do_rr = true;
+		m = 0; /* lowest valid score */
+	} else if (m < 0) {
 		goto out;
-
-	if (m > *mpri) {
-		if (strict & RT6_LOOKUP_F_REACHABLE)
-			rt6_probe(match);
-		*mpri = m;
-		match = rt;
-	} else if (strict & RT6_LOOKUP_F_REACHABLE) {
-		rt6_probe(rt);
 	}
 
+	if (strict & RT6_LOOKUP_F_REACHABLE)
+		rt6_probe(rt);
+
+	if (m > *mpri) {
+		*do_rr = match_do_rr;
+		*mpri = m;
+		match = rt;
+	}
 out:
 	return match;
 }
 
 static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
 				     struct rt6_info *rr_head,
-				     u32 metric, int oif, int strict)
+				     u32 metric, int oif, int strict,
+				     bool *do_rr)
 {
 	struct rt6_info *rt, *match;
 	int mpri = -1;
@@ -603,10 +645,10 @@
 	match = NULL;
 	for (rt = rr_head; rt && rt->rt6i_metric == metric;
 	     rt = rt->dst.rt6_next)
-		match = find_match(rt, oif, strict, &mpri, match);
+		match = find_match(rt, oif, strict, &mpri, match, do_rr);
 	for (rt = fn->leaf; rt && rt != rr_head && rt->rt6i_metric == metric;
 	     rt = rt->dst.rt6_next)
-		match = find_match(rt, oif, strict, &mpri, match);
+		match = find_match(rt, oif, strict, &mpri, match, do_rr);
 
 	return match;
 }
@@ -615,15 +657,16 @@
 {
 	struct rt6_info *match, *rt0;
 	struct net *net;
+	bool do_rr = false;
 
 	rt0 = fn->rr_ptr;
 	if (!rt0)
 		fn->rr_ptr = rt0 = fn->leaf;
 
-	match = find_rr_leaf(fn, rt0, rt0->rt6i_metric, oif, strict);
+	match = find_rr_leaf(fn, rt0, rt0->rt6i_metric, oif, strict,
+			     &do_rr);
 
-	if (!match &&
-	    (strict & RT6_LOOKUP_F_REACHABLE)) {
+	if (do_rr) {
 		struct rt6_info *next = rt0->dst.rt6_next;
 
 		/* no entries matched; do round-robin */
@@ -825,7 +868,6 @@
 			if (ort->rt6i_dst.plen != 128 &&
 			    ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
 				rt->rt6i_flags |= RTF_ANYCAST;
-			rt->rt6i_gateway = *daddr;
 		}
 
 		rt->rt6i_flags |= RTF_CACHE;
@@ -1038,10 +1080,13 @@
 	if (rt->rt6i_genid != rt_genid(dev_net(rt->dst.dev)))
 		return NULL;
 
-	if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie))
-		return dst;
+	if (!rt->rt6i_node || (rt->rt6i_node->fn_sernum != cookie))
+		return NULL;
 
-	return NULL;
+	if (rt6_check_expired(rt))
+		return NULL;
+
+	return dst;
 }
 
 static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
@@ -1070,10 +1115,13 @@
 
 	rt = (struct rt6_info *) skb_dst(skb);
 	if (rt) {
-		if (rt->rt6i_flags & RTF_CACHE)
-			rt6_update_expires(rt, 0);
-		else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT))
+		if (rt->rt6i_flags & RTF_CACHE) {
+			dst_hold(&rt->dst);
+			if (ip6_del_rt(rt))
+				dst_free(&rt->dst);
+		} else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) {
 			rt->rt6i_node->fn_sernum = -1;
+		}
 	}
 }
 
@@ -1220,6 +1268,7 @@
 	rt->dst.flags |= DST_HOST;
 	rt->dst.output  = ip6_output;
 	atomic_set(&rt->dst.__refcnt, 1);
+	rt->rt6i_gateway  = fl6->daddr;
 	rt->rt6i_dst.addr = fl6->daddr;
 	rt->rt6i_dst.plen = 128;
 	rt->rt6i_idev     = idev;
@@ -1776,7 +1825,10 @@
 			in6_dev_hold(rt->rt6i_idev);
 		rt->dst.lastuse = jiffies;
 
-		rt->rt6i_gateway = ort->rt6i_gateway;
+		if (ort->rt6i_flags & RTF_GATEWAY)
+			rt->rt6i_gateway = ort->rt6i_gateway;
+		else
+			rt->rt6i_gateway = *dest;
 		rt->rt6i_flags = ort->rt6i_flags;
 		if ((ort->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) ==
 		    (RTF_DEFAULT | RTF_ADDRCONF))
@@ -2053,6 +2105,7 @@
 	else
 		rt->rt6i_flags |= RTF_LOCAL;
 
+	rt->rt6i_gateway  = *addr;
 	rt->rt6i_dst.addr = *addr;
 	rt->rt6i_dst.plen = 128;
 	rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL);
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 3353634..0491264 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -566,6 +566,70 @@
 	return false;
 }
 
+/* Checks if an address matches an address on the tunnel interface.
+ * Used to detect the NAT of proto 41 packets and let them pass spoofing test.
+ * Long story:
+ * This function is called after we considered the packet as spoofed
+ * in is_spoofed_6rd.
+ * We may have a router that is doing NAT for proto 41 packets
+ * for an internal station. Destination a.a.a.a/PREFIX:bbbb:bbbb
+ * will be translated to n.n.n.n/PREFIX:bbbb:bbbb. And is_spoofed_6rd
+ * function will return true, dropping the packet.
+ * But, we can still check if is spoofed against the IP
+ * addresses associated with the interface.
+ */
+static bool only_dnatted(const struct ip_tunnel *tunnel,
+	const struct in6_addr *v6dst)
+{
+	int prefix_len;
+
+#ifdef CONFIG_IPV6_SIT_6RD
+	prefix_len = tunnel->ip6rd.prefixlen + 32
+		- tunnel->ip6rd.relay_prefixlen;
+#else
+	prefix_len = 48;
+#endif
+	return ipv6_chk_custom_prefix(v6dst, prefix_len, tunnel->dev);
+}
+
+/* Returns true if a packet is spoofed */
+static bool packet_is_spoofed(struct sk_buff *skb,
+			      const struct iphdr *iph,
+			      struct ip_tunnel *tunnel)
+{
+	const struct ipv6hdr *ipv6h;
+
+	if (tunnel->dev->priv_flags & IFF_ISATAP) {
+		if (!isatap_chksrc(skb, iph, tunnel))
+			return true;
+
+		return false;
+	}
+
+	if (tunnel->dev->flags & IFF_POINTOPOINT)
+		return false;
+
+	ipv6h = ipv6_hdr(skb);
+
+	if (unlikely(is_spoofed_6rd(tunnel, iph->saddr, &ipv6h->saddr))) {
+		net_warn_ratelimited("Src spoofed %pI4/%pI6c -> %pI4/%pI6c\n",
+				     &iph->saddr, &ipv6h->saddr,
+				     &iph->daddr, &ipv6h->daddr);
+		return true;
+	}
+
+	if (likely(!is_spoofed_6rd(tunnel, iph->daddr, &ipv6h->daddr)))
+		return false;
+
+	if (only_dnatted(tunnel, &ipv6h->daddr))
+		return false;
+
+	net_warn_ratelimited("Dst spoofed %pI4/%pI6c -> %pI4/%pI6c\n",
+			     &iph->saddr, &ipv6h->saddr,
+			     &iph->daddr, &ipv6h->daddr);
+	return true;
+}
+
 static int ipip6_rcv(struct sk_buff *skb)
 {
 	const struct iphdr *iph = ip_hdr(skb);
@@ -584,19 +648,9 @@
 		skb->protocol = htons(ETH_P_IPV6);
 		skb->pkt_type = PACKET_HOST;
 
-		if (tunnel->dev->priv_flags & IFF_ISATAP) {
-			if (!isatap_chksrc(skb, iph, tunnel)) {
-				tunnel->dev->stats.rx_errors++;
-				goto out;
-			}
-		} else {
-			if (is_spoofed_6rd(tunnel, iph->saddr,
-					   &ipv6_hdr(skb)->saddr) ||
-			    is_spoofed_6rd(tunnel, iph->daddr,
-					   &ipv6_hdr(skb)->daddr)) {
-				tunnel->dev->stats.rx_errors++;
-				goto out;
-			}
+		if (packet_is_spoofed(skb, iph, tunnel)) {
+			tunnel->dev->stats.rx_errors++;
+			goto out;
 		}
 
 		__skb_tunnel_rx(skb, tunnel->dev);
@@ -713,7 +767,7 @@
 			neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr);
 
 		if (neigh == NULL) {
-			net_dbg_ratelimited("sit: nexthop == NULL\n");
+			net_dbg_ratelimited("nexthop == NULL\n");
 			goto tx_error;
 		}
 
@@ -742,7 +796,7 @@
 			neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr);
 
 		if (neigh == NULL) {
-			net_dbg_ratelimited("sit: nexthop == NULL\n");
+			net_dbg_ratelimited("nexthop == NULL\n");
 			goto tx_error;
 		}
 
@@ -865,7 +919,7 @@
 		iph->ttl	=	iph6->hop_limit;
 
 	skb->ip_summed = CHECKSUM_NONE;
-	ip_select_ident(iph, skb_dst(skb), NULL);
+	ip_select_ident(skb, skb_dst(skb), NULL);
 	iptunnel_xmit(skb, dev);
 	return NETDEV_TX_OK;
 
@@ -1507,6 +1561,7 @@
 		goto err_alloc_dev;
 	}
 	dev_net_set(sitn->fb_tunnel_dev, net);
+	sitn->fb_tunnel_dev->rtnl_link_ops = &sit_link_ops;
 
 	err = ipip6_fb_tunnel_init(sitn->fb_tunnel_dev);
 	if (err)
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 8bb486f..3dd3f01 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1429,7 +1429,7 @@
 		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
 			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
 		if (np->rxopt.bits.rxtclass)
-			np->rcv_tclass = ipv6_get_dsfield(ipv6_hdr(skb));
+			np->rcv_tclass = ipv6_get_dsfield(ipv6_hdr(opt_skb));
 		if (ipv6_opt_accepted(sk, opt_skb)) {
 			skb_set_owner_r(opt_skb, sk);
 			opt_skb = xchg(&np->pktoptions, opt_skb);
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index e6dd85d..a0a911f 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -955,11 +955,16 @@
 	struct udphdr *uh;
 	struct udp_sock  *up = udp_sk(sk);
 	struct inet_sock *inet = inet_sk(sk);
-	struct flowi6 *fl6 = &inet->cork.fl.u.ip6;
+	struct flowi6 *fl6;
 	int err = 0;
 	int is_udplite = IS_UDPLITE(sk);
 	__wsum csum = 0;
 
+	if (up->pending == AF_INET)
+		return udp_push_pending_frames(sk);
+
+	fl6 = &inet->cork.fl.u.ip6;
+
 	/* Grab the skbuff where UDP header space exists. */
 	if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
 		goto out;
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 9da8620..ab8bd2c 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -2081,6 +2081,7 @@
 			pol->sadb_x_policy_type = IPSEC_POLICY_NONE;
 	}
 	pol->sadb_x_policy_dir = dir+1;
+	pol->sadb_x_policy_reserved = 0;
 	pol->sadb_x_policy_id = xp->index;
 	pol->sadb_x_policy_priority = xp->priority;
 
@@ -3137,7 +3138,9 @@
 	pol->sadb_x_policy_exttype = SADB_X_EXT_POLICY;
 	pol->sadb_x_policy_type = IPSEC_POLICY_IPSEC;
 	pol->sadb_x_policy_dir = XFRM_POLICY_OUT + 1;
+	pol->sadb_x_policy_reserved = 0;
 	pol->sadb_x_policy_id = xp->index;
+	pol->sadb_x_policy_priority = xp->priority;
 
 	/* Set sadb_comb's. */
 	if (x->id.proto == IPPROTO_AH)
@@ -3525,6 +3528,7 @@
 	pol->sadb_x_policy_exttype = SADB_X_EXT_POLICY;
 	pol->sadb_x_policy_type = IPSEC_POLICY_IPSEC;
 	pol->sadb_x_policy_dir = dir + 1;
+	pol->sadb_x_policy_reserved = 0;
 	pol->sadb_x_policy_id = 0;
 	pol->sadb_x_policy_priority = 0;
 
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 6984c3a..8c27de2 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -115,6 +115,11 @@
 static void l2tp_session_set_header_len(struct l2tp_session *session, int version);
 static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
 
+static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk)
+{
+	return sk->sk_user_data;
+}
+
 static inline struct l2tp_net *l2tp_pernet(struct net *net)
 {
 	BUG_ON(!net);
@@ -507,7 +512,7 @@
 		return 0;
 
 #if IS_ENABLED(CONFIG_IPV6)
-	if (sk->sk_family == PF_INET6) {
+	if (sk->sk_family == PF_INET6 && !l2tp_tunnel(sk)->v4mapped) {
 		if (!uh->check) {
 			LIMIT_NETDEBUG(KERN_INFO "L2TP: IPv6: checksum is 0\n");
 			return 1;
@@ -1071,7 +1076,7 @@
 	/* Queue the packet to IP for output */
 	skb->local_df = 1;
 #if IS_ENABLED(CONFIG_IPV6)
-	if (skb->sk->sk_family == PF_INET6)
+	if (skb->sk->sk_family == PF_INET6 && !tunnel->v4mapped)
 		error = inet6_csk_xmit(skb, NULL);
 	else
 #endif
@@ -1198,7 +1203,7 @@
 
 		/* Calculate UDP checksum if configured to do so */
 #if IS_ENABLED(CONFIG_IPV6)
-		if (sk->sk_family == PF_INET6)
+		if (sk->sk_family == PF_INET6 && !tunnel->v4mapped)
 			l2tp_xmit_ipv6_csum(sk, skb, udp_len);
 		else
 #endif
@@ -1247,10 +1252,9 @@
  */
 static void l2tp_tunnel_destruct(struct sock *sk)
 {
-	struct l2tp_tunnel *tunnel;
+	struct l2tp_tunnel *tunnel = l2tp_tunnel(sk);
 	struct l2tp_net *pn;
 
-	tunnel = sk->sk_user_data;
 	if (tunnel == NULL)
 		goto end;
 
@@ -1618,7 +1622,7 @@
 	}
 
 	/* Check if this socket has already been prepped */
-	tunnel = (struct l2tp_tunnel *)sk->sk_user_data;
+	tunnel = l2tp_tunnel(sk);
 	if (tunnel != NULL) {
 		/* This socket has already been prepped */
 		err = -EBUSY;
@@ -1647,6 +1651,24 @@
 	if (cfg != NULL)
 		tunnel->debug = cfg->debug;
 
+#if IS_ENABLED(CONFIG_IPV6)
+	if (sk->sk_family == PF_INET6) {
+		struct ipv6_pinfo *np = inet6_sk(sk);
+
+		if (ipv6_addr_v4mapped(&np->saddr) &&
+		    ipv6_addr_v4mapped(&np->daddr)) {
+			struct inet_sock *inet = inet_sk(sk);
+
+			tunnel->v4mapped = true;
+			inet->inet_saddr = np->saddr.s6_addr32[3];
+			inet->inet_rcv_saddr = np->rcv_saddr.s6_addr32[3];
+			inet->inet_daddr = np->daddr.s6_addr32[3];
+		} else {
+			tunnel->v4mapped = false;
+		}
+	}
+#endif
+
 	/* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
 	tunnel->encap = encap;
 	if (encap == L2TP_ENCAPTYPE_UDP) {
@@ -1655,7 +1677,7 @@
 		udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv;
 		udp_sk(sk)->encap_destroy = l2tp_udp_encap_destroy;
 #if IS_ENABLED(CONFIG_IPV6)
-		if (sk->sk_family == PF_INET6)
+		if (sk->sk_family == PF_INET6 && !tunnel->v4mapped)
 			udpv6_encap_enable();
 		else
 #endif
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 485a490..2f89d43 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -189,6 +189,9 @@
 	struct sock		*sock;		/* Parent socket */
 	int			fd;		/* Parent fd, if tunnel socket
 						 * was created by userspace */
+#if IS_ENABLED(CONFIG_IPV6)
+	bool			v4mapped;
+#endif
 
 	struct work_struct	del_work;
 
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 8dec687..8c46b27 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -353,7 +353,9 @@
 		goto error_put_sess_tun;
 	}
 
+	local_bh_disable();
 	l2tp_xmit_skb(session, skb, session->hdr_len);
+	local_bh_enable();
 
 	sock_put(ps->tunnel_sock);
 	sock_put(sk);
@@ -422,7 +424,9 @@
 	skb->data[0] = ppph[0];
 	skb->data[1] = ppph[1];
 
+	local_bh_disable();
 	l2tp_xmit_skb(session, skb, session->hdr_len);
+	local_bh_enable();
 
 	sock_put(sk_tun);
 	sock_put(sk);
@@ -1793,7 +1797,8 @@
 
 static const struct pppox_proto pppol2tp_proto = {
 	.create		= pppol2tp_create,
-	.ioctl		= pppol2tp_ioctl
+	.ioctl		= pppol2tp_ioctl,
+	.owner		= THIS_MODULE,
 };
 
 #ifdef CONFIG_L2TP_V3
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 4fdb306e..5ab17b8 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -652,6 +652,8 @@
 			if (sta->sdata->dev != dev)
 				continue;
 
+			sinfo.filled = 0;
+			sta_set_sinfo(sta, &sinfo);
 			i = 0;
 			ADD_STA_STATS(sta);
 		}
@@ -3313,7 +3315,7 @@
 		return -EINVAL;
 	}
 	band = chanctx_conf->def.chan->band;
-	sta = sta_info_get(sdata, peer);
+	sta = sta_info_get_bss(sdata, peer);
 	if (sta) {
 		qos = test_sta_flag(sta, WLAN_STA_WME);
 	} else {
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 170f9a7..3052672 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -1166,6 +1166,7 @@
 	clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state);
 	ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED |
 						BSS_CHANGED_IBSS);
+	ieee80211_vif_release_channel(sdata);
 	synchronize_rcu();
 	kfree(presp);
 
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 9ca8e32..92ef04c 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -842,6 +842,8 @@
  *	that the scan completed.
  * @SCAN_ABORTED: Set for our scan work function when the driver reported
  *	a scan complete for an aborted scan.
+ * @SCAN_HW_CANCELLED: Set for our scan work function when the scan is being
+ *	cancelled.
  */
 enum {
 	SCAN_SW_SCANNING,
@@ -849,6 +851,7 @@
 	SCAN_ONCHANNEL_SCANNING,
 	SCAN_COMPLETED,
 	SCAN_ABORTED,
+	SCAN_HW_CANCELLED,
 };
 
 /**
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 98d20c0..514e90f 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1726,6 +1726,15 @@
 		if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
 			dev_close(sdata->dev);
 
+	/*
+	 * Close all AP_VLAN interfaces first, as otherwise they
+	 * might be closed while the AP interface they belong to
+	 * is closed, causing unregister_netdevice_many() to crash.
+	 */
+	list_for_each_entry(sdata, &local->interfaces, list)
+		if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+			dev_close(sdata->dev);
+
 	mutex_lock(&local->iflist_mtx);
 	list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) {
 		list_del(&sdata->list);
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 741448b..5b4328d 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -31,10 +31,12 @@
 #include "led.h"
 
 #define IEEE80211_AUTH_TIMEOUT		(HZ / 5)
+#define IEEE80211_AUTH_TIMEOUT_LONG	(HZ / 2)
 #define IEEE80211_AUTH_TIMEOUT_SHORT	(HZ / 10)
 #define IEEE80211_AUTH_MAX_TRIES	3
 #define IEEE80211_AUTH_WAIT_ASSOC	(HZ * 5)
 #define IEEE80211_ASSOC_TIMEOUT		(HZ / 5)
+#define IEEE80211_ASSOC_TIMEOUT_LONG	(HZ / 2)
 #define IEEE80211_ASSOC_TIMEOUT_SHORT	(HZ / 10)
 #define IEEE80211_ASSOC_MAX_TRIES	3
 
@@ -237,8 +239,9 @@
 			     struct ieee80211_channel *channel,
 			     const struct ieee80211_ht_operation *ht_oper,
 			     const struct ieee80211_vht_operation *vht_oper,
-			     struct cfg80211_chan_def *chandef, bool verbose)
+			     struct cfg80211_chan_def *chandef, bool tracking)
 {
+	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 	struct cfg80211_chan_def vht_chandef;
 	u32 ht_cfreq, ret;
 
@@ -257,7 +260,7 @@
 	ht_cfreq = ieee80211_channel_to_frequency(ht_oper->primary_chan,
 						  channel->band);
 	/* check that channel matches the right operating channel */
-	if (channel->center_freq != ht_cfreq) {
+	if (!tracking && channel->center_freq != ht_cfreq) {
 		/*
 		 * It's possible that some APs are confused here;
 		 * Netgear WNDR3700 sometimes reports 4 higher than
@@ -265,11 +268,10 @@
 		 * since we look at probe response/beacon data here
 		 * it should be OK.
 		 */
-		if (verbose)
-			sdata_info(sdata,
-				   "Wrong control channel: center-freq: %d ht-cfreq: %d ht->primary_chan: %d band: %d - Disabling HT\n",
-				   channel->center_freq, ht_cfreq,
-				   ht_oper->primary_chan, channel->band);
+		sdata_info(sdata,
+			   "Wrong control channel: center-freq: %d ht-cfreq: %d ht->primary_chan: %d band: %d - Disabling HT\n",
+			   channel->center_freq, ht_cfreq,
+			   ht_oper->primary_chan, channel->band);
 		ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
 		goto out;
 	}
@@ -323,7 +325,7 @@
 				channel->band);
 		break;
 	default:
-		if (verbose)
+		if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
 			sdata_info(sdata,
 				   "AP VHT operation IE has invalid channel width (%d), disable VHT\n",
 				   vht_oper->chan_width);
@@ -332,7 +334,7 @@
 	}
 
 	if (!cfg80211_chandef_valid(&vht_chandef)) {
-		if (verbose)
+		if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
 			sdata_info(sdata,
 				   "AP VHT information is invalid, disable VHT\n");
 		ret = IEEE80211_STA_DISABLE_VHT;
@@ -345,7 +347,7 @@
 	}
 
 	if (!cfg80211_chandef_compatible(chandef, &vht_chandef)) {
-		if (verbose)
+		if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
 			sdata_info(sdata,
 				   "AP VHT information doesn't match HT, disable VHT\n");
 		ret = IEEE80211_STA_DISABLE_VHT;
@@ -361,18 +363,27 @@
 	if (ret & IEEE80211_STA_DISABLE_VHT)
 		vht_chandef = *chandef;
 
+	/*
+	 * Ignore the DISABLED flag when we're already connected and only
+	 * tracking the APs beacon for bandwidth changes - otherwise we
+	 * might get disconnected here if we connect to an AP, update our
+	 * regulatory information based on the AP's country IE and the
+	 * information we have is wrong/outdated and disables the channel
+	 * that we're actually using for the connection to the AP.
+	 */
 	while (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef,
-					IEEE80211_CHAN_DISABLED)) {
+					tracking ? 0 :
+						   IEEE80211_CHAN_DISABLED)) {
 		if (WARN_ON(chandef->width == NL80211_CHAN_WIDTH_20_NOHT)) {
 			ret = IEEE80211_STA_DISABLE_HT |
 			      IEEE80211_STA_DISABLE_VHT;
-			goto out;
+			break;
 		}
 
 		ret |= chandef_downgrade(chandef);
 	}
 
-	if (chandef->width != vht_chandef.width && verbose)
+	if (chandef->width != vht_chandef.width && !tracking)
 		sdata_info(sdata,
 			   "capabilities/regulatory prevented using AP HT/VHT configuration, downgraded\n");
 
@@ -412,7 +423,7 @@
 
 	/* calculate new channel (type) based on HT/VHT operation IEs */
 	flags = ieee80211_determine_chantype(sdata, sband, chan, ht_oper,
-					     vht_oper, &chandef, false);
+					     vht_oper, &chandef, true);
 
 	/*
 	 * Downgrade the new channel if we associated with restricted
@@ -3461,10 +3472,13 @@
 
 	if (tx_flags == 0) {
 		auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
-		ifmgd->auth_data->timeout_started = true;
+		auth_data->timeout_started = true;
 		run_again(ifmgd, auth_data->timeout);
 	} else {
-		auth_data->timeout_started = false;
+		auth_data->timeout =
+			round_jiffies_up(jiffies + IEEE80211_AUTH_TIMEOUT_LONG);
+		auth_data->timeout_started = true;
+		run_again(ifmgd, auth_data->timeout);
 	}
 
 	return 0;
@@ -3501,7 +3515,11 @@
 		assoc_data->timeout_started = true;
 		run_again(&sdata->u.mgd, assoc_data->timeout);
 	} else {
-		assoc_data->timeout_started = false;
+		assoc_data->timeout =
+			round_jiffies_up(jiffies +
+					 IEEE80211_ASSOC_TIMEOUT_LONG);
+		assoc_data->timeout_started = true;
+		run_again(&sdata->u.mgd, assoc_data->timeout);
 	}
 
 	return 0;
@@ -3906,7 +3924,7 @@
 	ifmgd->flags |= ieee80211_determine_chantype(sdata, sband,
 						     cbss->channel,
 						     ht_oper, vht_oper,
-						     &chandef, true);
+						     &chandef, false);
 
 	sdata->needed_rx_chains = min(ieee80211_ht_vht_rx_chains(sdata, cbss),
 				      local->rx_chains);
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 7fc5d0d..3401262 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -99,10 +99,13 @@
 	}
 	mutex_unlock(&local->sta_mtx);
 
-	/* remove all interfaces */
+	/* remove all interfaces that were created in the driver */
 	list_for_each_entry(sdata, &local->interfaces, list) {
-		if (!ieee80211_sdata_running(sdata))
+		if (!ieee80211_sdata_running(sdata) ||
+		    sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
+		    sdata->vif.type == NL80211_IFTYPE_MONITOR)
 			continue;
+
 		drv_remove_interface(local, sdata);
 	}
 
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index ac7ef54..e6512e2 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -290,7 +290,7 @@
 	struct minstrel_rate *msr, *mr;
 	unsigned int ndx;
 	bool mrr_capable;
-	bool prev_sample = mi->prev_sample;
+	bool prev_sample;
 	int delta;
 	int sampling_ratio;
 
@@ -314,6 +314,7 @@
 			(mi->sample_count + mi->sample_deferred / 2);
 
 	/* delta < 0: no sampling required */
+	prev_sample = mi->prev_sample;
 	mi->prev_sample = false;
 	if (delta < 0 || (!mrr_capable && prev_sample))
 		return;
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 5b2d301..f3bbea1 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -804,10 +804,18 @@
 
 	sample_group = &minstrel_mcs_groups[sample_idx / MCS_GROUP_RATES];
 	info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
+	rate->count = 1;
+
+	if (sample_idx / MCS_GROUP_RATES == MINSTREL_CCK_GROUP) {
+		int idx = sample_idx % ARRAY_SIZE(mp->cck_rates);
+		rate->idx = mp->cck_rates[idx];
+		rate->flags = 0;
+		return;
+	}
+
 	rate->idx = sample_idx % MCS_GROUP_RATES +
 		    (sample_group->streams - 1) * MCS_GROUP_RATES;
 	rate->flags = IEEE80211_TX_RC_MCS | sample_group->flags;
-	rate->count = 1;
 }
 
 static void
@@ -820,6 +828,9 @@
 	if (sband->band != IEEE80211_BAND_2GHZ)
 		return;
 
+	if (!(mp->hw->flags & IEEE80211_HW_SUPPORTS_HT_CCK_RATES))
+		return;
+
 	mi->cck_supported = 0;
 	mi->cck_supported_short = 0;
 	for (i = 0; i < 4; i++) {
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 8e29526..ec09bcb 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -932,8 +932,14 @@
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
 
-	/* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */
-	if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) {
+	/*
+	 * Drop duplicate 802.11 retransmissions
+	 * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
+	 */
+	if (rx->skb->len >= 24 && rx->sta &&
+	    !ieee80211_is_ctl(hdr->frame_control) &&
+	    !ieee80211_is_qos_nullfunc(hdr->frame_control) &&
+	    !is_multicast_ether_addr(hdr->addr1)) {
 		if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
 			     rx->sta->last_seq_ctrl[rx->seqno_idx] ==
 			     hdr->seq_ctrl)) {
@@ -2996,6 +3002,9 @@
 	case NL80211_IFTYPE_ADHOC:
 		if (!bssid)
 			return 0;
+		if (ether_addr_equal(sdata->vif.addr, hdr->addr2) ||
+		    ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2))
+			return 0;
 		if (ieee80211_is_beacon(hdr->frame_control)) {
 			return 1;
 		} else if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) {
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 99b10392..eb03337 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -202,6 +202,9 @@
 	enum ieee80211_band band;
 	int i, ielen, n_chans;
 
+	if (test_bit(SCAN_HW_CANCELLED, &local->scanning))
+		return false;
+
 	do {
 		if (local->hw_scan_band == IEEE80211_NUM_BANDS)
 			return false;
@@ -878,7 +881,23 @@
 	if (!local->scan_req)
 		goto out;
 
+	/*
+	 * We have a scan running and the driver already reported completion,
+	 * but the worker hasn't run yet or is stuck on the mutex - mark it as
+	 * cancelled.
+	 */
+	if (test_bit(SCAN_HW_SCANNING, &local->scanning) &&
+	    test_bit(SCAN_COMPLETED, &local->scanning)) {
+		set_bit(SCAN_HW_CANCELLED, &local->scanning);
+		goto out;
+	}
+
 	if (test_bit(SCAN_HW_SCANNING, &local->scanning)) {
+		/*
+		 * Make sure that __ieee80211_scan_completed doesn't trigger a
+		 * scan on another band.
+		 */
+		set_bit(SCAN_HW_CANCELLED, &local->scanning);
 		if (local->ops->cancel_hw_scan)
 			drv_cancel_hw_scan(local,
 				rcu_dereference_protected(local->scan_sdata,
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 4343920..9e78206 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -180,6 +180,9 @@
 	struct ieee80211_local *local = sta->local;
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
 
+	if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
+		sta->last_rx = jiffies;
+
 	if (ieee80211_is_data_qos(mgmt->frame_control)) {
 		struct ieee80211_hdr *hdr = (void *) skb->data;
 		u8 *qc = ieee80211_get_qos_ctl(hdr);
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 9972e07..e9d18c3 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1100,7 +1100,8 @@
 		tx->sta = rcu_dereference(sdata->u.vlan.sta);
 		if (!tx->sta && sdata->dev->ieee80211_ptr->use_4addr)
 			return TX_DROP;
-	} else if (info->flags & IEEE80211_TX_CTL_INJECTED ||
+	} else if (info->flags & (IEEE80211_TX_CTL_INJECTED |
+				  IEEE80211_TX_INTFL_NL80211_FRAME_TX) ||
 		   tx->sdata->control_port_protocol == tx->skb->protocol) {
 		tx->sta = sta_info_get_bss(sdata, hdr->addr1);
 	}
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 72e6292..5db8eb5 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -2174,6 +2174,10 @@
 	}
 
 	rate = cfg80211_calculate_bitrate(&ri);
+	if (WARN_ONCE(!rate,
+		      "Invalid bitrate: flags=0x%x, idx=%d, vht_nss=%d\n",
+		      status->flag, status->rate_idx, status->vht_nss))
+		return 0;
 
 	/* rewind from end of MPDU */
 	if (status->flag & RX_FLAG_MACTIME_END)
diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
index 57beb17..707bc52 100644
--- a/net/netfilter/ipset/ip_set_hash_gen.h
+++ b/net/netfilter/ipset/ip_set_hash_gen.h
@@ -325,18 +325,22 @@
 static void
 mtype_del_cidr(struct htype *h, u8 cidr, u8 nets_length)
 {
-	u8 i, j;
+	u8 i, j, net_end = nets_length - 1;
 
-	for (i = 0; i < nets_length - 1 && h->nets[i].cidr != cidr; i++)
-		;
-	h->nets[i].nets--;
-
-	if (h->nets[i].nets != 0)
-		return;
-
-	for (j = i; j < nets_length - 1 && h->nets[j].nets; j++) {
-		h->nets[j].cidr = h->nets[j + 1].cidr;
-		h->nets[j].nets = h->nets[j + 1].nets;
+	for (i = 0; i < nets_length; i++) {
+	        if (h->nets[i].cidr != cidr)
+	                continue;
+                if (h->nets[i].nets > 1 || i == net_end ||
+                    h->nets[i + 1].nets == 0) {
+                        h->nets[i].nets--;
+                        return;
+                }
+                for (j = i; j < net_end && h->nets[j].nets; j++) {
+		        h->nets[j].cidr = h->nets[j + 1].cidr;
+		        h->nets[j].nets = h->nets[j + 1].nets;
+                }
+                h->nets[j].nets = 0;
+                return;
 	}
 }
 #endif
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index b75ff64..c47444e 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -883,7 +883,7 @@
 	iph->daddr		=	cp->daddr.ip;
 	iph->saddr		=	saddr;
 	iph->ttl		=	old_iph->ttl;
-	ip_select_ident(iph, &rt->dst, NULL);
+	ip_select_ident(skb, &rt->dst, NULL);
 
 	/* Another hack: avoid icmp_send in ip_fragment */
 	skb->local_df = 1;
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index bdebd03..70866d1 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -778,8 +778,8 @@
 				   flowi6_to_flowi(&fl1), false)) {
 			if (!afinfo->route(&init_net, (struct dst_entry **)&rt2,
 					   flowi6_to_flowi(&fl2), false)) {
-				if (!memcmp(&rt1->rt6i_gateway, &rt2->rt6i_gateway,
-					    sizeof(rt1->rt6i_gateway)) &&
+				if (ipv6_addr_equal(rt6_nexthop(rt1),
+						    rt6_nexthop(rt2)) &&
 				    rt1->dst.dev == rt2->dst.dev)
 					ret = 1;
 				dst_release(&rt2->dst);
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 2fd6dbe..393f17e 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -364,7 +364,7 @@
 EXPORT_SYMBOL(genl_unregister_ops);
 
 /**
- * genl_register_family - register a generic netlink family
+ * __genl_register_family - register a generic netlink family
  * @family: generic netlink family
  *
  * Registers the specified family after validating it first. Only one
@@ -374,7 +374,7 @@
  *
  * Return 0 on success or a negative error code.
  */
-int genl_register_family(struct genl_family *family)
+int __genl_register_family(struct genl_family *family)
 {
 	int err = -EINVAL;
 
@@ -430,10 +430,10 @@
 errout:
 	return err;
 }
-EXPORT_SYMBOL(genl_register_family);
+EXPORT_SYMBOL(__genl_register_family);
 
 /**
- * genl_register_family_with_ops - register a generic netlink family
+ * __genl_register_family_with_ops - register a generic netlink family
  * @family: generic netlink family
  * @ops: operations to be registered
  * @n_ops: number of elements to register
@@ -457,12 +457,12 @@
  *
  * Return 0 on success or a negative error code.
  */
-int genl_register_family_with_ops(struct genl_family *family,
+int __genl_register_family_with_ops(struct genl_family *family,
 	struct genl_ops *ops, size_t n_ops)
 {
 	int err, i;
 
-	err = genl_register_family(family);
+	err = __genl_register_family(family);
 	if (err)
 		return err;
 
@@ -476,7 +476,7 @@
 	genl_unregister_family(family);
 	return err;
 }
-EXPORT_SYMBOL(genl_register_family_with_ops);
+EXPORT_SYMBOL(__genl_register_family_with_ops);
 
 /**
  * genl_unregister_family - unregister generic netlink family
@@ -544,6 +544,30 @@
 }
 EXPORT_SYMBOL(genlmsg_put);
 
+static int genl_lock_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	struct genl_ops *ops = cb->data;
+	int rc;
+
+	genl_lock();
+	rc = ops->dumpit(skb, cb);
+	genl_unlock();
+	return rc;
+}
+
+static int genl_lock_done(struct netlink_callback *cb)
+{
+	struct genl_ops *ops = cb->data;
+	int rc = 0;
+
+	if (ops->done) {
+		genl_lock();
+		rc = ops->done(cb);
+		genl_unlock();
+	}
+	return rc;
+}
+
 static int genl_family_rcv_msg(struct genl_family *family,
 			       struct sk_buff *skb,
 			       struct nlmsghdr *nlh)
@@ -572,15 +596,34 @@
 		return -EPERM;
 
 	if (nlh->nlmsg_flags & NLM_F_DUMP) {
-		struct netlink_dump_control c = {
-			.dump = ops->dumpit,
-			.done = ops->done,
-		};
+		int rc;
 
 		if (ops->dumpit == NULL)
 			return -EOPNOTSUPP;
 
-		return netlink_dump_start(net->genl_sock, skb, nlh, &c);
+		if (!family->parallel_ops) {
+			struct netlink_dump_control c = {
+				.module = family->module,
+				.data = ops,
+				.dump = genl_lock_dumpit,
+				.done = genl_lock_done,
+			};
+
+			genl_unlock();
+			rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
+			genl_lock();
+
+		} else {
+			struct netlink_dump_control c = {
+				.module = family->module,
+				.dump = ops->dumpit,
+				.done = ops->done,
+			};
+
+			rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
+		}
+
+		return rc;
 	}
 
 	if (ops->doit == NULL)
@@ -877,8 +920,10 @@
 #ifdef CONFIG_MODULES
 		if (res == NULL) {
 			genl_unlock();
+			up_read(&cb_lock);
 			request_module("net-pf-%d-proto-%d-family-%s",
 				       PF_NETLINK, NETLINK_GENERIC, name);
+			down_read(&cb_lock);
 			genl_lock();
 			res = genl_family_find_byname(name);
 		}
diff --git a/net/nfc/llcp.h b/net/nfc/llcp.h
index ff8c434..f924dd2 100644
--- a/net/nfc/llcp.h
+++ b/net/nfc/llcp.h
@@ -19,6 +19,7 @@
 
 enum llcp_state {
 	LLCP_CONNECTED = 1, /* wait_for_packet() wants that */
+	LLCP_CONNECTING,
 	LLCP_CLOSED,
 	LLCP_BOUND,
 	LLCP_LISTEN,
diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
index 380253e..7522c37 100644
--- a/net/nfc/llcp_sock.c
+++ b/net/nfc/llcp_sock.c
@@ -571,7 +571,7 @@
 	if (sk->sk_shutdown == SHUTDOWN_MASK)
 		mask |= POLLHUP;
 
-	if (sock_writeable(sk))
+	if (sock_writeable(sk) && sk->sk_state == LLCP_CONNECTED)
 		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
 	else
 		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
@@ -722,14 +722,16 @@
 	if (ret)
 		goto sock_unlink;
 
+	sk->sk_state = LLCP_CONNECTING;
+
 	ret = sock_wait_state(sk, LLCP_CONNECTED,
 			      sock_sndtimeo(sk, flags & O_NONBLOCK));
-	if (ret)
+	if (ret && ret != -EINPROGRESS)
 		goto sock_unlink;
 
 	release_sock(sk);
 
-	return 0;
+	return ret;
 
 sock_unlink:
 	nfc_llcp_put_ssap(local, llcp_sock->ssap);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 20a1bd0..a6895ab 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3259,9 +3259,11 @@
 
 		if (po->tp_version == TPACKET_V3) {
 			lv = sizeof(struct tpacket_stats_v3);
+			st.stats3.tp_packets += st.stats3.tp_drops;
 			data = &st.stats3;
 		} else {
 			lv = sizeof(struct tpacket_stats);
+			st.stats1.tp_packets += st.stats1.tp_drops;
 			data = &st.stats1;
 		}
 
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 281c1bd..51b968d 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -285,6 +285,45 @@
 	return q;
 }
 
+/* The linklayer setting were not transferred from iproute2, in older
+ * versions, and the rate tables lookup systems have been dropped in
+ * the kernel. To keep backward compatible with older iproute2 tc
+ * utils, we detect the linklayer setting by detecting if the rate
+ * table were modified.
+ *
+ * For linklayer ATM table entries, the rate table will be aligned to
+ * 48 bytes, thus some table entries will contain the same value.  The
+ * mpu (min packet unit) is also encoded into the old rate table, thus
+ * starting from the mpu, we find low and high table entries for
+ * mapping this cell.  If these entries contain the same value, when
+ * the rate tables have been modified for linklayer ATM.
+ *
+ * This is done by rounding mpu to the nearest 48 bytes cell/entry,
+ * and then roundup to the next cell, calc the table entry one below,
+ * and compare.
+ */
+static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
+{
+	int low       = roundup(r->mpu, 48);
+	int high      = roundup(low+1, 48);
+	int cell_low  = low >> r->cell_log;
+	int cell_high = (high >> r->cell_log) - 1;
+
+	/* rtab is too inaccurate at rates > 100Mbit/s */
+	if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
+		pr_debug("TC linklayer: Giving up ATM detection\n");
+		return TC_LINKLAYER_ETHERNET;
+	}
+
+	if ((cell_high > cell_low) && (cell_high < 256)
+	    && (rtab[cell_low] == rtab[cell_high])) {
+		pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
+			 cell_low, cell_high, rtab[cell_high]);
+		return TC_LINKLAYER_ATM;
+	}
+	return TC_LINKLAYER_ETHERNET;
+}
+
 static struct qdisc_rate_table *qdisc_rtab_list;
 
 struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab)
@@ -308,6 +347,8 @@
 		rtab->rate = *r;
 		rtab->refcnt = 1;
 		memcpy(rtab->data, nla_data(tab), 1024);
+		if (r->linklayer == TC_LINKLAYER_UNAWARE)
+			r->linklayer = __detect_linklayer(r, rtab->data);
 		rtab->next = qdisc_rtab_list;
 		qdisc_rtab_list = rtab;
 	}
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index ca8e0a5..1f9c314 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -605,6 +605,7 @@
 		struct sockaddr_atmpvc pvc;
 		int state;
 
+		memset(&pvc, 0, sizeof(pvc));
 		pvc.sap_family = AF_ATMPVC;
 		pvc.sap_addr.itf = flow->vcc->dev ? flow->vcc->dev->number : -1;
 		pvc.sap_addr.vpi = flow->vcc->vpi;
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 1bc210f..8ec1598 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1465,6 +1465,7 @@
 	unsigned char *b = skb_tail_pointer(skb);
 	struct tc_cbq_wrropt opt;
 
+	memset(&opt, 0, sizeof(opt));
 	opt.flags = 0;
 	opt.allot = cl->allot;
 	opt.priority = cl->priority + 1;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 2022408..a7f838b 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -908,6 +908,7 @@
 	memset(r, 0, sizeof(*r));
 	r->overhead = conf->overhead;
 	r->rate_bps = (u64)conf->rate << 3;
+	r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK);
 	r->mult = 1;
 	/*
 	 * Calibrate mult, shift so that token counting is accurate
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index adaedd7..e09b074 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -87,7 +87,7 @@
 	unsigned int children;
 	struct htb_class *parent;	/* parent class */
 
-	int prio;		/* these two are used only by leaves... */
+	u32 prio;		/* these two are used only by leaves... */
 	int quantum;		/* but stored for parent-to-leaf return */
 
 	union {
@@ -1312,6 +1312,7 @@
 	struct htb_sched *q = qdisc_priv(sch);
 	struct htb_class *cl = (struct htb_class *)*arg, *parent;
 	struct nlattr *opt = tca[TCA_OPTIONS];
+	struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
 	struct nlattr *tb[TCA_HTB_MAX + 1];
 	struct tc_htb_opt *hopt;
 
@@ -1333,6 +1334,18 @@
 	if (!hopt->rate.rate || !hopt->ceil.rate)
 		goto failure;
 
+	/* Keeping backward compatible with rate_table based iproute2 tc */
+	if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE) {
+		rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB]);
+		if (rtab)
+			qdisc_put_rtab(rtab);
+	}
+	if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE) {
+		ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB]);
+		if (ctab)
+			qdisc_put_rtab(ctab);
+	}
+
 	if (!cl) {		/* new class */
 		struct Qdisc *new_q;
 		int prio;
@@ -1463,7 +1476,7 @@
 	psched_ratecfg_precompute(&cl->ceil, &hopt->ceil);
 
 	cl->buffer = PSCHED_TICKS2NS(hopt->buffer);
-	cl->cbuffer = PSCHED_TICKS2NS(hopt->buffer);
+	cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer);
 
 	sch_tree_unlock(sch);
 
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index d51852b..5792252 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -113,7 +113,6 @@
 
 #define FRAC_BITS		30	/* fixed point arithmetic */
 #define ONE_FP			(1UL << FRAC_BITS)
-#define IWSUM			(ONE_FP/QFQ_MAX_WSUM)
 
 #define QFQ_MTU_SHIFT		16	/* to support TSO/GSO */
 #define QFQ_MIN_LMAX		512	/* see qfq_slot_insert */
@@ -189,6 +188,7 @@
 	struct qfq_aggregate	*in_serv_agg;   /* Aggregate being served. */
 	u32			num_active_agg; /* Num. of active aggregates */
 	u32			wsum;		/* weight sum */
+	u32			iwsum;		/* inverse weight sum */
 
 	unsigned long bitmaps[QFQ_MAX_STATE];	    /* Group bitmaps. */
 	struct qfq_group groups[QFQ_MAX_INDEX + 1]; /* The groups. */
@@ -314,6 +314,7 @@
 
 	q->wsum +=
 		(int) agg->class_weight * (new_num_classes - agg->num_classes);
+	q->iwsum = ONE_FP / q->wsum;
 
 	agg->num_classes = new_num_classes;
 }
@@ -340,6 +341,10 @@
 {
 	if (!hlist_unhashed(&agg->nonfull_next))
 		hlist_del_init(&agg->nonfull_next);
+	q->wsum -= agg->class_weight;
+	if (q->wsum != 0)
+		q->iwsum = ONE_FP / q->wsum;
+
 	if (q->in_serv_agg == agg)
 		q->in_serv_agg = qfq_choose_next_agg(q);
 	kfree(agg);
@@ -827,38 +832,60 @@
 	}
 }
 
-
 /*
- * The index of the slot in which the aggregate is to be inserted must
- * not be higher than QFQ_MAX_SLOTS-2. There is a '-2' and not a '-1'
- * because the start time of the group may be moved backward by one
- * slot after the aggregate has been inserted, and this would cause
- * non-empty slots to be right-shifted by one position.
+ * The index of the slot in which the input aggregate agg is to be
+ * inserted must not be higher than QFQ_MAX_SLOTS-2. There is a '-2'
+ * and not a '-1' because the start time of the group may be moved
+ * backward by one slot after the aggregate has been inserted, and
+ * this would cause non-empty slots to be right-shifted by one
+ * position.
  *
- * If the weight and lmax (max_pkt_size) of the classes do not change,
- * then QFQ+ does meet the above contraint according to the current
- * values of its parameters. In fact, if the weight and lmax of the
- * classes do not change, then, from the theory, QFQ+ guarantees that
- * the slot index is never higher than
- * 2 + QFQ_MAX_AGG_CLASSES * ((1<<QFQ_MTU_SHIFT)/QFQ_MIN_LMAX) *
- * (QFQ_MAX_WEIGHT/QFQ_MAX_WSUM) = 2 + 8 * 128 * (1 / 64) = 18
+ * QFQ+ fully satisfies this bound to the slot index if the parameters
+ * of the classes are not changed dynamically, and if QFQ+ never
+ * happens to postpone the service of agg unjustly, i.e., it never
+ * happens that the aggregate becomes backlogged and eligible, or just
+ * eligible, while an aggregate with a higher approximated finish time
+ * is being served. In particular, in this case QFQ+ guarantees that
+ * the timestamps of agg are low enough that the slot index is never
+ * higher than 2. Unfortunately, QFQ+ cannot provide the same
+ * guarantee if it happens to unjustly postpone the service of agg, or
+ * if the parameters of some class are changed.
  *
- * When the weight of a class is increased or the lmax of the class is
- * decreased, a new aggregate with smaller slot size than the original
- * parent aggregate of the class may happen to be activated. The
- * activation of this aggregate should be properly delayed to when the
- * service of the class has finished in the ideal system tracked by
- * QFQ+. If the activation of the aggregate is not delayed to this
- * reference time instant, then this aggregate may be unjustly served
- * before other aggregates waiting for service. This may cause the
- * above bound to the slot index to be violated for some of these
- * unlucky aggregates.
+ * As for the first event, i.e., an out-of-order service, the
+ * upper bound to the slot index guaranteed by QFQ+ grows to
+ * 2 +
+ * QFQ_MAX_AGG_CLASSES * ((1<<QFQ_MTU_SHIFT)/QFQ_MIN_LMAX) *
+ * (current_max_weight/current_wsum) <= 2 + 8 * 128 * 1.
+ *
+ * The following function deals with this problem by backward-shifting
+ * the timestamps of agg, if needed, so as to guarantee that the slot
+ * index is never higher than QFQ_MAX_SLOTS-2. This backward-shift may
+ * cause the service of other aggregates to be postponed, yet the
+ * worst-case guarantees of these aggregates are not violated.  In
+ * fact, in case of no out-of-order service, the timestamps of agg
+ * would have been even lower than they are after the backward shift,
+ * because QFQ+ would have guaranteed a maximum value equal to 2 for
+ * the slot index, and 2 < QFQ_MAX_SLOTS-2. Hence the aggregates whose
+ * service is postponed because of the backward-shift would have
+ * however waited for the service of agg before being served.
+ *
+ * The other event that may cause the slot index to be higher than 2
+ * for agg is a recent change of the parameters of some class. If the
+ * weight of a class is increased or the lmax (max_pkt_size) of the
+ * class is decreased, then a new aggregate with smaller slot size
+ * than the original parent aggregate of the class may happen to be
+ * activated. The activation of this aggregate should be properly
+ * delayed to when the service of the class has finished in the ideal
+ * system tracked by QFQ+. If the activation of the aggregate is not
+ * delayed to this reference time instant, then this aggregate may be
+ * unjustly served before other aggregates waiting for service. This
+ * may cause the above bound to the slot index to be violated for some
+ * of these unlucky aggregates.
  *
  * Instead of delaying the activation of the new aggregate, which is
- * quite complex, the following inaccurate but simple solution is used:
- * if the slot index is higher than QFQ_MAX_SLOTS-2, then the
- * timestamps of the aggregate are shifted backward so as to let the
- * slot index become equal to QFQ_MAX_SLOTS-2.
+ * quite complex, the above-discussed capping of the slot index is
+ * used to handle also the consequences of a change of the parameters
+ * of a class.
  */
 static void qfq_slot_insert(struct qfq_group *grp, struct qfq_aggregate *agg,
 			    u64 roundedS)
@@ -1077,7 +1104,7 @@
 	else
 		in_serv_agg->budget -= len;
 
-	q->V += (u64)len * IWSUM;
+	q->V += (u64)len * q->iwsum;
 	pr_debug("qfq dequeue: len %u F %lld now %lld\n",
 		 len, (unsigned long long) in_serv_agg->F,
 		 (unsigned long long) q->V);
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 4b2c831..bd4fb45 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -648,8 +648,7 @@
 		break;
 	case ICMP_REDIRECT:
 		sctp_icmp_redirect(sk, transport, skb);
-		err = 0;
-		break;
+		/* Fall through to out_unlock. */
 	default:
 		goto out_unlock;
 	}
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 391a245..422d8bd 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -189,7 +189,7 @@
 		break;
 	case NDISC_REDIRECT:
 		sctp_icmp_redirect(sk, transport, skb);
-		break;
+		goto out_unlock;
 	default:
 		break;
 	}
@@ -210,45 +210,24 @@
 		in6_dev_put(idev);
 }
 
-/* Based on tcp_v6_xmit() in tcp_ipv6.c. */
 static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
 {
 	struct sock *sk = skb->sk;
 	struct ipv6_pinfo *np = inet6_sk(sk);
-	struct flowi6 fl6;
-
-	memset(&fl6, 0, sizeof(fl6));
-
-	fl6.flowi6_proto = sk->sk_protocol;
-
-	/* Fill in the dest address from the route entry passed with the skb
-	 * and the source address from the transport.
-	 */
-	fl6.daddr = transport->ipaddr.v6.sin6_addr;
-	fl6.saddr = transport->saddr.v6.sin6_addr;
-
-	fl6.flowlabel = np->flow_label;
-	IP6_ECN_flow_xmit(sk, fl6.flowlabel);
-	if (ipv6_addr_type(&fl6.saddr) & IPV6_ADDR_LINKLOCAL)
-		fl6.flowi6_oif = transport->saddr.v6.sin6_scope_id;
-	else
-		fl6.flowi6_oif = sk->sk_bound_dev_if;
-
-	if (np->opt && np->opt->srcrt) {
-		struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt;
-		fl6.daddr = *rt0->addr;
-	}
+	struct flowi6 *fl6 = &transport->fl.u.ip6;
 
 	SCTP_DEBUG_PRINTK("%s: skb:%p, len:%d, src:%pI6 dst:%pI6\n",
 			  __func__, skb, skb->len,
-			  &fl6.saddr, &fl6.daddr);
+			  &fl6->saddr, &fl6->daddr);
 
-	SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS);
+	IP6_ECN_flow_xmit(sk, fl6->flowlabel);
 
 	if (!(transport->param_flags & SPP_PMTUD_ENABLE))
 		skb->local_df = 1;
 
-	return ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
+	SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS);
+
+	return ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
 }
 
 /* Returns the dst cache entry for the given source and destination ip
@@ -261,10 +240,12 @@
 	struct dst_entry *dst = NULL;
 	struct flowi6 *fl6 = &fl->u.ip6;
 	struct sctp_bind_addr *bp;
+	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct sctp_sockaddr_entry *laddr;
 	union sctp_addr *baddr = NULL;
 	union sctp_addr *daddr = &t->ipaddr;
 	union sctp_addr dst_saddr;
+	struct in6_addr *final_p, final;
 	__u8 matchlen = 0;
 	__u8 bmatchlen;
 	sctp_scope_t scope;
@@ -287,7 +268,8 @@
 		SCTP_DEBUG_PRINTK("SRC=%pI6 - ", &fl6->saddr);
 	}
 
-	dst = ip6_dst_lookup_flow(sk, fl6, NULL, false);
+	final_p = fl6_update_dst(fl6, np->opt, &final);
+	dst = ip6_dst_lookup_flow(sk, fl6, final_p, false);
 	if (!asoc || saddr)
 		goto out;
 
@@ -339,10 +321,12 @@
 		}
 	}
 	rcu_read_unlock();
+
 	if (baddr) {
 		fl6->saddr = baddr->v6.sin6_addr;
 		fl6->fl6_sport = baddr->v6.sin6_port;
-		dst = ip6_dst_lookup_flow(sk, fl6, NULL, false);
+		final_p = fl6_update_dst(fl6, np->opt, &final);
+		dst = ip6_dst_lookup_flow(sk, fl6, final_p, false);
 	}
 
 out:
diff --git a/net/sctp/output.c b/net/sctp/output.c
index bbef4a7..0beb2f9 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -547,7 +547,8 @@
 	 * by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>.
 	 */
 	if (!sctp_checksum_disable) {
-		if (!(dst->dev->features & NETIF_F_SCTP_CSUM)) {
+		if (!(dst->dev->features & NETIF_F_SCTP_CSUM) ||
+		    (dst_xfrm(dst) != NULL) || packet->ipfragok) {
 			__u32 crc32 = sctp_start_cksum((__u8 *)sh, cksum_buf_len);
 
 			/* 3) Put the resultant value into the checksum field in the
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 6abb1ca..79bc2510 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -820,6 +820,9 @@
 			goto skip_mkasconf;
 		}
 
+		if (laddr == NULL)
+			return -EINVAL;
+
 		/* We do not need RCU protection throughout this loop
 		 * because this is done under a socket lock from the
 		 * setsockopt call.
@@ -6193,7 +6196,7 @@
 	/* Is there any exceptional events?  */
 	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
 		mask |= POLLERR |
-			sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0;
+			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
 	if (sk->sk_shutdown & RCV_SHUTDOWN)
 		mask |= POLLRDHUP | POLLIN | POLLRDNORM;
 	if (sk->sk_shutdown == SHUTDOWN_MASK)
diff --git a/net/socket.c b/net/socket.c
index 4ca1526..1a5e247 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1956,6 +1956,16 @@
 	unsigned int name_len;
 };
 
+static int copy_msghdr_from_user(struct msghdr *kmsg,
+				 struct msghdr __user *umsg)
+{
+	if (copy_from_user(kmsg, umsg, sizeof(struct msghdr)))
+		return -EFAULT;
+	if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
+		return -EINVAL;
+	return 0;
+}
+
 static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
 			 struct msghdr *msg_sys, unsigned int flags,
 			 struct used_address *used_address)
@@ -1974,8 +1984,11 @@
 	if (MSG_CMSG_COMPAT & flags) {
 		if (get_compat_msghdr(msg_sys, msg_compat))
 			return -EFAULT;
-	} else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr)))
-		return -EFAULT;
+	} else {
+		err = copy_msghdr_from_user(msg_sys, msg);
+		if (err)
+			return err;
+	}
 
 	if (msg_sys->msg_iovlen > UIO_FASTIOV) {
 		err = -EMSGSIZE;
@@ -2183,8 +2196,11 @@
 	if (MSG_CMSG_COMPAT & flags) {
 		if (get_compat_msghdr(msg_sys, msg_compat))
 			return -EFAULT;
-	} else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr)))
-		return -EFAULT;
+	} else {
+		err = copy_msghdr_from_user(msg_sys, msg);
+		if (err)
+			return err;
+	}
 
 	if (msg_sys->msg_iovlen > UIO_FASTIOV) {
 		err = -EMSGSIZE;
@@ -3292,6 +3308,7 @@
 	case SIOCGMIIPHY:
 	case SIOCGMIIREG:
 	case SIOCSMIIREG:
+	case SIOCKILLADDR:
 		return dev_ifsioc(net, sock, cmd, argp);
 
 	case SIOCSARP:
diff --git a/net/sunrpc/auth_gss/gss_rpc_upcall.c b/net/sunrpc/auth_gss/gss_rpc_upcall.c
index d304f41..f1eb0d1 100644
--- a/net/sunrpc/auth_gss/gss_rpc_upcall.c
+++ b/net/sunrpc/auth_gss/gss_rpc_upcall.c
@@ -120,7 +120,7 @@
 	if (IS_ERR(clnt)) {
 		dprintk("RPC:       failed to create AF_LOCAL gssproxy "
 				"client (errno %ld).\n", PTR_ERR(clnt));
-		result = -PTR_ERR(clnt);
+		result = PTR_ERR(clnt);
 		*_clnt = NULL;
 		goto out;
 	}
@@ -213,6 +213,26 @@
 	return status;
 }
 
+static void gssp_free_receive_pages(struct gssx_arg_accept_sec_context *arg)
+{
+	int i;
+
+	for (i = 0; i < arg->npages && arg->pages[i]; i++)
+		__free_page(arg->pages[i]);
+}
+
+static int gssp_alloc_receive_pages(struct gssx_arg_accept_sec_context *arg)
+{
+	arg->npages = DIV_ROUND_UP(NGROUPS_MAX * 4, PAGE_SIZE);
+	arg->pages = kzalloc(arg->npages * sizeof(struct page *), GFP_KERNEL);
+	/*
+	 * XXX: actual pages are allocated by xdr layer in
+	 * xdr_partial_copy_from_skb.
+	 */
+	if (!arg->pages)
+		return -ENOMEM;
+	return 0;
+}
 
 /*
  * Public functions
@@ -261,10 +281,16 @@
 		arg.context_handle = &ctxh;
 	res.output_token->len = GSSX_max_output_token_sz;
 
+	ret = gssp_alloc_receive_pages(&arg);
+	if (ret)
+		return ret;
+
 	/* use nfs/ for targ_name ? */
 
 	ret = gssp_call(net, &msg);
 
+	gssp_free_receive_pages(&arg);
+
 	/* we need to fetch all data even in case of error so
 	 * that we can free special strctures is they have been allocated */
 	data->major_status = res.status.major_status;
@@ -328,7 +354,6 @@
 	kfree(data->in_handle.data);
 	kfree(data->out_handle.data);
 	kfree(data->out_token.data);
-	kfree(data->mech_oid.data);
 	free_svc_cred(&data->creds);
 }
 
diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c
index 357f613..f0f78c5 100644
--- a/net/sunrpc/auth_gss/gss_rpc_xdr.c
+++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c
@@ -166,14 +166,15 @@
 	return 0;
 }
 
-static int get_s32(void **p, void *max, s32 *res)
+static int get_host_u32(struct xdr_stream *xdr, u32 *res)
 {
-	void *base = *p;
-	void *next = (void *)((char *)base + sizeof(s32));
-	if (unlikely(next > max || next < base))
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, 4);
+	if (!p)
 		return -EINVAL;
-	memcpy(res, base, sizeof(s32));
-	*p = next;
+	/* Contents of linux creds are all host-endian: */
+	memcpy(res, p, sizeof(u32));
 	return 0;
 }
 
@@ -182,9 +183,9 @@
 {
 	u32 length;
 	__be32 *p;
-	void *q, *end;
-	s32 tmp;
-	int N, i, err;
+	u32 tmp;
+	u32 N;
+	int i, err;
 
 	p = xdr_inline_decode(xdr, 4);
 	if (unlikely(p == NULL))
@@ -192,33 +193,28 @@
 
 	length = be32_to_cpup(p);
 
-	/* FIXME: we do not want to use the scratch buffer for this one
-	 * may need to use functions that allows us to access an io vector
-	 * directly */
-	p = xdr_inline_decode(xdr, length);
-	if (unlikely(p == NULL))
+	if (length > (3 + NGROUPS_MAX) * sizeof(u32))
 		return -ENOSPC;
 
-	q = p;
-	end = q + length;
-
 	/* uid */
-	err = get_s32(&q, end, &tmp);
+	err = get_host_u32(xdr, &tmp);
 	if (err)
 		return err;
 	creds->cr_uid = make_kuid(&init_user_ns, tmp);
 
 	/* gid */
-	err = get_s32(&q, end, &tmp);
+	err = get_host_u32(xdr, &tmp);
 	if (err)
 		return err;
 	creds->cr_gid = make_kgid(&init_user_ns, tmp);
 
 	/* number of additional gid's */
-	err = get_s32(&q, end, &tmp);
+	err = get_host_u32(xdr, &tmp);
 	if (err)
 		return err;
 	N = tmp;
+	if ((3 + N) * sizeof(u32) != length)
+		return -EINVAL;
 	creds->cr_group_info = groups_alloc(N);
 	if (creds->cr_group_info == NULL)
 		return -ENOMEM;
@@ -226,7 +222,7 @@
 	/* gid's */
 	for (i = 0; i < N; i++) {
 		kgid_t kgid;
-		err = get_s32(&q, end, &tmp);
+		err = get_host_u32(xdr, &tmp);
 		if (err)
 			goto out_free_groups;
 		err = -EINVAL;
@@ -430,7 +426,7 @@
 static int dummy_dec_nameattr_array(struct xdr_stream *xdr,
 				    struct gssx_name_attr_array *naa)
 {
-	struct gssx_name_attr dummy;
+	struct gssx_name_attr dummy = { .attr = {.len = 0} };
 	u32 count, i;
 	__be32 *p;
 
@@ -493,12 +489,13 @@
 	return err;
 }
 
+
 static int gssx_dec_name(struct xdr_stream *xdr,
 			 struct gssx_name *name)
 {
-	struct xdr_netobj dummy_netobj;
-	struct gssx_name_attr_array dummy_name_attr_array;
-	struct gssx_option_array dummy_option_array;
+	struct xdr_netobj dummy_netobj = { .len = 0 };
+	struct gssx_name_attr_array dummy_name_attr_array = { .count = 0 };
+	struct gssx_option_array dummy_option_array = { .count = 0 };
 	int err;
 
 	/* name->display_name */
@@ -783,6 +780,9 @@
 	/* arg->options */
 	err = dummy_enc_opt_array(xdr, &arg->options);
 
+	xdr_inline_pages(&req->rq_rcv_buf,
+		PAGE_SIZE/2 /* pretty arbitrary */,
+		arg->pages, 0 /* page base */, arg->npages * PAGE_SIZE);
 done:
 	if (err)
 		dprintk("RPC:       gssx_enc_accept_sec_context: %d\n", err);
diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.h b/net/sunrpc/auth_gss/gss_rpc_xdr.h
index 1c98b27..685a688 100644
--- a/net/sunrpc/auth_gss/gss_rpc_xdr.h
+++ b/net/sunrpc/auth_gss/gss_rpc_xdr.h
@@ -147,6 +147,8 @@
 	struct gssx_cb *input_cb;
 	u32 ret_deleg_cred;
 	struct gssx_option_array options;
+	struct page **pages;
+	unsigned int npages;
 };
 
 struct gssx_res_accept_sec_context {
@@ -240,7 +242,8 @@
 			     2 * GSSX_max_princ_sz + \
 			     8 + 8 + 4 + 4 + 4)
 #define GSSX_max_output_token_sz 1024
-#define GSSX_max_creds_sz (4 + 4 + 4 + NGROUPS_MAX * 4)
+/* grouplist not included; we allocate separate pages for that: */
+#define GSSX_max_creds_sz (4 + 4 + 4 /* + NGROUPS_MAX*4 */)
 #define GSSX_RES_accept_sec_context_sz (GSSX_default_status_sz + \
 					GSSX_default_ctx_sz + \
 					GSSX_max_output_token_sz + \
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 5a750b9..426f8fc 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1644,6 +1644,10 @@
 		task->tk_action = call_connect_status;
 		if (task->tk_status < 0)
 			return;
+		if (task->tk_flags & RPC_TASK_NOCONNECT) {
+			rpc_exit(task, -ENOTCONN);
+			return;
+		}
 		xprt_connect(task);
 	}
 }
diff --git a/net/sunrpc/netns.h b/net/sunrpc/netns.h
index 74d948f..779742c 100644
--- a/net/sunrpc/netns.h
+++ b/net/sunrpc/netns.h
@@ -23,6 +23,7 @@
 	struct rpc_clnt *rpcb_local_clnt4;
 	spinlock_t rpcb_clnt_lock;
 	unsigned int rpcb_users;
+	unsigned int rpcb_is_af_local : 1;
 
 	struct mutex gssp_lock;
 	wait_queue_head_t gssp_wq;
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index 3df764d..1891a10 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -204,13 +204,15 @@
 }
 
 static void rpcb_set_local(struct net *net, struct rpc_clnt *clnt,
-			struct rpc_clnt *clnt4)
+			struct rpc_clnt *clnt4,
+			bool is_af_local)
 {
 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
 
 	/* Protected by rpcb_create_local_mutex */
 	sn->rpcb_local_clnt = clnt;
 	sn->rpcb_local_clnt4 = clnt4;
+	sn->rpcb_is_af_local = is_af_local ? 1 : 0;
 	smp_wmb(); 
 	sn->rpcb_users = 1;
 	dprintk("RPC:       created new rpcb local clients (rpcb_local_clnt: "
@@ -238,6 +240,14 @@
 		.program	= &rpcb_program,
 		.version	= RPCBVERS_2,
 		.authflavor	= RPC_AUTH_NULL,
+		/*
+		 * We turn off the idle timeout to prevent the kernel
+		 * from automatically disconnecting the socket.
+		 * Otherwise, we'd have to cache the mount namespace
+		 * of the caller and somehow pass that to the socket
+		 * reconnect code.
+		 */
+		.flags		= RPC_CLNT_CREATE_NO_IDLE_TIMEOUT,
 	};
 	struct rpc_clnt *clnt, *clnt4;
 	int result = 0;
@@ -263,7 +273,7 @@
 		clnt4 = NULL;
 	}
 
-	rpcb_set_local(net, clnt, clnt4);
+	rpcb_set_local(net, clnt, clnt4, true);
 
 out:
 	return result;
@@ -315,7 +325,7 @@
 		clnt4 = NULL;
 	}
 
-	rpcb_set_local(net, clnt, clnt4);
+	rpcb_set_local(net, clnt, clnt4, false);
 
 out:
 	return result;
@@ -376,13 +386,16 @@
 	return rpc_create(&args);
 }
 
-static int rpcb_register_call(struct rpc_clnt *clnt, struct rpc_message *msg)
+static int rpcb_register_call(struct sunrpc_net *sn, struct rpc_clnt *clnt, struct rpc_message *msg, bool is_set)
 {
-	int result, error = 0;
+	int flags = RPC_TASK_NOCONNECT;
+	int error, result = 0;
 
+	if (is_set || !sn->rpcb_is_af_local)
+		flags = RPC_TASK_SOFTCONN;
 	msg->rpc_resp = &result;
 
-	error = rpc_call_sync(clnt, msg, RPC_TASK_SOFTCONN);
+	error = rpc_call_sync(clnt, msg, flags);
 	if (error < 0) {
 		dprintk("RPC:       failed to contact local rpcbind "
 				"server (errno %d).\n", -error);
@@ -439,16 +452,19 @@
 		.rpc_argp	= &map,
 	};
 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
+	bool is_set = false;
 
 	dprintk("RPC:       %sregistering (%u, %u, %d, %u) with local "
 			"rpcbind\n", (port ? "" : "un"),
 			prog, vers, prot, port);
 
 	msg.rpc_proc = &rpcb_procedures2[RPCBPROC_UNSET];
-	if (port)
+	if (port != 0) {
 		msg.rpc_proc = &rpcb_procedures2[RPCBPROC_SET];
+		is_set = true;
+	}
 
-	return rpcb_register_call(sn->rpcb_local_clnt, &msg);
+	return rpcb_register_call(sn, sn->rpcb_local_clnt, &msg, is_set);
 }
 
 /*
@@ -461,6 +477,7 @@
 	const struct sockaddr_in *sin = (const struct sockaddr_in *)sap;
 	struct rpcbind_args *map = msg->rpc_argp;
 	unsigned short port = ntohs(sin->sin_port);
+	bool is_set = false;
 	int result;
 
 	map->r_addr = rpc_sockaddr2uaddr(sap, GFP_KERNEL);
@@ -471,10 +488,12 @@
 			map->r_addr, map->r_netid);
 
 	msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET];
-	if (port)
+	if (port != 0) {
 		msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET];
+		is_set = true;
+	}
 
-	result = rpcb_register_call(sn->rpcb_local_clnt4, msg);
+	result = rpcb_register_call(sn, sn->rpcb_local_clnt4, msg, is_set);
 	kfree(map->r_addr);
 	return result;
 }
@@ -489,6 +508,7 @@
 	const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)sap;
 	struct rpcbind_args *map = msg->rpc_argp;
 	unsigned short port = ntohs(sin6->sin6_port);
+	bool is_set = false;
 	int result;
 
 	map->r_addr = rpc_sockaddr2uaddr(sap, GFP_KERNEL);
@@ -499,10 +519,12 @@
 			map->r_addr, map->r_netid);
 
 	msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET];
-	if (port)
+	if (port != 0) {
 		msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET];
+		is_set = true;
+	}
 
-	result = rpcb_register_call(sn->rpcb_local_clnt4, msg);
+	result = rpcb_register_call(sn, sn->rpcb_local_clnt4, msg, is_set);
 	kfree(map->r_addr);
 	return result;
 }
@@ -519,7 +541,7 @@
 	map->r_addr = "";
 	msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET];
 
-	return rpcb_register_call(sn->rpcb_local_clnt4, msg);
+	return rpcb_register_call(sn, sn->rpcb_local_clnt4, msg, false);
 }
 
 /**
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 06bdf5a..1583c8a 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -493,8 +493,6 @@
 	if (rv)
 		return -EINVAL;
 	uid = make_kuid(&init_user_ns, id);
-	if (!uid_valid(uid))
-		return -EINVAL;
 	ug.uid = uid;
 
 	expiry = get_expiry(&mesg);
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 0f679df..305374d 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -917,7 +917,10 @@
 	len = svsk->sk_datalen;
 	npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	for (i = 0; i < npages; i++) {
-		BUG_ON(svsk->sk_pages[i] == NULL);
+		if (svsk->sk_pages[i] == NULL) {
+			WARN_ON_ONCE(1);
+			continue;
+		}
 		put_page(svsk->sk_pages[i]);
 		svsk->sk_pages[i] = NULL;
 	}
@@ -1092,8 +1095,10 @@
 		goto err_noclose;
 	}
 
-	if (svc_sock_reclen(svsk) < 8)
+	if (svsk->sk_datalen < 8) {
+		svsk->sk_datalen = 0;
 		goto err_delete; /* client is nuts. */
+	}
 
 	rqstp->rq_arg.len = svsk->sk_datalen;
 	rqstp->rq_arg.page_base = 0;
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 75edcfa..1504bb1 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -207,10 +207,13 @@
 		pgfrom_base -= copy;
 
 		vto = kmap_atomic(*pgto);
-		vfrom = kmap_atomic(*pgfrom);
-		memmove(vto + pgto_base, vfrom + pgfrom_base, copy);
+		if (*pgto != *pgfrom) {
+			vfrom = kmap_atomic(*pgfrom);
+			memcpy(vto + pgto_base, vfrom + pgfrom_base, copy);
+			kunmap_atomic(vfrom);
+		} else
+			memmove(vto + pgto_base, vto + pgfrom_base, copy);
 		flush_dcache_page(*pgto);
-		kunmap_atomic(vfrom);
 		kunmap_atomic(vto);
 
 	} while ((len -= copy) != 0);
diff --git a/net/sunrpc/xprtrdma/svc_rdma_marshal.c b/net/sunrpc/xprtrdma/svc_rdma_marshal.c
index 8d2eddd..65b1462 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_marshal.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_marshal.c
@@ -98,6 +98,7 @@
  */
 static u32 *decode_write_list(u32 *va, u32 *vaend)
 {
+	unsigned long start, end;
 	int nchunks;
 
 	struct rpcrdma_write_array *ary =
@@ -113,9 +114,12 @@
 		return NULL;
 	}
 	nchunks = ntohl(ary->wc_nchunks);
-	if (((unsigned long)&ary->wc_array[0] +
-	     (sizeof(struct rpcrdma_write_chunk) * nchunks)) >
-	    (unsigned long)vaend) {
+
+	start = (unsigned long)&ary->wc_array[0];
+	end = (unsigned long)vaend;
+	if (nchunks < 0 ||
+	    nchunks > (SIZE_MAX - start) / sizeof(struct rpcrdma_write_chunk) ||
+	    (start + (sizeof(struct rpcrdma_write_chunk) * nchunks)) > end) {
 		dprintk("svcrdma: ary=%p, wc_nchunks=%d, vaend=%p\n",
 			ary, nchunks, vaend);
 		return NULL;
@@ -129,6 +133,7 @@
 
 static u32 *decode_reply_array(u32 *va, u32 *vaend)
 {
+	unsigned long start, end;
 	int nchunks;
 	struct rpcrdma_write_array *ary =
 		(struct rpcrdma_write_array *)va;
@@ -143,9 +148,12 @@
 		return NULL;
 	}
 	nchunks = ntohl(ary->wc_nchunks);
-	if (((unsigned long)&ary->wc_array[0] +
-	     (sizeof(struct rpcrdma_write_chunk) * nchunks)) >
-	    (unsigned long)vaend) {
+
+	start = (unsigned long)&ary->wc_array[0];
+	end = (unsigned long)vaend;
+	if (nchunks < 0 ||
+	    nchunks > (SIZE_MAX - start) / sizeof(struct rpcrdma_write_chunk) ||
+	    (start + (sizeof(struct rpcrdma_write_chunk) * nchunks)) > end) {
 		dprintk("svcrdma: ary=%p, wc_nchunks=%d, vaend=%p\n",
 			ary, nchunks, vaend);
 		return NULL;
diff --git a/net/sysctl_net.c b/net/sysctl_net.c
index 9bc6db0..e7000be 100644
--- a/net/sysctl_net.c
+++ b/net/sysctl_net.c
@@ -47,12 +47,12 @@
 
 	/* Allow network administrator to have same access as root. */
 	if (ns_capable(net->user_ns, CAP_NET_ADMIN) ||
-	    uid_eq(root_uid, current_uid())) {
+	    uid_eq(root_uid, current_euid())) {
 		int mode = (table->mode >> 6) & 7;
 		return (mode << 6) | (mode << 3) | mode;
 	}
 	/* Allow netns root group to have the same access as the root group */
-	if (gid_eq(root_gid, current_gid())) {
+	if (in_egroup_p(root_gid)) {
 		int mode = (table->mode >> 3) & 7;
 		return (mode << 3) | mode;
 	}
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 515ce38..7e26ad4 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1179,7 +1179,7 @@
 		/* Accept only ACK or NACK message */
 		if (unlikely(msg_errcode(msg))) {
 			sock->state = SS_DISCONNECTING;
-			sk->sk_err = -ECONNREFUSED;
+			sk->sk_err = ECONNREFUSED;
 			retval = TIPC_OK;
 			break;
 		}
@@ -1190,7 +1190,7 @@
 		res = auto_connect(sock, msg);
 		if (res) {
 			sock->state = SS_DISCONNECTING;
-			sk->sk_err = res;
+			sk->sk_err = -res;
 			retval = TIPC_OK;
 			break;
 		}
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index c3a13a7..9d8ef94 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1246,6 +1246,15 @@
 	return 0;
 }
 
+static void unix_sock_inherit_flags(const struct socket *old,
+				    struct socket *new)
+{
+	if (test_bit(SOCK_PASSCRED, &old->flags))
+		set_bit(SOCK_PASSCRED, &new->flags);
+	if (test_bit(SOCK_PASSSEC, &old->flags))
+		set_bit(SOCK_PASSSEC, &new->flags);
+}
+
 static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
 {
 	struct sock *sk = sock->sk;
@@ -1280,6 +1289,7 @@
 	/* attach accepted sock to socket */
 	unix_state_lock(tsk);
 	newsock->state = SS_CONNECTED;
+	unix_sock_inherit_flags(sock, newsock);
 	sock_graft(tsk, newsock);
 	unix_state_unlock(tsk);
 	return 0;
diff --git a/net/unix/diag.c b/net/unix/diag.c
index d591091..86fa0f3 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -124,6 +124,7 @@
 	rep->udiag_family = AF_UNIX;
 	rep->udiag_type = sk->sk_type;
 	rep->udiag_state = sk->sk_state;
+	rep->pad = 0;
 	rep->udiag_ino = sk_ino;
 	sock_diag_save_cookie(sk, rep->udiag_cookie);
 
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index d80e471..e62c1ad 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -269,6 +269,8 @@
 				if (chan->flags & IEEE80211_CHAN_DISABLED)
 					continue;
 				wdev->wext.ibss.chandef.chan = chan;
+				wdev->wext.ibss.chandef.center_freq1 =
+					chan->center_freq;
 				break;
 			}
 
@@ -353,6 +355,7 @@
 	if (chan) {
 		wdev->wext.ibss.chandef.chan = chan;
 		wdev->wext.ibss.chandef.width = NL80211_CHAN_WIDTH_20_NOHT;
+		wdev->wext.ibss.chandef.center_freq1 = freq;
 		wdev->wext.ibss.channel_fixed = true;
 	} else {
 		/* cfg80211_ibss_wext_join will pick one if needed */
diff --git a/net/wireless/radiotap.c b/net/wireless/radiotap.c
index 7d604c0..a271c27 100644
--- a/net/wireless/radiotap.c
+++ b/net/wireless/radiotap.c
@@ -97,6 +97,10 @@
 	struct ieee80211_radiotap_header *radiotap_header,
 	int max_length, const struct ieee80211_radiotap_vendor_namespaces *vns)
 {
+	/* check the radiotap header can actually be present */
+	if (max_length < sizeof(struct ieee80211_radiotap_header))
+		return -EINVAL;
+
 	/* Linux only supports version 0 radiotap format */
 	if (radiotap_header->it_version)
 		return -EINVAL;
@@ -131,7 +135,8 @@
 			 */
 
 			if ((unsigned long)iterator->_arg -
-			    (unsigned long)iterator->_rtheader >
+			    (unsigned long)iterator->_rtheader +
+			    sizeof(uint32_t) >
 			    (unsigned long)iterator->_max_length)
 				return -EINVAL;
 		}
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 37ca969..22c88d2 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -1583,11 +1583,11 @@
 	case SIOCX25CALLACCPTAPPRV: {
 		rc = -EINVAL;
 		lock_sock(sk);
-		if (sk->sk_state != TCP_CLOSE)
-			break;
-		clear_bit(X25_ACCPT_APPRV_FLAG, &x25->flags);
+		if (sk->sk_state == TCP_CLOSE) {
+			clear_bit(X25_ACCPT_APPRV_FLAG, &x25->flags);
+			rc = 0;
+		}
 		release_sock(sk);
-		rc = 0;
 		break;
 	}
 
@@ -1595,14 +1595,15 @@
 		rc = -EINVAL;
 		lock_sock(sk);
 		if (sk->sk_state != TCP_ESTABLISHED)
-			break;
+			goto out_sendcallaccpt_release;
 		/* must call accptapprv above */
 		if (test_bit(X25_ACCPT_APPRV_FLAG, &x25->flags))
-			break;
+			goto out_sendcallaccpt_release;
 		x25_write_internal(sk, X25_CALL_ACCEPTED);
 		x25->state = X25_STATE_3;
-		release_sock(sk);
 		rc = 0;
+out_sendcallaccpt_release:
+		release_sock(sk);
 		break;
 	}
 
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
index 487ac6f..9a11f9f 100644
--- a/scripts/kallsyms.c
+++ b/scripts/kallsyms.c
@@ -55,6 +55,7 @@
 static unsigned int table_size, table_cnt;
 static int all_symbols = 0;
 static char symbol_prefix_char = '\0';
+static unsigned long long kernel_start_addr = 0;
 
 int token_profit[0x10000];
 
@@ -65,7 +66,10 @@
 
 static void usage(void)
 {
-	fprintf(stderr, "Usage: kallsyms [--all-symbols] [--symbol-prefix=<prefix char>] < in.map > out.S\n");
+	fprintf(stderr, "Usage: kallsyms [--all-symbols] "
+			"[--symbol-prefix=<prefix char>] "
+			"[--page-offset=<CONFIG_PAGE_OFFSET>] "
+			"< in.map > out.S\n");
 	exit(1);
 }
 
@@ -194,6 +198,9 @@
 	int i;
 	int offset = 1;
 
+	if (s->addr < kernel_start_addr)
+		return 0;
+
 	/* skip prefix char */
 	if (symbol_prefix_char && *(s->sym + 1) == symbol_prefix_char)
 		offset++;
@@ -646,6 +653,9 @@
 				if ((*p == '"' && *(p+2) == '"') || (*p == '\'' && *(p+2) == '\''))
 					p++;
 				symbol_prefix_char = *p;
+			} else if (strncmp(argv[i], "--page-offset=", 14) == 0) {
+				const char *p = &argv[i][14];
+				kernel_start_addr = strtoull(p, NULL, 16);
 			} else
 				usage();
 		}
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
index 0149949..32b10f5 100644
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -82,6 +82,8 @@
 		kallsymopt="${kallsymopt} --all-symbols"
 	fi
 
+	kallsymopt="${kallsymopt} --page-offset=$CONFIG_PAGE_OFFSET"
+
 	local aflags="${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL}               \
 		      ${NOSTDINC_FLAGS} ${LINUXINCLUDE} ${KBUILD_CPPFLAGS}"
 
diff --git a/sound/arm/pxa2xx-pcm-lib.c b/sound/arm/pxa2xx-pcm-lib.c
index 76e0d56..823359e 100644
--- a/sound/arm/pxa2xx-pcm-lib.c
+++ b/sound/arm/pxa2xx-pcm-lib.c
@@ -166,7 +166,9 @@
 	} else {
 		printk(KERN_ERR "%s: DMA error on channel %d (DCSR=%#x)\n",
 			rtd->params->name, dma_ch, dcsr);
+		snd_pcm_stream_lock(substream);
 		snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
+		snd_pcm_stream_unlock(substream);
 	}
 }
 EXPORT_SYMBOL(pxa2xx_pcm_dma_irq);
diff --git a/sound/core/Kconfig b/sound/core/Kconfig
index b413ed0..d57b219 100644
--- a/sound/core/Kconfig
+++ b/sound/core/Kconfig
@@ -15,6 +15,12 @@
 config SND_COMPRESS_OFFLOAD
 	tristate
 
+config SND_EFFECTS_OFFLOAD
+	bool "Effect Offload Support"
+	default n
+	help
+	Say Y here to enable the Effect Offload.
+
 # To be effective this also requires INPUT - users should say:
 #    select SND_JACK if INPUT=y || INPUT=SND
 # to avoid having to force INPUT on.
diff --git a/sound/core/Makefile b/sound/core/Makefile
index 43d4117..9e37a2b 100644
--- a/sound/core/Makefile
+++ b/sound/core/Makefile
@@ -23,6 +23,7 @@
 snd-hwdep-objs    := hwdep.o
 
 snd-compress-objs := compress_offload.o
+snd-effects-objs := effects_offload.o
 
 obj-$(CONFIG_SND) 		+= snd.o
 obj-$(CONFIG_SND_HWDEP)		+= snd-hwdep.o
@@ -36,3 +37,4 @@
 obj-$(CONFIG_SND_SEQUENCER)	+= seq/
 
 obj-$(CONFIG_SND_COMPRESS_OFFLOAD)	+= snd-compress.o
+obj-$(CONFIG_SND_EFFECTS_OFFLOAD)	+= snd-effects.o
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
index fe399f8..d7842ac 100644
--- a/sound/core/compress_offload.c
+++ b/sound/core/compress_offload.c
@@ -123,6 +123,7 @@
 	}
 	runtime->state = SNDRV_PCM_STATE_OPEN;
 	init_waitqueue_head(&runtime->sleep);
+	init_waitqueue_head(&runtime->wait);
 	data->stream.runtime = runtime;
 	f->private_data = (void *)data;
 	mutex_lock(&compr->lock);
@@ -139,6 +140,18 @@
 static int snd_compr_free(struct inode *inode, struct file *f)
 {
 	struct snd_compr_file *data = f->private_data;
+	struct snd_compr_runtime *runtime = data->stream.runtime;
+
+	switch (runtime->state) {
+	case SNDRV_PCM_STATE_RUNNING:
+	case SNDRV_PCM_STATE_DRAINING:
+	case SNDRV_PCM_STATE_PAUSED:
+		data->stream.ops->trigger(&data->stream, SNDRV_PCM_TRIGGER_STOP);
+		break;
+	default:
+		break;
+	}
+
 	data->stream.ops->free(&data->stream);
 	kfree(data->stream.runtime->buffer);
 	kfree(data->stream.runtime);
@@ -256,16 +269,25 @@
 	struct snd_compr_file *data = f->private_data;
 	struct snd_compr_stream *stream;
 	size_t avail;
-	int retval;
+	int retval = 0;
 
 	if (snd_BUG_ON(!data))
 		return -EFAULT;
 
 	stream = &data->stream;
 	mutex_lock(&stream->device->lock);
-	/* write is allowed when stream is running or has been steup */
+	 /*
+	 * if the stream is in paused state, return the
+	 * number of bytes consumed as 0
+	 */
+	if (stream->runtime->state == SNDRV_PCM_STATE_PAUSED) {
+		mutex_unlock(&stream->device->lock);
+		return retval;
+	}
+	/* write is allowed when stream is running or prepared or in setup */
 	if (stream->runtime->state != SNDRV_PCM_STATE_SETUP &&
-			stream->runtime->state != SNDRV_PCM_STATE_RUNNING) {
+			stream->runtime->state != SNDRV_PCM_STATE_RUNNING &&
+			stream->runtime->state != SNDRV_PCM_STATE_PREPARED) {
 		mutex_unlock(&stream->device->lock);
 		return -EBADFD;
 	}
@@ -372,8 +394,7 @@
 		return -EFAULT;
 
 	mutex_lock(&stream->device->lock);
-	if (stream->runtime->state == SNDRV_PCM_STATE_PAUSED ||
-			stream->runtime->state == SNDRV_PCM_STATE_OPEN) {
+	if (stream->runtime->state == SNDRV_PCM_STATE_OPEN) {
 		retval = -EBADFD;
 		goto out;
 	}
@@ -397,6 +418,7 @@
 			retval = snd_compr_get_poll(stream);
 		break;
 	default:
+		pr_err("poll returns err!...\n");
 		if (stream->direction == SND_COMPRESS_PLAYBACK)
 			retval = POLLOUT | POLLWRNORM | POLLERR;
 		else
@@ -624,7 +646,8 @@
 {
 	int retval;
 
-	if (stream->runtime->state != SNDRV_PCM_STATE_RUNNING)
+	if ((stream->runtime->state != SNDRV_PCM_STATE_RUNNING) &&
+		(stream->runtime->state != SNDRV_PCM_STATE_DRAINING))
 		return -EPERM;
 	retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_PUSH);
 	if (!retval)
@@ -639,8 +662,10 @@
 	if (stream->runtime->state != SNDRV_PCM_STATE_PAUSED)
 		return -EPERM;
 	retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_RELEASE);
-	if (!retval)
+	if (!retval) {
 		stream->runtime->state = SNDRV_PCM_STATE_RUNNING;
+		wake_up(&stream->runtime->sleep);
+	}
 	return retval;
 }
 
@@ -656,35 +681,71 @@
 	return retval;
 }
 
-static int snd_compr_stop(struct snd_compr_stream *stream)
+int snd_compr_stop(struct snd_compr_stream *stream)
 {
-	int retval;
+	int retval = 0;
 
-	if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
-			stream->runtime->state == SNDRV_PCM_STATE_SETUP)
+	if (stream->runtime->state == SNDRV_PCM_STATE_SETUP)
 		return -EPERM;
-	retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
+	if (stream->runtime->state != SNDRV_PCM_STATE_PREPARED)
+		retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
 	if (!retval) {
 		stream->runtime->state = SNDRV_PCM_STATE_SETUP;
 		wake_up(&stream->runtime->sleep);
+		snd_compr_drain_notify(stream);
 		stream->runtime->total_bytes_available = 0;
 		stream->runtime->total_bytes_transferred = 0;
 	}
 	return retval;
 }
+EXPORT_SYMBOL(snd_compr_stop);
+
+static int snd_compress_wait_for_drain(struct snd_compr_stream *stream)
+{
+	/*
+	 * We are called with lock held. So drop the lock while we wait for
+	 * drain complete notfication from the driver
+	 *
+	 * It is expected that driver will notify the drain completion and then
+	 * stream will be moved to SETUP state, even if draining resulted in an
+	 * error. We can trigger next track after this.
+	 */
+	stream->runtime->state = SNDRV_PCM_STATE_DRAINING;
+	mutex_unlock(&stream->device->lock);
+
+	wait_event(stream->runtime->wait, stream->runtime->drain_wake);
+
+	wake_up(&stream->runtime->sleep);
+	mutex_lock(&stream->device->lock);
+
+	return 0;
+}
 
 static int snd_compr_drain(struct snd_compr_stream *stream)
 {
-	int retval;
+	int retval = 0;
 
-	if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
-			stream->runtime->state == SNDRV_PCM_STATE_SETUP)
+	if (stream->runtime->state == SNDRV_PCM_STATE_SETUP)
 		return -EPERM;
-	retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_DRAIN);
-	if (!retval) {
-		stream->runtime->state = SNDRV_PCM_STATE_DRAINING;
+
+	stream->runtime->drain_wake = 0;
+
+	/* this is hackish for our tree but for now lets carry it while we fix
+	 * usermode behaviour
+	 */
+	if (stream->runtime->state != SNDRV_PCM_STATE_PREPARED)
+		retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_DRAIN);
+	else
+		return 0;
+
+	if (retval) {
+		pr_err("SND_COMPR_TRIGGER_DRAIN failed %d\n", retval);
 		wake_up(&stream->runtime->sleep);
+		return retval;
 	}
+
+	retval = snd_compress_wait_for_drain(stream);
+	stream->runtime->state = SNDRV_PCM_STATE_SETUP;
 	return retval;
 }
 
@@ -712,17 +773,30 @@
 
 static int snd_compr_partial_drain(struct snd_compr_stream *stream)
 {
-	int retval;
-	if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
-			stream->runtime->state == SNDRV_PCM_STATE_SETUP)
+	int retval = 0;
+
+	/* agaain hackish  changes */
+	if (stream->runtime->state == SNDRV_PCM_STATE_SETUP)
 		return -EPERM;
 	/* stream can be drained only when next track has been signalled */
 	if (stream->next_track == false)
 		return -EPERM;
 
-	retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_PARTIAL_DRAIN);
+	stream->runtime->drain_wake = 0;
+	if (stream->runtime->state != SNDRV_PCM_STATE_PREPARED)
+		retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_PARTIAL_DRAIN);
+	else
+		return 0;
+
+	if (retval) {
+		pr_err("Partial drain returned failure\n");
+		wake_up(&stream->runtime->sleep);
+		return retval;
+	}
 
 	stream->next_track = false;
+	retval = snd_compress_wait_for_drain(stream);
+	stream->runtime->state = SNDRV_PCM_STATE_SETUP;
 	return retval;
 }
 
@@ -740,7 +814,7 @@
 	mutex_lock(&stream->device->lock);
 	switch (_IOC_NR(cmd)) {
 	case _IOC_NR(SNDRV_COMPRESS_IOCTL_VERSION):
-		put_user(SNDRV_COMPRESS_VERSION,
+		retval = put_user(SNDRV_COMPRESS_VERSION,
 				(int __user *)arg) ? -EFAULT : 0;
 		break;
 	case _IOC_NR(SNDRV_COMPRESS_GET_CAPS):
@@ -801,6 +875,9 @@
 		.write =	snd_compr_write,
 		.read =		snd_compr_read,
 		.unlocked_ioctl = snd_compr_ioctl,
+#ifdef CONFIG_COMPAT
+		.compat_ioctl =	snd_compr_ioctl,
+#endif
 		.mmap =		snd_compr_mmap,
 		.poll =		snd_compr_poll,
 };
@@ -815,7 +892,7 @@
 		return -EBADFD;
 	compr = device->device_data;
 
-	sprintf(str, "comprC%iD%i", compr->card->number, compr->device);
+	snprintf(str, sizeof(str), "comprC%iD%i", compr->card->number, compr->device);
 	pr_debug("reg %s for device %s, direction %d\n", str, compr->name,
 			compr->direction);
 	/* register compressed device */
@@ -834,7 +911,8 @@
 	struct snd_compr *compr;
 
 	compr = device->device_data;
-	snd_unregister_device(compr->direction, compr->card, compr->device);
+	snd_unregister_device(SNDRV_DEVICE_TYPE_COMPRESS, compr->card,
+		compr->device);
 	return 0;
 }
 
diff --git a/sound/core/control.c b/sound/core/control.c
index d8aa206..6cf53ad 100644
--- a/sound/core/control.c
+++ b/sound/core/control.c
@@ -29,6 +29,7 @@
 #include <sound/minors.h>
 #include <sound/info.h>
 #include <sound/control.h>
+#include <sound/effect_driver.h>
 
 /* max number of user-defined controls */
 #define MAX_USER_CONTROLS	32
@@ -1396,6 +1397,31 @@
 #else
 		return put_user(SNDRV_CTL_POWER_D0, ip) ? -EFAULT : 0;
 #endif
+#if IS_ENABLED(CONFIG_SND_EFFECTS_OFFLOAD)
+	case SNDRV_CTL_IOCTL_EFFECT_VERSION:
+		return put_user(SNDRV_EFFECT_VERSION, ip) ? -EFAULT : 0;
+	case SNDRV_CTL_IOCTL_EFFECT_CREATE:
+		return snd_ctl_effect_create(card, argp);
+	case SNDRV_CTL_IOCTL_EFFECT_DESTROY:
+		return snd_ctl_effect_destroy(card, argp);
+	case SNDRV_CTL_IOCTL_EFFECT_SET_PARAMS:
+		return snd_ctl_effect_set_params(card, argp);
+	case SNDRV_CTL_IOCTL_EFFECT_GET_PARAMS:
+		return snd_ctl_effect_get_params(card, argp);
+	case SNDRV_CTL_IOCTL_EFFECT_QUERY_NUM:
+		return snd_ctl_effect_query_num_effects(card, argp);
+	case SNDRV_CTL_IOCTL_EFFECT_QUERY_CAPS:
+		return snd_ctl_effect_query_effect_caps(card, argp);
+#else
+	case SNDRV_CTL_IOCTL_EFFECT_VERSION:
+	case SNDRV_CTL_IOCTL_EFFECT_CREATE:
+	case SNDRV_CTL_IOCTL_EFFECT_DESTROY:
+	case SNDRV_CTL_IOCTL_EFFECT_SET_PARAMS:
+	case SNDRV_CTL_IOCTL_EFFECT_GET_PARAMS:
+	case SNDRV_CTL_IOCTL_EFFECT_QUERY_NUM:
+	case SNDRV_CTL_IOCTL_EFFECT_QUERY_CAPS:
+		return -ENOPROTOOPT;
+#endif
 	}
 	down_read(&snd_ioctl_rwsem);
 	list_for_each_entry(p, &snd_control_ioctls, list) {
diff --git a/sound/core/control_compat.c b/sound/core/control_compat.c
index 2bb95a7..2af6bf9 100644
--- a/sound/core/control_compat.c
+++ b/sound/core/control_compat.c
@@ -418,6 +418,15 @@
 	case SNDRV_CTL_IOCTL_TLV_READ:
 	case SNDRV_CTL_IOCTL_TLV_WRITE:
 	case SNDRV_CTL_IOCTL_TLV_COMMAND:
+#if IS_ENABLED(CONFIG_SND_EFFECTS_OFFLOAD)
+	case SNDRV_CTL_IOCTL_EFFECT_VERSION:
+	case SNDRV_CTL_IOCTL_EFFECT_CREATE:
+	case SNDRV_CTL_IOCTL_EFFECT_DESTROY:
+	case SNDRV_CTL_IOCTL_EFFECT_SET_PARAMS:
+	case SNDRV_CTL_IOCTL_EFFECT_GET_PARAMS:
+	case SNDRV_CTL_IOCTL_EFFECT_QUERY_NUM:
+	case SNDRV_CTL_IOCTL_EFFECT_QUERY_CAPS:
+#endif
 		return snd_ctl_ioctl(file, cmd, (unsigned long)argp);
 	case SNDRV_CTL_IOCTL_ELEM_LIST32:
 		return snd_ctl_elem_list_compat(ctl->card, argp);
diff --git a/sound/core/effects_offload.c b/sound/core/effects_offload.c
new file mode 100644
index 0000000..3d665b4
--- /dev/null
+++ b/sound/core/effects_offload.c
@@ -0,0 +1,316 @@
+/*
+ *  effect_offload.c - effects offload core
+ *
+ *  Copyright (C) 2013 Intel Corporation
+ *  Authors:	Lakshmi N Vinnakota <lakshmi.n.vinnakota@intel.com>
+ *		Vinod Koul <vinod.koul@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+#define FORMAT(fmt) "%s: %d: " fmt, __func__, __LINE__
+#define pr_fmt(fmt) KBUILD_MODNAME ": " FORMAT(fmt)
+
+#include <linux/module.h>
+#include <linux/uio.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/effect_offload.h>
+#include <sound/effect_driver.h>
+
+static DEFINE_MUTEX(effect_mutex);
+
+int snd_ctl_effect_create(struct snd_card *card, void *arg)
+{
+	int retval = 0;
+	struct snd_effect *effect;
+
+	effect = kmalloc(sizeof(*effect), GFP_KERNEL);
+	if (!effect)
+		return -ENOMEM;
+	if (copy_from_user(effect, (void __user *)arg, sizeof(*effect))) {
+		retval = -EFAULT;
+		goto out;
+	}
+	pr_debug("effect_offload: device %u, pos %u, mode%u\n",
+			effect->device, effect->pos, effect->mode);
+
+	mutex_lock(&card->effect_lock);
+	retval = card->effect_ops->create(card, effect);
+	mutex_unlock(&card->effect_lock);
+out:
+	kfree(effect);
+	return retval;
+}
+EXPORT_SYMBOL_GPL(snd_ctl_effect_create);
+
+int snd_ctl_effect_destroy(struct snd_card *card, void *arg)
+{
+	int retval = 0;
+	struct snd_effect *effect;
+
+	effect = kmalloc(sizeof(*effect), GFP_KERNEL);
+	if (!effect)
+		return -ENOMEM;
+	if (copy_from_user(effect, (void __user *)arg, sizeof(*effect))) {
+		retval = -EFAULT;
+		goto out;
+	}
+	mutex_lock(&card->effect_lock);
+	retval = card->effect_ops->destroy(card, effect);
+	mutex_unlock(&card->effect_lock);
+out:
+	kfree(effect);
+	return retval;
+}
+EXPORT_SYMBOL_GPL(snd_ctl_effect_destroy);
+
+int snd_ctl_effect_set_params(struct snd_card *card, void *arg)
+{
+	int retval = 0;
+	struct snd_effect_params *params;
+	char *params_ptr;
+	char __user *argp = (char __user *)arg;
+
+	params = kmalloc(sizeof(*params), GFP_KERNEL);
+	if (!params)
+		return -ENOMEM;
+
+	if (copy_from_user(params, argp, sizeof(*params))) {
+		retval = -EFAULT;
+		goto out;
+	}
+	params_ptr = kmalloc(params->size, GFP_KERNEL);
+	if (!params_ptr) {
+		retval = -ENOMEM;
+		goto out;
+	}
+
+	if (copy_from_user((void *)params_ptr, (void __user *)params->buffer_ptr,
+				params->size)) {
+		retval = -EFAULT;
+		goto free_buf;
+	}
+
+	params->buffer_ptr = (unsigned long)params_ptr;
+
+	mutex_lock(&card->effect_lock);
+	retval = card->effect_ops->set_params(card, params);
+	mutex_unlock(&card->effect_lock);
+free_buf:
+	kfree(params_ptr);
+out:
+	kfree(params);
+	return retval;
+}
+EXPORT_SYMBOL_GPL(snd_ctl_effect_set_params);
+
+int snd_ctl_effect_get_params(struct snd_card *card, void *arg)
+{
+	int retval = 0;
+	struct snd_effect_params inparams;
+	struct snd_effect_params *outparams;
+	unsigned int offset;
+	char *params_ptr;
+	char __user *argp = (char __user *)arg;
+
+	if (copy_from_user((void *)&inparams, argp, sizeof(inparams)))
+		retval = -EFAULT;
+
+	outparams = kmalloc(sizeof(*outparams), GFP_KERNEL);
+	if (!outparams)
+		return -ENOMEM;
+
+	memcpy(outparams, &inparams, sizeof(inparams));
+	params_ptr = kmalloc(inparams.size, GFP_KERNEL);
+	if (!params_ptr) {
+		retval = -ENOMEM;
+		goto free_out;
+	}
+
+	if (copy_from_user((void *)params_ptr, (void *)inparams.buffer_ptr,
+							inparams.size)) {
+		retval = -EFAULT;
+		goto free_buf;
+	}
+
+	outparams->buffer_ptr = (unsigned long)params_ptr;
+
+	mutex_lock(&card->effect_lock);
+	retval = card->effect_ops->get_params(card, outparams);
+	mutex_unlock(&card->effect_lock);
+
+	if (retval)
+		goto free_buf;
+
+	if (!outparams->size)
+		goto free_buf;
+
+	if (outparams->size > inparams.size) {
+		pr_err("mem insufficient to copy\n");
+		retval = -EMSGSIZE;
+		goto free_buf;
+	} else {
+		offset = offsetof(struct snd_effect_params, size);
+		if (copy_to_user((argp + offset), (void *)&outparams->size,
+								sizeof(u32)))
+			retval = -EFAULT;
+
+		if (copy_to_user((void *)inparams.buffer_ptr,
+				(void *) outparams->buffer_ptr, outparams->size))
+			retval = -EFAULT;
+	}
+free_buf:
+	kfree(params_ptr);
+free_out:
+	kfree(outparams);
+	return retval;
+}
+EXPORT_SYMBOL_GPL(snd_ctl_effect_get_params);
+
+int snd_ctl_effect_query_num_effects(struct snd_card *card, void *arg)
+{
+	int retval = 0;
+	int __user *ip = arg;
+
+	mutex_lock(&card->effect_lock);
+	retval = card->effect_ops->query_num_effects(card);
+	mutex_unlock(&card->effect_lock);
+
+	if (retval < 0)
+		goto out;
+	retval = put_user(retval, ip) ? -EFAULT : 0;
+out:
+	return retval;
+}
+EXPORT_SYMBOL_GPL(snd_ctl_effect_query_num_effects);
+
+int snd_ctl_effect_query_effect_caps(struct snd_card *card, void *arg)
+{
+	int retval = 0;
+	struct snd_effect_caps *caps;
+	unsigned int offset, insize;
+	char *caps_ptr;
+	char __user *argp = (char __user *)arg;
+	char __user *bufp;
+
+	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
+	if (!caps)
+		return -ENOMEM;
+
+	if (copy_from_user(caps, argp, sizeof(*caps))) {
+		retval = -EFAULT;
+		goto out;
+	}
+
+	bufp = (void __user *)caps->buffer_ptr;
+	insize = caps->size;
+	caps_ptr = kmalloc(caps->size, GFP_KERNEL);
+	if (!caps_ptr) {
+		retval = -ENOMEM;
+		goto out;
+	}
+
+	caps->buffer_ptr = (unsigned long)caps_ptr;
+
+	mutex_lock(&card->effect_lock);
+	retval = card->effect_ops->query_effect_caps(card, caps);
+	mutex_unlock(&card->effect_lock);
+
+	if (retval)
+		goto free_buf;
+
+	if (insize < caps->size) {
+		pr_err("mem insufficient to copy\n");
+		retval = -EMSGSIZE;
+		goto free_buf;
+	}
+
+	offset = offsetof(struct snd_effect_caps, size);
+	if (copy_to_user((argp + offset), (void *)&caps->size, sizeof(u32))) {
+		retval = -EFAULT;
+		goto free_buf;
+	}
+
+	if (copy_to_user(bufp, (void *)caps->buffer_ptr, caps->size))
+		retval = -EFAULT;
+
+free_buf:
+	kfree(caps_ptr);
+out:
+	kfree(caps);
+	return retval;
+}
+EXPORT_SYMBOL_GPL(snd_ctl_effect_query_effect_caps);
+
+/**
+ * snd_effect_register - register compressed device
+ *
+ * @card : snd card to which the effect is registered
+ * @ops : effect_ops to register
+ */
+int snd_effect_register(struct snd_card *card, struct snd_effect_ops *ops)
+{
+
+	if (card == NULL || ops == NULL)
+		return -EINVAL;
+
+	if (snd_BUG_ON(!ops->create))
+		return -EINVAL;
+	if (snd_BUG_ON(!ops->destroy))
+		return -EINVAL;
+	if (snd_BUG_ON(!ops->set_params))
+		return -EINVAL;
+
+	mutex_init(&card->effect_lock);
+
+	pr_debug("Registering Effects to card %s\n", card->shortname);
+	/* register the effect ops with the card */
+	mutex_lock(&effect_mutex);
+	card->effect_ops = ops;
+	mutex_unlock(&effect_mutex);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(snd_effect_register);
+
+int snd_effect_deregister(struct snd_card *card)
+{
+	pr_debug("Removing effects for card %s\n", card->shortname);
+	mutex_lock(&effect_mutex);
+	card->effect_ops = NULL;
+	mutex_unlock(&effect_mutex);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(snd_effect_deregister);
+
+static int __init snd_effect_init(void)
+{
+	return 0;
+}
+
+static void __exit snd_effect_exit(void)
+{
+}
+
+module_init(snd_effect_init);
+module_exit(snd_effect_exit);
+
+MODULE_DESCRIPTION("ALSA Effect offload framework");
+MODULE_AUTHOR("Lakshmi N Vinnakota <lakshmi.n.vinnakota@intel.com>");
+MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index bdf826f..f561647 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -193,7 +193,7 @@
 	switch (type) {
 	case SNDRV_DMA_TYPE_CONTINUOUS:
 		dmab->area = snd_malloc_pages(size,
-					(__force gfp_t)(unsigned long)device);
+			(__force gfp_t)(unsigned long)device);
 		dmab->addr = 0;
 		break;
 #ifdef CONFIG_HAS_DMA
diff --git a/sound/core/pcm.c b/sound/core/pcm.c
index 17f45e8..e1e9e0c 100644
--- a/sound/core/pcm.c
+++ b/sound/core/pcm.c
@@ -49,6 +49,8 @@
 	struct snd_pcm *pcm;
 
 	list_for_each_entry(pcm, &snd_pcm_devices, list) {
+		if (pcm->internal)
+			continue;
 		if (pcm->card == card && pcm->device == device)
 			return pcm;
 	}
@@ -60,6 +62,8 @@
 	struct snd_pcm *pcm;
 
 	list_for_each_entry(pcm, &snd_pcm_devices, list) {
+		if (pcm->internal)
+			continue;
 		if (pcm->card == card && pcm->device > device)
 			return pcm->device;
 		else if (pcm->card->number > card->number)
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index 41b3dfe..7725f56 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -293,7 +293,15 @@
 			return -EPIPE;
 		}
 	} else {
-		if (avail >= runtime->stop_threshold) {
+		snd_pcm_uframes_t actual_avail;
+		if (avail < runtime->soc_delay)
+			actual_avail = avail;
+		else
+			actual_avail = avail - runtime->soc_delay;
+		if (actual_avail  >= runtime->stop_threshold) {
+			snd_printd(KERN_ERR  "avail > stop_threshold!!\n");
+			snd_printd(KERN_ERR  "actual_avail %ld, avail %ld, soc_delay %ld!!\n",
+					actual_avail, avail,  runtime->soc_delay);
 			xrun(substream);
 			return -EPIPE;
 		}
@@ -454,9 +462,9 @@
 	if (runtime->hw.info & SNDRV_PCM_INFO_BATCH)
 		goto no_jiffies_check;
 	hdelta = delta;
-	if (hdelta < runtime->delay)
+	if (hdelta < (runtime->delay + runtime->soc_delay))
 		goto no_jiffies_check;
-	hdelta -= runtime->delay;
+	hdelta -= (runtime->delay + runtime->soc_delay);
 	jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
 	if (((hdelta * HZ) / runtime->rate) > jdelta + HZ/100) {
 		delta = jdelta /
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index f928181..8833629 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -616,13 +616,13 @@
 		if (runtime->status->state == SNDRV_PCM_STATE_RUNNING ||
 		    runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
 			status->delay = runtime->buffer_size - status->avail;
-			status->delay += runtime->delay;
+			status->delay += runtime->delay + runtime->soc_delay;
 		} else
 			status->delay = 0;
 	} else {
 		status->avail = snd_pcm_capture_avail(runtime);
 		if (runtime->status->state == SNDRV_PCM_STATE_RUNNING)
-			status->delay = status->avail + runtime->delay;
+			status->delay = status->avail + runtime->delay + runtime->soc_delay;
 		else
 			status->delay = 0;
 	}
@@ -2481,7 +2481,7 @@
 			n = snd_pcm_playback_hw_avail(runtime);
 		else
 			n = snd_pcm_capture_avail(runtime);
-		n += runtime->delay;
+		n += runtime->delay + runtime->soc_delay;
 		break;
 	case SNDRV_PCM_STATE_XRUN:
 		err = -EPIPE;
diff --git a/sound/core/seq/oss/seq_oss_init.c b/sound/core/seq/oss/seq_oss_init.c
index e3cb46f..b3f39b5 100644
--- a/sound/core/seq/oss/seq_oss_init.c
+++ b/sound/core/seq/oss/seq_oss_init.c
@@ -31,6 +31,7 @@
 #include <linux/export.h>
 #include <linux/moduleparam.h>
 #include <linux/slab.h>
+#include <linux/workqueue.h>
 
 /*
  * common variables
@@ -60,6 +61,14 @@
 #define call_ctl(type,rec) snd_seq_kernel_client_ctl(system_client, type, rec)
 
 
+/* call snd_seq_oss_midi_lookup_ports() asynchronously */
+static void async_call_lookup_ports(struct work_struct *work)
+{
+	snd_seq_oss_midi_lookup_ports(system_client);
+}
+
+static DECLARE_WORK(async_lookup_work, async_call_lookup_ports);
+
 /*
  * create sequencer client for OSS sequencer
  */
@@ -85,9 +94,6 @@
 	system_client = rc;
 	debug_printk(("new client = %d\n", rc));
 
-	/* look up midi devices */
-	snd_seq_oss_midi_lookup_ports(system_client);
-
 	/* create annoucement receiver port */
 	memset(port, 0, sizeof(*port));
 	strcpy(port->name, "Receiver");
@@ -115,6 +121,9 @@
 	}
 	rc = 0;
 
+	/* look up midi devices */
+	schedule_work(&async_lookup_work);
+
  __error:
 	kfree(port);
 	return rc;
@@ -160,6 +169,7 @@
 int
 snd_seq_oss_delete_client(void)
 {
+	cancel_work_sync(&async_lookup_work);
 	if (system_client >= 0)
 		snd_seq_delete_kernel_client(system_client);
 
diff --git a/sound/core/seq/oss/seq_oss_midi.c b/sound/core/seq/oss/seq_oss_midi.c
index 677dc84..862d8489 100644
--- a/sound/core/seq/oss/seq_oss_midi.c
+++ b/sound/core/seq/oss/seq_oss_midi.c
@@ -72,7 +72,7 @@
  * look up the existing ports
  * this looks a very exhausting job.
  */
-int __init
+int
 snd_seq_oss_midi_lookup_ports(int client)
 {
 	struct snd_seq_client_info *clinfo;
diff --git a/sound/isa/opti9xx/opti92x-ad1848.c b/sound/isa/opti9xx/opti92x-ad1848.c
index b41ed86..e427dbf 100644
--- a/sound/isa/opti9xx/opti92x-ad1848.c
+++ b/sound/isa/opti9xx/opti92x-ad1848.c
@@ -173,11 +173,7 @@
 
 #endif	/* CONFIG_PNP */
 
-#ifdef OPTi93X
-#define DEV_NAME "opti93x"
-#else
-#define DEV_NAME "opti92x"
-#endif
+#define DEV_NAME KBUILD_MODNAME
 
 static char * snd_opti9xx_names[] = {
 	"unknown",
@@ -1168,7 +1164,7 @@
 
 static struct pnp_card_driver opti9xx_pnpc_driver = {
 	.flags		= PNP_DRIVER_RES_DISABLE,
-	.name		= "opti9xx",
+	.name		= DEV_NAME,
 	.id_table	= snd_opti9xx_pnpids,
 	.probe		= snd_opti9xx_pnp_probe,
 	.remove		= snd_opti9xx_pnp_remove,
diff --git a/sound/pci/asihpi/asihpi.c b/sound/pci/asihpi/asihpi.c
index fbc1720..a471d82 100644
--- a/sound/pci/asihpi/asihpi.c
+++ b/sound/pci/asihpi/asihpi.c
@@ -769,7 +769,10 @@
 						s->number);
 				ds->drained_count++;
 				if (ds->drained_count > 20) {
+					unsigned long flags;
+					snd_pcm_stream_lock_irqsave(s, flags);
 					snd_pcm_stop(s, SNDRV_PCM_STATE_XRUN);
+					snd_pcm_stream_unlock_irqrestore(s, flags);
 					continue;
 				}
 			} else {
diff --git a/sound/pci/atiixp.c b/sound/pci/atiixp.c
index 6e78c67..819430a 100644
--- a/sound/pci/atiixp.c
+++ b/sound/pci/atiixp.c
@@ -689,7 +689,9 @@
 	if (! dma->substream || ! dma->running)
 		return;
 	snd_printdd("atiixp: XRUN detected (DMA %d)\n", dma->ops->type);
+	snd_pcm_stream_lock(dma->substream);
 	snd_pcm_stop(dma->substream, SNDRV_PCM_STATE_XRUN);
+	snd_pcm_stream_unlock(dma->substream);
 }
 
 /*
diff --git a/sound/pci/atiixp_modem.c b/sound/pci/atiixp_modem.c
index d0bec7b..57f4182 100644
--- a/sound/pci/atiixp_modem.c
+++ b/sound/pci/atiixp_modem.c
@@ -638,7 +638,9 @@
 	if (! dma->substream || ! dma->running)
 		return;
 	snd_printdd("atiixp-modem: XRUN detected (DMA %d)\n", dma->ops->type);
+	snd_pcm_stream_lock(dma->substream);
 	snd_pcm_stop(dma->substream, SNDRV_PCM_STATE_XRUN);
+	snd_pcm_stream_unlock(dma->substream);
 }
 
 /*
diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c
index 7c11d46..48a9d00 100644
--- a/sound/pci/hda/hda_auto_parser.c
+++ b/sound/pci/hda/hda_auto_parser.c
@@ -860,7 +860,7 @@
 		}
 	}
 	if (id < 0 && quirk) {
-		for (q = quirk; q->subvendor; q++) {
+		for (q = quirk; q->subvendor || q->subdevice; q++) {
 			unsigned int vendorid =
 				q->subdevice | (q->subvendor << 16);
 			unsigned int mask = 0xffff0000 | q->subdevice_mask;
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 55108b5..31461ba 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -4789,8 +4789,8 @@
 	spin_unlock(&codec->power_lock);
 
 	state = hda_call_codec_suspend(codec, true);
-	codec->pm_down_notified = 0;
-	if (!bus->power_keep_link_on && (state & AC_PWRST_CLK_STOP_OK)) {
+	if (!codec->pm_down_notified &&
+	    !bus->power_keep_link_on && (state & AC_PWRST_CLK_STOP_OK)) {
 		codec->pm_down_notified = 1;
 		hda_call_pm_notify(bus, false);
 	}
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 4b1524a..d0cc796 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -519,7 +519,7 @@
 }
 
 #define nid_has_mute(codec, nid, dir) \
-	check_amp_caps(codec, nid, dir, AC_AMPCAP_MUTE)
+	check_amp_caps(codec, nid, dir, (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE))
 #define nid_has_volume(codec, nid, dir) \
 	check_amp_caps(codec, nid, dir, AC_AMPCAP_NUM_STEPS)
 
@@ -621,7 +621,7 @@
 		if (enable)
 			val = (caps & AC_AMPCAP_OFFSET) >> AC_AMPCAP_OFFSET_SHIFT;
 	}
-	if (caps & AC_AMPCAP_MUTE) {
+	if (caps & (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE)) {
 		if (!enable)
 			val |= HDA_AMP_MUTE;
 	}
@@ -645,7 +645,7 @@
 {
 	unsigned int mask = 0xff;
 
-	if (caps & AC_AMPCAP_MUTE) {
+	if (caps & (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE)) {
 		if (is_ctl_associated(codec, nid, dir, idx, NID_PATH_MUTE_CTL))
 			mask &= ~0x80;
 	}
@@ -840,7 +840,7 @@
 				const char *pfx, const char *dir,
 				const char *sfx, int cidx, unsigned long val)
 {
-	char name[32];
+	char name[44];
 	snprintf(name, sizeof(name), "%s %s %s", pfx, dir, sfx);
 	if (!add_control(spec, type, name, cidx, val))
 		return -ENOMEM;
@@ -3474,7 +3474,7 @@
 		if (!multi)
 			err = create_single_cap_vol_ctl(codec, n, vol, sw,
 							inv_dmic);
-		else if (!multi_cap_vol)
+		else if (!multi_cap_vol && !inv_dmic)
 			err = create_bind_cap_vol_ctl(codec, n, vol, sw);
 		else
 			err = create_multi_cap_vol_ctl(codec);
@@ -4383,9 +4383,11 @@
 					    true, &spec->vmaster_mute.sw_kctl);
 		if (err < 0)
 			return err;
-		if (spec->vmaster_mute.hook)
+		if (spec->vmaster_mute.hook) {
 			snd_hda_add_vmaster_hook(codec, &spec->vmaster_mute,
 						 spec->vmaster_mute_enum);
+			snd_hda_sync_vmaster_hook(&spec->vmaster_mute);
+		}
 	}
 
 	free_kctls(spec); /* no longer needed */
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index de18722..624e6c0 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -3335,6 +3335,7 @@
 	SND_PCI_QUIRK(0x1043, 0x81f2, "ASUS", 0), /* Athlon64 X2 + nvidia */
 	SND_PCI_QUIRK(0x1043, 0x81f6, "ASUS", 0), /* nvidia */
 	SND_PCI_QUIRK(0x1043, 0x822d, "ASUS", 0), /* Athlon64 X2 + nvidia MCP55 */
+	SND_PCI_QUIRK(0x1179, 0xfb44, "Toshiba Satellite C870", 0), /* AMD Hudson */
 	SND_PCI_QUIRK(0x1849, 0x0888, "ASRock", 0), /* Athlon64 X2 + nvidia */
 	SND_PCI_QUIRK(0xa0a0, 0x0575, "Aopen MZ915-M", 0), /* ICH6 */
 	{}
diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h
index e0bf753..2e7493e 100644
--- a/sound/pci/hda/hda_local.h
+++ b/sound/pci/hda/hda_local.h
@@ -562,6 +562,14 @@
 	return chans;
 }
 
+static inline void snd_hda_override_wcaps(struct hda_codec *codec,
+					  hda_nid_t nid, u32 val)
+{
+	if (nid >= codec->start_nid &&
+	    nid < codec->start_nid + codec->num_nodes)
+		codec->wcaps[nid - codec->start_nid] = val;
+}
+
 u32 query_amp_caps(struct hda_codec *codec, hda_nid_t nid, int direction);
 int snd_hda_override_amp_caps(struct hda_codec *codec, hda_nid_t nid, int dir,
 			      unsigned int caps);
@@ -667,7 +675,7 @@
 	if (state & AC_PWRST_ERROR)
 		return true;
 	state = (state >> 4) & 0x0f;
-	return (state != target_state);
+	return (state == target_state);
 }
 
 unsigned int snd_hda_codec_eapd_power_filter(struct hda_codec *codec,
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index 977b0d8..d97f0d6 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -2112,6 +2112,9 @@
 {
 	struct hda_codec *codec = private_data;
 	struct ad198x_spec *spec = codec->spec;
+
+	if (!spec->eapd_nid)
+		return;
 	snd_hda_codec_update_cache(codec, spec->eapd_nid, 0,
 				   AC_VERB_SET_EAPD_BTLENABLE,
 				   enabled ? 0x02 : 0x00);
@@ -3601,13 +3604,16 @@
 {
 	struct ad198x_spec *spec = codec->spec;
 
-	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+	switch (action) {
+	case HDA_FIXUP_ACT_PRE_PROBE:
+		spec->gen.vmaster_mute.hook = ad_vmaster_eapd_hook;
+		break;
+	case HDA_FIXUP_ACT_PROBE:
 		if (spec->gen.autocfg.line_out_type == AUTO_PIN_SPEAKER_OUT)
 			spec->eapd_nid = spec->gen.autocfg.line_out_pins[0];
 		else
 			spec->eapd_nid = spec->gen.autocfg.speaker_pins[0];
-		if (spec->eapd_nid)
-			spec->gen.vmaster_mute.hook = ad_vmaster_eapd_hook;
+		break;
 	}
 }
 
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index b314d3e..c96e194 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -3225,6 +3225,7 @@
 	CXT_PINCFG_LEMOTE_A1205,
 	CXT_FIXUP_STEREO_DMIC,
 	CXT_FIXUP_INC_MIC_BOOST,
+	CXT_FIXUP_GPIO1,
 };
 
 static void cxt_fixup_stereo_dmic(struct hda_codec *codec,
@@ -3303,6 +3304,15 @@
 		.type = HDA_FIXUP_FUNC,
 		.v.func = cxt5066_increase_mic_boost,
 	},
+	[CXT_FIXUP_GPIO1] = {
+		.type = HDA_FIXUP_VERBS,
+		.v.verbs = (const struct hda_verb[]) {
+			{ 0x01, AC_VERB_SET_GPIO_MASK, 0x01 },
+			{ 0x01, AC_VERB_SET_GPIO_DIRECTION, 0x01 },
+			{ 0x01, AC_VERB_SET_GPIO_DATA, 0x01 },
+			{ }
+		},
+	},
 };
 
 static const struct snd_pci_quirk cxt5051_fixups[] = {
@@ -3312,6 +3322,7 @@
 
 static const struct snd_pci_quirk cxt5066_fixups[] = {
 	SND_PCI_QUIRK(0x1025, 0x0543, "Acer Aspire One 522", CXT_FIXUP_STEREO_DMIC),
+	SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_GPIO1),
 	SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410),
 	SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo T410", CXT_PINCFG_LENOVO_TP410),
 	SND_PCI_QUIRK(0x17aa, 0x215f, "Lenovo T510", CXT_PINCFG_LENOVO_TP410),
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index e12f7a0..b937992 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -67,6 +67,8 @@
 	struct delayed_work work;
 	struct snd_kcontrol *eld_ctl;
 	int repoll_count;
+	bool setup; /* the stream has been set up by prepare callback */
+	int channels; /* current number of channels */
 	bool non_pcm;
 	bool chmap_set;		/* channel-map override by ALSA API? */
 	unsigned char chmap[8]; /* ALSA API channel-map */
@@ -551,6 +553,17 @@
 		}
 	}
 
+	if (!ca) {
+		/* if there was no match, select the regular ALSA channel
+		 * allocation with the matching number of channels */
+		for (i = 0; i < ARRAY_SIZE(channel_allocations); i++) {
+			if (channels == channel_allocations[i].channels) {
+				ca = channel_allocations[i].ca_index;
+				break;
+			}
+		}
+	}
+
 	snd_print_channel_allocation(eld->info.spk_alloc, buf, sizeof(buf));
 	snd_printdd("HDMI: select CA 0x%x for %d-channel allocation: %s\n",
 		    ca, channels, buf);
@@ -725,9 +738,10 @@
 static void hdmi_setup_fake_chmap(unsigned char *map, int ca)
 {
 	int i;
+	int ordered_ca = get_channel_allocation_order(ca);
 	for (i = 0; i < 8; i++) {
-		if (i < channel_allocations[ca].channels)
-			map[i] = from_cea_slot((hdmi_channel_mapping[ca][i] >> 4) & 0x0f);
+		if (i < channel_allocations[ordered_ca].channels)
+			map[i] = from_cea_slot(hdmi_channel_mapping[ca][i] & 0x0f);
 		else
 			map[i] = 0;
 	}
@@ -868,18 +882,19 @@
 	return true;
 }
 
-static void hdmi_setup_audio_infoframe(struct hda_codec *codec, int pin_idx,
-				       bool non_pcm,
-				       struct snd_pcm_substream *substream)
+static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
+				       struct hdmi_spec_per_pin *per_pin,
+				       bool non_pcm)
 {
-	struct hdmi_spec *spec = codec->spec;
-	struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
 	hda_nid_t pin_nid = per_pin->pin_nid;
-	int channels = substream->runtime->channels;
+	int channels = per_pin->channels;
 	struct hdmi_eld *eld;
 	int ca;
 	union audio_infoframe ai;
 
+	if (!channels)
+		return;
+
 	eld = &per_pin->sink_eld;
 	if (!eld->monitor_present)
 		return;
@@ -916,6 +931,14 @@
 	}
 
 	/*
+	 * always configure channel mapping, it may have been changed by the
+	 * user in the meantime
+	 */
+	hdmi_setup_channel_mapping(codec, pin_nid, non_pcm, ca,
+				   channels, per_pin->chmap,
+				   per_pin->chmap_set);
+
+	/*
 	 * sizeof(ai) is used instead of sizeof(*hdmi_ai) or
 	 * sizeof(*dp_ai) to avoid partial match/update problems when
 	 * the user switches between HDMI/DP monitors.
@@ -926,20 +949,10 @@
 			    "pin=%d channels=%d\n",
 			    pin_nid,
 			    channels);
-		hdmi_setup_channel_mapping(codec, pin_nid, non_pcm, ca,
-					   channels, per_pin->chmap,
-					   per_pin->chmap_set);
 		hdmi_stop_infoframe_trans(codec, pin_nid);
 		hdmi_fill_audio_infoframe(codec, pin_nid,
 					    ai.bytes, sizeof(ai));
 		hdmi_start_infoframe_trans(codec, pin_nid);
-	} else {
-		/* For non-pcm audio switch, setup new channel mapping
-		 * accordingly */
-		if (per_pin->non_pcm != non_pcm)
-			hdmi_setup_channel_mapping(codec, pin_nid, non_pcm, ca,
-						   channels, per_pin->chmap,
-						   per_pin->chmap_set);
 	}
 
 	per_pin->non_pcm = non_pcm;
@@ -1146,7 +1159,7 @@
 	per_cvt->assigned = 1;
 	hinfo->nid = per_cvt->cvt_nid;
 
-	snd_hda_codec_write(codec, per_pin->pin_nid, 0,
+	snd_hda_codec_write_cache(codec, per_pin->pin_nid, 0,
 			    AC_VERB_SET_CONNECT_SEL,
 			    mux_idx);
 	snd_hda_spdif_ctls_assign(codec, pin_idx, per_cvt->cvt_nid);
@@ -1263,6 +1276,7 @@
 		eld_changed = true;
 	}
 	if (update_eld) {
+		bool old_eld_valid = pin_eld->eld_valid;
 		pin_eld->eld_valid = eld->eld_valid;
 		eld_changed = pin_eld->eld_size != eld->eld_size ||
 			      memcmp(pin_eld->eld_buffer, eld->eld_buffer,
@@ -1272,6 +1286,18 @@
 			       eld->eld_size);
 		pin_eld->eld_size = eld->eld_size;
 		pin_eld->info = eld->info;
+
+		/* Haswell-specific workaround: re-setup when the transcoder is
+		 * changed during the stream playback
+		 */
+		if (codec->vendor_id == 0x80862807 &&
+		    eld->eld_valid && !old_eld_valid && per_pin->setup) {
+			snd_hda_codec_write(codec, pin_nid, 0,
+					    AC_VERB_SET_AMP_GAIN_MUTE,
+					    AMP_OUT_UNMUTE);
+			hdmi_setup_audio_infoframe(codec, per_pin,
+						   per_pin->non_pcm);
+		}
 	}
 	mutex_unlock(&pin_eld->lock);
 
@@ -1444,14 +1470,17 @@
 	hda_nid_t cvt_nid = hinfo->nid;
 	struct hdmi_spec *spec = codec->spec;
 	int pin_idx = hinfo_to_pin_index(spec, hinfo);
-	hda_nid_t pin_nid = get_pin(spec, pin_idx)->pin_nid;
+	struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
+	hda_nid_t pin_nid = per_pin->pin_nid;
 	bool non_pcm;
 
 	non_pcm = check_non_pcm_per_cvt(codec, cvt_nid);
+	per_pin->channels = substream->runtime->channels;
+	per_pin->setup = true;
 
 	hdmi_set_channel_count(codec, cvt_nid, substream->runtime->channels);
 
-	hdmi_setup_audio_infoframe(codec, pin_idx, non_pcm, substream);
+	hdmi_setup_audio_infoframe(codec, per_pin, non_pcm);
 
 	return hdmi_setup_stream(codec, cvt_nid, pin_nid, stream_tag, format);
 }
@@ -1491,6 +1520,9 @@
 		snd_hda_spdif_ctls_unassign(codec, pin_idx);
 		per_pin->chmap_set = false;
 		memset(per_pin->chmap, 0, sizeof(per_pin->chmap));
+
+		per_pin->setup = false;
+		per_pin->channels = 0;
 	}
 
 	return 0;
@@ -1626,8 +1658,7 @@
 	per_pin->chmap_set = true;
 	memcpy(per_pin->chmap, chmap, sizeof(chmap));
 	if (prepared)
-		hdmi_setup_audio_infoframe(codec, pin_idx, per_pin->non_pcm,
-					   substream);
+		hdmi_setup_audio_infoframe(codec, per_pin, per_pin->non_pcm);
 
 	return 0;
 }
@@ -1715,6 +1746,9 @@
 		struct snd_pcm_chmap *chmap;
 		struct snd_kcontrol *kctl;
 		int i;
+
+		if (!codec->pcm_info[pin_idx].pcm)
+			break;
 		err = snd_pcm_add_chmap_ctls(codec->pcm_info[pin_idx].pcm,
 					     SNDRV_PCM_STREAM_PLAYBACK,
 					     NULL, 0, pin_idx, &chmap);
@@ -2536,6 +2570,7 @@
 { .id = 0x10de0043, .name = "GPU 43 HDMI/DP",	.patch = patch_generic_hdmi },
 { .id = 0x10de0044, .name = "GPU 44 HDMI/DP",	.patch = patch_generic_hdmi },
 { .id = 0x10de0051, .name = "GPU 51 HDMI/DP",	.patch = patch_generic_hdmi },
+{ .id = 0x10de0060, .name = "GPU 60 HDMI/DP",	.patch = patch_generic_hdmi },
 { .id = 0x10de0067, .name = "MCP67 HDMI",	.patch = patch_nvhdmi_2ch },
 { .id = 0x10de8001, .name = "MCP73 HDMI",	.patch = patch_nvhdmi_2ch },
 { .id = 0x11069f80, .name = "VX900 HDMI/DP",	.patch = patch_via_hdmi },
@@ -2588,6 +2623,7 @@
 MODULE_ALIAS("snd-hda-codec-id:10de0043");
 MODULE_ALIAS("snd-hda-codec-id:10de0044");
 MODULE_ALIAS("snd-hda-codec-id:10de0051");
+MODULE_ALIAS("snd-hda-codec-id:10de0060");
 MODULE_ALIAS("snd-hda-codec-id:10de0067");
 MODULE_ALIAS("snd-hda-codec-id:10de8001");
 MODULE_ALIAS("snd-hda-codec-id:11069f80");
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 403010c..4496e0a 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -1027,6 +1027,7 @@
 	ALC880_FIXUP_GPIO2,
 	ALC880_FIXUP_MEDION_RIM,
 	ALC880_FIXUP_LG,
+	ALC880_FIXUP_LG_LW25,
 	ALC880_FIXUP_W810,
 	ALC880_FIXUP_EAPD_COEF,
 	ALC880_FIXUP_TCL_S700,
@@ -1085,6 +1086,14 @@
 			{ }
 		}
 	},
+	[ALC880_FIXUP_LG_LW25] = {
+		.type = HDA_FIXUP_PINS,
+		.v.pins = (const struct hda_pintbl[]) {
+			{ 0x1a, 0x0181344f }, /* line-in */
+			{ 0x1b, 0x0321403f }, /* headphone */
+			{ }
+		}
+	},
 	[ALC880_FIXUP_W810] = {
 		.type = HDA_FIXUP_PINS,
 		.v.pins = (const struct hda_pintbl[]) {
@@ -1337,6 +1346,7 @@
 	SND_PCI_QUIRK(0x1854, 0x003b, "LG", ALC880_FIXUP_LG),
 	SND_PCI_QUIRK(0x1854, 0x005f, "LG P1 Express", ALC880_FIXUP_LG),
 	SND_PCI_QUIRK(0x1854, 0x0068, "LG w1", ALC880_FIXUP_LG),
+	SND_PCI_QUIRK(0x1854, 0x0077, "LG LW25", ALC880_FIXUP_LG_LW25),
 	SND_PCI_QUIRK(0x19db, 0x4188, "TCL S700", ALC880_FIXUP_TCL_S700),
 
 	/* Below is the copied entries from alc880_quirks.c.
@@ -3190,6 +3200,15 @@
 	}
 }
 
+static void alc290_fixup_mono_speakers(struct hda_codec *codec,
+				       const struct hda_fixup *fix, int action)
+{
+	if (action == HDA_FIXUP_ACT_PRE_PROBE)
+		/* Remove DAC node 0x03, as it seems to be
+		   giving mono output */
+		snd_hda_override_wcaps(codec, 0x03, 0);
+}
+
 enum {
 	ALC269_FIXUP_SONY_VAIO,
 	ALC275_FIXUP_SONY_VAIO_GPIO2,
@@ -3213,9 +3232,12 @@
 	ALC269_FIXUP_HP_GPIO_LED,
 	ALC269_FIXUP_INV_DMIC,
 	ALC269_FIXUP_LENOVO_DOCK,
+	ALC286_FIXUP_SONY_MIC_NO_PRESENCE,
 	ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT,
 	ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
 	ALC269_FIXUP_DELL2_MIC_NO_PRESENCE,
+	ALC269_FIXUP_DELL3_MIC_NO_PRESENCE,
+	ALC290_FIXUP_MONO_SPEAKERS,
 	ALC269_FIXUP_HEADSET_MODE,
 	ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC,
 	ALC269_FIXUP_ASUS_X101_FUNC,
@@ -3402,6 +3424,15 @@
 		.chained = true,
 		.chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
 	},
+	[ALC269_FIXUP_DELL3_MIC_NO_PRESENCE] = {
+		.type = HDA_FIXUP_PINS,
+		.v.pins = (const struct hda_pintbl[]) {
+			{ 0x1a, 0x01a1913c }, /* use as headset mic, without its own jack detect */
+			{ }
+		},
+		.chained = true,
+		.chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
+	},
 	[ALC269_FIXUP_HEADSET_MODE] = {
 		.type = HDA_FIXUP_FUNC,
 		.v.func = alc_fixup_headset_mode,
@@ -3410,6 +3441,13 @@
 		.type = HDA_FIXUP_FUNC,
 		.v.func = alc_fixup_headset_mode_no_hp_mic,
 	},
+	[ALC286_FIXUP_SONY_MIC_NO_PRESENCE] = {
+		.type = HDA_FIXUP_PINS,
+		.v.pins = (const struct hda_pintbl[]) {
+			{ 0x18, 0x01a1913c }, /* use as headset mic, without its own jack detect */
+			{ }
+		},
+	},
 	[ALC269_FIXUP_ASUS_X101_FUNC] = {
 		.type = HDA_FIXUP_FUNC,
 		.v.func = alc269_fixup_x101_headset_mic,
@@ -3467,6 +3505,12 @@
 		.type = HDA_FIXUP_FUNC,
 		.v.func = alc269_fixup_limit_int_mic_boost,
 	},
+	[ALC290_FIXUP_MONO_SPEAKERS] = {
+		.type = HDA_FIXUP_FUNC,
+		.v.func = alc290_fixup_mono_speakers,
+		.chained = true,
+		.chain_id = ALC269_FIXUP_DELL3_MIC_NO_PRESENCE,
+	},
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -3495,9 +3539,13 @@
 	SND_PCI_QUIRK(0x1028, 0x05f5, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x05f6, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x05f8, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
+	SND_PCI_QUIRK(0x1028, 0x05f9, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
+	SND_PCI_QUIRK(0x1028, 0x05fb, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x0606, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x0608, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x0609, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
+	SND_PCI_QUIRK(0x1028, 0x0613, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
+	SND_PCI_QUIRK(0x1028, 0x0616, "Dell Vostro 5470", ALC290_FIXUP_MONO_SPEAKERS),
 	SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
 	SND_PCI_QUIRK(0x103c, 0x18e6, "HP", ALC269_FIXUP_HP_GPIO_LED),
 	SND_PCI_QUIRK(0x103c, 0x1973, "HP Pavilion", ALC269_FIXUP_HP_MUTE_LED_MIC1),
@@ -3516,6 +3564,7 @@
 	SND_PCI_QUIRK(0x1043, 0x8398, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC),
 	SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC),
 	SND_PCI_QUIRK(0x1043, 0x8516, "ASUS X101CH", ALC269_FIXUP_ASUS_X101),
+	SND_PCI_QUIRK(0x104d, 0x90b6, "Sony VAIO Pro 13", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x104d, 0x9073, "Sony VAIO", ALC275_FIXUP_SONY_VAIO_GPIO2),
 	SND_PCI_QUIRK(0x104d, 0x907b, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
 	SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
@@ -4194,13 +4243,17 @@
 
 static const struct snd_pci_quirk alc662_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1019, 0x9087, "ECS", ALC662_FIXUP_ASUS_MODE2),
+	SND_PCI_QUIRK(0x1025, 0x022f, "Acer Aspire One", ALC662_FIXUP_INV_DMIC),
 	SND_PCI_QUIRK(0x1025, 0x0308, "Acer Aspire 8942G", ALC662_FIXUP_ASPIRE),
 	SND_PCI_QUIRK(0x1025, 0x031c, "Gateway NV79", ALC662_FIXUP_SKU_IGNORE),
 	SND_PCI_QUIRK(0x1025, 0x0349, "eMachines eM250", ALC662_FIXUP_INV_DMIC),
+	SND_PCI_QUIRK(0x1025, 0x034a, "Gateway LT27", ALC662_FIXUP_INV_DMIC),
 	SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE),
 	SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
+	SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_ASUS_MODE4),
+	SND_PCI_QUIRK(0x1043, 0x1bf3, "ASUS N76VZ", ALC662_FIXUP_ASUS_MODE4),
 	SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT),
 	SND_PCI_QUIRK(0x105b, 0x0cd6, "Foxconn", ALC662_FIXUP_ASUS_MODE2),
 	SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 1d9d642..dc4833f 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -417,9 +417,11 @@
 			val &= ~spec->eapd_mask;
 		else
 			val |= spec->eapd_mask;
-		if (spec->gpio_data != val)
+		if (spec->gpio_data != val) {
+			spec->gpio_data = val;
 			stac_gpio_set(codec, spec->gpio_mask, spec->gpio_dir,
 				      val);
+		}
 	}
 }
 
@@ -2813,6 +2815,7 @@
 
 /* codec SSIDs for Intel Mac sharing the same PCI SSID 8384:7680 */
 static const struct snd_pci_quirk stac922x_intel_mac_fixup_tbl[] = {
+	SND_PCI_QUIRK(0x0000, 0x0100, "Mac Mini", STAC_INTEL_MAC_V3),
 	SND_PCI_QUIRK(0x106b, 0x0800, "Mac", STAC_INTEL_MAC_V1),
 	SND_PCI_QUIRK(0x106b, 0x0600, "Mac", STAC_INTEL_MAC_V2),
 	SND_PCI_QUIRK(0x106b, 0x0700, "Mac", STAC_INTEL_MAC_V2),
@@ -3227,7 +3230,7 @@
 			/* configure the analog microphone on some laptops */
 			{ 0x0c, 0x90a79130 },
 			/* correct the front output jack as a hp out */
-			{ 0x0f, 0x0227011f },
+			{ 0x0f, 0x0221101f },
 			/* correct the front input jack as a mic */
 			{ 0x0e, 0x02a79130 },
 			{}
@@ -3608,20 +3611,18 @@
 static int stac_init(struct hda_codec *codec)
 {
 	struct sigmatel_spec *spec = codec->spec;
-	unsigned int gpio;
 	int i;
 
 	/* override some hints */
 	stac_store_hints(codec);
 
 	/* set up GPIO */
-	gpio = spec->gpio_data;
 	/* turn on EAPD statically when spec->eapd_switch isn't set.
 	 * otherwise, unsol event will turn it on/off dynamically
 	 */
 	if (!spec->eapd_switch)
-		gpio |= spec->eapd_mask;
-	stac_gpio_set(codec, spec->gpio_mask, spec->gpio_dir, gpio);
+		spec->gpio_data |= spec->eapd_mask;
+	stac_gpio_set(codec, spec->gpio_mask, spec->gpio_dir, spec->gpio_data);
 
 	snd_hda_gen_init(codec);
 
@@ -3921,6 +3922,7 @@
 {
 	struct sigmatel_spec *spec = codec->spec;
 
+	spec->gpio_mask |= spec->eapd_mask;
 	if (spec->gpio_led) {
 		if (!spec->vref_mute_led_nid) {
 			spec->gpio_mask |= spec->gpio_led;
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
index e524554..aed19c3 100644
--- a/sound/pci/hda/patch_via.c
+++ b/sound/pci/hda/patch_via.c
@@ -910,6 +910,8 @@
 static void override_mic_boost(struct hda_codec *codec, hda_nid_t pin,
 			       int offset, int num_steps, int step_size)
 {
+	snd_hda_override_wcaps(codec, pin,
+			       get_wcaps(codec, pin) | AC_WCAP_IN_AMP);
 	snd_hda_override_amp_caps(codec, pin, HDA_INPUT,
 				  (offset << AC_AMPCAP_OFFSET_SHIFT) |
 				  (num_steps << AC_AMPCAP_NUM_STEPS_SHIFT) |
diff --git a/sound/soc/Kconfig b/sound/soc/Kconfig
index 9e675c7..23f39d2 100644
--- a/sound/soc/Kconfig
+++ b/sound/soc/Kconfig
@@ -45,7 +45,7 @@
 source "sound/soc/nuc900/Kconfig"
 source "sound/soc/omap/Kconfig"
 source "sound/soc/kirkwood/Kconfig"
-source "sound/soc/mid-x86/Kconfig"
+source "sound/soc/intel/Kconfig"
 source "sound/soc/mxs/Kconfig"
 source "sound/soc/pxa/Kconfig"
 source "sound/soc/samsung/Kconfig"
diff --git a/sound/soc/Makefile b/sound/soc/Makefile
index 197b6ae..25201f53 100644
--- a/sound/soc/Makefile
+++ b/sound/soc/Makefile
@@ -20,7 +20,7 @@
 obj-$(CONFIG_SND_SOC)	+= dwc/
 obj-$(CONFIG_SND_SOC)	+= fsl/
 obj-$(CONFIG_SND_SOC)	+= jz4740/
-obj-$(CONFIG_SND_SOC)	+= mid-x86/
+obj-$(CONFIG_SND_SOC)	+= intel/
 obj-$(CONFIG_SND_SOC)	+= mxs/
 obj-$(CONFIG_SND_SOC)	+= nuc900/
 obj-$(CONFIG_SND_SOC)	+= omap/
diff --git a/sound/soc/atmel/atmel-pcm-dma.c b/sound/soc/atmel/atmel-pcm-dma.c
index 1d38fd0..d128265 100644
--- a/sound/soc/atmel/atmel-pcm-dma.c
+++ b/sound/soc/atmel/atmel-pcm-dma.c
@@ -81,7 +81,9 @@
 
 		/* stop RX and capture: will be enabled again at restart */
 		ssc_writex(prtd->ssc->regs, SSC_CR, prtd->mask->ssc_disable);
+		snd_pcm_stream_lock(substream);
 		snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
+		snd_pcm_stream_unlock(substream);
 
 		/* now drain RHR and read status to remove xrun condition */
 		ssc_readx(prtd->ssc->regs, SSC_RHR);
diff --git a/sound/soc/codecs/88pm860x-codec.c b/sound/soc/codecs/88pm860x-codec.c
index 60159c0..6fd174b 100644
--- a/sound/soc/codecs/88pm860x-codec.c
+++ b/sound/soc/codecs/88pm860x-codec.c
@@ -351,6 +351,9 @@
 	val = ucontrol->value.integer.value[0];
 	val2 = ucontrol->value.integer.value[1];
 
+	if (val >= ARRAY_SIZE(st_table) || val2 >= ARRAY_SIZE(st_table))
+		return -EINVAL;
+
 	err = snd_soc_update_bits(codec, reg, 0x3f, st_table[val].m);
 	if (err < 0)
 		return err;
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 2f45f00..23eb69a 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -32,7 +32,8 @@
 	select SND_SOC_CQ0093VC if MFD_DAVINCI_VOICECODEC
 	select SND_SOC_CS42L51 if I2C
 	select SND_SOC_CS42L52 if I2C
-	select SND_SOC_CS42L73 if I2C
+	select SND_SOC_LM49453 if I2C
+	select SND_SOC_RT5640 if I2C
 	select SND_SOC_CS4270 if I2C
 	select SND_SOC_CS4271 if SND_SOC_I2C_AND_SPI
 	select SND_SOC_CX20442 if TTY
@@ -69,6 +70,7 @@
 	select SND_SOC_TLV320AIC26 if SPI_MASTER
 	select SND_SOC_TLV320AIC32X4 if I2C
 	select SND_SOC_TLV320AIC3X if I2C
+	select SND_SOC_TLV320AIC31XX if I2C
 	select SND_SOC_TPA6130A2 if I2C
 	select SND_SOC_TLV320DAC33 if I2C
 	select SND_SOC_TWL4030 if TWL4030_CORE
@@ -81,7 +83,6 @@
 	select SND_SOC_WM2000 if I2C
 	select SND_SOC_WM2200 if I2C
 	select SND_SOC_WM5100 if I2C
-	select SND_SOC_WM5102 if MFD_WM5102
 	select SND_SOC_WM5110 if MFD_WM5110
 	select SND_SOC_WM8350 if MFD_WM8350
 	select SND_SOC_WM8400 if MFD_WM8400
@@ -141,9 +142,7 @@
 
 config SND_SOC_ARIZONA
 	tristate
-	default y if SND_SOC_WM5102=y
 	default y if SND_SOC_WM5110=y
-	default m if SND_SOC_WM5102=m
 	default m if SND_SOC_WM5110=m
 
 config SND_SOC_WM_HUBS
@@ -153,9 +152,7 @@
 
 config SND_SOC_WM_ADSP
 	tristate
-	default y if SND_SOC_WM5102=y
 	default y if SND_SOC_WM2200=y
-	default m if SND_SOC_WM5102=m
 	default m if SND_SOC_WM2200=m
 
 config SND_SOC_AB8500_CODEC
@@ -218,11 +215,17 @@
 
 config SND_SOC_CS42L51
 	tristate
+config SND_SOC_LM49453
+	tristate
 
 config SND_SOC_CS42L52
 	tristate
 
-config SND_SOC_CS42L73
+config SND_SOC_RT5640
+	tristate
+config SND_SOC_RT5642
+	tristate
+config SND_SOC_RT5672
 	tristate
 
 # Cirrus Logic CS4270 Codec
@@ -341,6 +344,9 @@
 config SND_SOC_TLV320AIC3X
 	tristate
 
+config SND_SOC_TLV320AIC31XX
+        tristate
+
 config SND_SOC_TLV320DAC33
 	tristate
 
@@ -375,9 +381,6 @@
 config SND_SOC_WM5100
 	tristate
 
-config SND_SOC_WM5102
-	tristate
-
 config SND_SOC_WM5110
 	tristate
 
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index b9e41c9..e451bbb 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -19,6 +19,10 @@
 snd-soc-cq93vc-objs := cq93vc.o
 snd-soc-cs42l51-objs := cs42l51.o
 snd-soc-cs42l52-objs := cs42l52.o
+snd-soc-lm49453-objs := lm49453.o
+snd-soc-rt5640-objs := rt5640.o rt5640_ioctl.o rt56xx_ioctl.o
+snd-soc-rt5642-objs := rt5640.o rt5640-dsp.o rt5640_ioctl.o rt56xx_ioctl.o
+snd-soc-rt5672-objs := rt5670.o rt5670-dsp.o rt5670_ioctl.o rt_codec_ioctl.o
 snd-soc-cs42l73-objs := cs42l73.o
 snd-soc-cs4270-objs := cs4270.o
 snd-soc-cs4271-objs := cs4271.o
@@ -61,6 +65,7 @@
 snd-soc-tlv320aic26-objs := tlv320aic26.o
 snd-soc-tlv320aic3x-objs := tlv320aic3x.o
 snd-soc-tlv320aic32x4-objs := tlv320aic32x4.o
+snd-soc-tlv320aic31xx-objs := tlv320aic31xx.o
 snd-soc-tlv320dac33-objs := tlv320dac33.o
 snd-soc-twl4030-objs := twl4030.o
 snd-soc-twl6040-objs := twl6040.o
@@ -144,9 +149,12 @@
 obj-$(CONFIG_SND_SOC_ALC5632)	+= snd-soc-alc5632.o
 obj-$(CONFIG_SND_SOC_ARIZONA)	+= snd-soc-arizona.o
 obj-$(CONFIG_SND_SOC_CQ0093VC) += snd-soc-cq93vc.o
+obj-$(CONFIG_SND_SOC_LM49453) += snd-soc-lm49453.o
+obj-$(CONFIG_SND_SOC_RT5640) += snd-soc-rt5640.o
+obj-$(CONFIG_SND_SOC_RT5642) += snd-soc-rt5642.o
+obj-$(CONFIG_SND_SOC_RT5672) += snd-soc-rt5672.o
 obj-$(CONFIG_SND_SOC_CS42L51)	+= snd-soc-cs42l51.o
 obj-$(CONFIG_SND_SOC_CS42L52)	+= snd-soc-cs42l52.o
-obj-$(CONFIG_SND_SOC_CS42L73)	+= snd-soc-cs42l73.o
 obj-$(CONFIG_SND_SOC_CS4270)	+= snd-soc-cs4270.o
 obj-$(CONFIG_SND_SOC_CS4271)	+= snd-soc-cs4271.o
 obj-$(CONFIG_SND_SOC_CX20442)	+= snd-soc-cx20442.o
@@ -185,6 +193,7 @@
 obj-$(CONFIG_SND_SOC_TLV320AIC26)	+= snd-soc-tlv320aic26.o
 obj-$(CONFIG_SND_SOC_TLV320AIC3X)	+= snd-soc-tlv320aic3x.o
 obj-$(CONFIG_SND_SOC_TLV320AIC32X4)     += snd-soc-tlv320aic32x4.o
+obj-$(CONFIG_SND_SOC_TLV320AIC31XX)     += snd-soc-tlv320aic31xx.o
 obj-$(CONFIG_SND_SOC_TLV320DAC33)	+= snd-soc-tlv320dac33.o
 obj-$(CONFIG_SND_SOC_TWL4030)	+= snd-soc-twl4030.o
 obj-$(CONFIG_SND_SOC_TWL6040)	+= snd-soc-twl6040.o
@@ -196,7 +205,6 @@
 obj-$(CONFIG_SND_SOC_WM2000)	+= snd-soc-wm2000.o
 obj-$(CONFIG_SND_SOC_WM2200)	+= snd-soc-wm2200.o
 obj-$(CONFIG_SND_SOC_WM5100)	+= snd-soc-wm5100.o
-obj-$(CONFIG_SND_SOC_WM5102)	+= snd-soc-wm5102.o
 obj-$(CONFIG_SND_SOC_WM5110)	+= snd-soc-wm5110.o
 obj-$(CONFIG_SND_SOC_WM8350)	+= snd-soc-wm8350.o
 obj-$(CONFIG_SND_SOC_WM8400)	+= snd-soc-wm8400.o
diff --git a/sound/soc/codecs/ab8500-codec.c b/sound/soc/codecs/ab8500-codec.c
index a153b16..bce45c1 100644
--- a/sound/soc/codecs/ab8500-codec.c
+++ b/sound/soc/codecs/ab8500-codec.c
@@ -1225,13 +1225,18 @@
 	struct ab8500_codec_drvdata *drvdata = dev_get_drvdata(codec->dev);
 	struct device *dev = codec->dev;
 	bool apply_fir, apply_iir;
-	int req, status;
+	unsigned int req;
+	int status;
 
 	dev_dbg(dev, "%s: Enter.\n", __func__);
 
 	mutex_lock(&drvdata->anc_lock);
 
 	req = ucontrol->value.integer.value[0];
+	if (req >= ARRAY_SIZE(enum_anc_state)) {
+		status = -EINVAL;
+		goto cleanup;
+	}
 	if (req != ANC_APPLY_FIR_IIR && req != ANC_APPLY_FIR &&
 		req != ANC_APPLY_IIR) {
 		dev_err(dev, "%s: ERROR: Unsupported status to set '%s'!\n",
diff --git a/sound/soc/codecs/cs42l52.c b/sound/soc/codecs/cs42l52.c
index 987f728..ee25f32 100644
--- a/sound/soc/codecs/cs42l52.c
+++ b/sound/soc/codecs/cs42l52.c
@@ -451,7 +451,7 @@
 	SOC_ENUM("Beep Pitch", beep_pitch_enum),
 	SOC_ENUM("Beep on Time", beep_ontime_enum),
 	SOC_ENUM("Beep off Time", beep_offtime_enum),
-	SOC_SINGLE_TLV("Beep Volume", CS42L52_BEEP_VOL, 0, 0x1f, 0x07, hl_tlv),
+	SOC_SINGLE_SX_TLV("Beep Volume", CS42L52_BEEP_VOL, 0, 0x07, 0x1f, hl_tlv),
 	SOC_SINGLE("Beep Mixer Switch", CS42L52_BEEP_TONE_CTL, 5, 1, 1),
 	SOC_ENUM("Beep Treble Corner Freq", beep_treble_enum),
 	SOC_ENUM("Beep Bass Corner Freq", beep_bass_enum),
diff --git a/sound/soc/codecs/max98088.c b/sound/soc/codecs/max98088.c
index 3eeada5..566a367 100644
--- a/sound/soc/codecs/max98088.c
+++ b/sound/soc/codecs/max98088.c
@@ -1612,7 +1612,7 @@
 
 static void max98088_sync_cache(struct snd_soc_codec *codec)
 {
-       u16 *reg_cache = codec->reg_cache;
+       u8 *reg_cache = codec->reg_cache;
        int i;
 
        if (!codec->cache_sync)
diff --git a/sound/soc/codecs/max98095.c b/sound/soc/codecs/max98095.c
index 41cdd16..8dbcacd 100644
--- a/sound/soc/codecs/max98095.c
+++ b/sound/soc/codecs/max98095.c
@@ -1863,7 +1863,7 @@
 	struct max98095_pdata *pdata = max98095->pdata;
 	int channel = max98095_get_eq_channel(kcontrol->id.name);
 	struct max98095_cdata *cdata;
-	int sel = ucontrol->value.integer.value[0];
+	unsigned int sel = ucontrol->value.integer.value[0];
 	struct max98095_eq_cfg *coef_set;
 	int fs, best, best_val, i;
 	int regmask, regsave;
@@ -2016,7 +2016,7 @@
 	struct max98095_pdata *pdata = max98095->pdata;
 	int channel = max98095_get_bq_channel(codec, kcontrol->id.name);
 	struct max98095_cdata *cdata;
-	int sel = ucontrol->value.integer.value[0];
+	unsigned int sel = ucontrol->value.integer.value[0];
 	struct max98095_biquad_cfg *coef_set;
 	int fs, best, best_val, i;
 	int regmask, regsave;
diff --git a/sound/soc/codecs/mc13783.c b/sound/soc/codecs/mc13783.c
index 5402dfb..8a8d936 100644
--- a/sound/soc/codecs/mc13783.c
+++ b/sound/soc/codecs/mc13783.c
@@ -126,6 +126,10 @@
 
 	ret = mc13xxx_reg_write(priv->mc13xxx, reg, value);
 
+	/* include errata fix for spi audio problems */
+	if (reg == MC13783_AUDIO_CODEC || reg == MC13783_AUDIO_DAC)
+		ret = mc13xxx_reg_write(priv->mc13xxx, reg, value);
+
 	mc13xxx_unlock(priv->mc13xxx);
 
 	return ret;
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index 92bbfec..ea47938 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -37,7 +37,7 @@
 static const u16 sgtl5000_regs[SGTL5000_MAX_REG_OFFSET] =  {
 	[SGTL5000_CHIP_CLK_CTRL] = 0x0008,
 	[SGTL5000_CHIP_I2S_CTRL] = 0x0010,
-	[SGTL5000_CHIP_SSS_CTRL] = 0x0008,
+	[SGTL5000_CHIP_SSS_CTRL] = 0x0010,
 	[SGTL5000_CHIP_DAC_VOL] = 0x3c3c,
 	[SGTL5000_CHIP_PAD_STRENGTH] = 0x015f,
 	[SGTL5000_CHIP_ANA_HP_CTRL] = 0x1818,
diff --git a/sound/soc/codecs/sgtl5000.h b/sound/soc/codecs/sgtl5000.h
index 8a9f435..d3a68bb 100644
--- a/sound/soc/codecs/sgtl5000.h
+++ b/sound/soc/codecs/sgtl5000.h
@@ -347,7 +347,7 @@
 #define SGTL5000_PLL_INT_DIV_MASK		0xf800
 #define SGTL5000_PLL_INT_DIV_SHIFT		11
 #define SGTL5000_PLL_INT_DIV_WIDTH		5
-#define SGTL5000_PLL_FRAC_DIV_MASK		0x0700
+#define SGTL5000_PLL_FRAC_DIV_MASK		0x07ff
 #define SGTL5000_PLL_FRAC_DIV_SHIFT		0
 #define SGTL5000_PLL_FRAC_DIV_WIDTH		11
 
diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
index 0a4ffdd..5e5af89 100644
--- a/sound/soc/codecs/wm8960.c
+++ b/sound/soc/codecs/wm8960.c
@@ -857,9 +857,9 @@
 	if (pll_div.k) {
 		reg |= 0x20;
 
-		snd_soc_write(codec, WM8960_PLL2, (pll_div.k >> 18) & 0x3f);
-		snd_soc_write(codec, WM8960_PLL3, (pll_div.k >> 9) & 0x1ff);
-		snd_soc_write(codec, WM8960_PLL4, pll_div.k & 0x1ff);
+		snd_soc_write(codec, WM8960_PLL2, (pll_div.k >> 16) & 0xff);
+		snd_soc_write(codec, WM8960_PLL3, (pll_div.k >> 8) & 0xff);
+		snd_soc_write(codec, WM8960_PLL4, pll_div.k & 0xff);
 	}
 	snd_soc_write(codec, WM8960_PLL1, reg);
 
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index e971028..730dd0c 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -1600,7 +1600,6 @@
 			    struct snd_ctl_elem_value *ucontrol)
 {
 	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
-	u16 *reg_cache = codec->reg_cache;
 	int ret;
 
 	/* Apply the update (if any) */
@@ -1609,16 +1608,19 @@
 		return 0;
 
 	/* If the left PGA is enabled hit that VU bit... */
-	if (snd_soc_read(codec, WM8962_PWR_MGMT_2) & WM8962_HPOUTL_PGA_ENA)
-		return snd_soc_write(codec, WM8962_HPOUTL_VOLUME,
-				     reg_cache[WM8962_HPOUTL_VOLUME]);
+	ret = snd_soc_read(codec, WM8962_PWR_MGMT_2);
+	if (ret & WM8962_HPOUTL_PGA_ENA) {
+		snd_soc_write(codec, WM8962_HPOUTL_VOLUME,
+			      snd_soc_read(codec, WM8962_HPOUTL_VOLUME));
+		return 1;
+	}
 
 	/* ...otherwise the right.  The VU is stereo. */
-	if (snd_soc_read(codec, WM8962_PWR_MGMT_2) & WM8962_HPOUTR_PGA_ENA)
-		return snd_soc_write(codec, WM8962_HPOUTR_VOLUME,
-				     reg_cache[WM8962_HPOUTR_VOLUME]);
+	if (ret & WM8962_HPOUTR_PGA_ENA)
+		snd_soc_write(codec, WM8962_HPOUTR_VOLUME,
+			      snd_soc_read(codec, WM8962_HPOUTR_VOLUME));
 
-	return 0;
+	return 1;
 }
 
 /* The VU bits for the speakers are in a different register to the mute
@@ -3374,7 +3376,6 @@
 	int ret;
 	struct wm8962_priv *wm8962 = snd_soc_codec_get_drvdata(codec);
 	struct wm8962_pdata *pdata = dev_get_platdata(codec->dev);
-	u16 *reg_cache = codec->reg_cache;
 	int i, trigger, irq_pol;
 	bool dmicclk, dmicdat;
 
@@ -3432,8 +3433,9 @@
 
 		/* Put the speakers into mono mode? */
 		if (pdata->spk_mono)
-			reg_cache[WM8962_CLASS_D_CONTROL_2]
-				|= WM8962_SPK_MONO;
+			snd_soc_update_bits(codec, WM8962_CLASS_D_CONTROL_2,
+				WM8962_SPK_MONO_MASK, WM8962_SPK_MONO);
+
 
 		/* Micbias setup, detection enable and detection
 		 * threasholds. */
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index 29e95f9..8f6b4dc 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -16,6 +16,7 @@
 #include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/pm.h>
+#include <linux/gcd.h>
 #include <linux/i2c.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
@@ -93,9 +94,9 @@
 
 static const struct wm8958_micd_rate micdet_rates[] = {
 	{ 32768,       true,  1, 4 },
-	{ 32768,       false, 1, 1 },
+	{ 32768,       false, 1, 0 },
 	{ 44100 * 256, true,  7, 10 },
-	{ 44100 * 256, false, 7, 10 },
+	{ 44100 * 256, false, 7, 9 },
 };
 
 static const struct wm8958_micd_rate jackdet_rates[] = {
@@ -539,13 +540,13 @@
 static const struct snd_kcontrol_new wm8994_snd_controls[] = {
 SOC_DOUBLE_R_TLV("AIF1ADC1 Volume", WM8994_AIF1_ADC1_LEFT_VOLUME,
 		 WM8994_AIF1_ADC1_RIGHT_VOLUME,
-		 1, 119, 0, digital_tlv),
+		 1, 120, 0, digital_tlv),
 SOC_DOUBLE_R_TLV("AIF1ADC2 Volume", WM8994_AIF1_ADC2_LEFT_VOLUME,
 		 WM8994_AIF1_ADC2_RIGHT_VOLUME,
-		 1, 119, 0, digital_tlv),
+		 1, 120, 0, digital_tlv),
 SOC_DOUBLE_R_TLV("AIF2ADC Volume", WM8994_AIF2_ADC_LEFT_VOLUME,
 		 WM8994_AIF2_ADC_RIGHT_VOLUME,
-		 1, 119, 0, digital_tlv),
+		 1, 120, 0, digital_tlv),
 
 SOC_ENUM("AIF1ADCL Source", aif1adcl_src),
 SOC_ENUM("AIF1ADCR Source", aif1adcr_src),
@@ -607,12 +608,12 @@
 SOC_ENUM("DAC OSR", dac_osr),
 
 SOC_DOUBLE_R_TLV("DAC1 Volume", WM8994_DAC1_LEFT_VOLUME,
-		 WM8994_DAC1_RIGHT_VOLUME, 1, 96, 0, digital_tlv),
+		 WM8994_DAC1_RIGHT_VOLUME, 1, 112, 0, digital_tlv),
 SOC_DOUBLE_R("DAC1 Switch", WM8994_DAC1_LEFT_VOLUME,
 	     WM8994_DAC1_RIGHT_VOLUME, 9, 1, 1),
 
 SOC_DOUBLE_R_TLV("DAC2 Volume", WM8994_DAC2_LEFT_VOLUME,
-		 WM8994_DAC2_RIGHT_VOLUME, 1, 96, 0, digital_tlv),
+		 WM8994_DAC2_RIGHT_VOLUME, 1, 112, 0, digital_tlv),
 SOC_DOUBLE_R("DAC2 Switch", WM8994_DAC2_LEFT_VOLUME,
 	     WM8994_DAC2_RIGHT_VOLUME, 9, 1, 1),
 
@@ -638,6 +639,11 @@
 	       10, 15, 0, wm8994_3d_tlv),
 SOC_SINGLE("AIF2DAC 3D Stereo Switch", WM8994_AIF2_DAC_FILTERS_2,
 	   8, 1, 0),
+
+SOC_SINGLE_TLV("MIXINL MIXOUTL Volume", WM8994_INPUT_MIXER_3, 0, 7, 0,
+	       mixin_boost_tlv),
+SOC_SINGLE_TLV("MIXINR MIXOUTR Volume", WM8994_INPUT_MIXER_4, 0, 7, 0,
+	       mixin_boost_tlv),
 };
 
 static const struct snd_kcontrol_new wm8994_eq_controls[] = {
@@ -866,7 +872,7 @@
 					    WM8994_BIAS_SRC |
 					    WM8994_STARTUP_BIAS_ENA |
 					    WM8994_VMID_BUF_ENA |
-					    (0x2 << WM8994_VMID_RAMP_SHIFT));
+					    (0x3 << WM8994_VMID_RAMP_SHIFT));
 
 			/* Main bias enable, VMID=2x40k */
 			snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_1,
@@ -874,7 +880,14 @@
 					    WM8994_VMID_SEL_MASK,
 					    WM8994_BIAS_ENA | 0x2);
 
-			msleep(300);
+			/* The delay of 300ms was recommended to support pop
+			 * free startup of the line output driver, as we don't use
+			 * that feature reducing the delay to 50ms as recommended in
+			 * the spec, Also changing VMID_RAMP to soft fast start
+			 * accordingly Also applies for VMID_FORCE and
+			 * vmid_dereference.
+			 */
+			msleep(50);
 
 			snd_soc_update_bits(codec, WM8994_ANTIPOP_2,
 					    WM8994_VMID_RAMP_MASK |
@@ -893,15 +906,14 @@
 					    WM8994_BIAS_SRC |
 					    WM8994_STARTUP_BIAS_ENA |
 					    WM8994_VMID_BUF_ENA |
-					    (0x2 << WM8994_VMID_RAMP_SHIFT));
+					    (0x3 << WM8994_VMID_RAMP_SHIFT));
 
 			/* Main bias enable, VMID=2x40k */
 			snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_1,
 					    WM8994_BIAS_ENA |
 					    WM8994_VMID_SEL_MASK,
 					    WM8994_BIAS_ENA | 0x2);
-
-			msleep(400);
+			msleep(50);
 
 			snd_soc_update_bits(codec, WM8994_ANTIPOP_2,
 					    WM8994_VMID_RAMP_MASK |
@@ -946,7 +958,7 @@
 		snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_1,
 				    WM8994_VMID_SEL_MASK, 0);
 
-		msleep(400);
+		msleep(50);
 
 		/* Active discharge */
 		snd_soc_update_bits(codec, WM8994_ANTIPOP_1,
@@ -969,7 +981,7 @@
 				    WM8994_VMID_RAMP_MASK, 0);
 
 		snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_1,
-				    WM8994_VMID_SEL_MASK, 0);
+				    WM8994_BIAS_ENA | WM8994_VMID_SEL_MASK, 0);
 	}
 
 	pm_runtime_put(codec->dev);
@@ -1055,7 +1067,7 @@
 	switch (event) {
 	case SND_SOC_DAPM_PRE_PMU:
 		/* Don't enable timeslot 2 if not in use */
-		if (wm8994->channels[0] <= 2)
+		if ((wm8994->channels[0] <= 2) && (wm8994->slots <= 2))
 			mask &= ~(WM8994_AIF1DAC2L_ENA | WM8994_AIF1DAC2R_ENA);
 
 		val = snd_soc_read(codec, WM8994_AIF1_CONTROL_1);
@@ -1331,6 +1343,26 @@
 	struct snd_soc_codec *codec = w->codec;
 	unsigned int mask = 1 << w->shift;
 
+	/* Don't propagate FIFO errors unless the DAC is running */
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		/* Clear FIFO error status */
+		snd_soc_update_bits(codec, WM8994_INTERRUPT_STATUS_2,
+				    WM8994_FIFOS_ERR_EINT_MASK,
+				    1 << WM8994_FIFOS_ERR_EINT_SHIFT);
+		/* Unmask FIFO error interrupts */
+		snd_soc_update_bits(codec, WM8994_INTERRUPT_STATUS_2_MASK,
+				    WM8994_IM_FIFOS_ERR_EINT_MASK,
+				    0 << WM8994_IM_FIFOS_ERR_EINT_SHIFT);
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		/* Mask FIFO error interrupts */
+		snd_soc_update_bits(codec, WM8994_INTERRUPT_STATUS_2_MASK,
+				    WM8994_IM_FIFOS_ERR_EINT_MASK,
+				    1 << WM8994_IM_FIFOS_ERR_EINT_SHIFT);
+		break;
+	}
+
 	snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
 			    mask, mask);
 	return 0;
@@ -1498,6 +1530,24 @@
 	"AIF1DACDAT", "AIF3DACDAT",
 };
 
+static const char *loopback_text[] = {
+	"None", "ADCDAT",
+};
+
+static const struct soc_enum aif1_loopback_enum =
+	SOC_ENUM_SINGLE(WM8994_AIF1_CONTROL_2, WM8994_AIF1_LOOPBACK_SHIFT, 2,
+			loopback_text);
+
+static const struct snd_kcontrol_new aif1_loopback =
+	SOC_DAPM_ENUM("AIF1 Loopback", aif1_loopback_enum);
+
+static const struct soc_enum aif2_loopback_enum =
+	SOC_ENUM_SINGLE(WM8994_AIF2_CONTROL_2, WM8994_AIF2_LOOPBACK_SHIFT, 2,
+			loopback_text);
+
+static const struct snd_kcontrol_new aif2_loopback =
+	SOC_DAPM_ENUM("AIF2 Loopback", aif2_loopback_enum);
+
 static const struct soc_enum aif1dac_enum =
 	SOC_ENUM_SINGLE(WM8994_POWER_MANAGEMENT_6, 0, 2, aif1dac_text);
 
@@ -1616,13 +1666,17 @@
 
 static const struct snd_soc_dapm_widget wm8994_dac_revd_widgets[] = {
 SND_SOC_DAPM_DAC_E("DAC2L", NULL, SND_SOC_NOPM, 3, 0,
-	dac_ev, SND_SOC_DAPM_PRE_PMU),
+	dac_ev, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+	SND_SOC_DAPM_PRE_PMD),
 SND_SOC_DAPM_DAC_E("DAC2R", NULL, SND_SOC_NOPM, 2, 0,
-	dac_ev, SND_SOC_DAPM_PRE_PMU),
+	dac_ev, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+	SND_SOC_DAPM_PRE_PMD),
 SND_SOC_DAPM_DAC_E("DAC1L", NULL, SND_SOC_NOPM, 1, 0,
-	dac_ev, SND_SOC_DAPM_PRE_PMU),
+	dac_ev, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+	SND_SOC_DAPM_PRE_PMD),
 SND_SOC_DAPM_DAC_E("DAC1R", NULL, SND_SOC_NOPM, 0, 0,
-	dac_ev, SND_SOC_DAPM_PRE_PMU),
+	dac_ev, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+	SND_SOC_DAPM_PRE_PMD),
 };
 
 static const struct snd_soc_dapm_widget wm8994_dac_widgets[] = {
@@ -1633,15 +1687,15 @@
 };
 
 static const struct snd_soc_dapm_widget wm8994_adc_revd_widgets[] = {
-SND_SOC_DAPM_VIRT_MUX_E("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux,
+SND_SOC_DAPM_VIRT_MUX_E("ADCL Mux", SND_SOC_NOPM, 1, 0, &adcl_mux,
 			adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
-SND_SOC_DAPM_VIRT_MUX_E("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux,
+SND_SOC_DAPM_VIRT_MUX_E("ADCR Mux", SND_SOC_NOPM, 0, 0, &adcr_mux,
 			adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
 };
 
 static const struct snd_soc_dapm_widget wm8994_adc_widgets[] = {
-SND_SOC_DAPM_VIRT_MUX("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux),
-SND_SOC_DAPM_VIRT_MUX("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux),
+SND_SOC_DAPM_VIRT_MUX("ADCL Mux", SND_SOC_NOPM, 1, 0, &adcl_mux),
+SND_SOC_DAPM_VIRT_MUX("ADCR Mux", SND_SOC_NOPM, 0, 0, &adcr_mux),
 };
 
 static const struct snd_soc_dapm_widget wm8994_dapm_widgets[] = {
@@ -1737,12 +1791,11 @@
 SND_SOC_DAPM_ADC("DMIC1L", NULL, WM8994_POWER_MANAGEMENT_4, 3, 0),
 SND_SOC_DAPM_ADC("DMIC1R", NULL, WM8994_POWER_MANAGEMENT_4, 2, 0),
 
-/* Power is done with the muxes since the ADC power also controls the
- * downsampling chain, the chip will automatically manage the analogue
- * specific portions.
- */
-SND_SOC_DAPM_ADC("ADCL", NULL, SND_SOC_NOPM, 1, 0),
-SND_SOC_DAPM_ADC("ADCR", NULL, SND_SOC_NOPM, 0, 0),
+SND_SOC_DAPM_ADC("ADCL", NULL, WM8994_POWER_MANAGEMENT_4, 1, 0),
+SND_SOC_DAPM_ADC("ADCR", NULL, WM8994_POWER_MANAGEMENT_4, 0, 0),
+
+SND_SOC_DAPM_MUX("AIF1 Loopback", SND_SOC_NOPM, 0, 0, &aif1_loopback),
+SND_SOC_DAPM_MUX("AIF2 Loopback", SND_SOC_NOPM, 0, 0, &aif2_loopback),
 
 SND_SOC_DAPM_POST("Debug log", post_ev),
 };
@@ -1875,9 +1928,9 @@
 	{ "AIF1DAC2L", NULL, "AIF1DAC Mux" },
 	{ "AIF1DAC2R", NULL, "AIF1DAC Mux" },
 
-	{ "AIF1DAC Mux", "AIF1DACDAT", "AIF1DACDAT" },
+	{ "AIF1DAC Mux", "AIF1DACDAT", "AIF1 Loopback" },
 	{ "AIF1DAC Mux", "AIF3DACDAT", "AIF3DACDAT" },
-	{ "AIF2DAC Mux", "AIF2DACDAT", "AIF2DACDAT" },
+	{ "AIF2DAC Mux", "AIF2DACDAT", "AIF2 Loopback" },
 	{ "AIF2DAC Mux", "AIF3DACDAT", "AIF3DACDAT" },
 	{ "AIF2ADC Mux", "AIF2ADCDAT", "AIF2ADCL" },
 	{ "AIF2ADC Mux", "AIF2ADCDAT", "AIF2ADCR" },
@@ -1928,6 +1981,12 @@
 	{ "AIF3ADCDAT", "AIF2DACDAT", "AIF2DACL" },
 	{ "AIF3ADCDAT", "AIF2DACDAT", "AIF2DACR" },
 
+	/* Loopback */
+	{ "AIF1 Loopback", "ADCDAT", "AIF1ADCDAT" },
+	{ "AIF1 Loopback", "None", "AIF1DACDAT" },
+	{ "AIF2 Loopback", "ADCDAT", "AIF2ADCDAT" },
+	{ "AIF2 Loopback", "None", "AIF2DACDAT" },
+
 	/* Sidetone */
 	{ "Left Sidetone", "ADC/DMIC1", "ADCL Mux" },
 	{ "Left Sidetone", "DMIC2", "DMIC2L" },
@@ -2010,15 +2069,16 @@
 	u16 outdiv;
 	u16 n;
 	u16 k;
+	u16 lambda;
 	u16 clk_ref_div;
 	u16 fll_fratio;
 };
 
-static int wm8994_get_fll_config(struct fll_div *fll,
+static int wm8994_get_fll_config(struct wm8994 *control, struct fll_div *fll,
 				 int freq_in, int freq_out)
 {
 	u64 Kpart;
-	unsigned int K, Ndiv, Nmod;
+	unsigned int K, Ndiv, Nmod, gcd_fll;
 
 	pr_debug("FLL input=%dHz, output=%dHz\n", freq_in, freq_out);
 
@@ -2067,20 +2127,32 @@
 	Nmod = freq_out % freq_in;
 	pr_debug("Nmod=%d\n", Nmod);
 
-	/* Calculate fractional part - scale up so we can round. */
-	Kpart = FIXED_FLL_SIZE * (long long)Nmod;
+	switch (control->type) {
+	case WM8994:
+		/* Calculate fractional part - scale up so we can round. */
+		Kpart = FIXED_FLL_SIZE * (long long)Nmod;
 
-	do_div(Kpart, freq_in);
+		do_div(Kpart, freq_in);
 
-	K = Kpart & 0xFFFFFFFF;
+		K = Kpart & 0xFFFFFFFF;
 
-	if ((K % 10) >= 5)
-		K += 5;
+		if ((K % 10) >= 5)
+			K += 5;
 
-	/* Move down to proper range now rounding is done */
-	fll->k = K / 10;
+		/* Move down to proper range now rounding is done */
+		fll->k = K / 10;
+		fll->lambda = 0;
 
-	pr_debug("N=%x K=%x\n", fll->n, fll->k);
+		pr_debug("N=%x K=%x\n", fll->n, fll->k);
+		break;
+
+	default:
+		gcd_fll = gcd(freq_out, freq_in);
+
+		fll->k = (freq_out - (freq_in * fll->n)) / gcd_fll;
+		fll->lambda = freq_in / gcd_fll;
+		
+	}
 
 	return 0;
 }
@@ -2144,9 +2216,9 @@
 	 * analysis bugs spewing warnings.
 	 */
 	if (freq_out)
-		ret = wm8994_get_fll_config(&fll, freq_in, freq_out);
+		ret = wm8994_get_fll_config(control, &fll, freq_in, freq_out);
 	else
-		ret = wm8994_get_fll_config(&fll, wm8994->fll[id].in,
+		ret = wm8994_get_fll_config(control, &fll, wm8994->fll[id].in,
 					    wm8994->fll[id].out);
 	if (ret < 0)
 		return ret;
@@ -2191,6 +2263,17 @@
 			    WM8994_FLL1_N_MASK,
 			    fll.n << WM8994_FLL1_N_SHIFT);
 
+	if (fll.lambda) {
+		snd_soc_update_bits(codec, WM8958_FLL1_EFS_1 + reg_offset,
+				    WM8958_FLL1_LAMBDA_MASK,
+				    fll.lambda);
+		snd_soc_update_bits(codec, WM8958_FLL1_EFS_2 + reg_offset,
+				    WM8958_FLL1_EFS_ENA, WM8958_FLL1_EFS_ENA);
+	} else {
+		snd_soc_update_bits(codec, WM8958_FLL1_EFS_2 + reg_offset,
+				    WM8958_FLL1_EFS_ENA, 0);
+	}
+
 	snd_soc_update_bits(codec, WM8994_FLL1_CONTROL_5 + reg_offset,
 			    WM8994_FLL1_FRC_NCO | WM8958_FLL1_BYP |
 			    WM8994_FLL1_REFCLK_DIV_MASK |
@@ -2235,7 +2318,7 @@
 
 		if (wm8994->fll_locked_irq) {
 			timeout = wait_for_completion_timeout(&wm8994->fll_locked[id],
-							      msecs_to_jiffies(10));
+							      msecs_to_jiffies(12));
 			if (timeout == 0)
 				dev_warn(codec->dev,
 					 "Timed out waiting for FLL lock\n");
@@ -2271,7 +2354,8 @@
 	 * If SYSCLK will be less than 50kHz adjust AIFnCLK dividers
 	 * for detection.
 	 */
-	if (max(wm8994->aifclk[0], wm8994->aifclk[1]) < 50000) {
+	if (max(wm8994->aifclk[0], wm8994->aifclk[1]) < 50000 &&
+		!wm8994->aifdiv[0]) {
 		dev_dbg(codec->dev, "Configuring AIFs for 128fs\n");
 
 		wm8994->aifdiv[0] = snd_soc_read(codec, WM8994_AIF1_RATE)
@@ -2555,17 +2639,24 @@
 	struct wm8994 *control = wm8994->wm8994;
 	int ms_reg;
 	int aif1_reg;
+	int dac_reg;
+	int adc_reg;
 	int ms = 0;
 	int aif1 = 0;
+	int lrclk = 0;
 
 	switch (dai->id) {
 	case 1:
 		ms_reg = WM8994_AIF1_MASTER_SLAVE;
 		aif1_reg = WM8994_AIF1_CONTROL_1;
+		dac_reg = WM8994_AIF1DAC_LRCLK;
+		adc_reg = WM8994_AIF1ADC_LRCLK;
 		break;
 	case 2:
 		ms_reg = WM8994_AIF2_MASTER_SLAVE;
 		aif1_reg = WM8994_AIF2_CONTROL_1;
+		dac_reg = WM8994_AIF1DAC_LRCLK;
+		adc_reg = WM8994_AIF1ADC_LRCLK;
 		break;
 	default:
 		return -EINVAL;
@@ -2584,6 +2675,7 @@
 	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
 	case SND_SOC_DAIFMT_DSP_B:
 		aif1 |= WM8994_AIF1_LRCLK_INV;
+		lrclk |= WM8958_AIF1_LRCLK_INV;
 	case SND_SOC_DAIFMT_DSP_A:
 		aif1 |= 0x18;
 		break;
@@ -2622,12 +2714,14 @@
 			break;
 		case SND_SOC_DAIFMT_IB_IF:
 			aif1 |= WM8994_AIF1_BCLK_INV | WM8994_AIF1_LRCLK_INV;
+			lrclk |= WM8958_AIF1_LRCLK_INV;
 			break;
 		case SND_SOC_DAIFMT_IB_NF:
 			aif1 |= WM8994_AIF1_BCLK_INV;
 			break;
 		case SND_SOC_DAIFMT_NB_IF:
 			aif1 |= WM8994_AIF1_LRCLK_INV;
+			lrclk |= WM8958_AIF1_LRCLK_INV;
 			break;
 		default:
 			return -EINVAL;
@@ -2658,6 +2752,10 @@
 			    aif1);
 	snd_soc_update_bits(codec, ms_reg, WM8994_AIF1_MSTR,
 			    ms);
+	snd_soc_update_bits(codec, dac_reg,
+			    WM8958_AIF1_LRCLK_INV, lrclk);
+	snd_soc_update_bits(codec, adc_reg,
+			    WM8958_AIF1_LRCLK_INV, lrclk);
 
 	return 0;
 }
@@ -2706,9 +2804,40 @@
 	int lrclk = 0;
 	int rate_val = 0;
 	int id = dai->id - 1;
+	struct snd_pcm_hw_params hw_params;
 
 	int i, cur_val, best_val, bclk_rate, best;
 
+	if (params)
+		memcpy(&hw_params, params, sizeof(*params));
+	else
+		return -EINVAL;
+
+	/* If custom params are there, override to custom params */
+	if (pdata->custom_cfg) {
+
+		dev_dbg(codec->dev, "%s: Overriding to custom params....\n",
+							__func__);
+
+		snd_mask_none(hw_param_mask(&hw_params,
+					SNDRV_PCM_HW_PARAM_FORMAT));
+		snd_mask_set(hw_param_mask(&hw_params,
+					SNDRV_PCM_HW_PARAM_FORMAT),
+					pdata->custom_cfg->format);
+
+		hw_param_interval(&hw_params, SNDRV_PCM_HW_PARAM_RATE)->min =
+						pdata->custom_cfg->rate;
+		hw_param_interval(&hw_params, SNDRV_PCM_HW_PARAM_RATE)->max =
+						pdata->custom_cfg->rate;
+
+		hw_param_interval(&hw_params,
+					SNDRV_PCM_HW_PARAM_CHANNELS)->min =
+						pdata->custom_cfg->channels;
+		hw_param_interval(&hw_params,
+					SNDRV_PCM_HW_PARAM_CHANNELS)->max =
+						pdata->custom_cfg->channels;
+	}
+
 	switch (dai->id) {
 	case 1:
 		aif1_reg = WM8994_AIF1_CONTROL_1;
@@ -2740,8 +2869,9 @@
 		return -EINVAL;
 	}
 
-	bclk_rate = params_rate(params);
-	switch (params_format(params)) {
+	bclk_rate = params_rate(&hw_params);
+
+	switch (params_format(&hw_params)) {
 	case SNDRV_PCM_FORMAT_S16_LE:
 		bclk_rate *= 16;
 		break;
@@ -2761,7 +2891,7 @@
 		return -EINVAL;
 	}
 
-	wm8994->channels[id] = params_channels(params);
+	wm8994->channels[id] = params_channels(&hw_params);
 	if (pdata->max_channels_clocked[id] &&
 	    wm8994->channels[id] > pdata->max_channels_clocked[id]) {
 		dev_dbg(dai->dev, "Constraining channels to %d from %d\n",
@@ -2781,7 +2911,7 @@
 
 	/* Try to find an appropriate sample rate; look for an exact match. */
 	for (i = 0; i < ARRAY_SIZE(srs); i++)
-		if (srs[i].rate == params_rate(params))
+		if (srs[i].rate == params_rate(&hw_params))
 			break;
 	if (i == ARRAY_SIZE(srs))
 		return -EINVAL;
@@ -2802,10 +2932,10 @@
 
 	/* AIFCLK/fs ratio; look for a close match in either direction */
 	best = 0;
-	best_val = abs((fs_ratios[0] * params_rate(params))
+	best_val = abs((fs_ratios[0] * params_rate(&hw_params))
 		       - wm8994->aifclk[id]);
 	for (i = 1; i < ARRAY_SIZE(fs_ratios); i++) {
-		cur_val = abs((fs_ratios[i] * params_rate(params))
+		cur_val = abs((fs_ratios[i] * params_rate(&hw_params))
 			      - wm8994->aifclk[id]);
 		if (cur_val >= best_val)
 			continue;
@@ -2833,7 +2963,7 @@
 		bclk_divs[best], bclk_rate);
 	bclk |= best << WM8994_AIF1_BCLK_DIV_SHIFT;
 
-	lrclk = bclk_rate / params_rate(params);
+	lrclk = bclk_rate / params_rate(&hw_params);
 	if (!lrclk) {
 		dev_err(dai->dev, "Unable to generate LRCLK from %dHz BCLK\n",
 			bclk_rate);
@@ -2853,12 +2983,12 @@
 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
 		switch (dai->id) {
 		case 1:
-			wm8994->dac_rates[0] = params_rate(params);
+			wm8994->dac_rates[0] = params_rate(&hw_params);
 			wm8994_set_retune_mobile(codec, 0);
 			wm8994_set_retune_mobile(codec, 1);
 			break;
 		case 2:
-			wm8994->dac_rates[1] = params_rate(params);
+			wm8994->dac_rates[1] = params_rate(&hw_params);
 			wm8994_set_retune_mobile(codec, 2);
 			break;
 		}
@@ -2911,6 +3041,13 @@
 	return snd_soc_update_bits(codec, aif1_reg, WM8994_AIF1_WL_MASK, aif1);
 }
 
+#if IS_ENABLED(CONFIG_SND_MRFLD_MACHINE) || \
+	IS_ENABLED(CONFIG_SND_MOOR_MACHINE)
+static int wm8994_aif_mute(struct snd_soc_dai *codec_dai, int mute)
+{
+	return 0;
+}
+#else
 static int wm8994_aif_mute(struct snd_soc_dai *codec_dai, int mute)
 {
 	struct snd_soc_codec *codec = codec_dai->codec;
@@ -2937,6 +3074,7 @@
 
 	return 0;
 }
+#endif
 
 static int wm8994_set_tristate(struct snd_soc_dai *codec_dai, int tristate)
 {
@@ -2964,6 +3102,25 @@
 	return snd_soc_update_bits(codec, reg, mask, val);
 }
 
+static int wm8994_set_tdm_slots(struct snd_soc_dai *dai,
+	unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+	struct wm8994 *control = wm8994->wm8994;
+
+	switch (control->type) {
+	case WM8958:
+		wm8994->slots = slots;
+		break;
+	default:
+		pr_err("we dont support tdm for non 8958!");
+		return -EINVAL;
+		break;
+	}
+	return 0;
+}
+
 static int wm8994_aif2_probe(struct snd_soc_dai *dai)
 {
 	struct snd_soc_codec *codec = dai->codec;
@@ -2991,6 +3148,7 @@
 	.digital_mute	= wm8994_aif_mute,
 	.set_pll	= wm8994_set_fll,
 	.set_tristate	= wm8994_set_tristate,
+	.set_tdm_slot	= wm8994_set_tdm_slots,
 };
 
 static const struct snd_soc_dai_ops wm8994_aif2_dai_ops = {
@@ -3096,24 +3254,7 @@
 static int wm8994_codec_resume(struct snd_soc_codec *codec)
 {
 	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
-	struct wm8994 *control = wm8994->wm8994;
 	int i, ret;
-	unsigned int val, mask;
-
-	if (control->revision < 4) {
-		/* force a HW read */
-		ret = regmap_read(control->regmap,
-				  WM8994_POWER_MANAGEMENT_5, &val);
-
-		/* modify the cache only */
-		codec->cache_only = 1;
-		mask =  WM8994_DAC1R_ENA | WM8994_DAC1L_ENA |
-			WM8994_DAC2R_ENA | WM8994_DAC2L_ENA;
-		val &= mask;
-		snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
-				    mask, val);
-		codec->cache_only = 0;
-	}
 
 	for (i = 0; i < ARRAY_SIZE(wm8994->fll); i++) {
 		if (!wm8994->fll_suspend[i].out)
@@ -3495,6 +3636,32 @@
 			    wm8994->btn_mask);
 }
 
+static void wm8958_open_circuit_work(struct work_struct *work)
+{
+	struct wm8994_priv *wm8994 = container_of(work,
+						  struct wm8994_priv,
+						  open_circuit_work.work);
+	struct device *dev = wm8994->wm8994->dev;
+
+	wm1811_micd_stop(wm8994->hubs.codec);
+
+	mutex_lock(&wm8994->accdet_lock);
+
+	dev_dbg(dev, "Reporting open circuit\n");
+
+	wm8994->jack_mic = false;
+	wm8994->mic_detecting = true;
+	wm8994->headphone_detected = false;
+
+	wm8958_micd_set_rate(wm8994->hubs.codec);
+
+	snd_soc_jack_report(wm8994->micdet[0].jack, 0,
+			    wm8994->btn_mask |
+			    SND_JACK_HEADSET);
+
+	mutex_unlock(&wm8994->accdet_lock);
+}
+
 static void wm8958_mic_id(void *data, u16 status)
 {
 	struct snd_soc_codec *codec = data;
@@ -3504,16 +3671,9 @@
 	if (!(status & WM8958_MICD_STS)) {
 		/* If nothing present then clear our statuses */
 		dev_dbg(codec->dev, "Detected open circuit\n");
-		wm8994->jack_mic = false;
-		wm8994->mic_detecting = true;
 
-		wm1811_micd_stop(codec);
-
-		wm8958_micd_set_rate(codec);
-
-		snd_soc_jack_report(wm8994->micdet[0].jack, 0,
-				    wm8994->btn_mask |
-				    SND_JACK_HEADSET);
+		schedule_delayed_work(&wm8994->open_circuit_work,
+				      msecs_to_jiffies(2500));
 		return;
 	}
 
@@ -3568,10 +3728,8 @@
 
 	dev_dbg(codec->dev, "Starting mic detection\n");
 
-	/* Use a user-supplied callback if we have one */
-	if (wm8994->micd_cb) {
-		wm8994->micd_cb(wm8994->micd_cb_data);
-	} else {
+	/* If there's a callback it'll be called out of the lock */
+	if (!wm8994->micd_cb) {
 		/*
 		 * Start off measument of microphone impedence to find out
 		 * what's actually there.
@@ -3585,6 +3743,10 @@
 
 	mutex_unlock(&wm8994->accdet_lock);
 
+	/* Custom callbacks may reasonably wish to take the same locks */
+	if (wm8994->micd_cb)
+		wm8994->micd_cb(wm8994->micd_cb_data);
+
 	pm_runtime_put(codec->dev);
 }
 
@@ -3596,8 +3758,12 @@
 	int reg, delay;
 	bool present;
 
+	cancel_delayed_work_sync(&wm8994->mic_work);
+
 	pm_runtime_get_sync(codec->dev);
 
+	cancel_delayed_work_sync(&wm8994->mic_complete_work);
+
 	mutex_lock(&wm8994->accdet_lock);
 
 	reg = snd_soc_read(codec, WM1811_JACKDET_CTRL);
@@ -3630,8 +3796,6 @@
 	} else {
 		dev_dbg(codec->dev, "Jack not detected\n");
 
-		cancel_delayed_work_sync(&wm8994->mic_work);
-
 		snd_soc_update_bits(codec, WM8958_MICBIAS2,
 				    WM8958_MICB2_DISCH, WM8958_MICB2_DISCH);
 
@@ -3720,6 +3884,7 @@
 		} else {
 			wm8994->mic_detecting = true;
 			wm8994->jack_mic = false;
+			wm8994->headphone_detected = false;
 		}
 
 		if (id_cb) {
@@ -3780,11 +3945,70 @@
 }
 EXPORT_SYMBOL_GPL(wm8958_mic_detect);
 
+int wm8958_micd_set_custom_rate(struct snd_soc_codec *codec,
+		wm8958_micd_set_custom_rate_cb micd_custom_rate_cb,
+		void *micd_custom_rate_cb_data)
+{
+	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+
+	if (micd_custom_rate_cb) {
+		wm8994->micd_custom_rate_cb = micd_custom_rate_cb;
+		wm8994->micd_custom_rate_cb_data = micd_custom_rate_cb_data;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(wm8958_micd_set_custom_rate);
+
+static void wm8958_mic_work(struct work_struct *work)
+{
+	struct wm8994_priv *wm8994 = container_of(work,
+						  struct wm8994_priv,
+						  mic_complete_work.work);
+	struct snd_soc_codec *codec = wm8994->hubs.codec;
+
+	dev_crit(codec->dev, "MIC WORK %x\n", wm8994->mic_status);
+
+	pm_runtime_get_sync(codec->dev);
+
+	mutex_lock(&wm8994->accdet_lock);
+
+	wm8994->mic_id_cb(wm8994->mic_id_cb_data, wm8994->mic_status);
+
+	mutex_unlock(&wm8994->accdet_lock);
+
+	pm_runtime_put(codec->dev);
+
+	dev_crit(codec->dev, "MIC WORK %x DONE\n", wm8994->mic_status);
+}
+
+static void wm8958_micd_set_custom_rate_work(struct work_struct *work)
+{
+	struct wm8994_priv *wm8994 = container_of(work,
+						  struct wm8994_priv,
+						  micd_set_custom_rate_work.work);
+	struct snd_soc_codec *codec = wm8994->hubs.codec;
+
+	dev_dbg(codec->dev, "%s: Set custom rates\n", __func__);
+
+	pm_runtime_get_sync(codec->dev);
+
+	mutex_lock(&wm8994->accdet_lock);
+
+	wm8994->micd_custom_rate_cb(wm8994->micd_custom_rate_cb_data);
+
+	mutex_unlock(&wm8994->accdet_lock);
+
+	pm_runtime_put(codec->dev);
+
+}
+
 static irqreturn_t wm8958_mic_irq(int irq, void *data)
 {
 	struct wm8994_priv *wm8994 = data;
 	struct snd_soc_codec *codec = wm8994->hubs.codec;
-	int reg, count, ret;
+	struct wm8994 *control = wm8994->wm8994;
+	int reg, count, ret, id_delay;
 
 	/*
 	 * Jack detection may have detected a removal simulataneously
@@ -3794,6 +4018,9 @@
 	if (!(snd_soc_read(codec, WM8958_MIC_DETECT_1) & WM8958_MICD_ENA))
 		return IRQ_HANDLED;
 
+	cancel_delayed_work_sync(&wm8994->mic_complete_work);
+	cancel_delayed_work_sync(&wm8994->open_circuit_work);
+
 	pm_runtime_get_sync(codec->dev);
 
 	/* We may occasionally read a detection without an impedence
@@ -3842,14 +4069,30 @@
 		snd_soc_jack_report(wm8994->micdet[0].jack, 0,
 				    SND_JACK_MECHANICAL | SND_JACK_HEADSET |
 				    wm8994->btn_mask);
+		wm8994->jack_mic = false;
+		wm8994->headphone_detected = false;
 		wm8994->mic_detecting = true;
 		goto out;
 	}
 
-	if (wm8994->mic_detecting)
-		wm8994->mic_id_cb(wm8994->mic_id_cb_data, reg);
-	else
+	wm8994->mic_status = reg;
+	id_delay = wm8994->wm8994->pdata.mic_id_delay;
+
+	if (wm8994->mic_detecting) {
+		if (control->type == WM8958) {
+			/* Set mic-bias high during detection phase (micb_en_delay) */
+			/* 0 == Continuous */
+			dev_dbg(codec->dev, "Set MICBIAS High, for micb_en_delay time\n");
+			snd_soc_update_bits(codec, WM8958_MIC_DETECT_1,
+				    WM8958_MICD_BIAS_STARTTIME_MASK |
+				    WM8958_MICD_RATE_MASK, 0);
+		}
+
+		schedule_delayed_work(&wm8994->mic_complete_work,
+				      msecs_to_jiffies(id_delay));
+	} else {
 		wm8958_button_det(codec, reg);
+	}
 
 out:
 	pm_runtime_put(codec->dev);
@@ -3888,6 +4131,7 @@
 	struct wm8994 *control = dev_get_drvdata(codec->dev->parent);
 	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
 	struct snd_soc_dapm_context *dapm = &codec->dapm;
+	unsigned int dcs_done_irq;
 	unsigned int reg;
 	int ret, i;
 
@@ -3899,6 +4143,8 @@
 	mutex_init(&wm8994->accdet_lock);
 	INIT_DELAYED_WORK(&wm8994->jackdet_bootstrap,
 			  wm1811_jackdet_bootstrap);
+	INIT_DELAYED_WORK(&wm8994->open_circuit_work,
+			  wm8958_open_circuit_work);
 
 	switch (control->type) {
 	case WM8994:
@@ -3907,10 +4153,16 @@
 	case WM1811:
 		INIT_DELAYED_WORK(&wm8994->mic_work, wm1811_mic_work);
 		break;
+	case WM8958:
+		INIT_DELAYED_WORK(&wm8994->micd_set_custom_rate_work,
+					wm8958_micd_set_custom_rate_work);
+		break;
 	default:
 		break;
 	}
 
+	INIT_DELAYED_WORK(&wm8994->mic_complete_work, wm8958_mic_work);
+
 	for (i = 0; i < ARRAY_SIZE(wm8994->fll_locked); i++)
 		init_completion(&wm8994->fll_locked[i]);
 
@@ -3983,6 +4235,9 @@
 	wm8994_request_irq(wm8994->wm8994, WM8994_IRQ_TEMP_SHUT,
 			   wm8994_temp_shut, "Thermal shutdown", codec);
 
+	dcs_done_irq = regmap_irq_get_virq(wm8994->wm8994->irq_data,
+					   WM8994_IRQ_DCS_DONE);
+	irq_set_status_flags(dcs_done_irq, IRQ_NOAUTOEN);
 	ret = wm8994_request_irq(wm8994->wm8994, WM8994_IRQ_DCS_DONE,
 				 wm_hubs_dcs_done, "DC servo done",
 				 &wm8994->hubs);
@@ -4089,7 +4344,6 @@
 	}
 	if ((reg & WM8994_GPN_FN_MASK) != WM8994_GP_FN_PIN_SPECIFIC) {
 		wm8994->lrclk_shared[0] = 1;
-		wm8994_dai[0].symmetric_rates = 1;
 	} else {
 		wm8994->lrclk_shared[0] = 0;
 	}
@@ -4219,6 +4473,7 @@
 	}
 
 	wm_hubs_add_analogue_routes(codec, 0, 0);
+	enable_irq(dcs_done_irq);
 	snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
 
 	switch (control->type) {
@@ -4261,6 +4516,23 @@
 		break;
 	}
 
+	/* Make sure FIFO errors are masked */
+	snd_soc_update_bits(codec, WM8994_INTERRUPT_STATUS_2_MASK,
+			    WM8994_IM_FIFOS_ERR_EINT_MASK,
+			    1 << WM8994_IM_FIFOS_ERR_EINT_SHIFT);
+
+	/* Enable bandgap-VREFC */
+	/* Note: VREFC is required for jack detection in
+	 * low power jack detect mode */
+	/* TODO: get the hardcoded reg value macro name and the regmap sync
+	   issue resolved with the wolfson folks  */
+	snd_soc_write(codec, 0x102, 0x3);
+	regcache_sync_region(wm8994->wm8994->regmap, 0x102, 0x102);
+	snd_soc_write(codec, 0xCB, 0x3921);
+	regcache_sync_region(wm8994->wm8994->regmap, 0xCB, 0xCB);
+	snd_soc_write(codec, 0x102, 0x0);
+	regcache_sync_region(wm8994->wm8994->regmap, 0x102, 0x102);
+
 	return 0;
 
 err_irq:
@@ -4365,6 +4637,11 @@
 static int wm8994_suspend(struct device *dev)
 {
 	struct wm8994_priv *wm8994 = dev_get_drvdata(dev);
+	struct wm8994 *control = wm8994->wm8994;
+	struct snd_soc_codec *codec = wm8994->hubs.codec;
+	unsigned int reg;
+	int ret;
+
 
 	/* Drop down to power saving mode when system is suspended */
 	if (wm8994->jackdet && !wm8994->active_refcount)
@@ -4372,18 +4649,65 @@
 				   WM1811_JACKDET_MODE_MASK,
 				   wm8994->jackdet_mode);
 
+	/* Disable the MIC Detection when suspended */
+	if ((control->type == WM8958) && wm8994->mic_id_cb) {
+
+		reg = snd_soc_read(codec, WM8958_MIC_DETECT_3);
+
+		dev_dbg(codec->dev, "%s: WM8958_MIC_DETECT_3 0x%x\n", __func__, reg);
+		dev_dbg(codec->dev, "mic_detect %d jack_mic %d headphone %d\n",
+					wm8994->mic_detecting, wm8994->jack_mic,
+					wm8994->headphone_detected);
+
+		if (!(wm8994->jack_mic) && !(wm8994->headphone_detected)) {
+
+			dev_dbg(codec->dev, "Disable MIC Detection!!!\n");
+			snd_soc_update_bits(codec, WM8958_MIC_DETECT_1,
+						WM8958_MICD_ENA, 0);
+
+			snd_soc_dapm_disable_pin(&codec->dapm, "CLK_SYS");
+			snd_soc_dapm_sync(&codec->dapm);
+
+			dev_dbg(codec->dev, "Jack not connected..Mask interrupts\n");
+			snd_soc_write(codec, WM8994_INTERRUPT_CONTROL, 0x01);
+
+			ret = regcache_sync_region(wm8994->wm8994->regmap,
+					WM8994_INTERRUPT_CONTROL,
+					WM8994_INTERRUPT_CONTROL);
+			if (ret != 0)
+				dev_err(dev, "Failed to sync register: %d\n", ret);
+			synchronize_irq(control->irq);
+		}
+	}
+
 	return 0;
 }
 
 static int wm8994_resume(struct device *dev)
 {
 	struct wm8994_priv *wm8994 = dev_get_drvdata(dev);
+	struct wm8994 *control = wm8994->wm8994;
+	struct snd_soc_codec *codec = wm8994->hubs.codec;
 
 	if (wm8994->jackdet && wm8994->jackdet_mode)
 		regmap_update_bits(wm8994->wm8994->regmap, WM8994_ANTIPOP_2,
 				   WM1811_JACKDET_MODE_MASK,
 				   WM1811_JACKDET_MODE_AUDIO);
 
+	/* Enable the MIC Detection when resumed */
+	if ((control->type == WM8958) && wm8994->mic_id_cb) {
+
+		dev_dbg(codec->dev, "Unmask interrupts..\n");
+		snd_soc_write(codec, WM8994_INTERRUPT_CONTROL, 0x00);
+
+		snd_soc_dapm_force_enable_pin(&codec->dapm, "CLK_SYS");
+		snd_soc_dapm_sync(&codec->dapm);
+
+		dev_dbg(codec->dev, "Enable MIC Detection!!!\n");
+		snd_soc_update_bits(codec, WM8958_MIC_DETECT_1,
+					WM8958_MICD_ENA, WM8958_MICD_ENA);
+	}
+
 	return 0;
 }
 #endif
diff --git a/sound/soc/codecs/wm8994.h b/sound/soc/codecs/wm8994.h
index 55ddf4d..0a834df 100644
--- a/sound/soc/codecs/wm8994.h
+++ b/sound/soc/codecs/wm8994.h
@@ -41,6 +41,7 @@
 
 typedef void (*wm1811_micdet_cb)(void *data);
 typedef void (*wm1811_mic_id_cb)(void *data, u16 status);
+typedef void (*wm8958_micd_set_custom_rate_cb)(struct snd_soc_codec *codec);
 
 int wm8994_mic_detect(struct snd_soc_codec *codec, struct snd_soc_jack *jack,
 		      int micbias);
@@ -55,6 +56,10 @@
 
 void wm8958_dsp2_init(struct snd_soc_codec *codec);
 
+int wm8958_micd_set_custom_rate(struct snd_soc_codec *codec,
+			wm8958_micd_set_custom_rate_cb micd_custom_rate_cb,
+			void *micd_custom_rate_cb_data);
+
 struct wm8994_micdet {
 	struct snd_soc_jack *jack;
 	bool detecting;
@@ -86,6 +91,7 @@
 	bool fll_locked_irq;
 	bool fll_byp;
 	bool clk_has_run;
+	int slots;
 
 	int vmid_refcount;
 	int active_refcount;
@@ -134,8 +140,14 @@
 	struct mutex accdet_lock;
 	struct wm8994_micdet micdet[2];
 	struct delayed_work mic_work;
+	struct delayed_work open_circuit_work;
+	struct delayed_work mic_complete_work;
+	struct delayed_work micd_set_custom_rate_work;
+
+	u16 mic_status;
 	bool mic_detecting;
 	bool jack_mic;
+	bool headphone_detected;
 	int btn_mask;
 	bool jackdet;
 	int jackdet_mode;
@@ -146,6 +158,8 @@
 	void *micd_cb_data;
 	wm1811_mic_id_cb mic_id_cb;
 	void *mic_id_cb_data;
+	wm8958_micd_set_custom_rate_cb micd_custom_rate_cb;
+	void *micd_custom_rate_cb_data;
 
 	unsigned int aif1clk_enable:1;
 	unsigned int aif2clk_enable:1;
diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
index f5d81b9..4bbb1a8 100644
--- a/sound/soc/codecs/wm_hubs.c
+++ b/sound/soc/codecs/wm_hubs.c
@@ -530,6 +530,7 @@
 				hubs->hp_startup_mode);
 			break;
 		}
+		break;
 
 	case SND_SOC_DAPM_PRE_PMD:
 		snd_soc_update_bits(codec, WM8993_CHARGE_PUMP_1,
@@ -854,6 +855,17 @@
 SOC_DAPM_SINGLE("Right Output Switch", WM8993_LINE_MIXER2, 0, 1, 0),
 };
 
+static const char *hpvirtual_mux_text[] = {
+	"Enable",
+	"Disable",
+};
+
+static const struct soc_enum  hpvirtual_enum =
+	SOC_ENUM_SINGLE(0, 0, 2,  hpvirtual_mux_text);
+
+static const struct snd_kcontrol_new hpvirtual_mux =
+	SOC_DAPM_ENUM_VIRT("HPVIRTUAL",  hpvirtual_enum);
+
 static const struct snd_soc_dapm_widget analogue_dapm_widgets[] = {
 SND_SOC_DAPM_INPUT("IN1LN"),
 SND_SOC_DAPM_INPUT("IN1LP"),
@@ -940,7 +952,8 @@
 SND_SOC_DAPM_OUT_DRV_E("LINEOUT2P Driver", WM8993_POWER_MANAGEMENT_3, 10, 0,
 		       NULL, 0, lineout_event,
 		       SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
-
+SND_SOC_DAPM_VIRT_MUX_E("HPVIRTUAL", SND_SOC_NOPM, 0, 0, &hpvirtual_mux,
+			NULL, SND_SOC_DAPM_PRE_POST_PMD),
 SND_SOC_DAPM_OUTPUT("SPKOUTLP"),
 SND_SOC_DAPM_OUTPUT("SPKOUTLN"),
 SND_SOC_DAPM_OUTPUT("SPKOUTRP"),
@@ -1069,8 +1082,9 @@
 	{ "Headphone PGA", NULL, "CLK_SYS" },
 	{ "Headphone PGA", NULL, "Headphone Supply" },
 
-	{ "HPOUT1L", NULL, "Headphone PGA" },
-	{ "HPOUT1R", NULL, "Headphone PGA" },
+	{ "HPVIRTUAL", "Enable", "Headphone PGA"},
+	{ "HPOUT1L", NULL, "HPVIRTUAL"},
+	{ "HPOUT1R", NULL, "HPVIRTUAL"},
 
 	{ "LINEOUT1N Driver", NULL, "VMID" },
 	{ "LINEOUT1P Driver", NULL, "VMID" },
@@ -1222,11 +1236,6 @@
 				    WM8993_LINEOUT2_MODE,
 				    WM8993_LINEOUT2_MODE);
 
-	if (!lineout1_diff && !lineout2_diff)
-		snd_soc_update_bits(codec, WM8993_ANTIPOP1,
-				    WM8993_LINEOUT_VMID_BUF_ENA,
-				    WM8993_LINEOUT_VMID_BUF_ENA);
-
 	if (lineout1fb)
 		snd_soc_update_bits(codec, WM8993_ADDITIONAL_CONTROL,
 				    WM8993_LINEOUT1_FB, WM8993_LINEOUT1_FB);
@@ -1252,6 +1261,13 @@
 	struct wm_hubs_data *hubs = snd_soc_codec_get_drvdata(codec);
 	int val = 0;
 
+	if ((hubs->lineout1_se && hubs->lineout2_se) &&
+			(hubs->lineout1n_ena  || hubs->lineout1p_ena ||
+			hubs->lineout2n_ena || hubs->lineout2p_ena))
+		snd_soc_update_bits(codec, WM8993_ANTIPOP1,
+			WM8993_LINEOUT_VMID_BUF_ENA,
+			WM8993_LINEOUT_VMID_BUF_ENA);
+
 	if (hubs->lineout1_se)
 		val |= WM8993_LINEOUT1N_ENA | WM8993_LINEOUT1P_ENA;
 
@@ -1281,6 +1297,13 @@
 		val = 0;
 		mask = 0;
 
+		if ((hubs->lineout1_se && hubs->lineout2_se) &&
+				(hubs->lineout1n_ena  || hubs->lineout1p_ena ||
+				hubs->lineout2n_ena || hubs->lineout2p_ena))
+			snd_soc_update_bits(codec, WM8993_ANTIPOP1,
+				WM8993_LINEOUT_VMID_BUF_ENA,
+				WM8993_LINEOUT_VMID_BUF_ENA);
+
 		if (hubs->lineout1_se)
 			mask |= WM8993_LINEOUT1N_ENA | WM8993_LINEOUT1P_ENA;
 
diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
new file mode 100644
index 0000000..32727e9
--- /dev/null
+++ b/sound/soc/intel/Kconfig
@@ -0,0 +1,60 @@
+config SND_MOOR_MACHINE
+	tristate "SOC Machine Audio driver for Intel Moorefield MID platform"
+	depends on INTEL_SCU_IPC && X86
+	select SND_MOOR_PLATFORM
+	select SND_SOC_WM8994
+	select MFD_CORE
+	select MFD_WM8994
+	select REGULATOR_WM8994
+	select SND_SST_PLATFORM
+	select SND_SST_MACHINE
+	select SND_INTEL_SST
+	select SND_EFFECTS_OFFLOAD
+	default n
+	help
+	  This adds support for ASoC machine driver for Intel(R) MID Moorefield platform
+          used as alsa device in audio substream in Intel(R) MID devices
+          Say Y if you have such a device
+          If unsure select "N".
+
+config SND_MOOR_PLATFORM
+	tristate "Intel Moorefield MID platform"
+	default n
+	help
+	  This option selects base functionality for ASoC machine driver for Intel
+	  Moorefield platform
+
+config SND_MOOR_DPCM
+	bool "Use DPCM for Intel Moorefield MID platform"
+	default SST_DPCM
+	depends on SND_MOOR_MACHINE
+	help
+	  This adds an option to enable the DPCM for Intel Moorefield
+
+config SND_INTEL_SST
+       tristate
+
+config SND_SST_PLATFORM
+	tristate
+
+config SND_SOC_COMMS_SSP
+	depends on SND_INTEL_MID_I2S
+	tristate "Use ASOC framework to drive AudioComms SSP BT and Modem"
+	help
+	  Sound SOC cards usually used for BT VOIP and MODEM MIXING use cases.
+	  This will add devices for these uses cases in the list of alsa cards.
+	  Say Y if you need these sound cards (BT chipset or Modem present).
+	  Requires to enable the INTEL_MID_I2S low level SSP I2S driver.
+
+config PRH_TEMP_WA_FOR_SPID
+	tristate "Workaround for PRh since SPID is unavailable"
+
+config SST_DPCM
+	bool "Use DPCM based Machine Audio driver"
+	default n
+	help
+	  This adds an option to enable the DPCM based machine driver
+
+config SND_SST_MACHINE
+	tristate
+
diff --git a/sound/soc/intel/Makefile b/sound/soc/intel/Makefile
new file mode 100644
index 0000000..eae5b99
--- /dev/null
+++ b/sound/soc/intel/Makefile
@@ -0,0 +1,19 @@
+#EXTRA CFLAGS
+ccflags-y += -Werror
+
+ifeq (${TARGET_BUILD_VARIANT},$(filter ${TARGET_BUILD_VARIANT}, eng))
+ccflags-y += -DCONFIG_SND_VERBOSE_PRINTK -DCONFIG_SND_DEBUG -DCONFIG_SND_DEBUG_VERBOSE
+endif
+
+# SST Platform Driver
+PLATFORM_LIBS = platform-libs/controls_v1.o platform-libs/controls_v2.o platform-libs/controls_v2_dpcm.o \
+		platform-libs/ipc_lib_v2.o
+
+snd-soc-sst-platform-objs := pcm.o compress.o effects.o $(PLATFORM_LIBS)
+obj-$(CONFIG_SND_SST_PLATFORM) += snd-soc-sst-platform.o
+
+# Relevant Machine driver
+obj-$(CONFIG_SND_SST_MACHINE) += board/
+
+# DSP driver
+obj-$(CONFIG_SND_INTEL_SST) += sst/
diff --git a/sound/soc/intel/board/Makefile b/sound/soc/intel/board/Makefile
new file mode 100644
index 0000000..afe20c5
--- /dev/null
+++ b/sound/soc/intel/board/Makefile
@@ -0,0 +1,12 @@
+#EXTRA CFLAGS
+ccflags-y += -Werror
+
+# Merrifield board
+snd-merr-saltbay-lm49453-objs := merr_saltbay_lm49453.o
+snd-merr-saltbay-wm8958-objs := merr_saltbay_wm8958.o
+snd-merr-dpcm-wm8958-objs := merr_dpcm_wm8958.o
+
+# Moorefield board
+obj-$(CONFIG_SND_MOOR_DPCM) += snd-merr-dpcm-wm8958.o
+obj-$(CONFIG_SND_MOOR_MACHINE) += snd-merr-saltbay-wm8958.o
+
diff --git a/sound/soc/intel/board/merr_dpcm_wm8958.c b/sound/soc/intel/board/merr_dpcm_wm8958.c
new file mode 100644
index 0000000..3e07a85
--- /dev/null
+++ b/sound/soc/intel/board/merr_dpcm_wm8958.c
@@ -0,0 +1,1009 @@
+/*
+ *  merr_dpcm_wm8958.c - ASoc DPCM Machine driver for Intel Merrfield MID platform
+ *
+ *  Copyright (C) 2013 Intel Corp
+ *  Author: Vinod Koul <vinod.koul@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/async.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <asm/intel_scu_pmic.h>
+#include <asm/intel_mid_rpmsg.h>
+#include <asm/platform_mrfld_audio.h>
+#include <asm/intel_sst_mrfld.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/jack.h>
+#include <linux/input.h>
+#include <asm/intel-mid.h>
+
+#include <linux/mfd/wm8994/core.h>
+#include <linux/mfd/wm8994/registers.h>
+#include <linux/mfd/wm8994/pdata.h>
+#include "../../codecs/wm8994.h"
+
+/* Codec PLL output clk rate */
+#define CODEC_SYSCLK_RATE			24576000
+/* Input clock to codec at MCLK1 PIN */
+#define CODEC_IN_MCLK1_RATE			19200000
+/* Input clock to codec at MCLK2 PIN */
+#define CODEC_IN_MCLK2_RATE			32768
+/*  define to select between MCLK1 and MCLK2 input to codec as its clock */
+#define CODEC_IN_MCLK1				1
+#define CODEC_IN_MCLK2				2
+
+/* Register address for OSC Clock */
+#define MERR_OSC_CLKOUT_CTRL0_REG_ADDR  0xFF00BC04
+/* Size of osc clock register */
+#define MERR_OSC_CLKOUT_CTRL0_REG_SIZE  4
+
+struct mrfld_8958_mc_private {
+	struct snd_soc_jack jack;
+	int jack_retry;
+	u8 pmic_id;
+	void __iomem    *osc_clk0_reg;
+};
+
+
+/* set_osc_clk0-	enable/disables the osc clock0
+ * addr:		address of the register to write to
+ * enable:		bool to enable or disable the clock
+ */
+static inline void set_soc_osc_clk0(void __iomem *addr, bool enable)
+{
+	u32 osc_clk_ctrl;
+
+	osc_clk_ctrl = readl(addr);
+	if (enable)
+		osc_clk_ctrl |= BIT(31);
+	else
+		osc_clk_ctrl &= ~(BIT(31));
+
+	pr_debug("%s: enable:%d val 0x%x\n", __func__, enable, osc_clk_ctrl);
+
+	writel(osc_clk_ctrl, addr);
+}
+
+static inline struct snd_soc_codec *mrfld_8958_get_codec(struct snd_soc_card *card)
+{
+	bool found = false;
+	struct snd_soc_codec *codec;
+
+	list_for_each_entry(codec, &card->codec_dev_list, card_list) {
+		if (!strstr(codec->name, "wm8994-codec")) {
+			pr_debug("codec was %s", codec->name);
+			continue;
+		} else {
+			found = true;
+			break;
+		}
+	}
+	if (found == false) {
+		pr_warn("%s: cant find codec", __func__);
+		return NULL;
+	}
+	return codec;
+}
+
+/* TODO: find better way of doing this */
+static struct snd_soc_dai *find_codec_dai(struct snd_soc_card *card, const char *dai_name)
+{
+	int i;
+	for (i = 0; i < card->num_rtd; i++) {
+		if (!strcmp(card->rtd[i].codec_dai->name, dai_name))
+			return card->rtd[i].codec_dai;
+	}
+	pr_err("%s: unable to find codec dai\n", __func__);
+	/* this should never occur */
+	WARN_ON(1);
+	return NULL;
+}
+
+/* Function to switch the input clock for codec,  When audio is in
+ * progress input clock to codec will be through MCLK1 which is 19.2MHz
+ * while in off state input clock to codec will be through 32KHz through
+ * MCLK2
+ * card	: Sound card structure
+ * src	: Input clock source to codec
+ */
+static int mrfld_8958_set_codec_clk(struct snd_soc_card *card, int src)
+{
+	struct snd_soc_dai *aif1_dai = find_codec_dai(card, "wm8994-aif1");
+	int ret;
+
+	if (!aif1_dai)
+		return -ENODEV;
+
+	switch (src) {
+	case CODEC_IN_MCLK1:
+		/* Turn ON the PLL to generate required sysclk rate
+		 * from MCLK1 */
+		ret = snd_soc_dai_set_pll(aif1_dai,
+			WM8994_FLL1, WM8994_FLL_SRC_MCLK1,
+			CODEC_IN_MCLK1_RATE, CODEC_SYSCLK_RATE);
+		if (ret < 0) {
+			pr_err("Failed to start FLL: %d\n", ret);
+			return ret;
+		}
+		/* Switch to MCLK1 input */
+		ret = snd_soc_dai_set_sysclk(aif1_dai, WM8994_SYSCLK_FLL1,
+				CODEC_SYSCLK_RATE, SND_SOC_CLOCK_IN);
+		if (ret < 0) {
+			pr_err("Failed to set codec sysclk configuration %d\n",
+				 ret);
+			return ret;
+		}
+		break;
+	case CODEC_IN_MCLK2:
+		/* Switch to MCLK2 */
+		ret = snd_soc_dai_set_sysclk(aif1_dai, WM8994_SYSCLK_MCLK2,
+				32768, SND_SOC_CLOCK_IN);
+		if (ret < 0) {
+			pr_err("Failed to switch to MCLK2: %d", ret);
+			return ret;
+		}
+		/* Turn off PLL for MCLK1 */
+		ret = snd_soc_dai_set_pll(aif1_dai, WM8994_FLL1, 0, 0, 0);
+		if (ret < 0) {
+			pr_err("Failed to stop the FLL: %d", ret);
+			return ret;
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int mrfld_wm8958_set_clk_fmt(struct snd_soc_dai *codec_dai)
+{
+	unsigned int fmt;
+	int ret = 0;
+	struct snd_soc_card *card = codec_dai->card;
+	struct mrfld_8958_mc_private *ctx = snd_soc_card_get_drvdata(card);
+
+	/* Enable the osc clock at start so that it gets settling time */
+	set_soc_osc_clk0(ctx->osc_clk0_reg, true);
+
+	pr_err("setting snd_soc_dai_set_tdm_slot\n");
+	ret = snd_soc_dai_set_tdm_slot(codec_dai, 0, 0, 4, SNDRV_PCM_FORMAT_S24_LE);
+	if (ret < 0) {
+		pr_err("can't set codec pcm format %d\n", ret);
+		return ret;
+	}
+
+	/* WM8958 slave Mode */
+	fmt =   SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_IB_NF
+		| SND_SOC_DAIFMT_CBS_CFS;
+	ret = snd_soc_dai_set_fmt(codec_dai, fmt);
+	if (ret < 0) {
+		pr_err("can't set codec DAI configuration %d\n", ret);
+		return ret;
+	}
+
+	/* FIXME: move this to SYS_CLOCK event handler when codec driver
+	 * dependency is clean.
+	 */
+	/* Switch to 19.2MHz MCLK1 input clock for codec */
+	ret = mrfld_8958_set_codec_clk(card, CODEC_IN_MCLK1);
+
+	return ret;
+}
+
+static int mrfld_8958_hw_params(struct snd_pcm_substream *substream,
+			   struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+
+	if (!strcmp(codec_dai->name, "wm8994-aif1"))
+		return mrfld_wm8958_set_clk_fmt(codec_dai);
+	return 0;
+}
+
+static int mrfld_wm8958_compr_set_params(struct snd_compr_stream *cstream)
+{
+	return 0;
+}
+
+static const struct snd_soc_pcm_stream mrfld_wm8958_dai_params = {
+	.formats = SNDRV_PCM_FMTBIT_S24_LE,
+	.rate_min = 48000,
+	.rate_max = 48000,
+	.channels_min = 2,
+	.channels_max = 2,
+};
+
+static int merr_codec_fixup(struct snd_soc_pcm_runtime *rtd,
+			    struct snd_pcm_hw_params *params)
+{
+	struct snd_interval *rate = hw_param_interval(params,
+			SNDRV_PCM_HW_PARAM_RATE);
+	struct snd_interval *channels = hw_param_interval(params,
+						SNDRV_PCM_HW_PARAM_CHANNELS);
+
+	pr_debug("Invoked %s for dailink %s\n", __func__, rtd->dai_link->name);
+
+	/* The DSP will covert the FE rate to 48k, stereo, 24bits */
+	rate->min = rate->max = 48000;
+	channels->min = channels->max = 2;
+
+	/* set SSP2 to 24-bit */
+	snd_mask_set(&params->masks[SNDRV_PCM_HW_PARAM_FORMAT -
+				    SNDRV_PCM_HW_PARAM_FIRST_MASK],
+				    SNDRV_PCM_FORMAT_S24_LE);
+	return 0;
+}
+
+static int mrfld_8958_set_bias_level(struct snd_soc_card *card,
+				struct snd_soc_dapm_context *dapm,
+				enum snd_soc_bias_level level)
+{
+	struct snd_soc_dai *aif1_dai = find_codec_dai(card, "wm8994-aif1");
+	int ret = 0;
+
+	if (!aif1_dai)
+		return -ENODEV;
+
+	if (dapm->dev != aif1_dai->dev)
+		return 0;
+	switch (level) {
+	case SND_SOC_BIAS_PREPARE:
+		if (card->dapm.bias_level == SND_SOC_BIAS_STANDBY)
+
+			ret = mrfld_wm8958_set_clk_fmt(aif1_dai);
+		break;
+	default:
+		break;
+	}
+	pr_debug("%s card(%s)->bias_level %u\n", __func__, card->name,
+			card->dapm.bias_level);
+	return ret;
+}
+
+static int mrfld_8958_set_bias_level_post(struct snd_soc_card *card,
+		 struct snd_soc_dapm_context *dapm,
+		 enum snd_soc_bias_level level)
+{
+	struct snd_soc_dai *aif1_dai = find_codec_dai(card, "wm8994-aif1");
+	struct mrfld_8958_mc_private *ctx = snd_soc_card_get_drvdata(card);
+	int ret = 0;
+
+	if (!aif1_dai)
+		return -ENODEV;
+
+	if (dapm->dev != aif1_dai->dev)
+		return 0;
+
+	switch (level) {
+	case SND_SOC_BIAS_STANDBY:
+		/* We are in stabdba down so */
+		/* Switch to 32KHz MCLK2 input clock for codec
+		 */
+		ret = mrfld_8958_set_codec_clk(card, CODEC_IN_MCLK2);
+		/* Turn off 19.2MHz soc osc clock */
+		set_soc_osc_clk0(ctx->osc_clk0_reg, false);
+		break;
+	default:
+		break;
+	}
+	card->dapm.bias_level = level;
+	pr_debug("%s card(%s)->bias_level %u\n", __func__, card->name,
+			card->dapm.bias_level);
+	return ret;
+}
+
+#define PMIC_ID_ADDR		0x00
+#define PMIC_CHIP_ID_A0_VAL	0xC0
+
+static int mrfld_8958_set_vflex_vsel(struct snd_soc_dapm_widget *w,
+				struct snd_kcontrol *k, int event)
+{
+#define VFLEXCNT		0xAB
+#define VFLEXVSEL_5V		0x01
+#define VFLEXVSEL_B0_VSYS_PT	0x80	/* B0: Vsys pass-through */
+#define VFLEXVSEL_A0_4P5V	0x41	/* A0: 4.5V */
+
+	struct snd_soc_dapm_context *dapm = w->dapm;
+	struct snd_soc_card *card = dapm->card;
+	struct mrfld_8958_mc_private *ctx = snd_soc_card_get_drvdata(card);
+
+	u8 vflexvsel, pmic_id = ctx->pmic_id;
+	int retval = 0;
+
+	pr_debug("%s: ON? %d\n", __func__, SND_SOC_DAPM_EVENT_ON(event));
+
+	vflexvsel = (pmic_id == PMIC_CHIP_ID_A0_VAL) ? VFLEXVSEL_A0_4P5V : VFLEXVSEL_B0_VSYS_PT;
+	pr_debug("pmic_id %#x vflexvsel %#x\n", pmic_id,
+		SND_SOC_DAPM_EVENT_ON(event) ? VFLEXVSEL_5V : vflexvsel);
+
+	/*FIXME: seems to be issue with bypass mode in MOOR, for now
+		force the bias off volate as VFLEXVSEL_5V */
+	vflexvsel = VFLEXVSEL_5V;
+
+	if (SND_SOC_DAPM_EVENT_ON(event))
+		retval = intel_scu_ipc_iowrite8(VFLEXCNT, VFLEXVSEL_5V);
+	else if (SND_SOC_DAPM_EVENT_OFF(event))
+		retval = intel_scu_ipc_iowrite8(VFLEXCNT, vflexvsel);
+	if (retval)
+		pr_err("Error writing to VFLEXCNT register\n");
+
+	return retval;
+}
+
+static const struct snd_soc_dapm_widget widgets[] = {
+	SND_SOC_DAPM_HP("Headphones", NULL),
+	SND_SOC_DAPM_MIC("AMIC", NULL),
+	SND_SOC_DAPM_MIC("DMIC", NULL),
+	SND_SOC_DAPM_SUPPLY("VFLEXCNT", SND_SOC_NOPM, 0, 0,
+			mrfld_8958_set_vflex_vsel,
+			SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+};
+
+static const struct snd_soc_dapm_route map[] = {
+	{ "Headphones", NULL, "HPOUT1L" },
+	{ "Headphones", NULL, "HPOUT1R" },
+
+	/* saltbay uses 2 DMICs, other configs may use more so change below
+	 * accordingly
+	 */
+	{ "DMIC1DAT", NULL, "DMIC" },
+	{ "DMIC2DAT", NULL, "DMIC" },
+	/*{ "DMIC3DAT", NULL, "DMIC" },*/
+	/*{ "DMIC4DAT", NULL, "DMIC" },*/
+
+	/* MICBIAS2 is connected as Bias for AMIC so we link it
+	 * here. Also AMIC wires up to IN1LP pin.
+	 * DMIC is externally connected to 1.8V rail, so no link rqd.
+	 */
+	{ "AMIC", NULL, "MICBIAS2" },
+	{ "IN1LP", NULL, "AMIC" },
+
+	/* SWM map link the SWM outs to codec AIF */
+	{ "AIF1 Playback", NULL, "ssp2 Tx"},
+	{ "ssp2 Tx", NULL, "codec_out0"},
+	{ "ssp2 Tx", NULL, "codec_out1"},
+	{ "codec_in0", NULL, "ssp2 Rx" },
+	{ "codec_in1", NULL, "ssp2 Rx" },
+	{ "ssp2 Rx", NULL, "AIF1 Capture"},
+
+	{ "ssp0 Tx", NULL, "modem_out"},
+	{ "modem_in", NULL, "ssp0 Rx" },
+
+	{ "ssp1 Tx", NULL, "bt_fm_out"},
+	{ "bt_fm_in", NULL, "ssp1 Rx" },
+
+	{ "AIF1 Playback", NULL, "VFLEXCNT" },
+	{ "AIF1 Capture", NULL, "VFLEXCNT" },
+};
+
+static const struct wm8958_micd_rate micdet_rates[] = {
+	{ 32768,       true,  1, 4 },
+	{ 32768,       false, 1, 1 },
+	{ 44100 * 256, true,  7, 10 },
+	{ 44100 * 256, false, 7, 10 },
+};
+
+static void wm8958_custom_micd_set_rate(struct snd_soc_codec *codec)
+{
+	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+	struct wm8994 *control = dev_get_drvdata(codec->dev->parent);
+	int best, i, sysclk, val;
+	bool idle;
+	const struct wm8958_micd_rate *rates;
+	int num_rates;
+
+	idle = !wm8994->jack_mic;
+
+	sysclk = snd_soc_read(codec, WM8994_CLOCKING_1);
+	if (sysclk & WM8994_SYSCLK_SRC)
+		sysclk = wm8994->aifclk[1];
+	else
+		sysclk = wm8994->aifclk[0];
+
+	if (control->pdata.micd_rates) {
+		rates = control->pdata.micd_rates;
+		num_rates = control->pdata.num_micd_rates;
+	} else {
+		rates = micdet_rates;
+		num_rates = ARRAY_SIZE(micdet_rates);
+	}
+
+	best = 0;
+	for (i = 0; i < num_rates; i++) {
+		if (rates[i].idle != idle)
+			continue;
+		if (abs(rates[i].sysclk - sysclk) <
+		    abs(rates[best].sysclk - sysclk))
+			best = i;
+		else if (rates[best].idle != idle)
+			best = i;
+	}
+
+	val = rates[best].start << WM8958_MICD_BIAS_STARTTIME_SHIFT
+		| rates[best].rate << WM8958_MICD_RATE_SHIFT;
+
+	dev_dbg(codec->dev, "MICD rate %d,%d for %dHz %s\n",
+		rates[best].start, rates[best].rate, sysclk,
+		idle ? "idle" : "active");
+
+	snd_soc_update_bits(codec, WM8958_MIC_DETECT_1,
+			    WM8958_MICD_BIAS_STARTTIME_MASK |
+			    WM8958_MICD_RATE_MASK, val);
+}
+
+static void wm8958_custom_mic_id(void *data, u16 status)
+{
+	struct snd_soc_codec *codec = data;
+	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "wm8958 custom mic id called with status %x\n",
+		status);
+
+	/* Either nothing present or just starting detection */
+	if (!(status & WM8958_MICD_STS)) {
+		/* If nothing present then clear our statuses */
+		dev_dbg(codec->dev, "Detected open circuit\n");
+
+		schedule_delayed_work(&wm8994->open_circuit_work,
+				      msecs_to_jiffies(2500));
+		return;
+	}
+
+	schedule_delayed_work(&wm8994->micd_set_custom_rate_work,
+		msecs_to_jiffies(wm8994->wm8994->pdata.micb_en_delay));
+
+	/* If the measurement is showing a high impedence we've got a
+	 * microphone.
+	 */
+	if (status & 0x600) {
+		dev_dbg(codec->dev, "Detected microphone\n");
+
+		wm8994->mic_detecting = false;
+		wm8994->jack_mic = true;
+		wm8994->headphone_detected = false;
+
+		snd_soc_jack_report(wm8994->micdet[0].jack, SND_JACK_HEADSET,
+				    SND_JACK_HEADSET);
+	}
+
+
+	if (status & 0xfc) {
+		dev_dbg(codec->dev, "Detected headphone\n");
+
+		/* Partial inserts of headsets with complete insert
+		 * after an indeterminate amount of time require
+		 * continouous micdetect enabled (until open circuit
+		 * or headset is detected)
+		 * */
+		wm8994->mic_detecting = true;
+		wm8994->jack_mic = false;
+		wm8994->headphone_detected = true;
+
+		snd_soc_jack_report(wm8994->micdet[0].jack, SND_JACK_HEADPHONE,
+				    SND_JACK_HEADSET);
+	}
+}
+
+static int mrfld_8958_init(struct snd_soc_pcm_runtime *runtime)
+{
+	int ret;
+	unsigned int fmt;
+	struct snd_soc_codec *codec;
+	struct snd_soc_card *card = runtime->card;
+	struct snd_soc_dai *aif1_dai = find_codec_dai(card, "wm8994-aif1");
+	struct mrfld_8958_mc_private *ctx = snd_soc_card_get_drvdata(card);
+
+	if (!aif1_dai)
+		return -ENODEV;
+
+	pr_debug("Entry %s\n", __func__);
+
+	ret = snd_soc_dai_set_tdm_slot(aif1_dai, 0, 0, 4, SNDRV_PCM_FORMAT_S24_LE);
+	if (ret < 0) {
+		pr_err("can't set codec pcm format %d\n", ret);
+		return ret;
+	}
+
+	/* WM8958 slave Mode */
+	fmt =   SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_IB_NF
+		| SND_SOC_DAIFMT_CBS_CFS;
+	ret = snd_soc_dai_set_fmt(aif1_dai, fmt);
+	if (ret < 0) {
+		pr_err("can't set codec DAI configuration %d\n", ret);
+		return ret;
+	}
+
+	mrfld_8958_set_bias_level(card, &card->dapm, SND_SOC_BIAS_OFF);
+	card->dapm.idle_bias_off = true;
+
+	/* these pins are not used in SB config so mark as nc
+	 *
+	 * LINEOUT1, 2
+	 * IN1R
+	 * DMICDAT2
+	 */
+	snd_soc_dapm_nc_pin(&card->dapm, "DMIC2DAT");
+	snd_soc_dapm_nc_pin(&card->dapm, "LINEOUT1P");
+	snd_soc_dapm_nc_pin(&card->dapm, "LINEOUT1N");
+	snd_soc_dapm_nc_pin(&card->dapm, "LINEOUT2P");
+	snd_soc_dapm_nc_pin(&card->dapm, "LINEOUT2N");
+	snd_soc_dapm_nc_pin(&card->dapm, "IN1RN");
+	snd_soc_dapm_nc_pin(&card->dapm, "IN1RP");
+
+	/* Force enable VMID to avoid cold latency constraints */
+	snd_soc_dapm_force_enable_pin(&card->dapm, "VMID");
+	snd_soc_dapm_sync(&card->dapm);
+
+	codec = mrfld_8958_get_codec(card);
+	if (!codec) {
+		pr_err("%s: we didnt find the codec pointer!\n", __func__);
+		return 0;
+	}
+
+	ctx->jack_retry = 0;
+	ret = snd_soc_jack_new(codec, "Intel MID Audio Jack",
+			       SND_JACK_HEADSET | SND_JACK_HEADPHONE |
+				SND_JACK_BTN_0 | SND_JACK_BTN_1,
+				&ctx->jack);
+	if (ret) {
+		pr_err("jack creation failed\n");
+		return ret;
+	}
+
+	snd_jack_set_key(ctx->jack.jack, SND_JACK_BTN_1, KEY_MEDIA);
+	snd_jack_set_key(ctx->jack.jack, SND_JACK_BTN_0, KEY_MEDIA);
+
+	snd_soc_update_bits(codec, WM8958_MICBIAS2, WM8958_MICB2_LVL_MASK,
+				WM8958_MICB2_LVL_2P6V << WM8958_MICB2_LVL_SHIFT);
+
+	wm8958_mic_detect(codec, &ctx->jack, NULL, NULL,
+			  wm8958_custom_mic_id, codec);
+
+	wm8958_micd_set_custom_rate(codec, wm8958_custom_micd_set_rate, codec);
+
+	snd_soc_update_bits(codec, WM8994_AIF1_DAC1_FILTERS_1, WM8994_AIF1DAC1_MUTE, 0);
+	snd_soc_update_bits(codec, WM8994_AIF1_DAC2_FILTERS_1, WM8994_AIF1DAC2_MUTE, 0);
+
+	/* Micbias1 is always off, so for pm optimizations make sure the micbias1
+	 * discharge bit is set to floating to avoid discharge in disable state
+	 */
+	snd_soc_update_bits(codec, WM8958_MICBIAS1, WM8958_MICB1_DISCH, 0);
+
+	return 0;
+}
+
+static unsigned int rates_8000_16000[] = {
+	8000,
+	16000,
+};
+
+static struct snd_pcm_hw_constraint_list constraints_8000_16000 = {
+	.count = ARRAY_SIZE(rates_8000_16000),
+	.list  = rates_8000_16000,
+};
+static unsigned int rates_48000[] = {
+	48000,
+};
+
+static struct snd_pcm_hw_constraint_list constraints_48000 = {
+	.count = ARRAY_SIZE(rates_48000),
+	.list  = rates_48000,
+};
+static int mrfld_8958_startup(struct snd_pcm_substream *substream)
+{
+	return snd_pcm_hw_constraint_list(substream->runtime, 0,
+			SNDRV_PCM_HW_PARAM_RATE,
+			&constraints_48000);
+}
+
+static struct snd_soc_ops mrfld_8958_ops = {
+	.startup = mrfld_8958_startup,
+};
+static int mrfld_8958_8k_16k_startup(struct snd_pcm_substream *substream)
+{
+	return snd_pcm_hw_constraint_list(substream->runtime, 0,
+			SNDRV_PCM_HW_PARAM_RATE,
+			&constraints_8000_16000);
+}
+
+static struct snd_soc_ops mrfld_8958_8k_16k_ops = {
+	.startup = mrfld_8958_8k_16k_startup,
+	.hw_params = mrfld_8958_hw_params,
+};
+
+static struct snd_soc_ops mrfld_8958_be_ssp2_ops = {
+	.hw_params = mrfld_8958_hw_params,
+};
+static struct snd_soc_compr_ops mrfld_compr_ops = {
+	.set_params = mrfld_wm8958_compr_set_params,
+};
+
+struct snd_soc_dai_link mrfld_8958_dpcm_msic_dailink[] = {
+	[MERR_DPCM_AUDIO] = {
+		.name = "Merrifield Audio Port",
+		.stream_name = "Saltbay Audio",
+		.cpu_dai_name = "Headset-cpu-dai",
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.platform_name = "sst-platform",
+		.init = mrfld_8958_init,
+		.ignore_suspend = 1,
+		.dynamic = 1,
+		.ops = &mrfld_8958_ops,
+	},
+	[MERR_DPCM_DB] = {
+		.name = "Merrifield DB Audio Port",
+		.stream_name = "Deep Buffer Audio",
+		.cpu_dai_name = "Deepbuffer-cpu-dai",
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.platform_name = "sst-platform",
+		.init = mrfld_8958_init,
+		.ignore_suspend = 1,
+		.dynamic = 1,
+		.ops = &mrfld_8958_ops,
+	},
+	[MERR_DPCM_LL] = {
+		.name = "Merrifield LL Audio Port",
+		.stream_name = "Low Latency Audio",
+		.cpu_dai_name = "Lowlatency-cpu-dai",
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.platform_name = "sst-platform",
+		.init = mrfld_8958_init,
+		.ignore_suspend = 1,
+		.dynamic = 1,
+		.ops = &mrfld_8958_ops,
+	},
+	[MERR_DPCM_COMPR] = {
+		.name = "Merrifield Compress Port",
+		.stream_name = "Saltbay Compress",
+		.platform_name = "sst-platform",
+		.cpu_dai_name = "Compress-cpu-dai",
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.dynamic = 1,
+		.init = mrfld_8958_init,
+		.compr_ops = &mrfld_compr_ops,
+	},
+	[MERR_DPCM_VOIP] = {
+		.name = "Merrifield VOIP Port",
+		.stream_name = "Saltbay Voip",
+		.cpu_dai_name = "Voip-cpu-dai",
+		.platform_name = "sst-platform",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.init = NULL,
+		.ignore_suspend = 1,
+		.ops = &mrfld_8958_8k_16k_ops,
+		.dynamic = 1,
+	},
+	[MERR_DPCM_PROBE] = {
+		.name = "Merrifield Probe Port",
+		.stream_name = "Saltbay Probe",
+		.cpu_dai_name = "Probe-cpu-dai",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.platform_name = "sst-platform",
+		.playback_count = 8,
+		.capture_count = 8,
+	},
+	/* CODEC<->CODEC link */
+	{
+		.name = "Merrifield Codec-Loop Port",
+		.stream_name = "Saltbay Codec-Loop",
+		.cpu_dai_name = "ssp2-port",
+		.platform_name = "sst-platform",
+		.codec_dai_name = "wm8994-aif1",
+		.codec_name = "wm8994-codec",
+		.dai_fmt = SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_IB_NF
+						| SND_SOC_DAIFMT_CBS_CFS,
+		.params = &mrfld_wm8958_dai_params,
+		.dsp_loopback = true,
+	},
+	{
+		.name = "Merrifield Modem-Loop Port",
+		.stream_name = "Saltbay Modem-Loop",
+		.cpu_dai_name = "ssp0-port",
+		.platform_name = "sst-platform",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.params = &mrfld_wm8958_dai_params,
+		.dsp_loopback = true,
+	},
+	{
+		.name = "Merrifield BTFM-Loop Port",
+		.stream_name = "Saltbay BTFM-Loop",
+		.cpu_dai_name = "ssp1-port",
+		.platform_name = "sst-platform",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.params = &mrfld_wm8958_dai_params,
+		.dsp_loopback = true,
+	},
+
+	/* back ends */
+	{
+		.name = "SSP2-Codec",
+		.be_id = 1,
+		.cpu_dai_name = "ssp2-port",
+		.platform_name = "sst-platform",
+		.no_pcm = 1,
+		.codec_dai_name = "wm8994-aif1",
+		.codec_name = "wm8994-codec",
+		.be_hw_params_fixup = merr_codec_fixup,
+		.ignore_suspend = 1,
+		.ops = &mrfld_8958_be_ssp2_ops,
+	},
+	{
+		.name = "SSP1-BTFM",
+		.be_id = 2,
+		.cpu_dai_name = "snd-soc-dummy-dai",
+		.platform_name = "snd-soc-dummy",
+		.no_pcm = 1,
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.ignore_suspend = 1,
+	},
+	{
+		.name = "SSP0-Modem",
+		.be_id = 3,
+		.cpu_dai_name = "snd-soc-dummy-dai",
+		.platform_name = "snd-soc-dummy",
+		.no_pcm = 1,
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.ignore_suspend = 1,
+	},
+};
+
+#ifdef CONFIG_PM_SLEEP
+static int snd_mrfld_dpcm_8958_prepare(struct device *dev)
+{
+	struct snd_soc_card *card = dev_get_drvdata(dev);
+	struct snd_soc_codec *codec;
+	struct snd_soc_dapm_context *dapm;
+
+	pr_debug("In %s\n", __func__);
+
+	codec = mrfld_8958_get_codec(card);
+	if (!codec) {
+		pr_err("%s: couldn't find the codec pointer!\n", __func__);
+		return -EAGAIN;
+	}
+
+	pr_debug("found codec %s\n", codec->name);
+	dapm = &codec->dapm;
+
+	snd_soc_dapm_disable_pin(dapm, "VMID");
+	snd_soc_dapm_sync(dapm);
+
+	snd_soc_suspend(dev);
+	return 0;
+}
+
+static void snd_mrfld_dpcm_8958_complete(struct device *dev)
+{
+	struct snd_soc_card *card = dev_get_drvdata(dev);
+	struct snd_soc_codec *codec;
+	struct snd_soc_dapm_context *dapm;
+
+	pr_debug("In %s\n", __func__);
+
+	codec = mrfld_8958_get_codec(card);
+	if (!codec) {
+		pr_err("%s: couldn't find the codec pointer!\n", __func__);
+		return;
+	}
+
+	pr_debug("found codec %s\n", codec->name);
+	dapm = &codec->dapm;
+
+	snd_soc_dapm_force_enable_pin(dapm, "VMID");
+	snd_soc_dapm_sync(dapm);
+
+	snd_soc_resume(dev);
+	return;
+}
+
+static int snd_mrfld_dpcm_8958_poweroff(struct device *dev)
+{
+	pr_debug("In %s\n", __func__);
+	snd_soc_poweroff(dev);
+	return 0;
+}
+#else
+#define snd_mrfld_dpcm_8958_prepare NULL
+#define snd_mrfld_dpcm_8958_complete NULL
+#define snd_mrfld_dpcm_8958_poweroff NULL
+#endif
+
+/* SoC card */
+static struct snd_soc_card snd_soc_card_mrfld = {
+	.name = "wm8958-audio",
+	.dai_link = mrfld_8958_dpcm_msic_dailink,
+	.num_links = ARRAY_SIZE(mrfld_8958_dpcm_msic_dailink),
+	.set_bias_level = mrfld_8958_set_bias_level,
+	.set_bias_level_post = mrfld_8958_set_bias_level_post,
+	.dapm_widgets = widgets,
+	.num_dapm_widgets = ARRAY_SIZE(widgets),
+	.dapm_routes = map,
+	.num_dapm_routes = ARRAY_SIZE(map),
+};
+
+static int snd_mrfld_dpcm_8958_mc_probe(struct platform_device *pdev)
+{
+	int ret_val = 0;
+	struct mrfld_8958_mc_private *drv;
+
+	pr_debug("Entry %s\n", __func__);
+
+	drv = kzalloc(sizeof(*drv), GFP_ATOMIC);
+	if (!drv) {
+		pr_err("allocation failed\n");
+		return -ENOMEM;
+	}
+
+	/* ioremap the register */
+	drv->osc_clk0_reg = devm_ioremap_nocache(&pdev->dev,
+					MERR_OSC_CLKOUT_CTRL0_REG_ADDR,
+					MERR_OSC_CLKOUT_CTRL0_REG_SIZE);
+	if (!drv->osc_clk0_reg) {
+		pr_err("osc clk0 ctrl ioremap failed\n");
+		ret_val = -1;
+		goto unalloc;
+	}
+
+	ret_val = intel_scu_ipc_ioread8(PMIC_ID_ADDR, &drv->pmic_id);
+	if (ret_val) {
+		pr_err("Error reading PMIC ID register\n");
+		goto unalloc;
+	}
+
+	/* register the soc card */
+	snd_soc_card_mrfld.dev = &pdev->dev;
+	snd_soc_card_set_drvdata(&snd_soc_card_mrfld, drv);
+	ret_val = snd_soc_register_card(&snd_soc_card_mrfld);
+	if (ret_val) {
+		pr_err("snd_soc_register_card failed %d\n", ret_val);
+		goto unalloc;
+	}
+	platform_set_drvdata(pdev, &snd_soc_card_mrfld);
+	pr_info("%s successful\n", __func__);
+	return ret_val;
+
+unalloc:
+	kfree(drv);
+	return ret_val;
+}
+
+static int snd_mrfld_dpcm_8958_mc_remove(struct platform_device *pdev)
+{
+	struct snd_soc_card *soc_card = platform_get_drvdata(pdev);
+	struct mrfld_8958_mc_private *drv = snd_soc_card_get_drvdata(soc_card);
+
+	pr_debug("In %s\n", __func__);
+	kfree(drv);
+	snd_soc_card_set_drvdata(soc_card, NULL);
+	snd_soc_unregister_card(soc_card);
+	platform_set_drvdata(pdev, NULL);
+	return 0;
+}
+
+const struct dev_pm_ops snd_mrfld_dpcm_8958_mc_pm_ops = {
+	.prepare = snd_mrfld_dpcm_8958_prepare,
+	.complete = snd_mrfld_dpcm_8958_complete,
+	.poweroff = snd_mrfld_dpcm_8958_poweroff,
+};
+
+static struct platform_driver snd_mrfld_dpcm_8958_mc_driver = {
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = "mrfld_wm8958",
+		.pm = &snd_mrfld_dpcm_8958_mc_pm_ops,
+	},
+	.probe = snd_mrfld_dpcm_8958_mc_probe,
+	.remove = snd_mrfld_dpcm_8958_mc_remove,
+};
+
+static int snd_mrfld_dpcm_8958_driver_init(void)
+{
+	pr_info("Merrifield Machine Driver mrfld_wm8958 registerd\n");
+	return platform_driver_register(&snd_mrfld_dpcm_8958_mc_driver);
+}
+
+static void snd_mrfld_dpcm_8958_driver_exit(void)
+{
+	pr_debug("In %s\n", __func__);
+	platform_driver_unregister(&snd_mrfld_dpcm_8958_mc_driver);
+}
+
+static int snd_mrfld_dpcm_8958_rpmsg_probe(struct rpmsg_channel *rpdev)
+{
+	int ret = 0;
+
+	if (rpdev == NULL) {
+		pr_err("rpmsg channel not created\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	dev_info(&rpdev->dev, "Probed snd_mrfld wm8958 rpmsg device\n");
+
+	ret = snd_mrfld_dpcm_8958_driver_init();
+
+out:
+	return ret;
+}
+
+static void snd_mrfld_dpcm_8958_rpmsg_remove(struct rpmsg_channel *rpdev)
+{
+	snd_mrfld_dpcm_8958_driver_exit();
+	dev_info(&rpdev->dev, "Removed snd_mrfld wm8958 rpmsg device\n");
+}
+
+static void snd_mrfld_dpcm_8958_rpmsg_cb(struct rpmsg_channel *rpdev, void *data,
+				int len, void *priv, u32 src)
+{
+	dev_warn(&rpdev->dev, "unexpected, message\n");
+
+	print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+			data, len,  true);
+}
+
+static struct rpmsg_device_id snd_mrfld_dpcm_8958_rpmsg_id_table[] = {
+	{ .name = "rpmsg_mrfld_wm8958_audio" },
+	{ },
+};
+MODULE_DEVICE_TABLE(rpmsg, snd_mrfld_dpcm_8958_rpmsg_id_table);
+
+static struct rpmsg_driver snd_mrfld_dpcm_8958_rpmsg = {
+	.drv.name	= KBUILD_MODNAME,
+	.drv.owner	= THIS_MODULE,
+	.id_table	= snd_mrfld_dpcm_8958_rpmsg_id_table,
+	.probe		= snd_mrfld_dpcm_8958_rpmsg_probe,
+	.callback	= snd_mrfld_dpcm_8958_rpmsg_cb,
+	.remove		= snd_mrfld_dpcm_8958_rpmsg_remove,
+};
+
+static int __init snd_mrfld_dpcm_8958_rpmsg_init(void)
+{
+	return register_rpmsg_driver(&snd_mrfld_dpcm_8958_rpmsg);
+}
+late_initcall(snd_mrfld_dpcm_8958_rpmsg_init);
+
+static void __exit snd_mrfld_dpcm_8958_rpmsg_exit(void)
+{
+	return unregister_rpmsg_driver(&snd_mrfld_dpcm_8958_rpmsg);
+}
+module_exit(snd_mrfld_dpcm_8958_rpmsg_exit);
+
+MODULE_DESCRIPTION("ASoC Intel(R) Merrifield MID Machine driver");
+MODULE_AUTHOR("Vinod Koul <vinod.koul@linux.intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:mrfld_wm8958");
diff --git a/sound/soc/intel/board/merr_saltbay_wm8958.c b/sound/soc/intel/board/merr_saltbay_wm8958.c
new file mode 100644
index 0000000..1a852dd
--- /dev/null
+++ b/sound/soc/intel/board/merr_saltbay_wm8958.c
@@ -0,0 +1,897 @@
+/*
+ *  merr_saltbay_wm8958.c - ASoc Machine driver for Intel Merrfield MID platform
+ *
+ *  Copyright (C) 2013 Intel Corp
+ *  Author: Vinod Koul <vinod.koul@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/async.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <asm/intel_mid_rpmsg.h>
+#include <asm/platform_mrfld_audio.h>
+#include <asm/intel_sst_mrfld.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/jack.h>
+#include <linux/input.h>
+#include <asm/intel-mid.h>
+
+#include <linux/mfd/wm8994/core.h>
+#include <linux/mfd/wm8994/registers.h>
+#include <linux/mfd/wm8994/pdata.h>
+#include "../../codecs/wm8994.h"
+
+/* Codec PLL output clk rate */
+#define CODEC_SYSCLK_RATE			24576000
+/* Input clock to codec at MCLK1 PIN */
+#define CODEC_IN_MCLK1_RATE			19200000
+/* Input clock to codec at MCLK2 PIN */
+#define CODEC_IN_MCLK2_RATE			32768
+/*  define to select between MCLK1 and MCLK2 input to codec as its clock */
+#define CODEC_IN_MCLK1				1
+#define CODEC_IN_MCLK2				2
+
+/* Register address for OSC Clock */
+#define MERR_OSC_CLKOUT_CTRL0_REG_ADDR  0xFF00BC04
+/* Size of osc clock register */
+#define MERR_OSC_CLKOUT_CTRL0_REG_SIZE  4
+
+struct mrfld_8958_mc_private {
+	struct snd_soc_jack jack;
+	int jack_retry;
+	void __iomem    *osc_clk0_reg;
+	int spk_gpio;
+};
+
+
+/* set_osc_clk0-	enable/disables the osc clock0
+ * addr:		address of the register to write to
+ * enable:		bool to enable or disable the clock
+ */
+static inline void set_soc_osc_clk0(void __iomem *addr, bool enable)
+{
+	u32 osc_clk_ctrl;
+
+	osc_clk_ctrl = readl(addr);
+	if (enable)
+		osc_clk_ctrl |= BIT(31);
+	else
+		osc_clk_ctrl &= ~(BIT(31));
+
+	pr_debug("%s: enable:%d val 0x%x\n", __func__, enable, osc_clk_ctrl);
+
+	writel(osc_clk_ctrl, addr);
+}
+
+
+static inline struct snd_soc_codec *mrfld_8958_get_codec(struct snd_soc_card *card)
+{
+	bool found = false;
+	struct snd_soc_codec *codec;
+
+	list_for_each_entry(codec, &card->codec_dev_list, card_list) {
+		if (!strstr(codec->name, "wm8994-codec")) {
+			pr_debug("codec was %s", codec->name);
+			continue;
+		} else {
+			found = true;
+			break;
+		}
+	}
+	if (found == false) {
+		pr_err("%s: cant find codec", __func__);
+		return NULL;
+	}
+	return codec;
+}
+
+/* Function to switch the input clock for codec,  When audio is in
+ * progress input clock to codec will be through MCLK1 which is 19.2MHz
+ * while in off state input clock to codec will be through 32KHz through
+ * MCLK2
+ * card	: Sound card structure
+ * src	: Input clock source to codec
+ */
+static int mrfld_8958_set_codec_clk(struct snd_soc_card *card, int src)
+{
+	struct snd_soc_dai *aif1_dai = card->rtd[0].codec_dai;
+	int ret;
+
+	switch (src) {
+	case CODEC_IN_MCLK1:
+		/* Turn ON the PLL to generate required sysclk rate
+		 * from MCLK1 */
+		ret = snd_soc_dai_set_pll(aif1_dai,
+			WM8994_FLL1, WM8994_FLL_SRC_MCLK1,
+			CODEC_IN_MCLK1_RATE, CODEC_SYSCLK_RATE);
+		if (ret < 0) {
+			pr_err("Failed to start FLL: %d\n", ret);
+			return ret;
+		}
+		/* Switch to MCLK1 input */
+		ret = snd_soc_dai_set_sysclk(aif1_dai, WM8994_SYSCLK_FLL1,
+				CODEC_SYSCLK_RATE, SND_SOC_CLOCK_IN);
+		if (ret < 0) {
+			pr_err("Failed to set codec sysclk configuration %d\n",
+				 ret);
+			return ret;
+		}
+		break;
+	case CODEC_IN_MCLK2:
+		/* Switch to MCLK2 */
+		ret = snd_soc_dai_set_sysclk(aif1_dai, WM8994_SYSCLK_MCLK2,
+				32768, SND_SOC_CLOCK_IN);
+		if (ret < 0) {
+			pr_err("Failed to switch to MCLK2: %d", ret);
+			return ret;
+		}
+		/* Turn off PLL for MCLK1 */
+		ret = snd_soc_dai_set_pll(aif1_dai, WM8994_FLL1, 0, 0, 0);
+		if (ret < 0) {
+			pr_err("Failed to stop the FLL: %d", ret);
+			return ret;
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int mrfld_wm8958_set_clk_fmt(struct snd_soc_dai *codec_dai)
+{
+	unsigned int fmt;
+	int ret = 0;
+	struct snd_soc_card *card = codec_dai->card;
+	struct mrfld_8958_mc_private *ctx = snd_soc_card_get_drvdata(card);
+
+	/* Enable the osc clock at start so that it gets settling time */
+	set_soc_osc_clk0(ctx->osc_clk0_reg, true);
+
+	ret = snd_soc_dai_set_tdm_slot(codec_dai, 0, 0, 4, SNDRV_PCM_FORMAT_S24_LE);
+	if (ret < 0) {
+		pr_err("can't set codec pcm format %d\n", ret);
+		return ret;
+	}
+
+	/* WM8958 slave Mode */
+	fmt =   SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_IB_NF
+		| SND_SOC_DAIFMT_CBS_CFS;
+	ret = snd_soc_dai_set_fmt(codec_dai, fmt);
+	if (ret < 0) {
+		pr_err("can't set codec DAI configuration %d\n", ret);
+		return ret;
+	}
+
+	/* FIXME: move this to SYS_CLOCK event handler when codec driver
+	 * dependency is clean.
+	 */
+	/* Switch to 19.2MHz MCLK1 input clock for codec */
+	ret = mrfld_8958_set_codec_clk(card, CODEC_IN_MCLK1);
+
+	return ret;
+}
+
+static int mrfld_8958_hw_params(struct snd_pcm_substream *substream,
+			   struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+
+	return mrfld_wm8958_set_clk_fmt(codec_dai);
+}
+
+static int mrfld_wm8958_compr_set_params(struct snd_compr_stream *cstream)
+{
+	struct snd_soc_pcm_runtime *rtd = cstream->private_data;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+
+	return mrfld_wm8958_set_clk_fmt(codec_dai);
+}
+static int mrfld_8958_set_bias_level(struct snd_soc_card *card,
+		struct snd_soc_dapm_context *dapm,
+		enum snd_soc_bias_level level)
+{
+	struct snd_soc_dai *aif1_dai = card->rtd[0].codec_dai;
+	int ret = 0;
+
+	if (dapm->dev != aif1_dai->dev)
+		return 0;
+	switch (level) {
+	case SND_SOC_BIAS_PREPARE:
+		if (card->dapm.bias_level == SND_SOC_BIAS_STANDBY)
+
+			ret = mrfld_wm8958_set_clk_fmt(aif1_dai);
+		break;
+	default:
+		break;
+	}
+	pr_debug("%s card(%s)->bias_level %u\n", __func__, card->name,
+			card->dapm.bias_level);
+	return ret;
+}
+static int mrfld_8958_set_bias_level_post(struct snd_soc_card *card,
+		 struct snd_soc_dapm_context *dapm,
+		 enum snd_soc_bias_level level)
+{
+	struct snd_soc_dai *aif1_dai = card->rtd[0].codec_dai;
+	struct mrfld_8958_mc_private *ctx = snd_soc_card_get_drvdata(card);
+	int ret = 0;
+
+	if (dapm->dev != aif1_dai->dev)
+		return 0;
+
+	switch (level) {
+	case SND_SOC_BIAS_STANDBY:
+		/* We are in stabdba down so */
+		/* Switch to 32KHz MCLK2 input clock for codec
+		 */
+		ret = mrfld_8958_set_codec_clk(card, CODEC_IN_MCLK2);
+		/* Turn off 19.2MHz soc osc clock */
+		set_soc_osc_clk0(ctx->osc_clk0_reg, false);
+		break;
+	default:
+		break;
+	}
+	card->dapm.bias_level = level;
+	pr_debug("%s card(%s)->bias_level %u\n", __func__, card->name,
+			card->dapm.bias_level);
+	return ret;
+}
+
+static int mrfld_8958_set_spk_boost(struct snd_soc_dapm_widget *w,
+				struct snd_kcontrol *k, int event)
+{
+	struct snd_soc_dapm_context *dapm = w->dapm;
+	struct snd_soc_card *card = dapm->card;
+	struct mrfld_8958_mc_private *ctx = snd_soc_card_get_drvdata(card);
+	int ret = 0;
+
+	pr_debug("%s: ON? %d\n", __func__, SND_SOC_DAPM_EVENT_ON(event));
+
+	if (SND_SOC_DAPM_EVENT_ON(event))
+		gpio_set_value((unsigned)ctx->spk_gpio, 1);
+	else if (SND_SOC_DAPM_EVENT_OFF(event))
+		gpio_set_value((unsigned)ctx->spk_gpio, 0);
+
+	return ret;
+}
+
+static const struct snd_soc_dapm_widget widgets[] = {
+	SND_SOC_DAPM_HP("Headphones", NULL),
+	SND_SOC_DAPM_MIC("AMIC", NULL),
+	SND_SOC_DAPM_MIC("DMIC", NULL),
+};
+static const struct snd_soc_dapm_widget spk_boost_widget[] = {
+	/* DAPM route is added only for Moorefield */
+	SND_SOC_DAPM_SUPPLY("SPK_BOOST", SND_SOC_NOPM, 0, 0,
+			mrfld_8958_set_spk_boost,
+			SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+};
+
+static const struct snd_soc_dapm_route map[] = {
+	{ "Headphones", NULL, "HPOUT1L" },
+	{ "Headphones", NULL, "HPOUT1R" },
+
+	/* saltbay uses 2 DMICs, other configs may use more so change below
+	 * accordingly
+	 */
+	{ "DMIC1DAT", NULL, "DMIC" },
+	{ "DMIC2DAT", NULL, "DMIC" },
+	/*{ "DMIC3DAT", NULL, "DMIC" },*/
+	/*{ "DMIC4DAT", NULL, "DMIC" },*/
+
+	/* MICBIAS2 is connected as Bias for AMIC so we link it
+	 * here. Also AMIC wires up to IN1LP pin.
+	 * DMIC is externally connected to 1.8V rail, so no link rqd.
+	 */
+	{ "AMIC", NULL, "MICBIAS2" },
+	{ "IN1LP", NULL, "AMIC" },
+
+	/* SWM map link the SWM outs to codec AIF */
+	{ "AIF1DAC1L", NULL, "Codec OUT0"  },
+	{ "AIF1DAC1R", NULL, "Codec OUT0"  },
+	{ "AIF1DAC2L", NULL, "Codec OUT1"  },
+	{ "AIF1DAC2R", NULL, "Codec OUT1"  },
+	{ "Codec IN0", NULL, "AIF1ADC1L" },
+	{ "Codec IN0", NULL, "AIF1ADC1R" },
+	{ "Codec IN1", NULL, "AIF1ADC1L" },
+	{ "Codec IN1", NULL, "AIF1ADC1R" },
+};
+
+static const struct snd_soc_dapm_route mofd_spk_boost_map[] = {
+	{"SPKOUTLP", NULL, "SPK_BOOST"},
+	{"SPKOUTLN", NULL, "SPK_BOOST"},
+	{"SPKOUTRP", NULL, "SPK_BOOST"},
+	{"SPKOUTRN", NULL, "SPK_BOOST"},
+};
+
+static const struct wm8958_micd_rate micdet_rates[] = {
+	{ 32768,       true,  1, 4 },
+	{ 32768,       false, 1, 1 },
+	{ 44100 * 256, true,  7, 10 },
+	{ 44100 * 256, false, 7, 10 },
+};
+
+static void wm8958_custom_micd_set_rate(struct snd_soc_codec *codec)
+{
+	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+	struct wm8994 *control = dev_get_drvdata(codec->dev->parent);
+	int best, i, sysclk, val;
+	bool idle;
+	const struct wm8958_micd_rate *rates;
+	int num_rates;
+
+	idle = !wm8994->jack_mic;
+
+	sysclk = snd_soc_read(codec, WM8994_CLOCKING_1);
+	if (sysclk & WM8994_SYSCLK_SRC)
+		sysclk = wm8994->aifclk[1];
+	else
+		sysclk = wm8994->aifclk[0];
+
+	if (control->pdata.micd_rates) {
+		rates = control->pdata.micd_rates;
+		num_rates = control->pdata.num_micd_rates;
+	} else {
+		rates = micdet_rates;
+		num_rates = ARRAY_SIZE(micdet_rates);
+	}
+
+	best = 0;
+	for (i = 0; i < num_rates; i++) {
+		if (rates[i].idle != idle)
+			continue;
+		if (abs(rates[i].sysclk - sysclk) <
+		    abs(rates[best].sysclk - sysclk))
+			best = i;
+		else if (rates[best].idle != idle)
+			best = i;
+	}
+
+	val = rates[best].start << WM8958_MICD_BIAS_STARTTIME_SHIFT
+		| rates[best].rate << WM8958_MICD_RATE_SHIFT;
+
+	dev_dbg(codec->dev, "MICD rate %d,%d for %dHz %s\n",
+		rates[best].start, rates[best].rate, sysclk,
+		idle ? "idle" : "active");
+
+	snd_soc_update_bits(codec, WM8958_MIC_DETECT_1,
+			    WM8958_MICD_BIAS_STARTTIME_MASK |
+			    WM8958_MICD_RATE_MASK, val);
+}
+
+static void wm8958_custom_mic_id(void *data, u16 status)
+{
+	struct snd_soc_codec *codec = data;
+	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "wm8958 custom mic id called with status %x\n",
+		status);
+
+	/* Either nothing present or just starting detection */
+	if (!(status & WM8958_MICD_STS)) {
+		/* If nothing present then clear our statuses */
+		dev_dbg(codec->dev, "Detected open circuit\n");
+
+		schedule_delayed_work(&wm8994->open_circuit_work,
+				      msecs_to_jiffies(2500));
+		return;
+	}
+
+	schedule_delayed_work(&wm8994->micd_set_custom_rate_work,
+		msecs_to_jiffies(wm8994->wm8994->pdata.micb_en_delay));
+
+	/* If the measurement is showing a high impedence we've got a
+	 * microphone.
+	 */
+	if (status & 0x600) {
+		dev_dbg(codec->dev, "Detected microphone\n");
+
+		wm8994->mic_detecting = false;
+		wm8994->jack_mic = true;
+		wm8994->headphone_detected = false;
+
+		snd_soc_jack_report(wm8994->micdet[0].jack, SND_JACK_HEADSET,
+				    SND_JACK_HEADSET);
+	}
+
+
+	if (status & 0xfc) {
+		dev_dbg(codec->dev, "Detected headphone\n");
+
+		/* Partial inserts of headsets with complete insert
+		 * after an indeterminate amount of time require
+		 * continouous micdetect enabled (until open circuit
+		 * or headset is detected)
+		 * */
+		wm8994->mic_detecting = true;
+
+		wm8994->jack_mic = false;
+		wm8994->headphone_detected = true;
+
+		snd_soc_jack_report(wm8994->micdet[0].jack, SND_JACK_HEADPHONE,
+				    SND_JACK_HEADSET);
+	}
+}
+
+static int mrfld_8958_init(struct snd_soc_pcm_runtime *runtime)
+{
+	int ret;
+	unsigned int fmt;
+	struct snd_soc_codec *codec = runtime->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+	struct snd_soc_card *card = runtime->card;
+	struct snd_soc_dapm_context *card_dapm = &card->dapm;
+	struct snd_soc_dai *aif1_dai = card->rtd[0].codec_dai;
+	struct mrfld_8958_mc_private *ctx = snd_soc_card_get_drvdata(card);
+
+	pr_debug("Entry %s\n", __func__);
+
+	ret = snd_soc_dai_set_tdm_slot(aif1_dai, 0, 0, 4, SNDRV_PCM_FORMAT_S24_LE);
+	if (ret < 0) {
+		pr_err("can't set codec pcm format %d\n", ret);
+		return ret;
+	}
+
+	/* WM8958 slave Mode */
+	fmt =   SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_IB_NF
+		| SND_SOC_DAIFMT_CBS_CFS;
+	ret = snd_soc_dai_set_fmt(aif1_dai, fmt);
+	if (ret < 0) {
+		pr_err("can't set codec DAI configuration %d\n", ret);
+		return ret;
+	}
+
+	mrfld_8958_set_bias_level(card, dapm, SND_SOC_BIAS_OFF);
+	card->dapm.idle_bias_off = true;
+
+	/* these pins are not used in SB config so mark as nc
+	 *
+	 * LINEOUT1, 2
+	 * IN1R
+	 * DMICDAT2
+	 */
+	snd_soc_dapm_nc_pin(dapm, "DMIC2DAT");
+	snd_soc_dapm_nc_pin(dapm, "LINEOUT1P");
+	snd_soc_dapm_nc_pin(dapm, "LINEOUT1N");
+	snd_soc_dapm_nc_pin(dapm, "LINEOUT2P");
+	snd_soc_dapm_nc_pin(dapm, "LINEOUT2N");
+	snd_soc_dapm_nc_pin(dapm, "IN1RN");
+	snd_soc_dapm_nc_pin(dapm, "IN1RP");
+
+	if (ctx->spk_gpio >= 0) {
+		snd_soc_dapm_new_controls(card_dapm, spk_boost_widget,
+					ARRAY_SIZE(spk_boost_widget));
+		snd_soc_dapm_add_routes(card_dapm, mofd_spk_boost_map,
+				ARRAY_SIZE(mofd_spk_boost_map));
+	}
+	/* Force enable VMID to avoid cold latency constraints */
+	snd_soc_dapm_force_enable_pin(dapm, "VMID");
+	snd_soc_dapm_sync(dapm);
+
+	ctx->jack_retry = 0;
+	ret = snd_soc_jack_new(codec, "Intel MID Audio Jack",
+			       SND_JACK_HEADSET | SND_JACK_HEADPHONE |
+				SND_JACK_BTN_0 | SND_JACK_BTN_1,
+				&ctx->jack);
+	if (ret) {
+		pr_err("jack creation failed\n");
+		return ret;
+	}
+
+	snd_jack_set_key(ctx->jack.jack, SND_JACK_BTN_1, KEY_MEDIA);
+	snd_jack_set_key(ctx->jack.jack, SND_JACK_BTN_0, KEY_MEDIA);
+
+	snd_soc_update_bits(codec, WM8958_MICBIAS2, WM8958_MICB2_LVL_MASK,
+				WM8958_MICB2_LVL_2P6V << WM8958_MICB2_LVL_SHIFT);
+
+	wm8958_mic_detect(codec, &ctx->jack, NULL, NULL,
+			  wm8958_custom_mic_id, codec);
+
+	wm8958_micd_set_custom_rate(codec, wm8958_custom_micd_set_rate, codec);
+
+	snd_soc_update_bits(codec, WM8994_AIF1_DAC1_FILTERS_1, WM8994_AIF1DAC1_MUTE, 0);
+	snd_soc_update_bits(codec, WM8994_AIF1_DAC2_FILTERS_1, WM8994_AIF1DAC2_MUTE, 0);
+
+	/* Micbias1 is always off, so for pm optimizations make sure the micbias1
+	 * discharge bit is set to floating to avoid discharge in disable state
+	 */
+	snd_soc_update_bits(codec, WM8958_MICBIAS1, WM8958_MICB1_DISCH, 0);
+
+	return 0;
+}
+
+static unsigned int rates_8000_16000[] = {
+	8000,
+	16000,
+};
+
+static struct snd_pcm_hw_constraint_list constraints_8000_16000 = {
+	.count = ARRAY_SIZE(rates_8000_16000),
+	.list  = rates_8000_16000,
+};
+
+static unsigned int rates_48000[] = {
+	48000,
+};
+
+static struct snd_pcm_hw_constraint_list constraints_48000 = {
+	.count = ARRAY_SIZE(rates_48000),
+	.list  = rates_48000,
+};
+
+static int mrfld_8958_startup(struct snd_pcm_substream *substream)
+{
+	return snd_pcm_hw_constraint_list(substream->runtime, 0,
+			SNDRV_PCM_HW_PARAM_RATE,
+			&constraints_48000);
+}
+
+static struct snd_soc_ops mrfld_8958_ops = {
+	.startup = mrfld_8958_startup,
+	.hw_params = mrfld_8958_hw_params,
+};
+
+static int mrfld_8958_8k_16k_startup(struct snd_pcm_substream *substream)
+{
+	return snd_pcm_hw_constraint_list(substream->runtime, 0,
+			SNDRV_PCM_HW_PARAM_RATE,
+			&constraints_8000_16000);
+}
+
+static struct snd_soc_ops mrfld_8958_8k_16k_ops = {
+	.startup = mrfld_8958_8k_16k_startup,
+	.hw_params = mrfld_8958_hw_params,
+};
+
+static struct snd_soc_compr_ops mrfld_compr_ops = {
+	.set_params = mrfld_wm8958_compr_set_params,
+};
+
+struct snd_soc_dai_link mrfld_8958_msic_dailink[] = {
+	[MERR_SALTBAY_AUDIO] = {
+		.name = "Merrifield Audio Port",
+		.stream_name = "Audio",
+		.cpu_dai_name = "Headset-cpu-dai",
+		.codec_dai_name = "wm8994-aif1",
+		.codec_name = "wm8994-codec",
+		.platform_name = "sst-platform",
+		.init = mrfld_8958_init,
+		.ignore_suspend = 1,
+		.ops = &mrfld_8958_ops,
+		.playback_count = 3,
+	},
+	[MERR_SALTBAY_COMPR] = {
+		.name = "Merrifield Compress Port",
+		.stream_name = "Compress",
+		.platform_name = "sst-platform",
+		.cpu_dai_name = "Compress-cpu-dai",
+		.codec_dai_name = "wm8994-aif1",
+		.codec_name = "wm8994-codec",
+		.compr_ops = &mrfld_compr_ops,
+	},
+	[MERR_SALTBAY_VOIP] = {
+		.name = "Merrifield VOIP Port",
+		.stream_name = "Voip",
+		.cpu_dai_name = "Voip-cpu-dai",
+		.codec_dai_name = "wm8994-aif1",
+		.codec_name = "wm8994-codec",
+		.platform_name = "sst-platform",
+		.init = NULL,
+		.ignore_suspend = 1,
+		.ops = &mrfld_8958_8k_16k_ops,
+	},
+	[MERR_SALTBAY_PROBE] = {
+		.name = "Merrifield Probe Port",
+		.stream_name = "Probe",
+		.cpu_dai_name = "Probe-cpu-dai",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.platform_name = "sst-platform",
+		.playback_count = 8,
+		.capture_count = 8,
+	},
+	[MERR_SALTBAY_AWARE] = {
+		.name = "Merrifield Aware Port",
+		.stream_name = "Aware",
+		.cpu_dai_name = "Loopback-cpu-dai",
+		.codec_dai_name = "wm8994-aif1",
+		.codec_name = "wm8994-codec",
+		.platform_name = "sst-platform",
+		.init = NULL,
+		.ignore_suspend = 1,
+		.ops = &mrfld_8958_8k_16k_ops,
+	},
+	[MERR_SALTBAY_VAD] = {
+		.name = "Merrifield VAD Port",
+		.stream_name = "Vad",
+		.cpu_dai_name = "Loopback-cpu-dai",
+		.codec_dai_name = "wm8994-aif1",
+		.codec_name = "wm8994-codec",
+		.platform_name = "sst-platform",
+		.init = NULL,
+		.ignore_suspend = 1,
+		.ops = &mrfld_8958_8k_16k_ops,
+	},
+	[MERR_SALTBAY_POWER] = {
+		.name = "Virtual Power Port",
+		.stream_name = "Power",
+		.cpu_dai_name = "Power-cpu-dai",
+		.platform_name = "sst-platform",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+	},
+};
+
+#ifdef CONFIG_PM_SLEEP
+static int snd_mrfld_8958_prepare(struct device *dev)
+{
+	struct snd_soc_card *card = dev_get_drvdata(dev);
+	struct snd_soc_codec *codec;
+	struct snd_soc_dapm_context *dapm;
+
+	pr_debug("In %s\n", __func__);
+
+	codec = mrfld_8958_get_codec(card);
+	if (!codec) {
+		pr_err("%s: couldn't find the codec pointer!\n", __func__);
+		return -EAGAIN;
+	}
+
+	pr_debug("found codec %s\n", codec->name);
+	dapm = &codec->dapm;
+
+	snd_soc_dapm_disable_pin(dapm, "VMID");
+	snd_soc_dapm_sync(dapm);
+
+	snd_soc_suspend(dev);
+	return 0;
+}
+
+static void snd_mrfld_8958_complete(struct device *dev)
+{
+	struct snd_soc_card *card = dev_get_drvdata(dev);
+	struct snd_soc_codec *codec;
+	struct snd_soc_dapm_context *dapm;
+
+	pr_debug("In %s\n", __func__);
+
+	codec = mrfld_8958_get_codec(card);
+	if (!codec) {
+		pr_err("%s: couldn't find the codec pointer!\n", __func__);
+		return;
+	}
+
+	pr_debug("found codec %s\n", codec->name);
+	dapm = &codec->dapm;
+
+	snd_soc_dapm_force_enable_pin(dapm, "VMID");
+	snd_soc_dapm_sync(dapm);
+
+	snd_soc_resume(dev);
+	return;
+}
+
+static int snd_mrfld_8958_poweroff(struct device *dev)
+{
+	pr_debug("In %s\n", __func__);
+	snd_soc_poweroff(dev);
+	return 0;
+}
+#else
+#define snd_mrfld_8958_prepare NULL
+#define snd_mrfld_8958_complete NULL
+#define snd_mrfld_8958_poweroff NULL
+#endif
+
+/* SoC card */
+static struct snd_soc_card snd_soc_card_mrfld = {
+	.name = "wm8958-audio",
+	.dai_link = mrfld_8958_msic_dailink,
+	.num_links = ARRAY_SIZE(mrfld_8958_msic_dailink),
+	.set_bias_level = mrfld_8958_set_bias_level,
+	.set_bias_level_post = mrfld_8958_set_bias_level_post,
+	.dapm_widgets = widgets,
+	.num_dapm_widgets = ARRAY_SIZE(widgets),
+	.dapm_routes = map,
+	.num_dapm_routes = ARRAY_SIZE(map),
+};
+
+static int snd_mrfld_8958_config_gpio(struct platform_device *pdev,
+					struct mrfld_8958_mc_private *drv)
+{
+	int ret = 0;
+
+	if (drv->spk_gpio >= 0) {
+		/* Set GPIO as output and init it with high value. So
+		 * spk boost is disable by default */
+		ret = devm_gpio_request_one(&pdev->dev, (unsigned)drv->spk_gpio,
+				GPIOF_INIT_LOW, "spk_boost");
+		if (ret) {
+			pr_err("GPIO request failed\n");
+			return ret;
+		}
+	}
+	return ret;
+}
+
+static int snd_mrfld_8958_mc_probe(struct platform_device *pdev)
+{
+	int ret_val = 0;
+	struct mrfld_8958_mc_private *drv;
+	struct mrfld_audio_platform_data *mrfld_audio_pdata = pdev->dev.platform_data;
+
+	pr_debug("Entry %s\n", __func__);
+
+	if (!mrfld_audio_pdata) {
+		pr_err("Platform data not provided\n");
+		return -EINVAL;
+	}
+	drv = kzalloc(sizeof(*drv), GFP_ATOMIC);
+	if (!drv) {
+		pr_err("allocation failed\n");
+		return -ENOMEM;
+	}
+	drv->spk_gpio = mrfld_audio_pdata->spk_gpio;
+	ret_val = snd_mrfld_8958_config_gpio(pdev, drv);
+	if (ret_val) {
+		pr_err("GPIO configuration failed\n");
+		goto unalloc;
+	}
+	/* ioremap the register */
+	drv->osc_clk0_reg = devm_ioremap_nocache(&pdev->dev,
+					MERR_OSC_CLKOUT_CTRL0_REG_ADDR,
+					MERR_OSC_CLKOUT_CTRL0_REG_SIZE);
+	if (!drv->osc_clk0_reg) {
+		pr_err("osc clk0 ctrl ioremap failed\n");
+		ret_val = -1;
+		goto unalloc;
+	}
+
+	/* register the soc card */
+	snd_soc_card_mrfld.dev = &pdev->dev;
+	snd_soc_card_set_drvdata(&snd_soc_card_mrfld, drv);
+	ret_val = snd_soc_register_card(&snd_soc_card_mrfld);
+	if (ret_val) {
+		pr_err("snd_soc_register_card failed %d\n", ret_val);
+		goto unalloc;
+	}
+	platform_set_drvdata(pdev, &snd_soc_card_mrfld);
+	pr_info("%s successful\n", __func__);
+	return ret_val;
+
+unalloc:
+	kfree(drv);
+	return ret_val;
+}
+
+static int snd_mrfld_8958_mc_remove(struct platform_device *pdev)
+{
+	struct snd_soc_card *soc_card = platform_get_drvdata(pdev);
+	struct mrfld_8958_mc_private *drv = snd_soc_card_get_drvdata(soc_card);
+
+	pr_debug("In %s\n", __func__);
+	kfree(drv);
+	snd_soc_card_set_drvdata(soc_card, NULL);
+	snd_soc_unregister_card(soc_card);
+	platform_set_drvdata(pdev, NULL);
+	return 0;
+}
+
+const struct dev_pm_ops snd_mrfld_8958_mc_pm_ops = {
+	.prepare = snd_mrfld_8958_prepare,
+	.complete = snd_mrfld_8958_complete,
+	.poweroff = snd_mrfld_8958_poweroff,
+};
+
+static struct platform_driver snd_mrfld_8958_mc_driver = {
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = "mrfld_wm8958",
+		.pm = &snd_mrfld_8958_mc_pm_ops,
+	},
+	.probe = snd_mrfld_8958_mc_probe,
+	.remove = snd_mrfld_8958_mc_remove,
+};
+
+static int snd_mrfld_8958_driver_init(void)
+{
+	pr_info("Merrifield Machine Driver mrfld_wm8958 registerd\n");
+	return platform_driver_register(&snd_mrfld_8958_mc_driver);
+}
+
+static void snd_mrfld_8958_driver_exit(void)
+{
+	pr_debug("In %s\n", __func__);
+	platform_driver_unregister(&snd_mrfld_8958_mc_driver);
+}
+
+static int snd_mrfld_8958_rpmsg_probe(struct rpmsg_channel *rpdev)
+{
+	int ret = 0;
+
+	if (rpdev == NULL) {
+		pr_err("rpmsg channel not created\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	dev_info(&rpdev->dev, "Probed snd_mrfld wm8958 rpmsg device\n");
+
+	ret = snd_mrfld_8958_driver_init();
+
+out:
+	return ret;
+}
+
+static void snd_mrfld_8958_rpmsg_remove(struct rpmsg_channel *rpdev)
+{
+	snd_mrfld_8958_driver_exit();
+	dev_info(&rpdev->dev, "Removed snd_mrfld wm8958 rpmsg device\n");
+}
+
+static void snd_mrfld_8958_rpmsg_cb(struct rpmsg_channel *rpdev, void *data,
+				int len, void *priv, u32 src)
+{
+	dev_warn(&rpdev->dev, "unexpected, message\n");
+
+	print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+			data, len,  true);
+}
+
+static struct rpmsg_device_id snd_mrfld_8958_rpmsg_id_table[] = {
+	{ .name = "rpmsg_mrfld_wm8958_audio" },
+	{ },
+};
+MODULE_DEVICE_TABLE(rpmsg, snd_mrfld_8958_rpmsg_id_table);
+
+static struct rpmsg_driver snd_mrfld_8958_rpmsg = {
+	.drv.name	= KBUILD_MODNAME,
+	.drv.owner	= THIS_MODULE,
+	.id_table	= snd_mrfld_8958_rpmsg_id_table,
+	.probe		= snd_mrfld_8958_rpmsg_probe,
+	.callback	= snd_mrfld_8958_rpmsg_cb,
+	.remove		= snd_mrfld_8958_rpmsg_remove,
+};
+
+static int __init snd_mrfld_8958_rpmsg_init(void)
+{
+	return register_rpmsg_driver(&snd_mrfld_8958_rpmsg);
+}
+late_initcall(snd_mrfld_8958_rpmsg_init);
+
+static void __exit snd_mrfld_8958_rpmsg_exit(void)
+{
+	return unregister_rpmsg_driver(&snd_mrfld_8958_rpmsg);
+}
+module_exit(snd_mrfld_8958_rpmsg_exit);
+
+MODULE_DESCRIPTION("ASoC Intel(R) Merrifield MID Machine driver");
+MODULE_AUTHOR("Vinod Koul <vinod.koul@linux.intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:mrfld_wm8958");
diff --git a/sound/soc/intel/compress.c b/sound/soc/intel/compress.c
new file mode 100644
index 0000000..f067468
--- /dev/null
+++ b/sound/soc/intel/compress.c
@@ -0,0 +1,254 @@
+/*
+ *  compress.c - Intel MID Platform driver for Compress stream operations
+ *
+ *  Copyright (C) 2010-2013 Intel Corp
+ *  Author: Vinod Koul <vinod.koul@intel.com>
+ *  Author: Harsha Priya <priya.harsha@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <sound/soc.h>
+#include <sound/intel_sst_ioctl.h>
+#include "platform_ipc_v2.h"
+#include "sst_platform.h"
+#include "sst_platform_pvt.h"
+
+static void sst_compr_fragment_elapsed(void *arg)
+{
+	struct snd_compr_stream *cstream = (struct snd_compr_stream *)arg;
+
+	pr_debug("fragment elapsed by driver\n");
+	if (cstream)
+		snd_compr_fragment_elapsed(cstream);
+}
+
+static void sst_drain_notify(void *arg)
+{
+	struct snd_compr_stream *cstream = (struct snd_compr_stream *)arg;
+
+	pr_debug("drain notify by driver\n");
+	if (cstream)
+		snd_compr_drain_notify(cstream);
+}
+
+static int sst_platform_compr_open(struct snd_compr_stream *cstream)
+{
+
+	int ret_val = 0;
+	struct snd_compr_runtime *runtime = cstream->runtime;
+	struct sst_runtime_stream *stream;
+	struct snd_soc_pcm_runtime *rtd = cstream->private_data;
+	struct snd_soc_dai_link *dai_link = rtd->dai_link;
+
+	pr_debug("%s called:%s\n", __func__, dai_link->cpu_dai_name);
+
+	stream = kzalloc(sizeof(*stream), GFP_KERNEL);
+	if (!stream)
+		return -ENOMEM;
+
+	spin_lock_init(&stream->status_lock);
+
+	/* get the sst ops */
+	if (!sst_dsp || !try_module_get(sst_dsp->dev->driver->owner)) {
+		pr_err("no device available to run\n");
+		ret_val = -ENODEV;
+		goto out_ops;
+	}
+	stream->compr_ops = sst_dsp->compr_ops;
+
+	stream->id = 0;
+	sst_set_stream_status(stream, SST_PLATFORM_INIT);
+	runtime->private_data = stream;
+	return 0;
+out_ops:
+	kfree(stream);
+	return ret_val;
+}
+
+static int sst_platform_compr_free(struct snd_compr_stream *cstream)
+{
+	struct sst_runtime_stream *stream;
+	struct snd_soc_pcm_runtime *rtd = cstream->private_data;
+	struct snd_soc_dai_link *dai_link = rtd->dai_link;
+	int ret_val = 0, str_id;
+
+	stream = cstream->runtime->private_data;
+	/*need to check*/
+	str_id = stream->id;
+	if (str_id)
+		ret_val = stream->compr_ops->close(str_id);
+	module_put(sst_dsp->dev->driver->owner);
+	kfree(stream);
+	pr_debug("%s called for dai %s: ret = %d\n", __func__,
+				dai_link->cpu_dai_name, ret_val);
+	return 0;
+}
+
+static int sst_platform_compr_set_params(struct snd_compr_stream *cstream,
+					struct snd_compr_params *params)
+{
+	struct sst_runtime_stream *stream;
+	int retval = 0;
+	struct snd_sst_params str_params;
+	struct sst_compress_cb cb;
+	struct snd_soc_pcm_runtime *rtd = cstream->private_data;
+	struct snd_soc_platform *platform = rtd->platform;
+	struct sst_data *ctx = snd_soc_platform_get_drvdata(platform);
+
+	pr_debug("In function %s\n", __func__);
+	stream = cstream->runtime->private_data;
+	/* construct fw structure for this*/
+	memset(&str_params, 0, sizeof(str_params));
+
+	/* fill the device type and stream id to pass to SST driver */
+	retval = sst_fill_stream_params(cstream, ctx, &str_params, true);
+	pr_debug("compr_set_params: fill stream params ret_val = 0x%x\n", retval);
+	if (retval < 0)
+		return retval;
+
+	switch (params->codec.id) {
+	case SND_AUDIOCODEC_MP3: {
+		str_params.codec = SST_CODEC_TYPE_MP3;
+		str_params.sparams.uc.mp3_params.num_chan = params->codec.ch_in;
+		str_params.sparams.uc.mp3_params.pcm_wd_sz = 16;
+		break;
+	}
+
+	case SND_AUDIOCODEC_AAC: {
+		str_params.codec = SST_CODEC_TYPE_AAC;
+		str_params.sparams.uc.aac_params.num_chan = params->codec.ch_in;
+		str_params.sparams.uc.aac_params.pcm_wd_sz = 16;
+		if (params->codec.format == SND_AUDIOSTREAMFORMAT_MP4ADTS)
+			str_params.sparams.uc.aac_params.bs_format =
+							AAC_BIT_STREAM_ADTS;
+		else if (params->codec.format == SND_AUDIOSTREAMFORMAT_RAW)
+			str_params.sparams.uc.aac_params.bs_format =
+							AAC_BIT_STREAM_RAW;
+		else {
+			pr_err("Undefined format%d\n", params->codec.format);
+			return -EINVAL;
+		}
+		str_params.sparams.uc.aac_params.externalsr =
+						params->codec.sample_rate;
+		break;
+	}
+
+	default:
+		pr_err("codec not supported, id =%d\n", params->codec.id);
+		return -EINVAL;
+	}
+
+	str_params.aparams.ring_buf_info[0].addr  =
+					virt_to_phys(cstream->runtime->buffer);
+	str_params.aparams.ring_buf_info[0].size =
+					cstream->runtime->buffer_size;
+	str_params.aparams.sg_count = 1;
+	str_params.aparams.frag_size = cstream->runtime->fragment_size;
+
+	cb.param = cstream;
+	cb.compr_cb = sst_compr_fragment_elapsed;
+	cb.drain_cb_param = cstream;
+	cb.drain_notify = sst_drain_notify;
+
+	retval = stream->compr_ops->open(&str_params, &cb);
+	if (retval < 0) {
+		pr_err("stream allocation failed %d\n", retval);
+		return retval;
+	}
+
+	stream->id = retval;
+	return 0;
+}
+
+static int sst_platform_compr_trigger(struct snd_compr_stream *cstream, int cmd)
+{
+	struct sst_runtime_stream *stream =
+		cstream->runtime->private_data;
+
+	return stream->compr_ops->control(cmd, stream->id);
+}
+
+static int sst_platform_compr_pointer(struct snd_compr_stream *cstream,
+					struct snd_compr_tstamp *tstamp)
+{
+	struct sst_runtime_stream *stream;
+
+	stream  = cstream->runtime->private_data;
+	stream->compr_ops->tstamp(stream->id, tstamp);
+	tstamp->byte_offset = tstamp->copied_total %
+				 (u32)cstream->runtime->buffer_size;
+	pr_debug("calc bytes offset/copied bytes as %d\n", tstamp->byte_offset);
+	return 0;
+}
+
+static int sst_platform_compr_ack(struct snd_compr_stream *cstream,
+					size_t bytes)
+{
+	struct sst_runtime_stream *stream;
+
+	stream  = cstream->runtime->private_data;
+	stream->compr_ops->ack(stream->id, (unsigned long)bytes);
+	stream->bytes_written += bytes;
+
+	return 0;
+}
+
+static int sst_platform_compr_get_caps(struct snd_compr_stream *cstream,
+					struct snd_compr_caps *caps)
+{
+	struct sst_runtime_stream *stream =
+		cstream->runtime->private_data;
+
+	return stream->compr_ops->get_caps(caps);
+}
+
+static int sst_platform_compr_get_codec_caps(struct snd_compr_stream *cstream,
+					struct snd_compr_codec_caps *codec)
+{
+	struct sst_runtime_stream *stream =
+		cstream->runtime->private_data;
+
+	return stream->compr_ops->get_codec_caps(codec);
+}
+
+static int sst_platform_compr_set_metadata(struct snd_compr_stream *cstream,
+					struct snd_compr_metadata *metadata)
+{
+	struct sst_runtime_stream *stream  =
+		 cstream->runtime->private_data;
+
+	return stream->compr_ops->set_metadata(stream->id, metadata);
+}
+
+struct snd_compr_ops sst_platform_compr_ops = {
+
+	.open = sst_platform_compr_open,
+	.free = sst_platform_compr_free,
+	.set_params = sst_platform_compr_set_params,
+	.set_metadata = sst_platform_compr_set_metadata,
+	.trigger = sst_platform_compr_trigger,
+	.pointer = sst_platform_compr_pointer,
+	.ack = sst_platform_compr_ack,
+	.get_caps = sst_platform_compr_get_caps,
+	.get_codec_caps = sst_platform_compr_get_codec_caps,
+};
diff --git a/sound/soc/intel/effects.c b/sound/soc/intel/effects.c
new file mode 100644
index 0000000..d74775d
--- /dev/null
+++ b/sound/soc/intel/effects.c
@@ -0,0 +1,407 @@
+/*
+ *  effects.c - platform file for effects interface
+ *
+ *  Copyright (C) 2013 Intel Corporation
+ *  Authors:	Samreen Nilofer <samreen.nilofer@intel.com>
+ *		Vinod Koul <vinod.koul@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+#include <linux/slab.h>
+#include <asm/platform_sst_audio.h>
+#include "platform_ipc_v2.h"
+#include "sst_platform.h"
+#include "sst_platform_pvt.h"
+
+extern struct sst_device *sst_dsp;
+extern struct device *sst_pdev;
+
+struct effect_uuid {
+	uint32_t timeLow;
+	uint16_t timeMid;
+	uint16_t timeHiAndVersion;
+	uint16_t clockSeq;
+	uint8_t node[6];
+};
+
+#define EFFECT_STRING_LEN_MAX 64
+
+enum sst_effect {
+	EFFECTS_CREATE = 0,
+	EFFECTS_DESTROY,
+	EFFECTS_SET_PARAMS,
+	EFFECTS_GET_PARAMS,
+};
+
+enum sst_mixer_output_mode {
+	SST_MEDIA0_OUT,
+	SST_MEDIA1_OUT,
+};
+
+static inline void sst_fill_byte_stream(struct snd_sst_bytes_v2 *bytes, u8 type,
+			u8 msg, u8 block, u8 task, u8 pipe_id, u16 len,
+			struct ipc_effect_payload *payload)
+{
+	u32 size = sizeof(struct ipc_effect_dsp_hdr);
+
+	bytes->type = type;
+	bytes->ipc_msg = msg;
+	bytes->block = block;
+	bytes->task_id = task;
+	bytes->pipe_id = pipe_id;
+	bytes->len = len;
+
+	/* Copy the ipc_effect_dsp_hdr followed by the data */
+	memcpy(bytes->bytes, payload, size);
+	memcpy(bytes->bytes + size, payload->data, len - size);
+}
+
+static int sst_send_effects(struct ipc_effect_payload *dsp_payload, int data_len,
+					enum sst_effect effect_type)
+{
+	struct snd_sst_bytes_v2 *bytes;
+	u32 len;
+	int ret;
+	u8 type, msg = IPC_INVALID, pipe, payload_len;
+	struct sst_data *sst;
+
+	if (!sst_pdev)
+		return -ENODEV;
+	sst =  dev_get_drvdata(sst_pdev);
+
+	len = sizeof(*bytes) + sizeof(struct ipc_effect_dsp_hdr) + data_len;
+
+	bytes = kzalloc(len, GFP_KERNEL);
+	if (!bytes) {
+		pr_err("kzalloc failed allocate bytes\n");
+		return -ENOMEM;
+	}
+
+	switch (effect_type) {
+	case EFFECTS_CREATE:
+	case EFFECTS_DESTROY:
+		type = SND_SST_BYTES_SET;
+		msg = IPC_CMD;
+		break;
+
+	case EFFECTS_SET_PARAMS:
+		type = SND_SST_BYTES_SET;
+		msg = IPC_SET_PARAMS;
+		break;
+
+	case EFFECTS_GET_PARAMS:
+		type = SND_SST_BYTES_GET;
+		msg =  IPC_GET_PARAMS;
+		break;
+	default:
+		pr_err("No such effect %#x", effect_type);
+		ret = -EINVAL;
+		goto free_bytes;
+	}
+
+	pipe = dsp_payload->dsp_hdr.pipe_id;
+	payload_len = sizeof(struct ipc_effect_dsp_hdr) + data_len;
+	sst_fill_byte_stream(bytes, type, msg, 1, SST_TASK_ID_MEDIA,
+				pipe, payload_len, dsp_payload);
+
+	mutex_lock(&sst->lock);
+	ret = sst_dsp->ops->set_generic_params(SST_SET_BYTE_STREAM, bytes);
+	mutex_unlock(&sst->lock);
+
+	if (ret) {
+		pr_err("byte_stream failed err %d pipe_id %#x\n", ret,
+				dsp_payload->dsp_hdr.pipe_id);
+		goto free_bytes;
+	}
+
+	/* Copy only the data - skip the dsp header */
+	if (msg == IPC_GET_PARAMS)
+		memcpy(dsp_payload->data, bytes->bytes, data_len);
+
+free_bytes:
+	kfree(bytes);
+	return ret;
+}
+
+static int sst_get_algo_id(const struct sst_dev_effects *pdev_effs,
+					char *uuid, u16 *algo_id)
+{
+	int i, len;
+
+	len = pdev_effs->effs_num_map;
+
+	for (i = 0; i < len; i++) {
+		if (!strncmp(pdev_effs->effs_map[i].uuid, uuid, sizeof(struct effect_uuid))) {
+			*algo_id = pdev_effs->effs_map[i].algo_id;
+			return 0;
+		}
+	}
+	pr_err("no such uuid\n");
+	return -EINVAL;
+}
+
+static int sst_fill_effects_info(const struct sst_dev_effects *pdev_effs,
+					char *uuid, u16 pos,
+					struct ipc_dsp_effects_info *effs_info, u16 cmd_id)
+{
+	int i, len;
+
+	len = pdev_effs->effs_num_map;
+
+	for (i = 0; i < len; i++) {
+		if (!strncmp(pdev_effs->effs_map[i].uuid, uuid, sizeof(struct effect_uuid))) {
+
+			effs_info->cmd_id = cmd_id;
+			effs_info->length = (sizeof(struct ipc_dsp_effects_info) -
+						offsetof(struct ipc_dsp_effects_info, sel_pos));
+			effs_info->sel_pos = pos;
+			effs_info->sel_algo_id = pdev_effs->effs_map[i].algo_id;
+			effs_info->cpu_load = pdev_effs->effs_res_map[i].cpuLoad;
+			effs_info->memory_usage = pdev_effs->effs_res_map[i].memoryUsage;
+			effs_info->flags = pdev_effs->effs_res_map[i].flags;
+
+			return 0;
+		}
+	}
+
+	pr_err("no such uuid\n");
+	return -EINVAL;
+}
+
+static inline void sst_fill_dsp_payload(struct ipc_effect_payload *dsp_payload,
+					u8 pipe_id, u16 mod_id, char *data)
+{
+	dsp_payload->dsp_hdr.mod_index_id = 0xFF;
+	dsp_payload->dsp_hdr.pipe_id = pipe_id;
+	dsp_payload->dsp_hdr.mod_id = mod_id;
+	dsp_payload->data = data;
+}
+
+static int sst_get_pipe_id(struct sst_dev_stream_map *map, int map_size,
+				u32 dev, u32 mode, u8 *pipe_id)
+{
+	int index;
+
+	if (map == NULL)
+		return -EINVAL;
+
+	/* In case of global effects, dev will be 0xff */
+	if (dev == 0xFF) {
+		*pipe_id = (mode == SST_MEDIA0_OUT) ? PIPE_MEDIA0_OUT : PIPE_MEDIA1_OUT;
+		return 0;
+	}
+
+	for (index = 1; index < map_size; index++) {
+		if (map[index].dev_num == dev) {
+			*pipe_id = map[index].device_id;
+			break;
+		}
+	}
+
+	if (index == map_size) {
+		pr_err("no such device %d\n", dev);
+		return -ENODEV;
+	}
+	return 0;
+}
+
+static int sst_effects_create(struct snd_card *card, struct snd_effect *effect)
+{
+	int ret = 0;
+	u8 pipe_id;
+	struct ipc_effect_payload dsp_payload;
+	struct ipc_dsp_effects_info effects_info;
+	struct sst_data *sst;
+
+	if (!sst_pdev)
+		return -ENODEV;
+	sst =  dev_get_drvdata(sst_pdev);
+
+	ret = sst_fill_effects_info(&sst->pdata->pdev_effs, effect->uuid, effect->pos,
+				 &effects_info, IPC_EFFECTS_CREATE);
+	if (ret < 0)
+		return ret;
+
+	ret = sst_get_pipe_id(sst->pdata->pdev_strm_map,
+				sst->pdata->strm_map_size,
+				effect->device, effect->mode, &pipe_id);
+	if (ret < 0)
+		return ret;
+
+	sst_fill_dsp_payload(&dsp_payload, pipe_id, 0xFF, (char *)&effects_info);
+
+	ret = sst_send_effects(&dsp_payload, sizeof(effects_info), EFFECTS_CREATE);
+
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static int sst_effects_destroy(struct snd_card *card, struct snd_effect *effect)
+{
+	int ret = 0;
+	u8 pipe_id;
+	struct ipc_effect_payload dsp_payload;
+	struct ipc_dsp_effects_info effects_info;
+	struct sst_data *sst;
+
+	if (!sst_pdev)
+		return -ENODEV;
+	sst =  dev_get_drvdata(sst_pdev);
+
+	ret = sst_fill_effects_info(&sst->pdata->pdev_effs, effect->uuid, effect->pos,
+				&effects_info, IPC_EFFECTS_DESTROY);
+	if (ret < 0)
+		return ret;
+
+	ret = sst_get_pipe_id(sst->pdata->pdev_strm_map,
+				sst->pdata->strm_map_size,
+				effect->device, effect->mode, &pipe_id);
+	if (ret < 0)
+		return ret;
+
+	sst_fill_dsp_payload(&dsp_payload, pipe_id, 0xFF, (char *)&effects_info);
+
+	ret = sst_send_effects(&dsp_payload, sizeof(effects_info), EFFECTS_DESTROY);
+
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static int sst_effects_set_params(struct snd_card *card,
+					struct snd_effect_params *params)
+{
+	int ret = 0;
+	u8 pipe_id;
+	u16 algo_id;
+	struct ipc_effect_payload dsp_payload;
+	struct sst_data *sst;
+
+	if (!sst_pdev)
+		return -ENODEV;
+	sst =  dev_get_drvdata(sst_pdev);
+
+	ret = sst_get_algo_id(&sst->pdata->pdev_effs, params->uuid, &algo_id);
+	if (ret < 0)
+		return ret;
+
+	ret = sst_get_pipe_id(sst->pdata->pdev_strm_map,
+				sst->pdata->strm_map_size,
+				params->device, SST_MEDIA0_OUT, &pipe_id);
+	if (ret < 0)
+		return ret;
+
+	sst_fill_dsp_payload(&dsp_payload, pipe_id, algo_id,
+			(void *)(unsigned long)params->buffer_ptr);
+
+	ret = sst_send_effects(&dsp_payload, params->size, EFFECTS_SET_PARAMS);
+
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static int sst_effects_get_params(struct snd_card *card,
+					struct snd_effect_params *params)
+{
+	int ret = 0;
+	u8 pipe_id;
+	u16 algo_id;
+	struct ipc_effect_payload dsp_payload;
+	struct sst_data *sst;
+
+	if (!sst_pdev)
+		return -ENODEV;
+	sst =  dev_get_drvdata(sst_pdev);
+
+	ret = sst_get_algo_id(&sst->pdata->pdev_effs, params->uuid, &algo_id);
+	if (ret < 0)
+		return ret;
+
+	ret = sst_get_pipe_id(sst->pdata->pdev_strm_map,
+				sst->pdata->strm_map_size,
+				params->device, SST_MEDIA0_OUT, &pipe_id);
+	if (ret < 0)
+		return ret;
+
+	sst_fill_dsp_payload(&dsp_payload, pipe_id, algo_id,
+			(void *)(unsigned long)params->buffer_ptr);
+
+	ret = sst_send_effects(&dsp_payload, params->size, EFFECTS_GET_PARAMS);
+
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static int sst_query_num_effects(struct snd_card *card)
+{
+	struct sst_data *sst;
+
+	if (!sst_pdev)
+		return -ENODEV;
+	sst =  dev_get_drvdata(sst_pdev);
+
+	return sst->pdata->pdev_effs.effs_num_map;
+}
+
+static int sst_query_effects_caps(struct snd_card *card,
+					struct snd_effect_caps *caps)
+{
+	struct sst_data *sst;
+	struct sst_dev_effects_map *effs_map;
+	unsigned int num_effects, offset = 0;
+	char *dstn;
+	int i;
+
+	if (!sst_pdev)
+		return -ENODEV;
+	sst =  dev_get_drvdata(sst_pdev);
+
+	effs_map = sst->pdata->pdev_effs.effs_map;
+	num_effects = sst->pdata->pdev_effs.effs_num_map;
+
+	if (caps->size < (num_effects * MAX_DESCRIPTOR_SIZE)) {
+		pr_err("buffer size is insufficient\n");
+		return -ENOMEM;
+	}
+
+	dstn = (void *)(unsigned long)caps->buffer_ptr;
+	for (i = 0; i < num_effects; i++) {
+		memcpy(dstn + offset, effs_map[i].descriptor, MAX_DESCRIPTOR_SIZE);
+		offset += MAX_DESCRIPTOR_SIZE;
+	}
+	caps->size = offset;
+
+	return 0;
+}
+
+struct snd_effect_ops effects_ops = {
+	.create = sst_effects_create,
+	.destroy = sst_effects_destroy,
+	.set_params = sst_effects_set_params,
+	.get_params = sst_effects_get_params,
+	.query_num_effects = sst_query_num_effects,
+	.query_effect_caps = sst_query_effects_caps,
+};
diff --git a/sound/soc/intel/pcm.c b/sound/soc/intel/pcm.c
new file mode 100644
index 0000000..8c2c3bd
--- /dev/null
+++ b/sound/soc/intel/pcm.c
@@ -0,0 +1,1119 @@
+/*
+ *  pcm.c - Intel MID Platform driver file implementing PCM functionality
+ *
+ *  Copyright (C) 2010-2013 Intel Corp
+ *  Author: Vinod Koul <vinod.koul@intel.com>
+ *  Author: Harsha Priya <priya.harsha@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/intel_sst_ioctl.h>
+#include <asm/platform_sst_audio.h>
+#include <asm/intel_sst_mrfld.h>
+#include <asm/intel-mid.h>
+#include "platform_ipc_v2.h"
+#include "sst_platform.h"
+#include "sst_platform_pvt.h"
+
+struct device *sst_pdev;
+struct sst_device *sst_dsp;
+extern struct snd_compr_ops sst_platform_compr_ops;
+extern struct snd_effect_ops effects_ops;
+
+/* module parameters */
+static int dpcm_enable;
+
+/* dpcm_enable should be =0 for mofd_v0 and =1 for mofd_v1 */
+module_param(dpcm_enable, int, 0644);
+MODULE_PARM_DESC(dpcm_enable, "DPCM module parameter");
+
+static DEFINE_MUTEX(sst_dsp_lock);
+
+static struct snd_pcm_hardware sst_platform_pcm_hw = {
+	.info =	(SNDRV_PCM_INFO_INTERLEAVED |
+			SNDRV_PCM_INFO_DOUBLE |
+			SNDRV_PCM_INFO_PAUSE |
+			SNDRV_PCM_INFO_RESUME |
+			SNDRV_PCM_INFO_MMAP|
+			SNDRV_PCM_INFO_MMAP_VALID |
+			SNDRV_PCM_INFO_BLOCK_TRANSFER |
+			SNDRV_PCM_INFO_SYNC_START),
+	.formats = (SNDRV_PCM_FMTBIT_S16 | SNDRV_PCM_FMTBIT_U16 |
+			SNDRV_PCM_FMTBIT_S24 | SNDRV_PCM_FMTBIT_U24 |
+			SNDRV_PCM_FMTBIT_S32 | SNDRV_PCM_FMTBIT_U32),
+	.rates = (SNDRV_PCM_RATE_8000|
+			SNDRV_PCM_RATE_16000 |
+			SNDRV_PCM_RATE_44100 |
+			SNDRV_PCM_RATE_48000),
+	.rate_min = SST_MIN_RATE,
+	.rate_max = SST_MAX_RATE,
+	.channels_min =	SST_MIN_CHANNEL,
+	.channels_max =	SST_MAX_CHANNEL,
+	.buffer_bytes_max = SST_MAX_BUFFER,
+	.period_bytes_min = SST_MIN_PERIOD_BYTES,
+	.period_bytes_max = SST_MAX_PERIOD_BYTES,
+	.periods_min = SST_MIN_PERIODS,
+	.periods_max = SST_MAX_PERIODS,
+	.fifo_size = SST_FIFO_SIZE,
+};
+
+static struct sst_dev_stream_map dpcm_strm_map[] = {
+	{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, /* Reserved, not in use */
+	{MERR_DPCM_AUDIO, 0, SNDRV_PCM_STREAM_PLAYBACK, PIPE_MEDIA1_IN, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+	{MERR_DPCM_DB,    0, SNDRV_PCM_STREAM_PLAYBACK, PIPE_MEDIA3_IN, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+	{MERR_DPCM_LL,    0, SNDRV_PCM_STREAM_PLAYBACK, PIPE_LOW_PCM0_IN, SST_TASK_ID_SBA, SST_DEV_MAP_IN_USE},
+	{MERR_DPCM_COMPR, 0, SNDRV_PCM_STREAM_PLAYBACK, PIPE_MEDIA0_IN, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+	{MERR_DPCM_VOIP,  0, SNDRV_PCM_STREAM_PLAYBACK, PIPE_VOIP_IN, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+	{MERR_DPCM_PROBE, 0, SNDRV_PCM_STREAM_PLAYBACK, PIPE_PROBE1_IN, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+	{MERR_DPCM_PROBE, 1, SNDRV_PCM_STREAM_PLAYBACK, PIPE_PROBE2_IN, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+	{MERR_DPCM_PROBE, 2, SNDRV_PCM_STREAM_PLAYBACK, PIPE_PROBE3_IN, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+	{MERR_DPCM_PROBE, 3, SNDRV_PCM_STREAM_PLAYBACK, PIPE_PROBE4_IN, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+	{MERR_DPCM_PROBE, 4, SNDRV_PCM_STREAM_PLAYBACK, PIPE_PROBE5_IN, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+	{MERR_DPCM_PROBE, 5, SNDRV_PCM_STREAM_PLAYBACK, PIPE_PROBE6_IN, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+	{MERR_DPCM_PROBE, 6, SNDRV_PCM_STREAM_PLAYBACK, PIPE_PROBE7_IN, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+	{MERR_DPCM_PROBE, 7, SNDRV_PCM_STREAM_PLAYBACK, PIPE_PROBE8_IN, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+	{MERR_DPCM_AUDIO, 0, SNDRV_PCM_STREAM_CAPTURE, PIPE_PCM1_OUT, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+	{MERR_DPCM_VOIP,  0, SNDRV_PCM_STREAM_CAPTURE, PIPE_VOIP_OUT, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+	{MERR_DPCM_PROBE, 0, SNDRV_PCM_STREAM_CAPTURE, PIPE_PROBE1_OUT, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+	{MERR_DPCM_PROBE, 1, SNDRV_PCM_STREAM_CAPTURE, PIPE_PROBE2_OUT, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+	{MERR_DPCM_PROBE, 2, SNDRV_PCM_STREAM_CAPTURE, PIPE_PROBE3_OUT, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+	{MERR_DPCM_PROBE, 3, SNDRV_PCM_STREAM_CAPTURE, PIPE_PROBE4_OUT, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+	{MERR_DPCM_PROBE, 4, SNDRV_PCM_STREAM_CAPTURE, PIPE_PROBE5_OUT, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+	{MERR_DPCM_PROBE, 5, SNDRV_PCM_STREAM_CAPTURE, PIPE_PROBE6_OUT, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+	{MERR_DPCM_PROBE, 6, SNDRV_PCM_STREAM_CAPTURE, PIPE_PROBE7_OUT, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+	{MERR_DPCM_PROBE, 7, SNDRV_PCM_STREAM_CAPTURE, PIPE_PROBE8_OUT, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+	/* stream ID 25 used by Aware, but no device exposed to userspace */
+};
+
+static int sst_platform_ihf_set_tdm_slot(struct snd_soc_dai *dai,
+			unsigned int tx_mask, unsigned int rx_mask,
+			int slots, int slot_width) {
+	struct snd_sst_runtime_params params_data;
+	int channels = slots;
+
+	/* registering with SST driver to get access to SST APIs to use */
+	if (!sst_dsp) {
+		pr_err("sst: DSP not registered\n");
+		return -EIO;
+	}
+	params_data.type = SST_SET_CHANNEL_INFO;
+	params_data.str_id = SND_SST_DEVICE_IHF;
+	params_data.size = sizeof(channels);
+	params_data.addr = &channels;
+	return sst_dsp->ops->set_generic_params(SST_SET_RUNTIME_PARAMS,
+							(void *)&params_data);
+}
+
+static int sst_media_digital_mute(struct snd_soc_dai *dai, int mute, int stream)
+{
+
+	pr_debug("%s: enter, mute=%d dai-name=%s dir=%d\n", __func__, mute, dai->name, stream);
+
+	if (dpcm_enable == 1)
+		sst_send_pipe_gains(dai, stream, mute);
+
+	return 0;
+}
+
+/* helper functions */
+void sst_set_stream_status(struct sst_runtime_stream *stream,
+					int state)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&stream->status_lock, flags);
+	stream->stream_status = state;
+	spin_unlock_irqrestore(&stream->status_lock, flags);
+}
+
+static inline int sst_get_stream_status(struct sst_runtime_stream *stream)
+{
+	int state;
+	unsigned long flags;
+
+	spin_lock_irqsave(&stream->status_lock, flags);
+	state = stream->stream_status;
+	spin_unlock_irqrestore(&stream->status_lock, flags);
+	return state;
+}
+
+static void sst_fill_alloc_params(struct snd_pcm_substream *substream,
+				struct snd_sst_alloc_params_ext *alloc_param)
+{
+	unsigned int channels;
+	snd_pcm_uframes_t period_size;
+	ssize_t periodbytes;
+	ssize_t buffer_bytes = snd_pcm_lib_buffer_bytes(substream);
+	u32 buffer_addr = virt_to_phys(substream->dma_buffer.area);
+
+	channels = substream->runtime->channels;
+	period_size = substream->runtime->period_size;
+	periodbytes = samples_to_bytes(substream->runtime, period_size);
+	alloc_param->ring_buf_info[0].addr = buffer_addr;
+	alloc_param->ring_buf_info[0].size = buffer_bytes;
+	alloc_param->sg_count = 1;
+	alloc_param->reserved = 0;
+	alloc_param->frag_size = periodbytes * channels;
+
+	pr_debug("period_size = %d\n", alloc_param->frag_size);
+	pr_debug("ring_buf_addr = 0x%x\n", alloc_param->ring_buf_info[0].addr);
+}
+static void sst_fill_pcm_params(struct snd_pcm_substream *substream,
+				struct snd_sst_stream_params *param)
+{
+	param->uc.pcm_params.num_chan = (u8) substream->runtime->channels;
+	param->uc.pcm_params.pcm_wd_sz = substream->runtime->sample_bits;
+	param->uc.pcm_params.sfreq = substream->runtime->rate;
+
+	/* PCM stream via ALSA interface */
+	param->uc.pcm_params.use_offload_path = 0;
+	param->uc.pcm_params.reserved2 = 0;
+	memset(param->uc.pcm_params.channel_map, 0, sizeof(u8));
+	pr_debug("sfreq= %d, wd_sz = %d\n",
+	param->uc.pcm_params.sfreq, param->uc.pcm_params.pcm_wd_sz);
+
+}
+
+#define ASSIGN_PIPE_ID(periodtime, lowlatency, deepbuffer) \
+	((periodtime) <= (lowlatency) ? PIPE_LOW_PCM0_IN : \
+	((periodtime) >= (deepbuffer) ? PIPE_MEDIA3_IN : PIPE_MEDIA1_IN))
+
+static int sst_get_stream_mapping(int dev, int sdev, int dir,
+	struct sst_dev_stream_map *map, int size, u8 pipe_id,
+	const struct sst_lowlatency_deepbuff *ll_db)
+{
+	int index;
+	unsigned long pt = 0, ll = 0, db = 0;
+
+	if (map == NULL)
+		return -EINVAL;
+
+	pr_debug("dev %d sdev %d dir %d\n", dev, sdev, dir);
+
+	/* index 0 is not used in stream map */
+	for (index = 1; index < size; index++) {
+		if ((map[index].dev_num == dev) &&
+		    (map[index].subdev_num == sdev) &&
+		    (map[index].direction == dir)) {
+			/* device id for the probe is assigned dynamically */
+			if (map[index].status == SST_DEV_MAP_IN_USE) {
+				return index;
+			} else if (map[index].status == SST_DEV_MAP_FREE) {
+				map[index].status = SST_DEV_MAP_IN_USE;
+
+				if (map[index].dev_num == MERR_SALTBAY_PROBE) {
+					map[index].device_id = pipe_id;
+
+				} else if (map[index].dev_num == MERR_SALTBAY_AUDIO) {
+					if (!ll_db->low_latency || !ll_db->deep_buffer)
+						return -EINVAL;
+
+					pt = ll_db->period_time;
+					ll = *(ll_db->low_latency);
+					db = *(ll_db->deep_buffer);
+
+					pr_debug("PT %lu LL %lu DB %lu\n", pt, ll, db);
+
+					map[index].device_id = ASSIGN_PIPE_ID(pt,
+								ll, db);
+				}
+				pr_debug("%s: pipe_id 0%x index %d", __func__,
+						map[index].device_id, index);
+
+				return index;
+			}
+		}
+	}
+	return 0;
+}
+
+int sst_fill_stream_params(void *substream,
+	const struct sst_data *ctx, struct snd_sst_params *str_params, bool is_compress)
+{
+	int map_size;
+	int index;
+	struct sst_dev_stream_map *map;
+	struct snd_pcm_substream *pstream = NULL;
+	struct snd_compr_stream *cstream = NULL;
+
+	map = ctx->pdata->pdev_strm_map;
+	map_size = ctx->pdata->strm_map_size;
+
+	if (is_compress == true)
+		cstream = (struct snd_compr_stream *)substream;
+	else
+		pstream = (struct snd_pcm_substream *)substream;
+
+	str_params->stream_type = SST_STREAM_TYPE_MUSIC;
+
+	/* For pcm streams */
+	if (pstream) {
+		index = sst_get_stream_mapping(pstream->pcm->device,
+					  pstream->number, pstream->stream,
+					  map, map_size, ctx->pipe_id, &ctx->ll_db);
+		if (index <= 0)
+			return -EINVAL;
+
+		str_params->stream_id = index;
+		str_params->device_type = map[index].device_id;
+		str_params->task = map[index].task_id;
+
+		if (str_params->device_type == SST_PROBE_IN)
+			str_params->stream_type = SST_STREAM_TYPE_PROBE;
+
+		pr_debug("str_id = %d, device_type = 0x%x, task = %d",
+			 str_params->stream_id, str_params->device_type,
+			 str_params->task);
+
+		str_params->ops = (u8)pstream->stream;
+	}
+
+	if (cstream) {
+		/* FIXME: Add support for subdevice number in
+		 * snd_compr_stream */
+		index = sst_get_stream_mapping(cstream->device->device,
+					       0, cstream->direction,
+					       map, map_size, ctx->pipe_id, &ctx->ll_db);
+		if (index <= 0)
+			return -EINVAL;
+		str_params->stream_id = index;
+		str_params->device_type = map[index].device_id;
+		str_params->task = map[index].task_id;
+		pr_debug("compress str_id = %d, device_type = 0x%x, task = %d",
+			 str_params->stream_id, str_params->device_type,
+			 str_params->task);
+
+		str_params->ops = (u8)cstream->direction;
+	}
+	return 0;
+}
+
+#define CALC_PERIODTIME(period_size, rate) (((period_size) * 1000) / (rate))
+
+static int sst_platform_alloc_stream(struct snd_pcm_substream *substream,
+		struct snd_soc_platform *platform)
+{
+	struct sst_runtime_stream *stream =
+			substream->runtime->private_data;
+	struct snd_sst_stream_params param = {{{0,},},};
+	struct snd_sst_params str_params = {0};
+	struct snd_sst_alloc_params_ext alloc_params = {0};
+	int ret_val = 0;
+	struct sst_data *ctx = snd_soc_platform_get_drvdata(platform);
+
+	/* set codec params and inform SST driver the same */
+	sst_fill_pcm_params(substream, &param);
+	sst_fill_alloc_params(substream, &alloc_params);
+	substream->runtime->dma_area = substream->dma_buffer.area;
+	str_params.sparams = param;
+	str_params.aparams = alloc_params;
+	str_params.codec = SST_CODEC_TYPE_PCM;
+
+	ctx->ll_db.period_time = CALC_PERIODTIME(substream->runtime->period_size,
+					substream->runtime->rate);
+
+	/* fill the device type and stream id to pass to SST driver */
+	ret_val = sst_fill_stream_params(substream, ctx, &str_params, false);
+	pr_debug("platform prepare: fill stream params ret_val = 0x%x\n", ret_val);
+	if (ret_val < 0)
+		return ret_val;
+
+	stream->stream_info.str_id = str_params.stream_id;
+
+	ret_val = stream->ops->open(&str_params);
+	pr_debug("platform prepare: stream open ret_val = 0x%x\n", ret_val);
+	if (ret_val <= 0)
+		return ret_val;
+
+	pr_debug("platform allocated strid:  %d\n", stream->stream_info.str_id);
+
+	return ret_val;
+}
+
+static void sst_period_elapsed(void *mad_substream)
+{
+	struct snd_pcm_substream *substream = mad_substream;
+	struct sst_runtime_stream *stream;
+	int status;
+
+	if (!substream || !substream->runtime) {
+		pr_debug("In %s : Null Substream pointer\n", __func__);
+		return;
+	}
+	stream = substream->runtime->private_data;
+	if (!stream) {
+		pr_debug("In %s : Null Stream pointer\n", __func__);
+		return;
+	}
+	status = sst_get_stream_status(stream);
+	if (status != SST_PLATFORM_RUNNING) {
+		pr_debug("In %s : Stream Status=%d\n", __func__, status);
+		return;
+	}
+	snd_pcm_period_elapsed(substream);
+}
+
+static int sst_platform_init_stream(struct snd_pcm_substream *substream)
+{
+	struct sst_runtime_stream *stream =
+			substream->runtime->private_data;
+	int ret_val;
+
+	pr_debug("setting buffer ptr param\n");
+	sst_set_stream_status(stream, SST_PLATFORM_INIT);
+	stream->stream_info.period_elapsed = sst_period_elapsed;
+	stream->stream_info.mad_substream = substream;
+	stream->stream_info.buffer_ptr = 0;
+	stream->stream_info.sfreq = substream->runtime->rate;
+	pr_debug("pcm_substream %p, period_elapsed %p\n",
+			stream->stream_info.mad_substream, stream->stream_info.period_elapsed);
+	ret_val = stream->ops->device_control(
+			SST_SND_STREAM_INIT, &stream->stream_info);
+	if (ret_val)
+		pr_err("control_set ret error %d\n", ret_val);
+	return ret_val;
+
+}
+
+static inline int power_up_sst(struct sst_runtime_stream *sst)
+{
+	return sst->ops->power(true);
+}
+
+static inline int power_down_sst(struct sst_runtime_stream *sst)
+{
+	return sst->ops->power(false);
+}
+/* end -- helper functions */
+
+static int sst_media_open(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	int ret_val = 0;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct sst_runtime_stream *stream;
+
+	stream = kzalloc(sizeof(*stream), GFP_KERNEL);
+	if (!stream)
+		return -ENOMEM;
+
+	spin_lock_init(&stream->status_lock);
+
+	/* get the sst ops */
+	mutex_lock(&sst_dsp_lock);
+	if (!sst_dsp ||
+	    !try_module_get(sst_dsp->dev->driver->owner)) {
+		pr_err("no device available to run\n");
+		ret_val = -ENODEV;
+		goto out_ops;
+	}
+	stream->ops = sst_dsp->ops;
+	mutex_unlock(&sst_dsp_lock);
+
+	stream->stream_info.str_id = 0;
+	sst_set_stream_status(stream, SST_PLATFORM_UNINIT);
+	stream->stream_info.mad_substream = substream;
+	runtime->private_data = stream;
+
+	if (strstr(dai->name, "Power-cpu-dai"))
+		return power_up_sst(stream);
+
+	/* Make sure, that the period size is always even */
+	snd_pcm_hw_constraint_step(substream->runtime, 0,
+			   SNDRV_PCM_HW_PARAM_PERIODS, 2);
+
+	pr_debug("buf_ptr %llu\n", stream->stream_info.buffer_ptr);
+	return snd_pcm_hw_constraint_integer(runtime,
+			 SNDRV_PCM_HW_PARAM_PERIODS);
+out_ops:
+	kfree(stream);
+	mutex_unlock(&sst_dsp_lock);
+	return ret_val;
+}
+
+static void sst_free_stream_in_use(struct sst_dev_stream_map *map, int str_id)
+{
+	if (dpcm_enable == 1)
+		return;
+
+	if ((map[str_id].dev_num == MERR_SALTBAY_AUDIO) ||
+			(map[str_id].dev_num == MERR_SALTBAY_PROBE)) {
+		/* Do nothing in capture for audio device */
+		if ((map[str_id].dev_num == MERR_SALTBAY_AUDIO) &&
+				(map[str_id].direction == SNDRV_PCM_STREAM_CAPTURE))
+			return;
+		if ((map[str_id].task_id == SST_TASK_ID_MEDIA) &&
+				(map[str_id].status == SST_DEV_MAP_IN_USE)) {
+			pr_debug("str_id %d device_id 0x%x\n", str_id, map[str_id].device_id);
+			map[str_id].status = SST_DEV_MAP_FREE;
+			map[str_id].device_id = PIPE_RSVD;
+		}
+	}
+	return;
+}
+
+static void sst_media_close(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	struct sst_runtime_stream *stream;
+	int ret_val = 0, str_id;
+	struct sst_data *ctx = snd_soc_platform_get_drvdata(dai->platform);
+
+	stream = substream->runtime->private_data;
+	if (strstr(dai->name, "Power-cpu-dai"))
+		ret_val = power_down_sst(stream);
+
+	str_id = stream->stream_info.str_id;
+	if (str_id)
+		ret_val = stream->ops->close(str_id);
+	sst_free_stream_in_use(ctx->pdata->pdev_strm_map, str_id);
+	module_put(sst_dsp->dev->driver->owner);
+	kfree(stream);
+	pr_debug("%s: %d\n", __func__, ret_val);
+}
+
+static int sst_dpcm_probe_cmd(struct snd_soc_platform *platform,
+		struct snd_pcm_substream *substream, u16 pipe_id, bool on)
+{
+	int ret = 0;
+	if ((dpcm_enable == 1) && (substream->pcm->device == MERR_DPCM_PROBE))
+			ret = sst_dpcm_probe_send(platform, pipe_id, substream->number,
+					substream->stream, on);
+	return ret;
+}
+
+static inline unsigned int get_current_pipe_id(struct snd_soc_platform *platform,
+					       struct snd_pcm_substream *substream)
+{
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+	struct sst_dev_stream_map *map = sst->pdata->pdev_strm_map;
+	struct sst_runtime_stream *stream =
+			substream->runtime->private_data;
+	u32 str_id = stream->stream_info.str_id;
+	unsigned int pipe_id;
+	pipe_id = map[str_id].device_id;
+
+	pr_debug("%s: got pipe_id = %#x for str_id = %d\n",
+		 __func__, pipe_id, str_id);
+	return pipe_id;
+}
+
+static void sst_probe_close(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	u16 probe_pipe_id = get_current_pipe_id(dai->platform, substream);
+
+	sst_dpcm_probe_cmd(dai->platform, substream, probe_pipe_id, false);
+	sst_media_close(substream, dai);
+}
+
+static int sst_media_prepare(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	struct sst_runtime_stream *stream;
+	int ret_val = 0, str_id;
+
+	pr_debug("%s\n", __func__);
+
+	stream = substream->runtime->private_data;
+	str_id = stream->stream_info.str_id;
+	if (stream->stream_info.str_id)
+		return ret_val;
+
+	ret_val = sst_platform_alloc_stream(substream, dai->platform);
+	if (ret_val <= 0)
+		return ret_val;
+	snprintf(substream->pcm->id, sizeof(substream->pcm->id),
+			"%d", stream->stream_info.str_id);
+
+	ret_val = sst_platform_init_stream(substream);
+	if (ret_val)
+		return ret_val;
+	substream->runtime->hw.info = SNDRV_PCM_INFO_BLOCK_TRANSFER;
+
+	return ret_val;
+}
+
+static int sst_probe_prepare(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	u16 probe_pipe_id;
+
+	sst_media_prepare(substream, dai);
+	probe_pipe_id = get_current_pipe_id(dai->platform, substream);
+
+	return sst_dpcm_probe_cmd(dai->platform, substream, probe_pipe_id, true);
+}
+
+static int sst_media_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params,
+				struct snd_soc_dai *dai)
+{
+	pr_debug("%s\n", __func__);
+
+	snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
+	memset(substream->runtime->dma_area, 0, params_buffer_bytes(params));
+	return 0;
+}
+
+static int sst_media_hw_free(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	return snd_pcm_lib_free_pages(substream);
+}
+
+static int sst_enable_ssp(struct snd_pcm_substream *substream,
+			struct snd_soc_dai *dai)
+{
+	pr_debug("In %s :dai=%s pb=%d cp= %d dai_active=%d id=%d\n", __func__,
+		dai->name, dai->playback_active, dai->capture_active, dai->active,  dai->id);
+	if (!dai->active) {
+		sst_handle_vb_timer(dai->platform, true);
+		send_ssp_cmd(dai->platform, dai->name, 1);
+	}
+	return 0;
+}
+
+static void sst_disable_ssp(struct snd_pcm_substream *substream,
+			struct snd_soc_dai *dai)
+{
+	pr_debug("In %s :dai=%s pb=%d cp= %d dai_active=%d id=%d\n", __func__,
+		dai->name, dai->playback_active, dai->capture_active, dai->active, dai->id);
+	if (!dai->active) {
+		send_ssp_cmd(dai->platform, dai->name, 0);
+		sst_handle_vb_timer(dai->platform, false);
+	}
+}
+
+static struct snd_soc_dai_ops sst_media_dai_ops = {
+	.startup = sst_media_open,
+	.shutdown = sst_media_close,
+	.prepare = sst_media_prepare,
+	.hw_params = sst_media_hw_params,
+	.hw_free = sst_media_hw_free,
+	.set_tdm_slot = sst_platform_ihf_set_tdm_slot,
+	.mute_stream = sst_media_digital_mute,
+};
+
+static struct snd_soc_dai_ops sst_probe_dai_ops = {
+	.startup = sst_media_open,
+	.hw_params = sst_media_hw_params,
+	.hw_free = sst_media_hw_free,
+	.shutdown = sst_probe_close,
+	.prepare = sst_probe_prepare,
+};
+
+static struct snd_soc_dai_ops sst_loopback_dai_ops = {
+	.startup = sst_media_open,
+	.shutdown = sst_media_close,
+	.prepare = sst_media_prepare,
+};
+
+static struct snd_soc_dai_ops sst_compr_dai_ops = {
+	.mute_stream = sst_media_digital_mute,
+};
+
+static struct snd_soc_dai_ops sst_be_dai_ops = {
+	.startup = sst_enable_ssp,
+	.shutdown = sst_disable_ssp,
+};
+
+static struct snd_soc_dai_driver sst_platform_dai[] = {
+{
+	.name = SST_HEADSET_DAI,
+	.ops = &sst_media_dai_ops,
+	.playback = {
+		.stream_name = "Headset Playback",
+		.channels_min = SST_STEREO,
+		.channels_max = SST_STEREO,
+		.rates = SNDRV_PCM_RATE_44100|SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+	.capture = {
+		.stream_name = "Headset Capture",
+		.channels_min = 1,
+		.channels_max = 2,
+		.rates = SNDRV_PCM_RATE_44100|SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+},
+{
+	.name = SST_DEEPBUFFER_DAI,
+	.ops = &sst_media_dai_ops,
+	.playback = {
+		.stream_name = "Deepbuffer Playback",
+		.channels_min = SST_STEREO,
+		.channels_max = SST_STEREO,
+		.rates = SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+},
+{
+	.name = SST_LOWLATENCY_DAI,
+	.ops = &sst_media_dai_ops,
+	.playback = {
+		.stream_name = "Low Latency Playback",
+		.channels_min = SST_STEREO,
+		.channels_max = SST_STEREO,
+		.rates = SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+},
+{
+	.name = SST_SPEAKER_DAI,
+	.ops = &sst_media_dai_ops,
+	.playback = {
+		.stream_name = "Speaker Playback",
+		.channels_min = SST_MONO,
+		.channels_max = SST_STEREO,
+		.rates = SNDRV_PCM_RATE_44100|SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+},
+{
+	.name = SST_VOICE_DAI,
+	.playback = {
+		.stream_name = "Voice Downlink",
+		.channels_min = SST_MONO,
+		.channels_max = SST_STEREO,
+		.rates = SNDRV_PCM_RATE_8000|SNDRV_PCM_RATE_16000|SNDRV_PCM_RATE_44100|SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+	.capture = {
+		.stream_name = "Voice Uplink",
+		.channels_min = SST_MONO,
+		.channels_max = SST_STEREO,
+		.rates = SNDRV_PCM_RATE_8000|SNDRV_PCM_RATE_16000|SNDRV_PCM_RATE_44100|SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+},
+{
+	.name = SST_COMPRESS_DAI,
+	.compress_dai = 1,
+	.ops = &sst_compr_dai_ops,
+	.playback = {
+		.stream_name = "Compress Playback",
+		.channels_min = SST_STEREO,
+		.channels_max = SST_STEREO,
+		.rates = SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+},
+{
+	.name = SST_VIRTUAL_DAI,
+	.playback = {
+		.stream_name = "Virtual Playback",
+		.channels_min = SST_STEREO,
+		.channels_max = SST_STEREO,
+		.rates = SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+},
+{
+	.name = SST_POWER_DAI,
+	.ops = &sst_media_dai_ops,
+	.playback = {
+		.stream_name = "Dummy Power Stream",
+		.channels_min = SST_MONO,
+		.channels_max = SST_STEREO,
+		.rates = SNDRV_PCM_RATE_8000_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
+	},
+},
+{
+	.name = SST_PROBE_DAI,
+	.ops = &sst_probe_dai_ops,
+	.playback = {
+		.stream_name = "Probe Playback",
+		.channels_min = SST_MONO,
+		.channels_max = SST_STEREO,
+		.rates = SNDRV_PCM_RATE_8000 |
+				SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE |
+			   SNDRV_PCM_FMTBIT_S32_LE,
+	},
+	.capture = {
+		.stream_name = "Probe Capture",
+		.channels_min = SST_MONO,
+		.channels_max = SST_STEREO,
+		.rates = SNDRV_PCM_RATE_8000 |
+				SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE |
+			   SNDRV_PCM_FMTBIT_S32_LE,
+	},
+},
+{
+	.name = SST_VOIP_DAI,
+	.ops = &sst_media_dai_ops,
+	.playback = {
+		.stream_name = "VOIP Playback",
+		.channels_min = SST_MONO,
+		.channels_max = SST_STEREO,
+		.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+	.capture = {
+		.stream_name = "VOIP Capture",
+		.channels_min = SST_MONO,
+		.channels_max = SST_STEREO,
+		.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+},
+{
+	.name = SST_LOOPBACK_DAI,
+	.ops = &sst_loopback_dai_ops,
+	.capture = {
+		.stream_name = "Loopback Capture",
+		.channels_min = SST_MONO,
+		.channels_max = SST_MONO,
+		.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+},
+/*BE CPU  Dais */
+{
+	.name = "ssp0-port",
+	.ops = &sst_be_dai_ops,
+	.playback = {
+		.stream_name = "ssp0 Tx",
+		.channels_min = SST_STEREO,
+		.channels_max = SST_STEREO,
+		.rates = SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+	.capture = {
+		.stream_name = "ssp0 Rx",
+		.channels_min = SST_STEREO,
+		.channels_max = SST_STEREO,
+		.rates = SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+},
+{
+	.name = "ssp1-port",
+	.ops = &sst_be_dai_ops,
+	.playback = {
+		.stream_name = "ssp1 Tx",
+		.channels_min = SST_STEREO,
+		.channels_max = SST_STEREO,
+		.rates = SNDRV_PCM_RATE_8000|SNDRV_PCM_RATE_16000|SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+	.capture = {
+		.stream_name = "ssp1 Rx",
+		.channels_min = SST_STEREO,
+		.channels_max = SST_STEREO,
+		.rates = SNDRV_PCM_RATE_8000|SNDRV_PCM_RATE_16000|SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+},
+{
+	.name = "ssp2-port",
+	.ops = &sst_be_dai_ops,
+	.playback = {
+		.stream_name = "ssp2 Tx",
+		.channels_min = SST_STEREO,
+		.channels_max = SST_STEREO,
+		.rates = SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+	.capture = {
+		.stream_name = "ssp2 Rx",
+		.channels_min = SST_STEREO,
+		.channels_max = SST_STEREO,
+		.rates = SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+},
+};
+
+static int sst_platform_open(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime;
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai_link *dai_link = rtd->dai_link;
+
+	pr_debug("sst_platform_open called:%s\n", dai_link->cpu_dai_name);
+	if (substream->pcm->internal)
+		return 0;
+	runtime = substream->runtime;
+	runtime->hw = sst_platform_pcm_hw;
+	return 0;
+}
+
+static int sst_platform_close(struct snd_pcm_substream *substream)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai_link *dai_link = rtd->dai_link;
+	pr_debug("sst_platform_close called:%s\n", dai_link->cpu_dai_name);
+	return 0;
+}
+
+static int sst_platform_pcm_trigger(struct snd_pcm_substream *substream,
+					int cmd)
+{
+	int ret_val = 0, str_id;
+	struct sst_runtime_stream *stream;
+	int str_cmd, status, alsa_state;
+
+	if (substream->pcm->internal)
+		return 0;
+	pr_debug("sst_platform_pcm_trigger called\n");
+	stream = substream->runtime->private_data;
+	str_id = stream->stream_info.str_id;
+	alsa_state = substream->runtime->status->state;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+		pr_debug("Trigger Start\n");
+		str_cmd = SST_SND_START;
+		status = SST_PLATFORM_RUNNING;
+		stream->stream_info.mad_substream = substream;
+		break;
+	case SNDRV_PCM_TRIGGER_STOP:
+		pr_debug("Trigger stop\n");
+		str_cmd = SST_SND_DROP;
+		status = SST_PLATFORM_DROPPED;
+		break;
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+		pr_debug("Trigger pause\n");
+		str_cmd = SST_SND_PAUSE;
+		status = SST_PLATFORM_PAUSED;
+		break;
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+		pr_debug("Trigger pause release\n");
+		str_cmd = SST_SND_RESUME;
+		status = SST_PLATFORM_RUNNING;
+		break;
+	default:
+		return -EINVAL;
+	}
+	ret_val = stream->ops->device_control(str_cmd, &str_id);
+	if (!ret_val)
+		sst_set_stream_status(stream, status);
+
+	return ret_val;
+}
+
+
+static snd_pcm_uframes_t sst_platform_pcm_pointer
+			(struct snd_pcm_substream *substream)
+{
+	struct sst_runtime_stream *stream;
+	int ret_val, status;
+	struct pcm_stream_info *str_info;
+
+	stream = substream->runtime->private_data;
+	status = sst_get_stream_status(stream);
+	if (status == SST_PLATFORM_INIT)
+		return 0;
+	str_info = &stream->stream_info;
+	ret_val = stream->ops->device_control(
+				SST_SND_BUFFER_POINTER, str_info);
+	if (ret_val) {
+		pr_err("sst: error code = %d\n", ret_val);
+		return ret_val;
+	}
+	substream->runtime->soc_delay = str_info->pcm_delay;
+	return str_info->buffer_ptr;
+}
+
+static struct snd_pcm_ops sst_platform_ops = {
+	.open = sst_platform_open,
+	.close = sst_platform_close,
+	.ioctl = snd_pcm_lib_ioctl,
+	.trigger = sst_platform_pcm_trigger,
+	.pointer = sst_platform_pcm_pointer,
+};
+
+static void sst_pcm_free(struct snd_pcm *pcm)
+{
+	pr_debug("sst_pcm_free called\n");
+	snd_pcm_lib_preallocate_free_for_all(pcm);
+}
+
+static int sst_pcm_new(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_soc_dai *dai = rtd->cpu_dai;
+	struct snd_pcm *pcm = rtd->pcm;
+	int retval = 0;
+
+	pr_debug("sst_pcm_new called\n");
+	if (dai->driver->playback.channels_min ||
+			dai->driver->capture.channels_min) {
+		retval =  snd_pcm_lib_preallocate_pages_for_all(pcm,
+			SNDRV_DMA_TYPE_CONTINUOUS,
+			snd_dma_continuous_data(GFP_DMA),
+			SST_MAX_BUFFER, SST_MAX_BUFFER);
+		if (retval) {
+			pr_err("dma buffer allocationf fail\n");
+			return retval;
+		}
+	}
+	return retval;
+}
+
+static int sst_soc_probe(struct snd_soc_platform *platform)
+{
+	int ret = 0;
+
+	pr_debug("Enter:%s\n", __func__);
+	if (dpcm_enable == 1)
+		ret = sst_dsp_init_v2_dpcm(platform);
+	else
+		ret = sst_dsp_init(platform);
+	if (ret)
+		return ret;
+	ret = snd_soc_register_effect(platform->card, &effects_ops);
+
+	return ret;
+}
+
+static int sst_soc_remove(struct snd_soc_platform *platform)
+{
+	pr_debug("%s called\n", __func__);
+	return 0;
+}
+
+static struct snd_soc_platform_driver sst_soc_platform_drv  = {
+	.probe		= sst_soc_probe,
+	.remove		= sst_soc_remove,
+	.ops		= &sst_platform_ops,
+	.compr_ops	= &sst_platform_compr_ops,
+	.pcm_new	= sst_pcm_new,
+	.pcm_free	= sst_pcm_free,
+	.read		= sst_soc_read,
+	.write		= sst_soc_write,
+};
+
+int sst_register_dsp(struct sst_device *sst_dev)
+{
+	if (!sst_dev)
+		return -ENODEV;
+	mutex_lock(&sst_dsp_lock);
+	if (sst_dsp) {
+		pr_err("we already have a device %s\n", sst_dsp->name);
+		mutex_unlock(&sst_dsp_lock);
+		return -EEXIST;
+	}
+	pr_debug("registering device %s\n", sst_dev->name);
+
+	sst_dsp = sst_dev;
+	mutex_unlock(&sst_dsp_lock);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sst_register_dsp);
+
+int sst_unregister_dsp(struct sst_device *dev)
+{
+	if (dev != sst_dsp)
+		return -EINVAL;
+
+	mutex_lock(&sst_dsp_lock);
+	if (sst_dsp) {
+		pr_debug("unregister %s\n", sst_dsp->name);
+		mutex_unlock(&sst_dsp_lock);
+		return -EIO;
+	}
+
+	sst_dsp = NULL;
+	mutex_unlock(&sst_dsp_lock);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sst_unregister_dsp);
+
+static const struct snd_soc_component_driver pcm_component = {
+	.name           = "pcm",
+};
+
+static int sst_platform_probe(struct platform_device *pdev)
+{
+	struct sst_data *sst;
+	int ret;
+	struct sst_platform_data *pdata = pdev->dev.platform_data;
+
+	pr_debug("sst_platform_probe called\n");
+	sst = devm_kzalloc(&pdev->dev, sizeof(*sst), GFP_KERNEL);
+	if (sst == NULL) {
+		pr_err("kzalloc failed\n");
+		return -ENOMEM;
+	}
+
+	if (dpcm_enable == 1) {
+		pr_info("dpcm enabled; overriding stream map\n");
+		pdata->pdev_strm_map = dpcm_strm_map;
+		pdata->strm_map_size = ARRAY_SIZE(dpcm_strm_map);
+	}
+	sst_pdev = &pdev->dev;
+	sst->pdata = pdata;
+	mutex_init(&sst->lock);
+	dev_set_drvdata(&pdev->dev, sst);
+
+	ret = snd_soc_register_platform(&pdev->dev,
+					 &sst_soc_platform_drv);
+	if (ret) {
+		pr_err("registering soc platform failed\n");
+		return ret;
+	}
+	ret = snd_soc_register_component(&pdev->dev, &pcm_component,
+				sst_platform_dai, ARRAY_SIZE(sst_platform_dai));
+	if (ret) {
+		pr_err("registering cpu dais failed\n");
+		snd_soc_unregister_platform(&pdev->dev);
+	}
+
+	return ret;
+}
+
+static int sst_platform_remove(struct platform_device *pdev)
+{
+
+	snd_soc_unregister_component(&pdev->dev);
+	snd_soc_unregister_platform(&pdev->dev);
+	pr_debug("sst_platform_remove success\n");
+	return 0;
+}
+
+static struct platform_driver sst_platform_driver = {
+	.driver		= {
+		.name		= "sst-platform",
+		.owner		= THIS_MODULE,
+	},
+	.probe		= sst_platform_probe,
+	.remove		= sst_platform_remove,
+};
+
+module_platform_driver(sst_platform_driver);
+
+MODULE_DESCRIPTION("ASoC Intel(R) MID Platform driver");
+MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
+MODULE_AUTHOR("Harsha Priya <priya.harsha@intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:sst-platform");
diff --git a/sound/soc/intel/platform-libs/controls_v1.c b/sound/soc/intel/platform-libs/controls_v1.c
new file mode 100644
index 0000000..b6a5fa7
--- /dev/null
+++ b/sound/soc/intel/platform-libs/controls_v1.c
@@ -0,0 +1,184 @@
+/*
+ *  controls_v1.c - Intel MID Platform driver ALSA controls for CTP
+ *
+ *  Copyright (C) 2012 Intel Corp
+ *  Author: Jeeja KP <jeeja.kp@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+#include <linux/slab.h>
+#include <sound/intel_sst_ioctl.h>
+#include <sound/soc.h>
+#include "../sst_platform.h"
+#include "../sst_platform_pvt.h"
+
+
+static int sst_set_mixer_param(unsigned int device_input_mixer)
+{
+	if (!sst_dsp) {
+		pr_err("sst: DSP not registered\n");
+		return -ENODEV;
+	}
+
+	/*allocate memory for params*/
+	return sst_dsp->ops->set_generic_params(SST_SET_ALGO_PARAMS,
+						(void *)&device_input_mixer);
+}
+static int lpe_mixer_ihf_get(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_platform *platform = snd_kcontrol_chip(kcontrol);
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+
+	ucontrol->value.integer.value[0] = sst->lpe_mixer_input_ihf;
+	return 0;
+}
+
+static int lpe_mixer_ihf_set(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	int device_input_mixer;
+	struct snd_soc_platform *platform = snd_kcontrol_chip(kcontrol);
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+
+	switch (ucontrol->value.integer.value[0]) {
+	case 0:
+		pr_debug("input is None\n");
+		device_input_mixer = SST_STREAM_DEVICE_IHF
+					| SST_INPUT_STREAM_NONE;
+		break;
+	case 1:
+		pr_debug("input is PCM stream\n");
+		device_input_mixer = SST_STREAM_DEVICE_IHF
+					| SST_INPUT_STREAM_PCM;
+		break;
+	case 2:
+		pr_debug("input is Compress  stream\n");
+		device_input_mixer = SST_STREAM_DEVICE_IHF
+					| SST_INPUT_STREAM_COMPRESS;
+		break;
+	case 3:
+		pr_debug("input is Mixed stream\n");
+		device_input_mixer = SST_STREAM_DEVICE_IHF
+					| SST_INPUT_STREAM_MIXED;
+		break;
+	default:
+		pr_err("Invalid Input:%ld\n", ucontrol->value.integer.value[0]);
+		return -EINVAL;
+	}
+	sst->lpe_mixer_input_ihf  = ucontrol->value.integer.value[0];
+	return sst_set_mixer_param(device_input_mixer);
+}
+
+static int lpe_mixer_headset_get(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_platform *platform = snd_kcontrol_chip(kcontrol);
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+
+	ucontrol->value.integer.value[0] = sst->lpe_mixer_input_hs;
+	return 0;
+}
+
+static int lpe_mixer_headset_set(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	int mixer_input_stream;
+	struct snd_soc_platform *platform = snd_kcontrol_chip(kcontrol);
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+
+	switch (ucontrol->value.integer.value[0]) {
+	case 0:
+		pr_debug("input is None\n");
+		mixer_input_stream = SST_STREAM_DEVICE_HS
+					| SST_INPUT_STREAM_NONE;
+		break;
+	case 1:
+		pr_debug("input is PCM stream\n");
+		mixer_input_stream = SST_STREAM_DEVICE_HS
+					 | SST_INPUT_STREAM_PCM;
+		break;
+	case 2:
+		pr_debug("input is Compress  stream\n");
+		mixer_input_stream = SST_STREAM_DEVICE_HS
+					 | SST_INPUT_STREAM_COMPRESS;
+		break;
+	case 3:
+		pr_debug("input is Mixed stream\n");
+		mixer_input_stream = SST_STREAM_DEVICE_HS
+					 | SST_INPUT_STREAM_MIXED;
+		break;
+	default:
+		pr_err("Invalid Input:%ld\n", ucontrol->value.integer.value[0]);
+		return -EINVAL;
+	}
+	sst->lpe_mixer_input_hs  = ucontrol->value.integer.value[0];
+	return sst_set_mixer_param(mixer_input_stream);
+}
+
+static int sst_probe_byte_control_get(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+
+	if (!sst_dsp) {
+		pr_err("sst: DSP not registered\n");
+		return -ENODEV;
+	}
+
+	return sst_dsp->ops->set_generic_params(SST_GET_PROBE_BYTE_STREAM,
+				ucontrol->value.bytes.data);
+}
+
+static int sst_probe_byte_control_set(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+
+	if (!sst_dsp) {
+		pr_err("sst: DSP not registered\n");
+		return -ENODEV;
+	}
+
+	return sst_dsp->ops->set_generic_params(SST_SET_PROBE_BYTE_STREAM,
+				ucontrol->value.bytes.data);
+}
+
+static const char *lpe_mixer_text[] = {
+	"None", "PCM", "Compressed", "Mixed",
+};
+
+static const struct soc_enum lpe_mixer_enum =
+	SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(lpe_mixer_text), lpe_mixer_text);
+
+static const struct snd_kcontrol_new sst_controls_clv[] = {
+	SOC_ENUM_EXT("LPE IHF mixer", lpe_mixer_enum,
+		lpe_mixer_ihf_get, lpe_mixer_ihf_set),
+	SOC_ENUM_EXT("LPE headset mixer", lpe_mixer_enum,
+		lpe_mixer_headset_get, lpe_mixer_headset_set),
+	SND_SOC_BYTES_EXT("SST Probe Byte Control", SST_MAX_BIN_BYTES,
+		sst_probe_byte_control_get,
+		sst_probe_byte_control_set),
+};
+
+int sst_platform_clv_init(struct snd_soc_platform *platform)
+{
+	struct sst_data *ctx = snd_soc_platform_get_drvdata(platform);
+	ctx->lpe_mixer_input_hs = 0;
+	ctx->lpe_mixer_input_ihf = 0;
+	snd_soc_add_platform_controls(platform, sst_controls_clv,
+						ARRAY_SIZE(sst_controls_clv));
+	return 0;
+}
diff --git a/sound/soc/intel/platform-libs/controls_v2.c b/sound/soc/intel/platform-libs/controls_v2.c
new file mode 100644
index 0000000..5e51d4b
--- /dev/null
+++ b/sound/soc/intel/platform-libs/controls_v2.c
@@ -0,0 +1,1769 @@
+/*
+ *  controls_v2.c - Intel MID Platform driver ALSA controls for Mrfld
+ *
+ *  Copyright (C) 2012 Intel Corp
+ *  Author: Vinod Koul <vinod.koul@ilinux.intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <sound/soc.h>
+#include <sound/asound.h>
+#include <asm/platform_sst_audio.h>
+#include "../platform_ipc_v2.h"
+#include "../sst_platform.h"
+#include "../sst_platform_pvt.h"
+#include "ipc_lib.h"
+#include "controls_v2.h"
+
+
+#define SST_ALGO_KCONTROL_INT(xname, xreg, xshift, xmax, xinvert,\
+	xhandler_get, xhandler_put, xmod, xpipe, xinstance, default_val) \
+{	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+	.info = sst_algo_int_ctl_info, \
+	.get = xhandler_get, .put = xhandler_put, \
+	.private_value = (unsigned long)&(struct sst_algo_int_control_v2) \
+		{.mc.reg = xreg, .mc.rreg = xreg, .mc.shift = xshift, \
+		.mc.rshift = xshift, .mc.max = xmax, .mc.platform_max = xmax, \
+		.mc.invert = xinvert, .module_id = xmod, .pipe_id = xpipe, \
+		.instance_id = xinstance, .value = default_val } }
+/* Thresholds for Low Latency & Deep Buffer*/
+#define DEFAULT_LOW_LATENCY 10 /* In Ms */
+#define DEFAULT_DEEP_BUFFER 96
+
+unsigned long ll_threshold = DEFAULT_LOW_LATENCY;
+unsigned long db_threshold = DEFAULT_DEEP_BUFFER;
+
+int sst_algo_int_ctl_info(struct snd_kcontrol *kcontrol,
+		struct snd_ctl_elem_info *uinfo)
+{
+	struct sst_algo_int_control_v2 *amc = (void *)kcontrol->private_value;
+	struct soc_mixer_control *mc = &amc->mc;
+	int platform_max;
+
+	if (!mc->platform_max)
+		mc->platform_max = mc->max;
+	platform_max = mc->platform_max;
+
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+	uinfo->count = 1;
+	uinfo->value.integer.min = 0;
+	uinfo->value.integer.max = platform_max;
+	return 0;
+}
+
+unsigned int sst_soc_read(struct snd_soc_platform *platform,
+			unsigned int reg)
+{
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+
+	pr_debug("%s: reg[%d] = %#x\n", __func__, reg, sst->widget[reg]);
+	BUG_ON(reg > (SST_NUM_WIDGETS - 1));
+	return sst->widget[reg];
+}
+
+int sst_soc_write(struct snd_soc_platform *platform,
+		  unsigned int reg, unsigned int val)
+{
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+
+	pr_debug("%s: reg[%d] = %#x\n", __func__, reg, val);
+	BUG_ON(reg > (SST_NUM_WIDGETS - 1));
+	sst->widget[reg] = val;
+	return 0;
+}
+
+unsigned int sst_reg_read(struct sst_data *sst, unsigned int reg,
+			  unsigned int shift, unsigned int max)
+{
+	unsigned int mask = (1 << fls(max)) - 1;
+
+	return (sst->widget[reg] >> shift) & mask;
+}
+
+unsigned int sst_reg_write(struct sst_data *sst, unsigned int reg,
+			   unsigned int shift, unsigned int max, unsigned int val)
+{
+	unsigned int mask = (1 << fls(max)) - 1;
+
+	val &= mask;
+	val <<= shift;
+	sst->widget[reg] &= ~(mask << shift);
+	sst->widget[reg] |= val;
+	return val;
+}
+
+int sst_mix_put(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
+	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+	struct soc_mixer_control *mc =
+		(struct soc_mixer_control *)kcontrol->private_value;
+	struct sst_data *sst = snd_soc_platform_get_drvdata(widget->platform);
+	unsigned int mask = (1 << fls(mc->max)) - 1;
+	unsigned int val;
+	int connect;
+	struct snd_soc_dapm_update update;
+
+	pr_debug("%s called set %#lx for %s\n", __func__,
+			ucontrol->value.integer.value[0], widget->name);
+	val = sst_reg_write(sst, mc->reg, mc->shift, mc->max, ucontrol->value.integer.value[0]);
+	connect = !!val;
+
+	widget->value = val;
+	update.kcontrol = kcontrol;
+	update.widget = widget;
+	update.reg = mc->reg;
+	update.mask = mask;
+	update.val = val;
+
+	widget->dapm->update = &update;
+	snd_soc_dapm_mixer_update_power(widget, kcontrol, connect);
+	widget->dapm->update = NULL;
+	return 0;
+}
+
+int sst_mix_get(struct snd_kcontrol *kcontrol,
+		struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
+	struct snd_soc_dapm_widget *w = wlist->widgets[0];
+	struct soc_mixer_control *mc =
+		(struct soc_mixer_control *)kcontrol->private_value;
+	struct sst_data *sst = snd_soc_platform_get_drvdata(w->platform);
+
+	ucontrol->value.integer.value[0] = !!sst_reg_read(sst, mc->reg, mc->shift, mc->max);
+	return 0;
+}
+
+static const struct snd_kcontrol_new sst_mix_modem_controls[] = {
+	SOC_SINGLE_EXT("Modem", SST_MIX_MODEM, 0, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("BT", SST_MIX_MODEM, 1, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec0", SST_MIX_MODEM, 2, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec1", SST_MIX_MODEM, 3, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sprot_L0", SST_MIX_MODEM, 4, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L1", SST_MIX_MODEM, 5, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L2", SST_MIX_MODEM, 6, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Probe", SST_MIX_MODEM, 7, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sidetone", SST_MIX_MODEM, 8, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Tx", SST_MIX_MODEM, 9, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Rx", SST_MIX_MODEM, 10, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Tone", SST_MIX_MODEM, 11, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Voip", SST_MIX_MODEM, 12, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM0", SST_MIX_MODEM, 13, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM1", SST_MIX_MODEM, 14, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media0", SST_MIX_MODEM, 15, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media1", SST_MIX_MODEM, 16, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media2", SST_MIX_MODEM, 17, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("FM", SST_MIX_MODEM, 18, 1, 0,
+		sst_mix_get, sst_mix_put),
+};
+
+static const struct snd_kcontrol_new sst_mix_codec0_controls[] = {
+	SOC_SINGLE_EXT("Modem", SST_MIX_CODEC0, 0, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("BT", SST_MIX_CODEC0, 1, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec0", SST_MIX_CODEC0, 2, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec1", SST_MIX_CODEC0, 3, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sprot_L0", SST_MIX_CODEC0, 4, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L1", SST_MIX_CODEC0, 5, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L2", SST_MIX_CODEC0, 6, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Probe", SST_MIX_CODEC0, 7, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sidetone", SST_MIX_CODEC0, 8, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Tx", SST_MIX_CODEC0, 9, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Rx", SST_MIX_CODEC0, 10, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Tone", SST_MIX_CODEC0, 11, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Voip", SST_MIX_CODEC0, 12, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM0", SST_MIX_CODEC0, 13, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM1", SST_MIX_CODEC0, 14, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media0", SST_MIX_CODEC0, 15, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media1", SST_MIX_CODEC0, 16, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media2", SST_MIX_CODEC0, 17, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("FM", SST_MIX_CODEC0, 18, 1, 0,
+		sst_mix_get, sst_mix_put),
+};
+
+static const struct snd_kcontrol_new sst_mix_codec1_controls[] = {
+	SOC_SINGLE_EXT("Modem", SST_MIX_CODEC1, 0, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("BT", SST_MIX_CODEC1, 1, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec0", SST_MIX_CODEC1, 2, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec1", SST_MIX_CODEC1, 3, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sprot_L0", SST_MIX_CODEC1, 4, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L1", SST_MIX_CODEC1, 5, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L2", SST_MIX_CODEC1, 6, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Probe", SST_MIX_CODEC1, 7, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sidetone", SST_MIX_CODEC1, 8, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Tx", SST_MIX_CODEC1, 9, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Rx", SST_MIX_CODEC1, 10, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Tone", SST_MIX_CODEC1, 11, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Voip", SST_MIX_CODEC1, 12, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM0", SST_MIX_CODEC1, 13, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM1", SST_MIX_CODEC1, 14, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media0", SST_MIX_CODEC1, 15, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media1", SST_MIX_CODEC1, 16, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media2", SST_MIX_CODEC1, 17, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("FM", SST_MIX_CODEC1, 18, 1, 0,
+		sst_mix_get, sst_mix_put),
+};
+
+static const struct snd_kcontrol_new sst_mix_sprot_l0_controls[] = {
+	SOC_SINGLE_EXT("Modem", SST_MIX_LOOP0, 0, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("BT", SST_MIX_LOOP0, 1, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec0", SST_MIX_LOOP0, 2, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec1", SST_MIX_LOOP0, 3, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sprot_L0", SST_MIX_LOOP0, 4, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L1", SST_MIX_LOOP0, 5, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L2", SST_MIX_LOOP0, 6, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Probe", SST_MIX_LOOP0, 7, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sidetone", SST_MIX_LOOP0, 8, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Tx", SST_MIX_LOOP0, 9, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Rx", SST_MIX_LOOP0, 10, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Tone", SST_MIX_LOOP0, 11, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Voip", SST_MIX_LOOP0, 12, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM0", SST_MIX_LOOP0, 13, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM1", SST_MIX_LOOP0, 14, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media0", SST_MIX_LOOP0, 15, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media1", SST_MIX_LOOP0, 16, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media2", SST_MIX_LOOP0, 17, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("FM", SST_MIX_LOOP0, 18, 1, 0,
+		sst_mix_get, sst_mix_put),
+};
+
+static const struct snd_kcontrol_new sst_mix_media_l1_controls[] = {
+	SOC_SINGLE_EXT("Modem", SST_MIX_LOOP1, 0, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("BT", SST_MIX_LOOP1, 1, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec0", SST_MIX_LOOP1, 2, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec1", SST_MIX_LOOP1, 3, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sprot_L0", SST_MIX_LOOP1, 4, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L1", SST_MIX_LOOP1, 5, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L2", SST_MIX_LOOP1, 6, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Probe", SST_MIX_LOOP1, 7, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sidetone", SST_MIX_LOOP1, 8, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Tx", SST_MIX_LOOP1, 9, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Rx", SST_MIX_LOOP1, 10, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Tone", SST_MIX_LOOP1, 11, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Voip", SST_MIX_LOOP1, 12, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM0", SST_MIX_LOOP1, 13, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM1", SST_MIX_LOOP1, 14, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media0", SST_MIX_LOOP1, 15, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media1", SST_MIX_LOOP1, 16, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media2", SST_MIX_LOOP1, 17, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("FM", SST_MIX_LOOP1, 18, 1, 0,
+		sst_mix_get, sst_mix_put),
+};
+
+static const struct snd_kcontrol_new sst_mix_media_l2_controls[] = {
+	SOC_SINGLE_EXT("Modem", SST_MIX_LOOP2, 0, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("BT", SST_MIX_LOOP2, 1, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec0", SST_MIX_LOOP2, 2, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec1", SST_MIX_LOOP2, 3, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sprot_L0", SST_MIX_LOOP2, 4, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L1", SST_MIX_LOOP2, 5, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L2", SST_MIX_LOOP2, 6, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Probe", SST_MIX_LOOP2, 7, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sidetone", SST_MIX_LOOP2, 8, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Tx", SST_MIX_LOOP2, 9, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Rx", SST_MIX_LOOP2, 10, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Tone", SST_MIX_LOOP2, 11, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Voip", SST_MIX_LOOP2, 12, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM0", SST_MIX_LOOP2, 13, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM1", SST_MIX_LOOP2, 14, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media0", SST_MIX_LOOP2, 15, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media1", SST_MIX_LOOP2, 16, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media2", SST_MIX_LOOP2, 17, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("FM", SST_MIX_LOOP2, 18, 1, 0,
+		sst_mix_get, sst_mix_put),
+};
+
+static const struct snd_kcontrol_new sst_mix_speech_tx_controls[] = {
+	SOC_SINGLE_EXT("Modem", SST_MIX_SPEECH, 0, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("BT", SST_MIX_SPEECH, 1, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec0", SST_MIX_SPEECH, 2, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec1", SST_MIX_SPEECH, 3, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sprot_L0", SST_MIX_SPEECH, 4, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L1", SST_MIX_SPEECH, 5, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L2", SST_MIX_SPEECH, 6, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Probe", SST_MIX_SPEECH, 7, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sidetone", SST_MIX_SPEECH, 8, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Tx", SST_MIX_SPEECH, 9, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Rx", SST_MIX_SPEECH, 10, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Tone", SST_MIX_SPEECH, 11, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Voip", SST_MIX_SPEECH, 12, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM0", SST_MIX_SPEECH, 13, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM1", SST_MIX_SPEECH, 14, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media0", SST_MIX_SPEECH, 15, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media1", SST_MIX_SPEECH, 16, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media2", SST_MIX_SPEECH, 17, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("FM", SST_MIX_SPEECH, 18, 1, 0,
+		sst_mix_get, sst_mix_put),
+};
+
+static const struct snd_kcontrol_new sst_mix_speech_rx_controls[] = {
+	SOC_SINGLE_EXT("Modem", SST_MIX_RXSPEECH, 0, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("BT", SST_MIX_RXSPEECH, 1, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec0", SST_MIX_RXSPEECH, 2, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec1", SST_MIX_RXSPEECH, 3, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sprot_L0", SST_MIX_RXSPEECH, 4, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L1", SST_MIX_RXSPEECH, 5, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L2", SST_MIX_RXSPEECH, 6, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Probe", SST_MIX_RXSPEECH, 7, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sidetone", SST_MIX_RXSPEECH, 8, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Tx", SST_MIX_RXSPEECH, 9, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Rx", SST_MIX_RXSPEECH, 10, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Tone", SST_MIX_RXSPEECH, 11, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Voip", SST_MIX_RXSPEECH, 12, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM0", SST_MIX_RXSPEECH, 13, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM1", SST_MIX_RXSPEECH, 14, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media0", SST_MIX_RXSPEECH, 15, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media1", SST_MIX_RXSPEECH, 16, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media2", SST_MIX_RXSPEECH, 17, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("FM", SST_MIX_RXSPEECH, 18, 1, 0,
+		sst_mix_get, sst_mix_put),
+};
+
+static const struct snd_kcontrol_new sst_mix_voip_controls[] = {
+	SOC_SINGLE_EXT("Modem", SST_MIX_VOIP, 0, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("BT", SST_MIX_VOIP, 1, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec0", SST_MIX_VOIP, 2, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec1", SST_MIX_VOIP, 3, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sprot_L0", SST_MIX_VOIP, 4, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L1", SST_MIX_VOIP, 5, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L2", SST_MIX_VOIP, 6, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Probe", SST_MIX_VOIP, 7, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sidetone", SST_MIX_VOIP, 8, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Tx", SST_MIX_VOIP, 9, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Rx", SST_MIX_VOIP, 10, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Tone", SST_MIX_VOIP, 11, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Voip", SST_MIX_VOIP, 12, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM0", SST_MIX_VOIP, 13, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM1", SST_MIX_VOIP, 14, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media0", SST_MIX_VOIP, 15, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media1", SST_MIX_VOIP, 16, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media2", SST_MIX_VOIP, 17, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("FM", SST_MIX_VOIP, 18, 1, 0,
+		sst_mix_get, sst_mix_put),
+};
+
+static const struct snd_kcontrol_new sst_mix_pcm0_controls[] = {
+	SOC_SINGLE_EXT("Modem", SST_MIX_PCM0, 0, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("BT", SST_MIX_PCM0, 1, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec0", SST_MIX_PCM0, 2, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec1", SST_MIX_PCM0, 3, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sprot_L0", SST_MIX_PCM0, 4, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L1", SST_MIX_PCM0, 5, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L2", SST_MIX_PCM0, 6, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Probe", SST_MIX_PCM0, 7, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sidetone", SST_MIX_PCM0, 8, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Tx", SST_MIX_PCM0, 9, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Rx", SST_MIX_PCM0, 10, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Tone", SST_MIX_PCM0, 11, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Voip", SST_MIX_PCM0, 12, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM0", SST_MIX_PCM0, 13, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM1", SST_MIX_PCM0, 14, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media0", SST_MIX_PCM0, 15, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media1", SST_MIX_PCM0, 16, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media2", SST_MIX_PCM0, 17, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("FM", SST_MIX_PCM0, 18, 1, 0,
+		sst_mix_get, sst_mix_put),
+};
+
+static const struct snd_kcontrol_new sst_mix_pcm1_controls[] = {
+	SOC_SINGLE_EXT("Modem", SST_MIX_PCM1, 0, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("BT", SST_MIX_PCM1, 1, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec0", SST_MIX_PCM1, 2, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec1", SST_MIX_PCM1, 3, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sprot_L0", SST_MIX_PCM1, 4, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L1", SST_MIX_PCM1, 5, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L2", SST_MIX_PCM1, 6, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Probe", SST_MIX_PCM1, 7, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sidetone", SST_MIX_PCM1, 8, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Tx", SST_MIX_PCM1, 9, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Rx", SST_MIX_PCM1, 10, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Tone", SST_MIX_PCM1, 11, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Voip", SST_MIX_PCM1, 12, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM0", SST_MIX_PCM1, 13, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM1", SST_MIX_PCM1, 14, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media0", SST_MIX_PCM1, 15, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media1", SST_MIX_PCM1, 16, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media2", SST_MIX_PCM1, 17, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("FM", SST_MIX_PCM1, 18, 1, 0,
+		sst_mix_get, sst_mix_put),
+};
+
+static const struct snd_kcontrol_new sst_mix_pcm2_controls[] = {
+	SOC_SINGLE_EXT("Modem", SST_MIX_PCM2, 0, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("BT", SST_MIX_PCM2, 1, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec0", SST_MIX_PCM2, 2, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec1", SST_MIX_PCM2, 3, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sprot_L0", SST_MIX_PCM2, 4, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L1", SST_MIX_PCM2, 5, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L2", SST_MIX_PCM2, 6, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Probe", SST_MIX_PCM2, 7, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sidetone", SST_MIX_PCM2, 8, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Tx", SST_MIX_PCM2, 9, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Rx", SST_MIX_PCM2, 10, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Tone", SST_MIX_PCM2, 11, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Voip", SST_MIX_PCM2, 12, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM0", SST_MIX_PCM2, 13, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM1", SST_MIX_PCM2, 14, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media0", SST_MIX_PCM2, 15, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media1", SST_MIX_PCM2, 16, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media2", SST_MIX_PCM2, 17, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("FM", SST_MIX_PCM2, 18, 1, 0,
+		sst_mix_get, sst_mix_put),
+};
+
+static const struct snd_kcontrol_new sst_mix_aware_controls[] = {
+	SOC_SINGLE_EXT("Modem", SST_MIX_AWARE, 0, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("BT", SST_MIX_AWARE, 1, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec0", SST_MIX_AWARE, 2, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec1", SST_MIX_AWARE, 3, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sprot_L0", SST_MIX_AWARE, 4, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L1", SST_MIX_AWARE, 5, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L2", SST_MIX_AWARE, 6, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Probe", SST_MIX_AWARE, 7, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sidetone", SST_MIX_AWARE, 8, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Tx", SST_MIX_AWARE, 9, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Rx", SST_MIX_AWARE, 10, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Tone", SST_MIX_AWARE, 11, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Voip", SST_MIX_AWARE, 12, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM0", SST_MIX_AWARE, 13, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM1", SST_MIX_AWARE, 14, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media0", SST_MIX_AWARE, 15, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media1", SST_MIX_AWARE, 16, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media2", SST_MIX_AWARE, 17, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("FM", SST_MIX_AWARE, 18, 1, 0,
+		sst_mix_get, sst_mix_put),
+};
+
+static const struct snd_kcontrol_new sst_mix_vad_controls[] = {
+	SOC_SINGLE_EXT("Modem", SST_MIX_VAD, 0, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("BT", SST_MIX_VAD, 1, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec0", SST_MIX_VAD, 2, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec1", SST_MIX_VAD, 3, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sprot_L0", SST_MIX_VAD, 4, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L1", SST_MIX_VAD, 5, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L2", SST_MIX_VAD, 6, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Probe", SST_MIX_VAD, 7, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sidetone", SST_MIX_VAD, 8, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Tx", SST_MIX_VAD, 9, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Rx", SST_MIX_VAD, 10, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Tone", SST_MIX_VAD, 11, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Voip", SST_MIX_VAD, 12, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM0", SST_MIX_VAD, 13, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM1", SST_MIX_VAD, 14, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media0", SST_MIX_VAD, 15, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media1", SST_MIX_VAD, 16, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media2", SST_MIX_VAD, 17, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("FM", SST_MIX_VAD, 18, 1, 0,
+		sst_mix_get, sst_mix_put),
+};
+
+static const struct snd_kcontrol_new sst_mix_media0_controls[] = {
+	SOC_SINGLE_EXT("Modem", SST_MIX_MEDIA0, 0, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("BT", SST_MIX_MEDIA0, 1, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec0", SST_MIX_MEDIA0, 2, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec1", SST_MIX_MEDIA0, 3, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sprot_L0", SST_MIX_MEDIA0, 4, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L1", SST_MIX_MEDIA0, 5, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L2", SST_MIX_MEDIA0, 6, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Probe", SST_MIX_MEDIA0, 7, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sidetone", SST_MIX_MEDIA0, 8, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Tx", SST_MIX_MEDIA0, 9, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Rx", SST_MIX_MEDIA0, 10, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Tone", SST_MIX_MEDIA0, 11, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Voip", SST_MIX_MEDIA0, 12, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM0", SST_MIX_MEDIA0, 13, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM1", SST_MIX_MEDIA0, 14, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media0", SST_MIX_MEDIA0, 15, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media1", SST_MIX_MEDIA0, 16, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media2", SST_MIX_MEDIA0, 17, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("FM", SST_MIX_MEDIA0, 18, 1, 0,
+		sst_mix_get, sst_mix_put),
+};
+
+static const struct snd_kcontrol_new sst_mix_media1_controls[] = {
+	SOC_SINGLE_EXT("Modem", SST_MIX_MEDIA1, 0, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("BT", SST_MIX_MEDIA1, 1, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec0", SST_MIX_MEDIA1, 2, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec1", SST_MIX_MEDIA1, 3, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sprot_L0", SST_MIX_MEDIA1, 4, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L1", SST_MIX_MEDIA1, 5, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L2", SST_MIX_MEDIA1, 6, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Probe", SST_MIX_MEDIA1, 7, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sidetone", SST_MIX_MEDIA1, 8, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Tx", SST_MIX_MEDIA1, 9, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Rx", SST_MIX_MEDIA1, 10, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Tone", SST_MIX_MEDIA1, 11, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Voip", SST_MIX_MEDIA1, 12, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM0", SST_MIX_MEDIA1, 13, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM1", SST_MIX_MEDIA1, 14, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media0", SST_MIX_MEDIA1, 15, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media1", SST_MIX_MEDIA1, 16, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media2", SST_MIX_MEDIA1, 17, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("FM", SST_MIX_MEDIA1, 18, 1, 0,
+		sst_mix_get, sst_mix_put),
+};
+
+static const struct snd_kcontrol_new sst_mix_fm_controls[] = {
+	SOC_SINGLE_EXT("Modem", SST_MIX_FM, 0, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("BT", SST_MIX_FM, 1, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec0", SST_MIX_FM, 2, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec1", SST_MIX_FM, 3, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sprot_L0", SST_MIX_FM, 4, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L1", SST_MIX_FM, 5, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L2", SST_MIX_FM, 6, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Probe", SST_MIX_FM, 7, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sidetone", SST_MIX_FM, 8, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Tx", SST_MIX_FM, 9, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Rx", SST_MIX_FM, 10, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Tone", SST_MIX_FM, 11, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Voip", SST_MIX_FM, 12, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM0", SST_MIX_FM, 13, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM1", SST_MIX_FM, 14, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media0", SST_MIX_FM, 15, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media1", SST_MIX_FM, 16, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media2", SST_MIX_FM, 17, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("FM", SST_MIX_FM, 18, 1, 0,
+		sst_mix_get, sst_mix_put),
+};
+
+static const struct snd_kcontrol_new sst_mix_sw_modem =
+	SOC_SINGLE_EXT("Switch", SST_MIX_SWITCH, 0, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_mix_sw_codec0 =
+	SOC_SINGLE_EXT("Switch", SST_MIX_SWITCH, 1, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_mix_sw_codec1 =
+	SOC_SINGLE_EXT("Switch", SST_MIX_SWITCH, 2, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_mix_sw_sprot_l0 =
+	SOC_SINGLE_EXT("Switch", SST_MIX_SWITCH, 3, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_mix_sw_media_l1 =
+	SOC_SINGLE_EXT("Switch", SST_MIX_SWITCH, 4, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_mix_sw_media_l2 =
+	SOC_SINGLE_EXT("Switch", SST_MIX_SWITCH, 5, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_mix_sw_speech_tx =
+	SOC_SINGLE_EXT("Switch", SST_MIX_SWITCH, 6, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_mix_sw_speech_rx =
+	SOC_SINGLE_EXT("Switch", SST_MIX_SWITCH, 7, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_mix_sw_voip =
+	SOC_SINGLE_EXT("Switch", SST_MIX_SWITCH, 8, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_mix_sw_pcm0 =
+	SOC_SINGLE_EXT("Switch", SST_MIX_SWITCH, 9, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_mix_sw_pcm1 =
+	SOC_SINGLE_EXT("Switch", SST_MIX_SWITCH, 10, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_mix_sw_pcm2 =
+	SOC_SINGLE_EXT("Switch", SST_MIX_SWITCH, 11, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_mix_sw_aware =
+	SOC_SINGLE_EXT("Switch", SST_MIX_SWITCH, 12, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_mix_sw_vad =
+	SOC_SINGLE_EXT("Switch", SST_MIX_SWITCH, 13, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_mix_sw_media0 =
+	SOC_SINGLE_EXT("Switch", SST_MIX_SWITCH, 14, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_mix_sw_media1 =
+	SOC_SINGLE_EXT("Switch", SST_MIX_SWITCH, 15, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_mix_sw_fm =
+	SOC_SINGLE_EXT("Switch", SST_MIX_SWITCH, 16, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_out_sw_modem =
+	SOC_SINGLE_EXT("Switch", SST_OUT_SWITCH, 0, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_out_sw_codec0 =
+	SOC_SINGLE_EXT("Switch", SST_OUT_SWITCH, 1, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_out_sw_codec1 =
+	SOC_SINGLE_EXT("Switch", SST_OUT_SWITCH, 2, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_out_sw_speech_tx =
+	SOC_SINGLE_EXT("Switch", SST_OUT_SWITCH, 6, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_out_sw_speech_rx =
+	SOC_SINGLE_EXT("Switch", SST_OUT_SWITCH, 7, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_out_sw_voip =
+	SOC_SINGLE_EXT("Switch", SST_OUT_SWITCH, 8, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_out_sw_pcm0 =
+	SOC_SINGLE_EXT("Switch", SST_OUT_SWITCH, 9, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_out_sw_pcm1 =
+	SOC_SINGLE_EXT("Switch", SST_OUT_SWITCH, 10, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_out_sw_pcm2 =
+	SOC_SINGLE_EXT("Switch", SST_OUT_SWITCH, 11, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_out_sw_aware =
+	SOC_SINGLE_EXT("Switch", SST_OUT_SWITCH, 12, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_out_sw_vad =
+	SOC_SINGLE_EXT("Switch", SST_OUT_SWITCH, 13, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_out_sw_media0 =
+	SOC_SINGLE_EXT("Switch", SST_OUT_SWITCH, 14, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_out_sw_media1 =
+	SOC_SINGLE_EXT("Switch", SST_OUT_SWITCH, 15, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_out_sw_fm =
+	SOC_SINGLE_EXT("Switch", SST_OUT_SWITCH, 16, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_in_sw_modem =
+	SOC_SINGLE_EXT("Switch", SST_IN_SWITCH, 0, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_in_sw_codec0 =
+	SOC_SINGLE_EXT("Switch", SST_IN_SWITCH, 1, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_in_sw_codec1 =
+	SOC_SINGLE_EXT("Switch", SST_IN_SWITCH, 2, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_in_sw_sidetone =
+	SOC_SINGLE_EXT("Switch", SST_IN_SWITCH, 3, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_in_sw_speech_tx =
+	SOC_SINGLE_EXT("Switch", SST_IN_SWITCH, 4, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_in_sw_speech_rx  =
+	SOC_SINGLE_EXT("Switch", SST_IN_SWITCH, 5, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_in_sw_tone =
+	SOC_SINGLE_EXT("Switch", SST_IN_SWITCH, 6, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_in_sw_voip =
+	SOC_SINGLE_EXT("Switch", SST_IN_SWITCH, 7, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_in_sw_pcm0 =
+	SOC_SINGLE_EXT("Switch", SST_IN_SWITCH, 8, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_in_sw_pcm1 =
+	SOC_SINGLE_EXT("Switch", SST_IN_SWITCH, 9, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_in_sw_media0 =
+	SOC_SINGLE_EXT("Switch", SST_IN_SWITCH, 10, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_in_sw_media1 =
+	SOC_SINGLE_EXT("Switch", SST_IN_SWITCH, 11, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_in_sw_media2 =
+	SOC_SINGLE_EXT("Switch", SST_IN_SWITCH, 12, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_in_sw_fm =
+	SOC_SINGLE_EXT("Switch", SST_IN_SWITCH, 13, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_soc_dapm_widget sst_dapm_widgets[] = {
+	SND_SOC_DAPM_INPUT("Modem IN"),
+	SND_SOC_DAPM_INPUT("Codec IN0"),
+	SND_SOC_DAPM_INPUT("Codec IN1"),
+	SND_SOC_DAPM_INPUT("Tone IN"),
+	SND_SOC_DAPM_INPUT("FM IN"),
+	SND_SOC_DAPM_OUTPUT("Modem OUT"),
+	SND_SOC_DAPM_OUTPUT("Codec OUT0"),
+	SND_SOC_DAPM_OUTPUT("Codec OUT1"),
+	SND_SOC_DAPM_OUTPUT("FM OUT"),
+	SND_SOC_DAPM_AIF_IN("Voip IN", "VoIP", 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_IN("Media IN0", "Compress", 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_IN("Media IN1", "PCM", 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("Voip OUT", "VoIP", 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("PCM1 OUT", "Capture", 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("Aware OUT", "Aware", 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("VAD OUT", "VAD", 0, SND_SOC_NOPM, 0, 0),
+
+	/* output mixers */
+	SND_SOC_DAPM_MIXER("MIX Modem", SND_SOC_NOPM, 0, 0,
+		sst_mix_modem_controls, ARRAY_SIZE(sst_mix_modem_controls)),
+	SND_SOC_DAPM_MIXER("MIX Codec0", SND_SOC_NOPM, 0, 0,
+		sst_mix_codec0_controls , ARRAY_SIZE(sst_mix_codec0_controls)),
+	SND_SOC_DAPM_MIXER("MIX Codec1", SND_SOC_NOPM, 0, 0,
+		sst_mix_codec1_controls, ARRAY_SIZE(sst_mix_codec1_controls)),
+	SND_SOC_DAPM_MIXER("MIX Sprot L0", SND_SOC_NOPM, 0, 0,
+		sst_mix_sprot_l0_controls, ARRAY_SIZE(sst_mix_sprot_l0_controls)),
+	SND_SOC_DAPM_MIXER("MIX Media L1", SND_SOC_NOPM, 0, 0,
+		sst_mix_media_l1_controls, ARRAY_SIZE(sst_mix_media_l1_controls)),
+	SND_SOC_DAPM_MIXER("MIX Media L2", SND_SOC_NOPM, 0, 0,
+		sst_mix_media_l2_controls, ARRAY_SIZE(sst_mix_media_l2_controls)),
+	SND_SOC_DAPM_MIXER("MIX Speech Tx", SND_SOC_NOPM, 0, 0,
+		sst_mix_speech_tx_controls, ARRAY_SIZE(sst_mix_speech_tx_controls)),
+	SND_SOC_DAPM_MIXER("MIX Speech Rx", SND_SOC_NOPM, 0, 0,
+		sst_mix_speech_rx_controls, ARRAY_SIZE(sst_mix_speech_rx_controls)),
+	SND_SOC_DAPM_MIXER("MIX Voip", SND_SOC_NOPM, 0, 0,
+		sst_mix_voip_controls, ARRAY_SIZE(sst_mix_voip_controls)),
+	SND_SOC_DAPM_MIXER("MIX PCM0", SND_SOC_NOPM, 0, 0,
+		sst_mix_pcm0_controls, ARRAY_SIZE(sst_mix_pcm0_controls)),
+	SND_SOC_DAPM_MIXER("MIX PCM1", SND_SOC_NOPM, 0, 0,
+		sst_mix_pcm1_controls, ARRAY_SIZE(sst_mix_pcm1_controls)),
+	SND_SOC_DAPM_MIXER("MIX PCM2", SND_SOC_NOPM, 0, 0,
+		sst_mix_pcm2_controls, ARRAY_SIZE(sst_mix_pcm2_controls)),
+	SND_SOC_DAPM_MIXER("MIX Aware", SND_SOC_NOPM, 0, 0,
+		sst_mix_aware_controls, ARRAY_SIZE(sst_mix_aware_controls)),
+	SND_SOC_DAPM_MIXER("MIX VAD", SND_SOC_NOPM, 0, 0,
+		sst_mix_vad_controls, ARRAY_SIZE(sst_mix_vad_controls)),
+	SND_SOC_DAPM_MIXER("MIX Media0", SND_SOC_NOPM, 0, 0,
+		sst_mix_media0_controls, ARRAY_SIZE(sst_mix_media0_controls)),
+	SND_SOC_DAPM_MIXER("MIX Media1", SND_SOC_NOPM, 0, 0,
+		sst_mix_media1_controls, ARRAY_SIZE(sst_mix_media1_controls)),
+	SND_SOC_DAPM_MIXER("MIX FM", SND_SOC_NOPM, 0, 0,
+		sst_mix_fm_controls, ARRAY_SIZE(sst_mix_fm_controls)),
+
+	/* switches for mixer outputs */
+	SND_SOC_DAPM_SWITCH("Mix Modem Switch", SND_SOC_NOPM, 0, 0,
+			&sst_mix_sw_modem),
+	SND_SOC_DAPM_SWITCH("Mix Codec0 Switch", SND_SOC_NOPM, 0, 0,
+			&sst_mix_sw_codec0),
+	SND_SOC_DAPM_SWITCH("Mix Codec1 Switch", SND_SOC_NOPM, 0, 0,
+			&sst_mix_sw_codec1),
+	SND_SOC_DAPM_SWITCH("Mix Sprot L0 Switch", SND_SOC_NOPM, 0, 0,
+			&sst_mix_sw_sprot_l0),
+	SND_SOC_DAPM_SWITCH("Mix Media L1 Switch", SND_SOC_NOPM, 0, 0,
+			&sst_mix_sw_media_l1),
+	SND_SOC_DAPM_SWITCH("Mix Media L2 Switch", SND_SOC_NOPM, 0, 0,
+			&sst_mix_sw_media_l2),
+	SND_SOC_DAPM_SWITCH("Mix Speech Tx Switch", SND_SOC_NOPM, 0, 0,
+			&sst_mix_sw_speech_tx),
+	SND_SOC_DAPM_SWITCH("Mix Speech Rx Switch", SND_SOC_NOPM, 0, 0,
+			&sst_mix_sw_speech_rx),
+	SND_SOC_DAPM_SWITCH("Mix Voip Switch", SND_SOC_NOPM, 0, 0,
+			&sst_mix_sw_voip),
+	SND_SOC_DAPM_SWITCH("Mix PCM0 Switch", SND_SOC_NOPM, 0, 0,
+			&sst_mix_sw_pcm0),
+	SND_SOC_DAPM_SWITCH("Mix PCM1 Switch", SND_SOC_NOPM, 0, 0,
+			&sst_mix_sw_pcm1),
+	SND_SOC_DAPM_SWITCH("Mix PCM2 Switch", SND_SOC_NOPM, 0, 0,
+			&sst_mix_sw_pcm2),
+	SND_SOC_DAPM_SWITCH("Mix Aware Switch", SND_SOC_NOPM, 0, 0,
+			&sst_mix_sw_aware),
+	SND_SOC_DAPM_SWITCH("Mix VAD Switch", SND_SOC_NOPM, 0, 0,
+			&sst_mix_sw_vad),
+	SND_SOC_DAPM_SWITCH("Mix Media0 Switch", SND_SOC_NOPM, 0, 0,
+			&sst_mix_sw_media0),
+	SND_SOC_DAPM_SWITCH("Mix Media1 Switch", SND_SOC_NOPM, 0, 0,
+			&sst_mix_sw_media1),
+	SND_SOC_DAPM_SWITCH("Mix FM Switch", SND_SOC_NOPM, 0, 0,
+			&sst_mix_sw_fm),
+
+	/* output pipeline switches */
+	SND_SOC_DAPM_SWITCH("Out Modem Switch", SND_SOC_NOPM, 0, 0,
+			&sst_out_sw_modem),
+	SND_SOC_DAPM_SWITCH("Out Codec0 Switch", SND_SOC_NOPM, 0, 0,
+			&sst_out_sw_codec0),
+	SND_SOC_DAPM_SWITCH("Out Codec1 Switch", SND_SOC_NOPM, 0, 0,
+			&sst_out_sw_codec1),
+	SND_SOC_DAPM_SWITCH("Out Speech Tx Switch", SND_SOC_NOPM, 0, 0,
+			&sst_out_sw_speech_tx),
+	SND_SOC_DAPM_SWITCH("Out Speech Rx Switch", SND_SOC_NOPM, 0, 0,
+			&sst_out_sw_speech_rx),
+	SND_SOC_DAPM_SWITCH("Out Voip Switch", SND_SOC_NOPM, 0, 0,
+			&sst_out_sw_voip),
+	SND_SOC_DAPM_SWITCH("Out PCM0 Switch", SND_SOC_NOPM, 0, 0,
+			&sst_out_sw_pcm0),
+	SND_SOC_DAPM_SWITCH("Out PCM1 Switch", SND_SOC_NOPM, 0, 0,
+			&sst_out_sw_pcm1),
+	SND_SOC_DAPM_SWITCH("Out PCM2 Switch", SND_SOC_NOPM, 0, 0,
+			&sst_out_sw_pcm2),
+	SND_SOC_DAPM_SWITCH("Out Aware Switch", SND_SOC_NOPM, 0, 0,
+			&sst_out_sw_aware),
+	SND_SOC_DAPM_SWITCH("Out VAD Switch", SND_SOC_NOPM, 0, 0,
+			&sst_out_sw_vad),
+	SND_SOC_DAPM_SWITCH("Out Media0 Switch", SND_SOC_NOPM, 0, 0,
+			&sst_out_sw_media0),
+	SND_SOC_DAPM_SWITCH("Out Media1 Switch", SND_SOC_NOPM, 0, 0,
+			&sst_out_sw_media1),
+	SND_SOC_DAPM_SWITCH("Out FM Switch", SND_SOC_NOPM, 0, 0,
+			&sst_out_sw_fm),
+
+	/* Input pipeline switches */
+	SND_SOC_DAPM_SWITCH("In Modem Switch", SND_SOC_NOPM, 0, 0,
+		       &sst_in_sw_modem),
+	SND_SOC_DAPM_SWITCH("In Codec0 Switch", SND_SOC_NOPM, 0, 0,
+		       &sst_in_sw_codec0),
+	SND_SOC_DAPM_SWITCH("In Codec1 Switch", SND_SOC_NOPM, 0, 0,
+		       &sst_in_sw_codec1),
+	SND_SOC_DAPM_SWITCH("In Speech Tx Switch", SND_SOC_NOPM, 0, 0,
+		       &sst_in_sw_speech_tx),
+	SND_SOC_DAPM_SWITCH("In Speech Rx Switch", SND_SOC_NOPM, 0, 0,
+		       &sst_in_sw_speech_rx),
+	SND_SOC_DAPM_SWITCH("In Tone Switch", SND_SOC_NOPM, 0, 0,
+		       &sst_in_sw_tone),
+	SND_SOC_DAPM_SWITCH("In Voip Switch", SND_SOC_NOPM, 0, 0,
+		       &sst_in_sw_voip),
+	SND_SOC_DAPM_SWITCH("In PCM0 Switch", SND_SOC_NOPM, 0, 0,
+		       &sst_in_sw_pcm0),
+	SND_SOC_DAPM_SWITCH("In PCM1 Switch", SND_SOC_NOPM, 0, 0,
+		       &sst_in_sw_pcm1),
+	SND_SOC_DAPM_SWITCH("In Media0 Switch", SND_SOC_NOPM, 0, 0,
+		       &sst_in_sw_media0),
+	SND_SOC_DAPM_SWITCH("In Media1 Switch", SND_SOC_NOPM, 0, 0,
+		       &sst_in_sw_media1),
+	SND_SOC_DAPM_SWITCH("In Media2 Switch", SND_SOC_NOPM, 0, 0,
+		       &sst_in_sw_media2),
+	SND_SOC_DAPM_SWITCH("In FM Switch", SND_SOC_NOPM, 0, 0,
+		       &sst_in_sw_fm),
+};
+
+static const struct snd_soc_dapm_route intercon[] = {
+	/* media mixer settings */
+	{ "In Media0 Switch", "Switch", "Media IN0"},
+	{ "In Media1 Switch", "Switch", "Media IN1"},
+	{ "MIX Media0", "Media0", "In Media0 Switch"},
+	{ "MIX Media0", "Media1", "In Media1 Switch"},
+	{ "MIX Media0", "Media2", "In Media2 Switch"},
+	{ "MIX Media1", "Media0", "In Media0 Switch"},
+	{ "MIX Media1", "Media1", "In Media1 Switch"},
+	{ "MIX Media1", "Media2", "In Media2 Switch"},
+
+	/* media to main mixer intercon */
+	/* two media paths from media to main */
+	{ "Mix Media0 Switch", "Switch", "MIX Media0"},
+	{ "Out Media0 Switch", "Switch", "Mix Media0 Switch"},
+	{ "In PCM0 Switch", "Switch", "Out Media0 Switch"},
+	{ "Mix Media1 Switch", "Switch", "MIX Media1"},
+	{ "Out Media1 Switch", "Switch", "Mix Media1 Switch"},
+	{ "In PCM1 Switch", "Switch", "Out Media1 Switch"},
+	/* one back from main to media */
+	{ "Mix PCM0 Switch", "Switch", "MIX PCM0"},
+	{ "Out PCM0 Switch", "Switch", "Mix PCM0 Switch"},
+	{ "In Media2 Switch", "Switch", "Out PCM0 Switch"},
+
+	/* main mixer inputs - all inputs connect to mixer */
+	{ "MIX Modem", "Modem", "In Modem Switch"},
+	{ "MIX Modem", "Codec0", "In Codec0 Switch"},
+	{ "MIX Modem", "Codec1", "In Codec1 Switch"},
+	{ "MIX Modem", "Speech_Tx", "In Speech Tx Switch"},
+	{ "MIX Modem", "Speech_Rx", "In Speech Rx Switch"},
+	{ "MIX Modem", "Tone", "In Tone Switch"},
+	{ "MIX Modem", "Voip", "In Voip Switch"},
+	{ "MIX Modem", "PCM0", "In PCM0 Switch"},
+	{ "MIX Modem", "PCM1", "In PCM1 Switch"},
+	{ "MIX Modem", "FM", "In FM Switch"},
+	/* loops have output switches coming back to mixers */
+	{ "MIX Modem", "Sprot_L0", "Mix Sprot L0 Switch"},
+	{ "MIX Modem", "Media_L1", "Mix Media L1 Switch"},
+	{ "MIX Modem", "Media_L2", "Mix Media L2 Switch"},
+	/* sidetone comes from speech out */
+	{ "MIX Modem", "Sidetone", "Mix Speech Tx Switch"},
+
+	{ "MIX Codec0", "Modem", "In Modem Switch"},
+	{ "MIX Codec0", "Codec0", "In Codec0 Switch"},
+	{ "MIX Codec0", "Codec1", "In Codec1 Switch"},
+	{ "MIX Codec0", "Speech_Tx", "In Speech Tx Switch"},
+	{ "MIX Codec0", "Speech_Rx", "In Speech Rx Switch"},
+	{ "MIX Codec0", "Tone", "In Tone Switch"},
+	{ "MIX Codec0", "Voip", "In Voip Switch"},
+	{ "MIX Codec0", "PCM0", "In PCM0 Switch"},
+	{ "MIX Codec0", "PCM1", "In PCM1 Switch"},
+	{ "MIX Codec0", "FM", "In FM Switch"},
+	/* loops have output switches coming back to mixers */
+	{ "MIX Codec0", "Sprot_L0", "Mix Sprot L0 Switch"},
+	{ "MIX Codec0", "Media_L1", "Mix Media L1 Switch"},
+	{ "MIX Codec0", "Media_L2", "Mix Media L2 Switch"},
+	/* sidetone comes from speech out */
+	{ "MIX Codec0", "Sidetone", "Mix Speech Tx Switch"},
+
+	{ "MIX Codec1", "Modem", "In Modem Switch"},
+	{ "MIX Codec1", "Codec0", "In Codec0 Switch"},
+	{ "MIX Codec1", "Codec1", "In Codec1 Switch"},
+	{ "MIX Codec1", "Speech_Tx", "In Speech Tx Switch"},
+	{ "MIX Codec1", "Speech_Rx", "In Speech Rx Switch"},
+	{ "MIX Codec1", "Tone", "In Tone Switch"},
+	{ "MIX Codec1", "Voip", "In Voip Switch"},
+	{ "MIX Codec1", "PCM0", "In PCM0 Switch"},
+	{ "MIX Codec1", "PCM1", "In PCM1 Switch"},
+	{ "MIX Codec1", "FM", "In FM Switch"},
+	/* loops have output switches coming back to mixers */
+	{ "MIX Codec1", "Sprot_L0", "Mix Sprot L0 Switch"},
+	{ "MIX Codec1", "Media_L1", "Mix Media L1 Switch"},
+	{ "MIX Codec1", "Media_L2", "Mix Media L2 Switch"},
+	/* sidetone comes from speech out */
+	{ "MIX Codec1", "Sidetone", "Mix Speech Tx Switch"},
+
+	{ "MIX Sprot L0", "Modem", "In Modem Switch"},
+	{ "MIX Sprot L0", "Codec0", "In Codec0 Switch"},
+	{ "MIX Sprot L0", "Codec1", "In Codec1 Switch"},
+	{ "MIX Sprot L0", "Speech_Tx", "In Speech Tx Switch"},
+	{ "MIX Sprot L0", "Speech_Rx", "In Speech Rx Switch"},
+	{ "MIX Sprot L0", "Tone", "In Tone Switch"},
+	{ "MIX Sprot L0", "Voip", "In Voip Switch"},
+	{ "MIX Sprot L0", "PCM0", "In PCM0 Switch"},
+	{ "MIX Sprot L0", "PCM1", "In PCM1 Switch"},
+	{ "MIX Sprot L0", "FM", "In FM Switch"},
+	/* loops have output switches coming back to mixers */
+	{ "MIX Sprot L0", "Sprot_L0", "Mix Sprot L0 Switch"},
+	{ "MIX Sprot L0", "Media_L1", "Mix Media L1 Switch"},
+	{ "MIX Sprot L0", "Media_L2", "Mix Media L2 Switch"},
+	/* sidetone comes from speech out */
+	{ "MIX Sprot L0", "Sidetone", "Mix Speech Tx Switch"},
+
+	{ "MIX Media L1", "Modem", "In Modem Switch"},
+	{ "MIX Media L1", "Codec0", "In Codec0 Switch"},
+	{ "MIX Media L1", "Codec1", "In Codec1 Switch"},
+	{ "MIX Media L1", "Speech_Tx", "In Speech Tx Switch"},
+	{ "MIX Media L1", "Speech_Rx", "In Speech Rx Switch"},
+	{ "MIX Media L1", "Tone", "In Tone Switch"},
+	{ "MIX Media L1", "Voip", "In Voip Switch"},
+	{ "MIX Media L1", "PCM0", "In PCM0 Switch"},
+	{ "MIX Media L1", "PCM1", "In PCM1 Switch"},
+	{ "MIX Media L1", "FM", "In FM Switch"},
+	/* loops have output switches coming back to mixers */
+	{ "MIX Media L1", "Sprot_L0", "Mix Sprot L0 Switch"},
+	{ "MIX Media L1", "Media_L1", "Mix Media L1 Switch"},
+	{ "MIX Media L1", "Media_L2", "Mix Media L2 Switch"},
+	/* sidetone comes from speech out */
+	{ "MIX Media L1", "Sidetone", "Mix Speech Tx Switch"},
+
+	{ "MIX Media L2", "Modem", "In Modem Switch"},
+	{ "MIX Media L2", "Codec0", "In Codec0 Switch"},
+	{ "MIX Media L2", "Codec1", "In Codec1 Switch"},
+	{ "MIX Media L2", "Speech_Tx", "In Speech Tx Switch"},
+	{ "MIX Media L2", "Speech_Rx", "In Speech Rx Switch"},
+	{ "MIX Media L2", "Tone", "In Tone Switch"},
+	{ "MIX Media L2", "Voip", "In Voip Switch"},
+	{ "MIX Media L2", "PCM0", "In PCM0 Switch"},
+	{ "MIX Media L2", "PCM1", "In PCM1 Switch"},
+	{ "MIX Media L2", "FM", "In FM Switch"},
+	/* loops have output switches coming back to mixers */
+	{ "MIX Media L2", "Sprot_L0", "Mix Sprot L0 Switch"},
+	{ "MIX Media L2", "Media_L1", "Mix Media L1 Switch"},
+	{ "MIX Media L2", "Media_L2", "Mix Media L2 Switch"},
+	/* sidetone comes from speech out */
+	{ "MIX Media L2", "Sidetone", "Mix Speech Tx Switch"},
+
+	{ "MIX Speech Rx", "Modem", "In Modem Switch"},
+	{ "MIX Speech Rx", "Codec0", "In Codec0 Switch"},
+	{ "MIX Speech Rx", "Codec1", "In Codec1 Switch"},
+	{ "MIX Speech Rx", "Speech_Tx", "In Speech Tx Switch"},
+	{ "MIX Speech Rx", "Speech_Rx", "In Speech Rx Switch"},
+	{ "MIX Speech Rx", "Tone", "In Tone Switch"},
+	{ "MIX Speech Rx", "Voip", "In Voip Switch"},
+	{ "MIX Speech Rx", "PCM0", "In PCM0 Switch"},
+	{ "MIX Speech Rx", "PCM1", "In PCM1 Switch"},
+	{ "MIX Speech Rx", "FM", "In FM Switch"},
+	/* loops have output switches coming back to mixers */
+	{ "MIX Speech Rx", "Sprot_L0", "Mix Sprot L0 Switch"},
+	{ "MIX Speech Rx", "Media_L1", "Mix Media L1 Switch"},
+	{ "MIX Speech Rx", "Media_L2", "Mix Media L2 Switch"},
+	/* sidetone comes from speech out */
+	{ "MIX Speech Rx", "Sidetone", "Mix Speech Tx Switch"},
+
+	{ "MIX Speech Tx", "Modem", "In Modem Switch"},
+	{ "MIX Speech Tx", "Codec0", "In Codec0 Switch"},
+	{ "MIX Speech Tx", "Codec1", "In Codec1 Switch"},
+	{ "MIX Speech Tx", "Speech_Tx", "In Speech Tx Switch"},
+	{ "MIX Speech Tx", "Speech_Rx", "In Speech Rx Switch"},
+	{ "MIX Speech Tx", "Tone", "In Tone Switch"},
+	{ "MIX Speech Tx", "Voip", "In Voip Switch"},
+	{ "MIX Speech Tx", "PCM0", "In PCM0 Switch"},
+	{ "MIX Speech Tx", "PCM1", "In PCM1 Switch"},
+	{ "MIX Speech Tx", "FM", "In FM Switch"},
+	/* loops have output switches coming back to mixers */
+	{ "MIX Speech Tx", "Sprot_L0", "Mix Sprot L0 Switch"},
+	{ "MIX Speech Tx", "Media_L1", "Mix Media L1 Switch"},
+	{ "MIX Speech Tx", "Media_L2", "Mix Media L2 Switch"},
+	/* sidetone comes from speech out */
+	{ "MIX Speech Tx", "Sidetone", "Mix Speech Tx Switch"},
+
+	{ "MIX Voip", "Modem", "In Modem Switch"},
+	{ "MIX Voip", "Codec0", "In Codec0 Switch"},
+	{ "MIX Voip", "Codec1", "In Codec1 Switch"},
+	{ "MIX Voip", "Speech_Tx", "In Speech Tx Switch"},
+	{ "MIX Voip", "Speech_Rx", "In Speech Rx Switch"},
+	{ "MIX Voip", "Tone", "In Tone Switch"},
+	{ "MIX Voip", "Voip", "In Voip Switch"},
+	{ "MIX Voip", "PCM0", "In PCM0 Switch"},
+	{ "MIX Voip", "PCM1", "In PCM1 Switch"},
+	{ "MIX Voip", "FM", "In FM Switch"},
+	/* loops have output switches coming back to mixers */
+	{ "MIX Voip", "Sprot_L0", "Mix Sprot L0 Switch"},
+	{ "MIX Voip", "Media_L1", "Mix Media L1 Switch"},
+	{ "MIX Voip", "Media_L2", "Mix Media L2 Switch"},
+	/* sidetone comes from speech out */
+	{ "MIX Voip", "Sidetone", "Mix Speech Tx Switch"},
+
+	{ "MIX PCM0", "Modem", "In Modem Switch"},
+	{ "MIX PCM0", "Codec0", "In Codec0 Switch"},
+	{ "MIX PCM0", "Codec1", "In Codec1 Switch"},
+	{ "MIX PCM0", "Speech_Tx", "In Speech Tx Switch"},
+	{ "MIX PCM0", "Speech_Rx", "In Speech Rx Switch"},
+	{ "MIX PCM0", "Tone", "In Tone Switch"},
+	{ "MIX PCM0", "Voip", "In Voip Switch"},
+	{ "MIX PCM0", "PCM0", "In PCM0 Switch"},
+	{ "MIX PCM0", "PCM1", "In PCM1 Switch"},
+	{ "MIX PCM0", "FM", "In FM Switch"},
+	/* loops have output switches coming back to mixers */
+	{ "MIX PCM0", "Sprot_L0", "Mix Sprot L0 Switch"},
+	{ "MIX PCM0", "Media_L1", "Mix Media L1 Switch"},
+	{ "MIX PCM0", "Media_L2", "Mix Media L2 Switch"},
+	/* sidetone comes from speech out */
+	{ "MIX PCM0", "Sidetone", "Mix Speech Tx Switch"},
+
+	{ "MIX PCM1", "Modem", "In Modem Switch"},
+	{ "MIX PCM1", "Codec0", "In Codec0 Switch"},
+	{ "MIX PCM1", "Codec1", "In Codec1 Switch"},
+	{ "MIX PCM1", "Speech_Tx", "In Speech Tx Switch"},
+	{ "MIX PCM1", "Speech_Rx", "In Speech Rx Switch"},
+	{ "MIX PCM1", "Tone", "In Tone Switch"},
+	{ "MIX PCM1", "Voip", "In Voip Switch"},
+	{ "MIX PCM1", "PCM0", "In PCM0 Switch"},
+	{ "MIX PCM1", "PCM1", "In PCM1 Switch"},
+	{ "MIX PCM1", "FM", "In FM Switch"},
+	/* loops have output switches coming back to mixers */
+	{ "MIX PCM1", "Sprot_L0", "Mix Sprot L0 Switch"},
+	{ "MIX PCM1", "Media_L1", "Mix Media L1 Switch"},
+	{ "MIX PCM1", "Media_L2", "Mix Media L2 Switch"},
+	/* sidetone comes from speech out */
+	{ "MIX PCM1", "Sidetone", "Mix Speech Tx Switch"},
+
+	{ "MIX PCM2", "Modem", "In Modem Switch"},
+	{ "MIX PCM2", "Codec0", "In Codec0 Switch"},
+	{ "MIX PCM2", "Codec1", "In Codec1 Switch"},
+	{ "MIX PCM2", "Speech_Tx", "In Speech Tx Switch"},
+	{ "MIX PCM2", "Speech_Rx", "In Speech Rx Switch"},
+	{ "MIX PCM2", "Tone", "In Tone Switch"},
+	{ "MIX PCM2", "Voip", "In Voip Switch"},
+	{ "MIX PCM2", "PCM0", "In PCM0 Switch"},
+	{ "MIX PCM2", "PCM1", "In PCM1 Switch"},
+	{ "MIX PCM2", "FM", "In FM Switch"},
+	/* loops have output switches coming back to mixers */
+	{ "MIX PCM2", "Sprot_L0", "Mix Sprot L0 Switch"},
+	{ "MIX PCM2", "Media_L1", "Mix Media L1 Switch"},
+	{ "MIX PCM2", "Media_L2", "Mix Media L2 Switch"},
+	/* sidetone comes from speech out */
+	{ "MIX PCM2", "Sidetone", "Mix Speech Tx Switch"},
+
+	{ "MIX Aware", "Modem", "In Modem Switch"},
+	{ "MIX Aware", "Codec0", "In Codec0 Switch"},
+	{ "MIX Aware", "Codec1", "In Codec1 Switch"},
+	{ "MIX Aware", "Speech_Tx", "In Speech Tx Switch"},
+	{ "MIX Aware", "Speech_Rx", "In Speech Rx Switch"},
+	{ "MIX Aware", "Tone", "In Tone Switch"},
+	{ "MIX Aware", "Voip", "In Voip Switch"},
+	{ "MIX Aware", "PCM0", "In PCM0 Switch"},
+	{ "MIX Aware", "PCM1", "In PCM1 Switch"},
+	{ "MIX Aware", "FM", "In FM Switch"},
+	/* loops have output switches coming back to mixers */
+	{ "MIX Aware", "Sprot_L0", "Mix Sprot L0 Switch"},
+	{ "MIX Aware", "Media_L1", "Mix Media L1 Switch"},
+	{ "MIX Aware", "Media_L2", "Mix Media L2 Switch"},
+	/* sidetone comes from speech out */
+	{ "MIX Aware", "Sidetone", "Mix Speech Tx Switch"},
+
+	{ "MIX VAD", "Modem", "In Modem Switch"},
+	{ "MIX VAD", "Codec0", "In Codec0 Switch"},
+	{ "MIX VAD", "Codec1", "In Codec1 Switch"},
+	{ "MIX VAD", "Speech_Tx", "In Speech Tx Switch"},
+	{ "MIX VAD", "Speech_Rx", "In Speech Rx Switch"},
+	{ "MIX VAD", "Tone", "In Tone Switch"},
+	{ "MIX VAD", "Voip", "In Voip Switch"},
+	{ "MIX VAD", "PCM0", "In PCM0 Switch"},
+	{ "MIX VAD", "PCM1", "In PCM1 Switch"},
+	{ "MIX VAD", "FM", "In FM Switch"},
+	/* loops have output switches coming back to mixers */
+	{ "MIX VAD", "Sprot_L0", "Mix Sprot L0 Switch"},
+	{ "MIX VAD", "Media_L1", "Mix Media L1 Switch"},
+	{ "MIX VAD", "Media_L2", "Mix Media L2 Switch"},
+	/* sidetone comes from speech out */
+	{ "MIX VAD", "Sidetone", "Mix Speech Tx Switch"},
+
+	{ "MIX FM", "Modem", "In Modem Switch"},
+	{ "MIX FM", "Codec0", "In Codec0 Switch"},
+	{ "MIX FM", "Codec1", "In Codec1 Switch"},
+	{ "MIX FM", "Speech_Tx", "In Speech Tx Switch"},
+	{ "MIX FM", "Speech_Rx", "In Speech Rx Switch"},
+	{ "MIX FM", "Tone", "In Tone Switch"},
+	{ "MIX FM", "Voip", "In Voip Switch"},
+	{ "MIX FM", "PCM0", "In PCM0 Switch"},
+	{ "MIX FM", "PCM1", "In PCM1 Switch"},
+	{ "MIX FM", "FM", "In FM Switch"},
+	/* loops have output switches coming back to mixers */
+	{ "MIX FM", "Sprot_L0", "Mix Sprot L0 Switch"},
+	{ "MIX FM", "Media_L1", "Mix Media L1 Switch"},
+	{ "MIX FM", "Media_L2", "Mix Media L2 Switch"},
+	/* sidetone comes from speech out */
+	{ "MIX FM", "Sidetone", "Mix Speech Tx Switch"},
+
+	/* now connect the mixers to output switches */
+	{ "Mix Modem Switch", "Switch", "MIX Modem"},
+	{ "Out Modem Switch", "Switch", "Mix Modem Switch"},
+	{ "Mix Codec0 Switch", "Switch", "MIX Codec0"},
+	{ "Out Codec0 Switch", "Switch", "Mix Codec0 Switch"},
+	{ "Mix Codec1 Switch", "Switch", "MIX Codec1"},
+	{ "Out Codec1 Switch", "Switch", "Mix Codec1 Switch"},
+	{ "Mix Speech Tx Switch", "Switch", "MIX Speech Tx"},
+	{ "Out Speech Tx Switch", "Switch", "Mix Speech Tx Switch"},
+	{ "Mix Speech Rx Switch", "Switch", "MIX Speech Rx"},
+	{ "Out Speech Rx Switch", "Switch", "Mix Speech Rx Switch"},
+	{ "Mix Voip Switch", "Switch", "MIX Voip"},
+	{ "Out Voip Switch", "Switch", "Mix Voip Switch"},
+	{ "Mix Aware Switch", "Switch", "MIX Aware"},
+	{ "Out Aware Switch", "Switch", "Mix Aware Switch"},
+	{ "Mix VAD Switch", "Switch", "MIX VAD"},
+	{ "Out VAD Switch", "Switch", "Mix VAD Switch"},
+	{ "Mix FM Switch", "Switch", "MIX FM"},
+	{ "Out FM Switch", "Switch", "Mix FM Switch"},
+	{ "Mix PCM1 Switch", "Switch", "MIX PCM1"},
+	{ "Out PCM1 Switch", "Switch", "Mix PCM1 Switch"},
+	{ "Mix PCM2 Switch", "Switch", "MIX PCM2"},
+	{ "Out PCM2 Switch", "Switch", "Mix PCM2 Switch"},
+
+	/* the loops
+	 * media loops dont have i/p o/p switches, just mixer enable
+	 */
+	{ "Mix Sprot L0 Switch", "Switch", "MIX Sprot L0"},
+	{ "Mix Media L1 Switch", "Switch", "MIX Media L1"},
+	{ "Mix Media L2 Switch", "Switch", "MIX Media L2"},
+	/* so no need as mixer switches are
+	 * inputs to all mixers
+	 * need to connect speech loops here
+	 */
+	{ "In Speech Rx Switch", "Switch", "Out Speech Rx Switch"},
+	{ "In Speech Tx Switch", "Switch", "Out Speech Tx Switch"},
+	/* last one, connect the output switches to ip's
+	 * and op's. Also connect the AIFs
+	 */
+	{ "In Modem Switch", "Switch", "Modem IN"},
+	{ "In Codec0 Switch", "Switch", "Codec IN0"},
+	{ "In Codec1 Switch", "Switch", "Codec IN1"},
+	{ "In Tone Switch", "Switch", "Tone IN"},
+	{ "In FM Switch", "Switch", "FM IN"},
+
+	{ "Modem OUT", NULL, "Out Modem Switch"},
+	{ "Codec OUT0", NULL, "Out Codec0 Switch"},
+	{ "Codec OUT1", NULL, "Out Codec1 Switch"},
+	{ "FM OUT", NULL, "Out FM Switch"},
+
+	{ "In Voip Switch", "Switch", "Voip IN"},
+
+	{ "Voip OUT", NULL, "Out Voip Switch"},
+	{ "PCM1 OUT", NULL, "Out PCM1 Switch"},
+	{ "Aware OUT", NULL, "Out Aware Switch"},
+	{ "VAD OUT", NULL, "Out VAD Switch"},
+};
+
+int sst_byte_control_get(struct snd_kcontrol *kcontrol,
+			 struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_platform *platform = snd_kcontrol_chip(kcontrol);
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+
+	pr_debug("in %s\n", __func__);
+	memcpy(ucontrol->value.bytes.data, sst->byte_stream, SST_MAX_BIN_BYTES);
+	print_hex_dump_bytes(__func__, DUMP_PREFIX_OFFSET,
+			     (const void *)sst->byte_stream, 32);
+	return 0;
+}
+
+static int sst_check_binary_input(char *stream)
+{
+	struct snd_sst_bytes_v2 *bytes = (struct snd_sst_bytes_v2 *)stream;
+
+	if (bytes->len == 0 || bytes->len > 1000) {
+		pr_err("length out of bounds %d\n", bytes->len);
+		return -EINVAL;
+	}
+	if (bytes->type == 0 || bytes->type > SND_SST_BYTES_GET) {
+		pr_err("type out of bounds: %d\n", bytes->type);
+		return -EINVAL;
+	}
+	if (bytes->block > 1) {
+		pr_err("block invalid %d\n", bytes->block);
+		return -EINVAL;
+	}
+	if (bytes->task_id == SST_TASK_ID_NONE || bytes->task_id > SST_TASK_ID_MAX) {
+		pr_err("taskid invalid %d\n", bytes->task_id);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int sst_byte_control_set(struct snd_kcontrol *kcontrol,
+			 struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_platform *platform = snd_kcontrol_chip(kcontrol);
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+	int ret = 0;
+
+	pr_debug("in %s\n", __func__);
+	mutex_lock(&sst->lock);
+	memcpy(sst->byte_stream, ucontrol->value.bytes.data, SST_MAX_BIN_BYTES);
+	if (0 != sst_check_binary_input(sst->byte_stream)) {
+		mutex_unlock(&sst->lock);
+		return -EINVAL;
+	}
+	print_hex_dump_bytes(__func__, DUMP_PREFIX_OFFSET,
+			     (const void *)sst->byte_stream, 32);
+	ret = sst_dsp->ops->set_generic_params(SST_SET_BYTE_STREAM, sst->byte_stream);
+	mutex_unlock(&sst->lock);
+
+	return ret;
+}
+
+static int sst_pipe_id_control_get(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_platform *platform = snd_kcontrol_chip(kcontrol);
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+	int ret = 0;
+
+	ucontrol->value.integer.value[0] = sst->pipe_id;
+
+	return ret;
+}
+
+static int sst_pipe_id_control_set(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_platform *platform = snd_kcontrol_chip(kcontrol);
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+	int ret = 0;
+
+	sst->pipe_id = ucontrol->value.integer.value[0];
+	pr_debug("%s: pipe_id %d", __func__, sst->pipe_id);
+
+	return ret;
+}
+
+/* dB range for mrfld compress volume is -144dB to +36dB.
+ * Gain library expects user input in terms of 0.1dB, for example,
+ * 60 (in decimal) represents 6dB.
+ * MW will pass 2's complement value for negative dB values.
+ */
+static int sst_compr_vol_get(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_platform *platform = snd_kcontrol_chip(kcontrol);
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+	struct sst_algo_int_control_v2 *amc = (void *)kcontrol->private_value;
+	u16 gain;
+	unsigned int gain_offset, ret;
+
+	sst_create_compr_vol_ipc(sst->byte_stream, SND_SST_BYTES_GET, amc);
+	mutex_lock(&sst->lock);
+	ret = sst_dsp->ops->set_generic_params(SST_SET_BYTE_STREAM,
+						sst->byte_stream);
+	mutex_unlock(&sst->lock);
+	if (ret) {
+		pr_err("failed to get compress vol from fw: %d\n", ret);
+		return ret;
+	}
+	gain_offset = sizeof(struct snd_sst_bytes_v2) +
+				sizeof(struct ipc_dsp_hdr);
+
+	/* Get params format for vol ctrl lib, size 6 bytes :
+	 * u16 left_gain, u16 right_gain, u16 ramp
+	 */
+	memcpy(&gain,
+		(unsigned int *)(sst->byte_stream + gain_offset),
+		sizeof(u16));
+	pr_debug("%s: cell_gain = %d\n", __func__, gain);
+	amc->value = gain;
+	ucontrol->value.integer.value[0] = gain;
+	return 0;
+}
+
+static int sst_compr_vol_set(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_platform *platform = snd_kcontrol_chip(kcontrol);
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+	struct sst_algo_int_control_v2 *amc = (void *)kcontrol->private_value;
+	int ret = 0;
+	unsigned int old_val;
+
+	pr_debug("%s: cell_gain = %ld\n", __func__,\
+				ucontrol->value.integer.value[0]);
+	old_val = amc->value;
+	amc->value = ucontrol->value.integer.value[0];
+	sst_create_compr_vol_ipc(sst->byte_stream, SND_SST_BYTES_SET,
+					amc);
+
+	mutex_lock(&sst->lock);
+	ret = sst_dsp->ops->set_generic_params(SST_SET_BYTE_STREAM,
+						sst->byte_stream);
+	mutex_unlock(&sst->lock);
+	if (ret) {
+		pr_err("failed to set compress vol in fw: %d\n", ret);
+		amc->value = old_val;
+		return ret;
+	}
+	return 0;
+}
+
+int sst_vtsv_enroll_set(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_platform *platform = snd_kcontrol_chip(kcontrol);
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+	int ret = 0;
+
+	sst->vtsv_enroll = ucontrol->value.integer.value[0];
+	mutex_lock(&sst->lock);
+	if (sst->vtsv_enroll)
+		ret = sst_dsp->ops->set_generic_params(SST_SET_VTSV_INFO,
+					(void *)&sst->vtsv_enroll);
+	mutex_unlock(&sst->lock);
+	return ret;
+}
+
+int sst_vtsv_enroll_get(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_platform *platform = snd_kcontrol_chip(kcontrol);
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+
+	ucontrol->value.integer.value[0] = sst->vtsv_enroll;
+	return 0;
+}
+
+/* This value corresponds to two's complement value of -10 or -1dB */
+#define SST_COMPR_VOL_MAX_INTEG_GAIN 0xFFF6
+#define SST_COMPR_VOL_MUTE 0xFA60 /* 2's complement of -1440 or -144dB*/
+
+
+static const struct snd_kcontrol_new sst_mrfld_controls[] = {
+	SND_SOC_BYTES_EXT("SST Byte control", SST_MAX_BIN_BYTES,
+		       sst_byte_control_get, sst_byte_control_set),
+	SOC_SINGLE_EXT("SST Pipe_id control", SST_PIPE_CONTROL, 0, 0x9A, 0,
+		sst_pipe_id_control_get, sst_pipe_id_control_set),
+	SST_ALGO_KCONTROL_INT("Compress Volume", SST_COMPRESS_VOL,
+		0, SST_COMPR_VOL_MAX_INTEG_GAIN, 0,
+		sst_compr_vol_get, sst_compr_vol_set,
+		SST_ALGO_VOLUME_CONTROL, PIPE_MEDIA0_IN, 0,
+		SST_COMPR_VOL_MUTE),
+	SOC_SINGLE_BOOL_EXT("SST VTSV Enroll", 0, sst_vtsv_enroll_get,
+		       sst_vtsv_enroll_set),
+};
+
+static DEVICE_ULONG_ATTR(low_latency_threshold, 0644, ll_threshold);
+static DEVICE_ULONG_ATTR(deep_buffer_threshold, 0644, db_threshold);
+
+static struct attribute *device_sysfs_attrs[] = {
+	&dev_attr_low_latency_threshold.attr.attr,
+	&dev_attr_deep_buffer_threshold.attr.attr,
+	NULL,
+};
+
+static struct attribute_group attr_group = {
+	.attrs = device_sysfs_attrs,
+};
+
+int sst_dsp_init(struct snd_soc_platform *platform)
+{
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+	int error = 0;
+
+	sst->byte_stream = devm_kzalloc(platform->dev,
+			SST_MAX_BIN_BYTES, GFP_KERNEL);
+	if (sst->byte_stream == NULL) {
+		pr_err("kzalloc failed\n");
+		return -ENOMEM;
+	}
+
+	sst->widget = devm_kzalloc(platform->dev,
+				   SST_NUM_WIDGETS * sizeof(*sst->widget),
+				   GFP_KERNEL);
+	if (sst->widget == NULL) {
+		pr_err("kzalloc failed\n");
+		return -ENOMEM;
+	}
+
+	sst->vtsv_enroll = false;
+	/* Assign the pointer variables */
+	sst->ll_db.low_latency = &ll_threshold;
+	sst->ll_db.deep_buffer = &db_threshold;
+
+	pr_debug("Default ll thres %lu db thres %lu\n", ll_threshold, db_threshold);
+
+	snd_soc_dapm_new_controls(&platform->dapm, sst_dapm_widgets,
+			ARRAY_SIZE(sst_dapm_widgets));
+	snd_soc_dapm_add_routes(&platform->dapm, intercon,
+			ARRAY_SIZE(intercon));
+	snd_soc_dapm_new_widgets(&platform->dapm);
+	snd_soc_add_platform_controls(platform, sst_mrfld_controls,
+			ARRAY_SIZE(sst_mrfld_controls));
+
+	error = sysfs_create_group(&platform->dev->kobj, &attr_group);
+	if (error)
+		pr_err("failed to create sysfs files  %d\n", error);
+
+	return error;
+}
diff --git a/sound/soc/intel/platform-libs/controls_v2.h b/sound/soc/intel/platform-libs/controls_v2.h
new file mode 100644
index 0000000..86ea859
--- /dev/null
+++ b/sound/soc/intel/platform-libs/controls_v2.h
@@ -0,0 +1,753 @@
+/*
+ *  controls_v2.h - Intel MID Platform driver header file
+ *
+ *  Copyright (C) 2013 Intel Corp
+ *  Author: Ramesh Babu <ramesh.babu.koul@intel.com>
+ *  Author: Omair M Abdullah <omair.m.abdullah@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+
+#ifndef __SST_CONTROLS_V2_H__
+#define __SST_CONTROLS_V2_H__
+
+/*
+ * This section defines the map for the mixer widgets.
+ *
+ * Each mixer will be represented by single value and that value will have each
+ * bit corresponding to one input
+ *
+ * Each out_id will correspond to one mixer and one path. Each input will be
+ * represented by single bit in the register.
+ */
+
+/* mixer register ids here */
+#define SST_MIX(x)		(x)
+
+#define SST_MIX_MODEM		SST_MIX(0)
+#define SST_MIX_BT		SST_MIX(1)
+#define SST_MIX_CODEC0		SST_MIX(2)
+#define SST_MIX_CODEC1		SST_MIX(3)
+#define SST_MIX_LOOP0		SST_MIX(4)
+#define SST_MIX_LOOP1		SST_MIX(5)
+#define SST_MIX_LOOP2		SST_MIX(6)
+#define SST_MIX_PROBE		SST_MIX(7)
+#define SST_MIX_HF_SNS		SST_MIX(8)
+#define SST_MIX_HF		SST_MIX(9)
+#define SST_MIX_SPEECH		SST_MIX(10)
+#define SST_MIX_RXSPEECH	SST_MIX(11)
+#define SST_MIX_VOIP		SST_MIX(12)
+#define SST_MIX_PCM0		SST_MIX(13)
+#define SST_MIX_PCM1		SST_MIX(14)
+#define SST_MIX_PCM2		SST_MIX(15)
+#define SST_MIX_AWARE		SST_MIX(16)
+#define SST_MIX_VAD		SST_MIX(17)
+#define SST_MIX_FM		SST_MIX(18)
+
+#define SST_MIX_MEDIA0		SST_MIX(19)
+#define SST_MIX_MEDIA1		SST_MIX(20)
+
+#define SST_NUM_MIX		(SST_MIX_MEDIA1 + 1)
+
+#define SST_MIX_SWITCH		(SST_NUM_MIX + 1)
+#define SST_OUT_SWITCH		(SST_NUM_MIX + 2)
+#define SST_IN_SWITCH		(SST_NUM_MIX + 3)
+#define SST_MUX_REG		(SST_NUM_MIX + 4)
+#define SST_REG_LAST		(SST_MUX_REG)
+
+/* last entry defines array size */
+#define SST_NUM_WIDGETS		(SST_REG_LAST + 1)
+
+#define SST_BT_FM_MUX_SHIFT	0
+#define SST_VOICE_MODE_SHIFT	1
+#define SST_BT_MODE_SHIFT	2
+
+/* in each mixer register we will define one bit for each input */
+#define SST_MIX_IP(x)		(x)
+
+#define SST_IP_MODEM		SST_MIX_IP(0)
+#define SST_IP_BT		SST_MIX_IP(1)
+#define SST_IP_CODEC0		SST_MIX_IP(2)
+#define SST_IP_CODEC1		SST_MIX_IP(3)
+#define SST_IP_LOOP0		SST_MIX_IP(4)
+#define SST_IP_LOOP1		SST_MIX_IP(5)
+#define SST_IP_LOOP2		SST_MIX_IP(6)
+#define SST_IP_PROBE		SST_MIX_IP(7)
+#define SST_IP_SIDETONE		SST_MIX_IP(8)
+#define SST_IP_TXSPEECH		SST_MIX_IP(9)
+#define SST_IP_SPEECH		SST_MIX_IP(10)
+#define SST_IP_TONE		SST_MIX_IP(11)
+#define SST_IP_VOIP		SST_MIX_IP(12)
+#define SST_IP_PCM0		SST_MIX_IP(13)
+#define SST_IP_PCM1		SST_MIX_IP(14)
+#define SST_IP_LOW_PCM0		SST_MIX_IP(15)
+#define SST_IP_FM		SST_MIX_IP(16)
+#define SST_IP_MEDIA0		SST_MIX_IP(17)
+#define SST_IP_MEDIA1		SST_MIX_IP(18)
+#define SST_IP_MEDIA2		SST_MIX_IP(19)
+#define SST_IP_MEDIA3		SST_MIX_IP(20)
+
+#define SST_IP_LAST		SST_IP_MEDIA3
+
+#define SST_SWM_INPUT_COUNT	(SST_IP_LAST + 1)
+#define SST_CMD_SWM_MAX_INPUTS	6
+
+#define SST_PATH_ID_SHIFT	8
+#define SST_DEFAULT_LOCATION_ID	0xFFFF
+#define SST_DEFAULT_CELL_NBR	0xFF
+#define SST_DEFAULT_MODULE_ID	0xFFFF
+
+/*
+ * Audio DSP Path Ids. Specified by the audio DSP FW
+ */
+enum sst_path_index {
+	SST_PATH_INDEX_MODEM_OUT                = (0x00 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_BT_OUT                   = (0x01 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_CODEC_OUT0               = (0x02 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_CODEC_OUT1               = (0x03 << SST_PATH_ID_SHIFT),
+
+	SST_PATH_INDEX_SPROT_LOOP_OUT           = (0x04 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_MEDIA_LOOP1_OUT          = (0x05 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_MEDIA_LOOP2_OUT          = (0x06 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_PROBE_OUT                = (0x07 << SST_PATH_ID_SHIFT),
+
+	SST_PATH_INDEX_HF_SNS_OUT               = (0x08 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_VOICE_UPLINK_REF2	= (0x08 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_HF_OUT                   = (0x09 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_VOICE_UPLINK_REF1	= (0x09 << SST_PATH_ID_SHIFT),
+
+	SST_PATH_INDEX_SPEECH_OUT               = (0x0A << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_VOICE_UPLINK		= (0x0A << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_RX_SPEECH_OUT            = (0x0B << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_VOICE_DOWNLINK		= (0x0B << SST_PATH_ID_SHIFT),
+
+	SST_PATH_INDEX_VOIP_OUT                 = (0x0C << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_PCM0_OUT                 = (0x0D << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_PCM1_OUT                 = (0x0E << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_PCM2_OUT                 = (0x0F << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_AWARE_OUT                = (0x10 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_VAD_OUT                  = (0x11 << SST_PATH_ID_SHIFT),
+
+	SST_PATH_INDEX_MEDIA0_OUT               = (0x12 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_MEDIA1_OUT               = (0x13 << SST_PATH_ID_SHIFT),
+
+	SST_PATH_INDEX_FM_OUT                   = (0x14 << SST_PATH_ID_SHIFT),
+
+	SST_PATH_INDEX_PROBE1_PIPE_OUT		= (0x15 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_PROBE2_PIPE_OUT		= (0x16 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_PROBE3_PIPE_OUT		= (0x17 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_PROBE4_PIPE_OUT		= (0x18 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_PROBE5_PIPE_OUT		= (0x19 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_PROBE6_PIPE_OUT		= (0x1A << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_PROBE7_PIPE_OUT		= (0x1B << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_PROBE8_PIPE_OUT		= (0x1C << SST_PATH_ID_SHIFT),
+
+	SST_PATH_INDEX_SIDETONE_OUT		= (0x1D << SST_PATH_ID_SHIFT),
+
+	/* Start of input paths */
+	SST_PATH_INDEX_MODEM_IN                 = (0x80 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_BT_IN                    = (0x81 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_CODEC_IN0                = (0x82 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_CODEC_IN1                = (0x83 << SST_PATH_ID_SHIFT),
+
+	SST_PATH_INDEX_SPROT_LOOP_IN            = (0x84 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_MEDIA_LOOP1_IN           = (0x85 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_MEDIA_LOOP2_IN           = (0x86 << SST_PATH_ID_SHIFT),
+
+	SST_PATH_INDEX_PROBE_IN                 = (0x87 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_SIDETONE_IN              = (0x88 << SST_PATH_ID_SHIFT),
+
+	SST_PATH_INDEX_TX_SPEECH_IN             = (0x89 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_SPEECH_IN                = (0x8A << SST_PATH_ID_SHIFT),
+
+	SST_PATH_INDEX_TONE_IN                  = (0x8B << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_VOIP_IN                  = (0x8C << SST_PATH_ID_SHIFT),
+
+	SST_PATH_INDEX_PCM0_IN                  = (0x8D << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_PCM1_IN                  = (0x8E << SST_PATH_ID_SHIFT),
+
+	SST_PATH_INDEX_MEDIA0_IN                = (0x8F << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_MEDIA1_IN                = (0x90 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_MEDIA2_IN                = (0x91 << SST_PATH_ID_SHIFT),
+
+	SST_PATH_INDEX_FM_IN                    = (0x92 << SST_PATH_ID_SHIFT),
+
+	SST_PATH_INDEX_PROBE1_PIPE_IN           = (0x93 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_PROBE2_PIPE_IN           = (0x94 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_PROBE3_PIPE_IN           = (0x95 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_PROBE4_PIPE_IN           = (0x96 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_PROBE5_PIPE_IN           = (0x97 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_PROBE6_PIPE_IN           = (0x98 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_PROBE7_PIPE_IN           = (0x99 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_PROBE8_PIPE_IN           = (0x9A << SST_PATH_ID_SHIFT),
+
+	SST_PATH_INDEX_MEDIA3_IN		= (0x9C << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_LOW_PCM0_IN		= (0x9D << SST_PATH_ID_SHIFT),
+
+	SST_PATH_INDEX_RESERVED                 = (0xFF << SST_PATH_ID_SHIFT),
+};
+
+/*
+ * switch matrix input path IDs
+ */
+enum sst_swm_inputs {
+	SST_SWM_IN_MODEM	= (SST_PATH_INDEX_MODEM_IN	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_IN_BT		= (SST_PATH_INDEX_BT_IN		  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_IN_CODEC0	= (SST_PATH_INDEX_CODEC_IN0	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_IN_CODEC1	= (SST_PATH_INDEX_CODEC_IN1	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_IN_SPROT_LOOP	= (SST_PATH_INDEX_SPROT_LOOP_IN	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_IN_MEDIA_LOOP1	= (SST_PATH_INDEX_MEDIA_LOOP1_IN  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_IN_MEDIA_LOOP2	= (SST_PATH_INDEX_MEDIA_LOOP2_IN  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_IN_PROBE	= (SST_PATH_INDEX_PROBE_IN	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_IN_SIDETONE	= (SST_PATH_INDEX_SIDETONE_IN	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_IN_TXSPEECH	= (SST_PATH_INDEX_TX_SPEECH_IN	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_IN_SPEECH	= (SST_PATH_INDEX_SPEECH_IN	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_IN_TONE		= (SST_PATH_INDEX_TONE_IN	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_IN_VOIP		= (SST_PATH_INDEX_VOIP_IN	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_IN_PCM0		= (SST_PATH_INDEX_PCM0_IN	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_IN_PCM1		= (SST_PATH_INDEX_PCM1_IN	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_IN_MEDIA0	= (SST_PATH_INDEX_MEDIA0_IN	  | SST_DEFAULT_CELL_NBR), /* Part of Media Mixer */
+	SST_SWM_IN_MEDIA1	= (SST_PATH_INDEX_MEDIA1_IN	  | SST_DEFAULT_CELL_NBR), /* Part of Media Mixer */
+	SST_SWM_IN_MEDIA2	= (SST_PATH_INDEX_MEDIA2_IN	  | SST_DEFAULT_CELL_NBR), /* Part of Media Mixer */
+	SST_SWM_IN_FM		= (SST_PATH_INDEX_FM_IN		  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_IN_MEDIA3	= (SST_PATH_INDEX_MEDIA3_IN	  | SST_DEFAULT_CELL_NBR), /* Part of Media Mixer */
+	SST_SWM_IN_LOW_PCM0	= (SST_PATH_INDEX_LOW_PCM0_IN	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_IN_END		= (SST_PATH_INDEX_RESERVED	  | SST_DEFAULT_CELL_NBR)
+};
+
+/*
+ * switch matrix output path IDs
+ */
+enum sst_swm_outputs {
+	SST_SWM_OUT_MODEM	= (SST_PATH_INDEX_MODEM_OUT	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_OUT_BT		= (SST_PATH_INDEX_BT_OUT	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_OUT_CODEC0	= (SST_PATH_INDEX_CODEC_OUT0	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_OUT_CODEC1	= (SST_PATH_INDEX_CODEC_OUT1	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_OUT_SPROT_LOOP	= (SST_PATH_INDEX_SPROT_LOOP_OUT  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_OUT_MEDIA_LOOP1	= (SST_PATH_INDEX_MEDIA_LOOP1_OUT | SST_DEFAULT_CELL_NBR),
+	SST_SWM_OUT_MEDIA_LOOP2	= (SST_PATH_INDEX_MEDIA_LOOP2_OUT | SST_DEFAULT_CELL_NBR),
+	SST_SWM_OUT_PROBE	= (SST_PATH_INDEX_PROBE_OUT	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_OUT_HF_SNS	= (SST_PATH_INDEX_HF_SNS_OUT	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_OUT_HF		= (SST_PATH_INDEX_HF_OUT	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_OUT_SPEECH	= (SST_PATH_INDEX_SPEECH_OUT	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_OUT_RXSPEECH	= (SST_PATH_INDEX_RX_SPEECH_OUT	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_OUT_VOIP	= (SST_PATH_INDEX_VOIP_OUT	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_OUT_PCM0	= (SST_PATH_INDEX_PCM0_OUT	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_OUT_PCM1	= (SST_PATH_INDEX_PCM1_OUT	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_OUT_PCM2	= (SST_PATH_INDEX_PCM2_OUT	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_OUT_AWARE	= (SST_PATH_INDEX_AWARE_OUT	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_OUT_VAD		= (SST_PATH_INDEX_VAD_OUT	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_OUT_MEDIA0	= (SST_PATH_INDEX_MEDIA0_OUT	  | SST_DEFAULT_CELL_NBR), /* Part of Media Mixer */
+	SST_SWM_OUT_MEDIA1	= (SST_PATH_INDEX_MEDIA1_OUT	  | SST_DEFAULT_CELL_NBR), /* Part of Media Mixer */
+	SST_SWM_OUT_FM		= (SST_PATH_INDEX_FM_OUT	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_OUT_END		= (SST_PATH_INDEX_RESERVED	  | SST_DEFAULT_CELL_NBR),
+};
+
+enum sst_ipc_msg {
+	SST_IPC_IA_CMD = 1,
+	SST_IPC_IA_SET_PARAMS,
+	SST_IPC_IA_GET_PARAMS,
+};
+
+enum sst_cmd_type {
+	SST_CMD_BYTES_SET = 1,
+	SST_CMD_BYTES_GET = 2,
+};
+
+enum sst_task {
+	SST_TASK_SBA = 1,
+	SST_TASK_FBA_UL,
+	SST_TASK_MMX,
+	SST_TASK_AWARE,
+	SST_TASK_FBA_DL,
+};
+
+enum sst_type {
+	SST_TYPE_CMD = 1,
+	SST_TYPE_PARAMS,
+};
+
+enum sst_flag {
+	SST_FLAG_BLOCKED = 1,
+	SST_FLAG_NONBLOCK,
+};
+
+/*
+ * Enumeration for indexing the gain cells in VB_SET_GAIN DSP command
+ */
+enum sst_gain_index {
+	/* GAIN IDs for SB task start here */
+	SST_GAIN_INDEX_MODEM_OUT,
+	SST_GAIN_INDEX_MODEM_IN,
+	SST_GAIN_INDEX_BT_OUT,
+	SST_GAIN_INDEX_BT_IN,
+	SST_GAIN_INDEX_FM_OUT,
+
+	SST_GAIN_INDEX_FM_IN,
+	SST_GAIN_INDEX_CODEC_OUT0,
+	SST_GAIN_INDEX_CODEC_OUT1,
+	SST_GAIN_INDEX_CODEC_IN0,
+	SST_GAIN_INDEX_CODEC_IN1,
+
+	SST_GAIN_INDEX_SPROT_LOOP_OUT,
+	SST_GAIN_INDEX_MEDIA_LOOP1_OUT,
+	SST_GAIN_INDEX_MEDIA_LOOP2_OUT,
+	SST_GAIN_INDEX_RX_SPEECH_OUT,
+	SST_GAIN_INDEX_TX_SPEECH_IN,
+
+	SST_GAIN_INDEX_SPEECH_OUT,
+	SST_GAIN_INDEX_SPEECH_IN,
+	SST_GAIN_INDEX_HF_OUT,
+	SST_GAIN_INDEX_HF_SNS_OUT,
+	SST_GAIN_INDEX_TONE_IN,
+
+	SST_GAIN_INDEX_SIDETONE_IN,
+	SST_GAIN_INDEX_PROBE_OUT,
+	SST_GAIN_INDEX_PROBE_IN,
+	SST_GAIN_INDEX_PCM0_IN_LEFT,
+	SST_GAIN_INDEX_PCM0_IN_RIGHT,
+
+	SST_GAIN_INDEX_PCM1_OUT_LEFT,
+	SST_GAIN_INDEX_PCM1_OUT_RIGHT,
+	SST_GAIN_INDEX_PCM1_IN_LEFT,
+	SST_GAIN_INDEX_PCM1_IN_RIGHT,
+	SST_GAIN_INDEX_PCM2_OUT_LEFT,
+
+	SST_GAIN_INDEX_PCM2_OUT_RIGHT,
+	SST_GAIN_INDEX_VOIP_OUT,
+	SST_GAIN_INDEX_VOIP_IN,
+	SST_GAIN_INDEX_AWARE_OUT,
+	SST_GAIN_INDEX_VAD_OUT,
+
+	/* Gain IDs for FBA task start here */
+	SST_GAIN_INDEX_VOICE_UL,
+
+	/* Gain IDs for MMX task start here */
+	SST_GAIN_INDEX_MEDIA0_IN_LEFT,
+	SST_GAIN_INDEX_MEDIA0_IN_RIGHT,
+	SST_GAIN_INDEX_MEDIA1_IN_LEFT,
+	SST_GAIN_INDEX_MEDIA1_IN_RIGHT,
+
+	SST_GAIN_INDEX_MEDIA2_IN_LEFT,
+	SST_GAIN_INDEX_MEDIA2_IN_RIGHT,
+
+	SST_GAIN_INDEX_GAIN_END
+};
+
+/*
+ * Audio DSP module IDs specified by FW spec
+ * TODO: Update with all modules
+ */
+enum sst_module_id {
+	SST_MODULE_ID_PCM		  = 0x0001,
+	SST_MODULE_ID_MP3		  = 0x0002,
+	SST_MODULE_ID_MP24		  = 0x0003,
+	SST_MODULE_ID_AAC		  = 0x0004,
+	SST_MODULE_ID_AACP		  = 0x0005,
+	SST_MODULE_ID_EAACP		  = 0x0006,
+	SST_MODULE_ID_WMA9		  = 0x0007,
+	SST_MODULE_ID_WMA10		  = 0x0008,
+	SST_MODULE_ID_WMA10P		  = 0x0009,
+	SST_MODULE_ID_RA		  = 0x000A,
+	SST_MODULE_ID_DDAC3		  = 0x000B,
+	SST_MODULE_ID_TRUE_HD		  = 0x000C,
+	SST_MODULE_ID_HD_PLUS		  = 0x000D,
+
+	SST_MODULE_ID_SRC		  = 0x0064,
+	SST_MODULE_ID_DOWNMIX		  = 0x0066,
+	SST_MODULE_ID_GAIN_CELL		  = 0x0067,
+	SST_MODULE_ID_SPROT		  = 0x006D,
+	SST_MODULE_ID_BASS_BOOST	  = 0x006E,
+	SST_MODULE_ID_STEREO_WDNG	  = 0x006F,
+	SST_MODULE_ID_AV_REMOVAL	  = 0x0070,
+	SST_MODULE_ID_MIC_EQ		  = 0x0071,
+	SST_MODULE_ID_SPL		  = 0x0072,
+	SST_MODULE_ID_ALGO_VTSV           = 0x0073,
+	SST_MODULE_ID_NR		  = 0x0076,
+	SST_MODULE_ID_BWX		  = 0x0077,
+	SST_MODULE_ID_DRP		  = 0x0078,
+	SST_MODULE_ID_MDRP		  = 0x0079,
+
+	SST_MODULE_ID_ANA		  = 0x007A,
+	SST_MODULE_ID_AEC		  = 0x007B,
+	SST_MODULE_ID_NR_SNS		  = 0x007C,
+	SST_MODULE_ID_SER		  = 0x007D,
+	SST_MODULE_ID_AGC		  = 0x007E,
+
+	SST_MODULE_ID_CNI		  = 0x007F,
+	SST_MODULE_ID_CONTEXT_ALGO_AWARE  = 0x0080,
+	SST_MODULE_ID_FIR_24		  = 0x0081,
+	SST_MODULE_ID_IIR_24		  = 0x0082,
+
+	SST_MODULE_ID_ASRC		  = 0x0083,
+	SST_MODULE_ID_TONE_GEN		  = 0x0084,
+	SST_MODULE_ID_BMF		  = 0x0086,
+	SST_MODULE_ID_EDL		  = 0x0087,
+	SST_MODULE_ID_GLC		  = 0x0088,
+
+	SST_MODULE_ID_FIR_16		  = 0x0089,
+	SST_MODULE_ID_IIR_16		  = 0x008A,
+	SST_MODULE_ID_DNR		  = 0x008B,
+
+	SST_MODULE_ID_VIRTUALIZER	  = 0x008C,
+	SST_MODULE_ID_VISUALIZATION	  = 0x008D,
+	SST_MODULE_ID_LOUDNESS_OPTIMIZER  = 0x008E,
+	SST_MODULE_ID_REVERBERATION	  = 0x008F,
+
+	SST_MODULE_ID_CNI_TX		  = 0x0090,
+	SST_MODULE_ID_REF_LINE		  = 0x0091,
+	SST_MODULE_ID_VOLUME		  = 0x0092,
+	SST_MODULE_ID_FILT_DCR		  = 0x0094,
+	SST_MODULE_ID_SLV		  = 0x009A,
+	SST_MODULE_ID_NLF		  = 0x009B,
+	SST_MODULE_ID_TNR		  = 0x009C,
+	SST_MODULE_ID_WNR		  = 0x009D,
+
+	SST_MODULE_ID_LOG		  = 0xFF00,
+
+	SST_MODULE_ID_TASK		  = 0xFFFF,
+};
+
+enum sst_cmd {
+	SBA_IDLE		= 14,
+	SBA_VB_SET_SPEECH_PATH	= 26,
+	MMX_SET_GAIN		= 33,
+	SBA_VB_SET_GAIN		= 33,
+	FBA_VB_RX_CNI		= 35,
+	MMX_SET_GAIN_TIMECONST	= 36,
+	SBA_VB_SET_TIMECONST	= 36,
+	FBA_VB_ANA		= 37,
+	FBA_VB_SET_FIR		= 38,
+	FBA_VB_SET_IIR		= 39,
+	SBA_VB_START_TONE	= 41,
+	SBA_VB_STOP_TONE	= 42,
+	FBA_VB_AEC		= 47,
+	FBA_VB_NR_UL		= 48,
+	FBA_VB_AGC		= 49,
+	FBA_VB_WNR		= 52,
+	FBA_VB_SLV		= 53,
+	FBA_VB_NR_DL		= 55,
+	SBA_PROBE		= 66,
+	MMX_PROBE		= 66,
+	FBA_VB_SET_BIQUAD_D_C	= 69,
+	FBA_VB_DUAL_BAND_COMP	= 70,
+	FBA_VB_SNS		= 72,
+	FBA_VB_SER		= 78,
+	FBA_VB_TX_CNI		= 80,
+	SBA_VB_START		= 85,
+	FBA_VB_SET_REF_LINE	= 94,
+	FBA_VB_SET_DELAY_LINE	= 95,
+	FBA_VB_BWX		= 104,
+	FBA_VB_GMM		= 105,
+	FBA_VB_GLC		= 107,
+	FBA_VB_BMF		= 111,
+	FBA_VB_DNR		= 113,
+	MMX_SET_SWM		= 114,
+	SBA_SET_SWM		= 114,
+	SBA_SET_MDRP            = 116,
+	SBA_HW_SET_SSP		= 117,
+	SBA_SET_MEDIA_LOOP_MAP	= 118,
+	SBA_SET_MEDIA_PATH	= 119,
+	MMX_SET_MEDIA_PATH	= 119,
+	FBA_VB_TNR_UL		= 119,
+	FBA_VB_TNR_DL		= 121,
+	FBA_VB_NLF		= 125,
+	SBA_VB_LPRO		= 126,
+	FBA_VB_MDRP		= 127,
+	SBA_VB_SET_FIR          = 128,
+	SBA_VB_SET_IIR          = 129,
+	SBA_SET_SSP_SLOT_MAP	= 130,
+	AWARE_ENV_CLASS_PARAMS	= 130,
+	VAD_ENV_CLASS_PARAMS	= 2049,
+};
+
+enum sst_dsp_switch {
+	SST_SWITCH_OFF = 0,
+	SST_SWITCH_ON = 3,
+};
+
+enum sst_path_switch {
+	SST_PATH_OFF = 0,
+	SST_PATH_ON = 1,
+};
+
+enum sst_swm_state {
+	SST_SWM_OFF = 0,
+	SST_SWM_ON = 3,
+};
+
+#define SST_FILL_LOCATION_IDS(dst, cell_idx, pipe_id)		do {	\
+		dst.location_id.p.cell_nbr_idx = (cell_idx);		\
+		dst.location_id.p.path_id = (pipe_id);			\
+	} while (0)
+#define SST_FILL_LOCATION_ID(dst, loc_id)				(\
+	dst.location_id.f = (loc_id))
+#define SST_FILL_MODULE_ID(dst, mod_id)					(\
+	dst.module_id = (mod_id))
+
+#define SST_FILL_DESTINATION1(dst, id)				do {	\
+		SST_FILL_LOCATION_ID(dst, (id) & 0xFFFF);		\
+		SST_FILL_MODULE_ID(dst, ((id) & 0xFFFF0000) >> 16);	\
+	} while (0)
+#define SST_FILL_DESTINATION2(dst, loc_id, mod_id)		do {	\
+		SST_FILL_LOCATION_ID(dst, loc_id);			\
+		SST_FILL_MODULE_ID(dst, mod_id);			\
+	} while (0)
+#define SST_FILL_DESTINATION3(dst, cell_idx, path_id, mod_id)	do {	\
+		SST_FILL_LOCATION_IDS(dst, cell_idx, path_id);		\
+		SST_FILL_MODULE_ID(dst, mod_id);			\
+	} while (0)
+
+#define SST_FILL_DESTINATION(level, dst, ...)				\
+	SST_FILL_DESTINATION##level(dst, __VA_ARGS__)
+#define SST_FILL_DEFAULT_DESTINATION(dst)				\
+	SST_FILL_DESTINATION(2, dst, SST_DEFAULT_LOCATION_ID, SST_DEFAULT_MODULE_ID)
+
+struct sst_destination_id {
+	union sst_location_id {
+		struct {
+			u8 cell_nbr_idx;	/* module index */
+			u8 path_id;		/* pipe_id */
+		} __packed	p;		/* part */
+		u16		f;		/* full */
+	} __packed location_id;
+	u16	   module_id;
+} __packed;
+
+struct sst_dsp_header {
+	struct sst_destination_id dst;
+	u16 command_id;
+	u16 length;
+} __packed;
+
+/*
+ *
+ * Common Commands
+ *
+ */
+struct sst_cmd_generic {
+	struct sst_dsp_header header;
+} __packed;
+
+struct swm_input_ids {
+	struct sst_destination_id input_id;
+} __packed;
+
+struct sst_cmd_set_swm {
+	struct sst_dsp_header header;
+	struct sst_destination_id output_id;
+	u16    switch_state;
+	u16    nb_inputs;
+	struct swm_input_ids input[SST_CMD_SWM_MAX_INPUTS];
+} __packed;
+
+struct sst_cmd_set_media_path {
+	struct sst_dsp_header header;
+	u16    switch_state;
+} __packed;
+
+struct pcm_cfg {
+		u8 s_length:2;
+		u8 rate:3;
+		u8 format:3;
+} __packed;
+
+struct sst_cmd_set_speech_path {
+	struct sst_dsp_header header;
+	u16    switch_state;
+	struct {
+		u16 rsvd:8;
+		struct pcm_cfg cfg;
+	} config;
+} __packed;
+
+struct gain_cell {
+	struct sst_destination_id dest;
+	s16 cell_gain_left;
+	s16 cell_gain_right;
+	u16 gain_time_constant;
+} __packed;
+
+#define NUM_GAIN_CELLS 1
+struct sst_cmd_set_gain_dual {
+	struct sst_dsp_header header;
+	u16    gain_cell_num;
+	struct gain_cell cell_gains[NUM_GAIN_CELLS];
+} __packed;
+
+struct sst_cmd_set_params {
+	struct sst_destination_id dst;
+	u16 command_id;
+	char params[0];
+} __packed;
+
+/*
+ *
+ * Media (MMX) commands
+ *
+ */
+
+/*
+ *
+ * SBA commands
+ *
+ */
+struct sst_cmd_sba_vb_start {
+	struct sst_dsp_header header;
+} __packed;
+
+union sba_media_loop_params {
+	struct {
+		u16 rsvd:8;
+		struct pcm_cfg cfg;
+	} part;
+	u16 full;
+} __packed;
+
+struct sst_cmd_sba_set_media_loop_map {
+	struct	sst_dsp_header header;
+	u16	switch_state;
+	union	sba_media_loop_params param;
+	u16	map;
+} __packed;
+
+struct sst_cmd_tone_stop {
+	struct	sst_dsp_header header;
+	u16	switch_state;
+} __packed;
+
+enum sst_ssp_mode {
+	SSP_MODE_MASTER = 0,
+	SSP_MODE_SLAVE = 1,
+};
+
+enum sst_ssp_pcm_mode {
+	SSP_PCM_MODE_NORMAL = 0,
+	SSP_PCM_MODE_NETWORK = 1,
+};
+
+enum sst_ssp_duplex {
+	SSP_DUPLEX = 0,
+	SSP_RX = 1,
+	SSP_TX = 2,
+};
+
+enum sst_ssp_fs_frequency {
+	SSP_FS_8_KHZ = 0,
+	SSP_FS_16_KHZ = 1,
+	SSP_FS_44_1_KHZ = 2,
+	SSP_FS_48_KHZ = 3,
+};
+
+enum sst_ssp_fs_polarity {
+	SSP_FS_ACTIVE_LOW = 0,
+	SSP_FS_ACTIVE_HIGH = 1,
+};
+
+enum sst_ssp_protocol {
+	SSP_MODE_PCM = 0,
+	SSP_MODE_I2S = 1,
+};
+
+enum sst_ssp_port_id {
+	SSP_MODEM = 0,
+	SSP_BT = 1,
+	SSP_FM = 2,
+	SSP_CODEC = 3,
+};
+
+struct sst_cmd_sba_hw_set_ssp {
+	struct sst_dsp_header header;
+	u16 selection;			/* 0:SSP0(def), 1:SSP1, 2:SSP2 */
+
+	u16 switch_state;
+
+	u16 nb_bits_per_slots:6;        /* 0-32 bits, 24 (def) */
+	u16 nb_slots:4;			/* 0-8: slots per frame  */
+	u16 mode:3;			/* 0:Master, 1: Slave  */
+	u16 duplex:3;
+
+	u16 active_tx_slot_map:8;       /* Bit map, 0:off, 1:on */
+	u16 reserved1:8;
+
+	u16 active_rx_slot_map:8;       /* Bit map 0: Off, 1:On */
+	u16 reserved2:8;
+
+	u16 frame_sync_frequency;
+
+	u16 frame_sync_polarity:8;
+	u16 data_polarity:8;
+
+	u16 frame_sync_width;           /* 1 to N clocks */
+	u16 ssp_protocol:8;
+	u16 start_delay:8;		/* Start delay in terms of clock ticks */
+} __packed;
+
+#define SST_MAX_TDM_SLOTS 8
+
+struct sst_param_sba_ssp_slot_map {
+	struct sst_dsp_header header;
+
+	u16 param_id;
+	u16 param_len;
+	u16 ssp_index;
+
+	u8 rx_slot_map[SST_MAX_TDM_SLOTS];
+	u8 tx_slot_map[SST_MAX_TDM_SLOTS];
+} __packed;
+
+enum {
+	SST_PROBE_EXTRACTOR = 0,
+	SST_PROBE_INJECTOR = 1,
+};
+
+struct sst_cmd_probe {
+	struct sst_dsp_header header;
+
+	u16 switch_state;
+	struct sst_destination_id probe_dst;
+
+	u16 shared_mem:1;
+	u16 probe_in:1;
+	u16 probe_out:1;
+	u16 rsvd_1:13;
+
+	u16 rsvd_2:5;
+	u16 probe_mode:2;
+	u16 rsvd_3:1;
+	struct pcm_cfg cfg;
+
+	u16 sm_buf_id;
+
+	u16 gain[6];
+	u16 rsvd_4[9];
+} __packed;
+
+struct sst_probe_config {
+	const char *name;
+	u16 loc_id;
+	u16 mod_id;
+	u8 task_id;
+	struct pcm_cfg cfg;
+};
+
+int sst_mix_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol);
+int sst_mix_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol);
+int sst_vtsv_enroll_set(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol);
+int sst_vtsv_enroll_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol);
+#endif
diff --git a/sound/soc/intel/platform-libs/controls_v2_dpcm.c b/sound/soc/intel/platform-libs/controls_v2_dpcm.c
new file mode 100644
index 0000000..b5c338e
--- /dev/null
+++ b/sound/soc/intel/platform-libs/controls_v2_dpcm.c
@@ -0,0 +1,2109 @@
+/*
+ *  controls_v2_dpcm.c - Intel MID Platform driver DPCM ALSA controls for Mrfld
+ *
+ *  Copyright (C) 2013 Intel Corp
+ *  Author: Omair Mohammed Abdullah <omair.m.abdullah@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/slab.h>
+#include <sound/soc.h>
+#include <sound/tlv.h>
+#include "../platform_ipc_v2.h"
+#include "../sst_platform.h"
+#include "../sst_platform_pvt.h"
+#include "controls_v2.h"
+#include "sst_widgets.h"
+
+static inline void sst_fill_byte_control(char *param,
+					 u8 ipc_msg, u8 block,
+					 u8 task_id, u8 pipe_id,
+					 u16 len, void *cmd_data)
+{
+
+	struct snd_sst_bytes_v2 *byte_data = (struct snd_sst_bytes_v2 *)param;
+	byte_data->type = SST_CMD_BYTES_SET;
+	byte_data->ipc_msg = ipc_msg;
+	byte_data->block = block;
+	byte_data->task_id = task_id;
+	byte_data->pipe_id = pipe_id;
+
+	if (len > SST_MAX_BIN_BYTES - sizeof(*byte_data)) {
+		pr_err("%s: command length too big (%u)", __func__, len);
+		len = SST_MAX_BIN_BYTES - sizeof(*byte_data);
+		WARN_ON(1); /* this happens only if code is wrong */
+	}
+	byte_data->len = len;
+	memcpy(byte_data->bytes, cmd_data, len);
+	print_hex_dump_bytes("writing to lpe: ", DUMP_PREFIX_OFFSET,
+			     byte_data, len + sizeof(*byte_data));
+}
+
+static int sst_fill_and_send_cmd_unlocked(struct sst_data *sst,
+				 u8 ipc_msg, u8 block, u8 task_id, u8 pipe_id,
+				 void *cmd_data, u16 len)
+{
+	sst_fill_byte_control(sst->byte_stream, ipc_msg, block, task_id, pipe_id,
+			      len, cmd_data);
+	return sst_dsp->ops->set_generic_params(SST_SET_BYTE_STREAM,
+						sst->byte_stream);
+}
+
+/**
+ * sst_fill_and_send_cmd - generate the IPC message and send it to the FW
+ * @ipc_msg:	type of IPC (CMD, SET_PARAMS, GET_PARAMS)
+ * @cmd_data:	the IPC payload
+ */
+static int sst_fill_and_send_cmd(struct sst_data *sst,
+				 u8 ipc_msg, u8 block, u8 task_id, u8 pipe_id,
+				 void *cmd_data, u16 len)
+{
+	int ret;
+
+	mutex_lock(&sst->lock);
+	ret = sst_fill_and_send_cmd_unlocked(sst, ipc_msg, block, task_id, pipe_id,
+					     cmd_data, len);
+	mutex_unlock(&sst->lock);
+
+	return ret;
+}
+
+static int sst_probe_get(struct snd_kcontrol *kcontrol,
+			 struct snd_ctl_elem_value *ucontrol)
+{
+	struct sst_probe_value *v = (void *)kcontrol->private_value;
+
+	ucontrol->value.enumerated.item[0] = v->val;
+	return 0;
+}
+
+static int sst_probe_put(struct snd_kcontrol *kcontrol,
+			 struct snd_ctl_elem_value *ucontrol)
+{
+	struct sst_probe_value *v = (void *)kcontrol->private_value;
+	const struct soc_enum *e = v->p_enum;
+
+	if (ucontrol->value.enumerated.item[0] > e->max - 1)
+		return -EINVAL;
+	v->val = ucontrol->value.enumerated.item[0];
+	return 0;
+}
+
+int sst_probe_enum_info(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_info *uinfo)
+{
+	struct sst_probe_value *v = (void *)kcontrol->private_value;
+	const struct soc_enum *e = v->p_enum;
+
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+	uinfo->count = 1;
+	uinfo->value.enumerated.items = e->max;
+
+	if (uinfo->value.enumerated.item > e->max - 1)
+		uinfo->value.enumerated.item = e->max - 1;
+	strcpy(uinfo->value.enumerated.name,
+		e->texts[uinfo->value.enumerated.item]);
+	return 0;
+}
+
+/*
+ * slot map value is a bitfield where each bit represents a FW channel
+ *
+ *			3 2 1 0		# 0 = codec0, 1 = codec1
+ *			RLRLRLRL	# 3, 4 = reserved
+ *
+ * e.g. slot 0 rx map =	00001100b -> data from slot 0 goes into codec_in1 L,R
+ */
+static u8 sst_ssp_slot_map[SST_MAX_TDM_SLOTS] = {
+	0x1, 0x2, 0x4, 0x8, 0x10, 0x20, 0x40, 0x80, /* default rx map */
+};
+
+/*
+ * channel map value is a bitfield where each bit represents a slot
+ *
+ *			  76543210	# 0 = slot 0, 1 = slot 1
+ *
+ * e.g. codec1_0 tx map = 00000101b -> data from codec_out1_0 goes into slot 0, 2
+ */
+static u8 sst_ssp_channel_map[SST_MAX_TDM_SLOTS] = {
+	0x1, 0x2, 0x4, 0x8, 0x10, 0x20, 0x40, 0x80, /* default tx map */
+};
+
+static void sst_send_slot_map(struct sst_data *sst)
+{
+	struct sst_param_sba_ssp_slot_map cmd;
+
+	pr_debug("Enter: %s\n", __func__);
+
+	SST_FILL_DEFAULT_DESTINATION(cmd.header.dst);
+	cmd.header.command_id = SBA_SET_SSP_SLOT_MAP;
+	cmd.header.length = sizeof(struct sst_param_sba_ssp_slot_map)
+				- sizeof(struct sst_dsp_header);
+
+	cmd.param_id = SBA_SET_SSP_SLOT_MAP;
+	cmd.param_len = sizeof(cmd.rx_slot_map) + sizeof(cmd.tx_slot_map) + sizeof(cmd.ssp_index);
+	cmd.ssp_index = SSP_CODEC;
+
+	memcpy(cmd.rx_slot_map, &sst_ssp_slot_map[0], sizeof(cmd.rx_slot_map));
+	memcpy(cmd.tx_slot_map, &sst_ssp_channel_map[0], sizeof(cmd.tx_slot_map));
+
+	sst_fill_and_send_cmd(sst, SST_IPC_IA_SET_PARAMS, SST_FLAG_BLOCKED,
+			      SST_TASK_SBA, 0, &cmd,
+			      sizeof(cmd.header) + cmd.header.length);
+}
+
+int sst_slot_enum_info(struct snd_kcontrol *kcontrol,
+		       struct snd_ctl_elem_info *uinfo)
+{
+	struct sst_enum *e = (struct sst_enum *)kcontrol->private_value;
+
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+	uinfo->count = 1;
+	uinfo->value.enumerated.items = e->max;
+
+	if (uinfo->value.enumerated.item > e->max - 1)
+		uinfo->value.enumerated.item = e->max - 1;
+	strcpy(uinfo->value.enumerated.name,
+		e->texts[uinfo->value.enumerated.item]);
+	return 0;
+}
+
+/**
+ * sst_slot_get - get the status of the interleaver/deinterleaver control
+ *
+ * Searches the map where the control status is stored, and gets the
+ * channel/slot which is currently set for this enumerated control. Since it is
+ * an enumerated control, there is only one possible value.
+ */
+static int sst_slot_get(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_value *ucontrol)
+{
+	struct sst_enum *e = (void *)kcontrol->private_value;
+	unsigned int ctl_no = e->reg;
+	unsigned int is_tx = e->tx;
+	unsigned int val, mux;
+	u8 *map = is_tx ? sst_ssp_channel_map : sst_ssp_slot_map;
+
+	val = 1 << ctl_no;
+	/* search which slot/channel has this bit set - there should be only one */
+	for (mux = e->max; mux > 0;  mux--)
+		if (map[mux - 1] & val)
+			break;
+
+	ucontrol->value.enumerated.item[0] = mux;
+	pr_debug("%s: %s - %s map = %#x\n", __func__, is_tx ? "tx channel" : "rx slot",
+		 e->texts[mux], mux ? map[mux - 1] : -1);
+	return 0;
+}
+
+/**
+ * sst_slot_put - set the status of interleaver/deinterleaver control
+ *
+ * (de)interleaver controls are defined in opposite sense to be user-friendly
+ *
+ * Instead of the enum value being the value written to the register, it is the
+ * register address; and the kcontrol number (register num) is the value written
+ * to the register. This is so that there can be only one value for each
+ * slot/channel since there is only one control for each slot/channel.
+ *
+ * This means that whenever an enum is set, we need to clear the bit
+ * for that kcontrol_no for all the interleaver OR deinterleaver registers
+ */
+static int sst_slot_put(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_platform *platform = snd_kcontrol_chip(kcontrol);
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+	struct sst_enum *e = (void *)kcontrol->private_value;
+	int i;
+	unsigned int ctl_no = e->reg;
+	unsigned int is_tx = e->tx;
+	unsigned int slot_channel_no;
+	unsigned int val, mux;
+
+	u8 *map = is_tx ? sst_ssp_channel_map : sst_ssp_slot_map;
+
+	val = 1 << ctl_no;
+	mux = ucontrol->value.enumerated.item[0];
+	if (mux > e->max - 1)
+		return -EINVAL;
+
+	/* first clear all registers of this bit */
+	for (i = 0; i < e->max; i++)
+		map[i] &= ~val;
+
+	if (mux == 0) /* kctl set to 'none' */
+		return 0;
+
+	/* offset by one to take "None" into account */
+	slot_channel_no = mux - 1;
+	map[slot_channel_no] |= val;
+
+	pr_debug("%s: %s %s map = %#x\n", __func__, is_tx ? "tx channel" : "rx slot",
+		 e->texts[mux], map[slot_channel_no]);
+
+	if (e->w && e->w->power)
+		sst_send_slot_map(sst);
+	return 0;
+}
+
+/* assumes a boolean mux */
+static inline bool get_mux_state(struct sst_data *sst, unsigned int reg, unsigned int shift)
+{
+	return (sst_reg_read(sst, reg, shift, 1) == 1);
+}
+
+static int sst_mux_get(struct snd_kcontrol *kcontrol,
+		       struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
+	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+	struct sst_data *sst = snd_soc_platform_get_drvdata(widget->platform);
+	struct soc_enum *e = (void *)kcontrol->private_value;
+	unsigned int max = e->max - 1;
+
+	ucontrol->value.enumerated.item[0] = sst_reg_read(sst, e->reg, e->shift_l, max);
+	return 0;
+}
+
+static int sst_mux_put(struct snd_kcontrol *kcontrol,
+		       struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
+	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+	struct sst_data *sst = snd_soc_platform_get_drvdata(widget->platform);
+	struct soc_enum *e = (void *)kcontrol->private_value;
+	struct snd_soc_dapm_update update;
+	unsigned int max = e->max - 1;
+	unsigned int mask = (1 << fls(max)) - 1;
+	unsigned int mux, val;
+
+	if (ucontrol->value.enumerated.item[0] > e->max - 1)
+		return -EINVAL;
+
+	mux = ucontrol->value.enumerated.item[0];
+	val = sst_reg_write(sst, e->reg, e->shift_l, max, mux);
+
+	pr_debug("%s: reg[%d] = %#x\n", __func__, e->reg, val);
+
+	widget->value = val;
+	update.kcontrol = kcontrol;
+	update.widget = widget;
+	update.reg = e->reg;
+	update.mask = mask;
+	update.val = val;
+
+	widget->dapm->update = &update;
+	snd_soc_dapm_mux_update_power(widget, kcontrol, mux, e);
+	widget->dapm->update = NULL;
+	return 0;
+}
+
+static int sst_mode_get(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_platform *platform = snd_kcontrol_chip(kcontrol);
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+	struct soc_enum *e = (void *)kcontrol->private_value;
+	unsigned int max = e->max - 1;
+
+	ucontrol->value.enumerated.item[0] = sst_reg_read(sst, e->reg, e->shift_l, max);
+	return 0;
+}
+
+static int sst_mode_put(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_platform *platform = snd_kcontrol_chip(kcontrol);
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+	struct soc_enum *e = (void *)kcontrol->private_value;
+	unsigned int max = e->max - 1;
+	unsigned int val;
+
+	if (ucontrol->value.enumerated.item[0] > e->max - 1)
+		return -EINVAL;
+
+	val = sst_reg_write(sst, e->reg, e->shift_l, max, ucontrol->value.enumerated.item[0]);
+	pr_debug("%s: reg[%d] - %#x\n", __func__, e->reg, val);
+	return 0;
+}
+
+static void sst_send_algo_cmd(struct sst_data *sst,
+			      struct sst_algo_control *bc)
+{
+	int len;
+	struct sst_cmd_set_params *cmd;
+
+	/* bc->max includes sizeof algos + length field */
+	len = sizeof(cmd->dst) + sizeof(cmd->command_id) + bc->max;
+
+	cmd = kzalloc(len, GFP_KERNEL);
+	if (cmd == NULL) {
+		pr_err("Failed to send cmd, kzalloc failed\n");
+		return;
+	}
+
+	SST_FILL_DESTINATION(2, cmd->dst, bc->pipe_id, bc->module_id);
+	cmd->command_id = bc->cmd_id;
+	memcpy(cmd->params, bc->params, bc->max);
+
+	sst_fill_and_send_cmd(sst, SST_IPC_IA_SET_PARAMS, SST_FLAG_BLOCKED,
+			      bc->task_id, 0, cmd, len);
+	kfree(cmd);
+}
+
+/**
+ * sst_find_and_send_pipe_algo - send all the algo parameters for a pipe
+ *
+ * The algos which are in each pipeline are sent to the firmware one by one
+ */
+static void sst_find_and_send_pipe_algo(struct sst_data *sst,
+					const char *pipe, struct sst_ids *ids)
+{
+	struct sst_algo_control *bc;
+	struct module *algo = NULL;
+
+	pr_debug("Enter: %s, widget=%s\n", __func__, pipe);
+
+	list_for_each_entry(algo, &ids->algo_list, node) {
+		bc = (void *)algo->kctl->private_value;
+
+		pr_debug("Found algo control name=%s pipe=%s\n", algo->kctl->id.name, pipe);
+		sst_send_algo_cmd(sst, bc);
+	}
+}
+
+int sst_algo_bytes_ctl_info(struct snd_kcontrol *kcontrol,
+			    struct snd_ctl_elem_info *uinfo)
+{
+	struct sst_algo_control *bc = (void *)kcontrol->private_value;
+	struct snd_soc_platform *platform = snd_kcontrol_chip(kcontrol);
+
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
+	uinfo->count = bc->max;
+
+	/* allocate space to cache the algo parameters in the driver */
+	if (bc->params == NULL) {
+		bc->params = devm_kzalloc(platform->dev, bc->max, GFP_KERNEL);
+		if (bc->params == NULL) {
+			pr_err("kzalloc failed\n");
+			return -ENOMEM;
+		}
+	}
+	return 0;
+}
+
+static int sst_algo_control_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct sst_algo_control *bc = (void *)kcontrol->private_value;
+
+	switch (bc->type) {
+	case SST_ALGO_PARAMS:
+		if (bc->params)
+			memcpy(ucontrol->value.bytes.data, bc->params, bc->max);
+		break;
+	case SST_ALGO_BYPASS:
+		ucontrol->value.integer.value[0] = bc->bypass ? 1 : 0;
+		pr_debug("%s: bypass  %d\n", __func__, bc->bypass);
+		break;
+	default:
+		pr_err("Invalid Input- algo type:%d\n", bc->type);
+		return -EINVAL;
+
+	}
+	return 0;
+}
+
+static int sst_algo_control_set(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_platform *platform = snd_kcontrol_chip(kcontrol);
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+	struct sst_algo_control *bc = (void *)kcontrol->private_value;
+
+	pr_debug("in %s control_name=%s\n", __func__, kcontrol->id.name);
+	switch (bc->type) {
+	case SST_ALGO_PARAMS:
+		if (bc->params)
+			memcpy(bc->params, ucontrol->value.bytes.data, bc->max);
+		break;
+	case SST_ALGO_BYPASS:
+		bc->bypass = !!ucontrol->value.integer.value[0];
+		break;
+	default:
+		pr_err("Invalid Input- algo type:%ld\n", ucontrol->value.integer.value[0]);
+		return -EINVAL;
+	}
+	/*if pipe is enabled, need to send the algo params from here */
+	if (bc->w && bc->w->power)
+		sst_send_algo_cmd(sst, bc);
+
+	return 0;
+}
+
+static int sst_gain_ctl_info(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_info *uinfo)
+{
+	struct sst_gain_mixer_control *mc = (void *)kcontrol->private_value;
+
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+	uinfo->count = mc->stereo ? 2 : 1;
+	uinfo->value.integer.min = mc->min;
+	uinfo->value.integer.max = mc->max;
+	return 0;
+}
+
+/**
+ * sst_send_gain_cmd - send the gain algorithm IPC to the FW
+ * @gv:		the stored value of gain (also contains rampduration)
+ * @mute:	flag that indicates whether this was called from the
+ *		digital_mute callback or directly. If called from the
+ *		digital_mute callback, module will be muted/unmuted based on this
+ *		flag. The flag is always 0 if called directly.
+ *
+ * The user-set gain value is sent only if the user-controllable 'mute' control
+ * is OFF (indicated by gv->mute). Otherwise, the mute value (MIN value) is
+ * sent.
+ */
+static void sst_send_gain_cmd(struct sst_data *sst, struct sst_gain_value *gv,
+			      u16 task_id, u16 loc_id, u16 module_id, int mute)
+{
+	struct sst_cmd_set_gain_dual cmd;
+	pr_debug("%s\n", __func__);
+
+	cmd.header.command_id = MMX_SET_GAIN;
+	SST_FILL_DEFAULT_DESTINATION(cmd.header.dst);
+	cmd.gain_cell_num = 1;
+
+	if (mute || gv->mute) {
+		cmd.cell_gains[0].cell_gain_left = SST_GAIN_MIN_VALUE;
+		cmd.cell_gains[0].cell_gain_right = SST_GAIN_MIN_VALUE;
+	} else {
+		cmd.cell_gains[0].cell_gain_left = gv->l_gain;
+		cmd.cell_gains[0].cell_gain_right = gv->r_gain;
+	}
+	SST_FILL_DESTINATION(2, cmd.cell_gains[0].dest,
+			     loc_id, module_id);
+	cmd.cell_gains[0].gain_time_constant = gv->ramp_duration;
+
+	cmd.header.length = sizeof(struct sst_cmd_set_gain_dual)
+				- sizeof(struct sst_dsp_header);
+
+	sst_fill_and_send_cmd(sst, SST_IPC_IA_SET_PARAMS, SST_FLAG_BLOCKED,
+			      task_id, 0, &cmd,
+			      sizeof(cmd.header) + cmd.header.length);
+}
+
+static int sst_gain_get(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_value *ucontrol)
+{
+	struct sst_gain_mixer_control *mc = (void *)kcontrol->private_value;
+	struct sst_gain_value *gv = mc->gain_val;
+
+	switch (mc->type) {
+	case SST_GAIN_TLV:
+		ucontrol->value.integer.value[0] = gv->l_gain;
+		ucontrol->value.integer.value[1] = gv->r_gain;
+		break;
+	case SST_GAIN_MUTE:
+		ucontrol->value.integer.value[0] = gv->mute ? 1 : 0;
+		break;
+	case SST_GAIN_RAMP_DURATION:
+		ucontrol->value.integer.value[0] = gv->ramp_duration;
+		break;
+	default:
+		pr_err("Invalid Input- gain type:%d\n", mc->type);
+		return -EINVAL;
+	};
+	return 0;
+}
+
+static int sst_gain_put(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_platform *platform = snd_kcontrol_chip(kcontrol);
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+	struct sst_gain_mixer_control *mc = (void *)kcontrol->private_value;
+	struct sst_gain_value *gv = mc->gain_val;
+
+	switch (mc->type) {
+	case SST_GAIN_TLV:
+		gv->l_gain = ucontrol->value.integer.value[0];
+		gv->r_gain = ucontrol->value.integer.value[1];
+		pr_debug("%s: %s: Volume %d, %d\n", __func__, mc->pname, gv->l_gain, gv->r_gain);
+		break;
+	case SST_GAIN_MUTE:
+		gv->mute = !!ucontrol->value.integer.value[0];
+		pr_debug("%s: %s: Mute %d\n", __func__, mc->pname, gv->mute);
+		break;
+	case SST_GAIN_RAMP_DURATION:
+		gv->ramp_duration = ucontrol->value.integer.value[0];
+		pr_debug("%s: %s: RampDuration %d\n", __func__, mc->pname, gv->ramp_duration);
+		break;
+	default:
+		pr_err("Invalid Input- gain type:%d\n", mc->type);
+		return -EINVAL;
+	};
+
+	if (mc->w && mc->w->power)
+		sst_send_gain_cmd(sst, gv, mc->task_id,
+				mc->pipe_id | mc->instance_id, mc->module_id, 0);
+	return 0;
+}
+
+static void sst_set_pipe_gain(struct sst_ids *ids, struct sst_data *sst, int mute);
+
+static void sst_send_pipe_module_params(struct snd_soc_dapm_widget *w)
+{
+	struct sst_data *sst = snd_soc_platform_get_drvdata(w->platform);
+	struct sst_ids *ids = w->priv;
+
+	sst_find_and_send_pipe_algo(sst, w->name, ids);
+	sst_set_pipe_gain(ids, sst, 0);
+}
+
+static int sst_generic_modules_event(struct snd_soc_dapm_widget *w,
+				     struct snd_kcontrol *k, int event)
+{
+	if (SND_SOC_DAPM_EVENT_ON(event))
+		sst_send_pipe_module_params(w);
+	return 0;
+}
+
+static const DECLARE_TLV_DB_SCALE(sst_gain_tlv_common, SST_GAIN_MIN_VALUE * 10, 10, 0);
+
+/* Look up table to convert MIXER SW bit regs to SWM inputs */
+static const uint swm_mixer_input_ids[SST_SWM_INPUT_COUNT] = {
+	[SST_IP_MODEM]		= SST_SWM_IN_MODEM,
+	[SST_IP_BT]		= SST_SWM_IN_BT,
+	[SST_IP_CODEC0]		= SST_SWM_IN_CODEC0,
+	[SST_IP_CODEC1]		= SST_SWM_IN_CODEC1,
+	[SST_IP_LOOP0]		= SST_SWM_IN_SPROT_LOOP,
+	[SST_IP_LOOP1]		= SST_SWM_IN_MEDIA_LOOP1,
+	[SST_IP_LOOP2]		= SST_SWM_IN_MEDIA_LOOP2,
+	[SST_IP_SIDETONE]	= SST_SWM_IN_SIDETONE,
+	[SST_IP_TXSPEECH]	= SST_SWM_IN_TXSPEECH,
+	[SST_IP_SPEECH]		= SST_SWM_IN_SPEECH,
+	[SST_IP_TONE]		= SST_SWM_IN_TONE,
+	[SST_IP_VOIP]		= SST_SWM_IN_VOIP,
+	[SST_IP_PCM0]		= SST_SWM_IN_PCM0,
+	[SST_IP_PCM1]		= SST_SWM_IN_PCM1,
+	[SST_IP_LOW_PCM0]	= SST_SWM_IN_LOW_PCM0,
+	[SST_IP_FM]		= SST_SWM_IN_FM,
+	[SST_IP_MEDIA0]		= SST_SWM_IN_MEDIA0,
+	[SST_IP_MEDIA1]		= SST_SWM_IN_MEDIA1,
+	[SST_IP_MEDIA2]		= SST_SWM_IN_MEDIA2,
+	[SST_IP_MEDIA3]		= SST_SWM_IN_MEDIA3,
+};
+
+/**
+ * fill_swm_input - fill in the SWM input ids given the register
+ *
+ * The register value is a bit-field inicated which mixer inputs are ON. Use the
+ * lookup table to get the input-id and fill it in the structure.
+ */
+static int fill_swm_input(struct swm_input_ids *swm_input, unsigned int reg)
+{
+	uint i, is_set, nb_inputs = 0;
+	u16 input_loc_id;
+
+	pr_debug("%s: reg: %#x\n", __func__, reg);
+	for (i = 0; i < SST_SWM_INPUT_COUNT; i++) {
+		is_set = reg & BIT(i);
+		if (!is_set)
+			continue;
+
+		input_loc_id = swm_mixer_input_ids[i];
+		SST_FILL_DESTINATION(2, swm_input->input_id,
+				     input_loc_id, SST_DEFAULT_MODULE_ID);
+		nb_inputs++;
+		swm_input++;
+		pr_debug("input id: %#x, nb_inputs: %d\n", input_loc_id, nb_inputs);
+
+		if (nb_inputs == SST_CMD_SWM_MAX_INPUTS) {
+			pr_warn("%s: SET_SWM cmd max inputs reached", __func__);
+			break;
+		}
+	}
+	return nb_inputs;
+}
+
+static void sst_set_pipe_gain(struct sst_ids *ids, struct sst_data *sst, int mute)
+{
+	struct sst_gain_mixer_control *mc;
+	struct sst_gain_value *gv;
+	struct module *gain = NULL;
+
+	list_for_each_entry(gain, &ids->gain_list, node) {
+		struct snd_kcontrol *kctl = gain->kctl;
+
+		pr_debug("control name=%s\n", kctl->id.name);
+		mc = (void *)kctl->private_value;
+		gv = mc->gain_val;
+
+		sst_send_gain_cmd(sst, gv, mc->task_id,
+				mc->pipe_id | mc->instance_id, mc->module_id, mute);
+	}
+}
+
+static int sst_swm_mixer_event(struct snd_soc_dapm_widget *w,
+			struct snd_kcontrol *k, int event)
+{
+	struct sst_cmd_set_swm cmd;
+	struct sst_data *sst = snd_soc_platform_get_drvdata(w->platform);
+	struct sst_ids *ids = w->priv;
+	bool set_mixer = false;
+	int val = sst->widget[ids->reg];
+
+	pr_debug("%s: widget = %s\n", __func__, w->name);
+	pr_debug("%s: reg[%d] = %#x\n", __func__, ids->reg, val);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+	case SND_SOC_DAPM_POST_PMD:
+		set_mixer = true;
+		break;
+	case SND_SOC_DAPM_POST_REG:
+		if (w->power)
+			set_mixer = true;
+		break;
+	default:
+		set_mixer = false;
+	}
+
+	if (set_mixer == false)
+		return 0;
+
+	if (SND_SOC_DAPM_EVENT_ON(event) ||
+	    event == SND_SOC_DAPM_POST_REG)
+		cmd.switch_state = SST_SWM_ON;
+	else
+		cmd.switch_state = SST_SWM_OFF;
+
+	SST_FILL_DEFAULT_DESTINATION(cmd.header.dst);
+	/* MMX_SET_SWM == SBA_SET_SWM */
+	cmd.header.command_id = SBA_SET_SWM;
+
+	SST_FILL_DESTINATION(2, cmd.output_id,
+			     ids->location_id, SST_DEFAULT_MODULE_ID);
+	cmd.nb_inputs =	fill_swm_input(&cmd.input[0], val);
+	cmd.header.length = offsetof(struct sst_cmd_set_swm, input) - sizeof(struct sst_dsp_header)
+				+ (cmd.nb_inputs * sizeof(cmd.input[0]));
+
+	sst_fill_and_send_cmd(sst, SST_IPC_IA_CMD, SST_FLAG_BLOCKED,
+			      ids->task_id, 0, &cmd,
+			      sizeof(cmd.header) + cmd.header.length);
+	return 0;
+}
+
+/* SBA mixers - 16 inputs */
+#define SST_SBA_DECLARE_MIX_CONTROLS(kctl_name, mixer_reg)			\
+	static const struct snd_kcontrol_new kctl_name[] = {			\
+		SOC_SINGLE_EXT("modem_in", mixer_reg, SST_IP_MODEM, 1, 0,	\
+				sst_mix_get, sst_mix_put),			\
+		SOC_SINGLE_EXT("bt_in", mixer_reg, SST_IP_BT, 1, 0,		\
+				sst_mix_get, sst_mix_put),			\
+		SOC_SINGLE_EXT("codec_in0", mixer_reg, SST_IP_CODEC0, 1, 0,	\
+				sst_mix_get, sst_mix_put),			\
+		SOC_SINGLE_EXT("codec_in1", mixer_reg, SST_IP_CODEC1, 1, 0,	\
+				sst_mix_get, sst_mix_put),			\
+		SOC_SINGLE_EXT("sprot_loop_in", mixer_reg, SST_IP_LOOP0, 1, 0,	\
+				sst_mix_get, sst_mix_put),			\
+		SOC_SINGLE_EXT("media_loop1_in", mixer_reg, SST_IP_LOOP1, 1, 0,	\
+				sst_mix_get, sst_mix_put),			\
+		SOC_SINGLE_EXT("media_loop2_in", mixer_reg, SST_IP_LOOP2, 1, 0,	\
+				sst_mix_get, sst_mix_put),			\
+		SOC_SINGLE_EXT("sidetone_in", mixer_reg, SST_IP_SIDETONE, 1, 0,	\
+				sst_mix_get, sst_mix_put),			\
+		SOC_SINGLE_EXT("txspeech_in", mixer_reg, SST_IP_TXSPEECH, 1, 0,	\
+				sst_mix_get, sst_mix_put),			\
+		SOC_SINGLE_EXT("speech_in", mixer_reg, SST_IP_SPEECH, 1, 0,	\
+				sst_mix_get, sst_mix_put),			\
+		SOC_SINGLE_EXT("tone_in", mixer_reg, SST_IP_TONE, 1, 0,		\
+				sst_mix_get, sst_mix_put),			\
+		SOC_SINGLE_EXT("voip_in", mixer_reg, SST_IP_VOIP, 1, 0,		\
+				sst_mix_get, sst_mix_put),			\
+		SOC_SINGLE_EXT("pcm0_in", mixer_reg, SST_IP_PCM0, 1, 0,		\
+				sst_mix_get, sst_mix_put),			\
+		SOC_SINGLE_EXT("pcm1_in", mixer_reg, SST_IP_PCM1, 1, 0,		\
+				sst_mix_get, sst_mix_put),			\
+		SOC_SINGLE_EXT("low_pcm0_in", mixer_reg, SST_IP_LOW_PCM0, 1, 0,	\
+				sst_mix_get, sst_mix_put),			\
+		SOC_SINGLE_EXT("fm_in", mixer_reg, SST_IP_FM, 1, 0,		\
+				sst_mix_get, sst_mix_put),			\
+	}
+
+#define SST_SBA_MIXER_GRAPH_MAP(mix_name)			\
+	{ mix_name, "modem_in",		"modem_in" },		\
+	{ mix_name, "bt_in",		"bt_in" },		\
+	{ mix_name, "codec_in0",	"codec_in0" },		\
+	{ mix_name, "codec_in1",	"codec_in1" },		\
+	{ mix_name, "sprot_loop_in",	"sprot_loop_in" },	\
+	{ mix_name, "media_loop1_in",	"media_loop1_in" },	\
+	{ mix_name, "media_loop2_in",	"media_loop2_in" },	\
+	{ mix_name, "sidetone_in",	"sidetone_in" },	\
+	{ mix_name, "txspeech_in",	"txspeech_in" },	\
+	{ mix_name, "speech_in",	"speech_in" },		\
+	{ mix_name, "tone_in",		"tone_in" },		\
+	{ mix_name, "voip_in",		"voip_in" },		\
+	{ mix_name, "pcm0_in",		"pcm0_in" },		\
+	{ mix_name, "pcm1_in",		"pcm1_in" },		\
+	{ mix_name, "low_pcm0_in",	"low_pcm0_in" },	\
+	{ mix_name, "fm_in",		"fm_in" }
+
+#define SST_MMX_DECLARE_MIX_CONTROLS(kctl_name, mixer_reg)			\
+	static const struct snd_kcontrol_new kctl_name[] = {			\
+		SOC_SINGLE_EXT("media0_in", mixer_reg, SST_IP_MEDIA0, 1, 0,	\
+				sst_mix_get, sst_mix_put),			\
+		SOC_SINGLE_EXT("media1_in", mixer_reg, SST_IP_MEDIA1, 1, 0,	\
+				sst_mix_get, sst_mix_put),			\
+		SOC_SINGLE_EXT("media2_in", mixer_reg, SST_IP_MEDIA2, 1, 0,	\
+				sst_mix_get, sst_mix_put),			\
+		SOC_SINGLE_EXT("media3_in", mixer_reg, SST_IP_MEDIA3, 1, 0,	\
+				sst_mix_get, sst_mix_put),			\
+	}
+
+SST_MMX_DECLARE_MIX_CONTROLS(sst_mix_media0_controls, SST_MIX_MEDIA0);
+SST_MMX_DECLARE_MIX_CONTROLS(sst_mix_media1_controls, SST_MIX_MEDIA1);
+
+/* 18 SBA mixers */
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_pcm0_controls, SST_MIX_PCM0);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_pcm1_controls, SST_MIX_PCM1);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_pcm2_controls, SST_MIX_PCM2);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_sprot_l0_controls, SST_MIX_LOOP0);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_media_l1_controls, SST_MIX_LOOP1);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_media_l2_controls, SST_MIX_LOOP2);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_voip_controls, SST_MIX_VOIP);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_aware_controls, SST_MIX_AWARE);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_vad_controls, SST_MIX_VAD);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_hf_sns_controls, SST_MIX_HF_SNS);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_hf_controls, SST_MIX_HF);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_speech_controls, SST_MIX_SPEECH);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_rxspeech_controls, SST_MIX_RXSPEECH);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_codec0_controls, SST_MIX_CODEC0);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_codec1_controls, SST_MIX_CODEC1);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_bt_controls, SST_MIX_BT);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_fm_controls, SST_MIX_FM);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_modem_controls, SST_MIX_MODEM);
+
+void sst_handle_vb_timer(struct snd_soc_platform *p, bool enable)
+{
+	struct sst_cmd_generic cmd;
+	struct sst_data *sst = snd_soc_platform_get_drvdata(p);
+	static int timer_usage;
+
+	if (enable)
+		cmd.header.command_id = SBA_VB_START;
+	else
+		cmd.header.command_id = SBA_IDLE;
+	pr_debug("%s: enable=%u, usage=%d\n", __func__, enable, timer_usage);
+
+	SST_FILL_DEFAULT_DESTINATION(cmd.header.dst);
+	cmd.header.length = 0;
+
+	if (enable)
+		sst_dsp->ops->power(true);
+
+	mutex_lock(&sst->lock);
+	if (enable)
+		timer_usage++;
+	else
+		timer_usage--;
+
+	/* Send the command only if this call is the first enable or last
+	 * disable
+	 */
+	if ((enable && (timer_usage == 1)) ||
+	    (!enable && (timer_usage == 0)))
+		sst_fill_and_send_cmd_unlocked(sst, SST_IPC_IA_CMD, SST_FLAG_BLOCKED,
+				      SST_TASK_SBA, 0, &cmd,
+				      sizeof(cmd.header) + cmd.header.length);
+	mutex_unlock(&sst->lock);
+
+	if (!enable)
+		sst_dsp->ops->power(false);
+}
+
+#define SST_SSP_CODEC_MUX		0
+#define SST_SSP_CODEC_DOMAIN		0
+#define SST_SSP_MODEM_MUX		0
+#define SST_SSP_MODEM_DOMAIN		0
+#define SST_SSP_FM_MUX			0
+#define SST_SSP_FM_DOMAIN		0
+#define SST_SSP_BT_MUX			1
+#define SST_SSP_BT_NB_DOMAIN		0
+#define SST_SSP_BT_WB_DOMAIN		1
+
+static const int sst_ssp_mux_shift[SST_NUM_SSPS] = {
+	[SST_SSP0] = -1,			/* no register shift, i.e. single mux value */
+	[SST_SSP1] = SST_BT_FM_MUX_SHIFT,
+	[SST_SSP2] = -1,
+};
+
+static const int sst_ssp_domain_shift[SST_NUM_SSPS][SST_MAX_SSP_MUX] = {
+	[SST_SSP0][0] = -1,			/* no domain shift, i.e. single domain */
+	[SST_SSP1] = {
+		[SST_SSP_FM_MUX] = -1,
+		[SST_SSP_BT_MUX] = SST_BT_MODE_SHIFT,
+	},
+	[SST_SSP2][0] = -1,
+};
+
+/**
+ * sst_ssp_config - contains SSP configuration for different UCs
+ *
+ * The 3-D array contains SSP configuration for different SSPs for different
+ * domains (e.g. NB, WB), as well as muxed SSPs.
+ *
+ * The first dimension has SSP number
+ * The second dimension has SSP Muxing (e.g. BT/FM muxed on same SSP)
+ * The third dimension has SSP domains (e.g. NB/WB for BT)
+ */
+static const struct sst_ssp_config
+sst_ssp_configs[SST_NUM_SSPS][SST_MAX_SSP_MUX][SST_MAX_SSP_DOMAINS] = {
+	[SST_SSP0] = {
+		[SST_SSP_MODEM_MUX] = {
+			[SST_SSP_MODEM_DOMAIN] = {
+				.ssp_id = SSP_MODEM,
+				.bits_per_slot = 16,
+				.slots = 1,
+				.ssp_mode = SSP_MODE_MASTER,
+				.pcm_mode = SSP_PCM_MODE_NETWORK,
+				.duplex = SSP_DUPLEX,
+				.ssp_protocol = SSP_MODE_PCM,
+				.fs_width = 1,
+				.fs_frequency = SSP_FS_48_KHZ,
+				.active_slot_map = 0x1,
+				.start_delay = 1,
+			},
+		},
+	},
+	[SST_SSP1] = {
+		[SST_SSP_FM_MUX] = {
+			[SST_SSP_FM_DOMAIN] = {
+				.ssp_id = SSP_FM,
+				.bits_per_slot = 16,
+				.slots = 2,
+				.ssp_mode = SSP_MODE_MASTER,
+				.pcm_mode = SSP_PCM_MODE_NORMAL,
+				.duplex = SSP_DUPLEX,
+				.ssp_protocol = SSP_MODE_I2S,
+				.fs_width = 32,
+				.fs_frequency = SSP_FS_48_KHZ,
+				.active_slot_map = 0x3,
+				.start_delay = 0,
+			},
+		},
+		[SST_SSP_BT_MUX] = {
+			[SST_SSP_BT_NB_DOMAIN] = {
+				.ssp_id = SSP_BT,
+				.bits_per_slot = 16,
+				.slots = 1,
+				.ssp_mode = SSP_MODE_MASTER,
+				.pcm_mode = SSP_PCM_MODE_NORMAL,
+				.duplex = SSP_DUPLEX,
+				.ssp_protocol = SSP_MODE_PCM,
+				.fs_width = 1,
+				.fs_frequency = SSP_FS_8_KHZ,
+				.active_slot_map = 0x1,
+				.start_delay = 1,
+			},
+			[SST_SSP_BT_WB_DOMAIN] = {
+				.ssp_id = SSP_BT,
+				.bits_per_slot = 16,
+				.slots = 1,
+				.ssp_mode = SSP_MODE_MASTER,
+				.pcm_mode = SSP_PCM_MODE_NORMAL,
+				.duplex = SSP_DUPLEX,
+				.ssp_protocol = SSP_MODE_PCM,
+				.fs_width = 1,
+				.fs_frequency = SSP_FS_16_KHZ,
+				.active_slot_map = 0x1,
+				.start_delay = 1,
+			},
+		},
+	},
+	[SST_SSP2] = {
+		[SST_SSP_CODEC_MUX] = {
+			[SST_SSP_CODEC_DOMAIN] = {
+				.ssp_id = SSP_CODEC,
+				.bits_per_slot = 24,
+				.slots = 4,
+				.ssp_mode = SSP_MODE_MASTER,
+				.pcm_mode = SSP_PCM_MODE_NETWORK,
+				.duplex = SSP_DUPLEX,
+				.ssp_protocol = SSP_MODE_PCM,
+				.fs_width = 1,
+				.fs_frequency = SSP_FS_48_KHZ,
+				.active_slot_map = 0xF,
+				.start_delay = 0,
+			},
+		},
+	},
+};
+
+#define SST_SSP_CFG(wssp_no)                                                            \
+	(const struct sst_ssp_cfg){ .ssp_config = &sst_ssp_configs[wssp_no],            \
+				.ssp_number = wssp_no,                              \
+				.mux_shift = &sst_ssp_mux_shift[wssp_no],           \
+				.domain_shift = &sst_ssp_domain_shift[wssp_no], }
+
+void send_ssp_cmd(struct snd_soc_platform *platform, const char *id, bool enable)
+{
+	struct sst_cmd_sba_hw_set_ssp cmd;
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+	unsigned int domain, mux;
+	int domain_shift, mux_shift, ssp_no;
+	const struct sst_ssp_config *config;
+	const struct sst_ssp_cfg *ssp;
+
+
+	pr_err("Enter:%s, enable=%d port_name=%s\n", __func__, enable, id);
+
+	if (strcmp(id, "ssp0-port") == 0)
+		ssp_no = SST_SSP0;
+	else if (strcmp(id, "ssp1-port") == 0)
+		ssp_no = SST_SSP1;
+	else if (strcmp(id, "ssp2-port") == 0)
+		ssp_no = SST_SSP2;
+	else
+		return;
+
+	ssp = &SST_SSP_CFG(ssp_no);
+
+	SST_FILL_DEFAULT_DESTINATION(cmd.header.dst);
+	cmd.header.command_id = SBA_HW_SET_SSP;
+	cmd.header.length = sizeof(struct sst_cmd_sba_hw_set_ssp)
+				- sizeof(struct sst_dsp_header);
+	mux_shift = *ssp->mux_shift;
+	mux = (mux_shift == -1) ? 0 : get_mux_state(sst, SST_MUX_REG, mux_shift);
+	domain_shift = (*ssp->domain_shift)[mux];
+	domain = (domain_shift == -1) ? 0 : get_mux_state(sst, SST_MUX_REG, domain_shift);
+
+	config = &(*ssp->ssp_config)[mux][domain];
+	pr_debug("%s: ssp_id: %u, mux: %d, domain: %d\n", __func__,
+		 config->ssp_id, mux, domain);
+
+	if (enable)
+		cmd.switch_state = SST_SWITCH_ON;
+	else
+		cmd.switch_state = SST_SWITCH_OFF;
+
+	cmd.selection = config->ssp_id;
+	cmd.nb_bits_per_slots = config->bits_per_slot;
+	cmd.nb_slots = config->slots;
+	cmd.mode = config->ssp_mode | (config->pcm_mode << 1);
+	cmd.duplex = config->duplex;
+	cmd.active_tx_slot_map = config->active_slot_map;
+	cmd.active_rx_slot_map = config->active_slot_map;
+	cmd.frame_sync_frequency = config->fs_frequency;
+	cmd.frame_sync_polarity = SSP_FS_ACTIVE_HIGH;
+	cmd.data_polarity = 1;
+	cmd.frame_sync_width = config->fs_width;
+	cmd.ssp_protocol = config->ssp_protocol;
+	cmd.start_delay = config->start_delay;
+	cmd.reserved1 = cmd.reserved2 = 0xFF;
+
+	sst_fill_and_send_cmd(sst, SST_IPC_IA_CMD, SST_FLAG_BLOCKED,
+				SST_TASK_SBA, 0, &cmd,
+				sizeof(cmd.header) + cmd.header.length);
+}
+
+static int sst_set_be_modules(struct snd_soc_dapm_widget *w,
+			 struct snd_kcontrol *k, int event)
+{
+	struct sst_data *sst = snd_soc_platform_get_drvdata(w->platform);
+
+	pr_debug("Enter: %s, widget=%s\n", __func__, w->name);
+
+	if (SND_SOC_DAPM_EVENT_ON(event)) {
+		sst_send_slot_map(sst);
+		sst_send_pipe_module_params(w);
+	}
+	return 0;
+}
+
+/**
+ * sst_set_speech_path - send SPEECH_UL/DL enable/disable IPC
+ *
+ * The SPEECH_PATH IPC enables more than one pipeline (speech uplink, downlink,
+ * sidetone etc.). Since the command should be sent only once, use a refcount to
+ * send the command only on first enable/last disable.
+ */
+static int sst_set_speech_path(struct snd_soc_dapm_widget *w,
+			       struct snd_kcontrol *k, int event)
+{
+	struct sst_cmd_set_speech_path cmd;
+	struct sst_data *sst = snd_soc_platform_get_drvdata(w->platform);
+	bool is_wideband;
+	static int speech_active;
+
+	pr_debug("%s: widget=%s\n", __func__, w->name);
+
+	if (SND_SOC_DAPM_EVENT_ON(event)) {
+		speech_active++;
+		cmd.switch_state = SST_SWITCH_ON;
+	} else {
+		speech_active--;
+		cmd.switch_state = SST_SWITCH_OFF;
+	}
+
+	SST_FILL_DEFAULT_DESTINATION(cmd.header.dst);
+
+	cmd.header.command_id = SBA_VB_SET_SPEECH_PATH;
+	cmd.header.length = sizeof(struct sst_cmd_set_speech_path)
+				- sizeof(struct sst_dsp_header);
+	cmd.config.cfg.s_length = 0;
+	cmd.config.cfg.rate = 0;		/* 8 khz */
+	cmd.config.cfg.format = 0;
+
+	is_wideband = get_mux_state(sst, SST_MUX_REG, SST_VOICE_MODE_SHIFT);
+	if (is_wideband)
+		cmd.config.cfg.rate = 1;	/* 16 khz */
+
+	if ((SND_SOC_DAPM_EVENT_ON(event) && (speech_active == 1)) ||
+	    (SND_SOC_DAPM_EVENT_OFF(event) && (speech_active == 0)))
+		sst_fill_and_send_cmd(sst, SST_IPC_IA_CMD, SST_FLAG_BLOCKED,
+				SST_TASK_SBA, 0, &cmd,
+				sizeof(cmd.header) + cmd.header.length);
+
+	if (SND_SOC_DAPM_EVENT_ON(event))
+		sst_send_pipe_module_params(w);
+	return 0;
+}
+
+/**
+ * sst_set_linked_pipe - send gain/algo for a linked input/output
+ *
+ * A linked pipe is dependent on the power status of its parent widget since it
+ * itself does not have any enabling command.
+ */
+static int sst_set_linked_pipe(struct snd_soc_dapm_widget *w,
+		       struct snd_kcontrol *k, int event)
+{
+	struct sst_data *sst = snd_soc_platform_get_drvdata(w->platform);
+	struct sst_ids *ids = w->priv;
+	pr_debug("%s: widget=%s\n", __func__, w->name);
+	if (SND_SOC_DAPM_EVENT_ON(event)) {
+		if (ids->parent_w && ids->parent_w->power)
+			sst_find_and_send_pipe_algo(sst, w->name, ids);
+			sst_set_pipe_gain(ids, sst, 0);
+	}
+	return 0;
+}
+
+static int sst_set_media_path(struct snd_soc_dapm_widget *w,
+			      struct snd_kcontrol *k, int event)
+{
+	struct sst_cmd_set_media_path cmd;
+	struct sst_data *sst = snd_soc_platform_get_drvdata(w->platform);
+	struct sst_ids *ids = w->priv;
+
+	pr_debug("%s: widget=%s\n", __func__, w->name);
+	pr_debug("%s: task=%u, location=%#x\n", __func__,
+				ids->task_id, ids->location_id);
+
+	if (SND_SOC_DAPM_EVENT_ON(event))
+		cmd.switch_state = SST_PATH_ON;
+	else
+		cmd.switch_state = SST_PATH_OFF;
+
+	SST_FILL_DESTINATION(2, cmd.header.dst,
+			     ids->location_id, SST_DEFAULT_MODULE_ID);
+
+	/* MMX_SET_MEDIA_PATH == SBA_SET_MEDIA_PATH */
+	cmd.header.command_id = MMX_SET_MEDIA_PATH;
+	cmd.header.length = sizeof(struct sst_cmd_set_media_path)
+				- sizeof(struct sst_dsp_header);
+
+	sst_fill_and_send_cmd(sst, SST_IPC_IA_CMD, SST_FLAG_BLOCKED,
+			      ids->task_id, 0, &cmd,
+			      sizeof(cmd.header) + cmd.header.length);
+
+	if (SND_SOC_DAPM_EVENT_ON(event))
+		sst_send_pipe_module_params(w);
+	return 0;
+}
+
+static int sst_set_media_loop(struct snd_soc_dapm_widget *w,
+			struct snd_kcontrol *k, int event)
+{
+	struct sst_cmd_sba_set_media_loop_map cmd;
+	struct sst_data *sst = snd_soc_platform_get_drvdata(w->platform);
+	struct sst_ids *ids = w->priv;
+
+	pr_debug("Enter:%s, widget=%s\n", __func__, w->name);
+	if (SND_SOC_DAPM_EVENT_ON(event))
+		cmd.switch_state = SST_SWITCH_ON;
+	else
+		cmd.switch_state = SST_SWITCH_OFF;
+
+	SST_FILL_DESTINATION(2, cmd.header.dst,
+			     ids->location_id, SST_DEFAULT_MODULE_ID);
+
+	cmd.header.command_id = SBA_SET_MEDIA_LOOP_MAP;
+	cmd.header.length = sizeof(struct sst_cmd_sba_set_media_loop_map)
+				 - sizeof(struct sst_dsp_header);
+	cmd.param.part.cfg.rate = 2; /* 48khz */
+
+	cmd.param.part.cfg.format = ids->format; /* stereo/Mono */
+	cmd.param.part.cfg.s_length = 1; /* 24bit left justified*/
+	cmd.map = 0; /* Algo sequence: Gain - DRP - FIR - IIR  */
+
+	sst_fill_and_send_cmd(sst, SST_IPC_IA_CMD, SST_FLAG_BLOCKED,
+			      SST_TASK_SBA, 0, &cmd,
+			      sizeof(cmd.header) + cmd.header.length);
+	if (SND_SOC_DAPM_EVENT_ON(event))
+		sst_send_pipe_module_params(w);
+	return 0;
+}
+
+static int sst_tone_generator_event(struct snd_soc_dapm_widget *w,
+				    struct snd_kcontrol *k, int event)
+{
+	struct sst_cmd_tone_stop cmd;
+	struct sst_data *sst = snd_soc_platform_get_drvdata(w->platform);
+	struct sst_ids *ids = w->priv;
+
+	pr_debug("Enter:%s, widget=%s\n", __func__, w->name);
+	/* in case of tone generator, the params are combined with the ON cmd */
+	if (SND_SOC_DAPM_EVENT_ON(event)) {
+		int len;
+		struct module *algo;
+		struct sst_algo_control *bc;
+		struct sst_cmd_set_params *cmd;
+
+		algo = list_first_entry(&ids->algo_list, struct module, node);
+		if (algo == NULL)
+			return -EINVAL;
+		bc = (void *)algo->kctl->private_value;
+		len = sizeof(cmd->dst) + sizeof(cmd->command_id) + bc->max;
+
+		cmd = kzalloc(len, GFP_KERNEL);
+		if (cmd == NULL) {
+			pr_err("Failed to send cmd, kzalloc failed\n");
+			return -ENOMEM;
+		}
+
+		SST_FILL_DESTINATION(2, cmd->dst, bc->pipe_id, bc->module_id);
+		cmd->command_id = bc->cmd_id;
+		memcpy(cmd->params, bc->params, bc->max);
+
+		sst_fill_and_send_cmd(sst, SST_IPC_IA_CMD, SST_FLAG_BLOCKED,
+				      bc->task_id, 0, cmd, len);
+		kfree(cmd);
+		sst_set_pipe_gain(ids, sst, 0);
+	} else {
+		SST_FILL_DESTINATION(2, cmd.header.dst,
+				     SST_PATH_INDEX_RESERVED, SST_MODULE_ID_TONE_GEN);
+
+		cmd.header.command_id = SBA_VB_STOP_TONE;
+		cmd.header.length = sizeof(struct sst_cmd_tone_stop)
+					 - sizeof(struct sst_dsp_header);
+		cmd.switch_state = SST_SWITCH_OFF;
+		sst_fill_and_send_cmd(sst, SST_IPC_IA_CMD, SST_FLAG_BLOCKED,
+				      SST_TASK_SBA, 0, &cmd,
+				      sizeof(cmd.header) + cmd.header.length);
+	}
+	return 0;
+}
+
+static int sst_send_probe_cmd(struct sst_data *sst, u16 probe_pipe_id,
+			      int mode, int switch_state,
+			      const struct sst_probe_config *probe_cfg)
+{
+	struct sst_cmd_probe cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+
+	SST_FILL_DESTINATION(3, cmd.header.dst, SST_DEFAULT_CELL_NBR,
+			     probe_pipe_id, SST_DEFAULT_MODULE_ID);
+	cmd.header.command_id = SBA_PROBE;
+	cmd.header.length = sizeof(struct sst_cmd_probe)
+				 - sizeof(struct sst_dsp_header);
+	cmd.switch_state = switch_state;
+
+	SST_FILL_DESTINATION(2, cmd.probe_dst,
+			     probe_cfg->loc_id, probe_cfg->mod_id);
+
+	cmd.shared_mem = 1;
+	cmd.probe_in = 0;
+	cmd.probe_out = 0;
+
+	cmd.probe_mode = mode;
+	cmd.cfg.s_length = probe_cfg->cfg.s_length;
+	cmd.cfg.rate = probe_cfg->cfg.rate;
+	cmd.cfg.format = probe_cfg->cfg.format;
+	cmd.sm_buf_id = 1;
+
+	return sst_fill_and_send_cmd(sst, SST_IPC_IA_CMD, SST_FLAG_BLOCKED,
+				     probe_cfg->task_id, 0, &cmd,
+				     sizeof(cmd.header) + cmd.header.length);
+}
+
+static const struct snd_kcontrol_new sst_probe_controls[];
+static const struct sst_probe_config sst_probes[];
+
+#define SST_MAX_PROBE_STREAMS 8
+int sst_dpcm_probe_send(struct snd_soc_platform *platform, u16 probe_pipe_id,
+			int substream, int direction, bool on)
+{
+	int switch_state = on ? SST_SWITCH_ON : SST_SWITCH_OFF;
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+	const struct sst_probe_config *probe_cfg;
+	struct sst_probe_value *probe_val;
+	char *type;
+	int offset;
+	int mode;
+
+	if (direction == SNDRV_PCM_STREAM_CAPTURE) {
+		mode = SST_PROBE_EXTRACTOR;
+		offset = 0;
+		type = "extractor";
+	} else {
+		mode = SST_PROBE_INJECTOR;
+		offset = SST_MAX_PROBE_STREAMS;
+		type = "injector";
+	}
+	/* get the value of the probe connection kcontrol */
+	probe_val = (void *)sst_probe_controls[substream + offset].private_value;
+	probe_cfg = &sst_probes[probe_val->val];
+
+	pr_debug("%s: substream=%d, direction=%d\n", __func__, substream, direction);
+	pr_debug("%s: %s probe point at %s\n", __func__, type, probe_cfg->name);
+
+	return sst_send_probe_cmd(sst, probe_pipe_id, mode, switch_state, probe_cfg);
+}
+
+/**
+ * sst_alloc_hostless_stream - send ALLOC for a stream
+ *
+ * The stream does not send data to IA. The data is consumed by an internal
+ * sink.
+ */
+static int sst_alloc_hostless_stream(const struct sst_pcm_format *pcm_params,
+				     int str_id, uint pipe_id, uint task_id)
+{
+	struct snd_sst_stream_params param;
+	struct snd_sst_params str_params = {0};
+	struct snd_sst_alloc_params_ext alloc_params = {0};
+	int ret_val = 0;
+
+	memset(&param.uc.pcm_params, 0, sizeof(param.uc.pcm_params));
+	param.uc.pcm_params.num_chan = pcm_params->channels_max;
+	param.uc.pcm_params.pcm_wd_sz = pcm_params->sample_bits;
+	param.uc.pcm_params.sfreq = pcm_params->rate_min;
+	pr_debug("%s: sfreq= %d, wd_sz = %d\n", __func__,
+		 param.uc.pcm_params.sfreq, param.uc.pcm_params.pcm_wd_sz);
+
+	str_params.sparams = param;
+	str_params.aparams = alloc_params;
+	str_params.codec = SST_CODEC_TYPE_PCM;
+
+	/* fill the pipe_id and stream id to pass to SST driver */
+	str_params.stream_type = SST_STREAM_TYPE_MUSIC;
+	str_params.stream_id = str_id;
+	str_params.device_type = pipe_id;
+	str_params.task = task_id;
+	str_params.ops = STREAM_OPS_CAPTURE;
+
+	ret_val = sst_dsp->ops->open(&str_params);
+	pr_debug("platform prepare: stream open ret_val = 0x%x\n", ret_val);
+	if (ret_val <= 0)
+		return ret_val;
+
+	return ret_val;
+}
+
+static int sst_hostless_stream_event(struct snd_soc_dapm_widget *w,
+					struct snd_kcontrol *k, int event)
+{
+	struct sst_ids *ids = w->priv;
+
+#define MERR_DPCM_HOSTLESS_STRID 25
+	if (SND_SOC_DAPM_EVENT_ON(event))
+		/* ALLOC */
+		/* FIXME: HACK - FW shouldn't require alloc for aware */
+		return sst_alloc_hostless_stream(ids->pcm_fmt,
+						 MERR_DPCM_HOSTLESS_STRID,
+						 ids->location_id >> SST_PATH_ID_SHIFT,
+						 ids->task_id);
+	else
+		/* FREE */
+		return sst_dsp->ops->close(MERR_DPCM_HOSTLESS_STRID);
+}
+
+static const struct snd_kcontrol_new sst_mix_sw_aware =
+	SOC_SINGLE_EXT("switch", SST_MIX_SWITCH, 0, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_mix_sw_vad =
+	SOC_SINGLE_EXT("switch", SST_MIX_SWITCH, 0, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_vad_enroll[] = {
+	SOC_SINGLE_BOOL_EXT("SST VTSV Enroll", 0, sst_vtsv_enroll_get,
+					sst_vtsv_enroll_set),
+};
+
+static const struct snd_kcontrol_new sst_mix_sw_tone_gen =
+	SOC_SINGLE_EXT("switch", SST_MIX_SWITCH, 1, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const char * const sst_bt_fm_texts[] = {
+	"fm", "bt",
+};
+
+static const struct snd_kcontrol_new sst_bt_fm_mux =
+	SST_SSP_MUX_CTL("ssp1_out", 0, SST_MUX_REG, SST_BT_FM_MUX_SHIFT, sst_bt_fm_texts,
+			sst_mux_get, sst_mux_put);
+
+static const struct sst_pcm_format aware_stream_fmt = {
+	.sample_bits = 24,
+	.rate_min = 8000,
+	.channels_max = 1,
+};
+
+static const struct sst_pcm_format vad_stream_fmt = {
+	.sample_bits = 16,
+	.rate_min = 16000,
+	.channels_max = 1,
+};
+
+static const struct snd_soc_dapm_widget sst_dapm_widgets[] = {
+	SND_SOC_DAPM_INPUT("tone"),
+	SST_DAPM_OUTPUT("aware", SST_PATH_INDEX_AWARE_OUT, SST_TASK_AWARE, &aware_stream_fmt, sst_hostless_stream_event),
+	SST_DAPM_OUTPUT("vad", SST_PATH_INDEX_VAD_OUT, SST_TASK_AWARE, &vad_stream_fmt, sst_hostless_stream_event),
+	SST_AIF_IN("modem_in",  sst_set_be_modules),
+	SST_AIF_IN("codec_in0", sst_set_be_modules),
+	SST_AIF_IN("codec_in1", sst_set_be_modules),
+	SST_AIF_IN("bt_fm_in", sst_set_be_modules),
+	SST_AIF_OUT("modem_out", sst_set_be_modules),
+	SST_AIF_OUT("codec_out0", sst_set_be_modules),
+	SST_AIF_OUT("codec_out1", sst_set_be_modules),
+	SST_AIF_OUT("bt_fm_out", sst_set_be_modules),
+
+	/* Media Paths */
+	/* MediaX IN paths are set via ALLOC, so no SET_MEDIA_PATH command */
+	SST_PATH_INPUT("media0_in", SST_TASK_MMX, SST_SWM_IN_MEDIA0, sst_generic_modules_event),
+	SST_PATH_INPUT("media1_in", SST_TASK_MMX, SST_SWM_IN_MEDIA1, NULL),
+	SST_PATH_INPUT("media2_in", SST_TASK_MMX, SST_SWM_IN_MEDIA2, sst_set_media_path),
+	SST_PATH_INPUT("media3_in", SST_TASK_MMX, SST_SWM_IN_MEDIA3, NULL),
+	SST_PATH_OUTPUT("media0_out", SST_TASK_MMX, SST_SWM_OUT_MEDIA0, sst_set_media_path),
+	SST_PATH_OUTPUT("media1_out", SST_TASK_MMX, SST_SWM_OUT_MEDIA1, sst_set_media_path),
+
+	/* SBA PCM Paths */
+	SST_PATH_INPUT("pcm0_in", SST_TASK_SBA, SST_SWM_IN_PCM0, sst_set_media_path),
+	SST_PATH_INPUT("pcm1_in", SST_TASK_SBA, SST_SWM_IN_PCM1, sst_set_media_path),
+	SST_PATH_OUTPUT("pcm0_out", SST_TASK_SBA, SST_SWM_OUT_PCM0, sst_set_media_path),
+	SST_PATH_OUTPUT("pcm1_out", SST_TASK_SBA, SST_SWM_OUT_PCM1, sst_set_media_path),
+	SST_PATH_OUTPUT("pcm2_out", SST_TASK_SBA, SST_SWM_OUT_PCM2, sst_set_media_path),
+	/* TODO: check if this needs SET_MEDIA_PATH command*/
+	SST_PATH_INPUT("low_pcm0_in", SST_TASK_SBA, SST_SWM_IN_LOW_PCM0, NULL),
+
+	SST_PATH_INPUT("voip_in", SST_TASK_SBA, SST_SWM_IN_VOIP, sst_set_media_path),
+	SST_PATH_OUTPUT("voip_out", SST_TASK_SBA, SST_SWM_OUT_VOIP, sst_set_media_path),
+	SST_PATH_OUTPUT("aware_out", SST_TASK_SBA, SST_SWM_OUT_AWARE, sst_set_media_path),
+	SST_PATH_OUTPUT("vad_out", SST_TASK_SBA, SST_SWM_OUT_VAD, sst_set_media_path),
+
+	/* SBA Loops */
+	SST_PATH_INPUT("sprot_loop_in", SST_TASK_SBA, SST_SWM_IN_SPROT_LOOP, NULL),
+	SST_PATH_INPUT("media_loop1_in", SST_TASK_SBA, SST_SWM_IN_MEDIA_LOOP1, NULL),
+	SST_PATH_INPUT("media_loop2_in", SST_TASK_SBA, SST_SWM_IN_MEDIA_LOOP2, NULL),
+	SST_PATH_MEDIA_LOOP_OUTPUT("sprot_loop_out", SST_TASK_SBA, SST_SWM_OUT_SPROT_LOOP, SST_FMT_MONO, sst_set_media_loop),
+	SST_PATH_MEDIA_LOOP_OUTPUT("media_loop1_out", SST_TASK_SBA, SST_SWM_OUT_MEDIA_LOOP1, SST_FMT_MONO, sst_set_media_loop),
+	SST_PATH_MEDIA_LOOP_OUTPUT("media_loop2_out", SST_TASK_SBA, SST_SWM_OUT_MEDIA_LOOP2, SST_FMT_STEREO, sst_set_media_loop),
+
+	SST_PATH_INPUT("tone_in", SST_TASK_SBA, SST_SWM_IN_TONE, sst_tone_generator_event),
+
+	SST_PATH_LINKED_INPUT("bt_in", SST_TASK_SBA, SST_SWM_IN_BT, "bt_fm_in", sst_set_linked_pipe),
+	SST_PATH_LINKED_INPUT("fm_in", SST_TASK_SBA, SST_SWM_IN_FM, "bt_fm_in", sst_set_linked_pipe),
+	SST_PATH_LINKED_OUTPUT("bt_out", SST_TASK_SBA, SST_SWM_OUT_BT, "bt_fm_out", sst_set_linked_pipe),
+	SST_PATH_LINKED_OUTPUT("fm_out", SST_TASK_SBA, SST_SWM_OUT_FM, "bt_fm_out", sst_set_linked_pipe),
+
+	/* SBA Voice Paths */
+	SST_PATH_LINKED_INPUT("sidetone_in", SST_TASK_SBA, SST_SWM_IN_SIDETONE, "speech_out", sst_set_linked_pipe),
+	SST_PATH_INPUT("speech_in", SST_TASK_SBA, SST_SWM_IN_SPEECH, sst_set_speech_path),
+	SST_PATH_INPUT("txspeech_in", SST_TASK_SBA, SST_SWM_IN_TXSPEECH, sst_set_speech_path),
+	SST_PATH_OUTPUT("hf_sns_out", SST_TASK_SBA, SST_SWM_OUT_HF_SNS, sst_set_speech_path),
+	SST_PATH_OUTPUT("hf_out", SST_TASK_SBA, SST_SWM_OUT_HF, sst_set_speech_path),
+	SST_PATH_OUTPUT("speech_out", SST_TASK_SBA, SST_SWM_OUT_SPEECH, sst_set_speech_path),
+	SST_PATH_OUTPUT("rxspeech_out", SST_TASK_SBA, SST_SWM_OUT_RXSPEECH, sst_set_speech_path),
+
+	/* Media Mixers */
+	SST_SWM_MIXER("media0_out mix 0", SST_MIX_MEDIA0, SST_TASK_MMX, SST_SWM_OUT_MEDIA0,
+		      sst_mix_media0_controls, sst_swm_mixer_event),
+	SST_SWM_MIXER("media1_out mix 0", SST_MIX_MEDIA1, SST_TASK_MMX, SST_SWM_OUT_MEDIA1,
+		      sst_mix_media1_controls, sst_swm_mixer_event),
+
+	/* SBA PCM mixers */
+	SST_SWM_MIXER("pcm0_out mix 0", SST_MIX_PCM0, SST_TASK_SBA, SST_SWM_OUT_PCM0,
+		      sst_mix_pcm0_controls, sst_swm_mixer_event),
+	SST_SWM_MIXER("pcm1_out mix 0", SST_MIX_PCM1, SST_TASK_SBA, SST_SWM_OUT_PCM1,
+		      sst_mix_pcm1_controls, sst_swm_mixer_event),
+	SST_SWM_MIXER("pcm2_out mix 0", SST_MIX_PCM2, SST_TASK_SBA, SST_SWM_OUT_PCM2,
+		      sst_mix_pcm2_controls, sst_swm_mixer_event),
+
+	/* SBA Loop mixers */
+	SST_SWM_MIXER("sprot_loop_out mix 0", SST_MIX_LOOP0, SST_TASK_SBA, SST_SWM_OUT_SPROT_LOOP,
+		      sst_mix_sprot_l0_controls, sst_swm_mixer_event),
+	SST_SWM_MIXER("media_loop1_out mix 0", SST_MIX_LOOP1, SST_TASK_SBA, SST_SWM_OUT_MEDIA_LOOP1,
+		      sst_mix_media_l1_controls, sst_swm_mixer_event),
+	SST_SWM_MIXER("media_loop2_out mix 0", SST_MIX_LOOP2, SST_TASK_SBA, SST_SWM_OUT_MEDIA_LOOP2,
+		      sst_mix_media_l2_controls, sst_swm_mixer_event),
+
+	SST_SWM_MIXER("voip_out mix 0", SST_MIX_VOIP, SST_TASK_SBA, SST_SWM_OUT_VOIP,
+		      sst_mix_voip_controls, sst_swm_mixer_event),
+	SST_SWM_MIXER("aware_out mix 0", SST_MIX_AWARE, SST_TASK_SBA, SST_SWM_OUT_AWARE,
+		      sst_mix_aware_controls, sst_swm_mixer_event),
+	SST_SWM_MIXER("vad_out mix 0", SST_MIX_VAD, SST_TASK_SBA, SST_SWM_OUT_VAD,
+		      sst_mix_vad_controls, sst_swm_mixer_event),
+
+	/* SBA Voice mixers */
+	SST_SWM_MIXER("hf_sns_out mix 0", SST_MIX_HF_SNS, SST_TASK_SBA, SST_SWM_OUT_HF_SNS,
+		      sst_mix_hf_sns_controls, sst_swm_mixer_event),
+	SST_SWM_MIXER("hf_out mix 0", SST_MIX_HF, SST_TASK_SBA, SST_SWM_OUT_HF,
+		      sst_mix_hf_controls, sst_swm_mixer_event),
+	SST_SWM_MIXER("speech_out mix 0", SST_MIX_SPEECH, SST_TASK_SBA, SST_SWM_OUT_SPEECH,
+		      sst_mix_speech_controls, sst_swm_mixer_event),
+	SST_SWM_MIXER("rxspeech_out mix 0", SST_MIX_RXSPEECH, SST_TASK_SBA, SST_SWM_OUT_RXSPEECH,
+		      sst_mix_rxspeech_controls, sst_swm_mixer_event),
+
+	/* SBA Backend mixers */
+	SST_SWM_MIXER("codec_out0 mix 0", SST_MIX_CODEC0, SST_TASK_SBA, SST_SWM_OUT_CODEC0,
+		      sst_mix_codec0_controls, sst_swm_mixer_event),
+	SST_SWM_MIXER("codec_out1 mix 0", SST_MIX_CODEC1, SST_TASK_SBA, SST_SWM_OUT_CODEC1,
+		      sst_mix_codec1_controls, sst_swm_mixer_event),
+	SST_SWM_MIXER("bt_out mix 0", SST_MIX_BT, SST_TASK_SBA, SST_SWM_OUT_BT,
+		      sst_mix_bt_controls, sst_swm_mixer_event),
+	SST_SWM_MIXER("fm_out mix 0", SST_MIX_FM, SST_TASK_SBA, SST_SWM_OUT_FM,
+		      sst_mix_fm_controls, sst_swm_mixer_event),
+	SST_SWM_MIXER("modem_out mix 0", SST_MIX_MODEM, SST_TASK_SBA, SST_SWM_OUT_MODEM,
+		      sst_mix_modem_controls, sst_swm_mixer_event),
+
+	SND_SOC_DAPM_MUX("ssp1_out mux 0", SND_SOC_NOPM, 0, 0, &sst_bt_fm_mux),
+	SND_SOC_DAPM_SWITCH("aware_out aware 0", SND_SOC_NOPM, 0, 0, &sst_mix_sw_aware),
+	SND_SOC_DAPM_SWITCH("vad_out vad 0", SND_SOC_NOPM, 0, 0, &sst_mix_sw_vad),
+
+	SND_SOC_DAPM_SWITCH("tone_in tone_generator 0", SND_SOC_NOPM, 0, 0, &sst_mix_sw_tone_gen),
+};
+
+static const struct snd_soc_dapm_route intercon[] = {
+	{"media0_in", NULL, "Compress Playback"},
+	{"media1_in", NULL, "Headset Playback"},
+	{"media2_in", NULL, "pcm0_out"},
+	{"media3_in", NULL, "Deepbuffer Playback"},
+
+	{"media0_out mix 0", "media0_in", "media0_in"},
+	{"media0_out mix 0", "media1_in", "media1_in"},
+	{"media0_out mix 0", "media2_in", "media2_in"},
+	{"media0_out mix 0", "media3_in", "media3_in"},
+	{"media1_out mix 0", "media0_in", "media0_in"},
+	{"media1_out mix 0", "media1_in", "media1_in"},
+	{"media1_out mix 0", "media2_in", "media2_in"},
+	{"media1_out mix 0", "media3_in", "media3_in"},
+
+	{"media0_out", NULL, "media0_out mix 0"},
+	{"media1_out", NULL, "media1_out mix 0"},
+	{"pcm0_in", NULL, "media0_out"},
+	{"pcm1_in", NULL, "media1_out"},
+
+	{"Headset Capture", NULL, "pcm1_out"},
+	{"Headset Capture", NULL, "pcm2_out"},
+	{"pcm0_out", NULL, "pcm0_out mix 0"},
+	SST_SBA_MIXER_GRAPH_MAP("pcm0_out mix 0"),
+	{"pcm1_out", NULL, "pcm1_out mix 0"},
+	SST_SBA_MIXER_GRAPH_MAP("pcm1_out mix 0"),
+	{"pcm2_out", NULL, "pcm2_out mix 0"},
+	SST_SBA_MIXER_GRAPH_MAP("pcm2_out mix 0"),
+
+	{"media_loop1_in", NULL, "media_loop1_out"},
+	{"media_loop1_out", NULL, "media_loop1_out mix 0"},
+	SST_SBA_MIXER_GRAPH_MAP("media_loop1_out mix 0"),
+	{"media_loop2_in", NULL, "media_loop2_out"},
+	{"media_loop2_out", NULL, "media_loop2_out mix 0"},
+	SST_SBA_MIXER_GRAPH_MAP("media_loop2_out mix 0"),
+	{"sprot_loop_in", NULL, "sprot_loop_out"},
+	{"sprot_loop_out", NULL, "sprot_loop_out mix 0"},
+	SST_SBA_MIXER_GRAPH_MAP("sprot_loop_out mix 0"),
+
+	{"voip_in", NULL, "VOIP Playback"},
+	{"VOIP Capture", NULL, "voip_out"},
+	{"voip_out", NULL, "voip_out mix 0"},
+	SST_SBA_MIXER_GRAPH_MAP("voip_out mix 0"),
+
+	{"aware", NULL, "aware_out"},
+	{"aware_out", NULL, "aware_out aware 0"},
+	{"aware_out aware 0", "switch", "aware_out mix 0"},
+	SST_SBA_MIXER_GRAPH_MAP("aware_out mix 0"),
+	{"vad", NULL, "vad_out"},
+	{"vad_out", NULL, "vad_out vad 0"},
+	{"vad_out vad 0", "switch", "vad_out mix 0"},
+	SST_SBA_MIXER_GRAPH_MAP("vad_out mix 0"),
+
+	{"codec_out0", NULL, "codec_out0 mix 0"},
+	SST_SBA_MIXER_GRAPH_MAP("codec_out0 mix 0"),
+	{"codec_out1", NULL, "codec_out1 mix 0"},
+	SST_SBA_MIXER_GRAPH_MAP("codec_out1 mix 0"),
+	{"modem_out", NULL, "modem_out mix 0"},
+	SST_SBA_MIXER_GRAPH_MAP("modem_out mix 0"),
+
+	{"bt_fm_out", NULL, "ssp1_out mux 0"},
+	{"ssp1_out mux 0", "bt", "bt_out"},
+	{"ssp1_out mux 0", "fm", "fm_out"},
+	{"bt_out", NULL, "bt_out mix 0"},
+	SST_SBA_MIXER_GRAPH_MAP("bt_out mix 0"),
+	{"fm_out", NULL, "fm_out mix 0"},
+	SST_SBA_MIXER_GRAPH_MAP("fm_out mix 0"),
+	{"bt_in", NULL, "bt_fm_in"},
+	{"fm_in", NULL, "bt_fm_in"},
+
+	/* Uplink processing */
+	{"txspeech_in", NULL, "hf_sns_out"},
+	{"txspeech_in", NULL, "hf_out"},
+	{"txspeech_in", NULL, "speech_out"},
+	{"sidetone_in", NULL, "speech_out"},
+
+	{"hf_sns_out", NULL, "hf_sns_out mix 0"},
+	SST_SBA_MIXER_GRAPH_MAP("hf_sns_out mix 0"),
+	{"hf_out", NULL, "hf_out mix 0"},
+	SST_SBA_MIXER_GRAPH_MAP("hf_out mix 0"),
+	{"speech_out", NULL, "speech_out mix 0"},
+	SST_SBA_MIXER_GRAPH_MAP("speech_out mix 0"),
+
+	/* Downlink processing */
+	{"speech_in", NULL, "rxspeech_out"},
+	{"rxspeech_out", NULL, "rxspeech_out mix 0"},
+	SST_SBA_MIXER_GRAPH_MAP("rxspeech_out mix 0"),
+
+	{"tone_in", NULL, "tone_in tone_generator 0"},
+	{"tone_in tone_generator 0", "switch", "tone"},
+
+	/* TODO: add sidetone inputs */
+	/* TODO: add Low Latency stream support */
+};
+
+static const char * const sst_nb_wb_texts[] = {
+	"narrowband", "wideband",
+};
+
+static const struct snd_kcontrol_new sst_mux_controls[] = {
+	SST_SSP_MUX_CTL("domain voice mode", 0, SST_MUX_REG, SST_VOICE_MODE_SHIFT, sst_nb_wb_texts,
+			sst_mode_get, sst_mode_put),
+	SST_SSP_MUX_CTL("domain bt mode", 0, SST_MUX_REG, SST_BT_MODE_SHIFT, sst_nb_wb_texts,
+			sst_mode_get, sst_mode_put),
+};
+
+static const char * const slot_names[] = {
+	"none",
+	"slot 0", "slot 1", "slot 2", "slot 3",
+	"slot 4", "slot 5", "slot 6", "slot 7", /* not supported by FW */
+};
+
+static const char * const channel_names[] = {
+	"none",
+	"codec_out0_0", "codec_out0_1", "codec_out1_0", "codec_out1_1",
+	"codec_out2_0", "codec_out2_1", "codec_out3_0", "codec_out3_1", /* not supported by FW */
+};
+
+#define SST_INTERLEAVER(xpname, slot_name, slotno) \
+	SST_SSP_SLOT_CTL(xpname, "interleaver", slot_name, slotno, true, \
+			 channel_names, sst_slot_get, sst_slot_put)
+
+#define SST_DEINTERLEAVER(xpname, channel_name, channel_no) \
+	SST_SSP_SLOT_CTL(xpname, "deinterleaver", channel_name, channel_no, false, \
+			 slot_names, sst_slot_get, sst_slot_put)
+
+static const struct snd_kcontrol_new sst_slot_controls[] = {
+	SST_INTERLEAVER("codec_out", "slot 0", 0),
+	SST_INTERLEAVER("codec_out", "slot 1", 1),
+	SST_INTERLEAVER("codec_out", "slot 2", 2),
+	SST_INTERLEAVER("codec_out", "slot 3", 3),
+	SST_DEINTERLEAVER("codec_in", "codec_in0_0", 0),
+	SST_DEINTERLEAVER("codec_in", "codec_in0_1", 1),
+	SST_DEINTERLEAVER("codec_in", "codec_in1_0", 2),
+	SST_DEINTERLEAVER("codec_in", "codec_in1_1", 3),
+};
+
+#include "probe_point_dpcm.c"
+
+/* initialized based on names in sst_probes array */
+static const char *sst_probe_enum_texts[ARRAY_SIZE(sst_probes)];
+static const SOC_ENUM_SINGLE_EXT_DECL(sst_probe_enum, sst_probe_enum_texts);
+
+#define SST_PROBE_CTL(name, num)						\
+	SST_PROBE_ENUM(SST_PROBE_CTL_NAME(name, num, "connection"),		\
+		       sst_probe_enum, sst_probe_get, sst_probe_put)
+	/* TODO: implement probe gains
+	SOC_SINGLE_EXT_TLV(SST_PROBE_CTL_NAME(name, num, "gains"), xreg, xshift,
+		xmax, xinv, xget, xput, sst_gain_tlv_common)
+	*/
+
+static const struct snd_kcontrol_new sst_probe_controls[] = {
+	SST_PROBE_CTL("probe out", 0),
+	SST_PROBE_CTL("probe out", 1),
+	SST_PROBE_CTL("probe out", 2),
+	SST_PROBE_CTL("probe out", 3),
+	SST_PROBE_CTL("probe out", 4),
+	SST_PROBE_CTL("probe out", 5),
+	SST_PROBE_CTL("probe out", 6),
+	SST_PROBE_CTL("probe out", 7),
+	SST_PROBE_CTL("probe in", 0),
+	SST_PROBE_CTL("probe in", 1),
+	SST_PROBE_CTL("probe in", 2),
+	SST_PROBE_CTL("probe in", 3),
+	SST_PROBE_CTL("probe in", 4),
+	SST_PROBE_CTL("probe in", 5),
+	SST_PROBE_CTL("probe in", 6),
+	SST_PROBE_CTL("probe in", 7),
+};
+
+/* Gain helper with min/max set */
+#define SST_GAIN(name, path_id, task_id, instance, gain_var)				\
+	SST_GAIN_KCONTROLS(name, "gain", SST_GAIN_MIN_VALUE, SST_GAIN_MAX_VALUE,	\
+		SST_GAIN_TC_MIN, SST_GAIN_TC_MAX,					\
+		sst_gain_get, sst_gain_put,						\
+		SST_MODULE_ID_GAIN_CELL, path_id, instance, task_id,			\
+		sst_gain_tlv_common, gain_var)
+
+#define SST_VOLUME(name, path_id, task_id, instance, gain_var)				\
+	SST_GAIN_KCONTROLS(name, "volume", SST_GAIN_MIN_VALUE, SST_GAIN_MAX_VALUE,	\
+		SST_GAIN_TC_MIN, SST_GAIN_TC_MAX,					\
+		sst_gain_get, sst_gain_put,						\
+		SST_MODULE_ID_VOLUME, path_id, instance, task_id,			\
+		sst_gain_tlv_common, gain_var)
+
+#define SST_NUM_GAINS 36
+static struct sst_gain_value sst_gains[SST_NUM_GAINS];
+
+static const struct snd_kcontrol_new sst_gain_controls[] = {
+	SST_GAIN("media0_in", SST_PATH_INDEX_MEDIA0_IN, SST_TASK_MMX, 0, &sst_gains[0]),
+	SST_GAIN("media1_in", SST_PATH_INDEX_MEDIA1_IN, SST_TASK_MMX, 0, &sst_gains[1]),
+	SST_GAIN("media2_in", SST_PATH_INDEX_MEDIA2_IN, SST_TASK_MMX, 0, &sst_gains[2]),
+	SST_GAIN("media3_in", SST_PATH_INDEX_MEDIA3_IN, SST_TASK_MMX, 0, &sst_gains[3]),
+
+	SST_GAIN("pcm0_in", SST_PATH_INDEX_PCM0_IN, SST_TASK_SBA, 0, &sst_gains[4]),
+	SST_GAIN("pcm1_in", SST_PATH_INDEX_PCM1_IN, SST_TASK_SBA, 0, &sst_gains[5]),
+	SST_GAIN("low_pcm0_in", SST_PATH_INDEX_LOW_PCM0_IN, SST_TASK_SBA, 0, &sst_gains[6]),
+	SST_GAIN("pcm1_out", SST_PATH_INDEX_PCM1_OUT, SST_TASK_SBA, 0, &sst_gains[7]),
+	SST_GAIN("pcm2_out", SST_PATH_INDEX_PCM2_OUT, SST_TASK_SBA, 0, &sst_gains[8]),
+
+	SST_GAIN("voip_in", SST_PATH_INDEX_VOIP_IN, SST_TASK_SBA, 0, &sst_gains[9]),
+	SST_GAIN("voip_out", SST_PATH_INDEX_VOIP_OUT, SST_TASK_SBA, 0, &sst_gains[10]),
+	SST_GAIN("tone_in", SST_PATH_INDEX_TONE_IN, SST_TASK_SBA, 0, &sst_gains[11]),
+
+	SST_GAIN("aware_out", SST_PATH_INDEX_AWARE_OUT, SST_TASK_SBA, 0, &sst_gains[12]),
+	SST_GAIN("vad_out", SST_PATH_INDEX_VAD_OUT, SST_TASK_SBA, 0, &sst_gains[13]),
+
+	SST_GAIN("hf_sns_out", SST_PATH_INDEX_HF_SNS_OUT, SST_TASK_SBA, 0, &sst_gains[14]),
+	SST_GAIN("hf_out", SST_PATH_INDEX_HF_OUT, SST_TASK_SBA, 0, &sst_gains[15]),
+	SST_GAIN("speech_out", SST_PATH_INDEX_SPEECH_OUT, SST_TASK_SBA, 0, &sst_gains[16]),
+	SST_GAIN("txspeech_in", SST_PATH_INDEX_TX_SPEECH_IN, SST_TASK_SBA, 0, &sst_gains[17]),
+	SST_GAIN("rxspeech_out", SST_PATH_INDEX_RX_SPEECH_OUT, SST_TASK_SBA, 0, &sst_gains[18]),
+	SST_GAIN("speech_in", SST_PATH_INDEX_SPEECH_IN, SST_TASK_SBA, 0, &sst_gains[19]),
+
+	SST_GAIN("codec_in0", SST_PATH_INDEX_CODEC_IN0, SST_TASK_SBA, 0, &sst_gains[20]),
+	SST_GAIN("codec_in1", SST_PATH_INDEX_CODEC_IN1, SST_TASK_SBA, 0, &sst_gains[21]),
+	SST_GAIN("codec_out0", SST_PATH_INDEX_CODEC_OUT0, SST_TASK_SBA, 0, &sst_gains[22]),
+	SST_GAIN("codec_out1", SST_PATH_INDEX_CODEC_OUT1, SST_TASK_SBA, 0, &sst_gains[23]),
+	SST_GAIN("bt_out", SST_PATH_INDEX_BT_OUT, SST_TASK_SBA, 0, &sst_gains[24]),
+	SST_GAIN("fm_out", SST_PATH_INDEX_FM_OUT, SST_TASK_SBA, 0, &sst_gains[25]),
+	SST_GAIN("bt_in", SST_PATH_INDEX_BT_IN, SST_TASK_SBA, 0, &sst_gains[26]),
+	SST_GAIN("fm_in", SST_PATH_INDEX_FM_IN, SST_TASK_SBA, 0, &sst_gains[27]),
+	SST_GAIN("modem_in", SST_PATH_INDEX_MODEM_IN, SST_TASK_SBA, 0, &sst_gains[28]),
+	SST_GAIN("modem_out", SST_PATH_INDEX_MODEM_OUT, SST_TASK_SBA, 0, &sst_gains[29]),
+	SST_GAIN("media_loop1_out", SST_PATH_INDEX_MEDIA_LOOP1_OUT, SST_TASK_SBA, 0, &sst_gains[30]),
+	SST_GAIN("media_loop2_out", SST_PATH_INDEX_MEDIA_LOOP2_OUT, SST_TASK_SBA, 0, &sst_gains[31]),
+	SST_GAIN("sprot_loop_out", SST_PATH_INDEX_SPROT_LOOP_OUT, SST_TASK_SBA, 0, &sst_gains[32]),
+	SST_VOLUME("media0_in", SST_PATH_INDEX_MEDIA0_IN, SST_TASK_MMX, 0, &sst_gains[33]),
+	SST_GAIN("sidetone_in", SST_PATH_INDEX_SIDETONE_IN, SST_TASK_SBA, 0, &sst_gains[34]),
+	SST_GAIN("speech_out", SST_PATH_INDEX_SPEECH_OUT, SST_TASK_FBA_UL, 1, &sst_gains[35]),
+};
+
+static const struct snd_kcontrol_new sst_algo_controls[] = {
+	SST_ALGO_KCONTROL_BYTES("media_loop1_out", "fir", 272, SST_MODULE_ID_FIR_24,
+		 SST_PATH_INDEX_MEDIA_LOOP1_OUT, 0, SST_TASK_SBA, SBA_VB_SET_FIR),
+	SST_ALGO_KCONTROL_BYTES("media_loop1_out", "iir", 300, SST_MODULE_ID_IIR_24,
+		SST_PATH_INDEX_MEDIA_LOOP1_OUT, 0, SST_TASK_SBA, SBA_VB_SET_IIR),
+	SST_ALGO_KCONTROL_BYTES("media_loop1_out", "mdrp", 286, SST_MODULE_ID_MDRP,
+		SST_PATH_INDEX_MEDIA_LOOP1_OUT, 0, SST_TASK_SBA, SBA_SET_MDRP),
+	SST_ALGO_KCONTROL_BYTES("media_loop2_out", "fir", 272, SST_MODULE_ID_FIR_24,
+		SST_PATH_INDEX_MEDIA_LOOP2_OUT, 0, SST_TASK_SBA, SBA_VB_SET_FIR),
+	SST_ALGO_KCONTROL_BYTES("media_loop2_out", "iir", 300, SST_MODULE_ID_IIR_24,
+		SST_PATH_INDEX_MEDIA_LOOP2_OUT, 0, SST_TASK_SBA, SBA_VB_SET_IIR),
+	SST_ALGO_KCONTROL_BYTES("media_loop2_out", "mdrp", 286, SST_MODULE_ID_MDRP,
+		SST_PATH_INDEX_MEDIA_LOOP2_OUT, 0, SST_TASK_SBA, SBA_SET_MDRP),
+	SST_ALGO_KCONTROL_BYTES("aware_out", "fir", 272, SST_MODULE_ID_FIR_24,
+		SST_PATH_INDEX_AWARE_OUT, 0, SST_TASK_SBA, SBA_VB_SET_FIR),
+	SST_ALGO_KCONTROL_BYTES("aware_out", "iir", 300, SST_MODULE_ID_IIR_24,
+		SST_PATH_INDEX_AWARE_OUT, 0, SST_TASK_SBA, SBA_VB_SET_IIR),
+	SST_ALGO_KCONTROL_BYTES("aware_out", "aware", 48, SST_MODULE_ID_CONTEXT_ALGO_AWARE,
+		SST_PATH_INDEX_AWARE_OUT, 0, SST_TASK_AWARE, AWARE_ENV_CLASS_PARAMS),
+	SST_ALGO_KCONTROL_BYTES("vad_out", "fir", 272, SST_MODULE_ID_FIR_24,
+		SST_PATH_INDEX_VAD_OUT, 0, SST_TASK_SBA, SBA_VB_SET_FIR),
+	SST_ALGO_KCONTROL_BYTES("vad_out", "iir", 300, SST_MODULE_ID_IIR_24,
+		SST_PATH_INDEX_VAD_OUT, 0, SST_TASK_SBA, SBA_VB_SET_IIR),
+	SST_ALGO_KCONTROL_BYTES("vad_out", "vad", 28, SST_MODULE_ID_ALGO_VTSV,
+		SST_PATH_INDEX_VAD_OUT, 0, SST_TASK_AWARE, VAD_ENV_CLASS_PARAMS),
+	SST_ALGO_KCONTROL_BYTES("sprot_loop_out", "lpro", 192, SST_MODULE_ID_SPROT,
+		SST_PATH_INDEX_SPROT_LOOP_OUT, 0, SST_TASK_SBA, SBA_VB_LPRO),
+	SST_ALGO_KCONTROL_BYTES("modem_in", "dcr", 60, SST_MODULE_ID_FILT_DCR,
+		SST_PATH_INDEX_MODEM_IN, 0, SST_TASK_SBA, SBA_VB_SET_IIR),
+	SST_ALGO_KCONTROL_BYTES("bt_in", "dcr", 60, SST_MODULE_ID_FILT_DCR,
+		SST_PATH_INDEX_BT_IN, 0, SST_TASK_SBA, SBA_VB_SET_IIR),
+	SST_ALGO_KCONTROL_BYTES("codec_in0", "dcr", 52, SST_MODULE_ID_FILT_DCR,
+		SST_PATH_INDEX_CODEC_IN0, 0, SST_TASK_SBA, SBA_VB_SET_IIR),
+	SST_ALGO_KCONTROL_BYTES("codec_in1", "dcr", 52, SST_MODULE_ID_FILT_DCR,
+		SST_PATH_INDEX_CODEC_IN1, 0, SST_TASK_SBA, SBA_VB_SET_IIR),
+	SST_ALGO_KCONTROL_BYTES("fm_in", "dcr", 60, SST_MODULE_ID_FILT_DCR,
+		SST_PATH_INDEX_FM_IN, 0, SST_TASK_SBA, SBA_VB_SET_IIR),
+	/* Uplink */
+	SST_COMBO_ALGO_KCONTROL_BYTES("speech_out", "ul_module", "fir_speech", 134, SST_MODULE_ID_FIR_16,
+		SST_PATH_INDEX_SPEECH_OUT, 0, SST_TASK_FBA_UL, FBA_VB_SET_FIR),
+	SST_COMBO_ALGO_KCONTROL_BYTES("speech_out", "ul_module", "fir_hf_sns", 134, SST_MODULE_ID_FIR_16,
+		SST_PATH_INDEX_HF_SNS_OUT, 0, SST_TASK_FBA_UL, FBA_VB_SET_FIR | (0x0001<<11)),
+	SST_COMBO_ALGO_KCONTROL_BYTES("speech_out", "ul_module", "iir_speech", 46, SST_MODULE_ID_IIR_16,
+		SST_PATH_INDEX_SPEECH_OUT, 0, SST_TASK_FBA_UL, FBA_VB_SET_IIR),
+	SST_COMBO_ALGO_KCONTROL_BYTES("speech_out", "ul_module", "iir_hf_sns", 46, SST_MODULE_ID_IIR_16,
+		SST_PATH_INDEX_HF_SNS_OUT, 0, SST_TASK_FBA_UL, FBA_VB_SET_IIR | (0x0001<<11)),
+	SST_COMBO_ALGO_KCONTROL_BYTES("speech_out", "ul_module", "aec", 642, SST_MODULE_ID_AEC,
+		SST_PATH_INDEX_SPEECH_OUT, 0, SST_TASK_FBA_UL, FBA_VB_AEC),
+	SST_COMBO_ALGO_KCONTROL_BYTES("speech_out", "ul_module", "nr", 38, SST_MODULE_ID_NR,
+		SST_PATH_INDEX_SPEECH_OUT, 0, SST_TASK_FBA_UL, FBA_VB_NR_UL),
+	SST_COMBO_ALGO_KCONTROL_BYTES("speech_out", "ul_module", "agc", 62, SST_MODULE_ID_AGC,
+		SST_PATH_INDEX_SPEECH_OUT, 0, SST_TASK_FBA_UL, FBA_VB_AGC),
+	SST_COMBO_ALGO_KCONTROL_BYTES("speech_out", "ul_module", "compr", 100, SST_MODULE_ID_DRP,
+		SST_PATH_INDEX_SPEECH_OUT, 0, SST_TASK_FBA_UL, FBA_VB_DUAL_BAND_COMP),
+	SST_COMBO_ALGO_KCONTROL_BYTES("speech_out", "ul_module", "ser", 44, SST_MODULE_ID_SER,
+		SST_PATH_INDEX_SPEECH_OUT, 0, SST_TASK_FBA_UL, FBA_VB_SER),
+	SST_COMBO_ALGO_KCONTROL_BYTES("speech_out", "ul_module", "cni", 48, SST_MODULE_ID_CNI_TX,
+		SST_PATH_INDEX_SPEECH_OUT, 0, SST_TASK_FBA_UL, FBA_VB_TX_CNI),
+	SST_COMBO_ALGO_KCONTROL_BYTES("speech_out", "ul_module", "ref", 24, SST_MODULE_ID_REF_LINE,
+		SST_PATH_INDEX_HF_OUT, 0, SST_TASK_FBA_UL, FBA_VB_SET_REF_LINE),
+	SST_COMBO_ALGO_KCONTROL_BYTES("speech_out", "ul_module", "delay", 6, SST_MODULE_ID_EDL,
+		SST_PATH_INDEX_HF_OUT, 0, SST_TASK_FBA_UL, FBA_VB_SET_DELAY_LINE),
+	SST_COMBO_ALGO_KCONTROL_BYTES("speech_out", "ul_module", "bmf", 572, SST_MODULE_ID_BMF,
+		SST_PATH_INDEX_HF_SNS_OUT, 0, SST_TASK_FBA_UL, FBA_VB_BMF),
+	SST_COMBO_ALGO_KCONTROL_BYTES("speech_out", "ul_module", "dnr", 56, SST_MODULE_ID_DNR,
+		SST_PATH_INDEX_SPEECH_OUT, 0, SST_TASK_FBA_UL, FBA_VB_DNR),
+	SST_COMBO_ALGO_KCONTROL_BYTES("speech_out", "ul_module", "wnr", 64, SST_MODULE_ID_WNR,
+		SST_PATH_INDEX_SPEECH_OUT, 0, SST_TASK_FBA_UL, FBA_VB_WNR),
+	SST_COMBO_ALGO_KCONTROL_BYTES("speech_out", "ul_module", "tnr", 38, SST_MODULE_ID_TNR,
+		SST_PATH_INDEX_SPEECH_OUT, 0, SST_TASK_FBA_UL, FBA_VB_TNR_UL),
+	SST_COMBO_ALGO_KCONTROL_BYTES("speech_out", "ul_module", "nlf", 236, SST_MODULE_ID_NLF,
+		SST_PATH_INDEX_SPEECH_OUT, 0, SST_TASK_FBA_UL, FBA_VB_NLF),
+
+	/* Downlink */
+	SST_COMBO_ALGO_KCONTROL_BYTES("speech_in", "dl_module", "ana", 52, SST_MODULE_ID_ANA,
+		SST_PATH_INDEX_SPEECH_IN, 0, SST_TASK_FBA_DL, FBA_VB_ANA),
+	SST_COMBO_ALGO_KCONTROL_BYTES("speech_in", "dl_module", "fir", 134, SST_MODULE_ID_FIR_16,
+		SST_PATH_INDEX_SPEECH_IN, 0, SST_TASK_FBA_DL, FBA_VB_SET_FIR),
+	SST_COMBO_ALGO_KCONTROL_BYTES("speech_in", "dl_module", "iir", 46, SST_MODULE_ID_IIR_16,
+		SST_PATH_INDEX_SPEECH_IN, 0, SST_TASK_FBA_DL, FBA_VB_SET_IIR),
+	SST_COMBO_ALGO_KCONTROL_BYTES("speech_in", "dl_module", "nr", 38, SST_MODULE_ID_NR,
+		SST_PATH_INDEX_SPEECH_IN, 0, SST_TASK_FBA_DL, FBA_VB_NR_DL),
+	SST_COMBO_ALGO_KCONTROL_BYTES("speech_in", "dl_module", "compr", 100, SST_MODULE_ID_DRP,
+		SST_PATH_INDEX_SPEECH_IN, 0, SST_TASK_FBA_DL, FBA_VB_DUAL_BAND_COMP),
+	SST_COMBO_ALGO_KCONTROL_BYTES("speech_in", "dl_module", "cni", 28, SST_MODULE_ID_CNI,
+		SST_PATH_INDEX_SPEECH_IN, 0, SST_TASK_FBA_DL, FBA_VB_RX_CNI),
+	SST_COMBO_ALGO_KCONTROL_BYTES("speech_in", "dl_module", "bwx", 54, SST_MODULE_ID_BWX,
+		SST_PATH_INDEX_SPEECH_IN, 0, SST_TASK_FBA_DL, FBA_VB_BWX),
+	SST_COMBO_ALGO_KCONTROL_BYTES("speech_in", "dl_module", "gmm", 586, SST_MODULE_ID_BWX,
+		SST_PATH_INDEX_SPEECH_IN, 0, SST_TASK_FBA_DL, FBA_VB_GMM),
+	SST_COMBO_ALGO_KCONTROL_BYTES("speech_in", "dl_module", "glc", 18, SST_MODULE_ID_GLC,
+		SST_PATH_INDEX_SPEECH_IN, 0, SST_TASK_FBA_DL, FBA_VB_GLC),
+	SST_COMBO_ALGO_KCONTROL_BYTES("speech_in", "dl_module", "tnr", 38, SST_MODULE_ID_TNR,
+		SST_PATH_INDEX_SPEECH_IN, 0, SST_TASK_FBA_DL, FBA_VB_TNR_DL),
+	SST_COMBO_ALGO_KCONTROL_BYTES("speech_in", "dl_module", "slv", 34, SST_MODULE_ID_SLV,
+		SST_PATH_INDEX_SPEECH_IN, 0, SST_TASK_FBA_DL, FBA_VB_SLV),
+	SST_COMBO_ALGO_KCONTROL_BYTES("speech_in", "dl_module", "mdrp", 134, SST_MODULE_ID_MDRP,
+		SST_PATH_INDEX_SPEECH_IN, 0, SST_TASK_FBA_DL, FBA_VB_MDRP),
+
+	/* Tone Generator */
+	SST_ALGO_KCONTROL_BYTES("tone_in", "tone_generator", 116, SST_MODULE_ID_TONE_GEN,
+		SST_PATH_INDEX_RESERVED, 0, SST_TASK_SBA, SBA_VB_START_TONE),
+
+	/* Sidetone */
+	SST_ALGO_KCONTROL_BYTES("sidetone_in", "iir", 300, SST_MODULE_ID_IIR_24,
+		SST_PATH_INDEX_SIDETONE_IN, 0, SST_TASK_SBA, SBA_VB_SET_IIR),
+
+};
+
+static const struct snd_kcontrol_new sst_debug_controls[] = {
+	SND_SOC_BYTES_EXT("sst debug byte control", SST_MAX_BIN_BYTES,
+		       sst_byte_control_get, sst_byte_control_set),
+};
+
+static inline bool is_sst_dapm_widget(struct snd_soc_dapm_widget *w)
+{
+	if ((w->id == snd_soc_dapm_pga) ||
+	    (w->id == snd_soc_dapm_aif_in) ||
+	    (w->id == snd_soc_dapm_aif_out) ||
+	    (w->id == snd_soc_dapm_input) ||
+	    (w->id == snd_soc_dapm_output) ||
+	    (w->id == snd_soc_dapm_mixer))
+		return true;
+	else
+		return false;
+}
+
+/**
+ * sst_send_pipe_gains - send gains for the front-end DAIs
+ *
+ * The gains in the pipes connected to the front-ends are muted/unmuted
+ * automatically via the digital_mute() DAPM callback. This function sends the
+ * gains for the front-end pipes.
+ */
+int sst_send_pipe_gains(struct snd_soc_dai *dai, int stream, int mute)
+{
+	struct snd_soc_platform *platform = dai->platform;
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+	struct snd_soc_dapm_widget *w;
+	struct snd_soc_dapm_path *p = NULL;
+
+	pr_debug("%s: enter, dai-name=%s dir=%d\n", __func__, dai->name, stream);
+
+	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		pr_debug("Stream name=%s\n", dai->playback_widget->name);
+		w = dai->playback_widget;
+		list_for_each_entry(p, &w->sinks, list_source) {
+			if (p->connected && !p->connected(w, p->sink))
+				continue;
+
+			if (p->connect && p->sink->power && is_sst_dapm_widget(p->sink)) {
+				struct sst_ids *ids = p->sink->priv;
+
+				pr_debug("send gains for widget=%s\n", p->sink->name);
+				sst_set_pipe_gain(ids, sst, mute);
+			}
+		}
+	} else {
+		pr_debug("Stream name=%s\n", dai->capture_widget->name);
+		w = dai->capture_widget;
+		list_for_each_entry(p, &w->sources, list_sink) {
+			if (p->connected && !p->connected(w, p->sink))
+				continue;
+
+			if (p->connect &&  p->source->power && is_sst_dapm_widget(p->source)) {
+				struct sst_ids *ids = p->source->priv;
+
+				pr_debug("send gain for widget=%s\n", p->source->name);
+				sst_set_pipe_gain(ids, sst, mute);
+			}
+		}
+	}
+	return 0;
+}
+
+/**
+ * sst_fill_module_list - populate the list of modules/gains for a pipe
+ *
+ *
+ * Fills the widget pointer in the kcontrol private data, and also fills the
+ * kcontrol pointer in the widget private data.
+ *
+ * Widget pointer is used to send the algo/gain in the .put() handler if the
+ * widget is powerd on.
+ *
+ * Kcontrol pointer is used to send the algo/gain in the widget power ON/OFF
+ * event handler. Each widget (pipe) has multiple algos stored in the algo_list.
+ */
+static int sst_fill_module_list(struct snd_kcontrol *kctl,
+	 struct snd_soc_dapm_widget *w, int type)
+{
+	struct module *module = NULL;
+	struct sst_ids *ids = w->priv;
+
+	module = devm_kzalloc(w->platform->dev, sizeof(*module), GFP_KERNEL);
+	if (!module) {
+		pr_err("kzalloc block failed\n");
+		return -ENOMEM;
+	}
+
+	if (type == SST_MODULE_GAIN) {
+		struct sst_gain_mixer_control *mc = (void *)kctl->private_value;
+
+		mc->w = w;
+		module->kctl = kctl;
+		list_add_tail(&module->node, &ids->gain_list);
+	} else if (type == SST_MODULE_ALGO) {
+		struct sst_algo_control *bc = (void *)kctl->private_value;
+
+		bc->w = w;
+		module->kctl = kctl;
+		list_add_tail(&module->node, &ids->algo_list);
+	}
+
+	return 0;
+}
+
+/**
+ * sst_fill_widget_module_info - fill list of gains/algos for the pipe
+ * @widget:	pipe modelled as a DAPM widget
+ *
+ * Fill the list of gains/algos for the widget by looking at all the card
+ * controls and comparing the name of the widget with the first part of control
+ * name. First part of control name contains the pipe name (widget name).
+ */
+static int sst_fill_widget_module_info(struct snd_soc_dapm_widget *w,
+	struct snd_soc_platform *platform)
+{
+	struct snd_kcontrol *kctl;
+	int index, ret = 0;
+	struct snd_card *card = platform->card->snd_card;
+	char *idx;
+
+	down_read(&card->controls_rwsem);
+
+	list_for_each_entry(kctl, &card->controls, list) {
+		idx = strstr(kctl->id.name, " ");
+		if (idx == NULL)
+			continue;
+		index  = strlen(kctl->id.name) - strlen(idx);
+		if (strstr(kctl->id.name, "volume") &&
+		    !strncmp(kctl->id.name, w->name, index))
+			ret = sst_fill_module_list(kctl, w, SST_MODULE_GAIN);
+		else if (strstr(kctl->id.name, "params") &&
+			 !strncmp(kctl->id.name, w->name, index))
+			ret = sst_fill_module_list(kctl, w, SST_MODULE_ALGO);
+		else if (strstr(kctl->id.name, "mute") &&
+			 !strncmp(kctl->id.name, w->name, index)) {
+			struct sst_gain_mixer_control *mc = (void *)kctl->private_value;
+			mc->w = w;
+		} else if (strstr(kctl->id.name, "interleaver") &&
+			 !strncmp(kctl->id.name, w->name, index)) {
+			struct sst_enum *e = (void *)kctl->private_value;
+			e->w = w;
+		} else if (strstr(kctl->id.name, "deinterleaver") &&
+			 !strncmp(kctl->id.name, w->name, index)) {
+			struct sst_enum *e = (void *)kctl->private_value;
+			e->w = w;
+		}
+		if (ret < 0) {
+			up_read(&card->controls_rwsem);
+			return ret;
+		}
+	}
+	up_read(&card->controls_rwsem);
+	return 0;
+}
+
+/**
+ * sst_fill_linked_widgets - fill the parent pointer for the linked widget
+ */
+static void sst_fill_linked_widgets(struct snd_soc_platform *platform,
+						struct sst_ids *ids)
+{
+	struct snd_soc_dapm_widget *w;
+	struct snd_soc_dapm_context *dapm = &platform->dapm;
+
+	unsigned int len = strlen(ids->parent_wname);
+	list_for_each_entry(w, &dapm->card->widgets, list) {
+		if (!strncmp(ids->parent_wname, w->name, len)) {
+			ids->parent_w = w;
+			break;
+		}
+	}
+}
+
+/**
+ * sst_map_modules_to_pipe - fill algo/gains list for all pipes
+ */
+static int sst_map_modules_to_pipe(struct snd_soc_platform *platform)
+{
+	struct snd_soc_dapm_widget *w;
+	struct snd_soc_dapm_context *dapm = &platform->dapm;
+	int ret = 0;
+
+	list_for_each_entry(w, &dapm->card->widgets, list) {
+		if (w->platform && is_sst_dapm_widget(w) && (w->priv)) {
+			struct sst_ids *ids = w->priv;
+
+			pr_debug("widget type=%d name=%s\n", w->id, w->name);
+			INIT_LIST_HEAD(&ids->algo_list);
+			INIT_LIST_HEAD(&ids->gain_list);
+			ret = sst_fill_widget_module_info(w, platform);
+			if (ret < 0)
+				return ret;
+			/* fill linked widgets */
+			if (ids->parent_wname !=  NULL)
+				sst_fill_linked_widgets(platform, ids);
+		}
+	}
+	return 0;
+}
+
+int sst_dsp_init_v2_dpcm(struct snd_soc_platform *platform)
+{
+	int i, ret = 0;
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+
+	sst->byte_stream = devm_kzalloc(platform->dev,
+					SST_MAX_BIN_BYTES, GFP_KERNEL);
+	if (!sst->byte_stream) {
+		pr_err("%s: kzalloc failed\n", __func__);
+		return -ENOMEM;
+	}
+	sst->widget = devm_kzalloc(platform->dev,
+				   SST_NUM_WIDGETS * sizeof(*sst->widget),
+				   GFP_KERNEL);
+	if (!sst->widget) {
+		pr_err("%s: kzalloc failed\n", __func__);
+		return -ENOMEM;
+	}
+
+	snd_soc_dapm_new_controls(&platform->dapm, sst_dapm_widgets,
+			ARRAY_SIZE(sst_dapm_widgets));
+	snd_soc_dapm_add_routes(&platform->dapm, intercon,
+			ARRAY_SIZE(intercon));
+	snd_soc_dapm_new_widgets(&platform->dapm);
+
+	for (i = 0; i < SST_NUM_GAINS; i++) {
+		sst_gains[i].mute = SST_GAIN_MUTE_DEFAULT;
+		sst_gains[i].l_gain = SST_GAIN_VOLUME_DEFAULT;
+		sst_gains[i].r_gain = SST_GAIN_VOLUME_DEFAULT;
+		sst_gains[i].ramp_duration = SST_GAIN_RAMP_DURATION_DEFAULT;
+	}
+
+	snd_soc_add_platform_controls(platform, sst_gain_controls,
+			ARRAY_SIZE(sst_gain_controls));
+
+	snd_soc_add_platform_controls(platform, sst_algo_controls,
+			ARRAY_SIZE(sst_algo_controls));
+	snd_soc_add_platform_controls(platform, sst_slot_controls,
+			ARRAY_SIZE(sst_slot_controls));
+	snd_soc_add_platform_controls(platform, sst_mux_controls,
+			ARRAY_SIZE(sst_mux_controls));
+	snd_soc_add_platform_controls(platform, sst_debug_controls,
+			ARRAY_SIZE(sst_debug_controls));
+	snd_soc_add_platform_controls(platform, sst_vad_enroll,
+			ARRAY_SIZE(sst_vad_enroll));
+
+	/* initialize the names of the probe points */
+	for (i = 0; i < ARRAY_SIZE(sst_probes); i++)
+		sst_probe_enum_texts[i] = sst_probes[i].name;
+
+	snd_soc_add_platform_controls(platform, sst_probe_controls,
+			ARRAY_SIZE(sst_probe_controls));
+
+	ret = sst_map_modules_to_pipe(platform);
+
+	return ret;
+}
diff --git a/sound/soc/intel/platform-libs/ipc_lib.h b/sound/soc/intel/platform-libs/ipc_lib.h
new file mode 100644
index 0000000..6ba50c4
--- /dev/null
+++ b/sound/soc/intel/platform-libs/ipc_lib.h
@@ -0,0 +1,33 @@
+/*
+ *  ipc_lib.h - Intel MID Platform driver header file
+ *
+ *  Copyright (C) 2013 Intel Corp
+ *  Author: Lakshmi N Vinnakota <lakshmi.n.vinnakota@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *
+ */
+
+#ifndef __PLATFORMDRV_IPC_LIB_H__
+#define __PLATFORMDRV_IPC_LIB_H__
+
+struct sst_algo_int_control_v2;
+
+void sst_create_compr_vol_ipc(char *bytes, unsigned int type,
+		struct sst_algo_int_control_v2 *kdata);
+#endif
diff --git a/sound/soc/intel/platform-libs/ipc_lib_v2.c b/sound/soc/intel/platform-libs/ipc_lib_v2.c
new file mode 100644
index 0000000..105c756
--- /dev/null
+++ b/sound/soc/intel/platform-libs/ipc_lib_v2.c
@@ -0,0 +1,109 @@
+/*
+ *  ipc_lib_v2.c - Intel MID Platform Driver IPC wrappers for mrfld
+ *
+ *  Copyright (C) 2013 Intel Corp
+ *  Author: Lakshmi N Vinnakota <lakshmi.n.vinnakota@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *
+ */
+#include <sound/soc.h>
+#include <asm/platform_sst_audio.h>
+#include "../platform_ipc_v2.h"
+#include "../sst_platform.h"
+#include "../sst_platform_pvt.h"
+
+
+static inline void sst_fill_dsp_hdr(struct ipc_dsp_hdr *hdr, u8 index, u8 pipe,
+	u16 module, u16 cmd, u16 len)
+{
+	hdr->mod_index_id = index;
+	hdr->pipe_id = pipe;
+	hdr->mod_id = module;
+	hdr->cmd_id = cmd;
+	hdr->length = len;
+
+}
+
+static inline void sst_fill_byte_control_hdr(struct snd_sst_bytes_v2 *hdr,
+	u8 type, u8 msg, u8 block, u8 task, u8 pipe, u16 len)
+{
+	hdr->type = type;
+	hdr->ipc_msg = msg;
+	hdr->block = block;
+	hdr->task_id = task;
+	hdr->pipe_id = pipe;
+	hdr->rsvd = 0;
+	hdr->len = len;
+}
+
+#define SST_GAIN_V2_TIME_CONST 50
+
+void sst_create_compr_vol_ipc(char *bytes, unsigned int type,
+	struct sst_algo_int_control_v2 *kdata)
+{
+	struct snd_sst_gain_v2 gain1;
+	struct snd_sst_bytes_v2 byte_hdr;
+	struct ipc_dsp_hdr dsp_hdr;
+	char *tmp;
+	u16 len;
+	u8 ipc_msg;
+
+	/* Fill gain params */
+	gain1.gain_cell_num = 1;  /* num of gain cells to modify*/
+	gain1.cell_nbr_idx = kdata->instance_id; /* instance index */
+	gain1.cell_path_idx = kdata->pipe_id; /* pipe id */
+	gain1.module_id = kdata->module_id; /*module id */
+	gain1.left_cell_gain = kdata->value; /* left gain value in dB*/
+	gain1.right_cell_gain = kdata->value; /* same value as left in dB*/
+	/* set to default recommended value*/
+	gain1.gain_time_const = SST_GAIN_V2_TIME_CONST;
+
+	/* fill dsp header */
+	/* Get params format for vol ctrl lib, size 6 bytes :
+	 * u16 left_gain, u16 right_gain, u16 ramp
+	 */
+	memset(&dsp_hdr, 0, sizeof(dsp_hdr));
+	if (type == SND_SST_BYTES_GET) {
+		len = 6;
+		ipc_msg = IPC_GET_PARAMS;
+	} else {
+		len = sizeof(gain1);
+		ipc_msg = IPC_SET_PARAMS;
+	}
+
+	sst_fill_dsp_hdr(&dsp_hdr, 0, kdata->pipe_id, kdata->module_id,
+				IPC_IA_SET_GAIN_MRFLD, len);
+
+	/* fill byte control header */
+	memset(&byte_hdr, 0, sizeof(byte_hdr));
+	len = sizeof(dsp_hdr) + dsp_hdr.length;
+	sst_fill_byte_control_hdr(&byte_hdr, type, ipc_msg, 1,
+			SST_TASK_ID_MEDIA, kdata->pipe_id, len);
+
+	/* fill complete byte stream as ipc payload */
+	tmp = bytes;
+	memcpy(tmp, &byte_hdr, sizeof(byte_hdr));
+	memcpy((tmp + sizeof(byte_hdr)), &dsp_hdr, sizeof(dsp_hdr));
+	if (type != SND_SST_BYTES_GET)
+		memcpy((tmp + sizeof(byte_hdr) + sizeof(dsp_hdr)), &gain1,
+			sizeof(gain1));
+#ifdef DEBUG_HEX_DUMP_BYTES
+	print_hex_dump_bytes(__func__, DUMP_PREFIX_NONE, bytes, 32);
+#endif
+}
diff --git a/sound/soc/intel/platform-libs/probe_point_dpcm.c b/sound/soc/intel/platform-libs/probe_point_dpcm.c
new file mode 100644
index 0000000..bff107d
--- /dev/null
+++ b/sound/soc/intel/platform-libs/probe_point_dpcm.c
@@ -0,0 +1,172 @@
+/*
+ *  probe_point_dpcm.c - Intel MID probe definition
+ *
+ *  Copyright (C) 2014 Intel Corp
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+static const struct sst_probe_config sst_probes[] = {
+	/* TODO: get this struct from FW config data */
+	/* gain outputs  */
+	{ "media0_in gain", SST_PATH_INDEX_MEDIA0_IN, SST_MODULE_ID_GAIN_CELL, SST_TASK_MMX, { 1, 2, 1 } },
+	{ "media1_in gain", SST_PATH_INDEX_MEDIA1_IN, SST_MODULE_ID_GAIN_CELL, SST_TASK_MMX, { 1, 2, 1 } },
+	{ "media2_in gain", SST_PATH_INDEX_MEDIA2_IN, SST_MODULE_ID_GAIN_CELL, SST_TASK_MMX, { 1, 2, 1 } },
+	{ "media3_in gain", SST_PATH_INDEX_MEDIA3_IN, SST_MODULE_ID_GAIN_CELL, SST_TASK_MMX, { 1, 2, 1 } },
+	{ "pcm0_in gain", SST_PATH_INDEX_PCM0_IN, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "pcm1_in gain", SST_PATH_INDEX_PCM1_IN, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "pcm1_out gain", SST_PATH_INDEX_PCM1_OUT, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "pcm2_out gain", SST_PATH_INDEX_PCM2_OUT, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "voip_in gain", SST_PATH_INDEX_VOIP_IN, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "voip_out gain", SST_PATH_INDEX_VOIP_OUT, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "aware_out gain", SST_PATH_INDEX_AWARE_OUT, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "vad_out gain", SST_PATH_INDEX_VAD_OUT, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "hf_sns_out gain", SST_PATH_INDEX_HF_SNS_OUT, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "hf_out gain", SST_PATH_INDEX_HF_OUT, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "speech_out gain", SST_PATH_INDEX_SPEECH_OUT, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "txspeech_in gain", SST_PATH_INDEX_TX_SPEECH_IN, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "rxspeech_out gain", SST_PATH_INDEX_RX_SPEECH_OUT, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "speech_in gain", SST_PATH_INDEX_SPEECH_IN, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "media_loop1_out gain", SST_PATH_INDEX_MEDIA_LOOP1_OUT , SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "media_loop2_out gain", SST_PATH_INDEX_MEDIA_LOOP2_OUT , SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "tone_in gain", SST_PATH_INDEX_TONE_IN, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "codec_out0 gain", SST_PATH_INDEX_CODEC_OUT0, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "codec_out1 gain", SST_PATH_INDEX_CODEC_OUT1, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "bt_out gain", SST_PATH_INDEX_BT_OUT, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "fm_out gain", SST_PATH_INDEX_FM_OUT, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "modem_out gain", SST_PATH_INDEX_MODEM_OUT, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "codec_in0 gain", SST_PATH_INDEX_CODEC_IN0, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "codec_in1 gain", SST_PATH_INDEX_CODEC_IN1, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "bt_in gain", SST_PATH_INDEX_BT_IN, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "fm_in gain", SST_PATH_INDEX_FM_IN, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "modem_in gain", SST_PATH_INDEX_MODEM_IN, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "sprot_loop_out gain", SST_PATH_INDEX_SPROT_LOOP_OUT, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "sidetone_in", SST_PATH_INDEX_SIDETONE_IN, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+
+	/* SRC */
+	{ "media0_in src", SST_PATH_INDEX_MEDIA0_IN, SST_MODULE_ID_SRC, SST_TASK_MMX, { 1, 2, 1 } },
+	{ "rxspeech_out src", SST_PATH_INDEX_RX_SPEECH_OUT, SST_MODULE_ID_SRC, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "txspeech_in src", SST_PATH_INDEX_TX_SPEECH_IN, SST_MODULE_ID_SRC, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "speech_out src", SST_PATH_INDEX_SPEECH_OUT, SST_MODULE_ID_SRC, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "speech_in src", SST_PATH_INDEX_SPEECH_IN, SST_MODULE_ID_SRC, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "hf_out src", SST_PATH_INDEX_HF_OUT, SST_MODULE_ID_SRC, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "hf_sns_out src", SST_PATH_INDEX_HF_SNS_OUT, SST_MODULE_ID_SRC, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "pcm1_out src", SST_PATH_INDEX_PCM1_OUT, SST_MODULE_ID_SRC, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "pcm2_out src", SST_PATH_INDEX_PCM2_OUT, SST_MODULE_ID_SRC, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "voip_in src", SST_PATH_INDEX_VOIP_IN, SST_MODULE_ID_SRC, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "voip_out src", SST_PATH_INDEX_VOIP_OUT, SST_MODULE_ID_SRC, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "aware_out src", SST_PATH_INDEX_AWARE_OUT, SST_MODULE_ID_SRC, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "vad_out src", SST_PATH_INDEX_VAD_OUT, SST_MODULE_ID_SRC, SST_TASK_SBA, { 1, 2, 1 } },
+
+	{ "media0_in downmix", SST_PATH_INDEX_MEDIA0_IN, SST_MODULE_ID_DOWNMIX, SST_TASK_MMX, { 1, 2, 1 } },
+	{ "sprot_loop_out lpro", SST_PATH_INDEX_SPROT_LOOP_OUT, SST_MODULE_ID_SPROT, SST_TASK_SBA, { 1, 2, 1 } },
+
+	{ "voice_downlink nr", SST_PATH_INDEX_VOICE_DOWNLINK, SST_MODULE_ID_NR, SST_TASK_FBA_DL , { 1, 2, 1 } },
+	{ "voice_uplink nr", SST_PATH_INDEX_VOICE_UPLINK, SST_MODULE_ID_NR, SST_TASK_FBA_UL, { 1, 2, 1 } },
+	{ "voice_downlink bwx", SST_PATH_INDEX_VOICE_DOWNLINK, SST_MODULE_ID_BWX, SST_TASK_FBA_DL, { 1, 2, 1 } },
+	{ "voice_downlink drp", SST_PATH_INDEX_VOICE_DOWNLINK, SST_MODULE_ID_DRP, SST_TASK_FBA_DL, { 1, 2, 1 } },
+	{ "voice_uplink drp", SST_PATH_INDEX_VOICE_UPLINK, SST_MODULE_ID_DRP, SST_TASK_FBA_UL, { 1, 2, 1 } },
+	{ "voice_downlink ana", SST_PATH_INDEX_VOICE_DOWNLINK, SST_MODULE_ID_ANA, SST_TASK_FBA_DL, { 1, 2, 1 } },
+	{ "voice_uplink aec", SST_PATH_INDEX_VOICE_UPLINK, SST_MODULE_ID_AEC, SST_TASK_FBA_UL, { 1, 2, 1 } },
+	{ "voice_uplink nr_sns", SST_PATH_INDEX_VOICE_UPLINK, SST_MODULE_ID_NR_SNS, SST_TASK_FBA_UL, { 1, 2, 1 } },
+	{ "voice_uplink ser", SST_PATH_INDEX_VOICE_UPLINK, SST_MODULE_ID_SER, SST_TASK_FBA_UL, { 1, 2, 1 } },
+	{ "voice_uplink agc", SST_PATH_INDEX_VOICE_UPLINK, SST_MODULE_ID_AGC, SST_TASK_FBA_UL, { 1, 2, 1 } },
+	{ "voice_downlink cni", SST_PATH_INDEX_VOICE_DOWNLINK, SST_MODULE_ID_CNI, SST_TASK_FBA_DL, { 1, 2, 1 } },
+
+
+	{ "media_loop1_out mdrp", SST_PATH_INDEX_MEDIA_LOOP1_OUT , SST_MODULE_ID_MDRP, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "media_loop2_out mdrp", SST_PATH_INDEX_MEDIA_LOOP2_OUT , SST_MODULE_ID_MDRP, SST_TASK_SBA, { 1, 2, 1 } },
+
+	{ "media_loop1_out fir_stereo", SST_PATH_INDEX_MEDIA_LOOP1_OUT , SST_MODULE_ID_FIR_24, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "media_loop2_out fir_stereo", SST_PATH_INDEX_MEDIA_LOOP2_OUT , SST_MODULE_ID_FIR_24, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "media_loop1_out iir_stereo", SST_PATH_INDEX_MEDIA_LOOP1_OUT , SST_MODULE_ID_IIR_24, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "media_loop2_out iir_stereo", SST_PATH_INDEX_MEDIA_LOOP2_OUT , SST_MODULE_ID_IIR_24, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "sidetone_in iir_stereo", SST_PATH_INDEX_SIDETONE_IN, SST_MODULE_ID_IIR_24, SST_TASK_SBA, { 1, 2, 1 } },
+
+	/* ASRC */
+	{ "modem_out asrc", SST_PATH_INDEX_MODEM_OUT, SST_MODULE_ID_ASRC, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "modem_in asrc", SST_PATH_INDEX_MODEM_IN, SST_MODULE_ID_ASRC, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "bt_out asrc", SST_PATH_INDEX_BT_OUT, SST_MODULE_ID_ASRC, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "bt_in asrc", SST_PATH_INDEX_BT_IN, SST_MODULE_ID_ASRC, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "fm_out asrc", SST_PATH_INDEX_FM_OUT, SST_MODULE_ID_ASRC, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "fm_in asrc", SST_PATH_INDEX_FM_IN, SST_MODULE_ID_ASRC, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "codec_out0 asrc", SST_PATH_INDEX_CODEC_OUT0, SST_MODULE_ID_ASRC, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "codec_out1 asrc", SST_PATH_INDEX_CODEC_OUT1, SST_MODULE_ID_ASRC, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "codec_in0 asrc", SST_PATH_INDEX_CODEC_IN0, SST_MODULE_ID_ASRC, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "codec_in1 asrc", SST_PATH_INDEX_CODEC_IN1, SST_MODULE_ID_ASRC, SST_TASK_SBA, { 1, 2, 1 } },
+
+	{ "tone_in tone_gen", SST_PATH_INDEX_TONE_IN, SST_MODULE_ID_TONE_GEN, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "hf_sns_out bmf", SST_PATH_INDEX_HF_SNS_OUT, SST_MODULE_ID_BMF, SST_TASK_FBA_UL, { 1, 2, 1 } },
+	{ "hf_out edl", SST_PATH_INDEX_HF_OUT, SST_MODULE_ID_EDL, SST_TASK_FBA_UL, { 1, 2, 1 } },
+	{ "voice_downlink glc", SST_PATH_INDEX_VOICE_DOWNLINK, SST_MODULE_ID_GLC, SST_TASK_FBA_DL, { 1, 2, 1 } },
+	{ "voice_downlink fir", SST_PATH_INDEX_VOICE_DOWNLINK, SST_MODULE_ID_FIR_16, SST_TASK_FBA_DL, { 1, 2, 1 } },
+	{ "voice_uplink fir", SST_PATH_INDEX_VOICE_UPLINK, SST_MODULE_ID_FIR_16, SST_TASK_FBA_UL, { 1, 2, 1 } },
+	{ "hf_sns_out fir", SST_PATH_INDEX_HF_SNS_OUT, SST_MODULE_ID_FIR_16, SST_TASK_FBA_UL, { 1, 2, 1 } },
+	{ "voice_downlink iir", SST_PATH_INDEX_VOICE_DOWNLINK, SST_MODULE_ID_IIR_16, SST_TASK_FBA_DL, { 1, 2, 1 } },
+	{ "voice_uplink iir", SST_PATH_INDEX_VOICE_UPLINK, SST_MODULE_ID_IIR_16, SST_TASK_FBA_UL, { 1, 2, 1 } },
+	{ "hf_sns_out iir", SST_PATH_INDEX_HF_SNS_OUT, SST_MODULE_ID_IIR_16, SST_TASK_FBA_UL, { 1, 2, 1 } },
+	{ "voice_uplink dnr", SST_PATH_INDEX_VOICE_UPLINK, SST_MODULE_ID_DNR, SST_TASK_FBA_UL, { 1, 2, 1 } },
+	{ "voice_uplink cni", SST_PATH_INDEX_VOICE_UPLINK, SST_MODULE_ID_CNI_TX, SST_TASK_FBA_UL, { 1, 2, 1 } },
+	{ "hf_out ref_line", SST_PATH_INDEX_HF_OUT, SST_MODULE_ID_REF_LINE, SST_TASK_FBA_UL, { 1, 2, 1 } },
+
+	{ "media0_in volume", SST_PATH_INDEX_MEDIA0_IN, SST_MODULE_ID_VOLUME, SST_TASK_MMX, { 1, 2, 1 } },
+
+	/* DCR */
+	{ "modem_in dcr", SST_PATH_INDEX_MODEM_IN, SST_MODULE_ID_FILT_DCR, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "bt_in dcr", SST_PATH_INDEX_BT_IN, SST_MODULE_ID_FILT_DCR, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "fm_in dcr", SST_PATH_INDEX_FM_IN, SST_MODULE_ID_FILT_DCR, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "codec_in0 dcr", SST_PATH_INDEX_CODEC_IN0, SST_MODULE_ID_FILT_DCR, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "codec_in1 dcr", SST_PATH_INDEX_CODEC_IN1, SST_MODULE_ID_FILT_DCR, SST_TASK_SBA, { 1, 2, 1 } },
+
+	/* Log */
+	{ "modem_out log", SST_PATH_INDEX_MODEM_OUT, SST_MODULE_ID_LOG, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "modem_in log", SST_PATH_INDEX_MODEM_IN, SST_MODULE_ID_LOG, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "bt_out log", SST_PATH_INDEX_BT_OUT, SST_MODULE_ID_LOG, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "bt_in log", SST_PATH_INDEX_BT_IN, SST_MODULE_ID_LOG, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "fm_in log", SST_PATH_INDEX_FM_IN, SST_MODULE_ID_LOG, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "fm_out log", SST_PATH_INDEX_FM_OUT, SST_MODULE_ID_LOG, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "codec_out0 log", SST_PATH_INDEX_CODEC_OUT0, SST_MODULE_ID_LOG, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "codec_in0 log", SST_PATH_INDEX_CODEC_IN0, SST_MODULE_ID_LOG, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "voice_downlink log", SST_PATH_INDEX_VOICE_DOWNLINK, SST_MODULE_ID_LOG, SST_TASK_FBA_DL, { 1, 2, 1 } },
+	{ "voice_uplink log", SST_PATH_INDEX_VOICE_UPLINK, SST_MODULE_ID_LOG, SST_TASK_FBA_UL, { 1, 2, 1 } },
+	{ "hf_out log", SST_PATH_INDEX_HF_OUT, SST_MODULE_ID_LOG, SST_TASK_FBA_UL, { 1, 2, 1 } },
+	{ "hf_sns_out log", SST_PATH_INDEX_HF_SNS_OUT, SST_MODULE_ID_LOG, SST_TASK_FBA_UL, { 1, 2, 1 } },
+
+	/* Decoder */
+	{ "media0_in pcm", SST_PATH_INDEX_MEDIA0_IN, SST_MODULE_ID_PCM, SST_TASK_MMX, { 1, 2, 1 } },
+	{ "media0_in mp3", SST_PATH_INDEX_MEDIA0_IN, SST_MODULE_ID_MP3, SST_TASK_MMX, { 1, 2, 1 } },
+	{ "media0_in mp24", SST_PATH_INDEX_MEDIA0_IN, SST_MODULE_ID_MP24, SST_TASK_MMX, { 1, 2, 1 } },
+	{ "media0_in aac", SST_PATH_INDEX_MEDIA0_IN, SST_MODULE_ID_AAC, SST_TASK_MMX, { 1, 2, 1 } },
+	{ "media0_in aacp", SST_PATH_INDEX_MEDIA0_IN, SST_MODULE_ID_AACP, SST_TASK_MMX, { 1, 2, 1 } },
+	{ "media0_in eaacp", SST_PATH_INDEX_MEDIA0_IN, SST_MODULE_ID_EAACP, SST_TASK_MMX, { 1, 2, 1 } },
+	{ "media0_in wma9", SST_PATH_INDEX_MEDIA0_IN, SST_MODULE_ID_WMA9, SST_TASK_MMX, { 1, 2, 1 } },
+	{ "media0_in wma10", SST_PATH_INDEX_MEDIA0_IN, SST_MODULE_ID_WMA10, SST_TASK_MMX, { 1, 2, 1 } },
+	{ "media0_in wma10p", SST_PATH_INDEX_MEDIA0_IN, SST_MODULE_ID_WMA10P, SST_TASK_MMX, { 1, 2, 1 } },
+	{ "media0_in ra", SST_PATH_INDEX_MEDIA0_IN, SST_MODULE_ID_RA, SST_TASK_MMX, { 1, 2, 1 } },
+	{ "media0_in ddac3", SST_PATH_INDEX_MEDIA0_IN, SST_MODULE_ID_DDAC3, SST_TASK_MMX, { 1, 2, 1 } },
+	{ "media0_in true_hd", SST_PATH_INDEX_MEDIA0_IN, SST_MODULE_ID_TRUE_HD, SST_TASK_MMX, { 1, 2, 1 } },
+	{ "media0_in hd_plus", SST_PATH_INDEX_MEDIA0_IN, SST_MODULE_ID_HD_PLUS, SST_TASK_MMX, { 1, 2, 1 } },
+
+	/* Effects */
+	{ "media0_in bass_boost", SST_PATH_INDEX_MEDIA0_IN, SST_MODULE_ID_BASS_BOOST, SST_TASK_MMX, { 1, 2, 1 } },
+	{ "media0_in stereo_wdng", SST_PATH_INDEX_MEDIA0_IN, SST_MODULE_ID_STEREO_WDNG, SST_TASK_MMX, { 1, 2, 1 } },
+	{ "media0_in av_removal", SST_PATH_INDEX_MEDIA0_IN, SST_MODULE_ID_AV_REMOVAL, SST_TASK_MMX, { 1, 2, 1 } },
+	{ "media0_in mic_eq", SST_PATH_INDEX_MEDIA0_IN, SST_MODULE_ID_MIC_EQ, SST_TASK_MMX, { 1, 2, 1 } },
+	{ "media0_in spl", SST_PATH_INDEX_MEDIA0_IN, SST_MODULE_ID_SPL, SST_TASK_MMX, { 1, 2, 1 } },
+	{ "media0_in vtsv", SST_PATH_INDEX_MEDIA0_IN, SST_MODULE_ID_ALGO_VTSV, SST_TASK_MMX, { 1, 2, 1 } },
+	{ "media0_in virtualizer", SST_PATH_INDEX_MEDIA0_IN, SST_MODULE_ID_VIRTUALIZER, SST_TASK_MMX, { 1, 2, 1 } },
+	{ "media0_in visualization", SST_PATH_INDEX_MEDIA0_IN, SST_MODULE_ID_VISUALIZATION, SST_TASK_MMX, { 1, 2, 1 } },
+	{ "media0_in loudness_optimizer", SST_PATH_INDEX_MEDIA0_IN, SST_MODULE_ID_LOUDNESS_OPTIMIZER, SST_TASK_MMX, { 1, 2, 1 } },
+	{ "media0_in reverberation", SST_PATH_INDEX_MEDIA0_IN, SST_MODULE_ID_REVERBERATION, SST_TASK_MMX, { 1, 2, 1 } },
+};
+
diff --git a/sound/soc/intel/platform-libs/sst_widgets.h b/sound/soc/intel/platform-libs/sst_widgets.h
new file mode 100644
index 0000000..07bf59b
--- /dev/null
+++ b/sound/soc/intel/platform-libs/sst_widgets.h
@@ -0,0 +1,382 @@
+/*
+ *  sst_widgets.h - Intel helpers to generate FW widgets
+ *
+ *  Copyright (C) 2013 Intel Corp
+ *  Author: Omair Mohammed Abdullah <omair.m.abdullah@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#ifndef __SST_WIDGETS_H__
+#define __SST_WIDGETS_H__
+
+#include <sound/soc.h>
+#include <sound/tlv.h>
+
+#define SST_MODULE_GAIN 1
+#define SST_MODULE_ALGO 2
+
+#define SST_FMT_MONO 0
+#define SST_FMT_STEREO 3
+
+/* physical SSP numbers */
+enum {
+	SST_SSP0 = 0,
+	SST_SSP1,
+	SST_SSP2,
+	SST_SSP_LAST = SST_SSP2,
+};
+
+#define SST_NUM_SSPS		(SST_SSP_LAST + 1)	/* physical SSPs */
+#define SST_MAX_SSP_MUX		2			/* single SSP muxed between pipes */
+#define SST_MAX_SSP_DOMAINS	2			/* domains present in each pipe */
+
+struct module {
+	struct snd_kcontrol *kctl;
+	struct list_head node;
+};
+
+struct sst_ssp_config {
+	u8 ssp_id;
+	u8 bits_per_slot;
+	u8 slots;
+	u8 ssp_mode;
+	u8 pcm_mode;
+	u8 duplex;
+	u8 ssp_protocol;
+	u8 fs_frequency;
+	u8 active_slot_map;
+	u8 start_delay;
+	u16 fs_width;
+};
+
+struct sst_ssp_cfg {
+	const u8 ssp_number;
+	const int *mux_shift;
+	const int (*domain_shift)[SST_MAX_SSP_MUX];
+	const struct sst_ssp_config (*ssp_config)[SST_MAX_SSP_MUX][SST_MAX_SSP_DOMAINS];
+};
+
+struct sst_ids {
+	u16 location_id;
+	u16 module_id;
+	u8  task_id;
+	u8  format;
+	u8  reg;
+	const char *parent_wname;
+	struct snd_soc_dapm_widget *parent_w;
+	struct list_head algo_list;
+	struct list_head gain_list;
+	const struct sst_pcm_format *pcm_fmt;
+};
+
+
+#define SST_AIF_IN(wname, wevent)							\
+{	.id = snd_soc_dapm_aif_in, .name = wname, .sname = NULL,			\
+	.reg = SND_SOC_NOPM, .shift = 0, .invert = 0,					\
+	.event = wevent, .event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD,	\
+	.priv = (void *)&(struct sst_ids) { .task_id = 0, .location_id = 0 }		\
+}
+
+#define SST_AIF_OUT(wname, wevent)							\
+{	.id = snd_soc_dapm_aif_out, .name = wname, .sname = NULL,			\
+	.reg = SND_SOC_NOPM, .shift = 0, .invert = 0,					\
+	.event = wevent, .event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD,	\
+	.priv = (void *)&(struct sst_ids) { .task_id = 0, .location_id = 0 }		\
+}
+
+#define SST_INPUT(wname, wevent)							\
+{	.id = snd_soc_dapm_input, .name = wname, .sname = NULL,				\
+	.reg = SND_SOC_NOPM, .shift = 0, .invert = 0,					\
+	.event = wevent, .event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD,	\
+	.priv = (void *)&(struct sst_ids) { .task_id = 0, .location_id = 0 }		\
+}
+
+#define SST_OUTPUT(wname, wevent)							\
+{	.id = snd_soc_dapm_output, .name = wname, .sname = NULL,			\
+	.reg = SND_SOC_NOPM, .shift = 0, .invert = 0,					\
+	.event = wevent, .event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD,	\
+	.priv = (void *)&(struct sst_ids) { .task_id = 0, .location_id = 0 }		\
+}
+
+#define SST_DAPM_OUTPUT(wname, wloc_id, wtask_id, wformat, wevent)                      \
+{	.id = snd_soc_dapm_output, .name = wname, .sname = NULL,                        \
+	.reg = SND_SOC_NOPM, .shift = 0, .invert = 0,                                   \
+	.event = wevent, .event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD,   \
+	.priv = (void *)&(struct sst_ids) { .location_id = wloc_id, .task_id = wtask_id,\
+						.pcm_fmt = wformat, }			\
+}
+
+#define SST_PATH(wname, wtask, wloc_id, wevent, wflags)					\
+{	.id = snd_soc_dapm_pga, .name = wname, .reg = SND_SOC_NOPM, .shift = 0,		\
+	.invert = 0, .kcontrol_news = NULL, .num_kcontrols = 0,				\
+	.event = wevent, .event_flags = wflags,						\
+	.priv = (void *)&(struct sst_ids) { .task_id = wtask, .location_id = wloc_id, }	\
+}
+
+#define SST_LINKED_PATH(wname, wtask, wloc_id, linked_wname, wevent, wflags)		\
+{	.id = snd_soc_dapm_pga, .name = wname, .reg = SND_SOC_NOPM, .shift = 0,		\
+	.invert = 0, .kcontrol_news = NULL, .num_kcontrols = 0,				\
+	.event = wevent, .event_flags = wflags,						\
+	.priv = (void *)&(struct sst_ids) { .task_id = wtask, .location_id = wloc_id,	\
+					.parent_wname = linked_wname}			\
+}
+
+#define SST_PATH_MEDIA_LOOP(wname, wtask, wloc_id, wformat, wevent, wflags)             \
+{	.id = snd_soc_dapm_pga, .name = wname, .reg = SND_SOC_NOPM, .shift = 0,         \
+	.invert = 0, .kcontrol_news = NULL, .num_kcontrols = 0,                         \
+	.event = wevent, .event_flags = wflags,                                         \
+	.priv = (void *)&(struct sst_ids) { .task_id = wtask, .location_id = wloc_id,	\
+					    .format = wformat,}				\
+}
+
+/* output is triggered before input */
+#define SST_PATH_INPUT(name, task_id, loc_id, event)					\
+	SST_PATH(name, task_id, loc_id, event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD)
+
+#define SST_PATH_LINKED_INPUT(name, task_id, loc_id, linked_wname, event)		\
+	SST_LINKED_PATH(name, task_id, loc_id, linked_wname, event,			\
+					SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD)
+
+#define SST_PATH_OUTPUT(name, task_id, loc_id, event)					\
+	SST_PATH(name, task_id, loc_id, event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD)
+
+#define SST_PATH_LINKED_OUTPUT(name, task_id, loc_id, linked_wname, event)		\
+	SST_LINKED_PATH(name, task_id, loc_id, linked_wname, event,			\
+					SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD)
+
+#define SST_PATH_MEDIA_LOOP_OUTPUT(name, task_id, loc_id, format, event)		\
+	SST_PATH_MEDIA_LOOP(name, task_id, loc_id, format, event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD)
+
+
+#define SST_SWM_MIXER(wname, wreg, wtask, wloc_id, wcontrols, wevent)			\
+{	.id = snd_soc_dapm_mixer, .name = wname, .reg = SND_SOC_NOPM, .shift = 0,	\
+	.invert = 0, .kcontrol_news = wcontrols, .num_kcontrols = ARRAY_SIZE(wcontrols),\
+	.event = wevent, .event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD |	\
+					SND_SOC_DAPM_POST_REG,				\
+	.priv = (void *)&(struct sst_ids) { .task_id = wtask, .location_id = wloc_id,	\
+					    .reg = wreg }				\
+}
+
+enum sst_gain_kcontrol_type {
+	SST_GAIN_TLV,
+	SST_GAIN_MUTE,
+	SST_GAIN_RAMP_DURATION,
+};
+
+struct sst_gain_mixer_control {
+	bool stereo;
+	enum sst_gain_kcontrol_type type;
+	struct sst_gain_value *gain_val;
+	int max;
+	int min;
+	u16 instance_id;
+	u16 module_id;
+	u16 pipe_id;
+	u16 task_id;
+	char pname[44];
+	struct snd_soc_dapm_widget *w;
+};
+
+struct sst_gain_value {
+	u16 ramp_duration;
+	s16 l_gain;
+	s16 r_gain;
+	bool mute;
+};
+
+#define SST_GAIN_VOLUME_DEFAULT		(-1440)
+#define SST_GAIN_RAMP_DURATION_DEFAULT	5 /* timeconstant */
+#define SST_GAIN_MUTE_DEFAULT		true
+
+#define SST_GAIN_KCONTROL_TLV(xname, xhandler_get, xhandler_put, \
+			      xmod, xpipe, xinstance, xtask, tlv_array, xgain_val, \
+			      xmin, xmax, xpname) \
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+	.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \
+		  SNDRV_CTL_ELEM_ACCESS_READWRITE, \
+	.tlv.p = (tlv_array), \
+	.info = sst_gain_ctl_info,\
+	.get = xhandler_get, .put = xhandler_put, \
+	.private_value = (unsigned long)&(struct sst_gain_mixer_control) \
+	{ .stereo = true, .max = xmax, .min = xmin, .type = SST_GAIN_TLV, \
+	  .module_id = xmod, .pipe_id = xpipe, .task_id = xtask,\
+	  .instance_id = xinstance, .gain_val = xgain_val, .pname = xpname}
+
+#define SST_GAIN_KCONTROL_INT(xname, xhandler_get, xhandler_put, \
+			      xmod, xpipe, xinstance, xtask, xtype, xgain_val, \
+			      xmin, xmax, xpname) \
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+	.info = sst_gain_ctl_info, \
+	.get = xhandler_get, .put = xhandler_put, \
+	.private_value = (unsigned long)&(struct sst_gain_mixer_control) \
+	{ .stereo = false, .max = xmax, .min = xmin, .type = xtype, \
+	  .module_id = xmod, .pipe_id = xpipe, .task_id = xtask,\
+	  .instance_id = xinstance, .gain_val = xgain_val, .pname =  xpname}
+
+#define SST_GAIN_KCONTROL_BOOL(xname, xhandler_get, xhandler_put,\
+			       xmod, xpipe, xinstance, xtask, xgain_val, xpname) \
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+	.info = snd_soc_info_bool_ext, \
+	.get = xhandler_get, .put = xhandler_put, \
+	.private_value = (unsigned long)&(struct sst_gain_mixer_control) \
+	{ .stereo = false, .type = SST_GAIN_MUTE, \
+	  .module_id = xmod, .pipe_id = xpipe, .task_id = xtask,\
+	  .instance_id = xinstance, .gain_val = xgain_val, .pname = xpname}
+
+#define SST_CONTROL_NAME(xpname, xmname, xinstance, xtype) \
+	xpname " " xmname " " #xinstance " " xtype
+
+#define SST_COMBO_CONTROL_NAME(xpname, xmname, xinstance, xtype, xsubmodule) \
+	xpname " " xmname " " #xinstance " " xtype " " xsubmodule
+
+/*
+ * 3 Controls for each Gain module
+ * e.g.	- pcm0_in gain 0 volume
+ *	- pcm0_in gain 0 rampduration
+ *	- pcm0_in gain 0 mute
+ */
+#define SST_GAIN_KCONTROLS(xpname, xmname, xmin_gain, xmax_gain, xmin_tc, xmax_tc, \
+			   xhandler_get, xhandler_put, \
+			   xmod, xpipe, xinstance, xtask, tlv_array, xgain_val) \
+	{ SST_GAIN_KCONTROL_INT(SST_CONTROL_NAME(xpname, xmname, xinstance, "rampduration"), \
+		xhandler_get, xhandler_put, xmod, xpipe, xinstance, xtask, SST_GAIN_RAMP_DURATION, \
+		xgain_val, xmin_tc, xmax_tc, xpname) }, \
+	{ SST_GAIN_KCONTROL_BOOL(SST_CONTROL_NAME(xpname, xmname, xinstance, "mute"), \
+		xhandler_get, xhandler_put, xmod, xpipe, xinstance, xtask, \
+		xgain_val, xpname) } ,\
+	{ SST_GAIN_KCONTROL_TLV(SST_CONTROL_NAME(xpname, xmname, xinstance, "volume"), \
+		xhandler_get, xhandler_put, xmod, xpipe, xinstance, xtask, tlv_array, \
+		xgain_val, xmin_gain, xmax_gain, xpname) }
+
+#define SST_GAIN_TC_MIN		5
+#define SST_GAIN_TC_MAX		5000
+#define SST_GAIN_MIN_VALUE	-1440 /* in 0.1 DB units */
+#define SST_GAIN_MAX_VALUE	360
+
+enum sst_algo_kcontrol_type {
+	SST_ALGO_PARAMS,
+	SST_ALGO_BYPASS,
+};
+
+struct sst_algo_control {
+	enum sst_algo_kcontrol_type type;
+	int max;
+	u16 module_id;
+	u16 pipe_id;
+	u16 task_id;
+	u16 cmd_id;
+	bool bypass;
+	unsigned char *params;
+	struct snd_soc_dapm_widget *w;
+};
+
+/* size of the control = size of params + size of length field */
+#define SST_ALGO_CTL_VALUE(xcount, xtype, xpipe, xmod, xtask, xcmd)			\
+	(struct sst_algo_control){							\
+		.max = xcount + sizeof(u16), .type = xtype, .module_id = xmod,			\
+		.pipe_id = xpipe, .task_id = xtask, .cmd_id = xcmd,			\
+	}
+
+#define SST_ALGO_KCONTROL(xname, xcount, xmod, xpipe,					\
+			  xtask, xcmd, xtype, xinfo, xget, xput)			\
+{	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,						\
+	.name =  xname,									\
+	.info = xinfo, .get = xget, .put = xput,					\
+	.private_value = (unsigned long)&						\
+			SST_ALGO_CTL_VALUE(xcount, xtype, xpipe,			\
+					   xmod, xtask, xcmd),				\
+}
+
+#define SST_ALGO_KCONTROL_BYTES(xpname, xmname, xcount, xmod,				\
+				xpipe, xinstance, xtask, xcmd)				\
+	SST_ALGO_KCONTROL(SST_CONTROL_NAME(xpname, xmname, xinstance, "params"),	\
+			  xcount, xmod, xpipe, xtask, xcmd, SST_ALGO_PARAMS,		\
+			  sst_algo_bytes_ctl_info,					\
+			  sst_algo_control_get, sst_algo_control_set)
+
+#define SST_ALGO_KCONTROL_BOOL(xpname, xmname, xmod, xpipe, xinstance, xtask)		\
+	SST_ALGO_KCONTROL(SST_CONTROL_NAME(xpname, xmname, xinstance, "bypass"),	\
+			  0, xmod, xpipe, xtask, 0, SST_ALGO_BYPASS,			\
+			  snd_soc_info_bool_ext,					\
+			  sst_algo_control_get, sst_algo_control_set)
+
+#define SST_ALGO_BYPASS_PARAMS(xpname, xmname, xcount, xmod, xpipe,			\
+				xinstance, xtask, xcmd)					\
+	SST_ALGO_KCONTROL_BOOL(xpname, xmname, xmod, xpipe, xinstance, xtask),		\
+	SST_ALGO_KCONTROL_BYTES(xpname, xmname, xcount, xmod, xpipe, xinstance, xtask, xcmd)
+
+#define SST_COMBO_ALGO_KCONTROL_BYTES(xpname, xmname, xsubmod, xcount, xmod,		\
+				      xpipe, xinstance, xtask, xcmd)			\
+	SST_ALGO_KCONTROL(SST_COMBO_CONTROL_NAME(xpname, xmname, xinstance, "params",	\
+						 xsubmod),				\
+			  xcount, xmod, xpipe, xtask, xcmd, SST_ALGO_PARAMS,		\
+			  sst_algo_bytes_ctl_info,					\
+			  sst_algo_control_get, sst_algo_control_set)
+
+
+struct sst_enum {
+	bool tx;
+	unsigned short reg;
+	unsigned int max;
+	const char * const *texts;
+	struct snd_soc_dapm_widget *w;
+};
+
+/* only 4 slots/channels supported atm */
+#define SST_SSP_SLOT_ENUM(s_ch_no, is_tx, xtexts) \
+	(struct sst_enum){ .reg = s_ch_no, .tx = is_tx, .max = 4+1, .texts = xtexts, }
+
+#define SST_SLOT_CTL_NAME(xpname, xmname, s_ch_name) \
+	xpname " " xmname " " s_ch_name
+
+#define SST_SSP_SLOT_CTL(xpname, xmname, s_ch_name, s_ch_no, is_tx, xtexts, xget, xput) \
+{	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
+	.name = SST_SLOT_CTL_NAME(xpname, xmname, s_ch_name), \
+	.info = sst_slot_enum_info, \
+	.get = xget, .put = xput, \
+	.private_value = (unsigned long)&SST_SSP_SLOT_ENUM(s_ch_no, is_tx, xtexts), \
+}
+
+#define SST_MUX_CTL_NAME(xpname, xinstance) \
+	xpname " " #xinstance
+
+#define SST_SSP_MUX_ENUM(xreg, xshift, xtexts) \
+	(struct soc_enum){ .reg = xreg, .texts = xtexts, .shift_l = xshift, \
+			   .shift_r = xshift, .max = ARRAY_SIZE(xtexts), }
+
+#define SST_SSP_MUX_CTL(xpname, xinstance, xreg, xshift, xtexts, xget, xput) \
+	SOC_DAPM_ENUM_EXT(SST_MUX_CTL_NAME(xpname, xinstance), \
+			  SST_SSP_MUX_ENUM(xreg, xshift, xtexts), \
+			  xget, xput)
+
+struct sst_probe_value {
+	unsigned int val;
+	const struct soc_enum *p_enum;
+};
+
+#define SST_PROBE_CTL_NAME(dir, num, type) \
+	dir #num " " type
+
+#define SST_PROBE_ENUM(xname, xenum, xhandler_get, xhandler_put) \
+{	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+	.info = sst_probe_enum_info, \
+	.get = xhandler_get, .put = xhandler_put, \
+	.private_value = (unsigned long)&(struct sst_probe_value) \
+	{ .val = 0, .p_enum = &xenum } }
+
+#endif
diff --git a/sound/soc/intel/platform_ipc_v2.h b/sound/soc/intel/platform_ipc_v2.h
new file mode 100644
index 0000000..a718f9c
--- /dev/null
+++ b/sound/soc/intel/platform_ipc_v2.h
@@ -0,0 +1,694 @@
+/*
+*  platform_ipc_v2.h - Intel MID Platform driver FW IPC definitions
+*
+*  Copyright (C) 2008-10 Intel Corporation
+*  Author:	Vinod Koul <vinod.koul@intel.com>
+*		Harsha Priya <priya.harsha@intel.com>
+*		Dharageswari R <dharageswari.r@intel.com>
+*		KP Jeeja <jeeja.kp@intel.com>
+*  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+*
+*  This program is free software; you can redistribute it and/or modify
+*  it under the terms of the GNU General Public License as published by
+*  the Free Software Foundation; version 2 of the License.
+*
+*  This program is distributed in the hope that it will be useful, but
+*  WITHOUT ANY WARRANTY; without even the implied warranty of
+*  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+*  General Public License for more details.
+*
+*  You should have received a copy of the GNU General Public License along
+*  with this program; if not, write to the Free Software Foundation, Inc.,
+*  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+*
+* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+*
+*  This driver exposes the audio engine functionalities to the ALSA
+*	and middleware.
+*  This file has definitions shared between the firmware and driver
+*/
+#ifndef __PLATFORM_IPC_V2_H__
+#define __PLATFORM_IPC_V2_H__
+
+#define MAX_DBG_RW_BYTES 80
+#define MAX_NUM_SCATTER_BUFFERS 8
+#define MAX_LOOP_BACK_DWORDS 8
+/* IPC base address and mailbox, timestamp offsets */
+#define SST_MAILBOX_SIZE 0x0400
+#define SST_MAILBOX_SEND 0x0000
+#define SST_TIME_STAMP 0x1800
+#define SST_TIME_STAMP_MRFLD 0x680
+#define SST_TIME_STAMP_BYT 0x800
+#define SST_RESERVED_OFFSET 0x1A00
+#define SST_SCU_LPE_MAILBOX 0x1000
+#define SST_LPE_SCU_MAILBOX 0x1400
+#define SST_SCU_LPE_LOG_BUF (SST_SCU_LPE_MAILBOX+16)
+#define PROCESS_MSG 0x80
+
+/* Message ID's for IPC messages */
+/* Bits B7: SST or IA/SC ; B6-B4: Msg Category; B3-B0: Msg Type */
+
+/* I2L Firmware/Codec Download msgs */
+#define IPC_IA_PREP_LIB_DNLD 0x01
+#define IPC_IA_LIB_DNLD_CMPLT 0x02
+#define IPC_IA_GET_FW_VERSION 0x04
+#define IPC_IA_GET_FW_BUILD_INF 0x05
+#define IPC_IA_GET_FW_INFO 0x06
+#define IPC_IA_GET_FW_CTXT 0x07
+#define IPC_IA_SET_FW_CTXT 0x08
+#define IPC_IA_PREPARE_SHUTDOWN 0x31
+/* I2L Codec Config/control msgs */
+#define IPC_PREP_D3 0x10
+#define IPC_IA_SET_CODEC_PARAMS 0x10
+#define IPC_IA_GET_CODEC_PARAMS 0x11
+#define IPC_IA_SET_PPP_PARAMS 0x12
+#define IPC_IA_GET_PPP_PARAMS 0x13
+#define IPC_SST_PERIOD_ELAPSED_MRFLD 0xA
+#define IPC_SST_VB_RESET 0x28
+#define IPC_IA_ALG_PARAMS 0x1A
+#define IPC_IA_TUNING_PARAMS 0x1B
+#define IPC_IA_SET_RUNTIME_PARAMS 0x1C
+#define IPC_IA_SET_PARAMS 0x1
+#define IPC_IA_GET_PARAMS 0x2
+
+#define IPC_EFFECTS_CREATE 0xE
+#define IPC_EFFECTS_DESTROY 0xF
+
+/* I2L Stream config/control msgs */
+#define IPC_IA_ALLOC_STREAM_MRFLD 0x2
+#define IPC_IA_ALLOC_STREAM 0x20 /* Allocate a stream ID */
+#define IPC_IA_FREE_STREAM_MRFLD 0x03
+#define IPC_IA_FREE_STREAM 0x21 /* Free the stream ID */
+#define IPC_IA_SET_STREAM_PARAMS 0x22
+#define IPC_IA_SET_STREAM_PARAMS_MRFLD 0x12
+#define IPC_IA_GET_STREAM_PARAMS 0x23
+#define IPC_IA_PAUSE_STREAM 0x24
+#define IPC_IA_PAUSE_STREAM_MRFLD 0x4
+#define IPC_IA_RESUME_STREAM 0x25
+#define IPC_IA_RESUME_STREAM_MRFLD 0x5
+#define IPC_IA_DROP_STREAM 0x26
+#define IPC_IA_DROP_STREAM_MRFLD 0x07
+#define IPC_IA_DRAIN_STREAM 0x27 /* Short msg with str_id */
+#define IPC_IA_DRAIN_STREAM_MRFLD 0x8
+#define IPC_IA_CONTROL_ROUTING 0x29
+#define IPC_IA_VTSV_UPDATE_MODULES 0x20
+#define IPC_IA_VTSV_DETECTED 0x21
+
+#define IPC_IA_START_STREAM_MRFLD 0X06
+#define IPC_IA_START_STREAM 0x30 /* Short msg with str_id */
+
+#define IPC_IA_SET_GAIN_MRFLD 0x21
+/* Debug msgs */
+#define IPC_IA_DBG_MEM_READ 0x40
+#define IPC_IA_DBG_MEM_WRITE 0x41
+#define IPC_IA_DBG_LOOP_BACK 0x42
+#define IPC_IA_DBG_LOG_ENABLE 0x45
+#define IPC_IA_DBG_SET_PROBE_PARAMS 0x47
+
+/* L2I Firmware/Codec Download msgs */
+#define IPC_IA_FW_INIT_CMPLT 0x81
+#define IPC_IA_FW_INIT_CMPLT_MRFLD 0x01
+#define IPC_IA_FW_ASYNC_ERR_MRFLD 0x11
+
+/* L2I Codec Config/control msgs */
+#define IPC_SST_FRAGMENT_ELPASED 0x90 /* Request IA more data */
+
+#define IPC_SST_BUF_UNDER_RUN 0x92 /* PB Under run and stopped */
+#define IPC_SST_BUF_OVER_RUN 0x93 /* CAP Under run and stopped */
+#define IPC_SST_DRAIN_END 0x94 /* PB Drain complete and stopped */
+#define IPC_SST_CHNGE_SSP_PARAMS 0x95 /* PB SSP parameters changed */
+#define IPC_SST_STREAM_PROCESS_FATAL_ERR 0x96/* error in processing a stream */
+#define IPC_SST_PERIOD_ELAPSED 0x97 /* period elapsed */
+
+#define IPC_SST_ERROR_EVENT 0x99 /* Buffer over run occurred */
+/* L2S messages */
+#define IPC_SC_DDR_LINK_UP 0xC0
+#define IPC_SC_DDR_LINK_DOWN 0xC1
+#define IPC_SC_SET_LPECLK_REQ 0xC2
+#define IPC_SC_SSP_BIT_BANG 0xC3
+
+/* L2I Error reporting msgs */
+#define IPC_IA_MEM_ALLOC_FAIL 0xE0
+#define IPC_IA_PROC_ERR 0xE1 /* error in processing a
+					stream can be used by playback and
+					capture modules */
+
+/* L2I Debug msgs */
+#define IPC_IA_PRINT_STRING 0xF0
+
+/* Buffer under-run */
+#define IPC_IA_BUF_UNDER_RUN_MRFLD 0x0B
+
+/* Mrfld specific defines:
+ * For asynchronous messages(INIT_CMPLT, PERIOD_ELAPSED, ASYNC_ERROR)
+ * received from FW, the format is:
+ *  - IPC High: pvt_id is set to zero. Always short message.
+ *  - msg_id is in lower 16-bits of IPC low payload.
+ *  - pipe_id is in higher 16-bits of IPC low payload for period_elapsed.
+ *  - error id is in higher 16-bits of IPC low payload for async errors.
+ */
+#define SST_ASYNC_DRV_ID 0
+
+/* Command Response or Acknowledge message to any IPC message will have
+ * same message ID and stream ID information which is sent.
+ * There is no specific Ack message ID. The data field is used as response
+ * meaning.
+ */
+
+/* SCU IPC for resetting & power gating the LPE through SCU */
+#define IPC_SCU_LPE_RESET 0xA3
+
+enum ackData {
+	IPC_ACK_SUCCESS = 0,
+	IPC_ACK_FAILURE,
+};
+
+enum ipc_ia_msg_id {
+	IPC_CMD = 1,		/*!< Task Control message ID */
+	IPC_SET_PARAMS = 2,/*!< Task Set param message ID */
+	IPC_GET_PARAMS = 3,	/*!< Task Get param message ID */
+	IPC_INVALID = 0xFF,	/*!<Task Get param message ID */
+};
+
+enum sst_codec_types {
+	/*  AUDIO/MUSIC	CODEC Type Definitions */
+	SST_CODEC_TYPE_UNKNOWN = 0,
+	SST_CODEC_TYPE_PCM,	/* Pass through Audio codec */
+	SST_CODEC_TYPE_MP3,
+	SST_CODEC_TYPE_MP24,
+	SST_CODEC_TYPE_AAC,
+	SST_CODEC_TYPE_AACP,
+	SST_CODEC_TYPE_eAACP,
+	SST_CODEC_TYPE_WMA9,
+	SST_CODEC_TYPE_WMA10,
+	SST_CODEC_TYPE_WMA10P,
+	SST_CODEC_TYPE_RA,
+	SST_CODEC_TYPE_DDAC3,
+	SST_CODEC_TYPE_STEREO_TRUE_HD,
+	SST_CODEC_TYPE_STEREO_HD_PLUS,
+
+	/*  VOICE CODEC Type Definitions */
+	SST_CODEC_TYPE_VOICE_PCM = 0x21, /* Pass through voice codec */
+};
+
+enum sst_algo_types {
+	SST_ALGO_SRC = 0x64,
+	SST_ALGO_MIXER = 0x65,
+	SST_ALGO_DOWN_MIXER = 0x66,
+	SST_ALGO_VTSV = 0x73,
+	SST_ALGO_AUDCLASSIFIER = 0x80,
+	SST_ALGO_VOLUME_CONTROL = 0x92,
+	SST_ALGO_GEQ = 0x99,
+};
+
+enum stream_type {
+	SST_STREAM_TYPE_NONE = 0,
+	SST_STREAM_TYPE_MUSIC = 1,
+	SST_STREAM_TYPE_NORMAL = 2,
+	SST_STREAM_TYPE_PROBE = 3,
+	SST_STREAM_TYPE_LONG_PB = 4,
+	SST_STREAM_TYPE_LOW_LATENCY = 5,
+};
+
+enum sst_error_codes {
+	/* Error code,response to msgId: Description */
+	/* Common error codes */
+	SST_SUCCESS = 0,	/* Success */
+	SST_ERR_INVALID_STREAM_ID = 1,
+	SST_ERR_INVALID_MSG_ID = 2,
+	SST_ERR_INVALID_STREAM_OP = 3,
+	SST_ERR_INVALID_PARAMS = 4,
+	SST_ERR_INVALID_CODEC = 5,
+	SST_ERR_INVALID_MEDIA_TYPE = 6,
+	SST_ERR_STREAM_ERR = 7,
+
+	/* IPC specific error codes */
+	SST_IPC_ERR_CALL_BACK_NOT_REGD = 8,
+	SST_IPC_ERR_STREAM_NOT_ALLOCATED = 9,
+	SST_IPC_ERR_STREAM_ALLOC_FAILED = 10,
+	SST_IPC_ERR_GET_STREAM_FAILED = 11,
+	SST_ERR_MOD_NOT_AVAIL = 12,
+	SST_ERR_MOD_DNLD_RQD = 13,
+	SST_ERR_STREAM_STOPPED = 14,
+	SST_ERR_STREAM_IN_USE = 15,
+
+	/* Capture specific error codes */
+	SST_CAP_ERR_INCMPLTE_CAPTURE_MSG = 16,
+	SST_CAP_ERR_CAPTURE_FAIL = 17,
+	SST_CAP_ERR_GET_DDR_NEW_SGLIST = 18,
+	SST_CAP_ERR_UNDER_RUN = 19,
+	SST_CAP_ERR_OVERFLOW = 20,
+
+	/* Playback specific error codes*/
+	SST_PB_ERR_INCMPLTE_PLAY_MSG = 21,
+	SST_PB_ERR_PLAY_FAIL = 22,
+	SST_PB_ERR_GET_DDR_NEW_SGLIST = 23,
+
+	/* Codec manager specific error codes */
+	SST_LIB_ERR_LIB_DNLD_REQUIRED = 24,
+	SST_LIB_ERR_LIB_NOT_SUPPORTED = 25,
+
+	/* Library manager specific error codes */
+	SST_SCC_ERR_PREP_DNLD_FAILED = 26,
+	SST_SCC_ERR_LIB_DNLD_RES_FAILED = 27,
+	/* Scheduler specific error codes */
+	SST_SCH_ERR_FAIL = 28,
+
+	/* DMA specific error codes */
+	SST_DMA_ERR_NO_CHNL_AVAILABLE = 29,
+	SST_DMA_ERR_INVALID_INPUT_PARAMS = 30,
+	SST_DMA_ERR_CHNL_ALREADY_SUSPENDED = 31,
+	SST_DMA_ERR_CHNL_ALREADY_STARTED = 32,
+	SST_DMA_ERR_CHNL_NOT_ENABLED = 33,
+	SST_DMA_ERR_TRANSFER_FAILED = 34,
+
+	SST_SSP_ERR_ALREADY_ENABLED = 35,
+	SST_SSP_ERR_ALREADY_DISABLED = 36,
+	SST_SSP_ERR_NOT_INITIALIZED = 37,
+	SST_SSP_ERR_SRAM_NO_DMA_DATA = 38,
+
+	/* Other error codes */
+	SST_ERR_MOD_INIT_FAIL = 39,
+
+	/* FW init error codes */
+	SST_RDR_ERR_IO_DEV_SEL_NOT_ALLOWED = 40,
+	SST_RDR_ERR_ROUTE_ALREADY_STARTED = 41,
+	SST_RDR_ERR_IO_DEV_SEL_FAILED = 42,
+	SST_RDR_PREP_CODEC_DNLD_FAILED = 43,
+
+	/* Memory debug error codes */
+	SST_ERR_DBG_MEM_READ_FAIL = 44,
+	SST_ERR_DBG_MEM_WRITE_FAIL = 45,
+	SST_ERR_INSUFFICIENT_INPUT_SG_LIST = 46,
+	SST_ERR_INSUFFICIENT_OUTPUT_SG_LIST = 47,
+
+	SST_ERR_BUFFER_NOT_AVAILABLE = 48,
+	SST_ERR_BUFFER_NOT_ALLOCATED = 49,
+	SST_ERR_INVALID_REGION_TYPE = 50,
+	SST_ERR_NULL_PTR = 51,
+	SST_ERR_INVALID_BUFFER_SIZE = 52,
+	SST_ERR_INVALID_BUFFER_INDEX = 53,
+
+	/*IIPC specific error codes */
+	SST_IIPC_QUEUE_FULL = 54,
+	SST_IIPC_ERR_MSG_SND_FAILED = 55,
+	SST_PB_ERR_UNDERRUN_OCCURED = 56,
+	SST_RDR_INSUFFICIENT_MIXER_BUFFER = 57,
+	SST_INVALID_TIME_SLOTS = 58,
+};
+
+enum dbg_mem_data_type {
+	/* Data type of debug read/write */
+	DATA_TYPE_U32,
+	DATA_TYPE_U16,
+	DATA_TYPE_U8,
+};
+
+enum dbg_type {
+	NO_DEBUG = 0,
+	SRAM_DEBUG,
+	PTI_DEBUG,
+};
+
+struct ipc_dsp_hdr {
+	u16 mod_index_id:8;		/*!< DSP Command ID specific to tasks */
+	u16 pipe_id:8;	/*!< instance of the module in the pipeline */
+	u16 mod_id;		/*!< Pipe_id */
+	u16 cmd_id;		/*!< Module ID = lpe_algo_types_t */
+	u16 length;		/*!< Length of the payload only */
+} __packed;
+
+struct ipc_dsp_effects_info {
+	u16	cmd_id;
+	u16	length;
+	u16	sel_pos;
+	u16	sel_algo_id;
+	u16	cpu_load;       /* CPU load indication */
+	u16	memory_usage;   /* Data Memory usage */
+	u32	flags;         /* effect engine caps/requirements flags */
+} __packed;
+
+struct ipc_effect_dsp_hdr {
+	u16 mod_index_id:8;             /*!< DSP Command ID specific to tasks */
+	u16 pipe_id:8;  /*!< instance of the module in the pipeline */
+	u16 mod_id;             /*!< Pipe_id */
+} __packed;
+
+struct ipc_effect_payload {
+	struct ipc_effect_dsp_hdr dsp_hdr;
+	char *data;
+};
+
+union ipc_header_high {
+	struct {
+		u32  msg_id:8;	    /* Message ID - Max 256 Message Types */
+		u32  task_id:4;	    /* Task ID associated with this comand */
+		u32  drv_id:4;    /* Identifier for the driver to track*/
+		u32  rsvd1:8;	    /* Reserved */
+		u32  result:4;	    /* Reserved */
+		u32  res_rqd:1;	    /* Response rqd */
+		u32  large:1;	    /* Large Message if large = 1 */
+		u32  done:1;	    /* bit 30 - Done bit */
+		u32  busy:1;	    /* bit 31 - busy bit*/
+	} part;
+	u32 full;
+} __packed;
+
+/* IPC header */
+union ipc_header_mrfld {
+	struct {
+		u32 header_low_payload;
+		union ipc_header_high header_high;
+	} p;
+	u64 full;
+} __packed;
+
+/* CAUTION NOTE: All IPC message body must be multiple of 32 bits.*/
+
+/* IPC Header */
+union ipc_header {
+	struct {
+		u32  msg_id:8; /* Message ID - Max 256 Message Types */
+		u32  str_id:5;
+		u32  large:1;	/* Large Message if large = 1 */
+		u32  reserved:2;	/* Reserved for future use */
+		u32  data:14;	/* Ack/Info for msg, size of msg in Mailbox */
+		u32  done:1; /* bit 30 */
+		u32  busy:1; /* bit 31 */
+	} part;
+	u32 full;
+} __packed;
+
+/* Firmware build info */
+struct sst_fw_build_info {
+	unsigned char  date[16]; /* Firmware build date */
+	unsigned char  time[16]; /* Firmware build time */
+} __packed;
+
+/* Firmware Version info */
+struct snd_sst_fw_version {
+	u8 build;	/* build number*/
+	u8 minor;	/* minor number*/
+	u8 major;	/* major number*/
+	u8 type;	/* build type */
+};
+
+struct ipc_header_fw_init {
+	struct snd_sst_fw_version fw_version;/* Firmware version details */
+	struct sst_fw_build_info build_info;
+	u16 result;	/* Fw init result */
+	u8 module_id; /* Module ID in case of error */
+	u8 debug_info; /* Debug info from Module ID in case of fail */
+} __packed;
+
+struct snd_sst_tstamp {
+	u64 ring_buffer_counter;	/* PB/CP: Bytes copied from/to DDR. */
+	u64 hardware_counter;	    /* PB/CP: Bytes DMAed to/from SSP. */
+	u64 frames_decoded;
+	u64 bytes_decoded;
+	u64 bytes_copied;
+	u32 sampling_frequency;
+	u32 channel_peak[8];
+} __packed;
+
+/* SST to IA memory read debug message  */
+struct ipc_sst_ia_dbg_mem_rw  {
+	u16  num_bytes;/* Maximum of MAX_DBG_RW_BYTES */
+	u16  data_type;/* enum: dbg_mem_data_type */
+	u32  address;	/* Memory address of data memory of data_type */
+	u8	rw_bytes[MAX_DBG_RW_BYTES];/* Maximum of 64 bytes can be RW */
+} __packed;
+
+struct ipc_sst_ia_dbg_loop_back {
+	u16 num_dwords; /* Maximum of MAX_DBG_RW_BYTES */
+	u16 increment_val;/* Increments dwords by this value, 0- no increment */
+	u32 lpbk_dwords[MAX_LOOP_BACK_DWORDS];/* Maximum of 8 dwords loopback */
+} __packed;
+
+/* Stream type params struture for Alloc stream */
+struct snd_sst_str_type {
+	u8 codec_type;		/* Codec type */
+	u8 str_type;		/* 1 = voice 2 = music */
+	u8 operation;		/* Playback or Capture */
+	u8 protected_str;	/* 0=Non DRM, 1=DRM */
+	u8 time_slots;
+	u8 reserved;		/* Reserved */
+	u16 result;		/* Result used for acknowledgment */
+} __packed;
+
+/* Library info structure */
+struct module_info {
+	u32 lib_version;
+	u32 lib_type;/*TBD- KLOCKWORK u8 lib_type;*/
+	u32 media_type;
+	u8  lib_name[12];
+	u32 lib_caps;
+	unsigned char  b_date[16]; /* Lib build date */
+	unsigned char  b_time[16]; /* Lib build time */
+} __packed;
+
+/* Library slot info */
+struct lib_slot_info {
+	u8  slot_num; /* 1 or 2 */
+	u8  reserved1;
+	u16 reserved2;
+	u32 iram_size; /* slot size in IRAM */
+	u32 dram_size; /* slot size in DRAM */
+	u32 iram_offset; /* starting offset of slot in IRAM */
+	u32 dram_offset; /* starting offset of slot in DRAM */
+} __packed;
+
+struct snd_ppp_mixer_params {
+	__u32			type; /*Type of the parameter */
+	__u32			size;
+	__u32			input_stream_bitmap; /*Input stream Bit Map*/
+} __packed;
+
+struct snd_sst_lib_download {
+	struct module_info lib_info; /* library info type, capabilities etc */
+	struct lib_slot_info slot_info; /* slot info to be downloaded */
+	u32 mod_entry_pt;
+};
+
+struct snd_sst_lib_download_info {
+	struct snd_sst_lib_download dload_lib;
+	u16 result;	/* Result used for acknowledgment */
+	u8 pvt_id; /* Private ID */
+	u8 reserved;  /* for alignment */
+};
+
+struct snd_pcm_params {
+	u8 num_chan;	/* 1=Mono, 2=Stereo */
+	u8 pcm_wd_sz;	/* 16/24 - bit*/
+	u8 use_offload_path;	/* 0-PCM using period elpased & ALSA interfaces
+				   1-PCM stream via compressed interface  */
+	u8 reserved2;
+	u32 sfreq;    /* Sampling rate in Hz */
+	u8 channel_map[8];
+} __packed;
+
+/* MP3 Music Parameters Message */
+struct snd_mp3_params {
+	u8  num_chan;	/* 1=Mono, 2=Stereo	*/
+	u8  pcm_wd_sz; /* 16/24 - bit*/
+	u8  crc_check; /* crc_check - disable (0) or enable (1) */
+	u8  reserved1; /* unused*/
+};
+
+#define AAC_BIT_STREAM_ADTS		0
+#define AAC_BIT_STREAM_ADIF		1
+#define AAC_BIT_STREAM_RAW		2
+
+/* AAC Music Parameters Message */
+struct snd_aac_params {
+	u8 num_chan; /* 1=Mono, 2=Stereo*/
+	u8 pcm_wd_sz; /* 16/24 - bit*/
+	u8 bdownsample; /*SBR downsampling 0 - disable 1 -enabled AAC+ only */
+	u8 bs_format; /* input bit stream format adts=0, adif=1, raw=2 */
+	u32 externalsr; /*sampling rate of basic AAC raw bit stream*/
+	u8 sbr_signalling;/*disable/enable/set automode the SBR tool.AAC+*/
+	u8 reser1;
+	u16  reser2;
+};
+
+/* WMA Music Parameters Message */
+struct snd_wma_params {
+	u8  num_chan;	/* 1=Mono, 2=Stereo */
+	u8  pcm_wd_sz;	/* 16/24 - bit*/
+	u16 reserved1;
+	u32 brate;	/* Use the hard coded value. */
+	u32 sfreq;	/* Sampling freq eg. 8000, 441000, 48000 */
+	u32 channel_mask;  /* Channel Mask */
+	u16 format_tag;	/* Format Tag */
+	u16 block_align;	/* packet size */
+	u16 wma_encode_opt;/* Encoder option */
+	u8 op_align;	/* op align 0- 16 bit, 1- MSB, 2 LSB */
+	u8 reserved;	/* reserved */
+};
+
+/* Codec params struture */
+union  snd_sst_codec_params {
+	struct snd_pcm_params pcm_params;
+	struct snd_mp3_params mp3_params;
+	struct snd_aac_params aac_params;
+	struct snd_wma_params wma_params;
+};
+
+/* Address and size info of a frame buffer in DDR */
+struct sst_address_info {
+	__u32 addr; /* Address at IA */
+	__u32 size; /* Size of the buffer */
+} __packed;
+
+/* Additional params for Alloc struct*/
+struct snd_sst_alloc_params_ext {
+	__u16 sg_count;
+	__u16 reserved;
+	__u32 frag_size;	/*Number of samples after which period elapsed
+				  message is sent valid only if path  = 0*/
+	struct sst_address_info  ring_buf_info[8];
+};
+
+struct snd_sst_stream_params {
+	union snd_sst_codec_params uc;
+} __packed;
+
+struct snd_sst_params {
+	u32 result;
+	u32 stream_id;
+	u8 codec;
+	u8 ops;
+	u8 stream_type;
+	u8 device_type;
+	u8 task;
+	struct snd_sst_stream_params sparams;
+	struct snd_sst_alloc_params_ext aparams;
+};
+
+struct snd_sst_alloc_mrfld {
+	u16 codec_type;
+	u8 operation;
+	u8 sg_count;
+	struct sst_address_info ring_buf_info[8];
+	u32 frag_size;
+	u32 ts;
+	struct snd_sst_stream_params codec_params;
+} __packed;
+
+/* Alloc stream params structure */
+struct snd_sst_alloc_params {
+	struct snd_sst_str_type str_type;
+	struct snd_sst_stream_params stream_params;
+	struct snd_sst_alloc_params_ext alloc_params;
+} __packed;
+
+/* Alloc stream response message */
+struct snd_sst_alloc_response {
+	struct snd_sst_str_type str_type; /* Stream type for allocation */
+	struct snd_sst_lib_download lib_dnld; /* Valid only for codec dnld */
+};
+
+/* Drop response */
+struct snd_sst_drop_response {
+	u32 result;
+	u32 bytes;
+};
+
+struct snd_sst_async_msg {
+	u32 msg_id; /* Async msg id */
+	u32 payload[0];
+};
+
+struct snd_sst_async_err_msg {
+	u32 fw_resp; /* Firmware Result */
+	u32 lib_resp; /*Library result */
+} __packed;
+
+struct snd_sst_vol {
+	u32	stream_id;
+	s32	volume;
+	u32	ramp_duration;
+	u32	ramp_type;		/* Ramp type, default=0 */
+};
+
+/* Gain library parameters for mrfld
+ * based on DSP command spec v0.82
+ */
+struct snd_sst_gain_v2 {
+	u16 gain_cell_num;  /* num of gain cells to modify*/
+	u8 cell_nbr_idx; /* instance index*/
+	u8 cell_path_idx; /* pipe-id */
+	u16 module_id; /*module id */
+	u16 left_cell_gain; /* left gain value in dB*/
+	u16 right_cell_gain; /* right gain value in dB*/
+	u16 gain_time_const; /* gain time constant*/
+} __packed;
+
+struct snd_sst_mute {
+	u32	stream_id;
+	u32	mute;
+};
+
+struct snd_sst_runtime_params {
+	u8 type;
+	u8 str_id;
+	u8 size;
+	u8 rsvd;
+	void *addr;
+} __packed;
+
+enum stream_param_type {
+	SST_SET_TIME_SLOT = 0,
+	SST_SET_CHANNEL_INFO = 1,
+	OTHERS = 2, /*reserved for future params*/
+};
+
+/* CSV Voice call routing structure */
+struct snd_sst_control_routing {
+	u8 control; /* 0=start, 1=Stop */
+	u8 reserved[3];	/* Reserved- for 32 bit alignment */
+};
+
+struct ipc_post {
+	struct list_head node;
+	union ipc_header header; /* driver specific */
+	bool is_large;
+	bool is_process_reply;
+	union ipc_header_mrfld mrfld_header;
+	char *mailbox_data;
+};
+
+struct snd_sst_ctxt_params {
+	u32 address; /* Physical Address in DDR where the context is stored */
+	u32 size; /* size of the context */
+};
+
+struct snd_sst_lpe_log_params {
+	u8 dbg_type;
+	u8 module_id;
+	u8 log_level;
+	u8 reserved;
+} __packed;
+
+enum snd_sst_bytes_type {
+	SND_SST_BYTES_SET = 0x1,
+	SND_SST_BYTES_GET = 0x2,
+};
+
+struct snd_sst_bytes_v2 {
+	u8 type;
+	u8 ipc_msg;
+	u8 block;
+	u8 task_id;
+	u8 pipe_id;
+	u8 rsvd;
+	u16 len;
+	char bytes[0];
+};
+
+#define MAX_VTSV_FILES 2
+struct snd_sst_vtsv_info {
+	struct sst_address_info vfiles[MAX_VTSV_FILES];
+} __packed;
+
+#endif /* __PLATFORMDRV_IPC_V2_H__ */
diff --git a/sound/soc/intel/sst/Makefile b/sound/soc/intel/sst/Makefile
new file mode 100644
index 0000000..99ad5ee
--- /dev/null
+++ b/sound/soc/intel/sst/Makefile
@@ -0,0 +1,12 @@
+# Makefile for SST Audio driver
+snd-intel-sst-objs := sst.o sst_ipc.o sst_stream.o sst_drv_interface.o sst_dsp.o sst_pvt.o sst_app_interface.o sst_acpi.o
+
+ifdef CONFIG_DEBUG_FS
+	snd-intel-sst-objs += sst_debug.o
+endif
+
+obj-$(CONFIG_SND_INTEL_SST) += snd-intel-sst.o
+
+ccflags-y += -DMRFLD_WORD_WA -Werror
+
+ccflags-y += -I$(src)
diff --git a/sound/soc/intel/sst/sst.c b/sound/soc/intel/sst/sst.c
new file mode 100644
index 0000000..38903aa
--- /dev/null
+++ b/sound/soc/intel/sst/sst.c
@@ -0,0 +1,996 @@
+/*
+ *  sst.c - Intel SST Driver for audio engine
+ *
+ *  Copyright (C) 2008-10	Intel Corp
+ *  Authors:	Vinod Koul <vinod.koul@intel.com>
+ *		Harsha Priya <priya.harsha@intel.com>
+ *		Dharageswari R <dharageswari.r@intel.com>
+ *		KP Jeeja <jeeja.kp@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This driver enumerates the SST audio engine as a PCI or ACPI device and
+ *  provides interface to the platform driver to interact with the SST audio
+ *  Firmware.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/firmware.h>
+#include <linux/miscdevice.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_qos.h>
+#include <linux/async.h>
+#include <linux/lnw_gpio.h>
+#include <linux/delay.h>
+#include <linux/acpi.h>
+#include <asm/intel-mid.h>
+#include <asm/platform_sst_audio.h>
+#include <asm/platform_sst.h>
+#include "../sst_platform.h"
+#include "../platform_ipc_v2.h"
+#include "sst.h"
+
+#define CREATE_TRACE_POINTS
+#include "sst_trace.h"
+
+MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
+MODULE_AUTHOR("Harsha Priya <priya.harsha@intel.com>");
+MODULE_AUTHOR("Dharageswari R <dharageswari.r@intel.com>");
+MODULE_AUTHOR("KP Jeeja <jeeja.kp@intel.com>");
+MODULE_DESCRIPTION("Intel (R) SST(R) Audio Engine Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(SST_DRIVER_VERSION);
+
+struct intel_sst_drv *sst_drv_ctx;
+static struct mutex drv_ctx_lock;
+
+/*
+ *  * ioctl32 compat
+ *   */
+#ifdef CONFIG_COMPAT
+#include "sst_app_compat_interface.c"
+#else
+#define intel_sst_ioctl_compat NULL
+#endif
+
+static const struct file_operations intel_sst_fops_cntrl = {
+	.owner = THIS_MODULE,
+	.open = intel_sst_open_cntrl,
+	.release = intel_sst_release_cntrl,
+	.unlocked_ioctl = intel_sst_ioctl,
+	.compat_ioctl = intel_sst_ioctl_compat,
+};
+
+struct miscdevice lpe_ctrl = {
+	.minor = MISC_DYNAMIC_MINOR,/* dynamic allocation */
+	.name = "intel_sst_ctrl",/* /dev/intel_sst_ctrl */
+	.fops = &intel_sst_fops_cntrl
+};
+
+static inline void set_imr_interrupts(struct intel_sst_drv *ctx, bool enable)
+{
+	union interrupt_reg imr;
+
+	spin_lock(&ctx->ipc_spin_lock);
+	imr.full = sst_shim_read(ctx->shim, SST_IMRX);
+	if (enable) {
+		imr.part.done_interrupt = 0;
+		imr.part.busy_interrupt = 0;
+	} else {
+		imr.part.done_interrupt = 1;
+		imr.part.busy_interrupt = 1;
+	}
+	sst_shim_write(ctx->shim, SST_IMRX, imr.full);
+	spin_unlock(&ctx->ipc_spin_lock);
+}
+
+#define SST_IS_PROCESS_REPLY(header) ((header & PROCESS_MSG) ? true : false)
+#define SST_VALIDATE_MAILBOX_SIZE(size) ((size <= SST_MAILBOX_SIZE) ? true : false)
+
+static irqreturn_t intel_sst_interrupt_mrfld(int irq, void *context)
+{
+	union interrupt_reg_mrfld isr;
+	union ipc_header_mrfld header;
+	union sst_imr_reg_mrfld imr;
+	struct ipc_post *msg = NULL;
+	unsigned int size = 0;
+	struct intel_sst_drv *drv = (struct intel_sst_drv *) context;
+	irqreturn_t retval = IRQ_HANDLED;
+
+	/* Interrupt arrived, check src */
+	isr.full = sst_shim_read64(drv->shim, SST_ISRX);
+	if (isr.part.done_interrupt) {
+		/* Clear done bit */
+		spin_lock(&drv->ipc_spin_lock);
+		header.full = sst_shim_read64(drv->shim,
+					drv->ipc_reg.ipcx);
+		header.p.header_high.part.done = 0;
+		sst_shim_write64(drv->shim, drv->ipc_reg.ipcx, header.full);
+		/* write 1 to clear status register */;
+		isr.part.done_interrupt = 1;
+		sst_shim_write64(drv->shim, SST_ISRX, isr.full);
+		spin_unlock(&drv->ipc_spin_lock);
+		trace_sst_ipc("ACK   <-", header.p.header_high.full,
+					  header.p.header_low_payload,
+					  header.p.header_high.part.drv_id);
+		queue_work(drv->post_msg_wq, &drv->ipc_post_msg.wq);
+		retval = IRQ_HANDLED;
+	}
+	if (isr.part.busy_interrupt) {
+		spin_lock(&drv->ipc_spin_lock);
+		imr.full = sst_shim_read64(drv->shim, SST_IMRX);
+		imr.part.busy_interrupt = 1;
+		sst_shim_write64(drv->shim, SST_IMRX, imr.full);
+		spin_unlock(&drv->ipc_spin_lock);
+		header.full =  sst_shim_read64(drv->shim, drv->ipc_reg.ipcd);
+		if (sst_create_ipc_msg(&msg, header.p.header_high.part.large)) {
+			pr_err("No memory available\n");
+			drv->ops->clear_interrupt();
+			return IRQ_HANDLED;
+		}
+		if (header.p.header_high.part.large) {
+			size = header.p.header_low_payload;
+			if (SST_VALIDATE_MAILBOX_SIZE(size)) {
+				memcpy_fromio(msg->mailbox_data,
+					drv->mailbox + drv->mailbox_recv_offset, size);
+			} else {
+				pr_err("Mailbox not copied, payload siz is: %u\n", size);
+				header.p.header_low_payload = 0;
+			}
+		}
+		msg->mrfld_header = header;
+		msg->is_process_reply =
+			SST_IS_PROCESS_REPLY(header.p.header_high.part.msg_id);
+		trace_sst_ipc("REPLY <-", msg->mrfld_header.p.header_high.full,
+					  msg->mrfld_header.p.header_low_payload,
+					  msg->mrfld_header.p.header_high.part.drv_id);
+		spin_lock(&drv->rx_msg_lock);
+		list_add_tail(&msg->node, &drv->rx_list);
+		spin_unlock(&drv->rx_msg_lock);
+		drv->ops->clear_interrupt();
+		retval = IRQ_WAKE_THREAD;
+	}
+	return retval;
+}
+
+static irqreturn_t intel_sst_irq_thread_mfld(int irq, void *context)
+{
+	struct intel_sst_drv *drv = (struct intel_sst_drv *) context;
+	struct ipc_post *__msg, *msg = NULL;
+	unsigned long irq_flags;
+
+	if (list_empty(&drv->rx_list))
+		return IRQ_HANDLED;
+
+	spin_lock_irqsave(&drv->rx_msg_lock, irq_flags);
+	list_for_each_entry_safe(msg, __msg, &drv->rx_list, node) {
+
+		list_del(&msg->node);
+		spin_unlock_irqrestore(&drv->rx_msg_lock, irq_flags);
+		if (msg->is_process_reply)
+			drv->ops->process_message(msg);
+		else
+			drv->ops->process_reply(msg);
+
+		if (msg->is_large)
+			kfree(msg->mailbox_data);
+		kfree(msg);
+		spin_lock_irqsave(&drv->rx_msg_lock, irq_flags);
+	}
+	spin_unlock_irqrestore(&drv->rx_msg_lock, irq_flags);
+	return IRQ_HANDLED;
+}
+
+static int sst_save_dsp_context_v2(struct intel_sst_drv *sst)
+{
+	unsigned int pvt_id;
+	struct ipc_post *msg = NULL;
+	struct ipc_dsp_hdr dsp_hdr;
+	struct sst_block *block;
+
+	/*send msg to fw*/
+	pvt_id = sst_assign_pvt_id(sst);
+	if (sst_create_block_and_ipc_msg(&msg, true, sst, &block,
+				IPC_CMD, pvt_id)) {
+		pr_err("msg/block alloc failed. Not proceeding with context save\n");
+		return 0;
+	}
+
+	sst_fill_header_mrfld(&msg->mrfld_header, IPC_CMD,
+			      SST_TASK_ID_MEDIA, 1, pvt_id);
+	msg->mrfld_header.p.header_low_payload = sizeof(dsp_hdr);
+	msg->mrfld_header.p.header_high.part.res_rqd = 1;
+	sst_fill_header_dsp(&dsp_hdr, IPC_PREP_D3, PIPE_RSVD, pvt_id);
+	memcpy(msg->mailbox_data, &dsp_hdr, sizeof(dsp_hdr));
+
+	sst_add_to_dispatch_list_and_post(sst, msg);
+	/*wait for reply*/
+	if (sst_wait_timeout(sst, block)) {
+		pr_err("sst: err fw context save timeout  ...\n");
+		pr_err("not suspending FW!!!");
+		sst_free_block(sst, block);
+		return -EIO;
+	}
+	if (block->ret_code) {
+		pr_err("fw responded w/ error %d", block->ret_code);
+		sst_free_block(sst, block);
+		return -EIO;
+	}
+
+	sst_free_block(sst, block);
+	return 0;
+}
+
+static struct intel_sst_ops mrfld_ops = {
+	.interrupt = intel_sst_interrupt_mrfld,
+	.irq_thread = intel_sst_irq_thread_mfld,
+	.clear_interrupt = intel_sst_clear_intr_mrfld,
+	.start = sst_start_mrfld,
+	.reset = intel_sst_reset_dsp_mrfld,
+	.post_message = sst_post_message_mrfld,
+	.sync_post_message = sst_sync_post_message_mrfld,
+	.process_message = sst_process_message_mrfld,
+	.process_reply = sst_process_reply_mrfld,
+	.save_dsp_context =  sst_save_dsp_context_v2,
+	.alloc_stream = sst_alloc_stream_mrfld,
+	.post_download = sst_post_download_mrfld,
+	.do_recovery = sst_do_recovery_mrfld,
+};
+
+int sst_driver_ops(struct intel_sst_drv *sst)
+{
+
+	switch (sst->pci_id) {
+	case SST_MRFLD_PCI_ID:
+	case PCI_DEVICE_ID_INTEL_SST_MOOR:
+		sst->tstamp = SST_TIME_STAMP_MRFLD;
+		sst->ops = &mrfld_ops;
+
+		/* For MOFD platforms disable/enable recovery based on
+		 * platform data
+		 */
+		if (sst->pci_id == PCI_DEVICE_ID_INTEL_SST_MOOR) {
+			if (!sst->pdata->enable_recovery) {
+				pr_debug("Recovery disabled for this mofd platform\n");
+				sst->ops->do_recovery = sst_do_recovery;
+			} else
+				pr_debug("Recovery enabled for this mofd platform\n");
+		}
+
+		return 0;
+	default:
+		pr_err("SST Driver capablities missing for pci_id: %x", sst->pci_id);
+		return -EINVAL;
+	};
+}
+
+int sst_alloc_drv_context(struct device *dev)
+{
+	struct intel_sst_drv *ctx;
+	mutex_lock(&drv_ctx_lock);
+	if (sst_drv_ctx) {
+		pr_err("Only one sst handle is supported\n");
+		mutex_unlock(&drv_ctx_lock);
+		return -EBUSY;
+	}
+	pr_debug("%s: %d", __func__, __LINE__);
+	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+	if (!ctx) {
+		pr_err("malloc fail\n");
+		mutex_unlock(&drv_ctx_lock);
+		return -ENOMEM;
+	}
+	sst_drv_ctx = ctx;
+	mutex_unlock(&drv_ctx_lock);
+	return 0;
+}
+
+static ssize_t sst_sysfs_get_recovery(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct intel_sst_drv *ctx = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%d\n", ctx->sst_state);
+}
+
+
+static ssize_t sst_sysfs_set_recovery(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t len)
+{
+	long val;
+	struct intel_sst_drv *ctx = dev_get_drvdata(dev);
+
+	if (kstrtol(buf, 0, &val))
+		return -EINVAL;
+
+	if (val == 1) {
+		if (!atomic_read(&ctx->pm_usage_count)) {
+			pr_debug("%s: set sst state to RESET...\n", __func__);
+			sst_set_fw_state_locked(ctx, SST_RESET);
+		} else {
+			pr_err("%s: not setting sst state... %d\n", __func__,
+					atomic_read(&ctx->pm_usage_count));
+			pr_err("Unrecoverable state....\n");
+			BUG();
+			return -EPERM;
+		}
+	}
+
+	return len;
+}
+
+static DEVICE_ATTR(audio_recovery, S_IRUGO | S_IWUSR,
+			sst_sysfs_get_recovery, sst_sysfs_set_recovery);
+
+int sst_request_firmware_async(struct intel_sst_drv *ctx)
+{
+	int ret = 0;
+
+	snprintf(ctx->firmware_name, sizeof(ctx->firmware_name),
+			"%s%04x%s", "fw_sst_",
+			ctx->pci_id, ".bin");
+	pr_debug("Requesting FW %s now...\n", ctx->firmware_name);
+
+	trace_sst_fw_download("Request firmware async", ctx->sst_state);
+
+	ret = request_firmware_nowait(THIS_MODULE, 1, ctx->firmware_name,
+			ctx->dev, GFP_KERNEL, ctx, sst_firmware_load_cb);
+	if (ret)
+		pr_err("could not load firmware %s error %d\n", ctx->firmware_name, ret);
+
+	return ret;
+}
+/*
+* intel_sst_probe - PCI probe function
+*
+* @pci:	PCI device structure
+* @pci_id: PCI device ID structure
+*
+* This function is called by OS when a device is found
+* This enables the device, interrupt etc
+*/
+static int intel_sst_probe(struct pci_dev *pci,
+			const struct pci_device_id *pci_id)
+{
+	int i, ret = 0;
+	struct intel_sst_ops *ops;
+	struct sst_platform_info *sst_pdata = pci->dev.platform_data;
+	int ddr_base;
+	u32 ssp_base_add;
+	u32 dma_base_add;
+	u32 len;
+
+
+
+	pr_debug("Probe for DID %x\n", pci->device);
+	ret = sst_alloc_drv_context(&pci->dev);
+	if (ret)
+		return ret;
+
+	sst_drv_ctx->dev = &pci->dev;
+	sst_drv_ctx->pci_id = pci->device;
+	if (!sst_pdata)
+		return -EINVAL;
+	sst_drv_ctx->pdata = sst_pdata;
+
+	if (!sst_drv_ctx->pdata->probe_data)
+		return -EINVAL;
+	memcpy(&sst_drv_ctx->info, sst_drv_ctx->pdata->probe_data,
+					sizeof(sst_drv_ctx->info));
+
+	sst_drv_ctx->use_32bit_ops = sst_drv_ctx->pdata->ipc_info->use_32bit_ops;
+	sst_drv_ctx->mailbox_recv_offset = sst_drv_ctx->pdata->ipc_info->mbox_recv_off;
+
+	if (0 != sst_driver_ops(sst_drv_ctx))
+		return -EINVAL;
+	ops = sst_drv_ctx->ops;
+	mutex_init(&sst_drv_ctx->stream_lock);
+	mutex_init(&sst_drv_ctx->sst_lock);
+	mutex_init(&sst_drv_ctx->mixer_ctrl_lock);
+	mutex_init(&sst_drv_ctx->csr_lock);
+
+	sst_drv_ctx->stream_cnt = 0;
+	sst_drv_ctx->fw_in_mem = NULL;
+	sst_drv_ctx->vcache.file1_in_mem = NULL;
+	sst_drv_ctx->vcache.file2_in_mem = NULL;
+	sst_drv_ctx->vcache.size1 = 0;
+	sst_drv_ctx->vcache.size2 = 0;
+
+	/* we use dma, so set to 1*/
+	sst_drv_ctx->use_dma = 1;
+	sst_drv_ctx->use_lli = 1;
+
+	INIT_LIST_HEAD(&sst_drv_ctx->memcpy_list);
+	INIT_LIST_HEAD(&sst_drv_ctx->libmemcpy_list);
+
+	INIT_LIST_HEAD(&sst_drv_ctx->ipc_dispatch_list);
+	INIT_LIST_HEAD(&sst_drv_ctx->block_list);
+	INIT_LIST_HEAD(&sst_drv_ctx->rx_list);
+	INIT_WORK(&sst_drv_ctx->ipc_post_msg.wq, ops->post_message);
+	init_waitqueue_head(&sst_drv_ctx->wait_queue);
+
+	sst_drv_ctx->mad_wq = create_singlethread_workqueue("sst_mad_wq");
+	if (!sst_drv_ctx->mad_wq) {
+		ret = -EINVAL;
+		goto do_free_drv_ctx;
+	}
+	sst_drv_ctx->post_msg_wq =
+		create_singlethread_workqueue("sst_post_msg_wq");
+	if (!sst_drv_ctx->post_msg_wq) {
+		ret = -EINVAL;
+		goto free_mad_wq;
+	}
+
+	spin_lock_init(&sst_drv_ctx->ipc_spin_lock);
+	spin_lock_init(&sst_drv_ctx->block_lock);
+	spin_lock_init(&sst_drv_ctx->pvt_id_lock);
+	spin_lock_init(&sst_drv_ctx->rx_msg_lock);
+
+	sst_drv_ctx->ipc_reg.ipcx = SST_IPCX + sst_drv_ctx->pdata->ipc_info->ipc_offset;
+	sst_drv_ctx->ipc_reg.ipcd = SST_IPCD + sst_drv_ctx->pdata->ipc_info->ipc_offset;
+	pr_debug("ipcx 0x%x ipxd 0x%x", sst_drv_ctx->ipc_reg.ipcx,
+					sst_drv_ctx->ipc_reg.ipcd);
+
+	pr_info("Got drv data max stream %d\n",
+				sst_drv_ctx->info.max_streams);
+	for (i = 1; i <= sst_drv_ctx->info.max_streams; i++) {
+		struct stream_info *stream = &sst_drv_ctx->streams[i];
+		memset(stream, 0, sizeof(*stream));
+		stream->pipe_id = PIPE_RSVD;
+		mutex_init(&stream->lock);
+	}
+
+	ret = sst_request_firmware_async(sst_drv_ctx);
+	if (ret) {
+		pr_err("Firmware download failed:%d\n", ret);
+		goto do_free_mem;
+	}
+	/* Init the device */
+	ret = pci_enable_device(pci);
+	if (ret) {
+		pr_err("device can't be enabled\n");
+		goto do_free_mem;
+	}
+	sst_drv_ctx->pci = pci_dev_get(pci);
+	ret = pci_request_regions(pci, SST_DRV_NAME);
+	if (ret)
+		goto do_disable_device;
+	/* map registers */
+	/* SST Shim */
+
+	if ((sst_drv_ctx->pci_id == SST_MRFLD_PCI_ID) ||
+			(sst_drv_ctx->pci_id == PCI_DEVICE_ID_INTEL_SST_MOOR)) {
+		sst_drv_ctx->ddr_base = pci_resource_start(pci, 0);
+		/*
+		* check that the relocated IMR base matches with FW Binary
+		* put temporary check till better soln is available for FW
+		*/
+		ddr_base = relocate_imr_addr_mrfld(sst_drv_ctx->ddr_base);
+		if (!sst_drv_ctx->pdata->lib_info) {
+			pr_err("%s:lib_info pointer NULL\n", __func__);
+			ret = -EINVAL;
+			goto do_release_regions;
+		}
+		if (ddr_base != sst_drv_ctx->pdata->lib_info->mod_base) {
+			pr_err("FW LSP DDR BASE does not match with IFWI\n");
+			ret = -EINVAL;
+			goto do_release_regions;
+		}
+		sst_drv_ctx->ddr_end = pci_resource_end(pci, 0);
+
+		sst_drv_ctx->ddr = pci_ioremap_bar(pci, 0);
+		if (!sst_drv_ctx->ddr) {
+			ret = -EINVAL;
+			goto do_unmap_ddr;
+		}
+		pr_debug("sst: DDR Ptr %p\n", sst_drv_ctx->ddr);
+	} else {
+		sst_drv_ctx->ddr = NULL;
+	}
+
+	/* SHIM */
+	sst_drv_ctx->shim_phy_add = pci_resource_start(pci, 1);
+	sst_drv_ctx->shim = pci_ioremap_bar(pci, 1);
+	if (!sst_drv_ctx->shim) {
+		ret = -EINVAL;
+		goto do_release_regions;
+	}
+	pr_debug("SST Shim Ptr %p\n", sst_drv_ctx->shim);
+
+	/* Shared SRAM */
+	sst_drv_ctx->mailbox_add = pci_resource_start(pci, 2);
+	sst_drv_ctx->mailbox = pci_ioremap_bar(pci, 2);
+	if (!sst_drv_ctx->mailbox) {
+		ret = -EINVAL;
+		goto do_unmap_shim;
+	}
+	pr_debug("SRAM Ptr %p\n", sst_drv_ctx->mailbox);
+
+	/* IRAM */
+	sst_drv_ctx->iram_end = pci_resource_end(pci, 3);
+	sst_drv_ctx->iram_base = pci_resource_start(pci, 3);
+	sst_drv_ctx->iram = pci_ioremap_bar(pci, 3);
+	if (!sst_drv_ctx->iram) {
+		ret = -EINVAL;
+		goto do_unmap_sram;
+	}
+	pr_debug("IRAM Ptr %p\n", sst_drv_ctx->iram);
+
+	/* DRAM */
+	sst_drv_ctx->dram_end = pci_resource_end(pci, 4);
+	sst_drv_ctx->dram_base = pci_resource_start(pci, 4);
+	sst_drv_ctx->dram = pci_ioremap_bar(pci, 4);
+	if (!sst_drv_ctx->dram) {
+		ret = -EINVAL;
+		goto do_unmap_iram;
+	}
+	pr_debug("DRAM Ptr %p\n", sst_drv_ctx->dram);
+
+	if ((sst_pdata->pdata != NULL) &&
+			(sst_pdata->debugfs_data != NULL)) {
+		if (sst_pdata->ssp_data != NULL) {
+			/* SSP Register */
+			ssp_base_add = sst_pdata->ssp_data->base_add;
+			len = sst_pdata->debugfs_data->ssp_reg_size;
+			for (i = 0; i < sst_pdata->debugfs_data->num_ssp; i++) {
+				sst_drv_ctx->debugfs.ssp[i] =
+					devm_ioremap(&pci->dev,
+						ssp_base_add + (len * i), len);
+				if (!sst_drv_ctx->debugfs.ssp[i]) {
+					pr_warn("ssp ioremap failed\n");
+					continue;
+				}
+
+				pr_debug("\n ssp io 0x%p ssp 0x%x size 0x%x",
+					sst_drv_ctx->debugfs.ssp[i],
+						ssp_base_add, len);
+			}
+		}
+
+		/* DMA Register */
+		dma_base_add = sst_pdata->pdata->sst_dma_base[0];
+		len = sst_pdata->debugfs_data->dma_reg_size;
+		for (i = 0; i < sst_pdata->debugfs_data->num_dma; i++) {
+			sst_drv_ctx->debugfs.dma_reg[i] =
+				devm_ioremap(&pci->dev,
+					dma_base_add + (len * i), len);
+			if (!sst_drv_ctx->debugfs.dma_reg[i]) {
+				pr_warn("dma ioremap failed\n");
+				continue;
+			}
+
+			pr_debug("\n dma io 0x%p ssp 0x%x size 0x%x",
+				sst_drv_ctx->debugfs.dma_reg[i],
+					dma_base_add, len);
+		}
+	}
+
+	/* Do not access iram/dram etc before LPE is reset */
+
+	sst_drv_ctx->dump_buf.iram_buf.size = pci_resource_len(pci, 3);
+	sst_drv_ctx->dump_buf.iram_buf.buf = kzalloc(sst_drv_ctx->dump_buf.iram_buf.size,
+						GFP_KERNEL);
+	if (!sst_drv_ctx->dump_buf.iram_buf.buf) {
+		pr_err("%s: no memory\n", __func__);
+		ret = -ENOMEM;
+		goto do_unmap_dram;
+	}
+
+	sst_drv_ctx->dump_buf.dram_buf.size = pci_resource_len(pci, 4);
+	sst_drv_ctx->dump_buf.dram_buf.buf = kzalloc(sst_drv_ctx->dump_buf.dram_buf.size,
+						GFP_KERNEL);
+	if (!sst_drv_ctx->dump_buf.dram_buf.buf) {
+		pr_err("%s: no memory\n", __func__);
+		ret = -ENOMEM;
+		goto do_free_iram_buf;
+	}
+
+	pr_debug("\niram len 0x%x dram len 0x%x",
+			sst_drv_ctx->dump_buf.iram_buf.size,
+			sst_drv_ctx->dump_buf.dram_buf.size);
+
+	sst_set_fw_state_locked(sst_drv_ctx, SST_RESET);
+	sst_drv_ctx->irq_num = pci->irq;
+	/* Register the ISR */
+	ret = request_threaded_irq(pci->irq, sst_drv_ctx->ops->interrupt,
+		sst_drv_ctx->ops->irq_thread, 0, SST_DRV_NAME,
+		sst_drv_ctx);
+	if (ret)
+		goto do_free_probe_bytes;
+	pr_debug("Registered IRQ 0x%x\n", pci->irq);
+
+	/*Register LPE Control as misc driver*/
+	ret = misc_register(&lpe_ctrl);
+	if (ret) {
+		pr_err("couldn't register control device\n");
+		goto do_free_irq;
+	}
+	/* default intr are unmasked so set this as masked */
+	if ((sst_drv_ctx->pci_id == SST_MRFLD_PCI_ID) ||
+			(sst_drv_ctx->pci_id == PCI_DEVICE_ID_INTEL_SST_MOOR))
+		sst_shim_write64(sst_drv_ctx->shim, SST_IMRX, 0xFFFF0038);
+
+	if (sst_drv_ctx->use_32bit_ops) {
+		pr_debug("allocate mem for context save/restore\n ");
+		/*allocate mem for fw context save during suspend*/
+		sst_drv_ctx->fw_cntx = kzalloc(FW_CONTEXT_MEM, GFP_KERNEL);
+		if (!sst_drv_ctx->fw_cntx) {
+			ret = -ENOMEM;
+			goto do_free_misc;
+		}
+		/*setting zero as that is valid mem to restore*/
+		sst_drv_ctx->fw_cntx_size = 0;
+	}
+	if (sst_drv_ctx->pdata->ssp_data) {
+		if (sst_drv_ctx->pdata->ssp_data->gpio_in_use)
+			sst_set_gpio_conf(&sst_drv_ctx->pdata->ssp_data->gpio);
+	}
+	pci_set_drvdata(pci, sst_drv_ctx);
+	pm_runtime_allow(sst_drv_ctx->dev);
+	pm_runtime_put_noidle(sst_drv_ctx->dev);
+	register_sst(sst_drv_ctx->dev);
+	sst_debugfs_init(sst_drv_ctx);
+	sst_drv_ctx->qos = kzalloc(sizeof(struct pm_qos_request),
+				GFP_KERNEL);
+	if (!sst_drv_ctx->qos) {
+		ret = -EINVAL;
+		goto do_free_misc;
+	}
+	pm_qos_add_request(sst_drv_ctx->qos, PM_QOS_CPU_DMA_LATENCY,
+				PM_QOS_DEFAULT_VALUE);
+
+	ret = device_create_file(sst_drv_ctx->dev, &dev_attr_audio_recovery);
+	if (ret) {
+		pr_err("could not create sysfs %s file\n",
+			dev_attr_audio_recovery.attr.name);
+		goto do_free_qos;
+	}
+
+	pr_info("%s successfully done!\n", __func__);
+	return ret;
+
+do_free_qos:
+	pm_qos_remove_request(sst_drv_ctx->qos);
+	kfree(sst_drv_ctx->qos);
+do_free_misc:
+	misc_deregister(&lpe_ctrl);
+do_free_irq:
+	free_irq(pci->irq, sst_drv_ctx);
+do_free_probe_bytes:
+#ifdef CONFIG_DEBUG_FS
+do_free_iram_buf:
+#endif
+do_unmap_dram:
+	iounmap(sst_drv_ctx->dram);
+do_unmap_iram:
+	iounmap(sst_drv_ctx->iram);
+do_unmap_sram:
+	iounmap(sst_drv_ctx->mailbox);
+do_unmap_shim:
+	iounmap(sst_drv_ctx->shim);
+
+do_unmap_ddr:
+	if (sst_drv_ctx->ddr)
+		iounmap(sst_drv_ctx->ddr);
+
+do_release_regions:
+	pci_release_regions(pci);
+do_disable_device:
+	pci_disable_device(pci);
+do_free_mem:
+	destroy_workqueue(sst_drv_ctx->post_msg_wq);
+free_mad_wq:
+	destroy_workqueue(sst_drv_ctx->mad_wq);
+do_free_drv_ctx:
+	sst_drv_ctx = NULL;
+	pr_err("Probe failed with %d\n", ret);
+	return ret;
+}
+
+/**
+* intel_sst_remove - PCI remove function
+*
+* @pci:	PCI device structure
+*
+* This function is called by OS when a device is unloaded
+* This frees the interrupt etc
+*/
+static void intel_sst_remove(struct pci_dev *pci)
+{
+	struct intel_sst_drv *sst_drv_ctx = pci_get_drvdata(pci);
+	sst_debugfs_exit(sst_drv_ctx);
+	pm_runtime_get_noresume(sst_drv_ctx->dev);
+	pm_runtime_forbid(sst_drv_ctx->dev);
+	unregister_sst(sst_drv_ctx->dev);
+	pci_dev_put(sst_drv_ctx->pci);
+	sst_set_fw_state_locked(sst_drv_ctx, SST_SHUTDOWN);
+	misc_deregister(&lpe_ctrl);
+	free_irq(pci->irq, sst_drv_ctx);
+
+	iounmap(sst_drv_ctx->dram);
+	iounmap(sst_drv_ctx->iram);
+	iounmap(sst_drv_ctx->mailbox);
+	iounmap(sst_drv_ctx->shim);
+#ifdef CONFIG_DEBUG_FS
+#endif
+	device_remove_file(sst_drv_ctx->dev, &dev_attr_audio_recovery);
+	kfree(sst_drv_ctx->fw_cntx);
+	kfree(sst_drv_ctx->runtime_param.param.addr);
+	flush_scheduled_work();
+	destroy_workqueue(sst_drv_ctx->post_msg_wq);
+	destroy_workqueue(sst_drv_ctx->mad_wq);
+	pm_qos_remove_request(sst_drv_ctx->qos);
+	kfree(sst_drv_ctx->qos);
+	kfree(sst_drv_ctx->fw_sg_list.src);
+	kfree(sst_drv_ctx->fw_sg_list.dst);
+	sst_drv_ctx->fw_sg_list.list_len = 0;
+	kfree(sst_drv_ctx->fw_in_mem);
+	sst_drv_ctx->fw_in_mem = NULL;
+	sst_memcpy_free_resources();
+	sst_drv_ctx = NULL;
+	pci_release_regions(pci);
+	pci_disable_device(pci);
+	pci_set_drvdata(pci, NULL);
+}
+
+inline void sst_save_shim64(struct intel_sst_drv *ctx,
+			    void __iomem *shim,
+			    struct sst_shim_regs64 *shim_regs)
+{
+	unsigned long irq_flags;
+	spin_lock_irqsave(&ctx->ipc_spin_lock, irq_flags);
+
+	shim_regs->csr = sst_shim_read64(shim, SST_CSR),
+	shim_regs->pisr = sst_shim_read64(shim, SST_PISR),
+	shim_regs->pimr = sst_shim_read64(shim, SST_PIMR),
+	shim_regs->isrx = sst_shim_read64(shim, SST_ISRX),
+	shim_regs->isrd = sst_shim_read64(shim, SST_ISRD),
+	shim_regs->imrx = sst_shim_read64(shim, SST_IMRX),
+	shim_regs->imrd = sst_shim_read64(shim, SST_IMRD),
+	shim_regs->ipcx = sst_shim_read64(shim, ctx->ipc_reg.ipcx),
+	shim_regs->ipcd = sst_shim_read64(shim, ctx->ipc_reg.ipcd),
+	shim_regs->isrsc = sst_shim_read64(shim, SST_ISRSC),
+	shim_regs->isrlpesc = sst_shim_read64(shim, SST_ISRLPESC),
+	shim_regs->imrsc = sst_shim_read64(shim, SST_IMRSC),
+	shim_regs->imrlpesc = sst_shim_read64(shim, SST_IMRLPESC),
+	shim_regs->ipcsc = sst_shim_read64(shim, SST_IPCSC),
+	shim_regs->ipclpesc = sst_shim_read64(shim, SST_IPCLPESC),
+	shim_regs->clkctl = sst_shim_read64(shim, SST_CLKCTL),
+	shim_regs->csr2 = sst_shim_read64(shim, SST_CSR2);
+
+	spin_unlock_irqrestore(&ctx->ipc_spin_lock, irq_flags);
+}
+
+static inline void sst_restore_shim64(struct intel_sst_drv *ctx,
+				      void __iomem *shim,
+				      struct sst_shim_regs64 *shim_regs)
+{
+	unsigned long irq_flags;
+	spin_lock_irqsave(&ctx->ipc_spin_lock, irq_flags);
+	sst_shim_write64(shim, SST_IMRX, shim_regs->imrx),
+	spin_unlock_irqrestore(&ctx->ipc_spin_lock, irq_flags);
+}
+
+/*
+ * The runtime_suspend/resume is pretty much similar to the legacy
+ * suspend/resume with the noted exception below: The PCI core takes care of
+ * taking the system through D3hot and restoring it back to D0 and so there is
+ * no need to duplicate that here.
+ */
+static int intel_sst_runtime_suspend(struct device *dev)
+{
+	int ret = 0;
+	struct intel_sst_drv *ctx = dev_get_drvdata(dev);
+
+	pr_info("runtime_suspend called\n");
+	if (ctx->sst_state == SST_RESET) {
+		pr_debug("LPE is already in RESET state, No action");
+		return 0;
+	}
+	/*save fw context*/
+	if (ctx->ops->save_dsp_context(ctx))
+		return -EBUSY;
+
+	/* Move the SST state to Reset */
+	sst_set_fw_state_locked(ctx, SST_RESET);
+
+	flush_workqueue(ctx->post_msg_wq);
+	synchronize_irq(ctx->irq_num);
+
+	return ret;
+}
+
+static int intel_sst_runtime_resume(struct device *dev)
+{
+	int ret = 0;
+	struct intel_sst_drv *ctx = dev_get_drvdata(dev);
+
+	pr_info("runtime_resume called\n");
+
+	/* When fw_clear_cache is set, clear the cached firmware copy */
+	/* fw_clear_cache is set through debugfs support */
+	if (atomic_read(&ctx->fw_clear_cache) && ctx->fw_in_mem) {
+		pr_debug("Clearing the cached firmware\n");
+		kfree(ctx->fw_in_mem);
+		ctx->fw_in_mem = NULL;
+		atomic_set(&ctx->fw_clear_cache, 0);
+	}
+
+	sst_set_fw_state_locked(ctx, SST_RESET);
+
+	return ret;
+}
+
+static int intel_sst_suspend(struct device *dev)
+{
+	int retval = 0, usage_count;
+	struct intel_sst_drv *ctx = dev_get_drvdata(dev);
+
+	usage_count = atomic_read(&ctx->pm_usage_count);
+	if (usage_count) {
+		pr_err("Ret error for suspend:%d\n", usage_count);
+		return -EBUSY;
+	}
+	retval = intel_sst_runtime_suspend(dev);
+
+	return retval;
+}
+
+static int intel_sst_runtime_idle(struct device *dev)
+{
+	struct intel_sst_drv *ctx = dev_get_drvdata(dev);
+
+	pr_info("runtime_idle called\n");
+	if (ctx->sst_state != SST_RESET) {
+		pm_schedule_suspend(dev, SST_SUSPEND_DELAY);
+		return -EBUSY;
+	} else {
+		return 0;
+	}
+	return -EBUSY;
+
+}
+
+static void sst_do_shutdown(struct intel_sst_drv *ctx)
+{
+	int retval = 0;
+	unsigned int pvt_id;
+	struct ipc_post *msg = NULL;
+	struct sst_block *block = NULL;
+
+	pr_debug(" %s called\n", __func__);
+	if ((atomic_read(&ctx->pm_usage_count) == 0) ||
+		ctx->sst_state == SST_RESET) {
+		sst_set_fw_state_locked(ctx, SST_SHUTDOWN);
+		pr_debug("sst is already in suspended/RESET state\n");
+		return;
+	}
+	if (!ctx->use_32bit_ops)
+		return;
+
+	sst_set_fw_state_locked(ctx, SST_SHUTDOWN);
+	flush_workqueue(ctx->post_msg_wq);
+	pvt_id = sst_assign_pvt_id(ctx);
+	retval = sst_create_block_and_ipc_msg(&msg, false,
+			ctx, &block,
+			IPC_IA_PREPARE_SHUTDOWN, pvt_id);
+	if (retval) {
+		pr_err("sst_create_block returned error!\n");
+		return;
+	}
+	sst_fill_header(&msg->header, IPC_IA_PREPARE_SHUTDOWN, 0, pvt_id);
+	sst_add_to_dispatch_list_and_post(ctx, msg);
+	sst_wait_timeout(ctx, block);
+	sst_free_block(ctx, block);
+}
+
+
+/**
+* sst_pci_shutdown - PCI shutdown function
+*
+* @pci:        PCI device structure
+*
+* This function is called by OS when a device is shutdown/reboot
+*
+*/
+
+static void sst_pci_shutdown(struct pci_dev *pci)
+{
+	struct intel_sst_drv *ctx = pci_get_drvdata(pci);
+
+	pr_debug(" %s called\n", __func__);
+
+	sst_do_shutdown(ctx);
+	disable_irq_nosync(pci->irq);
+}
+
+static const struct dev_pm_ops intel_sst_pm = {
+	.suspend = intel_sst_suspend,
+	.resume = intel_sst_runtime_resume,
+	.runtime_suspend = intel_sst_runtime_suspend,
+	.runtime_resume = intel_sst_runtime_resume,
+	.runtime_idle = intel_sst_runtime_idle,
+};
+
+/* PCI Routines */
+static DEFINE_PCI_DEVICE_TABLE(intel_sst_ids) = {
+	{ PCI_VDEVICE(INTEL, SST_MRFLD_PCI_ID), 0},
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SST_MOOR), 0},
+	{ 0, }
+};
+MODULE_DEVICE_TABLE(pci, intel_sst_ids);
+
+static struct pci_driver driver = {
+	.name = SST_DRV_NAME,
+	.id_table = intel_sst_ids,
+	.probe = intel_sst_probe,
+	.remove = intel_sst_remove,
+	.shutdown = sst_pci_shutdown,
+#ifdef CONFIG_PM
+	.driver = {
+		.pm = &intel_sst_pm,
+	},
+#endif
+};
+
+/**
+* intel_sst_init - Module init function
+*
+* Registers with PCI
+* Registers with /dev
+* Init all data strutures
+*/
+static int __init intel_sst_init(void)
+{
+	/* Init all variables, data structure etc....*/
+	int ret = 0;
+	pr_info("INFO: ******** SST DRIVER loading.. Ver: %s\n",
+				       SST_DRIVER_VERSION);
+
+	mutex_init(&drv_ctx_lock);
+	/* Register with PCI */
+	ret = pci_register_driver(&driver);
+	if (ret)
+		pr_err("PCI register failed\n");
+
+	return ret;
+}
+
+/**
+* intel_sst_exit - Module exit function
+*
+* Unregisters with PCI
+* Unregisters with /dev
+* Frees all data strutures
+*/
+static void __exit intel_sst_exit(void)
+{
+	pci_unregister_driver(&driver);
+
+	pr_debug("driver unloaded\n");
+	sst_drv_ctx = NULL;
+	return;
+}
+
+module_init(intel_sst_init);
+module_exit(intel_sst_exit);
diff --git a/sound/soc/intel/sst/sst.h b/sound/soc/intel/sst/sst.h
new file mode 100644
index 0000000..b370bb6
--- /dev/null
+++ b/sound/soc/intel/sst/sst.h
@@ -0,0 +1,920 @@
+/*
+ *  sst.h - Intel SST Driver for audio engine
+ *
+ *  Copyright (C) 2008-10 Intel Corporation
+ *  Authors:	Vinod Koul <vinod.koul@intel.com>
+ *		Harsha Priya <priya.harsha@intel.com>
+ *		Dharageswari R <dharageswari.r@intel.com>
+ *		KP Jeeja <jeeja.kp@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  Common private declarations for SST
+ */
+#ifndef __SST_H__
+#define __SST_H__
+
+#include <linux/dmaengine.h>
+#include <linux/pm_runtime.h>
+#include <linux/firmware.h>
+#include <linux/intel_mid_dma.h>
+#include <linux/lnw_gpio.h>
+#include <asm/platform_sst.h>
+#include <sound/intel_sst_ioctl.h>
+
+#define SST_DRIVER_VERSION "3.0.8"
+
+/* driver names */
+#define SST_DRV_NAME "intel_sst_driver"
+#define SST_MRFLD_PCI_ID 0x119A
+
+#define SST_SUSPEND_DELAY 2000
+#define FW_CONTEXT_MEM (64*1024)
+#define SST_ICCM_BOUNDARY 4
+#define SST_CONFIG_SSP_SIGN 0x7ffe8001
+
+/* FIXME: All this info should come from platform data
+ * move this when the base framework is ready to pass
+ * platform data to SST driver
+ */
+#define MRFLD_FW_VIRTUAL_BASE 0xC0000000
+#define MRFLD_FW_DDR_BASE_OFFSET 0x0
+#define MRFLD_FW_FEATURE_BASE_OFFSET 0x4
+#define MRFLD_FW_BSS_RESET_BIT 0
+extern struct intel_sst_drv *sst_drv_ctx;
+enum sst_states {
+	SST_FW_LOADING = 1,
+	SST_FW_RUNNING,
+	SST_RESET,
+	SST_SHUTDOWN,
+};
+
+enum sst_algo_ops {
+	SST_SET_ALGO = 0,
+	SST_GET_ALGO = 1,
+};
+
+#define SST_BLOCK_TIMEOUT	1000
+
+/* SST register map */
+#define SST_CSR			0x00
+#define SST_PISR		0x08
+#define SST_PIMR		0x10
+#define SST_ISRX		0x18
+#define SST_ISRD		0x20
+#define SST_IMRX		0x28
+#define SST_IMRD		0x30
+#define SST_IPCX		0x38 /* IPC IA-SST */
+#define SST_IPCD		0x40 /* IPC SST-IA */
+#define SST_ISRSC		0x48
+#define SST_ISRLPESC		0x50
+#define SST_IMRSC		0x58
+#define SST_IMRLPESC		0x60
+#define SST_IPCSC		0x68
+#define SST_IPCLPESC		0x70
+#define SST_CLKCTL		0x78
+#define SST_CSR2		0x80
+
+#define SST_SHIM_BEGIN		SST_CSR
+#define SST_SHIM_END		SST_CSR2
+#define SST_SHIM_SIZE		0x88
+
+#define FW_SIGNATURE_SIZE	4
+
+/* stream states */
+enum sst_stream_states {
+	STREAM_UN_INIT	= 0,	/* Freed/Not used stream */
+	STREAM_RUNNING	= 1,	/* Running */
+	STREAM_PAUSED	= 2,	/* Paused stream */
+	STREAM_DECODE	= 3,	/* stream is in decoding only state */
+	STREAM_INIT	= 4,	/* stream init, waiting for data */
+	STREAM_RESET	= 5,	/* force reset on recovery */
+};
+
+enum sst_ram_type {
+	SST_IRAM	= 1,
+	SST_DRAM	= 2,
+	SST_DDR	= 5,
+	SST_CUSTOM_INFO	= 7,	/* consists of FW binary information */
+};
+
+/* SST shim registers to structure mapping  */
+union config_status_reg {
+	struct {
+		u32 mfld_strb:1;
+		u32 sst_reset:1;
+		u32 clk_sel:3;
+		u32 sst_clk:2;
+		u32 bypass:3;
+		u32 run_stall:1;
+		u32 rsvd1:2;
+		u32 strb_cntr_rst:1;
+		u32 rsvd:18;
+	} part;
+	u32 full;
+};
+
+union interrupt_reg {
+	struct {
+		u64 done_interrupt:1;
+		u64 busy_interrupt:1;
+		u64 rsvd:62;
+	} part;
+	u64 full;
+};
+
+union sst_imr_reg {
+	struct {
+		u32 done_interrupt:1;
+		u32 busy_interrupt:1;
+		u32 rsvd:30;
+	} part;
+	u32 full;
+};
+
+union sst_pisr_reg {
+	struct {
+		u32 pssp0:1;
+		u32 pssp1:1;
+		u32 rsvd0:3;
+		u32 dmac:1;
+		u32 rsvd1:26;
+	} part;
+	u32 full;
+};
+
+union sst_pimr_reg {
+	struct {
+		u32 ssp0:1;
+		u32 ssp1:1;
+		u32 rsvd0:3;
+		u32 dmac:1;
+		u32 rsvd1:10;
+		u32 ssp0_sc:1;
+		u32 ssp1_sc:1;
+		u32 rsvd2:3;
+		u32 dmac_sc:1;
+		u32 rsvd3:10;
+	} part;
+	u32 full;
+};
+
+union config_status_reg_mrfld {
+	struct {
+		u64 lpe_reset:1;
+		u64 lpe_reset_vector:1;
+		u64 runstall:1;
+		u64 pwaitmode:1;
+		u64 clk_sel:3;
+		u64 rsvd2:1;
+		u64 sst_clk:3;
+		u64 xt_snoop:1;
+		u64 rsvd3:4;
+		u64 clk_sel1:6;
+		u64 clk_enable:3;
+		u64 rsvd4:6;
+		u64 slim0baseclk:1;
+		u64 rsvd:32;
+	} part;
+	u64 full;
+};
+
+union interrupt_reg_mrfld {
+	struct {
+		u64 done_interrupt:1;
+		u64 busy_interrupt:1;
+		u64 rsvd:62;
+	} part;
+	u64 full;
+};
+
+union sst_imr_reg_mrfld {
+	struct {
+		u64 done_interrupt:1;
+		u64 busy_interrupt:1;
+		u64 rsvd:62;
+	} part;
+	u64 full;
+};
+
+/*This structure is used to block a user/fw data call to another
+fw/user call
+*/
+struct sst_block {
+	bool	condition; /* condition for blocking check */
+	int	ret_code; /* ret code when block is released */
+	void	*data; /* data to be appsed for block if any */
+	u32     size;
+	bool	on;
+	u32     msg_id;  /*msg_id = msgid in mfld/ctp, mrfld = 0 */
+	u32     drv_id; /* = str_id in mfld/ctp, = drv_id in mrfld*/
+	struct list_head node;
+};
+
+/**
+ * struct stream_info - structure that holds the stream information
+ *
+ * @status : stream current state
+ * @prev : stream prev state
+ * @ops : stream operation pb/cp/drm...
+ * @bufs: stream buffer list
+ * @lock : stream mutex for protecting state
+ * @pcm_substream : PCM substream
+ * @period_elapsed : PCM period elapsed callback
+ * @sfreq : stream sampling freq
+ * @str_type : stream type
+ * @cumm_bytes : cummulative bytes decoded
+ * @str_type : stream type
+ * @src : stream source
+ * @device : output device type (medfield only)
+ */
+struct stream_info {
+	unsigned int		status;
+	unsigned int		prev;
+	unsigned int		ops;
+	struct mutex		lock; /* mutex */
+	void			*pcm_substream;
+	void (*period_elapsed)	(void *pcm_substream);
+	unsigned int		sfreq;
+	u32			cumm_bytes;
+	void			*compr_cb_param;
+	void (*compr_cb)	(void *compr_cb_param);
+	void			*drain_cb_param;
+	void (*drain_notify)	(void *drain_cb_param);
+
+	unsigned int		num_ch;
+	unsigned int		pipe_id;
+	unsigned int		str_id;
+	unsigned int		task_id;
+};
+
+#define SST_FW_SIGN "$SST"
+#define SST_FW_LIB_SIGN "$LIB"
+
+/*
+ * struct fw_header - FW file headers
+ *
+ * @signature : FW signature
+ * @modules : # of modules
+ * @file_format : version of header format
+ * @reserved : reserved fields
+ */
+struct fw_header {
+	unsigned char signature[FW_SIGNATURE_SIZE]; /* FW signature */
+	u32 file_size; /* size of fw minus this header */
+	u32 modules; /*  # of modules */
+	u32 file_format; /* version of header format */
+	u32 reserved[4];
+};
+
+struct fw_module_header {
+	unsigned char signature[FW_SIGNATURE_SIZE]; /* module signature */
+	u32 mod_size; /* size of module */
+	u32 blocks; /* # of blocks */
+	u32 type; /* codec type, pp lib */
+	u32 entry_point;
+};
+
+struct fw_block_info {
+	enum sst_ram_type	type;	/* IRAM/DRAM */
+	u32			size;	/* Bytes */
+	u32			ram_offset; /* Offset in I/DRAM */
+	u32			rsvd;	/* Reserved field */
+};
+
+struct sst_ipc_msg_wq {
+	union ipc_header_mrfld mrfld_header;
+	struct ipc_dsp_hdr dsp_hdr;
+	char mailbox[SST_MAILBOX_SIZE];
+	struct work_struct	wq;
+	union ipc_header header;
+};
+
+struct sst_dma {
+	struct dma_chan *ch;
+	struct intel_mid_dma_slave slave;
+	struct device *dev;
+};
+
+struct sst_runtime_param {
+	struct snd_sst_runtime_params param;
+};
+
+struct sst_sg_list {
+	struct scatterlist *src;
+	struct scatterlist *dst;
+	int list_len;
+	unsigned int sg_idx;
+};
+
+struct sst_memcpy_list {
+	struct list_head memcpylist;
+	void *dstn;
+	const void *src;
+	u32 size;
+	bool is_io;
+};
+
+struct sst_debugfs {
+#ifdef CONFIG_DEBUG_FS
+	struct dentry		*root;
+#endif
+	int			runtime_pm_status;
+	void __iomem            *ssp[SST_MAX_SSP_PORTS];
+	void __iomem            *dma_reg[SST_MAX_DMA];
+	unsigned char get_params_data[1024];
+	ssize_t get_params_len;
+};
+
+struct lpe_log_buf_hdr {
+	u32 base_addr;
+	u32 end_addr;
+	u32 rd_addr;
+	u32 wr_addr;
+};
+
+struct snd_ssp_config {
+	int size;
+	char bytes[0];
+};
+
+struct snd_sst_probe_bytes {
+	u16 len;
+	char bytes[0];
+};
+
+#define PCI_DMAC_CLV_ID 0x08F0
+#define PCI_DMAC_MRFLD_ID 0x119B
+
+struct sst_ram_buf {
+	u32 size;
+	char *buf;
+};
+
+/* Firmware Module Information*/
+
+enum sst_lib_dwnld_status {
+	SST_LIB_NOT_FOUND = 0,
+	SST_LIB_FOUND,
+	SST_LIB_DOWNLOADED,
+};
+
+struct sst_module_info {
+	const char *name; /* Library name */
+	u32	id; /* Module ID */
+	u32	entry_pt; /* Module entry point */
+	u8	status; /* module status*/
+	u8	rsvd1;
+	u16	rsvd2;
+};
+
+/* Structure for managing the Library Region(1.5MB)
+ * in DDR in Merrifield
+ */
+struct sst_mem_mgr {
+	phys_addr_t current_base;
+	int avail;
+	unsigned int count;
+};
+
+struct sst_dump_buf {
+	/* buffers for iram-dram dump crash */
+	struct sst_ram_buf iram_buf;
+	struct sst_ram_buf dram_buf;
+};
+
+struct sst_ipc_reg {
+	int ipcx;
+	int ipcd;
+};
+
+struct sst_shim_regs64 {
+	u64 csr;
+	u64 pisr;
+	u64 pimr;
+	u64 isrx;
+	u64 isrd;
+	u64 imrx;
+	u64 imrd;
+	u64 ipcx;
+	u64 ipcd;
+	u64 isrsc;
+	u64 isrlpesc;
+	u64 imrsc;
+	u64 imrlpesc;
+	u64 ipcsc;
+	u64 ipclpesc;
+	u64 clkctl;
+	u64 csr2;
+};
+
+struct sst_vtsv_cache {
+	void *file1_in_mem;
+	u32 size1;
+	void *file2_in_mem;
+	u32 size2;
+};
+
+/***
+ *
+ * struct intel_sst_drv - driver ops
+ *
+ * @sst_state : current sst device state
+ * @pci_id : PCI device id loaded
+ * @shim : SST shim pointer
+ * @mailbox : SST mailbox pointer
+ * @iram : SST IRAM pointer
+ * @dram : SST DRAM pointer
+ * @pdata : SST info passed as a part of pci platform data
+ * @shim_phy_add : SST shim phy addr
+ * @shim_regs64: Struct to save shim registers
+ * @ipc_dispatch_list : ipc messages dispatched
+ * @rx_list : to copy the process_reply/process_msg from DSP
+ * @ipc_post_msg_wq : wq to post IPC messages context
+ * @ipc_post_msg : wq to post reply from FW context
+ * @mad_ops : MAD driver operations registered
+ * @mad_wq : MAD driver wq
+ * @post_msg_wq : wq to post IPC messages
+ * @streams : sst stream contexts
+ * @list_lock : sst driver list lock (deprecated)
+ * @ipc_spin_lock : spin lock to handle audio shim access and ipc queue
+ * @rx_msg_lock : spin lock to handle the rx messages from the DSP
+ * @scard_ops : sst card ops
+ * @pci : sst pci device struture
+ * @dev : pointer to current device struct
+ * @sst_lock : sst device lock
+ * @stream_lock : sst stream lock
+ * @pvt_id : sst private id
+ * @stream_cnt : total sst active stream count
+ * @pb_streams : total active pb streams
+ * @cp_streams : total active cp streams
+ * @audio_start : audio status
+ * @qos		: PM Qos struct
+ * firmware_name : Firmware / Library name
+ */
+struct intel_sst_drv {
+	int			sst_state;
+	int			irq_num;
+	unsigned int		pci_id;
+	bool			use_32bit_ops;
+	void __iomem		*ddr;
+	void __iomem		*shim;
+	void __iomem		*mailbox;
+	void __iomem		*iram;
+	void __iomem		*dram;
+	unsigned int		mailbox_add;
+	unsigned int		iram_base;
+	unsigned int		dram_base;
+	unsigned int		shim_phy_add;
+	unsigned int		iram_end;
+	unsigned int		dram_end;
+	unsigned int		ddr_end;
+	unsigned int		ddr_base;
+	unsigned int		mailbox_recv_offset;
+	atomic_t		pm_usage_count;
+	struct sst_shim_regs64	*shim_regs64;
+	struct list_head        block_list;
+	struct list_head	ipc_dispatch_list;
+	struct sst_platform_info *pdata;
+	struct sst_ipc_msg_wq   ipc_post_msg;
+	struct list_head	rx_list;
+	struct work_struct      ipc_post_msg_wq;
+	wait_queue_head_t	wait_queue;
+	struct workqueue_struct *mad_wq;
+	struct workqueue_struct *post_msg_wq;
+	unsigned int		tstamp;
+	struct stream_info	streams[MAX_NUM_STREAMS+1]; /*str_id 0 is not used*/
+	spinlock_t		ipc_spin_lock; /* lock for Shim reg access and ipc queue */
+	spinlock_t              block_lock; /* lock for adding block to block_list */
+	spinlock_t              pvt_id_lock; /* lock for allocating private id */
+	spinlock_t		rx_msg_lock;
+	struct pci_dev		*pci;
+	struct device		*dev;
+	unsigned int		pvt_id;
+	struct mutex            sst_lock;
+	struct mutex		stream_lock;
+	unsigned int		stream_cnt;
+	unsigned int		*fw_cntx;
+	unsigned int		fw_cntx_size;
+	unsigned int		csr_value;
+	struct sst_dma		dma;
+	void			*fw_in_mem;
+	struct sst_runtime_param runtime_param;
+	unsigned int		device_input_mixer;
+	struct mutex		mixer_ctrl_lock;
+	struct dma_async_tx_descriptor *desc;
+	struct sst_sg_list	fw_sg_list, library_list;
+	struct intel_sst_ops	*ops;
+	struct sst_debugfs	debugfs;
+	struct pm_qos_request	*qos;
+	struct sst_info	info;
+	unsigned int		use_dma;
+	unsigned int		use_lli;
+	atomic_t		fw_clear_context;
+	atomic_t		fw_clear_cache;
+	bool			lib_dwnld_reqd;
+	/* list used during FW download in memcpy mode */
+	struct list_head	memcpy_list;
+	/* list used during LIB download in memcpy mode */
+	struct list_head	libmemcpy_list;
+	/* holds the stucts of iram/dram local buffers for dump*/
+	struct sst_dump_buf	dump_buf;
+	/* Lock for CSR register change */
+	struct mutex		csr_lock;
+	/* byte control to set the probe stream */
+	struct snd_sst_probe_bytes *probe_bytes;
+	/* contains the ipc registers */
+	struct sst_ipc_reg	ipc_reg;
+	/* IMR region Library space memory manager */
+	struct sst_mem_mgr      lib_mem_mgr;
+	/* Contains the cached vtsv files*/
+	struct sst_vtsv_cache	vcache;
+	/* Pointer to device ID, now for same PCI_ID, HID will be
+	 * will be different for FDK and EDK2. This will be used
+	 * for devices where PCI or ACPI id is same but HID is
+	 * different
+	 */
+	const char *hid;
+	/* Holder for firmware name. Due to async call it needs to be
+	 * persistent till worker thread gets called
+	 */
+	char firmware_name[20];
+};
+
+extern struct intel_sst_drv *sst_drv_ctx;
+
+/* misc definitions */
+#define FW_DWNL_ID 0xFF
+
+struct sst_fill_config {
+	u32 sign;
+	struct sst_board_config_data sst_bdata;
+	struct sst_platform_config_data sst_pdata;
+	u32 shim_phy_add;
+	u32 mailbox_add;
+} __packed;
+
+struct intel_sst_ops {
+	irqreturn_t (*interrupt) (int, void *);
+	irqreturn_t (*irq_thread) (int, void *);
+	void (*clear_interrupt) (void);
+	int (*start) (void);
+	int (*reset) (void);
+	void (*process_reply) (struct ipc_post *msg);
+	void (*post_message) (struct work_struct *work);
+	int (*sync_post_message) (struct ipc_post *msg);
+	void (*process_message) (struct ipc_post *msg);
+	void (*set_bypass)(bool set);
+	int (*save_dsp_context) (struct intel_sst_drv *sst);
+	void (*restore_dsp_context) (void);
+	int (*alloc_stream) (char *params, struct sst_block *block);
+	void (*post_download)(struct intel_sst_drv *sst);
+	void (*do_recovery)(struct intel_sst_drv *sst);
+};
+
+int sst_alloc_stream(char *params, struct sst_block *block);
+int sst_pause_stream(int id);
+int sst_resume_stream(int id);
+int sst_drop_stream(int id);
+int sst_next_track(void);
+int sst_free_stream(int id);
+int sst_start_stream(int str_id);
+int sst_send_byte_stream_mrfld(void *sbytes);
+int sst_send_probe_bytes(struct intel_sst_drv *sst);
+int sst_set_stream_param(int str_id, struct snd_sst_params *str_param);
+int sst_set_metadata(int str_id, char *params);
+int sst_get_stream(struct snd_sst_params *str_param);
+int sst_get_stream_allocated(struct snd_sst_params *str_param,
+				struct snd_sst_lib_download **lib_dnld);
+int sst_drain_stream(int str_id, bool partial_drain);
+
+
+int sst_sync_post_message_mfld(struct ipc_post *msg);
+void sst_post_message_mfld(struct work_struct *work);
+void sst_process_message_mfld(struct ipc_post *msg);
+void sst_process_reply_mfld(struct ipc_post *msg);
+int sst_start_mfld(void);
+int intel_sst_reset_dsp_mfld(void);
+void intel_sst_clear_intr_mfld(void);
+void intel_sst_set_bypass_mfld(bool set);
+
+int sst_sync_post_message_mrfld(struct ipc_post *msg);
+void sst_post_message_mrfld(struct work_struct *work);
+void sst_process_message_mrfld(struct ipc_post *msg);
+void sst_process_reply_mrfld(struct ipc_post *msg);
+int sst_start_mrfld(void);
+int intel_sst_reset_dsp_mrfld(void);
+void intel_sst_clear_intr_mrfld(void);
+void sst_process_mad_ops(struct work_struct *work);
+
+long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd,
+			unsigned long arg);
+int intel_sst_open_cntrl(struct inode *i_node, struct file *file_ptr);
+int intel_sst_release_cntrl(struct inode *i_node, struct file *file_ptr);
+
+int sst_load_fw(void);
+int sst_load_library(struct snd_sst_lib_download *lib, u8 ops);
+int sst_load_all_modules_elf(struct intel_sst_drv *ctx,
+		struct sst_module_info *mod_table, int mod_table_size);
+int sst_get_next_lib_mem(struct sst_mem_mgr *mgr, int size,
+			unsigned long *lib_base);
+void sst_post_download_mrfld(struct intel_sst_drv *ctx);
+int sst_get_block_stream(struct intel_sst_drv *sst_drv_ctx);
+void sst_memcpy_free_resources(void);
+
+int sst_wait_interruptible(struct intel_sst_drv *sst_drv_ctx,
+				struct sst_block *block);
+int sst_wait_timeout(struct intel_sst_drv *sst_drv_ctx,
+			struct sst_block *block);
+int sst_create_ipc_msg(struct ipc_post **arg, bool large);
+int sst_download_fw(void);
+int free_stream_context(unsigned int str_id);
+void sst_clean_stream(struct stream_info *stream);
+int intel_sst_register_compress(struct intel_sst_drv *sst);
+int intel_sst_remove_compress(struct intel_sst_drv *sst);
+void sst_cdev_fragment_elapsed(int str_id);
+int sst_send_sync_msg(int ipc, int str_id);
+int sst_get_num_channel(struct snd_sst_params *str_param);
+int sst_get_sfreq(struct snd_sst_params *str_param);
+int intel_sst_check_device(void);
+int sst_alloc_stream_mrfld(char *params, struct sst_block *block);
+void sst_restore_fw_context(void);
+struct sst_block *sst_create_block(struct intel_sst_drv *ctx,
+				u32 msg_id, u32 drv_id);
+int sst_create_block_and_ipc_msg(struct ipc_post **arg, bool large,
+		struct intel_sst_drv *sst_drv_ctx, struct sst_block **block,
+		u32 msg_id, u32 drv_id);
+int sst_free_block(struct intel_sst_drv *ctx, struct sst_block *freed);
+int sst_wake_up_block(struct intel_sst_drv *ctx, int result,
+		u32 drv_id, u32 ipc, void *data, u32 size);
+int sst_alloc_drv_context(struct device *dev);
+int sst_request_firmware_async(struct intel_sst_drv *ctx);
+int sst_driver_ops(struct intel_sst_drv *sst);
+struct sst_platform_info *sst_get_acpi_driver_data(const char *hid);
+int sst_acpi_probe(struct platform_device *pdev);
+int sst_acpi_remove(struct platform_device *pdev);
+void sst_save_shim64(struct intel_sst_drv *ctx, void __iomem *shim,
+		     struct sst_shim_regs64 *shim_regs);
+void sst_firmware_load_cb(const struct firmware *fw, void *context);
+int sst_send_vtsv_data_to_fw(struct intel_sst_drv *ctx);
+
+void sst_do_recovery_mrfld(struct intel_sst_drv *sst);
+void sst_do_recovery(struct intel_sst_drv *sst);
+long intel_sst_ioctl_dsp(unsigned int cmd,
+		struct snd_ppp_params *algo_params, unsigned long arg);
+
+void sst_dump_to_buffer(const void *from, size_t from_len, char *buf);
+
+extern int intel_scu_ipc_simple_command(int, int);
+
+static inline int sst_pm_runtime_put(struct intel_sst_drv *sst_drv)
+{
+	int ret;
+
+	ret = pm_runtime_put_sync(sst_drv->dev);
+	if (ret < 0)
+		return ret;
+	atomic_dec(&sst_drv->pm_usage_count);
+
+	pr_debug("%s: count is %d now..\n", __func__,
+			atomic_read(&sst_drv->pm_usage_count));
+	return 0;
+}
+/*
+ * sst_fill_header - inline to fill sst header
+ *
+ * @header : ipc header
+ * @msg : IPC message to be sent
+ * @large : is ipc large msg
+ * @str_id : stream id
+ *
+ * this function is an inline function that sets the headers before
+ * sending a message
+ */
+static inline void sst_fill_header(union ipc_header *header,
+				int msg, int large, int str_id)
+{
+	header->part.msg_id = msg;
+	header->part.str_id = str_id;
+	header->part.large = large;
+	header->part.done = 0;
+	header->part.busy = 1;
+	header->part.data = 0;
+}
+
+
+static inline void sst_fill_header_mrfld(union ipc_header_mrfld *header,
+				int msg, int task_id, int large, int drv_id)
+{
+	header->full = 0;
+	header->p.header_high.part.msg_id = msg;
+	header->p.header_high.part.task_id = task_id;
+	header->p.header_high.part.large = large;
+	header->p.header_high.part.drv_id = drv_id;
+	header->p.header_high.part.done = 0;
+	header->p.header_high.part.busy = 1;
+	header->p.header_high.part.res_rqd = 1;
+}
+
+static inline void sst_fill_header_dsp(struct ipc_dsp_hdr *dsp, int msg,
+					int pipe_id, int len)
+{
+	dsp->cmd_id = msg;
+	dsp->mod_index_id = 0xff;
+	dsp->pipe_id = pipe_id;
+	dsp->length = len;
+	dsp->mod_id = 0;
+}
+
+#define MAX_BLOCKS 15
+/* sst_assign_pvt_id - assign a pvt id for stream
+ *
+ * @sst_drv_ctx : driver context
+ *
+ * this inline function assigns a private id for calls that dont have stream
+ * context yet, should be called with lock held
+ */
+static inline unsigned int sst_assign_pvt_id(struct intel_sst_drv *sst_drv_ctx)
+{
+	unsigned int local;
+
+	spin_lock(&sst_drv_ctx->pvt_id_lock);
+	sst_drv_ctx->pvt_id++;
+	if (sst_drv_ctx->pvt_id > MAX_BLOCKS)
+		sst_drv_ctx->pvt_id = 1;
+	local = sst_drv_ctx->pvt_id;
+	spin_unlock(&sst_drv_ctx->pvt_id_lock);
+	return local;
+}
+
+
+/*
+ * sst_init_stream - this function initialzes stream context
+ *
+ * @stream : stream struture
+ * @codec : codec for stream
+ * @sst_id : stream id
+ * @ops : stream operation
+ * @slot : stream pcm slot
+ * @device : device type
+ *
+ * this inline function initialzes stream context for allocated stream
+ */
+static inline void sst_init_stream(struct stream_info *stream,
+		int codec, int sst_id, int ops, u8 slot)
+{
+	stream->status = STREAM_INIT;
+	stream->prev = STREAM_UN_INIT;
+	stream->ops = ops;
+}
+
+static inline void sst_set_gpio_conf(const struct sst_gpio_config *gpio_conf)
+{
+	lnw_gpio_set_alt(gpio_conf->i2s_rx_alt, gpio_conf->alt_function);
+	lnw_gpio_set_alt(gpio_conf->i2s_tx_alt, gpio_conf->alt_function);
+	lnw_gpio_set_alt(gpio_conf->i2s_frame, gpio_conf->alt_function);
+	lnw_gpio_set_alt(gpio_conf->i2s_clock, gpio_conf->alt_function);
+}
+
+
+/*
+ * sst_validate_strid - this function validates the stream id
+ *
+ * @str_id : stream id to be validated
+ *
+ * returns 0 if valid stream
+ */
+static inline int sst_validate_strid(int str_id)
+{
+	if (str_id <= 0 || str_id > sst_drv_ctx->info.max_streams) {
+		pr_err("SST ERR: invalid stream id : %d, max %d\n",
+					str_id, sst_drv_ctx->info.max_streams);
+		return -EINVAL;
+	} else
+		return 0;
+}
+
+static inline int sst_shim_write(void __iomem *addr, int offset, int value)
+{
+	writel(value, addr + offset);
+	return 0;
+}
+
+static inline u32 sst_shim_read(void __iomem *addr, int offset)
+{
+
+	return readl(addr + offset);
+}
+
+static inline u32 sst_reg_read(void __iomem *addr, int offset)
+{
+
+	return readl(addr + offset);
+}
+
+static inline u64 sst_reg_read64(void __iomem *addr, int offset)
+{
+	u64 val = 0;
+
+	memcpy_fromio(&val, addr + offset, sizeof(val));
+
+	return val;
+}
+
+static inline int sst_shim_write64(void __iomem *addr, int offset, u64 value)
+{
+	memcpy_toio(addr + offset, &value, sizeof(value));
+	return 0;
+}
+
+static inline u64 sst_shim_read64(void __iomem *addr, int offset)
+{
+	u64 val = 0;
+
+	memcpy_fromio(&val, addr + offset, sizeof(val));
+	return val;
+}
+
+static inline void
+sst_set_fw_state_locked(struct intel_sst_drv *sst_drv_ctx, int sst_state)
+{
+	mutex_lock(&sst_drv_ctx->sst_lock);
+	sst_drv_ctx->sst_state = sst_state;
+	mutex_unlock(&sst_drv_ctx->sst_lock);
+}
+
+static inline struct stream_info *get_stream_info(int str_id)
+{
+	if (sst_validate_strid(str_id))
+		return NULL;
+	return &sst_drv_ctx->streams[str_id];
+}
+
+static inline int get_stream_id_mrfld(u32 pipe_id)
+{
+	int i;
+
+	for (i = 1; i <= sst_drv_ctx->info.max_streams; i++)
+		if (pipe_id == sst_drv_ctx->streams[i].pipe_id)
+			return i;
+
+	pr_debug("%s: no such pipe_id(%u)", __func__, pipe_id);
+	return -1;
+}
+
+int register_sst(struct device *);
+int unregister_sst(struct device *);
+
+#ifdef CONFIG_DEBUG_FS
+void sst_debugfs_init(struct intel_sst_drv *sst);
+void sst_debugfs_exit(struct intel_sst_drv *sst);
+#else
+static inline void sst_debugfs_init(struct intel_sst_drv *sst)
+{
+}
+
+static inline void sst_debugfs_exit(struct intel_sst_drv *sst)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
+/*
+ * FW should use virtual address 0xC000_0000 to map to the DDR
+ * reserved 2MB region at 512MB boundary. Currently the address of
+ * DDR region allocated by IA FW is not 512MB aligned. So FW is
+ * statically linking the DDR region at 0xDF600000. So we need to
+ * use the translated address to identify the DDR regions in the FW
+ * ELF binary.
+ */
+static inline u32 relocate_imr_addr_mrfld(u32 base_addr)
+{
+	/* Get the difference from 512MB aligned base addr */
+	/* relocate the base */
+	base_addr = MRFLD_FW_VIRTUAL_BASE + (base_addr % (512 * 1024 * 1024));
+	return base_addr;
+}
+
+static inline void sst_add_to_dispatch_list_and_post(struct intel_sst_drv *sst,
+						struct ipc_post *msg)
+{
+	unsigned long irq_flags;
+	spin_lock_irqsave(&sst->ipc_spin_lock, irq_flags);
+	list_add_tail(&msg->node, &sst->ipc_dispatch_list);
+	spin_unlock_irqrestore(&sst->ipc_spin_lock, irq_flags);
+	sst->ops->post_message(&sst->ipc_post_msg_wq);
+}
+#endif
diff --git a/sound/soc/intel/sst/sst_acpi.c b/sound/soc/intel/sst/sst_acpi.c
new file mode 100644
index 0000000..cff6101
--- /dev/null
+++ b/sound/soc/intel/sst/sst_acpi.c
@@ -0,0 +1,104 @@
+/* sst_acpi.c - SST (LPE) driver init file for ACPI enumeration.
+ *
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ *  Authors:	Ramesh Babu K V <Ramesh.Babu@intel.com>
+ *  Authors:	Omair Mohammed Abdullah <omair.m.abdullah@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/miscdevice.h>
+#include <linux/platform_device.h>
+#include <linux/firmware.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_qos.h>
+#include <asm/platform_sst.h>
+#include <acpi/acpi_bus.h>
+#include <sound/intel_sst_ioctl.h>
+#include "../sst_platform.h"
+#include "../platform_ipc_v2.h"
+#include "sst.h"
+
+extern struct miscdevice lpe_ctrl;
+
+int sst_workqueue_init(struct intel_sst_drv *ctx)
+{
+	pr_debug("%s", __func__);
+
+	INIT_LIST_HEAD(&ctx->memcpy_list);
+	INIT_LIST_HEAD(&ctx->libmemcpy_list);
+	INIT_LIST_HEAD(&sst_drv_ctx->rx_list);
+	INIT_LIST_HEAD(&ctx->ipc_dispatch_list);
+	INIT_LIST_HEAD(&ctx->block_list);
+	INIT_WORK(&ctx->ipc_post_msg.wq, ctx->ops->post_message);
+	init_waitqueue_head(&ctx->wait_queue);
+
+	ctx->mad_wq = create_singlethread_workqueue("sst_mad_wq");
+	if (!ctx->mad_wq)
+		goto err_wq;
+	ctx->post_msg_wq =
+		create_singlethread_workqueue("sst_post_msg_wq");
+	if (!ctx->post_msg_wq)
+		goto err_wq;
+	return 0;
+err_wq:
+	return -EBUSY;
+}
+
+void sst_init_locks(struct intel_sst_drv *ctx)
+{
+	mutex_init(&ctx->stream_lock);
+	mutex_init(&ctx->sst_lock);
+	mutex_init(&ctx->mixer_ctrl_lock);
+	mutex_init(&ctx->csr_lock);
+	spin_lock_init(&sst_drv_ctx->rx_msg_lock);
+	spin_lock_init(&ctx->ipc_spin_lock);
+	spin_lock_init(&ctx->block_lock);
+	spin_lock_init(&ctx->pvt_id_lock);
+}
+
+int sst_destroy_workqueue(struct intel_sst_drv *ctx)
+{
+	pr_debug("%s", __func__);
+	if (ctx->mad_wq)
+		destroy_workqueue(ctx->mad_wq);
+	if (ctx->post_msg_wq)
+		destroy_workqueue(ctx->post_msg_wq);
+	return 0;
+}
+
+int sst_acpi_probe(struct platform_device *pdev)
+{
+	return -EINVAL;
+}
+
+int sst_acpi_remove(struct platform_device *pdev)
+{
+	return -EINVAL;
+}
+
+MODULE_DESCRIPTION("Intel (R) SST(R) Audio Engine ACPI Driver");
+MODULE_AUTHOR("Ramesh Babu K V");
+MODULE_AUTHOR("Omair Mohammed Abdullah");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("sst");
diff --git a/sound/soc/intel/sst/sst_app_compat_interface.c b/sound/soc/intel/sst/sst_app_compat_interface.c
new file mode 100644
index 0000000..4babd76
--- /dev/null
+++ b/sound/soc/intel/sst/sst_app_compat_interface.c
@@ -0,0 +1,85 @@
+
+/*
+ *  sst_app_compat_interface.c - Intel SST Driver for audio engine
+ *
+ *  Copyright (C) 2013-14 Intel Corp
+ *  Authors: Subhransu S. Prusty <subhransu.s.prusty@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *  This driver exposes the audio engine functionalities to the ALSA
+ *	and middleware.
+ */
+
+/* This file is included from sst.c */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/compat.h>
+#include <linux/types.h>
+#include <sound/intel_sst_ioctl.h>
+#include "sst.h"
+
+struct snd_ppp_params32 {
+	__u8			algo_id;/* Post/Pre processing algorithm ID  */
+	__u8			str_id;	/*Only 5 bits used 0 - 31 are valid*/
+	__u8			enable;	/* 0= disable, 1= enable*/
+	__u8			operation;
+	__u32			size;	/*Size of parameters for all blocks*/
+	__u32			params;
+} __packed;
+
+enum {
+SNDRV_SST_SET_ALGO32 = _IOW('L', 0x30,  struct snd_ppp_params32),
+SNDRV_SST_GET_ALGO32 = _IOWR('L', 0x31,  struct snd_ppp_params32),
+};
+
+static long sst_algo_compat(unsigned int cmd,
+				struct snd_ppp_params32 __user *arg32)
+{
+	int retval = 0;
+	struct snd_ppp_params32 algo_params32;
+	struct snd_ppp_params algo_params;
+
+	if (copy_from_user(&algo_params32, arg32, sizeof(algo_params32))) {
+		pr_debug("%s: copy from user failed: %d\n", __func__, retval);
+		return -EINVAL;
+	}
+
+	memcpy(&algo_params, &algo_params32, sizeof(algo_params32)-sizeof(__u32));
+	algo_params.params = compat_ptr(algo_params32.params);
+	retval = intel_sst_ioctl_dsp(cmd, &algo_params, (unsigned long)arg32);
+	return retval;
+}
+
+static long intel_sst_ioctl_compat(struct file *file_ptr,
+				unsigned int cmd, unsigned long arg)
+{
+	void __user *argp = compat_ptr(arg);
+
+	switch (_IOC_NR(cmd)) {
+	case _IOC_NR(SNDRV_SST_DRIVER_INFO):
+	case _IOC_NR(SNDRV_SST_TUNING_PARAMS):
+		return intel_sst_ioctl(file_ptr, cmd, (unsigned long)argp);
+	case _IOC_NR(SNDRV_SST_SET_ALGO32):
+		return sst_algo_compat(SNDRV_SST_SET_ALGO, argp);
+	case _IOC_NR(SNDRV_SST_GET_ALGO32):
+		return sst_algo_compat(SNDRV_SST_GET_ALGO, argp);
+
+	default:
+		return -ENOTTY;
+	}
+	return 0;
+}
diff --git a/sound/soc/intel/sst/sst_app_interface.c b/sound/soc/intel/sst/sst_app_interface.c
new file mode 100644
index 0000000..cfbbf4f
--- /dev/null
+++ b/sound/soc/intel/sst/sst_app_interface.c
@@ -0,0 +1,342 @@
+/*
+ *  sst_app_interface.c - Intel SST Driver for audio engine
+ *
+ *  Copyright (C) 2008-10 Intel Corp
+ *  Authors:	Vinod Koul <vinod.koul@intel.com>
+ *  Harsha Priya <priya.harsha@intel.com>
+ *  Dharageswari R <dharageswari.r@intel.com>
+ *  Jeeja KP <jeeja.kp@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *  This driver exposes the audio engine functionalities to the ALSA
+ *	and middleware.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/pci.h>
+#include <linux/fs.h>
+#include <linux/uio.h>
+#include <linux/aio.h>
+#include <linux/uaccess.h>
+#include <linux/firmware.h>
+#include <linux/ioctl.h>
+#include <sound/intel_sst_ioctl.h>
+#include "../sst_platform.h"
+#include "../platform_ipc_v2.h"
+#include "sst.h"
+
+#define AM_MODULE 1
+
+/**
+ * intel_sst_open_cntrl - opens a handle to driver
+ *
+ * @i_node:	inode structure
+ * @file_ptr:pointer to file
+ *
+ * This function is called by OS when a user space component
+ * tries to get a driver handle to /dev/intel_sst_control.
+ * Only one handle at a time will be allowed
+ * This is for control operations only
+ */
+int intel_sst_open_cntrl(struct inode *i_node, struct file *file_ptr)
+{
+	unsigned int retval;
+
+	/* audio manager open */
+	mutex_lock(&sst_drv_ctx->stream_lock);
+	retval = intel_sst_check_device();
+	if (retval) {
+		mutex_unlock(&sst_drv_ctx->stream_lock);
+		return retval;
+	}
+	pr_debug("AM handle opened\n");
+
+	mutex_unlock(&sst_drv_ctx->stream_lock);
+	return retval;
+}
+
+
+int intel_sst_release_cntrl(struct inode *i_node, struct file *file_ptr)
+{
+	/* audio manager close */
+	mutex_lock(&sst_drv_ctx->stream_lock);
+	sst_pm_runtime_put(sst_drv_ctx);
+	mutex_unlock(&sst_drv_ctx->stream_lock);
+	pr_debug("AM handle closed\n");
+	return 0;
+}
+
+/**
+ * sst_get_max_streams - Function to populate the drv info structure
+ *				with the max streams
+ * @info: the out params that holds the drv info
+ *
+ * This function is called when max streams count is required
+**/
+void sst_get_max_streams(struct snd_sst_driver_info *info)
+{
+	pr_debug("info.max_streams %d num_probes %d\n", sst_drv_ctx->info.max_streams,
+					sst_drv_ctx->info.num_probes);
+	info->max_streams = sst_drv_ctx->info.max_streams - sst_drv_ctx->info.num_probes;
+}
+
+/**
+ * sst_create_algo_ipc - create ipc msg for algorithm parameters
+ *
+ * @algo_params: Algorithm parameters
+ * @msg: post msg pointer
+ * @pvt_id: Checked by wake_up_block
+ *
+ * This function is called to create ipc msg
+ * For copying the mailbox data the function returns offset in bytes to mailbox
+ * memory where the mailbox data should be copied after msg header
+ */
+static int sst_create_algo_ipc(struct snd_ppp_params *algo_params,
+					struct ipc_post **msg, int pvt_id)
+{
+	u32 header_size = 0;
+	u32 ipc_msg_size = sizeof(u32) + sizeof(*algo_params)
+			 - sizeof(algo_params->params) + algo_params->size;
+	u32 offset = 0;
+
+	if (ipc_msg_size > SST_MAILBOX_SIZE)
+		return -ENOMEM;
+	if (sst_create_ipc_msg(msg, true))
+		return -ENOMEM;
+	sst_fill_header(&(*msg)->header,
+			IPC_IA_ALG_PARAMS, 1, pvt_id);
+	(*msg)->header.part.data = ipc_msg_size;
+	memcpy((*msg)->mailbox_data, &(*msg)->header, sizeof(u32));
+	offset = sizeof(u32);
+	header_size = sizeof(*algo_params) - sizeof(algo_params->params);
+	memcpy((*msg)->mailbox_data + offset, algo_params, header_size);
+	offset += header_size;
+	return offset;
+}
+
+static long sst_send_algo(struct snd_ppp_params *algo_params,
+		struct sst_block *block, enum sst_algo_ops algo)
+{
+	struct ipc_post *msg;
+	int retval;
+	int offset;
+
+	pr_debug("Algo ID %d Str id %d Enable %d Size %d\n",
+		algo_params->algo_id, algo_params->str_id,
+		algo_params->enable, algo_params->size);
+
+	algo_params->operation = algo;
+
+	offset = sst_create_algo_ipc(algo_params, &msg, block->drv_id);
+	if (offset < 0)
+		return offset;
+
+	if (copy_from_user(msg->mailbox_data + offset,
+			algo_params->params, algo_params->size)) {
+		kfree(msg);
+		return -EFAULT;
+	}
+
+	sst_add_to_dispatch_list_and_post(sst_drv_ctx, msg);
+	retval = sst_wait_timeout(sst_drv_ctx, block);
+	if (retval) {
+		pr_debug("%s: failed for algo ops %s with retval %d\n",
+				__func__, algo ? "SST_GET_ALGO" : "SST_SET_ALGO", retval);
+		return -EIO;
+	}
+	return 0;
+}
+
+/**
+ * intel_sst_ioctl_dsp - receives the device ioctl's
+ *
+ * @cmd:Ioctl cmd
+ * @arg:data
+ *
+ * This function is called when a user space component
+ * sends a DSP Ioctl to SST driver
+ */
+long intel_sst_ioctl_dsp(unsigned int cmd,
+		struct snd_ppp_params *algo_params, unsigned long arg)
+{
+	int retval = 0;
+	struct snd_ppp_params *algo_params_copied;
+	struct sst_block *block;
+	int pvt_id;
+
+	pvt_id = sst_assign_pvt_id(sst_drv_ctx);
+	block = sst_create_block(sst_drv_ctx, IPC_IA_ALG_PARAMS, pvt_id);
+	if (block == NULL)
+		return -ENOMEM;
+
+	switch (_IOC_NR(cmd)) {
+	case _IOC_NR(SNDRV_SST_SET_ALGO):
+		retval = sst_send_algo(algo_params, block, SST_SET_ALGO);
+		break;
+
+	case _IOC_NR(SNDRV_SST_GET_ALGO):
+		retval = sst_send_algo(algo_params, block, SST_GET_ALGO);
+		if (retval)
+			break;
+		algo_params_copied = (struct snd_ppp_params *)block->data;
+
+		if (algo_params_copied->size > algo_params->size) {
+			pr_debug("mem insufficient to copy\n");
+			retval = -EMSGSIZE;
+			break;
+		} else {
+			char __user *tmp;
+			struct snd_ppp_params *get_params;
+			char *pp;
+
+			tmp = (char __user *)arg + offsetof(
+					struct snd_ppp_params, size);
+			if (copy_to_user(tmp, &algo_params_copied->size,
+						 sizeof(u32))) {
+				retval = -EFAULT;
+				break;
+			}
+			tmp = (char __user *)arg + offsetof(
+					struct snd_ppp_params, enable);
+			if (copy_to_user(tmp, &algo_params_copied->enable,
+						 sizeof(u8))) {
+				retval = -EFAULT;
+				break;
+			}
+			if (algo_params_copied->size == 0)
+				break;
+
+			get_params = kmalloc(sizeof(*get_params), GFP_KERNEL);
+			if (!get_params) {
+				pr_err("sst: mem alloc failed\n");
+				break;
+			}
+			memcpy(get_params, algo_params_copied,
+							sizeof(*get_params));
+
+			get_params->params = kmalloc(get_params->size, GFP_KERNEL);
+			if (!get_params->params) {
+				pr_err("sst: mem alloc failed\n");
+				goto free_mem;
+			}
+			pp = (char *)algo_params_copied;
+			pp = pp + sizeof(*get_params) -
+						sizeof(get_params->params);
+			memcpy(get_params->params, pp, get_params->size);
+			if (copy_to_user(algo_params->params,
+					get_params->params,
+					get_params->size)) {
+				retval = -EFAULT;
+			}
+			kfree(get_params->params);
+
+free_mem:
+			kfree(get_params);
+
+		}
+		break;
+	}
+	sst_free_block(sst_drv_ctx, block);
+	pr_debug("ioctl dsp return = %d, for cmd = %x\n", retval, cmd);
+	return retval;
+}
+
+static long sst_ioctl_tuning_params(unsigned int cmd, unsigned long arg)
+{
+	struct snd_sst_tuning_params params;
+	struct ipc_post *msg;
+	unsigned long address;
+
+	if (copy_from_user(&params, (void __user *)arg, sizeof(params)))
+		return -EFAULT;
+	pr_debug("sst: Parameter %d, Stream %d, Size %d\n", params.type,
+			params.str_id, params.size);
+	if (sst_create_ipc_msg(&msg, true))
+		return -ENOMEM;
+	address = (unsigned long)params.addr;
+
+	switch (_IOC_NR(cmd)) {
+	case _IOC_NR(SNDRV_SST_TUNING_PARAMS):
+		sst_fill_header(&msg->header, IPC_IA_TUNING_PARAMS, 1,
+				params.str_id);
+		break;
+	}
+	msg->header.part.data = sizeof(u32) + sizeof(params) + params.size;
+	memcpy(msg->mailbox_data, &msg->header.full, sizeof(u32));
+	memcpy(msg->mailbox_data + sizeof(u32), &params, sizeof(params));
+	/* driver doesn't need to send address, so overwrite addr with data */
+	if (copy_from_user(msg->mailbox_data + sizeof(u32)
+				+ sizeof(params) - sizeof(params.addr),
+			(void __user *)address, params.size)) {
+		kfree(msg->mailbox_data);
+		kfree(msg);
+		return -EFAULT;
+	}
+
+	sst_add_to_dispatch_list_and_post(sst_drv_ctx, msg);
+	return 0;
+}
+/**
+ * intel_sst_ioctl - receives the device ioctl's
+ * @file_ptr:pointer to file
+ * @cmd:Ioctl cmd
+ * @arg:data
+ *
+ * This function is called by OS when a user space component
+ * sends an Ioctl to SST driver
+ */
+long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
+{
+	int retval = 0;
+	struct snd_ppp_params algo_params;
+
+	if (sst_drv_ctx->sst_state != SST_FW_RUNNING)
+		return -EBUSY;
+
+	switch (_IOC_NR(cmd)) {
+	case _IOC_NR(SNDRV_SST_DRIVER_INFO): {
+		struct snd_sst_driver_info info;
+
+		pr_debug("SNDRV_SST_DRIVER_INFO received\n");
+		sst_get_max_streams(&info);
+
+		if (copy_to_user((void __user *)arg, &info,
+				sizeof(info)))
+			retval = -EFAULT;
+		break;
+	}
+	case _IOC_NR(SNDRV_SST_GET_ALGO):
+	case _IOC_NR(SNDRV_SST_SET_ALGO):
+		if (copy_from_user(&algo_params, (void __user *)arg,
+						sizeof(algo_params))) {
+			return -EFAULT;
+		}
+		retval = intel_sst_ioctl_dsp(cmd, &algo_params, arg);
+		break;
+
+	case _IOC_NR(SNDRV_SST_TUNING_PARAMS):
+		retval = sst_ioctl_tuning_params(cmd, arg);
+		break;
+
+	default:
+		retval = -EINVAL;
+	}
+	pr_debug("intel_sst_ioctl:complete ret code = %d for command = %x\n", retval, cmd);
+	return retval;
+}
+
diff --git a/sound/soc/intel/sst/sst_debug.c b/sound/soc/intel/sst/sst_debug.c
new file mode 100644
index 0000000..727553e
--- /dev/null
+++ b/sound/soc/intel/sst/sst_debug.c
@@ -0,0 +1,1325 @@
+/*
+ *  sst_debug.c - Intel SST Driver debugfs support
+ *
+ *  Copyright (C) 2012	Intel Corp
+ *  Authors:	Vinod Koul <vinod.koul@intel.com>
+ *		Omair Mohammed Abdullah <omair.m.abdullah@linux.intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This file contains all debugfs functions
+ *  Support includes:
+ *   - Disabling/Enabling runtime PM for SST
+ *   - Reading/Writing SST SHIM registers
+ *   - Reading/Enabling Input OSC Clock
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": debugfs: " fmt
+
+#include <linux/fs.h>
+#include <linux/pci.h>
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/pm_runtime.h>
+#include <linux/uaccess.h>
+#include <asm/intel_scu_pmic.h>
+#include <asm/intel_scu_ipcutil.h>
+#include "../sst_platform.h"
+#include "../platform_ipc_v2.h"
+#include "sst.h"
+
+#define DMA_NUM_CH	8
+#define DEBUGFS_SSP_BUF_SIZE	300  /* 22 chars * 12 reg*/
+#define DEBUGFS_DMA_BUF_SIZE	2500 /* 32 chars * 78 regs*/
+
+/* Register Offsets of SSP3 and LPE DMA */
+u32 ssp_reg_off[] = {0x0, 0x4, 0x8, 0xC, 0x10, 0x28, 0x2C, 0x30, 0x34, 0x38,
+			0x3C, 0x40};
+/* Excludes the channel registers */
+u32 dma_reg_off[] = {0x2C0, 0x2C8, 0x2D0, 0x2D8, 0x2E0, 0x2E8,
+		0x2F0, 0x2F8, 0x300, 0x308, 0x310, 0x318, 0x320, 0x328, 0x330,
+		0x338, 0x340, 0x348, 0x350, 0x358, 0x360, 0x368, 0x370, 0x378,
+		0x380, 0x388, 0x390, 0x398, 0x3A0, 0x3A8, 0x3B0, 0x3C8, 0x3D0,
+		0x3D8, 0x3E0, 0x3E8, 0x3F0, 0x3F8};
+
+static inline int is_fw_running(struct intel_sst_drv *drv);
+
+static ssize_t sst_debug_shim_read(struct file *file, char __user *user_buf,
+				   size_t count, loff_t *ppos)
+{
+	struct intel_sst_drv *drv = file->private_data;
+	unsigned long long val = 0;
+	unsigned int addr;
+	char buf[512];
+	char name[8];
+	int pos = 0, ret = 0;
+
+	buf[0] = 0;
+
+	ret = is_fw_running(drv);
+	if (ret) {
+		pr_err("FW not running, cannot read SHIM registers\n");
+		return ret;
+	}
+
+	for (addr = SST_SHIM_BEGIN; addr <= SST_SHIM_END; addr += 8) {
+		switch (drv->pci_id) {
+		case SST_MRFLD_PCI_ID:
+		case PCI_DEVICE_ID_INTEL_SST_MOOR:
+			val = sst_shim_read64(drv->shim, addr);
+			break;
+		}
+
+		name[0] = 0;
+		switch (addr) {
+		case SST_ISRX:
+			strcpy(name, "ISRX"); break;
+		case SST_ISRD:
+			strcpy(name, "ISRD"); break;
+		case SST_IPCX:
+			strcpy(name, "IPCX"); break;
+		case SST_IPCD:
+			strcpy(name, "IPCD"); break;
+		case SST_IMRX:
+			strcpy(name, "IMRX"); break;
+		case SST_IMRD:
+			strcpy(name, "IMRD"); break;
+		}
+		pos += sprintf(buf + pos, "0x%.2x: %.8llx  %s\n", addr, val, name);
+	}
+
+	sst_pm_runtime_put(drv);
+	return simple_read_from_buffer(user_buf, count, ppos,
+			buf, strlen(buf));
+}
+
+static ssize_t sst_debug_shim_write(struct file *file,
+		const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct intel_sst_drv *drv = file->private_data;
+	char buf[32];
+	char *start = buf, *end;
+	unsigned long long value;
+	unsigned long reg_addr;
+	int ret_val = 0;
+	size_t buf_size = min(count, sizeof(buf)-1);
+
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+	buf[buf_size] = 0;
+
+	ret_val = is_fw_running(drv);
+	if (ret_val) {
+		pr_err("FW not running, cannot read SHIM registers\n");
+		return ret_val;
+	}
+
+	while (*start == ' ')
+		start++;
+	end = start;
+	while (isalnum(*end))
+		end++;
+	*end = 0;
+
+	ret_val = kstrtoul(start, 16, &reg_addr);
+	if (ret_val) {
+		pr_err("kstrtoul failed, ret_val = %d\n", ret_val);
+		goto put_pm_runtime;
+	}
+	if (!(SST_SHIM_BEGIN < reg_addr && reg_addr < SST_SHIM_END)) {
+		pr_err("invalid shim address: 0x%lx\n", reg_addr);
+		ret_val = -EINVAL;
+		goto put_pm_runtime;
+	}
+
+	start = end + 1;
+	while (*start == ' ')
+		start++;
+
+	ret_val = kstrtoull(start, 16, &value);
+	if (ret_val) {
+		pr_err("kstrtoul failed, ret_val = %d\n", ret_val);
+		goto put_pm_runtime;
+	}
+
+	pr_debug("writing shim: 0x%.2lx=0x%.8llx", reg_addr, value);
+
+	if ((drv->pci_id == SST_MRFLD_PCI_ID) ||
+			(drv->pci_id == PCI_DEVICE_ID_INTEL_SST_MOOR))
+		sst_shim_write64(drv->shim, reg_addr, (u64) value);
+
+	/* Userspace has been fiddling around behind the kernel's back */
+	add_taint(TAINT_USER, LOCKDEP_NOW_UNRELIABLE);
+	ret_val = buf_size;
+
+put_pm_runtime:
+	sst_pm_runtime_put(drv);
+	return ret_val;
+}
+
+static const struct file_operations sst_debug_shim_ops = {
+	.open = simple_open,
+	.read = sst_debug_shim_read,
+	.write = sst_debug_shim_write,
+	.llseek = default_llseek,
+};
+
+#define RESVD_DUMP_SZ		40
+#define IA_LPE_MAILBOX_DUMP_SZ	100
+#define LPE_IA_MAILBOX_DUMP_SZ	100
+#define SCU_LPE_MAILBOX_DUMP_SZ	256
+#define LPE_SCU_MAILBOX_DUMP_SZ	256
+
+static inline int is_fw_running(struct intel_sst_drv *drv)
+{
+	pm_runtime_get_sync(drv->dev);
+	atomic_inc(&drv->pm_usage_count);
+	if (drv->sst_state != SST_FW_RUNNING) {
+		pr_err("FW not running\n");
+		sst_pm_runtime_put(drv);
+		return -EFAULT;
+	}
+	return 0;
+}
+
+static inline int read_buffer_fromio(char *dest, unsigned int sz,
+				     const u32 __iomem *from,
+				     unsigned int num_dwords)
+{
+	int i;
+	const unsigned int rowsz = 16, groupsz = 4;
+	const unsigned int size = num_dwords * sizeof(u32);
+	unsigned int linelen, printed = 0, remaining = size;
+
+	u8 *tmp = kmalloc(size, GFP_KERNEL);
+	if (!tmp)
+		return -ENOMEM;
+	memcpy_fromio(tmp, from, size);
+	for (i = 0; i < size; i += rowsz) {
+		linelen = min(remaining, rowsz);
+		remaining -= rowsz;
+		hex_dump_to_buffer(tmp + i, linelen, rowsz, groupsz,
+				   dest + printed, sz - printed, false);
+		printed += linelen * 2 + linelen / groupsz - 1;
+		*(dest + printed++) = '\n';
+		*(dest + printed) = 0;
+	}
+	kfree(tmp);
+	return 0;
+}
+
+static inline int copy_sram_to_user_buffer(char __user *user_buf, size_t count, loff_t *ppos,
+					   unsigned int num_dwords, const u32 __iomem *from,
+					   u32 offset)
+{
+	ssize_t bytes_read;
+	char *buf;
+	int pos;
+	unsigned int bufsz = 48 + sizeof(u32) * num_dwords * (2 + 1) + 1;
+
+	buf = kmalloc(bufsz, GFP_KERNEL);
+	if (!buf) {
+		pr_err("%s: no memory\n", __func__);
+		return -ENOMEM;
+	}
+	*buf = 0;
+	pos = scnprintf(buf, 48, "Reading %u dwords from offset %#x\n",
+			num_dwords, offset);
+	read_buffer_fromio(buf + pos, bufsz - pos, from, num_dwords);
+	bytes_read = simple_read_from_buffer(user_buf, count, ppos,
+					     buf, strlen(buf));
+	kfree(buf);
+	return bytes_read;
+}
+
+static ssize_t sst_debug_sram_lpe_debug_read(struct file *file,
+		char __user *user_buf, size_t count, loff_t *ppos)
+{
+
+	struct intel_sst_drv *drv = file->private_data;
+	int ret = 0;
+
+	ret = is_fw_running(drv);
+	if (ret)
+		return ret;
+
+	ret = copy_sram_to_user_buffer(user_buf, count, ppos, RESVD_DUMP_SZ,
+				       (u32 *)(drv->mailbox + SST_RESERVED_OFFSET),
+				       SST_RESERVED_OFFSET);
+	sst_pm_runtime_put(drv);
+	return ret;
+}
+
+static const struct file_operations sst_debug_sram_lpe_debug_ops = {
+	.open = simple_open,
+	.read = sst_debug_sram_lpe_debug_read,
+	.llseek = default_llseek,
+};
+
+static ssize_t sst_debug_sram_lpe_checkpoint_read(struct file *file,
+		char __user *user_buf, size_t count, loff_t *ppos)
+{
+
+	struct intel_sst_drv *drv = file->private_data;
+	int ret = 0;
+	u32 offset;
+
+	ret = is_fw_running(drv);
+	if (ret)
+		return ret;
+
+	offset = sst_drv_ctx->pdata->debugfs_data->checkpoint_offset;
+
+	ret = copy_sram_to_user_buffer(user_buf, count, ppos,
+				sst_drv_ctx->pdata->debugfs_data->checkpoint_size,
+				(u32 *)(drv->mailbox + offset), offset);
+	sst_pm_runtime_put(drv);
+	return ret;
+}
+
+static const struct file_operations sst_debug_sram_lpe_checkpoint_ops = {
+	.open = simple_open,
+	.read = sst_debug_sram_lpe_checkpoint_read,
+	.llseek = default_llseek,
+};
+
+static ssize_t sst_debug_sram_ia_lpe_mbox_read(struct file *file,
+		char __user *user_buf, size_t count, loff_t *ppos)
+{
+
+	struct intel_sst_drv *drv = file->private_data;
+	int ret = 0;
+
+	ret = is_fw_running(drv);
+	if (ret)
+		return ret;
+	ret = copy_sram_to_user_buffer(user_buf, count, ppos, IA_LPE_MAILBOX_DUMP_SZ,
+				       (u32 *)(drv->mailbox + SST_MAILBOX_SEND),
+				       SST_MAILBOX_SEND);
+	sst_pm_runtime_put(drv);
+	return ret;
+}
+
+static const struct file_operations sst_debug_sram_ia_lpe_mbox_ops = {
+	.open = simple_open,
+	.read = sst_debug_sram_ia_lpe_mbox_read,
+	.llseek = default_llseek,
+};
+
+static ssize_t sst_debug_sram_lpe_ia_mbox_read(struct file *file,
+		char __user *user_buf, size_t count, loff_t *ppos)
+{
+
+	struct intel_sst_drv *drv = file->private_data;
+	int ret = 0;
+
+	ret = is_fw_running(drv);
+	if (ret)
+		return ret;
+
+	ret = copy_sram_to_user_buffer(user_buf, count, ppos, LPE_IA_MAILBOX_DUMP_SZ,
+				       (u32 *)(drv->mailbox + drv->mailbox_recv_offset),
+				       drv->mailbox_recv_offset);
+	sst_pm_runtime_put(drv);
+	return ret;
+}
+
+static const struct file_operations sst_debug_sram_lpe_ia_mbox_ops = {
+	.open = simple_open,
+	.read = sst_debug_sram_lpe_ia_mbox_read,
+	.llseek = default_llseek,
+};
+
+static ssize_t sst_debug_sram_lpe_scu_mbox_read(struct file *file,
+		char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct intel_sst_drv *drv = file->private_data;
+	int ret = 0;
+
+	ret = is_fw_running(drv);
+	if (ret)
+		return ret;
+	ret = copy_sram_to_user_buffer(user_buf, count, ppos, LPE_SCU_MAILBOX_DUMP_SZ,
+				       (u32 *)(drv->mailbox + SST_LPE_SCU_MAILBOX),
+				       SST_LPE_SCU_MAILBOX);
+	sst_pm_runtime_put(drv);
+	return ret;
+}
+
+static const struct file_operations sst_debug_sram_lpe_scu_mbox_ops = {
+	.open = simple_open,
+	.read = sst_debug_sram_lpe_scu_mbox_read,
+	.llseek = default_llseek,
+};
+
+static ssize_t sst_debug_sram_scu_lpe_mbox_read(struct file *file,
+		char __user *user_buf, size_t count, loff_t *ppos)
+{	struct intel_sst_drv *drv = file->private_data;
+	int ret = 0;
+
+	ret = is_fw_running(drv);
+	if (ret)
+		return ret;
+	ret = copy_sram_to_user_buffer(user_buf, count, ppos, SCU_LPE_MAILBOX_DUMP_SZ,
+				       (u32 *)(drv->mailbox + SST_SCU_LPE_MAILBOX),
+				       SST_SCU_LPE_MAILBOX);
+	sst_pm_runtime_put(drv);
+	return ret;
+}
+
+static const struct file_operations sst_debug_sram_scu_lpe_mbox_ops = {
+	.open = simple_open,
+	.read = sst_debug_sram_scu_lpe_mbox_read,
+	.llseek = default_llseek,
+};
+
+static ssize_t sst_debug_lpe_log_enable_write(struct file *file,
+		const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct intel_sst_drv *drv = file->private_data;
+	struct ipc_post *msg = NULL;
+	char buf[32];
+	int str_id = 0;	/* DUMMY, required by post message */
+	struct snd_sst_lpe_log_params params;
+	int ret_val = 0;
+	char *start = buf, *end;
+	int i = 0;
+	u8 *addr;
+	unsigned long tmp;
+
+	size_t buf_size = min(count, sizeof(buf)-1);
+	memset(&params, 0, sizeof(params));
+
+	ret_val = is_fw_running(drv);
+	if (ret_val)
+		return ret_val;
+
+	if (copy_from_user(buf, user_buf, buf_size)) {
+		ret_val = -EFAULT;
+		goto put_pm_runtime;
+	}
+
+	buf[buf_size] = 0;
+
+	addr = &params.dbg_type;
+	for (i = 0; i < (sizeof(params) - sizeof(u8)); i++) {
+		while (*start == ' ')
+			start++;
+		end = start;
+		while (isalnum(*end))
+			end++;
+		*end = 0;
+		ret_val = kstrtoul(start, 16, &tmp);
+		if (ret_val) {
+			pr_err("kstrtoul failed, ret_val = %d\n", ret_val);
+			goto put_pm_runtime;
+		}
+		*addr++ = (u8)tmp;
+		start = end + 1;
+	}
+
+	pr_debug("dbg_type = %d module_id = %d log_level = %d\n",
+			params.dbg_type, params.module_id, params.log_level);
+
+	if (params.dbg_type < NO_DEBUG || params.dbg_type > PTI_DEBUG) {
+		ret_val = -EINVAL;
+		goto put_pm_runtime;
+	}
+
+	ret_val = sst_create_ipc_msg(&msg, true);
+	if (ret_val != 0)
+		goto put_pm_runtime;
+
+	if ((sst_drv_ctx->pci_id != SST_MRFLD_PCI_ID) &&
+		(sst_drv_ctx->pci_id != PCI_DEVICE_ID_INTEL_SST_MOOR)) {
+		sst_fill_header(&msg->header, IPC_IA_DBG_LOG_ENABLE, 1,
+							str_id);
+		msg->header.part.data = sizeof(u32) + sizeof(params);
+		memcpy(msg->mailbox_data, &msg->header.full, sizeof(u32));
+		memcpy(msg->mailbox_data + sizeof(u32), &params,
+							sizeof(params));
+	}
+	drv->ops->sync_post_message(msg);
+	ret_val = buf_size;
+put_pm_runtime:
+	sst_pm_runtime_put(drv);
+	return ret_val;
+}
+
+/*
+ * Circular buffer hdr -> 0x1000
+ * log data starts at 0x1010
+ */
+static ssize_t sst_debug_lpe_log_enable_read(struct file *file,
+		char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct intel_sst_drv *drv = file->private_data;
+	struct lpe_log_buf_hdr buf_hdr;
+	size_t size1, size2, offset, bytes_read;
+	char *buf = NULL;
+	int ret;
+
+	ret = is_fw_running(drv);
+	if (ret)
+		return ret;
+
+	/* Get the sram lpe log buffer header */
+	memcpy_fromio(&buf_hdr, (u32 *)(drv->mailbox + SST_SCU_LPE_MAILBOX),
+							sizeof(buf_hdr));
+	if (buf_hdr.rd_addr == buf_hdr.wr_addr) {
+		pr_err("SRAM emptry\n");
+		ret = -ENODATA;
+		goto put_pm_runtime;
+	} else if (buf_hdr.rd_addr < buf_hdr.wr_addr) {
+		size1 = buf_hdr.wr_addr - buf_hdr.rd_addr;
+		offset = (buf_hdr.rd_addr - buf_hdr.base_addr)
+						+ SST_SCU_LPE_LOG_BUF;
+		pr_debug("Size = %zu, offset = %zx\n", size1, offset);
+		buf = vmalloc(size1);
+		if (buf == NULL) {
+			pr_err("Not enough memory to allocate\n");
+			ret = -ENOMEM;
+			goto put_pm_runtime;
+		}
+		memcpy_fromio(buf, (u32 *)(drv->mailbox + offset), size1);
+		bytes_read = simple_read_from_buffer(user_buf, count, ppos,
+							buf, size1);
+
+		buf_hdr.rd_addr = buf_hdr.rd_addr + bytes_read;
+
+	} else {
+		/* Read including the end address as well */
+		size1 = buf_hdr.end_addr - buf_hdr.rd_addr + 1;
+		offset = (buf_hdr.rd_addr - buf_hdr.base_addr)
+						+ SST_SCU_LPE_LOG_BUF;
+		pr_debug("Size = %zu, offset = %zx\n", size1, offset);
+		buf = vmalloc(size1);
+		if (buf == NULL) {
+			pr_err("Not enough memory to allocate\n");
+			ret = -ENOMEM;
+			goto put_pm_runtime;
+		}
+		memcpy_fromio(buf, (u32 *)(drv->mailbox + offset), size1);
+		bytes_read = simple_read_from_buffer(user_buf, count, ppos,
+							buf, size1);
+		if (bytes_read != size1) {
+			buf_hdr.rd_addr = buf_hdr.rd_addr + bytes_read;
+			goto update_rd_ptr;
+		}
+
+		/* Wrap around lpe log buffer here */
+		vfree(buf);
+		buf = NULL;
+		size2 = (buf_hdr.wr_addr - buf_hdr.base_addr);
+		offset = SST_SCU_LPE_LOG_BUF;
+		pr_debug("Size = %zu, offset = %zx\n", size2, offset);
+		buf = vmalloc(size2);
+		if (buf == NULL) {
+			pr_err("Not enough memory to allocate\n");
+			ret = -ENOMEM;
+			goto put_pm_runtime;
+		}
+		memcpy_fromio(buf, (u32 *)(drv->mailbox + offset), size2);
+		bytes_read += simple_read_from_buffer(user_buf,
+				(count - bytes_read), ppos, buf, size2);
+		buf_hdr.rd_addr = buf_hdr.base_addr + bytes_read - size1;
+
+	}
+update_rd_ptr:
+	if (bytes_read != 0) {
+		memcpy_toio((u32 *)(drv->mailbox + SST_SCU_LPE_MAILBOX +
+				2 * sizeof(u32)), &(buf_hdr.rd_addr), sizeof(u32));
+		pr_debug("read pointer restored\n");
+	}
+	vfree(buf);
+	buf = NULL;
+	ret = bytes_read;
+put_pm_runtime:
+	sst_pm_runtime_put(drv);
+	return ret;
+}
+
+static const struct file_operations sst_debug_lpe_log_enable_ops = {
+	.open = simple_open,
+	.write = sst_debug_lpe_log_enable_write,
+	.read = sst_debug_lpe_log_enable_read,
+	.llseek = default_llseek,
+};
+
+static ssize_t sst_debug_rtpm_read(struct file *file, char __user *user_buf,
+				   size_t count, loff_t *ppos)
+{
+	struct intel_sst_drv *drv = file->private_data;
+	char *status;
+
+	int usage = atomic_read(&drv->pm_usage_count);
+
+	pr_debug("RTPM usage: %d\n", usage);
+	status = drv->debugfs.runtime_pm_status ? "enabled\n" : "disabled\n";
+	return simple_read_from_buffer(user_buf, count, ppos,
+			status, strlen(status));
+}
+
+static ssize_t sst_debug_rtpm_write(struct file *file,
+		const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct intel_sst_drv *drv = file->private_data;
+	char buf[16];
+	int sz = min(count, sizeof(buf)-1);
+
+	int usage = atomic_read(&drv->pm_usage_count);
+
+	pr_debug("RTPM Usage: %d\n", usage);
+	if (copy_from_user(buf, user_buf, sz))
+		return -EFAULT;
+	buf[sz] = 0;
+
+	if (!strncmp(buf, "enable\n", sz)) {
+		/* already enabled? */
+		if (drv->debugfs.runtime_pm_status)
+			return -EINVAL;
+		drv->debugfs.runtime_pm_status = 1;
+		pm_runtime_allow(drv->dev);
+		sz = 6; /* strlen("enable") */
+	} else if (!strncmp(buf, "disable\n", sz)) {
+		if (!drv->debugfs.runtime_pm_status)
+			return -EINVAL;
+		drv->debugfs.runtime_pm_status = 0;
+		pm_runtime_forbid(drv->dev);
+		sz = 7; /* strlen("disable") */
+	} else
+		return -EINVAL;
+	return sz;
+}
+
+static const struct file_operations sst_debug_rtpm_ops = {
+	.open = simple_open,
+	.read = sst_debug_rtpm_read,
+	.write = sst_debug_rtpm_write,
+	.llseek = default_llseek,
+};
+
+
+static ssize_t sst_debug_readme_read(struct file *file, char __user *user_buf,
+				   size_t count, loff_t *ppos)
+{
+	const char *buf =
+		"\nAll files can be read using 'cat'\n"
+		"1. 'echo disable > runtime_pm' disables runtime PM and will prevent SST from suspending.\n"
+		"To enable runtime PM, echo 'enable' to runtime_pm. Dmesg will print the runtime pm usage\n"
+		"if logs are enabled.\n"
+		"2. Write to shim register using 'echo <addr> <value> > shim_dump'.\n"
+		"Valid address range is between 0x00 to 0x80 in increments of 8.\n"
+		"3. echo 1 > fw_clear_context , This sets the flag to skip the context restore\n"
+		"4. echo 1 > fw_clear_cache , This sets the flag to clear the cached copy of firmware\n"
+		"5. echo 1 > fw_reset_state ,This sets the fw state to RESET\n"
+		"6. echo memcpy > fw_dwnld_mode, This will set the firmware download mode to memcpy\n"
+		"   echo lli > fw_dwnld_mode, This will set the firmware download mode to\n"
+					"dma lli mode\n"
+		"   echo dma > fw_dwnld_mode, This will set the firmware download mode to\n"
+					"dma single block mode\n"
+		"7. iram_dump, dram_dump, interfaces provide mmap support to\n"
+		"get the iram and dram dump, these buffers will have data only\n"
+		"after the recovery is triggered\n";
+
+	const char *mrfld_buf =
+		"8. lpe_log_enable usage:\n"
+		"	echo <dbg_type> <module_id> <log_level> > lpe_log_enable.\n"
+		"9. cat fw_ssp_reg,This will dump the ssp register contents\n"
+		"10. cat fw_dma_reg,This will dump the dma register contents\n"
+		"11. ddr_imr_dump interface provides mmap support to get the imr dump,\n"
+		"this buffer will have data only after the recovery is triggered\n"
+		"12. ipc usage:\n"
+		"\t ipc file works only in binary mode. The ipc format is <IPC hdr><dsp hdr><payload>.\n"
+		"\t drv_id in the ipc header will be overwritten with unique driver id in the driver\n";
+
+	char *readme = NULL;
+	const char *buf2 = NULL;
+	int size, ret = 0;
+
+	switch (sst_drv_ctx->pci_id) {
+	case SST_MRFLD_PCI_ID:
+	case PCI_DEVICE_ID_INTEL_SST_MOOR:
+		size = strlen(buf) + strlen(mrfld_buf) + 2;
+		buf2 = mrfld_buf;
+		break;
+	default:
+		size = strlen(buf) + 1;
+	};
+
+	readme = kmalloc(size, GFP_KERNEL);
+	if (readme == NULL) {
+		pr_err("%s: no memory\n", __func__);
+		return -ENOMEM;
+	}
+
+	if (buf2)
+		sprintf(readme, "%s%s\n", buf, buf2);
+	else
+		sprintf(readme, "%s\n", buf);
+
+	ret = simple_read_from_buffer(user_buf, count, ppos,
+			readme, strlen(readme));
+	kfree(readme);
+	return ret;
+}
+
+static const struct file_operations sst_debug_readme_ops = {
+	.open = simple_open,
+	.read = sst_debug_readme_read,
+};
+
+static ssize_t sst_debug_osc_clk0_read(struct file *file, char __user *user_buf,
+				   size_t count, loff_t *ppos)
+{
+	char status[16];
+	int mode = -1;
+#ifdef CONFIG_INTEL_SCU_IPC_UTIL
+	mode = intel_scu_ipc_set_osc_clk0(0, CLK0_QUERY);
+#endif
+
+	snprintf(status, 16, "0x%x\n", mode);
+	return simple_read_from_buffer(user_buf, count, ppos,
+			status, strlen(status));
+}
+
+static ssize_t sst_debug_osc_clk0_write(struct file *file,
+		const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char buf[16];
+	int sz = min(count, sizeof(buf)-1);
+
+	if (copy_from_user(buf, user_buf, sz))
+		return -EFAULT;
+	buf[sz] = 0;
+
+#ifdef CONFIG_INTEL_SCU_IPC_UTIL
+	if (!strncmp(buf, "enable\n", sz)) {
+		intel_scu_ipc_set_osc_clk0(true, CLK0_DEBUG);
+		sz = 6; /* strlen("enable") */
+	} else if (!strncmp(buf, "disable\n", sz)) {
+		intel_scu_ipc_set_osc_clk0(false, CLK0_DEBUG);
+		sz = 7; /* strlen("disable") */
+	} else
+		return -EINVAL;
+#endif
+	return sz;
+}
+
+static const struct file_operations sst_debug_osc_clk0_ops = {
+	.open = simple_open,
+	.read = sst_debug_osc_clk0_read,
+	.write = sst_debug_osc_clk0_write,
+};
+
+static ssize_t sst_debug_fw_clear_cntx_read(struct file *file,
+		char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char *status;
+
+	status = atomic_read(&sst_drv_ctx->fw_clear_context) ? \
+			"clear fw cntx\n" : "do not clear fw cntx\n";
+
+	return simple_read_from_buffer(user_buf, count, ppos,
+			status, strlen(status));
+
+}
+
+static ssize_t sst_debug_fw_clear_cntx_write(struct file *file,
+		const char __user *user_buf, size_t count, loff_t *ppos)
+
+{
+	char buf[16];
+	int sz = min(count, sizeof(buf)-1);
+
+	if (copy_from_user(buf, user_buf, sz))
+		return -EFAULT;
+	buf[sz] = 0;
+
+	if (!strncmp(buf, "1\n", sz))
+		atomic_set(&sst_drv_ctx->fw_clear_context, 1);
+	else
+		atomic_set(&sst_drv_ctx->fw_clear_context, 0);
+
+	return sz;
+
+}
+
+static const struct file_operations sst_debug_fw_clear_cntx = {
+	.open = simple_open,
+	.read = sst_debug_fw_clear_cntx_read,
+	.write = sst_debug_fw_clear_cntx_write,
+};
+
+static ssize_t sst_debug_fw_clear_cache_read(struct file *file,
+		char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char *status;
+
+	status = atomic_read(&sst_drv_ctx->fw_clear_cache) ? \
+			"cache clear flag set\n" : "cache clear flag not set\n";
+
+	return simple_read_from_buffer(user_buf, count, ppos,
+			status, strlen(status));
+
+}
+
+static ssize_t sst_debug_fw_clear_cache_write(struct file *file,
+		const char __user *user_buf, size_t count, loff_t *ppos)
+
+{
+	char buf[16];
+	int sz = min(count, sizeof(buf)-1);
+
+	if (copy_from_user(buf, user_buf, sz))
+		return -EFAULT;
+	buf[sz] = 0;
+
+	if (!strncmp(buf, "1\n", sz))
+		atomic_set(&sst_drv_ctx->fw_clear_cache, 1);
+	else
+		return -EINVAL;
+
+	return sz;
+}
+
+static const struct file_operations sst_debug_fw_clear_cache = {
+	.open = simple_open,
+	.read = sst_debug_fw_clear_cache_read,
+	.write = sst_debug_fw_clear_cache_write,
+};
+
+static ssize_t sst_debug_fw_reset_state_read(struct file *file,
+		char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char state[16];
+
+	sprintf(state, "%d\n", sst_drv_ctx->sst_state);
+
+	return simple_read_from_buffer(user_buf, count, ppos,
+			state, strlen(state));
+
+}
+
+static ssize_t sst_debug_fw_reset_state_write(struct file *file,
+		const char __user *user_buf, size_t count, loff_t *ppos)
+
+{
+	char buf[16];
+	int sz = min(count, sizeof(buf)-1);
+
+	if (copy_from_user(buf, user_buf, sz))
+		return -EFAULT;
+	buf[sz] = 0;
+
+	if (!strncmp(buf, "1\n", sz))
+		sst_set_fw_state_locked(sst_drv_ctx, SST_RESET);
+	else
+		return -EINVAL;
+
+	return sz;
+
+}
+
+static const struct file_operations sst_debug_fw_reset_state = {
+	.open = simple_open,
+	.read = sst_debug_fw_reset_state_read,
+	.write = sst_debug_fw_reset_state_write,
+};
+
+static ssize_t sst_debug_dwnld_mode_read(struct file *file,
+		char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char *state = "error\n";
+
+	if (sst_drv_ctx->use_dma == 0) {
+		state = "memcpy\n";
+	} else if (sst_drv_ctx->use_dma == 1) {
+		state = sst_drv_ctx->use_lli ? \
+				"lli\n" : "dma\n";
+
+	}
+
+	return simple_read_from_buffer(user_buf, count, ppos,
+			state, strlen(state));
+
+}
+
+static ssize_t sst_debug_dwnld_mode_write(struct file *file,
+		const char __user *user_buf, size_t count, loff_t *ppos)
+
+{
+	char buf[16];
+	int sz = min(count, sizeof(buf)-1);
+
+	if (atomic_read(&sst_drv_ctx->pm_usage_count) &&
+	    sst_drv_ctx->sst_state != SST_RESET) {
+		pr_err("FW should be in suspended/RESET state\n");
+		return -EFAULT;
+	}
+
+	if (copy_from_user(buf, user_buf, sz))
+		return -EFAULT;
+	buf[sz] = '\0';
+
+	/* Firmware needs to be downloaded again to populate the lists */
+	atomic_set(&sst_drv_ctx->fw_clear_cache, 1);
+
+	if (!strncmp(buf, "memcpy\n", sz)) {
+		sst_drv_ctx->use_dma = 0;
+	} else if (!strncmp(buf, "lli\n", sz)) {
+		sst_drv_ctx->use_dma = 1;
+		sst_drv_ctx->use_lli = 1;
+	} else if (!strncmp(buf, "dma\n", sz)) {
+		sst_drv_ctx->use_dma = 1;
+		sst_drv_ctx->use_lli = 0;
+	}
+	return sz;
+
+}
+
+static const struct file_operations sst_debug_dwnld_mode = {
+	.open = simple_open,
+	.read = sst_debug_dwnld_mode_read,
+	.write = sst_debug_dwnld_mode_write,
+};
+
+static int dump_ssp_port(void __iomem *ssp_base, char *buf, int pos)
+{
+	int index = 0;
+
+	while (index < ARRAY_SIZE(ssp_reg_off)) {
+		pos += sprintf(buf + pos, "Reg: 0x%x: 0x%x\n", ssp_reg_off[index],
+			sst_reg_read(ssp_base, ssp_reg_off[index]));
+		index++;
+	}
+	return pos;
+}
+
+static ssize_t sst_debug_ssp_reg_read(struct file *file,
+			char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char *buf;
+	int i, pos = 0, off = 0;
+	struct intel_sst_drv *drv = file->private_data;
+	int num_ssp, buf_size, ret;
+
+	num_ssp = sst_drv_ctx->pdata->debugfs_data->num_ssp;
+	buf_size = DEBUGFS_SSP_BUF_SIZE * num_ssp;
+
+	buf = kmalloc(buf_size, GFP_KERNEL);
+	if (!buf) {
+		pr_err("%s: no memory\n", __func__);
+		return -ENOMEM;
+	}
+
+	ret = is_fw_running(drv);
+	if (ret)
+		goto err;
+
+	buf[0] = 0;
+
+	for (i = 0; i < num_ssp ; i++) {
+		if (!sst_drv_ctx->debugfs.ssp[i]) {
+			pr_err("ssp %d port not mapped\n", i);
+			continue;
+		}
+		off = sst_drv_ctx->pdata->debugfs_data->ssp_reg_size * i;
+		pos = dump_ssp_port((sst_drv_ctx->debugfs.ssp[i]), buf, pos);
+	}
+	sst_pm_runtime_put(drv);
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+err:
+	kfree(buf);
+	return ret;
+}
+
+static const struct file_operations sst_debug_ssp_reg = {
+		.open = simple_open,
+		.read = sst_debug_ssp_reg_read,
+};
+
+static int dump_dma_reg(char *buf, int pos, int dma)
+{
+	int i, index = 0;
+	int off = 0 ;
+	void __iomem *dma_reg;
+
+	if (!sst_drv_ctx->debugfs.dma_reg[dma]) {
+		pr_err("dma %d not mapped\n", dma);
+		return pos;
+	}
+
+	pos += sprintf(buf + pos, "\nDump DMA%d Reg\n\n", dma);
+
+	dma_reg = sst_drv_ctx->debugfs.dma_reg[dma];
+
+	/* Dump the DMA channel registers */
+	for (i = 0; i < DMA_NUM_CH; i++) {
+		pos += sprintf(buf + pos, "SAR%d: 0x%x: 0x%llx\n", i, off,
+			sst_reg_read64(dma_reg, off));
+		off += 8;
+
+		pos += sprintf(buf + pos, "DAR%d: 0x%x: 0x%llx\n", i, off,
+			sst_reg_read64(dma_reg, off));
+		off += 8;
+
+		pos += sprintf(buf + pos, "LLP%d: 0x%x: 0x%llx\n", i, off,
+			sst_reg_read64(dma_reg, off));
+		off += 8;
+
+		pos += sprintf(buf + pos, "CTL%d: 0x%x: 0x%llx\n", i, off,
+			sst_reg_read64(dma_reg, off));
+		off += 0x28;
+
+		pos += sprintf(buf + pos, "CFG%d: 0x%x: 0x%llx\n", i, off,
+			sst_reg_read64(dma_reg, off));
+		off += 0x18;
+	}
+
+	/* Dump the remaining DMA registers */
+	while (index < ARRAY_SIZE(dma_reg_off)) {
+		pos += sprintf(buf + pos, "Reg: 0x%x: 0x%llx\n", dma_reg_off[index],
+				sst_reg_read64(dma_reg, dma_reg_off[index]));
+		index++;
+	}
+	return pos;
+}
+
+static ssize_t sst_debug_dma_reg_read(struct file *file,
+		char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char *buf;
+	int pos = 0;
+	int ret, i;
+	struct intel_sst_drv *drv = file->private_data;
+	int num_dma, buf_size;
+
+	num_dma = sst_drv_ctx->pdata->debugfs_data->num_dma;
+	buf_size = DEBUGFS_DMA_BUF_SIZE * num_dma;
+
+	buf = kmalloc(buf_size, GFP_KERNEL);
+	if (!buf) {
+		pr_err("%s: no memory\n", __func__);
+		return -ENOMEM;
+	}
+
+	ret = is_fw_running(drv);
+	if (ret)
+		goto err;
+
+	buf[0] = 0;
+
+	for (i = 0; i < num_dma; i++)
+		pos = dump_dma_reg(buf, pos, i);
+
+	sst_pm_runtime_put(drv);
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+err:
+	kfree(buf);
+	return ret;
+}
+
+static const struct file_operations sst_debug_dma_reg = {
+		.open = simple_open,
+		.read = sst_debug_dma_reg_read,
+};
+
+/**
+ * sst_debug_remap - function remaps the iram/dram buff to userspace
+ *
+ * @vma: vm_area_struct passed from userspace
+ * @buf: Physical addr of the pointer to be remapped
+ * @type: type of the buffer
+ *
+ * Remaps the kernel buffer to the userspace
+ */
+static int sst_debug_remap(struct vm_area_struct *vma, char *buf,
+					enum sst_ram_type type)
+{
+	int retval, length;
+	void *mem_area;
+
+	if (!buf)
+		return -EIO;
+
+	length = vma->vm_end - vma->vm_start;
+	pr_debug("iram length 0x%x\n", length);
+
+	/* round it up to the page bondary  */
+	mem_area = (void *)PAGE_ALIGN((unsigned long)buf);
+
+	/* map the whole physically contiguous area in one piece  */
+	retval = remap_pfn_range(vma,
+			vma->vm_start,
+			virt_to_phys((void *)mem_area) >> PAGE_SHIFT,
+			length,
+			vma->vm_page_prot);
+	if (retval)
+		pr_err("mapping failed %d ", retval);
+	return retval;
+}
+
+int sst_debug_iram_dump_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	int retval;
+	struct intel_sst_drv *sst = sst_drv_ctx;
+
+	retval = sst_debug_remap(vma, sst->dump_buf.iram_buf.buf, SST_IRAM);
+
+	return retval;
+}
+
+static const struct file_operations sst_debug_iram_dump = {
+	.open = simple_open,
+	.mmap = sst_debug_iram_dump_mmap,
+};
+
+int sst_debug_dram_dump_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	int retval;
+	struct intel_sst_drv *sst = sst_drv_ctx;
+
+	retval = sst_debug_remap(vma, sst->dump_buf.dram_buf.buf, SST_DRAM);
+
+	return retval;
+}
+
+static const struct file_operations sst_debug_dram_dump = {
+	.open = simple_open,
+	.mmap = sst_debug_dram_dump_mmap,
+};
+
+int sst_debug_ddr_imr_dump_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	int retval;
+	struct intel_sst_drv *sst = sst_drv_ctx;
+
+	retval = sst_debug_remap(vma, sst->ddr, 0);
+
+	return retval;
+}
+
+static const struct file_operations sst_debug_ddr_imr_dump = {
+	.open = simple_open,
+	.mmap = sst_debug_ddr_imr_dump_mmap,
+};
+
+static ssize_t sst_debug_ipc_write(struct file *file,
+		const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct intel_sst_drv *ctx = (struct intel_sst_drv *)file->private_data;
+	unsigned char *buf;
+	struct sst_block *block = NULL;
+	struct ipc_dsp_hdr *dsp_hdr;
+	struct ipc_post *msg = NULL;
+	int ret, res_rqd, msg_id, drv_id;
+	u32 low_payload;
+
+	if (count > 1024)
+		return -EINVAL;
+
+	ret = is_fw_running(ctx);
+	if (ret)
+		return ret;
+
+	buf = kzalloc((sizeof(unsigned char) * (count)), GFP_KERNEL);
+	if (!buf) {
+		ret = -ENOMEM;
+		goto put_pm_runtime;
+	}
+	if (copy_from_user(buf, user_buf, count)) {
+		ret = -EFAULT;
+		goto free_mem;
+	}
+
+	if (sst_create_ipc_msg(&msg, true)) {
+		ret = -ENOMEM;
+		goto free_mem;
+	}
+
+	msg->mrfld_header.full = *((u64 *)buf);
+	pr_debug("ipc hdr: %llx\n", msg->mrfld_header.full);
+
+	/* Override the drv id with unique drv id */
+	drv_id = sst_assign_pvt_id(ctx);
+	msg->mrfld_header.p.header_high.part.drv_id = drv_id;
+
+	res_rqd = msg->mrfld_header.p.header_high.part.res_rqd;
+	msg_id = msg->mrfld_header.p.header_high.part.msg_id;
+	pr_debug("res_rqd: %d, msg_id: %d, drv_id: %d\n",
+					res_rqd, msg_id, drv_id);
+	if (res_rqd) {
+		block = sst_create_block(ctx, msg_id, drv_id);
+		if (block == NULL) {
+			ret = -ENOMEM;
+			kfree(msg);
+			goto free_mem;
+		}
+	}
+
+	dsp_hdr = (struct ipc_dsp_hdr *)(buf + 8);
+	pr_debug("dsp hdr: %llx\n", *((u64 *)(dsp_hdr)));
+	low_payload = msg->mrfld_header.p.header_low_payload;
+	if (low_payload > (1024 - sizeof(union ipc_header_mrfld))) {
+		pr_err("Invalid low payload length: %x\n", low_payload);
+		ret = -EINVAL;
+		kfree(msg);
+		goto free_block;
+	}
+
+	memcpy(msg->mailbox_data, (buf+(sizeof(union ipc_header_mrfld))),
+			low_payload);
+	sst_add_to_dispatch_list_and_post(ctx, msg);
+	if (res_rqd) {
+		ret = sst_wait_timeout(ctx, block);
+		if (ret) {
+			pr_err("%s: fw returned err %d\n", __func__, ret);
+			goto free_block;
+		}
+
+		if (msg_id == IPC_GET_PARAMS) {
+			unsigned char *r = block->data;
+			/* Copy the IPC header first and then append dsp header
+			 * and payload data*/
+			memcpy(ctx->debugfs.get_params_data, &msg->mrfld_header.full, sizeof(msg->mrfld_header.full));
+			memcpy(ctx->debugfs.get_params_data + sizeof(msg->mrfld_header.full), r, dsp_hdr->length);
+			ctx->debugfs.get_params_len = sizeof(msg->mrfld_header.full) + dsp_hdr->length;
+		}
+
+	}
+	ret = count;
+free_block:
+	if (res_rqd)
+		sst_free_block(sst_drv_ctx, block);
+free_mem:
+	kfree(buf);
+put_pm_runtime:
+	sst_pm_runtime_put(ctx);
+	return ret;
+}
+
+static ssize_t sst_debug_ipc_read(struct file *file,
+		char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct intel_sst_drv *ctx = (struct intel_sst_drv *)file->private_data;
+	return simple_read_from_buffer(user_buf, count, ppos,
+			ctx->debugfs.get_params_data,
+			ctx->debugfs.get_params_len);
+}
+
+static const struct file_operations sst_debug_ipc_ops = {
+	.open = simple_open,
+	.write = sst_debug_ipc_write,
+	.read = sst_debug_ipc_read,
+	.llseek = default_llseek,
+};
+
+struct sst_debug {
+	const char *name;
+	const struct file_operations *fops;
+	umode_t mode;
+};
+
+static const struct sst_debug sst_common_dbg_entries[] = {
+	{"runtime_pm", &sst_debug_rtpm_ops, 0600},
+	{"shim_dump", &sst_debug_shim_ops, 0600},
+	{"fw_clear_context", &sst_debug_fw_clear_cntx, 0600},
+	{"fw_clear_cache", &sst_debug_fw_clear_cache, 0600},
+	{"fw_reset_state", &sst_debug_fw_reset_state, 0600},
+	{"fw_dwnld_mode", &sst_debug_dwnld_mode, 0600},
+	{"iram_dump", &sst_debug_iram_dump, 0400},
+	{"dram_dump", &sst_debug_dram_dump, 0400},
+	{"sram_ia_lpe_mailbox", &sst_debug_sram_ia_lpe_mbox_ops, 0400},
+	{"sram_lpe_ia_mailbox", &sst_debug_sram_lpe_ia_mbox_ops, 0400},
+	{"README", &sst_debug_readme_ops, 0400},
+};
+
+static const struct sst_debug ctp_dbg_entries[] = {
+	{"sram_lpe_debug", &sst_debug_sram_lpe_debug_ops, 0400},
+	{"sram_lpe_checkpoint", &sst_debug_sram_lpe_checkpoint_ops, 0400},
+	{"sram_lpe_scu_mailbox", &sst_debug_sram_lpe_scu_mbox_ops, 0400},
+	{"sram_scu_lpe_mailbox", &sst_debug_sram_scu_lpe_mbox_ops, 0400},
+	{"lpe_log_enable", &sst_debug_lpe_log_enable_ops, 0400},
+	{"fw_ssp_reg", &sst_debug_ssp_reg, 0400},
+	{"fw_dma_reg", &sst_debug_dma_reg, 0400},
+	{"osc_clk0", &sst_debug_osc_clk0_ops, 0600},
+};
+
+static const struct sst_debug mrfld_dbg_entries[] = {
+	{"sram_lpe_checkpoint", &sst_debug_sram_lpe_checkpoint_ops, 0400},
+	{"fw_ssp_reg", &sst_debug_ssp_reg, 0400},
+	{"fw_dma_reg", &sst_debug_dma_reg, 0400},
+	{"ddr_imr_dump", &sst_debug_ddr_imr_dump, 0400},
+	{"ipc", &sst_debug_ipc_ops, 0400},
+};
+
+void sst_debugfs_create_files(struct intel_sst_drv *sst,
+			const struct sst_debug *entries, int size)
+{
+	int i;
+
+	for (i = 0; i < size; i++) {
+		struct dentry *dentry;
+		const struct sst_debug *entry = &entries[i];
+
+		dentry = debugfs_create_file(entry->name, entry->mode,
+				sst->debugfs.root, sst, entry->fops);
+		if (dentry == NULL) {
+			pr_err("Failed to create %s file\n", entry->name);
+			return;
+		}
+	}
+}
+
+void sst_debugfs_init(struct intel_sst_drv *sst)
+{
+	int size = 0;
+	const struct sst_debug *debug = NULL;
+
+	sst->debugfs.root = debugfs_create_dir("sst", NULL);
+	if (IS_ERR(sst->debugfs.root) || !sst->debugfs.root) {
+		pr_err("Failed to create debugfs directory\n");
+		return;
+	}
+
+	sst_debugfs_create_files(sst, sst_common_dbg_entries,
+				ARRAY_SIZE(sst_common_dbg_entries));
+
+	/* Initial status is enabled */
+	sst->debugfs.runtime_pm_status = 1;
+
+	if ((sst->pci_id == SST_MRFLD_PCI_ID) ||
+			(sst->pci_id == PCI_DEVICE_ID_INTEL_SST_MOOR)) {
+		debug = mrfld_dbg_entries;
+		size = ARRAY_SIZE(mrfld_dbg_entries);
+	}
+
+	if (debug)
+		sst_debugfs_create_files(sst, debug, size);
+
+}
+
+void sst_debugfs_exit(struct intel_sst_drv *sst)
+{
+	if (sst->debugfs.runtime_pm_status)
+		pm_runtime_allow(sst->dev);
+	debugfs_remove_recursive(sst->debugfs.root);
+}
diff --git a/sound/soc/intel/sst/sst_drv_interface.c b/sound/soc/intel/sst/sst_drv_interface.c
new file mode 100644
index 0000000..196e645
--- /dev/null
+++ b/sound/soc/intel/sst/sst_drv_interface.c
@@ -0,0 +1,1107 @@
+/*
+ *  sst_drv_interface.c - Intel SST Driver for audio engine
+ *
+ *  Copyright (C) 2008-10 Intel Corp
+ *  Authors:	Vinod Koul <vinod.koul@intel.com>
+ *		Harsha Priya <priya.harsha@intel.com>
+ *		Dharageswari R <dharageswari.r@intel.com)
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This file defines the interface  between the platform driver and the SST
+ * driver.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/fs.h>
+#include <linux/firmware.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_qos.h>
+#include <linux/math64.h>
+#include <linux/intel_mid_pm.h>
+#include <sound/compress_offload.h>
+#include <sound/pcm.h>
+#include <sound/intel_sst_ioctl.h>
+#include "../sst_platform.h"
+#include "../platform_ipc_v2.h"
+#include "sst.h"
+
+#define NUM_CODEC 2
+#define MIN_FRAGMENT 2
+#define MAX_FRAGMENT 4
+#define MIN_FRAGMENT_SIZE (50 * 1024)
+#define MAX_FRAGMENT_SIZE (1024 * 1024)
+#define SST_GET_BYTES_PER_SAMPLE(pcm_wd_sz)  (((pcm_wd_sz + 15) >> 4) << 1)
+
+void sst_restore_fw_context(void)
+{
+	struct snd_sst_ctxt_params fw_context;
+	struct ipc_post *msg = NULL;
+	int retval = 0;
+	struct sst_block *block;
+
+	/* Skip the context restore, when fw_clear_context is set */
+	/* fw_clear_context set through debugfs support */
+	if (atomic_read(&sst_drv_ctx->fw_clear_context)) {
+		pr_debug("Skipping restore_fw_context\n");
+		atomic_set(&sst_drv_ctx->fw_clear_context, 0);
+		return;
+	}
+
+	pr_debug("restore_fw_context\n");
+	/*nothing to restore*/
+	if (!sst_drv_ctx->fw_cntx_size)
+		return;
+	pr_debug("restoring context......\n");
+	/*send msg to fw*/
+	retval = sst_create_block_and_ipc_msg(&msg, true, sst_drv_ctx, &block,
+			IPC_IA_SET_FW_CTXT, 0);
+	if (retval) {
+		pr_err("Can't allocate block/msg. No restore fw_context\n");
+		return;
+	}
+
+	sst_fill_header(&msg->header, IPC_IA_SET_FW_CTXT, 1, 0);
+
+	msg->header.part.data = sizeof(fw_context) + sizeof(u32);
+	fw_context.address = virt_to_phys((void *)sst_drv_ctx->fw_cntx);
+	fw_context.size = sst_drv_ctx->fw_cntx_size;
+	memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
+	memcpy(msg->mailbox_data + sizeof(u32),
+				&fw_context, sizeof(fw_context));
+	sst_add_to_dispatch_list_and_post(sst_drv_ctx, msg);
+	retval = sst_wait_timeout(sst_drv_ctx, block);
+	sst_free_block(sst_drv_ctx, block);
+	if (retval)
+		pr_err("sst_restore_fw_context..timeout!\n");
+	return;
+}
+
+/*
+ * sst_download_fw - download the audio firmware to DSP
+ *
+ * This function is called when the FW needs to be downloaded to SST DSP engine
+ */
+int sst_download_fw(void)
+{
+	int retval = 0;
+
+	retval = sst_load_fw();
+	if (retval)
+		return retval;
+	pr_debug("fw loaded successful!!!\n");
+
+	if (sst_drv_ctx->ops->restore_dsp_context)
+		sst_drv_ctx->ops->restore_dsp_context();
+	sst_drv_ctx->sst_state = SST_FW_RUNNING;
+	return retval;
+}
+
+int free_stream_context(unsigned int str_id)
+{
+	struct stream_info *stream;
+	int ret = 0;
+
+	stream = get_stream_info(str_id);
+	if (stream) {
+		/* str_id is valid, so stream is alloacted */
+		ret = sst_free_stream(str_id);
+		if (ret)
+			sst_clean_stream(&sst_drv_ctx->streams[str_id]);
+		return ret;
+	}
+	return ret;
+}
+
+/*
+ * sst_send_algo_param - send LPE Mixer param to SST
+ *
+ * this function sends the algo parameter to sst dsp engine
+ */
+static int sst_send_algo_param(struct snd_ppp_params *algo_params)
+{
+	u32 header_size = 0;
+	struct ipc_post *msg = NULL;
+	u32 ipc_msg_size = sizeof(u32) + sizeof(*algo_params)
+			 - sizeof(algo_params->params) + algo_params->size;
+	u32 offset = 0;
+
+	if (ipc_msg_size > SST_MAILBOX_SIZE)
+		return -ENOMEM;
+	if (sst_create_ipc_msg(&msg, true))
+		return -ENOMEM;
+	sst_fill_header(&msg->header,
+			IPC_IA_ALG_PARAMS, 1, algo_params->str_id);
+	msg->header.part.data = sizeof(u32) + sizeof(*algo_params)
+			 - sizeof(algo_params->params) + algo_params->size;
+	memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
+	offset = sizeof(u32);
+	header_size = sizeof(*algo_params) - sizeof(algo_params->params);
+	memcpy(msg->mailbox_data + sizeof(u32), algo_params,
+		sizeof(*algo_params) - sizeof(algo_params->params));
+	offset += header_size;
+	memcpy(msg->mailbox_data + offset , algo_params->params,
+			algo_params->size);
+	sst_add_to_dispatch_list_and_post(sst_drv_ctx, msg);
+	return 0;
+}
+
+static int sst_send_lpe_mixer_algo_params(void)
+{
+	struct snd_ppp_params algo_param;
+	struct snd_ppp_mixer_params mixer_param;
+	unsigned int input_mixer, stream_device_id;
+	int retval = 0;
+
+	retval = intel_sst_check_device();
+	if (retval) {
+		pr_err("sst_check_device failed %d\n", retval);
+		return retval;
+	}
+
+	mutex_lock(&sst_drv_ctx->mixer_ctrl_lock);
+	input_mixer = (sst_drv_ctx->device_input_mixer)
+				& SST_INPUT_STREAM_MIXED;
+	pr_debug("Input Mixer settings %d", input_mixer);
+	stream_device_id = sst_drv_ctx->device_input_mixer - input_mixer;
+	algo_param.algo_id = SST_ALGO_MIXER;
+	algo_param.str_id = stream_device_id;
+	algo_param.enable = 1;
+	algo_param.operation = SST_SET_ALGO;
+	algo_param.size = sizeof(mixer_param);
+	mixer_param.type = SST_ALGO_PARAM_MIXER_STREAM_CFG;
+	mixer_param.input_stream_bitmap = input_mixer;
+	mixer_param.size = sizeof(input_mixer);
+	algo_param.params = &mixer_param;
+	mutex_unlock(&sst_drv_ctx->mixer_ctrl_lock);
+	pr_debug("setting pp param\n");
+	pr_debug("Algo ID %d Str id %d Enable %d Size %d\n",
+			algo_param.algo_id, algo_param.str_id,
+			algo_param.enable, algo_param.size);
+	sst_send_algo_param(&algo_param);
+	sst_pm_runtime_put(sst_drv_ctx);
+	return retval;
+}
+
+/*
+ * sst_get_stream_allocated - this function gets a stream allocated with
+ * the given params
+ *
+ * @str_param : stream params
+ * @lib_dnld : pointer to pointer of lib downlaod struct
+ *
+ * This creates new stream id for a stream, in case lib is to be downloaded to
+ * DSP, it downloads that
+ */
+int sst_get_stream_allocated(struct snd_sst_params *str_param,
+		struct snd_sst_lib_download **lib_dnld)
+{
+	int retval, str_id;
+	struct sst_block *block;
+	struct snd_sst_alloc_response *response;
+	struct stream_info *str_info;
+
+	pr_debug("In %s\n", __func__);
+	block = sst_create_block(sst_drv_ctx, 0, 0);
+	if (block == NULL)
+		return -ENOMEM;
+
+	retval = sst_drv_ctx->ops->alloc_stream((char *) str_param, block);
+	str_id = retval;
+	if (retval < 0) {
+		pr_err("sst_alloc_stream failed %d\n", retval);
+		goto free_block;
+	}
+	pr_debug("Stream allocated %d\n", retval);
+	str_info = get_stream_info(str_id);
+	if (str_info == NULL) {
+		pr_err("get stream info returned null\n");
+		str_id = -EINVAL;
+		goto free_block;
+	}
+
+	/* Block the call for reply */
+	retval = sst_wait_timeout(sst_drv_ctx, block);
+	if (block->data) {
+		response = (struct snd_sst_alloc_response *)block->data;
+		retval = response->str_type.result;
+		if (!retval)
+			goto free_block;
+
+		pr_err("sst: FW alloc failed retval %d\n", retval);
+		if (retval == SST_ERR_STREAM_IN_USE) {
+			pr_err("sst:FW not in clean state, send free for:%d\n",
+					str_id);
+			sst_free_stream(str_id);
+			*lib_dnld = NULL;
+		}
+		if (retval == SST_LIB_ERR_LIB_DNLD_REQUIRED) {
+			*lib_dnld = kzalloc(sizeof(**lib_dnld), GFP_KERNEL);
+			if (*lib_dnld == NULL) {
+				str_id = -ENOMEM;
+				goto free_block;
+			}
+			memcpy(*lib_dnld, &response->lib_dnld, sizeof(**lib_dnld));
+			sst_clean_stream(str_info);
+		} else {
+			*lib_dnld = NULL;
+		}
+		str_id = -retval;
+	} else if (retval != 0) {
+		pr_err("sst: FW alloc failed retval %d\n", retval);
+		/* alloc failed, so reset the state to uninit */
+		str_info->status = STREAM_UN_INIT;
+		str_id = retval;
+	}
+free_block:
+	sst_free_block(sst_drv_ctx, block);
+	return str_id; /*will ret either error (in above if) or correct str id*/
+}
+
+/*
+ * sst_get_sfreq - this function returns the frequency of the stream
+ *
+ * @str_param : stream params
+ */
+int sst_get_sfreq(struct snd_sst_params *str_param)
+{
+	switch (str_param->codec) {
+	case SST_CODEC_TYPE_PCM:
+		return str_param->sparams.uc.pcm_params.sfreq;
+	case SST_CODEC_TYPE_AAC:
+		return str_param->sparams.uc.aac_params.externalsr;
+	case SST_CODEC_TYPE_MP3:
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+/*
+ * sst_get_sfreq - this function returns the frequency of the stream
+ *
+ * @str_param : stream params
+ */
+int sst_get_num_channel(struct snd_sst_params *str_param)
+{
+	switch (str_param->codec) {
+	case SST_CODEC_TYPE_PCM:
+		return str_param->sparams.uc.pcm_params.num_chan;
+	case SST_CODEC_TYPE_MP3:
+		return str_param->sparams.uc.mp3_params.num_chan;
+	case SST_CODEC_TYPE_AAC:
+		return str_param->sparams.uc.aac_params.num_chan;
+	default:
+		return -EINVAL;
+	}
+}
+
+/*
+ * sst_get_stream - this function prepares for stream allocation
+ *
+ * @str_param : stream param
+ */
+int sst_get_stream(struct snd_sst_params *str_param)
+{
+	int retval;
+	struct stream_info *str_info;
+	struct snd_sst_lib_download *lib_dnld;
+
+	pr_debug("In %s\n", __func__);
+	/* stream is not allocated, we are allocating */
+	retval = sst_get_stream_allocated(str_param, &lib_dnld);
+
+	if (retval == -(SST_LIB_ERR_LIB_DNLD_REQUIRED)) {
+		/* codec download is required */
+
+		pr_debug("Codec is required.... trying that\n");
+		if (lib_dnld == NULL) {
+			pr_err("lib download null!!! abort\n");
+			return -EIO;
+		}
+
+		retval = sst_load_library(lib_dnld, str_param->ops);
+		kfree(lib_dnld);
+
+		if (!retval) {
+			pr_debug("codec was downloaded successfully\n");
+
+			retval = sst_get_stream_allocated(str_param, &lib_dnld);
+			if (retval <= 0) {
+				retval = -EIO;
+				goto err;
+			}
+
+			pr_debug("Alloc done stream id %d\n", retval);
+		} else {
+			pr_debug("codec download failed\n");
+			retval = -EIO;
+			goto err;
+		}
+	} else if  (retval <= 0) {
+		retval = -EIO;
+		goto err;
+	}
+	/* store sampling freq */
+	str_info = &sst_drv_ctx->streams[retval];
+	str_info->sfreq = sst_get_sfreq(str_param);
+
+err:
+	return retval;
+}
+
+/**
+* intel_sst_check_device - checks SST device
+*
+* This utility function checks the state of SST device and downlaods FW if
+* not done, or resumes the device if suspended
+*/
+int intel_sst_check_device(void)
+{
+	int retval = 0;
+
+	pr_debug("In %s\n", __func__);
+
+	pm_runtime_get_sync(sst_drv_ctx->dev);
+	atomic_inc(&sst_drv_ctx->pm_usage_count);
+
+	pr_debug("%s: count is %d now\n", __func__,
+				atomic_read(&sst_drv_ctx->pm_usage_count));
+
+	mutex_lock(&sst_drv_ctx->sst_lock);
+
+	if (sst_drv_ctx->sst_state == SST_RESET) {
+
+		/* FW is not downloaded */
+		pr_debug("DSP Downloading FW now...\n");
+		retval = sst_download_fw();
+		if (retval) {
+			pr_err("FW download fail %x\n", retval);
+			sst_drv_ctx->sst_state = SST_RESET;
+			mutex_unlock(&sst_drv_ctx->sst_lock);
+			sst_pm_runtime_put(sst_drv_ctx);
+			return retval;
+		}
+	}
+	mutex_unlock(&sst_drv_ctx->sst_lock);
+	return retval;
+}
+
+void sst_process_mad_ops(struct work_struct *work)
+{
+
+	struct mad_ops_wq *mad_ops =
+			container_of(work, struct mad_ops_wq, wq);
+	int retval = 0;
+
+	switch (mad_ops->control_op) {
+	case SST_SND_PAUSE:
+		retval = sst_pause_stream(mad_ops->stream_id);
+		break;
+	case SST_SND_RESUME:
+		retval = sst_resume_stream(mad_ops->stream_id);
+		break;
+	default:
+		pr_err(" wrong control_ops reported\n");
+	}
+	if (retval)
+		pr_err("%s(): op: %d, retval: %d\n",
+				__func__, mad_ops->control_op, retval);
+	kfree(mad_ops);
+	return;
+}
+
+static int sst_power_control(bool state)
+{
+	pr_debug("%s for %d", __func__, state);
+
+	/* should we do ref count here, or rely on pcm handle?? */
+	if (state == true)
+		return intel_sst_check_device();
+	else
+		return sst_pm_runtime_put(sst_drv_ctx);
+}
+/*
+ * sst_open_pcm_stream - Open PCM interface
+ *
+ * @str_param: parameters of pcm stream
+ *
+ * This function is called by MID sound card driver to open
+ * a new pcm interface
+ */
+static int sst_open_pcm_stream(struct snd_sst_params *str_param)
+{
+	int retval;
+
+	if (!str_param)
+		return -EINVAL;
+
+	pr_debug("%s: doing rtpm_get\n", __func__);
+
+	retval = intel_sst_check_device();
+
+	if (retval)
+		return retval;
+	retval = sst_get_stream(str_param);
+	if (retval > 0) {
+		sst_drv_ctx->stream_cnt++;
+	} else {
+		pr_err("sst_get_stream returned err %d\n", retval);
+		sst_pm_runtime_put(sst_drv_ctx);
+	}
+
+	return retval;
+}
+
+static int sst_cdev_open(struct snd_sst_params *str_params,
+		struct sst_compress_cb *cb)
+{
+	int str_id, retval;
+	struct stream_info *stream;
+
+	pr_debug("%s: doing rtpm_get\n", __func__);
+
+	retval = intel_sst_check_device();
+	if (retval)
+		return retval;
+
+	str_id = sst_get_stream(str_params);
+	if (str_id > 0) {
+		pr_debug("stream allocated in sst_cdev_open %d\n", str_id);
+		stream = &sst_drv_ctx->streams[str_id];
+		stream->compr_cb = cb->compr_cb;
+		stream->compr_cb_param = cb->param;
+		stream->drain_notify = cb->drain_notify;
+		stream->drain_cb_param = cb->drain_cb_param;
+	} else {
+		pr_err("stream encountered error during alloc %d\n", str_id);
+		str_id = -EINVAL;
+		sst_pm_runtime_put(sst_drv_ctx);
+	}
+	return str_id;
+}
+
+static int sst_cdev_close(unsigned int str_id)
+{
+	int retval;
+	struct stream_info *stream;
+
+	pr_debug("%s: Entry\n", __func__);
+	stream = get_stream_info(str_id);
+	if (!stream) {
+		pr_err("stream info is NULL for str %d!!!\n", str_id);
+		return -EINVAL;
+	}
+
+	if (stream->status == STREAM_RESET) {
+		/* silently fail here as we have cleaned the stream */
+		pr_debug("stream in reset state...\n");
+		stream->status = STREAM_UN_INIT;
+
+		retval = 0;
+		goto put;
+	}
+
+	retval = sst_free_stream(str_id);
+put:
+	stream->compr_cb_param = NULL;
+	stream->compr_cb = NULL;
+
+	/* The free_stream will return a error if there is no stream to free,
+	(i.e. the alloc failure case). And in this case the open does a put in
+	the error scenario, so skip in this case.
+		In the close we need to handle put in the success scenario and
+	the timeout error(EBUSY) scenario. */
+	if (!retval || (retval == -EBUSY))
+		sst_pm_runtime_put(sst_drv_ctx);
+	else
+		pr_err("%s: free stream returned err %d\n", __func__, retval);
+
+	pr_debug("%s: End\n", __func__);
+	return retval;
+
+}
+
+static int sst_cdev_ack(unsigned int str_id, unsigned long bytes)
+{
+	struct stream_info *stream;
+	struct snd_sst_tstamp fw_tstamp = {0,};
+	int offset;
+	void __iomem *addr;
+
+	pr_debug("sst:  ackfor %d\n", str_id);
+	stream = get_stream_info(str_id);
+	if (!stream)
+		return -EINVAL;
+
+	/* update bytes sent */
+	stream->cumm_bytes += bytes;
+	pr_debug("bytes copied %d inc by %ld\n", stream->cumm_bytes, bytes);
+
+	memcpy_fromio(&fw_tstamp,
+		((void *)(sst_drv_ctx->mailbox + sst_drv_ctx->tstamp)
+		+(str_id * sizeof(fw_tstamp))),
+		sizeof(fw_tstamp));
+
+	fw_tstamp.bytes_copied = stream->cumm_bytes;
+	pr_debug("bytes sent to fw %llu inc by %ld\n", fw_tstamp.bytes_copied,
+							 bytes);
+
+	addr =  ((void *)(sst_drv_ctx->mailbox + sst_drv_ctx->tstamp)) +
+			(str_id * sizeof(fw_tstamp));
+	offset =  offsetof(struct snd_sst_tstamp, bytes_copied);
+	sst_shim_write(addr, offset, fw_tstamp.bytes_copied);
+	return 0;
+
+}
+
+static int sst_cdev_set_metadata(unsigned int str_id,
+				struct snd_compr_metadata *metadata)
+{
+	int retval = 0, pvt_id, len;
+	struct ipc_post *msg = NULL;
+	struct stream_info *str_info;
+	struct ipc_dsp_hdr dsp_hdr;
+
+	pr_debug("set metadata for stream %d\n", str_id);
+
+	str_info = get_stream_info(str_id);
+	if (!str_info)
+		return -EINVAL;
+
+	if (sst_create_ipc_msg(&msg, 1))
+		return -ENOMEM;
+
+	if (!sst_drv_ctx->use_32bit_ops) {
+		pvt_id = sst_assign_pvt_id(sst_drv_ctx);
+		pr_debug("pvt id = %d\n", pvt_id);
+		pr_debug("pipe id = %d\n", str_info->pipe_id);
+		sst_fill_header_mrfld(&msg->mrfld_header,
+			IPC_CMD, str_info->task_id, 1, pvt_id);
+
+		len = sizeof(*metadata) + sizeof(dsp_hdr);
+		msg->mrfld_header.p.header_low_payload = len;
+		sst_fill_header_dsp(&dsp_hdr, IPC_IA_SET_STREAM_PARAMS_MRFLD,
+				str_info->pipe_id, sizeof(*metadata));
+		memcpy(msg->mailbox_data, &dsp_hdr, sizeof(dsp_hdr));
+		memcpy(msg->mailbox_data + sizeof(dsp_hdr),
+				metadata, sizeof(*metadata));
+	} else {
+		sst_fill_header(&msg->header, IPC_IA_SET_STREAM_PARAMS,
+					1, str_id);
+		msg->header.part.data = sizeof(u32) + sizeof(*metadata);
+		memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
+		memcpy(msg->mailbox_data + sizeof(u32),
+				metadata, sizeof(*metadata));
+	}
+
+	sst_drv_ctx->ops->sync_post_message(msg);
+	return retval;
+}
+
+static int sst_cdev_control(unsigned int cmd, unsigned int str_id)
+{
+	pr_debug("recieved cmd %d on stream %d\n", cmd, str_id);
+
+	if (sst_drv_ctx->sst_state != SST_FW_RUNNING)
+		return 0;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+		return sst_pause_stream(str_id);
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+		return sst_resume_stream(str_id);
+	case SNDRV_PCM_TRIGGER_START: {
+		struct stream_info *str_info;
+		str_info = get_stream_info(str_id);
+		if (!str_info)
+			return -EINVAL;
+		str_info->prev = str_info->status;
+		str_info->status = STREAM_RUNNING;
+		return sst_start_stream(str_id);
+	}
+	case SNDRV_PCM_TRIGGER_STOP:
+		return sst_drop_stream(str_id);
+	case SND_COMPR_TRIGGER_DRAIN:
+		return sst_drain_stream(str_id, false);
+	case SND_COMPR_TRIGGER_NEXT_TRACK:
+		return sst_next_track();
+	case SND_COMPR_TRIGGER_PARTIAL_DRAIN:
+		return sst_drain_stream(str_id, true);
+	default:
+		return -EINVAL;
+	}
+}
+
+static int sst_cdev_tstamp(unsigned int str_id, struct snd_compr_tstamp *tstamp)
+{
+	struct snd_sst_tstamp fw_tstamp = {0,};
+	struct stream_info *stream;
+
+	memcpy_fromio(&fw_tstamp,
+		((void *)(sst_drv_ctx->mailbox + sst_drv_ctx->tstamp)
+		+(str_id * sizeof(fw_tstamp))),
+		sizeof(fw_tstamp));
+
+	stream = get_stream_info(str_id);
+	if (!stream)
+		return -EINVAL;
+	pr_debug("rb_counter %llu in bytes\n", fw_tstamp.ring_buffer_counter);
+
+	tstamp->copied_total = fw_tstamp.ring_buffer_counter;
+	tstamp->pcm_frames = fw_tstamp.frames_decoded;
+	tstamp->pcm_io_frames = div_u64(fw_tstamp.hardware_counter,
+			(u64)((stream->num_ch) * SST_GET_BYTES_PER_SAMPLE(24)));
+	tstamp->sampling_rate = fw_tstamp.sampling_frequency;
+	pr_debug("PCM  = %u\n", tstamp->pcm_io_frames);
+	pr_debug("Pointer Query on strid = %d  copied_total %d, decodec %d\n",
+		str_id, tstamp->copied_total, tstamp->pcm_frames);
+	pr_debug("rendered %d\n", tstamp->pcm_io_frames);
+	return 0;
+}
+
+static int sst_cdev_caps(struct snd_compr_caps *caps)
+{
+	caps->num_codecs = NUM_CODEC;
+	caps->min_fragment_size = MIN_FRAGMENT_SIZE;  /* 50KB */
+	caps->max_fragment_size = MAX_FRAGMENT_SIZE;  /* 1024KB */
+	caps->min_fragments = MIN_FRAGMENT;
+	caps->max_fragments = MAX_FRAGMENT;
+	caps->codecs[0] = SND_AUDIOCODEC_MP3;
+	caps->codecs[1] = SND_AUDIOCODEC_AAC;
+	return 0;
+}
+
+static int sst_cdev_codec_caps(struct snd_compr_codec_caps *codec)
+{
+
+	if (codec->codec == SND_AUDIOCODEC_MP3) {
+		codec->num_descriptors = 2;
+		codec->descriptor[0].max_ch = 2;
+		codec->descriptor[0].sample_rates = SNDRV_PCM_RATE_8000_48000;
+		codec->descriptor[0].bit_rate[0] = 320; /* 320kbps */
+		codec->descriptor[0].bit_rate[1] = 192;
+		codec->descriptor[0].num_bitrates = 2;
+		codec->descriptor[0].profiles = 0;
+		codec->descriptor[0].modes = SND_AUDIOCHANMODE_MP3_STEREO;
+		codec->descriptor[0].formats = 0;
+	} else if (codec->codec == SND_AUDIOCODEC_AAC) {
+		codec->num_descriptors = 2;
+		codec->descriptor[1].max_ch = 2;
+		codec->descriptor[1].sample_rates = SNDRV_PCM_RATE_8000_48000;
+		codec->descriptor[1].bit_rate[0] = 320; /* 320kbps */
+		codec->descriptor[1].bit_rate[1] = 192;
+		codec->descriptor[1].num_bitrates = 2;
+		codec->descriptor[1].profiles = 0;
+		codec->descriptor[1].modes = 0;
+		codec->descriptor[1].formats =
+			(SND_AUDIOSTREAMFORMAT_MP4ADTS |
+				SND_AUDIOSTREAMFORMAT_RAW);
+	} else {
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+void sst_cdev_fragment_elapsed(int str_id)
+{
+	struct stream_info *stream;
+
+	pr_debug("fragment elapsed from firmware for str_id %d\n", str_id);
+	stream = &sst_drv_ctx->streams[str_id];
+	if (stream->compr_cb)
+		stream->compr_cb(stream->compr_cb_param);
+}
+
+/*
+ * sst_close_pcm_stream - Close PCM interface
+ *
+ * @str_id: stream id to be closed
+ *
+ * This function is called by MID sound card driver to close
+ * an existing pcm interface
+ */
+static int sst_close_pcm_stream(unsigned int str_id)
+{
+	struct stream_info *stream;
+	int retval = 0;
+
+	pr_debug("%s: Entry\n", __func__);
+	stream = get_stream_info(str_id);
+	if (!stream) {
+		pr_err("stream info is NULL for str %d!!!\n", str_id);
+		return -EINVAL;
+	}
+
+	if (stream->status == STREAM_RESET) {
+		/* silently fail here as we have cleaned the stream */
+		pr_debug("stream in reset state...\n");
+
+		retval = 0;
+		goto put;
+	}
+
+	retval = free_stream_context(str_id);
+put:
+	stream->pcm_substream = NULL;
+	stream->status = STREAM_UN_INIT;
+	stream->period_elapsed = NULL;
+	sst_drv_ctx->stream_cnt--;
+
+	/* The free_stream will return a error if there is no stream to free,
+	(i.e. the alloc failure case). And in this case the open does a put in
+	the error scenario, so skip in this case.
+		In the close we need to handle put in the success scenario and
+	the timeout error(EBUSY) scenario. */
+	if (!retval || (retval == -EBUSY))
+		sst_pm_runtime_put(sst_drv_ctx);
+	else
+		pr_err("%s: free stream returned err %d\n", __func__, retval);
+
+	pr_debug("%s: Exit\n", __func__);
+	return 0;
+}
+
+int sst_send_sync_msg(int ipc, int str_id)
+{
+	struct ipc_post *msg = NULL;
+
+	if (sst_create_ipc_msg(&msg, false))
+		return -ENOMEM;
+	sst_fill_header(&msg->header, ipc, 0, str_id);
+	return sst_drv_ctx->ops->sync_post_message(msg);
+}
+
+static inline int sst_calc_tstamp(struct pcm_stream_info *info,
+		struct snd_pcm_substream *substream,
+		struct snd_sst_tstamp *fw_tstamp)
+{
+	size_t delay_bytes, delay_frames;
+	size_t buffer_sz;
+	u32 pointer_bytes, pointer_samples;
+
+	pr_debug("mrfld ring_buffer_counter %llu in bytes\n",
+			fw_tstamp->ring_buffer_counter);
+	pr_debug("mrfld hardware_counter %llu in bytes\n",
+			 fw_tstamp->hardware_counter);
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		delay_bytes = (size_t) (fw_tstamp->ring_buffer_counter -
+					fw_tstamp->hardware_counter);
+	else
+		delay_bytes = (size_t) (fw_tstamp->hardware_counter -
+					fw_tstamp->ring_buffer_counter);
+	delay_frames = bytes_to_frames(substream->runtime, delay_bytes);
+	buffer_sz = snd_pcm_lib_buffer_bytes(substream);
+	div_u64_rem(fw_tstamp->ring_buffer_counter, buffer_sz, &pointer_bytes);
+	pointer_samples = bytes_to_samples(substream->runtime, pointer_bytes);
+
+	pr_debug("pcm delay %zu in bytes\n", delay_bytes);
+
+	info->buffer_ptr = pointer_samples / substream->runtime->channels;
+
+	info->pcm_delay = delay_frames / substream->runtime->channels;
+	pr_debug("buffer ptr %llu pcm_delay rep: %llu\n",
+			info->buffer_ptr, info->pcm_delay);
+	return 0;
+}
+
+static int sst_read_timestamp(struct pcm_stream_info *info)
+{
+	struct stream_info *stream;
+	struct snd_pcm_substream *substream;
+	struct snd_sst_tstamp fw_tstamp;
+	unsigned int str_id;
+
+	str_id = info->str_id;
+	stream = get_stream_info(str_id);
+	if (!stream)
+		return -EINVAL;
+
+	if (!stream->pcm_substream)
+		return -EINVAL;
+	substream = stream->pcm_substream;
+
+	memcpy_fromio(&fw_tstamp,
+		((void *)(sst_drv_ctx->mailbox + sst_drv_ctx->tstamp)
+			+ (str_id * sizeof(fw_tstamp))),
+		sizeof(fw_tstamp));
+	return sst_calc_tstamp(info, substream, &fw_tstamp);
+}
+
+/*
+ * sst_device_control - Set Control params
+ *
+ * @cmd: control cmd to be set
+ * @arg: command argument
+ *
+ * This function is called by MID sound card driver to set
+ * SST/Sound card controls for an opened stream.
+ * This is registered with MID driver
+ */
+static int sst_device_control(int cmd, void *arg)
+{
+	int retval = 0, str_id = 0;
+
+	if (sst_drv_ctx->sst_state != SST_FW_RUNNING)
+		return 0;
+
+	switch (cmd) {
+	case SST_SND_PAUSE:
+	case SST_SND_RESUME: {
+		struct mad_ops_wq *work = kzalloc(sizeof(*work), GFP_ATOMIC);
+		if (!work)
+			return -ENOMEM;
+		INIT_WORK(&work->wq, sst_process_mad_ops);
+		work->control_op = cmd;
+		work->stream_id = *(int *)arg;
+		queue_work(sst_drv_ctx->mad_wq, &work->wq);
+		break;
+	}
+	case SST_SND_START: {
+		struct stream_info *str_info;
+		int ipc;
+		str_id = *(int *)arg;
+		str_info = get_stream_info(str_id);
+		if (!str_info)
+			return -EINVAL;
+		ipc = IPC_IA_START_STREAM;
+		str_info->prev = str_info->status;
+		str_info->status = STREAM_RUNNING;
+		sst_start_stream(str_id);
+		break;
+	}
+	case SST_SND_DROP: {
+		struct stream_info *str_info;
+		int ipc;
+		str_id = *(int *)arg;
+		str_info = get_stream_info(str_id);
+		if (!str_info)
+			return -EINVAL;
+		ipc = IPC_IA_DROP_STREAM;
+		str_info->prev = STREAM_UN_INIT;
+		str_info->status = STREAM_INIT;
+		if (sst_drv_ctx->use_32bit_ops)
+			retval = sst_send_sync_msg(ipc, str_id);
+		else
+			retval = sst_drop_stream(str_id);
+		break;
+	}
+	case SST_SND_STREAM_INIT: {
+		struct pcm_stream_info *str_info;
+		struct stream_info *stream;
+
+		pr_debug("stream init called\n");
+		str_info = (struct pcm_stream_info *)arg;
+		str_id = str_info->str_id;
+		stream = get_stream_info(str_id);
+		if (!stream) {
+			retval = -EINVAL;
+			break;
+		}
+		pr_debug("setting the period ptrs\n");
+		stream->pcm_substream = str_info->mad_substream;
+		stream->period_elapsed = str_info->period_elapsed;
+		stream->sfreq = str_info->sfreq;
+		stream->prev = stream->status;
+		stream->status = STREAM_INIT;
+		pr_debug("pcm_substream %p, period_elapsed %p, sfreq %d, status %d\n",
+				stream->pcm_substream, stream->period_elapsed, stream->sfreq, stream->status);
+		break;
+	}
+
+	case SST_SND_BUFFER_POINTER: {
+		struct pcm_stream_info *stream_info;
+
+		stream_info = (struct pcm_stream_info *)arg;
+		retval = sst_read_timestamp(stream_info);
+		pr_debug("pointer %llu, delay %llu\n",
+			stream_info->buffer_ptr, stream_info->pcm_delay);
+		break;
+	}
+	default:
+		/* Illegal case */
+		pr_warn("illegal req\n");
+		return -EINVAL;
+	}
+
+	return retval;
+}
+
+/*
+ * sst_copy_runtime_param - copy runtime params from src to dst
+ *				 structure.
+ *
+ *@dst: destination runtime structure
+ *@src: source runtime structure
+ *
+ * This helper function is called to copy the runtime parameter
+ * structure.
+*/
+static int sst_copy_runtime_param(struct snd_sst_runtime_params *dst,
+			struct snd_sst_runtime_params *src)
+{
+	dst->type = src->type;
+	dst->str_id = src->str_id;
+	dst->size = src->size;
+	if (dst->addr) {
+		pr_err("mem allocated in prev setting, use the same memory\n");
+		return -EINVAL;
+	}
+	dst->addr = kzalloc(dst->size, GFP_KERNEL);
+	if (!dst->addr)
+		return -ENOMEM;
+	memcpy(dst->addr, src->addr, dst->size);
+	return 0;
+}
+/*
+ * sst_set_generic_params - Set generic params
+ *
+ * @cmd: control cmd to be set
+ * @arg: command argument
+ *
+ * This function is called by MID sound card driver to configure
+ * SST runtime params.
+ */
+static int sst_set_generic_params(enum sst_controls cmd, void *arg)
+{
+	int ret_val = 0;
+	pr_debug("Enter:%s, cmd:%d\n", __func__, cmd);
+
+	if (NULL == arg)
+		return -EINVAL;
+
+	switch (cmd) {
+	case SST_SET_RUNTIME_PARAMS: {
+		struct snd_sst_runtime_params *src;
+		struct snd_sst_runtime_params *dst;
+
+		src = (struct snd_sst_runtime_params *)arg;
+		dst = &(sst_drv_ctx->runtime_param.param);
+		ret_val = sst_copy_runtime_param(dst, src);
+		break;
+		}
+	case SST_SET_ALGO_PARAMS: {
+		unsigned int device_input_mixer = *((unsigned int *)arg);
+		pr_debug("LPE mixer algo param set %x\n", device_input_mixer);
+		mutex_lock(&sst_drv_ctx->mixer_ctrl_lock);
+		sst_drv_ctx->device_input_mixer = device_input_mixer;
+		mutex_unlock(&sst_drv_ctx->mixer_ctrl_lock);
+		ret_val = sst_send_lpe_mixer_algo_params();
+		break;
+	}
+	case SST_SET_BYTE_STREAM: {
+		ret_val = intel_sst_check_device();
+		if (ret_val)
+			return ret_val;
+
+		ret_val = sst_send_byte_stream_mrfld(arg);
+		sst_pm_runtime_put(sst_drv_ctx);
+		break;
+	}
+	case SST_GET_PROBE_BYTE_STREAM: {
+		struct snd_sst_probe_bytes *prb_bytes = (struct snd_sst_probe_bytes *)arg;
+
+		if (sst_drv_ctx->probe_bytes) {
+			prb_bytes->len = sst_drv_ctx->probe_bytes->len;
+			memcpy(prb_bytes->bytes, &sst_drv_ctx->probe_bytes->bytes, prb_bytes->len);
+		}
+		break;
+	}
+	case SST_SET_PROBE_BYTE_STREAM: {
+		struct snd_sst_probe_bytes *prb_bytes = (struct snd_sst_probe_bytes *)arg;
+
+		if (sst_drv_ctx->probe_bytes) {
+			sst_drv_ctx->probe_bytes->len = prb_bytes->len;
+			memcpy(&sst_drv_ctx->probe_bytes->bytes, prb_bytes->bytes, prb_bytes->len);
+		}
+
+		ret_val = intel_sst_check_device();
+		if (ret_val)
+			return ret_val;
+
+		ret_val = sst_send_probe_bytes(sst_drv_ctx);
+		break;
+	}
+	case SST_SET_VTSV_INFO: {
+		ret_val = intel_sst_check_device();
+		if (ret_val)
+			return ret_val;
+
+		ret_val = sst_send_vtsv_data_to_fw(sst_drv_ctx);
+		if (ret_val)
+			pr_err("vtsv data send failed\n");
+		sst_pm_runtime_put(sst_drv_ctx);
+		break;
+	}
+	default:
+		pr_err("Invalid cmd request:%d\n", cmd);
+		ret_val = -EINVAL;
+	}
+	return ret_val;
+}
+
+static struct sst_ops pcm_ops = {
+	.open = sst_open_pcm_stream,
+	.device_control = sst_device_control,
+	.set_generic_params = sst_set_generic_params,
+	.close = sst_close_pcm_stream,
+	.power = sst_power_control,
+};
+
+static struct compress_sst_ops compr_ops = {
+	.open = sst_cdev_open,
+	.close = sst_cdev_close,
+	.control = sst_cdev_control,
+	.tstamp = sst_cdev_tstamp,
+	.ack = sst_cdev_ack,
+	.get_caps = sst_cdev_caps,
+	.get_codec_caps = sst_cdev_codec_caps,
+	.set_metadata = sst_cdev_set_metadata,
+};
+
+
+static struct sst_device sst_dsp_device = {
+	.name = "Intel(R) SST LPE",
+	.dev = NULL,
+	.ops = &pcm_ops,
+	.compr_ops = &compr_ops,
+};
+
+/*
+ * register_sst - function to register DSP
+ *
+ * This functions registers DSP with the platform driver
+ */
+int register_sst(struct device *dev)
+{
+	int ret_val;
+	sst_dsp_device.dev = dev;
+	ret_val = sst_register_dsp(&sst_dsp_device);
+	if (ret_val)
+		pr_err("Unable to register DSP with platform driver\n");
+
+	return ret_val;
+}
+
+int unregister_sst(struct device *dev)
+{
+	return sst_unregister_dsp(&sst_dsp_device);
+}
diff --git a/sound/soc/intel/sst/sst_dsp.c b/sound/soc/intel/sst/sst_dsp.c
new file mode 100644
index 0000000..d95288d
--- /dev/null
+++ b/sound/soc/intel/sst/sst_dsp.c
@@ -0,0 +1,1990 @@
+/*
+ *  sst_dsp.c - Intel SST Driver for audio engine
+ *
+ *  Copyright (C) 2008-10	Intel Corp
+ *  Authors:	Vinod Koul <vinod.koul@intel.com>
+ *		Harsha Priya <priya.harsha@intel.com>
+ *		Dharageswari R <dharageswari.r@intel.com>
+ *		KP Jeeja <jeeja.kp@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This file contains all dsp controlling functions like firmware download,
+ * setting/resetting dsp cores, etc
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/firmware.h>
+#include <linux/dmaengine.h>
+#include <linux/intel_mid_dma.h>
+#include <linux/pm_qos.h>
+#include <linux/intel_mid_pm.h>
+#include <linux/elf.h>
+#include "../sst_platform.h"
+#include "../platform_ipc_v2.h"
+#include "sst.h"
+#include "sst_trace.h"
+
+#ifndef CONFIG_X86_64
+#define MEMCPY_TOIO memcpy_toio
+#else
+#define MEMCPY_TOIO memcpy32_toio
+#endif
+
+static struct sst_module_info sst_modules_mrfld[] = {
+	{"mp3_dec", SST_CODEC_TYPE_MP3, 0, SST_LIB_NOT_FOUND},
+	{"aac_dec", SST_CODEC_TYPE_AAC, 0, SST_LIB_NOT_FOUND},
+	{"audclass_lib", SST_ALGO_AUDCLASSIFIER, 0, SST_LIB_NOT_FOUND},
+	{"vtsv_lib", SST_ALGO_VTSV, 0, SST_LIB_NOT_FOUND},
+	{"geq_lib", SST_ALGO_GEQ, 0, SST_LIB_NOT_FOUND},
+};
+
+static struct sst_module_info sst_modules_byt[] = {
+	{"mp3_dec", SST_CODEC_TYPE_MP3, 0, SST_LIB_NOT_FOUND},
+	{"aac_dec", SST_CODEC_TYPE_AAC, 0, SST_LIB_NOT_FOUND},
+};
+
+/**
+ * memcpy32_toio: Copy using writel commands
+ *
+ * This is needed because the hardware does not support
+ * 64-bit moveq insructions while writing to PCI MMIO
+ */
+void memcpy32_toio(void *dst, const void *src, int count)
+{
+	int i;
+	const u32 *src_32 = src;
+	u32 *dst_32 = dst;
+
+	for (i = 0; i < count/sizeof(u32); i++)
+		writel(*src_32++, dst_32++);
+}
+
+/**
+ * intel_sst_reset_dsp_medfield - Resetting SST DSP
+ *
+ * This resets DSP in case of Medfield platfroms
+ */
+int intel_sst_reset_dsp_mfld(void)
+{
+	union config_status_reg csr;
+
+	pr_debug("Resetting the DSP in medfield\n");
+	mutex_lock(&sst_drv_ctx->csr_lock);
+	csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
+	csr.full |= 0x382;
+	csr.part.run_stall = 0x1;
+	sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
+	mutex_unlock(&sst_drv_ctx->csr_lock);
+
+	return 0;
+}
+
+/**
+ * sst_start_medfield - Start the SST DSP processor
+ *
+ * This starts the DSP in MRST platfroms
+ */
+int sst_start_mfld(void)
+{
+	union config_status_reg csr;
+
+	mutex_lock(&sst_drv_ctx->csr_lock);
+	csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
+	csr.part.bypass = 0;
+	sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
+	csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
+	csr.part.mfld_strb = 1;
+	sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
+	csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
+	csr.part.run_stall = 0;
+	csr.part.sst_reset = 0;
+	pr_debug("Starting the DSP_medfld %x\n", csr.full);
+	sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
+	pr_debug("Starting the DSP_medfld\n");
+	mutex_unlock(&sst_drv_ctx->csr_lock);
+
+	return 0;
+}
+/**
+ * intel_sst_reset_dsp_mrfld - Resetting SST DSP
+ *
+ * This resets DSP in case of MRFLD platfroms
+ */
+int intel_sst_reset_dsp_mrfld(void)
+{
+	union config_status_reg_mrfld csr;
+
+	pr_debug("sst: Resetting the DSP in mrfld\n");
+	mutex_lock(&sst_drv_ctx->csr_lock);
+	csr.full = sst_shim_read64(sst_drv_ctx->shim, SST_CSR);
+
+	pr_debug("value:0x%llx\n", csr.full);
+
+	csr.full |= 0x7;
+	sst_shim_write64(sst_drv_ctx->shim, SST_CSR, csr.full);
+	csr.full = sst_shim_read64(sst_drv_ctx->shim, SST_CSR);
+
+	pr_debug("value:0x%llx\n", csr.full);
+
+	csr.full &= ~(0x1);
+	sst_shim_write64(sst_drv_ctx->shim, SST_CSR, csr.full);
+
+	csr.full = sst_shim_read64(sst_drv_ctx->shim, SST_CSR);
+	pr_debug("value:0x%llx\n", csr.full);
+	mutex_unlock(&sst_drv_ctx->csr_lock);
+	return 0;
+}
+
+/**
+ * sst_start_merrifield - Start the SST DSP processor
+ *
+ * This starts the DSP in MERRIFIELD platfroms
+ */
+int sst_start_mrfld(void)
+{
+	union config_status_reg_mrfld csr;
+
+	pr_debug("sst: Starting the DSP in mrfld LALALALA\n");
+	mutex_lock(&sst_drv_ctx->csr_lock);
+	csr.full = sst_shim_read64(sst_drv_ctx->shim, SST_CSR);
+	pr_debug("value:0x%llx\n", csr.full);
+
+	csr.full |= 0x7;
+	sst_shim_write64(sst_drv_ctx->shim, SST_CSR, csr.full);
+
+	csr.full = sst_shim_read64(sst_drv_ctx->shim, SST_CSR);
+	pr_debug("value:0x%llx\n", csr.full);
+
+	csr.part.xt_snoop = 1;
+	csr.full &= ~(0x5);
+	sst_shim_write64(sst_drv_ctx->shim, SST_CSR, csr.full);
+
+	csr.full = sst_shim_read64(sst_drv_ctx->shim, SST_CSR);
+	pr_debug("sst: Starting the DSP_merrifield:%llx\n", csr.full);
+	mutex_unlock(&sst_drv_ctx->csr_lock);
+	return 0;
+}
+
+/**
+ * intel_sst_set_bypass - Sets/clears the bypass bits
+ *
+ * This sets/clears the bypass bits
+ */
+void intel_sst_set_bypass_mfld(bool set)
+{
+	union config_status_reg csr;
+
+	mutex_lock(&sst_drv_ctx->csr_lock);
+	csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
+	if (set == true)
+		csr.full |= 0x380;
+	else
+		csr.part.bypass = 0;
+	pr_debug("SetupByPass set %d Val 0x%x\n", set, csr.full);
+	sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
+	mutex_unlock(&sst_drv_ctx->csr_lock);
+
+}
+#define SST_CALC_DMA_DSTN(lpe_viewpt_rqd, ia_viewpt_addr, elf_paddr, \
+			lpe_viewpt_addr) ((lpe_viewpt_rqd) ? \
+		elf_paddr : (ia_viewpt_addr + elf_paddr - lpe_viewpt_addr))
+
+static int sst_fill_dstn(struct intel_sst_drv *sst, struct sst_info info,
+			Elf32_Phdr *pr, void **dstn, unsigned int *dstn_phys, int *mem_type)
+{
+#ifdef MRFLD_WORD_WA
+	/* work arnd-since only 4 byte align copying is only allowed for ICCM */
+	if ((pr->p_paddr >= info.iram_start) && (pr->p_paddr < info.iram_end)) {
+		size_t data_size = pr->p_filesz % SST_ICCM_BOUNDARY;
+
+		if (data_size)
+			pr->p_filesz += 4 - data_size;
+		*dstn = sst->iram + (pr->p_paddr - info.iram_start);
+		*dstn_phys = SST_CALC_DMA_DSTN(info.lpe_viewpt_rqd,
+				sst->iram_base, pr->p_paddr, info.iram_start);
+		*mem_type = 1;
+	}
+#else
+	if ((pr->p_paddr >= info.iram_start) &&
+	    (pr->p_paddr < info.iram_end)) {
+
+		*dstn = sst->iram + (pr->p_paddr - info.iram_start);
+		*dstn_phys = SST_CALC_DMA_DSTN(info.lpe_viewpt_rqd,
+				sst->iram_base, pr->p_paddr, info.iram_start);
+		*mem_type = 1;
+	}
+#endif
+	else if ((pr->p_paddr >= info.dram_start) &&
+		 (pr->p_paddr < info.dram_end)) {
+
+		*dstn = sst->dram + (pr->p_paddr - info.dram_start);
+		*dstn_phys = SST_CALC_DMA_DSTN(info.lpe_viewpt_rqd,
+				sst->dram_base, pr->p_paddr, info.dram_start);
+		*mem_type = 1;
+	} else if ((pr->p_paddr >= info.imr_start) &&
+		   (pr->p_paddr < info.imr_end)) {
+
+		*dstn = sst->ddr + (pr->p_paddr - info.imr_start);
+		*dstn_phys =  sst->ddr_base + pr->p_paddr - info.imr_start;
+		*mem_type = 0;
+	} else {
+	       return -EINVAL;
+	}
+	return 0;
+}
+
+static void sst_fill_info(struct intel_sst_drv *sst,
+			struct sst_info *info)
+{
+	/* first we setup addresses to be used for elf sections */
+	if (sst->info.iram_use) {
+		info->iram_start = sst->info.iram_start;
+		info->iram_end = sst->info.iram_end;
+	} else {
+		info->iram_start = sst->iram_base;
+		info->iram_end = sst->iram_end;
+	}
+	if (sst->info.dram_use) {
+		info->dram_start = sst->info.dram_start;
+		info->dram_end = sst->info.dram_end;
+	} else {
+		info->dram_start = sst->dram_base;
+		info->dram_end = sst->dram_end;
+	}
+	if (sst->info.imr_use) {
+		info->imr_start = sst->info.imr_start;
+		info->imr_end = sst->info.imr_end;
+	} else {
+		info->imr_start = relocate_imr_addr_mrfld(sst->ddr_base);
+		info->imr_end = relocate_imr_addr_mrfld(sst->ddr_end);
+	}
+
+	info->lpe_viewpt_rqd = sst->info.lpe_viewpt_rqd;
+	info->dma_max_len = sst->info.dma_max_len;
+	pr_debug("%s: dma_max_len 0x%x", __func__, info->dma_max_len);
+}
+
+static inline int sst_validate_elf(const struct firmware *sst_bin, bool dynamic)
+{
+	Elf32_Ehdr *elf;
+
+	BUG_ON(!sst_bin);
+
+	pr_debug("IN %s\n", __func__);
+
+	elf = (Elf32_Ehdr *)sst_bin->data;
+
+	if ((elf->e_ident[0] != 0x7F) || (elf->e_ident[1] != 'E') ||
+	    (elf->e_ident[2] != 'L') || (elf->e_ident[3] != 'F')) {
+		pr_debug("ELF Header Not found!%zu\n", sst_bin->size);
+		return -EINVAL;
+	}
+
+	if (dynamic == true) {
+		if (elf->e_type != ET_DYN) {
+			pr_err("Not a dynamic loadable library\n");
+			return -EINVAL;
+		}
+	}
+	pr_debug("Valid ELF Header...%zu\n", sst_bin->size);
+	return 0;
+}
+
+/**
+ * sst_validate_fw_image - validates the firmware signature
+ *
+ * @sst_fw_in_mem	: pointer to audio FW
+ * @size		: size of the firmware
+ * @module		: points to the FW modules
+ * @num_modules		: points to the num of modules
+ * This function validates the header signature in the FW image
+ */
+static int sst_validate_fw_image(const void *sst_fw_in_mem, unsigned long size,
+		struct fw_module_header **module, u32 *num_modules)
+{
+	struct fw_header *header;
+
+	pr_debug("%s\n", __func__);
+
+	/* Read the header information from the data pointer */
+	header = (struct fw_header *)sst_fw_in_mem;
+	pr_debug("header sign=%s size=%x modules=%x fmt=%x size=%zx\n",
+			header->signature, header->file_size, header->modules,
+			header->file_format, sizeof(*header));
+
+	/* verify FW */
+	if ((strncmp(header->signature, SST_FW_SIGN, 4) != 0) ||
+		(size != header->file_size + sizeof(*header))) {
+		/* Invalid FW signature */
+		pr_err("InvalidFW sign/filesize mismatch\n");
+		return -EINVAL;
+	}
+	*num_modules = header->modules;
+	*module = (void *)sst_fw_in_mem + sizeof(*header);
+
+	return 0;
+}
+
+/**
+ * sst_validate_library - validates the library signature
+ *
+ * @fw_lib			: pointer to FW library
+ * @slot			: pointer to the lib slot info
+ * @entry_point		: out param, which contains the module entry point
+ * This function is called before downloading the codec/postprocessing
+ * library
+ */
+static int sst_validate_library(const struct firmware *fw_lib,
+		struct lib_slot_info *slot,
+		u32 *entry_point)
+{
+	struct fw_header *header;
+	struct fw_module_header *module;
+	struct fw_block_info *block;
+	unsigned int n_blk, isize = 0, dsize = 0;
+	int err = 0;
+
+	header = (struct fw_header *)fw_lib->data;
+	if (header->modules != 1) {
+		pr_err("Module no mismatch found\n");
+		err = -EINVAL;
+		goto exit;
+	}
+	module = (void *)fw_lib->data + sizeof(*header);
+	*entry_point = module->entry_point;
+	pr_debug("Module entry point 0x%x\n", *entry_point);
+	pr_debug("Module Sign %s, Size 0x%x, Blocks 0x%x Type 0x%x\n",
+			module->signature, module->mod_size,
+			module->blocks, module->type);
+
+	block = (void *)module + sizeof(*module);
+	for (n_blk = 0; n_blk < module->blocks; n_blk++) {
+		switch (block->type) {
+		case SST_IRAM:
+			isize += block->size;
+			break;
+		case SST_DRAM:
+			dsize += block->size;
+			break;
+		default:
+			pr_err("Invalid block type for 0x%x\n", n_blk);
+			err = -EINVAL;
+			goto exit;
+		}
+		block = (void *)block + sizeof(*block) + block->size;
+	}
+	if (isize > slot->iram_size || dsize > slot->dram_size) {
+		pr_err("library exceeds size allocated\n");
+		err = -EINVAL;
+		goto exit;
+	} else
+		pr_debug("Library is safe for download...\n");
+
+	pr_debug("iram 0x%x, dram 0x%x, iram 0x%x, dram 0x%x\n",
+			isize, dsize, slot->iram_size, slot->dram_size);
+exit:
+	return err;
+
+}
+
+static bool chan_filter(struct dma_chan *chan, void *param)
+{
+	struct sst_dma *dma = (struct sst_dma *)param;
+
+	/* we only need MID_DMAC1 as that can access DSP RAMs*/
+	if (chan->device->dev == dma->dev)
+		return true;
+
+	return false;
+}
+
+static unsigned int
+sst_get_elf_sg_len(struct intel_sst_drv *sst, Elf32_Ehdr *elf, Elf32_Phdr *pr,
+		struct sst_info info)
+{
+	unsigned int i = 0, count = 0;
+
+	pr_debug("in %s: dma_max_len 0x%x\n", __func__, info.dma_max_len);
+
+	while (i < elf->e_phnum) {
+		if (pr[i].p_type == PT_LOAD) {
+
+			if ((pr[i].p_paddr >= info.iram_start) &&
+					(pr[i].p_paddr < info.iram_end &&
+						pr[i].p_filesz)) {
+				count += (pr[i].p_filesz) / info.dma_max_len;
+
+				if ((pr[i].p_filesz) % info.dma_max_len)
+					count++;
+
+			} else if ((pr[i].p_paddr >= info.dram_start) &&
+					(pr[i].p_paddr < info.dram_end &&
+						pr[i].p_filesz)) {
+				count += (pr[i].p_filesz) / info.dma_max_len;
+
+				if ((pr[i].p_filesz) % info.dma_max_len)
+					count++;
+
+			} else if ((pr[i].p_paddr >= info.imr_start) &&
+					(pr[i].p_paddr < info.imr_end &&
+						pr[i].p_filesz)) {
+				count += (pr[i].p_filesz) / info.dma_max_len;
+
+				if ((pr[i].p_filesz) % info.dma_max_len)
+					count++;
+			}
+		}
+		i++;
+	}
+
+	pr_debug("gotcha count %d\n", count);
+	return count;
+}
+
+static int
+sst_init_dma_sg_list(struct intel_sst_drv *sst, unsigned int len,
+		struct scatterlist **src, struct scatterlist **dstn)
+{
+	struct scatterlist *sg_src = NULL, *sg_dst = NULL;
+
+	sg_src = kzalloc(sizeof(*sg_src)*(len), GFP_KERNEL);
+	if (NULL == sg_src)
+		return -ENOMEM;
+	sg_init_table(sg_src, len);
+	sg_dst = kzalloc(sizeof(*sg_dst)*(len), GFP_KERNEL);
+	if (NULL == sg_dst) {
+		kfree(sg_src);
+		return -ENOMEM;
+	}
+	sg_init_table(sg_dst, len);
+	*src = sg_src;
+	*dstn = sg_dst;
+
+	return 0;
+}
+
+static int sst_alloc_dma_chan(struct sst_dma *dma)
+{
+	dma_cap_mask_t mask;
+	struct intel_mid_dma_slave *slave = &dma->slave;
+	int retval;
+	struct pci_dev *dmac = NULL;
+
+	pr_debug("%s\n", __func__);
+	dma->dev = NULL;
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_MEMCPY, mask);
+
+	if (sst_drv_ctx->pci_id == SST_MRFLD_PCI_ID)
+		dmac = pci_get_device(PCI_VENDOR_ID_INTEL,
+				      PCI_DMAC_MRFLD_ID, NULL);
+	else if (sst_drv_ctx->pci_id == PCI_DEVICE_ID_INTEL_SST_MOOR)
+		dmac = pci_get_device(PCI_VENDOR_ID_INTEL,
+			      PCI_DEVICE_ID_INTEL_AUDIO_DMAC0_MOOR, NULL);
+
+	if (!dmac && !dma->dev) {
+		pr_err("Can't find DMAC\n");
+		return -ENODEV;
+	}
+	if (dmac)
+		dma->dev = &dmac->dev;
+
+	dma->ch = dma_request_channel(mask, chan_filter, dma);
+	if (!dma->ch) {
+		pr_err("unable to request dma channel\n");
+		return -EIO;
+	}
+
+	slave->dma_slave.direction = DMA_MEM_TO_MEM;
+	slave->hs_mode = 0;
+	slave->cfg_mode = LNW_DMA_MEM_TO_MEM;
+	slave->dma_slave.src_addr_width = slave->dma_slave.dst_addr_width =
+						DMA_SLAVE_BUSWIDTH_4_BYTES;
+	slave->dma_slave.src_maxburst = slave->dma_slave.dst_maxburst =
+							LNW_DMA_MSIZE_16;
+
+	retval = dmaengine_slave_config(dma->ch, &slave->dma_slave);
+	if (retval) {
+		pr_err("unable to set slave config, err %d\n", retval);
+		dma_release_channel(dma->ch);
+		return -EIO;
+	}
+	return retval;
+}
+
+static void sst_dma_transfer_complete(void *arg)
+{
+	sst_drv_ctx  = (struct intel_sst_drv *)arg;
+	pr_debug(" sst_dma_transfer_complete\n");
+	sst_wake_up_block(sst_drv_ctx, 0, FW_DWNL_ID, FW_DWNL_ID, NULL, 0);
+}
+
+static inline int sst_dma_wait_for_completion(struct intel_sst_drv *sst)
+{
+	int ret = 0;
+	struct sst_block *block;
+	/* call prep and wait */
+	sst->desc->callback = sst_dma_transfer_complete;
+	sst->desc->callback_param = sst;
+
+	block = sst_create_block(sst, FW_DWNL_ID, FW_DWNL_ID);
+	if (block == NULL)
+		return -ENOMEM;
+
+	sst->desc->tx_submit(sst_drv_ctx->desc);
+	ret = sst_wait_timeout(sst, block);
+	if (ret)
+		dma_wait_for_async_tx(sst_drv_ctx->desc);
+	sst_free_block(sst, block);
+	return ret;
+}
+
+static int sst_dma_firmware(struct sst_dma *dma, struct sst_sg_list *sg_list)
+{
+	int retval = 0;
+	enum dma_ctrl_flags flag = DMA_CTRL_ACK;
+	struct scatterlist *sg_src_list, *sg_dst_list;
+	int length;
+	pr_debug("%s: use_lli %d\n", __func__, sst_drv_ctx->use_lli);
+
+	sg_src_list = sg_list->src;
+	sg_dst_list = sg_list->dst;
+	length = sg_list->list_len;
+
+	/* BY default PIMR is unsmasked
+	 * FW gets unmaksed dma intr too, so mask it for FW to execute on mrfld
+	 */
+	/*FIXME: Need to check if this workaround is valid for CHT*/
+	if (sst_drv_ctx->pci_id == SST_MRFLD_PCI_ID ||
+	    sst_drv_ctx->pci_id == PCI_DEVICE_ID_INTEL_SST_MOOR)
+		sst_shim_write(sst_drv_ctx->shim, SST_PIMR, 0xFFFF0034);
+
+	if (sst_drv_ctx->use_lli) {
+		sst_drv_ctx->desc = dma->ch->device->device_prep_dma_sg(dma->ch,
+					sg_dst_list, length,
+					sg_src_list, length, flag);
+		if (!sst_drv_ctx->desc)
+			return -EFAULT;
+		retval = sst_dma_wait_for_completion(sst_drv_ctx);
+		if (retval)
+			pr_err("sst_dma_firmware..timeout!\n");
+	} else {
+		struct scatterlist *sg;
+		dma_addr_t src_addr, dstn_addr;
+		int i = 0;
+
+		/* dma single block mode */
+		for_each_sg(sg_src_list, sg, length, i) {
+			pr_debug("dma desc %d, length %d\n", i, sg->length);
+			src_addr = sg_phys(sg);
+			dstn_addr = sg_phys(sg_dst_list);
+			if (sg_dst_list)
+				sg_dst_list = sg_next(sg_dst_list);
+			sst_drv_ctx->desc = dma->ch->device->device_prep_dma_memcpy(
+					dma->ch, dstn_addr, src_addr, sg->length, flag);
+			if (!sst_drv_ctx->desc)
+				return -EFAULT;
+			retval = sst_dma_wait_for_completion(sst_drv_ctx);
+			if (retval)
+				pr_err("sst_dma_firmware..timeout!\n");
+
+		}
+	}
+
+	return retval;
+}
+
+/*
+ * sst_fill_sglist - Fill the sg list
+ *
+ * @from: src address of the fw
+ * @to: virtual address of IRAM/DRAM
+ * @block_size: size of the block
+ * @sg_src: source scatterlist pointer
+ * @sg_dst: Destination scatterlist pointer
+ * @fw_sg_list: Pointer to the sg_list
+ * @dma_max_len: maximum len of the DMA block
+ *
+ * Parses modules that need to be placed in SST IRAM and DRAM
+ * and stores them in a sg list for transfer
+ * returns error or 0 if list creation fails or pass.
+  */
+static int sst_fill_sglist(unsigned long from, unsigned long to,
+		u32 block_size, struct scatterlist **sg_src, struct scatterlist **sg_dstn,
+		struct sst_sg_list *fw_sg_list, u32 dma_max_len)
+{
+	u32 offset = 0;
+	int len = 0;
+	unsigned long dstn, src;
+
+	pr_debug("%s entry", __func__);
+	if (!sg_src || !sg_dstn)
+		return -EINVAL;
+
+	do {
+		dstn = (unsigned long) (to + offset);
+		src = (unsigned long) (from + offset);
+
+		/* split blocks to dma_max_len */
+
+		len = block_size - offset;
+		pr_debug("DMA blk src %lx,dstn %lx,len %d,offset %d, size %d\n",
+			src, dstn, len, offset, block_size);
+		if (len > dma_max_len) {
+			pr_debug("block size exceeds %d\n", dma_max_len);
+			len = dma_max_len;
+			offset += len;
+		} else {
+			pr_debug("Node length less that %d\n", dma_max_len);
+			offset = 0;
+		}
+
+		if (!(*sg_src) || !(*sg_dstn))
+			return -ENOMEM;
+
+		sg_set_page(*sg_src, virt_to_page((void *) src), len,
+				offset_in_page((void *) src));
+		sg_set_page(*sg_dstn, virt_to_page((void *) dstn), len,
+				offset_in_page((void *) dstn));
+
+		*sg_src = sg_next(*sg_src);
+		*sg_dstn = sg_next(*sg_dstn);
+
+		/* TODO: is sg_idx required? */
+		if (sst_drv_ctx->info.use_elf == true)
+			fw_sg_list->sg_idx++;
+	} while (offset > 0);
+
+	return 0;
+}
+
+static int sst_parse_elf_module_dma(struct intel_sst_drv *sst, const void *fw,
+		 struct sst_info info, Elf32_Phdr *pr,
+		 struct scatterlist **sg_src, struct scatterlist **sg_dstn,
+		 struct sst_sg_list *fw_sg_list)
+{
+	unsigned long dstn, src;
+	unsigned int dstn_phys;
+	int ret_val = 0;
+	int mem_type;
+
+	ret_val = sst_fill_dstn(sst, info, pr, (void *)&dstn, &dstn_phys, &mem_type);
+	if (ret_val)
+		return ret_val;
+
+	dstn = (unsigned long) phys_to_virt(dstn_phys);
+	src = (unsigned long) (fw + pr->p_offset);
+
+	ret_val = sst_fill_sglist(src, dstn, pr->p_filesz,
+				sg_src, sg_dstn, fw_sg_list, sst->info.dma_max_len);
+
+	return ret_val;
+}
+
+static int
+sst_parse_elf_fw_dma(struct intel_sst_drv *sst, const void *fw_in_mem,
+			struct sst_sg_list *fw_sg_list)
+{
+	int i = 0, ret = 0;
+	Elf32_Ehdr *elf;
+	Elf32_Phdr *pr;
+	struct sst_info info;
+	struct scatterlist *sg_src = NULL, *sg_dst = NULL;
+	unsigned int sg_len;
+
+	BUG_ON(!fw_in_mem);
+
+	elf = (Elf32_Ehdr *)fw_in_mem;
+	pr = (Elf32_Phdr *) (fw_in_mem + elf->e_phoff);
+	pr_debug("%s entry\n", __func__);
+
+	sst_fill_info(sst, &info);
+
+	sg_len = sst_get_elf_sg_len(sst, elf, pr, info);
+	if (sg_len == 0) {
+		pr_err("we got NULL sz ELF, abort\n");
+		return -EIO;
+	}
+
+	if (sst_init_dma_sg_list(sst, sg_len, &sg_src, &sg_dst)) {
+		ret = -ENOMEM;
+		goto err1;
+	}
+
+	fw_sg_list->src = sg_src;
+	fw_sg_list->dst = sg_dst;
+	fw_sg_list->list_len = sg_len;
+	fw_sg_list->sg_idx = 0;
+
+	while (i < elf->e_phnum) {
+		if ((pr[i].p_type == PT_LOAD) && (pr[i].p_filesz)) {
+			ret = sst_parse_elf_module_dma(sst, fw_in_mem, info,
+					&pr[i], &sg_src, &sg_dst, fw_sg_list);
+			if (ret)
+				goto err;
+		}
+		i++;
+	}
+	return 0;
+err:
+	kfree(fw_sg_list->src);
+	kfree(fw_sg_list->dst);
+err1:
+	fw_sg_list->src = NULL;
+	fw_sg_list->dst = NULL;
+	fw_sg_list->list_len = 0;
+	fw_sg_list->sg_idx = 0;
+
+	return ret;
+}
+
+/**
+ * sst_parse_module_dma - Parse audio FW modules and populate the dma list
+ *
+ * @sst_ctx	: sst driver context
+ * @module	: FW module header
+ * @sg_list	: Pointer to the sg_list to be populated
+ * Count the length for scattergather list
+ * and create the scattergather list of same length
+ * returns error or 0 if module sizes are proper
+ */
+static int sst_parse_module_dma(struct intel_sst_drv *sst_ctx,
+				struct fw_module_header *module,
+				struct sst_sg_list *sg_list)
+{
+	struct fw_block_info *block;
+	u32 count;
+	unsigned long ram, src;
+	int retval, sg_len = 0;
+	struct scatterlist *sg_src, *sg_dst;
+
+	pr_debug("module sign %s size %x blocks %x type %x\n",
+			module->signature, module->mod_size,
+			module->blocks, module->type);
+	pr_debug("module entrypoint 0x%x\n", module->entry_point);
+
+	block = (void *)module + sizeof(*module);
+
+	for (count = 0; count < module->blocks; count++) {
+		if (block->type != SST_CUSTOM_INFO) {
+			sg_len += (block->size) / sst_drv_ctx->info.dma_max_len;
+			if (block->size % sst_drv_ctx->info.dma_max_len)
+				sg_len = sg_len + 1;
+		}
+		block = (void *)block + sizeof(*block) + block->size;
+	}
+
+	if (sst_init_dma_sg_list(sst_ctx, sg_len, &sg_src, &sg_dst)) {
+		retval = -ENOMEM;
+		goto err1;
+	}
+
+	sg_list->src = sg_src;
+	sg_list->dst = sg_dst;
+	sg_list->list_len = sg_len;
+
+	block = (void *)module + sizeof(*module);
+
+	for (count = 0; count < module->blocks; count++) {
+		if (block->size <= 0) {
+			pr_err("block size invalid\n");
+			retval = -EINVAL;
+			goto err;
+		}
+		switch (block->type) {
+		case SST_IRAM:
+			ram = sst_ctx->iram_base;
+			break;
+		case SST_DRAM:
+			ram = sst_ctx->dram_base;
+			break;
+		case SST_DDR:
+			ram = sst_drv_ctx->ddr_base;
+			break;
+		case SST_CUSTOM_INFO:
+			block = (void *)block + sizeof(*block) + block->size;
+			continue;
+		default:
+			pr_err("wrong ram type0x%x in block0x%x\n",
+					block->type, count);
+			retval = -EINVAL;
+			goto err;
+		}
+
+		/*converting from physical to virtual because
+		scattergather list works on virtual pointers*/
+		ram = (unsigned long) phys_to_virt(ram);
+		ram = (unsigned long)(ram + block->ram_offset);
+		src = (unsigned long) (void *)block + sizeof(*block);
+
+		retval = sst_fill_sglist(src, ram,
+				block->size, &sg_src, &sg_dst,
+				sg_list, sst_ctx->info.dma_max_len);
+		if (retval)
+			goto err;
+
+		block = (void *)block + sizeof(*block) + block->size;
+	}
+	return 0;
+err:
+	kfree(sg_list->src);
+	kfree(sg_list->dst);
+err1:
+	sg_list->src = NULL;
+	sg_list->dst = NULL;
+	sg_list->list_len = 0;
+
+	return retval;
+}
+
+/**
+ * sst_parse_fw_dma - parse the firmware image & populate the list for dma
+ *
+ * @sst_fw_in_mem	: pointer to audio fw
+ * @size		: size of the firmware
+ * @fw_list		: pointer to sst_sg_list to be populated
+ * This function parses the FW image and saves the parsed image in the list
+ * for dma
+ */
+static int sst_parse_fw_dma(const void *sst_fw_in_mem, unsigned long size,
+				struct sst_sg_list *fw_list)
+{
+	struct fw_module_header *module;
+	u32 count, num_modules;
+	int ret_val;
+
+	ret_val = sst_validate_fw_image(sst_fw_in_mem, size,
+				&module, &num_modules);
+	if (ret_val)
+		return ret_val;
+
+	for (count = 0; count < num_modules; count++) {
+		/* module */
+		ret_val = sst_parse_module_dma(sst_drv_ctx, module, fw_list);
+		if (ret_val)
+			return ret_val;
+		module = (void *)module + sizeof(*module) + module->mod_size ;
+	}
+
+	return 0;
+}
+
+static void sst_dma_free_resources(struct sst_dma *dma)
+{
+	pr_debug("entry:%s\n", __func__);
+
+	dma_release_channel(dma->ch);
+}
+
+void sst_fill_config(struct intel_sst_drv *sst_ctx, unsigned int offset)
+{
+	struct sst_fill_config sst_config;
+
+	 if (!(sst_ctx->pdata->bdata && sst_ctx->pdata->pdata))
+		return;
+
+	sst_config.sign = SST_CONFIG_SSP_SIGN;
+	memcpy(&sst_config.sst_bdata, sst_ctx->pdata->bdata, sizeof(struct sst_board_config_data));
+	memcpy(&sst_config.sst_pdata, sst_ctx->pdata->pdata, sizeof(struct sst_platform_config_data));
+	sst_config.shim_phy_add = sst_ctx->shim_phy_add;
+	sst_config.mailbox_add = sst_ctx->mailbox_add;
+	MEMCPY_TOIO(sst_ctx->dram + offset, &sst_config, sizeof(sst_config));
+
+}
+
+/**
+ * sst_do_dma - function allocs and initiates the DMA
+ *
+ * @sg_list: Pointer to dma list on which the dma needs to be initiated
+ *
+ * Triggers the DMA
+ */
+static int sst_do_dma(struct sst_sg_list *sg_list)
+{
+	int ret_val;
+
+	/* get a dmac channel */
+	ret_val = sst_alloc_dma_chan(&sst_drv_ctx->dma);
+	if (ret_val)
+		return ret_val;
+
+	/* allocate desc for transfer and submit */
+	ret_val = sst_dma_firmware(&sst_drv_ctx->dma, sg_list);
+
+	sst_dma_free_resources(&sst_drv_ctx->dma);
+
+	return ret_val;
+}
+
+/*
+ * sst_fill_memcpy_list - Fill the memcpy list
+ *
+ * @memcpy_list: List to be filled
+ * @destn: Destination addr to be filled in the list
+ * @src: Source addr to be filled in the list
+ * @size: Size to be filled in the list
+ *
+ * Adds the node to the list after required fields
+ * are populated in the node
+ */
+
+static int sst_fill_memcpy_list(struct list_head *memcpy_list,
+			void *destn, const void *src, u32 size, bool is_io)
+{
+	struct sst_memcpy_list *listnode;
+
+	listnode = kzalloc(sizeof(*listnode), GFP_KERNEL);
+	if (listnode == NULL)
+		return -ENOMEM;
+	listnode->dstn = destn;
+	listnode->src = src;
+	listnode->size = size;
+	listnode->is_io = is_io;
+	list_add_tail(&listnode->memcpylist, memcpy_list);
+
+	return 0;
+}
+
+static int sst_parse_elf_module_memcpy(struct intel_sst_drv *sst,
+		const void *fw, struct sst_info info, Elf32_Phdr *pr,
+		struct list_head *memcpy_list)
+{
+	void *dstn;
+	unsigned int dstn_phys;
+	int ret_val = 0;
+	int mem_type;
+
+	ret_val = sst_fill_dstn(sst, info, pr, &dstn, &dstn_phys, &mem_type);
+	if (ret_val)
+		return ret_val;
+
+	ret_val = sst_fill_memcpy_list(memcpy_list, dstn,
+			(void *)fw + pr->p_offset, pr->p_filesz, mem_type);
+	if (ret_val)
+		return ret_val;
+
+	return 0;
+}
+
+static int
+sst_parse_elf_fw_memcpy(struct intel_sst_drv *sst, const void *fw_in_mem,
+			struct list_head *memcpy_list)
+{
+	int i = 0;
+
+	Elf32_Ehdr *elf;
+	Elf32_Phdr *pr;
+	struct sst_info info;
+
+	BUG_ON(!fw_in_mem);
+
+	elf = (Elf32_Ehdr *)fw_in_mem;
+	pr = (Elf32_Phdr *) (fw_in_mem + elf->e_phoff);
+	pr_debug("%s entry\n", __func__);
+
+	sst_fill_info(sst, &info);
+
+	while (i < elf->e_phnum) {
+		if (pr[i].p_type == PT_LOAD)
+			sst_parse_elf_module_memcpy(sst, fw_in_mem, info,
+					&pr[i], memcpy_list);
+		i++;
+	}
+	return 0;
+}
+
+/**
+ * sst_parse_module_memcpy - Parse audio FW modules and populate the memcpy list
+ *
+ * @module		: FW module header
+ * @memcpy_list	: Pointer to the list to be populated
+ * Create the memcpy list as the number of block to be copied
+ * returns error or 0 if module sizes are proper
+ */
+static int sst_parse_module_memcpy(struct fw_module_header *module,
+				struct list_head *memcpy_list)
+{
+	struct fw_block_info *block;
+	u32 count;
+	int ret_val = 0;
+	void __iomem *ram_iomem;
+
+	pr_debug("module sign %s size %x blocks %x type %x\n",
+			module->signature, module->mod_size,
+			module->blocks, module->type);
+	pr_debug("module entrypoint 0x%x\n", module->entry_point);
+
+	block = (void *)module + sizeof(*module);
+
+	for (count = 0; count < module->blocks; count++) {
+		if (block->size <= 0) {
+			pr_err("block size invalid\n");
+			return -EINVAL;
+		}
+		switch (block->type) {
+		case SST_IRAM:
+			ram_iomem = sst_drv_ctx->iram;
+			break;
+		case SST_DRAM:
+			ram_iomem = sst_drv_ctx->dram;
+			break;
+		case SST_DDR:
+			ram_iomem = sst_drv_ctx->ddr;
+			break;
+		case SST_CUSTOM_INFO:
+			block = (void *)block + sizeof(*block) + block->size;
+			continue;
+		default:
+			pr_err("wrong ram type0x%x in block0x%x\n",
+					block->type, count);
+			return -EINVAL;
+		}
+
+		ret_val = sst_fill_memcpy_list(memcpy_list,
+				ram_iomem + block->ram_offset,
+				(void *)block + sizeof(*block), block->size, 1);
+		if (ret_val)
+			return ret_val;
+
+		block = (void *)block + sizeof(*block) + block->size;
+	}
+	return 0;
+}
+
+/**
+ * sst_parse_fw_memcpy - parse the firmware image & populate the list for memcpy
+ *
+ * @sst_fw_in_mem	: pointer to audio fw
+ * @size		: size of the firmware
+ * @fw_list		: pointer to list_head to be populated
+ * This function parses the FW image and saves the parsed image in the list
+ * for memcpy
+ */
+static int sst_parse_fw_memcpy(const void *sst_fw_in_mem, unsigned long size,
+				struct list_head *fw_list)
+{
+	struct fw_module_header *module;
+	u32 count, num_modules;
+	int ret_val;
+
+	ret_val = sst_validate_fw_image(sst_fw_in_mem, size,
+				&module, &num_modules);
+	if (ret_val)
+		return ret_val;
+
+	for (count = 0; count < num_modules; count++) {
+		/* module */
+		ret_val = sst_parse_module_memcpy(module, fw_list);
+		if (ret_val)
+			return ret_val;
+		module = (void *)module + sizeof(*module) + module->mod_size ;
+	}
+
+	return 0;
+}
+
+/**
+ * sst_do_memcpy - function initiates the memcpy
+ *
+ * @memcpy_list: Pter to memcpy list on which the memcpy needs to be initiated
+ *
+ * Triggers the memcpy
+ */
+static void sst_do_memcpy(struct list_head *memcpy_list)
+{
+	struct sst_memcpy_list *listnode;
+
+	list_for_each_entry(listnode, memcpy_list, memcpylist) {
+		if (listnode->is_io == true)
+			MEMCPY_TOIO((void __iomem *)listnode->dstn, listnode->src,
+							listnode->size);
+		else
+			memcpy(listnode->dstn, listnode->src, listnode->size);
+	}
+}
+
+static void sst_memcpy_free_lib_resources(void)
+{
+	struct sst_memcpy_list *listnode, *tmplistnode;
+
+	pr_debug("entry:%s\n", __func__);
+
+	/*Free the list*/
+	if (!list_empty(&sst_drv_ctx->libmemcpy_list)) {
+		list_for_each_entry_safe(listnode, tmplistnode,
+				&sst_drv_ctx->libmemcpy_list, memcpylist) {
+			list_del(&listnode->memcpylist);
+			kfree(listnode);
+		}
+	}
+}
+
+void sst_memcpy_free_resources(void)
+{
+	struct sst_memcpy_list *listnode, *tmplistnode;
+
+	pr_debug("entry:%s\n", __func__);
+
+	/*Free the list*/
+	if (!list_empty(&sst_drv_ctx->memcpy_list)) {
+		list_for_each_entry_safe(listnode, tmplistnode,
+				&sst_drv_ctx->memcpy_list, memcpylist) {
+			list_del(&listnode->memcpylist);
+			kfree(listnode);
+		}
+	}
+	sst_memcpy_free_lib_resources();
+}
+
+void sst_firmware_load_cb(const struct firmware *fw, void *context)
+{
+	struct intel_sst_drv *ctx = context;
+	int ret = 0;
+
+	pr_debug("In %s\n", __func__);
+
+	if (fw == NULL) {
+		pr_err("request fw failed\n");
+		return;
+	}
+
+	mutex_lock(&sst_drv_ctx->sst_lock);
+
+	if (sst_drv_ctx->sst_state != SST_RESET ||
+			ctx->fw_in_mem != NULL)
+		goto out;
+
+	pr_debug("Request Fw completed\n");
+	trace_sst_fw_download("End of FW request", ctx->sst_state);
+
+	if (ctx->info.use_elf == true)
+		ret = sst_validate_elf(fw, false);
+
+	if (ret != 0) {
+		pr_err("FW image invalid...\n");
+		goto out;
+	}
+
+	ctx->fw_in_mem = kzalloc(fw->size, GFP_KERNEL);
+	if (!ctx->fw_in_mem) {
+		pr_err("%s unable to allocate memory\n", __func__);
+		goto out;
+	}
+
+	pr_debug("copied fw to %p", ctx->fw_in_mem);
+	pr_debug("phys: %lx", (unsigned long)virt_to_phys(ctx->fw_in_mem));
+	memcpy(ctx->fw_in_mem, fw->data, fw->size);
+
+	trace_sst_fw_download("Start FW parsing", ctx->sst_state);
+	if (ctx->use_dma) {
+		if (ctx->info.use_elf == true)
+			ret = sst_parse_elf_fw_dma(ctx, ctx->fw_in_mem,
+							&ctx->fw_sg_list);
+		else
+			ret = sst_parse_fw_dma(ctx->fw_in_mem, fw->size,
+							&ctx->fw_sg_list);
+	} else {
+		if (ctx->info.use_elf == true)
+			ret = sst_parse_elf_fw_memcpy(ctx, ctx->fw_in_mem,
+							&ctx->memcpy_list);
+		else
+			ret = sst_parse_fw_memcpy(ctx->fw_in_mem, fw->size,
+							&ctx->memcpy_list);
+	}
+	trace_sst_fw_download("End FW parsing", ctx->sst_state);
+	if (ret) {
+		kfree(ctx->fw_in_mem);
+		ctx->fw_in_mem = NULL;
+		goto out;
+	}
+	/* If static module download(download at boot time) is supported,
+	 * set the flag to indicate lib download is to be done
+	 */
+	if (ctx->pdata->lib_info)
+		if (ctx->pdata->lib_info->mod_ddr_dnld)
+			ctx->lib_dwnld_reqd = true;
+
+out:
+	mutex_unlock(&sst_drv_ctx->sst_lock);
+	if (fw != NULL)
+		release_firmware(fw);
+}
+
+/*
+ * sst_request_fw - requests audio fw from kernel and saves a copy
+ *
+ * This function requests the SST FW from the kernel, parses it and
+ * saves a copy in the driver context
+ */
+static int sst_request_fw(struct intel_sst_drv *sst)
+{
+	int retval = 0;
+	char name[20];
+	const struct firmware *fw;
+
+	snprintf(name, sizeof(name), "%s%04x%s", "fw_sst_",
+				sst->pci_id, ".bin");
+	pr_debug("Requesting FW %s now...\n", name);
+
+	retval = request_firmware(&fw, name, sst->dev);
+	if (fw == NULL) {
+		pr_err("fw is returning as null\n");
+		return -EINVAL;
+	}
+	if (retval) {
+		pr_err("request fw failed %d\n", retval);
+		return retval;
+	}
+	trace_sst_fw_download("End of FW request", sst->sst_state);
+	if (sst->info.use_elf == true)
+		retval = sst_validate_elf(fw, false);
+	if (retval != 0) {
+		pr_err("FW image invalid...\n");
+		goto end_release;
+	}
+	sst->fw_in_mem = kzalloc(fw->size, GFP_KERNEL);
+	if (!sst->fw_in_mem) {
+		pr_err("%s unable to allocate memory\n", __func__);
+		retval = -ENOMEM;
+		goto end_release;
+	}
+	pr_debug("copied fw to %p", sst->fw_in_mem);
+	pr_debug("phys: %lx", (unsigned long)virt_to_phys(sst->fw_in_mem));
+	memcpy(sst->fw_in_mem, fw->data, fw->size);
+	trace_sst_fw_download("Start FW parsing", sst->sst_state);
+	if (sst->use_dma) {
+		if (sst->info.use_elf == true)
+			retval = sst_parse_elf_fw_dma(sst, sst->fw_in_mem,
+							&sst->fw_sg_list);
+		else
+			retval = sst_parse_fw_dma(sst->fw_in_mem, fw->size,
+							&sst->fw_sg_list);
+	} else {
+		if (sst->info.use_elf == true)
+			retval = sst_parse_elf_fw_memcpy(sst, sst->fw_in_mem,
+							&sst->memcpy_list);
+		else
+			retval = sst_parse_fw_memcpy(sst->fw_in_mem, fw->size,
+							&sst->memcpy_list);
+	}
+	trace_sst_fw_download("End FW parsing", sst->sst_state);
+	if (retval) {
+		kfree(sst->fw_in_mem);
+		sst->fw_in_mem = NULL;
+	}
+
+	/* If static module download(download at boot time) is supported,
+	 * set the flag to indicate lib download is to be done
+	 */
+	if (sst->pdata->lib_info)
+		if (sst->pdata->lib_info->mod_ddr_dnld)
+			sst->lib_dwnld_reqd = true;
+end_release:
+	release_firmware(fw);
+	return retval;
+}
+
+static inline void print_lib_info(struct snd_sst_lib_download_info *resp)
+{
+	pr_debug("codec Type %d Ver %d Built %s: %s\n",
+		resp->dload_lib.lib_info.lib_type,
+		resp->dload_lib.lib_info.lib_version,
+		resp->dload_lib.lib_info.b_date,
+		resp->dload_lib.lib_info.b_time);
+}
+
+/* sst_download_library - This function is called when any
+ codec/post processing library needs to be downloaded */
+static int sst_download_library(const struct firmware *fw_lib,
+				struct snd_sst_lib_download_info *lib)
+{
+	int ret_val = 0;
+
+	/* send IPC message and wait */
+	u8 pvt_id;
+	struct ipc_post *msg = NULL;
+	union config_status_reg csr;
+	struct snd_sst_str_type str_type = {0};
+	int retval = 0;
+	void *codec_fw;
+	struct sst_block *block;
+
+	pvt_id = sst_assign_pvt_id(sst_drv_ctx);
+	ret_val = sst_create_block_and_ipc_msg(&msg, true, sst_drv_ctx, &block,
+				IPC_IA_PREP_LIB_DNLD, pvt_id);
+	if (ret_val) {
+		pr_err("library download failed\n");
+		return ret_val;
+	}
+
+	sst_fill_header(&msg->header, IPC_IA_PREP_LIB_DNLD, 1, pvt_id);
+	msg->header.part.data = sizeof(u32) + sizeof(str_type);
+	str_type.codec_type = lib->dload_lib.lib_info.lib_type;
+	/*str_type.pvt_id = pvt_id;*/
+	memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
+	memcpy(msg->mailbox_data + sizeof(u32), &str_type, sizeof(str_type));
+	sst_add_to_dispatch_list_and_post(sst_drv_ctx, msg);
+	retval = sst_wait_timeout(sst_drv_ctx, block);
+	if (block->data) {
+		struct snd_sst_str_type *str_type =
+			(struct snd_sst_str_type *)block->data;
+		if (str_type->result) {
+			/* error */
+			pr_err("Prep codec downloaded failed %d\n",
+					str_type->result);
+			retval = -EIO;
+			goto free_block;
+		}
+		kfree(block->data);
+	} else if (retval != 0) {
+		retval = -EIO;
+		goto free_block;
+	}
+	pr_debug("FW responded, ready for download now...\n");
+	codec_fw = kzalloc(fw_lib->size, GFP_KERNEL);
+	if (!codec_fw) {
+		memset(lib, 0, sizeof(*lib));
+		retval = -ENOMEM;
+		goto send_ipc;
+	}
+	memcpy(codec_fw, fw_lib->data, fw_lib->size);
+
+	if (sst_drv_ctx->use_dma)
+		retval = sst_parse_fw_dma(codec_fw, fw_lib->size,
+				 &sst_drv_ctx->library_list);
+	else
+		retval = sst_parse_fw_memcpy(codec_fw, fw_lib->size,
+				 &sst_drv_ctx->libmemcpy_list);
+
+	if (retval) {
+		memset(lib, 0, sizeof(*lib));
+		goto send_ipc;
+	}
+
+	/* downloading on success */
+	mutex_lock(&sst_drv_ctx->sst_lock);
+	mutex_lock(&sst_drv_ctx->csr_lock);
+
+	sst_drv_ctx->sst_state = SST_FW_LOADING;
+	csr.full = readl(sst_drv_ctx->shim + SST_CSR);
+	csr.part.run_stall = 1;
+	sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
+
+	csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
+	csr.part.bypass = 0x7;
+	sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
+	mutex_unlock(&sst_drv_ctx->csr_lock);
+
+	if (sst_drv_ctx->use_dma) {
+		ret_val = sst_do_dma(&sst_drv_ctx->library_list);
+		if (ret_val) {
+			pr_err("sst_do_dma failed, abort\n");
+			memset(lib, 0, sizeof(*lib));
+		}
+	} else
+		sst_do_memcpy(&sst_drv_ctx->libmemcpy_list);
+	/* set the FW to running again */
+	mutex_lock(&sst_drv_ctx->csr_lock);
+	csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
+	csr.part.bypass = 0x0;
+	sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
+
+	csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
+	csr.part.run_stall = 0;
+	sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
+	mutex_unlock(&sst_drv_ctx->csr_lock);
+send_ipc:
+	/* send download complete and wait */
+	if (sst_create_ipc_msg(&msg, true)) {
+		retval = -ENOMEM;
+		goto free_resources;
+	}
+
+	block->condition = false;
+	block->msg_id = IPC_IA_LIB_DNLD_CMPLT;
+	sst_fill_header(&msg->header, IPC_IA_LIB_DNLD_CMPLT, 1, pvt_id);
+	msg->header.part.data = sizeof(u32) + sizeof(*lib);
+	lib->pvt_id = pvt_id;
+	memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
+	memcpy(msg->mailbox_data + sizeof(u32), lib, sizeof(*lib));
+	sst_add_to_dispatch_list_and_post(sst_drv_ctx, msg);
+	pr_debug("Waiting for FW response Download complete\n");
+	retval = sst_wait_timeout(sst_drv_ctx, block);
+	sst_drv_ctx->sst_state = SST_FW_RUNNING;
+	if (block->data) {
+		struct snd_sst_lib_download_info *resp = block->data;
+		retval = resp->result;
+		if (retval) {
+			pr_err("err in lib dload %x\n", resp->result);
+			goto free_resources;
+		} else {
+			pr_debug("Codec download complete...\n");
+			print_lib_info(resp);
+		}
+	} else if (retval) {
+		/* error */
+		retval = -EIO;
+		goto free_resources;
+	}
+
+	pr_debug("FW success on Download complete\n");
+
+free_resources:
+	if (sst_drv_ctx->use_dma) {
+		kfree(sst_drv_ctx->library_list.src);
+		kfree(sst_drv_ctx->library_list.dst);
+		sst_drv_ctx->library_list.list_len = 0;
+	}
+
+	kfree(codec_fw);
+	mutex_unlock(&sst_drv_ctx->sst_lock);
+free_block:
+	sst_free_block(sst_drv_ctx, block);
+	return retval;
+}
+
+/*
+ * Writing the DDR physical base to DCCM offset
+ * so that FW can use it to setup TLB
+ */
+static void sst_dccm_config_write(void __iomem *dram_base, unsigned int ddr_base)
+{
+	void __iomem *addr;
+	u32 bss_reset = 0;
+
+	addr = (void __iomem *)(dram_base + MRFLD_FW_DDR_BASE_OFFSET);
+	MEMCPY_TOIO(addr, (void *)&ddr_base, sizeof(u32));
+	bss_reset |= (1 << MRFLD_FW_BSS_RESET_BIT);
+	addr = (void __iomem *)(dram_base + MRFLD_FW_FEATURE_BASE_OFFSET);
+	MEMCPY_TOIO(addr, &bss_reset, sizeof(u32));
+	pr_debug("%s: config written to DCCM\n", __func__);
+}
+
+void sst_post_download_mrfld(struct intel_sst_drv *ctx)
+{
+	sst_dccm_config_write(ctx->dram, ctx->ddr_base);
+	/* For mrfld, download all libraries the first time fw is
+	 * downloaded */
+	pr_debug("%s: lib_dwnld = %u\n", __func__, ctx->lib_dwnld_reqd);
+	if (ctx->lib_dwnld_reqd) {
+		sst_load_all_modules_elf(ctx, sst_modules_mrfld, ARRAY_SIZE(sst_modules_mrfld));
+		ctx->lib_dwnld_reqd = false;
+	}
+}
+
+void sst_post_download_ctp(struct intel_sst_drv *ctx)
+{
+	sst_fill_config(ctx, 0);
+}
+
+void sst_post_download_byt(struct intel_sst_drv *ctx)
+{
+	sst_dccm_config_write(ctx->dram, ctx->ddr_base);
+	sst_fill_config(ctx, 2 * sizeof(u32));
+
+	pr_debug("%s: lib_dwnld = %u\n", __func__, ctx->lib_dwnld_reqd);
+	if (ctx->lib_dwnld_reqd) {
+		sst_load_all_modules_elf(ctx, sst_modules_byt,
+					ARRAY_SIZE(sst_modules_byt));
+		ctx->lib_dwnld_reqd = false;
+	}
+}
+
+static void sst_init_lib_mem_mgr(struct intel_sst_drv *ctx)
+{
+	struct sst_mem_mgr *mgr = &ctx->lib_mem_mgr;
+	const struct sst_lib_dnld_info *lib_info = ctx->pdata->lib_info;
+
+	memset(mgr, 0, sizeof(*mgr));
+	mgr->current_base = lib_info->mod_base + lib_info->mod_table_offset
+						+ lib_info->mod_table_size;
+	mgr->avail = lib_info->mod_end - mgr->current_base + 1;
+
+	pr_debug("current base = 0x%lx , avail = 0x%x\n",
+		(unsigned long)mgr->current_base, mgr->avail);
+}
+
+/**
+ * sst_load_fw - function to load FW into DSP
+ *
+ *
+ * Transfers the FW to DSP using dma/memcpy
+ */
+int sst_load_fw(void)
+{
+	int ret_val = 0;
+	struct sst_block *block;
+
+	pr_debug("sst_load_fw\n");
+
+	if (sst_drv_ctx->sst_state !=  SST_RESET ||
+			sst_drv_ctx->sst_state == SST_SHUTDOWN)
+		return -EAGAIN;
+
+	if (!sst_drv_ctx->fw_in_mem) {
+		trace_sst_fw_download("Req FW sent in check device",
+					sst_drv_ctx->sst_state);
+		pr_debug("sst: FW not in memory retry to download\n");
+		ret_val = sst_request_fw(sst_drv_ctx);
+		if (ret_val)
+			return ret_val;
+	}
+
+	BUG_ON(!sst_drv_ctx->fw_in_mem);
+	block = sst_create_block(sst_drv_ctx, 0, FW_DWNL_ID);
+	if (block == NULL)
+		return -ENOMEM;
+
+	/* Prevent C-states beyond C6 */
+	pm_qos_update_request(sst_drv_ctx->qos, CSTATE_EXIT_LATENCY_S0i1 - 1);
+
+	sst_drv_ctx->sst_state = SST_FW_LOADING;
+
+	ret_val = sst_drv_ctx->ops->reset();
+	if (ret_val)
+		goto restore;
+
+	trace_sst_fw_download("Start FW copy", sst_drv_ctx->sst_state);
+	if (sst_drv_ctx->use_dma) {
+		ret_val = sst_do_dma(&sst_drv_ctx->fw_sg_list);
+		if (ret_val) {
+			pr_err("sst_do_dma failed, abort\n");
+			goto restore;
+		}
+	} else {
+		sst_do_memcpy(&sst_drv_ctx->memcpy_list);
+	}
+
+	trace_sst_fw_download("Post download for Lib start",
+			sst_drv_ctx->sst_state);
+	/* Write the DRAM/DCCM config before enabling FW */
+	if (sst_drv_ctx->ops->post_download)
+		sst_drv_ctx->ops->post_download(sst_drv_ctx);
+	trace_sst_fw_download("Post download for Lib end",
+			sst_drv_ctx->sst_state);
+
+	/* bring sst out of reset */
+	ret_val = sst_drv_ctx->ops->start();
+	if (ret_val)
+		goto restore;
+	trace_sst_fw_download("DSP reset done",
+			sst_drv_ctx->sst_state);
+
+	ret_val = sst_wait_timeout(sst_drv_ctx, block);
+	if (ret_val) {
+		pr_err("fw download failed %d\n" , ret_val);
+		/* assume FW d/l failed due to timeout*/
+		ret_val = -EBUSY;
+
+	}
+
+restore:
+	/* Re-enable Deeper C-states beyond C6 */
+	pm_qos_update_request(sst_drv_ctx->qos, PM_QOS_DEFAULT_VALUE);
+	sst_free_block(sst_drv_ctx, block);
+
+	return ret_val;
+}
+
+/**
+ * sst_load_library - function to load FW into DSP
+ *
+ * @lib: Pointer to the lib download structure
+ * @ops: Contains the stream ops
+ * This function is called when FW requests for a particular library download
+ * This function prepares & downloads the library
+ */
+int sst_load_library(struct snd_sst_lib_download *lib, u8 ops)
+{
+	char buf[20];
+	const char *type, *dir;
+	int len = 0, error = 0;
+	u32 entry_point;
+	const struct firmware *fw_lib;
+	struct snd_sst_lib_download_info dload_info = {{{0},},};
+
+	memset(buf, 0, sizeof(buf));
+
+	pr_debug("Lib Type 0x%x, Slot 0x%x, ops 0x%x\n",
+			lib->lib_info.lib_type, lib->slot_info.slot_num, ops);
+	pr_debug("Version 0x%x, name %s, caps 0x%x media type 0x%x\n",
+		lib->lib_info.lib_version, lib->lib_info.lib_name,
+		lib->lib_info.lib_caps, lib->lib_info.media_type);
+
+	pr_debug("IRAM Size 0x%x, offset 0x%x\n",
+		lib->slot_info.iram_size, lib->slot_info.iram_offset);
+	pr_debug("DRAM Size 0x%x, offset 0x%x\n",
+		lib->slot_info.dram_size, lib->slot_info.dram_offset);
+
+	switch (lib->lib_info.lib_type) {
+	case SST_CODEC_TYPE_MP3:
+		type = "mp3_";
+		break;
+	case SST_CODEC_TYPE_AAC:
+		type = "aac_";
+		break;
+	case SST_CODEC_TYPE_AACP:
+		type = "aac_v1_";
+		break;
+	case SST_CODEC_TYPE_eAACP:
+		type = "aac_v2_";
+		break;
+	case SST_CODEC_TYPE_WMA9:
+		type = "wma9_";
+		break;
+	default:
+		pr_err("Invalid codec type\n");
+		error = -EINVAL;
+		goto wake;
+	}
+
+	if (ops == STREAM_OPS_CAPTURE)
+		dir = "enc_";
+	else
+		dir = "dec_";
+	len = strlen(type) + strlen(dir);
+	strncpy(buf, type, sizeof(buf)-1);
+	strncpy(buf + strlen(type), dir, sizeof(buf)-strlen(type)-1);
+	len += snprintf(buf + len, sizeof(buf) - len, "%d",
+			lib->slot_info.slot_num);
+	len += snprintf(buf + len, sizeof(buf) - len, ".bin");
+
+	pr_debug("Requesting %s\n", buf);
+
+	error = request_firmware(&fw_lib, buf, sst_drv_ctx->dev);
+	if (fw_lib == NULL) {
+		pr_err("fw_lib pointer is returning null\n");
+		return -EINVAL;
+	}
+	if (error) {
+		pr_err("library load failed %d\n", error);
+		goto wake;
+	}
+	error = sst_validate_library(fw_lib, &lib->slot_info, &entry_point);
+	if (error)
+		goto wake_free;
+
+	lib->mod_entry_pt = entry_point;
+	memcpy(&dload_info.dload_lib, lib, sizeof(*lib));
+	/* Prevent C-states beyond C6 */
+	pm_qos_update_request(sst_drv_ctx->qos, CSTATE_EXIT_LATENCY_S0i1 - 1);
+	error = sst_download_library(fw_lib, &dload_info);
+	/* Re-enable Deeper C-states beyond C6 */
+	pm_qos_update_request(sst_drv_ctx->qos, PM_QOS_DEFAULT_VALUE);
+	if (error)
+		goto wake_free;
+
+	/* lib is downloaded and init send alloc again */
+	pr_debug("Library is downloaded now...\n");
+wake_free:
+	/* sst_wake_up_alloc_block(sst_drv_ctx, pvt_id, error, NULL); */
+	release_firmware(fw_lib);
+wake:
+	return error;
+}
+
+/* In relocatable elf file, there can be  relocatable variables and functions.
+ * Variables are kept in Global Address Offset Table (GOT) and functions in
+ * Procedural Linkage Table (PLT). In current codec binaries only relocatable
+ * variables are seen. So we use the GOT table.
+ */
+static int sst_find_got_table(Elf32_Shdr *shdr, int nsec, char *in_elf,
+		Elf32_Rela **got, unsigned int *cnt)
+{
+	int i = 0;
+	while (i < nsec) {
+		if (shdr[i].sh_type == SHT_RELA) {
+			*got = (Elf32_Rela *)(in_elf + shdr[i].sh_offset);
+			*cnt = shdr[i].sh_size / sizeof(Elf32_Rela);
+			break;
+		}
+		i++;
+	}
+	if (i == nsec)
+		return -EINVAL;
+
+	return 0;
+}
+
+/* For each entry in the GOT table, find the unrelocated offset. Then
+ * add the relocation base to the offset and write back the new address to the
+ * original variable location.
+ */
+static int sst_relocate_got_entries(Elf32_Rela *table, unsigned int size,
+	char *in_elf, int elf_size, u32 rel_base)
+{
+	int i;
+	Elf32_Rela *entry;
+	Elf32_Addr *target_addr, unreloc_addr;
+
+	for (i = 0; i < size; i++) {
+		entry = &table[i];
+		if (ELF32_R_SYM(entry->r_info) != 0) {
+			return -EINVAL;
+		} else {
+			if (entry->r_offset > elf_size) {
+				pr_err("GOT table target addr out of range\n");
+				return -EINVAL;
+			}
+			target_addr = (Elf32_Addr *)(in_elf + entry->r_offset);
+			unreloc_addr = *target_addr + entry->r_addend;
+			if (unreloc_addr > elf_size) {
+				pr_err("GOT table entry invalid\n");
+				continue;
+			}
+			*target_addr = unreloc_addr + rel_base;
+		}
+	}
+	return 0;
+}
+
+static int sst_relocate_elf(char *in_elf, int elf_size, phys_addr_t rel_base,
+		Elf32_Addr *entry_pt)
+{
+	int retval = 0;
+	Elf32_Ehdr *ehdr = (Elf32_Ehdr *)in_elf;
+	Elf32_Shdr *shdr = (Elf32_Shdr *) (in_elf + ehdr->e_shoff);
+	Elf32_Phdr *phdr = (Elf32_Phdr *) (in_elf + ehdr->e_phoff);
+	int i, num_sec;
+	Elf32_Rela *rel_table = NULL;
+	unsigned int rela_cnt = 0;
+	u32 rbase;
+
+	BUG_ON(rel_base > (u32)(-1));
+	rbase = (u32) (rel_base & (u32)(~0));
+
+	/* relocate the entry_pt */
+	*entry_pt = (Elf32_Addr)(ehdr->e_entry + rbase);
+	num_sec = ehdr->e_shnum;
+
+	/* Find the relocation(GOT) table through the section header */
+	retval = sst_find_got_table(shdr, num_sec, in_elf,
+					&rel_table, &rela_cnt);
+	if (retval < 0)
+		return retval;
+
+	/* Relocate all the entries in the GOT */
+	retval = sst_relocate_got_entries(rel_table, rela_cnt, in_elf,
+						elf_size, rbase);
+	if (retval < 0)
+		return retval;
+
+	pr_debug("GOT entries relocated\n");
+
+	/* Update the program headers in the ELF */
+	for (i = 0; i < ehdr->e_phnum; i++) {
+		if (phdr[i].p_type == PT_LOAD) {
+			phdr[i].p_vaddr += rbase;
+			phdr[i].p_paddr += rbase;
+		}
+	}
+	pr_debug("program header entries updated\n");
+
+	return retval;
+}
+
+#define ALIGN_256 0x100
+
+int sst_get_next_lib_mem(struct sst_mem_mgr *mgr, int size,
+			unsigned long *lib_base)
+{
+	int retval = 0;
+
+	pr_debug("library orig size = 0x%x", size);
+	if (size % ALIGN_256)
+		size += (ALIGN_256 - (size % ALIGN_256));
+	if (size > mgr->avail)
+		return -ENOMEM;
+
+	*lib_base = mgr->current_base;
+	mgr->current_base += size;
+	mgr->avail -= size;
+	mgr->count++;
+	pr_debug("library base = 0x%lx", *lib_base);
+	pr_debug("library aligned size = 0x%x", size);
+	pr_debug("lib count = %d\n", mgr->count);
+	return retval;
+
+}
+
+static int sst_download_lib_elf(struct intel_sst_drv *sst, const void *lib,
+		int size)
+{
+	int retval = 0;
+
+	pr_debug("In %s\n", __func__);
+
+	if (sst->use_dma) {
+		retval = sst_parse_elf_fw_dma(sst, lib,
+				 &sst->library_list);
+		if (retval)
+			goto free_dma_res;
+		retval = sst_do_dma(&sst->library_list);
+		if (retval)
+			pr_err("sst_do_dma failed, abort\n");
+free_dma_res:
+		kfree(sst->library_list.src);
+		kfree(sst->library_list.dst);
+		sst->library_list.list_len = 0;
+	} else {
+		retval = sst_parse_elf_fw_memcpy(sst, lib,
+				 &sst->libmemcpy_list);
+		if (retval)
+			return retval;
+		sst_do_memcpy(&sst->libmemcpy_list);
+		sst_memcpy_free_lib_resources();
+	}
+	pr_debug("download lib complete");
+	return retval;
+}
+
+static void sst_fill_fw_module_table(struct sst_module_info *mod_list,
+		int list_size, unsigned long ddr_base)
+{
+	int i;
+	u32 *write_ptr = (u32 *)ddr_base;
+
+	pr_debug("In %s\n", __func__);
+
+	for (i = 0; i < list_size; i++) {
+		if (mod_list[i].status == SST_LIB_DOWNLOADED) {
+			pr_debug("status dnwld for %d\n", i);
+			pr_debug("module id %d\n", mod_list[i].id);
+			pr_debug("entry pt 0x%x\n", mod_list[i].entry_pt);
+
+			*write_ptr++ = mod_list[i].id;
+			*write_ptr++ = mod_list[i].entry_pt;
+		}
+	}
+}
+
+static int sst_request_lib_elf(struct sst_module_info *mod_entry,
+	const struct firmware **fw_lib, int pci_id, struct device *dev)
+{
+	char name[25];
+	int retval = 0;
+
+	snprintf(name, sizeof(name), "%s%s%04x%s", mod_entry->name,
+			"_", pci_id, ".bin");
+	pr_debug("Requesting %s\n", name);
+
+	retval = request_firmware(fw_lib, name, dev);
+	if (retval) {
+		pr_err("%s library load failed %d\n", name, retval);
+		return retval;
+	}
+	pr_debug("got lib\n");
+	mod_entry->status = SST_LIB_FOUND;
+	return 0;
+}
+
+static int sst_allocate_lib_mem(const struct firmware *lib, int size,
+	struct sst_mem_mgr *mem_mgr, char **out_elf, unsigned long *lib_start)
+{
+	int retval = 0;
+
+	*out_elf = kzalloc(size, GFP_KERNEL);
+	if (!*out_elf) {
+		pr_err("cannot alloc mem for elf copy %d\n", retval);
+		goto mem_error;
+	}
+
+	memcpy(*out_elf, lib->data, size);
+	retval = sst_get_next_lib_mem(mem_mgr, size, lib_start);
+	if (retval < 0) {
+		pr_err("cannot alloc ddr mem for lib: %d\n", retval);
+		kfree(*out_elf);
+		goto mem_error;
+	}
+	return 0;
+
+mem_error:
+	release_firmware(lib);
+	return -ENOMEM;
+}
+
+int sst_load_all_modules_elf(struct intel_sst_drv *ctx, struct sst_module_info *mod_table,
+								int num_modules)
+{
+	int retval = 0;
+	int i;
+	const struct firmware *fw_lib;
+	struct sst_module_info *mod = NULL;
+	char *out_elf;
+	unsigned int lib_size = 0;
+	unsigned int mod_table_offset = ctx->pdata->lib_info->mod_table_offset;
+	unsigned long lib_base;
+
+	pr_debug("In %s", __func__);
+
+	sst_init_lib_mem_mgr(ctx);
+
+	for (i = 0; i < num_modules; i++) {
+		mod = &mod_table[i];
+		trace_sst_lib_download("Start of Request Lib", mod->name);
+		retval = sst_request_lib_elf(mod, &fw_lib,
+						ctx->pci_id, ctx->dev);
+		if (retval < 0)
+			continue;
+		lib_size = fw_lib->size;
+
+		trace_sst_lib_download("End of Request Lib", mod->name);
+		retval = sst_validate_elf(fw_lib, true);
+		if (retval < 0) {
+			pr_err("library is not valid elf %d\n", retval);
+			release_firmware(fw_lib);
+			continue;
+		}
+		pr_debug("elf validated\n");
+		retval = sst_allocate_lib_mem(fw_lib, lib_size,
+				&ctx->lib_mem_mgr, &out_elf, &lib_base);
+		if (retval < 0) {
+			pr_err("lib mem allocation failed: %d\n", retval);
+			continue;
+		}
+		pr_debug("lib space allocated\n");
+
+		/* relocate in place */
+		retval = sst_relocate_elf(out_elf, lib_size,
+						lib_base, &mod->entry_pt);
+		if (retval < 0) {
+			pr_err("lib elf relocation failed: %d\n", retval);
+			release_firmware(fw_lib);
+			kfree(out_elf);
+			continue;
+		}
+		pr_debug("relocation done\n");
+		release_firmware(fw_lib);
+		trace_sst_lib_download("Start of download Lib", mod->name);
+		/* write to ddr imr region,use memcpy method */
+		retval = sst_download_lib_elf(ctx, out_elf, lib_size);
+		trace_sst_lib_download("End of download Lib", mod->name);
+		mod->status = SST_LIB_DOWNLOADED;
+		kfree(out_elf);
+	}
+
+	/* write module table to DDR */
+	sst_fill_fw_module_table(mod_table, num_modules,
+			(unsigned long)(ctx->ddr + mod_table_offset));
+	return retval;
+}
diff --git a/sound/soc/intel/sst/sst_ipc.c b/sound/soc/intel/sst/sst_ipc.c
new file mode 100644
index 0000000..f33dee2
--- /dev/null
+++ b/sound/soc/intel/sst/sst_ipc.c
@@ -0,0 +1,789 @@
+/*
+ *  sst_ipc.c - Intel SST Driver for audio engine
+ *
+ *  Copyright (C) 2008-10 Intel Corporation
+ *  Authors:	Vinod Koul <vinod.koul@intel.com>
+ *		Harsha Priya <priya.harsha@intel.com>
+ *		Dharageswari R <dharageswari.r@intel.com>
+ *		KP Jeeja <jeeja.kp@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This file defines all ipc functions
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/pci.h>
+#include <linux/firmware.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <sound/intel_sst_ioctl.h>
+#include "../sst_platform.h"
+#include "../platform_ipc_v2.h"
+#include "sst.h"
+#include "sst_trace.h"
+
+void sst_dump_to_buffer(const void *from, size_t len, char *buf)
+{
+	int i, end;
+	const unsigned char *cmd = from;
+
+	if (len == 0) {
+		buf[0] = '\0';
+		return;
+	}
+
+	for (end = len - 1; end >= 0; end--)
+		if (cmd[end])
+			break;
+	end++;
+
+	buf += snprintf(buf, 3, "%02x", cmd[0]);
+	for (i = 1; i < len; i++) {
+		buf += snprintf(buf, 4, " %02x", cmd[i]);
+		if (i == end && end != len - 1) {
+			sprintf(buf, "...");
+			break;
+		}
+	}
+}
+
+struct sst_block *sst_create_block(struct intel_sst_drv *ctx,
+					u32 msg_id, u32 drv_id)
+{
+	struct sst_block *msg = NULL;
+
+	pr_debug("in %s\n", __func__);
+	msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+	if (!msg) {
+		pr_err("kzalloc block failed\n");
+		return NULL;
+	}
+	msg->condition = false;
+	msg->on = true;
+	msg->msg_id = msg_id;
+	msg->drv_id = drv_id;
+	spin_lock_bh(&ctx->block_lock);
+	list_add_tail(&msg->node, &ctx->block_list);
+	spin_unlock_bh(&ctx->block_lock);
+
+	return msg;
+}
+
+int sst_wake_up_block(struct intel_sst_drv *ctx, int result,
+		u32 drv_id, u32 ipc, void *data, u32 size)
+{
+	struct sst_block *block = NULL;
+
+	pr_debug("in %s\n", __func__);
+	spin_lock_bh(&ctx->block_lock);
+	list_for_each_entry(block, &ctx->block_list, node) {
+		pr_debug("Block ipc %d, drv_id %d\n", block->msg_id,
+							block->drv_id);
+		if (block->msg_id == ipc && block->drv_id == drv_id) {
+			pr_debug("free up the block\n");
+			block->ret_code = result;
+			block->data = data;
+			block->size = size;
+			block->condition = true;
+			spin_unlock_bh(&ctx->block_lock);
+			wake_up(&ctx->wait_queue);
+			return 0;
+		}
+	}
+	spin_unlock_bh(&ctx->block_lock);
+	pr_debug("Block not found or a response is received for a short message for ipc %d, drv_id %d\n",
+			ipc, drv_id);
+	return -EINVAL;
+}
+
+int sst_free_block(struct intel_sst_drv *ctx, struct sst_block *freed)
+{
+	struct sst_block *block = NULL, *__block;
+
+	pr_debug("in %s\n", __func__);
+	spin_lock_bh(&ctx->block_lock);
+	list_for_each_entry_safe(block, __block, &ctx->block_list, node) {
+		if (block == freed) {
+			list_del(&freed->node);
+			kfree(freed->data);
+			freed->data = NULL;
+			kfree(freed);
+			spin_unlock_bh(&ctx->block_lock);
+			return 0;
+		}
+	}
+	spin_unlock_bh(&ctx->block_lock);
+	return -EINVAL;
+}
+
+/*
+ * sst_send_runtime_param - send runtime param to SST
+ *
+ * this function sends the runtime parameter to sst dsp engine
+ */
+static int sst_send_runtime_param(struct snd_sst_runtime_params *params)
+{
+	struct ipc_post *msg = NULL;
+	int ret_val;
+
+	pr_debug("Enter:%s\n", __func__);
+	ret_val = sst_create_ipc_msg(&msg, true);
+	if (ret_val)
+		return ret_val;
+	sst_fill_header(&msg->header, IPC_IA_SET_RUNTIME_PARAMS, 1,
+							params->str_id);
+	msg->header.part.data = sizeof(u32) + sizeof(*params) - sizeof(params->addr)
+				+ params->size;
+	memcpy(msg->mailbox_data, &msg->header.full, sizeof(u32));
+	memcpy(msg->mailbox_data + sizeof(u32), params, sizeof(*params)
+				- sizeof(params->addr));
+	/* driver doesn't need to send address, so overwrite addr with data */
+	memcpy(msg->mailbox_data + sizeof(u32) + sizeof(*params)
+			- sizeof(params->addr),
+			params->addr, params->size);
+	sst_add_to_dispatch_list_and_post(sst_drv_ctx, msg);
+	return 0;
+}
+
+void sst_post_message_mrfld(struct work_struct *work)
+{
+	struct ipc_post *msg;
+	union ipc_header_mrfld header;
+	unsigned long irq_flags;
+
+	pr_debug("Enter:%s\n", __func__);
+	spin_lock_irqsave(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+	/* check list */
+	if (list_empty(&sst_drv_ctx->ipc_dispatch_list)) {
+		/* queue is empty, nothing to send */
+		spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+		pr_debug("Empty msg queue... NO Action\n");
+		return;
+	}
+
+	/* check busy bit */
+	header.full = sst_shim_read64(sst_drv_ctx->shim, SST_IPCX);
+	if (header.p.header_high.part.busy) {
+		spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+		pr_debug("Busy not free... post later\n");
+		return;
+	}
+	/* copy msg from list */
+	msg = list_entry(sst_drv_ctx->ipc_dispatch_list.next,
+			struct ipc_post, node);
+	list_del(&msg->node);
+	pr_debug("sst: size: = %x\n", msg->mrfld_header.p.header_low_payload);
+	if (msg->mrfld_header.p.header_high.part.large)
+		memcpy_toio(sst_drv_ctx->mailbox + SST_MAILBOX_SEND,
+			    msg->mailbox_data, msg->mrfld_header.p.header_low_payload);
+
+	trace_sst_ipc("POST  ->", msg->mrfld_header.p.header_high.full,
+				  msg->mrfld_header.p.header_low_payload,
+				  msg->mrfld_header.p.header_high.part.drv_id);
+	trace_sst_ipc_mailbox(msg->mailbox_data, msg->mrfld_header.p.header_low_payload);
+	sst_shim_write64(sst_drv_ctx->shim, SST_IPCX, msg->mrfld_header.full);
+	spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+	pr_debug("sst: Post message: header = %x\n",
+					msg->mrfld_header.p.header_high.full);
+	kfree(msg->mailbox_data);
+	kfree(msg);
+	return;
+}
+
+/**
+* sst_post_message - Posts message to SST
+*
+* @work: Pointer to work structure
+*
+* This function is called by any component in driver which
+* wants to send an IPC message. This will post message only if
+* busy bit is free
+*/
+void sst_post_message_mfld(struct work_struct *work)
+{
+	struct ipc_post *msg;
+	union ipc_header header;
+	unsigned long irq_flags;
+
+	pr_debug("Enter:%s\n", __func__);
+
+	spin_lock_irqsave(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+	/* check list */
+	if (list_empty(&sst_drv_ctx->ipc_dispatch_list)) {
+		/* queue is empty, nothing to send */
+		spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+		pr_debug("Empty msg queue... NO Action\n");
+		return;
+	}
+
+	/* check busy bit */
+	header.full = sst_shim_read(sst_drv_ctx->shim, sst_drv_ctx->ipc_reg.ipcx);
+	if (header.part.busy) {
+		spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+		pr_debug("Busy not free... Post later\n");
+		return;
+	}
+	/* copy msg from list */
+	msg = list_entry(sst_drv_ctx->ipc_dispatch_list.next,
+			struct ipc_post, node);
+	list_del(&msg->node);
+	pr_debug("size: = %x\n", msg->header.part.data);
+	if (msg->header.part.large)
+		memcpy_toio(sst_drv_ctx->mailbox + SST_MAILBOX_SEND,
+			msg->mailbox_data, msg->header.part.data);
+
+	sst_shim_write(sst_drv_ctx->shim, sst_drv_ctx->ipc_reg.ipcx, msg->header.full);
+	spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+	pr_debug("Posted message: header = %x\n", msg->header.full);
+
+	kfree(msg->mailbox_data);
+	kfree(msg);
+	return;
+}
+
+int sst_sync_post_message_mrfld(struct ipc_post *msg)
+{
+	union ipc_header_mrfld header;
+	unsigned int loop_count = 0;
+	int retval = 0;
+	unsigned long irq_flags;
+
+	pr_debug("Enter:%s\n", __func__);
+	spin_lock_irqsave(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+
+	/* check busy bit */
+	header.full = sst_shim_read64(sst_drv_ctx->shim, SST_IPCX);
+	while (header.p.header_high.part.busy) {
+		if (loop_count > 10) {
+			pr_err("sst: Busy wait failed, cant send this msg\n");
+			retval = -EBUSY;
+			goto out;
+		}
+		udelay(500);
+		loop_count++;
+		header.full = sst_shim_read64(sst_drv_ctx->shim, SST_IPCX);
+	}
+	pr_debug("sst: Post message: header = %x\n",
+					msg->mrfld_header.p.header_high.full);
+	pr_debug("sst: size = 0x%x\n", msg->mrfld_header.p.header_low_payload);
+	if (msg->mrfld_header.p.header_high.part.large)
+		memcpy_toio(sst_drv_ctx->mailbox + SST_MAILBOX_SEND,
+			msg->mailbox_data, msg->mrfld_header.p.header_low_payload);
+
+	trace_sst_ipc("POST  ->", msg->mrfld_header.p.header_high.full,
+				  msg->mrfld_header.p.header_low_payload,
+				  msg->mrfld_header.p.header_high.part.drv_id);
+	trace_sst_ipc_mailbox(msg->mailbox_data, msg->mrfld_header.p.header_low_payload);
+	sst_shim_write64(sst_drv_ctx->shim, SST_IPCX, msg->mrfld_header.full);
+
+out:
+	spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+	kfree(msg->mailbox_data);
+	kfree(msg);
+	return retval;
+}
+
+/* use this for trigger ops to post syncronous msgs
+ */
+int sst_sync_post_message_mfld(struct ipc_post *msg)
+{
+	union ipc_header header;
+	unsigned int loop_count = 0;
+	int retval = 0;
+	unsigned long irq_flags;
+
+	pr_debug("Enter:%s\n", __func__);
+	spin_lock_irqsave(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+
+	/* check busy bit */
+	header.full = sst_shim_read(sst_drv_ctx->shim, sst_drv_ctx->ipc_reg.ipcx);
+	while (header.part.busy) {
+		if (loop_count > 10) {
+			pr_err("busy wait failed, cant send this msg\n");
+			retval = -EBUSY;
+			goto out;
+		}
+		udelay(500);
+		loop_count++;
+		header.full = sst_shim_read(sst_drv_ctx->shim, sst_drv_ctx->ipc_reg.ipcx);
+	}
+	pr_debug("sst: Post message: header = %x\n", msg->header.full);
+	if (msg->header.part.large)
+		memcpy_toio(sst_drv_ctx->mailbox + SST_MAILBOX_SEND,
+			msg->mailbox_data, msg->header.part.data);
+	sst_shim_write(sst_drv_ctx->shim, sst_drv_ctx->ipc_reg.ipcx, msg->header.full);
+
+out:
+	spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+	kfree(msg->mailbox_data);
+	kfree(msg);
+
+	return retval;
+}
+
+/*
+ * sst_clear_interrupt - clear the SST FW interrupt
+ *
+ * This function clears the interrupt register after the interrupt
+ * bottom half is complete allowing next interrupt to arrive
+ */
+void intel_sst_clear_intr_mfld(void)
+{
+	union interrupt_reg isr;
+	union interrupt_reg imr;
+	union ipc_header clear_ipc;
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+	imr.full = sst_shim_read(sst_drv_ctx->shim, SST_IMRX);
+	isr.full = sst_shim_read(sst_drv_ctx->shim, SST_ISRX);
+	/*  write 1 to clear  */;
+	isr.part.busy_interrupt = 1;
+	sst_shim_write(sst_drv_ctx->shim, SST_ISRX, isr.full);
+	/* Set IA done bit */
+	clear_ipc.full = sst_shim_read(sst_drv_ctx->shim, sst_drv_ctx->ipc_reg.ipcd);
+	clear_ipc.part.busy = 0;
+	clear_ipc.part.done = 1;
+	clear_ipc.part.data = IPC_ACK_SUCCESS;
+	sst_shim_write(sst_drv_ctx->shim, sst_drv_ctx->ipc_reg.ipcd, clear_ipc.full);
+	/* un mask busy interrupt */
+	imr.part.busy_interrupt = 0;
+	imr.part.done_interrupt = 0;
+	sst_shim_write(sst_drv_ctx->shim, SST_IMRX, imr.full);
+	spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+}
+
+
+void intel_sst_clear_intr_mrfld(void)
+{
+	union interrupt_reg_mrfld isr;
+	union interrupt_reg_mrfld imr;
+	union ipc_header_mrfld clear_ipc;
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+	imr.full = sst_shim_read64(sst_drv_ctx->shim, SST_IMRX);
+	isr.full = sst_shim_read64(sst_drv_ctx->shim, SST_ISRX);
+
+	/*  write 1 to clear  */
+	isr.part.busy_interrupt = 1;
+	sst_shim_write64(sst_drv_ctx->shim, SST_ISRX, isr.full);
+
+	/* Set IA done bit */
+	clear_ipc.full = sst_shim_read64(sst_drv_ctx->shim, SST_IPCD);
+
+	clear_ipc.p.header_high.part.busy = 0;
+	clear_ipc.p.header_high.part.done = 1;
+	clear_ipc.p.header_low_payload = IPC_ACK_SUCCESS;
+	sst_shim_write64(sst_drv_ctx->shim, SST_IPCD, clear_ipc.full);
+	/* un mask busy interrupt */
+	imr.part.busy_interrupt = 0;
+	sst_shim_write64(sst_drv_ctx->shim, SST_IMRX, imr.full);
+	spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+}
+
+
+/*
+ * process_fw_init - process the FW init msg
+ *
+ * @msg: IPC message mailbox data from FW
+ *
+ * This function processes the FW init msg from FW
+ * marks FW state and prints debug info of loaded FW
+ */
+static void process_fw_init(void *msg)
+{
+	struct ipc_header_fw_init *init =
+		(struct ipc_header_fw_init *)msg;
+	int retval = 0;
+
+	pr_debug("*** FW Init msg came***\n");
+	if (init->result) {
+		sst_drv_ctx->sst_state =  SST_RESET;
+		pr_debug("FW Init failed, Error %x\n", init->result);
+		pr_err("FW Init failed, Error %x\n", init->result);
+		retval = init->result;
+		goto ret;
+	}
+	pr_info("FW Version %02x.%02x.%02x.%02x\n",
+		init->fw_version.type, init->fw_version.major,
+		init->fw_version.minor, init->fw_version.build);
+	pr_info("Build date %s Time %s\n",
+			init->build_info.date, init->build_info.time);
+
+	/* If there any runtime parameter to set, send it */
+	if (sst_drv_ctx->runtime_param.param.addr)
+		sst_send_runtime_param(&(sst_drv_ctx->runtime_param.param));
+
+ret:
+	sst_wake_up_block(sst_drv_ctx, retval, FW_DWNL_ID, 0 , NULL, 0);
+}
+/**
+* sst_process_message_mfld - Processes message from SST
+*
+* @work:	Pointer to work structure
+*
+* This function is scheduled by ISR
+* It take a msg from process_queue and does action based on msg
+*/
+void sst_process_message_mfld(struct ipc_post *msg)
+{
+	int str_id;
+	struct stream_info *stream;
+
+	str_id = msg->header.part.str_id;
+	pr_debug("IPC process for %x\n", msg->header.full);
+	/* based on msg in list call respective handler */
+	switch (msg->header.part.msg_id) {
+	case IPC_SST_PERIOD_ELAPSED:
+		if (sst_validate_strid(str_id)) {
+			pr_err("stream id %d invalid\n", str_id);
+			break;
+		}
+		stream = &sst_drv_ctx->streams[str_id];
+		if (stream->period_elapsed)
+			stream->period_elapsed(stream->pcm_substream);
+		break;
+	case IPC_SST_BUF_UNDER_RUN:
+	case IPC_SST_BUF_OVER_RUN:
+		if (sst_validate_strid(str_id)) {
+			pr_err("stream id %d invalid\n", str_id);
+			break;
+		}
+		pr_err("Buffer under/overrun for %d\n",
+				msg->header.part.str_id);
+		pr_err("Got Underrun & not to send data...ignore\n");
+		break;
+
+	case IPC_SST_FRAGMENT_ELPASED: {
+		pr_debug("IPC_SST_FRAGMENT_ELPASED for %d", str_id);
+		sst_cdev_fragment_elapsed(str_id);
+		break;
+	}
+
+	case IPC_IA_PRINT_STRING:
+		pr_debug("been asked to print something by fw\n");
+		/* TBD */
+		break;
+
+	case IPC_IA_FW_INIT_CMPLT: {
+		/* send next data to FW */
+		process_fw_init(msg->mailbox_data);
+		break;
+	}
+
+	case IPC_SST_STREAM_PROCESS_FATAL_ERR:
+		if (sst_validate_strid(str_id)) {
+			pr_err("stream id %d invalid\n", str_id);
+			break;
+		}
+		pr_err("codec fatal error %x stream %d...\n",
+				msg->header.full, msg->header.part.str_id);
+		pr_err("Dropping the stream\n");
+		sst_drop_stream(msg->header.part.str_id);
+		break;
+	default:
+		/* Illegal case */
+		pr_err("Unhandled msg %x header %x\n",
+		msg->header.part.msg_id, msg->header.full);
+	}
+	return;
+}
+
+/**
+* sst_process_message - Processes message from SST
+*
+* @work:	Pointer to work structure
+*
+* This function is scheduled by ISR
+* It take a msg from process_queue and does action based on msg
+*/
+
+void sst_process_message_mrfld(struct ipc_post *msg)
+{
+	int str_id;
+
+	str_id = msg->mrfld_header.p.header_high.part.drv_id;
+
+	pr_debug("IPC process message header %x payload %x\n",
+			msg->mrfld_header.p.header_high.full,
+			msg->mrfld_header.p.header_low_payload);
+
+	return;
+}
+
+#define VTSV_MAX_NUM_RESULTS 6
+#define VTSV_SIZE_PER_RESULT 7 /* 7 16 bit words */
+/* Max 6 results each of size 7 words + 1 num results word */
+#define VTSV_MAX_TOTAL_RESULT_SIZE \
+	(VTSV_MAX_NUM_RESULTS*VTSV_SIZE_PER_RESULT + 1)
+/* Each data word in the result is sent as a string in the format:
+DATAn=d, where n is the data word index varying from 0 to
+				VTSV_MAX_TOTAL_RESULT_SIZE-1
+d = string representation of data in decimal format;
+				unsigned 16bit data needs max 5 chars
+So total data string size = 4("DATA")+2("n")+1("=")
+				+5("d")+1(null)+5(reserved) = 18  */
+#define VTSV_DATA_STRING_SIZE 18
+
+static int send_vtsv_result_event(void *data, int size)
+{
+	char *envp[VTSV_MAX_TOTAL_RESULT_SIZE+3];
+	char res_size[30];
+	char ev_type[30];
+	char result[VTSV_MAX_TOTAL_RESULT_SIZE][VTSV_DATA_STRING_SIZE];
+	int offset = 0;
+	u16 *tmp;
+	int i;
+	int ret;
+
+	if (!data) {
+		pr_err("Data pointer Null into %s\n", __func__);
+		return -EINVAL;
+	}
+	size = size / (sizeof(u16)); /* Number of 16 bit data words*/
+	if (size > VTSV_MAX_TOTAL_RESULT_SIZE) {
+		pr_err("VTSV result size exceeds expected value, no uevent sent\n");
+		return -EINVAL;
+	}
+
+	snprintf(ev_type, sizeof(res_size), "EVENT_TYPE=SST_VTSV");
+	envp[offset++] = ev_type;
+	snprintf(res_size, sizeof(ev_type), "VTSV_RESULT_SIZE=%u", size);
+	envp[offset++] = res_size;
+	tmp = (u16 *)(data);
+	for (i = 0; i < size; i++) {
+		/* Driver assumes all data to be u16; The VTSV service
+		layer will type cast to u16 or s16 as appropriate for
+		a given data word*/
+		snprintf(result[i], VTSV_DATA_STRING_SIZE,
+				"DATA%u=%u", i, *tmp++);
+		envp[offset++] = result[i];
+	}
+	envp[offset] = NULL;
+	ret = kobject_uevent_env(&sst_drv_ctx->dev->kobj, KOBJ_CHANGE, envp);
+	if (ret)
+		pr_err("VTSV event send failed: ret = %d\n", ret);
+	return ret;
+}
+
+static void process_fw_async_msg(struct ipc_post *msg)
+{
+	u32 msg_id;
+	int str_id;
+	int res_size, ret;
+	u32 data_size, i;
+	void *data_offset;
+	struct stream_info *stream;
+	union ipc_header_high msg_high;
+	u32 msg_low, pipe_id;
+
+	msg_high = msg->mrfld_header.p.header_high;
+	msg_low = msg->mrfld_header.p.header_low_payload;
+	msg_id = ((struct ipc_dsp_hdr *)msg->mailbox_data)->cmd_id;
+	data_offset = (msg->mailbox_data + sizeof(struct ipc_dsp_hdr));
+	data_size =  msg_low - (sizeof(struct ipc_dsp_hdr));
+
+	switch (msg_id) {
+	case IPC_SST_PERIOD_ELAPSED_MRFLD:
+		pipe_id = ((struct ipc_dsp_hdr *)msg->mailbox_data)->pipe_id;
+		str_id = get_stream_id_mrfld(pipe_id);
+		if (str_id > 0) {
+			pr_debug("Period elapsed rcvd for pipe id 0x%x\n", pipe_id);
+			stream = &sst_drv_ctx->streams[str_id];
+			if (stream->period_elapsed)
+				stream->period_elapsed(stream->pcm_substream);
+			if (stream->compr_cb)
+				stream->compr_cb(stream->compr_cb_param);
+		}
+		break;
+
+	case IPC_IA_DRAIN_STREAM_MRFLD:
+		pipe_id = ((struct ipc_dsp_hdr *)msg->mailbox_data)->pipe_id;
+		str_id = get_stream_id_mrfld(pipe_id);
+		if (str_id > 0) {
+			stream = &sst_drv_ctx->streams[str_id];
+			if (stream->drain_notify)
+				stream->drain_notify(stream->drain_cb_param);
+		}
+		break;
+
+	case IPC_IA_FW_ASYNC_ERR_MRFLD:
+		pr_err("FW sent async error msg:\n");
+		for (i = 0; i < (data_size/4); i++)
+			pr_err("0x%x\n", (*((unsigned int *)data_offset + i)));
+		break;
+
+	case IPC_IA_VTSV_DETECTED:
+		res_size = data_size;
+		ret = send_vtsv_result_event(data_offset, res_size);
+		if (ret)
+			pr_err("VTSV uevent send failed: %d\n", ret);
+		else
+			pr_debug("VTSV uevent sent\n");
+		break;
+
+	case IPC_IA_FW_INIT_CMPLT_MRFLD:
+		process_fw_init(data_offset);
+		break;
+
+	case IPC_IA_BUF_UNDER_RUN_MRFLD:
+		pipe_id = ((struct ipc_dsp_hdr *)msg->mailbox_data)->pipe_id;
+		str_id = get_stream_id_mrfld(pipe_id);
+		if (str_id > 0)
+			pr_err("Buffer under-run for pipe:%#x str_id:%d\n",
+					pipe_id, str_id);
+		break;
+
+	default:
+		pr_err("Unrecognized async msg from FW msg_id %#x\n", msg_id);
+	}
+}
+
+void sst_process_reply_mrfld(struct ipc_post *msg)
+{
+	unsigned int drv_id;
+	void *data;
+	union ipc_header_high msg_high;
+	u32 msg_low;
+	struct ipc_dsp_hdr *dsp_hdr;
+	unsigned int cmd_id;
+
+	msg_high = msg->mrfld_header.p.header_high;
+	msg_low = msg->mrfld_header.p.header_low_payload;
+
+	pr_debug("IPC process message header %x payload %x\n",
+			msg->mrfld_header.p.header_high.full,
+			msg->mrfld_header.p.header_low_payload);
+
+	drv_id = msg_high.part.drv_id;
+
+	/* Check for async messages */
+	if (drv_id == SST_ASYNC_DRV_ID) {
+		/* FW sent async large message */
+		process_fw_async_msg(msg);
+		goto end;
+	}
+
+	/* FW sent short error response for an IPC */
+	if (msg_high.part.result && drv_id && !msg_high.part.large) {
+		/* 32-bit FW error code in msg_low */
+		pr_err("FW sent error response 0x%x", msg_low);
+		sst_wake_up_block(sst_drv_ctx, msg_high.part.result,
+			msg_high.part.drv_id,
+			msg_high.part.msg_id, NULL, 0);
+		goto end;
+	}
+
+	/* Process all valid responses */
+	/* if it is a large message, the payload contains the size to
+	 * copy from mailbox */
+	if (msg_high.part.large) {
+
+		if (!msg_low) {
+			pr_err("payload size is 0 for large message\n");
+			pr_err("IPC header %#x has %#x payload\n",
+					msg_high.full, msg_low);
+
+			sst_wake_up_block(sst_drv_ctx, msg_high.part.result,
+					msg_high.part.drv_id,
+					msg_high.part.msg_id, NULL, 0);
+			WARN_ON(1);
+			return;
+		}
+
+		data = kzalloc(msg_low, GFP_KERNEL);
+		if (!data)
+			goto end;
+		memcpy(data, (void *) msg->mailbox_data, msg_low);
+		/* Copy command id so that we can use to put sst to reset */
+		dsp_hdr = (struct ipc_dsp_hdr *)data;
+		cmd_id = dsp_hdr->cmd_id;
+		pr_debug("cmd_id %d\n", dsp_hdr->cmd_id);
+		if (sst_wake_up_block(sst_drv_ctx, msg_high.part.result,
+				msg_high.part.drv_id,
+				msg_high.part.msg_id, data, msg_low))
+			kfree(data);
+		if (cmd_id == IPC_SST_VB_RESET)
+			sst_set_fw_state_locked(sst_drv_ctx, SST_RESET);
+	} else {
+		sst_wake_up_block(sst_drv_ctx, msg_high.part.result,
+				msg_high.part.drv_id,
+				msg_high.part.msg_id, NULL, 0);
+	}
+
+end:
+	return;
+}
+
+/**
+* sst_process_reply - Processes reply message from SST
+*
+* @work:	Pointer to work structure
+*
+* This function is scheduled by ISR
+* It take a reply msg from response_queue and
+* does action based on msg
+*/
+void sst_process_reply_mfld(struct ipc_post *msg)
+{
+	void *data;
+	int str_id;
+	struct stream_info *stream;
+
+
+	str_id = msg->header.part.str_id;
+
+	pr_debug("sst: IPC process reply for %x\n", msg->header.full);
+
+	/* handle drain notify first */
+	if (msg->header.part.msg_id == IPC_IA_DRAIN_STREAM) {
+		pr_debug("drain message notify\n");
+		if (str_id > 0) {
+			stream = &sst_drv_ctx->streams[str_id];
+			if (stream->drain_notify)
+				stream->drain_notify(stream->drain_cb_param);
+		}
+		return;
+	}
+
+
+	if (!msg->header.part.large) {
+		if (!msg->header.part.data)
+			pr_debug("Success\n");
+		else
+			pr_err("Error from firmware: %d\n", msg->header.part.data);
+		sst_wake_up_block(sst_drv_ctx, msg->header.part.data,
+				str_id, msg->header.part.msg_id, NULL, 0);
+	} else {
+		pr_debug("Allocating %d\n", msg->header.part.data);
+		data = kzalloc(msg->header.part.data, GFP_KERNEL);
+		if (!data) {
+			pr_err("sst: mem alloc failed\n");
+			return;
+		}
+
+		memcpy(data, (void *)msg->mailbox_data, msg->header.part.data);
+		if (sst_wake_up_block(sst_drv_ctx, 0, str_id,
+				msg->header.part.msg_id, data,
+				msg->header.part.data))
+			kfree(data);
+	}
+	return;
+}
diff --git a/sound/soc/intel/sst/sst_pvt.c b/sound/soc/intel/sst/sst_pvt.c
new file mode 100644
index 0000000..1f89162
--- /dev/null
+++ b/sound/soc/intel/sst/sst_pvt.c
@@ -0,0 +1,541 @@
+/*
+ *  sst_pvt.c - Intel SST Driver for audio engine
+ *
+ *  Copyright (C) 2008-10	Intel Corp
+ *  Authors:	Vinod Koul <vinod.koul@intel.com>
+ *		Harsha Priya <priya.harsha@intel.com>
+ *		Dharageswari R <dharageswari.r@intel.com>
+ *		KP Jeeja <jeeja.kp@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This file contains all private functions
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kobject.h>
+#include <linux/pci.h>
+#include <linux/fs.h>
+#include <linux/firmware.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <sound/asound.h>
+#include <sound/pcm.h>
+#include <sound/compress_offload.h>
+#include "../sst_platform.h"
+#include "../platform_ipc_v2.h"
+#include "sst.h"
+
+#define SST_EXCE_DUMP_BASE	0xFFFF2c00
+#define SST_EXCE_DUMP_WORD	4
+#define SST_EXCE_DUMP_LEN	32
+#define SST_EXCE_DUMP_SIZE	((SST_EXCE_DUMP_LEN)*(SST_EXCE_DUMP_WORD))
+#define SST_EXCE_DUMP_OFFSET	0xA00
+/*
+ * sst_wait_interruptible - wait on event
+ *
+ * @sst_drv_ctx: Driver context
+ * @block: Driver block to wait on
+ *
+ * This function waits without a timeout (and is interruptable) for a
+ * given block event
+ */
+int sst_wait_interruptible(struct intel_sst_drv *sst_drv_ctx,
+				struct sst_block *block)
+{
+	int retval = 0;
+
+	if (!wait_event_interruptible(sst_drv_ctx->wait_queue,
+				block->condition)) {
+		/* event wake */
+		if (block->ret_code < 0) {
+			pr_err("stream failed %d\n", block->ret_code);
+			retval = -EBUSY;
+		} else {
+			pr_debug("event up\n");
+			retval = 0;
+		}
+	} else {
+		pr_err("signal interrupted\n");
+		retval = -EINTR;
+	}
+	return retval;
+
+}
+
+unsigned long long read_shim_data(struct intel_sst_drv *sst, int addr)
+{
+	unsigned long long val = 0;
+
+	switch (sst->pci_id) {
+	case SST_MRFLD_PCI_ID:
+	case PCI_DEVICE_ID_INTEL_SST_MOOR:
+		val = sst_shim_read64(sst->shim, addr);
+		break;
+	}
+	return val;
+}
+
+void write_shim_data(struct intel_sst_drv *sst, int addr,
+				unsigned long long data)
+{
+	switch (sst->pci_id) {
+	case SST_MRFLD_PCI_ID:
+	case PCI_DEVICE_ID_INTEL_SST_MOOR:
+		sst_shim_write64(sst->shim, addr, (u64) data);
+		break;
+	}
+}
+
+
+void dump_sst_shim(struct intel_sst_drv *sst)
+{
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&sst->ipc_spin_lock, irq_flags);
+	pr_err("audio shim registers:\n"
+		"CSR: %.8llx\n"
+		"PISR: %.8llx\n"
+		"PIMR: %.8llx\n"
+		"ISRX: %.8llx\n"
+		"ISRD: %.8llx\n"
+		"IMRX: %.8llx\n"
+		"IMRD: %.8llx\n"
+		"IPCX: %.8llx\n"
+		"IPCD: %.8llx\n"
+		"ISRSC: %.8llx\n"
+		"ISRLPESC: %.8llx\n"
+		"IMRSC: %.8llx\n"
+		"IMRLPESC: %.8llx\n"
+		"IPCSC: %.8llx\n"
+		"IPCLPESC: %.8llx\n"
+		"CLKCTL: %.8llx\n"
+		"CSR2: %.8llx\n",
+		read_shim_data(sst, SST_CSR),
+		read_shim_data(sst, SST_PISR),
+		read_shim_data(sst, SST_PIMR),
+		read_shim_data(sst, SST_ISRX),
+		read_shim_data(sst, SST_ISRD),
+		read_shim_data(sst, SST_IMRX),
+		read_shim_data(sst, SST_IMRD),
+		read_shim_data(sst, sst->ipc_reg.ipcx),
+		read_shim_data(sst, sst->ipc_reg.ipcd),
+		read_shim_data(sst, SST_ISRSC),
+		read_shim_data(sst, SST_ISRLPESC),
+		read_shim_data(sst, SST_IMRSC),
+		read_shim_data(sst, SST_IMRLPESC),
+		read_shim_data(sst, SST_IPCSC),
+		read_shim_data(sst, SST_IPCLPESC),
+		read_shim_data(sst, SST_CLKCTL),
+		read_shim_data(sst, SST_CSR2));
+	spin_unlock_irqrestore(&sst->ipc_spin_lock, irq_flags);
+}
+
+void reset_sst_shim(struct intel_sst_drv *sst)
+{
+	union config_status_reg_mrfld csr;
+
+	pr_err("Resetting few Shim registers\n");
+	write_shim_data(sst, sst->ipc_reg.ipcx, 0x0);
+	write_shim_data(sst, sst->ipc_reg.ipcd, 0x0);
+	write_shim_data(sst, SST_ISRX, 0x0);
+	write_shim_data(sst, SST_ISRD, 0x0);
+	write_shim_data(sst, SST_IPCSC, 0x0);
+	write_shim_data(sst, SST_IPCLPESC, 0x0);
+	write_shim_data(sst, SST_ISRSC, 0x0);
+	write_shim_data(sst, SST_ISRLPESC, 0x0);
+	write_shim_data(sst, SST_PISR, 0x0);
+
+	/* Reset the CSR value to the default value. i.e 0x1e40001*/
+	csr.full = sst_shim_read64(sst_drv_ctx->shim, SST_CSR);
+	csr.part.xt_snoop = 0;
+	csr.full &= ~(0xf);
+	csr.full |= 0x01;
+	sst_shim_write64(sst_drv_ctx->shim, SST_CSR, csr.full);
+}
+
+/**
+ * dump_ram_area - dumps the iram/dram into a local buff
+ *
+ * @sst			: pointer to driver context
+ * @recovery		: pointer to the struct containing buffers
+ * @iram		: true if iram dump else false
+ * This function dumps the iram dram data into the respective buffers
+ */
+static void dump_ram_area(struct intel_sst_drv *sst,
+			struct sst_dump_buf *dump_buf, enum sst_ram_type type)
+{
+	if (type == SST_IRAM) {
+		pr_err("Iram dumped in buffer\n");
+		memcpy_fromio(dump_buf->iram_buf.buf, sst->iram,
+				dump_buf->iram_buf.size);
+	} else {
+		pr_err("Dram dumped in buffer\n");
+		memcpy_fromio(dump_buf->dram_buf.buf, sst->dram,
+				dump_buf->dram_buf.size);
+	}
+}
+
+/*FIXME Disabling IRAM/DRAM dump for timeout issues */
+static void sst_stream_recovery(struct intel_sst_drv *sst)
+{
+	struct stream_info *str_info;
+	u8 i;
+	for (i = 1; i <= sst->info.max_streams; i++) {
+		pr_err("Audio: Stream %d, state %d\n", i, sst->streams[i].status);
+		if (sst->streams[i].status != STREAM_UN_INIT) {
+			str_info = &sst_drv_ctx->streams[i];
+			if (str_info->pcm_substream)
+				snd_pcm_stop(str_info->pcm_substream, SNDRV_PCM_STATE_SETUP);
+			else if (str_info->compr_cb_param)
+				snd_compr_stop(str_info->compr_cb_param);
+			sst->streams[i].status = STREAM_RESET;
+		}
+	}
+}
+
+static void sst_dump_ipc_dispatch_lists(struct intel_sst_drv *sst)
+{
+	struct ipc_post *m, *_m;
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&sst->ipc_spin_lock, irq_flags);
+	if (list_empty(&sst->ipc_dispatch_list))
+		pr_err("ipc dispatch list is Empty\n");
+
+	list_for_each_entry_safe(m, _m, &sst->ipc_dispatch_list, node) {
+		pr_err("ipc-dispatch:pending msg header %#x\n", m->header.full);
+		list_del(&m->node);
+		kfree(m->mailbox_data);
+		kfree(m);
+	}
+	spin_unlock_irqrestore(&sst->ipc_spin_lock, irq_flags);
+}
+
+static void sst_dump_rx_lists(struct intel_sst_drv *sst)
+{
+	struct ipc_post *m, *_m;
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&sst->rx_msg_lock, irq_flags);
+	if (list_empty(&sst->rx_list))
+		pr_err("rx msg list is empty\n");
+
+	list_for_each_entry_safe(m, _m, &sst->rx_list, node) {
+		pr_err("rx: pending msg header %#x\n", m->header.full);
+		list_del(&m->node);
+		kfree(m->mailbox_data);
+		kfree(m);
+	}
+	spin_unlock_irqrestore(&sst->rx_msg_lock, irq_flags);
+}
+
+/* num_dwords: should be multiple of 4 */
+static void dump_buffer_fromio(void __iomem *from,
+				     unsigned int num_dwords)
+{
+	int i;
+	u32 val[4];
+
+	if (num_dwords % 4) {
+		pr_err("%s: num_dwords %d not multiple of 4\n",
+				__func__, num_dwords);
+		return;
+	}
+
+	pr_err("****** Start *******\n");
+	pr_err("Dump %d dwords, from location %p\n", num_dwords, from);
+
+	for (i = 0; i < num_dwords; ) {
+		val[0] = ioread32(from + (i++ * 4));
+		val[1] = ioread32(from + (i++ * 4));
+		val[2] = ioread32(from + (i++ * 4));
+		val[3] = ioread32(from + (i++ * 4));
+		pr_err("%.8x %.8x %.8x %.8x\n", val[0], val[1], val[2], val[3]);
+	}
+	pr_err("****** End *********\n\n\n");
+}
+
+static void sst_stall_lpe_n_wait(struct intel_sst_drv *sst)
+{
+	union config_status_reg_mrfld csr;
+	void __iomem *dma_reg0 = sst->debugfs.dma_reg[0];
+	void __iomem *dma_reg1 = sst->debugfs.dma_reg[1];
+	int offset = 0x3A0; /* ChEnReg of DMA */
+
+
+	pr_err("Before stall: DMA_0 Ch_EN %#llx DMA_1 Ch_EN %#llx\n",
+				sst_reg_read64(dma_reg0, offset),
+				sst_reg_read64(dma_reg1, offset));
+
+	/* Stall LPE */
+	csr.full = sst_shim_read64(sst->shim, SST_CSR);
+	csr.part.runstall  = 1;
+	sst_shim_write64(sst->shim, SST_CSR, csr.full);
+
+	/* A 5ms delay, before resetting the LPE */
+	usleep_range(5000, 5100);
+
+	pr_err("After stall: DMA_0 Ch_EN %#llx DMA_1 Ch_EN %#llx\n",
+				sst_reg_read64(dma_reg0, offset),
+				sst_reg_read64(dma_reg1, offset));
+}
+
+#if IS_ENABLED(CONFIG_INTEL_SCU_IPC)
+static void sst_send_scu_reset_ipc(struct intel_sst_drv *sst)
+{
+	int ret = 0;
+
+	/* Reset and power gate the LPE */
+	ret = intel_scu_ipc_simple_command(IPC_SCU_LPE_RESET, 0);
+	if (ret) {
+		pr_err("Power gating LPE failed %d\n", ret);
+		reset_sst_shim(sst);
+	} else {
+		pr_err("LPE reset via SCU is success!!\n");
+		pr_err("dump after LPE power cycle\n");
+		dump_sst_shim(sst);
+
+		/* Mask the DMA & SSP interrupts */
+		sst_shim_write64(sst->shim, SST_IMRX, 0xFFFF0038);
+	}
+}
+#else
+static void sst_send_scu_reset_ipc(struct intel_sst_drv *sst)
+{
+	pr_debug("%s: do nothing, just return\n", __func__);
+}
+#endif
+
+#define SRAM_OFFSET_MRFLD	0xc00
+#define NUM_DWORDS		256
+void sst_do_recovery_mrfld(struct intel_sst_drv *sst)
+{
+	char iram_event[30], dram_event[30], ddr_imr_event[65], event_type[30];
+	char *envp[5];
+	int env_offset = 0;
+
+	/*
+	 * setting firmware state as RESET so that the firmware will get
+	 * redownloaded on next request.This is because firmare not responding
+	 * for 1 sec is equalant to some unrecoverable error of FW.
+	 */
+	pr_err("Audio: Intel SST engine encountered an unrecoverable error\n");
+	pr_err("Audio: trying to reset the dsp now\n");
+
+	mutex_lock(&sst->sst_lock);
+	sst->sst_state = SST_RESET;
+	sst_stream_recovery(sst);
+	mutex_unlock(&sst->sst_lock);
+
+	dump_stack();
+	dump_sst_shim(sst);
+
+	sst_stall_lpe_n_wait(sst);
+
+	/* dump mailbox and sram */
+	pr_err("Dumping Mailbox...\n");
+	dump_buffer_fromio(sst->mailbox, NUM_DWORDS);
+	pr_err("Dumping SRAM...\n");
+	dump_buffer_fromio(sst->mailbox + SRAM_OFFSET_MRFLD, NUM_DWORDS);
+
+	if (sst_drv_ctx->ops->set_bypass) {
+
+		sst_drv_ctx->ops->set_bypass(true);
+		dump_ram_area(sst, &(sst->dump_buf), SST_IRAM);
+		dump_ram_area(sst, &(sst->dump_buf), SST_DRAM);
+		sst_drv_ctx->ops->set_bypass(false);
+
+	}
+
+	snprintf(event_type, sizeof(event_type), "EVENT_TYPE=SST_RECOVERY");
+	envp[env_offset++] = event_type;
+	snprintf(iram_event, sizeof(iram_event), "IRAM_DUMP_SIZE=%d",
+					sst->dump_buf.iram_buf.size);
+	envp[env_offset++] = iram_event;
+	snprintf(dram_event, sizeof(dram_event), "DRAM_DUMP_SIZE=%d",
+					sst->dump_buf.dram_buf.size);
+	envp[env_offset++] = dram_event;
+
+	if (sst->ddr != NULL) {
+		snprintf(ddr_imr_event, sizeof(ddr_imr_event),
+		"DDR_IMR_DUMP_SIZE=%d DDR_IMR_ADDRESS=%p", (sst->ddr_end - sst->ddr_base), sst->ddr);
+		envp[env_offset++] = ddr_imr_event;
+	}
+	envp[env_offset] = NULL;
+	kobject_uevent_env(&sst->dev->kobj, KOBJ_CHANGE, envp);
+	pr_err("Recovery Uevent Sent!!\n");
+
+	/* Send IPC to SCU to power gate and reset the LPE */
+	sst_send_scu_reset_ipc(sst);
+
+	pr_err("reset the pvt id from val %d\n", sst_drv_ctx->pvt_id);
+	spin_lock(&sst_drv_ctx->pvt_id_lock);
+	sst_drv_ctx->pvt_id = 0;
+	spin_unlock(&sst_drv_ctx->pvt_id_lock);
+	sst_dump_ipc_dispatch_lists(sst_drv_ctx);
+	sst_dump_rx_lists(sst_drv_ctx);
+
+	if (sst_drv_ctx->fw_in_mem) {
+		pr_err("Clearing the cached FW copy...\n");
+		kfree(sst_drv_ctx->fw_in_mem);
+		sst_drv_ctx->fw_in_mem = NULL;
+	}
+}
+
+void sst_do_recovery(struct intel_sst_drv *sst)
+{
+	pr_err("Audio: Intel SST engine encountered an unrecoverable error\n");
+
+	dump_stack();
+	dump_sst_shim(sst);
+
+	sst_dump_ipc_dispatch_lists(sst_drv_ctx);
+
+}
+
+/*
+ * sst_wait_timeout - wait on event for timeout
+ *
+ * @sst_drv_ctx: Driver context
+ * @block: Driver block to wait on
+ *
+ * This function waits with a timeout value (and is not interruptible) on a
+ * given block event
+ */
+int sst_wait_timeout(struct intel_sst_drv *sst_drv_ctx, struct sst_block *block)
+{
+	int retval = 0;
+
+	/* NOTE:
+	Observed that FW processes the alloc msg and replies even
+	before the alloc thread has finished execution */
+	pr_debug("sst: waiting for condition %x ipc %d drv_id %d\n",
+		       block->condition, block->msg_id, block->drv_id);
+	if (wait_event_timeout(sst_drv_ctx->wait_queue,
+				block->condition,
+				msecs_to_jiffies(SST_BLOCK_TIMEOUT))) {
+		/* event wake */
+		pr_debug("sst: Event wake %x\n", block->condition);
+		pr_debug("sst: message ret: %d\n", block->ret_code);
+		retval = -block->ret_code;
+	} else {
+		block->on = false;
+		pr_err("sst: Wait timed-out condition:%#x, msg_id:%#x fw_state %#x\n",
+				block->condition, block->msg_id, sst_drv_ctx->sst_state);
+
+		if (sst_drv_ctx->sst_state == SST_FW_LOADING) {
+			pr_err("Can't recover as timedout while downloading the FW\n");
+			pr_err("reseting fw state to RESET from %d ...\n", sst_drv_ctx->sst_state);
+			sst_drv_ctx->sst_state = SST_RESET;
+
+			dump_sst_shim(sst_drv_ctx);
+
+			/* Reset & Power Off the LPE only for MRFLD */
+			if (sst_drv_ctx->pci_id == SST_MRFLD_PCI_ID) {
+				sst_stall_lpe_n_wait(sst_drv_ctx);
+
+				/* Send IPC to SCU to power gate and reset the LPE */
+				sst_send_scu_reset_ipc(sst_drv_ctx);
+			}
+
+		} else {
+			if (sst_drv_ctx->ops->do_recovery)
+				sst_drv_ctx->ops->do_recovery(sst_drv_ctx);
+		}
+
+		retval = -EBUSY;
+	}
+	return retval;
+}
+
+/*
+ * sst_create_ipc_msg - create a IPC message
+ *
+ * @arg: ipc message
+ * @large: large or short message
+ *
+ * this function allocates structures to send a large or short
+ * message to the firmware
+ */
+int sst_create_ipc_msg(struct ipc_post **arg, bool large)
+{
+	struct ipc_post *msg;
+
+	msg = kzalloc(sizeof(struct ipc_post), GFP_ATOMIC);
+	if (!msg) {
+		pr_err("kzalloc ipc msg failed\n");
+		return -ENOMEM;
+	}
+	if (large) {
+		msg->mailbox_data = kzalloc(SST_MAILBOX_SIZE, GFP_ATOMIC);
+		if (!msg->mailbox_data) {
+			kfree(msg);
+			pr_err("kzalloc mailbox_data failed");
+			return -ENOMEM;
+		}
+	} else {
+		msg->mailbox_data = NULL;
+	}
+	msg->is_large = large;
+	*arg = msg;
+	return 0;
+}
+
+/*
+ * sst_create_block_and_ipc_msg - Creates IPC message and sst block
+ * @arg: passed to sst_create_ipc_message API
+ * @large: large or short message
+ * @sst_drv_ctx: sst driver context
+ * @block: return block allocated
+ * @msg_id: IPC
+ * @drv_id: stream id or private id
+ */
+int sst_create_block_and_ipc_msg(struct ipc_post **arg, bool large,
+		struct intel_sst_drv *sst_drv_ctx, struct sst_block **block,
+		u32 msg_id, u32 drv_id)
+{
+	int retval = 0;
+	retval = sst_create_ipc_msg(arg, large);
+	if (retval)
+		return retval;
+	*block = sst_create_block(sst_drv_ctx, msg_id, drv_id);
+	if (*block == NULL) {
+		kfree(*arg);
+		return -ENOMEM;
+	}
+	return retval;
+}
+
+/*
+ * sst_clean_stream - clean the stream context
+ *
+ * @stream: stream structure
+ *
+ * this function resets the stream contexts
+ * should be called in free
+ */
+void sst_clean_stream(struct stream_info *stream)
+{
+	stream->status = STREAM_UN_INIT;
+	stream->prev = STREAM_UN_INIT;
+	mutex_lock(&stream->lock);
+	stream->cumm_bytes = 0;
+	mutex_unlock(&stream->lock);
+}
+
diff --git a/sound/soc/intel/sst/sst_stream.c b/sound/soc/intel/sst/sst_stream.c
new file mode 100644
index 0000000..0ce779d
--- /dev/null
+++ b/sound/soc/intel/sst/sst_stream.c
@@ -0,0 +1,831 @@
+/*
+ *  sst_stream.c - Intel SST Driver for audio engine
+ *
+ *  Copyright (C) 2008-10 Intel Corp
+ *  Authors:	Vinod Koul <vinod.koul@intel.com>
+ *		Harsha Priya <priya.harsha@intel.com>
+ *		Dharageswari R <dharageswari.r@intel.com>
+ *		KP Jeeja <jeeja.kp@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This file contains the stream operations of SST driver
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/pci.h>
+#include <linux/firmware.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <asm/platform_sst_audio.h>
+#include "../sst_platform.h"
+#include "../platform_ipc_v2.h"
+#include "sst.h"
+#include "sst_trace.h"
+
+/**
+ * sst_alloc_stream - Send msg for a new stream ID
+ *
+ * @params:	stream params
+ * @stream_ops:	operation of stream PB/capture
+ * @codec:	codec for stream
+ * @device:	device stream to be allocated for
+ *
+ * This function is called by any function which wants to start
+ * a new stream. This also check if a stream exists which is idle
+ * it initializes idle stream id to this request
+ */
+int sst_alloc_stream_ctp(char *params, struct sst_block *block)
+{
+	struct ipc_post *msg = NULL;
+	struct snd_sst_alloc_params alloc_param;
+	unsigned int pcm_slot = 0x03, num_ch;
+	int str_id;
+	struct snd_sst_params *str_params;
+	struct snd_sst_stream_params *sparams;
+	struct snd_sst_alloc_params_ext *aparams;
+	struct stream_info *str_info;
+	unsigned int stream_ops, device;
+	u8 codec;
+
+	pr_debug("In %s\n", __func__);
+
+	BUG_ON(!params);
+	str_params = (struct snd_sst_params *)params;
+	stream_ops = str_params->ops;
+	codec = str_params->codec;
+	device = str_params->device_type;
+	sparams = &str_params->sparams;
+	aparams = &str_params->aparams;
+	num_ch = sst_get_num_channel(str_params);
+
+	pr_debug("period_size = %d\n", aparams->frag_size);
+	pr_debug("ring_buf_addr = 0x%x\n", aparams->ring_buf_info[0].addr);
+	pr_debug("ring_buf_size = %d\n", aparams->ring_buf_info[0].size);
+	pr_debug("In alloc device_type=%d\n", str_params->device_type);
+	pr_debug("In alloc sg_count =%d\n", aparams->sg_count);
+
+	str_id = str_params->stream_id;
+	if (str_id <= 0)
+		return -EBUSY;
+
+	/*allocate device type context*/
+	sst_init_stream(&sst_drv_ctx->streams[str_id], codec,
+			str_id, stream_ops, pcm_slot);
+	/* send msg to FW to allocate a stream */
+	if (sst_create_ipc_msg(&msg, true))
+		return -ENOMEM;
+
+	alloc_param.str_type.codec_type = codec;
+	alloc_param.str_type.str_type = str_params->stream_type;
+	alloc_param.str_type.operation = stream_ops;
+	alloc_param.str_type.protected_str = 0; /* non drm */
+	alloc_param.str_type.time_slots = pcm_slot;
+	alloc_param.str_type.reserved = 0;
+	alloc_param.str_type.result = 0;
+	memcpy(&alloc_param.stream_params, sparams,
+			sizeof(struct snd_sst_stream_params));
+	memcpy(&alloc_param.alloc_params, aparams,
+			sizeof(struct snd_sst_alloc_params_ext));
+	block->drv_id = str_id;
+	block->msg_id = IPC_IA_ALLOC_STREAM;
+	sst_fill_header(&msg->header, IPC_IA_ALLOC_STREAM, 1, str_id);
+	msg->header.part.data = sizeof(alloc_param) + sizeof(u32);
+	memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
+	memcpy(msg->mailbox_data + sizeof(u32), &alloc_param,
+			sizeof(alloc_param));
+	str_info = &sst_drv_ctx->streams[str_id];
+	str_info->num_ch = num_ch;
+	sst_add_to_dispatch_list_and_post(sst_drv_ctx, msg);
+	return str_id;
+}
+
+int sst_alloc_stream_mrfld(char *params, struct sst_block *block)
+{
+	struct ipc_post *msg = NULL;
+	struct snd_sst_alloc_mrfld alloc_param;
+	struct ipc_dsp_hdr dsp_hdr;
+	struct snd_sst_params *str_params;
+	struct snd_sst_tstamp fw_tstamp;
+	unsigned int str_id, pipe_id, pvt_id, task_id;
+	u32 len = 0;
+	struct stream_info *str_info;
+	int i, num_ch;
+
+	pr_debug("In %s\n", __func__);
+	BUG_ON(!params);
+
+	str_params = (struct snd_sst_params *)params;
+	memset(&alloc_param, 0, sizeof(alloc_param));
+	alloc_param.operation = str_params->ops;
+	alloc_param.codec_type = str_params->codec;
+	alloc_param.sg_count = str_params->aparams.sg_count;
+	alloc_param.ring_buf_info[0].addr = str_params->aparams.ring_buf_info[0].addr;
+	alloc_param.ring_buf_info[0].size = str_params->aparams.ring_buf_info[0].size;
+	alloc_param.frag_size = str_params->aparams.frag_size;
+
+	memcpy(&alloc_param.codec_params, &str_params->sparams,
+			sizeof(struct snd_sst_stream_params));
+
+	/* fill channel map params for multichannel support.
+	 * Ideally channel map should be received from upper layers
+	 * for multichannel support.
+	 * Currently hardcoding as per FW reqm.
+	 */
+	num_ch = sst_get_num_channel(str_params);
+	for (i = 0; i < 8; i++) {
+		if (i < num_ch)
+			alloc_param.codec_params.uc.pcm_params.channel_map[i] = i;
+		else
+			alloc_param.codec_params.uc.pcm_params.channel_map[i] = 0xFF;
+	}
+
+	str_id = str_params->stream_id;
+	pipe_id = str_params->device_type;
+	task_id = str_params->task;
+	sst_drv_ctx->streams[str_id].pipe_id = pipe_id;
+	sst_drv_ctx->streams[str_id].task_id = task_id;
+	sst_drv_ctx->streams[str_id].num_ch = num_ch;
+
+	pvt_id = sst_assign_pvt_id(sst_drv_ctx);
+	if (sst_drv_ctx->info.lpe_viewpt_rqd)
+		alloc_param.ts = sst_drv_ctx->info.mailbox_start +
+			sst_drv_ctx->tstamp + (str_id * sizeof(fw_tstamp));
+	else
+		alloc_param.ts = sst_drv_ctx->mailbox_add +
+			sst_drv_ctx->tstamp + (str_id * sizeof(fw_tstamp));
+
+	pr_debug("alloc tstamp location = 0x%x\n", alloc_param.ts);
+	pr_debug("assigned pipe id 0x%x to task %d\n", pipe_id, task_id);
+
+	/*allocate device type context*/
+	sst_init_stream(&sst_drv_ctx->streams[str_id], alloc_param.codec_type,
+			str_id, alloc_param.operation, 0);
+	/* send msg to FW to allocate a stream */
+	if (sst_create_ipc_msg(&msg, true))
+		return -ENOMEM;
+
+	block->drv_id = pvt_id;
+	block->msg_id = IPC_CMD;
+
+	sst_fill_header_mrfld(&msg->mrfld_header, IPC_CMD,
+			      task_id, 1, pvt_id);
+	pr_debug("header:%x\n", msg->mrfld_header.p.header_high.full);
+	msg->mrfld_header.p.header_high.part.res_rqd = 1;
+
+	len = msg->mrfld_header.p.header_low_payload = sizeof(alloc_param) + sizeof(dsp_hdr);
+	sst_fill_header_dsp(&dsp_hdr, IPC_IA_ALLOC_STREAM_MRFLD, pipe_id, sizeof(alloc_param));
+	memcpy(msg->mailbox_data, &dsp_hdr, sizeof(dsp_hdr));
+	memcpy(msg->mailbox_data + sizeof(dsp_hdr), &alloc_param,
+			sizeof(alloc_param));
+	trace_sst_stream("ALLOC ->", str_id, pipe_id);
+	str_info = &sst_drv_ctx->streams[str_id];
+	pr_debug("header:%x\n", msg->mrfld_header.p.header_high.full);
+	pr_debug("response rqd: %x", msg->mrfld_header.p.header_high.part.res_rqd);
+	pr_debug("calling post_message\n");
+	pr_info("Alloc for str %d pipe %#x\n", str_id, pipe_id);
+
+	sst_add_to_dispatch_list_and_post(sst_drv_ctx, msg);
+	return str_id;
+}
+
+/**
+* sst_stream_stream - Send msg for a pausing stream
+* @str_id:	 stream ID
+*
+* This function is called by any function which wants to start
+* a stream.
+*/
+int sst_start_stream(int str_id)
+{
+	int retval = 0, pvt_id;
+	u32 len = 0;
+	struct ipc_post *msg = NULL;
+	struct ipc_dsp_hdr dsp_hdr;
+	struct stream_info *str_info;
+
+	pr_debug("sst_start_stream for %d\n", str_id);
+	str_info = get_stream_info(str_id);
+	if (!str_info)
+		return -EINVAL;
+	if (str_info->status != STREAM_RUNNING)
+		return -EBADRQC;
+
+	if (sst_create_ipc_msg(&msg, true))
+		return -ENOMEM;
+
+	if (!sst_drv_ctx->use_32bit_ops) {
+		pvt_id = sst_assign_pvt_id(sst_drv_ctx);
+		pr_debug("pvt_id = %d, pipe id = %d, task = %d\n",
+			 pvt_id, str_info->pipe_id, str_info->task_id);
+		sst_fill_header_mrfld(&msg->mrfld_header, IPC_CMD,
+				      str_info->task_id, 1, pvt_id);
+
+		len = sizeof(u16) + sizeof(dsp_hdr);
+		msg->mrfld_header.p.header_low_payload = len;
+		sst_fill_header_dsp(&dsp_hdr, IPC_IA_START_STREAM_MRFLD,
+				str_info->pipe_id, sizeof(u16));
+		memcpy(msg->mailbox_data, &dsp_hdr, sizeof(dsp_hdr));
+		memset(msg->mailbox_data + sizeof(dsp_hdr), 0, sizeof(u16));
+		trace_sst_stream("START ->", str_id, str_info->pipe_id);
+		pr_info("Start for str %d pipe %#x\n", str_id, str_info->pipe_id);
+
+	} else {
+		pr_debug("fill START_STREAM for CTP\n");
+		sst_fill_header(&msg->header, IPC_IA_START_STREAM, 1, str_id);
+		msg->header.part.data =  sizeof(u32) + sizeof(u32);
+		memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
+		memset(msg->mailbox_data + sizeof(u32), 0, sizeof(u32));
+	}
+	sst_drv_ctx->ops->sync_post_message(msg);
+	return retval;
+}
+
+int sst_send_byte_stream_mrfld(void *sbytes)
+{
+	struct ipc_post *msg = NULL;
+	struct snd_sst_bytes_v2 *bytes = (struct snd_sst_bytes_v2 *) sbytes;
+	u32 length;
+	int pvt_id, ret = 0;
+	struct sst_block *block = NULL;
+
+	pr_debug("%s: type:%u ipc_msg:%u block:%u task_id:%u pipe: %#x length:%#x\n",
+		__func__, bytes->type, bytes->ipc_msg,
+		bytes->block, bytes->task_id,
+		bytes->pipe_id, bytes->len);
+
+	/* need some err check as this is user data, perhpas move this to the
+	 * platform driver and pass the struct
+	 */
+	if (sst_create_ipc_msg(&msg, true))
+		return -ENOMEM;
+
+	pvt_id = sst_assign_pvt_id(sst_drv_ctx);
+	sst_fill_header_mrfld(&msg->mrfld_header, bytes->ipc_msg, bytes->task_id,
+			      1, pvt_id);
+	msg->mrfld_header.p.header_high.part.res_rqd = bytes->block;
+	length = bytes->len;
+	msg->mrfld_header.p.header_low_payload = length;
+	pr_debug("length is %d\n", length);
+	memcpy(msg->mailbox_data, &bytes->bytes, bytes->len);
+	trace_sst_stream("BYTES ->", bytes->type, bytes->pipe_id);
+	if (bytes->block) {
+		block = sst_create_block(sst_drv_ctx, bytes->ipc_msg, pvt_id);
+		if (block == NULL) {
+			kfree(msg);
+			return -ENOMEM;
+		}
+	}
+	sst_add_to_dispatch_list_and_post(sst_drv_ctx, msg);
+	pr_debug("msg->mrfld_header.p.header_low_payload:%d", msg->mrfld_header.p.header_low_payload);
+	if (bytes->block) {
+		ret = sst_wait_timeout(sst_drv_ctx, block);
+		if (ret) {
+			pr_err("%s: fw returned err %d\n", __func__, ret);
+			sst_free_block(sst_drv_ctx, block);
+			return ret;
+		}
+	}
+	if (bytes->type == SND_SST_BYTES_GET) {
+		/* copy the reply and send back
+		 * we need to update only sz and payload
+		 */
+		if (bytes->block) {
+			unsigned char *r = block->data;
+			pr_debug("read back %d bytes", bytes->len);
+			memcpy(bytes->bytes, r, bytes->len);
+			trace_sst_stream("BYTES <-", bytes->type, bytes->pipe_id);
+		}
+	}
+	if (bytes->block)
+		sst_free_block(sst_drv_ctx, block);
+	return 0;
+}
+
+int sst_send_probe_bytes(struct intel_sst_drv *sst)
+{
+	struct ipc_post *msg = NULL;
+	struct sst_block *block;
+	int ret_val = 0;
+
+	ret_val = sst_create_block_and_ipc_msg(&msg, true, sst,
+			&block, IPC_IA_DBG_SET_PROBE_PARAMS, 0);
+	if (ret_val) {
+		pr_err("Can't allocate block/msg: Probe Byte Stream\n");
+		return ret_val;
+	}
+
+	sst_fill_header(&msg->header, IPC_IA_DBG_SET_PROBE_PARAMS, 1, 0);
+
+	msg->header.part.data = sizeof(u32) + sst->probe_bytes->len;
+	memcpy(msg->mailbox_data, &msg->header.full, sizeof(u32));
+	memcpy(msg->mailbox_data + sizeof(u32), sst->probe_bytes->bytes,
+				sst->probe_bytes->len);
+
+	sst_add_to_dispatch_list_and_post(sst, msg);
+	ret_val = sst_wait_timeout(sst, block);
+	sst_free_block(sst, block);
+	if (ret_val)
+		pr_err("set probe stream param..timeout!\n");
+	return ret_val;
+}
+
+/*
+ * sst_pause_stream - Send msg for a pausing stream
+ * @str_id:	 stream ID
+ *
+ * This function is called by any function which wants to pause
+ * an already running stream.
+ */
+int sst_pause_stream(int str_id)
+{
+	int retval = 0, pvt_id, len;
+	struct ipc_post *msg = NULL;
+	struct stream_info *str_info;
+	struct intel_sst_ops *ops;
+	struct sst_block *block;
+	struct ipc_dsp_hdr dsp_hdr;
+
+	pr_debug("SST DBG:sst_pause_stream for %d\n", str_id);
+	str_info = get_stream_info(str_id);
+	if (!str_info)
+		return -EINVAL;
+	ops = sst_drv_ctx->ops;
+	if (str_info->status == STREAM_PAUSED)
+		return 0;
+	if (str_info->status == STREAM_RUNNING ||
+		str_info->status == STREAM_INIT) {
+		if (str_info->prev == STREAM_UN_INIT)
+			return -EBADRQC;
+		if (!sst_drv_ctx->use_32bit_ops) {
+			pvt_id = sst_assign_pvt_id(sst_drv_ctx);
+			retval = sst_create_block_and_ipc_msg(&msg, true,
+					sst_drv_ctx, &block, IPC_CMD, pvt_id);
+			if (retval)
+				return retval;
+			sst_fill_header_mrfld(&msg->mrfld_header, IPC_CMD,
+					str_info->task_id, 1, pvt_id);
+			msg->mrfld_header.p.header_high.part.res_rqd = 1;
+			len = sizeof(dsp_hdr);
+			msg->mrfld_header.p.header_low_payload = len;
+			sst_fill_header_dsp(&dsp_hdr, IPC_IA_PAUSE_STREAM_MRFLD,
+						str_info->pipe_id, 0);
+			memcpy(msg->mailbox_data, &dsp_hdr, sizeof(dsp_hdr));
+			trace_sst_stream("PAUSE ->", str_id, str_info->pipe_id);
+		} else {
+			retval = sst_create_block_and_ipc_msg(&msg, false,
+					sst_drv_ctx, &block,
+					IPC_IA_PAUSE_STREAM, str_id);
+			if (retval)
+				return retval;
+			sst_fill_header(&msg->header, IPC_IA_PAUSE_STREAM,
+								0, str_id);
+		}
+		sst_add_to_dispatch_list_and_post(sst_drv_ctx, msg);
+		retval = sst_wait_timeout(sst_drv_ctx, block);
+		sst_free_block(sst_drv_ctx, block);
+		if (retval == 0) {
+			str_info->prev = str_info->status;
+			str_info->status = STREAM_PAUSED;
+		} else if (retval == SST_ERR_INVALID_STREAM_ID) {
+			retval = -EINVAL;
+			mutex_lock(&sst_drv_ctx->stream_lock);
+			sst_clean_stream(str_info);
+			mutex_unlock(&sst_drv_ctx->stream_lock);
+		}
+	} else {
+		retval = -EBADRQC;
+		pr_debug("SST DBG:BADRQC for stream\n ");
+	}
+
+	return retval;
+}
+
+/**
+ * sst_resume_stream - Send msg for resuming stream
+ * @str_id:		stream ID
+ *
+ * This function is called by any function which wants to resume
+ * an already paused stream.
+ */
+int sst_resume_stream(int str_id)
+{
+	int retval = 0;
+	struct ipc_post *msg = NULL;
+	struct stream_info *str_info;
+	struct intel_sst_ops *ops;
+	struct sst_block *block = NULL;
+	int pvt_id, len;
+	struct ipc_dsp_hdr dsp_hdr;
+
+	pr_debug("SST DBG:sst_resume_stream for %d\n", str_id);
+	str_info = get_stream_info(str_id);
+	if (!str_info)
+		return -EINVAL;
+	ops = sst_drv_ctx->ops;
+	if (str_info->status == STREAM_RUNNING)
+			return 0;
+	if (str_info->status == STREAM_PAUSED) {
+		if (!sst_drv_ctx->use_32bit_ops) {
+			pvt_id = sst_assign_pvt_id(sst_drv_ctx);
+			retval = sst_create_block_and_ipc_msg(&msg, true,
+					sst_drv_ctx, &block, IPC_CMD, pvt_id);
+			if (retval)
+				return retval;
+			sst_fill_header_mrfld(&msg->mrfld_header, IPC_CMD,
+					str_info->task_id, 1, pvt_id);
+			msg->mrfld_header.p.header_high.part.res_rqd = 1;
+			len = sizeof(dsp_hdr);
+			msg->mrfld_header.p.header_low_payload = len;
+			sst_fill_header_dsp(&dsp_hdr,
+						IPC_IA_RESUME_STREAM_MRFLD,
+						str_info->pipe_id, 0);
+			memcpy(msg->mailbox_data, &dsp_hdr, sizeof(dsp_hdr));
+			trace_sst_stream("RESUME->", str_id, str_info->pipe_id);
+		} else {
+			retval = sst_create_block_and_ipc_msg(&msg, false,
+					sst_drv_ctx, &block,
+					IPC_IA_RESUME_STREAM, str_id);
+			if (retval)
+				return retval;
+			sst_fill_header(&msg->header, IPC_IA_RESUME_STREAM,
+								0, str_id);
+		}
+		sst_add_to_dispatch_list_and_post(sst_drv_ctx, msg);
+		retval = sst_wait_timeout(sst_drv_ctx, block);
+		sst_free_block(sst_drv_ctx, block);
+		if (!retval) {
+			if (str_info->prev == STREAM_RUNNING)
+				str_info->status = STREAM_RUNNING;
+			else
+				str_info->status = STREAM_INIT;
+			str_info->prev = STREAM_PAUSED;
+		} else if (retval == -SST_ERR_INVALID_STREAM_ID) {
+			retval = -EINVAL;
+			mutex_lock(&sst_drv_ctx->stream_lock);
+			sst_clean_stream(str_info);
+			mutex_unlock(&sst_drv_ctx->stream_lock);
+		}
+	} else {
+		retval = -EBADRQC;
+		pr_err("SST ERR: BADQRC for stream\n");
+	}
+
+	return retval;
+}
+
+
+/**
+ * sst_drop_stream - Send msg for stopping stream
+ * @str_id:		stream ID
+ *
+ * This function is called by any function which wants to stop
+ * a stream.
+ */
+int sst_drop_stream(int str_id)
+{
+	int retval = 0, pvt_id;
+	struct stream_info *str_info;
+	struct ipc_post *msg = NULL;
+	struct ipc_dsp_hdr dsp_hdr;
+
+	pr_debug("SST DBG:sst_drop_stream for %d\n", str_id);
+	str_info = get_stream_info(str_id);
+	if (!str_info)
+		return -EINVAL;
+
+	if (str_info->status != STREAM_UN_INIT) {
+
+		if (sst_drv_ctx->use_32bit_ops == true) {
+			str_info->prev = STREAM_UN_INIT;
+			str_info->status = STREAM_INIT;
+			str_info->cumm_bytes = 0;
+			sst_send_sync_msg(IPC_IA_DROP_STREAM, str_id);
+		} else {
+			if (sst_create_ipc_msg(&msg, true))
+				return -ENOMEM;
+			str_info->prev = STREAM_UN_INIT;
+			str_info->status = STREAM_INIT;
+			str_info->cumm_bytes = 0;
+			pvt_id = sst_assign_pvt_id(sst_drv_ctx);
+			sst_fill_header_mrfld(&msg->mrfld_header, IPC_CMD,
+					      str_info->task_id, 1, pvt_id);
+
+			msg->mrfld_header.p.header_low_payload = sizeof(dsp_hdr);
+			sst_fill_header_dsp(&dsp_hdr, IPC_IA_DROP_STREAM_MRFLD,
+					str_info->pipe_id, 0);
+			memcpy(msg->mailbox_data, &dsp_hdr, sizeof(dsp_hdr));
+			trace_sst_stream("STOP  ->", str_id, str_info->pipe_id);
+			pr_info("Stop for str %d pipe %#x\n", str_id, str_info->pipe_id);
+
+			sst_drv_ctx->ops->sync_post_message(msg);
+		}
+	} else {
+		retval = -EBADRQC;
+		pr_debug("BADQRC for stream, state %x\n", str_info->status);
+	}
+	return retval;
+}
+
+/**
+ * sst_next_track: notify next track
+ * @str_id:		stream ID
+ *
+ * This function is called by any function which wants to
+ * set next track. Current this is NOP as FW doest care
+ */
+int sst_next_track(void)
+{
+	pr_debug("SST DBG: next_track");
+	return 0;
+}
+
+/**
+* sst_drain_stream - Send msg for draining stream
+* @str_id:		stream ID
+*
+* This function is called by any function which wants to drain
+* a stream.
+*/
+int sst_drain_stream(int str_id, bool partial_drain)
+{
+	int retval = 0, pvt_id, len;
+	struct ipc_post *msg = NULL;
+	struct stream_info *str_info;
+	struct intel_sst_ops *ops;
+	struct sst_block *block = NULL;
+	struct ipc_dsp_hdr dsp_hdr;
+
+	pr_debug("SST DBG:sst_drain_stream for %d\n", str_id);
+	str_info = get_stream_info(str_id);
+	if (!str_info)
+		return -EINVAL;
+	ops = sst_drv_ctx->ops;
+	if (str_info->status != STREAM_RUNNING &&
+		str_info->status != STREAM_INIT &&
+		str_info->status != STREAM_PAUSED) {
+			pr_err("SST ERR: BADQRC for stream = %d\n",
+				       str_info->status);
+			return -EBADRQC;
+	}
+
+	if (!sst_drv_ctx->use_32bit_ops) {
+		pvt_id = sst_assign_pvt_id(sst_drv_ctx);
+		retval = sst_create_block_and_ipc_msg(&msg, true,
+				sst_drv_ctx, &block, IPC_CMD, pvt_id);
+		if (retval)
+			return retval;
+		sst_fill_header_mrfld(&msg->mrfld_header, IPC_CMD,
+				str_info->task_id, 1, pvt_id);
+		pr_debug("header:%x\n",
+			(unsigned int)msg->mrfld_header.p.header_high.full);
+		msg->mrfld_header.p.header_high.part.res_rqd = 1;
+
+		len = sizeof(u8) + sizeof(dsp_hdr);
+		msg->mrfld_header.p.header_low_payload = len;
+		sst_fill_header_dsp(&dsp_hdr, IPC_IA_DRAIN_STREAM_MRFLD,
+					str_info->pipe_id, sizeof(u8));
+		memcpy(msg->mailbox_data, &dsp_hdr, sizeof(dsp_hdr));
+		memcpy(msg->mailbox_data + sizeof(dsp_hdr),
+				&partial_drain, sizeof(u8));
+		trace_sst_stream("DRAIN ->", str_id, str_info->pipe_id);
+	} else {
+		retval = sst_create_block_and_ipc_msg(&msg, false,
+				sst_drv_ctx, &block,
+				IPC_IA_DRAIN_STREAM, str_id);
+		if (retval)
+			return retval;
+		sst_fill_header(&msg->header, IPC_IA_DRAIN_STREAM, 0, str_id);
+		msg->header.part.data = partial_drain;
+	}
+	sst_add_to_dispatch_list_and_post(sst_drv_ctx, msg);
+	/* with new non blocked drain implementation in core we dont need to
+	 * wait for respsonse, and need to only invoke callback for drain
+	 * complete
+	 */
+
+	sst_free_block(sst_drv_ctx, block);
+	return retval;
+}
+
+/**
+ * sst_free_stream - Frees a stream
+ * @str_id:		stream ID
+ *
+ * This function is called by any function which wants to free
+ * a stream.
+ */
+int sst_free_stream(int str_id)
+{
+	int retval = 0;
+	unsigned int pvt_id;
+	struct ipc_post *msg = NULL;
+	struct stream_info *str_info;
+	struct intel_sst_ops *ops;
+	unsigned long irq_flags;
+	struct ipc_dsp_hdr dsp_hdr;
+	struct sst_block *block;
+
+	pr_debug("SST DBG:sst_free_stream for %d\n", str_id);
+
+	mutex_lock(&sst_drv_ctx->sst_lock);
+	if (sst_drv_ctx->sst_state == SST_RESET) {
+		mutex_unlock(&sst_drv_ctx->sst_lock);
+		return -ENODEV;
+	}
+	mutex_unlock(&sst_drv_ctx->sst_lock);
+	str_info = get_stream_info(str_id);
+	if (!str_info)
+		return -EINVAL;
+	ops = sst_drv_ctx->ops;
+
+	mutex_lock(&str_info->lock);
+	if (str_info->status != STREAM_UN_INIT) {
+		str_info->prev =  str_info->status;
+		str_info->status = STREAM_UN_INIT;
+		mutex_unlock(&str_info->lock);
+
+		if (!sst_drv_ctx->use_32bit_ops) {
+			pvt_id = sst_assign_pvt_id(sst_drv_ctx);
+			retval = sst_create_block_and_ipc_msg(&msg, true,
+					sst_drv_ctx, &block, IPC_CMD, pvt_id);
+			if (retval)
+				return retval;
+
+			sst_fill_header_mrfld(&msg->mrfld_header, IPC_CMD,
+					      str_info->task_id, 1, pvt_id);
+			msg->mrfld_header.p.header_low_payload =
+							sizeof(dsp_hdr);
+			sst_fill_header_dsp(&dsp_hdr, IPC_IA_FREE_STREAM_MRFLD,
+						str_info->pipe_id,  0);
+			memcpy(msg->mailbox_data, &dsp_hdr, sizeof(dsp_hdr));
+			trace_sst_stream("FREE  ->", str_id, str_info->pipe_id);
+			pr_info("Free for str %d pipe %#x\n", str_id, str_info->pipe_id);
+
+		} else {
+			retval = sst_create_block_and_ipc_msg(&msg, false,
+						sst_drv_ctx, &block,
+						IPC_IA_FREE_STREAM, str_id);
+			if (retval)
+				return retval;
+			sst_fill_header(&msg->header, IPC_IA_FREE_STREAM,
+								 0, str_id);
+		}
+		spin_lock_irqsave(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+		list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
+		spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+		if (!sst_drv_ctx->use_32bit_ops) {
+			/*FIXME: do we need to wake up drain stream here,
+			 * how to get the pvt_id and msg_id
+			 */
+		} else {
+			sst_wake_up_block(sst_drv_ctx, 0, str_id,
+				IPC_IA_DRAIN_STREAM, NULL, 0);
+		}
+		ops->post_message(&sst_drv_ctx->ipc_post_msg_wq);
+		retval = sst_wait_timeout(sst_drv_ctx, block);
+		pr_debug("sst: wait for free returned %d\n", retval);
+		mutex_lock(&sst_drv_ctx->stream_lock);
+		sst_clean_stream(str_info);
+		mutex_unlock(&sst_drv_ctx->stream_lock);
+		pr_debug("SST DBG:Stream freed\n");
+		sst_free_block(sst_drv_ctx, block);
+	} else {
+		mutex_unlock(&str_info->lock);
+		retval = -EBADRQC;
+		pr_debug("SST DBG:BADQRC for stream\n");
+	}
+
+	return retval;
+}
+
+int sst_request_vtsv_file(char *fname, struct intel_sst_drv *ctx,
+		void **out_file, u32 *out_size)
+{
+	int retval = 0;
+	const struct firmware *file;
+	void *ddr_virt_addr;
+	unsigned long file_base;
+
+	if (!ctx->pdata->lib_info) {
+		pr_err("lib_info pointer NULL\n");
+		return -EINVAL;
+	}
+
+	pr_debug("Requesting VTSV file %s now...\n", fname);
+	retval = request_firmware(&file, fname, ctx->dev);
+	if (file == NULL) {
+		pr_err("VTSV file is returning as null\n");
+		return -EINVAL;
+	}
+	if (retval) {
+		pr_err("request fw failed %d\n", retval);
+		return retval;
+	}
+
+	if ((*out_file == NULL) || (*out_size < file->size)) {
+		retval = sst_get_next_lib_mem(&ctx->lib_mem_mgr, file->size,
+			&file_base);
+		*out_file = (void *)file_base;
+	}
+	ddr_virt_addr = (unsigned char *)ctx->ddr +
+		(unsigned long)(*out_file - ctx->pdata->lib_info->mod_base);
+	memcpy(ddr_virt_addr, file->data, file->size);
+
+	*out_size = file->size;
+	release_firmware(file);
+	return 0;
+}
+
+int sst_format_vtsv_message(struct intel_sst_drv *ctx,
+	struct ipc_post **msgptr, struct sst_block **block)
+{
+	int retval = 0, pvt_id, len;
+	struct ipc_dsp_hdr dsp_hdr;
+	struct snd_sst_vtsv_info vinfo;
+	struct ipc_post *msg;
+
+	BUG_ON((unsigned long)(ctx->vcache.file1_in_mem) & 0xffffffff00000000ULL);
+	BUG_ON((unsigned long)(ctx->vcache.file2_in_mem) & 0xffffffff00000000ULL);
+
+	vinfo.vfiles[0].addr = (u32)((unsigned long)ctx->vcache.file1_in_mem
+				& 0xffffffff);
+	vinfo.vfiles[0].size = ctx->vcache.size1;
+	vinfo.vfiles[1].addr = (u32)((unsigned long)ctx->vcache.file2_in_mem
+				& 0xffffffff);
+	vinfo.vfiles[1].size = ctx->vcache.size2;
+
+	/* Create the vtsv message */
+	pvt_id = sst_assign_pvt_id(ctx);
+	retval = sst_create_block_and_ipc_msg(msgptr, true,
+			ctx, block, IPC_CMD, pvt_id);
+	if (retval)
+		return retval;
+	msg = *msgptr;
+	sst_fill_header_mrfld(&msg->mrfld_header, IPC_CMD,
+			SST_TASK_ID_AWARE, 1, pvt_id);
+	pr_debug("header:%x\n",
+			(unsigned int)msg->mrfld_header.p.header_high.full);
+	msg->mrfld_header.p.header_high.part.res_rqd = 1;
+
+	len = sizeof(vinfo) + sizeof(dsp_hdr);
+	msg->mrfld_header.p.header_low_payload = len;
+	sst_fill_header_dsp(&dsp_hdr, IPC_IA_VTSV_UPDATE_MODULES,
+				PIPE_VAD_OUT, sizeof(u8));
+	dsp_hdr.mod_id = SST_ALGO_VTSV;
+	memcpy(msg->mailbox_data, &dsp_hdr, sizeof(dsp_hdr));
+	memcpy(msg->mailbox_data + sizeof(dsp_hdr),
+			&vinfo, sizeof(vinfo));
+	return 0;
+}
+
+int sst_send_vtsv_data_to_fw(struct intel_sst_drv *ctx)
+{
+	int retval = 0;
+	struct ipc_post *msg = NULL;
+	struct sst_block *block = NULL;
+
+	/* Download both the data files */
+	retval = sst_request_vtsv_file("vtsv_net.bin", ctx,
+			&ctx->vcache.file1_in_mem, &ctx->vcache.size1);
+	if (retval) {
+		pr_err("vtsv data file1 request failed %d\n", retval);
+		return retval;
+	}
+
+	retval = sst_request_vtsv_file("vtsv_grammar.bin", ctx,
+			&ctx->vcache.file2_in_mem, &ctx->vcache.size2);
+	if (retval) {
+		pr_err("vtsv data file2 request failed %d\n", retval);
+		return retval;
+	}
+
+	retval = sst_format_vtsv_message(ctx, &msg, &block);
+	if (retval) {
+		pr_err("vtsv msg format failed %d\n", retval);
+		return retval;
+	}
+	sst_add_to_dispatch_list_and_post(ctx, msg);
+	retval = sst_wait_timeout(ctx, block);
+	if (retval)
+		pr_err("vtsv msg send to fw failed %d\n", retval);
+
+	sst_free_block(ctx, block);
+	return retval;
+}
diff --git a/sound/soc/intel/sst/sst_trace.h b/sound/soc/intel/sst/sst_trace.h
new file mode 100644
index 0000000..8d34ef3
--- /dev/null
+++ b/sound/soc/intel/sst/sst_trace.h
@@ -0,0 +1,147 @@
+/*
+ *  sst_trace.h - Intel SST Driver tracing support
+ *
+ *  Copyright (C) 2013	Intel Corp
+ *  Authors: Omair Mohammed Abdullah <omair.m.abdullah@linux.intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sst
+
+#if !defined(_TRACE_SST_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SST_H
+
+#include <linux/types.h>
+#include <linux/ktime.h>
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(sst_ipc,
+
+	TP_PROTO(const char *msg, u32 header_high, u32 header_low, int pvt_id),
+
+	TP_ARGS(msg, header_high, header_low, pvt_id),
+
+	TP_STRUCT__entry(
+		__string(info_msg,	msg)
+		__field(unsigned int,	val_l)
+		__field(unsigned int,	val_h)
+		__field(unsigned int,	id)
+	),
+
+	TP_fast_assign(
+		__assign_str(info_msg, msg);
+		__entry->val_l = header_low;
+		__entry->val_h = header_high;
+		__entry->id = pvt_id;
+	),
+
+	TP_printk("\t%s\t [%2u] = %#8.8x:%.4x", __get_str(info_msg),
+		  (unsigned int)__entry->id,
+		  (unsigned int)__entry->val_h, (unsigned int)__entry->val_l)
+
+);
+
+TRACE_EVENT(sst_stream,
+
+	TP_PROTO(const char *msg, int str_id, int pipe_id),
+
+	TP_ARGS(msg, str_id, pipe_id),
+
+	TP_STRUCT__entry(
+		__string(info_msg,	msg)
+		__field(unsigned int,	str_id)
+		__field(unsigned int,	pipe_id)
+	),
+
+	TP_fast_assign(
+		__assign_str(info_msg, msg);
+		__entry->str_id = str_id;
+		__entry->pipe_id = pipe_id;
+	),
+
+	TP_printk("\t%s\t str  = %2u, pipe = %#x", __get_str(info_msg),
+		  (unsigned int)__entry->str_id, (unsigned int)__entry->pipe_id)
+);
+
+TRACE_EVENT(sst_ipc_mailbox,
+
+	TP_PROTO(const char *mailbox, int mbox_len),
+
+	TP_ARGS(mailbox, mbox_len),
+
+	TP_STRUCT__entry(
+		__dynamic_array(char,	mbox,	(3 * mbox_len))
+	),
+
+	TP_fast_assign(
+		sst_dump_to_buffer(mailbox, mbox_len,
+				   __get_dynamic_array(mbox));
+	),
+
+	TP_printk("  %s", __get_str(mbox))
+
+);
+
+TRACE_EVENT(sst_lib_download,
+
+	TP_PROTO(const char *msg, const char *lib_name),
+
+	TP_ARGS(msg, lib_name),
+
+	TP_STRUCT__entry(
+		__string(info_msg, msg)
+		__string(info_lib_name, lib_name)
+	),
+
+	TP_fast_assign(
+		__assign_str(info_msg, msg);
+		__assign_str(info_lib_name, lib_name);
+	),
+
+	TP_printk("\t%s %s", __get_str(info_msg),
+			__get_str(info_lib_name))
+);
+
+TRACE_EVENT(sst_fw_download,
+
+	TP_PROTO(const char *msg, int fw_state),
+
+	TP_ARGS(msg, fw_state),
+
+	TP_STRUCT__entry(
+		__string(info_msg, msg)
+		__field(unsigned int,   fw_state)
+	),
+
+	TP_fast_assign(
+		__assign_str(info_msg, msg);
+		__entry->fw_state = fw_state;
+	),
+
+	TP_printk("\t%s\tFW state = %d", __get_str(info_msg),
+				(unsigned int)__entry->fw_state)
+);
+
+#endif /* _TRACE_SST_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE sst_trace
+#include <trace/define_trace.h>
diff --git a/sound/soc/intel/sst_platform.h b/sound/soc/intel/sst_platform.h
new file mode 100644
index 0000000..5d231b5
--- /dev/null
+++ b/sound/soc/intel/sst_platform.h
@@ -0,0 +1,155 @@
+/*
+ *  sst_platform.h - Intel MID Platform driver header file
+ *
+ *  Copyright (C) 2010 Intel Corp
+ *  Author: Vinod Koul <vinod.koul@intel.com>
+ *  Author: Harsha Priya <priya.harsha@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *
+ */
+
+#ifndef __SST_PLATFORM_H__
+#define __SST_PLATFORM_H__
+
+#include <sound/soc.h>
+
+#define SST_MAX_BIN_BYTES 1024
+
+struct sst_data;
+
+enum sst_audio_device_type {
+	SND_SST_DEVICE_HEADSET = 1,
+	SND_SST_DEVICE_IHF,
+	SND_SST_DEVICE_VIBRA,
+	SND_SST_DEVICE_HAPTIC,
+	SND_SST_DEVICE_CAPTURE,
+	SND_SST_DEVICE_COMPRESS,
+};
+
+enum snd_sst_input_stream {
+	SST_INPUT_STREAM_NONE = 0x0,
+	SST_INPUT_STREAM_PCM = 0x6,
+	SST_INPUT_STREAM_COMPRESS = 0x8,
+	SST_INPUT_STREAM_MIXED = 0xE,
+};
+
+enum sst_stream_ops {
+	STREAM_OPS_PLAYBACK = 0,        /* Decode */
+	STREAM_OPS_CAPTURE,             /* Encode */
+	STREAM_OPS_COMPRESSED_PATH,     /* Offload playback/capture */
+
+};
+enum snd_sst_stream_type {
+	SST_STREAM_DEVICE_HS = 32,
+	SST_STREAM_DEVICE_IHF = 33,
+	SST_STREAM_DEVICE_MIC0 = 34,
+	SST_STREAM_DEVICE_MIC1 = 35,
+};
+
+enum sst_controls {
+	SST_SND_ALLOC =			0x1000,
+	SST_SND_PAUSE =			0x1001,
+	SST_SND_RESUME =		0x1002,
+	SST_SND_DROP =			0x1003,
+	SST_SND_FREE =			0x1004,
+	SST_SND_BUFFER_POINTER =	0x1005,
+	SST_SND_STREAM_INIT =		0x1006,
+	SST_SND_START	 =		0x1007,
+	SST_SET_RUNTIME_PARAMS =	0x1008,
+	SST_SET_ALGO_PARAMS =		0x1009,
+	SST_SET_BYTE_STREAM =		0x100A,
+	SST_GET_BYTE_STREAM =		0x100B,
+	SST_SET_SSP_CONFIG =		0x100C,
+	SST_SET_PROBE_BYTE_STREAM =     0x100D,
+	SST_GET_PROBE_BYTE_STREAM =	0x100E,
+	SST_SET_VTSV_INFO =		0x100F,
+};
+
+struct pcm_stream_info {
+	int str_id;
+	void *mad_substream;
+	void (*period_elapsed) (void *mad_substream);
+	unsigned long long buffer_ptr;
+	unsigned long long pcm_delay;
+	int sfreq;
+};
+
+struct sst_compress_cb {
+	void *param;
+	void (*compr_cb)(void *param);
+	void *drain_cb_param;
+	void (*drain_notify)(void *param);
+
+};
+
+struct snd_sst_params;
+
+struct compress_sst_ops {
+	const char *name;
+	int (*open) (struct snd_sst_params *str_params,
+			struct sst_compress_cb *cb);
+	int (*control) (unsigned int cmd, unsigned int str_id);
+	int (*tstamp) (unsigned int str_id, struct snd_compr_tstamp *tstamp);
+	int (*ack) (unsigned int str_id, unsigned long bytes);
+	int (*close) (unsigned int str_id);
+	int (*get_caps) (struct snd_compr_caps *caps);
+	int (*get_codec_caps) (struct snd_compr_codec_caps *codec);
+	int (*set_metadata) (unsigned int str_id, struct snd_compr_metadata *metadata);
+
+};
+
+enum lpe_param_types_mixer {
+	SST_ALGO_PARAM_MIXER_STREAM_CFG = 0x801,
+};
+
+struct mad_ops_wq {
+	int stream_id;
+	enum sst_controls control_op;
+	struct work_struct wq;
+};
+
+struct sst_ops {
+	int (*open) (struct snd_sst_params *str_param);
+	int (*device_control) (int cmd, void *arg);
+	int (*set_generic_params) (enum sst_controls cmd, void *arg);
+	int (*close) (unsigned int str_id);
+	int (*power) (bool state);
+};
+
+struct sst_runtime_stream {
+	int     stream_status;
+	unsigned int id;
+	size_t bytes_written;
+	struct pcm_stream_info stream_info;
+	struct sst_ops *ops;
+	struct compress_sst_ops *compr_ops;
+	spinlock_t	status_lock;
+};
+
+struct sst_device {
+	char *name;
+	struct device *dev;
+	struct sst_ops *ops;
+	struct platform_device *pdev;
+	struct compress_sst_ops *compr_ops;
+};
+
+int sst_register_dsp(struct sst_device *sst);
+int sst_unregister_dsp(struct sst_device *sst);
+#endif
diff --git a/sound/soc/intel/sst_platform_pvt.h b/sound/soc/intel/sst_platform_pvt.h
new file mode 100644
index 0000000..2125dc6
--- /dev/null
+++ b/sound/soc/intel/sst_platform_pvt.h
@@ -0,0 +1,145 @@
+/*
+ *  sst_platform_pvt.h - Intel MID Platform driver header file
+ *
+ *  Copyright (C) 2010 Intel Corp
+ *  Author: Vinod Koul <vinod.koul@intel.com>
+ *  Author: Harsha Priya <priya.harsha@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *
+ */
+
+#ifndef __SST_PLATFORM_PVT_H__
+#define __SST_PLATFORM_PVT_H__
+
+/* TODO rmv this global */
+extern struct sst_device *sst_dsp;
+
+#define SST_MONO		1
+#define SST_STEREO		2
+
+#define SST_MIN_RATE		8000
+#define SST_MAX_RATE		48000
+#define SST_MIN_CHANNEL		1
+#define SST_MAX_CHANNEL		2
+
+#define SST_MAX_BUFFER		96000 /*500ms@48K,16bit,2ch - CLV*/
+#define SST_MIN_PERIOD_BYTES	1536  /*24ms@16K,16bit,2ch - For VoIP on Mrfld*/
+#define SST_MAX_PERIOD_BYTES	48000 /*250ms@48K,16bit,2ch - CLV*/
+
+#define SST_MIN_PERIODS		2
+#define SST_MAX_PERIODS		50
+#define SST_FIFO_SIZE		0
+#define SST_CODEC_TYPE_PCM	1
+
+#define SST_HEADSET_DAI		"Headset-cpu-dai"
+#define SST_SPEAKER_DAI		"Speaker-cpu-dai"
+#define SST_VOICE_DAI		"Voice-cpu-dai"
+#define SST_VIRTUAL_DAI		"Virtual-cpu-dai"
+#define SST_LOOPBACK_DAI	"Loopback-cpu-dai"
+#define SST_POWER_DAI		"Power-cpu-dai"
+#define SST_COMPRESS_DAI	"Compress-cpu-dai"
+#define SST_PROBE_DAI		"Probe-cpu-dai"
+#define SST_VOIP_DAI		"Voip-cpu-dai"
+#define SST_DEEPBUFFER_DAI	"Deepbuffer-cpu-dai"
+#define SST_LOWLATENCY_DAI	"Lowlatency-cpu-dai"
+
+struct sst_device;
+
+enum sst_drv_status {
+	SST_PLATFORM_UNINIT,
+	SST_PLATFORM_INIT,
+	SST_PLATFORM_RUNNING,
+	SST_PLATFORM_PAUSED,
+	SST_PLATFORM_DROPPED,
+};
+
+enum ssp_port {
+	SST_SSP_PORT0 = 0,
+	SST_SSP_PORT1,
+	SST_SSP_PORT2,
+	SST_SSP_PORT3,
+};
+
+#define SST_PIPE_CONTROL	0x0
+#define SST_COMPRESS_VOL	0x01
+
+int sst_platform_clv_init(struct snd_soc_platform *platform);
+int sst_dsp_init(struct snd_soc_platform *platform);
+int sst_dsp_init_v2_dpcm(struct snd_soc_platform *platform);
+int sst_send_pipe_gains(struct snd_soc_dai *dai, int stream, int mute);
+void send_ssp_cmd(struct snd_soc_platform *platform, const char *id, bool enable);
+void sst_handle_vb_timer(struct snd_soc_platform *platform, bool enable);
+
+unsigned int sst_soc_read(struct snd_soc_platform *platform, unsigned int reg);
+int sst_soc_write(struct snd_soc_platform *platform, unsigned int reg, unsigned int val);
+unsigned int sst_reg_read(struct sst_data *sst, unsigned int reg,
+			  unsigned int shift, unsigned int max);
+unsigned int sst_reg_write(struct sst_data *sst, unsigned int reg,
+			   unsigned int shift, unsigned int max, unsigned int val);
+
+int sst_algo_int_ctl_info(struct snd_kcontrol *kcontrol,
+		struct snd_ctl_elem_info *uinfo);
+void sst_set_stream_status(struct sst_runtime_stream *stream, int state);
+int sst_fill_stream_params(void *substream, const struct sst_data *ctx,
+			   struct snd_sst_params *str_params, bool is_compress);
+int sst_dpcm_probe_send(struct snd_soc_platform *platform, u16 probe_pipe,
+			int substream, int direction, bool on);
+int sst_byte_control_get(struct snd_kcontrol *kcontrol,
+			 struct snd_ctl_elem_value *ucontrol);
+int sst_byte_control_set(struct snd_kcontrol *kcontrol,
+			 struct snd_ctl_elem_value *ucontrol);
+
+struct sst_algo_int_control_v2 {
+	struct soc_mixer_control mc;
+	u16 module_id; /* module identifieer */
+	u16 pipe_id; /* location info: pipe_id + instance_id */
+	u16 instance_id;
+	unsigned int value; /* Value received is stored here */
+};
+
+struct sst_lowlatency_deepbuff {
+	/* Thresholds for low latency & deep buffer */
+	unsigned long	*low_latency;
+	unsigned long	*deep_buffer;
+	unsigned long	period_time;
+};
+
+struct sst_pcm_format {
+	unsigned int sample_bits;
+	unsigned int rate_min;
+	unsigned int rate_max;
+	unsigned int channels_min;
+	unsigned int channels_max;
+};
+
+
+struct sst_data {
+	struct platform_device *pdev;
+	struct sst_platform_data *pdata;
+	unsigned int lpe_mixer_input_ihf;
+	unsigned int lpe_mixer_input_hs;
+	u32 *widget;
+	char *byte_stream;
+	struct mutex lock;
+	/* Pipe_id for probe_stream to be saved in stream map */
+	u8 pipe_id;
+	bool vtsv_enroll;
+	struct sst_lowlatency_deepbuff ll_db;
+};
+#endif
diff --git a/sound/soc/mid-x86/Kconfig b/sound/soc/mid-x86/Kconfig
index 61c10bf..77f0f36 100644
--- a/sound/soc/mid-x86/Kconfig
+++ b/sound/soc/mid-x86/Kconfig
@@ -1,13 +1,2 @@
-config SND_MFLD_MACHINE
-	tristate "SOC Machine Audio driver for Intel Medfield MID platform"
-	depends on INTEL_SCU_IPC
-	select SND_SOC_SN95031
-	select SND_SST_PLATFORM
-	help
-          This adds support for ASoC machine driver for Intel(R) MID Medfield platform
-          used as alsa device in audio substem in Intel(R) MID devices
-          Say Y if you have such a device
-          If unsure select "N".
-
 config SND_SST_PLATFORM
 	tristate
diff --git a/sound/soc/mid-x86/Makefile b/sound/soc/mid-x86/Makefile
index 6398833..09d12ff 100644
--- a/sound/soc/mid-x86/Makefile
+++ b/sound/soc/mid-x86/Makefile
@@ -1,5 +1,3 @@
 snd-soc-sst-platform-objs := sst_platform.o
-snd-soc-mfld-machine-objs := mfld_machine.o
 
 obj-$(CONFIG_SND_SST_PLATFORM) += snd-soc-sst-platform.o
-obj-$(CONFIG_SND_MFLD_MACHINE) += snd-soc-mfld-machine.o
diff --git a/sound/soc/mid-x86/mfld_machine.c b/sound/soc/mid-x86/mfld_machine.c
deleted file mode 100644
index 4139116..0000000
--- a/sound/soc/mid-x86/mfld_machine.c
+++ /dev/null
@@ -1,447 +0,0 @@
-/*
- *  mfld_machine.c - ASoc Machine driver for Intel Medfield MID platform
- *
- *  Copyright (C) 2010 Intel Corp
- *  Author: Vinod Koul <vinod.koul@intel.com>
- *  Author: Harsha Priya <priya.harsha@intel.com>
- *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License as published by
- *  the Free Software Foundation; version 2 of the License.
- *
- *  This program is distributed in the hope that it will be useful, but
- *  WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- *  General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/soc.h>
-#include <sound/jack.h>
-#include "../codecs/sn95031.h"
-
-#define MID_MONO 1
-#define MID_STEREO 2
-#define MID_MAX_CAP 5
-#define MFLD_JACK_INSERT 0x04
-
-enum soc_mic_bias_zones {
-	MFLD_MV_START = 0,
-	/* mic bias volutage range for Headphones*/
-	MFLD_MV_HP = 400,
-	/* mic bias volutage range for American Headset*/
-	MFLD_MV_AM_HS = 650,
-	/* mic bias volutage range for Headset*/
-	MFLD_MV_HS = 2000,
-	MFLD_MV_UNDEFINED,
-};
-
-static unsigned int	hs_switch;
-static unsigned int	lo_dac;
-
-struct mfld_mc_private {
-	void __iomem *int_base;
-	u8 interrupt_status;
-};
-
-struct snd_soc_jack mfld_jack;
-
-/*Headset jack detection DAPM pins */
-static struct snd_soc_jack_pin mfld_jack_pins[] = {
-	{
-		.pin = "Headphones",
-		.mask = SND_JACK_HEADPHONE,
-	},
-	{
-		.pin = "AMIC1",
-		.mask = SND_JACK_MICROPHONE,
-	},
-};
-
-/* jack detection voltage zones */
-static struct snd_soc_jack_zone mfld_zones[] = {
-	{MFLD_MV_START, MFLD_MV_AM_HS, SND_JACK_HEADPHONE},
-	{MFLD_MV_AM_HS, MFLD_MV_HS, SND_JACK_HEADSET},
-};
-
-/* sound card controls */
-static const char *headset_switch_text[] = {"Earpiece", "Headset"};
-
-static const char *lo_text[] = {"Vibra", "Headset", "IHF", "None"};
-
-static const struct soc_enum headset_enum =
-	SOC_ENUM_SINGLE_EXT(2, headset_switch_text);
-
-static const struct soc_enum lo_enum =
-	SOC_ENUM_SINGLE_EXT(4, lo_text);
-
-static int headset_get_switch(struct snd_kcontrol *kcontrol,
-	struct snd_ctl_elem_value *ucontrol)
-{
-	ucontrol->value.integer.value[0] = hs_switch;
-	return 0;
-}
-
-static int headset_set_switch(struct snd_kcontrol *kcontrol,
-	struct snd_ctl_elem_value *ucontrol)
-{
-	struct snd_soc_codec *codec =  snd_kcontrol_chip(kcontrol);
-
-	if (ucontrol->value.integer.value[0] == hs_switch)
-		return 0;
-
-	if (ucontrol->value.integer.value[0]) {
-		pr_debug("hs_set HS path\n");
-		snd_soc_dapm_enable_pin(&codec->dapm, "Headphones");
-		snd_soc_dapm_disable_pin(&codec->dapm, "EPOUT");
-	} else {
-		pr_debug("hs_set EP path\n");
-		snd_soc_dapm_disable_pin(&codec->dapm, "Headphones");
-		snd_soc_dapm_enable_pin(&codec->dapm, "EPOUT");
-	}
-	snd_soc_dapm_sync(&codec->dapm);
-	hs_switch = ucontrol->value.integer.value[0];
-
-	return 0;
-}
-
-static void lo_enable_out_pins(struct snd_soc_codec *codec)
-{
-	snd_soc_dapm_enable_pin(&codec->dapm, "IHFOUTL");
-	snd_soc_dapm_enable_pin(&codec->dapm, "IHFOUTR");
-	snd_soc_dapm_enable_pin(&codec->dapm, "LINEOUTL");
-	snd_soc_dapm_enable_pin(&codec->dapm, "LINEOUTR");
-	snd_soc_dapm_enable_pin(&codec->dapm, "VIB1OUT");
-	snd_soc_dapm_enable_pin(&codec->dapm, "VIB2OUT");
-	if (hs_switch) {
-		snd_soc_dapm_enable_pin(&codec->dapm, "Headphones");
-		snd_soc_dapm_disable_pin(&codec->dapm, "EPOUT");
-	} else {
-		snd_soc_dapm_disable_pin(&codec->dapm, "Headphones");
-		snd_soc_dapm_enable_pin(&codec->dapm, "EPOUT");
-	}
-}
-
-static int lo_get_switch(struct snd_kcontrol *kcontrol,
-	struct snd_ctl_elem_value *ucontrol)
-{
-	ucontrol->value.integer.value[0] = lo_dac;
-	return 0;
-}
-
-static int lo_set_switch(struct snd_kcontrol *kcontrol,
-	struct snd_ctl_elem_value *ucontrol)
-{
-	struct snd_soc_codec *codec =  snd_kcontrol_chip(kcontrol);
-
-	if (ucontrol->value.integer.value[0] == lo_dac)
-		return 0;
-
-	/* we dont want to work with last state of lineout so just enable all
-	 * pins and then disable pins not required
-	 */
-	lo_enable_out_pins(codec);
-	switch (ucontrol->value.integer.value[0]) {
-	case 0:
-		pr_debug("set vibra path\n");
-		snd_soc_dapm_disable_pin(&codec->dapm, "VIB1OUT");
-		snd_soc_dapm_disable_pin(&codec->dapm, "VIB2OUT");
-		snd_soc_update_bits(codec, SN95031_LOCTL, 0x66, 0);
-		break;
-
-	case 1:
-		pr_debug("set hs  path\n");
-		snd_soc_dapm_disable_pin(&codec->dapm, "Headphones");
-		snd_soc_dapm_disable_pin(&codec->dapm, "EPOUT");
-		snd_soc_update_bits(codec, SN95031_LOCTL, 0x66, 0x22);
-		break;
-
-	case 2:
-		pr_debug("set spkr path\n");
-		snd_soc_dapm_disable_pin(&codec->dapm, "IHFOUTL");
-		snd_soc_dapm_disable_pin(&codec->dapm, "IHFOUTR");
-		snd_soc_update_bits(codec, SN95031_LOCTL, 0x66, 0x44);
-		break;
-
-	case 3:
-		pr_debug("set null path\n");
-		snd_soc_dapm_disable_pin(&codec->dapm, "LINEOUTL");
-		snd_soc_dapm_disable_pin(&codec->dapm, "LINEOUTR");
-		snd_soc_update_bits(codec, SN95031_LOCTL, 0x66, 0x66);
-		break;
-	}
-	snd_soc_dapm_sync(&codec->dapm);
-	lo_dac = ucontrol->value.integer.value[0];
-	return 0;
-}
-
-static const struct snd_kcontrol_new mfld_snd_controls[] = {
-	SOC_ENUM_EXT("Playback Switch", headset_enum,
-			headset_get_switch, headset_set_switch),
-	SOC_ENUM_EXT("Lineout Mux", lo_enum,
-			lo_get_switch, lo_set_switch),
-};
-
-static const struct snd_soc_dapm_widget mfld_widgets[] = {
-	SND_SOC_DAPM_HP("Headphones", NULL),
-	SND_SOC_DAPM_MIC("Mic", NULL),
-};
-
-static const struct snd_soc_dapm_route mfld_map[] = {
-	{"Headphones", NULL, "HPOUTR"},
-	{"Headphones", NULL, "HPOUTL"},
-	{"Mic", NULL, "AMIC1"},
-};
-
-static void mfld_jack_check(unsigned int intr_status)
-{
-	struct mfld_jack_data jack_data;
-
-	jack_data.mfld_jack = &mfld_jack;
-	jack_data.intr_id = intr_status;
-
-	sn95031_jack_detection(&jack_data);
-	/* TODO: add american headset detection post gpiolib support */
-}
-
-static int mfld_init(struct snd_soc_pcm_runtime *runtime)
-{
-	struct snd_soc_codec *codec = runtime->codec;
-	struct snd_soc_dapm_context *dapm = &codec->dapm;
-	int ret_val;
-
-	/* Add jack sense widgets */
-	snd_soc_dapm_new_controls(dapm, mfld_widgets, ARRAY_SIZE(mfld_widgets));
-
-	/* Set up the map */
-	snd_soc_dapm_add_routes(dapm, mfld_map, ARRAY_SIZE(mfld_map));
-
-	/* always connected */
-	snd_soc_dapm_enable_pin(dapm, "Headphones");
-	snd_soc_dapm_enable_pin(dapm, "Mic");
-
-	ret_val = snd_soc_add_codec_controls(codec, mfld_snd_controls,
-				ARRAY_SIZE(mfld_snd_controls));
-	if (ret_val) {
-		pr_err("soc_add_controls failed %d", ret_val);
-		return ret_val;
-	}
-	/* default is earpiece pin, userspace sets it explcitly */
-	snd_soc_dapm_disable_pin(dapm, "Headphones");
-	/* default is lineout NC, userspace sets it explcitly */
-	snd_soc_dapm_disable_pin(dapm, "LINEOUTL");
-	snd_soc_dapm_disable_pin(dapm, "LINEOUTR");
-	lo_dac = 3;
-	hs_switch = 0;
-	/* we dont use linein in this so set to NC */
-	snd_soc_dapm_disable_pin(dapm, "LINEINL");
-	snd_soc_dapm_disable_pin(dapm, "LINEINR");
-
-	/* Headset and button jack detection */
-	ret_val = snd_soc_jack_new(codec, "Intel(R) MID Audio Jack",
-			SND_JACK_HEADSET | SND_JACK_BTN_0 |
-			SND_JACK_BTN_1, &mfld_jack);
-	if (ret_val) {
-		pr_err("jack creation failed\n");
-		return ret_val;
-	}
-
-	ret_val = snd_soc_jack_add_pins(&mfld_jack,
-			ARRAY_SIZE(mfld_jack_pins), mfld_jack_pins);
-	if (ret_val) {
-		pr_err("adding jack pins failed\n");
-		return ret_val;
-	}
-	ret_val = snd_soc_jack_add_zones(&mfld_jack,
-			ARRAY_SIZE(mfld_zones), mfld_zones);
-	if (ret_val) {
-		pr_err("adding jack zones failed\n");
-		return ret_val;
-	}
-
-	/* we want to check if anything is inserted at boot,
-	 * so send a fake event to codec and it will read adc
-	 * to find if anything is there or not */
-	mfld_jack_check(MFLD_JACK_INSERT);
-	return ret_val;
-}
-
-static struct snd_soc_dai_link mfld_msic_dailink[] = {
-	{
-		.name = "Medfield Headset",
-		.stream_name = "Headset",
-		.cpu_dai_name = "Headset-cpu-dai",
-		.codec_dai_name = "SN95031 Headset",
-		.codec_name = "sn95031",
-		.platform_name = "sst-platform",
-		.init = mfld_init,
-	},
-	{
-		.name = "Medfield Speaker",
-		.stream_name = "Speaker",
-		.cpu_dai_name = "Speaker-cpu-dai",
-		.codec_dai_name = "SN95031 Speaker",
-		.codec_name = "sn95031",
-		.platform_name = "sst-platform",
-		.init = NULL,
-	},
-	{
-		.name = "Medfield Vibra",
-		.stream_name = "Vibra1",
-		.cpu_dai_name = "Vibra1-cpu-dai",
-		.codec_dai_name = "SN95031 Vibra1",
-		.codec_name = "sn95031",
-		.platform_name = "sst-platform",
-		.init = NULL,
-	},
-	{
-		.name = "Medfield Haptics",
-		.stream_name = "Vibra2",
-		.cpu_dai_name = "Vibra2-cpu-dai",
-		.codec_dai_name = "SN95031 Vibra2",
-		.codec_name = "sn95031",
-		.platform_name = "sst-platform",
-		.init = NULL,
-	},
-	{
-		.name = "Medfield Compress",
-		.stream_name = "Speaker",
-		.cpu_dai_name = "Compress-cpu-dai",
-		.codec_dai_name = "SN95031 Speaker",
-		.codec_name = "sn95031",
-		.platform_name = "sst-platform",
-		.init = NULL,
-	},
-};
-
-/* SoC card */
-static struct snd_soc_card snd_soc_card_mfld = {
-	.name = "medfield_audio",
-	.owner = THIS_MODULE,
-	.dai_link = mfld_msic_dailink,
-	.num_links = ARRAY_SIZE(mfld_msic_dailink),
-};
-
-static irqreturn_t snd_mfld_jack_intr_handler(int irq, void *dev)
-{
-	struct mfld_mc_private *mc_private = (struct mfld_mc_private *) dev;
-
-	memcpy_fromio(&mc_private->interrupt_status,
-			((void *)(mc_private->int_base)),
-			sizeof(u8));
-	return IRQ_WAKE_THREAD;
-}
-
-static irqreturn_t snd_mfld_jack_detection(int irq, void *data)
-{
-	struct mfld_mc_private *mc_drv_ctx = (struct mfld_mc_private *) data;
-
-	if (mfld_jack.codec == NULL)
-		return IRQ_HANDLED;
-	mfld_jack_check(mc_drv_ctx->interrupt_status);
-
-	return IRQ_HANDLED;
-}
-
-static int snd_mfld_mc_probe(struct platform_device *pdev)
-{
-	int ret_val = 0, irq;
-	struct mfld_mc_private *mc_drv_ctx;
-	struct resource *irq_mem;
-
-	pr_debug("snd_mfld_mc_probe called\n");
-
-	/* retrive the irq number */
-	irq = platform_get_irq(pdev, 0);
-
-	/* audio interrupt base of SRAM location where
-	 * interrupts are stored by System FW */
-	mc_drv_ctx = kzalloc(sizeof(*mc_drv_ctx), GFP_ATOMIC);
-	if (!mc_drv_ctx) {
-		pr_err("allocation failed\n");
-		return -ENOMEM;
-	}
-
-	irq_mem = platform_get_resource_byname(
-				pdev, IORESOURCE_MEM, "IRQ_BASE");
-	if (!irq_mem) {
-		pr_err("no mem resource given\n");
-		ret_val = -ENODEV;
-		goto unalloc;
-	}
-	mc_drv_ctx->int_base = ioremap_nocache(irq_mem->start,
-					resource_size(irq_mem));
-	if (!mc_drv_ctx->int_base) {
-		pr_err("Mapping of cache failed\n");
-		ret_val = -ENOMEM;
-		goto unalloc;
-	}
-	/* register for interrupt */
-	ret_val = request_threaded_irq(irq, snd_mfld_jack_intr_handler,
-			snd_mfld_jack_detection,
-			IRQF_SHARED, pdev->dev.driver->name, mc_drv_ctx);
-	if (ret_val) {
-		pr_err("cannot register IRQ\n");
-		goto unalloc;
-	}
-	/* register the soc card */
-	snd_soc_card_mfld.dev = &pdev->dev;
-	ret_val = snd_soc_register_card(&snd_soc_card_mfld);
-	if (ret_val) {
-		pr_debug("snd_soc_register_card failed %d\n", ret_val);
-		goto freeirq;
-	}
-	platform_set_drvdata(pdev, mc_drv_ctx);
-	pr_debug("successfully exited probe\n");
-	return ret_val;
-
-freeirq:
-	free_irq(irq, mc_drv_ctx);
-unalloc:
-	kfree(mc_drv_ctx);
-	return ret_val;
-}
-
-static int snd_mfld_mc_remove(struct platform_device *pdev)
-{
-	struct mfld_mc_private *mc_drv_ctx = platform_get_drvdata(pdev);
-
-	pr_debug("snd_mfld_mc_remove called\n");
-	free_irq(platform_get_irq(pdev, 0), mc_drv_ctx);
-	snd_soc_unregister_card(&snd_soc_card_mfld);
-	kfree(mc_drv_ctx);
-	platform_set_drvdata(pdev, NULL);
-	return 0;
-}
-
-static struct platform_driver snd_mfld_mc_driver = {
-	.driver = {
-		.owner = THIS_MODULE,
-		.name = "msic_audio",
-	},
-	.probe = snd_mfld_mc_probe,
-	.remove = snd_mfld_mc_remove,
-};
-
-module_platform_driver(snd_mfld_mc_driver);
-
-MODULE_DESCRIPTION("ASoC Intel(R) MID Machine driver");
-MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
-MODULE_AUTHOR("Harsha Priya <priya.harsha@intel.com>");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:msic-audio");
diff --git a/sound/soc/s6000/s6000-pcm.c b/sound/soc/s6000/s6000-pcm.c
index 1358c7d..d0740a7 100644
--- a/sound/soc/s6000/s6000-pcm.c
+++ b/sound/soc/s6000/s6000-pcm.c
@@ -128,7 +128,9 @@
 		    substream->runtime &&
 		    snd_pcm_running(substream)) {
 			dev_dbg(pcm->dev, "xrun\n");
+			snd_pcm_stream_lock(substream);
 			snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
+			snd_pcm_stream_unlock(substream);
 			ret = IRQ_HANDLED;
 		}
 
diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
index 06a8000..4c85bb8 100644
--- a/sound/soc/soc-compress.c
+++ b/sound/soc/soc-compress.c
@@ -24,6 +24,7 @@
 #include <sound/compress_driver.h>
 #include <sound/soc.h>
 #include <sound/initval.h>
+#include <sound/soc-dpcm.h>
 
 static int soc_compr_open(struct snd_compr_stream *cstream)
 {
@@ -75,6 +76,98 @@
 	return ret;
 }
 
+static int soc_compr_open_fe(struct snd_compr_stream *cstream)
+{
+	struct snd_soc_pcm_runtime *fe = cstream->private_data;
+	struct snd_pcm_substream *fe_substream = fe->pcm->streams[0].substream;
+	struct snd_soc_platform *platform = fe->platform;
+	struct snd_soc_dai *cpu_dai = fe->cpu_dai;
+	struct snd_soc_dai *codec_dai = fe->codec_dai;
+	struct snd_soc_dpcm *dpcm;
+	struct snd_soc_dapm_widget_list *list;
+	int stream;
+	int ret = 0;
+
+	if (cstream->direction == SND_COMPRESS_PLAYBACK)
+		stream = SNDRV_PCM_STREAM_PLAYBACK;
+	else
+		stream = SNDRV_PCM_STREAM_CAPTURE;
+
+	mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
+
+	if (platform->driver->compr_ops && platform->driver->compr_ops->open) {
+		ret = platform->driver->compr_ops->open(cstream);
+		if (ret < 0) {
+			pr_err("compress asoc: can't open platform %s\n", platform->name);
+			goto out;
+		}
+	}
+
+	if (fe->dai_link->compr_ops && fe->dai_link->compr_ops->startup) {
+		ret = fe->dai_link->compr_ops->startup(cstream);
+		if (ret < 0) {
+			pr_err("compress asoc: %s startup failed\n", fe->dai_link->name);
+			goto machine_err;
+		}
+	}
+
+	fe->dpcm[stream].runtime = fe_substream->runtime;
+
+	if (dpcm_path_get(fe, stream, &list) <= 0) {
+		dev_dbg(fe->dev, "ASoC: %s no valid %s route\n",
+			fe->dai_link->name, stream ? "capture" : "playback");
+	}
+
+	/* calculate valid and active FE <-> BE dpcms */
+	dpcm_process_paths(fe, stream, &list, 1);
+
+	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
+
+	ret = dpcm_be_dai_startup(fe, stream);
+	if (ret < 0) {
+		/* clean up all links */
+		list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be)
+			dpcm->state = SND_SOC_DPCM_LINK_STATE_FREE;
+
+		dpcm_be_disconnect(fe, stream);
+		fe->dpcm[stream].runtime = NULL;
+		goto fe_err;
+	}
+
+	dpcm_clear_pending_state(fe, stream);
+	dpcm_path_put(&list);
+
+	fe->dpcm[stream].state = SND_SOC_DPCM_STATE_OPEN;
+	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
+
+	if (cstream->direction == SND_COMPRESS_PLAYBACK) {
+		cpu_dai->playback_active++;
+		codec_dai->playback_active++;
+	} else {
+		cpu_dai->capture_active++;
+		codec_dai->capture_active++;
+	}
+
+	cpu_dai->active++;
+	codec_dai->active++;
+	fe->codec->active++;
+
+	mutex_unlock(&fe->card->mutex);
+
+	return 0;
+
+fe_err:
+	if (fe->dai_link->compr_ops && fe->dai_link->compr_ops->shutdown)
+		fe->dai_link->compr_ops->shutdown(cstream);
+machine_err:
+	if (platform->driver->compr_ops && platform->driver->compr_ops->free)
+		platform->driver->compr_ops->free(cstream);
+out:
+	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
+	mutex_unlock(&fe->card->mutex);
+	return ret;
+}
+
 /*
  * Power down the audio subsystem pmdown_time msecs after close is called.
  * This is to ensure there are no pops or clicks in between any music tracks
@@ -88,18 +181,17 @@
 
 	mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
 
-	dev_dbg(rtd->dev, "ASoC: pop wq checking: %s status: %s waiting: %s\n",
-		 codec_dai->driver->playback.stream_name,
-		 codec_dai->playback_active ? "active" : "inactive",
-		 rtd->pop_wait ? "yes" : "no");
+	dev_dbg (rtd->dev, "ASoC: pop wq checking: %s status: %s waiting: %s\n",
+			codec_dai->driver->playback.stream_name,
+			codec_dai->playback_active ? "active" : "inactive",
+			rtd->pop_wait ? "yes" : "no");
 
 	/* are we waiting on this codec DAI stream */
-	if (rtd->pop_wait == 1) {
+	if (rtd->pop_wait == 1 && !codec_dai->active) {
 		rtd->pop_wait = 0;
 		snd_soc_dapm_stream_event(rtd, SNDRV_PCM_STREAM_PLAYBACK,
 					  SND_SOC_DAPM_STREAM_STOP);
 	}
-
 	mutex_unlock(&rtd->pcm_mutex);
 }
 
@@ -120,8 +212,8 @@
 		cpu_dai->capture_active--;
 		codec_dai->capture_active--;
 	}
-
-	snd_soc_dai_digital_mute(codec_dai, 1, cstream->direction);
+	if (!codec_dai->playback_active)
+		snd_soc_dai_digital_mute(codec_dai, 1, cstream->direction);
 
 	cpu_dai->active--;
 	codec_dai->active--;
@@ -133,7 +225,6 @@
 	if (!codec_dai->active)
 		codec_dai->rate = 0;
 
-
 	if (rtd->dai_link->compr_ops && rtd->dai_link->compr_ops->shutdown)
 		rtd->dai_link->compr_ops->shutdown(cstream);
 
@@ -141,7 +232,8 @@
 		platform->driver->compr_ops->free(cstream);
 	cpu_dai->runtime = NULL;
 
-	if (cstream->direction == SND_COMPRESS_PLAYBACK) {
+	if (cstream->direction == SND_COMPRESS_PLAYBACK
+				&& !codec_dai->playback_active) {
 		if (!rtd->pmdown_time || codec->ignore_pmdown_time ||
 		    rtd->dai_link->ignore_pmdown_time) {
 			snd_soc_dapm_stream_event(rtd,
@@ -152,7 +244,8 @@
 			schedule_delayed_work(&rtd->delayed_work,
 				msecs_to_jiffies(rtd->pmdown_time));
 		}
-	} else {
+	} else if (cstream->direction == SND_COMPRESS_CAPTURE
+					&& !codec_dai->capture_active) {
 		/* capture streams can be powered down now */
 		snd_soc_dapm_stream_event(rtd,
 			SNDRV_PCM_STREAM_CAPTURE,
@@ -163,33 +256,126 @@
 	return 0;
 }
 
+static int soc_compr_free_fe(struct snd_compr_stream *cstream)
+{
+	struct snd_soc_pcm_runtime *fe = cstream->private_data;
+	struct snd_soc_platform *platform = fe->platform;
+	struct snd_soc_dai *cpu_dai = fe->cpu_dai;
+	struct snd_soc_dai *codec_dai = fe->codec_dai;
+	struct snd_soc_dpcm *dpcm;
+	int stream, ret;
+
+	mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
+
+	if (cstream->direction == SND_COMPRESS_PLAYBACK) {
+		stream = SNDRV_PCM_STREAM_PLAYBACK;
+		cpu_dai->playback_active--;
+		codec_dai->playback_active--;
+	} else {
+		stream = SNDRV_PCM_STREAM_CAPTURE;
+		cpu_dai->capture_active--;
+		codec_dai->capture_active--;
+	}
+
+	cpu_dai->active--;
+	codec_dai->active--;
+	fe->codec->active--;
+
+	snd_soc_dai_digital_mute(cpu_dai, 1, stream);
+
+	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
+
+	ret = dpcm_be_dai_hw_free(fe, stream);
+	if (ret < 0)
+		dev_err(fe->dev, "compressed hw_free failed %d\n", ret);
+
+	ret = dpcm_be_dai_shutdown(fe, stream);
+
+	/* mark FE's links ready to prune */
+	list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be)
+		dpcm->state = SND_SOC_DPCM_LINK_STATE_FREE;
+
+	if (stream == SNDRV_PCM_STREAM_PLAYBACK)
+		dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_STOP);
+	else
+		dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_STOP);
+
+	fe->dpcm[stream].state = SND_SOC_DPCM_STATE_CLOSE;
+	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
+
+	dpcm_be_disconnect(fe, stream);
+
+	fe->dpcm[stream].runtime = NULL;
+
+	if (fe->dai_link->compr_ops && fe->dai_link->compr_ops->shutdown)
+		fe->dai_link->compr_ops->shutdown(cstream);
+
+	if (platform->driver->compr_ops && platform->driver->compr_ops->free)
+		platform->driver->compr_ops->free(cstream);
+
+	mutex_unlock(&fe->card->mutex);
+	return 0;
+}
+
 static int soc_compr_trigger(struct snd_compr_stream *cstream, int cmd)
 {
 
 	struct snd_soc_pcm_runtime *rtd = cstream->private_data;
 	struct snd_soc_platform *platform = rtd->platform;
-	struct snd_soc_dai *codec_dai = rtd->codec_dai;
 	int ret = 0;
 
-	mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
-
 	if (platform->driver->compr_ops && platform->driver->compr_ops->trigger) {
 		ret = platform->driver->compr_ops->trigger(cstream, cmd);
 		if (ret < 0)
 			goto out;
 	}
 
+out:
+	return ret;
+}
+
+static int soc_compr_trigger_fe(struct snd_compr_stream *cstream, int cmd)
+{
+	struct snd_soc_pcm_runtime *fe = cstream->private_data;
+	struct snd_soc_platform *platform = fe->platform;
+	int ret = 0, stream;
+
+	if (cstream->direction == SND_COMPRESS_PLAYBACK)
+		stream = SNDRV_PCM_STREAM_PLAYBACK;
+	else
+		stream = SNDRV_PCM_STREAM_CAPTURE;
+
+
+	mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
+
+	if (platform->driver->compr_ops && platform->driver->compr_ops->trigger) {
+		ret = platform->driver->compr_ops->trigger(cstream, cmd);
+		if (ret < 0)
+			goto out;
+	}
+
+	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
+
+	ret = dpcm_be_dai_trigger(fe, stream, cmd);
+
 	switch (cmd) {
 	case SNDRV_PCM_TRIGGER_START:
-		snd_soc_dai_digital_mute(codec_dai, 0, cstream->direction);
+	case SNDRV_PCM_TRIGGER_RESUME:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+		fe->dpcm[stream].state = SND_SOC_DPCM_STATE_START;
 		break;
 	case SNDRV_PCM_TRIGGER_STOP:
-		snd_soc_dai_digital_mute(codec_dai, 1, cstream->direction);
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+		fe->dpcm[stream].state = SND_SOC_DPCM_STATE_STOP;
+		break;
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+		fe->dpcm[stream].state = SND_SOC_DPCM_STATE_PAUSED;
 		break;
 	}
 
 out:
-	mutex_unlock(&rtd->pcm_mutex);
+	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
+	mutex_unlock(&fe->card->mutex);
 	return ret;
 }
 
@@ -220,26 +406,90 @@
 			goto err;
 	}
 
-	if (cstream->direction == SND_COMPRESS_PLAYBACK)
-		snd_soc_dapm_stream_event(rtd, SNDRV_PCM_STREAM_PLAYBACK,
-					SND_SOC_DAPM_STREAM_START);
-	else
-		snd_soc_dapm_stream_event(rtd, SNDRV_PCM_STREAM_CAPTURE,
-					SND_SOC_DAPM_STREAM_START);
-
 	/* cancel any delayed stream shutdown that is pending */
-	rtd->pop_wait = 0;
-	mutex_unlock(&rtd->pcm_mutex);
+	if (cstream->direction == SND_COMPRESS_PLAYBACK
+				 && rtd->pop_wait) {
+		rtd->pop_wait = 0;
+		cancel_delayed_work(&rtd->delayed_work);
+	}
 
-	cancel_delayed_work_sync(&rtd->delayed_work);
+	snd_soc_dapm_stream_event(rtd, SNDRV_PCM_STREAM_PLAYBACK,
+					SND_SOC_DAPM_STREAM_START);
 
-	return ret;
-
+	snd_soc_dai_digital_mute(rtd->codec_dai, 0, cstream->direction);
 err:
 	mutex_unlock(&rtd->pcm_mutex);
 	return ret;
 }
 
+static int soc_compr_set_params_fe(struct snd_compr_stream *cstream,
+					struct snd_compr_params *params)
+{
+	struct snd_soc_pcm_runtime *fe = cstream->private_data;
+	struct snd_pcm_substream *fe_substream = fe->pcm->streams[0].substream;
+	struct snd_soc_platform *platform = fe->platform;
+	struct snd_soc_dai *cpu_dai = fe->cpu_dai;
+
+	struct snd_pcm_hw_params *hw_params;
+	int ret = 0, stream;
+
+	if (cstream->direction == SND_COMPRESS_PLAYBACK)
+		stream = SNDRV_PCM_STREAM_PLAYBACK;
+	else
+		stream = SNDRV_PCM_STREAM_CAPTURE;
+
+	hw_params = kzalloc(sizeof(*hw_params), GFP_KERNEL);
+	if (hw_params == NULL)
+		return -ENOMEM;
+
+	mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
+
+	/* first we call set_params for the platform driver
+	 * this should configure the soc side
+	 * if the machine has compressed ops then we call that as well
+	 * expectation is that platform and machine will configure everything
+	 * for this compress path, like configuring pcm port for codec
+	 */
+	if (platform->driver->compr_ops && platform->driver->compr_ops->set_params) {
+		ret = platform->driver->compr_ops->set_params(cstream, params);
+		if (ret < 0)
+			goto out;
+	}
+
+	if (fe->dai_link->compr_ops && fe->dai_link->compr_ops->set_params) {
+		ret = fe->dai_link->compr_ops->set_params(cstream);
+		if (ret < 0)
+			goto out;
+	}
+
+	memcpy(&fe->dpcm[fe_substream->stream].hw_params, params,
+			sizeof(struct snd_pcm_hw_params));
+
+	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
+
+	ret = dpcm_be_dai_hw_params(fe, stream);
+	if (ret < 0)
+		goto out;
+
+	ret = dpcm_be_dai_prepare(fe, stream);
+	if (ret < 0)
+		goto out;
+
+	if (stream == SNDRV_PCM_STREAM_PLAYBACK)
+		dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_START);
+	else
+		dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_START);
+
+	fe->dpcm[stream].state = SND_SOC_DPCM_STATE_PREPARE;
+
+	snd_soc_dai_digital_mute(cpu_dai, 0, stream);
+
+out:
+	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
+	mutex_unlock(&fe->card->mutex);
+	return ret;
+}
+
 static int soc_compr_get_params(struct snd_compr_stream *cstream,
 					struct snd_codec *params)
 {
@@ -359,6 +609,7 @@
 
 	return ret;
 }
+
 /* ASoC Compress operations */
 static struct snd_compr_ops soc_compr_ops = {
 	.open		= soc_compr_open,
@@ -374,6 +625,21 @@
 	.get_codec_caps = soc_compr_get_codec_caps
 };
 
+/* ASoC Dynamic Compress operations */
+static struct snd_compr_ops soc_compr_dyn_ops = {
+	.open		= soc_compr_open_fe,
+	.free		= soc_compr_free_fe,
+	.set_params	= soc_compr_set_params_fe,
+	.get_params	= soc_compr_get_params,
+	.set_metadata   = sst_compr_set_metadata,
+	.get_metadata	= sst_compr_get_metadata,
+	.trigger	= soc_compr_trigger_fe,
+	.pointer	= soc_compr_pointer,
+	.ack		= soc_compr_ack,
+	.get_caps	= soc_compr_get_caps,
+	.get_codec_caps = soc_compr_get_codec_caps
+};
+
 /* create a new compress */
 int soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
 {
@@ -382,6 +648,7 @@
 	struct snd_soc_dai *codec_dai = rtd->codec_dai;
 	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
 	struct snd_compr *compr;
+	struct snd_pcm *be_pcm;
 	char new_name[64];
 	int ret = 0, direction = 0;
 
@@ -409,7 +676,26 @@
 		ret = -ENOMEM;
 		goto compr_err;
 	}
-	memcpy(compr->ops, &soc_compr_ops, sizeof(soc_compr_ops));
+
+	if (rtd->dai_link->dynamic) {
+		snprintf(new_name, sizeof(new_name), "(%s)",
+			rtd->dai_link->stream_name);
+
+		ret = snd_pcm_new_internal(rtd->card->snd_card, new_name, num,
+				1, 0, &be_pcm);
+		if (ret < 0) {
+			dev_err(rtd->card->dev, "ASoC: can't create compressed for %s\n",
+				rtd->dai_link->name);
+			goto compr_err;
+		}
+
+		rtd->pcm = be_pcm;
+		rtd->fe_compr = 1;
+		be_pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->private_data = rtd;
+		/*be_pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream->private_data = rtd;*/
+		memcpy(compr->ops, &soc_compr_dyn_ops, sizeof(soc_compr_dyn_ops));
+	} else
+		memcpy(compr->ops, &soc_compr_ops, sizeof(soc_compr_ops));
 
 	/* Add copy callback for not memory mapped DSPs */
 	if (platform->driver->compr_ops && platform->driver->compr_ops->copy)
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index d56bbea..210edbf 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -530,6 +530,15 @@
 }
 #endif
 
+static void codec2codec_close_delayed_work(struct work_struct *work)
+{
+	/* Currently nothing to do for c2c links
+	 * Since c2c links are internal nodes in the DAPM graph and
+	 * don't interface with the outside world or application layer
+	 * we don't have to do any special handling on close.
+	 */
+}
+
 #ifdef CONFIG_PM_SLEEP
 /* powers down audio subsystem for suspend */
 int snd_soc_suspend(struct device *dev)
@@ -1163,6 +1172,11 @@
 		if (dai->dev != platform->dev)
 			continue;
 
+		/* dummy platform doesn't have and DAIs, don't add dummy-codec
+		 * widgets here (since dev is the same)
+		 */
+		if (!strcmp(dai->name, "snd-soc-dummy-dai"))
+			continue;
 		snd_soc_dapm_new_dai_widgets(&platform->dapm, dai);
 	}
 
@@ -1224,9 +1238,6 @@
 	}
 	rtd->card = card;
 
-	/* Make sure all DAPM widgets are instantiated */
-	snd_soc_dapm_new_widgets(&codec->dapm);
-
 	/* machine controls, routes and widgets are not prefixed */
 	temp = codec->name_prefix;
 	codec->name_prefix = NULL;
@@ -1362,7 +1373,6 @@
 				return -ENODEV;
 
 			list_add(&cpu_dai->dapm.list, &card->dapm_list);
-			snd_soc_dapm_new_dai_widgets(&cpu_dai->dapm, cpu_dai);
 		}
 
 		if (cpu_dai->driver->probe) {
@@ -1429,9 +1439,18 @@
 				return ret;
 			}
 		} else {
+			INIT_DELAYED_WORK(&rtd->delayed_work,
+						codec2codec_close_delayed_work);
+
 			/* link the DAI widgets */
-			play_w = codec_dai->playback_widget;
-			capture_w = cpu_dai->capture_widget;
+			if (!dai_link->dsp_loopback) {
+				play_w = codec_dai->playback_widget;
+				capture_w = cpu_dai->capture_widget;
+			} else {
+				play_w = codec_dai->playback_widget;
+				capture_w = cpu_dai->playback_widget;
+			}
+
 			if (play_w && capture_w) {
 				ret = snd_soc_dapm_new_pcm(card, dai_link->params,
 						   capture_w, play_w);
@@ -1442,8 +1461,14 @@
 				}
 			}
 
-			play_w = cpu_dai->playback_widget;
-			capture_w = codec_dai->capture_widget;
+			if (!dai_link->dsp_loopback) {
+				play_w = cpu_dai->playback_widget;
+				capture_w = codec_dai->capture_widget;
+			} else {
+				play_w = cpu_dai->capture_widget;
+				capture_w = codec_dai->capture_widget;
+			}
+
 			if (play_w && capture_w) {
 				ret = snd_soc_dapm_new_pcm(card, dai_link->params,
 						   capture_w, play_w);
@@ -1717,8 +1742,6 @@
 		snd_soc_dapm_add_routes(&card->dapm, card->dapm_routes,
 					card->num_dapm_routes);
 
-	snd_soc_dapm_new_widgets(&card->dapm);
-
 	for (i = 0; i < card->num_links; i++) {
 		dai_link = &card->dai_link[i];
 		dai_fmt = dai_link->dai_fmt;
@@ -2812,7 +2835,8 @@
 		val2 = (ucontrol->value.integer.value[1] + min) & mask;
 		val2 = val2 << rshift;
 
-		if (snd_soc_update_bits_locked(codec, reg2, val_mask, val2))
+		err = snd_soc_update_bits_locked(codec, reg2, val_mask, val2);
+		if (err < 0)
 			return err;
 	}
 	return 0;
@@ -3174,6 +3198,18 @@
 }
 EXPORT_SYMBOL_GPL(snd_soc_bytes_put);
 
+int snd_soc_info_bytes_ext(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_info *ucontrol)
+{
+	struct soc_bytes_ext *params = (void *)kcontrol->private_value;
+
+	ucontrol->type = SNDRV_CTL_ELEM_TYPE_BYTES;
+	ucontrol->count = params->max;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(snd_soc_info_bytes_ext);
+
 /**
  * snd_soc_info_xr_sx - signed multi register info callback
  * @kcontrol: mreg control
@@ -4262,6 +4298,23 @@
 }
 EXPORT_SYMBOL_GPL(snd_soc_unregister_component);
 
+#if IS_ENABLED(CONFIG_SND_EFFECTS_OFFLOAD)
+int snd_soc_register_effect(struct snd_soc_card *card,
+				struct snd_effect_ops *ops)
+{
+	return snd_effect_register(card->snd_card, ops);
+
+}
+EXPORT_SYMBOL_GPL(snd_soc_register_effect);
+
+int snd_soc_unregister_effect(struct snd_soc_card *card)
+{
+	return snd_effect_deregister(card->snd_card);
+
+}
+EXPORT_SYMBOL_GPL(snd_soc_unregister_effect);
+#endif
+
 /* Retrieve a card's name from device tree */
 int snd_soc_of_parse_card_name(struct snd_soc_card *card,
 			       const char *propname)
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index c7051c4..1f8d19a 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -64,6 +64,7 @@
 	[snd_soc_dapm_virt_mux] = 5,
 	[snd_soc_dapm_value_mux] = 5,
 	[snd_soc_dapm_dac] = 6,
+	[snd_soc_dapm_switch] = 7,
 	[snd_soc_dapm_mixer] = 7,
 	[snd_soc_dapm_mixer_named_ctl] = 7,
 	[snd_soc_dapm_pga] = 8,
@@ -83,6 +84,7 @@
 	[snd_soc_dapm_line] = 2,
 	[snd_soc_dapm_out_drv] = 2,
 	[snd_soc_dapm_pga] = 4,
+	[snd_soc_dapm_switch] = 5,
 	[snd_soc_dapm_mixer_named_ctl] = 5,
 	[snd_soc_dapm_mixer] = 5,
 	[snd_soc_dapm_dac] = 6,
@@ -172,6 +174,17 @@
 	return kmemdup(_widget, sizeof(*_widget), GFP_KERNEL);
 }
 
+/**
+ * snd_soc_dapm_kcontrol_codec() - Returns the codec associated to a kcontrol
+ * @kcontrol: The kcontrol
+ */
+struct snd_soc_codec *snd_soc_dapm_kcontrol_codec(struct snd_kcontrol *kcontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
+	return wlist->widgets[0]->codec;
+}
+EXPORT_SYMBOL_GPL(snd_soc_dapm_kcontrol_codec);
+
 /* get snd_card from DAPM context */
 static inline struct snd_card *dapm_get_snd_card(
 	struct snd_soc_dapm_context *dapm)
@@ -682,13 +695,14 @@
 		return -EINVAL;
 	}
 
-	path = list_first_entry(&w->sources, struct snd_soc_dapm_path,
-				list_sink);
-	if (!path) {
+	if (list_empty(&w->sources)) {
 		dev_err(dapm->dev, "ASoC: mux %s has no paths\n", w->name);
 		return -EINVAL;
 	}
 
+	path = list_first_entry(&w->sources, struct snd_soc_dapm_path,
+				list_sink);
+
 	ret = dapm_create_or_share_mixmux_kcontrol(w, 0, path);
 	if (ret < 0)
 		return ret;
@@ -885,9 +899,7 @@
 			path->walking = 0;
 		}
 	}
-
 	widget->outputs = con;
-
 	return con;
 }
 
@@ -992,9 +1004,7 @@
 			path->walking = 0;
 		}
 	}
-
 	widget->inputs = con;
-
 	return con;
 }
 
@@ -1796,7 +1806,7 @@
 				w->active ? "active" : "inactive");
 
 	list_for_each_entry(p, &w->sources, list_sink) {
-		if (p->connected && !p->connected(w, p->sink))
+		if (p->connected && !p->connected(w, p->source))
 			continue;
 
 		if (p->connect)
@@ -3260,6 +3270,28 @@
 
 	switch (event) {
 	case SND_SOC_DAPM_PRE_PMU:
+		if (source->driver->ops && source->driver->ops->startup) {
+			substream.stream = SNDRV_PCM_STREAM_CAPTURE;
+			ret = source->driver->ops->startup(&substream, source);
+			if (ret != 0) {
+				dev_err(source->dev,
+					"ASoC: startup() failed: %d\n", ret);
+				goto out;
+			}
+			source->active++;
+		}
+
+		if (sink->driver->ops && sink->driver->ops->startup) {
+			substream.stream = SNDRV_PCM_STREAM_PLAYBACK;
+			ret = sink->driver->ops->startup(&substream, sink);
+			if (ret != 0) {
+				dev_err(sink->dev,
+					"ASoC: startup() failed: %d\n", ret);
+				goto out;
+			}
+			sink->active++;
+		}
+
 		if (source->driver->ops && source->driver->ops->hw_params) {
 			substream.stream = SNDRV_PCM_STREAM_CAPTURE;
 			ret = source->driver->ops->hw_params(&substream,
@@ -3297,6 +3329,18 @@
 		if (ret != 0 && ret != -ENOTSUPP)
 			dev_warn(sink->dev, "ASoC: Failed to mute: %d\n", ret);
 		ret = 0;
+
+		source->active--;
+		if (source->driver->ops && source->driver->ops->shutdown) {
+			substream.stream = SNDRV_PCM_STREAM_CAPTURE;
+			source->driver->ops->shutdown(&substream, source);
+		}
+
+		sink->active--;
+		if (sink->driver->ops && sink->driver->ops->shutdown) {
+			substream.stream = SNDRV_PCM_STREAM_PLAYBACK;
+			sink->driver->ops->shutdown(&substream, sink);
+		}
 		break;
 
 	default:
@@ -3439,7 +3483,7 @@
 				break;
 			}
 
-			if (!w->sname)
+			if (!w->sname || !strstr(w->sname, dai_w->name))
 				continue;
 
 			if (dai->driver->playback.stream_name &&
diff --git a/sound/soc/soc-jack.c b/sound/soc/soc-jack.c
index 0bb5cccd..9fd4ed8 100644
--- a/sound/soc/soc-jack.c
+++ b/sound/soc/soc-jack.c
@@ -65,6 +65,7 @@
 	struct snd_soc_codec *codec;
 	struct snd_soc_dapm_context *dapm;
 	struct snd_soc_jack_pin *pin;
+	unsigned int sync = 0;
 	int enable;
 
 	trace_snd_soc_jack_report(jack, mask, status);
@@ -92,12 +93,16 @@
 			snd_soc_dapm_enable_pin(dapm, pin->pin);
 		else
 			snd_soc_dapm_disable_pin(dapm, pin->pin);
+
+		/* we need to sync for this case only */
+		sync = 1;
 	}
 
 	/* Report before the DAPM sync to help users updating micbias status */
 	blocking_notifier_call_chain(&jack->notifier, jack->status, jack);
 
-	snd_soc_dapm_sync(dapm);
+	if (sync)
+		snd_soc_dapm_sync(dapm);
 
 	snd_jack_report(jack->jack, jack->status);
 
@@ -318,10 +323,13 @@
 		INIT_DELAYED_WORK(&gpios[i].work, gpio_work);
 		gpios[i].jack = jack;
 
+		if (!gpios[i].irq_flags)
+			gpios[i].irq_flags =
+				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING;
+
 		ret = request_any_context_irq(gpio_to_irq(gpios[i].gpio),
 					      gpio_handler,
-					      IRQF_TRIGGER_RISING |
-					      IRQF_TRIGGER_FALLING,
+					      gpios[i].irq_flags,
 					      gpios[i].name,
 					      &gpios[i]);
 		if (ret < 0)
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index ccb6be4..fc27d27 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -34,7 +34,7 @@
 #define DPCM_MAX_BE_USERS	8
 
 /* DPCM stream event, send event to FE and all active BEs. */
-static int dpcm_dapm_stream_event(struct snd_soc_pcm_runtime *fe, int dir,
+int dpcm_dapm_stream_event(struct snd_soc_pcm_runtime *fe, int dir,
 	int event)
 {
 	struct snd_soc_dpcm *dpcm;
@@ -337,7 +337,7 @@
 		 rtd->pop_wait ? "yes" : "no");
 
 	/* are we waiting on this codec DAI stream */
-	if (rtd->pop_wait == 1) {
+	if (rtd->pop_wait == 1 && !codec_dai->active) {
 		rtd->pop_wait = 0;
 		snd_soc_dapm_stream_event(rtd, SNDRV_PCM_STREAM_PLAYBACK,
 					  SND_SOC_DAPM_STREAM_STOP);
@@ -383,7 +383,10 @@
 	/* Muting the DAC suppresses artifacts caused during digital
 	 * shutdown, for example from stopping clocks.
 	 */
-	snd_soc_dai_digital_mute(codec_dai, 1, substream->stream);
+	if (!codec_dai->playback_active)
+		snd_soc_dai_digital_mute(codec_dai, 1, substream->stream);
+
+	snd_soc_dai_digital_mute(cpu_dai, 1, substream->stream);
 
 	if (cpu_dai->driver->ops->shutdown)
 		cpu_dai->driver->ops->shutdown(substream, cpu_dai);
@@ -398,7 +401,8 @@
 		platform->driver->ops->close(substream);
 	cpu_dai->runtime = NULL;
 
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK
+			&& !codec_dai->playback_active) {
 		if (!rtd->pmdown_time || codec->ignore_pmdown_time ||
 		    rtd->dai_link->ignore_pmdown_time) {
 			/* powered down playback stream now */
@@ -411,14 +415,14 @@
 			schedule_delayed_work(&rtd->delayed_work,
 				msecs_to_jiffies(rtd->pmdown_time));
 		}
-	} else {
+	} else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE
+			&& !codec_dai->capture_active) {
 		/* capture streams can be powered down now */
 		snd_soc_dapm_stream_event(rtd, SNDRV_PCM_STREAM_CAPTURE,
 					  SND_SOC_DAPM_STREAM_STOP);
 	}
 
 	mutex_unlock(&rtd->pcm_mutex);
-
 	pm_runtime_put(platform->dev);
 	pm_runtime_put(codec_dai->dev);
 	pm_runtime_put(cpu_dai->dev);
@@ -488,6 +492,7 @@
 			SND_SOC_DAPM_STREAM_START);
 
 	snd_soc_dai_digital_mute(codec_dai, 0, substream->stream);
+	snd_soc_dai_digital_mute(cpu_dai, 0, substream->stream);
 
 out:
 	mutex_unlock(&rtd->pcm_mutex);
@@ -757,7 +762,7 @@
 }
 
 /* disconnect a BE and FE */
-static void dpcm_be_disconnect(struct snd_soc_pcm_runtime *fe, int stream)
+void dpcm_be_disconnect(struct snd_soc_pcm_runtime *fe, int stream)
 {
 	struct snd_soc_dpcm *dpcm, *d;
 
@@ -853,7 +858,7 @@
 	return 0;
 }
 
-static int dpcm_path_get(struct snd_soc_pcm_runtime *fe,
+int dpcm_path_get(struct snd_soc_pcm_runtime *fe,
 	int stream, struct snd_soc_dapm_widget_list **list_)
 {
 	struct snd_soc_dai *cpu_dai = fe->cpu_dai;
@@ -875,11 +880,6 @@
 	return paths;
 }
 
-static inline void dpcm_path_put(struct snd_soc_dapm_widget_list **list)
-{
-	kfree(*list);
-}
-
 static int dpcm_prune_paths(struct snd_soc_pcm_runtime *fe, int stream,
 	struct snd_soc_dapm_widget_list **list_)
 {
@@ -949,7 +949,7 @@
 			continue;
 
 		/* don't connect if FE is not running */
-		if (!fe->dpcm[stream].runtime)
+		if (!fe->dpcm[stream].runtime && !fe->fe_compr)
 			continue;
 
 		/* newly connected FE and BE */
@@ -974,7 +974,7 @@
  * Find the corresponding BE DAIs that source or sink audio to this
  * FE substream.
  */
-static int dpcm_process_paths(struct snd_soc_pcm_runtime *fe,
+int dpcm_process_paths(struct snd_soc_pcm_runtime *fe,
 	int stream, struct snd_soc_dapm_widget_list **list, int new)
 {
 	if (new)
@@ -983,7 +983,7 @@
 		return dpcm_prune_paths(fe, stream, list);
 }
 
-static void dpcm_clear_pending_state(struct snd_soc_pcm_runtime *fe, int stream)
+void dpcm_clear_pending_state(struct snd_soc_pcm_runtime *fe, int stream)
 {
 	struct snd_soc_dpcm *dpcm;
 
@@ -1021,7 +1021,7 @@
 	}
 }
 
-static int dpcm_be_dai_startup(struct snd_soc_pcm_runtime *fe, int stream)
+int dpcm_be_dai_startup(struct snd_soc_pcm_runtime *fe, int stream)
 {
 	struct snd_soc_dpcm *dpcm;
 	int err, count = 0;
@@ -1149,7 +1149,6 @@
 	}
 
 	fe->dpcm[stream].state = SND_SOC_DPCM_STATE_OPEN;
-
 	dpcm_set_fe_runtime(fe_substream);
 	snd_pcm_limit_hw_rates(runtime);
 
@@ -1163,7 +1162,7 @@
 	return ret;
 }
 
-static int dpcm_be_dai_shutdown(struct snd_soc_pcm_runtime *fe, int stream)
+int dpcm_be_dai_shutdown(struct snd_soc_pcm_runtime *fe, int stream)
 {
 	struct snd_soc_dpcm *dpcm;
 
@@ -1224,7 +1223,7 @@
 	return 0;
 }
 
-static int dpcm_be_dai_hw_free(struct snd_soc_pcm_runtime *fe, int stream)
+int dpcm_be_dai_hw_free(struct snd_soc_pcm_runtime *fe, int stream)
 {
 	struct snd_soc_dpcm *dpcm;
 
@@ -1289,7 +1288,7 @@
 	return 0;
 }
 
-static int dpcm_be_dai_hw_params(struct snd_soc_pcm_runtime *fe, int stream)
+int dpcm_be_dai_hw_params(struct snd_soc_pcm_runtime *fe, int stream)
 {
 	struct snd_soc_dpcm *dpcm;
 	int ret;
@@ -1395,7 +1394,7 @@
 	if (ret < 0) {
 		dev_err(fe->dev,"ASoC: hw_params FE failed %d\n", ret);
 		dpcm_be_dai_hw_free(fe, stream);
-	 } else
+	} else
 		fe->dpcm[stream].state = SND_SOC_DPCM_STATE_HW_PARAMS;
 
 out:
@@ -1419,7 +1418,7 @@
 	return ret;
 }
 
-static int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream,
+int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream,
 			       int cmd)
 {
 	struct snd_soc_dpcm *dpcm;
@@ -1511,7 +1510,6 @@
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(dpcm_be_dai_trigger);
 
 static int dpcm_fe_dai_trigger(struct snd_pcm_substream *substream, int cmd)
 {
@@ -1587,7 +1585,7 @@
 	return ret;
 }
 
-static int dpcm_be_dai_prepare(struct snd_soc_pcm_runtime *fe, int stream)
+int dpcm_be_dai_prepare(struct snd_soc_pcm_runtime *fe, int stream)
 {
 	struct snd_soc_dpcm *dpcm;
 	int ret = 0;
@@ -2011,17 +2009,36 @@
 	int ret = 0, playback = 0, capture = 0;
 
 	if (rtd->dai_link->dynamic || rtd->dai_link->no_pcm) {
-		if (cpu_dai->driver->playback.channels_min)
-			playback = 1;
-		if (cpu_dai->driver->capture.channels_min)
-			capture = 1;
+		if (cpu_dai->driver->playback.channels_min) {
+			if (rtd->dai_link->playback_count)
+				playback = rtd->dai_link->playback_count;
+			else
+				playback = 1;
+		}
+
+		if (cpu_dai->driver->capture.channels_min) {
+			if (rtd->dai_link->capture_count)
+				capture = rtd->dai_link->capture_count;
+			else
+				capture = 1;
+		}
 	} else {
 		if (codec_dai->driver->playback.channels_min &&
-		    cpu_dai->driver->playback.channels_min)
-			playback = 1;
+		    cpu_dai->driver->playback.channels_min) {
+			if (rtd->dai_link->playback_count)
+				playback = rtd->dai_link->playback_count;
+			else
+				playback = 1;
+		}
+
 		if (codec_dai->driver->capture.channels_min &&
-		    cpu_dai->driver->capture.channels_min)
-			capture = 1;
+		    cpu_dai->driver->capture.channels_min) {
+			if (rtd->dai_link->capture_count)
+				capture = rtd->dai_link->capture_count;
+			else
+				capture = 1;
+		}
+
 	}
 
 	/* create the PCM */
diff --git a/sound/soc/soc-utils.c b/sound/soc/soc-utils.c
index 4b3be6c..8ecf448 100644
--- a/sound/soc/soc-utils.c
+++ b/sound/soc/soc-utils.c
@@ -75,7 +75,11 @@
 
 static int dummy_dma_open(struct snd_pcm_substream *substream)
 {
-	snd_soc_set_runtime_hwparams(substream, &dummy_dma_hardware);
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+
+	/* BE's dont need dummy params */
+	if (!rtd->dai_link->no_pcm)
+		snd_soc_set_runtime_hwparams(substream, &dummy_dma_hardware);
 
 	return 0;
 }
@@ -89,7 +93,28 @@
 	.ops = &dummy_dma_ops,
 };
 
-static struct snd_soc_codec_driver dummy_codec;
+static struct snd_soc_dapm_widget dapm_widgets[] = {
+	SND_SOC_DAPM_INPUT("Dummy Input"),
+	SND_SOC_DAPM_OUTPUT("Dummy Output"),
+};
+
+static struct snd_soc_dapm_route intercon[] = {
+	{ "Dummy Output", NULL, "Dummy Playback"},
+	{ "Dummy Capture", NULL, "Dummy Input"},
+};
+
+static int dummy_codec_probe(struct snd_soc_codec *codec)
+{
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+	snd_soc_dapm_new_controls(dapm, dapm_widgets,
+			ARRAY_SIZE(dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
+	return 0;
+}
+
+static struct snd_soc_codec_driver dummy_codec = {
+	.probe = dummy_codec_probe,
+};
 
 #define STUB_RATES	SNDRV_PCM_RATE_8000_192000
 #define STUB_FORMATS	(SNDRV_PCM_FMTBIT_S8 | \
@@ -101,17 +126,18 @@
 			SNDRV_PCM_FMTBIT_S32_LE | \
 			SNDRV_PCM_FMTBIT_U32_LE | \
 			SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE)
+
 static struct snd_soc_dai_driver dummy_dai = {
 	.name = "snd-soc-dummy-dai",
 	.playback = {
-		.stream_name	= "Playback",
+		.stream_name	= "Dummy Playback",
 		.channels_min	= 1,
 		.channels_max	= 384,
 		.rates		= STUB_RATES,
 		.formats	= STUB_FORMATS,
 	},
 	.capture = {
-		.stream_name	= "Capture",
+		.stream_name	= "Dummy Capture",
 		.channels_min	= 1,
 		.channels_max	= 384,
 		.rates = STUB_RATES,
diff --git a/sound/soc/tegra/tegra20_ac97.c b/sound/soc/tegra/tegra20_ac97.c
index 2f70ea7..05676c0 100644
--- a/sound/soc/tegra/tegra20_ac97.c
+++ b/sound/soc/tegra/tegra20_ac97.c
@@ -399,9 +399,9 @@
 	ac97->capture_dma_data.slave_id = of_dma[1];
 
 	ac97->playback_dma_data.addr = mem->start + TEGRA20_AC97_FIFO_TX1;
-	ac97->capture_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-	ac97->capture_dma_data.maxburst = 4;
-	ac97->capture_dma_data.slave_id = of_dma[0];
+	ac97->playback_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	ac97->playback_dma_data.maxburst = 4;
+	ac97->playback_dma_data.slave_id = of_dma[1];
 
 	ret = snd_soc_register_component(&pdev->dev, &tegra20_ac97_component,
 					 &tegra20_ac97_dai, 1);
diff --git a/sound/soc/tegra/tegra20_spdif.c b/sound/soc/tegra/tegra20_spdif.c
index 5eaa12c..551b3c9 100644
--- a/sound/soc/tegra/tegra20_spdif.c
+++ b/sound/soc/tegra/tegra20_spdif.c
@@ -323,8 +323,8 @@
 	}
 
 	spdif->playback_dma_data.addr = mem->start + TEGRA20_SPDIF_DATA_OUT;
-	spdif->capture_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-	spdif->capture_dma_data.maxburst = 4;
+	spdif->playback_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	spdif->playback_dma_data.maxburst = 4;
 	spdif->playback_dma_data.slave_id = dmareq->start;
 
 	pm_runtime_enable(&pdev->dev);
diff --git a/sound/soc/tegra/tegra30_i2s.c b/sound/soc/tegra/tegra30_i2s.c
index 31d092d..a5432b1 100644
--- a/sound/soc/tegra/tegra30_i2s.c
+++ b/sound/soc/tegra/tegra30_i2s.c
@@ -228,7 +228,7 @@
 		reg = TEGRA30_I2S_CIF_RX_CTRL;
 	} else {
 		val |= TEGRA30_AUDIOCIF_CTRL_DIRECTION_TX;
-		reg = TEGRA30_I2S_CIF_RX_CTRL;
+		reg = TEGRA30_I2S_CIF_TX_CTRL;
 	}
 
 	regmap_write(i2s->regmap, reg, val);
diff --git a/sound/usb/6fire/comm.c b/sound/usb/6fire/comm.c
index 9e6e3ff..23452ee 100644
--- a/sound/usb/6fire/comm.c
+++ b/sound/usb/6fire/comm.c
@@ -110,19 +110,37 @@
 static int usb6fire_comm_write8(struct comm_runtime *rt, u8 request,
 		u8 reg, u8 value)
 {
-	u8 buffer[13]; /* 13: maximum length of message */
+	u8 *buffer;
+	int ret;
+
+	/* 13: maximum length of message */
+	buffer = kmalloc(13, GFP_KERNEL);
+	if (!buffer)
+		return -ENOMEM;
 
 	usb6fire_comm_init_buffer(buffer, 0x00, request, reg, value, 0x00);
-	return usb6fire_comm_send_buffer(buffer, rt->chip->dev);
+	ret = usb6fire_comm_send_buffer(buffer, rt->chip->dev);
+
+	kfree(buffer);
+	return ret;
 }
 
 static int usb6fire_comm_write16(struct comm_runtime *rt, u8 request,
 		u8 reg, u8 vl, u8 vh)
 {
-	u8 buffer[13]; /* 13: maximum length of message */
+	u8 *buffer;
+	int ret;
+
+	/* 13: maximum length of message */
+	buffer = kmalloc(13, GFP_KERNEL);
+	if (!buffer)
+		return -ENOMEM;
 
 	usb6fire_comm_init_buffer(buffer, 0x00, request, reg, vl, vh);
-	return usb6fire_comm_send_buffer(buffer, rt->chip->dev);
+	ret = usb6fire_comm_send_buffer(buffer, rt->chip->dev);
+
+	kfree(buffer);
+	return ret;
 }
 
 int usb6fire_comm_init(struct sfire_chip *chip)
@@ -135,6 +153,12 @@
 	if (!rt)
 		return -ENOMEM;
 
+	rt->receiver_buffer = kzalloc(COMM_RECEIVER_BUFSIZE, GFP_KERNEL);
+	if (!rt->receiver_buffer) {
+		kfree(rt);
+		return -ENOMEM;
+	}
+
 	urb = &rt->receiver;
 	rt->serial = 1;
 	rt->chip = chip;
@@ -153,6 +177,7 @@
 	urb->interval = 1;
 	ret = usb_submit_urb(urb, GFP_KERNEL);
 	if (ret < 0) {
+		kfree(rt->receiver_buffer);
 		kfree(rt);
 		snd_printk(KERN_ERR PREFIX "cannot create comm data receiver.");
 		return ret;
@@ -171,6 +196,9 @@
 
 void usb6fire_comm_destroy(struct sfire_chip *chip)
 {
-	kfree(chip->comm);
+	struct comm_runtime *rt = chip->comm;
+
+	kfree(rt->receiver_buffer);
+	kfree(rt);
 	chip->comm = NULL;
 }
diff --git a/sound/usb/6fire/comm.h b/sound/usb/6fire/comm.h
index 6a0840b..780d5ed 100644
--- a/sound/usb/6fire/comm.h
+++ b/sound/usb/6fire/comm.h
@@ -24,7 +24,7 @@
 	struct sfire_chip *chip;
 
 	struct urb receiver;
-	u8 receiver_buffer[COMM_RECEIVER_BUFSIZE];
+	u8 *receiver_buffer;
 
 	u8 serial; /* urb serial */
 
diff --git a/sound/usb/6fire/midi.c b/sound/usb/6fire/midi.c
index 2672242..f3dd726 100644
--- a/sound/usb/6fire/midi.c
+++ b/sound/usb/6fire/midi.c
@@ -19,6 +19,10 @@
 #include "chip.h"
 #include "comm.h"
 
+enum {
+	MIDI_BUFSIZE = 64
+};
+
 static void usb6fire_midi_out_handler(struct urb *urb)
 {
 	struct midi_runtime *rt = urb->context;
@@ -156,6 +160,12 @@
 	if (!rt)
 		return -ENOMEM;
 
+	rt->out_buffer = kzalloc(MIDI_BUFSIZE, GFP_KERNEL);
+	if (!rt->out_buffer) {
+		kfree(rt);
+		return -ENOMEM;
+	}
+
 	rt->chip = chip;
 	rt->in_received = usb6fire_midi_in_received;
 	rt->out_buffer[0] = 0x80; /* 'send midi' command */
@@ -169,6 +179,7 @@
 
 	ret = snd_rawmidi_new(chip->card, "6FireUSB", 0, 1, 1, &rt->instance);
 	if (ret < 0) {
+		kfree(rt->out_buffer);
 		kfree(rt);
 		snd_printk(KERN_ERR PREFIX "unable to create midi.\n");
 		return ret;
@@ -197,6 +208,9 @@
 
 void usb6fire_midi_destroy(struct sfire_chip *chip)
 {
-	kfree(chip->midi);
+	struct midi_runtime *rt = chip->midi;
+
+	kfree(rt->out_buffer);
+	kfree(rt);
 	chip->midi = NULL;
 }
diff --git a/sound/usb/6fire/midi.h b/sound/usb/6fire/midi.h
index c321006..84851b9 100644
--- a/sound/usb/6fire/midi.h
+++ b/sound/usb/6fire/midi.h
@@ -16,10 +16,6 @@
 
 #include "common.h"
 
-enum {
-	MIDI_BUFSIZE = 64
-};
-
 struct midi_runtime {
 	struct sfire_chip *chip;
 	struct snd_rawmidi *instance;
@@ -32,7 +28,7 @@
 	struct snd_rawmidi_substream *out;
 	struct urb out_urb;
 	u8 out_serial; /* serial number of out packet */
-	u8 out_buffer[MIDI_BUFSIZE];
+	u8 *out_buffer;
 	int buffer_offset;
 
 	void (*in_received)(struct midi_runtime *rt, u8 *data, int length);
diff --git a/sound/usb/6fire/pcm.c b/sound/usb/6fire/pcm.c
index 40dd50a..25f9e61 100644
--- a/sound/usb/6fire/pcm.c
+++ b/sound/usb/6fire/pcm.c
@@ -543,7 +543,7 @@
 	snd_pcm_uframes_t ret;
 
 	if (rt->panic || !sub)
-		return SNDRV_PCM_STATE_XRUN;
+		return SNDRV_PCM_POS_XRUN;
 
 	spin_lock_irqsave(&sub->lock, flags);
 	ret = sub->dma_off;
@@ -580,6 +580,33 @@
 	urb->instance.number_of_packets = PCM_N_PACKETS_PER_URB;
 }
 
+static int usb6fire_pcm_buffers_init(struct pcm_runtime *rt)
+{
+	int i;
+
+	for (i = 0; i < PCM_N_URBS; i++) {
+		rt->out_urbs[i].buffer = kzalloc(PCM_N_PACKETS_PER_URB
+				* PCM_MAX_PACKET_SIZE, GFP_KERNEL);
+		if (!rt->out_urbs[i].buffer)
+			return -ENOMEM;
+		rt->in_urbs[i].buffer = kzalloc(PCM_N_PACKETS_PER_URB
+				* PCM_MAX_PACKET_SIZE, GFP_KERNEL);
+		if (!rt->in_urbs[i].buffer)
+			return -ENOMEM;
+	}
+	return 0;
+}
+
+static void usb6fire_pcm_buffers_destroy(struct pcm_runtime *rt)
+{
+	int i;
+
+	for (i = 0; i < PCM_N_URBS; i++) {
+		kfree(rt->out_urbs[i].buffer);
+		kfree(rt->in_urbs[i].buffer);
+	}
+}
+
 int usb6fire_pcm_init(struct sfire_chip *chip)
 {
 	int i;
@@ -591,6 +618,13 @@
 	if (!rt)
 		return -ENOMEM;
 
+	ret = usb6fire_pcm_buffers_init(rt);
+	if (ret) {
+		usb6fire_pcm_buffers_destroy(rt);
+		kfree(rt);
+		return ret;
+	}
+
 	rt->chip = chip;
 	rt->stream_state = STREAM_DISABLED;
 	rt->rate = ARRAY_SIZE(rates);
@@ -612,6 +646,7 @@
 
 	ret = snd_pcm_new(chip->card, "DMX6FireUSB", 0, 1, 1, &pcm);
 	if (ret < 0) {
+		usb6fire_pcm_buffers_destroy(rt);
 		kfree(rt);
 		snd_printk(KERN_ERR PREFIX "cannot create pcm instance.\n");
 		return ret;
@@ -627,6 +662,7 @@
 			snd_dma_continuous_data(GFP_KERNEL),
 			MAX_BUFSIZE, MAX_BUFSIZE);
 	if (ret) {
+		usb6fire_pcm_buffers_destroy(rt);
 		kfree(rt);
 		snd_printk(KERN_ERR PREFIX
 				"error preallocating pcm buffers.\n");
@@ -641,17 +677,25 @@
 void usb6fire_pcm_abort(struct sfire_chip *chip)
 {
 	struct pcm_runtime *rt = chip->pcm;
+	unsigned long flags;
 	int i;
 
 	if (rt) {
 		rt->panic = true;
 
-		if (rt->playback.instance)
+		if (rt->playback.instance) {
+			snd_pcm_stream_lock_irqsave(rt->playback.instance, flags);
 			snd_pcm_stop(rt->playback.instance,
 					SNDRV_PCM_STATE_XRUN);
-		if (rt->capture.instance)
+			snd_pcm_stream_unlock_irqrestore(rt->playback.instance, flags);
+		}
+
+		if (rt->capture.instance) {
+			snd_pcm_stream_lock_irqsave(rt->capture.instance, flags);
 			snd_pcm_stop(rt->capture.instance,
 					SNDRV_PCM_STATE_XRUN);
+			snd_pcm_stream_unlock_irqrestore(rt->capture.instance, flags);
+		}
 
 		for (i = 0; i < PCM_N_URBS; i++) {
 			usb_poison_urb(&rt->in_urbs[i].instance);
@@ -663,6 +707,9 @@
 
 void usb6fire_pcm_destroy(struct sfire_chip *chip)
 {
-	kfree(chip->pcm);
+	struct pcm_runtime *rt = chip->pcm;
+
+	usb6fire_pcm_buffers_destroy(rt);
+	kfree(rt);
 	chip->pcm = NULL;
 }
diff --git a/sound/usb/6fire/pcm.h b/sound/usb/6fire/pcm.h
index 9b01133..f5779d6 100644
--- a/sound/usb/6fire/pcm.h
+++ b/sound/usb/6fire/pcm.h
@@ -32,7 +32,7 @@
 	struct urb instance;
 	struct usb_iso_packet_descriptor packets[PCM_N_PACKETS_PER_URB];
 	/* END DO NOT SEPARATE */
-	u8 buffer[PCM_N_PACKETS_PER_URB * PCM_MAX_PACKET_SIZE];
+	u8 *buffer;
 
 	struct pcm_urb *peer;
 };
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index 7a444b5..659950e 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -591,17 +591,16 @@
 	ep->stride = frame_bits >> 3;
 	ep->silence_value = pcm_format == SNDRV_PCM_FORMAT_U8 ? 0x80 : 0;
 
-	/* calculate max. frequency */
-	if (ep->maxpacksize) {
+	/* assume max. frequency is 25% higher than nominal */
+	ep->freqmax = ep->freqn + (ep->freqn >> 2);
+	maxsize = ((ep->freqmax + 0xffff) * (frame_bits >> 3))
+				>> (16 - ep->datainterval);
+	/* but wMaxPacketSize might reduce this */
+	if (ep->maxpacksize && ep->maxpacksize < maxsize) {
 		/* whatever fits into a max. size packet */
 		maxsize = ep->maxpacksize;
 		ep->freqmax = (maxsize / (frame_bits >> 3))
 				<< (16 - ep->datainterval);
-	} else {
-		/* no max. packet size: just take 25% higher than nominal */
-		ep->freqmax = ep->freqn + (ep->freqn >> 2);
-		maxsize = ((ep->freqmax + 0xffff) * (frame_bits >> 3))
-				>> (16 - ep->datainterval);
 	}
 
 	if (ep->fill_max)
diff --git a/sound/usb/misc/ua101.c b/sound/usb/misc/ua101.c
index 6ad617b..76d8329 100644
--- a/sound/usb/misc/ua101.c
+++ b/sound/usb/misc/ua101.c
@@ -613,14 +613,24 @@
 
 static void abort_alsa_capture(struct ua101 *ua)
 {
-	if (test_bit(ALSA_CAPTURE_RUNNING, &ua->states))
+	unsigned long flags;
+
+	if (test_bit(ALSA_CAPTURE_RUNNING, &ua->states)) {
+		snd_pcm_stream_lock_irqsave(ua->capture.substream, flags);
 		snd_pcm_stop(ua->capture.substream, SNDRV_PCM_STATE_XRUN);
+		snd_pcm_stream_unlock_irqrestore(ua->capture.substream, flags);
+	}
 }
 
 static void abort_alsa_playback(struct ua101 *ua)
 {
-	if (test_bit(ALSA_PLAYBACK_RUNNING, &ua->states))
+	unsigned long flags;
+
+	if (test_bit(ALSA_PLAYBACK_RUNNING, &ua->states)) {
+		snd_pcm_stream_lock_irqsave(ua->playback.substream, flags);
 		snd_pcm_stop(ua->playback.substream, SNDRV_PCM_STATE_XRUN);
+		snd_pcm_stream_unlock_irqrestore(ua->playback.substream, flags);
+	}
 }
 
 static int set_stream_hw(struct ua101 *ua, struct snd_pcm_substream *substream,
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index d543808..95558ef 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -888,6 +888,7 @@
 	case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */
 	case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */
 	case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */
+	case USB_ID(0x046d, 0x0826): /* HD Webcam c525 */
 	case USB_ID(0x046d, 0x0991):
 	/* Most audio usb devices lie about volume resolution.
 	 * Most Logitech webcams have res = 384.
diff --git a/sound/usb/usx2y/us122l.c b/sound/usb/usx2y/us122l.c
index d0323a6..999550b 100644
--- a/sound/usb/usx2y/us122l.c
+++ b/sound/usb/usx2y/us122l.c
@@ -262,7 +262,9 @@
 	}
 
 	area->vm_ops = &usb_stream_hwdep_vm_ops;
-	area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+	area->vm_flags |= VM_DONTDUMP;
+	if (!read)
+		area->vm_flags |= VM_DONTEXPAND;
 	area->vm_private_data = us122l;
 	atomic_inc(&us122l->mmap_count);
 out:
diff --git a/sound/usb/usx2y/usbusx2yaudio.c b/sound/usb/usx2y/usbusx2yaudio.c
index b376532..cd69a80 100644
--- a/sound/usb/usx2y/usbusx2yaudio.c
+++ b/sound/usb/usx2y/usbusx2yaudio.c
@@ -273,7 +273,11 @@
 		struct snd_usX2Y_substream *subs = usX2Y->subs[s];
 		if (subs) {
 			if (atomic_read(&subs->state) >= state_PRERUNNING) {
+				unsigned long flags;
+
+				snd_pcm_stream_lock_irqsave(subs->pcm_substream, flags);
 				snd_pcm_stop(subs->pcm_substream, SNDRV_PCM_STATE_XRUN);
+				snd_pcm_stream_unlock_irqrestore(subs->pcm_substream, flags);
 			}
 			for (u = 0; u < NRURBS; u++) {
 				struct urb *urb = subs->urb[u];
@@ -295,19 +299,6 @@
 	usX2Y_clients_stop(usX2Y);
 }
 
-static void usX2Y_error_sequence(struct usX2Ydev *usX2Y,
-				 struct snd_usX2Y_substream *subs, struct urb *urb)
-{
-	snd_printk(KERN_ERR
-"Sequence Error!(hcd_frame=%i ep=%i%s;wait=%i,frame=%i).\n"
-"Most probably some urb of usb-frame %i is still missing.\n"
-"Cause could be too long delays in usb-hcd interrupt handling.\n",
-		   usb_get_current_frame_number(usX2Y->dev),
-		   subs->endpoint, usb_pipein(urb->pipe) ? "in" : "out",
-		   usX2Y->wait_iso_frame, urb->start_frame, usX2Y->wait_iso_frame);
-	usX2Y_clients_stop(usX2Y);
-}
-
 static void i_usX2Y_urb_complete(struct urb *urb)
 {
 	struct snd_usX2Y_substream *subs = urb->context;
@@ -324,12 +315,9 @@
 		usX2Y_error_urb_status(usX2Y, subs, urb);
 		return;
 	}
-	if (likely((urb->start_frame & 0xFFFF) == (usX2Y->wait_iso_frame & 0xFFFF)))
-		subs->completed_urb = urb;
-	else {
-		usX2Y_error_sequence(usX2Y, subs, urb);
-		return;
-	}
+
+	subs->completed_urb = urb;
+
 	{
 		struct snd_usX2Y_substream *capsubs = usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE],
 			*playbacksubs = usX2Y->subs[SNDRV_PCM_STREAM_PLAYBACK];
diff --git a/sound/usb/usx2y/usx2yhwdeppcm.c b/sound/usb/usx2y/usx2yhwdeppcm.c
index f2a1acd..814d0e8 100644
--- a/sound/usb/usx2y/usx2yhwdeppcm.c
+++ b/sound/usb/usx2y/usx2yhwdeppcm.c
@@ -244,13 +244,8 @@
 		usX2Y_error_urb_status(usX2Y, subs, urb);
 		return;
 	}
-	if (likely((urb->start_frame & 0xFFFF) == (usX2Y->wait_iso_frame & 0xFFFF)))
-		subs->completed_urb = urb;
-	else {
-		usX2Y_error_sequence(usX2Y, subs, urb);
-		return;
-	}
 
+	subs->completed_urb = urb;
 	capsubs = usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE];
 	capsubs2 = usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE + 2];
 	playbacksubs = usX2Y->subs[SNDRV_PCM_STREAM_PLAYBACK];
diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
index 5a1f648..274e178 100644
--- a/tools/hv/hv_kvp_daemon.c
+++ b/tools/hv/hv_kvp_daemon.c
@@ -1016,9 +1016,10 @@
 
 				if (sn_offset == 0)
 					strcpy(sn_str, cidr_mask);
-				else
+				else {
+					strcat((char *)ip_buffer->sub_net, ";");
 					strcat(sn_str, cidr_mask);
-				strcat((char *)ip_buffer->sub_net, ";");
+				}
 				sn_offset += strlen(sn_str) + 1;
 			}
 
diff --git a/tools/lib/lk/debugfs.c b/tools/lib/lk/debugfs.c
index 099e7cd..7c43479 100644
--- a/tools/lib/lk/debugfs.c
+++ b/tools/lib/lk/debugfs.c
@@ -5,7 +5,6 @@
 #include <stdbool.h>
 #include <sys/vfs.h>
 #include <sys/mount.h>
-#include <linux/magic.h>
 #include <linux/kernel.h>
 
 #include "debugfs.h"
diff --git a/tools/perf/config/utilities.mak b/tools/perf/config/utilities.mak
index 8ef3bd3..3e89719 100644
--- a/tools/perf/config/utilities.mak
+++ b/tools/perf/config/utilities.mak
@@ -173,7 +173,7 @@
 # Usage: absolute-executable-path-or-empty = $(call get-executable-or-default,variable,default)
 #
 define get-executable-or-default
-$(if $($(1)),$(call _ge_attempt,$($(1)),$(1)),$(call _ge_attempt,$(2),$(1)))
+$(if $($(1)),$(call _ge_attempt,$($(1)),$(1)),$(call _ge_attempt,$(2)))
 endef
 _ge_attempt = $(if $(get-executable),$(get-executable),$(_gea_warn)$(call _gea_err,$(2)))
 _gea_warn = $(warning The path '$(1)' is not executable.)
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 6fcb9de..8bcdf9e 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -21,6 +21,7 @@
 static inline int is_anon_memory(const char *filename)
 {
 	return !strcmp(filename, "//anon") ||
+	       !strcmp(filename, "/dev/zero (deleted)") ||
 	       !strcmp(filename, "/anon_hugepage (deleted)");
 }